Merge tag 'perf-urgent-2020-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Thomas Gleixner:
 "A set of fixes and improvements for the perf subsystem:

  Kernel fixes:

   - Install cgroup events to the correct CPU context to prevent a
     potential list double add

   - Prevent an integer underflow in the perf mlock accounting

   - Add a missing prototype for arch_perf_update_userpage()

  Tooling:

   - Add a missing unlock in the error path of maps__insert() in perf
     maps.

   - Fix the build with the latest libbfd

   - Fix the perf parser so it does not delete parse event terms, which
     caused a regression when using perf with ARM CoreSight, as the sink
     configuration was missing due to the deletion.

   - Fix the double free in the perf CPU map merging test case

   - Add the missing ustring support for the perf probe command"

* tag 'perf-urgent-2020-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf maps: Add missing unlock to maps__insert() error case
  perf probe: Add ustring support for perf probe command
  perf: Make perf able to build with latest libbfd
  perf test: Fix test case Merge cpu map
  perf parse: Copy string to perf_evsel_config_term
  perf parse: Refactor 'struct perf_evsel_config_term'
  kernel/events: Add a missing prototype for arch_perf_update_userpage()
  perf/cgroups: Install cgroup events to correct cpuctx
  perf/core: Fix mlock accounting in perf_mmap()
diff --git a/.mailmap b/.mailmap
index bf58162..ffb8f28 100644
--- a/.mailmap
+++ b/.mailmap
@@ -18,6 +18,7 @@
 Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
 Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@intel.com>
 Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@linaro.org>
+Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
 Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
 Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
 Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
@@ -27,6 +28,8 @@
 Andreas Herrmann <aherrman@de.ibm.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
+Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
+Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
 Antoine Tenart <antoine.tenart@free-electrons.com>
@@ -259,6 +262,7 @@
 Tejun Heo <htejun@gmail.com>
 Thomas Graf <tgraf@suug.ch>
 Thomas Pedersen <twp@codeaurora.org>
+Tiezhu Yang <yangtiezhu@loongson.cn> <kernelpatch@126.com>
 Todor Tomov <todor.too@gmail.com> <todor.tomov@linaro.org>
 Tony Luck <tony.luck@intel.com>
 TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
diff --git a/CREDITS b/CREDITS
index 9602b0f..a97d328 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3302,7 +3302,9 @@
 N: Aleksa Sarai
 E: cyphar@cyphar.com
 W: https://www.cyphar.com/
-D: `pids` cgroup subsystem
+D: /sys/fs/cgroup/pids
+D: openat2(2)
+S: Sydney, Australia
 
 N: Dipankar Sarma
 E: dipankar@in.ibm.com
diff --git a/Documentation/ABI/testing/configfs-usb-gadget b/Documentation/ABI/testing/configfs-usb-gadget
index 95a3658..4594cc2 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget
+++ b/Documentation/ABI/testing/configfs-usb-gadget
@@ -16,6 +16,10 @@
 				write UDC's name found in /sys/class/udc/*
 				to bind a gadget, empty string "" to unbind.
 
+		max_speed	- maximum speed the driver supports. Valid
+				names are super-speed-plus, super-speed,
+				high-speed, full-speed, and low-speed.
+
 		bDeviceClass	- USB device class code
 		bDeviceSubClass	- USB device subclass code
 		bDeviceProtocol	- USB device protocol code
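
As a quick usage sketch for the max_speed attribute described above (the
gadget name "g1" and a configfs mounted at /sys/kernel/config are assumed
for illustration)::

	# limit an existing gadget to high-speed operation
	echo high-speed > /sys/kernel/config/usb_gadget/g1/max_speed
	# read back the currently configured maximum speed
	cat /sys/kernel/config/usb_gadget/g1/max_speed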
diff --git a/Documentation/ABI/testing/rtc-cdev b/Documentation/ABI/testing/rtc-cdev
index 9744728..25910c3 100644
--- a/Documentation/ABI/testing/rtc-cdev
+++ b/Documentation/ABI/testing/rtc-cdev
@@ -33,6 +33,14 @@
 		  Requires a separate RTC_PIE_ON call to enable the periodic
 		  interrupts.
 
+		* RTC_VL_READ: Read the voltage inputs status of the RTC when
+		  supported. The value is a bit field of RTC_VL_*, giving the
+		  status of the main and backup voltages.
+
+		* RTC_VL_CLEAR: Clear the voltage status of the RTC. Some RTCs
+		  need user interaction when the backup power provider is
+		  replaced or charged to be able to clear the status.
+
 		The ioctl() calls supported by the older /dev/rtc interface are
 		also supported by the newer RTC class framework. However,
 		because the chips and systems are not standardized, some PC/AT
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index faaa216..d3e53a6 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -1726,3 +1726,16 @@
 Description:
 		List of valid periods (in seconds) for which the light intensity
 		must be above the threshold level before interrupt is asserted.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_filter_notch_center_frequency
+KernelVersion:	5.5
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Center frequency in Hz for a notch filter. Used e.g. for line
+		noise suppression.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_temp_thermocouple_type
+KernelVersion:	5.5
+Contact:	linux-iio@vger.kernel.org
+Description:
+		One of the following thermocouple types: B, E, J, K, N, R, S, T.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dma-buffer b/Documentation/ABI/testing/sysfs-bus-iio-dma-buffer
new file mode 100644
index 0000000..d526e65
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-dma-buffer
@@ -0,0 +1,19 @@
+What:		/sys/bus/iio/devices/iio:deviceX/buffer/length_align_bytes
+KernelVersion:	5.4
+Contact:	linux-iio@vger.kernel.org
+Description:
+		DMA buffers tend to have an alignment requirement. If this
+		alignment requirement is not met, samples might be dropped
+		from the buffer.
+
+		This property reports the alignment requirement in bytes.
+		This means that the buffer size in bytes needs to be an integer
+		multiple of the number reported by this file.
+
+		The alignment requirement in number of sample sets will depend
+		on the enabled channels and the bytes per channel. This means
+		that the alignment requirement in sample sets might change
+		depending on which and how many channels are enabled, whereas
+		the alignment requirement reported in bytes by this property
+		will remain static and does not depend on which channels are
+		enabled.
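
A small sketch of how a reader of this attribute might use it (device index
0 and the requested size are assumptions)::

	align=$(cat /sys/bus/iio/devices/iio:device0/buffer/length_align_bytes)
	want=10000
	# round the requested buffer size up to an integer multiple of the alignment
	size=$(( (want + align - 1) / align * align ))
	echo "using a buffer of $size bytes"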
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power
index 27edc06..bf3b48f 100644
--- a/Documentation/ABI/testing/sysfs-class-power
+++ b/Documentation/ABI/testing/sysfs-class-power
@@ -189,7 +189,8 @@
 		Access: Read
 		Valid values: "Unknown", "Good", "Overheat", "Dead",
 			      "Over voltage", "Unspecified failure", "Cold",
-			      "Watchdog timer expire", "Safety timer expire"
+			      "Watchdog timer expire", "Safety timer expire",
+			      "Over current"
 
 What:		/sys/class/power_supply/<supply_name>/precharge_current
 Date:		June 2017
diff --git a/Documentation/ABI/testing/sysfs-driver-pciback b/Documentation/ABI/testing/sysfs-driver-pciback
index 6a733bf..73308c2 100644
--- a/Documentation/ABI/testing/sysfs-driver-pciback
+++ b/Documentation/ABI/testing/sysfs-driver-pciback
@@ -11,3 +11,16 @@
                 #echo 00:19.0-E0:2:FF > /sys/bus/pci/drivers/pciback/quirks
                 will allow the guest to read and write to the configuration
                 register 0x0E.
+
+What:           /sys/bus/pci/drivers/pciback/allow_interrupt_control
+Date:           Jan 2020
+KernelVersion:  5.6
+Contact:        xen-devel@lists.xenproject.org
+Description:
+                List of devices which can have the interrupt control flags
+                (INTx, MSI, MSI-X) set by a connected guest. It is meant to
+                be set only when the guest is a stubdomain hosting a device
+                model (qemu) and the actual device is assigned to an HVM. It
+                is not safe (similar to the permissive attribute) to set for
+                devices assigned to a PV guest. The device is automatically
+                removed from this
+                list when the connected pcifront terminates.
diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkback b/Documentation/ABI/testing/sysfs-driver-xen-blkback
index 4e7babb3..ecb7942 100644
--- a/Documentation/ABI/testing/sysfs-driver-xen-blkback
+++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback
@@ -25,3 +25,13 @@
                 allocated without being in use. The time is in
                 seconds, 0 means indefinitely long.
                 The default is 60 seconds.
+
+What:           /sys/module/xen_blkback/parameters/buffer_squeeze_duration_ms
+Date:           December 2019
+KernelVersion:  5.6
+Contact:        SeongJae Park <sjpark@amazon.de>
+Description:
+                When memory pressure is reported to blkback, this option
+                controls the duration in milliseconds that blkback will not
+                cache any page not backed by a grant mapping.
+                The default is 10ms.
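
For illustration only (the value 50 is arbitrary), the parameter can be
inspected and tuned like any other module parameter::

	cat /sys/module/xen_blkback/parameters/buffer_squeeze_duration_ms
	echo 50 > /sys/module/xen_blkback/parameters/buffer_squeeze_duration_ms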
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index aedeae1..1a6cd53 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -1,37 +1,40 @@
 What:		/sys/fs/f2fs/<disk>/gc_max_sleep_time
 Date:		July 2013
 Contact:	"Namjae Jeon" <namjae.jeon@samsung.com>
-Description:
-		 Controls the maximun sleep time for gc_thread. Time
-		 is in milliseconds.
+Description:	Controls the maximum sleep time for gc_thread. Time
+		is in milliseconds.
 
 What:		/sys/fs/f2fs/<disk>/gc_min_sleep_time
 Date:		July 2013
 Contact:	"Namjae Jeon" <namjae.jeon@samsung.com>
-Description:
-		 Controls the minimum sleep time for gc_thread. Time
-		 is in milliseconds.
+Description:	Controls the minimum sleep time for gc_thread. Time
+		is in milliseconds.
 
 What:		/sys/fs/f2fs/<disk>/gc_no_gc_sleep_time
 Date:		July 2013
 Contact:	"Namjae Jeon" <namjae.jeon@samsung.com>
-Description:
-		 Controls the default sleep time for gc_thread. Time
-		 is in milliseconds.
+Description:	Controls the default sleep time for gc_thread. Time
+		is in milliseconds.
 
 What:		/sys/fs/f2fs/<disk>/gc_idle
 Date:		July 2013
 Contact:	"Namjae Jeon" <namjae.jeon@samsung.com>
-Description:
-		 Controls the victim selection policy for garbage collection.
+Description:	Controls the victim selection policy for garbage collection.
+		Setting gc_idle = 0(default) will disable this option. Setting
+		gc_idle = 1 will select the Cost Benefit approach & setting
+		gc_idle = 2 will select the greedy approach.
 
 What:		/sys/fs/f2fs/<disk>/reclaim_segments
 Date:		October 2013
 Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
-Description:
-		 Controls the issue rate of segment discard commands.
+Description:	This parameter controls the number of prefree segments to be
+		reclaimed. If the number of prefree segments is larger than
+		the number of segments in the proportion to the percentage
+		over total volume size, f2fs tries to conduct checkpoint to
+		reclaim the prefree segments to free segments.
+		By default, 5% over total # of segments.
 
-What:		/sys/fs/f2fs/<disk>/max_blkaddr
+What:		/sys/fs/f2fs/<disk>/main_blkaddr
 Date:		November 2019
 Contact:	"Ramon Pantin" <pantin@google.com>
 Description:
@@ -40,227 +43,278 @@
 What:		/sys/fs/f2fs/<disk>/ipu_policy
 Date:		November 2013
 Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
-Description:
-		 Controls the in-place-update policy.
+Description:	Controls the in-place-update policy for updates in f2fs.
+		User can set:
+		0x01: F2FS_IPU_FORCE, 0x02: F2FS_IPU_SSR,
+		0x04: F2FS_IPU_UTIL,  0x08: F2FS_IPU_SSR_UTIL,
+		0x10: F2FS_IPU_FSYNC, 0x20: F2FS_IPU_ASYNC,
+		0x40: F2FS_IPU_NOCACHE.
+		Refer to segment.h for details.
 
 What:		/sys/fs/f2fs/<disk>/min_ipu_util
 Date:		November 2013
 Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
-Description:
-		 Controls the FS utilization condition for the in-place-update
-		 policies.
+Description:	Controls the FS utilization condition for the in-place-update
+		policies. It is used by F2FS_IPU_UTIL and F2FS_IPU_SSR_UTIL policies.
 
 What:		/sys/fs/f2fs/<disk>/min_fsync_blocks
 Date:		September 2014
 Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
-Description:
-		 Controls the dirty page count condition for the in-place-update
-		 policies.
+Description:	Controls the dirty page count condition for the in-place-update
+		policies.
 
 What:		/sys/fs/f2fs/<disk>/min_seq_blocks
 Date:		August 2018
 Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
-Description:
-		 Controls the dirty page count condition for batched sequential
-		 writes in ->writepages.
-
+Description:	Controls the dirty page count condition for batched sequential
+		writes in writepages.
 
 What:		/sys/fs/f2fs/<disk>/min_hot_blocks
 Date:		March 2017
 Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
-Description:
-		 Controls the dirty page count condition for redefining hot data.
+Description:	Controls the dirty page count condition for redefining hot data.
 
 What:		/sys/fs/f2fs/<disk>/min_ssr_sections
 Date:		October 2017
 Contact:	"Chao Yu" <yuchao0@huawei.com>
-Description:
-		 Controls the fee section threshold to trigger SSR allocation.
+Description:	Controls the free section threshold to trigger SSR allocation.
+		If this is large, SSR mode will be enabled early.
 
 What:		/sys/fs/f2fs/<disk>/max_small_discards
 Date:		November 2013
 Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
-Description:
-		 Controls the issue rate of small discard commands.
+Description:	Controls the issue rate of discard commands that consist of small
+		blocks less than 2MB. The candidates to be discarded are cached until
+		checkpoint is triggered, and issued during the checkpoint.
+		By default, it is disabled with 0.
 
-What:          /sys/fs/f2fs/<disk>/discard_granularity
-Date:          July 2017
-Contact:       "Chao Yu" <yuchao0@huawei.com>
-Description:
-		Controls discard granularity of inner discard thread, inner thread
+What:		/sys/fs/f2fs/<disk>/discard_granularity
+Date:		July 2017
+Contact:	"Chao Yu" <yuchao0@huawei.com>
+Description:	Controls discard granularity of inner discard thread. Inner thread
 		will not issue discards with size that is smaller than granularity.
-		The unit size is one block, now only support configuring in range
-		of [1, 512].
+		The unit size is one block(4KB), now only support configuring
+		in range of [1, 512]. Default value is 4(=16KB).
 
-What:          /sys/fs/f2fs/<disk>/umount_discard_timeout
-Date:          January 2019
-Contact:       "Jaegeuk Kim" <jaegeuk@kernel.org>
-Description:
-		Set timeout to issue discard commands during umount.
-		Default: 5 secs
+What:		/sys/fs/f2fs/<disk>/umount_discard_timeout
+Date:		January 2019
+Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:	Set timeout to issue discard commands during umount.
+	        Default: 5 secs
 
 What:		/sys/fs/f2fs/<disk>/max_victim_search
 Date:		January 2014
 Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
-Description:
-		 Controls the number of trials to find a victim segment.
+Description:	Controls the number of trials to find a victim segment
+		when conducting SSR and cleaning operations. The default value
+		is 4096, which covers an 8GB block address range.
 
 What:		/sys/fs/f2fs/<disk>/migration_granularity
 Date:		October 2018
 Contact:	"Chao Yu" <yuchao0@huawei.com>
-Description:
-		 Controls migration granularity of garbage collection on large
-		 section, it can let GC move partial segment{s} of one section
-		 in one GC cycle, so that dispersing heavy overhead GC to
-		 multiple lightweight one.
+Description:	Controls migration granularity of garbage collection on large
+		section; it can let GC move partial segment{s} of one section
+		in one GC cycle, so that heavy-overhead GC is dispersed into
+		multiple lightweight ones.
 
 What:		/sys/fs/f2fs/<disk>/dir_level
 Date:		March 2014
 Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
-Description:
-		 Controls the directory level for large directory.
+Description:	Controls the directory level for a large directory. If a
+		directory has a large number of files, the file lookup latency
+		can be reduced by increasing this dir_level value. Otherwise,
+		this value should be decreased to reduce the space overhead.
+		The default value is 0.
 
 What:		/sys/fs/f2fs/<disk>/ram_thresh
 Date:		March 2014
 Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
-Description:
-		 Controls the memory footprint used by f2fs.
+Description:	Controls the memory footprint used by free nids and cached
+		nat entries. By default, 1 is set, which indicates
+		10 MB / 1 GB RAM.
 
 What:		/sys/fs/f2fs/<disk>/batched_trim_sections
 Date:		February 2015
 Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
-Description:
-		 Controls the trimming rate in batch mode.
-		 <deprecated>
+Description:	Controls the trimming rate in batch mode.
+		<deprecated>
 
 What:		/sys/fs/f2fs/<disk>/cp_interval
 Date:		October 2015
 Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
-Description:
-		 Controls the checkpoint timing.
+Description:	Controls the checkpoint timing, set to 60 seconds by default.
 
 What:		/sys/fs/f2fs/<disk>/idle_interval
 Date:		January 2016
 Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
-Description:
-		 Controls the idle timing for all paths other than
-		 discard and gc path.
+Description:	Controls the idle timing of system, if there is no FS operation
+		during given interval.
+		Set to 5 seconds by default.
 
 What:		/sys/fs/f2fs/<disk>/discard_idle_interval
 Date:		September 2018
 Contact:	"Chao Yu" <yuchao0@huawei.com>
 Contact:	"Sahitya Tummala" <stummala@codeaurora.org>
-Description:
-		 Controls the idle timing for discard path.
+Description:	Controls the idle timing of discard thread given
+		this time interval.
+		Default is 5 secs.
 
 What:		/sys/fs/f2fs/<disk>/gc_idle_interval
 Date:		September 2018
 Contact:	"Chao Yu" <yuchao0@huawei.com>
 Contact:	"Sahitya Tummala" <stummala@codeaurora.org>
-Description:
-		 Controls the idle timing for gc path.
+Description:    Controls the idle timing for gc path. Set to 5 seconds by default.
 
 What:		/sys/fs/f2fs/<disk>/iostat_enable
 Date:		August 2017
 Contact:	"Chao Yu" <yuchao0@huawei.com>
-Description:
-		 Controls to enable/disable IO stat.
+Description:	Controls to enable/disable IO stat.
 
 What:		/sys/fs/f2fs/<disk>/ra_nid_pages
 Date:		October 2015
 Contact:	"Chao Yu" <chao2.yu@samsung.com>
-Description:
-		 Controls the count of nid pages to be readaheaded.
+Description:	Controls the count of nid pages to be readaheaded.
+		When building free nids, F2FS reads NAT blocks ahead to speed
+		this up. Default is 0.
 
 What:		/sys/fs/f2fs/<disk>/dirty_nats_ratio
 Date:		January 2016
 Contact:	"Chao Yu" <chao2.yu@samsung.com>
-Description:
-		 Controls dirty nat entries ratio threshold, if current
-		 ratio exceeds configured threshold, checkpoint will
-		 be triggered for flushing dirty nat entries.
+Description:	Controls dirty nat entries ratio threshold, if current
+		ratio exceeds configured threshold, checkpoint will
+		be triggered for flushing dirty nat entries.
 
 What:		/sys/fs/f2fs/<disk>/lifetime_write_kbytes
 Date:		January 2016
 Contact:	"Shuoran Liu" <liushuoran@huawei.com>
-Description:
-		 Shows total written kbytes issued to disk.
+Description:	Shows total written kbytes issued to disk.
 
 What:		/sys/fs/f2fs/<disk>/features
 Date:		July 2017
 Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
-Description:
-		 Shows all enabled features in current device.
+Description:	Shows all enabled features in current device.
 
 What:		/sys/fs/f2fs/<disk>/inject_rate
 Date:		May 2016
 Contact:	"Sheng Yong" <shengyong1@huawei.com>
-Description:
-		 Controls the injection rate.
+Description:	Controls the injection rate of arbitrary faults.
 
 What:		/sys/fs/f2fs/<disk>/inject_type
 Date:		May 2016
 Contact:	"Sheng Yong" <shengyong1@huawei.com>
-Description:
-		 Controls the injection type.
+Description:	Controls the injection type of arbitrary faults.
+
+What:		/sys/fs/f2fs/<disk>/dirty_segments
+Date:		October 2017
+Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:	Shows the number of dirty segments.
 
 What:		/sys/fs/f2fs/<disk>/reserved_blocks
 Date:		June 2017
 Contact:	"Chao Yu" <yuchao0@huawei.com>
-Description:
-		 Controls target reserved blocks in system, the threshold
-		 is soft, it could exceed current available user space.
+Description:	Controls target reserved blocks in system. The threshold is
+		soft, so it could exceed the currently available user space.
 
 What:		/sys/fs/f2fs/<disk>/current_reserved_blocks
 Date:		October 2017
 Contact:	"Yunlong Song" <yunlong.song@huawei.com>
 Contact:	"Chao Yu" <yuchao0@huawei.com>
-Description:
-		 Shows current reserved blocks in system, it may be temporarily
-		 smaller than target_reserved_blocks, but will gradually
-		 increase to target_reserved_blocks when more free blocks are
-		 freed by user later.
+Description:	Shows current reserved blocks in system, it may be temporarily
+		smaller than target_reserved_blocks, but will gradually
+		increase to target_reserved_blocks when more free blocks are
+		freed by user later.
 
 What:		/sys/fs/f2fs/<disk>/gc_urgent
 Date:		August 2017
 Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
-Description:
-		 Do background GC agressively
+Description:	Do background GC aggressively when set. When gc_urgent = 1,
+		the background thread starts to do GC at the given
+		gc_urgent_sleep_time interval. It is set to 0 by default.
 
 What:		/sys/fs/f2fs/<disk>/gc_urgent_sleep_time
 Date:		August 2017
 Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
-Description:
-		 Controls sleep time of GC urgent mode
+Description:	Controls sleep time of GC urgent mode. Set to 500ms by default.
 
 What:		/sys/fs/f2fs/<disk>/readdir_ra
 Date:		November 2017
 Contact:	"Sheng Yong" <shengyong1@huawei.com>
-Description:
-		 Controls readahead inode block in readdir.
+Description:	Controls readahead inode block in readdir. Enabled by default.
+
+What:		/sys/fs/f2fs/<disk>/gc_pin_file_thresh
+Date:		January 2018
+Contact:	Jaegeuk Kim <jaegeuk@kernel.org>
+Description:	This indicates how many GC failures are allowed for the pinned
+		file. If the count exceeds this, F2FS doesn't guarantee its
+		pinning state. 2048 trials is set by default.
 
 What:		/sys/fs/f2fs/<disk>/extension_list
+Date:		February 2018
 Contact:	"Chao Yu" <yuchao0@huawei.com>
-Description:
-		 Used to control configure extension list:
-		 - Query: cat /sys/fs/f2fs/<disk>/extension_list
-		 - Add: echo '[h/c]extension' > /sys/fs/f2fs/<disk>/extension_list
-		 - Del: echo '[h/c]!extension' > /sys/fs/f2fs/<disk>/extension_list
-		 - [h] means add/del hot file extension
-		 - [c] means add/del cold file extension
+Description:	Used to control configure extension list:
+		- Query: cat /sys/fs/f2fs/<disk>/extension_list
+		- Add: echo '[h/c]extension' > /sys/fs/f2fs/<disk>/extension_list
+		- Del: echo '[h/c]!extension' > /sys/fs/f2fs/<disk>/extension_list
+		- [h] means add/del hot file extension
+		- [c] means add/del cold file extension
 
 What:		/sys/fs/f2fs/<disk>/unusable
 Date		April 2019
 Contact:	"Daniel Rosenberg" <drosen@google.com>
-Description:
-		If checkpoint=disable, it displays the number of blocks that are unusable.
-                If checkpoint=enable it displays the enumber of blocks that would be unusable
-                if checkpoint=disable were to be set.
+Description:	If checkpoint=disable, it displays the number of blocks that
+		are unusable.
+		If checkpoint=enable, it displays the number of blocks that
+		would be unusable if checkpoint=disable were to be set.
 
 What:		/sys/fs/f2fs/<disk>/encoding
 Date		July 2019
 Contact:	"Daniel Rosenberg" <drosen@google.com>
-Description:
-		Displays name and version of the encoding set for the filesystem.
-                If no encoding is set, displays (none)
+Description:	Displays name and version of the encoding set for the filesystem.
+		If no encoding is set, displays (none)
+
+What:		/sys/fs/f2fs/<disk>/free_segments
+Date:		September 2019
+Contact:	"Hridya Valsaraju" <hridya@google.com>
+Description:	Number of free segments in disk.
+
+What:		/sys/fs/f2fs/<disk>/cp_foreground_calls
+Date:		September 2019
+Contact:	"Hridya Valsaraju" <hridya@google.com>
+Description:	Number of checkpoint operations performed on demand. Available when
+		CONFIG_F2FS_STAT_FS=y.
+
+What:		/sys/fs/f2fs/<disk>/cp_background_calls
+Date:		September 2019
+Contact:	"Hridya Valsaraju" <hridya@google.com>
+Description:	Number of checkpoint operations performed in the background to
+		free segments. Available when CONFIG_F2FS_STAT_FS=y.
+
+What:		/sys/fs/f2fs/<disk>/gc_foreground_calls
+Date:		September 2019
+Contact:	"Hridya Valsaraju" <hridya@google.com>
+Description:	Number of garbage collection operations performed on demand.
+		Available when CONFIG_F2FS_STAT_FS=y.
+
+What:		/sys/fs/f2fs/<disk>/gc_background_calls
+Date:		September 2019
+Contact:	"Hridya Valsaraju" <hridya@google.com>
+Description:	Number of garbage collection operations triggered in background.
+		Available when CONFIG_F2FS_STAT_FS=y.
+
+What:		/sys/fs/f2fs/<disk>/moved_blocks_foreground
+Date:		September 2019
+Contact:	"Hridya Valsaraju" <hridya@google.com>
+Description:	Number of blocks moved by garbage collection in foreground.
+		Available when CONFIG_F2FS_STAT_FS=y.
+
+What:		/sys/fs/f2fs/<disk>/moved_blocks_background
+Date:		September 2019
+Contact:	"Hridya Valsaraju" <hridya@google.com>
+Description:	Number of blocks moved by garbage collection in background.
+		Available when CONFIG_F2FS_STAT_FS=y.
+
+What:		/sys/fs/f2fs/<disk>/avg_vblocks
+Date:		September 2019
+Contact:	"Hridya Valsaraju" <hridya@google.com>
+Description:	Average number of valid blocks.
+		Available when CONFIG_F2FS_STAT_FS=y.
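
A short sketch tying a few of the attributes above together (the device name
"sda1" and the chosen values are assumptions)::

	# run background GC aggressively, waking the GC thread every 100ms
	echo 100 > /sys/fs/f2fs/sda1/gc_urgent_sleep_time
	echo 1 > /sys/fs/f2fs/sda1/gc_urgent
	# ... later, return to normal background GC
	echo 0 > /sys/fs/f2fs/sda1/gc_urgent

	# mark "log" as a hot file extension using the documented [h/c]extension format
	echo 'hlog' > /sys/fs/f2fs/sda1/extension_list
	cat /sys/fs/f2fs/sda1/extension_list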
diff --git a/Documentation/ABI/testing/usb-charger-uevent b/Documentation/ABI/testing/usb-charger-uevent
new file mode 100644
index 0000000..419a92d
--- /dev/null
+++ b/Documentation/ABI/testing/usb-charger-uevent
@@ -0,0 +1,46 @@
+What:		Raise a uevent when a USB charger is inserted or removed
+Date:		2020-01-14
+KernelVersion:	5.6
+Contact:	linux-usb@vger.kernel.org
+Description:	There are two USB charger states:
+		USB_CHARGER_ABSENT
+		USB_CHARGER_PRESENT
+		There are five USB charger types:
+		USB_CHARGER_UNKNOWN_TYPE: Charger type is unknown
+		USB_CHARGER_SDP_TYPE: Standard Downstream Port
+		USB_CHARGER_CDP_TYPE: Charging Downstream Port
+		USB_CHARGER_DCP_TYPE: Dedicated Charging Port
+		USB_CHARGER_ACA_TYPE: Accessory Charging Adapter
+		https://www.usb.org/document-library/battery-charging-v12-spec-and-adopters-agreement
+
+		Here are two examples taken using udevadm monitor -p when
+		USB charger is online:
+		UDEV  change   /devices/soc0/usbphynop1 (platform)
+		ACTION=change
+		DEVPATH=/devices/soc0/usbphynop1
+		DRIVER=usb_phy_generic
+		MODALIAS=of:Nusbphynop1T(null)Cusb-nop-xceiv
+		OF_COMPATIBLE_0=usb-nop-xceiv
+		OF_COMPATIBLE_N=1
+		OF_FULLNAME=/usbphynop1
+		OF_NAME=usbphynop1
+		SEQNUM=2493
+		SUBSYSTEM=platform
+		USB_CHARGER_STATE=USB_CHARGER_PRESENT
+		USB_CHARGER_TYPE=USB_CHARGER_SDP_TYPE
+		USEC_INITIALIZED=227422826
+
+		USB charger is offline:
+		KERNEL change   /devices/soc0/usbphynop1 (platform)
+		ACTION=change
+		DEVPATH=/devices/soc0/usbphynop1
+		DRIVER=usb_phy_generic
+		MODALIAS=of:Nusbphynop1T(null)Cusb-nop-xceiv
+		OF_COMPATIBLE_0=usb-nop-xceiv
+		OF_COMPATIBLE_N=1
+		OF_FULLNAME=/usbphynop1
+		OF_NAME=usbphynop1
+		SEQNUM=2494
+		SUBSYSTEM=platform
+		USB_CHARGER_STATE=USB_CHARGER_ABSENT
+		USB_CHARGER_TYPE=USB_CHARGER_UNKNOWN_TYPE
diff --git a/Documentation/PCI/msi-howto.rst b/Documentation/PCI/msi-howto.rst
index 994cbb6..aa2046a 100644
--- a/Documentation/PCI/msi-howto.rst
+++ b/Documentation/PCI/msi-howto.rst
@@ -283,5 +283,5 @@
 to bridges between the PCI root and the device, MSIs are disabled.
 
 It is also worth checking the device driver to see whether it supports MSIs.
-For example, it may contain calls to pci_irq_alloc_vectors() with the
+For example, it may contain calls to pci_alloc_irq_vectors() with the
 PCI_IRQ_MSI or PCI_IRQ_MSIX flags.
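
As a rough way to perform that check (the driver path and the PCI address
below are placeholders)::

	# look for MSI/MSI-X vector allocation in a driver's source
	grep -rn "pci_alloc_irq_vectors" drivers/path/to/driver/
	# or verify at run time whether MSI or MSI-X is enabled on a device
	lspci -vs 00:19.0 | grep -i msi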
diff --git a/Documentation/admin-guide/blockdev/zram.rst b/Documentation/admin-guide/blockdev/zram.rst
index 6eccf13..27c77d8 100644
--- a/Documentation/admin-guide/blockdev/zram.rst
+++ b/Documentation/admin-guide/blockdev/zram.rst
@@ -1,15 +1,15 @@
 ========================================
-zram: Compressed RAM based block devices
+zram: Compressed RAM-based block devices
 ========================================
 
 Introduction
 ============
 
-The zram module creates RAM based block devices named /dev/zram<id>
+The zram module creates RAM-based block devices named /dev/zram<id>
 (<id> = 0, 1, ...). Pages written to these disks are compressed and stored
 in memory itself. These disks allow very fast I/O and compression provides
-good amounts of memory savings. Some of the usecases include /tmp storage,
-use as swap disks, various caches under /var and maybe many more :)
+good amounts of memory savings. Some of the use cases include /tmp storage,
+use as swap disks, various caches under /var and maybe many more. :)
 
 Statistics for individual zram devices are exported through sysfs nodes at
 /sys/block/zram<id>/
@@ -43,17 +43,17 @@
 
 ========  =============================================================
 -EBUSY	  an attempt to modify an attribute that cannot be changed once
-	  the device has been initialised. Please reset device first;
+	  the device has been initialised. Please reset device first.
 -ENOMEM	  zram was not able to allocate enough memory to fulfil your
-	  needs;
+	  needs.
 -EINVAL	  invalid input has been provided.
 ========  =============================================================
 
-If you use 'echo', the returned value that is changed by 'echo' utility,
+If you use 'echo', the returned value is set by the 'echo' utility,
 and, in general case, something like::
 
 	echo 3 > /sys/block/zram0/max_comp_streams
-	if [ $? -ne 0 ];
+	if [ $? -ne 0 ]; then
 		handle_error
 	fi
 
@@ -65,7 +65,8 @@
 ::
 
 	modprobe zram num_devices=4
-	This creates 4 devices: /dev/zram{0,1,2,3}
+
+This creates 4 devices: /dev/zram{0,1,2,3}
 
 num_devices parameter is optional and tells zram how many devices should be
 pre-created. Default: 1.
@@ -73,12 +74,12 @@
 2) Set max number of compression streams
 ========================================
 
-Regardless the value passed to this attribute, ZRAM will always
-allocate multiple compression streams - one per online CPUs - thus
+Regardless of the value passed to this attribute, ZRAM will always
+allocate multiple compression streams - one per online CPU - thus
 allowing several concurrent compression operations. The number of
 allocated compression streams goes down when some of the CPUs
 become offline. There is no single-compression-stream mode anymore,
-unless you are running a UP system or has only 1 CPU online.
+unless you are running a UP system or have only 1 CPU online.
 
 To find out how many streams are currently available::
 
@@ -89,7 +90,7 @@
 
 Using comp_algorithm device attribute one can see available and
 currently selected (shown in square brackets) compression algorithms,
-change selected compression algorithm (once the device is initialised
+or change the selected compression algorithm (once the device is initialised
 there is no way to change compression algorithm).
 
 Examples::
@@ -167,9 +168,9 @@
 zram provides a control interface, which enables dynamic (on-demand) device
 addition and removal.
 
-In order to add a new /dev/zramX device, perform read operation on hot_add
-attribute. This will return either new device's device id (meaning that you
-can use /dev/zram<id>) or error code.
+In order to add a new /dev/zramX device, perform a read operation on the hot_add
+attribute. This will return either the new device's device id (meaning that you
+can use /dev/zram<id>) or an error code.
 
 Example::
 
@@ -186,8 +187,8 @@
 
 Per-device statistics are exported as various nodes under /sys/block/zram<id>/
 
-A brief description of exported device attributes. For more details please
-read Documentation/ABI/testing/sysfs-block-zram.
+A brief description of exported device attributes follows. For more details
+please read Documentation/ABI/testing/sysfs-block-zram.
 
 ======================  ======  ===============================================
 Name            	access            description
@@ -245,7 +246,7 @@
 
 File /sys/block/zram<id>/mm_stat
 
-The stat file represents device's mm statistics. It consists of a single
+The mm_stat file represents the device's mm statistics. It consists of a single
 line of text and contains the following stats separated by whitespace:
 
  ================ =============================================================
@@ -261,7 +262,7 @@
                   Unit: bytes
  mem_limit        the maximum amount of memory ZRAM can use to store
                   the compressed data
- mem_used_max     the maximum amount of memory zram have consumed to
+ mem_used_max     the maximum amount of memory zram has consumed to
                   store the data
  same_pages       the number of same element filled pages written to this disk.
                   No memory is allocated for such pages.
@@ -271,7 +272,7 @@
 
 File /sys/block/zram<id>/bd_stat
 
-The stat file represents device's backing device statistics. It consists of
+The bd_stat file represents a device's backing device statistics. It consists of
 a single line of text and contains the following stats separated by whitespace:
 
  ============== =============================================================
@@ -316,9 +317,9 @@
 	echo /dev/sda5 > /sys/block/zramX/backing_dev
 
 before disksize setting. It supports only partition at this moment.
-If admin want to use incompressible page writeback, they could do via::
+If admin wants to use incompressible page writeback, they could do so via::
 
-	echo huge > /sys/block/zramX/write
+	echo huge > /sys/block/zramX/writeback
 
 To use idle page writeback, first, user need to declare zram pages
 as idle::
@@ -326,7 +327,7 @@
 	echo all > /sys/block/zramX/idle
 
 From now on, any pages on zram are idle pages. The idle mark
-will be removed until someone request access of the block.
+will be removed until someone requests access of the block.
 IOW, unless there is access request, those pages are still idle pages.
 
 Admin can request writeback of those idle pages at right timing via::
@@ -341,16 +342,16 @@
 
 To overcome the concern, zram supports "writeback_limit" feature.
 The "writeback_limit_enable"'s default value is 0 so that it doesn't limit
-any writeback. IOW, if admin want to apply writeback budget, he should
+any writeback. IOW, if admin wants to apply writeback budget, he should
 enable writeback_limit_enable via::
 
 	$ echo 1 > /sys/block/zramX/writeback_limit_enable
 
 Once writeback_limit_enable is set, zram doesn't allow any writeback
-until admin set the budget via /sys/block/zramX/writeback_limit.
+until admin sets the budget via /sys/block/zramX/writeback_limit.
 
 (If admin doesn't enable writeback_limit_enable, writeback_limit's value
-assigned via /sys/block/zramX/writeback_limit is meaninless.)
+assigned via /sys/block/zramX/writeback_limit is meaningless.)
 
 If admin want to limit writeback as per-day 400M, he could do it
 like below::
@@ -361,13 +362,13 @@
 		/sys/block/zram0/writeback_limit.
 	$ echo 1 > /sys/block/zram0/writeback_limit_enable
 
-If admin want to allow further write again once the bugdet is exausted,
+If admin wants to allow further writes again once the budget is exhausted,
 he could do it like below::
 
 	$ echo $((400<<MB_SHIFT>>4K_SHIFT)) > \
 		/sys/block/zram0/writeback_limit
 
-If admin want to see remaining writeback budget since he set::
+If admin wants to see the remaining writeback budget since it was last set::
 
 	$ cat /sys/block/zramX/writeback_limit
 
@@ -375,12 +376,12 @@
 
 	$ echo 0 > /sys/block/zramX/writeback_limit_enable
 
-The writeback_limit count will reset whenever you reset zram(e.g.,
+The writeback_limit count will reset whenever you reset zram (e.g.,
 system reboot, echo 1 > /sys/block/zramX/reset) so keeping how many of
 writeback happened until you reset the zram to allocate extra writeback
 budget in next setting is user's job.
 
-If admin want to measure writeback count in a certain period, he could
+If admin wants to measure writeback count in a certain period, he could
 know it via /sys/block/zram0/bd_stat's 3rd column.
 
 memory tracking
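
Putting the writeback budget pieces together, a minimal sketch (zram0 is
assumed, and the 400MB budget is expressed in 4K-page units as in the shift
arithmetic shown above)::

	echo 1 > /sys/block/zram0/writeback_limit_enable
	echo $((400 * 1024 / 4)) > /sys/block/zram0/writeback_limit
	# mark current pages idle, then write idle pages back within the budget
	echo all > /sys/block/zram0/idle
	echo idle > /sys/block/zram0/writeback
	# the third column of bd_stat shows how much has been written back
	cat /sys/block/zram0/bd_stat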
diff --git a/Documentation/admin-guide/bootconfig.rst b/Documentation/admin-guide/bootconfig.rst
new file mode 100644
index 0000000..b342a67
--- /dev/null
+++ b/Documentation/admin-guide/bootconfig.rst
@@ -0,0 +1,190 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _bootconfig:
+
+==================
+Boot Configuration
+==================
+
+:Author: Masami Hiramatsu <mhiramat@kernel.org>
+
+Overview
+========
+
+The boot configuration expands the current kernel command line to support
+additional key-value data when booting the kernel in an efficient way.
+This allows administrators to pass a structured key config file.
+
+Config File Syntax
+==================
+
+The boot config syntax is a simple structured key-value list. Each key consists
+of dot-connected-words, and key and value are connected by ``=``. The value
+has to be terminated by semi-colon (``;``) or newline (``\n``).
+For an array value, the array entries are separated by commas (``,``). ::
+
+ KEY[.WORD[...]] = VALUE[, VALUE2[...]][;]
+
+Unlike the kernel command line syntax, spaces are OK around the comma and ``=``.
+
+Each key word must contain only letters, numbers, dash (``-``) or underscore
+(``_``). And each value only contains printable characters or spaces except
+for delimiters such as semi-colon (``;``), new-line (``\n``), comma (``,``),
+hash (``#``) and closing brace (``}``).
+
+If you want to use those delimiters in a value, you can use either double-
+quotes (``"VALUE"``) or single-quotes (``'VALUE'``) to quote it. Note that
+you can not escape these quotes.
+
+There can be a key which doesn't have a value or has an empty value. Those keys
+are used for checking if the key exists or not (like a boolean).
+
+Key-Value Syntax
+----------------
+
+The boot config file syntax allows the user to merge keys whose leading
+words are the same by using braces. For example::
+
+ foo.bar.baz = value1
+ foo.bar.qux.quux = value2
+
+These can be written also in::
+
+ foo.bar {
+    baz = value1
+    qux.quux = value2
+ }
+
+Or, even shorter, written as follows::
+
+ foo.bar { baz = value1; qux.quux = value2 }
+
+In both styles, the same key words are automatically merged when parsed at
+boot time, so you can append similar trees or key-values.
+
+Comments
+--------
+
+The config syntax accepts shell-script style comments. Text starting with a
+hash ("#") and running up to the newline ("\n") will be ignored.
+
+::
+
+ # comment line
+ foo = value # value is set to foo.
+ bar = 1, # 1st element
+       2, # 2nd element
+       3  # 3rd element
+
+This is parsed as below::
+
+ foo = value
+ bar = 1, 2, 3
+
+Note that you can not put a comment between a value and its delimiter
+(``,`` or ``;``). This means the following config has a syntax error::
+
+ key = 1 # comment
+       ,2
+
+
+/proc/bootconfig
+================
+
+/proc/bootconfig is a user-space interface of the boot config.
+Unlike /proc/cmdline, this file shows the key-value style list.
+Each key-value pair is shown in each line with following style::
+
+ KEY[.WORDS...] = "[VALUE]"[,"VALUE2"...]
+
+
+Boot Kernel With a Boot Config
+==============================
+
+Since the boot configuration file is loaded with initrd, it will be added
+to the end of the initrd (initramfs) image file. The Linux kernel decodes
+the last part of the initrd image in memory to get the boot configuration
+data.
+Because of this "piggyback" method, there is no need to change or
+update the boot loader and the kernel image itself.
+
+To do this operation, the Linux kernel provides a "bootconfig" command under
+tools/bootconfig, which allows the admin to apply or delete the config file
+to/from an initrd image. You can build it by the following command::
+
+ # make -C tools/bootconfig
+
+To add your boot config file to initrd image, run bootconfig as below
+(old data is removed automatically if it exists)::
+
+ # tools/bootconfig/bootconfig -a your-config /boot/initrd.img-X.Y.Z
+
+To remove the config from the image, you can use -d option as below::
+
+ # tools/bootconfig/bootconfig -d /boot/initrd.img-X.Y.Z
+
+Then add "bootconfig" on the normal kernel command line to tell the
+kernel to look for the bootconfig at the end of the initrd file.
+
+Config File Limitation
+======================
+
+Currently the maximum config size is 32KB and the total number of key words
+(not key-value entries) must be under 1024 nodes.
+Note: this is not the number of entries but nodes; an entry must consume
+at least 2 nodes (a key word and a value). So theoretically, it will be
+up to 512 key-value pairs. If keys contain 3 words on average, it can
+contain 256 key-value pairs. In most cases, the number of config items
+will be under 100 entries and smaller than 8KB, so this should be enough.
+If the node number exceeds 1024, parser returns an error even if the file
+size is smaller than 32KB.
+Anyway, since the bootconfig command verifies the config when appending it
+to the initrd image, the user can notice any error before boot.
+
+
+Bootconfig APIs
+===============
+
+Users can query or loop on key-value pairs; it is also possible to find a
+root (prefix) key node and find key-values under that node.
+
+If you have a key string, you can query the value directly with the key
+using xbc_find_value(). If you want to know what keys exist in the boot
+config, you can use xbc_for_each_key_value() to iterate key-value pairs.
+Note that you need to use xbc_array_for_each_value() for accessing
+each array's value, e.g.::
+
+ vnode = NULL;
+ xbc_find_value("key.word", &vnode);
+ if (vnode && xbc_node_is_array(vnode))
+    xbc_array_for_each_value(vnode, value) {
+      printk("%s ", value);
+    }
+
+If you want to focus on keys which have a prefix string, you can use
+xbc_find_node() to find a node by the prefix string, and iterate
+keys under the prefix node with xbc_node_for_each_key_value().
+
+But the most typical usage is to get the named value under prefix
+or get the named array under prefix as below::
+
+ root = xbc_find_node("key.prefix");
+ value = xbc_node_find_value(root, "option", &vnode);
+ ...
+ xbc_node_for_each_array_value(root, "array-option", value, anode) {
+    ...
+ }
+
+This accesses a value of "key.prefix.option" and an array of
+"key.prefix.array-option".
+
+Locking is not needed, since after initialization, the config becomes
+read-only. All data and keys must be copied if you need to modify them.
+
+
+Functions and structures
+========================
+
+.. kernel-doc:: include/linux/bootconfig.h
+.. kernel-doc:: lib/bootconfig.c
+
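
Putting the pieces above together, a small end-to-end sketch (the key names
in myboot.conf are made up purely for illustration)::

	# myboot.conf
	feature.log { level = 3; file = "/tmp/boot.log" }
	feature.log.append = 1

	# append the file to the initrd and boot with "bootconfig" on the
	# kernel command line
	tools/bootconfig/bootconfig -a myboot.conf /boot/initrd.img-X.Y.Z

	# after boot, the merged tree is visible in key-value form
	cat /proc/bootconfig
	# feature.log.level = "3"
	# feature.log.file = "/tmp/boot.log"
	# feature.log.append = "1"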
diff --git a/Documentation/admin-guide/device-mapper/dm-raid.rst b/Documentation/admin-guide/device-mapper/dm-raid.rst
index f634467..695a2ea 100644
--- a/Documentation/admin-guide/device-mapper/dm-raid.rst
+++ b/Documentation/admin-guide/device-mapper/dm-raid.rst
@@ -419,3 +419,5 @@
 	rebuild errors.
  1.15.0 Fix size extensions not being synchronized in case of new MD bitmap
         pages allocated;  also fix those not occuring after previous reductions
+ 1.15.1 Fix argument count and arguments for rebuild/write_mostly/journal_(dev|mode)
+        on the status line.
diff --git a/Documentation/admin-guide/ext4.rst b/Documentation/admin-guide/ext4.rst
index 9bc93f0..9443fce 100644
--- a/Documentation/admin-guide/ext4.rst
+++ b/Documentation/admin-guide/ext4.rst
@@ -92,6 +92,8 @@
 * efficient new ordered mode in JBD2 and ext4 (avoid using buffer head to force
   the ordering)
 * Case-insensitive file name lookups
+* file-based encryption support (fscrypt)
+* file-based verity support (fsverity)
 
 [1] Filesystems with a block size of 1k may see a limit imposed by the
 directory hash tree having a maximum depth of two.
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index 4405b74..f1d0ccf 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -64,6 +64,7 @@
    binderfs
    binfmt-misc
    blockdev/index
+   bootconfig
    braille-console
    btmrvl
    cgroup-v1/index
@@ -76,6 +77,7 @@
    device-mapper/index
    efi-stub
    ext4
+   nfs/index
    gpio/index
    highuid
    hw_random
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index ec92120..dbc22d6 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -437,6 +437,12 @@
 			no delay (0).
 			Format: integer
 
+	bootconfig	[KNL]
+			Extended command line options can be added to an initrd
+			image; this parameter tells the kernel to look for them there.
+
+			See Documentation/admin-guide/bootconfig.rst
+
 	bert_disable	[ACPI]
 			Disable BERT OS support on buggy BIOSes.
 
@@ -834,6 +840,18 @@
 			dump out devices still on the deferred probe list after
 			retrying.
 
+	dfltcc=		[HW,S390]
+			Format: { on | off | def_only | inf_only | always }
+			on:       s390 zlib hardware support for compression on
+			          level 1 and decompression (default)
+			off:      No s390 zlib hardware support
+			def_only: s390 zlib hardware support for deflate
+			          only (compression on level 1)
+			inf_only: s390 zlib hardware support for inflate
+			          only (decompression)
+			always:   Same as 'on' but ignores the selected compression
+			          level, always using hardware support (used for debugging)
+
 	dhash_entries=	[KNL]
 			Set number of hash buckets for dentry cache.
 
diff --git a/Documentation/admin-guide/nfs/fault_injection.rst b/Documentation/admin-guide/nfs/fault_injection.rst
new file mode 100644
index 0000000..eb029c0
--- /dev/null
+++ b/Documentation/admin-guide/nfs/fault_injection.rst
@@ -0,0 +1,70 @@
+===================
+NFS Fault Injection
+===================
+
+Fault injection is a method for forcing errors that may not normally occur, or
+may be difficult to reproduce.  Forcing these errors in a controlled environment
+can help the developer find and fix bugs before their code is shipped in a
+production system.  Injecting an error on the Linux NFS server will allow us to
+observe how the client reacts and if it manages to recover its state correctly.
+
+NFSD_FAULT_INJECTION must be selected when configuring the kernel to use this
+feature.
+
+
+Using Fault Injection
+=====================
+On the client, mount the fault injection server through NFS v4.0+ and do some
+work over NFS (open files, take locks, ...).
+
+On the server, mount the debugfs filesystem to <debug_dir> and ls
+<debug_dir>/nfsd.  This will show a list of files that will be used for
+injecting faults on the NFS server.  As root, write a number n to the file
+corresponding to the action you want the server to take.  The server will then
+process the first n items it finds.  So if you want to forget 5 locks, echo '5'
+to <debug_dir>/nfsd/forget_locks.  A value of 0 will tell the server to forget
+all corresponding items.  A log message will be created containing the number
+of items forgotten (check dmesg).
+
+Go back to work on the client and check if the client recovered from the error
+correctly.
+
+
+Available Faults
+================
+forget_clients:
+     The NFS server keeps a list of clients that have placed a mount call.  If
+     this list is cleared, the server will have no knowledge of who the client
+     is, forcing the client to reauthenticate with the server.
+
+forget_openowners:
+     The NFS server keeps a list of what files are currently opened and who
+     they were opened by.  Clearing this list will force the client to reopen
+     its files.
+
+forget_locks:
+     The NFS server keeps a list of what files are currently locked in the VFS.
+     Clearing this list will force the client to reclaim its locks (files are
+     unlocked through the VFS as they are cleared from this list).
+
+forget_delegations:
+     A delegation is used to assure the client that a file, or part of a file,
+     has not changed since the delegation was awarded.  Clearing this list will
+     force the client to reacquire its delegation before accessing the file
+     again.
+
+recall_delegations:
+     Delegations can be recalled by the server when another client attempts to
+     access a file.  This test will notify the client that its delegation has
+     been revoked, forcing the client to reacquire the delegation before using
+     the file again.
+
+
+tools/nfs/inject_faults.sh script
+=================================
+This script has been created to ease the fault injection process.  This script
+will detect the mounted debugfs directory and write to the files located there
+based on the arguments passed by the user.  For example, running
+`inject_faults.sh forget_locks 1` as root will instruct the server to forget
+one lock.  Running `inject_faults.sh forget_locks` will instruct the server
+to forget all locks.
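
A minimal sketch of the manual procedure described above (the debugfs mount
point is an assumption)::

	mount -t debugfs none /sys/kernel/debug
	ls /sys/kernel/debug/nfsd
	# forget five locks, then check the log for how many items were forgotten
	echo 5 > /sys/kernel/debug/nfsd/forget_locks
	dmesg | tail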
diff --git a/Documentation/admin-guide/nfs/index.rst b/Documentation/admin-guide/nfs/index.rst
new file mode 100644
index 0000000..6b5a3c9
--- /dev/null
+++ b/Documentation/admin-guide/nfs/index.rst
@@ -0,0 +1,15 @@
+=============
+NFS
+=============
+
+.. toctree::
+    :maxdepth: 1
+
+    nfs-client
+    nfsroot
+    nfs-rdma
+    nfsd-admin-interfaces
+    nfs-idmapper
+    pnfs-block-server
+    pnfs-scsi-server
+    fault_injection
diff --git a/Documentation/admin-guide/nfs/nfs-client.rst b/Documentation/admin-guide/nfs/nfs-client.rst
new file mode 100644
index 0000000..c4b777c
--- /dev/null
+++ b/Documentation/admin-guide/nfs/nfs-client.rst
@@ -0,0 +1,141 @@
+==========
+NFS Client
+==========
+
+The NFS client
+==============
+
+The NFS version 2 protocol was first documented in RFC1094 (March 1989).
+Since then two more major releases of NFS have been published, with NFSv3
+being documented in RFC1813 (June 1995), and NFSv4 in RFC3530 (April
+2003).
+
+The Linux NFS client currently supports all the above published versions,
+and work is in progress on adding support for minor version 1 of the NFSv4
+protocol.
+
+The purpose of this document is to provide information on some of the
+special features of the NFS client that can be configured by system
+administrators.
+
+
+The nfs4_unique_id parameter
+============================
+
+NFSv4 requires clients to identify themselves to servers with a unique
+string.  File open and lock state shared between one client and one server
+is associated with this identity.  To support robust NFSv4 state recovery
+and transparent state migration, this identity string must not change
+across client reboots.
+
+Without any other intervention, the Linux client uses a string that contains
+the local system's node name.  System administrators, however, often do not
+take care to ensure that node names are fully qualified and do not change
+over the lifetime of a client system.  Node names can have other
+administrative requirements that require particular behavior that does not
+work well as part of an nfs_client_id4 string.
+
+The nfs.nfs4_unique_id boot parameter specifies a unique string that can be
+used instead of a system's node name when an NFS client identifies itself to
+a server.  Thus, if the system's node name is not unique, or it changes, its
+nfs.nfs4_unique_id stays the same, preventing collision with other clients
+or loss of state during NFS reboot recovery or transparent state migration.
+
+The nfs.nfs4_unique_id string is typically a UUID, though it can contain
+anything that is believed to be unique across all NFS clients.  An
+nfs4_unique_id string should be chosen when a client system is installed,
+just as a system's root file system gets a fresh UUID in its label at
+install time.
+
+The string should remain fixed for the lifetime of the client.  It can be
+changed safely if care is taken that the client shuts down cleanly and all
+outstanding NFSv4 state has expired, to prevent loss of NFSv4 state.
+
+This string can be stored in an NFS client's grub.conf, or it can be provided
+via a net boot facility such as PXE.  It may also be specified as an nfs.ko
+module parameter.  Specifying a uniquifier string is not supported for NFS
+clients running in containers.
+
+
+The DNS resolver
+================
+
+NFSv4 allows for one server to refer the NFS client to data that has been
+migrated onto another server by means of the special "fs_locations"
+attribute. See `RFC3530 Section 6: Filesystem Migration and Replication`_ and
+`Implementation Guide for Referrals in NFSv4`_.
+
+.. _RFC3530 Section 6\: Filesystem Migration and Replication: http://tools.ietf.org/html/rfc3530#section-6
+.. _Implementation Guide for Referrals in NFSv4: http://tools.ietf.org/html/draft-ietf-nfsv4-referrals-00
+
+The fs_locations information can take the form of either an ip address and
+a path, or a DNS hostname and a path. The latter requires the NFS client to
+do a DNS lookup in order to mount the new volume, and hence the need for an
+upcall to allow userland to provide this service.
+
+Assuming that the user has the 'rpc_pipefs' filesystem mounted in the usual
+/var/lib/nfs/rpc_pipefs, the upcall consists of the following steps:
+
+   (1) The process checks the dns_resolve cache to see if it contains a
+       valid entry. If so, it returns that entry and exits.
+
+   (2) If no valid entry exists, the helper script '/sbin/nfs_cache_getent'
+       (may be changed using the 'nfs.cache_getent' kernel boot parameter)
+       is run, with two arguments:
+       - the cache name, "dns_resolve"
+       - the hostname to resolve
+
+   (3) After looking up the corresponding ip address, the helper script
+       writes the result into the rpc_pipefs pseudo-file
+       '/var/lib/nfs/rpc_pipefs/cache/dns_resolve/channel'
+       in the following (text) format:
+
+		"<ip address> <hostname> <ttl>\n"
+
+       Where <ip address> is in the usual IPv4 (123.456.78.90) or IPv6
+       (ffee:ddcc:bbaa:9988:7766:5544:3322:1100, ffee::1100, ...) format.
+       <hostname> is identical to the second argument of the helper
+       script, and <ttl> is the 'time to live' of this cache entry (in
+       units of seconds).
+
+       .. note::
+            If <ip address> is invalid, say the string "0", then a negative
+            entry is created, which will cause the kernel to treat the hostname
+            as having no valid DNS translation.
+
+
+
+
+A basic sample /sbin/nfs_cache_getent
+=====================================
+.. code-block:: sh
+
+    #!/bin/bash
+    #
+    ttl=600
+    #
+    cut=/usr/bin/cut
+    getent=/usr/bin/getent
+    rpc_pipefs=/var/lib/nfs/rpc_pipefs
+    #
+    die()
+    {
+        echo "Usage: $0 cache_name entry_name"
+        exit 1
+    }
+
+    [ $# -lt 2 ] && die
+    cachename="$1"
+    cache_path=${rpc_pipefs}/cache/${cachename}/channel
+
+    case "${cachename}" in
+        dns_resolve)
+            name="$2"
+            result="$(${getent} hosts ${name} | ${cut} -f1 -d\ )"
+            [ -z "${result}" ] && result="0"
+            ;;
+        *)
+            die
+            ;;
+    esac
+    echo "${result} ${name} ${ttl}" >${cache_path}
diff --git a/Documentation/admin-guide/nfs/nfs-idmapper.rst b/Documentation/admin-guide/nfs/nfs-idmapper.rst
new file mode 100644
index 0000000..58b8e63
--- /dev/null
+++ b/Documentation/admin-guide/nfs/nfs-idmapper.rst
@@ -0,0 +1,78 @@
+=============
+NFS ID Mapper
+=============
+
+Id mapper is used by NFS to translate user and group ids into names, and to
+translate user and group names into ids.  Part of this translation involves
+performing an upcall to userspace to request the information.  There are two
+ways NFS could obtain this information: placing a call to /sbin/request-key
+or by placing a call to the rpc.idmap daemon.
+
+NFS will attempt to call /sbin/request-key first.  If this succeeds, the
+result will be cached using the generic request-key cache.  This call should
+only fail if /etc/request-key.conf is not configured for the id_resolver key
+type, see the "Configuring" section below if you wish to use the request-key
+method.
+
+If the call to /sbin/request-key fails (if /etc/request-key.conf is not
+configured with the id_resolver key type), then the idmapper will ask the
+legacy rpc.idmap daemon for the id mapping.  This result will be stored
+in a custom NFS idmap cache.
+
+
+Configuring
+===========
+
+The file /etc/request-key.conf will need to be modified so /sbin/request-key can
+direct the upcall.  The following line should be added:
+
+``#OP	TYPE	DESCRIPTION	CALLOUT INFO	PROGRAM ARG1 ARG2 ARG3 ...``
+``#======	=======	===============	===============	===============================``
+``create	id_resolver	*	*		/usr/sbin/nfs.idmap %k %d 600``
+
+
+This will direct all id_resolver requests to the program /usr/sbin/nfs.idmap.
+The last parameter, 600, defines how many seconds into the future the key will
+expire.  This parameter is optional for /usr/sbin/nfs.idmap.  When the timeout
+is not specified, nfs.idmap will default to 600 seconds.
+
+The id mapper uses the following key descriptions::
+
+	  uid:  Find the UID for the given user
+	  gid:  Find the GID for the given group
+	 user:  Find the user  name for the given UID
+	group:  Find the group name for the given GID
+
+You can handle any of these individually, rather than using the generic upcall
+program.  If you would like to use your own program for a uid lookup then you
+would edit your request-key.conf so it looks similar to this:
+
+``#OP	TYPE	DESCRIPTION	CALLOUT INFO	PROGRAM ARG1 ARG2 ARG3 ...``
+``#======	=======	===============	===============	===============================``
+``create	id_resolver	uid:*	*		/some/other/program %k %d 600``
+``create	id_resolver	*	*		/usr/sbin/nfs.idmap %k %d 600``
+
+
+Notice that the new line was added above the line for the generic program.
+request-key will find the first matching line and corresponding program.  In
+this case, /some/other/program will handle all uid lookups and
+/usr/sbin/nfs.idmap will handle gid, user, and group lookups.
+
+See Documentation/security/keys/request-key.rst for more information
+about the request-key function.
+
+
+nfs.idmap
+=========
+
+nfs.idmap is designed to be called by request-key, and should not be run "by
+hand".  This program takes two arguments, a serialized key and a key
+description.  The serialized key is first converted into a key_serial_t, and
+then passed as an argument to keyctl_instantiate (both are part of keyutils.h).
+
+The actual lookups are performed by functions found in nfsidmap.h.  nfs.idmap
+determines the correct function to call by looking at the first part of the
+description string.  For example, a uid lookup description will appear as
+"uid:user@domain".
+
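+With the configuration shown in the "Configuring" section above, request-key
+would therefore end up invoking the helper roughly as follows (the key serial
+number, user and domain here are purely illustrative):
+
+.. code-block:: sh
+
+    /usr/sbin/nfs.idmap 123456789 uid:bob@example.com 600
+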
+nfs.idmap will return 0 if the key was instantiated, and non-zero otherwise.
diff --git a/Documentation/admin-guide/nfs/nfs-rdma.rst b/Documentation/admin-guide/nfs/nfs-rdma.rst
new file mode 100644
index 0000000..ef0f367
--- /dev/null
+++ b/Documentation/admin-guide/nfs/nfs-rdma.rst
@@ -0,0 +1,292 @@
+===================
+Setting up NFS/RDMA
+===================
+
+:Author:
+  NetApp and Open Grid Computing (May 29, 2008)
+
+.. warning::
+  This document is probably obsolete.
+
+Overview
+========
+
+This document describes how to install and setup the Linux NFS/RDMA client
+and server software.
+
+The NFS/RDMA client was first included in Linux 2.6.24. The NFS/RDMA server
+was first included in the following release, Linux 2.6.25.
+
+In our testing, we have obtained excellent performance results (full 10Gbit
+wire bandwidth at minimal client CPU) under many workloads. The code passes
+the full Connectathon test suite and operates over both Infiniband and iWARP
+RDMA adapters.
+
+Getting Help
+============
+
+If you get stuck, you can ask questions on the
+nfs-rdma-devel@lists.sourceforge.net mailing list.
+
+Installation
+============
+
+These instructions are a step by step guide to building a machine for
+use with NFS/RDMA.
+
+- Install an RDMA device
+
+  Any device supported by the drivers in drivers/infiniband/hw is acceptable.
+
+  Testing has been performed using several Mellanox-based IB cards, the
+  Ammasso AMS1100 iWARP adapter, and the Chelsio cxgb3 iWARP adapter.
+
+- Install a Linux distribution and tools
+
+  The first kernel release to contain both the NFS/RDMA client and server was
+  Linux 2.6.25.  Therefore, a distribution compatible with this and subsequent
+  Linux kernel releases should be installed.
+
+  The procedures described in this document have been tested with
+  distributions from Red Hat's Fedora Project (http://fedora.redhat.com/).
+
+- Install nfs-utils-1.1.2 or greater on the client
+
+  An NFS/RDMA mount point can be obtained by using the mount.nfs command in
+  nfs-utils-1.1.2 or greater (nfs-utils-1.1.1 was the first nfs-utils
+  version with support for NFS/RDMA mounts, but for various reasons we
+  recommend using nfs-utils-1.1.2 or greater). To see which version of
+  mount.nfs you are using, type:
+
+  .. code-block:: sh
+
+    $ /sbin/mount.nfs -V
+
+  If the version is less than 1.1.2 or the command does not exist,
+  you should install the latest version of nfs-utils.
+
+  Download the latest package from: http://www.kernel.org/pub/linux/utils/nfs
+
+  Uncompress the package and follow the installation instructions.
+
+  If you will not need the idmapper and gssd executables (you do not need
+  these to create an NFS/RDMA enabled mount command), the installation
+  process can be simplified by disabling these features when running
+  configure:
+
+  .. code-block:: sh
+
+    $ ./configure --disable-gss --disable-nfsv4
+
+  To build nfs-utils you will need the tcp_wrappers package installed. For
+  more information on this see the package's README and INSTALL files.
+
+  After building the nfs-utils package, there will be a mount.nfs binary in
+  the utils/mount directory. This binary can be used to initiate NFS v2, v3,
+  or v4 mounts. To initiate a v4 mount, the binary must be called
+  mount.nfs4.  The standard technique is to create a symlink called
+  mount.nfs4 to mount.nfs.
+
+  This mount.nfs binary should be installed at /sbin/mount.nfs as follows:
+
+  .. code-block:: sh
+
+    $ sudo cp utils/mount/mount.nfs /sbin/mount.nfs
+
+  In this location, mount.nfs will be invoked automatically for NFS mounts
+  by the system mount command.
+
+    .. note::
+      mount.nfs and therefore nfs-utils-1.1.2 or greater is only needed
+      on the NFS client machine. You do not need this specific version of
+      nfs-utils on the server. Furthermore, only the mount.nfs command from
+      nfs-utils-1.1.2 is needed on the client.
+
+- Install a Linux kernel with NFS/RDMA
+
+  The NFS/RDMA client and server are both included in the mainline Linux
+  kernel version 2.6.25 and later. This and other versions of the Linux
+  kernel can be found at: https://www.kernel.org/pub/linux/kernel/
+
+  Download the sources and place them in an appropriate location.
+
+- Configure the RDMA stack
+
+  Make sure your kernel configuration has RDMA support enabled. Under
+  Device Drivers -> InfiniBand support, update the kernel configuration
+  to enable InfiniBand support [NOTE: the option name is misleading. Enabling
+  InfiniBand support is required for all RDMA devices (IB, iWARP, etc.)].
+
+  Enable the appropriate IB HCA support (mlx4, mthca, ehca, ipath, etc.) or
+  iWARP adapter support (amso, cxgb3, etc.).
+
+  If you are using InfiniBand, be sure to enable IP-over-InfiniBand support.
+
+- Configure the NFS client and server
+
+  Your kernel configuration must also have NFS file system support and/or
+  NFS server support enabled. These and other NFS related configuration
+  options can be found under File Systems -> Network File Systems.
+
+- Build, install, reboot
+
+  The NFS/RDMA code will be enabled automatically if NFS and RDMA
+  are turned on. The NFS/RDMA client and server are configured via the hidden
+  SUNRPC_XPRT_RDMA config option that depends on SUNRPC and INFINIBAND. The
+  value of SUNRPC_XPRT_RDMA will be:
+
+    #. N if either SUNRPC or INFINIBAND is N; in this case the NFS/RDMA client
+       and server will not be built
+
+    #. M if both SUNRPC and INFINIBAND are on (M or Y) and at least one is M;
+       in this case the NFS/RDMA client and server will be built as modules
+
+    #. Y if both SUNRPC and INFINIBAND are Y; in this case the NFS/RDMA client
+       and server will be built into the kernel
+
+  Therefore, if you have followed the steps above and turned on NFS and RDMA,
+  the NFS/RDMA client and server will be built.
+
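+  One way to double-check the outcome is to inspect the generated
+  configuration in the kernel source tree (the "=m" result below is just an
+  example, for the case where both SUNRPC and INFINIBAND are modules):
+
+  .. code-block:: sh
+
+    $ grep SUNRPC_XPRT_RDMA .config
+    CONFIG_SUNRPC_XPRT_RDMA=m
+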
+  Build a new kernel, install it, boot it.
+
+Check RDMA and NFS Setup
+========================
+
+Before configuring the NFS/RDMA software, it is a good idea to test
+your new kernel to ensure that the kernel is working correctly.
+In particular, it is a good idea to verify that the RDMA stack
+is functioning as expected and standard NFS over TCP/IP and/or UDP/IP
+is working properly.
+
+- Check RDMA Setup
+
+  If you built the RDMA components as modules, load them at
+  this time. For example, if you are using a Mellanox Tavor/Sinai/Arbel
+  card:
+
+  .. code-block:: sh
+
+    $ modprobe ib_mthca
+    $ modprobe ib_ipoib
+
+  If you are using InfiniBand, make sure there is a Subnet Manager (SM)
+  running on the network. If your IB switch has an embedded SM, you can
+  use it. Otherwise, you will need to run an SM, such as OpenSM, on one
+  of your end nodes.
+
+  If an SM is running on your network, you should see the following:
+
+  .. code-block:: sh
+
+    $ cat /sys/class/infiniband/driverX/ports/1/state
+    4: ACTIVE
+
+  where driverX is mthca0, ipath5, ehca3, etc.
+
+  To further test the InfiniBand software stack, use IPoIB (this
+  assumes you have two IB hosts named host1 and host2):
+
+  .. code-block:: sh
+
+    host1$ ip link set dev ib0 up
+    host1$ ip address add dev ib0 a.b.c.x
+    host2$ ip link set dev ib0 up
+    host2$ ip address add dev ib0 a.b.c.y
+    host1$ ping a.b.c.y
+    host2$ ping a.b.c.x
+
+  For other device types, follow the appropriate procedures.
+
+- Check NFS Setup
+
+  For the NFS components enabled above (client and/or server),
+  test their functionality over standard Ethernet using TCP/IP or UDP/IP.
+
+NFS/RDMA Setup
+==============
+
+We recommend that you use two machines, one to act as the client and
+one to act as the server.
+
+One time configuration:
+-----------------------
+
+- On the server system, configure the /etc/exports file and start the NFS/RDMA server.
+
+  Exports entries with the following formats have been tested::
+
+    /vol0   192.168.0.47(fsid=0,rw,async,insecure,no_root_squash)
+    /vol0   192.168.0.0/255.255.255.0(fsid=0,rw,async,insecure,no_root_squash)
+
+  The IP address(es) given should be the client's IPoIB address(es) for an
+  InfiniBand HCA or the client's iWARP address(es) for an RNIC.
+
+  .. note::
+    The "insecure" option must be used because the NFS/RDMA client does
+    not use a reserved port.
+
+Each time a machine boots:
+--------------------------
+
+- Load and configure the RDMA drivers
+
+  For InfiniBand using a Mellanox adapter:
+
+  .. code-block:: sh
+
+    $ modprobe ib_mthca
+    $ modprobe ib_ipoib
+    $ ip li set dev ib0 up
+    $ ip addr add dev ib0 a.b.c.d
+
+  .. note::
+    Please use unique addresses for the client and server!
+
+- Start the NFS server
+
+  If the NFS/RDMA server was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in
+  kernel config), load the RDMA transport module:
+
+  .. code-block:: sh
+
+    $ modprobe svcrdma
+
+  Regardless of how the server was built (module or built-in), start the
+  server:
+
+  .. code-block:: sh
+
+    $ /etc/init.d/nfs start
+
+  or
+
+  .. code-block:: sh
+
+    $ service nfs start
+
+  Instruct the server to listen on the RDMA transport:
+
+  .. code-block:: sh
+
+    $ echo rdma 20049 > /proc/fs/nfsd/portlist
+
+- On the client system
+
+  If the NFS/RDMA client was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in
+  kernel config), load the RDMA client module:
+
+  .. code-block:: sh
+
+    $ modprobe xprtrdma
+
+  Regardless of how the client was built (module or built-in), use this
+  command to mount the NFS/RDMA server:
+
+  .. code-block:: sh
+
+    $ mount -o rdma,port=20049 <IPoIB-server-name-or-address>:/<export> /mnt
+
+  To verify that the mount is using RDMA, run "cat /proc/mounts" and check
+  the "proto" field for the given mount.
+
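+  For example (using the /mnt mount point from the command above), the
+  following should show "proto=rdma" among the mount options:
+
+  .. code-block:: sh
+
+    $ grep /mnt /proc/mounts
+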
+  Congratulations! You're using NFS/RDMA!
diff --git a/Documentation/admin-guide/nfs/nfsd-admin-interfaces.rst b/Documentation/admin-guide/nfs/nfsd-admin-interfaces.rst
new file mode 100644
index 0000000..c05926f
--- /dev/null
+++ b/Documentation/admin-guide/nfs/nfsd-admin-interfaces.rst
@@ -0,0 +1,40 @@
+==================================
+Administrative interfaces for nfsd
+==================================
+
+Note that normally these interfaces are used only by the utilities in
+nfs-utils.
+
+nfsd is controlled mainly by pseudofiles under the "nfsd" filesystem,
+which is normally mounted at /proc/fs/nfsd/.
+
+The server is always started by the first write of a nonzero value to
+nfsd/threads.
+
+Before doing that, NFSD can be told which sockets to listen on by
+writing to nfsd/portlist; that write may be:
+
+	-  an ascii-encoded file descriptor, which should refer to a
+	   bound (and listening, for tcp) socket, or
+	-  "transportname port", where transportname is currently either
+	   "udp", "tcp", or "rdma".
+
+If nfsd is started without doing any of these, then it will create one
+udp and one tcp listener at port 2049 (see nfsd_init_socks).
+
+On startup, nfsd and lockd grace periods start. nfsd is shut down by a write of
+0 to nfsd/threads.  All locks and state are thrown away at that point.
+
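+As an illustration (the port numbers and thread count here are only
+examples), a minimal start-up and shutdown sequence looks like this:
+
+.. code-block:: sh
+
+    # listen on TCP port 2049 and RDMA port 20049, then start 8 threads
+    echo "tcp 2049" > /proc/fs/nfsd/portlist
+    echo "rdma 20049" > /proc/fs/nfsd/portlist
+    echo 8 > /proc/fs/nfsd/threads
+
+    # shut the server down again, discarding all locks and state
+    echo 0 > /proc/fs/nfsd/threads
+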
+Between startup and shutdown, the number of threads may be adjusted up
+or down by additional writes to nfsd/threads or by writes to
+nfsd/pool_threads.
+
+For more detail about files under nfsd/ and what they control, see
+fs/nfsd/nfsctl.c; most of them have detailed comments.
+
+Implementation notes
+====================
+
+Note that the rpc server requires the caller to serialize addition and
+removal of listening sockets, and startup and shutdown of the server.
+For nfsd this is done using nfsd_mutex.
diff --git a/Documentation/admin-guide/nfs/nfsroot.rst b/Documentation/admin-guide/nfs/nfsroot.rst
new file mode 100644
index 0000000..82a4fda
--- /dev/null
+++ b/Documentation/admin-guide/nfs/nfsroot.rst
@@ -0,0 +1,364 @@
+===============================================
+Mounting the root filesystem via NFS (nfsroot)
+===============================================
+
+:Authors:
+	Written 1996 by Gero Kuhlmann <gero@gkminix.han.de>
+
+	Updated 1997 by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+
+	Updated 2006 by Nico Schottelius <nico-kernel-nfsroot@schottelius.org>
+
+	Updated 2006 by Horms <horms@verge.net.au>
+
+	Updated 2018 by Chris Novakovic <chris@chrisn.me.uk>
+
+
+
+In order to use a diskless system, such as an X-terminal or printer server for
+example, it is necessary for the root filesystem to be present on a non-disk
+device. This may be an initramfs (see
+Documentation/filesystems/ramfs-rootfs-initramfs.txt), a ramdisk (see
+Documentation/admin-guide/initrd.rst) or a filesystem mounted via NFS. The
+following text describes how to use NFS for the root filesystem. For the rest
+of this text 'client' means the diskless system, and 'server' means the NFS
+server.
+
+
+
+
+Enabling nfsroot capabilities
+=============================
+
+In order to use nfsroot, NFS client support needs to be selected as
+built-in during configuration. Once this has been selected, the nfsroot
+option will become available, which should also be selected.
+
+In the networking options, kernel level autoconfiguration can be selected,
+along with the types of autoconfiguration to support. Selecting all of
+DHCP, BOOTP and RARP is safe.
+
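+With those selections made, the resulting kernel configuration will contain
+something along these lines (shown only as a sketch of the options described
+above)::
+
+	CONFIG_NFS_FS=y
+	CONFIG_ROOT_NFS=y
+	CONFIG_IP_PNP=y
+	CONFIG_IP_PNP_DHCP=y
+	CONFIG_IP_PNP_BOOTP=y
+	CONFIG_IP_PNP_RARP=y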
+
+
+
+Kernel command line
+===================
+
+When the kernel has been loaded by a boot loader (see below) it needs to be
+told what root fs device to use. And in the case of nfsroot, where to find
+both the server and the name of the directory on the server to mount as root.
+This can be established using the following kernel command line parameters:
+
+
+root=/dev/nfs
+  This is necessary to enable the pseudo-NFS-device. Note that it's not a
+  real device but just a synonym to tell the kernel to use NFS instead of
+  a real device.
+
+
+nfsroot=[<server-ip>:]<root-dir>[,<nfs-options>]
+  If the ``nfsroot`` parameter is NOT given on the command line,
+  the default ``"/tftpboot/%s"`` will be used.
+
+  <server-ip>	Specifies the IP address of the NFS server.
+		The default address is determined by the ip parameter
+		(see below). This parameter allows the use of different
+		servers for IP autoconfiguration and NFS.
+
+  <root-dir>	Name of the directory on the server to mount as root.
+		If there is a "%s" token in the string, it will be
+		replaced by the ASCII-representation of the client's
+		IP address.
+
+  <nfs-options>	Standard NFS options. All options are separated by commas.
+		The following defaults are used::
+
+			port		= as given by server portmap daemon
+			rsize		= 4096
+			wsize		= 4096
+			timeo		= 7
+			retrans		= 3
+			acregmin	= 3
+			acregmax	= 60
+			acdirmin	= 30
+			acdirmax	= 60
+			flags		= hard, nointr, noposix, cto, ac
+
+
+ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>:<dns0-ip>:<dns1-ip>:<ntp0-ip>
+  This parameter tells the kernel how to configure IP addresses of devices
+  and also how to set up the IP routing table. It was originally called
+  nfsaddrs, but now the boot-time IP configuration works independently of
+  NFS, so it was renamed to ip and the old name remained as an alias for
+  compatibility reasons.
+
+  If this parameter is missing from the kernel command line, all fields are
+  assumed to be empty, and the defaults mentioned below apply. In general
+  this means that the kernel tries to configure everything using
+  autoconfiguration.
+
+  The <autoconf> parameter can appear alone as the value to the ip
+  parameter (without all the ':' characters before).  If the value is
+  "ip=off" or "ip=none", no autoconfiguration will take place, otherwise
+  autoconfiguration will take place.  The most common way to use this
+  is "ip=dhcp".
+
+  <client-ip>	IP address of the client.
+  		Default:  Determined using autoconfiguration.
+
+  <server-ip>	IP address of the NFS server.
+		If RARP is used to determine the client address and this
+		parameter is NOT empty, only replies from the specified
+		server are accepted.
+
+		Only required for NFS root. That is, autoconfiguration
+		will not be triggered if it is missing and NFS root is not
+		in operation.
+
+		Value is exported to /proc/net/pnp with the prefix "bootserver "
+		(see below).
+
+		Default: Determined using autoconfiguration.
+		The address of the autoconfiguration server is used.
+
+  <gw-ip>	IP address of a gateway if the server is on a different subnet.
+		Default: Determined using autoconfiguration.
+
+  <netmask>	Netmask for local network interface.
+		If unspecified the netmask is derived from the client IP address
+		assuming classful addressing.
+
+		Default:  Determined using autoconfiguration.
+
+  <hostname>	Name of the client.
+		If a '.' character is present, anything
+		before the first '.' is used as the client's hostname, and anything
+		after it is used as its NIS domain name. May be supplied by
+		autoconfiguration, but its absence will not trigger autoconfiguration.
+		If specified and DHCP is used, the user-provided hostname (and NIS
+		domain name, if present) will be carried in the DHCP request; this
+		may cause a DNS record to be created or updated for the client.
+
+  		Default: Client IP address is used in ASCII notation.
+
+  <device>	Name of network device to use.
+		Default: If the host only has one device, it is used.
+		Otherwise the device is determined using
+		autoconfiguration. This is done by sending
+		autoconfiguration requests out of all devices,
+		and using the device that received the first reply.
+
+  <autoconf>	Method to use for autoconfiguration.
+		In the case of options
+		which specify multiple autoconfiguration protocols,
+		requests are sent using all protocols, and the first one
+		to reply is used.
+
+		Only autoconfiguration protocols that have been compiled
+		into the kernel will be used, regardless of the value of
+		this option::
+
+		  off or none: don't use autoconfiguration
+			       (do static IP assignment instead)
+		  on or any:   use any protocol available in the kernel
+			       (default)
+		  dhcp:        use DHCP
+		  bootp:       use BOOTP
+		  rarp:        use RARP
+		  both:        use both BOOTP and RARP but not DHCP
+		               (old option kept for backwards compatibility)
+
+		If dhcp is used, the client identifier can be specified using
+		the format "ip=dhcp,client-id-type,client-id-value".
+
+		Default: any
+
+  <dns0-ip>	IP address of primary nameserver.
+		Value is exported to /proc/net/pnp with the prefix "nameserver "
+		(see below).
+
+		Default: None if not using autoconfiguration; determined
+		automatically if using autoconfiguration.
+
+  <dns1-ip>	IP address of secondary nameserver.
+		See <dns0-ip>.
+
+  <ntp0-ip>	IP address of a Network Time Protocol (NTP) server.
+		Value is exported to /proc/net/ipconfig/ntp_servers, but is
+		otherwise unused (see below).
+
+		Default: None if not using autoconfiguration; determined
+		automatically if using autoconfiguration.
+
+  After configuration (whether manual or automatic) is complete, two files
+  are created in the following format; lines are omitted if their respective
+  value is empty following configuration:
+
+  - /proc/net/pnp::
+
+	#PROTO: <DHCP|BOOTP|RARP|MANUAL>	(depending on configuration method)
+	domain <dns-domain>			(if autoconfigured, the DNS domain)
+	nameserver <dns0-ip>			(primary name server IP)
+	nameserver <dns1-ip>			(secondary name server IP)
+	nameserver <dns2-ip>			(tertiary name server IP)
+	bootserver <server-ip>			(NFS server IP)
+
+  - /proc/net/ipconfig/ntp_servers::
+
+	<ntp0-ip>				(NTP server IP)
+	<ntp1-ip>				(NTP server IP)
+	<ntp2-ip>				(NTP server IP)
+
+  <dns-domain> and <dns2-ip> (in /proc/net/pnp) and <ntp1-ip> and <ntp2-ip>
+  (in /proc/net/ipconfig/ntp_servers) are requested during autoconfiguration;
+  they cannot be specified as part of the "ip=" kernel command line parameter.
+
+  Because the "domain" and "nameserver" options are recognised by DNS
+  resolvers, /etc/resolv.conf is often linked to /proc/net/pnp on systems
+  that use an NFS root filesystem.
+
+  Note that the kernel will not synchronise the system time with any NTP
+  servers it discovers; this is the responsibility of a user space process
+  (e.g. an initrd/initramfs script that passes the IP addresses listed in
+  /proc/net/ipconfig/ntp_servers to an NTP client before mounting the real
+  root filesystem if it is on NFS).
+
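+As an illustration, a diskless client that autoconfigures its address via DHCP
+and mounts its root from a (hypothetical) server export could combine the
+parameters above as follows::
+
+	root=/dev/nfs nfsroot=192.168.1.1:/srv/nfs/%s,rsize=4096,wsize=4096 ip=dhcp
+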
+
+nfsrootdebug
+  This parameter enables debugging messages to appear in the kernel
+  log at boot time so that administrators can verify that the correct
+  NFS mount options, server address, and root path are passed to the
+  NFS client.
+
+
+rdinit=<executable file>
+  To specify which file contains the program that starts system
+  initialization, administrators can use this command line parameter.
+  The default value of this parameter is "/init".  If the specified
+  file exists and the kernel can execute it, root filesystem related
+  kernel command line parameters, including 'nfsroot=', are ignored.
+
+  A description of the process of mounting the root file system can be
+  found in Documentation/driver-api/early-userspace/early_userspace_support.rst
+
+
+Boot Loader
+===========
+
+To get the kernel into memory different approaches can be used.
+They depend on various facilities being available:
+
+
+- Booting from a floppy using syslinux
+
+	When building kernels, an easy way to create a boot floppy that uses
+	syslinux is to use the zdisk or bzdisk make targets which use zimage
+      	and bzimage images respectively. Both targets accept the
+     	FDARGS parameter which can be used to set the kernel command line.
+
+	e.g::
+
+	   make bzdisk FDARGS="root=/dev/nfs"
+
+   	Note that the user running this command will need to have
+     	access to the floppy drive device, /dev/fd0
+
+     	For more information on syslinux, including how to create bootdisks
+     	for prebuilt kernels, see http://syslinux.zytor.com/
+
+	.. note::
+		Previously it was possible to write a kernel directly to
+		a floppy using dd, configure the boot device using rdev, and
+		boot using the resulting floppy. Linux no longer supports this
+		method of booting.
+
+- Booting from a cdrom using isolinux
+
+     	When building kernels, an easy way to create a bootable cdrom that
+     	uses isolinux is to use the isoimage target which uses a bzimage
+     	image. Like zdisk and bzdisk, this target accepts the FDARGS
+     	parameter which can be used to set the kernel command line.
+
+	e.g::
+
+	  make isoimage FDARGS="root=/dev/nfs"
+
+     	The resulting iso image will be arch/<ARCH>/boot/image.iso.
+     	This can be written to a cdrom using a variety of tools including
+     	cdrecord.
+
+	e.g::
+
+	  cdrecord dev=ATAPI:1,0,0 arch/x86/boot/image.iso
+
+     	For more information on isolinux, including how to create bootdisks
+     	for prebuilt kernels, see http://syslinux.zytor.com/
+
+- Using LILO
+
+	When using LILO all the necessary command line parameters may be
+	specified using the 'append=' directive in the LILO configuration
+	file.
+
+	However, to use the 'root=' directive you also need to create
+	a dummy root device, which may be removed after LILO is run.
+
+	e.g::
+
+	  mknod /dev/boot255 c 0 255
+
+	For information on configuring LILO, please refer to its documentation.
+
+- Using GRUB
+
+	When using GRUB, kernel parameters are simply appended after the kernel
+	specification: kernel <kernel> <parameters>
+
+- Using loadlin
+
+	loadlin may be used to boot Linux from a DOS command prompt without
+	requiring a local hard disk to mount as root. This has not been
+	thoroughly tested by the authors of this document, but in general
+	it should be possible to configure the kernel command line similarly
+	to the configuration of LILO.
+
+	Please refer to the loadlin documentation for further information.
+
+- Using a boot ROM
+
+	This is probably the most elegant way of booting a diskless client.
+	With a boot ROM the kernel is loaded using the TFTP protocol. The
+	authors of this document are not aware of any commercial boot
+	ROMs that support booting Linux over the network. However, there
+	are two free implementations of a boot ROM, netboot-nfs and
+	etherboot, both of which are available on sunsite.unc.edu, and both
+	of which contain everything you need to boot a diskless Linux client.
+
+- Using pxelinux
+
+	Pxelinux may be used to boot Linux using the PXE boot loader
+	which is present on many modern network cards.
+
+	When using pxelinux, the kernel image is specified using
+	"kernel <relative-path-below /tftpboot>". The nfsroot parameters
+	are passed to the kernel by adding them to the "append" line.
+	It is common to use a serial console in conjunction with pxelinux;
+	see Documentation/admin-guide/serial-console.rst for more information.
+
+	For more information on pxelinux, including how to create bootdisks
+	for prebuilt kernels, see http://syslinux.zytor.com/
+
+
+
+
+Credits
+=======
+
+  The nfsroot code in the kernel and the RARP support have been written
+  by Gero Kuhlmann <gero@gkminix.han.de>.
+
+  The rest of the IP layer autoconfiguration code has been written
+  by Martin Mares <mj@atrey.karlin.mff.cuni.cz>.
+
+  In order to write the initial version of nfsroot I would like to thank
+  Jens-Uwe Mager <jum@anubis.han.de> for his help.
diff --git a/Documentation/admin-guide/nfs/pnfs-block-server.rst b/Documentation/admin-guide/nfs/pnfs-block-server.rst
new file mode 100644
index 0000000..b00a2e7
--- /dev/null
+++ b/Documentation/admin-guide/nfs/pnfs-block-server.rst
@@ -0,0 +1,42 @@
+===================================
+pNFS block layout server user guide
+===================================
+
+The Linux NFS server now supports the pNFS block layout extension.  In this
+case the NFS server acts as Metadata Server (MDS) for pNFS, which in addition
+to handling all the metadata access to the NFS export also hands out layouts
+to the clients to directly access the underlying block devices that are
+shared with the client.
+
+To use pNFS block layouts with the Linux NFS server, the exported file
+system needs to support the pNFS block layouts (currently just XFS), and the
+file system must sit on shared storage (typically iSCSI) that is accessible
+to the clients in addition to the MDS.  As of now the file system needs to
+sit directly on the exported volume, striping or concatenation of
+volumes on the MDS and clients is not supported yet.
+
+On the server, pNFS block volume support is enabled automatically if the file
+system supports it.  On the client, make sure the kernel has the
+CONFIG_PNFS_BLOCK option enabled, the blkmapd daemon from nfs-utils is
+running, and the file system is mounted using the NFSv4.1 protocol version
+(mount -o vers=4.1).
+
+If the nfsd server needs to fence a non-responding client it calls
+/sbin/nfsd-recall-failed with the first argument set to the IP address of
+the client, and the second argument set to the device node without the /dev
+prefix for the file system to be fenced. Below is an example
+/sbin/nfsd-recall-failed script that shows how to translate the device into
+a serial number from SCSI EVPD 0x80:
+
+.. code-block:: sh
+
+	#!/bin/sh
+
+	CLIENT="$1"
+	DEV="/dev/$2"
+	EVPD=`sg_inq --page=0x80 ${DEV} | \
+		grep "Unit serial number:" | \
+		awk -F ': ' '{print $2}'`
+
+	echo "fencing client ${CLIENT} serial ${EVPD}" >> /var/log/pnfsd-fence.log
diff --git a/Documentation/admin-guide/nfs/pnfs-scsi-server.rst b/Documentation/admin-guide/nfs/pnfs-scsi-server.rst
new file mode 100644
index 0000000..d2f6ee5
--- /dev/null
+++ b/Documentation/admin-guide/nfs/pnfs-scsi-server.rst
@@ -0,0 +1,24 @@
+
+==================================
+pNFS SCSI layout server user guide
+==================================
+
+This document describes support for pNFS SCSI layouts in the Linux NFS server.
+With pNFS SCSI layouts, the NFS server acts as Metadata Server (MDS) for pNFS,
+which in addition to handling all the metadata access to the NFS export,
+also hands out layouts to the clients so that they can directly access the
+underlying SCSI LUNs that are shared with the client.
+
+To use pNFS SCSI layouts with the Linux NFS server, the exported file
+system needs to support the pNFS SCSI layouts (currently just XFS), and the
+file system must sit on a SCSI LUN that is accessible to the clients in
+addition to the MDS.  As of now the file system needs to sit directly on the
+exported LUN, striping or concatenation of LUNs on the MDS and clients
+is not supported yet.
+
+On a server built with CONFIG_NFSD_SCSI, the pNFS SCSI volume support is
+automatically enabled if the file system is exported using the "pnfs"
+option and the underlying SCSI device supports persistent reservations.
+On the client make sure the kernel has the CONFIG_PNFS_BLOCK option
+enabled, and the file system is mounted using the NFSv4.1 protocol
+version (mount -o vers=4.1).
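+
+A minimal client-side mount example (the server name and export path are
+hypothetical)::
+
+	mount -o vers=4.1 mds.example.com:/export /mnt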
diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst
index 311cd7c..6a06dc4 100644
--- a/Documentation/admin-guide/pm/cpuidle.rst
+++ b/Documentation/admin-guide/pm/cpuidle.rst
@@ -632,16 +632,16 @@
 will be used, again, to determine the new effective value for the whole list
 and that value will become the new real constraint.
 
-In turn, for each CPU there is only one resume latency PM QoS request
-associated with the :file:`power/pm_qos_resume_latency_us` file under
+In turn, for each CPU there is one resume latency PM QoS request associated with
+the :file:`power/pm_qos_resume_latency_us` file under
 :file:`/sys/devices/system/cpu/cpu<N>/` in ``sysfs`` and writing to it causes
 this single PM QoS request to be updated regardless of which user space
 process does that.  In other words, this PM QoS request is shared by the entire
 user space, so access to the file associated with it needs to be arbitrated
 to avoid confusion.  [Arguably, the only legitimate use of this mechanism in
 practice is to pin a process to the CPU in question and let it use the
-``sysfs`` interface to control the resume latency constraint for it.]  It
-still only is a request, however.  It is a member of a priority list used to
+``sysfs`` interface to control the resume latency constraint for it.]  It is
+still only a request, however.  It is an entry in a priority list used to
 determine the effective value to be set as the resume latency constraint for the
 CPU in question every time the list of requests is updated this way or another
 (there may be other requests coming from kernel code in that list).
diff --git a/Documentation/admin-guide/pm/intel_idle.rst b/Documentation/admin-guide/pm/intel_idle.rst
index afbf778..89309e1 100644
--- a/Documentation/admin-guide/pm/intel_idle.rst
+++ b/Documentation/admin-guide/pm/intel_idle.rst
@@ -60,6 +60,9 @@
 recognized by ``intel_idle`` and the latter are used if that is required for
 the given processor model (which is the case for all server processor models
 recognized by ``intel_idle``) or if the processor model is not recognized.
+[There is a module parameter that can be used to make the driver use the ACPI
+tables with any processor model recognized by it; see
+`below <intel-idle-parameters_>`_.]
 
 If the ACPI tables are going to be used for building the list of available idle
 states, ``intel_idle`` first looks for a ``_CST`` object under one of the ACPI
@@ -165,7 +168,7 @@
 ``MWAIT`` instruction is not allowed to be used, so the initialization of
 ``intel_idle`` will fail.
 
-Apart from that there are two module parameters recognized by ``intel_idle``
+Apart from that there are four module parameters recognized by ``intel_idle``
 itself that can be set via the kernel command line (they cannot be updated via
 sysfs, so that is the only way to change their values).
 
@@ -186,9 +189,28 @@
 even if they have been enumerated (see :ref:`cpu-pm-qos` in :doc:`cpuidle`).
 Setting ``max_cstate`` to 0 causes the ``intel_idle`` initialization to fail.
 
-The ``noacpi`` module parameter (which is recognized by ``intel_idle`` if the
-kernel has been configured with ACPI support), can be set to make the driver
-ignore the system's ACPI tables entirely (it is unset by default).
+The ``no_acpi`` and ``use_acpi`` module parameters (recognized by ``intel_idle``
+if the kernel has been configured with ACPI support) can be set to make the
+driver ignore the system's ACPI tables entirely or use them for all of the
+recognized processor models, respectively (they both are unset by default and
+``use_acpi`` has no effect if ``no_acpi`` is set).
+
+The value of the ``states_off`` module parameter (0 by default) represents a
+list of idle states to be disabled by default in the form of a bitmask.
+
+Namely, the positions of the bits that are set in the ``states_off`` value are
+the indices of idle states to be disabled by default (as reflected by the names
+of the corresponding idle state directories in ``sysfs``, :file:`state0`,
+:file:`state1` ... :file:`state<i>` ..., where ``<i>`` is the index of the given
+idle state; see :ref:`idle-states-representation` in :doc:`cpuidle`).
+
+For example, if ``states_off`` is equal to 3, the driver will disable idle
+states 0 and 1 by default, and if it is equal to 8, idle state 3 will be
+disabled by default and so on (bit positions beyond the maximum idle state index
+are ignored).
+
+The idle states disabled this way can be enabled (on a per-CPU basis) from user
+space via ``sysfs``.
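+
+For example, idle states 1 and 2 can be disabled by default by passing an
+illustrative value such as the following in the kernel command line::
+
+	intel_idle.states_off=6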
 
 
 .. _intel-idle-core-and-package-idle-states:
diff --git a/Documentation/admin-guide/pm/sleep-states.rst b/Documentation/admin-guide/pm/sleep-states.rst
index cd3a28c..ee55a46 100644
--- a/Documentation/admin-guide/pm/sleep-states.rst
+++ b/Documentation/admin-guide/pm/sleep-states.rst
@@ -153,8 +153,11 @@
 Basic ``sysfs`` Interfaces for System Suspend and Hibernation
 =============================================================
 
-The following files located in the :file:`/sys/power/` directory can be used by
-user space for sleep states control.
+The power management subsystem provides userspace with a unified ``sysfs``
+interface for system sleep regardless of the underlying system architecture or
+platform.  That interface is located in the :file:`/sys/power/` directory
+(assuming that ``sysfs`` is mounted at :file:`/sys`) and it consists of the
+following attributes (files):
 
 ``state``
 	This file contains a list of strings representing sleep states supported
@@ -162,9 +165,9 @@
 	to start a transition of the system into the sleep state represented by
 	that string.
 
-	In particular, the strings "disk", "freeze" and "standby" represent the
+	In particular, the "disk", "freeze" and "standby" strings represent the
 	:ref:`hibernation <hibernation>`, :ref:`suspend-to-idle <s2idle>` and
-	:ref:`standby <standby>` sleep states, respectively.  The string "mem"
+	:ref:`standby <standby>` sleep states, respectively.  The "mem" string
 	is interpreted in accordance with the contents of the ``mem_sleep`` file
 	described below.
 
@@ -177,7 +180,7 @@
 	associated with the "mem" string in the ``state`` file described above.
 
 	The strings that may be present in this file are "s2idle", "shallow"
-	and "deep".  The string "s2idle" always represents :ref:`suspend-to-idle
+	and "deep".  The "s2idle" string always represents :ref:`suspend-to-idle
 	<s2idle>` and, by convention, "shallow" and "deep" represent
 	:ref:`standby <standby>` and :ref:`suspend-to-RAM <s2ram>`,
 	respectively.
@@ -185,15 +188,17 @@
 	Writing one of the listed strings into this file causes the system
 	suspend variant represented by it to be associated with the "mem" string
 	in the ``state`` file.  The string representing the suspend variant
-	currently associated with the "mem" string in the ``state`` file
-	is listed in square brackets.
+	currently associated with the "mem" string in the ``state`` file is
+	shown in square brackets.
 
 	If the kernel does not support system suspend, this file is not present.
 
 ``disk``
-	This file contains a list of strings representing different operations
-	that can be carried out after the hibernation image has been saved.  The
-	possible options are as follows:
+	This file controls the operating mode of hibernation (Suspend-to-Disk).
+	Specifically, it tells the kernel what to do after creating a
+	hibernation image.
+
+	Reading from it returns a list of supported options encoded as:
 
 	``platform``
 		Put the system into a special low-power state (e.g. ACPI S4) to
@@ -201,6 +206,11 @@
 		platform firmware to take a simplified initialization path after
 		wakeup.
 
+		It is only available if the platform provides a special
+		mechanism to put the system to sleep after creating a
+		hibernation image (platforms with ACPI do that as a rule, for
+		example).
+
 	``shutdown``
 		Power off the system.
 
@@ -214,22 +224,53 @@
 		the hibernation image and continue.  Otherwise, use the image
 		to restore the previous state of the system.
 
+		It is available if system suspend is supported.
+
 	``test_resume``
 		Diagnostic operation.  Load the image as though the system had
 		just woken up from hibernation and the currently running kernel
 		instance was a restore kernel and follow up with full system
 		resume.
 
-	Writing one of the listed strings into this file causes the option
+	Writing one of the strings listed above into this file causes the option
 	represented by it to be selected.
 
-	The currently selected option is shown in square brackets which means
+	The currently selected option is shown in square brackets, which means
 	that the operation represented by it will be carried out after creating
-	and saving the image next time hibernation is triggered by writing
-	``disk`` to :file:`/sys/power/state`.
+	and saving the image when hibernation is triggered by writing ``disk``
+	to :file:`/sys/power/state`.
 
 	If the kernel does not support hibernation, this file is not present.
 
+``image_size``
+	This file controls the size of hibernation images.
+
+	A string representing a non-negative integer can be written to it; that
+	number will be used as a best-effort upper limit of the image size, in
+	bytes.  The hibernation core will do its best to ensure that the image
+	size will not exceed that number, but if that turns out to be impossible
+	to achieve, a hibernation image will still be created and its size will
+	be as small as possible.  In particular, writing '0' to this file causes
+	the size of hibernation images to be minimized.
+
+	Reading from it returns the current image size limit, which is set to
+	around 2/5 of the available RAM size by default.
+
+``pm_trace``
+	This file controls the "PM trace" mechanism saving the last suspend
+	or resume event point in the RTC memory across reboots.  It helps to
+	debug hard lockups or reboots due to device driver failures that occur
+	during system suspend or resume (the latter being the more common case).
+
+	If it contains "1", the fingerprint of each suspend/resume event point
+	in turn will be stored in the RTC memory (overwriting the actual RTC
+	information), so it will survive a system crash if one occurs right
+	after storing it and it can be used later to identify the driver that
+	caused the crash to happen.
+
+	It contains "0" by default, which may be changed to "1" by writing a
+	string representing a nonzero integer into it.
+
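+	For example (assuming ``sysfs`` is mounted at :file:`/sys`), the
+	mechanism can be enabled before a problematic suspend with::
+
+		# echo 1 > /sys/power/pm_trace
+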
 According to the above, there are two ways to make the system go into the
 :ref:`suspend-to-idle <s2idle>` state.  The first one is to write "freeze"
 directly to :file:`/sys/power/state`.  The second one is to write "s2idle" to
@@ -244,6 +285,7 @@
 The default suspend variant (ie. the one to be used without writing anything
 into :file:`/sys/power/mem_sleep`) is either "deep" (on the majority of systems
 supporting :ref:`suspend-to-RAM <s2ram>`) or "s2idle", but it can be overridden
-by the value of the "mem_sleep_default" parameter in the kernel command line.
-On some ACPI-based systems, depending on the information in the ACPI tables, the
-default may be "s2idle" even if :ref:`suspend-to-RAM <s2ram>` is supported.
+by the value of the ``mem_sleep_default`` parameter in the kernel command line.
+On some systems with ACPI, depending on the information in the ACPI tables, the
+default may be "s2idle" even if :ref:`suspend-to-RAM <s2ram>` is supported in
+principle.
diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst
index 898ad78..10c4f0c 100644
--- a/Documentation/admin-guide/thunderbolt.rst
+++ b/Documentation/admin-guide/thunderbolt.rst
@@ -1,6 +1,28 @@
-=============
- Thunderbolt
-=============
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+ USB4 and Thunderbolt
+======================
+USB4 is the public specification based on the Thunderbolt 3 protocol, with
+some differences at the register level among other things. A connection
+manager is an entity running on the host router (host controller)
+responsible for enumerating routers and establishing tunnels. A
+connection manager can be implemented either in firmware or software.
+Typically PCs come with a firmware connection manager for Thunderbolt 3
+and early USB4 capable systems. Apple systems, on the other hand, use a
+software connection manager, and later USB4 compliant devices follow
+suit.
+
+The Linux Thunderbolt driver supports both and can detect at runtime which
+connection manager implementation is to be used. To be on the safe side, the
+software connection manager in Linux also advertises security level
+``user``, which means PCIe tunneling is disabled by default. The
+documentation below applies to both implementations with the exception that
+the software connection manager only supports the ``user`` security level and
+is expected to be accompanied by IOMMU-based DMA protection.
+
+Security levels and how to use them
+-----------------------------------
 The interface presented here is not meant for end users. Instead there
 should be a userspace tool that handles all the low-level details, keeps
 a database of the authorized devices and prompts users for new connections.
@@ -18,8 +40,6 @@
 keep in mind that this bypasses the security levels and makes the system
 vulnerable to DMA attacks.
 
-Security levels and how to use them
------------------------------------
 Starting with Intel Falcon Ridge Thunderbolt controller there are 4
 security levels available. Intel Titan Ridge added one more security level
 (usbonly). The reason for these is the fact that the connected devices can
diff --git a/Documentation/arm/microchip.rst b/Documentation/arm/microchip.rst
index 1adf53d..05e5f2d 100644
--- a/Documentation/arm/microchip.rst
+++ b/Documentation/arm/microchip.rst
@@ -92,6 +92,12 @@
 
           http://ww1.microchip.com/downloads/en/DeviceDoc/DS60001517A.pdf
 
+      - sam9x60
+
+          * Datasheet
+
+          http://ww1.microchip.com/downloads/en/DeviceDoc/SAM9X60-Data-Sheet-DS60001579A.pdf
+
     * ARM Cortex-A5 based SoCs
       - sama5d3 family
 
diff --git a/Documentation/asm-annotations.rst b/Documentation/asm-annotations.rst
index f55c2bb..32ea574 100644
--- a/Documentation/asm-annotations.rst
+++ b/Documentation/asm-annotations.rst
@@ -73,10 +73,11 @@
 three main groups:
 
 1. ``SYM_FUNC_*`` -- to annotate C-like functions. This means functions with
-   standard C calling conventions, i.e. the stack contains a return address at
-   the predefined place and a return from the function can happen in a
-   standard way. When frame pointers are enabled, save/restore of frame
-   pointer shall happen at the start/end of a function, respectively, too.
+   standard C calling conventions. For example, on x86, this means that the
+   stack contains a return address at the predefined place and a return from
+   the function can happen in a standard way. When frame pointers are enabled,
+   save/restore of frame pointer shall happen at the start/end of a function,
+   respectively, too.
 
    Checking tools like ``objtool`` should ensure such marked functions conform
    to these rules. The tools can also easily annotate these functions with
diff --git a/Documentation/block/biovecs.rst b/Documentation/block/biovecs.rst
index 86fa66c8..ad303a2 100644
--- a/Documentation/block/biovecs.rst
+++ b/Documentation/block/biovecs.rst
@@ -47,7 +47,7 @@
 advantages:
 
  * Before, iterating over bios was very awkward when you weren't processing
-   exactly one bvec at a time - for example, bio_copy_data() in fs/bio.c,
+   exactly one bvec at a time - for example, bio_copy_data() in block/bio.c,
    which copies the contents of one bio into another. Because the biovecs
    wouldn't necessarily be the same size, the old code was tricky convoluted -
    it had to walk two different bios at the same time, keeping both bi_idx and
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index ab0b9ec..a501dc1 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -31,6 +31,7 @@
    generic-radix-tree
    memory-allocation
    mm-api
+   pin_user_pages
    gfp_mask-from-fs-io
    timekeeping
    boot-time-mm
@@ -40,6 +41,7 @@
    gcc-plugins
    symbol-namespaces
    padata
+   ioctl
 
 
 Interfaces for kernel debugging
diff --git a/Documentation/core-api/ioctl.rst b/Documentation/core-api/ioctl.rst
new file mode 100644
index 0000000..c455db0
--- /dev/null
+++ b/Documentation/core-api/ioctl.rst
@@ -0,0 +1,253 @@
+======================
+ioctl based interfaces
+======================
+
+ioctl() is the most common way for applications to interface
+with device drivers. It is flexible and easily extended by adding new
+commands and can be passed through character devices, block devices as
+well as sockets and other special file descriptors.
+
+However, it is also very easy to get ioctl command definitions wrong,
+and hard to fix them later without breaking existing applications,
+so this documentation tries to help developers get it right.
+
+Command number definitions
+==========================
+
+The command number, or request number, is the second argument passed to
+the ioctl system call. While this can be any 32-bit number that uniquely
+identifies an action for a particular driver, there are a number of
+conventions around defining them.
+
+``include/uapi/asm-generic/ioctl.h`` provides four macros for defining
+ioctl commands that follow modern conventions: ``_IO``, ``_IOR``,
+``_IOW``, and ``_IOWR``. These should be used for all new commands,
+with the correct parameters:
+
+_IO/_IOR/_IOW/_IOWR
+   The macro name specifies how the argument will be used.  It may be a
+   pointer to data to be passed into the kernel (_IOW), out of the kernel
+   (_IOR), or both (_IOWR).  _IO can indicate either commands with no
+   argument or those passing an integer value instead of a pointer.
+   It is recommended to only use _IO for commands without arguments,
+   and use pointers for passing data.
+
+type
+   An 8-bit number, often a character literal, specific to a subsystem
+   or driver, and listed in :doc:`../userspace-api/ioctl/ioctl-number`
+
+nr
+  An 8-bit number identifying the specific command, unique for a given
+  value of 'type'
+
+data_type
+  The name of the data type pointed to by the argument, the command number
+  encodes the ``sizeof(data_type)`` value in a 13-bit or 14-bit integer,
+  leading to a limit of 8191 bytes for the maximum size of the argument.
+  Note: do not pass sizeof(data_type) into _IOR/_IOW/_IOWR, as that
+  will lead to encoding sizeof(sizeof(data_type)), i.e. sizeof(size_t).
+  _IO does not have a data_type parameter.
+
+
+Interface versions
+==================
+
+Some subsystems use version numbers in data structures to overload
+commands with different interpretations of the argument.
+
+This is generally a bad idea, since changes to existing commands tend
+to break existing applications.
+
+A better approach is to add a new ioctl command with a new number. The
+old command still needs to be implemented in the kernel for compatibility,
+but this can be a wrapper around the new implementation.
+
+Return code
+===========
+
+ioctl commands can return negative error codes as documented in errno(3);
+these get turned into errno values in user space. On success, the return
+code should be zero. It is also possible but not recommended to return
+a positive 'long' value.
+
+When the ioctl callback is called with an unknown command number, the
+handler returns either -ENOTTY or -ENOIOCTLCMD, which also results in
+-ENOTTY being returned from the system call. Some subsystems return
+-ENOSYS or -EINVAL here for historic reasons, but this is wrong.
+
+Prior to Linux 5.5, compat_ioctl handlers were required to return
+-ENOIOCTLCMD in order to use the fallback conversion into native
+commands. As all subsystems are now responsible for handling compat
+mode themselves, this is no longer needed, but it may be important to
+consider when backporting bug fixes to older kernels.
+
+Timestamps
+==========
+
+Traditionally, timestamps and timeout values are passed as ``struct
+timespec`` or ``struct timeval``, but these are problematic because of
+incompatible definitions of these structures in user space after the
+move to 64-bit time_t.
+
+The ``struct __kernel_timespec`` type can be used instead to be embedded
+in other data structures when separate second/nanosecond values are
+desired, or passed to user space directly. This is still not ideal though,
+as the structure matches neither the kernel's timespec64 nor the user
+space timespec exactly. The get_timespec64() and put_timespec64() helper
+functions can be used to ensure that the layout remains compatible with
+user space and the padding is treated correctly.
+
+As it is cheap to convert seconds to nanoseconds, but the opposite
+requires an expensive 64-bit division, a simple __u64 nanosecond value
+can be simpler and more efficient.
+
+Timeout values and timestamps should ideally use CLOCK_MONOTONIC time,
+as returned by ktime_get_ns() or ktime_get_ts64().  Unlike
+CLOCK_REALTIME, this makes the timestamps immune from jumping backwards
+or forwards due to leap second adjustments and clock_settime() calls.
+
+ktime_get_real_ns() can be used for CLOCK_REALTIME timestamps that
+need to be persistent across a reboot or between multiple machines.
+
+32-bit compat mode
+==================
+
+In order to support 32-bit user space running on a 64-bit machine, each
+subsystem or driver that implements an ioctl callback handler must also
+implement the corresponding compat_ioctl handler.
+
+As long as all the rules for data structures are followed, this is as
+easy as setting the .compat_ioctl pointer to a helper function such as
+compat_ptr_ioctl() or blkdev_compat_ptr_ioctl().
+
+compat_ptr()
+------------
+
+On the s390 architecture, 31-bit user space has ambiguous representations
+for data pointers, with the upper bit being ignored. When running such
+a process in compat mode, the compat_ptr() helper must be used to
+clear the upper bit of a compat_uptr_t and turn it into a valid 64-bit
+pointer.  On other architectures, this macro only performs a cast to a
+``void __user *`` pointer.
+
+In a compat_ioctl() callback, the last argument is an unsigned long,
+which can be interpreted as either a pointer or a scalar depending on
+the command. If it is a scalar, then compat_ptr() must not be used, to
+ensure that the 64-bit kernel behaves the same way as a 32-bit kernel
+for arguments with the upper bit set.
+
+The compat_ptr_ioctl() helper can be used in place of a custom
+compat_ioctl file operation for drivers that only take arguments that
+are pointers to compatible data structures.
+
+Structure layout
+----------------
+
+Compatible data structures have the same layout on all architectures,
+avoiding all problematic members:
+
+* ``long`` and ``unsigned long`` are the size of a register, so
+  they can be either 32-bit or 64-bit wide and cannot be used in portable
+  data structures. Fixed-length replacements are ``__s32``, ``__u32``,
+  ``__s64`` and ``__u64``.
+
+* Pointers have the same problem, in addition to requiring the
+  use of compat_ptr(). The best workaround is to use ``__u64``
+  in place of pointers, which requires a cast to ``uintptr_t`` in user
+  space, and the use of u64_to_user_ptr() in the kernel to convert
+  it back into a user pointer.
+
+* On the x86-32 (i386) architecture, the alignment of 64-bit variables
+  is only 32-bit, but they are naturally aligned on most other
+  architectures including x86-64. This means a structure like::
+
+    struct foo {
+        __u32 a;
+        __u64 b;
+        __u32 c;
+    };
+
+  has four bytes of padding between a and b on x86-64, plus another four
+  bytes of padding at the end, but no padding on i386, and it needs a
+  compat_ioctl conversion handler to translate between the two formats.
+
+  To avoid this problem, all structures should have their members
+  naturally aligned, or explicit reserved fields added in place of the
+  implicit padding. The ``pahole`` tool can be used for checking the
+  alignment.
+
+* On ARM OABI user space, structures are padded to multiples of 32-bit,
+  making some structs incompatible with modern EABI kernels if they
+  do not end on a 32-bit boundary.
+
+* On the m68k architecture, struct members are not guaranteed to have an
+  alignment greater than 16-bit, which is a problem when relying on
+  implicit padding.
+
+* Bitfields and enums generally work as one would expect them to,
+  but some properties of them are implementation-defined, so it is better
+  to avoid them completely in ioctl interfaces.
+
+* ``char`` members can be either signed or unsigned, depending on
+  the architecture, so the __u8 and __s8 types should be used for 8-bit
+  integer values, though char arrays are clearer for fixed-length strings.
+
+Information leaks
+=================
+
+Uninitialized data must not be copied back to user space, as this can
+cause an information leak, which can be used to defeat kernel address
+space layout randomization (KASLR), helping in an attack.
+
+For this reason (and for compat support) it is best to avoid any
+implicit padding in data structures.  Where there is implicit padding
+in an existing structure, kernel drivers must be careful to fully
+initialize an instance of the structure before copying it to user
+space.  This is usually done by calling memset() before assigning to
+individual members.
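+
+For example, assuming a hypothetical ``struct foo_status`` that has
+implicit padding::
+
+    struct foo_status status;
+
+    memset(&status, 0, sizeof(status));     /* also clears padding bytes */
+    status.state = foo->state;
+    status.flags = foo->flags;
+
+    if (copy_to_user(argp, &status, sizeof(status)))
+            return -EFAULT;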
+
+Subsystem abstractions
+======================
+
+While some device drivers implement their own ioctl function, most
+subsystems implement the same command for multiple drivers.  Ideally the
+subsystem has an .ioctl() handler that copies the arguments from and
+to user space, passing them into subsystem specific callback functions
+through normal kernel pointers.
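+
+A minimal sketch of that pattern (all of the foo_* names here are
+hypothetical, for illustration only)::
+
+    static long foo_subsys_ioctl(struct file *file, unsigned int cmd,
+                                 unsigned long arg)
+    {
+            struct foo_device *foo = file->private_data;
+            struct foo_config cfg;
+
+            switch (cmd) {
+            case FOO_SET_CONFIG:
+                    if (copy_from_user(&cfg, (void __user *)arg, sizeof(cfg)))
+                            return -EFAULT;
+                    /* the driver callback only sees kernel pointers */
+                    return foo->ops->set_config(foo, &cfg);
+            default:
+                    return -ENOTTY;
+            }
+    }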
+
+This helps in various ways:
+
+* Applications written for one driver are more likely to work for
+  another one in the same subsystem if there are no subtle differences
+  in the user space ABI.
+
+* The complexity of user space access and data structure layout is handled
+  in one place, reducing the potential for implementation bugs.
+
+* It is more likely to be reviewed by experienced developers
+  who can spot problems in the interface when the ioctl is shared
+  between multiple drivers than when it is only used in a single driver.
+
+Alternatives to ioctl
+=====================
+
+There are many cases in which ioctl is not the best solution for a
+problem. Alternatives include:
+
+* System calls are a better choice for a system-wide feature that
+  is not tied to a physical device or constrained by the file system
+  permissions of a character device node.
+
+* netlink is the preferred way of configuring any network related
+  objects through sockets.
+
+* debugfs is used for ad-hoc interfaces for debugging functionality
+  that does not need to be exposed as a stable interface to applications.
+
+* sysfs is a good way to expose the state of an in-kernel object
+  that is not tied to a file descriptor.
+
+* configfs can be used for more complex configuration than sysfs.
+
+* A custom file system can provide extra flexibility with a simple
+  user interface but adds a lot of complexity to the implementation.
diff --git a/Documentation/core-api/pin_user_pages.rst b/Documentation/core-api/pin_user_pages.rst
new file mode 100644
index 0000000..1d49015
--- /dev/null
+++ b/Documentation/core-api/pin_user_pages.rst
@@ -0,0 +1,232 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================================================
+pin_user_pages() and related calls
+====================================================
+
+.. contents:: :local:
+
+Overview
+========
+
+This document describes the following functions::
+
+ pin_user_pages()
+ pin_user_pages_fast()
+ pin_user_pages_remote()
+
+Basic description of FOLL_PIN
+=============================
+
+FOLL_PIN and FOLL_LONGTERM are flags that can be passed to the get_user_pages*()
+("gup") family of functions. FOLL_PIN has significant interactions and
+interdependencies with FOLL_LONGTERM, so both are covered here.
+
+FOLL_PIN is internal to gup, meaning that it should not appear at the gup call
+sites. This allows the associated wrapper functions (pin_user_pages*() and
+others) to set the correct combination of these flags, and to check for problems
+as well.
+
+FOLL_LONGTERM, on the other hand, *is* allowed to be set at the gup call sites.
+This is in order to avoid creating a large number of wrapper functions to cover
+all combinations of get*(), pin*(), FOLL_LONGTERM, and more. Also, the
+pin_user_pages*() APIs are clearly distinct from the get_user_pages*() APIs, so
+that's a natural dividing line, and a good point to make separate wrapper calls.
+In other words, use pin_user_pages*() for DMA-pinned pages, and
+get_user_pages*() for other cases. There are four cases described later on in
+this document, to further clarify that concept.
+
+FOLL_PIN and FOLL_GET are mutually exclusive for a given gup call. However,
+multiple threads and call sites are free to pin the same struct pages, via both
+FOLL_PIN and FOLL_GET. It's just the call site that needs to choose one or the
+other, not the struct page(s).
+
+The FOLL_PIN implementation is nearly the same as FOLL_GET, except that FOLL_PIN
+uses a different reference counting technique.
+
+FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying that is,
+FOLL_LONGTERM is a specific, more restrictive case of FOLL_PIN.
+
+Which flags are set by each wrapper
+===================================
+
+For these pin_user_pages*() functions, FOLL_PIN is OR'd in with whatever gup
+flags the caller provides. The caller is required to pass in a non-null struct
+pages* array, and the function then pins pages by incrementing each by a special
+value. For now, that value is +1, just like get_user_pages*().::
+
+ Function
+ --------
+ pin_user_pages          FOLL_PIN is always set internally by this function.
+ pin_user_pages_fast     FOLL_PIN is always set internally by this function.
+ pin_user_pages_remote   FOLL_PIN is always set internally by this function.
+
+For these get_user_pages*() functions, FOLL_GET might not even be specified.
+Behavior is a little more complex than above. If FOLL_GET was *not* specified,
+but the caller passed in a non-null struct pages* array, then the function
+sets FOLL_GET for you, and proceeds to pin pages by incrementing the refcount
+of each page by +1.::
+
+ Function
+ --------
+ get_user_pages           FOLL_GET is sometimes set internally by this function.
+ get_user_pages_fast      FOLL_GET is sometimes set internally by this function.
+ get_user_pages_remote    FOLL_GET is sometimes set internally by this function.
+
+Tracking dma-pinned pages
+=========================
+
+Some of the key design constraints, and solutions, for tracking dma-pinned
+pages:
+
+* An actual reference count, per struct page, is required. This is because
+  multiple processes may pin and unpin a page.
+
+* False positives (reporting that a page is dma-pinned, when in fact it is not)
+  are acceptable, but false negatives are not.
+
+* struct page may not be increased in size for this, and all fields are already
+  used.
+
+* Given the above, we can overload the page->_refcount field by using, sort of,
+  the upper bits in that field for a dma-pinned count. "Sort of", means that,
+  rather than dividing page->_refcount into bit fields, we simply add a medium-
+  large value (GUP_PIN_COUNTING_BIAS, initially chosen to be 1024: 10 bits) to
+  page->_refcount. This provides fuzzy behavior: if a page has get_page() called
+  on it 1024 times, then it will appear to have a single dma-pinned count.
+  And again, that's acceptable.
+
+This also leads to limitations: there are only 31-10==21 bits available for a
+counter that increments 10 bits at a time.
+
+TODO: for 1GB and larger huge pages, this is cutting it close. That's because
+when pin_user_pages() follows such pages, it increments the head page by "1"
+(where "1" used to mean "+1" for get_user_pages(), but now means "+1024" for
+pin_user_pages()) for each tail page. So if you have a 1GB huge page:
+
+* There are 256K (18 bits) worth of 4 KB tail pages.
+* There are 21 bits available to count up via GUP_PIN_COUNTING_BIAS (that is,
+  10 bits at a time)
+* There are 21 - 18 == 3 bits available to count. Except that there aren't,
+  because you need to allow for a few normal get_page() calls on the head page,
+  as well. Fortunately, the approach of using addition, rather than "hard"
+  bitfields, within page->_refcount, allows for sharing these bits gracefully.
+  But we're still looking at about 8 references.
+
+This, however, is a missing feature more than anything else, because it's easily
+solved by addressing an obvious inefficiency in the original get_user_pages()
+approach of retrieving pages: stop treating all the pages as if they were
+PAGE_SIZE. Retrieve huge pages as huge pages. The callers need to be aware of
+this, so some work is required. Once that's in place, this limitation mostly
+disappears from view, because there will be ample refcounting range available.
+
+* Callers must specifically request "dma-pinned tracking of pages". In other
+  words, just calling get_user_pages() will not suffice; a new set of functions,
+  pin_user_page() and related, must be used.
+
+FOLL_PIN, FOLL_GET, FOLL_LONGTERM: when to use which flags
+==========================================================
+
+Thanks to Jan Kara, Vlastimil Babka and several other -mm people, for describing
+these categories:
+
+CASE 1: Direct IO (DIO)
+-----------------------
+There are GUP references to pages that are serving
+as DIO buffers. These buffers are needed for a relatively short time (so they
+are not "long term"). No special synchronization with page_mkclean() or
+munmap() is provided. Therefore, flags to set at the call site are: ::
+
+    FOLL_PIN
+
+...but rather than setting FOLL_PIN directly, call sites should use one of
+the pin_user_pages*() routines that set FOLL_PIN.
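+
+As a minimal sketch (the fixed buffer size is made up, and error
+handling is reduced to the bare minimum)::
+
+    struct page *pages[16];
+    int nr;
+
+    /* short-term pin of a user buffer that the device will read from */
+    nr = pin_user_pages_fast(user_addr, 16, 0, pages);
+    if (nr <= 0)
+            return nr ? nr : -EFAULT;
+
+    /* ... perform the I/O ... */
+
+    unpin_user_pages(pages, nr);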
+
+CASE 2: RDMA
+------------
+There are GUP references to pages that are serving as DMA
+buffers. These buffers are needed for a long time ("long term"). No special
+synchronization with page_mkclean() or munmap() is provided. Therefore, flags
+to set at the call site are: ::
+
+    FOLL_PIN | FOLL_LONGTERM
+
+NOTE: Some pages, such as DAX pages, cannot be pinned with longterm pins. That's
+because DAX pages do not have a separate page cache, and so "pinning" implies
+locking down file system blocks, which is not (yet) supported in that way.
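+
+A sketch of such a call site (assuming ``umem`` is a driver-private
+structure with a pre-allocated page array, and that mmap_sem is held
+for read, as the non-_fast gup variants require)::
+
+    long nr;
+
+    nr = pin_user_pages(start, npages, FOLL_WRITE | FOLL_LONGTERM,
+                        umem->pages, NULL);
+    if (nr <= 0)
+            return nr ? nr : -EFAULT;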
+
+CASE 3: Hardware with page faulting support
+-------------------------------------------
+Here, a well-written driver doesn't normally need to pin pages at all. However,
+if the driver does choose to do so, it can register MMU notifiers for the range,
+and will be called back upon invalidation. Either way (avoiding page pinning, or
+using MMU notifiers to unpin upon request), there is proper synchronization with
+both filesystem and mm (page_mkclean(), munmap(), etc).
+
+Therefore, neither flag needs to be set.
+
+In this case, ideally, neither get_user_pages() nor pin_user_pages() should be
+called. Instead, the software should be written so that it does not pin pages.
+This allows mm and filesystems to operate more efficiently and reliably.
+
+CASE 4: Pinning for struct page manipulation only
+-------------------------------------------------
+Here, normal GUP calls are sufficient, so neither flag needs to be set.
+
+page_dma_pinned(): the whole point of pinning
+=============================================
+
+The whole point of marking pages as "DMA-pinned" or "gup-pinned" is to be able
+to query, "is this page DMA-pinned?" That allows code such as page_mkclean()
+(and file system writeback code in general) to make informed decisions about
+what to do when a page cannot be unmapped due to such pins.
+
+What to do in those cases is the subject of a years-long series of discussions
+and debates (see the References at the end of this document). It's a TODO item
+here: fill in the details once that's worked out. Meanwhile, it's safe to say
+that having this available: ::
+
+        static inline bool page_dma_pinned(struct page *page)
+
+...is a prerequisite to solving the long-running gup+DMA problem.
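+
+As a sketch only (this is not code from the kernel; the eventual helper
+may well look different), such a check could be built on top of the
+GUP_PIN_COUNTING_BIAS scheme described earlier::
+
+        static inline bool page_dma_pinned(struct page *page)
+        {
+                return page_ref_count(compound_head(page)) >=
+                       GUP_PIN_COUNTING_BIAS;
+        }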
+
+Another way of thinking about FOLL_GET, FOLL_PIN, and FOLL_LONGTERM
+===================================================================
+
+Another way of thinking about these flags is as a progression of restrictions:
+FOLL_GET is for struct page manipulation, without affecting the data that the
+struct page refers to. FOLL_PIN is a *replacement* for FOLL_GET, and is for
+short term pins on pages whose data *will* get accessed. As such, FOLL_PIN is
+a "more severe" form of pinning. And finally, FOLL_LONGTERM is an even more
+restrictive case that has FOLL_PIN as a prerequisite: this is for pages that
+will be pinned longterm, and whose data will be accessed.
+
+Unit testing
+============
+
+This file::
+
+ tools/testing/selftests/vm/gup_benchmark.c
+
+has the following new calls to exercise the new pin*() wrapper functions:
+
+* PIN_FAST_BENCHMARK (./gup_benchmark -a)
+* PIN_BENCHMARK (./gup_benchmark -b)
+
+You can monitor how many total dma-pinned pages have been acquired and released
+since the system was booted, via two new /proc/vmstat entries: ::
+
+    /proc/vmstat/nr_foll_pin_requested
+    /proc/vmstat/nr_foll_pin_requested
+
+Those are both going to show zero, unless CONFIG_DEBUG_VM is set. This is
+because there is a noticeable performance drop in unpin_user_page(), when they
+are activated.
+
+References
+==========
+
+* `Some slow progress on get_user_pages() (Apr 2, 2019) <https://lwn.net/Articles/784574/>`_
+* `DMA and get_user_pages() (LPC: Dec 12, 2018) <https://lwn.net/Articles/774411/>`_
+* `The trouble with get_user_pages() (Apr 30, 2018) <https://lwn.net/Articles/753027/>`_
+
+John Hubbard, October, 2019
diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
index e4d66e7..c652d740 100644
--- a/Documentation/dev-tools/kasan.rst
+++ b/Documentation/dev-tools/kasan.rst
@@ -21,8 +21,8 @@
 
 Tag-based KASAN is only supported in Clang and requires version 7.0.0 or later.
 
-Currently generic KASAN is supported for the x86_64, arm64, xtensa and s390
-architectures, and tag-based KASAN is supported only for arm64.
+Currently generic KASAN is supported for the x86_64, arm64, xtensa, s390 and
+riscv architectures, and tag-based KASAN is supported only for arm64.
 
 Usage
 -----
diff --git a/Documentation/dev-tools/kunit/faq.rst b/Documentation/dev-tools/kunit/faq.rst
index bf20951..ea55b24 100644
--- a/Documentation/dev-tools/kunit/faq.rst
+++ b/Documentation/dev-tools/kunit/faq.rst
@@ -29,7 +29,8 @@
 
 For the most part, the KUnit core framework (what you use to write the tests)
 can compile to any architecture; it compiles like just another part of the
-kernel and runs when the kernel boots. However, there is some infrastructure,
+kernel and runs when the kernel boots or, when built as a module, when the
+module is loaded. However, there is some infrastructure,
 like the KUnit Wrapper (``tools/testing/kunit/kunit.py``) that does not support
 other architectures.
 
diff --git a/Documentation/dev-tools/kunit/index.rst b/Documentation/dev-tools/kunit/index.rst
index c60d760..d16a4d2 100644
--- a/Documentation/dev-tools/kunit/index.rst
+++ b/Documentation/dev-tools/kunit/index.rst
@@ -49,6 +49,9 @@
 of a host operating system; to be clear, it does not require any virtualization
 support; it is just a regular program.
 
+Alternatively, KUnit and KUnit tests can be built as modules, and the tests
+will run when the test module is loaded.
+
 KUnit is fast. Excluding build time, from invocation to completion KUnit can run
 several dozen tests in only 10 to 20 seconds; this might not sound like a big
 deal to some people, but having such fast and easy to run tests fundamentally
diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst
index b9a065a..7cd56a1 100644
--- a/Documentation/dev-tools/kunit/usage.rst
+++ b/Documentation/dev-tools/kunit/usage.rst
@@ -539,6 +539,22 @@
 
 Congratulations, you just ran a KUnit test on the x86 architecture!
 
+In a similar manner, KUnit and KUnit tests can also be built as modules. If
+you want to run the tests this way, you might add the following config
+options to your ``.config``:
+
+.. code-block:: none
+
+	CONFIG_KUNIT=m
+	CONFIG_KUNIT_EXAMPLE_TEST=m
+
+Once the kernel is built and installed, a simple
+
+.. code-block:: bash
+
+	modprobe example-test
+
+...will run the tests.
+
 Writing new tests for other architectures
 -----------------------------------------
 
diff --git a/Documentation/devicetree/bindings/arm/amlogic.yaml b/Documentation/devicetree/bindings/arm/amlogic.yaml
index c6a4433..f74aba4 100644
--- a/Documentation/devicetree/bindings/arm/amlogic.yaml
+++ b/Documentation/devicetree/bindings/arm/amlogic.yaml
@@ -59,6 +59,7 @@
               - friendlyarm,nanopi-k2
               - hardkernel,odroid-c2
               - nexbox,a95x
+              - videostrong,kii-pro
               - wetek,hub
               - wetek,play2
           - const: amlogic,meson-gxbb
@@ -104,6 +105,7 @@
           - enum:
               - amlogic,p230
               - amlogic,p231
+              - libretech,aml-s905d-pc
               - phicomm,n1
           - const: amlogic,s905d
           - const: amlogic,meson-gxl
@@ -115,6 +117,7 @@
               - amlogic,q201
               - khadas,vim2
               - kingnovel,r-box-pro
+              - libretech,aml-s912-pc
               - nexbox,a1
               - tronsmart,vega-s96
           - const: amlogic,s912
diff --git a/Documentation/devicetree/bindings/arm/arm-boards b/Documentation/devicetree/bindings/arm/arm-boards
index b2a9f9f..96b1dad 100644
--- a/Documentation/devicetree/bindings/arm/arm-boards
+++ b/Documentation/devicetree/bindings/arm/arm-boards
@@ -121,7 +121,7 @@
 Required nodes:
 
 - soc: some node of the RealView platforms must be the SoC
-  node that contain the SoC-specific devices, withe the compatible
+  node that contains the SoC-specific devices, with the compatible
   string set to one of these tuples:
    "arm,realview-eb-soc", "simple-bus"
    "arm,realview-pb1176-soc", "simple-bus"
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.yaml b/Documentation/devicetree/bindings/arm/atmel-at91.yaml
index 6dd8be4..0357314 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.yaml
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.yaml
@@ -37,6 +37,16 @@
 
       - items:
           - enum:
+              - overkiz,kizboxmini-base # Overkiz kizbox Mini Base Board
+              - overkiz,kizboxmini-mb   # Overkiz kizbox Mini Mother Board
+              - overkiz,kizboxmini-rd   # Overkiz kizbox Mini RailDIN
+              - overkiz,smartkiz        # Overkiz SmartKiz Board
+          - const: atmel,at91sam9g25
+          - const: atmel,at91sam9x5
+          - const: atmel,at91sam9
+
+      - items:
+          - enum:
               - atmel,at91sam9g15
               - atmel,at91sam9g25
               - atmel,at91sam9g35
@@ -52,11 +62,32 @@
           - const: atmel,sama5d2
           - const: atmel,sama5
 
+      - description: Microchip SAMA5D27 WLSOM1
+        items:
+          - const: microchip,sama5d27-wlsom1
+          - const: atmel,sama5d27
+          - const: atmel,sama5d2
+          - const: atmel,sama5
+
+      - description: Microchip SAMA5D27 WLSOM1 Evaluation Kit
+        items:
+          - const: microchip,sama5d27-wlsom1-ek
+          - const: microchip,sama5d27-wlsom1
+          - const: atmel,sama5d27
+          - const: atmel,sama5d2
+          - const: atmel,sama5
+
       - items:
           - const: atmel,sama5d27
           - const: atmel,sama5d2
           - const: atmel,sama5
 
+      - description: SAM9X60-EK board
+        items:
+          - const: microchip,sam9x60ek
+          - const: microchip,sam9x60
+          - const: atmel,at91sam9
+
       - description: Nattis v2 board with Natte v2 power board
         items:
           - const: axentia,nattis-2
diff --git a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
index e003a55..62cd4e8 100644
--- a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
@@ -45,6 +45,7 @@
 			"atmel,at91sam9260-sdramc",
 			"atmel,at91sam9g45-ddramc",
 			"atmel,sama5d3-ddramc",
+			"microchip,sam9x60-ddramc"
 - reg: Should contain registers location and length
 
 Examples:
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index c23c24f..7a9c3ce 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -242,6 +242,21 @@
 
       where voltage is in V, frequency is in MHz.
 
+  power-domains:
+    $ref: '/schemas/types.yaml#/definitions/phandle-array'
+    description:
+      List of phandles and PM domain specifiers, as defined by bindings of the
+      PM domain provider (see also ../power_domain.txt).
+
+  power-domain-names:
+    $ref: '/schemas/types.yaml#/definitions/string-array'
+    description:
+      A list of power domain name strings sorted in the same order as the
+      power-domains property.
+
+      For PSCI based platforms, the name corresponding to the index of the PSCI
+      PM domain provider must be "psci".
+
   qcom,saw:
     $ref: '/schemas/types.yaml#/definitions/phandle'
     description: |
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
index f79683a..a8e0b4a 100644
--- a/Documentation/devicetree/bindings/arm/fsl.yaml
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/arm/fsl.yaml#
+$id: http://devicetree.org/schemas/arm/fsl.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Freescale i.MX Platforms Device Tree Bindings
@@ -128,6 +128,27 @@
               - variscite,dt6customboard
           - const: fsl,imx6q
 
+      - description: i.MX6Q Gateworks Ventana Boards
+        items:
+          - enum:
+              - gw,imx6q-gw51xx
+              - gw,imx6q-gw52xx
+              - gw,imx6q-gw53xx
+              - gw,imx6q-gw5400-a
+              - gw,imx6q-gw54xx
+              - gw,imx6q-gw551x
+              - gw,imx6q-gw552x
+              - gw,imx6q-gw553x
+              - gw,imx6q-gw560x
+              - gw,imx6q-gw5903
+              - gw,imx6q-gw5904
+              - gw,imx6q-gw5907
+              - gw,imx6q-gw5910
+              - gw,imx6q-gw5912
+              - gw,imx6q-gw5913
+          - const: gw,ventana
+          - const: fsl,imx6q
+
       - description: i.MX6QP based Boards
         items:
           - enum:
@@ -154,10 +175,31 @@
               - ysoft,imx6dl-yapp4-ursa   # i.MX6 Solo Y Soft IOTA Ursa board
           - const: fsl,imx6dl
 
+      - description: i.MX6DL Gateworks Ventana Boards
+        items:
+          - enum:
+              - gw,imx6dl-gw51xx
+              - gw,imx6dl-gw52xx
+              - gw,imx6dl-gw53xx
+              - gw,imx6dl-gw54xx
+              - gw,imx6dl-gw551x
+              - gw,imx6dl-gw552x
+              - gw,imx6dl-gw553x
+              - gw,imx6dl-gw560x
+              - gw,imx6dl-gw5903
+              - gw,imx6dl-gw5904
+              - gw,imx6dl-gw5907
+              - gw,imx6dl-gw5910
+              - gw,imx6dl-gw5912
+              - gw,imx6dl-gw5913
+          - const: gw,ventana
+          - const: fsl,imx6dl
+
       - description: i.MX6SL based Boards
         items:
           - enum:
               - fsl,imx6sl-evk            # i.MX6 SoloLite EVK Board
+              - kobo,tolino-shine3
           - const: fsl,imx6sl
 
       - description: i.MX6SLL based Boards
@@ -172,6 +214,7 @@
           - enum:
               - fsl,imx6sx-sabreauto      # i.MX6 SoloX Sabre Auto Board
               - fsl,imx6sx-sdb            # i.MX6 SoloX SDB Board
+              - fsl,imx6sx-sdb-reva       # i.MX6 SoloX SDB Rev-A Board
           - const: fsl,imx6sx
 
       - description: i.MX6UL based Boards
@@ -239,6 +282,7 @@
         items:
           - enum:
               - fsl,imx7d-sdb             # i.MX7 SabreSD Board
+              - fsl,imx7d-sdb-reva        # i.MX7 SabreSD Rev-A Board
               - novtech,imx7d-meerkat96   # i.MX7 Meerkat96 Board
               - toradex,colibri-imx7d                   # Colibri iMX7 Dual Module
               - toradex,colibri-imx7d-emmc              # Colibri iMX7 Dual 1GB (eMMC) Module
@@ -263,6 +307,7 @@
       - description: i.MX7ULP based Boards
         items:
           - enum:
+              - ea,imx7ulp-com           # i.MX7ULP Embedded Artists COM Board
               - fsl,imx7ulp-evk           # i.MX7ULP Evaluation Kit
           - const: fsl,imx7ulp
 
@@ -283,7 +328,9 @@
         items:
           - enum:
               - boundary,imx8mq-nitrogen8m # i.MX8MQ NITROGEN Board
+              - einfochips,imx8mq-thor96  # i.MX8MQ Thor96 Board
               - fsl,imx8mq-evk            # i.MX8MQ EVK Board
+              - google,imx8mq-phanbell    # Google Coral Edge TPU
               - purism,librem5-devkit     # Purism Librem5 devkit
               - solidrun,hummingboard-pulse # SolidRun Hummingboard Pulse
               - technexion,pico-pi-imx8m  # TechNexion PICO-PI-8M evk
@@ -385,6 +432,13 @@
               - fsl,ls2088a-rdb
           - const: fsl,ls2088a
 
+      - description: LX2160A based Boards
+        items:
+          - enum:
+              - fsl,lx2160a-qds
+              - fsl,lx2160a-rdb
+          - const: fsl,lx2160a
+
       - description: S32V234 based Boards
         items:
           - enum:
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
deleted file mode 100644
index 771f5d2..0000000
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ /dev/null
@@ -1,706 +0,0 @@
-==========================================
-ARM idle states binding description
-==========================================
-
-==========================================
-1 - Introduction
-==========================================
-
-ARM systems contain HW capable of managing power consumption dynamically,
-where cores can be put in different low-power states (ranging from simple
-wfi to power gating) according to OS PM policies. The CPU states representing
-the range of dynamic idle states that a processor can enter at run-time, can be
-specified through device tree bindings representing the parameters required
-to enter/exit specific idle states on a given processor.
-
-According to the Server Base System Architecture document (SBSA, [3]), the
-power states an ARM CPU can be put into are identified by the following list:
-
-- Running
-- Idle_standby
-- Idle_retention
-- Sleep
-- Off
-
-The power states described in the SBSA document define the basic CPU states on
-top of which ARM platforms implement power management schemes that allow an OS
-PM implementation to put the processor in different idle states (which include
-states listed above; "off" state is not an idle state since it does not have
-wake-up capabilities, hence it is not considered in this document).
-
-Idle state parameters (e.g. entry latency) are platform specific and need to be
-characterized with bindings that provide the required information to OS PM
-code so that it can build the required tables and use them at runtime.
-
-The device tree binding definition for ARM idle states is the subject of this
-document.
-
-===========================================
-2 - idle-states definitions
-===========================================
-
-Idle states are characterized for a specific system through a set of
-timing and energy related properties, that underline the HW behaviour
-triggered upon idle states entry and exit.
-
-The following diagram depicts the CPU execution phases and related timing
-properties required to enter and exit an idle state:
-
-..__[EXEC]__|__[PREP]__|__[ENTRY]__|__[IDLE]__|__[EXIT]__|__[EXEC]__..
-	    |          |           |          |          |
-
-	    |<------ entry ------->|
-	    |       latency        |
-					      |<- exit ->|
-					      |  latency |
-	    |<-------- min-residency -------->|
-		       |<-------  wakeup-latency ------->|
-
-		Diagram 1: CPU idle state execution phases
-
-EXEC:	Normal CPU execution.
-
-PREP:	Preparation phase before committing the hardware to idle mode
-	like cache flushing. This is abortable on pending wake-up
-	event conditions. The abort latency is assumed to be negligible
-	(i.e. less than the ENTRY + EXIT duration). If aborted, CPU
-	goes back to EXEC. This phase is optional. If not abortable,
-	this should be included in the ENTRY phase instead.
-
-ENTRY:	The hardware is committed to idle mode. This period must run
-	to completion up to IDLE before anything else can happen.
-
-IDLE:	This is the actual energy-saving idle period. This may last
-	between 0 and infinite time, until a wake-up event occurs.
-
-EXIT:	Period during which the CPU is brought back to operational
-	mode (EXEC).
-
-entry-latency: Worst case latency required to enter the idle state. The
-exit-latency may be guaranteed only after entry-latency has passed.
-
-min-residency: Minimum period, including preparation and entry, for a given
-idle state to be worthwhile energywise.
-
-wakeup-latency: Maximum delay between the signaling of a wake-up event and the
-CPU being able to execute normal code again. If not specified, this is assumed
-to be entry-latency + exit-latency.
-
-These timing parameters can be used by an OS in different circumstances.
-
-An idle CPU requires the expected min-residency time to select the most
-appropriate idle state based on the expected expiry time of the next IRQ
-(i.e. wake-up) that causes the CPU to return to the EXEC phase.
-
-An operating system scheduler may need to compute the shortest wake-up delay
-for CPUs in the system by detecting how long will it take to get a CPU out
-of an idle state, e.g.:
-
-wakeup-delay = exit-latency + max(entry-latency - (now - entry-timestamp), 0)
-
-In other words, the scheduler can make its scheduling decision by selecting
-(e.g. waking-up) the CPU with the shortest wake-up delay.
-The wake-up delay must take into account the entry latency if that period
-has not expired. The abortable nature of the PREP period can be ignored
-if it cannot be relied upon (e.g. the PREP deadline may occur much sooner than
-the worst case since it depends on the CPU operating conditions, i.e. caches
-state).
-
-An OS has to reliably probe the wakeup-latency since some devices can enforce
-latency constraint guarantees to work properly, so the OS has to detect the
-worst case wake-up latency it can incur if a CPU is allowed to enter an
-idle state, and possibly to prevent that to guarantee reliable device
-functioning.
-
-The min-residency time parameter deserves further explanation since it is
-expressed in time units but must factor in energy consumption coefficients.
-
-The energy consumption of a cpu when it enters a power state can be roughly
-characterised by the following graph:
-
-               |
-               |
-               |
-           e   |
-           n   |                                      /---
-           e   |                               /------
-           r   |                        /------
-           g   |                  /-----
-           y   |           /------
-               |       ----
-               |      /|
-               |     / |
-               |    /  |
-               |   /   |
-               |  /    |
-               | /     |
-               |/      |
-          -----|-------+----------------------------------
-              0|       1                              time(ms)
-
-		Graph 1: Energy vs time example
-
-The graph is split in two parts delimited by time 1ms on the X-axis.
-The graph curve with X-axis values = { x | 0 < x < 1ms } has a steep slope
-and denotes the energy costs incurred while entering and leaving the idle
-state.
-The graph curve in the area delimited by X-axis values = {x | x > 1ms } has
-shallower slope and essentially represents the energy consumption of the idle
-state.
-
-min-residency is defined for a given idle state as the minimum expected
-residency time for a state (inclusive of preparation and entry) after
-which choosing that state become the most energy efficient option. A good
-way to visualise this, is by taking the same graph above and comparing some
-states energy consumptions plots.
-
-For sake of simplicity, let's consider a system with two idle states IDLE1,
-and IDLE2:
-
-          |
-          |
-          |
-          |                                                  /-- IDLE1
-       e  |                                              /---
-       n  |                                         /----
-       e  |                                     /---
-       r  |                                /-----/--------- IDLE2
-       g  |                    /-------/---------
-       y  |        ------------    /---|
-          |       /           /----    |
-          |      /        /---         |
-          |     /    /----             |
-          |    / /---                  |
-          |   ---                      |
-          |  /                         |
-          | /                          |
-          |/                           |                  time
-       ---/----------------------------+------------------------
-          |IDLE1-energy < IDLE2-energy | IDLE2-energy < IDLE1-energy
-                                       |
-                                IDLE2-min-residency
-
-		Graph 2: idle states min-residency example
-
-In graph 2 above, that takes into account idle states entry/exit energy
-costs, it is clear that if the idle state residency time (i.e. time till next
-wake-up IRQ) is less than IDLE2-min-residency, IDLE1 is the better idle state
-choice energywise.
-
-This is mainly down to the fact that IDLE1 entry/exit energy costs are lower
-than IDLE2.
-
-However, the lower power consumption (i.e. shallower energy curve slope) of
-idle state IDLE2 implies that after a suitable time, IDLE2 becomes more energy
-efficient.
-
-The time at which IDLE2 becomes more energy efficient than IDLE1 (and other
-shallower states in a system with multiple idle states) is defined
-IDLE2-min-residency and corresponds to the time when energy consumption of
-IDLE1 and IDLE2 states breaks even.
-
-The definitions provided in this section underpin the idle states
-properties specification that is the subject of the following sections.
-
-===========================================
-3 - idle-states node
-===========================================
-
-ARM processor idle states are defined within the idle-states node, which is
-a direct child of the cpus node [1] and provides a container where the
-processor idle states, defined as device tree nodes, are listed.
-
-- idle-states node
-
-	Usage: Optional - On ARM systems, it is a container of processor idle
-			  states nodes. If the system does not provide CPU
-			  power management capabilities, or the processor just
-			  supports idle_standby, an idle-states node is not
-			  required.
-
-	Description: idle-states node is a container node, where its
-		     subnodes describe the CPU idle states.
-
-	Node name must be "idle-states".
-
-	The idle-states node's parent node must be the cpus node.
-
-	The idle-states node's child nodes can be:
-
-	- one or more state nodes
-
-	Any other configuration is considered invalid.
-
-	An idle-states node defines the following properties:
-
-	- entry-method
-		Value type: <stringlist>
-		Usage and definition depend on ARM architecture version.
-			# On ARM v8 64-bit this property is required and must
-			  be:
-			   - "psci"
-			# On ARM 32-bit systems this property is optional
-
-This assumes that the "enable-method" property is set to "psci" in the cpu
-node[6] that is responsible for setting up CPU idle management in the OS
-implementation.
-
-The nodes describing the idle states (state) can only be defined
-within the idle-states node, any other configuration is considered invalid
-and therefore must be ignored.
-
-===========================================
-4 - state node
-===========================================
-
-A state node represents an idle state description and must be defined as
-follows:
-
-- state node
-
-	Description: must be child of the idle-states node
-
-	The state node name shall follow standard device tree naming
-	rules ([5], 2.2.1 "Node names"), in particular state nodes which
-	are siblings within a single common parent must be given a unique name.
-
-	The idle state entered by executing the wfi instruction (idle_standby
-	SBSA,[3][4]) is considered standard on all ARM platforms and therefore
-	must not be listed.
-
-	With the definitions provided above, the following list represents
-	the valid properties for a state node:
-
-	- compatible
-		Usage: Required
-		Value type: <stringlist>
-		Definition: Must be "arm,idle-state".
-
-	- local-timer-stop
-		Usage: See definition
-		Value type: <none>
-		Definition: if present the CPU local timer control logic is
-			    lost on state entry, otherwise it is retained.
-
-	- entry-latency-us
-		Usage: Required
-		Value type: <prop-encoded-array>
-		Definition: u32 value representing worst case latency in
-			    microseconds required to enter the idle state.
-
-	- exit-latency-us
-		Usage: Required
-		Value type: <prop-encoded-array>
-		Definition: u32 value representing worst case latency
-			    in microseconds required to exit the idle state.
-			    The exit-latency-us duration may be guaranteed
-			    only after entry-latency-us has passed.
-
-	- min-residency-us
-		Usage: Required
-		Value type: <prop-encoded-array>
-		Definition: u32 value representing minimum residency duration
-			    in microseconds, inclusive of preparation and
-			    entry, for this idle state to be considered
-			    worthwhile energy wise (refer to section 2 of
-			    this document for a complete description).
-
-	- wakeup-latency-us:
-		Usage: Optional
-		Value type: <prop-encoded-array>
-		Definition: u32 value representing maximum delay between the
-			    signaling of a wake-up event and the CPU being
-			    able to execute normal code again. If omitted,
-			    this is assumed to be equal to:
-
-				entry-latency-us + exit-latency-us
-
-			    It is important to supply this value on systems
-			    where the duration of PREP phase (see diagram 1,
-			    section 2) is non-neglibigle.
-			    In such systems entry-latency-us + exit-latency-us
-			    will exceed wakeup-latency-us by this duration.
-
-	- status:
-		Usage: Optional
-		Value type: <string>
-		Definition: A standard device tree property [5] that indicates
-			    the operational status of an idle-state.
-			    If present, it shall be:
-			    "okay": to indicate that the idle state is
-				    operational.
-			    "disabled": to indicate that the idle state has
-					been disabled in firmware so it is not
-					operational.
-			    If the property is not present the idle-state must
-			    be considered operational.
-
-	- idle-state-name:
-		Usage: Optional
-		Value type: <string>
-		Definition: A string used as a descriptive name for the idle
-			    state.
-
-	In addition to the properties listed above, a state node may require
-	additional properties specific to the entry-method defined in the
-	idle-states node. Please refer to the entry-method bindings
-	documentation for properties definitions.
-
-===========================================
-4 - Examples
-===========================================
-
-Example 1 (ARM 64-bit, 16-cpu system, PSCI enable-method):
-
-cpus {
-	#size-cells = <0>;
-	#address-cells = <2>;
-
-	CPU0: cpu@0 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a57";
-		reg = <0x0 0x0>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
-				   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
-	};
-
-	CPU1: cpu@1 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a57";
-		reg = <0x0 0x1>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
-				   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
-	};
-
-	CPU2: cpu@100 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a57";
-		reg = <0x0 0x100>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
-				   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
-	};
-
-	CPU3: cpu@101 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a57";
-		reg = <0x0 0x101>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
-				   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
-	};
-
-	CPU4: cpu@10000 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a57";
-		reg = <0x0 0x10000>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
-				   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
-	};
-
-	CPU5: cpu@10001 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a57";
-		reg = <0x0 0x10001>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
-				   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
-	};
-
-	CPU6: cpu@10100 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a57";
-		reg = <0x0 0x10100>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
-				   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
-	};
-
-	CPU7: cpu@10101 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a57";
-		reg = <0x0 0x10101>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
-				   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
-	};
-
-	CPU8: cpu@100000000 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a53";
-		reg = <0x1 0x0>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
-				   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
-	};
-
-	CPU9: cpu@100000001 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a53";
-		reg = <0x1 0x1>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
-				   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
-	};
-
-	CPU10: cpu@100000100 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a53";
-		reg = <0x1 0x100>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
-				   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
-	};
-
-	CPU11: cpu@100000101 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a53";
-		reg = <0x1 0x101>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
-				   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
-	};
-
-	CPU12: cpu@100010000 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a53";
-		reg = <0x1 0x10000>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
-				   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
-	};
-
-	CPU13: cpu@100010001 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a53";
-		reg = <0x1 0x10001>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
-				   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
-	};
-
-	CPU14: cpu@100010100 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a53";
-		reg = <0x1 0x10100>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
-				   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
-	};
-
-	CPU15: cpu@100010101 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a53";
-		reg = <0x1 0x10101>;
-		enable-method = "psci";
-		cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
-				   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
-	};
-
-	idle-states {
-		entry-method = "psci";
-
-		CPU_RETENTION_0_0: cpu-retention-0-0 {
-			compatible = "arm,idle-state";
-			arm,psci-suspend-param = <0x0010000>;
-			entry-latency-us = <20>;
-			exit-latency-us = <40>;
-			min-residency-us = <80>;
-		};
-
-		CLUSTER_RETENTION_0: cluster-retention-0 {
-			compatible = "arm,idle-state";
-			local-timer-stop;
-			arm,psci-suspend-param = <0x1010000>;
-			entry-latency-us = <50>;
-			exit-latency-us = <100>;
-			min-residency-us = <250>;
-			wakeup-latency-us = <130>;
-		};
-
-		CPU_SLEEP_0_0: cpu-sleep-0-0 {
-			compatible = "arm,idle-state";
-			local-timer-stop;
-			arm,psci-suspend-param = <0x0010000>;
-			entry-latency-us = <250>;
-			exit-latency-us = <500>;
-			min-residency-us = <950>;
-		};
-
-		CLUSTER_SLEEP_0: cluster-sleep-0 {
-			compatible = "arm,idle-state";
-			local-timer-stop;
-			arm,psci-suspend-param = <0x1010000>;
-			entry-latency-us = <600>;
-			exit-latency-us = <1100>;
-			min-residency-us = <2700>;
-			wakeup-latency-us = <1500>;
-		};
-
-		CPU_RETENTION_1_0: cpu-retention-1-0 {
-			compatible = "arm,idle-state";
-			arm,psci-suspend-param = <0x0010000>;
-			entry-latency-us = <20>;
-			exit-latency-us = <40>;
-			min-residency-us = <90>;
-		};
-
-		CLUSTER_RETENTION_1: cluster-retention-1 {
-			compatible = "arm,idle-state";
-			local-timer-stop;
-			arm,psci-suspend-param = <0x1010000>;
-			entry-latency-us = <50>;
-			exit-latency-us = <100>;
-			min-residency-us = <270>;
-			wakeup-latency-us = <100>;
-		};
-
-		CPU_SLEEP_1_0: cpu-sleep-1-0 {
-			compatible = "arm,idle-state";
-			local-timer-stop;
-			arm,psci-suspend-param = <0x0010000>;
-			entry-latency-us = <70>;
-			exit-latency-us = <100>;
-			min-residency-us = <300>;
-			wakeup-latency-us = <150>;
-		};
-
-		CLUSTER_SLEEP_1: cluster-sleep-1 {
-			compatible = "arm,idle-state";
-			local-timer-stop;
-			arm,psci-suspend-param = <0x1010000>;
-			entry-latency-us = <500>;
-			exit-latency-us = <1200>;
-			min-residency-us = <3500>;
-			wakeup-latency-us = <1300>;
-		};
-	};
-
-};
-
-Example 2 (ARM 32-bit, 8-cpu system, two clusters):
-
-cpus {
-	#size-cells = <0>;
-	#address-cells = <1>;
-
-	CPU0: cpu@0 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a15";
-		reg = <0x0>;
-		cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>;
-	};
-
-	CPU1: cpu@1 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a15";
-		reg = <0x1>;
-		cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>;
-	};
-
-	CPU2: cpu@2 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a15";
-		reg = <0x2>;
-		cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>;
-	};
-
-	CPU3: cpu@3 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a15";
-		reg = <0x3>;
-		cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>;
-	};
-
-	CPU4: cpu@100 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a7";
-		reg = <0x100>;
-		cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>;
-	};
-
-	CPU5: cpu@101 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a7";
-		reg = <0x101>;
-		cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>;
-	};
-
-	CPU6: cpu@102 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a7";
-		reg = <0x102>;
-		cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>;
-	};
-
-	CPU7: cpu@103 {
-		device_type = "cpu";
-		compatible = "arm,cortex-a7";
-		reg = <0x103>;
-		cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>;
-	};
-
-	idle-states {
-		CPU_SLEEP_0_0: cpu-sleep-0-0 {
-			compatible = "arm,idle-state";
-			local-timer-stop;
-			entry-latency-us = <200>;
-			exit-latency-us = <100>;
-			min-residency-us = <400>;
-			wakeup-latency-us = <250>;
-		};
-
-		CLUSTER_SLEEP_0: cluster-sleep-0 {
-			compatible = "arm,idle-state";
-			local-timer-stop;
-			entry-latency-us = <500>;
-			exit-latency-us = <1500>;
-			min-residency-us = <2500>;
-			wakeup-latency-us = <1700>;
-		};
-
-		CPU_SLEEP_1_0: cpu-sleep-1-0 {
-			compatible = "arm,idle-state";
-			local-timer-stop;
-			entry-latency-us = <300>;
-			exit-latency-us = <500>;
-			min-residency-us = <900>;
-			wakeup-latency-us = <600>;
-		};
-
-		CLUSTER_SLEEP_1: cluster-sleep-1 {
-			compatible = "arm,idle-state";
-			local-timer-stop;
-			entry-latency-us = <800>;
-			exit-latency-us = <2000>;
-			min-residency-us = <6500>;
-			wakeup-latency-us = <2300>;
-		};
-	};
-
-};
-
-===========================================
-5 - References
-===========================================
-
-[1] ARM Linux Kernel documentation - CPUs bindings
-    Documentation/devicetree/bindings/arm/cpus.yaml
-
-[2] ARM Linux Kernel documentation - PSCI bindings
-    Documentation/devicetree/bindings/arm/psci.yaml
-
-[3] ARM Server Base System Architecture (SBSA)
-    http://infocenter.arm.com/help/index.jsp
-
-[4] ARM Architecture Reference Manuals
-    http://infocenter.arm.com/help/index.jsp
-
-[5] Devicetree Specification
-    https://www.devicetree.org/specifications/
-
-[6] ARM Linux Kernel documentation - Booting AArch64 Linux
-    Documentation/arm64/booting.rst
diff --git a/Documentation/devicetree/bindings/arm/idle-states.yaml b/Documentation/devicetree/bindings/arm/idle-states.yaml
new file mode 100644
index 0000000..ea805c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/idle-states.yaml
@@ -0,0 +1,661 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/idle-states.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM idle states binding description
+
+maintainers:
+  - Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+
+description: |+
+  ==========================================
+  1 - Introduction
+  ==========================================
+
+  ARM systems contain HW capable of managing power consumption dynamically,
+  where cores can be put in different low-power states (ranging from simple wfi
+  to power gating) according to OS PM policies. The CPU states representing the
+  range of dynamic idle states that a processor can enter at run-time, can be
+  specified through device tree bindings representing the parameters required to
+  enter/exit specific idle states on a given processor.
+
+  According to the Server Base System Architecture document (SBSA, [3]), the
+  power states an ARM CPU can be put into are identified by the following list:
+
+  - Running
+  - Idle_standby
+  - Idle_retention
+  - Sleep
+  - Off
+
+  The power states described in the SBSA document define the basic CPU states on
+  top of which ARM platforms implement power management schemes that allow an OS
+  PM implementation to put the processor in different idle states (which include
+  states listed above; "off" state is not an idle state since it does not have
+  wake-up capabilities, hence it is not considered in this document).
+
+  Idle state parameters (e.g. entry latency) are platform specific and need to
+  be characterized with bindings that provide the required information to OS PM
+  code so that it can build the required tables and use them at runtime.
+
+  The device tree binding definition for ARM idle states is the subject of this
+  document.
+
+  ===========================================
+  2 - idle-states definitions
+  ===========================================
+
+  Idle states are characterized for a specific system through a set of
+  timing and energy related properties, that underline the HW behaviour
+  triggered upon idle states entry and exit.
+
+  The following diagram depicts the CPU execution phases and related timing
+  properties required to enter and exit an idle state:
+
+  ..__[EXEC]__|__[PREP]__|__[ENTRY]__|__[IDLE]__|__[EXIT]__|__[EXEC]__..
+              |          |           |          |          |
+
+              |<------ entry ------->|
+              |       latency        |
+                                                |<- exit ->|
+                                                |  latency |
+              |<-------- min-residency -------->|
+                         |<-------  wakeup-latency ------->|
+
+      Diagram 1: CPU idle state execution phases
+
+  EXEC:  Normal CPU execution.
+
+  PREP:  Preparation phase before committing the hardware to idle mode
+    like cache flushing. This is abortable on pending wake-up
+    event conditions. The abort latency is assumed to be negligible
+    (i.e. less than the ENTRY + EXIT duration). If aborted, CPU
+    goes back to EXEC. This phase is optional. If not abortable,
+    this should be included in the ENTRY phase instead.
+
+  ENTRY:  The hardware is committed to idle mode. This period must run
+    to completion up to IDLE before anything else can happen.
+
+  IDLE:  This is the actual energy-saving idle period. This may last
+    between 0 and infinite time, until a wake-up event occurs.
+
+  EXIT:  Period during which the CPU is brought back to operational
+    mode (EXEC).
+
+  entry-latency: Worst case latency required to enter the idle state. The
+  exit-latency may be guaranteed only after entry-latency has passed.
+
+  min-residency: Minimum period, including preparation and entry, for a given
+  idle state to be worthwhile energywise.
+
+  wakeup-latency: Maximum delay between the signaling of a wake-up event and the
+  CPU being able to execute normal code again. If not specified, this is assumed
+  to be entry-latency + exit-latency.
+
+  These timing parameters can be used by an OS in different circumstances.
+
+  An idle CPU requires the expected min-residency time to select the most
+  appropriate idle state based on the expected expiry time of the next IRQ
+  (i.e. wake-up) that causes the CPU to return to the EXEC phase.
+
+  An operating system scheduler may need to compute the shortest wake-up delay
+  for CPUs in the system by detecting how long it will take to get a CPU out
+  of an idle state, e.g.:
+
+  wakeup-delay = exit-latency + max(entry-latency - (now - entry-timestamp), 0)
+
+  In other words, the scheduler can make its scheduling decision by selecting
+  (e.g. waking-up) the CPU with the shortest wake-up delay.
+  The wake-up delay must take into account the entry latency if that period
+  has not expired. The abortable nature of the PREP period can be ignored
+  if it cannot be relied upon (e.g. the PREP deadline may occur much sooner than
+  the worst case since it depends on the CPU operating conditions, i.e. caches
+  state).
+
+  An OS has to reliably probe the wakeup-latency since some devices can enforce
+  latency constraint guarantees to work properly, so the OS has to detect the
+  worst case wake-up latency it can incur if a CPU is allowed to enter an
+  idle state, and possibly to prevent that to guarantee reliable device
+  functioning.
+
+  The min-residency time parameter deserves further explanation since it is
+  expressed in time units but must factor in energy consumption coefficients.
+
+  The energy consumption of a cpu when it enters a power state can be roughly
+  characterised by the following graph:
+
+                 |
+                 |
+                 |
+             e   |
+             n   |                                      /---
+             e   |                               /------
+             r   |                        /------
+             g   |                  /-----
+             y   |           /------
+                 |       ----
+                 |      /|
+                 |     / |
+                 |    /  |
+                 |   /   |
+                 |  /    |
+                 | /     |
+                 |/      |
+            -----|-------+----------------------------------
+                0|       1                              time(ms)
+
+      Graph 1: Energy vs time example
+
+  The graph is split in two parts delimited by time 1ms on the X-axis.
+  The graph curve with X-axis values = { x | 0 < x < 1ms } has a steep slope
+  and denotes the energy costs incurred while entering and leaving the idle
+  state.
+  The graph curve in the area delimited by X-axis values = {x | x > 1ms } has
+  shallower slope and essentially represents the energy consumption of the idle
+  state.
+
+  min-residency is defined for a given idle state as the minimum expected
+  residency time for a state (inclusive of preparation and entry) after
+  which choosing that state becomes the most energy efficient option. A good
+  way to visualise this is to take the same graph above and compare some
+  states' energy consumption plots.
+
+  For sake of simplicity, let's consider a system with two idle states IDLE1,
+  and IDLE2:
+
+            |
+            |
+            |
+            |                                                  /-- IDLE1
+         e  |                                              /---
+         n  |                                         /----
+         e  |                                     /---
+         r  |                                /-----/--------- IDLE2
+         g  |                    /-------/---------
+         y  |        ------------    /---|
+            |       /           /----    |
+            |      /        /---         |
+            |     /    /----             |
+            |    / /---                  |
+            |   ---                      |
+            |  /                         |
+            | /                          |
+            |/                           |                  time
+         ---/----------------------------+------------------------
+            |IDLE1-energy < IDLE2-energy | IDLE2-energy < IDLE1-energy
+                                         |
+                                  IDLE2-min-residency
+
+      Graph 2: idle states min-residency example
+
+  In graph 2 above, that takes into account idle states entry/exit energy
+  costs, it is clear that if the idle state residency time (i.e. time till next
+  wake-up IRQ) is less than IDLE2-min-residency, IDLE1 is the better idle state
+  choice energywise.
+
+  This is mainly down to the fact that IDLE1 entry/exit energy costs are lower
+  than those of IDLE2.
+
+  However, the lower power consumption (i.e. shallower energy curve slope) of
+  idle state IDLE2 implies that after a suitable time, IDLE2 becomes more energy
+  efficient.
+
+  The time at which IDLE2 becomes more energy efficient than IDLE1 (and other
+  shallower states in a system with multiple idle states) is defined as
+  IDLE2-min-residency and corresponds to the time at which the energy
+  consumption of the IDLE1 and IDLE2 states breaks even.
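+
+  As an illustrative model (not part of the binding), if E1 and E2 denote the
+  entry/exit energy costs and P1 and P2 the idle power consumption of IDLE1
+  and IDLE2 (with E2 > E1 and P2 < P1), the break-even residency is roughly:
+
+      IDLE2-min-residency ~= (E2 - E1) / (P1 - P2)
+
+  For example (arbitrary figures), E1 = 1uJ, E2 = 5uJ, P1 = 20mW and P2 = 10mW
+  give a break-even residency of about 0.4ms.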
+
+  The definitions provided in this section underpin the idle states
+  properties specification that is the subject of the following sections.
+
+  ===========================================
+  3 - idle-states node
+  ===========================================
+
+  ARM processor idle states are defined within the idle-states node, which is
+  a direct child of the cpus node [1] and provides a container where the
+  processor idle states, defined as device tree nodes, are listed.
+
+  On ARM systems, it is a container of processor idle state nodes. If the
+  system does not provide CPU power management capabilities, or the processor
+  just supports idle_standby, an idle-states node is not required.
+
+  ===========================================
+  4 - References
+  ===========================================
+
+  [1] ARM Linux Kernel documentation - CPUs bindings
+      Documentation/devicetree/bindings/arm/cpus.yaml
+
+  [2] ARM Linux Kernel documentation - PSCI bindings
+      Documentation/devicetree/bindings/arm/psci.yaml
+
+  [3] ARM Server Base System Architecture (SBSA)
+      http://infocenter.arm.com/help/index.jsp
+
+  [4] ARM Architecture Reference Manuals
+      http://infocenter.arm.com/help/index.jsp
+
+  [6] ARM Linux Kernel documentation - Booting AArch64 Linux
+      Documentation/arm64/booting.rst
+
+properties:
+  $nodename:
+    const: idle-states
+
+  entry-method:
+    description: |
+      Usage and definition depend on ARM architecture version.
+
+      On ARM v8 64-bit systems this property is required.
+      On ARM 32-bit systems this property is optional.
+
+      This assumes that the "enable-method" property is set to "psci" in the cpu
+      node[6] that is responsible for setting up CPU idle management in the OS
+      implementation.
+    const: psci
+
+patternProperties:
+  "^(cpu|cluster)-":
+    type: object
+    description: |
+      Each state node represents an idle state description and must be defined
+      as follows.
+
+      The idle state entered by executing the wfi instruction (idle_standby
+      SBSA [3][4]) is considered standard on all ARM platforms and therefore
+      must not be listed.
+
+      In addition to the properties listed above, a state node may require
+      additional properties specific to the entry-method defined in the
+      idle-states node. Please refer to the entry-method bindings
+      documentation for property definitions.
+
+    properties:
+      compatible:
+        const: arm,idle-state
+
+      local-timer-stop:
+        description:
+          If present, the CPU local timer control logic is lost on state
+          entry, otherwise it is retained.
+        type: boolean
+
+      entry-latency-us:
+        description:
+          Worst case latency in microseconds required to enter the idle state.
+
+      exit-latency-us:
+        description:
+          Worst case latency in microseconds required to exit the idle state.
+          The exit-latency-us duration may be guaranteed only after
+          entry-latency-us has passed.
+
+      min-residency-us:
+        description:
+          Minimum residency duration in microseconds, inclusive of preparation
+          and entry, for this idle state to be considered worthwhile energy wise
+          (refer to section 2 of this document for a complete description).
+
+      wakeup-latency-us:
+        description: |
+          Maximum delay between the signaling of a wake-up event and the CPU
+          being able to execute normal code again. If omitted, this is assumed
+          to be equal to:
+
+            entry-latency-us + exit-latency-us
+
+          It is important to supply this value on systems where the duration of
+          PREP phase (see diagram 1, section 2) is non-negligible. In such
+          systems entry-latency-us + exit-latency-us will exceed
+          wakeup-latency-us by this duration.
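+
+          As an illustrative, non-normative example: a state with
+          entry-latency-us = 100 and exit-latency-us = 200 whose PREP phase
+          lasts 50us could declare wakeup-latency-us = 250, so that
+          entry-latency-us + exit-latency-us exceeds wakeup-latency-us by
+          exactly the PREP duration.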
+
+      idle-state-name:
+        $ref: /schemas/types.yaml#/definitions/string
+        description:
+          A string used as a descriptive name for the idle state.
+
+    required:
+      - compatible
+      - entry-latency-us
+      - exit-latency-us
+      - min-residency-us
+
+additionalProperties: false
+
+examples:
+  - |
+
+    cpus {
+        #size-cells = <0>;
+        #address-cells = <2>;
+
+        cpu@0 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x0>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
+                   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
+        };
+
+        cpu@1 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x1>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
+                   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
+        };
+
+        cpu@100 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x100>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
+                   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
+        };
+
+        cpu@101 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x101>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
+                   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
+        };
+
+        cpu@10000 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x10000>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
+                   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
+        };
+
+        cpu@10001 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x10001>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
+                   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
+        };
+
+        cpu@10100 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x10100>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
+                   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
+        };
+
+        cpu@10101 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a57";
+            reg = <0x0 0x10101>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
+                   &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
+        };
+
+        cpu@100000000 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x0>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
+                   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
+        };
+
+        cpu@100000001 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x1>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
+                   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
+        };
+
+        cpu@100000100 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x100>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
+                   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
+        };
+
+        cpu@100000101 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x101>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
+                   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
+        };
+
+        cpu@100010000 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x10000>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
+                   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
+        };
+
+        cpu@100010001 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x10001>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
+                   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
+        };
+
+        cpu@100010100 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x10100>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
+                   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
+        };
+
+        cpu@100010101 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a53";
+            reg = <0x1 0x10101>;
+            enable-method = "psci";
+            cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
+                   &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
+        };
+
+        idle-states {
+            entry-method = "psci";
+
+            CPU_RETENTION_0_0: cpu-retention-0-0 {
+                compatible = "arm,idle-state";
+                arm,psci-suspend-param = <0x0010000>;
+                entry-latency-us = <20>;
+                exit-latency-us = <40>;
+                min-residency-us = <80>;
+            };
+
+            CLUSTER_RETENTION_0: cluster-retention-0 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                arm,psci-suspend-param = <0x1010000>;
+                entry-latency-us = <50>;
+                exit-latency-us = <100>;
+                min-residency-us = <250>;
+                wakeup-latency-us = <130>;
+            };
+
+            CPU_SLEEP_0_0: cpu-sleep-0-0 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                arm,psci-suspend-param = <0x0010000>;
+                entry-latency-us = <250>;
+                exit-latency-us = <500>;
+                min-residency-us = <950>;
+            };
+
+            CLUSTER_SLEEP_0: cluster-sleep-0 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                arm,psci-suspend-param = <0x1010000>;
+                entry-latency-us = <600>;
+                exit-latency-us = <1100>;
+                min-residency-us = <2700>;
+                wakeup-latency-us = <1500>;
+            };
+
+            CPU_RETENTION_1_0: cpu-retention-1-0 {
+                compatible = "arm,idle-state";
+                arm,psci-suspend-param = <0x0010000>;
+                entry-latency-us = <20>;
+                exit-latency-us = <40>;
+                min-residency-us = <90>;
+            };
+
+            CLUSTER_RETENTION_1: cluster-retention-1 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                arm,psci-suspend-param = <0x1010000>;
+                entry-latency-us = <50>;
+                exit-latency-us = <100>;
+                min-residency-us = <270>;
+                wakeup-latency-us = <100>;
+            };
+
+            CPU_SLEEP_1_0: cpu-sleep-1-0 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                arm,psci-suspend-param = <0x0010000>;
+                entry-latency-us = <70>;
+                exit-latency-us = <100>;
+                min-residency-us = <300>;
+                wakeup-latency-us = <150>;
+            };
+
+            CLUSTER_SLEEP_1: cluster-sleep-1 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                arm,psci-suspend-param = <0x1010000>;
+                entry-latency-us = <500>;
+                exit-latency-us = <1200>;
+                min-residency-us = <3500>;
+                wakeup-latency-us = <1300>;
+            };
+        };
+    };
+
+  - |
+    // Example 2 (ARM 32-bit, 8-cpu system, two clusters):
+
+    cpus {
+        #size-cells = <0>;
+        #address-cells = <1>;
+
+        cpu@0 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a15";
+            reg = <0x0>;
+            cpu-idle-states = <&cpu_sleep_0_0 &cluster_sleep_0>;
+        };
+
+        cpu@1 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a15";
+            reg = <0x1>;
+            cpu-idle-states = <&cpu_sleep_0_0 &cluster_sleep_0>;
+        };
+
+        cpu@2 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a15";
+            reg = <0x2>;
+            cpu-idle-states = <&cpu_sleep_0_0 &cluster_sleep_0>;
+        };
+
+        cpu@3 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a15";
+            reg = <0x3>;
+            cpu-idle-states = <&cpu_sleep_0_0 &cluster_sleep_0>;
+        };
+
+        cpu@100 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a7";
+            reg = <0x100>;
+            cpu-idle-states = <&cpu_sleep_1_0 &cluster_sleep_1>;
+        };
+
+        cpu@101 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a7";
+            reg = <0x101>;
+            cpu-idle-states = <&cpu_sleep_1_0 &cluster_sleep_1>;
+        };
+
+        cpu@102 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a7";
+            reg = <0x102>;
+            cpu-idle-states = <&cpu_sleep_1_0 &cluster_sleep_1>;
+        };
+
+        cpu@103 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a7";
+            reg = <0x103>;
+            cpu-idle-states = <&cpu_sleep_1_0 &cluster_sleep_1>;
+        };
+
+        idle-states {
+            cpu_sleep_0_0: cpu-sleep-0-0 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                entry-latency-us = <200>;
+                exit-latency-us = <100>;
+                min-residency-us = <400>;
+                wakeup-latency-us = <250>;
+            };
+
+            cluster_sleep_0: cluster-sleep-0 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                entry-latency-us = <500>;
+                exit-latency-us = <1500>;
+                min-residency-us = <2500>;
+                wakeup-latency-us = <1700>;
+            };
+
+            cpu_sleep_1_0: cpu-sleep-1-0 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                entry-latency-us = <300>;
+                exit-latency-us = <500>;
+                min-residency-us = <900>;
+                wakeup-latency-us = <600>;
+            };
+
+            cluster_sleep_1: cluster-sleep-1 {
+                compatible = "arm,idle-state";
+                local-timer-stop;
+                entry-latency-us = <800>;
+                exit-latency-us = <2000>;
+                min-residency-us = <6500>;
+                wakeup-latency-us = <2300>;
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml
index 5587490..79902f4 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml
@@ -47,7 +47,7 @@
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
 
-    cache-controller@1100000 {
+    system-cache-controller@1100000 {
       compatible = "qcom,sdm845-llcc";
       reg = <0x1100000 0x200000>, <0x1300000 0x50000> ;
       reg-names = "llcc_base", "llcc_broadcast_base";
diff --git a/Documentation/devicetree/bindings/arm/psci.yaml b/Documentation/devicetree/bindings/arm/psci.yaml
index 7abdf58b3..8ef8542 100644
--- a/Documentation/devicetree/bindings/arm/psci.yaml
+++ b/Documentation/devicetree/bindings/arm/psci.yaml
@@ -102,6 +102,34 @@
       [1] Kernel documentation - ARM idle states bindings
         Documentation/devicetree/bindings/arm/idle-states.txt
 
+  "#power-domain-cells":
+    description:
+      The number of cells in a PM domain specifier, as per the binding in [3].
+      Must be 0 so as to represent a single PM domain.
+
+      ARM systems can have multiple cores, sometimes in a hierarchical
+      arrangement. This often, but not always, maps directly to the processor
+      power topology of the system. Individual nodes in a topology have their
+      own specific power states and can be better represented hierarchically.
+
+      For these cases, the definitions of the idle states for the CPUs and the
+      CPU topology must conform to the binding in [3]. The idle states
+      themselves must conform to the binding in [4] and must specify the
+      arm,psci-suspend-param property.
+
+      It should also be noted that, in PSCI firmware v1.0, the OS-Initiated
+      (OSI) CPU suspend mode was introduced. Using a hierarchical
+      representation helps to implement support for OSI mode, and OS
+      implementations may choose to mandate it.
+
+      [3] Documentation/devicetree/bindings/power/power_domain.txt
+      [4] Documentation/devicetree/bindings/power/domain-idle-state.txt
+
+  power-domains:
+    $ref: '/schemas/types.yaml#/definitions/phandle-array'
+    description:
+      List of phandles and PM domain specifiers, as defined by bindings of the
+      PM domain provider.
 
 required:
   - compatible
@@ -160,4 +188,80 @@
       cpu_on = <0x95c10002>;
       cpu_off = <0x95c10001>;
     };
+
+  - |+
+
+    // Case 4: CPUs and CPU idle states described using the hierarchical model.
+
+    cpus {
+      #size-cells = <0>;
+      #address-cells = <1>;
+
+      CPU0: cpu@0 {
+        device_type = "cpu";
+        compatible = "arm,cortex-a53", "arm,armv8";
+        reg = <0x0>;
+        enable-method = "psci";
+        power-domains = <&CPU_PD0>;
+        power-domain-names = "psci";
+      };
+
+      CPU1: cpu@1 {
+        device_type = "cpu";
+        compatible = "arm,cortex-a57", "arm,armv8";
+        reg = <0x100>;
+        enable-method = "psci";
+        power-domains = <&CPU_PD1>;
+        power-domain-names = "psci";
+      };
+
+      idle-states {
+
+        CPU_PWRDN: cpu-power-down {
+          compatible = "arm,idle-state";
+          arm,psci-suspend-param = <0x0000001>;
+          entry-latency-us = <10>;
+          exit-latency-us = <10>;
+          min-residency-us = <100>;
+        };
+
+        CLUSTER_RET: cluster-retention {
+          compatible = "domain-idle-state";
+          arm,psci-suspend-param = <0x1000011>;
+          entry-latency-us = <500>;
+          exit-latency-us = <500>;
+          min-residency-us = <2000>;
+        };
+
+        CLUSTER_PWRDN: cluster-power-down {
+          compatible = "domain-idle-state";
+          arm,psci-suspend-param = <0x1000031>;
+          entry-latency-us = <2000>;
+          exit-latency-us = <2000>;
+          min-residency-us = <6000>;
+        };
+      };
+    };
+
+    psci {
+      compatible = "arm,psci-1.0";
+      method = "smc";
+
+      CPU_PD0: cpu-pd0 {
+        #power-domain-cells = <0>;
+        domain-idle-states = <&CPU_PWRDN>;
+        power-domains = <&CLUSTER_PD>;
+      };
+
+      CPU_PD1: cpu-pd1 {
+        #power-domain-cells = <0>;
+        domain-idle-states =  <&CPU_PWRDN>;
+        power-domains = <&CLUSTER_PD>;
+      };
+
+      CLUSTER_PD: cluster-pd {
+        #power-domain-cells = <0>;
+        domain-idle-states = <&CLUSTER_RET>, <&CLUSTER_PWRDN>;
+      };
+    };
 ...
diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
index e39d8f0..5976c0b 100644
--- a/Documentation/devicetree/bindings/arm/qcom.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom.yaml
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/arm/qcom.yaml#
+$id: http://devicetree.org/schemas/arm/qcom.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: QCOM device tree bindings
@@ -24,28 +24,30 @@
 
   The 'SoC' element must be one of the following strings:
 
-  	apq8016
-  	apq8074
-  	apq8084
-  	apq8096
-  	msm8916
-  	msm8974
-  	msm8992
-  	msm8994
-  	msm8996
-  	mdm9615
-  	ipq8074
-  	sdm845
+        apq8016
+        apq8074
+        apq8084
+        apq8096
+        ipq8074
+        mdm9615
+        msm8916
+        msm8974
+        msm8992
+        msm8994
+        msm8996
+        sc7180
+        sdm845
 
   The 'board' element must be one of the following strings:
 
-  	cdp
-  	liquid
-  	dragonboard
-  	mtp
-  	sbc
-  	hk01
-  	qrd
+        cdp
+        dragonboard
+        hk01
+        idp
+        liquid
+        mtp
+        qrd
+        sbc
 
   The 'soc_version' and 'board_version' elements take the form of v<Major>.<Minor>
   where the minor number may be omitted when it's zero, i.e.  v1.0 is the same
@@ -144,4 +146,8 @@
               - qcom,ipq8074-hk01
           - const: qcom,ipq8074
 
+      - items:
+          - enum:
+              - qcom,sc7180-idp
+          - const: qcom,sc7180
 ...
diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml
index d9847b3..874b0ea 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip.yaml
@@ -409,6 +409,9 @@
 
       - description: Pine64 RockPro64
         items:
+          - enum:
+              - pine64,rockpro64-v2.1
+              - pine64,rockpro64-v2.0
           - const: pine64,rockpro64
           - const: rockchip,rk3399
 
@@ -422,6 +425,12 @@
           - const: radxa,rockpi4
           - const: rockchip,rk3399
 
+      - description: Radxa ROCK Pi N10
+        items:
+          - const: radxa,rockpi-n10
+          - const: vamrs,rk3399pro-vmarc-som
+          - const: rockchip,rk3399pro
+
       - description: Radxa Rock2 Square
         items:
           - const: radxa,rock2-square
diff --git a/Documentation/devicetree/bindings/arm/sprd.yaml b/Documentation/devicetree/bindings/arm/sprd.yaml
deleted file mode 100644
index c35fb84..0000000
--- a/Documentation/devicetree/bindings/arm/sprd.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-# Copyright 2019 Unisoc Inc.
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/arm/sprd.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Unisoc platforms device tree bindings
-
-maintainers:
-  - Orson Zhai <orsonzhai@gmail.com>
-  - Baolin Wang <baolin.wang7@gmail.com>
-  - Chunyan Zhang <zhang.lyra@gmail.com>
-
-properties:
-  $nodename:
-    const: '/'
-  compatible:
-    oneOf:
-      - items:
-          - enum:
-              - sprd,sc9836-openphone
-          - const: sprd,sc9836
-      - items:
-          - enum:
-              - sprd,sp9860g-1h10
-          - const: sprd,sc9860
-      - items:
-          - enum:
-              - sprd,sp9863a-1h10
-          - const: sprd,sc9863a
-
-...
diff --git a/Documentation/devicetree/bindings/arm/sprd/sprd.yaml b/Documentation/devicetree/bindings/arm/sprd/sprd.yaml
new file mode 100644
index 0000000..0258a96b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/sprd/sprd.yaml
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2019 Unisoc Inc.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/sprd/sprd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Unisoc platforms device tree bindings
+
+maintainers:
+  - Orson Zhai <orsonzhai@gmail.com>
+  - Baolin Wang <baolin.wang7@gmail.com>
+  - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+  $nodename:
+    const: '/'
+  compatible:
+    oneOf:
+      - items:
+          - enum:
+              - sprd,sc9836-openphone
+          - const: sprd,sc9836
+      - items:
+          - enum:
+              - sprd,sp9860g-1h10
+          - const: sprd,sc9860
+      - items:
+          - enum:
+              - sprd,sp9863a-1h10
+          - const: sprd,sc9863a
+
+...
diff --git a/Documentation/devicetree/bindings/arm/stm32/mlahb.txt b/Documentation/devicetree/bindings/arm/stm32/mlahb.txt
deleted file mode 100644
index 25307aa..0000000
--- a/Documentation/devicetree/bindings/arm/stm32/mlahb.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-ML-AHB interconnect bindings
-
-These bindings describe the STM32 SoCs ML-AHB interconnect bus which connects
-a Cortex-M subsystem with dedicated memories.
-The MCU SRAM and RETRAM memory parts can be accessed through different addresses
-(see "RAM aliases" in [1]) using different buses (see [2]) : balancing the
-Cortex-M firmware accesses among those ports allows to tune the system
-performance.
-
-[1]: https://www.st.com/resource/en/reference_manual/dm00327659.pdf
-[2]: https://wiki.st.com/stm32mpu/wiki/STM32MP15_RAM_mapping
-
-Required properties:
-- compatible: should be "simple-bus"
-- dma-ranges: describes memory addresses translation between the local CPU and
-	   the remote Cortex-M processor. Each memory region, is declared with
-	   3 parameters:
-		 - param 1: device base address (Cortex-M processor address)
-		 - param 2: physical base address (local CPU address)
-		 - param 3: size of the memory region.
-
-The Cortex-M remote processor accessed via the mlahb interconnect is described
-by a child node.
-
-Example:
-mlahb {
-	compatible = "simple-bus";
-	#address-cells = <1>;
-	#size-cells = <1>;
-	dma-ranges = <0x00000000 0x38000000 0x10000>,
-		     <0x10000000 0x10000000 0x60000>,
-		     <0x30000000 0x30000000 0x60000>;
-
-	m4_rproc: m4@10000000 {
-		...
-	};
-};
diff --git a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
new file mode 100644
index 0000000..68917bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/stm32/st,mlahb.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: STMicroelectronics STM32 ML-AHB interconnect bindings
+
+maintainers:
+  - Fabien Dessenne <fabien.dessenne@st.com>
+  - Arnaud Pouliquen <arnaud.pouliquen@st.com>
+
+description: |
+  These bindings describe the STM32 SoCs ML-AHB interconnect bus which connects
+  a Cortex-M subsystem with dedicated memories. The MCU SRAM and RETRAM memory
+  parts can be accessed through different addresses (see "RAM aliases" in [1])
+  using different buses (see [2]): balancing the Cortex-M firmware accesses
+  among those ports makes it possible to tune the system performance.
+  [1]: https://www.st.com/resource/en/reference_manual/dm00327659.pdf
+  [2]: https://wiki.st.com/stm32mpu/wiki/STM32MP15_RAM_mapping
+
+allOf:
+  - $ref: /schemas/simple-bus.yaml#
+
+properties:
+  compatible:
+    contains:
+      enum:
+        - st,mlahb
+
+  dma-ranges:
+    description: |
+      Describes the memory address translation between the local CPU and the
+      remote Cortex-M processor. Each memory region is declared with
+      3 parameters:
+      - param 1: device base address (Cortex-M processor address)
+      - param 2: physical base address (local CPU address)
+      - param 3: size of the memory region.
+    maxItems: 3
+
+  '#address-cells':
+    const: 1
+
+  '#size-cells':
+    const: 1
+
+required:
+  - compatible
+  - '#address-cells'
+  - '#size-cells'
+  - dma-ranges
+
+examples:
+  - |
+    mlahb: ahb {
+      compatible = "st,mlahb", "simple-bus";
+      #address-cells = <1>;
+      #size-cells = <1>;
+      reg = <0x10000000 0x40000>;
+      ranges;
+      dma-ranges = <0x00000000 0x38000000 0x10000>,
+                   <0x10000000 0x10000000 0x60000>,
+                   <0x30000000 0x30000000 0x60000>;
+
+      m4_rproc: m4@10000000 {
+        reg = <0x10000000 0x40000>;
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml b/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml
new file mode 100644
index 0000000..0dedf94
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/stm32/st,stm32-syscon.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: STMicroelectronics STM32 Platforms System Controller bindings
+
+maintainers:
+  - Alexandre Torgue <alexandre.torgue@st.com>
+  - Christophe Roullier <christophe.roullier@st.com>
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+        - enum:
+          - st,stm32mp157-syscfg
+        - const: syscon
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+
+examples:
+  - |
+    #include <dt-bindings/clock/stm32mp1-clks.h>
+    syscfg: syscon@50020000 {
+        compatible = "st,stm32mp157-syscfg", "syscon";
+        reg = <0x50020000 0x400>;
+        clocks = <&rcc SYSCFG>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/arm/stm32/stm32-syscon.txt b/Documentation/devicetree/bindings/arm/stm32/stm32-syscon.txt
deleted file mode 100644
index c92d411..0000000
--- a/Documentation/devicetree/bindings/arm/stm32/stm32-syscon.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-STMicroelectronics STM32 Platforms System Controller
-
-Properties:
-   - compatible : should contain two values. First value must be :
-                 - " st,stm32mp157-syscfg " - for stm32mp157 based SoCs,
-                 second value must be always "syscon".
-   - reg : offset and length of the register set.
-   - clocks: phandle to the syscfg clock
-
- Example:
-         syscfg: syscon@50020000 {
-                 compatible = "st,stm32mp157-syscfg", "syscon";
-                 reg = <0x50020000 0x400>;
-                 clocks = <&rcc SYSCFG>;
-         };
-
diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml
index cffe8bb..327ce67 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.yaml
+++ b/Documentation/devicetree/bindings/arm/sunxi.yaml
@@ -342,6 +342,16 @@
           - const: libretech,all-h3-cc-h5
           - const: allwinner,sun50i-h5
 
+      - description: Libre Computer Board ALL-H3-IT H5
+        items:
+          - const: libretech,all-h3-it-h5
+          - const: allwinner,sun50i-h5
+
+      - description: Libre Computer Board ALL-H5-CC H5
+        items:
+          - const: libretech,all-h5-cc-h5
+          - const: allwinner,sun50i-h5
+
       - description: Lichee Pi One
         items:
           - const: licheepi,licheepi-one
@@ -470,6 +480,12 @@
           - const: emlid,neutis-n5
           - const: allwinner,sun50i-h5
 
+      - description: Emlid Neutis N5H3 Developer Board
+        items:
+          - const: emlid,neutis-n5h3-devboard
+          - const: emlid,neutis-n5h3
+          - const: allwinner,sun8i-h3
+
       - description: NextThing Co. CHIP
         items:
           - const: nextthing,chip
@@ -599,11 +615,16 @@
           - const: pine64,pine64-plus
           - const: allwinner,sun50i-a64
 
-      - description: Pine64 PineH64
+      - description: Pine64 PineH64 model A
         items:
           - const: pine64,pine-h64
           - const: allwinner,sun50i-h6
 
+      - description: Pine64 PineH64 model B
+        items:
+          - const: pine64,pine-h64-model-b
+          - const: allwinner,sun50i-h6
+
       - description: Pine64 LTS
         items:
           - const: pine64,pine64-lts
diff --git a/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun4i-a10-mbus.yaml b/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun4i-a10-mbus.yaml
new file mode 100644
index 0000000..9370e64
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun4i-a10-mbus.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/sunxi/allwinner,sun4i-a10-mbus.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner Memory Bus (MBUS) controller
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+description: |
+  The MBUS controller drives the MBUS that other devices in the SoC
+  will use to perform DMA. It also has a register interface that
+  allows monitoring and controlling the bandwidth and priorities for
+  masters on that bus.
+
+  Each device having to perform its DMA through the MBUS must have
+  the interconnects and interconnect-names properties set to the MBUS
+  controller, with "dma-mem" as the interconnect name.
+
+properties:
+  "#interconnect-cells":
+    const: 1
+    description:
+      The content of the cell is the MBUS ID.
+
+  compatible:
+    enum:
+      - allwinner,sun5i-a13-mbus
+      - allwinner,sun8i-h3-mbus
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  dma-ranges:
+    description:
+      See section 2.3.9 of the DeviceTree Specification.
+
+required:
+  - "#interconnect-cells"
+  - compatible
+  - reg
+  - clocks
+  - dma-ranges
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/sun5i-ccu.h>
+
+    mbus: dram-controller@1c01000 {
+        compatible = "allwinner,sun5i-a13-mbus";
+        reg = <0x01c01000 0x1000>;
+        clocks = <&ccu CLK_MBUS>;
+        dma-ranges = <0x00000000 0x40000000 0x20000000>;
+        #interconnect-cells = <1>;
+    };
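+
+    // Illustrative consumer node, adapted from the consumer example in the
+    // sunxi-mbus.txt binding removed by this patch; the MBUS port ID (19) is
+    // just an example value.
+    fe0: display-frontend@1e00000 {
+        compatible = "allwinner,sun5i-a13-display-frontend";
+        interconnects = <&mbus 19>;
+        interconnect-names = "dma-mem";
+    };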
+
+...
diff --git a/Documentation/devicetree/bindings/arm/sunxi/sunxi-mbus.txt b/Documentation/devicetree/bindings/arm/sunxi/sunxi-mbus.txt
deleted file mode 100644
index 2005bb4..0000000
--- a/Documentation/devicetree/bindings/arm/sunxi/sunxi-mbus.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-Allwinner Memory Bus (MBUS) controller
-
-The MBUS controller drives the MBUS that other devices in the SoC will
-use to perform DMA. It also has a register interface that allows to
-monitor and control the bandwidth and priorities for masters on that
-bus.
-
-Required properties:
- - compatible: Must be one of:
-	- allwinner,sun5i-a13-mbus
-	- allwinner,sun8i-h3-mbus
- - reg: Offset and length of the register set for the controller
- - clocks: phandle to the clock driving the controller
- - dma-ranges: See section 2.3.9 of the DeviceTree Specification
- - #interconnect-cells: Must be one, with the argument being the MBUS
-   port ID
-
-Each device having to perform their DMA through the MBUS must have the
-interconnects and interconnect-names properties set to the MBUS
-controller and with "dma-mem" as the interconnect name.
-
-Example:
-
-mbus: dram-controller@1c01000 {
-	compatible = "allwinner,sun5i-a13-mbus";
-	reg = <0x01c01000 0x1000>;
-	clocks = <&ccu CLK_MBUS>;
-	dma-ranges = <0x00000000 0x40000000 0x20000000>;
-	#interconnect-cells = <1>;
-};
-
-fe0: display-frontend@1e00000 {
-	compatible = "allwinner,sun5i-a13-display-frontend";
-	...
-	interconnects = <&mbus 19>;
-	interconnect-names = "dma-mem";
-};
diff --git a/Documentation/devicetree/bindings/arm/ux500.yaml b/Documentation/devicetree/bindings/arm/ux500.yaml
new file mode 100644
index 0000000..accaee9
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/ux500.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/ux500.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ux500 platforms device tree bindings
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+properties:
+  $nodename:
+    const: '/'
+  compatible:
+    oneOf:
+
+      - description: ST-Ericsson HREF (pre-v60)
+        items:
+          - const: st-ericsson,mop500
+          - const: st-ericsson,u8500
+
+      - description: ST-Ericsson HREF (v60+)
+        items:
+          - const: st-ericsson,hrefv60+
+          - const: st-ericsson,u8500
+
+      - description: Calao Systems Snowball
+        items:
+          - const: calaosystems,snowball-a9500
+          - const: st-ericsson,u9500
+
+      - description: Samsung Galaxy S III mini (GT-I8190)
+        items:
+          - const: samsung,golden
+          - const: st-ericsson,u8500
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index 55c6fab..77091a2 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -9,8 +9,6 @@
 
 Required properties:
 - compatible        : compatible string, one of:
-  - "allwinner,sun4i-a10-ahci"
-  - "allwinner,sun8i-r40-ahci"
   - "brcm,iproc-ahci"
   - "hisilicon,hisi-ahci"
   - "cavium,octeon-7130-ahci"
@@ -45,8 +43,6 @@
 - #address-cells    : number of cells to encode an address
 - #size-cells       : number of cells representing the size of an address
 
-For allwinner,sun8i-r40-ahci, the reset property must be present.
-
 Sub-nodes required properties:
 - reg		    : the port number
 And at least one of the following properties:
@@ -60,14 +56,6 @@
 		interrupts = <115>;
         };
 
-	ahci: sata@1c18000 {
-		compatible = "allwinner,sun4i-a10-ahci";
-		reg = <0x01c18000 0x1000>;
-		interrupts = <56>;
-		clocks = <&pll6 0>, <&ahb_gates 25>;
-		target-supply = <&reg_ahci_5v>;
-	};
-
 With sub-nodes:
 	sata@f7e90000 {
 		compatible = "marvell,berlin2q-achi", "generic-ahci";
diff --git a/Documentation/devicetree/bindings/ata/allwinner,sun4i-a10-ahci.yaml b/Documentation/devicetree/bindings/ata/allwinner,sun4i-a10-ahci.yaml
new file mode 100644
index 0000000..cb530b4
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/allwinner,sun4i-a10-ahci.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/allwinner,sun4i-a10-ahci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 AHCI SATA Controller bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  compatible:
+    const: allwinner,sun4i-a10-ahci
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: AHCI Bus Clock
+      - description: AHCI Module Clock
+
+  interrupts:
+    maxItems: 1
+
+  target-supply:
+    description: Regulator for SATA target power
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - interrupts
+
+additionalProperties: false
+
+examples:
+  - |
+    ahci: sata@1c18000 {
+        compatible = "allwinner,sun4i-a10-ahci";
+        reg = <0x01c18000 0x1000>;
+        interrupts = <56>;
+        clocks = <&pll6 0>, <&ahb_gates 25>;
+        target-supply = <&reg_ahci_5v>;
+    };
diff --git a/Documentation/devicetree/bindings/ata/allwinner,sun8i-r40-ahci.yaml b/Documentation/devicetree/bindings/ata/allwinner,sun8i-r40-ahci.yaml
new file mode 100644
index 0000000..e6b42a1
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/allwinner,sun8i-r40-ahci.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/allwinner,sun8i-r40-ahci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner R40 AHCI SATA Controller bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  compatible:
+    const: allwinner,sun8i-r40-ahci
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: AHCI Bus Clock
+      - description: AHCI Module Clock
+
+  interrupts:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  reset-names:
+    const: ahci
+
+  ahci-supply:
+    description: Regulator for the AHCI controller
+
+  phy-supply:
+    description: Regulator for the SATA PHY power
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - interrupts
+  - resets
+  - reset-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/sun8i-r40-ccu.h>
+    #include <dt-bindings/reset/sun8i-r40-ccu.h>
+
+    ahci: sata@1c18000 {
+        compatible = "allwinner,sun8i-r40-ahci";
+        reg = <0x01c18000 0x1000>;
+        interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&ccu CLK_BUS_SATA>, <&ccu CLK_SATA>;
+        resets = <&ccu RST_BUS_SATA>;
+        reset-names = "ahci";
+        ahci-supply = <&reg_dldo4>;
+        phy-supply = <&reg_eldo3>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/ata/faraday,ftide010.txt b/Documentation/devicetree/bindings/ata/faraday,ftide010.txt
deleted file mode 100644
index a0c64a2..0000000
--- a/Documentation/devicetree/bindings/ata/faraday,ftide010.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-* Faraday Technology FTIDE010 PATA controller
-
-This controller is the first Faraday IDE interface block, used in the
-StorLink SL2312 and SL3516, later known as the Cortina Systems Gemini
-platform. The controller can do PIO modes 0 through 4, Multi-word DMA
-(MWDM)modes 0 through 2 and Ultra DMA modes 0 through 6.
-
-On the Gemini platform, this PATA block is accompanied by a PATA to
-SATA bridge in order to support SATA. This is why a phandle to that
-controller is compulsory on that platform.
-
-The timing properties are unique per-SoC, not per-board.
-
-Required properties:
-- compatible: should be one of
-  "cortina,gemini-pata", "faraday,ftide010"
-  "faraday,ftide010"
-- interrupts: interrupt for the block
-- reg: registers and size for the block
-
-Optional properties:
-- clocks: a SoC clock running the peripheral.
-- clock-names: should be set to "PCLK" for the peripheral clock.
-
-Required properties for "cortina,gemini-pata" compatible:
-- sata: a phande to the Gemini PATA to SATA bridge, see
-  cortina,gemini-sata-bridge.txt for details.
-
-Example:
-
-ata@63000000 {
-	compatible = "cortina,gemini-pata", "faraday,ftide010";
-	reg = <0x63000000 0x100>;
-	interrupts = <4 IRQ_TYPE_EDGE_RISING>;
-	clocks = <&gcc GEMINI_CLK_GATE_IDE>;
-	clock-names = "PCLK";
-	sata = <&sata>;
-};
diff --git a/Documentation/devicetree/bindings/ata/faraday,ftide010.yaml b/Documentation/devicetree/bindings/ata/faraday,ftide010.yaml
new file mode 100644
index 0000000..bfc6357
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/faraday,ftide010.yaml
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/faraday,ftide010.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Faraday Technology FTIDE010 PATA controller
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+  This controller is the first Faraday IDE interface block, used in the
+  StorLink SL3512 and SL3516, later known as the Cortina Systems Gemini
+  platform. The controller can do PIO modes 0 through 4, Multi-word DMA
+  (MWDMA) modes 0 through 2 and Ultra DMA modes 0 through 6.
+
+  On the Gemini platform, this PATA block is accompanied by a PATA to
+  SATA bridge in order to support SATA. This is why a phandle to that
+  controller is compulsory on that platform.
+
+  The timing properties are unique per-SoC, not per-board.
+
+properties:
+  compatible:
+    oneOf:
+      - const: faraday,ftide010
+      - items:
+        - const: cortina,gemini-pata
+        - const: faraday,ftide010
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 1
+
+  clock-names:
+    const: PCLK
+
+  sata:
+    description:
+      phandle to the Gemini PATA to SATA bridge, if available
+    $ref: /schemas/types.yaml#/definitions/phandle
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+allOf:
+  - $ref: pata-common.yaml#
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: cortina,gemini-pata
+
+    then:
+      required:
+        - sata
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/clock/cortina,gemini-clock.h>
+
+    ide@63000000 {
+      compatible = "cortina,gemini-pata", "faraday,ftide010";
+      reg = <0x63000000 0x100>;
+      interrupts = <4 IRQ_TYPE_EDGE_RISING>;
+      clocks = <&gcc GEMINI_CLK_GATE_IDE>;
+      clock-names = "PCLK";
+      sata = <&sata>;
+      #address-cells = <1>;
+      #size-cells = <0>;
+      ide-port@0 {
+        reg = <0>;
+      };
+      ide-port@1 {
+        reg = <1>;
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/ata/pata-common.yaml b/Documentation/devicetree/bindings/ata/pata-common.yaml
new file mode 100644
index 0000000..fc5ebbe
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/pata-common.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/pata-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common Properties for Parallel AT attachment (PATA) controllers
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+  This document defines device tree properties common to most Parallel
+  ATA (PATA, also known as IDE) AT attachment storage devices.
+  It doesn't constitute a device tree binding specification by itself but is
+  meant to be referenced by device tree bindings.
+
+  The PATA (IDE) controller-specific device tree bindings are responsible for
+  defining whether each property is required or optional.
+
+properties:
+  $nodename:
+    pattern: "^ide(@.*)?$"
+    description:
+      Specifies the host controller node. PATA host controller nodes are named
+      "ide".
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+patternProperties:
+  "^ide-port@[0-1]$":
+    description: |
+      DT nodes for ports connected to the PATA host. The master drive will have
+      ID number 0 and the slave drive will have ID number 1. The PATA port
+      nodes will be named "ide-port".
+    type: object
+
+    properties:
+      reg:
+        minimum: 0
+        maximum: 1
+        description:
+          The ID number of the drive port, 0 for the master port and 1 for the
+          slave port.
+
+...
diff --git a/Documentation/devicetree/bindings/ata/sata-common.yaml b/Documentation/devicetree/bindings/ata/sata-common.yaml
new file mode 100644
index 0000000..6783a4d
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/sata-common.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/sata-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common Properties for Serial AT attachment (SATA) controllers
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+  This document defines device tree properties common to most Serial
+  AT attachment (SATA) storage devices. It doesn't constitute a device tree
+  binding specification by itself but is meant to be referenced by device
+  tree bindings.
+
+  The SATA controller-specific device tree bindings are responsible for
+  defining whether each property is required or optional.
+
+properties:
+  $nodename:
+    pattern: "^sata(@.*)?$"
+    description:
+      Specifies the host controller node. SATA host controller nodes are named
+      "sata"
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+patternProperties:
+  "^sata-port@[0-9a-e]$":
+    description: |
+      DT nodes for ports connected to the SATA host. The SATA port
+      nodes will be named "sata-port".
+    type: object
+
+    properties:
+      reg:
+        minimum: 0
+        maximum: 14
+        description:
+          The ID number of the drive port. SATA can potentially use a port
+          multiplier, making it possible to connect up to 15 disks to a single
+          SATA port.
+
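+# A minimal, non-normative sketch of the naming convention this schema
+# describes (controller-specific properties omitted, addresses arbitrary):
+#
+#    sata: sata@80000000 {
+#        #address-cells = <1>;
+#        #size-cells = <0>;
+#
+#        sata-port@0 {
+#            reg = <0>;
+#        };
+#    };
+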
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ahb-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ahb-clk.yaml
new file mode 100644
index 0000000..558db4b
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ahb-clk.yaml
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-ahb-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 AHB Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    enum:
+      - allwinner,sun4i-a10-ahb-clk
+      - allwinner,sun6i-a31-ahb1-clk
+      - allwinner,sun8i-h3-ahb2-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    minItems: 1
+    maxItems: 4
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: allwinner,sun4i-a10-ahb-clk
+
+    then:
+      properties:
+        clocks:
+          maxItems: 1
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: allwinner,sun6i-a31-ahb1-clk
+
+    then:
+      properties:
+        clocks:
+          maxItems: 4
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: allwinner,sun8i-h3-ahb2-clk
+
+    then:
+      properties:
+        clocks:
+          maxItems: 2
+
+examples:
+  - |
+    ahb@1c20054 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun4i-a10-ahb-clk";
+        reg = <0x01c20054 0x4>;
+        clocks = <&axi>;
+        clock-output-names = "ahb";
+    };
+
+  - |
+    ahb1@1c20054 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun6i-a31-ahb1-clk";
+        reg = <0x01c20054 0x4>;
+        clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
+        clock-output-names = "ahb1";
+    };
+
+  - |
+    ahb2_clk@1c2005c {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun8i-h3-ahb2-clk";
+        reg = <0x01c2005c 0x4>;
+        clocks = <&ahb1>, <&pll6d2>;
+        clock-output-names = "ahb2";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-apb0-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-apb0-clk.yaml
new file mode 100644
index 0000000..b1e3d73
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-apb0-clk.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-apb0-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 APB0 Bus Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun4i-a10-apb0-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    apb0@1c20054 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun4i-a10-apb0-clk";
+        reg = <0x01c20054 0x4>;
+        clocks = <&ahb>;
+        clock-output-names = "apb0";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-apb1-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-apb1-clk.yaml
new file mode 100644
index 0000000..51b7a6d
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-apb1-clk.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-apb1-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 APB1 Bus Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun4i-a10-apb1-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 3
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c20058 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun4i-a10-apb1-clk";
+        reg = <0x01c20058 0x4>;
+        clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
+        clock-output-names = "apb1";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-axi-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-axi-clk.yaml
new file mode 100644
index 0000000..d801158
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-axi-clk.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-axi-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 AXI Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    enum:
+      - allwinner,sun4i-a10-axi-clk
+      - allwinner,sun8i-a23-axi-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    axi@1c20054 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun4i-a10-axi-clk";
+        reg = <0x01c20054 0x4>;
+        clocks = <&cpu>;
+        clock-output-names = "axi";
+    };
+
+  - |
+    axi_clk@1c20050 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun8i-a23-axi-clk";
+        reg = <0x01c20050 0x4>;
+        clocks = <&cpu>;
+        clock-output-names = "axi";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-cpu-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-cpu-clk.yaml
new file mode 100644
index 0000000..0dfafba
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-cpu-clk.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-cpu-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 CPU Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun4i-a10-cpu-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 4
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    cpu@1c20054 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun4i-a10-cpu-clk";
+        reg = <0x01c20054 0x4>;
+        clocks = <&osc32k>, <&osc24M>, <&pll1>, <&dummy>;
+        clock-output-names = "cpu";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-display-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-display-clk.yaml
new file mode 100644
index 0000000..7484a7a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-display-clk.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-display-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Display Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  "#reset-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun4i-a10-display-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 3
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - "#reset-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c20104 {
+        #clock-cells = <0>;
+        #reset-cells = <0>;
+        compatible = "allwinner,sun4i-a10-display-clk";
+        reg = <0x01c20104 0x4>;
+        clocks = <&pll3>, <&pll7>, <&pll5 1>;
+        clock-output-names = "de-be";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-gates-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-gates-clk.yaml
new file mode 100644
index 0000000..ed1b212
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-gates-clk.yaml
@@ -0,0 +1,152 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-gates-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Bus Gates Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 1
+    description: >
+      The additional argument passed to the clock is the offset of
+      the bit controlling this particular gate in the register.
+
+  compatible:
+    oneOf:
+      - const: allwinner,sun4i-a10-gates-clk
+      - const: allwinner,sun4i-a10-axi-gates-clk
+      - const: allwinner,sun4i-a10-ahb-gates-clk
+      - const: allwinner,sun5i-a10s-ahb-gates-clk
+      - const: allwinner,sun5i-a13-ahb-gates-clk
+      - const: allwinner,sun7i-a20-ahb-gates-clk
+      - const: allwinner,sun6i-a31-ahb1-gates-clk
+      - const: allwinner,sun8i-a23-ahb1-gates-clk
+      - const: allwinner,sun9i-a80-ahb0-gates-clk
+      - const: allwinner,sun9i-a80-ahb1-gates-clk
+      - const: allwinner,sun9i-a80-ahb2-gates-clk
+      - const: allwinner,sun4i-a10-apb0-gates-clk
+      - const: allwinner,sun5i-a10s-apb0-gates-clk
+      - const: allwinner,sun5i-a13-apb0-gates-clk
+      - const: allwinner,sun7i-a20-apb0-gates-clk
+      - const: allwinner,sun9i-a80-apb0-gates-clk
+      - const: allwinner,sun8i-a83t-apb0-gates-clk
+      - const: allwinner,sun4i-a10-apb1-gates-clk
+      - const: allwinner,sun5i-a13-apb1-gates-clk
+      - const: allwinner,sun5i-a10s-apb1-gates-clk
+      - const: allwinner,sun6i-a31-apb1-gates-clk
+      - const: allwinner,sun7i-a20-apb1-gates-clk
+      - const: allwinner,sun8i-a23-apb1-gates-clk
+      - const: allwinner,sun9i-a80-apb1-gates-clk
+      - const: allwinner,sun6i-a31-apb2-gates-clk
+      - const: allwinner,sun8i-a23-apb2-gates-clk
+      - const: allwinner,sun8i-a83t-bus-gates-clk
+      - const: allwinner,sun9i-a80-apbs-gates-clk
+      - const: allwinner,sun4i-a10-dram-gates-clk
+
+      - items:
+        - const: allwinner,sun5i-a13-dram-gates-clk
+        - const: allwinner,sun4i-a10-gates-clk
+
+      - items:
+        - const: allwinner,sun8i-h3-apb0-gates-clk
+        - const: allwinner,sun4i-a10-gates-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-indices:
+    minItems: 1
+    maxItems: 64
+
+  clock-output-names:
+    minItems: 1
+    maxItems: 64
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-indices
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c2005c {
+        #clock-cells = <1>;
+        compatible = "allwinner,sun4i-a10-axi-gates-clk";
+        reg = <0x01c2005c 0x4>;
+        clocks = <&axi>;
+        clock-indices = <0>;
+        clock-output-names = "axi_dram";
+    };
+
+  - |
+    clk@1c20060 {
+        #clock-cells = <1>;
+        compatible = "allwinner,sun4i-a10-ahb-gates-clk";
+        reg = <0x01c20060 0x8>;
+        clocks = <&ahb>;
+        clock-indices = <0>, <1>,
+                        <2>, <3>,
+                        <4>, <5>, <6>,
+                        <7>, <8>, <9>,
+                        <10>, <11>, <12>,
+                        <13>, <14>, <16>,
+                        <17>, <18>, <20>,
+                        <21>, <22>, <23>,
+                        <24>, <25>, <26>,
+                        <32>, <33>, <34>,
+                        <35>, <36>, <37>,
+                        <40>, <41>, <43>,
+                        <44>, <45>,
+                        <46>, <47>,
+                        <50>, <52>;
+        clock-output-names = "ahb_usb0", "ahb_ehci0",
+                             "ahb_ohci0", "ahb_ehci1",
+                             "ahb_ohci1", "ahb_ss", "ahb_dma",
+                             "ahb_bist", "ahb_mmc0", "ahb_mmc1",
+                             "ahb_mmc2", "ahb_mmc3", "ahb_ms",
+                             "ahb_nand", "ahb_sdram", "ahb_ace",
+                             "ahb_emac", "ahb_ts", "ahb_spi0",
+                             "ahb_spi1", "ahb_spi2", "ahb_spi3",
+                             "ahb_pata", "ahb_sata", "ahb_gps",
+                             "ahb_ve", "ahb_tvd", "ahb_tve0",
+                             "ahb_tve1", "ahb_lcd0", "ahb_lcd1",
+                             "ahb_csi0", "ahb_csi1", "ahb_hdmi",
+                             "ahb_de_be0", "ahb_de_be1",
+                             "ahb_de_fe0", "ahb_de_fe1",
+                             "ahb_mp", "ahb_mali400";
+    };
+
+
+  - |
+    clk@1c20068 {
+        #clock-cells = <1>;
+        compatible = "allwinner,sun4i-a10-apb0-gates-clk";
+        reg = <0x01c20068 0x4>;
+        clocks = <&apb0>;
+        clock-indices = <0>, <1>,
+                        <2>, <3>,
+                        <5>, <6>,
+                        <7>, <10>;
+        clock-output-names = "apb0_codec", "apb0_spdif",
+                             "apb0_ac97", "apb0_iis",
+                             "apb0_pio", "apb0_ir0",
+                             "apb0_ir1", "apb0_keypad";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-mbus-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-mbus-clk.yaml
new file mode 100644
index 0000000..18f131e
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-mbus-clk.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-mbus-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 MBUS Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    enum:
+      - allwinner,sun5i-a13-mbus-clk
+      - allwinner,sun8i-a23-mbus-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 3
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c2015c {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun5i-a13-mbus-clk";
+        reg = <0x01c2015c 0x4>;
+        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+        clock-output-names = "mbus";
+    };
+
+  - |
+    clk@1c2015c {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun8i-a23-mbus-clk";
+        reg = <0x01c2015c 0x4>;
+        clocks = <&osc24M>, <&pll6 1>, <&pll5>;
+        clock-output-names = "mbus";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-mmc-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-mmc-clk.yaml
new file mode 100644
index 0000000..5199285
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-mmc-clk.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-mmc-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 MMC Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 1
+    description: >
+      There are three different outputs: the main clock, with the ID 0,
+      and the output and sample clocks, with the IDs 1 and 2,
+      respectively.
+
+  compatible:
+    enum:
+      - allwinner,sun4i-a10-mmc-clk
+      - allwinner,sun9i-a80-mmc-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    minItems: 2
+    maxItems: 3
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 3
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+if:
+  properties:
+    compatible:
+      contains:
+        const: allwinner,sun4i-a10-mmc-clk
+
+then:
+  properties:
+    clocks:
+      maxItems: 3
+
+else:
+  properties:
+    clocks:
+      maxItems: 2
+
+examples:
+  - |
+    clk@1c20088 {
+        #clock-cells = <1>;
+        compatible = "allwinner,sun4i-a10-mmc-clk";
+        reg = <0x01c20088 0x4>;
+        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+        clock-output-names = "mmc0",
+                             "mmc0_output",
+                             "mmc0_sample";
+    };
+
+  - |
+    clk@6000410 {
+        #clock-cells = <1>;
+        compatible = "allwinner,sun9i-a80-mmc-clk";
+        reg = <0x06000410 0x4>;
+        clocks = <&osc24M>, <&pll4>;
+        clock-output-names = "mmc0", "mmc0_output",
+                             "mmc0_sample";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-mod0-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-mod0-clk.yaml
new file mode 100644
index 0000000..3e2abe3
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-mod0-clk.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-mod0-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Module 0 Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+select:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - allwinner,sun4i-a10-mod0-clk
+          - allwinner,sun9i-a80-mod0-clk
+
+  # The PRCM on the A31 and A23 will have the reg property missing,
+  # since it's set at the upper level node, and will be validated by
+  # PRCM's schema. Make sure we only validate standalone nodes.
+  required:
+    - compatible
+    - reg
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    enum:
+      - allwinner,sun4i-a10-mod0-clk
+      - allwinner,sun9i-a80-mod0-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    # On the A80, the PRCM mod0 clocks have 2 parents.
+    minItems: 2
+    maxItems: 3
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c20080 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun4i-a10-mod0-clk";
+        reg = <0x01c20080 0x4>;
+        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+        clock-output-names = "nand";
+    };
+
+  - |
+    clk@8001454 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun4i-a10-mod0-clk";
+        reg = <0x08001454 0x4>;
+        clocks = <&osc32k>, <&osc24M>;
+        clock-output-names = "r_ir";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-mod1-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-mod1-clk.yaml
new file mode 100644
index 0000000..7ddb55c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-mod1-clk.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-mod1-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Module 1 Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun4i-a10-mod1-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 4
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/sun4i-a10-pll2.h>
+
+    clk@1c200c0 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun4i-a10-mod1-clk";
+        reg = <0x01c200c0 0x4>;
+        clocks = <&pll2 SUN4I_A10_PLL2_8X>,
+                 <&pll2 SUN4I_A10_PLL2_4X>,
+                 <&pll2 SUN4I_A10_PLL2_2X>,
+                 <&pll2 SUN4I_A10_PLL2_1X>;
+        clock-output-names = "spdif";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml
new file mode 100644
index 0000000..69cfa4a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-osc-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Gatable Oscillator Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun4i-a10-osc-clk
+
+  reg:
+    maxItems: 1
+
+  clock-frequency:
+    description: >
+      Frequency of the main oscillator.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clock-frequency
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    osc24M: clk@01c20050 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun4i-a10-osc-clk";
+        reg = <0x01c20050 0x4>;
+        clock-frequency = <24000000>;
+        clock-output-names = "osc24M";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-pll1-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-pll1-clk.yaml
new file mode 100644
index 0000000..e9c4cf8
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-pll1-clk.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-pll1-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 CPU PLL Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    enum:
+      - allwinner,sun4i-a10-pll1-clk
+      - allwinner,sun6i-a31-pll1-clk
+      - allwinner,sun8i-a23-pll1-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c20000 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun4i-a10-pll1";
+        reg = <0x01c20000 0x4>;
+        clocks = <&osc24M>;
+        clock-output-names = "osc24M";
+    };
+
+  - |
+    clk@1c20000 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun6i-a31-pll1-clk";
+        reg = <0x01c20000 0x4>;
+        clocks = <&osc24M>;
+        clock-output-names = "pll1";
+    };
+
+  - |
+    clk@1c20000 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun8i-a23-pll1-clk";
+        reg = <0x01c20000 0x4>;
+        clocks = <&osc24M>;
+        clock-output-names = "pll1";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-pll3-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-pll3-clk.yaml
new file mode 100644
index 0000000..4b80a42
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-pll3-clk.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-pll3-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Video PLL Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun4i-a10-pll3-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c20010 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun4i-a10-pll3-clk";
+        reg = <0x01c20010 0x4>;
+        clocks = <&osc3M>;
+        clock-output-names = "pll3";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-pll5-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-pll5-clk.yaml
new file mode 100644
index 0000000..415bd77
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-pll5-clk.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-pll5-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 DRAM PLL Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 1
+    description: >
+      The first output is the DRAM clock output, the second is meant
+      for peripherals on the SoC.
+
+  compatible:
+    const: allwinner,sun4i-a10-pll5-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-output-names:
+    maxItems: 2
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c20020 {
+        #clock-cells = <1>;
+        compatible = "allwinner,sun4i-a10-pll5-clk";
+        reg = <0x01c20020 0x4>;
+        clocks = <&osc24M>;
+        clock-output-names = "pll5_ddr", "pll5_other";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-pll6-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-pll6-clk.yaml
new file mode 100644
index 0000000..ec5652f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-pll6-clk.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-pll6-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Peripheral PLL Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 1
+    description: >
+      The first output is the SATA clock output, the second is the
+      regular PLL output, the third is a PLL output at twice the rate.
+
+  compatible:
+    const: allwinner,sun4i-a10-pll6-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-output-names:
+    maxItems: 3
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c20028 {
+        #clock-cells = <1>;
+        compatible = "allwinner,sun4i-a10-pll6-clk";
+        reg = <0x01c20028 0x4>;
+        clocks = <&osc24M>;
+        clock-output-names = "pll6_sata", "pll6_other", "pll6";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-tcon-ch0-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-tcon-ch0-clk.yaml
new file mode 100644
index 0000000..0a335c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-tcon-ch0-clk.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-tcon-ch0-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 TCON Channel 0 Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  "#reset-cells":
+    const: 1
+
+  compatible:
+    enum:
+      - allwinner,sun4i-a10-tcon-ch0-clk
+      - allwinner,sun4i-a10-tcon-ch1-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 4
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+if:
+  properties:
+    compatible:
+      contains:
+        const: allwinner,sun4i-a10-tcon-ch0-clk
+
+then:
+  required:
+    - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c20118 {
+        #clock-cells = <0>;
+        #reset-cells = <1>;
+        compatible = "allwinner,sun4i-a10-tcon-ch0-clk";
+        reg = <0x01c20118 0x4>;
+        clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>;
+        clock-output-names = "tcon-ch0-sclk";
+    };
+
+  - |
+    clk@1c2012c {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun4i-a10-tcon-ch1-clk";
+        reg = <0x01c2012c 0x4>;
+        clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>;
+        clock-output-names = "tcon-ch1-sclk";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-usb-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-usb-clk.yaml
new file mode 100644
index 0000000..cd95d25
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-usb-clk.yaml
@@ -0,0 +1,166 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-usb-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 USB Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 1
+    description: >
+      The additional ID argument passed to the clock shall refer to
+      the index of the output.
+
+  "#reset-cells":
+    const: 1
+
+  compatible:
+    enum:
+      - allwinner,sun4i-a10-usb-clk
+      - allwinner,sun5i-a13-usb-clk
+      - allwinner,sun6i-a31-usb-clk
+      - allwinner,sun8i-a23-usb-clk
+      - allwinner,sun8i-h3-usb-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-output-names:
+    minItems: 2
+    maxItems: 8
+
+required:
+  - "#clock-cells"
+  - "#reset-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: allwinner,sun4i-a10-usb-clk
+
+    then:
+      properties:
+        clock-output-names:
+          maxItems: 3
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: allwinner,sun5i-a13-usb-clk
+
+    then:
+      properties:
+        clock-output-names:
+          maxItems: 2
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: allwinner,sun6i-a31-usb-clk
+
+    then:
+      properties:
+        clock-output-names:
+          maxItems: 6
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: allwinner,sun8i-a23-usb-clk
+
+    then:
+      properties:
+        clock-output-names:
+          maxItems: 5
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: allwinner,sun8i-h3-usb-clk
+
+    then:
+      properties:
+        clock-output-names:
+          maxItems: 8
+
+examples:
+  - |
+    clk@1c200cc {
+        #clock-cells = <1>;
+        #reset-cells = <1>;
+        compatible = "allwinner,sun4i-a10-usb-clk";
+        reg = <0x01c200cc 0x4>;
+        clocks = <&pll6 1>;
+        clock-output-names = "usb_ohci0", "usb_ohci1", "usb_phy";
+    };
+
+  - |
+    clk@1c200cc {
+        #clock-cells = <1>;
+        #reset-cells = <1>;
+        compatible = "allwinner,sun5i-a13-usb-clk";
+        reg = <0x01c200cc 0x4>;
+        clocks = <&pll6 1>;
+        clock-output-names = "usb_ohci0", "usb_phy";
+    };
+
+  - |
+    clk@1c200cc {
+        #clock-cells = <1>;
+        #reset-cells = <1>;
+        compatible = "allwinner,sun6i-a31-usb-clk";
+        reg = <0x01c200cc 0x4>;
+        clocks = <&osc24M>;
+        clock-output-names = "usb_phy0", "usb_phy1", "usb_phy2",
+                             "usb_ohci0", "usb_ohci1",
+                             "usb_ohci2";
+    };
+
+  - |
+    clk@1c200cc {
+        #clock-cells = <1>;
+        #reset-cells = <1>;
+        compatible = "allwinner,sun8i-a23-usb-clk";
+        reg = <0x01c200cc 0x4>;
+        clocks = <&osc24M>;
+        clock-output-names = "usb_phy0", "usb_phy1", "usb_hsic",
+                             "usb_hsic_12M", "usb_ohci0";
+    };
+
+  - |
+    clk@1c200cc {
+        #clock-cells = <1>;
+        #reset-cells = <1>;
+        compatible = "allwinner,sun8i-h3-usb-clk";
+        reg = <0x01c200cc 0x4>;
+        clocks = <&osc24M>;
+        clock-output-names = "usb_phy0", "usb_phy1",
+                             "usb_phy2", "usb_phy3",
+                             "usb_ohci0", "usb_ohci1",
+                             "usb_ohci2", "usb_ohci3";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ve-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ve-clk.yaml
new file mode 100644
index 0000000..5dfd0c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ve-clk.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-ve-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Video Engine Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  "#reset-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun4i-a10-ve-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - "#reset-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c2013c {
+        #clock-cells = <0>;
+        #reset-cells = <0>;
+        compatible = "allwinner,sun4i-a10-ve-clk";
+        reg = <0x01c2013c 0x4>;
+        clocks = <&pll4>;
+        clock-output-names = "ve";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun5i-a13-ahb-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun5i-a13-ahb-clk.yaml
new file mode 100644
index 0000000..99add79
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun5i-a13-ahb-clk.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun5i-a13-ahb-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A13 AHB Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun5i-a13-ahb-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 3
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    ahb@1c20054 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun5i-a13-ahb-clk";
+        reg = <0x01c20054 0x4>;
+        clocks = <&axi>, <&cpu>, <&pll6 1>;
+        clock-output-names = "ahb";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun6i-a31-pll6-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun6i-a31-pll6-clk.yaml
new file mode 100644
index 0000000..5f37720
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun6i-a31-pll6-clk.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun6i-a31-pll6-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A31 Peripheral PLL Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 1
+    description: >
+      The first output is the regular PLL output, the second is a PLL
+      output at twice the rate.
+
+  compatible:
+    const: allwinner,sun6i-a31-pll6-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-output-names:
+    maxItems: 2
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c20028 {
+        #clock-cells = <1>;
+        compatible = "allwinner,sun6i-a31-pll6-clk";
+        reg = <0x01c20028 0x4>;
+        clocks = <&osc24M>;
+        clock-output-names = "pll6", "pll6x2";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun7i-a20-gmac-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun7i-a20-gmac-clk.yaml
new file mode 100644
index 0000000..59e5dce
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun7i-a20-gmac-clk.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun7i-a20-gmac-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A20 GMAC TX Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun7i-a20-gmac-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 2
+    description: >
+      The parent clocks shall be fixed rate dummy clocks at 25 MHz and
+      125 MHz, respectively.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c20164 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun7i-a20-gmac-clk";
+        reg = <0x01c20164 0x4>;
+        clocks = <&mii_phy_tx_clk>, <&gmac_int_tx_clk>;
+        clock-output-names = "gmac_tx";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun7i-a20-out-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun7i-a20-out-clk.yaml
new file mode 100644
index 0000000..c745733
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun7i-a20-out-clk.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun7i-a20-out-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A20 Output Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun7i-a20-out-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 3
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c201f0 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun7i-a20-out-clk";
+        reg = <0x01c201f0 0x4>;
+        clocks = <&osc24M_32k>, <&osc32k>, <&osc24M>;
+        clock-output-names = "clk_out_a";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun8i-a83t-de2-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun8i-a83t-de2-clk.yaml
new file mode 100644
index 0000000..3f995d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun8i-a83t-de2-clk.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0+
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun8i-a83t-de2-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A83t Display Engine 2/3 Clock Controller Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#clock-cells":
+    const: 1
+
+  "#reset-cells":
+    const: 1
+
+  compatible:
+    oneOf:
+      - const: allwinner,sun8i-a83t-de2-clk
+      - const: allwinner,sun8i-h3-de2-clk
+      - const: allwinner,sun8i-v3s-de2-clk
+      - const: allwinner,sun50i-a64-de2-clk
+      - const: allwinner,sun50i-h5-de2-clk
+      - const: allwinner,sun50i-h6-de2-clk
+      - items:
+          - const: allwinner,sun8i-r40-de2-clk
+          - const: allwinner,sun8i-h3-de2-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Bus Clock
+      - description: Module Clock
+
+  clock-names:
+    items:
+      - const: bus
+      - const: mod
+
+  resets:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - "#reset-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - resets
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/sun8i-h3-ccu.h>
+    #include <dt-bindings/reset/sun8i-h3-ccu.h>
+
+    de2_clocks: clock@1000000 {
+        compatible = "allwinner,sun8i-h3-de2-clk";
+        reg = <0x01000000 0x100000>;
+        clocks = <&ccu CLK_BUS_DE>,
+                 <&ccu CLK_DE>;
+        clock-names = "bus",
+                      "mod";
+        resets = <&ccu RST_BUS_DE>;
+        #clock-cells = <1>;
+        #reset-cells = <1>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun8i-h3-bus-gates-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun8i-h3-bus-gates-clk.yaml
new file mode 100644
index 0000000..3eb2bf6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun8i-h3-bus-gates-clk.yaml
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun8i-h3-bus-gates-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner H3 Bus Gates Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 1
+    description: >
+      The additional argument passed to the clock is the offset of
+      the bit controlling this particular gate in the register.
+
+  compatible:
+    const: allwinner,sun8i-h3-bus-gates-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 4
+
+  clock-names:
+    maxItems: 4
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-indices:
+    minItems: 1
+    maxItems: 64
+
+  clock-output-names:
+    minItems: 1
+    maxItems: 64
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-indices
+  - clock-names
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c20060 {
+        #clock-cells = <1>;
+        compatible = "allwinner,sun8i-h3-bus-gates-clk";
+        reg = <0x01c20060 0x14>;
+        clocks = <&ahb1>, <&ahb2>, <&apb1>, <&apb2>;
+        clock-names = "ahb1", "ahb2", "apb1", "apb2";
+        clock-indices = <5>, <6>, <8>,
+                        <9>, <10>, <13>,
+                        <14>, <17>, <18>,
+                        <19>, <20>,
+                        <21>, <23>,
+                        <24>, <25>,
+                        <26>, <27>,
+                        <28>, <29>,
+                        <30>, <31>, <32>,
+                        <35>, <36>, <37>,
+                        <40>, <41>, <43>,
+                        <44>, <52>, <53>,
+                        <54>, <64>,
+                        <65>, <69>, <72>,
+                        <76>, <77>, <78>,
+                        <96>, <97>, <98>,
+                        <112>, <113>,
+                        <114>, <115>,
+                        <116>, <128>, <135>;
+        clock-output-names = "bus_ce", "bus_dma", "bus_mmc0",
+                             "bus_mmc1", "bus_mmc2", "bus_nand",
+                             "bus_sdram", "bus_gmac", "bus_ts",
+                             "bus_hstimer", "bus_spi0",
+                             "bus_spi1", "bus_otg",
+                             "bus_otg_ehci0", "bus_ehci1",
+                             "bus_ehci2", "bus_ehci3",
+                             "bus_otg_ohci0", "bus_ohci1",
+                             "bus_ohci2", "bus_ohci3", "bus_ve",
+                             "bus_lcd0", "bus_lcd1", "bus_deint",
+                             "bus_csi", "bus_tve", "bus_hdmi",
+                             "bus_de", "bus_gpu", "bus_msgbox",
+                             "bus_spinlock", "bus_codec",
+                             "bus_spdif", "bus_pio", "bus_ths",
+                             "bus_i2s0", "bus_i2s1", "bus_i2s2",
+                             "bus_i2c0", "bus_i2c1", "bus_i2c2",
+                             "bus_uart0", "bus_uart1",
+                             "bus_uart2", "bus_uart3",
+                             "bus_scr", "bus_ephy", "bus_dbg";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-ahb-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-ahb-clk.yaml
new file mode 100644
index 0000000..d178da9
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-ahb-clk.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun9i-a80-ahb-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A80 AHB Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun9i-a80-ahb-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 4
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@6000060 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun9i-a80-ahb-clk";
+        reg = <0x06000060 0x4>;
+        clocks = <&gt_clk>, <&pll4>, <&pll12>, <&pll12>;
+        clock-output-names = "ahb0";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-apb0-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-apb0-clk.yaml
new file mode 100644
index 0000000..0351c79
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-apb0-clk.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun9i-a80-apb0-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A80 APB0 Bus Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    enum:
+      - allwinner,sun9i-a80-apb0-clk
+      - allwinner,sun9i-a80-apb1-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 2
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@6000070 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun9i-a80-apb0-clk";
+        reg = <0x06000070 0x4>;
+        clocks = <&osc24M>, <&pll4>;
+        clock-output-names = "apb0";
+    };
+
+  - |
+    clk@6000074 {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun9i-a80-apb1-clk";
+        reg = <0x06000074 0x4>;
+        clocks = <&osc24M>, <&pll4>;
+        clock-output-names = "apb1";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-cpus-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-cpus-clk.yaml
new file mode 100644
index 0000000..24d5b2f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-cpus-clk.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun9i-a80-cpus-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A80 CPUS Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun9i-a80-cpus-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 4
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@8001410 {
+        compatible = "allwinner,sun9i-a80-cpus-clk";
+        reg = <0x08001410 0x4>;
+        #clock-cells = <0>;
+        clocks = <&osc32k>, <&osc24M>, <&pll4>, <&pll3>;
+        clock-output-names = "cpus";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-de-clks.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-de-clks.yaml
new file mode 100644
index 0000000..a82c7c7
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-de-clks.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: GPL-2.0+
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun9i-a80-de-clks.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A80 Display Engine Clock Controller Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#clock-cells":
+    const: 1
+
+  "#reset-cells":
+    const: 1
+
+  compatible:
+    const: allwinner,sun9i-a80-de-clks
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Module Clock
+      - description: DRAM Clock
+      - description: Bus Clock
+
+  clock-names:
+    items:
+      - const: mod
+      - const: dram
+      - const: bus
+
+  resets:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - "#reset-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - resets
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/sun9i-a80-ccu.h>
+    #include <dt-bindings/reset/sun9i-a80-ccu.h>
+
+    de_clocks: clock@3000000 {
+        compatible = "allwinner,sun9i-a80-de-clks";
+        reg = <0x03000000 0x30>;
+        clocks = <&ccu CLK_DE>, <&ccu CLK_SDRAM>, <&ccu CLK_BUS_DE>;
+        clock-names = "mod", "dram", "bus";
+        resets = <&ccu RST_BUS_DE>;
+        #clock-cells = <1>;
+        #reset-cells = <1>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml
new file mode 100644
index 0000000..07f38de
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun9i-a80-gt-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A80 GT Bus Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun9i-a80-gt-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 4
+    description: >
+      The parent order must match the hardware programming order.
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@0600005c {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun9i-a80-gt-clk";
+        reg = <0x0600005c 0x4>;
+        clocks = <&osc24M>, <&pll4>, <&pll12>, <&pll12>;
+        clock-output-names = "gt";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-mmc-config-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-mmc-config-clk.yaml
new file mode 100644
index 0000000..20dc115
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-mmc-config-clk.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun9i-a80-mmc-config-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A80 MMC Configuration Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+description: >
+  There is one clock/reset output per mmc controller. The number of
+  outputs is determined by the size of the address block, which is
+  related to the overall mmc block.
+
+properties:
+  "#clock-cells":
+    const: 1
+    description: >
+      The additional ID argument passed to the clock shall refer to
+      the index of the output.
+
+  "#reset-cells":
+    const: 1
+
+  compatible:
+    const: allwinner,sun9i-a80-mmc-config-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  clock-output-names:
+    maxItems: 4
+
+required:
+  - "#clock-cells"
+  - "#reset-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@1c13000 {
+        #clock-cells = <1>;
+        #reset-cells = <1>;
+        compatible = "allwinner,sun9i-a80-mmc-config-clk";
+        reg = <0x01c13000 0x10>;
+        clocks = <&ahb0_gates 8>;
+        resets = <&ahb0_resets 8>;
+        clock-output-names = "mmc0_config", "mmc1_config",
+                             "mmc2_config", "mmc3_config";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-pll4-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-pll4-clk.yaml
new file mode 100644
index 0000000..b76bab6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-pll4-clk.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun9i-a80-pll4-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A80 Peripheral PLL Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun9i-a80-pll4-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-output-names:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@600000c {
+        #clock-cells = <0>;
+        compatible = "allwinner,sun9i-a80-pll4-clk";
+        reg = <0x0600000c 0x4>;
+        clocks = <&osc24M>;
+        clock-output-names = "pll4";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-clocks.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-clocks.yaml
new file mode 100644
index 0000000..fa0ee03
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-clocks.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0+
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun9i-a80-usb-clocks.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A80 USB Clock Controller Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#clock-cells":
+    const: 1
+
+  "#reset-cells":
+    const: 1
+
+  compatible:
+    const: allwinner,sun9i-a80-usb-clks
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Bus Clock
+      - description: High Frequency Oscillator
+
+  clock-names:
+    items:
+      - const: bus
+      - const: hosc
+
+required:
+  - "#clock-cells"
+  - "#reset-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/sun9i-a80-ccu.h>
+
+    usb_clocks: clock@a08000 {
+        compatible = "allwinner,sun9i-a80-usb-clks";
+        reg = <0x00a08000 0x8>;
+        clocks = <&ccu CLK_BUS_USB>, <&osc24M>;
+        clock-names = "bus", "hosc";
+        #clock-cells = <1>;
+        #reset-cells = <1>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-mod-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-mod-clk.yaml
new file mode 100644
index 0000000..15218d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-mod-clk.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun9i-a80-usb-mod-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A80 USB Module Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 1
+    description: >
+      The additional ID argument passed to the clock shall refer to
+      the index of the output.
+
+  "#reset-cells":
+    const: 1
+
+  compatible:
+    const: allwinner,sun9i-a80-usb-mod-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-output-names:
+    maxItems: 6
+
+required:
+  - "#clock-cells"
+  - "#reset-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@a08000 {
+        #clock-cells = <1>;
+        #reset-cells = <1>;
+        compatible = "allwinner,sun9i-a80-usb-mod-clk";
+        reg = <0x00a08000 0x4>;
+        clocks = <&ahb1_gates 1>;
+        clock-output-names = "usb0_ahb", "usb_ohci0",
+                             "usb1_ahb", "usb_ohci1",
+                             "usb2_ahb", "usb_ohci2";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-phy-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-phy-clk.yaml
new file mode 100644
index 0000000..2569041
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-phy-clk.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/allwinner,sun9i-a80-usb-phy-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A80 USB PHY Clock Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  "#clock-cells":
+    const: 1
+    description: >
+      The additional ID argument passed to the clock shall refer to
+      the index of the output.
+
+  "#reset-cells":
+    const: 1
+
+  compatible:
+    const: allwinner,sun9i-a80-usb-phy-clk
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-output-names:
+    maxItems: 6
+
+required:
+  - "#clock-cells"
+  - "#reset-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-output-names
+
+additionalProperties: false
+
+examples:
+  - |
+    clk@a08004 {
+        #clock-cells = <1>;
+        #reset-cells = <1>;
+        compatible = "allwinner,sun9i-a80-usb-phy-clk";
+        reg = <0x00a08004 0x4>;
+        clocks = <&ahb1_gates 1>;
+        clock-output-names = "usb_phy0", "usb_hsic1_480M",
+                             "usb_phy1", "usb_hsic2_480M",
+                             "usb_phy2", "usb_hsic_12M";
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/amlogic,meson8-ddr-clkc.yaml b/Documentation/devicetree/bindings/clock/amlogic,meson8-ddr-clkc.yaml
new file mode 100644
index 0000000..4b8669f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/amlogic,meson8-ddr-clkc.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/amlogic,meson8-ddr-clkc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amlogic DDR Clock Controller Device Tree Bindings
+
+maintainers:
+  - Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+
+properties:
+  compatible:
+    enum:
+      - amlogic,meson8-ddr-clkc
+      - amlogic,meson8b-ddr-clkc
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: xtal
+
+  "#clock-cells":
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - "#clock-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    ddr_clkc: clock-controller@400 {
+      compatible = "amlogic,meson8-ddr-clkc";
+      reg = <0x400 0x20>;
+      clocks = <&xtal>;
+      clock-names = "xtal";
+      #clock-cells = <1>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
index 4d94091..cc51e47 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
@@ -11,6 +11,11 @@
 	- "amlogic,meson8m2-clkc" for Meson8m2 (S812) SoCs
 - #clock-cells: should be 1.
 - #reset-cells: should be 1.
+- clocks: list of clock phandles, one for each entry in clock-names
+- clock-names: should contain the following:
+  * "xtal": the 24MHz system oscillator
+  * "ddr_pll": the DDR PLL clock
+  * "clk_32k": (if present) the 32kHz clock signal from GPIOAO_6 (CLK_32K_IN)
 
 Parent node should have the following properties :
 - compatible: "amlogic,meson-hhi-sysctrl", "simple-mfd", "syscon"
diff --git a/Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml b/Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml
index e638273..8559fe8 100644
--- a/Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/clock/bitmain,bm1880-clk.yaml#
+$id: http://devicetree.org/schemas/clock/bitmain,bm1880-clk.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Bitmain BM1880 Clock Controller
diff --git a/Documentation/devicetree/bindings/clock/fsl,plldig.yaml b/Documentation/devicetree/bindings/clock/fsl,plldig.yaml
new file mode 100644
index 0000000..c835003
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/fsl,plldig.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/fsl,plldig.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP QorIQ Layerscape LS1028A Display PIXEL Clock Binding
+
+maintainers:
+  - Wen He <wen.he_1@nxp.com>
+
+description: |
+  NXP LS1028A has a clock domain PXLCLK0 used for the Display output
+  interface in the display core, implemented in the TSMC CLN28HPM PLL,
+  which generates and supplies pixel clocks to the Display.
+
+properties:
+  compatible:
+    const: fsl,ls1028a-plldig
+
+  reg:
+    maxItems: 1
+
+  '#clock-cells':
+    const: 0
+
+  fsl,vco-hz:
+     description: Optional VCO frequency of the PLL in Hertz.
+        The VCO frequency of this PLL cannot be changed at runtime,
+        only at startup. Therefore, the output frequencies are very
+        limited and might not even closely match the requested frequency.
+        To work around this restriction the user may specify their own
+        desired VCO frequency for the PLL.
+     minimum: 650000000
+     maximum: 1300000000
+     default: 1188000000
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - '#clock-cells'
+
+examples:
+  # Display PIXEL Clock node:
+  - |
+    dpclk: clock-display@f1f0000 {
+        compatible = "fsl,ls1028a-plldig";
+        reg = <0x0 0xf1f0000 0x0 0xffff>;
+        #clock-cells = <0>;
+        clocks = <&osc_27m>;
+    };
+
+...
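A hedged variant of the example node above that also sets the optional fsl,vco-hz property, using the documented default of 1188000000 Hz; the &osc_27m phandle is reused from the example:

	dpclk: clock-display@f1f0000 {
		compatible = "fsl,ls1028a-plldig";
		reg = <0x0 0xf1f0000 0x0 0xffff>;
		#clock-cells = <0>;
		clocks = <&osc_27m>;
		/* optional: pin the VCO to the documented default frequency */
		fsl,vco-hz = <1188000000>;
	};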
diff --git a/Documentation/devicetree/bindings/clock/fsl,sai-clock.yaml b/Documentation/devicetree/bindings/clock/fsl,sai-clock.yaml
new file mode 100644
index 0000000..fc3bdfd
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/fsl,sai-clock.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/fsl,sai-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale SAI bitclock-as-a-clock binding
+
+maintainers:
+  - Michael Walle <michael@walle.cc>
+
+description: |
+  It is possible to use the BCLK pin of a SAI module as a generic clock
+  output. Some SoCs are very constrained in their pin multiplexer
+  configuration, e.g. pins can only be changed in groups. For example, on
+  the LS1028A SoC you can only enable SAIs in pairs. If you use only one
+  SAI, the pins of the second one are wasted. Using this binding it is
+  possible to use the clock of the second SAI as an MCLK clock for an
+  audio codec, for example.
+
+  This is a composite of a gated clock and a divider clock.
+
+properties:
+  compatible:
+    const: fsl,vf610-sai-clock
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  '#clock-cells':
+    const: 0
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    soc {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        mclk: clock-mclk@f130080 {
+            compatible = "fsl,vf610-sai-clock";
+            reg = <0x0 0xf130080 0x0 0x80>;
+            #clock-cells = <0>;
+            clocks = <&parentclk>;
+        };
+    };
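A consumer-side sketch of the MCLK use case described above; the codec compatible, unit address and "mclk" clock name are purely illustrative and not part of this binding:

	audio-codec@1a {
		compatible = "vendor,example-codec";	/* placeholder codec node */
		reg = <0x1a>;
		/* consume the SAI bit clock exposed by the mclk node above */
		clocks = <&mclk>;
		clock-names = "mclk";
	};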
diff --git a/Documentation/devicetree/bindings/clock/imx8mn-clock.yaml b/Documentation/devicetree/bindings/clock/imx8mn-clock.yaml
index 622f365..cd0b8a3 100644
--- a/Documentation/devicetree/bindings/clock/imx8mn-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/imx8mn-clock.yaml
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/clock/imx8mn-clock.yaml#
+$id: http://devicetree.org/schemas/clock/imx8mn-clock.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: NXP i.MX8M Nano Clock Control Module Binding
diff --git a/Documentation/devicetree/bindings/clock/imx8mp-clock.yaml b/Documentation/devicetree/bindings/clock/imx8mp-clock.yaml
new file mode 100644
index 0000000..89aee63
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx8mp-clock.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx8mp-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX8M Plus Clock Control Module Binding
+
+maintainers:
+  - Anson Huang <Anson.Huang@nxp.com>
+
+description:
+  The NXP i.MX8M Plus clock control module is an integrated clock controller,
+  which generates and supplies clocks to all modules.
+
+properties:
+  compatible:
+    const: fsl,imx8mp-ccm
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: 32k osc
+      - description: 24m osc
+      - description: ext1 clock input
+      - description: ext2 clock input
+      - description: ext3 clock input
+      - description: ext4 clock input
+
+  clock-names:
+    items:
+      - const: osc_32k
+      - const: osc_24m
+      - const: clk_ext1
+      - const: clk_ext2
+      - const: clk_ext3
+      - const: clk_ext4
+
+  '#clock-cells':
+    const: 1
+    description:
+      The clock consumer should specify the desired clock by having the clock
+      ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mp-clock.h
+      for the full list of i.MX8M Plus clock IDs.
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+
+examples:
+  # Clock Control Module node:
+  - |
+    clk: clock-controller@30380000 {
+        compatible = "fsl,imx8mp-ccm";
+        reg = <0x30380000 0x10000>;
+        #clock-cells = <1>;
+        clocks = <&osc_32k>, <&osc_24m>, <&clk_ext1>,
+                 <&clk_ext2>, <&clk_ext3>, <&clk_ext4>;
+        clock-names = "osc_32k", "osc_24m", "clk_ext1",
+                      "clk_ext2", "clk_ext3", "clk_ext4";
+    };
+
+...
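A hedged consumer sketch of the #clock-cells usage described above; the macro name is assumed to exist in include/dt-bindings/clock/imx8mp-clock.h and should be checked against that header, and the node is illustrative only:

	#include <dt-bindings/clock/imx8mp-clock.h>

	serial@30860000 {
		/* IMX8MP_CLK_UART1_ROOT is an assumed clock ID from the header above */
		clocks = <&clk IMX8MP_CLK_UART1_ROOT>;
	};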
diff --git a/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml b/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml
index 5cf0b81..f0b804a7 100644
--- a/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/clock/milbeaut-clock.yaml#
+$id: http://devicetree.org/schemas/clock/milbeaut-clock.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Milbeaut SoCs Clock Controller Binding
diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc.txt b/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
deleted file mode 100644
index d639e18..0000000
--- a/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Qualcomm Technologies, Inc. Display Clock Controller Binding
-------------------------------------------------------------
-
-Required properties :
-
-- compatible : shall contain "qcom,sdm845-dispcc"
-- reg : shall contain base register location and length.
-- #clock-cells : from common clock binding, shall contain 1.
-- #reset-cells : from common reset binding, shall contain 1.
-- #power-domain-cells : from generic power domain binding, shall contain 1.
-
-Example:
-	dispcc: clock-controller@af00000 {
-		compatible = "qcom,sdm845-dispcc";
-		reg = <0xaf00000 0x100000>;
-		#clock-cells = <1>;
-		#reset-cells = <1>;
-		#power-domain-cells = <1>;
-	};
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
new file mode 100644
index 0000000..17f8717
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-apq8064.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for APQ8064
+
+maintainers:
+  - Stephen Boyd <sboyd@kernel.org>
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm global clock control module which supports the clocks, resets and
+  power domains on APQ8064.
+
+  See also:
+  - dt-bindings/clock/qcom,gcc-msm8960.h
+  - dt-bindings/reset/qcom,gcc-msm8960.h
+
+properties:
+  compatible:
+    const: qcom,gcc-apq8064
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+  nvmem-cells:
+    minItems: 1
+    maxItems: 2
+    description:
+      Qualcomm TSENS (thermal sensor device) on some devices can
+      be part of GCC and hence the TSENS properties can also be part
+      of the GCC/clock-controller node.
+      For more details on the TSENS properties please refer to
+      Documentation/devicetree/bindings/thermal/qcom-tsens.txt
+
+  nvmem-cell-names:
+    minItems: 1
+    maxItems: 2
+    items:
+      - const: calib
+      - const: calib_backup
+
+  '#thermal-sensor-cells':
+    const: 1
+
+  protected-clocks:
+    description:
+      Protected clock specifier list as per common clock binding.
+
+required:
+  - compatible
+  - reg
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+  - nvmem-cells
+  - nvmem-cell-names
+  - '#thermal-sensor-cells'
+
+examples:
+  - |
+    clock-controller@900000 {
+      compatible = "qcom,gcc-apq8064";
+      reg = <0x00900000 0x4000>;
+      nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+      nvmem-cell-names = "calib", "calib_backup";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+      #thermal-sensor-cells = <1>;
+    };
+...
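Because #thermal-sensor-cells is <1> here, a thermal zone can reference the GCC node directly as its temperature sensor; a minimal sketch in which the sensor index 0 and the trip-less zone are illustrative only:

	thermal-zones {
		cpu-thermal {
			polling-delay-passive = <250>;
			polling-delay = <1000>;
			/* one specifier cell, matching #thermal-sensor-cells = <1> */
			thermal-sensors = <&gcc 0>;
		};
	};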
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml
new file mode 100644
index 0000000..89c6e07
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-ipq8074.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for IPQ8074
+
+maintainers:
+  - Stephen Boyd <sboyd@kernel.org>
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm global clock control module which supports the clocks, resets and
+  power domains on IPQ8074.
+
+  See also:
+  - dt-bindings/clock/qcom,gcc-ipq8074.h
+
+properties:
+  compatible:
+    const: qcom,gcc-ipq8074
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+  protected-clocks:
+    description:
+      Protected clock specifier list as per common clock binding.
+
+required:
+  - compatible
+  - reg
+  - '#clock-cells'
+  - '#reset-cells'
+
+examples:
+  - |
+    clock-controller@1800000 {
+      compatible = "qcom,gcc-ipq8074";
+      reg = <0x01800000 0x80000>;
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml
new file mode 100644
index 0000000..18e4e77
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-msm8996.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for MSM8996
+
+maintainers:
+  - Stephen Boyd <sboyd@kernel.org>
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm global clock control module which supports the clocks, resets and
+  power domains on MSM8996.
+
+  See also:
+  - dt-bindings/clock/qcom,gcc-msm8996.h
+
+properties:
+  compatible:
+    const: qcom,gcc-msm8996
+
+  clocks:
+    items:
+      - description: XO source
+      - description: Second XO source
+      - description: Sleep clock source
+
+  clock-names:
+    items:
+      - const: cxo
+      - const: cxo2
+      - const: sleep_clk
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+  protected-clocks:
+    description:
+      Protected clock specifier list as per common clock binding.
+
+required:
+  - compatible
+  - reg
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+examples:
+  - |
+    clock-controller@300000 {
+      compatible = "qcom,gcc-msm8996";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+      reg = <0x300000 0x90000>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8998.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8998.yaml
new file mode 100644
index 0000000..1d3cae9
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8998.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-msm8998.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for MSM8998
+
+maintainers:
+  - Stephen Boyd <sboyd@kernel.org>
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm global clock control module which supports the clocks, resets and
+  power domains on MSM8998.
+
+  See also:
+  - dt-bindings/clock/qcom,gcc-msm8998.h
+
+properties:
+  compatible:
+    const: qcom,gcc-msm8998
+
+  clocks:
+    items:
+      - description: Board XO source
+      - description: Sleep clock source
+      - description: USB 3.0 phy pipe clock
+      - description: UFS phy rx symbol clock for pipe 0
+      - description: UFS phy rx symbol clock for pipe 1
+      - description: UFS phy tx symbol clock
+      - description: PCIE phy pipe clock
+
+  clock-names:
+    items:
+      - const: xo
+      - const: sleep_clk
+      - const: usb3_pipe
+      - const: ufs_rx_symbol0
+      - const: ufs_rx_symbol1
+      - const: ufs_tx_symbol0
+      - const: pcie0_pipe
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+  protected-clocks:
+    description:
+      Protected clock specifier list as per common clock binding.
+
+required:
+  - compatible
+  - clocks
+  - clock-names
+  - reg
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,rpmcc.h>
+    clock-controller@100000 {
+      compatible = "qcom,gcc-msm8998";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+      reg = <0x00100000 0xb0000>;
+      clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
+               <&sleep>,
+               <0>,
+               <0>,
+               <0>,
+               <0>,
+               <0>;
+      clock-names = "xo",
+                    "sleep_clk",
+                    "usb3_pipe",
+                    "ufs_rx_symbol0",
+                    "ufs_rx_symbol1",
+                    "ufs_tx_symbol0",
+                    "pcie0_pipe";
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-qcs404.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-qcs404.yaml
new file mode 100644
index 0000000..8cdece3
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-qcs404.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-qcs404.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for QCS404
+
+maintainers:
+  - Stephen Boyd <sboyd@kernel.org>
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm global clock control module which supports the clocks, resets and
+  power domains on QCS404.
+
+  See also:
+  - dt-bindings/clock/qcom,gcc-qcs404.h
+
+properties:
+  compatible:
+    const: qcom,gcc-qcs404
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+  protected-clocks:
+    description:
+      Protected clock specifier list as per common clock binding.
+
+required:
+  - compatible
+  - reg
+  - '#clock-cells'
+  - '#reset-cells'
+
+examples:
+  - |
+    clock-controller@1800000 {
+      compatible = "qcom,gcc-qcs404";
+      reg = <0x01800000 0x80000>;
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml
new file mode 100644
index 0000000..ee4f968
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-sc7180.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for SC7180
+
+maintainers:
+  - Stephen Boyd <sboyd@kernel.org>
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm global clock control module which supports the clocks, resets and
+  power domains on SC7180.
+
+  See also:
+  - dt-bindings/clock/qcom,gcc-sc7180.h
+
+properties:
+  compatible:
+    const: qcom,gcc-sc7180
+
+  clocks:
+    items:
+      - description: Board XO source
+      - description: Board active XO source
+      - description: Sleep clock source
+
+  clock-names:
+    items:
+      - const: bi_tcxo
+      - const: bi_tcxo_ao
+      - const: sleep_clk
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+  protected-clocks:
+    description:
+      Protected clock specifier list as per common clock binding.
+
+required:
+  - compatible
+  - clocks
+  - clock-names
+  - reg
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    clock-controller@100000 {
+      compatible = "qcom,gcc-sc7180";
+      reg = <0 0x00100000 0 0x1f0000>;
+      clocks = <&rpmhcc RPMH_CXO_CLK>,
+               <&rpmhcc RPMH_CXO_CLK_A>,
+               <&sleep_clk>;
+      clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml
new file mode 100644
index 0000000..888e9a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-sm8150.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for SM8150
+
+maintainers:
+  - Stephen Boyd <sboyd@kernel.org>
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm global clock control module which supports the clocks, resets and
+  power domains on SM8150.
+
+  See also:
+  - dt-bindings/clock/qcom,gcc-sm8150.h
+
+properties:
+  compatible:
+    const: qcom,gcc-sm8150
+
+  clocks:
+    items:
+      - description: Board XO source
+      - description: Sleep clock source
+
+  clock-names:
+    items:
+      - const: bi_tcxo
+      - const: sleep_clk
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+  protected-clocks:
+    description:
+      Protected clock specifier list as per common clock binding.
+
+required:
+  - compatible
+  - clocks
+  - clock-names
+  - reg
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    clock-controller@100000 {
+      compatible = "qcom,gcc-sm8150";
+      reg = <0 0x00100000 0 0x1f0000>;
+      clocks = <&rpmhcc RPMH_CXO_CLK>,
+               <&sleep_clk>;
+      clock-names = "bi_tcxo", "sleep_clk";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc.yaml
index e73a56f..d18f8ab 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.yaml
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/clock/qcom,gcc.yaml#
+$id: http://devicetree.org/schemas/clock/qcom,gcc.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Qualcomm Global Clock & Reset Controller Binding
@@ -14,46 +14,42 @@
   Qualcomm global clock control module which supports the clocks, resets and
   power domains.
 
+  See also:
+  - dt-bindings/clock/qcom,gcc-apq8084.h
+  - dt-bindings/reset/qcom,gcc-apq8084.h
+  - dt-bindings/clock/qcom,gcc-ipq4019.h
+  - dt-bindings/clock/qcom,gcc-ipq6018.h
+  - dt-bindings/reset/qcom,gcc-ipq6018.h
+  - dt-bindings/clock/qcom,gcc-ipq806x.h (qcom,gcc-ipq8064)
+  - dt-bindings/reset/qcom,gcc-ipq806x.h (qcom,gcc-ipq8064)
+  - dt-bindings/clock/qcom,gcc-msm8660.h
+  - dt-bindings/reset/qcom,gcc-msm8660.h
+  - dt-bindings/clock/qcom,gcc-msm8974.h
+  - dt-bindings/reset/qcom,gcc-msm8974.h
+  - dt-bindings/clock/qcom,gcc-msm8994.h
+  - dt-bindings/clock/qcom,gcc-mdm9615.h
+  - dt-bindings/reset/qcom,gcc-mdm9615.h
+  - dt-bindings/clock/qcom,gcc-sdm660.h  (qcom,gcc-sdm630 and qcom,gcc-sdm660)
+  - dt-bindings/clock/qcom,gcc-sdm845.h
+
 properties:
-  compatible :
+  compatible:
     enum:
-       - qcom,gcc-apq8064
-       - qcom,gcc-apq8084
-       - qcom,gcc-ipq8064
-       - qcom,gcc-ipq4019
-       - qcom,gcc-ipq8074
-       - qcom,gcc-msm8660
-       - qcom,gcc-msm8916
-       - qcom,gcc-msm8960
-       - qcom,gcc-msm8974
-       - qcom,gcc-msm8974pro
-       - qcom,gcc-msm8974pro-ac
-       - qcom,gcc-msm8994
-       - qcom,gcc-msm8996
-       - qcom,gcc-msm8998
-       - qcom,gcc-mdm9615
-       - qcom,gcc-qcs404
-       - qcom,gcc-sc7180
-       - qcom,gcc-sdm630
-       - qcom,gcc-sdm660
-       - qcom,gcc-sdm845
-       - qcom,gcc-sm8150
-
-  clocks:
-    minItems: 1
-    maxItems: 3
-    items:
-      - description: Board XO source
-      - description: Board active XO source
-      - description: Sleep clock source
-
-  clock-names:
-    minItems: 1
-    maxItems: 3
-    items:
-      - const: bi_tcxo
-      - const: bi_tcxo_ao
-      - const: sleep_clk
+      - qcom,gcc-apq8084
+      - qcom,gcc-ipq4019
+      - qcom,gcc-ipq6018
+      - qcom,gcc-ipq8064
+      - qcom,gcc-msm8660
+      - qcom,gcc-msm8916
+      - qcom,gcc-msm8960
+      - qcom,gcc-msm8974
+      - qcom,gcc-msm8974pro
+      - qcom,gcc-msm8974pro-ac
+      - qcom,gcc-msm8994
+      - qcom,gcc-mdm9615
+      - qcom,gcc-sdm630
+      - qcom,gcc-sdm660
+      - qcom,gcc-sdm845
 
   '#clock-cells':
     const: 1
@@ -67,31 +63,9 @@
   reg:
     maxItems: 1
 
-  nvmem-cells:
-    minItems: 1
-    maxItems: 2
-    description:
-      Qualcomm TSENS (thermal sensor device) on some devices can
-      be part of GCC and hence the TSENS properties can also be part
-      of the GCC/clock-controller node.
-      For more details on the TSENS properties please refer
-      Documentation/devicetree/bindings/thermal/qcom-tsens.txt
-
-  nvmem-cell-names:
-    minItems: 1
-    maxItems: 2
-    description:
-      Names for each nvmem-cells specified.
-    items:
-      - const: calib
-      - const: calib_backup
-
-  'thermal-sensor-cells':
-    const: 1
-
   protected-clocks:
     description:
-       Protected clock specifier list as per common clock binding
+      Protected clock specifier list as per common clock binding.
 
 required:
   - compatible
@@ -100,32 +74,6 @@
   - '#reset-cells'
   - '#power-domain-cells'
 
-if:
-  properties:
-    compatible:
-      contains:
-        const: qcom,gcc-apq8064
-
-then:
-  required:
-    - nvmem-cells
-    - nvmem-cell-names
-    - '#thermal-sensor-cells'
-
-else:
-  if:
-    properties:
-      compatible:
-        contains:
-          enum:
-            - qcom,gcc-sm8150
-            - qcom,gcc-sc7180
-  then:
-    required:
-       - clocks
-       - clock-names
-
-
 examples:
   # Example for GCC for MSM8960:
   - |
@@ -136,53 +84,4 @@
       #reset-cells = <1>;
       #power-domain-cells = <1>;
     };
-
-
-  # Example of GCC with TSENS properties:
-  - |
-    clock-controller@900000 {
-      compatible = "qcom,gcc-apq8064";
-      reg = <0x00900000 0x4000>;
-      nvmem-cells = <&tsens_calib>, <&tsens_backup>;
-      nvmem-cell-names = "calib", "calib_backup";
-      #clock-cells = <1>;
-      #reset-cells = <1>;
-      #power-domain-cells = <1>;
-      #thermal-sensor-cells = <1>;
-    };
-
-  # Example of GCC with protected-clocks properties:
-  - |
-    clock-controller@100000 {
-      compatible = "qcom,gcc-sdm845";
-      reg = <0x100000 0x1f0000>;
-      protected-clocks = <187>, <188>, <189>, <190>, <191>;
-      #clock-cells = <1>;
-      #reset-cells = <1>;
-      #power-domain-cells = <1>;
-    };
-
-  # Example of GCC with clock node properties for SM8150:
-  - |
-    clock-controller@100000 {
-      compatible = "qcom,gcc-sm8150";
-      reg = <0x00100000 0x1f0000>;
-      clocks = <&rpmhcc 0>, <&rpmhcc 1>, <&sleep_clk>;
-      clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk";
-      #clock-cells = <1>;
-      #reset-cells = <1>;
-      #power-domain-cells = <1>;
-     };
-
-  # Example of GCC with clock nodes properties for SC7180:
-  - |
-    clock-controller@100000 {
-      compatible = "qcom,gcc-sc7180";
-      reg = <0x100000 0x1f0000>;
-      clocks = <&rpmhcc 0>, <&rpmhcc 1>;
-      clock-names = "bi_tcxo", "bi_tcxo_ao";
-      #clock-cells = <1>;
-      #reset-cells = <1>;
-      #power-domain-cells = <1>;
-    };
 ...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
deleted file mode 100644
index 269afe8a..0000000
--- a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Qualcomm Graphics Clock & Reset Controller Binding
---------------------------------------------------
-
-Required properties :
-- compatible : shall contain "qcom,sdm845-gpucc" or "qcom,msm8998-gpucc"
-- reg : shall contain base register location and length
-- #clock-cells : from common clock binding, shall contain 1
-- #reset-cells : from common reset binding, shall contain 1
-- #power-domain-cells : from generic power domain binding, shall contain 1
-- clocks : shall contain the XO clock
-	   shall contain the gpll0 out main clock (msm8998)
-- clock-names : shall be "xo"
-		shall be "gpll0" (msm8998)
-
-Example:
-	gpucc: clock-controller@5090000 {
-		compatible = "qcom,sdm845-gpucc";
-		reg = <0x5090000 0x9000>;
-		#clock-cells = <1>;
-		#reset-cells = <1>;
-		#power-domain-cells = <1>;
-		clocks = <&rpmhcc RPMH_CXO_CLK>;
-		clock-names = "xo";
-	};
diff --git a/Documentation/devicetree/bindings/clock/qcom,mmcc.txt b/Documentation/devicetree/bindings/clock/qcom,mmcc.txt
deleted file mode 100644
index 8b0f784..0000000
--- a/Documentation/devicetree/bindings/clock/qcom,mmcc.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Qualcomm Multimedia Clock & Reset Controller Binding
-----------------------------------------------------
-
-Required properties :
-- compatible : shall contain only one of the following:
-
-			"qcom,mmcc-apq8064"
-			"qcom,mmcc-apq8084"
-			"qcom,mmcc-msm8660"
-			"qcom,mmcc-msm8960"
-			"qcom,mmcc-msm8974"
-			"qcom,mmcc-msm8996"
-
-- reg : shall contain base register location and length
-- #clock-cells : shall contain 1
-- #reset-cells : shall contain 1
-
-Optional properties :
-- #power-domain-cells : shall contain 1
-
-Example:
-	clock-controller@4000000 {
-		compatible = "qcom,mmcc-msm8960";
-		reg = <0x4000000 0x1000>;
-		#clock-cells = <1>;
-		#reset-cells = <1>;
-		#power-domain-cells = <1>;
-	};
diff --git a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
new file mode 100644
index 0000000..8551849
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,mmcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Multimedia Clock & Reset Controller Binding
+
+maintainers:
+  - Jeffrey Hugo <jhugo@codeaurora.org>
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm multimedia clock control module which supports the clocks, resets and
+  power domains.
+
+properties:
+  compatible:
+    enum:
+      - qcom,mmcc-apq8064
+      - qcom,mmcc-apq8084
+      - qcom,mmcc-msm8660
+      - qcom,mmcc-msm8960
+      - qcom,mmcc-msm8974
+      - qcom,mmcc-msm8996
+      - qcom,mmcc-msm8998
+
+  clocks:
+    items:
+      - description: Board XO source
+      - description: Board sleep source
+      - description: Global PLL 0 clock
+      - description: DSI phy instance 0 dsi clock
+      - description: DSI phy instance 0 byte clock
+      - description: DSI phy instance 1 dsi clock
+      - description: DSI phy instance 1 byte clock
+      - description: HDMI phy PLL clock
+      - description: DisplayPort phy PLL vco clock
+      - description: DisplayPort phy PLL link clock
+
+  clock-names:
+    items:
+      - const: xo
+      - const: sleep
+      - const: gpll0
+      - const: dsi0dsi
+      - const: dsi0byte
+      - const: dsi1dsi
+      - const: dsi1byte
+      - const: hdmipll
+      - const: dpvco
+      - const: dplink
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+  protected-clocks:
+    description:
+      Protected clock specifier list as per common clock binding.
+
+required:
+  - compatible
+  - reg
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+if:
+  properties:
+    compatible:
+      contains:
+        const: qcom,mmcc-msm8998
+
+then:
+  required:
+    - clocks
+    - clock-names
+
+examples:
+  # Example for MMCC for MSM8960:
+  - |
+    clock-controller@4000000 {
+      compatible = "qcom,mmcc-msm8960";
+      reg = <0x4000000 0x1000>;
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
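For the qcom,mmcc-msm8998 case, the if/then block above makes clocks and clock-names mandatory; a hedged sketch in which the register address, clock phandles and specifier indices are placeholders rather than values taken from this patch:

	clock-controller@c8c0000 {
		compatible = "qcom,mmcc-msm8998";
		reg = <0xc8c0000 0x40000>;
		/* order follows the clock-names list documented above */
		clocks = <&xo_board>, <&sleep_clk>, <&gpll0>,
			 <&dsi0_phy 1>, <&dsi0_phy 0>,
			 <&dsi1_phy 1>, <&dsi1_phy 0>,
			 <&hdmi_phy>, <&dp_phy 0>, <&dp_phy 1>;
		clock-names = "xo", "sleep", "gpll0",
			      "dsi0dsi", "dsi0byte",
			      "dsi1dsi", "dsi1byte",
			      "hdmipll", "dpvco", "dplink";
		#clock-cells = <1>;
		#reset-cells = <1>;
		#power-domain-cells = <1>;
	};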
diff --git a/Documentation/devicetree/bindings/clock/qcom,msm8998-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,msm8998-gpucc.yaml
new file mode 100644
index 0000000..7d853c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,msm8998-gpucc.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,msm8998-gpucc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Graphics Clock & Reset Controller Binding for MSM8998
+
+maintainers:
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm graphics clock control module which supports the clocks, resets and
+  power domains on MSM8998.
+
+  See also dt-bindings/clock/qcom,gpucc-msm8998.h.
+
+properties:
+  compatible:
+    const: qcom,msm8998-gpucc
+
+  clocks:
+    items:
+      - description: Board XO source
+      - description: GPLL0 main branch source (gcc_gpu_gpll0_clk_src)
+
+  clock-names:
+    items:
+      - const: xo
+      - const: gpll0
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,gcc-msm8998.h>
+    #include <dt-bindings/clock/qcom,rpmcc.h>
+    clock-controller@5065000 {
+      compatible = "qcom,msm8998-gpucc";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+      reg = <0x05065000 0x9000>;
+      clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>, <&gcc GPLL0_OUT_MAIN>;
+      clock-names = "xo", "gpll0";
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
index 94e2f14..2cd158f 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/clock/qcom,rpmhcc.yaml#
+$id: http://devicetree.org/schemas/clock/qcom,rpmhcc.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Qualcomm Technologies, Inc. RPMh Clocks Bindings
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml
new file mode 100644
index 0000000..0429062
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,sc7180-dispcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Display Clock & Reset Controller Binding for SC7180
+
+maintainers:
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm display clock control module which supports the clocks, resets and
+  power domains on SC7180.
+
+  See also dt-bindings/clock/qcom,dispcc-sc7180.h.
+
+properties:
+  compatible:
+    const: qcom,sc7180-dispcc
+
+  clocks:
+    items:
+      - description: Board XO source
+      - description: GPLL0 source from GCC
+      - description: Byte clock from DSI PHY
+      - description: Pixel clock from DSI PHY
+      - description: Link clock from DP PHY
+      - description: VCO DIV clock from DP PHY
+
+  clock-names:
+    items:
+      - const: bi_tcxo
+      - const: gcc_disp_gpll0_clk_src
+      - const: dsi0_phy_pll_out_byteclk
+      - const: dsi0_phy_pll_out_dsiclk
+      - const: dp_phy_pll_link_clk
+      - const: dp_phy_pll_vco_div_clk
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,gcc-sc7180.h>
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    clock-controller@af00000 {
+      compatible = "qcom,sc7180-dispcc";
+      reg = <0 0x0af00000 0 0x200000>;
+      clocks = <&rpmhcc RPMH_CXO_CLK>,
+               <&gcc GCC_DISP_GPLL0_CLK_SRC>,
+               <&dsi_phy 0>,
+               <&dsi_phy 1>,
+               <&dp_phy 0>,
+               <&dp_phy 1>;
+      clock-names = "bi_tcxo",
+                    "gcc_disp_gpll0_clk_src",
+                    "dsi0_phy_pll_out_byteclk",
+                    "dsi0_phy_pll_out_dsiclk",
+                    "dp_phy_pll_link_clk",
+                    "dp_phy_pll_vco_div_clk";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml
new file mode 100644
index 0000000..5785192
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,sc7180-gpucc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Graphics Clock & Reset Controller Binding for SC7180
+
+maintainers:
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm graphics clock control module which supports the clocks, resets and
+  power domains on SC7180.
+
+  See also dt-bindings/clock/qcom,gpucc-sc7180.h.
+
+properties:
+  compatible:
+    const: qcom,sc7180-gpucc
+
+  clocks:
+    items:
+      - description: Board XO source
+      - description: GPLL0 main branch source
+      - description: GPLL0 div branch source
+
+  clock-names:
+    items:
+      - const: bi_tcxo
+      - const: gcc_gpu_gpll0_clk_src
+      - const: gcc_gpu_gpll0_div_clk_src
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,gcc-sc7180.h>
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    clock-controller@5090000 {
+      compatible = "qcom,sc7180-gpucc";
+      reg = <0 0x05090000 0 0x9000>;
+      clocks = <&rpmhcc RPMH_CXO_CLK>,
+               <&gcc GCC_GPU_GPLL0_CLK_SRC>,
+               <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
+      clock-names = "bi_tcxo",
+                    "gcc_gpu_gpll0_clk_src",
+                    "gcc_gpu_gpll0_div_clk_src";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-videocc.yaml
new file mode 100644
index 0000000..31df901
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-videocc.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,sc7180-videocc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Video Clock & Reset Controller Binding for SC7180
+
+maintainers:
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm video clock control module which supports the clocks, resets and
+  power domains on SC7180.
+
+  See also dt-bindings/clock/qcom,videocc-sc7180.h.
+
+properties:
+  compatible:
+    const: qcom,sc7180-videocc
+
+  clocks:
+    items:
+      - description: Board XO source
+
+  clock-names:
+    items:
+      - const: bi_tcxo
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    clock-controller@ab00000 {
+      compatible = "qcom,sc7180-videocc";
+      reg = <0 0x0ab00000 0 0x10000>;
+      clocks = <&rpmhcc RPMH_CXO_CLK>;
+      clock-names = "bi_tcxo";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml
new file mode 100644
index 0000000..89269dd
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,sdm845-dispcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Display Clock & Reset Controller Binding for SDM845
+
+maintainers:
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm display clock control module which supports the clocks, resets and
+  power domains on SDM845.
+
+  See also dt-bindings/clock/qcom,dispcc-sdm845.h.
+
+properties:
+  compatible:
+    const: qcom,sdm845-dispcc
+
+  # NOTE: sdm845.dtsi existed for quite some time and specified no clocks.
+  # The code had to use hardcoded mechanisms to find the input clocks.
+  # New dts files should have these clocks.
+  clocks:
+    items:
+      - description: Board XO source
+      - description: GPLL0 source from GCC
+      - description: GPLL0 div source from GCC
+      - description: Byte clock from DSI PHY0
+      - description: Pixel clock from DSI PHY0
+      - description: Byte clock from DSI PHY1
+      - description: Pixel clock from DSI PHY1
+      - description: Link clock from DP PHY
+      - description: VCO DIV clock from DP PHY
+
+  clock-names:
+    items:
+      - const: bi_tcxo
+      - const: gcc_disp_gpll0_clk_src
+      - const: gcc_disp_gpll0_div_clk_src
+      - const: dsi0_phy_pll_out_byteclk
+      - const: dsi0_phy_pll_out_dsiclk
+      - const: dsi1_phy_pll_out_byteclk
+      - const: dsi1_phy_pll_out_dsiclk
+      - const: dp_link_clk_divsel_ten
+      - const: dp_vco_divided_clk_src_mux
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    clock-controller@af00000 {
+      compatible = "qcom,sdm845-dispcc";
+      reg = <0 0x0af00000 0 0x10000>;
+      clocks = <&rpmhcc RPMH_CXO_CLK>,
+               <&gcc GCC_DISP_GPLL0_CLK_SRC>,
+               <&gcc GCC_DISP_GPLL0_DIV_CLK_SRC>,
+               <&dsi0_phy 0>,
+               <&dsi0_phy 1>,
+               <&dsi1_phy 0>,
+               <&dsi1_phy 1>,
+               <&dp_phy 0>,
+               <&dp_phy 1>;
+      clock-names = "bi_tcxo",
+                    "gcc_disp_gpll0_clk_src",
+                    "gcc_disp_gpll0_div_clk_src",
+                    "dsi0_phy_pll_out_byteclk",
+                    "dsi0_phy_pll_out_dsiclk",
+                    "dsi1_phy_pll_out_byteclk",
+                    "dsi1_phy_pll_out_dsiclk",
+                    "dp_link_clk_divsel_ten",
+                    "dp_vco_divided_clk_src_mux";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,sdm845-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,sdm845-gpucc.yaml
new file mode 100644
index 0000000..bac04f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,sdm845-gpucc.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,sdm845-gpucc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Graphics Clock & Reset Controller Binding for SDM845
+
+maintainers:
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm graphics clock control module which supports the clocks, resets and
+  power domains on SDM845.
+
+  See also dt-bindings/clock/qcom,gpucc-sdm845.h.
+
+properties:
+  compatible:
+    const: qcom,sdm845-gpucc
+
+  clocks:
+    items:
+      - description: Board XO source
+      - description: GPLL0 main branch source
+      - description: GPLL0 div branch source
+
+  clock-names:
+    items:
+      - const: bi_tcxo
+      - const: gcc_gpu_gpll0_clk_src
+      - const: gcc_gpu_gpll0_div_clk_src
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    clock-controller@5090000 {
+      compatible = "qcom,sdm845-gpucc";
+      reg = <0 0x05090000 0 0x9000>;
+      clocks = <&rpmhcc RPMH_CXO_CLK>,
+               <&gcc GCC_GPU_GPLL0_CLK_SRC>,
+               <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
+      clock-names = "bi_tcxo",
+                    "gcc_gpu_gpll0_clk_src",
+                    "gcc_gpu_gpll0_div_clk_src";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,sdm845-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,sdm845-videocc.yaml
new file mode 100644
index 0000000..9d216c0
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,sdm845-videocc.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,sdm845-videocc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Video Clock & Reset Controller Binding for SDM845
+
+maintainers:
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm video clock control module which supports the clocks, resets and
+  power domains on SDM845.
+
+  See also dt-bindings/clock/qcom,videocc-sdm845.h.
+
+properties:
+  compatible:
+    const: qcom,sdm845-videocc
+
+  clocks:
+    items:
+      - description: Board XO source
+
+  clock-names:
+    items:
+      - const: bi_tcxo
+
+  '#clock-cells':
+    const: 1
+
+  '#reset-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#reset-cells'
+  - '#power-domain-cells'
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    clock-controller@ab00000 {
+      compatible = "qcom,sdm845-videocc";
+      reg = <0 0x0ab00000 0 0x10000>;
+      clocks = <&rpmhcc RPMH_CXO_CLK>;
+      clock-names = "bi_tcxo";
+      #clock-cells = <1>;
+      #reset-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,videocc.txt b/Documentation/devicetree/bindings/clock/qcom,videocc.txt
deleted file mode 100644
index 8a8622c..0000000
--- a/Documentation/devicetree/bindings/clock/qcom,videocc.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-Qualcomm Video Clock & Reset Controller Binding
------------------------------------------------
-
-Required properties :
-- compatible : shall contain "qcom,sdm845-videocc"
-- reg : shall contain base register location and length
-- #clock-cells : from common clock binding, shall contain 1.
-- #power-domain-cells : from generic power domain binding, shall contain 1.
-- #reset-cells : from common reset binding, shall contain 1.
-
-Example:
-	videocc: clock-controller@ab00000 {
-		compatible = "qcom,sdm845-videocc";
-		reg = <0xab00000 0x10000>;
-		#clock-cells = <1>;
-		#power-domain-cells = <1>;
-		#reset-cells = <1>;
-	};
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
index c7674d0..f4d153f 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
@@ -19,7 +19,7 @@
       - "renesas,r8a7745-cpg-mssr" for the r8a7745 SoC (RZ/G1E)
       - "renesas,r8a77470-cpg-mssr" for the r8a77470 SoC (RZ/G1C)
       - "renesas,r8a774a1-cpg-mssr" for the r8a774a1 SoC (RZ/G2M)
-      - "renesas,r8a774b1-cpg-mssr" for the r8a774a1 SoC (RZ/G2N)
+      - "renesas,r8a774b1-cpg-mssr" for the r8a774b1 SoC (RZ/G2N)
       - "renesas,r8a774c0-cpg-mssr" for the r8a774c0 SoC (RZ/G2E)
       - "renesas,r8a7790-cpg-mssr" for the r8a7790 SoC (R-Car H2)
       - "renesas,r8a7791-cpg-mssr" for the r8a7791 SoC (R-Car M2-W)
diff --git a/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.txt b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.txt
deleted file mode 100644
index fb9495e..0000000
--- a/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-STMicroelectronics STM32 Peripheral Reset Clock Controller
-==========================================================
-
-The RCC IP is both a reset and a clock controller.
-
-RCC makes also power management (resume/supend and wakeup interrupt).
-
-Please also refer to reset.txt for common reset controller binding usage.
-
-Please also refer to clock-bindings.txt for common clock controller
-binding usage.
-
-
-Required properties:
-- compatible: "st,stm32mp1-rcc", "syscon"
-- reg: should be register base and length as documented in the datasheet
-- #clock-cells: 1, device nodes should specify the clock in their
-  "clocks" property, containing a phandle to the clock device node,
-  an index specifying the clock to use.
-- #reset-cells: Shall be 1
-- interrupts: Should contain a general interrupt line and a interrupt line
-  to the wake-up of processor (CSTOP).
-
-Example:
-	rcc: rcc@50000000 {
-		compatible = "st,stm32mp1-rcc", "syscon";
-		reg = <0x50000000 0x1000>;
-		#clock-cells = <1>;
-		#reset-cells = <1>;
-		interrupts = <GIC_SPI 5 IRQ_TYPE_NONE>,
-			     <GIC_SPI 145 IRQ_TYPE_NONE>;
-	};
-
-Specifying clocks
-=================
-
-All available clocks are defined as preprocessor macros in
-dt-bindings/clock/stm32mp1-clks.h header and can be used in device
-tree sources.
-
-Specifying softreset control of devices
-=======================================
-
-Device nodes should specify the reset channel required in their "resets"
-property, containing a phandle to the reset device node and an index specifying
-which channel to use.
-The index is the bit number within the RCC registers bank, starting from RCC
-base address.
-It is calculated as: index = register_offset / 4 * 32 + bit_offset.
-Where bit_offset is the bit offset within the register.
-
-For example on STM32MP1, for LTDC reset:
- ltdc = APB4_RSTSETR_offset / 4 * 32 + LTDC_bit_offset
-      = 0x180 / 4 * 32 + 0 = 3072
-
-The list of valid indices for STM32MP1 is available in:
-include/dt-bindings/reset-controller/stm32mp1-resets.h
-
-This file implements defines like:
-#define LTDC_R	3072
diff --git a/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml
new file mode 100644
index 0000000..4e38550
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/st,stm32mp1-rcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Reset Clock Controller Binding
+
+maintainers:
+  - Gabriel Fernandez <gabriel.fernandez@st.com>
+
+description: |
+  The RCC IP is both a reset and a clock controller.
+  RCC also provides power management (resume/suspend and wakeup interrupt).
+  Please also refer to reset.txt for common reset controller binding usage.
+
+  This binding uses the common clock bindings:
+  Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+  Specifying clocks
+  =================
+
+  All available clocks are defined as preprocessor macros in
+  dt-bindings/clock/stm32mp1-clks.h header and can be used in device
+  tree sources.
+
+  Specifying softreset control of devices
+  =======================================
+
+  Device nodes should specify the reset channel required in their "resets"
+  property, containing a phandle to the reset device node and an index specifying
+  which channel to use.
+  The index is the bit number within the RCC registers bank, starting from RCC
+  base address.
+  It is calculated as: index = register_offset / 4 * 32 + bit_offset.
+  Where bit_offset is the bit offset within the register.
+
+  For example on STM32MP1, for LTDC reset:
+     ltdc = APB4_RSTSETR_offset / 4 * 32 + LTDC_bit_offset
+          = 0x180 / 4 * 32 + 0 = 3072
+
+  The list of valid indices for STM32MP1 is available in:
+  include/dt-bindings/reset-controller/stm32mp1-resets.h
+
+  This file implements defines like:
+  #define LTDC_R	3072
+
+properties:
+  "#clock-cells":
+    const: 1
+
+  "#reset-cells":
+    const: 1
+
+  compatible:
+    items:
+      - const: st,stm32mp1-rcc
+      - const: syscon
+
+  reg:
+    maxItems: 1
+
+required:
+  - "#clock-cells"
+  - "#reset-cells"
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    rcc: rcc@50000000 {
+        compatible = "st,stm32mp1-rcc", "syscon";
+        reg = <0x50000000 0x1000>;
+        #clock-cells = <1>;
+        #reset-cells = <1>;
+    };
+...
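A consumer-side sketch of the softreset indexing explained in the description above; the node name and unit address are illustrative, and LTDC_R is the define (value 3072) from the resets header cited there:

	ltdc: display-controller@5a001000 {
		/* LTDC_R = 3072, i.e. APB4_RSTSETR offset 0x180 / 4 * 32 + bit 0 */
		resets = <&rcc LTDC_R>;
	};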
diff --git a/Documentation/devicetree/bindings/clock/sun8i-de2.txt b/Documentation/devicetree/bindings/clock/sun8i-de2.txt
deleted file mode 100644
index 41a52c2..0000000
--- a/Documentation/devicetree/bindings/clock/sun8i-de2.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-Allwinner Display Engine 2.0/3.0 Clock Control Binding
-------------------------------------------------------
-
-Required properties :
-- compatible: must contain one of the following compatibles:
-		- "allwinner,sun8i-a83t-de2-clk"
-		- "allwinner,sun8i-h3-de2-clk"
-		- "allwinner,sun8i-v3s-de2-clk"
-		- "allwinner,sun50i-a64-de2-clk"
-		- "allwinner,sun50i-h5-de2-clk"
-		- "allwinner,sun50i-h6-de3-clk"
-
-- reg: Must contain the registers base address and length
-- clocks: phandle to the clocks feeding the display engine subsystem.
-	  Three are needed:
-  - "mod": the display engine module clock (on A83T it's the DE PLL)
-  - "bus": the bus clock for the whole display engine subsystem
-- clock-names: Must contain the clock names described just above
-- resets: phandle to the reset control for the display engine subsystem.
-- #clock-cells : must contain 1
-- #reset-cells : must contain 1
-
-Example:
-de2_clocks: clock@1000000 {
-	compatible = "allwinner,sun8i-h3-de2-clk";
-	reg = <0x01000000 0x100000>;
-	clocks = <&ccu CLK_BUS_DE>,
-		 <&ccu CLK_DE>;
-	clock-names = "bus",
-		      "mod";
-	resets = <&ccu RST_BUS_DE>;
-	#clock-cells = <1>;
-	#reset-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/clock/sun9i-de.txt b/Documentation/devicetree/bindings/clock/sun9i-de.txt
deleted file mode 100644
index fb18f32..0000000
--- a/Documentation/devicetree/bindings/clock/sun9i-de.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Allwinner A80 Display Engine Clock Control Binding
---------------------------------------------------
-
-Required properties :
-- compatible: must contain one of the following compatibles:
-		- "allwinner,sun9i-a80-de-clks"
-
-- reg: Must contain the registers base address and length
-- clocks: phandle to the clocks feeding the display engine subsystem.
-	  Three are needed:
-  - "mod": the display engine module clock
-  - "dram": the DRAM bus clock for the system
-  - "bus": the bus clock for the whole display engine subsystem
-- clock-names: Must contain the clock names described just above
-- resets: phandle to the reset control for the display engine subsystem.
-- #clock-cells : must contain 1
-- #reset-cells : must contain 1
-
-Example:
-de_clocks: clock@3000000 {
-	compatible = "allwinner,sun9i-a80-de-clks";
-	reg = <0x03000000 0x30>;
-	clocks = <&ccu CLK_DE>, <&ccu CLK_SDRAM>, <&ccu CLK_BUS_DE>;
-	clock-names = "mod", "dram", "bus";
-	resets = <&ccu RST_BUS_DE>;
-	#clock-cells = <1>;
-	#reset-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/clock/sun9i-usb.txt b/Documentation/devicetree/bindings/clock/sun9i-usb.txt
deleted file mode 100644
index 3564bd4..0000000
--- a/Documentation/devicetree/bindings/clock/sun9i-usb.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Allwinner A80 USB Clock Control Binding
----------------------------------------
-
-Required properties :
-- compatible: must contain one of the following compatibles:
-		- "allwinner,sun9i-a80-usb-clocks"
-
-- reg: Must contain the registers base address and length
-- clocks: phandle to the clocks feeding the USB subsystem. Two are needed:
-  - "bus": the bus clock for the whole USB subsystem
-  - "hosc": the high frequency oscillator (usually at 24MHz)
-- clock-names: Must contain the clock names described just above
-- #clock-cells : must contain 1
-- #reset-cells : must contain 1
-
-Example:
-usb_clocks: clock@a08000 {
-	compatible = "allwinner,sun9i-a80-usb-clks";
-	reg = <0x00a08000 0x8>;
-	clocks = <&ccu CLK_BUS_USB>, <&osc24M>;
-	clock-names = "bus", "hosc";
-	#clock-cells = <1>;
-	#reset-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt
deleted file mode 100644
index 1a042e2..0000000
--- a/Documentation/devicetree/bindings/clock/sunxi.txt
+++ /dev/null
@@ -1,225 +0,0 @@
-Device Tree Clock bindings for arch-sunxi
-
-This binding uses the common clock binding[1].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Required properties:
-- compatible : shall be one of the following:
-	"allwinner,sun4i-a10-osc-clk" - for a gatable oscillator
-	"allwinner,sun4i-a10-pll1-clk" - for the main PLL clock and PLL4
-	"allwinner,sun6i-a31-pll1-clk" - for the main PLL clock on A31
-	"allwinner,sun8i-a23-pll1-clk" - for the main PLL clock on A23
-	"allwinner,sun4i-a10-pll3-clk" - for the video PLL clock on A10
-	"allwinner,sun9i-a80-pll4-clk" - for the peripheral PLLs on A80
-	"allwinner,sun4i-a10-pll5-clk" - for the PLL5 clock
-	"allwinner,sun4i-a10-pll6-clk" - for the PLL6 clock
-	"allwinner,sun6i-a31-pll6-clk" - for the PLL6 clock on A31
-	"allwinner,sun9i-a80-gt-clk" - for the GT bus clock on A80
-	"allwinner,sun4i-a10-cpu-clk" - for the CPU multiplexer clock
-	"allwinner,sun4i-a10-axi-clk" - for the AXI clock
-	"allwinner,sun8i-a23-axi-clk" - for the AXI clock on A23
-	"allwinner,sun4i-a10-gates-clk" - for generic gates on all compatible SoCs
-	"allwinner,sun4i-a10-axi-gates-clk" - for the AXI gates
-	"allwinner,sun4i-a10-ahb-clk" - for the AHB clock
-	"allwinner,sun5i-a13-ahb-clk" - for the AHB clock on A13
-	"allwinner,sun9i-a80-ahb-clk" - for the AHB bus clocks on A80
-	"allwinner,sun4i-a10-ahb-gates-clk" - for the AHB gates on A10
-	"allwinner,sun5i-a13-ahb-gates-clk" - for the AHB gates on A13
-	"allwinner,sun5i-a10s-ahb-gates-clk" - for the AHB gates on A10s
-	"allwinner,sun7i-a20-ahb-gates-clk" - for the AHB gates on A20
-	"allwinner,sun6i-a31-ar100-clk" - for the AR100 on A31
-	"allwinner,sun9i-a80-cpus-clk" - for the CPUS on A80
-	"allwinner,sun6i-a31-ahb1-clk" - for the AHB1 clock on A31
-	"allwinner,sun8i-h3-ahb2-clk" - for the AHB2 clock on H3
-	"allwinner,sun6i-a31-ahb1-gates-clk" - for the AHB1 gates on A31
-	"allwinner,sun8i-a23-ahb1-gates-clk" - for the AHB1 gates on A23
-	"allwinner,sun9i-a80-ahb0-gates-clk" - for the AHB0 gates on A80
-	"allwinner,sun9i-a80-ahb1-gates-clk" - for the AHB1 gates on A80
-	"allwinner,sun9i-a80-ahb2-gates-clk" - for the AHB2 gates on A80
-	"allwinner,sun4i-a10-apb0-clk" - for the APB0 clock
-	"allwinner,sun6i-a31-apb0-clk" - for the APB0 clock on A31
-	"allwinner,sun8i-a23-apb0-clk" - for the APB0 clock on A23
-	"allwinner,sun9i-a80-apb0-clk" - for the APB0 bus clock on A80
-	"allwinner,sun8i-a83t-apb0-gates-clk" - for the APB0 gates on A83T
-	"allwinner,sun4i-a10-apb0-gates-clk" - for the APB0 gates on A10
-	"allwinner,sun5i-a13-apb0-gates-clk" - for the APB0 gates on A13
-	"allwinner,sun5i-a10s-apb0-gates-clk" - for the APB0 gates on A10s
-	"allwinner,sun6i-a31-apb0-gates-clk" - for the APB0 gates on A31
-	"allwinner,sun7i-a20-apb0-gates-clk" - for the APB0 gates on A20
-	"allwinner,sun8i-a23-apb0-gates-clk" - for the APB0 gates on A23
-	"allwinner,sun8i-h3-apb0-gates-clk" - for the APB0 gates on H3
-	"allwinner,sun9i-a80-apb0-gates-clk" - for the APB0 gates on A80
-	"allwinner,sun4i-a10-apb1-clk" - for the APB1 clock
-	"allwinner,sun9i-a80-apb1-clk" - for the APB1 bus clock on A80
-	"allwinner,sun4i-a10-apb1-gates-clk" - for the APB1 gates on A10
-	"allwinner,sun5i-a13-apb1-gates-clk" - for the APB1 gates on A13
-	"allwinner,sun5i-a10s-apb1-gates-clk" - for the APB1 gates on A10s
-	"allwinner,sun6i-a31-apb1-gates-clk" - for the APB1 gates on A31
-	"allwinner,sun7i-a20-apb1-gates-clk" - for the APB1 gates on A20
-	"allwinner,sun8i-a23-apb1-gates-clk" - for the APB1 gates on A23
-	"allwinner,sun9i-a80-apb1-gates-clk" - for the APB1 gates on A80
-	"allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31
-	"allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23
-	"allwinner,sun8i-a83t-bus-gates-clk" - for the bus gates on A83T
-	"allwinner,sun8i-h3-bus-gates-clk" - for the bus gates on H3
-	"allwinner,sun9i-a80-apbs-gates-clk" - for the APBS gates on A80
-	"allwinner,sun4i-a10-display-clk" - for the display clocks on the A10
-	"allwinner,sun4i-a10-dram-gates-clk" - for the DRAM gates on A10
-	"allwinner,sun5i-a13-dram-gates-clk" - for the DRAM gates on A13
-	"allwinner,sun5i-a13-mbus-clk" - for the MBUS clock on A13
-	"allwinner,sun4i-a10-mmc-clk" - for the MMC clock
-	"allwinner,sun9i-a80-mmc-clk" - for mmc module clocks on A80
-	"allwinner,sun9i-a80-mmc-config-clk" - for mmc gates + resets on A80
-	"allwinner,sun4i-a10-mod0-clk" - for the module 0 family of clocks
-	"allwinner,sun9i-a80-mod0-clk" - for module 0 (storage) clocks on A80
-	"allwinner,sun8i-a23-mbus-clk" - for the MBUS clock on A23
-	"allwinner,sun7i-a20-out-clk" - for the external output clocks
-	"allwinner,sun7i-a20-gmac-clk" - for the GMAC clock module on A20/A31
-	"allwinner,sun4i-a10-tcon-ch0-clk" - for the TCON channel 0 clock on the A10
-	"allwinner,sun4i-a10-tcon-ch1-clk" - for the TCON channel 1 clock on the A10
-	"allwinner,sun4i-a10-usb-clk" - for usb gates + resets on A10 / A20
-	"allwinner,sun5i-a13-usb-clk" - for usb gates + resets on A13
-	"allwinner,sun6i-a31-usb-clk" - for usb gates + resets on A31
-	"allwinner,sun8i-a23-usb-clk" - for usb gates + resets on A23
-	"allwinner,sun8i-h3-usb-clk" - for usb gates + resets on H3
-	"allwinner,sun9i-a80-usb-mod-clk" - for usb gates + resets on A80
-	"allwinner,sun9i-a80-usb-phy-clk" - for usb phy gates + resets on A80
-	"allwinner,sun4i-a10-ve-clk" - for the Video Engine clock
-	"allwinner,sun6i-a31-display-clk" - for the display clocks
-
-Required properties for all clocks:
-- reg : shall be the control register address for the clock.
-- clocks : shall be the input parent clock(s) phandle for the clock. For
-	multiplexed clocks, the list order must match the hardware
-	programming order.
-- #clock-cells : from common clock binding; shall be set to 0 except for
-	the following compatibles where it shall be set to 1:
-	"allwinner,*-gates-clk", "allwinner,sun4i-pll5-clk",
-	"allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk",
-	"allwinner,*-usb-clk", "allwinner,*-mmc-clk",
-	"allwinner,*-mmc-config-clk"
-- clock-output-names : shall be the corresponding names of the outputs.
-	If the clock module only has one output, the name shall be the
-	module name.
-
-And "allwinner,*-usb-clk" clocks also require:
-- reset-cells : shall be set to 1
-
-The "allwinner,sun4i-a10-ve-clk" clock also requires:
-- reset-cells : shall be set to 0
-
-The "allwinner,sun9i-a80-mmc-config-clk" clock also requires:
-- #reset-cells : shall be set to 1
-- resets : shall be the reset control phandle for the mmc block.
-
-For "allwinner,sun7i-a20-gmac-clk", the parent clocks shall be fixed rate
-dummy clocks at 25 MHz and 125 MHz, respectively. See example.
-
-Clock consumers should specify the desired clocks they use with a
-"clocks" phandle cell. Consumers that are using a gated clock should
-provide an additional ID in their clock property. This ID is the
-offset of the bit controlling this particular gate in the register.
-For the other clocks with "#clock-cells" = 1, the additional ID shall
-refer to the index of the output.
-
-For "allwinner,sun6i-a31-pll6-clk", there are 2 outputs. The first output
-is the normal PLL6 output, or "pll6". The second output is rate doubled
-PLL6, or "pll6x2".
-
-The "allwinner,*-mmc-clk" clocks have three different outputs: the
-main clock, with the ID 0, and the output and sample clocks, with the
-IDs 1 and 2, respectively.
-
-The "allwinner,sun9i-a80-mmc-config-clk" clock has one clock/reset output
-per mmc controller. The number of outputs is determined by the size of
-the address block, which is related to the overall mmc block.
-
-For example:
-
-osc24M: clk@1c20050 {
-	#clock-cells = <0>;
-	compatible = "allwinner,sun4i-a10-osc-clk";
-	reg = <0x01c20050 0x4>;
-	clocks = <&osc24M_fixed>;
-	clock-output-names = "osc24M";
-};
-
-pll1: clk@1c20000 {
-	#clock-cells = <0>;
-	compatible = "allwinner,sun4i-a10-pll1-clk";
-	reg = <0x01c20000 0x4>;
-	clocks = <&osc24M>;
-	clock-output-names = "pll1";
-};
-
-pll5: clk@1c20020 {
-	#clock-cells = <1>;
-	compatible = "allwinner,sun4i-pll5-clk";
-	reg = <0x01c20020 0x4>;
-	clocks = <&osc24M>;
-	clock-output-names = "pll5_ddr", "pll5_other";
-};
-
-pll6: clk@1c20028 {
-	#clock-cells = <1>;
-	compatible = "allwinner,sun6i-a31-pll6-clk";
-	reg = <0x01c20028 0x4>;
-	clocks = <&osc24M>;
-	clock-output-names = "pll6", "pll6x2";
-};
-
-cpu: cpu@1c20054 {
-	#clock-cells = <0>;
-	compatible = "allwinner,sun4i-a10-cpu-clk";
-	reg = <0x01c20054 0x4>;
-	clocks = <&osc32k>, <&osc24M>, <&pll1>;
-	clock-output-names = "cpu";
-};
-
-mmc0_clk: clk@1c20088 {
-	#clock-cells = <1>;
-	compatible = "allwinner,sun4i-a10-mmc-clk";
-	reg = <0x01c20088 0x4>;
-	clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-	clock-output-names = "mmc0", "mmc0_output", "mmc0_sample";
-};
-
-mii_phy_tx_clk: clk@2 {
-	#clock-cells = <0>;
-	compatible = "fixed-clock";
-	clock-frequency = <25000000>;
-	clock-output-names = "mii_phy_tx";
-};
-
-gmac_int_tx_clk: clk@3 {
-	#clock-cells = <0>;
-	compatible = "fixed-clock";
-	clock-frequency = <125000000>;
-	clock-output-names = "gmac_int_tx";
-};
-
-gmac_clk: clk@1c20164 {
-	#clock-cells = <0>;
-	compatible = "allwinner,sun7i-a20-gmac-clk";
-	reg = <0x01c20164 0x4>;
-	/*
-	 * The first clock must be fixed at 25MHz;
-	 * the second clock must be fixed at 125MHz
-	 */
-	clocks = <&mii_phy_tx_clk>, <&gmac_int_tx_clk>;
-	clock-output-names = "gmac";
-};
-
-mmc_config_clk: clk@1c13000 {
-	compatible = "allwinner,sun9i-a80-mmc-config-clk";
-	reg = <0x01c13000 0x10>;
-	clocks = <&ahb0_gates 8>;
-	clock-names = "ahb";
-	resets = <&ahb0_resets 8>;
-	reset-names = "ahb";
-	#clock-cells = <1>;
-	#reset-cells = <1>;
-	clock-output-names = "mmc0_config", "mmc1_config",
-			     "mmc2_config", "mmc3_config";
-};
diff --git a/Documentation/devicetree/bindings/clock/ti-clkctrl.txt b/Documentation/devicetree/bindings/clock/ti-clkctrl.txt
index 48ee699..18af6b9 100644
--- a/Documentation/devicetree/bindings/clock/ti-clkctrl.txt
+++ b/Documentation/devicetree/bindings/clock/ti-clkctrl.txt
@@ -16,18 +16,23 @@
 Documentation/devicetree/bindings/clock/clock-bindings.txt.
 
 Required properties :
-- compatible : shall be "ti,clkctrl"
+- compatible : shall be "ti,clkctrl" or a clock domain specific name:
+	       "ti,clkctrl-l4-cfg"
+	       "ti,clkctrl-l4-per"
+	       "ti,clkctrl-l4-secure"
+	       "ti,clkctrl-l4-wkup"
 - #clock-cells : shall contain 2 with the first entry being the instance
 		 offset from the clock domain base and the second being the
 		 clock index
+- reg : clock registers
 
 Example: Clock controller node on omap 4430:
 
 &cm2 {
 	l4per: cm@1400 {
 		cm_l4per@0 {
-			cm_l4per_clkctrl: clk@20 {
-				compatible = "ti,clkctrl";
+			cm_l4per_clkctrl: clock@20 {
+				compatible = "ti,clkctrl-l4-per", "ti,clkctrl";
 				reg = <0x20 0x1b0>;
 				#clock-cells = <2>;
 			};
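
The two clock cells described above map onto a consumer reference as
follows: the first cell is the register offset of the module instance from
the clock domain base, and the second cell is the index of the clock output
within that instance. A minimal, hypothetical consumer sketch (the node
name, compatible string, 0x28 offset and index 24 are placeholders, not
taken from this binding):

	serial@0 {
		compatible = "vendor,example-uart";	/* placeholder */
		reg = <0x0 0x100>;
		/* first cell: instance offset from the clock domain base,
		 * second cell: clock index within that instance
		 */
		clocks = <&cm_l4per_clkctrl 0x28 24>;
		clock-names = "fck";
	};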
diff --git a/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt b/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt
index 10f7047..21c002d 100644
--- a/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt
+++ b/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt
@@ -43,7 +43,7 @@
 	- aws : Audio word select signal selection
 };
 
-For valid word select signals, see the dt-bindings/clk/ti-dra7-atl.h include
+For valid word select signals, see the dt-bindings/clock/ti-dra7-atl.h include
 file.
 
 Examples:
@@ -83,7 +83,7 @@
 	clock-names = "fck";
 };
 
-#include <dt-bindings/clk/ti-dra7-atl.h>
+#include <dt-bindings/clock/ti-dra7-atl.h>
 
 &atl {
 
diff --git a/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml
new file mode 100644
index 0000000..229af98
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/xlnx,versal-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx Versal clock controller
+
+maintainers:
+  - Michal Simek <michal.simek@xilinx.com>
+  - Jolly Shah <jolly.shah@xilinx.com>
+  - Rajan Vaja <rajan.vaja@xilinx.com>
+
+description: |
+  The clock controller is a hardware block of the Xilinx Versal clock
+  tree. It reads the required input clock frequencies from the devicetree
+  and acts as the clock provider for all clock consumers of PS clocks.
+
+select: false
+
+properties:
+  compatible:
+    const: xlnx,versal-clk
+
+  "#clock-cells":
+    const: 1
+
+  clocks:
+    description: List of clock specifiers which are external input
+      clocks to the given clock controller.
+    items:
+      - description: reference clock
+      - description: alternate reference clock
+      - description: alternate reference clock for programmable logic
+
+  clock-names:
+    items:
+      - const: ref
+      - const: alt_ref
+      - const: pl_alt_ref
+
+required:
+  - compatible
+  - "#clock-cells"
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    firmware {
+      zynqmp_firmware: zynqmp-firmware {
+        compatible = "xlnx,zynqmp-firmware";
+        method = "smc";
+        versal_clk: clock-controller {
+          #clock-cells = <1>;
+          compatible = "xlnx,versal-clk";
+          clocks = <&ref>, <&alt_ref>, <&pl_alt_ref>;
+          clock-names = "ref", "alt_ref", "pl_alt_ref";
+        };
+      };
+    };
+...
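
Consumers of the PS clocks exposed by this controller reference them with a
single cell, the clock index. A hedged sketch of such a consumer (the node,
compatible string, register range and index 32 are illustrative
assumptions, not part of this schema):

    serial@0 {
        compatible = "vendor,example-uart";   /* placeholder compatible */
        reg = <0x0 0x1000>;
        clocks = <&versal_clk 32>;            /* 32: illustrative clock index */
    };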
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.txt b/Documentation/devicetree/bindings/connector/usb-connector.txt
index d357987..88578ac 100644
--- a/Documentation/devicetree/bindings/connector/usb-connector.txt
+++ b/Documentation/devicetree/bindings/connector/usb-connector.txt
@@ -1,8 +1,8 @@
 USB Connector
 =============
 
-USB connector node represents physical USB connector. It should be
-a child of USB interface controller.
+A USB connector node represents a physical USB connector. It should be
+a child of a USB interface controller.
 
 Required properties:
 - compatible: describes type of the connector, must be one of:
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml
new file mode 100644
index 0000000..86057d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml
@@ -0,0 +1,291 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/allwinner,sun4i-a10-display-backend.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Display Engine Backend Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+description: |
+  The display engine backend exposes layers and sprites to the system.
+
+properties:
+  compatible:
+    enum:
+      - allwinner,sun4i-a10-display-backend
+      - allwinner,sun5i-a13-display-backend
+      - allwinner,sun6i-a31-display-backend
+      - allwinner,sun7i-a20-display-backend
+      - allwinner,sun8i-a23-display-backend
+      - allwinner,sun8i-a33-display-backend
+      - allwinner,sun9i-a80-display-backend
+
+  reg:
+    minItems: 1
+    maxItems: 2
+    items:
+      - description: Display Backend registers
+      - description: SAT registers
+
+  reg-names:
+    minItems: 1
+    maxItems: 2
+    items:
+      - const: be
+      - const: sat
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 3
+    maxItems: 4
+    items:
+      - description: The backend interface clock
+      - description: The backend module clock
+      - description: The backend DRAM clock
+      - description: The SAT clock
+
+  clock-names:
+    minItems: 3
+    maxItems: 4
+    items:
+      - const: ahb
+      - const: mod
+      - const: ram
+      - const: sat
+
+  resets:
+    minItems: 1
+    maxItems: 2
+    items:
+      - description: The Backend reset line
+      - description: The SAT reset line
+
+  reset-names:
+    minItems: 1
+    maxItems: 2
+    items:
+      - const: be
+      - const: sat
+
+  # FIXME: This should be made required eventually once every SoC has
+  # the MBUS declared.
+  interconnects:
+    maxItems: 1
+
+  # FIXME: This should be made required eventually once every SoC has
+  # the MBUS declared.
+  interconnect-names:
+    const: dma-mem
+
+  ports:
+    type: object
+    description: |
+      A ports node with endpoint definitions as defined in
+      Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        description: |
+          Input endpoints of the controller.
+
+      port@1:
+        type: object
+        description: |
+          Output endpoints of the controller.
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+      - port@0
+      - port@1
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - resets
+  - ports
+
+additionalProperties: false
+
+if:
+  properties:
+    compatible:
+      contains:
+        const: allwinner,sun8i-a33-display-backend
+
+then:
+  properties:
+    reg:
+      minItems: 2
+
+    reg-names:
+      minItems: 2
+
+    clocks:
+      minItems: 4
+
+    clock-names:
+      minItems: 4
+
+    resets:
+      minItems: 2
+
+    reset-names:
+      minItems: 2
+
+  required:
+    - reg-names
+    - reset-names
+
+else:
+  properties:
+    reg:
+      maxItems: 1
+
+    reg-names:
+      maxItems: 1
+
+    clocks:
+      maxItems: 3
+
+    clock-names:
+      maxItems: 3
+
+    resets:
+      maxItems: 1
+
+    reset-names:
+      maxItems: 1
+
+examples:
+  - |
+    /*
+     * This comes from the clock/sun4i-a10-ccu.h and
+     * reset/sun4i-a10-ccu.h headers, but we can't include them since
+     * it would trigger a bunch of warnings for redefinitions of
+     * symbols with the other example.
+     */
+
+    #define CLK_AHB_DE_BE0	42
+    #define CLK_DRAM_DE_BE0	140
+    #define CLK_DE_BE0		144
+    #define RST_DE_BE0		5
+
+    display-backend@1e60000 {
+        compatible = "allwinner,sun4i-a10-display-backend";
+        reg = <0x01e60000 0x10000>;
+        interrupts = <47>;
+        clocks = <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_DE_BE0>,
+                 <&ccu CLK_DRAM_DE_BE0>;
+        clock-names = "ahb", "mod",
+                      "ram";
+        resets = <&ccu RST_DE_BE0>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+                reg = <0>;
+
+                endpoint@0 {
+                    reg = <0>;
+                    remote-endpoint = <&fe0_out_be0>;
+                };
+
+                endpoint@1 {
+                    reg = <1>;
+                    remote-endpoint = <&fe1_out_be0>;
+                };
+            };
+
+            port@1 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+                reg = <1>;
+
+                endpoint@0 {
+                    reg = <0>;
+                    remote-endpoint = <&tcon0_in_be0>;
+                };
+
+                endpoint@1 {
+                    reg = <1>;
+                    remote-endpoint = <&tcon1_in_be0>;
+                };
+            };
+        };
+    };
+
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    /*
+     * This comes from the clock/sun8i-a23-a33-ccu.h and
+     * reset/sun8i-a23-a33-ccu.h headers, but we can't include them
+     * since it would trigger a bunch of warnings for redefinitions of
+     * symbols with the other example.
+     */
+
+    #define CLK_BUS_DE_BE	40
+    #define CLK_BUS_SAT		46
+    #define CLK_DRAM_DE_BE	84
+    #define CLK_DE_BE		85
+    #define RST_BUS_DE_BE	21
+    #define RST_BUS_SAT		27
+
+    display-backend@1e60000 {
+        compatible = "allwinner,sun8i-a33-display-backend";
+        reg = <0x01e60000 0x10000>, <0x01e80000 0x1000>;
+        reg-names = "be", "sat";
+        interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&ccu CLK_BUS_DE_BE>, <&ccu CLK_DE_BE>,
+                 <&ccu CLK_DRAM_DE_BE>, <&ccu CLK_BUS_SAT>;
+        clock-names = "ahb", "mod",
+                      "ram", "sat";
+        resets = <&ccu RST_BUS_DE_BE>, <&ccu RST_BUS_SAT>;
+        reset-names = "be", "sat";
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                reg = <0>;
+
+                endpoint {
+                    remote-endpoint = <&fe0_out_be0>;
+                };
+            };
+
+            port@1 {
+                reg = <1>;
+
+                endpoint {
+                    remote-endpoint = <&drc0_in_be0>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml
new file mode 100644
index 0000000..944ff2f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/allwinner,sun4i-a10-display-engine.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Display Engine Pipeline Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+description: |
+  The display engine pipeline (and its entry point, which can be either
+  the frontend or, directly, the backend) is represented as an extra
+  node.
+
+  The Allwinner A10 Display pipeline is composed of several components
+  that are going to be documented below:
+
+  For all connections between components up to the TCONs in the
+  display pipeline, when there are multiple components of the same
+  type at the same depth, the local endpoint ID must be the same as
+  the remote component's index. For example, if the remote endpoint is
+  Frontend 1, then the local endpoint ID must be 1.
+
+  Frontend 0  [0] ------- [0]  Backend 0  [0] ------- [0]  TCON 0
+              [1] --   -- [1]             [1] --   -- [1]
+                    \ /                         \ /
+                     X                           X
+                    / \                         / \
+              [0] --   -- [0]             [0] --   -- [0]
+  Frontend 1  [1] ------- [1]  Backend 1  [1] ------- [1]  TCON 1
+
+  For a two-pipeline system such as the one depicted above, the lines
+  represent the connections between the components, while the numbers
+  within the square brackets correspond to the IDs of the local endpoints.
+
+  The same rule also applies to DE 2.0 mixer-TCON connections:
+
+  Mixer 0  [0] ----------- [0]  TCON 0
+           [1] ----   ---- [1]
+                   \ /
+                    X
+                   / \
+           [0] ----   ---- [0]
+  Mixer 1  [1] ----------- [1]  TCON 1
+
+properties:
+  compatible:
+    enum:
+      - allwinner,sun4i-a10-display-engine
+      - allwinner,sun5i-a10s-display-engine
+      - allwinner,sun5i-a13-display-engine
+      - allwinner,sun6i-a31-display-engine
+      - allwinner,sun6i-a31s-display-engine
+      - allwinner,sun7i-a20-display-engine
+      - allwinner,sun8i-a23-display-engine
+      - allwinner,sun8i-a33-display-engine
+      - allwinner,sun8i-a83t-display-engine
+      - allwinner,sun8i-h3-display-engine
+      - allwinner,sun8i-r40-display-engine
+      - allwinner,sun8i-v3s-display-engine
+      - allwinner,sun9i-a80-display-engine
+      - allwinner,sun50i-a64-display-engine
+      - allwinner,sun50i-h6-display-engine
+
+  allwinner,pipelines:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/phandle-array
+      - minItems: 1
+        maxItems: 2
+    description: |
+      Available display engine frontends (DE 1.0) or mixers (DE
+      2.0/3.0).
+
+required:
+  - compatible
+  - allwinner,pipelines
+
+additionalProperties: false
+
+if:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - allwinner,sun4i-a10-display-engine
+          - allwinner,sun6i-a31-display-engine
+          - allwinner,sun6i-a31s-display-engine
+          - allwinner,sun7i-a20-display-engine
+          - allwinner,sun8i-a83t-display-engine
+          - allwinner,sun8i-r40-display-engine
+          - allwinner,sun9i-a80-display-engine
+          - allwinner,sun50i-a64-display-engine
+
+then:
+  properties:
+    allwinner,pipelines:
+      minItems: 2
+
+else:
+  properties:
+    allwinner,pipelines:
+      maxItems: 1
+
+examples:
+  - |
+      de: display-engine {
+          compatible = "allwinner,sun4i-a10-display-engine";
+          allwinner,pipelines = <&fe0>, <&fe1>;
+      };
+
+...
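
As a concrete illustration of the endpoint numbering rule above, a hedged
sketch of Backend 0's input port in a two-pipeline system: the endpoint
connected to Frontend 0 carries local ID 0, and the endpoint connected to
Frontend 1 carries local ID 1 (the labels are assumptions chosen to match
the other examples in this series):

    port@0 {
        #address-cells = <1>;
        #size-cells = <0>;
        reg = <0>;

        be0_in_fe0: endpoint@0 {
            reg = <0>;                        /* remote is Frontend 0 */
            remote-endpoint = <&fe0_out_be0>;
        };

        be0_in_fe1: endpoint@1 {
            reg = <1>;                        /* remote is Frontend 1 */
            remote-endpoint = <&fe1_out_be0>;
        };
    };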
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml
new file mode 100644
index 0000000..3eb1c2b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml
@@ -0,0 +1,138 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/allwinner,sun4i-a10-display-frontend.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Display Engine Frontend Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+description: |
+  The display engine frontend does format conversion, scaling,
+  deinterlacing and color space conversion.
+
+properties:
+  compatible:
+    enum:
+      - allwinner,sun4i-a10-display-frontend
+      - allwinner,sun5i-a13-display-frontend
+      - allwinner,sun6i-a31-display-frontend
+      - allwinner,sun7i-a20-display-frontend
+      - allwinner,sun8i-a23-display-frontend
+      - allwinner,sun8i-a33-display-frontend
+      - allwinner,sun9i-a80-display-frontend
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: The frontend interface clock
+      - description: The frontend module clock
+      - description: The frontend DRAM clock
+
+  clock-names:
+    items:
+      - const: ahb
+      - const: mod
+      - const: ram
+
+  # FIXME: This should be made required eventually once every SoC has
+  # the MBUS declared.
+  interconnects:
+    maxItems: 1
+
+  # FIXME: This should be made required eventually once every SoC has
+  # the MBUS declared.
+  interconnect-names:
+    const: dma-mem
+
+  resets:
+    maxItems: 1
+
+  ports:
+    type: object
+    description: |
+      A ports node with endpoint definitions as defined in
+      Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        description: |
+          Input endpoints of the controller.
+
+      port@1:
+        type: object
+        description: |
+          Output endpoints of the controller.
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+      - port@1
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - resets
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/sun4i-a10-ccu.h>
+    #include <dt-bindings/reset/sun4i-a10-ccu.h>
+
+    fe0: display-frontend@1e00000 {
+        compatible = "allwinner,sun4i-a10-display-frontend";
+        reg = <0x01e00000 0x20000>;
+        interrupts = <47>;
+        clocks = <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_DE_FE0>,
+                 <&ccu CLK_DRAM_DE_FE0>;
+        clock-names = "ahb", "mod",
+                      "ram";
+        resets = <&ccu RST_DE_FE0>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            fe0_out: port@1 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+                reg = <1>;
+
+                fe0_out_be0: endpoint@0 {
+                    reg = <0>;
+                    remote-endpoint = <&be0_in_fe0>;
+                };
+
+                fe0_out_be1: endpoint@1 {
+                    reg = <1>;
+                    remote-endpoint = <&be1_in_fe0>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml
new file mode 100644
index 0000000..5d4915a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml
@@ -0,0 +1,183 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/allwinner,sun4i-a10-hdmi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 HDMI Controller Device Tree Bindings
+
+description: |
+  The HDMI Encoder supports the HDMI video and audio outputs, and does
+  CEC. It is one end of the pipeline.
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  compatible:
+    oneOf:
+      - const: allwinner,sun4i-a10-hdmi
+      - const: allwinner,sun5i-a10s-hdmi
+      - const: allwinner,sun6i-a31-hdmi
+      - items:
+        - const: allwinner,sun7i-a20-hdmi
+        - const: allwinner,sun5i-a10s-hdmi
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    oneOf:
+      - items:
+        - description: The HDMI interface clock
+        - description: The HDMI module clock
+        - description: The first video PLL
+        - description: The second video PLL
+
+      - items:
+        - description: The HDMI interface clock
+        - description: The HDMI module clock
+        - description: The HDMI DDC clock
+        - description: The first video PLL
+        - description: The second video PLL
+
+  clock-names:
+    oneOf:
+      - items:
+        - const: ahb
+        - const: mod
+        - const: pll-0
+        - const: pll-1
+
+      - items:
+        - const: ahb
+        - const: mod
+        - const: ddc
+        - const: pll-0
+        - const: pll-1
+
+  resets:
+    maxItems: 1
+
+  dmas:
+    items:
+      - description: DDC Transmission DMA Channel
+      - description: DDC Reception DMA Channel
+      - description: Audio Transmission DMA Channel
+
+  dma-names:
+    items:
+      - const: ddc-tx
+      - const: ddc-rx
+      - const: audio-tx
+
+  ports:
+    type: object
+    description: |
+      A ports node with endpoint definitions as defined in
+      Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        description: |
+          Input endpoints of the controller.
+
+      port@1:
+        type: object
+        description: |
+          Output endpoints of the controller. Usually an HDMI
+          connector.
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+      - port@0
+      - port@1
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - dmas
+  - dma-names
+
+if:
+  properties:
+    compatible:
+      contains:
+        const: allwinner,sun6i-a31-hdmi
+
+then:
+  properties:
+    clocks:
+      minItems: 5
+
+    clock-names:
+      minItems: 5
+
+  required:
+    - resets
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/sun4i-a10-ccu.h>
+    #include <dt-bindings/dma/sun4i-a10.h>
+    #include <dt-bindings/reset/sun4i-a10-ccu.h>
+
+    hdmi: hdmi@1c16000 {
+        compatible = "allwinner,sun4i-a10-hdmi";
+        reg = <0x01c16000 0x1000>;
+        interrupts = <58>;
+        clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>,
+                 <&ccu CLK_PLL_VIDEO0_2X>,
+                 <&ccu CLK_PLL_VIDEO1_2X>;
+        clock-names = "ahb", "mod", "pll-0", "pll-1";
+        dmas = <&dma SUN4I_DMA_NORMAL 16>,
+               <&dma SUN4I_DMA_NORMAL 16>,
+               <&dma SUN4I_DMA_DEDICATED 24>;
+        dma-names = "ddc-tx", "ddc-rx", "audio-tx";
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            hdmi_in: port@0 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+                reg = <0>;
+
+                hdmi_in_tcon0: endpoint@0 {
+                    reg = <0>;
+                    remote-endpoint = <&tcon0_out_hdmi>;
+                };
+
+                hdmi_in_tcon1: endpoint@1 {
+                    reg = <1>;
+                    remote-endpoint = <&tcon1_out_hdmi>;
+                };
+            };
+
+            hdmi_out: port@1 {
+                reg = <1>;
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
new file mode 100644
index 0000000..86ad617
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
@@ -0,0 +1,676 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/allwinner,sun4i-a10-tcon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Timings Controller (TCON) Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+description: |
+  The TCON acts as a timing controller for RGB, LVDS and TV
+  interfaces.
+
+properties:
+  "#clock-cells":
+    const: 0
+
+  compatible:
+    oneOf:
+      - const: allwinner,sun4i-a10-tcon
+      - const: allwinner,sun5i-a13-tcon
+      - const: allwinner,sun6i-a31-tcon
+      - const: allwinner,sun6i-a31s-tcon
+      - const: allwinner,sun7i-a20-tcon
+      - const: allwinner,sun8i-a23-tcon
+      - const: allwinner,sun8i-a33-tcon
+      - const: allwinner,sun8i-a83t-tcon-lcd
+      - const: allwinner,sun8i-a83t-tcon-tv
+      - const: allwinner,sun8i-r40-tcon-tv
+      - const: allwinner,sun8i-v3s-tcon
+      - const: allwinner,sun9i-a80-tcon-lcd
+      - const: allwinner,sun9i-a80-tcon-tv
+
+      - items:
+        - enum:
+            - allwinner,sun50i-a64-tcon-lcd
+        - const: allwinner,sun8i-a83t-tcon-lcd
+
+      - items:
+        - enum:
+          - allwinner,sun8i-h3-tcon-tv
+          - allwinner,sun50i-a64-tcon-tv
+          - allwinner,sun50i-h6-tcon-tv
+        - const: allwinner,sun8i-a83t-tcon-tv
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 1
+    maxItems: 4
+
+  clock-names:
+    minItems: 1
+    maxItems: 4
+
+  clock-output-names:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/string-array
+      - maxItems: 1
+    description:
+      Name of the LCD pixel clock created.
+
+  dmas:
+    maxItems: 1
+
+  resets:
+    anyOf:
+      - items:
+        - description: TCON Reset Line
+
+      - items:
+        - description: TCON Reset Line
+        - description: TCON LVDS Reset Line
+
+      - items:
+        - description: TCON Reset Line
+        - description: TCON eDP Reset Line
+
+      - items:
+        - description: TCON Reset Line
+        - description: TCON eDP Reset Line
+        - description: TCON LVDS Reset Line
+
+  reset-names:
+    oneOf:
+      - const: lcd
+
+      - items:
+        - const: lcd
+        - const: lvds
+
+      - items:
+        - const: lcd
+        - const: edp
+
+      - items:
+        - const: lcd
+        - const: edp
+        - const: lvds
+
+  ports:
+    type: object
+    description: |
+      A ports node with endpoint definitions as defined in
+      Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        description: |
+          Input endpoints of the controller.
+
+      port@1:
+        type: object
+        description: |
+          Output endpoints of the controller.
+
+        patternProperties:
+          "^endpoint(@[0-9])$":
+            type: object
+
+            properties:
+              allwinner,tcon-channel:
+                $ref: /schemas/types.yaml#/definitions/uint32
+                description: |
+                  The TCON can have 1 or 2 channels, usually with the
+                  first channel being used for the panel interfaces
+                  (RGB, LVDS, etc.) and the second being used for the
+                  outputs that require another controller (TV Encoder,
+                  HDMI, etc.).
+
+                  If present, this property specifies the TCON channel
+                  the endpoint is associated with. If it is not present,
+                  the endpoint number is used as the channel number.
+
+            unevaluatedProperties: true
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+      - port@0
+      - port@1
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - resets
+  - ports
+
+additionalProperties: false
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - allwinner,sun4i-a10-tcon
+              - allwinner,sun5i-a13-tcon
+              - allwinner,sun7i-a20-tcon
+
+    then:
+      properties:
+        clocks:
+          minItems: 3
+
+        clock-names:
+          items:
+            - const: ahb
+            - const: tcon-ch0
+            - const: tcon-ch1
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - allwinner,sun6i-a31-tcon
+              - allwinner,sun6i-a31s-tcon
+
+    then:
+      properties:
+        clocks:
+          minItems: 4
+
+        clock-names:
+          items:
+            - const: ahb
+            - const: tcon-ch0
+            - const: tcon-ch1
+            - const: lvds-alt
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - allwinner,sun8i-a23-tcon
+              - allwinner,sun8i-a33-tcon
+
+    then:
+      properties:
+        clocks:
+          minItems: 3
+
+        clock-names:
+          items:
+            - const: ahb
+            - const: tcon-ch0
+            - const: lvds-alt
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - allwinner,sun8i-a83t-tcon-lcd
+              - allwinner,sun8i-v3s-tcon
+              - allwinner,sun9i-a80-tcon-lcd
+
+    then:
+      properties:
+        clocks:
+          minItems: 2
+
+        clock-names:
+          items:
+            - const: ahb
+            - const: tcon-ch0
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - allwinner,sun8i-a83t-tcon-tv
+              - allwinner,sun8i-r40-tcon-tv
+              - allwinner,sun9i-a80-tcon-tv
+
+    then:
+      properties:
+        clocks:
+          minItems: 2
+
+        clock-names:
+          items:
+            - const: ahb
+            - const: tcon-ch1
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - allwinner,sun5i-a13-tcon
+              - allwinner,sun6i-a31-tcon
+              - allwinner,sun6i-a31s-tcon
+              - allwinner,sun7i-a20-tcon
+              - allwinner,sun8i-a23-tcon
+              - allwinner,sun8i-a33-tcon
+              - allwinner,sun8i-v3s-tcon
+              - allwinner,sun9i-a80-tcon-lcd
+              - allwinner,sun4i-a10-tcon
+              - allwinner,sun8i-a83t-tcon-lcd
+
+    then:
+      required:
+        - "#clock-cells"
+        - clock-output-names
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - allwinner,sun6i-a31-tcon
+              - allwinner,sun6i-a31s-tcon
+              - allwinner,sun8i-a23-tcon
+              - allwinner,sun8i-a33-tcon
+              - allwinner,sun8i-a83t-tcon-lcd
+
+    then:
+      properties:
+        resets:
+          minItems: 2
+
+        reset-names:
+          items:
+            - const: lcd
+            - const: lvds
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - allwinner,sun9i-a80-tcon-lcd
+
+    then:
+      properties:
+        resets:
+          minItems: 3
+
+        reset-names:
+          items:
+            - const: lcd
+            - const: edp
+            - const: lvds
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - allwinner,sun9i-a80-tcon-tv
+
+    then:
+      properties:
+        resets:
+          minItems: 2
+
+        reset-names:
+          items:
+            - const: lcd
+            - const: edp
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - allwinner,sun4i-a10-tcon
+              - allwinner,sun5i-a13-tcon
+              - allwinner,sun6i-a31-tcon
+              - allwinner,sun6i-a31s-tcon
+              - allwinner,sun7i-a20-tcon
+              - allwinner,sun8i-a23-tcon
+              - allwinner,sun8i-a33-tcon
+
+    then:
+      required:
+        - dmas
+
+examples:
+  - |
+    #include <dt-bindings/dma/sun4i-a10.h>
+
+    /*
+     * This comes from the clock/sun4i-a10-ccu.h and
+     * reset/sun4i-a10-ccu.h headers, but we can't include them since
+     * it would trigger a bunch of warnings for redefinitions of
+     * symbols with the other example.
+     */
+
+    #define CLK_AHB_LCD0	56
+    #define CLK_TCON0_CH0	149
+    #define CLK_TCON0_CH1	155
+    #define RST_TCON0		11
+
+    lcd-controller@1c0c000 {
+        compatible = "allwinner,sun4i-a10-tcon";
+        reg = <0x01c0c000 0x1000>;
+        interrupts = <44>;
+        resets = <&ccu RST_TCON0>;
+        reset-names = "lcd";
+        clocks = <&ccu CLK_AHB_LCD0>,
+                 <&ccu CLK_TCON0_CH0>,
+                 <&ccu CLK_TCON0_CH1>;
+        clock-names = "ahb",
+                      "tcon-ch0",
+                      "tcon-ch1";
+        clock-output-names = "tcon0-pixel-clock";
+        #clock-cells = <0>;
+        dmas = <&dma SUN4I_DMA_DEDICATED 14>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+                reg = <0>;
+
+                endpoint@0 {
+                    reg = <0>;
+                    remote-endpoint = <&be0_out_tcon0>;
+                };
+
+                endpoint@1 {
+                    reg = <1>;
+                    remote-endpoint = <&be1_out_tcon0>;
+                };
+            };
+
+            port@1 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+                reg = <1>;
+
+                endpoint@1 {
+                    reg = <1>;
+                    remote-endpoint = <&hdmi_in_tcon0>;
+                    allwinner,tcon-channel = <1>;
+                };
+            };
+        };
+    };
+
+    #undef CLK_AHB_LCD0
+    #undef CLK_TCON0_CH0
+    #undef CLK_TCON0_CH1
+    #undef RST_TCON0
+
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    /*
+     * This comes from the clock/sun6i-a31-ccu.h and
+     * reset/sun6i-a31-ccu.h headers, but we can't include them since
+     * it would trigger a bunch of warnings for redefinitions of
+     * symbols with the other example.
+     */
+
+    #define CLK_PLL_MIPI	15
+    #define CLK_AHB1_LCD0	47
+    #define CLK_LCD0_CH0	127
+    #define CLK_LCD0_CH1	129
+    #define RST_AHB1_LCD0	27
+    #define RST_AHB1_LVDS	41
+
+    lcd-controller@1c0c000 {
+        compatible = "allwinner,sun6i-a31-tcon";
+        reg = <0x01c0c000 0x1000>;
+        interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+        dmas = <&dma 11>;
+        resets = <&ccu RST_AHB1_LCD0>, <&ccu RST_AHB1_LVDS>;
+        reset-names = "lcd", "lvds";
+        clocks = <&ccu CLK_AHB1_LCD0>,
+                 <&ccu CLK_LCD0_CH0>,
+                 <&ccu CLK_LCD0_CH1>,
+                 <&ccu CLK_PLL_MIPI>;
+        clock-names = "ahb",
+                      "tcon-ch0",
+                      "tcon-ch1",
+                      "lvds-alt";
+        clock-output-names = "tcon0-pixel-clock";
+        #clock-cells = <0>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+                reg = <0>;
+
+                endpoint@0 {
+                    reg = <0>;
+                    remote-endpoint = <&drc0_out_tcon0>;
+                };
+
+                endpoint@1 {
+                    reg = <1>;
+                    remote-endpoint = <&drc1_out_tcon0>;
+                };
+            };
+
+            port@1 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+                reg = <1>;
+
+                endpoint@1 {
+                    reg = <1>;
+                    remote-endpoint = <&hdmi_in_tcon0>;
+                    allwinner,tcon-channel = <1>;
+                };
+            };
+        };
+    };
+
+    #undef CLK_PLL_MIPI
+    #undef CLK_AHB1_LCD0
+    #undef CLK_LCD0_CH0
+    #undef CLK_LCD0_CH1
+    #undef RST_AHB1_LCD0
+    #undef RST_AHB1_LVDS
+
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    /*
+     * This comes from the clock/sun9i-a80-ccu.h and
+     * reset/sun9i-a80-ccu.h headers, but we can't include them since
+     * it would trigger a bunch of warnings for redefinitions of
+     * symbols with the other example.
+     */
+
+    #define CLK_BUS_LCD0	102
+    #define CLK_LCD0		58
+    #define RST_BUS_LCD0	22
+    #define RST_BUS_EDP		24
+    #define RST_BUS_LVDS	25
+
+    lcd-controller@3c00000 {
+        compatible = "allwinner,sun9i-a80-tcon-lcd";
+        reg = <0x03c00000 0x10000>;
+        interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&ccu CLK_BUS_LCD0>, <&ccu CLK_LCD0>;
+        clock-names = "ahb", "tcon-ch0";
+        resets = <&ccu RST_BUS_LCD0>, <&ccu RST_BUS_EDP>, <&ccu RST_BUS_LVDS>;
+        reset-names = "lcd", "edp", "lvds";
+        clock-output-names = "tcon0-pixel-clock";
+        #clock-cells = <0>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                reg = <0>;
+
+                endpoint {
+                    remote-endpoint = <&drc0_out_tcon0>;
+                };
+            };
+
+            port@1 {
+                reg = <1>;
+            };
+        };
+    };
+
+    #undef CLK_BUS_LCD0
+    #undef CLK_LCD0
+    #undef RST_BUS_LCD0
+    #undef RST_BUS_EDP
+    #undef RST_BUS_LVDS
+
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    /*
+     * This comes from the clock/sun8i-a83t-ccu.h and
+     * reset/sun8i-a83t-ccu.h headers, but we can't include them since
+     * it would trigger a bunch of warnings for redefinitions of
+     * symbols with the other example.
+     */
+
+    #define CLK_BUS_TCON0	36
+    #define CLK_TCON0		85
+    #define RST_BUS_TCON0	22
+    #define RST_BUS_LVDS	31
+
+    lcd-controller@1c0c000 {
+        compatible = "allwinner,sun8i-a83t-tcon-lcd";
+        reg = <0x01c0c000 0x1000>;
+        interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&ccu CLK_BUS_TCON0>, <&ccu CLK_TCON0>;
+        clock-names = "ahb", "tcon-ch0";
+        clock-output-names = "tcon-pixel-clock";
+        #clock-cells = <0>;
+        resets = <&ccu RST_BUS_TCON0>, <&ccu RST_BUS_LVDS>;
+        reset-names = "lcd", "lvds";
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+                reg = <0>;
+
+                endpoint@0 {
+                    reg = <0>;
+                    remote-endpoint = <&mixer0_out_tcon0>;
+                };
+
+                endpoint@1 {
+                    reg = <1>;
+                    remote-endpoint = <&mixer1_out_tcon0>;
+                };
+            };
+
+            port@1 {
+                reg = <1>;
+            };
+        };
+    };
+
+    #undef CLK_BUS_TCON0
+    #undef CLK_TCON0
+    #undef RST_BUS_TCON0
+    #undef RST_BUS_LVDS
+
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    /*
+     * This comes from the clock/sun8i-r40-ccu.h and
+     * reset/sun8i-r40-ccu.h headers, but we can't include them since
+     * it would trigger a bunch of warnings for redefinitions of
+     * symbols with the other example.
+     */
+
+    #define CLK_BUS_TCON_TV0	73
+    #define RST_BUS_TCON_TV0	49
+
+    tcon_tv0: lcd-controller@1c73000 {
+        compatible = "allwinner,sun8i-r40-tcon-tv";
+        reg = <0x01c73000 0x1000>;
+        interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&ccu CLK_BUS_TCON_TV0>, <&tcon_top 0>;
+        clock-names = "ahb", "tcon-ch1";
+        resets = <&ccu RST_BUS_TCON_TV0>;
+        reset-names = "lcd";
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+                reg = <0>;
+
+                endpoint@0 {
+                    reg = <0>;
+                    remote-endpoint = <&tcon_top_mixer0_out_tcon_tv0>;
+                };
+
+                endpoint@1 {
+                    reg = <1>;
+                    remote-endpoint = <&tcon_top_mixer1_out_tcon_tv0>;
+                };
+            };
+
+            tcon_tv0_out: port@1 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+                reg = <1>;
+
+                endpoint@1 {
+                    reg = <1>;
+                    remote-endpoint = <&tcon_top_hdmi_in_tcon_tv0>;
+                };
+            };
+        };
+    };
+
+    #undef CLK_BUS_TCON_TV0
+    #undef RST_BUS_TCON_TV0
+
+...
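
The allwinner,tcon-channel property can be illustrated with a hedged sketch
of a TCON output port (labels and remote endpoints are assumptions):
endpoint@0 falls back to channel 0 because the property is absent, while
endpoint@1 explicitly routes to channel 1, as the examples above also do:

    port@1 {
        #address-cells = <1>;
        #size-cells = <0>;
        reg = <1>;

        tcon0_out_panel: endpoint@0 {
            reg = <0>;                        /* no property: channel 0 */
            remote-endpoint = <&panel_in_tcon0>;
        };

        tcon0_out_hdmi: endpoint@1 {
            reg = <1>;
            remote-endpoint = <&hdmi_in_tcon0>;
            allwinner,tcon-channel = <1>;     /* explicit: channel 1 */
        };
    };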
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
new file mode 100644
index 0000000..5d5d3966
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/allwinner,sun4i-a10-tv-encoder.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 TV Encoder Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  compatible:
+    const: allwinner,sun4i-a10-tv-encoder
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  port:
+    type: object
+    description:
+      A port node with endpoint definitions as defined in
+      Documentation/devicetree/bindings/media/video-interfaces.txt. The
+      first port should be the input endpoint, usually coming from the
+      associated TCON.
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - resets
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    tve0: tv-encoder@1c0a000 {
+        compatible = "allwinner,sun4i-a10-tv-encoder";
+        reg = <0x01c0a000 0x1000>;
+        clocks = <&ahb_gates 34>;
+        resets = <&tcon_ch0_clk 0>;
+
+        port {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            tve0_in_tcon0: endpoint@0 {
+                reg = <0>;
+                remote-endpoint = <&tcon0_out_tve0>;
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml
new file mode 100644
index 0000000..0c1ce55
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml
@@ -0,0 +1,138 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/allwinner,sun6i-a31-drc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A31 Dynamic Range Controller Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+description: |
+  The DRC (Dynamic Range Controller) allows dynamic adjustment of pixel
+  brightness/contrast based on histogram measurements, for LCD
+  content-adaptive backlight control.
+
+properties:
+  compatible:
+    enum:
+      - allwinner,sun6i-a31-drc
+      - allwinner,sun6i-a31s-drc
+      - allwinner,sun8i-a23-drc
+      - allwinner,sun8i-a33-drc
+      - allwinner,sun9i-a80-drc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: The DRC interface clock
+      - description: The DRC module clock
+      - description: The DRC DRAM clock
+
+  clock-names:
+    items:
+      - const: ahb
+      - const: mod
+      - const: ram
+
+  resets:
+    maxItems: 1
+
+  ports:
+    type: object
+    description: |
+      A ports node with endpoint definitions as defined in
+      Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        description: |
+          Input endpoints of the controller.
+
+      port@1:
+        type: object
+        description: |
+          Output endpoints of the controller.
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+      - port@0
+      - port@1
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - resets
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    #include <dt-bindings/clock/sun6i-a31-ccu.h>
+    #include <dt-bindings/reset/sun6i-a31-ccu.h>
+
+    drc0: drc@1e70000 {
+        compatible = "allwinner,sun6i-a31-drc";
+        reg = <0x01e70000 0x10000>;
+        interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&ccu CLK_AHB1_DRC0>, <&ccu CLK_IEP_DRC0>,
+                 <&ccu CLK_DRAM_DRC0>;
+        clock-names = "ahb", "mod",
+                      "ram";
+        resets = <&ccu RST_AHB1_DRC0>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            drc0_in: port@0 {
+                reg = <0>;
+
+                drc0_in_be0: endpoint {
+                    remote-endpoint = <&be0_out_drc0>;
+                };
+            };
+
+            drc0_out: port@1 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+                reg = <1>;
+
+                drc0_out_tcon0: endpoint@0 {
+                    reg = <0>;
+                    remote-endpoint = <&tcon0_in_drc0>;
+                };
+
+                drc0_out_tcon1: endpoint@1 {
+                    reg = <1>;
+                    remote-endpoint = <&tcon1_in_drc0>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
index 0f70749..9e90c2b 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
@@ -15,7 +15,9 @@
   "#size-cells": true
 
   compatible:
-    const: allwinner,sun6i-a31-mipi-dsi
+    enum:
+      - allwinner,sun6i-a31-mipi-dsi
+      - allwinner,sun50i-a64-mipi-dsi
 
   reg:
     maxItems: 1
@@ -24,6 +26,8 @@
     maxItems: 1
 
   clocks:
+    minItems: 1
+    maxItems: 2
     items:
       - description: Bus Clock
       - description: Module Clock
@@ -63,13 +67,38 @@
   - reg
   - interrupts
   - clocks
-  - clock-names
   - phys
   - phy-names
   - resets
   - vcc-dsi-supply
   - port
 
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: allwinner,sun6i-a31-mipi-dsi
+
+    then:
+      properties:
+        clocks:
+          minItems: 2
+
+      required:
+        - clock-names
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: allwinner,sun50i-a64-mipi-dsi
+
+    then:
+      properties:
+        clocks:
+          minItems: 1
+
 additionalProperties: false
 
 examples:
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml
new file mode 100644
index 0000000..1dee641
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/allwinner,sun8i-a83t-de2-mixer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner Display Engine 2.0 Mixer Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  compatible:
+    enum:
+      - allwinner,sun8i-a83t-de2-mixer-0
+      - allwinner,sun8i-a83t-de2-mixer-1
+      - allwinner,sun8i-h3-de2-mixer-0
+      - allwinner,sun8i-r40-de2-mixer-0
+      - allwinner,sun8i-r40-de2-mixer-1
+      - allwinner,sun8i-v3s-de2-mixer
+      - allwinner,sun50i-a64-de2-mixer-0
+      - allwinner,sun50i-a64-de2-mixer-1
+      - allwinner,sun50i-h6-de3-mixer-0
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: The mixer interface clock
+      - description: The mixer module clock
+
+  clock-names:
+    items:
+      - const: bus
+      - const: mod
+
+  resets:
+    maxItems: 1
+
+  ports:
+    type: object
+    description: |
+      A ports node with endpoint definitions as defined in
+      Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        description: |
+          Input endpoints of the controller.
+
+      port@1:
+        type: object
+        description: |
+          Output endpoints of the controller.
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+      - port@1
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - resets
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/sun8i-de2.h>
+    #include <dt-bindings/reset/sun8i-de2.h>
+
+    mixer0: mixer@1100000 {
+        compatible = "allwinner,sun8i-a83t-de2-mixer-0";
+        reg = <0x01100000 0x100000>;
+        clocks = <&display_clocks CLK_BUS_MIXER0>,
+                 <&display_clocks CLK_MIXER0>;
+        clock-names = "bus",
+                      "mod";
+        resets = <&display_clocks RST_MIXER0>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            mixer0_out: port@1 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+                reg = <1>;
+
+                mixer0_out_tcon0: endpoint@0 {
+                    reg = <0>;
+                    remote-endpoint = <&tcon0_in_mixer0>;
+                };
+
+                mixer0_out_tcon1: endpoint@1 {
+                    reg = <1>;
+                    remote-endpoint = <&tcon1_in_mixer0>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
new file mode 100644
index 0000000..4d67956
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
@@ -0,0 +1,273 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/allwinner,sun8i-a83t-dw-hdmi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A83t DWC HDMI TX Encoder Device Tree Bindings
+
+description: |
+  The HDMI transmitter is a Synopsys DesignWare HDMI 1.4 TX controller
+  IP with Allwinner's own PHY IP. It supports audio and video outputs
+  and CEC.
+
+  These DT bindings follow the Synopsys DWC HDMI TX bindings defined
+  in Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt with
+  the following device-specific properties.
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#phy-cells":
+    const: 0
+
+  compatible:
+    oneOf:
+      - const: allwinner,sun8i-a83t-dw-hdmi
+      - const: allwinner,sun50i-h6-dw-hdmi
+
+      - items:
+        - enum:
+          - allwinner,sun8i-h3-dw-hdmi
+          - allwinner,sun8i-r40-dw-hdmi
+          - allwinner,sun50i-a64-dw-hdmi
+        - const: allwinner,sun8i-a83t-dw-hdmi
+
+  reg:
+    maxItems: 1
+
+  reg-io-width:
+    const: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 3
+    maxItems: 6
+    items:
+      - description: Bus Clock
+      - description: Register Clock
+      - description: TMDS Clock
+      - description: HDMI CEC Clock
+      - description: HDCP Clock
+      - description: HDCP Bus Clock
+
+  clock-names:
+    minItems: 3
+    maxItems: 6
+    items:
+      - const: iahb
+      - const: isfr
+      - const: tmds
+      - const: cec
+      - const: hdcp
+      - const: hdcp-bus
+
+  resets:
+    minItems: 1
+    maxItems: 2
+    items:
+      - description: HDMI Controller Reset
+      - description: HDCP Reset
+
+  reset-names:
+    minItems: 1
+    maxItems: 2
+    items:
+      - const: ctrl
+      - const: hdcp
+
+  phys:
+    maxItems: 1
+    description:
+      Phandle to the DWC HDMI PHY.
+
+  phy-names:
+    const: phy
+
+  hvcc-supply:
+    description:
+      The VCC power supply of the controller
+
+  ports:
+    type: object
+    description: |
+      A ports node with endpoint definitions as defined in
+      Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        description: |
+          Input endpoints of the controller. Usually the associated
+          TCON.
+
+      port@1:
+        type: object
+        description: |
+          Output endpoints of the controller. Usually an HDMI
+          connector.
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+      - port@0
+      - port@1
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - reg-io-width
+  - interrupts
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+  - phys
+  - phy-names
+  - ports
+
+if:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - allwinner,sun50i-h6-dw-hdmi
+
+then:
+  properties:
+    clocks:
+      minItems: 6
+
+    clock-names:
+      minItems: 6
+
+    resets:
+      minItems: 2
+
+    reset-names:
+      minItems: 2
+
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    /*
+     * This comes from the clock/sun8i-a83t-ccu.h and
+     * reset/sun8i-a83t-ccu.h headers, but we can't include them since
+     * it would trigger a bunch of warnings for redefinitions of
+     * symbols with the other example.
+     */
+    #define CLK_BUS_HDMI	39
+    #define CLK_HDMI		93
+    #define CLK_HDMI_SLOW	94
+    #define RST_BUS_HDMI1	26
+
+    hdmi@1ee0000 {
+        compatible = "allwinner,sun8i-a83t-dw-hdmi";
+        reg = <0x01ee0000 0x10000>;
+        reg-io-width = <1>;
+        interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&ccu CLK_BUS_HDMI>, <&ccu CLK_HDMI_SLOW>,
+                 <&ccu CLK_HDMI>;
+        clock-names = "iahb", "isfr", "tmds";
+        resets = <&ccu RST_BUS_HDMI1>;
+        reset-names = "ctrl";
+        phys = <&hdmi_phy>;
+        phy-names = "phy";
+        pinctrl-names = "default";
+        pinctrl-0 = <&hdmi_pins>;
+        status = "disabled";
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                reg = <0>;
+
+                endpoint {
+                    remote-endpoint = <&tcon1_out_hdmi>;
+                };
+            };
+
+            port@1 {
+                reg = <1>;
+            };
+        };
+    };
+
+    /* Cleanup after ourselves */
+    #undef CLK_BUS_HDMI
+    #undef CLK_HDMI
+    #undef CLK_HDMI_SLOW
+
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    /*
+     * This comes from the clock/sun50i-h6-ccu.h and
+     * reset/sun50i-h6-ccu.h headers, but we can't include them since
+     * it would trigger a bunch of warnings for redefinitions of
+     * symbols with the other example.
+     */
+    #define CLK_BUS_HDMI	126
+    #define CLK_BUS_HDCP	137
+    #define CLK_HDMI		123
+    #define CLK_HDMI_SLOW	124
+    #define CLK_HDMI_CEC	125
+    #define CLK_HDCP		136
+    #define RST_BUS_HDMI_SUB	57
+    #define RST_BUS_HDCP	62
+
+    hdmi@6000000 {
+        compatible = "allwinner,sun50i-h6-dw-hdmi";
+        reg = <0x06000000 0x10000>;
+        reg-io-width = <1>;
+        interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&ccu CLK_BUS_HDMI>, <&ccu CLK_HDMI_SLOW>,
+                 <&ccu CLK_HDMI>, <&ccu CLK_HDMI_CEC>,
+                 <&ccu CLK_HDCP>, <&ccu CLK_BUS_HDCP>;
+        clock-names = "iahb", "isfr", "tmds", "cec", "hdcp",
+                      "hdcp-bus";
+        resets = <&ccu RST_BUS_HDMI_SUB>, <&ccu RST_BUS_HDCP>;
+        reset-names = "ctrl", "hdcp";
+        phys = <&hdmi_phy>;
+        phy-names = "phy";
+        pinctrl-names = "default";
+        pinctrl-0 = <&hdmi_pins>;
+        status = "disabled";
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                reg = <0>;
+
+                endpoint {
+                    remote-endpoint = <&tcon_top_hdmi_out_hdmi>;
+                };
+            };
+
+            port@1 {
+                reg = <1>;
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-hdmi-phy.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-hdmi-phy.yaml
new file mode 100644
index 0000000..501cec1
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-hdmi-phy.yaml
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/allwinner,sun8i-a83t-hdmi-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A83t HDMI PHY Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#phy-cells":
+    const: 0
+
+  compatible:
+    enum:
+      - allwinner,sun8i-a83t-hdmi-phy
+      - allwinner,sun8i-h3-hdmi-phy
+      - allwinner,sun8i-r40-hdmi-phy
+      - allwinner,sun50i-a64-hdmi-phy
+      - allwinner,sun50i-h6-hdmi-phy
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    minItems: 2
+    maxItems: 4
+    items:
+      - description: Bus Clock
+      - description: Module Clock
+      - description: Parent of the PHY clock
+      - description: Second possible parent of the PHY clock
+
+  clock-names:
+    minItems: 2
+    maxItems: 4
+    items:
+      - const: bus
+      - const: mod
+      - const: pll-0
+      - const: pll-1
+
+  resets:
+    maxItems: 1
+
+  reset-names:
+    const: phy
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+
+if:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - allwinner,sun8i-r40-hdmi-phy
+
+then:
+  properties:
+    clocks:
+      minItems: 4
+
+    clock-names:
+      minItems: 4
+
+else:
+  if:
+    properties:
+      compatible:
+        contains:
+          enum:
+            - allwinner,sun8i-h3-hdmi-phy
+            - allwinner,sun50i-a64-hdmi-phy
+
+  then:
+    properties:
+      clocks:
+        minItems: 3
+
+      clock-names:
+        minItems: 3
+
+  else:
+    properties:
+      clocks:
+        maxItems: 2
+
+      clock-names:
+        maxItems: 2
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/sun8i-a83t-ccu.h>
+    #include <dt-bindings/reset/sun8i-a83t-ccu.h>
+
+    hdmi_phy: hdmi-phy@1ef0000 {
+        compatible = "allwinner,sun8i-a83t-hdmi-phy";
+        reg = <0x01ef0000 0x10000>;
+        clocks = <&ccu CLK_BUS_HDMI>, <&ccu CLK_HDMI_SLOW>;
+        clock-names = "bus", "mod";
+        resets = <&ccu RST_BUS_HDMI0>;
+        reset-names = "phy";
+        #phy-cells = <0>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml
new file mode 100644
index 0000000..b98ca60
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml
@@ -0,0 +1,382 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/allwinner,sun8i-r40-tcon-top.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner R40 TCON TOP Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+description: |
+  The TCON TOP's main purpose is to configure the whole display pipeline.
+  It determines the relationships between mixers and TCONs, selects the
+  source TCON for HDMI, muxes the LCD and TV encoder GPIO outputs, selects
+  the TV encoder clock source, and contains additional TV TCON and DSI gates.
+
+  It allows the display pipeline to be configured in very different ways:
+
+                                  / LCD0/LVDS0
+                   / [0] TCON-LCD0
+                   |              \ MIPI DSI
+   mixer0          |
+          \        / [1] TCON-LCD1 - LCD1/LVDS1
+           TCON-TOP
+          /        \ [2] TCON-TV0 [0] - TVE0/RGB
+   mixer1          |                  \
+                   |                   TCON-TOP - HDMI
+                   |                  /
+                   \ [3] TCON-TV1 [1] - TVE1/RGB
+
+  Note that both TCON TOP references refer to the same physical unit.
+  Both mixers can be connected to any TCON. Not all TCON TOP variants
+  support all features.
+
+properties:
+  "#clock-cells":
+    const: 1
+
+  compatible:
+    enum:
+      - allwinner,sun8i-r40-tcon-top
+      - allwinner,sun50i-h6-tcon-top
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    minItems: 2
+    maxItems: 6
+    items:
+      - description: The TCON TOP interface clock
+      - description: The TCON TOP TV0 clock
+      - description: The TCON TOP TVE0 clock
+      - description: The TCON TOP TV1 clock
+      - description: The TCON TOP TVE1 clock
+      - description: The TCON TOP MIPI DSI clock
+
+  clock-names:
+    minItems: 2
+    maxItems: 6
+    items:
+      - const: bus
+      - const: tcon-tv0
+      - const: tve0
+      - const: tcon-tv1
+      - const: tve1
+      - const: dsi
+
+  clock-output-names:
+    minItems: 1
+    maxItems: 3
+    description: >
+      The first item is the name of the clock created for the TV0
+      channel, the second item is the name of the TCON TV1 channel
+      clock and the third one is the name of the DSI channel clock.
+
+  resets:
+    maxItems: 1
+
+  ports:
+    type: object
+    description: |
+      A ports node with endpoint definitions as defined in
+      Documentation/devicetree/bindings/media/video-interfaces.txt.
+      All ports should have only one endpoint connected to
+      remote endpoint.
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        description: |
+          Input endpoint for Mixer 0 mux.
+
+      port@1:
+        type: object
+        description: |
+          Output endpoint for Mixer 0 mux
+
+        properties:
+          "#address-cells":
+            const: 1
+
+          "#size-cells":
+            const: 0
+
+          reg: true
+
+        patternProperties:
+          "^endpoint@[0-9]$":
+            type: object
+
+            properties:
+              reg:
+                description: |
+                  ID of the target TCON
+
+            required:
+              - reg
+
+        required:
+          - "#address-cells"
+          - "#size-cells"
+
+        additionalProperties: false
+
+      port@2:
+        type: object
+        description: |
+          Input endpoint for Mixer 1 mux.
+
+      port@3:
+        type: object
+        description: |
+          Output endpoint for Mixer 1 mux
+
+        properties:
+          "#address-cells":
+            const: 1
+
+          "#size-cells":
+            const: 0
+
+          reg: true
+
+        patternProperties:
+          "^endpoint@[0-9]$":
+            type: object
+
+            properties:
+              reg:
+                description: |
+                  ID of the target TCON
+
+            required:
+              - reg
+
+        required:
+          - "#address-cells"
+          - "#size-cells"
+
+        additionalProperties: false
+
+      port@4:
+        type: object
+        description: |
+          Input endpoint for HDMI mux.
+
+        properties:
+          "#address-cells":
+            const: 1
+
+          "#size-cells":
+            const: 0
+
+          reg: true
+
+        patternProperties:
+          "^endpoint@[0-9]$":
+            type: object
+
+            properties:
+              reg:
+                description: |
+                  ID of the target TCON
+
+            required:
+              - reg
+
+        required:
+          - "#address-cells"
+          - "#size-cells"
+
+        additionalProperties: false
+
+      port@5:
+        type: object
+        description: |
+          Output endpoint for HDMI mux
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+      - port@0
+      - port@1
+      - port@4
+      - port@5
+
+    additionalProperties: false
+
+required:
+  - "#clock-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - clock-output-names
+  - resets
+  - ports
+
+additionalProperties: false
+
+if:
+  properties:
+    compatible:
+      contains:
+        const: allwinner,sun50i-h6-tcon-top
+
+then:
+  properties:
+    clocks:
+      maxItems: 2
+
+    clock-output-names:
+      maxItems: 1
+
+else:
+  properties:
+    clocks:
+      minItems: 6
+
+    clock-output-names:
+      minItems: 3
+
+    ports:
+      required:
+        - port@2
+        - port@3
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    #include <dt-bindings/clock/sun8i-r40-ccu.h>
+    #include <dt-bindings/reset/sun8i-r40-ccu.h>
+
+      tcon_top: tcon-top@1c70000 {
+          compatible = "allwinner,sun8i-r40-tcon-top";
+          reg = <0x01c70000 0x1000>;
+          clocks = <&ccu CLK_BUS_TCON_TOP>,
+                   <&ccu CLK_TCON_TV0>,
+                   <&ccu CLK_TVE0>,
+                   <&ccu CLK_TCON_TV1>,
+                   <&ccu CLK_TVE1>,
+                   <&ccu CLK_DSI_DPHY>;
+          clock-names = "bus",
+                        "tcon-tv0",
+                        "tve0",
+                        "tcon-tv1",
+                        "tve1",
+                        "dsi";
+          clock-output-names = "tcon-top-tv0",
+                               "tcon-top-tv1",
+                               "tcon-top-dsi";
+          resets = <&ccu RST_BUS_TCON_TOP>;
+          #clock-cells = <1>;
+
+          ports {
+              #address-cells = <1>;
+              #size-cells = <0>;
+
+              tcon_top_mixer0_in: port@0 {
+                  reg = <0>;
+
+                  tcon_top_mixer0_in_mixer0: endpoint {
+                      remote-endpoint = <&mixer0_out_tcon_top>;
+                  };
+              };
+
+              tcon_top_mixer0_out: port@1 {
+                  #address-cells = <1>;
+                  #size-cells = <0>;
+                  reg = <1>;
+
+                  tcon_top_mixer0_out_tcon_lcd0: endpoint@0 {
+                      reg = <0>;
+                  };
+
+                  tcon_top_mixer0_out_tcon_lcd1: endpoint@1 {
+                      reg = <1>;
+                  };
+
+                  tcon_top_mixer0_out_tcon_tv0: endpoint@2 {
+                      reg = <2>;
+                      remote-endpoint = <&tcon_tv0_in_tcon_top_mixer0>;
+                  };
+
+                  tcon_top_mixer0_out_tcon_tv1: endpoint@3 {
+                      reg = <3>;
+                      remote-endpoint = <&tcon_tv1_in_tcon_top_mixer0>;
+                  };
+              };
+
+              tcon_top_mixer1_in: port@2 {
+                  #address-cells = <1>;
+                  #size-cells = <0>;
+                  reg = <2>;
+
+                  tcon_top_mixer1_in_mixer1: endpoint@1 {
+                      reg = <1>;
+                      remote-endpoint = <&mixer1_out_tcon_top>;
+                  };
+              };
+
+              tcon_top_mixer1_out: port@3 {
+                  #address-cells = <1>;
+                  #size-cells = <0>;
+                  reg = <3>;
+
+                  tcon_top_mixer1_out_tcon_lcd0: endpoint@0 {
+                      reg = <0>;
+                  };
+
+                  tcon_top_mixer1_out_tcon_lcd1: endpoint@1 {
+                      reg = <1>;
+                  };
+
+                  tcon_top_mixer1_out_tcon_tv0: endpoint@2 {
+                      reg = <2>;
+                      remote-endpoint = <&tcon_tv0_in_tcon_top_mixer1>;
+                  };
+
+                  tcon_top_mixer1_out_tcon_tv1: endpoint@3 {
+                      reg = <3>;
+                      remote-endpoint = <&tcon_tv1_in_tcon_top_mixer1>;
+                  };
+              };
+
+              tcon_top_hdmi_in: port@4 {
+                  #address-cells = <1>;
+                  #size-cells = <0>;
+                  reg = <4>;
+
+                  tcon_top_hdmi_in_tcon_tv0: endpoint@0 {
+                      reg = <0>;
+                      remote-endpoint = <&tcon_tv0_out_tcon_top>;
+                  };
+
+                  tcon_top_hdmi_in_tcon_tv1: endpoint@1 {
+                      reg = <1>;
+                      remote-endpoint = <&tcon_tv1_out_tcon_top>;
+                  };
+              };
+
+              tcon_top_hdmi_out: port@5 {
+                  reg = <5>;
+
+                  tcon_top_hdmi_out_hdmi: endpoint {
+                      remote-endpoint = <&hdmi_in_tcon_top>;
+                  };
+              };
+          };
+      };
+
+...
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml b/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml
new file mode 100644
index 0000000..96de41d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml
@@ -0,0 +1,133 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/allwinner,sun9i-a80-deu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A80 Detail Enhancement Unit Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+description: |
+  The DEU (Detail Enhancement Unit), found in the Allwinner A80 SoC,
+  can sharpen the display content in both luma and chroma channels.
+
+properties:
+  compatible:
+    const: allwinner,sun9i-a80-deu
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: The DEU interface clock
+      - description: The DEU module clock
+      - description: The DEU DRAM clock
+
+  clock-names:
+    items:
+      - const: ahb
+      - const: mod
+      - const: ram
+
+  resets:
+    maxItems: 1
+
+  ports:
+    type: object
+    description: |
+      A ports node with endpoint definitions as defined in
+      Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        description: |
+          Input endpoints of the controller.
+
+      port@1:
+        type: object
+        description: |
+          Output endpoints of the controller.
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+      - port@0
+      - port@1
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - resets
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    #include <dt-bindings/clock/sun9i-a80-de.h>
+    #include <dt-bindings/reset/sun9i-a80-de.h>
+
+    deu0: deu@3300000 {
+        compatible = "allwinner,sun9i-a80-deu";
+        reg = <0x03300000 0x40000>;
+        interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&de_clocks CLK_BUS_DEU0>,
+                 <&de_clocks CLK_IEP_DEU0>,
+                 <&de_clocks CLK_DRAM_DEU0>;
+        clock-names = "ahb",
+                      "mod",
+                      "ram";
+        resets = <&de_clocks RST_DEU0>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            deu0_in: port@0 {
+                reg = <0>;
+
+                deu0_in_fe0: endpoint {
+                    remote-endpoint = <&fe0_out_deu0>;
+                };
+            };
+
+            deu0_out: port@1 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+                reg = <1>;
+
+                deu0_out_be0: endpoint@0 {
+                    reg = <0>;
+                    remote-endpoint = <&be0_in_deu0>;
+                };
+
+                deu0_out_be1: endpoint@1 {
+                    reg = <1>;
+                    remote-endpoint = <&be1_in_deu0>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
new file mode 100644
index 0000000..8f37302
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
@@ -0,0 +1,131 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/lvds-codec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Transparent LVDS encoders and decoders
+
+maintainers:
+  - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+
+description: |
+  This binding supports transparent LVDS encoders and decoders that don't
+  require any configuration.
+
+  LVDS is a physical layer specification defined in ANSI/TIA/EIA-644-A. Multiple
+  incompatible data link layers have been used over time to transmit image data
+  to LVDS panels. This binding targets devices compatible with the following
+  specifications only.
+
+  [JEIDA] "Digital Interface Standards for Monitor", JEIDA-59-1999, February
+  1999 (Version 1.0), Japan Electronic Industry Development Association (JEIDA)
+  [LDI] "Open LVDS Display Interface", May 1999 (Version 0.95), National
+  Semiconductor
+  [VESA] "VESA Notebook Panel Standard", October 2007 (Version 1.0), Video
+  Electronics Standards Association (VESA)
+
+  Those devices have been marketed under the FPD-Link and FlatLink brand names
+  among others.
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+        - enum:
+          - ti,ds90c185       # For the TI DS90C185 FPD-Link Serializer
+          - ti,ds90c187       # For the TI DS90C187 FPD-Link Serializer
+          - ti,sn75lvds83     # For the TI SN75LVDS83 FlatLink transmitter
+        - const: lvds-encoder # Generic LVDS encoder compatible fallback
+      - items:
+        - enum:
+          - ti,ds90cf384a     # For the DS90CF384A FPD-Link LVDS Receiver
+        - const: lvds-decoder # Generic LVDS decoders compatible fallback
+      - enum:
+        - thine,thc63lvdm83d  # For the THC63LVDM83D LVDS serializer
+
+  ports:
+    type: object
+    description: |
+      This device has two video ports. Their connections are modeled using the
+      OF graph bindings specified in Documentation/devicetree/bindings/graph.txt
+    properties:
+      port@0:
+        type: object
+        description: |
+          For LVDS encoders, port 0 is the parallel input
+          For LVDS decoders, port 0 is the LVDS input
+
+      port@1:
+        type: object
+        description: |
+          For LVDS encoders, port 1 is the LVDS output
+          For LVDS decoders, port 1 is the parallel output
+
+    required:
+      - port@0
+      - port@1
+
+  powerdown-gpios:
+    description:
+      The GPIO used to control the power down line of this device.
+    maxItems: 1
+
+required:
+  - compatible
+  - ports
+
+examples:
+  - |
+    lvds-encoder {
+      compatible = "ti,ds90c185", "lvds-encoder";
+
+      ports {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        port@0 {
+          reg = <0>;
+
+          lvds_enc_in: endpoint {
+            remote-endpoint = <&display_out_rgb>;
+          };
+        };
+
+        port@1 {
+          reg = <1>;
+
+          lvds_enc_out: endpoint {
+            remote-endpoint = <&lvds_panel_in>;
+          };
+        };
+      };
+    };
+
+  - |
+    lvds-decoder {
+      compatible = "ti,ds90cf384a", "lvds-decoder";
+
+      ports {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        port@0 {
+          reg = <0>;
+
+          lvds_dec_in: endpoint {
+            remote-endpoint = <&display_out_lvds>;
+          };
+        };
+
+        port@1 {
+          reg = <1>;
+
+          lvds_dec_out: endpoint {
+            remote-endpoint = <&rgb_panel_in>;
+          };
+        };
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-transmitter.txt b/Documentation/devicetree/bindings/display/bridge/lvds-transmitter.txt
deleted file mode 100644
index 60091db..0000000
--- a/Documentation/devicetree/bindings/display/bridge/lvds-transmitter.txt
+++ /dev/null
@@ -1,66 +0,0 @@
-Parallel to LVDS Encoder
-------------------------
-
-This binding supports the parallel to LVDS encoders that don't require any
-configuration.
-
-LVDS is a physical layer specification defined in ANSI/TIA/EIA-644-A. Multiple
-incompatible data link layers have been used over time to transmit image data
-to LVDS panels. This binding targets devices compatible with the following
-specifications only.
-
-[JEIDA] "Digital Interface Standards for Monitor", JEIDA-59-1999, February
-1999 (Version 1.0), Japan Electronic Industry Development Association (JEIDA)
-[LDI] "Open LVDS Display Interface", May 1999 (Version 0.95), National
-Semiconductor
-[VESA] "VESA Notebook Panel Standard", October 2007 (Version 1.0), Video
-Electronics Standards Association (VESA)
-
-Those devices have been marketed under the FPD-Link and FlatLink brand names
-among others.
-
-
-Required properties:
-
-- compatible: Must be "lvds-encoder"
-
-  Any encoder compatible with this generic binding, but with additional
-  properties not listed here, must list a device specific compatible first
-  followed by this generic compatible.
-
-Required nodes:
-
-This device has two video ports. Their connections are modeled using the OF
-graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-
-- Video port 0 for parallel input
-- Video port 1 for LVDS output
-
-
-Example
--------
-
-lvds-encoder {
-	compatible = "lvds-encoder";
-
-	ports {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		port@0 {
-			reg = <0>;
-
-			lvds_enc_in: endpoint {
-				remote-endpoint = <&display_out_rgb>;
-			};
-		};
-
-		port@1 {
-			reg = <1>;
-
-			lvds_enc_out: endpoint {
-				remote-endpoint = <&lvds_panel_in>;
-			};
-		};
-	};
-};
diff --git a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvdm83d.txt b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvdm83d.txt
deleted file mode 100644
index fee3c88..0000000
--- a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvdm83d.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-THine Electronics THC63LVDM83D LVDS serializer
-----------------------------------------------
-
-The THC63LVDM83D is an LVDS serializer designed to support pixel data
-transmission between a host and a flat panel.
-
-Required properties:
-
-- compatible: Should be "thine,thc63lvdm83d"
-
-Optional properties:
-
-- powerdown-gpios: Power down control GPIO (the /PWDN pin, active low).
-
-Required nodes:
-
-The THC63LVDM83D has two video ports. Their connections are modeled using the
-OFgraph bindings specified in Documentation/devicetree/bindings/graph.txt.
-
-- Video port 0 for CMOS/TTL input
-- Video port 1 for LVDS output
-
-
-Example
--------
-
-	lvds_enc: encoder@0 {
-		compatible = "thine,thc63lvdm83d";
-
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				reg = <0>;
-
-				lvds_enc_in: endpoint@0 {
-					remote-endpoint = <&rgb_out>;
-				};
-			};
-
-			port@1 {
-				reg = <1>;
-
-				lvds_enc_out: endpoint@0 {
-					remote-endpoint = <&panel_in>;
-				};
-			};
-		};
-	};
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,ds90c185.txt b/Documentation/devicetree/bindings/display/bridge/ti,ds90c185.txt
deleted file mode 100644
index e575f99..0000000
--- a/Documentation/devicetree/bindings/display/bridge/ti,ds90c185.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-Texas Instruments FPD-Link (LVDS) Serializer
---------------------------------------------
-
-The DS90C185 and DS90C187 are low-power serializers for portable
-battery-powered applications that reduces the size of the RGB
-interface between the host GPU and the display.
-
-Required properties:
-
-- compatible: Should be
-  "ti,ds90c185", "lvds-encoder"  for the TI DS90C185 FPD-Link Serializer
-  "ti,ds90c187", "lvds-encoder"  for the TI DS90C187 FPD-Link Serializer
-
-Optional properties:
-
-- powerdown-gpios: Power down control GPIO (the PDB pin, active-low)
-
-Required nodes:
-
-The devices have two video ports. Their connections are modeled using the OF
-graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-
-- Video port 0 for parallel input
-- Video port 1 for LVDS output
-
-
-Example
--------
-
-lvds-encoder {
-	compatible = "ti,ds90c185", "lvds-encoder";
-
-	powerdown-gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
-
-	ports {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		port@0 {
-			reg = <0>;
-
-			lvds_enc_in: endpoint {
-				remote-endpoint = <&lcdc_out_rgb>;
-			};
-		};
-
-		port@1 {
-			reg = <1>;
-
-			lvds_enc_out: endpoint {
-				remote-endpoint = <&lvds_panel_in>;
-			};
-		};
-	};
-};
diff --git a/Documentation/devicetree/bindings/display/dsi-controller.yaml b/Documentation/devicetree/bindings/display/dsi-controller.yaml
new file mode 100644
index 0000000..fd986c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/dsi-controller.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/dsi-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common Properties for DSI Display Panels
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+  This document defines device tree properties common to DSI (Display
+  Serial Interface) controllers and attached panels. It doesn't constitute
+  a device tree binding specification by itself but is meant to be referenced
+  by device tree bindings.
+
+  When referenced from panel device tree bindings the properties defined in
+  this document are defined as follows. The panel device tree bindings are
+  responsible for defining whether each property is required or optional.
+
+  Notice: this binding concerns DSI panels connected directly to a master
+  without any intermediate port graph to the panel. Each DSI master
+  can control one to four virtual channels to one panel. Each virtual
+  channel should have a "panel" node with its reg property set to the
+  virtual channel number; usually there is just one virtual channel,
+  number 0.
+
+properties:
+  $nodename:
+    pattern: "^dsi-controller(@.*)?$"
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+patternProperties:
+  "^panel@[0-3]$":
+    description: Panels connected to the DSI link
+    type: object
+
+    properties:
+      reg:
+        minimum: 0
+        maximum: 3
+        description:
+          The virtual channel number of a DSI peripheral. Must be in the range
+          from 0 to 3, as DSI uses a 2-bit addressing scheme. Some DSI
+          peripherals respond to more than a single virtual channel. In that
+          case the reg property can take multiple entries, one for each virtual
+          channel that the peripheral responds to.
+
+      clock-master:
+        type: boolean
+        description:
+           Should be enabled if the host is being used in conjunction with
+           another DSI host to drive the same peripheral. Hardware supporting
+           such a configuration generally requires the data on both the busses
+           to be driven by the same clock. Only the DSI host instance
+           controlling this clock should contain this property.
+
+      enforce-video-mode:
+        type: boolean
+        description:
+          The best option is usually to run a panel in command mode, as this
+          gives better control over the panel hardware. However for different
+          reasons like broken hardware, missing features or testing, it may be
+          useful to be able to force a command mode-capable panel into video
+          mode.
+
+    required:
+      - reg
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    dsi-controller@a0351000 {
+        reg = <0xa0351000 0x1000>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+        panel@0 {
+            compatible = "sony,acx424akp";
+            reg = <0>;
+            vddi-supply = <&ab8500_ldo_aux1_reg>;
+            reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/ingenic,lcd.txt b/Documentation/devicetree/bindings/display/ingenic,lcd.txt
index 7b536c8..01e3261 100644
--- a/Documentation/devicetree/bindings/display/ingenic,lcd.txt
+++ b/Documentation/devicetree/bindings/display/ingenic,lcd.txt
@@ -4,6 +4,7 @@
 - compatible: one of:
   * ingenic,jz4740-lcd
   * ingenic,jz4725b-lcd
+  * ingenic,jz4770-lcd
 - reg: LCD registers location and length
 - clocks: LCD pixclock and device clock specifiers.
 	   The device clock is only required on the JZ4740.
diff --git a/Documentation/devicetree/bindings/display/msm/dpu.txt b/Documentation/devicetree/bindings/display/msm/dpu.txt
index a61dd40..551ae26 100644
--- a/Documentation/devicetree/bindings/display/msm/dpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/dpu.txt
@@ -8,7 +8,7 @@
 
 MDSS:
 Required properties:
-- compatible: "qcom,sdm845-mdss"
+- compatible: "qcom,sdm845-mdss", "qcom,sc7180-mdss"
 - reg: physical base address and length of contoller's registers.
 - reg-names: register region names. The following region is required:
   * "mdss"
@@ -41,7 +41,7 @@
 
 MDP:
 Required properties:
-- compatible: "qcom,sdm845-dpu"
+- compatible: "qcom,sdm845-dpu", "qcom,sc7180-dpu"
 - reg: physical base address and length of controller's registers.
 - reg-names : register region names. The following region is required:
   * "mdp"
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
index 2b8fd26..7edc298 100644
--- a/Documentation/devicetree/bindings/display/msm/gpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -23,13 +23,18 @@
 - iommus: optional phandle to an adreno iommu instance
 - operating-points-v2: optional phandle to the OPP operating points
 - interconnects: optional phandle to an interconnect provider.  See
-  ../interconnect/interconnect.txt for details.
+  ../interconnect/interconnect.txt for details. Some A3xx and all A4xx platforms
+  will have two paths; all others will have one path.
+- interconnect-names: The names of the interconnect paths that correspond to the
+  interconnects property. Values must be gfx-mem and ocmem.
 - qcom,gmu: For GMU attached devices a phandle to the GMU device that will
   control the power for the GPU. Applicable targets:
     - qcom,adreno-630.2
 - zap-shader: For a5xx and a6xx devices this node contains a memory-region that
   points to reserved memory to store the zap shader that can be used to help
   bring the GPU out of secure mode.
+- firmware-name: optional property of the 'zap-shader' node, listing the
+  relative path of the device specific zap firmware.
 
 Example 3xx/4xx/a5xx:
 
@@ -76,11 +81,13 @@
 		operating-points-v2 = <&gpu_opp_table>;
 
 		interconnects = <&rsc_hlos MASTER_GFX3D &rsc_hlos SLAVE_EBI1>;
+		interconnect-names = "gfx-mem";
 
 		qcom,gmu = <&gmu>;
 
 		zap-shader {
 			memory-region = <&zap_shader_region>;
+			firmware-name = "qcom/LENOVO/81JL/qcdxkmsuc850.mbn";
 		};
 	};
 };
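
The example above shows a single interconnect path. For the A3xx/A4xx platforms
that also have an OCMEM path, both paths would be listed together, roughly as in
the sketch below. Only the "gfx-mem" and "ocmem" names come from the binding text;
the node address, compatible revision and interconnect provider phandles/endpoint
macros are illustrative placeholders.

	gpu@fdb00000 {
		compatible = "qcom,adreno-420.0", "qcom,adreno";
		reg = <0xfdb00000 0x10000>;
		reg-names = "kgsl_3d0_reg_memory";

		/* Two paths: GPU to system memory and GPU to OCMEM */
		interconnects = <&mmss_noc MASTER_GFX3D &bimc SLAVE_EBI_CH0>,
				<&ocmem_noc MASTER_GFX3D &ocmem_noc SLAVE_OCMEM>;
		interconnect-names = "gfx-mem", "ocmem";
	};
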
diff --git a/Documentation/devicetree/bindings/display/mxsfb.txt b/Documentation/devicetree/bindings/display/mxsfb.txt
index 472e1ea..c985871 100644
--- a/Documentation/devicetree/bindings/display/mxsfb.txt
+++ b/Documentation/devicetree/bindings/display/mxsfb.txt
@@ -6,6 +6,7 @@
 - compatible:	Should be "fsl,imx23-lcdif" for i.MX23.
 		Should be "fsl,imx28-lcdif" for i.MX28.
 		Should be "fsl,imx6sx-lcdif" for i.MX6SX.
+		Should be "fsl,imx8mq-lcdif" for i.MX8MQ.
 - reg:		Address and length of the register set for LCDIF
 - interrupts:	Should contain LCDIF interrupt
 - clocks:	A list of phandle + clock-specifier pairs, one for each
diff --git a/Documentation/devicetree/bindings/display/panel/ampire,am-480272h3tmqw-t01h.yaml b/Documentation/devicetree/bindings/display/panel/ampire,am-480272h3tmqw-t01h.yaml
deleted file mode 100644
index c6e33e7..0000000
--- a/Documentation/devicetree/bindings/display/panel/ampire,am-480272h3tmqw-t01h.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/display/panel/ampire,am-480272h3tmqw-t01h.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Ampire AM-480272H3TMQW-T01H 4.3" WQVGA TFT LCD panel
-
-maintainers:
-  - Yannick Fertre <yannick.fertre@st.com>
-  - Thierry Reding <treding@nvidia.com>
-
-allOf:
-  - $ref: panel-common.yaml#
-
-properties:
-  compatible:
-    const: ampire,am-480272h3tmqw-t01h
-
-  power-supply: true
-  enable-gpios: true
-  backlight: true
-  port: true
-
-required:
-  - compatible
-
-additionalProperties: false
-
-examples:
-  - |
-    panel_rgb: panel {
-      compatible = "ampire,am-480272h3tmqw-t01h";
-      enable-gpios = <&gpioa 8 1>;
-      port {
-        panel_in_rgb: endpoint {
-          remote-endpoint = <&controller_out_rgb>;
-        };
-      };
-    };
-
-...
diff --git a/Documentation/devicetree/bindings/display/panel/ampire,am800480r3tmqwa1h.txt b/Documentation/devicetree/bindings/display/panel/ampire,am800480r3tmqwa1h.txt
deleted file mode 100644
index 83e2cae..0000000
--- a/Documentation/devicetree/bindings/display/panel/ampire,am800480r3tmqwa1h.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Ampire AM-800480R3TMQW-A1H 7.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "ampire,am800480r3tmqwa1h"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/giantplus,gpm940b0.txt b/Documentation/devicetree/bindings/display/panel/giantplus,gpm940b0.txt
deleted file mode 100644
index 3dab52f..0000000
--- a/Documentation/devicetree/bindings/display/panel/giantplus,gpm940b0.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-GiantPlus 3.0" (320x240 pixels) 24-bit TFT LCD panel
-
-Required properties:
-- compatible: should be "giantplus,gpm940b0"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
new file mode 100644
index 0000000..4ebcea7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/leadtek,ltk500hd1829.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Leadtek LTK500HD1829 5.0in 720x1280 DSI panel
+
+maintainers:
+  - Heiko Stuebner <heiko.stuebner@theobroma-systems.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: leadtek,ltk500hd1829
+  reg: true
+  backlight: true
+  reset-gpios: true
+  iovcc-supply:
+     description: regulator that supplies the iovcc voltage
+  vcc-supply:
+     description: regulator that supplies the vcc voltage
+
+required:
+  - compatible
+  - reg
+  - backlight
+  - iovcc-supply
+  - vcc-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    dsi@ff450000 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        panel@0 {
+            compatible = "leadtek,ltk500hd1829";
+            reg = <0>;
+            backlight = <&backlight>;
+            iovcc-supply = <&vcc_1v8>;
+            vcc-supply = <&vcc_2v8>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/logicpd,type28.yaml b/Documentation/devicetree/bindings/display/panel/logicpd,type28.yaml
new file mode 100644
index 0000000..2834287
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/logicpd,type28.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/logicpd,type28.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Logic PD Type 28 4.3" WQVGA TFT LCD panel
+
+maintainers:
+  - Adam Ford <aford173@gmail.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: logicpd,type28
+
+  power-supply: true
+  enable-gpios: true
+  backlight: true
+  port: true
+
+required:
+  - compatible
+
+additionalProperties: false
+
+examples:
+  - |
+    lcd0: display {
+      compatible = "logicpd,type28";
+      enable-gpios = <&gpio5 27 0>;
+      backlight = <&backlight>;
+      port {
+        lcd_in: endpoint {
+          remote-endpoint = <&dpi_out>;
+        };
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
new file mode 100644
index 0000000..8fe60ee2
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-simple.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Simple panels with one power supply
+
+maintainers:
+  - Thierry Reding <thierry.reding@gmail.com>
+  - Sam Ravnborg <sam@ravnborg.org>
+
+description: |
+  This binding file is a collection of the simple (dumb) panels that
+  require only a single power-supply.
+  Optionally there may be a backlight and an enable GPIO.
+  The panel may use an OF graph binding for the association to the display,
+  or it may be a direct child node of the display.
+
+  If the panel is more advanced a dedicated binding file is required.
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+
+  compatible:
+    enum:
+    # compatible must be listed in alphabetical order, ordered by compatible.
+    # The description in the comment is mandatory for each compatible.
+
+        # Ampire AM-480272H3TMQW-T01H 4.3" WQVGA TFT LCD panel
+      - ampire,am-480272h3tmqw-t01h
+        # Ampire AM-800480R3TMQW-A1H 7.0" WVGA TFT LCD panel
+      - ampire,am800480r3tmqwa1h
+        # AUO B116XAK01 eDP TFT LCD panel
+      - auo,b116xa01
+        # BOE NV140FHM-N49 14.0" FHD a-Si FT panel
+      - boe,nv140fhmn49
+        # GiantPlus GPM940B0 3.0" QVGA TFT LCD panel
+      - giantplus,gpm940b0
+        # Satoz SAT050AT40H12R2 5.0" WVGA TFT LCD panel
+      - satoz,sat050at40h12r2
+        # Sharp LS020B1DD01D 2.0" HQVGA TFT LCD panel
+      - sharp,ls020b1dd01d
+
+  backlight: true
+  enable-gpios: true
+  port: true
+  power-supply: true
+
+additionalProperties: false
+
+required:
+  - compatible
+  - power-supply
+
+examples:
+  - |
+    panel_rgb: panel-rgb {
+      compatible = "ampire,am-480272h3tmqw-t01h";
+      power-supply = <&vcc_lcd_reg>;
+
+      port {
+        panel_in_rgb: endpoint {
+          remote-endpoint = <&ltdc_out_rgb>;
+        };
+      };
+    };
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls020b1dd01d.txt b/Documentation/devicetree/bindings/display/panel/sharp,ls020b1dd01d.txt
deleted file mode 100644
index e45edbc..0000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,ls020b1dd01d.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Sharp 2.0" (240x160 pixels) 16-bit TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,ls020b1dd01d"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml b/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
new file mode 100644
index 0000000..185dcc8
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sony,acx424akp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sony ACX424AKP 4" 480x864 AMOLED panel
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sony,acx424akp
+  reg: true
+  reset-gpios: true
+  vddi-supply:
+     description: regulator that supplies the vddi voltage
+  enforce-video-mode: true
+
+required:
+  - compatible
+  - reg
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi-controller@a0351000 {
+        compatible = "ste,mcde-dsi";
+        reg = <0xa0351000 0x1000>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "sony,acx424akp";
+            reg = <0>;
+            vddi-supply = <&foo>;
+            reset-gpios = <&foo_gpio 0 GPIO_ACTIVE_LOW>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
new file mode 100644
index 0000000..186e5e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/xinpeng,xpp055c272.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xinpeng XPP055C272 5.5in 720x1280 DSI panel
+
+maintainers:
+  - Heiko Stuebner <heiko.stuebner@theobroma-systems.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: xinpeng,xpp055c272
+  reg: true
+  backlight: true
+  reset-gpios: true
+  iovcc-supply:
+     description: regulator that supplies the iovcc voltage
+  vci-supply:
+     description: regulator that supplies the vci voltage
+
+required:
+  - compatible
+  - reg
+  - backlight
+  - iovcc-supply
+  - vci-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    dsi@ff450000 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        panel@0 {
+            compatible = "xinpeng,xpp055c272";
+            reg = <0>;
+            backlight = <&backlight>;
+            iovcc-supply = <&vcc_1v8>;
+            vci-supply = <&vcc3v3_lcd>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/renesas,cmm.yaml b/Documentation/devicetree/bindings/display/renesas,cmm.yaml
new file mode 100644
index 0000000..a57037b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/renesas,cmm.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/renesas,cmm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car Color Management Module (CMM)
+
+maintainers:
+  - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+  - Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
+  - Jacopo Mondi <jacopo+renesas@jmondi.org>
+
+description: |+
+  Renesas R-Car color management module connected to R-Car DU video channels.
+  It provides image enhancement functions such as 1-D look-up tables (LUT),
+  3-D look-up tables (CLU), 1D-histogram generation (HGO), and color
+  space conversion (CSC).
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+        - enum:
+          - renesas,r8a7795-cmm
+          - renesas,r8a7796-cmm
+          - renesas,r8a77965-cmm
+          - renesas,r8a77990-cmm
+          - renesas,r8a77995-cmm
+        - const: renesas,rcar-gen3-cmm
+      - items:
+        - const: renesas,rcar-gen2-cmm
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - resets
+  - power-domains
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/r8a7796-cpg-mssr.h>
+    #include <dt-bindings/power/r8a7796-sysc.h>
+
+    cmm0: cmm@fea40000 {
+         compatible = "renesas,r8a7796-cmm",
+                      "renesas,rcar-gen3-cmm";
+         reg = <0 0xfea40000 0 0x1000>;
+         power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+         clocks = <&cpg CPG_MOD 711>;
+         resets = <&cpg 711>;
+    };
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index 17cb277..eb4ae41 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -41,10 +41,14 @@
       supplied they must be named "dclkin.x" with "x" being the input clock
       numerical index.
 
-  - vsps: A list of phandle and channel index tuples to the VSPs that handle
-    the memory interfaces for the DU channels. The phandle identifies the VSP
-    instance that serves the DU channel, and the channel index identifies the
-    LIF instance in that VSP.
+  - renesas,cmms: A list of phandles to the CMM instances present in the SoC,
+    one for each available DU channel. The property shall not be specified for
+    SoCs that do not provide any CMM (such as V3M and V3H).
+
+  - renesas,vsps: A list of phandle and channel index tuples to the VSPs that
+    handle the memory interfaces for the DU channels. The phandle identifies the
+    VSP instance that serves the DU channel, and the channel index identifies
+    the LIF instance in that VSP.
 
 Required nodes:
 
@@ -92,7 +96,8 @@
 			 <&cpg CPG_MOD 722>,
 			 <&cpg CPG_MOD 721>;
 		clock-names = "du.0", "du.1", "du.2", "du.3";
-		vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>, <&vspd0 1>;
+		renesas,cmms = <&cmm0>, <&cmm1>, <&cmm2>, <&cmm3>;
+		renesas,vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>, <&vspd0 1>;
 
 		ports {
 			#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt
index ce4c1fc..151be3b 100644
--- a/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt
@@ -4,13 +4,16 @@
 Required properties:
 - #address-cells: Should be <1>.
 - #size-cells: Should be <0>.
-- compatible: "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi".
-	      "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi".
+- compatible: one of
+	"rockchip,px30-mipi-dsi", "snps,dw-mipi-dsi"
+	"rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi"
+	"rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi"
 - reg: Represent the physical address range of the controller.
 - interrupts: Represent the controller's interrupt to the CPU(s).
 - clocks, clock-names: Phandles to the controller's pll reference
-  clock(ref) and APB clock(pclk). For RK3399, a phy config clock
-  (phy_cfg) and a grf clock(grf) are required. As described in [1].
+  clock (ref), only needed when using an internal dphy, and APB clock (pclk).
+  For RK3399, a phy config clock (phy_cfg) and a grf clock (grf) are
+  required, as described in [1].
 - rockchip,grf: this soc should set GRF regs to mux vopl/vopb.
 - ports: contain a port node with endpoint definitions as defined in [2].
   For vopb,set the reg = <0> and set the reg = <1> for vopl.
@@ -18,6 +21,8 @@
 - video port 1 for either a panel or subsequent encoder
 
 Optional properties:
+- phys: from general PHY binding: the phandle for the PHY device.
+- phy-names: Should be "dphy" if phys references an external phy.
 - power-domains: a phandle to mipi dsi power domain node.
 - resets: list of phandle + reset specifier pairs, as described in [3].
 - reset-names: string reset name, must be "apb".
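
To illustrate the new optional PHY properties, a rough sketch of a PX30 DSI
node using an external dphy follows; the node name, unit address and phandle
labels are illustrative only and are not taken from this binding:

	dsi@ff450000 {
		compatible = "rockchip,px30-mipi-dsi", "snps,dw-mipi-dsi";
		reg = <0xff450000 0x10000>;
		/* interrupts, clocks, rockchip,grf and ports omitted for brevity */
		phys = <&dsi_dphy>;
		phy-names = "dphy";
	};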
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt
index 7849ff0..aaf8c44 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt
@@ -4,6 +4,7 @@
 Required properties:
 - compatible: matching the soc type, one of
 	- "rockchip,rk3288-lvds";
+	- "rockchip,px30-lvds";
 
 - reg: physical base address of the controller and length
 	of memory mapped region.
@@ -18,6 +19,9 @@
 - rockchip,grf: phandle to the general register files syscon
 - rockchip,output: "rgb", "lvds" or "duallvds", This describes the output interface
 
+- phys: LVDS/DSI DPHY (px30 only)
+- phy-names: name of the PHY, must be "dphy" (px30 only)
+
 Optional properties:
 - pinctrl-names: must contain a "lcdc" entry.
 - pinctrl-0: pin control group to be used for this controller.
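
A minimal sketch of the PX30 case with the new phys/phy-names properties is
shown below; the register address and phandle names are placeholders, not
taken from this binding:

	lvds: lvds@ff470000 {
		compatible = "rockchip,px30-lvds";
		reg = <0xff470000 0x100>;
		rockchip,grf = <&grf>;
		rockchip,output = "lvds";
		phys = <&dsi_dphy>;
		phy-names = "dphy";
	};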
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
deleted file mode 100644
index 31ab72c..0000000
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ /dev/null
@@ -1,637 +0,0 @@
-Allwinner A10 Display Pipeline
-==============================
-
-The Allwinner A10 Display pipeline is composed of several components
-that are going to be documented below:
-
-For all connections between components up to the TCONs in the display
-pipeline, when there are multiple components of the same type at the
-same depth, the local endpoint ID must be the same as the remote
-component's index. For example, if the remote endpoint is Frontend 1,
-then the local endpoint ID must be 1.
-
-    Frontend 0  [0] ------- [0]  Backend 0  [0] ------- [0]  TCON 0
-		[1] --   -- [1]             [1] --   -- [1]
-		      \ /                         \ /
-		       X                           X
-		      / \                         / \
-		[0] --   -- [0]             [0] --   -- [0]
-    Frontend 1  [1] ------- [1]  Backend 1  [1] ------- [1]  TCON 1
-
-For a two pipeline system such as the one depicted above, the lines
-represent the connections between the components, while the numbers
-within the square brackets corresponds to the ID of the local endpoint.
-
-The same rule also applies to DE 2.0 mixer-TCON connections:
-
-    Mixer 0  [0] ----------- [0]  TCON 0
-	     [1] ----   ---- [1]
-		     \ /
-		      X
-		     / \
-	     [0] ----   ---- [0]
-    Mixer 1  [1] ----------- [1]  TCON 1
-
-HDMI Encoder
-------------
-
-The HDMI Encoder supports the HDMI video and audio outputs, and does
-CEC. It is one end of the pipeline.
-
-Required properties:
-  - compatible: value must be one of:
-    * allwinner,sun4i-a10-hdmi
-    * allwinner,sun5i-a10s-hdmi
-    * allwinner,sun6i-a31-hdmi
-  - reg: base address and size of memory-mapped region
-  - interrupts: interrupt associated to this IP
-  - clocks: phandles to the clocks feeding the HDMI encoder
-    * ahb: the HDMI interface clock
-    * mod: the HDMI module clock
-    * ddc: the HDMI ddc clock (A31 only)
-    * pll-0: the first video PLL
-    * pll-1: the second video PLL
-  - clock-names: the clock names mentioned above
-  - resets: phandle to the reset control for the HDMI encoder (A31 only)
-  - dmas: phandles to the DMA channels used by the HDMI encoder
-    * ddc-tx: The channel for DDC transmission
-    * ddc-rx: The channel for DDC reception
-    * audio-tx: The channel used for audio transmission
-  - dma-names: the channel names mentioned above
-
-  - ports: A ports node with endpoint definitions as defined in
-    Documentation/devicetree/bindings/media/video-interfaces.txt. The
-    first port should be the input endpoint. The second should be the
-    output, usually to an HDMI connector.
-
-DWC HDMI TX Encoder
--------------------
-
-The HDMI transmitter is a Synopsys DesignWare HDMI 1.4 TX controller IP
-with Allwinner's own PHY IP. It supports audio and video outputs and CEC.
-
-These DT bindings follow the Synopsys DWC HDMI TX bindings defined in
-Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt with the
-following device-specific properties.
-
-Required properties:
-
-  - compatible: value must be one of:
-    * "allwinner,sun8i-a83t-dw-hdmi"
-    * "allwinner,sun50i-a64-dw-hdmi", "allwinner,sun8i-a83t-dw-hdmi"
-    * "allwinner,sun50i-h6-dw-hdmi"
-  - reg: base address and size of memory-mapped region
-  - reg-io-width: See dw_hdmi.txt. Shall be 1.
-  - interrupts: HDMI interrupt number
-  - clocks: phandles to the clocks feeding the HDMI encoder
-    * iahb: the HDMI bus clock
-    * isfr: the HDMI register clock
-    * tmds: TMDS clock
-    * cec: HDMI CEC clock (H6 only)
-    * hdcp: HDCP clock (H6 only)
-    * hdcp-bus: HDCP bus clock (H6 only)
-  - clock-names: the clock names mentioned above
-  - resets:
-    * ctrl: HDMI controller reset
-    * hdcp: HDCP reset (H6 only)
-  - reset-names: reset names mentioned above
-  - phys: phandle to the DWC HDMI PHY
-  - phy-names: must be "phy"
-
-  - ports: A ports node with endpoint definitions as defined in
-    Documentation/devicetree/bindings/media/video-interfaces.txt. The
-    first port should be the input endpoint. The second should be the
-    output, usually to an HDMI connector.
-
-Optional properties:
-  - hvcc-supply: the VCC power supply of the controller
-
-DWC HDMI PHY
-------------
-
-Required properties:
-  - compatible: value must be one of:
-    * allwinner,sun8i-a83t-hdmi-phy
-    * allwinner,sun8i-h3-hdmi-phy
-    * allwinner,sun8i-r40-hdmi-phy
-    * allwinner,sun50i-a64-hdmi-phy
-    * allwinner,sun50i-h6-hdmi-phy
-  - reg: base address and size of memory-mapped region
-  - clocks: phandles to the clocks feeding the HDMI PHY
-    * bus: the HDMI PHY interface clock
-    * mod: the HDMI PHY module clock
-  - clock-names: the clock names mentioned above
-  - resets: phandle to the reset controller driving the PHY
-  - reset-names: must be "phy"
-
-H3, A64 and R40 HDMI PHY require additional clocks:
-  - pll-0: parent of phy clock
-  - pll-1: second possible phy clock parent (A64/R40 only)
-
-TV Encoder
-----------
-
-The TV Encoder supports the composite and VGA output. It is one end of
-the pipeline.
-
-Required properties:
- - compatible: value should be "allwinner,sun4i-a10-tv-encoder".
- - reg: base address and size of memory-mapped region
- - clocks: the clocks driving the TV encoder
- - resets: phandle to the reset controller driving the encoder
-
-- ports: A ports node with endpoint definitions as defined in
-  Documentation/devicetree/bindings/media/video-interfaces.txt. The
-  first port should be the input endpoint.
-
-TCON
-----
-
-The TCON acts as a timing controller for RGB, LVDS and TV interfaces.
-
-Required properties:
- - compatible: value must be either:
-   * allwinner,sun4i-a10-tcon
-   * allwinner,sun5i-a13-tcon
-   * allwinner,sun6i-a31-tcon
-   * allwinner,sun6i-a31s-tcon
-   * allwinner,sun7i-a20-tcon
-   * allwinner,sun8i-a23-tcon
-   * allwinner,sun8i-a33-tcon
-   * allwinner,sun8i-a83t-tcon-lcd
-   * allwinner,sun8i-a83t-tcon-tv
-   * allwinner,sun8i-r40-tcon-tv
-   * allwinner,sun8i-v3s-tcon
-   * allwinner,sun9i-a80-tcon-lcd
-   * allwinner,sun9i-a80-tcon-tv
-   * "allwinner,sun50i-a64-tcon-lcd", "allwinner,sun8i-a83t-tcon-lcd"
-   * "allwinner,sun50i-a64-tcon-tv", "allwinner,sun8i-a83t-tcon-tv"
-   * allwinner,sun50i-h6-tcon-tv, allwinner,sun8i-r40-tcon-tv
- - reg: base address and size of memory-mapped region
- - interrupts: interrupt associated to this IP
- - clocks: phandles to the clocks feeding the TCON.
-   - 'ahb': the interface clocks
-   - 'tcon-ch0': The clock driving the TCON channel 0, if supported
- - resets: phandles to the reset controllers driving the encoder
-   - "lcd": the reset line for the TCON
-   - "edp": the reset line for the eDP block (A80 only)
-
- - clock-names: the clock names mentioned above
- - reset-names: the reset names mentioned above
- - clock-output-names: Name of the pixel clock created, if TCON supports
-   channel 0.
-
-- ports: A ports node with endpoint definitions as defined in
-  Documentation/devicetree/bindings/media/video-interfaces.txt. The
-  first port should be the input endpoint, the second one the output
-
-  The output may have multiple endpoints. TCON can have 1 or 2 channels,
-  usually with the first channel being used for the panels interfaces
-  (RGB, LVDS, etc.), and the second being used for the outputs that
-  require another controller (TV Encoder, HDMI, etc.). The endpoints
-  will take an extra property, allwinner,tcon-channel, to specify the
-  channel the endpoint is associated to. If that property is not
-  present, the endpoint number will be used as the channel number.
-
-For TCONs with channel 0, there is one more clock required:
-   - 'tcon-ch0': The clock driving the TCON channel 0
-For TCONs with channel 1, there is one more clock required:
-   - 'tcon-ch1': The clock driving the TCON channel 1
-
-When TCON support LVDS (all TCONs except TV TCONs on A83T, R40 and those found
-in A13, H3, H5 and V3s SoCs), you need one more reset line:
-   - 'lvds': The reset line driving the LVDS logic
-
-And on the A23, A31, A31s and A33, you need one more clock line:
-   - 'lvds-alt': An alternative clock source, separate from the TCON channel 0
-                 clock, that can be used to drive the LVDS clock
-
-TCON TOP
---------
-
-TCON TOPs main purpose is to configure whole display pipeline. It determines
-relationships between mixers and TCONs, selects source TCON for HDMI, muxes
-LCD and TV encoder GPIO output, selects TV encoder clock source and contains
-additional TV TCON and DSI gates.
-
-It allows display pipeline to be configured in very different ways:
-
-                                / LCD0/LVDS0
-                 / [0] TCON-LCD0
-                 |              \ MIPI DSI
- mixer0          |
-        \        / [1] TCON-LCD1 - LCD1/LVDS1
-         TCON-TOP
-        /        \ [2] TCON-TV0 [0] - TVE0/RGB
- mixer1          |                  \
-                 |                   TCON-TOP - HDMI
-                 |                  /
-                 \ [3] TCON-TV1 [1] - TVE1/RGB
-
-Note that both TCON TOP references same physical unit. Both mixers can be
-connected to any TCON. Not all TCON TOP variants support all features.
-
-Required properties:
-  - compatible: value must be one of:
-    * allwinner,sun8i-r40-tcon-top
-    * allwinner,sun50i-h6-tcon-top
-  - reg: base address and size of the memory-mapped region.
-  - clocks: phandle to the clocks feeding the TCON TOP
-    * bus: TCON TOP interface clock
-    * tcon-tv0: TCON TV0 clock
-    * tve0: TVE0 clock (R40 only)
-    * tcon-tv1: TCON TV1 clock (R40 only)
-    * tve1: TVE0 clock (R40 only)
-    * dsi: MIPI DSI clock (R40 only)
-  - clock-names: clock name mentioned above
-  - resets: phandle to the reset line driving the TCON TOP
-  - #clock-cells : must contain 1
-  - clock-output-names: Names of clocks created for TCON TV0 channel clock,
-    TCON TV1 channel clock (R40 only) and DSI channel clock (R40 only), in
-    that order.
-
-- ports: A ports node with endpoint definitions as defined in
-    Documentation/devicetree/bindings/media/video-interfaces.txt. 6 ports should
-    be defined:
-    * port 0 is input for mixer0 mux
-    * port 1 is output for mixer0 mux
-    * port 2 is input for mixer1 mux
-    * port 3 is output for mixer1 mux
-    * port 4 is input for HDMI mux
-    * port 5 is output for HDMI mux
-    All output endpoints for mixer muxes and input endpoints for HDMI mux should
-    have reg property with the id of the target TCON, as shown in above graph
-    (0-3 for mixer muxes and 0-1 for HDMI mux). All ports should have only one
-    endpoint connected to remote endpoint.
-
-DRC
----
-
-The DRC (Dynamic Range Controller), found in the latest Allwinner SoCs
-(A31, A23, A33, A80), allows to dynamically adjust pixel
-brightness/contrast based on histogram measurements for LCD content
-adaptive backlight control.
-
-
-Required properties:
-  - compatible: value must be one of:
-    * allwinner,sun6i-a31-drc
-    * allwinner,sun6i-a31s-drc
-    * allwinner,sun8i-a23-drc
-    * allwinner,sun8i-a33-drc
-    * allwinner,sun9i-a80-drc
-  - reg: base address and size of the memory-mapped region.
-  - interrupts: interrupt associated to this IP
-  - clocks: phandles to the clocks feeding the DRC
-    * ahb: the DRC interface clock
-    * mod: the DRC module clock
-    * ram: the DRC DRAM clock
-  - clock-names: the clock names mentioned above
-  - resets: phandles to the reset line driving the DRC
-
-- ports: A ports node with endpoint definitions as defined in
-  Documentation/devicetree/bindings/media/video-interfaces.txt. The
-  first port should be the input endpoints, the second one the outputs
-
-Display Engine Backend
-----------------------
-
-The display engine backend exposes layers and sprites to the
-system.
-
-Required properties:
-  - compatible: value must be one of:
-    * allwinner,sun4i-a10-display-backend
-    * allwinner,sun5i-a13-display-backend
-    * allwinner,sun6i-a31-display-backend
-    * allwinner,sun7i-a20-display-backend
-    * allwinner,sun8i-a23-display-backend
-    * allwinner,sun8i-a33-display-backend
-    * allwinner,sun9i-a80-display-backend
-  - reg: base address and size of the memory-mapped region.
-  - interrupts: interrupt associated to this IP
-  - clocks: phandles to the clocks feeding the frontend and backend
-    * ahb: the backend interface clock
-    * mod: the backend module clock
-    * ram: the backend DRAM clock
-  - clock-names: the clock names mentioned above
-  - resets: phandles to the reset controllers driving the backend
-
-- ports: A ports node with endpoint definitions as defined in
-  Documentation/devicetree/bindings/media/video-interfaces.txt. The
-  first port should be the input endpoints, the second one the output
-
-On the A33, some additional properties are required:
-  - reg needs to have an additional region corresponding to the SAT
-  - reg-names need to be set, with "be" and "sat"
-  - clocks and clock-names need to have a phandle to the SAT bus
-    clocks, whose name will be "sat"
-  - resets and reset-names need to have a phandle to the SAT bus
-    resets, whose name will be "sat"
-
-DEU
----
-
-The DEU (Detail Enhancement Unit), found in the Allwinner A80 SoC,
-can sharpen the display content in both luma and chroma channels.
-
-Required properties:
-  - compatible: value must be one of:
-    * allwinner,sun9i-a80-deu
-  - reg: base address and size of the memory-mapped region.
-  - interrupts: interrupt associated to this IP
-  - clocks: phandles to the clocks feeding the DEU
-    * ahb: the DEU interface clock
-    * mod: the DEU module clock
-    * ram: the DEU DRAM clock
-  - clock-names: the clock names mentioned above
-  - resets: phandles to the reset line driving the DEU
-
-- ports: A ports node with endpoint definitions as defined in
-  Documentation/devicetree/bindings/media/video-interfaces.txt. The
-  first port should be the input endpoints, the second one the outputs
-
-Display Engine Frontend
------------------------
-
-The display engine frontend does formats conversion, scaling,
-deinterlacing and color space conversion.
-
-Required properties:
-  - compatible: value must be one of:
-    * allwinner,sun4i-a10-display-frontend
-    * allwinner,sun5i-a13-display-frontend
-    * allwinner,sun6i-a31-display-frontend
-    * allwinner,sun7i-a20-display-frontend
-    * allwinner,sun8i-a23-display-frontend
-    * allwinner,sun8i-a33-display-frontend
-    * allwinner,sun9i-a80-display-frontend
-  - reg: base address and size of the memory-mapped region.
-  - interrupts: interrupt associated to this IP
-  - clocks: phandles to the clocks feeding the frontend and backend
-    * ahb: the backend interface clock
-    * mod: the backend module clock
-    * ram: the backend DRAM clock
-  - clock-names: the clock names mentioned above
-  - resets: phandles to the reset controllers driving the backend
-
-- ports: A ports node with endpoint definitions as defined in
-  Documentation/devicetree/bindings/media/video-interfaces.txt. The
-  first port should be the input endpoints, the second one the outputs
-
-Display Engine 2.0 Mixer
-------------------------
-
-The DE2 mixer have many functionalities, currently only layer blending is
-supported.
-
-Required properties:
-  - compatible: value must be one of:
-    * allwinner,sun8i-a83t-de2-mixer-0
-    * allwinner,sun8i-a83t-de2-mixer-1
-    * allwinner,sun8i-h3-de2-mixer-0
-    * allwinner,sun8i-r40-de2-mixer-0
-    * allwinner,sun8i-r40-de2-mixer-1
-    * allwinner,sun8i-v3s-de2-mixer
-    * allwinner,sun50i-a64-de2-mixer-0
-    * allwinner,sun50i-a64-de2-mixer-1
-    * allwinner,sun50i-h6-de3-mixer-0
-  - reg: base address and size of the memory-mapped region.
-  - clocks: phandles to the clocks feeding the mixer
-    * bus: the mixer interface clock
-    * mod: the mixer module clock
-  - clock-names: the clock names mentioned above
-  - resets: phandles to the reset controllers driving the mixer
-
-- ports: A ports node with endpoint definitions as defined in
-  Documentation/devicetree/bindings/media/video-interfaces.txt. The
-  first port should be the input endpoints, the second one the output
-
-
-Display Engine Pipeline
------------------------
-
-The display engine pipeline (and its entry point, since it can be
-either directly the backend or the frontend) is represented as an
-extra node.
-
-Required properties:
-  - compatible: value must be one of:
-    * allwinner,sun4i-a10-display-engine
-    * allwinner,sun5i-a10s-display-engine
-    * allwinner,sun5i-a13-display-engine
-    * allwinner,sun6i-a31-display-engine
-    * allwinner,sun6i-a31s-display-engine
-    * allwinner,sun7i-a20-display-engine
-    * allwinner,sun8i-a23-display-engine
-    * allwinner,sun8i-a33-display-engine
-    * allwinner,sun8i-a83t-display-engine
-    * allwinner,sun8i-h3-display-engine
-    * allwinner,sun8i-r40-display-engine
-    * allwinner,sun8i-v3s-display-engine
-    * allwinner,sun9i-a80-display-engine
-    * allwinner,sun50i-a64-display-engine
-    * allwinner,sun50i-h6-display-engine
-
-  - allwinner,pipelines: list of phandle to the display engine
-    frontends (DE 1.0) or mixers (DE 2.0/3.0) available.
-
-Example:
-
-panel: panel {
-	compatible = "olimex,lcd-olinuxino-43-ts";
-	#address-cells = <1>;
-	#size-cells = <0>;
-
-	port {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		panel_input: endpoint {
-			remote-endpoint = <&tcon0_out_panel>;
-		};
-	};
-};
-
-connector {
-	compatible = "hdmi-connector";
-	type = "a";
-
-	port {
-		hdmi_con_in: endpoint {
-			remote-endpoint = <&hdmi_out_con>;
-		};
-	};
-};
-
-hdmi: hdmi@1c16000 {
-	compatible = "allwinner,sun5i-a10s-hdmi";
-	reg = <0x01c16000 0x1000>;
-	interrupts = <58>;
-	clocks = <&ccu CLK_AHB_HDMI>, <&ccu CLK_HDMI>,
-		 <&ccu CLK_PLL_VIDEO0_2X>,
-		 <&ccu CLK_PLL_VIDEO1_2X>;
-	clock-names = "ahb", "mod", "pll-0", "pll-1";
-	dmas = <&dma SUN4I_DMA_NORMAL 16>,
-	       <&dma SUN4I_DMA_NORMAL 16>,
-	       <&dma SUN4I_DMA_DEDICATED 24>;
-	dma-names = "ddc-tx", "ddc-rx", "audio-tx";
-
-	ports {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		port@0 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <0>;
-
-			hdmi_in_tcon0: endpoint {
-				remote-endpoint = <&tcon0_out_hdmi>;
-			};
-		};
-
-		port@1 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <1>;
-
-			hdmi_out_con: endpoint {
-				remote-endpoint = <&hdmi_con_in>;
-			};
-		};
-	};
-};
-
-tve0: tv-encoder@1c0a000 {
-	compatible = "allwinner,sun4i-a10-tv-encoder";
-	reg = <0x01c0a000 0x1000>;
-	clocks = <&ahb_gates 34>;
-	resets = <&tcon_ch0_clk 0>;
-
-	port {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		tve0_in_tcon0: endpoint@0 {
-			reg = <0>;
-			remote-endpoint = <&tcon0_out_tve0>;
-		};
-	};
-};
-
-tcon0: lcd-controller@1c0c000 {
-	compatible = "allwinner,sun5i-a13-tcon";
-	reg = <0x01c0c000 0x1000>;
-	interrupts = <44>;
-	resets = <&tcon_ch0_clk 1>;
-	reset-names = "lcd";
-	clocks = <&ahb_gates 36>,
-		 <&tcon_ch0_clk>,
-		 <&tcon_ch1_clk>;
-	clock-names = "ahb",
-		      "tcon-ch0",
-		      "tcon-ch1";
-	clock-output-names = "tcon-pixel-clock";
-
-	ports {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		tcon0_in: port@0 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <0>;
-
-			tcon0_in_be0: endpoint@0 {
-				reg = <0>;
-				remote-endpoint = <&be0_out_tcon0>;
-			};
-		};
-
-		tcon0_out: port@1 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <1>;
-
-			tcon0_out_panel: endpoint@0 {
-				reg = <0>;
-				remote-endpoint = <&panel_input>;
-			};
-
-			tcon0_out_tve0: endpoint@1 {
-				reg = <1>;
-				remote-endpoint = <&tve0_in_tcon0>;
-			};
-		};
-	};
-};
-
-fe0: display-frontend@1e00000 {
-	compatible = "allwinner,sun5i-a13-display-frontend";
-	reg = <0x01e00000 0x20000>;
-	interrupts = <47>;
-	clocks = <&ahb_gates 46>, <&de_fe_clk>,
-		 <&dram_gates 25>;
-	clock-names = "ahb", "mod",
-		      "ram";
-	resets = <&de_fe_clk>;
-
-	ports {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		fe0_out: port@1 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <1>;
-
-			fe0_out_be0: endpoint {
-				remote-endpoint = <&be0_in_fe0>;
-			};
-		};
-	};
-};
-
-be0: display-backend@1e60000 {
-	compatible = "allwinner,sun5i-a13-display-backend";
-	reg = <0x01e60000 0x10000>;
-	interrupts = <47>;
-	clocks = <&ahb_gates 44>, <&de_be_clk>,
-		 <&dram_gates 26>;
-	clock-names = "ahb", "mod",
-		      "ram";
-	resets = <&de_be_clk>;
-
-	ports {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		be0_in: port@0 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <0>;
-
-			be0_in_fe0: endpoint@0 {
-				reg = <0>;
-				remote-endpoint = <&fe0_out_be0>;
-			};
-		};
-
-		be0_out: port@1 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <1>;
-
-			be0_out_tcon0: endpoint@0 {
-				reg = <0>;
-				remote-endpoint = <&tcon0_in_be0>;
-			};
-		};
-	};
-};
-
-display-engine {
-	compatible = "allwinner,sun5i-a13-display-engine";
-	allwinner,pipelines = <&fe0>;
-};
diff --git a/Documentation/devicetree/bindings/display/tilcdc/tfp410.txt b/Documentation/devicetree/bindings/display/tilcdc/tfp410.txt
deleted file mode 100644
index a58ae77..0000000
--- a/Documentation/devicetree/bindings/display/tilcdc/tfp410.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Device-Tree bindings for tilcdc DRM TFP410 output driver
-
-Required properties:
- - compatible: value should be "ti,tilcdc,tfp410".
- - i2c: the phandle for the i2c device to use for DDC
-
-Recommended properties:
- - pinctrl-names, pinctrl-0: the pincontrol settings to configure
-   muxing properly for pins that connect to TFP410 device
- - powerdn-gpio: the powerdown GPIO, pulled low to power down the
-   TFP410 device (for DPMS_OFF)
-
-Example:
-
-	dvicape {
-		compatible = "ti,tilcdc,tfp410";
-		i2c = <&i2c2>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&bone_dvi_cape_dvi_00A1_pins>;
-		powerdn-gpio = <&gpio2 31 0>;
-	};
diff --git a/Documentation/devicetree/bindings/dma/atmel-xdma.txt b/Documentation/devicetree/bindings/dma/atmel-xdma.txt
index 0eb2b32..4dc398e 100644
--- a/Documentation/devicetree/bindings/dma/atmel-xdma.txt
+++ b/Documentation/devicetree/bindings/dma/atmel-xdma.txt
@@ -2,9 +2,7 @@
 
 * XDMA Controller
 Required properties:
-- compatible: Should be "atmel,<chip>-dma".
-  <chip> compatible description:
-  - sama5d4: first SoC adding the XDMAC
+- compatible: Should be "atmel,sama5d4-dma" or "microchip,sam9x60-dma".
 - reg: Should contain DMA registers location and length.
 - interrupts: Should contain DMA interrupt.
 - #dma-cells: Must be <1>, used to represent the number of integer cells in
diff --git a/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml b/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
new file mode 100644
index 0000000..0c0ac11
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/st,stm32-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 DMA Controller bindings
+
+description: |
+  The STM32 DMA is a general-purpose direct memory access controller capable of
+  supporting 8 independent DMA channels. Each channel can have up to 8 requests.
+  DMA clients connected to the STM32 DMA controller must use the format
+  described in the dma.txt file, using a four-cell specifier for each
+  channel: a phandle to the DMA controller plus the following four integer cells:
+    1. The channel id
+    2. The request line number
+    3. A 32bit mask specifying the DMA channel configuration, which is
+      device dependent:
+        -bit 9: Peripheral Increment Address
+          0x0: no address increment between transfers
+          0x1: increment address between transfers
+        -bit 10: Memory Increment Address
+          0x0: no address increment between transfers
+          0x1: increment address between transfers
+        -bit 15: Peripheral Increment Offset Size
+          0x0: offset size is linked to the peripheral bus width
+          0x1: offset size is fixed to 4 (32-bit alignment)
+        -bit 16-17: Priority level
+          0x0: low
+          0x1: medium
+          0x2: high
+          0x3: very high
+    4. A 32bit bitfield value specifying DMA features which are device dependent:
+       -bit 0-1: DMA FIFO threshold selection
+         0x0: 1/4 full FIFO
+         0x1: 1/2 full FIFO
+         0x2: 3/4 full FIFO
+         0x3: full FIFO
+
+maintainers:
+  - Amelie Delaunay <amelie.delaunay@st.com>
+
+allOf:
+  - $ref: "dma-controller.yaml#"
+
+properties:
+  "#dma-cells":
+    const: 4
+
+  compatible:
+    const: st,stm32-dma
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 8
+    description: Should contain all of the per-channel DMA
+      interrupts in ascending order with respect to the
+      DMA channel index.
+
+  resets:
+    maxItems: 1
+
+  st,mem2mem:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description: if defined, it indicates that the controller
+      supports memory-to-memory transfer
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - interrupts
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/stm32mp1-clks.h>
+    #include <dt-bindings/reset/stm32mp1-resets.h>
+    dma-controller@40026400 {
+      compatible = "st,stm32-dma";
+      reg = <0x40026400 0x400>;
+      interrupts = <56>,
+                   <57>,
+                   <58>,
+                   <59>,
+                   <60>,
+                   <68>,
+                   <69>,
+                   <70>;
+      clocks = <&clk_hclk>;
+      #dma-cells = <4>;
+      st,mem2mem;
+      resets = <&rcc 150>;
+      dma-requests = <8>;
+    };
+
+...
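
The YAML schema above drops the DMA client example that the removed
stm32-dma.txt carried; for reference, that client node, using the four-cell
specifier described in the schema, looked like this:

	usart1: serial@40011000 {
		compatible = "st,stm32-uart";
		reg = <0x40011000 0x400>;
		interrupts = <37>;
		clocks = <&clk_pclk2>;
		dmas = <&dma2 2 4 0x10400 0x3>,
		       <&dma2 7 5 0x10200 0x3>;
		dma-names = "rx", "tx";
	};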
diff --git a/Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml b/Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml
new file mode 100644
index 0000000..915bc4a
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/st,stm32-dmamux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 DMA MUX (DMA request router) bindings
+
+maintainers:
+  - Amelie Delaunay <amelie.delaunay@st.com>
+
+allOf:
+  - $ref: "dma-router.yaml#"
+
+properties:
+  "#dma-cells":
+    const: 3
+
+  compatible:
+    const: st,stm32h7-dmamux
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - dma-masters
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/stm32mp1-clks.h>
+    #include <dt-bindings/reset/stm32mp1-resets.h>
+    dma-router@40020800 {
+      compatible = "st,stm32h7-dmamux";
+      reg = <0x40020800 0x3c>;
+      #dma-cells = <3>;
+      dma-requests = <128>;
+      dma-channels = <16>;
+      dma-masters = <&dma1 &dma2>;
+      clocks = <&timer_clk>;
+    };
+
+...
+
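
As with the DMA schema, the client example from the removed stm32-dmamux.txt
remains a useful reference for the three-cell specifier routed through the
DMAMUX; it looked like this:

	usart1: serial@40011000 {
		compatible = "st,stm32-usart", "st,stm32-uart";
		reg = <0x40011000 0x400>;
		interrupts = <37>;
		clocks = <&timer_clk>;
		dmas = <&dmamux1 41 0x414 0>,
		       <&dmamux1 42 0x414 0>;
		dma-names = "rx", "tx";
	};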
diff --git a/Documentation/devicetree/bindings/dma/st,stm32-mdma.yaml b/Documentation/devicetree/bindings/dma/st,stm32-mdma.yaml
new file mode 100644
index 0000000..c66543d
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/st,stm32-mdma.yaml
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/st,stm32-mdma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 MDMA Controller bindings
+
+description: |
+  The STM32 MDMA is a general-purpose direct memory access controller capable of
+  supporting 64 independent DMA channels with 256 HW requests.
+  DMA clients connected to the STM32 MDMA controller must use the format
+  described in the dma.txt file, using a five-cell specifier for each channel:
+  a phandle to the MDMA controller plus the following five integer cells:
+    1. The request line number
+    2. The priority level
+      0x0: Low
+      0x1: Medium
+      0x2: High
+      0x3: Very high
+    3. A 32bit mask specifying the DMA channel configuration
+      -bit 0-1: Source increment mode
+        0x0: Source address pointer is fixed
+        0x2: Source address pointer is incremented after each data transfer
+        0x3: Source address pointer is decremented after each data transfer
+      -bit 2-3: Destination increment mode
+        0x0: Destination address pointer is fixed
+        0x2: Destination address pointer is incremented after each data transfer
+        0x3: Destination address pointer is decremented after each data transfer
+      -bit 8-9: Source increment offset size
+        0x0: byte (8bit)
+        0x1: half-word (16bit)
+        0x2: word (32bit)
+        0x3: double-word (64bit)
+      -bit 10-11: Destination increment offset size
+        0x0: byte (8bit)
+        0x1: half-word (16bit)
+        0x2: word (32bit)
+        0x3: double-word (64bit)
+      -bit 18-25: The number of bytes to be transferred in a single transfer
+                  (min = 1 byte, max = 128 bytes)
+      -bit 28-29: Trigger Mode
+        0x0: Each MDMA request triggers a buffer transfer (max 128 bytes)
+        0x1: Each MDMA request triggers a block transfer (max 64K bytes)
+        0x2: Each MDMA request triggers a repeated block transfer
+        0x3: Each MDMA request triggers a linked list transfer
+    4. A 32bit value specifying the register to be used to acknowledge the request
+       if no HW ack signal is used by the MDMA client
+    5. A 32bit mask specifying the value to be written to acknowledge the request
+       if no HW ack signal is used by the MDMA client
+
+maintainers:
+  - Amelie Delaunay <amelie.delaunay@st.com>
+
+allOf:
+  - $ref: "dma-controller.yaml#"
+
+properties:
+  "#dma-cells":
+    const: 5
+
+  compatible:
+    const: st,stm32h7-mdma
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  st,ahb-addr-masks:
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+    description: Array of u32 masks listing the memory devices addressed via the AHB bus.
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - interrupts
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/stm32mp1-clks.h>
+    #include <dt-bindings/reset/stm32mp1-resets.h>
+    dma-controller@52000000 {
+      compatible = "st,stm32h7-mdma";
+      reg = <0x52000000 0x1000>;
+      interrupts = <122>;
+      clocks = <&timer_clk>;
+      resets = <&rcc 992>;
+      #dma-cells = <5>;
+      dma-channels = <16>;
+      dma-requests = <32>;
+      st,ahb-addr-masks = <0x20000000>, <0x00000000>;
+    };
+
+...
+
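
For reference, the five-cell client specifier described above was illustrated
in the removed stm32-mdma.txt with the following I2C node:

	i2c4: i2c@5c002000 {
		compatible = "st,stm32f7-i2c";
		reg = <0x5c002000 0x400>;
		interrupts = <95>,
			     <96>;
		clocks = <&timer_clk>;
		#address-cells = <1>;
		#size-cells = <0>;
		dmas = <&mdma1 36 0x0 0x40008 0x0 0x0>,
		       <&mdma1 37 0x0 0x40002 0x0 0x0>;
		dma-names = "rx", "tx";
	};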
diff --git a/Documentation/devicetree/bindings/dma/stm32-dma.txt b/Documentation/devicetree/bindings/dma/stm32-dma.txt
deleted file mode 100644
index c5f5190..0000000
--- a/Documentation/devicetree/bindings/dma/stm32-dma.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-* STMicroelectronics STM32 DMA controller
-
-The STM32 DMA is a general-purpose direct memory access controller capable of
-supporting 8 independent DMA channels. Each channel can have up to 8 requests.
-
-Required properties:
-- compatible: Should be "st,stm32-dma"
-- reg: Should contain DMA registers location and length. This should include
-  all of the per-channel registers.
-- interrupts: Should contain all of the per-channel DMA interrupts in
-  ascending order with respect to the DMA channel index.
-- clocks: Should contain the input clock of the DMA instance.
-- #dma-cells : Must be <4>. See DMA client paragraph for more details.
-
-Optional properties:
-- dma-requests : Number of DMA requests supported.
-- resets: Reference to a reset controller asserting the DMA controller
-- st,mem2mem: boolean; if defined, it indicates that the controller supports
-  memory-to-memory transfer
-
-Example:
-
-	dma2: dma-controller@40026400 {
-		compatible = "st,stm32-dma";
-		reg = <0x40026400 0x400>;
-		interrupts = <56>,
-			     <57>,
-			     <58>,
-			     <59>,
-			     <60>,
-			     <68>,
-			     <69>,
-			     <70>;
-		clocks = <&clk_hclk>;
-		#dma-cells = <4>;
-		st,mem2mem;
-		resets = <&rcc 150>;
-		dma-requests = <8>;
-	};
-
-* DMA client
-
-DMA clients connected to the STM32 DMA controller must use the format
-described in the dma.txt file, using a four-cell specifier for each
-channel: a phandle to the DMA controller plus the following four integer cells:
-
-1. The channel id
-2. The request line number
-3. A 32bit mask specifying the DMA channel configuration which are device
-   dependent:
-  -bit 9: Peripheral Increment Address
-	0x0: no address increment between transfers
-	0x1: increment address between transfers
- -bit 10: Memory Increment Address
-	0x0: no address increment between transfers
-	0x1: increment address between transfers
- -bit 15: Peripheral Increment Offset Size
-	0x0: offset size is linked to the peripheral bus width
-	0x1: offset size is fixed to 4 (32-bit alignment)
- -bit 16-17: Priority level
-	0x0: low
-	0x1: medium
-	0x2: high
-	0x3: very high
-4. A 32bit bitfield value specifying DMA features which are device dependent:
- -bit 0-1: DMA FIFO threshold selection
-	0x0: 1/4 full FIFO
-	0x1: 1/2 full FIFO
-	0x2: 3/4 full FIFO
-	0x3: full FIFO
-
-
-Example:
-
-	usart1: serial@40011000 {
-		compatible = "st,stm32-uart";
-		reg = <0x40011000 0x400>;
-		interrupts = <37>;
-		clocks = <&clk_pclk2>;
-		dmas = <&dma2 2 4 0x10400 0x3>,
-		       <&dma2 7 5 0x10200 0x3>;
-		dma-names = "rx", "tx";
-	};
diff --git a/Documentation/devicetree/bindings/dma/stm32-dmamux.txt b/Documentation/devicetree/bindings/dma/stm32-dmamux.txt
deleted file mode 100644
index 1b893b2..0000000
--- a/Documentation/devicetree/bindings/dma/stm32-dmamux.txt
+++ /dev/null
@@ -1,84 +0,0 @@
-STM32 DMA MUX (DMA request router)
-
-Required properties:
-- compatible:	"st,stm32h7-dmamux"
-- reg:		Memory map for accessing module
-- #dma-cells:	Should be set to <3>.
-		First parameter is request line number.
-		Second is DMA channel configuration
-		Third is Fifo threshold
-		For more details about the three cells, please see
-		stm32-dma.txt documentation binding file
-- dma-masters:	Phandle pointing to the DMA controllers.
-		Several controllers are allowed. Only "st,stm32-dma" DMA
-		compatible are supported.
-
-Optional properties:
-- dma-channels : Number of DMA requests supported.
-- dma-requests : Number of DMAMUX requests supported.
-- resets: Reference to a reset controller asserting the DMA controller
-- clocks: Input clock of the DMAMUX instance.
-
-Example:
-
-/* DMA controller 1 */
-dma1: dma-controller@40020000 {
-	compatible = "st,stm32-dma";
-	reg = <0x40020000 0x400>;
-	interrupts = <11>,
-		     <12>,
-		     <13>,
-		     <14>,
-		     <15>,
-		     <16>,
-		     <17>,
-		     <47>;
-	clocks = <&timer_clk>;
-	#dma-cells = <4>;
-	st,mem2mem;
-	resets = <&rcc 150>;
-	dma-channels = <8>;
-	dma-requests = <8>;
-};
-
-/* DMA controller 1 */
-dma2: dma@40020400 {
-	compatible = "st,stm32-dma";
-	reg = <0x40020400 0x400>;
-	interrupts = <56>,
-		     <57>,
-		     <58>,
-		     <59>,
-		     <60>,
-		     <68>,
-		     <69>,
-		     <70>;
-	clocks = <&timer_clk>;
-	#dma-cells = <4>;
-	st,mem2mem;
-	resets = <&rcc 150>;
-	dma-channels = <8>;
-	dma-requests = <8>;
-};
-
-/* DMA mux */
-dmamux1: dma-router@40020800 {
-	compatible = "st,stm32h7-dmamux";
-	reg = <0x40020800 0x3c>;
-	#dma-cells = <3>;
-	dma-requests = <128>;
-	dma-channels = <16>;
-	dma-masters = <&dma1 &dma2>;
-	clocks = <&timer_clk>;
-};
-
-/* DMA client */
-usart1: serial@40011000 {
-	compatible = "st,stm32-usart", "st,stm32-uart";
-	reg = <0x40011000 0x400>;
-	interrupts = <37>;
-	clocks = <&timer_clk>;
-	dmas = <&dmamux1 41 0x414 0>,
-	       <&dmamux1 42 0x414 0>;
-	dma-names = "rx", "tx";
-};
diff --git a/Documentation/devicetree/bindings/dma/stm32-mdma.txt b/Documentation/devicetree/bindings/dma/stm32-mdma.txt
deleted file mode 100644
index d18772d..0000000
--- a/Documentation/devicetree/bindings/dma/stm32-mdma.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-* STMicroelectronics STM32 MDMA controller
-
-The STM32 MDMA is a general-purpose direct memory access controller capable of
-supporting 64 independent DMA channels with 256 HW requests.
-
-Required properties:
-- compatible: Should be "st,stm32h7-mdma"
-- reg: Should contain MDMA registers location and length. This should include
-  all of the per-channel registers.
-- interrupts: Should contain the MDMA interrupt.
-- clocks: Should contain the input clock of the DMA instance.
-- resets: Reference to a reset controller asserting the DMA controller.
-- #dma-cells : Must be <5>. See DMA client paragraph for more details.
-
-Optional properties:
-- dma-channels: Number of DMA channels supported by the controller.
-- dma-requests: Number of DMA request signals supported by the controller.
-- st,ahb-addr-masks: Array of u32 mask to list memory devices addressed via
-  AHB bus.
-
-Example:
-
-	mdma1: dma@52000000 {
-		compatible = "st,stm32h7-mdma";
-		reg = <0x52000000 0x1000>;
-		interrupts = <122>;
-		clocks = <&timer_clk>;
-		resets = <&rcc 992>;
-		#dma-cells = <5>;
-		dma-channels = <16>;
-		dma-requests = <32>;
-		st,ahb-addr-masks = <0x20000000>, <0x00000000>;
-	};
-
-* DMA client
-
-DMA clients connected to the STM32 MDMA controller must use the format
-described in the dma.txt file, using a five-cell specifier for each channel:
-a phandle to the MDMA controller plus the following five integer cells:
-
-1. The request line number
-2. The priority level
-	0x00: Low
-	0x01: Medium
-	0x10: High
-	0x11: Very high
-3. A 32bit mask specifying the DMA channel configuration
- -bit 0-1: Source increment mode
-	0x00: Source address pointer is fixed
-	0x10: Source address pointer is incremented after each data transfer
-	0x11: Source address pointer is decremented after each data transfer
- -bit 2-3: Destination increment mode
-	0x00: Destination address pointer is fixed
-	0x10: Destination address pointer is incremented after each data
-	transfer
-	0x11: Destination address pointer is decremented after each data
-	transfer
- -bit 8-9: Source increment offset size
-	0x00: byte (8bit)
-	0x01: half-word (16bit)
-	0x10: word (32bit)
-	0x11: double-word (64bit)
- -bit 10-11: Destination increment offset size
-	0x00: byte (8bit)
-	0x01: half-word (16bit)
-	0x10: word (32bit)
-	0x11: double-word (64bit)
--bit 25-18: The number of bytes to be transferred in a single transfer
-	(min = 1 byte, max = 128 bytes)
--bit 29:28: Trigger Mode
-	0x00: Each MDMA request triggers a buffer transfer (max 128 bytes)
-	0x01: Each MDMA request triggers a block transfer (max 64K bytes)
-	0x10: Each MDMA request triggers a repeated block transfer
-	0x11: Each MDMA request triggers a linked list transfer
-4. A 32bit value specifying the register to be used to acknowledge the request
-   if no HW ack signal is used by the MDMA client
-5. A 32bit mask specifying the value to be written to acknowledge the request
-   if no HW ack signal is used by the MDMA client
-
-Example:
-
-	i2c4: i2c@5c002000 {
-		compatible = "st,stm32f7-i2c";
-		reg = <0x5c002000 0x400>;
-		interrupts = <95>,
-			     <96>;
-		clocks = <&timer_clk>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		dmas = <&mdma1 36 0x0 0x40008 0x0 0x0>,
-		       <&mdma1 37 0x0 0x40002 0x0 0x0>;
-		dma-names = "rx", "tx";
-		status = "disabled";
-	};
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml
index e877856..0f6d8db 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at24.yaml
@@ -145,10 +145,7 @@
       over reads to the next slave address. Please consult the manual of
       your device.
 
-  wp-gpios:
-    description:
-      GPIO to which the write-protect pin of the chip is connected.
-    maxItems: 1
+  wp-gpios: true
 
   address-width:
     allOf:
@@ -167,6 +164,10 @@
     minimum: 1
     maximum: 8
 
+  vcc-supply:
+    description:
+      phandle of the regulator that provides the supply voltage.
+
 required:
   - compatible
   - reg
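
A minimal sketch of the new vcc-supply property in use; the regulator and
GPIO phandle labels below are hypothetical:

	eeprom@50 {
		compatible = "atmel,24c32";
		reg = <0x50>;
		vcc-supply = <&reg_3v3>;
		wp-gpios = <&gpio1 3 0>;
	};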
diff --git a/Documentation/devicetree/bindings/eeprom/at25.txt b/Documentation/devicetree/bindings/eeprom/at25.txt
index 42577dd..fcacd97 100644
--- a/Documentation/devicetree/bindings/eeprom/at25.txt
+++ b/Documentation/devicetree/bindings/eeprom/at25.txt
@@ -20,6 +20,7 @@
 - spi-cpha : SPI shifted clock phase, as per spi-bus bindings.
 - spi-cpol : SPI inverse clock polarity, as per spi-bus bindings.
 - read-only : this parameter-less property disables writes to the eeprom
+- wp-gpios : GPIO to which the write-protect pin of the chip is connected
 
 Obsolete legacy properties can be used in place of "size", "pagesize",
 "address-width", and "read-only":
@@ -36,6 +37,7 @@
 		spi-max-frequency = <5000000>;
 		spi-cpha;
 		spi-cpol;
+		wp-gpios = <&gpio1 3 0>;
 
 		pagesize = <64>;
 		size = <32768>;
diff --git a/Documentation/devicetree/bindings/gpio/qcom,wcd934x-gpio.yaml b/Documentation/devicetree/bindings/gpio/qcom,wcd934x-gpio.yaml
new file mode 100644
index 0000000..32a566e
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/qcom,wcd934x-gpio.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/qcom,wcd934x-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: WCD9340/WCD9341 GPIO controller
+
+maintainers:
+  - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+description: |
+  The Qualcomm Technologies Inc WCD9340/WCD9341 Audio Codec has an integrated
+  GPIO controller that controls 5 GPIOs on the chip.
+
+properties:
+  compatible:
+    enum:
+      - qcom,wcd9340-gpio
+      - qcom,wcd9341-gpio
+
+  reg:
+    maxItems: 1
+
+  gpio-controller: true
+
+  '#gpio-cells':
+    const: 2
+
+required:
+  - compatible
+  - reg
+  - gpio-controller
+  - "#gpio-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    wcdgpio: gpio@42 {
+        compatible = "qcom,wcd9340-gpio";
+        reg = <0x042 0x2>;
+        gpio-controller;
+        #gpio-cells = <2>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index 41e5fed..10dce84 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -18,7 +18,8 @@
     - "renesas,gpio-r8a7793": for R8A7793 (R-Car M2-N) compatible GPIO controller.
     - "renesas,gpio-r8a7794": for R8A7794 (R-Car E2) compatible GPIO controller.
     - "renesas,gpio-r8a7795": for R8A7795 (R-Car H3) compatible GPIO controller.
-    - "renesas,gpio-r8a7796": for R8A7796 (R-Car M3-W) compatible GPIO controller.
+    - "renesas,gpio-r8a7796": for R8A77960 (R-Car M3-W) compatible GPIO controller.
+    - "renesas,gpio-r8a77961": for R8A77961 (R-Car M3-W+) compatible GPIO controller.
     - "renesas,gpio-r8a77965": for R8A77965 (R-Car M3-N) compatible GPIO controller.
     - "renesas,gpio-r8a77970": for R8A77970 (R-Car V3M) compatible GPIO controller.
     - "renesas,gpio-r8a77980": for R8A77980 (R-Car V3H) compatible GPIO controller.
diff --git a/Documentation/devicetree/bindings/gpio/xylon,logicvc-gpio.yaml b/Documentation/devicetree/bindings/gpio/xylon,logicvc-gpio.yaml
new file mode 100644
index 0000000..d102888
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/xylon,logicvc-gpio.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Bootlin
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/gpio/xylon,logicvc-gpio.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Xylon LogiCVC GPIO controller
+
+maintainers:
+  - Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+
+description: |
+  This binding describes the GPIO block included in the LogiCVC display
+  controller. These GPIOs are meant to be used for controlling
+  display-related signals.
+
+  The controller exposes GPIOs from the display and power control registers,
+  which are mapped by the driver as follows:
+  - GPIO[4:0] (display control) mapped to index 0-4
+  - EN_BLIGHT (power control) mapped to index 5
+  - EN_VDD (power control) mapped to index 6
+  - EN_VEE (power control) mapped to index 7
+  - V_EN (power control) mapped to index 8
+
+properties:
+  $nodename:
+    pattern: "^gpio@[0-9a-f]+$"
+
+  compatible:
+    enum:
+      - xylon,logicvc-3.02.a-gpio
+
+  reg:
+    maxItems: 1
+
+  "#gpio-cells":
+    const: 2
+
+  gpio-controller: true
+
+  gpio-line-names:
+    minItems: 1
+    maxItems: 9
+
+required:
+  - compatible
+  - reg
+  - "#gpio-cells"
+  - gpio-controller
+
+examples:
+  - |
+    logicvc: logicvc@43c00000 {
+      compatible = "xylon,logicvc-3.02.a", "syscon", "simple-mfd";
+      reg = <0x43c00000 0x6000>;
+
+      #address-cells = <1>;
+      #size-cells = <1>;
+
+      logicvc_gpio: gpio@40 {
+        compatible = "xylon,logicvc-3.02.a-gpio";
+        reg = <0x40 0x40>;
+        gpio-controller;
+        #gpio-cells = <2>;
+        gpio-line-names = "GPIO0", "GPIO1", "GPIO2", "GPIO3", "GPIO4",
+               "EN_BLIGHT", "EN_VDD", "EN_VEE", "V_EN";
+      };
+    };
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
index 0c426e3..4ea6a87 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
@@ -18,6 +18,7 @@
       - enum:
           - amlogic,meson-g12a-mali
           - realtek,rtd1619-mali
+          - rockchip,px30-mali
       - const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully discoverable
 
   reg:
diff --git a/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml b/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
index ae04903..6a742a5 100644
--- a/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/hwmon/adi,ltc2947.yaml#
+$id: http://devicetree.org/schemas/hwmon/adi,ltc2947.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Analog Devices LTC2947 high precision power and energy monitor
diff --git a/Documentation/devicetree/bindings/i2c/i2c-at91.txt b/Documentation/devicetree/bindings/i2c/i2c-at91.txt
index 8347b1e..d4bad86 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-at91.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-at91.txt
@@ -1,10 +1,16 @@
 I2C for Atmel platforms
 
 Required properties :
-- compatible : Must be "atmel,at91rm9200-i2c", "atmel,at91sam9261-i2c",
-     "atmel,at91sam9260-i2c", "atmel,at91sam9g20-i2c", "atmel,at91sam9g10-i2c",
-     "atmel,at91sam9x5-i2c", "atmel,sama5d4-i2c", "atmel,sama5d2-i2c" or
-     "microchip,sam9x60-i2c"
+- compatible : Must be one of:
+	"atmel,at91rm9200-i2c",
+	"atmel,at91sam9261-i2c",
+	"atmel,at91sam9260-i2c",
+	"atmel,at91sam9g20-i2c",
+	"atmel,at91sam9g10-i2c",
+	"atmel,at91sam9x5-i2c",
+	"atmel,sama5d4-i2c",
+	"atmel,sama5d2-i2c",
+	"microchip,sam9x60-i2c".
 - reg: physical base address of the controller and length of memory mapped
      region.
 - interrupts: interrupt number to the cpu.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
index b245363..f0c072f 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
@@ -4,6 +4,7 @@
 - compatible :
   - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc
   - "fsl,imx8qxp-lpi2c" for LPI2C compatible with the one integrated on i.MX8QXP soc
+  - "fsl,imx8qm-lpi2c" for LPI2C compatible with the one integrated on i.MX8QM soc
 - reg : address and length of the lpi2c master registers
 - interrupts : lpi2c interrupt
 - clocks : lpi2c clock specifier
diff --git a/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt b/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
index 3738cfb..d229eff 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
@@ -1,7 +1,9 @@
 * Ingenic JZ4780 I2C Bus controller
 
 Required properties:
-- compatible: should be "ingenic,jz4780-i2c"
+- compatible: should be one of the following:
+  - "ingenic,jz4780-i2c" for the JZ4780
+  - "ingenic,x1000-i2c" for the X1000
 - reg: Should contain the address & size of the I2C controller registers.
 - interrupts: Should specify the interrupt provided by parent.
 - clocks: Should contain a single clock specifier for the JZ4780 I2C clock.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
index 30ac6a6..7abda50 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
@@ -25,6 +25,8 @@
 Optional Properties:
 
   - reset-gpios: Reference to the GPIO connected to the reset input.
+  - idle-state: if present, overrides i2c-mux-idle-disconnect. See
+    Documentation/devicetree/bindings/mux/mux-controller.txt and the sketch
+    below.
   - i2c-mux-idle-disconnect: Boolean; if defined, forces mux to disconnect all
     children in idle state. This is necessary for example, if there are several
     multiplexers on the bus and the devices behind them use same I2C addresses.
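
A rough sketch of idle-state in use (the device address and channel value are
arbitrary here; per the mux-controller binding, the value names the channel to
select while the mux is idle):

	i2c-mux@74 {
		compatible = "nxp,pca9545";
		reg = <0x74>;
		#address-cells = <1>;
		#size-cells = <0>;
		idle-state = <1>;
	};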
diff --git a/Documentation/devicetree/bindings/i2c/renesas,i2c.txt b/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
index 0660a3e..c359965 100644
--- a/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
@@ -17,7 +17,8 @@
 	"renesas,i2c-r8a7793" if the device is a part of a R8A7793 SoC.
 	"renesas,i2c-r8a7794" if the device is a part of a R8A7794 SoC.
 	"renesas,i2c-r8a7795" if the device is a part of a R8A7795 SoC.
-	"renesas,i2c-r8a7796" if the device is a part of a R8A7796 SoC.
+	"renesas,i2c-r8a7796" if the device is a part of a R8A77960 SoC.
+	"renesas,i2c-r8a77961" if the device is a part of a R8A77961 SoC.
 	"renesas,i2c-r8a77965" if the device is a part of a R8A77965 SoC.
 	"renesas,i2c-r8a77970" if the device is a part of a R8A77970 SoC.
 	"renesas,i2c-r8a77980" if the device is a part of a R8A77980 SoC.
diff --git a/Documentation/devicetree/bindings/i2c/renesas,iic.txt b/Documentation/devicetree/bindings/i2c/renesas,iic.txt
index 64d11ff..ffe085c 100644
--- a/Documentation/devicetree/bindings/i2c/renesas,iic.txt
+++ b/Documentation/devicetree/bindings/i2c/renesas,iic.txt
@@ -17,6 +17,7 @@
 			- "renesas,iic-r8a7794" (R-Car E2)
 			- "renesas,iic-r8a7795" (R-Car H3)
 			- "renesas,iic-r8a7796" (R-Car M3-W)
+			- "renesas,iic-r8a77961" (R-Car M3-W+)
 			- "renesas,iic-r8a77965" (R-Car M3-N)
 			- "renesas,iic-r8a77990" (R-Car E3)
 			- "renesas,iic-sh73a0" (SH-Mobile AG5)
diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adis16240.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adis16240.yaml
new file mode 100644
index 0000000..4147f02
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/accel/adi,adis16240.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/accel/adi,adis16240.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ADIS16240 Programmable Impact Sensor and Recorder driver
+
+maintainers:
+  - Alexandru Ardelean <alexandru.ardelean@analog.com>
+
+description: |
+  ADIS16240 Programmable Impact Sensor and Recorder driver that supports
+  the SPI interface.
+    https://www.analog.com/en/products/adis16240.html
+
+properties:
+  compatible:
+    enum:
+      - adi,adis16240
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    spi0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        /* Example for a SPI device node */
+        accelerometer@0 {
+            compatible = "adi,adis16240";
+            reg = <0>;
+            spi-max-frequency = <2500000>;
+            interrupt-parent = <&gpio0>;
+            interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/iio/accel/bma180.txt b/Documentation/devicetree/bindings/iio/accel/bma180.txt
index 3b25b4c..f532372 100644
--- a/Documentation/devicetree/bindings/iio/accel/bma180.txt
+++ b/Documentation/devicetree/bindings/iio/accel/bma180.txt
@@ -1,11 +1,14 @@
-* Bosch BMA180 / BMA250 triaxial acceleration sensor
+* Bosch BMA180 / BMA25x triaxial acceleration sensor
 
 http://omapworld.com/BMA180_111_1002839.pdf
 http://ae-bst.resource.bosch.com/media/products/dokumente/bma250/bst-bma250-ds002-05.pdf
 
 Required properties:
 
-  - compatible : should be "bosch,bma180" or "bosch,bma250"
+  - compatible : should be one of:
+    "bosch,bma180"
+    "bosch,bma250"
+    "bosch,bma254"
   - reg : the I2C address of the sensor
 
 Optional properties:
diff --git a/Documentation/devicetree/bindings/iio/accel/bosch,bma400.yaml b/Documentation/devicetree/bindings/iio/accel/bosch,bma400.yaml
new file mode 100644
index 0000000..c1c6d6f
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/accel/bosch,bma400.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/accel/bosch,bma400.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bosch BMA400 triaxial acceleration sensor
+
+maintainers:
+  - Dan Robertson <dan@dlrobertson.com>
+
+description: |
+  Acceleration and temperature IIO sensors with an I2C interface
+
+  Specifications about the sensor can be found at:
+    https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMA400-DS000.pdf
+
+properties:
+  compatible:
+    enum:
+      - bosch,bma400
+
+  reg:
+    maxItems: 1
+
+  vdd-supply:
+    description: phandle to the regulator that provides power to the accelerometer
+
+  vddio-supply:
+    description: phandle to the regulator that provides power to the sensor's IO
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      accelerometer@14 {
+        compatible = "bosch,bma400";
+        reg = <0x14>;
+        vdd-supply = <&vdd>;
+        vddio-supply = <&vddio>;
+        interrupt-parent = <&gpio0>;
+        interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+      };
+    };
diff --git a/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.txt b/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.txt
index eb76a02..ce950e1 100644
--- a/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.txt
+++ b/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.txt
@@ -9,9 +9,16 @@
     "kionix,kxtf9"
  - reg: i2c slave address
 
+Optional properties:
+
+  - mount-matrix: an optional 3x3 mounting rotation matrix
+
 Example:
 
 kxtf9@f {
 	compatible = "kionix,kxtf9";
 	reg = <0x0F>;
+	mount-matrix =	"0", "1", "0",
+			"1", "0", "0",
+			"0", "0", "1";
 };
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml
new file mode 100644
index 0000000..31ffa27
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/adi,ad7091r5.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD7091R5 4-Channel 12-Bit ADC
+
+maintainers:
+  - Beniamin Bia <beniamin.bia@analog.com>
+
+description: |
+  Analog Devices AD7091R5 4-Channel 12-Bit ADC
+  https://www.analog.com/media/en/technical-documentation/data-sheets/ad7091r-5.pdf
+
+properties:
+  compatible:
+    enum:
+      - adi,ad7091r5
+
+  reg:
+    maxItems: 1
+
+  vref-supply:
+    description:
+      Phandle to the vref power supply
+
+  interrupts:
+    maxItems: 1
+
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        adc@2f {
+                compatible = "adi,ad7091r5";
+                reg = <0x2f>;
+
+                interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
+                interrupt-parent = <&gpio>;
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
index e932d5a..f0934b2 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
@@ -2,7 +2,7 @@
 # Copyright 2019 Analog Devices Inc.
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/iio/adc/adi,ad7124.yaml#
+$id: http://devicetree.org/schemas/iio/adc/adi,ad7124.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Analog Devices AD7124 ADC device driver
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
index 567a33a..84d25bd 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
@@ -2,7 +2,7 @@
 # Copyright 2019 Analog Devices Inc.
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/iio/adc/adi,ad7192.yaml#
+$id: http://devicetree.org/schemas/iio/adc/adi,ad7192.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Analog Devices AD7192 ADC device driver
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
index 6eb3320..5117ad6 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
@@ -82,7 +82,7 @@
       Must be the device tree identifier of the over-sampling
       mode pins. As the line is active high, it should be marked
       GPIO_ACTIVE_HIGH.
-    maxItems: 1
+    maxItems: 3
 
   adi,sw-mode:
     description:
@@ -125,9 +125,9 @@
                 adi,conversion-start-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
                 reset-gpios = <&gpio 27 GPIO_ACTIVE_HIGH>;
                 adi,first-data-gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
-                adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH
-                                                &gpio 23 GPIO_ACTIVE_HIGH
-                                                &gpio 26 GPIO_ACTIVE_HIGH>;
+                adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH>,
+                                               <&gpio 23 GPIO_ACTIVE_HIGH>,
+                                               <&gpio 26 GPIO_ACTIVE_HIGH>;
                 standby-gpios = <&gpio 24 GPIO_ACTIVE_LOW>;
                 adi,sw-mode;
         };
diff --git a/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt b/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
index 4a3c1d4..07c59f3 100644
--- a/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
@@ -1,7 +1,7 @@
 * AT91 SAMA5D2 Analog to Digital Converter (ADC)
 
 Required properties:
-  - compatible: Should be "atmel,sama5d2-adc".
+  - compatible: Should be "atmel,sama5d2-adc" or "microchip,sam9x60-adc".
   - reg: Should contain ADC registers location and length.
   - interrupts: Should contain the IRQ line for the ADC.
   - clocks: phandle to device clock.
diff --git a/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml b/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml
new file mode 100644
index 0000000..5900999
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/lltc,ltc2496.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Linear Technology / Analog Devices LTC2496 ADC
+
+maintainers:
+ - Lars-Peter Clausen <lars@metafoo.de>
+ - Michael Hennerich <Michael.Hennerich@analog.com>
+ - Stefan Popa <stefan.popa@analog.com>
+
+properties:
+  compatible:
+    enum:
+      - lltc,ltc2496
+
+  vref-supply:
+    description: phandle to an external regulator providing the reference voltage
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/phandle
+
+  reg:
+    description: spi chipselect number according to the usual spi bindings
+
+  spi-max-frequency:
+    description: maximal spi bus frequency supported
+
+required:
+  - compatible
+  - vref-supply
+  - reg
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        adc@0 {
+            compatible = "lltc,ltc2496";
+            reg = <0>;
+            vref-supply = <&ltc2496_reg>;
+            spi-max-frequency = <2000000>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml b/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
index 881059b..0ce2904 100644
--- a/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
@@ -2,7 +2,7 @@
 # Copyright 2019 Marcus Folkesson <marcus.folkesson@gmail.com>
 %YAML 1.2
 ---
-$id: "http://devicetree.org/schemas/bindings/iio/adc/microchip,mcp3911.yaml#"
+$id: "http://devicetree.org/schemas/iio/adc/microchip,mcp3911.yaml#"
 $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 
 title: Microchip MCP3911 Dual channel analog front end (ADC)
diff --git a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt
deleted file mode 100644
index 59b92cd..0000000
--- a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-Device-Tree bindings for sigma delta modulator
-
-Required properties:
-- compatible: should be "ads1201", "sd-modulator". "sd-modulator" can be use
-	as a generic SD modulator if modulator not specified in compatible list.
-- #io-channel-cells = <0>: See the IIO bindings section "IIO consumers".
-
-Example node:
-
-	ads1202: adc {
-		compatible = "sd-modulator";
-		#io-channel-cells = <0>;
-	};
diff --git a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml
new file mode 100644
index 0000000..a390343d
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/sigma-delta-modulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Device-Tree bindings for sigma delta modulator
+
+maintainers:
+  - Arnaud Pouliquen <arnaud.pouliquen@st.com>
+
+properties:
+  compatible:
+    description: |
+      "sd-modulator" can be used as a generic SD modulator,
+      if the modulator is not specified in the compatible list.
+    enum:
+      - sd-modulator
+      - ads1201
+
+  '#io-channel-cells':
+    const: 0
+
+required:
+  - compatible
+  - '#io-channel-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    ads1202: adc {
+      compatible = "sd-modulator";
+      #io-channel-cells = <0>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt
deleted file mode 100644
index 75ba25d..0000000
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt
+++ /dev/null
@@ -1,135 +0,0 @@
-STMicroelectronics STM32 DFSDM ADC device driver
-
-
-STM32 DFSDM ADC is a sigma delta analog-to-digital converter dedicated to
-interface external sigma delta modulators to STM32 micro controllers.
-It is mainly targeted for:
-- Sigma delta modulators (motor control, metering...)
-- PDM microphones (audio digital microphone)
-
-It features up to 8 serial digital interfaces (SPI or Manchester) and
-up to 4 filters on stm32h7 or 6 filters on stm32mp1.
-
-Each child node match with a filter instance.
-
-Contents of a STM32 DFSDM root node:
-------------------------------------
-Required properties:
-- compatible: Should be one of:
-  "st,stm32h7-dfsdm"
-  "st,stm32mp1-dfsdm"
-- reg: Offset and length of the DFSDM block register set.
-- clocks: IP and serial interfaces clocking. Should be set according
-		to rcc clock ID and "clock-names".
-- clock-names: Input clock name "dfsdm" must be defined,
-		"audio" is optional. If defined CLKOUT is based on the audio
-		clock, else "dfsdm" is used.
-- #interrupt-cells = <1>;
-- #address-cells = <1>;
-- #size-cells = <0>;
-
-Optional properties:
-- spi-max-frequency: Requested only for SPI master mode.
-		  SPI clock OUT frequency (Hz). This clock must be set according
-		  to "clock" property. Frequency must be a multiple of the rcc
-		  clock frequency. If not, SPI CLKOUT frequency will not be
-		  accurate.
-- pinctrl-names:	Set to "default".
-- pinctrl-0:		List of phandles pointing to pin configuration
-			nodes to set pins in mode of operation for dfsdm
-			on external pin.
-
-Contents of a STM32 DFSDM child nodes:
---------------------------------------
-
-Required properties:
-- compatible: Must be:
-	"st,stm32-dfsdm-adc" for sigma delta ADCs
-	"st,stm32-dfsdm-dmic" for audio digital microphone.
-- reg: Specifies the DFSDM filter instance used.
-	Valid values are from 0 to 3 on stm32h7, 0 to 5 on stm32mp1.
-- interrupts: IRQ lines connected to each DFSDM filter instance.
-- st,adc-channels:	List of single-ended channels muxed for this ADC.
-			valid values:
-				"st,stm32h7-dfsdm" compatibility: 0 to 7.
-- st,adc-channel-names:	List of single-ended channel names.
-- st,filter-order:  SinC filter order from 0 to 5.
-			0: FastSinC
-			[1-5]: order 1 to 5.
-			For audio purpose it is recommended to use order 3 to 5.
-- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers".
-
-Required properties for "st,stm32-dfsdm-adc" compatibility:
-- io-channels: From common IIO binding. Used to pipe external sigma delta
-		modulator or internal ADC output to DFSDM channel.
-		This is not required for "st,stm32-dfsdm-pdm" compatibility as
-		PDM microphone is binded in Audio DT node.
-
-Required properties for "st,stm32-dfsdm-pdm" compatibility:
-- #sound-dai-cells: Must be set to 0.
-- dma: DMA controller phandle and DMA request line associated to the
-		filter instance (specified by the field "reg")
-- dma-names: Must be "rx"
-
-Optional properties:
-- st,adc-channel-types:	Single-ended channel input type.
-			- "SPI_R": SPI with data on rising edge (default)
-			- "SPI_F": SPI with data on falling edge
-			- "MANCH_R": manchester codec, rising edge = logic 0, falling edge = logic 1
-			- "MANCH_F": manchester codec, rising edge = logic 1, falling edge = logic 0
-- st,adc-channel-clk-src: Conversion clock source.
-			  - "CLKIN": external SPI clock (CLKIN x)
-			  - "CLKOUT": internal SPI clock (CLKOUT) (default)
-			  - "CLKOUT_F": internal SPI clock divided by 2 (falling edge).
-			  - "CLKOUT_R": internal SPI clock divided by 2 (rising edge).
-
-- st,adc-alt-channel: Must be defined if two sigma delta modulator are
-			  connected on same SPI input.
-			  If not set, channel n is connected to SPI input n.
-			  If set, channel n is connected to SPI input n + 1.
-
-- st,filter0-sync: Set to 1 to synchronize with DFSDM filter instance 0.
-		   Used for multi microphones synchronization.
-
-Example of a sigma delta adc connected on DFSDM SPI port 0
-and a pdm microphone connected on DFSDM SPI port 1:
-
-	ads1202: simple_sd_adc@0 {
-		compatible = "ads1202";
-		#io-channel-cells = <1>;
-	};
-
-	dfsdm: dfsdm@40017000 {
-		compatible = "st,stm32h7-dfsdm";
-		reg = <0x40017000 0x400>;
-		clocks = <&rcc DFSDM1_CK>;
-		clock-names = "dfsdm";
-		#interrupt-cells = <1>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		dfsdm_adc0: filter@0 {
-			compatible = "st,stm32-dfsdm-adc";
-			#io-channel-cells = <1>;
-			reg = <0>;
-			interrupts = <110>;
-			st,adc-channels = <0>;
-			st,adc-channel-names = "sd_adc0";
-			st,adc-channel-types = "SPI_F";
-			st,adc-channel-clk-src = "CLKOUT";
-			io-channels = <&ads1202 0>;
-			st,filter-order = <3>;
-		};
-		dfsdm_pdm1: filter@1 {
-			compatible = "st,stm32-dfsdm-dmic";
-			reg = <1>;
-			interrupts = <111>;
-			dmas = <&dmamux1 102 0x400 0x00>;
-			dma-names = "rx";
-			st,adc-channels = <1>;
-			st,adc-channel-names = "dmic1";
-			st,adc-channel-types = "SPI_R";
-			st,adc-channel-clk-src = "CLKOUT";
-			st,filter-order = <5>;
-		};
-	}
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
new file mode 100644
index 0000000..acf36ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
@@ -0,0 +1,332 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/st,stm32-dfsdm-adc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 DFSDM ADC device driver
+
+maintainers:
+  - Fabrice Gasnier <fabrice.gasnier@st.com>
+  - Olivier Moysan <olivier.moysan@st.com>
+
+description: |
+  STM32 DFSDM ADC is a sigma delta analog-to-digital converter dedicated to
+  interface external sigma delta modulators to STM32 micro controllers.
+  It is mainly targeted for:
+  - Sigma delta modulators (motor control, metering...)
+  - PDM microphones (audio digital microphone)
+
+  It features up to 8 serial digital interfaces (SPI or Manchester) and
+  up to 4 filters on stm32h7 or 6 filters on stm32mp1.
+
+  Each child node matches with a filter instance.
+
+properties:
+  compatible:
+    enum:
+      - st,stm32h7-dfsdm
+      - st,stm32mp1-dfsdm
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description:
+          Internal clock used for DFSDM digital processing and control blocks.
+          dfsdm clock can also feed CLKOUT, when CLKOUT is used.
+      - description: audio clock can be used as an alternate to feed CLKOUT.
+    minItems: 1
+    maxItems: 2
+
+  clock-names:
+    items:
+      - const: dfsdm
+      - const: audio
+    minItems: 1
+    maxItems: 2
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+  spi-max-frequency:
+    description:
+      SPI clock OUT frequency (Hz). Requested only for SPI master mode.
+      This clock must be set according to the "clock" property.
+      Frequency must be a multiple of the rcc clock frequency.
+      If not, SPI CLKOUT frequency will not be accurate.
+    maximum: 20000000
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - "#address-cells"
+  - "#size-cells"
+
+patternProperties:
+  "^filter@[0-9]+$":
+    type: object
+    description: child node
+
+    properties:
+      compatible:
+        enum:
+          - st,stm32-dfsdm-adc
+          - st,stm32-dfsdm-dmic
+
+      reg:
+        description: Specifies the DFSDM filter instance used.
+        maxItems: 1
+
+      interrupts:
+        maxItems: 1
+
+      st,adc-channels:
+        description: |
+          List of single-ended channels muxed for this ADC.
+          On stm32h7 and stm32mp1:
+          - For st,stm32-dfsdm-adc: up to 8 channels numbered from 0 to 7.
+          - For st,stm32-dfsdm-dmic: 1 channel numbered from 0 to 7.
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/uint32-array
+          - items:
+              minimum: 0
+              maximum: 7
+
+      st,adc-channel-names:
+        description: List of single-ended channel names.
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/string-array
+
+      st,filter-order:
+        description: |
+          SinC filter order from 0 to 5.
+          - 0: FastSinC
+          - [1-5]: order 1 to 5.
+          For audio purposes, it is recommended to use order 3 to 5.
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/uint32
+          - items:
+              minimum: 0
+              maximum: 5
+
+      "#io-channel-cells":
+        const: 1
+
+      st,adc-channel-types:
+        description: |
+          Single-ended channel input type.
+          - "SPI_R": SPI with data on rising edge (default)
+          - "SPI_F": SPI with data on falling edge
+          - "MANCH_R": manchester codec, rising edge = logic 0, falling edge = logic 1
+          - "MANCH_F": manchester codec, rising edge = logic 1, falling edge = logic 0
+        items:
+          enum: [ SPI_R, SPI_F, MANCH_R, MANCH_F ]
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+
+      st,adc-channel-clk-src:
+        description: |
+          Conversion clock source.
+          - "CLKIN": external SPI clock (CLKIN x)
+          - "CLKOUT": internal SPI clock (CLKOUT) (default)
+          - "CLKOUT_F": internal SPI clock divided by 2 (falling edge).
+          - "CLKOUT_R": internal SPI clock divided by 2 (rising edge).
+        items:
+          enum: [ CLKIN, CLKOUT, CLKOUT_F, CLKOUT_R ]
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+
+      st,adc-alt-channel:
+        description:
+          Must be defined if two sigma delta modulators are
+          connected on the same SPI input.
+          If not set, channel n is connected to SPI input n.
+          If set, channel n is connected to SPI input n + 1.
+        type: boolean
+
+      st,filter0-sync:
+        description:
+          Set to 1 to synchronize with DFSDM filter instance 0.
+          Used for multi microphones synchronization.
+        type: boolean
+
+      dmas:
+        maxItems: 1
+
+      dma-names:
+        items:
+          - const: rx
+
+    required:
+      - compatible
+      - reg
+      - interrupts
+      - st,adc-channels
+      - st,adc-channel-names
+      - st,filter-order
+      - "#io-channel-cells"
+
+    allOf:
+      - if:
+          properties:
+            compatible:
+              contains:
+                const: st,stm32-dfsdm-adc
+
+      - then:
+          properties:
+            st,adc-channels:
+              minItems: 1
+              maxItems: 8
+
+            st,adc-channel-names:
+              minItems: 1
+              maxItems: 8
+
+            st,adc-channel-types:
+              minItems: 1
+              maxItems: 8
+
+            st,adc-channel-clk-src:
+              minItems: 1
+              maxItems: 8
+
+            io-channels:
+              description:
+                From common IIO binding. Used to pipe external sigma delta
+                modulator or internal ADC output to DFSDM channel.
+                This is not required for "st,stm32-dfsdm-dmic" compatibility as
+                the PDM microphone is bound in the audio DT node.
+
+          required:
+            - io-channels
+
+      - if:
+          properties:
+            compatible:
+              contains:
+                const: st,stm32-dfsdm-dmic
+
+      - then:
+          properties:
+            st,adc-channels:
+              maxItems: 1
+
+            st,adc-channel-names:
+              maxItems: 1
+
+            st,adc-channel-types:
+              maxItems: 1
+
+            st,adc-channel-clk-src:
+              maxItems: 1
+
+          required:
+            - dmas
+            - dma-names
+
+          patternProperties:
+            "^dfsdm-dai+$":
+              type: object
+              description: child node
+
+              properties:
+                "#sound-dai-cells":
+                  const: 0
+
+                io-channels:
+                  description:
+                    From common IIO binding. Used to pipe external sigma delta
+                    modulator or internal ADC output to DFSDM channel.
+
+              required:
+                - "#sound-dai-cells"
+                - io-channels
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: st,stm32h7-dfsdm
+
+  - then:
+      patternProperties:
+        "^filter@[0-9]+$":
+          properties:
+            reg:
+              items:
+                minimum: 0
+                maximum: 3
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: st,stm32mp1-dfsdm
+
+  - then:
+      patternProperties:
+        "^filter@[0-9]+$":
+          properties:
+            reg:
+              items:
+                minimum: 0
+                maximum: 5
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/stm32mp1-clks.h>
+    dfsdm: dfsdm@4400d000 {
+      compatible = "st,stm32mp1-dfsdm";
+      reg = <0x4400d000 0x800>;
+      clocks = <&rcc DFSDM_K>, <&rcc ADFSDM_K>;
+      clock-names = "dfsdm", "audio";
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      dfsdm0: filter@0 {
+        compatible = "st,stm32-dfsdm-dmic";
+        reg = <0>;
+        interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+        dmas = <&dmamux1 101 0x400 0x01>;
+        dma-names = "rx";
+        #io-channel-cells = <1>;
+        st,adc-channels = <1>;
+        st,adc-channel-names = "dmic0";
+        st,adc-channel-types = "SPI_R";
+        st,adc-channel-clk-src = "CLKOUT";
+        st,filter-order = <5>;
+
+        asoc_pdm0: dfsdm-dai {
+          compatible = "st,stm32h7-dfsdm-dai";
+          #sound-dai-cells = <0>;
+          io-channels = <&dfsdm0 0>;
+        };
+      };
+
+      dfsdm_pdm1: filter@1 {
+        compatible = "st,stm32-dfsdm-adc";
+        reg = <1>;
+        interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+        dmas = <&dmamux1 102 0x400 0x01>;
+        dma-names = "rx";
+        #io-channel-cells = <1>;
+        st,adc-channels = <2 3>;
+        st,adc-channel-names = "in2", "in3";
+        st,adc-channel-types = "SPI_R", "SPI_R";
+        st,adc-channel-clk-src = "CLKOUT_F", "CLKOUT_F";
+        io-channels = <&sd_adc2 &sd_adc3>;
+        st,filter-order = <1>;
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml b/Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml
index 13d005b..a285eab 100644
--- a/Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml
@@ -2,7 +2,7 @@
 # Copyright 2019 Marcus Folkesson <marcus.folkesson@gmail.com>
 %YAML 1.2
 ---
-$id: "http://devicetree.org/schemas/bindings/iio/dac/lltc,ltc1660.yaml#"
+$id: "http://devicetree.org/schemas/iio/dac/lltc,ltc1660.yaml#"
 $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 
 title: Linear Technology Micropower octal 8-Bit and 10-Bit DACs
diff --git a/Documentation/devicetree/bindings/iio/imu/adi,adis16480.txt b/Documentation/devicetree/bindings/iio/imu/adi,adis16480.txt
index ed7783f..cd903a1 100644
--- a/Documentation/devicetree/bindings/iio/imu/adi,adis16480.txt
+++ b/Documentation/devicetree/bindings/iio/imu/adi,adis16480.txt
@@ -8,6 +8,7 @@
 	* "adi,adis16480"
 	* "adi,adis16485"
 	* "adi,adis16488"
+	* "adi,adis16490"
 	* "adi,adis16495-1"
 	* "adi,adis16495-2"
 	* "adi,adis16495-3"
diff --git a/Documentation/devicetree/bindings/iio/pressure/asc,dlhl60d.yaml b/Documentation/devicetree/bindings/iio/pressure/asc,dlhl60d.yaml
new file mode 100644
index 0000000..9f5ca9c
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/pressure/asc,dlhl60d.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/pressure/asc,dlhl60d.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: All Sensors DLH series low voltage digital pressure sensors
+
+maintainers:
+  - Tomislav Denis <tomislav.denis@avl.com>
+
+description: |
+  Bindings for the All Sensors DLH series pressure sensors.
+
+  Specifications about the sensors can be found at:
+    http://www.allsensors.com/cad/DS-0355_Rev_B.PDF
+
+properties:
+  compatible:
+    enum:
+      - asc,dlhl60d
+      - asc,dlhl60g
+
+  reg:
+    description: I2C device address
+    maxItems: 1
+
+  interrupts:
+    description: interrupt mapping for EOC(data ready) pin
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    i2c0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        pressure@29 {
+            compatible = "asc,dlhl60d";
+            reg = <0x29>;
+            interrupt-parent = <&gpio0>;
+            interrupts = <10 IRQ_TYPE_EDGE_RISING>;
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/iio/proximity/parallax-ping.yaml b/Documentation/devicetree/bindings/iio/proximity/parallax-ping.yaml
new file mode 100644
index 0000000..a079c99
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/proximity/parallax-ping.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/proximity/parallax-ping.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Parallax PING))) and LaserPING range finder
+
+maintainers:
+  - Andreas Klinger <ak@it-klinger.de>
+
+description: |
+  Bit-banging driver using one GPIO:
+  - ping-gpios is raised by the driver to start measurement
+  - direction of ping-gpio is then switched into input with an interrupt
+    for receiving distance value as PWM signal
+
+  Specifications about the devices can be found at:
+  http://parallax.com/sites/default/files/downloads/28041-LaserPING-2m-Rangefinder-Guide.pdf
+  http://parallax.com/sites/default/files/downloads/28015-PING-Documentation-v1.6.pdf
+
+properties:
+  compatible:
+    enum:
+      - parallax,ping
+      - parallax,laserping
+
+  ping-gpios:
+    description:
+      Definition of the GPIO for the triggering and echo (output and input)
+      This GPIO is set for about 5 us by the driver to tell the device it
+      should initiate the measurement cycle. Afterwards the GPIO is switched
+      to input direction with an interrupt. The device sets it and the
+      length of the input signal corresponds to the measured distance.
+      It needs to be a GPIO that is able to deliver an interrupt because
+      the time between two interrupts is measured in the driver.
+      See Documentation/devicetree/bindings/gpio/gpio.txt for information
+      on how to specify a consumer gpio.
+    maxItems: 1
+
+required:
+  - compatible
+  - ping-gpios
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    proximity {
+        compatible = "parallax,laserping";
+        ping-gpios = <&gpio0 26 GPIO_ACTIVE_HIGH>;
+    };
diff --git a/Documentation/devicetree/bindings/iio/temperature/maxim_thermocouple.txt b/Documentation/devicetree/bindings/iio/temperature/maxim_thermocouple.txt
index 28bc5c4..bb85cd0 100644
--- a/Documentation/devicetree/bindings/iio/temperature/maxim_thermocouple.txt
+++ b/Documentation/devicetree/bindings/iio/temperature/maxim_thermocouple.txt
@@ -5,7 +5,10 @@
 
 Required properties:
 
-	- compatible: must be "maxim,max31855" or "maxim,max6675"
+	- compatible: must be "maxim,max6675" or one of the following:
+	   "maxim,max31855k", "maxim,max31855j", "maxim,max31855n",
+	   "maxim,max31855s", "maxim,max31855t", "maxim,max31855e",
+	   "maxim,max31855r"; the generic "max,max31855" is deprecated.
 	- reg: SPI chip select number for the device
 	- spi-max-frequency: must be 4300000
 	- spi-cpha: must be defined for max6675 to enable SPI mode 1
@@ -15,7 +18,7 @@
 Example:
 
 	max31855@0 {
-		compatible = "maxim,max31855";
+		compatible = "maxim,max31855k";
 		reg = <0>;
 		spi-max-frequency = <4300000>;
 	};
diff --git a/Documentation/devicetree/bindings/input/gpio-vibrator.yaml b/Documentation/devicetree/bindings/input/gpio-vibrator.yaml
index 903475f..b98bf93 100644
--- a/Documentation/devicetree/bindings/input/gpio-vibrator.yaml
+++ b/Documentation/devicetree/bindings/input/gpio-vibrator.yaml
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/bindings/input/gpio-vibrator.yaml#
+$id: http://devicetree.org/schemas/input/gpio-vibrator.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: GPIO vibrator
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
index 0f69500..0e57315 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
@@ -36,6 +36,8 @@
  - pinctrl-0:   a phandle pointing to the pin settings for the
                 control gpios
 
+ - wakeup-source: If present the device will act as wakeup-source
+
  - threshold:   allows setting the "click"-threshold in the range
                 from 0 to 80.
 
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
deleted file mode 100644
index fc03ea4..0000000
--- a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-Device tree bindings for Goodix GT9xx series touchscreen controller
-
-Required properties:
-
- - compatible		: Should be "goodix,gt1151"
-				 or "goodix,gt5663"
-				 or "goodix,gt5688"
-				 or "goodix,gt911"
-				 or "goodix,gt9110"
-				 or "goodix,gt912"
-				 or "goodix,gt927"
-				 or "goodix,gt9271"
-				 or "goodix,gt928"
-				 or "goodix,gt967"
- - reg			: I2C address of the chip. Should be 0x5d or 0x14
- - interrupts		: Interrupt to which the chip is connected
-
-Optional properties:
-
- - irq-gpios		: GPIO pin used for IRQ. The driver uses the
-			  interrupt gpio pin as output to reset the device.
- - reset-gpios		: GPIO pin used for reset
- - AVDD28-supply	: Analog power supply regulator on AVDD28 pin
- - VDDIO-supply		: GPIO power supply regulator on VDDIO pin
- - touchscreen-inverted-x
- - touchscreen-inverted-y
- - touchscreen-size-x
- - touchscreen-size-y
- - touchscreen-swapped-x-y
-
-The touchscreen-* properties are documented in touchscreen.txt in this
-directory.
-
-Example:
-
-	i2c@00000000 {
-		/* ... */
-
-		gt928@5d {
-			compatible = "goodix,gt928";
-			reg = <0x5d>;
-			interrupt-parent = <&gpio>;
-			interrupts = <0 0>;
-
-			irq-gpios = <&gpio1 0 0>;
-			reset-gpios = <&gpio1 1 0>;
-		};
-
-		/* ... */
-	};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
new file mode 100644
index 0000000..d7c3262
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/goodix.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Goodix GT9xx series touchscreen controller Bindings
+
+maintainers:
+  - Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+allOf:
+  - $ref: touchscreen.yaml#
+
+properties:
+  compatible:
+    enum:
+      - goodix,gt1151
+      - goodix,gt5663
+      - goodix,gt5688
+      - goodix,gt911
+      - goodix,gt9110
+      - goodix,gt912
+      - goodix,gt927
+      - goodix,gt9271
+      - goodix,gt928
+      - goodix,gt967
+
+  reg:
+    enum: [ 0x5d, 0x14 ]
+
+  interrupts:
+    maxItems: 1
+
+  irq-gpios:
+    description: GPIO pin used for IRQ.
+                 The driver uses the interrupt gpio pin as
+                 output to reset the device.
+    maxItems: 1
+
+  reset-gpios:
+    maxItems: 1
+
+  AVDD28-supply:
+    description: Analog power supply regulator on AVDD28 pin
+
+  VDDIO-supply:
+    description: GPIO power supply regulator on VDDIO pin
+
+  touchscreen-inverted-x: true
+  touchscreen-inverted-y: true
+  touchscreen-size-x: true
+  touchscreen-size-y: true
+  touchscreen-swapped-x-y: true
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+examples:
+- |
+    i2c@00000000 {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      gt928@5d {
+        compatible = "goodix,gt928";
+        reg = <0x5d>;
+        interrupt-parent = <&gpio>;
+        interrupts = <0 0>;
+        irq-gpios = <&gpio1 0 0>;
+        reset-gpios = <&gpio1 1 0>;
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
index 8641a2d..e1adb90 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
@@ -1,39 +1 @@
-General Touchscreen Properties:
-
-Optional properties for Touchscreens:
- - touchscreen-min-x		: minimum x coordinate reported (0 if not set)
- - touchscreen-min-y		: minimum y coordinate reported (0 if not set)
- - touchscreen-size-x		: horizontal resolution of touchscreen
-				  (maximum x coordinate reported + 1)
- - touchscreen-size-y		: vertical resolution of touchscreen
-				  (maximum y coordinate reported + 1)
- - touchscreen-max-pressure	: maximum reported pressure (arbitrary range
-				  dependent on the controller)
- - touchscreen-min-pressure	: minimum pressure on the touchscreen to be
-				  achieved in order for the touchscreen
-				  driver to report a touch event.
- - touchscreen-fuzz-x		: horizontal noise value of the absolute input
-				  device (in pixels)
- - touchscreen-fuzz-y		: vertical noise value of the absolute input
-				  device (in pixels)
- - touchscreen-fuzz-pressure	: pressure noise value of the absolute input
-				  device (arbitrary range dependent on the
-				  controller)
- - touchscreen-average-samples : Number of data samples which are averaged
-				  for each read (valid values dependent on the
-				  controller)
- - touchscreen-inverted-x	: X axis is inverted (boolean)
- - touchscreen-inverted-y	: Y axis is inverted (boolean)
- - touchscreen-swapped-x-y	: X and Y axis are swapped (boolean)
-				  Swapping is done after inverting the axis
- - touchscreen-x-mm		: horizontal length in mm of the touchscreen
- - touchscreen-y-mm		: vertical length in mm of the touchscreen
-
-Deprecated properties for Touchscreens:
- - x-size			: deprecated name for touchscreen-size-x
- - y-size			: deprecated name for touchscreen-size-y
- - moving-threshold		: deprecated name for a combination of
-				  touchscreen-fuzz-x and touchscreen-fuzz-y
- - contact-threshold		: deprecated name for touchscreen-fuzz-pressure
- - x-invert			: deprecated name for touchscreen-inverted-x
- - y-invert			: deprecated name for touchscreen-inverted-y
+See touchscreen.yaml
diff --git a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
new file mode 100644
index 0000000..d7dac16
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/touchscreen.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common touchscreen Bindings
+
+maintainers:
+  - Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+properties:
+  touchscreen-min-x:
+    description: minimum x coordinate reported
+    $ref: /schemas/types.yaml#/definitions/uint32
+    default: 0
+
+  touchscreen-min-y:
+    description: minimum y coordinate reported
+    $ref: /schemas/types.yaml#/definitions/uint32
+    default: 0
+
+  touchscreen-size-x:
+    description: horizontal resolution of touchscreen (maximum x coordinate reported + 1)
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-size-y:
+    description: vertical resolution of touchscreen (maximum y coordinate reported + 1)
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-max-pressure:
+    description: maximum reported pressure (arbitrary range dependent on the controller)
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-min-pressure:
+    description: minimum pressure on the touchscreen to be achieved in order for the
+                 touchscreen driver to report a touch event.
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-fuzz-x:
+    description: horizontal noise value of the absolute input device (in pixels)
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-fuzz-y:
+    description: vertical noise value of the absolute input device (in pixels)
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-fuzz-pressure:
+    description: pressure noise value of the absolute input device (arbitrary range
+                 dependent on the controller)
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-average-samples:
+    description: Number of data samples which are averaged for each read (valid values
+                 dependent on the controller)
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-inverted-x:
+    description: X axis is inverted
+    type: boolean
+
+  touchscreen-inverted-y:
+    description: Y axis is inverted
+    type: boolean
+
+  touchscreen-swapped-x-y:
+    description: X and Y axis are swapped
+                 Swapping is done after inverting the axis
+    type: boolean
+
+  touchscreen-x-mm:
+    description: horizontal length in mm of the touchscreen
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  touchscreen-y-mm:
+    description: vertical length in mm of the touchscreen
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+dependencies:
+  touchscreen-size-x: [ touchscreen-size-y ]
+  touchscreen-size-y: [ touchscreen-size-x ]
+  touchscreen-x-mm: [ touchscreen-y-mm ]
+  touchscreen-y-mm: [ touchscreen-x-mm ]
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8916.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8916.yaml
new file mode 100644
index 0000000..4107e60
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8916.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,msm8916.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm MSM8916 Network-On-Chip interconnect
+
+maintainers:
+  - Georgi Djakov <georgi.djakov@linaro.org>
+
+description: |
+   The Qualcomm MSM8916 interconnect providers support adjusting the
+   bandwidth requirements between the various NoC fabrics.
+
+properties:
+  compatible:
+    enum:
+      - qcom,msm8916-bimc
+      - qcom,msm8916-pcnoc
+      - qcom,msm8916-snoc
+
+  reg:
+    maxItems: 1
+
+  '#interconnect-cells':
+    const: 1
+
+  clock-names:
+    items:
+      - const: bus
+      - const: bus_a
+
+  clocks:
+    items:
+      - description: Bus Clock
+      - description: Bus A Clock
+
+required:
+  - compatible
+  - reg
+  - '#interconnect-cells'
+  - clock-names
+  - clocks
+
+additionalProperties: false
+
+examples:
+  - |
+      #include <dt-bindings/clock/qcom,rpmcc.h>
+
+      bimc: interconnect@400000 {
+              compatible = "qcom,msm8916-bimc";
+              reg = <0x00400000 0x62000>;
+              #interconnect-cells = <1>;
+              clock-names = "bus", "bus_a";
+              clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
+                       <&rpmcc RPM_SMD_BIMC_A_CLK>;
+      };
+
+      pcnoc: interconnect@500000 {
+              compatible = "qcom,msm8916-pcnoc";
+              reg = <0x00500000 0x11000>;
+              #interconnect-cells = <1>;
+              clock-names = "bus", "bus_a";
+              clocks = <&rpmcc RPM_SMD_PCNOC_CLK>,
+                       <&rpmcc RPM_SMD_PCNOC_A_CLK>;
+      };
+
+      snoc: interconnect@580000 {
+              compatible = "qcom,msm8916-snoc";
+              reg = <0x00580000 0x14000>;
+              #interconnect-cells = <1>;
+              clock-names = "bus", "bus_a";
+              clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
+                       <&rpmcc RPM_SMD_SNOC_A_CLK>;
+      };
diff --git a/Documentation/devicetree/bindings/iommu/iommu.txt b/Documentation/devicetree/bindings/iommu/iommu.txt
index 5a8b462..3c36334 100644
--- a/Documentation/devicetree/bindings/iommu/iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/iommu.txt
@@ -86,6 +86,12 @@
 disable the IOMMU's device tree node in the first place because it would
 prevent any driver from properly setting up the translations.
 
+Optional properties:
+--------------------
+- pasid-num-bits: Some masters support multiple address spaces for DMA, by
+  tagging DMA transactions with an address space identifier. By default,
+  this is 0, which means that the device only has one address space.
+
 
 Notes:
 ======
diff --git a/Documentation/devicetree/bindings/leds/common.txt b/Documentation/devicetree/bindings/leds/common.txt
index 9fa6f97..26d770e 100644
--- a/Documentation/devicetree/bindings/leds/common.txt
+++ b/Documentation/devicetree/bindings/leds/common.txt
@@ -1,173 +1 @@
-* Common leds properties.
-
-LED and flash LED devices provide the same basic functionality as current
-regulators, but extended with LED and flash LED specific features like
-blinking patterns, flash timeout, flash faults and external flash strobe mode.
-
-Many LED devices expose more than one current output that can be connected
-to one or more discrete LED component. Since the arrangement of connections
-can influence the way of the LED device initialization, the LED components
-have to be tightly coupled with the LED device binding. They are represented
-by child nodes of the parent LED device binding.
-
-
-Optional properties for child nodes:
-- led-sources : List of device current outputs the LED is connected to. The
-		outputs are identified by the numbers that must be defined
-		in the LED device binding documentation.
-
-- function: LED functon. Use one of the LED_FUNCTION_* prefixed definitions
-	    from the header include/dt-bindings/leds/common.h.
-	    If there is no matching LED_FUNCTION available, add a new one.
-
-- color : Color of the LED. Use one of the LED_COLOR_ID_* prefixed definitions
-	  from the header include/dt-bindings/leds/common.h.
-	  If there is no matching LED_COLOR_ID available, add a new one.
-
-- function-enumerator: Integer to be used when more than one instance
-                       of the same function is needed, differing only with
-		       an ordinal number.
-
-- label : The label for this LED. If omitted, the label is taken from the node
-	  name (excluding the unit address). It has to uniquely identify
-	  a device, i.e. no other LED class device can be assigned the same
-	  label. This property is deprecated - use 'function' and 'color'
-	  properties instead. function-enumerator has no effect when this
-	  property is present.
-
-- default-state : The initial state of the LED. Valid values are "on", "off",
-  and "keep". If the LED is already on or off and the default-state property is
-  set the to same value, then no glitch should be produced where the LED
-  momentarily turns off (or on). The "keep" setting will keep the LED at
-  whatever its current state is, without producing a glitch.  The default is
-  off if this property is not present.
-
-- linux,default-trigger :  This parameter, if present, is a
-    string defining the trigger assigned to the LED.  Current triggers are:
-     "backlight" - LED will act as a back-light, controlled by the framebuffer
-		   system
-     "default-on" - LED will turn on (but for leds-gpio see "default-state"
-		    property in Documentation/devicetree/bindings/leds/leds-gpio.txt)
-     "heartbeat" - LED "double" flashes at a load average based rate
-     "disk-activity" - LED indicates disk activity
-     "ide-disk" - LED indicates IDE disk activity (deprecated),
-                  in new implementations use "disk-activity"
-     "timer" - LED flashes at a fixed, configurable rate
-     "pattern" - LED alters the brightness for the specified duration with one
-                 software timer (requires "led-pattern" property)
-
-- led-pattern : Array of integers with default pattern for certain triggers.
-                Each trigger may parse this property differently:
-                - one-shot : two numbers specifying delay on and delay off (in ms),
-                - timer : two numbers specifying delay on and delay off (in ms),
-                - pattern : the pattern is given by a series of tuples, of
-                  brightness and duration (in ms).  The exact format is
-                  described in:
-                  Documentation/devicetree/bindings/leds/leds-trigger-pattern.txt
-
-
-- led-max-microamp : Maximum LED supply current in microamperes. This property
-                     can be made mandatory for the board configurations
-                     introducing a risk of hardware damage in case an excessive
-                     current is set.
-                     For flash LED controllers with configurable current this
-                     property is mandatory for the LEDs in the non-flash modes
-                     (e.g. torch or indicator).
-
-- panic-indicator : This property specifies that the LED should be used,
-		    if at all possible, as a panic indicator.
-
-- trigger-sources : List of devices which should be used as a source triggering
-		    this LED activity. Some LEDs can be related to a specific
-		    device and should somehow indicate its state. E.g. USB 2.0
-		    LED may react to device(s) in a USB 2.0 port(s).
-		    Another common example is switch or router with multiple
-		    Ethernet ports each of them having its own LED assigned
-		    (assuming they are not hardwired). In such cases this
-		    property should contain phandle(s) of related source
-		    device(s).
-		    In many cases LED can be related to more than one device
-		    (e.g. one USB LED vs. multiple USB ports). Each source
-		    should be represented by a node in the device tree and be
-		    referenced by a phandle and a set of phandle arguments. A
-		    length of arguments should be specified by the
-		    #trigger-source-cells property in the source node.
-
-Required properties for flash LED child nodes:
-- flash-max-microamp : Maximum flash LED supply current in microamperes.
-- flash-max-timeout-us : Maximum timeout in microseconds after which the flash
-                         LED is turned off.
-
-For controllers that have no configurable current the flash-max-microamp
-property can be omitted.
-For controllers that have no configurable timeout the flash-max-timeout-us
-property can be omitted.
-
-* Trigger source providers
-
-Each trigger source should be represented by a device tree node. It may be e.g.
-a USB port or an Ethernet device.
-
-Required properties for trigger source:
-- #trigger-source-cells : Number of cells in a source trigger. Typically 0 for
-			  nodes of simple trigger sources (e.g. a specific USB
-			  port).
-
-* Examples
-
-#include <dt-bindings/leds/common.h>
-
-led-controller@0 {
-	compatible = "gpio-leds";
-
-	led0 {
-		function = LED_FUNCTION_STATUS;
-		linux,default-trigger = "heartbeat";
-		gpios = <&gpio0 0 GPIO_ACTIVE_HIGH>;
-	};
-
-	led1 {
-		function = LED_FUNCTION_USB;
-		gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
-		trigger-sources = <&ohci_port1>, <&ehci_port1>;
-	};
-};
-
-led-controller@0 {
-	compatible = "maxim,max77693-led";
-
-	led {
-		function = LED_FUNCTION_FLASH;
-		color = <LED_COLOR_ID_WHITE>;
-		led-sources = <0>, <1>;
-		led-max-microamp = <50000>;
-		flash-max-microamp = <320000>;
-		flash-max-timeout-us = <500000>;
-	};
-};
-
-led-controller@30 {
-        compatible = "panasonic,an30259a";
-        reg = <0x30>;
-        #address-cells = <1>;
-        #size-cells = <0>;
-
-        led@1 {
-		reg = <1>;
-		linux,default-trigger = "heartbeat";
-		function = LED_FUNCTION_INDICATOR;
-		function-enumerator = <1>;
-        };
-
-        led@2 {
-		reg = <2>;
-		function = LED_FUNCTION_INDICATOR;
-		function-enumerator = <2>;
-        };
-
-        led@3 {
-		reg = <3>;
-		function = LED_FUNCTION_INDICATOR;
-		function-enumerator = <3>;
-        };
-};
+This file has moved to ./common.yaml.
diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml
new file mode 100644
index 0000000..d97d099
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/common.yaml
@@ -0,0 +1,228 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common leds properties
+
+maintainers:
+  - Jacek Anaszewski <jacek.anaszewski@gmail.com>
+  - Pavel Machek <pavel@ucw.cz>
+
+description:
+  LED and flash LED devices provide the same basic functionality as current
+  regulators, but extended with LED and flash LED specific features like
+  blinking patterns, flash timeout, flash faults and external flash strobe mode.
+
+  Many LED devices expose more than one current output that can be connected
+  to one or more discrete LED component. Since the arrangement of connections
+  can influence the way of the LED device initialization, the LED components
+  have to be tightly coupled with the LED device binding. They are represented
+  by child nodes of the parent LED device binding.
+
+properties:
+  led-sources:
+    description:
+      List of device current outputs the LED is connected to. The outputs are
+      identified by the numbers that must be defined in the LED device binding
+      documentation.
+    $ref: /schemas/types.yaml#definitions/uint32-array
+
+  function:
+    description:
+      LED function. Use one of the LED_FUNCTION_* prefixed definitions
+      from the header include/dt-bindings/leds/common.h. If there is no
+      matching LED_FUNCTION available, add a new one.
+    $ref: /schemas/types.yaml#definitions/string
+
+  color:
+    description:
+      Color of the LED. Use one of the LED_COLOR_ID_* prefixed definitions from
+      the header include/dt-bindings/leds/common.h. If there is no matching
+      LED_COLOR_ID available, add a new one.
+    allOf:
+      - $ref: /schemas/types.yaml#definitions/uint32
+    minimum: 0
+    maximum: 8
+
+  function-enumerator:
+    description:
+      Integer to be used when more than one instance of the same function is
+      needed, differing only with an ordinal number.
+    $ref: /schemas/types.yaml#definitions/uint32
+
+  label:
+    description:
+      The label for this LED. If omitted, the label is taken from the node name
+      (excluding the unit address). It has to uniquely identify a device, i.e.
+      no other LED class device can be assigned the same label. This property is
+      deprecated - use 'function' and 'color' properties instead.
+      function-enumerator has no effect when this property is present.
+
+  default-state:
+    description:
+      The initial state of the LED. If the LED is already on or off and the
+      default-state property is set to the same value, then no glitch should be
+      produced where the LED momentarily turns off (or on). The "keep" setting
+      will keep the LED at whatever its current state is, without producing a
+      glitch.
+    allOf:
+      - $ref: /schemas/types.yaml#definitions/string
+    enum:
+      - on
+      - off
+      - keep
+    default: off
+
+  linux,default-trigger:
+    description:
+      This parameter, if present, is a string defining the trigger assigned to
+      the LED.
+    allOf:
+      - $ref: /schemas/types.yaml#definitions/string
+    enum:
+        # LED will act as a back-light, controlled by the framebuffer system
+      - backlight
+        # LED will turn on (but for leds-gpio see "default-state" property in
+        # Documentation/devicetree/bindings/leds/leds-gpio.txt)
+      - default-on
+        # LED "double" flashes at a load average based rate
+      - heartbeat
+        # LED indicates disk activity
+      - disk-activity
+        # LED indicates IDE disk activity (deprecated), in new implementations
+        # use "disk-activity"
+      - ide-disk
+        # LED flashes at a fixed, configurable rate
+      - timer
+        # LED alters the brightness for the specified duration with one software
+        # timer (requires "led-pattern" property)
+      - pattern
+
+  led-pattern:
+    description: |
+      Array of integers with default pattern for certain triggers.
+
+      Each trigger may parse this property differently:
+        - one-shot : two numbers specifying delay on and delay off (in ms),
+        - timer : two numbers specifying delay on and delay off (in ms),
+        - pattern : the pattern is given by a series of tuples, of
+          brightness and duration (in ms).  The exact format is
+          described in:
+          Documentation/devicetree/bindings/leds/leds-trigger-pattern.txt
+    allOf:
+      - $ref: /schemas/types.yaml#definitions/uint32-matrix
+    items:
+      minItems: 2
+      maxItems: 2
+
+  led-max-microamp:
+    description:
+      Maximum LED supply current in microamperes. This property can be made
+      mandatory for the board configurations introducing a risk of hardware
+      damage in case an excessive current is set.
+      For flash LED controllers with configurable current this property is
+      mandatory for the LEDs in the non-flash modes (e.g. torch or indicator).
+
+  panic-indicator:
+    description:
+      This property specifies that the LED should be used, if at all possible,
+      as a panic indicator.
+    type: boolean
+
+  trigger-sources:
+    description: |
+      List of devices which should be used as a source triggering this LED
+      activity. Some LEDs can be related to a specific device and should somehow
+      indicate its state. E.g. a USB 2.0 LED may react to device(s) in a
+      USB 2.0 port.
+      Another common example is a switch or router with multiple Ethernet
+      ports, each of them having its own LED assigned (assuming they are not
+      hardwired). In such cases this property should contain phandle(s) of the
+      related source device(s).
+      In many cases an LED can be related to more than one device (e.g. one
+      USB LED vs. multiple USB ports). Each source should be represented by a
+      node in the device tree and be referenced by a phandle and a set of
+      phandle arguments. The number of arguments should be specified by the
+      #trigger-source-cells property in the source node.
+    $ref: /schemas/types.yaml#definitions/phandle-array
+
+  # Required properties for flash LED child nodes:
+  flash-max-microamp:
+    description:
+      Maximum flash LED supply current in microamperes. Required for flash LED
+      nodes with configurable current.
+
+  flash-max-timeout-us:
+    description:
+      Maximum timeout in microseconds after which the flash LED is turned off.
+      Required for flash LED nodes with configurable timeout.
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/leds/common.h>
+
+    led-controller {
+        compatible = "gpio-leds";
+
+        led0 {
+            function = LED_FUNCTION_STATUS;
+            linux,default-trigger = "heartbeat";
+            gpios = <&gpio0 0 GPIO_ACTIVE_HIGH>;
+        };
+
+        led1 {
+            function = LED_FUNCTION_USB;
+            gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+            trigger-sources = <&ohci_port1>, <&ehci_port1>;
+        };
+    };
+
+    led-controller@0 {
+        compatible = "maxim,max77693-led";
+        reg = <0 0x100>;
+
+        led {
+            function = LED_FUNCTION_FLASH;
+            color = <LED_COLOR_ID_WHITE>;
+            led-sources = <0>, <1>;
+            led-max-microamp = <50000>;
+            flash-max-microamp = <320000>;
+            flash-max-timeout-us = <500000>;
+        };
+    };
+
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        led-controller@30 {
+            compatible = "panasonic,an30259a";
+            reg = <0x30>;
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            led@1 {
+                reg = <1>;
+                linux,default-trigger = "heartbeat";
+                function = LED_FUNCTION_INDICATOR;
+                function-enumerator = <1>;
+            };
+
+            led@2 {
+                reg = <2>;
+                function = LED_FUNCTION_INDICATOR;
+                function-enumerator = <2>;
+            };
+
+            led@3 {
+                reg = <3>;
+                function = LED_FUNCTION_INDICATOR;
+                function-enumerator = <3>;
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/leds/irled/spi-ir-led.txt b/Documentation/devicetree/bindings/leds/irled/spi-ir-led.txt
index 21882c8..83ff1b4 100644
--- a/Documentation/devicetree/bindings/leds/irled/spi-ir-led.txt
+++ b/Documentation/devicetree/bindings/leds/irled/spi-ir-led.txt
@@ -8,7 +8,7 @@
 	- compatible: should be "ir-spi-led".
 
 Optional properties:
-	- duty-cycle: 8 bit balue that represents the percentage of one period
+	- duty-cycle: 8 bit value that represents the percentage of one period
 	  in which the signal is active.  It can be 50, 60, 70, 75, 80 or 90.
 	- led-active-low: boolean value that specifies whether the output is
 	  negated with a NOT gate.
diff --git a/Documentation/devicetree/bindings/leds/leds-gpio.txt b/Documentation/devicetree/bindings/leds/leds-gpio.txt
deleted file mode 100644
index d21281b..0000000
--- a/Documentation/devicetree/bindings/leds/leds-gpio.txt
+++ /dev/null
@@ -1,75 +0,0 @@
-LEDs connected to GPIO lines
-
-Required properties:
-- compatible : should be "gpio-leds".
-
-Each LED is represented as a sub-node of the gpio-leds device.  Each
-node's name represents the name of the corresponding LED.
-
-LED sub-node properties:
-- gpios :  Should specify the LED's GPIO, see "gpios property" in
-  Documentation/devicetree/bindings/gpio/gpio.txt.  Active low LEDs should be
-  indicated using flags in the GPIO specifier.
-- function :  (optional)
-  see Documentation/devicetree/bindings/leds/common.txt
-- color :  (optional)
-  see Documentation/devicetree/bindings/leds/common.txt
-- label :  (optional)
-  see Documentation/devicetree/bindings/leds/common.txt (deprecated)
-- linux,default-trigger :  (optional)
-  see Documentation/devicetree/bindings/leds/common.txt
-- default-state:  (optional) The initial state of the LED.
-  see Documentation/devicetree/bindings/leds/common.txt
-- retain-state-suspended: (optional) The suspend state can be retained.Such
-  as charge-led gpio.
-- retain-state-shutdown: (optional) Retain the state of the LED on shutdown.
-  Useful in BMC systems, for example when the BMC is rebooted while the host
-  remains up.
-- panic-indicator : (optional)
-  see Documentation/devicetree/bindings/leds/common.txt
-
-Examples:
-
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/leds/common.h>
-
-leds {
-	compatible = "gpio-leds";
-	led0 {
-		gpios = <&mcu_pio 0 GPIO_ACTIVE_LOW>;
-		linux,default-trigger = "disk-activity";
-		function = LED_FUNCTION_DISK;
-	};
-
-	led1 {
-		gpios = <&mcu_pio 1 GPIO_ACTIVE_HIGH>;
-		/* Keep LED on if BIOS detected hardware fault */
-		default-state = "keep";
-		function = LED_FUNCTION_FAULT;
-	};
-};
-
-run-control {
-	compatible = "gpio-leds";
-	led0 {
-		gpios = <&mpc8572 6 GPIO_ACTIVE_HIGH>;
-		color = <LED_COLOR_ID_RED>;
-		default-state = "off";
-	};
-	led1 {
-		gpios = <&mpc8572 7 GPIO_ACTIVE_HIGH>;
-		color = <LED_COLOR_ID_GREEN>;
-		default-state = "on";
-	};
-};
-
-leds {
-	compatible = "gpio-leds";
-
-	led0 {
-		gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
-		linux,default-trigger = "max8903-charger-charging";
-		retain-state-suspended;
-		function = LED_FUNCTION_CHARGE;
-	};
-};
diff --git a/Documentation/devicetree/bindings/leds/leds-gpio.yaml b/Documentation/devicetree/bindings/leds/leds-gpio.yaml
new file mode 100644
index 0000000..0e75b18
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-gpio.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/leds-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LEDs connected to GPIO lines
+
+maintainers:
+  - Jacek Anaszewski <jacek.anaszewski@gmail.com>
+  - Pavel Machek <pavel@ucw.cz>
+
+description:
+  Each LED is represented as a sub-node of the gpio-leds device.  Each
+  node's name represents the name of the corresponding LED.
+
+properties:
+  compatible:
+    const: gpio-leds
+
+patternProperties:
+  # The first form is preferred, but fall back to just 'led' anywhere in the
+  # node name to at least catch some child nodes.
+  "(^led-[0-9a-f]$|led)":
+    type: object
+
+    allOf:
+      - $ref: common.yaml#
+
+    properties:
+      gpios:
+        maxItems: 1
+
+      retain-state-suspended:
+        description:
+          The suspend state can be retained, e.g. for a charge LED GPIO.
+        type: boolean
+
+      retain-state-shutdown:
+        description:
+          Retain the state of the LED on shutdown. Useful in BMC systems, for
+          example when the BMC is rebooted while the host remains up.
+        type: boolean
+
+    required:
+      - gpios
+
+additionalProperties: false
+
+examples:
+  - |
+
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/leds/common.h>
+
+    leds {
+        compatible = "gpio-leds";
+        led-0 {
+            gpios = <&mcu_pio 0 GPIO_ACTIVE_LOW>;
+            linux,default-trigger = "disk-activity";
+            function = LED_FUNCTION_DISK;
+        };
+
+        led-1 {
+            gpios = <&mcu_pio 1 GPIO_ACTIVE_HIGH>;
+            /* Keep LED on if BIOS detected hardware fault */
+            default-state = "keep";
+            function = LED_FUNCTION_FAULT;
+        };
+    };
+
+    run-control {
+        compatible = "gpio-leds";
+        led-0 {
+            gpios = <&mpc8572 6 GPIO_ACTIVE_HIGH>;
+            color = <LED_COLOR_ID_RED>;
+            default-state = "off";
+        };
+        led-1 {
+            gpios = <&mpc8572 7 GPIO_ACTIVE_HIGH>;
+            color = <LED_COLOR_ID_GREEN>;
+            default-state = "on";
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/leds/leds-lm3692x.txt b/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
index 4c2d923..501468a 100644
--- a/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
+++ b/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
@@ -18,6 +18,10 @@
 Optional properties:
 	- enable-gpios : gpio pin to enable/disable the device.
 	- vled-supply : LED supply
+	- ti,ovp-microvolt: Overvoltage protection in
+	    microvolts, can be 17000000, 21000000, 25000000 or
+	    29000000. If ti,ovp-microvolt is not specified, it
+	    defaults to 29000000.
 
 Required child properties:
 	- reg : 0 - Will enable all LED sync paths
@@ -31,6 +35,8 @@
 	- label : see Documentation/devicetree/bindings/leds/common.txt (deprecated)
 	- linux,default-trigger :
 	   see Documentation/devicetree/bindings/leds/common.txt
+	- led-max-microamp :
+	   see Documentation/devicetree/bindings/leds/common.txt
 
 Example:
 
@@ -44,12 +50,14 @@
 
 	enable-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
 	vled-supply = <&vbatt>;
+	ti,ovp-microvolt = <29000000>;
 
 	led@0 {
 		reg = <0>;
 		function = LED_FUNCTION_BACKLIGHT;
 		color = <LED_COLOR_ID_WHITE>;
 		linux,default-trigger = "backlight";
+		led-max-microamp = <20000>;
 	};
 }
 
diff --git a/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml b/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml
new file mode 100644
index 0000000..b50f4bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/rohm,bd71828-leds.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BD71828 Power Management Integrated Circuit LED driver
+
+maintainers:
+  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+
+description: |
+  This module is part of the ROHM BD71828 MFD device. For more details
+  see Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml.
+
+  The LED controller is represented as a sub-node of the PMIC node on the device
+  tree.
+
+  The device has two LED outputs, referred to as GRNLED and AMBLED in the data sheet.
+
+select: false
+
+properties:
+  compatible:
+    const: rohm,bd71828-leds
+
+patternProperties:
+  "^led-[1-2]$":
+    type: object
+    description:
+      Properties for a single LED.
+    properties:
+      #allOf:
+        #- $ref: "common.yaml#"
+      rohm,led-compatible:
+        description: LED identification string
+        allOf:
+          - $ref: "/schemas/types.yaml#/definitions/string"
+          - enum:
+            - bd71828-ambled
+            - bd71828-grnled
+      function:
+        description:
+          Purpose of LED as defined in dt-bindings/leds/common.h
+        $ref: "/schemas/types.yaml#/definitions/string"
+      color:
+        description:
+          LED colour as defined in dt-bindings/leds/common.h
+        $ref: "/schemas/types.yaml#/definitions/uint32"
+
+required:
+  - compatible
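+
+# The example below is an illustrative sketch only, not taken from the MFD
+# binding: in a real DT the node sits under the BD71828 PMIC node described
+# in rohm,bd71828-pmic.yaml, and the chosen function/color values are
+# assumptions.
+examples:
+  - |
+    #include <dt-bindings/leds/common.h>
+
+    led-controller {
+        compatible = "rohm,bd71828-leds";
+
+        led-1 {
+            rohm,led-compatible = "bd71828-grnled";
+            function = LED_FUNCTION_STATUS;
+            color = <LED_COLOR_ID_GREEN>;
+        };
+
+        led-2 {
+            rohm,led-compatible = "bd71828-ambled";
+            function = LED_FUNCTION_INDICATOR;
+            color = <LED_COLOR_ID_AMBER>;
+        };
+    };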
diff --git a/Documentation/devicetree/bindings/leds/trigger-source.yaml b/Documentation/devicetree/bindings/leds/trigger-source.yaml
new file mode 100644
index 0000000..0618003
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/trigger-source.yaml
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/trigger-source.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Trigger source providers
+
+maintainers:
+  - Jacek Anaszewski <jacek.anaszewski@gmail.com>
+  - Pavel Machek <pavel@ucw.cz>
+
+description:
+  Each trigger source provider should be represented by a device tree node. It
+  may be e.g. a USB port or an Ethernet device.
+
+properties:
+  '#trigger-source-cells':
+    description:
+      Number of cells in a source trigger. Typically 0 for nodes of simple
+      trigger sources (e.g. a specific USB port).
+    enum: [ 0, 1 ]
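+
+# Illustrative sketch only: a USB host port exposing itself as a trigger
+# source, referenced elsewhere with trigger-sources = <&ohci_port1>;.
+# The controller node, unit addresses and labels are assumptions.
+examples:
+  - |
+    usb@1c14400 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        ohci_port1: port@1 {
+            reg = <1>;
+            #trigger-source-cells = <0>;
+        };
+    };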
+
+...
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
index 0278482..beec612 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
@@ -21,10 +21,11 @@
 	Usage: required
 	Value type: <prop-encoded-array>
 	Definition: must specify the base address and size of the global block
+
 - clocks:
-	Usage: required if #clocks-cells property is present
-	Value type: <phandle>
-	Definition: phandle to the input PLL, which feeds the APCS mux/divider
+	Usage: required if the clock-names property is present
+	Value type: <phandle array>
+	Definition: phandles to the two parent clocks of the clock driver.
 
 - #mbox-cells:
 	Usage: required
@@ -36,6 +37,12 @@
 	Value type: <u32>
 	Definition: as described in clock.txt, must be 0
 
+- clock-names:
+	Usage: required if the platform-data-based clock driver needs to
+	retrieve the parent clock names from the device tree.
+	This requires two mandatory clocks to be defined.
+	Value type: <string-array>
+	Definition: must be "pll" and "aux"
 
 = EXAMPLE
 The following example describes the APCS HMSS found in MSM8996 and part of the
@@ -68,3 +75,14 @@
 		clocks = <&a53pll>;
 		#clock-cells = <0>;
 	};
+
+Below is another example of the APCS binding on QCS404 platforms:
+
+	apcs_glb: mailbox@b011000 {
+		compatible = "qcom,qcs404-apcs-apps-global", "syscon";
+		reg = <0x0b011000 0x1000>;
+		#mbox-cells = <1>;
+		clocks = <&apcs_hfpll>, <&gcc GCC_GPLL0_AO_OUT_MAIN>;
+		clock-names = "pll", "aux";
+		#clock-cells = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
index 0f6374c..9af873b 100644
--- a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
+++ b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
@@ -16,7 +16,15 @@
 
 properties:
   compatible:
-    const: allwinner,sun7i-a20-csi0
+    oneOf:
+      - const: allwinner,sun4i-a10-csi1
+      - const: allwinner,sun7i-a20-csi0
+      - items:
+        - const: allwinner,sun7i-a20-csi1
+        - const: allwinner,sun4i-a10-csi1
+      - items:
+        - const: allwinner,sun8i-r40-csi0
+        - const: allwinner,sun7i-a20-csi0
 
   reg:
     maxItems: 1
@@ -25,12 +33,16 @@
     maxItems: 1
 
   clocks:
+    minItems: 2
+    maxItems: 3
     items:
       - description: The CSI interface clock
       - description: The CSI ISP clock
       - description: The CSI DRAM clock
 
   clock-names:
+    minItems: 2
+    maxItems: 3
     items:
       - const: bus
       - const: isp
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml
new file mode 100644
index 0000000..526593c
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/allwinner,sun4i-a10-video-engine.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Video Engine Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  compatible:
+    enum:
+      - allwinner,sun4i-a10-video-engine
+      - allwinner,sun5i-a13-video-engine
+      - allwinner,sun7i-a20-video-engine
+      - allwinner,sun8i-a33-video-engine
+      - allwinner,sun8i-h3-video-engine
+      - allwinner,sun50i-a64-video-engine
+      - allwinner,sun50i-h5-video-engine
+      - allwinner,sun50i-h6-video-engine
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Bus Clock
+      - description: Module Clock
+      - description: RAM Clock
+
+  clock-names:
+    items:
+      - const: ahb
+      - const: mod
+      - const: ram
+
+  resets:
+    maxItems: 1
+
+  allwinner,sram:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    description: Phandle to the device SRAM
+
+  memory-region:
+    description:
+      CMA pool to use for buffer allocation instead of the default
+      CMA pool.
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - resets
+  - allwinner,sram
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/sun7i-a20-ccu.h>
+    #include <dt-bindings/reset/sun4i-a10-ccu.h>
+
+    video-codec@1c0e000 {
+        compatible = "allwinner,sun7i-a20-video-engine";
+        reg = <0x01c0e000 0x1000>;
+        interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&ccu CLK_AHB_VE>, <&ccu CLK_VE>,
+                 <&ccu CLK_DRAM_VE>;
+        clock-names = "ahb", "mod", "ram";
+        resets = <&ccu RST_VE>;
+        allwinner,sram = <&ve_sram 1>;
+    };
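+
+    /*
+     * Optional sketch carried over from the former text binding to show the
+     * "memory-region" property: the codec node above would then also have
+     * memory-region = <&cma_pool>;. The pool must stay within the first
+     * 256 MiB of DRAM; the size and range below are illustrative.
+     */
+    reserved-memory {
+        #address-cells = <1>;
+        #size-cells = <1>;
+        ranges;
+
+        /* Address must be kept in the lower 256 MiBs of DRAM for VE. */
+        cma_pool: default-pool {
+            compatible = "shared-dma-pool";
+            size = <0x6000000>;
+            alloc-ranges = <0x4a000000 0x6000000>;
+            reusable;
+            linux,cma-default;
+        };
+    };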
+
+...
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml b/Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml
new file mode 100644
index 0000000..1fd9b55
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml
@@ -0,0 +1,115 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/allwinner,sun6i-a31-csi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A31 CMOS Sensor Interface (CSI) Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  compatible:
+    enum:
+      - allwinner,sun6i-a31-csi
+      - allwinner,sun8i-a83t-csi
+      - allwinner,sun8i-h3-csi
+      - allwinner,sun8i-v3s-csi
+      - allwinner,sun50i-a64-csi
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Bus Clock
+      - description: Module Clock
+      - description: DRAM Clock
+
+  clock-names:
+    items:
+      - const: bus
+      - const: mod
+      - const: ram
+
+  resets:
+    maxItems: 1
+
+  # See ./video-interfaces.txt for details
+  port:
+    type: object
+
+    properties:
+      endpoint:
+        type: object
+
+        properties:
+          remote-endpoint: true
+
+          bus-width:
+            enum: [ 8, 10, 12, 16 ]
+
+          pclk-sample: true
+          hsync-active: true
+          vsync-active: true
+
+        required:
+          - bus-width
+          - remote-endpoint
+
+    required:
+      - endpoint
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - resets
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/sun8i-v3s-ccu.h>
+    #include <dt-bindings/reset/sun8i-v3s-ccu.h>
+
+    csi1: csi@1cb4000 {
+        compatible = "allwinner,sun8i-v3s-csi";
+        reg = <0x01cb4000 0x1000>;
+        interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&ccu CLK_BUS_CSI>,
+                 <&ccu CLK_CSI1_SCLK>,
+                 <&ccu CLK_DRAM_CSI>;
+        clock-names = "bus",
+                      "mod",
+                      "ram";
+        resets = <&ccu RST_BUS_CSI>;
+
+        port {
+            /* Parallel bus endpoint */
+            csi1_ep: endpoint {
+                remote-endpoint = <&adv7611_ep>;
+                bus-width = <16>;
+
+                /*
+                 * If hsync-active/vsync-active are missing,
+                 * embedded BT.656 sync is used.
+                 */
+                 hsync-active = <0>; /* Active low */
+                 vsync-active = <0>; /* Active low */
+                 pclk-sample = <1>;  /* Rising */
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
new file mode 100644
index 0000000..335717e
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
@@ -0,0 +1,141 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 BayLibre, SAS
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/media/amlogic,gx-vdec.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic Video Decoder
+
+maintainers:
+  - Neil Armstrong <narmstrong@baylibre.com>
+  - Maxime Jourdan <mjourdan@baylibre.com>
+
+description: |
+  The video decoding IP lies within the DOS memory region,
+  except for the hardware bitstream parser that makes use of an undocumented
+  region.
+
+  It makes use of the following blocks:
+  - ESPARSER is a bitstream parser that outputs to a VIFIFO. Further VDEC blocks
+    then feed from this VIFIFO.
+  - VDEC_1 can decode MPEG-1, MPEG-2, MPEG-4 part 2, MJPEG, H.263, H.264, VC-1.
+  - VDEC_HEVC can decode HEVC and VP9.
+
+  Both VDEC_1 and VDEC_HEVC share the "vdec" IRQ and as such cannot run
+  concurrently.
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+        - enum:
+          - amlogic,gxbb-vdec # GXBB (S905)
+          - amlogic,gxl-vdec # GXL (S905X, S905D)
+          - amlogic,gxm-vdec # GXM (S912)
+        - const: amlogic,gx-vdec
+      - enum:
+        - amlogic,g12a-vdec # G12A (S905X2, S905D2)
+        - amlogic,sm1-vdec # SM1 (S905X3, S905D3)
+
+  interrupts:
+    minItems: 2
+
+  interrupt-names:
+    items:
+      - const: vdec
+      - const: esparser
+
+  reg:
+    minItems: 2
+
+  reg-names:
+    items:
+      - const: dos
+      - const: esparser
+
+  resets:
+    maxItems: 1
+
+  reset-names:
+    items:
+      - const: esparser
+
+  clocks:
+    minItems: 4
+    maxItems: 5
+
+  clock-names:
+    minItems: 4
+    maxItems: 5
+    items:
+      - const: dos_parser
+      - const: dos
+      - const: vdec_1
+      - const: vdec_hevc
+      - const: vdec_hevcf
+
+  amlogic,ao-sysctrl:
+    description: should point to the AOBUS sysctrl node
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/phandle
+
+  amlogic,canvas:
+    description: should point to a canvas provider node
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/phandle
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - amlogic,gx-vdec
+
+    then:
+      properties:
+        clock-names:
+          maxItems: 4
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - amlogic,g12a-vdec
+              - amlogic,sm1-vdec
+
+    then:
+      properties:
+        clock-names:
+          minItems: 5
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - interrupts
+  - interrupt-names
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+  - amlogic,ao-sysctrl
+  - amlogic,canvas
+
+examples:
+  - |
+    vdec: video-decoder@c8820000 {
+          compatible = "amlogic,gxl-vdec", "amlogic,gx-vdec";
+          reg = <0xc8820000 0x10000>, <0xc110a580 0xe4>;
+          reg-names = "dos", "esparser";
+          interrupts = <44>, <32>;
+          interrupt-names = "vdec", "esparser";
+          clocks = <&clk_dos_parser>, <&clk_dos>, <&clk_vdec_1>, <&clk_vdec_hevc>;
+          clock-names = "dos_parser", "dos", "vdec_1", "vdec_hevc";
+          resets = <&reset_parser>;
+          reset-names = "esparser";
+          amlogic,ao-sysctrl = <&sysctrl_AO>;
+          amlogic,canvas = <&canvas>;
+    };
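+
+    /*
+     * Illustrative note, not a complete node: the G12A/SM1 decoders take a
+     * fifth clock, e.g. (the clock labels below are assumptions)
+     *   clocks = <&clk_dos_parser>, <&clk_dos>, <&clk_vdec_1>,
+     *            <&clk_vdec_hevc>, <&clk_vdec_hevcf>;
+     *   clock-names = "dos_parser", "dos", "vdec_1", "vdec_hevc",
+     *                 "vdec_hevcf";
+     */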
diff --git a/Documentation/devicetree/bindings/media/amlogic,vdec.txt b/Documentation/devicetree/bindings/media/amlogic,vdec.txt
deleted file mode 100644
index 9b6aace..0000000
--- a/Documentation/devicetree/bindings/media/amlogic,vdec.txt
+++ /dev/null
@@ -1,72 +0,0 @@
-Amlogic Video Decoder
-================================
-
-The video decoding IP lies within the DOS memory region,
-except for the hardware bitstream parser that makes use of an undocumented
-region.
-
-It makes use of the following blocks:
-
-- ESPARSER is a bitstream parser that outputs to a VIFIFO. Further VDEC blocks
-then feed from this VIFIFO.
-- VDEC_1 can decode MPEG-1, MPEG-2, MPEG-4 part 2, MJPEG, H.263, H.264, VC-1.
-- VDEC_HEVC can decode HEVC and VP9.
-
-Both VDEC_1 and VDEC_HEVC share the "vdec" IRQ and as such cannot run
-concurrently.
-
-Device Tree Bindings:
----------------------
-
-VDEC: Video Decoder
---------------------------
-
-Required properties:
-- compatible: value should be different for each SoC family as :
-	- GXBB (S905) : "amlogic,gxbb-vdec"
-	- GXL (S905X, S905D) : "amlogic,gxl-vdec"
-	- GXM (S912) : "amlogic,gxm-vdec"
-	followed by the common "amlogic,gx-vdec"
-- reg: base address and size of he following memory-mapped regions :
-	- dos
-	- esparser
-- reg-names: should contain the names of the previous memory regions
-- interrupts: should contain the following IRQs:
-	- vdec
-	- esparser
-- interrupt-names: should contain the names of the previous interrupts
-- amlogic,ao-sysctrl: should point to the AOBUS sysctrl node
-- amlogic,canvas: should point to a canvas provider node
-- clocks: should contain the following clocks :
-	- dos_parser
-	- dos
-	- vdec_1
-	- vdec_hevc
-- clock-names: should contain the names of the previous clocks
-- resets: should contain the parser reset
-- reset-names: should be "esparser"
-
-Example:
-
-vdec: video-codec@c8820000 {
-	compatible = "amlogic,gxbb-vdec", "amlogic,gx-vdec";
-	reg = <0x0 0xc8820000 0x0 0x10000>,
-	      <0x0 0xc110a580 0x0 0xe4>;
-	reg-names = "dos", "esparser";
-
-	interrupts = <GIC_SPI 44 IRQ_TYPE_EDGE_RISING>,
-		     <GIC_SPI 32 IRQ_TYPE_EDGE_RISING>;
-	interrupt-names = "vdec", "esparser";
-
-	amlogic,ao-sysctrl = <&sysctrl_AO>;
-	amlogic,canvas = <&canvas>;
-
-	clocks = <&clkc CLKID_DOS_PARSER>,
-		 <&clkc CLKID_DOS>,
-		 <&clkc CLKID_VDEC_1>,
-		 <&clkc CLKID_VDEC_HEVC>;
-	clock-names = "dos_parser", "dos", "vdec_1", "vdec_hevc";
-
-	resets = <&reset RESET_PARSER>;
-	reset-names = "esparser";
-};
diff --git a/Documentation/devicetree/bindings/media/atmel-isi.txt b/Documentation/devicetree/bindings/media/atmel-isi.txt
index 332513a..8924c75 100644
--- a/Documentation/devicetree/bindings/media/atmel-isi.txt
+++ b/Documentation/devicetree/bindings/media/atmel-isi.txt
@@ -2,7 +2,7 @@
 ----------------------------------
 
 Required properties for ISI:
-- compatible: must be "atmel,at91sam9g45-isi".
+- compatible: must be "atmel,at91sam9g45-isi" or "microchip,sam9x60-isi".
 - reg: physical base address and length of the registers set for the device.
 - interrupts: should contain IRQ line for the ISI.
 - clocks: list of clock specifiers, corresponding to entries in the clock-names
diff --git a/Documentation/devicetree/bindings/media/cedrus.txt b/Documentation/devicetree/bindings/media/cedrus.txt
deleted file mode 100644
index 20c82fb..0000000
--- a/Documentation/devicetree/bindings/media/cedrus.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-Device-tree bindings for the VPU found in Allwinner SoCs, referred to as the
-Video Engine (VE) in Allwinner literature.
-
-The VPU can only access the first 256 MiB of DRAM, that are DMA-mapped starting
-from the DRAM base. This requires specific memory allocation and handling.
-
-Required properties:
-- compatible		: must be one of the following compatibles:
-			- "allwinner,sun4i-a10-video-engine"
-			- "allwinner,sun5i-a13-video-engine"
-			- "allwinner,sun7i-a20-video-engine"
-			- "allwinner,sun8i-a33-video-engine"
-			- "allwinner,sun8i-h3-video-engine"
-			- "allwinner,sun50i-a64-video-engine"
-			- "allwinner,sun50i-h5-video-engine"
-			- "allwinner,sun50i-h6-video-engine"
-- reg			: register base and length of VE;
-- clocks		: list of clock specifiers, corresponding to entries in
-			  the clock-names property;
-- clock-names		: should contain "ahb", "mod" and "ram" entries;
-- resets		: phandle for reset;
-- interrupts		: VE interrupt number;
-- allwinner,sram	: SRAM region to use with the VE.
-
-Optional properties:
-- memory-region		: CMA pool to use for buffers allocation instead of the
-			  default CMA pool.
-
-Example:
-
-reserved-memory {
-	#address-cells = <1>;
-	#size-cells = <1>;
-	ranges;
-
-	/* Address must be kept in the lower 256 MiBs of DRAM for VE. */
-	cma_pool: default-pool {
-		compatible = "shared-dma-pool";
-		size = <0x6000000>;
-		alloc-ranges = <0x4a000000 0x6000000>;
-		reusable;
-		linux,cma-default;
-	};
-};
-
-video-codec@1c0e000 {
-	compatible = "allwinner,sun7i-a20-video-engine";
-	reg = <0x01c0e000 0x1000>;
-
-	clocks = <&ccu CLK_AHB_VE>, <&ccu CLK_VE>,
-		 <&ccu CLK_DRAM_VE>;
-	clock-names = "ahb", "mod", "ram";
-
-	resets = <&ccu RST_VE>;
-	interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
-	allwinner,sram = <&ve_sram 1>;
-};
diff --git a/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt b/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt
index 38941db..ce9a226 100644
--- a/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt
+++ b/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt
@@ -1,4 +1,4 @@
-Samsung S5P/EXYNOS SoC series JPEG codec
+Samsung S5P/Exynos SoC series JPEG codec
 
 Required properties:
 
diff --git a/Documentation/devicetree/bindings/media/exynos5-gsc.txt b/Documentation/devicetree/bindings/media/exynos5-gsc.txt
index bc963a6..1872688 100644
--- a/Documentation/devicetree/bindings/media/exynos5-gsc.txt
+++ b/Documentation/devicetree/bindings/media/exynos5-gsc.txt
@@ -1,6 +1,6 @@
 * Samsung Exynos5 G-Scaler device
 
-G-Scaler is used for scaling and color space conversion on EXYNOS5 SoCs.
+G-Scaler is used for scaling and color space conversion on Exynos5 SoCs.
 
 Required properties:
 - compatible: should be one of
diff --git a/Documentation/devicetree/bindings/media/fsl-pxp.txt b/Documentation/devicetree/bindings/media/fsl-pxp.txt
index 2477e7f..f8090e0 100644
--- a/Documentation/devicetree/bindings/media/fsl-pxp.txt
+++ b/Documentation/devicetree/bindings/media/fsl-pxp.txt
@@ -8,7 +8,7 @@
 
 Required properties:
 - compatible: should be "fsl,<soc>-pxp", where SoC can be one of imx23, imx28,
-  imx6dl, imx6sl, imx6ul, imx6sx, imx6ull, or imx7d.
+  imx6dl, imx6sl, imx6sll, imx6ul, imx6sx, imx6ull, or imx7d.
 - reg: the register base and size for the device registers
 - interrupts: the PXP interrupt, two interrupts for imx6ull and imx7d.
 - clock-names: should be "axi"
diff --git a/Documentation/devicetree/bindings/media/hix5hd2-ir.txt b/Documentation/devicetree/bindings/media/hix5hd2-ir.txt
index 13ebc0f..ca4cf77 100644
--- a/Documentation/devicetree/bindings/media/hix5hd2-ir.txt
+++ b/Documentation/devicetree/bindings/media/hix5hd2-ir.txt
@@ -1,7 +1,8 @@
 Device-Tree bindings for hix5hd2 ir IP
 
 Required properties:
-	- compatible: Should contain "hisilicon,hix5hd2-ir".
+	- compatible: Should contain "hisilicon,hix5hd2-ir", or:
+		- "hisilicon,hi3796cv300-ir" for Hi3796CV300 IR device.
 	- reg: Base physical address of the controller and length of memory
 	  mapped region.
 	- interrupts: interrupt-specifier for the sole interrupt generated by
diff --git a/Documentation/devicetree/bindings/media/rc.yaml b/Documentation/devicetree/bindings/media/rc.yaml
index d113807..a64ee03 100644
--- a/Documentation/devicetree/bindings/media/rc.yaml
+++ b/Documentation/devicetree/bindings/media/rc.yaml
@@ -123,6 +123,7 @@
           - rc-su3000
           - rc-tango
           - rc-tanix-tx3mini
+          - rc-tanix-tx5max
           - rc-tbs-nec
           - rc-technisat-ts35
           - rc-technisat-usb2
diff --git a/Documentation/devicetree/bindings/media/renesas,ceu.txt b/Documentation/devicetree/bindings/media/renesas,ceu.txt
deleted file mode 100644
index 3e2a265..0000000
--- a/Documentation/devicetree/bindings/media/renesas,ceu.txt
+++ /dev/null
@@ -1,86 +0,0 @@
-Renesas Capture Engine Unit (CEU)
-----------------------------------------------
-
-The Capture Engine Unit is the image capture interface found in the Renesas
-SH Mobile, R-Mobile and RZ SoCs.
-
-The interface supports a single parallel input with data bus width of 8 or 16
-bits.
-
-Required properties:
-- compatible: Shall be one of the following values:
-	"renesas,r7s72100-ceu" for CEU units found in RZ/A1H and RZ/A1M SoCs
-	"renesas,r8a7740-ceu" for CEU units found in R-Mobile A1 R8A7740 SoCs
-- reg: Registers address base and size.
-- interrupts: The interrupt specifier.
-
-The CEU supports a single parallel input and should contain a single 'port'
-subnode with a single 'endpoint'. Connection to input devices are modeled
-according to the video interfaces OF bindings specified in:
-[1] Documentation/devicetree/bindings/media/video-interfaces.txt
-
-Optional endpoint properties applicable to parallel input bus described in
-the above mentioned "video-interfaces.txt" file are supported.
-
-- hsync-active: See [1] for description. If property is not present,
-  default is active high.
-- vsync-active: See [1] for description. If property is not present,
-  default is active high.
-- bus-width: See [1] for description. Accepted values are '8' and '16'.
-  If property is not present, default is '8'.
-- field-even-active: See [1] for description. If property is not present,
-  an even field is identified by a logic 0 (active-low signal).
-
-Example:
-
-The example describes the connection between the Capture Engine Unit and an
-OV7670 image sensor connected to i2c1 interface.
-
-ceu: ceu@e8210000 {
-	reg = <0xe8210000 0x209c>;
-	compatible = "renesas,r7s72100-ceu";
-	interrupts = <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
-
-	pinctrl-names = "default";
-	pinctrl-0 = <&vio_pins>;
-
-	status = "okay";
-
-	port {
-		ceu_in: endpoint {
-			remote-endpoint = <&ov7670_out>;
-
-			hsync-active = <1>;
-			vsync-active = <0>;
-		};
-	};
-};
-
-i2c1: i2c@fcfee400 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&i2c1_pins>;
-
-	status = "okay";
-
-	clock-frequency = <100000>;
-
-	ov7670: camera@21 {
-		compatible = "ovti,ov7670";
-		reg = <0x21>;
-
-		pinctrl-names = "default";
-		pinctrl-0 = <&vio_pins>;
-
-		reset-gpios = <&port3 11 GPIO_ACTIVE_LOW>;
-		powerdown-gpios = <&port3 12 GPIO_ACTIVE_HIGH>;
-
-		port {
-			ov7670_out: endpoint {
-				remote-endpoint = <&ceu_in>;
-
-				hsync-active = <1>;
-				vsync-active = <0>;
-			};
-		};
-	};
-};
diff --git a/Documentation/devicetree/bindings/media/renesas,ceu.yaml b/Documentation/devicetree/bindings/media/renesas,ceu.yaml
new file mode 100644
index 0000000..8e9251a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/renesas,ceu.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/renesas,ceu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas Capture Engine Unit (CEU) Bindings
+
+maintainers:
+  - Jacopo Mondi <jacopo+renesas@jmondi.org>
+  - linux-renesas-soc@vger.kernel.org
+
+description: |+
+  The Capture Engine Unit is the image capture interface found in the Renesas SH
+  Mobile, R-Mobile and RZ SoCs. The interface supports a single parallel input
+  with data bus width of 8 or 16 bits.
+
+properties:
+  compatible:
+    enum:
+      - renesas,r7s72100-ceu
+      - renesas,r8a7740-ceu
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  port:
+    type: object
+    additionalProperties: false
+
+    properties:
+       endpoint:
+         type: object
+         additionalProperties: false
+
+         # Properties described in
+         # Documentation/devicetree/bindings/media/video-interfaces.txt
+         properties:
+           remote-endpoint: true
+           hsync-active: true
+           vsync-active: true
+           field-even-active: false
+           bus-width:
+             enum: [8, 16]
+             default: 8
+
+         required:
+           - remote-endpoint
+
+    required:
+      - endpoint
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - port
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    ceu: ceu@e8210000 {
+        reg = <0xe8210000 0x209c>;
+        compatible = "renesas,r7s72100-ceu";
+        interrupts = <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
+
+        port {
+            ceu_in: endpoint {
+                remote-endpoint = <&ov7670_out>;
+                hsync-active = <1>;
+                vsync-active = <0>;
+            };
+        };
+    };
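+
+    /*
+     * Sensor side of the link, adapted from the former text binding as an
+     * illustrative sketch: the I2C controller address, the sensor address
+     * and the clock-frequency are board-specific assumptions.
+     */
+    i2c1: i2c@fcfee400 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        clock-frequency = <100000>;
+
+        ov7670: camera@21 {
+            compatible = "ovti,ov7670";
+            reg = <0x21>;
+
+            port {
+                ov7670_out: endpoint {
+                    remote-endpoint = <&ceu_in>;
+                    hsync-active = <1>;
+                    vsync-active = <0>;
+                };
+            };
+        };
+    };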
diff --git a/Documentation/devicetree/bindings/media/renesas,csi2.txt b/Documentation/devicetree/bindings/media/renesas,csi2.txt
deleted file mode 100644
index 2da6f60..0000000
--- a/Documentation/devicetree/bindings/media/renesas,csi2.txt
+++ /dev/null
@@ -1,107 +0,0 @@
-Renesas R-Car MIPI CSI-2
-------------------------
-
-The R-Car CSI-2 receiver device provides MIPI CSI-2 capabilities for the
-Renesas R-Car and RZ/G2 family of devices. It is used in conjunction with the
-R-Car VIN module, which provides the video capture capabilities.
-
-Mandatory properties
---------------------
- - compatible: Must be one or more of the following
-   - "renesas,r8a774a1-csi2" for the R8A774A1 device.
-   - "renesas,r8a774b1-csi2" for the R8A774B1 device.
-   - "renesas,r8a774c0-csi2" for the R8A774C0 device.
-   - "renesas,r8a7795-csi2" for the R8A7795 device.
-   - "renesas,r8a7796-csi2" for the R8A7796 device.
-   - "renesas,r8a77965-csi2" for the R8A77965 device.
-   - "renesas,r8a77970-csi2" for the R8A77970 device.
-   - "renesas,r8a77980-csi2" for the R8A77980 device.
-   - "renesas,r8a77990-csi2" for the R8A77990 device.
-
- - reg: the register base and size for the device registers
- - interrupts: the interrupt for the device
- - clocks: A phandle + clock specifier for the module clock
- - resets: A phandle + reset specifier for the module reset
-
-The device node shall contain two 'port' child nodes according to the
-bindings defined in Documentation/devicetree/bindings/media/
-video-interfaces.txt. port@0 shall connect to the CSI-2 source. port@1
-shall connect to all the R-Car VIN modules that have a hardware
-connection to the CSI-2 receiver.
-
-- port@0- Video source (mandatory)
-	- endpoint@0 - sub-node describing the endpoint that is the video source
-
-- port@1 - VIN instances (optional)
-	- One endpoint sub-node for every R-Car VIN instance which is connected
-	  to the R-Car CSI-2 receiver.
-
-Example:
-
-	csi20: csi2@fea80000 {
-		compatible = "renesas,r8a7796-csi2";
-		reg = <0 0xfea80000 0 0x10000>;
-		interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&cpg CPG_MOD 714>;
-		power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-		resets = <&cpg 714>;
-
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				reg = <0>;
-
-				csi20_in: endpoint@0 {
-					reg = <0>;
-					clock-lanes = <0>;
-					data-lanes = <1>;
-					remote-endpoint = <&adv7482_txb>;
-				};
-			};
-
-			port@1 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				reg = <1>;
-
-				csi20vin0: endpoint@0 {
-					reg = <0>;
-					remote-endpoint = <&vin0csi20>;
-				};
-				csi20vin1: endpoint@1 {
-					reg = <1>;
-					remote-endpoint = <&vin1csi20>;
-				};
-				csi20vin2: endpoint@2 {
-					reg = <2>;
-					remote-endpoint = <&vin2csi20>;
-				};
-				csi20vin3: endpoint@3 {
-					reg = <3>;
-					remote-endpoint = <&vin3csi20>;
-				};
-				csi20vin4: endpoint@4 {
-					reg = <4>;
-					remote-endpoint = <&vin4csi20>;
-				};
-				csi20vin5: endpoint@5 {
-					reg = <5>;
-					remote-endpoint = <&vin5csi20>;
-				};
-				csi20vin6: endpoint@6 {
-					reg = <6>;
-					remote-endpoint = <&vin6csi20>;
-				};
-				csi20vin7: endpoint@7 {
-					reg = <7>;
-					remote-endpoint = <&vin7csi20>;
-				};
-			};
-		};
-	};
diff --git a/Documentation/devicetree/bindings/media/renesas,csi2.yaml b/Documentation/devicetree/bindings/media/renesas,csi2.yaml
new file mode 100644
index 0000000..408442a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/renesas,csi2.yaml
@@ -0,0 +1,198 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2020 Renesas Electronics Corp.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/renesas,csi2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car MIPI CSI-2 receiver
+
+maintainers:
+  - Niklas Söderlund <niklas.soderlund@ragnatech.se>
+
+description:
+  The R-Car CSI-2 receiver device provides MIPI CSI-2 capabilities for the
+  Renesas R-Car and RZ/G2 family of devices. It is used in conjunction with the
+  R-Car VIN module, which provides the video capture capabilities.
+
+properties:
+  compatible:
+    items:
+      - enum:
+        - renesas,r8a774a1-csi2 # RZ/G2M
+        - renesas,r8a774b1-csi2 # RZ/G2N
+        - renesas,r8a774c0-csi2 # RZ/G2E
+        - renesas,r8a7795-csi2  # R-Car H3
+        - renesas,r8a7796-csi2  # R-Car M3-W
+        - renesas,r8a77965-csi2 # R-Car M3-N
+        - renesas,r8a77970-csi2 # R-Car V3M
+        - renesas,r8a77980-csi2 # R-Car V3H
+        - renesas,r8a77990-csi2 # R-Car E3
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  ports:
+    type: object
+    description:
+      A node containing input and output port nodes with endpoint definitions
+      as documented in
+      Documentation/devicetree/bindings/media/video-interfaces.txt
+
+    properties:
+      port@0:
+        type: object
+        description:
+          Input port node, single endpoint describing the CSI-2 transmitter.
+
+        properties:
+          reg:
+            const: 0
+
+          endpoint:
+            type: object
+
+            properties:
+              clock-lanes:
+                maxItems: 1
+
+              data-lanes:
+                maxItems: 1
+
+              remote-endpoint: true
+
+            required:
+              - clock-lanes
+              - data-lanes
+              - remote-endpoint
+
+            additionalProperties: false
+
+        additionalProperties: false
+
+      port@1:
+        type: object
+        description:
+          Output port node, multiple endpoints describing all the R-Car VIN
+          modules connected to the CSI-2 receiver.
+
+        properties:
+          '#address-cells':
+            const: 1
+
+          '#size-cells':
+            const: 0
+
+          reg:
+            const: 1
+
+        patternProperties:
+          "^endpoint@[0-9a-f]$":
+            type: object
+
+            properties:
+              reg:
+                maxItems: 1
+
+              remote-endpoint: true
+
+            required:
+              - reg
+              - remote-endpoint
+
+            additionalProperties: false
+
+        additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - power-domains
+  - resets
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/r8a7796-cpg-mssr.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/power/r8a7796-sysc.h>
+
+    csi20: csi2@fea80000 {
+            compatible = "renesas,r8a7796-csi2";
+            reg = <0 0xfea80000 0 0x10000>;
+            interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
+            clocks = <&cpg CPG_MOD 714>;
+            power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+            resets = <&cpg 714>;
+
+            ports {
+                    #address-cells = <1>;
+                    #size-cells = <0>;
+
+                    port@0 {
+                            reg = <0>;
+
+                            csi20_in: endpoint {
+                                    clock-lanes = <0>;
+                                    data-lanes = <1>;
+                                    remote-endpoint = <&adv7482_txb>;
+                            };
+                    };
+
+                    port@1 {
+                            #address-cells = <1>;
+                            #size-cells = <0>;
+
+                            reg = <1>;
+
+                            csi20vin0: endpoint@0 {
+                                    reg = <0>;
+                                    remote-endpoint = <&vin0csi20>;
+                            };
+                            csi20vin1: endpoint@1 {
+                                    reg = <1>;
+                                    remote-endpoint = <&vin1csi20>;
+                            };
+                            csi20vin2: endpoint@2 {
+                                    reg = <2>;
+                                    remote-endpoint = <&vin2csi20>;
+                            };
+                            csi20vin3: endpoint@3 {
+                                    reg = <3>;
+                                    remote-endpoint = <&vin3csi20>;
+                            };
+                            csi20vin4: endpoint@4 {
+                                    reg = <4>;
+                                    remote-endpoint = <&vin4csi20>;
+                            };
+                            csi20vin5: endpoint@5 {
+                                    reg = <5>;
+                                    remote-endpoint = <&vin5csi20>;
+                            };
+                            csi20vin6: endpoint@6 {
+                                    reg = <6>;
+                                    remote-endpoint = <&vin6csi20>;
+                            };
+                            csi20vin7: endpoint@7 {
+                                    reg = <7>;
+                                    remote-endpoint = <&vin7csi20>;
+                            };
+                    };
+            };
+    };
diff --git a/Documentation/devicetree/bindings/media/renesas,vin.txt b/Documentation/devicetree/bindings/media/renesas,vin.txt
index e30b0d4..5eefd62 100644
--- a/Documentation/devicetree/bindings/media/renesas,vin.txt
+++ b/Documentation/devicetree/bindings/media/renesas,vin.txt
@@ -13,6 +13,7 @@
    - "renesas,vin-r8a7743" for the R8A7743 device
    - "renesas,vin-r8a7744" for the R8A7744 device
    - "renesas,vin-r8a7745" for the R8A7745 device
+   - "renesas,vin-r8a77470" for the R8A77470 device
    - "renesas,vin-r8a774a1" for the R8A774A1 device
    - "renesas,vin-r8a774b1" for the R8A774B1 device
    - "renesas,vin-r8a774c0" for the R8A774C0 device
@@ -41,9 +42,6 @@
  - interrupts: the interrupt for the device
  - clocks: Reference to the parent clock
 
-Additionally, an alias named vinX will need to be created to specify
-which video input device this is.
-
 The per-board settings for Gen2 and RZ/G1 platforms:
 
 - port - sub-node describing a single endpoint connected to the VIN
diff --git a/Documentation/devicetree/bindings/media/samsung-fimc.txt b/Documentation/devicetree/bindings/media/samsung-fimc.txt
index 48c599d..f91b9dc 100644
--- a/Documentation/devicetree/bindings/media/samsung-fimc.txt
+++ b/Documentation/devicetree/bindings/media/samsung-fimc.txt
@@ -1,4 +1,4 @@
-Samsung S5P/EXYNOS SoC Camera Subsystem (FIMC)
+Samsung S5P/Exynos SoC Camera Subsystem (FIMC)
 ----------------------------------------------
 
 The S5P/Exynos SoC Camera subsystem comprises of multiple sub-devices
diff --git a/Documentation/devicetree/bindings/media/samsung-mipi-csis.txt b/Documentation/devicetree/bindings/media/samsung-mipi-csis.txt
index be45f0b..a4149c9 100644
--- a/Documentation/devicetree/bindings/media/samsung-mipi-csis.txt
+++ b/Documentation/devicetree/bindings/media/samsung-mipi-csis.txt
@@ -1,4 +1,4 @@
-Samsung S5P/EXYNOS SoC series MIPI CSI-2 receiver (MIPI CSIS)
+Samsung S5P/Exynos SoC series MIPI CSI-2 receiver (MIPI CSIS)
 -------------------------------------------------------------
 
 Required properties:
diff --git a/Documentation/devicetree/bindings/media/sun6i-csi.txt b/Documentation/devicetree/bindings/media/sun6i-csi.txt
deleted file mode 100644
index a2e3e56..0000000
--- a/Documentation/devicetree/bindings/media/sun6i-csi.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-Allwinner V3s Camera Sensor Interface
--------------------------------------
-
-Allwinner V3s SoC features a CSI module(CSI1) with parallel interface.
-
-Required properties:
-  - compatible: value must be one of:
-    * "allwinner,sun6i-a31-csi"
-    * "allwinner,sun8i-a83t-csi"
-    * "allwinner,sun8i-h3-csi"
-    * "allwinner,sun8i-v3s-csi"
-    * "allwinner,sun50i-a64-csi"
-  - reg: base address and size of the memory-mapped region.
-  - interrupts: interrupt associated to this IP
-  - clocks: phandles to the clocks feeding the CSI
-    * bus: the CSI interface clock
-    * mod: the CSI module clock
-    * ram: the CSI DRAM clock
-  - clock-names: the clock names mentioned above
-  - resets: phandles to the reset line driving the CSI
-
-The CSI node should contain one 'port' child node with one child 'endpoint'
-node, according to the bindings defined in
-Documentation/devicetree/bindings/media/video-interfaces.txt.
-
-Endpoint node properties for CSI
----------------------------------
-See the video-interfaces.txt for a detailed description of these properties.
-- remote-endpoint	: (required) a phandle to the bus receiver's endpoint
-			   node
-- bus-width:		: (required) must be 8, 10, 12 or 16
-- pclk-sample		: (optional) (default: sample on falling edge)
-- hsync-active		: (required; parallel-only)
-- vsync-active		: (required; parallel-only)
-
-Example:
-
-csi1: csi@1cb4000 {
-	compatible = "allwinner,sun8i-v3s-csi";
-	reg = <0x01cb4000 0x1000>;
-	interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
-	clocks = <&ccu CLK_BUS_CSI>,
-		 <&ccu CLK_CSI1_SCLK>,
-		 <&ccu CLK_DRAM_CSI>;
-	clock-names = "bus", "mod", "ram";
-	resets = <&ccu RST_BUS_CSI>;
-
-	port {
-		/* Parallel bus endpoint */
-		csi1_ep: endpoint {
-			remote-endpoint = <&adv7611_ep>;
-			bus-width = <16>;
-
-			/* If hsync-active/vsync-active are missing,
-			   embedded BT.656 sync is used */
-			hsync-active = <0>; /* Active low */
-			vsync-active = <0>; /* Active low */
-			pclk-sample = <1>;  /* Rising */
-		};
-	};
-};
diff --git a/Documentation/devicetree/bindings/media/ti,cal.yaml b/Documentation/devicetree/bindings/media/ti,cal.yaml
new file mode 100644
index 0000000..1ea7841
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/ti,cal.yaml
@@ -0,0 +1,202 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/ti,cal.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments DRA72x CAMERA ADAPTATION LAYER (CAL) Device Tree Bindings
+
+maintainers:
+  - Benoit Parrot <bparrot@ti.com>
+
+description: |-
+  The Camera Adaptation Layer (CAL) is a key component for image capture
+  applications. The capture module provides the system interface and the
+  processing capability to connect CSI2 image-sensor modules to the
+  DRA72x device.
+
+  CAL supports 2 camera port nodes on the MIPI bus. Each CSI2 camera port
+  node should contain a 'port' child node with a child 'endpoint' node. Please
+  refer to the bindings defined in
+  Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+properties:
+  compatible:
+    enum:
+      # for DRA72 controllers
+      - ti,dra72-cal
+      # for DRA72 controllers pre ES2.0
+      - ti,dra72-pre-es2-cal
+      # for DRA76 controllers
+      - ti,dra76-cal
+      # for AM654 controllers
+      - ti,am654-cal
+
+  reg:
+    minItems: 2
+    items:
+      - description: The CAL main register region
+      - description: The RX Core0 (DPHY0) register region
+      - description: The RX Core1 (DPHY1) register region
+
+  reg-names:
+    minItems: 2
+    items:
+      - const: cal_top
+      - const: cal_rx_core0
+      - const: cal_rx_core1
+
+  interrupts:
+    maxItems: 1
+
+  ti,camerrx-control:
+    $ref: "/schemas/types.yaml#/definitions/phandle-array"
+    description:
+      phandle to the device control module and offset to the
+      control_camerarx_core register
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    const: fck
+
+  power-domains:
+    description:
+      List of phandle and PM domain specifier as documented in
+      Documentation/devicetree/bindings/power/power_domain.txt
+    maxItems: 1
+
+  # See ./video-interfaces.txt for details
+  ports:
+    type: object
+    additionalProperties: false
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        additionalProperties: false
+
+        properties:
+          reg:
+            const: 0
+            description: CSI2 Port #0
+
+        patternProperties:
+          endpoint:
+            type: object
+            additionalProperties: false
+
+            properties:
+              clock-lanes:
+                maxItems: 1
+
+              data-lanes:
+                minItems: 1
+                maxItems: 4
+
+              remote-endpoint: true
+
+        required:
+          - reg
+
+      port@1:
+        type: object
+        additionalProperties: false
+
+        properties:
+          reg:
+            const: 1
+            description: CSI2 Port #1
+
+        patternProperties:
+          endpoint:
+            type: object
+            additionalProperties: false
+
+            properties:
+              clock-lanes:
+                maxItems: 1
+
+              data-lanes:
+                minItems: 1
+                maxItems: 4
+
+              remote-endpoint: true
+
+        required:
+          - reg
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+      - port@0
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - interrupts
+  - ti,camerrx-control
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    cal: cal@4845b000 {
+        compatible = "ti,dra72-cal";
+        reg = <0x4845B000 0x400>,
+              <0x4845B800 0x40>,
+              <0x4845B900 0x40>;
+        reg-names = "cal_top",
+                    "cal_rx_core0",
+                    "cal_rx_core1";
+        interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+        ti,camerrx-control = <&scm_conf 0xE94>;
+
+        ports {
+              #address-cells = <1>;
+              #size-cells = <0>;
+
+              csi2_0: port@0 {
+                    reg = <0>;
+                    csi2_phy0: endpoint {
+                           remote-endpoint = <&csi2_cam0>;
+                           clock-lanes = <0>;
+                           data-lanes = <1 2>;
+                    };
+              };
+        };
+    };
+
+    i2c5: i2c@4807c000 {
+        clock-frequency = <400000>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        camera-sensor@3c {
+               compatible = "ovti,ov5640";
+               reg = <0x3c>;
+
+               clocks = <&clk_ov5640_fixed>;
+               clock-names = "xclk";
+
+               port {
+                    csi2_cam0: endpoint {
+                            remote-endpoint = <&csi2_phy0>;
+                            clock-lanes = <0>;
+                            data-lanes = <1 2>;
+                    };
+               };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/media/ti-cal.txt b/Documentation/devicetree/bindings/media/ti-cal.txt
deleted file mode 100644
index ae9b52f..0000000
--- a/Documentation/devicetree/bindings/media/ti-cal.txt
+++ /dev/null
@@ -1,72 +0,0 @@
-Texas Instruments DRA72x CAMERA ADAPTATION LAYER (CAL)
-------------------------------------------------------
-
-The Camera Adaptation Layer (CAL) is a key component for image capture
-applications. The capture module provides the system interface and the
-processing capability to connect CSI2 image-sensor modules to the
-DRA72x device.
-
-Required properties:
-- compatible: must be "ti,dra72-cal"
-- reg:	CAL Top level, Receiver Core #0, Receiver Core #1 and Camera RX
-	control address space
-- reg-names: cal_top, cal_rx_core0, cal_rx_core1, and camerrx_control
-	     registers
-- interrupts: should contain IRQ line for the CAL;
-
-CAL supports 2 camera port nodes on MIPI bus. Each CSI2 camera port nodes
-should contain a 'port' child node with child 'endpoint' node. Please
-refer to the bindings defined in
-Documentation/devicetree/bindings/media/video-interfaces.txt.
-
-Example:
-	cal: cal@4845b000 {
-		compatible = "ti,dra72-cal";
-		ti,hwmods = "cal";
-		reg = <0x4845B000 0x400>,
-		      <0x4845B800 0x40>,
-		      <0x4845B900 0x40>,
-		      <0x4A002e94 0x4>;
-		reg-names = "cal_top",
-			    "cal_rx_core0",
-			    "cal_rx_core1",
-			    "camerrx_control";
-		interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			csi2_0: port@0 {
-				reg = <0>;
-				endpoint {
-					slave-mode;
-					remote-endpoint = <&ar0330_1>;
-				};
-			};
-			csi2_1: port@1 {
-				reg = <1>;
-			};
-		};
-	};
-
-	i2c5: i2c@4807c000 {
-		ar0330@10 {
-			compatible = "ti,ar0330";
-			reg = <0x10>;
-
-			port {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				ar0330_1: endpoint {
-					reg = <0>;
-					clock-lanes = <1>;
-					data-lanes = <0 2 3 4>;
-					remote-endpoint = <&csi2_0>;
-				};
-			};
-		};
-	};
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.txt b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.txt
deleted file mode 100644
index ba0bc3f..0000000
--- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.txt
+++ /dev/null
@@ -1,374 +0,0 @@
-NVIDIA Tegra124 SoC EMC (external memory controller)
-====================================================
-
-Required properties :
-- compatible : Should be "nvidia,tegra124-emc".
-- reg : physical base address and length of the controller's registers.
-- nvidia,memory-controller : phandle of the MC driver.
-
-The node should contain a "emc-timings" subnode for each supported RAM type
-(see field RAM_CODE in register PMC_STRAPPING_OPT_A), with its unit address
-being its RAM_CODE.
-
-Required properties for "emc-timings" nodes :
-- nvidia,ram-code : Should contain the value of RAM_CODE this timing set is
-used for.
-
-Each "emc-timings" node should contain a "timing" subnode for every supported
-EMC clock rate. The "timing" subnodes should have the clock rate in Hz as
-their unit address.
-
-Required properties for "timing" nodes :
-- clock-frequency : Should contain the memory clock rate in Hz.
-- The following properties contain EMC timing characterization values
-(specified in the board documentation) :
-  - nvidia,emc-auto-cal-config : EMC_AUTO_CAL_CONFIG
-  - nvidia,emc-auto-cal-config2 : EMC_AUTO_CAL_CONFIG2
-  - nvidia,emc-auto-cal-config3 : EMC_AUTO_CAL_CONFIG3
-  - nvidia,emc-auto-cal-interval : EMC_AUTO_CAL_INTERVAL
-  - nvidia,emc-bgbias-ctl0 : EMC_BGBIAS_CTL0
-  - nvidia,emc-cfg : EMC_CFG
-  - nvidia,emc-cfg-2 : EMC_CFG_2
-  - nvidia,emc-ctt-term-ctrl : EMC_CTT_TERM_CTRL
-  - nvidia,emc-mode-1 : Mode Register 1
-  - nvidia,emc-mode-2 : Mode Register 2
-  - nvidia,emc-mode-4 : Mode Register 4
-  - nvidia,emc-mode-reset : Mode Register 0
-  - nvidia,emc-mrs-wait-cnt : EMC_MRS_WAIT_CNT
-  - nvidia,emc-sel-dpd-ctrl : EMC_SEL_DPD_CTRL
-  - nvidia,emc-xm2dqspadctrl2 : EMC_XM2DQSPADCTRL2
-  - nvidia,emc-zcal-cnt-long : EMC_ZCAL_WAIT_CNT after clock change
-  - nvidia,emc-zcal-interval : EMC_ZCAL_INTERVAL
-- nvidia,emc-configuration : EMC timing characterization data. These are the
-registers (see section "15.6.2 EMC Registers" in the TRM) whose values need to
-be specified, according to the board documentation:
-
-	EMC_RC
-	EMC_RFC
-	EMC_RFC_SLR
-	EMC_RAS
-	EMC_RP
-	EMC_R2W
-	EMC_W2R
-	EMC_R2P
-	EMC_W2P
-	EMC_RD_RCD
-	EMC_WR_RCD
-	EMC_RRD
-	EMC_REXT
-	EMC_WEXT
-	EMC_WDV
-	EMC_WDV_MASK
-	EMC_QUSE
-	EMC_QUSE_WIDTH
-	EMC_IBDLY
-	EMC_EINPUT
-	EMC_EINPUT_DURATION
-	EMC_PUTERM_EXTRA
-	EMC_PUTERM_WIDTH
-	EMC_PUTERM_ADJ
-	EMC_CDB_CNTL_1
-	EMC_CDB_CNTL_2
-	EMC_CDB_CNTL_3
-	EMC_QRST
-	EMC_QSAFE
-	EMC_RDV
-	EMC_RDV_MASK
-	EMC_REFRESH
-	EMC_BURST_REFRESH_NUM
-	EMC_PRE_REFRESH_REQ_CNT
-	EMC_PDEX2WR
-	EMC_PDEX2RD
-	EMC_PCHG2PDEN
-	EMC_ACT2PDEN
-	EMC_AR2PDEN
-	EMC_RW2PDEN
-	EMC_TXSR
-	EMC_TXSRDLL
-	EMC_TCKE
-	EMC_TCKESR
-	EMC_TPD
-	EMC_TFAW
-	EMC_TRPAB
-	EMC_TCLKSTABLE
-	EMC_TCLKSTOP
-	EMC_TREFBW
-	EMC_FBIO_CFG6
-	EMC_ODT_WRITE
-	EMC_ODT_READ
-	EMC_FBIO_CFG5
-	EMC_CFG_DIG_DLL
-	EMC_CFG_DIG_DLL_PERIOD
-	EMC_DLL_XFORM_DQS0
-	EMC_DLL_XFORM_DQS1
-	EMC_DLL_XFORM_DQS2
-	EMC_DLL_XFORM_DQS3
-	EMC_DLL_XFORM_DQS4
-	EMC_DLL_XFORM_DQS5
-	EMC_DLL_XFORM_DQS6
-	EMC_DLL_XFORM_DQS7
-	EMC_DLL_XFORM_DQS8
-	EMC_DLL_XFORM_DQS9
-	EMC_DLL_XFORM_DQS10
-	EMC_DLL_XFORM_DQS11
-	EMC_DLL_XFORM_DQS12
-	EMC_DLL_XFORM_DQS13
-	EMC_DLL_XFORM_DQS14
-	EMC_DLL_XFORM_DQS15
-	EMC_DLL_XFORM_QUSE0
-	EMC_DLL_XFORM_QUSE1
-	EMC_DLL_XFORM_QUSE2
-	EMC_DLL_XFORM_QUSE3
-	EMC_DLL_XFORM_QUSE4
-	EMC_DLL_XFORM_QUSE5
-	EMC_DLL_XFORM_QUSE6
-	EMC_DLL_XFORM_QUSE7
-	EMC_DLL_XFORM_ADDR0
-	EMC_DLL_XFORM_ADDR1
-	EMC_DLL_XFORM_ADDR2
-	EMC_DLL_XFORM_ADDR3
-	EMC_DLL_XFORM_ADDR4
-	EMC_DLL_XFORM_ADDR5
-	EMC_DLL_XFORM_QUSE8
-	EMC_DLL_XFORM_QUSE9
-	EMC_DLL_XFORM_QUSE10
-	EMC_DLL_XFORM_QUSE11
-	EMC_DLL_XFORM_QUSE12
-	EMC_DLL_XFORM_QUSE13
-	EMC_DLL_XFORM_QUSE14
-	EMC_DLL_XFORM_QUSE15
-	EMC_DLI_TRIM_TXDQS0
-	EMC_DLI_TRIM_TXDQS1
-	EMC_DLI_TRIM_TXDQS2
-	EMC_DLI_TRIM_TXDQS3
-	EMC_DLI_TRIM_TXDQS4
-	EMC_DLI_TRIM_TXDQS5
-	EMC_DLI_TRIM_TXDQS6
-	EMC_DLI_TRIM_TXDQS7
-	EMC_DLI_TRIM_TXDQS8
-	EMC_DLI_TRIM_TXDQS9
-	EMC_DLI_TRIM_TXDQS10
-	EMC_DLI_TRIM_TXDQS11
-	EMC_DLI_TRIM_TXDQS12
-	EMC_DLI_TRIM_TXDQS13
-	EMC_DLI_TRIM_TXDQS14
-	EMC_DLI_TRIM_TXDQS15
-	EMC_DLL_XFORM_DQ0
-	EMC_DLL_XFORM_DQ1
-	EMC_DLL_XFORM_DQ2
-	EMC_DLL_XFORM_DQ3
-	EMC_DLL_XFORM_DQ4
-	EMC_DLL_XFORM_DQ5
-	EMC_DLL_XFORM_DQ6
-	EMC_DLL_XFORM_DQ7
-	EMC_XM2CMDPADCTRL
-	EMC_XM2CMDPADCTRL4
-	EMC_XM2CMDPADCTRL5
-	EMC_XM2DQPADCTRL2
-	EMC_XM2DQPADCTRL3
-	EMC_XM2CLKPADCTRL
-	EMC_XM2CLKPADCTRL2
-	EMC_XM2COMPPADCTRL
-	EMC_XM2VTTGENPADCTRL
-	EMC_XM2VTTGENPADCTRL2
-	EMC_XM2VTTGENPADCTRL3
-	EMC_XM2DQSPADCTRL3
-	EMC_XM2DQSPADCTRL4
-	EMC_XM2DQSPADCTRL5
-	EMC_XM2DQSPADCTRL6
-	EMC_DSR_VTTGEN_DRV
-	EMC_TXDSRVTTGEN
-	EMC_FBIO_SPARE
-	EMC_ZCAL_WAIT_CNT
-	EMC_MRS_WAIT_CNT2
-	EMC_CTT
-	EMC_CTT_DURATION
-	EMC_CFG_PIPE
-	EMC_DYN_SELF_REF_CONTROL
-	EMC_QPOP
-
-Example SoC include file:
-
-/ {
-	emc@7001b000 {
-		compatible = "nvidia,tegra124-emc";
-		reg = <0x0 0x7001b000 0x0 0x1000>;
-
-		nvidia,memory-controller = <&mc>;
-	};
-};
-
-Example board file:
-
-/ {
-	emc@7001b000 {
-		emc-timings-3 {
-			nvidia,ram-code = <3>;
-
-			timing-12750000 {
-				clock-frequency = <12750000>;
-
-				nvidia,emc-zcal-cnt-long = <0x00000042>;
-				nvidia,emc-auto-cal-interval = <0x001fffff>;
-				nvidia,emc-ctt-term-ctrl = <0x00000802>;
-				nvidia,emc-cfg = <0x73240000>;
-				nvidia,emc-cfg-2 = <0x000008c5>;
-				nvidia,emc-sel-dpd-ctrl = <0x00040128>;
-				nvidia,emc-bgbias-ctl0 = <0x00000008>;
-				nvidia,emc-auto-cal-config = <0xa1430000>;
-				nvidia,emc-auto-cal-config2 = <0x00000000>;
-				nvidia,emc-auto-cal-config3 = <0x00000000>;
-				nvidia,emc-mode-reset = <0x80001221>;
-				nvidia,emc-mode-1 = <0x80100003>;
-				nvidia,emc-mode-2 = <0x80200008>;
-				nvidia,emc-mode-4 = <0x00000000>;
-
-				nvidia,emc-configuration = <
-					0x00000000 /* EMC_RC */
-					0x00000003 /* EMC_RFC */
-					0x00000000 /* EMC_RFC_SLR */
-					0x00000000 /* EMC_RAS */
-					0x00000000 /* EMC_RP */
-					0x00000004 /* EMC_R2W */
-					0x0000000a /* EMC_W2R */
-					0x00000003 /* EMC_R2P */
-					0x0000000b /* EMC_W2P */
-					0x00000000 /* EMC_RD_RCD */
-					0x00000000 /* EMC_WR_RCD */
-					0x00000003 /* EMC_RRD */
-					0x00000003 /* EMC_REXT */
-					0x00000000 /* EMC_WEXT */
-					0x00000006 /* EMC_WDV */
-					0x00000006 /* EMC_WDV_MASK */
-					0x00000006 /* EMC_QUSE */
-					0x00000002 /* EMC_QUSE_WIDTH */
-					0x00000000 /* EMC_IBDLY */
-					0x00000005 /* EMC_EINPUT */
-					0x00000005 /* EMC_EINPUT_DURATION */
-					0x00010000 /* EMC_PUTERM_EXTRA */
-					0x00000003 /* EMC_PUTERM_WIDTH */
-					0x00000000 /* EMC_PUTERM_ADJ */
-					0x00000000 /* EMC_CDB_CNTL_1 */
-					0x00000000 /* EMC_CDB_CNTL_2 */
-					0x00000000 /* EMC_CDB_CNTL_3 */
-					0x00000004 /* EMC_QRST */
-					0x0000000c /* EMC_QSAFE */
-					0x0000000d /* EMC_RDV */
-					0x0000000f /* EMC_RDV_MASK */
-					0x00000060 /* EMC_REFRESH */
-					0x00000000 /* EMC_BURST_REFRESH_NUM */
-					0x00000018 /* EMC_PRE_REFRESH_REQ_CNT */
-					0x00000002 /* EMC_PDEX2WR */
-					0x00000002 /* EMC_PDEX2RD */
-					0x00000001 /* EMC_PCHG2PDEN */
-					0x00000000 /* EMC_ACT2PDEN */
-					0x00000007 /* EMC_AR2PDEN */
-					0x0000000f /* EMC_RW2PDEN */
-					0x00000005 /* EMC_TXSR */
-					0x00000005 /* EMC_TXSRDLL */
-					0x00000004 /* EMC_TCKE */
-					0x00000005 /* EMC_TCKESR */
-					0x00000004 /* EMC_TPD */
-					0x00000000 /* EMC_TFAW */
-					0x00000000 /* EMC_TRPAB */
-					0x00000005 /* EMC_TCLKSTABLE */
-					0x00000005 /* EMC_TCLKSTOP */
-					0x00000064 /* EMC_TREFBW */
-					0x00000000 /* EMC_FBIO_CFG6 */
-					0x00000000 /* EMC_ODT_WRITE */
-					0x00000000 /* EMC_ODT_READ */
-					0x106aa298 /* EMC_FBIO_CFG5 */
-					0x002c00a0 /* EMC_CFG_DIG_DLL */
-					0x00008000 /* EMC_CFG_DIG_DLL_PERIOD */
-					0x00064000 /* EMC_DLL_XFORM_DQS0 */
-					0x00064000 /* EMC_DLL_XFORM_DQS1 */
-					0x00064000 /* EMC_DLL_XFORM_DQS2 */
-					0x00064000 /* EMC_DLL_XFORM_DQS3 */
-					0x00064000 /* EMC_DLL_XFORM_DQS4 */
-					0x00064000 /* EMC_DLL_XFORM_DQS5 */
-					0x00064000 /* EMC_DLL_XFORM_DQS6 */
-					0x00064000 /* EMC_DLL_XFORM_DQS7 */
-					0x00064000 /* EMC_DLL_XFORM_DQS8 */
-					0x00064000 /* EMC_DLL_XFORM_DQS9 */
-					0x00064000 /* EMC_DLL_XFORM_DQS10 */
-					0x00064000 /* EMC_DLL_XFORM_DQS11 */
-					0x00064000 /* EMC_DLL_XFORM_DQS12 */
-					0x00064000 /* EMC_DLL_XFORM_DQS13 */
-					0x00064000 /* EMC_DLL_XFORM_DQS14 */
-					0x00064000 /* EMC_DLL_XFORM_DQS15 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE0 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE1 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE2 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE3 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE4 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE5 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE6 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE7 */
-					0x00000000 /* EMC_DLL_XFORM_ADDR0 */
-					0x00000000 /* EMC_DLL_XFORM_ADDR1 */
-					0x00000000 /* EMC_DLL_XFORM_ADDR2 */
-					0x00000000 /* EMC_DLL_XFORM_ADDR3 */
-					0x00000000 /* EMC_DLL_XFORM_ADDR4 */
-					0x00000000 /* EMC_DLL_XFORM_ADDR5 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE8 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE9 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE10 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE11 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE12 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE13 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE14 */
-					0x00000000 /* EMC_DLL_XFORM_QUSE15 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS0 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS1 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS2 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS3 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS4 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS5 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS6 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS7 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS8 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS9 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS10 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS11 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS12 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS13 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS14 */
-					0x00000000 /* EMC_DLI_TRIM_TXDQS15 */
-					0x000fc000 /* EMC_DLL_XFORM_DQ0 */
-					0x000fc000 /* EMC_DLL_XFORM_DQ1 */
-					0x000fc000 /* EMC_DLL_XFORM_DQ2 */
-					0x000fc000 /* EMC_DLL_XFORM_DQ3 */
-					0x0000fc00 /* EMC_DLL_XFORM_DQ4 */
-					0x0000fc00 /* EMC_DLL_XFORM_DQ5 */
-					0x0000fc00 /* EMC_DLL_XFORM_DQ6 */
-					0x0000fc00 /* EMC_DLL_XFORM_DQ7 */
-					0x10000280 /* EMC_XM2CMDPADCTRL */
-					0x00000000 /* EMC_XM2CMDPADCTRL4 */
-					0x00111111 /* EMC_XM2CMDPADCTRL5 */
-					0x00000000 /* EMC_XM2DQPADCTRL2 */
-					0x00000000 /* EMC_XM2DQPADCTRL3 */
-					0x77ffc081 /* EMC_XM2CLKPADCTRL */
-					0x00000e0e /* EMC_XM2CLKPADCTRL2 */
-					0x81f1f108 /* EMC_XM2COMPPADCTRL */
-					0x07070004 /* EMC_XM2VTTGENPADCTRL */
-					0x0000003f /* EMC_XM2VTTGENPADCTRL2 */
-					0x016eeeee /* EMC_XM2VTTGENPADCTRL3 */
-					0x51451400 /* EMC_XM2DQSPADCTRL3 */
-					0x00514514 /* EMC_XM2DQSPADCTRL4 */
-					0x00514514 /* EMC_XM2DQSPADCTRL5 */
-					0x51451400 /* EMC_XM2DQSPADCTRL6 */
-					0x0000003f /* EMC_DSR_VTTGEN_DRV */
-					0x00000007 /* EMC_TXDSRVTTGEN */
-					0x00000000 /* EMC_FBIO_SPARE */
-					0x00000042 /* EMC_ZCAL_WAIT_CNT */
-					0x000e000e /* EMC_MRS_WAIT_CNT2 */
-					0x00000000 /* EMC_CTT */
-					0x00000003 /* EMC_CTT_DURATION */
-					0x0000f2f3 /* EMC_CFG_PIPE */
-					0x800001c5 /* EMC_DYN_SELF_REF_CONTROL */
-					0x0000000a /* EMC_QPOP */
-				>;
-			};
-		};
-	};
-};
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml
new file mode 100644
index 0000000..dd184348
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml
@@ -0,0 +1,528 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/nvidia,tegra124-emc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra124 SoC External Memory Controller
+
+maintainers:
+  - Thierry Reding <thierry.reding@gmail.com>
+  - Jon Hunter <jonathanh@nvidia.com>
+
+description: |
+  The EMC interfaces with the off-chip SDRAM to service the request stream
+  sent from the memory controller.
+
+properties:
+  compatible:
+    const: nvidia,tegra124-emc
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: external memory clock
+
+  clock-names:
+    items:
+      - const: emc
+
+  nvidia,memory-controller:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description:
+      phandle of the memory controller node
+
+patternProperties:
+  "^emc-timings-[0-9]+$":
+    type: object
+    properties:
+      nvidia,ram-code:
+        $ref: /schemas/types.yaml#/definitions/uint32
+        description:
+          value of the RAM_CODE field in the PMC_STRAPPING_OPT_A register that
+          this timing set is used for
+
+    patternProperties:
+      "^timing-[0-9]+$":
+        type: object
+        properties:
+          clock-frequency:
+            description:
+              external memory clock rate in Hz
+            minimum: 1000000
+            maximum: 1000000000
+
+          nvidia,emc-auto-cal-config:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              value of the EMC_AUTO_CAL_CONFIG register for this set of
+              timings
+
+          nvidia,emc-auto-cal-config2:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              value of the EMC_AUTO_CAL_CONFIG2 register for this set of
+              timings
+
+          nvidia,emc-auto-cal-config3:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              value of the EMC_AUTO_CAL_CONFIG3 register for this set of
+              timings
+
+          nvidia,emc-auto-cal-interval:
+            allOf:
+              - $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              pad calibration interval in microseconds
+            minimum: 0
+            maximum: 2097151
+
+          nvidia,emc-bgbias-ctl0:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              value of the EMC_BGBIAS_CTL0 register for this set of timings
+
+          nvidia,emc-cfg:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              value of the EMC_CFG register for this set of timings
+
+          nvidia,emc-cfg-2:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              value of the EMC_CFG_2 register for this set of timings
+
+          nvidia,emc-ctt-term-ctrl:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              value of the EMC_CTT_TERM_CTRL register for this set of timings
+
+          nvidia,emc-mode-1:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              value of the EMC_MRW register for this set of timings
+
+          nvidia,emc-mode-2:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              value of the EMC_MRW2 register for this set of timings
+
+          nvidia,emc-mode-4:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              value of the EMC_MRW4 register for this set of timings
+
+          nvidia,emc-mode-reset:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              reset value of the EMC_MRS register for this set of timings
+
+          nvidia,emc-mrs-wait-cnt:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              value of the EMC_MRS_WAIT_CNT register for this set of timings
+
+          nvidia,emc-sel-dpd-ctrl:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              value of the EMC_SEL_DPD_CTRL register for this set of timings
+
+          nvidia,emc-xm2dqspadctrl2:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              value of the EMC_XM2DQSPADCTRL2 register for this set of timings
+
+          nvidia,emc-zcal-cnt-long:
+            allOf:
+              - $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              number of EMC clocks to wait before issuing any commands after
+              clock change
+            minimum: 0
+            maximum: 1023
+
+          nvidia,emc-zcal-interval:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description:
+              value of the EMC_ZCAL_INTERVAL register for this set of timings
+
+          nvidia,emc-configuration:
+            allOf:
+              - $ref: /schemas/types.yaml#/definitions/uint32-array
+            description:
+              EMC timing characterization data. These are the registers (see
+              section "15.6.2 EMC Registers" in the TRM) whose values need to
+              be specified, according to the board documentation.
+            items:
+              - description: EMC_RC
+              - description: EMC_RFC
+              - description: EMC_RFC_SLR
+              - description: EMC_RAS
+              - description: EMC_RP
+              - description: EMC_R2W
+              - description: EMC_W2R
+              - description: EMC_R2P
+              - description: EMC_W2P
+              - description: EMC_RD_RCD
+              - description: EMC_WR_RCD
+              - description: EMC_RRD
+              - description: EMC_REXT
+              - description: EMC_WEXT
+              - description: EMC_WDV
+              - description: EMC_WDV_MASK
+              - description: EMC_QUSE
+              - description: EMC_QUSE_WIDTH
+              - description: EMC_IBDLY
+              - description: EMC_EINPUT
+              - description: EMC_EINPUT_DURATION
+              - description: EMC_PUTERM_EXTRA
+              - description: EMC_PUTERM_WIDTH
+              - description: EMC_PUTERM_ADJ
+              - description: EMC_CDB_CNTL_1
+              - description: EMC_CDB_CNTL_2
+              - description: EMC_CDB_CNTL_3
+              - description: EMC_QRST
+              - description: EMC_QSAFE
+              - description: EMC_RDV
+              - description: EMC_RDV_MASK
+              - description: EMC_REFRESH
+              - description: EMC_BURST_REFRESH_NUM
+              - description: EMC_PRE_REFRESH_REQ_CNT
+              - description: EMC_PDEX2WR
+              - description: EMC_PDEX2RD
+              - description: EMC_PCHG2PDEN
+              - description: EMC_ACT2PDEN
+              - description: EMC_AR2PDEN
+              - description: EMC_RW2PDEN
+              - description: EMC_TXSR
+              - description: EMC_TXSRDLL
+              - description: EMC_TCKE
+              - description: EMC_TCKESR
+              - description: EMC_TPD
+              - description: EMC_TFAW
+              - description: EMC_TRPAB
+              - description: EMC_TCLKSTABLE
+              - description: EMC_TCLKSTOP
+              - description: EMC_TREFBW
+              - description: EMC_FBIO_CFG6
+              - description: EMC_ODT_WRITE
+              - description: EMC_ODT_READ
+              - description: EMC_FBIO_CFG5
+              - description: EMC_CFG_DIG_DLL
+              - description: EMC_CFG_DIG_DLL_PERIOD
+              - description: EMC_DLL_XFORM_DQS0
+              - description: EMC_DLL_XFORM_DQS1
+              - description: EMC_DLL_XFORM_DQS2
+              - description: EMC_DLL_XFORM_DQS3
+              - description: EMC_DLL_XFORM_DQS4
+              - description: EMC_DLL_XFORM_DQS5
+              - description: EMC_DLL_XFORM_DQS6
+              - description: EMC_DLL_XFORM_DQS7
+              - description: EMC_DLL_XFORM_DQS8
+              - description: EMC_DLL_XFORM_DQS9
+              - description: EMC_DLL_XFORM_DQS10
+              - description: EMC_DLL_XFORM_DQS11
+              - description: EMC_DLL_XFORM_DQS12
+              - description: EMC_DLL_XFORM_DQS13
+              - description: EMC_DLL_XFORM_DQS14
+              - description: EMC_DLL_XFORM_DQS15
+              - description: EMC_DLL_XFORM_QUSE0
+              - description: EMC_DLL_XFORM_QUSE1
+              - description: EMC_DLL_XFORM_QUSE2
+              - description: EMC_DLL_XFORM_QUSE3
+              - description: EMC_DLL_XFORM_QUSE4
+              - description: EMC_DLL_XFORM_QUSE5
+              - description: EMC_DLL_XFORM_QUSE6
+              - description: EMC_DLL_XFORM_QUSE7
+              - description: EMC_DLL_XFORM_ADDR0
+              - description: EMC_DLL_XFORM_ADDR1
+              - description: EMC_DLL_XFORM_ADDR2
+              - description: EMC_DLL_XFORM_ADDR3
+              - description: EMC_DLL_XFORM_ADDR4
+              - description: EMC_DLL_XFORM_ADDR5
+              - description: EMC_DLL_XFORM_QUSE8
+              - description: EMC_DLL_XFORM_QUSE9
+              - description: EMC_DLL_XFORM_QUSE10
+              - description: EMC_DLL_XFORM_QUSE11
+              - description: EMC_DLL_XFORM_QUSE12
+              - description: EMC_DLL_XFORM_QUSE13
+              - description: EMC_DLL_XFORM_QUSE14
+              - description: EMC_DLL_XFORM_QUSE15
+              - description: EMC_DLI_TRIM_TXDQS0
+              - description: EMC_DLI_TRIM_TXDQS1
+              - description: EMC_DLI_TRIM_TXDQS2
+              - description: EMC_DLI_TRIM_TXDQS3
+              - description: EMC_DLI_TRIM_TXDQS4
+              - description: EMC_DLI_TRIM_TXDQS5
+              - description: EMC_DLI_TRIM_TXDQS6
+              - description: EMC_DLI_TRIM_TXDQS7
+              - description: EMC_DLI_TRIM_TXDQS8
+              - description: EMC_DLI_TRIM_TXDQS9
+              - description: EMC_DLI_TRIM_TXDQS10
+              - description: EMC_DLI_TRIM_TXDQS11
+              - description: EMC_DLI_TRIM_TXDQS12
+              - description: EMC_DLI_TRIM_TXDQS13
+              - description: EMC_DLI_TRIM_TXDQS14
+              - description: EMC_DLI_TRIM_TXDQS15
+              - description: EMC_DLL_XFORM_DQ0
+              - description: EMC_DLL_XFORM_DQ1
+              - description: EMC_DLL_XFORM_DQ2
+              - description: EMC_DLL_XFORM_DQ3
+              - description: EMC_DLL_XFORM_DQ4
+              - description: EMC_DLL_XFORM_DQ5
+              - description: EMC_DLL_XFORM_DQ6
+              - description: EMC_DLL_XFORM_DQ7
+              - description: EMC_XM2CMDPADCTRL
+              - description: EMC_XM2CMDPADCTRL4
+              - description: EMC_XM2CMDPADCTRL5
+              - description: EMC_XM2DQPADCTRL2
+              - description: EMC_XM2DQPADCTRL3
+              - description: EMC_XM2CLKPADCTRL
+              - description: EMC_XM2CLKPADCTRL2
+              - description: EMC_XM2COMPPADCTRL
+              - description: EMC_XM2VTTGENPADCTRL
+              - description: EMC_XM2VTTGENPADCTRL2
+              - description: EMC_XM2VTTGENPADCTRL3
+              - description: EMC_XM2DQSPADCTRL3
+              - description: EMC_XM2DQSPADCTRL4
+              - description: EMC_XM2DQSPADCTRL5
+              - description: EMC_XM2DQSPADCTRL6
+              - description: EMC_DSR_VTTGEN_DRV
+              - description: EMC_TXDSRVTTGEN
+              - description: EMC_FBIO_SPARE
+              - description: EMC_ZCAL_WAIT_CNT
+              - description: EMC_MRS_WAIT_CNT2
+              - description: EMC_CTT
+              - description: EMC_CTT_DURATION
+              - description: EMC_CFG_PIPE
+              - description: EMC_DYN_SELF_REF_CONTROL
+              - description: EMC_QPOP
+
+        required:
+          - clock-frequency
+          - nvidia,emc-auto-cal-config
+          - nvidia,emc-auto-cal-config2
+          - nvidia,emc-auto-cal-config3
+          - nvidia,emc-auto-cal-interval
+          - nvidia,emc-bgbias-ctl0
+          - nvidia,emc-cfg
+          - nvidia,emc-cfg-2
+          - nvidia,emc-ctt-term-ctrl
+          - nvidia,emc-mode-1
+          - nvidia,emc-mode-2
+          - nvidia,emc-mode-4
+          - nvidia,emc-mode-reset
+          - nvidia,emc-mrs-wait-cnt
+          - nvidia,emc-sel-dpd-ctrl
+          - nvidia,emc-xm2dqspadctrl2
+          - nvidia,emc-zcal-cnt-long
+          - nvidia,emc-zcal-interval
+          - nvidia,emc-configuration
+
+        additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - nvidia,memory-controller
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/tegra124-car.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    mc: memory-controller@70019000 {
+        compatible = "nvidia,tegra124-mc";
+        reg = <0x0 0x70019000 0x0 0x1000>;
+        clocks = <&tegra_car TEGRA124_CLK_MC>;
+        clock-names = "mc";
+
+        interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+
+        #iommu-cells = <1>;
+    };
+
+    external-memory-controller@7001b000 {
+        compatible = "nvidia,tegra124-emc";
+        reg = <0x0 0x7001b000 0x0 0x1000>;
+        clocks = <&tegra_car TEGRA124_CLK_EMC>;
+        clock-names = "emc";
+
+        nvidia,memory-controller = <&mc>;
+
+        emc-timings-0 {
+            nvidia,ram-code = <3>;
+
+            timing-0 {
+                clock-frequency = <12750000>;
+
+                nvidia,emc-zcal-cnt-long = <0x00000042>;
+                nvidia,emc-auto-cal-interval = <0x001fffff>;
+                nvidia,emc-ctt-term-ctrl = <0x00000802>;
+                nvidia,emc-cfg = <0x73240000>;
+                nvidia,emc-cfg-2 = <0x000008c5>;
+                nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+                nvidia,emc-bgbias-ctl0 = <0x00000008>;
+                nvidia,emc-auto-cal-config = <0xa1430000>;
+                nvidia,emc-auto-cal-config2 = <0x00000000>;
+                nvidia,emc-auto-cal-config3 = <0x00000000>;
+                nvidia,emc-mode-reset = <0x80001221>;
+                nvidia,emc-mode-1 = <0x80100003>;
+                nvidia,emc-mode-2 = <0x80200008>;
+                nvidia,emc-mode-4 = <0x00000000>;
+
+                nvidia,emc-configuration = <
+                    0x00000000 /* EMC_RC */
+                    0x00000003 /* EMC_RFC */
+                    0x00000000 /* EMC_RFC_SLR */
+                    0x00000000 /* EMC_RAS */
+                    0x00000000 /* EMC_RP */
+                    0x00000004 /* EMC_R2W */
+                    0x0000000a /* EMC_W2R */
+                    0x00000003 /* EMC_R2P */
+                    0x0000000b /* EMC_W2P */
+                    0x00000000 /* EMC_RD_RCD */
+                    0x00000000 /* EMC_WR_RCD */
+                    0x00000003 /* EMC_RRD */
+                    0x00000003 /* EMC_REXT */
+                    0x00000000 /* EMC_WEXT */
+                    0x00000006 /* EMC_WDV */
+                    0x00000006 /* EMC_WDV_MASK */
+                    0x00000006 /* EMC_QUSE */
+                    0x00000002 /* EMC_QUSE_WIDTH */
+                    0x00000000 /* EMC_IBDLY */
+                    0x00000005 /* EMC_EINPUT */
+                    0x00000005 /* EMC_EINPUT_DURATION */
+                    0x00010000 /* EMC_PUTERM_EXTRA */
+                    0x00000003 /* EMC_PUTERM_WIDTH */
+                    0x00000000 /* EMC_PUTERM_ADJ */
+                    0x00000000 /* EMC_CDB_CNTL_1 */
+                    0x00000000 /* EMC_CDB_CNTL_2 */
+                    0x00000000 /* EMC_CDB_CNTL_3 */
+                    0x00000004 /* EMC_QRST */
+                    0x0000000c /* EMC_QSAFE */
+                    0x0000000d /* EMC_RDV */
+                    0x0000000f /* EMC_RDV_MASK */
+                    0x00000060 /* EMC_REFRESH */
+                    0x00000000 /* EMC_BURST_REFRESH_NUM */
+                    0x00000018 /* EMC_PRE_REFRESH_REQ_CNT */
+                    0x00000002 /* EMC_PDEX2WR */
+                    0x00000002 /* EMC_PDEX2RD */
+                    0x00000001 /* EMC_PCHG2PDEN */
+                    0x00000000 /* EMC_ACT2PDEN */
+                    0x00000007 /* EMC_AR2PDEN */
+                    0x0000000f /* EMC_RW2PDEN */
+                    0x00000005 /* EMC_TXSR */
+                    0x00000005 /* EMC_TXSRDLL */
+                    0x00000004 /* EMC_TCKE */
+                    0x00000005 /* EMC_TCKESR */
+                    0x00000004 /* EMC_TPD */
+                    0x00000000 /* EMC_TFAW */
+                    0x00000000 /* EMC_TRPAB */
+                    0x00000005 /* EMC_TCLKSTABLE */
+                    0x00000005 /* EMC_TCLKSTOP */
+                    0x00000064 /* EMC_TREFBW */
+                    0x00000000 /* EMC_FBIO_CFG6 */
+                    0x00000000 /* EMC_ODT_WRITE */
+                    0x00000000 /* EMC_ODT_READ */
+                    0x106aa298 /* EMC_FBIO_CFG5 */
+                    0x002c00a0 /* EMC_CFG_DIG_DLL */
+                    0x00008000 /* EMC_CFG_DIG_DLL_PERIOD */
+                    0x00064000 /* EMC_DLL_XFORM_DQS0 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS1 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS2 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS3 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS4 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS5 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS6 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS7 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS8 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS9 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS10 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS11 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS12 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS13 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS14 */
+                    0x00064000 /* EMC_DLL_XFORM_DQS15 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE0 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE1 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE2 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE3 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE4 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE5 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE6 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE7 */
+                    0x00000000 /* EMC_DLL_XFORM_ADDR0 */
+                    0x00000000 /* EMC_DLL_XFORM_ADDR1 */
+                    0x00000000 /* EMC_DLL_XFORM_ADDR2 */
+                    0x00000000 /* EMC_DLL_XFORM_ADDR3 */
+                    0x00000000 /* EMC_DLL_XFORM_ADDR4 */
+                    0x00000000 /* EMC_DLL_XFORM_ADDR5 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE8 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE9 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE10 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE11 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE12 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE13 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE14 */
+                    0x00000000 /* EMC_DLL_XFORM_QUSE15 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS0 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS1 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS2 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS3 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS4 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS5 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS6 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS7 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS8 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS9 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS10 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS11 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS12 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS13 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS14 */
+                    0x00000000 /* EMC_DLI_TRIM_TXDQS15 */
+                    0x000fc000 /* EMC_DLL_XFORM_DQ0 */
+                    0x000fc000 /* EMC_DLL_XFORM_DQ1 */
+                    0x000fc000 /* EMC_DLL_XFORM_DQ2 */
+                    0x000fc000 /* EMC_DLL_XFORM_DQ3 */
+                    0x0000fc00 /* EMC_DLL_XFORM_DQ4 */
+                    0x0000fc00 /* EMC_DLL_XFORM_DQ5 */
+                    0x0000fc00 /* EMC_DLL_XFORM_DQ6 */
+                    0x0000fc00 /* EMC_DLL_XFORM_DQ7 */
+                    0x10000280 /* EMC_XM2CMDPADCTRL */
+                    0x00000000 /* EMC_XM2CMDPADCTRL4 */
+                    0x00111111 /* EMC_XM2CMDPADCTRL5 */
+                    0x00000000 /* EMC_XM2DQPADCTRL2 */
+                    0x00000000 /* EMC_XM2DQPADCTRL3 */
+                    0x77ffc081 /* EMC_XM2CLKPADCTRL */
+                    0x00000e0e /* EMC_XM2CLKPADCTRL2 */
+                    0x81f1f108 /* EMC_XM2COMPPADCTRL */
+                    0x07070004 /* EMC_XM2VTTGENPADCTRL */
+                    0x0000003f /* EMC_XM2VTTGENPADCTRL2 */
+                    0x016eeeee /* EMC_XM2VTTGENPADCTRL3 */
+                    0x51451400 /* EMC_XM2DQSPADCTRL3 */
+                    0x00514514 /* EMC_XM2DQSPADCTRL4 */
+                    0x00514514 /* EMC_XM2DQSPADCTRL5 */
+                    0x51451400 /* EMC_XM2DQSPADCTRL6 */
+                    0x0000003f /* EMC_DSR_VTTGEN_DRV */
+                    0x00000007 /* EMC_TXDSRVTTGEN */
+                    0x00000000 /* EMC_FBIO_SPARE */
+                    0x00000042 /* EMC_ZCAL_WAIT_CNT */
+                    0x000e000e /* EMC_MRS_WAIT_CNT2 */
+                    0x00000000 /* EMC_CTT */
+                    0x00000003 /* EMC_CTT_DURATION */
+                    0x0000f2f3 /* EMC_CFG_PIPE */
+                    0x800001c5 /* EMC_DYN_SELF_REF_CONTROL */
+                    0x0000000a /* EMC_QPOP */
+                >;
+            };
+        };
+    };
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra186-mc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra186-mc.yaml
new file mode 100644
index 0000000..12516bd
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra186-mc.yaml
@@ -0,0 +1,130 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/nvidia,tegra186-mc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra186 (and later) SoC Memory Controller
+
+maintainers:
+  - Jon Hunter <jonathanh@nvidia.com>
+  - Thierry Reding <thierry.reding@gmail.com>
+
+description: |
+  The NVIDIA Tegra186 SoC features a 128 bit memory controller that is split
+  into four 32 bit channels to support LPDDR4 with x16 subpartitions. The MC
+  handles memory requests for 40-bit virtual addresses from internal clients
+  and arbitrates among them to allocate memory bandwidth.
+
+  Up to 15 GiB of physical memory can be supported. Security features such as
+  encryption of traffic to and from DRAM via general security apertures are
+  available for video and other secure applications, as well as DRAM ECC for
+  automotive safety applications (single bit error correction and double bit
+  error detection).
+
+properties:
+  $nodename:
+    pattern: "^memory-controller@[0-9a-f]+$"
+
+  compatible:
+    items:
+      - enum:
+          - nvidia,tegra186-mc
+          - nvidia,tegra194-mc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  "#address-cells":
+    const: 2
+
+  "#size-cells":
+    const: 2
+
+  ranges: true
+
+  dma-ranges: true
+
+patternProperties:
+  "^external-memory-controller@[0-9a-f]+$":
+    description:
+      The bulk of the work involved in controlling the external memory
+      controller on NVIDIA Tegra186 and later is performed on the BPMP. This
+      coprocessor exposes the EMC clock that is used to set the frequency at
+      which the external memory is clocked and a remote procedure call that
+      can be used to obtain the set of available frequencies.
+    type: object
+    properties:
+      compatible:
+        items:
+          - enum:
+              - nvidia,tegra186-emc
+              - nvidia,tegra194-emc
+
+      reg:
+        maxItems: 1
+
+      interrupts:
+        maxItems: 1
+
+      clocks:
+        items:
+          - description: external memory clock
+
+      clock-names:
+        items:
+          - const: emc
+
+      nvidia,bpmp:
+        $ref: /schemas/types.yaml#/definitions/phandle
+        description:
+          phandle of the node representing the BPMP
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - "#address-cells"
+  - "#size-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/tegra186-clock.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    memory-controller@2c00000 {
+        compatible = "nvidia,tegra186-mc";
+        reg = <0x0 0x02c00000 0x0 0xb0000>;
+        interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
+
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        ranges = <0x0 0x02c00000 0x02c00000 0x0 0xb0000>;
+
+        /*
+         * Memory clients have access to all 40 bits that the memory
+         * controller can address.
+         */
+        dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
+
+        external-memory-controller@2c60000 {
+            compatible = "nvidia,tegra186-emc";
+            reg = <0x0 0x02c60000 0x0 0x50000>;
+            interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
+            clocks = <&bpmp TEGRA186_CLK_EMC>;
+            clock-names = "emc";
+
+            nvidia,bpmp = <&bpmp>;
+        };
+    };
+
+    bpmp: bpmp {
+        compatible = "nvidia,tegra186-bpmp";
+        #clock-cells = <1>;
+    };
diff --git a/Documentation/devicetree/bindings/mfd/ab8500.txt b/Documentation/devicetree/bindings/mfd/ab8500.txt
index b6bc30d..5c6eabe 100644
--- a/Documentation/devicetree/bindings/mfd/ab8500.txt
+++ b/Documentation/devicetree/bindings/mfd/ab8500.txt
@@ -1,7 +1,7 @@
 * AB8500 Multi-Functional Device (MFD)
 
 Required parent device properties:
-- compatible             : contains "stericsson,ab8500";
+- compatible             : contains "stericsson,ab8500" or "stericsson,ab8505";
 - interrupts             : contains the IRQ line for the AB8500
 - interrupt-controller   : describes the AB8500 as an Interrupt Controller (has its own domain)
 - #interrupt-cells       : should be 2, for 2-cell format
@@ -49,11 +49,13 @@
 			 : CH_WD_EXP		:	       : Charger watchdog detected
 ab8500-gpadc             : HW_CONV_END          : vddadc       : Analogue to Digital Converter
                            SW_CONV_END          :              :
-ab8500-gpio              :                      :              : GPIO Controller
+ab8500-gpio              :                      :              : GPIO Controller (AB8500)
+ab8505-gpio              :                      :              : GPIO Controller (AB8505)
 ab8500-ponkey            : ONKEY_DBF            :              : Power-on Key
                            ONKEY_DBR            :              :
 ab8500-pwm               :                      :              : Pulse Width Modulator
-ab8500-regulator         :                      :              : Regulators
+ab8500-regulator         :                      :              : Regulators (AB8500)
+ab8505-regulator         :                      :              : Regulators (AB8505)
 ab8500-rtc               : 60S                  :              : Real Time Clock
                          : ALARM                :              :
 ab8500-sysctrl           :                      :              : System Control
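
As a hedged illustration of the newly documented "stericsson,ab8505" compatible, a parent node could look like the sketch below; the interrupt parent, interrupt number and trigger type are placeholders that depend on the SoC integration:

    ab8505 {
        compatible = "stericsson,ab8505";
        interrupt-parent = <&intc>;
        interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
        interrupt-controller;
        #interrupt-cells = <2>;
    };

Sub-devices such as ab8505-gpio and ab8505-regulator are then described as child nodes of this device, as listed in the table above.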
diff --git a/Documentation/devicetree/bindings/mfd/allwinner,sun6i-a31-prcm.yaml b/Documentation/devicetree/bindings/mfd/allwinner,sun6i-a31-prcm.yaml
new file mode 100644
index 0000000..d131759
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/allwinner,sun6i-a31-prcm.yaml
@@ -0,0 +1,219 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/allwinner,sun6i-a31-prcm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A31 PRCM Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  compatible:
+    const: allwinner,sun6i-a31-prcm
+
+  reg:
+    maxItems: 1
+
+patternProperties:
+  "^.*_(clk|rst)$":
+    type: object
+
+    properties:
+      compatible:
+        enum:
+          - allwinner,sun4i-a10-mod0-clk
+          - allwinner,sun6i-a31-apb0-clk
+          - allwinner,sun6i-a31-apb0-gates-clk
+          - allwinner,sun6i-a31-ar100-clk
+          - allwinner,sun6i-a31-clock-reset
+          - fixed-factor-clock
+
+    allOf:
+      - if:
+          properties:
+            compatible:
+              contains:
+                const: allwinner,sun6i-a31-apb0-clk
+
+        then:
+          properties:
+            "#clock-cells":
+              const: 0
+
+            # Already checked in the main schema
+            compatible: true
+
+            clocks:
+              maxItems: 1
+
+            clock-output-names:
+              maxItems: 1
+
+            phandle: true
+
+          required:
+            - "#clock-cells"
+            - compatible
+            - clocks
+            - clock-output-names
+
+          additionalProperties: false
+
+      - if:
+          properties:
+            compatible:
+              contains:
+                const: allwinner,sun6i-a31-apb0-gates-clk
+
+        then:
+          properties:
+            "#clock-cells":
+              const: 1
+              description: >
+                The additional argument passed to this clock is the offset
+                of the bit controlling this particular gate in the register.
+
+            # Already checked in the main schema
+            compatible: true
+
+            clocks:
+              maxItems: 1
+
+            clock-output-names:
+              minItems: 1
+              maxItems: 32
+
+            phandle: true
+
+          required:
+            - "#clock-cells"
+            - compatible
+            - clocks
+            - clock-output-names
+
+          additionalProperties: false
+
+      - if:
+          properties:
+            compatible:
+              contains:
+                const: allwinner,sun6i-a31-ar100-clk
+
+        then:
+          properties:
+            "#clock-cells":
+              const: 0
+
+            # Already checked in the main schema
+            compatible: true
+
+            clocks:
+              maxItems: 4
+              description: >
+                The parent order must match the hardware programming
+                order.
+
+            clock-output-names:
+              maxItems: 1
+
+            phandle: true
+
+          required:
+            - "#clock-cells"
+            - compatible
+            - clocks
+            - clock-output-names
+
+          additionalProperties: false
+
+      - if:
+          properties:
+            compatible:
+              contains:
+                const: allwinner,sun6i-a31-clock-reset
+
+        then:
+          properties:
+            "#reset-cells":
+              const: 1
+
+            # Already checked in the main schema
+            compatible: true
+
+            phandle: true
+
+          required:
+            - "#reset-cells"
+            - compatible
+
+          additionalProperties: false
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/sun6i-a31-ccu.h>
+
+    prcm@1f01400 {
+        compatible = "allwinner,sun6i-a31-prcm";
+        reg = <0x01f01400 0x200>;
+
+        ar100: ar100_clk {
+            compatible = "allwinner,sun6i-a31-ar100-clk";
+            #clock-cells = <0>;
+            clocks = <&rtc 0>, <&osc24M>,
+                     <&ccu CLK_PLL_PERIPH>,
+                     <&ccu CLK_PLL_PERIPH>;
+            clock-output-names = "ar100";
+        };
+
+        ahb0: ahb0_clk {
+            compatible = "fixed-factor-clock";
+            #clock-cells = <0>;
+            clock-div = <1>;
+            clock-mult = <1>;
+            clocks = <&ar100>;
+            clock-output-names = "ahb0";
+        };
+
+        apb0: apb0_clk {
+            compatible = "allwinner,sun6i-a31-apb0-clk";
+            #clock-cells = <0>;
+            clocks = <&ahb0>;
+            clock-output-names = "apb0";
+        };
+
+        apb0_gates: apb0_gates_clk {
+            compatible = "allwinner,sun6i-a31-apb0-gates-clk";
+            #clock-cells = <1>;
+            clocks = <&apb0>;
+            clock-output-names = "apb0_pio", "apb0_ir",
+                                 "apb0_timer", "apb0_p2wi",
+                                 "apb0_uart", "apb0_1wire",
+                                 "apb0_i2c";
+        };
+
+        ir_clk: ir_clk {
+            #clock-cells = <0>;
+            compatible = "allwinner,sun4i-a10-mod0-clk";
+            clocks = <&rtc 0>, <&osc24M>;
+            clock-output-names = "ir";
+        };
+
+        apb0_rst: apb0_rst {
+            compatible = "allwinner,sun6i-a31-clock-reset";
+            #reset-cells = <1>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/mfd/allwinner,sun8i-a23-prcm.yaml b/Documentation/devicetree/bindings/mfd/allwinner,sun8i-a23-prcm.yaml
new file mode 100644
index 0000000..aa5e683
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/allwinner,sun8i-a23-prcm.yaml
@@ -0,0 +1,200 @@
+# SPDX-License-Identifier: GPL-2.0+
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/allwinner,sun8i-a23-prcm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A23 PRCM Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+properties:
+  compatible:
+    const: allwinner,sun8i-a23-prcm
+
+  reg:
+    maxItems: 1
+
+patternProperties:
+  "^.*(clk|rst|codec).*$":
+    type: object
+
+    properties:
+      compatible:
+        enum:
+          - fixed-factor-clock
+          - allwinner,sun8i-a23-apb0-clk
+          - allwinner,sun8i-a23-apb0-gates-clk
+          - allwinner,sun6i-a31-clock-reset
+          - allwinner,sun8i-a23-codec-analog
+
+    required:
+      - compatible
+
+    allOf:
+      - if:
+          properties:
+            compatible:
+              contains:
+                const: allwinner,sun8i-a23-apb0-clk
+
+        then:
+          properties:
+            "#clock-cells":
+              const: 0
+
+            # Already checked in the main schema
+            compatible: true
+
+            clocks:
+              maxItems: 1
+
+            clock-output-names:
+              maxItems: 1
+
+            phandle: true
+
+          required:
+            - "#clock-cells"
+            - compatible
+            - clocks
+            - clock-output-names
+
+          additionalProperties: false
+
+      - if:
+          properties:
+            compatible:
+              contains:
+                const: allwinner,sun8i-a23-apb0-gates-clk
+
+        then:
+          properties:
+            "#clock-cells":
+              const: 1
+              description: >
+                The additional argument passed to this clock is the offset
+                of the bit controlling this particular gate in the register.
+
+            # Already checked in the main schema
+            compatible: true
+
+            clocks:
+              maxItems: 1
+
+            clock-output-names:
+              minItems: 1
+              maxItems: 32
+
+            phandle: true
+
+          required:
+            - "#clock-cells"
+            - compatible
+            - clocks
+            - clock-output-names
+
+          additionalProperties: false
+
+      - if:
+          properties:
+            compatible:
+              contains:
+                const: allwinner,sun6i-a31-clock-reset
+
+        then:
+          properties:
+            "#reset-cells":
+              const: 1
+
+            # Already checked in the main schema
+            compatible: true
+
+            phandle: true
+
+          required:
+            - "#reset-cells"
+            - compatible
+
+          additionalProperties: false
+
+      - if:
+          properties:
+            compatible:
+              contains:
+                const: allwinner,sun8i-a23-codec-analog
+
+        then:
+          properties:
+            # Already checked in the main schema
+            compatible: true
+
+            phandle: true
+
+          required:
+            - compatible
+
+          additionalProperties: false
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    prcm@1f01400 {
+        compatible = "allwinner,sun8i-a23-prcm";
+        reg = <0x01f01400 0x200>;
+
+        ar100: ar100_clk {
+            compatible = "fixed-factor-clock";
+            #clock-cells = <0>;
+            clock-div = <1>;
+            clock-mult = <1>;
+            clocks = <&osc24M>;
+            clock-output-names = "ar100";
+        };
+
+        ahb0: ahb0_clk {
+            compatible = "fixed-factor-clock";
+            #clock-cells = <0>;
+            clock-div = <1>;
+            clock-mult = <1>;
+            clocks = <&ar100>;
+            clock-output-names = "ahb0";
+        };
+
+        apb0: apb0_clk {
+            compatible = "allwinner,sun8i-a23-apb0-clk";
+            #clock-cells = <0>;
+            clocks = <&ahb0>;
+            clock-output-names = "apb0";
+        };
+
+        apb0_gates: apb0_gates_clk {
+            compatible = "allwinner,sun8i-a23-apb0-gates-clk";
+            #clock-cells = <1>;
+            clocks = <&apb0>;
+            clock-output-names = "apb0_pio", "apb0_timer",
+                                 "apb0_rsb", "apb0_uart",
+                                 "apb0_i2c";
+        };
+
+        apb0_rst: apb0_rst {
+            compatible = "allwinner,sun6i-a31-clock-reset";
+            #reset-cells = <1>;
+        };
+
+        codec_analog: codec-analog {
+            compatible = "allwinner,sun8i-a23-codec-analog";
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/mfd/atmel-gpbr.txt b/Documentation/devicetree/bindings/mfd/atmel-gpbr.txt
index a285695..e8c5255 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-gpbr.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-gpbr.txt
@@ -3,7 +3,9 @@
 The GPBR are a set of battery-backed registers.
 
 Required properties:
-- compatible:		"atmel,at91sam9260-gpbr", "syscon"
+- compatible:		Should be one of the following:
+			"atmel,at91sam9260-gpbr", "syscon"
+			"microchip,sam9x60-gpbr", "syscon"
 - reg:			contains offset/length value of the GPBR memory
 			region.
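
For illustration only (the base address and size are placeholders rather than values for a specific SoC), a GPBR node using the new SAM9X60 compatible might look like:

    gpbr: syscon@fffffe60 {
        compatible = "microchip,sam9x60-gpbr", "syscon";
        reg = <0xfffffe60 0x10>;
    };

The same compatible-plus-"syscon" pattern applies to the Bus Matrix and SMC bindings extended below.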
 
diff --git a/Documentation/devicetree/bindings/mfd/atmel-matrix.txt b/Documentation/devicetree/bindings/mfd/atmel-matrix.txt
index e3ef50c..89d05c6 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-matrix.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-matrix.txt
@@ -13,6 +13,7 @@
 			"atmel,at91sam9n12-matrix", "syscon"
 			"atmel,at91sam9x5-matrix", "syscon"
 			"atmel,sama5d3-matrix", "syscon"
+			"microchip,sam9x60-matrix", "syscon"
 - reg:			Contains offset/length value of the Bus Matrix
 			memory region.
 
diff --git a/Documentation/devicetree/bindings/mfd/atmel-smc.txt b/Documentation/devicetree/bindings/mfd/atmel-smc.txt
index 1103ce2..5696d9f 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-smc.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-smc.txt
@@ -9,6 +9,7 @@
 			"atmel,at91sam9260-smc", "syscon"
 			"atmel,sama5d3-smc", "syscon"
 			"atmel,sama5d2-smc", "syscon"
+			"microchip,sam9x60-smc", "syscon"
 - reg:			Contains offset/length value of the SMC memory
 			region.
 
diff --git a/Documentation/devicetree/bindings/mfd/atmel-usart.txt b/Documentation/devicetree/bindings/mfd/atmel-usart.txt
index 699fd3c..a091330 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-usart.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-usart.txt
@@ -1,10 +1,13 @@
 * Atmel Universal Synchronous Asynchronous Receiver/Transmitter (USART)
 
 Required properties for USART:
-- compatible: Should be "atmel,<chip>-usart" or "atmel,<chip>-dbgu"
-  The compatible <chip> indicated will be the first SoC to support an
-  additional mode or an USART new feature.
-  For the dbgu UART, use "atmel,<chip>-dbgu", "atmel,<chip>-usart"
+- compatible: Should be one of the following:
+	- "atmel,at91rm9200-usart"
+	- "atmel,at91sam9260-usart"
+	- "microchip,sam9x60-usart"
+	- "atmel,at91rm9200-dbgu", "atmel,at91rm9200-usart"
+	- "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"
+	- "microchip,sam9x60-dbgu", "microchip,sam9x60-usart"
 - reg: Should contain registers location and length
 - interrupts: Should contain interrupt
 - clock-names: tuple listing input clock names.
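
As an illustrative sketch only (the register address, interrupt specifier and clock reference are placeholders), a SAM9X60 DBGU node combining the new compatible strings might look like:

    dbgu: serial@fffff200 {
        compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart";
        reg = <0xfffff200 0x200>;
        interrupts = <47 IRQ_TYPE_LEVEL_HIGH 7>;
        clocks = <&pmc PMC_TYPE_PERIPHERAL 47>;
        clock-names = "usart";
    };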
diff --git a/Documentation/devicetree/bindings/mfd/da9062.txt b/Documentation/devicetree/bindings/mfd/da9062.txt
index bc4b59d..857af98 100644
--- a/Documentation/devicetree/bindings/mfd/da9062.txt
+++ b/Documentation/devicetree/bindings/mfd/da9062.txt
@@ -13,6 +13,7 @@
 da9062-onkey            :               : On Key
 da9062-watchdog         :               : Watchdog Timer
 da9062-thermal          :               : Thermal
+da9062-gpio             :               : GPIOs
 
 The DA9061 PMIC consists of:
 
@@ -38,6 +39,15 @@
 See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt for
 further information on IRQ bindings.
 
+Optional properties:
+
+- gpio-controller : Marks the device as a gpio controller.
+- #gpio-cells     : Should be two. The first cell is the pin number and the
+                    second cell is used to specify the gpio polarity.
+
+See Documentation/devicetree/bindings/gpio/gpio.txt for further information on
+GPIO bindings.
+
 Sub-nodes:
 
 - regulators : This node defines the settings for the LDOs and BUCKs.
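
The gpio-controller and #gpio-cells properties documented above can be used as in the following sketch; only the GPIO-related properties are shown and the remaining required DA9062 properties are left out for brevity:

    pmic: pmic@58 {
        compatible = "dlg,da9062";
        reg = <0x58>;
        /* interrupt, regulator and other sub-device properties omitted */

        gpio-controller;
        #gpio-cells = <2>;
    };

A consumer would then reference a pin as, for example, gpios = <&pmic 0 GPIO_ACTIVE_LOW>;.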
diff --git a/Documentation/devicetree/bindings/mfd/max14577.txt b/Documentation/devicetree/bindings/mfd/max14577.txt
index fc6f0f4..92070b3 100644
--- a/Documentation/devicetree/bindings/mfd/max14577.txt
+++ b/Documentation/devicetree/bindings/mfd/max14577.txt
@@ -5,6 +5,8 @@
 interfaced to host controller using I2C.
 
 MAX77836 additionally contains PMIC (with two LDO regulators) and Fuel Gauge.
+For the description of Fuel Gauge low SOC alert interrupt see:
+../power/supply/max17040_battery.txt
 
 
 Required properties:
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml
new file mode 100644
index 0000000..4fbb9e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml
@@ -0,0 +1,193 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/rohm,bd71828-pmic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BD71828 Power Management Integrated Circuit bindings
+
+maintainers:
+  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+
+description: |
+  BD71828GW is a single-chip power management IC for battery-powered portable
+  devices. The IC integrates 7 buck converters, 7 LDOs, and a 1500 mA
+  single-cell linear charger. Also included is a Coulomb counter, a real-time
+  clock (RTC), and a 32.768 kHz clock gate.
+
+properties:
+  compatible:
+    const: rohm,bd71828
+
+  reg:
+    description:
+      I2C slave address.
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  gpio-controller: true
+
+  "#gpio-cells":
+    const: 2
+    description: |
+      The first cell is the pin number and the second cell is used to specify
+      flags. See ../gpio/gpio.txt for more information.
+
+  clocks:
+    maxItems: 1
+
+  "#clock-cells":
+    const: 0
+
+  rohm,charger-sense-resistor-ohms:
+    minimum: 10000000
+    maximum: 50000000
+    description: |
+      The BD71827 and BD71828 have a SAR ADC for measuring charging currents.
+      An external sense resistor (RSENSE in the data sheet) should be used. If
+      a resistor other than the default 30 MOhm is used, its resistance should
+      be given here in Ohms.
+
+  regulators:
+    $ref: ../regulator/rohm,bd71828-regulator.yaml
+    description:
+      List of child nodes that specify the regulators.
+
+  leds:
+    $ref: ../leds/rohm,bd71828-leds.yaml
+
+  gpio-reserved-ranges:
+    description: |
+      Usage of the BD71828 GPIO pins can be changed via OTP. This property can
+      be used to mark the pins which should not be configured for GPIO. Please
+      see ../gpio/gpio.txt for more information.
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - "#clock-cells"
+  - regulators
+  - gpio-controller
+  - "#gpio-cells"
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/leds/common.h>
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        pmic: pmic@4b {
+            compatible = "rohm,bd71828";
+            reg = <0x4b>;
+
+            interrupt-parent = <&gpio1>;
+            interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
+
+            clocks = <&osc 0>;
+            #clock-cells = <0>;
+            clock-output-names = "bd71828-32k-out";
+
+            gpio-controller;
+            #gpio-cells = <2>;
+            gpio-reserved-ranges = <0 1>, <2 1>;
+
+            rohm,charger-sense-resistor-ohms = <10000000>;
+
+            regulators {
+                buck1: BUCK1 {
+                    regulator-name = "buck1";
+                    regulator-min-microvolt = <500000>;
+                    regulator-max-microvolt = <2000000>;
+                    regulator-ramp-delay = <2500>;
+                };
+                buck2: BUCK2 {
+                    regulator-name = "buck2";
+                    regulator-min-microvolt = <500000>;
+                    regulator-max-microvolt = <2000000>;
+                    regulator-ramp-delay = <2500>;
+                };
+                buck3: BUCK3 {
+                    regulator-name = "buck3";
+                    regulator-min-microvolt = <1200000>;
+                    regulator-max-microvolt = <2000000>;
+                };
+                buck4: BUCK4 {
+                    regulator-name = "buck4";
+                    regulator-min-microvolt = <1000000>;
+                    regulator-max-microvolt = <1800000>;
+                };
+                buck5: BUCK5 {
+                    regulator-name = "buck5";
+                    regulator-min-microvolt = <2500000>;
+                    regulator-max-microvolt = <3300000>;
+                };
+                buck6: BUCK6 {
+                    regulator-name = "buck6";
+                    regulator-min-microvolt = <500000>;
+                    regulator-max-microvolt = <2000000>;
+                    regulator-ramp-delay = <2500>;
+                };
+                buck7: BUCK7 {
+                    regulator-name = "buck7";
+                    regulator-min-microvolt = <500000>;
+                    regulator-max-microvolt = <2000000>;
+                    regulator-ramp-delay = <2500>;
+                };
+                ldo1: LDO1 {
+                    regulator-name = "ldo1";
+                    regulator-min-microvolt = <800000>;
+                    regulator-max-microvolt = <3300000>;
+                };
+                ldo2: LDO2 {
+                    regulator-name = "ldo2";
+                    regulator-min-microvolt = <800000>;
+                    regulator-max-microvolt = <3300000>;
+                };
+                ldo3: LDO3 {
+                    regulator-name = "ldo3";
+                    regulator-min-microvolt = <800000>;
+                    regulator-max-microvolt = <3300000>;
+                };
+                ldo4: LDO4 {
+                    regulator-name = "ldo4";
+                    regulator-min-microvolt = <800000>;
+                    regulator-max-microvolt = <3300000>;
+                };
+                ldo5: LDO5 {
+                    regulator-name = "ldo5";
+                    regulator-min-microvolt = <800000>;
+                    regulator-max-microvolt = <3300000>;
+                };
+                ldo6: LDO6 {
+                    regulator-name = "ldo6";
+                    regulator-min-microvolt = <1800000>;
+                    regulator-max-microvolt = <1800000>;
+                };
+                ldo7_reg: LDO7 {
+                    regulator-name = "ldo7";
+                    regulator-min-microvolt = <800000>;
+                    regulator-max-microvolt = <3300000>;
+                };
+            };
+
+            leds {
+                compatible = "rohm,bd71828-leds";
+
+                led-1 {
+                    rohm,led-compatible = "bd71828-grnled";
+                    function = LED_FUNCTION_INDICATOR;
+                    color = <LED_COLOR_ID_GREEN>;
+                };
+                led-2 {
+                    rohm,led-compatible = "bd71828-ambled";
+                    function = LED_FUNCTION_CHARGING;
+                    color = <LED_COLOR_ID_AMBER>;
+                };
+            };
+        };
+    };
diff --git a/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt b/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt
deleted file mode 100644
index daa091c..0000000
--- a/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-* Allwinner PRCM (Power/Reset/Clock Management) Multi-Functional Device
-
-PRCM is an MFD device exposing several Power Management related devices
-(like clks and reset controllers).
-
-Required properties:
- - compatible: "allwinner,sun6i-a31-prcm" or "allwinner,sun8i-a23-prcm"
- - reg: The PRCM registers range
-
-The prcm node may contain several subdevices definitions:
- - see Documentation/devicetree/bindings/clock/sunxi.txt for clock devices
- - see Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt for reset
-   controller devices
-
-
-Example:
-
-	prcm: prcm@1f01400 {
-		compatible = "allwinner,sun6i-a31-prcm";
-		reg = <0x01f01400 0x200>;
-
-		/* Put subdevices here */
-		ar100: ar100_clk {
-			compatible = "allwinner,sun6i-a31-ar100-clk";
-			#clock-cells = <0>;
-			clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>;
-		};
-
-		ahb0: ahb0_clk {
-			compatible = "fixed-factor-clock";
-			#clock-cells = <0>;
-			clock-div = <1>;
-			clock-mult = <1>;
-			clocks = <&ar100_div>;
-			clock-output-names = "ahb0";
-		};
-
-		apb0: apb0_clk {
-			compatible = "allwinner,sun6i-a31-apb0-clk";
-			#clock-cells = <0>;
-			clocks = <&ahb0>;
-			clock-output-names = "apb0";
-		};
-
-		apb0_gates: apb0_gates_clk {
-			compatible = "allwinner,sun6i-a31-apb0-gates-clk";
-			#clock-cells = <1>;
-			clocks = <&apb0>;
-			clock-output-names = "apb0_pio", "apb0_ir",
-					"apb0_timer01", "apb0_p2wi",
-					"apb0_uart", "apb0_1wire",
-					"apb0_i2c";
-		};
-
-		apb0_rst: apb0_rst {
-			compatible = "allwinner,sun6i-a31-clock-reset";
-			#reset-cells = <1>;
-		};
-	};
diff --git a/Documentation/devicetree/bindings/mfd/tps6105x.txt b/Documentation/devicetree/bindings/mfd/tps6105x.txt
index 93602c7..dc448a9 100644
--- a/Documentation/devicetree/bindings/mfd/tps6105x.txt
+++ b/Documentation/devicetree/bindings/mfd/tps6105x.txt
@@ -7,7 +7,22 @@
 - compatible:		"ti,tps61050" or "ti,tps61052"
 - reg:			Specifies the I2C slave address
 
-Example:
+Optional sub-node:
+
+This subnode selects the chip's operational mode.
+There can be at most one such subnode.
+
+- regulator: presence of this sub-node puts the chip in regulator mode.
+	see ../regulator/regulator.yaml
+
+- led: presence of this sub-node puts the chip in led mode.
+	Optional properties:
+	- function : see ../leds/common.txt
+	- color    : see ../leds/common.txt
+	- label    : see ../leds/common.txt
+			(deprecated)
+
+Example (GPIO operation only):
 
 i2c0 {
 	tps61052@33 {
@@ -15,3 +30,33 @@
 		reg = <0x33>;
 	};
 };
+
+Example (GPIO + regulator operation):
+
+i2c0 {
+	tps61052@33 {
+		compatible = "ti,tps61052";
+		reg = <0x33>;
+
+		regulator {
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			regulator-always-on;
+		};
+	};
+};
+
+Example (GPIO + led operation):
+
+#include <dt-bindings/leds/common.h>
+
+i2c0 {
+	tps61052@33 {
+		compatible = "ti,tps61052";
+		reg = <0x33>;
+
+		led {
+			color = <LED_COLOR_ID_WHITE>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/mfd/xylon,logicvc.yaml b/Documentation/devicetree/bindings/mfd/xylon,logicvc.yaml
new file mode 100644
index 0000000..abc9937
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/xylon,logicvc.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Bootlin
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/mfd/xylon,logicvc.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Xylon LogiCVC multi-function device
+
+maintainers:
+  - Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+
+description: |
+  The LogiCVC is a display controller that also contains a GPIO controller.
+  As a result, a multi-function device is exposed as parent of the display
+  and GPIO blocks.
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - xylon,logicvc-3.02.a
+      - const: syscon
+      - const: simple-mfd
+
+  reg:
+    maxItems: 1
+
+select:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - xylon,logicvc-3.02.a
+
+  required:
+    - compatible
+
+required:
+  - compatible
+  - reg
+
+examples:
+  - |
+    logicvc: logicvc@43c00000 {
+      compatible = "xylon,logicvc-3.02.a", "syscon", "simple-mfd";
+      reg = <0x43c00000 0x6000>;
+      #address-cells = <1>;
+      #size-cells = <1>;
+    };
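Since the description says the MFD is exposed as parent of the display and
GPIO blocks, a sketch of such a child is shown below. The "gpio" node name and
the "xylon,logicvc-gpio" compatible are assumptions drawn from the companion
GPIO binding and are not defined by this schema:

	logicvc: logicvc@43c00000 {
		compatible = "xylon,logicvc-3.02.a", "syscon", "simple-mfd";
		reg = <0x43c00000 0x6000>;
		#address-cells = <1>;
		#size-cells = <1>;

		gpio {
			compatible = "xylon,logicvc-gpio";
			gpio-controller;
			#gpio-cells = <2>;
		};
	};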
diff --git a/Documentation/devicetree/bindings/mips/ingenic/devices.yaml b/Documentation/devicetree/bindings/mips/ingenic/devices.yaml
new file mode 100644
index 0000000..78dcf6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/ingenic/devices.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mips/ingenic/devices.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic XBurst based Platforms Device Tree Bindings
+
+maintainers:
+  - 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+description: |
+  Devices with an Ingenic XBurst CPU shall have the following properties.
+
+properties:
+  $nodename:
+    const: '/'
+  compatible:
+    oneOf:
+
+      - description: Qi Hardware Ben NanoNote
+        items:
+          - const: qi,lb60
+
+      - description: Game Consoles Worldwide GCW Zero
+        items:
+          - const: gcw,zero
+
+      - description: MIPS Creator CI20
+        items:
+          - const: img,ci20
+
+      - description: YSH & ATIL General Board CU Neo
+        items:
+          - const: yna,cu1000-neo
+...
diff --git a/Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt b/Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt
index 0e1fa5b..f2e2e28 100644
--- a/Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt
+++ b/Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt
@@ -18,6 +18,7 @@
 Optional properties:
 ===================
 
+- reg: A hint for the memory regions associated with the P2A controller
 - memory-region: A phandle to a reserved_memory region to be used for the PCI
 		to AHB mapping
 
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index c93643f..0f97d71 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -22,6 +22,7 @@
 	       "fsl,imx8mm-usdhc"
 	       "fsl,imx8mn-usdhc"
 	       "fsl,imx8mp-usdhc"
+	       "fsl,imx8qm-usdhc"
 	       "fsl,imx8qxp-usdhc"
 
 Optional properties:
diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
index b130450..3c0df40 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
@@ -96,11 +96,10 @@
     description:
       When set, no physical write-protect line is present. This
       property should only be specified when the controller has a
-      dedicated write-protect detection logic. If a GPIO is always
-      used for the write-protect detection. If a GPIO is always used
+      dedicated write-protect detection logic. If a GPIO is always used
       for the write-protect detection logic, it is sufficient to not
       specify the wp-gpios property in the absence of a write-protect
-      line.
+      line. Not used in combination with eMMC or SDIO.
 
   wp-gpios:
     description:
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
index 68b51dc..3aa297c 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -57,6 +57,7 @@
 	"atmel,at91sam9g45-pmecc"
 	"atmel,sama5d4-pmecc"
 	"atmel,sama5d2-pmecc"
+	"microchip,sam9x60-pmecc"
 - reg: should contain 2 register ranges. The first one is pointing to the PMECC
        block, and the second one to the PMECC_ERRLOC block.
 
diff --git a/Documentation/devicetree/bindings/mtd/denali-nand.txt b/Documentation/devicetree/bindings/mtd/denali-nand.txt
index b32aed1..98916a8 100644
--- a/Documentation/devicetree/bindings/mtd/denali-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/denali-nand.txt
@@ -14,6 +14,11 @@
     interface clock, and the ECC circuit clock.
   - clock-names: should contain "nand", "nand_x", "ecc"
 
+Optional properties:
+  - resets: may contain phandles to the controller core reset and the register
+    reset
+  - reset-names: may contain "nand", "reg"
+
 Sub-nodes:
   Sub-nodes represent available NAND chips.
 
@@ -46,6 +51,8 @@
 	reg-names = "nand_data", "denali_reg";
 	clocks = <&nand_clk>, <&nand_x_clk>, <&nand_ecc_clk>;
 	clock-names = "nand", "nand_x", "ecc";
+	resets = <&nand_rst>, <&nand_reg_rst>;
+	reset-names = "nand", "reg";
 	interrupts = <0 144 4>;
 
 	nand@0 {
diff --git a/Documentation/devicetree/bindings/net/can/atmel-can.txt b/Documentation/devicetree/bindings/net/can/atmel-can.txt
index 14e52a0..218a3b3 100644
--- a/Documentation/devicetree/bindings/net/can/atmel-can.txt
+++ b/Documentation/devicetree/bindings/net/can/atmel-can.txt
@@ -1,7 +1,8 @@
 * AT91 CAN *
 
 Required properties:
-  - compatible: Should be "atmel,at91sam9263-can" or "atmel,at91sam9x5-can"
+  - compatible: Should be "atmel,at91sam9263-can", "atmel,at91sam9x5-can" or
+    "microchip,sam9x60-can"
   - reg: Should contain CAN controller registers location and length
   - interrupts: Should contain IRQ line for the CAN controller
 
diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt
index 19e4a7d..85c6551 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_can.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt
@@ -7,6 +7,7 @@
 	      "renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC.
 	      "renesas,can-r8a77470" if CAN controller is a part of R8A77470 SoC.
 	      "renesas,can-r8a774a1" if CAN controller is a part of R8A774A1 SoC.
+	      "renesas,can-r8a774b1" if CAN controller is a part of R8A774B1 SoC.
 	      "renesas,can-r8a774c0" if CAN controller is a part of R8A774C0 SoC.
 	      "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
 	      "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
@@ -36,8 +37,8 @@
 - pinctrl-0: pin control group to be used for this controller.
 - pinctrl-names: must be "default".
 
-Required properties for R8A774A1, R8A774C0, R8A7795, R8A7796, R8A77965,
-R8A77990, and R8A77995:
+Required properties for R8A774A1, R8A774B1, R8A774C0, R8A7795, R8A7796,
+R8A77965, R8A77990, and R8A77995:
 For the denoted SoCs, "clkp2" can be CANFD clock. This is a div6 clock and can
 be used by both CAN and CAN FD controller at the same time. It needs to be
 scaled to maximum frequency if any of these controllers use it. This is done
diff --git a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
index a901cd9..13a4e34 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
@@ -5,6 +5,7 @@
 - compatible: Must contain one or more of the following:
   - "renesas,rcar-gen3-canfd" for R-Car Gen3 and RZ/G2 compatible controllers.
   - "renesas,r8a774a1-canfd" for R8A774A1 (RZ/G2M) compatible controller.
+  - "renesas,r8a774b1-canfd" for R8A774B1 (RZ/G2N) compatible controller.
   - "renesas,r8a774c0-canfd" for R8A774C0 (RZ/G2E) compatible controller.
   - "renesas,r8a7795-canfd" for R8A7795 (R-Car H3) compatible controller.
   - "renesas,r8a7796-canfd" for R8A7796 (R-Car M3-W) compatible controller.
@@ -31,8 +32,8 @@
 child node supports the "status" property only, which is used to
 enable/disable the respective channel.
 
-Required properties for R8A774A1, R8A774C0, R8A7795, R8A7796, R8A77965,
-R8A77990, and R8A77995:
+Required properties for R8A774A1, R8A774B1, R8A774C0, R8A7795, R8A7796,
+R8A77965, R8A77990, and R8A77995:
 In the denoted SoCs, canfd clock is a div6 clock and can be used by both CAN
 and CAN FD controller at the same time. It needs to be scaled to maximum
 frequency if any of these controllers use it. This is done using the below
diff --git a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
index 68b67d9..999acea 100644
--- a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
@@ -11,6 +11,7 @@
  - compatible: should contain one of the following:
    * "qcom,qca6174-bt"
    * "qcom,wcn3990-bt"
+   * "qcom,wcn3991-bt"
    * "qcom,wcn3998-bt"
 
 Optional properties for compatible string qcom,qca6174-bt:
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index 5df4aa7..87dad2d 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -21,7 +21,8 @@
       - "renesas,etheravb-r8a774b1" for the R8A774B1 SoC.
       - "renesas,etheravb-r8a774c0" for the R8A774C0 SoC.
       - "renesas,etheravb-r8a7795" for the R8A7795 SoC.
-      - "renesas,etheravb-r8a7796" for the R8A7796 SoC.
+      - "renesas,etheravb-r8a7796" for the R8A77960 SoC.
+      - "renesas,etheravb-r8a77961" for the R8A77961 SoC.
       - "renesas,etheravb-r8a77965" for the R8A77965 SoC.
       - "renesas,etheravb-r8a77970" for the R8A77970 SoC.
       - "renesas,etheravb-r8a77980" for the R8A77980 SoC.
@@ -37,8 +38,8 @@
 - reg: Offset and length of (1) the register block and (2) the stream buffer.
        The region for the register block is mandatory.
        The region for the stream buffer is optional, as it is only present on
-       R-Car Gen2 and RZ/G1 SoCs, and on R-Car H3 (R8A7795), M3-W (R8A7796),
-       and M3-N (R8A77965).
+       R-Car Gen2 and RZ/G1 SoCs, and on R-Car H3 (R8A7795), M3-W (R8A77960),
+       M3-W+ (R8A77961), and M3-N (R8A77965).
 - interrupts: A list of interrupt-specifiers, one for each entry in
 	      interrupt-names.
 	      If interrupt-names is not present, an interrupt specifier
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
index 904dadf..6e346d5 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
@@ -2,7 +2,7 @@
 
 This binding represents the on-chip eFuse OTP controller found on
 i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ, i.MX6SLL,
-i.MX7D/S, i.MX7ULP, i.MX8MQ, i.MX8MM and i.MX8MN SoCs.
+i.MX7D/S, i.MX7ULP, i.MX8MQ, i.MX8MM, i.MX8MN and i.MX8MP SoCs.
 
 Required properties:
 - compatible: should be one of
@@ -17,6 +17,7 @@
 	"fsl,imx8mq-ocotp" (i.MX8MQ),
 	"fsl,imx8mm-ocotp" (i.MX8MM),
 	"fsl,imx8mn-ocotp" (i.MX8MN),
+	"fsl,imx8mp-ocotp" (i.MX8MP),
 	followed by "syscon".
 - #address-cells : Should be 1
 - #size-cells : Should be 1
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.yaml b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
index 1c75a05..b43c6c6 100644
--- a/Documentation/devicetree/bindings/nvmem/nvmem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
@@ -34,6 +34,14 @@
     description:
       Mark the provider as read only.
 
+  wp-gpios:
+    description:
+      GPIO to which the write-protect pin of the chip is connected.
+      The write-protect GPIO is asserted when driven high (logical '1')
+      to block write operations, and deasserted when driven low
+      (logical '0') to allow writing.
+    maxItems: 1
+
 patternProperties:
   "^.*@[0-9a-f]+$":
     type: object
@@ -63,9 +71,12 @@
 
 examples:
   - |
+      #include <dt-bindings/gpio/gpio.h>
+
       qfprom: eeprom@700000 {
           #address-cells = <1>;
           #size-cells = <1>;
+          wp-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
 
           /* ... */
 
diff --git a/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml b/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml
new file mode 100644
index 0000000..7bbd4e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/qcom,spmi-sdam.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. SPMI SDAM DT bindings
+
+maintainers:
+  - Shyam Kumar Thella <sthella@codeaurora.org>
+
+description: |
+  The SDAM provides scratch register space for the PMIC clients. This
+  memory can be used by software to store information or communicate
+  to/from the PBUS.
+
+allOf:
+  - $ref: "nvmem.yaml#"
+
+properties:
+  compatible:
+    enum:
+      - qcom,spmi-sdam
+
+  reg:
+    maxItems: 1
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 1
+
+  ranges: true
+
+required:
+  - compatible
+  - reg
+  - ranges
+
+patternProperties:
+  "^.*@[0-9a-f]+$":
+    type: object
+
+    properties:
+      reg:
+        maxItems: 1
+        description:
+          Offset and size in bytes within the storage device.
+
+      bits:
+        $ref: /schemas/types.yaml#/definitions/uint32-array
+        maxItems: 1
+        items:
+          items:
+            - minimum: 0
+              maximum: 7
+              description:
+                Offset in bits within the address range specified by reg.
+            - minimum: 1
+              description:
+                Size in bits within the address range specified by reg.
+
+    required:
+      - reg
+
+    additionalProperties: false
+
+examples:
+  - |
+      sdam_1: nvram@b000 {
+          #address-cells = <1>;
+          #size-cells = <1>;
+          compatible = "qcom,spmi-sdam";
+          reg = <0xb000 0x100>;
+          ranges = <0 0xb000 0x100>;
+
+          /* Data cells */
+          restart_reason: restart@50 {
+              reg = <0x50 0x1>;
+              bits = <6 2>;
+          };
+      };
+...
diff --git a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.txt b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.txt
deleted file mode 100644
index 142a51d..0000000
--- a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-STMicroelectronics STM32 Factory-programmed data device tree bindings
-
-This represents STM32 Factory-programmed read only non-volatile area: locked
-flash, OTP, read-only HW regs... This contains various information such as:
-analog calibration data for temperature sensor (e.g. TS_CAL1, TS_CAL2),
-internal vref (VREFIN_CAL), unique device ID...
-
-Required properties:
-- compatible:		Should be one of:
-			"st,stm32f4-otp"
-			"st,stm32mp15-bsec"
-- reg:			Offset and length of factory-programmed area.
-- #address-cells:	Should be '<1>'.
-- #size-cells:		Should be '<1>'.
-
-Optional Data cells:
-- Must be child nodes as described in nvmem.txt.
-
-Example on stm32f4:
-	romem: nvmem@1fff7800 {
-		compatible = "st,stm32f4-otp";
-		reg = <0x1fff7800 0x400>;
-		#address-cells = <1>;
-		#size-cells = <1>;
-
-		/* Data cells: ts_cal1 at 0x1fff7a2c */
-		ts_cal1: calib@22c {
-			reg = <0x22c 0x2>;
-		};
-		...
-	};
diff --git a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
new file mode 100644
index 0000000..d84deb4
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/st,stm32-romem.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 Factory-programmed data bindings
+
+description: |
+  This represents STM32 Factory-programmed read only non-volatile area: locked
+  flash, OTP, read-only HW regs... This contains various information such as:
+  analog calibration data for temperature sensor (e.g. TS_CAL1, TS_CAL2),
+  internal vref (VREFIN_CAL), unique device ID...
+
+maintainers:
+  - Fabrice Gasnier <fabrice.gasnier@st.com>
+
+allOf:
+  - $ref: "nvmem.yaml#"
+
+properties:
+  compatible:
+    enum:
+      - st,stm32f4-otp
+      - st,stm32mp15-bsec
+
+required:
+  - "#address-cells"
+  - "#size-cells"
+  - compatible
+  - reg
+
+examples:
+  - |
+    efuse@1fff7800 {
+      compatible = "st,stm32f4-otp";
+      reg = <0x1fff7800 0x400>;
+      #address-cells = <1>;
+      #size-cells = <1>;
+
+      calib@22c {
+        reg = <0x22c 0x2>;
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml b/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
new file mode 100644
index 0000000..aef87a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
@@ -0,0 +1,129 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/opp/allwinner,sun50i-h6-operating-points.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner H6 CPU OPP Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+description: |
+  For some SoCs, the CPU frequency subset and the voltage value of each
+  OPP vary based on the silicon variant in use. The Allwinner Process
+  Voltage Scaling Tables define the voltage and frequency values based
+  on the speedbin blown into the efuses. The sun50i-cpufreq-nvmem
+  driver reads the efuse value from the SoC to provide the OPP
+  framework with the required information.
+
+properties:
+  compatible:
+    const: allwinner,sun50i-h6-operating-points
+
+  nvmem-cells:
+    description: |
+      A phandle pointing to an nvmem cell that represents the efuse
+      register holding the speedbin information used to select the
+      right frequency/voltage value pair. Please refer to the
+      nvmem-cells bindings in
+      Documentation/devicetree/bindings/nvmem/nvmem.txt and to the
+      examples below.
+
+required:
+  - compatible
+  - nvmem-cells
+
+patternProperties:
+  "opp-[0-9]+":
+    type: object
+
+    properties:
+      opp-hz: true
+
+    patternProperties:
+      "opp-microvolt-.*": true
+
+    required:
+      - opp-hz
+      - opp-microvolt-speed0
+      - opp-microvolt-speed1
+      - opp-microvolt-speed2
+
+    unevaluatedProperties: false
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    cpu_opp_table: opp-table {
+        compatible = "allwinner,sun50i-h6-operating-points";
+        nvmem-cells = <&speedbin_efuse>;
+        opp-shared;
+
+        opp-480000000 {
+            clock-latency-ns = <244144>; /* 8 32k periods */
+            opp-hz = /bits/ 64 <480000000>;
+
+            opp-microvolt-speed0 = <880000>;
+            opp-microvolt-speed1 = <820000>;
+            opp-microvolt-speed2 = <800000>;
+        };
+
+        opp-720000000 {
+            clock-latency-ns = <244144>; /* 8 32k periods */
+            opp-hz = /bits/ 64 <720000000>;
+
+            opp-microvolt-speed0 = <880000>;
+            opp-microvolt-speed1 = <820000>;
+            opp-microvolt-speed2 = <800000>;
+        };
+
+        opp-816000000 {
+            clock-latency-ns = <244144>; /* 8 32k periods */
+            opp-hz = /bits/ 64 <816000000>;
+
+            opp-microvolt-speed0 = <880000>;
+            opp-microvolt-speed1 = <820000>;
+            opp-microvolt-speed2 = <800000>;
+        };
+
+        opp-888000000 {
+            clock-latency-ns = <244144>; /* 8 32k periods */
+            opp-hz = /bits/ 64 <888000000>;
+
+            opp-microvolt-speed0 = <940000>;
+            opp-microvolt-speed1 = <820000>;
+            opp-microvolt-speed2 = <800000>;
+        };
+
+        opp-1080000000 {
+            clock-latency-ns = <244144>; /* 8 32k periods */
+            opp-hz = /bits/ 64 <1080000000>;
+
+            opp-microvolt-speed0 = <1060000>;
+            opp-microvolt-speed1 = <880000>;
+            opp-microvolt-speed2 = <840000>;
+        };
+
+        opp-1320000000 {
+            clock-latency-ns = <244144>; /* 8 32k periods */
+            opp-hz = /bits/ 64 <1320000000>;
+
+            opp-microvolt-speed0 = <1160000>;
+            opp-microvolt-speed1 = <940000>;
+            opp-microvolt-speed2 = <900000>;
+        };
+
+        opp-1488000000 {
+            clock-latency-ns = <244144>; /* 8 32k periods */
+            opp-hz = /bits/ 64 <1488000000>;
+
+            opp-microvolt-speed0 = <1160000>;
+            opp-microvolt-speed1 = <1000000>;
+            opp-microvolt-speed2 = <960000>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt b/Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt
deleted file mode 100644
index 7deae57..0000000
--- a/Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt
+++ /dev/null
@@ -1,167 +0,0 @@
-Allwinner Technologies, Inc. NVMEM CPUFreq and OPP bindings
-===================================
-
-For some SoCs, the CPU frequency subset and voltage value of each OPP
-varies based on the silicon variant in use. Allwinner Process Voltage
-Scaling Tables defines the voltage and frequency value based on the
-speedbin blown in the efuse combination. The sun50i-cpufreq-nvmem driver
-reads the efuse value from the SoC to provide the OPP framework with
-required information.
-
-Required properties:
---------------------
-In 'cpus' nodes:
-- operating-points-v2: Phandle to the operating-points-v2 table to use.
-
-In 'operating-points-v2' table:
-- compatible: Should be
-	- 'allwinner,sun50i-h6-operating-points'.
-- nvmem-cells: A phandle pointing to a nvmem-cells node representing the
-		efuse registers that has information about the speedbin
-		that is used to select the right frequency/voltage value
-		pair. Please refer the for nvmem-cells bindings
-		Documentation/devicetree/bindings/nvmem/nvmem.txt and
-		also examples below.
-
-In every OPP node:
-- opp-microvolt-<name>: Voltage in micro Volts.
-			At runtime, the platform can pick a <name> and
-			matching opp-microvolt-<name> property.
-			[See: opp.txt]
-			HW:		<name>:
-			sun50i-h6	speed0 speed1 speed2
-
-Example 1:
----------
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		cpu0: cpu@0 {
-			compatible = "arm,cortex-a53";
-			device_type = "cpu";
-			reg = <0>;
-			enable-method = "psci";
-			clocks = <&ccu CLK_CPUX>;
-			clock-latency-ns = <244144>; /* 8 32k periods */
-			operating-points-v2 = <&cpu_opp_table>;
-			#cooling-cells = <2>;
-		};
-
-		cpu1: cpu@1 {
-			compatible = "arm,cortex-a53";
-			device_type = "cpu";
-			reg = <1>;
-			enable-method = "psci";
-			clocks = <&ccu CLK_CPUX>;
-			clock-latency-ns = <244144>; /* 8 32k periods */
-			operating-points-v2 = <&cpu_opp_table>;
-			#cooling-cells = <2>;
-		};
-
-		cpu2: cpu@2 {
-			compatible = "arm,cortex-a53";
-			device_type = "cpu";
-			reg = <2>;
-			enable-method = "psci";
-			clocks = <&ccu CLK_CPUX>;
-			clock-latency-ns = <244144>; /* 8 32k periods */
-			operating-points-v2 = <&cpu_opp_table>;
-			#cooling-cells = <2>;
-		};
-
-		cpu3: cpu@3 {
-			compatible = "arm,cortex-a53";
-			device_type = "cpu";
-			reg = <3>;
-			enable-method = "psci";
-			clocks = <&ccu CLK_CPUX>;
-			clock-latency-ns = <244144>; /* 8 32k periods */
-			operating-points-v2 = <&cpu_opp_table>;
-			#cooling-cells = <2>;
-		};
-        };
-
-        cpu_opp_table: opp_table {
-                compatible = "allwinner,sun50i-h6-operating-points";
-                nvmem-cells = <&speedbin_efuse>;
-                opp-shared;
-
-                opp@480000000 {
-                        clock-latency-ns = <244144>; /* 8 32k periods */
-                        opp-hz = /bits/ 64 <480000000>;
-
-                        opp-microvolt-speed0 = <880000>;
-                        opp-microvolt-speed1 = <820000>;
-                        opp-microvolt-speed2 = <800000>;
-                };
-
-                opp@720000000 {
-                        clock-latency-ns = <244144>; /* 8 32k periods */
-                        opp-hz = /bits/ 64 <720000000>;
-
-                        opp-microvolt-speed0 = <880000>;
-                        opp-microvolt-speed1 = <820000>;
-                        opp-microvolt-speed2 = <800000>;
-                };
-
-                opp@816000000 {
-                        clock-latency-ns = <244144>; /* 8 32k periods */
-                        opp-hz = /bits/ 64 <816000000>;
-
-                        opp-microvolt-speed0 = <880000>;
-                        opp-microvolt-speed1 = <820000>;
-                        opp-microvolt-speed2 = <800000>;
-                };
-
-                opp@888000000 {
-                        clock-latency-ns = <244144>; /* 8 32k periods */
-                        opp-hz = /bits/ 64 <888000000>;
-
-                        opp-microvolt-speed0 = <940000>;
-                        opp-microvolt-speed1 = <820000>;
-                        opp-microvolt-speed2 = <800000>;
-                };
-
-                opp@1080000000 {
-                        clock-latency-ns = <244144>; /* 8 32k periods */
-                        opp-hz = /bits/ 64 <1080000000>;
-
-                        opp-microvolt-speed0 = <1060000>;
-                        opp-microvolt-speed1 = <880000>;
-                        opp-microvolt-speed2 = <840000>;
-                };
-
-                opp@1320000000 {
-                        clock-latency-ns = <244144>; /* 8 32k periods */
-                        opp-hz = /bits/ 64 <1320000000>;
-
-                        opp-microvolt-speed0 = <1160000>;
-                        opp-microvolt-speed1 = <940000>;
-                        opp-microvolt-speed2 = <900000>;
-                };
-
-                opp@1488000000 {
-                        clock-latency-ns = <244144>; /* 8 32k periods */
-                        opp-hz = /bits/ 64 <1488000000>;
-
-                        opp-microvolt-speed0 = <1160000>;
-                        opp-microvolt-speed1 = <1000000>;
-                        opp-microvolt-speed2 = <960000>;
-                };
-        };
-....
-soc {
-....
-	sid: sid@3006000 {
-		compatible = "allwinner,sun50i-h6-sid";
-		reg = <0x03006000 0x400>;
-		#address-cells = <1>;
-		#size-cells = <1>;
-		....
-		speedbin_efuse: speed@1c {
-			reg = <0x1c 4>;
-		};
-        };
-};
diff --git a/Documentation/devicetree/bindings/pci/arm,juno-r1-pcie.txt b/Documentation/devicetree/bindings/pci/arm,juno-r1-pcie.txt
deleted file mode 100644
index f7514c1..0000000
--- a/Documentation/devicetree/bindings/pci/arm,juno-r1-pcie.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-* ARM Juno R1 PCIe interface
-
-This PCIe host controller is based on PLDA XpressRICH3-AXI IP
-and thus inherits all the common properties defined in plda,xpressrich3-axi.txt
-as well as the base properties defined in host-generic-pci.txt.
-
-Required properties:
- - compatible: "arm,juno-r1-pcie"
- - dma-coherent: The host controller bridges the AXI transactions into PCIe bus
-   in a manner that makes the DMA operations to appear coherent to the CPUs.
diff --git a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
new file mode 100644
index 0000000..77d3e81
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/brcm,stb-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Brcmstb PCIe Host Controller Device Tree Bindings
+
+maintainers:
+  - Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+
+allOf:
+  - $ref: /schemas/pci/pci-bus.yaml#
+
+properties:
+  compatible:
+    const: brcm,bcm2711-pcie # The Raspberry Pi 4
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    minItems: 1
+    maxItems: 2
+    items:
+      - description: PCIe host controller
+      - description: builtin MSI controller
+
+  interrupt-names:
+    minItems: 1
+    maxItems: 2
+    items:
+      - const: pcie
+      - const: msi
+
+  ranges:
+    maxItems: 1
+
+  dma-ranges:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: sw_pcie
+
+  msi-controller:
+    description: Identifies the node as an MSI controller.
+
+  msi-parent:
+    description: MSI controller the device is capable of using.
+
+  brcm,enable-ssc:
+    description: Indicates usage of spread-spectrum clocking.
+    type: boolean
+
+required:
+  - reg
+  - dma-ranges
+  - "#interrupt-cells"
+  - interrupts
+  - interrupt-names
+  - interrupt-map-mask
+  - interrupt-map
+  - msi-controller
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    scb {
+            #address-cells = <2>;
+            #size-cells = <1>;
+            pcie0: pcie@7d500000 {
+                    compatible = "brcm,bcm2711-pcie";
+                    reg = <0x0 0x7d500000 0x9310>;
+                    device_type = "pci";
+                    #address-cells = <3>;
+                    #size-cells = <2>;
+                    #interrupt-cells = <1>;
+                    interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+                                 <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+                    interrupt-names = "pcie", "msi";
+                    interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+                    interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+                    msi-parent = <&pcie0>;
+                    msi-controller;
+                    ranges = <0x02000000 0x0 0xf8000000 0x6 0x00000000 0x0 0x04000000>;
+                    dma-ranges = <0x02000000 0x0 0x00000000 0x0 0x00000000 0x0 0x80000000>;
+                    brcm,enable-ssc;
+            };
+    };
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie-ecam.txt b/Documentation/devicetree/bindings/pci/designware-pcie-ecam.txt
deleted file mode 100644
index 515b2f9..0000000
--- a/Documentation/devicetree/bindings/pci/designware-pcie-ecam.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-* Synopsys DesignWare PCIe root complex in ECAM shift mode
-
-In some cases, firmware may already have configured the Synopsys DesignWare
-PCIe controller in RC mode with static ATU window mappings that cover all
-config, MMIO and I/O spaces in a [mostly] ECAM compatible fashion.
-In this case, there is no need for the OS to perform any low level setup
-of clocks, PHYs or device registers, nor is there any reason for the driver
-to reconfigure ATU windows for config and/or IO space accesses at runtime.
-
-In cases where the IP was synthesized with a minimum ATU window size of
-64 KB, it cannot be supported by the generic ECAM driver, because it
-requires special config space accessors that filter accesses to device #1
-and beyond on the first bus.
-
-Required properties:
-- compatible: "marvell,armada8k-pcie-ecam" or
-              "socionext,synquacer-pcie-ecam" or
-              "snps,dw-pcie-ecam" (must be preceded by a more specific match)
-
-Please refer to the binding document of "pci-host-ecam-generic" in the
-file host-generic-pci.txt for a description of the remaining required
-and optional properties.
-
-Example:
-
-    pcie1: pcie@7f000000 {
-        compatible = "socionext,synquacer-pcie-ecam", "snps,dw-pcie-ecam";
-        device_type = "pci";
-        reg = <0x0 0x7f000000 0x0 0xf00000>;
-        bus-range = <0x0 0xe>;
-        #address-cells = <3>;
-        #size-cells = <2>;
-        ranges = <0x1000000 0x00 0x00010000 0x00 0x7ff00000 0x0 0x00010000>,
-                 <0x2000000 0x00 0x70000000 0x00 0x70000000 0x0 0x0f000000>,
-                 <0x3000000 0x3f 0x00000000 0x3f 0x00000000 0x1 0x00000000>;
-
-        #interrupt-cells = <0x1>;
-        interrupt-map-mask = <0x0 0x0 0x0 0x0>;
-        interrupt-map = <0x0 0x0 0x0 0x0 &gic 0x0 0x0 0x0 182 0x4>;
-        msi-map = <0x0 &its 0x0 0x10000>;
-        dma-coherent;
-    };
diff --git a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
index 0dcb87d..d6796ef 100644
--- a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
@@ -41,45 +41,3 @@
 				 0x0 0 0 3 &mbigen_pcie 3 12
 				 0x0 0 0 4 &mbigen_pcie 4 13>;
 	};
-
-HiSilicon Hip06/Hip07 PCIe host bridge DT (almost-ECAM) description.
-
-Some BIOSes place the host controller in a mode where it is ECAM
-compliant for all devices other than the root complex. In such cases,
-the host controller should be described as below.
-
-The properties and their meanings are identical to those described in
-host-generic-pci.txt except as listed below.
-
-Properties of the host controller node that differ from
-host-generic-pci.txt:
-
-- compatible     : Must be "hisilicon,hip06-pcie-ecam", or
-		   "hisilicon,hip07-pcie-ecam"
-
-- reg            : Two entries: First the ECAM configuration space for any
-		   other bus underneath the root bus. Second, the base
-		   and size of the HiSilicon host bridge registers include
-		   the RC's own config space.
-
-Example:
-	pcie0: pcie@a0090000 {
-		compatible = "hisilicon,hip06-pcie-ecam";
-		reg = <0 0xb0000000 0 0x2000000>,  /*  ECAM configuration space */
-		      <0 0xa0090000 0 0x10000>; /* host bridge registers */
-		bus-range = <0  31>;
-		msi-map = <0x0000 &its_dsa 0x0000 0x2000>;
-		msi-map-mask = <0xffff>;
-		#address-cells = <3>;
-		#size-cells = <2>;
-		device_type = "pci";
-		dma-coherent;
-		ranges = <0x02000000 0 0xb2000000 0x0 0xb2000000 0 0x5ff0000
-			  0x01000000 0 0 0 0xb7ff0000 0 0x10000>;
-		#interrupt-cells = <1>;
-		interrupt-map-mask = <0xf800 0 0 7>;
-		interrupt-map = <0x0 0 0 1 &mbigen_pcie0 650 4
-				 0x0 0 0 2 &mbigen_pcie0 650 4
-				 0x0 0 0 3 &mbigen_pcie0 650 4
-				 0x0 0 0 4 &mbigen_pcie0 650 4>;
-	};
diff --git a/Documentation/devicetree/bindings/pci/host-generic-pci.txt b/Documentation/devicetree/bindings/pci/host-generic-pci.txt
deleted file mode 100644
index 614b594..0000000
--- a/Documentation/devicetree/bindings/pci/host-generic-pci.txt
+++ /dev/null
@@ -1,101 +0,0 @@
-* Generic PCI host controller
-
-Firmware-initialised PCI host controllers and PCI emulations, such as the
-virtio-pci implementations found in kvmtool and other para-virtualised
-systems, do not require driver support for complexities such as regulator
-and clock management. In fact, the controller may not even require the
-configuration of a control interface by the operating system, instead
-presenting a set of fixed windows describing a subset of IO, Memory and
-Configuration Spaces.
-
-Such a controller can be described purely in terms of the standardized device
-tree bindings communicated in pci.txt:
-
-
-Properties of the host controller node:
-
-- compatible     : Must be "pci-host-cam-generic" or "pci-host-ecam-generic"
-                   depending on the layout of configuration space (CAM vs
-                   ECAM respectively).
-
-- device_type    : Must be "pci".
-
-- ranges         : As described in IEEE Std 1275-1994, but must provide
-                   at least a definition of non-prefetchable memory. One
-                   or both of prefetchable Memory and IO Space may also
-                   be provided.
-
-- bus-range      : Optional property (also described in IEEE Std 1275-1994)
-                   to indicate the range of bus numbers for this controller.
-                   If absent, defaults to <0 255> (i.e. all buses).
-
-- #address-cells : Must be 3.
-
-- #size-cells    : Must be 2.
-
-- reg            : The Configuration Space base address and size, as accessed
-                   from the parent bus.  The base address corresponds to
-                   the first bus in the "bus-range" property.  If no
-                   "bus-range" is specified, this will be bus 0 (the default).
-
-Properties of the /chosen node:
-
-- linux,pci-probe-only
-                 : Optional property which takes a single-cell argument.
-                   If '0', then Linux will assign devices in its usual manner,
-                   otherwise it will not try to assign devices and instead use
-                   them as they are configured already.
-
-Configuration Space is assumed to be memory-mapped (as opposed to being
-accessed via an ioport) and laid out with a direct correspondence to the
-geography of a PCI bus address by concatenating the various components to
-form an offset.
-
-For CAM, this 24-bit offset is:
-
-        cfg_offset(bus, device, function, register) =
-                   bus << 16 | device << 11 | function << 8 | register
-
-While ECAM extends this by 4 bits to accommodate 4k of function space:
-
-        cfg_offset(bus, device, function, register) =
-                   bus << 20 | device << 15 | function << 12 | register
-
-Interrupt mapping is exactly as described in `Open Firmware Recommended
-Practice: Interrupt Mapping' and requires the following properties:
-
-- #interrupt-cells   : Must be 1
-
-- interrupt-map      : <see aforementioned specification>
-
-- interrupt-map-mask : <see aforementioned specification>
-
-
-Example:
-
-pci {
-    compatible = "pci-host-cam-generic"
-    device_type = "pci";
-    #address-cells = <3>;
-    #size-cells = <2>;
-    bus-range = <0x0 0x1>;
-
-    // CPU_PHYSICAL(2)  SIZE(2)
-    reg = <0x0 0x40000000  0x0 0x1000000>;
-
-    // BUS_ADDRESS(3)  CPU_PHYSICAL(2)  SIZE(2)
-    ranges = <0x01000000 0x0 0x01000000  0x0 0x01000000  0x0 0x00010000>,
-             <0x02000000 0x0 0x41000000  0x0 0x41000000  0x0 0x3f000000>;
-
-
-    #interrupt-cells = <0x1>;
-
-    // PCI_DEVICE(3)  INT#(1)  CONTROLLER(PHANDLE)  CONTROLLER_DATA(3)
-    interrupt-map = <  0x0 0x0 0x0  0x1  &gic  0x0 0x4 0x1
-                     0x800 0x0 0x0  0x1  &gic  0x0 0x5 0x1
-                    0x1000 0x0 0x0  0x1  &gic  0x0 0x6 0x1
-                    0x1800 0x0 0x0  0x1  &gic  0x0 0x7 0x1>;
-
-    // PCI_DEVICE(3)  INT#(1)
-    interrupt-map-mask = <0xf800 0x0 0x0  0x7>;
-}
diff --git a/Documentation/devicetree/bindings/pci/host-generic-pci.yaml b/Documentation/devicetree/bindings/pci/host-generic-pci.yaml
new file mode 100644
index 0000000..47353d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/host-generic-pci.yaml
@@ -0,0 +1,172 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/host-generic-pci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic PCI host controller
+
+maintainers:
+  - Will Deacon <will@kernel.org>
+
+description: |
+  Firmware-initialised PCI host controllers and PCI emulations, such as the
+  virtio-pci implementations found in kvmtool and other para-virtualised
+  systems, do not require driver support for complexities such as regulator
+  and clock management. In fact, the controller may not even require the
+  configuration of a control interface by the operating system, instead
+  presenting a set of fixed windows describing a subset of IO, Memory and
+  Configuration Spaces.
+
+  Configuration Space is assumed to be memory-mapped (as opposed to being
+  accessed via an ioport) and laid out with a direct correspondence to the
+  geography of a PCI bus address by concatenating the various components to
+  form an offset.
+
+  For CAM, this 24-bit offset is:
+
+          cfg_offset(bus, device, function, register) =
+                     bus << 16 | device << 11 | function << 8 | register
+
+  While ECAM extends this by 4 bits to accommodate 4k of function space:
+
+          cfg_offset(bus, device, function, register) =
+                     bus << 20 | device << 15 | function << 12 | register
+
+properties:
+  compatible:
+    description: Depends on the layout of configuration space (CAM vs ECAM
+      respectively). May also have more specific compatibles.
+    oneOf:
+      - description:
+          PCIe host controller in Arm Juno based on PLDA XpressRICH3-AXI IP
+        items:
+          - const: arm,juno-r1-pcie
+          - const: plda,xpressrich3-axi
+          - const: pci-host-ecam-generic
+      - description: |
+          ThunderX PCI host controller for pass-1.x silicon
+
+          Firmware-initialized PCI host controller to on-chip devices found on
+          some Cavium ThunderX processors.  These devices have ECAM-based config
+          access, but the BARs are all at fixed addresses.  We handle the fixed
+          addresses by synthesizing Enhanced Allocation (EA) capabilities for
+          these devices.
+        const: cavium,pci-host-thunder-ecam
+      - description:
+          Cavium ThunderX PEM firmware-initialized PCIe host controller
+        const: cavium,pci-host-thunder-pem
+      - description:
+          HiSilicon Hip06/Hip07 PCIe host bridge in almost-ECAM mode. Some
+          firmware places the host controller in a mode where it is ECAM
+          compliant for all devices other than the root complex.
+        enum:
+          - hisilicon,hip06-pcie-ecam
+          - hisilicon,hip07-pcie-ecam
+      - description: |
+          In some cases, firmware may already have configured the Synopsys
+          DesignWare PCIe controller in RC mode with static ATU window mappings
+          that cover all config, MMIO and I/O spaces in a [mostly] ECAM
+          compatible fashion. In this case, there is no need for the OS to
+          perform any low level setup of clocks, PHYs or device registers, nor
+          is there any reason for the driver to reconfigure ATU windows for
+          config and/or IO space accesses at runtime.
+
+          In cases where the IP was synthesized with a minimum ATU window size
+          of 64 KB, it cannot be supported by the generic ECAM driver, because
+          it requires special config space accessors that filter accesses to
+          device #1 and beyond on the first bus.
+        items:
+          - enum:
+              - marvell,armada8k-pcie-ecam
+              - socionext,synquacer-pcie-ecam
+          - const: snps,dw-pcie-ecam
+      - description:
+          CAM or ECAM compliant PCI host controllers without any quirks
+        enum:
+          - pci-host-cam-generic
+          - pci-host-ecam-generic
+
+  reg:
+    description:
+      The Configuration Space base address and size, as accessed from the parent
+      bus. The base address corresponds to the first bus in the "bus-range"
+      property. If no "bus-range" is specified, this will be bus 0 (the
+      default). Some host controllers have a 2nd non-compliant address range,
+      so 2 entries are allowed.
+    minItems: 1
+    maxItems: 2
+
+  ranges:
+    description:
+      As described in IEEE Std 1275-1994, but must provide at least a
+      definition of non-prefetchable memory. One or both of prefetchable Memory
+      and IO Space may also be provided.
+    minItems: 1
+    maxItems: 3
+
+  dma-coherent: true
+
+required:
+  - compatible
+  - reg
+  - ranges
+
+allOf:
+  - $ref: /schemas/pci/pci-bus.yaml#
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: arm,juno-r1-pcie
+    then:
+      required:
+        - dma-coherent
+
+  - if:
+      properties:
+        compatible:
+          not:
+            contains:
+              enum:
+                - cavium,pci-host-thunder-pem
+                - hisilicon,hip06-pcie-ecam
+                - hisilicon,hip07-pcie-ecam
+    then:
+      properties:
+        reg:
+          maxItems: 1
+
+examples:
+  - |
+
+    bus {
+        #address-cells = <2>;
+        #size-cells = <2>;
+        pcie@40000000 {
+            compatible = "pci-host-cam-generic";
+            device_type = "pci";
+            #address-cells = <3>;
+            #size-cells = <2>;
+            bus-range = <0x0 0x1>;
+
+            // CPU_PHYSICAL(2)  SIZE(2)
+            reg = <0x0 0x40000000  0x0 0x1000000>;
+
+            // BUS_ADDRESS(3)  CPU_PHYSICAL(2)  SIZE(2)
+            ranges = <0x01000000 0x0 0x01000000  0x0 0x01000000  0x0 0x00010000>,
+                     <0x02000000 0x0 0x41000000  0x0 0x41000000  0x0 0x3f000000>;
+
+            #interrupt-cells = <0x1>;
+
+            // PCI_DEVICE(3)  INT#(1)  CONTROLLER(PHANDLE)  CONTROLLER_DATA(3)
+            interrupt-map = <   0x0 0x0 0x0  0x1  &gic  0x0 0x4 0x1>,
+                            < 0x800 0x0 0x0  0x1  &gic  0x0 0x5 0x1>,
+                            <0x1000 0x0 0x0  0x1  &gic  0x0 0x6 0x1>,
+                            <0x1800 0x0 0x0  0x1  &gic  0x0 0x7 0x1>;
+
+            // PCI_DEVICE(3)  INT#(1)
+            interrupt-map-mask = <0xf800 0x0 0x0  0x7>;
+        };
+    };
+...
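As a quick sanity check of the ECAM formula carried over into the new schema,
with arbitrary values chosen only for illustration, bus 1, device 2,
function 0, register 0x10 map to

	cfg_offset(1, 2, 0, 0x10) = 1 << 20 | 2 << 15 | 0 << 12 | 0x10
	                          = 0x100000 | 0x10000 | 0x0 | 0x10
	                          = 0x110010

which falls in the second 1 MB slice of the configuration window, as expected
for bus 1.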
diff --git a/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml b/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
new file mode 100644
index 0000000..48a98da
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
@@ -0,0 +1,136 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/intel-gw-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: PCIe RC controller on Intel Gateway SoCs
+
+maintainers:
+  - Dilip Kota <eswara.kota@linux.intel.com>
+
+properties:
+  compatible:
+    items:
+      - const: intel,lgm-pcie
+      - const: snps,dw-pcie
+
+  device_type:
+    const: pci
+
+  "#address-cells":
+    const: 3
+
+  "#size-cells":
+    const: 2
+
+  reg:
+    items:
+      - description: Controller control and status registers.
+      - description: PCIe configuration registers.
+      - description: Controller application registers.
+
+  reg-names:
+    items:
+      - const: dbi
+      - const: config
+      - const: app
+
+  ranges:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  phys:
+    maxItems: 1
+
+  phy-names:
+    const: pcie
+
+  reset-gpios:
+    maxItems: 1
+
+  linux,pci-domain: true
+
+  num-lanes:
+    maximum: 2
+    description: Number of lanes to use for this port.
+
+  '#interrupt-cells':
+    const: 1
+
+  interrupt-map-mask:
+    description: Standard PCI IRQ mapping properties.
+
+  interrupt-map:
+    description: Standard PCI IRQ mapping properties.
+
+  max-link-speed:
+    description: Specify PCI Gen for link capability.
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - enum: [ 1, 2, 3, 4 ]
+      - default: 1
+
+  bus-range:
+    description: Range of bus numbers associated with this controller.
+
+  reset-assert-ms:
+    description: |
+      Delay after asserting reset to the PCIe device.
+    maximum: 500
+    default: 100
+
+required:
+  - compatible
+  - device_type
+  - "#address-cells"
+  - "#size-cells"
+  - reg
+  - reg-names
+  - ranges
+  - resets
+  - clocks
+  - phys
+  - phy-names
+  - reset-gpios
+  - '#interrupt-cells'
+  - interrupt-map
+  - interrupt-map-mask
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    pcie10: pcie@d0e00000 {
+      compatible = "intel,lgm-pcie", "snps,dw-pcie";
+      device_type = "pci";
+      #address-cells = <3>;
+      #size-cells = <2>;
+      reg = <0xd0e00000 0x1000>,
+            <0xd2000000 0x800000>,
+            <0xd0a41000 0x1000>;
+      reg-names = "dbi", "config", "app";
+      linux,pci-domain = <0>;
+      max-link-speed = <4>;
+      bus-range = <0x00 0x08>;
+      #interrupt-cells = <1>;
+      interrupt-map-mask = <0 0 0 0x7>;
+      interrupt-map = <0 0 0 1 &ioapic1 27 1>,
+                      <0 0 0 2 &ioapic1 28 1>,
+                      <0 0 0 3 &ioapic1 29 1>,
+                      <0 0 0 4 &ioapic1 30 1>;
+      ranges = <0x02000000 0 0xd4000000 0xd4000000 0 0x04000000>;
+      resets = <&rcu0 0x50 0>;
+      clocks = <&cgu0 120>;
+      phys = <&cb0phy0>;
+      phy-names = "pcie";
+      reset-assert-ms = <500>;
+      reset-gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
+      num-lanes = <2>;
+    };
diff --git a/Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt b/Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt
deleted file mode 100644
index f478874..0000000
--- a/Documentation/devicetree/bindings/pci/pci-thunder-ecam.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-* ThunderX PCI host controller for pass-1.x silicon
-
-Firmware-initialized PCI host controller to on-chip devices found on
-some Cavium ThunderX processors.  These devices have ECAM-based config
-access, but the BARs are all at fixed addresses.  We handle the fixed
-addresses by synthesizing Enhanced Allocation (EA) capabilities for
-these devices.
-
-The properties and their meanings are identical to those described in
-host-generic-pci.txt except as listed below.
-
-Properties of the host controller node that differ from
-host-generic-pci.txt:
-
-- compatible     : Must be "cavium,pci-host-thunder-ecam"
-
-Example:
-
-	pcie@84b000000000 {
-		compatible = "cavium,pci-host-thunder-ecam";
-		device_type = "pci";
-		msi-parent = <&its>;
-		msi-map = <0 &its 0x30000 0x10000>;
-		bus-range = <0 31>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		#stream-id-cells = <1>;
-		reg = <0x84b0 0x00000000 0 0x02000000>;  /* Configuration space */
-		ranges = <0x03000000 0x8180 0x00000000 0x8180 0x00000000 0x80 0x00000000>; /* mem ranges */
-	};
diff --git a/Documentation/devicetree/bindings/pci/pci-thunder-pem.txt b/Documentation/devicetree/bindings/pci/pci-thunder-pem.txt
deleted file mode 100644
index f131fae..0000000
--- a/Documentation/devicetree/bindings/pci/pci-thunder-pem.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-* ThunderX PEM PCIe host controller
-
-Firmware-initialized PCI host controller found on some Cavium
-ThunderX processors.
-
-The properties and their meanings are identical to those described in
-host-generic-pci.txt except as listed below.
-
-Properties of the host controller node that differ from
-host-generic-pci.txt:
-
-- compatible     : Must be "cavium,pci-host-thunder-pem"
-
-- reg            : Two entries: First the configuration space for down
-                   stream devices base address and size, as accessed
-                   from the parent bus. Second, the register bank of
-                   the PEM device PCIe bridge.
-
-Example:
-
-    pci@87e0,c2000000 {
-	compatible = "cavium,pci-host-thunder-pem";
-	device_type = "pci";
-	msi-parent = <&its>;
-	msi-map = <0 &its 0x10000 0x10000>;
-	bus-range = <0x8f 0xc7>;
-	#size-cells = <2>;
-	#address-cells = <3>;
-
-	reg = <0x8880 0x8f000000 0x0 0x39000000>,  /* Configuration space */
-	      <0x87e0 0xc2000000 0x0 0x00010000>; /* PEM space */
-	ranges = <0x01000000 0x00 0x00020000 0x88b0 0x00020000 0x00 0x00010000>, /* I/O */
-		 <0x03000000 0x00 0x10000000 0x8890 0x10000000 0x0f 0xf0000000>, /* mem64 */
-		 <0x43000000 0x10 0x00000000 0x88a0 0x00000000 0x10 0x00000000>, /* mem64-pref */
-		 <0x03000000 0x87e0 0xc2f00000 0x87e0 0xc2000000 0x00 0x00100000>; /* mem64 PEM BAR4 */
-
-	#interrupt-cells = <1>;
-	interrupt-map-mask = <0 0 0 7>;
-	interrupt-map = <0 0 0 1 &gic0 0 0 0 24 4>, /* INTA */
-			<0 0 0 2 &gic0 0 0 0 25 4>, /* INTB */
-			<0 0 0 3 &gic0 0 0 0 26 4>, /* INTC */
-			<0 0 0 4 &gic0 0 0 0 27 4>; /* INTD */
-    };
diff --git a/Documentation/devicetree/bindings/pci/plda,xpressrich3-axi.txt b/Documentation/devicetree/bindings/pci/plda,xpressrich3-axi.txt
deleted file mode 100644
index f3f75bf..0000000
--- a/Documentation/devicetree/bindings/pci/plda,xpressrich3-axi.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-* PLDA XpressRICH3-AXI host controller
-
-The PLDA XpressRICH3-AXI host controller can be configured in a manner that
-makes it compliant with the SBSA[1] standard published by ARM Ltd. For those
-scenarios, the host-generic-pci.txt bindings apply with the following additions
-to the compatible property:
-
-Required properties:
- - compatible: should contain "plda,xpressrich3-axi" to identify the IP used.
-
-
-[1] http://infocenter.arm.com/help/topic/com.arm.doc.den0029a/
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.txt b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
index ada80b0..981b4de 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie.txt
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
@@ -11,6 +11,7 @@
 			- "qcom,pcie-ipq4019" for ipq4019
 			- "qcom,pcie-ipq8074" for ipq8074
 			- "qcom,pcie-qcs404" for qcs404
+			- "qcom,pcie-sdm845" for sdm845
 
 - reg:
 	Usage: required
@@ -126,6 +127,18 @@
 			- "master_bus"	AXI Master clock
 			- "slave_bus"	AXI Slave clock
 
+- clock-names:
+	Usage: required for sdm845
+	Value type: <stringlist>
+	Definition: Should contain the following entries
+			- "aux"		Auxiliary clock
+			- "cfg"		Configuration clock
+			- "bus_master"	Master AXI clock
+			- "bus_slave"	Slave AXI clock
+			- "slave_q2a"	Slave Q2A clock
+			- "tbu"		PCIe TBU clock
+			- "pipe"	PIPE clock
+
 - resets:
 	Usage: required
 	Value type: <prop-encoded-array>
@@ -188,6 +201,12 @@
 			- "pwr"			PWR reset
 			- "ahb"			AHB reset
 
+- reset-names:
+	Usage: required for sdm845
+	Value type: <stringlist>
+	Definition: Should contain the following entries
+			- "pci"			PCIe core reset
+
 - power-domains:
 	Usage: required for apq8084 and msm8996/apq8096
 	Value type: <prop-encoded-array>
diff --git a/Documentation/devicetree/bindings/pci/versatile.txt b/Documentation/devicetree/bindings/pci/versatile.txt
deleted file mode 100644
index 0a702b1..0000000
--- a/Documentation/devicetree/bindings/pci/versatile.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-* ARM Versatile Platform Baseboard PCI interface
-
-PCI host controller found on the ARM Versatile PB board's FPGA.
-
-Required properties:
-- compatible: should contain "arm,versatile-pci" to identify the Versatile PCI
-  controller.
-- reg: base addresses and lengths of the PCI controller. There must be 3
-  entries:
-	- Versatile-specific registers
-	- Self Config space
-	- Config space
-- #address-cells: set to <3>
-- #size-cells: set to <2>
-- device_type: set to "pci"
-- bus-range: set to <0 0xff>
-- ranges: ranges for the PCI memory and I/O regions
-- #interrupt-cells: set to <1>
-- interrupt-map-mask and interrupt-map: standard PCI properties to define
-	the mapping of the PCI interface to interrupt numbers.
-
-Example:
-
-pci-controller@10001000 {
-	compatible = "arm,versatile-pci";
-	device_type = "pci";
-	reg = <0x10001000 0x1000
-	       0x41000000 0x10000
-	       0x42000000 0x100000>;
-	bus-range = <0 0xff>;
-	#address-cells = <3>;
-	#size-cells = <2>;
-	#interrupt-cells = <1>;
-
-	ranges = <0x01000000 0 0x00000000 0x43000000 0 0x00010000   /* downstream I/O */
-		  0x02000000 0 0x50000000 0x50000000 0 0x10000000   /* non-prefetchable memory */
-		  0x42000000 0 0x60000000 0x60000000 0 0x10000000>; /* prefetchable memory */
-
-	interrupt-map-mask = <0x1800 0 0 7>;
-	interrupt-map = <0x1800 0 0 1 &sic 28
-			 0x1800 0 0 2 &sic 29
-			 0x1800 0 0 3 &sic 30
-			 0x1800 0 0 4 &sic 27
-
-			 0x1000 0 0 1 &sic 27
-			 0x1000 0 0 2 &sic 28
-			 0x1000 0 0 3 &sic 29
-			 0x1000 0 0 4 &sic 30
-
-			 0x0800 0 0 1 &sic 30
-			 0x0800 0 0 2 &sic 27
-			 0x0800 0 0 3 &sic 28
-			 0x0800 0 0 4 &sic 29
-
-			 0x0000 0 0 1 &sic 29
-			 0x0000 0 0 2 &sic 30
-			 0x0000 0 0 3 &sic 27
-			 0x0000 0 0 4 &sic 28>;
-};
diff --git a/Documentation/devicetree/bindings/pci/versatile.yaml b/Documentation/devicetree/bindings/pci/versatile.yaml
new file mode 100644
index 0000000..07a48c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/versatile.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/versatile.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Versatile Platform Baseboard PCI interface
+
+maintainers:
+  - Rob Herring <robh@kernel.org>
+
+description: |+
+  PCI host controller found on the ARM Versatile PB board's FPGA.
+
+allOf:
+  - $ref: /schemas/pci/pci-bus.yaml#
+
+properties:
+  compatible:
+    const: arm,versatile-pci
+
+  reg:
+    items:
+      - description: Versatile-specific registers
+      - description: Self Config space
+      - description: Config space
+
+  ranges:
+    maxItems: 3
+
+  "#interrupt-cells": true
+
+  interrupt-map:
+    maxItems: 16
+
+  interrupt-map-mask:
+    items:
+      - const: 0x1800
+      - const: 0
+      - const: 0
+      - const: 7
+
+required:
+  - compatible
+  - reg
+  - ranges
+  - "#interrupt-cells"
+  - interrupt-map
+  - interrupt-map-mask
+
+examples:
+  - |
+    pci@10001000 {
+      compatible = "arm,versatile-pci";
+      device_type = "pci";
+      reg = <0x10001000 0x1000>,
+            <0x41000000 0x10000>,
+            <0x42000000 0x100000>;
+      bus-range = <0 0xff>;
+      #address-cells = <3>;
+      #size-cells = <2>;
+      #interrupt-cells = <1>;
+
+      ranges =
+          <0x01000000 0 0x00000000 0x43000000 0 0x00010000>,  /* downstream I/O */
+          <0x02000000 0 0x50000000 0x50000000 0 0x10000000>,  /* non-prefetchable memory */
+          <0x42000000 0 0x60000000 0x60000000 0 0x10000000>;  /* prefetchable memory */
+
+      interrupt-map-mask = <0x1800 0 0 7>;
+      interrupt-map = <0x1800 0 0 1 &sic 28>,
+          <0x1800 0 0 2 &sic 29>,
+          <0x1800 0 0 3 &sic 30>,
+          <0x1800 0 0 4 &sic 27>,
+
+          <0x1000 0 0 1 &sic 27>,
+          <0x1000 0 0 2 &sic 28>,
+          <0x1000 0 0 3 &sic 29>,
+          <0x1000 0 0 4 &sic 30>,
+
+          <0x0800 0 0 1 &sic 30>,
+          <0x0800 0 0 2 &sic 27>,
+          <0x0800 0 0 3 &sic 28>,
+          <0x0800 0 0 4 &sic 29>,
+
+          <0x0000 0 0 1 &sic 29>,
+          <0x0000 0 0 2 &sic 30>,
+          <0x0000 0 0 3 &sic 27>,
+          <0x0000 0 0 4 &sic 28>;
+    };
+
+
+...
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml
new file mode 100644
index 0000000..020ef9e
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/allwinner,sun4i-a10-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 USB PHY Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#phy-cells":
+    const: 1
+
+  compatible:
+    enum:
+      - allwinner,sun4i-a10-usb-phy
+      - allwinner,sun7i-a20-usb-phy
+
+  reg:
+    items:
+      - description: PHY Control registers
+      - description: PHY PMU1 registers
+      - description: PHY PMU2 registers
+
+  reg-names:
+    items:
+      - const: phy_ctrl
+      - const: pmu1
+      - const: pmu2
+
+  clocks:
+    maxItems: 1
+    description: USB PHY bus clock
+
+  clock-names:
+    const: usb_phy
+
+  resets:
+    items:
+      - description: USB OTG reset
+      - description: USB Host 1 Controller reset
+      - description: USB Host 2 Controller reset
+
+  reset-names:
+    items:
+      - const: usb0_reset
+      - const: usb1_reset
+      - const: usb2_reset
+
+  usb0_id_det-gpios:
+    description: GPIO to the USB OTG ID pin
+
+  usb0_vbus_det-gpios:
+    description: GPIO to the USB OTG VBUS detect pin
+
+  usb0_vbus_power-supply:
+    description: Power supply to detect the USB OTG VBUS
+
+  usb0_vbus-supply:
+    description: Regulator controlling USB OTG VBUS
+
+  usb1_vbus-supply:
+    description: Regulator controlling USB1 Host controller
+
+  usb2_vbus-supply:
+    description: Regulator controlling USB2 Host controller
+
+required:
+  - "#phy-cells"
+  - compatible
+  - clocks
+  - clock-names
+  - reg
+  - reg-names
+  - resets
+  - reset-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/clock/sun4i-a10-ccu.h>
+    #include <dt-bindings/reset/sun4i-a10-ccu.h>
+
+    usbphy: phy@01c13400 {
+        #phy-cells = <1>;
+        compatible = "allwinner,sun4i-a10-usb-phy";
+        reg = <0x01c13400 0x10>, <0x01c14800 0x4>, <0x01c1c800 0x4>;
+        reg-names = "phy_ctrl", "pmu1", "pmu2";
+        clocks = <&ccu CLK_USB_PHY>;
+        clock-names = "usb_phy";
+        resets = <&ccu RST_USB_PHY0>,
+                 <&ccu RST_USB_PHY1>,
+                 <&ccu RST_USB_PHY2>;
+        reset-names = "usb0_reset", "usb1_reset", "usb2_reset";
+        usb0_id_det-gpios = <&pio 7 19 GPIO_ACTIVE_HIGH>;
+        usb0_vbus_det-gpios = <&pio 7 22 GPIO_ACTIVE_HIGH>;
+        usb0_vbus-supply = <&reg_usb0_vbus>;
+        usb1_vbus-supply = <&reg_usb1_vbus>;
+        usb2_vbus-supply = <&reg_usb2_vbus>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun50i-a64-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun50i-a64-usb-phy.yaml
new file mode 100644
index 0000000..fd6e126
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun50i-a64-usb-phy.yaml
@@ -0,0 +1,106 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/allwinner,sun50i-a64-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A64 USB PHY Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#phy-cells":
+    const: 1
+
+  compatible:
+    const: allwinner,sun50i-a64-usb-phy
+
+  reg:
+    items:
+      - description: PHY Control registers
+      - description: PHY PMU0 registers
+      - description: PHY PMU1 registers
+
+  reg-names:
+    items:
+      - const: phy_ctrl
+      - const: pmu0
+      - const: pmu1
+
+  clocks:
+    items:
+      - description: USB OTG PHY bus clock
+      - description: USB Host 0 PHY bus clock
+
+  clock-names:
+    items:
+      - const: usb0_phy
+      - const: usb1_phy
+
+  resets:
+    items:
+      - description: USB OTG reset
+      - description: USB Host 1 Controller reset
+
+  reset-names:
+    items:
+      - const: usb0_reset
+      - const: usb1_reset
+
+  usb0_id_det-gpios:
+    description: GPIO to the USB OTG ID pin
+
+  usb0_vbus_det-gpios:
+    description: GPIO to the USB OTG VBUS detect pin
+
+  usb0_vbus_power-supply:
+    description: Power supply to detect the USB OTG VBUS
+
+  usb0_vbus-supply:
+    description: Regulator controlling USB OTG VBUS
+
+  usb1_vbus-supply:
+    description: Regulator controlling USB1 Host controller
+
+required:
+  - "#phy-cells"
+  - compatible
+  - clocks
+  - clock-names
+  - reg
+  - reg-names
+  - resets
+  - reset-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/clock/sun50i-a64-ccu.h>
+    #include <dt-bindings/reset/sun50i-a64-ccu.h>
+
+    phy@1c19400 {
+        #phy-cells = <1>;
+        compatible = "allwinner,sun50i-a64-usb-phy";
+        reg = <0x01c19400 0x14>,
+              <0x01c1a800 0x4>,
+              <0x01c1b800 0x4>;
+        reg-names = "phy_ctrl",
+                    "pmu0",
+                    "pmu1";
+        clocks = <&ccu CLK_USB_PHY0>,
+                 <&ccu CLK_USB_PHY1>;
+        clock-names = "usb0_phy",
+                      "usb1_phy";
+        resets = <&ccu RST_USB_PHY0>,
+                 <&ccu RST_USB_PHY1>;
+        reset-names = "usb0_reset",
+                      "usb1_reset";
+        usb0_id_det-gpios = <&pio 7 9 GPIO_ACTIVE_HIGH>; /* PH9 */
+        usb0_vbus_power-supply = <&usb_power_supply>;
+        usb0_vbus-supply = <&reg_drivevbus>;
+        usb1_vbus-supply = <&reg_usb1_vbus>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb-phy.yaml
new file mode 100644
index 0000000..7670411
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb-phy.yaml
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/allwinner,sun50i-h6-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner H6 USB PHY Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#phy-cells":
+    const: 1
+
+  compatible:
+    const: allwinner,sun50i-h6-usb-phy
+
+  reg:
+    items:
+      - description: PHY Control registers
+      - description: PHY PMU0 registers
+      - description: PHY PMU3 registers
+
+  reg-names:
+    items:
+      - const: phy_ctrl
+      - const: pmu0
+      - const: pmu3
+
+  clocks:
+    items:
+      - description: USB OTG PHY bus clock
+      - description: USB Host PHY bus clock
+
+  clock-names:
+    items:
+      - const: usb0_phy
+      - const: usb3_phy
+
+  resets:
+    items:
+      - description: USB OTG reset
+      - description: USB Host Controller reset
+
+  reset-names:
+    items:
+      - const: usb0_reset
+      - const: usb3_reset
+
+  usb0_id_det-gpios:
+    description: GPIO to the USB OTG ID pin
+
+  usb0_vbus_det-gpios:
+    description: GPIO to the USB OTG VBUS detect pin
+
+  usb0_vbus_power-supply:
+    description: Power supply to detect the USB OTG VBUS
+
+  usb0_vbus-supply:
+    description: Regulator controlling USB OTG VBUS
+
+  usb3_vbus-supply:
+    description: Regulator controlling USB3 Host controller
+
+required:
+  - "#phy-cells"
+  - compatible
+  - clocks
+  - clock-names
+  - reg
+  - reg-names
+  - resets
+  - reset-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/clock/sun50i-h6-ccu.h>
+    #include <dt-bindings/reset/sun50i-h6-ccu.h>
+
+    phy@5100400 {
+        #phy-cells = <1>;
+        compatible = "allwinner,sun50i-h6-usb-phy";
+        reg = <0x05100400 0x24>,
+              <0x05101800 0x4>,
+              <0x05311800 0x4>;
+        reg-names = "phy_ctrl",
+                    "pmu0",
+                    "pmu3";
+        clocks = <&ccu CLK_USB_PHY0>,
+                 <&ccu CLK_USB_PHY3>;
+        clock-names = "usb0_phy",
+                      "usb3_phy";
+        resets = <&ccu RST_USB_PHY0>,
+                 <&ccu RST_USB_PHY3>;
+        reset-names = "usb0_reset",
+                      "usb3_reset";
+        usb0_id_det-gpios = <&pio 2 6 GPIO_ACTIVE_HIGH>; /* PC6 */
+        usb0_vbus-supply = <&reg_vcc5v>;
+        usb3_vbus-supply = <&reg_vcc5v>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun5i-a13-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun5i-a13-usb-phy.yaml
new file mode 100644
index 0000000..9b31938
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun5i-a13-usb-phy.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/allwinner,sun5i-a13-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A13 USB PHY Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#phy-cells":
+    const: 1
+
+  compatible:
+    const: allwinner,sun5i-a13-usb-phy
+
+  reg:
+    items:
+      - description: PHY Control registers
+      - description: PHY PMU1 registers
+
+  reg-names:
+    items:
+      - const: phy_ctrl
+      - const: pmu1
+
+  clocks:
+    maxItems: 1
+    description: USB OTG PHY bus clock
+
+  clock-names:
+    const: usb_phy
+
+  resets:
+    items:
+      - description: USB OTG reset
+      - description: USB Host 1 Controller reset
+
+  reset-names:
+    items:
+      - const: usb0_reset
+      - const: usb1_reset
+
+  usb0_id_det-gpios:
+    description: GPIO to the USB OTG ID pin
+
+  usb0_vbus_det-gpios:
+    description: GPIO to the USB OTG VBUS detect pin
+
+  usb0_vbus_power-supply:
+    description: Power supply to detect the USB OTG VBUS
+
+  usb0_vbus-supply:
+    description: Regulator controlling USB OTG VBUS
+
+  usb1_vbus-supply:
+    description: Regulator controlling USB1 Host controller
+
+required:
+  - "#phy-cells"
+  - compatible
+  - clocks
+  - clock-names
+  - reg
+  - reg-names
+  - resets
+  - reset-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/clock/sun5i-ccu.h>
+    #include <dt-bindings/reset/sun5i-ccu.h>
+
+    phy@1c13400 {
+        #phy-cells = <1>;
+        compatible = "allwinner,sun5i-a13-usb-phy";
+        reg = <0x01c13400 0x10>, <0x01c14800 0x4>;
+        reg-names = "phy_ctrl", "pmu1";
+        clocks = <&ccu CLK_USB_PHY0>;
+        clock-names = "usb_phy";
+        resets = <&ccu RST_USB_PHY0>, <&ccu RST_USB_PHY1>;
+        reset-names = "usb0_reset", "usb1_reset";
+        usb0_id_det-gpios = <&pio 6 2 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PG2 */
+        usb0_vbus_det-gpios = <&pio 6 1 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PG1 */
+        usb0_vbus-supply = <&reg_usb0_vbus>;
+        usb1_vbus-supply = <&reg_usb1_vbus>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
index 230d74f..d0b541a 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
@@ -15,7 +15,11 @@
     const: 0
 
   compatible:
-    const: allwinner,sun6i-a31-mipi-dphy
+    oneOf:
+      - const: allwinner,sun6i-a31-mipi-dphy
+      - items:
+          - const: allwinner,sun50i-a64-mipi-dphy
+          - const: allwinner,sun6i-a31-mipi-dphy
 
   reg:
     maxItems: 1
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-usb-phy.yaml
new file mode 100644
index 0000000..b0ed01b
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-usb-phy.yaml
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/allwinner,sun6i-a31-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A31 USB PHY Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#phy-cells":
+    const: 1
+
+  compatible:
+    const: allwinner,sun6i-a31-usb-phy
+
+  reg:
+    items:
+      - description: PHY Control registers
+      - description: PHY PMU1 registers
+      - description: PHY PMU2 registers
+
+  reg-names:
+    items:
+      - const: phy_ctrl
+      - const: pmu1
+      - const: pmu2
+
+  clocks:
+    items:
+      - description: USB OTG PHY bus clock
+      - description: USB Host 0 PHY bus clock
+      - description: USB Host 1 PHY bus clock
+
+  clock-names:
+    items:
+      - const: usb0_phy
+      - const: usb1_phy
+      - const: usb2_phy
+
+  resets:
+    items:
+      - description: USB OTG reset
+      - description: USB Host 1 Controller reset
+      - description: USB Host 2 Controller reset
+
+  reset-names:
+    items:
+      - const: usb0_reset
+      - const: usb1_reset
+      - const: usb2_reset
+
+  usb0_id_det-gpios:
+    description: GPIO to the USB OTG ID pin
+
+  usb0_vbus_det-gpios:
+    description: GPIO to the USB OTG VBUS detect pin
+
+  usb0_vbus_power-supply:
+    description: Power supply to detect the USB OTG VBUS
+
+  usb0_vbus-supply:
+    description: Regulator controlling USB OTG VBUS
+
+  usb1_vbus-supply:
+    description: Regulator controlling USB1 Host controller
+
+  usb2_vbus-supply:
+    description: Regulator controlling USB2 Host controller
+
+required:
+  - "#phy-cells"
+  - compatible
+  - clocks
+  - clock-names
+  - reg
+  - reg-names
+  - resets
+  - reset-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/clock/sun6i-a31-ccu.h>
+    #include <dt-bindings/reset/sun6i-a31-ccu.h>
+
+    phy@1c19400 {
+        #phy-cells = <1>;
+        compatible = "allwinner,sun6i-a31-usb-phy";
+        reg = <0x01c19400 0x10>,
+              <0x01c1a800 0x4>,
+              <0x01c1b800 0x4>;
+        reg-names = "phy_ctrl",
+                    "pmu1",
+                    "pmu2";
+        clocks = <&ccu CLK_USB_PHY0>,
+                 <&ccu CLK_USB_PHY1>,
+                 <&ccu CLK_USB_PHY2>;
+        clock-names = "usb0_phy",
+                      "usb1_phy",
+                      "usb2_phy";
+        resets = <&ccu RST_USB_PHY0>,
+                 <&ccu RST_USB_PHY1>,
+                 <&ccu RST_USB_PHY2>;
+        reset-names = "usb0_reset",
+                      "usb1_reset",
+                      "usb2_reset";
+        usb0_id_det-gpios = <&pio 0 15 GPIO_ACTIVE_HIGH>; /* PA15 */
+        usb0_vbus_det-gpios = <&pio 0 16 GPIO_ACTIVE_HIGH>; /* PA16 */
+        usb0_vbus_power-supply = <&usb_power_supply>;
+        usb0_vbus-supply = <&reg_drivevbus>;
+        usb1_vbus-supply = <&reg_usb1_vbus>;
+        usb2_vbus-supply = <&reg_usb2_vbus>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun8i-a23-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun8i-a23-usb-phy.yaml
new file mode 100644
index 0000000..b067440
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun8i-a23-usb-phy.yaml
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/allwinner,sun8i-a23-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A23 USB PHY Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#phy-cells":
+    const: 1
+
+  compatible:
+    enum:
+      - allwinner,sun8i-a23-usb-phy
+      - allwinner,sun8i-a33-usb-phy
+
+  reg:
+    items:
+      - description: PHY Control registers
+      - description: PHY PMU1 registers
+
+  reg-names:
+    items:
+      - const: phy_ctrl
+      - const: pmu1
+
+  clocks:
+    items:
+      - description: USB OTG PHY bus clock
+      - description: USB Host 0 PHY bus clock
+
+  clock-names:
+    items:
+      - const: usb0_phy
+      - const: usb1_phy
+
+  resets:
+    items:
+      - description: USB OTG reset
+      - description: USB Host 1 Controller reset
+
+  reset-names:
+    items:
+      - const: usb0_reset
+      - const: usb1_reset
+
+  usb0_id_det-gpios:
+    description: GPIO to the USB OTG ID pin
+
+  usb0_vbus_det-gpios:
+    description: GPIO to the USB OTG VBUS detect pin
+
+  usb0_vbus_power-supply:
+    description: Power supply to detect the USB OTG VBUS
+
+  usb0_vbus-supply:
+    description: Regulator controlling USB OTG VBUS
+
+  usb1_vbus-supply:
+    description: Regulator controlling USB1 Host controller
+
+required:
+  - "#phy-cells"
+  - compatible
+  - clocks
+  - clock-names
+  - reg
+  - reg-names
+  - resets
+  - reset-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/clock/sun8i-a23-a33-ccu.h>
+    #include <dt-bindings/reset/sun8i-a23-a33-ccu.h>
+
+    phy@1c19400 {
+        #phy-cells = <1>;
+        compatible = "allwinner,sun8i-a23-usb-phy";
+        reg = <0x01c19400 0x10>, <0x01c1a800 0x4>;
+        reg-names = "phy_ctrl", "pmu1";
+        clocks = <&ccu CLK_USB_PHY0>,
+                 <&ccu CLK_USB_PHY1>;
+        clock-names = "usb0_phy",
+                      "usb1_phy";
+        resets = <&ccu RST_USB_PHY0>,
+                 <&ccu RST_USB_PHY1>;
+        reset-names = "usb0_reset",
+                      "usb1_reset";
+        usb0_id_det-gpios = <&pio 1 3 GPIO_ACTIVE_HIGH>; /* PB3 */
+        usb0_vbus_power-supply = <&usb_power_supply>;
+        usb0_vbus-supply = <&reg_drivevbus>;
+        usb1_vbus-supply = <&reg_usb1_vbus>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun8i-a83t-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun8i-a83t-usb-phy.yaml
new file mode 100644
index 0000000..48dc9c8
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun8i-a83t-usb-phy.yaml
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/allwinner,sun8i-a83t-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A83t USB PHY Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#phy-cells":
+    const: 1
+
+  compatible:
+    const: allwinner,sun8i-a83t-usb-phy
+
+  reg:
+    items:
+      - description: PHY Control registers
+      - description: PHY PMU1 registers
+      - description: PHY PMU2 registers
+
+  reg-names:
+    items:
+      - const: phy_ctrl
+      - const: pmu1
+      - const: pmu2
+
+  clocks:
+    items:
+      - description: USB OTG PHY bus clock
+      - description: USB Host 0 PHY bus clock
+      - description: USB Host 1 PHY bus clock
+      - description: USB HSIC 12MHz clock
+
+  clock-names:
+    items:
+      - const: usb0_phy
+      - const: usb1_phy
+      - const: usb2_phy
+      - const: usb2_hsic_12M
+
+  resets:
+    items:
+      - description: USB OTG reset
+      - description: USB Host 1 Controller reset
+      - description: USB Host 2 Controller reset
+
+  reset-names:
+    items:
+      - const: usb0_reset
+      - const: usb1_reset
+      - const: usb2_reset
+
+  usb0_id_det-gpios:
+    description: GPIO to the USB OTG ID pin
+
+  usb0_vbus_det-gpios:
+    description: GPIO to the USB OTG VBUS detect pin
+
+  usb0_vbus_power-supply:
+    description: Power supply to detect the USB OTG VBUS
+
+  usb0_vbus-supply:
+    description: Regulator controlling USB OTG VBUS
+
+  usb1_vbus-supply:
+    description: Regulator controlling USB1 Host controller
+
+  usb2_vbus-supply:
+    description: Regulator controlling USB2 Host controller
+
+required:
+  - "#phy-cells"
+  - compatible
+  - clocks
+  - clock-names
+  - reg
+  - reg-names
+  - resets
+  - reset-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/clock/sun8i-a83t-ccu.h>
+    #include <dt-bindings/reset/sun8i-a83t-ccu.h>
+
+    phy@1c19400 {
+        #phy-cells = <1>;
+        compatible = "allwinner,sun8i-a83t-usb-phy";
+        reg = <0x01c19400 0x10>,
+              <0x01c1a800 0x14>,
+              <0x01c1b800 0x14>;
+        reg-names = "phy_ctrl",
+                    "pmu1",
+                    "pmu2";
+        clocks = <&ccu CLK_USB_PHY0>,
+                 <&ccu CLK_USB_PHY1>,
+                 <&ccu CLK_USB_HSIC>,
+                 <&ccu CLK_USB_HSIC_12M>;
+        clock-names = "usb0_phy",
+                      "usb1_phy",
+                      "usb2_phy",
+                      "usb2_hsic_12M";
+        resets = <&ccu RST_USB_PHY0>,
+                 <&ccu RST_USB_PHY1>,
+                 <&ccu RST_USB_HSIC>;
+        reset-names = "usb0_reset",
+                      "usb1_reset",
+                      "usb2_reset";
+        usb0_id_det-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; /* PH11 */
+        usb0_vbus_power-supply = <&usb_power_supply>;
+        usb0_vbus-supply = <&reg_drivevbus>;
+        usb1_vbus-supply = <&reg_usb1_vbus>;
+        usb2_vbus-supply = <&reg_usb2_vbus>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml
new file mode 100644
index 0000000..60c3445
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml
@@ -0,0 +1,137 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/allwinner,sun8i-h3-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner H3 USB PHY Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#phy-cells":
+    const: 1
+
+  compatible:
+    const: allwinner,sun8i-h3-usb-phy
+
+  reg:
+    items:
+      - description: PHY Control registers
+      - description: PHY PMU0 registers
+      - description: PHY PMU1 registers
+      - description: PHY PMU2 registers
+      - description: PHY PMU3 registers
+
+  reg-names:
+    items:
+      - const: phy_ctrl
+      - const: pmu0
+      - const: pmu1
+      - const: pmu2
+      - const: pmu3
+
+  clocks:
+    items:
+      - description: USB OTG PHY bus clock
+      - description: USB Host 0 PHY bus clock
+      - description: USB Host 1 PHY bus clock
+      - description: USB Host 2 PHY bus clock
+
+  clock-names:
+    items:
+      - const: usb0_phy
+      - const: usb1_phy
+      - const: usb2_phy
+      - const: usb3_phy
+
+  resets:
+    items:
+      - description: USB OTG reset
+      - description: USB Host 1 Controller reset
+      - description: USB Host 2 Controller reset
+      - description: USB Host 3 Controller reset
+
+  reset-names:
+    items:
+      - const: usb0_reset
+      - const: usb1_reset
+      - const: usb2_reset
+      - const: usb3_reset
+
+  usb0_id_det-gpios:
+    description: GPIO to the USB OTG ID pin
+
+  usb0_vbus_det-gpios:
+    description: GPIO to the USB OTG VBUS detect pin
+
+  usb0_vbus_power-supply:
+    description: Power supply to detect the USB OTG VBUS
+
+  usb0_vbus-supply:
+    description: Regulator controlling USB OTG VBUS
+
+  usb1_vbus-supply:
+    description: Regulator controlling USB1 Host controller
+
+  usb2_vbus-supply:
+    description: Regulator controlling USB2 Host controller
+
+  usb3_vbus-supply:
+    description: Regulator controlling USB3 Host controller
+
+required:
+  - "#phy-cells"
+  - compatible
+  - clocks
+  - clock-names
+  - reg
+  - reg-names
+  - resets
+  - reset-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/clock/sun8i-h3-ccu.h>
+    #include <dt-bindings/reset/sun8i-h3-ccu.h>
+
+    phy@1c19400 {
+        #phy-cells = <1>;
+        compatible = "allwinner,sun8i-h3-usb-phy";
+        reg = <0x01c19400 0x2c>,
+              <0x01c1a800 0x4>,
+              <0x01c1b800 0x4>,
+              <0x01c1c800 0x4>,
+              <0x01c1d800 0x4>;
+        reg-names = "phy_ctrl",
+                    "pmu0",
+                    "pmu1",
+                    "pmu2",
+                    "pmu3";
+        clocks = <&ccu CLK_USB_PHY0>,
+                 <&ccu CLK_USB_PHY1>,
+                 <&ccu CLK_USB_PHY2>,
+                 <&ccu CLK_USB_PHY3>;
+        clock-names = "usb0_phy",
+                      "usb1_phy",
+                      "usb2_phy",
+                      "usb3_phy";
+        resets = <&ccu RST_USB_PHY0>,
+                 <&ccu RST_USB_PHY1>,
+                 <&ccu RST_USB_PHY2>,
+                 <&ccu RST_USB_PHY3>;
+        reset-names = "usb0_reset",
+                      "usb1_reset",
+                      "usb2_reset",
+                      "usb3_reset";
+        usb0_id_det-gpios = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+        usb0_vbus-supply = <&reg_usb0_vbus>;
+        usb1_vbus-supply = <&reg_usb1_vbus>;
+        usb2_vbus-supply = <&reg_usb2_vbus>;
+        usb3_vbus-supply = <&reg_usb3_vbus>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun8i-r40-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun8i-r40-usb-phy.yaml
new file mode 100644
index 0000000..a2bb367
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun8i-r40-usb-phy.yaml
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/allwinner,sun8i-r40-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner R40 USB PHY Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#phy-cells":
+    const: 1
+
+  compatible:
+    const: allwinner,sun8i-r40-usb-phy
+
+  reg:
+    items:
+      - description: PHY Control registers
+      - description: PHY PMU0 registers
+      - description: PHY PMU1 registers
+      - description: PHY PMU2 registers
+
+  reg-names:
+    items:
+      - const: phy_ctrl
+      - const: pmu0
+      - const: pmu1
+      - const: pmu2
+
+  clocks:
+    items:
+      - description: USB OTG PHY bus clock
+      - description: USB Host 0 PHY bus clock
+      - description: USB Host 1 PHY bus clock
+
+  clock-names:
+    items:
+      - const: usb0_phy
+      - const: usb1_phy
+      - const: usb2_phy
+
+  resets:
+    items:
+      - description: USB OTG reset
+      - description: USB Host 1 Controller reset
+      - description: USB Host 2 Controller reset
+
+  reset-names:
+    items:
+      - const: usb0_reset
+      - const: usb1_reset
+      - const: usb2_reset
+
+  usb0_id_det-gpios:
+    description: GPIO to the USB OTG ID pin
+
+  usb0_vbus_det-gpios:
+    description: GPIO to the USB OTG VBUS detect pin
+
+  usb0_vbus_power-supply:
+    description: Power supply to detect the USB OTG VBUS
+
+  usb0_vbus-supply:
+    description: Regulator controlling USB OTG VBUS
+
+  usb1_vbus-supply:
+    description: Regulator controlling USB1 Host controller
+
+  usb2_vbus-supply:
+    description: Regulator controlling USB2 Host controller
+
+required:
+  - "#phy-cells"
+  - compatible
+  - clocks
+  - clock-names
+  - reg
+  - reg-names
+  - resets
+  - reset-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/clock/sun8i-r40-ccu.h>
+    #include <dt-bindings/reset/sun8i-r40-ccu.h>
+
+    phy@1c13400 {
+        #phy-cells = <1>;
+        compatible = "allwinner,sun8i-r40-usb-phy";
+        reg = <0x01c13400 0x14>,
+              <0x01c14800 0x4>,
+              <0x01c19800 0x4>,
+              <0x01c1c800 0x4>;
+        reg-names = "phy_ctrl",
+                    "pmu0",
+                    "pmu1",
+                    "pmu2";
+        clocks = <&ccu CLK_USB_PHY0>,
+                 <&ccu CLK_USB_PHY1>,
+                 <&ccu CLK_USB_PHY2>;
+        clock-names = "usb0_phy",
+                      "usb1_phy",
+                      "usb2_phy";
+        resets = <&ccu RST_USB_PHY0>,
+                 <&ccu RST_USB_PHY1>,
+                 <&ccu RST_USB_PHY2>;
+        reset-names = "usb0_reset",
+                      "usb1_reset",
+                      "usb2_reset";
+        usb1_vbus-supply = <&reg_vcc5v0>;
+        usb2_vbus-supply = <&reg_vcc5v0>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun8i-v3s-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun8i-v3s-usb-phy.yaml
new file mode 100644
index 0000000..eadfd0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun8i-v3s-usb-phy.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/allwinner,sun8i-v3s-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner V3s USB PHY Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#phy-cells":
+    const: 1
+
+  compatible:
+    const: allwinner,sun8i-v3s-usb-phy
+
+  reg:
+    items:
+      - description: PHY Control registers
+      - description: PHY PMU0 registers
+
+  reg-names:
+    items:
+      - const: phy_ctrl
+      - const: pmu0
+
+  clocks:
+    maxItems: 1
+    description: USB OTG PHY bus clock
+
+  clock-names:
+    const: usb0_phy
+
+  resets:
+    maxItems: 1
+    description: USB OTG reset
+
+  reset-names:
+    const: usb0_reset
+
+  usb0_id_det-gpios:
+    description: GPIO to the USB OTG ID pin
+
+  usb0_vbus_det-gpios:
+    description: GPIO to the USB OTG VBUS detect pin
+
+  usb0_vbus_power-supply:
+    description: Power supply to detect the USB OTG VBUS
+
+  usb0_vbus-supply:
+    description: Regulator controlling USB OTG VBUS
+
+required:
+  - "#phy-cells"
+  - compatible
+  - clocks
+  - clock-names
+  - reg
+  - reg-names
+  - resets
+  - reset-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/clock/sun8i-v3s-ccu.h>
+    #include <dt-bindings/reset/sun8i-v3s-ccu.h>
+
+    phy@1c19400 {
+        #phy-cells = <1>;
+        compatible = "allwinner,sun8i-v3s-usb-phy";
+        reg = <0x01c19400 0x2c>,
+              <0x01c1a800 0x4>;
+        reg-names = "phy_ctrl",
+                    "pmu0";
+        clocks = <&ccu CLK_USB_PHY0>;
+        clock-names = "usb0_phy";
+        resets = <&ccu RST_USB_PHY0>;
+        reset-names = "usb0_reset";
+        usb0_id_det-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun9i-a80-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun9i-a80-usb-phy.yaml
new file mode 100644
index 0000000..ded7d6f
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun9i-a80-usb-phy.yaml
@@ -0,0 +1,135 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/allwinner,sun9i-a80-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A80 USB PHY Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#phy-cells":
+    const: 0
+
+  compatible:
+    const: allwinner,sun9i-a80-usb-phy
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    anyOf:
+      - description: Main PHY Clock
+
+      - items:
+          - description: Main PHY clock
+          - description: HSIC 12MHz clock
+          - description: HSIC 480MHz clock
+
+  clock-names:
+    oneOf:
+      - const: phy
+
+      - items:
+          - const: phy
+          - const: hsic_12M
+          - const: hsic_480M
+
+  resets:
+    anyOf:
+      - description: Normal USB PHY reset
+
+      - items:
+          - description: Normal USB PHY reset
+          - description: HSIC Reset
+
+  reset-names:
+    oneOf:
+      - const: phy
+
+      - items:
+          - const: phy
+          - const: hsic
+
+  phy_type:
+    const: hsic
+    description:
+      When absent, the PHY type will be assumed to be normal USB.
+
+  phy-supply:
+    description:
+      Regulator that powers VBUS
+
+required:
+  - "#phy-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+
+additionalProperties: false
+
+if:
+  properties:
+    phy_type:
+      const: hsic
+
+  required:
+    - phy_type
+
+then:
+  properties:
+    clocks:
+      maxItems: 3
+
+    clock-names:
+      maxItems: 3
+
+    resets:
+      maxItems: 2
+
+    reset-names:
+      maxItems: 2
+
+examples:
+  - |
+    #include <dt-bindings/clock/sun9i-a80-usb.h>
+    #include <dt-bindings/reset/sun9i-a80-usb.h>
+
+    usbphy1: phy@a00800 {
+        compatible = "allwinner,sun9i-a80-usb-phy";
+        reg = <0x00a00800 0x4>;
+        clocks = <&usb_clocks CLK_USB0_PHY>;
+        clock-names = "phy";
+        resets = <&usb_clocks RST_USB0_PHY>;
+        reset-names = "phy";
+        phy-supply = <&reg_usb1_vbus>;
+        #phy-cells = <0>;
+    };
+
+  - |
+    #include <dt-bindings/clock/sun9i-a80-usb.h>
+    #include <dt-bindings/reset/sun9i-a80-usb.h>
+
+    usbphy3: phy@a02800 {
+        compatible = "allwinner,sun9i-a80-usb-phy";
+        reg = <0x00a02800 0x4>;
+        clocks = <&usb_clocks CLK_USB2_PHY>,
+                 <&usb_clocks CLK_USB_HSIC>,
+                 <&usb_clocks CLK_USB2_HSIC>;
+        clock-names = "phy",
+                      "hsic_12M",
+                      "hsic_480M";
+        resets = <&usb_clocks RST_USB2_PHY>,
+                 <&usb_clocks RST_USB2_HSIC>;
+        reset-names = "phy",
+                      "hsic";
+        phy_type = "hsic";
+        phy-supply = <&reg_usb3_vbus>;
+        #phy-cells = <0>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.txt b/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.txt
index 24a0d06..698aacb 100644
--- a/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.txt
+++ b/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.txt
@@ -1,30 +1,49 @@
 Broadcom STB USB PHY
 
 Required properties:
- - compatible: brcm,brcmstb-usb-phy
- - reg: two offset and length pairs.
-	The first pair specifies a manditory set of memory mapped
-	registers used for general control of the PHY.
-	The second pair specifies optional registers used by some of
-	the SoCs that support USB 3.x
- - #phy-cells: Shall be 1 as it expects one argument for setting
-	       the type of the PHY. Possible values are:
-	       - PHY_TYPE_USB2 for USB1.1/2.0 PHY
-	       - PHY_TYPE_USB3 for USB3.x PHY
+- compatible: should be one of
+	"brcm,brcmstb-usb-phy"
+	"brcm,bcm7216-usb-phy"
+	"brcm,bcm7211-usb-phy"
+
+- reg and reg-names property requirements are specific to the
+  compatible string.
+  "brcm,brcmstb-usb-phy":
+    - reg: 1 or 2 offset and length pairs. One for the base CTRL registers
+           and an optional pair for systems with USB 3.x support
+    - reg-names: not specified
+  "brcm,bcm7216-usb-phy":
+    - reg: 3 offset and length pairs for CTRL, XHCI_EC and XHCI_GBL
+           registers
+    - reg-names: "ctrl", "xhci_ec", "xhci_gbl"
+  "brcm,bcm7211-usb-phy":
+    - reg: 5 offset and length pairs for CTRL, XHCI_EC, XHCI_GBL,
+           USB_PHY and USB_MDIO registers and an optional pair
+	   for the BDC registers
+    - reg-names: "ctrl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
+
+- #phy-cells: Shall be 1 as it expects one argument for setting
+	      the type of the PHY. Possible values are:
+	      - PHY_TYPE_USB2 for USB1.1/2.0 PHY
+	      - PHY_TYPE_USB3 for USB3.x PHY
 
 Optional Properties:
 - clocks : clock phandles.
 - clock-names: String, clock name.
+- interrupts: wakeup interrupt
+- interrupt-names: "wakeup"
 - brcm,ipp: Boolean, Invert Port Power.
   Possible values are: 0 (Don't invert), 1 (Invert)
 - brcm,ioc: Boolean, Invert Over Current detection.
   Possible values are: 0 (Don't invert), 1 (Invert)
-NOTE: one or both of the following two properties must be set
-- brcm,has-xhci: Boolean indicating the phy has an XHCI phy.
-- brcm,has-eohci: Boolean indicating the phy has an EHCI/OHCI phy.
 - dr_mode: String, PHY Device mode.
   Possible values are: "host", "peripheral ", "drd" or "typec-pd"
   If this property is not defined, the phy will default to "host" mode.
+- brcm,syscon-piarbctl: phandle to syscon for handling config registers
+NOTE: one or both of the following two properties must be set
+- brcm,has-xhci: Boolean indicating the phy has an XHCI phy.
+- brcm,has-eohci: Boolean indicating the phy has an EHCI/OHCI phy.
+
 
 Example:
 
@@ -41,3 +60,27 @@
 	clocks = <&usb20>, <&usb30>;
 	clock-names = "sw_usb", "sw_usb3";
 };
+
+usb-phy@29f0200 {
+	reg = <0x29f0200 0x200>,
+		<0x29c0880 0x30>,
+		<0x29cc100 0x534>,
+		<0x2808000 0x24>,
+		<0x2980080 0x8>;
+	reg-names = "ctrl",
+		"xhci_ec",
+		"xhci_gbl",
+		"usb_phy",
+		"usb_mdio";
+	brcm,ioc = <0x0>;
+	brcm,ipp = <0x0>;
+	compatible = "brcm,bcm7211-usb-phy";
+	interrupts = <0x30>;
+	interrupt-parent = <&vpu_intr1_nosec_intc>;
+	interrupt-names = "wake";
+	#phy-cells = <0x1>;
+	brcm,has-xhci;
+	syscon-piarbctl = <&syscon_piarbctl>;
+	clocks = <&scmi_clk 256>;
+	clock-names = "sw_usb";
+};
diff --git a/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt b/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
index b640845..c03ad21 100644
--- a/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
+++ b/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
@@ -2,6 +2,7 @@
 
 Required properties:
 - compatible: should be one or more of
+     "brcm,bcm7216-sata-phy"
      "brcm,bcm7425-sata-phy"
      "brcm,bcm7445-sata-phy"
      "brcm,iproc-ns2-sata-phy"
diff --git a/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml b/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml
new file mode 100644
index 0000000..0ccee64
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/intel,lgm-emmc-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel Lightning Mountain (LGM) eMMC PHY Device Tree Bindings
+
+maintainers:
+  - Ramuthevar Vadivel Murugan <vadivel.muruganx.ramuthevar@linux.intel.com>
+
+description: |+
+  Bindings for the eMMC PHY on Intel's Lightning Mountain SoC. The syscon
+  node is used to reference the base address of the eMMC PHY registers.
+
+  The eMMC PHY node should be the child of a syscon node with the
+  required properties:
+
+  - compatible:         Should be one of the following:
+                        "intel,lgm-syscon", "syscon"
+  - reg:
+      maxItems: 1
+
+properties:
+  compatible:
+      const: intel,lgm-emmc-phy
+
+  "#phy-cells":
+    const: 0
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+required:
+  - "#phy-cells"
+  - compatible
+  - reg
+  - clocks
+
+examples:
+  - |
+    sysconf: chiptop@e0200000 {
+      compatible = "intel,lgm-syscon", "syscon";
+      reg = <0xe0200000 0x100>;
+      #address-cells = <1>;
+      #size-cells = <1>;
+
+      emmc_phy: emmc-phy@a8 {
+        compatible = "intel,lgm-emmc-phy";
+        reg = <0x00a8 0x10>;
+        clocks = <&emmc>;
+        #phy-cells = <0>;
+      };
+    };
+...
diff --git a/Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml
new file mode 100644
index 0000000..5ab4361
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright 2019 Lubomir Rintel <lkundrak@v3.sk>
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/marvell,mmp3-hsic-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Marvell MMP3 HSIC PHY
+
+maintainers:
+  - Lubomir Rintel <lkundrak@v3.sk>
+
+properties:
+  compatible:
+    const: marvell,mmp3-hsic-phy
+
+  reg:
+    maxItems: 1
+    description: base address of the device
+
+  reset-gpios:
+    maxItems: 1
+    description: GPIO connected to reset
+
+  "#phy-cells":
+    const: 0
+
+required:
+  - compatible
+  - reg
+  - reset-gpios
+  - "#phy-cells"
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    hsic-phy@f0001800 {
+            compatible = "marvell,mmp3-hsic-phy";
+            reg = <0xf0001800 0x40>;
+            reset-gpios = <&gpio 63 GPIO_ACTIVE_HIGH>;
+            #phy-cells = <0>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-sierra.txt b/Documentation/devicetree/bindings/phy/phy-cadence-sierra.txt
index 6e1b47b..03f5939 100644
--- a/Documentation/devicetree/bindings/phy/phy-cadence-sierra.txt
+++ b/Documentation/devicetree/bindings/phy/phy-cadence-sierra.txt
@@ -2,21 +2,24 @@
 -----------------------
 
 Required properties:
-- compatible:	cdns,sierra-phy-t0
-- clocks:	Must contain an entry in clock-names.
-		See ../clocks/clock-bindings.txt for details.
-- clock-names:	Must be "phy_clk"
+- compatible:	Must be "cdns,sierra-phy-t0" for Sierra in Cadence platform
+		Must be "ti,sierra-phy-t0" for Sierra in TI's J721E SoC.
 - resets:	Must contain an entry for each in reset-names.
 		See ../reset/reset.txt for details.
 - reset-names:	Must include "sierra_reset" and "sierra_apb".
 		"sierra_reset" must control the reset line to the PHY.
 		"sierra_apb" must control the reset line to the APB PHY
-		interface.
+		interface ("sierra_apb" is optional).
 - reg:		register range for the PHY.
 - #address-cells: Must be 1
 - #size-cells:	Must be 0
 
 Optional properties:
+- clocks:		Must contain an entry in clock-names.
+			See ../clocks/clock-bindings.txt for details.
+- clock-names:		Must contain "cmn_refclk_dig_div" and
+			"cmn_refclk1_dig_div" for configuring the frequency of
+			the clock to the lanes. "phy_clk" is deprecated.
 - cdns,autoconf:	A boolean property whose presence indicates that the
 			PHY registers will be configured by hardware. If not
 			present, all sub-node optional properties must be
diff --git a/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml b/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml
index bb0da87..72aca81 100644
--- a/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml
+++ b/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml
@@ -13,9 +13,6 @@
   "#phy-cells":
     const: 0
 
-  "#clock-cells":
-    const: 0
-
   compatible:
     enum:
       - rockchip,px30-dsi-dphy
@@ -49,7 +46,6 @@
 
 required:
   - "#phy-cells"
-  - "#clock-cells"
   - compatible
   - reg
   - clocks
@@ -62,11 +58,10 @@
 examples:
   - |
     dsi_dphy: phy@ff2e0000 {
-        compatible = "rockchip,px30-video-phy";
+        compatible = "rockchip,px30-dsi-dphy";
         reg = <0x0 0xff2e0000 0x0 0x10000>;
         clocks = <&pmucru 13>, <&cru 12>;
         clock-names = "ref", "pclk";
-        #clock-cells = <0>;
         resets = <&cru 12>;
         reset-names = "apb";
         #phy-cells = <0>;
diff --git a/Documentation/devicetree/bindings/phy/samsung-phy.txt b/Documentation/devicetree/bindings/phy/samsung-phy.txt
index 1c40ccd..7510830 100644
--- a/Documentation/devicetree/bindings/phy/samsung-phy.txt
+++ b/Documentation/devicetree/bindings/phy/samsung-phy.txt
@@ -1,4 +1,4 @@
-Samsung S5P/EXYNOS SoC series MIPI CSIS/DSIM DPHY
+Samsung S5P/Exynos SoC series MIPI CSIS/DSIM DPHY
 -------------------------------------------------
 
 Required properties:
@@ -27,7 +27,7 @@
 supports additional fifth PHY:
   4 - MIPI CSIS 2.
 
-Samsung EXYNOS SoC series Display Port PHY
+Samsung Exynos SoC series Display Port PHY
 -------------------------------------------------
 
 Required properties:
@@ -38,7 +38,7 @@
 		      control pmu registers for power isolation.
 - #phy-cells : from the generic PHY bindings, must be 0;
 
-Samsung S5P/EXYNOS SoC series USB PHY
+Samsung S5P/Exynos SoC series USB PHY
 -------------------------------------------------
 
 Required properties:
diff --git a/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt b/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
deleted file mode 100644
index f2e120a..0000000
--- a/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-Allwinner sun4i USB PHY
------------------------
-
-Required properties:
-- compatible : should be one of
-  * allwinner,sun4i-a10-usb-phy
-  * allwinner,sun5i-a13-usb-phy
-  * allwinner,sun6i-a31-usb-phy
-  * allwinner,sun7i-a20-usb-phy
-  * allwinner,sun8i-a23-usb-phy
-  * allwinner,sun8i-a33-usb-phy
-  * allwinner,sun8i-a83t-usb-phy
-  * allwinner,sun8i-h3-usb-phy
-  * allwinner,sun8i-r40-usb-phy
-  * allwinner,sun8i-v3s-usb-phy
-  * allwinner,sun50i-a64-usb-phy
-  * allwinner,sun50i-h6-usb-phy
-- reg : a list of offset + length pairs
-- reg-names :
-  * "phy_ctrl"
-  * "pmu0" for H3, V3s, A64 or H6
-  * "pmu1"
-  * "pmu2" for sun4i, sun6i, sun7i, sun8i-a83t or sun8i-h3
-  * "pmu3" for sun8i-h3 or sun50i-h6
-- #phy-cells : from the generic phy bindings, must be 1
-- clocks : phandle + clock specifier for the phy clocks
-- clock-names :
-  * "usb_phy" for sun4i, sun5i or sun7i
-  * "usb0_phy", "usb1_phy" and "usb2_phy" for sun6i
-  * "usb0_phy", "usb1_phy" for sun8i
-  * "usb0_phy", "usb1_phy", "usb2_phy" and "usb2_hsic_12M" for sun8i-a83t
-  * "usb0_phy", "usb1_phy", "usb2_phy" and "usb3_phy" for sun8i-h3
-  * "usb0_phy" and "usb3_phy" for sun50i-h6
-- resets : a list of phandle + reset specifier pairs
-- reset-names :
-  * "usb0_reset"
-  * "usb1_reset"
-  * "usb2_reset" for sun4i, sun6i, sun7i, sun8i-a83t or sun8i-h3
-  * "usb3_reset" for sun8i-h3 and sun50i-h6
-
-Optional properties:
-- usb0_id_det-gpios : gpio phandle for reading the otg id pin value
-- usb0_vbus_det-gpios : gpio phandle for detecting the presence of usb0 vbus
-- usb0_vbus_power-supply: power-supply phandle for usb0 vbus presence detect
-- usb0_vbus-supply : regulator phandle for controller usb0 vbus
-- usb1_vbus-supply : regulator phandle for controller usb1 vbus
-- usb2_vbus-supply : regulator phandle for controller usb2 vbus
-- usb3_vbus-supply : regulator phandle for controller usb3 vbus
-
-Example:
-	usbphy: phy@01c13400 {
-		#phy-cells = <1>;
-		compatible = "allwinner,sun4i-a10-usb-phy";
-		/* phy base regs, phy1 pmu reg, phy2 pmu reg */
-		reg = <0x01c13400 0x10 0x01c14800 0x4 0x01c1c800 0x4>;
-		reg-names = "phy_ctrl", "pmu1", "pmu2";
-		clocks = <&usb_clk 8>;
-		clock-names = "usb_phy";
-		resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>;
-		reset-names = "usb0_reset", "usb1_reset", "usb2_reset";
-		pinctrl-names = "default";
-		pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
-		usb0_id_det-gpios = <&pio 7 19 GPIO_ACTIVE_HIGH>; /* PH19 */
-		usb0_vbus_det-gpios = <&pio 7 22 GPIO_ACTIVE_HIGH>; /* PH22 */
-		usb0_vbus-supply = <&reg_usb0_vbus>;
-		usb1_vbus-supply = <&reg_usb1_vbus>;
-		usb2_vbus-supply = <&reg_usb2_vbus>;
-	};
diff --git a/Documentation/devicetree/bindings/phy/sun9i-usb-phy.txt b/Documentation/devicetree/bindings/phy/sun9i-usb-phy.txt
deleted file mode 100644
index 64f7109..0000000
--- a/Documentation/devicetree/bindings/phy/sun9i-usb-phy.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-Allwinner sun9i USB PHY
------------------------
-
-Required properties:
-- compatible : should be one of
-  * allwinner,sun9i-a80-usb-phy
-- reg : a list of offset + length pairs
-- #phy-cells : from the generic phy bindings, must be 0
-- phy_type : "hsic" for HSIC usage;
-	     other values or absence of this property indicates normal USB
-- clocks : phandle + clock specifier for the phy clocks
-- clock-names : depending on the "phy_type" property,
-  * "phy" for normal USB
-  * "hsic_480M", "hsic_12M" for HSIC
-- resets : a list of phandle + reset specifier pairs
-- reset-names : depending on the "phy_type" property,
-  * "phy" for normal USB
-  * "hsic" for HSIC
-
-Optional Properties:
-- phy-supply : from the generic phy bindings, a phandle to a regulator that
-	       provides power to VBUS.
-
-It is recommended to list all clocks and resets available.
-The driver will only use those matching the phy_type.
-
-Example:
-	usbphy1: phy@a01800 {
-		compatible = "allwinner,sun9i-a80-usb-phy";
-		reg = <0x00a01800 0x4>;
-		clocks = <&usb_phy_clk 2>, <&usb_phy_clk 10>,
-		       <&usb_phy_clk 3>;
-		clock-names = "hsic_480M", "hsic_12M", "phy";
-		resets = <&usb_phy_clk 18>, <&usb_phy_clk 19>;
-		reset-names = "hsic", "phy";
-		#phy-cells = <0>;
-	};
diff --git a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
new file mode 100644
index 0000000..452cee1
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
@@ -0,0 +1,221 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/ti,phy-j721e-wiz.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: TI J721E WIZ (SERDES Wrapper)
+
+maintainers:
+  - Kishon Vijay Abraham I <kishon@ti.com>
+
+properties:
+  compatible:
+    enum:
+      - ti,j721e-wiz-16g
+      - ti,j721e-wiz-10g
+
+  power-domains:
+    maxItems: 1
+
+  clocks:
+    maxItems: 3
+    description: clock-specifier to represent input to the WIZ
+
+  clock-names:
+    items:
+      - const: fck
+      - const: core_ref_clk
+      - const: ext_ref_clk
+
+  num-lanes:
+    minimum: 1
+    maximum: 4
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 1
+
+  "#reset-cells":
+    const: 1
+
+  ranges: true
+
+  assigned-clocks:
+    maxItems: 2
+
+  assigned-clock-parents:
+    maxItems: 2
+
+  typec-dir-gpios:
+    maxItems: 1
+    description:
+      GPIO to signal Type-C cable orientation for lane swap.
+      If GPIO is active, lane 0 and lane 1 of SERDES will be swapped to
+      achieve the functionality of an external type-C plug flip mux.
+
+  typec-dir-debounce-ms:
+    minimum: 100
+    maximum: 1000
+    default: 100
+    description:
+      Number of milliseconds to wait before sampling the typec-dir-gpios line.
+      If not specified, the default debounce of 100ms will be used.
+      Type-C spec states minimum CC pin debounce of 100 ms and maximum
+      of 200 ms. However, some solutions might need more than 200 ms.
+
+patternProperties:
+  "^pll[0|1]-refclk$":
+    type: object
+    description: |
+      WIZ node should have subnodes for each of the PLLs present in
+      the SERDES.
+    properties:
+      clocks:
+        maxItems: 2
+        description: Phandle to clock nodes representing the two inputs to PLL.
+
+      "#clock-cells":
+        const: 0
+
+      assigned-clocks:
+        maxItems: 1
+
+      assigned-clock-parents:
+        maxItems: 1
+
+    required:
+      - clocks
+      - "#clock-cells"
+      - assigned-clocks
+      - assigned-clock-parents
+
+  "^cmn-refclk1?-dig-div$":
+    type: object
+    description:
+      WIZ node should have subnodes for each of the PMA common refclocks
+      provided by the SERDES.
+    properties:
+      clocks:
+        maxItems: 1
+        description: Phandle to the clock node representing the input to the
+          divider clock.
+
+      "#clock-cells":
+        const: 0
+
+    required:
+      - clocks
+      - "#clock-cells"
+
+  "^refclk-dig$":
+    type: object
+    description: |
+      WIZ node should have subnode for refclk_dig to select the reference
+      clock source for the reference clock used in the PHY and PMA digital
+      logic.
+    properties:
+      clocks:
+        maxItems: 4
+        description: Phandle to four clock nodes representing the inputs to
+          refclk_dig
+
+      "#clock-cells":
+        const: 0
+
+      assigned-clocks:
+        maxItems: 1
+
+      assigned-clock-parents:
+        maxItems: 1
+
+    required:
+      - clocks
+      - "#clock-cells"
+      - assigned-clocks
+      - assigned-clock-parents
+
+  "^serdes@[0-9a-f]+$":
+    type: object
+    description: |
+      WIZ node should have exactly one subnode for the SERDES. It could be either
+      Sierra SERDES or Torrent SERDES. Sierra SERDES should follow the
+      bindings specified in
+      Documentation/devicetree/bindings/phy/phy-cadence-sierra.txt
+      Torrent SERDES should follow the bindings specified in
+      Documentation/devicetree/bindings/phy/phy-cadence-dp.txt
+
+required:
+  - compatible
+  - power-domains
+  - clocks
+  - clock-names
+  - num-lanes
+  - "#address-cells"
+  - "#size-cells"
+  - "#reset-cells"
+  - ranges
+
+examples:
+  - |
+    #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+    wiz@5000000 {
+           compatible = "ti,j721e-wiz-16g";
+           #address-cells = <1>;
+           #size-cells = <1>;
+           power-domains = <&k3_pds 292 TI_SCI_PD_EXCLUSIVE>;
+           clocks = <&k3_clks 292 5>, <&k3_clks 292 11>, <&dummy_cmn_refclk>;
+           clock-names = "fck", "core_ref_clk", "ext_ref_clk";
+           assigned-clocks = <&k3_clks 292 11>, <&k3_clks 292 0>;
+           assigned-clock-parents = <&k3_clks 292 15>, <&k3_clks 292 4>;
+           num-lanes = <2>;
+           #reset-cells = <1>;
+           ranges = <0x5000000 0x5000000 0x10000>;
+
+           pll0-refclk {
+                  clocks = <&k3_clks 293 13>, <&dummy_cmn_refclk>;
+                  #clock-cells = <0>;
+                  assigned-clocks = <&wiz1_pll0_refclk>;
+                  assigned-clock-parents = <&k3_clks 293 13>;
+           };
+
+           pll1-refclk {
+                  clocks = <&k3_clks 293 0>, <&dummy_cmn_refclk1>;
+                  #clock-cells = <0>;
+                  assigned-clocks = <&wiz1_pll1_refclk>;
+                  assigned-clock-parents = <&k3_clks 293 0>;
+           };
+
+           cmn-refclk-dig-div {
+                  clocks = <&wiz1_refclk_dig>;
+                  #clock-cells = <0>;
+           };
+
+           cmn-refclk1-dig-div {
+                  clocks = <&wiz1_pll1_refclk>;
+                  #clock-cells = <0>;
+           };
+
+           refclk-dig {
+                  clocks = <&k3_clks 292 11>, <&k3_clks 292 0>, <&dummy_cmn_refclk>, <&dummy_cmn_refclk1>;
+                  #clock-cells = <0>;
+                  assigned-clocks = <&wiz0_refclk_dig>;
+                  assigned-clock-parents = <&k3_clks 292 11>;
+           };
+
+           serdes@5000000 {
+                  compatible = "cdns,ti,sierra-phy-t0";
+                  reg-names = "serdes";
+                  reg = <0x5000000 0x10000>;
+                  #address-cells = <1>;
+                  #size-cells = <0>;
+                  resets = <&serdes_wiz0 0>;
+                  reset-names = "sierra_reset";
+                  clocks = <&wiz0_cmn_refclk_dig_div>, <&wiz0_cmn_refclk1_dig_div>;
+                  clock-names = "cmn_refclk_dig_div", "cmn_refclk1_dig_div";
+           };
+    };
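
A minimal sketch of how a board might use the typec-dir-gpios and
typec-dir-debounce-ms properties described in this binding; the &main_gpio0
phandle, the GPIO line number and the debounce value are illustrative
assumptions, not values taken from the schema:

	&serdes_wiz0 {
		/* swap SERDES lanes 0/1 based on the Type-C orientation pin */
		typec-dir-gpios = <&main_gpio0 12 GPIO_ACTIVE_HIGH>;
		/* wait 150 ms before sampling the orientation GPIO */
		typec-dir-debounce-ms = <150>;
	};
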
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
index 39ad865..bb690e2 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
@@ -22,6 +22,9 @@
 properties:
   compatible:
     const: aspeed,ast2400-pinctrl
+  reg:
+    description: |
+      A hint for the memory regions associated with the pin-controller
 
 patternProperties:
   '^.*$':
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
index 3c6405b..f7f5d57 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
@@ -23,6 +23,9 @@
 properties:
   compatible:
     const: aspeed,ast2500-pinctrl
+  reg:
+    description: |
+      A hint for the memory regions associated with the pin-controller
   aspeed,external-nodes:
     minItems: 2
     maxItems: 2
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
index 064b7df..3749fa2 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
@@ -54,8 +54,9 @@
               TACH10, TACH11, TACH12, TACH13, TACH14, TACH15, TACH2, TACH3,
               TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2,
               THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12, UART13,
-              UART6, UART7, UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2,
-              WDTRST3, WDTRST4, ]
+              UART6, UART7, UART8, UART9, USBAD, USBADP, USB2AH, USB2AHP,
+              USB2BD, USB2BH, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3,
+              WDTRST4, ]
         groups:
           allOf:
             - $ref: "/schemas/types.yaml#/definitions/string"
@@ -85,8 +86,8 @@
               TACH10, TACH11, TACH12, TACH13, TACH14, TACH15, TACH2, TACH3,
               TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2,
               THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12G0,
-              UART12G1, UART13G0, UART13G1, UART6, UART7, UART8, UART9, VB,
-              VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3, WDTRST4, ]
+              UART12G1, UART13G0, UART13G1, UART6, UART7, UART8, UART9, USBA,
+              USBB, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3, WDTRST4, ]
 
 required:
   - compatible
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml
new file mode 100644
index 0000000..2e31e12
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/fsl,imx8mp-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale IMX8MP IOMUX Controller
+
+maintainers:
+  - Anson Huang <Anson.Huang@nxp.com>
+
+description:
+  Please refer to fsl,imx-pinctrl.txt and pinctrl-bindings.txt in this directory
+  for common binding part and usage.
+
+properties:
+  compatible:
+    const: fsl,imx8mp-iomuxc
+
+  reg:
+    maxItems: 1
+
+# Client device subnode's properties
+patternProperties:
+  'grp$':
+    type: object
+    description:
+      Pinctrl node's client devices use subnodes for desired pin configuration.
+      Client device subnodes use the standard properties below.
+
+    properties:
+      fsl,pins:
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/uint32-array
+        description:
+          each entry consists of 6 integers and represents the mux and config
+          setting for one pin. The first 5 integers <mux_reg conf_reg input_reg
+          mux_val input_val> are specified using a PIN_FUNC_ID macro, which can
+          be found in <arch/arm64/boot/dts/freescale/imx8mp-pinfunc.h>. The last
+          integer CONFIG is the pad setting value like pull-up on this pin. Please
+          refer to i.MX8M Plus Reference Manual for detailed CONFIG settings.
+
+    required:
+      - fsl,pins
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  # Pinmux controller node
+  - |
+    iomuxc: pinctrl@30330000 {
+        compatible = "fsl,imx8mp-iomuxc";
+        reg = <0x30330000 0x10000>;
+
+        pinctrl_uart2: uart2grp {
+            fsl,pins = <
+                0x228 0x488 0x5F0 0x0 0x6	0x49
+                0x228 0x488 0x000 0x0 0x0	0x49
+            >;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt
index 0014d98..d9b2100 100644
--- a/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt
@@ -10,9 +10,9 @@
 naming scheme "PxN" where x is a character identifying the GPIO port with
 which the pin is associated and N is an integer from 0 to 31 identifying the
 pin within that GPIO port. For example PA0 is the first pin in GPIO port A, and
-PB31 is the last pin in GPIO port B. The jz4740 and the x1000 contains 4 GPIO
-ports, PA to PD, for a total of 128 pins. The jz4760, the jz4770 and the jz4780
-contains 6 GPIO ports, PA to PF, for a total of 192 pins.
+PB31 is the last pin in GPIO port B. The jz4740, the x1000 and the x1830
+contain 4 GPIO ports, PA to PD, for a total of 128 pins. The jz4760, the
+jz4770 and the jz4780 contain 6 GPIO ports, PA to PF, for a total of 192 pins.
 
 
 Required properties:
@@ -28,6 +28,7 @@
     - "ingenic,x1000-pinctrl"
     - "ingenic,x1000e-pinctrl"
     - "ingenic,x1500-pinctrl"
+    - "ingenic,x1830-pinctrl"
  - reg: Address range of the pinctrl registers.
 
 
@@ -40,6 +41,7 @@
     - "ingenic,jz4770-gpio"
     - "ingenic,jz4780-gpio"
     - "ingenic,x1000-gpio"
+    - "ingenic,x1830-gpio"
  - reg: The GPIO bank number.
  - interrupt-controller: Marks the device node as an interrupt controller.
  - interrupts: Interrupt specifier for the controllers interrupt.
diff --git a/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml b/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml
new file mode 100644
index 0000000..cd2b436
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/intel,lgm-io.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel Lightning Mountain SoC pinmux & GPIO controller binding
+
+maintainers:
+  - Rahul Tanwar <rahul.tanwar@linux.intel.com>
+
+description: |
+  Pinmux & GPIO controller controls pin multiplexing & configuration including
+  GPIO function selection & GPIO attributes configuration.
+
+properties:
+  compatible:
+    const: intel,lgm-io
+
+  reg:
+    maxItems: 1
+
+# Client device subnode's properties
+patternProperties:
+  '-pins$':
+    type: object
+    allOf:
+      - $ref: pincfg-node.yaml#
+      - $ref: pinmux-node.yaml#
+    description:
+      Pinctrl node's client devices use subnodes for desired pin configuration.
+      Client device subnodes use the standard properties below.
+
+    properties:
+      function: true
+      groups: true
+      pins: true
+      pinmux: true
+      bias-pull-up: true
+      bias-pull-down: true
+      drive-strength: true
+      slew-rate: true
+      drive-open-drain: true
+      output-enable: true
+
+    required:
+      - function
+      - groups
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  # Pinmux controller node
+  - |
+    pinctrl: pinctrl@e2880000 {
+        compatible = "intel,lgm-io";
+        reg = <0xe2880000 0x100000>;
+
+        uart0-pins {
+             pins = <64>, /* UART_RX0 */
+                    <65>; /* UART_TX0 */
+             function = "CONSOLE_UART0";
+             pinmux = <1>,
+                      <1>;
+             groups = "CONSOLE_UART0";
+          };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/intel,lgm-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/intel,lgm-pinctrl.yaml
deleted file mode 100644
index 240d429..0000000
--- a/Documentation/devicetree/bindings/pinctrl/intel,lgm-pinctrl.yaml
+++ /dev/null
@@ -1,116 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/bindings/pinctrl/intel,lgm-pinctrl.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Intel Lightning Mountain SoC pinmux & GPIO controller binding
-
-maintainers:
-  - Rahul Tanwar <rahul.tanwar@linux.intel.com>
-
-description: |
-  Pinmux & GPIO controller controls pin multiplexing & configuration including
-  GPIO function selection & GPIO attributes configuration.
-
-  Please refer to [1] for details of the common pinctrl bindings used by the
-  client devices.
-
-  [1] Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
-
-properties:
-  compatible:
-    const: intel,lgm-io
-
-  reg:
-    maxItems: 1
-
-# Client device subnode's properties
-patternProperties:
-  '-pins$':
-    type: object
-    description:
-      Pinctrl node's client devices use subnodes for desired pin configuration.
-      Client device subnodes use below standard properties.
-
-    properties:
-      function:
-        $ref: /schemas/types.yaml#/definitions/string
-        description:
-          A string containing the name of the function to mux to the group.
-
-      groups:
-        $ref: /schemas/types.yaml#/definitions/string-array
-        description:
-          An array of strings identifying the list of groups.
-
-      pins:
-        $ref: /schemas/types.yaml#/definitions/uint32-array
-        description:
-          List of pins to select with this function.
-
-      pinmux:
-        description: The applicable mux group.
-        allOf:
-          - $ref: "/schemas/types.yaml#/definitions/uint32-array"
-
-      bias-pull-up:
-        type: boolean
-
-      bias-pull-down:
-        type: boolean
-
-      drive-strength:
-        description: |
-          Selects the drive strength for the specified pins in mA.
-          0: 2 mA
-          1: 4 mA
-          2: 8 mA
-          3: 12 mA
-        allOf:
-          - $ref: /schemas/types.yaml#/definitions/uint32
-          - enum: [0, 1, 2, 3]
-
-      slew-rate:
-        type: boolean
-        description: |
-          Sets slew rate for specified pins.
-          0: slow slew
-          1: fast slew
-
-      drive-open-drain:
-        type: boolean
-
-      output-enable:
-        type: boolean
-
-    required:
-      - function
-      - groups
-
-    additionalProperties: false
-
-required:
-  - compatible
-  - reg
-
-additionalProperties: false
-
-examples:
-  # Pinmux controller node
-  - |
-    pinctrl: pinctrl@e2880000 {
-        compatible = "intel,lgm-pinctrl";
-        reg = <0xe2880000 0x100000>;
-
-        uart0-pins {
-             pins = <64>, /* UART_RX0 */
-                    <65>; /* UART_TX0 */
-             function = "CONSOLE_UART0";
-             pinmux = <1>,
-                      <1>;
-             groups = "CONSOLE_UART0";
-          };
-    };
-
-...
diff --git a/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml b/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml
index 777623a..732d907 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml
@@ -114,7 +114,7 @@
       specific binding for the hardware defines whether the entries are integers
       or strings, and their meaning.
 
-  group:
+  groups:
     $ref: /schemas/types.yaml#/definitions/string-array
     description:
       the group to apply the properties to, if the driver supports
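
A brief sketch of a client pin configuration node using the renamed 'groups'
property together with 'function'; the group and function names here are
hypothetical placeholders:

	uart0_pins: uart0-pins {
		groups = "uart0_grp";	/* plural form matches the schema */
		function = "uart0";
	};
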
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sc7180-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,sc7180-pinctrl.txt
index b5767ee..6ffeac9 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sc7180-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sc7180-pinctrl.txt
@@ -125,8 +125,9 @@
 		    mi2s_1, mi2s_2, mss_lte, m_voc, pa_indicator, phase_flag,
 		    PLL_BIST, pll_bypassnl, pll_reset, prng_rosc, qdss,
 		    qdss_cti, qlink_enable, qlink_request, qspi_clk, qspi_cs,
-		    qspi_data, qup00, qup01, qup02, qup03, qup04, qup05,
-		    qup10, qup11, qup12, qup13, qup14, qup15, sdc1_tb,
+		    qspi_data, qup00, qup01, qup02_i2c, qup02_uart, qup03,
+		    qup04_i2c, qup04_uart, qup05, qup10, qup11_i2c, qup11_uart,
+		    qup12, qup13_i2c, qup13_uart, qup14, qup15, sdc1_tb,
 		    sdc2_tb, sd_write, sp_cmu, tgu_ch0, tgu_ch1, tgu_ch2,
 		    tgu_ch3, tsense_pwm1, tsense_pwm2, uim1, uim2, uim_batt,
 		    usb_phy, vfr_1, _V_GPIO, _V_PPS_IN, _V_PPS_OUT,
diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.txt b/Documentation/devicetree/bindings/power/qcom,rpmpd.txt
deleted file mode 100644
index bc75bf4..0000000
--- a/Documentation/devicetree/bindings/power/qcom,rpmpd.txt
+++ /dev/null
@@ -1,148 +0,0 @@
-Qualcomm RPM/RPMh Power domains
-
-For RPM/RPMh Power domains, we communicate a performance state to RPM/RPMh
-which then translates it into a corresponding voltage on a rail
-
-Required Properties:
- - compatible: Should be one of the following
-	* qcom,msm8976-rpmpd: RPM Power domain for the msm8976 family of SoC
-	* qcom,msm8996-rpmpd: RPM Power domain for the msm8996 family of SoC
-	* qcom,msm8998-rpmpd: RPM Power domain for the msm8998 family of SoC
-	* qcom,qcs404-rpmpd: RPM Power domain for the qcs404 family of SoC
-	* qcom,sdm845-rpmhpd: RPMh Power domain for the sdm845 family of SoC
- - #power-domain-cells: number of cells in Power domain specifier
-	must be 1.
- - operating-points-v2: Phandle to the OPP table for the Power domain.
-	Refer to Documentation/devicetree/bindings/power/power_domain.txt
-	and Documentation/devicetree/bindings/opp/opp.txt for more details
-
-Refer to <dt-bindings/power/qcom-rpmpd.h> for the level values for
-various OPPs for different platforms as well as Power domain indexes
-
-Example: rpmh power domain controller and OPP table
-
-#include <dt-bindings/power/qcom-rpmhpd.h>
-
-opp-level values specified in the OPP tables for RPMh power domains
-should use the RPMH_REGULATOR_LEVEL_* constants from
-<dt-bindings/power/qcom-rpmhpd.h>
-
-	rpmhpd: power-controller {
-		compatible = "qcom,sdm845-rpmhpd";
-		#power-domain-cells = <1>;
-		operating-points-v2 = <&rpmhpd_opp_table>;
-
-		rpmhpd_opp_table: opp-table {
-			compatible = "operating-points-v2";
-
-			rpmhpd_opp_ret: opp1 {
-				opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
-			};
-
-			rpmhpd_opp_min_svs: opp2 {
-				opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
-			};
-
-			rpmhpd_opp_low_svs: opp3 {
-				opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
-			};
-
-			rpmhpd_opp_svs: opp4 {
-				opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
-			};
-
-			rpmhpd_opp_svs_l1: opp5 {
-				opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
-			};
-
-			rpmhpd_opp_nom: opp6 {
-				opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
-			};
-
-			rpmhpd_opp_nom_l1: opp7 {
-				opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
-			};
-
-			rpmhpd_opp_nom_l2: opp8 {
-				opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
-			};
-
-			rpmhpd_opp_turbo: opp9 {
-				opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
-			};
-
-			rpmhpd_opp_turbo_l1: opp10 {
-				opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
-			};
-		};
-	};
-
-Example: rpm power domain controller and OPP table
-
-	rpmpd: power-controller {
-		compatible = "qcom,msm8996-rpmpd";
-		#power-domain-cells = <1>;
-		operating-points-v2 = <&rpmpd_opp_table>;
-
-		rpmpd_opp_table: opp-table {
-			compatible = "operating-points-v2";
-
-			rpmpd_opp_low: opp1 {
-				opp-level = <1>;
-			};
-
-			rpmpd_opp_ret: opp2 {
-				opp-level = <2>;
-			};
-
-			rpmpd_opp_svs: opp3 {
-				opp-level = <3>;
-			};
-
-			rpmpd_opp_normal: opp4 {
-				opp-level = <4>;
-			};
-
-			rpmpd_opp_high: opp5 {
-				opp-level = <5>;
-			};
-
-			rpmpd_opp_turbo: opp6 {
-				opp-level = <6>;
-			};
-		};
-	};
-
-Example: Client/Consumer device using OPP table
-
-	leaky-device0@12350000 {
-		compatible = "foo,i-leak-current";
-		reg = <0x12350000 0x1000>;
-		power-domains = <&rpmhpd SDM845_MX>;
-		operating-points-v2 = <&leaky_opp_table>;
-	};
-
-
-	leaky_opp_table: opp-table {
-		compatible = "operating-points-v2";
-
-		opp1 {
-			opp-hz = /bits/ 64 <144000>;
-			required-opps = <&rpmhpd_opp_low>;
-		};
-
-		opp2 {
-			opp-hz = /bits/ 64 <400000>;
-			required-opps = <&rpmhpd_opp_ret>;
-		};
-
-		opp3 {
-			opp-hz = /bits/ 64 <20000000>;
-			required-opps = <&rpmpd_opp_svs>;
-		};
-
-		opp4 {
-			opp-hz = /bits/ 64 <25000000>;
-			required-opps = <&rpmpd_opp_normal>;
-		};
-	};
diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
new file mode 100644
index 0000000..ba60531
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
@@ -0,0 +1,170 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/qcom,rpmpd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm RPM/RPMh Power domains
+
+maintainers:
+  - Rajendra Nayak <rnayak@codeaurora.org>
+
+description:
+  For RPM/RPMh Power domains, we communicate a performance state to RPM/RPMh
+  which then translates it into a corresponding voltage on a rail.
+
+properties:
+  compatible:
+    enum:
+      - qcom,msm8976-rpmpd
+      - qcom,msm8996-rpmpd
+      - qcom,msm8998-rpmpd
+      - qcom,qcs404-rpmpd
+      - qcom,sc7180-rpmhpd
+      - qcom,sdm845-rpmhpd
+      - qcom,sm8150-rpmhpd
+
+  '#power-domain-cells':
+    const: 1
+
+  operating-points-v2: true
+
+  opp-table:
+    type: object
+
+required:
+  - compatible
+  - '#power-domain-cells'
+  - operating-points-v2
+
+additionalProperties: false
+
+examples:
+  - |
+
+    // Example 1 (rpmh power domain controller and OPP table):
+
+    #include <dt-bindings/power/qcom-rpmpd.h>
+
+    rpmhpd: power-controller {
+      compatible = "qcom,sdm845-rpmhpd";
+      #power-domain-cells = <1>;
+      operating-points-v2 = <&rpmhpd_opp_table>;
+
+      rpmhpd_opp_table: opp-table {
+        compatible = "operating-points-v2";
+
+        rpmhpd_opp_ret: opp1 {
+          opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
+        };
+
+        rpmhpd_opp_min_svs: opp2 {
+          opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+        };
+
+        rpmhpd_opp_low_svs: opp3 {
+          opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+        };
+
+        rpmhpd_opp_svs: opp4 {
+          opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+        };
+
+        rpmhpd_opp_svs_l1: opp5 {
+          opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+        };
+
+        rpmhpd_opp_nom: opp6 {
+          opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+        };
+
+        rpmhpd_opp_nom_l1: opp7 {
+          opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+        };
+
+        rpmhpd_opp_nom_l2: opp8 {
+          opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
+        };
+
+        rpmhpd_opp_turbo: opp9 {
+          opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+        };
+
+        rpmhpd_opp_turbo_l1: opp10 {
+          opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+        };
+      };
+    };
+
+  - |
+
+    // Example 2 (rpm power domain controller and OPP table):
+
+    rpmpd: power-controller {
+      compatible = "qcom,msm8996-rpmpd";
+      #power-domain-cells = <1>;
+      operating-points-v2 = <&rpmpd_opp_table>;
+
+      rpmpd_opp_table: opp-table {
+        compatible = "operating-points-v2";
+
+        rpmpd_opp_low: opp1 {
+          opp-level = <1>;
+        };
+
+        rpmpd_opp_ret: opp2 {
+          opp-level = <2>;
+        };
+
+        rpmpd_opp_svs: opp3 {
+          opp-level = <3>;
+        };
+
+        rpmpd_opp_normal: opp4 {
+          opp-level = <4>;
+        };
+
+        rpmpd_opp_high: opp5 {
+          opp-level = <5>;
+        };
+
+        rpmpd_opp_turbo: opp6 {
+          opp-level = <6>;
+        };
+      };
+    };
+
+  - |
+
+    // Example 3 (Client/Consumer device using OPP table):
+
+    leaky-device0@12350000 {
+      compatible = "foo,i-leak-current";
+      reg = <0x12350000 0x1000>;
+      power-domains = <&rpmhpd 0>;
+      operating-points-v2 = <&leaky_opp_table>;
+    };
+
+    leaky_opp_table: opp-table {
+      compatible = "operating-points-v2";
+      opp1 {
+        opp-hz = /bits/ 64 <144000>;
+        required-opps = <&rpmhpd_opp_low>;
+      };
+
+      opp2 {
+        opp-hz = /bits/ 64 <400000>;
+        required-opps = <&rpmhpd_opp_ret>;
+      };
+
+      opp3 {
+        opp-hz = /bits/ 64 <20000000>;
+        required-opps = <&rpmpd_opp_svs>;
+      };
+
+      opp4 {
+        opp-hz = /bits/ 64 <25000000>;
+        required-opps = <&rpmpd_opp_normal>;
+      };
+    };
+...
diff --git a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt
index d366f1e..bb529ec 100644
--- a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt
+++ b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt
@@ -8,9 +8,27 @@
  - compatible:		Must contain:	"xlnx,zynqmp-power"
  - interrupts:		Interrupt specifier
 
--------
-Example
--------
+Optional properties:
+ - mbox-names	: Name given to channels seen in the 'mboxes' property.
+		  "tx" - Mailbox corresponding to transmit path
+		  "rx" - Mailbox corresponding to receive path
+ - mboxes	: Standard property to specify a Mailbox. Each value of
+		  the mboxes property should contain a phandle to the
+		  mailbox controller device node and an args specifier
+		  that will be the phandle to the intended sub-mailbox
+		  child node to be used for communication. See
+		  Documentation/devicetree/bindings/mailbox/mailbox.txt
+		  for more details about the generic mailbox controller
+		  and client driver bindings. Also see
+		  Documentation/devicetree/bindings/mailbox/ \
+		  xlnx,zynqmp-ipi-mailbox.txt for the typical controller that
+		  is used to communicate with this system controller.
+
+--------
+Examples
+--------
+
+Example with interrupt method:
 
 firmware {
 	zynqmp_firmware: zynqmp-firmware {
@@ -23,3 +41,21 @@
 		};
 	};
 };
+
+Example with IPI mailbox method:
+
+firmware {
+	zynqmp_firmware: zynqmp-firmware {
+		compatible = "xlnx,zynqmp-firmware";
+		method = "smc";
+
+		zynqmp_power: zynqmp-power {
+			compatible = "xlnx,zynqmp-power";
+			interrupt-parent = <&gic>;
+			interrupts = <0 35 4>;
+			mboxes = <&ipi_mailbox_pmu0 0>,
+				 <&ipi_mailbox_pmu0 1>;
+			mbox-names = "tx", "rx";
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/power/supply/battery.txt b/Documentation/devicetree/bindings/power/supply/battery.txt
index 5c913d4c..3049cf8 100644
--- a/Documentation/devicetree/bindings/power/supply/battery.txt
+++ b/Documentation/devicetree/bindings/power/supply/battery.txt
@@ -35,6 +35,10 @@
    for each of the battery capacity lookup table. The first temperature value
    specifies the OCV table 0, and the second temperature value specifies the
    OCV table 1, and so on.
+ - resistance-temp-table: An array providing the temperature in degrees Celsius
+   and the corresponding battery internal resistance percent, which is used to
+   look up the resistance percent according to the current temperature to get an
+   accurate battery internal resistance at different temperatures.
 
 Battery properties are named, where possible, for the corresponding
 elements in enum power_supply_property, defined in
@@ -61,6 +65,7 @@
 		ocv-capacity-table-0 = <4185000 100>, <4113000 95>, <4066000 90>, ...;
 		ocv-capacity-table-1 = <4200000 100>, <4185000 95>, <4113000 90>, ...;
 		ocv-capacity-table-2 = <4250000 100>, <4200000 95>, <4185000 90>, ...;
+		resistance-temp-table = <20 100>, <10 90>, <0 80>, <(-10) 60>;
 	};
 
 	charger: charger@11 {
diff --git a/Documentation/devicetree/bindings/power/supply/bq25890.txt b/Documentation/devicetree/bindings/power/supply/bq25890.txt
index dc05689..dc9c8f7 100644
--- a/Documentation/devicetree/bindings/power/supply/bq25890.txt
+++ b/Documentation/devicetree/bindings/power/supply/bq25890.txt
@@ -1,11 +1,14 @@
 Binding for TI bq25890 Li-Ion Charger
 
-This driver will support the bq25896 and the bq25890. There are other ICs
-in the same family but those have not been tested.
+This driver will support the bq25890, the bq25892, the bq25895 and the bq25896.
+There are other ICs in the same family but those have not been tested.
 
 Required properties:
 - compatible: Should contain one of the following:
     * "ti,bq25890"
+    * "ti,bq25892"
+    * "ti,bq25895"
+    * "ti,bq25896"
 - reg: integer, i2c address of the device.
 - ti,battery-regulation-voltage: integer, maximum charging voltage (in uV);
 - ti,charge-current: integer, maximum charging current (in uA);
diff --git a/Documentation/devicetree/bindings/power/supply/max17040_battery.txt b/Documentation/devicetree/bindings/power/supply/max17040_battery.txt
new file mode 100644
index 0000000..4e0186b
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/max17040_battery.txt
@@ -0,0 +1,33 @@
+max17040_battery
+~~~~~~~~~~~~~~~~
+
+Required properties :
+ - compatible : "maxim,max17040" or "maxim,max77836-battery"
+ - reg: i2c slave address
+
+Optional properties :
+- maxim,alert-low-soc-level :	The alert threshold that sets the state of
+				charge level (%) where an interrupt is
+				generated. Can be configured from 1 up to 32
+				(%). If omitted, the power-up default value of
+				4 (%) will be used.
+- interrupts : 			Interrupt line; see Documentation/devicetree/
+				bindings/interrupt-controller/interrupts.txt
+- wakeup-source :		This device has wakeup capabilities. Use this
+				property to use alert low SOC level interrupt
+				as wake up source.
+
+The optional interrupt properties provide the alert functionality for a low
+state of charge level; they are present only in some ICs in the same family and
+should be used with the "maxim,max77836-battery" compatible.
+
+Example:
+
+	battery-fuel-gauge@36 {
+		compatible = "maxim,max77836-battery";
+		reg = <0x36>;
+		maxim,alert-low-soc-level = <10>;
+		interrupt-parent = <&gpio7>;
+		interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+		wakeup-source;
+	};
diff --git a/Documentation/devicetree/bindings/power/supply/max17042_battery.txt b/Documentation/devicetree/bindings/power/supply/max17042_battery.txt
index 3f3894a..f34c5da 100644
--- a/Documentation/devicetree/bindings/power/supply/max17042_battery.txt
+++ b/Documentation/devicetree/bindings/power/supply/max17042_battery.txt
@@ -2,7 +2,11 @@
 ~~~~~~~~~~~~~~~~
 
 Required properties :
- - compatible : "maxim,max17042"
+ - compatible : one of the following
+ * "maxim,max17042"
+ * "maxim,max17047"
+ * "maxim,max17050"
+ * "maxim,max17055"
 
 Optional properties :
  - maxim,rsns-microohm : Resistance of rsns resistor in micro Ohms
diff --git a/Documentation/devicetree/bindings/power/supply/sc27xx-fg.txt b/Documentation/devicetree/bindings/power/supply/sc27xx-fg.txt
index 0a5705b..b6359b5 100644
--- a/Documentation/devicetree/bindings/power/supply/sc27xx-fg.txt
+++ b/Documentation/devicetree/bindings/power/supply/sc27xx-fg.txt
@@ -13,6 +13,8 @@
 - io-channel-names: Should be "bat-temp" or "charge-vol".
 - nvmem-cells: A phandle to the calibration cells provided by eFuse device.
 - nvmem-cell-names: Should be "fgu_calib".
+- sprd,calib-resistance-micro-ohms: Specify the real resistance of coulomb counter
+  chip in micro Ohms.
 - monitored-battery: Phandle of battery characteristics devicetree node.
   See Documentation/devicetree/bindings/power/supply/battery.txt
 
@@ -52,5 +54,6 @@
 			nvmem-cells = <&fgu_calib>;
 			nvmem-cell-names = "fgu_calib";
 			monitored-battery = <&bat>;
+			sprd,calib-resistance-micro-ohms = <21500>;
 		};
 	};
diff --git a/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml b/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml
index 4a21fe7..7dcab2b 100644
--- a/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml
@@ -30,13 +30,51 @@
       - items:
           - const: allwinner,sun50i-h5-pwm
           - const: allwinner,sun5i-a13-pwm
+      - const: allwinner,sun50i-h6-pwm
 
   reg:
     maxItems: 1
 
   clocks:
+    minItems: 1
+    maxItems: 2
+    items:
+      - description: Module Clock
+      - description: Bus Clock
+
+  # Even though it only applies to subschemas under the conditionals,
+  # not listing them here will trigger a warning because of the
+  # additionalProperties set to false.
+  clock-names: true
+
+  resets:
     maxItems: 1
 
+if:
+  properties:
+    compatible:
+      contains:
+        const: allwinner,sun50i-h6-pwm
+
+then:
+  properties:
+    clocks:
+      maxItems: 2
+
+    clock-names:
+      items:
+        - const: mod
+        - const: bus
+
+  required:
+    - clock-names
+    - resets
+
+else:
+  properties:
+    clocks:
+      maxItems: 1
+
 required:
   - "#pwm-cells"
   - compatible
@@ -54,4 +92,17 @@
         #pwm-cells = <3>;
     };
 
+  - |
+    #include <dt-bindings/clock/sun50i-h6-ccu.h>
+    #include <dt-bindings/reset/sun50i-h6-ccu.h>
+
+    pwm@300a000 {
+      compatible = "allwinner,sun50i-h6-pwm";
+      reg = <0x0300a000 0x400>;
+      clocks = <&osc24M>, <&ccu CLK_BUS_PWM>;
+      clock-names = "mod", "bus";
+      resets = <&ccu RST_BUS_PWM>;
+      #pwm-cells = <3>;
+    };
+
 ...
diff --git a/Documentation/devicetree/bindings/pwm/mxs-pwm.txt b/Documentation/devicetree/bindings/pwm/mxs-pwm.txt
index 1b06f86a..a1b8a48 100644
--- a/Documentation/devicetree/bindings/pwm/mxs-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/mxs-pwm.txt
@@ -3,7 +3,7 @@
 Required properties:
 - compatible: should be "fsl,imx23-pwm"
 - reg: physical base address and length of the controller's registers
-- #pwm-cells: should be 2. See pwm.yaml in this directory for a description of
+- #pwm-cells: should be 3. See pwm.yaml in this directory for a description of
   the cells format.
 - fsl,pwm-number: the number of PWM devices
 
@@ -12,6 +12,6 @@
 pwm: pwm@80064000 {
 	compatible = "fsl,imx28-pwm", "fsl,imx23-pwm";
 	reg = <0x80064000 0x2000>;
-	#pwm-cells = <2>;
+	#pwm-cells = <3>;
 	fsl,pwm-number = <8>;
 };
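
With three PWM cells the consumer specifier gains a flags cell, as described in
pwm.yaml. A hedged sketch of a consumer node; the backlight node, channel number
and period are illustrative only:

	backlight {
		compatible = "pwm-backlight";
		/* channel 2, 5 ms period, flags from <dt-bindings/pwm/pwm.h> */
		pwms = <&pwm 2 5000000 PWM_POLARITY_INVERTED>;
	};
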
diff --git a/Documentation/devicetree/bindings/remoteproc/mtk,scp.txt b/Documentation/devicetree/bindings/remoteproc/mtk,scp.txt
new file mode 100644
index 0000000..3ba668b
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/mtk,scp.txt
@@ -0,0 +1,36 @@
+Mediatek SCP Bindings
+----------------------------------------
+
+This binding provides support for the ARM Cortex-M4 co-processor found on some
+Mediatek SoCs.
+
+Required properties:
+- compatible		Should be "mediatek,mt8183-scp"
+- reg			Should contain the address ranges for the two memory
+			regions, SRAM and CFG.
+- reg-names		Contains the corresponding names for the two memory
+			regions. These should be named "sram" & "cfg".
+- clocks		Clock for co-processor (See: ../clock/clock-bindings.txt)
+- clock-names		Contains the corresponding name for the clock. This
+			should be named "main".
+
+Subnodes
+--------
+
+Subnodes of the SCP represent rpmsg devices. The names of the devices are not
+important. The properties of these nodes are defined by the individual bindings
+for the rpmsg devices, but each node must contain the following property:
+
+- mtk,rpmsg-name	Contains the name for the rpmsg device. Used to match
+			the subnode to the rpmsg device announced by the SCP.
+
+Example:
+
+	scp: scp@10500000 {
+		compatible = "mediatek,mt8183-scp";
+		reg = <0 0x10500000 0 0x80000>,
+		      <0 0x105c0000 0 0x5000>;
+		reg-names = "sram", "cfg";
+		clocks = <&infracfg CLK_INFRA_SCPSYS>;
+		clock-names = "main";
+	};
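
A hedged sketch of how an rpmsg device could be described as a subnode of the
SCP; the subnode name and the mtk,rpmsg-name value are placeholders chosen for
illustration, with the remaining properties coming from the rpmsg device's own
binding:

	scp: scp@10500000 {
		compatible = "mediatek,mt8183-scp";
		reg = <0 0x10500000 0 0x80000>,
		      <0 0x105c0000 0 0x5000>;
		reg-names = "sram", "cfg";
		clocks = <&infracfg CLK_INFRA_SCPSYS>;
		clock-names = "main";

		cros-ec {
			mtk,rpmsg-name = "cros-ec-rpmsg";
		};
	};
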
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
index 292dfda..9938918 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
@@ -10,11 +10,17 @@
 		    "qcom,msm8974-adsp-pil"
 		    "qcom,msm8996-adsp-pil"
 		    "qcom,msm8996-slpi-pil"
+		    "qcom,msm8998-adsp-pas"
+		    "qcom,msm8998-slpi-pas"
 		    "qcom,qcs404-adsp-pas"
 		    "qcom,qcs404-cdsp-pas"
 		    "qcom,qcs404-wcss-pas"
 		    "qcom,sdm845-adsp-pas"
 		    "qcom,sdm845-cdsp-pas"
+		    "qcom,sm8150-adsp-pas"
+		    "qcom,sm8150-cdsp-pas"
+		    "qcom,sm8150-mpss-pas"
+		    "qcom,sm8150-slpi-pas"
 
 - interrupts-extended:
 	Usage: required
@@ -29,12 +35,18 @@
 	qcom,msm8974-adsp-pil:
 	qcom,msm8996-adsp-pil:
 	qcom,msm8996-slpi-pil:
+	qcom,msm8998-adsp-pas:
+	qcom,msm8998-slpi-pas:
 	qcom,qcs404-adsp-pas:
 	qcom,qcs404-cdsp-pas:
 	qcom,sdm845-adsp-pas:
 	qcom,sdm845-cdsp-pas:
+	qcom,sm8150-adsp-pas:
+	qcom,sm8150-cdsp-pas:
+	qcom,sm8150-slpi-pas:
 		    must be "wdog", "fatal", "ready", "handover", "stop-ack"
 	qcom,qcs404-wcss-pas:
+	qcom,sm8150-mpss-pas:
 		    must be "wdog", "fatal", "ready", "handover", "stop-ack",
 		    "shutdown-ack"
 
@@ -67,6 +79,38 @@
 	Definition: reference to the px regulator to be held on behalf of the
 		    booting Hexagon core
 
+- power-domains:
+	Usage: required
+	Value type: <phandle>
+	Definition: reference to power-domains that match the power-domain-names
+
+- power-domain-names:
+	Usage: required
+	Value type: <stringlist>
+	Definition: The power-domains needed depend on the compatible string:
+	qcom,msm8974-adsp-pil:
+	qcom,msm8996-adsp-pil:
+	qcom,msm8998-adsp-pas:
+		    must be "cx"
+	qcom,msm8996-slpi-pil:
+		    must be "ss_cx"
+	qcom,msm8998-slpi-pas:
+		    must be "ssc_cx"
+	qcom,qcs404-adsp-pas:
+		    must be "lpi_cx"
+	qcom,qcs404-cdsp-pas:
+	qcom,qcs404-wcss-pas:
+		    must be "mx"
+	qcom,sdm845-adsp-pas:
+	qcom,sdm845-cdsp-pas:
+	qcom,sm8150-adsp-pas:
+	qcom,sm8150-cdsp-pas:
+		    must be "cx", "load_state"
+	qcom,sm8150-mpss-pas:
+		    must be "cx", "load_state", "mss"
+	qcom,sm8150-slpi-pas:
+		    must be "lcx", "lmx", "load_state"
+
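
A hedged sketch of the new power-domain properties on one of the added
compatibles; the phandles and specifier values are placeholders, and the other
required properties of the node are omitted here:

	remoteproc@17300000 {
		compatible = "qcom,sm8150-adsp-pas";
		/* names must follow the per-compatible list above */
		power-domains = <&rpmhpd 0>, <&load_state_pd 0>;
		power-domain-names = "cx", "load_state";
	};
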
 - memory-region:
 	Usage: required
 	Value type: <phandle>
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
index c416746..88dfa3f 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
@@ -13,6 +13,7 @@
 		    "qcom,msm8974-mss-pil"
 		    "qcom,msm8996-mss-pil"
 		    "qcom,msm8998-mss-pil"
+		    "qcom,sc7180-mss-pil"
 		    "qcom,sdm845-mss-pil"
 
 - reg:
@@ -43,6 +44,7 @@
 		    must be "wdog", "fatal", "ready", "handover", "stop-ack"
 	qcom,msm8996-mss-pil:
 	qcom,msm8998-mss-pil:
+	qcom,sc7180-mss-pil:
 	qcom,sdm845-mss-pil:
 		    must be "wdog", "fatal", "ready", "handover", "stop-ack",
 		    "shutdown-ack"
@@ -75,6 +77,9 @@
 	qcom,msm8998-mss-pil:
 		    must be "iface", "bus", "mem", "xo", "gpll0_mss",
 		    "snoc_axi", "mnoc_axi", "qdss"
+	qcom,sc7180-mss-pil:
+		    must be "iface", "bus", "xo", "snoc_axi", "mnoc_axi",
+		    "mss_crypto", "mss_nav", "nav"
 	qcom,sdm845-mss-pil:
 		    must be "iface", "bus", "mem", "xo", "gpll0_mss",
 		    "snoc_axi", "mnoc_axi", "prng"
@@ -86,7 +91,7 @@
 		    reference to the list of 3 reset-controllers for the
 		    wcss sub-system
 		    reference to the list of 2 reset-controllers for the modem
-		    sub-system on SDM845 SoCs
+		    sub-system on SC7180 and SDM845 SoCs
 
 - reset-names:
 	Usage: required
@@ -95,7 +100,7 @@
 		    must be "wcss_aon_reset", "wcss_reset", "wcss_q6_reset"
 		    for the wcss sub-system
 		    must be "mss_restart", "pdc_reset" for the modem
-		    sub-system on SDM845 SoCs
+		    sub-system on SC7180 and SDM845 SoCs
 
 For the compatible strings below the following supplies are required:
   "qcom,q6v5-pil"
@@ -144,6 +149,7 @@
 	qcom,msm8996-mss-pil:
 	qcom,msm8998-mss-pil:
 		    must be "cx", "mx"
+	qcom,sc7180-mss-pil:
 	qcom,sdm845-mss-pil:
 		    must be "cx", "mx", "mss", "load_state"
 
@@ -165,6 +171,19 @@
 		    by the three offsets within syscon for q6, modem and nc
 		    halt registers.
 
+For the compatible strings below the following phandle references are required:
+  "qcom,sc7180-mss-pil"
+- qcom,halt-nav-regs:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: reference to a list of 2 phandles with one offset each for
+		    the modem sub-system running on SC7180 SoC. The first
+		    phandle reference is to the mss clock node followed by the
+		    offset within register space for nav halt register. The
+		    second phandle reference is to a syscon representing TCSR
+		    followed by the offset within syscon for conn_box_spare0
+		    register.
+
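
A hedged sketch of the SC7180-specific qcom,halt-nav-regs property; the phandle
names and register offsets below are placeholders for illustration only:

	remoteproc@4080000 {
		compatible = "qcom,sc7180-mss-pil";
		/* first entry: mss clock node + NAV halt register offset,
		   second entry: TCSR syscon + conn_box_spare0 offset */
		qcom,halt-nav-regs = <&mss_cc 0x74>, <&tcsr_regs 0x05>;
	};
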
 = SUBNODES:
 The Hexagon node must contain two subnodes, named "mba" and "mpss" representing
 the memory regions used by the Hexagon firmware. Each sub-node must contain:
diff --git a/Documentation/devicetree/bindings/reset/allwinner,sun6i-a31-clock-reset.yaml b/Documentation/devicetree/bindings/reset/allwinner,sun6i-a31-clock-reset.yaml
new file mode 100644
index 0000000..001c0d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/allwinner,sun6i-a31-clock-reset.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/allwinner,sun6i-a31-clock-reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A31 Peripheral Reset Controller Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+deprecated: true
+
+select:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - allwinner,sun6i-a31-ahb1-reset
+          - allwinner,sun6i-a31-clock-reset
+
+  # The PRCM on the A31 and A23 will have the reg property missing,
+  # since it's set at the upper level node, and will be validated by
+  # PRCM's schema. Make sure we only validate standalone nodes.
+  required:
+    - compatible
+    - reg
+
+properties:
+  "#reset-cells":
+    const: 1
+    description: >
+      This additional argument passed to that reset controller is the
+      offset of the bit controlling this particular reset line in the
+      register.
+
+  compatible:
+    enum:
+      - allwinner,sun6i-a31-ahb1-reset
+      - allwinner,sun6i-a31-clock-reset
+
+  reg:
+    maxItems: 1
+
+required:
+  - "#reset-cells"
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    ahb1_rst: reset@1c202c0 {
+        #reset-cells = <1>;
+        compatible = "allwinner,sun6i-a31-ahb1-reset";
+        reg = <0x01c202c0 0xc>;
+    };
+
+  - |
+    apbs_rst: reset@80014b0 {
+        #reset-cells = <1>;
+        compatible = "allwinner,sun6i-a31-clock-reset";
+        reg = <0x080014b0 0x4>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt b/Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt
deleted file mode 100644
index 4ca66c9..0000000
--- a/Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Allwinner sunxi Peripheral Reset Controller
-===========================================
-
-Please also refer to reset.txt in this directory for common reset
-controller binding usage.
-
-Required properties:
-- compatible: Should be one of the following:
-  "allwinner,sun6i-a31-ahb1-reset"
-  "allwinner,sun6i-a31-clock-reset"
-- reg: should be register base and length as documented in the
-  datasheet
-- #reset-cells: 1, see below
-
-example:
-
-ahb1_rst: reset@1c202c0 {
-	#reset-cells = <1>;
-	compatible = "allwinner,sun6i-a31-ahb1-reset";
-	reg = <0x01c202c0 0xc>;
-};
diff --git a/Documentation/devicetree/bindings/reset/brcm,bcm7216-pcie-sata-rescal.yaml b/Documentation/devicetree/bindings/reset/brcm,bcm7216-pcie-sata-rescal.yaml
new file mode 100644
index 0000000..411bd76
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/brcm,bcm7216-pcie-sata-rescal.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2020 Broadcom
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/reset/brcm,bcm7216-pcie-sata-rescal.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: BCM7216 RESCAL reset controller
+
+description: This document describes the BCM7216 RESCAL reset controller which is responsible for controlling the reset of the SATA and PCIe0/1 instances on BCM7216.
+
+maintainers:
+  - Florian Fainelli <f.fainelli@gmail.com>
+  - Jim Quinlan <jim2101024@gmail.com>
+
+properties:
+  compatible:
+    const: brcm,bcm7216-pcie-sata-rescal
+
+  reg:
+    maxItems: 1
+
+  "#reset-cells":
+    const: 0
+
+required:
+  - compatible
+  - reg
+  - "#reset-cells"
+
+examples:
+  - |
+    reset-controller@8b2c800 {
+          compatible = "brcm,bcm7216-pcie-sata-rescal";
+          reg = <0x8b2c800 0x10>;
+          #reset-cells = <0>;
+    };
diff --git a/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml b/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml
new file mode 100644
index 0000000..246dea8
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/intel,rcu-gw.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: System Reset Controller on Intel Gateway SoCs
+
+maintainers:
+  - Dilip Kota <eswara.kota@linux.intel.com>
+
+properties:
+  compatible:
+    enum:
+      - intel,rcu-lgm
+      - intel,rcu-xrx200
+
+  reg:
+    description: Reset controller registers.
+    maxItems: 1
+
+  intel,global-reset:
+    description: Global reset register offset and bit offset.
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32-array
+      - maxItems: 2
+
+  "#reset-cells":
+    minimum: 2
+    maximum: 3
+    description: |
+      First cell is reset request register offset.
+      Second cell is bit offset in reset request register.
+      Third cell is bit offset in reset status register.
+      For the LGM SoC the reset cell count is 2, as the bit offset in the
+      reset request and reset status registers is the same, whereas it is
+      3 for legacy SoCs, where the bit offsets differ.
+
+required:
+  - compatible
+  - reg
+  - intel,global-reset
+  - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    rcu0: reset-controller@e0000000 {
+        compatible = "intel,rcu-lgm";
+        reg = <0xe0000000 0x20000>;
+        intel,global-reset = <0x10 30>;
+        #reset-cells = <2>;
+    };
+
+    pwm: pwm@e0d00000 {
+        status = "disabled";
+        compatible = "intel,lgm-pwm";
+        reg = <0xe0d00000 0x30>;
+        clocks = <&cgu0 1>;
+        #pwm-cells = <2>;
+        resets = <&rcu0 0x30 21>;
+    };
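
For the legacy three-cell case the description above implies the consumer passes
the request bit offset and the status bit offset separately. A hedged sketch,
with placeholder addresses, register and bit offsets:

	rcu1: reset-controller@1f203000 {
		compatible = "intel,rcu-xrx200";
		reg = <0x1f203000 0x1000>;
		intel,global-reset = <0x10 30>;
		#reset-cells = <3>;
	};

	usb0: usb@1e101000 {
		/* request register 0x10, request bit 4, status bit 5 */
		resets = <&rcu1 0x10 4 5>;
	};
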
diff --git a/Documentation/devicetree/bindings/reset/nuvoton,npcm-reset.txt b/Documentation/devicetree/bindings/reset/nuvoton,npcm-reset.txt
new file mode 100644
index 0000000..6e80270
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/nuvoton,npcm-reset.txt
@@ -0,0 +1,32 @@
+Nuvoton NPCM Reset controller
+
+Required properties:
+- compatible : "nuvoton,npcm750-reset" for NPCM7XX BMC
+- reg : specifies physical base address and size of the register.
+- #reset-cells: must be set to 2
+
+Optional property:
+- nuvoton,sw-reset-number - Contains the software reset number to restart the SoC.
+  NPCM7xx contains four software resets, represented by the numbers 1 to 4.
+
+  If 'nuvoton,sw-reset-number' is not specified, the software reset is disabled.
+
+Example:
+	rstc: rstc@f0801000 {
+		compatible = "nuvoton,npcm750-reset";
+		reg = <0xf0801000 0x70>;
+		#reset-cells = <2>;
+		nuvoton,sw-reset-number = <2>;
+	};
+
+Specifying reset lines connected to NPCM7XX IP modules
+======================================================
+example:
+
+        spi0: spi@..... {
+                ...
+                resets = <&rstc NPCM7XX_RESET_IPSRST2 NPCM7XX_RESET_PSPI1>;
+                ...
+        };
+
+The index could be found in <dt-bindings/reset/nuvoton,npcm7xx-reset.h>.
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
deleted file mode 100644
index 5d3791e7..0000000
--- a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Atmel AT91RM9200 Real Time Clock
-
-Required properties:
-- compatible: should be: "atmel,at91rm9200-rtc" or "atmel,at91sam9x5-rtc"
-- reg: physical base address of the controller and length of memory mapped
-  region.
-- interrupts: rtc alarm/event interrupt
-- clocks: phandle to input clock.
-
-Example:
-
-rtc@fffffe00 {
-	compatible = "atmel,at91rm9200-rtc";
-	reg = <0xfffffe00 0x100>;
-	interrupts = <1 4 7>;
-	clocks = <&clk32k>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml
new file mode 100644
index 0000000..02bbfe7
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/atmel,at91rm9200-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel AT91 RTC Device Tree Bindings
+
+allOf:
+  - $ref: "rtc.yaml#"
+
+maintainers:
+  - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+properties:
+  compatible:
+    enum:
+      - atmel,at91rm9200-rtc
+      - atmel,at91sam9x5-rtc
+      - atmel,sama5d4-rtc
+      - atmel,sama5d2-rtc
+      - microchip,sam9x60-rtc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+
+additionalProperties: false
+
+examples:
+  - |
+    rtc@fffffe00 {
+        compatible = "atmel,at91rm9200-rtc";
+        reg = <0xfffffe00 0x100>;
+        interrupts = <1 4 7>;
+        clocks = <&clk32k>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt
deleted file mode 100644
index 130ca5b..0000000
--- a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-STM32 Real Time Clock
-
-Required properties:
-- compatible: can be one of the following:
-  - "st,stm32-rtc" for devices compatible with stm32(f4/f7).
-  - "st,stm32h7-rtc" for devices compatible with stm32h7.
-  - "st,stm32mp1-rtc" for devices compatible with stm32mp1.
-- reg: address range of rtc register set.
-- clocks: can use up to two clocks, depending on part used:
-  - "rtc_ck": RTC clock source.
-  - "pclk": RTC APB interface clock.
-    It is not present on stm32(f4/f7).
-    It is required on stm32(h7/mp1).
-- clock-names: must be "rtc_ck" and "pclk".
-    It is required on stm32(h7/mp1).
-- interrupts: rtc alarm interrupt. On stm32mp1, a second interrupt is required
-  for rtc alarm wakeup interrupt.
-- st,syscfg: phandle/offset/mask triplet. The phandle to pwrcfg used to
-  access control register at offset, and change the dbp (Disable Backup
-  Protection) bit represented by the mask, mandatory to disable/enable backup
-  domain (RTC registers) write protection.
-    It is required on stm32(f4/f7/h7).
-
-Optional properties (to override default rtc_ck parent clock on stm32(f4/f7/h7):
-- assigned-clocks: reference to the rtc_ck clock entry.
-- assigned-clock-parents: phandle of the new parent clock of rtc_ck.
-
-Example:
-
-	rtc: rtc@40002800 {
-		compatible = "st,stm32-rtc";
-		reg = <0x40002800 0x400>;
-		clocks = <&rcc 1 CLK_RTC>;
-		assigned-clocks = <&rcc 1 CLK_RTC>;
-		assigned-clock-parents = <&rcc 1 CLK_LSE>;
-		interrupt-parent = <&exti>;
-		interrupts = <17 1>;
-		st,syscfg = <&pwrcfg 0x00 0x100>;
-	};
-
-	rtc: rtc@58004000 {
-		compatible = "st,stm32h7-rtc";
-		reg = <0x58004000 0x400>;
-		clocks = <&rcc RTCAPB_CK>, <&rcc RTC_CK>;
-		clock-names = "pclk", "rtc_ck";
-		assigned-clocks = <&rcc RTC_CK>;
-		assigned-clock-parents = <&rcc LSE_CK>;
-		interrupt-parent = <&exti>;
-		interrupts = <17 1>;
-		interrupt-names = "alarm";
-		st,syscfg = <&pwrcfg 0x00 0x100>;
-	};
-
-	rtc: rtc@5c004000 {
-		compatible = "st,stm32mp1-rtc";
-		reg = <0x5c004000 0x400>;
-		clocks = <&rcc RTCAPB>, <&rcc RTC>;
-		clock-names = "pclk", "rtc_ck";
-		interrupts-extended = <&intc GIC_SPI 3 IRQ_TYPE_NONE>,
-				      <&exti 19 1>;
-	};
diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
new file mode 100644
index 0000000..0a54296
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
@@ -0,0 +1,139 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/st,stm32-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 Real Time Clock Bindings
+
+maintainers:
+  - Gabriel Fernandez <gabriel.fernandez@st.com>
+
+properties:
+  compatible:
+    enum:
+      - st,stm32-rtc
+      - st,stm32h7-rtc
+      - st,stm32mp1-rtc
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    minItems: 1
+    maxItems: 2
+
+  clock-names:
+    items:
+      - const: pclk
+      - const: rtc_ck
+
+  interrupts:
+    maxItems: 1
+
+  st,syscfg:
+    allOf:
+      - $ref: "/schemas/types.yaml#/definitions/phandle-array"
+      - items:
+          minItems: 3
+          maxItems: 3
+    description: |
+       Phandle/offset/mask triplet. The phandle to pwrcfg is used to
+       access the control register at the given offset and to change the dbp
+       (Disable Backup Protection) bit represented by the mask; this is
+       mandatory to enable/disable backup domain (RTC registers) write protection.
+
+  assigned-clocks:
+    description: |
+      reference to the rtc_ck clock entry, used to override its default parent clock
+    maxItems: 1
+
+  assigned-clock-parents:
+    description: |
+      phandle of the new parent clock of rtc_ck, overriding its default parent clock
+    maxItems: 1
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: st,stm32-rtc
+
+    then:
+      properties:
+        clocks:
+          minItems: 1
+          maxItems: 1
+
+        clock-names: false
+
+      required:
+        - st,syscfg
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: st,stm32h7-rtc
+
+    then:
+      properties:
+        clocks:
+          minItems: 2
+          maxItems: 2
+
+      required:
+        - clock-names
+        - st,syscfg
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: st,stm32mp1-rtc
+
+    then:
+      properties:
+        clocks:
+          minItems: 2
+          maxItems: 2
+
+        assigned-clocks: false
+        assigned-clock-parents: false
+
+      required:
+        - clock-names
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - interrupts
+
+examples:
+  - |
+    #include <dt-bindings/mfd/stm32f4-rcc.h>
+    #include <dt-bindings/clock/stm32fx-clock.h>
+    rtc@40002800 {
+      compatible = "st,stm32-rtc";
+      reg = <0x40002800 0x400>;
+      clocks = <&rcc 1 CLK_RTC>;
+      assigned-clocks = <&rcc 1 CLK_RTC>;
+      assigned-clock-parents = <&rcc 1 CLK_LSE>;
+      interrupt-parent = <&exti>;
+      interrupts = <17 1>;
+      st,syscfg = <&pwrcfg 0x00 0x100>;
+    };
+
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/stm32mp1-clks.h>
+    rtc@5c004000 {
+      compatible = "st,stm32mp1-rtc";
+      reg = <0x5c004000 0x400>;
+      clocks = <&rcc RTCAPB>, <&rcc RTC>;
+      clock-names = "pclk", "rtc_ck";
+      interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/serial/fsl-lpuart.txt b/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
index f5f5ab0..c904e2e 100644
--- a/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
@@ -10,6 +10,8 @@
     on i.MX7ULP SoC with 32-bit little-endian register organization
   - "fsl,imx8qxp-lpuart" for lpuart compatible with the one integrated
     on i.MX8QXP SoC with 32-bit little-endian register organization
+  - "fsl,imx8qm-lpuart" for lpuart compatible with the one integrated
+    on i.MX8QM SoC with 32-bit little-endian register organization
 - reg : Address and length of the register set for the device
 - interrupts : Should contain uart interrupt
 - clocks : phandle + clock specifier pairs, one for each entry in clock-names
diff --git a/Documentation/devicetree/bindings/serial/rs485.txt b/Documentation/devicetree/bindings/serial/rs485.txt
index b92592d..a7fe93e 100644
--- a/Documentation/devicetree/bindings/serial/rs485.txt
+++ b/Documentation/devicetree/bindings/serial/rs485.txt
@@ -1,31 +1 @@
-* RS485 serial communications
-
-The RTS signal is capable of automatically controlling line direction for
-the built-in half-duplex mode.
-The properties described hereafter shall be given to a half-duplex capable
-UART node.
-
-Optional properties:
-- rs485-rts-delay: prop-encoded-array <a b> where:
-  * a is the delay between rts signal and beginning of data sent in milliseconds.
-      it corresponds to the delay before sending data.
-  * b is the delay between end of data sent and rts signal in milliseconds
-      it corresponds to the delay after sending data and actual release of the line.
-  If this property is not specified, <0 0> is assumed.
-- rs485-rts-active-low: drive RTS low when sending (default is high).
-- linux,rs485-enabled-at-boot-time: empty property telling to enable the rs485
-  feature at boot time. It can be disabled later with proper ioctl.
-- rs485-rx-during-tx: empty property that enables the receiving of data even
-  while sending data.
-
-RS485 example for Atmel USART:
-	usart0: serial@fff8c000 {
-		compatible = "atmel,at91sam9260-usart";
-		reg = <0xfff8c000 0x4000>;
-		interrupts = <7>;
-		atmel,use-dma-rx;
-		atmel,use-dma-tx;
-		linux,rs485-enabled-at-boot-time;
-		rs485-rts-delay = <0 200>;		// in milliseconds
-	};
-
+See rs485.yaml
diff --git a/Documentation/devicetree/bindings/serial/rs485.yaml b/Documentation/devicetree/bindings/serial/rs485.yaml
new file mode 100644
index 0000000..d4beaf1
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/rs485.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/rs485.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: RS485 serial communications Bindings
+
+description: The RTS signal is capable of automatically controlling
+             line direction for the built-in half-duplex mode.
+             The properties described hereafter shall be given to a
+             half-duplex capable UART node.
+
+maintainers:
+  - Rob Herring <robh@kernel.org>
+
+properties:
+  rs485-rts-delay:
+    description: prop-encoded-array <a b>
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32-array
+      - items:
+          items:
+            - description:
+                Delay between rts signal and beginning of data sent in milliseconds.
+                It corresponds to the delay before sending data.
+              default: 0
+              maximum: 1000
+            - description:
+                Delay between end of data sent and rts signal in milliseconds.
+                It corresponds to the delay after sending data and actual release of the line.
+              default: 0
+              maximum: 1000
+
+  rs485-rts-active-low:
+    description: drive RTS low when sending (default is high).
+    $ref: /schemas/types.yaml#/definitions/flag
+
+  linux,rs485-enabled-at-boot-time:
+    description: enables the rs485 feature at boot time. It can be disabled later with the proper ioctl.
+    $ref: /schemas/types.yaml#/definitions/flag
+
+  rs485-rx-during-tx:
+    description: enables the receiving of data even while sending data.
+    $ref: /schemas/types.yaml#/definitions/flag
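
As a usage illustration (not part of the patch): a minimal DTS sketch of a
half-duplex capable UART node consuming the properties described by rs485.yaml.
It is adapted from the Atmel USART example removed from rs485.txt above, so the
compatible string, addresses and delay values are only illustrative.

	serial@fff8c000 {
		compatible = "atmel,at91sam9260-usart";
		reg = <0xfff8c000 0x4000>;
		interrupts = <7>;
		linux,rs485-enabled-at-boot-time;
		rs485-rts-active-low;
		rs485-rts-delay = <0 200>;	/* no delay before, 200 ms after data */
	};
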
diff --git a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
new file mode 100644
index 0000000..238c441
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/st,stm32-uart.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+maintainers:
+  - Erwan Le Ray <erwan.leray@st.com>
+
+title: STMicroelectronics STM32 USART bindings
+
+allOf:
+  - $ref: rs485.yaml
+
+properties:
+  compatible:
+    enum:
+      - st,stm32-uart
+      - st,stm32f7-uart
+      - st,stm32h7-uart
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  label:
+    description: label associated with this uart
+
+  st,hw-flow-ctrl:
+    description: enable hardware flow control
+    $ref: /schemas/types.yaml#/definitions/flag
+
+  dmas:
+    minItems: 1
+    maxItems: 2
+
+  dma-names:
+    items:
+      enum: [ rx, tx ]
+    minItems: 1
+    maxItems: 2
+
+  wakeup-source: true
+
+  rs485-rts-delay: true
+  rs485-rts-active-low: true
+  linux,rs485-enabled-at-boot-time: true
+  rs485-rx-during-tx: true
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/stm32mp1-clks.h>
+    usart1: serial@40011000 {
+      compatible = "st,stm32-uart";
+      reg = <0x40011000 0x400>;
+      interrupts = <37>;
+      clocks = <&rcc 0 164>;
+      dmas = <&dma2 2 4 0x414 0x0>,
+             <&dma2 7 4 0x414 0x0>;
+      dma-names = "rx", "tx";
+      rs485-rts-active-low;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/serial/st,stm32-usart.txt b/Documentation/devicetree/bindings/serial/st,stm32-usart.txt
deleted file mode 100644
index 8620f7f..0000000
--- a/Documentation/devicetree/bindings/serial/st,stm32-usart.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-* STMicroelectronics STM32 USART
-
-Required properties:
-- compatible: can be either:
-  - "st,stm32-uart",
-  - "st,stm32f7-uart",
-  - "st,stm32h7-uart".
-  depending is compatible with stm32(f4), stm32f7 or stm32h7.
-- reg: The address and length of the peripheral registers space
-- interrupts:
-  - The interrupt line for the USART instance,
-  - An optional wake-up interrupt.
-- clocks: The input clock of the USART instance
-
-Optional properties:
-- resets: Must contain the phandle to the reset controller.
-- pinctrl: The reference on the pins configuration
-- st,hw-flow-ctrl: bool flag to enable hardware flow control.
-- rs485-rts-delay, rs485-rx-during-tx, rs485-rts-active-low,
-  linux,rs485-enabled-at-boot-time: see rs485.txt.
-- dmas: phandle(s) to DMA controller node(s). Refer to stm32-dma.txt
-- dma-names: "rx" and/or "tx"
-- wakeup-source: bool flag to indicate this device has wakeup capabilities
-- interrupt-names, if optional wake-up interrupt is used, should be:
-  - "event": the name for the interrupt line of the USART instance
-  - "wakeup" the name for the optional wake-up interrupt
-
-
-Examples:
-usart4: serial@40004c00 {
-	compatible = "st,stm32-uart";
-	reg = <0x40004c00 0x400>;
-	interrupts = <52>;
-	clocks = <&clk_pclk1>;
-	pinctrl-names = "default";
-	pinctrl-0 = <&pinctrl_usart4>;
-};
-
-usart2: serial@40004400 {
-	compatible = "st,stm32-uart";
-	reg = <0x40004400 0x400>;
-	interrupts = <38>;
-	clocks = <&clk_pclk1>;
-	st,hw-flow-ctrl;
-	pinctrl-names = "default";
-	pinctrl-0 = <&pinctrl_usart2 &pinctrl_usart2_rtscts>;
-};
-
-usart1: serial@40011000 {
-	compatible = "st,stm32-uart";
-	reg = <0x40011000 0x400>;
-	interrupts = <37>;
-	clocks = <&rcc 0 164>;
-	dmas = <&dma2 2 4 0x414 0x0>,
-	       <&dma2 7 4 0x414 0x0>;
-	dma-names = "rx", "tx";
-};
diff --git a/Documentation/devicetree/bindings/slimbus/bus.txt b/Documentation/devicetree/bindings/slimbus/bus.txt
index 52fa642..bbe871f 100644
--- a/Documentation/devicetree/bindings/slimbus/bus.txt
+++ b/Documentation/devicetree/bindings/slimbus/bus.txt
@@ -32,6 +32,10 @@
 	 	  Product Code, shall be in lower case hexadecimal with leading
 		  zeroes suppressed
 
+Optional property for a SLIMbus child node, if present:
+- slim-ifc-dev	- Should be a phandle to the SLIMbus Interface device.
+		  Required for devices which deal with streams.
+
 SLIMbus example for Qualcomm's slimbus manager component:
 
 	slim@28080000 {
@@ -43,8 +47,14 @@
 		#address-cells = <2>;
 		#size-cell = <0>;
 
+		codec_ifd: ifd@0,0{
+			compatible = "slim217,60";
+			reg = <0 0>;
+		};
+
 		codec: wcd9310@1,0{
 			compatible = "slim217,60";
 			reg = <1 0>;
+			slim-ifc-dev  = <&codec_ifd>;
 		};
 	};
diff --git a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
index 8f469d8..2bc3677 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
+++ b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
@@ -11,6 +11,7 @@
 power/power-domain.yaml. It provides the power domains defined in
 - include/dt-bindings/power/mt8173-power.h
 - include/dt-bindings/power/mt6797-power.h
+- include/dt-bindings/power/mt6765-power.h
 - include/dt-bindings/power/mt2701-power.h
 - include/dt-bindings/power/mt2712-power.h
 - include/dt-bindings/power/mt7622-power.h
@@ -19,6 +20,7 @@
 - compatible: Should be one of:
 	- "mediatek,mt2701-scpsys"
 	- "mediatek,mt2712-scpsys"
+	- "mediatek,mt6765-scpsys"
 	- "mediatek,mt6797-scpsys"
 	- "mediatek,mt7622-scpsys"
 	- "mediatek,mt7623-scpsys", "mediatek,mt2701-scpsys": For MT7623 SoC
@@ -33,6 +35,10 @@
                       enabled before enabling certain power domains.
 	Required clocks for MT2701 or MT7623: "mm", "mfg", "ethif"
 	Required clocks for MT2712: "mm", "mfg", "venc", "jpgdec", "audio", "vdec"
+	Required clocks for MT6765: MUX: "mm", "mfg"
+				    CG: "mm-0", "mm-1", "mm-2", "mm-3", "isp-0",
+					"isp-1", "cam-0", "cam-1", "cam-2",
+					"cam-3","cam-4"
 	Required clocks for MT6797: "mm", "mfg", "vdec"
 	Required clocks for MT7622 or MT7629: "hif_sel"
 	Required clocks for MT7623A: "ethif"
diff --git a/Documentation/devicetree/bindings/soundwire/qcom,sdw.txt b/Documentation/devicetree/bindings/soundwire/qcom,sdw.txt
new file mode 100644
index 0000000..436547f
--- /dev/null
+++ b/Documentation/devicetree/bindings/soundwire/qcom,sdw.txt
@@ -0,0 +1,167 @@
+Qualcomm SoundWire Controller Bindings
+
+
+This binding describes the Qualcomm SoundWire Controller along with its
+board specific bus parameters.
+
+- compatible:
+	Usage: required
+	Value type: <stringlist>
+	Definition: must be "qcom,soundwire-v<MAJOR>.<MINOR>.<STEP>",
+		    Example:
+			"qcom,soundwire-v1.3.0"
+			"qcom,soundwire-v1.5.0"
+			"qcom,soundwire-v1.6.0"
+- reg:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: the base address and size of SoundWire controller
+		    address space.
+
+- interrupts:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: should specify the SoundWire Controller IRQ
+
+- clock-names:
+	Usage: required
+	Value type: <stringlist>
+	Definition: should be "iface" for SoundWire Controller interface clock
+
+- clocks:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: should specify the SoundWire Controller interface clock
+
+- #sound-dai-cells:
+	Usage: required
+	Value type: <u32>
+	Definition: must be 1 for digital audio interfaces on the controller.
+
+- qcom,dout-ports:
+	Usage: required
+	Value type: <u32>
+	Definition: must be count of data out ports
+
+- qcom,din-ports:
+	Usage: required
+	Value type: <u32>
+	Definition: must be count of data in ports
+
+- qcom,ports-offset1:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: should specify payload transport window offset1 of each
+		    data port. Out ports followed by In ports.
+		    More info in MIPI Alliance SoundWire 1.0 Specifications.
+
+- qcom,ports-offset2:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: should specify payload transport window offset2 of each
+		    data port. Out ports followed by In ports.
+		    More info in MIPI Alliance SoundWire 1.0 Specifications.
+
+- qcom,ports-sinterval-low:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: should be sample interval low of each data port.
+		    Out ports followed by In ports. Used for Sample Interval
+		    calculation.
+		    More info in MIPI Alliance SoundWire 1.0 Specifications.
+
+- qcom,ports-word-length:
+	Usage: optional
+	Value type: <prop-encoded-array>
+	Definition: should be size of payload channel sample.
+		    More info in MIPI Alliance SoundWire 1.0 Specifications.
+
+- qcom,ports-block-pack-mode:
+	Usage: optional
+	Value type: <prop-encoded-array>
+	Definition: should be 0 or 1 to indicate the block packing mode.
+		    0 to indicate Blocks are per Channel
+		    1 to indicate Blocks are per Port.
+		    Out ports followed by In ports.
+		    More info in MIPI Alliance SoundWire 1.0 Specifications.
+
+- qcom,ports-block-group-count:
+	Usage: optional
+	Value type: <prop-encoded-array>
+	Definition: should be in range 1 to 4 to indicate how many sample
+		    intervals are combined into a payload.
+		    Out ports followed by In ports.
+		    More info in MIPI Alliance SoundWire 1.0 Specifications.
+
+- qcom,ports-lane-control:
+	Usage: optional
+	Value type: <prop-encoded-array>
+	Definition: should be in range 0 to 7 to identify which data lane
+		    the data port uses.
+		    Out ports followed by In ports.
+		    More info in MIPI Alliance SoundWire 1.0 Specifications.
+
+- qcom,ports-hstart:
+	Usage: optional
+	Value type: <prop-encoded-array>
+	Definition: should be a number identifying the lowest numbered column in
+		    the SoundWire Frame, i.e. the left edge of the Transport sub-frame
+		    for each port. Values between 0 and 15 are valid.
+		    Out ports followed by In ports.
+		    More info in MIPI Alliance SoundWire 1.0 Specifications.
+
+- qcom,ports-hstop:
+	Usage: optional
+	Value type: <prop-encoded-array>
+	Definition: should be a number identifying the highest numbered column in
+		    the SoundWire Frame, i.e. the right edge of the Transport
+		    sub-frame for each port. Values between 0 and 15 are valid.
+		    Out ports followed by In ports.
+		    More info in MIPI Alliance SoundWire 1.0 Specifications.
+
+- qcom,dports-type:
+	Usage: optional
+	Value type: <prop-encoded-array>
+	Definition: should be one of the following types
+		    0 for reduced port
+		    1 for simple ports
+		    2 for full port
+		    Out ports followed by In ports.
+		    More info in MIPI Alliance SoundWire 1.0 Specifications.
+
+Note:
+	More information on the encoding of these fields can be
+found in the MIPI Alliance SoundWire 1.0 Specifications.
+
+= SoundWire devices
+Each subnode of the bus represents a SoundWire device attached to it.
+The properties of these nodes are defined by the individual bindings.
+
+= EXAMPLE
+The following example represents a SoundWire controller on the DB845c board,
+which has the controller integrated inside the WCD934x codec on the SDM845 SoC.
+
+soundwire: soundwire@c85 {
+	compatible = "qcom,soundwire-v1.3.0";
+	reg = <0xc85 0x20>;
+	interrupts = <20 IRQ_TYPE_EDGE_RISING>;
+	clocks = <&wcc>;
+	clock-names = "iface";
+	#sound-dai-cells = <1>;
+	qcom,dports-type = <0>;
+	qcom,dout-ports	= <6>;
+	qcom,din-ports	= <2>;
+	qcom,ports-sinterval-low = /bits/ 8  <0x07 0x1F 0x3F 0x7 0x1F 0x3F 0x0F 0x0F>;
+	qcom,ports-offset1 = /bits/ 8 <0x01 0x02 0x0C 0x6 0x12 0x0D 0x07 0x0A >;
+	qcom,ports-offset2 = /bits/ 8 <0x00 0x00 0x1F 0x00 0x00 0x1F 0x00 0x00>;
+
+	/* Left Speaker */
+	left{
+		....
+	};
+
+	/* Right Speaker */
+	right{
+		....
+	};
+};
diff --git a/Documentation/devicetree/bindings/soundwire/soundwire-controller.yaml b/Documentation/devicetree/bindings/soundwire/soundwire-controller.yaml
index 1b43993..330924b 100644
--- a/Documentation/devicetree/bindings/soundwire/soundwire-controller.yaml
+++ b/Documentation/devicetree/bindings/soundwire/soundwire-controller.yaml
@@ -69,6 +69,7 @@
             reg = <0 1>;
             powerdown-gpios = <&wcdpinctrl 2 0>;
             #thermal-sensor-cells = <0>;
+            #sound-dai-cells = <0>;
         };
 
         speaker@0,2 {
@@ -76,6 +77,7 @@
             reg = <0 2>;
             powerdown-gpios = <&wcdpinctrl 2 0>;
             #thermal-sensor-cells = <0>;
+            #sound-dai-cells = <0>;
         };
     };
 
diff --git a/Documentation/devicetree/bindings/spi/fsl-spi.txt b/Documentation/devicetree/bindings/spi/fsl-spi.txt
index 411375ea..0654380 100644
--- a/Documentation/devicetree/bindings/spi/fsl-spi.txt
+++ b/Documentation/devicetree/bindings/spi/fsl-spi.txt
@@ -15,13 +15,13 @@
 - clock-frequency : input clock frequency to non FSL_SOC cores
 
 Optional properties:
-- gpios : specifies the gpio pins to be used for chipselects.
+- cs-gpios : specifies the gpio pins to be used for chipselects.
   The gpios will be referred to as reg = <index> in the SPI child nodes.
   If unspecified, a single SPI device without a chip select can be used.
 - fsl,spisel_boot : for the MPC8306 and MPC8309, specifies that the
   SPISEL_BOOT signal is used as chip select for a slave device. Use
   reg = <number of gpios> in the corresponding child node, i.e. 0 if
-  the gpios property is not present.
+  the cs-gpios property is not present.
 
 Example:
 	spi@4c0 {
@@ -31,8 +31,8 @@
 		interrupts = <82 0>;
 		interrupt-parent = <700>;
 		mode = "cpu";
-		gpios = <&gpio 18 1	// device reg=<0>
-			 &gpio 19 1>;	// device reg=<1>
+		cs-gpios = <&gpio 18 1		// device reg=<0>
+			    &gpio 19 1>;	// device reg=<1>
 	};
 
 
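
For clarity (not part of the patch): a sketch of how a child node's reg index
maps onto the cs-gpios list described in this hunk. The flash device and its
properties are hypothetical and only illustrate the chip-select numbering.

	spi@4c0 {
		#address-cells = <1>;
		#size-cells = <0>;
		cs-gpios = <&gpio 18 1		// device reg=<0>
			    &gpio 19 1>;	// device reg=<1>

		flash@0 {
			compatible = "jedec,spi-nor";	/* hypothetical slave */
			reg = <0>;			/* selected via <&gpio 18 1> */
			spi-max-frequency = <20000000>;
		};
	};
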
diff --git a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
new file mode 100644
index 0000000..80bac7a
--- /dev/null
+++ b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: GPL-2.0+
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sram/allwinner,sun4i-a10-system-control.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 System Control Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+description:
+  The SRAM controller found on most Allwinner devices is represented
+  by a regular node for the SRAM controller itself, with sub-nodes
+  representing the SRAM handled by the SRAM controller.
+
+properties:
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 1
+
+  compatible:
+    oneOf:
+      - const: allwinner,sun4i-a10-sram-controller
+        deprecated: true
+      - const: allwinner,sun4i-a10-system-control
+      - const: allwinner,sun5i-a13-system-control
+      - items:
+        - const: allwinner,sun7i-a20-system-control
+        - const: allwinner,sun4i-a10-system-control
+      - const: allwinner,sun8i-a23-system-control
+      - const: allwinner,sun8i-h3-system-control
+      - const: allwinner,sun50i-a64-sram-controller
+        deprecated: true
+      - const: allwinner,sun50i-a64-system-control
+      - const: allwinner,sun50i-h5-system-control
+      - items:
+        - const: allwinner,sun50i-h6-system-control
+        - const: allwinner,sun50i-a64-system-control
+      - items:
+        - const: allwinner,suniv-f1c100s-system-control
+        - const: allwinner,sun4i-a10-system-control
+
+  reg:
+    maxItems: 1
+
+  ranges: true
+
+patternProperties:
+  "^sram@[a-z0-9]+":
+    type: object
+
+    properties:
+      compatible:
+        const: mmio-sram
+
+    patternProperties:
+      "^sram-section?@[a-f0-9]+$":
+        type: object
+
+        properties:
+          compatible:
+            oneOf:
+              - const: allwinner,sun4i-a10-sram-a3-a4
+              - const: allwinner,sun4i-a10-sram-c1
+              - const: allwinner,sun4i-a10-sram-d
+              - const: allwinner,sun50i-a64-sram-c
+              - items:
+                - const: allwinner,sun5i-a13-sram-a3-a4
+                - const: allwinner,sun4i-a10-sram-a3-a4
+              - items:
+                - const: allwinner,sun7i-a20-sram-a3-a4
+                - const: allwinner,sun4i-a10-sram-a3-a4
+              - items:
+                - const: allwinner,sun5i-a13-sram-c1
+                - const: allwinner,sun4i-a10-sram-c1
+              - items:
+                - const: allwinner,sun7i-a20-sram-c1
+                - const: allwinner,sun4i-a10-sram-c1
+              - items:
+                - const: allwinner,sun8i-a23-sram-c1
+                - const: allwinner,sun4i-a10-sram-c1
+              - items:
+                - const: allwinner,sun8i-h3-sram-c1
+                - const: allwinner,sun4i-a10-sram-c1
+              - items:
+                - const: allwinner,sun50i-a64-sram-c1
+                - const: allwinner,sun4i-a10-sram-c1
+              - items:
+                - const: allwinner,sun50i-h5-sram-c1
+                - const: allwinner,sun4i-a10-sram-c1
+              - items:
+                - const: allwinner,sun50i-h6-sram-c1
+                - const: allwinner,sun4i-a10-sram-c1
+              - items:
+                - const: allwinner,sun5i-a13-sram-d
+                - const: allwinner,sun4i-a10-sram-d
+              - items:
+                - const: allwinner,sun7i-a20-sram-d
+                - const: allwinner,sun4i-a10-sram-d
+              - items:
+                - const: allwinner,suniv-f1c100s-sram-d
+                - const: allwinner,sun4i-a10-sram-d
+              - items:
+                - const: allwinner,sun50i-h6-sram-c
+                - const: allwinner,sun50i-a64-sram-c
+
+required:
+  - "#address-cells"
+  - "#size-cells"
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    system-control@1c00000 {
+      compatible = "allwinner,sun4i-a10-system-control";
+      reg = <0x01c00000 0x30>;
+      #address-cells = <1>;
+      #size-cells = <1>;
+      ranges;
+
+      sram_a: sram@00000000 {
+        compatible = "mmio-sram";
+        reg = <0x00000000 0xc000>;
+        #address-cells = <1>;
+        #size-cells = <1>;
+        ranges = <0 0x00000000 0xc000>;
+
+        emac_sram: sram-section@8000 {
+          compatible = "allwinner,sun4i-a10-sram-a3-a4";
+          reg = <0x8000 0x4000>;
+        };
+      };
+    };
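
Not part of the patch: the claimer side of the binding, carried over from the
removed sunxi-sram.txt below as a hedged sketch of how a device references an
exported SRAM section (the argument 1 maps the section to the device).

	emac: ethernet@1c0b000 {
		compatible = "allwinner,sun4i-a10-emac";
		/* ... */
		allwinner,sram = <&emac_sram 1>;
	};
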
diff --git a/Documentation/devicetree/bindings/sram/sram.yaml b/Documentation/devicetree/bindings/sram/sram.yaml
index ee2287a..7b83cc6 100644
--- a/Documentation/devicetree/bindings/sram/sram.yaml
+++ b/Documentation/devicetree/bindings/sram/sram.yaml
@@ -55,7 +55,7 @@
     type: boolean
 
 patternProperties:
-  "^([a-z]*-)?sram@[a-f0-9]+$":
+  "^([a-z]*-)?sram(-section)?@[a-f0-9]+$":
     type: object
     description:
       Each child of the sram node specifies a region of reserved memory.
@@ -64,15 +64,20 @@
         description:
           Should contain a vendor specific string in the form
           <vendor>,[<device>-]<usage>
-        enum:
-          - allwinner,sun9i-a80-smp-sram
-          - amlogic,meson8-smp-sram
-          - amlogic,meson8b-smp-sram
-          - renesas,smp-sram
-          - rockchip,rk3066-smp-sram
-          - samsung,exynos4210-sysram
-          - samsung,exynos4210-sysram-ns
-          - socionext,milbeaut-smp-sram
+        contains:
+          enum:
+            - allwinner,sun4i-a10-sram-a3-a4
+            - allwinner,sun4i-a10-sram-c1
+            - allwinner,sun4i-a10-sram-d
+            - allwinner,sun9i-a80-smp-sram
+            - allwinner,sun50i-a64-sram-c
+            - amlogic,meson8-smp-sram
+            - amlogic,meson8b-smp-sram
+            - renesas,smp-sram
+            - rockchip,rk3066-smp-sram
+            - samsung,exynos4210-sysram
+            - samsung,exynos4210-sysram-ns
+            - socionext,milbeaut-smp-sram
 
       reg:
         description:
diff --git a/Documentation/devicetree/bindings/sram/sunxi-sram.txt b/Documentation/devicetree/bindings/sram/sunxi-sram.txt
deleted file mode 100644
index 380246a..0000000
--- a/Documentation/devicetree/bindings/sram/sunxi-sram.txt
+++ /dev/null
@@ -1,113 +0,0 @@
-Allwinnner SoC SRAM controllers
------------------------------------------------------
-
-The SRAM controller found on most Allwinner devices is represented by
-a regular node for the SRAM controller itself, with sub-nodes
-reprensenting the SRAM handled by the SRAM controller.
-
-Controller Node
----------------
-
-Required properties:
-- compatible : should be:
-    - "allwinner,sun4i-a10-sram-controller" (deprecated)
-    - "allwinner,sun4i-a10-system-control"
-    - "allwinner,sun5i-a13-system-control"
-    - "allwinner,sun7i-a20-system-control", "allwinner,sun4i-a10-system-control"
-    - "allwinner,sun8i-a23-system-control"
-    - "allwinner,sun8i-h3-system-control"
-    - "allwinner,sun50i-a64-sram-controller" (deprecated)
-    - "allwinner,sun50i-a64-system-control"
-    - "allwinner,sun50i-h5-system-control"
-    - "allwinner,sun50i-h6-system-control", "allwinner,sun50i-a64-system-control"
-    - "allwinner,suniv-f1c100s-system-control", "allwinner,sun4i-a10-system-control"
-- reg : sram controller register offset + length
-
-SRAM nodes
-----------
-
-Each SRAM is described using the mmio-sram bindings documented in
-Documentation/devicetree/bindings/sram/sram.txt
-
-Each SRAM will have SRAM sections that are going to be handled by the
-SRAM controller as subnodes. These sections are represented following
-once again the representation described in the mmio-sram binding.
-
-The valid sections compatible for A10 are:
-    - allwinner,sun4i-a10-sram-a3-a4
-    - allwinner,sun4i-a10-sram-c1
-    - allwinner,sun4i-a10-sram-d
-
-The valid sections compatible for A13 are:
-    - allwinner,sun5i-a13-sram-a3-a4, allwinner,sun4i-a10-sram-a3-a4
-    - allwinner,sun5i-a13-sram-c1, allwinner,sun4i-a10-sram-c1
-    - allwinner,sun5i-a13-sram-d, allwinner,sun4i-a10-sram-d
-
-The valid sections compatible for A20 are:
-    - allwinner,sun7i-a20-sram-a3-a4, allwinner,sun4i-a10-sram-a3-a4
-    - allwinner,sun7i-a20-sram-c1, allwinner,sun4i-a10-sram-c1
-    - allwinner,sun7i-a20-sram-d, allwinner,sun4i-a10-sram-d
-
-The valid sections compatible for A23/A33 are:
-    - allwinner,sun8i-a23-sram-c1, allwinner,sun4i-a10-sram-c1
-
-The valid sections compatible for H3 are:
-    - allwinner,sun8i-h3-sram-c1, allwinner,sun4i-a10-sram-c1
-
-The valid sections compatible for A64 are:
-    - allwinner,sun50i-a64-sram-c
-    - allwinner,sun50i-a64-sram-c1, allwinner,sun4i-a10-sram-c1
-
-The valid sections compatible for H5 are:
-    - allwinner,sun50i-h5-sram-c1, allwinner,sun4i-a10-sram-c1
-
-The valid sections compatible for H6 are:
-    - allwinner,sun50i-h6-sram-c, allwinner,sun50i-a64-sram-c
-    - allwinner,sun50i-h6-sram-c1, allwinner,sun4i-a10-sram-c1
-
-The valid sections compatible for F1C100s are:
-    - allwinner,suniv-f1c100s-sram-d, allwinner,sun4i-a10-sram-d
-
-Devices using SRAM sections
----------------------------
-
-Some devices need to request to the SRAM controller to map an SRAM for
-their exclusive use.
-
-The relationship between such a device and an SRAM section is
-expressed through the allwinner,sram property, that will take a
-phandle and an argument.
-
-This valid values for this argument are:
-  - 0: CPU
-  - 1: Device
-
-Example
--------
-system-control@1c00000 {
-	compatible = "allwinner,sun4i-a10-system-control";
-	reg = <0x01c00000 0x30>;
-	#address-cells = <1>;
-	#size-cells = <1>;
-	ranges;
-
-	sram_a: sram@00000000 {
-		compatible = "mmio-sram";
-		reg = <0x00000000 0xc000>;
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges = <0 0x00000000 0xc000>;
-
-		emac_sram: sram-section@8000 {
-			compatible = "allwinner,sun4i-a10-sram-a3-a4";
-			reg = <0x8000 0x4000>;
-		};
-	};
-};
-
-emac: ethernet@1c0b000 {
-	compatible = "allwinner,sun4i-a10-emac";
-	...
-
-	allwinner,sram = <&emac_sram 1>;
-};
diff --git a/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml b/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
index b3f0fe9..102f319 100644
--- a/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
+++ b/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
@@ -99,22 +99,22 @@
       compatible = "arm,armv7-timer-mem";
       #address-cells = <1>;
       #size-cells = <1>;
-      ranges;
+      ranges = <0 0xf0001000 0x1000>;
       reg = <0xf0000000 0x1000>;
       clock-frequency = <50000000>;
 
-      frame@f0001000 {
+      frame@0 {
         frame-number = <0>;
         interrupts = <0 13 0x8>,
                <0 14 0x8>;
-        reg = <0xf0001000 0x1000>,
-              <0xf0002000 0x1000>;
+        reg = <0x0000 0x1000>,
+              <0x1000 0x1000>;
       };
 
-      frame@f0003000 {
+      frame@2000 {
         frame-number = <1>;
         interrupts = <0 15 0x8>;
-        reg = <0xf0003000 0x1000>;
+        reg = <0x2000 0x1000>;
       };
     };
 
diff --git a/Documentation/devicetree/bindings/timer/renesas,tmu.txt b/Documentation/devicetree/bindings/timer/renesas,tmu.txt
index 9dff7e5..29159f4 100644
--- a/Documentation/devicetree/bindings/timer/renesas,tmu.txt
+++ b/Documentation/devicetree/bindings/timer/renesas,tmu.txt
@@ -11,6 +11,7 @@
   - compatible: must contain one or more of the following:
     - "renesas,tmu-r8a7740" for the r8a7740 TMU
     - "renesas,tmu-r8a774a1" for the r8a774A1 TMU
+    - "renesas,tmu-r8a774b1" for the r8a774B1 TMU
     - "renesas,tmu-r8a774c0" for the r8a774C0 TMU
     - "renesas,tmu-r8a7778" for the r8a7778 TMU
     - "renesas,tmu-r8a7779" for the r8a7779 TMU
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 765fd1c1..978de7d 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -104,6 +104,10 @@
           - infineon,slb9645tt
             # Infineon TLV493D-A1B6 I2C 3D Magnetic Sensor
           - infineon,tlv493d-a1b6
+            # Infineon Multi-phase Digital VR Controller xdpe12254
+          - infineon,xdpe12254
+            # Infineon Multi-phase Digital VR Controller xdpe12284
+          - infineon,xdpe12284
             # Inspur Power System power supply unit version 1
           - inspur,ipsps1
             # Intersil ISL29028 Ambient Light and Proximity Sensor
@@ -132,6 +136,8 @@
           - maxim,max6621
             # 9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
           - maxim,max6625
+            # 3-Channel Remote Temperature Sensor
+          - maxim,max31730
             # mCube 3-axis 8-bit digital accelerometer
           - mcube,mc3230
             # MEMSIC 2-axis 8-bit digital accelerometer
@@ -354,6 +360,10 @@
           - ti,tmp103
             # Digital Temperature Sensor
           - ti,tmp275
+            # TI Dual channel DCAP+ multiphase controller TPS53679
+          - ti,tps53679
+            # TI Dual channel DCAP+ multiphase controller TPS53688
+          - ti,tps53688
             # Winbond/Nuvoton H/W Monitor
           - winbond,w83793
             # i2c trusted platform module (TPM)
diff --git a/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml b/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
index 4efb77b..267fce1 100644
--- a/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
+++ b/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
@@ -107,7 +107,7 @@
               reg = <0xff400000 0x40000>;
               interrupts = <31>;
               clocks = <&clkc_usb1>;
-              clock-names = "ddr";
+              clock-names = "otg";
               phys = <&usb2_phy1>;
               dr_mode = "peripheral";
               g-rx-fifo-size = <192>;
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
index cfc9f40..51376cbe 100644
--- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
@@ -15,6 +15,10 @@
 	"qcom,ci-hdrc"
 	"chipidea,usb2"
 	"xlnx,zynq-usb-2.20a"
+	"nvidia,tegra20-udc"
+	"nvidia,tegra30-udc"
+	"nvidia,tegra114-udc"
+	"nvidia,tegra124-udc"
 - reg: base address and length of the registers
 - interrupts: interrupt for the USB controller
 
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
deleted file mode 100644
index aafff3a..0000000
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ /dev/null
@@ -1,64 +0,0 @@
-Platform DesignWare HS OTG USB 2.0 controller
------------------------------------------------------
-
-Required properties:
-- compatible : One of:
-  - brcm,bcm2835-usb: The DWC2 USB controller instance in the BCM2835 SoC.
-  - hisilicon,hi6220-usb: The DWC2 USB controller instance in the hi6220 SoC.
-  - rockchip,rk3066-usb: The DWC2 USB controller instance in the rk3066 Soc;
-  - "rockchip,px30-usb", "rockchip,rk3066-usb", "snps,dwc2": for px30 Soc;
-  - "rockchip,rk3188-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3188 Soc;
-  - "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc;
-  - "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs;
-  - "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs;
-  - "amlogic,meson8-usb": The DWC2 USB controller instance in Amlogic Meson8 SoCs;
-  - "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs;
-  - "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs;
-  - "amlogic,meson-g12a-usb": The DWC2 USB controller instance in Amlogic G12A SoCs;
-  - "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs;
-  - snps,dwc2: A generic DWC2 USB controller with default parameters.
-  - "st,stm32f4x9-fsotg": The DWC2 USB FS/HS controller instance in STM32F4x9 SoCs
-  configured in FS mode;
-  - "st,stm32f4x9-hsotg": The DWC2 USB HS controller instance in STM32F4x9 SoCs
-  configured in HS mode;
-  - "st,stm32f7-hsotg": The DWC2 USB HS controller instance in STM32F7 SoCs
-    configured in HS mode;
-- reg : Should contain 1 register range (address and length)
-- interrupts : Should contain 1 interrupt
-- clocks: clock provider specifier
-- clock-names: shall be "otg"
-Refer to clk/clock-bindings.txt for generic clock consumer properties
-
-Optional properties:
-- phys: phy provider specifier
-- phy-names: shall be "usb2-phy"
-- vbus-supply: reference to the VBUS regulator. Depending on the current mode
-  this is enabled (in "host" mode") or disabled (in "peripheral" mode). The
-  regulator is updated if the controller is configured in "otg" mode and the
-  status changes between "host" and "peripheral".
-Refer to phy/phy-bindings.txt for generic phy consumer properties
-- dr_mode: shall be one of "host", "peripheral" and "otg"
-  Refer to usb/generic.txt
-- g-rx-fifo-size: size of rx fifo size in gadget mode.
-- g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode.
-- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode.
-- snps,need-phy-for-wake: If present indicates that the phy needs to be left
-                          on for remote wakeup during suspend.
-- snps,reset-phy-on-wake: If present indicates that we need to reset the PHY when
-                          we detect a wakeup.  This is due to a hardware errata.
-
-Deprecated properties:
-- g-use-dma: gadget DMA mode is automatically detected
-
-Example:
-
-        usb@101c0000 {
-                compatible = "ralink,rt3050-usb, snps,dwc2";
-                reg = <0x101c0000 40000>;
-                interrupts = <18>;
-		clocks = <&usb_otg_ahb_clk>;
-		clock-names = "otg";
-		phys = <&usbphy>;
-		phy-names = "usb2-phy";
-		snps,need-phy-for-wake;
-        };
diff --git a/Documentation/devicetree/bindings/usb/dwc2.yaml b/Documentation/devicetree/bindings/usb/dwc2.yaml
new file mode 100644
index 0000000..71cf7ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/dwc2.yaml
@@ -0,0 +1,151 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/dwc2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: DesignWare HS OTG USB 2.0 controller Bindings
+
+maintainers:
+  - Rob Herring <robh@kernel.org>
+
+properties:
+  compatible:
+    oneOf:
+      - const: brcm,bcm2835-usb
+      - const: hisilicon,hi6220-usb
+      - items:
+          - const: rockchip,rk3066-usb
+          - const: snps,dwc2
+      - items:
+          - const: rockchip,px30-usb
+          - const: rockchip,rk3066-usb
+          - const: snps,dwc2
+      - items:
+          - const: rockchip,rk3036-usb
+          - const: rockchip,rk3066-usb
+          - const: snps,dwc2
+      - items:
+          - const: rockchip,rv1108-usb
+          - const: rockchip,rk3066-usb
+          - const: snps,dwc2
+      - items:
+          - const: rockchip,rk3188-usb
+          - const: rockchip,rk3066-usb
+          - const: snps,dwc2
+      - items:
+          - const: rockchip,rk3228-usb
+          - const: rockchip,rk3066-usb
+          - const: snps,dwc2
+      - items:
+          - const: rockchip,rk3288-usb
+          - const: rockchip,rk3066-usb
+          - const: snps,dwc2
+      - const: lantiq,arx100-usb
+      - const: lantiq,xrx200-usb
+      - items:
+          - const: amlogic,meson8-usb
+          - const: snps,dwc2
+      - items:
+          - const: amlogic,meson8b-usb
+          - const: snps,dwc2
+      - const: amlogic,meson-gxbb-usb
+      - items:
+          - const: amlogic,meson-g12a-usb
+          - const: snps,dwc2
+      - const: amcc,dwc-otg
+      - const: snps,dwc2
+      - const: st,stm32f4x9-fsotg
+      - const: st,stm32f4x9-hsotg
+      - const: st,stm32f7-hsotg
+      - const: samsung,s3c6400-hsotg
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: otg
+
+  resets:
+    items:
+     - description: common reset
+     - description: ecc reset
+    minItems: 1
+
+  reset-names:
+    items:
+     - const: dwc2
+     - const: dwc2-ecc
+    minItems: 1
+
+  phys:
+    maxItems: 1
+
+  phy-names:
+    const: usb2-phy
+
+  vbus-supply:
+    description: reference to the VBUS regulator. Depending on the current mode
+      this is enabled (in "host" mode) or disabled (in "peripheral" mode). The
+      regulator is updated if the controller is configured in "otg" mode and the
+      status changes between "host" and "peripheral".
+
+  vusb_d-supply:
+    description: phandle to the voltage regulator of the digital section.
+
+  vusb_a-supply:
+    description: phandle to the voltage regulator of the analog section.
+
+  dr_mode:
+    enum: [host, peripheral, otg]
+
+  g-rx-fifo-size:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: size of the rx fifo in gadget mode.
+
+  g-np-tx-fifo-size:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: size of the non-periodic tx fifo in gadget mode.
+
+  g-tx-fifo-size:
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+    description: size of periodic tx fifo per endpoint (except ep0) in gadget mode.
+
+  snps,need-phy-for-wake:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description: If present, indicates that the PHY needs to be left on for remote wakeup during suspend.
+
+  snps,reset-phy-on-wake:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description: If present, indicates that we need to reset the PHY when we detect a wakeup.
+                 This is due to a hardware erratum.
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+      usb@101c0000 {
+        compatible = "rockchip,rk3066-usb", "snps,dwc2";
+        reg = <0x10180000 0x40000>;
+        interrupts = <18>;
+        clocks = <&usb_otg_ahb_clk>;
+        clock-names = "otg";
+        phys = <&usbphy>;
+        phy-names = "usb2-phy";
+      };
+
+...
diff --git a/Documentation/devicetree/bindings/usb/generic.txt b/Documentation/devicetree/bindings/usb/generic.txt
index cf5a1ad..e6790d2 100644
--- a/Documentation/devicetree/bindings/usb/generic.txt
+++ b/Documentation/devicetree/bindings/usb/generic.txt
@@ -2,10 +2,11 @@
 
 Optional properties:
  - maximum-speed: tells USB controllers we want to work up to a certain
-			speed. Valid arguments are "super-speed", "high-speed",
-			"full-speed" and "low-speed". In case this isn't passed
-			via DT, USB controllers should default to their maximum
-			HW capability.
+			speed. Valid arguments are "super-speed-plus",
+			"super-speed", "high-speed", "full-speed" and
+			"low-speed". In case this isn't passed via DT, USB
+			controllers should default to their maximum HW
+			capability.
  - dr_mode: tells Dual-Role USB controllers that we want to work on a
 			particular mode. Valid arguments are "host",
 			"peripheral" and "otg". In case this attribute isn't
diff --git a/Documentation/devicetree/bindings/usb/mediatek,musb.txt b/Documentation/devicetree/bindings/usb/mediatek,musb.txt
new file mode 100644
index 0000000..2b8a87c
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/mediatek,musb.txt
@@ -0,0 +1,57 @@
+MediaTek musb DRD/OTG controller
+-------------------------------------------
+
+Required properties:
+ - compatible      : should be one of:
+                     "mediatek,mt2701-musb"
+                     ...
+                     followed by "mediatek,mtk-musb"
+ - reg             : specifies physical base address and size of
+                     the registers
+ - interrupts      : interrupt used by musb controller
+ - interrupt-names : must be "mc"
+ - phys            : PHY specifier for the OTG phy
+ - dr_mode         : should be one of "host", "peripheral" or "otg",
+                     refer to usb/generic.txt
+ - clocks          : a list of phandle + clock-specifier pairs, one for
+                     each entry in clock-names
+ - clock-names     : must contain "main", "mcu", "univpll"
+                     for clocks of controller
+
+Optional properties:
+ - power-domains   : a phandle to USB power domain node to control USB's
+                     MTCMOS
+
+Required child nodes:
+ usb connector node as defined in bindings/connector/usb-connector.txt
+Optional properties:
+ - id-gpios        : input GPIO for USB ID pin.
+ - vbus-gpios      : input GPIO for USB VBUS pin.
+ - vbus-supply     : reference to the VBUS regulator, needed when dual-role
+                     mode is supported
+ - usb-role-switch : use USB Role Switch to support dual-role switching, see
+                     usb/generic.txt.
+
+Example:
+
+usb2: usb@11200000 {
+	compatible = "mediatek,mt2701-musb",
+		     "mediatek,mtk-musb";
+	reg = <0 0x11200000 0 0x1000>;
+	interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
+	interrupt-names = "mc";
+	phys = <&u2port2 PHY_TYPE_USB2>;
+	dr_mode = "otg";
+	clocks = <&pericfg CLK_PERI_USB0>,
+		 <&pericfg CLK_PERI_USB0_MCU>,
+		 <&pericfg CLK_PERI_USB_SLV>;
+	clock-names = "main","mcu","univpll";
+	power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
+	usb-role-switch;
+	connector{
+		compatible = "gpio-usb-b-connector", "usb-b-connector";
+		type = "micro";
+		id-gpios = <&pio 44 GPIO_ACTIVE_HIGH>;
+		vbus-supply = <&usb_vbus>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 6046f45..9e67944 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -109,6 +109,8 @@
     description: Artesyn Embedded Technologies Inc.
   "^asahi-kasei,.*":
     description: Asahi Kasei Corp.
+  "^asc,.*":
+    description: All Sensors Corporation
   "^aspeed,.*":
     description: ASPEED Technology Inc.
   "^asus,.*":
@@ -149,6 +151,8 @@
     description: Bosch Sensortec GmbH
   "^boundary,.*":
     description: Boundary Devices Inc.
+  "^broadmobi,.*":
+    description: Shanghai Broadmobi Communication Technology Co.,Ltd.
   "^brcm,.*":
     description: Broadcom Corporation
   "^buffalo,.*":
@@ -157,6 +161,8 @@
     description: B&R Industrial Automation GmbH
   "^bticino,.*":
     description: Bticino International
+  "^calaosystems,.*":
+    description: CALAO Systems SAS
   "^calxeda,.*":
     description: Calxeda
   "^capella,.*":
@@ -475,6 +481,8 @@
     description: Shenzhen Jesurun Electronics Business Dept.
   "^jianda,.*":
     description: Jiandangjing Technology Co., Ltd.
+  "^kam,.*":
+    description: Kamstrup A/S
   "^karo,.*":
     description: Ka-Ro electronics GmbH
   "^keithkoep,.*":
@@ -513,6 +521,8 @@
     description: Lantiq Semiconductor
   "^lattice,.*":
     description: Lattice Semiconductor
+  "^leadtek,.*":
+    description: Shenzhen Leadtek Technology Co., Ltd.
   "^leez,.*":
     description: Leez
   "^lego,.*":
@@ -545,6 +555,8 @@
     description: Logic PD, Inc.
   "^longcheer,.*":
     description: Longcheer Technology (Shanghai) Co., Ltd.
+  "^loongson,.*":
+    description: Loongson Technology Corporation Limited
   "^lsi,.*":
     description: LSI Corp. (LSI Logic)
   "^lwn,.*":
@@ -613,6 +625,8 @@
     description: Moxa Inc.
   "^mpl,.*":
     description: MPL AG
+  "^mps,.*":
+    description: Monolithic Power Systems Inc.
   "^mqmaker,.*":
     description: mqmaker Inc.
   "^mscc,.*":
@@ -717,6 +731,8 @@
     description: Panasonic Corporation
   "^parade,.*":
     description: Parade Technologies Inc.
+  "^parallax,.*":
+    description: Parallax Inc.
   "^pda,.*":
     description: Precision Design Associates, Inc.
   "^pericom,.*":
@@ -823,6 +839,8 @@
     description: Sancloud Ltd
   "^sandisk,.*":
     description: Sandisk Corporation
+  "^satoz,.*":
+    description: Satoz International Co., Ltd
   "^sbs,.*":
     description: Smart Battery System
   "^schindler,.*":
@@ -911,8 +929,12 @@
     description: Startek
   "^ste,.*":
     description: ST-Ericsson
+    deprecated: true
   "^stericsson,.*":
     description: ST-Ericsson
+  "^st-ericsson,.*":
+    description: ST-Ericsson
+    deprecated: true
   "^summit,.*":
     description: Summit microelectronics
   "^sunchip,.*":
@@ -1012,6 +1034,8 @@
     description: Variscite Ltd.
   "^via,.*":
     description: VIA Technologies, Inc.
+  "^videostrong,.*":
+    description: Videostrong Technology Co., Ltd.
   "^virtio,.*":
     description: Virtual I/O Device Specification, developed by the OASIS consortium
   "^vishay,.*":
@@ -1056,10 +1080,16 @@
     description: Extreme Engineering Solutions (X-ES)
   "^xillybus,.*":
     description: Xillybus Ltd.
+  "^xinpeng,.*":
+    description: Shenzhen Xinpeng Technology Co., Ltd
   "^xlnx,.*":
     description: Xilinx
   "^xunlong,.*":
     description: Shenzhen Xunlong Software CO.,Limited
+  "^xylon,.*":
+    description: Xylon
+  "^yna,.*":
+    description: YSH & ATIL
   "^yones-toptech,.*":
     description: Yones Toptech Co., Ltd.
   "^ysoft,.*":
diff --git a/Documentation/devicetree/bindings/watchdog/da9062-wdt.txt b/Documentation/devicetree/bindings/watchdog/da9062-wdt.txt
index b935b52..950e4fb 100644
--- a/Documentation/devicetree/bindings/watchdog/da9062-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/da9062-wdt.txt
@@ -6,6 +6,11 @@
 	"dlg,da9061-watchdog", "dlg,da9062-watchdog"
 	"dlg,da9062-watchdog"
 
+Optional properties:
+- dlg,use-sw-pm: Add this property to disable the watchdog during suspend.
+	Only use this option if you can't use the watchdog's automatic suspend
+	function during a suspend (see register CONTROL_B).
+
 Example: DA9062
 
 	pmic0: da9062@58 {
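
For reference (not part of the patch): a sketch of how the new dlg,use-sw-pm
property would sit in the DA9062 example; the surrounding PMIC properties are
abbreviated and only illustrative.

	pmic0: da9062@58 {
		compatible = "dlg,da9062";
		reg = <0x58>;

		watchdog {
			compatible = "dlg,da9062-watchdog";
			dlg,use-sw-pm;
		};
	};
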
diff --git a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
index fd380eb..4dd36bd 100644
--- a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
@@ -4,22 +4,27 @@
 
 - compatible should contain:
 	"mediatek,mt2701-wdt", "mediatek,mt6589-wdt": for MT2701
+	"mediatek,mt2712-wdt", "mediatek,mt6589-wdt": for MT2712
 	"mediatek,mt6589-wdt": for MT6589
 	"mediatek,mt6797-wdt", "mediatek,mt6589-wdt": for MT6797
 	"mediatek,mt7622-wdt", "mediatek,mt6589-wdt": for MT7622
 	"mediatek,mt7623-wdt", "mediatek,mt6589-wdt": for MT7623
 	"mediatek,mt7629-wdt", "mediatek,mt6589-wdt": for MT7629
+	"mediatek,mt8183-wdt", "mediatek,mt6589-wdt": for MT8183
 	"mediatek,mt8516-wdt", "mediatek,mt6589-wdt": for MT8516
 
 - reg : Specifies base physical address and size of the registers.
 
 Optional properties:
 - timeout-sec: contains the watchdog timeout in seconds.
+- #reset-cells: Should be 1.
 
 Example:
 
-wdt: watchdog@10000000 {
-	compatible = "mediatek,mt6589-wdt";
-	reg = <0x10000000 0x18>;
+watchdog: watchdog@10007000 {
+	compatible = "mediatek,mt8183-wdt",
+		     "mediatek,mt6589-wdt";
+	reg = <0 0x10007000 0 0x100>;
 	timeout-sec = <10>;
+	#reset-cells = <1>;
 };
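
Not part of the patch: with #reset-cells = <1> the watchdog also acts as a
reset controller, so a peripheral would reference it roughly as below; the
consumer node and reset index are hypothetical.

	thermal: thermal@1100b000 {
		/* ... */
		resets = <&watchdog 4>;	/* index taken from the SoC reset map */
	};
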
diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas,wdt.txt
index a5bf04d..79b3c62 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas,wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/renesas,wdt.txt
@@ -19,6 +19,7 @@
 	         - "renesas,r8a7794-wdt" (R-Car E2)
 	         - "renesas,r8a7795-wdt" (R-Car H3)
 	         - "renesas,r8a7796-wdt" (R-Car M3-W)
+	         - "renesas,r8a77961-wdt" (R-Car M3-W+)
 		 - "renesas,r8a77965-wdt" (R-Car M3-N)
 	         - "renesas,r8a77970-wdt" (R-Car V3M)
 	         - "renesas,r8a77990-wdt" (R-Car E3)
diff --git a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.txt b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.txt
deleted file mode 100644
index d8f4430..0000000
--- a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-STM32 Independent WatchDoG (IWDG)
----------------------------------
-
-Required properties:
-- compatible: Should be either:
-  - "st,stm32-iwdg"
-  - "st,stm32mp1-iwdg"
-- reg: Physical base address and length of the registers set for the device
-- clocks: Reference to the clock entry lsi. Additional pclk clock entry
-  is required only for st,stm32mp1-iwdg.
-- clock-names: Name of the clocks used.
-  "lsi" for st,stm32-iwdg
-  "lsi", "pclk" for st,stm32mp1-iwdg
-
-Optional Properties:
-- timeout-sec: Watchdog timeout value in seconds.
-
-Example:
-
-iwdg: watchdog@40003000 {
-	compatible = "st,stm32-iwdg";
-	reg = <0x40003000 0x400>;
-	clocks = <&clk_lsi>;
-	clock-names = "lsi";
-	timeout-sec = <32>;
-};
diff --git a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml
new file mode 100644
index 0000000..a27c504
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/st,stm32-iwdg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 Independent WatchDoG (IWDG) bindings
+
+maintainers:
+  - Yannick Fertre <yannick.fertre@st.com>
+  - Christophe Roullier <christophe.roullier@st.com>
+
+allOf:
+  - $ref: "watchdog.yaml#"
+
+properties:
+  compatible:
+    enum:
+      - st,stm32-iwdg
+      - st,stm32mp1-iwdg
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Low speed clock
+      - description: Optional peripheral clock
+    minItems: 1
+    maxItems: 2
+
+  clock-names:
+    items:
+      enum:
+        - lsi
+        - pclk
+    minItems: 1
+    maxItems: 2
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+
+examples:
+  - |
+    #include <dt-bindings/clock/stm32mp1-clks.h>
+    watchdog@5a002000 {
+      compatible = "st,stm32mp1-iwdg";
+      reg = <0x5a002000 0x400>;
+      clocks = <&rcc IWDG2>, <&rcc CK_LSI>;
+      clock-names = "pclk", "lsi";
+      timeout-sec = <32>;
+    };
+
+...
diff --git a/Documentation/devicetree/writing-schema.rst b/Documentation/devicetree/writing-schema.rst
index efcd5d2..7635ab2 100644
--- a/Documentation/devicetree/writing-schema.rst
+++ b/Documentation/devicetree/writing-schema.rst
@@ -121,7 +121,13 @@
 installed. Ensure they are in your PATH (~/.local/bin by default).
 
 dtc must also be built with YAML output support enabled. This requires that
-libyaml and its headers be installed on the host system.
+libyaml and its headers be installed on the host system. For some distributions
+that involves installing the development package, such as:
+
+Debian:
+  apt-get install libyaml-dev
+Fedora:
+  dnf -y install libyaml-devel
 
 Running checks
 ~~~~~~~~~~~~~~
diff --git a/Documentation/doc-guide/contributing.rst b/Documentation/doc-guide/contributing.rst
new file mode 100644
index 0000000..67ee369
--- /dev/null
+++ b/Documentation/doc-guide/contributing.rst
@@ -0,0 +1,295 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+How to help improve kernel documentation
+========================================
+
+Documentation is an important part of any software-development project.
+Good documentation helps to bring new developers in and helps established
+developers work more effectively.  Without top-quality documentation, a lot
+of time is wasted in reverse-engineering the code and making avoidable
+mistakes.
+
+Unfortunately, the kernel's documentation currently falls far short of what
+it needs to be to support a project of this size and importance.
+
+This guide is for contributors who would like to improve that situation.
+Kernel documentation improvements can be made by developers at a variety of
+skill levels; they are a relatively easy way to learn the kernel process in
+general and find a place in the community.  The bulk of what follows is the
+documentation maintainer's list of tasks that most urgently need to be
+done.
+
+The documentation TODO list
+---------------------------
+
+There is an endless list of tasks that need to be carried out to get our
+documentation to where it should be.  This list contains a number of
+important items, but is far from exhaustive; if you see a different way to
+improve the documentation, please do not hold back!
+
+Addressing warnings
+~~~~~~~~~~~~~~~~~~~
+
+The documentation build currently spews out an unbelievable number of
+warnings.  When you have that many, you might as well have none at all;
+people ignore them, and they will never notice when their work adds new
+ones.  For this reason, eliminating warnings is one of the highest-priority
+tasks on the documentation TODO list.  The task itself is reasonably
+straightforward, but it must be approached in the right way to be
+successful.
+
+Warnings issued by a compiler for C code can often be dismissed as false
+positives, leading to patches aimed at simply shutting the compiler up.
+Warnings from the documentation build almost always point at a real
+problem; making those warnings go away requires understanding the problem
+and fixing it at its source.  For this reason, patches fixing documentation
+warnings should probably not say "fix a warning" in the changelog title;
+they should indicate the real problem that has been fixed.
+
+Another important point is that documentation warnings are often created by
+problems in kerneldoc comments in C code.  While the documentation
+maintainer appreciates being copied on fixes for these warnings, the
+documentation tree is often not the right one to actually carry those
+fixes; they should go to the maintainer of the subsystem in question.
+
+For example, in a documentation build I grabbed a pair of warnings nearly
+at random::
+
+  ./drivers/devfreq/devfreq.c:1818: warning: bad line:
+  	- Resource-managed devfreq_register_notifier()
+  ./drivers/devfreq/devfreq.c:1854: warning: bad line:
+	- Resource-managed devfreq_unregister_notifier()
+
+(The lines were split for readability).
+
+A quick look at the source file named above turned up a couple of kerneldoc
+comments that look like this::
+
+  /**
+   * devm_devfreq_register_notifier()
+	  - Resource-managed devfreq_register_notifier()
+   * @dev:	The devfreq user device. (parent of devfreq)
+   * @devfreq:	The devfreq object.
+   * @nb:		The notifier block to be unregistered.
+   * @list:	DEVFREQ_TRANSITION_NOTIFIER.
+   */
+
+The problem is the missing "*", which confuses the build system's
+simplistic idea of what C comment blocks look like.  This problem had been
+present since that comment was added in 2016 — a full four years.  Fixing
+it was a matter of adding the missing asterisks.  A quick look at the
+history for that file showed what the normal format for subject lines is,
+and ``scripts/get_maintainer.pl`` told me who should receive it.  The
+resulting patch looked like this::
+
+  [PATCH] PM / devfreq: Fix two malformed kerneldoc comments
+
+  Two kerneldoc comments in devfreq.c fail to adhere to the required format,
+  resulting in these doc-build warnings:
+
+    ./drivers/devfreq/devfreq.c:1818: warning: bad line:
+  	  - Resource-managed devfreq_register_notifier()
+    ./drivers/devfreq/devfreq.c:1854: warning: bad line:
+	  - Resource-managed devfreq_unregister_notifier()
+
+  Add a couple of missing asterisks and make kerneldoc a little happier.
+
+  Signed-off-by: Jonathan Corbet <corbet@lwn.net>
+  ---
+   drivers/devfreq/devfreq.c | 4 ++--
+   1 file changed, 2 insertions(+), 2 deletions(-)
+
+  diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+  index 57f6944d65a6..00c9b80b3d33 100644
+  --- a/drivers/devfreq/devfreq.c
+  +++ b/drivers/devfreq/devfreq.c
+  @@ -1814,7 +1814,7 @@ static void devm_devfreq_notifier_release(struct device *dev, void *res)
+
+   /**
+    * devm_devfreq_register_notifier()
+  -	- Resource-managed devfreq_register_notifier()
+  + *	- Resource-managed devfreq_register_notifier()
+    * @dev:	The devfreq user device. (parent of devfreq)
+    * @devfreq:	The devfreq object.
+    * @nb:		The notifier block to be unregistered.
+  @@ -1850,7 +1850,7 @@ EXPORT_SYMBOL(devm_devfreq_register_notifier);
+
+   /**
+    * devm_devfreq_unregister_notifier()
+  -	- Resource-managed devfreq_unregister_notifier()
+  + *	- Resource-managed devfreq_unregister_notifier()
+    * @dev:	The devfreq user device. (parent of devfreq)
+    * @devfreq:	The devfreq object.
+    * @nb:		The notifier block to be unregistered.
+  --
+  2.24.1
+
+The entire process only took a few minutes.  Of course, I then found that
+somebody else had fixed it in a separate tree, highlighting another lesson:
+always check linux-next to see if a problem has been fixed before you dig
+into it.
+
+Other fixes will take longer, especially those relating to structure
+members or function parameters that lack documentation.  In such cases, it
+is necessary to work out what the role of those members or parameters is
+and describe them correctly.  Overall, this task gets a little tedious at
+times, but it's highly important.  If we can actually eliminate warnings
+from the documentation build, then we can start expecting developers to
+avoid adding new ones.
+
+Languishing kerneldoc comments
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Developers are encouraged to write kerneldoc comments for their code, but
+many of those comments are never pulled into the docs build.  That makes
+this information harder to find and, for example, makes Sphinx unable to
+generate links to that documentation.  Adding ``kernel-doc`` directives to
+the documentation to bring those comments in can help the community derive
+the full value of the work that has gone into creating them.
+
+The ``scripts/find-unused-docs.sh`` tool can be used to find these
+overlooked comments.
+
+Note that the most value comes from pulling in the documentation for
+exported functions and data structures.  Many subsystems also have
+kerneldoc comments for internal use; those should not be pulled into the
+documentation build unless they are placed in a document that is
+specifically aimed at developers working within the relevant subsystem.
+
+
+Typo fixes
+~~~~~~~~~~
+
+Fixing typographical or formatting errors in the documentation is a quick
+way to figure out how to create and send patches, and it is a useful
+service.  I am always willing to accept such patches.  That said, once you
+have fixed a few, please consider moving on to more advanced tasks, leaving
+some typos for the next beginner to address.
+
+Please note that some things are *not* typos and should not be "fixed":
+
+ - Both American and British English spellings are allowed within the
+   kernel documentation.  There is no need to fix one by replacing it with
+   the other.
+
+ - The question of whether a period should be followed by one or two spaces
+   is not to be debated in the context of kernel documentation.  Other
+   areas of rational disagreement, such as the "Oxford comma", are also
+   off-topic here.
+
+As with any patch to any project, please consider whether your change is
+really making things better.
+
+Ancient documentation
+~~~~~~~~~~~~~~~~~~~~~
+
+Some kernel documentation is current, maintained, and useful.  Some
+documentation is ... not.  Dusty, old, and inaccurate documentation can
+mislead readers and casts doubt on our documentation as a whole.  Anything
+that can be done to address such problems is more than welcome.
+
+Whenever you are working with a document, please consider whether it is
+current, whether it needs updating, or whether it should perhaps be removed
+altogether.  There are a number of warning signs that you can pay attention
+to here:
+
+ - References to 2.x kernels
+ - Pointers to SourceForge repositories
+ - Nothing but typo fixes in the history for several years
+ - Discussion of pre-Git workflows
+
+The best thing to do, of course, would be to bring the documentation
+current, adding whatever information is needed.  Such work often requires
+the cooperation of developers familiar with the subsystem in question, of
+course.  Developers are often more than willing to cooperate with people
+working to improve the documentation when asked nicely, and when their
+answers are listened to and acted upon.
+
+Some documentation is beyond hope; we occasionally find documents that
+refer to code that was removed from the kernel long ago, for example.
+There is surprising resistance to removing obsolete documentation, but we
+should do that anyway.  Extra cruft in our documentation helps nobody.
+
+In cases where there is perhaps some useful information in a badly outdated
+document, and you are unable to update it, the best thing to do may be to
+add a warning at the beginning.  The following text is recommended::
+
+  .. warning ::
+  	This document is outdated and in need of attention.  Please use
+	this information with caution, and please consider sending patches
+	to update it.
+
+That way, at least our long-suffering readers have been warned that the
+document may lead them astray.
+
+Documentation coherency
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The old-timers around here will remember the Linux books that showed up on
+the shelves in the 1990s.  They were simply collections of documentation
+files scrounged from various locations on the net.  The books have (mostly)
+improved since then, but the kernel's documentation is still mostly built
+on that model.  It is thousands of files, almost each of which was written
+in isolation from all of the others.  We don't have a coherent body of
+kernel documentation; we have thousands of individual documents.
+
+We have been trying to improve the situation through the creation of
+a set of "books" that group documentation for specific readers.  These
+include:
+
+ - :doc:`../admin-guide/index`
+ - :doc:`../core-api/index`
+ - :doc:`../driver-api/index`
+ - :doc:`../userspace-api/index`
+
+As well as this book on documentation itself.
+
+Moving documents into the appropriate books is an important task and needs
+to continue.  There are a couple of challenges associated with this work,
+though.  Moving documentation files creates short-term pain for the people
+who work with those files; they are understandably unenthusiastic about
+such changes.  Usually the case can be made to move a document once; we
+really don't want to keep shifting them around, though.
+
+Even when all documents are in the right place, though, we have only
+managed to turn a big pile into a group of smaller piles.  The work of
+trying to knit all of those documents together into a single whole has not
+yet begun.  If you have bright ideas on how we could proceed on that front,
+we would be more than happy to hear them.
+
+Stylesheet improvements
+~~~~~~~~~~~~~~~~~~~~~~~
+
+With the adoption of Sphinx we have much nicer-looking HTML output than we
+once did.  But it could still use a lot of improvement; Donald Knuth and
+Edward Tufte would be unimpressed.  That requires tweaking our stylesheets
+to create more typographically sound, accessible, and readable output.
+
+Be warned: if you take on this task you are heading into classic bikeshed
+territory.  Expect a lot of opinions and discussion for even relatively
+obvious changes.  That is, alas, the nature of the world we live in.
+
+Non-LaTeX PDF build
+~~~~~~~~~~~~~~~~~~~
+
+This is a decidedly nontrivial task for somebody with a lot of time and
+Python skills.  The Sphinx toolchain is relatively small and well
+contained; it is easy to add to a development system.  But building PDF or
+EPUB output requires installing LaTeX, which is anything but small or well
+contained.  That would be a nice thing to eliminate.
+
+The original hope had been to use the rst2pdf tool (https://rst2pdf.org/)
+for PDF generation, but it turned out to not be up to the task.
+Development work on rst2pdf seems to have picked up again in recent times,
+though, which is a hopeful sign.  If a suitably motivated developer were to
+work with that project to make rst2pdf work with the kernel documentation
+build, the world would be eternally grateful.
+
+Write more documentation
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Naturally, there are massive parts of the kernel that are severely
+underdocumented.  If you have the knowledge to document a specific kernel
+subsystem and the desire to do so, please do not hesitate to do some
+writing and contribute the result to the kernel.  Untold numbers of kernel
+developers and users will thank you.
diff --git a/Documentation/doc-guide/index.rst b/Documentation/doc-guide/index.rst
index 603f3ff..7c7d977 100644
--- a/Documentation/doc-guide/index.rst
+++ b/Documentation/doc-guide/index.rst
@@ -10,6 +10,8 @@
    sphinx
    kernel-doc
    parse-headers
+   contributing
+   maintainer-profile
 
 .. only::  subproject and html
 
diff --git a/Documentation/doc-guide/maintainer-profile.rst b/Documentation/doc-guide/maintainer-profile.rst
new file mode 100644
index 0000000..5afc0dd
--- /dev/null
+++ b/Documentation/doc-guide/maintainer-profile.rst
@@ -0,0 +1,45 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Documentation subsystem maintainer entry profile
+================================================
+
+The documentation "subsystem" is the central coordinating point for the
+kernel's documentation and associated infrastructure.  It covers the
+hierarchy under Documentation/ (with the exception of
+Documentation/devicetree), various utilities under scripts/ and, at least
+some of the time, LICENSES/.
+
+It's worth noting, though, that the boundaries of this subsystem are rather
+fuzzier than normal.  Many other subsystem maintainers like to keep control
+of portions of Documentation/, and many more freely apply changes there
+when it is convenient.  Beyond that, much of the kernel's documentation is
+found in the source as kerneldoc comments; those are usually (but not
+always) maintained by the relevant subsystem maintainer.
+
+The mailing list for documentation is linux-doc@vger.kernel.org.  Patches
+should be made against the docs-next tree whenever possible.
+
+Submit checklist addendum
+-------------------------
+
+When making documentation changes, you should actually build the
+documentation and ensure that no new errors or warnings have been
+introduced.  Generating HTML documents and looking at the result will help
+to avoid unsightly misunderstandings about how things will be rendered.
+
+Key cycle dates
+---------------
+
+Patches can be sent anytime, but response will be slower than usual during
+the merge window.  The docs tree tends to close late before the merge
+window opens, since the risk of regressions from documentation patches is
+low.
+
+Review cadence
+--------------
+
+I am the sole maintainer for the documentation subsystem, and I am doing
+the work on my own time, so the response to patches will occasionally be
+slow.  I try to always send out a notification when a patch is merged (or
+when I decide that one cannot be).  Do not hesitate to send a ping if you
+have not heard back within a week of sending a patch.
diff --git a/Documentation/driver-api/dmaengine/client.rst b/Documentation/driver-api/dmaengine/client.rst
index a9a7a3c..e5953e7 100644
--- a/Documentation/driver-api/dmaengine/client.rst
+++ b/Documentation/driver-api/dmaengine/client.rst
@@ -151,8 +151,8 @@
      Note that callbacks will always be invoked from the DMA
      engines tasklet, never from interrupt context.
 
-  Optional: per descriptor metadata
-  ---------------------------------
+Optional: per descriptor metadata
+---------------------------------
   DMAengine provides two ways for metadata support.
 
   DESC_METADATA_CLIENT
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index 20e07e4..46c1378 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -267,6 +267,8 @@
 
 GPIO
   devm_gpiod_get()
+  devm_gpiod_get_array()
+  devm_gpiod_get_array_optional()
   devm_gpiod_get_index()
   devm_gpiod_get_index_optional()
   devm_gpiod_get_optional()
diff --git a/Documentation/driver-api/gpio/driver.rst b/Documentation/driver-api/gpio/driver.rst
index 2ff7431..8719225 100644
--- a/Documentation/driver-api/gpio/driver.rst
+++ b/Documentation/driver-api/gpio/driver.rst
@@ -507,11 +507,6 @@
   cascaded irq has to be handled by a threaded interrupt handler.
   Apart from that it works exactly like the chained irqchip.
 
-- DEPRECATED: gpiochip_set_chained_irqchip(): sets up a chained cascaded irq
-  handler for a gpio_chip from a parent IRQ and passes the struct gpio_chip*
-  as handler data. Notice that we pass is as the handler data, since the
-  irqchip data is likely used by the parent irqchip.
-
 - gpiochip_set_nested_irqchip(): sets up a nested cascaded irq handler for a
   gpio_chip from a parent IRQ. As the parent IRQ has usually been
   explicitly requested by the driver, this does very little more than
diff --git a/Documentation/driver-api/gpio/drivers-on-gpio.rst b/Documentation/driver-api/gpio/drivers-on-gpio.rst
index f3a1893..820b403 100644
--- a/Documentation/driver-api/gpio/drivers-on-gpio.rst
+++ b/Documentation/driver-api/gpio/drivers-on-gpio.rst
@@ -95,7 +95,7 @@
 MTD NOR flash has add-ons for extra GPIO lines too, though the address bus is
 usually connected directly to the flash.
 
-Use those instead of talking directly to the GPIOs using sysfs; they integrate
-with kernel frameworks better than your userspace code could. Needless to say,
-just using the appropriate kernel drivers will simplify and speed up your
-embedded hacking in particular by providing ready-made components.
+Use those instead of talking directly to the GPIOs from userspace; they
+integrate with kernel frameworks better than your userspace code could.
+Needless to say, just using the appropriate kernel drivers will simplify and
+speed up your embedded hacking in particular by providing ready-made components.
diff --git a/Documentation/driver-api/gpio/index.rst b/Documentation/driver-api/gpio/index.rst
index 5b61032..1d48fe2 100644
--- a/Documentation/driver-api/gpio/index.rst
+++ b/Documentation/driver-api/gpio/index.rst
@@ -8,6 +8,7 @@
    :maxdepth: 2
 
    intro
+   using-gpio
    driver
    consumer
    board
diff --git a/Documentation/driver-api/gpio/using-gpio.rst b/Documentation/driver-api/gpio/using-gpio.rst
new file mode 100644
index 0000000..dda0694
--- /dev/null
+++ b/Documentation/driver-api/gpio/using-gpio.rst
@@ -0,0 +1,50 @@
+=========================
+Using GPIO Lines in Linux
+=========================
+
+The Linux kernel exists to abstract and present hardware to users. GPIO lines
+as such are normally not user facing abstractions. The most obvious, natural
+and preferred way to use GPIO lines is to let kernel hardware drivers deal
+with them.
+
+For examples of already existing generic drivers that will also be good
+examples for any other kernel drivers you want to author, refer to
+:doc:`drivers-on-gpio`.
+
+For any kind of mass-produced system you want to support, such as servers,
+laptops, phones, tablets, routers, and any consumer, office or business goods,
+using appropriate kernel drivers is paramount. Submit your code for inclusion
+in the upstream Linux kernel when you feel it is mature enough, and you will
+get help to refine it; see :doc:`../../process/submitting-patches`.
+
+In Linux, GPIO lines also have a userspace ABI.
+
+The userspace ABI is intended for one-off deployments. Examples are prototypes,
+factory lines, maker community projects, workshop specimens, production tools,
+industrial automation, PLC-type use cases and door controllers: in short,
+specialized equipment that is not mass-produced, and whose operators have deep
+knowledge of the equipment and of the software-hardware interface to be set up.
+Such use cases should not have a natural fit in any existing kernel subsystem,
+and should not be a good fit for an operating system, either because they are
+not reusable or abstract enough or because they involve a lot of policy that is
+unrelated to computer hardware.
+
+Applications that have a good reason to use the industrial I/O (IIO) subsystem
+from userspace will likely be a good fit for using GPIO lines from userspace as
+well.
+
+Do not under any circumstances abuse the GPIO userspace ABI to cut corners in
+any product development project. If you use it for prototyping, then do not
+productize the prototype: rewrite it using proper kernel drivers. Do not under
+any circumstances deploy any mass-produced products using GPIO from userspace.
+
+The userspace ABI is a character device for each GPIO hardware unit (GPIO chip).
+These devices will appear on the system as ``/dev/gpiochip0`` through
+``/dev/gpiochipN``. Examples of how to use the userspace ABI directly can be
+found in the ``tools/gpio`` subdirectory of the kernel tree.
+
+For structured and managed applications, we recommend that you make use of the
+libgpiod_ library. This provides helper abstractions, command line utilities
+and arbitration for multiple simultaneous consumers on the same GPIO chip.
+
+.. _libgpiod: https://git.kernel.org/pub/scm/libs/libgpiod/libgpiod.git/
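+
+As an illustration, a minimal sketch of driving one line as an output with the
+libgpiod v1 C API (the chip path, line offset and consumer name used here are
+arbitrary)::
+
+  #include <gpiod.h>
+
+  int main(void)
+  {
+          /* Open the first GPIO chip, i.e. /dev/gpiochip0. */
+          struct gpiod_chip *chip = gpiod_chip_open("/dev/gpiochip0");
+          struct gpiod_line *line;
+
+          if (!chip)
+                  return 1;
+
+          /* Claim line 4 as an output, initially low; "demo" is the consumer. */
+          line = gpiod_chip_get_line(chip, 4);
+          if (!line || gpiod_line_request_output(line, "demo", 0) < 0) {
+                  gpiod_chip_close(chip);
+                  return 1;
+          }
+
+          gpiod_line_set_value(line, 1);   /* drive the line high */
+
+          gpiod_line_release(line);
+          gpiod_chip_close(chip);
+          return 0;
+  }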
diff --git a/Documentation/driver-api/interconnect.rst b/Documentation/driver-api/interconnect.rst
index cdeb582..5ed4f57 100644
--- a/Documentation/driver-api/interconnect.rst
+++ b/Documentation/driver-api/interconnect.rst
@@ -91,3 +91,25 @@
 get paths between endpoints and set their bandwidth/latency/QoS requirements
 for these interconnect paths.  These interfaces are not currently
 documented.
+
+Interconnect debugfs interfaces
+-------------------------------
+
+Like several other subsystems, interconnect creates some files in debugfs for
+debugging and introspection. Files in debugfs are not considered ABI, so
+application software shouldn't rely on their format, which may change between
+kernel versions.
+
+``/sys/kernel/debug/interconnect/interconnect_summary``:
+
+Shows all interconnect nodes in the system with their aggregated bandwidth
+requests. Indented under each node are the bandwidth requests from each device.
+
+``/sys/kernel/debug/interconnect/interconnect_graph``:
+
+Shows the interconnect graph in the graphviz dot format. It includes all
+interconnect nodes and links in the system and groups nodes from the same
+provider together as subgraphs. The format is human-readable and can also be
+piped through dot to generate diagrams in many graphical formats::
+
+        $ cat /sys/kernel/debug/interconnect/interconnect_graph | \
+                dot -Tsvg > interconnect_graph.svg
diff --git a/Documentation/driver-api/thermal/cpu-idle-cooling.rst b/Documentation/driver-api/thermal/cpu-idle-cooling.rst
index e4f0859..9f0016e 100644
--- a/Documentation/driver-api/thermal/cpu-idle-cooling.rst
+++ b/Documentation/driver-api/thermal/cpu-idle-cooling.rst
@@ -65,6 +65,8 @@
 performance penalty and a fixed latency. Mitigation can be increased
 or decreased by modulating the duty cycle of the idle injection.
 
+::
+
      ^
      |
      |
@@ -91,6 +93,8 @@
 The governor will change the cooling device state thus the duty cycle
 and this variation will modulate the cooling effect.
 
+::
+
      ^
      |
      |
@@ -154,6 +158,7 @@
 
  P(opp)target = ((Trunning x (P(opp)running) + (Tidle x P(opp)idle)) /
 			(Trunning + Tidle)
+
   ...
 
  Tidle = Trunning x ((P(opp)running / P(opp)target) - 1)
diff --git a/Documentation/fb/fbcon.rst b/Documentation/fb/fbcon.rst
index ebca417..e57a3d1 100644
--- a/Documentation/fb/fbcon.rst
+++ b/Documentation/fb/fbcon.rst
@@ -127,7 +127,7 @@
 	is typically located on the same video card.  Thus, the consoles that
 	are controlled by the VGA console will be garbled.
 
-4. fbcon=rotate:<n>
+5. fbcon=rotate:<n>
 
 	This option changes the orientation angle of the console display. The
 	value 'n' accepts the following:
@@ -152,21 +152,21 @@
 	Actually, the underlying fb driver is totally ignorant of console
 	rotation.
 
-5. fbcon=margin:<color>
+6. fbcon=margin:<color>
 
 	This option specifies the color of the margins. The margins are the
 	leftover area at the right and the bottom of the screen that are not
 	used by text. By default, this area will be black. The 'color' value
 	is an integer number that depends on the framebuffer driver being used.
 
-6. fbcon=nodefer
+7. fbcon=nodefer
 
 	If the kernel is compiled with deferred fbcon takeover support, normally
 	the framebuffer contents, left in place by the firmware/bootloader, will
 	be preserved until there actually is some text is output to the console.
 	This option causes fbcon to bind immediately to the fbdev device.
 
-7. fbcon=logo-pos:<location>
+8. fbcon=logo-pos:<location>
 
 	The only possible 'location' is 'center' (without quotes), and when
 	given, the bootup logo is moved from the default top-left corner
@@ -174,6 +174,11 @@
 	displayed due to multiple CPUs, the collected line of logos is moved
 	as a whole.
 
+9. fbcon=logo-count:<n>
+
+	The value 'n' overrides the number of bootup logos. 0 disables the
+	logo, and -1 gives the default which is the number of online CPUs.
+
 C. Attaching, Detaching and Unloading
 
 Before going on to how to attach, detach and unload the framebuffer console, an
diff --git a/Documentation/fb/modedb.rst b/Documentation/fb/modedb.rst
index 9c4e3fd..624d08f 100644
--- a/Documentation/fb/modedb.rst
+++ b/Documentation/fb/modedb.rst
@@ -65,6 +65,9 @@
   - reflect_y (boolean): Perform an axial symmetry on the Y axis
   - rotate (integer): Rotate the initial framebuffer by x
     degrees. Valid values are 0, 90, 180 and 270.
+  - panel_orientation, one of "normal", "upside_down", "left_side_up", or
+    "right_side_up". For KMS drivers only, this sets the "panel orientation"
+    property on the KMS connector as a hint for KMS users.
 
 
 -----------------------------------------------------------------------------
diff --git a/Documentation/features/core/jump-labels/arch-support.txt b/Documentation/features/core/jump-labels/arch-support.txt
index cae7be2..632a1c7 100644
--- a/Documentation/features/core/jump-labels/arch-support.txt
+++ b/Documentation/features/core/jump-labels/arch-support.txt
@@ -7,7 +7,7 @@
     |         arch |status|
     -----------------------
     |       alpha: | TODO |
-    |         arc: | TODO |
+    |         arc: |  ok  |
     |         arm: |  ok  |
     |       arm64: |  ok  |
     |         c6x: | TODO |
diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 4fae046..32b2972 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -24,7 +24,7 @@
     |      parisc: |  ok  |
     |     powerpc: |  ok  |
     |       riscv: | TODO |
-    |        s390: | TODO |
+    |        s390: |  ok  |
     |          sh: | TODO |
     |       sparc: | TODO |
     |          um: | TODO |
diff --git a/Documentation/filesystems/adfs.txt b/Documentation/filesystems/adfs.txt
index 5949766..0baa8e8 100644
--- a/Documentation/filesystems/adfs.txt
+++ b/Documentation/filesystems/adfs.txt
@@ -1,3 +1,27 @@
+Filesystems supported by ADFS
+-----------------------------
+
+The ADFS module supports the following Filecore formats which have:
+
+- new maps
+- new directories or big directories
+
+In terms of the named formats, this means we support:
+
+- E and E+, with or without boot block
+- F and F+
+
+We fully support reading files from these filesystems, and writing to
+existing files within their existing allocation.  Essentially, we do
+not support changing any of the filesystem metadata.
+
+This is intended to support loopback mounted Linux native filesystems
+on a RISC OS Filecore filesystem, but will allow the data within files
+to be changed.
+
+If write support (ADFS_FS_RW) is configured, we allow rudimentary
+directory updates, specifically updating the access mode and timestamp.
+
 Mount options for ADFS
 ----------------------
 
diff --git a/Documentation/filesystems/automount-support.txt b/Documentation/filesystems/automount-support.txt
index b0afd3d..7d9f826 100644
--- a/Documentation/filesystems/automount-support.txt
+++ b/Documentation/filesystems/automount-support.txt
@@ -9,7 +9,7 @@
 IN-KERNEL AUTOMOUNTING
 ======================
 
-See section "Mount Traps" of  Documentation/filesystems/autofs.txt
+See section "Mount Traps" of  Documentation/filesystems/autofs.rst
 
 Then from userspace, you can just do something like:
 
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index 3135b80..4eb3e2d 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -235,6 +235,17 @@
                        hide up to all remaining free space. The actual space that
                        would be unusable can be viewed at /sys/fs/f2fs/<disk>/unusable
                        This space is reclaimed once checkpoint=enable.
+compress_algorithm=%s  Control the compression algorithm; f2fs currently
+                       supports the "lzo" and "lz4" algorithms.
+compress_log_size=%u   Control the compress cluster size. The size will be
+                       4KB * (1 << %u); 16KB is the minimum size, which is
+                       also the default.
+compress_extension=%s  Add a file extension for which f2fs enables compression
+                       by default, e.g. if all files with the '.ext' extension
+                       compress well, adding '.ext' to the compression
+                       extension list enables compression on those files by
+                       default rather than having to enable it via ioctl. For
+                       other files, compression can still be enabled via ioctl.
 
 ================================================================================
 DEBUGFS ENTRIES
@@ -259,170 +270,6 @@
 
 Files in /sys/fs/f2fs/<devname>
 (see also Documentation/ABI/testing/sysfs-fs-f2fs)
-..............................................................................
- File                         Content
-
- gc_urgent_sleep_time         This parameter controls sleep time for gc_urgent.
-                              500 ms is set by default. See above gc_urgent.
-
- gc_min_sleep_time            This tuning parameter controls the minimum sleep
-                              time for the garbage collection thread. Time is
-                              in milliseconds.
-
- gc_max_sleep_time            This tuning parameter controls the maximum sleep
-                              time for the garbage collection thread. Time is
-                              in milliseconds.
-
- gc_no_gc_sleep_time          This tuning parameter controls the default sleep
-                              time for the garbage collection thread. Time is
-                              in milliseconds.
-
- gc_idle                      This parameter controls the selection of victim
-                              policy for garbage collection. Setting gc_idle = 0
-                              (default) will disable this option. Setting
-                              gc_idle = 1 will select the Cost Benefit approach
-                              & setting gc_idle = 2 will select the greedy approach.
-
- gc_urgent                    This parameter controls triggering background GCs
-                              urgently or not. Setting gc_urgent = 0 [default]
-                              makes back to default behavior, while if it is set
-                              to 1, background thread starts to do GC by given
-                              gc_urgent_sleep_time interval.
-
- reclaim_segments             This parameter controls the number of prefree
-                              segments to be reclaimed. If the number of prefree
-			      segments is larger than the number of segments
-			      in the proportion to the percentage over total
-			      volume size, f2fs tries to conduct checkpoint to
-			      reclaim the prefree segments to free segments.
-			      By default, 5% over total # of segments.
-
- main_blkaddr                 This value gives the first block address of
-			      MAIN area in the partition.
-
- max_small_discards	      This parameter controls the number of discard
-			      commands that consist small blocks less than 2MB.
-			      The candidates to be discarded are cached until
-			      checkpoint is triggered, and issued during the
-			      checkpoint. By default, it is disabled with 0.
-
- discard_granularity	      This parameter controls the granularity of discard
-			      command size. It will issue discard commands iif
-			      the size is larger than given granularity. Its
-			      unit size is 4KB, and 4 (=16KB) is set by default.
-			      The maximum value is 128 (=512KB).
-
- reserved_blocks	      This parameter indicates the number of blocks that
-			      f2fs reserves internally for root.
-
- batched_trim_sections	      This parameter controls the number of sections
-                              to be trimmed out in batch mode when FITRIM
-                              conducts. 32 sections is set by default.
-
- ipu_policy                   This parameter controls the policy of in-place
-                              updates in f2fs. There are five policies:
-                               0x01: F2FS_IPU_FORCE, 0x02: F2FS_IPU_SSR,
-                               0x04: F2FS_IPU_UTIL,  0x08: F2FS_IPU_SSR_UTIL,
-                               0x10: F2FS_IPU_FSYNC.
-
- min_ipu_util                 This parameter controls the threshold to trigger
-                              in-place-updates. The number indicates percentage
-                              of the filesystem utilization, and used by
-                              F2FS_IPU_UTIL and F2FS_IPU_SSR_UTIL policies.
-
- min_fsync_blocks             This parameter controls the threshold to trigger
-                              in-place-updates when F2FS_IPU_FSYNC mode is set.
-			      The number indicates the number of dirty pages
-			      when fsync needs to flush on its call path. If
-			      the number is less than this value, it triggers
-			      in-place-updates.
-
- min_seq_blocks		      This parameter controls the threshold to serialize
-			      write IOs issued by multiple threads in parallel.
-
- min_hot_blocks		      This parameter controls the threshold to allocate
-			      a hot data log for pending data blocks to write.
-
- min_ssr_sections	      This parameter adds the threshold when deciding
-			      SSR block allocation. If this is large, SSR mode
-			      will be enabled early.
-
- ram_thresh                   This parameter controls the memory footprint used
-			      by free nids and cached nat entries. By default,
-			      1 is set, which indicates 10 MB / 1 GB RAM.
-
- ra_nid_pages		      When building free nids, F2FS reads NAT blocks
-			      ahead for speed up. Default is 0.
-
- dirty_nats_ratio	      Given dirty ratio of cached nat entries, F2FS
-			      determines flushing them in background.
-
- max_victim_search	      This parameter controls the number of trials to
-			      find a victim segment when conducting SSR and
-			      cleaning operations. The default value is 4096
-			      which covers 8GB block address range.
-
- migration_granularity	      For large-sized sections, F2FS can stop GC given
-			      this granularity instead of reclaiming entire
-			      section.
-
- dir_level                    This parameter controls the directory level to
-			      support large directory. If a directory has a
-			      number of files, it can reduce the file lookup
-			      latency by increasing this dir_level value.
-			      Otherwise, it needs to decrease this value to
-			      reduce the space overhead. The default value is 0.
-
- cp_interval		      F2FS tries to do checkpoint periodically, 60 secs
-			      by default.
-
- idle_interval		      F2FS detects system is idle, if there's no F2FS
-			      operations during given interval, 5 secs by
-			      default.
-
- discard_idle_interval	      F2FS detects the discard thread is idle, given
-			      time interval. Default is 5 secs.
-
- gc_idle_interval	      F2FS detects the GC thread is idle, given time
-			      interval. Default is 5 secs.
-
- umount_discard_timeout       When unmounting the disk, F2FS waits for finishing
-			      queued discard commands which can take huge time.
-			      This gives time out for it, 5 secs by default.
-
- iostat_enable		      This controls to enable/disable iostat in F2FS.
-
- readdir_ra		      This enables/disabled readahead of inode blocks
-			      in readdir, and default is enabled.
-
- gc_pin_file_thresh	      This indicates how many GC can be failed for the
-			      pinned file. If it exceeds this, F2FS doesn't
-			      guarantee its pinning state. 2048 trials is set
-			      by default.
-
- extension_list		      This enables to change extension_list for hot/cold
-			      files in runtime.
-
- inject_rate		      This controls injection rate of arbitrary faults.
-
- inject_type		      This controls injection type of arbitrary faults.
-
- dirty_segments 	      This shows # of dirty segments.
-
- lifetime_write_kbytes	      This shows # of data written to the disk.
-
- features		      This shows current features enabled on F2FS.
-
- current_reserved_blocks      This shows # of blocks currently reserved.
-
- unusable                     If checkpoint=disable, this shows the number of
-                              blocks that are unusable.
-                              If checkpoint=enable it shows the number of blocks
-                              that would be unusable if checkpoint=disable were
-                              to be set.
-
-encoding 	              This shows the encoding used for casefolding.
-                              If casefolding is not enabled, returns (none)
 
 ================================================================================
 USAGE
@@ -840,3 +687,44 @@
  4. address = fibmap(fd, offset)
  5. open(blkdev)
  6. write(blkdev, address)
+
+Compression implementation
+--------------------------
+
+- A new term, cluster, is defined as the basic unit of compression; a file can
+be logically divided into multiple clusters. One cluster includes 4 << n
+(n >= 0) logical pages, the compression size is also the cluster size, and each
+cluster can be compressed or left uncompressed.
+
+- In the cluster metadata layout, one special block address is used to indicate
+whether a cluster is compressed or normal. For a compressed cluster, the
+following metadata maps the cluster to [1, 4 << n - 1] physical blocks, in
+which f2fs stores the data, including the compress header and compressed data.
+
+- In order to eliminate write amplification during overwrite, F2FS only
+supports compression on write-once files; data can be compressed only when
+all logical blocks in the file are valid and the cluster compress ratio is
+lower than the specified threshold.
+
+- To enable compression on a regular inode, there are three ways (the first
+is sketched after this list):
+* chattr +c file
+* chattr +c dir; touch dir/file
+* mount w/ -o compress_extension=ext; touch file.ext
+
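+The first method works through the standard inode flags interface: chattr +c
+sets the FS_COMPR_FL flag via FS_IOC_SETFLAGS. A minimal sketch of doing the
+same thing directly (path and error handling here are illustrative only):
+
+  #include <fcntl.h>
+  #include <linux/fs.h>
+  #include <sys/ioctl.h>
+  #include <unistd.h>
+
+  /* Equivalent of "chattr +c <path>": set the compression flag on an inode. */
+  static int enable_compression(const char *path)
+  {
+          int fd = open(path, O_RDONLY);
+          int flags, ret = -1;
+
+          if (fd < 0)
+                  return -1;
+          if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
+                  flags |= FS_COMPR_FL;
+                  ret = ioctl(fd, FS_IOC_SETFLAGS, &flags);
+          }
+          close(fd);
+          return ret;
+  }
+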
+Compress metadata layout:
+                             [Dnode Structure]
+             +-----------------------------------------------+
+             | cluster 1 | cluster 2 | ......... | cluster N |
+             +-----------------------------------------------+
+             .           .                       .           .
+       .                       .                .                      .
+  .         Compressed Cluster       .        .        Normal Cluster            .
++----------+---------+---------+---------+  +---------+---------+---------+---------+
+|compr flag| block 1 | block 2 | block 3 |  | block 1 | block 2 | block 3 | block 4 |
++----------+---------+---------+---------+  +---------+---------+---------+---------+
+           .                             .
+         .                                           .
+       .                                                           .
+      +-------------+-------------+----------+----------------------------+
+      | data length | data chksum | reserved |      compressed data       |
+      +-------------+-------------+----------+----------------------------+
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 01e9092..bd99323 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -1016,9 +1016,9 @@
 - Direct I/O is not supported on encrypted files.  Attempts to use
   direct I/O on such files will fall back to buffered I/O.
 
-- The fallocate operations FALLOC_FL_COLLAPSE_RANGE,
-  FALLOC_FL_INSERT_RANGE, and FALLOC_FL_ZERO_RANGE are not supported
-  on encrypted files and will fail with EOPNOTSUPP.
+- The fallocate operations FALLOC_FL_COLLAPSE_RANGE and
+  FALLOC_FL_INSERT_RANGE are not supported on encrypted files and will
+  fail with EOPNOTSUPP.
 
 - Online defragmentation of encrypted files is not supported.  The
   EXT4_IOC_MOVE_EXT and F2FS_IOC_MOVE_RANGE ioctls will fail with
diff --git a/Documentation/filesystems/fuse.rst b/Documentation/filesystems/fuse.rst
new file mode 100644
index 0000000..8e45506
--- /dev/null
+++ b/Documentation/filesystems/fuse.rst
@@ -0,0 +1,406 @@
+.. SPDX-License-Identifier: GPL-2.0
+==============
+FUSE
+==============
+
+Definitions
+===========
+
+Userspace filesystem:
+  A filesystem in which data and metadata are provided by an ordinary
+  userspace process.  The filesystem can be accessed normally through
+  the kernel interface.
+
+Filesystem daemon:
+  The process(es) providing the data and metadata of the filesystem.
+
+Non-privileged mount (or user mount):
+  A userspace filesystem mounted by a non-privileged (non-root) user.
+  The filesystem daemon is running with the privileges of the mounting
+  user.  NOTE: this is not the same as mounts allowed with the "user"
+  option in /etc/fstab, which is not discussed here.
+
+Filesystem connection:
+  A connection between the filesystem daemon and the kernel.  The
+  connection exists until either the daemon dies, or the filesystem is
+  umounted.  Note that detaching (or lazy umounting) the filesystem
+  does *not* break the connection, in this case it will exist until
+  the last reference to the filesystem is released.
+
+Mount owner:
+  The user who does the mounting.
+
+User:
+  The user who is performing filesystem operations.
+
+What is FUSE?
+=============
+
+FUSE is a userspace filesystem framework.  It consists of a kernel
+module (fuse.ko), a userspace library (libfuse.*) and a mount utility
+(fusermount).
+
+One of the most important features of FUSE is allowing secure,
+non-privileged mounts.  This opens up new possibilities for the use of
+filesystems.  A good example is sshfs: a secure network filesystem
+using the sftp protocol.
+
+The userspace library and utilities are available from the
+`FUSE homepage <http://fuse.sourceforge.net/>`_.
+
+Filesystem type
+===============
+
+The filesystem type given to mount(2) can be one of the following:
+
+    fuse
+      This is the usual way to mount a FUSE filesystem.  The first
+      argument of the mount system call may contain an arbitrary string,
+      which is not interpreted by the kernel.
+
+    fuseblk
+      The filesystem is block device based.  The first argument of the
+      mount system call is interpreted as the name of the device.
+
+Mount options
+=============
+
+fd=N
+  The file descriptor to use for communication between the userspace
+  filesystem and the kernel.  The file descriptor must have been
+  obtained by opening the FUSE device ('/dev/fuse').
+
+rootmode=M
+  The file mode of the filesystem's root in octal representation.
+
+user_id=N
+  The numeric user id of the mount owner.
+
+group_id=N
+  The numeric group id of the mount owner.
+
+default_permissions
+  By default FUSE doesn't check file access permissions; the
+  filesystem is free to implement its access policy or leave it to
+  the underlying file access mechanism (e.g. in case of network
+  filesystems).  This option enables permission checking, restricting
+  access based on file mode.  It is usually useful together with the
+  'allow_other' mount option.
+
+allow_other
+  This option overrides the security measure restricting file access
+  to the user mounting the filesystem.  This option is by default only
+  allowed to root, but this restriction can be removed with a
+  (userspace) configuration option.
+
+max_read=N
+  With this option the maximum size of read operations can be set.
+  The default is infinite.  Note that the size of read requests is
+  limited anyway to 32 pages (which is 128kbyte on i386).
+
+blksize=N
+  Set the block size for the filesystem.  The default is 512.  This
+  option is only valid for 'fuseblk' type mounts.
+
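+These options are normally assembled by the mount utility (fusermount).  As a
+minimal sketch, a privileged process could also pass them by hand, using only
+open(2) and mount(2); the source string, mountpoint and root mode below are
+illustrative only::
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <sys/mount.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          char opts[128];
+          /* The kernel will talk to the daemon through this descriptor. */
+          int fd = open("/dev/fuse", O_RDWR);
+
+          if (fd < 0)
+                  return 1;
+
+          /* rootmode is octal; 40000 marks the root as a directory. */
+          snprintf(opts, sizeof(opts),
+                   "fd=%d,rootmode=40000,user_id=%d,group_id=%d",
+                   fd, (int)getuid(), (int)getgid());
+
+          /* The source string ("myfs") is not interpreted by the kernel. */
+          if (mount("myfs", "/mnt/fuse", "fuse", 0, opts) < 0)
+                  return 1;
+
+          /* ... serve requests by reading and writing fd ... */
+          return 0;
+  }
+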
+Control filesystem
+==================
+
+There's a control filesystem for FUSE, which can be mounted by::
+
+  mount -t fusectl none /sys/fs/fuse/connections
+
+Mounting it under the '/sys/fs/fuse/connections' directory makes it
+backwards compatible with earlier versions.
+
+Under the fuse control filesystem each connection has a directory
+named by a unique number.
+
+For each connection the following files exist within this directory:
+
+	waiting
+	  The number of requests which are waiting to be transferred to
+	  userspace or being processed by the filesystem daemon.  If there is
+	  no filesystem activity and 'waiting' is non-zero, then the
+	  filesystem is hung or deadlocked.
+
+	abort
+	  Writing anything into this file will abort the filesystem
+	  connection.  This means that all waiting requests will be aborted and
+	  an error returned for all aborted and new requests.
+
+Only the owner of the mount may read or write these files.
+
+Interrupting filesystem operations
+==================================
+
+If a process issuing a FUSE filesystem request is interrupted, the
+following will happen:
+
+  -  If the request is not yet sent to userspace AND the signal is
+     fatal (SIGKILL or unhandled fatal signal), then the request is
+     dequeued and returns immediately.
+
+  -  If the request is not yet sent to userspace AND the signal is not
+     fatal, then an interrupted flag is set for the request.  When
+     the request has been successfully transferred to userspace and
+     this flag is set, an INTERRUPT request is queued.
+
+  -  If the request is already sent to userspace, then an INTERRUPT
+     request is queued.
+
+INTERRUPT requests take precedence over other requests, so the
+userspace filesystem will receive queued INTERRUPTs before any others.
+
+The userspace filesystem may ignore the INTERRUPT requests entirely,
+or may honor them by sending a reply to the *original* request, with
+the error set to EINTR.
+
+It is also possible that there's a race between processing the
+original request and its INTERRUPT request.  There are two possibilities:
+
+  1. The INTERRUPT request is processed before the original request is
+     processed
+
+  2. The INTERRUPT request is processed after the original request has
+     been answered
+
+If the filesystem cannot find the original request, it should wait for
+some timeout and/or a number of new requests to arrive, after which it
+should reply to the INTERRUPT request with an EAGAIN error.  In case
+1) the INTERRUPT request will be requeued.  In case 2) the INTERRUPT
+reply will be ignored.
+
+Aborting a filesystem connection
+================================
+
+It is possible to get into certain situations where the filesystem is
+not responding.  Reasons for this may be:
+
+  a) Broken userspace filesystem implementation
+
+  b) Network connection down
+
+  c) Accidental deadlock
+
+  d) Malicious deadlock
+
+(For more on c) and d) see later sections)
+
+In any of these cases it may be useful to abort the connection to
+the filesystem.  There are several ways to do this:
+
+  - Kill the filesystem daemon.  Works in case of a) and b)
+
+  - Kill the filesystem daemon and all users of the filesystem.  Works
+    in all cases except some malicious deadlocks
+
+  - Use forced umount (umount -f).  Works in all cases but only if
+    filesystem is still attached (it hasn't been lazy unmounted)
+
+  - Abort filesystem through the FUSE control filesystem.  Most
+    powerful method, always works.
+
+How do non-privileged mounts work?
+==================================
+
+Since the mount() system call is a privileged operation, a helper
+program (fusermount) is needed, which is installed setuid root.
+
+The implication of providing non-privileged mounts is that the mount
+owner must not be able to use this capability to compromise the
+system.  Obvious requirements arising from this are:
+
+ A) mount owner should not be able to get elevated privileges with the
+    help of the mounted filesystem
+
+ B) mount owner should not get illegitimate access to information from
+    other users' and the super user's processes
+
+ C) mount owner should not be able to induce undesired behavior in
+    other users' or the super user's processes
+
+How are requirements fulfilled?
+===============================
+
+ A) The mount owner could gain elevated privileges by either:
+
+    1. creating a filesystem containing a device file, then opening this device
+
+    2. creating a filesystem containing a suid or sgid application, then executing this application
+
+    The solution is not to allow opening device files and to ignore
+    setuid and setgid bits when executing programs.  To ensure this,
+    fusermount always adds "nosuid" and "nodev" to the mount options
+    for non-privileged mounts.
+
+ B) If another user is accessing files or directories in the
+    filesystem, the filesystem daemon serving requests can record the
+    exact sequence and timing of operations performed.  This
+    information is otherwise inaccessible to the mount owner, so this
+    counts as an information leak.
+
+    The solution to this problem will be presented in point 2) of C).
+
+ C) There are several ways in which the mount owner can induce
+    undesired behavior in other users' processes, such as:
+
+     1) mounting a filesystem over a file or directory which the mount
+        owner would otherwise not be able to modify (or could only
+        make limited modifications).
+
+        This is solved in fusermount, by checking the access
+        permissions on the mountpoint and only allowing the mount if
+        the mount owner can do unlimited modification (has write
+        access to the mountpoint, and mountpoint is not a "sticky"
+        directory)
+
+     2) Even if 1) is solved the mount owner can change the behavior
+        of other users' processes.
+
+         i) It can slow down or indefinitely delay the execution of a
+            filesystem operation, creating a DoS against the user or the
+            whole system.  For example a suid application locking a
+            system file, and then accessing a file on the mount owner's
+            filesystem could be stopped, thus causing the system
+            file to be locked forever.
+
+         ii) It can present files or directories of unlimited length, or
+             directory structures of unlimited depth, possibly causing a
+             system process to eat up diskspace, memory or other
+             resources, again causing *DoS*.
+
+	The solution to this, as well as to B), is not to allow access to
+	the filesystem by processes that the mount owner could not
+	otherwise monitor or manipulate.  Since if the
+	mount owner can ptrace a process, it can do all of the above
+	without using a FUSE mount, the same criteria as used in
+	ptrace can be used to check if a process is allowed to access
+	the filesystem or not.
+
+	Note that the *ptrace* check is not strictly necessary to
+	prevent B/2/i; it is enough to check if the mount owner has enough
+	privilege to send a signal to the process accessing the
+	filesystem, since *SIGSTOP* can be used to get a similar effect.
+
+I think these limitations are unacceptable?
+===========================================
+
+If a sysadmin trusts the users enough, or can ensure through other
+measures that system processes will never enter non-privileged
+mounts, they can relax the last limitation with a 'user_allow_other'
+config option.  If this config option is set, the mounting user can
+add the 'allow_other' mount option which disables the check for other
+users' processes.
+
+Kernel - userspace interface
+============================
+
+The following diagram shows how a filesystem operation (in this
+example unlink) is performed in FUSE. ::
+
+
+ |  "rm /mnt/fuse/file"               |  FUSE filesystem daemon
+ |                                    |
+ |                                    |  >sys_read()
+ |                                    |    >fuse_dev_read()
+ |                                    |      >request_wait()
+ |                                    |        [sleep on fc->waitq]
+ |                                    |
+ |  >sys_unlink()                     |
+ |    >fuse_unlink()                  |
+ |      [get request from             |
+ |       fc->unused_list]             |
+ |      >request_send()               |
+ |        [queue req on fc->pending]  |
+ |        [wake up fc->waitq]         |        [woken up]
+ |        >request_wait_answer()      |
+ |          [sleep on req->waitq]     |
+ |                                    |      <request_wait()
+ |                                    |      [remove req from fc->pending]
+ |                                    |      [copy req to read buffer]
+ |                                    |      [add req to fc->processing]
+ |                                    |    <fuse_dev_read()
+ |                                    |  <sys_read()
+ |                                    |
+ |                                    |  [perform unlink]
+ |                                    |
+ |                                    |  >sys_write()
+ |                                    |    >fuse_dev_write()
+ |                                    |      [look up req in fc->processing]
+ |                                    |      [remove from fc->processing]
+ |                                    |      [copy write buffer to req]
+ |          [woken up]                |      [wake up req->waitq]
+ |                                    |    <fuse_dev_write()
+ |                                    |  <sys_write()
+ |        <request_wait_answer()      |
+ |      <request_send()               |
+ |      [add request to               |
+ |       fc->unused_list]             |
+ |    <fuse_unlink()                  |
+ |  <sys_unlink()                     |
+
+.. note:: Everything in the description above is greatly simplified
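
As a rough userspace-side illustration of the framing used in the
diagram above, a request read from the FUSE device starts with a
'struct fuse_in_header' and a reply written back starts with a
'struct fuse_out_header' (both from <linux/fuse.h>).  The sketch below
is only a minimal, hedged example: it assumes 'fd' is the descriptor
set up by the mount helper (the 'fd=N' mount option) and answers every
request with an error instead of implementing real operations::

    /* Serve a single request: read it, then reply with -ENOSYS. */
    #include <errno.h>
    #include <string.h>
    #include <unistd.h>
    #include <linux/fuse.h>

    static void serve_one(int fd)
    {
            char buf[FUSE_MIN_READ_BUFFER];   /* minimum size accepted by the kernel */
            struct fuse_in_header in;
            struct fuse_out_header out;
            ssize_t n;

            n = read(fd, buf, sizeof(buf));   /* sleeps until a request is queued */
            if (n < (ssize_t)sizeof(in))
                    return;
            memcpy(&in, buf, sizeof(in));

            out.len    = sizeof(out);
            out.error  = -ENOSYS;             /* "operation not implemented" */
            out.unique = in.unique;           /* ties the reply to the request */
            write(fd, &out, sizeof(out));
    }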
+
+There are a couple of ways in which to deadlock a FUSE filesystem.
+Since we are talking about unprivileged userspace programs,
+something must be done about these.
+
+**Scenario 1 - Simple deadlock**::
+
+ |  "rm /mnt/fuse/file"               |  FUSE filesystem daemon
+ |                                    |
+ |  >sys_unlink("/mnt/fuse/file")     |
+ |    [acquire inode semaphore        |
+ |     for "file"]                    |
+ |    >fuse_unlink()                  |
+ |      [sleep on req->waitq]         |
+ |                                    |  <sys_read()
+ |                                    |  >sys_unlink("/mnt/fuse/file")
+ |                                    |    [acquire inode semaphore
+ |                                    |     for "file"]
+ |                                    |    *DEADLOCK*
+
+The solution for this is to allow the filesystem to be aborted.
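
From outside the deadlocked processes, that abort can be triggered by
writing to the 'abort' file in the FUSE control filesystem.  A minimal
sketch, assuming the control filesystem is mounted at
/sys/fs/fuse/connections and that the hung connection happens to be
number 42::

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* The connection number (42) is hypothetical; list the
             * /sys/fs/fuse/connections directory to find the right one. */
            const char *path = "/sys/fs/fuse/connections/42/abort";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror(path);
                    return 1;
            }
            write(fd, "1", 1);   /* writing anything aborts the connection */
            close(fd);
            return 0;
    }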
+
+**Scenario 2 - Tricky deadlock**
+
+This one needs a carefully crafted filesystem.  It's a variation on
+the above, only the call back to the filesystem is not explicit,
+but is caused by a pagefault. ::
+
+ |  Kamikaze filesystem thread 1      |  Kamikaze filesystem thread 2
+ |                                    |
+ |  [fd = open("/mnt/fuse/file")]     |  [request served normally]
+ |  [mmap fd to 'addr']               |
+ |  [close fd]                        |  [FLUSH triggers 'magic' flag]
+ |  [read a byte from addr]           |
+ |    >do_page_fault()                |
+ |      [find or create page]         |
+ |      [lock page]                   |
+ |      >fuse_readpage()              |
+ |         [queue READ request]       |
+ |         [sleep on req->waitq]      |
+ |                                    |  [read request to buffer]
+ |                                    |  [create reply header before addr]
+ |                                    |  >sys_write(addr - headerlength)
+ |                                    |    >fuse_dev_write()
+ |                                    |      [look up req in fc->processing]
+ |                                    |      [remove from fc->processing]
+ |                                    |      [copy write buffer to req]
+ |                                    |        >do_page_fault()
+ |                                    |           [find or create page]
+ |                                    |           [lock page]
+ |                                    |           * DEADLOCK *
+
+The solution is basically the same as above.
+
+An additional problem is that while the write buffer is being copied
+to the request, the request must not be interrupted/aborted.  This is
+because the destination address of the copy may not be valid after the
+request has returned.
+
+This is solved by doing the copy atomically, and by allowing abort
+while the page(s) belonging to the write buffer are faulted in with
+get_user_pages().  The 'req->locked' flag indicates when the copy is
+taking place, and abort is delayed until this flag is unset.
diff --git a/Documentation/filesystems/fuse.txt b/Documentation/filesystems/fuse.txt
deleted file mode 100644
index 13af4a4..0000000
--- a/Documentation/filesystems/fuse.txt
+++ /dev/null
@@ -1,423 +0,0 @@
-Definitions
-~~~~~~~~~~~
-
-Userspace filesystem:
-
-  A filesystem in which data and metadata are provided by an ordinary
-  userspace process.  The filesystem can be accessed normally through
-  the kernel interface.
-
-Filesystem daemon:
-
-  The process(es) providing the data and metadata of the filesystem.
-
-Non-privileged mount (or user mount):
-
-  A userspace filesystem mounted by a non-privileged (non-root) user.
-  The filesystem daemon is running with the privileges of the mounting
-  user.  NOTE: this is not the same as mounts allowed with the "user"
-  option in /etc/fstab, which is not discussed here.
-
-Filesystem connection:
-
-  A connection between the filesystem daemon and the kernel.  The
-  connection exists until either the daemon dies, or the filesystem is
-  umounted.  Note that detaching (or lazy umounting) the filesystem
-  does _not_ break the connection, in this case it will exist until
-  the last reference to the filesystem is released.
-
-Mount owner:
-
-  The user who does the mounting.
-
-User:
-
-  The user who is performing filesystem operations.
-
-What is FUSE?
-~~~~~~~~~~~~~
-
-FUSE is a userspace filesystem framework.  It consists of a kernel
-module (fuse.ko), a userspace library (libfuse.*) and a mount utility
-(fusermount).
-
-One of the most important features of FUSE is allowing secure,
-non-privileged mounts.  This opens up new possibilities for the use of
-filesystems.  A good example is sshfs: a secure network filesystem
-using the sftp protocol.
-
-The userspace library and utilities are available from the FUSE
-homepage:
-
-  http://fuse.sourceforge.net/
-
-Filesystem type
-~~~~~~~~~~~~~~~
-
-The filesystem type given to mount(2) can be one of the following:
-
-'fuse'
-
-  This is the usual way to mount a FUSE filesystem.  The first
-  argument of the mount system call may contain an arbitrary string,
-  which is not interpreted by the kernel.
-
-'fuseblk'
-
-  The filesystem is block device based.  The first argument of the
-  mount system call is interpreted as the name of the device.
-
-Mount options
-~~~~~~~~~~~~~
-
-'fd=N'
-
-  The file descriptor to use for communication between the userspace
-  filesystem and the kernel.  The file descriptor must have been
-  obtained by opening the FUSE device ('/dev/fuse').
-
-'rootmode=M'
-
-  The file mode of the filesystem's root in octal representation.
-
-'user_id=N'
-
-  The numeric user id of the mount owner.
-
-'group_id=N'
-
-  The numeric group id of the mount owner.
-
-'default_permissions'
-
-  By default FUSE doesn't check file access permissions, the
-  filesystem is free to implement its access policy or leave it to
-  the underlying file access mechanism (e.g. in case of network
-  filesystems).  This option enables permission checking, restricting
-  access based on file mode.  It is usually useful together with the
-  'allow_other' mount option.
-
-'allow_other'
-
-  This option overrides the security measure restricting file access
-  to the user mounting the filesystem.  This option is by default only
-  allowed to root, but this restriction can be removed with a
-  (userspace) configuration option.
-
-'max_read=N'
-
-  With this option the maximum size of read operations can be set.
-  The default is infinite.  Note that the size of read requests is
-  limited anyway to 32 pages (which is 128kbyte on i386).
-
-'blksize=N'
-
-  Set the block size for the filesystem.  The default is 512.  This
-  option is only valid for 'fuseblk' type mounts.
-
-Control filesystem
-~~~~~~~~~~~~~~~~~~
-
-There's a control filesystem for FUSE, which can be mounted by:
-
-  mount -t fusectl none /sys/fs/fuse/connections
-
-Mounting it under the '/sys/fs/fuse/connections' directory makes it
-backwards compatible with earlier versions.
-
-Under the fuse control filesystem each connection has a directory
-named by a unique number.
-
-For each connection the following files exist within this directory:
-
- 'waiting'
-
-  The number of requests which are waiting to be transferred to
-  userspace or being processed by the filesystem daemon.  If there is
-  no filesystem activity and 'waiting' is non-zero, then the
-  filesystem is hung or deadlocked.
-
- 'abort'
-
-  Writing anything into this file will abort the filesystem
-  connection.  This means that all waiting requests will be aborted an
-  error returned for all aborted and new requests.
-
-Only the owner of the mount may read or write these files.
-
-Interrupting filesystem operations
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If a process issuing a FUSE filesystem request is interrupted, the
-following will happen:
-
-  1) If the request is not yet sent to userspace AND the signal is
-     fatal (SIGKILL or unhandled fatal signal), then the request is
-     dequeued and returns immediately.
-
-  2) If the request is not yet sent to userspace AND the signal is not
-     fatal, then an 'interrupted' flag is set for the request.  When
-     the request has been successfully transferred to userspace and
-     this flag is set, an INTERRUPT request is queued.
-
-  3) If the request is already sent to userspace, then an INTERRUPT
-     request is queued.
-
-INTERRUPT requests take precedence over other requests, so the
-userspace filesystem will receive queued INTERRUPTs before any others.
-
-The userspace filesystem may ignore the INTERRUPT requests entirely,
-or may honor them by sending a reply to the _original_ request, with
-the error set to EINTR.
-
-It is also possible that there's a race between processing the
-original request and its INTERRUPT request.  There are two possibilities:
-
-  1) The INTERRUPT request is processed before the original request is
-     processed
-
-  2) The INTERRUPT request is processed after the original request has
-     been answered
-
-If the filesystem cannot find the original request, it should wait for
-some timeout and/or a number of new requests to arrive, after which it
-should reply to the INTERRUPT request with an EAGAIN error.  In case
-1) the INTERRUPT request will be requeued.  In case 2) the INTERRUPT
-reply will be ignored.
-
-Aborting a filesystem connection
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-It is possible to get into certain situations where the filesystem is
-not responding.  Reasons for this may be:
-
-  a) Broken userspace filesystem implementation
-
-  b) Network connection down
-
-  c) Accidental deadlock
-
-  d) Malicious deadlock
-
-(For more on c) and d) see later sections)
-
-In either of these cases it may be useful to abort the connection to
-the filesystem.  There are several ways to do this:
-
-  - Kill the filesystem daemon.  Works in case of a) and b)
-
-  - Kill the filesystem daemon and all users of the filesystem.  Works
-    in all cases except some malicious deadlocks
-
-  - Use forced umount (umount -f).  Works in all cases but only if
-    filesystem is still attached (it hasn't been lazy unmounted)
-
-  - Abort filesystem through the FUSE control filesystem.  Most
-    powerful method, always works.
-
-How do non-privileged mounts work?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Since the mount() system call is a privileged operation, a helper
-program (fusermount) is needed, which is installed setuid root.
-
-The implication of providing non-privileged mounts is that the mount
-owner must not be able to use this capability to compromise the
-system.  Obvious requirements arising from this are:
-
- A) mount owner should not be able to get elevated privileges with the
-    help of the mounted filesystem
-
- B) mount owner should not get illegitimate access to information from
-    other users' and the super user's processes
-
- C) mount owner should not be able to induce undesired behavior in
-    other users' or the super user's processes
-
-How are requirements fulfilled?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- A) The mount owner could gain elevated privileges by either:
-
-     1) creating a filesystem containing a device file, then opening
-	this device
-
-     2) creating a filesystem containing a suid or sgid application,
-	then executing this application
-
-    The solution is not to allow opening device files and ignore
-    setuid and setgid bits when executing programs.  To ensure this
-    fusermount always adds "nosuid" and "nodev" to the mount options
-    for non-privileged mounts.
-
- B) If another user is accessing files or directories in the
-    filesystem, the filesystem daemon serving requests can record the
-    exact sequence and timing of operations performed.  This
-    information is otherwise inaccessible to the mount owner, so this
-    counts as an information leak.
-
-    The solution to this problem will be presented in point 2) of C).
-
- C) There are several ways in which the mount owner can induce
-    undesired behavior in other users' processes, such as:
-
-     1) mounting a filesystem over a file or directory which the mount
-        owner could otherwise not be able to modify (or could only
-        make limited modifications).
-
-        This is solved in fusermount, by checking the access
-        permissions on the mountpoint and only allowing the mount if
-        the mount owner can do unlimited modification (has write
-        access to the mountpoint, and mountpoint is not a "sticky"
-        directory)
-
-     2) Even if 1) is solved the mount owner can change the behavior
-        of other users' processes.
-
-         i) It can slow down or indefinitely delay the execution of a
-           filesystem operation creating a DoS against the user or the
-           whole system.  For example a suid application locking a
-           system file, and then accessing a file on the mount owner's
-           filesystem could be stopped, and thus causing the system
-           file to be locked forever.
-
-         ii) It can present files or directories of unlimited length, or
-           directory structures of unlimited depth, possibly causing a
-           system process to eat up diskspace, memory or other
-           resources, again causing DoS.
-
-	The solution to this as well as B) is not to allow processes
-	to access the filesystem, which could otherwise not be
-	monitored or manipulated by the mount owner.  Since if the
-	mount owner can ptrace a process, it can do all of the above
-	without using a FUSE mount, the same criteria as used in
-	ptrace can be used to check if a process is allowed to access
-	the filesystem or not.
-
-	Note that the ptrace check is not strictly necessary to
-	prevent B/2/i, it is enough to check if mount owner has enough
-	privilege to send signal to the process accessing the
-	filesystem, since SIGSTOP can be used to get a similar effect.
-
-I think these limitations are unacceptable?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If a sysadmin trusts the users enough, or can ensure through other
-measures, that system processes will never enter non-privileged
-mounts, it can relax the last limitation with a "user_allow_other"
-config option.  If this config option is set, the mounting user can
-add the "allow_other" mount option which disables the check for other
-users' processes.
-
-Kernel - userspace interface
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The following diagram shows how a filesystem operation (in this
-example unlink) is performed in FUSE.
-
-NOTE: everything in this description is greatly simplified
-
- |  "rm /mnt/fuse/file"               |  FUSE filesystem daemon
- |                                    |
- |                                    |  >sys_read()
- |                                    |    >fuse_dev_read()
- |                                    |      >request_wait()
- |                                    |        [sleep on fc->waitq]
- |                                    |
- |  >sys_unlink()                     |
- |    >fuse_unlink()                  |
- |      [get request from             |
- |       fc->unused_list]             |
- |      >request_send()               |
- |        [queue req on fc->pending]  |
- |        [wake up fc->waitq]         |        [woken up]
- |        >request_wait_answer()      |
- |          [sleep on req->waitq]     |
- |                                    |      <request_wait()
- |                                    |      [remove req from fc->pending]
- |                                    |      [copy req to read buffer]
- |                                    |      [add req to fc->processing]
- |                                    |    <fuse_dev_read()
- |                                    |  <sys_read()
- |                                    |
- |                                    |  [perform unlink]
- |                                    |
- |                                    |  >sys_write()
- |                                    |    >fuse_dev_write()
- |                                    |      [look up req in fc->processing]
- |                                    |      [remove from fc->processing]
- |                                    |      [copy write buffer to req]
- |          [woken up]                |      [wake up req->waitq]
- |                                    |    <fuse_dev_write()
- |                                    |  <sys_write()
- |        <request_wait_answer()      |
- |      <request_send()               |
- |      [add request to               |
- |       fc->unused_list]             |
- |    <fuse_unlink()                  |
- |  <sys_unlink()                     |
-
-There are a couple of ways in which to deadlock a FUSE filesystem.
-Since we are talking about unprivileged userspace programs,
-something must be done about these.
-
-Scenario 1 -  Simple deadlock
------------------------------
-
- |  "rm /mnt/fuse/file"               |  FUSE filesystem daemon
- |                                    |
- |  >sys_unlink("/mnt/fuse/file")     |
- |    [acquire inode semaphore        |
- |     for "file"]                    |
- |    >fuse_unlink()                  |
- |      [sleep on req->waitq]         |
- |                                    |  <sys_read()
- |                                    |  >sys_unlink("/mnt/fuse/file")
- |                                    |    [acquire inode semaphore
- |                                    |     for "file"]
- |                                    |    *DEADLOCK*
-
-The solution for this is to allow the filesystem to be aborted.
-
-Scenario 2 - Tricky deadlock
-----------------------------
-
-This one needs a carefully crafted filesystem.  It's a variation on
-the above, only the call back to the filesystem is not explicit,
-but is caused by a pagefault.
-
- |  Kamikaze filesystem thread 1      |  Kamikaze filesystem thread 2
- |                                    |
- |  [fd = open("/mnt/fuse/file")]     |  [request served normally]
- |  [mmap fd to 'addr']               |
- |  [close fd]                        |  [FLUSH triggers 'magic' flag]
- |  [read a byte from addr]           |
- |    >do_page_fault()                |
- |      [find or create page]         |
- |      [lock page]                   |
- |      >fuse_readpage()              |
- |         [queue READ request]       |
- |         [sleep on req->waitq]      |
- |                                    |  [read request to buffer]
- |                                    |  [create reply header before addr]
- |                                    |  >sys_write(addr - headerlength)
- |                                    |    >fuse_dev_write()
- |                                    |      [look up req in fc->processing]
- |                                    |      [remove from fc->processing]
- |                                    |      [copy write buffer to req]
- |                                    |        >do_page_fault()
- |                                    |           [find or create page]
- |                                    |           [lock page]
- |                                    |           * DEADLOCK *
-
-Solution is basically the same as above.
-
-An additional problem is that while the write buffer is being copied
-to the request, the request must not be interrupted/aborted.  This is
-because the destination address of the copy may not be valid after the
-request has returned.
-
-This is solved with doing the copy atomically, and allowing abort
-while the page(s) belonging to the write buffer are faulted with
-get_user_pages().  The 'req->locked' flag indicates when the copy is
-taking place, and abort is delayed until this flag is unset.
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index ad6315a..386eaad 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -47,4 +47,7 @@
    :maxdepth: 2
 
    autofs
+   fuse
+   overlayfs
    virtiofs
+   vfat
diff --git a/Documentation/filesystems/mount_api.txt b/Documentation/filesystems/mount_api.txt
index 00ff0cf..87c14bb 100644
--- a/Documentation/filesystems/mount_api.txt
+++ b/Documentation/filesystems/mount_api.txt
@@ -427,7 +427,6 @@
 	fs_value_is_string,		Value is a string
 	fs_value_is_blob,		Value is a binary blob
 	fs_value_is_filename,		Value is a filename* + dirfd
-	fs_value_is_filename_empty,	Value is a filename* + dirfd + AT_EMPTY_PATH
 	fs_value_is_file,		Value is an open file (file*)
 
      If there is a value, that value is stored in a union in the struct in one
@@ -519,7 +518,6 @@
 There's a core description struct that links everything together:
 
 	struct fs_parameter_description {
-		const char	name[16];
 		const struct fs_parameter_spec *specs;
 		const struct fs_parameter_enum *enums;
 	};
@@ -535,19 +533,13 @@
 	};
 
 	static const struct fs_parameter_description afs_fs_parameters = {
-		.name		= "kAFS",
 		.specs		= afs_param_specs,
 		.enums		= afs_param_enums,
 	};
 
 The members are as follows:
 
- (1) const char name[16];
-
-     The name to be used in error messages generated by the parse helper
-     functions.
-
- (2) const struct fs_parameter_specification *specs;
+ (1) const struct fs_parameter_spec *specs;
 
      Table of parameter specifications, terminated with a null entry, where the
      entries are of type:
@@ -626,7 +618,7 @@
      of arguments to specify the type and the flags for anything that doesn't
      match one of the above macros.
 
- (6) const struct fs_parameter_enum *enums;
+ (2) const struct fs_parameter_enum *enums;
 
      Table of enum value names to integer mappings, terminated with a null
      entry.  This is of type:
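
As a hedged sketch of how a filesystem might wire these tables
together after this change, the fragment below declares a parameter
description for a hypothetical "examplefs".  The option names, the
Opt_* constants and the fsparam_string()/fsparam_u32() helpers are
illustrative assumptions; the '.enums' table is simply omitted because
no enumerated option is declared:

    /* Hypothetical "examplefs" parameters; names are illustrative only. */
    enum examplefs_param {
            Opt_source,
            Opt_nr_blocks,
    };

    static const struct fs_parameter_spec examplefs_param_specs[] = {
            fsparam_string("source",   Opt_source),
            fsparam_u32("nr_blocks",   Opt_nr_blocks),
            {}                         /* null terminator */
    };

    static const struct fs_parameter_description examplefs_fs_parameters = {
            .specs = examplefs_param_specs,
    };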
diff --git a/Documentation/filesystems/nfs/fault_injection.txt b/Documentation/filesystems/nfs/fault_injection.txt
deleted file mode 100644
index f3a5b0a..0000000
--- a/Documentation/filesystems/nfs/fault_injection.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-
-Fault Injection
-===============
-Fault injection is a method for forcing errors that may not normally occur, or
-may be difficult to reproduce.  Forcing these errors in a controlled environment
-can help the developer find and fix bugs before their code is shipped in a
-production system.  Injecting an error on the Linux NFS server will allow us to
-observe how the client reacts and if it manages to recover its state correctly.
-
-NFSD_FAULT_INJECTION must be selected when configuring the kernel to use this
-feature.
-
-
-Using Fault Injection
-=====================
-On the client, mount the fault injection server through NFS v4.0+ and do some
-work over NFS (open files, take locks, ...).
-
-On the server, mount the debugfs filesystem to <debug_dir> and ls
-<debug_dir>/nfsd.  This will show a list of files that will be used for
-injecting faults on the NFS server.  As root, write a number n to the file
-corresponding to the action you want the server to take.  The server will then
-process the first n items it finds.  So if you want to forget 5 locks, echo '5'
-to <debug_dir>/nfsd/forget_locks.  A value of 0 will tell the server to forget
-all corresponding items.  A log message will be created containing the number
-of items forgotten (check dmesg).
-
-Go back to work on the client and check if the client recovered from the error
-correctly.
-
-
-Available Faults
-================
-forget_clients:
-     The NFS server keeps a list of clients that have placed a mount call.  If
-     this list is cleared, the server will have no knowledge of who the client
-     is, forcing the client to reauthenticate with the server.
-
-forget_openowners:
-     The NFS server keeps a list of what files are currently opened and who
-     they were opened by.  Clearing this list will force the client to reopen
-     its files.
-
-forget_locks:
-     The NFS server keeps a list of what files are currently locked in the VFS.
-     Clearing this list will force the client to reclaim its locks (files are
-     unlocked through the VFS as they are cleared from this list).
-
-forget_delegations:
-     A delegation is used to assure the client that a file, or part of a file,
-     has not changed since the delegation was awarded.  Clearing this list will
-     force the client to reacquire its delegation before accessing the file
-     again.
-
-recall_delegations:
-     Delegations can be recalled by the server when another client attempts to
-     access a file.  This test will notify the client that its delegation has
-     been revoked, forcing the client to reacquire the delegation before using
-     the file again.
-
-
-tools/nfs/inject_faults.sh script
-=================================
-This script has been created to ease the fault injection process.  This script
-will detect the mounted debugfs directory and write to the files located there
-based on the arguments passed by the user.  For example, running
-`inject_faults.sh forget_locks 1` as root will instruct the server to forget
-one lock.  Running `inject_faults forget_locks` will instruct the server to
-forgetall locks.
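
For illustration, the same effect as the echo commands above can be
had from a small program.  The sketch below assumes debugfs is mounted
at the conventional /sys/kernel/debug and asks the server to forget
the first five locks:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path = "/sys/kernel/debug/nfsd/forget_locks";
            int fd = open(path, O_WRONLY);   /* must be run as root */

            if (fd < 0) {
                    perror(path);
                    return 1;
            }
            if (write(fd, "5", 1) != 1)      /* forget the first 5 locks */
                    perror("write");
            close(fd);
            return 0;
    }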
diff --git a/Documentation/filesystems/nfs/idmapper.txt b/Documentation/filesystems/nfs/idmapper.txt
deleted file mode 100644
index b86831a..0000000
--- a/Documentation/filesystems/nfs/idmapper.txt
+++ /dev/null
@@ -1,75 +0,0 @@
-
-=========
-ID Mapper
-=========
-Id mapper is used by NFS to translate user and group ids into names, and to
-translate user and group names into ids.  Part of this translation involves
-performing an upcall to userspace to request the information.  There are two
-ways NFS could obtain this information: placing a call to /sbin/request-key
-or by placing a call to the rpc.idmap daemon.
-
-NFS will attempt to call /sbin/request-key first.  If this succeeds, the
-result will be cached using the generic request-key cache.  This call should
-only fail if /etc/request-key.conf is not configured for the id_resolver key
-type, see the "Configuring" section below if you wish to use the request-key
-method.
-
-If the call to /sbin/request-key fails (if /etc/request-key.conf is not
-configured with the id_resolver key type), then the idmapper will ask the
-legacy rpc.idmap daemon for the id mapping.  This result will be stored
-in a custom NFS idmap cache.
-
-
-===========
-Configuring
-===========
-The file /etc/request-key.conf will need to be modified so /sbin/request-key can
-direct the upcall.  The following line should be added:
-
-#OP	TYPE	DESCRIPTION	CALLOUT INFO	PROGRAM ARG1 ARG2 ARG3 ...
-#======	=======	===============	===============	===============================
-create	id_resolver	*	*		/usr/sbin/nfs.idmap %k %d 600
-
-This will direct all id_resolver requests to the program /usr/sbin/nfs.idmap.
-The last parameter, 600, defines how many seconds into the future the key will
-expire.  This parameter is optional for /usr/sbin/nfs.idmap.  When the timeout
-is not specified, nfs.idmap will default to 600 seconds.
-
-id mapper uses for key descriptions:
-	  uid:  Find the UID for the given user
-	  gid:  Find the GID for the given group
-	 user:  Find the user  name for the given UID
-	group:  Find the group name for the given GID
-
-You can handle any of these individually, rather than using the generic upcall
-program.  If you would like to use your own program for a uid lookup then you
-would edit your request-key.conf so it look similar to this:
-
-#OP	TYPE	DESCRIPTION	CALLOUT INFO	PROGRAM ARG1 ARG2 ARG3 ...
-#======	=======	===============	===============	===============================
-create	id_resolver	uid:*	*		/some/other/program %k %d 600
-create	id_resolver	*	*		/usr/sbin/nfs.idmap %k %d 600
-
-Notice that the new line was added above the line for the generic program.
-request-key will find the first matching line and corresponding program.  In
-this case, /some/other/program will handle all uid lookups and
-/usr/sbin/nfs.idmap will handle gid, user, and group lookups.
-
-See <file:Documentation/security/keys/request-key.rst> for more information
-about the request-key function.
-
-
-=========
-nfs.idmap
-=========
-nfs.idmap is designed to be called by request-key, and should not be run "by
-hand".  This program takes two arguments, a serialized key and a key
-description.  The serialized key is first converted into a key_serial_t, and
-then passed as an argument to keyctl_instantiate (both are part of keyutils.h).
-
-The actual lookups are performed by functions found in nfsidmap.h.  nfs.idmap
-determines the correct function to call by looking at the first part of the
-description string.  For example, a uid lookup description will appear as
-"uid:user@domain".
-
-nfs.idmap will return 0 if the key was instantiated, and non-zero otherwise.
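
As a sketch of the lookup half of such a callout program, the fragment
below parses a "uid:user@domain" description and resolves the name
with getpwnam().  The keyctl_instantiate() step performed by the real
nfs.idmap is deliberately left out, so this only illustrates the
description format, not a complete id_resolver handler:

    #include <pwd.h>
    #include <stdio.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
            char name[256];
            struct passwd *pw;

            if (argc < 2 || strncmp(argv[1], "uid:", 4) != 0) {
                    fprintf(stderr, "usage: %s uid:<user>[@domain]\n", argv[0]);
                    return 1;
            }
            /* strip the "uid:" prefix and any "@domain" suffix */
            snprintf(name, sizeof(name), "%s", argv[1] + 4);
            name[strcspn(name, "@")] = '\0';

            pw = getpwnam(name);
            if (!pw) {
                    fprintf(stderr, "no such user: %s\n", name);
                    return 1;
            }
            printf("%u\n", (unsigned int)pw->pw_uid);
            return 0;
    }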
diff --git a/Documentation/filesystems/nfs/nfs-rdma.txt b/Documentation/filesystems/nfs/nfs-rdma.txt
deleted file mode 100644
index 22dc0dd..0000000
--- a/Documentation/filesystems/nfs/nfs-rdma.txt
+++ /dev/null
@@ -1,274 +0,0 @@
-################################################################################
-#									       #
-#				NFS/RDMA README				       #
-#									       #
-################################################################################
-
- Author: NetApp and Open Grid Computing
- Date: May 29, 2008
-
-Table of Contents
-~~~~~~~~~~~~~~~~~
- - Overview
- - Getting Help
- - Installation
- - Check RDMA and NFS Setup
- - NFS/RDMA Setup
-
-Overview
-~~~~~~~~
-
-  This document describes how to install and setup the Linux NFS/RDMA client
-  and server software.
-
-  The NFS/RDMA client was first included in Linux 2.6.24. The NFS/RDMA server
-  was first included in the following release, Linux 2.6.25.
-
-  In our testing, we have obtained excellent performance results (full 10Gbit
-  wire bandwidth at minimal client CPU) under many workloads. The code passes
-  the full Connectathon test suite and operates over both Infiniband and iWARP
-  RDMA adapters.
-
-Getting Help
-~~~~~~~~~~~~
-
-  If you get stuck, you can ask questions on the
-
-                nfs-rdma-devel@lists.sourceforge.net
-
-  mailing list.
-
-Installation
-~~~~~~~~~~~~
-
-  These instructions are a step by step guide to building a machine for
-  use with NFS/RDMA.
-
-  - Install an RDMA device
-
-    Any device supported by the drivers in drivers/infiniband/hw is acceptable.
-
-    Testing has been performed using several Mellanox-based IB cards, the
-    Ammasso AMS1100 iWARP adapter, and the Chelsio cxgb3 iWARP adapter.
-
-  - Install a Linux distribution and tools
-
-    The first kernel release to contain both the NFS/RDMA client and server was
-    Linux 2.6.25  Therefore, a distribution compatible with this and subsequent
-    Linux kernel release should be installed.
-
-    The procedures described in this document have been tested with
-    distributions from Red Hat's Fedora Project (http://fedora.redhat.com/).
-
-  - Install nfs-utils-1.1.2 or greater on the client
-
-    An NFS/RDMA mount point can be obtained by using the mount.nfs command in
-    nfs-utils-1.1.2 or greater (nfs-utils-1.1.1 was the first nfs-utils
-    version with support for NFS/RDMA mounts, but for various reasons we
-    recommend using nfs-utils-1.1.2 or greater). To see which version of
-    mount.nfs you are using, type:
-
-    $ /sbin/mount.nfs -V
-
-    If the version is less than 1.1.2 or the command does not exist,
-    you should install the latest version of nfs-utils.
-
-    Download the latest package from:
-
-    http://www.kernel.org/pub/linux/utils/nfs
-
-    Uncompress the package and follow the installation instructions.
-
-    If you will not need the idmapper and gssd executables (you do not need
-    these to create an NFS/RDMA enabled mount command), the installation
-    process can be simplified by disabling these features when running
-    configure:
-
-    $ ./configure --disable-gss --disable-nfsv4
-
-    To build nfs-utils you will need the tcp_wrappers package installed. For
-    more information on this see the package's README and INSTALL files.
-
-    After building the nfs-utils package, there will be a mount.nfs binary in
-    the utils/mount directory. This binary can be used to initiate NFS v2, v3,
-    or v4 mounts. To initiate a v4 mount, the binary must be called
-    mount.nfs4.  The standard technique is to create a symlink called
-    mount.nfs4 to mount.nfs.
-
-    This mount.nfs binary should be installed at /sbin/mount.nfs as follows:
-
-    $ sudo cp utils/mount/mount.nfs /sbin/mount.nfs
-
-    In this location, mount.nfs will be invoked automatically for NFS mounts
-    by the system mount command.
-
-    NOTE: mount.nfs and therefore nfs-utils-1.1.2 or greater is only needed
-    on the NFS client machine. You do not need this specific version of
-    nfs-utils on the server. Furthermore, only the mount.nfs command from
-    nfs-utils-1.1.2 is needed on the client.
-
-  - Install a Linux kernel with NFS/RDMA
-
-    The NFS/RDMA client and server are both included in the mainline Linux
-    kernel version 2.6.25 and later. This and other versions of the Linux
-    kernel can be found at:
-
-    https://www.kernel.org/pub/linux/kernel/
-
-    Download the sources and place them in an appropriate location.
-
-  - Configure the RDMA stack
-
-    Make sure your kernel configuration has RDMA support enabled. Under
-    Device Drivers -> InfiniBand support, update the kernel configuration
-    to enable InfiniBand support [NOTE: the option name is misleading. Enabling
-    InfiniBand support is required for all RDMA devices (IB, iWARP, etc.)].
-
-    Enable the appropriate IB HCA support (mlx4, mthca, ehca, ipath, etc.) or
-    iWARP adapter support (amso, cxgb3, etc.).
-
-    If you are using InfiniBand, be sure to enable IP-over-InfiniBand support.
-
-  - Configure the NFS client and server
-
-    Your kernel configuration must also have NFS file system support and/or
-    NFS server support enabled. These and other NFS related configuration
-    options can be found under File Systems -> Network File Systems.
-
-  - Build, install, reboot
-
-    The NFS/RDMA code will be enabled automatically if NFS and RDMA
-    are turned on. The NFS/RDMA client and server are configured via the hidden
-    SUNRPC_XPRT_RDMA config option that depends on SUNRPC and INFINIBAND. The
-    value of SUNRPC_XPRT_RDMA will be:
-
-     - N if either SUNRPC or INFINIBAND are N, in this case the NFS/RDMA client
-       and server will not be built
-     - M if both SUNRPC and INFINIBAND are on (M or Y) and at least one is M,
-       in this case the NFS/RDMA client and server will be built as modules
-     - Y if both SUNRPC and INFINIBAND are Y, in this case the NFS/RDMA client
-       and server will be built into the kernel
-
-    Therefore, if you have followed the steps above and turned no NFS and RDMA,
-    the NFS/RDMA client and server will be built.
-
-    Build a new kernel, install it, boot it.
-
-Check RDMA and NFS Setup
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-    Before configuring the NFS/RDMA software, it is a good idea to test
-    your new kernel to ensure that the kernel is working correctly.
-    In particular, it is a good idea to verify that the RDMA stack
-    is functioning as expected and standard NFS over TCP/IP and/or UDP/IP
-    is working properly.
-
-  - Check RDMA Setup
-
-    If you built the RDMA components as modules, load them at
-    this time. For example, if you are using a Mellanox Tavor/Sinai/Arbel
-    card:
-
-    $ modprobe ib_mthca
-    $ modprobe ib_ipoib
-
-    If you are using InfiniBand, make sure there is a Subnet Manager (SM)
-    running on the network. If your IB switch has an embedded SM, you can
-    use it. Otherwise, you will need to run an SM, such as OpenSM, on one
-    of your end nodes.
-
-    If an SM is running on your network, you should see the following:
-
-    $ cat /sys/class/infiniband/driverX/ports/1/state
-    4: ACTIVE
-
-    where driverX is mthca0, ipath5, ehca3, etc.
-
-    To further test the InfiniBand software stack, use IPoIB (this
-    assumes you have two IB hosts named host1 and host2):
-
-    host1$ ip link set dev ib0 up
-    host1$ ip address add dev ib0 a.b.c.x
-    host2$ ip link set dev ib0 up
-    host2$ ip address add dev ib0 a.b.c.y
-    host1$ ping a.b.c.y
-    host2$ ping a.b.c.x
-
-    For other device types, follow the appropriate procedures.
-
-  - Check NFS Setup
-
-    For the NFS components enabled above (client and/or server),
-    test their functionality over standard Ethernet using TCP/IP or UDP/IP.
-
-NFS/RDMA Setup
-~~~~~~~~~~~~~~
-
-  We recommend that you use two machines, one to act as the client and
-  one to act as the server.
-
-  One time configuration:
-
-  - On the server system, configure the /etc/exports file and
-    start the NFS/RDMA server.
-
-    Exports entries with the following formats have been tested:
-
-    /vol0   192.168.0.47(fsid=0,rw,async,insecure,no_root_squash)
-    /vol0   192.168.0.0/255.255.255.0(fsid=0,rw,async,insecure,no_root_squash)
-
-    The IP address(es) is(are) the client's IPoIB address for an InfiniBand
-    HCA or the client's iWARP address(es) for an RNIC.
-
-    NOTE: The "insecure" option must be used because the NFS/RDMA client does
-    not use a reserved port.
-
- Each time a machine boots:
-
-  - Load and configure the RDMA drivers
-
-    For InfiniBand using a Mellanox adapter:
-
-    $ modprobe ib_mthca
-    $ modprobe ib_ipoib
-    $ ip li set dev ib0 up
-    $ ip addr add dev ib0 a.b.c.d
-
-    NOTE: use unique addresses for the client and server
-
-  - Start the NFS server
-
-    If the NFS/RDMA server was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in
-    kernel config), load the RDMA transport module:
-
-    $ modprobe svcrdma
-
-    Regardless of how the server was built (module or built-in), start the
-    server:
-
-    $ /etc/init.d/nfs start
-
-    or
-
-    $ service nfs start
-
-    Instruct the server to listen on the RDMA transport:
-
-    $ echo rdma 20049 > /proc/fs/nfsd/portlist
-
-  - On the client system
-
-    If the NFS/RDMA client was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in
-    kernel config), load the RDMA client module:
-
-    $ modprobe xprtrdma.ko
-
-    Regardless of how the client was built (module or built-in), use this
-    command to mount the NFS/RDMA server:
-
-    $ mount -o rdma,port=20049 <IPoIB-server-name-or-address>:/<export> /mnt
-
-    To verify that the mount is using RDMA, run "cat /proc/mounts" and check
-    the "proto" field for the given mount.
-
-  Congratulations! You're using NFS/RDMA!
diff --git a/Documentation/filesystems/nfs/nfs.txt b/Documentation/filesystems/nfs/nfs.txt
deleted file mode 100644
index f2571c8..0000000
--- a/Documentation/filesystems/nfs/nfs.txt
+++ /dev/null
@@ -1,136 +0,0 @@
-
-The NFS client
-==============
-
-The NFS version 2 protocol was first documented in RFC1094 (March 1989).
-Since then two more major releases of NFS have been published, with NFSv3
-being documented in RFC1813 (June 1995), and NFSv4 in RFC3530 (April
-2003).
-
-The Linux NFS client currently supports all the above published versions,
-and work is in progress on adding support for minor version 1 of the NFSv4
-protocol.
-
-The purpose of this document is to provide information on some of the
-special features of the NFS client that can be configured by system
-administrators.
-
-
-The nfs4_unique_id parameter
-============================
-
-NFSv4 requires clients to identify themselves to servers with a unique
-string.  File open and lock state shared between one client and one server
-is associated with this identity.  To support robust NFSv4 state recovery
-and transparent state migration, this identity string must not change
-across client reboots.
-
-Without any other intervention, the Linux client uses a string that contains
-the local system's node name.  System administrators, however, often do not
-take care to ensure that node names are fully qualified and do not change
-over the lifetime of a client system.  Node names can have other
-administrative requirements that require particular behavior that does not
-work well as part of an nfs_client_id4 string.
-
-The nfs.nfs4_unique_id boot parameter specifies a unique string that can be
-used instead of a system's node name when an NFS client identifies itself to
-a server.  Thus, if the system's node name is not unique, or it changes, its
-nfs.nfs4_unique_id stays the same, preventing collision with other clients
-or loss of state during NFS reboot recovery or transparent state migration.
-
-The nfs.nfs4_unique_id string is typically a UUID, though it can contain
-anything that is believed to be unique across all NFS clients.  An
-nfs4_unique_id string should be chosen when a client system is installed,
-just as a system's root file system gets a fresh UUID in its label at
-install time.
-
-The string should remain fixed for the lifetime of the client.  It can be
-changed safely if care is taken that the client shuts down cleanly and all
-outstanding NFSv4 state has expired, to prevent loss of NFSv4 state.
-
-This string can be stored in an NFS client's grub.conf, or it can be provided
-via a net boot facility such as PXE.  It may also be specified as an nfs.ko
-module parameter.  Specifying a uniquifier string is not support for NFS
-clients running in containers.
-
-
-The DNS resolver
-================
-
-NFSv4 allows for one server to refer the NFS client to data that has been
-migrated onto another server by means of the special "fs_locations"
-attribute. See
-	http://tools.ietf.org/html/rfc3530#section-6
-and
-	http://tools.ietf.org/html/draft-ietf-nfsv4-referrals-00
-
-The fs_locations information can take the form of either an ip address and
-a path, or a DNS hostname and a path. The latter requires the NFS client to
-do a DNS lookup in order to mount the new volume, and hence the need for an
-upcall to allow userland to provide this service.
-
-Assuming that the user has the 'rpc_pipefs' filesystem mounted in the usual
-/var/lib/nfs/rpc_pipefs, the upcall consists of the following steps:
-
-   (1) The process checks the dns_resolve cache to see if it contains a
-       valid entry. If so, it returns that entry and exits.
-
-   (2) If no valid entry exists, the helper script '/sbin/nfs_cache_getent'
-       (may be changed using the 'nfs.cache_getent' kernel boot parameter)
-       is run, with two arguments:
-		- the cache name, "dns_resolve"
-		- the hostname to resolve
-
-   (3) After looking up the corresponding ip address, the helper script
-       writes the result into the rpc_pipefs pseudo-file
-       '/var/lib/nfs/rpc_pipefs/cache/dns_resolve/channel'
-       in the following (text) format:
-
-		"<ip address> <hostname> <ttl>\n"
-
-       Where <ip address> is in the usual IPv4 (123.456.78.90) or IPv6
-       (ffee:ddcc:bbaa:9988:7766:5544:3322:1100, ffee::1100, ...) format.
-       <hostname> is identical to the second argument of the helper
-       script, and <ttl> is the 'time to live' of this cache entry (in
-       units of seconds).
-
-       Note: If <ip address> is invalid, say the string "0", then a negative
-       entry is created, which will cause the kernel to treat the hostname
-       as having no valid DNS translation.
-
-
-
-
-A basic sample /sbin/nfs_cache_getent
-=====================================
-
-#!/bin/bash
-#
-ttl=600
-#
-cut=/usr/bin/cut
-getent=/usr/bin/getent
-rpc_pipefs=/var/lib/nfs/rpc_pipefs
-#
-die()
-{
-	echo "Usage: $0 cache_name entry_name"
-	exit 1
-}
-
-[ $# -lt 2 ] && die
-cachename="$1"
-cache_path=${rpc_pipefs}/cache/${cachename}/channel
-
-case "${cachename}" in
-	dns_resolve)
-		name="$2"
-		result="$(${getent} hosts ${name} | ${cut} -f1 -d\ )"
-		[ -z "${result}" ] && result="0"
-		;;
-	*)
-		die
-		;;
-esac
-echo "${result} ${name} ${ttl}" >${cache_path}
-
diff --git a/Documentation/filesystems/nfs/nfsd-admin-interfaces.txt b/Documentation/filesystems/nfs/nfsd-admin-interfaces.txt
deleted file mode 100644
index 56a96fb..0000000
--- a/Documentation/filesystems/nfs/nfsd-admin-interfaces.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-Administrative interfaces for nfsd
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Note that normally these interfaces are used only by the utilities in
-nfs-utils.
-
-nfsd is controlled mainly by pseudofiles under the "nfsd" filesystem,
-which is normally mounted at /proc/fs/nfsd/.
-
-The server is always started by the first write of a nonzero value to
-nfsd/threads.
-
-Before doing that, NFSD can be told which sockets to listen on by
-writing to nfsd/portlist; that write may be:
-
-	- an ascii-encoded file descriptor, which should refer to a
-	  bound (and listening, for tcp) socket, or
-	- "transportname port", where transportname is currently either
-	  "udp", "tcp", or "rdma".
-
-If nfsd is started without doing any of these, then it will create one
-udp and one tcp listener at port 2049 (see nfsd_init_socks).
-
-On startup, nfsd and lockd grace periods start.
-
-nfsd is shut down by a write of 0 to nfsd/threads.  All locks and state
-are thrown away at that point.
-
-Between startup and shutdown, the number of threads may be adjusted up
-or down by additional writes to nfsd/threads or by writes to
-nfsd/pool_threads.
-
-For more detail about files under nfsd/ and what they control, see
-fs/nfsd/nfsctl.c; most of them have detailed comments.
-
-Implementation notes
-^^^^^^^^^^^^^^^^^^^^
-
-Note that the rpc server requires the caller to serialize addition and
-removal of listening sockets, and startup and shutdown of the server.
-For nfsd this is done using nfsd_mutex.
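
A minimal sketch of driving these pseudofiles programmatically,
assuming the nfsd filesystem is mounted at /proc/fs/nfsd: it asks the
server to listen on TCP port 2049 and then starts it with eight
threads:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int write_str(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);

            if (fd < 0)
                    return -1;
            if (write(fd, val, strlen(val)) < 0) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }

    int main(void)
    {
            /* "transportname port", as described above */
            if (write_str("/proc/fs/nfsd/portlist", "tcp 2049\n"))
                    perror("portlist");
            /* the first write of a nonzero value starts the server */
            if (write_str("/proc/fs/nfsd/threads", "8\n"))
                    perror("threads");
            return 0;
    }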
diff --git a/Documentation/filesystems/nfs/nfsroot.txt b/Documentation/filesystems/nfs/nfsroot.txt
deleted file mode 100644
index ae43324..0000000
--- a/Documentation/filesystems/nfs/nfsroot.txt
+++ /dev/null
@@ -1,355 +0,0 @@
-Mounting the root filesystem via NFS (nfsroot)
-===============================================
-
-Written 1996 by Gero Kuhlmann <gero@gkminix.han.de>
-Updated 1997 by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
-Updated 2006 by Nico Schottelius <nico-kernel-nfsroot@schottelius.org>
-Updated 2006 by Horms <horms@verge.net.au>
-Updated 2018 by Chris Novakovic <chris@chrisn.me.uk>
-
-
-
-In order to use a diskless system, such as an X-terminal or printer server
-for example, it is necessary for the root filesystem to be present on a
-non-disk device. This may be an initramfs (see Documentation/filesystems/
-ramfs-rootfs-initramfs.txt), a ramdisk (see Documentation/admin-guide/initrd.rst) or a
-filesystem mounted via NFS. The following text describes on how to use NFS
-for the root filesystem. For the rest of this text 'client' means the
-diskless system, and 'server' means the NFS server.
-
-
-
-
-1.) Enabling nfsroot capabilities
-    -----------------------------
-
-In order to use nfsroot, NFS client support needs to be selected as
-built-in during configuration. Once this has been selected, the nfsroot
-option will become available, which should also be selected.
-
-In the networking options, kernel level autoconfiguration can be selected,
-along with the types of autoconfiguration to support. Selecting all of
-DHCP, BOOTP and RARP is safe.
-
-
-
-
-2.) Kernel command line
-    -------------------
-
-When the kernel has been loaded by a boot loader (see below) it needs to be
-told what root fs device to use. And in the case of nfsroot, where to find
-both the server and the name of the directory on the server to mount as root.
-This can be established using the following kernel command line parameters:
-
-
-root=/dev/nfs
-
-  This is necessary to enable the pseudo-NFS-device. Note that it's not a
-  real device but just a synonym to tell the kernel to use NFS instead of
-  a real device.
-
-
-nfsroot=[<server-ip>:]<root-dir>[,<nfs-options>]
-
-  If the `nfsroot' parameter is NOT given on the command line,
-  the default "/tftpboot/%s" will be used.
-
-  <server-ip>	Specifies the IP address of the NFS server.
-		The default address is determined by the `ip' parameter
-		(see below). This parameter allows the use of different
-		servers for IP autoconfiguration and NFS.
-
-  <root-dir>	Name of the directory on the server to mount as root.
-		If there is a "%s" token in the string, it will be
-		replaced by the ASCII-representation of the client's
-		IP address.
-
-  <nfs-options>	Standard NFS options. All options are separated by commas.
-		The following defaults are used:
-			port		= as given by server portmap daemon
-			rsize		= 4096
-			wsize		= 4096
-			timeo		= 7
-			retrans		= 3
-			acregmin	= 3
-			acregmax	= 60
-			acdirmin	= 30
-			acdirmax	= 60
-			flags		= hard, nointr, noposix, cto, ac
-
-
-ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>:
-   <dns0-ip>:<dns1-ip>:<ntp0-ip>
-
-  This parameter tells the kernel how to configure IP addresses of devices
-  and also how to set up the IP routing table. It was originally called
-  `nfsaddrs', but now the boot-time IP configuration works independently of
-  NFS, so it was renamed to `ip' and the old name remained as an alias for
-  compatibility reasons.
-
-  If this parameter is missing from the kernel command line, all fields are
-  assumed to be empty, and the defaults mentioned below apply. In general
-  this means that the kernel tries to configure everything using
-  autoconfiguration.
-
-  The <autoconf> parameter can appear alone as the value to the `ip'
-  parameter (without all the ':' characters before).  If the value is
-  "ip=off" or "ip=none", no autoconfiguration will take place, otherwise
-  autoconfiguration will take place.  The most common way to use this
-  is "ip=dhcp".
-
-  <client-ip>	IP address of the client.
-
-  		Default:  Determined using autoconfiguration.
-
-  <server-ip>	IP address of the NFS server. If RARP is used to determine
-		the client address and this parameter is NOT empty only
-		replies from the specified server are accepted.
-
-		Only required for NFS root. That is autoconfiguration
-		will not be triggered if it is missing and NFS root is not
-		in operation.
-
-		Value is exported to /proc/net/pnp with the prefix "bootserver "
-		(see below).
-
-		Default: Determined using autoconfiguration.
-		         The address of the autoconfiguration server is used.
-
-  <gw-ip>	IP address of a gateway if the server is on a different subnet.
-
-		Default: Determined using autoconfiguration.
-
-  <netmask>	Netmask for local network interface. If unspecified
-		the netmask is derived from the client IP address assuming
-		classful addressing.
-
-		Default:  Determined using autoconfiguration.
-
-  <hostname>	Name of the client. If a '.' character is present, anything
-		before the first '.' is used as the client's hostname, and anything
-		after it is used as its NIS domain name. May be supplied by
-		autoconfiguration, but its absence will not trigger autoconfiguration.
-		If specified and DHCP is used, the user-provided hostname (and NIS
-		domain name, if present) will be carried in the DHCP request; this
-		may cause a DNS record to be created or updated for the client.
-
-  		Default: Client IP address is used in ASCII notation.
-
-  <device>	Name of network device to use.
-
-		Default: If the host only has one device, it is used.
-			 Otherwise the device is determined using
-			 autoconfiguration. This is done by sending
-			 autoconfiguration requests out of all devices,
-			 and using the device that received the first reply.
-
-  <autoconf>	Method to use for autoconfiguration. In the case of options
-                which specify multiple autoconfiguration protocols,
-		requests are sent using all protocols, and the first one
-		to reply is used.
-
-		Only autoconfiguration protocols that have been compiled
-		into the kernel will be used, regardless of the value of
-		this option.
-
-                  off or none: don't use autoconfiguration
-				(do static IP assignment instead)
-		  on or any:   use any protocol available in the kernel
-			       (default)
-		  dhcp:        use DHCP
-		  bootp:       use BOOTP
-		  rarp:        use RARP
-		  both:        use both BOOTP and RARP but not DHCP
-		               (old option kept for backwards compatibility)
-
-		if dhcp is used, the client identifier can be used by following
-		format "ip=dhcp,client-id-type,client-id-value"
-
-                Default: any
-
-  <dns0-ip>	IP address of primary nameserver.
-		Value is exported to /proc/net/pnp with the prefix "nameserver "
-		(see below).
-
-		Default: None if not using autoconfiguration; determined
-		automatically if using autoconfiguration.
-
-  <dns1-ip>	IP address of secondary nameserver.
-		See <dns0-ip>.
-
-  <ntp0-ip>	IP address of a Network Time Protocol (NTP) server.
-		Value is exported to /proc/net/ipconfig/ntp_servers, but is
-		otherwise unused (see below).
-
-		Default: None if not using autoconfiguration; determined
-		automatically if using autoconfiguration.
-
-  After configuration (whether manual or automatic) is complete, two files
-  are created in the following format; lines are omitted if their respective
-  value is empty following configuration:
-
-  - /proc/net/pnp:
-
-	#PROTO: <DHCP|BOOTP|RARP|MANUAL>	(depending on configuration method)
-	domain <dns-domain>			(if autoconfigured, the DNS domain)
-	nameserver <dns0-ip>			(primary name server IP)
-	nameserver <dns1-ip>			(secondary name server IP)
-	nameserver <dns2-ip>			(tertiary name server IP)
-	bootserver <server-ip>			(NFS server IP)
-
-  - /proc/net/ipconfig/ntp_servers:
-
-	<ntp0-ip>				(NTP server IP)
-	<ntp1-ip>				(NTP server IP)
-	<ntp2-ip>				(NTP server IP)
-
-  <dns-domain> and <dns2-ip> (in /proc/net/pnp) and <ntp1-ip> and <ntp2-ip>
-  (in /proc/net/ipconfig/ntp_servers) are requested during autoconfiguration;
-  they cannot be specified as part of the "ip=" kernel command line parameter.
-
-  Because the "domain" and "nameserver" options are recognised by DNS
-  resolvers, /etc/resolv.conf is often linked to /proc/net/pnp on systems
-  that use an NFS root filesystem.
-
-  Note that the kernel will not synchronise the system time with any NTP
-  servers it discovers; this is the responsibility of a user space process
-  (e.g. an initrd/initramfs script that passes the IP addresses listed in
-  /proc/net/ipconfig/ntp_servers to an NTP client before mounting the real
-  root filesystem if it is on NFS).
-
-
-nfsrootdebug
-
-  This parameter enables debugging messages to appear in the kernel
-  log at boot time so that administrators can verify that the correct
-  NFS mount options, server address, and root path are passed to the
-  NFS client.
-
-
-rdinit=<executable file>
-
-  To specify which file contains the program that starts system
-  initialization, administrators can use this command line parameter.
-  The default value of this parameter is "/init".  If the specified
-  file exists and the kernel can execute it, root filesystem related
-  kernel command line parameters, including `nfsroot=', are ignored.
-
-  A description of the process of mounting the root file system can be
-  found in:
-
-    Documentation/driver-api/early-userspace/early_userspace_support.rst
-
-
-
-
-3.) Boot Loader
-    ----------
-
-To get the kernel into memory different approaches can be used.
-They depend on various facilities being available:
-
-
-3.1)  Booting from a floppy using syslinux
-
-	When building kernels, an easy way to create a boot floppy that uses
-	syslinux is to use the zdisk or bzdisk make targets which use zimage
-      	and bzimage images respectively. Both targets accept the
-     	FDARGS parameter which can be used to set the kernel command line.
-
-	e.g.
-	   make bzdisk FDARGS="root=/dev/nfs"
-
-   	Note that the user running this command will need to have
-     	access to the floppy drive device, /dev/fd0
-
-     	For more information on syslinux, including how to create bootdisks
-     	for prebuilt kernels, see http://syslinux.zytor.com/
-
-	N.B: Previously it was possible to write a kernel directly to
-	     a floppy using dd, configure the boot device using rdev, and
-	     boot using the resulting floppy. Linux no longer supports this
-	     method of booting.
-
-3.2) Booting from a cdrom using isolinux
-
-     	When building kernels, an easy way to create a bootable cdrom that
-     	uses isolinux is to use the isoimage target which uses a bzimage
-     	image. Like zdisk and bzdisk, this target accepts the FDARGS
-     	parameter which can be used to set the kernel command line.
-
-	e.g.
-	  make isoimage FDARGS="root=/dev/nfs"
-
-     	The resulting iso image will be arch/<ARCH>/boot/image.iso
-     	This can be written to a cdrom using a variety of tools including
-     	cdrecord.
-
-	e.g.
-	  cdrecord dev=ATAPI:1,0,0 arch/x86/boot/image.iso
-
-     	For more information on isolinux, including how to create bootdisks
-     	for prebuilt kernels, see http://syslinux.zytor.com/
-
-3.3) Using LILO
-	When using LILO all the necessary command line parameters may be
-	specified using the 'append=' directive in the LILO configuration
-	file.
-
-	However, to use the 'root=' directive you also need to create
-	a dummy root device, which may be removed after LILO is run.
-
-	mknod /dev/boot255 c 0 255
-
-	For information on configuring LILO, please refer to its documentation.
-
-3.4) Using GRUB
-	When using GRUB, kernel parameters are simply appended after the kernel
-	specification: kernel <kernel> <parameters>
-
-3.5) Using loadlin
-	loadlin may be used to boot Linux from a DOS command prompt without
-	requiring a local hard disk to mount as root. This has not been
-	thoroughly tested by the authors of this document, but in general
-	it should be possible configure the kernel command line similarly
-	to the configuration of LILO.
-
-	Please refer to the loadlin documentation for further information.
-
-3.6) Using a boot ROM
-	This is probably the most elegant way of booting a diskless client.
-	With a boot ROM the kernel is loaded using the TFTP protocol. The
-	authors of this document are not aware of any commercial boot
-	ROMs that support booting Linux over the network. However, there
-	are two free implementations of a boot ROM, netboot-nfs and
-	etherboot, both of which are available on sunsite.unc.edu, and both
-	of which contain everything you need to boot a diskless Linux client.
-
-3.7) Using pxelinux
-	Pxelinux may be used to boot Linux using the PXE boot loader
-	which is present on many modern network cards.
-
-	When using pxelinux, the kernel image is specified using
-	"kernel <relative-path-below /tftpboot>". The nfsroot parameters
-	are passed to the kernel by adding them to the "append" line.
-	It is common to use a serial console in conjunction with pxelinux;
-	see Documentation/admin-guide/serial-console.rst for more information.
-
-	For more information on pxelinux, including how to create bootdisks
-	for prebuilt kernels, see http://syslinux.zytor.com/
-
-
-
-
-4.) Credits
-    -------
-
-  The nfsroot code in the kernel and the RARP support have been written
-  by Gero Kuhlmann <gero@gkminix.han.de>.
-
-  The rest of the IP layer autoconfiguration code has been written
-  by Martin Mares <mj@atrey.karlin.mff.cuni.cz>.
-
-  In order to write the initial version of nfsroot I would like to thank
-  Jens-Uwe Mager <jum@anubis.han.de> for his help.
diff --git a/Documentation/filesystems/nfs/pnfs-block-server.txt b/Documentation/filesystems/nfs/pnfs-block-server.txt
deleted file mode 100644
index 2143673..0000000
--- a/Documentation/filesystems/nfs/pnfs-block-server.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-pNFS block layout server user guide
-
-The Linux NFS server now supports the pNFS block layout extension.  In this
-case the NFS server acts as Metadata Server (MDS) for pNFS, which in addition
-to handling all the metadata access to the NFS export also hands out layouts
-to the clients to directly access the underlying block devices that are
-shared with the client.
-
-To use pNFS block layouts with the Linux NFS server, the exported file
-system needs to support the pNFS block layouts (currently just XFS), and the
-file system must sit on shared storage (typically iSCSI) that is accessible
-to the clients in addition to the MDS.  As of now the file system needs to
-sit directly on the exported volume, striping or concatenation of
-volumes on the MDS and clients is not supported yet.
-
-On the server, pNFS block volume support is enabled automatically if the file
-system supports it.  On the client make sure the kernel has the CONFIG_PNFS_BLOCK
-option enabled, the blkmapd daemon from nfs-utils is running, and the
-file system is mounted using the NFSv4.1 protocol version (mount -o vers=4.1).
-
-If the nfsd server needs to fence a non-responding client it calls
-/sbin/nfsd-recall-failed with the first argument set to the IP address of
-the client, and the second argument set to the device node without the /dev
-prefix for the file system to be fenced. Below is an example file that shows
-how to translate the device into a serial number from SCSI EVPD 0x80:
-
-cat > /sbin/nfsd-recall-failed << EOF
-#!/bin/sh
-
-CLIENT="$1"
-DEV="/dev/$2"
-EVPD=`sg_inq --page=0x80 ${DEV} | \
-	grep "Unit serial number:" | \
-	awk -F ': ' '{print $2}'`
-
-echo "fencing client ${CLIENT} serial ${EVPD}" >> /var/log/pnfsd-fence.log
-EOF
diff --git a/Documentation/filesystems/nfs/pnfs-scsi-server.txt b/Documentation/filesystems/nfs/pnfs-scsi-server.txt
deleted file mode 100644
index 5bef726..0000000
--- a/Documentation/filesystems/nfs/pnfs-scsi-server.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-
-pNFS SCSI layout server user guide
-==================================
-
-This document describes support for pNFS SCSI layouts in the Linux NFS server.
-With pNFS SCSI layouts, the NFS server acts as Metadata Server (MDS) for pNFS,
-which in addition to handling all the metadata access to the NFS export,
-also hands out layouts to the clients so that they can directly access the
-underlying SCSI LUNs that are shared with the client.
-
-To use pNFS SCSI layouts with the Linux NFS server, the exported file
-system needs to support the pNFS SCSI layouts (currently just XFS), and the
-file system must sit on a SCSI LUN that is accessible to the clients in
-addition to the MDS.  As of now the file system needs to sit directly on the
-exported LUN, striping or concatenation of LUNs on the MDS and clients
-is not supported yet.
-
-On a server built with CONFIG_NFSD_SCSI, the pNFS SCSI volume support is
-automatically enabled if the file system is exported using the "pnfs"
-option and the underlying SCSI device supports persistent reservations.
-On the client make sure the kernel has the CONFIG_PNFS_BLOCK option
-enabled, and the file system is mounted using the NFSv4.1 protocol
-version (mount -o vers=4.1).
diff --git a/Documentation/filesystems/path-lookup.rst b/Documentation/filesystems/path-lookup.rst
index 434a07b..a321697 100644
--- a/Documentation/filesystems/path-lookup.rst
+++ b/Documentation/filesystems/path-lookup.rst
@@ -13,6 +13,7 @@
 including:
 
 - per-directory parallel name lookup.
+- ``openat2()`` resolution restriction flags.
 
 Introduction to pathname lookup
 ===============================
@@ -235,6 +236,13 @@
 unsuccessfully scanned a chain in the hash table, it simply tries
 again.
 
+``rename_lock`` is also used to detect and defend against potential attacks
+against ``LOOKUP_BENEATH`` and ``LOOKUP_IN_ROOT`` when resolving ".." (where
+the parent directory is moved outside the root, bypassing the ``path_equal()``
+check). If ``rename_lock`` is updated during the lookup and the path encounters
+a "..", a potential attack occurred and ``handle_dots()`` will bail out with
+``-EAGAIN``.
+
 inode->i_rwsem
 ~~~~~~~~~~~~~~
 
@@ -348,6 +356,13 @@
 needed to stabilize the link to the mounted-on dentry, which the
 refcount on the mount itself doesn't ensure.
 
+``mount_lock`` is also used to detect and defend against potential attacks
+against ``LOOKUP_BENEATH`` and ``LOOKUP_IN_ROOT`` when resolving ".." (where
+the parent directory is moved outside the root, bypassing the ``path_equal()``
+check). If ``mount_lock`` is updated during the lookup and the path encounters
+a "..", a potential attack occurred and ``handle_dots()`` will bail out with
+``-EAGAIN``.
+
 RCU
 ~~~
 
@@ -405,6 +420,10 @@
 only one root is in effect for the entire path walk, even if it races
 with a ``chroot()`` system call.
 
+It should be noted that in the case of ``LOOKUP_IN_ROOT`` or
+``LOOKUP_BENEATH``, the effective root becomes the directory referred to by
+the file descriptor passed to ``openat2()`` (which exposes these
+``LOOKUP_`` flags).
+
 The root is needed when either of two conditions holds: (1) either the
 pathname or a symbolic link starts with a "'/'", or (2) a "``..``"
 component is being handled, since "``..``" from the root must always stay
@@ -1149,7 +1168,7 @@
 the stack frame discarded.
 
 The other case involves things in ``/proc`` that look like symlinks but
-aren't really::
+aren't really (and are therefore commonly referred to as "magic-links")::
 
      $ ls -l /proc/self/fd/1
      lrwx------ 1 neilb neilb 64 Jun 13 10:19 /proc/self/fd/1 -> /dev/pts/4
@@ -1286,7 +1305,9 @@
 A suitable way to wrap up this tour of pathname walking is to list
 the various flags that can be stored in the ``nameidata`` to guide the
 lookup process.  Many of these are only meaningful on the final
-component, others reflect the current state of the pathname lookup.
+component, others reflect the current state of the pathname lookup, and some
+apply restrictions to all path components encountered in the path lookup.
+
 And then there is ``LOOKUP_EMPTY``, which doesn't fit conceptually with
 the others.  If this is not set, an empty pathname causes an error
 very early on.  If it is set, empty pathnames are not considered to be
@@ -1310,13 +1331,48 @@
 ``LOOKUP_JUMPED`` means that the current dentry was chosen not because
 it had the right name but for some other reason.  This happens when
 following "``..``", following a symlink to ``/``, crossing a mount point
-or accessing a "``/proc/$PID/fd/$FD``" symlink.  In this case the
-filesystem has not been asked to revalidate the name (with
-``d_revalidate()``).  In such cases the inode may still need to be
-revalidated, so ``d_op->d_weak_revalidate()`` is called if
+or accessing a "``/proc/$PID/fd/$FD``" symlink (also known as a "magic
+link"). In this case the filesystem has not been asked to revalidate the
+name (with ``d_revalidate()``).  In such cases the inode may still need
+to be revalidated, so ``d_op->d_weak_revalidate()`` is called if
 ``LOOKUP_JUMPED`` is set when the lookup completes - which may be at the
 final component or, when creating, unlinking, or renaming, at the penultimate component.
 
+Resolution-restriction flags
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to allow userspace to protect itself against certain race conditions
+and attack scenarios involving changing path components, a series of flags are
+available which apply restrictions to all path components encountered during
+path lookup. These flags are exposed through ``openat2()``'s ``resolve`` field.
+
+``LOOKUP_NO_SYMLINKS`` blocks all symlink traversals (including magic-links).
+This is distinctly different from ``LOOKUP_FOLLOW``, because the latter only
+controls whether a trailing symlink is followed.
+
+``LOOKUP_NO_MAGICLINKS`` blocks all magic-link traversals. Filesystems must
+ensure that they return errors from ``nd_jump_link()``, because that is how
+``LOOKUP_NO_MAGICLINKS`` and other magic-link restrictions are implemented.
+
+``LOOKUP_NO_XDEV`` blocks all ``vfsmount`` traversals (this includes both
+bind-mounts and ordinary mounts). Note that the ``vfsmount`` which contains the
+lookup is determined by the first mountpoint the path lookup reaches --
+absolute paths start with the ``vfsmount`` of ``/``, and relative paths start
+with the ``dfd``'s ``vfsmount``. Magic-links are only permitted if the
+``vfsmount`` of the path is unchanged.
+
+``LOOKUP_BENEATH`` blocks any path components which resolve outside the
+starting point of the resolution. This is done by blocking ``nd_jump_root()``
+as well as blocking ".." if it would jump outside the starting point.
+``rename_lock`` and ``mount_lock`` are used to detect attacks against the
+resolution of "..". Magic-links are also blocked.
+
+``LOOKUP_IN_ROOT`` resolves all path components as though the starting point
+were the filesystem root. ``nd_jump_root()`` brings the resolution back to
+the starting point, and ".." at the starting point will act as a no-op. As with
+``LOOKUP_BENEATH``, ``rename_lock`` and ``mount_lock`` are used to detect
+attacks against ".." resolution. Magic-links are also blocked.
+
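+For illustration, here is a minimal userspace sketch of these restrictions
+(the ``LOOKUP_*`` flags above are exposed to userspace as the corresponding
+``RESOLVE_*`` flags in ``linux/openat2.h``; a raw ``syscall()`` is used
+because libc may not wrap ``openat2()`` yet)::
+
+    /*
+     * Sketch only: open "etc/passwd" below dirfd as though dirfd were the
+     * root of the filesystem, refusing magic-link traversal.  The
+     * directory "/untrusted" is a placeholder for this example.
+     */
+    #define _GNU_SOURCE
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <unistd.h>
+    #include <sys/syscall.h>
+    #include <linux/openat2.h>
+
+    int main(void)
+    {
+            struct open_how how;
+            int dirfd, fd;
+
+            memset(&how, 0, sizeof(how));
+            how.flags = O_RDONLY;
+            /* RESOLVE_IN_ROOT corresponds to LOOKUP_IN_ROOT, and
+             * RESOLVE_NO_MAGICLINKS to LOOKUP_NO_MAGICLINKS. */
+            how.resolve = RESOLVE_IN_ROOT | RESOLVE_NO_MAGICLINKS;
+
+            dirfd = open("/untrusted", O_PATH | O_DIRECTORY);
+            if (dirfd < 0) {
+                    perror("open");
+                    return 1;
+            }
+
+            /* Under RESOLVE_IN_ROOT, "/" and ".." cannot escape dirfd. */
+            fd = syscall(__NR_openat2, dirfd, "etc/passwd", &how, sizeof(how));
+            if (fd < 0)
+                    perror("openat2");
+
+            return 0;
+    }
+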
 Final-component flags
 ~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/Documentation/filesystems/vfat.rst b/Documentation/filesystems/vfat.rst
new file mode 100644
index 0000000..e85d74e
--- /dev/null
+++ b/Documentation/filesystems/vfat.rst
@@ -0,0 +1,387 @@
+====
+VFAT
+====
+
+USING VFAT
+==========
+
+To use the vfat filesystem, use the filesystem type 'vfat'.  i.e.::
+
+  mount -t vfat /dev/fd0 /mnt
+
+
+No special partition formatter is required;
+'mkdosfs' will work fine if you want to format from within Linux.
+
+VFAT MOUNT OPTIONS
+==================
+
+**uid=###**
+	Set the owner of all files on this filesystem.
+	The default is the uid of current process.
+
+**gid=###**
+	Set the group of all files on this filesystem.
+	The default is the gid of current process.
+
+**umask=###**
+	The permission mask (for files and directories, see *umask(1)*).
+	The default is the umask of current process.
+
+**dmask=###**
+	The permission mask for the directory.
+	The default is the umask of current process.
+
+**fmask=###**
+	The permission mask for files.
+	The default is the umask of current process.
+
+**allow_utime=###**
+	This option controls the permission check of mtime/atime.
+
+		**20**: If current process is in group of file's group ID,
+		you can change timestamp.
+
+		**2**: Other users can change timestamp.
+
+	The default is set from the dmask option: if the directory
+	is writable, utime(2) is also allowed (i.e. ~dmask & 022).
+
+	Normally utime(2) checks that the current process is the
+	owner of the file, or that it has the CAP_FOWNER capability.
+	But the FAT filesystem doesn't store uid/gid on disk, so the
+	normal check is too inflexible. With this option you can
+	relax it.
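+
+	For example, with ``dmask=022`` the value ``~dmask & 022`` is 0,
+	so only the file's owner (or a process with CAP_FOWNER) may
+	change timestamps; with ``dmask=002`` it is 020 (the group bit),
+	so processes in the file's group may do so as well.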
+
+**codepage=###**
+	Sets the codepage number for converting to shortname
+	characters on FAT filesystem.
+	By default, FAT_DEFAULT_CODEPAGE setting is used.
+
+**iocharset=<name>**
+	Character set to use for converting between the encoding
+	used for user-visible filenames and 16 bit Unicode
+	characters. Long filenames are stored on disk
+	in Unicode format, but Unix for the most part doesn't
+	know how to deal with Unicode.
+	By default, FAT_DEFAULT_IOCHARSET setting is used.
+
+	There is also an option of doing UTF-8 translations
+	with the utf8 option.
+
+.. note:: ``iocharset=utf8`` is not recommended. If unsure, you should consider
+	  the utf8 option instead.
+
+**utf8=<bool>**
+	UTF-8 is the filesystem safe version of Unicode that
+	is used by the console. It can be enabled or disabled
+	for the filesystem with this option.
+	If 'uni_xlate' gets set, UTF-8 gets disabled.
+	By default, FAT_DEFAULT_UTF8 setting is used.
+
+**uni_xlate=<bool>**
+	Translate unhandled Unicode characters to special
+	escaped sequences.  This would let you backup and
+	restore filenames that are created with any Unicode
+	characters.  Until Linux supports Unicode for real,
+	this gives you an alternative.  Without this option,
+	a '?' is used when no translation is possible.  The
+	escape character is ':' because it is otherwise
+	illegal on the vfat filesystem.  The escape sequence
+	that gets used is ':' and the four digits of hexadecimal
+	unicode.
+
+**nonumtail=<bool>**
+	When creating 8.3 aliases, normally the alias will
+	end in '~1' or tilde followed by some number.  If this
+	option is set, then if the filename is
+	"longfilename.txt" and "longfile.txt" does not
+	currently exist in the directory, longfile.txt will
+	be the short alias instead of longfi~1.txt.
+
+**usefree**
+	Use the "free clusters" value stored on FSINFO. It will
+	be used to determine the number of free clusters without
+	scanning the disk. It is not used by default, because
+	recent versions of Windows do not update it correctly in
+	some cases. If you are sure the "free clusters" value on
+	FSINFO is correct, this option lets you avoid scanning
+	the disk.
+
+**quiet**
+	Stops printing certain warning messages.
+
+**check=s|r|n**
+	Case sensitivity checking setting.
+
+	**s**: strict, case sensitive
+
+	**r**: relaxed, case insensitive
+
+	**n**: normal, default setting, currently case insensitive
+
+**nocase**
+	This was deprecated for vfat. Use ``shortname=win95`` instead.
+
+**shortname=lower|win95|winnt|mixed**
+	Shortname display/create setting.
+
+	**lower**: convert to lowercase for display,
+	emulate the Windows 95 rule for create.
+
+	**win95**: emulate the Windows 95 rule for display/create.
+
+	**winnt**: emulate the Windows NT rule for display/create.
+
+	**mixed**: emulate the Windows NT rule for display,
+	emulate the Windows 95 rule for create.
+
+	Default setting is `mixed`.
+
+**tz=UTC**
+	Interpret timestamps as UTC rather than local time.
+	This option disables the conversion of timestamps
+	between local time (as used by Windows on FAT) and UTC
+	(which Linux uses internally).  This is particularly
+	useful when mounting devices (like digital cameras)
+	that are set to UTC in order to avoid the pitfalls of
+	local time.
+
+**time_offset=minutes**
+	Set offset for conversion of timestamps from local time
+	used by FAT to UTC. I.e. <minutes> minutes will be subtracted
+	from each timestamp to convert it to UTC used internally by
+	Linux. This is useful when the time zone set in ``sys_tz``
+	is not the time zone used by the filesystem. Note that this
+	option still does not provide correct time stamps in all
+	cases in the presence of DST - time stamps in a different
+	DST setting will be off by one hour.
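+
+	For example, for a camera whose clock is set to local time
+	UTC+01:00, ``time_offset=60`` subtracts 60 minutes from each
+	on-disk timestamp, yielding the correct UTC value.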
+
+**showexec**
+	If set, the execute permission bits of the file will be
+	allowed only if the extension part of the name is .EXE,
+	.COM, or .BAT. Not set by default.
+
+**debug**
+	Can be set, but unused by the current implementation.
+
+**sys_immutable**
+	If set, ATTR_SYS attribute on FAT is handled as
+	IMMUTABLE flag on Linux. Not set by default.
+
+**flush**
+	If set, the filesystem will try to flush to disk earlier
+	than normal. Not set by default.
+
+**rodir**
+	FAT has the ATTR_RO (read-only) attribute. On Windows,
+	the ATTR_RO of the directory will just be ignored,
+	and is used only by applications as a flag (e.g. it's set
+	for the customized folder).
+
+	If you want to use ATTR_RO as read-only flag even for
+	the directory, set this option.
+
+**errors=panic|continue|remount-ro**
+	Specify FAT behavior on critical errors: panic, continue
+	without doing anything, or remount the partition in
+	read-only mode (default behavior).
+
+**discard**
+	If set, issues discard/TRIM commands to the block
+	device when blocks are freed. This is useful for SSD devices
+	and sparse/thinly-provisioned LUNs.
+
+**nfs=stale_rw|nostale_ro**
+	Enable this only if you want to export the FAT filesystem
+	over NFS.
+
+		**stale_rw**: This option maintains an index (cache) of directory
+		*inodes* by *i_logstart* which is used by the nfs-related code to
+		improve look-ups. Full file operations (read/write) over NFS are
+		supported, but with cache eviction at the NFS server this could
+		result in ESTALE issues.
+
+		**nostale_ro**: This option bases the *inode* number and filehandle
+		on the on-disk location of a file in the MS-DOS directory entry.
+		This ensures that ESTALE will not be returned after a file is
+		evicted from the inode cache. However, it means that operations
+		such as rename, create and unlink could cause filehandles that
+		previously pointed at one file to point at a different file,
+		potentially causing data corruption. For this reason, this
+		option also mounts the filesystem readonly.
+
+	To maintain backward compatibility, ``'-o nfs'`` is also accepted,
+	defaulting to "stale_rw".
+
+**dos1xfloppy <bool>: 0,1,yes,no,true,false**
+	If set, use a fallback default BIOS Parameter Block
+	configuration, determined by backing device size. These static
+	parameters match defaults assumed by DOS 1.x for 160 kiB,
+	180 kiB, 320 kiB, and 360 kiB floppies and floppy images.
+
+
+
+LIMITATION
+==========
+
+The fallocated region of a file is discarded at umount/evict time
+when using fallocate with FALLOC_FL_KEEP_SIZE.
+So, users should assume that the fallocated region can be discarded
+at the last close if there is memory pressure resulting in eviction
+of the inode from memory. As a result, for any dependency on
+the fallocated region, users should make sure to recheck fallocate
+after reopening the file.
+
+TODO
+====
+Need to get rid of the raw scanning stuff.  Instead, always use
+a get next directory entry approach.  The only thing left that uses
+raw scanning is the directory renaming code.
+
+
+POSSIBLE PROBLEMS
+=================
+
+- vfat_valid_longname does not properly check reserved names.
+- When a volume name is the same as a directory name in the root
+  directory of the filesystem, the directory name sometimes shows
+  up as an empty file.
+- autoconv option does not work correctly.
+
+
+TEST SUITE
+==========
+If you plan to make any modifications to the vfat filesystem, please
+get the test suite that comes with the vfat distribution at
+
+`<http://web.archive.org/web/*/http://bmrc.berkeley.edu/people/chaffee/vfat.html>`_
+
+This tests quite a few parts of the vfat filesystem and additional
+tests for new features or untested features would be appreciated.
+
+NOTES ON THE STRUCTURE OF THE VFAT FILESYSTEM
+=============================================
+This documentation was provided by Galen C. Hunt gchunt@cs.rochester.edu and
+lightly annotated by Gordon Chaffee.
+
+This document presents a very rough, technical overview of my
+knowledge of the extended FAT file system used in Windows NT 3.5 and
+Windows 95.  I don't guarantee that any of the following is correct,
+but it appears to be so.
+
+The extended FAT file system is almost identical to the FAT
+file system used in DOS versions up to and including *6.223410239847*
+:-).  The significant change has been the addition of long file names.
+These names support up to 255 characters including spaces and lower
+case characters as opposed to the traditional 8.3 short names.
+
+Here is the description of the traditional FAT entry in the current
+Windows 95 filesystem::
+
+        struct directory { // Short 8.3 names
+                unsigned char name[8];          // file name
+                unsigned char ext[3];           // file extension
+                unsigned char attr;             // attribute byte
+		unsigned char lcase;		// Case for base and extension
+		unsigned char ctime_ms;		// Creation time, milliseconds
+		unsigned char ctime[2];		// Creation time
+		unsigned char cdate[2];		// Creation date
+		unsigned char adate[2];		// Last access date
+		unsigned char reserved[2];	// reserved values (ignored)
+                unsigned char time[2];          // time stamp
+                unsigned char date[2];          // date stamp
+                unsigned char start[2];         // starting cluster number
+                unsigned char size[4];          // size of the file
+        };
+
+
+The lcase field specifies if the base and/or the extension of an 8.3
+name should be capitalized.  This field does not seem to be used by
+Windows 95 but it is used by Windows NT.  The case of filenames is not
+completely compatible from Windows NT to Windows 95.  It is not completely
+compatible in the reverse direction, however.  Filenames that fit in
+the 8.3 namespace and are written on Windows NT to be lowercase will
+show up as uppercase on Windows 95.
+
+.. note:: Note that the ``start`` and ``size`` values are actually little
+          endian integer values.  The descriptions of the fields in this
+          structure are public knowledge and can be found elsewhere.
+
+With the extended FAT system, Microsoft has inserted extra
+directory entries for any files with extended names.  (Any name which
+legally fits within the old 8.3 encoding scheme does not have extra
+entries.)  I call these extra entries slots.  Basically, a slot is a
+specially formatted directory entry which holds up to 13 characters of
+a file's extended name.  Think of slots as additional labeling for the
+directory entry of the file to which they correspond.  Microsoft
+prefers to refer to the 8.3 entry for a file as its alias and the
+extended slot directory entries as the file name.
+
+The C structure for a slot directory entry follows::
+
+        struct slot { // Up to 13 characters of a long name
+                unsigned char id;               // sequence number for slot
+                unsigned char name0_4[10];      // first 5 characters in name
+                unsigned char attr;             // attribute byte
+                unsigned char reserved;         // always 0
+                unsigned char alias_checksum;   // checksum for 8.3 alias
+                unsigned char name5_10[12];     // 6 more characters in name
+                unsigned char start[2];         // starting cluster number
+                unsigned char name11_12[4];     // last 2 characters in name
+        };
+
+
+If the layout of the slots looks a little odd, it's only
+because of Microsoft's efforts to maintain compatibility with old
+software.  The slots must be disguised to prevent old software from
+panicking.  To this end, a number of measures are taken:
+
+        1) The attribute byte for a slot directory entry is always set
+           to 0x0f.  This corresponds to an old directory entry with
+           attributes of "hidden", "system", "read-only", and "volume
+           label".  Most old software will ignore any directory
+           entries with the "volume label" bit set.  Real volume label
+           entries don't have the other three bits set.
+
+        2) The starting cluster is always set to 0, an impossible
+           value for a DOS file.
+
+Because the extended FAT system is backward compatible, it is
+possible for old software to modify directory entries.  Measures must
+be taken to ensure the validity of slots.  An extended FAT system can
+verify that a slot does in fact belong to an 8.3 directory entry by
+the following:
+
+        1) Positioning.  Slots for a file always immediately precede
+           their corresponding 8.3 directory entry.  In addition, each
+           slot has an id which marks its order in the extended file
+           name.  Here is a very abbreviated view of an 8.3 directory
+           entry and its corresponding long name slots for the file
+           "My Big File.Extension which is long"::
+
+                <preceding files...>
+                <slot #3, id = 0x43, characters = "h is long">
+                <slot #2, id = 0x02, characters = "xtension whic">
+                <slot #1, id = 0x01, characters = "My Big File.E">
+                <directory entry, name = "MYBIGFIL.EXT">
+
+
+           .. note:: Note that the slots are stored from last to first.  Slots
+		     are numbered from 1 to N.  The Nth slot is ``or'ed`` with
+		     0x40 to mark it as the last one.
+
+        2) Checksum.  Each slot has an alias_checksum value.  The
+           checksum is calculated from the 8.3 name using the
+           following algorithm (a fuller sketch appears at the end of
+           this section)::
+
+                for (sum = i = 0; i < 11; i++) {
+                        sum = (((sum&1)<<7)|((sum&0xfe)>>1)) + name[i];
+                }
+
+
+	3) If there is free space in the final slot, a Unicode ``NULL (0x0000)``
+	   is stored after the final character.  After that, all unused
+	   characters in the final slot are set to Unicode 0xFFFF.
+
+Finally, note that the extended name is stored in Unicode.  Each Unicode
+character takes either two or four bytes, UTF-16LE encoded.
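+
+As a hypothetical, self-contained sketch (not code from the kernel's FAT
+driver), the checksum from item 2 and the slot layout above can be combined
+to rebuild a long name from its slots.  Byte-order handling is omitted, so a
+little-endian host is assumed::
+
+        #include <stdint.h>
+        #include <string.h>
+
+        struct slot {                         /* layout as shown above */
+                unsigned char id;             /* sequence number for slot */
+                unsigned char name0_4[10];    /* first 5 UTF-16LE code units */
+                unsigned char attr;           /* always 0x0f */
+                unsigned char reserved;       /* always 0 */
+                unsigned char alias_checksum; /* checksum for 8.3 alias */
+                unsigned char name5_10[12];   /* 6 more code units */
+                unsigned char start[2];       /* always 0 */
+                unsigned char name11_12[4];   /* last 2 code units */
+        };
+
+        /* Rotate-right-and-add checksum over the 11-byte 8.3 name. */
+        static unsigned char fat_checksum(const unsigned char name[11])
+        {
+                unsigned char sum = 0;
+                int i;
+
+                for (i = 0; i < 11; i++)
+                        sum = (((sum & 1) << 7) | ((sum & 0xfe) >> 1)) + name[i];
+                return sum;
+        }
+
+        /*
+         * Slots are stored last-to-first, immediately before the 8.3 entry.
+         * (id & 0x3f) is the 1-based position of a slot's 13 code units in
+         * the long name; 0x40 marks the final slot.  Returns the number of
+         * code units written to out[], or -1 for an orphaned/corrupt slot.
+         */
+        static int fat_build_long_name(const struct slot *slots, int nslots,
+                                       const unsigned char shortname[11],
+                                       uint16_t *out)
+        {
+                unsigned char csum = fat_checksum(shortname);
+                int i, len = 0;
+
+                for (i = 0; i < nslots; i++) {
+                        int pos = (slots[i].id & 0x3f) - 1;
+                        uint16_t *dst;
+
+                        if (pos < 0 || slots[i].alias_checksum != csum)
+                                return -1;
+                        dst = out + pos * 13;
+                        memcpy(dst,      slots[i].name0_4,   10);
+                        memcpy(dst + 5,  slots[i].name5_10,  12);
+                        memcpy(dst + 11, slots[i].name11_12,  4);
+                        if ((pos + 1) * 13 > len)
+                                len = (pos + 1) * 13;
+                }
+                return len;
+        }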
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt
deleted file mode 100644
index 9103129..0000000
--- a/Documentation/filesystems/vfat.txt
+++ /dev/null
@@ -1,347 +0,0 @@
-USING VFAT
-----------------------------------------------------------------------
-To use the vfat filesystem, use the filesystem type 'vfat'.  i.e.
-  mount -t vfat /dev/fd0 /mnt
-
-No special partition formatter is required.  mkdosfs will work fine
-if you want to format from within Linux.
-
-VFAT MOUNT OPTIONS
-----------------------------------------------------------------------
-uid=###       -- Set the owner of all files on this filesystem.
-		 The default is the uid of current process.
-
-gid=###       -- Set the group of all files on this filesystem.
-		 The default is the gid of current process.
-
-umask=###     -- The permission mask (for files and directories, see umask(1)).
-                 The default is the umask of current process.
-
-dmask=###     -- The permission mask for the directory.
-                 The default is the umask of current process.
-
-fmask=###     -- The permission mask for files.
-                 The default is the umask of current process.
-
-allow_utime=### -- This option controls the permission check of mtime/atime.
-
-                  20 - If current process is in group of file's group ID,
-                       you can change timestamp.
-                   2 - Other users can change timestamp.
-
-                 The default is set from `dmask' option. (If the directory is
-                 writable, utime(2) is also allowed. I.e. ~dmask & 022)
-
-                 Normally utime(2) checks current process is owner of
-                 the file, or it has CAP_FOWNER capability.  But FAT
-                 filesystem doesn't have uid/gid on disk, so normal
-                 check is too unflexible. With this option you can
-                 relax it.
-
-codepage=###  -- Sets the codepage number for converting to shortname
-		 characters on FAT filesystem.
-		 By default, FAT_DEFAULT_CODEPAGE setting is used.
-
-iocharset=<name> -- Character set to use for converting between the
-		 encoding is used for user visible filename and 16 bit
-		 Unicode characters. Long filenames are stored on disk
-		 in Unicode format, but Unix for the most part doesn't
-		 know how to deal with Unicode.
-		 By default, FAT_DEFAULT_IOCHARSET setting is used.
-
-		 There is also an option of doing UTF-8 translations
-		 with the utf8 option.
-
-		 NOTE: "iocharset=utf8" is not recommended. If unsure,
-		 you should consider the following option instead.
-
-utf8=<bool>   -- UTF-8 is the filesystem safe version of Unicode that
-		 is used by the console. It can be enabled or disabled
-		 for the filesystem with this option.
-		 If 'uni_xlate' gets set, UTF-8 gets disabled.
-		 By default, FAT_DEFAULT_UTF8 setting is used.
-
-uni_xlate=<bool> -- Translate unhandled Unicode characters to special
-		 escaped sequences.  This would let you backup and
-		 restore filenames that are created with any Unicode
-		 characters.  Until Linux supports Unicode for real,
-		 this gives you an alternative.  Without this option,
-		 a '?' is used when no translation is possible.  The
-		 escape character is ':' because it is otherwise
-		 illegal on the vfat filesystem.  The escape sequence
-		 that gets used is ':' and the four digits of hexadecimal
-		 unicode.
-
-nonumtail=<bool> -- When creating 8.3 aliases, normally the alias will
-                 end in '~1' or tilde followed by some number.  If this
-                 option is set, then if the filename is 
-                 "longfilename.txt" and "longfile.txt" does not
-                 currently exist in the directory, 'longfile.txt' will
-                 be the short alias instead of 'longfi~1.txt'. 
-                  
-usefree       -- Use the "free clusters" value stored on FSINFO. It'll
-                 be used to determine number of free clusters without
-                 scanning disk. But it's not used by default, because
-                 recent Windows don't update it correctly in some
-                 case. If you are sure the "free clusters" on FSINFO is
-                 correct, by this option you can avoid scanning disk.
-
-quiet         -- Stops printing certain warning messages.
-
-check=s|r|n   -- Case sensitivity checking setting.
-                 s: strict, case sensitive
-                 r: relaxed, case insensitive
-                 n: normal, default setting, currently case insensitive
-
-nocase        -- This was deprecated for vfat. Use shortname=win95 instead.
-
-shortname=lower|win95|winnt|mixed
-	      -- Shortname display/create setting.
-		 lower: convert to lowercase for display,
-			emulate the Windows 95 rule for create.
-		 win95: emulate the Windows 95 rule for display/create.
-		 winnt: emulate the Windows NT rule for display/create.
-		 mixed: emulate the Windows NT rule for display,
-			emulate the Windows 95 rule for create.
-		 Default setting is `mixed'.
-
-tz=UTC        -- Interpret timestamps as UTC rather than local time.
-                 This option disables the conversion of timestamps
-                 between local time (as used by Windows on FAT) and UTC
-                 (which Linux uses internally).  This is particularly
-                 useful when mounting devices (like digital cameras)
-                 that are set to UTC in order to avoid the pitfalls of
-                 local time.
-time_offset=minutes
-	      -- Set offset for conversion of timestamps from local time
-		 used by FAT to UTC. I.e. <minutes> minutes will be subtracted
-		 from each timestamp to convert it to UTC used internally by
-		 Linux. This is useful when time zone set in sys_tz is
-		 not the time zone used by the filesystem. Note that this
-		 option still does not provide correct time stamps in all
-		 cases in presence of DST - time stamps in a different DST
-		 setting will be off by one hour.
-
-showexec      -- If set, the execute permission bits of the file will be
-		 allowed only if the extension part of the name is .EXE,
-		 .COM, or .BAT. Not set by default.
-
-debug         -- Can be set, but unused by the current implementation.
-
-sys_immutable -- If set, ATTR_SYS attribute on FAT is handled as
-		 IMMUTABLE flag on Linux. Not set by default.
-
-flush         -- If set, the filesystem will try to flush to disk more
-		 early than normal. Not set by default.
-
-rodir	      -- FAT has the ATTR_RO (read-only) attribute. On Windows,
-		 the ATTR_RO of the directory will just be ignored,
-		 and is used only by applications as a flag (e.g. it's set
-		 for the customized folder).
-
-		 If you want to use ATTR_RO as read-only flag even for
-		 the directory, set this option.
-
-errors=panic|continue|remount-ro
-	      -- specify FAT behavior on critical errors: panic, continue
-		 without doing anything or remount the partition in
-		 read-only mode (default behavior).
-
-discard       -- If set, issues discard/TRIM commands to the block
-		 device when blocks are freed. This is useful for SSD devices
-		 and sparse/thinly-provisoned LUNs.
-
-nfs=stale_rw|nostale_ro
-		Enable this only if you want to export the FAT filesystem
-		over NFS.
-
-		stale_rw: This option maintains an index (cache) of directory
-		inodes by i_logstart which is used by the nfs-related code to
-		improve look-ups. Full file operations (read/write) over NFS is
-		supported but with cache eviction at NFS server, this could
-		result in ESTALE issues.
-
-		nostale_ro: This option bases the inode number and filehandle
-		on the on-disk location of a file in the MS-DOS directory entry.
-		This ensures that ESTALE will not be returned after a file is
-		evicted from the inode cache. However, it means that operations
-		such as rename, create and unlink could cause filehandles that
-		previously pointed at one file to point at a different file,
-		potentially causing data corruption. For this reason, this
-		option also mounts the filesystem readonly.
-
-		To maintain backward compatibility, '-o nfs' is also accepted,
-		defaulting to stale_rw
-
-dos1xfloppy  -- If set, use a fallback default BIOS Parameter Block
-		configuration, determined by backing device size. These static
-		parameters match defaults assumed by DOS 1.x for 160 kiB,
-		180 kiB, 320 kiB, and 360 kiB floppies and floppy images.
-
-
-<bool>: 0,1,yes,no,true,false
-
-LIMITATION
----------------------------------------------------------------------
-* The fallocated region of file is discarded at umount/evict time
-  when using fallocate with FALLOC_FL_KEEP_SIZE.
-  So, User should assume that fallocated region can be discarded at
-  last close if there is memory pressure resulting in eviction of
-  the inode from the memory. As a result, for any dependency on
-  the fallocated region, user should make sure to recheck fallocate
-  after reopening the file.
-
-TODO
-----------------------------------------------------------------------
-* Need to get rid of the raw scanning stuff.  Instead, always use
-  a get next directory entry approach.  The only thing left that uses
-  raw scanning is the directory renaming code.
-
-
-POSSIBLE PROBLEMS
-----------------------------------------------------------------------
-* vfat_valid_longname does not properly checked reserved names.
-* When a volume name is the same as a directory name in the root
-  directory of the filesystem, the directory name sometimes shows
-  up as an empty file.
-* autoconv option does not work correctly.
-
-BUG REPORTS
-----------------------------------------------------------------------
-If you have trouble with the VFAT filesystem, mail bug reports to
-chaffee@bmrc.cs.berkeley.edu.  Please specify the filename
-and the operation that gave you trouble.
-
-TEST SUITE
-----------------------------------------------------------------------
-If you plan to make any modifications to the vfat filesystem, please
-get the test suite that comes with the vfat distribution at
-
-  http://web.archive.org/web/*/http://bmrc.berkeley.edu/
-  people/chaffee/vfat.html
-
-This tests quite a few parts of the vfat filesystem and additional
-tests for new features or untested features would be appreciated.
-
-NOTES ON THE STRUCTURE OF THE VFAT FILESYSTEM
-----------------------------------------------------------------------
-(This documentation was provided by Galen C. Hunt <gchunt@cs.rochester.edu>
- and lightly annotated by Gordon Chaffee).
-
-This document presents a very rough, technical overview of my
-knowledge of the extended FAT file system used in Windows NT 3.5 and
-Windows 95.  I don't guarantee that any of the following is correct,
-but it appears to be so.
-
-The extended FAT file system is almost identical to the FAT
-file system used in DOS versions up to and including 6.223410239847
-:-).  The significant change has been the addition of long file names.
-These names support up to 255 characters including spaces and lower
-case characters as opposed to the traditional 8.3 short names.
-
-Here is the description of the traditional FAT entry in the current
-Windows 95 filesystem:
-
-        struct directory { // Short 8.3 names 
-                unsigned char name[8];          // file name 
-                unsigned char ext[3];           // file extension 
-                unsigned char attr;             // attribute byte 
-		unsigned char lcase;		// Case for base and extension
-		unsigned char ctime_ms;		// Creation time, milliseconds
-		unsigned char ctime[2];		// Creation time
-		unsigned char cdate[2];		// Creation date
-		unsigned char adate[2];		// Last access date
-		unsigned char reserved[2];	// reserved values (ignored) 
-                unsigned char time[2];          // time stamp 
-                unsigned char date[2];          // date stamp 
-                unsigned char start[2];         // starting cluster number 
-                unsigned char size[4];          // size of the file 
-        };
-
-The lcase field specifies if the base and/or the extension of an 8.3
-name should be capitalized.  This field does not seem to be used by
-Windows 95 but it is used by Windows NT.  The case of filenames is not
-completely compatible from Windows NT to Windows 95.  It is not completely
-compatible in the reverse direction, however.  Filenames that fit in
-the 8.3 namespace and are written on Windows NT to be lowercase will
-show up as uppercase on Windows 95.
-
-Note that the "start" and "size" values are actually little
-endian integer values.  The descriptions of the fields in this
-structure are public knowledge and can be found elsewhere.
-
-With the extended FAT system, Microsoft has inserted extra
-directory entries for any files with extended names.  (Any name which
-legally fits within the old 8.3 encoding scheme does not have extra
-entries.)  I call these extra entries slots.  Basically, a slot is a
-specially formatted directory entry which holds up to 13 characters of
-a file's extended name.  Think of slots as additional labeling for the
-directory entry of the file to which they correspond.  Microsoft
-prefers to refer to the 8.3 entry for a file as its alias and the
-extended slot directory entries as the file name. 
-
-The C structure for a slot directory entry follows:
-
-        struct slot { // Up to 13 characters of a long name 
-                unsigned char id;               // sequence number for slot 
-                unsigned char name0_4[10];      // first 5 characters in name 
-                unsigned char attr;             // attribute byte
-                unsigned char reserved;         // always 0 
-                unsigned char alias_checksum;   // checksum for 8.3 alias 
-                unsigned char name5_10[12];     // 6 more characters in name
-                unsigned char start[2];         // starting cluster number
-                unsigned char name11_12[4];     // last 2 characters in name
-        };
-
-If the layout of the slots looks a little odd, it's only
-because of Microsoft's efforts to maintain compatibility with old
-software.  The slots must be disguised to prevent old software from
-panicking.  To this end, a number of measures are taken:
-
-        1) The attribute byte for a slot directory entry is always set
-           to 0x0f.  This corresponds to an old directory entry with
-           attributes of "hidden", "system", "read-only", and "volume
-           label".  Most old software will ignore any directory
-           entries with the "volume label" bit set.  Real volume label
-           entries don't have the other three bits set.
-
-        2) The starting cluster is always set to 0, an impossible
-           value for a DOS file.
-
-Because the extended FAT system is backward compatible, it is
-possible for old software to modify directory entries.  Measures must
-be taken to ensure the validity of slots.  An extended FAT system can
-verify that a slot does in fact belong to an 8.3 directory entry by
-the following:
-
-        1) Positioning.  Slots for a file always immediately proceed
-           their corresponding 8.3 directory entry.  In addition, each
-           slot has an id which marks its order in the extended file
-           name.  Here is a very abbreviated view of an 8.3 directory
-           entry and its corresponding long name slots for the file
-           "My Big File.Extension which is long":
-
-                <proceeding files...>
-                <slot #3, id = 0x43, characters = "h is long">
-                <slot #2, id = 0x02, characters = "xtension whic">
-                <slot #1, id = 0x01, characters = "My Big File.E">
-                <directory entry, name = "MYBIGFIL.EXT">
-
-           Note that the slots are stored from last to first.  Slots
-           are numbered from 1 to N.  The Nth slot is or'ed with 0x40
-           to mark it as the last one.
-
-        2) Checksum.  Each slot has an "alias_checksum" value.  The
-           checksum is calculated from the 8.3 name using the
-           following algorithm:
-
-                for (sum = i = 0; i < 11; i++) {
-                        sum = (((sum&1)<<7)|((sum&0xfe)>>1)) + name[i]
-                }
-
-	3) If there is free space in the final slot, a Unicode NULL (0x0000) 
-	   is stored after the final character.  After that, all unused 
-	   characters in the final slot are set to Unicode 0xFFFF.
-
-Finally, note that the extended name is stored in Unicode.  Each Unicode
-character takes either two or four bytes, UTF-16LE encoded.
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
index 966bd2d..a733205 100644
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -24,9 +24,9 @@
 At the core of every DRM driver is a :c:type:`struct drm_driver
 <drm_driver>` structure. Drivers typically statically initialize
 a drm_driver structure, and then pass it to
-:c:func:`drm_dev_alloc()` to allocate a device instance. After the
+drm_dev_alloc() to allocate a device instance. After the
 device instance is fully initialized it can be registered (which makes
-it accessible from userspace) using :c:func:`drm_dev_register()`.
+it accessible from userspace) using drm_dev_register().
 
 The :c:type:`struct drm_driver <drm_driver>` structure
 contains static information that describes the driver and features it
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 23a3c98..906771e 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -3,7 +3,7 @@
 =========================
 
 Drivers must initialize the mode setting core by calling
-:c:func:`drm_mode_config_init()` on the DRM device. The function
+drm_mode_config_init() on the DRM device. The function
 initializes the :c:type:`struct drm_device <drm_device>`
 mode_config field and never fails. Once done, mode configuration must
 be setup by initializing the following fields.
@@ -181,8 +181,7 @@
 directly instantiated on each object, but free-standing mode objects themselves,
 represented by :c:type:`struct drm_property <drm_property>`, which only specify
 the type and value range of a property. Any given property can be attached
-multiple times to different objects using :c:func:`drm_object_attach_property()
-<drm_object_attach_property>`.
+multiple times to different objects using drm_object_attach_property().
 
 .. kernel-doc:: include/drm/drm_mode_object.h
    :internal:
@@ -260,7 +259,8 @@
   drm_connector_state <drm_connector_state>` for connectors. These are the only
   objects with userspace-visible and settable state. For internal state drivers
   can subclass these structures through embedding, or add entirely new state
-  structures for their globally shared hardware functions.
+  structures for their globally shared hardware functions; see :c:type:`struct
+  drm_private_state <drm_private_state>`.
 
 - An atomic update is assembled and validated as an entirely free-standing pile
   of structures within the :c:type:`drm_atomic_state <drm_atomic_state>`
@@ -269,6 +269,14 @@
   to the driver and modeset objects. This way rolling back an update boils down
   to releasing memory and unreferencing objects like framebuffers.
 
+Locking of atomic state structures is handled internally using :c:type:`struct
+drm_modeset_lock <drm_modeset_lock>`. As a general rule the locking shouldn't be
+exposed to drivers; instead the right locks should be automatically acquired by
+any function that duplicates or peeks into a state, e.g.
+drm_atomic_get_crtc_state().  Locking only protects the software data
+structure; ordering of committing state changes to hardware is sequenced using
+:c:type:`struct drm_crtc_commit <drm_crtc_commit>`.
+
 Read on in this chapter, and also in :ref:`drm_atomic_helper` for more detailed
 coverage of specific topics.
 
@@ -479,6 +487,9 @@
 .. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
    :export:
 
+.. kernel-doc:: include/drm/drm_color_mgmt.h
+   :internal:
+
 Tile Group Property
 -------------------
 
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index 5961929..c77b326 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -149,19 +149,19 @@
 To create a GEM object, a driver allocates memory for an instance of its
 specific GEM object type and initializes the embedded struct
 :c:type:`struct drm_gem_object <drm_gem_object>` with a call
-to :c:func:`drm_gem_object_init()`. The function takes a pointer
+to drm_gem_object_init(). The function takes a pointer
 to the DRM device, a pointer to the GEM object and the buffer object
 size in bytes.
 
 GEM uses shmem to allocate anonymous pageable memory.
-:c:func:`drm_gem_object_init()` will create an shmfs file of the
+drm_gem_object_init() will create an shmfs file of the
 requested size and store it into the struct :c:type:`struct
 drm_gem_object <drm_gem_object>` filp field. The memory is
 used as either main storage for the object when the graphics hardware
 uses system memory directly or as a backing store otherwise.
 
 Drivers are responsible for the actual physical pages allocation by
-calling :c:func:`shmem_read_mapping_page_gfp()` for each page.
+calling shmem_read_mapping_page_gfp() for each page.
 Note that they can decide to allocate pages when initializing the GEM
 object, or to delay allocation until the memory is needed (for instance
 when a page fault occurs as a result of a userspace memory access or
@@ -170,20 +170,18 @@
 Anonymous pageable memory allocation is not always desired, for instance
 when the hardware requires physically contiguous system memory as is
 often the case in embedded devices. Drivers can create GEM objects with
-no shmfs backing (called private GEM objects) by initializing them with
-a call to :c:func:`drm_gem_private_object_init()` instead of
-:c:func:`drm_gem_object_init()`. Storage for private GEM objects
-must be managed by drivers.
+no shmfs backing (called private GEM objects) by initializing them with a call
+to drm_gem_private_object_init() instead of drm_gem_object_init(). Storage for
+private GEM objects must be managed by drivers.
 
 GEM Objects Lifetime
 --------------------
 
 All GEM objects are reference-counted by the GEM core. References can be
-acquired and release by :c:func:`calling drm_gem_object_get()` and
-:c:func:`drm_gem_object_put()` respectively. The caller must hold the
-:c:type:`struct drm_device <drm_device>` struct_mutex lock when calling
-:c:func:`drm_gem_object_get()`. As a convenience, GEM provides
-:c:func:`drm_gem_object_put_unlocked()` functions that can be called without
+acquired and released by calling drm_gem_object_get() and drm_gem_object_put()
+respectively. The caller must hold the :c:type:`struct drm_device <drm_device>`
+struct_mutex lock when calling drm_gem_object_get(). As a convenience, GEM
+provides drm_gem_object_put_unlocked() functions that can be called without
 holding the lock.
 
 When the last reference to a GEM object is released the GEM core calls
@@ -194,7 +192,7 @@
 void (\*gem_free_object) (struct drm_gem_object \*obj); Drivers are
 responsible for freeing all GEM object resources. This includes the
 resources created by the GEM core, which need to be released with
-:c:func:`drm_gem_object_release()`.
+drm_gem_object_release().
 
 GEM Objects Naming
 ------------------
@@ -210,13 +208,11 @@
 DRM file handle frees all its GEM handles and dereferences the
 associated GEM objects.
 
-To create a handle for a GEM object drivers call
-:c:func:`drm_gem_handle_create()`. The function takes a pointer
-to the DRM file and the GEM object and returns a locally unique handle.
-When the handle is no longer needed drivers delete it with a call to
-:c:func:`drm_gem_handle_delete()`. Finally the GEM object
-associated with a handle can be retrieved by a call to
-:c:func:`drm_gem_object_lookup()`.
+To create a handle for a GEM object drivers call drm_gem_handle_create(). The
+function takes a pointer to the DRM file and the GEM object and returns a
+locally unique handle.  When the handle is no longer needed drivers delete it
+with a call to drm_gem_handle_delete(). Finally the GEM object associated with a
+handle can be retrieved by a call to drm_gem_object_lookup().
 
 Handles don't take ownership of GEM objects, they only take a reference
 to the object that will be dropped when the handle is destroyed. To
@@ -258,7 +254,7 @@
 don't have their own file handle. Two alternative methods currently
 co-exist to map GEM objects to userspace. The first method uses a
 driver-specific ioctl to perform the mapping operation, calling
-:c:func:`do_mmap()` under the hood. This is often considered
+do_mmap() under the hood. This is often considered
 dubious, seems to be discouraged for new GEM-enabled drivers, and will
 thus not be described here.
 
@@ -267,23 +263,22 @@
 offset); DRM identifies the GEM object to be mapped by a fake offset
 passed through the mmap offset argument. Prior to being mapped, a GEM
 object must thus be associated with a fake offset. To do so, drivers
-must call :c:func:`drm_gem_create_mmap_offset()` on the object.
+must call drm_gem_create_mmap_offset() on the object.
 
 Once allocated, the fake offset value must be passed to the application
 in a driver-specific way and can then be used as the mmap offset
 argument.
 
-The GEM core provides a helper method :c:func:`drm_gem_mmap()` to
+The GEM core provides a helper method drm_gem_mmap() to
 handle object mapping. The method can be set directly as the mmap file
 operation handler. It will look up the GEM object based on the offset
 value and set the VMA operations to the :c:type:`struct drm_driver
-<drm_driver>` gem_vm_ops field. Note that
-:c:func:`drm_gem_mmap()` doesn't map memory to userspace, but
-relies on the driver-provided fault handler to map pages individually.
+<drm_driver>` gem_vm_ops field. Note that drm_gem_mmap() doesn't map memory to
+userspace, but relies on the driver-provided fault handler to map pages
+individually.
 
-To use :c:func:`drm_gem_mmap()`, drivers must fill the struct
-:c:type:`struct drm_driver <drm_driver>` gem_vm_ops field
-with a pointer to VM operations.
+To use drm_gem_mmap(), drivers must fill the struct :c:type:`struct drm_driver
+<drm_driver>` gem_vm_ops field with a pointer to VM operations.
 
 The VM operations is a :c:type:`struct vm_operations_struct <vm_operations_struct>`
 made up of several fields, the more interesting ones being:
@@ -298,9 +293,8 @@
 
 
 The open and close operations must update the GEM object reference
-count. Drivers can use the :c:func:`drm_gem_vm_open()` and
-:c:func:`drm_gem_vm_close()` helper functions directly as open
-and close handlers.
+count. Drivers can use the drm_gem_vm_open() and drm_gem_vm_close() helper
+functions directly as open and close handlers.
 
 The fault operation handler is responsible for mapping individual pages
 to userspace when a page fault occurs. Depending on the memory
@@ -312,12 +306,12 @@
 faults can implement their own mmap file operation handler.
 
 For platforms without MMU the GEM core provides a helper method
-:c:func:`drm_gem_cma_get_unmapped_area`. The mmap() routines will call
-this to get a proposed address for the mapping.
+drm_gem_cma_get_unmapped_area(). The mmap() routines will call this to get a
+proposed address for the mapping.
 
-To use :c:func:`drm_gem_cma_get_unmapped_area`, drivers must fill the
-struct :c:type:`struct file_operations <file_operations>` get_unmapped_area
-field with a pointer on :c:func:`drm_gem_cma_get_unmapped_area`.
+To use drm_gem_cma_get_unmapped_area(), drivers must fill the struct
+:c:type:`struct file_operations <file_operations>` get_unmapped_area field with
+a pointer to drm_gem_cma_get_unmapped_area().
 
 More detailed information about get_unmapped_area can be found in
 Documentation/nommu-mmap.txt
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index 94f9052..56fec6e 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -254,36 +254,45 @@
 There's a collection of tests that aims to cover the whole functionality of
 DRM drivers and that can be used to check that changes to DRM drivers or the
 core don't regress existing functionality. This test suite is called IGT and
-its code can be found in https://cgit.freedesktop.org/drm/igt-gpu-tools/.
+its code, along with instructions for building and running it, can be found at
+https://gitlab.freedesktop.org/drm/igt-gpu-tools/.
 
-To build IGT, start by installing its build dependencies. In Debian-based
-systems::
+Using VKMS to test DRM API
+--------------------------
 
-	# apt-get build-dep intel-gpu-tools
+VKMS is a software-only model of a KMS driver that is useful for testing
+and for running compositors. VKMS aims to enable a virtual display without
+the need for a hardware display capability. These characteristics make
+VKMS a perfect tool for validating DRM core behavior and for supporting
+compositor development. VKMS makes it possible to test DRM functions in
+a virtual machine without a display, simplifying the validation of some
+of the core changes.
 
-And in Fedora-based systems::
+To validate changes to the DRM API with VKMS, start by setting up the
+kernel: make sure the VKMS module is enabled, compile the kernel with
+VKMS enabled, and install it on the target machine. VKMS can be run in
+a virtual machine (QEMU, virtme or similar). Using KVM with a minimum
+of 1GB of RAM and four cores is recommended.
 
-	# dnf builddep intel-gpu-tools
+It's possible to run the IGT-tests in a VM in two ways:
 
-Then clone the repository::
+	1. Use IGT inside a VM
+	2. Use IGT from the host machine and write the results in a shared directory.
 
-	$ git clone git://anongit.freedesktop.org/drm/igt-gpu-tools
+Below is an example of running the igt-tests in a VM with a directory
+shared with the host machine, using virtme::
 
-Configure the build system and start the build::
+	$ virtme-run --rwdir /path/for/shared_dir --kdir=path/for/kernel/directory --mods=auto
 
-	$ cd igt-gpu-tools && ./autogen.sh && make -j6
+Then run the igt-tests in the guest machine; in this example the
+'kms_flip' tests are run::
 
-Download the piglit dependency::
+	$ /path/for/igt-gpu-tools/scripts/run-tests.sh -p -s -t "kms_flip.*" -v
 
-	$ ./scripts/run-tests.sh -d
-
-And run the tests::
-
-	$ ./scripts/run-tests.sh -t kms -t core -s
-
-run-tests.sh is a wrapper around piglit that will execute the tests matching
-the -t options. A report in HTML format will be available in
-./results/html/index.html. Results can be compared with piglit.
+In this example, Piglit is used instead of building igt_runner (the -p
+option); an HTML summary of the test results is created and saved in
+the folder "igt-gpu-tools/results"; only the igt-tests matching the -t
+option are executed.
 
 Display CRC Support
 -------------------
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index d0947c5..e539c42 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -466,9 +466,6 @@
 .. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
    :doc: GuC-based command submission
 
-.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
-   :internal:
-
 HuC
 ---
 .. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_huc.c
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 6792fa9..bc869b2 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -142,14 +142,14 @@
 
 Level: Advanced
 
-Convert instances of dev_info/dev_err/dev_warn to their DRM_DEV_* equivalent
-----------------------------------------------------------------------------
+Convert logging to drm_* functions with drm_device parameter
+------------------------------------------------------------
 
 For drivers which could have multiple instances, it is necessary to
 differentiate between which is which in the logs. Since DRM_INFO/WARN/ERROR
 don't do this, drivers used dev_info/warn/err to make this differentiation. We
-now have DRM_DEV_* variants of the drm print macros, so we can start to convert
-those drivers back to using drm-formwatted specific log messages.
+now have drm_* variants of the drm print functions, so we can start to convert
+those drivers back to using drm-formatted specific log messages.
 
 Before you start this conversion please contact the relevant maintainers to make
 sure your work will be merged - not everyone agrees that the DRM dmesg macros
@@ -171,26 +171,43 @@
 
 Level: Intermediate
 
-Convert drivers to use drm_fb_helper_fbdev_setup/teardown()
------------------------------------------------------------
+Convert drivers to use drm_fbdev_generic_setup()
+------------------------------------------------
 
-Most drivers can use drm_fb_helper_fbdev_setup() except maybe:
-
-- amdgpu which has special logic to decide whether to call
-  drm_helper_disable_unused_functions()
-
-- armada which isn't atomic and doesn't call
-  drm_helper_disable_unused_functions()
-
-- i915 which calls drm_fb_helper_initial_config() in a worker
-
-Drivers that use drm_framebuffer_remove() to clean up the fbdev framebuffer can
-probably use drm_fb_helper_fbdev_teardown().
+Most drivers can use drm_fbdev_generic_setup(). Drivers have to implement
+atomic modesetting and GEM vmap support. The current generic fbdev emulation
+expects the framebuffer in system memory (or system-like memory).
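As a hedged sketch, the call would typically sit at the end of the driver's
probe path, after drm_dev_register(); the value 32 is only an example
preferred bits-per-pixel (0 selects the default)::

          ret = drm_dev_register(drm, 0);
          if (ret)
                  return ret;

          /* Generic fbdev emulation is set up after device registration. */
          drm_fbdev_generic_setup(drm, 32);

          return 0;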
 
 Contact: Maintainer of the driver you plan to convert
 
 Level: Intermediate
 
+drm_framebuffer_funcs and drm_mode_config_funcs.fb_create cleanup
+-----------------------------------------------------------------
+
+A lot more drivers could be switched over to the drm_gem_framebuffer helpers.
+Various hold-ups:
+
+- Need to switch over to the generic dirty tracking code using
+  drm_atomic_helper_dirtyfb first (e.g. qxl).
+
+- Need to switch to drm_fbdev_generic_setup(), otherwise a lot of the custom fb
+  setup code can't be deleted.
+
+- Many drivers wrap drm_gem_fb_create() only to check for valid formats. For
+  atomic drivers we could check for valid formats by calling
+  drm_plane_check_pixel_format() against all planes, and pass if any plane
+  supports the format. For non-atomic drivers that's not possible, since the
+  format list for the primary plane is fake and we'd therefore reject valid
+  formats.
+
+- Many drivers subclass drm_framebuffer; we'd need an embedding-compatible
+  version of the various drm_gem_fb_create functions, maybe called
+  drm_gem_fb_create/_with_dirty/_with_funcs as needed.
+
+Contact: Daniel Vetter
+
+Level: Intermediate
+
 Clean up mmap forwarding
 ------------------------
 
@@ -328,8 +345,8 @@
   these igt tests need to be fixed: kms_fbcon_fbt@psr and
   kms_fbcon_fbt@psr-suspend.
 
-- The max connector argument for drm_fb_helper_init() and
-  drm_fb_helper_fbdev_setup() isn't used anymore and can be removed.
+- The max connector argument for drm_fb_helper_init() isn't used anymore and
+  can be removed.
 
 - The helper doesn't keep an array of connectors anymore so these can be
   removed: drm_fb_helper_single_add_all_connectors(),
@@ -351,6 +368,23 @@
 
 Level: Intermediate
 
+Remove load/unload callbacks from all non-DRIVER_LEGACY drivers
+---------------------------------------------------------------
+
+The load/unload callbacks in struct &drm_driver are very much midlayers, plus
+for historical reasons they get the ordering wrong (and we can't fix that)
+between setting up the &drm_driver structure and calling drm_dev_register().
+
+- Rework drivers to no longer use the load/unload callbacks, directly coding the
+  load/unload sequence into the driver's probe function (see the sketch after
+  this list).
+
+- Once all non-DRIVER_LEGACY drivers are converted, disallow the load/unload
+  callbacks for all modern drivers.
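A sketch of the converted flow, assuming a platform device; foo_hw_init(),
foo_hw_fini() and foo_driver are hypothetical stand-ins for whatever the old
->load()/->unload() callbacks did::

  #include <linux/platform_device.h>
  #include <drm/drm_drv.h>

  static int foo_probe(struct platform_device *pdev)
  {
          struct drm_device *drm;
          int ret;

          drm = drm_dev_alloc(&foo_driver, &pdev->dev);
          if (IS_ERR(drm))
                  return PTR_ERR(drm);

          ret = foo_hw_init(drm);         /* former ->load() work */
          if (ret)
                  goto err_put;

          ret = drm_dev_register(drm, 0); /* only register once set up */
          if (ret)
                  goto err_fini;

          return 0;

  err_fini:
          foo_hw_fini(drm);               /* former ->unload() work */
  err_put:
          drm_dev_put(drm);
          return ret;
  }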
+
+Contact: Daniel Vetter
+
+Level: Intermediate
+
 Core refactorings
 =================
 
diff --git a/Documentation/i2c/busses/i2c-parport-light.rst b/Documentation/i2c/busses/i2c-parport-light.rst
deleted file mode 100644
index e73af97..0000000
--- a/Documentation/i2c/busses/i2c-parport-light.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-===============================
-Kernel driver i2c-parport-light
-===============================
-
-Author: Jean Delvare <jdelvare@suse.de>
-
-This driver is a light version of i2c-parport. It doesn't depend
-on the parport driver, and uses direct I/O access instead. This might be
-preferred on embedded systems where wasting memory for the clean but heavy
-parport handling is not an option. The drawback is a reduced portability
-and the impossibility to daisy-chain other parallel port devices.
-
-Please see i2c-parport for documentation.
-
-Module parameters:
-
-* type: type of adapter (see i2c-parport or modinfo)
-
-* base: base I/O address
-  Default is 0x378 which is fairly common for parallel ports, at least on PC.
-
-* irq: optional IRQ
-  This must be passed if you want SMBus alert support, assuming your adapter
-  actually supports this.
diff --git a/Documentation/i2c/busses/index.rst b/Documentation/i2c/busses/index.rst
index 2a26e25..5e4077b 100644
--- a/Documentation/i2c/busses/index.rst
+++ b/Documentation/i2c/busses/index.rst
@@ -20,7 +20,6 @@
    i2c-nforce2
    i2c-nvidia-gpu
    i2c-ocores
-   i2c-parport-light
    i2c-parport
    i2c-pca-isa
    i2c-piix4
diff --git a/Documentation/i2c/dev-interface.rst b/Documentation/i2c/dev-interface.rst
index 69c23a3..bdb247f 100644
--- a/Documentation/i2c/dev-interface.rst
+++ b/Documentation/i2c/dev-interface.rst
@@ -1,27 +1,27 @@
-====================
-I2C Device Interface
-====================
+============================================
+Implementing I2C device drivers in userspace
+============================================
 
-Usually, i2c devices are controlled by a kernel driver. But it is also
+Usually, I2C devices are controlled by a kernel driver. But it is also
 possible to access all devices on an adapter from userspace, through
 the /dev interface. You need to load module i2c-dev for this.
 
-Each registered i2c adapter gets a number, counting from 0. You can
+Each registered I2C adapter gets a number, counting from 0. You can
 examine /sys/class/i2c-dev/ to see what number corresponds to which adapter.
 Alternatively, you can run "i2cdetect -l" to obtain a formatted list of all
-i2c adapters present on your system at a given time. i2cdetect is part of
+I2C adapters present on your system at a given time. i2cdetect is part of
 the i2c-tools package.
 
 I2C device files are character device files with major device number 89
 and a minor device number corresponding to the number assigned as
 explained above. They should be called "i2c-%d" (i2c-0, i2c-1, ...,
-i2c-10, ...). All 256 minor device numbers are reserved for i2c.
+i2c-10, ...). All 256 minor device numbers are reserved for I2C.
 
 
 C example
 =========
 
-So let's say you want to access an i2c adapter from a C program.
+So let's say you want to access an I2C adapter from a C program.
 First, you need to include these two headers::
 
   #include <linux/i2c-dev.h>
@@ -66,7 +66,7 @@
   /* Using SMBus commands */
   res = i2c_smbus_read_word_data(file, reg);
   if (res < 0) {
-    /* ERROR HANDLING: i2c transaction failed */
+    /* ERROR HANDLING: I2C transaction failed */
   } else {
     /* res contains the read word */
   }
@@ -79,12 +79,12 @@
   buf[1] = 0x43;
   buf[2] = 0x65;
   if (write(file, buf, 3) != 3) {
-    /* ERROR HANDLING: i2c transaction failed */
+    /* ERROR HANDLING: I2C transaction failed */
   }
 
   /* Using I2C Read, equivalent of i2c_smbus_read_byte(file) */
   if (read(file, buf, 1) != 1) {
-    /* ERROR HANDLING: i2c transaction failed */
+    /* ERROR HANDLING: I2C transaction failed */
   } else {
     /* buf[0] contains the read byte */
   }
@@ -144,7 +144,7 @@
   If possible, use the provided ``i2c_smbus_*`` methods described below instead
   of issuing direct ioctls.
 
-You can do plain i2c transactions by using read(2) and write(2) calls.
+You can do plain I2C transactions by using read(2) and write(2) calls.
 You do not need to pass the address byte; instead, set it through
 ioctl I2C_SLAVE before you try to access the device.
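For illustration, a minimal userspace fragment; the adapter number and the
device address 0x48 are made up, and error handling is omitted for brevity::

  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/i2c-dev.h>

  int file = open("/dev/i2c-1", O_RDWR);
  unsigned char reg = 0x10, val;

  ioctl(file, I2C_SLAVE, 0x48);  /* set the 7-bit slave address once */
  write(file, &reg, 1);          /* plain I2C write of one byte */
  read(file, &val, 1);           /* plain I2C read of one byte */
  close(file);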
 
diff --git a/Documentation/i2c/dma-considerations.rst b/Documentation/i2c/dma-considerations.rst
index 2030020..142d52c 100644
--- a/Documentation/i2c/dma-considerations.rst
+++ b/Documentation/i2c/dma-considerations.rst
@@ -2,7 +2,7 @@
 Linux I2C and DMA
 =================
 
-Given that i2c is a low-speed bus, over which the majority of messages
+Given that I2C is a low-speed bus, over which the majority of messages
 transferred are small, it is not considered a prime user of DMA access. At this
 time of writing, only 10% of I2C bus master drivers have DMA support
 implemented. And the vast majority of transactions are so small that setting up
diff --git a/Documentation/i2c/i2c-protocol.rst b/Documentation/i2c/i2c-protocol.rst
index 2f8fcf6..b2092f8 100644
--- a/Documentation/i2c/i2c-protocol.rst
+++ b/Documentation/i2c/i2c-protocol.rst
@@ -1,26 +1,26 @@
-============
-I2C Protocol
-============
+================
+The I2C Protocol
+================
 
-This document describes the i2c protocol. Or will, when it is finished :-)
+This document describes the I2C protocol. Or will, when it is finished :-)
 
 Key to symbols
 ==============
 
 =============== =============================================================
-S     (1 bit) : Start bit
-P     (1 bit) : Stop bit
-Rd/Wr (1 bit) : Read/Write bit. Rd equals 1, Wr equals 0.
-A, NA (1 bit) : Accept and reverse accept bit.
-Addr  (7 bits): I2C 7 bit address. Note that this can be expanded as usual to
+S               Start condition
+P               Stop condition
+Rd/Wr (1 bit)   Read/Write bit. Rd equals 1, Wr equals 0.
+A, NA (1 bit)   Acknowledge (ACK) and Not Acknowledge (NACK) bit
+Addr  (7 bits)  I2C 7 bit address. Note that this can be expanded as usual to
                 get a 10 bit I2C address.
-Comm  (8 bits): Command byte, a data byte which often selects a register on
+Comm  (8 bits)  Command byte, a data byte which often selects a register on
                 the device.
-Data  (8 bits): A plain data byte. Sometimes, I write DataLow, DataHigh
+Data  (8 bits)  A plain data byte. Sometimes, I write DataLow, DataHigh
                 for 16 bit data.
-Count (8 bits): A data byte containing the length of a block operation.
+Count (8 bits)  A data byte containing the length of a block operation.
 
-[..]:           Data sent by I2C device, as opposed to data sent by the
+[..]            Data sent by I2C device, as opposed to data sent by the
                 host adapter.
 =============== =============================================================
 
@@ -28,7 +28,7 @@
 Simple send transaction
 =======================
 
-This corresponds to i2c_master_send::
+Implemented by i2c_master_send()::
 
   S Addr Wr [A] Data [A] Data [A] ... [A] Data [A] P
 
@@ -36,7 +36,7 @@
 Simple receive transaction
 ==========================
 
-This corresponds to i2c_master_recv::
+Implemented by i2c_master_recv()::
 
   S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
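A sketch of both calls for a hypothetical client at address 0x48; the
struct i2c_client pointer 'client' is assumed to exist in the surrounding
code and error handling is omitted::

  u8 out[2] = { 0x01, 0x02 };
  u8 in[4];

  i2c_master_send(client, out, sizeof(out)); /* S Addr Wr [A] Data [A] ... P */
  i2c_master_recv(client, in, sizeof(in));   /* S Addr Rd [A] [Data] A ... P */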
 
@@ -44,11 +44,11 @@
 Combined transactions
 =====================
 
-This corresponds to i2c_transfer
+Implemented by i2c_transfer().
 
-They are just like the above transactions, but instead of a stop bit P
-a start bit S is sent and the transaction continues. An example of
-a byte read, followed by a byte write::
+They are just like the above transactions, but instead of a stop
+condition P a start condition S is sent and the transaction continues.
+An example of a byte read, followed by a byte write::
 
   S Addr Rd [A] [Data] NA S Addr Wr [A] Data [A] P
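A kernel-side sketch of this combined read-then-write using i2c_transfer();
the address 0x50 and the buffers are made up, and 'adapter' and 'ret' are
assumed to exist in the surrounding code::

  u8 in, out = 0x42;
  struct i2c_msg msgs[] = {
          { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &in  },
          { .addr = 0x50, .flags = 0,        .len = 1, .buf = &out },
  };

  /* One call, one bus transaction: a repeated start between the messages. */
  ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));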
 
@@ -57,7 +57,7 @@
 =====================
 
 The following modifications to the I2C protocol can also be generated by
-setting these flags for i2c messages. With the exception of I2C_M_NOSTART, they
+setting these flags for I2C messages. With the exception of I2C_M_NOSTART, they
 are usually only needed to work around device issues:
 
 I2C_M_IGNORE_NAK:
@@ -77,8 +77,9 @@
       S Addr Rd [A] [Data] NA Data [A] P
 
     If you set the I2C_M_NOSTART variable for the first partial message,
-    we do not generate Addr, but we do generate the startbit S. This will
-    probably confuse all other clients on your bus, so don't try this.
+    we do not generate Addr, but we do generate the start condition S.
+    This will probably confuse all other clients on your bus, so don't
+    try this.
 
     This is often used to gather transmits from multiple data buffers in
     system memory into something that appears as a single transfer to the
diff --git a/Documentation/i2c/i2c-topology.rst b/Documentation/i2c/i2c-topology.rst
index 0c1ae95..7cb5381 100644
--- a/Documentation/i2c/i2c-topology.rst
+++ b/Documentation/i2c/i2c-topology.rst
@@ -1,9 +1,9 @@
-============
-I2C topology
-============
+================================
+I2C muxes and complex topologies
+================================
 
-There are a couple of reasons for building more complex i2c topologies
-than a straight-forward i2c bus with one adapter and one or more devices.
+There are a couple of reasons for building more complex I2C topologies
+than a straight-forward I2C bus with one adapter and one or more devices.
 
 1. A mux may be needed on the bus to prevent address collisions.
 
@@ -11,20 +11,20 @@
    may be needed to determine if it is ok to access the bus.
 
 3. A device (particularly RF tuners) may want to avoid the digital noise
-   from the i2c bus, at least most of the time, and sits behind a gate
+   from the I2C bus, at least most of the time, and sits behind a gate
    that has to be operated before the device can be accessed.
 
 Etc
 ===
 
-These constructs are represented as i2c adapter trees by Linux, where
+These constructs are represented as I2C adapter trees by Linux, where
 each adapter has a parent adapter (except the root adapter) and zero or
 more child adapters. The root adapter is the actual adapter that issues
-i2c transfers, and all adapters with a parent are part of an "i2c-mux"
+I2C transfers, and all adapters with a parent are part of an "i2c-mux"
 object (quoted, since it can also be an arbitrator or a gate).
 
 Depending of the particular mux driver, something happens when there is
-an i2c transfer on one of its child adapters. The mux driver can
+an I2C transfer on one of its child adapters. The mux driver can
 obviously operate a mux, but it can also do arbitration with an external
 bus master or open a gate. The mux driver has two operations for this,
 select and deselect. select is called before the transfer and (the
@@ -34,7 +34,7 @@
 Locking
 =======
 
-There are two variants of locking available to i2c muxes, they can be
+There are two variants of locking available to I2C muxes; they can be
 mux-locked or parent-locked muxes. As is evident from below, it can be
 useful to know if a mux is mux-locked or if it is parent-locked. The
 following list was correct at the time of writing:
@@ -45,7 +45,7 @@
 i2c-arb-gpio-challenge    Parent-locked
 i2c-mux-gpio              Normally parent-locked, mux-locked iff
                           all involved gpio pins are controlled by the
-                          same i2c root adapter that they mux.
+                          same I2C root adapter that they mux.
 i2c-mux-gpmux             Normally parent-locked, mux-locked iff
                           specified in device-tree.
 i2c-mux-ltc4306           Mux-locked
@@ -54,7 +54,7 @@
 i2c-mux-pca954x           Parent-locked
 i2c-mux-pinctrl           Normally parent-locked, mux-locked iff
                           all involved pinctrl devices are controlled
-                          by the same i2c root adapter that they mux.
+                          by the same I2C root adapter that they mux.
 i2c-mux-reg               Parent-locked
 ======================    =============================================
 
@@ -83,9 +83,9 @@
 Mux-locked muxes does not lock the entire parent adapter during the
 full select-transfer-deselect transaction, only the muxes on the parent
 adapter are locked. Mux-locked muxes are mostly interesting if the
-select and/or deselect operations must use i2c transfers to complete
+select and/or deselect operations must use I2C transfers to complete
 their tasks. Since the parent adapter is not fully locked during the
-full transaction, unrelated i2c transfers may interleave the different
+full transaction, unrelated I2C transfers may interleave the different
 stages of the transaction. This has the benefit that the mux driver
 may be easier and cleaner to implement, but it has some caveats.
 
@@ -109,14 +109,14 @@
 
 ML3. A mux-locked mux cannot be used by a driver for auto-closing
      gates/muxes, i.e. something that closes automatically after a given
-     number (one, in most cases) of i2c transfers. Unrelated i2c transfers
+     number (one, in most cases) of I2C transfers. Unrelated I2C transfers
      may creep in and close prematurely.
 
-ML4. If any non-i2c operation in the mux driver changes the i2c mux state,
+ML4. If any non-I2C operation in the mux driver changes the I2C mux state,
      the driver has to lock the root adapter during that operation.
      Otherwise garbage may appear on the bus as seen from devices
-     behind the mux, when an unrelated i2c transfer is in flight during
-     the non-i2c mux-changing operation.
+     behind the mux, when an unrelated I2C transfer is in flight during
+     the non-I2C mux-changing operation.
 ==== =====================================================================
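As an illustration of ML4, a sketch of locking the root adapter around a
non-I2C mux-state change; foo_set_mux() is hypothetical and muxc is the
driver's struct i2c_mux_core::

  i2c_lock_bus(muxc->parent, I2C_LOCK_ROOT_ADAPTER);
  foo_set_mux(muxc);      /* non-I2C operation that changes the mux state */
  i2c_unlock_bus(muxc->parent, I2C_LOCK_ROOT_ADAPTER);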
 
 
@@ -137,14 +137,14 @@
 
 When there is an access to D1, this happens:
 
- 1. Someone issues an i2c-transfer to D1.
+ 1. Someone issues an I2C transfer to D1.
  2. M1 locks muxes on its parent (the root adapter in this case).
  3. M1 calls ->select to ready the mux.
- 4. M1 (presumably) does some i2c-transfers as part of its select.
-    These transfers are normal i2c-transfers that locks the parent
+ 4. M1 (presumably) does some I2C transfers as part of its select.
+    These transfers are normal I2C transfers that lock the parent
     adapter.
- 5. M1 feeds the i2c-transfer from step 1 to its parent adapter as a
-    normal i2c-transfer that locks the parent adapter.
+ 5. M1 feeds the I2C transfer from step 1 to its parent adapter as a
+    normal I2C transfer that locks the parent adapter.
  6. M1 calls ->deselect, if it has one.
  7. Same rules as in step 4, but for ->deselect.
  8. M1 unlocks muxes on its parent.
@@ -159,8 +159,8 @@
 
 Parent-locked muxes lock the parent adapter during the full select-
 transfer-deselect transaction. The implication is that the mux driver
-has to ensure that any and all i2c transfers through that parent
-adapter during the transaction are unlocked i2c transfers (using e.g.
+has to ensure that any and all I2C transfers through that parent
+adapter during the transaction are unlocked I2C transfers (using e.g.
 __i2c_transfer), or a deadlock will follow. There are a couple of
 caveats.
 
@@ -169,12 +169,12 @@
      of another mux, this might break a possible assumption from the
      child mux that the root adapter is unused between its select op
      and the actual transfer (e.g. if the child mux is auto-closing
-     and the parent mux issus i2c-transfers as part of its select).
+     and the parent mux issues I2C transfers as part of its select).
      This is especially the case if the parent mux is mux-locked, but
      it may also happen if the parent mux is parent-locked.
 
 PL2. If select/deselect calls out to other subsystems such as gpio,
-     pinctrl, regmap or iio, it is essential that any i2c transfers
+     pinctrl, regmap or iio, it is essential that any I2C transfers
      caused by these subsystems are unlocked. This can be convoluted to
      accomplish, maybe even impossible if an acceptably clean solution
      is sought.
@@ -197,15 +197,15 @@
 
 When there is an access to D1, this happens:
 
- 1.  Someone issues an i2c-transfer to D1.
+ 1.  Someone issues an I2C transfer to D1.
  2.  M1 locks muxes on its parent (the root adapter in this case).
  3.  M1 locks its parent adapter.
  4.  M1 calls ->select to ready the mux.
- 5.  If M1 does any i2c-transfers (on this root adapter) as part of
-     its select, those transfers must be unlocked i2c-transfers so
+ 5.  If M1 does any I2C transfers (on this root adapter) as part of
+     its select, those transfers must be unlocked I2C transfers so
      that they do not deadlock the root adapter.
- 6.  M1 feeds the i2c-transfer from step 1 to the root adapter as an
-     unlocked i2c-transfer, so that it does not deadlock the parent
+ 6.  M1 feeds the I2C transfer from step 1 to the root adapter as an
+     unlocked I2C transfer, so that it does not deadlock the parent
      adapter.
  7.  M1 calls ->deselect, if it has one.
  8.  Same rules as in step 5, but for ->deselect.
@@ -240,7 +240,7 @@
 the buck to the root adapter).
 
 This topology is bad if M2 is an auto-closing mux and M1->select
-issues any unlocked i2c transfers on the root adapter that may leak
+issues any unlocked I2C transfers on the root adapter that may leak
 through and be seen by the M2 adapter, thus closing M2 prematurely.
 
 
@@ -286,14 +286,14 @@
 
 This kind of topology is generally not suitable and should probably
 be avoided. The reason is that M2 probably assumes that there will
-be no i2c transfers during its calls to ->select and ->deselect, and
+be no I2C transfers during its calls to ->select and ->deselect, and
 if there are, any such transfers might appear on the slave side of M2
-as partial i2c transfers, i.e. garbage or worse. This might cause
+as partial I2C transfers, i.e. garbage or worse. This might cause
 device lockups and/or other problems.
 
 The topology is especially troublesome if M2 is an auto-closing
 mux. In that case, any interleaved accesses to D4 might close M2
-prematurely, as might any i2c-transfers part of M1->select.
+prematurely, as might any I2C transfers part of M1->select.
 
 But if M2 is not making the above stated assumption, and if M2 is not
 auto-closing, the topology is fine.
diff --git a/Documentation/i2c/i2c.svg b/Documentation/i2c/i2c.svg
new file mode 100644
index 0000000..5979405
--- /dev/null
+++ b/Documentation/i2c/i2c.svg
@@ -0,0 +1,1341 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   sodipodi:docname="i2c.svg"
+   inkscape:version="0.92.3 (2405546, 2018-03-11)"
+   version="1.1"
+   id="svg2"
+   viewBox="0 0 813.34215 261.01596"
+   height="73.664505mm"
+   width="229.54323mm">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="DotM"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker8861"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path8859"
+         d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.4,0,0,0.4,2.96,0.4)" />
+    </marker>
+    <marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="marker6165"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="DotM"
+       inkscape:collect="always">
+      <path
+         transform="matrix(0.4,0,0,0.4,2.96,0.4)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+         id="path6163"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="marker2713"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="DotM">
+      <path
+         transform="matrix(0.4,0,0,0.4,2.96,0.4)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+         id="path2711"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="DotM"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="DotM"
+       style="overflow:visible"
+       inkscape:isstock="true"
+       inkscape:collect="always">
+      <path
+         id="path1795"
+         d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.4,0,0,0.4,2.96,0.4)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="marker6389"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="Arrow2Mend">
+      <path
+         transform="scale(-0.6)"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         id="path6387"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="TriangleOutM"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="TriangleOutM"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         id="path4107"
+         d="M 5.77,0 -2.88,5 V -5 Z"
+         style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="scale(0.4)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="TriangleOutL"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker5333"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         id="path5335"
+         d="M 5.77,0 -2.88,5 V -5 Z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="scale(0.8)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="TriangleOutL"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="TriangleOutL"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         id="path5049"
+         d="M 5.77,0 -2.88,5 V -5 Z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="scale(0.8)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="DotS"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="DotS"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         id="path9326"
+         d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+         style="fill:#404040;fill-opacity:1;fill-rule:evenodd;stroke:#404040;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.2,0,0,0.2,1.48,0.2)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mstart"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         id="path9283"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(0.6)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         id="path9097"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker8935"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         id="path8937"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker8781"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         id="path8783"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         id="path4700"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="EmptyTriangleOutL"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="EmptyTriangleOutL"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         id="path4502"
+         d="M 5.77,0 -2.88,5 V -5 Z"
+         style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.8,0,0,0.8,-4.8,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="EmptyTriangleOutL"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="EmptyTriangleOutL-4"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4502-7"
+         d="M 5.77,0 -2.88,5 V -5 Z"
+         style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.8,0,0,0.8,-4.8,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="EmptyTriangleOutL"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="EmptyTriangleOutL-4-4"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4502-7-5"
+         d="M 5.77,0 -2.88,5 V -5 Z"
+         style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.8,0,0,0.8,-4.8,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="EmptyTriangleOutL"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="EmptyTriangleOutL-7"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4502-5"
+         d="M 5.77,0 -2.88,5 V -5 Z"
+         style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.8,0,0,0.8,-4.8,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="EmptyTriangleOutL"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="EmptyTriangleOutL-6"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4502-2"
+         d="M 5.77,0 -2.88,5 V -5 Z"
+         style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.8,0,0,0.8,-4.8,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="EmptyTriangleOutL"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="EmptyTriangleOutL-78"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4502-57"
+         d="M 5.77,0 -2.88,5 V -5 Z"
+         style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.8,0,0,0.8,-4.8,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="EmptyTriangleOutL"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="EmptyTriangleOutL-1"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4502-8"
+         d="M 5.77,0 -2.88,5 V -5 Z"
+         style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.8,0,0,0.8,-4.8,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="EmptyTriangleOutL"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="EmptyTriangleOutL-9"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4502-75"
+         d="M 5.77,0 -2.88,5 V -5 Z"
+         style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.8,0,0,0.8,-4.8,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="EmptyTriangleOutL"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="EmptyTriangleOutL-78-2"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4502-57-6"
+         d="M 5.77,0 -2.88,5 V -5 Z"
+         style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.8,0,0,0.8,-4.8,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-3"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4369-6"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-3-5"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4369-6-3"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-3-5-6"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4369-6-3-2"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-3-5-6-1"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4369-6-3-2-2"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-3-5-7"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4369-6-3-0"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-3-9"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4369-6-36"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-31"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4369-9"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-3-5-7-5"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4369-6-3-0-4"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-3-9-6"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4369-6-36-5"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-3-5-7-3"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4369-6-3-0-5"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-3"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-1"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-3-7"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-1-8"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-6"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-1"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-6-6"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-1-3"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-2"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-0"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-6-5"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-1-5"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-4"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-7"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-6-5-9"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-1-5-3"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-6-5-4"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-1-5-5"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-6-5-4-4"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-1-5-5-3"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-6-5-4-4-9"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-1-5-5-3-2"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-6-5-4-4-6"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-1-5-5-3-8"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-6-5-4-4-2"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-1-5-5-3-7"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-3"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-6"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-6-5-4-4-6-5"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-1-5-5-3-8-3"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-27"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-09"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-27-6"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-09-2"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-27-0"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-09-23"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-9"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-7"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-4-6"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-7-7"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-4-6-3"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-7-7-5"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-4-6-2"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-7-7-9"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="TriangleOutM"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="TriangleOutM-2"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path4107-7"
+         d="M 5.77,0 -2.88,5 V -5 Z"
+         style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="scale(0.4)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mstart-9"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9283-3"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker9095-1-5-4-6-6"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9097-2-0-7-7-0"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6)" />
+    </marker>
+    <marker
+       inkscape:stockid="DotM"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="DotM-3"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path1795-7"
+         d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.4,0,0,0.4,2.96,0.4)" />
+    </marker>
+    <marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="marker2713-9"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="DotM">
+      <path
+         inkscape:connector-curvature="0"
+         transform="matrix(0.4,0,0,0.4,2.96,0.4)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+         id="path2711-2" />
+    </marker>
+    <marker
+       inkscape:stockid="DotM"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="DotM-2"
+       style="overflow:visible"
+       inkscape:isstock="true"
+       inkscape:collect="always">
+      <path
+         inkscape:connector-curvature="0"
+         id="path1795-8"
+         d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.4,0,0,0.4,2.96,0.4)" />
+    </marker>
+    <marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="marker2713-3"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="DotM">
+      <path
+         inkscape:connector-curvature="0"
+         transform="matrix(0.4,0,0,0.4,2.96,0.4)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+         id="path2711-6" />
+    </marker>
+    <marker
+       inkscape:stockid="DotM"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="DotM-1"
+       style="overflow:visible"
+       inkscape:isstock="true"
+       inkscape:collect="always">
+      <path
+         inkscape:connector-curvature="0"
+         id="path1795-2"
+         d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.4,0,0,0.4,2.96,0.4)" />
+    </marker>
+    <marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="marker2713-94"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="DotM">
+      <path
+         inkscape:connector-curvature="0"
+         transform="matrix(0.4,0,0,0.4,2.96,0.4)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+         id="path2711-7" />
+    </marker>
+    <marker
+       inkscape:stockid="DotM"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="DotM-8"
+       style="overflow:visible"
+       inkscape:isstock="true"
+       inkscape:collect="always">
+      <path
+         inkscape:connector-curvature="0"
+         id="path1795-4"
+         d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         transform="matrix(0.4,0,0,0.4,2.96,0.4)" />
+    </marker>
+    <marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="marker2713-36"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="DotM">
+      <path
+         inkscape:connector-curvature="0"
+         transform="matrix(0.4,0,0,0.4,2.96,0.4)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+         id="path2711-1" />
+    </marker>
+    <marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="marker6165-5"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="DotM">
+      <path
+         transform="matrix(0.4,0,0,0.4,2.96,0.4)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+         d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+         id="path6163-5"
+         inkscape:connector-curvature="0" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     showguides="true"
+     inkscape:window-maximized="1"
+     inkscape:window-y="0"
+     inkscape:window-x="0"
+     inkscape:window-height="1015"
+     inkscape:window-width="1920"
+     showgrid="true"
+     inkscape:current-layer="layer1"
+     inkscape:document-units="px"
+     inkscape:cy="214.66765"
+     inkscape:cx="-167.56857"
+     inkscape:zoom="0.70710678"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     borderopacity="1.0"
+     bordercolor="#666666"
+     pagecolor="#ffffff"
+     id="base"
+     inkscape:snap-to-guides="true"
+     inkscape:snap-grids="true"
+     inkscape:snap-bbox="false"
+     inkscape:object-nodes="true"
+     fit-margin-top="5"
+     fit-margin-left="5"
+     fit-margin-right="5"
+     fit-margin-bottom="5">
+    <inkscape:grid
+       type="xygrid"
+       id="grid4451"
+       originx="-93.377219"
+       originy="-347.2523" />
+  </sodipodi:namedview>
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+        <dc:creator>
+          <cc:Agent>
+            <dc:title>Luca Ceresoli</dc:title>
+          </cc:Agent>
+        </dc:creator>
+        <dc:date>2020</dc:date>
+        <cc:license
+           rdf:resource="http://creativecommons.org/licenses/by-sa/4.0/" />
+      </cc:Work>
+      <cc:License
+         rdf:about="http://creativecommons.org/licenses/by-sa/4.0/">
+        <cc:permits
+           rdf:resource="http://creativecommons.org/ns#Reproduction" />
+        <cc:permits
+           rdf:resource="http://creativecommons.org/ns#Distribution" />
+        <cc:requires
+           rdf:resource="http://creativecommons.org/ns#Notice" />
+        <cc:requires
+           rdf:resource="http://creativecommons.org/ns#Attribution" />
+        <cc:permits
+           rdf:resource="http://creativecommons.org/ns#DerivativeWorks" />
+        <cc:requires
+           rdf:resource="http://creativecommons.org/ns#ShareAlike" />
+      </cc:License>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Livello 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-93.377215,-444.09395)">
+    <rect
+       style="opacity:1;fill:#ffb9b9;fill-opacity:1;stroke:#f00000;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       id="rect4424-3-2-9-7"
+       width="112.5"
+       height="113.75008"
+       x="112.5"
+       y="471.11221"
+       rx="0"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       x="167.5354"
+       y="521.46259"
+       id="text4349"><tspan
+         sodipodi:role="line"
+         x="167.5354"
+         y="521.46259"
+         style="font-size:25px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle"
+         id="tspan1273">I2C</tspan><tspan
+         sodipodi:role="line"
+         x="167.5354"
+         y="552.71259"
+         style="font-size:25px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle"
+         id="tspan1285">Master</tspan></text>
+    <rect
+       style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate"
+       id="rect4424-3-2-9-7-3-3-5-3"
+       width="112.49998"
+       height="112.50001"
+       x="262.5"
+       y="471.11218"
+       rx="0"
+       ry="0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968767;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       d="m 112.50002,639.86223 c 712.50002,0 712.50002,0 712.50002,0"
+       id="path4655-9-3-65-5-6"
+       inkscape:connector-curvature="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:12px;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       x="318.59131"
+       y="520.83752"
+       id="text4349-26"><tspan
+         sodipodi:role="line"
+         x="318.59131"
+         y="520.83752"
+         style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
+         id="tspan1273-8">I2C</tspan><tspan
+         sodipodi:role="line"
+         x="318.59131"
+         y="552.08752"
+         style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
+         id="tspan1287">Slave</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968767;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       d="m 112.49995,677.36223 c 712.50005,0 712.50005,0 712.50005,0"
+       id="path4655-9-3-65-5-6-2"
+       inkscape:connector-curvature="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:12px;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       x="861.07312"
+       y="687.03937"
+       id="text4349-7"><tspan
+         sodipodi:role="line"
+         x="861.07312"
+         y="687.03937"
+         style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;writing-mode:lr-tb;direction:ltr;text-anchor:middle;stroke-width:1px"
+         id="tspan1285-9">SCL</tspan></text>
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot1627"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:Sans;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"><flowRegion
+         id="flowRegion1629"><rect
+           id="rect1631"
+           width="220"
+           height="120"
+           x="140"
+           y="-126.29921" /></flowRegion><flowPara
+         id="flowPara1633" /></flowRoot>    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:12px;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       x="863.31921"
+       y="648.96735"
+       id="text4349-7-3"><tspan
+         sodipodi:role="line"
+         x="863.31921"
+         y="648.96735"
+         style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
+         id="tspan1285-9-6">SDA</tspan></text>
+    <rect
+       style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;vector-effect:none;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate"
+       id="rect4424-3-2-9-7-3-3-5-3-0"
+       width="112.49998"
+       height="112.50002"
+       x="412.5"
+       y="471.11215"
+       rx="0"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:12px;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       x="468.59131"
+       y="520.83746"
+       id="text4349-26-6"><tspan
+         sodipodi:role="line"
+         x="468.59131"
+         y="520.83746"
+         style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
+         id="tspan1273-8-2">I2C</tspan><tspan
+         sodipodi:role="line"
+         x="468.59131"
+         y="552.08746"
+         style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
+         id="tspan1287-6">Slave</tspan></text>
+    <rect
+       style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;vector-effect:none;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate"
+       id="rect4424-3-2-9-7-3-3-5-3-1"
+       width="112.49998"
+       height="112.50002"
+       x="562.5"
+       y="471.11215"
+       rx="0"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:12px;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       x="618.59131"
+       y="520.83746"
+       id="text4349-26-8"><tspan
+         sodipodi:role="line"
+         x="618.59131"
+         y="520.83746"
+         style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
+         id="tspan1273-8-7">I2C</tspan><tspan
+         sodipodi:role="line"
+         x="618.59131"
+         y="552.08746"
+         style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
+         id="tspan1287-9">Slave</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#DotM)"
+       d="m 150,583.61221 v 93.75"
+       id="path4655-9-3-65-5-6-20"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="opacity:1;vector-effect:none;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker2713)"
+       d="m 187.5,583.61221 v 56.25"
+       id="path4655-9-3-65-5-6-20-2"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#DotM-2)"
+       d="m 300,583.61221 v 93.75"
+       id="path4655-9-3-65-5-6-20-9"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="opacity:1;vector-effect:none;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker2713-9)"
+       d="m 337.5,583.61221 v 56.25"
+       id="path4655-9-3-65-5-6-20-2-7"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#DotM-1)"
+       d="m 450,583.61221 v 93.75"
+       id="path4655-9-3-65-5-6-20-93"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="opacity:1;vector-effect:none;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker2713-3)"
+       d="m 487.5,583.61221 v 56.25"
+       id="path4655-9-3-65-5-6-20-2-1"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#DotM-8)"
+       d="m 600,583.61221 v 93.75"
+       id="path4655-9-3-65-5-6-20-5"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="opacity:1;vector-effect:none;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker2713-94)"
+       d="m 637.5,583.61221 v 56.25"
+       id="path4655-9-3-65-5-6-20-2-0"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="opacity:1;vector-effect:none;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#marker6165);marker-end:url(#marker6165)"
+       d="m 750,471.11221 v 28.125 l 9.375,9.375 -18.74999,18.75 18.74999,18.75 -18.74999,18.75 18.74999,18.75 -9.375,9.375 v 28.125 0 0 56.25"
+       id="path6135"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cccccccccccc" />
+    <path
+       style="opacity:1;vector-effect:none;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#marker8861);marker-end:url(#marker6165-5)"
+       d="m 787.49999,471.11221 v 28.125 l 9.375,9.375 -18.74999,18.75 18.74999,18.75 -18.74999,18.75 18.74999,18.75 -9.375,9.375 v 28.125 0 0 18.75001"
+       id="path6135-4"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cccccccccccc" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968719;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       d="m 712.5,471.11221 c 112.49999,0 112.49999,0 112.49999,0"
+       id="path4655-9-3-65-5-6-7"
+       inkscape:connector-curvature="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:12px;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       x="859.94275"
+       y="480.03558"
+       id="text4349-7-3-6"><tspan
+         sodipodi:role="line"
+         x="859.94275"
+         y="480.03558"
+         style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
+         id="tspan1285-9-6-5">V<tspan
+   style="font-size:64.99999762%;baseline-shift:sub"
+   id="tspan9307">DD</tspan></tspan></text>
+  </g>
+</svg>
diff --git a/Documentation/i2c/index.rst b/Documentation/i2c/index.rst
index a0fbaf6..fee4744 100644
--- a/Documentation/i2c/index.rst
+++ b/Documentation/i2c/index.rst
@@ -4,30 +4,66 @@
 I2C/SMBus Subsystem
 ===================
 
+Introduction
+============
+
 .. toctree::
    :maxdepth: 1
 
+   summary
+   i2c-protocol
+   smbus-protocol
+   instantiating-devices
+   busses/index
+   i2c-topology
+   muxes/i2c-mux-gpio
+
+Writing device drivers
+======================
+
+.. toctree::
+   :maxdepth: 1
+
+   writing-clients
    dev-interface
    dma-considerations
    fault-codes
    functionality
+
+Debugging
+=========
+
+.. toctree::
+   :maxdepth: 1
+
    gpio-fault-injection
-   i2c-protocol
    i2c-stub
-   i2c-topology
-   instantiating-devices
-   old-module-parameters
-   slave-eeprom-backend
+
+Slave I2C
+=========
+
+.. toctree::
+   :maxdepth: 1
+
    slave-interface
-   smbus-protocol
-   summary
+   slave-eeprom-backend
+
+Advanced topics
+===============
+
+.. toctree::
+   :maxdepth: 1
+
    ten-bit-addresses
+
+Legacy documentation
+====================
+
+.. toctree::
+   :maxdepth: 1
+
    upgrading-clients
-   writing-clients
-
-   muxes/i2c-mux-gpio
-
-   busses/index
+   old-module-parameters
 
 .. only::  subproject and html
 
diff --git a/Documentation/i2c/instantiating-devices.rst b/Documentation/i2c/instantiating-devices.rst
index 875ebe9e..e558e0a 100644
--- a/Documentation/i2c/instantiating-devices.rst
+++ b/Documentation/i2c/instantiating-devices.rst
@@ -9,14 +9,67 @@
 several ways to achieve this, depending on the context and requirements.
 
 
-Method 1a: Declare the I2C devices by bus number
-------------------------------------------------
+Method 1: Declare the I2C devices statically
+--------------------------------------------
 
 This method is appropriate when the I2C bus is a system bus as is the case
-for many embedded systems. On such systems, each I2C bus has a number
-which is known in advance. It is thus possible to pre-declare the I2C
-devices which live on this bus. This is done with an array of struct
-i2c_board_info which is registered by calling i2c_register_board_info().
+for many embedded systems. On such systems, each I2C bus has a number which
+is known in advance. It is thus possible to pre-declare the I2C devices
+which live on this bus.
+
+This information is provided to the kernel in a different way on different
+architectures: device tree, ACPI or board files.
+
+When the I2C bus in question is registered, the I2C devices will be
+instantiated automatically by i2c-core. The devices will be automatically
+unbound and destroyed when the I2C bus they sit on goes away (if ever).
+
+
+Declare the I2C devices via devicetree
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+On platforms using devicetree, the declaration of I2C devices is done in
+subnodes of the master controller.
+
+Example::
+
+	i2c1: i2c@400a0000 {
+		/* ... master properties skipped ... */
+		clock-frequency = <100000>;
+
+		flash@50 {
+			compatible = "atmel,24c256";
+			reg = <0x50>;
+		};
+
+		pca9532: gpio@60 {
+			compatible = "nxp,pca9532";
+			gpio-controller;
+			#gpio-cells = <2>;
+			reg = <0x60>;
+		};
+	};
+
+Here, two devices are attached to the bus using a speed of 100kHz. For
+additional properties which might be needed to set up the device, please refer
+to its devicetree documentation in Documentation/devicetree/bindings/.
+
+
+Declare the I2C devices via ACPI
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ACPI can also describe I2C devices. There is special documentation for this
+which is currently located at :doc:`../firmware-guide/acpi/enumeration`.
+
+
+Declare the I2C devices in board files
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In many embedded architectures, devicetree has replaced the old hardware
+description based on board files, but the latter are still used in old
+code. Instantiating I2C devices via board files is done with an array of
+struct i2c_board_info which is registered by calling
+i2c_register_board_info().
 
 Example (from omap2 h4)::
 
@@ -44,49 +97,7 @@
   }
 
 The above code declares 3 devices on I2C bus 1, including their respective
-addresses and custom data needed by their drivers. When the I2C bus in
-question is registered, the I2C devices will be instantiated automatically
-by i2c-core.
-
-The devices will be automatically unbound and destroyed when the I2C bus
-they sit on goes away (if ever.)
-
-
-Method 1b: Declare the I2C devices via devicetree
--------------------------------------------------
-
-This method has the same implications as method 1a. The declaration of I2C
-devices is here done via devicetree as subnodes of the master controller.
-
-Example::
-
-	i2c1: i2c@400a0000 {
-		/* ... master properties skipped ... */
-		clock-frequency = <100000>;
-
-		flash@50 {
-			compatible = "atmel,24c256";
-			reg = <0x50>;
-		};
-
-		pca9532: gpio@60 {
-			compatible = "nxp,pca9532";
-			gpio-controller;
-			#gpio-cells = <2>;
-			reg = <0x60>;
-		};
-	};
-
-Here, two devices are attached to the bus using a speed of 100kHz. For
-additional properties which might be needed to set up the device, please refer
-to its devicetree documentation in Documentation/devicetree/bindings/.
-
-
-Method 1c: Declare the I2C devices via ACPI
--------------------------------------------
-
-ACPI can also describe I2C devices. There is special documentation for this
-which is currently located at Documentation/firmware-guide/acpi/enumeration.rst.
+addresses and custom data needed by their drivers.
 
 
 Method 2: Instantiate the devices explicitly
@@ -98,7 +109,7 @@
 main chip by the means of an I2C bus. You won't know the number of the I2C
 bus in advance, so the method 1 described above can't be used. Instead,
 you can instantiate your I2C devices explicitly. This is done by filling
-a struct i2c_board_info and calling i2c_new_device().
+a struct i2c_board_info and calling i2c_new_client_device().
 
 Example (from the sfe4001 network driver)::
 
@@ -110,7 +121,7 @@
   {
 	(...)
 	efx->board_info.hwmon_client =
-		i2c_new_device(&efx->i2c_adap, &sfe4001_hwmon_info);
+		i2c_new_client_device(&efx->i2c_adap, &sfe4001_hwmon_info);
 
 	(...)
   }
@@ -123,7 +134,7 @@
 on cheap variants of a board but you have no way to tell them apart), or
 it may have different addresses from one board to the next (manufacturer
 changing its design without notice). In this case, you can call
-i2c_new_scanned_device() instead of i2c_new_device().
+i2c_new_scanned_device() instead of i2c_new_client_device().
 
 Example (from the nxp OHCI driver)::
 
@@ -152,7 +163,7 @@
 
 The driver which instantiated the I2C device is responsible for destroying
 it on cleanup. This is done by calling i2c_unregister_device() on the
-pointer that was earlier returned by i2c_new_device() or
+pointer that was earlier returned by i2c_new_client_device() or
 i2c_new_scanned_device().
 
 
@@ -188,7 +199,7 @@
 or when the underlying I2C bus is itself destroyed, whichever happens
 first.
 
-Those of you familiar with the i2c subsystem of 2.4 kernels and early 2.6
+Those of you familiar with the I2C subsystem of 2.4 kernels and early 2.6
 kernels will find out that this method 3 is essentially similar to what
 was done there. Two significant differences are:
 
@@ -214,15 +225,15 @@
 what addresses they live at. However, in certain cases, it does not, so a
 sysfs interface was added to let the user provide the information. This
 interface is made of 2 attribute files which are created in every I2C bus
-directory: new_device and delete_device. Both files are write only and you
-must write the right parameters to them in order to properly instantiate,
-respectively delete, an I2C device.
+directory: ``new_device`` and ``delete_device``. Both files are write
+only and you must write the right parameters to them in order to properly
+instantiate, respectively delete, an I2C device.
 
-File new_device takes 2 parameters: the name of the I2C device (a string)
-and the address of the I2C device (a number, typically expressed in
-hexadecimal starting with 0x, but can also be expressed in decimal.)
+File ``new_device`` takes 2 parameters: the name of the I2C device (a
+string) and the address of the I2C device (a number, typically expressed
+in hexadecimal starting with 0x, but can also be expressed in decimal.)
 
-File delete_device takes a single parameter: the address of the I2C
+File ``delete_device`` takes a single parameter: the address of the I2C
 device. As no two devices can live at the same address on a given I2C
 segment, the address is sufficient to uniquely identify the device to be
 deleted.
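
The explicit instantiation covered by method 2 above boils down to filling a
struct i2c_board_info and handing it to i2c_new_client_device(). Below is a
minimal sketch of that flow, assuming a bus driver that already owns a valid
struct i2c_adapter; the "foo_sensor" name, the 0x48 address and the helper
names are invented for illustration and are not part of the documented
example::

  #include <linux/err.h>
  #include <linux/i2c.h>

  static struct i2c_client *foo_client;

  static int foo_attach_sensor(struct i2c_adapter *adap)
  {
	struct i2c_board_info info = {
		I2C_BOARD_INFO("foo_sensor", 0x48),	/* hypothetical name/address */
	};

	foo_client = i2c_new_client_device(adap, &info);
	if (IS_ERR(foo_client))
		return PTR_ERR(foo_client);

	return 0;
  }

  static void foo_detach_sensor(void)
  {
	/* The code that created the device must also destroy it. */
	i2c_unregister_device(foo_client);
  }

i2c_new_client_device() reports failure with an ERR_PTR() value, hence the
IS_ERR() check rather than a NULL test.
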
diff --git a/Documentation/i2c/old-module-parameters.rst b/Documentation/i2c/old-module-parameters.rst
index a193951..38e5582 100644
--- a/Documentation/i2c/old-module-parameters.rst
+++ b/Documentation/i2c/old-module-parameters.rst
@@ -1,19 +1,24 @@
-=================================================
-I2C device driver binding control from user-space
-=================================================
+================================================================
+I2C device driver binding control from user-space in old kernels
+================================================================
 
-Up to kernel 2.6.32, many i2c drivers used helper macros provided by
+.. NOTE::
+   This section is only relevant if you are handling old code found in
+   kernel 2.6. If you work with more recent kernels, you can safely skip
+   this section.
+
+Up to kernel 2.6.32, many I2C drivers used helper macros provided by
 <linux/i2c.h> which created standard module parameters to let the user
-control how the driver would probe i2c buses and attach to devices. These
-parameters were known as "probe" (to let the driver probe for an extra
-address), "force" (to forcibly attach the driver to a given device) and
-"ignore" (to prevent a driver from probing a given address).
+control how the driver would probe I2C buses and attach to devices. These
+parameters were known as ``probe`` (to let the driver probe for an extra
+address), ``force`` (to forcibly attach the driver to a given device) and
+``ignore`` (to prevent a driver from probing a given address).
 
-With the conversion of the i2c subsystem to the standard device driver
+With the conversion of the I2C subsystem to the standard device driver
 binding model, it became clear that these per-module parameters were no
 longer needed, and that a centralized implementation was possible. The new,
-sysfs-based interface is described in the documentation file
-"instantiating-devices", section "Method 4: Instantiate from user-space".
+sysfs-based interface is described in :doc:`instantiating-devices`, section
+"Method 4: Instantiate from user-space".
 
 Below is a mapping from the old module parameters to the new interface.
 
@@ -42,8 +47,8 @@
   # echo dummy 0x2f > /sys/bus/i2c/devices/i2c-1/new_device
   # modprobe <driver>
 
-Of course, it is important to instantiate the "dummy" device before loading
+Of course, it is important to instantiate the ``dummy`` device before loading
 the driver. The dummy device will be handled by i2c-core itself, preventing
 other drivers from binding to it later on. If there is a real device at the
 problematic address, and you want another driver to bind to it, then simply
-pass the name of the device in question instead of "dummy".
+pass the name of the device in question instead of ``dummy``.
diff --git a/Documentation/i2c/slave-interface.rst b/Documentation/i2c/slave-interface.rst
index c769bd6..82ea3e1 100644
--- a/Documentation/i2c/slave-interface.rst
+++ b/Documentation/i2c/slave-interface.rst
@@ -59,7 +59,7 @@
 
 	ret = i2c_slave_event(client, event, &val)
 
-'client' describes the i2c slave device. 'event' is one of the special event
+'client' describes the I2C slave device. 'event' is one of the special event
 types described hereafter. 'val' holds an u8 value for the data byte to be
 read/written and is thus bidirectional. The pointer to val must always be
 provided even if val is not used for an event, i.e. don't use NULL here. 'ret'
@@ -143,7 +143,7 @@
 If you want to add slave support to the bus driver:
 
 * implement calls to register/unregister the slave and add those to the
-  struct i2c_algorithm. When registering, you probably need to set the i2c
+  struct i2c_algorithm. When registering, you probably need to set the I2C
   slave address and enable slave specific interrupts. If you use runtime pm, you
   should use pm_runtime_get_sync() because your device usually needs to be
   powered on always to be able to detect its slave address. When unregistering,
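
On the backend side, the i2c_slave_event() contract described above
translates into a callback that reacts to the slave events. A minimal
sketch follows, loosely modelled on the EEPROM simulator backend but
keeping only a single byte of state; all foo_* names are hypothetical::

  #include <linux/i2c.h>

  /* One byte of state; a real backend (such as the EEPROM simulator)
   * keeps a buffer and a byte offset instead. */
  struct foo_slave {
	u8 reg;
  };

  static int foo_slave_cb(struct i2c_client *client,
			  enum i2c_slave_event event, u8 *val)
  {
	struct foo_slave *priv = i2c_get_clientdata(client);

	switch (event) {
	case I2C_SLAVE_WRITE_RECEIVED:
		priv->reg = *val;	/* the master wrote a byte to us */
		break;
	case I2C_SLAVE_READ_REQUESTED:
	case I2C_SLAVE_READ_PROCESSED:
		*val = priv->reg;	/* hand the next byte back to the master */
		break;
	case I2C_SLAVE_WRITE_REQUESTED:
	case I2C_SLAVE_STOP:
		break;
	}

	return 0;
  }

  /* Registered from the backend's probe() with:
   *	i2c_slave_register(client, foo_slave_cb);
   * and removed again with i2c_slave_unregister(client).
   */
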
diff --git a/Documentation/i2c/smbus-protocol.rst b/Documentation/i2c/smbus-protocol.rst
index e30eb1d..c122ed2 100644
--- a/Documentation/i2c/smbus-protocol.rst
+++ b/Documentation/i2c/smbus-protocol.rst
@@ -1,6 +1,6 @@
-======================
-SMBus Protocol Summary
-======================
+==================
+The SMBus Protocol
+==================
 
 The following is a summary of the SMBus protocol. It applies to
 all revisions of the protocol (1.0, 1.1, and 2.0).
@@ -27,27 +27,27 @@
 Each transaction type corresponds to a functionality flag. Before calling a
 transaction function, a device driver should always check (just once) for
 the corresponding functionality flag to ensure that the underlying I2C
-adapter supports the transaction in question. See
-<file:Documentation/i2c/functionality.rst> for the details.
+adapter supports the transaction in question. See :doc:`functionality` for
+the details.
 
 
 Key to symbols
 ==============
 
 =============== =============================================================
-S     (1 bit) : Start bit
-P     (1 bit) : Stop bit
-Rd/Wr (1 bit) : Read/Write bit. Rd equals 1, Wr equals 0.
-A, NA (1 bit) : Accept and reverse accept bit.
-Addr  (7 bits): I2C 7 bit address. Note that this can be expanded as usual to
+S               Start condition
+P               Stop condition
+Rd/Wr (1 bit)   Read/Write bit. Rd equals 1, Wr equals 0.
+A, NA (1 bit)   Acknowledge (ACK) and Not Acknowledge (NACK) bit
+Addr  (7 bits)  I2C 7 bit address. Note that this can be expanded as usual to
                 get a 10 bit I2C address.
-Comm  (8 bits): Command byte, a data byte which often selects a register on
+Comm  (8 bits)  Command byte, a data byte which often selects a register on
                 the device.
-Data  (8 bits): A plain data byte. Sometimes, I write DataLow, DataHigh
+Data  (8 bits)  A plain data byte. DataLow and DataHigh are used
                 for 16 bit data.
-Count (8 bits): A data byte containing the length of a block operation.
+Count (8 bits)  A data byte containing the length of a block operation.
 
-[..]:           Data sent by I2C device, as opposed to data sent by the host
+[..]            Data sent by I2C device, as opposed to data sent by the host
                 adapter.
 =============== =============================================================
 
@@ -62,8 +62,10 @@
 Functionality flag: I2C_FUNC_SMBUS_QUICK
 
 
-SMBus Receive Byte:  i2c_smbus_read_byte()
-==========================================
+SMBus Receive Byte
+==================
+
+Implemented by i2c_smbus_read_byte()
 
 This reads a single byte from a device, without specifying a device
 register. Some devices are so simple that this interface is enough; for
@@ -75,8 +77,10 @@
 Functionality flag: I2C_FUNC_SMBUS_READ_BYTE
 
 
-SMBus Send Byte:  i2c_smbus_write_byte()
-========================================
+SMBus Send Byte
+===============
+
+Implemented by i2c_smbus_write_byte()
 
 This operation is the reverse of Receive Byte: it sends a single byte
 to a device.  See Receive Byte for more information.
@@ -88,8 +92,10 @@
 Functionality flag: I2C_FUNC_SMBUS_WRITE_BYTE
 
 
-SMBus Read Byte:  i2c_smbus_read_byte_data()
-============================================
+SMBus Read Byte
+===============
+
+Implemented by i2c_smbus_read_byte_data()
 
 This reads a single byte from a device, from a designated register.
 The register is specified through the Comm byte::
@@ -99,8 +105,10 @@
 Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA
 
 
-SMBus Read Word:  i2c_smbus_read_word_data()
-============================================
+SMBus Read Word
+===============
+
+Implemented by i2c_smbus_read_word_data()
 
 This operation is very like Read Byte; again, data is read from a
 device, from a designated register that is specified through the Comm
@@ -110,13 +118,15 @@
 
 Functionality flag: I2C_FUNC_SMBUS_READ_WORD_DATA
 
-Note the convenience function i2c_smbus_read_word_swapped is
+Note the convenience function i2c_smbus_read_word_swapped() is
 available for reads where the two data bytes are the other way
 around (not SMBus compliant, but very popular.)
 
 
-SMBus Write Byte:  i2c_smbus_write_byte_data()
-==============================================
+SMBus Write Byte
+================
+
+Implemented by i2c_smbus_write_byte_data()
 
 This writes a single byte to a device, to a designated register. The
 register is specified through the Comm byte. This is the opposite of
@@ -129,24 +139,26 @@
 Functionality flag: I2C_FUNC_SMBUS_WRITE_BYTE_DATA
 
 
-SMBus Write Word:  i2c_smbus_write_word_data()
-==============================================
+SMBus Write Word
+================
+
+Implemented by i2c_smbus_write_word_data()
 
 This is the opposite of the Read Word operation. 16 bits
-of data is written to a device, to the designated register that is
-specified through the Comm byte.::
+of data are written to a device, to the designated register that is
+specified through the Comm byte::
 
   S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A] P
 
 Functionality flag: I2C_FUNC_SMBUS_WRITE_WORD_DATA
 
-Note the convenience function i2c_smbus_write_word_swapped is
+Note the convenience function i2c_smbus_write_word_swapped() is
 available for writes where the two data bytes are the other way
 around (not SMBus compliant, but very popular.)
 
 
-SMBus Process Call:
-===================
+SMBus Process Call
+==================
 
 This command selects a device register (through the Comm byte), sends
 16 bits of data to it, and reads 16 bits of data in return::
@@ -157,8 +169,10 @@
 Functionality flag: I2C_FUNC_SMBUS_PROC_CALL
 
 
-SMBus Block Read:  i2c_smbus_read_block_data()
-==============================================
+SMBus Block Read
+================
+
+Implemented by i2c_smbus_read_block_data()
 
 This command reads a block of up to 32 bytes from a device, from a
 designated register that is specified through the Comm byte. The amount
@@ -172,8 +186,10 @@
 Functionality flag: I2C_FUNC_SMBUS_READ_BLOCK_DATA
 
 
-SMBus Block Write:  i2c_smbus_write_block_data()
-================================================
+SMBus Block Write
+=================
+
+Implemented by i2c_smbus_write_block_data()
 
 The opposite of the Block Read command, this writes up to 32 bytes to
 a device, to a designated register that is specified through the
@@ -266,16 +282,19 @@
 I2C Block Transactions
 ======================
 
-The following I2C block transactions are supported by the
-SMBus layer and are described here for completeness.
-They are *NOT* defined by the SMBus specification.
+The following I2C block transactions are similar to the SMBus Block Read
+and Write operations, except these do not have a Count byte. They are
+supported by the SMBus layer and are described here for completeness, but
+they are *NOT* defined by the SMBus specification.
 
 I2C block transactions do not limit the number of bytes transferred
 but the SMBus layer places a limit of 32 bytes.
 
 
-I2C Block Read:  i2c_smbus_read_i2c_block_data()
-================================================
+I2C Block Read
+==============
+
+Implemented by i2c_smbus_read_i2c_block_data()
 
 This command reads a block of bytes from a device, from a
 designated register that is specified through the Comm byte::
@@ -286,8 +305,10 @@
 Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK
 
 
-I2C Block Write:  i2c_smbus_write_i2c_block_data()
-==================================================
+I2C Block Write
+===============
+
+Implemented by i2c_smbus_write_i2c_block_data()
 
 The opposite of the Block Read command, this writes bytes to
 a device, to a designated register that is specified through the
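
Each of the transactions above maps to one helper function plus one
functionality flag, and the recommended pattern is to check the flags once
(typically at probe time) and then call the helpers. A brief sketch of that
pattern; the register offsets and foo_* names are invented for
illustration::

  #include <linux/errno.h>
  #include <linux/i2c.h>

  #define FOO_REG_STATUS	0x01	/* hypothetical register offsets */
  #define FOO_REG_LIMIT		0x02

  /* Called once, e.g. at probe time. */
  static int foo_check_functionality(struct i2c_client *client)
  {
	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_READ_BYTE_DATA |
				     I2C_FUNC_SMBUS_WRITE_WORD_DATA))
		return -EOPNOTSUPP;

	return 0;
  }

  /* SMBus Read Byte: S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P */
  static int foo_read_status(struct i2c_client *client)
  {
	return i2c_smbus_read_byte_data(client, FOO_REG_STATUS);
  }

  /* SMBus Write Word: S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A] P */
  static int foo_set_limit(struct i2c_client *client, u16 limit)
  {
	return i2c_smbus_write_word_data(client, FOO_REG_LIMIT, limit);
  }
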
diff --git a/Documentation/i2c/summary.rst b/Documentation/i2c/summary.rst
index 3a24eac..ce72300 100644
--- a/Documentation/i2c/summary.rst
+++ b/Documentation/i2c/summary.rst
@@ -1,13 +1,19 @@
-=============
-I2C and SMBus
-=============
+=============================
+Introduction to I2C and SMBus
+=============================
 
-I2C (pronounce: I squared C) is a protocol developed by Philips. It is a
-slow two-wire protocol (variable speed, up to 400 kHz), with a high speed
-extension (3.4 MHz).  It provides an inexpensive bus for connecting many
-types of devices with infrequent or low bandwidth communications needs.
-I2C is widely used with embedded systems.  Some systems use variants that
-don't meet branding requirements, and so are not advertised as being I2C.
+I²C (pronounced I squared C, written I2C in the kernel documentation) is
+a protocol developed by Philips. It is a slow two-wire protocol (variable
+speed, up to 400 kHz), with a high speed extension (3.4 MHz).  It provides
+an inexpensive bus for connecting many types of devices with infrequent or
+low bandwidth communications needs.  I2C is widely used with embedded
+systems.  Some systems use variants that don't meet branding requirements,
+and so are not advertised as being I2C but come under different names,
+e.g. TWI (Two Wire Interface), IIC.
+
+The official I2C specification is the `"I2C-bus specification and user
+manual" (UM10204) <https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_
+published by NXP Semiconductors.
 
 SMBus (System Management Bus) is based on the I2C protocol, and is mostly
 a subset of I2C protocols and signaling.  Many I2C devices will work on an
@@ -25,21 +31,29 @@
 Terminology
 ===========
 
-When we talk about I2C, we use the following terms::
+Using the terminology from the official documentation, the I2C bus connects
+one or more *master* chips and one or more *slave* chips.
 
-  Bus    -> Algorithm
-            Adapter
-  Device -> Driver
-            Client
+.. kernel-figure::  i2c.svg
+   :alt:    Simple I2C bus with one master and 3 slaves
 
-An Algorithm driver contains general code that can be used for a whole class
-of I2C adapters. Each specific adapter driver either depends on one algorithm
-driver, or includes its own implementation.
+   Simple I2C bus
 
-A Driver driver (yes, this sounds ridiculous, sorry) contains the general
-code to access some type of device. Each detected device gets its own
-data in the Client structure. Usually, Driver and Client are more closely
-integrated than Algorithm and Adapter.
+A **master** chip is a node that starts communications with slaves. In the
+Linux kernel implementation it is called an **adapter** or bus. Adapter
+drivers are in the ``drivers/i2c/busses/`` subdirectory.
 
-For a given configuration, you will need a driver for your I2C bus, and
-drivers for your I2C devices (usually one driver for each device).
+An **algorithm** contains general code that can be used to implement a
+whole class of I2C adapters. Each specific adapter driver either depends on
+an algorithm driver in the ``drivers/i2c/algos/`` subdirectory, or includes
+its own implementation.
+
+A **slave** chip is a node that responds to communications when addressed
+by the master. In Linux it is called a **client**. Client drivers are kept
+in a directory specific to the feature they provide, for example
+``drivers/gpio/`` for GPIO expanders and ``drivers/media/i2c/`` for
+video-related chips.
+
+For the example configuration shown in the figure above, you will need a
+driver for your I2C adapter, and drivers for your I2C devices (usually one
+driver for each device).
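
In kernel terms, supporting a slave chip therefore means registering a
struct i2c_driver, which the driver core binds to matching clients created
by the adapter or by the firmware description. A minimal, hypothetical
registration sketch follows; note that the callback signatures shown use
the two-argument probe() convention in effect around the time of this
documentation rework and have since evolved::

  #include <linux/i2c.h>
  #include <linux/module.h>

  static int foo_probe(struct i2c_client *client,
		       const struct i2c_device_id *id)
  {
	dev_info(&client->dev, "foo client bound at 0x%02x\n", client->addr);
	return 0;
  }

  static int foo_remove(struct i2c_client *client)
  {
	return 0;
  }

  static const struct i2c_device_id foo_ids[] = {
	{ "foo", 0 },
	{ }
  };
  MODULE_DEVICE_TABLE(i2c, foo_ids);

  static struct i2c_driver foo_driver = {
	.driver		= { .name = "foo" },
	.probe		= foo_probe,
	.remove		= foo_remove,
	.id_table	= foo_ids,
  };
  module_i2c_driver(foo_driver);

  MODULE_LICENSE("GPL");
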
diff --git a/Documentation/i2c/writing-clients.rst b/Documentation/i2c/writing-clients.rst
index ced309b5..978cc82 100644
--- a/Documentation/i2c/writing-clients.rst
+++ b/Documentation/i2c/writing-clients.rst
@@ -1,6 +1,6 @@
-===================
-Writing I2C Clients
-===================
+===============================
+Implementing I2C device drivers
+===============================
 
 This is a small guide for those who want to write kernel drivers for I2C
 or SMBus devices, using Linux as the protocol host/master (not slave).
@@ -95,7 +95,7 @@
 client.
 
 I have found it useful to define foo_read and foo_write functions for this.
-For some cases, it will be easier to call the i2c functions directly,
+For some cases, it will be easier to call the I2C functions directly,
 but many chips have some kind of register-value idea that can easily
 be encapsulated.
 
@@ -175,8 +175,8 @@
 If you know for a fact that an I2C device is connected to a given I2C bus,
 you can instantiate that device by simply filling an i2c_board_info
 structure with the device address and driver name, and calling
-i2c_new_device().  This will create the device, then the driver core will
-take care of finding the right driver and will call its probe() method.
+i2c_new_client_device().  This will create the device, then the driver core
+will take care of finding the right driver and will call its probe() method.
 If a driver supports different device types, you can specify the type you
 want using the type field.  You can also specify an IRQ and platform data
 if needed.
@@ -186,14 +186,14 @@
 example, where the same driver supports dozens of slightly different
 models, and I2C device addresses change from one model to the next.  In
 that case, you can use the i2c_new_scanned_device() variant, which is
-similar to i2c_new_device(), except that it takes an additional list of
-possible I2C addresses to probe.  A device is created for the first
+similar to i2c_new_client_device(), except that it takes an additional list
+of possible I2C addresses to probe.  A device is created for the first
 responsive address in the list.  If you expect more than one device to be
 present in the address range, simply call i2c_new_scanned_device() that
 many times.
 
-The call to i2c_new_device() or i2c_new_scanned_device() typically happens
-in the I2C bus driver. You may want to save the returned i2c_client
+The call to i2c_new_client_device() or i2c_new_scanned_device() typically
+happens in the I2C bus driver. You may want to save the returned i2c_client
 reference for later use.
 
 
@@ -236,11 +236,11 @@
 Device Deletion
 ---------------
 
-Each I2C device which has been created using i2c_new_device() or
-i2c_new_scanned_device() can be unregistered by calling
+Each I2C device which has been created using i2c_new_client_device()
+or i2c_new_scanned_device() can be unregistered by calling
 i2c_unregister_device().  If you don't call it explicitly, it will be
-called automatically before the underlying I2C bus itself is removed, as a
-device can't survive its parent in the device driver model.
+called automatically before the underlying I2C bus itself is removed,
+as a device can't survive its parent in the device driver model.
 
 
 Initializing the driver
@@ -344,7 +344,7 @@
 	int i2c_master_recv(struct i2c_client *client, char *buf, int count);
 
 These routines read and write some bytes from/to a client. The client
-contains the i2c address, so you do not have to include it. The second
+contains the I2C address, so you do not have to include it. The second
 parameter contains the bytes to read/write, the third the number of bytes
 to read/write (must be less than the length of the buffer, also should be
 less than 64k since msg.len is u16.) Returned is the actual number of bytes
@@ -357,9 +357,9 @@
 
 This sends a series of messages. Each message can be a read or write,
 and they can be mixed in any way. The transactions are combined: no
-stop bit is sent between transaction. The i2c_msg structure contains
-for each message the client address, the number of bytes of the message
-and the message data itself.
+stop condition is issued between transactions. The i2c_msg structure
+contains for each message the client address, the number of bytes of the
+message and the message data itself.
 
 You can read the file ``i2c-protocol`` for more information about the
 actual I2C protocol.
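
The combined transfer described above (several struct i2c_msg entries with
no stop condition in between) is what drivers commonly use for a "write the
register address, then read it back" access at the plain I2C level. A short
sketch of that pattern; the foo_ prefix and parameters are invented for
illustration::

  #include <linux/errno.h>
  #include <linux/i2c.h>
  #include <linux/kernel.h>

  /*
   * Read @len bytes starting at register @reg using one combined transfer:
   * a write message carrying the register address, then a read message,
   * joined by a repeated start (no stop condition between them).
   */
  static int foo_read_regs(struct i2c_client *client, u8 reg, u8 *buf, u16 len)
  {
	struct i2c_msg msgs[2] = {
		{
			.addr	= client->addr,
			.flags	= 0,		/* write */
			.len	= 1,
			.buf	= &reg,
		},
		{
			.addr	= client->addr,
			.flags	= I2C_M_RD,	/* read */
			.len	= len,
			.buf	= buf,
		},
	};
	int ret;

	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
	if (ret < 0)
		return ret;

	/* i2c_transfer() returns the number of messages actually transferred. */
	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
  }
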
diff --git a/Documentation/isdn/avmb1.rst b/Documentation/isdn/avmb1.rst
deleted file mode 100644
index de3961e..0000000
--- a/Documentation/isdn/avmb1.rst
+++ /dev/null
@@ -1,246 +0,0 @@
-================================
-Driver for active AVM Controller
-================================
-
-The driver provides a kernel capi2.0 Interface (kernelcapi) and
-on top of this a User-Level-CAPI2.0-interface (capi)
-and a driver to connect isdn4linux with CAPI2.0 (capidrv).
-The lowlevel interface can be used to implement a CAPI2.0
-also for passive cards since July 1999.
-
-The author can be reached at calle@calle.in-berlin.de.
-The command avmcapictrl is part of the isdn4k-utils.
-t4-files can be found at ftp://ftp.avm.de/cardware/b1/linux/firmware
-
-Currently supported cards:
-
-	- B1 ISA (all versions)
-	- B1 PCI
-	- T1/T1B (HEMA card)
-	- M1
-	- M2
-	- B1 PCMCIA
-
-Installing
-----------
-
-You need at least /dev/capi20 to load the firmware.
-
-::
-
-    mknod /dev/capi20 c 68 0
-    mknod /dev/capi20.00 c 68 1
-    mknod /dev/capi20.01 c 68 2
-    .
-    .
-    .
-    mknod /dev/capi20.19 c 68 20
-
-Running
--------
-
-To use the card you need the t4-files to download the firmware.
-AVM GmbH provides several t4-files for the different D-channel
-protocols (b1.t4 for Euro-ISDN). Install these file in /lib/isdn.
-
-if you configure as modules load the modules this way::
-
-    insmod /lib/modules/current/misc/capiutil.o
-    insmod /lib/modules/current/misc/b1.o
-    insmod /lib/modules/current/misc/kernelcapi.o
-    insmod /lib/modules/current/misc/capidrv.o
-    insmod /lib/modules/current/misc/capi.o
-
-if you have an B1-PCI card load the module b1pci.o::
-
-    insmod /lib/modules/current/misc/b1pci.o
-
-and load the firmware with::
-
-    avmcapictrl load /lib/isdn/b1.t4 1
-
-if you have an B1-ISA card load the module b1isa.o
-and add the card by calling::
-
-    avmcapictrl add 0x150 15
-
-and load the firmware by calling::
-
-    avmcapictrl load /lib/isdn/b1.t4 1
-
-if you have an T1-ISA card load the module t1isa.o
-and add the card by calling::
-
-    avmcapictrl add 0x450 15 T1 0
-
-and load the firmware by calling::
-
-    avmcapictrl load /lib/isdn/t1.t4 1
-
-if you have an PCMCIA card (B1/M1/M2) load the module b1pcmcia.o
-before you insert the card.
-
-Leased Lines with B1
---------------------
-
-Init card and load firmware.
-
-For an D64S use "FV: 1" as phone number
-
-For an D64S2 use "FV: 1" and "FV: 2" for multilink
-or "FV: 1,2" to use CAPI channel bundling.
-
-/proc-Interface
------------------
-
-/proc/capi::
-
-  dr-xr-xr-x   2 root     root            0 Jul  1 14:03 .
-  dr-xr-xr-x  82 root     root            0 Jun 30 19:08 ..
-  -r--r--r--   1 root     root            0 Jul  1 14:03 applications
-  -r--r--r--   1 root     root            0 Jul  1 14:03 applstats
-  -r--r--r--   1 root     root            0 Jul  1 14:03 capi20
-  -r--r--r--   1 root     root            0 Jul  1 14:03 capidrv
-  -r--r--r--   1 root     root            0 Jul  1 14:03 controller
-  -r--r--r--   1 root     root            0 Jul  1 14:03 contrstats
-  -r--r--r--   1 root     root            0 Jul  1 14:03 driver
-  -r--r--r--   1 root     root            0 Jul  1 14:03 ncci
-  -r--r--r--   1 root     root            0 Jul  1 14:03 users
-
-/proc/capi/applications:
-   applid level3cnt datablkcnt datablklen ncci-cnt recvqueuelen
-	level3cnt:
-	    capi_register parameter
-	datablkcnt:
-	    capi_register parameter
-	ncci-cnt:
-	    current number of nccis (connections)
-	recvqueuelen:
-	    number of messages on receive queue
-
-   for example::
-
-	1 -2 16 2048 1 0
-	2 2 7 2048 1 0
-
-/proc/capi/applstats:
-   applid recvctlmsg nrecvdatamsg nsentctlmsg nsentdatamsg
-	recvctlmsg:
-	    capi messages received without DATA_B3_IND
-	recvdatamsg:
-	    capi DATA_B3_IND received
-	sentctlmsg:
-	    capi messages sent without DATA_B3_REQ
-	sentdatamsg:
-	    capi DATA_B3_REQ sent
-
-   for example::
-
-	1 2057 1699 1721 1699
-
-/proc/capi/capi20: statistics of capi.o (/dev/capi20)
-    minor nopen nrecvdropmsg nrecvctlmsg nrecvdatamsg sentctlmsg sentdatamsg
-	minor:
-	    minor device number of capi device
-	nopen:
-	    number of calls to devices open
-	nrecvdropmsg:
-	    capi messages dropped (messages in recvqueue in close)
-	nrecvctlmsg:
-	    capi messages received without DATA_B3_IND
-	nrecvdatamsg:
-	    capi DATA_B3_IND received
-	nsentctlmsg:
-	    capi messages sent without DATA_B3_REQ
-	nsentdatamsg:
-	    capi DATA_B3_REQ sent
-
-   for example::
-
-	1 2 18 0 16 2
-
-/proc/capi/capidrv: statistics of capidrv.o (capi messages)
-    nrecvctlmsg nrecvdatamsg sentctlmsg sentdatamsg
-	nrecvctlmsg:
-	    capi messages received without DATA_B3_IND
-	nrecvdatamsg:
-	    capi DATA_B3_IND received
-	nsentctlmsg:
-	    capi messages sent without DATA_B3_REQ
-	nsentdatamsg:
-	    capi DATA_B3_REQ sent
-
-   for example:
-	2780 2226 2256 2226
-
-/proc/capi/controller:
-   controller drivername state cardname   controllerinfo
-
-   for example::
-
-	1 b1pci      running  b1pci-e000       B1 3.07-01 0xe000 19
-	2 t1isa      running  t1isa-450        B1 3.07-01 0x450 11 0
-	3 b1pcmcia   running  m2-150           B1 3.07-01 0x150 5
-
-/proc/capi/contrstats:
-    controller nrecvctlmsg nrecvdatamsg sentctlmsg sentdatamsg
-	nrecvctlmsg:
-	    capi messages received without DATA_B3_IND
-	nrecvdatamsg:
-	    capi DATA_B3_IND received
-	nsentctlmsg:
-	    capi messages sent without DATA_B3_REQ
-	nsentdatamsg:
-	    capi DATA_B3_REQ sent
-
-   for example::
-
-	1 2845 2272 2310 2274
-	2 2 0 2 0
-	3 2 0 2 0
-
-/proc/capi/driver:
-   drivername ncontroller
-
-   for example::
-
-	b1pci                            1
-	t1isa                            1
-	b1pcmcia                         1
-	b1isa                            0
-
-/proc/capi/ncci:
-   apllid ncci winsize sendwindow
-
-   for example::
-
-	1 0x10101 8 0
-
-/proc/capi/users: kernelmodules that use the kernelcapi.
-   name
-
-   for example::
-
-	capidrv
-	capi20
-
-Questions
----------
-
-Check out the FAQ (ftp.isdn4linux.de) or subscribe to the
-linux-avmb1@calle.in-berlin.de mailing list by sending
-a mail to majordomo@calle.in-berlin.de with
-subscribe linux-avmb1
-in the body.
-
-German documentation and several scripts can be found at
-ftp://ftp.avm.de/cardware/b1/linux/
-
-Bugs
-----
-
-If you find any please let me know.
-
-Enjoy,
-
-Carsten Paeth (calle@calle.in-berlin.de)
diff --git a/Documentation/isdn/gigaset.rst b/Documentation/isdn/gigaset.rst
deleted file mode 100644
index 98b4ec5..0000000
--- a/Documentation/isdn/gigaset.rst
+++ /dev/null
@@ -1,465 +0,0 @@
-==========================
-GigaSet 307x Device Driver
-==========================
-
-1.   Requirements
-=================
-
-1.1. Hardware
--------------
-
-     This driver supports the connection of the Gigaset 307x/417x family of
-     ISDN DECT bases via Gigaset M101 Data, Gigaset M105 Data or direct USB
-     connection. The following devices are reported to be compatible:
-
-     Bases:
-       - Siemens Gigaset 3070/3075 isdn
-       - Siemens Gigaset 4170/4175 isdn
-       - Siemens Gigaset SX205/255
-       - Siemens Gigaset SX353
-       - T-Com Sinus 45 [AB] isdn
-       - T-Com Sinus 721X[A] [SE]
-       - Vox Chicago 390 ISDN (KPN Telecom)
-
-     RS232 data boxes:
-       - Siemens Gigaset M101 Data
-       - T-Com Sinus 45 Data 1
-
-     USB data boxes:
-       - Siemens Gigaset M105 Data
-       - Siemens Gigaset USB Adapter DECT
-       - T-Com Sinus 45 Data 2
-       - T-Com Sinus 721 data
-       - Chicago 390 USB (KPN)
-
-     See also http://www.erbze.info/sinus_gigaset.htm
-       (archived at https://web.archive.org/web/20100717020421/http://www.erbze.info:80/sinus_gigaset.htm ) and
-	http://gigaset307x.sourceforge.net/
-
-     We had also reports from users of Gigaset M105 who could use the drivers
-     with SX 100 and CX 100 ISDN bases (only in unimodem mode, see section 2.5.)
-     If you have another device that works with our driver, please let us know.
-
-     Chances of getting an USB device to work are good if the output of::
-
-	lsusb
-
-     at the command line contains one of the following::
-
-	ID 0681:0001
-	ID 0681:0002
-	ID 0681:0009
-	ID 0681:0021
-	ID 0681:0022
-
-1.2. Software
--------------
-
-     The driver works with the Kernel CAPI subsystem and can be used with any
-     software which is able to use CAPI 2.0 for ISDN connections (voice or data).
-
-     There are some user space tools available at
-     https://sourceforge.net/projects/gigaset307x/
-     which provide access to additional device specific functions like SMS,
-     phonebook or call journal.
-
-
-2.   How to use the driver
-==========================
-
-2.1. Modules
-------------
-
-     For the devices to work, the proper kernel modules have to be loaded.
-     This normally happens automatically when the system detects the USB
-     device (base, M105) or when the line discipline is attached (M101). It
-     can also be triggered manually using the modprobe(8) command, for example
-     for troubleshooting or to pass module parameters.
-
-     The module ser_gigaset provides a serial line discipline N_GIGASET_M101
-     which uses the regular serial port driver to access the device, and must
-     therefore be attached to the serial device to which the M101 is connected.
-     The ldattach(8) command (included in util-linux-ng release 2.14 or later)
-     can be used for that purpose, for example::
-
-	ldattach GIGASET_M101 /dev/ttyS1
-
-     This will open the device file, attach the line discipline to it, and
-     then sleep in the background, keeping the device open so that the line
-     discipline remains active. To deactivate it, kill the daemon, for example
-     with::
-
-	killall ldattach
-
-     before disconnecting the device. To have this happen automatically at
-     system startup/shutdown on an LSB compatible system, create and activate
-     an appropriate LSB startup script /etc/init.d/gigaset. (The init name
-     'gigaset' is officially assigned to this project by LANANA.)
-     Alternatively, just add the 'ldattach' command line to /etc/rc.local.
-
-     The modules accept the following parameters:
-
-	=============== ========== ==========================================
-	Module		Parameter  Meaning
-
-	gigaset		debug	   debug level (see section 3.2.)
-
-			startmode  initial operation mode (see section 2.5.):
-	bas_gigaset )		   1=CAPI (default), 0=Unimodem
-	ser_gigaset )
-	usb_gigaset )	cidmode    initial Call-ID mode setting (see section
-				   2.5.): 1=on (default), 0=off
-
-	=============== ========== ==========================================
-
-     Depending on your distribution you may want to create a separate module
-     configuration file like /etc/modprobe.d/gigaset.conf for these.
-
-2.2. Device nodes for user space programs
------------------------------------------
-
-     The device can be accessed from user space (eg. by the user space tools
-     mentioned in 1.2.) through the device nodes:
-
-     - /dev/ttyGS0 for M101 (RS232 data boxes)
-     - /dev/ttyGU0 for M105 (USB data boxes)
-     - /dev/ttyGB0 for the base driver (direct USB connection)
-
-     If you connect more than one device of a type, they will get consecutive
-     device nodes, eg. /dev/ttyGU1 for a second M105.
-
-     You can also set a "default device" for the user space tools to use when
-     no device node is given as parameter, by creating a symlink /dev/ttyG to
-     one of them, eg.::
-
-	ln -s /dev/ttyGB0 /dev/ttyG
-
-     The devices accept the following device specific ioctl calls
-     (defined in gigaset_dev.h):
-
-     ``ioctl(int fd, GIGASET_REDIR, int *cmd);``
-
-     If cmd==1, the device is set to be controlled exclusively through the
-     character device node; access from the ISDN subsystem is blocked.
-
-     If cmd==0, the device is set to be used from the ISDN subsystem and does
-     not communicate through the character device node.
-
-     ``ioctl(int fd, GIGASET_CONFIG, int *cmd);``
-
-     (ser_gigaset and usb_gigaset only)
-
-     If cmd==1, the device is set to adapter configuration mode where commands
-     are interpreted by the M10x DECT adapter itself instead of being
-     forwarded to the base station. In this mode, the device accepts the
-     commands described in Siemens document "AT-Kommando Alignment M10x Data"
-     for setting the operation mode, associating with a base station and
-     querying parameters like field strengh and signal quality.
-
-     Note that there is no ioctl command for leaving adapter configuration
-     mode and returning to regular operation. In order to leave adapter
-     configuration mode, write the command ATO to the device.
-
-     ``ioctl(int fd, GIGASET_BRKCHARS, unsigned char brkchars[6]);``
-
-     (usb_gigaset only)
-
-     Set the break characters on an M105's internal serial adapter to the six
-     bytes stored in brkchars[]. Unused bytes should be set to zero.
-
-     ioctl(int fd, GIGASET_VERSION, unsigned version[4]);
-     Retrieve version information from the driver. version[0] must be set to
-     one of:
-
-     - GIGVER_DRIVER: retrieve driver version
-     - GIGVER_COMPAT: retrieve interface compatibility version
-     - GIGVER_FWBASE: retrieve the firmware version of the base
-
-     Upon return, version[] is filled with the requested version information.
-
-2.3. CAPI
----------
-
-     The devices will show up as CAPI controllers as soon as the
-     corresponding driver module is loaded, and can then be used with
-     CAPI 2.0 kernel and user space applications. For user space access,
-     the module capi.ko must be loaded.
-
-     Most distributions handle loading and unloading of the various CAPI
-     modules automatically via the command capiinit(1) from the capi4k-utils
-     package or a similar mechanism. Note that capiinit(1) cannot unload the
-     Gigaset drivers because it doesn't support more than one module per
-     driver.
-
-2.5. Unimodem mode
-------------------
-
-     In this mode the device works like a modem connected to a serial port
-     (the /dev/ttyGU0, ... mentioned above) which understands the commands::
-
-	 ATZ                 init, reset
-	     => OK or ERROR
-	 ATD
-	 ATDT                dial
-	     => OK, CONNECT,
-		BUSY,
-		NO DIAL TONE,
-		NO CARRIER,
-		NO ANSWER
-	 <pause>+++<pause>   change to command mode when connected
-	 ATH                 hangup
-
-     You can use some configuration tool of your distribution to configure this
-     "modem" or configure pppd/wvdial manually. There are some example ppp
-     configuration files and chat scripts in the gigaset-VERSION/ppp directory
-     in the driver packages from https://sourceforge.net/projects/gigaset307x/.
-     Please note that the USB drivers are not able to change the state of the
-     control lines. This means you must use "Stupid Mode" if you are using
-     wvdial or you should use the nocrtscts option of pppd.
-     You must also assure that the ppp_async module is loaded with the parameter
-     flag_time=0. You can do this e.g. by adding a line like::
-
-	options ppp_async flag_time=0
-
-     to an appropriate module configuration file, like::
-
-	/etc/modprobe.d/gigaset.conf.
-
-     Unimodem mode is needed for making some devices [e.g. SX100] work which
-     do not support the regular Gigaset command set. If debug output (see
-     section 3.2.) shows something like this when dialing::
-
-	 CMD Received: ERROR
-	 Available Params: 0
-	 Connection State: 0, Response: -1
-	 gigaset_process_response: resp_code -1 in ConState 0 !
-	 Timeout occurred
-
-     then switching to unimodem mode may help.
-
-     If you have installed the command line tool gigacontr, you can enter
-     unimodem mode using::
-
-	 gigacontr --mode unimodem
-
-     You can switch back using::
-
-	 gigacontr --mode isdn
-
-     You can also put the driver directly into Unimodem mode when it's loaded,
-     by passing the module parameter startmode=0 to the hardware specific
-     module, e.g.::
-
-	modprobe usb_gigaset startmode=0
-
-     or by adding a line like::
-
-	options usb_gigaset startmode=0
-
-     to an appropriate module configuration file, like::
-
-	/etc/modprobe.d/gigaset.conf
-
-2.6. Call-ID (CID) mode
------------------------
-
-     Call-IDs are numbers used to tag commands to, and responses from, the
-     Gigaset base in order to support the simultaneous handling of multiple
-     ISDN calls. Their use can be enabled ("CID mode") or disabled ("Unimodem
-     mode"). Without Call-IDs (in Unimodem mode), only a very limited set of
-     functions is available. It allows outgoing data connections only, but
-     does not signal incoming calls or other base events.
-
-     DECT cordless data devices (M10x) permanently occupy the cordless
-     connection to the base while Call-IDs are activated. As the Gigaset
-     bases only support one DECT data connection at a time, this prevents
-     other DECT cordless data devices from accessing the base.
-
-     During active operation, the driver switches to the necessary mode
-     automatically. However, for the reasons above, the mode chosen when
-     the device is not in use (idle) can be selected by the user.
-
-     - If you want to receive incoming calls, you can use the default
-       settings (CID mode).
-     - If you have several DECT data devices (M10x) which you want to use
-       in turn, select Unimodem mode by passing the parameter "cidmode=0" to
-       the appropriate driver module (ser_gigaset or usb_gigaset).
-
-     If you want both of these at once, you are out of luck.
-
-     You can also use the tty class parameter "cidmode" of the device to
-     change its CID mode while the driver is loaded, eg.::
-
-	echo 0 > /sys/class/tty/ttyGU0/cidmode
-
-2.7. Dialing Numbers
---------------------
-provided by an application for dialing out must
-     be a public network number according to the local dialing plan, without
-     any dial prefix for getting an outside line.
-
-     Internal calls can be made by providing an internal extension number
-     prefixed with ``**`` (two asterisks) as the called party number. So to dial
-     eg. the first registered DECT handset, give ``**11`` as the called party
-     number. Dialing ``***`` (three asterisks) calls all extensions
-     simultaneously (global call).
-
-     Unimodem mode does not support internal calls.
-
-2.8. Unregistered Wireless Devices (M101/M105)
-----------------------------------------------
-
-     The main purpose of the ser_gigaset and usb_gigaset drivers is to allow
-     the M101 and M105 wireless devices to be used as ISDN devices for ISDN
-     connections through a Gigaset base. Therefore they assume that the device
-     is registered to a DECT base.
-
-     If the M101/M105 device is not registered to a base, initialization of
-     the device fails, and a corresponding error message is logged by the
-     driver. In that situation, a restricted set of functions is available
-     which includes, in particular, those necessary for registering the device
-     to a base or for switching it between Fixed Part and Portable Part
-     modes. See the gigacontr(8) manpage for details.
-
-3.   Troubleshooting
-====================
-
-3.1. Solutions to frequently reported problems
-----------------------------------------------
-
-     Problem:
-	You have a slow provider and isdn4linux gives up dialing too early.
-     Solution:
-	Load the isdn module using the dialtimeout option. You can do this e.g.
-	by adding a line like::
-
-	   options isdn dialtimeout=15
-
-	to /etc/modprobe.d/gigaset.conf or a similar file.
-
-     Problem:
-	The isdnlog program emits error messages or just doesn't work.
-     Solution:
-	Isdnlog supports only the HiSax driver. Do not attempt to use it with
-	other drivers such as Gigaset.
-
-     Problem:
-	You have two or more DECT data adapters (M101/M105) and only the
-	first one you turn on works.
-     Solution:
-	Select Unimodem mode for all DECT data adapters. (see section 2.5.)
-
-     Problem:
-	Messages like this::
-
-	    usb_gigaset 3-2:1.0: Could not initialize the device.
-
-	appear in your syslog.
-     Solution:
-	Check whether your M10x wireless device is correctly registered to the
-	Gigaset base. (see section 2.7.)
-
-3.2. Telling the driver to provide more information
----------------------------------------------------
-     Building the driver with the "Gigaset debugging" kernel configuration
-     option (CONFIG_GIGASET_DEBUG) gives it the ability to produce additional
-     information useful for debugging.
-
-     You can control the amount of debugging information the driver produces by
-     writing an appropriate value to /sys/module/gigaset/parameters/debug,
-     e.g.::
-
-	echo 0 > /sys/module/gigaset/parameters/debug
-
-     switches off debugging output completely,
-
-     ::
-
-	echo 0x302020 > /sys/module/gigaset/parameters/debug
-
-     enables a reasonable set of debugging output messages. These values are
-     bit patterns where every bit controls a certain type of debugging output.
-     See the constants DEBUG_* in the source file gigaset.h for details.
-
-     The initial value can be set using the debug parameter when loading the
-     module "gigaset", e.g. by adding a line::
-
-	options gigaset debug=0
-
-     to your module configuration file, eg. /etc/modprobe.d/gigaset.conf
-
-     Generated debugging information can be found
-     - as output of the command::
-
-	 dmesg
-
-     - in system log files written by your syslog daemon, usually
-       in /var/log/, e.g. /var/log/messages.
-
-3.3. Reporting problems and bugs
---------------------------------
-     If you can't solve problems with the driver on your own, feel free to
-     use one of the forums, bug trackers, or mailing lists on
-
-	 https://sourceforge.net/projects/gigaset307x
-
-     or write an electronic mail to the maintainers.
-
-     Try to provide as much information as possible, such as
-
-     - distribution
-     - kernel version (uname -r)
-     - gcc version (gcc --version)
-     - hardware architecture (uname -m, ...)
-     - type and firmware version of your device (base and wireless module,
-       if any)
-     - output of "lsusb -v" (if using an USB device)
-     - error messages
-     - relevant system log messages (it would help if you activate debug
-       output as described in 3.2.)
-
-     For help with general configuration problems not specific to our driver,
-     such as isdn4linux and network configuration issues, please refer to the
-     appropriate forums and newsgroups.
-
-3.4. Reporting problem solutions
---------------------------------
-     If you solved a problem with our drivers, wrote startup scripts for your
-     distribution, ... feel free to contact us (using one of the places
-     mentioned in 3.3.). We'd like to add scripts, hints, documentation
-     to the driver and/or the project web page.
-
-
-4.   Links, other software
-==========================
-
-     - Sourceforge project developing this driver and associated tools
-	 https://sourceforge.net/projects/gigaset307x
-     - Yahoo! Group on the Siemens Gigaset family of devices
-	 https://de.groups.yahoo.com/group/Siemens-Gigaset
-     - Siemens Gigaset/T-Sinus compatibility table
-	 http://www.erbze.info/sinus_gigaset.htm
-	    (archived at https://web.archive.org/web/20100717020421/http://www.erbze.info:80/sinus_gigaset.htm )
-
-
-5.   Credits
-============
-
-     Thanks to
-
-     Karsten Keil
-	for his help with isdn4linux
-     Deti Fliegl
-	for his base driver code
-     Dennis Dietrich
-	for his kernel 2.6 patches
-     Andreas Rummel
-	for his work and logs to get unimodem mode working
-     Andreas Degert
-	for his logs and patches to get cx 100 working
-     Dietrich Feist
-	for his generous donation of one M105 and two M101 cordless adapters
-     Christoph Schweers
-	for his generous donation of a M34 device
-
-     and all the other people who sent logs and other information.
diff --git a/Documentation/isdn/hysdn.rst b/Documentation/isdn/hysdn.rst
deleted file mode 100644
index 0a168d1..0000000
--- a/Documentation/isdn/hysdn.rst
+++ /dev/null
@@ -1,196 +0,0 @@
-============
-Hysdn Driver
-============
-
-The hysdn driver has been written by
-Werner Cornelius (werner@isdn4linux.de or werner@titro.de)
-for Hypercope GmbH Aachen Germany. Hypercope agreed to publish this driver
-under the GNU General Public License.
-
-The CAPI 2.0-support was added by Ulrich Albrecht (ualbrecht@hypercope.de)
-for Hypercope GmbH Aachen, Germany.
-
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-.. Table of contents
-
-    1. About the driver
-
-    2. Loading/Unloading the driver
-
-    3. Entries in the /proc filesystem
-
-    4. The /proc/net/hysdn/cardconfX file
-
-    5. The /proc/net/hysdn/cardlogX file
-
-    6. Where to get additional info and help
-
-
-1. About the driver
-===================
-
-   The drivers/isdn/hysdn subdir contains a driver for HYPERCOPEs active
-   PCI isdn cards Champ, Ergo and Metro. To enable support for this cards
-   enable ISDN support in the kernel config and support for HYSDN cards in
-   the active cards submenu. The driver may only be compiled and used if
-   support for loadable modules and the process filesystem have been enabled.
-
-   These cards provide two different interfaces to the kernel. Without the
-   optional CAPI 2.0 support, they register as ethernet card. IP-routing
-   to a ISDN-destination is performed on the card itself. All necessary
-   handlers for various protocols like ppp and others as well as config info
-   and firmware may be fetched from Hypercopes WWW-Site www.hypercope.de.
-
-   With CAPI 2.0 support enabled, the card can also be used as a CAPI 2.0
-   compliant devices with either CAPI 2.0 applications
-   (check isdn4k-utils) or -using the capidrv module- as a regular
-   isdn4linux device. This is done via the same mechanism as with the
-   active AVM cards and in fact uses the same module.
-
-
-2. Loading/Unloading the driver
-===============================
-
-   The module has no command line parameters and auto detects up to 10 cards
-   in the id-range 0-9.
-   If a loaded driver shall be unloaded all open files in the /proc/net/hysdn
-   subdir need to be closed and all ethernet interfaces allocated by this
-   driver must be shut down. Otherwise the module counter will avoid a module
-   unload.
-
-   If you are using the CAPI 2.0-interface, make sure to load/modprobe the
-   kernelcapi-module first.
-
-   If you plan to use the capidrv-link to isdn4linux, make sure to load
-   capidrv.o after all modules using this driver (i.e. after hysdn and
-   any avm-specific modules).
-
-3. Entries in the /proc filesystem
-==================================
-
-   When the module has been loaded it adds the directory hysdn in the
-   /proc/net tree. This directory contains exactly 2 file entries for each
-   card. One is called cardconfX and the other cardlogX, where X is the
-   card id number from 0 to 9.
-   The cards are numbered in the order found in the PCI config data.
-
-4. The /proc/net/hysdn/cardconfX file
-=====================================
-
-   This file may be read to get by everyone to get info about the cards type,
-   actual state, available features and used resources.
-   The first 3 entries (id, bus and slot) are PCI info fields, the following
-   type field gives the information about the cards type:
-
-   - 4 -> Ergo card (server card with 2 b-chans)
-   - 5 -> Metro card (server card with 4 or 8 b-chans)
-   - 6 -> Champ card (client card with 2 b-chans)
-
-   The following 3 fields show the hardware assignments for irq, iobase and the
-   dual ported memory (dp-mem).
-
-   The fields b-chans and fax-chans announce the available card resources of
-   this types for the user.
-
-   The state variable indicates the actual drivers state for this card with the
-   following assignments.
-
-   - 0 -> card has not been booted since driver load
-   - 1 -> card booting is actually in progess
-   - 2 -> card is in an error state due to a previous boot failure
-   - 3 -> card is booted and active
-
-   And the last field (device) shows the name of the ethernet device assigned
-   to this card. Up to the first successful boot this field only shows a -
-   to tell that no net device has been allocated up to now. Once a net device
-   has been allocated it remains assigned to this card, even if a card is
-   rebooted and an boot error occurs.
-
-   Writing to the cardconfX file boots the card or transfers config lines to
-   the cards firmware. The type of data is automatically detected when the
-   first data is written. Only root has write access to this file.
-   The firmware boot files are normally called hyclient.pof for client cards
-   and hyserver.pof for server cards.
-   After successfully writing the boot file, complete config files or single
-   config lines may be copied to this file.
-   If an error occurs the return value given to the writing process has the
-   following additional codes (decimal):
-
-   ==== ============================================
-   1000 Another process is currently bootng the card
-   1001 Invalid firmware header
-   1002 Boards dual-port RAM test failed
-   1003 Internal firmware handler error
-   1004 Boot image size invalid
-   1005 First boot stage (bootstrap loader) failed
-   1006 Second boot stage failure
-   1007 Timeout waiting for card ready during boot
-   1008 Operation only allowed in booted state
-   1009 Config line too long
-   1010 Invalid channel number
-   1011 Timeout sending config data
-   ==== ============================================
-
-   Additional info about error reasons may be fetched from the log output.
-
-5. The /proc/net/hysdn/cardlogX file
-====================================
-
-   The cardlogX file entry may be opened multiple for reading by everyone to
-   get the cards and drivers log data. Card messages always start with the
-   keyword LOG. All other lines are output from the driver.
-   The driver log data may be redirected to the syslog by selecting the
-   appropriate bitmask. The cards log messages will always be send to this
-   interface but never to the syslog.
-
-   A root user may write a decimal or hex (with 0x) value t this file to select
-   desired output options. As mentioned above the cards log dat is always
-   written to the cardlog file independent of the following options only used
-   to check and debug the driver itself:
-
-   For example::
-
-	echo "0x34560078" > /proc/net/hysdn/cardlog0
-
-   to output the hex log mask 34560078 for card 0.
-
-   The written value is regarded as an unsigned 32-Bit value, bit ored for
-   desired output. The following bits are already assigned:
-
-   ==========   ============================================================
-   0x80000000   All driver log data is alternatively via syslog
-   0x00000001   Log memory allocation errors
-   0x00000010   Firmware load start and close are logged
-   0x00000020   Log firmware record parser
-   0x00000040   Log every firmware write actions
-   0x00000080   Log all card related boot messages
-   0x00000100   Output all config data sent for debugging purposes
-   0x00000200   Only non comment config lines are shown wth channel
-   0x00000400   Additional conf log output
-   0x00001000   Log the asynchronous scheduler actions (config and log)
-   0x00100000   Log all open and close actions to /proc/net/hysdn/card files
-   0x00200000   Log all actions from /proc file entries
-   0x00010000   Log network interface init and deinit
-   ==========   ============================================================
-
-6. Where to get additional info and help
-========================================
-
-   If you have any problems concerning the driver or configuration contact
-   the Hypercope support team (support@hypercope.de) and or the authors
-   Werner Cornelius (werner@isdn4linux or cornelius@titro.de) or
-   Ulrich Albrecht (ualbrecht@hypercope.de).
diff --git a/Documentation/isdn/index.rst b/Documentation/isdn/index.rst
index 407e74b7..9622939 100644
--- a/Documentation/isdn/index.rst
+++ b/Documentation/isdn/index.rst
@@ -9,9 +9,6 @@
 
    interface_capi
 
-   avmb1
-   gigaset
-   hysdn
    m_isdn
 
    credits
diff --git a/Documentation/isdn/interface_capi.rst b/Documentation/isdn/interface_capi.rst
index 01a4b5a..fe24214 100644
--- a/Documentation/isdn/interface_capi.rst
+++ b/Documentation/isdn/interface_capi.rst
@@ -26,13 +26,6 @@
 2. Driver and Device Registration
 =================================
 
-CAPI drivers optionally register themselves with Kernel CAPI by calling the
-Kernel CAPI function register_capi_driver() with a pointer to a struct
-capi_driver. This structure must be filled with the name and revision of the
-driver, and optionally a pointer to a callback function, add_card(). The
-registration can be revoked by calling the function unregister_capi_driver()
-with a pointer to the same struct capi_driver.
-
 CAPI drivers must register each of the ISDN devices they control with Kernel
 CAPI by calling the Kernel CAPI function attach_capi_ctr() with a pointer to a
 struct capi_ctr before they can be used. This structure must be filled with
@@ -89,9 +82,6 @@
 	the name of the driver, as a zero-terminated ASCII string
 ``char revision[32]``
 	the revision number of the driver, as a zero-terminated ASCII string
-``int (*add_card)(struct capi_driver *driver, capicardparams *data)``
-	a callback function pointer (may be NULL)
-
 
 4.2 struct capi_ctr
 -------------------
@@ -178,12 +168,6 @@
 	pointer to a callback function returning the entry for the device in
 	the CAPI controller info table, /proc/capi/controller
 
-``const struct file_operations *proc_fops``
-	pointers to callback functions for the device's proc file
-	system entry, /proc/capi/controllers/<n>; pointer to the device's
-	capi_ctr structure is available from struct proc_dir_entry::data
-	which is available from struct inode.
-
 Note:
   Callback functions except send_message() are never called in interrupt
   context.
@@ -267,25 +251,10 @@
 	    _cmsg structure members.
 =========== =================================================================
 
-Functions capi_cmsg2message() and capi_message2cmsg() are provided to convert
-messages between their transport encoding described in the CAPI 2.0 standard
-and their _cmsg structure representation. Note that capi_cmsg2message() does
-not know or check the size of its destination buffer. The caller must make
-sure it is big enough to accommodate the resulting CAPI message.
-
 
 5. Lower Layer Interface Functions
 ==================================
 
-(declared in <linux/isdn/capilli.h>)
-
-::
-
-  void register_capi_driver(struct capi_driver *drvr)
-  void unregister_capi_driver(struct capi_driver *drvr)
-
-register/unregister a driver with Kernel CAPI
-
 ::
 
   int attach_capi_ctr(struct capi_ctr *ctrlr)
@@ -302,13 +271,6 @@
 
 ::
 
-  void capi_ctr_suspend_output(struct capi_ctr *ctrlr)
-  void capi_ctr_resume_output(struct capi_ctr *ctrlr)
-
-signal suspend/resume
-
-::
-
   void capi_ctr_handle_message(struct capi_ctr * ctrlr, u16 applid,
 			       struct sk_buff *skb)
 
@@ -319,21 +281,6 @@
 6. Helper Functions and Macros
 ==============================
 
-Library functions (from <linux/isdn/capilli.h>):
-
-::
-
-  void capilib_new_ncci(struct list_head *head, u16 applid,
-			u32 ncci, u32 winsize)
-  void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci)
-  void capilib_release_appl(struct list_head *head, u16 applid)
-  void capilib_release(struct list_head *head)
-  void capilib_data_b3_conf(struct list_head *head, u16 applid,
-			u32 ncci, u16 msgid)
-  u16  capilib_data_b3_req(struct list_head *head, u16 applid,
-			u32 ncci, u16 msgid)
-
-
 Macros to extract/set element values from/in a CAPI message header
 (from <linux/isdn/capiutil.h>):
 
@@ -357,24 +304,6 @@
 Library functions for working with _cmsg structures
 (from <linux/isdn/capiutil.h>):
 
-``unsigned capi_cmsg2message(_cmsg *cmsg, u8 *msg)``
-	Assembles a CAPI 2.0 message from the parameters in ``*cmsg``,
-	storing the result in ``*msg``.
-
-``unsigned capi_message2cmsg(_cmsg *cmsg, u8 *msg)``
-	Disassembles the CAPI 2.0 message in ``*msg``, storing the parameters
-	in ``*cmsg``.
-
-``unsigned capi_cmsg_header(_cmsg *cmsg, u16 ApplId, u8 Command, u8 Subcommand, u16 Messagenumber, u32 Controller)``
-	Fills the header part and address field of the _cmsg structure ``*cmsg``
-	with the given values, zeroing the remainder of the structure so only
-	parameters with non-default values need to be changed before sending
-	the message.
-
-``void capi_cmsg_answer(_cmsg *cmsg)``
-	Sets the low bit of the Subcommand field in ``*cmsg``, thereby
-	converting ``_REQ`` to ``_CONF`` and ``_IND`` to ``_RESP``.
-
 ``char *capi_cmd2str(u8 Command, u8 Subcommand)``
 	Returns the CAPI 2.0 message name corresponding to the given command
 	and subcommand values, as a static ASCII string. The return value may
diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst
index 231e6a6..d0111dd2 100644
--- a/Documentation/kbuild/kconfig-language.rst
+++ b/Documentation/kbuild/kconfig-language.rst
@@ -591,7 +591,8 @@
 Documentation/kbuild/Kconfig.recursion-issue-02.
 
 Below is a list of examples of prior fixes for these types of recursive issues;
-all errors appear to involve one or more select's and one or more "depends on".
+all errors appear to involve one or more "select" statements and one or more
+"depends on".
 
 ============    ===================================
 commit          fix
@@ -653,7 +654,7 @@
 the deduced semantics matches our intended Kconfig design goals.
 
 Having well defined semantics can be useful for tools for practical
-evaluation of depenencies, for instance one such use known case was work to
+evaluation of dependencies, for instance one such case was work to
 express in boolean abstraction of the inferred semantics of Kconfig to
 translate Kconfig logic into boolean formulas and run a SAT solver on this to
 find dead code / features (always inactive), 114 dead features were found in
@@ -680,7 +681,7 @@
 boolean formulas and run a SAT solver on it [5]_. Another known related project
 is CADOS [6]_ (former VAMOS [7]_) and the tools, mainly undertaker [8]_, which
 has been introduced first with [9]_.  The basic concept of undertaker is to
-exract variability models from Kconfig, and put them together with a
+extract variability models from Kconfig and put them together with a
 propositional formula extracted from CPP #ifdefs and build-rules into a SAT
 solver in order to find dead code, dead files, and dead symbols. If using a SAT
 solver is desirable on Kconfig one approach would be to evaluate repurposing
diff --git a/Documentation/kbuild/kconfig.rst b/Documentation/kbuild/kconfig.rst
index a9a855f..dce6801 100644
--- a/Documentation/kbuild/kconfig.rst
+++ b/Documentation/kbuild/kconfig.rst
@@ -154,11 +154,6 @@
 This environment variable can be set to specify the path & name of the
 "auto.conf" file.  Its default value is "include/config/auto.conf".
 
-KCONFIG_TRISTATE
-----------------
-This environment variable can be set to specify the path & name of the
-"tristate.conf" file.  Its default value is "include/config/tristate.conf".
-
 KCONFIG_AUTOHEADER
 ------------------
 This environment variable can be set to specify the path & name of the
diff --git a/Documentation/kernel-hacking/hacking.rst b/Documentation/kernel-hacking/hacking.rst
index a3ddb21..d62aacb 100644
--- a/Documentation/kernel-hacking/hacking.rst
+++ b/Documentation/kernel-hacking/hacking.rst
@@ -601,7 +601,7 @@
 
 This is the variant of `EXPORT_SYMBOL()` that allows specifying a symbol
 namespace. Symbol Namespaces are documented in
-``Documentation/kbuild/namespaces.rst``.
+``Documentation/core-api/symbol-namespaces.rst``.
 
 :c:func:`EXPORT_SYMBOL_NS_GPL()`
 --------------------------------
@@ -610,7 +610,7 @@
 
 This is the variant of `EXPORT_SYMBOL_GPL()` that allows specifying a symbol
 namespace. Symbol Namespaces are documented in
-``Documentation/kbuild/namespaces.rst``.
+``Documentation/core-api/symbol-namespaces.rst``.
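
A minimal, hedged sketch of how the namespace variants are typically used; the
symbol and namespace names below are invented for illustration::

	/* In the module that provides the symbol: */
	#include <linux/export.h>

	int example_do_work(void)
	{
		return 0;
	}
	EXPORT_SYMBOL_NS(example_do_work, EXAMPLE_NS);

	/* In every module that calls it, the namespace must be imported: */
	#include <linux/module.h>

	MODULE_IMPORT_NS(EXAMPLE_NS);

modpost warns at build time if a module uses the symbol without carrying the
MODULE_IMPORT_NS() line.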
 
 Routines and Conventions
 ========================
diff --git a/Documentation/locking/locktorture.rst b/Documentation/locking/locktorture.rst
index e79eeec..5bcb99b 100644
--- a/Documentation/locking/locktorture.rst
+++ b/Documentation/locking/locktorture.rst
@@ -103,8 +103,7 @@
 		  Number of seconds between statistics-related printk()s.
 		  By default, locktorture will report stats every 60 seconds.
 		  Setting the interval to zero causes the statistics to
-		  be printed -only- when the module is unloaded, and this
-		  is the default.
+		  be printed -only- when the module is unloaded.
 
 stutter
 		  The length of time to run the test before pausing for this
diff --git a/Documentation/locking/spinlocks.rst b/Documentation/locking/spinlocks.rst
index 66e3792..bec96f7 100644
--- a/Documentation/locking/spinlocks.rst
+++ b/Documentation/locking/spinlocks.rst
@@ -25,9 +25,9 @@
 
      Documentation/memory-barriers.txt
 
-       (5) LOCK operations.
+       (5) ACQUIRE operations.
 
-       (6) UNLOCK operations.
+       (6) RELEASE operations.
 
 The above is usually pretty simple (you usually need and want only one
 spinlock for most things - using more than one spinlock can make things a
diff --git a/Documentation/maintainer/maintainer-entry-profile.rst b/Documentation/maintainer/maintainer-entry-profile.rst
index 3eaddc8..11ebe36 100644
--- a/Documentation/maintainer/maintainer-entry-profile.rst
+++ b/Documentation/maintainer/maintainer-entry-profile.rst
@@ -99,4 +99,5 @@
 .. toctree::
    :maxdepth: 1
 
+   ../doc-guide/maintainer-profile
    ../nvdimm/maintainer-entry-profile
diff --git a/Documentation/media/kapi/dtv-frontend.rst b/Documentation/media/kapi/dtv-frontend.rst
index fbc5517..b362109 100644
--- a/Documentation/media/kapi/dtv-frontend.rst
+++ b/Documentation/media/kapi/dtv-frontend.rst
@@ -15,8 +15,8 @@
 Demodulator driver
 ^^^^^^^^^^^^^^^^^^
 
-The demodulator driver is responsible to talk with the decoding part of the
-hardware. Such driver should implement :c:type:`dvb_frontend_ops`, with
+The demodulator driver is responsible for talking with the decoding part of the
+hardware. Such a driver should implement :c:type:`dvb_frontend_ops`, which
 tells what type of digital TV standards are supported, and points to a
 series of functions that allow the DVB core to command the hardware via
 the code under ``include/media/dvb_frontend.c``.
@@ -120,7 +120,7 @@
 
 .. |delta|   unicode:: U+00394
 
-The ``include/media/dvb_frontend.c`` has a kernel thread with is
+The ``include/media/dvb_frontend.c`` has a kernel thread which is
 responsible for tuning the device. It supports multiple algorithms to
 detect a channel, as defined at enum :c:func:`dvbfe_algo`.
 
@@ -220,11 +220,11 @@
   - As the gain is visible through the set of registers that adjust the gain,
     typically, this statistics is always available [#f3]_.
 
-  - Drivers should try to make it available all the times, as this statistics
+  - Drivers should try to make it available at all times, as these statistics
     can be used when adjusting an antenna position and to check for troubles
     at the cabling.
 
-  .. [#f3] On a few devices, the gain keeps floating if no carrier.
+  .. [#f3] On a few devices, the gain keeps floating if there is no carrier.
      On such devices, strength report should check first if carrier is
      detected at the tuner (``FE_HAS_CARRIER``, see :c:type:`fe_status`),
      and otherwise return the lowest possible value.
@@ -232,7 +232,7 @@
 Carrier Signal to Noise ratio (:ref:`DTV-STAT-CNR`)
   - Signal to Noise ratio for the main carrier.
 
-  - Signal to Noise measurement depends on the device. On some hardware, is
+  - Signal to Noise measurement depends on the device. On some hardware, it is
     available when the main carrier is detected. On those hardware, CNR
     measurement usually comes from the tuner (e. g. after ``FE_HAS_CARRIER``,
     see :c:type:`fe_status`).
@@ -323,8 +323,8 @@
 		.read_status = foo_get_status_and_stats,
 	};
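
For orientation only, a hedged sketch of what such a read_status callback might
look like; the register accesses are elided and the reported status and
strength values are placeholders, not a reference implementation::

	#include <media/dvb_frontend.h>

	static int foo_get_status_and_stats(struct dvb_frontend *fe,
					    enum fe_status *status)
	{
		struct dtv_frontend_properties *c = &fe->dtv_property_cache;

		/* A real driver reads its demodulator registers here. */
		*status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
			  FE_HAS_SYNC | FE_HAS_LOCK;

		/* Fill one signal-strength statistics slot (made-up value). */
		c->strength.len = 1;
		c->strength.stat[0].scale = FE_SCALE_RELATIVE;
		c->strength.stat[0].uvalue = 0x8000;

		return 0;
	}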
 
-Statistics collect
-^^^^^^^^^^^^^^^^^^
+Statistics collection
+^^^^^^^^^^^^^^^^^^^^^
 
 On almost all frontend hardware, the bit and byte counts are stored by
 the hardware after a certain amount of time or after the total bit/block
diff --git a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
index d0902f3..2535b77 100644
--- a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
@@ -177,7 +177,7 @@
       - ``CEC_MODE_MONITOR``
       - 0xe0
       - Put the file descriptor into monitor mode. Can only be used in
-	combination with :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>`,i
+	combination with :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>`,
 	otherwise the ``EINVAL`` error code will be returned.
 	In monitor mode all messages this CEC
 	device transmits and all messages it receives (both broadcast
diff --git a/Documentation/media/uapi/dvb/video-get-event.rst b/Documentation/media/uapi/dvb/video-get-event.rst
index def6c40..7f03fbe 100644
--- a/Documentation/media/uapi/dvb/video-get-event.rst
+++ b/Documentation/media/uapi/dvb/video-get-event.rst
@@ -81,7 +81,7 @@
 	#define VIDEO_EVENT_FRAME_RATE_CHANGED	2
 	#define VIDEO_EVENT_DECODER_STOPPED 	3
 	#define VIDEO_EVENT_VSYNC 		4
-		__kernel_time_t timestamp;
+		long timestamp;
 		union {
 			video_size_t size;
 			unsigned int frame_rate;	/* in frames per 1000sec */
diff --git a/Documentation/media/uapi/dvb/video_types.rst b/Documentation/media/uapi/dvb/video_types.rst
index 479942c..2697400 100644
--- a/Documentation/media/uapi/dvb/video_types.rst
+++ b/Documentation/media/uapi/dvb/video_types.rst
@@ -170,7 +170,7 @@
     #define VIDEO_EVENT_FRAME_RATE_CHANGED  2
     #define VIDEO_EVENT_DECODER_STOPPED     3
     #define VIDEO_EVENT_VSYNC       4
-	__kernel_time_t timestamp;
+	long timestamp;
 	union {
 	    video_size_t size;
 	    unsigned int frame_rate;    /* in frames per 1000sec */
diff --git a/Documentation/media/uapi/v4l/pixfmt-reserved.rst b/Documentation/media/uapi/v4l/pixfmt-reserved.rst
index b2cd155..7d98a7b 100644
--- a/Documentation/media/uapi/v4l/pixfmt-reserved.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-reserved.rst
@@ -55,8 +55,7 @@
 
       - ``V4L2_PIX_FMT_HM12``
       - 'HM12'
-      - YUV 4:2:0 format used by the IVTV driver,
-	`http://www.ivtvdriver.org/ <http://www.ivtvdriver.org/>`__
+      - YUV 4:2:0 format used by the IVTV driver.
 
 	The format is documented in the kernel sources in the file
 	``Documentation/media/v4l-drivers/cx2341x.rst``
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst
index 9608512..045540b 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst
@@ -13,7 +13,7 @@
 .. _v4l2-pix-fmt-sgrbg12p:
 
 *******************************************************************************************************************************
-V4L2_PIX_FMT_SRGGB12P ('pRAA'), V4L2_PIX_FMT_SGRBG12P ('pgAA'), V4L2_PIX_FMT_SGBRG12P ('pGAA'), V4L2_PIX_FMT_SBGGR12P ('pBAA'),
+V4L2_PIX_FMT_SRGGB12P ('pRCC'), V4L2_PIX_FMT_SGRBG12P ('pgCC'), V4L2_PIX_FMT_SGBRG12P ('pGCC'), V4L2_PIX_FMT_SBGGR12P ('pBCC'),
 *******************************************************************************************************************************
 
 
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst
index 1a988d7e..051ae3d 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst
@@ -13,7 +13,7 @@
 .. _v4l2-pix-fmt-sgrbg14p:
 
 *******************************************************************************************************************************
-V4L2_PIX_FMT_SRGGB14P ('pRCC'), V4L2_PIX_FMT_SGRBG14P ('pgCC'), V4L2_PIX_FMT_SGBRG14P ('pGCC'), V4L2_PIX_FMT_SBGGR14P ('pBCC'),
+V4L2_PIX_FMT_SRGGB14P ('pREE'), V4L2_PIX_FMT_SGRBG14P ('pgEE'), V4L2_PIX_FMT_SGBRG14P ('pGEE'), V4L2_PIX_FMT_SBGGR14P ('pBEE'),
 *******************************************************************************************************************************
 
 *man V4L2_PIX_FMT_SRGGB14P(2)*
diff --git a/Documentation/media/uapi/v4l/pixfmt-tch-td16.rst b/Documentation/media/uapi/v4l/pixfmt-tch-td16.rst
index 4031b17..6f1be87 100644
--- a/Documentation/media/uapi/v4l/pixfmt-tch-td16.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-tch-td16.rst
@@ -15,7 +15,7 @@
 
 *man V4L2_TCH_FMT_DELTA_TD16(2)*
 
-16-bit signed Touch Delta
+16-bit signed little endian Touch Delta
 
 
 Description
@@ -37,38 +37,38 @@
     :widths:       2 1 1 1 1 1 1 1 1
 
     * - start + 0:
-      - D'\ :sub:`00high`
       - D'\ :sub:`00low`
-      - D'\ :sub:`01high`
+      - D'\ :sub:`00high`
       - D'\ :sub:`01low`
-      - D'\ :sub:`02high`
+      - D'\ :sub:`01high`
       - D'\ :sub:`02low`
-      - D'\ :sub:`03high`
+      - D'\ :sub:`02high`
       - D'\ :sub:`03low`
+      - D'\ :sub:`03high`
     * - start + 8:
-      - D'\ :sub:`10high`
       - D'\ :sub:`10low`
-      - D'\ :sub:`11high`
+      - D'\ :sub:`10high`
       - D'\ :sub:`11low`
-      - D'\ :sub:`12high`
+      - D'\ :sub:`11high`
       - D'\ :sub:`12low`
-      - D'\ :sub:`13high`
+      - D'\ :sub:`12high`
       - D'\ :sub:`13low`
+      - D'\ :sub:`13high`
     * - start + 16:
-      - D'\ :sub:`20high`
       - D'\ :sub:`20low`
-      - D'\ :sub:`21high`
+      - D'\ :sub:`20high`
       - D'\ :sub:`21low`
-      - D'\ :sub:`22high`
+      - D'\ :sub:`21high`
       - D'\ :sub:`22low`
-      - D'\ :sub:`23high`
+      - D'\ :sub:`22high`
       - D'\ :sub:`23low`
+      - D'\ :sub:`23high`
     * - start + 24:
-      - D'\ :sub:`30high`
       - D'\ :sub:`30low`
-      - D'\ :sub:`31high`
+      - D'\ :sub:`30high`
       - D'\ :sub:`31low`
-      - D'\ :sub:`32high`
+      - D'\ :sub:`31high`
       - D'\ :sub:`32low`
-      - D'\ :sub:`33high`
+      - D'\ :sub:`32high`
       - D'\ :sub:`33low`
+      - D'\ :sub:`33high`
diff --git a/Documentation/media/uapi/v4l/pixfmt-tch-tu16.rst b/Documentation/media/uapi/v4l/pixfmt-tch-tu16.rst
index 8278543..cb3da668 100644
--- a/Documentation/media/uapi/v4l/pixfmt-tch-tu16.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-tch-tu16.rst
@@ -15,7 +15,7 @@
 
 *man V4L2_TCH_FMT_TU16(2)*
 
-16-bit unsigned raw touch data
+16-bit unsigned little endian raw touch data
 
 
 Description
@@ -36,38 +36,38 @@
     :widths:       2 1 1 1 1 1 1 1 1
 
     * - start + 0:
-      - R'\ :sub:`00high`
       - R'\ :sub:`00low`
-      - R'\ :sub:`01high`
+      - R'\ :sub:`00high`
       - R'\ :sub:`01low`
-      - R'\ :sub:`02high`
+      - R'\ :sub:`01high`
       - R'\ :sub:`02low`
-      - R'\ :sub:`03high`
+      - R'\ :sub:`02high`
       - R'\ :sub:`03low`
+      - R'\ :sub:`03high`
     * - start + 8:
-      - R'\ :sub:`10high`
       - R'\ :sub:`10low`
-      - R'\ :sub:`11high`
+      - R'\ :sub:`10high`
       - R'\ :sub:`11low`
-      - R'\ :sub:`12high`
+      - R'\ :sub:`11high`
       - R'\ :sub:`12low`
-      - R'\ :sub:`13high`
+      - R'\ :sub:`12high`
       - R'\ :sub:`13low`
+      - R'\ :sub:`13high`
     * - start + 16:
-      - R'\ :sub:`20high`
       - R'\ :sub:`20low`
-      - R'\ :sub:`21high`
+      - R'\ :sub:`20high`
       - R'\ :sub:`21low`
-      - R'\ :sub:`22high`
+      - R'\ :sub:`21high`
       - R'\ :sub:`22low`
-      - R'\ :sub:`23high`
+      - R'\ :sub:`22high`
       - R'\ :sub:`23low`
+      - R'\ :sub:`23high`
     * - start + 24:
-      - R'\ :sub:`30high`
       - R'\ :sub:`30low`
-      - R'\ :sub:`31high`
+      - R'\ :sub:`30high`
       - R'\ :sub:`31low`
-      - R'\ :sub:`32high`
+      - R'\ :sub:`31high`
       - R'\ :sub:`32low`
-      - R'\ :sub:`33high`
+      - R'\ :sub:`32high`
       - R'\ :sub:`33low`
+      - R'\ :sub:`33high`
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst b/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst
index 399ef10..8ca6ab7 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst
@@ -44,7 +44,9 @@
 the :ref:`VIDIOC_ENUM_FMT` ioctl with a pointer to this structure. Drivers
 fill the rest of the structure or return an ``EINVAL`` error code. All
 formats are enumerable by beginning at index zero and incrementing by
-one until ``EINVAL`` is returned.
+one until ``EINVAL`` is returned. If applicable, drivers shall return
+formats in preference order, where preferred formats are returned before
+(that is, with lower ``index`` value) less-preferred formats.
 
 .. note::
 
diff --git a/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
index 5712bd4..5c675cb 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
@@ -279,7 +279,7 @@
 	then it will set this flag to signal this to the application.
     * - ``V4L2_DV_FL_HALF_LINE``
       - Specific to interlaced formats: if set, then the vertical
-	backporch of field 1 (aka the odd field) is really one half-line
+	frontporch of field 1 (aka the odd field) is really one half-line
 	longer and the vertical backporch of field 2 (aka the even field)
 	is really one half-line shorter, so each field has exactly the
 	same number of half-lines. Whether half-lines can be detected or
diff --git a/Documentation/media/v4l-drivers/cx18.rst b/Documentation/media/v4l-drivers/cx18.rst
deleted file mode 100644
index 16895a7..0000000
--- a/Documentation/media/v4l-drivers/cx18.rst
+++ /dev/null
@@ -1,39 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-The cx18 driver
-===============
-
-.. note::
-
-   This documentation is outdated.
-
-Some notes regarding the cx18 driver for the Conexant CX23418 MPEG
-encoder chip:
-
-1) Currently supported are:
-
-	- Hauppauge HVR-1600
-	- Compro VideoMate H900
-	- Yuan MPC718
-	- Conexant Raptor PAL/SECAM devkit
-
-2) Some people have problems getting the i2c bus to work.
-   The symptom is that the eeprom cannot be read and the card is
-   unusable. This is probably fixed, but if you have problems
-   then post to the video4linux or ivtv-users mailing list.
-
-3) VBI (raw or sliced) has not yet been implemented.
-
-4) MPEG indexing is not yet implemented.
-
-5) The driver is still a bit rough around the edges, this should
-   improve over time.
-
-
-Firmware:
-
-You can obtain the firmware files here:
-
-http://dl.ivtvdriver.org/ivtv/firmware/cx18-firmware.tar.gz
-
-Untar and copy the .fw files to your firmware directory.
diff --git a/Documentation/media/v4l-drivers/index.rst b/Documentation/media/v4l-drivers/index.rst
index c4c78a2..b41fea2 100644
--- a/Documentation/media/v4l-drivers/index.rst
+++ b/Documentation/media/v4l-drivers/index.rst
@@ -38,7 +38,6 @@
 	bttv
 	cafe_ccic
 	cpia2
-	cx18
 	cx2341x
 	cx88
 	davinci-vpbe
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index ec3b586..7146da0 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -1868,12 +1868,16 @@
  (*) smp_mb__before_atomic();
  (*) smp_mb__after_atomic();
 
-     These are for use with atomic (such as add, subtract, increment and
-     decrement) functions that don't return a value, especially when used for
-     reference counting.  These functions do not imply memory barriers.
+     These are for use with atomic RMW functions that do not imply memory
+     barriers, but where the code needs a memory barrier. Examples for atomic
+     RMW functions that do not imply a memory barrier are e.g. add,
+     subtract, (failed) conditional operations, _relaxed functions,
+     but not atomic_read or atomic_set. A common example where a memory
+     barrier may be required is when atomic ops are used for reference
+     counting.
 
-     These are also used for atomic bitop functions that do not return a
-     value (such as set_bit and clear_bit).
+     These are also used for atomic RMW bitop functions that do not imply a
+     memory barrier (such as set_bit and clear_bit).
 
      As an example, consider a piece of code that marks an object as being dead
      and then decrements the object's reference count:
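
     A hedged sketch of that pattern (obj and ref_count are placeholder names):

	obj->dead = 1;
	smp_mb__before_atomic();
	atomic_dec(&obj->ref_count);

     The barrier ensures that the death mark is perceived to be set before the
     reference counter is decremented.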
diff --git a/Documentation/misc-devices/xilinx_sdfec.rst b/Documentation/misc-devices/xilinx_sdfec.rst
index 2245fcf..7a47075 100644
--- a/Documentation/misc-devices/xilinx_sdfec.rst
+++ b/Documentation/misc-devices/xilinx_sdfec.rst
@@ -1,4 +1,5 @@
 .. SPDX-License-Identifier: GPL-2.0+
+
 ====================
 Xilinx SD-FEC Driver
 ====================
diff --git a/Documentation/networking/nf_flowtable.txt b/Documentation/networking/nf_flowtable.txt
index ca2136c..0bf32d1 100644
--- a/Documentation/networking/nf_flowtable.txt
+++ b/Documentation/networking/nf_flowtable.txt
@@ -76,7 +76,7 @@
 
         table inet x {
 		flowtable f {
-			hook ingress priority 0 devices = { eth0, eth1 };
+			hook ingress priority 0; devices = { eth0, eth1 };
 		}
                 chain y {
                         type filter hook forward priority 0; policy accept;
diff --git a/Documentation/nvdimm/maintainer-entry-profile.rst b/Documentation/nvdimm/maintainer-entry-profile.rst
index 77081fd..efe37ad 100644
--- a/Documentation/nvdimm/maintainer-entry-profile.rst
+++ b/Documentation/nvdimm/maintainer-entry-profile.rst
@@ -33,7 +33,8 @@
 necessarily before initial posting. Contact the list if you need help
 getting the test environment set up.
 
-### ACPI Device Specific Methods (_DSM)
+ACPI Device Specific Methods (_DSM)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Before patches enabling for a new _DSM family will be considered it must
 be assigned a format-interface-code from the NVDIMM Sub-team of the ACPI
 Specification Working Group. In general, the stance of the subsystem is
diff --git a/Documentation/power/interface.rst b/Documentation/power/interface.rst
deleted file mode 100644
index 8d270ed..0000000
--- a/Documentation/power/interface.rst
+++ /dev/null
@@ -1,79 +0,0 @@
-===========================================
-Power Management Interface for System Sleep
-===========================================
-
-Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-
-The power management subsystem provides userspace with a unified sysfs interface
-for system sleep regardless of the underlying system architecture or platform.
-The interface is located in the /sys/power/ directory (assuming that sysfs is
-mounted at /sys).
-
-/sys/power/state is the system sleep state control file.
-
-Reading from it returns a list of supported sleep states, encoded as:
-
-- 'freeze' (Suspend-to-Idle)
-- 'standby' (Power-On Suspend)
-- 'mem' (Suspend-to-RAM)
-- 'disk' (Suspend-to-Disk)
-
-Suspend-to-Idle is always supported.  Suspend-to-Disk is always supported
-too as long the kernel has been configured to support hibernation at all
-(ie. CONFIG_HIBERNATION is set in the kernel configuration file).  Support
-for Suspend-to-RAM and Power-On Suspend depends on the capabilities of the
-platform.
-
-If one of the strings listed in /sys/power/state is written to it, the system
-will attempt to transition into the corresponding sleep state.  Refer to
-Documentation/admin-guide/pm/sleep-states.rst for a description of each of
-those states.
-
-/sys/power/disk controls the operating mode of hibernation (Suspend-to-Disk).
-Specifically, it tells the kernel what to do after creating a hibernation image.
-
-Reading from it returns a list of supported options encoded as:
-
-- 'platform' (put the system into sleep using a platform-provided method)
-- 'shutdown' (shut the system down)
-- 'reboot' (reboot the system)
-- 'suspend' (trigger a Suspend-to-RAM transition)
-- 'test_resume' (resume-after-hibernation test mode)
-
-The currently selected option is printed in square brackets.
-
-The 'platform' option is only available if the platform provides a special
-mechanism to put the system to sleep after creating a hibernation image (ACPI
-does that, for example).  The 'suspend' option is available if Suspend-to-RAM
-is supported.  Refer to Documentation/power/basic-pm-debugging.rst for the
-description of the 'test_resume' option.
-
-To select an option, write the string representing it to /sys/power/disk.
-
-/sys/power/image_size controls the size of hibernation images.
-
-It can be written a string representing a non-negative integer that will be
-used as a best-effort upper limit of the image size, in bytes.  The hibernation
-core will do its best to ensure that the image size will not exceed that number.
-However, if that turns out to be impossible to achieve, a hibernation image will
-still be created and its size will be as small as possible.  In particular,
-writing '0' to this file will enforce hibernation images to be as small as
-possible.
-
-Reading from this file returns the current image size limit, which is set to
-around 2/5 of available RAM by default.
-
-/sys/power/pm_trace controls the PM trace mechanism saving the last suspend
-or resume event point in the RTC across reboots.
-
-It helps to debug hard lockups or reboots due to device driver failures that
-occur during system suspend or resume (which is more common) more effectively.
-
-If /sys/power/pm_trace contains '1', the fingerprint of each suspend/resume
-event point in turn will be stored in the RTC memory (overwriting the actual
-RTC information), so it will survive a system crash if one occurs right after
-storing it and it can be used later to identify the driver that caused the crash
-to happen (see Documentation/power/s2ram.rst for more information).
-
-Initially it contains '0' which may be changed to '1' by writing a string
-representing a nonzero integer into it.
diff --git a/Documentation/powerpc/imc.rst b/Documentation/powerpc/imc.rst
new file mode 100644
index 0000000..633bcee
--- /dev/null
+++ b/Documentation/powerpc/imc.rst
@@ -0,0 +1,199 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. _imc:
+
+===================================
+IMC (In-Memory Collection Counters)
+===================================
+
+Anju T Sudhakar, 10 May 2019
+
+.. contents::
+    :depth: 3
+
+
+Basic overview
+==============
+
+IMC (In-Memory collection counters) is a hardware monitoring facility that
+collects large numbers of hardware performance events at Nest level (these are
+on-chip but off-core), Core level and Thread level.
+
+The Nest PMU counters are handled by a Nest IMC microcode which runs in the OCC
+(On-Chip Controller) complex. The microcode collects the counter data and moves
+the nest IMC counter data to memory.
+
+The Core and Thread IMC PMU counters are handled in the core. Core level PMU
+counters give us the IMC counters' data per core and thread level PMU counters
+give us the IMC counters' data per CPU thread.
+
+OPAL obtains the IMC PMU and supported events information from the IMC Catalog
+and passes it on to the kernel via the device tree. The event's information
+contains:
+
+- Event name
+- Event Offset
+- Event description
+
+and possibly also:
+
+- Event scale
+- Event unit
+
+Some PMUs may have common scale and unit values for all their supported
+events. For those cases, the scale and unit properties for those events must be
+inherited from the PMU.
+
+The event offset in the memory is where the counter data gets accumulated.
+
+IMC catalog is available at:
+	https://github.com/open-power/ima-catalog
+
+The kernel discovers the IMC counters information in the device tree at the
+`imc-counters` device node which has a compatible field
+`ibm,opal-in-memory-counters`. From the device tree, the kernel parses the PMUs
+and their events' information and registers the PMU and its attributes in the
+kernel.
+
+IMC example usage
+=================
+
+.. code-block:: sh
+
+  # perf list
+  [...]
+  nest_mcs01/PM_MCS01_64B_RD_DISP_PORT01/            [Kernel PMU event]
+  nest_mcs01/PM_MCS01_64B_RD_DISP_PORT23/            [Kernel PMU event]
+  [...]
+  core_imc/CPM_0THRD_NON_IDLE_PCYC/                  [Kernel PMU event]
+  core_imc/CPM_1THRD_NON_IDLE_INST/                  [Kernel PMU event]
+  [...]
+  thread_imc/CPM_0THRD_NON_IDLE_PCYC/                [Kernel PMU event]
+  thread_imc/CPM_1THRD_NON_IDLE_INST/                [Kernel PMU event]
+
+To see per-chip data for nest_mcs01/PM_MCS01_64B_WR_DISP_PORT01/:
+
+.. code-block:: sh
+
+  # ./perf stat -e "nest_mcs01/PM_MCS01_64B_WR_DISP_PORT01/" -a --per-socket
+
+To see non-idle instructions for core 0:
+
+.. code-block:: sh
+
+  # ./perf stat -e "core_imc/CPM_NON_IDLE_INST/" -C 0 -I 1000
+
+To see non-idle instructions for a "make":
+
+.. code-block:: sh
+
+  # ./perf stat -e "thread_imc/CPM_NON_IDLE_PCYC/" make
+
+
+IMC Trace-mode
+===============
+
+POWER9 supports two modes for IMC: Accumulation mode and Trace mode. In
+Accumulation mode, event counts are accumulated in system memory and the
+hypervisor then reads the posted counts periodically or when requested. In IMC
+Trace mode, the 64 bit trace SCOM value is initialized with the event
+information. The CPMCxSEL and CPMC_LOAD fields in the trace SCOM specify the
+event to be monitored and the sampling duration. On each overflow of CPMCxSEL,
+hardware snapshots the program counter along with the event counts and writes
+into the memory pointed to by LDBAR.
+
+LDBAR is a 64-bit special purpose per-thread register; it has bits to indicate
+whether the hardware is configured for accumulation or trace mode.
+
+LDBAR Register Layout
+---------------------
+
+  +-------+----------------------+
+  | 0     | Enable/Disable       |
+  +-------+----------------------+
+  | 1     | 0: Accumulation Mode |
+  |       +----------------------+
+  |       | 1: Trace Mode        |
+  +-------+----------------------+
+  | 2:3   | Reserved             |
+  +-------+----------------------+
+  | 4-6   | PB scope             |
+  +-------+----------------------+
+  | 7     | Reserved             |
+  +-------+----------------------+
+  | 8:50  | Counter Address      |
+  +-------+----------------------+
+  | 51:63 | Reserved             |
+  +-------+----------------------+
+
+TRACE_IMC_SCOM bit representation
+---------------------------------
+
+  +-------+------------+
+  | 0:1   | SAMPSEL    |
+  +-------+------------+
+  | 2:33  | CPMC_LOAD  |
+  +-------+------------+
+  | 34:40 | CPMC1SEL   |
+  +-------+------------+
+  | 41:47 | CPMC2SEL   |
+  +-------+------------+
+  | 48:50 | BUFFERSIZE |
+  +-------+------------+
+  | 51:63 | RESERVED   |
+  +-------+------------+
+
+CPMC_LOAD contains the sampling duration. SAMPSEL and CPMCxSEL determine the
+event to count. BUFFERSIZE indicates the memory range. On each overflow,
+hardware snapshots the program counter along with event counts, updates the
+memory and reloads the CPMC_LOAD value for the next sampling duration. IMC
+hardware does not support exceptions, so it quietly wraps around if the memory
+buffer reaches the end.
+
+*Currently the event monitored for trace-mode is fixed as cycle.*
+
+Trace IMC example usage
+=======================
+
+.. code-block:: sh
+
+  # perf list
+  [....]
+  trace_imc/trace_cycles/                            [Kernel PMU event]
+
+To record an application/process with trace-imc event:
+
+.. code-block:: sh
+
+  # perf record -e trace_imc/trace_cycles/ yes > /dev/null
+  [ perf record: Woken up 1 times to write data ]
+  [ perf record: Captured and wrote 0.012 MB perf.data (21 samples) ]
+
+The generated `perf.data` can be read using perf report.
+
+Benefits of using IMC trace-mode
+================================
+
+PMI (Performance Monitoring Interrupt) handling is avoided, since IMC trace
+mode snapshots the program counter and updates the memory directly. This also
+provides a way for the operating system to do instruction sampling in real
+time without PMI processing overhead.
+
+Below is performance data using `perf top` with and without the trace-imc
+event, showing the PMI interrupt counts in each case:
+
+.. code-block:: sh
+
+  # grep PMI /proc/interrupts
+  PMI:          0          0          0          0   Performance monitoring interrupts
+  # ./perf top
+  ...
+  # grep PMI /proc/interrupts
+  PMI:      39735       8710      17338      17801   Performance monitoring interrupts
+  # ./perf top -e trace_imc/trace_cycles/
+  ...
+  # grep PMI /proc/interrupts
+  PMI:      39735       8710      17338      17801   Performance monitoring interrupts
+
+
+That is, the PMI interrupt counts do not increment when using the `trace_imc` event.
diff --git a/Documentation/powerpc/index.rst b/Documentation/powerpc/index.rst
index ba5edb3..0d45f0f 100644
--- a/Documentation/powerpc/index.rst
+++ b/Documentation/powerpc/index.rst
@@ -18,9 +18,11 @@
     elfnote
     firmware-assisted-dump
     hvcs
+    imc
     isa-versions
     kaslr-booke32
     mpc52xx
+    papr_hcalls
     pci_iov_resource_on_powernv
     pmu-ebb
     ptrace
diff --git a/Documentation/powerpc/papr_hcalls.rst b/Documentation/powerpc/papr_hcalls.rst
new file mode 100644
index 0000000..3493631
--- /dev/null
+++ b/Documentation/powerpc/papr_hcalls.rst
@@ -0,0 +1,250 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+Hypercall Op-codes (hcalls)
+===========================
+
+Overview
+=========
+
+Virtualization on 64-bit Power Book3S Platforms is based on the PAPR
+specification [1]_ which describes the run-time environment for a guest
+operating system and how it should interact with the hypervisor for
+privileged operations. Currently there are two PAPR compliant hypervisors:
+
+- **IBM PowerVM (PHYP)**: IBM's proprietary hypervisor that supports AIX,
+  IBM-i and Linux as guests (termed Logical Partitions
+  or LPARs). It supports the full PAPR specification.
+
+- **Qemu/KVM**: Supports PPC64 Linux guests running on a PPC64 Linux host,
+  though it only implements a subset of the PAPR specification called LoPAPR [2]_.
+
+On the PPC64 arch, a guest kernel running on top of a PAPR hypervisor is called
+a *pSeries guest*. A pseries guest runs in supervisor mode (HV=0) and must
+issue hypercalls to the hypervisor whenever it needs to perform an action
+that is hypervisor privileged [3]_ or for other services managed by the
+hypervisor.
+
+Hence a Hypercall (hcall) is essentially a request by the pseries guest
+asking the hypervisor to perform a privileged operation on its behalf. The
+guest issues a hcall with the necessary input operands. The hypervisor, after
+performing the privileged operation, returns a status code and output operands
+back to the guest.
+
+HCALL ABI
+=========
+The ABI specification for a hcall between a pseries guest and the PAPR hypervisor
+is covered in section 14.5.3 of ref [2]_. The switch to the Hypervisor context is
+done via the **HVCS** instruction, which expects the hcall opcode to be set in *r3*
+and any in-arguments for the hcall to be provided in registers *r4-r12*. If values
+have to be passed through a memory buffer, the data stored in that buffer should be
+in Big-endian byte order.
+
+Once control returns back to the guest after the hypervisor has serviced the
+'HVCS' instruction, the return value of the hcall is available in *r3* and any
+out values are returned in registers *r4-r12*. As with the in-arguments,
+any out values stored in a memory buffer will be in Big-endian byte order.
+
+Powerpc arch code provides convenient wrappers named **plpar_hcall_xxx** defined
+in an arch specific header [4]_ to issue hcalls from the Linux kernel
+running as a pseries guest.
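+
+As an illustration only, a minimal sketch of issuing one of the SCM hcalls
+listed later in this document through such a wrapper could look like the
+following (the opcode chosen and the argument order are assumptions based on
+the Input/Output lists below; refer to the header [4]_ for the real
+definitions)::
+
+	#include <linux/types.h>
+	#include <asm/hvcall.h>
+
+	static long query_scm_block_binding(u64 drc_index, u64 scm_block_index,
+					    u64 *gpa)
+	{
+		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+		long rc;
+
+		/* opcode goes in r3, inputs in r4 onwards, out values in retbuf[] */
+		rc = plpar_hcall(H_SCM_QUERY_BLOCK_MEM_BINDING, retbuf,
+				 drc_index, scm_block_index);
+		if (rc == H_SUCCESS)
+			*gpa = retbuf[0]; /* first out value: guest physical address */
+
+		return rc;
+	}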
+
+Register Conventions
+====================
+
+Any hcall should follow the same register convention as described in section 2.2.1.1
+of the "64-Bit ELF V2 ABI Specification: Power Architecture" [5]_. The table below
+summarizes these conventions:
+
++----------+----------+-------------------------------------------+
+| Register |Volatile  |  Purpose                                  |
+| Range    |(Y/N)     |                                           |
++==========+==========+===========================================+
+|   r0     |    Y     |  Optional-usage                           |
++----------+----------+-------------------------------------------+
+|   r1     |    N     |  Stack Pointer                            |
++----------+----------+-------------------------------------------+
+|   r2     |    N     |  TOC                                      |
++----------+----------+-------------------------------------------+
+|   r3     |    Y     |  hcall opcode/return value                |
++----------+----------+-------------------------------------------+
+|  r4-r10  |    Y     |  in and out values                        |
++----------+----------+-------------------------------------------+
+|   r11    |    Y     |  Optional-usage/Environmental pointer     |
++----------+----------+-------------------------------------------+
+|   r12    |    Y     |  Optional-usage/Function entry address at |
+|          |          |  global entry point                       |
++----------+----------+-------------------------------------------+
+|   r13    |    N     |  Thread-Pointer                           |
++----------+----------+-------------------------------------------+
+|  r14-r31 |    N     |  Local Variables                          |
++----------+----------+-------------------------------------------+
+|    LR    |    Y     |  Link Register                            |
++----------+----------+-------------------------------------------+
+|   CTR    |    Y     |  Loop Counter                             |
++----------+----------+-------------------------------------------+
+|   XER    |    Y     |  Fixed-point exception register.          |
++----------+----------+-------------------------------------------+
+|  CR0-1   |    Y     |  Condition register fields.               |
++----------+----------+-------------------------------------------+
+|  CR2-4   |    N     |  Condition register fields.               |
++----------+----------+-------------------------------------------+
+|  CR5-7   |    Y     |  Condition register fields.               |
++----------+----------+-------------------------------------------+
+|  Others  |    N     |                                           |
++----------+----------+-------------------------------------------+
+
+DRC & DRC Indexes
+=================
+::
+
+     DR1                                  Guest
+     +--+        +------------+         +---------+
+     |  | <----> |            |         |  User   |
+     +--+  DRC1  |            |   DRC   |  Space  |
+                 |    PAPR    |  Index  +---------+
+     DR2         | Hypervisor |         |         |
+     +--+        |            | <-----> |  Kernel |
+     |  | <----> |            |  Hcall  |         |
+     +--+  DRC2  +------------+         +---------+
+
+The PAPR hypervisor terms shared hardware resources like PCI devices, NVDIMMs
+etc. that are available for use by LPARs as Dynamic Resources (DR). When a DR is
+allocated to an LPAR, PHYP creates a data-structure called a Dynamic Resource
+Connector (DRC) to manage LPAR access. An LPAR refers to a DRC via an opaque
+32-bit number called the DRC-Index. The DRC-Index value is provided to the LPAR
+via the device tree, where it is present as an attribute in the device tree node
+associated with the DR.
+
+HCALL Return-values
+===================
+
+After servicing the hcall, the hypervisor sets the return value in *r3* indicating
+success or failure of the hcall. In case of a failure an error code indicates
+the cause of the error. These codes are defined and documented in the arch
+specific header [4]_.
+
+In some cases a hcall can potentially take a long time and needs to be issued
+multiple times in order to be completely serviced. These hcalls will usually
+accept an opaque value *continue-token* within their argument list, and a
+return value of *H_CONTINUE* indicates that the hypervisor has not yet finished
+servicing the hcall.
+
+To make such hcalls the guest needs to set *continue-token == 0* for the
+initial call and use the hypervisor-returned value of *continue-token*
+for each subsequent hcall until the hypervisor returns a non-*H_CONTINUE*
+return value.
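+
+Purely as an illustration of this protocol, a retry loop for the
+H_SCM_BIND_MEM hcall described below might be sketched as follows (the
+argument order follows its Input list; handling of H_BUSY and other return
+codes is omitted)::
+
+	static long bind_scm_blocks(u64 drc_index, u64 start_block,
+				    u64 nr_blocks, u64 target_addr)
+	{
+		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+		u64 token = 0;
+		long rc;
+
+		do {
+			rc = plpar_hcall(H_SCM_BIND_MEM, retbuf, drc_index,
+					 start_block, nr_blocks,
+					 target_addr, token);
+			/* the first out value is the continue-token for the next pass */
+			token = retbuf[0];
+		} while (rc == H_CONTINUE);
+
+		return rc;
+	}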
+
+HCALL Op-codes
+==============
+
+Below is a partial list of HCALLs that are supported by PHYP. For the
+corresponding opcode values please look into the arch specific header [4]_:
+
+**H_SCM_READ_METADATA**
+
+| Input: *drcIndex, offset, buffer-address, numBytesToRead*
+| Out: *numBytesRead*
+| Return Value: *H_Success, H_Parameter, H_P2, H_P3, H_Hardware*
+
+Given a DRC Index of an NVDIMM, read N bytes from the metadata area
+associated with it, at a specified offset, and copy them to the provided buffer.
+The metadata area stores configuration information such as label information,
+bad-blocks etc. The metadata area is located out-of-band of the NVDIMM storage
+area, hence separate access semantics are provided.
+
+**H_SCM_WRITE_METADATA**
+
+| Input: *drcIndex, offset, data, numBytesToWrite*
+| Out: *None*
+| Return Value: *H_Success, H_Parameter, H_P2, H_P4, H_Hardware*
+
+Given a DRC Index of an NVDIMM, write N-bytes to the metadata area
+associated with it, at the specified offset and from the provided buffer.
+
+**H_SCM_BIND_MEM**
+
+| Input: *drcIndex, startingScmBlockIndex, numScmBlocksToBind,*
+| *targetLogicalMemoryAddress, continue-token*
+| Out: *continue-token, targetLogicalMemoryAddress, numScmBlocksToBound*
+| Return Value: *H_Success, H_Parameter, H_P2, H_P3, H_P4, H_Overlap,*
+| *H_Too_Big, H_P5, H_Busy*
+
+Given a DRC-Index of an NVDIMM, map a contiguous range of SCM blocks
+*(startingScmBlockIndex, startingScmBlockIndex+numScmBlocksToBind)* to the guest
+at *targetLogicalMemoryAddress* within the guest physical address space. In
+case *targetLogicalMemoryAddress == 0xFFFFFFFF_FFFFFFFF*, the hypervisor
+assigns a target address to the guest. The HCALL can fail if the Guest has
+an active PTE entry to the SCM block being bound.
+
+**H_SCM_UNBIND_MEM**
+
+| Input: *drcIndex, startingScmLogicalMemoryAddress, numScmBlocksToUnbind*
+| Out: *numScmBlocksUnbound*
+| Return Value: *H_Success, H_Parameter, H_P2, H_P3, H_In_Use, H_Overlap,*
+| *H_Busy, H_LongBusyOrder1mSec, H_LongBusyOrder10mSec*
+
+Given a DRC-Index of an NVDIMM, unmap *numScmBlocksToUnbind* SCM blocks starting
+at *startingScmLogicalMemoryAddress* from guest physical address space. The
+HCALL can fail if the Guest has an active PTE entry to the SCM block being
+unbound.
+
+**H_SCM_QUERY_BLOCK_MEM_BINDING**
+
+| Input: *drcIndex, scmBlockIndex*
+| Out: *Guest-Physical-Address*
+| Return Value: *H_Success, H_Parameter, H_P2, H_NotFound*
+
+Given a DRC-Index and an SCM Block index, return the guest physical address to
+which the SCM block is mapped.
+
+**H_SCM_QUERY_LOGICAL_MEM_BINDING**
+
+| Input: *Guest-Physical-Address*
+| Out: *drcIndex, scmBlockIndex*
+| Return Value: *H_Success, H_Parameter, H_P2, H_NotFound*
+
+Given a guest physical address, return the DRC Index and SCM block that are
+mapped to that address.
+
+**H_SCM_UNBIND_ALL**
+
+| Input: *scmTargetScope, drcIndex*
+| Out: *None*
+| Return Value: *H_Success, H_Parameter, H_P2, H_P3, H_In_Use, H_Busy,*
+| *H_LongBusyOrder1mSec, H_LongBusyOrder10mSec*
+
+Depending on the target scope, unmap either all SCM blocks belonging to all
+NVDIMMs, or all SCM blocks belonging to a single NVDIMM identified by its
+drcIndex, from the LPAR memory.
+
+**H_SCM_HEALTH**
+
+| Input: drcIndex
+| Out: *health-bitmap, health-bit-valid-bitmap*
+| Return Value: *H_Success, H_Parameter, H_Hardware*
+
+Given a DRC Index return the info on predictive failure and overall health of
+the NVDIMM. The asserted bits in the health-bitmap indicate a single predictive
+failure and health-bit-valid-bitmap indicates which bits in health-bitmap are
+valid.
+
+**H_SCM_PERFORMANCE_STATS**
+
+| Input: drcIndex, resultBuffer Addr
+| Out: None
+| Return Value:  *H_Success, H_Parameter, H_Unsupported, H_Hardware, H_Authority, H_Privilege*
+
+Given a DRC Index collect the performance statistics for NVDIMM and copy them
+to the resultBuffer.
+
+References
+==========
+.. [1] "Power Architecture Platform Reference"
+       https://en.wikipedia.org/wiki/Power_Architecture_Platform_Reference
+.. [2] "Linux on Power Architecture Platform Reference"
+       https://members.openpowerfoundation.org/document/dl/469
+.. [3] "Definitions and Notation" Book III-Section 14.5.3
+       https://openpowerfoundation.org/?resource_lib=power-isa-version-3-0
+.. [4] arch/powerpc/include/asm/hvcall.h
+.. [5] "64-Bit ELF V2 ABI Specification: Power Architecture"
+       https://openpowerfoundation.org/?resource_lib=64-bit-elf-v2-abi-specification-power-architecture
diff --git a/Documentation/powerpc/ultravisor.rst b/Documentation/powerpc/ultravisor.rst
index 730854f..363736d 100644
--- a/Documentation/powerpc/ultravisor.rst
+++ b/Documentation/powerpc/ultravisor.rst
@@ -948,6 +948,66 @@
     up its internal state for this virtual machine.
 
 
+H_SVM_INIT_ABORT
+----------------
+
+    Abort the process of securing an SVM.
+
+Syntax
+~~~~~~
+
+.. code-block:: c
+
+	uint64_t hypercall(const uint64_t H_SVM_INIT_ABORT)
+
+Return values
+~~~~~~~~~~~~~
+
+    One of the following values:
+
+	* H_PARAMETER 		on successfully cleaning up the state,
+				Hypervisor will return this value to the
+				**guest**, to indicate that the underlying
+				UV_ESM ultracall failed.
+
+	* H_STATE		if called after a VM has gone secure (i.e
+				H_SVM_INIT_DONE hypercall was successful).
+
+	* H_UNSUPPORTED		if called from a wrong context (e.g. from a
+				normal VM).
+
+Description
+~~~~~~~~~~~
+
+    Abort the process of securing a virtual machine. This call must
+    be made after a prior call to ``H_SVM_INIT_START`` hypercall and
+    before a call to ``H_SVM_INIT_DONE``.
+
+    On entry into this hypercall the non-volatile GPRs and FPRs are
+    expected to contain the values they had at the time the VM issued
+    the UV_ESM ultracall. Further ``SRR0`` is expected to contain the
+    address of the instruction after the ``UV_ESM`` ultracall and ``SRR1``
+    the MSR value with which to return to the VM.
+
+    This hypercall will clean up any partial state that was established for
+    the VM since the prior ``H_SVM_INIT_START`` hypercall, including paging
+    out pages that were paged-into secure memory, and issue the
+    ``UV_SVM_TERMINATE`` ultracall to terminate the VM.
+
+    After the partial state is cleaned up, control returns to the VM
+    (**not Ultravisor**), at the address specified in ``SRR0`` with the
+    MSR values set to the value in ``SRR1``.
+
+Use cases
+~~~~~~~~~
+
+    If after a successful call to ``H_SVM_INIT_START``, the Ultravisor
+    encounters an error while securing a virtual machine, either due
+    to lack of resources or because the VM's security information could
+    not be validated, Ultravisor informs the Hypervisor about it.
+    Hypervisor should use this call to clean up any internal state for
+    this virtual machine and return to the VM.
+
 H_SVM_PAGE_IN
 -------------
 
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 2284f22..e478635 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -383,7 +383,8 @@
 E2fsprogs
 ---------
 
-- <http://prdownloads.sourceforge.net/e2fsprogs/e2fsprogs-1.29.tar.gz>
+- <https://www.kernel.org/pub/linux/kernel/people/tytso/e2fsprogs/>
+- <https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/>
 
 JFSutils
 --------
@@ -393,12 +394,13 @@
 Reiserfsprogs
 -------------
 
-- <http://www.kernel.org/pub/linux/utils/fs/reiserfs/>
+- <https://git.kernel.org/pub/scm/linux/kernel/git/jeffm/reiserfsprogs.git/>
 
 Xfsprogs
 --------
 
-- <ftp://oss.sgi.com/projects/xfs/>
+- <https://git.kernel.org/pub/scm/fs/xfs/xfsprogs-dev.git>
+- <https://www.kernel.org/pub/linux/utils/fs/xfs/xfsprogs/>
 
 Pcmciautils
 -----------
@@ -437,7 +439,9 @@
 PPP
 ---
 
-- <ftp://ftp.samba.org/pub/ppp/>
+- <https://download.samba.org/pub/ppp/>
+- <https://git.ozlabs.org/?p=ppp.git>
+- <https://github.com/paulusmack/ppp/>
 
 NFS-utils
 ---------
@@ -447,7 +451,7 @@
 Iptables
 --------
 
-- <http://www.iptables.org/downloads.html>
+- <https://netfilter.org/projects/iptables/index.html>
 
 Ip-route2
 ---------
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
index 5d54946..33edae6 100644
--- a/Documentation/process/embargoed-hardware-issues.rst
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -1,3 +1,5 @@
+.. _embargoed_hardware_issues:
+
 Embargoed hardware issues
 =========================
 
@@ -36,7 +38,10 @@
 The list is encrypted and email to the list can be sent by either PGP or
 S/MIME encrypted and must be signed with the reporter's PGP key or S/MIME
 certificate. The list's PGP key and S/MIME certificate are available from
-https://www.kernel.org/....
+the following URLs:
+
+  - PGP: https://www.kernel.org/static/files/hardware-security.asc
+  - S/MIME: https://www.kernel.org/static/files/hardware-security.crt
 
 While hardware security issues are often handled by the affected hardware
 vendor, we welcome contact from researchers or individuals who have
@@ -55,14 +60,14 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 The encrypted mailing-lists which are used in our process are hosted on
-Linux Foundation's IT infrastructure. By providing this service Linux
-Foundation's director of IT Infrastructure security technically has the
-ability to access the embargoed information, but is obliged to
-confidentiality by his employment contract. Linux Foundation's director of
-IT Infrastructure security is also responsible for the kernel.org
-infrastructure.
+Linux Foundation's IT infrastructure. By providing this service, members
+of Linux Foundation's IT operations personnel technically have the
+ability to access the embargoed information, but are obliged to
+confidentiality by their employment contract. Linux Foundation IT
+personnel are also responsible for operating and managing the rest of
+kernel.org infrastructure.
 
-The Linux Foundation's current director of IT Infrastructure security is
+The Linux Foundation's current director of IT Project infrastructure is
 Konstantin Ryabitsev.
 
 
@@ -274,7 +279,7 @@
 subscriber with the subscriber's PGP key or S/MIME certificate. Details
 about the mailing-list software and the setup which is used to ensure the
 security of the lists and protection of the data can be found here:
-https://www.kernel.org/....
+https://korg.wiki.kernel.org/userdoc/remail.
 
 List keys
 ^^^^^^^^^
diff --git a/Documentation/riscv/boot-image-header.rst b/Documentation/riscv/boot-image-header.rst
index 518d46d..d775253 100644
--- a/Documentation/riscv/boot-image-header.rst
+++ b/Documentation/riscv/boot-image-header.rst
@@ -22,7 +22,7 @@
 	u64 res2 = 0;		  /* Reserved */
 	u64 magic = 0x5643534952; /* Magic number, little endian, "RISCV" */
 	u32 magic2 = 0x05435352;  /* Magic number 2, little endian, "RSC\x05" */
-	u32 res4;		  /* Reserved for PE COFF offset */
+	u32 res3;		  /* Reserved for PE COFF offset */
 
 This header format is compliant with PE/COFF header and largely inspired from
 ARM64 header. Thus, both ARM64 & RISC-V header can be combined into one common
@@ -34,7 +34,7 @@
 - This header can also be reused to support EFI stub for RISC-V in future. EFI
   specification needs PE/COFF image header in the beginning of the kernel image
   in order to load it as an EFI application. In order to support EFI stub,
-  code0 should be replaced with "MZ" magic string and res5(at offset 0x3c) should
+  code0 should be replaced with "MZ" magic string and res3(at offset 0x3c) should
   point to the rest of the PE/COFF header.
 
 - version field indicate header version number
diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py
index 5b6119f..b182363 100644
--- a/Documentation/sphinx/automarkup.py
+++ b/Documentation/sphinx/automarkup.py
@@ -5,8 +5,13 @@
 # has been done.
 #
 from docutils import nodes
+import sphinx
 from sphinx import addnodes
-from sphinx.environment import NoUri
+if sphinx.version_info[0] < 2 or \
+   sphinx.version_info[0] == 2 and sphinx.version_info[1] < 1:
+    from sphinx.environment import NoUri
+else:
+    from sphinx.errors import NoUri
 import re
 
 #
diff --git a/Documentation/trace/boottime-trace.rst b/Documentation/trace/boottime-trace.rst
new file mode 100644
index 0000000..dcb3900
--- /dev/null
+++ b/Documentation/trace/boottime-trace.rst
@@ -0,0 +1,184 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+Boot-time tracing
+=================
+
+:Author: Masami Hiramatsu <mhiramat@kernel.org>
+
+Overview
+========
+
+Boot-time tracing allows users to trace the boot-time process, including
+device initialization, with the full features of ftrace, including per-event
+filters and actions, histograms, kprobe-events and synthetic-events,
+and trace instances.
+Since the kernel command line is not enough to control these complex features,
+boot-time tracing uses a bootconfig file to describe the tracing configuration.
+
+Options in the Boot Config
+==========================
+
+Here is the list of available options for boot-time tracing in the
+boot config file [1]_. All options are under the "ftrace." or "kernel."
+prefix. See kernel parameters for the options which start
+with the "kernel." prefix [2]_.
+
+.. [1] See :ref:`Documentation/admin-guide/bootconfig.rst <bootconfig>`
+.. [2] See :ref:`Documentation/admin-guide/kernel-parameters.rst <kernelparameters>`
+
+Ftrace Global Options
+---------------------
+
+Ftrace global options have the "kernel." prefix in the boot config, which means
+these options are passed as a part of the kernel legacy command line.
+
+kernel.tp_printk
+   Output trace-event data on printk buffer too.
+
+kernel.dump_on_oops [= MODE]
+   Dump ftrace on Oops. If MODE = 1 or omitted, dump trace buffer
+   on all CPUs. If MODE = 2, dump a buffer on a CPU which kicks Oops.
+
+kernel.traceoff_on_warning
+   Stop tracing if WARN_ON() occurs.
+
+kernel.fgraph_max_depth = MAX_DEPTH
+   Set MAX_DEPTH to maximum depth of fgraph tracer.
+
+kernel.fgraph_filters = FILTER[, FILTER2...]
+   Add fgraph tracing function filters.
+
+kernel.fgraph_notraces = FILTER[, FILTER2...]
+   Add fgraph non-tracing function filters.
+
+
+Ftrace Per-instance Options
+---------------------------
+
+These options can be used for each instance including global ftrace node.
+
+ftrace.[instance.INSTANCE.]options = OPT1[, OPT2[...]]
+   Enable given ftrace options.
+
+ftrace.[instance.INSTANCE.]trace_clock = CLOCK
+   Set given CLOCK to ftrace's trace_clock.
+
+ftrace.[instance.INSTANCE.]buffer_size = SIZE
+   Configure ftrace buffer size to SIZE. You can use "KB" or "MB"
+   for that SIZE.
+
+ftrace.[instance.INSTANCE.]alloc_snapshot
+   Allocate snapshot buffer.
+
+ftrace.[instance.INSTANCE.]cpumask = CPUMASK
+   Set CPUMASK as trace cpu-mask.
+
+ftrace.[instance.INSTANCE.]events = EVENT[, EVENT2[...]]
+   Enable given events on boot. You can use a wild card in EVENT.
+
+ftrace.[instance.INSTANCE.]tracer = TRACER
+   Set TRACER to current tracer on boot. (e.g. function)
+
+ftrace.[instance.INSTANCE.]ftrace.filters
+   This will take an array of tracing function filter rules.
+
+ftrace.[instance.INSTANCE.]ftrace.notraces
+   This will take an array of NON-tracing function filter rules.
+
+
+Ftrace Per-Event Options
+------------------------
+
+These options are setting per-event options.
+
+ftrace.[instance.INSTANCE.]event.GROUP.EVENT.enable
+   Enable GROUP:EVENT tracing.
+
+ftrace.[instance.INSTANCE.]event.GROUP.EVENT.filter = FILTER
+   Set FILTER rule to the GROUP:EVENT.
+
+ftrace.[instance.INSTANCE.]event.GROUP.EVENT.actions = ACTION[, ACTION2[...]]
+   Set ACTIONs to the GROUP:EVENT.
+
+ftrace.[instance.INSTANCE.]event.kprobes.EVENT.probes = PROBE[, PROBE2[...]]
+   Defines a new kprobe event based on PROBEs. It is able to define
+   multiple probes on one event, but those must have the same type of
+   arguments. This option is available only for events whose
+   group name is "kprobes".
+
+ftrace.[instance.INSTANCE.]event.synthetic.EVENT.fields = FIELD[, FIELD2[...]]
+   Defines a new synthetic event with FIELDs. Each field should be
+   "type varname".
+
+Note that kprobe and synthetic event definitions can be written under an
+instance node, but those are also visible from other instances. So please
+take care to avoid event name conflicts.
+
+
+Examples
+========
+
+For example, to add a filter and actions for each event, define kprobe
+events, and define synthetic events with a histogram, write a boot config like
+below::
+
+  ftrace.event {
+        task.task_newtask {
+                filter = "pid < 128"
+                enable
+        }
+        kprobes.vfs_read {
+                probes = "vfs_read $arg1 $arg2"
+                filter = "common_pid < 200"
+                enable
+        }
+        synthetic.initcall_latency {
+                fields = "unsigned long func", "u64 lat"
+                actions = "hist:keys=func.sym,lat:vals=lat:sort=lat"
+        }
+        initcall.initcall_start {
+                actions = "hist:keys=func:ts0=common_timestamp.usecs"
+        }
+        initcall.initcall_finish {
+                actions = "hist:keys=func:lat=common_timestamp.usecs-$ts0:onmatch(initcall.initcall_start).initcall_latency(func,$lat)"
+        }
+  }
+
+Also, boot-time tracing supports an "instance" node, which allows us to run
+several tracers for different purposes at once. For example, if one tracer
+is for tracing functions starting with "user\_" and another traces
+"kernel\_" functions, you can write a boot config as below::
+
+  ftrace.instance {
+        foo {
+                tracer = "function"
+                ftrace.filters = "user_*"
+        }
+        bar {
+                tracer = "function"
+                ftrace.filters = "kernel_*"
+        }
+  }
+
+The instance node also accepts event nodes so that each instance
+can customize its event tracing.
+
+This boot-time tracing also supports ftrace kernel parameters via the boot
+config.
+For example, the following kernel parameters::
+
+ trace_options=sym-addr trace_event=initcall:* tp_printk trace_buf_size=1M ftrace=function ftrace_filter="vfs*"
+
+This can be written in a boot config like below::
+
+  kernel {
+        trace_options = sym-addr
+        trace_event = "initcall:*"
+        tp_printk
+        trace_buf_size = 1M
+        ftrace = function
+        ftrace_filter = "vfs*"
+  }
+
+Note that parameters start with "kernel" prefix instead of "ftrace".
diff --git a/Documentation/trace/events.rst b/Documentation/trace/events.rst
index f7e1fcc..ed79b22 100644
--- a/Documentation/trace/events.rst
+++ b/Documentation/trace/events.rst
@@ -525,3 +525,518 @@
   event counts (hitcount).
 
   See Documentation/trace/histogram.rst for details and examples.
+
+6.3 In-kernel trace event API
+-----------------------------
+
+In most cases, the command-line interface to trace events is more than
+sufficient.  Sometimes, however, applications might find the need for
+more complex relationships than can be expressed through a simple
+series of linked command-line expressions, or putting together sets of
+commands may be simply too cumbersome.  An example might be an
+application that needs to 'listen' to the trace stream in order to
+maintain an in-kernel state machine detecting, for instance, when an
+illegal kernel state occurs in the scheduler.
+
+The trace event subsystem provides an in-kernel API allowing modules
+or other kernel code to generate user-defined 'synthetic' events at
+will, which can be used to either augment the existing trace stream
+and/or signal that a particular important state has occurred.
+
+A similar in-kernel API is also available for creating kprobe and
+kretprobe events.
+
+Both the synthetic event and k/ret/probe event APIs are built on top
+of a lower-level "dynevent_cmd" event command API, which is also
+available for more specialized applications, or as the basis of other
+higher-level trace event APIs.
+
+The API provided for these purposes is described below and allows the
+following:
+
+  - dynamically creating synthetic event definitions
+  - dynamically creating kprobe and kretprobe event definitions
+  - tracing synthetic events from in-kernel code
+  - the low-level "dynevent_cmd" API
+
+6.3.1 Dynamically creating synthetic event definitions
+------------------------------------------------------
+
+There are a couple ways to create a new synthetic event from a kernel
+module or other kernel code.
+
+The first creates the event in one step, using synth_event_create().
+In this method, the name of the event to create and an array defining
+the fields is supplied to synth_event_create().  If successful, a
+synthetic event with that name and fields will exist following that
+call.  For example, to create a new "schedtest" synthetic event:
+
+  ret = synth_event_create("schedtest", sched_fields,
+                           ARRAY_SIZE(sched_fields), THIS_MODULE);
+
+The sched_fields param in this example points to an array of struct
+synth_field_desc, each of which describes an event field by type and
+name:
+
+  static struct synth_field_desc sched_fields[] = {
+        { .type = "pid_t",              .name = "next_pid_field" },
+        { .type = "char[16]",           .name = "next_comm_field" },
+        { .type = "u64",                .name = "ts_ns" },
+        { .type = "u64",                .name = "ts_ms" },
+        { .type = "unsigned int",       .name = "cpu" },
+        { .type = "char[64]",           .name = "my_string_field" },
+        { .type = "int",                .name = "my_int_field" },
+  };
+
+See synth_field_size() for available types. If field_name contains [n]
+the field is considered to be an array.
+
+If the event is created from within a module, a pointer to the module
+must be passed to synth_event_create().  This will ensure that the
+trace buffer won't contain unreadable events when the module is
+removed.
+
+At this point, the event object is ready to be used for generating new
+events.
+
+In the second method, the event is created in several steps.  This
+allows events to be created dynamically and without the need to create
+and populate an array of fields beforehand.
+
+To use this method, an empty or partially empty synthetic event should
+first be created using synth_event_gen_cmd_start() or
+synth_event_gen_cmd_array_start().  For synth_event_gen_cmd_start(),
+the name of the event along with one or more pairs of args each pair
+representing a 'type field_name;' field specification should be
+supplied.  For synth_event_gen_cmd_array_start(), the name of the
+event along with an array of struct synth_field_desc should be
+supplied. Before calling synth_event_gen_cmd_start() or
+synth_event_gen_cmd_array_start(), the user should create and
+initialize a dynevent_cmd object using synth_event_cmd_init().
+
+For example, to create a new "schedtest" synthetic event with two
+fields:
+
+  struct dynevent_cmd cmd;
+  char *buf;
+
+  /* Create a buffer to hold the generated command */
+  buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+
+  /* Before generating the command, initialize the cmd object */
+  synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+  ret = synth_event_gen_cmd_start(&cmd, "schedtest", THIS_MODULE,
+                                  "pid_t", "next_pid_field",
+                                  "u64", "ts_ns");
+
+Alternatively, using an array of struct synth_field_desc fields
+containing the same information:
+
+  ret = synth_event_gen_cmd_array_start(&cmd, "schedtest", THIS_MODULE,
+                                        fields, n_fields);
+
+Once the synthetic event object has been created, it can then be
+populated with more fields.  Fields are added one by one using
+synth_event_add_field(), supplying the dynevent_cmd object, a field
+type, and a field name.  For example, to add a new int field named
+"intfield", the following call should be made:
+
+  ret = synth_event_add_field(&cmd, "int", "intfield");
+
+See synth_field_size() for available types. If field_name contains [n]
+the field is considered to be an array.
+
+A group of fields can also be added all at once using an array of
+synth_field_desc with add_synth_fields().  For example, this would add
+just the first four sched_fields:
+
+  ret = synth_event_add_fields(&cmd, sched_fields, 4);
+
+If you already have a string of the form 'type field_name',
+synth_event_add_field_str() can be used to add it as-is; it will
+also automatically append a ';' to the string.
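+
+For example, assuming the same cmd object as above, a field described by such
+a string might be added with:
+
+  ret = synth_event_add_field_str(&cmd, "u64 ts_ns");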
+
+Once all the fields have been added, the event should be finalized and
+registered by calling the synth_event_gen_cmd_end() function:
+
+  ret = synth_event_gen_cmd_end(&cmd);
+
+At this point, the event object is ready to be used for tracing new
+events.
+
+6.3.3 Tracing synthetic events from in-kernel code
+--------------------------------------------------
+
+To trace a synthetic event, there are several options.  The first
+option is to trace the event in one call, using synth_event_trace()
+with a variable number of values, or synth_event_trace_array() with an
+array of values to be set.  A second option can be used to avoid the
+need for a pre-formed array of values or list of arguments, via
+synth_event_trace_start() and synth_event_trace_end() along with
+synth_event_add_next_val() or synth_event_add_val() to add the values
+piecewise.
+
+6.3.3.1 Tracing a synthetic event all at once
+---------------------------------------------
+
+To trace a synthetic event all at once, the synth_event_trace() or
+synth_event_trace_array() functions can be used.
+
+The synth_event_trace() function is passed the trace_event_file
+representing the synthetic event (which can be retrieved using
+trace_get_event_file() using the synthetic event name, "synthetic" as
+the system name, and the trace instance name (NULL if using the global
+trace array)), along with a variable number of u64 args, one for each
+synthetic event field, and the number of values being passed.
+
+So, to trace an event corresponding to the synthetic event definition
+above, code like the following could be used:
+
+  ret = synth_event_trace(create_synth_test, 7, /* number of values */
+                          444,             /* next_pid_field */
+                          (u64)"clackers", /* next_comm_field */
+                          1000000,         /* ts_ns */
+                          1000,            /* ts_ms */
+                          smp_processor_id(),/* cpu */
+                          (u64)"Thneed",   /* my_string_field */
+                          999);            /* my_int_field */
+
+All vals should be cast to u64, and string vals are just pointers to
+strings, cast to u64.  Strings will be copied into space reserved in
+the event for the string, using these pointers.
+
+Alternatively, the synth_event_trace_array() function can be used to
+accomplish the same thing.  It is passed the trace_event_file
+representing the synthetic event (which can be retrieved using
+trace_get_event_file() using the synthetic event name, "synthetic" as
+the system name, and the trace instance name (NULL if using the global
+trace array)), along with an array of u64, one for each synthetic
+event field.
+
+To trace an event corresponding to the synthetic event definition
+above, code like the following could be used:
+
+  u64 vals[7];
+
+  vals[0] = 777;                  /* next_pid_field */
+  vals[1] = (u64)"tiddlywinks";   /* next_comm_field */
+  vals[2] = 1000000;              /* ts_ns */
+  vals[3] = 1000;                 /* ts_ms */
+  vals[4] = smp_processor_id();   /* cpu */
+  vals[5] = (u64)"thneed";        /* my_string_field */
+  vals[6] = 398;                  /* my_int_field */
+
+The 'vals' array is just an array of u64, the number of which must
+match the number of fields in the synthetic event, and which must be in
+the same order as the synthetic event fields.
+
+All vals should be cast to u64, and string vals are just pointers to
+strings, cast to u64.  Strings will be copied into space reserved in
+the event for the string, using these pointers.
+
+In order to trace a synthetic event, a pointer to the trace event file
+is needed.  The trace_get_event_file() function can be used to get
+it - it will find the file in the given trace instance (in this case
+NULL since the top trace array is being used) while at the same time
+preventing the instance containing it from going away:
+
+       schedtest_event_file = trace_get_event_file(NULL, "synthetic",
+                                                   "schedtest");
+
+Before tracing the event, it should be enabled in some way, otherwise
+the synthetic event won't actually show up in the trace buffer.
+
+To enable a synthetic event from the kernel, trace_array_set_clr_event()
+can be used (which is not specific to synthetic events, so it does need
+the "synthetic" system name to be specified explicitly).
+
+To enable the event, pass 'true' to it:
+
+       trace_array_set_clr_event(schedtest_event_file->tr,
+                                 "synthetic", "schedtest", true);
+
+To disable it pass false:
+
+       trace_array_set_clr_event(schedtest_event_file->tr,
+                                 "synthetic", "schedtest", false);
+
+Finally, synth_event_trace_array() can be used to actually trace the
+event, which should be visible in the trace buffer afterwards:
+
+       ret = synth_event_trace_array(schedtest_event_file, vals,
+                                     ARRAY_SIZE(vals));
+
+To remove the synthetic event, the event should be disabled, and the
+trace instance should be 'put' back using trace_put_event_file():
+
+       trace_array_set_clr_event(schedtest_event_file->tr,
+                                 "synthetic", "schedtest", false);
+       trace_put_event_file(schedtest_event_file);
+
+If those have been successful, synth_event_delete() can be called to
+remove the event:
+
+       ret = synth_event_delete("schedtest");
+
+6.3.3.2 Tracing a synthetic event piecewise
+-------------------------------------------
+
+To trace a synthetic event using the piecewise method described above, the
+synth_event_trace_start() function is used to 'open' the synthetic
+event trace:
+
+       struct synth_trace_state trace_state;
+
+       ret = synth_event_trace_start(schedtest_event_file, &trace_state);
+
+It's passed the trace_event_file representing the synthetic event
+using the same methods as described above, along with a pointer to a
+struct synth_trace_state object, which will be zeroed before use and
+used to maintain state between this and following calls.
+
+Once the event has been opened, which means space for it has been
+reserved in the trace buffer, the individual fields can be set.  There
+are two ways to do that, either one after another for each field in
+the event, which requires no lookups, or by name, which does.  The
+tradeoff is flexibility in doing the assignments vs the cost of a
+lookup per field.
+
+To assign the values one after the other without lookups,
+synth_event_add_next_val() should be used.  Each call is passed the
+same synth_trace_state object used in the synth_event_trace_start(),
+along with the value to set the next field in the event.  After each
+field is set, the 'cursor' points to the next field, which will be set
+by the subsequent call, continuing until all the fields have been set
+in order.  The same sequence of calls as in the above examples using
+this method would be (without error-handling code):
+
+       /* next_pid_field */
+       ret = synth_event_add_next_val(777, &trace_state);
+
+       /* next_comm_field */
+       ret = synth_event_add_next_val((u64)"slinky", &trace_state);
+
+       /* ts_ns */
+       ret = synth_event_add_next_val(1000000, &trace_state);
+
+       /* ts_ms */
+       ret = synth_event_add_next_val(1000, &trace_state);
+
+       /* cpu */
+       ret = synth_event_add_next_val(smp_processor_id(), &trace_state);
+
+       /* my_string_field */
+       ret = synth_event_add_next_val((u64)"thneed_2.01", &trace_state);
+
+       /* my_int_field */
+       ret = synth_event_add_next_val(395, &trace_state);
+
+To assign the values in any order, synth_event_add_val() should be
+used.  Each call is passed the same synth_trace_state object used in
+the synth_event_trace_start(), along with the field name of the field
+to set and the value to set it to.  The same sequence of calls as in
+the above examples using this method would be (without error-handling
+code):
+
+       ret = synth_event_add_val("next_pid_field", 777, &trace_state);
+       ret = synth_event_add_val("next_comm_field", (u64)"silly putty",
+                                 &trace_state);
+       ret = synth_event_add_val("ts_ns", 1000000, &trace_state);
+       ret = synth_event_add_val("ts_ms", 1000, &trace_state);
+       ret = synth_event_add_val("cpu", smp_processor_id(), &trace_state);
+       ret = synth_event_add_val("my_string_field", (u64)"thneed_9",
+                                 &trace_state);
+       ret = synth_event_add_val("my_int_field", 3999, &trace_state);
+
+Note that synth_event_add_next_val() and synth_event_add_val() are
+incompatible if used within the same trace of an event - either one
+can be used but not both at the same time.
+
+Finally, the event won't be actually traced until it's 'closed',
+which is done using synth_event_trace_end(), which takes only the
+struct synth_trace_state object used in the previous calls:
+
+       ret = synth_event_trace_end(&trace_state);
+
+Note that synth_event_trace_end() must be called at the end regardless
+of whether any of the add calls failed (say due to a bad field name
+being passed in).
+
+6.3.4 Dynamically creating kprobe and kretprobe event definitions
+------------------------------------------------------------------
+
+To create a kprobe or kretprobe trace event from kernel code, the
+kprobe_event_gen_cmd_start() or kretprobe_event_gen_cmd_start()
+functions can be used.
+
+To create a kprobe event, an empty or partially empty kprobe event
+should first be created using kprobe_event_gen_cmd_start().  The name
+of the event and the probe location, along with one or more args each
+representing a probe field, should be supplied to this
+function.  Before calling kprobe_event_gen_cmd_start(), the user
+should create and initialize a dynevent_cmd object using
+kprobe_event_cmd_init().
+
+For example, to create a new "schedtest" kprobe event with two fields:
+
+  struct dynevent_cmd cmd;
+  char *buf;
+
+  /* Create a buffer to hold the generated command */
+  buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+
+  /* Before generating the command, initialize the cmd object */
+  kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+  /*
+   * Define the gen_kprobe_test event with the first 2 kprobe
+   * fields.
+   */
+  ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test", "do_sys_open",
+                                   "dfd=%ax", "filename=%dx");
+
+Once the kprobe event object has been created, it can then be
+populated with more fields.  Fields can be added using
+kprobe_event_add_fields(), supplying the dynevent_cmd object along
+with a variable arg list of probe fields.  For example, to add a
+couple additional fields, the following call could be made:
+
+  ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
+
+Once all the fields have been added, the event should be finalized and
+registered by calling the kprobe_event_gen_cmd_end() or
+kretprobe_event_gen_cmd_end() functions, depending on whether a kprobe
+or kretprobe command was started:
+
+  ret = kprobe_event_gen_cmd_end(&cmd);
+
+or
+
+  ret = kretprobe_event_gen_cmd_end(&cmd);
+
+At this point, the event object is ready to be used for tracing new
+events.
+
+Similarly, a kretprobe event can be created using
+kretprobe_event_gen_cmd_start() with a probe name and location and
+additional params such as $retval:
+
+  ret = kretprobe_event_gen_cmd_start(&cmd, "gen_kretprobe_test",
+                                      "do_sys_open", "$retval");
+
+Similar to the synthetic event case, code like the following can be
+used to enable the newly created kprobe event:
+
+  gen_kprobe_test = trace_get_event_file(NULL, "kprobes", "gen_kprobe_test");
+
+  ret = trace_array_set_clr_event(gen_kprobe_test->tr,
+                                  "kprobes", "gen_kprobe_test", true);
+
+Finally, also similar to synthetic events, the following code can be
+used to give the kprobe event file back and delete the event:
+
+  trace_put_event_file(gen_kprobe_test);
+
+  ret = kprobe_event_delete("gen_kprobe_test");
+
+6.3.5 The "dynevent_cmd" low-level API
+--------------------------------------
+
+Both the in-kernel synthetic event and kprobe interfaces are built on
+top of a lower-level "dynevent_cmd" interface.  This interface is
+meant to provide the basis for higher-level interfaces such as the
+synthetic and kprobe interfaces, which can be used as examples.
+
+The basic idea is simple and amounts to providing a general-purpose
+layer that can be used to generate trace event commands.  The
+generated command strings can then be passed to the command-parsing
+and event creation code that already exists in the trace event
+subsystem for creating the corresponding trace events.
+
+In a nutshell, the way it works is that the higher-level interface
+code creates a struct dynevent_cmd object, then uses a couple
+functions, dynevent_arg_add() and dynevent_arg_pair_add() to build up
+a command string, which finally causes the command to be executed
+using the dynevent_create() function.  The details of the interface
+are described below.
+
+The first step in building a new command string is to create and
+initialize an instance of a dynevent_cmd.  Here, for instance, we
+create a dynevent_cmd on the stack and initialize it:
+
+  struct dynevent_cmd cmd;
+  char *buf;
+  int ret;
+
+  buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+
+  dynevent_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN, DYNEVENT_TYPE_FOO,
+                    foo_event_run_command);
+
+The dynevent_cmd initialization needs to be given a user-specified
+buffer and the length of the buffer (MAX_DYNEVENT_CMD_LEN can be used
+for this purpose - at 2k it's generally too big to be comfortably put
+on the stack, so is dynamically allocated), a dynevent type id, which
+is meant to be used to check that further API calls are for the
+correct command type, and a pointer to an event-specific run_command()
+callback that will be called to actually execute the event-specific
+command function.
+
+Once that's done, the command string can be built up by successive
+calls to argument-adding functions.
+
+To add a single argument, define and initialize a struct dynevent_arg
+or struct dynevent_arg_pair object.  Here's an example of the simplest
+possible arg addition, which is simply to append the given string as
+a whitespace-separated argument to the command:
+
+  struct dynevent_arg arg;
+
+  dynevent_arg_init(&arg, NULL, 0);
+
+  arg.str = name;
+
+  ret = dynevent_arg_add(cmd, &arg);
+
+The arg object is first initialized using dynevent_arg_init() and in
+this case the parameters are NULL or 0, which means there's no
+optional sanity-checking function or separator appended to the end of
+the arg.
+
+Here's another more complicated example using an 'arg pair', which is
+used to create an argument that consists of a couple components added
+together as a unit, for example, a 'type field_name;' arg or a simple
+expression arg e.g. 'flags=%cx':
+
+  struct dynevent_arg_pair arg_pair;
+
+  dynevent_arg_pair_init(&arg_pair, dynevent_foo_check_arg_fn, 0, ';');
+
+  arg_pair.lhs = type;
+  arg_pair.rhs = name;
+
+  ret = dynevent_arg_pair_add(cmd, &arg_pair);
+
+Again, the arg_pair is first initialized, in this case with a callback
+function used to check the sanity of the args (for example, that
+neither part of the pair is NULL), along with a character to be used
+to add an operator between the pair (here none) and a separator to be
+appended onto the end of the arg pair (here ';').
+
+There's also a dynevent_str_add() function that can be used to simply
+add a string as-is, with no spaces, delimiters, or arg check.
+
+Any number of dynevent_*_add() calls can be made to build up the string
+(until its length surpasses cmd->maxlen).  When all the arguments have
+been added and the command string is complete, the only thing left to
+do is run the command, which happens by simply calling
+dynevent_create():
+
+  ret = dynevent_create(&cmd);
+
+At that point, if the return value is 0, the dynamic event has been
+created and is ready to use.
+
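+Putting the pieces above together, a minimal sketch of driving the
+dynevent_cmd API end to end might look like the following.  This is
+only an illustration assembled from the calls shown above: the
+DYNEVENT_TYPE_FOO type, the foo_event_run_command() callback, and the
+"foo_test_event" argument string are hypothetical placeholders, not
+part of the actual API:
+
+  struct dynevent_cmd cmd;
+  struct dynevent_arg arg;
+  char *buf;
+  int ret;
+
+  /* Allocate the command buffer and bind it to the cmd object. */
+  buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+
+  dynevent_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN, DYNEVENT_TYPE_FOO,
+                    foo_event_run_command);
+
+  /* Append one plain, space-separated arg (no check fn, no separator). */
+  dynevent_arg_init(&arg, NULL, 0);
+  arg.str = "foo_test_event";
+  ret = dynevent_arg_add(&cmd, &arg);
+
+  /* Run the completed command string to actually create the event. */
+  if (!ret)
+          ret = dynevent_create(&cmd);
+
+  kfree(buf);
+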
+See the dynevent_cmd function definitions themselves for the details
+of the API.
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index d2b5657..ff658e2 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -95,7 +95,8 @@
   current_tracer:
 
 	This is used to set or display the current tracer
-	that is configured.
+	that is configured. Changing the current tracer clears
+	the ring buffer content as well as the "snapshot" buffer.
 
   available_tracers:
 
@@ -126,7 +127,8 @@
 	This file holds the output of the trace in a human
 	readable format (described below). Note, tracing is temporarily
 	disabled when the file is open for reading. Once all readers
-	are closed, tracing is re-enabled.
+	are closed, tracing is re-enabled. Opening this file for
+	writing with the O_TRUNC flag clears the ring buffer content.
 
   trace_pipe:
 
@@ -185,7 +187,8 @@
 	CPU buffer and not total size of all buffers. The
 	trace buffers are allocated in pages (blocks of memory
 	that the kernel uses for allocation, usually 4 KB in size).
-	If the last page allocated has room for more bytes
+	A few extra pages may be allocated to accommodate buffer management
+	meta-data. If the last page allocated has room for more bytes
 	than requested, the rest of the page will be used,
 	making the actual allocation bigger than requested or shown.
 	( Note, the size may not be a multiple of the page size
@@ -235,7 +238,7 @@
 	This interface also allows for commands to be used. See the
 	"Filter commands" section for more details.
 
-	As a speed up, since processing strings can't be quite expensive
+	As a speed up, since processing strings can be quite expensive
 	and requires a check of all functions registered to tracing, instead
 	an index can be written into this file. A number (starting with "1")
 	written will instead select the same corresponding at the line position
@@ -382,7 +385,7 @@
 
 	By default, 128 comms are saved (see "saved_cmdlines" above). To
 	increase or decrease the amount of comms that are cached, echo
-	in a the number of comms to cache, into this file.
+	the number of comms to cache into this file.
 
   saved_tgids:
 
@@ -490,6 +493,9 @@
 
 	  # echo global > trace_clock
 
+	Setting a clock clears the ring buffer content as well as the
+	"snapshot" buffer.
+
   trace_marker:
 
 	This is a very useful file for synchronizing user space
@@ -3324,7 +3330,7 @@
 
 As you can see, the new directory looks similar to the tracing directory
 itself. In fact, it is very similar, except that the buffer and
-events are agnostic from the main director, or from any other
+events are agnostic from the main directory, or from any other
 instances that are created.
 
 The files in the new directory work just like the files with the
diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst
index 04acd27..fa9e1c7 100644
--- a/Documentation/trace/index.rst
+++ b/Documentation/trace/index.rst
@@ -19,6 +19,7 @@
    events-msr
    mmiotrace
    histogram
+   boottime-trace
    hwlat_detector
    intel_th
    stm
diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst
index 5599305..cc4c5fc 100644
--- a/Documentation/trace/kprobetrace.rst
+++ b/Documentation/trace/kprobetrace.rst
@@ -97,6 +97,7 @@
 For $comm, the default type is "string"; any other type is invalid.
 
 .. _user_mem_access:
+
 User Memory Access
 ------------------
 Kprobe events supports user-space memory access. For that purpose, you can use
@@ -252,4 +253,3 @@
 Each line shows when the kernel hits an event, and <- SYMBOL means kernel
 returns from SYMBOL(e.g. "sys_open+0x1b/0x1d <- do_sys_open" means kernel
 returns from do_sys_open to sys_open+0x1b).
-
diff --git a/Documentation/trace/ring-buffer-design.txt b/Documentation/trace/ring-buffer-design.txt
index ff747b6..2d53c6f 100644
--- a/Documentation/trace/ring-buffer-design.txt
+++ b/Documentation/trace/ring-buffer-design.txt
@@ -37,7 +37,7 @@
 
 cmpxchg - hardware-assisted atomic transaction that performs the following:
 
-   A = B iff previous A == C
+   A = B if previous A == C
 
    R = cmpxchg(A, C, B) is saying that we replace A with B if and only if
       current A is equal to C, and we put the old (current) A into R
diff --git a/Documentation/translations/it_IT/process/changes.rst b/Documentation/translations/it_IT/process/changes.rst
index 94a6499..37da444 100644
--- a/Documentation/translations/it_IT/process/changes.rst
+++ b/Documentation/translations/it_IT/process/changes.rst
@@ -390,7 +390,8 @@
 E2fsprogs
 ---------
 
-- <http://prdownloads.sourceforge.net/e2fsprogs/e2fsprogs-1.29.tar.gz>
+- <https://www.kernel.org/pub/linux/kernel/people/tytso/e2fsprogs/>
+- <https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/>
 
 JFSutils
 --------
@@ -400,12 +401,13 @@
 Reiserfsprogs
 -------------
 
-- <http://www.kernel.org/pub/linux/utils/fs/reiserfs/>
+- <https://git.kernel.org/pub/scm/linux/kernel/git/jeffm/reiserfsprogs.git/>
 
 Xfsprogs
 --------
 
-- <ftp://oss.sgi.com/projects/xfs/>
+- <https://git.kernel.org/pub/scm/fs/xfs/xfsprogs-dev.git>
+- <https://www.kernel.org/pub/linux/utils/fs/xfs/xfsprogs/>
 
 Pcmciautils
 -----------
@@ -444,7 +446,9 @@
 PPP
 ---
 
-- <ftp://ftp.samba.org/pub/ppp/>
+- <https://download.samba.org/pub/ppp/>
+- <https://git.ozlabs.org/?p=ppp.git>
+- <https://github.com/paulusmack/ppp/>
 
 
 NFS-utils
@@ -455,7 +459,7 @@
 Iptables
 --------
 
-- <http://www.iptables.org/downloads.html>
+- <https://netfilter.org/projects/iptables/index.html>
 
 Ip-route2
 ---------
diff --git a/Documentation/translations/ko_KR/howto.rst b/Documentation/translations/ko_KR/howto.rst
index ae3ad89..71d4823 100644
--- a/Documentation/translations/ko_KR/howto.rst
+++ b/Documentation/translations/ko_KR/howto.rst
@@ -318,8 +318,8 @@
 리뷰 프로세스는 patchwork라는 도구를 통해 추적된다. patchwork은 등록된 패치와
 패치에 대한 코멘트, 패치의 버전을 볼 수 있는 웹 인터페이스를 제공하고,
 메인테이너는 패치를 리뷰 중, 리뷰 통과, 또는 반려됨으로 표시할 수 있다.
-대부분의 이러한 patchwork 사이트는 https://patchwork.kernel.org/ 또는
-http://patchwork.ozlabs.org/ 에 나열되어 있다.
+대부분의 이러한 patchwork 사이트는 https://patchwork.kernel.org/ 에 나열되어
+있다.
 
 통합 테스트를 위한 linux-next 커널 트리
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -328,7 +328,7 @@
 거쳐야 한다. 이런 목적으로, 모든 서브시스템 트리의 변경사항을 거의 매일
 받아가는 특수한 테스트 저장소가 존재한다:
 
-       https://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git
+       https://git.kernel.org/?p=linux/kernel/git/next/linux-next.git
 
 이런 식으로, linux-next 커널을 통해 다음 머지 기간에 메인라인 커널에 어떤
 변경이 가해질 것인지 간략히 알 수 있다. 모험심 강한 테스터라면 linux-next
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index f07c40a..2e831ec 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -2413,7 +2413,7 @@
 알고 있는, - inb() 나 writel() 과 같은 - 적절한 액세스 루틴을 통해 이루어져야만
 합니다.  이것들은 대부분의 경우에는 명시적 메모리 배리어 와 함께 사용될 필요가
 없습니다만, 완화된 메모리 액세스 속성으로 I/O 메모리 윈도우로의 참조를 위해
-액세스 함수가 사용된다면 순서를 강제하기 위해 _madatory_ 메모리 배리어가
+액세스 함수가 사용된다면 순서를 강제하기 위해 _mandatory_ 메모리 배리어가
 필요합니다.
 
 더 많은 정보를 위해선 Documentation/driver-api/device-io.rst 를 참고하십시오.
@@ -2528,7 +2528,7 @@
 	이것들은 readX() 와 writeX() 랑 비슷하지만, 더 완화된 메모리 순서
 	보장을 제공합니다.  구체적으로, 이것들은 일반적 메모리 액세스나 delay()
 	루프 (예:앞의 2-5 항목) 에 대해 순서를 보장하지 않습니다만 디폴트 I/O
-	기능으로 매핑된 __iomem 포인터에 대해 동작할 때, 같은 CPU 쓰레드에 의해
+	기능으로 매핑된 __iomem 포인터에 대해 동작할 때, 같은 CPU 쓰레드에 의한
 	같은 주변장치로의 액세스에는 순서가 맞춰질 것이 보장됩니다.
 
  (*) readsX(), writesX():
diff --git a/Documentation/translations/zh_CN/process/embargoed-hardware-issues.rst b/Documentation/translations/zh_CN/process/embargoed-hardware-issues.rst
new file mode 100644
index 0000000..b93f1af
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/embargoed-hardware-issues.rst
@@ -0,0 +1,228 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/embargoed-hardware-issues.rst <embargoed_hardware_issues>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+被限制的硬件问题
+================
+
+范围
+----
+
+导致安全问题的硬件问题与只影响Linux内核的纯软件错误是不同的安全错误类别。
+
+必须区别对待诸如熔毁(Meltdown)、Spectre、L1TF等硬件问题,因为它们通常会影响
+所有操作系统(“OS”),因此需要在不同的OS供应商、发行版、硬件供应商和其他各方
+之间进行协调。对于某些问题,软件缓解可能依赖于微码或固件更新,这需要进一步的
+协调。
+
+.. _zh_Contact:
+
+接触
+----
+
+Linux内核硬件安全小组独立于普通的Linux内核安全小组。
+
+该小组只负责协调被限制的硬件安全问题。Linux内核中纯软件安全漏洞的报告不由该
+小组处理,报告者将被引导至常规Linux内核安全小组(:ref:`Documentation/admin-guide/
+<securitybugs>`)联系。
+
+可以通过电子邮件 <hardware-security@kernel.org> 与小组联系。这是一份私密的安全
+官名单,他们将帮助您根据我们的文档化流程协调问题。
+
+邮件列表是加密的,发送到列表的电子邮件可以通过PGP或S/MIME加密,并且必须使用报告
+者的PGP密钥或S/MIME证书签名。该列表的PGP密钥和S/MIME证书可从
+https://www.kernel.org/.... 获得。
+
+虽然硬件安全问题通常由受影响的硬件供应商处理,但我们欢迎发现潜在硬件缺陷的研究
+人员或个人与我们联系。
+
+硬件安全官
+^^^^^^^^^^
+
+目前的硬件安全官小组:
+
+  - Linus Torvalds(Linux基金会院士)
+  - Greg Kroah Hartman(Linux基金会院士)
+  - Thomas Gleixner(Linux基金会院士)
+
+邮件列表的操作
+^^^^^^^^^^^^^^
+
+处理流程中使用的加密邮件列表托管在Linux Foundation的IT基础设施上。通过提供这项
+服务,Linux基金会的IT基础设施安全总监在技术上有能力访问被限制的信息,但根据他
+的雇佣合同,他必须保密。Linux基金会的IT基础设施安全总监还负责 kernel.org 基础
+设施。
+
+Linux基金会目前的IT基础设施安全总监是 Konstantin Ryabitsev。
+
+保密协议
+--------
+
+Linux内核硬件安全小组不是正式的机构,因此无法签订任何保密协议。核心社区意识到
+这些问题的敏感性,并提供了一份谅解备忘录。
+
+谅解备忘录
+----------
+
+Linux内核社区深刻理解在不同操作系统供应商、发行商、硬件供应商和其他各方之间
+进行协调时,保持硬件安全问题处于限制状态的要求。
+
+Linux内核社区在过去已经成功地处理了硬件安全问题,并且有必要的机制允许在限制
+限制下进行符合社区的开发。
+
+Linux内核社区有一个专门的硬件安全小组负责初始联系,并监督在限制规则下处理
+此类问题的过程。
+
+硬件安全小组确定开发人员(领域专家),他们将组成特定问题的初始响应小组。最初
+的响应小组可以引入更多的开发人员(领域专家)以最佳的技术方式解决这个问题。
+
+所有相关开发商承诺遵守限制规定,并对收到的信息保密。违反承诺将导致立即从当前
+问题中排除,并从所有相关邮件列表中删除。此外,硬件安全小组还将把违反者排除在
+未来的问题之外。这一后果的影响在我们社区是一种非常有效的威慑。如果发生违规
+情况,硬件安全小组将立即通知相关方。如果您或任何人发现潜在的违规行为,请立即
+向硬件安全人员报告。
+
+流程
+^^^^
+
+由于Linux内核开发的全球分布式特性,面对面的会议几乎不可能解决硬件安全问题。
+由于时区和其他因素,电话会议很难协调,只能在绝对必要时使用。加密电子邮件已被
+证明是解决此类问题的最有效和最安全的通信方法。
+
+开始披露
+""""""""
+
+披露内容首先通过电子邮件联系Linux内核硬件安全小组。此初始联系人应包含问题的
+描述和任何已知受影响硬件的列表。如果您的组织制造或分发受影响的硬件,我们建议
+您也考虑哪些其他硬件可能会受到影响。
+
+硬件安全小组将提供一个特定于事件的加密邮件列表,用于与报告者进行初步讨论、
+进一步披露和协调。
+
+硬件安全小组将向披露方提供一份开发人员(领域专家)名单,在与开发人员确认他们
+将遵守本谅解备忘录和文件化流程后,应首先告知开发人员有关该问题的信息。这些开发
+人员组成初始响应小组,并在初始接触后负责处理问题。硬件安全小组支持响应小组,
+但不一定参与缓解开发过程。
+
+虽然个别开发人员可能通过其雇主受到保密协议的保护,但他们不能以Linux内核开发
+人员的身份签订个别保密协议。但是,他们将同意遵守这一书面程序和谅解备忘录。
+
+披露方应提供已经或应该被告知该问题的所有其他实体的联系人名单。这有几个目的:
+
+  - 披露的实体列表允许跨行业通信,例如其他操作系统供应商、硬件供应商等。
+
+  - 可联系已披露的实体,指定应参与缓解措施开发的专家。
+
+  - 如果需要处理某一问题的专家受雇于某一上市实体或某一上市实体的成员,则响应
+    小组可要求该实体披露该专家。这确保专家也是实体反应小组的一部分。
+
+披露
+""""
+
+披露方通过特定的加密邮件列表向初始响应小组提供详细信息。
+
+根据我们的经验,这些问题的技术文档通常是一个足够的起点,最好通过电子邮件进行
+进一步的技术澄清。
+
+缓解开发
+""""""""
+
+初始响应小组设置加密邮件列表,或在适当的情况下重新修改现有邮件列表。
+
+使用邮件列表接近于正常的Linux开发过程,并且在过去已经成功地用于为各种硬件安全
+问题开发缓解措施。
+
+邮件列表的操作方式与正常的Linux开发相同。发布、讨论和审查修补程序,如果同意,
+则应用于非公共git存储库,参与开发人员只能通过安全连接访问该存储库。存储库包含
+针对主线内核的主开发分支,并根据需要为稳定的内核版本提供向后移植分支。
+
+最初的响应小组将根据需要从Linux内核开发人员社区中确定更多的专家。引进专家可以
+在开发过程中的任何时候发生,需要及时处理。
+
+如果专家受雇于披露方提供的披露清单上的实体或其成员,则相关实体将要求其参与。
+
+否则,披露方将被告知专家参与的情况。谅解备忘录涵盖了专家,要求披露方确认参与。
+如果披露方有令人信服的理由提出异议,则必须在五个工作日内提出异议,并立即与事件
+小组解决。如果披露方在五个工作日内未作出回应,则视为默许。
+
+在确认或解决异议后,专家由事件小组披露,并进入开发过程。
+
+协调发布
+""""""""
+
+有关各方将协商限制结束的日期和时间。此时,准备好的缓解措施集成到相关的内核树中
+并发布。
+
+虽然我们理解硬件安全问题需要协调限制时间,但限制时间应限制在所有有关各方制定、
+测试和准备缓解措施所需的最短时间内。人为地延长限制时间以满足会议讨论日期或其他
+非技术原因,会给相关的开发人员和响应小组带来了更多的工作和负担,因为补丁需要
+保持最新,以便跟踪正在进行的上游内核开发,这可能会造成冲突的更改。
+
+CVE分配
+"""""""
+
+硬件安全小组和初始响应小组都不分配CVE,开发过程也不需要CVE。如果CVE是由披露方
+提供的,则可用于文档中。
+
+流程专使
+--------
+
+为了协助这一进程,我们在各组织设立了专使,他们可以回答有关报告流程和进一步处理
+的问题或提供指导。专使不参与特定问题的披露,除非响应小组或相关披露方提出要求。
+现任专使名单:
+
+  ============= ========================================================
+  ARM
+  AMD		Tom Lendacky <tom.lendacky@amd.com>
+  IBM
+  Intel		Tony Luck <tony.luck@intel.com>
+  Qualcomm	Trilok Soni <tsoni@codeaurora.org>
+
+  Microsoft	Sasha Levin <sashal@kernel.org>
+  VMware
+  Xen		Andrew Cooper <andrew.cooper3@citrix.com>
+
+  Canonical	Tyler Hicks <tyhicks@canonical.com>
+  Debian	Ben Hutchings <ben@decadent.org.uk>
+  Oracle	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+  Red Hat	Josh Poimboeuf <jpoimboe@redhat.com>
+  SUSE		Jiri Kosina <jkosina@suse.cz>
+
+  Amazon
+  Google	Kees Cook <keescook@chromium.org>
+  ============= ========================================================
+
+如果要将您的组织添加到专使名单中,请与硬件安全小组联系。被提名的专使必须完全
+理解和支持我们的过程,并且在Linux内核社区中很容易联系。
+
+加密邮件列表
+------------
+
+我们使用加密邮件列表进行通信。这些列表的工作原理是,发送到列表的电子邮件使用
+列表的PGP密钥或列表的/MIME证书进行加密。邮件列表软件对电子邮件进行解密,并
+使用订阅者的PGP密钥或S/MIME证书为每个订阅者分别对其进行重新加密。有关邮件列表
+软件和用于确保列表安全和数据保护的设置的详细信息,请访问:
+https://www.kernel.org/....
+
+关键点
+^^^^^^
+
+初次接触见 :ref:`zh_Contact`. 对于特定于事件的邮件列表,密钥和S/MIME证书通过
+特定列表发送的电子邮件传递给订阅者。
+
+订阅事件特定列表
+^^^^^^^^^^^^^^^^
+
+订阅由响应小组处理。希望参与通信的披露方将潜在订户的列表发送给响应组,以便
+响应组可以验证订阅请求。
+
+每个订户都需要通过电子邮件向响应小组发送订阅请求。电子邮件必须使用订阅服务器
+的PGP密钥或S/MIME证书签名。如果使用PGP密钥,则必须从公钥服务器获得该密钥,
+并且理想情况下该密钥连接到Linux内核的PGP信任网。另请参见:
+https://www.kernel.org/signature.html.
+
+响应小组验证订阅者,并将订阅者添加到列表中。订阅后,订阅者将收到来自邮件列表
+的电子邮件,该邮件列表使用列表的PGP密钥或列表的/MIME证书签名。订阅者的电子邮件
+客户端可以从签名中提取PGP密钥或S/MIME证书,以便订阅者可以向列表发送加密电子
+邮件。
diff --git a/Documentation/translations/zh_CN/process/index.rst b/Documentation/translations/zh_CN/process/index.rst
index be1e764..8051a7b 100644
--- a/Documentation/translations/zh_CN/process/index.rst
+++ b/Documentation/translations/zh_CN/process/index.rst
@@ -31,6 +31,8 @@
    development-process
    email-clients
    license-rules
+   kernel-enforcement-statement
+   kernel-driver-statement
 
 其它大多数开发人员感兴趣的社区指南:
 
@@ -43,6 +45,7 @@
    stable-api-nonsense
    stable-kernel-rules
    management-style
+   embargoed-hardware-issues
 
 这些是一些总体技术指南,由于缺乏更好的地方,现在已经放在这里
 
diff --git a/Documentation/translations/zh_CN/process/kernel-driver-statement.rst b/Documentation/translations/zh_CN/process/kernel-driver-statement.rst
new file mode 100644
index 0000000..2b3375bc
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/kernel-driver-statement.rst
@@ -0,0 +1,199 @@
+.. _cn_process_statement_driver:
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/kernel-driver-statement.rst <process_statement_driver>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+内核驱动声明
+------------
+
+关于Linux内核模块的立场声明
+===========================
+
+我们,以下署名的Linux内核开发人员,认为任何封闭源Linux内核模块或驱动程序都是
+有害的和不可取的。我们已经一再发现它们对Linux用户,企业和更大的Linux生态系统
+有害。这样的模块否定了Linux开发模型的开放性,稳定性,灵活性和可维护性,并使
+他们的用户无法使用Linux社区的专业知识。提供闭源内核模块的供应商迫使其客户
+放弃Linux的主要优势或选择新的供应商。因此,为了充分利用开源所提供的成本节省和
+共享支持优势,我们敦促供应商采取措施,以开源内核代码在Linux上为其客户提供支持。
+
+我们只为自己说话,而不是我们今天可能会为之工作,过去或将来会为之工作的任何公司。
+
+ - Dave Airlie
+ - Nick Andrew
+ - Jens Axboe
+ - Ralf Baechle
+ - Felipe Balbi
+ - Ohad Ben-Cohen
+ - Muli Ben-Yehuda
+ - Jiri Benc
+ - Arnd Bergmann
+ - Thomas Bogendoerfer
+ - Vitaly Bordug
+ - James Bottomley
+ - Josh Boyer
+ - Neil Brown
+ - Mark Brown
+ - David Brownell
+ - Michael Buesch
+ - Franck Bui-Huu
+ - Adrian Bunk
+ - François Cami
+ - Ralph Campbell
+ - Luiz Fernando N. Capitulino
+ - Mauro Carvalho Chehab
+ - Denis Cheng
+ - Jonathan Corbet
+ - Glauber Costa
+ - Alan Cox
+ - Magnus Damm
+ - Ahmed S. Darwish
+ - Robert P. J. Day
+ - Hans de Goede
+ - Arnaldo Carvalho de Melo
+ - Helge Deller
+ - Jean Delvare
+ - Mathieu Desnoyers
+ - Sven-Thorsten Dietrich
+ - Alexey Dobriyan
+ - Daniel Drake
+ - Alex Dubov
+ - Randy Dunlap
+ - Michael Ellerman
+ - Pekka Enberg
+ - Jan Engelhardt
+ - Mark Fasheh
+ - J. Bruce Fields
+ - Larry Finger
+ - Jeremy Fitzhardinge
+ - Mike Frysinger
+ - Kumar Gala
+ - Robin Getz
+ - Liam Girdwood
+ - Jan-Benedict Glaw
+ - Thomas Gleixner
+ - Brice Goglin
+ - Cyrill Gorcunov
+ - Andy Gospodarek
+ - Thomas Graf
+ - Krzysztof Halasa
+ - Harvey Harrison
+ - Stephen Hemminger
+ - Michael Hennerich
+ - Tejun Heo
+ - Benjamin Herrenschmidt
+ - Kristian Høgsberg
+ - Henrique de Moraes Holschuh
+ - Marcel Holtmann
+ - Mike Isely
+ - Takashi Iwai
+ - Olof Johansson
+ - Dave Jones
+ - Jesper Juhl
+ - Matthias Kaehlcke
+ - Kenji Kaneshige
+ - Jan Kara
+ - Jeremy Kerr
+ - Russell King
+ - Olaf Kirch
+ - Roel Kluin
+ - Hans-Jürgen Koch
+ - Auke Kok
+ - Peter Korsgaard
+ - Jiri Kosina
+ - Aaro Koskinen
+ - Mariusz Kozlowski
+ - Greg Kroah-Hartman
+ - Michael Krufky
+ - Aneesh Kumar
+ - Clemens Ladisch
+ - Christoph Lameter
+ - Gunnar Larisch
+ - Anders Larsen
+ - Grant Likely
+ - John W. Linville
+ - Yinghai Lu
+ - Tony Luck
+ - Pavel Machek
+ - Matt Mackall
+ - Paul Mackerras
+ - Roland McGrath
+ - Patrick McHardy
+ - Kyle McMartin
+ - Paul Menage
+ - Thierry Merle
+ - Eric Miao
+ - Akinobu Mita
+ - Ingo Molnar
+ - James Morris
+ - Andrew Morton
+ - Paul Mundt
+ - Oleg Nesterov
+ - Luca Olivetti
+ - S.Çağlar Onur
+ - Pierre Ossman
+ - Keith Owens
+ - Venkatesh Pallipadi
+ - Nick Piggin
+ - Nicolas Pitre
+ - Evgeniy Polyakov
+ - Richard Purdie
+ - Mike Rapoport
+ - Sam Ravnborg
+ - Gerrit Renker
+ - Stefan Richter
+ - David Rientjes
+ - Luis R. Rodriguez
+ - Stefan Roese
+ - Francois Romieu
+ - Rami Rosen
+ - Stephen Rothwell
+ - Maciej W. Rozycki
+ - Mark Salyzyn
+ - Yoshinori Sato
+ - Deepak Saxena
+ - Holger Schurig
+ - Amit Shah
+ - Yoshihiro Shimoda
+ - Sergei Shtylyov
+ - Kay Sievers
+ - Sebastian Siewior
+ - Rik Snel
+ - Jes Sorensen
+ - Alexey Starikovskiy
+ - Alan Stern
+ - Timur Tabi
+ - Hirokazu Takata
+ - Eliezer Tamir
+ - Eugene Teo
+ - Doug Thompson
+ - FUJITA Tomonori
+ - Dmitry Torokhov
+ - Marcelo Tosatti
+ - Steven Toth
+ - Theodore Tso
+ - Matthias Urlichs
+ - Geert Uytterhoeven
+ - Arjan van de Ven
+ - Ivo van Doorn
+ - Rik van Riel
+ - Wim Van Sebroeck
+ - Hans Verkuil
+ - Horst H. von Brand
+ - Dmitri Vorobiev
+ - Anton Vorontsov
+ - Daniel Walker
+ - Johannes Weiner
+ - Harald Welte
+ - Matthew Wilcox
+ - Dan J. Williams
+ - Darrick J. Wong
+ - David Woodhouse
+ - Chris Wright
+ - Bryan Wu
+ - Rafael J. Wysocki
+ - Herbert Xu
+ - Vlad Yasevich
+ - Peter Zijlstra
+ - Bartlomiej Zolnierkiewicz
diff --git a/Documentation/translations/zh_CN/process/kernel-enforcement-statement.rst b/Documentation/translations/zh_CN/process/kernel-enforcement-statement.rst
new file mode 100644
index 0000000..75f7b7b
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/kernel-enforcement-statement.rst
@@ -0,0 +1,151 @@
+.. _cn_process_statement_kernel:
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/kernel-enforcement-statement.rst <process_statement_kernel>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+Linux 内核执行声明
+------------------
+
+作为Linux内核的开发人员,我们对如何使用我们的软件以及如何实施软件许可证有着
+浓厚的兴趣。遵守GPL-2.0的互惠共享义务对我们软件和社区的长期可持续性至关重要。
+
+虽然有权强制执行对我们社区的贡献中的单独版权权益,但我们有共同的利益,即确保
+个人强制执行行动的方式有利于我们的社区,不会对我们软件生态系统的健康和增长
+产生意外的负面影响。为了阻止无益的执法行动,我们同意代表我们自己和我们版权
+利益的任何继承人对Linux内核用户作出以下符合我们开发社区最大利益的承诺:
+
+    尽管有GPL-2.0的终止条款,我们同意,采用以下GPL-3.0条款作为我们许可证下的
+    附加许可,作为任何对许可证下权利的非防御性主张,这符合我们开发社区的最佳
+    利益。
+
+        但是,如果您停止所有违反本许可证的行为,则您从特定版权持有人处获得的
+        许可证将被恢复:(a)暂时恢复,除非版权持有人明确并最终终止您的许可证;
+        以及(b)永久恢复, 如果版权持有人未能在你终止违反后60天内以合理方式
+        通知您违反本许可证的行为,则永久恢复您的许可证。
+
+        此外,如果版权所有者以某种合理的方式通知您违反了本许可,这是您第一次
+        从该版权所有者处收到违反本许可的通知(对于任何作品),并且您在收到通知
+        后的30天内纠正违规行为。则您从特定版权所有者处获得的许可将永久恢复.
+
+我们提供这些保证的目的是鼓励更多地使用该软件。我们希望公司和个人使用、修改和
+分发此软件。我们希望以公开和透明的方式与用户合作,以消除我们对法规遵从性或强制
+执行的任何不确定性,这些不确定性可能会限制我们软件的采用。我们将法律行动视为
+最后手段,只有在其他社区努力未能解决这一问题时才采取行动。
+
+最后,一旦一个不合规问题得到解决,我们希望用户会感到欢迎,加入我们为之努力的
+这个项目。共同努力,我们会更强大。
+
+除了下面提到的以外,我们只为自己说话,而不是为今天、过去或将来可能为之工作的
+任何公司说话。
+
+  - Laura Abbott
+  - Bjorn Andersson (Linaro)
+  - Andrea Arcangeli
+  - Neil Armstrong
+  - Jens Axboe
+  - Pablo Neira Ayuso
+  - Khalid Aziz
+  - Ralf Baechle
+  - Felipe Balbi
+  - Arnd Bergmann
+  - Ard Biesheuvel
+  - Tim Bird
+  - Paolo Bonzini
+  - Christian Borntraeger
+  - Mark Brown (Linaro)
+  - Paul Burton
+  - Javier Martinez Canillas
+  - Rob Clark
+  - Kees Cook (Google)
+  - Jonathan Corbet
+  - Dennis Dalessandro
+  - Vivien Didelot (Savoir-faire Linux)
+  - Hans de Goede
+  - Mel Gorman (SUSE)
+  - Sven Eckelmann
+  - Alex Elder (Linaro)
+  - Fabio Estevam
+  - Larry Finger
+  - Bhumika Goyal
+  - Andy Gross
+  - Juergen Gross
+  - Shawn Guo
+  - Ulf Hansson
+  - Stephen Hemminger (Microsoft)
+  - Tejun Heo
+  - Rob Herring
+  - Masami Hiramatsu
+  - Michal Hocko
+  - Simon Horman
+  - Johan Hovold (Hovold Consulting AB)
+  - Christophe JAILLET
+  - Olof Johansson
+  - Lee Jones (Linaro)
+  - Heiner Kallweit
+  - Srinivas Kandagatla
+  - Jan Kara
+  - Shuah Khan (Samsung)
+  - David Kershner
+  - Jaegeuk Kim
+  - Namhyung Kim
+  - Colin Ian King
+  - Jeff Kirsher
+  - Greg Kroah-Hartman (Linux Foundation)
+  - Christian König
+  - Vinod Koul
+  - Krzysztof Kozlowski
+  - Viresh Kumar
+  - Aneesh Kumar K.V
+  - Julia Lawall
+  - Doug Ledford
+  - Chuck Lever (Oracle)
+  - Daniel Lezcano
+  - Shaohua Li
+  - Xin Long
+  - Tony Luck
+  - Catalin Marinas (Arm Ltd)
+  - Mike Marshall
+  - Chris Mason
+  - Paul E. McKenney
+  - Arnaldo Carvalho de Melo
+  - David S. Miller
+  - Ingo Molnar
+  - Kuninori Morimoto
+  - Trond Myklebust
+  - Martin K. Petersen (Oracle)
+  - Borislav Petkov
+  - Jiri Pirko
+  - Josh Poimboeuf
+  - Sebastian Reichel (Collabora)
+  - Guenter Roeck
+  - Joerg Roedel
+  - Leon Romanovsky
+  - Steven Rostedt (VMware)
+  - Frank Rowand
+  - Ivan Safonov
+  - Anna Schumaker
+  - Jes Sorensen
+  - K.Y. Srinivasan
+  - David Sterba (SUSE)
+  - Heiko Stuebner
+  - Jiri Kosina (SUSE)
+  - Willy Tarreau
+  - Dmitry Torokhov
+  - Linus Torvalds
+  - Thierry Reding
+  - Rik van Riel
+  - Luis R. Rodriguez
+  - Geert Uytterhoeven (Glider bvba)
+  - Eduardo Valentin (Amazon.com)
+  - Daniel Vetter
+  - Linus Walleij
+  - Richard Weinberger
+  - Dan Williams
+  - Rafael J. Wysocki
+  - Arvind Yadav
+  - Masahiro Yamada
+  - Wei Yongjun
+  - Lv Zheng
+  - Marc Zyngier (Arm Ltd)
diff --git a/Documentation/usb/index.rst b/Documentation/usb/index.rst
index e55386a..36b6ebd 100644
--- a/Documentation/usb/index.rst
+++ b/Documentation/usb/index.rst
@@ -22,11 +22,9 @@
     misc_usbsevseg
     mtouchusb
     ohci
-    rio
     usbip_protocol
     usbmon
     usb-serial
-    wusb-design-overview
 
     usb-help
     text_files
diff --git a/Documentation/usb/text_files.rst b/Documentation/usb/text_files.rst
index 6a8d3fc..1c18c05 100644
--- a/Documentation/usb/text_files.rst
+++ b/Documentation/usb/text_files.rst
@@ -16,12 +16,6 @@
 .. literalinclude:: usbdevfs-drop-permissions.c
     :language: c
 
-WUSB command line script to manipulate auth credentials
--------------------------------------------------------
-
-.. literalinclude:: wusb-cbaf
-   :language: shell
-
 Credits
 -------
 
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 4ef8643..2e91370 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -132,7 +132,6 @@
 'F'   80-8F  linux/arcfb.h                                           conflict!
 'F'   DD     video/sstfb.h                                           conflict!
 'G'   00-3F  drivers/misc/sgi-gru/grulib.h                           conflict!
-'G'   00-0F  linux/gigaset_dev.h                                     conflict!
 'H'   00-7F  linux/hiddev.h                                          conflict!
 'H'   00-0F  linux/hidraw.h                                          conflict!
 'H'   01     linux/mei.h                                             conflict!
diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt
index ebb37b3..c6e1ce5 100644
--- a/Documentation/virt/kvm/api.txt
+++ b/Documentation/virt/kvm/api.txt
@@ -2196,6 +2196,15 @@
 arm64 system registers have the following id bit patterns:
   0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
 
+WARNING:
+     Two system register IDs do not follow the specified pattern.  These
+     are KVM_REG_ARM_TIMER_CVAL and KVM_REG_ARM_TIMER_CNT, which map to
+     system registers CNTV_CVAL_EL0 and CNTVCT_EL0 respectively.  These
+     two had their values accidentally swapped, which means TIMER_CVAL is
+     derived from the register encoding for CNTVCT_EL0 and TIMER_CNT is
+     derived from the register encoding for CNTV_CVAL_EL0.  As this is
+     API, it must remain this way.
+
 arm64 firmware pseudo-registers have the following bit pattern:
   0x6030 0000 0014 <regno:16>
 
@@ -4168,6 +4177,42 @@
 unpins the VPA pages and releases all the device pages that are used to
 track the secure pages by hypervisor.
 
+4.122 KVM_S390_NORMAL_RESET
+
+Capability: KVM_CAP_S390_VCPU_RESETS
+Architectures: s390
+Type: vcpu ioctl
+Parameters: none
+Returns: 0
+
+This ioctl resets VCPU registers and control structures according to
+the cpu reset definition in the POP (Principles Of Operation).
+
+4.123 KVM_S390_INITIAL_RESET
+
+Capability: none
+Architectures: s390
+Type: vcpu ioctl
+Parameters: none
+Returns: 0
+
+This ioctl resets VCPU registers and control structures according to
+the initial cpu reset definition in the POP. However, the cpu is not
+put into ESA mode. This reset is a superset of the normal reset.
+
+4.124 KVM_S390_CLEAR_RESET
+
+Capability: KVM_CAP_S390_VCPU_RESETS
+Architectures: s390
+Type: vcpu ioctl
+Parameters: none
+Returns: 0
+
+This ioctl resets VCPU registers and control structures according to
+the clear cpu reset definition in the POP. However, the cpu is not put
+into ESA mode. This reset is a superset of the initial reset.
+
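+As an illustration only, a minimal userspace sketch for issuing one of
+these resets could look as follows; the kvm_fd (the /dev/kvm file
+descriptor) and vcpu_fd (an already-created VCPU file descriptor) are
+assumed to exist, and <linux/kvm.h> must come from a kernel that
+provides these ioctls:
+
+  /* Check for the capability on the /dev/kvm system fd first. */
+  if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_VCPU_RESETS) > 0)
+          /* The reset ioctls take no argument and return 0 on success. */
+          ret = ioctl(vcpu_fd, KVM_S390_NORMAL_RESET, 0);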
+
 5. The kvm_run structure
 ------------------------
 
@@ -5396,3 +5441,10 @@
 flush hypercalls by Hyper-V) so userspace should disable KVM identification
 in CPUID and only exposes Hyper-V identification. In this case, guest
 thinks it's running on Hyper-V and only use Hyper-V hypercalls.
+
+8.22 KVM_CAP_S390_VCPU_RESETS
+
+Architectures: s390
+
+This capability indicates that the KVM_S390_NORMAL_RESET and
+KVM_S390_CLEAR_RESET ioctls are available.
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index 893a8ba0..95fec59 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -149,14 +149,14 @@
 device driver that wants to mirror a process address space must start with the
 registration of a mmu_interval_notifier::
 
- mni->ops = &driver_ops;
- int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
-			          unsigned long start, unsigned long length,
-			          struct mm_struct *mm);
+ int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
+				  struct mm_struct *mm, unsigned long start,
+				  unsigned long length,
+				  const struct mmu_interval_notifier_ops *ops);
 
-During the driver_ops->invalidate() callback the device driver must perform
-the update action to the range (mark range read only, or fully unmap,
-etc.). The device must complete the update before the driver callback returns.
+During the ops->invalidate() callback the device driver must perform the
+update action to the range (mark range read only, or fully unmap, etc.). The
+device must complete the update before the driver callback returns.
 
 When the device driver wants to populate a range of virtual addresses, it can
 use::
@@ -183,7 +183,7 @@
       struct hmm_range range;
       ...
 
-      range.notifier = &mni;
+      range.notifier = &interval_sub;
       range.start = ...;
       range.end = ...;
       range.pfns = ...;
@@ -191,11 +191,11 @@
       range.values = ...;
       range.pfn_shift = ...;
 
-      if (!mmget_not_zero(mni->notifier.mm))
+      if (!mmget_not_zero(interval_sub->notifier.mm))
           return -EFAULT;
 
  again:
-      range.notifier_seq = mmu_interval_read_begin(&mni);
+      range.notifier_seq = mmu_interval_read_begin(&interval_sub);
       down_read(&mm->mmap_sem);
       ret = hmm_range_fault(&range, HMM_RANGE_SNAPSHOT);
       if (ret) {
diff --git a/Documentation/vm/zswap.rst b/Documentation/vm/zswap.rst
index 1444ecd..61f6185 100644
--- a/Documentation/vm/zswap.rst
+++ b/Documentation/vm/zswap.rst
@@ -130,6 +130,19 @@
 existing pages which are marked as same-value filled pages remain stored
 unchanged in zswap until they are either loaded or invalidated.
 
+To prevent zswap from shrinking the pool when zswap is full and there is
+high pressure on swap (which would flip pages in and out of the zswap pool
+without any real benefit but with a performance drop for the system), a
+special parameter implements a form of hysteresis: once the limit has been
+hit, zswap refuses to take new pages into the pool until it again has
+sufficient space. To set the threshold at which zswap starts accepting
+pages again after it became full, use the sysfs
+``accept_threshold_percent`` attribute, e.g.::
+
+	echo 80 > /sys/module/zswap/parameters/accept_threshold_percent
+
+Setting this parameter to 100 will disable the hysteresis.
+
 A debugfs interface is provided for various statistic about pool size, number
 of pages stored, same-value filled pages and various counters for the reasons
 pages are rejected.
diff --git a/Documentation/w1/masters/omap-hdq.rst b/Documentation/w1/masters/omap-hdq.rst
index 345298a..5347b5d 100644
--- a/Documentation/w1/masters/omap-hdq.rst
+++ b/Documentation/w1/masters/omap-hdq.rst
@@ -44,7 +44,7 @@
 e.g::
 
   insmod omap_hdq.ko W1_ID=2
-  inamod w1_bq27000.ko F_ID=2
+  insmod w1_bq27000.ko F_ID=2
 
 The driver also supports 1-wire mode. In this mode, there is no need to
 pass slave ID as parameter. The driver will auto-detect slaves connected
diff --git a/Documentation/x86/boot.rst b/Documentation/x86/boot.rst
index 692ce57..c9c2015 100644
--- a/Documentation/x86/boot.rst
+++ b/Documentation/x86/boot.rst
@@ -69,11 +69,12 @@
 		xloadflags to support booting a 64-bit kernel from 32-bit
 		EFI
 
-Protocol 2.14:	BURNT BY INCORRECT COMMIT ae7e1238e68f2a472a125673ab506d49158c1889
+Protocol 2.14	BURNT BY INCORRECT COMMIT
+                ae7e1238e68f2a472a125673ab506d49158c1889
 		(x86/boot: Add ACPI RSDP address to setup_header)
 		DO NOT USE!!! ASSUME SAME AS 2.13.
 
-Protocol 2.15:	(Kernel 5.5) Added the kernel_info and kernel_info.setup_type_max.
+Protocol 2.15	(Kernel 5.5) Added the kernel_info and kernel_info.setup_type_max.
 =============	============================================================
 
 .. note::
@@ -834,14 +835,14 @@
   chunks of memory are occupied by kernel data.
 
   Thus setup_indirect struct and SETUP_INDIRECT type were introduced in
-  protocol 2.15.
+  protocol 2.15::
 
-  struct setup_indirect {
-    __u32 type;
-    __u32 reserved;  /* Reserved, must be set to zero. */
-    __u64 len;
-    __u64 addr;
-  };
+    struct setup_indirect {
+      __u32 type;
+      __u32 reserved;  /* Reserved, must be set to zero. */
+      __u64 len;
+      __u64 addr;
+    };
 
   The type member is a SETUP_INDIRECT | SETUP_* type. However, it cannot be
   SETUP_INDIRECT itself since making the setup_indirect a tree structure
@@ -849,19 +850,19 @@
   and stack space can be limited in boot contexts.
 
   Let's give an example how to point to SETUP_E820_EXT data using setup_indirect.
-  In this case setup_data and setup_indirect will look like this:
+  In this case setup_data and setup_indirect will look like this::
 
-  struct setup_data {
-    __u64 next = 0 or <addr_of_next_setup_data_struct>;
-    __u32 type = SETUP_INDIRECT;
-    __u32 len = sizeof(setup_data);
-    __u8 data[sizeof(setup_indirect)] = struct setup_indirect {
-      __u32 type = SETUP_INDIRECT | SETUP_E820_EXT;
-      __u32 reserved = 0;
-      __u64 len = <len_of_SETUP_E820_EXT_data>;
-      __u64 addr = <addr_of_SETUP_E820_EXT_data>;
+    struct setup_data {
+      __u64 next = 0 or <addr_of_next_setup_data_struct>;
+      __u32 type = SETUP_INDIRECT;
+      __u32 len = sizeof(setup_data);
+      __u8 data[sizeof(setup_indirect)] = struct setup_indirect {
+        __u32 type = SETUP_INDIRECT | SETUP_E820_EXT;
+        __u32 reserved = 0;
+        __u64 len = <len_of_SETUP_E820_EXT_data>;
+        __u64 addr = <addr_of_SETUP_E820_EXT_data>;
+      }
     }
-  }
 
 .. note::
      SETUP_INDIRECT | SETUP_NONE objects cannot be properly distinguished
@@ -964,7 +965,7 @@
 All kernel_info data should be part of this structure. Fixed size data have to
 be put before kernel_info_var_len_data label. Variable size data have to be put
 after kernel_info_var_len_data label. Each chunk of variable size data has to
-be prefixed with header/magic and its size, e.g.:
+be prefixed with header/magic and its size, e.g.::
 
   kernel_info:
           .ascii  "LToP"          /* Header, Linux top (structure). */
diff --git a/Documentation/x86/intel_mpx.rst b/Documentation/x86/intel_mpx.rst
deleted file mode 100644
index 387a640..0000000
--- a/Documentation/x86/intel_mpx.rst
+++ /dev/null
@@ -1,252 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-===========================================
-Intel(R) Memory Protection Extensions (MPX)
-===========================================
-
-Intel(R) MPX Overview
-=====================
-
-Intel(R) Memory Protection Extensions (Intel(R) MPX) is a new capability
-introduced into Intel Architecture. Intel MPX provides hardware features
-that can be used in conjunction with compiler changes to check memory
-references, for those references whose compile-time normal intentions are
-usurped at runtime due to buffer overflow or underflow.
-
-You can tell if your CPU supports MPX by looking in /proc/cpuinfo::
-
-	cat /proc/cpuinfo  | grep ' mpx '
-
-For more information, please refer to Intel(R) Architecture Instruction
-Set Extensions Programming Reference, Chapter 9: Intel(R) Memory Protection
-Extensions.
-
-Note: As of December 2014, no hardware with MPX is available but it is
-possible to use SDE (Intel(R) Software Development Emulator) instead, which
-can be downloaded from
-http://software.intel.com/en-us/articles/intel-software-development-emulator
-
-
-How to get the advantage of MPX
-===============================
-
-For MPX to work, changes are required in the kernel, binutils and compiler.
-No source changes are required for applications, just a recompile.
-
-There are a lot of moving parts of this to all work right. The following
-is how we expect the compiler, application and kernel to work together.
-
-1) Application developer compiles with -fmpx. The compiler will add the
-   instrumentation as well as some setup code called early after the app
-   starts. New instruction prefixes are noops for old CPUs.
-2) That setup code allocates (virtual) space for the "bounds directory",
-   points the "bndcfgu" register to the directory (must also set the valid
-   bit) and notifies the kernel (via the new prctl(PR_MPX_ENABLE_MANAGEMENT))
-   that the app will be using MPX.  The app must be careful not to access
-   the bounds tables between the time when it populates "bndcfgu" and
-   when it calls the prctl().  This might be hard to guarantee if the app
-   is compiled with MPX.  You can add "__attribute__((bnd_legacy))" to
-   the function to disable MPX instrumentation to help guarantee this.
-   Also be careful not to call out to any other code which might be
-   MPX-instrumented.
-3) The kernel detects that the CPU has MPX, allows the new prctl() to
-   succeed, and notes the location of the bounds directory. Userspace is
-   expected to keep the bounds directory at that location. We note it
-   instead of reading it each time because the 'xsave' operation needed
-   to access the bounds directory register is an expensive operation.
-4) If the application needs to spill bounds out of the 4 registers, it
-   issues a bndstx instruction. Since the bounds directory is empty at
-   this point, a bounds fault (#BR) is raised, the kernel allocates a
-   bounds table (in the user address space) and makes the relevant entry
-   in the bounds directory point to the new table.
-5) If the application violates the bounds specified in the bounds registers,
-   a separate kind of #BR is raised which will deliver a signal with
-   information about the violation in the 'struct siginfo'.
-6) Whenever memory is freed, we know that it can no longer contain valid
-   pointers, and we attempt to free the associated space in the bounds
-   tables. If an entire table becomes unused, we will attempt to free
-   the table and remove the entry in the directory.
-
-To summarize, there are essentially three things interacting here:
-
-GCC with -fmpx:
- * enables annotation of code with MPX instructions and prefixes
- * inserts code early in the application to call in to the "gcc runtime"
-GCC MPX Runtime:
- * Checks for hardware MPX support in cpuid leaf
- * allocates virtual space for the bounds directory (malloc() essentially)
- * points the hardware BNDCFGU register at the directory
- * calls a new prctl(PR_MPX_ENABLE_MANAGEMENT) to notify the kernel to
-   start managing the bounds directories
-Kernel MPX Code:
- * Checks for hardware MPX support in cpuid leaf
- * Handles #BR exceptions and sends SIGSEGV to the app when it violates
-   bounds, like during a buffer overflow.
- * When bounds are spilled in to an unallocated bounds table, the kernel
-   notices in the #BR exception, allocates the virtual space, then
-   updates the bounds directory to point to the new table. It keeps
-   special track of the memory with a VM_MPX flag.
- * Frees unused bounds tables at the time that the memory they described
-   is unmapped.
-
-
-How does MPX kernel code work
-=============================
-
-Handling #BR faults caused by MPX
----------------------------------
-
-When MPX is enabled, there are 2 new situations that can generate
-#BR faults.
-
-  * new bounds tables (BT) need to be allocated to save bounds.
-  * bounds violation caused by MPX instructions.
-
-We hook #BR handler to handle these two new situations.
-
-On-demand kernel allocation of bounds tables
---------------------------------------------
-
-MPX only has 4 hardware registers for storing bounds information. If
-MPX-enabled code needs more than these 4 registers, it needs to spill
-them somewhere. It has two special instructions for this which allow
-the bounds to be moved between the bounds registers and some new "bounds
-tables".
-
-#BR exceptions are a new class of exceptions just for MPX. They are
-similar conceptually to a page fault and will be raised by the MPX
-hardware during both bounds violations or when the tables are not
-present. The kernel handles those #BR exceptions for not-present tables
-by carving the space out of the normal processes address space and then
-pointing the bounds-directory over to it.
-
-The tables need to be accessed and controlled by userspace because
-the instructions for moving bounds in and out of them are extremely
-frequent. They potentially happen every time a register points to
-memory. Any direct kernel involvement (like a syscall) to access the
-tables would obviously destroy performance.
-
-Why not do this in userspace? MPX does not strictly require anything in
-the kernel. It can theoretically be done completely from userspace. Here
-are a few ways this could be done. We don't think any of them are practical
-in the real-world, but here they are.
-
-:Q: Can virtual space simply be reserved for the bounds tables so that we
-    never have to allocate them?
-:A: MPX-enabled application will possibly create a lot of bounds tables in
-    process address space to save bounds information. These tables can take
-    up huge swaths of memory (as much as 80% of the memory on the system)
-    even if we clean them up aggressively. In the worst-case scenario, the
-    tables can be 4x the size of the data structure being tracked. IOW, a
-    1-page structure can require 4 bounds-table pages. An X-GB virtual
-    area needs 4*X GB of virtual space, plus 2GB for the bounds directory.
-    If we were to preallocate them for the 128TB of user virtual address
-    space, we would need to reserve 512TB+2GB, which is larger than the
-    entire virtual address space today. This means they can not be reserved
-    ahead of time. Also, a single process's pre-populated bounds directory
-    consumes 2GB of virtual *AND* physical memory. IOW, it's completely
-    infeasible to prepopulate bounds directories.
-
-:Q: Can we preallocate bounds table space at the same time memory is
-    allocated which might contain pointers that might eventually need
-    bounds tables?
-:A: This would work if we could hook the site of each and every memory
-    allocation syscall. This can be done for small, constrained applications.
-    But, it isn't practical at a larger scale since a given app has no
-    way of controlling how all the parts of the app might allocate memory
-    (think libraries). The kernel is really the only place to intercept
-    these calls.
-
-:Q: Could a bounds fault be handed to userspace and the tables allocated
-    there in a signal handler instead of in the kernel?
-:A: mmap() is not on the list of safe async handler functions and even
-    if mmap() would work it still requires locking or nasty tricks to
-    keep track of the allocation state there.
-
-Having ruled out all of the userspace-only approaches for managing
-bounds tables that we could think of, we create them on demand in
-the kernel.
-
-Decoding MPX instructions
--------------------------
-
-If a #BR is generated due to a bounds violation caused by MPX.
-We need to decode MPX instructions to get violation address and
-set this address into extended struct siginfo.
-
-The _sigfault field of struct siginfo is extended as follow::
-
-  87		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
-  88		struct {
-  89			void __user *_addr; /* faulting insn/memory ref. */
-  90 #ifdef __ARCH_SI_TRAPNO
-  91			int _trapno;	/* TRAP # which caused the signal */
-  92 #endif
-  93			short _addr_lsb; /* LSB of the reported address */
-  94			struct {
-  95				void __user *_lower;
-  96				void __user *_upper;
-  97			} _addr_bnd;
-  98		} _sigfault;
-
-The '_addr' field refers to violation address, and new '_addr_and'
-field refers to the upper/lower bounds when a #BR is caused.
-
-Glibc will be also updated to support this new siginfo. So user
-can get violation address and bounds when bounds violations occur.
-
-Cleanup unused bounds tables
-----------------------------
-
-When a BNDSTX instruction attempts to save bounds to a bounds directory
-entry marked as invalid, a #BR is generated. This is an indication that
-no bounds table exists for this entry. In this case the fault handler
-will allocate a new bounds table on demand.
-
-Since the kernel allocated those tables on-demand without userspace
-knowledge, it is also responsible for freeing them when the associated
-mappings go away.
-
-Here, the solution for this issue is to hook do_munmap() to check
-whether one process is MPX enabled. If yes, those bounds tables covered
-in the virtual address region which is being unmapped will be freed also.
-
-Adding new prctl commands
--------------------------
-
-Two new prctl commands are added to enable and disable MPX bounds tables
-management in kernel.
-::
-
-  155	#define PR_MPX_ENABLE_MANAGEMENT	43
-  156	#define PR_MPX_DISABLE_MANAGEMENT	44
-
-Runtime library in userspace is responsible for allocation of bounds
-directory. So kernel have to use XSAVE instruction to get the base
-of bounds directory from BNDCFG register.
-
-But XSAVE is expected to be very expensive. In order to do performance
-optimization, we have to get the base of bounds directory and save it
-into struct mm_struct to be used in future during PR_MPX_ENABLE_MANAGEMENT
-command execution.
-
-
-Special rules
-=============
-
-1) If userspace is requesting help from the kernel to do the management
-of bounds tables, it may not create or modify entries in the bounds directory.
-
-Certainly users can allocate bounds tables and forcibly point the bounds
-directory at them through XSAVE instruction, and then set valid bit
-of bounds entry to have this entry valid.  But, the kernel will decline
-to assist in managing these tables.
-
-2) Userspace may not take multiple bounds directory entries and point
-them at the same bounds table.
-
-This is allowed architecturally.  See more information "Intel(R) Architecture
-Instruction Set Extensions Programming Reference" (9.3.4).
-
-However, if users did this, the kernel might be fooled in to unmapping an
-in-use bounds table since it does not recognize sharing.
diff --git a/Documentation/x86/x86_64/mm.rst b/Documentation/x86/x86_64/mm.rst
index 267fc48..e505340 100644
--- a/Documentation/x86/x86_64/mm.rst
+++ b/Documentation/x86/x86_64/mm.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-================
-Memory Managment
-================
+=================
+Memory Management
+=================
 
 Complete virtual memory map with 4-level page tables
 ====================================================
diff --git a/MAINTAINERS b/MAINTAINERS
index d51ce1a..f74871a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -317,30 +317,30 @@
 M:	"Rafael J. Wysocki" <rjw@rjwysocki.net>
 M:	Len Brown <lenb@kernel.org>
 L:	linux-acpi@vger.kernel.org
-W:	https://01.org/linux-acpi
-Q:	https://patchwork.kernel.org/project/linux-acpi/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
-B:	https://bugzilla.kernel.org
 S:	Supported
+W:	https://01.org/linux-acpi
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+Q:	https://patchwork.kernel.org/project/linux-acpi/list/
+B:	https://bugzilla.kernel.org
+F:	Documentation/ABI/testing/configfs-acpi
+F:	Documentation/ABI/testing/sysfs-bus-acpi
+F:	Documentation/firmware-guide/acpi/
 F:	drivers/acpi/
+F:	drivers/pci/*/*acpi*
+F:	drivers/pci/*acpi*
 F:	drivers/pnp/pnpacpi/
+F:	include/acpi/
 F:	include/linux/acpi.h
 F:	include/linux/fwnode.h
-F:	include/acpi/
-F:	Documentation/firmware-guide/acpi/
-F:	Documentation/ABI/testing/sysfs-bus-acpi
-F:	Documentation/ABI/testing/configfs-acpi
-F:	drivers/pci/*acpi*
-F:	drivers/pci/*/*acpi*
 F:	tools/power/acpi/
 
 ACPI APEI
 M:	"Rafael J. Wysocki" <rjw@rjwysocki.net>
 M:	Len Brown <lenb@kernel.org>
-L:	linux-acpi@vger.kernel.org
 R:	James Morse <james.morse@arm.com>
 R:	Tony Luck <tony.luck@intel.com>
 R:	Borislav Petkov <bp@alien8.de>
+L:	linux-acpi@vger.kernel.org
 F:	drivers/acpi/apei/
 
 ACPI COMPONENT ARCHITECTURE (ACPICA)
@@ -349,13 +349,13 @@
 M:	"Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
 L:	linux-acpi@vger.kernel.org
 L:	devel@acpica.org
+S:	Supported
 W:	https://acpica.org/
 W:	https://github.com/acpica/acpica/
-Q:	https://patchwork.kernel.org/project/linux-acpi/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+Q:	https://patchwork.kernel.org/project/linux-acpi/list/
 B:	https://bugzilla.kernel.org
 B:	https://bugs.acpica.org
-S:	Supported
 F:	drivers/acpi/acpica/
 F:	include/acpi/
 F:	tools/power/acpi/
@@ -363,9 +363,9 @@
 ACPI FAN DRIVER
 M:	Zhang Rui <rui.zhang@intel.com>
 L:	linux-acpi@vger.kernel.org
+S:	Supported
 W:	https://01.org/linux-acpi
 B:	https://bugzilla.kernel.org
-S:	Supported
 F:	drivers/acpi/fan.c
 
 ACPI FOR ARM64 (ACPI/arm64)
@@ -389,26 +389,26 @@
 R:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 R:	Mika Westerberg <mika.westerberg@linux.intel.com>
 L:	linux-acpi@vger.kernel.org
-Q:	https://patchwork.kernel.org/project/linux-acpi/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
-B:	https://bugzilla.kernel.org
 S:	Supported
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+Q:	https://patchwork.kernel.org/project/linux-acpi/list/
+B:	https://bugzilla.kernel.org
 F:	drivers/acpi/pmic/
 
 ACPI THERMAL DRIVER
 M:	Zhang Rui <rui.zhang@intel.com>
 L:	linux-acpi@vger.kernel.org
+S:	Supported
 W:	https://01.org/linux-acpi
 B:	https://bugzilla.kernel.org
-S:	Supported
 F:	drivers/acpi/*thermal*
 
 ACPI VIDEO DRIVER
 M:	Zhang Rui <rui.zhang@intel.com>
 L:	linux-acpi@vger.kernel.org
+S:	Supported
 W:	https://01.org/linux-acpi
 B:	https://bugzilla.kernel.org
-S:	Supported
 F:	drivers/acpi/acpi_video.c
 
 ACPI WMI DRIVER
@@ -674,6 +674,14 @@
 F:	Documentation/i2c/busses/i2c-ali1563.rst
 F:	drivers/i2c/busses/i2c-ali1563.c
 
+ALL SENSORS DLH SERIES PRESSURE SENSORS DRIVER
+M:	Tomislav Denis <tomislav.denis@avl.com>
+W:	http://www.allsensors.com/
+S:	Maintained
+L:	linux-iio@vger.kernel.org
+F:	drivers/iio/pressure/dlhl60d.c
+F:	Documentation/devicetree/bindings/iio/pressure/asc,dlhl60d.yaml
+
 ALLEGRO DVT VIDEO IP CORE DRIVER
 M:	Michael Tretter <m.tretter@pengutronix.de>
 R:	Pengutronix Kernel Team <kernel@pengutronix.de>
@@ -734,7 +742,7 @@
 F:	drivers/mailbox/mailbox-altera.c
 
 ALTERA PIO DRIVER
-M:	Tien Hock Loh <thloh@altera.com>
+M:	Joyce Ooi <joyce.ooi@intel.com>
 L:	linux-gpio@vger.kernel.org
 S:	Maintained
 F:	drivers/gpio/gpio-altera.c
@@ -907,6 +915,14 @@
 F:	drivers/iio/dac/ad5758.c
 F:	Documentation/devicetree/bindings/iio/dac/ad5758.txt
 
+ANALOG DEVICES INC AD7091R5 DRIVER
+M:	Beniamin Bia <beniamin.bia@analog.com>
+L:	linux-iio@vger.kernel.org
+W:	http://ez.analog.com/community/linux-device-drivers
+S:	Supported
+F:	drivers/iio/adc/ad7091r5.c
+F:	Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml
+
 ANALOG DEVICES INC AD7124 DRIVER
 M:	Stefan Popa <stefan.popa@analog.com>
 L:	linux-iio@vger.kernel.org
@@ -1061,7 +1077,7 @@
 F:	Documentation/ABI/testing/sysfs-bus-iio-frequency-ad9523
 F:	Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4350
 F:	drivers/iio/*/ad*
-F:	drivers/iio/adc/ltc2497*
+F:	drivers/iio/adc/ltc249*
 X:	drivers/iio/*/adjd*
 F:	drivers/staging/iio/*/ad*
 
@@ -2076,6 +2092,7 @@
 F:	drivers/watchdog/coh901327_wdt.c
 F:	Documentation/devicetree/bindings/arm/ste-*
 F:	Documentation/devicetree/bindings/arm/ux500/
+F:	Documentation/devicetree/bindings/arm/ux500.yaml
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
 
 ARM/NUVOTON NPCM ARCHITECTURE
@@ -3116,6 +3133,13 @@
 F:	drivers/net/bonding/
 F:	include/uapi/linux/if_bonding.h
 
+BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
+M:	Dan Robertson <dan@dlrobertson.com>
+L:	linux-iio@vger.kernel.org
+S:	Maintained
+F:	drivers/iio/accel/bma400*
+F:	Documentation/devicetree/bindings/iio/accel/bosch,bma400.yaml
+
 BPF (Safe dynamic programs and tools)
 M:	Alexei Starovoitov <ast@kernel.org>
 M:	Daniel Borkmann <daniel@iogearbox.net>
@@ -3265,6 +3289,8 @@
 N:	bcm2711
 N:	bcm2835
 F:	drivers/staging/vc04_services
+F:	Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+F:	drivers/pci/controller/pcie-brcmstb.c
 
 BROADCOM BCM47XX MIPS ARCHITECTURE
 M:	Hauke Mehrtens <hauke@hauke-m.de>
@@ -3320,6 +3346,8 @@
 F:	arch/arm/mm/cache-b15-rac.c
 F:	arch/arm/include/asm/hardware/cache-b15-rac.h
 N:	brcmstb
+F:	Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+F:	drivers/pci/controller/pcie-brcmstb.c
 
 BROADCOM BMIPS CPUFREQ DRIVER
 M:	Markus Mayer <mmayer@broadcom.com>
@@ -4455,13 +4483,10 @@
 
 CX18 VIDEO4LINUX DRIVER
 M:	Andy Walls <awalls@md.metrocast.net>
-L:	ivtv-devel@ivtvdriver.org (subscribers-only)
 L:	linux-media@vger.kernel.org
 T:	git git://linuxtv.org/media_tree.git
 W:	https://linuxtv.org
-W:	http://www.ivtvdriver.org/index.php/Cx18
 S:	Maintained
-F:	Documentation/media/v4l-drivers/cx18*
 F:	drivers/media/pci/cx18/
 F:	include/uapi/linux/ivtv*
 
@@ -5002,6 +5027,24 @@
 K:	dma_(buf|fence|resv)
 T:	git git://anongit.freedesktop.org/drm/drm-misc
 
+DMA-BUF HEAPS FRAMEWORK
+M:	Sumit Semwal <sumit.semwal@linaro.org>
+R:	Andrew F. Davis <afd@ti.com>
+R:	Benjamin Gaignard <benjamin.gaignard@linaro.org>
+R:	Liam Mark <lmark@codeaurora.org>
+R:	Laura Abbott <labbott@redhat.com>
+R:	Brian Starkey <Brian.Starkey@arm.com>
+R:	John Stultz <john.stultz@linaro.org>
+S:	Maintained
+L:	linux-media@vger.kernel.org
+L:	dri-devel@lists.freedesktop.org
+L:	linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
+F:	include/uapi/linux/dma-heap.h
+F:	include/linux/dma-heap.h
+F:	drivers/dma-buf/dma-heap.c
+F:	drivers/dma-buf/heaps/*
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:	Vinod Koul <vkoul@kernel.org>
 L:	dmaengine@vger.kernel.org
@@ -5207,6 +5250,12 @@
 S:	Maintained
 F:	drivers/gpu/drm/bochs/
 
+DRM DRIVER FOR BOE HIMAX8279D PANELS
+M:	Jerry Han <hanxu5@huaqin.corp-partner.google.com>
+S:	Maintained
+F:	drivers/gpu/drm/panel/panel-boe-himax8279d.c
+F:	Documentation/devicetree/bindings/display/panel/boe,himax8279d.txt
+
 DRM DRIVER FOR FARADAY TVE200 TV ENCODER
 M:	Linus Walleij <linus.walleij@linaro.org>
 T:	git git://anongit.freedesktop.org/drm/drm-misc
@@ -5362,6 +5411,12 @@
 F:	drivers/gpu/drm/tiny/st7735r.c
 F:	Documentation/devicetree/bindings/display/sitronix,st7735r.txt
 
+DRM DRIVER FOR SONY ACX424AKP PANELS
+M:	Linus Walleij <linus.walleij@linaro.org>
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+S:	Maintained
+F:	drivers/gpu/drm/panel/panel-sony-acx424akp.c
+
 DRM DRIVER FOR ST-ERICSSON MCDE
 M:	Linus Walleij <linus.walleij@linaro.org>
 T:	git git://anongit.freedesktop.org/drm/drm-misc
@@ -5434,7 +5489,6 @@
 DRM DRIVERS AND MISC GPU PATCHES
 M:	Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
 M:	Maxime Ripard <mripard@kernel.org>
-M:	Sean Paul <sean@poorly.run>
 W:	https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
 S:	Maintained
 T:	git git://anongit.freedesktop.org/drm/drm-misc
@@ -6181,6 +6235,12 @@
 S:	Maintained
 F:	drivers/media/rc/ene_ir.*
 
+EPAPR HYPERVISOR BYTE CHANNEL DEVICE DRIVER
+M:	Laurentiu Tudor <laurentiu.tudor@nxp.com>
+L:	linuxppc-dev@lists.ozlabs.org
+S:	Maintained
+F:	drivers/tty/ehv_bytechan.c
+
 EPSON S1D13XXX FRAMEBUFFER DRIVER
 M:	Kristoffer Ericson <kristoffer.ericson@gmail.com>
 S:	Maintained
@@ -6429,6 +6489,7 @@
 F:	include/linux/fs.h
 F:	include/linux/fs_types.h
 F:	include/uapi/linux/fs.h
+F:	include/uapi/linux/openat2.h
 
 FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M:	Riku Voipio <riku.voipio@iki.fi>
@@ -6847,7 +6908,7 @@
 S:	Maintained
 F:	fs/fuse/
 F:	include/uapi/linux/fuse.h
-F:	Documentation/filesystems/fuse.txt
+F:	Documentation/filesystems/fuse.rst
 
 FUTEX SUBSYSTEM
 M:	Thomas Gleixner <tglx@linutronix.de>
@@ -7322,6 +7383,7 @@
 HARDWARE SPINLOCK CORE
 M:	Ohad Ben-Cohen <ohad@wizery.com>
 M:	Bjorn Andersson <bjorn.andersson@linaro.org>
+R:	Baolin Wang <baolin.wang7@gmail.com>
 L:	linux-remoteproc@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git hwspinlock-next
@@ -7601,9 +7663,8 @@
 F:	drivers/net/usb/hso.c
 
 HSR NETWORK PROTOCOL
-M:	Arvid Brodin <arvid.brodin@alten.se>
 L:	netdev@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	net/hsr/
 
 HT16K33 LED CONTROLLER DRIVER
@@ -7758,9 +7819,7 @@
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
 F:	Documentation/i2c/busses/i2c-parport.rst
-F:	Documentation/i2c/busses/i2c-parport-light.rst
 F:	drivers/i2c/busses/i2c-parport.c
-F:	drivers/i2c/busses/i2c-parport-light.c
 
 I2C SUBSYSTEM
 M:	Wolfram Sang <wsa@the-dreams.de>
@@ -8141,8 +8200,7 @@
 F:	drivers/auxdisplay/img-ascii-lcd.c
 
 IMGTEC IR DECODER DRIVER
-M:	James Hogan <jhogan@kernel.org>
-S:	Maintained
+S:	Orphan
 F:	drivers/media/rc/img-ir/
 
 IMON SOUNDGRAPH USB IR RECEIVER
@@ -8383,7 +8441,6 @@
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
 F:	drivers/gpio/gpio-ich.c
 F:	drivers/gpio/gpio-intel-mid.c
-F:	drivers/gpio/gpio-lynxpoint.c
 F:	drivers/gpio/gpio-merrifield.c
 F:	drivers/gpio/gpio-ml-ioh.c
 F:	drivers/gpio/gpio-pch.c
@@ -8854,10 +8911,12 @@
 L:	netdev@vger.kernel.org
 W:	http://www.isdn4linux.de
 S:	Maintained
-F:	drivers/isdn/mISDN
-F:	drivers/isdn/hardware
+F:	drivers/isdn/mISDN/
+F:	drivers/isdn/hardware/
+F:	drivers/isdn/Kconfig
+F:	drivers/isdn/Makefile
 
-ISDN/CAPI SUBSYSTEM
+ISDN/CMTP OVER BLUETOOTH
 M:	Karsten Keil <isdn@linux-pingi.de>
 L:	isdn4linux@listserv.isdn4linux.de (subscribers-only)
 L:	netdev@vger.kernel.org
@@ -8865,7 +8924,6 @@
 S:	Odd Fixes
 F:	Documentation/isdn/
 F:	drivers/isdn/capi/
-F:	drivers/staging/isdn/
 F:	net/bluetooth/cmtp/
 F:	include/linux/isdn/
 F:	include/uapi/linux/isdn/
@@ -8889,10 +8947,9 @@
 
 IVTV VIDEO4LINUX DRIVER
 M:	Andy Walls <awalls@md.metrocast.net>
-L:	ivtv-devel@ivtvdriver.org (subscribers-only)
 L:	linux-media@vger.kernel.org
 T:	git git://linuxtv.org/media_tree.git
-W:	http://www.ivtvdriver.org
+W:	https://linuxtv.org
 S:	Maintained
 F:	Documentation/media/v4l-drivers/ivtv*
 F:	drivers/media/pci/ivtv/
@@ -9628,6 +9685,7 @@
 M:	Kees Cook <keescook@chromium.org>
 S:	Maintained
 F:	drivers/misc/lkdtm/*
+F:	tools/testing/selftests/lkdtm/*
 
 LINUX KERNEL MEMORY CONSISTENCY MODEL (LKMM)
 M:	Alan Stern <stern@rowland.harvard.edu>
@@ -10337,7 +10395,7 @@
 L:	linux-renesas-soc@vger.kernel.org
 T:	git git://linuxtv.org/media_tree.git
 S:	Supported
-F:	Documentation/devicetree/bindings/media/renesas,ceu.txt
+F:	Documentation/devicetree/bindings/media/renesas,ceu.yaml
 F:	drivers/media/platform/renesas-ceu.c
 F:	include/media/drv-intf/renesas-ceu.h
 
@@ -10375,7 +10433,7 @@
 L:	linux-renesas-soc@vger.kernel.org
 T:	git git://linuxtv.org/media_tree.git
 S:	Supported
-F:	Documentation/devicetree/bindings/media/renesas,csi2.txt
+F:	Documentation/devicetree/bindings/media/renesas,csi2.yaml
 F:	Documentation/devicetree/bindings/media/renesas,vin.txt
 F:	drivers/media/platform/rcar-vin/
 
@@ -11058,7 +11116,6 @@
 MIPS
 M:	Ralf Baechle <ralf@linux-mips.org>
 M:	Paul Burton <paulburton@kernel.org>
-M:	James Hogan <jhogan@kernel.org>
 L:	linux-mips@vger.kernel.org
 W:	http://www.linux-mips.org/
 T:	git git://git.linux-mips.org/pub/scm/ralf/linux.git
@@ -12523,6 +12580,13 @@
 S:	Maintained
 F:	drivers/platform/x86/panasonic-laptop.c
 
+PARALLAX PING IIO SENSOR DRIVER
+M:	Andreas Klinger <ak@it-klinger.de>
+L:	linux-iio@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/iio/proximity/parallax-ping.yaml
+F:	drivers/iio/proximity/ping.c
+
 PARALLEL LCD/KEYPAD PANEL DRIVER
 M:	Willy Tarreau <willy@haproxy.com>
 M:	Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
@@ -12660,7 +12724,7 @@
 L:	linux-pci@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org
 S:	Maintained
-F:	Documentation/devicetree/bindings/pci/versatile.txt
+F:	Documentation/devicetree/bindings/pci/versatile.yaml
 F:	drivers/pci/controller/pci-versatile.c
 
 PCI DRIVER FOR ARMADA 8K
@@ -12693,7 +12757,7 @@
 L:	linux-pci@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
-F:	Documentation/devicetree/bindings/pci/host-generic-pci.txt
+F:	Documentation/devicetree/bindings/pci/host-generic-pci.yaml
 F:	drivers/pci/controller/pci-host-common.c
 F:	drivers/pci/controller/pci-host-generic.c
 
@@ -12855,7 +12919,7 @@
 
 PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
 M:	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
-R:	Andrew Murray <andrew.murray@arm.com>
+R:	Andrew Murray <amurray@thegoodpenguin.co.uk>
 L:	linux-pci@vger.kernel.org
 Q:	http://patchwork.ozlabs.org/project/linux-pci/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/
@@ -13276,11 +13340,6 @@
 S:	Supported
 F:	tools/power/pm-graph
 
-PNP SUPPORT
-M:	"Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
-S:	Maintained
-F:	drivers/pnp/
-
 PNI RM3100 IIO DRIVER
 M:	Song Qiang <songqiang1304521@gmail.com>
 L:	linux-iio@vger.kernel.org
@@ -13288,6 +13347,11 @@
 F:	drivers/iio/magnetometer/rm3100*
 F:	Documentation/devicetree/bindings/iio/magnetometer/pni,rm3100.txt
 
+PNP SUPPORT
+M:	"Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+S:	Maintained
+F:	drivers/pnp/
+
 POSIX CLOCKS and TIMERS
 M:	Thomas Gleixner <tglx@linutronix.de>
 L:	linux-kernel@vger.kernel.org
@@ -13296,7 +13360,7 @@
 F:	fs/timerfd.c
 F:	include/linux/timer*
 F:	include/linux/time_namespace.h
-F:	kernel/time_namespace.c
+F:	kernel/time/namespace.c
 F:	kernel/time/*timer*
 
 POWER MANAGEMENT CORE
@@ -14045,7 +14109,6 @@
 F:	tools/testing/selftests/rtc/
 
 REALTEK AUDIO CODECS
-M:	Bard Liao <bardliao@realtek.com>
 M:	Oder Chiou <oder_chiou@realtek.com>
 S:	Maintained
 F:	sound/soc/codecs/rt*
@@ -14233,6 +14296,12 @@
 F:	include/linux/hid-roccat*
 F:	Documentation/ABI/*/sysfs-driver-hid-roccat*
 
+ROCKCHIP ISP V1 DRIVER
+M:	Helen Koike <helen.koike@collabora.com>
+L:	linux-media@vger.kernel.org
+S:	Maintained
+F:	drivers/staging/media/rkisp1/
+
 ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER
 M:	Jacob Chen <jacob-chen@iotwrt.com>
 M:	Ezequiel Garcia <ezequiel@collabora.com>
@@ -14880,8 +14949,8 @@
 F:	drivers/mmc/host/sdhci-omap.c
 
 SECURE ENCRYPTING DEVICE (SED) OPAL DRIVER
-M:	Scott Bauer <scott.bauer@intel.com>
 M:	Jonathan Derrick <jonathan.derrick@intel.com>
+M:	Revanth Rajashekar <revanth.rajashekar@intel.com>
 L:	linux-block@vger.kernel.org
 S:	Supported
 F:	block/sed*
@@ -15143,11 +15212,8 @@
 F:	Documentation/fb/sm712fb.rst
 
 SIMPLE FIRMWARE INTERFACE (SFI)
-M:	Len Brown <lenb@kernel.org>
-L:	sfi-devel@simplefirmware.org
 W:	http://simplefirmware.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-sfi-2.6.git
-S:	Supported
+S:	Obsolete
 F:	arch/x86/platform/sfi/
 F:	drivers/sfi/
 F:	include/linux/sfi*.h
@@ -15870,6 +15936,15 @@
 F:	Documentation/networking/device_drivers/stmicro/
 F:	drivers/net/ethernet/stmicro/stmmac/
 
+EXTRA BOOT CONFIG
+M:	Masami Hiramatsu <mhiramat@kernel.org>
+S:	Maintained
+F:	lib/bootconfig.c
+F:	fs/proc/bootconfig.c
+F:	include/linux/bootconfig.h
+F:	tools/bootconfig/*
+F:	Documentation/admin-guide/bootconfig.rst
+
 SUN3/3X
 M:	Sam Creasey <sammy@sammy.net>
 W:	http://sammy.net/sun3/
@@ -16077,6 +16152,7 @@
 F:	drivers/firmware/arm_scmi/
 F:	drivers/reset/reset-scmi.c
 F:	include/linux/sc[mp]i_protocol.h
+F:	include/trace/events/scmi.h
 
 SYSTEM RESET/SHUTDOWN DRIVERS
 M:	Sebastian Reichel <sre@kernel.org>
@@ -16475,6 +16551,7 @@
 M:	Michael Jamet <michael.jamet@intel.com>
 M:	Mika Westerberg <mika.westerberg@linux.intel.com>
 M:	Yehezkel Bernat <YehezkelShB@gmail.com>
+L:	linux-usb@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
 S:	Maintained
 F:	Documentation/admin-guide/thunderbolt.rst
@@ -16675,6 +16752,7 @@
 S:	Maintained
 F:	drivers/media/platform/ti-vpe/
 F:	Documentation/devicetree/bindings/media/ti,vpe.yaml
+F:	Documentation/devicetree/bindings/media/ti,cal.yaml
 
 TI WILINK WIRELESS DRIVERS
 L:	linux-wireless@vger.kernel.org
@@ -17060,7 +17138,6 @@
 UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER
 R:	Alim Akhtar <alim.akhtar@samsung.com>
 R:	Avri Altman <avri.altman@wdc.com>
-R:	Pedro Sousa <pedrom.sousa@synopsys.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	Documentation/scsi/ufs.txt
@@ -17477,7 +17554,7 @@
 VFAT/FAT/MSDOS FILESYSTEM
 M:	OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
 S:	Maintained
-F:	Documentation/filesystems/vfat.txt
+F:	Documentation/filesystems/vfat.rst
 F:	fs/fat/
 
 VFIO DRIVER
@@ -18042,8 +18119,8 @@
 X-POWERS AXP288 PMIC DRIVERS
 M:	Hans de Goede <hdegoede@redhat.com>
 S:	Maintained
-N:	axp288
 F:	drivers/acpi/pmic/intel_pmic_xpower.c
+N:	axp288
 
 X-POWERS MULTIFUNCTION PMIC DEVICE DRIVERS
 M:	Chen-Yu Tsai <wens@csie.org>
diff --git a/Makefile b/Makefile
index 6a01b073..65a5dc6 100644
--- a/Makefile
+++ b/Makefile
@@ -423,7 +423,6 @@
 DEPMOD		= /sbin/depmod
 PERL		= perl
 PYTHON		= python
-PYTHON2		= python2
 PYTHON3		= python3
 CHECK		= sparse
 BASH		= bash
@@ -474,7 +473,7 @@
 
 export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
 export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL
-export PERL PYTHON PYTHON2 PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
+export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
 export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
@@ -674,7 +673,7 @@
 #
 # This exploits the 'multi-target pattern rule' trick.
 # The syncconfig should be executed only once to make all the targets.
-%/auto.conf %/auto.conf.cmd %/tristate.conf: $(KCONFIG_CONFIG)
+%/auto.conf %/auto.conf.cmd: $(KCONFIG_CONFIG)
 	$(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
 else # !may-sync-config
 # External modules and some install targets need include/generated/autoconf.h
@@ -1278,24 +1277,13 @@
 # using awk while concatenating to the final file.
 
 PHONY += modules
-modules: $(if $(KBUILD_BUILTIN),vmlinux) modules.order modules.builtin
+modules: $(if $(KBUILD_BUILTIN),vmlinux) modules.order
 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
 	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/modules-check.sh
 
 modules.order: descend
 	$(Q)$(AWK) '!x[$$0]++' $(addsuffix /$@, $(build-dirs)) > $@
 
-modbuiltin-dirs := $(addprefix _modbuiltin_, $(build-dirs))
-
-modules.builtin: $(modbuiltin-dirs)
-	$(Q)$(AWK) '!x[$$0]++' $(addsuffix /$@, $(build-dirs)) > $@
-
-PHONY += $(modbuiltin-dirs)
-# tristate.conf is not included from this Makefile. Add it as a prerequisite
-# here to make it self-healing in case somebody accidentally removes it.
-$(modbuiltin-dirs): include/config/tristate.conf
-	$(Q)$(MAKE) $(modbuiltin)=$(patsubst _modbuiltin_%,%,$@)
-
 # Target to prepare building external modules
 PHONY += modules_prepare
 modules_prepare: prepare
@@ -1315,7 +1303,7 @@
 		ln -s $(CURDIR) $(MODLIB)/build ; \
 	fi
 	@sed 's:^:kernel/:' modules.order > $(MODLIB)/modules.order
-	@sed 's:^:kernel/:' modules.builtin > $(MODLIB)/modules.builtin
+	@cp -f modules.builtin $(MODLIB)/
 	@cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/
 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
 
@@ -1357,7 +1345,7 @@
 
 # Directories & files removed with 'make clean'
 CLEAN_DIRS  += include/ksym
-CLEAN_FILES += modules.builtin.modinfo modules.nsdeps
+CLEAN_FILES += modules.builtin modules.builtin.modinfo modules.nsdeps
 
 # Directories & files removed with 'make mrproper'
 MRPROPER_DIRS  += include/config include/generated          \
@@ -1712,7 +1700,7 @@
 		-o -name '*.lex.c' -o -name '*.tab.[ch]' \
 		-o -name '*.asn1.[ch]' \
 		-o -name '*.symtypes' -o -name 'modules.order' \
-		-o -name modules.builtin -o -name '.tmp_*.o.*' \
+		-o -name '.tmp_*.o.*' \
 		-o -name '*.c.[012]*.*' \
 		-o -name '*.ll' \
 		-o -name '*.gcno' \) -type f -print | xargs rm -f
diff --git a/arch/Kconfig b/arch/Kconfig
index 48b5e10..98de654 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -393,17 +393,22 @@
 config HAVE_ARCH_JUMP_LABEL_RELATIVE
 	bool
 
-config HAVE_RCU_TABLE_FREE
+config MMU_GATHER_TABLE_FREE
 	bool
 
-config HAVE_RCU_TABLE_NO_INVALIDATE
+config MMU_GATHER_RCU_TABLE_FREE
+	bool
+	select MMU_GATHER_TABLE_FREE
+
+config MMU_GATHER_PAGE_SIZE
 	bool
 
-config HAVE_MMU_GATHER_PAGE_SIZE
+config MMU_GATHER_NO_RANGE
 	bool
 
-config HAVE_MMU_GATHER_NO_GATHER
+config MMU_GATHER_NO_GATHER
 	bool
+	depends on MMU_GATHER_TABLE_FREE
 
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
 	bool
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 5d4c76a..f19aa57 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -655,8 +655,6 @@ setup_arch(char **cmdline_p)
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
 	conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
-	conswitchp = &dummy_con;
 #endif
 #endif
 
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c
index 7268222..528d2be 100644
--- a/arch/alpha/kernel/srm_env.c
+++ b/arch/alpha/kernel/srm_env.c
@@ -119,13 +119,12 @@ static ssize_t srm_env_proc_write(struct file *file, const char __user *buffer,
 	return res;
 }
 
-static const struct file_operations srm_env_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= srm_env_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= srm_env_proc_write,
+static const struct proc_ops srm_env_proc_ops = {
+	.proc_open	= srm_env_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= srm_env_proc_write,
 };
 
 static int __init
@@ -182,7 +181,7 @@ srm_env_init(void)
 	entry = srm_named_entries;
 	while (entry->name && entry->id) {
 		if (!proc_create_data(entry->name, 0644, named_dir,
-			     &srm_env_proc_fops, (void *)entry->id))
+			     &srm_env_proc_ops, (void *)entry->id))
 			goto cleanup;
 		entry++;
 	}
@@ -194,7 +193,7 @@ srm_env_init(void)
 		char name[4];
 		sprintf(name, "%ld", var_num);
 		if (!proc_create_data(name, 0644, numbered_dir,
-			     &srm_env_proc_fops, (void *)var_num))
+			     &srm_env_proc_ops, (void *)var_num))
 			goto cleanup;
 	}
 
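Note: the srm_env hunk above is part of the tree-wide conversion from struct file_operations to the new struct proc_ops for procfs entries. As a hedged illustration of the same pattern, here is a minimal sketch of a hypothetical read-only entry; every name below (demo_*, "demo_entry") is made up for illustration and is not taken from this patch.

/*
 * Illustrative only -- not part of this patch.  A hypothetical read-only
 * procfs entry using the struct proc_ops interface the hunk above
 * converts to.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from proc_ops\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

/* Unlike file_operations, proc_ops has no .owner field to fill in. */
static const struct proc_ops demo_proc_ops = {
	.proc_open	= demo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init demo_init(void)
{
	return proc_create("demo_entry", 0444, NULL, &demo_proc_ops) ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo_entry", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The dropped .owner = THIS_MODULE line in the hunk reflects exactly this: struct proc_ops carries no owner member.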
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 8e13b0b..36d42da 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -475,3 +475,5 @@
 543	common	fspick				sys_fspick
 544	common	pidfd_open			sys_pidfd_open
 # 545 reserved for clone3
+547	common	openat2				sys_openat2
+548	common	pidfd_getfd			sys_pidfd_getfd
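The two new table entries wire up openat2(2) and pidfd_getfd(2) on alpha (547/548 here; other architectures assign their own numbers). As a hedged userspace sketch of what the openat2 entry exposes -- struct open_how and RESOLVE_BENEATH come from include/uapi/linux/openat2.h, while the path and the assumption that no libc wrapper exists are illustrative only:

/* Illustrative userspace sketch of openat2(2) -- not part of this patch. */
#include <fcntl.h>		/* O_RDONLY, AT_FDCWD */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>	/* __NR_openat2, if the installed headers define it */
#include <linux/openat2.h>	/* struct open_how, RESOLVE_BENEATH */

int main(void)
{
	struct open_how how;
	int fd;

	memset(&how, 0, sizeof(how));
	how.flags = O_RDONLY;
	how.resolve = RESOLVE_BENEATH;	/* fail if resolution would escape the dirfd tree */

	/* No libc wrapper is assumed here; invoke the syscall directly. */
	fd = syscall(__NR_openat2, AT_FDCWD, "etc/hostname", &how, sizeof(how));
	if (fd < 0) {
		perror("openat2");
		return 1;
	}
	close(fd);
	return 0;
}

pidfd_getfd(2), added as entry 548 above, is reached the same way via a raw syscall(2) invocation.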
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 5f44820..ff2a393 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -28,6 +28,7 @@
 	select GENERIC_SMP_IDLE_THREAD
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_COPY_THREAD_TLS
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_FUTEX_CMPXCHG if FUTEX
@@ -350,9 +351,8 @@
 	  Accessing memory beyond 1GB (with or w/o PAE) requires 2 memory
 	  zones.
 
-if ISA_ARCOMPACT
-
 config ARC_COMPACT_IRQ_LEVELS
+	depends on ISA_ARCOMPACT
 	bool "Setup Timer IRQ as high Priority"
 	# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
 	depends on !SMP
@@ -360,14 +360,10 @@
 config ARC_FPU_SAVE_RESTORE
 	bool "Enable FPU state persistence across context switch"
 	help
-	  Double Precision Floating Point unit had dedicated regs which
-	  need to be saved/restored across context-switch.
-	  Note that ARC FPU is overly simplistic, unlike say x86, which has
-	  hardware pieces to allow software to conditionally save/restore,
-	  based on actual usage of FPU by a task. Thus our implemn does
-	  this for all tasks in system.
-
-endif #ISA_ARCOMPACT
+	  ARCompact FPU has internal registers to assist with Double precision
+	  Floating Point operations. There are control and status registers
+	  for floating point exceptions and rounding modes. These are
+	  preserved across task context switch when enabled.
 
 config ARC_CANT_LLSC
 	def_bool n
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index f9a5c9d..1d109b0 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -78,6 +78,7 @@ gmac: ethernet@18000 {
 			interrupt-names = "macirq";
 			phy-mode = "rgmii";
 			snps,pbl = < 32 >;
+			snps,multicast-filter-bins = <256>;
 			clocks = <&apbclk>;
 			clock-names = "stmmaceth";
 			max-speed = <100>;
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 5134f0b..f7e4324 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -39,6 +39,8 @@
 #define ARC_REG_CLUSTER_BCR	0xcf
 #define ARC_REG_AUX_ICCM	0x208	/* ICCM Base Addr (ARCv2) */
 #define ARC_REG_LPB_CTRL	0x488	/* ARCv2 Loop Buffer control */
+#define ARC_REG_FPU_CTRL	0x300
+#define ARC_REG_FPU_STATUS	0x301
 
 /* Common for ARCompact and ARCv2 status register */
 #define ARC_REG_STATUS32	0x0A
diff --git a/arch/arc/include/asm/fpu.h b/arch/arc/include/asm/fpu.h
new file mode 100644
index 0000000..6434725
--- /dev/null
+++ b/arch/arc/include/asm/fpu.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ *
+ */
+
+#ifndef _ASM_ARC_FPU_H
+#define _ASM_ARC_FPU_H
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_ISA_ARCOMPACT
+
+/* These DPFP regs need to be saved/restored across ctx-sw */
+struct arc_fpu {
+	struct {
+		unsigned int l, h;
+	} aux_dpfp[2];
+};
+
+#define fpu_init_task(regs)
+
+#else
+
+/*
+ * ARCv2 FPU Control aux register
+ *   - bits to enable Traps on Exceptions
+ *   - Rounding mode
+ *
+ * ARCv2 FPU Status aux register
+ *   - FPU exceptions flags (Inv, Div-by-Zero, overflow, underflow, inexact)
+ *   - Flag Write Enable to clear flags explicitly (vs. by fpu instructions
+ *     only
+ *     only)
+
+struct arc_fpu {
+	unsigned int ctrl, status;
+};
+
+extern void fpu_init_task(struct pt_regs *regs);
+
+#endif	/* !CONFIG_ISA_ARCOMPACT */
+
+extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
+
+#else	/* !CONFIG_ARC_FPU_SAVE_RESTORE */
+
+#define fpu_save_restore(p, n)
+#define fpu_init_task(regs)
+
+#endif	/* CONFIG_ARC_FPU_SAVE_RESTORE */
+
+#endif	/* _ASM_ARC_FPU_H */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 9019ed9..12be7e1 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -273,6 +273,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 #define pmd_none(x)			(!pmd_val(x))
 #define	pmd_bad(x)			((pmd_val(x) & ~PAGE_MASK))
 #define pmd_present(x)			(pmd_val(x))
+#define pmd_leaf(x)			(pmd_val(x) & _PAGE_HW_SZ)
 #define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)
 
 #define pte_page(pte)		pfn_to_page(pte_pfn(pte))
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 706edea..ec532d1 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -14,15 +14,7 @@
 #ifndef __ASSEMBLY__
 
 #include <asm/ptrace.h>
-
-#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
-/* These DPFP regs need to be saved/restored across ctx-sw */
-struct arc_fpu {
-	struct {
-		unsigned int l, h;
-	} aux_dpfp[2];
-};
-#endif
+#include <asm/fpu.h>
 
 #ifdef CONFIG_ARC_PLAT_EZNPS
 struct eznps_dp {
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
index 77f1233..aadf65b 100644
--- a/arch/arc/include/asm/switch_to.h
+++ b/arch/arc/include/asm/switch_to.h
@@ -9,19 +9,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/sched.h>
-
-#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
-
-extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
-#define ARC_FPU_PREV(p, n)	fpu_save_restore(p, n)
-#define ARC_FPU_NEXT(t)
-
-#else
-
-#define ARC_FPU_PREV(p, n)
-#define ARC_FPU_NEXT(n)
-
-#endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */
+#include <asm/fpu.h>
 
 #ifdef CONFIG_ARC_PLAT_EZNPS
 extern void dp_save_restore(struct task_struct *p, struct task_struct *n);
@@ -36,9 +24,8 @@ struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
 #define switch_to(prev, next, last)	\
 do {					\
 	ARC_EZNPS_DP_PREV(prev, next);	\
-	ARC_FPU_PREV(prev, next);	\
+	fpu_save_restore(prev, next);	\
 	last = __switch_to(prev, next);\
-	ARC_FPU_NEXT(next);		\
 	mb();				\
 } while (0)
 
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index 7ddba13..c3f4714 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -11,6 +11,7 @@
 #include <linux/types.h>
 
 int sys_clone_wrapper(int, int, int, int, int);
+int sys_clone3_wrapper(void *, size_t);
 int sys_cacheflush(uint32_t, uint32_t, uint32_t);
 int sys_arc_settls(void *);
 int sys_arc_gettls(void);
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 5eafa11..fa2713a 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -21,6 +21,7 @@
 #define __ARCH_WANT_SET_GET_RLIMIT
 #define __ARCH_WANT_SYS_EXECVE
 #define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
 #define __ARCH_WANT_SYS_VFORK
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_TIME32_SYSCALLS
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
index e784f53..7553967 100644
--- a/arch/arc/kernel/Makefile
+++ b/arch/arc/kernel/Makefile
@@ -23,7 +23,9 @@
 obj-$(CONFIG_JUMP_LABEL)		+= jump_label.o
 
 obj-$(CONFIG_ARC_FPU_SAVE_RESTORE)	+= fpu.o
+ifdef CONFIG_ISA_ARCOMPACT
 CFLAGS_fpu.o   += -mdpfp
+endif
 
 ifdef CONFIG_ARC_DW2_UNWIND
 CFLAGS_ctx_sw.o += -fno-omit-frame-pointer
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 1f6bb18..60406ec 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -35,6 +35,18 @@
 	b .Lret_from_system_call
 END(sys_clone_wrapper)
 
+ENTRY(sys_clone3_wrapper)
+	SAVE_CALLEE_SAVED_USER
+	bl  @sys_clone3
+	DISCARD_CALLEE_SAVED_USER
+
+	GET_CURR_THR_INFO_FLAGS   r10
+	btst r10, TIF_SYSCALL_TRACE
+	bnz  tracesys_exit
+
+	b .Lret_from_system_call
+END(sys_clone3_wrapper)
+
 ENTRY(ret_from_fork)
 	; when the forked child comes here from the __switch_to function
 	; r0 has the last task pointer.
diff --git a/arch/arc/kernel/fpu.c b/arch/arc/kernel/fpu.c
index 07e22b5..c67c0f0 100644
--- a/arch/arc/kernel/fpu.c
+++ b/arch/arc/kernel/fpu.c
@@ -6,7 +6,9 @@
  */
 
 #include <linux/sched.h>
-#include <asm/switch_to.h>
+#include <asm/fpu.h>
+
+#ifdef CONFIG_ISA_ARCOMPACT
 
 /*
  * To save/restore FPU regs, simplest scheme would use LR/SR insns.
@@ -50,3 +52,28 @@ void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
 		: "r" (zero), "r" (*(readfrom + 3)), "r" (*(readfrom + 2))
 	);
 }
+
+#else
+
+void fpu_init_task(struct pt_regs *regs)
+{
+	/* default rounding mode */
+	write_aux_reg(ARC_REG_FPU_CTRL, 0x100);
+
+	/* set "Write enable" to allow explicit write to exception flags */
+	write_aux_reg(ARC_REG_FPU_STATUS, 0x80000000);
+}
+
+void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
+{
+	struct arc_fpu *save = &prev->thread.fpu;
+	struct arc_fpu *restore = &next->thread.fpu;
+
+	save->ctrl = read_aux_reg(ARC_REG_FPU_CTRL);
+	save->status = read_aux_reg(ARC_REG_FPU_STATUS);
+
+	write_aux_reg(ARC_REG_FPU_CTRL, restore->ctrl);
+	write_aux_reg(ARC_REG_FPU_STATUS, restore->status);
+}
+
+#endif
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index e1889ce..315528f 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -20,6 +20,8 @@
 #include <linux/elf.h>
 #include <linux/tick.h>
 
+#include <asm/fpu.h>
+
 SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
 {
 	task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
@@ -171,9 +173,8 @@ asmlinkage void ret_from_fork(void);
  * |    user_r25    |
  * ------------------  <===== END of PAGE
  */
-int copy_thread(unsigned long clone_flags,
-		unsigned long usp, unsigned long kthread_arg,
-		struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+	unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
 {
 	struct pt_regs *c_regs;        /* child's pt_regs */
 	unsigned long *childksp;       /* to unwind out of __switch_to() */
@@ -231,7 +232,7 @@ int copy_thread(unsigned long clone_flags,
 		 * set task's userland tls data ptr from 4th arg
 		 * clone C-lib call is difft from clone sys-call
 		 */
-		task_thread_info(p)->thr_ptr = regs->r3;
+		task_thread_info(p)->thr_ptr = tls;
 	} else {
 		/* Normal fork case: set parent's TLS ptr in child */
 		task_thread_info(p)->thr_ptr =
@@ -264,7 +265,7 @@ int copy_thread(unsigned long clone_flags,
 /*
  * Do necessary setup to start up a new user task
  */
-void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp)
+void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
 {
 	regs->sp = usp;
 	regs->ret = pc;
@@ -280,6 +281,8 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp)
 	regs->eflags = 0;
 #endif
 
+	fpu_init_task(regs);
+
 	/* bogus seed values for debugging */
 	regs->lp_start = 0x10;
 	regs->lp_end = 0x80;
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 7ee89dc..e1c6474 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -572,10 +572,6 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	root_mountflags &= ~MS_RDONLY;
 
-#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
-	conswitchp = &dummy_con;
-#endif
-
 	arc_unwind_init();
 }
 
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c
index fddecc7..1069446 100644
--- a/arch/arc/kernel/sys.c
+++ b/arch/arc/kernel/sys.c
@@ -7,6 +7,7 @@
 #include <asm/syscalls.h>
 
 #define sys_clone	sys_clone_wrapper
+#define sys_clone3	sys_clone3_wrapper
 
 #undef __SYSCALL
 #define __SYSCALL(nr, call) [nr] = (call),
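With sys_clone3 now routed through the new assembly wrapper, ARC userspace can reach clone3(2). A minimal, illustrative fork-style call follows; struct clone_args comes from include/uapi/linux/sched.h, and the sketch assumes no libc wrapper, so it uses the raw syscall:

/* Illustrative userspace sketch of clone3(2) -- not part of this patch. */
#include <signal.h>		/* SIGCHLD */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>	/* __NR_clone3, if the installed headers define it */
#include <sys/wait.h>
#include <linux/sched.h>	/* struct clone_args */

int main(void)
{
	struct clone_args args;
	long pid;

	memset(&args, 0, sizeof(args));
	args.exit_signal = SIGCHLD;	/* no extra flags: behaves like fork() */

	/* stack/stack_size left at 0: child runs on a CoW copy of our stack */
	pid = syscall(__NR_clone3, &args, sizeof(args));
	if (pid < 0) {
		perror("clone3");
		return 1;
	}
	if (pid == 0)
		_exit(0);	/* child */

	waitpid(pid, NULL, 0);
	return 0;
}

The tls member of clone_args is what now reaches the ARC copy_thread_tls() hunk above as an explicit argument, rather than being fished out of regs->r3.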
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0b1b1c6..97864aa 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -74,7 +74,7 @@
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_COPY_THREAD_TLS
 	select HAVE_C_RECORDMCOUNT
-	select HAVE_DEBUG_KMEMLEAK
+	select HAVE_DEBUG_KMEMLEAK if !XIP_KERNEL
 	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
@@ -102,7 +102,7 @@
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
-	select HAVE_RCU_TABLE_FREE if SMP && ARM_LPAE
+	select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
@@ -1905,7 +1905,7 @@
 config KEXEC
 	bool "Kexec system call (EXPERIMENTAL)"
 	depends on (!SMP || PM_SLEEP_SMP)
-	depends on !CPU_V7M
+	depends on MMU
 	select KEXEC_CORE
 	help
 	  kexec is a system call that implements the ability to shutdown your
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 8bcbd0c..b70d7de 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -147,14 +147,14 @@
 		    0x80024000      | 0xf0024000     | UART9
 
 	config DEBUG_AT91_RM9200_DBGU
-		bool "Kernel low-level debugging on AT91RM9200, AT91SAM9 DBGU"
+		bool "Kernel low-level debugging on AT91RM9200, AT91SAM9, SAM9X60 DBGU"
 		select DEBUG_AT91_UART
-		depends on SOC_AT91RM9200 || SOC_AT91SAM9
+		depends on SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAM9X60
 		help
 		  Say Y here if you want kernel low-level debugging support
 		  on the DBGU port of:
 		    at91rm9200, at91sam9260, at91sam9g20, at91sam9261,
-		    at91sam9g10, at91sam9n12, at91sam9rl64, at91sam9x5
+		    at91sam9g10, at91sam9n12, at91sam9rl64, at91sam9x5, sam9x60
 
 	config DEBUG_AT91_SAM9263_DBGU
 		bool "Kernel low-level debugging on AT91SAM{9263,9G45,A5D3} DBGU"
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index a1e883c..da599c3 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -110,12 +110,12 @@
 
 # -fstack-protector-strong triggers protection checks in this code,
 # but it is being used too early to link to meaningful stack_chk logic.
-nossp_flags := $(call cc-option, -fno-stack-protector)
-CFLAGS_atags_to_fdt.o := $(nossp_flags)
-CFLAGS_fdt.o := $(nossp_flags)
-CFLAGS_fdt_ro.o := $(nossp_flags)
-CFLAGS_fdt_rw.o := $(nossp_flags)
-CFLAGS_fdt_wip.o := $(nossp_flags)
+nossp-flags-$(CONFIG_CC_HAS_STACKPROTECTOR_NONE) := -fno-stack-protector
+CFLAGS_atags_to_fdt.o := $(nossp-flags-y)
+CFLAGS_fdt.o := $(nossp-flags-y)
+CFLAGS_fdt_ro.o := $(nossp-flags-y)
+CFLAGS_fdt_rw.o := $(nossp-flags-y)
+CFLAGS_fdt_wip.o := $(nossp-flags-y)
 
 ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
 asflags-y := -DZIMAGE
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index ead21e5..088b0a0 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -140,6 +140,17 @@
 #endif
 		.endm
 
+		.macro	enable_cp15_barriers, reg
+		mrc	p15, 0, \reg, c1, c0, 0	@ read SCTLR
+		tst	\reg, #(1 << 5)		@ CP15BEN bit set?
+		bne	.L_\@
+		orr	\reg, \reg, #(1 << 5)	@ CP15 barrier instructions
+		mcr	p15, 0, \reg, c1, c0, 0	@ write SCTLR
+ ARM(		.inst   0xf57ff06f		@ v7+ isb	)
+ THUMB(		isb						)
+.L_\@:
+		.endm
+
 		.section ".start", "ax"
 /*
  * sort out different calling conventions
@@ -820,6 +831,7 @@
 		mov	pc, r12
 
 __armv7_mmu_cache_on:
+		enable_cp15_barriers	r11
 		mov	r12, lr
 #ifdef CONFIG_MMU
 		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
@@ -1209,6 +1221,7 @@
 		mov	pc, lr
 
 __armv7_mmu_cache_flush:
+		enable_cp15_barriers	r10
 		tst	r4, #1
 		bne	iflush
 		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
@@ -1447,21 +1460,7 @@
 
 		@ Preserve return value of efi_entry() in r4
 		mov	r4, r0
-
-		@ our cache maintenance code relies on CP15 barrier instructions
-		@ but since we arrived here with the MMU and caches configured
-		@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
-		@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
-		@ the enable path will be executed on v7+ only.
-		mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
-		tst	r1, #(1 << 5)		@ CP15BEN bit set?
-		bne	0f
-		orr	r1, r1, #(1 << 5)	@ CP15 barrier instructions
-		mcr	p15, 0, r1, c1, c0, 0	@ write SCTLR
- ARM(		.inst	0xf57ff06f		@ v7+ isb	)
- THUMB(		isb						)
-
-0:		bl	cache_clean_flush
+		bl	cache_clean_flush
 		bl	cache_off
 
 		@ Set parameters for booting zImage according to boot protocol
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 08011dc..d6546d2 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -37,18 +37,24 @@
 	at91-ariag25.dtb \
 	at91-ariettag25.dtb \
 	at91-cosino_mega2560.dtb \
-	at91-kizboxmini.dtb \
+	at91-kizboxmini-base.dtb \
+	at91-kizboxmini-mb.dtb \
+	at91-kizboxmini-rd.dtb \
+	at91-smartkiz.dtb \
 	at91-wb45n.dtb \
 	at91sam9g15ek.dtb \
 	at91sam9g25ek.dtb \
 	at91sam9g35ek.dtb \
 	at91sam9x25ek.dtb \
 	at91sam9x35ek.dtb
+dtb-$(CONFIG_SOC_SAM9X60) += \
+	at91-sam9x60ek.dtb
 dtb-$(CONFIG_SOC_SAM_V7) += \
 	at91-kizbox2-2.dtb \
 	at91-kizbox3-hs.dtb \
 	at91-nattis-2-natte-2.dtb \
 	at91-sama5d27_som1_ek.dtb \
+	at91-sama5d27_wlsom1_ek.dtb \
 	at91-sama5d2_ptc_ek.dtb \
 	at91-sama5d2_xplained.dtb \
 	at91-sama5d3_xplained.dtb \
@@ -422,6 +428,10 @@
 	imx6dl-gw560x.dtb \
 	imx6dl-gw5903.dtb \
 	imx6dl-gw5904.dtb \
+	imx6dl-gw5907.dtb \
+	imx6dl-gw5910.dtb \
+	imx6dl-gw5912.dtb \
+	imx6dl-gw5913.dtb \
 	imx6dl-hummingboard.dtb \
 	imx6dl-hummingboard-emmc-som-v15.dtb \
 	imx6dl-hummingboard-som-v15.dtb \
@@ -493,6 +503,10 @@
 	imx6q-gw560x.dtb \
 	imx6q-gw5903.dtb \
 	imx6q-gw5904.dtb \
+	imx6q-gw5907.dtb \
+	imx6q-gw5910.dtb \
+	imx6q-gw5912.dtb \
+	imx6q-gw5913.dtb \
 	imx6q-h100.dtb \
 	imx6q-hummingboard.dtb \
 	imx6q-hummingboard-emmc-som-v15.dtb \
@@ -554,6 +568,7 @@
 	imx6qp-zii-rdu2.dtb
 dtb-$(CONFIG_SOC_IMX6SL) += \
 	imx6sl-evk.dtb \
+	imx6sl-tolino-shine3.dtb \
 	imx6sl-warp.dtb
 dtb-$(CONFIG_SOC_IMX6SLL) += \
 	imx6sll-evk.dtb \
@@ -612,6 +627,7 @@
 	imx7s-mba7.dtb \
 	imx7s-warp.dtb
 dtb-$(CONFIG_SOC_IMX7ULP) += \
+	imx7ulp-com.dtb \
 	imx7ulp-evk.dtb
 dtb-$(CONFIG_SOC_LS1021A) += \
 	ls1021a-moxa-uc-8410a.dtb \
@@ -691,6 +707,7 @@
 	omap3-devkit8000.dtb \
 	omap3-devkit8000-lcd43.dtb \
 	omap3-devkit8000-lcd70.dtb \
+	omap3-echo.dtb \
 	omap3-evm.dtb \
 	omap3-evm-37xx.dtb \
 	omap3-gta04a3.dtb \
@@ -1129,6 +1146,7 @@
 	sun8i-h3-orangepi-plus2e.dtb \
 	sun8i-h3-orangepi-zero-plus2.dtb \
 	sun8i-h3-rervision-dvk.dtb \
+	sun8i-h3-emlid-neutis-n5h3-devboard.dtb \
 	sun8i-r16-bananapi-m2m.dtb \
 	sun8i-r16-nintendo-nes-classic.dtb \
 	sun8i-r16-nintendo-super-nes-classic.dtb \
@@ -1182,7 +1200,9 @@
 	ste-hrefprev60-stuib.dtb \
 	ste-hrefprev60-tvk.dtb \
 	ste-hrefv60plus-stuib.dtb \
-	ste-hrefv60plus-tvk.dtb
+	ste-hrefv60plus-tvk.dtb \
+	ste-href520-tvk.dtb \
+	ste-ux500-samsung-golden.dtb
 dtb-$(CONFIG_ARCH_UNIPHIER) += \
 	uniphier-ld4-ref.dtb \
 	uniphier-ld6b-ref.dtb \
@@ -1238,6 +1258,8 @@
 dtb-$(CONFIG_MACH_ARMADA_375) += \
 	armada-375-db.dtb
 dtb-$(CONFIG_MACH_ARMADA_38X) += \
+	armada-385-clearfog-gtr-s4.dtb \
+	armada-385-clearfog-gtr-l8.dtb \
 	armada-385-db-88f6820-amc.dtb \
 	armada-385-db-ap.dtb \
 	armada-385-linksys-caiman.dtb \
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 6f0a6be..68252da 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -113,7 +113,7 @@ switch10 {
 		};
 	};
 
-	backlight {
+	backlight: backlight {
 		compatible = "pwm-backlight";
 		pwms = <&ecap0 0 50000 0>;
 		brightness-levels = <0 51 53 56 62 75 101 152 255>;
@@ -121,35 +121,15 @@ backlight {
 	};
 
 	panel {
-		compatible = "ti,tilcdc,panel";
-		status = "okay";
+		compatible = "tfc,s9700rtwv43tr-01b";
+
 		pinctrl-names = "default";
 		pinctrl-0 = <&lcd_pins_s0>;
-		panel-info {
-			ac-bias           = <255>;
-			ac-bias-intrpt    = <0>;
-			dma-burst-sz      = <16>;
-			bpp               = <32>;
-			fdd               = <0x80>;
-			sync-edge         = <0>;
-			sync-ctrl         = <1>;
-			raster-order      = <0>;
-			fifo-th           = <0>;
-		};
+		backlight = <&backlight>;
 
-		display-timings {
-			800x480p62 {
-				clock-frequency = <30000000>;
-				hactive = <800>;
-				vactive = <480>;
-				hfront-porch = <39>;
-				hback-porch = <39>;
-				hsync-len = <47>;
-				vback-porch = <29>;
-				vfront-porch = <13>;
-				vsync-len = <2>;
-				hsync-active = <1>;
-				vsync-active = <1>;
+		port {
+			panel_0: endpoint@0 {
+				remote-endpoint = <&lcdc_0>;
 			};
 		};
 	};
@@ -500,6 +480,12 @@ &lcdc {
 	status = "okay";
 
 	blue-and-red-wiring = "crossed";
+
+	port {
+		lcdc_0: endpoint@0 {
+			remote-endpoint = <&panel_0>;
+		};
+	};
 };
 
 &elm {
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index a97f9df..32f515a 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -183,36 +183,16 @@ sound_master: simple-audio-card,codec {
 	};
 
 	panel {
-		compatible = "ti,tilcdc,panel";
+		compatible = "newhaven,nhd-4.3-480272ef-atxl";
+
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&lcd_pins_default>;
 		pinctrl-1 = <&lcd_pins_sleep>;
 		backlight = <&lcd_bl>;
-		status = "okay";
-		panel-info {
-			ac-bias		= <255>;
-			ac-bias-intrpt	= <0>;
-			dma-burst-sz	= <16>;
-			bpp		= <32>;
-			fdd		= <0x80>;
-			sync-edge	= <0>;
-			sync-ctrl	= <1>;
-			raster-order	= <0>;
-			fifo-th		= <0>;
-		};
-		display-timings {
-			480x272 {
-				hactive		= <480>;
-				vactive		= <272>;
-				hback-porch	= <43>;
-				hfront-porch	= <8>;
-				hsync-len	= <4>;
-				vback-porch	= <12>;
-				vfront-porch	= <4>;
-				vsync-len	= <10>;
-				clock-frequency = <9000000>;
-				hsync-active	= <0>;
-				vsync-active	= <0>;
+
+		port {
+			panel_0: endpoint@0 {
+				remote-endpoint = <&lcdc_0>;
 			};
 		};
 	};
@@ -725,6 +705,12 @@ &lcdc {
 	status = "okay";
 
 	blue-and-red-wiring = "crossed";
+
+	port {
+		lcdc_0: endpoint@0 {
+			remote-endpoint = <&panel_0>;
+		};
+	};
 };
 
 &rtc {
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index 204bccf..021eb57 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -287,6 +287,19 @@ pca9536: gpio@41 {
 		gpio-controller;
 		#gpio-cells = <2>;
 	};
+
+	/* osd9616p0899-10 */
+	display@3c {
+		compatible = "solomon,ssd1306fb-i2c";
+		reg = <0x3c>;
+		solomon,height = <16>;
+		solomon,width = <96>;
+		solomon,com-seq;
+		solomon,com-invdir;
+		solomon,page-offset = <0>;
+		solomon,prechargep1 = <2>;
+		solomon,prechargep2 = <13>;
+	};
 };
 
 &spi0 {
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index 3a8a205..4e2986f 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -225,7 +225,6 @@ i2c0: i2c@0 {
 
 		target-module@d000 {			/* 0x44e0d000, ap 20 38.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "adc_tsc";
 			reg = <0xd000 0x4>,
 			      <0xd010 0x4>;
 			reg-names = "rev", "sysc";
@@ -1009,7 +1008,6 @@ i2c1: i2c@0 {
 
 		target-module@30000 {			/* 0x48030000, ap 77 08.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "spi0";
 			reg = <0x30000 0x4>,
 			      <0x30110 0x4>,
 			      <0x30114 0x4>;
@@ -1134,7 +1132,6 @@ timer2: timer@0 {
 
 		target-module@42000 {			/* 0x48042000, ap 24 1c.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer3";
 			reg = <0x42000 0x4>,
 			      <0x42010 0x4>,
 			      <0x42014 0x4>;
@@ -1160,7 +1157,6 @@ timer3: timer@0 {
 
 		target-module@44000 {			/* 0x48044000, ap 26 26.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer4";
 			reg = <0x44000 0x4>,
 			      <0x44010 0x4>,
 			      <0x44014 0x4>;
@@ -1187,7 +1183,6 @@ timer4: timer@0 {
 
 		target-module@46000 {			/* 0x48046000, ap 28 28.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer5";
 			reg = <0x46000 0x4>,
 			      <0x46010 0x4>,
 			      <0x46014 0x4>;
@@ -1214,7 +1209,6 @@ timer5: timer@0 {
 
 		target-module@48000 {			/* 0x48048000, ap 30 22.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer6";
 			reg = <0x48000 0x4>,
 			      <0x48010 0x4>,
 			      <0x48014 0x4>;
@@ -1241,7 +1235,6 @@ timer6: timer@0 {
 
 		target-module@4a000 {			/* 0x4804a000, ap 85 60.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer7";
 			reg = <0x4a000 0x4>,
 			      <0x4a010 0x4>,
 			      <0x4a014 0x4>;
@@ -1344,7 +1337,6 @@ mmc1: mmc@0 {
 
 		target-module@80000 {			/* 0x48080000, ap 38 18.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "elm";
 			reg = <0x80000 0x4>,
 			      <0x80010 0x4>,
 			      <0x80014 0x4>;
@@ -1412,7 +1404,6 @@ mbox_wkupm3: wkup_m3 {
 
 		target-module@ca000 {			/* 0x480ca000, ap 91 40.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "spinlock";
 			reg = <0xca000 0x4>,
 			      <0xca010 0x4>,
 			      <0xca014 0x4>;
@@ -1533,7 +1524,6 @@ i2c2: i2c@0 {
 
 		target-module@a0000 {			/* 0x481a0000, ap 79 24.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "spi1";
 			reg = <0xa0000 0x4>,
 			      <0xa0110 0x4>,
 			      <0xa0114 0x4>;
@@ -1749,7 +1739,6 @@ target-module@cc000 {			/* 0x481cc000, ap 60 46.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
 			reg = <0xcc020 0x4>;
 			reg-names = "rev";
-			ti,hwmods = "d_can0";
 			/* Domains (P, C): per_pwrdm, l4ls_clkdm */
 			clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>,
 				 <&dcan0_fck>;
@@ -1773,7 +1762,6 @@ target-module@d0000 {			/* 0x481d0000, ap 62 42.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
 			reg = <0xd0020 0x4>;
 			reg-names = "rev";
-			ti,hwmods = "d_can1";
 			/* Domains (P, C): per_pwrdm, l4ls_clkdm */
 			clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>,
 				 <&dcan1_fck>;
@@ -1863,7 +1851,6 @@ segment@300000 {					/* 0x48300000 */
 
 		target-module@0 {			/* 0x48300000, ap 66 48.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "epwmss0";
 			reg = <0x0 0x4>,
 			      <0x4 0x4>;
 			reg-names = "rev", "sysc";
@@ -1916,7 +1903,6 @@ ehrpwm0: pwm@200 {
 
 		target-module@2000 {			/* 0x48302000, ap 68 52.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "epwmss1";
 			reg = <0x2000 0x4>,
 			      <0x2004 0x4>;
 			reg-names = "rev", "sysc";
@@ -1969,7 +1955,6 @@ ehrpwm1: pwm@200 {
 
 		target-module@4000 {			/* 0x48304000, ap 70 44.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "epwmss2";
 			reg = <0x4000 0x4>,
 			      <0x4004 0x4>;
 			reg-names = "rev", "sysc";
@@ -2022,7 +2007,6 @@ ehrpwm2: pwm@200 {
 
 		target-module@e000 {			/* 0x4830e000, ap 72 4a.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "lcdc";
 			reg = <0xe000 0x4>,
 			      <0xe054 0x4>;
 			reg-names = "rev", "sysc";
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 646f114..41dcfb3 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -439,23 +439,87 @@ gpmc: gpmc@50000000 {
 			status = "disabled";
 		};
 
-		sham: sham@53100000 {
-			compatible = "ti,omap4-sham";
-			ti,hwmods = "sham";
-			reg = <0x53100000 0x200>;
-			interrupts = <109>;
-			dmas = <&edma 36 0>;
-			dma-names = "rx";
+		sham_target: target-module@53100000 {
+			compatible = "ti,sysc-omap3-sham", "ti,sysc";
+			reg = <0x53100100 0x4>,
+			      <0x53100110 0x4>,
+			      <0x53100114 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,syss-mask = <1>;
+			/* Domains (P, C): per_pwrdm, l3_clkdm */
+			clocks = <&l3_clkctrl AM3_L3_SHAM_CLKCTRL 0>;
+			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x53100000 0x1000>;
+
+			sham: sham@0 {
+				compatible = "ti,omap4-sham";
+				reg = <0 0x200>;
+				interrupts = <109>;
+				dmas = <&edma 36 0>;
+				dma-names = "rx";
+			};
 		};
 
-		aes: aes@53500000 {
-			compatible = "ti,omap4-aes";
-			ti,hwmods = "aes";
-			reg = <0x53500000 0xa0>;
-			interrupts = <103>;
-			dmas = <&edma 6 0>,
-			       <&edma 5 0>;
-			dma-names = "tx", "rx";
+		aes_target: target-module@53500000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x53500080 0x4>,
+			      <0x53500084 0x4>,
+			      <0x53500088 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>,
+					<SYSC_IDLE_SMART_WKUP>;
+			ti,syss-mask = <1>;
+			/* Domains (P, C): per_pwrdm, l3_clkdm */
+			clocks = <&l3_clkctrl AM3_L3_AES_CLKCTRL 0>;
+			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x53500000 0x1000>;
+
+			aes: aes@0 {
+				compatible = "ti,omap4-aes";
+				reg = <0 0xa0>;
+				interrupts = <103>;
+				dmas = <&edma 6 0>,
+				       <&edma 5 0>;
+				dma-names = "tx", "rx";
+			};
+		};
+
+		target-module@56000000 {
+			compatible = "ti,sysc-omap4", "ti,sysc";
+			reg = <0x5600fe00 0x4>,
+			      <0x5600fe10 0x4>;
+			reg-names = "rev", "sysc";
+			ti,sysc-midle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			clocks = <&gfx_l3_clkctrl AM3_GFX_L3_GFX_CLKCTRL 0>;
+			clock-names = "fck";
+			resets = <&prm_gfx 0>;
+			reset-names = "rstctrl";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x56000000 0x1000000>;
+
+			/*
+			 * Closed source PowerVR driver, no child device
+			 * binding or driver in mainline
+			 */
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index 125379e..e0b5a00 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -74,7 +74,7 @@ davinci_emac: ethernet@5c000000 {
 			clock-names = "ick";
 		};
 
-		davinci_mdio: ethernet@5c030000 {
+		davinci_mdio: mdio@5c030000 {
 			compatible = "ti,davinci_mdio";
 			ti,hwmods = "davinci_mdio";
 			status = "disabled";
diff --git a/arch/arm/boot/dts/am3703.dtsi b/arch/arm/boot/dts/am3703.dtsi
new file mode 100644
index 0000000..2b994ae
--- /dev/null
+++ b/arch/arm/boot/dts/am3703.dtsi
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 André Hentschel <nerv@dawncrow.de>
+ */
+
+#include "omap36xx.dtsi"
+
+&iva {
+	status = "disabled";
+};
+
+&sgx_module {
+	status = "disabled";
+};
diff --git a/arch/arm/boot/dts/am3715.dtsi b/arch/arm/boot/dts/am3715.dtsi
new file mode 100644
index 0000000..ab328e8
--- /dev/null
+++ b/arch/arm/boot/dts/am3715.dtsi
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 André Hentschel <nerv@dawncrow.de>
+ */
+
+#include "omap36xx.dtsi"
+
+&iva {
+	status = "disabled";
+};
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index ca0aa3f..faa14dc 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -256,33 +256,92 @@ mmc3: mmc@0 {
 			};
 		};
 
-		sham: sham@53100000 {
-			compatible = "ti,omap5-sham";
-			ti,hwmods = "sham";
-			reg = <0x53100000 0x300>;
-			dmas = <&edma 36 0>;
-			dma-names = "rx";
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+		sham_target: target-module@53100000 {
+			compatible = "ti,sysc-omap3-sham", "ti,sysc";
+			reg = <0x53100100 0x4>,
+			      <0x53100110 0x4>,
+			      <0x53100114 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,syss-mask = <1>;
+			/* Domains (P, C): per_pwrdm, l3_clkdm */
+			clocks = <&l3_clkctrl AM4_L3_SHAM_CLKCTRL 0>;
+			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x53100000 0x1000>;
+
+			sham: sham@0 {
+				compatible = "ti,omap5-sham";
+				reg = <0 0x300>;
+				dmas = <&edma 36 0>;
+				dma-names = "rx";
+				interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			};
 		};
 
-		aes: aes@53501000 {
-			compatible = "ti,omap4-aes";
-			ti,hwmods = "aes";
-			reg = <0x53501000 0xa0>;
-			interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&edma 6 0>,
-				<&edma 5 0>;
-			dma-names = "tx", "rx";
+		aes_target: target-module@53501000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x53501080 0x4>,
+			      <0x53501084 0x4>,
+			      <0x53501088 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>,
+					<SYSC_IDLE_SMART_WKUP>;
+			ti,syss-mask = <1>;
+			/* Domains (P, C): per_pwrdm, l3_clkdm */
+			clocks = <&l3_clkctrl AM4_L3_AES_CLKCTRL 0>;
+			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x53501000 0x1000>;
+
+			aes: aes@0 {
+				compatible = "ti,omap4-aes";
+				reg = <0 0xa0>;
+				interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&edma 6 0>,
+				      <&edma 5 0>;
+				dma-names = "tx", "rx";
+			};
 		};
 
-		des: des@53701000 {
-			compatible = "ti,omap4-des";
-			ti,hwmods = "des";
-			reg = <0x53701000 0xa0>;
-			interrupts = <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&edma 34 0>,
-				<&edma 33 0>;
-			dma-names = "tx", "rx";
+		des_target: target-module@53701000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x53701030 0x4>,
+			      <0x53701034 0x4>,
+			      <0x53701038 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>,
+					<SYSC_IDLE_SMART_WKUP>;
+			ti,syss-mask = <1>;
+			/* Domains (P, C): per_pwrdm, l3_clkdm */
+			clocks = <&l3_clkctrl AM4_L3_DES_CLKCTRL 0>;
+			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x53701000 0x1000>;
+
+			des: des@0 {
+				compatible = "ti,omap4-des";
+				reg = <0 0xa0>;
+				interrupts = <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&edma 34 0>,
+				       <&edma 33 0>;
+				dma-names = "tx", "rx";
+			};
 		};
 
 		gpmc: gpmc@50000000 {
@@ -305,17 +364,34 @@ gpmc: gpmc@50000000 {
 			status = "disabled";
 		};
 
-		qspi: spi@47900000 {
-			compatible = "ti,am4372-qspi";
-			reg = <0x47900000 0x100>,
-			      <0x30000000 0x4000000>;
-			reg-names = "qspi_base", "qspi_mmap";
+		target-module@47900000 {
+			compatible = "ti,sysc-omap4", "ti,sysc";
+			reg = <0x47900000 0x4>,
+			      <0x47900010 0x4>;
+			reg-names = "rev", "sysc";
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>,
+					<SYSC_IDLE_SMART_WKUP>;
+			clocks = <&l3s_clkctrl AM4_L3S_QSPI_CLKCTRL 0>;
+			clock-names = "fck";
 			#address-cells = <1>;
-			#size-cells = <0>;
-			ti,hwmods = "qspi";
-			interrupts = <0 138 0x4>;
-			num-cs = <4>;
-			status = "disabled";
+			#size-cells = <1>;
+			ranges = <0x0 0x47900000 0x1000>,
+				 <0x30000000 0x30000000 0x4000000>;
+
+			qspi: spi@0 {
+				compatible = "ti,am4372-qspi";
+				reg = <0 0x100>,
+				      <0x30000000 0x4000000>;
+				reg-names = "qspi_base", "qspi_mmap";
+				clocks = <&dpll_per_m2_div4_ck>;
+				clock-names = "fck";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <0 138 0x4>;
+				num-cs = <4>;
+			};
 		};
 
 		dss: dss@4832a000 {
@@ -369,6 +445,26 @@ pm_sram_data: pm-data-sram@1000 {
 				pool;
 			};
 		};
+
+		target-module@56000000 {
+			compatible = "ti,sysc-omap4", "ti,sysc";
+			reg = <0x5600fe00 0x4>,
+			      <0x5600fe10 0x4>;
+			reg-names = "rev", "sysc";
+			ti,sysc-midle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			clocks = <&gfx_l3_clkctrl AM4_GFX_L3_GFX_CLKCTRL 0>;
+			clock-names = "fck";
+			resets = <&prm_gfx 0>;
+			reset-names = "rstctrl";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x56000000 0x1000000>;
+		};
 	};
 };
 
diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi
index 0dd59ee..e18e17d 100644
--- a/arch/arm/boot/dts/am437x-l4.dtsi
+++ b/arch/arm/boot/dts/am437x-l4.dtsi
@@ -225,7 +225,6 @@ i2c0: i2c@0 {
 
 		target-module@d000 {			/* 0x44e0d000, ap 20 38.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "adc_tsc";
 			reg = <0xd000 0x4>,
 			      <0xd010 0x4>;
 			reg-names = "rev", "sysc";
@@ -763,7 +762,6 @@ i2c1: i2c@0 {
 
 		target-module@30000 {			/* 0x48030000, ap 65 08.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "spi0";
 			reg = <0x30000 0x4>,
 			      <0x30110 0x4>,
 			      <0x30114 0x4>;
@@ -900,7 +898,6 @@ timer2: timer@0  {
 
 		target-module@42000 {			/* 0x48042000, ap 20 24.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer3";
 			reg = <0x42000 0x4>,
 			      <0x42010 0x4>,
 			      <0x42014 0x4>;
@@ -927,7 +924,6 @@ timer3: timer@0 {
 
 		target-module@44000 {			/* 0x48044000, ap 22 26.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer4";
 			reg = <0x44000 0x4>,
 			      <0x44010 0x4>,
 			      <0x44014 0x4>;
@@ -955,7 +951,6 @@ timer4: timer@0 {
 
 		target-module@46000 {			/* 0x48046000, ap 24 28.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer5";
 			reg = <0x46000 0x4>,
 			      <0x46010 0x4>,
 			      <0x46014 0x4>;
@@ -983,7 +978,6 @@ timer5: timer@0 {
 
 		target-module@48000 {			/* 0x48048000, ap 26 1a.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer6";
 			reg = <0x48000 0x4>,
 			      <0x48010 0x4>,
 			      <0x48014 0x4>;
@@ -1011,7 +1005,6 @@ timer6: timer@0 {
 
 		target-module@4a000 {			/* 0x4804a000, ap 71 48.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer7";
 			reg = <0x4a000 0x4>,
 			      <0x4a010 0x4>,
 			      <0x4a014 0x4>;
@@ -1107,7 +1100,6 @@ mmc1: mmc@0 {
 
 		target-module@80000 {			/* 0x48080000, ap 32 18.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "elm";
 			reg = <0x80000 0x4>,
 			      <0x80010 0x4>,
 			      <0x80014 0x4>;
@@ -1169,7 +1161,6 @@ mbox_wkupm3: wkup_m3 {
 
 		target-module@ca000 {			/* 0x480ca000, ap 77 38.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "spinlock";
 			reg = <0xca000 0x4>,
 			      <0xca010 0x4>,
 			      <0xca014 0x4>;
@@ -1282,7 +1273,6 @@ i2c2: i2c@0 {
 
 		target-module@a0000 {			/* 0x481a0000, ap 67 2c.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "spi1";
 			reg = <0xa0000 0x4>,
 			      <0xa0110 0x4>,
 			      <0xa0114 0x4>;
@@ -1313,7 +1303,6 @@ spi1: spi@0 {
 
 		target-module@a2000 {			/* 0x481a2000, ap 69 2e.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "spi2";
 			reg = <0xa2000 0x4>,
 			      <0xa2110 0x4>,
 			      <0xa2114 0x4>;
@@ -1344,7 +1333,6 @@ spi2: spi@0 {
 
 		target-module@a4000 {			/* 0x481a4000, ap 92 62.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "spi3";
 			reg = <0xa4000 0x4>,
 			      <0xa4110 0x4>,
 			      <0xa4114 0x4>;
@@ -1527,7 +1515,6 @@ gpio3: gpio@0 {
 
 		target-module@c1000 {			/* 0x481c1000, ap 94 68.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer8";
 			reg = <0xc1000 0x4>,
 			      <0xc1010 0x4>,
 			      <0xc1014 0x4>;
@@ -1556,7 +1543,6 @@ target-module@cc000 {			/* 0x481cc000, ap 50 46.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
 			reg = <0xcc020 0x4>;
 			reg-names = "rev";
-			ti,hwmods = "d_can0";
 			/* Domains (P, C): per_pwrdm, l4ls_clkdm */
 			clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>;
 			clock-names = "fck";
@@ -1577,7 +1563,6 @@ target-module@d0000 {			/* 0x481d0000, ap 52 3a.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
 			reg = <0xd0020 0x4>;
 			reg-names = "rev";
-			ti,hwmods = "d_can1";
 			/* Domains (P, C): per_pwrdm, l4ls_clkdm */
 			clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>;
 			clock-names = "fck";
@@ -1695,7 +1680,6 @@ segment@300000 {					/* 0x48300000 */
 
 		target-module@0 {			/* 0x48300000, ap 56 40.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "epwmss0";
 			reg = <0x0 0x4>,
 			      <0x4 0x4>;
 			reg-names = "rev", "sysc";
@@ -1748,7 +1732,6 @@ ehrpwm0: pwm@200 {
 
 		target-module@2000 {			/* 0x48302000, ap 58 4a.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "epwmss1";
 			reg = <0x2000 0x4>,
 			      <0x2004 0x4>;
 			reg-names = "rev", "sysc";
@@ -1801,7 +1784,6 @@ ehrpwm1: pwm@200 {
 
 		target-module@4000 {			/* 0x48304000, ap 60 44.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "epwmss2";
 			reg = <0x4000 0x4>,
 			      <0x4004 0x4>;
 			reg-names = "rev", "sysc";
@@ -1854,7 +1836,6 @@ ehrpwm2: pwm@200 {
 
 		target-module@6000 {			/* 0x48306000, ap 96 58.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "epwmss3";
 			reg = <0x6000 0x4>,
 			      <0x6004 0x4>;
 			reg-names = "rev", "sysc";
@@ -1896,7 +1877,6 @@ ehrpwm3: pwm@200 {
 
 		target-module@8000 {			/* 0x48308000, ap 98 54.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "epwmss4";
 			reg = <0x8000 0x4>,
 			      <0x8004 0x4>;
 			reg-names = "rev", "sysc";
@@ -1938,7 +1918,6 @@ ehrpwm4: pwm@48308200 {
 
 		target-module@a000 {			/* 0x4830a000, ap 100 60.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "epwmss5";
 			reg = <0xa000 0x4>,
 			      <0xa004 0x4>;
 			reg-names = "rev", "sysc";
@@ -2086,7 +2065,6 @@ gpio5: gpio@0 {
 
 		target-module@26000 {			/* 0x48326000, ap 86 66.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "vpfe0";
 			reg = <0x26000 0x4>,
 			      <0x26104 0x4>;
 			reg-names = "rev", "sysc";
@@ -2113,7 +2091,6 @@ vpfe0: vpfe@0 {
 
 		target-module@28000 {			/* 0x48328000, ap 75 0e.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "vpfe1";
 			reg = <0x28000 0x4>,
 			      <0x28104 0x4>;
 			reg-names = "rev", "sysc";
@@ -2162,7 +2139,6 @@ target-module@2a000 {			/* 0x4832a000, ap 88 3c.0 */
 
 		target-module@3d000 {			/* 0x4833d000, ap 102 6e.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer9";
 			reg = <0x3d000 0x4>,
 			      <0x3d010 0x4>,
 			      <0x3d014 0x4>;
@@ -2189,7 +2165,6 @@ timer9: timer@0 {
 
 		target-module@3f000 {			/* 0x4833f000, ap 104 5c.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer10";
 			reg = <0x3f000 0x4>,
 			      <0x3f010 0x4>,
 			      <0x3f014 0x4>;
@@ -2216,7 +2191,6 @@ timer10: timer@0 {
 
 		target-module@41000 {			/* 0x48341000, ap 106 76.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer11";
 			reg = <0x41000 0x4>,
 			      <0x41010 0x4>,
 			      <0x41014 0x4>;
@@ -2243,7 +2217,6 @@ timer11: timer@0 {
 
 		target-module@45000 {			/* 0x48345000, ap 108 6a.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "spi4";
 			reg = <0x45000 0x4>,
 			      <0x45110 0x4>,
 			      <0x45114 0x4>;
@@ -2358,7 +2331,6 @@ usb1: usb@10000 {
 
 		target-module@a8000 {			/* 0x483a8000, ap 125 6c.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "ocp2scp0";
 			reg = <0xa8000 0x4>;
 			reg-names = "rev";
 			/* Domains (P, C): per_pwrdm, l4ls_clkdm */
@@ -2440,7 +2412,6 @@ usb2: usb@10000 {
 
 		target-module@e8000 {			/* 0x483e8000, ap 129 78.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "ocp2scp1";
 			reg = <0xe8000 0x4>;
 			reg-names = "rev";
 			/* Domains (P, C): per_pwrdm, l4ls_clkdm */
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 74eaa6a..2522249 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -272,6 +272,12 @@ AM4372_IOPAD(0xa24, DS0_PULL_UP_DOWN_EN | INPUT_EN | MUX_MODE7)
 		>;
 	};
 
+	clkout1_pin: pinmux_clkout1_pin {
+		pinctrl-single,pins = <
+			0x270 (PIN_OUTPUT_PULLDOWN | MUX_MODE3)	/* XDMA_EVENT_INTR0/CLKOUT1 */
+		>;
+	};
+
 	cpsw_default: cpsw_default {
 		pinctrl-single,pins = <
 			/* Slave 1 */
@@ -593,6 +599,25 @@ &i2c1 {
 	pinctrl-0 = <&i2c1_pins>;
 	clock-frequency = <400000>;
 
+	ov2659@30 {
+		compatible = "ovti,ov2659";
+		reg = <0x30>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&clkout1_pin>;
+
+		clocks = <&clkout1_mux_ck>;
+		clock-names = "xvclk";
+		assigned-clocks = <&clkout1_mux_ck>;
+		assigned-clock-parents = <&clkout1_osc_div_ck>;
+
+		port {
+			ov2659_1: endpoint {
+				remote-endpoint = <&vpfe0_ep>;
+				link-frequencies = /bits/ 64 <70000000>;
+			};
+		};
+	};
+
 	edt-ft5306@38 {
 		status = "okay";
 		compatible = "edt,edt-ft5306", "edt,edt-ft5x06";
@@ -877,7 +902,7 @@ &vpfe0 {
 	/* Camera port */
 	port {
 		vpfe0_ep: endpoint {
-			/* remote-endpoint = <&sensor>; add once we have it */
+			remote-endpoint = <&ov2659_1>;
 			ti,am437x-vpfe-interface = <0>;
 			bus-width = <8>;
 			hsync-active = <0>;
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index a6fbc08..27259fd 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -145,6 +145,12 @@ sound0_master: simple-audio-card,codec {
 			system-clock-frequency = <12000000>;
 		};
 	};
+
+	audio_mstrclk: clock {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <12000000>;
+	};
 };
 
 &am43xx_pinmux {
@@ -696,6 +702,21 @@ tlv320aic3111: tlv320aic3111@18 {
 		IOVDD-supply = <&dcdc4>; /* V3_3D -> DCDC4 */
 		DVDD-supply = <&ldo1>; /* V1_8AUD -> V1_8D -> LDO1 */
 	};
+
+	ov2659@30 {
+		compatible = "ovti,ov2659";
+		reg = <0x30>;
+
+		clocks = <&audio_mstrclk>;
+		clock-names = "xvclk";
+
+		port {
+			ov2659_1: endpoint {
+				remote-endpoint = <&vpfe1_ep>;
+				link-frequencies = /bits/ 64 <70000000>;
+			};
+		};
+	};
 };
 
 &i2c2 {
@@ -964,7 +985,7 @@ &vpfe1 {
 
 	port {
 		vpfe1_ep: endpoint {
-			/* remote-endpoint = <&sensor>; add once we have it */
+			remote-endpoint = <&ov2659_1>;
 			ti,am437x-vpfe-interface = <0>;
 			bus-width = <8>;
 			hsync-active = <0>;
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index 091356f..c726cd8 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -704,6 +704,60 @@ usb_phy1_always_on_clk32k: usb_phy1_always_on_clk32k@2a48 {
 		ti,bit-shift = <8>;
 		reg = <0x2a48>;
 	};
+
+	clkout1_osc_div_ck: clkout1-osc-div-ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&sys_clkin_ck>;
+		ti,bit-shift = <20>;
+		ti,max-div = <4>;
+		reg = <0x4100>;
+	};
+
+	clkout1_src2_mux_ck: clkout1-src2-mux-ck {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&clk_rc32k_ck>, <&sysclk_div>, <&dpll_ddr_m2_ck>,
+			 <&dpll_per_m2_ck>, <&dpll_disp_m2_ck>,
+			 <&dpll_mpu_m2_ck>;
+		reg = <0x4100>;
+	};
+
+	clkout1_src2_pre_div_ck: clkout1-src2-pre-div-ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&clkout1_src2_mux_ck>;
+		ti,bit-shift = <4>;
+		ti,max-div = <8>;
+		reg = <0x4100>;
+	};
+
+	clkout1_src2_post_div_ck: clkout1-src2-post-div-ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&clkout1_src2_pre_div_ck>;
+		ti,bit-shift = <8>;
+		ti,max-div = <32>;
+		ti,index-power-of-two;
+		reg = <0x4100>;
+	};
+
+	clkout1_mux_ck: clkout1-mux-ck {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&clkout1_osc_div_ck>, <&clk_rc32k_ck>,
+			 <&clkout1_src2_post_div_ck>, <&dpll_extdev_m2_ck>;
+		ti,bit-shift = <16>;
+		reg = <0x4100>;
+	};
+
+	clkout1_ck: clkout1-ck {
+		#clock-cells = <0>;
+		compatible = "ti,gate-clock";
+		clocks = <&clkout1_mux_ck>;
+		ti,bit-shift = <23>;
+		reg = <0x4100>;
+	};
 };
 
 &prcm {
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index 398721c..aa5e55f 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -9,6 +9,7 @@ / {
 	aliases {
 		rtc0 = &tps659038_rtc;
 		rtc1 = &rtc;
+		display0 = &hdmi0;
 	};
 
 	chosen {
@@ -96,6 +97,48 @@ led-out7 {
 			default-state = "off";
 		};
 	};
+
+	hdmi0: connector@0 {
+		compatible = "hdmi-connector";
+		label = "hdmi";
+
+		type = "a";
+
+		port {
+			hdmi_connector_in: endpoint {
+				remote-endpoint = <&tpd12s015_out>;
+			};
+		};
+	};
+
+	tpd12s015: encoder@0 {
+		compatible = "ti,tpd12s016", "ti,tpd12s015";
+
+		gpios = <0>, /* optional CT_CP_HPD */
+			<0>, /* optional LS_OE */
+			<&gpio7 12 GPIO_ACTIVE_HIGH>;	/* HPD */
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+
+				tpd12s015_in: endpoint@0 {
+					remote-endpoint = <&hdmi_out>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+
+				tpd12s015_out: endpoint@0 {
+					remote-endpoint = <&hdmi_connector_in>;
+				};
+			};
+		};
+	};
 };
 
 &dra7_pmx_core {
@@ -485,3 +528,19 @@ partition@6 {
 &cpu0 {
 	vdd-supply = <&smps12_reg>;
 };
+
+&hdmi {
+	status = "okay";
+
+	vdda-supply = <&ldo4_reg>;
+
+	port {
+		hdmi_out: endpoint {
+			remote-endpoint = <&tpd12s015_in>;
+		};
+	};
+};
+
+&dss {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/armada-385-clearfog-gtr-l8.dts b/arch/arm/boot/dts/armada-385-clearfog-gtr-l8.dts
new file mode 100644
index 0000000..c9ac630
--- /dev/null
+++ b/arch/arm/boot/dts/armada-385-clearfog-gtr-l8.dts
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+
+#include "armada-385-clearfog-gtr.dtsi"
+
+/ {
+	model = "SolidRun Clearfog GTR L8";
+};
+
+&mdio {
+	switch0: switch0@4 {
+		compatible = "marvell,mv88e6190";
+		reg = <4>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&cf_gtr_switch_reset_pins>;
+		reset-gpios = <&gpio0 18 GPIO_ACTIVE_LOW>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@1 {
+				reg = <1>;
+				label = "lan8";
+				phy-handle = <&switch0phy0>;
+			};
+
+			port@2 {
+				reg = <2>;
+				label = "lan7";
+				phy-handle = <&switch0phy1>;
+			};
+
+			port@3 {
+				reg = <3>;
+				label = "lan6";
+				phy-handle = <&switch0phy2>;
+			};
+
+			port@4 {
+				reg = <4>;
+				label = "lan5";
+				phy-handle = <&switch0phy3>;
+			};
+
+			port@5 {
+				reg = <5>;
+				label = "lan4";
+				phy-handle = <&switch0phy4>;
+			};
+
+			port@6 {
+				reg = <6>;
+				label = "lan3";
+				phy-handle = <&switch0phy5>;
+			};
+
+			port@7 {
+				reg = <7>;
+				label = "lan2";
+				phy-handle = <&switch0phy6>;
+			};
+
+			port@8 {
+				reg = <8>;
+				label = "lan1";
+				phy-handle = <&switch0phy7>;
+			};
+
+			port@10 {
+				reg = <10>;
+				label = "cpu";
+				ethernet = <&eth1>;
+			};
+
+		};
+
+		mdio {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			switch0phy0: switch0phy0@1 {
+				reg = <0x1>;
+			};
+
+			switch0phy1: switch0phy1@2 {
+				reg = <0x2>;
+			};
+
+			switch0phy2: switch0phy2@3 {
+				reg = <0x3>;
+			};
+
+			switch0phy3: switch0phy3@4 {
+				reg = <0x4>;
+			};
+
+			switch0phy4: switch0phy4@5 {
+				reg = <0x5>;
+			};
+
+			switch0phy5: switch0phy5@6 {
+				reg = <0x6>;
+			};
+
+			switch0phy6: switch0phy6@7 {
+				reg = <0x7>;
+			};
+
+			switch0phy7: switch0phy7@8 {
+				reg = <0x8>;
+			};
+		};
+
+	};
+};
diff --git a/arch/arm/boot/dts/armada-385-clearfog-gtr-s4.dts b/arch/arm/boot/dts/armada-385-clearfog-gtr-s4.dts
new file mode 100644
index 0000000..fa653b3
--- /dev/null
+++ b/arch/arm/boot/dts/armada-385-clearfog-gtr-s4.dts
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+
+#include "armada-385-clearfog-gtr.dtsi"
+
+/ {
+	model = "SolidRun Clearfog GTR S4";
+};
+
+&sfp0 {
+	tx-fault-gpio = <&gpio0 24 GPIO_ACTIVE_HIGH>;
+};
+
+&mdio {
+	switch0: switch0@4 {
+		compatible = "marvell,mv88e6085";
+		reg = <4>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&cf_gtr_switch_reset_pins>;
+		reset-gpios = <&gpio0 18 GPIO_ACTIVE_LOW>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@1 {
+				reg = <1>;
+				label = "lan2";
+				phy-handle = <&switch0phy0>;
+			};
+
+			port@2 {
+				reg = <2>;
+				label = "lan1";
+				phy-handle = <&switch0phy1>;
+			};
+
+			port@3 {
+				reg = <3>;
+				label = "lan4";
+				phy-handle = <&switch0phy2>;
+			};
+
+			port@4 {
+				reg = <4>;
+				label = "lan3";
+				phy-handle = <&switch0phy3>;
+			};
+
+			port@5 {
+				reg = <5>;
+				label = "cpu";
+				ethernet = <&eth1>;
+			};
+
+		};
+
+		mdio {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			switch0phy0: switch0phy0@11 {
+				reg = <0x11>;
+			};
+
+			switch0phy1: switch0phy1@12 {
+				reg = <0x12>;
+			};
+
+			switch0phy2: switch0phy2@13 {
+				reg = <0x13>;
+			};
+
+			switch0phy3: switch0phy3@14 {
+				reg = <0x14>;
+			};
+		};
+
+	};
+};
diff --git a/arch/arm/boot/dts/armada-385-clearfog-gtr.dtsi b/arch/arm/boot/dts/armada-385-clearfog-gtr.dtsi
new file mode 100644
index 0000000..624bbca
--- /dev/null
+++ b/arch/arm/boot/dts/armada-385-clearfog-gtr.dtsi
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Device Tree file for Clearfog GTR machines rev 1.0 (88F6825)
+ *
+ *  Rabeeh Khoury <rabeeh@solid-run.com>, based on Russell King clearfog work
+ */
+
+/*
+	SERDES mapping -
+	0. SATA1 on CON18, or optionally mini PCIe CON3 - PCIe0
+	1. 6141 switch (2.5Gbps capable)
+	2. SATA0 on CON17, or optionally mini PCIe CON4 - PCIe1
+	3. USB 3.0 Host
+	4. mini PCIe CON2 - PCIe2
+	5. SFP connector, or optionally SGMII Ethernet 1512 PHY
+
+	USB 2.0 mapping -
+	0. USB 2.0 - 0 USB pins header CON12
+	1. USB 2.0 - 1 mini PCIe CON2
+	2. USB 2.0 - 2 to USB 3.0 connector (used with SERDES #3)
+
+	Pin mapping -
+	0,1 - console UART
+	2,3 - I2C0 - connected to I2C EEPROM, two temperature sensors,
+	      front panel and PSE controller
+	4,5 - MDC/MDIO
+	6..17 - RGMII
+	18 - Topaz switch reset (active low)
+	19 - 1512 phy reset
+	20 - 1512 phy reset (eth2, optional)
+	21,28,37,38,39,40 - SD0
+	22 - USB 3.0 current limiter enable (active high)
+	24 - SFP TX fault (input active high)
+	25 - SFP present (input active low)
+	26,27 - I2C1 - connected to SFP
+	29 - Fan PWM
+	30 - CON4 mini PCIe wifi disable
+	31 - CON3 mini PCIe wifi disable
+	32 - Fuse programming power toggle (1.8v)
+	33 - CON4 mini PCIe reset
+	34 - CON2 mini PCIe wifi disable
+	35 - CON3 mini PCIe reset
+	36 - Rear button (GPIO active low)
+	41 - CON1 front panel connector
+	42 - Front LED1, or front panel CON1
+	43 - Micron L-PBGA 24 ball SPI (1Gb) CS, or TPM SPI CS
+	44 - CON2 mini PCIe reset
+	45 - TPM PIRQ signal, or front panel CON1
+	46 - SFP TX disable
+	47 - Control isolation of boot sensitive SAR signals
+	48 - PSE reset
+	49 - PSE OSS signal
+	50 - PSE interrupt
+	52 - Front LED2, or front panel
+	53 - Front button
+	54 - SFP LOS (input active high)
+	55 - Fan sense
+	56(mosi),57(clk),58(miso) - SPI interface - 32Mb SPI, 1Gb SPI and TPM
+	59 - SPI 32Mb W25Q32BVZPIG CS0 chip select (bootable)
+*/
+
+/dts-v1/;
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include "armada-385.dtsi"
+
+/ {
+	compatible = "marvell,armada385", "marvell,armada380";
+
+	aliases {
+		/* So that mvebu u-boot can update the MAC addresses */
+		ethernet1 = &eth0;
+		ethernet2 = &eth1;
+		ethernet3 = &eth2;
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	reg_3p3v: regulator-3p3v {
+		compatible = "regulator-fixed";
+		regulator-name = "3P3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	reg_5p0v: regulator-5p0v {
+		compatible = "regulator-fixed";
+		regulator-name = "5P0V";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+	};
+
+	v_usb3_con: regulator-v-usb3-con {
+		compatible = "regulator-fixed";
+		gpio = <&gpio0 22 GPIO_ACTIVE_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&cf_gtr_usb3_con_vbus>;
+		regulator-max-microvolt = <5000000>;
+		regulator-min-microvolt = <5000000>;
+		regulator-name = "v_usb3_con";
+		vin-supply = <&reg_5p0v>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
+			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000
+			  MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>;
+
+		internal-regs {
+
+			rtc@a3800 {
+				status = "okay";
+			};
+
+			i2c@11000 { /* ROM, temp sensor and front panel */
+				pinctrl-0 = <&i2c0_pins>;
+				pinctrl-names = "default";
+				status = "okay";
+			};
+
+			i2c@11100 { /* SFP (CON5/CON6) */
+				pinctrl-0 = <&cf_gtr_i2c1_pins>;
+				pinctrl-names = "default";
+				status = "okay";
+			};
+
+			pinctrl@18000 {
+				cf_gtr_switch_reset_pins: cf-gtr-switch-reset-pins {
+					marvell,pins = "mpp18";
+					marvell,function = "gpio";
+				};
+
+				cf_gtr_usb3_con_vbus: cf-gtr-usb3-con-vbus {
+					marvell,pins = "mpp22";
+					marvell,function = "gpio";
+				};
+
+				cf_gtr_fan_pwm: cf-gtr-fan-pwm {
+					marvell,pins = "mpp23";
+					marvell,function = "gpio";
+				};
+
+				cf_gtr_i2c1_pins: i2c1-pins {
+					/* SFP */
+					marvell,pins = "mpp26", "mpp27";
+					marvell,function = "i2c1";
+				};
+
+				cf_gtr_sdhci_pins: cf-gtr-sdhci-pins {
+					marvell,pins = "mpp21", "mpp28",
+						       "mpp37", "mpp38",
+						       "mpp39", "mpp40";
+					marvell,function = "sd0";
+				};
+
+				cf_gtr_isolation_pins: cf-gtr-isolation-pins {
+					marvell,pins = "mpp47";
+					marvell,function = "gpio";
+				};
+
+				cf_gtr_poe_reset_pins: cf-gtr-poe-reset-pins {
+					marvell,pins = "mpp48";
+					marvell,function = "gpio";
+				};
+
+				cf_gtr_spi1_cs_pins: spi1-cs-pins {
+					marvell,pins = "mpp59";
+					marvell,function = "spi1";
+				};
+
+				cf_gtr_front_button_pins: cf-gtr-front-button-pins {
+					marvell,pins = "mpp53";
+					marvell,function = "gpio";
+				};
+
+				cf_gtr_rear_button_pins: cf-gtr-rear-button-pins {
+					marvell,pins = "mpp36";
+					marvell,function = "gpio";
+				};
+			};
+
+			sdhci@d8000 {
+				bus-width = <4>;
+				no-1-8-v;
+				non-removable;
+				pinctrl-0 = <&cf_gtr_sdhci_pins>;
+				pinctrl-names = "default";
+				status = "okay";
+				vmmc = <&reg_3p3v>;
+				wp-inverted;
+			};
+
+			usb@58000 {
+				status = "okay";
+			};
+
+			usb3@f0000 {
+				status = "okay";
+			};
+
+			usb3@f8000 {
+				vbus-supply = <&v_usb3_con>;
+				status = "okay";
+			};
+		};
+
+		pcie {
+			status = "okay";
+			/*
+			 * The PCIe units are accessible through
+			 * the mini-PCIe connectors on the board.
+			 */
+			pcie@1,0 {
+				reset-gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
+				status = "okay";
+			};
+
+			pcie@2,0 {
+				reset-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+				status = "okay";
+			};
+
+			pcie@3,0 {
+				reset-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
+				status = "okay";
+			};
+		};
+	};
+
+	sfp0: sfp {
+		compatible = "sff,sfp";
+		i2c-bus = <&i2c1>;
+		los-gpio = <&gpio1 22 GPIO_ACTIVE_HIGH>;
+		mod-def0-gpio = <&gpio0 25 GPIO_ACTIVE_LOW>;
+		tx-disable-gpio = <&gpio1 14 GPIO_ACTIVE_HIGH>;
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		pinctrl-0 = <&cf_gtr_rear_button_pins &cf_gtr_front_button_pins>;
+		pinctrl-names = "default";
+
+		button_0 {
+			label = "Rear Button";
+			gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+			linux,can-disable;
+			linux,code = <BTN_0>;
+		};
+
+		button_1 {
+			label = "Front Button";
+			gpios = <&gpio1 21 GPIO_ACTIVE_LOW>;
+			linux,can-disable;
+			linux,code = <BTN_1>;
+		};
+	};
+
+	gpio-leds {
+		compatible = "gpio-leds";
+
+		led1 {
+			function = LED_FUNCTION_CPU;
+			color = <LED_COLOR_ID_GREEN>;
+			gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+		};
+
+		led2 {
+			function = LED_FUNCTION_HEARTBEAT;
+			color = <LED_COLOR_ID_GREEN>;
+			gpios = <&gpio1 20 GPIO_ACTIVE_HIGH>;
+		};
+	};
+};
+
+&bm {
+	status = "okay";
+};
+
+&bm_bppi {
+	status = "okay";
+};
+
+&eth0 {
+	/* ethernet@70000 */
+	pinctrl-0 = <&ge0_rgmii_pins>;
+	pinctrl-names = "default";
+	phy = <&phy_dedicated>;
+	phy-mode = "rgmii-id";
+	buffer-manager = <&bm>;
+	bm,pool-long = <0>;
+	bm,pool-short = <1>;
+	status = "okay";
+};
+
+&eth1 {
+	/* ethernet@30000 */
+	bm,pool-long = <2>;
+	bm,pool-short = <1>;
+	buffer-manager = <&bm>;
+	phys = <&comphy1 1>;
+	phy-mode = "2500base-x";
+	status = "okay";
+
+	fixed-link {
+		speed = <2500>;
+		full-duplex;
+	};
+};
+
+&eth2 {
+	/* ethernet@34000 */
+	bm,pool-long = <3>;
+	bm,pool-short = <1>;
+	buffer-manager = <&bm>;
+	managed = "in-band-status";
+	phys = <&comphy5 1>;
+	phy-mode = "sgmii";
+	sfp = <&sfp0>;
+	status = "okay";
+};
+
+&mdio {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mdio_pins>;
+	status = "okay";
+
+	phy_dedicated: ethernet-phy@0 {
+		/*
+		 * Annoyingly, the marvell phy driver configures the LED
+		 * register, rather than preserving reset-loaded setting.
+		 * We undo that rubbish here.
+		 */
+		marvell,reg-init = <3 16 0 0x1017>;
+		reg = <0>;
+	};
+};
+
+&uart0 {
+	pinctrl-0 = <&uart0_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&spi1 {
+	/*
+	 * CS0: W25Q32 flash
+	 */
+	pinctrl-0 = <&spi1_pins &cf_gtr_spi1_cs_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+
+	spi-flash@0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "w25q32", "jedec,spi-nor";
+		reg = <0>; /* Chip select 0 */
+		spi-max-frequency = <3000000>;
+		status = "okay";
+	};
+};
+
+&i2c0 {
+	pinctrl-0 = <&i2c0_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+
+	/* U26 temperature sensor placed near SoC */
+	temp1: nct75@4c {
+		compatible = "lm75";
+		reg = <0x4c>;
+	};
+
+	/* U27 temperature sensor placed near RTC battery */
+	temp2: nct75@4d {
+		compatible = "lm75";
+		reg = <0x4d>;
+	};
+
+	/* 2Kb eeprom */
+	eeprom@53 {
+		compatible = "atmel,24c02";
+		reg = <0x53>;
+	};
+};
+
+&ahci0 {
+	status = "okay";
+};
+
+&ahci1 {
+	status = "okay";
+};
+
+&gpio0 {
+	pinctrl-0 = <&cf_gtr_fan_pwm>;
+	pinctrl-names = "default";
+
+	wifi-disable {
+		gpio-hog;
+		gpios = <30 GPIO_ACTIVE_LOW>, <31 GPIO_ACTIVE_LOW>;
+		output-low;
+		line-name = "wifi-disable";
+	};
+};
+
+&gpio1 {
+	pinctrl-0 = <&cf_gtr_isolation_pins &cf_gtr_poe_reset_pins>;
+	pinctrl-names = "default";
+
+	lte-disable {
+		gpio-hog;
+		gpios = <2 GPIO_ACTIVE_LOW>;
+		output-low;
+		line-name = "lte-disable";
+	};
+
+	/*
+	 * This signal, when asserted, isolates Armada 38x sample at reset pins
+	 * from control of external devices. Should be de-asserted after reset.
+	 */
+	sar-isolation {
+		gpio-hog;
+		gpios = <15 GPIO_ACTIVE_LOW>;
+		output-low;
+		line-name = "sar-isolation";
+	};
+
+	poe-reset {
+		gpio-hog;
+		gpios = <16 GPIO_ACTIVE_LOW>;
+		output-low;
+		line-name = "poe-reset";
+	};
+};
diff --git a/arch/arm/boot/dts/armada-388-clearfog.dtsi b/arch/arm/boot/dts/armada-388-clearfog.dtsi
index 0d81600..a0aa1d1 100644
--- a/arch/arm/boot/dts/armada-388-clearfog.dtsi
+++ b/arch/arm/boot/dts/armada-388-clearfog.dtsi
@@ -111,11 +111,6 @@ &eth2 {
 };
 
 &i2c0 {
-	clock-frequency = <400000>;
-	pinctrl-0 = <&i2c0_pins>;
-	pinctrl-names = "default";
-	status = "okay";
-
 	/*
 	 * PCA9655 GPIO expander, up to 1MHz clock.
 	 *  0-CON3 CLKREQ#
@@ -183,6 +178,12 @@ mikrobus_adc: mcp3021@4c {
 		compatible = "microchip,mcp3021";
 		reg = <0x4c>;
 	};
+
+	eeprom@52 {
+		compatible = "atmel,24c02";
+		reg = <0x52>;
+		pagesize = <16>;
+	};
 };
 
 &i2c1 {
diff --git a/arch/arm/boot/dts/armada-388-helios4.dts b/arch/arm/boot/dts/armada-388-helios4.dts
index 705adfa..fb49df2 100644
--- a/arch/arm/boot/dts/armada-388-helios4.dts
+++ b/arch/arm/boot/dts/armada-388-helios4.dts
@@ -140,11 +140,6 @@ usb3_phy: usb3-phy {
 	soc {
 		internal-regs {
 			i2c@11000 {
-				clock-frequency = <400000>;
-				pinctrl-0 = <&i2c0_pins>;
-				pinctrl-names = "default";
-				status = "okay";
-
 				/*
 				 * PCA9655 GPIO expander, up to 1MHz clock.
 				 *  0-Board Revision bit 0 #
diff --git a/arch/arm/boot/dts/armada-38x-solidrun-microsom.dtsi b/arch/arm/boot/dts/armada-38x-solidrun-microsom.dtsi
index 3a7f9c1..363ac42 100644
--- a/arch/arm/boot/dts/armada-38x-solidrun-microsom.dtsi
+++ b/arch/arm/boot/dts/armada-38x-solidrun-microsom.dtsi
@@ -71,6 +71,19 @@ phy_dedicated: ethernet-phy@0 {
 	};
 };
 
+&i2c0 {
+	clock-frequency = <400000>;
+	pinctrl-0 = <&i2c0_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+
+	eeprom@53 {
+		compatible = "atmel,24c02";
+		reg = <0x53>;
+		pagesize = <16>;
+	};
+};
+
 &pinctrl {
 	microsom_phy_clk_pins: microsom-phy-clk-pins {
 		marvell,pins = "mpp45";
diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-wedge100.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-wedge100.dts
index b1e10f0..322587b 100644
--- a/arch/arm/boot/dts/aspeed-bmc-facebook-wedge100.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-facebook-wedge100.dts
@@ -76,7 +76,6 @@ &uart5 {
 
 &mac1 {
 	status = "okay";
-	no-hw-checksum;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_rgmii2_default &pinctrl_mdio2_default>;
 };
diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-wedge40.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-wedge40.dts
index aaa77a5..54e5085 100644
--- a/arch/arm/boot/dts/aspeed-bmc-facebook-wedge40.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-facebook-wedge40.dts
@@ -75,7 +75,6 @@ &uart5 {
 
 &mac1 {
 	status = "okay";
-	no-hw-checksum;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_rgmii2_default &pinctrl_mdio2_default>;
 };
diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-yamp.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-yamp.dts
index 5293359..fe2e11c 100644
--- a/arch/arm/boot/dts/aspeed-bmc-facebook-yamp.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-facebook-yamp.dts
@@ -35,7 +35,6 @@ &uart2 {
 &mac0 {
 	status = "okay";
 	use-ncsi;
-	no-hw-checksum;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_rmii1_default>;
 	clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>,
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
index 13f7aef..6232cd7 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
@@ -97,22 +97,22 @@ &i2c3 {
 	status = "okay";
 
 	power-supply@68 {
-		compatible = "ibm,cffps2";
+		compatible = "ibm,cffps";
 		reg = <0x68>;
 	};
 
 	power-supply@69 {
-		compatible = "ibm,cffps2";
+		compatible = "ibm,cffps";
 		reg = <0x69>;
 	};
 
 	power-supply@6a {
-		compatible = "ibm,cffps2";
+		compatible = "ibm,cffps";
 		reg = <0x6a>;
 	};
 
 	power-supply@6b {
-		compatible = "ibm,cffps2";
+		compatible = "ibm,cffps";
 		reg = <0x6b>;
 	};
 };
@@ -352,18 +352,8 @@ eeprom@51 {
 &i2c8 {
 	status = "okay";
 
-	ucd90320@b {
-		compatible = "ti,ucd90160";
-		reg = <0x0b>;
-	};
-
-	ucd90320@c {
-		compatible = "ti,ucd90160";
-		reg = <0x0c>;
-	};
-
 	ucd90320@11 {
-		compatible = "ti,ucd90160";
+		compatible = "ti,ucd90320";
 		reg = <0x11>;
 	};
 
diff --git a/arch/arm/boot/dts/aspeed-bmc-inspur-fp5280g2.dts b/arch/arm/boot/dts/aspeed-bmc-inspur-fp5280g2.dts
index c17bb7f..62a3ab4 100644
--- a/arch/arm/boot/dts/aspeed-bmc-inspur-fp5280g2.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-inspur-fp5280g2.dts
@@ -94,8 +94,6 @@ ps1-presence {
 
 	gpio-keys-polled {
 		compatible = "gpio-keys-polled";
-		#address-cells = <1>;
-		#size-cells = <0>;
 		poll-interval = <1000>;
 
 		fan0-presence {
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-swift.dts b/arch/arm/boot/dts/aspeed-bmc-opp-swift.dts
index b8fdd2a..d56b5ed 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-swift.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-swift.dts
@@ -82,8 +82,6 @@ iio-hwmon-battery {
 
 	gpio-keys-polled {
 		compatible = "gpio-keys-polled";
-		#address-cells = <1>;
-		#size-cells = <0>;
 		poll-interval = <1000>;
 
 		scm0-presence {
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-vesnin.dts b/arch/arm/boot/dts/aspeed-bmc-opp-vesnin.dts
index affd2c8..01074b6 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-vesnin.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-vesnin.dts
@@ -14,7 +14,7 @@ chosen {
 		bootargs = "console=ttyS4,115200 earlyprintk";
 	};
 
-	memory {
+	memory@40000000 {
 		reg = <0x40000000 0x20000000>;
 	};
 
@@ -107,10 +107,7 @@ flash@0 {
 
 &mac0 {
 	status = "okay";
-
 	use-ncsi;
-	no-hw-checksum;
-
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_rmii1_default>;
 };
@@ -236,3 +233,16 @@ &vuart {
 &wdt2 {
 	aspeed,alt-boot;
 };
+
+&sdmmc {
+	status = "okay";
+};
+
+&sdhci1 {
+	status = "okay";
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_sd2_default>;
+	cd-inverted;
+	disable-wp;
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts b/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
index 569dad9..421aa60 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
@@ -77,8 +77,6 @@ iio-hwmon-battery {
 
 	gpio-keys-polled {
 		compatible = "gpio-keys-polled";
-		#address-cells = <1>;
-		#size-cells = <0>;
 		poll-interval = <1000>;
 
 		fan0-presence {
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index 46c0891..807a0fc 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -179,18 +179,21 @@ syscon: syscon@1e6e2000 {
 				compatible = "aspeed,ast2400-scu", "syscon", "simple-mfd";
 				reg = <0x1e6e2000 0x1a8>;
 				#address-cells = <1>;
-				#size-cells = <0>;
+				#size-cells = <1>;
+				ranges = <0 0x1e6e2000 0x1000>;
 				#clock-cells = <1>;
 				#reset-cells = <1>;
 
-				pinctrl: pinctrl {
-					compatible = "aspeed,ast2400-pinctrl";
-				};
-
-				p2a: p2a-control {
+				p2a: p2a-control@2c {
+					reg = <0x2c 0x4>;
 					compatible = "aspeed,ast2400-p2a-ctrl";
 					status = "disabled";
 				};
+
+				pinctrl: pinctrl@80 {
+					reg = <0x80 0x18>, <0xa0 0x10>;
+					compatible = "aspeed,ast2400-pinctrl";
+				};
 			};
 
 			rng: hwrng@1e6e2078 {
@@ -346,14 +349,14 @@ lpc_host: lpc-host@80 {
 
 					lpc_ctrl: lpc-ctrl@0 {
 						compatible = "aspeed,ast2400-lpc-ctrl";
-						reg = <0x0 0x80>;
+						reg = <0x0 0x10>;
 						clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
 						status = "disabled";
 					};
 
-					lpc_snoop: lpc-snoop@0 {
+					lpc_snoop: lpc-snoop@10 {
 						compatible = "aspeed,ast2400-lpc-snoop";
-						reg = <0x0 0x80>;
+						reg = <0x10 0x8>;
 						interrupts = <8>;
 						status = "disabled";
 					};
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index a259c63..ebec0fa 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -47,13 +47,6 @@ memory@80000000 {
 		reg = <0x80000000 0>;
 	};
 
-	edac: sdram@1e6e0000 {
-		compatible = "aspeed,ast2500-sdram-edac";
-		reg = <0x1e6e0000 0x174>;
-		interrupts = <0>;
-		status = "disabled";
-	};
-
 	ahb {
 		compatible = "simple-bus";
 		#address-cells = <1>;
@@ -213,23 +206,32 @@ apb {
 			#size-cells = <1>;
 			ranges;
 
+			edac: memory-controller@1e6e0000 {
+				compatible = "aspeed,ast2500-sdram-edac";
+				reg = <0x1e6e0000 0x174>;
+				interrupts = <0>;
+				status = "disabled";
+			};
+
 			syscon: syscon@1e6e2000 {
 				compatible = "aspeed,ast2500-scu", "syscon", "simple-mfd";
 				reg = <0x1e6e2000 0x1a8>;
 				#address-cells = <1>;
-				#size-cells = <0>;
+				#size-cells = <1>;
+				ranges = <0 0x1e6e2000 0x1000>;
 				#clock-cells = <1>;
 				#reset-cells = <1>;
 
-				pinctrl: pinctrl {
-					compatible = "aspeed,ast2500-pinctrl";
-					aspeed,external-nodes = <&gfx &lhc>;
-
+				p2a: p2a-control@2c {
+					compatible = "aspeed,ast2500-p2a-ctrl";
+					reg = <0x2c 0x4>;
+					status = "disabled";
 				};
 
-				p2a: p2a-control {
-					compatible = "aspeed,ast2500-p2a-ctrl";
-					status = "disabled";
+				pinctrl: pinctrl@80 {
+					compatible = "aspeed,ast2500-pinctrl";
+					reg = <0x80 0x18>, <0xa0 0x10>;
+					aspeed,external-nodes = <&gfx>, <&lhc>;
 				};
 			};
 
@@ -460,29 +462,30 @@ kcs4: kcs4@0 {
 
 					lpc_ctrl: lpc-ctrl@0 {
 						compatible = "aspeed,ast2500-lpc-ctrl";
-						reg = <0x0 0x80>;
+						reg = <0x0 0x10>;
 						clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
 						status = "disabled";
 					};
 
-					lpc_snoop: lpc-snoop@0 {
+					lpc_snoop: lpc-snoop@10 {
 						compatible = "aspeed,ast2500-lpc-snoop";
-						reg = <0x0 0x80>;
+						reg = <0x10 0x8>;
 						interrupts = <8>;
 						status = "disabled";
 					};
 
-					lhc: lhc@20 {
-						compatible = "aspeed,ast2500-lhc";
-						reg = <0x20 0x24 0x48 0x8>;
-					};
-
 					lpc_reset: reset-controller@18 {
 						compatible = "aspeed,ast2500-lpc-reset";
 						reg = <0x18 0x4>;
 						#reset-cells = <1>;
 					};
 
+					lhc: lhc@20 {
+						compatible = "aspeed,ast2500-lhc";
+						reg = <0x20 0x24 0x48 0x8>;
+					};
+
+
 					ibt: ibt@c0 {
 						compatible = "aspeed,ast2500-ibt-bmc";
 						reg = <0xc0 0x18>;
diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
index b72afba..796976d 100644
--- a/arch/arm/boot/dts/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed-g6.dtsi
@@ -365,7 +365,7 @@ wdt3: watchdog@1e785080 {
 				status = "disabled";
 			};
 
-			wdt4: watchdog@1e7850C0 {
+			wdt4: watchdog@1e7850c0 {
 				compatible = "aspeed,ast2600-wdt";
 				reg = <0x1e7850C0 0x40>;
 				status = "disabled";
diff --git a/arch/arm/boot/dts/ast2500-facebook-netbmc-common.dtsi b/arch/arm/boot/dts/ast2500-facebook-netbmc-common.dtsi
index 7a395ba..7468f10 100644
--- a/arch/arm/boot/dts/ast2500-facebook-netbmc-common.dtsi
+++ b/arch/arm/boot/dts/ast2500-facebook-netbmc-common.dtsi
@@ -71,7 +71,6 @@ flash1@0 {
 
 &mac1 {
 	status = "okay";
-	no-hw-checksum;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_rgmii2_default &pinctrl_mdio2_default>;
 };
diff --git a/arch/arm/boot/dts/at91-kizbox.dts b/arch/arm/boot/dts/at91-kizbox.dts
index 90996ea..7d938cc 100644
--- a/arch/arm/boot/dts/at91-kizbox.dts
+++ b/arch/arm/boot/dts/at91-kizbox.dts
@@ -28,85 +28,6 @@ main_xtal {
 		};
 	};
 
-	ahb {
-		apb {
-			tcb0: timer@fffa0000 {
-				timer@0 {
-					compatible = "atmel,tcb-timer";
-					reg = <0>, <1>;
-				};
-
-				timer@2 {
-					compatible = "atmel,tcb-timer";
-					reg = <2>;
-				};
-			};
-
-			macb0: ethernet@fffc4000 {
-				phy-mode = "mii";
-				pinctrl-0 = <&pinctrl_macb_rmii
-				             &pinctrl_macb_rmii_mii_alt>;
-				status = "okay";
-			};
-
-			usart3: serial@fffd0000 {
-				status = "okay";
-			};
-
-			dbgu: serial@fffff200 {
-				status = "okay";
-			};
-
-			watchdog@fffffd40 {
-				timeout-sec = <15>;
-				atmel,max-heartbeat-sec = <16>;
-				atmel,min-heartbeat-sec = <0>;
-				status = "okay";
-			};
-		};
-
-		usb0: ohci@500000 {
-			num-ports = <1>;
-			status = "okay";
-		};
-
-		ebi: ebi@10000000 {
-			status = "okay";
-
-			nand_controller: nand-controller {
-				status = "okay";
-				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
-				pinctrl-names = "default";
-
-				nand@3 {
-					reg = <0x3 0x0 0x800000>;
-					rb-gpios = <&pioC 13 GPIO_ACTIVE_HIGH>;
-					cs-gpios = <&pioC 14 GPIO_ACTIVE_HIGH>;
-					nand-bus-width = <8>;
-					nand-ecc-mode = "soft";
-					nand-on-flash-bbt;
-					label = "atmel_nand";
-
-					partitions {
-						compatible = "fixed-partitions";
-						#address-cells = <1>;
-						#size-cells = <1>;
-
-						bootstrap@0 {
-							label = "bootstrap";
-							reg = <0x0 0x20000>;
-						};
-
-						ubi@20000 {
-							label = "ubi";
-							reg = <0x20000 0x7fe0000>;
-						};
-					};
-				};
-			};
-		};
-	};
-
 	gpio_keys {
 		compatible = "gpio-keys";
 		#address-cells = <1>;
@@ -127,15 +48,6 @@ user {
 		};
 	};
 
-	i2c-gpio-0 {
-		status = "okay";
-
-		rtc: pcf8563@51 {
-			compatible = "nxp,pcf8563";
-			reg = <0x51>;
-		};
-	};
-
 	pwm_leds {
 		compatible = "pwm-leds";
 
@@ -179,3 +91,87 @@ &pinctrl_tcb1_tioa2
 			     &pinctrl_tcb1_tiob0>;
 	};
 };
+
+&tcb0 {
+	timer@0 {
+		compatible = "atmel,tcb-timer";
+		reg = <0>, <1>;
+	};
+
+	timer@2 {
+		compatible = "atmel,tcb-timer";
+		reg = <2>;
+	};
+};
+
+&ebi {
+	status = "okay";
+};
+
+&nand_controller {
+	status = "okay";
+	pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+	pinctrl-names = "default";
+
+	nand@3 {
+		reg = <0x3 0x0 0x800000>;
+		rb-gpios = <&pioC 13 GPIO_ACTIVE_HIGH>;
+		cs-gpios = <&pioC 14 GPIO_ACTIVE_HIGH>;
+		nand-bus-width = <8>;
+		nand-ecc-mode = "soft";
+		nand-on-flash-bbt;
+		label = "atmel_nand";
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			bootstrap@0 {
+				label = "bootstrap";
+				reg = <0x0 0x20000>;
+			};
+
+			ubi@20000 {
+				label = "ubi";
+				reg = <0x20000 0x7fe0000>;
+			};
+		};
+	};
+};
+
+&macb0 {
+	phy-mode = "mii";
+	pinctrl-0 = <&pinctrl_macb_rmii
+		     &pinctrl_macb_rmii_mii_alt>;
+	status = "okay";
+};
+
+&usart3 {
+	status = "okay";
+};
+
+&dbgu {
+	status = "okay";
+};
+
+&watchdog {
+	timeout-sec = <15>;
+	atmel,max-heartbeat-sec = <16>;
+	atmel,min-heartbeat-sec = <0>;
+	status = "okay";
+};
+
+&usb0 {
+	num-ports = <1>;
+	status = "okay";
+};
+
+&i2c_gpio0 {
+	status = "okay";
+
+	rtc: pcf8563@51 {
+		compatible = "nxp,pcf8563";
+		reg = <0x51>;
+	};
+};
diff --git a/arch/arm/boot/dts/at91-kizboxmini-base.dts b/arch/arm/boot/dts/at91-kizboxmini-base.dts
new file mode 100644
index 0000000..81c29ca
--- /dev/null
+++ b/arch/arm/boot/dts/at91-kizboxmini-base.dts
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * at91-kizboxmini-base.dts - Device Tree file for Overkiz Kizbox mini
+ * base board
+ *
+ * Copyright (C) 2015 Overkiz SAS
+ *   Author: Antoine Aubert <a.aubert@overkiz.com>
+ *           Kévin Raymond <k.raymond@overkiz.com>
+ */
+/dts-v1/;
+#include "at91-kizboxmini-common.dtsi"
+
+/ {
+	model = "Overkiz Kizbox Mini";
+	compatible = "overkiz,kizboxmini-base", "atmel,at91sam9g25",
+		     "atmel,at91sam9x5", "atmel,at91sam9";
+};
+
+&pinctrl_usart0 {
+	atmel,pins =
+		<AT91_PIOA 0 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+		 AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_NONE
+		 AT91_PIOA 2 AT91_PERIPH_GPIO AT91_PINCTRL_MULTI_DRIVE>;
+};
diff --git a/arch/arm/boot/dts/at91-kizboxmini-common.dtsi b/arch/arm/boot/dts/at91-kizboxmini-common.dtsi
new file mode 100644
index 0000000..fddf267
--- /dev/null
+++ b/arch/arm/boot/dts/at91-kizboxmini-common.dtsi
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * at91-kizboxmini-common.dtsi - Device Tree file for Overkiz Kizbox mini board
+ *
+ * Copyright (C) 2014-2018 Overkiz SAS
+ *   Author: Antoine Aubert <a.aubert@overkiz.com>
+ *           Gaël Portay <g.portay@overkiz.com>
+ *           Kévin Raymond <k.raymond@overkiz.com>
+ *           Dorian Rocipon <d.rocipon@overkiz.com>
+ */
+#include "at91sam9g25.dtsi"
+
+/ {
+	chosen {
+		bootargs = "ubi.mtd=ubi";
+		stdout-path = &dbgu;
+	};
+
+	memory {
+		reg = <0x20000000 0x8000000>;
+	};
+
+	clocks {
+		main_xtal {
+			clock-frequency = <12000000>;
+		};
+
+		slow_xtal {
+			clock-frequency = <32768>;
+		};
+
+		adc_op_clk {
+			status = "disabled";
+		};
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		prog {
+			label = "PB_PROG";
+			gpios = <&pioC 17 GPIO_ACTIVE_LOW>;
+			linux,code = <0x102>;
+			wakeup-source;
+		};
+
+		reset {
+			label = "PB_RST";
+			gpios = <&pioC 16 GPIO_ACTIVE_LOW>;
+			linux,code = <0x100>;
+			wakeup-source;
+		};
+	};
+
+	leds: pwm_leds {
+		compatible = "pwm-leds";
+
+		led_blue: pwm_blue {
+			label = "pwm:blue:user";
+			pwms = <&pwm0 2 10000000 0>;
+			max-brightness = <255>;
+			linux,default-trigger = "none";
+			status = "disabled";
+		};
+
+		led_green: pwm_green {
+			label = "pwm:green:user";
+			pwms = <&pwm0 0 10000000 0>;
+			max-brightness = <255>;
+			linux,default-trigger = "default-on";
+		};
+
+		led_red: pwm_red {
+			label = "pwm:red:user";
+			pwms = <&pwm0 1 10000000 0>;
+			max-brightness = <255>;
+			linux,default-trigger = "default-on";
+		};
+	};
+};
+
+&usart0 {
+	atmel,use-dma-rx;
+	atmel,use-dma-tx;
+	status = "okay";
+};
+
+&macb0 {
+	phy-mode = "rmii";
+	status = "okay";
+};
+
+&pwm0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm0_pwm0_1
+		     &pinctrl_pwm0_pwm1_1
+		     &pinctrl_pwm0_pwm2_1>;
+	status = "okay";
+};
+
+&dbgu {
+	status = "okay";
+};
+
+&watchdog {
+	status = "okay";
+};
+
+&adc0 {
+	status = "disabled";
+};
+
+&rtc {
+	status = "disabled";
+};
+
+&ebi {
+	pinctrl-0 = <&pinctrl_ebi_addr_nand
+			&pinctrl_ebi_data_0_7>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&nand_controller {
+	status = "okay";
+	pinctrl-0 = <&pinctrl_nand_oe_we
+		     &pinctrl_nand_cs
+		     &pinctrl_nand_rb>;
+	pinctrl-names = "default";
+
+	nand@3 {
+		reg = <0x3 0x0 0x800000>;
+		rb-gpios = <&pioD 5 GPIO_ACTIVE_HIGH>;
+		cs-gpios = <&pioD 4 GPIO_ACTIVE_HIGH>;
+		nand-bus-width = <8>;
+		nand-ecc-mode = "hw";
+		nand-ecc-strength = <4>;
+		nand-ecc-step-size = <512>;
+		nand-on-flash-bbt;
+		label = "atmel_nand";
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			bootstrap@0 {
+				label = "bootstrap";
+				reg = <0x0 0x20000>;
+			};
+
+			ubi@20000 {
+				label = "ubi";
+				reg = <0x20000 0x7fe0000>;
+			};
+		};
+	};
+};
+
+&usb0 {
+	num-ports = <1>;
+	status = "okay";
+};
+
+&usb1 {
+	status = "okay";
+};
+
diff --git a/arch/arm/boot/dts/at91-kizboxmini-mb.dts b/arch/arm/boot/dts/at91-kizboxmini-mb.dts
new file mode 100644
index 0000000..c07d307
--- /dev/null
+++ b/arch/arm/boot/dts/at91-kizboxmini-mb.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2018 Overkiz SAS
+ *   Author: Mickael Gardet <m.gardet@overkiz.com>
+ *           Kévin Raymond <k.raymond@overkiz.com>
+ */
+/dts-v1/;
+#include "at91-kizboxmini-common.dtsi"
+
+/ {
+	model = "Overkiz Kizbox Mini Mother Board";
+	compatible = "overkiz,kizboxmini-mb", "atmel,at91sam9g25",
+		     "atmel,at91sam9x5", "atmel,at91sam9";
+};
+
+&usb0 {
+	num-ports = <2>;
+};
+
+&rtc {
+	status = "okay";
+};
+
+&led_blue {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/at91-kizboxmini-rd.dts b/arch/arm/boot/dts/at91-kizboxmini-rd.dts
new file mode 100644
index 0000000..ab50f4d
--- /dev/null
+++ b/arch/arm/boot/dts/at91-kizboxmini-rd.dts
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2018 Overkiz SAS
+ *   Author: Mickael Gardet <m.gardet@overkiz.com>
+ *           Kévin Raymond <k.raymond@overkiz.com>
+ */
+/dts-v1/;
+#include "at91-kizboxmini-common.dtsi"
+
+/ {
+	model = "Overkiz Kizbox Mini RailDIN";
+	compatible = "overkiz,kizboxmini-rd", "atmel,at91sam9g25",
+		     "atmel,at91sam9x5", "atmel,at91sam9";
+
+	clocks {
+		adc_op_clk {
+			status = "okay";
+		};
+	};
+};
+
+&pinctrl {
+	adc0 {
+		pinctrl_adc0_ad5: adc0_ad5-0 {
+			/* pull-up disable */
+			atmel,pins = <AT91_PIOB 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+		};
+	};
+};
+
+&usart0 {
+	status = "disabled";
+};
+
+&rtc {
+	status = "okay";
+};
+
+&led_blue {
+	status = "okay";
+};
+
+&adc0 {
+	atmel,adc-vref = <2500>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_adc0_ad5>;
+	atmel,adc-channels-used = <0x0020>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/at91-kizboxmini.dts b/arch/arm/boot/dts/at91-kizboxmini.dts
deleted file mode 100644
index cb22f5f..0000000
--- a/arch/arm/boot/dts/at91-kizboxmini.dts
+++ /dev/null
@@ -1,171 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * at91-kizboxmini.dts - Device Tree file for Overkiz Kizbox mini board
- *
- * Copyright (C) 2014 Gaël PORTAY <g.portay@overkiz.com>
- */
-/dts-v1/;
-#include "at91sam9g25.dtsi"
-#include <dt-bindings/pwm/pwm.h>
-
-/ {
-	model = "Overkiz Kizbox mini";
-	compatible = "overkiz,kizboxmini", "atmel,at91sam9g25", "atmel,at91sam9x5", "atmel,at91sam9";
-
-	chosen {
-		bootargs = "ubi.mtd=ubi";
-		stdout-path = &dbgu;
-	};
-
-	memory {
-		reg = <0x20000000 0x8000000>;
-	};
-
-	clocks {
-		slow_xtal {
-			clock-frequency = <32768>;
-		};
-
-		main_xtal {
-			clock-frequency = <12000000>;
-		};
-	};
-
-	ahb {
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "hw";
-			atmel,has-pmecc;
-			atmel,pmecc-cap = <4>;
-			atmel,pmecc-sector-size = <512>;
-			nand-on-flash-bbt;
-			status = "okay";
-		};
-	};
-
-	gpio_keys {
-		compatible = "gpio-keys";
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		prog {
-			label = "PB_PROG";
-			gpios = <&pioC 17 GPIO_ACTIVE_LOW>;
-			linux,code = <0x102>;
-			wakeup-source;
-		};
-
-		reset {
-			label = "PB_RST";
-			gpios = <&pioC 16 GPIO_ACTIVE_LOW>;
-			linux,code = <0x100>;
-			wakeup-source;
-		};
-	};
-
-	pwm_leds {
-		compatible = "pwm-leds";
-
-		green {
-			label = "pwm:green:user";
-			pwms = <&pwm0 0 10000000 0>;
-			max-brightness = <255>;
-			linux,default-trigger = "default-on";
-		};
-
-		red {
-			label = "pwm:red:user";
-			pwms = <&pwm0 1 10000000 0>;
-			max-brightness = <255>;
-			linux,default-trigger = "default-on";
-		};
-	};
-};
-
-&dbgu {
-	status = "okay";
-};
-
-&ebi {
-	pinctrl-0 = <&pinctrl_ebi_addr_nand
-		     &pinctrl_ebi_data_0_7>;
-	pinctrl-names = "default";
-	status = "okay";
-
-	nand-controller {
-		pinctrl-0 = <&pinctrl_nand_oe_we
-			     &pinctrl_nand_cs
-			     &pinctrl_nand_rb>;
-		pinctrl-names = "default";
-		status = "okay";
-
-		nand@3 {
-			reg = <0x3 0x0 0x800000>;
-			rb-gpios = <&pioD 5 GPIO_ACTIVE_HIGH>;
-			cs-gpios = <&pioD 4 GPIO_ACTIVE_HIGH>;
-			nand-bus-width = <8>;
-			nand-ecc-mode = "hw";
-			nand-ecc-strength = <4>;
-			nand-ecc-step-size = <512>;
-			nand-on-flash-bbt;
-			label = "atmel_nand";
-
-			partitions {
-				compatible = "fixed-partitions";
-				#address-cells = <1>;
-				#size-cells = <1>;
-
-				bootstrap@0 {
-					label = "bootstrap";
-					reg = <0x0 0x20000>;
-				};
-
-				ubi@20000 {
-					label = "ubi";
-					reg = <0x20000 0x7fe0000>;
-				};
-			};
-		};
-	};
-};
-
-&macb0 {
-	phy-mode = "rmii";
-	status = "okay";
-};
-
-&pwm0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&pinctrl_pwm0_pwm0_1
-		     &pinctrl_pwm0_pwm1_1>;
-	status = "okay";
-};
-
-&tcb0 {
-	timer@0 {
-		compatible = "atmel,tcb-timer";
-		reg = <0>;
-	};
-
-	timer@1 {
-		compatible = "atmel,tcb-timer";
-		reg = <1>;
-	};
-};
-
-&usart0 {
-	status = "okay";
-};
-
-&usb0 {
-	num-ports = <1>;
-	status = "okay";
-};
-
-&usb1 {
-	status = "okay";
-};
-
-&watchdog {
-	status = "okay";
-};
diff --git a/arch/arm/boot/dts/at91-nattis-2-natte-2.dts b/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
index f245944..4f12347 100644
--- a/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
+++ b/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
@@ -8,7 +8,6 @@
  */
 /dts-v1/;
 #include "at91-linea.dtsi"
-#include "sama5d3_lcd.dtsi"
 #include "at91-natte.dtsi"
 
 / {
diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
new file mode 100644
index 0000000..9f30132
--- /dev/null
+++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
@@ -0,0 +1,647 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * at91-sam9x60ek.dts - Device Tree file for Microchip SAM9X60-EK board
+ *
+ * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Sandeep Sheriker M <sandeepsheriker.mallikarjun@microchip.com>
+ */
+/dts-v1/;
+#include "sam9x60.dtsi"
+
+/ {
+	model = "Microchip SAM9X60-EK";
+	compatible = "microchip,sam9x60ek", "microchip,sam9x60", "atmel,at91sam9";
+
+	aliases {
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		serial1 = &uart1;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	clocks {
+		slow_xtal {
+			clock-frequency = <32768>;
+		};
+
+		main_xtal {
+			clock-frequency = <24000000>;
+		};
+	};
+
+	regulators: regulators {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		vdd_1v8: fixed-regulator-vdd_1v8@0 {
+			compatible = "regulator-fixed";
+			regulator-name = "VDD_1V8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			regulator-always-on;
+			status = "okay";
+		};
+
+		vdd_1v5: fixed-regulator-vdd_1v5@1 {
+			compatible = "regulator-fixed";
+			regulator-name = "VDD_1V5";
+			regulator-min-microvolt = <1500000>;
+			regulator-max-microvolt = <1500000>;
+			regulator-always-on;
+			status = "okay";
+		};
+
+		vdd1_3v3: fixed-regulator-vdd1_3v3@2 {
+			compatible = "regulator-fixed";
+			regulator-name = "VDD1_3V3";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+			status = "okay";
+		};
+
+		vdd2_3v3: regulator-fixed-vdd2_3v3@3 {
+			compatible = "regulator-fixed";
+			regulator-name = "VDD2_3V3";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+			status = "okay";
+		};
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_key_gpio_default>;
+		status = "okay";
+
+		sw1 {
+			label = "SW1";
+			gpios = <&pioD 18 GPIO_ACTIVE_LOW>;
+			linux,code=<0x104>;
+			wakeup-source;
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		status = "okay"; /* Conflict with pwm0. */
+
+		red {
+			label = "red";
+			gpios = <&pioB 11 GPIO_ACTIVE_HIGH>;
+		};
+
+		green {
+			label = "green";
+			gpios = <&pioB 12 GPIO_ACTIVE_HIGH>;
+		};
+
+		blue {
+			label = "blue";
+			gpios = <&pioB 13 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+		};
+	};
+};
+
+&adc {
+	vddana-supply = <&vdd1_3v3>;
+	vref-supply = <&vdd1_3v3>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_adc_default &pinctrl_adtrg_default>;
+	status = "okay";
+};
+
+&can0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_can0_rx_tx>;
+	status = "disabled"; /* Conflict with dbgu. */
+};
+
+&can1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_can1_rx_tx>;
+	status = "okay";
+};
+
+&classd {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_classd_default>;
+	atmel,pwm-type = "diff";
+	atmel,non-overlap-time = <10>;
+	status = "okay";
+};
+
+&dbgu {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_dbgu>;
+	status = "okay"; /* Conflict with can0. */
+};
+
+&ebi {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_ebi_addr_nand &pinctrl_ebi_data_0_7>;
+	status = "okay";
+
+	nand_controller: nand-controller {
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_nand_oe_we &pinctrl_nand_cs &pinctrl_nand_rb>;
+		status = "okay";
+
+		nand@3 {
+			reg = <0x3 0x0 0x800000>;
+			rb-gpios = <&pioD 5 GPIO_ACTIVE_HIGH>;
+			cs-gpios = <&pioD 4 GPIO_ACTIVE_HIGH>;
+			nand-bus-width = <8>;
+			nand-ecc-mode = "hw";
+			nand-ecc-strength = <8>;
+			nand-ecc-step-size = <512>;
+			nand-on-flash-bbt;
+			label = "atmel_nand";
+
+			partitions {
+				compatible = "fixed-partitions";
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				at91bootstrap@0 {
+					label = "at91bootstrap";
+					reg = <0x0 0x40000>;
+				};
+
+				uboot@40000 {
+					label = "u-boot";
+					reg = <0x40000 0xc0000>;
+				};
+
+				ubootenvred@100000 {
+					label = "U-Boot Env Redundant";
+					reg = <0x100000 0x40000>;
+				};
+
+				ubootenv@140000 {
+					label = "U-Boot Env";
+					reg = <0x140000 0x40000>;
+				};
+
+				dtb@180000 {
+					label = "device tree";
+					reg = <0x180000 0x80000>;
+				};
+
+				kernel@200000 {
+					label = "kernel";
+					reg = <0x200000 0x600000>;
+				};
+
+				rootfs@800000 {
+					label = "rootfs";
+					reg = <0x800000 0x1f800000>;
+				};
+			};
+		};
+	};
+};
+
+&flx0 {
+	atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>;
+	status = "okay";
+
+	i2c0: i2c@600 {
+		compatible = "microchip,sam9x60-i2c";
+		reg = <0x600 0x200>;
+		interrupts = <5 IRQ_TYPE_LEVEL_HIGH 7>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&pmc PMC_TYPE_PERIPHERAL 5>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_flx0_default>;
+		atmel,fifo-size = <16>;
+		i2c-analog-filter;
+		i2c-digital-filter;
+		i2c-digital-filter-width-ns = <35>;
+		status = "okay";
+
+		eeprom@53 {
+			compatible = "atmel,24c32";
+			reg = <0x53>;
+			pagesize = <16>;
+			size = <128>;
+			status = "okay";
+		};
+	};
+};
+
+&flx4 {
+	atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_SPI>;
+	status = "disabled";
+
+	spi0: spi@400 {
+		compatible = "microchip,sam9x60-spi", "atmel,at91rm9200-spi";
+		reg = <0x400 0x200>;
+		interrupts = <13 IRQ_TYPE_LEVEL_HIGH 7>;
+		clocks = <&pmc PMC_TYPE_PERIPHERAL 13>;
+		clock-names = "spi_clk";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_flx4_default>;
+		atmel,fifo-size = <16>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+};
+
+&flx5 {
+	atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>;
+	status = "okay";
+
+	uart1: serial@200 {
+		compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart";
+		reg = <0x200 0x200>;
+		interrupts = <14 IRQ_TYPE_LEVEL_HIGH 7>;
+		dmas = <&dma0
+			(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+			 AT91_XDMAC_DT_PERID(10))>,
+		       <&dma0
+			(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+			 AT91_XDMAC_DT_PERID(11))>;
+		dma-names = "tx", "rx";
+		clocks = <&pmc PMC_TYPE_PERIPHERAL 14>;
+		clock-names = "usart";
+		pinctrl-0 = <&pinctrl_flx5_default>;
+		pinctrl-names = "default";
+		atmel,use-dma-rx;
+		atmel,use-dma-tx;
+		status = "okay";
+	};
+};
+
+&flx6 {
+	atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>;
+	status = "okay";
+
+	i2c1: i2c@600 {
+		compatible = "microchip,sam9x60-i2c";
+		reg = <0x600 0x200>;
+		interrupts = <9 IRQ_TYPE_LEVEL_HIGH 7>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&pmc PMC_TYPE_PERIPHERAL 9>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_flx6_default>;
+		atmel,fifo-size = <16>;
+		i2c-analog-filter;
+		i2c-digital-filter;
+		i2c-digital-filter-width-ns = <35>;
+		status = "okay";
+
+		gpio_exp: mcp23008@20 {
+			compatible = "microchip,mcp23008";
+			reg = <0x20>;
+		};
+	};
+};
+
+&i2s {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2s_default>;
+	#sound-dai-cells = <0>;
+	status = "disabled"; /* Conflict with QSPI. */
+};
+
+&macb0 {
+	phy-mode = "rmii";
+	#address-cells = <1>;
+	#size-cells = <0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_macb0_rmii>;
+	status = "okay";
+
+	ethernet-phy@0 {
+		reg = <0x0>;
+	};
+};
+
+&pinctrl {
+	atmel,mux-mask = <
+			 /*	A	B	C	*/
+			 0xFFFFFE7F 0xC0E0397F 0xEF00019D	/* pioA */
+			 0x03FFFFFF 0x02FC7E68 0x00780000	/* pioB */
+			 0xffffffff 0xF83FFFFF 0xB800F3FC	/* pioC */
+			 0x003FFFFF 0x003F8000 0x00000000	/* pioD */
+			 >;
+
+	adc {
+		pinctrl_adc_default: adc_default {
+			atmel,pins = <AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+		};
+
+		pinctrl_adtrg_default: adtrg_default {
+			atmel,pins = <AT91_PIOB 18 AT91_PERIPH_B AT91_PINCTRL_PULL_UP>;
+		};
+	};
+
+	dbgu {
+		pinctrl_dbgu: dbgu-0 {
+			atmel,pins = <AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+				      AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+		};
+	};
+
+	i2s {
+		pinctrl_i2s_default: i2s {
+			atmel,pins =
+				<AT91_PIOB 19 AT91_PERIPH_B AT91_PINCTRL_NONE		/* I2SCK */
+				 AT91_PIOB 20 AT91_PERIPH_B AT91_PINCTRL_NONE		/* I2SWS */
+				 AT91_PIOB 21 AT91_PERIPH_B AT91_PINCTRL_NONE		/* I2SDIN */
+				 AT91_PIOB 22 AT91_PERIPH_B AT91_PINCTRL_NONE		/* I2SDOUT */
+				 AT91_PIOB 23 AT91_PERIPH_B AT91_PINCTRL_NONE>;		/* I2SMCK */
+		};
+	};
+
+	qspi {
+		pinctrl_qspi: qspi {
+			atmel,pins =
+				<AT91_PIOB 19 AT91_PERIPH_A AT91_PINCTRL_SLEWRATE_DIS
+				 AT91_PIOB 20 AT91_PERIPH_A AT91_PINCTRL_SLEWRATE_DIS
+				 AT91_PIOB 21 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_SLEWRATE_DIS)
+				 AT91_PIOB 22 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_SLEWRATE_DIS)
+				 AT91_PIOB 23 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_SLEWRATE_DIS)
+				 AT91_PIOB 24 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_SLEWRATE_DIS)>;
+		};
+	};
+
+	nand {
+		pinctrl_nand_oe_we: nand-oe-we-0 {
+			atmel,pins =
+				<AT91_PIOD 0 AT91_PERIPH_A (AT91_PINCTRL_NONE | AT91_PINCTRL_SLEWRATE_DIS)
+				 AT91_PIOD 1 AT91_PERIPH_A (AT91_PINCTRL_NONE | AT91_PINCTRL_SLEWRATE_DIS)>;
+		};
+
+		pinctrl_nand_rb: nand-rb-0 {
+			atmel,pins =
+				<AT91_PIOD 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
+		};
+
+		pinctrl_nand_cs: nand-cs-0 {
+			atmel,pins =
+				<AT91_PIOD 4 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
+		};
+	};
+
+	ebi {
+		pinctrl_ebi_data_0_7: ebi-data-lsb-0 {
+			atmel,pins =
+				<AT91_PIOD 6 AT91_PERIPH_A (AT91_PINCTRL_NONE | AT91_PINCTRL_SLEWRATE_DIS)
+				 AT91_PIOD 7 AT91_PERIPH_A (AT91_PINCTRL_NONE | AT91_PINCTRL_SLEWRATE_DIS)
+				 AT91_PIOD 8 AT91_PERIPH_A (AT91_PINCTRL_NONE | AT91_PINCTRL_SLEWRATE_DIS)
+				 AT91_PIOD 9 AT91_PERIPH_A (AT91_PINCTRL_NONE | AT91_PINCTRL_SLEWRATE_DIS)
+				 AT91_PIOD 10 AT91_PERIPH_A (AT91_PINCTRL_NONE | AT91_PINCTRL_SLEWRATE_DIS)
+				 AT91_PIOD 11 AT91_PERIPH_A (AT91_PINCTRL_NONE | AT91_PINCTRL_SLEWRATE_DIS)
+				 AT91_PIOD 12 AT91_PERIPH_A (AT91_PINCTRL_NONE | AT91_PINCTRL_SLEWRATE_DIS)
+				 AT91_PIOD 13 AT91_PERIPH_A (AT91_PINCTRL_NONE | AT91_PINCTRL_SLEWRATE_DIS)>;
+		};
+
+		pinctrl_ebi_data_0_15: ebi-data-msb-0 {
+			atmel,pins =
+				<AT91_PIOD 6 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 7 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 8 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 9 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 10 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 11 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 12 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 13 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 14 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 15 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 16 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 17 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 18 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 19 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 20 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOD 21 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+		};
+
+		pinctrl_ebi_addr_nand: ebi-addr-0 {
+			atmel,pins =
+				<AT91_PIOD 2 AT91_PERIPH_A (AT91_PINCTRL_NONE | AT91_PINCTRL_SLEWRATE_DIS)
+				 AT91_PIOD 3 AT91_PERIPH_A (AT91_PINCTRL_NONE | AT91_PINCTRL_SLEWRATE_DIS)>;
+		};
+	};
+
+	flexcom {
+		pinctrl_flx0_default: flx0_twi {
+			atmel,pins =
+				<AT91_PIOA 0 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+				 AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+		};
+
+		pinctrl_flx4_default: flx4_spi {
+			atmel,pins =
+				<AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE
+				 AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+		};
+
+		pinctrl_flx5_default: flx_uart {
+			atmel,pins =
+				<AT91_PIOA 7 AT91_PERIPH_C AT91_PINCTRL_NONE
+				 AT91_PIOA 8 AT91_PERIPH_B AT91_PINCTRL_NONE
+				 AT91_PIOA 21 AT91_PERIPH_B AT91_PINCTRL_NONE
+				 AT91_PIOA 22 AT91_PERIPH_B AT91_PINCTRL_NONE>;
+		};
+
+		pinctrl_flx6_default: flx6_twi {
+			atmel,pins =
+				<AT91_PIOA 30 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+				 AT91_PIOA 31 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+		};
+	};
+
+	classd {
+		pinctrl_classd_default: classd {
+			atmel,pins =
+				<AT91_PIOA 24 AT91_PERIPH_C AT91_PINCTRL_PULL_UP
+				 AT91_PIOA 25 AT91_PERIPH_C AT91_PINCTRL_PULL_UP
+				 AT91_PIOA 26 AT91_PERIPH_C AT91_PINCTRL_PULL_UP
+				 AT91_PIOA 27 AT91_PERIPH_C AT91_PINCTRL_PULL_UP>;
+		};
+	};
+
+	can0 {
+		pinctrl_can0_rx_tx: can0_rx_tx {
+			atmel,pins =
+				<AT91_PIOA 9 AT91_PERIPH_B AT91_PINCTRL_NONE	/* CANRX0 */
+				 AT91_PIOA 10 AT91_PERIPH_B AT91_PINCTRL_NONE	/* CANTX0 */
+				 AT91_PIOD 20 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_DOWN	/* Enable CAN0 mux */
+				 AT91_PIOD 21 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_DOWN>;	/* Enable CAN Transceivers */
+		};
+	};
+
+	can1 {
+		pinctrl_can1_rx_tx: can1_rx_tx {
+			atmel,pins =
+				<AT91_PIOA 6 AT91_PERIPH_B AT91_PINCTRL_NONE	/* CANRX1 RXD1 */
+				 AT91_PIOA 5 AT91_PERIPH_B AT91_PINCTRL_NONE	/* CANTX1 TXD1 */
+				 AT91_PIOD 19 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_DOWN	/* Enable CAN1 mux */
+				 AT91_PIOD 21 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_DOWN>;	/* Enable CAN Transceivers */
+		};
+	};
+
+	macb0 {
+		pinctrl_macb0_rmii: macb0_rmii-0 {
+			atmel,pins =
+				<AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PB0 periph A */
+				 AT91_PIOB 1 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PB1 periph A */
+				 AT91_PIOB 2 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PB2 periph A */
+				 AT91_PIOB 3 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PB3 periph A */
+				 AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PB4 periph A */
+				 AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PB5 periph A */
+				 AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PB6 periph A */
+				 AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PB7 periph A */
+				 AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PB9 periph A */
+				 AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_NONE>;	/* PB10 periph A */
+		};
+	};
+
+	pwm0 {
+		pinctrl_pwm0_0: pwm0_0 {
+			atmel,pins = <AT91_PIOB 11 AT91_PERIPH_B AT91_PINCTRL_NONE>;
+		};
+
+		pinctrl_pwm0_1: pwm0_1 {
+			atmel,pins = <AT91_PIOB 12 AT91_PERIPH_B AT91_PINCTRL_NONE>;
+		};
+
+		pinctrl_pwm0_2: pwm0_2 {
+			atmel,pins = <AT91_PIOB 13 AT91_PERIPH_B AT91_PINCTRL_NONE>;
+		};
+
+		pinctrl_pwm0_3: pwm0_3 {
+			atmel,pins = <AT91_PIOB 14 AT91_PERIPH_B AT91_PINCTRL_NONE>;
+		};
+	};
+
+	sdmmc0 {
+		pinctrl_sdmmc0_default: sdmmc0 {
+			atmel,pins =
+				<AT91_PIOA 17 AT91_PERIPH_A (AT91_PINCTRL_DRIVE_STRENGTH_HI)				/* PA17 CK  periph A */
+				 AT91_PIOA 16 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI)	/* PA16 CMD periph A with pullup */
+				 AT91_PIOA 15 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI)	/* PA15 DAT0 periph A */
+				 AT91_PIOA 18 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI)	/* PA18 DAT1 periph A with pullup */
+				 AT91_PIOA 19 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI)	/* PA19 DAT2 periph A with pullup */
+				 AT91_PIOA 20 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI)>;	/* PA20 DAT3 periph A with pullup */
+		};
+	};
+
+	gpio_keys {
+		pinctrl_key_gpio_default: pinctrl_key_gpio {
+			atmel,pins = <AT91_PIOD 18 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+		};
+	};
+}; /* pinctrl */
+
+&pmc {
+	atmel,osc-bypass;
+};
+
+&pwm0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm0_0 &pinctrl_pwm0_1 &pinctrl_pwm0_2 &pinctrl_pwm0_3>;
+	status = "disabled"; /* Conflict with leds. */
+};
+
+&sdmmc0 {
+	bus-width = <4>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_sdmmc0_default>;
+	status = "okay";
+	cd-gpios = <&pioA 23 GPIO_ACTIVE_LOW>;
+	disable-wp;
+};
+
+&qspi {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_qspi>;
+	status = "okay"; /* Conflict with i2s. */
+
+	flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <80000000>;
+		m25p,fast-read;
+
+		at91bootstrap@0 {
+			label = "qspi: at91bootstrap";
+			reg = <0x0 0x40000>;
+		};
+
+		bootloader@40000 {
+			label = "qspi: bootloader";
+			reg = <0x40000 0xc0000>;
+		};
+
+		bootloaderenvred@100000 {
+			label = "qspi: bootloader env redundant";
+			reg = <0x100000 0x40000>;
+		};
+
+		bootloaderenv@140000 {
+			label = "qspi: bootloader env";
+			reg = <0x140000 0x40000>;
+		};
+
+		dtb@180000 {
+			label = "qspi: device tree";
+			reg = <0x180000 0x80000>;
+		};
+
+		kernel@200000 {
+			label = "qspi: kernel";
+			reg = <0x200000 0x600000>;
+		};
+	};
+};
+
+&shutdown_controller {
+	atmel,shdwc-debouncer = <976>;
+	status = "okay";
+
+	input@0 {
+		reg = <0>;
+	};
+};
+
+&tcb0 {
+	timer0: timer@0 {
+		compatible = "atmel,tcb-timer";
+		reg = <0>;
+	};
+
+	timer1: timer@1 {
+		compatible = "atmel,tcb-timer";
+		reg = <1>;
+	};
+};
+
+&usb1 {
+	num-ports = <3>;
+	atmel,vbus-gpio = <0
+			   &pioD 15 GPIO_ACTIVE_HIGH
+			   &pioD 16 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+};
+
+&usb2 {
+	status = "okay";
+};
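For reference, each "atmel,pins" entry in these at91 pinctrl hunks is a four-cell
tuple <bank pin peripheral config>: the mux selection (AT91_PERIPH_A/B/C or
AT91_PERIPH_GPIO) goes in the third cell, and configuration flags such as
AT91_PINCTRL_PULL_UP or AT91_PINCTRL_SLEWRATE_DIS are OR-ed into the last one.
A minimal sketch, with an illustrative node name:

	pinctrl_example: example-0 {
		atmel,pins =
			/* PB4, muxed to peripheral A, internal pull-up enabled */
			<AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
	};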
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
index 7788d5d..6281590 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
@@ -24,6 +24,10 @@ main_xtal {
 	};
 
 	ahb {
+		sdmmc0: sdio-host@a0000000 {
+			microchip,sdcal-inverted;
+		};
+
 		apb {
 			qspi1: spi@f0024000 {
 				pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
index fca5716..b0853bf 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
@@ -131,6 +131,9 @@ i2c2: i2c@600 {
 					interrupts = <20 IRQ_TYPE_LEVEL_HIGH 7>;
 					dmas = <0>, <0>;
 					dma-names = "tx", "rx";
+					i2c-analog-filter;
+					i2c-digital-filter;
+					i2c-digital-filter-width-ns = <35>;
 					#address-cells = <1>;
 					#size-cells = <0>;
 					clocks = <&pmc PMC_TYPE_PERIPHERAL 20>;
@@ -246,6 +249,9 @@ i2c3: i2c@600 {
 
 			i2c1: i2c@fc028000 {
 				dmas = <0>, <0>;
+				i2c-analog-filter;
+				i2c-digital-filter;
+				i2c-digital-filter-width-ns = <35>;
 				pinctrl-names = "default";
 				pinctrl-0 = <&pinctrl_i2c1_default>;
 				status = "okay";
diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
new file mode 100644
index 0000000..db3e223
--- /dev/null
+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * at91-sama5d27_wlsom1.dtsi - Device Tree file for SAMA5D27 WLSOM1
+ *
+ * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Nicolas Ferre <nicolas.ferre@microchip.com>
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ */
+#include "sama5d2.dtsi"
+#include "sama5d2-pinfunc.h"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/mfd/atmel-flexcom.h>
+#include <dt-bindings/pinctrl/at91.h>
+
+/ {
+	model = "Microchip SAMA5D27 WLSOM1";
+	compatible = "microchip,sama5d27-wlsom1", "atmel,sama5d27", "atmel,sama5d2", "atmel,sama5";
+
+	clocks {
+		slow_xtal {
+			clock-frequency = <32768>;
+		};
+
+		main_xtal {
+			clock-frequency = <24000000>;
+		};
+	};
+};
+
+&flx1 {
+	atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>;
+
+	uart6: serial@200 {
+		compatible = "atmel,at91sam9260-usart";
+		reg = <0x200 0x200>;
+		interrupts = <20 IRQ_TYPE_LEVEL_HIGH 7>;
+		dmas = <&dma0
+			(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+			 AT91_XDMAC_DT_PERID(13))>,
+		       <&dma0
+			(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+			 AT91_XDMAC_DT_PERID(14))>;
+		dma-names = "tx", "rx";
+		clocks = <&pmc PMC_TYPE_PERIPHERAL 20>;
+		clock-names = "usart";
+		pinctrl-0 = <&pinctrl_flx1_default>;
+		pinctrl-names = "default";
+	};
+};
+
+&i2c0 {
+	pinctrl-0 = <&pinctrl_i2c0_default>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&i2c1 {
+	dmas = <0>, <0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1_default>;
+	status = "okay";
+
+	mcp16502@5b {
+		compatible = "microchip,mcp16502";
+		reg = <0x5b>;
+		status = "okay";
+		lpm-gpios = <&pioBU 0 GPIO_ACTIVE_LOW>;
+
+		regulators {
+			vdd_3v3: VDD_IO {
+				regulator-name = "VDD_IO";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3700000>;
+				regulator-initial-mode = <2>;
+				regulator-allowed-modes = <2>, <4>;
+				regulator-always-on;
+
+				regulator-state-standby {
+					regulator-on-in-suspend;
+					regulator-mode = <4>;
+				};
+
+				regulator-state-mem {
+					regulator-off-in-suspend;
+					regulator-mode = <4>;
+				};
+			};
+
+			vddio_ddr: VDD_DDR {
+				regulator-name = "VDD_DDR";
+				regulator-min-microvolt = <600000>;
+				regulator-max-microvolt = <1850000>;
+				regulator-initial-mode = <2>;
+				regulator-allowed-modes = <2>, <4>;
+				regulator-always-on;
+
+				regulator-state-standby {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1200000>;
+					regulator-changeable-in-suspend;
+					regulator-mode = <4>;
+				};
+
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1200000>;
+					regulator-changeable-in-suspend;
+					regulator-mode = <4>;
+				};
+			};
+
+			vdd_core: VDD_CORE {
+				regulator-name = "VDD_CORE";
+				regulator-min-microvolt = <600000>;
+				regulator-max-microvolt = <1850000>;
+				regulator-initial-mode = <2>;
+				regulator-allowed-modes = <2>, <4>;
+				regulator-always-on;
+
+				regulator-state-standby {
+					regulator-on-in-suspend;
+					regulator-mode = <4>;
+				};
+
+				regulator-state-mem {
+					regulator-off-in-suspend;
+					regulator-mode = <4>;
+				};
+			};
+
+			vdd_ddr: VDD_OTHER {
+				regulator-name = "VDD_OTHER";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-initial-mode = <2>;
+				regulator-allowed-modes = <2>, <4>;
+				regulator-always-on;
+
+				regulator-state-standby {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1800000>;
+					regulator-changeable-in-suspend;
+					regulator-mode = <4>;
+				};
+
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1800000>;
+					regulator-changeable-in-suspend;
+					regulator-mode = <4>;
+				};
+			};
+
+			LDO1 {
+				regulator-name = "LDO1";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3700000>;
+				regulator-always-on;
+
+				regulator-state-standby {
+					regulator-on-in-suspend;
+				};
+
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			LDO2 {
+				regulator-name = "LDO2";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3700000>;
+				regulator-always-on;
+
+				regulator-state-standby {
+					regulator-on-in-suspend;
+				};
+
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+		};
+	};
+};
+
+&macb0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_macb0_default>;
+	phy-mode = "rmii";
+
+	ethernet-phy@0 {
+		reg = <0x0>;
+		interrupt-parent = <&pioA>;
+		interrupts = <PIN_PB24 IRQ_TYPE_LEVEL_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_macb0_phy_irq>;
+	};
+};
+
+&pmc {
+	atmel,osc-bypass;
+};
+
+&qspi1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_qspi1_default>;
+	status = "disabled";
+
+	qspi1_flash: spi_flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <80000000>;
+		spi-rx-bus-width = <4>;
+		spi-tx-bus-width = <4>;
+		m25p,fast-read;
+		status = "disabled";
+
+		at91bootstrap@0 {
+			label = "at91bootstrap";
+			reg = <0x0 0x40000>;
+		};
+
+		bootloader@40000 {
+			label = "bootloader";
+			reg = <0x40000 0xc0000>;
+		};
+
+		bootloaderenvred@100000 {
+			label = "bootloader env redundant";
+			reg = <0x100000 0x40000>;
+		};
+
+		bootloaderenv@140000 {
+			label = "bootloader env";
+			reg = <0x140000 0x40000>;
+		};
+
+		dtb@180000 {
+			label = "device tree";
+			reg = <0x180000 0x80000>;
+		};
+
+		kernel@200000 {
+			label = "kernel";
+			reg = <0x200000 0x600000>;
+		};
+	};
+};
+
+&pioA {
+	pinctrl_flx1_default: flx1_usart_default {
+		pinmux = <PIN_PA24__FLEXCOM1_IO0>,
+			 <PIN_PA23__FLEXCOM1_IO1>,
+			 <PIN_PA25__FLEXCOM1_IO3>,
+			 <PIN_PA26__FLEXCOM1_IO4>;
+		bias-disable;
+	};
+
+	pinctrl_i2c0_default: i2c0_default {
+		pinmux = <PIN_PD21__TWD0>,
+			 <PIN_PD22__TWCK0>;
+		bias-disable;
+	};
+
+	pinctrl_i2c1_default: i2c1_default {
+		pinmux = <PIN_PD19__TWD1>,
+			 <PIN_PD20__TWCK1>;
+		bias-disable;
+	};
+
+	pinctrl_macb0_default: macb0_default {
+		pinmux = <PIN_PB14__GTXCK>,
+			 <PIN_PB15__GTXEN>,
+			 <PIN_PB16__GRXDV>,
+			 <PIN_PB17__GRXER>,
+			 <PIN_PB18__GRX0>,
+			 <PIN_PB19__GRX1>,
+			 <PIN_PB20__GTX0>,
+			 <PIN_PB21__GTX1>,
+			 <PIN_PB22__GMDC>,
+			 <PIN_PB23__GMDIO>;
+		bias-disable;
+	};
+
+	pinctrl_macb0_phy_irq: macb0_phy_irq {
+		pinmux = <PIN_PB24__GPIO>;
+		bias-disable;
+	};
+
+	pinctrl_qspi1_default: qspi1_default {
+		pinmux = <PIN_PB5__QSPI1_SCK>,
+			 <PIN_PB6__QSPI1_CS>,
+			 <PIN_PB7__QSPI1_IO0>,
+			 <PIN_PB8__QSPI1_IO1>,
+			 <PIN_PB9__QSPI1_IO2>,
+			 <PIN_PB10__QSPI1_IO3>;
+		bias-pull-up;
+	};
+};
+
diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
new file mode 100644
index 0000000..0b9fa29
--- /dev/null
+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * at91-sama5d27_wlsom1_ek.dts - Device Tree file for SAMA5D27 WLSOM1 EK
+ *
+ * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Nicolas Ferre <nicolas.ferre@microchip.com>
+ */
+/dts-v1/;
+#include "at91-sama5d27_wlsom1.dtsi"
+
+/ {
+	model = "Microchip SAMA5D27 WLSOM1 EK";
+	compatible = "microchip,sama5d27-wlsom1-ek", "microchip,sama5d27-wlsom1", "atmel,sama5d27", "atmel,sama5d2", "atmel,sama5";
+
+	aliases {
+		serial0 = &uart0;	/* DBGU */
+		serial1 = &uart6;	/* BT */
+		serial2 = &uart5;	/* mikro BUS 2 */
+		serial3 = &uart3;	/* mikro BUS 1 */
+		i2c1	= &i2c1;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_key_gpio_default>;
+		status = "okay";
+
+		sw4 {
+			label = "USER BUTTON";
+			gpios = <&pioA PIN_PB2 GPIO_ACTIVE_LOW>;
+			linux,code = <0x104>;
+			wakeup-source;
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_led_gpio_default>;
+		status = "okay";
+
+		red {
+			label = "red";
+			gpios = <&pioA PIN_PA6 GPIO_ACTIVE_HIGH>;
+		};
+
+		green {
+			label = "green";
+			gpios = <&pioA PIN_PA7 GPIO_ACTIVE_HIGH>;
+		};
+
+		blue {
+			label = "blue";
+			gpios = <&pioA PIN_PA8 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+		};
+	};
+};
+
+&adc {
+	vddana-supply = <&vdd_3v3>;
+	vref-supply = <&vdd_3v3>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_adc_default>;
+	status = "okay";
+};
+
+&flx0 {
+	atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>;
+	status = "okay";
+
+	uart5: serial@200 {
+		compatible = "atmel,at91sam9260-usart";
+		reg = <0x200 0x200>;
+		interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>;
+		dmas = <&dma0
+			(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+			 AT91_XDMAC_DT_PERID(11))>,
+		       <&dma0
+			(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+			 AT91_XDMAC_DT_PERID(12))>;
+		dma-names = "tx", "rx";
+		clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
+		clock-names = "usart";
+		pinctrl-0 = <&pinctrl_flx0_default>;
+		pinctrl-names = "default";
+		atmel,use-dma-rx;
+		atmel,use-dma-tx;
+		status = "okay";
+	};
+};
+
+&flx1 {
+	status = "okay";
+
+	uart6: serial@200 {
+		atmel,use-dma-rx;
+		atmel,use-dma-tx;
+		status = "okay";
+	};
+};
+
+&macb0 {
+	status = "okay";
+};
+
+&pioA {
+	/*
+	 * There is no real pinmux for the ADC: if a pin is not
+	 * requested by another peripheral, it is muxed automatically
+	 * when the channel is enabled.
+	 * Requesting the ADC pins as GPIO is still encouraged,
+	 * to prevent conflicts and to disable the bias so that
+	 * the pin is in the same state whether or not it is
+	 * muxed to the ADC.
+	 */
+	pinctrl_adc_default: adc_default {
+		pinmux = <PIN_PD25__GPIO>,
+			 <PIN_PD26__GPIO>;
+		bias-disable;
+	};
+
+	pinctrl_flx0_default: flx0_usart_default {
+		pinmux = <PIN_PB28__FLEXCOM0_IO0>,
+			 <PIN_PB29__FLEXCOM0_IO1>;
+		bias-disable;
+	};
+
+	pinctrl_key_gpio_default: key_gpio_default {
+		pinmux = <PIN_PB2__GPIO>;
+		bias-pull-up;
+	};
+
+	pinctrl_led_gpio_default: led_gpio_default {
+		pinmux = <PIN_PA6__GPIO>,
+			 <PIN_PA7__GPIO>,
+			 <PIN_PA8__GPIO>;
+		bias-pull-down;
+	};
+
+	pinctrl_sdmmc0_default: sdmmc0_default {
+		cmd_data {
+			pinmux = <PIN_PA1__SDMMC0_CMD>,
+				 <PIN_PA2__SDMMC0_DAT0>,
+				 <PIN_PA3__SDMMC0_DAT1>,
+				 <PIN_PA4__SDMMC0_DAT2>,
+				 <PIN_PA5__SDMMC0_DAT3>;
+			bias-disable;
+		};
+
+		ck_cd_vddsel {
+			pinmux = <PIN_PA0__SDMMC0_CK>,
+				 <PIN_PA11__SDMMC0_VDDSEL>,
+				 <PIN_PA12__SDMMC0_WP>,
+				 <PIN_PA13__SDMMC0_CD>;
+			bias-disable;
+		};
+	};
+
+	pinctrl_uart0_default: uart0_default {
+		pinmux = <PIN_PB26__URXD0>,
+			 <PIN_PB27__UTXD0>;
+		bias-disable;
+	};
+
+	pinctrl_uart3_default: uart3_default {
+		pinmux = <PIN_PB11__URXD3>,
+			 <PIN_PB12__UTXD3>;
+		bias-disable;
+	};
+
+	pinctrl_pwm0_default: pwm0_default {
+		pinmux = <PIN_PA31__PWML0>,
+			 <PIN_PA30__PWMH0>;
+		bias-disable;
+	};
+
+	pinctrl_usb_default: usb_default {
+		pinmux = <PIN_PA10__GPIO>;
+		bias-disable;
+	};
+};
+
+&pwm0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm0_default>;
+	status = "okay";
+};
+
+&qspi1 {
+	status = "okay";
+
+	qspi1_flash: spi_flash@0 {
+		status = "okay";
+	};
+};
+
+&sdmmc0 {
+	bus-width = <4>;
+	mmc-ddr-3_3v;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_sdmmc0_default>;
+	status = "okay";
+};
+
+&shutdown_controller {
+	atmel,shdwc-debouncer = <976>;
+	atmel,wakeup-rtc-timer;
+
+	input@0 {
+		reg = <0>;
+		atmel,wakeup-type = "low";
+	};
+};
+
+&tcb0 {
+	timer0: timer@0 {
+		compatible = "atmel,tcb-timer";
+		reg = <0>;
+	};
+
+	timer1: timer@1 {
+		compatible = "atmel,tcb-timer";
+		reg = <1>;
+	};
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart0_default>;
+	atmel,use-dma-rx;
+	atmel,use-dma-tx;
+	status = "okay";
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart3_default>;
+	atmel,use-dma-rx;
+	atmel,use-dma-tx;
+	status = "okay";
+};
+
+&usb1 {
+	num-ports = <3>;
+	atmel,vbus-gpio = <0
+			   &pioA PIN_PA10 GPIO_ACTIVE_HIGH
+			   0
+			  >;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usb_default>;
+	status = "okay";
+};
+
+&usb2 {
+	phy_type = "hsic";
+	status = "okay";
+};
+
+&watchdog {
+	status = "okay";
+};
+
diff --git a/arch/arm/boot/dts/at91-smartkiz.dts b/arch/arm/boot/dts/at91-smartkiz.dts
new file mode 100644
index 0000000..106f23b
--- /dev/null
+++ b/arch/arm/boot/dts/at91-smartkiz.dts
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2018 Overkiz SAS
+ *   Author: Mickael Gardet <m.gardet@overkiz.com>
+ *           Kévin Raymond <k.raymond@overkiz.com>
+ *           Dorian Rocipon <d.rocipon@overkiz.com>
+ */
+/dts-v1/;
+#include "at91-kizboxmini-common.dtsi"
+
+/ {
+	model = "Overkiz SmartKiz";
+	compatible = "overkiz,smartkiz", "atmel,at91sam9g25",
+		     "atmel,at91sam9x5", "atmel,at91sam9";
+
+	clocks {
+		adc_op_clk {
+			status = "okay";
+		};
+	};
+
+	aliases {
+		serial5 = &uart0;
+	};
+
+	pio_keys {
+		hk_reset {
+			label = "HK_RESET";
+			gpios = <&pioC 13 GPIO_ACTIVE_HIGH>;
+		};
+
+		power_rf {
+			label = "POWER_RF";
+			gpios = <&pioA 20 GPIO_ACTIVE_HIGH>;
+		};
+
+		power_wifi {
+			label = "POWER_WIFI";
+			gpios = <&pioA 21 GPIO_ACTIVE_HIGH>;
+		};
+	};
+};
+
+&pinctrl {
+	i2c1 {
+		pinctrl_i2c1: i2c1-0 {
+			atmel,pins =
+				<AT91_PIOC 0 AT91_PERIPH_C AT91_PINCTRL_PULL_UP
+				AT91_PIOC 1 AT91_PERIPH_C AT91_PINCTRL_PULL_UP>;
+		};
+	};
+
+	adc0 {
+		pinctrl_adc0_ad0: adc0_ad0-0 {
+			/* pull-up disable */
+			atmel,pins = <AT91_PIOB 11 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+		};
+		pinctrl_adc0_ad5: adc0_ad5-0 {
+			/* pull-up disable */
+			atmel,pins = <AT91_PIOB 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+		};
+		pinctrl_adc0_ad6: adc0_ad6-0 {
+			/* pull-up disable */
+			atmel,pins = <AT91_PIOB 17 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+		};
+		pinctrl_adc0_ad11: adc0_ad11-0 {
+			/* pull-up disable */
+			atmel,pins = <AT91_PIOB 10 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+		};
+	};
+};
+
+&i2c1 {
+	dmas = <0>, <0>;
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "disabled";
+};
+
+&macb0 {
+	status = "disabled";
+};
+
+&rtc {
+	status = "okay";
+};
+
+&leds {
+	blue {
+		status = "okay";
+	};
+};
+
+&adc0 {
+	atmel,adc-vref = <2500>;
+	pinctrl-names = "default";
+	pinctrl-0 = <
+		&pinctrl_adc0_ad0
+		&pinctrl_adc0_ad5
+		&pinctrl_adc0_ad6
+		&pinctrl_adc0_ad11
+	>;
+	atmel,adc-channels-used = <0x0861>;
+	status = "okay";
+};
+
+&uart0 {
+	status = "okay";
+};
+
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index dee9c0c..6afbb48 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -187,7 +187,7 @@ pinctrl_dbgu: dbgu-0 {
 				usart0 {
 					pinctrl_usart0: usart0-0 {
 						atmel,pins =
-							<AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE
+							<AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
 							 AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
@@ -221,7 +221,7 @@ pinctrl_usart0_ri: usart0_ri-0 {
 				usart1 {
 					pinctrl_usart1: usart1-0 {
 						atmel,pins =
-							<AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE
+							<AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
 							 AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
@@ -239,7 +239,7 @@ pinctrl_usart1_cts: usart1_cts-0 {
 				usart2 {
 					pinctrl_usart2: usart2-0 {
 						atmel,pins =
-							<AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE
+							<AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
 							 AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
@@ -257,7 +257,7 @@ pinctrl_usart2_cts: usart2_cts-0 {
 				usart3 {
 					pinctrl_usart3: usart3-0 {
 						atmel,pins =
-							<AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_NONE
+							<AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
 							 AT91_PIOB 11 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
@@ -275,7 +275,7 @@ pinctrl_usart3_cts: usart3_cts-0 {
 				uart0 {
 					pinctrl_uart0: uart0-0 {
 						atmel,pins =
-							<AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_NONE
+							<AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_PULL_UP
 							 AT91_PIOA 30 AT91_PERIPH_B AT91_PINCTRL_PULL_UP>;
 					};
 				};
@@ -283,7 +283,7 @@ pinctrl_uart0: uart0-0 {
 				uart1 {
 					pinctrl_uart1: uart1-0 {
 						atmel,pins =
-							<AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_NONE
+							<AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
 							 AT91_PIOB 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 				};
@@ -738,7 +738,7 @@ rtc@fffffd20 {
 				status = "disabled";
 			};
 
-			watchdog@fffffd40 {
+			watchdog: watchdog@fffffd40 {
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffd40 0x10>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
@@ -793,7 +793,7 @@ nand_controller: nand-controller {
 		};
 	};
 
-	i2c-gpio-0 {
+	i2c_gpio0: i2c-gpio-0 {
 		compatible = "i2c-gpio";
 		gpios = <&pioA 23 GPIO_ACTIVE_HIGH /* sda */
 			 &pioA 24 GPIO_ACTIVE_HIGH /* scl */
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index dba025a..5ed3d74 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -329,7 +329,7 @@ pinctrl_dbgu: dbgu-0 {
 				usart0 {
 					pinctrl_usart0: usart0-0 {
 						atmel,pins =
-							<AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_NONE>,
+							<AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
 							<AT91_PIOC 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
@@ -347,7 +347,7 @@ pinctrl_usart0_cts: usart0_cts-0 {
 				usart1 {
 					pinctrl_usart1: usart1-0 {
 						atmel,pins =
-							<AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_NONE>,
+							<AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
 							<AT91_PIOC 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
@@ -365,7 +365,7 @@ pinctrl_usart1_cts: usart1_cts-0 {
 				usart2 {
 					pinctrl_usart2: usart2-0 {
 						atmel,pins =
-							<AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_NONE>,
+							<AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
 							<AT91_PIOC 15 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 99678ab..5c990cf 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -183,7 +183,7 @@ pinctrl_dbgu: dbgu-0 {
 				usart0 {
 					pinctrl_usart0: usart0-0 {
 						atmel,pins =
-							<AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_NONE
+							<AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
 							 AT91_PIOA 27 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
@@ -201,7 +201,7 @@ pinctrl_usart0_cts: usart0_cts-0 {
 				usart1 {
 					pinctrl_usart1: usart1-0 {
 						atmel,pins =
-							<AT91_PIOD 0 AT91_PERIPH_A AT91_PINCTRL_NONE
+							<AT91_PIOD 0 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
 							 AT91_PIOD 1 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
@@ -219,7 +219,7 @@ pinctrl_usart1_cts: usart1_cts-0 {
 				usart2 {
 					pinctrl_usart2: usart2-0 {
 						atmel,pins =
-							<AT91_PIOD 2 AT91_PERIPH_A AT91_PINCTRL_NONE
+							<AT91_PIOD 2 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
 							 AT91_PIOD 3 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 691c95e..fd17909 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -556,7 +556,7 @@ pinctrl_isi_data_10_11: isi-0-data-10-11 {
 				usart0 {
 					pinctrl_usart0: usart0-0 {
 						atmel,pins =
-							<AT91_PIOB 19 AT91_PERIPH_A AT91_PINCTRL_NONE
+							<AT91_PIOB 19 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
 							 AT91_PIOB 18 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
@@ -574,7 +574,7 @@ pinctrl_usart0_cts: usart0_cts-0 {
 				usart1 {
 					pinctrl_usart1: usart1-0 {
 						atmel,pins =
-							<AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE
+							<AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
 							 AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
@@ -592,7 +592,7 @@ pinctrl_usart1_cts: usart1_cts-0 {
 				usart2 {
 					pinctrl_usart2: usart2-0 {
 						atmel,pins =
-							<AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE
+							<AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
 							 AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
@@ -610,7 +610,7 @@ pinctrl_usart2_cts: usart2_cts-0 {
 				usart3 {
 					pinctrl_usart3: usart3-0 {
 						atmel,pins =
-							<AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE
+							<AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
 							 AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
index 8643b71..ea024e4 100644
--- a/arch/arm/boot/dts/at91sam9rl.dtsi
+++ b/arch/arm/boot/dts/at91sam9rl.dtsi
@@ -682,7 +682,7 @@ pinctrl_tcb0_tiob2: tcb0_tiob2-0 {
 				usart0 {
 					pinctrl_usart0: usart0-0 {
 						atmel,pins =
-							<AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE>,
+							<AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
 							<AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
@@ -721,7 +721,7 @@ pinctrl_usart0_sck: usart0_sck-0 {
 				usart1 {
 					pinctrl_usart1: usart1-0 {
 						atmel,pins =
-							<AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE>,
+							<AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
 							<AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
@@ -744,7 +744,7 @@ pinctrl_usart1_sck: usart1_sck-0 {
 				usart2 {
 					pinctrl_usart2: usart2-0 {
 						atmel,pins =
-							<AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE>,
+							<AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
 							<AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
@@ -767,7 +767,7 @@ pinctrl_usart2_sck: usart2_sck-0 {
 				usart3 {
 					pinctrl_usart3: usart3-0 {
 						atmel,pins =
-							<AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_NONE>,
+							<AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
 							<AT91_PIOB 1 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
 					};
 
diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
index 8eedd6e..d1e684d 100644
--- a/arch/arm/boot/dts/bcm2711.dtsi
+++ b/arch/arm/boot/dts/bcm2711.dtsi
@@ -12,26 +12,6 @@ / {
 
 	interrupt-parent = <&gicv2>;
 
-	reserved-memory {
-		#address-cells = <2>;
-		#size-cells = <1>;
-		ranges;
-
-		/*
-		 * arm64 reserves the CMA by default somewhere in ZONE_DMA32,
-		 * that's not good enough for the BCM2711 as some devices can
-		 * only address the lower 1G of memory (ZONE_DMA).
-		 */
-		linux,cma {
-			compatible = "shared-dma-pool";
-			size = <0x2000000>; /* 32MB */
-			alloc-ranges = <0x0 0x00000000 0x40000000>;
-			reusable;
-			linux,cma-default;
-		};
-	};
-
-
 	soc {
 		/*
 		 * Defined ranges:
@@ -123,10 +103,8 @@ pm: watchdog@7e100000 {
 		};
 
 		rng@7e104000 {
-			interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
-
-			/* RNG is incompatible with brcm,bcm2835-rng */
-			status = "disabled";
+			compatible = "brcm,bcm2711-rng200";
+			reg = <0x7e104000 0x28>;
 		};
 
 		uart2: serial@7e201400 {
@@ -342,7 +320,36 @@ scb {
 		#address-cells = <2>;
 		#size-cells = <1>;
 
-		ranges = <0x0 0x7c000000  0x0 0xfc000000  0x03800000>;
+		ranges = <0x0 0x7c000000  0x0 0xfc000000  0x03800000>,
+			 <0x6 0x00000000  0x6 0x00000000  0x40000000>;
+
+		pcie0: pcie@7d500000 {
+			compatible = "brcm,bcm2711-pcie";
+			reg = <0x0 0x7d500000 0x9310>;
+			device_type = "pci";
+			#address-cells = <3>;
+			#interrupt-cells = <1>;
+			#size-cells = <2>;
+			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "pcie", "msi";
+			interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+			interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143
+							IRQ_TYPE_LEVEL_HIGH>;
+			msi-controller;
+			msi-parent = <&pcie0>;
+
+			ranges = <0x02000000 0x0 0xf8000000 0x6 0x00000000
+				  0x0 0x04000000>;
+			/*
+			 * The wrapper around the PCIe block has a bug
+			 * preventing it from accessing beyond the first 3GB of
+			 * memory.
+			 */
+			dma-ranges = <0x02000000 0x0 0x00000000 0x0 0x00000000
+				      0x0 0xc0000000>;
+			brcm,enable-ssc;
+		};
 
 		genet: ethernet@7d580000 {
 			compatible = "brcm,bcm2711-genet-v5";
@@ -840,6 +847,19 @@ pin-rts {
 	};
 };
 
+&rmem {
+	#address-cells = <2>;
+};
+
+&cma {
+	/*
+	 * arm64 reserves the CMA by default somewhere in ZONE_DMA32,
+	 * that's not good enough for the BCM2711 as some devices can
+	 * only address the lower 1G of memory (ZONE_DMA).
+	 */
+	alloc-ranges = <0x0 0x00000000 0x40000000>;
+};
+
 &i2c0 {
 	compatible = "brcm,bcm2711-i2c", "brcm,bcm2835-i2c";
 	interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
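The &rmem and &cma overrides above resolve against labels that this same patch
introduces in bcm283x.dtsi (next hunk): labelling a node in the shared .dtsi lets
an including SoC or board file extend it without repeating the full node path.
A stripped-down sketch of the pattern, property values shortened for illustration:

	/* shared bcm283x.dtsi */
	rmem: reserved-memory {
		cma: linux,cma {
			compatible = "shared-dma-pool";
		};
	};

	/* bcm2711.dtsi, which includes it */
	&cma {
		alloc-ranges = <0x0 0x00000000 0x40000000>;
	};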
diff --git a/arch/arm/boot/dts/bcm2835-common.dtsi b/arch/arm/boot/dts/bcm2835-common.dtsi
index fe1ab40..2b1d9d4c 100644
--- a/arch/arm/boot/dts/bcm2835-common.dtsi
+++ b/arch/arm/boot/dts/bcm2835-common.dtsi
@@ -70,6 +70,12 @@ pm: watchdog@7e100000 {
 			system-power-controller;
 		};
 
+		rng@7e104000 {
+			compatible = "brcm,bcm2835-rng";
+			reg = <0x7e104000 0x10>;
+			interrupts = <2 29>;
+		};
+
 		pixelvalve@7e206000 {
 			compatible = "brcm,bcm2835-pixelvalve0";
 			reg = <0x7e206000 0x100>;
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 8394916..e1abe8c 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -30,6 +30,19 @@ chosen {
 		stdout-path = "serial0:115200n8";
 	};
 
+	rmem: reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		cma: linux,cma {
+			compatible = "shared-dma-pool";
+			size = <0x4000000>; /* 64MB */
+			reusable;
+			linux,cma-default;
+		};
+	};
+
 	thermal-zones {
 		cpu_thermal: cpu-thermal {
 			polling-delay-passive = <0>;
@@ -84,12 +97,6 @@ clocks: cprman@7e101000 {
 				<&dsi1 0>, <&dsi1 1>, <&dsi1 2>;
 		};
 
-		rng@7e104000 {
-			compatible = "brcm,bcm2835-rng";
-			reg = <0x7e104000 0x10>;
-			interrupts = <2 29>;
-		};
-
 		mailbox: mailbox@7e00b880 {
 			compatible = "brcm,bcm2835-mbox";
 			reg = <0x7e00b880 0x40>;
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index a2c9de3..536fb24 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -55,18 +55,9 @@ gpio-restart {
 		priority = <200>;
 	};
 
-	/* Hardware I2C block cannot do more than 63 bytes per transfer,
-	 * which would prevent reading from a SFP's EEPROM (256 byte).
-	 */
-	i2c1: i2c {
-		compatible = "i2c-gpio";
-		sda-gpios = <&gpioa 5 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
-		scl-gpios = <&gpioa 4 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
-	};
-
 	sfp: sfp {
 		compatible = "sff,sfp";
-		i2c-bus = <&i2c1>;
+		i2c-bus = <&i2c0>;
 		mod-def0-gpios = <&gpioa 28 GPIO_ACTIVE_LOW>;
 		los-gpios = <&gpioa 24 GPIO_ACTIVE_HIGH>;
 		tx-fault-gpios = <&gpioa 30 GPIO_ACTIVE_HIGH>;
@@ -74,6 +65,10 @@ sfp: sfp {
 	};
 };
 
+&i2c0 {
+	status = "okay";
+};
+
 &amac0 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/dm3725.dtsi b/arch/arm/boot/dts/dm3725.dtsi
new file mode 100644
index 0000000..d24e906
--- /dev/null
+++ b/arch/arm/boot/dts/dm3725.dtsi
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 André Hentschel <nerv@dawncrow.de>
+ */
+
+#include "omap36xx.dtsi"
+
+&sgx_module {
+	status = "disabled";
+};
diff --git a/arch/arm/boot/dts/dra7-evm-common.dtsi b/arch/arm/boot/dts/dra7-evm-common.dtsi
index 82eeba8..23244b5 100644
--- a/arch/arm/boot/dts/dra7-evm-common.dtsi
+++ b/arch/arm/boot/dts/dra7-evm-common.dtsi
@@ -4,7 +4,7 @@
  */
 
 #include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clk/ti-dra7-atl.h>
+#include <dt-bindings/clock/ti-dra7-atl.h>
 #include <dt-bindings/input/input.h>
 
 / {
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index 7e7aa10..fc41883 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -186,7 +186,6 @@ cm_core_clockdomains: clockdomains {
 
 		target-module@56000 {			/* 0x4a056000, ap 9 02.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "dma_system";
 			reg = <0x56000 0x4>,
 			      <0x5602c 0x4>,
 			      <0x56028 0x4>;
@@ -212,7 +211,7 @@ SYSC_OMAP2_SOFTRESET |
 			ranges = <0x0 0x56000 0x1000>;
 
 			sdma: dma-controller@0 {
-				compatible = "ti,omap4430-sdma";
+				compatible = "ti,omap4430-sdma", "ti,omap-sdma";
 				reg = <0x0 0x1000>;
 				interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
@@ -234,7 +233,6 @@ target-module@5e000 {			/* 0x4a05e000, ap 11 1a.0 */
 
 		target-module@80000 {			/* 0x4a080000, ap 13 20.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "ocp2scp1";
 			reg = <0x80000 0x4>,
 			      <0x80010 0x4>,
 			      <0x80014 0x4>;
@@ -302,7 +300,6 @@ usb3_phy1: phy@4400 {
 
 		target-module@90000 {			/* 0x4a090000, ap 59 42.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "ocp2scp3";
 			reg = <0x90000 0x4>,
 			      <0x90010 0x4>,
 			      <0x90014 0x4>;
@@ -394,7 +391,6 @@ target-module@a0000 {			/* 0x4a0a0000, ap 15 40.0 */
 
 		target-module@d9000 {			/* 0x4a0d9000, ap 17 72.0 */
 			compatible = "ti,sysc-omap4-sr", "ti,sysc";
-			ti,hwmods = "smartreflex_mpu";
 			reg = <0xd9038 0x4>;
 			reg-names = "sysc";
 			ti,sysc-mask = <SYSC_OMAP3_SR_ENAWAKEUP>;
@@ -414,7 +410,6 @@ target-module@d9000 {			/* 0x4a0d9000, ap 17 72.0 */
 
 		target-module@dd000 {			/* 0x4a0dd000, ap 19 18.0 */
 			compatible = "ti,sysc-omap4-sr", "ti,sysc";
-			ti,hwmods = "smartreflex_core";
 			reg = <0xdd038 0x4>;
 			reg-names = "sysc";
 			ti,sysc-mask = <SYSC_OMAP3_SR_ENAWAKEUP>;
@@ -471,7 +466,6 @@ mailbox1: mailbox@0 {
 
 		target-module@f6000 {			/* 0x4a0f6000, ap 25 78.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "spinlock";
 			reg = <0xf6000 0x4>,
 			      <0xf6010 0x4>,
 			      <0xf6014 0x4>;
@@ -1233,7 +1227,6 @@ timer4: timer@0 {
 
 		target-module@3e000 {			/* 0x4803e000, ap 11 56.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer9";
 			reg = <0x3e000 0x4>,
 			      <0x3e010 0x4>;
 			reg-names = "rev", "sysc";
@@ -1748,7 +1741,6 @@ i2c2: i2c@0 {
 
 		target-module@78000 {			/* 0x48078000, ap 39 0a.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "elm";
 			reg = <0x78000 0x4>,
 			      <0x78010 0x4>,
 			      <0x78014 0x4>;
@@ -1842,7 +1834,6 @@ i2c5: i2c@0 {
 
 		target-module@86000 {			/* 0x48086000, ap 41 5e.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer10";
 			reg = <0x86000 0x4>,
 			      <0x86010 0x4>;
 			reg-names = "rev", "sysc";
@@ -1870,7 +1861,6 @@ timer10: timer@0 {
 
 		target-module@88000 {			/* 0x48088000, ap 43 66.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer11";
 			reg = <0x88000 0x4>,
 			      <0x88010 0x4>;
 			reg-names = "rev", "sysc";
@@ -2044,6 +2034,37 @@ target-module@a4000 {			/* 0x480a4000, ap 57 42.0 */
 				 <0x00001000 0x000a5000 0x00001000>;
 		};
 
+		des_target: target-module@a5000 {	/* 0x480a5000 */
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0xa5030 0x4>,
+			      <0xa5034 0x4>,
+			      <0xa5038 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>,
+					<SYSC_IDLE_SMART_WKUP>;
+			ti,syss-mask = <1>;
+			/* Domains (P, C): l4per_pwrdm, l4sec_clkdm */
+			clocks = <&l4sec_clkctrl DRA7_L4SEC_DES_CLKCTRL 0>;
+			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0xa5000 0x00001000>;
+
+			des: des@0 {
+				compatible = "ti,omap4-des";
+				reg = <0 0xa0>;
+				interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&sdma_xbar 117>, <&sdma_xbar 116>;
+				dma-names = "tx", "rx";
+				clocks = <&l3_iclk_div>;
+				clock-names = "fck";
+			};
+		};
+
 		target-module@a8000 {			/* 0x480a8000, ap 59 1a.0 */
 			compatible = "ti,sysc";
 			status = "disabled";
@@ -2490,7 +2511,6 @@ atl: atl@0 {
 
 		target-module@3e000 {			/* 0x4843e000, ap 25 30.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "epwmss0";
 			reg = <0x3e000 0x4>,
 			      <0x3e004 0x4>;
 			reg-names = "rev", "sysc";
@@ -2537,7 +2557,6 @@ ehrpwm0: pwm@200 {
 
 		target-module@40000 {			/* 0x48440000, ap 27 38.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "epwmss1";
 			reg = <0x40000 0x4>,
 			      <0x40004 0x4>;
 			reg-names = "rev", "sysc";
@@ -2584,7 +2603,6 @@ ehrpwm1: pwm@200 {
 
 		target-module@42000 {			/* 0x48442000, ap 29 20.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "epwmss2";
 			reg = <0x42000 0x4>,
 			      <0x42004 0x4>;
 			reg-names = "rev", "sysc";
@@ -3326,7 +3344,6 @@ target-module@1e000 {			/* 0x4881e000, ap 93 2c.0 */
 
 		target-module@20000 {			/* 0x48820000, ap 5 08.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer5";
 			reg = <0x20000 0x4>,
 			      <0x20010 0x4>;
 			reg-names = "rev", "sysc";
@@ -3354,7 +3371,6 @@ timer5: timer@0 {
 
 		target-module@22000 {			/* 0x48822000, ap 7 24.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer6";
 			reg = <0x22000 0x4>,
 			      <0x22010 0x4>;
 			reg-names = "rev", "sysc";
@@ -3382,7 +3398,6 @@ timer6: timer@0 {
 
 		target-module@24000 {			/* 0x48824000, ap 9 26.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer7";
 			reg = <0x24000 0x4>,
 			      <0x24010 0x4>;
 			reg-names = "rev", "sysc";
@@ -3410,7 +3425,6 @@ timer7: timer@0 {
 
 		target-module@26000 {			/* 0x48826000, ap 11 0c.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer8";
 			reg = <0x26000 0x4>,
 			      <0x26010 0x4>;
 			reg-names = "rev", "sysc";
@@ -3438,7 +3452,6 @@ timer8: timer@0 {
 
 		target-module@28000 {			/* 0x48828000, ap 13 16.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer13";
 			reg = <0x28000 0x4>,
 			      <0x28010 0x4>;
 			reg-names = "rev", "sysc";
@@ -3466,7 +3479,6 @@ timer13: timer@0 {
 
 		target-module@2a000 {			/* 0x4882a000, ap 15 10.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer14";
 			reg = <0x2a000 0x4>,
 			      <0x2a010 0x4>;
 			reg-names = "rev", "sysc";
@@ -3494,7 +3506,6 @@ timer14: timer@0 {
 
 		target-module@2c000 {			/* 0x4882c000, ap 17 02.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer15";
 			reg = <0x2c000 0x4>,
 			      <0x2c010 0x4>;
 			reg-names = "rev", "sysc";
@@ -3522,7 +3533,6 @@ timer15: timer@0 {
 
 		target-module@2e000 {			/* 0x4882e000, ap 19 14.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer16";
 			reg = <0x2e000 0x4>,
 			      <0x2e010 0x4>;
 			reg-names = "rev", "sysc";
@@ -4166,35 +4176,88 @@ usb4_tm: target-module@140000 {		/* 0x48940000, ap 75 3c.0 */
 		};
 
 		target-module@170000 {			/* 0x48970000, ap 21 0a.0 */
-			compatible = "ti,sysc";
-			status = "disabled";
+			compatible = "ti,sysc-omap4", "ti,sysc";
+			reg = <0x170010 0x4>;
+			reg-names = "sysc";
+			ti,sysc-midle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			clocks = <&cam_clkctrl DRA7_CAM_VIP1_CLKCTRL 0>;
+			clock-names = "fck";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x0 0x170000 0x10000>;
+			status = "disabled";
 		};
 
 		target-module@190000 {			/* 0x48990000, ap 23 2e.0 */
-			compatible = "ti,sysc";
-			status = "disabled";
+			compatible = "ti,sysc-omap4", "ti,sysc";
+			reg = <0x190010 0x4>;
+			reg-names = "sysc";
+			ti,sysc-midle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			clocks = <&cam_clkctrl DRA7_CAM_VIP2_CLKCTRL 0>;
+			clock-names = "fck";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x0 0x190000 0x10000>;
+			status = "disabled";
 		};
 
 		target-module@1b0000 {			/* 0x489b0000, ap 25 34.0 */
-			compatible = "ti,sysc";
-			status = "disabled";
+			compatible = "ti,sysc-omap4", "ti,sysc";
+			reg = <0x1b0000 0x4>,
+			      <0x1b0010 0x4>;
+			reg-names = "rev", "sysc";
+			ti,sysc-midle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			clocks = <&cam_clkctrl DRA7_CAM_VIP3_CLKCTRL 0>;
+			clock-names = "fck";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x0 0x1b0000 0x10000>;
+			status = "disabled";
 		};
 
-		target-module@1d0000 {			/* 0x489d0000, ap 27 30.0 */
-			compatible = "ti,sysc";
-			status = "disabled";
+		target-module@1d0010 {			/* 0x489d0000, ap 27 30.0 */
+			compatible = "ti,sysc-omap4", "ti,sysc";
+			reg = <0x1d0010 0x4>;
+			reg-names = "sysc";
+			ti,sysc-midle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			clocks = <&vpe_clkctrl DRA7_VPE_VPE_CLKCTRL 0>;
+			clock-names = "fck";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x0 0x1d0000 0x10000>;
+
+			vpe: vpe@0 {
+				compatible = "ti,dra7-vpe";
+				reg = <0x0000 0x120>,
+				      <0x0700 0x80>,
+				      <0x5700 0x18>,
+				      <0xd000 0x400>;
+				reg-names = "vpe_top",
+					    "sc",
+					    "csc",
+					    "vpdma";
+				interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+			};
 		};
 	};
 };
@@ -4422,7 +4485,6 @@ segment@20000 {					/* 0x4ae20000 */
 
 		target-module@0 {			/* 0x4ae20000, ap 19 08.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer12";
 			reg = <0x0 0x4>,
 			      <0x10 0x4>;
 			reg-names = "rev", "sysc";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 73e5011..d78b684 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -377,44 +377,120 @@ dmm@4e000000 {
 			ti,hwmods = "dmm";
 		};
 
-		mmu0_dsp1: mmu@40d01000 {
-			compatible = "ti,dra7-dsp-iommu";
-			reg = <0x40d01000 0x100>;
-			interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-			ti,hwmods = "mmu0_dsp1";
-			#iommu-cells = <0>;
-			ti,syscon-mmuconfig = <&dsp1_system 0x0>;
-			status = "disabled";
+		target-module@40d01000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x40d01000 0x4>,
+			      <0x40d01010 0x4>,
+			      <0x40d01014 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+					 SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			clocks = <&dsp1_clkctrl DRA7_DSP1_MMU0_DSP1_CLKCTRL 0>;
+			clock-names = "fck";
+			resets = <&prm_dsp1 1>;
+			reset-names = "rstctrl";
+			ranges = <0x0 0x40d01000 0x1000>;
+			#size-cells = <1>;
+			#address-cells = <1>;
+
+			mmu0_dsp1: mmu@0 {
+				compatible = "ti,dra7-dsp-iommu";
+				reg = <0x0 0x100>;
+				interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+				#iommu-cells = <0>;
+				ti,syscon-mmuconfig = <&dsp1_system 0x0>;
+			};
 		};
 
-		mmu1_dsp1: mmu@40d02000 {
-			compatible = "ti,dra7-dsp-iommu";
-			reg = <0x40d02000 0x100>;
-			interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
-			ti,hwmods = "mmu1_dsp1";
-			#iommu-cells = <0>;
-			ti,syscon-mmuconfig = <&dsp1_system 0x1>;
-			status = "disabled";
+		target-module@40d02000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x40d02000 0x4>,
+			      <0x40d02010 0x4>,
+			      <0x40d02014 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+					 SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			clocks = <&dsp1_clkctrl DRA7_DSP1_MMU0_DSP1_CLKCTRL 0>;
+			clock-names = "fck";
+			resets = <&prm_dsp1 1>;
+			reset-names = "rstctrl";
+			ranges = <0x0 0x40d02000 0x1000>;
+			#size-cells = <1>;
+			#address-cells = <1>;
+
+			mmu1_dsp1: mmu@0 {
+				compatible = "ti,dra7-dsp-iommu";
+				reg = <0x0 0x100>;
+				interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+				#iommu-cells = <0>;
+				ti,syscon-mmuconfig = <&dsp1_system 0x1>;
+			};
 		};
 
-		mmu_ipu1: mmu@58882000 {
-			compatible = "ti,dra7-iommu";
-			reg = <0x58882000 0x100>;
-			interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>;
-			ti,hwmods = "mmu_ipu1";
-			#iommu-cells = <0>;
-			ti,iommu-bus-err-back;
-			status = "disabled";
+		target-module@58882000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x58882000 0x4>,
+			      <0x58882010 0x4>,
+			      <0x58882014 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+					 SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			clocks = <&ipu1_clkctrl DRA7_IPU1_MMU_IPU1_CLKCTRL 0>;
+			clock-names = "fck";
+			resets = <&prm_ipu 2>;
+			reset-names = "rstctrl";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x58882000 0x100>;
+
+			mmu_ipu1: mmu@0 {
+				compatible = "ti,dra7-iommu";
+				reg = <0x0 0x100>;
+				interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>;
+				#iommu-cells = <0>;
+				ti,iommu-bus-err-back;
+			};
 		};
 
-		mmu_ipu2: mmu@55082000 {
-			compatible = "ti,dra7-iommu";
-			reg = <0x55082000 0x100>;
-			interrupts = <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>;
-			ti,hwmods = "mmu_ipu2";
-			#iommu-cells = <0>;
-			ti,iommu-bus-err-back;
-			status = "disabled";
+		target-module@55082000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x55082000 0x4>,
+			      <0x55082010 0x4>,
+			      <0x55082014 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+					 SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			clocks = <&ipu2_clkctrl DRA7_IPU2_MMU_IPU2_CLKCTRL 0>;
+			clock-names = "fck";
+			resets = <&prm_core 2>;
+			reset-names = "rstctrl";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x55082000 0x100>;
+
+			mmu_ipu2: mmu@0 {
+				compatible = "ti,dra7-iommu";
+				reg = <0x0 0x100>;
+				interrupts = <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>;
+				#iommu-cells = <0>;
+				ti,iommu-bus-err-back;
+			};
 		};
 
 		abb_mpu: regulator-abb-mpu {
@@ -597,6 +673,24 @@ gpmc: gpmc@50000000 {
 			status = "disabled";
 		};
 
+		target-module@56000000 {
+			compatible = "ti,sysc-omap4", "ti,sysc";
+			reg = <0x5600fe00 0x4>,
+			      <0x5600fe10 0x4>;
+			reg-names = "rev", "sysc";
+			ti,sysc-midle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			clocks = <&gpu_clkctrl DRA7_GPU_CLKCTRL 0>;
+			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x56000000 0x2000000>;
+		};
+
 		crossbar_mpu: crossbar@4a002a48 {
 			compatible = "ti,irq-crossbar";
 			reg = <0x4a002a48 0x130>;
@@ -652,48 +746,96 @@ hdmi: encoder@58060000 {
 			};
 		};
 
-		aes1: aes@4b500000 {
-			compatible = "ti,omap4-aes";
-			ti,hwmods = "aes1";
-			reg = <0x4b500000 0xa0>;
-			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&edma_xbar 111 0>, <&edma_xbar 110 0>;
-			dma-names = "tx", "rx";
-			clocks = <&l3_iclk_div>;
+		aes1_target: target-module@4b500000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x4b500080 0x4>,
+			      <0x4b500084 0x4>,
+			      <0x4b500088 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>,
+					<SYSC_IDLE_SMART_WKUP>;
+			ti,syss-mask = <1>;
+			/* Domains (P, C): per_pwrdm, l4sec_clkdm */
+			clocks = <&l4sec_clkctrl DRA7_L4SEC_AES1_CLKCTRL 0>;
 			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x4b500000 0x1000>;
+
+			aes1: aes@0 {
+				compatible = "ti,omap4-aes";
+				reg = <0 0xa0>;
+				interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&edma_xbar 111 0>, <&edma_xbar 110 0>;
+				dma-names = "tx", "rx";
+				clocks = <&l3_iclk_div>;
+				clock-names = "fck";
+			};
 		};
 
-		aes2: aes@4b700000 {
-			compatible = "ti,omap4-aes";
-			ti,hwmods = "aes2";
-			reg = <0x4b700000 0xa0>;
-			interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&edma_xbar 114 0>, <&edma_xbar 113 0>;
-			dma-names = "tx", "rx";
-			clocks = <&l3_iclk_div>;
+		aes2_target: target-module@4b700000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x4b700080 0x4>,
+			      <0x4b700084 0x4>,
+			      <0x4b700088 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>,
+					<SYSC_IDLE_SMART_WKUP>;
+			ti,syss-mask = <1>;
+			/* Domains (P, C): per_pwrdm, l4sec_clkdm */
+			clocks = <&l4sec_clkctrl DRA7_L4SEC_AES2_CLKCTRL 0>;
 			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x4b700000 0x1000>;
+
+			aes2: aes@0 {
+				compatible = "ti,omap4-aes";
+				reg = <0 0xa0>;
+				interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&edma_xbar 114 0>, <&edma_xbar 113 0>;
+				dma-names = "tx", "rx";
+				clocks = <&l3_iclk_div>;
+				clock-names = "fck";
+			};
 		};
 
-		des: des@480a5000 {
-			compatible = "ti,omap4-des";
-			ti,hwmods = "des";
-			reg = <0x480a5000 0xa0>;
-			interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&sdma_xbar 117>, <&sdma_xbar 116>;
-			dma-names = "tx", "rx";
-			clocks = <&l3_iclk_div>;
+		sham_target: target-module@4b101000 {
+			compatible = "ti,sysc-omap3-sham", "ti,sysc";
+			reg = <0x4b101100 0x4>,
+			      <0x4b101110 0x4>,
+			      <0x4b101114 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,syss-mask = <1>;
+			/* Domains (P, C): l4per_pwrdm, l4sec_clkdm */
+			clocks = <&l4sec_clkctrl DRA7_L4SEC_SHAM_CLKCTRL 0>;
 			clock-names = "fck";
-		};
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x4b101000 0x1000>;
 
-		sham: sham@53100000 {
-			compatible = "ti,omap5-sham";
-			ti,hwmods = "sham";
-			reg = <0x4b101000 0x300>;
-			interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&edma_xbar 119 0>;
-			dma-names = "rx";
-			clocks = <&l3_iclk_div>;
-			clock-names = "fck";
+			sham: sham@0 {
+				compatible = "ti,omap5-sham";
+				reg = <0 0x300>;
+				interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&edma_xbar 119 0>;
+				dma-names = "rx";
+				clocks = <&l3_iclk_div>;
+				clock-names = "fck";
+			};
 		};
 
 		opp_supply_mpu: opp-supply@4a003b20 {
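The dra7 changes above all follow the same shape: the legacy ti,hwmods properties
are dropped and each IP block is re-parented under a "ti,sysc" interconnect
target-module node that lists its rev/sysc(/syss) registers, idle modes and
functional clock, with the device itself mapped at offset 0 of the parent's ranges.
Condensed to its skeleton (offsets and clock handle are illustrative, taken from
the aes1 conversion above):

	target-module@4b500000 {
		compatible = "ti,sysc-omap2", "ti,sysc";
		reg = <0x4b500080 0x4>, <0x4b500084 0x4>;
		reg-names = "rev", "sysc";
		clocks = <&l4sec_clkctrl DRA7_L4SEC_AES1_CLKCTRL 0>;
		clock-names = "fck";
		#address-cells = <1>;
		#size-cells = <1>;
		ranges = <0x0 0x4b500000 0x1000>;

		aes1: aes@0 {
			compatible = "ti,omap4-aes";
			reg = <0x0 0xa0>;
		};
	};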
diff --git a/arch/arm/boot/dts/dra72-evm-common.dtsi b/arch/arm/boot/dts/dra72-evm-common.dtsi
index 8641a3d..01558a8 100644
--- a/arch/arm/boot/dts/dra72-evm-common.dtsi
+++ b/arch/arm/boot/dts/dra72-evm-common.dtsi
@@ -6,7 +6,7 @@
 
 #include "dra72x.dtsi"
 #include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clk/ti-dra7-atl.h>
+#include <dt-bindings/clock/ti-dra7-atl.h>
 
 / {
 	compatible = "ti,dra72-evm", "ti,dra722", "ti,dra72", "ti,dra7";
@@ -187,6 +187,12 @@ vmmcwl_fixed: fixedregulator-mmcwl {
 		gpio = <&gpio5 8 GPIO_ACTIVE_HIGH>;
 		enable-active-high;
 	};
+
+	clk_ov5640_fixed: clock {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <24000000>;
+	};
 };
 
 &dra7_pmx_core {
@@ -269,6 +275,23 @@ p1 {
 			line-name = "vin6_sel_s0";
 		};
 	};
+
+	ov5640@3c {
+		compatible = "ovti,ov5640";
+		reg = <0x3c>;
+
+		clocks = <&clk_ov5640_fixed>;
+		clock-names = "xclk";
+
+		port {
+			csi2_cam0: endpoint {
+				remote-endpoint = <&csi2_phy0>;
+				clock-lanes = <0>;
+				data-lanes = <1 2>;
+			};
+		};
+	};
+
 };
 
 &uart1 {
@@ -580,3 +603,11 @@ mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
 &pcie1_rc {
 	status = "okay";
 };
+
+&csi2_0 {
+	csi2_phy0: endpoint {
+		remote-endpoint = <&csi2_cam0>;
+		clock-lanes = <0>;
+		data-lanes = <1 2>;
+	};
+};
diff --git a/arch/arm/boot/dts/dra72x.dtsi b/arch/arm/boot/dts/dra72x.dtsi
index f576270..82b57a3 100644
--- a/arch/arm/boot/dts/dra72x.dtsi
+++ b/arch/arm/boot/dts/dra72x.dtsi
@@ -17,6 +17,48 @@ pmu {
 	};
 };
 
+&l4_per2 {
+	target-module@5b000 {			/* 0x4845b000, ap 59 46.0 */
+		compatible = "ti,sysc-omap4", "ti,sysc";
+		reg = <0x5b000 0x4>,
+		      <0x5b010 0x4>;
+		reg-names = "rev", "sysc";
+		ti,sysc-midle = <SYSC_IDLE_FORCE>,
+				<SYSC_IDLE_NO>;
+		ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+				<SYSC_IDLE_NO>;
+		clocks = <&cam_clkctrl DRA7_CAM_VIP2_CLKCTRL 0>;
+		clock-names = "fck";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0x0 0x5b000 0x1000>;
+
+		cal: cal@0 {
+			compatible = "ti,dra72-cal";
+			reg = <0x0000 0x400>,
+			      <0x0800 0x40>,
+			      <0x0900 0x40>;
+			reg-names = "cal_top",
+				    "cal_rx_core0",
+				    "cal_rx_core1";
+			interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+			ti,camerrx-control = <&scm_conf 0xE94>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				csi2_0: port@0 {
+					reg = <0>;
+				};
+				csi2_1: port@1 {
+					reg = <1>;
+				};
+			};
+		};
+	};
+};
+
 &dss {
 	reg = <0x58000000 0x80>,
 	      <0x58004054 0x4>,
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
index d1b5b76..c5abc43 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
@@ -66,24 +66,63 @@ usb4: usb@48950000 {
 			};
 		};
 
-		mmu0_dsp2: mmu@41501000 {
-			compatible = "ti,dra7-dsp-iommu";
-			reg = <0x41501000 0x100>;
-			interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
-			ti,hwmods = "mmu0_dsp2";
-			#iommu-cells = <0>;
-			ti,syscon-mmuconfig = <&dsp2_system 0x0>;
-			status = "disabled";
+		target-module@41501000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x41501000 0x4>,
+			      <0x41501010 0x4>,
+			      <0x41501014 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+					 SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			clocks = <&dsp2_clkctrl DRA7_DSP2_MMU0_DSP2_CLKCTRL 0>;
+			clock-names = "fck";
+			resets = <&prm_dsp2 1>;
+			reset-names = "rstctrl";
+			ranges = <0x0 0x41501000 0x1000>;
+			#size-cells = <1>;
+			#address-cells = <1>;
+
+			mmu0_dsp2: mmu@0 {
+				compatible = "ti,dra7-dsp-iommu";
+				reg = <0x0 0x100>;
+				interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+				#iommu-cells = <0>;
+				ti,syscon-mmuconfig = <&dsp2_system 0x0>;
+			};
 		};
 
-		mmu1_dsp2: mmu@41502000 {
-			compatible = "ti,dra7-dsp-iommu";
-			reg = <0x41502000 0x100>;
-			interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
-			ti,hwmods = "mmu1_dsp2";
-			#iommu-cells = <0>;
-			ti,syscon-mmuconfig = <&dsp2_system 0x1>;
-			status = "disabled";
+		target-module@41502000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x41502000 0x4>,
+			      <0x41502010 0x4>,
+			      <0x41502014 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+					 SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+
+			clocks = <&dsp2_clkctrl DRA7_DSP2_MMU0_DSP2_CLKCTRL 0>;
+			clock-names = "fck";
+			resets = <&prm_dsp2 1>;
+			reset-names = "rstctrl";
+			ranges = <0x0 0x41502000 0x1000>;
+			#size-cells = <1>;
+			#address-cells = <1>;
+
+			mmu1_dsp2: mmu@0 {
+				compatible = "ti,dra7-dsp-iommu";
+				reg = <0x0 0x100>;
+				interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+				#iommu-cells = <0>;
+				ti,syscon-mmuconfig = <&dsp2_system 0x1>;
+			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/dra76-evm.dts b/arch/arm/boot/dts/dra76-evm.dts
index 1fb6f13..e958cb3 100644
--- a/arch/arm/boot/dts/dra76-evm.dts
+++ b/arch/arm/boot/dts/dra76-evm.dts
@@ -13,6 +13,13 @@ / {
 	model = "TI DRA762 EVM";
 	compatible = "ti,dra76-evm", "ti,dra762", "ti,dra7";
 
+	aliases {
+		display0 = &hdmi0;
+
+		sound0 = &sound0;
+		sound1 = &hdmi;
+	};
+
 	memory@0 {
 		device_type = "memory";
 		reg = <0x0 0x80000000 0x0 0x80000000>;
@@ -116,6 +123,54 @@ aic_dvdd: fixedregulator-aic_dvdd {
 		regulator-min-microvolt = <1800000>;
 		regulator-max-microvolt = <1800000>;
 	};
+
+	clk_ov5640_fixed: clock {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <24000000>;
+	};
+
+	hdmi0: connector {
+		compatible = "hdmi-connector";
+		label = "hdmi";
+
+		type = "a";
+
+		port {
+			hdmi_connector_in: endpoint {
+				remote-endpoint = <&tpd12s015_out>;
+			};
+		};
+	};
+
+	tpd12s015: encoder {
+		compatible = "ti,tpd12s015";
+
+		gpios = <&gpio7 30 GPIO_ACTIVE_HIGH>,   /* gpio7_30, CT CP HPD */
+			<&gpio7 31 GPIO_ACTIVE_HIGH>,   /* gpio7_31, LS OE */
+			<&gpio7 12 GPIO_ACTIVE_HIGH>;   /* gpio7_12/sp1_cs2, HPD */
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+
+				tpd12s015_in: endpoint {
+					remote-endpoint = <&hdmi_out>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+
+				tpd12s015_out: endpoint {
+					remote-endpoint = <&hdmi_connector_in>;
+				};
+			};
+		};
+	};
 };
 
 &i2c1 {
@@ -317,6 +372,27 @@ tlv320aic3106: tlv320aic3106@19 {
 	};
 };
 
+&i2c5 {
+	status = "okay";
+	clock-frequency = <400000>;
+
+	ov5640@3c {
+		compatible = "ovti,ov5640";
+		reg = <0x3c>;
+
+		clocks = <&clk_ov5640_fixed>;
+		clock-names = "xclk";
+
+		port {
+			csi2_cam0: endpoint {
+				remote-endpoint = <&csi2_phy0>;
+				clock-lanes = <0>;
+				data-lanes = <1 2>;
+			};
+		};
+	};
+};
+
 &cpu0 {
 	vdd-supply = <&buck10_reg>;
 };
@@ -411,6 +487,23 @@ &usb2_phy2 {
 	phy-supply = <&ldo3_reg>;
 };
 
+&dss {
+	status = "ok";
+	vdda_video-supply = <&ldo5_reg>;
+};
+
+&hdmi {
+	status = "ok";
+
+	vdda-supply = <&ldo1_reg>;
+
+	port {
+		hdmi_out: endpoint {
+			remote-endpoint = <&tpd12s015_in>;
+		};
+	};
+};
+
 &qspi {
 	spi-max-frequency = <96000000>;
 	m25p80@0 {
@@ -447,3 +540,11 @@ can-transceiver {
 		max-bitrate = <5000000>;
 	};
 };
+
+&csi2_0 {
+	csi2_phy0: endpoint {
+		remote-endpoint = <&csi2_cam0>;
+		clock-lanes = <0>;
+		data-lanes = <1 2>;
+	};
+};
diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
index cdcba3f..2f7539a 100644
--- a/arch/arm/boot/dts/dra76x.dtsi
+++ b/arch/arm/boot/dts/dra76x.dtsi
@@ -41,6 +41,48 @@ m_can0: mcan@1a00 {
 
 };
 
+&l4_per3 {
+	target-module@1b0000 {			/* 0x489b0000, ap 25 34.0 */
+		compatible = "ti,sysc-omap4", "ti,sysc";
+		reg = <0x1b0000 0x4>,
+		      <0x1b0010 0x4>;
+		reg-names = "rev", "sysc";
+		ti,sysc-midle = <SYSC_IDLE_FORCE>,
+				<SYSC_IDLE_NO>;
+		ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+				<SYSC_IDLE_NO>;
+		clocks = <&cam_clkctrl DRA7_CAM_VIP3_CLKCTRL 0>;
+		clock-names = "fck";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0x0 0x1b0000 0x10000>;
+
+		cal: cal@0 {
+			compatible = "ti,dra76-cal";
+			reg = <0x0000 0x400>,
+			      <0x0800 0x40>,
+			      <0x0900 0x40>;
+			reg-names = "cal_top",
+				    "cal_rx_core0",
+				    "cal_rx_core1";
+			interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+			ti,camerrx-control = <&scm_conf 0x6dc>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				csi2_0: port@0 {
+					reg = <0>;
+				};
+				csi2_1: port@1 {
+					reg = <1>;
+				};
+			};
+		};
+	};
+};
+
 /* MCAN interrupts are hard-wired to irqs 67, 68 */
 &crossbar_mpu {
 	ti,irqs-skip = <10 67 68 133 139 140>;
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index 93e1eb8..55cef4c 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -1591,10 +1591,10 @@ dsp2_clkctrl: dsp2-clkctrl@20 {
 
 	rtc_cm: rtc-cm@700 {
 		compatible = "ti,omap4-cm";
-		reg = <0x700 0x100>;
+		reg = <0x700 0x60>;
 		#address-cells = <1>;
 		#size-cells = <1>;
-		ranges = <0 0x700 0x100>;
+		ranges = <0 0x700 0x60>;
 
 		rtc_clkctrl: rtc-clkctrl@20 {
 			compatible = "ti,clkctrl";
@@ -1603,6 +1603,20 @@ rtc_clkctrl: rtc-clkctrl@20 {
 		};
 	};
 
+	vpe_cm: vpe-cm@760 {
+		compatible = "ti,omap4-cm";
+		reg = <0x760 0xc>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0x760 0xc>;
+
+		vpe_clkctrl: vpe-clkctrl@0 {
+			compatible = "ti,clkctrl";
+			reg = <0x0 0xc>;
+			#clock-cells = <2>;
+		};
+	};
+
 };
 
 &cm_core {
@@ -1720,6 +1734,20 @@ l3instr_clkctrl: l3instr-clkctrl@20 {
 		};
 	};
 
+	cam_cm: cam-cm@1000 {
+		compatible = "ti,omap4-cm";
+		reg = <0x1000 0x100>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0x1000 0x100>;
+
+		cam_clkctrl: cam-clkctrl@20 {
+			compatible = "ti,clkctrl";
+			reg = <0x20 0x2c>;
+			#clock-cells = <2>;
+		};
+	};
+
 	dss_cm: dss-cm@1100 {
 		compatible = "ti,omap4-cm";
 		reg = <0x1100 0x100>;
@@ -1734,6 +1762,20 @@ dss_clkctrl: dss-clkctrl@20 {
 		};
 	};
 
+	gpu_cm: gpu-cm@1200 {
+		compatible = "ti,omap4-cm";
+		reg = <0x1200 0x100>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0x1200 0x100>;
+
+		gpu_clkctrl: gpu-clkctrl@20 {
+			compatible = "ti,clkctrl";
+			reg = <0x20 0x4>;
+			#clock-cells = <2>;
+		};
+	};
+
 	l3init_cm: l3init-cm@1300 {
 		compatible = "ti,omap4-cm";
 		reg = <0x1300 0x100>;
diff --git a/arch/arm/boot/dts/e60k02.dtsi b/arch/arm/boot/dts/e60k02.dtsi
index 5a2c532..ce50c4d 100644
--- a/arch/arm/boot/dts/e60k02.dtsi
+++ b/arch/arm/boot/dts/e60k02.dtsi
@@ -48,7 +48,8 @@ on {
 		};
 	};
 
-	memory {
+	memory@80000000 {
+		device_type = "memory";
 		reg = <0x80000000 0x20000000>;
 	};
 
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index b016b0b..044e5da 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -145,12 +145,12 @@ sram@2020000 {
 			#size-cells = <1>;
 			ranges = <0 0x02020000 0x40000>;
 
-			smp-sysram@0 {
+			smp-sram@0 {
 				compatible = "samsung,exynos4210-sysram";
 				reg = <0x0 0x1000>;
 			};
 
-			smp-sysram@3f000 {
+			smp-sram@3f000 {
 				compatible = "samsung,exynos4210-sysram-ns";
 				reg = <0x3f000 0x1000>;
 			};
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index 09d3d54..a1bdf78 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -590,16 +590,16 @@ &serial_3 {
 };
 
 &sysram {
-	smp-sysram@0 {
+	smp-sram@0 {
 		status = "disabled";
 	};
 
-	smp-sysram@5000 {
+	smp-sram@5000 {
 		compatible = "samsung,exynos4210-sysram";
 		reg = <0x5000 0x1000>;
 	};
 
-	smp-sysram@1f000 {
+	smp-sram@1f000 {
 		status = "disabled";
 	};
 };
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index 554819a..b446623 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -79,12 +79,12 @@ sysram: sram@2020000 {
 			#size-cells = <1>;
 			ranges = <0 0x02020000 0x20000>;
 
-			smp-sysram@0 {
+			smp-sram@0 {
 				compatible = "samsung,exynos4210-sysram";
 				reg = <0x0 0x1000>;
 			};
 
-			smp-sysram@1f000 {
+			smp-sram@1f000 {
 				compatible = "samsung,exynos4210-sysram-ns";
 				reg = <0x1f000 0x1000>;
 			};
diff --git a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi
index ce87d2f..31719c0 100644
--- a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi
+++ b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi
@@ -168,3 +168,8 @@ &s5c73m3 {
 	vdda-supply = <&ldo17_reg>;
 	status = "okay";
 };
+
+&touchkey_reg {
+	gpio = <&gpm0 0 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi
index 83be3a7..3023bc3 100644
--- a/arch/arm/boot/dts/exynos4412-midas.dtsi
+++ b/arch/arm/boot/dts/exynos4412-midas.dtsi
@@ -13,6 +13,7 @@
 #include "exynos4412.dtsi"
 #include "exynos4412-ppmu-common.dtsi"
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/clock/maxim,max77686.h>
 #include <dt-bindings/pinctrl/samsung.h>
@@ -92,6 +93,15 @@ vcc18mhl: voltage-regulator-5 {
 		enable-active-high;
 	};
 
+	touchkey_reg: voltage-regulator-6 {
+		compatible = "regulator-fixed";
+		regulator-name = "LED_VDD_3.3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		enable-active-high;
+		status = "disabled";
+	};
+
 	gpio-keys {
 		compatible = "gpio-keys";
 		pinctrl-names = "default";
@@ -197,6 +207,25 @@ max77693-fuel-gauge@36 {
 		};
 	};
 
+	i2c-gpio-4 {
+		compatible = "i2c-gpio";
+		sda-gpios = <&gpl0 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+		scl-gpios = <&gpl0 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+		i2c-gpio,delay-us = <2>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		touchkey@20 {
+			compatible = "cypress,midas-touchkey";
+			reg = <0x20>;
+			vdd-supply = <&touchkey_reg>;
+			vcc-supply = <&ldo5_reg>;
+			interrupt-parent = <&gpj0>;
+			interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+			linux,keycodes = <KEY_BACK KEY_MENU>;
+		};
+	};
+
 	i2c-mhl {
 		compatible = "i2c-gpio";
 		gpios = <&gpf0 4 GPIO_ACTIVE_HIGH>, <&gpf0 6 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/exynos4412-n710x.dts b/arch/arm/boot/dts/exynos4412-n710x.dts
index fe2bfd7..98cd128 100644
--- a/arch/arm/boot/dts/exynos4412-n710x.dts
+++ b/arch/arm/boot/dts/exynos4412-n710x.dts
@@ -73,3 +73,8 @@ &s5c73m3 {
 	vdda-supply = <&cam_vdda_reg>;
 	status = "okay";
 };
+
+&touchkey_reg {
+	gpio = <&gpm0 5 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index ea55f37..9c39e82 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -267,7 +267,7 @@ usb3503: usb3503@8 {
 
 		intn-gpios = <&gpx3 0 GPIO_ACTIVE_HIGH>;
 		connect-gpios = <&gpx3 4 GPIO_ACTIVE_HIGH>;
-		reset-gpios = <&gpx3 5 GPIO_ACTIVE_HIGH>;
+		reset-gpios = <&gpx3 5 GPIO_ACTIVE_LOW>;
 		initial-mode = <1>;
 	};
 
diff --git a/arch/arm/boot/dts/exynos4412-tiny4412.dts b/arch/arm/boot/dts/exynos4412-tiny4412.dts
index 01f37b5..3a91de8 100644
--- a/arch/arm/boot/dts/exynos4412-tiny4412.dts
+++ b/arch/arm/boot/dts/exynos4412-tiny4412.dts
@@ -66,6 +66,31 @@ xusbxti {
 			clock-frequency = <24000000>;
 		};
 	};
+
+	panel {
+		compatible = "innolux,at070tn92";
+
+		port {
+			panel_input: endpoint {
+				remote-endpoint = <&lcdc_output>;
+			};
+		};
+	};
+};
+
+&fimd {
+	pinctrl-0 = <&lcd_clk>, <&lcd_data24>;
+	pinctrl-names = "default";
+	#address-cells = <1>;
+	#size-cells = <0>;
+	status = "okay";
+
+	port@3 {
+		reg = <3>;
+		lcdc_output: endpoint {
+			remote-endpoint = <&panel_input>;
+		};
+	};
 };
 
 &rtc {
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi
index 5022aa5..4886894 100644
--- a/arch/arm/boot/dts/exynos4412.dtsi
+++ b/arch/arm/boot/dts/exynos4412.dtsi
@@ -195,12 +195,12 @@ sram@2020000 {
 			#size-cells = <1>;
 			ranges = <0 0x02020000 0x40000>;
 
-			smp-sysram@0 {
+			smp-sram@0 {
 				compatible = "samsung,exynos4210-sysram";
 				reg = <0x0 0x1000>;
 			};
 
-			smp-sysram@2f000 {
+			smp-sram@2f000 {
 				compatible = "samsung,exynos4210-sysram-ns";
 				reg = <0x2f000 0x1000>;
 			};
diff --git a/arch/arm/boot/dts/exynos5.dtsi b/arch/arm/boot/dts/exynos5.dtsi
index 4801ca7..22eb951 100644
--- a/arch/arm/boot/dts/exynos5.dtsi
+++ b/arch/arm/boot/dts/exynos5.dtsi
@@ -36,7 +36,7 @@ soc: soc {
 		ranges;
 
 		chipid: chipid@10000000 {
-			compatible = "samsung,exynos4210-chipid", "syscon";
+			compatible = "samsung,exynos4210-chipid";
 			reg = <0x10000000 0x100>;
 		};
 
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index d6c85ef..f8ebc62 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -15,7 +15,7 @@
 #include "exynos5250.dtsi"
 
 / {
-	model = "Insignal Arndale evaluation board based on EXYNOS5250";
+	model = "Insignal Arndale evaluation board based on Exynos5250";
 	compatible = "insignal,arndale", "samsung,exynos5250", "samsung,exynos5";
 
 	memory@40000000 {
@@ -154,7 +154,7 @@ usb_hub: usb-hub {
 		compatible = "smsc,usb3503a";
 
 		reset-gpios = <&gpx3 5 GPIO_ACTIVE_LOW>;
-		connect-gpios = <&gpd1 7 GPIO_ACTIVE_LOW>;
+		connect-gpios = <&gpd1 7 GPIO_ACTIVE_HIGH>;
 	};
 };
 
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 6dc9694..5c42df0 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG SMDK5250 board device tree source
+ * Samsung SMDK5250 board device tree source
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
@@ -12,7 +12,7 @@
 #include "exynos5250.dtsi"
 
 / {
-	model = "SAMSUNG SMDK5250 board based on EXYNOS5250";
+	model = "Samsung SMDK5250 board based on Exynos5250";
 	compatible = "samsung,smdk5250", "samsung,exynos5250", "samsung,exynos5";
 
 	aliases {
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index e1f0215..b6135af 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -1,16 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG EXYNOS5250 SoC device tree source
+ * Samsung Exynos5250 SoC device tree source
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
- * SAMSUNG EXYNOS5250 SoC device nodes are listed in this file.
- * EXYNOS5250 based board files can include this file and provide
+ * Samsung Exynos5250 SoC device nodes are listed in this file.
+ * Exynos5250 based board files can include this file and provide
  * values for board specfic bindings.
  *
  * Note: This file does not include device nodes for all the controllers in
- * EXYNOS5250 SoC. As device tree coverage for EXYNOS5250 increases,
+ * Exynos5250 SoC. As device tree coverage for Exynos5250 increases,
  * additional nodes can be added to this file.
  */
 
@@ -171,12 +171,12 @@ sram@2020000 {
 			#size-cells = <1>;
 			ranges = <0 0x02020000 0x30000>;
 
-			smp-sysram@0 {
+			smp-sram@0 {
 				compatible = "samsung,exynos4210-sysram";
 				reg = <0x0 0x1000>;
 			};
 
-			smp-sysram@2f000 {
+			smp-sram@2f000 {
 				compatible = "samsung,exynos4210-sysram-ns";
 				reg = <0x2f000 0x1000>;
 			};
diff --git a/arch/arm/boot/dts/exynos5260-xyref5260.dts b/arch/arm/boot/dts/exynos5260-xyref5260.dts
index 36a2b77..0dc2ec1 100644
--- a/arch/arm/boot/dts/exynos5260-xyref5260.dts
+++ b/arch/arm/boot/dts/exynos5260-xyref5260.dts
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG XYREF5260 board device tree source
+ * Samsung XYREF5260 board device tree source
  *
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
@@ -10,7 +10,7 @@
 #include "exynos5260.dtsi"
 
 / {
-	model = "SAMSUNG XYREF5260 board based on EXYNOS5260";
+	model = "Samsung XYREF5260 board based on Exynos5260";
 	compatible = "samsung,xyref5260", "samsung,exynos5260", "samsung,exynos5";
 
 	memory@20000000 {
diff --git a/arch/arm/boot/dts/exynos5260.dtsi b/arch/arm/boot/dts/exynos5260.dtsi
index b0811db..154df70 100644
--- a/arch/arm/boot/dts/exynos5260.dtsi
+++ b/arch/arm/boot/dts/exynos5260.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG EXYNOS5260 SoC device tree source
+ * Samsung Exynos5260 SoC device tree source
  *
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts
index e0db251..4f9297a 100644
--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts
+++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
@@ -170,7 +170,7 @@ usb3503: usb-hub@8 {
 
 		intn-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>;
 		connect-gpios = <&gpx0 6 GPIO_ACTIVE_HIGH>;
-		reset-gpios = <&gpx1 4 GPIO_ACTIVE_HIGH>;
+		reset-gpios = <&gpx1 4 GPIO_ACTIVE_LOW>;
 		initial-mode = <1>;
 
 		clock-names = "refclk";
diff --git a/arch/arm/boot/dts/exynos5410-smdk5410.dts b/arch/arm/boot/dts/exynos5410-smdk5410.dts
index dffa5e3..5282b5d 100644
--- a/arch/arm/boot/dts/exynos5410-smdk5410.dts
+++ b/arch/arm/boot/dts/exynos5410-smdk5410.dts
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG SMDK5410 board device tree source
+ * Samsung SMDK5410 board device tree source
  *
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
@@ -10,7 +10,7 @@
 #include "exynos5410.dtsi"
 #include <dt-bindings/interrupt-controller/irq.h>
 / {
-	model = "Samsung SMDK5410 board based on EXYNOS5410";
+	model = "Samsung SMDK5410 board based on Exynos5410";
 	compatible = "samsung,smdk5410", "samsung,exynos5410", "samsung,exynos5";
 
 	memory@40000000 {
diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi
index a4b03d4..2eab80b 100644
--- a/arch/arm/boot/dts/exynos5410.dtsi
+++ b/arch/arm/boot/dts/exynos5410.dtsi
@@ -1,12 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG EXYNOS5410 SoC device tree source
+ * Samsung Exynos5410 SoC device tree source
  *
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
- * SAMSUNG EXYNOS5410 SoC device nodes are listed in this file.
- * EXYNOS5410 based board files can include this file and provide
+ * Samsung Exynos5410 SoC device nodes are listed in this file.
+ * Exynos5410 based board files can include this file and provide
  * values for board specfic bindings.
  */
 
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index 592d7b45..ee28d30 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -15,7 +15,7 @@
 #include <dt-bindings/clock/samsung,s2mps11.h>
 
 / {
-	model = "Insignal Arndale Octa evaluation board based on EXYNOS5420";
+	model = "Insignal Arndale Octa evaluation board based on Exynos5420";
 	compatible = "insignal,arndale-octa", "samsung,exynos5420", "samsung,exynos5";
 
 	memory@20000000 {
diff --git a/arch/arm/boot/dts/exynos5420-cpus.dtsi b/arch/arm/boot/dts/exynos5420-cpus.dtsi
index 0ee6e92..58d1c54 100644
--- a/arch/arm/boot/dts/exynos5420-cpus.dtsi
+++ b/arch/arm/boot/dts/exynos5420-cpus.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG EXYNOS5420 SoC cpu device tree source
+ * Samsung Exynos5420 SoC cpu device tree source
  *
  * Copyright (c) 2015 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts
index 8240e51..e3f2afe8 100644
--- a/arch/arm/boot/dts/exynos5420-smdk5420.dts
+++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG SMDK5420 board device tree source
+ * Samsung SMDK5420 board device tree source
  *
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
@@ -12,7 +12,7 @@
 #include <dt-bindings/gpio/gpio.h>
 
 / {
-	model = "Samsung SMDK5420 board based on EXYNOS5420";
+	model = "Samsung SMDK5420 board based on Exynos5420";
 	compatible = "samsung,smdk5420", "samsung,exynos5420", "samsung,exynos5";
 
 	memory@20000000 {
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index d39907a..b672080 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -1,12 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG EXYNOS5420 SoC device tree source
+ * Samsung Exynos5420 SoC device tree source
  *
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
- * SAMSUNG EXYNOS5420 SoC device nodes are listed in this file.
- * EXYNOS5420 based board files can include this file and provide
+ * Samsung Exynos5420 SoC device nodes are listed in this file.
+ * Exynos5420 based board files can include this file and provide
  * values for board specfic bindings.
  */
 
@@ -48,62 +48,62 @@ cluster_a15_opp_table: opp_table0 {
 
 		opp-1800000000 {
 			opp-hz = /bits/ 64 <1800000000>;
-			opp-microvolt = <1250000>;
+			opp-microvolt = <1250000 1250000 1500000>;
 			clock-latency-ns = <140000>;
 		};
 		opp-1700000000 {
 			opp-hz = /bits/ 64 <1700000000>;
-			opp-microvolt = <1212500>;
+			opp-microvolt = <1212500 1212500 1500000>;
 			clock-latency-ns = <140000>;
 		};
 		opp-1600000000 {
 			opp-hz = /bits/ 64 <1600000000>;
-			opp-microvolt = <1175000>;
+			opp-microvolt = <1175000 1175000 1500000>;
 			clock-latency-ns = <140000>;
 		};
 		opp-1500000000 {
 			opp-hz = /bits/ 64 <1500000000>;
-			opp-microvolt = <1137500>;
+			opp-microvolt = <1137500 1137500 1500000>;
 			clock-latency-ns = <140000>;
 		};
 		opp-1400000000 {
 			opp-hz = /bits/ 64 <1400000000>;
-			opp-microvolt = <1112500>;
+			opp-microvolt = <1112500 1112500 1500000>;
 			clock-latency-ns = <140000>;
 		};
 		opp-1300000000 {
 			opp-hz = /bits/ 64 <1300000000>;
-			opp-microvolt = <1062500>;
+			opp-microvolt = <1062500 1062500 1500000>;
 			clock-latency-ns = <140000>;
 		};
 		opp-1200000000 {
 			opp-hz = /bits/ 64 <1200000000>;
-			opp-microvolt = <1037500>;
+			opp-microvolt = <1037500 1037500 1500000>;
 			clock-latency-ns = <140000>;
 		};
 		opp-1100000000 {
 			opp-hz = /bits/ 64 <1100000000>;
-			opp-microvolt = <1012500>;
+			opp-microvolt = <1012500 1012500 1500000>;
 			clock-latency-ns = <140000>;
 		};
 		opp-1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
-			opp-microvolt = < 987500>;
+			opp-microvolt = < 987500 987500 1500000>;
 			clock-latency-ns = <140000>;
 		};
 		opp-900000000 {
 			opp-hz = /bits/ 64 <900000000>;
-			opp-microvolt = < 962500>;
+			opp-microvolt = < 962500 962500 1500000>;
 			clock-latency-ns = <140000>;
 		};
 		opp-800000000 {
 			opp-hz = /bits/ 64 <800000000>;
-			opp-microvolt = < 937500>;
+			opp-microvolt = < 937500 937500 1500000>;
 			clock-latency-ns = <140000>;
 		};
 		opp-700000000 {
 			opp-hz = /bits/ 64 <700000000>;
-			opp-microvolt = < 912500>;
+			opp-microvolt = < 912500 912500 1500000>;
 			clock-latency-ns = <140000>;
 		};
 	};
@@ -744,6 +744,56 @@ gsc_1: video-scaler@13e10000 {
 			iommus = <&sysmmu_gscl1>;
 		};
 
+		gpu: gpu@11800000 {
+			compatible = "samsung,exynos5420-mali", "arm,mali-t628";
+			reg = <0x11800000 0x5000>;
+			interrupts = <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "job", "mmu", "gpu";
+
+			clocks = <&clock CLK_G3D>;
+			clock-names = "core";
+			power-domains = <&g3d_pd>;
+			operating-points-v2 = <&gpu_opp_table>;
+
+			status = "disabled";
+			#cooling-cells = <2>;
+
+			gpu_opp_table: opp-table {
+				compatible = "operating-points-v2";
+
+				opp-177000000 {
+					opp-hz = /bits/ 64 <177000000>;
+					opp-microvolt = <812500>;
+				};
+				opp-266000000 {
+					opp-hz = /bits/ 64 <266000000>;
+					opp-microvolt = <862500>;
+				};
+				opp-350000000 {
+					opp-hz = /bits/ 64 <350000000>;
+					opp-microvolt = <912500>;
+				};
+				opp-420000000 {
+					opp-hz = /bits/ 64 <420000000>;
+					opp-microvolt = <962500>;
+				};
+				opp-480000000 {
+					opp-hz = /bits/ 64 <480000000>;
+					opp-microvolt = <1000000>;
+				};
+				opp-543000000 {
+					opp-hz = /bits/ 64 <543000000>;
+					opp-microvolt = <1037500>;
+				};
+				opp-600000000 {
+					opp-hz = /bits/ 64 <600000000>;
+					opp-microvolt = <1150000>;
+				};
+			};
+		};
+
 		scaler_0: scaler@12800000 {
 			compatible = "samsung,exynos5420-scaler";
 			reg = <0x12800000 0x1294>;
@@ -1042,7 +1092,6 @@ bus_wcore: bus_wcore {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK400_WCORE>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_wcore_opp_table>;
 			status = "disabled";
 		};
 
@@ -1050,7 +1099,6 @@ bus_noc: bus_noc {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK100_NOC>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_noc_opp_table>;
 			status = "disabled";
 		};
 
@@ -1058,7 +1106,6 @@ bus_fsys_apb: bus_fsys_apb {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_PCLK200_FSYS>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_fsys_apb_opp_table>;
 			status = "disabled";
 		};
 
@@ -1066,7 +1113,6 @@ bus_fsys: bus_fsys {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK200_FSYS>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_fsys_apb_opp_table>;
 			status = "disabled";
 		};
 
@@ -1074,7 +1120,6 @@ bus_fsys2: bus_fsys2 {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK200_FSYS2>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_fsys2_opp_table>;
 			status = "disabled";
 		};
 
@@ -1082,7 +1127,6 @@ bus_mfc: bus_mfc {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK333>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_mfc_opp_table>;
 			status = "disabled";
 		};
 
@@ -1090,7 +1134,6 @@ bus_gen: bus_gen {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK266>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_gen_opp_table>;
 			status = "disabled";
 		};
 
@@ -1098,7 +1141,6 @@ bus_peri: bus_peri {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK66>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_peri_opp_table>;
 			status = "disabled";
 		};
 
@@ -1106,7 +1148,6 @@ bus_g2d: bus_g2d {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK333_G2D>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_g2d_opp_table>;
 			status = "disabled";
 		};
 
@@ -1114,7 +1155,6 @@ bus_g2d_acp: bus_g2d_acp {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK266_G2D>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_g2d_acp_opp_table>;
 			status = "disabled";
 		};
 
@@ -1122,7 +1162,6 @@ bus_jpeg: bus_jpeg {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK300_JPEG>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_jpeg_opp_table>;
 			status = "disabled";
 		};
 
@@ -1130,7 +1169,6 @@ bus_jpeg_apb: bus_jpeg_apb {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK166>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_jpeg_apb_opp_table>;
 			status = "disabled";
 		};
 
@@ -1138,7 +1176,6 @@ bus_disp1_fimd: bus_disp1_fimd {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK300_DISP1>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_disp1_fimd_opp_table>;
 			status = "disabled";
 		};
 
@@ -1146,7 +1183,6 @@ bus_disp1: bus_disp1 {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK400_DISP1>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_disp1_opp_table>;
 			status = "disabled";
 		};
 
@@ -1154,7 +1190,6 @@ bus_gscl_scaler: bus_gscl_scaler {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK300_GSCL>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_gscl_opp_table>;
 			status = "disabled";
 		};
 
@@ -1162,252 +1197,8 @@ bus_mscl: bus_mscl {
 			compatible = "samsung,exynos-bus";
 			clocks = <&clock CLK_DOUT_ACLK400_MSCL>;
 			clock-names = "bus";
-			operating-points-v2 = <&bus_mscl_opp_table>;
 			status = "disabled";
 		};
-
-		bus_wcore_opp_table: opp_table2 {
-			compatible = "operating-points-v2";
-
-			opp00 {
-				opp-hz = /bits/ 64 <84000000>;
-				opp-microvolt = <925000>;
-			};
-			opp01 {
-				opp-hz = /bits/ 64 <111000000>;
-				opp-microvolt = <950000>;
-			};
-			opp02 {
-				opp-hz = /bits/ 64 <222000000>;
-				opp-microvolt = <950000>;
-			};
-			opp03 {
-				opp-hz = /bits/ 64 <333000000>;
-				opp-microvolt = <950000>;
-			};
-			opp04 {
-				opp-hz = /bits/ 64 <400000000>;
-				opp-microvolt = <987500>;
-			};
-		};
-
-		bus_noc_opp_table: opp_table3 {
-			compatible = "operating-points-v2";
-
-			opp00 {
-				opp-hz = /bits/ 64 <67000000>;
-			};
-			opp01 {
-				opp-hz = /bits/ 64 <75000000>;
-			};
-			opp02 {
-				opp-hz = /bits/ 64 <86000000>;
-			};
-			opp03 {
-				opp-hz = /bits/ 64 <100000000>;
-			};
-		};
-
-		bus_fsys_apb_opp_table: opp_table4 {
-			compatible = "operating-points-v2";
-			opp-shared;
-
-			opp00 {
-				opp-hz = /bits/ 64 <100000000>;
-			};
-			opp01 {
-				opp-hz = /bits/ 64 <200000000>;
-			};
-		};
-
-		bus_fsys2_opp_table: opp_table5 {
-			compatible = "operating-points-v2";
-
-			opp00 {
-				opp-hz = /bits/ 64 <75000000>;
-			};
-			opp01 {
-				opp-hz = /bits/ 64 <100000000>;
-			};
-			opp02 {
-				opp-hz = /bits/ 64 <150000000>;
-			};
-		};
-
-		bus_mfc_opp_table: opp_table6 {
-			compatible = "operating-points-v2";
-
-			opp00 {
-				opp-hz = /bits/ 64 <96000000>;
-			};
-			opp01 {
-				opp-hz = /bits/ 64 <111000000>;
-			};
-			opp02 {
-				opp-hz = /bits/ 64 <167000000>;
-			};
-			opp03 {
-				opp-hz = /bits/ 64 <222000000>;
-			};
-			opp04 {
-				opp-hz = /bits/ 64 <333000000>;
-			};
-		};
-
-		bus_gen_opp_table: opp_table7 {
-			compatible = "operating-points-v2";
-
-			opp00 {
-				opp-hz = /bits/ 64 <89000000>;
-			};
-			opp01 {
-				opp-hz = /bits/ 64 <133000000>;
-			};
-			opp02 {
-				opp-hz = /bits/ 64 <178000000>;
-			};
-			opp03 {
-				opp-hz = /bits/ 64 <267000000>;
-			};
-		};
-
-		bus_peri_opp_table: opp_table8 {
-			compatible = "operating-points-v2";
-
-			opp00 {
-				opp-hz = /bits/ 64 <67000000>;
-			};
-		};
-
-		bus_g2d_opp_table: opp_table9 {
-			compatible = "operating-points-v2";
-
-			opp00 {
-				opp-hz = /bits/ 64 <84000000>;
-			};
-			opp01 {
-				opp-hz = /bits/ 64 <167000000>;
-			};
-			opp02 {
-				opp-hz = /bits/ 64 <222000000>;
-			};
-			opp03 {
-				opp-hz = /bits/ 64 <300000000>;
-			};
-			opp04 {
-				opp-hz = /bits/ 64 <333000000>;
-			};
-		};
-
-		bus_g2d_acp_opp_table: opp_table10 {
-			compatible = "operating-points-v2";
-
-			opp00 {
-				opp-hz = /bits/ 64 <67000000>;
-			};
-			opp01 {
-				opp-hz = /bits/ 64 <133000000>;
-			};
-			opp02 {
-				opp-hz = /bits/ 64 <178000000>;
-			};
-			opp03 {
-				opp-hz = /bits/ 64 <267000000>;
-			};
-		};
-
-		bus_jpeg_opp_table: opp_table11 {
-			compatible = "operating-points-v2";
-
-			opp00 {
-				opp-hz = /bits/ 64 <75000000>;
-			};
-			opp01 {
-				opp-hz = /bits/ 64 <150000000>;
-			};
-			opp02 {
-				opp-hz = /bits/ 64 <200000000>;
-			};
-			opp03 {
-				opp-hz = /bits/ 64 <300000000>;
-			};
-		};
-
-		bus_jpeg_apb_opp_table: opp_table12 {
-			compatible = "operating-points-v2";
-
-			opp00 {
-				opp-hz = /bits/ 64 <84000000>;
-			};
-			opp01 {
-				opp-hz = /bits/ 64 <111000000>;
-			};
-			opp02 {
-				opp-hz = /bits/ 64 <134000000>;
-			};
-			opp03 {
-				opp-hz = /bits/ 64 <167000000>;
-			};
-		};
-
-		bus_disp1_fimd_opp_table: opp_table13 {
-			compatible = "operating-points-v2";
-
-			opp00 {
-				opp-hz = /bits/ 64 <120000000>;
-			};
-			opp01 {
-				opp-hz = /bits/ 64 <200000000>;
-			};
-		};
-
-		bus_disp1_opp_table: opp_table14 {
-			compatible = "operating-points-v2";
-
-			opp00 {
-				opp-hz = /bits/ 64 <120000000>;
-			};
-			opp01 {
-				opp-hz = /bits/ 64 <200000000>;
-			};
-			opp02 {
-				opp-hz = /bits/ 64 <300000000>;
-			};
-		};
-
-		bus_gscl_opp_table: opp_table15 {
-			compatible = "operating-points-v2";
-
-			opp00 {
-				opp-hz = /bits/ 64 <150000000>;
-			};
-			opp01 {
-				opp-hz = /bits/ 64 <200000000>;
-			};
-			opp02 {
-				opp-hz = /bits/ 64 <300000000>;
-			};
-		};
-
-		bus_mscl_opp_table: opp_table16 {
-			compatible = "operating-points-v2";
-
-			opp00 {
-				opp-hz = /bits/ 64 <84000000>;
-			};
-			opp01 {
-				opp-hz = /bits/ 64 <167000000>;
-			};
-			opp02 {
-				opp-hz = /bits/ 64 <222000000>;
-			};
-			opp03 {
-				opp-hz = /bits/ 64 <333000000>;
-			};
-			opp04 {
-				opp-hz = /bits/ 64 <400000000>;
-			};
-		};
 	};
 
 	thermal-zones {
diff --git a/arch/arm/boot/dts/exynos5422-cpus.dtsi b/arch/arm/boot/dts/exynos5422-cpus.dtsi
index e4a5857..1b8605c 100644
--- a/arch/arm/boot/dts/exynos5422-cpus.dtsi
+++ b/arch/arm/boot/dts/exynos5422-cpus.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG EXYNOS5422 SoC cpu device tree source
+ * Samsung Exynos5422 SoC cpu device tree source
  *
  * Copyright (c) 2015 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
index 059fa32d..5cf1aed 100644
--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
@@ -35,7 +35,264 @@ oscclk {
 		};
 	};
 
-	dmc_opp_table: opp_table2 {
+	bus_wcore_opp_table: opp_table2 {
+		compatible = "operating-points-v2";
+
+		/* derived from 532MHz MPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <88700000>;
+			opp-microvolt = <925000 925000 1400000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <133000000>;
+			opp-microvolt = <950000 950000 1400000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <177400000>;
+			opp-microvolt = <950000 950000 1400000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <266000000>;
+			opp-microvolt = <950000 950000 1400000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <532000000>;
+			opp-microvolt = <1000000 1000000 1400000>;
+		};
+	};
+
+	bus_noc_opp_table: opp_table3 {
+		compatible = "operating-points-v2";
+
+		/* derived from 666MHz CPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <66600000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <74000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <83250000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <111000000>;
+		};
+	};
+
+	bus_fsys_apb_opp_table: opp_table4 {
+		compatible = "operating-points-v2";
+
+		/* derived from 666MHz CPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <111000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <222000000>;
+		};
+	};
+
+	bus_fsys2_opp_table: opp_table5 {
+		compatible = "operating-points-v2";
+
+		/* derived from 600MHz DPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <75000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <120000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+	};
+
+	bus_mfc_opp_table: opp_table6 {
+		compatible = "operating-points-v2";
+
+		/* derived from 666MHz CPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <83250000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <111000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <166500000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <222000000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <333000000>;
+		};
+	};
+
+	bus_gen_opp_table: opp_table7 {
+		compatible = "operating-points-v2";
+
+		/* derived from 532MHz MPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <88700000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <133000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <178000000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <266000000>;
+		};
+	};
+
+	bus_peri_opp_table: opp_table8 {
+		compatible = "operating-points-v2";
+
+		/* derived from 666MHz CPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <66600000>;
+		};
+	};
+
+	bus_g2d_opp_table: opp_table9 {
+		compatible = "operating-points-v2";
+
+		/* derived from 666MHz CPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <83250000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <111000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <166500000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <222000000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <333000000>;
+		};
+	};
+
+	bus_g2d_acp_opp_table: opp_table10 {
+		compatible = "operating-points-v2";
+
+		/* derived from 532MHz MPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <66500000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <133000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <178000000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <266000000>;
+		};
+	};
+
+	bus_jpeg_opp_table: opp_table11 {
+		compatible = "operating-points-v2";
+
+		/* derived from 600MHz DPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <75000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <150000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <300000000>;
+		};
+	};
+
+	bus_jpeg_apb_opp_table: opp_table12 {
+		compatible = "operating-points-v2";
+
+		/* derived from 666MHz CPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <83250000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <111000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <133000000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <166500000>;
+		};
+	};
+
+	bus_disp1_fimd_opp_table: opp_table13 {
+		compatible = "operating-points-v2";
+
+		/* derived from 600MHz DPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <120000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+	};
+
+	bus_disp1_opp_table: opp_table14 {
+		compatible = "operating-points-v2";
+
+		/* derived from 600MHz DPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <120000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <300000000>;
+		};
+	};
+
+	bus_gscl_opp_table: opp_table15 {
+		compatible = "operating-points-v2";
+
+		/* derived from 600MHz DPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <150000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <300000000>;
+		};
+	};
+
+	bus_mscl_opp_table: opp_table16 {
+		compatible = "operating-points-v2";
+
+		/* derived from 666MHz CPLL */
+		opp00 {
+			opp-hz = /bits/ 64 <84000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <167000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <222000000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <333000000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <666000000>;
+		};
+	};
+
+	dmc_opp_table: opp_table17 {
 		compatible = "operating-points-v2";
 
 		opp00 {
@@ -134,6 +391,7 @@ &adc {
 };
 
 &bus_wcore {
+	operating-points-v2 = <&bus_wcore_opp_table>;
 	devfreq-events = <&nocp_mem0_0>, <&nocp_mem0_1>,
 			<&nocp_mem1_0>, <&nocp_mem1_1>;
 	vdd-supply = <&buck3_reg>;
@@ -142,76 +400,91 @@ &bus_wcore {
 };
 
 &bus_noc {
+	operating-points-v2 = <&bus_noc_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
 
 &bus_fsys_apb {
+	operating-points-v2 = <&bus_fsys_apb_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
 
 &bus_fsys {
+	operating-points-v2 = <&bus_fsys2_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
 
 &bus_fsys2 {
+	operating-points-v2 = <&bus_fsys2_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
 
 &bus_mfc {
+	operating-points-v2 = <&bus_mfc_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
 
 &bus_gen {
+	operating-points-v2 = <&bus_gen_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
 
 &bus_peri {
+	operating-points-v2 = <&bus_peri_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
 
 &bus_g2d {
+	operating-points-v2 = <&bus_g2d_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
 
 &bus_g2d_acp {
+	operating-points-v2 = <&bus_g2d_acp_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
 
 &bus_jpeg {
+	operating-points-v2 = <&bus_jpeg_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
 
 &bus_jpeg_apb {
+	operating-points-v2 = <&bus_jpeg_apb_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
 
 &bus_disp1_fimd {
+	operating-points-v2 = <&bus_disp1_fimd_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
 
 &bus_disp1 {
+	operating-points-v2 = <&bus_disp1_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
 
 &bus_gscl_scaler {
+	operating-points-v2 = <&bus_gscl_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
 
 &bus_mscl {
+	operating-points-v2 = <&bus_mscl_opp_table>;
 	devfreq = <&bus_wcore>;
 	status = "okay";
 };
@@ -601,6 +874,8 @@ buck2_reg: BUCK2 {
 				regulator-max-microvolt = <1500000>;
 				regulator-always-on;
 				regulator-boot-on;
+				regulator-coupled-with = <&buck3_reg>;
+				regulator-coupled-max-spread = <300000>;
 
 				regulator-state-mem {
 					regulator-off-in-suspend;
@@ -613,6 +888,8 @@ buck3_reg: BUCK3 {
 				regulator-max-microvolt = <1400000>;
 				regulator-always-on;
 				regulator-boot-on;
+				regulator-coupled-with = <&buck2_reg>;
+				regulator-coupled-max-spread = <300000>;
 
 				regulator-state-mem {
 					regulator-off-in-suspend;
@@ -623,7 +900,6 @@ buck4_reg: BUCK4 {
 				regulator-name = "vdd_g3d";
 				regulator-min-microvolt = <800000>;
 				regulator-max-microvolt = <1400000>;
-				regulator-always-on;
 				regulator-boot-on;
 
 				regulator-state-mem {
@@ -771,6 +1047,11 @@ &tmu_gpu {
 	vtmu-supply = <&ldo7_reg>;
 };
 
+&gpu {
+	mali-supply = <&buck4_reg>;
+	status = "okay";
+};
+
 &rtc {
 	status = "okay";
 	clocks = <&clock CLK_RTC>, <&s2mps11_osc S2MPS11_CLK_AP>;
diff --git a/arch/arm/boot/dts/exynos5422-odroidhc1.dts b/arch/arm/boot/dts/exynos5422-odroidhc1.dts
index d271e75..f163206 100644
--- a/arch/arm/boot/dts/exynos5422-odroidhc1.dts
+++ b/arch/arm/boot/dts/exynos5422-odroidhc1.dts
@@ -72,14 +72,14 @@ map0 {
 				 */
 				map1 {
 					trip = <&cpu0_alert1>;
-					cooling-device = <&cpu0 3 7>,
-							 <&cpu1 3 7>,
-							 <&cpu2 3 7>,
-							 <&cpu3 3 7>,
-							 <&cpu4 3 12>,
-							 <&cpu5 3 12>,
-							 <&cpu6 3 12>,
-							 <&cpu7 3 12>;
+					cooling-device = <&cpu0 3 8>,
+							 <&cpu1 3 8>,
+							 <&cpu2 3 8>,
+							 <&cpu3 3 8>,
+							 <&cpu4 3 14>,
+							 <&cpu5 3 14>,
+							 <&cpu6 3 14>,
+							 <&cpu7 3 14>;
 				};
 			};
 		};
@@ -116,14 +116,14 @@ map0 {
 				};
 				map1 {
 					trip = <&cpu1_alert1>;
-					cooling-device = <&cpu0 3 7>,
-							 <&cpu1 3 7>,
-							 <&cpu2 3 7>,
-							 <&cpu3 3 7>,
-							 <&cpu4 3 12>,
-							 <&cpu5 3 12>,
-							 <&cpu6 3 12>,
-							 <&cpu7 3 12>;
+					cooling-device = <&cpu0 3 8>,
+							 <&cpu1 3 8>,
+							 <&cpu2 3 8>,
+							 <&cpu3 3 8>,
+							 <&cpu4 3 14>,
+							 <&cpu5 3 14>,
+							 <&cpu6 3 14>,
+							 <&cpu7 3 14>;
 				};
 			};
 		};
@@ -160,14 +160,14 @@ map0 {
 				};
 				map1 {
 					trip = <&cpu2_alert1>;
-					cooling-device = <&cpu0 3 7>,
-							 <&cpu1 3 7>,
-							 <&cpu2 3 7>,
-							 <&cpu3 3 7>,
-							 <&cpu4 3 12>,
-							 <&cpu5 3 12>,
-							 <&cpu6 3 12>,
-							 <&cpu7 3 12>;
+					cooling-device = <&cpu0 3 8>,
+							 <&cpu1 3 8>,
+							 <&cpu2 3 8>,
+							 <&cpu3 3 8>,
+							 <&cpu4 3 14>,
+							 <&cpu5 3 14>,
+							 <&cpu6 3 14>,
+							 <&cpu7 3 14>;
 				};
 			};
 		};
@@ -204,14 +204,14 @@ map0 {
 				};
 				map1 {
 					trip = <&cpu3_alert1>;
-					cooling-device = <&cpu0 3 7>,
-							 <&cpu1 3 7>,
-							 <&cpu2 3 7>,
-							 <&cpu3 3 7>,
-							 <&cpu4 3 12>,
-							 <&cpu5 3 12>,
-							 <&cpu6 3 12>,
-							 <&cpu7 3 12>;
+					cooling-device = <&cpu0 3 8>,
+							 <&cpu1 3 8>,
+							 <&cpu2 3 8>,
+							 <&cpu3 3 8>,
+							 <&cpu4 3 14>,
+							 <&cpu5 3 14>,
+							 <&cpu6 3 14>,
+							 <&cpu7 3 14>;
 				};
 			};
 		};
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index 8388720..1865a70 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -107,7 +107,7 @@ map2 {
 				/*
 				 * When reaching cpu0_alert3, reduce CPU
 				 * by 2 steps. On Exynos5422/5800 that would
-				 * be: 1600 MHz and 1100 MHz.
+				 * (usually) be: 1800 MHz and 1200 MHz.
 				 */
 				map3 {
 					trip = <&cpu0_alert3>;
@@ -122,19 +122,19 @@ map3 {
 				};
 				/*
 				 * When reaching cpu0_alert4, reduce CPU
-				 * further, down to 600 MHz (12 steps for big,
-				 * 7 steps for LITTLE).
+				 * further, down to 600 MHz (14 steps for big,
+				 * 8 steps for LITTLE).
 				 */
-				map4 {
+				cpu0_cooling_map4: map4 {
 					trip = <&cpu0_alert4>;
-					cooling-device = <&cpu0 3 7>,
-							 <&cpu1 3 7>,
-							 <&cpu2 3 7>,
-							 <&cpu3 3 7>,
-							 <&cpu4 3 12>,
-							 <&cpu5 3 12>,
-							 <&cpu6 3 12>,
-							 <&cpu7 3 12>;
+					cooling-device = <&cpu0 3 8>,
+							 <&cpu1 3 8>,
+							 <&cpu2 3 8>,
+							 <&cpu3 3 8>,
+							 <&cpu4 3 14>,
+							 <&cpu5 3 14>,
+							 <&cpu6 3 14>,
+							 <&cpu7 3 14>;
 				};
 			};
 		};
@@ -198,16 +198,16 @@ map3 {
 							 <&cpu6 0 2>,
 							 <&cpu7 0 2>;
 				};
-				map4 {
+				cpu1_cooling_map4: map4 {
 					trip = <&cpu1_alert4>;
-					cooling-device = <&cpu0 3 7>,
-							 <&cpu1 3 7>,
-							 <&cpu2 3 7>,
-							 <&cpu3 3 7>,
-							 <&cpu4 3 12>,
-							 <&cpu5 3 12>,
-							 <&cpu6 3 12>,
-							 <&cpu7 3 12>;
+					cooling-device = <&cpu0 3 8>,
+							 <&cpu1 3 8>,
+							 <&cpu2 3 8>,
+							 <&cpu3 3 8>,
+							 <&cpu4 3 14>,
+							 <&cpu5 3 14>,
+							 <&cpu6 3 14>,
+							 <&cpu7 3 14>;
 				};
 			};
 		};
@@ -271,16 +271,16 @@ map3 {
 							 <&cpu6 0 2>,
 							 <&cpu7 0 2>;
 				};
-				map4 {
+				cpu2_cooling_map4: map4 {
 					trip = <&cpu2_alert4>;
-					cooling-device = <&cpu0 3 7>,
-							 <&cpu1 3 7>,
-							 <&cpu2 3 7>,
-							 <&cpu3 3 7>,
-							 <&cpu4 3 12>,
-							 <&cpu5 3 12>,
-							 <&cpu6 3 12>,
-							 <&cpu7 3 12>;
+					cooling-device = <&cpu0 3 8>,
+							 <&cpu1 3 8>,
+							 <&cpu2 3 8>,
+							 <&cpu3 3 8>,
+							 <&cpu4 3 14>,
+							 <&cpu5 3 14>,
+							 <&cpu6 3 14>,
+							 <&cpu7 3 14>;
 				};
 			};
 		};
@@ -344,16 +344,16 @@ map3 {
 							 <&cpu6 0 2>,
 							 <&cpu7 0 2>;
 				};
-				map4 {
+				cpu3_cooling_map4: map4 {
 					trip = <&cpu3_alert4>;
-					cooling-device = <&cpu0 3 7>,
-							 <&cpu1 3 7>,
-							 <&cpu2 3 7>,
-							 <&cpu3 3 7>,
-							 <&cpu4 3 12>,
-							 <&cpu5 3 12>,
-							 <&cpu6 3 12>,
-							 <&cpu7 3 12>;
+					cooling-device = <&cpu0 3 8>,
+							 <&cpu1 3 8>,
+							 <&cpu2 3 8>,
+							 <&cpu3 3 8>,
+							 <&cpu4 3 14>,
+							 <&cpu5 3 14>,
+							 <&cpu6 3 14>,
+							 <&cpu7 3 14>;
 				};
 			};
 		};
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts b/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts
index a31ca2e..98feeca 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts
@@ -30,6 +30,64 @@ &chipid {
 	samsung,asv-bin = <2>;
 };
 
+/*
+ * Odroid XU3-Lite board uses SoC revision with lower maximum frequencies
+ * than Odroid XU3/XU4 boards: 1.8 GHz for A15 cores & 1.3 GHz for A7 cores.
+ * Therefore we need to update OPPs tables and thermal maps accordingly.
+ */
+&cluster_a15_opp_table {
+	/delete-node/opp-2000000000;
+	/delete-node/opp-1900000000;
+};
+
+&cluster_a7_opp_table {
+	/delete-node/opp-1400000000;
+};
+
+&cpu0_cooling_map4 {
+	cooling-device = <&cpu0 3 7>,
+			 <&cpu1 3 7>,
+			 <&cpu2 3 7>,
+			 <&cpu3 3 7>,
+			 <&cpu4 3 12>,
+			 <&cpu5 3 12>,
+			 <&cpu6 3 12>,
+			 <&cpu7 3 12>;
+};
+
+&cpu1_cooling_map4 {
+	cooling-device = <&cpu0 3 7>,
+			 <&cpu1 3 7>,
+			 <&cpu2 3 7>,
+			 <&cpu3 3 7>,
+			 <&cpu4 3 12>,
+			 <&cpu5 3 12>,
+			 <&cpu6 3 12>,
+			 <&cpu7 3 12>;
+};
+
+&cpu2_cooling_map4 {
+	cooling-device = <&cpu0 3 7>,
+			 <&cpu1 3 7>,
+			 <&cpu2 3 7>,
+			 <&cpu3 3 7>,
+			 <&cpu4 3 12>,
+			 <&cpu5 3 12>,
+			 <&cpu6 3 12>,
+			 <&cpu7 3 12>;
+};
+
+&cpu3_cooling_map4 {
+	cooling-device = <&cpu0 3 7>,
+			 <&cpu1 3 7>,
+			 <&cpu2 3 7>,
+			 <&cpu3 3 7>,
+			 <&cpu4 3 12>,
+			 <&cpu5 3 12>,
+			 <&cpu6 3 12>,
+			 <&cpu7 3 12>;
+};
+
 &pwm {
 	/*
 	 * PWM 0 -- fan
diff --git a/arch/arm/boot/dts/exynos54xx.dtsi b/arch/arm/boot/dts/exynos54xx.dtsi
index f78dee8..8aa5117 100644
--- a/arch/arm/boot/dts/exynos54xx.dtsi
+++ b/arch/arm/boot/dts/exynos54xx.dtsi
@@ -62,12 +62,12 @@ sram@2020000 {
 			#size-cells = <1>;
 			ranges = <0 0x02020000 0x54000>;
 
-			smp-sysram@0 {
+			smp-sram@0 {
 				compatible = "samsung,exynos4210-sysram";
 				reg = <0x0 0x1000>;
 			};
 
-			smp-sysram@53000 {
+			smp-sram@53000 {
 				compatible = "samsung,exynos4210-sysram-ns";
 				reg = <0x53000 0x1000>;
 			};
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index 60ca3d6..60ab0ef 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -156,6 +156,15 @@ &clock_audss {
 	assigned-clock-parents = <&clock CLK_MAU_EPLL>;
 };
 
+/*
+ * Peach Pi board uses SoC revision with lower maximum frequency for A7 cores
+ * (1.3 GHz instead of 1.4 GHz) than Odroid XU3/XU4 boards.  Thus we need to
+ * update A7 OPPs table accordingly.
+ */
+&cluster_a7_opp_table {
+	/delete-node/opp-1400000000;
+};
+
 &cpu0 {
 	cpu-supply = <&buck2_reg>;
 };
@@ -257,6 +266,8 @@ buck2_reg: BUCK2 {
 				regulator-always-on;
 				regulator-boot-on;
 				regulator-ramp-delay = <12500>;
+				regulator-coupled-with = <&buck3_reg>;
+				regulator-coupled-max-spread = <300000>;
 				regulator-state-mem {
 					regulator-off-in-suspend;
 				};
@@ -269,6 +280,8 @@ buck3_reg: BUCK3 {
 				regulator-always-on;
 				regulator-boot-on;
 				regulator-ramp-delay = <12500>;
+				regulator-coupled-with = <&buck2_reg>;
+				regulator-coupled-max-spread = <300000>;
 				regulator-state-mem {
 					regulator-off-in-suspend;
 				};
diff --git a/arch/arm/boot/dts/exynos5800.dtsi b/arch/arm/boot/dts/exynos5800.dtsi
index 16177d8..dfb99ab 100644
--- a/arch/arm/boot/dts/exynos5800.dtsi
+++ b/arch/arm/boot/dts/exynos5800.dtsi
@@ -1,12 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG EXYNOS5800 SoC device tree source
+ * Samsung Exynos5800 SoC device tree source
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
- * SAMSUNG EXYNOS5800 SoC device nodes are listed in this file.
- * EXYNOS5800 based board files can include this file and provide
+ * Samsung Exynos5800 SoC device nodes are listed in this file.
+ * Exynos5800 based board files can include this file and provide
  * values for board specfic bindings.
  */
 
@@ -21,67 +21,87 @@ &clock {
 };
 
 &cluster_a15_opp_table {
+	opp-2000000000 {
+		opp-hz = /bits/ 64 <2000000000>;
+		opp-microvolt = <1312500>;
+		clock-latency-ns = <140000>;
+	};
+	opp-1900000000 {
+		opp-hz = /bits/ 64 <1900000000>;
+		opp-microvolt = <1262500>;
+		clock-latency-ns = <140000>;
+	};
+	opp-1800000000 {
+		opp-hz = /bits/ 64 <1800000000>;
+		opp-microvolt = <1237500>;
+		clock-latency-ns = <140000>;
+	};
 	opp-1700000000 {
-		opp-microvolt = <1250000>;
+		opp-microvolt = <1250000 1250000 1500000>;
 	};
 	opp-1600000000 {
-		opp-microvolt = <1250000>;
+		opp-microvolt = <1250000 1250000 1500000>;
 	};
 	opp-1500000000 {
-		opp-microvolt = <1100000>;
+		opp-microvolt = <1100000 1100000 1500000>;
 	};
 	opp-1400000000 {
-		opp-microvolt = <1100000>;
+		opp-microvolt = <1100000 1100000 1500000>;
 	};
 	opp-1300000000 {
-		opp-microvolt = <1100000>;
+		opp-microvolt = <1100000 1100000 1500000>;
 	};
 	opp-1200000000 {
-		opp-microvolt = <1000000>;
+		opp-microvolt = <1000000 1000000 1500000>;
 	};
 	opp-1100000000 {
-		opp-microvolt = <1000000>;
+		opp-microvolt = <1000000 1000000 1500000>;
 	};
 	opp-1000000000 {
-		opp-microvolt = <1000000>;
+		opp-microvolt = <1000000 1000000 1500000>;
 	};
 	opp-900000000 {
-		opp-microvolt = <1000000>;
+		opp-microvolt = <1000000 1000000 1500000>;
 	};
 	opp-800000000 {
-		opp-microvolt = <900000>;
+		opp-microvolt = <900000 900000 1500000>;
 	};
 	opp-700000000 {
-		opp-microvolt = <900000>;
+		opp-microvolt = <900000 900000 1500000>;
 	};
 	opp-600000000 {
 		opp-hz = /bits/ 64 <600000000>;
-		opp-microvolt = <900000>;
+		opp-microvolt = <900000 900000 1500000>;
 		clock-latency-ns = <140000>;
 	};
 	opp-500000000 {
 		opp-hz = /bits/ 64 <500000000>;
-		opp-microvolt = <900000>;
+		opp-microvolt = <900000 900000 1500000>;
 		clock-latency-ns = <140000>;
 	};
 	opp-400000000 {
 		opp-hz = /bits/ 64 <400000000>;
-		opp-microvolt = <900000>;
+		opp-microvolt = <900000 900000 1500000>;
 		clock-latency-ns = <140000>;
 	};
 	opp-300000000 {
 		opp-hz = /bits/ 64 <300000000>;
-		opp-microvolt = <900000>;
+		opp-microvolt = <900000 900000 1500000>;
 		clock-latency-ns = <140000>;
 	};
 	opp-200000000 {
 		opp-hz = /bits/ 64 <200000000>;
-		opp-microvolt = <900000>;
+		opp-microvolt = <900000 900000 1500000>;
 		clock-latency-ns = <140000>;
 	};
 };
 
 &cluster_a7_opp_table {
+	opp-1400000000 {
+		opp-hz = /bits/ 64 <1400000000>;
+		opp-microvolt = <1275000>;
+		clock-latency-ns = <140000>;
+	};
 	opp-1300000000 {
 		opp-microvolt = <1250000>;
 	};
diff --git a/arch/arm/boot/dts/ibm-power9-dual.dtsi b/arch/arm/boot/dts/ibm-power9-dual.dtsi
index 2abc42e..a0fa65b 100644
--- a/arch/arm/boot/dts/ibm-power9-dual.dtsi
+++ b/arch/arm/boot/dts/ibm-power9-dual.dtsi
@@ -86,7 +86,7 @@ sbefifo@2400 {
 			#address-cells = <1>;
 			#size-cells = <0>;
 
-			fsi_occ0: occ {
+			fsi_occ0: occ@1 {
 				compatible = "ibm,p9-occ";
 			};
 		};
@@ -187,7 +187,7 @@ sbefifo@2400 {
 			#address-cells = <1>;
 			#size-cells = <0>;
 
-			fsi_occ1: occ {
+			fsi_occ1: occ@2 {
 				compatible = "ibm,p9-occ";
 			};
 		};
diff --git a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts
index 0fde90d..3f38c2e 100644
--- a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts
+++ b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts
@@ -165,8 +165,6 @@ &uart2 {
 };
 
 &usbhost1 {
-	phy_type = "serial";
-	dr_mode = "host";
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index 05cccd1..fb66884 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -304,8 +304,6 @@ &uart1 {
 };
 
 &usbhost1 {
-	phy_type = "serial";
-	dr_mode = "host";
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index 9a097ef..40b95a2 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -570,6 +570,9 @@ usbhost1: usb@53ff4400 {
 				clock-names = "ipg", "ahb", "per";
 				fsl,usbmisc = <&usbmisc 1>;
 				fsl,usbphy = <&usbphy1>;
+				maximum-speed = "full-speed";
+				phy_type = "serial";
+				dr_mode = "host";
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index ed6a3ce..552196d 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -58,27 +58,25 @@ clk_usb: clk-usb {
 
 	display1: disp1 {
 		compatible = "fsl,imx-parallel-display";
+		#address-cells = <1>;
+		#size-cells = <0>;
 		interface-pix-fmt = "rgb24";
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_ipu_disp1>;
-		display-timings {
-			native-mode = <&timing0>;
-			timing0: dvi {
-				clock-frequency = <65000000>;
-				hactive = <1024>;
-				vactive = <768>;
-				hback-porch = <220>;
-				hfront-porch = <40>;
-				vback-porch = <21>;
-				vfront-porch = <7>;
-				hsync-len = <60>;
-				vsync-len = <10>;
+
+		port@0 {
+			reg = <0>;
+
+			display0_in: endpoint {
+				remote-endpoint = <&ipu_di0_disp1>;
 			};
 		};
 
-		port {
-			display0_in: endpoint {
-				remote-endpoint = <&ipu_di0_disp1>;
+		port@1 {
+			reg = <1>;
+
+			parallel_display_out: endpoint {
+				remote-endpoint = <&tfp410_in>;
 			};
 		};
 	};
@@ -115,6 +113,42 @@ display1_in: endpoint {
 		};
 	};
 
+	dvi-connector {
+		compatible = "dvi-connector";
+		digital;
+
+		port {
+			dvi_connector_in: endpoint {
+				remote-endpoint = <&tfp410_out>;
+			};
+		};
+	};
+
+	dvi-encoder {
+		compatible = "ti,tfp410";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+
+				tfp410_in: endpoint {
+					remote-endpoint = <&parallel_display_out>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+
+				tfp410_out: endpoint {
+					remote-endpoint = <&dvi_connector_in>;
+				};
+			};
+		};
+	};
+
 	gpio-keys {
 		compatible = "gpio-keys";
 		pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/imx6dl-gw5907.dts b/arch/arm/boot/dts/imx6dl-gw5907.dts
new file mode 100644
index 0000000..3fa2822
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-gw5907.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Gateworks Corporation
+ */
+
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6qdl-gw5907.dtsi"
+
+/ {
+	model = "Gateworks Ventana i.MX6 DualLite/Solo GW5907";
+	compatible = "gw,imx6dl-gw5907", "gw,ventana", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl-gw5910.dts b/arch/arm/boot/dts/imx6dl-gw5910.dts
new file mode 100644
index 0000000..0d5e7e5
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-gw5910.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Gateworks Corporation
+ */
+
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6qdl-gw5910.dtsi"
+
+/ {
+	model = "Gateworks Ventana i.MX6 DualLite/Solo GW5910";
+	compatible = "gw,imx6dl-gw5910", "gw,ventana", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl-gw5912.dts b/arch/arm/boot/dts/imx6dl-gw5912.dts
new file mode 100644
index 0000000..5260e01
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-gw5912.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Gateworks Corporation
+ */
+
+/dts-v1/;
+#include "imx6dl.dtsi"
+#include "imx6qdl-gw5912.dtsi"
+
+/ {
+	model = "Gateworks Ventana i.MX6 DualLite/Solo GW5912";
+	compatible = "gw,imx6dl-gw5912", "gw,ventana", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl-gw5913.dts b/arch/arm/boot/dts/imx6dl-gw5913.dts
new file mode 100644
index 0000000..b74e533
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-gw5913.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Gateworks Corporation
+ */
+
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6qdl-gw5913.dtsi"
+
+/ {
+	model = "Gateworks Ventana i.MX6 DualLite/Solo GW5913";
+	compatible = "gw,imx6dl-gw5913", "gw,ventana", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6q-gw5907.dts b/arch/arm/boot/dts/imx6q-gw5907.dts
new file mode 100644
index 0000000..b25526e
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-gw5907.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Gateworks Corporation
+ */
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-gw5907.dtsi"
+
+/ {
+	model = "Gateworks Ventana i.MX6 Dual/Quad GW5907";
+	compatible = "gw,imx6q-gw5907", "gw,ventana", "fsl,imx6q";
+};
diff --git a/arch/arm/boot/dts/imx6q-gw5910.dts b/arch/arm/boot/dts/imx6q-gw5910.dts
new file mode 100644
index 0000000..6aafa2f
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-gw5910.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Gateworks Corporation
+ */
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-gw5910.dtsi"
+
+/ {
+	model = "Gateworks Ventana i.MX6 Dual/Quad GW5910";
+	compatible = "gw,imx6q-gw5910", "gw,ventana", "fsl,imx6q";
+};
diff --git a/arch/arm/boot/dts/imx6q-gw5912.dts b/arch/arm/boot/dts/imx6q-gw5912.dts
new file mode 100644
index 0000000..4dcbd94
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-gw5912.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Gateworks Corporation
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+#include "imx6qdl-gw5912.dtsi"
+
+/ {
+	model = "Gateworks Ventana i.MX6 Dual/Quad GW5912";
+	compatible = "gw,imx6q-gw5912", "gw,ventana", "fsl,imx6q";
+};
diff --git a/arch/arm/boot/dts/imx6q-gw5913.dts b/arch/arm/boot/dts/imx6q-gw5913.dts
new file mode 100644
index 0000000..6f511f1
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-gw5913.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Gateworks Corporation
+ */
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-gw5913.dtsi"
+
+/ {
+	model = "Gateworks Ventana i.MX6 Dual/Quad GW5913";
+	compatible = "gw,imx6q-gw5913", "gw,ventana", "fsl,imx6q";
+};
diff --git a/arch/arm/boot/dts/imx6q-logicpd.dts b/arch/arm/boot/dts/imx6q-logicpd.dts
index d96ae54..7a3d1d3 100644
--- a/arch/arm/boot/dts/imx6q-logicpd.dts
+++ b/arch/arm/boot/dts/imx6q-logicpd.dts
@@ -73,6 +73,16 @@ &hdmi {
 	status = "okay";
 };
 
+&i2c1 {
+	touchscreen@26 {
+		compatible = "ilitek,ili2117";
+		reg = <0x26>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_touchscreen>;
+		interrupts-extended = <&gpio1 6 IRQ_TYPE_EDGE_RISING>;
+	};
+};
+
 &ldb {
 	status = "okay";
 
diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
index ff1287e..1b5bc6b 100644
--- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
@@ -200,7 +200,7 @@ ethphy: ethernet-phy@7 {
 
 &hdmi {
 	pinctrl-names = "default";
-	pinctrl-0 = <&pinctrl_hdmi_ddc>;
+	pinctrl-0 = <&pinctrl_hdmi_ddc &pinctrl_hdmi_cec>;
 	status = "disabled";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-gw5907.dtsi b/arch/arm/boot/dts/imx6qdl-gw5907.dtsi
new file mode 100644
index 0000000..0bdebdd
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-gw5907.dtsi
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Gateworks Corporation
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	/* these are used by bootloader for disabling nodes */
+	aliases {
+		led0 = &led0;
+		led1 = &led1;
+		nand = &gpmi;
+		usb0 = &usbh1;
+		usb1 = &usbotg;
+	};
+
+	chosen {
+		stdout-path = &uart2;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_gpio_leds>;
+
+		led0: user1 {
+			label = "user1";
+			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+			default-state = "on";
+			linux,default-trigger = "heartbeat";
+		};
+
+		led1: user2 {
+			label = "user2";
+			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+			default-state = "off";
+		};
+	};
+
+	memory@10000000 {
+		device_type = "memory";
+		reg = <0x10000000 0x20000000>;
+	};
+
+	pps {
+		compatible = "pps-gpio";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_pps>;
+		gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+		status = "okay";
+	};
+
+	reg_3p3v: regulator-3p3v {
+		compatible = "regulator-fixed";
+		regulator-name = "3P3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	reg_5p0v: regulator-5p0v {
+		compatible = "regulator-fixed";
+		regulator-name = "5P0V";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+	};
+
+	reg_usb_otg_vbus: regulator-usb-otg-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_otg_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+};
+
+&fec {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet>;
+	phy-mode = "rgmii-id";
+	phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&gpmi {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	status = "okay";
+};
+
+&hdmi {
+	ddc-i2c-bus = <&i2c3>;
+	status = "okay";
+};
+
+&i2c1 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+
+	gpio@23 {
+		compatible = "nxp,pca9555";
+		reg = <0x23>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	eeprom@50 {
+		compatible = "atmel,24c02";
+		reg = <0x50>;
+		pagesize = <16>;
+	};
+
+	eeprom@51 {
+		compatible = "atmel,24c02";
+		reg = <0x51>;
+		pagesize = <16>;
+	};
+
+	eeprom@52 {
+		compatible = "atmel,24c02";
+		reg = <0x52>;
+		pagesize = <16>;
+	};
+
+	eeprom@53 {
+		compatible = "atmel,24c02";
+		reg = <0x53>;
+		pagesize = <16>;
+	};
+
+	rtc@68 {
+		compatible = "dallas,ds1672";
+		reg = <0x68>;
+	};
+};
+
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+};
+
+&i2c3 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	status = "okay";
+
+	gpio@20 {
+		compatible = "nxp,pca9555";
+		reg = <0x20>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	adc@48 {
+		compatible = "ti,ads1015";
+		reg = <0x48>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		channel@4 {
+			reg = <4>;
+			ti,gain = <0>;
+			ti,datarate = <5>;
+		};
+
+		channel@5 {
+			reg = <5>;
+			ti,gain = <0>;
+			ti,datarate = <5>;
+		};
+
+		channel@6 {
+			reg = <6>;
+			ti,gain = <0>;
+			ti,datarate = <5>;
+		};
+	};
+};
+
+&pcie {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pcie>;
+	reset-gpio = <&gpio1 0 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&pwm2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm2>; /* MX6_DIO1 */
+	status = "disabled";
+};
+
+&pwm3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm3>; /* MX6_DIO2 */
+	status = "disabled";
+};
+
+&pwm4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm4>; /* MX6_DIO3 */
+	status = "disabled";
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	status = "okay";
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart3>;
+	status = "okay";
+};
+
+&uart5 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart5>;
+	status = "okay";
+};
+
+&usbotg {
+	vbus-supply = <&reg_usb_otg_vbus>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg>;
+	disable-over-current;
+	status = "okay";
+};
+
+&usbh1 {
+	status = "okay";
+};
+
+&wdog1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_wdog>;
+	fsl,ext-reset-output;
+};
+
+&iomuxc {
+	pinctrl_enet: enetgrp {
+		fsl,pins = <
+			MX6QDL_PAD_RGMII_RXC__RGMII_RXC		0x1b0b0
+			MX6QDL_PAD_RGMII_RD0__RGMII_RD0		0x1b0b0
+			MX6QDL_PAD_RGMII_RD1__RGMII_RD1		0x1b0b0
+			MX6QDL_PAD_RGMII_RD2__RGMII_RD2		0x1b0b0
+			MX6QDL_PAD_RGMII_RD3__RGMII_RD3		0x1b0b0
+			MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL	0x1b0b0
+			MX6QDL_PAD_RGMII_TXC__RGMII_TXC		0x1b0b0
+			MX6QDL_PAD_RGMII_TD0__RGMII_TD0		0x1b0b0
+			MX6QDL_PAD_RGMII_TD1__RGMII_TD1		0x1b0b0
+			MX6QDL_PAD_RGMII_TD2__RGMII_TD2		0x1b0b0
+			MX6QDL_PAD_RGMII_TD3__RGMII_TD3		0x1b0b0
+			MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL	0x1b0b0
+			MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK	0x1b0b0
+			MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
+			MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
+			MX6QDL_PAD_GPIO_16__ENET_REF_CLK	0x4001b0a8
+			MX6QDL_PAD_ENET_TXD0__GPIO1_IO30	0x1b0b0
+		>;
+	};
+
+	pinctrl_gpio_leds: gpioledsgrp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL0__GPIO4_IO06		0x1b0b0
+			MX6QDL_PAD_KEY_ROW0__GPIO4_IO07		0x1b0b0
+		>;
+	};
+
+	pinctrl_gpmi_nand: gpminandgrp {
+		fsl,pins = <
+			MX6QDL_PAD_NANDF_CLE__NAND_CLE		0xb0b1
+			MX6QDL_PAD_NANDF_ALE__NAND_ALE		0xb0b1
+			MX6QDL_PAD_NANDF_WP_B__NAND_WP_B	0xb0b1
+			MX6QDL_PAD_NANDF_RB0__NAND_READY_B	0xb000
+			MX6QDL_PAD_NANDF_CS0__NAND_CE0_B	0xb0b1
+			MX6QDL_PAD_SD4_CMD__NAND_RE_B		0xb0b1
+			MX6QDL_PAD_SD4_CLK__NAND_WE_B		0xb0b1
+			MX6QDL_PAD_NANDF_D0__NAND_DATA00	0xb0b1
+			MX6QDL_PAD_NANDF_D1__NAND_DATA01	0xb0b1
+			MX6QDL_PAD_NANDF_D2__NAND_DATA02	0xb0b1
+			MX6QDL_PAD_NANDF_D3__NAND_DATA03	0xb0b1
+			MX6QDL_PAD_NANDF_D4__NAND_DATA04	0xb0b1
+			MX6QDL_PAD_NANDF_D5__NAND_DATA05	0xb0b1
+			MX6QDL_PAD_NANDF_D6__NAND_DATA06	0xb0b1
+			MX6QDL_PAD_NANDF_D7__NAND_DATA07	0xb0b1
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D21__I2C1_SCL		0x4001b8b1
+			MX6QDL_PAD_EIM_D28__I2C1_SDA		0x4001b8b1
+			MX6QDL_PAD_GPIO_4__GPIO1_IO04		0x0001b0b0
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL3__I2C2_SCL		0x4001b8b1
+			MX6QDL_PAD_KEY_ROW3__I2C2_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_3__I2C3_SCL		0x4001b8b1
+			MX6QDL_PAD_GPIO_6__I2C3_SDA		0x4001b8b1
+			MX6QDL_PAD_GPIO_2__GPIO1_IO02		0x1b0b0
+			MX6QDL_PAD_GPIO_19__GPIO4_IO05		0x1b0b0
+		>;
+	};
+
+	pinctrl_pcie: pciegrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_0__GPIO1_IO00		0x1b0b0
+		>;
+	};
+
+	pinctrl_pps: ppsgrp {
+		fsl,pins = <
+			MX6QDL_PAD_ENET_RXD1__GPIO1_IO26	0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm2: pwm2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_DAT2__PWM2_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm3: pwm3grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_DAT1__PWM3_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm4: pwm4grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_CMD__PWM4_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA	0x1b0b1
+			MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA	0x1b0b1
+			MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart3: uart3grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D24__UART3_TX_DATA	0x1b0b1
+			MX6QDL_PAD_EIM_D25__UART3_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart5: uart5grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL1__UART5_TX_DATA	0x1b0b1
+			MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_usbotg: usbotggrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_1__USB_OTG_ID		0x17059
+			MX6QDL_PAD_EIM_D22__GPIO3_IO22		0x1b0b0
+		>;
+	};
+
+	pinctrl_wdog: wdoggrp {
+		fsl,pins = <
+			MX6QDL_PAD_DISP0_DAT8__WDOG1_B		0x1b0b0
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
new file mode 100644
index 0000000..be1af74
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Gateworks Corporation
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	/* these are used by bootloader for disabling nodes */
+	aliases {
+		led0 = &led0;
+		led1 = &led1;
+		led2 = &led2;
+	};
+
+	chosen {
+		stdout-path = &uart2;
+	};
+
+	memory@10000000 {
+		device_type = "memory";
+		reg = <0x10000000 0x20000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_gpio_leds>;
+
+		led0: user1 {
+			label = "user1";
+			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+			default-state = "on";
+			linux,default-trigger = "heartbeat";
+		};
+
+		led1: user2 {
+			label = "user2";
+			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+			default-state = "off";
+		};
+
+		led2: user3 {
+			label = "user3";
+			gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+			default-state = "off";
+		};
+	};
+
+	pps {
+		compatible = "pps-gpio";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_pps>;
+		gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
+		status = "okay";
+	};
+
+	reg_3p3v: regulator-3p3v {
+		compatible = "regulator-fixed";
+		regulator-name = "3P3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	reg_5p0v: regulator-5p0v {
+		compatible = "regulator-fixed";
+		regulator-name = "5P0V";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+	};
+
+	reg_wl: regulator-wl {
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_reg_wl>;
+		compatible = "regulator-fixed";
+		regulator-name = "wl";
+		gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+		startup-delay-us = <100>;
+		enable-active-high;
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	reg_bt: regulator-bt {
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_reg_bt>;
+		compatible = "regulator-fixed";
+		regulator-name = "bt";
+		gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+		startup-delay-us = <100>;
+		enable-active-high;
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+};
+
+
+&ecspi3 {
+	cs-gpios = <&gpio4 24 GPIO_ACTIVE_HIGH>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_ecspi3>;
+	status = "okay";
+};
+
+&fec {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet>;
+	phy-mode = "rgmii-id";
+	status = "okay";
+};
+
+&gpmi {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	status = "okay";
+};
+
+&i2c1 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+
+	gpio@23 {
+		compatible = "nxp,pca9555";
+		reg = <0x23>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	eeprom@50 {
+		compatible = "atmel,24c02";
+		reg = <0x50>;
+		pagesize = <16>;
+	};
+
+	eeprom@51 {
+		compatible = "atmel,24c02";
+		reg = <0x51>;
+		pagesize = <16>;
+	};
+
+	eeprom@52 {
+		compatible = "atmel,24c02";
+		reg = <0x52>;
+		pagesize = <16>;
+	};
+
+	eeprom@53 {
+		compatible = "atmel,24c02";
+		reg = <0x53>;
+		pagesize = <16>;
+	};
+
+	rtc@68 {
+		compatible = "dallas,ds1672";
+		reg = <0x68>;
+	};
+};
+
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+};
+
+&i2c3 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	status = "okay";
+
+	accel@19 {
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_accel>;
+		compatible = "st,lis2de12";
+		reg = <0x19>;
+		st,drdy-int-pin = <1>;
+		interrupt-parent = <&gpio7>;
+		interrupts = <13 0>;
+		interrupt-names = "INT1";
+	};
+};
+
+&pcie {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pcie>;
+	reset-gpio = <&gpio3 20 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&pwm2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm2>; /* MX6_DIO1 */
+	status = "disabled";
+};
+
+&pwm3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm3>; /* MX6_DIO2 */
+	status = "disabled";
+};
+
+/* off-board RS232 */
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	status = "okay";
+};
+
+/* serial console */
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	status = "okay";
+};
+
+/* Sterling-LWB Bluetooth */
+&uart4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart4>;
+	uart-has-rtscts;
+	status = "okay";
+};
+
+/* GPS */
+&uart5 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart5>;
+	status = "okay";
+};
+
+&usbotg {
+	vbus-supply = <&reg_5p0v>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg>;
+	disable-over-current;
+	status = "okay";
+};
+
+&usbh1 {
+	status = "okay";
+};
+
+/* Sterling-LWB SDIO WiFi */
+&usdhc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	vmmc-supply = <&reg_3p3v>;
+	non-removable;
+	bus-width = <4>;
+	status = "okay";
+};
+
+&usdhc3 {
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
+	pinctrl-0 = <&pinctrl_usdhc3>;
+	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+	vmmc-supply = <&reg_3p3v>;
+	status = "okay";
+};
+
+&wdog1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_wdog>;
+	fsl,ext-reset-output;
+};
+
+&iomuxc {
+	pinctrl_accel: accelmuxgrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_18__GPIO7_IO13		0x1b0b1
+		>;
+	};
+
+	pinctrl_ecspi3: escpi3grp {
+		fsl,pins = <
+			MX6QDL_PAD_DISP0_DAT0__ECSPI3_SCLK	0x100b1
+			MX6QDL_PAD_DISP0_DAT1__ECSPI3_MOSI	0x100b1
+			MX6QDL_PAD_DISP0_DAT2__ECSPI3_MISO	0x100b1
+			MX6QDL_PAD_DISP0_DAT3__GPIO4_IO24	0x100b1
+		>;
+	};
+
+	pinctrl_enet: enetgrp {
+		fsl,pins = <
+			MX6QDL_PAD_RGMII_RXC__RGMII_RXC		0x1b030
+			MX6QDL_PAD_RGMII_RD0__RGMII_RD0		0x1b030
+			MX6QDL_PAD_RGMII_RD1__RGMII_RD1		0x1b030
+			MX6QDL_PAD_RGMII_RD2__RGMII_RD2		0x1b030
+			MX6QDL_PAD_RGMII_RD3__RGMII_RD3		0x1b030
+			MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL	0x1b030
+			MX6QDL_PAD_RGMII_TXC__RGMII_TXC		0x1b030
+			MX6QDL_PAD_RGMII_TD0__RGMII_TD0		0x1b030
+			MX6QDL_PAD_RGMII_TD1__RGMII_TD1		0x1b030
+			MX6QDL_PAD_RGMII_TD2__RGMII_TD2		0x1b030
+			MX6QDL_PAD_RGMII_TD3__RGMII_TD3		0x1b030
+			MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL	0x1b030
+			MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK	0x1b0b0
+			MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
+			MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
+			MX6QDL_PAD_GPIO_16__ENET_REF_CLK	0x4001b0a8
+			MX6QDL_PAD_ENET_TXD0__GPIO1_IO30	0x1b0b0
+		>;
+	};
+
+	pinctrl_gpio_leds: gpioledsgrp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL0__GPIO4_IO06  0x1b0b0
+			MX6QDL_PAD_KEY_ROW0__GPIO4_IO07  0x1b0b0
+			MX6QDL_PAD_KEY_ROW4__GPIO4_IO15  0x1b0b0
+		>;
+	};
+
+	pinctrl_gpmi_nand: gpminandgrp {
+		fsl,pins = <
+			MX6QDL_PAD_NANDF_CLE__NAND_CLE		0xb0b1
+			MX6QDL_PAD_NANDF_ALE__NAND_ALE		0xb0b1
+			MX6QDL_PAD_NANDF_WP_B__NAND_WP_B	0xb0b1
+			MX6QDL_PAD_NANDF_RB0__NAND_READY_B	0xb000
+			MX6QDL_PAD_NANDF_CS0__NAND_CE0_B	0xb0b1
+			MX6QDL_PAD_SD4_CMD__NAND_RE_B		0xb0b1
+			MX6QDL_PAD_SD4_CLK__NAND_WE_B		0xb0b1
+			MX6QDL_PAD_NANDF_D0__NAND_DATA00	0xb0b1
+			MX6QDL_PAD_NANDF_D1__NAND_DATA01	0xb0b1
+			MX6QDL_PAD_NANDF_D2__NAND_DATA02	0xb0b1
+			MX6QDL_PAD_NANDF_D3__NAND_DATA03	0xb0b1
+			MX6QDL_PAD_NANDF_D4__NAND_DATA04	0xb0b1
+			MX6QDL_PAD_NANDF_D5__NAND_DATA05	0xb0b1
+			MX6QDL_PAD_NANDF_D6__NAND_DATA06	0xb0b1
+			MX6QDL_PAD_NANDF_D7__NAND_DATA07	0xb0b1
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D21__I2C1_SCL		0x4001b8b1
+			MX6QDL_PAD_EIM_D28__I2C1_SDA		0x4001b8b1
+			MX6QDL_PAD_GPIO_4__GPIO1_IO04		0x0001b0b0
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL3__I2C2_SCL		0x4001b8b1
+			MX6QDL_PAD_KEY_ROW3__I2C2_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_3__I2C3_SCL		0x4001b8b1
+			MX6QDL_PAD_GPIO_6__I2C3_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_pcie: pciegrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D20__GPIO3_IO20		0x1b0b0
+		>;
+	};
+
+	pinctrl_pps: ppsgrp {
+		fsl,pins = <
+			MX6QDL_PAD_DI0_DISP_CLK__GPIO4_IO16	0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm2: pwm2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_DAT2__PWM2_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm3: pwm3grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_DAT1__PWM3_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_reg_bt: regbtgrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_2__GPIO1_IO02		0x1b0b1
+		>;
+	};
+
+	pinctrl_reg_wl: regwlgrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_5__GPIO1_IO05		0x1b0b1
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA	0x1b0b1
+			MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA	0x1b0b1
+			MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart4: uart4grp {
+		fsl,pins = <
+			MX6QDL_PAD_CSI0_DAT12__UART4_TX_DATA	0x1b0b1
+			MX6QDL_PAD_CSI0_DAT13__UART4_RX_DATA	0x1b0b1
+			MX6QDL_PAD_CSI0_DAT16__UART4_RTS_B	0x1b0b1
+			MX6QDL_PAD_CSI0_DAT17__UART4_CTS_B	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart5: uart5grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL1__UART5_TX_DATA	0x1b0b1
+			MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_usbotg: usbotggrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_1__USB_OTG_ID		0x13059
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD2_CMD__SD2_CMD		0x17059
+			MX6QDL_PAD_SD2_CLK__SD2_CLK		0x10059
+			MX6QDL_PAD_SD2_DAT0__SD2_DATA0		0x17059
+			MX6QDL_PAD_SD2_DAT1__SD2_DATA1		0x17059
+			MX6QDL_PAD_SD2_DAT2__SD2_DATA2		0x17059
+			MX6QDL_PAD_SD2_DAT3__SD2_DATA3		0x17059
+		>;
+	};
+
+	pinctrl_usdhc3: usdhc3grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_CMD__SD3_CMD		0x17059
+			MX6QDL_PAD_SD3_CLK__SD3_CLK		0x10059
+			MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x17059
+			MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x17059
+			MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x17059
+			MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x17059
+			MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x17059 /* CD */
+			MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x17059
+		>;
+	};
+
+	pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_CMD__SD3_CMD		0x170b9
+			MX6QDL_PAD_SD3_CLK__SD3_CLK		0x170b9
+			MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x170b9
+			MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x170b9
+			MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x170b9
+			MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x170b9
+			MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x170b9 /* CD */
+			MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x170b9
+		>;
+	};
+
+	pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_CMD__SD3_CMD		0x170f9
+			MX6QDL_PAD_SD3_CLK__SD3_CLK		0x100f9
+			MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x170f9
+			MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x170f9
+			MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x170f9
+			MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x170f9
+			MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x170f9 /* CD */
+			MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x170f9
+		>;
+	};
+
+	pinctrl_wdog: wdoggrp {
+		fsl,pins = <
+			MX6QDL_PAD_DISP0_DAT8__WDOG1_B		0x1b0b0
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6qdl-gw5912.dtsi b/arch/arm/boot/dts/imx6qdl-gw5912.dtsi
new file mode 100644
index 0000000..8c57fd2
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-gw5912.dtsi
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Gateworks Corporation
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	/* these are used by bootloader for disabling nodes */
+	aliases {
+		led0 = &led0;
+		led1 = &led1;
+		led2 = &led2;
+		nand = &gpmi;
+		usb0 = &usbh1;
+		usb1 = &usbotg;
+	};
+
+	chosen {
+		stdout-path = &uart2;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_gpio_leds>;
+
+		led0: user1 {
+			label = "user1";
+			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+			default-state = "on";
+			linux,default-trigger = "heartbeat";
+		};
+
+		led1: user2 {
+			label = "user2";
+			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+			default-state = "off";
+		};
+
+		led2: user3 {
+			label = "user3";
+			gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+			default-state = "off";
+		};
+	};
+
+	memory@10000000 {
+		device_type = "memory";
+		reg = <0x10000000 0x40000000>;
+	};
+
+	pps {
+		compatible = "pps-gpio";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_pps>;
+		gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+	};
+
+	reg_3p3v: regulator-3p3v {
+		compatible = "regulator-fixed";
+		regulator-name = "3P3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	reg_usb_vbus: regulator-5p0v {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+	};
+};
+
+&can1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_flexcan1>;
+	status = "okay";
+};
+
+&ecspi2 {
+	cs-gpios = <&gpio2 26 GPIO_ACTIVE_HIGH>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_ecspi2>;
+	status = "okay";
+};
+
+&fec {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet>;
+	phy-mode = "rgmii-id";
+	status = "okay";
+};
+
+&gpmi {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	status = "okay";
+};
+
+&i2c1 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+
+	gpio@23 {
+		compatible = "nxp,pca9555";
+		reg = <0x23>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	eeprom@50 {
+		compatible = "atmel,24c02";
+		reg = <0x50>;
+		pagesize = <16>;
+	};
+
+	eeprom@51 {
+		compatible = "atmel,24c02";
+		reg = <0x51>;
+		pagesize = <16>;
+	};
+
+	eeprom@52 {
+		compatible = "atmel,24c02";
+		reg = <0x52>;
+		pagesize = <16>;
+	};
+
+	eeprom@53 {
+		compatible = "atmel,24c02";
+		reg = <0x53>;
+		pagesize = <16>;
+	};
+
+	rtc@68 {
+		compatible = "dallas,ds1672";
+		reg = <0x68>;
+	};
+};
+
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+};
+
+&i2c3 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	status = "okay";
+
+	accel@19 {
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_accel>;
+		compatible = "st,lis2de12";
+		reg = <0x19>;
+		st,drdy-int-pin = <1>;
+		interrupt-parent = <&gpio7>;
+		interrupts = <13 0>;
+		interrupt-names = "INT1";
+	};
+};
+
+&pcie {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pcie>;
+	reset-gpio = <&gpio1 29 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&pwm1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm1>; /* MX6_DIO0 */
+	status = "disabled";
+};
+
+&pwm2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm2>; /* MX6_DIO1 */
+	status = "disabled";
+};
+
+&pwm3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm3>; /* MX6_DIO2 */
+	status = "disabled";
+};
+
+&pwm4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm4>; /* MX6_DIO3 */
+	status = "disabled";
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	rts-gpios = <&gpio7 12 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	status = "okay";
+};
+
+&uart5 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart5>;
+	status = "okay";
+};
+
+&usbotg {
+	vbus-supply = <&reg_usb_vbus>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg>;
+	disable-over-current;
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usbh1 {
+	vbus-supply = <&reg_usb_vbus>;
+	status = "okay";
+};
+
+&usdhc3 {
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
+	pinctrl-0 = <&pinctrl_usdhc3>;
+	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+	vmmc-supply = <&reg_3p3v>;
+	no-1-8-v; /* firmware will remove if board revision supports */
+	status = "okay";
+};
+
+&wdog1 {
+	status = "disabled";
+};
+
+&wdog2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_wdog>;
+	fsl,ext-reset-output;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl_accel: accelmuxgrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_18__GPIO7_IO13		0x1b0b1
+		>;
+	};
+
+	pinctrl_enet: enetgrp {
+		fsl,pins = <
+			MX6QDL_PAD_RGMII_RXC__RGMII_RXC		0x1b030
+			MX6QDL_PAD_RGMII_RD0__RGMII_RD0		0x1b030
+			MX6QDL_PAD_RGMII_RD1__RGMII_RD1		0x1b030
+			MX6QDL_PAD_RGMII_RD2__RGMII_RD2		0x1b030
+			MX6QDL_PAD_RGMII_RD3__RGMII_RD3		0x1b030
+			MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL	0x1b030
+			MX6QDL_PAD_RGMII_TXC__RGMII_TXC		0x1b030
+			MX6QDL_PAD_RGMII_TD0__RGMII_TD0		0x1b030
+			MX6QDL_PAD_RGMII_TD1__RGMII_TD1		0x1b030
+			MX6QDL_PAD_RGMII_TD2__RGMII_TD2		0x1b030
+			MX6QDL_PAD_RGMII_TD3__RGMII_TD3		0x1b030
+			MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL	0x1b030
+			MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK	0x1b0b0
+			MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
+			MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
+		>;
+	};
+
+	pinctrl_ecspi2: escpi2grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_CS0__ECSPI2_SCLK	0x100b1
+			MX6QDL_PAD_EIM_CS1__ECSPI2_MOSI	0x100b1
+			MX6QDL_PAD_EIM_OE__ECSPI2_MISO	0x100b1
+			MX6QDL_PAD_EIM_RW__GPIO2_IO26	0x100b1
+		>;
+	};
+
+	pinctrl_flexcan1: flexcan1grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX	0x1b0b1
+			MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX	0x1b0b1
+			MX6QDL_PAD_GPIO_2__GPIO1_IO02		0x4001b0b0
+		>;
+	};
+
+	pinctrl_gpio_leds: gpioledsgrp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL0__GPIO4_IO06		0x1b0b0
+			MX6QDL_PAD_KEY_ROW0__GPIO4_IO07		0x1b0b0
+			MX6QDL_PAD_KEY_ROW4__GPIO4_IO15		0x1b0b0
+		>;
+	};
+
+	pinctrl_gpmi_nand: gpminandgrp {
+		fsl,pins = <
+			MX6QDL_PAD_NANDF_CLE__NAND_CLE		0xb0b1
+			MX6QDL_PAD_NANDF_ALE__NAND_ALE		0xb0b1
+			MX6QDL_PAD_NANDF_WP_B__NAND_WP_B	0xb0b1
+			MX6QDL_PAD_NANDF_RB0__NAND_READY_B	0xb000
+			MX6QDL_PAD_NANDF_CS0__NAND_CE0_B	0xb0b1
+			MX6QDL_PAD_SD4_CMD__NAND_RE_B		0xb0b1
+			MX6QDL_PAD_SD4_CLK__NAND_WE_B		0xb0b1
+			MX6QDL_PAD_NANDF_D0__NAND_DATA00	0xb0b1
+			MX6QDL_PAD_NANDF_D1__NAND_DATA01	0xb0b1
+			MX6QDL_PAD_NANDF_D2__NAND_DATA02	0xb0b1
+			MX6QDL_PAD_NANDF_D3__NAND_DATA03	0xb0b1
+			MX6QDL_PAD_NANDF_D4__NAND_DATA04	0xb0b1
+			MX6QDL_PAD_NANDF_D5__NAND_DATA05	0xb0b1
+			MX6QDL_PAD_NANDF_D6__NAND_DATA06	0xb0b1
+			MX6QDL_PAD_NANDF_D7__NAND_DATA07	0xb0b1
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D21__I2C1_SCL		0x4001b8b1
+			MX6QDL_PAD_EIM_D28__I2C1_SDA		0x4001b8b1
+			MX6QDL_PAD_GPIO_4__GPIO1_IO04		0x0001b0b0
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL3__I2C2_SCL		0x4001b8b1
+			MX6QDL_PAD_KEY_ROW3__I2C2_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_3__I2C3_SCL		0x4001b8b1
+			MX6QDL_PAD_GPIO_6__I2C3_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_pcie: pciegrp {
+		fsl,pins = <
+			MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28	0x1b0b0
+			MX6QDL_PAD_ENET_TXD1__GPIO1_IO29	0x1b0b0
+		>;
+	};
+
+	pinctrl_pps: ppsgrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_5__GPIO1_IO05		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm1: pwm1grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_9__PWM1_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm2: pwm2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_DAT2__PWM2_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm3: pwm3grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_DAT1__PWM3_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm4: pwm4grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_DAT2__PWM4_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA	0x1b0b1
+			MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA	0x1b0b1
+			MX6QDL_PAD_GPIO_17__GPIO7_IO12		0x4001b0b1
+		>;
+	};
+
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA	0x1b0b1
+			MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA	0x1b0b1
+			MX6QDL_PAD_SD4_DAT3__GPIO2_IO11		0x4001b0b1
+		>;
+	};
+
+	pinctrl_uart5: uart5grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL1__UART5_TX_DATA	0x1b0b1
+			MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_usbotg: usbotggrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_1__USB_OTG_ID		0x13059
+		>;
+	};
+
+	pinctrl_usdhc3: usdhc3grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_CMD__SD3_CMD		0x17059
+			MX6QDL_PAD_SD3_CLK__SD3_CLK		0x10059
+			MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x17059
+			MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x17059
+			MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x17059
+			MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x17059
+			MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x17059 /* CD */
+			MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x17059
+		>;
+	};
+
+	pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_CMD__SD3_CMD		0x170b9
+			MX6QDL_PAD_SD3_CLK__SD3_CLK		0x100b9
+			MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x170b9
+			MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x170b9
+			MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x170b9
+			MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x170b9
+			MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x170b9 /* CD */
+			MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x170b9
+		>;
+	};
+
+	pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_CMD__SD3_CMD		0x170f9
+			MX6QDL_PAD_SD3_CLK__SD3_CLK		0x100f9
+			MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x170f9
+			MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x170f9
+			MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x170f9
+			MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x170f9
+			MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x170f9 /* CD */
+			MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x170f9
+		>;
+	};
+
+	pinctrl_wdog: wdoggrp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_DAT3__WDOG2_B		0x1b0b0
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi
new file mode 100644
index 0000000..635c203
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Gateworks Corporation
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	/* these are used by bootloader for disabling nodes */
+	aliases {
+		led0 = &led0;
+		led1 = &led1;
+		nand = &gpmi;
+		usb0 = &usbh1;
+		usb1 = &usbotg;
+	};
+
+	chosen {
+		stdout-path = &uart2;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_gpio_leds>;
+
+		led0: user1 {
+			label = "user1";
+			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+			default-state = "on";
+			linux,default-trigger = "heartbeat";
+		};
+
+		led1: user2 {
+			label = "user2";
+			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+			default-state = "off";
+		};
+	};
+
+	memory@10000000 {
+		device_type = "memory";
+		reg = <0x10000000 0x20000000>;
+	};
+
+	pps {
+		compatible = "pps-gpio";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_pps>;
+		gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+		status = "okay";
+	};
+
+	reg_3p3v: regulator-3p3v {
+		compatible = "regulator-fixed";
+		regulator-name = "3P3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	reg_5p0v: regulator-5p0v {
+		compatible = "regulator-fixed";
+		regulator-name = "5P0V";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+	};
+};
+
+&fec {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet>;
+	phy-mode = "rgmii-id";
+	status = "okay";
+};
+
+&gpmi {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	status = "okay";
+};
+
+&i2c1 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+
+	gpio@23 {
+		compatible = "nxp,pca9555";
+		reg = <0x23>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	eeprom@50 {
+		compatible = "atmel,24c02";
+		reg = <0x50>;
+		pagesize = <16>;
+	};
+
+	eeprom@51 {
+		compatible = "atmel,24c02";
+		reg = <0x51>;
+		pagesize = <16>;
+	};
+
+	eeprom@52 {
+		compatible = "atmel,24c02";
+		reg = <0x52>;
+		pagesize = <16>;
+	};
+
+	eeprom@53 {
+		compatible = "atmel,24c02";
+		reg = <0x53>;
+		pagesize = <16>;
+	};
+
+	rtc@68 {
+		compatible = "dallas,ds1672";
+		reg = <0x68>;
+	};
+};
+
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+};
+
+&i2c3 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	status = "okay";
+};
+
+&pcie {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pcie>;
+	reset-gpio = <&gpio1 0 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&pwm2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm2>; /* MX6_DIO1 */
+	status = "disabled";
+};
+
+&pwm3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm3>; /* MX6_DIO2 */
+	status = "disabled";
+};
+
+&pwm4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm4>; /* MX6_DIO3 */
+	status = "disabled";
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	status = "okay";
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart3>;
+	status = "okay";
+};
+
+&uart5 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart5>;
+	status = "okay";
+};
+
+&usbotg {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg>;
+	disable-over-current;
+	status = "okay";
+};
+
+&usbh1 {
+	status = "okay";
+};
+
+&wdog1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_wdog>;
+	fsl,ext-reset-output;
+};
+
+&iomuxc {
+	pinctrl_enet: enetgrp {
+		fsl,pins = <
+			MX6QDL_PAD_RGMII_RXC__RGMII_RXC		0x1b030
+			MX6QDL_PAD_RGMII_RD0__RGMII_RD0		0x1b030
+			MX6QDL_PAD_RGMII_RD1__RGMII_RD1		0x1b030
+			MX6QDL_PAD_RGMII_RD2__RGMII_RD2		0x1b030
+			MX6QDL_PAD_RGMII_RD3__RGMII_RD3		0x1b030
+			MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL	0x1b030
+			MX6QDL_PAD_RGMII_TXC__RGMII_TXC		0x1b030
+			MX6QDL_PAD_RGMII_TD0__RGMII_TD0		0x1b030
+			MX6QDL_PAD_RGMII_TD1__RGMII_TD1		0x1b030
+			MX6QDL_PAD_RGMII_TD2__RGMII_TD2		0x1b030
+			MX6QDL_PAD_RGMII_TD3__RGMII_TD3		0x1b030
+			MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL	0x1b030
+			MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK	0x1b0b0
+			MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
+			MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
+			MX6QDL_PAD_GPIO_16__ENET_REF_CLK	0x4001b0a8
+			MX6QDL_PAD_ENET_TXD0__GPIO1_IO30	0x1b0b0
+		>;
+	};
+
+	pinctrl_gpio_leds: gpioledsgrp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL0__GPIO4_IO06		0x1b0b0
+			MX6QDL_PAD_KEY_ROW0__GPIO4_IO07		0x1b0b0
+		>;
+	};
+
+	pinctrl_gpmi_nand: gpminandgrp {
+		fsl,pins = <
+			MX6QDL_PAD_NANDF_CLE__NAND_CLE		0xb0b1
+			MX6QDL_PAD_NANDF_ALE__NAND_ALE		0xb0b1
+			MX6QDL_PAD_NANDF_WP_B__NAND_WP_B	0xb0b1
+			MX6QDL_PAD_NANDF_RB0__NAND_READY_B	0xb000
+			MX6QDL_PAD_NANDF_CS0__NAND_CE0_B	0xb0b1
+			MX6QDL_PAD_SD4_CMD__NAND_RE_B		0xb0b1
+			MX6QDL_PAD_SD4_CLK__NAND_WE_B		0xb0b1
+			MX6QDL_PAD_NANDF_D0__NAND_DATA00	0xb0b1
+			MX6QDL_PAD_NANDF_D1__NAND_DATA01	0xb0b1
+			MX6QDL_PAD_NANDF_D2__NAND_DATA02	0xb0b1
+			MX6QDL_PAD_NANDF_D3__NAND_DATA03	0xb0b1
+			MX6QDL_PAD_NANDF_D4__NAND_DATA04	0xb0b1
+			MX6QDL_PAD_NANDF_D5__NAND_DATA05	0xb0b1
+			MX6QDL_PAD_NANDF_D6__NAND_DATA06	0xb0b1
+			MX6QDL_PAD_NANDF_D7__NAND_DATA07	0xb0b1
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D21__I2C1_SCL		0x4001b8b1
+			MX6QDL_PAD_EIM_D28__I2C1_SDA		0x4001b8b1
+			MX6QDL_PAD_GPIO_4__GPIO1_IO04		0x0001b0b0
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL3__I2C2_SCL		0x4001b8b1
+			MX6QDL_PAD_KEY_ROW3__I2C2_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_3__I2C3_SCL		0x4001b8b1
+			MX6QDL_PAD_GPIO_6__I2C3_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_pcie: pciegrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_0__GPIO1_IO00		0x1b0b0
+		>;
+	};
+
+	pinctrl_pps: ppsgrp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm2: pwm2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_DAT2__PWM2_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm3: pwm3grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_DAT1__PWM3_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm4: pwm4grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_CMD__PWM4_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA	0x1b0b1
+			MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA	0x1b0b1
+			MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart3: uart3grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D24__UART3_TX_DATA	0x1b0b1
+			MX6QDL_PAD_EIM_D25__UART3_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart5: uart5grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL1__UART5_TX_DATA	0x1b0b1
+			MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_usbotg: usbotggrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_1__USB_OTG_ID		0x17059
+		>;
+	};
+
+	pinctrl_wdog: wdoggrp {
+		fsl,pins = <
+			MX6QDL_PAD_DISP0_DAT8__WDOG1_B		0x1b0b0
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6qdl-icore-1.5.dtsi b/arch/arm/boot/dts/imx6qdl-icore-1.5.dtsi
index d91d46b..0fd7f2e 100644
--- a/arch/arm/boot/dts/imx6qdl-icore-1.5.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-icore-1.5.dtsi
@@ -25,10 +25,8 @@ MX6QDL_PAD_GPIO_17__GPIO7_IO12		0x1b0b0
 &fec {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
-	phy-reset-gpios = <&gpio7 12 GPIO_ACTIVE_LOW>;
 	clocks = <&clks IMX6QDL_CLK_ENET>,
 		 <&clks IMX6QDL_CLK_ENET>,
 		 <&clks IMX6QDL_CLK_ENET_REF>;
-	phy-mode = "rmii";
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi
index 7814f1e..756f3a9 100644
--- a/arch/arm/boot/dts/imx6qdl-icore.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-icore.dtsi
@@ -150,10 +150,23 @@ &clks {
 &fec {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
-	phy-reset-gpios = <&gpio7 12 GPIO_ACTIVE_LOW>;
 	clocks = <&clks IMX6QDL_CLK_ENET>, <&clks IMX6QDL_CLK_ENET>, <&rmii_clk>;
 	phy-mode = "rmii";
+	phy-handle = <&eth_phy>;
 	status = "okay";
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		eth_phy: ethernet-phy@0 {
+			compatible = "ethernet-phy-ieee802.3-c22";
+			reg = <0>;
+			reset-gpios = <&gpio7 12 GPIO_ACTIVE_LOW>;
+			reset-assert-us = <4000>;
+			reset-deassert-us = <4000>;
+		};
+	};
 };
 
 &gpmi {
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
index 6486df3..978dc1c 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
@@ -5,6 +5,7 @@
  */
 
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/dlg,da9063-regulator.h>
 
 / {
 	aliases {
@@ -100,6 +101,10 @@ da9062_rtc: rtc {
 			compatible = "dlg,da9062-rtc";
 		};
 
+		da9062_onkey: onkey {
+			compatible = "dlg,da9062-onkey";
+		};
+
 		watchdog {
 			compatible = "dlg,da9062-watchdog";
 		};
@@ -109,6 +114,7 @@ vdd_arm: buck1 {
 				regulator-name = "vdd_arm";
 				regulator-min-microvolt = <730000>;
 				regulator-max-microvolt = <1380000>;
+				regulator-initial-mode = <DA9063_BUCK_MODE_SYNC>;
 				regulator-always-on;
 			};
 
@@ -116,6 +122,7 @@ vdd_soc: buck2 {
 				regulator-name = "vdd_soc";
 				regulator-min-microvolt = <730000>;
 				regulator-max-microvolt = <1380000>;
+				regulator-initial-mode = <DA9063_BUCK_MODE_SYNC>;
 				regulator-always-on;
 			};
 
@@ -123,6 +130,7 @@ vdd_ddr3_1p5: buck3 {
 				regulator-name = "vdd_ddr3";
 				regulator-min-microvolt = <1500000>;
 				regulator-max-microvolt = <1500000>;
+				regulator-initial-mode = <DA9063_BUCK_MODE_SYNC>;
 				regulator-always-on;
 			};
 
@@ -130,6 +138,7 @@ vdd_eth_1p2: buck4 {
 				regulator-name = "vdd_eth";
 				regulator-min-microvolt = <1200000>;
 				regulator-max-microvolt = <1200000>;
+				regulator-initial-mode = <DA9063_BUCK_MODE_SYNC>;
 				regulator-always-on;
 			};
 
diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
index a2a4f33..0075637 100644
--- a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
@@ -60,18 +60,6 @@ reg_5p0v_main: regulator-5p0v-main {
 		regulator-always-on;
 	};
 
-	reg_5p0v_user_usb: regulator-5p0v-user-usb {
-		compatible = "regulator-fixed";
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_reg_user_usb>;
-		vin-supply = <&reg_5p0v_main>;
-		regulator-name = "5V_USER_USB";
-		regulator-min-microvolt = <5000000>;
-		regulator-max-microvolt = <5000000>;
-		gpio = <&gpio3 22 GPIO_ACTIVE_LOW>;
-		startup-delay-us = <1000>;
-	};
-
 	reg_3p3v_pmic: regulator-3p3v-pmic {
 		compatible = "regulator-fixed";
 		vin-supply = <&reg_12p0v>;
@@ -331,6 +319,39 @@ flash@0 {
 	};
 };
 
+&gpio3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_gpio3_hog>;
+
+	usb-emulation {
+		gpio-hog;
+		gpios = <19 GPIO_ACTIVE_HIGH>;
+		output-low;
+		line-name = "usb-emulation";
+	};
+
+	usb-mode1 {
+		gpio-hog;
+		gpios = <20 GPIO_ACTIVE_HIGH>;
+		output-high;
+		line-name = "usb-mode1";
+	};
+
+	usb-pwr {
+		gpio-hog;
+		gpios = <22 GPIO_ACTIVE_LOW>;
+		output-high;
+		line-name = "usb-pwr-ctrl-en-n";
+	};
+
+	usb-mode2 {
+		gpio-hog;
+		gpios = <23 GPIO_ACTIVE_HIGH>;
+		output-high;
+		line-name = "usb-mode2";
+	};
+};
+
 &i2c1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_i2c1>;
@@ -592,6 +613,16 @@ touchscreen@2a {
 		status = "disabled";
 	};
 
+	reg_5p0v_user_usb: charger@32 {
+		compatible = "microchip,ucs1002";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_ucs1002_pins>;
+		reg = <0x32>;
+		interrupts-extended = <&gpio5 2 IRQ_TYPE_EDGE_BOTH>,
+				      <&gpio3 21 IRQ_TYPE_EDGE_BOTH>;
+		interrupt-names = "a_det", "alert";
+	};
+
 	hpa1: amp@60 {
 		compatible = "ti,tpa6130a2";
 		pinctrl-names = "default";
@@ -629,7 +660,7 @@ &usdhc2 {
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	bus-width = <4>;
 	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
-	wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
+	disable-wp;
 	vmmc-supply = <&reg_3p3v_sd>;
 	vqmmc-supply = <&reg_3p3v>;
 	no-1-8-v;
@@ -642,7 +673,7 @@ &usdhc3 {
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	bus-width = <4>;
 	cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
-	wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+	disable-wp;
 	vmmc-supply = <&reg_3p3v_sd>;
 	vqmmc-supply = <&reg_3p3v>;
 	no-1-8-v;
@@ -776,6 +807,7 @@ switchphy4: switchphy@4 {
 &usbh1 {
 	vbus-supply = <&reg_5p0v_main>;
 	disable-over-current;
+	maximum-speed = "full-speed";
 	status = "okay";
 };
 
@@ -936,6 +968,15 @@ MX6QDL_PAD_ENET_REF_CLK__GPIO1_IO23	0x1b0b0
 		>;
 	};
 
+	pinctrl_gpio3_hog: gpio3hoggrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D19__GPIO3_IO19		0x1b0b0
+			MX6QDL_PAD_EIM_D20__GPIO3_IO20		0x1b0b0
+			MX6QDL_PAD_EIM_D22__GPIO3_IO22		0x1b0b0
+			MX6QDL_PAD_EIM_D23__GPIO3_IO23		0x1b0b0
+		>;
+	};
+
 	pinctrl_i2c1: i2c1grp {
 		fsl,pins = <
 			MX6QDL_PAD_CSI0_DAT8__I2C1_SDA		0x4001b8b1
@@ -983,12 +1024,6 @@ MX6QDL_PAD_SD3_RST__GPIO7_IO08		0x858
 		>;
 	};
 
-	pinctrl_reg_user_usb: usbotggrp {
-		fsl,pins = <
-			MX6QDL_PAD_EIM_D22__GPIO3_IO22		0x40000038
-		>;
-	};
-
 	pinctrl_rmii_phy_irq: phygrp {
 		fsl,pins = <
 			MX6QDL_PAD_EIM_D30__GPIO3_IO30		0x40010000
@@ -1048,6 +1083,13 @@ MX6QDL_PAD_CSI0_DAT13__UART4_RX_DATA	0x1b0b1
 		>;
 	};
 
+	pinctrl_ucs1002_pins: ucs1002grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_A25__GPIO5_IO02  	0x1b0b0
+			MX6QDL_PAD_EIM_D21__GPIO3_IO21  	0x1b0b0
+		>;
+	};
+
 	pinctrl_usdhc2: usdhc2grp {
 		fsl,pins = <
 			MX6QDL_PAD_SD2_CMD__SD2_CMD		0x10059
@@ -1056,7 +1098,6 @@ MX6QDL_PAD_SD2_DAT0__SD2_DATA0		0x17059
 			MX6QDL_PAD_SD2_DAT1__SD2_DATA1		0x17059
 			MX6QDL_PAD_SD2_DAT2__SD2_DATA2		0x17059
 			MX6QDL_PAD_SD2_DAT3__SD2_DATA3		0x17059
-			MX6QDL_PAD_NANDF_D3__GPIO2_IO03		0x40010040
 			MX6QDL_PAD_NANDF_D2__GPIO2_IO02		0x40010040
 		>;
 	};
@@ -1069,7 +1110,6 @@ MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x17059
 			MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x17059
 			MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x17059
 			MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x17059
-			MX6QDL_PAD_NANDF_D1__GPIO2_IO01		0x40010040
 			MX6QDL_PAD_NANDF_D0__GPIO2_IO00		0x40010040
 
 		>;
diff --git a/arch/arm/boot/dts/imx6sl-tolino-shine3.dts b/arch/arm/boot/dts/imx6sl-tolino-shine3.dts
new file mode 100644
index 0000000..27143ea
--- /dev/null
+++ b/arch/arm/boot/dts/imx6sl-tolino-shine3.dts
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: (GPL-2.0)
+/*
+ * Device tree for the Tolino Shine 3 ebook reader
+ *
+ * Name on mainboard is: 37NB-E60K00+4A4
+ * Serials start with: E60K02 (a number also seen in
+ * vendor kernel sources)
+ *
+ * This mainboard seems to be equipped with different SoCs.
+ * In the Toline Shine 3 ebook reader it is a i.MX6SL
+ *
+ * Copyright 2019 Andreas Kemnade
+ * based on works
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "imx6sl.dtsi"
+#include "e60k02.dtsi"
+
+/ {
+	model = "Tolino Shine 3";
+	compatible = "kobo,tolino-shine3", "fsl,imx6sl";
+};
+
+&gpio_keys {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_gpio_keys>;
+};
+
+&i2c1 {
+	pinctrl-names = "default","sleep";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	pinctrl-1 = <&pinctrl_i2c1_sleep>;
+};
+
+&i2c2 {
+	pinctrl-names = "default","sleep";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	pinctrl-1 = <&pinctrl_i2c2_sleep>;
+};
+
+&i2c3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+};
+
+&iomuxc {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_hog>;
+
+	pinctrl_gpio_keys: gpio-keysgrp {
+		fsl,pins = <
+			MX6SL_PAD_SD1_DAT1__GPIO5_IO08	0x17059	/* PWR_SW */
+			MX6SL_PAD_SD1_DAT4__GPIO5_IO12	0x17059	/* HALL_EN */
+		>;
+	};
+
+	pinctrl_hog: hoggrp {
+		fsl,pins = <
+			MX6SL_PAD_LCD_DAT0__GPIO2_IO20	0x79
+			MX6SL_PAD_LCD_DAT1__GPIO2_IO21	0x79
+			MX6SL_PAD_LCD_DAT2__GPIO2_IO22	0x79
+			MX6SL_PAD_LCD_DAT3__GPIO2_IO23	0x79
+			MX6SL_PAD_LCD_DAT4__GPIO2_IO24	0x79
+			MX6SL_PAD_LCD_DAT5__GPIO2_IO25	0x79
+			MX6SL_PAD_LCD_DAT6__GPIO2_IO26	0x79
+			MX6SL_PAD_LCD_DAT7__GPIO2_IO27	0x79
+			MX6SL_PAD_LCD_DAT8__GPIO2_IO28	0x79
+			MX6SL_PAD_LCD_DAT9__GPIO2_IO29	0x79
+			MX6SL_PAD_LCD_DAT10__GPIO2_IO30	0x79
+			MX6SL_PAD_LCD_DAT11__GPIO2_IO31	0x79
+			MX6SL_PAD_LCD_DAT12__GPIO3_IO00	0x79
+			MX6SL_PAD_LCD_DAT13__GPIO3_IO01	0x79
+			MX6SL_PAD_LCD_DAT14__GPIO3_IO02	0x79
+			MX6SL_PAD_LCD_DAT15__GPIO3_IO03	0x79
+			MX6SL_PAD_LCD_DAT16__GPIO3_IO04	0x79
+			MX6SL_PAD_LCD_DAT17__GPIO3_IO05	0x79
+			MX6SL_PAD_LCD_DAT18__GPIO3_IO06	0x79
+			MX6SL_PAD_LCD_DAT19__GPIO3_IO07	0x79
+			MX6SL_PAD_LCD_DAT20__GPIO3_IO08	0x79
+			MX6SL_PAD_LCD_DAT21__GPIO3_IO09	0x79
+			MX6SL_PAD_LCD_DAT22__GPIO3_IO10	0x79
+			MX6SL_PAD_LCD_DAT23__GPIO3_IO11	0x79
+			MX6SL_PAD_LCD_CLK__GPIO2_IO15		0x79
+			MX6SL_PAD_LCD_ENABLE__GPIO2_IO16	0x79
+			MX6SL_PAD_LCD_HSYNC__GPIO2_IO17	0x79
+			MX6SL_PAD_LCD_VSYNC__GPIO2_IO18	0x79
+			MX6SL_PAD_LCD_RESET__GPIO2_IO19	0x79
+			MX6SL_PAD_KEY_COL3__GPIO3_IO30		0x79
+			MX6SL_PAD_KEY_ROW7__GPIO4_IO07		0x79
+			MX6SL_PAD_ECSPI2_MOSI__GPIO4_IO13	0x79
+			MX6SL_PAD_KEY_COL5__GPIO4_IO02		0x79
+			MX6SL_PAD_KEY_ROW6__GPIO4_IO05		0x79
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6SL_PAD_I2C1_SCL__I2C1_SCL	 0x4001f8b1
+			MX6SL_PAD_I2C1_SDA__I2C1_SDA	 0x4001f8b1
+		>;
+	};
+
+	pinctrl_i2c1_sleep: i2c1grp-sleep {
+		fsl,pins = <
+			MX6SL_PAD_I2C1_SCL__I2C1_SCL	 0x400108b1
+			MX6SL_PAD_I2C1_SDA__I2C1_SDA	 0x400108b1
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX6SL_PAD_I2C2_SCL__I2C2_SCL	 0x4001f8b1
+			MX6SL_PAD_I2C2_SDA__I2C2_SDA	 0x4001f8b1
+		>;
+	};
+
+	pinctrl_i2c2_sleep: i2c2grp-sleep {
+		fsl,pins = <
+			MX6SL_PAD_I2C2_SCL__I2C2_SCL	 0x400108b1
+			MX6SL_PAD_I2C2_SDA__I2C2_SDA	 0x400108b1
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX6SL_PAD_REF_CLK_24M__I2C3_SCL  0x4001f8b1
+			MX6SL_PAD_REF_CLK_32K__I2C3_SDA  0x4001f8b1
+		>;
+	};
+
+	pinctrl_led: ledgrp {
+		fsl,pins = <
+			MX6SL_PAD_SD1_DAT6__GPIO5_IO07 0x17059
+		>;
+	};
+
+	pinctrl_lm3630a_bl_gpio: lm3630a-bl-gpiogrp {
+		fsl,pins = <
+			MX6SL_PAD_EPDC_PWRCTRL3__GPIO2_IO10		0x10059 /* HWEN */
+		>;
+	};
+
+	pinctrl_ricoh_gpio: ricoh_gpiogrp {
+		fsl,pins = <
+			MX6SL_PAD_SD1_CLK__GPIO5_IO15	0x1b8b1	/* ricoh619 chg */
+			MX6SL_PAD_SD1_DAT0__GPIO5_IO11	0x1b8b1	/* ricoh619 irq */
+			MX6SL_PAD_KEY_COL2__GPIO3_IO28	0x1b8b1	/* ricoh619 bat_low_int */
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX6SL_PAD_UART1_TXD__UART1_TX_DATA 0x1b0b1
+			MX6SL_PAD_UART1_RXD__UART1_RX_DATA 0x1b0b1
+		>;
+	};
+
+	pinctrl_usbotg1: usbotg1grp {
+		fsl,pins = <
+			MX6SL_PAD_EPDC_PWRCOM__USB_OTG1_ID 0x17059
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX6SL_PAD_SD2_CMD__SD2_CMD		0x17059
+			MX6SL_PAD_SD2_CLK__SD2_CLK		0x13059
+			MX6SL_PAD_SD2_DAT0__SD2_DATA0		0x17059
+			MX6SL_PAD_SD2_DAT1__SD2_DATA1		0x17059
+			MX6SL_PAD_SD2_DAT2__SD2_DATA2		0x17059
+			MX6SL_PAD_SD2_DAT3__SD2_DATA3		0x17059
+		>;
+	};
+
+	pinctrl_usdhc2_100mhz: usdhc2grp-100mhz {
+		fsl,pins = <
+			MX6SL_PAD_SD2_CMD__SD2_CMD		0x170b9
+			MX6SL_PAD_SD2_CLK__SD2_CLK		0x130b9
+			MX6SL_PAD_SD2_DAT0__SD2_DATA0		0x170b9
+			MX6SL_PAD_SD2_DAT1__SD2_DATA1		0x170b9
+			MX6SL_PAD_SD2_DAT2__SD2_DATA2		0x170b9
+			MX6SL_PAD_SD2_DAT3__SD2_DATA3		0x170b9
+		>;
+	};
+
+	pinctrl_usdhc2_200mhz: usdhc2grp-200mhz {
+		fsl,pins = <
+			MX6SL_PAD_SD2_CMD__SD2_CMD		0x170f9
+			MX6SL_PAD_SD2_CLK__SD2_CLK		0x130f9
+			MX6SL_PAD_SD2_DAT0__SD2_DATA0		0x170f9
+			MX6SL_PAD_SD2_DAT1__SD2_DATA1		0x170f9
+			MX6SL_PAD_SD2_DAT2__SD2_DATA2		0x170f9
+			MX6SL_PAD_SD2_DAT3__SD2_DATA3		0x170f9
+		>;
+	};
+
+	pinctrl_usdhc2_sleep: usdhc2grp-sleep {
+		fsl,pins = <
+			MX6SL_PAD_SD2_CMD__GPIO5_IO04		0x100f9
+			MX6SL_PAD_SD2_CLK__GPIO5_IO05		0x100f9
+			MX6SL_PAD_SD2_DAT0__GPIO5_IO01		0x100f9
+			MX6SL_PAD_SD2_DAT1__GPIO4_IO30		0x100f9
+			MX6SL_PAD_SD2_DAT2__GPIO5_IO03		0x100f9
+			MX6SL_PAD_SD2_DAT3__GPIO4_IO28		0x100f9
+		>;
+	};
+
+	pinctrl_usdhc3: usdhc3grp {
+		fsl,pins = <
+			MX6SL_PAD_SD3_CMD__SD3_CMD	0x11059
+			MX6SL_PAD_SD3_CLK__SD3_CLK	0x11059
+			MX6SL_PAD_SD3_DAT0__SD3_DATA0	0x11059
+			MX6SL_PAD_SD3_DAT1__SD3_DATA1	0x11059
+			MX6SL_PAD_SD3_DAT2__SD3_DATA2	0x11059
+			MX6SL_PAD_SD3_DAT3__SD3_DATA3	0x11059
+		>;
+	};
+
+	pinctrl_usdhc3_100mhz: usdhc3grp-100mhz {
+		fsl,pins = <
+			MX6SL_PAD_SD3_CMD__SD3_CMD	0x170b9
+			MX6SL_PAD_SD3_CLK__SD3_CLK	0x170b9
+			MX6SL_PAD_SD3_DAT0__SD3_DATA0	0x170b9
+			MX6SL_PAD_SD3_DAT1__SD3_DATA1	0x170b9
+			MX6SL_PAD_SD3_DAT2__SD3_DATA2	0x170b9
+			MX6SL_PAD_SD3_DAT3__SD3_DATA3	0x170b9
+		>;
+	};
+
+	pinctrl_usdhc3_200mhz: usdhc3grp-200mhz {
+		fsl,pins = <
+			MX6SL_PAD_SD3_CMD__SD3_CMD	0x170f9
+			MX6SL_PAD_SD3_CLK__SD3_CLK	0x170f9
+			MX6SL_PAD_SD3_DAT0__SD3_DATA0	0x170f9
+			MX6SL_PAD_SD3_DAT1__SD3_DATA1	0x170f9
+			MX6SL_PAD_SD3_DAT2__SD3_DATA2	0x170f9
+			MX6SL_PAD_SD3_DAT3__SD3_DATA3	0x170f9
+		>;
+	};
+
+	pinctrl_usdhc3_sleep: usdhc3grp-sleep {
+		fsl,pins = <
+			MX6SL_PAD_SD3_CMD__GPIO5_IO21	0x100c1
+			MX6SL_PAD_SD3_CLK__GPIO5_IO18	0x100c1
+			MX6SL_PAD_SD3_DAT0__GPIO5_IO19	0x100c1
+			MX6SL_PAD_SD3_DAT1__GPIO5_IO20	0x100c1
+			MX6SL_PAD_SD3_DAT2__GPIO5_IO16	0x100c1
+			MX6SL_PAD_SD3_DAT3__GPIO5_IO17	0x100c1
+		>;
+	};
+
+	pinctrl_wifi_power: wifi-powergrp {
+		fsl,pins = <
+			MX6SL_PAD_SD2_DAT6__GPIO4_IO29	0x10059	/* WIFI_3V3_ON */
+		>;
+	};
+
+	pinctrl_wifi_reset: wifi-resetgrp {
+		fsl,pins = <
+			MX6SL_PAD_SD2_DAT7__GPIO5_IO00	0x10059	/* WIFI_RST */
+		>;
+	};
+};
+
+&leds {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_led>;
+};
+
+&lm3630a {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_lm3630a_bl_gpio>;
+};
+
+&reg_wifi {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_wifi_power>;
+};
+
+&reg_vdd1p1 {
+	vin-supply = <&dcdc2_reg>;
+};
+
+&reg_vdd2p5 {
+	vin-supply = <&dcdc2_reg>;
+};
+
+&ricoh619 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_ricoh_gpio>;
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+};
+
+&usdhc2 {
+	pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+	pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+	pinctrl-3 = <&pinctrl_usdhc2_sleep>;
+};
+
+&usdhc3 {
+	pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
+	pinctrl-0 = <&pinctrl_usdhc3>;
+	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+	pinctrl-3 = <&pinctrl_usdhc3_sleep>;
+};
+
+&wifi_pwrseq {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_wifi_reset>;
+};
diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi
index 85aa8bb..a1bc5bb3 100644
--- a/arch/arm/boot/dts/imx6sll.dtsi
+++ b/arch/arm/boot/dts/imx6sll.dtsi
@@ -632,6 +632,15 @@ sdma: dma-controller@20ec000 {
 				fsl,sdma-ram-script-name = "imx/sdma/sdma-imx6q.bin";
 			};
 
+			pxp: pxp@20f0000 {
+				compatible = "fsl,imx6sll-pxp", "fsl,imx6ull-pxp";
+				reg = <0x20f0000 0x4000>;
+				interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6SLL_CLK_PXP>;
+				clock-names = "axi";
+			};
+
 			lcdif: lcd-controller@20f8000 {
 				compatible = "fsl,imx6sll-lcdif", "fsl,imx28-lcdif";
 				reg = <0x020f8000 0x4000>;
diff --git a/arch/arm/boot/dts/imx6sx-sdb-reva.dts b/arch/arm/boot/dts/imx6sx-sdb-reva.dts
index 91a7548..dce5dcf 100644
--- a/arch/arm/boot/dts/imx6sx-sdb-reva.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb-reva.dts
@@ -6,6 +6,7 @@
 
 / {
 	model = "Freescale i.MX6 SoloX SDB RevA Board";
+	compatible = "fsl,imx6sx-sdb-reva", "fsl,imx6sx";
 };
 
 &i2c1 {
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
index 2121445..265bf41 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
@@ -327,6 +327,7 @@ &usdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	no-1-8-v;
+	broken-cd;
 	keep-power-in-suspend;
 	wakeup-source;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx6ull-colibri.dtsi b/arch/arm/boot/dts/imx6ull-colibri.dtsi
index 6d850d9..9145c53 100644
--- a/arch/arm/boot/dts/imx6ull-colibri.dtsi
+++ b/arch/arm/boot/dts/imx6ull-colibri.dtsi
@@ -220,7 +220,7 @@ &wdog1 {
 &iomuxc {
 	pinctrl_can_int: canint-grp {
 		fsl,pins = <
-			MX6UL_PAD_ENET1_TX_DATA1__GPIO2_IO04	0X14 /* SODIMM 73 */
+			MX6UL_PAD_ENET1_TX_DATA1__GPIO2_IO04	0x13010	/* SODIMM 73 */
 		>;
 	};
 
@@ -256,15 +256,15 @@ MX6UL_PAD_ENET2_TX_EN__GPIO2_IO13	0x0
 
 	pinctrl_ecspi1_cs: ecspi1-cs-grp {
 		fsl,pins = <
-			MX6UL_PAD_LCD_DATA21__GPIO3_IO26	0x000a0
+			MX6UL_PAD_LCD_DATA21__GPIO3_IO26	0x70a0	/* SODIMM 86 */
 		>;
 	};
 
 	pinctrl_ecspi1: ecspi1-grp {
 		fsl,pins = <
-			MX6UL_PAD_LCD_DATA20__ECSPI1_SCLK	0x000a0
-			MX6UL_PAD_LCD_DATA22__ECSPI1_MOSI	0x000a0
-			MX6UL_PAD_LCD_DATA23__ECSPI1_MISO	0x100a0
+			MX6UL_PAD_LCD_DATA20__ECSPI1_SCLK	0x000a0	/* SODIMM 88 */
+			MX6UL_PAD_LCD_DATA22__ECSPI1_MOSI	0x000a0 /* SODIMM 92 */
+			MX6UL_PAD_LCD_DATA23__ECSPI1_MISO	0x100a0 /* SODIMM 90 */
 		>;
 	};
 
@@ -284,68 +284,68 @@ MX6UL_PAD_ENET1_RX_EN__FLEXCAN2_TX	0x1b020
 
 	pinctrl_gpio_bl_on: gpio-bl-on-grp {
 		fsl,pins = <
-			MX6UL_PAD_JTAG_TMS__GPIO1_IO11		0x000a0
+			MX6UL_PAD_JTAG_TMS__GPIO1_IO11		0x30a0	/* SODIMM 71 */
 		>;
 	};
 
 	pinctrl_gpio1: gpio1-grp {
 		fsl,pins = <
-			MX6UL_PAD_UART3_RX_DATA__GPIO1_IO25	0X14 /* SODIMM 77 */
-			MX6UL_PAD_JTAG_TCK__GPIO1_IO14		0x14 /* SODIMM 99 */
-			MX6UL_PAD_NAND_CE1_B__GPIO4_IO14	0x14 /* SODIMM 133 */
-			MX6UL_PAD_UART3_TX_DATA__GPIO1_IO24	0x14 /* SODIMM 135 */
-			MX6UL_PAD_UART3_CTS_B__GPIO1_IO26	0x14 /* SODIMM 100 */
-			MX6UL_PAD_JTAG_TRST_B__GPIO1_IO15	0x14 /* SODIMM 102 */
-			MX6UL_PAD_ENET1_RX_ER__GPIO2_IO07	0x14 /* SODIMM 104 */
-			MX6UL_PAD_UART3_RTS_B__GPIO1_IO27	0x14 /* SODIMM 186 */
+			MX6UL_PAD_UART3_RX_DATA__GPIO1_IO25	0x10b0 /* SODIMM 77 */
+			MX6UL_PAD_JTAG_TCK__GPIO1_IO14		0x70a0 /* SODIMM 99 */
+			MX6UL_PAD_NAND_CE1_B__GPIO4_IO14	0x10b0 /* SODIMM 133 */
+			MX6UL_PAD_UART3_TX_DATA__GPIO1_IO24	0x10b0 /* SODIMM 135 */
+			MX6UL_PAD_UART3_CTS_B__GPIO1_IO26	0x10b0 /* SODIMM 100 */
+			MX6UL_PAD_JTAG_TRST_B__GPIO1_IO15	0x70a0 /* SODIMM 102 */
+			MX6UL_PAD_ENET1_RX_ER__GPIO2_IO07	0x10b0 /* SODIMM 104 */
+			MX6UL_PAD_UART3_RTS_B__GPIO1_IO27	0x10b0 /* SODIMM 186 */
 		>;
 	};
 
 	pinctrl_gpio2: gpio2-grp { /* Camera */
 		fsl,pins = <
-			MX6UL_PAD_CSI_DATA04__GPIO4_IO25	0x74 /* SODIMM 69 */
-			MX6UL_PAD_CSI_MCLK__GPIO4_IO17		0x14 /* SODIMM 75 */
-			MX6UL_PAD_CSI_DATA06__GPIO4_IO27	0x14 /* SODIMM 85 */
-			MX6UL_PAD_CSI_PIXCLK__GPIO4_IO18	0x14 /* SODIMM 96 */
-			MX6UL_PAD_CSI_DATA05__GPIO4_IO26	0x14 /* SODIMM 98 */
+			MX6UL_PAD_CSI_DATA04__GPIO4_IO25	0x10b0 /* SODIMM 69 */
+			MX6UL_PAD_CSI_MCLK__GPIO4_IO17		0x10b0 /* SODIMM 75 */
+			MX6UL_PAD_CSI_DATA06__GPIO4_IO27	0x10b0 /* SODIMM 85 */
+			MX6UL_PAD_CSI_PIXCLK__GPIO4_IO18	0x10b0 /* SODIMM 96 */
+			MX6UL_PAD_CSI_DATA05__GPIO4_IO26	0x10b0 /* SODIMM 98 */
 		>;
 	};
 
 	pinctrl_gpio3: gpio3-grp { /* CAN2 */
 		fsl,pins = <
-			MX6UL_PAD_ENET1_RX_EN__GPIO2_IO02	0x14 /* SODIMM 178 */
-			MX6UL_PAD_ENET1_TX_DATA0__GPIO2_IO03	0x14 /* SODIMM 188 */
+			MX6UL_PAD_ENET1_RX_EN__GPIO2_IO02	0x10b0 /* SODIMM 178 */
+			MX6UL_PAD_ENET1_TX_DATA0__GPIO2_IO03	0x10b0 /* SODIMM 188 */
 		>;
 	};
 
 	pinctrl_gpio4: gpio4-grp {
 		fsl,pins = <
-			MX6UL_PAD_CSI_DATA07__GPIO4_IO28	0x74 /* SODIMM 65 */
+			MX6UL_PAD_CSI_DATA07__GPIO4_IO28	0x10b0 /* SODIMM 65 */
 		>;
 	};
 
 	pinctrl_gpio5: gpio5-grp { /* ATMEL MXT TOUCH */
 		fsl,pins = <
-			MX6UL_PAD_JTAG_MOD__GPIO1_IO10		0x74 /* SODIMM 106 */
+			MX6UL_PAD_JTAG_MOD__GPIO1_IO10		0xb0a0 /* SODIMM 106 */
 		>;
 	};
 
 	pinctrl_gpio6: gpio6-grp { /* Wifi pins */
 		fsl,pins = <
-			MX6UL_PAD_GPIO1_IO03__GPIO1_IO03	0x14 /* SODIMM 89 */
-			MX6UL_PAD_CSI_DATA02__GPIO4_IO23	0x14 /* SODIMM 79 */
-			MX6UL_PAD_CSI_VSYNC__GPIO4_IO19		0x14 /* SODIMM 81 */
-			MX6UL_PAD_CSI_DATA03__GPIO4_IO24	0x14 /* SODIMM 97 */
-			MX6UL_PAD_CSI_DATA00__GPIO4_IO21	0x14 /* SODIMM 101 */
-			MX6UL_PAD_CSI_DATA01__GPIO4_IO22	0x14 /* SODIMM 103 */
-			MX6UL_PAD_CSI_HSYNC__GPIO4_IO20		0x14 /* SODIMM 94 */
+			MX6UL_PAD_GPIO1_IO03__GPIO1_IO03	0x10b0 /* SODIMM 89 */
+			MX6UL_PAD_CSI_DATA02__GPIO4_IO23	0x10b0 /* SODIMM 79 */
+			MX6UL_PAD_CSI_VSYNC__GPIO4_IO19		0x10b0 /* SODIMM 81 */
+			MX6UL_PAD_CSI_DATA03__GPIO4_IO24	0x10b0 /* SODIMM 97 */
+			MX6UL_PAD_CSI_DATA00__GPIO4_IO21	0x10b0 /* SODIMM 101 */
+			MX6UL_PAD_CSI_DATA01__GPIO4_IO22	0x10b0 /* SODIMM 103 */
+			MX6UL_PAD_CSI_HSYNC__GPIO4_IO20		0x10b0 /* SODIMM 94 */
 		>;
 	};
 
 	pinctrl_gpio7: gpio7-grp { /* CAN1 */
 		fsl,pins = <
-			MX6UL_PAD_ENET1_RX_DATA0__GPIO2_IO00	0x74 /* SODIMM 55 */
-			MX6UL_PAD_ENET1_RX_DATA1__GPIO2_IO01	0x74 /* SODIMM 63 */
+			MX6UL_PAD_ENET1_RX_DATA0__GPIO2_IO00	0xb0b0 /* SODIMM 55 */
+			MX6UL_PAD_ENET1_RX_DATA1__GPIO2_IO01	0xb0b0 /* SODIMM 63 */
 		>;
 	};
 
@@ -370,15 +370,15 @@ MX6UL_PAD_NAND_READY_B__RAWNAND_READY_B	0x100a9
 
 	pinctrl_i2c1: i2c1-grp {
 		fsl,pins = <
-			MX6UL_PAD_UART4_TX_DATA__I2C1_SCL 0x4001b8b0
-			MX6UL_PAD_UART4_RX_DATA__I2C1_SDA 0x4001b8b0
+			MX6UL_PAD_UART4_TX_DATA__I2C1_SCL 0x4001b8b0	/* SODIMM 196 */
+			MX6UL_PAD_UART4_RX_DATA__I2C1_SDA 0x4001b8b0	/* SODIMM 194 */
 		>;
 	};
 
 	pinctrl_i2c1_gpio: i2c1-gpio-grp {
 		fsl,pins = <
-			MX6UL_PAD_UART4_TX_DATA__GPIO1_IO28 0x4001b8b0
-			MX6UL_PAD_UART4_RX_DATA__GPIO1_IO29 0x4001b8b0
+			MX6UL_PAD_UART4_TX_DATA__GPIO1_IO28 0x4001b8b0	/* SODIMM 196 */
+			MX6UL_PAD_UART4_RX_DATA__GPIO1_IO29 0x4001b8b0	/* SODIMM 194 */
 		>;
 	};
 
@@ -398,107 +398,107 @@ MX6UL_PAD_UART5_RX_DATA__GPIO1_IO31 0x4001b8b0
 
 	pinctrl_lcdif_dat: lcdif-dat-grp {
 		fsl,pins = <
-			MX6UL_PAD_LCD_DATA00__LCDIF_DATA00  0x00079
-			MX6UL_PAD_LCD_DATA01__LCDIF_DATA01  0x00079
-			MX6UL_PAD_LCD_DATA02__LCDIF_DATA02  0x00079
-			MX6UL_PAD_LCD_DATA03__LCDIF_DATA03  0x00079
-			MX6UL_PAD_LCD_DATA04__LCDIF_DATA04  0x00079
-			MX6UL_PAD_LCD_DATA05__LCDIF_DATA05  0x00079
-			MX6UL_PAD_LCD_DATA06__LCDIF_DATA06  0x00079
-			MX6UL_PAD_LCD_DATA07__LCDIF_DATA07  0x00079
-			MX6UL_PAD_LCD_DATA08__LCDIF_DATA08  0x00079
-			MX6UL_PAD_LCD_DATA09__LCDIF_DATA09  0x00079
-			MX6UL_PAD_LCD_DATA10__LCDIF_DATA10  0x00079
-			MX6UL_PAD_LCD_DATA11__LCDIF_DATA11  0x00079
-			MX6UL_PAD_LCD_DATA12__LCDIF_DATA12  0x00079
-			MX6UL_PAD_LCD_DATA13__LCDIF_DATA13  0x00079
-			MX6UL_PAD_LCD_DATA14__LCDIF_DATA14  0x00079
-			MX6UL_PAD_LCD_DATA15__LCDIF_DATA15  0x00079
-			MX6UL_PAD_LCD_DATA16__LCDIF_DATA16  0x00079
-			MX6UL_PAD_LCD_DATA17__LCDIF_DATA17  0x00079
+			MX6UL_PAD_LCD_DATA00__LCDIF_DATA00 0x00079	/* SODIMM 76 */
+			MX6UL_PAD_LCD_DATA01__LCDIF_DATA01 0x00079	/* SODIMM 70 */
+			MX6UL_PAD_LCD_DATA02__LCDIF_DATA02 0x00079	/* SODIMM 60 */
+			MX6UL_PAD_LCD_DATA03__LCDIF_DATA03 0x00079	/* SODIMM 58 */
+			MX6UL_PAD_LCD_DATA04__LCDIF_DATA04 0x00079	/* SODIMM 78 */
+			MX6UL_PAD_LCD_DATA05__LCDIF_DATA05 0x00079	/* SODIMM 72 */
+			MX6UL_PAD_LCD_DATA06__LCDIF_DATA06 0x00079	/* SODIMM 80 */
+			MX6UL_PAD_LCD_DATA07__LCDIF_DATA07 0x00079	/* SODIMM 46 */
+			MX6UL_PAD_LCD_DATA08__LCDIF_DATA08 0x00079	/* SODIMM 62 */
+			MX6UL_PAD_LCD_DATA09__LCDIF_DATA09 0x00079	/* SODIMM 48 */
+			MX6UL_PAD_LCD_DATA10__LCDIF_DATA10 0x00079	/* SODIMM 74 */
+			MX6UL_PAD_LCD_DATA11__LCDIF_DATA11 0x00079	/* SODIMM 50 */
+			MX6UL_PAD_LCD_DATA12__LCDIF_DATA12 0x00079	/* SODIMM 52 */
+			MX6UL_PAD_LCD_DATA13__LCDIF_DATA13 0x00079	/* SODIMM 54 */
+			MX6UL_PAD_LCD_DATA14__LCDIF_DATA14 0x00079	/* SODIMM 66 */
+			MX6UL_PAD_LCD_DATA15__LCDIF_DATA15 0x00079	/* SODIMM 64 */
+			MX6UL_PAD_LCD_DATA16__LCDIF_DATA16 0x00079	/* SODIMM 57 */
+			MX6UL_PAD_LCD_DATA17__LCDIF_DATA17 0x00079	/* SODIMM 61 */
 		>;
 	};
 
 	pinctrl_lcdif_ctrl: lcdif-ctrl-grp {
 		fsl,pins = <
-			MX6UL_PAD_LCD_CLK__LCDIF_CLK	    0x00079
-			MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE  0x00079
-			MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC    0x00079
-			MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC    0x00079
+			MX6UL_PAD_LCD_CLK__LCDIF_CLK	    0x00079	/* SODIMM 56 */
+			MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE  0x00079	/* SODIMM 44 */
+			MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC    0x00079	/* SODIMM 68 */
+			MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC    0x00079	/* SODIMM 82 */
 		>;
 	};
 
 	pinctrl_pwm4: pwm4-grp {
 		fsl,pins = <
-			MX6UL_PAD_NAND_WP_B__PWM4_OUT	0x00079
+			MX6UL_PAD_NAND_WP_B__PWM4_OUT	0x00079		/* SODIMM 59 */
 		>;
 	};
 
 	pinctrl_pwm5: pwm5-grp {
 		fsl,pins = <
-			MX6UL_PAD_NAND_DQS__PWM5_OUT	0x00079
+			MX6UL_PAD_NAND_DQS__PWM5_OUT	0x00079		/* SODIMM 28 */
 		>;
 	};
 
 	pinctrl_pwm6: pwm6-grp {
 		fsl,pins = <
-			MX6UL_PAD_ENET1_TX_EN__PWM6_OUT	0x00079
+			MX6UL_PAD_ENET1_TX_EN__PWM6_OUT	0x00079		/* SODIMM 30 */
 		>;
 	};
 
 	pinctrl_pwm7: pwm7-grp {
 		fsl,pins = <
-			MX6UL_PAD_ENET1_TX_CLK__PWM7_OUT	0x00079
+			MX6UL_PAD_ENET1_TX_CLK__PWM7_OUT	0x00079	/* SODIMM 67 */
 		>;
 	};
 
 	pinctrl_uart1: uart1-grp {
 		fsl,pins = <
-			MX6UL_PAD_UART1_TX_DATA__UART1_DTE_RX	0x1b0b1
-			MX6UL_PAD_UART1_RX_DATA__UART1_DTE_TX	0x1b0b1
-			MX6UL_PAD_UART1_RTS_B__UART1_DTE_CTS	0x1b0b1
-			MX6UL_PAD_UART1_CTS_B__UART1_DTE_RTS	0x1b0b1
+			MX6UL_PAD_UART1_TX_DATA__UART1_DTE_RX	0x1b0b1	/* SODIMM 33 */
+			MX6UL_PAD_UART1_RX_DATA__UART1_DTE_TX	0x1b0b1	/* SODIMM 35 */
+			MX6UL_PAD_UART1_RTS_B__UART1_DTE_CTS	0x1b0b1	/* SODIMM 27 */
+			MX6UL_PAD_UART1_CTS_B__UART1_DTE_RTS	0x1b0b1	/* SODIMM 25 */
 		>;
 	};
 
 	pinctrl_uart1_ctrl1: uart1-ctrl1-grp { /* Additional DTR, DCD */
 		fsl,pins = <
-			MX6UL_PAD_JTAG_TDI__GPIO1_IO13		0x1b0b1 /* DCD */
-			MX6UL_PAD_LCD_DATA18__GPIO3_IO23	0x1b0b1 /* DSR */
-			MX6UL_PAD_JTAG_TDO__GPIO1_IO12		0x1b0b1 /* DTR */
-			MX6UL_PAD_LCD_DATA19__GPIO3_IO24        0x1b0b1 /* RI */
+			MX6UL_PAD_JTAG_TDI__GPIO1_IO13		0x70a0 /* SODIMM 31 */
+			MX6UL_PAD_LCD_DATA18__GPIO3_IO23	0x10b0 /* SODIMM 29 */
+			MX6UL_PAD_JTAG_TDO__GPIO1_IO12		0x90b1 /* SODIMM 23 */
+			MX6UL_PAD_LCD_DATA19__GPIO3_IO24        0x10b0 /* SODIMM 37 */
 		>;
 	};
 
 	pinctrl_uart2: uart2-grp {
 		fsl,pins = <
-			MX6UL_PAD_UART2_TX_DATA__UART2_DTE_RX	0x1b0b1
-			MX6UL_PAD_UART2_RX_DATA__UART2_DTE_TX	0x1b0b1
-			MX6UL_PAD_UART2_CTS_B__UART2_DTE_RTS	0x1b0b1
-			MX6UL_PAD_UART2_RTS_B__UART2_DTE_CTS	0x1b0b1
+			MX6UL_PAD_UART2_TX_DATA__UART2_DTE_RX	0x1b0b1 /* SODIMM 36 */
+			MX6UL_PAD_UART2_RX_DATA__UART2_DTE_TX	0x1b0b1 /* SODIMM 38 */
+			MX6UL_PAD_UART2_CTS_B__UART2_DTE_RTS	0x1b0b1 /* SODIMM 32 */
+			MX6UL_PAD_UART2_RTS_B__UART2_DTE_CTS	0x1b0b1 /* SODIMM 34 */
 		>;
 	};
 	pinctrl_uart5: uart5-grp {
 		fsl,pins = <
-			MX6UL_PAD_GPIO1_IO04__UART5_DTE_RX	0x1b0b1
-			MX6UL_PAD_GPIO1_IO05__UART5_DTE_TX	0x1b0b1
+			MX6UL_PAD_GPIO1_IO04__UART5_DTE_RX	0x1b0b1 /* SODIMM 19 */
+			MX6UL_PAD_GPIO1_IO05__UART5_DTE_TX	0x1b0b1 /* SODIMM 21 */
 		>;
 	};
 
 	pinctrl_usbh_reg: gpio-usbh-reg {
 		fsl,pins = <
-			MX6UL_PAD_GPIO1_IO02__GPIO1_IO02	0x1b0b1 /* SODIMM 129 USBH PEN */
+			MX6UL_PAD_GPIO1_IO02__GPIO1_IO02	0x10b0 /* SODIMM 129 */
 		>;
 	};
 
 	pinctrl_usdhc1: usdhc1-grp {
 		fsl,pins = <
-			MX6UL_PAD_SD1_CLK__USDHC1_CLK		0x17059
-			MX6UL_PAD_SD1_CMD__USDHC1_CMD		0x10059
-			MX6UL_PAD_SD1_DATA0__USDHC1_DATA0	0x17059
-			MX6UL_PAD_SD1_DATA1__USDHC1_DATA1	0x17059
-			MX6UL_PAD_SD1_DATA2__USDHC1_DATA2	0x17059
-			MX6UL_PAD_SD1_DATA3__USDHC1_DATA3	0x17059
+			MX6UL_PAD_SD1_CLK__USDHC1_CLK		0x17059 /* SODIMM 47 */
+			MX6UL_PAD_SD1_CMD__USDHC1_CMD		0x10059 /* SODIMM 190 */
+			MX6UL_PAD_SD1_DATA0__USDHC1_DATA0	0x17059 /* SODIMM 192 */
+			MX6UL_PAD_SD1_DATA1__USDHC1_DATA1	0x17059 /* SODIMM 49 */
+			MX6UL_PAD_SD1_DATA2__USDHC1_DATA2	0x17059 /* SODIMM 51 */
+			MX6UL_PAD_SD1_DATA3__USDHC1_DATA3	0x17059 /* SODIMM 53 */
 		>;
 	};
 
@@ -533,7 +533,7 @@ MX6UL_PAD_CSI_DATA03__USDHC2_DATA3	0x17059
 			MX6UL_PAD_CSI_HSYNC__USDHC2_CMD		0x17059
 			MX6UL_PAD_CSI_VSYNC__USDHC2_CLK		0x17059
 
-			MX6UL_PAD_GPIO1_IO03__OSC32K_32K_OUT	0x14
+			MX6UL_PAD_GPIO1_IO03__OSC32K_32K_OUT	0x10
 		>;
 	};
 
@@ -547,23 +547,23 @@ MX6UL_PAD_LCD_RESET__WDOG1_WDOG_ANY    0x30b0
 &iomuxc_snvs {
 	pinctrl_snvs_gpio1: snvs-gpio1-grp {
 		fsl,pins = <
-			MX6ULL_PAD_SNVS_TAMPER6__GPIO5_IO06	0x14 /* SODIMM 93 */
-			MX6ULL_PAD_SNVS_TAMPER3__GPIO5_IO03	0x14 /* SODIMM 95 */
-			MX6ULL_PAD_BOOT_MODE0__GPIO5_IO10	0x74 /* SODIMM 105 */
-			MX6ULL_PAD_SNVS_TAMPER5__GPIO5_IO05	0x14 /* SODIMM 131 USBH OC */
-			MX6ULL_PAD_SNVS_TAMPER8__GPIO5_IO08	0x74 /* SODIMM 138 */
+			MX6ULL_PAD_SNVS_TAMPER6__GPIO5_IO06	0x110a0	/* SODIMM 93 */
+			MX6ULL_PAD_SNVS_TAMPER3__GPIO5_IO03	0x110a0	/* SODIMM 95 */
+			MX6ULL_PAD_BOOT_MODE0__GPIO5_IO10	0x1b0a0	/* SODIMM 105 */
+			MX6ULL_PAD_SNVS_TAMPER5__GPIO5_IO05	0x0b0a0	/* SODIMM 131 */
+			MX6ULL_PAD_SNVS_TAMPER8__GPIO5_IO08	0x110a0	/* SODIMM 138 */
 		>;
 	};
 
 	pinctrl_snvs_gpio2: snvs-gpio2-grp { /* ATMEL MXT TOUCH */
 		fsl,pins = <
-			MX6ULL_PAD_SNVS_TAMPER4__GPIO5_IO04	0x74 /* SODIMM 107 */
+			MX6ULL_PAD_SNVS_TAMPER4__GPIO5_IO04	0xb0a0	/* SODIMM 107 */
 		>;
 	};
 
 	pinctrl_snvs_gpio3: snvs-gpio3-grp { /* Wifi pins */
 		fsl,pins = <
-			MX6ULL_PAD_BOOT_MODE1__GPIO5_IO11	0x14 /* SODIMM 127 */
+			MX6ULL_PAD_BOOT_MODE1__GPIO5_IO11	0x130a0	/* SODIMM 127 */
 		>;
 	};
 
@@ -587,13 +587,13 @@ MX6ULL_PAD_SNVS_TAMPER2__GPIO5_IO02	0x130b0
 
 	pinctrl_snvs_gpiokeys: snvs-gpiokeys-grp {
 		fsl,pins = <
-			MX6ULL_PAD_SNVS_TAMPER1__GPIO5_IO01	0x130b0
+			MX6ULL_PAD_SNVS_TAMPER1__GPIO5_IO01	0x130a0	/* SODIMM 45 */
 		>;
 	};
 
 	pinctrl_snvs_usdhc1_cd: snvs-usdhc1-cd-grp {
 		fsl,pins = <
-			MX6ULL_PAD_SNVS_TAMPER0__GPIO5_IO00	0x1b0b0 /* CD */
+			MX6ULL_PAD_SNVS_TAMPER0__GPIO5_IO00	0x1b0a0 /* SODIMM 43 */
 		>;
 	};
 
@@ -605,7 +605,7 @@ MX6ULL_PAD_SNVS_TAMPER0__GPIO5_IO00	0x0
 
 	pinctrl_snvs_wifi_pdn: snvs-wifi-pdn-grp {
 		fsl,pins = <
-			MX6ULL_PAD_BOOT_MODE1__GPIO5_IO11	0x14
+			MX6ULL_PAD_BOOT_MODE1__GPIO5_IO11	0x130a0
 		>;
 	};
 };
diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
index 6f50ebf..e57da0d 100644
--- a/arch/arm/boot/dts/imx7d-pico.dtsi
+++ b/arch/arm/boot/dts/imx7d-pico.dtsi
@@ -7,12 +7,42 @@
 #include "imx7d.dtsi"
 
 / {
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pwms = <&pwm4 0 50000 0>;
+		brightness-levels = <0 36 72 108 144 180 216 255>;
+		default-brightness-level = <6>;
+	};
+
 	/* Will be filled by the bootloader */
 	memory@80000000 {
 		device_type = "memory";
 		reg = <0x80000000 0>;
 	};
 
+	panel {
+		compatible = "vxt,vl050-8048nt-c01";
+		backlight = <&backlight>;
+		power-supply = <&reg_lcd_3v3>;
+
+		port {
+			panel_in: endpoint {
+				remote-endpoint = <&display_out>;
+			};
+		};
+	};
+
+	reg_lcd_3v3: regulator-lcd-3v3 {
+		compatible = "regulator-fixed";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_reg_lcdreg_on>;
+		regulator-name = "lcd-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio1 6 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
 	reg_wlreg_on: regulator-wlreg_on {
 		compatible = "regulator-fixed";
 		pinctrl-names = "default";
@@ -230,6 +260,18 @@ vgen6_reg: vldo4 {
 	};
 };
 
+&lcdif {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_lcdif>;
+	status = "okay";
+
+	port {
+		display_out: endpoint {
+			remote-endpoint = <&panel_in>;
+		};
+	};
+};
+
 &sai1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_sai1>;
@@ -260,6 +302,8 @@ &pwm3 {
 };
 
 &pwm4 { /* Backlight */
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm4>;
 	status = "okay";
 };
 
@@ -413,6 +457,40 @@ MX7D_PAD_SAI1_RX_SYNC__I2C4_SCL		0x4000007f
 		>;
 	};
 
+	pinctrl_lcdif: lcdifgrp {
+		fsl,pins = <
+			MX7D_PAD_LCD_DATA00__LCD_DATA0		0x79
+			MX7D_PAD_LCD_DATA01__LCD_DATA1		0x79
+			MX7D_PAD_LCD_DATA02__LCD_DATA2		0x79
+			MX7D_PAD_LCD_DATA03__LCD_DATA3		0x79
+			MX7D_PAD_LCD_DATA04__LCD_DATA4		0x79
+			MX7D_PAD_LCD_DATA05__LCD_DATA5		0x79
+			MX7D_PAD_LCD_DATA06__LCD_DATA6		0x79
+			MX7D_PAD_LCD_DATA07__LCD_DATA7		0x79
+			MX7D_PAD_LCD_DATA08__LCD_DATA8		0x79
+			MX7D_PAD_LCD_DATA09__LCD_DATA9		0x79
+			MX7D_PAD_LCD_DATA10__LCD_DATA10		0x79
+			MX7D_PAD_LCD_DATA11__LCD_DATA11		0x79
+			MX7D_PAD_LCD_DATA12__LCD_DATA12		0x79
+			MX7D_PAD_LCD_DATA13__LCD_DATA13		0x79
+			MX7D_PAD_LCD_DATA14__LCD_DATA14		0x79
+			MX7D_PAD_LCD_DATA15__LCD_DATA15		0x79
+			MX7D_PAD_LCD_DATA16__LCD_DATA16		0x79
+			MX7D_PAD_LCD_DATA17__LCD_DATA17		0x79
+			MX7D_PAD_LCD_DATA18__LCD_DATA18		0x79
+			MX7D_PAD_LCD_DATA19__LCD_DATA19		0x79
+			MX7D_PAD_LCD_DATA20__LCD_DATA20		0x79
+			MX7D_PAD_LCD_DATA21__LCD_DATA21		0x79
+			MX7D_PAD_LCD_DATA22__LCD_DATA22		0x79
+			MX7D_PAD_LCD_DATA23__LCD_DATA23		0x79
+			MX7D_PAD_LCD_CLK__LCD_CLK		0x79
+			MX7D_PAD_LCD_ENABLE__LCD_ENABLE		0x78
+			MX7D_PAD_LCD_VSYNC__LCD_VSYNC		0x78
+			MX7D_PAD_LCD_HSYNC__LCD_HSYNC		0x78
+			MX7D_PAD_LCD_RESET__GPIO3_IO4		0x14
+		>;
+	};
+
 	pinctrl_pwm1: pwm1 {
 		fsl,pins = <
 			MX7D_PAD_GPIO1_IO08__PWM1_OUT   0x7f
@@ -431,6 +509,12 @@ MX7D_PAD_GPIO1_IO10__PWM3_OUT   0x7f
 		>;
 	};
 
+	pinctrl_pwm4: pwm4grp {
+		fsl,pins = <
+			MX7D_PAD_GPIO1_IO11__PWM4_OUT	0x7f
+		>;
+	};
+
 	pinctrl_reg_wlreg_on: regregongrp {
 		fsl,pins = <
 			MX7D_PAD_ECSPI1_SCLK__GPIO4_IO16	0x59
@@ -577,6 +661,12 @@ MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2	0x7d
 		>;
 	};
 
+	pinctrl_reg_lcdreg_on: reglcdongrp {
+		fsl,pins = <
+			MX7D_PAD_LPSR_GPIO1_IO06__GPIO1_IO6	0x59
+		>;
+	};
+
 	pinctrl_wdog: wdoggrp {
 		fsl,pins = <
 			MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B	0x74
diff --git a/arch/arm/boot/dts/imx7d-sdb-reva.dts b/arch/arm/boot/dts/imx7d-sdb-reva.dts
index 7ce9d8c..cabdaa6 100644
--- a/arch/arm/boot/dts/imx7d-sdb-reva.dts
+++ b/arch/arm/boot/dts/imx7d-sdb-reva.dts
@@ -7,6 +7,9 @@
 #include "imx7d-sdb.dts"
 
 / {
+	model = "Freescale i.MX7 SabreSD RevA Board";
+	compatible = "fsl,imx7d-sdb-reva", "fsl,imx7d";
+
 	reg_usb_otg2_vbus: regulator-usb-otg2-vbus {
 		pinctrl-0 = <&pinctrl_usb_otg2_vbus_reg_reva>;
 		gpio = <&gpio4 7 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index d8acd7c..92f6d0c 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -12,7 +12,7 @@ cpu0: cpu@0 {
 			clock-frequency = <996000000>;
 			operating-points-v2 = <&cpu0_opp_table>;
 			#cooling-cells = <2>;
-			nvmem-cells = <&cpu_speed_grade>;
+			nvmem-cells = <&fuse_grade>;
 			nvmem-cell-names = "speed_grade";
 		};
 
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index 1b812f4..568d7a9 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -152,8 +152,7 @@ tempmon: tempmon {
 		interrupt-parent = <&gpc>;
 		interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
 		fsl,tempmon = <&anatop>;
-		nvmem-cells = <&tempmon_calib>,
-			<&tempmon_temp_grade>;
+		nvmem-cells = <&tempmon_calib>,	<&fuse_grade>;
 		nvmem-cell-names = "calib", "temp_grade";
 		clocks = <&clks IMX7D_PLL_SYS_MAIN_CLK>;
 	};
@@ -548,11 +547,7 @@ tempmon_calib: calib@3c {
 					reg = <0x3c 0x4>;
 				};
 
-				tempmon_temp_grade: temp-grade@10 {
-					reg = <0x10 0x4>;
-				};
-
-				cpu_speed_grade: speed-grade@10 {
+				fuse_grade: fuse-grade@10 {
 					reg = <0x10 0x4>;
 				};
 			};
@@ -658,6 +653,12 @@ pgc_pcie_phy: power-domain@1 {
 						reg = <1>;
 						power-supply = <&reg_1p0d>;
 					};
+
+					pgc_hsic_phy: power-domain@2 {
+						#power-domain-cells = <0>;
+						reg = <2>;
+						power-supply = <&reg_1p2>;
+					};
 				};
 			};
 		};
@@ -1101,6 +1102,7 @@ usbh: usb@30b30000 {
 				compatible = "fsl,imx7d-usb", "fsl,imx27-usb";
 				reg = <0x30b30000 0x200>;
 				interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+				power-domains = <&pgc_hsic_phy>;
 				clocks = <&clks IMX7D_USB_CTRL_CLK>;
 				fsl,usbphy = <&usbphynop3>;
 				fsl,usbmisc = <&usbmisc3 0>;
diff --git a/arch/arm/boot/dts/imx7ulp-com.dts b/arch/arm/boot/dts/imx7ulp-com.dts
new file mode 100644
index 0000000..d76fea3
--- /dev/null
+++ b/arch/arm/boot/dts/imx7ulp-com.dts
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2019 NXP
+
+/dts-v1/;
+
+#include "imx7ulp.dtsi"
+#include <dt-bindings/input/input.h>
+
+/ {
+	model = "Embedded Artists i.MX7ULP COM";
+	compatible = "ea,imx7ulp-com", "fsl,imx7ulp";
+
+	chosen {
+		stdout-path = &lpuart4;
+	};
+
+	memory@60000000 {
+		device_type = "memory";
+		reg = <0x60000000 0x4000000>;
+	};
+};
+
+&lpuart4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_lpuart4>;
+	status = "okay";
+};
+
+&usbotg1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg1_id>;
+	srp-disable;
+	hnp-disable;
+	adp-disable;
+	status = "okay";
+};
+
+&usdhc0 {
+	assigned-clocks = <&pcc2 IMX7ULP_CLK_USDHC0>;
+	assigned-clock-parents = <&scg1 IMX7ULP_CLK_APLL_PFD1>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc0>;
+	non-removable;
+	bus-width = <8>;
+	no-1-8-v;
+	status = "okay";
+};
+
+&iomuxc1 {
+	pinctrl_lpuart4: lpuart4grp {
+		fsl,pins = <
+			IMX7ULP_PAD_PTC3__LPUART4_RX	0x3
+			IMX7ULP_PAD_PTC2__LPUART4_TX	0x3
+		>;
+	};
+
+	pinctrl_usbotg1_id: otg1idgrp {
+		fsl,pins = <
+			IMX7ULP_PAD_PTC13__USB0_ID	0x10003
+		>;
+	};
+
+	pinctrl_usdhc0: usdhc0grp {
+		fsl,pins = <
+			IMX7ULP_PAD_PTD1__SDHC0_CMD	0x43
+			IMX7ULP_PAD_PTD2__SDHC0_CLK	0x10042
+			IMX7ULP_PAD_PTD3__SDHC0_D7	0x43
+			IMX7ULP_PAD_PTD4__SDHC0_D6	0x43
+			IMX7ULP_PAD_PTD5__SDHC0_D5	0x43
+			IMX7ULP_PAD_PTD6__SDHC0_D4	0x43
+			IMX7ULP_PAD_PTD7__SDHC0_D3	0x43
+			IMX7ULP_PAD_PTD8__SDHC0_D2	0x43
+			IMX7ULP_PAD_PTD9__SDHC0_D1	0x43
+			IMX7ULP_PAD_PTD10__SDHC0_D0	0x43
+			IMX7ULP_PAD_PTD11__SDHC0_DQS	0x42
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/iwg20d-q7-common.dtsi b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
index ae75a1db..ebbe151 100644
--- a/arch/arm/boot/dts/iwg20d-q7-common.dtsi
+++ b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
@@ -46,6 +46,49 @@ audio_clock: audio_clock {
 		clock-frequency = <26000000>;
 	};
 
+	lcd_backlight: backlight {
+		compatible = "pwm-backlight";
+
+		pwms = <&pwm3 0 5000000 0>;
+		brightness-levels = <0 4 8 16 32 64 128 255>;
+		default-brightness-level = <7>;
+		enable-gpios = <&gpio5 14 GPIO_ACTIVE_HIGH>;
+	};
+
+	lvds-receiver {
+		compatible = "ti,ds90cf384a", "lvds-decoder";
+		powerdown-gpios = <&gpio7 25 GPIO_ACTIVE_LOW>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				lvds_receiver_in: endpoint {
+					remote-endpoint = <&lvds0_out>;
+				};
+			};
+			port@1 {
+				reg = <1>;
+				lvds_receiver_out: endpoint {
+					remote-endpoint = <&panel_in>;
+				};
+			};
+		};
+	};
+
+	panel {
+		compatible = "edt,etm0700g0dh6";
+		backlight = <&lcd_backlight>;
+
+		port {
+			panel_in: endpoint {
+				remote-endpoint = <&lvds_receiver_out>;
+			};
+		};
+	};
+
 	reg_1p5v: 1p5v {
 		compatible = "regulator-fixed";
 		regulator-name = "1P5V";
@@ -89,8 +132,7 @@ vccq_sdhi1: regulator-vccq-sdhi1 {
 
 		gpios = <&gpio2 30 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 };
 
@@ -120,6 +162,18 @@ &cmt0 {
 	status = "okay";
 };
 
+&du {
+	status = "okay";
+};
+
+&gpio2 {
+	touch-interrupt {
+		gpio-hog;
+		gpios = <12 GPIO_ACTIVE_LOW>;
+		input;
+	};
+};
+
 &hsusb {
 	status = "okay";
 	pinctrl-0 = <&usb0_pins>;
@@ -147,6 +201,25 @@ sgtl5000: codec@a {
 		VDDIO-supply = <&reg_3p3v>;
 		VDDD-supply = <&reg_1p5v>;
 	};
+
+	touch: touchpanel@38 {
+		compatible = "edt,edt-ft5406";
+		reg = <0x38>;
+		interrupt-parent = <&gpio2>;
+		interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
+	};
+};
+
+&lvds0 {
+	status = "okay";
+
+	ports {
+		port@1 {
+			lvds0_out: endpoint {
+				remote-endpoint = <&lvds_receiver_in>;
+			};
+		};
+	};
 };
 
 &pci0 {
@@ -180,6 +253,11 @@ i2c2_pins: i2c2 {
 		function = "i2c2";
 	};
 
+	pwm3_pins: pwm3 {
+		groups = "pwm3";
+		function = "pwm3";
+	};
+
 	scif0_pins: scif0 {
 		groups = "scif0_data_d";
 		function = "scif0";
@@ -218,6 +296,12 @@ usb1_pins: usb1 {
 	};
 };
 
+&pwm3 {
+	pinctrl-0 = <&pwm3_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
 &rcar_sound {
 	pinctrl-0 = <&sound_pins>;
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/iwg20d-q7-dbcm-ca.dtsi b/arch/arm/boot/dts/iwg20d-q7-dbcm-ca.dtsi
index 0e99df2..ede2e0c 100644
--- a/arch/arm/boot/dts/iwg20d-q7-dbcm-ca.dtsi
+++ b/arch/arm/boot/dts/iwg20d-q7-dbcm-ca.dtsi
@@ -39,7 +39,6 @@ &can1 {
 &du {
 	pinctrl-0 = <&du_pins>;
 	pinctrl-names = "default";
-	status = "okay";
 
 	ports {
 		port@0 {
diff --git a/arch/arm/boot/dts/ls1021a-tsn.dts b/arch/arm/boot/dts/ls1021a-tsn.dts
index 5b76890..9d8f0c2 100644
--- a/arch/arm/boot/dts/ls1021a-tsn.dts
+++ b/arch/arm/boot/dts/ls1021a-tsn.dts
@@ -203,11 +203,15 @@ &mdio0 {
 	/* AR8031 */
 	sgmii_phy1: ethernet-phy@1 {
 		reg = <0x1>;
+		/* SGMII1_PHY_INT_B: connected to IRQ2, active low */
+		interrupts-extended = <&extirq 2 IRQ_TYPE_LEVEL_LOW>;
 	};
 
 	/* AR8031 */
 	sgmii_phy2: ethernet-phy@2 {
 		reg = <0x2>;
+		/* SGMII2_PHY_INT_B: connected to IRQ2, active low */
+		interrupts-extended = <&extirq 2 IRQ_TYPE_LEVEL_LOW>;
 	};
 
 	/* BCM5464 quad PHY */
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 2f6977a..0855b1f 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -216,6 +216,25 @@ scfg: scfg@1570000 {
 			compatible = "fsl,ls1021a-scfg", "syscon";
 			reg = <0x0 0x1570000 0x0 0x10000>;
 			big-endian;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x0 0x1570000 0x10000>;
+
+			extirq: interrupt-controller@1ac {
+				compatible = "fsl,ls1021a-extirq";
+				#interrupt-cells = <2>;
+				#address-cells = <0>;
+				interrupt-controller;
+				reg = <0x1ac 4>;
+				interrupt-map =
+					<0 0 &gic GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+					<1 0 &gic GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+					<2 0 &gic GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+					<3 0 &gic GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+					<4 0 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+					<5 0 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-map-mask = <0xffffffff 0x0>;
+			};
 		};
 
 		crypto: crypto@1700000 {
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index c4447f6..5d19830 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -282,4 +282,11 @@ efuse: nvmem@0 {
 			};
 		};
 	};
+
+	xtal: xtal-clk {
+		compatible = "fixed-clock";
+		clock-frequency = <24000000>;
+		clock-output-names = "xtal";
+		#clock-cells = <0>;
+	};
 }; /* end of / */
diff --git a/arch/arm/boot/dts/meson6.dtsi b/arch/arm/boot/dts/meson6.dtsi
index 2d31b7c..4716030 100644
--- a/arch/arm/boot/dts/meson6.dtsi
+++ b/arch/arm/boot/dts/meson6.dtsi
@@ -36,13 +36,6 @@ apb2: bus@d0000000 {
 		ranges = <0x0 0xd0000000 0x40000>;
 	};
 
-	xtal: xtal-clk {
-		compatible = "fixed-clock";
-		clock-frequency = <24000000>;
-		clock-output-names = "xtal";
-		#clock-cells = <0>;
-	};
-
 	clk81: clk@0 {
 		#clock-cells = <0>;
 		compatible = "fixed-clock";
diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
index 3c534cd..eedb925 100644
--- a/arch/arm/boot/dts/meson8.dtsi
+++ b/arch/arm/boot/dts/meson8.dtsi
@@ -3,6 +3,7 @@
  * Copyright 2014 Carlo Caione <carlo@caione.org>
  */
 
+#include <dt-bindings/clock/meson8-ddr-clkc.h>
 #include <dt-bindings/clock/meson8b-clkc.h>
 #include <dt-bindings/gpio/meson8-gpio.h>
 #include <dt-bindings/reset/amlogic,meson8b-clkc-reset.h>
@@ -129,8 +130,8 @@ opp-1992000000 {
 	gpu_opp_table: gpu-opp-table {
 		compatible = "operating-points-v2";
 
-		opp-182150000 {
-			opp-hz = /bits/ 64 <182150000>;
+		opp-182142857 {
+			opp-hz = /bits/ 64 <182142857>;
 			opp-microvolt = <1150000>;
 		};
 		opp-318750000 {
@@ -195,6 +196,14 @@ mmcbus: bus@c8000000 {
 		#size-cells = <1>;
 		ranges = <0x0 0xc8000000 0x8000>;
 
+		ddr_clkc: clock-controller@400 {
+			compatible = "amlogic,meson8-ddr-clkc";
+			reg = <0x400 0x20>;
+			clocks = <&xtal>;
+			clock-names = "xtal";
+			#clock-cells = <1>;
+		};
+
 		dmcbus: bus@6000 {
 			compatible = "simple-bus";
 			reg = <0x6000 0x400>;
@@ -455,6 +464,8 @@ &gpio_intc {
 &hhi {
 	clkc: clock-controller {
 		compatible = "amlogic,meson8-clkc";
+		clocks = <&xtal>, <&ddr_clkc DDR_CLKID_DDR_PLL>;
+		clock-names = "xtal", "ddr_pll";
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
@@ -529,8 +540,7 @@ &rtc {
 
 &saradc {
 	compatible = "amlogic,meson8-saradc", "amlogic,meson-saradc";
-	clocks = <&clkc CLKID_XTAL>,
-		<&clkc CLKID_SAR_ADC>;
+	clocks = <&xtal>, <&clkc CLKID_SAR_ADC>;
 	clock-names = "clkin", "core";
 	amlogic,hhi-sysctrl = <&hhi>;
 	nvmem-cells = <&temperature_calib>;
@@ -548,31 +558,31 @@ &spifc {
 };
 
 &timer_abcde {
-	clocks = <&clkc CLKID_XTAL>, <&clkc CLKID_CLK81>;
+	clocks = <&xtal>, <&clkc CLKID_CLK81>;
 	clock-names = "xtal", "pclk";
 };
 
 &uart_AO {
 	compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
-	clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_XTAL>, <&clkc CLKID_CLK81>;
+	clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>;
 	clock-names = "baud", "xtal", "pclk";
 };
 
 &uart_A {
 	compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
-	clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_XTAL>, <&clkc CLKID_UART0>;
+	clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>;
 	clock-names = "baud", "xtal", "pclk";
 };
 
 &uart_B {
 	compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
-	clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_XTAL>, <&clkc CLKID_UART1>;
+	clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>;
 	clock-names = "baud", "xtal", "pclk";
 };
 
 &uart_C {
 	compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
-	clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_XTAL>, <&clkc CLKID_UART2>;
+	clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>;
 	clock-names = "baud", "xtal", "pclk";
 };
 
diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts
index bed1dfe..163a200 100644
--- a/arch/arm/boot/dts/meson8b-ec100.dts
+++ b/arch/arm/boot/dts/meson8b-ec100.dts
@@ -377,7 +377,7 @@ &pwm_cd {
 	status = "okay";
 	pinctrl-0 = <&pwm_c1_pins>, <&pwm_d_pins>;
 	pinctrl-names = "default";
-	clocks = <&clkc CLKID_XTAL>, <&clkc CLKID_XTAL>;
+	clocks = <&xtal>, <&xtal>;
 	clock-names = "clkin0", "clkin1";
 };
 
diff --git a/arch/arm/boot/dts/meson8b-mxq.dts b/arch/arm/boot/dts/meson8b-mxq.dts
index 6e39ad5..33037ef 100644
--- a/arch/arm/boot/dts/meson8b-mxq.dts
+++ b/arch/arm/boot/dts/meson8b-mxq.dts
@@ -165,7 +165,7 @@ &pwm_cd {
 	status = "okay";
 	pinctrl-0 = <&pwm_c1_pins>, <&pwm_d_pins>;
 	pinctrl-names = "default";
-	clocks = <&clkc CLKID_XTAL>, <&clkc CLKID_XTAL>;
+	clocks = <&xtal>, <&xtal>;
 	clock-names = "clkin0", "clkin1";
 };
 
diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
index a24eccc..a2a4780 100644
--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
+++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
@@ -340,7 +340,7 @@ &pwm_cd {
 	status = "okay";
 	pinctrl-0 = <&pwm_c1_pins>, <&pwm_d_pins>;
 	pinctrl-names = "default";
-	clocks = <&clkc CLKID_XTAL>, <&clkc CLKID_XTAL>;
+	clocks = <&xtal>, <&xtal>;
 	clock-names = "clkin0", "clkin1";
 };
 
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index 099bf8e7..e34b039 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -4,6 +4,7 @@
  * Author: Carlo Caione <carlo@endlessm.com>
  */
 
+#include <dt-bindings/clock/meson8-ddr-clkc.h>
 #include <dt-bindings/clock/meson8b-clkc.h>
 #include <dt-bindings/gpio/meson8b-gpio.h>
 #include <dt-bindings/reset/amlogic,meson8b-reset.h>
@@ -125,8 +126,8 @@ opp-255000000 {
 			opp-hz = /bits/ 64 <255000000>;
 			opp-microvolt = <1100000>;
 		};
-		opp-364300000 {
-			opp-hz = /bits/ 64 <364300000>;
+		opp-364285714 {
+			opp-hz = /bits/ 64 <364285714>;
 			opp-microvolt = <1100000>;
 		};
 		opp-425000000 {
@@ -172,6 +173,14 @@ mmcbus: bus@c8000000 {
 		#size-cells = <1>;
 		ranges = <0x0 0xc8000000 0x8000>;
 
+		ddr_clkc: clock-controller@400 {
+			compatible = "amlogic,meson8b-ddr-clkc";
+			reg = <0x400 0x20>;
+			clocks = <&xtal>;
+			clock-names = "xtal";
+			#clock-cells = <1>;
+		};
+
 		dmcbus: bus@6000 {
 			compatible = "simple-bus";
 			reg = <0x6000 0x400>;
@@ -433,7 +442,9 @@ &gpio_intc {
 
 &hhi {
 	clkc: clock-controller {
-		compatible = "amlogic,meson8-clkc";
+		compatible = "amlogic,meson8b-clkc";
+		clocks = <&xtal>, <&ddr_clkc DDR_CLKID_DDR_PLL>;
+		clock-names = "xtal", "ddr_pll";
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
@@ -508,8 +519,7 @@ &rtc {
 
 &saradc {
 	compatible = "amlogic,meson8b-saradc", "amlogic,meson-saradc";
-	clocks = <&clkc CLKID_XTAL>,
-		<&clkc CLKID_SAR_ADC>;
+	clocks = <&xtal>, <&clkc CLKID_SAR_ADC>;
 	clock-names = "clkin", "core";
 	amlogic,hhi-sysctrl = <&hhi>;
 	nvmem-cells = <&temperature_calib>;
@@ -523,31 +533,31 @@ &sdio {
 };
 
 &timer_abcde {
-	clocks = <&clkc CLKID_XTAL>, <&clkc CLKID_CLK81>;
+	clocks = <&xtal>, <&clkc CLKID_CLK81>;
 	clock-names = "xtal", "pclk";
 };
 
 &uart_AO {
 	compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
-	clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_XTAL>, <&clkc CLKID_CLK81>;
+	clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>;
 	clock-names = "baud", "xtal", "pclk";
 };
 
 &uart_A {
 	compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
-	clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_XTAL>, <&clkc CLKID_UART0>;
+	clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>;
 	clock-names = "baud", "xtal", "pclk";
 };
 
 &uart_B {
 	compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
-	clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_XTAL>, <&clkc CLKID_UART1>;
+	clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>;
 	clock-names = "baud", "xtal", "pclk";
 };
 
 &uart_C {
 	compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
-	clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_XTAL>, <&clkc CLKID_UART2>;
+	clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>;
 	clock-names = "baud", "xtal", "pclk";
 };
 
diff --git a/arch/arm/boot/dts/mmp3-dell-ariel.dts b/arch/arm/boot/dts/mmp3-dell-ariel.dts
index c1947b5..15449c7 100644
--- a/arch/arm/boot/dts/mmp3-dell-ariel.dts
+++ b/arch/arm/boot/dts/mmp3-dell-ariel.dts
@@ -49,6 +49,28 @@ &usb_otg_phy0 {
 	status = "okay";
 };
 
+&hsic0 {
+	status = "okay";
+
+	usb1@1 {
+		compatible = "usb424,2640";
+		reg = <0x01>;
+		#address-cells = <0x01>;
+		#size-cells = <0x00>;
+
+		mass-storage@1 {
+			compatible = "usb424,4040";
+			reg = <0x01>;
+			status = "disabled";
+		};
+	};
+};
+
+&hsic_phy0 {
+	status = "okay";
+	reset-gpios = <&gpio 63 GPIO_ACTIVE_HIGH>;
+};
+
 &mmc3 {
 	status = "okay";
 	max-frequency = <50000000>;
diff --git a/arch/arm/boot/dts/mmp3.dtsi b/arch/arm/boot/dts/mmp3.dtsi
index 6f48082..59a108e 100644
--- a/arch/arm/boot/dts/mmp3.dtsi
+++ b/arch/arm/boot/dts/mmp3.dtsi
@@ -201,6 +201,50 @@ usb_otg0: usb-otg@d4208000 {
 				status = "disabled";
 			};
 
+			hsic_phy0: hsic-phy@f0001800 {
+				compatible = "marvell,mmp3-hsic-phy",
+					     "usb-nop-xceiv";
+				reg = <0xf0001800 0x40>;
+				#phy-cells = <0>;
+				status = "disabled";
+			};
+
+			hsic0: hsic@f0001000 {
+				compatible = "marvell,pxau2o-ehci";
+				reg = <0xf0001000 0x200>;
+				interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&soc_clocks MMP2_CLK_USBHSIC0>;
+				clock-names = "USBCLK";
+				phys = <&hsic_phy0>;
+				phy-names = "usb";
+				phy_type = "hsic";
+				#address-cells = <0x01>;
+				#size-cells = <0x00>;
+				status = "disabled";
+			};
+
+			hsic_phy1: hsic-phy@f0002800 {
+				compatible = "marvell,mmp3-hsic-phy",
+					     "usb-nop-xceiv";
+				reg = <0xf0002800 0x40>;
+				#phy-cells = <0>;
+				status = "disabled";
+			};
+
+			hsic1: hsic@f0002000 {
+				compatible = "marvell,pxau2o-ehci";
+				reg = <0xf0002000 0x200>;
+				interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&soc_clocks MMP2_CLK_USBHSIC1>;
+				clock-names = "USBCLK";
+				phys = <&hsic_phy1>;
+				phy-names = "usb";
+				phy_type = "hsic";
+				#address-cells = <0x01>;
+				#size-cells = <0x00>;
+				status = "disabled";
+			};
+
 			mmc1: mmc@d4280000 {
 				compatible = "mrvl,pxav3-mmc";
 				reg = <0xd4280000 0x120>;
diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
index 936ad01..e39eee6 100644
--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
@@ -27,11 +27,12 @@ cpcap_battery: battery {
 			compatible = "motorola,cpcap-battery";
 			interrupts-extended = <
 				&cpcap 6 0 &cpcap 5 0 &cpcap 3 0
-				&cpcap 20 0 &cpcap 54 0
+				&cpcap 20 0 &cpcap 54 0 &cpcap 57 0
 			>;
 			interrupt-names =
 				"eol", "lowbph", "lowbpl",
-				"chrgcurr1", "battdetb";
+				"chrgcurr1", "battdetb",
+				"cccal";
 			io-channels = <&cpcap_adc 0 &cpcap_adc 1
 				       &cpcap_adc 5 &cpcap_adc 6>;
 			io-channel-names = "battdetb", "battp",
diff --git a/arch/arm/boot/dts/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
index da6b107..8566550 100644
--- a/arch/arm/boot/dts/motorola-mapphone-common.dtsi
+++ b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
@@ -249,6 +249,13 @@ hdmi_out: endpoint {
 	};
 };
 
+/* Battery NVRAM on 1-wire handled by w1_ds250x driver */
+&hdqw1w {
+	pinctrl-0 = <&hdq_pins>;
+	pinctrl-names = "default";
+	ti,mode = "1w";
+};
+
 &i2c1 {
 	tmp105@48 {
 		compatible = "ti,tmp105";
@@ -440,6 +447,13 @@ OMAP4_IOPAD(0x098, PIN_INPUT | MUX_MODE3)
 		>;
 	};
 
+	hdq_pins: pinmux_hdq_pins {
+		pinctrl-single,pins = <
+		/* 0x4a100120 hdq_sio.hdq_sio aa27 */
+		OMAP4_IOPAD(0x120, PIN_INPUT | MUX_MODE0)
+		>;
+	};
+
 	/* hdmi_cec.hdmi_cec, hdmi_scl.hdmi_scl, hdmi_sda.hdmi_sda */
 	dss_hdmi_pins: pinmux_dss_hdmi_pins {
 		pinctrl-single,pins = <
@@ -650,6 +664,11 @@ OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
 	};
 };
 
+/* RNG is used by secure mode and not accessible */
+&rng_target {
+	status = "disabled";
+};
+
 /* Configure pwm clock source for timers 8 & 9 */
 &timer8 {
 	assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index 000bf16..0e453fe 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -8,6 +8,7 @@
  * kind, whether express or implied.
  */
 
+#include <dt-bindings/bus/ti-sysc.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/pinctrl/omap.h>
@@ -79,17 +80,37 @@ intc: interrupt-controller@1 {
 			reg = <0x480FE000 0x1000>;
 		};
 
-		sdma: dma-controller@48056000 {
-			compatible = "ti,omap2430-sdma", "ti,omap2420-sdma";
-			ti,hwmods = "dma";
-			reg = <0x48056000 0x1000>;
-			interrupts = <12>,
-				     <13>,
-				     <14>,
-				     <15>;
-			#dma-cells = <1>;
-			dma-channels = <32>;
-			dma-requests = <64>;
+		target-module@48056000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x48056000 0x4>,
+			      <0x4805602c 0x4>,
+			      <0x48056028 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+					 SYSC_OMAP2_EMUFREE |
+					 SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-midle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,syss-mask = <1>;
+			clocks = <&core_l3_ck>;
+			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x48056000 0x1000>;
+
+			sdma: dma-controller@0 {
+				compatible = "ti,omap2420-sdma", "ti,omap-sdma";
+				reg = <0 0x1000>;
+				interrupts = <12>,
+					     <13>,
+					     <14>,
+					     <15>;
+				#dma-cells = <1>;
+				dma-channels = <32>;
+				dma-requests = <64>;
+			};
 		};
 
 		i2c1: i2c@48070000 {
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 7f57af2..15ef759 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -309,6 +309,10 @@ wd_timer2: wdt@49016000 {
 	};
 };
 
+&sdma {
+	compatible = "ti,omap2430-sdma", "ti,omap-sdma";
+};
+
 &i2c1 {
 	compatible = "ti,omap2430-i2c";
 };
diff --git a/arch/arm/boot/dts/omap3-echo.dts b/arch/arm/boot/dts/omap3-echo.dts
new file mode 100644
index 0000000..93ffedd
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-echo.dts
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 André Hentschel <nerv@dawncrow.de>
+ */
+/dts-v1/;
+
+#include "dm3725.dtsi"
+
+#include <dt-bindings/input/input.h>
+
+/ {
+	model = "Amazon Echo (first generation)";
+	compatible = "amazon,omap3-echo", "ti,omap3630", "ti,omap3";
+
+	cpus {
+		cpu@0 {
+			cpu0-supply = <&vdd1_reg>;
+		};
+	};
+
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0x80000000 0xc600000>; /* 198 MB */
+	};
+
+	vcc5v: fixedregulator0 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc5v";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	vcc3v3: fixedregulator1 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	vcc1v8: fixedregulator2 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc1v8";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		reset-gpios = <&gpio1 21 GPIO_ACTIVE_LOW>;
+		post-power-on-delay-ms = <40>;
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&button_pins>;
+
+		mute-button {
+			label = "mute";
+			linux,code = <KEY_MUTE>;
+			gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;	/* GPIO_70 */
+			wakeup-source;
+		};
+
+		help-button {
+			label = "help";
+			linux,code = <KEY_HELP>;
+			gpios = <&gpio3 8 GPIO_ACTIVE_LOW>;	/* GPIO_72 */
+			wakeup-source;
+		};
+	};
+
+	rotary: rotary-encoder {
+		compatible = "rotary-encoder";
+		gpios = <
+			&gpio3  5 GPIO_ACTIVE_HIGH /* GPIO_69 */
+			&gpio3 12 GPIO_ACTIVE_HIGH /* GPIO_76 */
+		>;
+		linux,axis = <REL_X>;
+		rotary-encoder,relative-axis;
+	};
+};
+
+&i2c1 {
+	clock-frequency = <400000>;
+
+	tps: tps@2d {
+		reg = <0x2d>;
+	};
+};
+
+&i2c2 {
+	clock-frequency = <400000>;
+
+	lp5523A: lp5523A@32 {
+		compatible = "national,lp5523";
+		label = "q1";
+		reg = <0x32>;
+		clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */
+		enable-gpio = <&gpio4 13 GPIO_ACTIVE_HIGH>; /* GPIO_109 */
+
+		chan0 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan1 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan2 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan3 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan4 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan5 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan6 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan7 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan8 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+	};
+
+	lp5523B: lp5523B@33 {
+		compatible = "national,lp5523";
+		label = "q3";
+		reg = <0x33>;
+		clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */
+
+		chan0 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan1 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan2 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan3 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan4 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan5 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan6 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan7 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan8 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+	};
+
+	lp5523C: lp5523C@34 {
+		compatible = "national,lp5523";
+		label = "q4";
+		reg = <0x34>;
+		clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */
+
+		chan0 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan1 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan2 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan3 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan4 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan5 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan6 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan7 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan8 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+	};
+
+	lp5523D: lp552D@35 {
+		compatible = "national,lp5523";
+		label = "q2";
+		reg = <0x35>;
+		clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */
+
+		chan0 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan1 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan2 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan3 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan4 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan5 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan6 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan7 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+		chan8 {
+			led-cur = /bits/ 8 <12>;
+			max-cur = /bits/ 8 <15>;
+		};
+	};
+};
+
+#include "tps65910.dtsi"
+
+&omap3_pmx_core {
+	tps_pins: pinmux_tps_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x21e0, PIN_INPUT_PULLUP | PIN_OFF_INPUT_PULLUP | PIN_OFF_OUTPUT_LOW | PIN_OFF_WAKEUPENABLE | MUX_MODE0) /* sys_nirq.sys_nirq */
+		>;
+	};
+
+	button_pins: pinmux_button_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x20dc, PIN_INPUT | MUX_MODE4)	/* dss_data0.gpio_70 */
+			OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | MUX_MODE4)	/* dss_data2.gpio_72 */
+		>;
+	};
+
+	mmc1_pins: pinmux_mmc1_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x2144, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc1_clk.sdmmc1_clk */
+			OMAP3_CORE1_IOPAD(0x2146, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc1_cmd.sdmmc1_cmd */
+			OMAP3_CORE1_IOPAD(0x2148, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc1_dat0.sdmmc1_dat0 */
+			OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc1_dat1.sdmmc1_dat1 */
+			OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc1_dat2.sdmmc1_dat2 */
+			OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc1_dat3.sdmmc1_dat3 */
+		>;
+	};
+
+	mmc2_pins: pinmux_mmc2_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x2158, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc2_clk.sdmmc2_clk */
+			OMAP3_CORE1_IOPAD(0x215a, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc2_cmd.sdmmc2_cmd */
+			OMAP3_CORE1_IOPAD(0x215c, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc2_dat0.sdmmc2_dat0 */
+			OMAP3_CORE1_IOPAD(0x215e, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc2_dat1.sdmmc2_dat1 */
+			OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc2_dat2.sdmmc2_dat2 */
+			OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc2_dat3.sdmmc2_dat3 */
+			OMAP3_CORE1_IOPAD(0x2164, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc2_dat4.sdmmc2_dat4 */
+			OMAP3_CORE1_IOPAD(0x2166, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc2_dat5.sdmmc2_dat5 */
+			OMAP3_CORE1_IOPAD(0x2168, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc2_dat6.sdmmc2_dat6 */
+			OMAP3_CORE1_IOPAD(0x216a, PIN_INPUT_PULLUP | MUX_MODE0)		/* sdmmc2_dat7.sdmmc2_dat7 */
+		>;
+	};
+};
+
+&omap3_pmx_core2 {
+	mmc3_pins: pinmux_mmc3_pins {
+		pinctrl-single,pins = <
+			OMAP3630_CORE2_IOPAD(0x25d8, PIN_INPUT_PULLUP | MUX_MODE2)	/* etk_clk.sdmmc3_clk */
+			OMAP3630_CORE2_IOPAD(0x25da, PIN_INPUT_PULLUP | MUX_MODE2)	/* etk_ctl.sdmmc3_cmd */
+			OMAP3630_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLUP | MUX_MODE2)	/* etk_d3.sdmmc3_dat3 */
+			OMAP3630_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLUP | MUX_MODE2)	/* etk_d4.sdmmc3_dat0 */
+			OMAP3630_CORE2_IOPAD(0x25e6, PIN_INPUT_PULLUP | MUX_MODE2)	/* etk_d5.sdmmc3_dat1 */
+			OMAP3630_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLUP | MUX_MODE2)	/* etk_d6.sdmmc3_dat2 */
+		>;
+	};
+};
+
+&mmc1 {
+	status = "okay";
+	bus-width = <4>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc1_pins>;
+	vmmc-supply = <&vmmc_reg>;
+};
+
+&mmc2 {
+	status = "okay";
+	bus-width = <8>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc2_pins>;
+	vmmc-supply = <&vmmc_reg>;
+};
+
+&mmc3 {
+	status = "okay";
+	bus-width = <4>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc3_pins>;
+	non-removable;
+	disable-wp;
+	mmc-pwrseq = <&sdio_pwrseq>;
+	vmmc-supply = <&vcc3v3>;
+	vqmmc-supply = <&vcc1v8>;
+};
+
+&tps {
+	pinctrl-names = "default";
+	pinctrl-0 = <&tps_pins>;
+
+	interrupts = <7>; /* SYS_NIRQ cascaded to intc */
+	interrupt-parent = <&intc>;
+
+	ti,en-ck32k-xtal;
+	ti,system-power-controller;
+
+	vcc1-supply = <&vcc5v>;
+	vcc2-supply = <&vcc5v>;
+	vcc3-supply = <&vcc5v>;
+	vcc4-supply = <&vcc5v>;
+	vcc5-supply = <&vcc5v>;
+	vcc6-supply = <&vcc5v>;
+	vcc7-supply = <&vcc5v>;
+	vccio-supply = <&vcc5v>;
+
+	regulators {
+
+		vio_reg: regulator@1 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			regulator-always-on;
+		};
+
+		vdd1_reg: regulator@2 {
+			regulator-name = "vdd_mpu";
+			regulator-min-microvolt = <600000>;
+			regulator-max-microvolt = <1500000>;
+			regulator-boot-on;
+			regulator-always-on;
+		};
+
+		vdd2_reg: regulator@3 {
+			regulator-name = "vdd_dsp";
+			regulator-min-microvolt = <600000>;
+			regulator-max-microvolt = <1500000>;
+			regulator-always-on;
+		};
+
+		vdd3_reg: regulator@4 {
+			regulator-name = "vdd_core";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			regulator-always-on;
+		};
+
+		vdig1_reg: regulator@5 {
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <2700000>;
+			regulator-always-on;
+		};
+
+		vdig2_reg: regulator@6 {
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1800000>;
+			regulator-always-on;
+		};
+
+		vpll_reg: regulator@7 {
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <2500000>;
+			regulator-always-on;
+		};
+
+		vdac_reg: regulator@8 {
+			regulator-min-microvolt = <1100000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+
+		vaux1_reg: regulator@9 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <2850000>;
+			regulator-always-on;
+		};
+
+		vaux2_reg: regulator@10 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+
+		vaux33_reg: regulator@11 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+
+		vmmc_reg: regulator@12 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <3000000>;
+			regulator-always-on;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index a638e05..c3c6d7d 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -482,6 +482,11 @@ &vintdig {
 	regulator-always-on;
 };
 
+/* First two dma channels are reserved on secure omap3 */
+&sdma {
+	dma-channel-mask = <0xfffffffc>;
+};
+
 &twl {
 	twl_audio: audio {
 		compatible = "ti,twl4030-audio";
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 5698a3e..634ea16 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -206,17 +206,41 @@ intc: interrupt-controller@48200000 {
 			reg = <0x48200000 0x1000>;
 		};
 
-		sdma: dma-controller@48056000 {
-			compatible = "ti,omap3630-sdma", "ti,omap3430-sdma";
-			reg = <0x48056000 0x1000>;
-			interrupts = <12>,
-				     <13>,
-				     <14>,
-				     <15>;
-			#dma-cells = <1>;
-			dma-channels = <32>;
-			dma-requests = <96>;
-			ti,hwmods = "dma";
+		target-module@48056000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x48056000 0x4>,
+			      <0x4805602c 0x4>,
+			      <0x48056028 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+					 SYSC_OMAP2_EMUFREE |
+					 SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-midle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,syss-mask = <1>;
+			/* Domains (V, P, C): core, core_pwrdm, core_l3_clkdm */
+			clocks = <&core_l3_ick>;
+			clock-names = "ick";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x48056000 0x1000>;
+
+			sdma: dma-controller@0 {
+				compatible = "ti,omap3430-sdma", "ti,omap-sdma";
+				reg = <0x0 0x1000>;
+				interrupts = <12>,
+					     <13>,
+					     <14>,
+					     <15>;
+				#dma-cells = <1>;
+				dma-channels = <32>;
+				dma-requests = <96>;
+			};
 		};
 
 		gpio1: gpio@48310000 {
diff --git a/arch/arm/boot/dts/omap36xx.dtsi b/arch/arm/boot/dts/omap36xx.dtsi
index c618cb2..71f3c8f 100644
--- a/arch/arm/boot/dts/omap36xx.dtsi
+++ b/arch/arm/boot/dts/omap36xx.dtsi
@@ -223,6 +223,10 @@ thermal_zones: thermal-zones {
 	};
 };
 
+&sdma {
+	compatible = "ti,omap3630-sdma", "ti,omap-sdma";
+};
+
 /* OMAP3630 needs dss_96m_fck for VENC */
 &venc {
 	clocks = <&dss_tv_fck>, <&dss_96m_fck>;
diff --git a/arch/arm/boot/dts/omap4-l4-abe.dtsi b/arch/arm/boot/dts/omap4-l4-abe.dtsi
index 6c892fc..a6feb20 100644
--- a/arch/arm/boot/dts/omap4-l4-abe.dtsi
+++ b/arch/arm/boot/dts/omap4-l4-abe.dtsi
@@ -219,7 +219,6 @@ target-module@2a000 {			/* 0x4012a000, ap 10 0a.0 */
 
 		target-module@2e000 {			/* 0x4012e000, ap 12 0c.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "dmic";
 			reg = <0x2e000 0x4>,
 			      <0x2e010 0x4>;
 			reg-names = "rev", "sysc";
@@ -279,7 +278,6 @@ wdt3: wdt@0 {
 
 		mcpdm_module: target-module@32000 {	/* 0x40132000, ap 16 10.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "mcpdm";
 			reg = <0x32000 0x4>,
 			      <0x32010 0x4>;
 			reg-names = "rev", "sysc";
@@ -314,7 +312,6 @@ mcpdm: mcpdm@0 {
 
 		target-module@38000 {			/* 0x40138000, ap 18 12.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer5";
 			reg = <0x38000 0x4>,
 			      <0x38010 0x4>;
 			reg-names = "rev", "sysc";
@@ -345,7 +342,6 @@ timer5: timer@0 {
 
 		target-module@3a000 {			/* 0x4013a000, ap 20 14.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer6";
 			reg = <0x3a000 0x4>,
 			      <0x3a010 0x4>;
 			reg-names = "rev", "sysc";
@@ -376,7 +372,6 @@ timer6: timer@0 {
 
 		target-module@3c000 {			/* 0x4013c000, ap 22 16.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer7";
 			reg = <0x3c000 0x4>,
 			      <0x3c010 0x4>;
 			reg-names = "rev", "sysc";
@@ -407,7 +402,6 @@ timer7: timer@0 {
 
 		target-module@3e000 {			/* 0x4013e000, ap 24 18.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer8";
 			reg = <0x3e000 0x4>,
 			      <0x3e010 0x4>;
 			reg-names = "rev", "sysc";
@@ -466,7 +460,6 @@ target-module@c0000 {			/* 0x401c0000, ap 30 1e.0 */
 
 		target-module@f1000 {			/* 0x401f1000, ap 32 20.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "aess";
 			reg = <0xf1000 0x4>,
 			      <0xf1010 0x4>;
 			reg-names = "rev", "sysc";
diff --git a/arch/arm/boot/dts/omap4-l4.dtsi b/arch/arm/boot/dts/omap4-l4.dtsi
index 83f803be..459fd70 100644
--- a/arch/arm/boot/dts/omap4-l4.dtsi
+++ b/arch/arm/boot/dts/omap4-l4.dtsi
@@ -136,7 +136,6 @@ cm2_clockdomains: clockdomains {
 
 		target-module@56000 {			/* 0x4a056000, ap 7 0a.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "dma_system";
 			reg = <0x56000 0x4>,
 			      <0x5602c 0x4>,
 			      <0x56028 0x4>;
@@ -160,7 +159,7 @@ SYSC_OMAP2_SOFTRESET |
 			ranges = <0x0 0x56000 0x1000>;
 
 			sdma: dma-controller@0 {
-				compatible = "ti,omap4430-sdma";
+				compatible = "ti,omap4430-sdma", "ti,omap-sdma";
 				reg = <0x0 0x1000>;
 				interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
@@ -174,7 +173,6 @@ sdma: dma-controller@0 {
 
 		target-module@58000 {			/* 0x4a058000, ap 10 0e.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "hsi";
 			reg = <0x58000 0x4>,
 			      <0x58010 0x4>,
 			      <0x58014 0x4>;
@@ -321,7 +319,6 @@ usbhsehci: ehci@c00 {
 
 		target-module@66000 {			/* 0x4a066000, ap 25 26.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "mmu_dsp";
 			reg = <0x66000 0x4>,
 			      <0x66010 0x4>,
 			      <0x66014 0x4>;
@@ -335,12 +332,18 @@ SYSC_OMAP2_SOFTRESET |
 			/* Domains (V, P, C): iva, tesla_pwrdm, tesla_clkdm */
 			clocks = <&tesla_clkctrl OMAP4_DSP_CLKCTRL 0>;
 			clock-names = "fck";
+			resets = <&prm_tesla 1>;
+			reset-names = "rstctrl";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x0 0x66000 0x1000>;
 
-			/* mmu_dsp cannot be moved before reset driver */
-			status = "disabled";
+			mmu_dsp: mmu@0 {
+				compatible = "ti,omap4-iommu";
+				reg = <0x0 0x100>;
+				interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+				#iommu-cells = <0>;
+			};
 		};
 	};
 
@@ -420,7 +423,6 @@ usb_otg_hs: usb_otg_hs@0 {
 
 		target-module@2d000 {			/* 0x4a0ad000, ap 88 0c.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "ocp2scp_usb_phy";
 			reg = <0x2d000 0x4>,
 			      <0x2d010 0x4>,
 			      <0x2d014 0x4>;
@@ -499,7 +501,6 @@ target-module@4d000 {			/* 0x4a0cd000, ap 78 58.0 */
 
 		target-module@59000 {			/* 0x4a0d9000, ap 13 1a.0 */
 			compatible = "ti,sysc-omap4-sr", "ti,sysc";
-			ti,hwmods = "smartreflex_mpu";
 			reg = <0x59038 0x4>;
 			reg-names = "sysc";
 			ti,sysc-mask = <SYSC_OMAP3_SR_ENAWAKEUP>;
@@ -523,7 +524,6 @@ smartreflex_mpu: smartreflex@0 {
 
 		target-module@5b000 {			/* 0x4a0db000, ap 15 08.0 */
 			compatible = "ti,sysc-omap4-sr", "ti,sysc";
-			ti,hwmods = "smartreflex_iva";
 			reg = <0x5b038 0x4>;
 			reg-names = "sysc";
 			ti,sysc-mask = <SYSC_OMAP3_SR_ENAWAKEUP>;
@@ -547,7 +547,6 @@ smartreflex_iva: smartreflex@0 {
 
 		target-module@5d000 {			/* 0x4a0dd000, ap 17 22.0 */
 			compatible = "ti,sysc-omap4-sr", "ti,sysc";
-			ti,hwmods = "smartreflex_core";
 			reg = <0x5d038 0x4>;
 			reg-names = "sysc";
 			ti,sysc-mask = <SYSC_OMAP3_SR_ENAWAKEUP>;
@@ -613,7 +612,6 @@ mbox_dsp: mbox_dsp {
 
 		target-module@76000 {			/* 0x4a0f6000, ap 29 3a.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "spinlock";
 			reg = <0x76000 0x4>,
 			      <0x76010 0x4>,
 			      <0x76014 0x4>;
@@ -721,7 +719,6 @@ target-module@8000 {			/* 0x4a108000, ap 63 62.0 */
 
 		target-module@a000 {			/* 0x4a10a000, ap 65 50.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "fdif";
 			reg = <0xa000 0x4>,
 			      <0xa010 0x4>;
 			reg-names = "rev", "sysc";
@@ -1177,7 +1174,6 @@ timer1: timer@0 {
 
 		target-module@c000 {			/* 0x4a31c000, ap 11 20.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "kbd";
 			reg = <0xc000 0x4>,
 			      <0xc010 0x4>,
 			      <0xc014 0x4>;
@@ -1422,7 +1418,6 @@ uart3: serial@0 {
 
 		target-module@32000 {			/* 0x48032000, ap 5 02.0 */
 			compatible = "ti,sysc-omap2-timer", "ti,sysc";
-			ti,hwmods = "timer2";
 			reg = <0x32000 0x4>,
 			      <0x32010 0x4>,
 			      <0x32014 0x4>;
@@ -1454,7 +1449,6 @@ timer2: timer@0 {
 
 		target-module@34000 {			/* 0x48034000, ap 7 04.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer3";
 			reg = <0x34000 0x4>,
 			      <0x34010 0x4>;
 			reg-names = "rev", "sysc";
@@ -1482,7 +1476,6 @@ timer3: timer@0 {
 
 		target-module@36000 {			/* 0x48036000, ap 9 0e.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer4";
 			reg = <0x36000 0x4>,
 			      <0x36010 0x4>;
 			reg-names = "rev", "sysc";
@@ -1510,7 +1503,6 @@ timer4: timer@0 {
 
 		target-module@3e000 {			/* 0x4803e000, ap 11 08.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer9";
 			reg = <0x3e000 0x4>,
 			      <0x3e010 0x4>;
 			reg-names = "rev", "sysc";
@@ -1892,7 +1884,6 @@ i2c2: i2c@0 {
 
 		target-module@76000 {			/* 0x48076000, ap 39 38.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "slimbus2";
 			reg = <0x76000 0x4>,
 			      <0x76010 0x4>;
 			reg-names = "rev", "sysc";
@@ -1913,7 +1904,6 @@ target-module@76000 {			/* 0x48076000, ap 39 38.0 */
 
 		target-module@78000 {			/* 0x48078000, ap 41 1a.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "elm";
 			reg = <0x78000 0x4>,
 			      <0x78010 0x4>,
 			      <0x78014 0x4>;
@@ -1942,7 +1932,6 @@ elm: elm@0 {
 
 		target-module@86000 {			/* 0x48086000, ap 43 24.0 */
 			compatible = "ti,sysc-omap2-timer", "ti,sysc";
-			ti,hwmods = "timer10";
 			reg = <0x86000 0x4>,
 			      <0x86010 0x4>,
 			      <0x86014 0x4>;
@@ -1975,7 +1964,6 @@ timer10: timer@0 {
 
 		target-module@88000 {			/* 0x48088000, ap 45 2e.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer11";
 			reg = <0x88000 0x4>,
 			      <0x88010 0x4>;
 			reg-names = "rev", "sysc";
@@ -2002,12 +1990,26 @@ timer11: timer@0 {
 			};
 		};
 
-		target-module@90000 {			/* 0x48090000, ap 57 2a.0 */
-			compatible = "ti,sysc";
-			status = "disabled";
+		rng_target: target-module@90000 {	/* 0x48090000, ap 57 2a.0 */
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x91fe0 0x4>,
+			      <0x91fe4 0x4>;
+			reg-names = "rev", "sysc";
+			ti,sysc-mask = <(SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>;
+			/* Domains (P, C): l4per_pwrdm, l4_secure_clkdm */
+			clocks = <&l4_secure_clkctrl OMAP4_RNG_CLKCTRL 0>;
+			clock-names = "fck";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x0 0x90000 0x2000>;
+
+			rng: rng@0 {
+				compatible = "ti,omap4-rng";
+				reg = <0x0 0x2000>;
+				interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+			};
 		};
 
 		target-module@96000 {			/* 0x48096000, ap 37 26.0 */
@@ -2171,6 +2173,35 @@ target-module@a4000 {			/* 0x480a4000, ap 59 34.0 */
 				 <0x00001000 0x000a5000 0x00001000>;
 		};
 
+		des_target: target-module@a5000 {	/* 0x480a5000 */
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0xa5030 0x4>,
+			      <0xa5034 0x4>,
+			      <0xa5038 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>,
+					<SYSC_IDLE_SMART_WKUP>;
+			ti,syss-mask = <1>;
+			/* Domains (P, C): l4per_pwrdm, l4_secure_clkdm */
+			clocks = <&l4_secure_clkctrl OMAP4_DES3DES_CLKCTRL 0>;
+			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0xa5000 0x00001000>;
+
+			des: des@0 {
+				compatible = "ti,omap4-des";
+				reg = <0 0xa0>;
+				interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&sdma 117>, <&sdma 116>;
+				dma-names = "tx", "rx";
+			};
+		};
+
 		target-module@a8000 {			/* 0x480a8000, ap 61 3e.0 */
 			compatible = "ti,sysc";
 			status = "disabled";
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 2de8a6b..9a87440 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -173,14 +173,6 @@ gpmc: gpmc@50000000 {
 			#gpio-cells = <2>;
 		};
 
-		mmu_dsp: mmu@4a066000 {
-			compatible = "ti,omap4-iommu";
-			reg = <0x4a066000 0x100>;
-			interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
-			ti,hwmods = "mmu_dsp";
-			#iommu-cells = <0>;
-		};
-
 		target-module@52000000 {
 			compatible = "ti,sysc-omap4", "ti,sysc";
 			ti,hwmods = "iss";
@@ -206,17 +198,37 @@ target-module@52000000 {
 			/* No child device binding, driver in staging */
 		};
 
-		mmu_ipu: mmu@55082000 {
-			compatible = "ti,omap4-iommu";
-			reg = <0x55082000 0x100>;
-			interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
-			ti,hwmods = "mmu_ipu";
-			#iommu-cells = <0>;
-			ti,iommu-bus-err-back;
+		target-module@55082000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x55082000 0x4>,
+			      <0x55082010 0x4>,
+			      <0x55082014 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+					 SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			clocks = <&ducati_clkctrl OMAP4_IPU_CLKCTRL 0>;
+			clock-names = "fck";
+			resets = <&prm_core 2>;
+			reset-names = "rstctrl";
+			ranges = <0x0 0x55082000 0x100>;
+			#size-cells = <1>;
+			#address-cells = <1>;
+
+			mmu_ipu: mmu@0 {
+				compatible = "ti,omap4-iommu";
+				reg = <0x0 0x100>;
+				interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+				#iommu-cells = <0>;
+				ti,iommu-bus-err-back;
+			};
 		};
+
 		target-module@4012c000 {
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "slimbus1";
 			reg = <0x4012c000 0x4>,
 			      <0x4012c010 0x4>;
 			reg-names = "rev", "sysc";
@@ -266,40 +278,90 @@ emif2: emif@4d000000 {
 			hw-caps-temp-alert;
 		};
 
-		aes1: aes@4b501000 {
-			compatible = "ti,omap4-aes";
-			ti,hwmods = "aes1";
-			reg = <0x4b501000 0xa0>;
-			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&sdma 111>, <&sdma 110>;
-			dma-names = "tx", "rx";
+		aes1_target: target-module@4b501000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x4b501080 0x4>,
+			      <0x4b501084 0x4>,
+			      <0x4b501088 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>,
+					<SYSC_IDLE_SMART_WKUP>;
+			ti,syss-mask = <1>;
+			/* Domains (P, C): l4per_pwrdm, l4_secure_clkdm */
+			clocks = <&l4_secure_clkctrl OMAP4_AES1_CLKCTRL 0>;
+			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x4b501000 0x1000>;
+
+			aes1: aes@0 {
+				compatible = "ti,omap4-aes";
+				reg = <0 0xa0>;
+				interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&sdma 111>, <&sdma 110>;
+				dma-names = "tx", "rx";
+			};
 		};
 
-		aes2: aes@4b701000 {
-			compatible = "ti,omap4-aes";
-			ti,hwmods = "aes2";
-			reg = <0x4b701000 0xa0>;
-			interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&sdma 114>, <&sdma 113>;
-			dma-names = "tx", "rx";
+		aes2_target: target-module@4b701000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x4b701080 0x4>,
+			      <0x4b701084 0x4>,
+			      <0x4b701088 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>,
+					<SYSC_IDLE_SMART_WKUP>;
+			ti,syss-mask = <1>;
+			/* Domains (P, C): l4per_pwrdm, l4_secure_clkdm */
+			clocks = <&l4_secure_clkctrl OMAP4_AES2_CLKCTRL 0>;
+			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x4b701000 0x1000>;
+
+			aes2: aes@0 {
+				compatible = "ti,omap4-aes";
+				reg = <0 0xa0>;
+				interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&sdma 114>, <&sdma 113>;
+				dma-names = "tx", "rx";
+			};
 		};
 
-		des: des@480a5000 {
-			compatible = "ti,omap4-des";
-			ti,hwmods = "des";
-			reg = <0x480a5000 0xa0>;
-			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&sdma 117>, <&sdma 116>;
-			dma-names = "tx", "rx";
-		};
+		sham_target: target-module@4b100000 {
+			compatible = "ti,sysc-omap3-sham", "ti,sysc";
+			reg = <0x4b100100 0x4>,
+			      <0x4b100110 0x4>,
+			      <0x4b100114 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,syss-mask = <1>;
+			/* Domains (P, C): l4per_pwrdm, l4_secure_clkdm */
+			clocks = <&l4_secure_clkctrl OMAP4_SHA2MD5_CLKCTRL 0>;
+			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x4b100000 0x1000>;
 
-		sham: sham@4b100000 {
-			compatible = "ti,omap4-sham";
-			ti,hwmods = "sham";
-			reg = <0x4b100000 0x300>;
-			interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&sdma 119>;
-			dma-names = "rx";
+			sham: sham@0 {
+				compatible = "ti,omap4-sham";
+				reg = <0 0x300>;
+				interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&sdma 119>;
+				dma-names = "rx";
+			};
 		};
 
 		abb_mpu: regulator-abb-mpu {
diff --git a/arch/arm/boot/dts/omap44xx-clocks.dtsi b/arch/arm/boot/dts/omap44xx-clocks.dtsi
index e9d9c84..5328685 100644
--- a/arch/arm/boot/dts/omap44xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap44xx-clocks.dtsi
@@ -1279,13 +1279,18 @@ l4_per_cm: l4_per_cm@1400 {
 		#size-cells = <1>;
 		ranges = <0 0x1400 0x200>;
 
-		l4_per_clkctrl: clk@20 {
-			compatible = "ti,clkctrl";
+		l4_per_clkctrl: clock@20 {
+			compatible = "ti,clkctrl-l4-per", "ti,clkctrl";
 			reg = <0x20 0x144>;
 			#clock-cells = <2>;
 		};
-	};
 
+		l4_secure_clkctrl: clock@1a0 {
+			compatible = "ti,clkctrl-l4-secure", "ti,clkctrl";
+			reg = <0x1a0 0x3c>;
+			#clock-cells = <2>;
+		};
+	};
 };
 
 &prm {
diff --git a/arch/arm/boot/dts/omap5-l4-abe.dtsi b/arch/arm/boot/dts/omap5-l4-abe.dtsi
index 23aa907..4ec7909d 100644
--- a/arch/arm/boot/dts/omap5-l4-abe.dtsi
+++ b/arch/arm/boot/dts/omap5-l4-abe.dtsi
@@ -203,7 +203,6 @@ target-module@2a000 {			/* 0x4012a000, ap 10 0a.0 */
 
 		target-module@2e000 {			/* 0x4012e000, ap 12 0c.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "dmic";
 			reg = <0x2e000 0x4>,
 			      <0x2e010 0x4>;
 			reg-names = "rev", "sysc";
@@ -244,7 +243,6 @@ target-module@30000 {			/* 0x40130000, ap 14 0e.0 */
 
 		mcpdm_module: target-module@32000 {	/* 0x40132000, ap 16 10.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			ti,hwmods = "mcpdm";
 			reg = <0x32000 0x4>,
 			      <0x32010 0x4>;
 			reg-names = "rev", "sysc";
@@ -279,7 +277,6 @@ mcpdm: mcpdm@0 {
 
 		target-module@38000 {			/* 0x40138000, ap 18 12.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer5";
 			reg = <0x38000 0x4>,
 			      <0x38010 0x4>;
 			reg-names = "rev", "sysc";
@@ -311,7 +308,6 @@ timer5: timer@0 {
 
 		target-module@3a000 {			/* 0x4013a000, ap 20 14.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer6";
 			reg = <0x3a000 0x4>,
 			      <0x3a010 0x4>;
 			reg-names = "rev", "sysc";
@@ -343,7 +339,6 @@ timer6: timer@0 {
 
 		target-module@3c000 {			/* 0x4013c000, ap 22 16.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer7";
 			reg = <0x3c000 0x4>,
 			      <0x3c010 0x4>;
 			reg-names = "rev", "sysc";
@@ -374,7 +369,6 @@ timer7: timer@0 {
 
 		target-module@3e000 {			/* 0x4013e000, ap 24 18.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer8";
 			reg = <0x3e000 0x4>,
 			      <0x3e010 0x4>;
 			reg-names = "rev", "sysc";
diff --git a/arch/arm/boot/dts/omap5-l4.dtsi b/arch/arm/boot/dts/omap5-l4.dtsi
index 25aacf1..f68740a 100644
--- a/arch/arm/boot/dts/omap5-l4.dtsi
+++ b/arch/arm/boot/dts/omap5-l4.dtsi
@@ -213,7 +213,6 @@ dwc3: dwc3@10000 {
 
 		target-module@56000 {			/* 0x4a056000, ap 7 02.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "dma_system";
 			reg = <0x56000 0x4>,
 			      <0x5602c 0x4>,
 			      <0x56028 0x4>;
@@ -237,7 +236,7 @@ SYSC_OMAP2_SOFTRESET |
 			ranges = <0x0 0x56000 0x1000>;
 
 			sdma: dma-controller@0 {
-				compatible = "ti,omap4430-sdma";
+				compatible = "ti,omap4430-sdma", "ti,omap-sdma";
 				reg = <0x0 0x1000>;
 				interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
@@ -349,7 +348,6 @@ usbhsehci: ehci@c00 {
 
 		target-module@66000 {			/* 0x4a066000, ap 23 0a.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "mmu_dsp";
 			reg = <0x66000 0x4>,
 			      <0x66010 0x4>,
 			      <0x66014 0x4>;
@@ -364,12 +362,18 @@ SYSC_OMAP2_SOFTRESET |
 			/* Domains (V, P, C): mm, dsp_pwrdm, dsp_clkdm */
 			clocks = <&dsp_clkctrl OMAP5_MMU_DSP_CLKCTRL 0>;
 			clock-names = "fck";
+			resets = <&prm_dsp 1>;
+			reset-names = "rstctrl";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x0 0x66000 0x1000>;
 
-			/* mmu_dsp cannot be moved before reset driver */
-			status = "disabled";
+			mmu_dsp: mmu@0 {
+				compatible = "ti,omap4-iommu";
+				reg = <0x0 0x100>;
+				interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+				#iommu-cells = <0>;
+			};
 		};
 
 		target-module@70000 {			/* 0x4a070000, ap 79 2e.0 */
@@ -430,7 +434,6 @@ segment@80000 {					/* 0x4a080000 */
 
 		target-module@0 {			/* 0x4a080000, ap 83 28.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "ocp2scp1";
 			reg = <0x0 0x4>,
 			      <0x10 0x4>,
 			      <0x14 0x4>;
@@ -488,7 +491,6 @@ usb3_phy: usb3phy@4400 {
 
 		target-module@10000 {			/* 0x4a090000, ap 89 36.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "ocp2scp3";
 			reg = <0x10000 0x4>,
 			      <0x10010 0x4>,
 			      <0x10014 0x4>;
@@ -627,7 +629,6 @@ mbox_dsp: mbox_dsp {
 
 		target-module@76000 {			/* 0x4a0f6000, ap 27 0c.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "spinlock";
 			reg = <0x76000 0x4>,
 			      <0x76010 0x4>,
 			      <0x76014 0x4>;
@@ -1061,7 +1062,6 @@ uart3: serial@0 {
 
 		target-module@32000 {			/* 0x48032000, ap 5 3e.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer2";
 			reg = <0x32000 0x4>,
 			      <0x32010 0x4>;
 			reg-names = "rev", "sysc";
@@ -1089,7 +1089,6 @@ timer2: timer@0 {
 
 		target-module@34000 {			/* 0x48034000, ap 7 46.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer3";
 			reg = <0x34000 0x4>,
 			      <0x34010 0x4>;
 			reg-names = "rev", "sysc";
@@ -1117,7 +1116,6 @@ timer3: timer@0 {
 
 		target-module@36000 {			/* 0x48036000, ap 9 4e.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer4";
 			reg = <0x36000 0x4>,
 			      <0x36010 0x4>;
 			reg-names = "rev", "sysc";
@@ -1145,7 +1143,6 @@ timer4: timer@0 {
 
 		target-module@3e000 {			/* 0x4803e000, ap 11 56.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer9";
 			reg = <0x3e000 0x4>,
 			      <0x3e010 0x4>;
 			reg-names = "rev", "sysc";
@@ -1713,7 +1710,6 @@ i2c5: i2c@0 {
 
 		target-module@86000 {			/* 0x48086000, ap 41 5e.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer10";
 			reg = <0x86000 0x4>,
 			      <0x86010 0x4>;
 			reg-names = "rev", "sysc";
@@ -1742,7 +1738,6 @@ timer10: timer@0 {
 
 		target-module@88000 {			/* 0x48088000, ap 43 66.0 */
 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
-			ti,hwmods = "timer11";
 			reg = <0x88000 0x4>,
 			      <0x88010 0x4>;
 			reg-names = "rev", "sysc";
@@ -1769,12 +1764,26 @@ timer11: timer@0 {
 			};
 		};
 
-		target-module@90000 {			/* 0x48090000, ap 55 1a.0 */
-			compatible = "ti,sysc";
-			status = "disabled";
+		rng_target: target-module@90000 {	/* 0x48090000, ap 55 1a.0 */
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x91fe0 0x4>,
+			      <0x91fe4 0x4>;
+			reg-names = "rev", "sysc";
+			ti,sysc-mask = <(SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>;
+			/* Domains (P, C): l4per_pwrdm, l4sec_clkdm */
+			clocks = <&l4sec_clkctrl OMAP5_RNG_CLKCTRL 0>;
+			clock-names = "fck";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x0 0x90000 0x2000>;
+
+			rng: rng@0 {
+				compatible = "ti,omap4-rng";
+				reg = <0x0 0x2000>;
+				interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+			};
 		};
 
 		target-module@98000 {			/* 0x48098000, ap 47 08.0 */
@@ -2358,7 +2367,6 @@ timer1: timer@0 {
 
 		target-module@c000 {			/* 0x4ae1c000, ap 11 1c.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
-			ti,hwmods = "kbd";
 			reg = <0xc000 0x4>,
 			      <0xc010 0x4>;
 			reg-names = "rev", "sysc";
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 1f6ad1d..d0ecf54 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -186,21 +186,33 @@ gpmc: gpmc@50000000 {
 			#gpio-cells = <2>;
 		};
 
-		mmu_dsp: mmu@4a066000 {
-			compatible = "ti,omap4-iommu";
-			reg = <0x4a066000 0x100>;
-			interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
-			ti,hwmods = "mmu_dsp";
-			#iommu-cells = <0>;
-		};
+		target-module@55082000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
+			reg = <0x55082000 0x4>,
+			      <0x55082010 0x4>,
+			      <0x55082014 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+					 SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			clocks = <&ipu_clkctrl OMAP5_MMU_IPU_CLKCTRL 0>;
+			clock-names = "fck";
+			resets = <&prm_core 2>;
+			reset-names = "rstctrl";
+			ranges = <0x0 0x55082000 0x100>;
+			#size-cells = <1>;
+			#address-cells = <1>;
 
-		mmu_ipu: mmu@55082000 {
-			compatible = "ti,omap4-iommu";
-			reg = <0x55082000 0x100>;
-			interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
-			ti,hwmods = "mmu_ipu";
-			#iommu-cells = <0>;
-			ti,iommu-bus-err-back;
+			mmu_ipu: mmu@0 {
+				compatible = "ti,omap4-iommu";
+				reg = <0x0 0x100>;
+				interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+				#iommu-cells = <0>;
+				ti,iommu-bus-err-back;
+			};
 		};
 
 		dmm@4e000000 {
diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi
index 4791834..42f2c44 100644
--- a/arch/arm/boot/dts/omap54xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi
@@ -1125,11 +1125,17 @@ l4per_cm: l4per_cm@1000 {
 		#size-cells = <1>;
 		ranges = <0 0x1000 0x200>;
 
-		l4per_clkctrl: clk@20 {
-			compatible = "ti,clkctrl";
+		l4per_clkctrl: clock@20 {
+			compatible = "ti,clkctrl-l4per", "ti,clkctrl";
 			reg = <0x20 0x15c>;
 			#clock-cells = <2>;
 		};
+
+		l4sec_clkctrl: clock@1a0 {
+			compatible = "ti,clkctrl-l4sec", "ti,clkctrl";
+			reg = <0x1a0 0x3c>;
+			#clock-cells = <2>;
+		};
 	};
 
 	dss_cm: dss_cm@1400 {
diff --git a/arch/arm/boot/dts/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom-apq8084.dtsi
index 0a0fb14..fa1852e 100644
--- a/arch/arm/boot/dts/qcom-apq8084.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8084.dtsi
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /dts-v1/;
 
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/clock/qcom,gcc-apq8084.h>
 #include <dt-bindings/gpio/gpio.h>
 
@@ -184,7 +185,7 @@ cpu_crit3: trip1 {
 
 	cpu-pmu {
 		compatible = "qcom,krait-pmu";
-		interrupts = <1 7 0xf04>;
+		interrupts = <GIC_PPI 7 0xf04>;
 	};
 
 	clocks {
@@ -203,10 +204,10 @@ sleep_clk: sleep_clk {
 
 	timer {
 		compatible = "arm,armv7-timer";
-		interrupts = <1 2 0xf08>,
-			     <1 3 0xf08>,
-			     <1 4 0xf08>,
-			     <1 1 0xf08>;
+		interrupts = <GIC_PPI 2 0xf08>,
+			     <GIC_PPI 3 0xf08>,
+			     <GIC_PPI 4 0xf08>,
+			     <GIC_PPI 1 0xf08>;
 		clock-frequency = <19200000>;
 	};
 
@@ -253,12 +254,13 @@ tsens_backup: backup@440 {
 
 		tsens: thermal-sensor@fc4a8000 {
 			compatible = "qcom,msm8974-tsens";
-			reg = <0xfc4a8000 0x2000>;
+			reg = <0xfc4a9000 0x1000>, /* TM */
+			      <0xfc4a8000 0x1000>; /* SROT */
 			nvmem-cells = <&tsens_calib>, <&tsens_backup>;
 			nvmem-cell-names = "calib", "calib_backup";
+			#qcom,sensors = <11>;
 			#thermal-sensor-cells = <1>;
 		};
-
 		timer@f9020000 {
 			#address-cells = <1>;
 			#size-cells = <1>;
@@ -269,50 +271,50 @@ timer@f9020000 {
 
 			frame@f9021000 {
 				frame-number = <0>;
-				interrupts = <0 8 0x4>,
-					     <0 7 0x4>;
+				interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
 				reg = <0xf9021000 0x1000>,
 				      <0xf9022000 0x1000>;
 			};
 
 			frame@f9023000 {
 				frame-number = <1>;
-				interrupts = <0 9 0x4>;
+				interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
 				reg = <0xf9023000 0x1000>;
 				status = "disabled";
 			};
 
 			frame@f9024000 {
 				frame-number = <2>;
-				interrupts = <0 10 0x4>;
+				interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
 				reg = <0xf9024000 0x1000>;
 				status = "disabled";
 			};
 
 			frame@f9025000 {
 				frame-number = <3>;
-				interrupts = <0 11 0x4>;
+				interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
 				reg = <0xf9025000 0x1000>;
 				status = "disabled";
 			};
 
 			frame@f9026000 {
 				frame-number = <4>;
-				interrupts = <0 12 0x4>;
+				interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
 				reg = <0xf9026000 0x1000>;
 				status = "disabled";
 			};
 
 			frame@f9027000 {
 				frame-number = <5>;
-				interrupts = <0 13 0x4>;
+				interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
 				reg = <0xf9027000 0x1000>;
 				status = "disabled";
 			};
 
 			frame@f9028000 {
 				frame-number = <6>;
-				interrupts = <0 14 0x4>;
+				interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
 				reg = <0xf9028000 0x1000>;
 				status = "disabled";
 			};
@@ -404,13 +406,13 @@ tlmm: pinctrl@fd510000 {
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
-			interrupts = <0 208 0>;
+			interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		blsp2_uart2: serial@f995e000 {
 			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
 			reg = <0xf995e000 0x1000>;
-			interrupts = <0 114 0x0>;
+			interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>;
 			clock-names = "core", "iface";
 			status = "disabled";
@@ -420,7 +422,7 @@ sdhci@f9824900 {
 			compatible = "qcom,apq8084-sdhci", "qcom,sdhci-msm-v4";
 			reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
 			reg-names = "hc_mem", "core_mem";
-			interrupts = <0 123 0>, <0 138 0>;
+			interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "hc_irq", "pwr_irq";
 			clocks = <&gcc GCC_SDCC1_APPS_CLK>,
 				 <&gcc GCC_SDCC1_AHB_CLK>,
@@ -433,7 +435,7 @@ sdhci@f98a4900 {
 			compatible = "qcom,apq8084-sdhci", "qcom,sdhci-msm-v4";
 			reg = <0xf98a4900 0x11c>, <0xf98a4000 0x800>;
 			reg-names = "hc_mem", "core_mem";
-			interrupts = <0 125 0>, <0 221 0>;
+			interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "hc_irq", "pwr_irq";
 			clocks = <&gcc GCC_SDCC2_APPS_CLK>,
 				 <&gcc GCC_SDCC2_AHB_CLK>,
@@ -449,7 +451,7 @@ spmi_bus: spmi@fc4cf000 {
 			      <0xfc4cb000 0x1000>,
 			      <0xfc4ca000 0x1000>;
 			interrupt-names = "periph_irq";
-			interrupts = <0 190 0>;
+			interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
 			qcom,ee = <0>;
 			qcom,channel = <0>;
 			#address-cells = <2>;
@@ -463,7 +465,7 @@ smd {
 		compatible = "qcom,smd";
 
 		rpm {
-			interrupts = <0 168 1>;
+			interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
 			qcom,ipc = <&apcs 8 0>;
 			qcom,smd-edge = <15>;
 
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index 8ef26da3..71bb25a 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -102,6 +102,7 @@ cpu@3 {
 		L2: l2-cache {
 			compatible = "cache";
 			cache-level = <2>;
+			qcom,saw = <&saw_l2>;
 		};
 	};
 
@@ -353,6 +354,12 @@ saw3: regulator@b0b9000 {
 			regulator;
 		};
 
+		saw_l2: regulator@b012000 {
+			compatible = "qcom,saw2";
+			reg = <0xb012000 0x1000>;
+			regulator;
+		};
+
 		blsp1_uart1: serial@78af000 {
 			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
 			reg = <0x78af000 0x200>;
diff --git a/arch/arm/boot/dts/qcom-mdm9615-wp8548-mangoh-green.dts b/arch/arm/boot/dts/qcom-mdm9615-wp8548-mangoh-green.dts
index 26160c3..942e3a2 100644
--- a/arch/arm/boot/dts/qcom-mdm9615-wp8548-mangoh-green.dts
+++ b/arch/arm/boot/dts/qcom-mdm9615-wp8548-mangoh-green.dts
@@ -143,7 +143,7 @@ usbhub: hub@8 {
 				compatible = "smsc,usb3503a";
 				reg = <0x8>;
 				connect-gpios = <&gpioext2 1 GPIO_ACTIVE_HIGH>;
-				intn-gpios = <&gpioext2 0 GPIO_ACTIVE_LOW>;
+				intn-gpios = <&gpioext2 0 GPIO_ACTIVE_HIGH>;
 				initial-mode = <1>;
 			};
 		};
diff --git a/arch/arm/boot/dts/qcom-msm8974-fairphone-fp2.dts b/arch/arm/boot/dts/qcom-msm8974-fairphone-fp2.dts
index 2616039..d2d4877 100644
--- a/arch/arm/boot/dts/qcom-msm8974-fairphone-fp2.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-fairphone-fp2.dts
@@ -259,6 +259,25 @@ serial@f991e000 {
 		status = "ok";
 	};
 
+	remoteproc@fb21b000 {
+		status = "ok";
+
+		vddmx-supply = <&pm8841_s1>;
+		vddcx-supply = <&pm8841_s2>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&wcnss_pin_a>;
+
+		smd-edge {
+			qcom,remote-pid = <4>;
+			label = "pronto";
+
+			wcnss {
+				status = "ok";
+			};
+		};
+	};
+
 	pinctrl@fd510000 {
 		sdhc1_pin_a: sdhc1-pin-active {
 			clk {
@@ -287,6 +306,32 @@ cmd-data {
 				bias-pull-up;
 			};
 		};
+
+		wcnss_pin_a: wcnss-pin-active {
+			wlan {
+				pins =  "gpio36", "gpio37", "gpio38", "gpio39", "gpio40";
+				function = "wlan";
+
+				drive-strength = <6>;
+				bias-pull-down;
+			};
+
+			bt {
+				pins = "gpio35", "gpio43", "gpio44";
+				function = "bt";
+
+				drive-strength = <2>;
+				bias-pull-down;
+			};
+
+			fm {
+				pins = "gpio41", "gpio42";
+				function = "fm";
+
+				drive-strength = <2>;
+				bias-pull-down;
+			};
+		};
 	};
 
 	sdhci@f9824900 {
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index 9a84eb0..4b161b8 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /dts-v1/;
 
+#include <dt-bindings/interconnect/qcom,msm8974.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/clock/qcom,gcc-msm8974.h>
 #include <dt-bindings/clock/qcom,mmcc-msm8974.h>
@@ -20,17 +21,17 @@ reserved-memory {
 		#size-cells = <1>;
 		ranges;
 
-		mpss@8000000 {
+		mpss_region: mpss@8000000 {
 			reg = <0x08000000 0x5100000>;
 			no-map;
 		};
 
-		mba@d100000 {
+		mba_region: mba@d100000 {
 			reg = <0x0d100000 0x100000>;
 			no-map;
 		};
 
-		reserved@d200000 {
+		wcnss_region: wcnss@d200000 {
 			reg = <0x0d200000 0xa00000>;
 			no-map;
 		};
@@ -61,8 +62,11 @@ rfsa@fd60000 {
 		};
 
 		rmtfs@fd80000 {
+			compatible = "qcom,rmtfs-mem";
 			reg = <0x0fd80000 0x180000>;
 			no-map;
+
+			qcom,client-id = <1>;
 		};
 	};
 
@@ -356,6 +360,15 @@ adsp-pil {
 
 		qcom,smem-states = <&adsp_smp2p_out 0>;
 		qcom,smem-state-names = "stop";
+
+		smd-edge {
+			interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+
+			qcom,ipc = <&apcs 8 8>;
+			qcom,smd-edge = <1>;
+
+			label = "lpass";
+		};
 	};
 
 	smem {
@@ -795,6 +808,119 @@ rng@f9bff000 {
 			clock-names = "core";
 		};
 
+		remoteproc@fc880000 {
+			compatible = "qcom,msm8974-mss-pil";
+			reg = <0xfc880000 0x100>, <0xfc820000 0x020>;
+			reg-names = "qdsp6", "rmb";
+
+			interrupts-extended = <&intc GIC_SPI 24 IRQ_TYPE_EDGE_RISING>,
+					      <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+					      <&modem_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+					      <&modem_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+					      <&modem_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog", "fatal", "ready", "handover", "stop-ack";
+
+			clocks = <&gcc GCC_MSS_Q6_BIMC_AXI_CLK>,
+				 <&gcc GCC_MSS_CFG_AHB_CLK>,
+				 <&gcc GCC_BOOT_ROM_AHB_CLK>,
+				 <&xo_board>;
+			clock-names = "iface", "bus", "mem", "xo";
+
+			resets = <&gcc GCC_MSS_RESTART>;
+			reset-names = "mss_restart";
+
+			cx-supply = <&pm8841_s2>;
+			mss-supply = <&pm8841_s3>;
+			mx-supply = <&pm8841_s1>;
+			pll-supply = <&pm8941_l12>;
+
+			qcom,halt-regs = <&tcsr_mutex_block 0x1180 0x1200 0x1280>;
+
+			qcom,smem-states = <&modem_smp2p_out 0>;
+			qcom,smem-state-names = "stop";
+
+			mba {
+				memory-region = <&mba_region>;
+			};
+
+			mpss {
+				memory-region = <&mpss_region>;
+			};
+
+			smd-edge {
+				interrupts = <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>;
+
+				qcom,ipc = <&apcs 8 12>;
+				qcom,smd-edge = <0>;
+
+				label = "modem";
+			};
+		};
+
+		pronto: remoteproc@fb21b000 {
+			compatible = "qcom,pronto-v2-pil", "qcom,pronto";
+			reg = <0xfb204000 0x2000>, <0xfb202000 0x1000>, <0xfb21b000 0x3000>;
+			reg-names = "ccu", "dxe", "pmu";
+
+			memory-region = <&wcnss_region>;
+
+			interrupts-extended = <&intc GIC_SPI 149 IRQ_TYPE_EDGE_RISING>,
+					      <&wcnss_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+					      <&wcnss_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+					      <&wcnss_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+					      <&wcnss_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog", "fatal", "ready", "handover", "stop-ack";
+
+			vddpx-supply = <&pm8941_s3>;
+
+			qcom,smem-states = <&wcnss_smp2p_out 0>;
+			qcom,smem-state-names = "stop";
+
+			status = "disabled";
+
+			iris {
+				compatible = "qcom,wcn3680";
+
+				clocks = <&rpmcc RPM_SMD_CXO_A2>;
+				clock-names = "xo";
+
+				vddxo-supply = <&pm8941_l6>;
+				vddrfa-supply = <&pm8941_l11>;
+				vddpa-supply = <&pm8941_l19>;
+				vdddig-supply = <&pm8941_s3>;
+			};
+
+			smd-edge {
+				interrupts = <GIC_SPI 142 IRQ_TYPE_EDGE_RISING>;
+
+				qcom,ipc = <&apcs 8 17>;
+				qcom,smd-edge = <6>;
+
+				wcnss {
+					compatible = "qcom,wcnss";
+					qcom,smd-channels = "WCNSS_CTRL";
+					status = "disabled";
+
+					qcom,mmio = <&pronto>;
+
+					bt {
+						compatible = "qcom,wcnss-bt";
+					};
+
+					wifi {
+						compatible = "qcom,wcnss-wlan";
+
+						interrupts = <GIC_SPI 145 IRQ_TYPE_EDGE_RISING>,
+							     <GIC_SPI 146 IRQ_TYPE_EDGE_RISING>;
+						interrupt-names = "tx", "rx";
+
+						qcom,smem-states = <&apps_smsm 10>, <&apps_smsm 9>;
+						qcom,smem-state-names = "tx-enable", "tx-rings-empty";
+					};
+				};
+			};
+		};
+
 		msmgpio: pinctrl@fd510000 {
 			compatible = "qcom,msm8974-pinctrl";
 			reg = <0xfd510000 0x4000>;
@@ -1179,6 +1305,79 @@ etm3_out: endpoint {
 			};
 		};
 
+		ocmem@fdd00000 {
+			compatible = "qcom,msm8974-ocmem";
+			reg = <0xfdd00000 0x2000>,
+			      <0xfec00000 0x180000>;
+			reg-names = "ctrl",
+			            "mem";
+			clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>,
+			         <&mmcc OCMEMCX_OCMEMNOC_CLK>;
+			clock-names = "core",
+			              "iface";
+
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			gmu_sram: gmu-sram@0 {
+				reg = <0x0 0x100000>;
+			};
+		};
+
+		bimc: interconnect@fc380000 {
+			reg = <0xfc380000 0x6a000>;
+			compatible = "qcom,msm8974-bimc";
+			#interconnect-cells = <1>;
+			clock-names = "bus", "bus_a";
+			clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
+			         <&rpmcc RPM_SMD_BIMC_A_CLK>;
+		};
+
+		snoc: interconnect@fc460000 {
+			reg = <0xfc460000 0x4000>;
+			compatible = "qcom,msm8974-snoc";
+			#interconnect-cells = <1>;
+			clock-names = "bus", "bus_a";
+			clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
+			         <&rpmcc RPM_SMD_SNOC_A_CLK>;
+		};
+
+		pnoc: interconnect@fc468000 {
+			reg = <0xfc468000 0x4000>;
+			compatible = "qcom,msm8974-pnoc";
+			#interconnect-cells = <1>;
+			clock-names = "bus", "bus_a";
+			clocks = <&rpmcc RPM_SMD_PNOC_CLK>,
+			         <&rpmcc RPM_SMD_PNOC_A_CLK>;
+		};
+
+		ocmemnoc: interconnect@fc470000 {
+			reg = <0xfc470000 0x4000>;
+			compatible = "qcom,msm8974-ocmemnoc";
+			#interconnect-cells = <1>;
+			clock-names = "bus", "bus_a";
+			clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>,
+			         <&rpmcc RPM_SMD_OCMEMGX_A_CLK>;
+		};
+
+		mmssnoc: interconnect@fc478000 {
+			reg = <0xfc478000 0x4000>;
+			compatible = "qcom,msm8974-mmssnoc";
+			#interconnect-cells = <1>;
+			clock-names = "bus", "bus_a";
+			clocks = <&mmcc MMSS_S0_AXI_CLK>,
+			         <&mmcc MMSS_S0_AXI_CLK>;
+		};
+
+		cnoc: interconnect@fc480000 {
+			reg = <0xfc480000 0x4000>;
+			compatible = "qcom,msm8974-cnoc";
+			#interconnect-cells = <1>;
+			clock-names = "bus", "bus_a";
+			clocks = <&rpmcc RPM_SMD_CNOC_CLK>,
+			         <&rpmcc RPM_SMD_CNOC_A_CLK>;
+		};
+
 		mdss: mdss@fd900000 {
 			status = "disabled";
 
@@ -1225,6 +1424,9 @@ mdp: mdp@fd900000 {
 				              "core",
 				              "vsync";
 
+				interconnects = <&mmssnoc MNOC_MAS_MDP_PORT0 &bimc BIMC_SLV_EBI_CH0>;
+				interconnect-names = "mdp0-mem";
+
 				ports {
 					#address-cells = <1>;
 					#size-cells = <0>;
@@ -1325,20 +1527,6 @@ reboot-mode {
 	smd {
 		compatible = "qcom,smd";
 
-		adsp {
-			interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
-
-			qcom,ipc = <&apcs 8 8>;
-			qcom,smd-edge = <1>;
-		};
-
-		modem {
-			interrupts = <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>;
-
-			qcom,ipc = <&apcs 8 12>;
-			qcom,smd-edge = <0>;
-		};
-
 		rpm {
 			interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
 			qcom,ipc = <&apcs 8 0>;
diff --git a/arch/arm/boot/dts/r7s72100.dtsi b/arch/arm/boot/dts/r7s72100.dtsi
index d03dcd9..75b2796 100644
--- a/arch/arm/boot/dts/r7s72100.dtsi
+++ b/arch/arm/boot/dts/r7s72100.dtsi
@@ -313,9 +313,9 @@ usbhs1: usb@e8207000 {
 		mmcif: mmc@e804c800 {
 			compatible = "renesas,mmcif-r7s72100", "renesas,sh-mmcif";
 			reg = <0xe804c800 0x80>;
-			interrupts = <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&mstp8_clks R7S72100_CLK_MMCIF>;
 			power-domains = <&cpg_clocks>;
 			reg-io-width = <4>;
@@ -326,9 +326,9 @@ GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH
 		sdhi0: sd@e804e000 {
 			compatible = "renesas,sdhi-r7s72100";
 			reg = <0xe804e000 0x100>;
-			interrupts = <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
 
 			clocks = <&mstp12_clks R7S72100_CLK_SDHI00>,
 				 <&mstp12_clks R7S72100_CLK_SDHI01>;
@@ -342,9 +342,9 @@ GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH
 		sdhi1: sd@e804e800 {
 			compatible = "renesas,sdhi-r7s72100";
 			reg = <0xe804e800 0x100>;
-			interrupts = <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>;
 
 			clocks = <&mstp12_clks R7S72100_CLK_SDHI10>,
 				 <&mstp12_clks R7S72100_CLK_SDHI11>;
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index dd865f3..a5cd312 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -84,27 +84,27 @@ dmac: dma-multiplexer {
 		dma0: dma-controller@e6700020 {
 			compatible = "renesas,shdma-r8a73a4";
 			reg = <0 0xe6700020 0 0x89e0>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-					GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
diff --git a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
index 758360a..d960c27 100644
--- a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
+++ b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
@@ -60,8 +60,7 @@ vccq_sdhi0: regulator-vccq-sdhi0 {
 
 		enable-gpio = <&pfc 74 GPIO_ACTIVE_HIGH>;
 		gpios = <&pfc 17 GPIO_ACTIVE_HIGH>;
-		states = <3300000 0
-			  1800000 1>;
+		states = <3300000 0>, <1800000 1>;
 
 		enable-active-high;
 	};
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index 12ffe73..ebc1ff6 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -102,14 +102,14 @@ irqpin0: interrupt-controller@e6900000 {
 			<0xe6900020 1>,
 			<0xe6900040 1>,
 			<0xe6900060 1>;
-		interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7740_CLK_INTCA>;
 		power-domains = <&pd_a4s>;
 	};
@@ -124,14 +124,14 @@ irqpin1: interrupt-controller@e6900004 {
 			<0xe6900024 1>,
 			<0xe6900044 1>,
 			<0xe6900064 1>;
-		interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7740_CLK_INTCA>;
 		power-domains = <&pd_a4s>;
 	};
@@ -146,14 +146,14 @@ irqpin2: interrupt-controller@e6900008 {
 			<0xe6900028 1>,
 			<0xe6900048 1>,
 			<0xe6900068 1>;
-		interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7740_CLK_INTCA>;
 		power-domains = <&pd_a4s>;
 	};
@@ -168,14 +168,14 @@ irqpin3: interrupt-controller@e690000c {
 			<0xe690002c 1>,
 			<0xe690004c 1>,
 			<0xe690006c 1>;
-		interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7740_CLK_INTCA>;
 		power-domains = <&pd_a4s>;
 	};
@@ -198,10 +198,10 @@ i2c0: i2c@fff20000 {
 		#size-cells = <0>;
 		compatible = "renesas,iic-r8a7740", "renesas,rmobile-iic";
 		reg = <0xfff20000 0x425>;
-		interrupts = <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7740_CLK_IIC0>;
 		power-domains = <&pd_a4r>;
 		status = "disabled";
@@ -212,10 +212,10 @@ i2c1: i2c@e6c20000 {
 		#size-cells = <0>;
 		compatible = "renesas,iic-r8a7740", "renesas,rmobile-iic";
 		reg = <0xe6c20000 0x425>;
-		interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7740_CLK_IIC1>;
 		power-domains = <&pd_a3sp>;
 		status = "disabled";
@@ -342,8 +342,8 @@ tpu: pwm@e6600000 {
 	mmcif0: mmc@e6bd0000 {
 		compatible = "renesas,mmcif-r8a7740", "renesas,sh-mmcif";
 		reg = <0xe6bd0000 0x100>;
-		interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7740_CLK_MMC>;
 		power-domains = <&pd_a3sp>;
 		status = "disabled";
@@ -352,9 +352,9 @@ mmcif0: mmc@e6bd0000 {
 	sdhi0: sd@e6850000 {
 		compatible = "renesas,sdhi-r8a7740";
 		reg = <0xe6850000 0x100>;
-		interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7740_CLK_SDHI0>;
 		power-domains = <&pd_a3sp>;
 		cap-sd-highspeed;
@@ -365,9 +365,9 @@ GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH
 	sdhi1: sd@e6860000 {
 		compatible = "renesas,sdhi-r8a7740";
 		reg = <0xe6860000 0x100>;
-		interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7740_CLK_SDHI1>;
 		power-domains = <&pd_a3sp>;
 		cap-sd-highspeed;
@@ -378,9 +378,9 @@ GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH
 	sdhi2: sd@e6870000 {
 		compatible = "renesas,sdhi-r8a7740";
 		reg = <0xe6870000 0x100>;
-		interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R8A7740_CLK_SDHI2>;
 		power-domains = <&pd_a3sp>;
 		cap-sd-highspeed;
diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi
index de981d6..1cd19a5 100644
--- a/arch/arm/boot/dts/r8a7743.dtsi
+++ b/arch/arm/boot/dts/r8a7743.dtsi
@@ -399,6 +399,9 @@ ipmmu_gp: mmu@e62a0000 {
 		icram0:	sram@e63a0000 {
 			compatible = "mmio-sram";
 			reg = <0 0xe63a0000 0 0x12000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0 0xe63a0000 0x12000>;
 		};
 
 		icram1:	sram@e63c0000 {
@@ -417,6 +420,9 @@ smp-sram@0 {
 		icram2:	sram@e6300000 {
 			compatible = "mmio-sram";
 			reg = <0 0xe6300000 0 0x40000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0 0xe6300000 0x40000>;
 		};
 
 		/* The memory map in the User's Manual maps the cores to
@@ -600,8 +606,8 @@ usb_dmac0: dma-controller@e65a0000 {
 			compatible = "renesas,r8a7743-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65a0000 0 0x100>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 330>;
 			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
@@ -614,8 +620,8 @@ usb_dmac1: dma-controller@e65b0000 {
 			compatible = "renesas,r8a7743-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65b0000 0 0x100>;
-			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 331>;
 			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
@@ -628,22 +634,22 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a7743",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x20000>;
-			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -661,22 +667,22 @@ dmac1: dma-controller@e6720000 {
 			compatible = "renesas,dmac-r8a7743",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6720000 0 0x20000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1366,20 +1372,20 @@ audma0: dma-controller@ec700000 {
 			compatible = "renesas,dmac-r8a7743",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec700000 0 0x10000>;
-			interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1397,20 +1403,20 @@ audma1: dma-controller@ec720000 {
 			compatible = "renesas,dmac-r8a7743",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec720000 0 0x10000>;
-			interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1461,10 +1467,10 @@ pci0: pci@ee090000 {
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges = <0x02000000 0 0xee080000 0 0xee080000 0 0x00010000>;
-			interrupt-map-mask = <0xff00 0 0 0x7>;
-			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
-					 0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
-					 0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map-mask = <0xf800 0 0 0x7>;
+			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+					<0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 
 			usb@1,0 {
 				reg = <0x800 0 0 0 0>;
@@ -1496,10 +1502,10 @@ pci1: pci@ee0d0000 {
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges = <0x02000000 0 0xee0c0000 0 0xee0c0000 0 0x00010000>;
-			interrupt-map-mask = <0xff00 0 0 0x7>;
-			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
-					 0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
-					 0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map-mask = <0xf800 0 0 0x7>;
+			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+					<0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
 			usb@1,0 {
 				reg = <0x10800 0 0 0 0>;
@@ -1611,13 +1617,13 @@ pciec: pcie@fe000000 {
 			#size-cells = <2>;
 			bus-range = <0x00 0xff>;
 			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
-				  0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
-				  0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
-				  0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000>,
+				 <0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000>,
+				 <0x02000000 0 0x30000000 0 0x30000000 0 0x08000000>,
+				 <0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
 			/* Map all possible DDR as inbound ranges */
-			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000
-				      0x43000000 2 0x00000000 2 0x00000000 1 0x00000000>;
+			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>,
+				     <0x43000000 2 0x00000000 2 0x00000000 1 0x00000000>;
 			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/r8a7744.dtsi b/arch/arm/boot/dts/r8a7744.dtsi
index fa74a26..1c82dd0 100644
--- a/arch/arm/boot/dts/r8a7744.dtsi
+++ b/arch/arm/boot/dts/r8a7744.dtsi
@@ -399,6 +399,9 @@ ipmmu_gp: mmu@e62a0000 {
 		icram0:	sram@e63a0000 {
 			compatible = "mmio-sram";
 			reg = <0 0xe63a0000 0 0x12000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0 0xe63a0000 0x12000>;
 		};
 
 		icram1:	sram@e63c0000 {
@@ -417,6 +420,9 @@ smp-sram@0 {
 		icram2:	sram@e6300000 {
 			compatible = "mmio-sram";
 			reg = <0 0xe6300000 0 0x40000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0 0xe6300000 0x40000>;
 		};
 
 		/* The memory map in the User's Manual maps the cores to
@@ -600,8 +606,8 @@ usb_dmac0: dma-controller@e65a0000 {
 			compatible = "renesas,r8a7744-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65a0000 0 0x100>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 330>;
 			power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
@@ -614,8 +620,8 @@ usb_dmac1: dma-controller@e65b0000 {
 			compatible = "renesas,r8a7744-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65b0000 0 0x100>;
-			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 331>;
 			power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
@@ -628,22 +634,22 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a7744",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x20000>;
-			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -661,22 +667,22 @@ dmac1: dma-controller@e6720000 {
 			compatible = "renesas,dmac-r8a7744",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6720000 0 0x20000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1366,20 +1372,20 @@ audma0: dma-controller@ec700000 {
 			compatible = "renesas,dmac-r8a7744",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec700000 0 0x10000>;
-			interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1397,20 +1403,20 @@ audma1: dma-controller@ec720000 {
 			compatible = "renesas,dmac-r8a7744",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec720000 0 0x10000>;
-			interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1461,10 +1467,10 @@ pci0: pci@ee090000 {
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges = <0x02000000 0 0xee080000 0 0xee080000 0 0x00010000>;
-			interrupt-map-mask = <0xff00 0 0 0x7>;
-			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
-					 0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
-					 0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map-mask = <0xf800 0 0 0x7>;
+			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+					<0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 
 			usb@1,0 {
 				reg = <0x800 0 0 0 0>;
@@ -1496,10 +1502,10 @@ pci1: pci@ee0d0000 {
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges = <0x02000000 0 0xee0c0000 0 0xee0c0000 0 0x00010000>;
-			interrupt-map-mask = <0xff00 0 0 0x7>;
-			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
-					 0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
-					 0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map-mask = <0xf800 0 0 0x7>;
+			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+					<0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
 			usb@1,0 {
 				reg = <0x10800 0 0 0 0>;
@@ -1597,13 +1603,13 @@ pciec: pcie@fe000000 {
 			#size-cells = <2>;
 			bus-range = <0x00 0xff>;
 			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
-				  0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
-				  0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
-				  0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000>,
+				 <0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000>,
+				 <0x02000000 0 0x30000000 0 0x30000000 0 0x08000000>,
+				 <0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
 			/* Map all possible DDR as inbound ranges */
-			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000
-				      0x43000000 2 0x00000000 2 0x00000000 1 0x00000000>;
+			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>,
+				     <0x43000000 2 0x00000000 2 0x00000000 1 0x00000000>;
 			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts b/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts
index ce6603b..58d369a 100644
--- a/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts
+++ b/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts
@@ -76,8 +76,7 @@ vccq_sdhi0: regulator-vccq-sdhi0 {
 
 		gpios = <&gpio0 20 GPIO_ACTIVE_LOW>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 };
 
diff --git a/arch/arm/boot/dts/r8a7745.dtsi b/arch/arm/boot/dts/r8a7745.dtsi
index c53f7ff..3f88a7e 100644
--- a/arch/arm/boot/dts/r8a7745.dtsi
+++ b/arch/arm/boot/dts/r8a7745.dtsi
@@ -363,6 +363,9 @@ ipmmu_gp: mmu@e62a0000 {
 		icram0:	sram@e63a0000 {
 			compatible = "mmio-sram";
 			reg = <0 0xe63a0000 0 0x12000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0 0xe63a0000 0x12000>;
 		};
 
 		icram1:	sram@e63c0000 {
@@ -381,6 +384,9 @@ smp-sram@0 {
 		icram2:	sram@e6300000 {
 			compatible = "mmio-sram";
 			reg = <0 0xe6300000 0 0x40000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0 0xe6300000 0x40000>;
 		};
 		i2c0: i2c@e6508000 {
 			#address-cells = <1>;
@@ -543,8 +549,8 @@ usb_dmac0: dma-controller@e65a0000 {
 			compatible = "renesas,r8a7745-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65a0000 0 0x100>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 330>;
 			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
@@ -557,8 +563,8 @@ usb_dmac1: dma-controller@e65b0000 {
 			compatible = "renesas,r8a7745-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65b0000 0 0x100>;
-			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 331>;
 			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
@@ -571,22 +577,22 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a7745",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x20000>;
-			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -604,22 +610,22 @@ dmac1: dma-controller@e6720000 {
 			compatible = "renesas,dmac-r8a7745",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6720000 0 0x20000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1293,20 +1299,20 @@ audma0: dma-controller@ec700000 {
 			compatible = "renesas,dmac-r8a7745",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec700000 0 0x10000>;
-			interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1337,10 +1343,10 @@ pci0: pci@ee090000 {
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges = <0x02000000 0 0xee080000 0 0xee080000 0 0x00010000>;
-			interrupt-map-mask = <0xff00 0 0 0x7>;
-			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
-					 0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
-					 0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map-mask = <0xf800 0 0 0x7>;
+			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+					<0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 
 			usb@1,0 {
 				reg = <0x800 0 0 0 0>;
@@ -1372,10 +1378,10 @@ pci1: pci@ee0d0000 {
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges = <0x02000000 0 0xee0c0000 0 0xee0c0000 0 0x00010000>;
-			interrupt-map-mask = <0xff00 0 0 0x7>;
-			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
-					 0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
-					 0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map-mask = <0xf800 0 0 0x7>;
+			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+					<0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
 			usb@1,0 {
 				reg = <0x10800 0 0 0 0>;
diff --git a/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts b/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts
index 450efe9..8ac61b5 100644
--- a/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts
+++ b/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts
@@ -65,8 +65,7 @@ vccq_sdhi2: regulator-vccq-sdhi2 {
 
 		gpios = <&gpio2 24 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 };
 
diff --git a/arch/arm/boot/dts/r8a77470.dtsi b/arch/arm/boot/dts/r8a77470.dtsi
index 51806c7..6efcef1 100644
--- a/arch/arm/boot/dts/r8a77470.dtsi
+++ b/arch/arm/boot/dts/r8a77470.dtsi
@@ -242,6 +242,9 @@ irqc: interrupt-controller@e61c0000 {
 		icram0:	sram@e63a0000 {
 			compatible = "mmio-sram";
 			reg = <0 0xe63a0000 0 0x12000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0 0xe63a0000 0x12000>;
 		};
 
 		icram1:	sram@e63c0000 {
@@ -260,6 +263,9 @@ smp-sram@0 {
 		icram2:	sram@e6300000 {
 			compatible = "mmio-sram";
 			reg = <0 0xe6300000 0 0x20000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0 0xe6300000 0x20000>;
 		};
 
 		i2c0: i2c@e6508000 {
@@ -407,8 +413,8 @@ usb_dmac00: dma-controller@e65a0000 {
 			compatible = "renesas,r8a77470-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65a0000 0 0x100>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 330>;
 			power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
@@ -421,8 +427,8 @@ usb_dmac10: dma-controller@e65b0000 {
 			compatible = "renesas,r8a77470-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65b0000 0 0x100>;
-			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 331>;
 			power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
@@ -435,8 +441,8 @@ usb_dmac01: dma-controller@e65a8000 {
 			compatible = "renesas,r8a77470-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65a8000 0 0x100>;
-			interrupts = <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 326>;
 			power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
@@ -449,8 +455,8 @@ usb_dmac11: dma-controller@e65b8000 {
 			compatible = "renesas,r8a77470-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65b8000 0 0x100>;
-			interrupts = <GIC_SPI 292 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 292 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 292 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 292 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 327>;
 			power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
@@ -463,22 +469,22 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a77470",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x20000>;
-			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -496,22 +502,22 @@ dmac1: dma-controller@e6720000 {
 			compatible = "renesas,dmac-r8a77470",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6720000 0 0x20000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi
index 10d996d..593c6df 100644
--- a/arch/arm/boot/dts/r8a7778.dtsi
+++ b/arch/arm/boot/dts/r8a7778.dtsi
@@ -79,10 +79,10 @@ irqpin: interrupt-controller@fe78001c {
 			<0xfe780024 4>,
 			<0xfe780044 4>,
 			<0xfe780064 4>;
-		interrupts =   <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH
-				GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH
-				GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH
-				GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
 		sense-bitfield-width = <2>;
 	};
 
@@ -498,14 +498,17 @@ cpg_clocks: cpg_clocks@ffc80000 {
 		audio_clk_a: audio_clk_a {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
+			clock-frequency = <0>;
 		};
 		audio_clk_b: audio_clk_b {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
+			clock-frequency = <0>;
 		};
 		audio_clk_c: audio_clk_c {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
+			clock-frequency = <0>;
 		};
 
 		/* Fixed ratio clocks */
diff --git a/arch/arm/boot/dts/r8a7779-marzen.dts b/arch/arm/boot/dts/r8a7779-marzen.dts
index c755f0b..d2240b8 100644
--- a/arch/arm/boot/dts/r8a7779-marzen.dts
+++ b/arch/arm/boot/dts/r8a7779-marzen.dts
@@ -48,8 +48,7 @@ vccq_sdhi0: regulator-vccq-sdhi0 {
 
 		gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	ethernet@18000000 {
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index ebf5b7c..beb9885 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -68,6 +68,14 @@ gic: interrupt-controller@f0001000 {
 		      <0xf0000100 0x100>;
 	};
 
+	timer@f0000200 {
+		compatible = "arm,cortex-a9-global-timer";
+		reg = <0xf0000200 0x100>;
+		interrupts = <GIC_PPI 11
+			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+		clocks = <&cpg_clocks R8A7779_CLK_ZS>;
+	};
+
 	timer@f0000600 {
 		compatible = "arm,cortex-a9-twd-timer";
 		reg = <0xf0000600 0x20>;
@@ -164,10 +172,10 @@ irqpin0: interrupt-controller@fe78001c {
 			<0xfe780044 4>,
 			<0xfe780064 4>,
 			<0xfe780000 4>;
-		interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
 		sense-bitfield-width = <2>;
 	};
 
diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
index 6ec2cf7..097fd93 100644
--- a/arch/arm/boot/dts/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/r8a7790-lager.dts
@@ -150,8 +150,7 @@ vccq_sdhi0: regulator-vccq-sdhi0 {
 
 		gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	vcc_sdhi2: regulator-vcc-sdhi2 {
@@ -174,8 +173,7 @@ vccq_sdhi2: regulator-vccq-sdhi2 {
 
 		gpios = <&gpio5 30 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	audio_clock: audio_clock {
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 5a27477..334ba19 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -487,6 +487,9 @@ ipmmu_rt: mmu@ffc80000 {
 		icram0:	sram@e63a0000 {
 			compatible = "mmio-sram";
 			reg = <0 0xe63a0000 0 0x12000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0 0xe63a0000 0x12000>;
 		};
 
 		icram1:	sram@e63c0000 {
@@ -669,8 +672,8 @@ usb_dmac0: dma-controller@e65a0000 {
 			compatible = "renesas,r8a7790-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65a0000 0 0x100>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 330>;
 			power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
@@ -683,8 +686,8 @@ usb_dmac1: dma-controller@e65b0000 {
 			compatible = "renesas,r8a7790-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65b0000 0 0x100>;
-			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 331>;
 			power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
@@ -697,22 +700,22 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a7790",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x20000>;
-			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -730,22 +733,22 @@ dmac1: dma-controller@e6720000 {
 			compatible = "renesas,dmac-r8a7790",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6720000 0 0x20000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1300,20 +1303,20 @@ audma0: dma-controller@ec700000 {
 			compatible = "renesas,dmac-r8a7790",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec700000 0 0x10000>;
-			interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1331,20 +1334,20 @@ audma1: dma-controller@ec720000 {
 			compatible = "renesas,dmac-r8a7790",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec720000 0 0x10000>;
-			interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1388,10 +1391,10 @@ pci0: pci@ee090000 {
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges = <0x02000000 0 0xee080000 0 0xee080000 0 0x00010000>;
-			interrupt-map-mask = <0xff00 0 0 0x7>;
-			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
-					 0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
-					 0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map-mask = <0xf800 0 0 0x7>;
+			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+					<0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 
 			usb@1,0 {
 				reg = <0x800 0 0 0 0>;
@@ -1423,10 +1426,10 @@ pci1: pci@ee0b0000 {
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges = <0x02000000 0 0xee0a0000 0 0xee0a0000 0 0x00010000>;
-			interrupt-map-mask = <0xff00 0 0 0x7>;
-			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH
-					 0x0800 0 0 1 &gic GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH
-					 0x1000 0 0 2 &gic GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map-mask = <0xf800 0 0 0x7>;
+			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0800 0 0 1 &gic GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+					<0x1000 0 0 2 &gic GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pci2: pci@ee0d0000 {
@@ -1446,10 +1449,10 @@ pci2: pci@ee0d0000 {
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges = <0x02000000 0 0xee0c0000 0 0xee0c0000 0 0x00010000>;
-			interrupt-map-mask = <0xff00 0 0 0x7>;
-			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
-					 0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
-					 0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map-mask = <0xf800 0 0 0x7>;
+			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+					<0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
 			usb@1,0 {
 				reg = <0x20800 0 0 0 0>;
@@ -1614,13 +1617,13 @@ pciec: pcie@fe000000 {
 			#size-cells = <2>;
 			bus-range = <0x00 0xff>;
 			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
-				  0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
-				  0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
-				  0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000>,
+				 <0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000>,
+				 <0x02000000 0 0x30000000 0 0x30000000 0 0x08000000>,
+				 <0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
 			/* Map all possible DDR as inbound ranges */
-			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000
-				      0x43000000 1 0x80000000 1 0x80000000 0 0x80000000>;
+			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>,
+				     <0x43000000 1 0x80000000 1 0x80000000 0 0x80000000>;
 			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index af6bd8f..2b096d5 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -193,8 +193,7 @@ vccq_sdhi0: regulator-vccq-sdhi0 {
 
 		gpios = <&gpio2 12 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	vcc_sdhi1: regulator-vcc-sdhi1 {
@@ -217,8 +216,7 @@ vccq_sdhi1: regulator-vccq-sdhi1 {
 
 		gpios = <&gpio2 13 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	vcc_sdhi2: regulator-vcc-sdhi2 {
@@ -241,8 +239,7 @@ vccq_sdhi2: regulator-vccq-sdhi2 {
 
 		gpios = <&gpio2 26 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	audio_clock: audio_clock {
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
index d6cf16a..f9ece7a 100644
--- a/arch/arm/boot/dts/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/r8a7791-porter.dts
@@ -63,8 +63,7 @@ vccq_sdhi0: regulator-vccq-sdhi0 {
 
 		gpios = <&gpio2 12 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	vcc_sdhi2: regulator-vcc-sdhi2 {
@@ -85,8 +84,7 @@ vccq_sdhi2: regulator-vccq-sdhi2 {
 
 		gpios = <&gpio2 26 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	hdmi-out {
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 6f87550..59a55e8 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -420,6 +420,9 @@ ipmmu_gp: mmu@e62a0000 {
 		icram0:	sram@e63a0000 {
 			compatible = "mmio-sram";
 			reg = <0 0xe63a0000 0 0x12000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0 0xe63a0000 0x12000>;
 		};
 
 		icram1:	sram@e63c0000 {
@@ -618,8 +621,8 @@ usb_dmac0: dma-controller@e65a0000 {
 			compatible = "renesas,r8a7791-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65a0000 0 0x100>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 330>;
 			power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
@@ -632,8 +635,8 @@ usb_dmac1: dma-controller@e65b0000 {
 			compatible = "renesas,r8a7791-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65b0000 0 0x100>;
-			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 331>;
 			power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
@@ -646,22 +649,22 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a7791",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x20000>;
-			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -679,22 +682,22 @@ dmac1: dma-controller@e6720000 {
 			compatible = "renesas,dmac-r8a7791",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6720000 0 0x20000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1338,20 +1341,20 @@ audma0: dma-controller@ec700000 {
 			compatible = "renesas,dmac-r8a7791",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec700000 0 0x10000>;
-			interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1369,20 +1372,20 @@ audma1: dma-controller@ec720000 {
 			compatible = "renesas,dmac-r8a7791",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec720000 0 0x10000>;
-			interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1426,10 +1429,10 @@ pci0: pci@ee090000 {
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges = <0x02000000 0 0xee080000 0 0xee080000 0 0x00010000>;
-			interrupt-map-mask = <0xff00 0 0 0x7>;
-			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
-					 0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
-					 0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map-mask = <0xf800 0 0 0x7>;
+			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+					<0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 
 			usb@1,0 {
 				reg = <0x800 0 0 0 0>;
@@ -1461,10 +1464,10 @@ pci1: pci@ee0d0000 {
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges = <0x02000000 0 0xee0c0000 0 0xee0c0000 0 0x00010000>;
-			interrupt-map-mask = <0xff00 0 0 0x7>;
-			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
-					 0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
-					 0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map-mask = <0xf800 0 0 0x7>;
+			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+					<0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
 			usb@1,0 {
 				reg = <0x10800 0 0 0 0>;
@@ -1598,13 +1601,13 @@ pciec: pcie@fe000000 {
 			#size-cells = <2>;
 			bus-range = <0x00 0xff>;
 			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
-				  0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
-				  0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
-				  0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000>,
+				 <0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000>,
+				 <0x02000000 0 0x30000000 0 0x30000000 0 0x08000000>,
+				 <0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
 			/* Map all possible DDR as inbound ranges */
-			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000
-				      0x43000000 2 0x00000000 2 0x00000000 1 0x00000000>;
+			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>,
+				     <0x43000000 2 0x00000000 2 0x00000000 1 0x00000000>;
 			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi
index c4ea2d6..39af16ca 100644
--- a/arch/arm/boot/dts/r8a7792.dtsi
+++ b/arch/arm/boot/dts/r8a7792.dtsi
@@ -345,6 +345,9 @@ irqc: interrupt-controller@e61c0000 {
 		icram0:	sram@e63a0000 {
 			compatible = "mmio-sram";
 			reg = <0 0xe63a0000 0 0x12000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0 0xe63a0000 0x12000>;
 		};
 
 		icram1:	sram@e63c0000 {
@@ -466,22 +469,22 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a7792",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x20000>;
-			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -499,22 +502,22 @@ dmac1: dma-controller@e6720000 {
 			compatible = "renesas,dmac-r8a7792",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6720000 0 0x20000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
index 48fbeb6..22ca7cd 100644
--- a/arch/arm/boot/dts/r8a7793-gose.dts
+++ b/arch/arm/boot/dts/r8a7793-gose.dts
@@ -179,8 +179,7 @@ vccq_sdhi0: regulator-vccq-sdhi0 {
 
 		gpios = <&gpio2 12 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	vcc_sdhi1: regulator-vcc-sdhi1 {
@@ -203,8 +202,7 @@ vccq_sdhi1: regulator-vccq-sdhi1 {
 
 		gpios = <&gpio2 13 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	vcc_sdhi2: regulator-vcc-sdhi2 {
@@ -227,8 +225,7 @@ vccq_sdhi2: regulator-vccq-sdhi2 {
 
 		gpios = <&gpio2 26 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	audio_clock: audio_clock {
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index bf05110..eef035c 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -406,6 +406,9 @@ ipmmu_gp: mmu@e62a0000 {
 		icram0:	sram@e63a0000 {
 			compatible = "mmio-sram";
 			reg = <0 0xe63a0000 0 0x12000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0 0xe63a0000 0x12000>;
 		};
 
 		icram1:	sram@e63c0000 {
@@ -565,22 +568,22 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a7793",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x20000>;
-			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -598,22 +601,22 @@ dmac1: dma-controller@e6720000 {
 			compatible = "renesas,dmac-r8a7793",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6720000 0 0x20000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1166,20 +1169,20 @@ audma0: dma-controller@ec700000 {
 			compatible = "renesas,dmac-r8a7793",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec700000 0 0x10000>;
-			interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1197,20 +1200,20 @@ audma1: dma-controller@ec720000 {
 			compatible = "renesas,dmac-r8a7793",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec720000 0 0x10000>;
-			interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
index 1d22fcd..f79fce7 100644
--- a/arch/arm/boot/dts/r8a7794-alt.dts
+++ b/arch/arm/boot/dts/r8a7794-alt.dts
@@ -60,8 +60,7 @@ vccq_sdhi0: regulator-vccq-sdhi0 {
 
 		gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	vcc_sdhi1: regulator-vcc-sdhi1 {
@@ -84,8 +83,7 @@ vccq_sdhi1: regulator-vccq-sdhi1 {
 
 		gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	lbsc {
diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
index b3177ae..2c16ad8 100644
--- a/arch/arm/boot/dts/r8a7794-silk.dts
+++ b/arch/arm/boot/dts/r8a7794-silk.dts
@@ -126,8 +126,7 @@ vccq_sdhi1: regulator-vccq-sdhi1 {
 
 		gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	vga-encoder {
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index 8d797d3..05ef79c 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -351,6 +351,9 @@ ipmmu_gp: mmu@e62a0000 {
 		icram0:	sram@e63a0000 {
 			compatible = "mmio-sram";
 			reg = <0 0xe63a0000 0 0x12000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0 0xe63a0000 0x12000>;
 		};
 
 		icram1:	sram@e63c0000 {
@@ -527,22 +530,22 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a7794",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x20000>;
-			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -560,22 +563,22 @@ dmac1: dma-controller@e6720000 {
 			compatible = "renesas,dmac-r8a7794",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6720000 0 0x20000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1132,20 +1135,20 @@ audma0: dma-controller@ec700000 {
 			compatible = "renesas,dmac-r8a7794",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec700000 0 0x10000>;
-			interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3", "ch4",
 					  "ch5", "ch6", "ch7", "ch8", "ch9",
@@ -1176,10 +1179,10 @@ pci0: pci@ee090000 {
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges = <0x02000000 0 0xee080000 0 0xee080000 0 0x00010000>;
-			interrupt-map-mask = <0xff00 0 0 0x7>;
-			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
-					 0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
-					 0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map-mask = <0xf800 0 0 0x7>;
+			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+					<0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 
 			usb@1,0 {
 				reg = <0x800 0 0 0 0>;
@@ -1211,10 +1214,10 @@ pci1: pci@ee0d0000 {
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
 			ranges = <0x02000000 0 0xee0c0000 0 0xee0c0000 0 0x00010000>;
-			interrupt-map-mask = <0xff00 0 0 0x7>;
-			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
-					 0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
-					 0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map-mask = <0xf800 0 0 0x7>;
+			interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+					<0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
 			usb@1,0 {
 				reg = <0x10800 0 0 0 0>;
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index c70182c..cf36e25 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -224,7 +224,7 @@ emac: ethernet@10200000 {
 		status = "disabled";
 	};
 
-	sdmmc: dwmmc@10214000 {
+	sdmmc: mmc@10214000 {
 		compatible = "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x10214000 0x4000>;
 		clock-frequency = <37500000>;
@@ -238,7 +238,7 @@ sdmmc: dwmmc@10214000 {
 		status = "disabled";
 	};
 
-	sdio: dwmmc@10218000 {
+	sdio: mmc@10218000 {
 		compatible = "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x10218000 0x4000>;
 		max-frequency = <37500000>;
@@ -252,7 +252,7 @@ sdio: dwmmc@10218000 {
 		status = "disabled";
 	};
 
-	emmc: dwmmc@1021c000 {
+	emmc: mmc@1021c000 {
 		compatible = "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x1021c000 0x4000>;
 		interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/rk3188-bqedison2qc.dts b/arch/arm/boot/dts/rk3188-bqedison2qc.dts
index c8b62bb..ad1afd4 100644
--- a/arch/arm/boot/dts/rk3188-bqedison2qc.dts
+++ b/arch/arm/boot/dts/rk3188-bqedison2qc.dts
@@ -466,9 +466,12 @@ &mmc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&sd1_clk>, <&sd1_cmd>, <&sd1_bus4>;
 	vmmcq-supply = <&vccio_wl>;
+	#address-cells = <1>;
+	#size-cells = <0>;
 	status = "okay";
 
 	brcmf: wifi@1 {
+		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 		interrupt-parent = <&gpio3>;
 		interrupts = <RK_PD2 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index 340ed6c..4e90efd 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -662,7 +662,7 @@ hdmi_in_vop: endpoint@0 {
 		};
 	};
 
-	sdmmc: dwmmc@30000000 {
+	sdmmc: mmc@30000000 {
 		compatible = "rockchip,rk3228-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x30000000 0x4000>;
 		interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
@@ -675,7 +675,7 @@ sdmmc: dwmmc@30000000 {
 		status = "disabled";
 	};
 
-	sdio: dwmmc@30010000 {
+	sdio: mmc@30010000 {
 		compatible = "rockchip,rk3228-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x30010000 0x4000>;
 		interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
@@ -688,7 +688,7 @@ sdio: dwmmc@30010000 {
 		status = "disabled";
 	};
 
-	emmc: dwmmc@30020000 {
+	emmc: mmc@30020000 {
 		compatible = "rockchip,rk3228-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x30020000 0x4000>;
 		interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/rk3288-evb.dtsi b/arch/arm/boot/dts/rk3288-evb.dtsi
index 2afd686..018802d 100644
--- a/arch/arm/boot/dts/rk3288-evb.dtsi
+++ b/arch/arm/boot/dts/rk3288-evb.dtsi
@@ -97,7 +97,7 @@ ext_gmac: external-gmac-clock {
 	};
 
 	panel: panel {
-		compatible = "lg,lp079qx1-sp0v", "simple-panel";
+		compatible = "lg,lp079qx1-sp0v";
 		backlight = <&backlight>;
 		enable-gpios = <&gpio7 RK_PA4 GPIO_ACTIVE_HIGH>;
 		pinctrl-0 = <&lcd_cs>;
diff --git a/arch/arm/boot/dts/rk3288-tinker.dtsi b/arch/arm/boot/dts/rk3288-tinker.dtsi
index 0aeef23..312582c 100644
--- a/arch/arm/boot/dts/rk3288-tinker.dtsi
+++ b/arch/arm/boot/dts/rk3288-tinker.dtsi
@@ -113,6 +113,17 @@ &cpu0 {
 	cpu0-supply = <&vdd_cpu>;
 };
 
+&cpu_opp_table {
+	opp-1704000000 {
+		opp-hz = /bits/ 64 <1704000000>;
+		opp-microvolt = <1350000>;
+	};
+	opp-1800000000 {
+		opp-hz = /bits/ 64 <1800000000>;
+		opp-microvolt = <1400000>;
+	};
+};
+
 &gmac {
 	assigned-clocks = <&cru SCLK_MAC>;
 	assigned-clock-parents = <&ext_gmac>;
@@ -175,7 +186,7 @@ vdd_cpu: DCDC_REG1 {
 				regulator-always-on;
 				regulator-boot-on;
 				regulator-min-microvolt = <750000>;
-				regulator-max-microvolt = <1350000>;
+				regulator-max-microvolt = <1400000>;
 				regulator-name = "vdd_arm";
 				regulator-ramp-delay = <6000>;
 				regulator-state-mem {
diff --git a/arch/arm/boot/dts/rk3288-veyron-brain.dts b/arch/arm/boot/dts/rk3288-veyron-brain.dts
index 406146c..aa33d09 100644
--- a/arch/arm/boot/dts/rk3288-veyron-brain.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-brain.dts
@@ -7,6 +7,7 @@
 
 /dts-v1/;
 #include "rk3288-veyron.dtsi"
+#include "rk3288-veyron-broadcom-bluetooth.dtsi"
 
 / {
 	model = "Google Brain";
@@ -40,6 +41,14 @@ vcc5_host2: vcc5-host2-regulator {
 };
 
 &pinctrl {
+	pinctrl-names = "default";
+	pinctrl-0 = <
+		/* Common for sleep and wake, but no owners */
+		&ddr0_retention
+		&ddrio_pwroff
+		&global_pwroff
+	>;
+
 	hdmi {
 		vcc50_hdmi_en: vcc50-hdmi-en {
 			rockchip,pins = <7 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
diff --git a/arch/arm/boot/dts/rk3288-veyron-broadcom-bluetooth.dtsi b/arch/arm/boot/dts/rk3288-veyron-broadcom-bluetooth.dtsi
new file mode 100644
index 0000000..a10d25a
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-veyron-broadcom-bluetooth.dtsi
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google Veyron (and derivatives) fragment for the Broadcom 43540 bluetooth
+ * chip.
+ *
+ * Copyright 2019 Google, Inc
+ */
+
+&uart0 {
+	bluetooth {
+		pinctrl-names = "default";
+		pinctrl-0 = <&bt_host_wake_l>, <&bt_enable_l>,
+			    <&bt_dev_wake>;
+
+		compatible = "brcm,bcm43540-bt";
+		host-wakeup-gpios	= <&gpio4 RK_PD7 GPIO_ACTIVE_HIGH>;
+		shutdown-gpios		= <&gpio4 RK_PD5 GPIO_ACTIVE_HIGH>;
+		device-wakeup-gpios	= <&gpio4 RK_PD2 GPIO_ACTIVE_HIGH>;
+		max-speed		= <3000000>;
+		brcm,bt-pcm-int-params	= [01 02 00 01 01];
+	};
+};
diff --git a/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi b/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
index ffb60f8..05112c2 100644
--- a/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
@@ -136,27 +136,6 @@ trackpad@15 {
 };
 
 &pinctrl {
-	pinctrl-0 = <
-		/* Common for sleep and wake, but no owners */
-		&ddr0_retention
-		&ddrio_pwroff
-		&global_pwroff
-
-		/* Wake only */
-		&suspend_l_wake
-		&bt_dev_wake_awake
-	>;
-	pinctrl-1 = <
-		/* Common for sleep and wake, but no owners */
-		&ddr0_retention
-		&ddrio_pwroff
-		&global_pwroff
-
-		/* Sleep only */
-		&suspend_l_sleep
-		&bt_dev_wake_sleep
-	>;
-
 	buttons {
 		ap_lid_int_l: ap-lid-int-l {
 			rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>;
diff --git a/arch/arm/boot/dts/rk3288-veyron-edp.dtsi b/arch/arm/boot/dts/rk3288-veyron-edp.dtsi
index 300a7e3..32c0f10 100644
--- a/arch/arm/boot/dts/rk3288-veyron-edp.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron-edp.dtsi
@@ -54,7 +54,7 @@ backlight: backlight {
 	};
 
 	panel: panel {
-		compatible = "innolux,n116bge", "simple-panel";
+		compatible = "innolux,n116bge";
 		status = "okay";
 		power-supply = <&panel_regulator>;
 		backlight = <&backlight>;
diff --git a/arch/arm/boot/dts/rk3288-veyron-fievel.dts b/arch/arm/boot/dts/rk3288-veyron-fievel.dts
index 9a0f550..309b122 100644
--- a/arch/arm/boot/dts/rk3288-veyron-fievel.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-fievel.dts
@@ -18,8 +18,6 @@ / {
 		     "google,veyron-fievel-rev0", "google,veyron-fievel",
 		     "google,veyron", "rockchip,rk3288";
 
-	/delete-node/ bt-activity;
-
 	vccsys: vccsys {
 		compatible = "regulator-fixed";
 		regulator-name = "vccsys";
@@ -215,7 +213,11 @@ &gpio0 {
 			  "PHY_PMEB",
 
 			  "PHY_INT",
-			  "REC_MODE_L",
+			  /*
+			   * RECOVERY_SW_L is Chrome OS ABI.  Schematics call
+			   * it REC_MODE_L.
+			   */
+			  "RECOVERY_SW_L",
 			  "OTP_OUT",
 			  "",
 			  "USB_OTG_POWER_EN",
@@ -382,7 +384,11 @@ &gpio7 {
 			  "PWR_LED1",
 			  "TPM_INT_H",
 			  "SPK_ON",
-			  "FW_WP_AP",
+			  /*
+			   * AP_FLASH_WP_L is Chrome OS ABI.  Schematics call
+			   * it FW_WP_AP.
+			   */
+			  "AP_FLASH_WP_L",
 			  "",
 
 			  "CPU_NMI",
diff --git a/arch/arm/boot/dts/rk3288-veyron-jaq.dts b/arch/arm/boot/dts/rk3288-veyron-jaq.dts
index a4966e5..171ba61 100644
--- a/arch/arm/boot/dts/rk3288-veyron-jaq.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-jaq.dts
@@ -273,6 +273,28 @@ &gpio8 {
 };
 
 &pinctrl {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <
+		/* Common for sleep and wake, but no owners */
+		&ddr0_retention
+		&ddrio_pwroff
+		&global_pwroff
+
+		/* Wake only */
+		&suspend_l_wake
+		&bt_dev_wake_awake
+	>;
+	pinctrl-1 = <
+		/* Common for sleep and wake, but no owners */
+		&ddr0_retention
+		&ddrio_pwroff
+		&global_pwroff
+
+		/* Sleep only */
+		&suspend_l_sleep
+		&bt_dev_wake_sleep
+	>;
+
 	buck-5v {
 		drv_5v: drv-5v {
 			rockchip,pins = <7 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
diff --git a/arch/arm/boot/dts/rk3288-veyron-jerry.dts b/arch/arm/boot/dts/rk3288-veyron-jerry.dts
index a6ee44f..66f00d2 100644
--- a/arch/arm/boot/dts/rk3288-veyron-jerry.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-jerry.dts
@@ -418,6 +418,28 @@ &gpio8 {
 };
 
 &pinctrl {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <
+		/* Common for sleep and wake, but no owners */
+		&ddr0_retention
+		&ddrio_pwroff
+		&global_pwroff
+
+		/* Wake only */
+		&suspend_l_wake
+		&bt_dev_wake_awake
+	>;
+	pinctrl-1 = <
+		/* Common for sleep and wake, but no owners */
+		&ddr0_retention
+		&ddrio_pwroff
+		&global_pwroff
+
+		/* Sleep only */
+		&suspend_l_sleep
+		&bt_dev_wake_sleep
+	>;
+
 	buck-5v {
 		drv_5v: drv-5v {
 			rockchip,pins = <7 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
diff --git a/arch/arm/boot/dts/rk3288-veyron-mickey.dts b/arch/arm/boot/dts/rk3288-veyron-mickey.dts
index 06a6a95..ffd1121 100644
--- a/arch/arm/boot/dts/rk3288-veyron-mickey.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-mickey.dts
@@ -7,6 +7,7 @@
 
 /dts-v1/;
 #include "rk3288-veyron.dtsi"
+#include "rk3288-veyron-broadcom-bluetooth.dtsi"
 
 / {
 	model = "Google Mickey";
@@ -411,6 +412,14 @@ &gpio8 {
 };
 
 &pinctrl {
+	pinctrl-names = "default";
+	pinctrl-0 = <
+		/* Common for sleep and wake, but no owners */
+		&ddr0_retention
+		&ddrio_pwroff
+		&global_pwroff
+	>;
+
 	hdmi {
 		power_hdmi_on: power-hdmi-on {
 			rockchip,pins = <7 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
index c833716..383fad1 100644
--- a/arch/arm/boot/dts/rk3288-veyron-minnie.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
@@ -7,6 +7,7 @@
 
 /dts-v1/;
 #include "rk3288-veyron-chromebook.dtsi"
+#include "rk3288-veyron-broadcom-bluetooth.dtsi"
 
 / {
 	model = "Google Minnie";
@@ -70,7 +71,7 @@ touchscreen@10 {
 };
 
 &panel {
-	compatible = "auo,b101ean01", "simple-panel";
+	compatible = "auo,b101ean01";
 
 	/delete-node/ panel-timing;
 
@@ -344,6 +345,26 @@ &gpio8 {
 };
 
 &pinctrl {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <
+		/* Common for sleep and wake, but no owners */
+		&ddr0_retention
+		&ddrio_pwroff
+		&global_pwroff
+
+		/* Wake only */
+		&suspend_l_wake
+	>;
+	pinctrl-1 = <
+		/* Common for sleep and wake, but no owners */
+		&ddr0_retention
+		&ddrio_pwroff
+		&global_pwroff
+
+		/* Sleep only */
+		&suspend_l_sleep
+	>;
+
 	buck-5v {
 		drv_5v: drv-5v {
 			rockchip,pins = <7 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
diff --git a/arch/arm/boot/dts/rk3288-veyron-pinky.dts b/arch/arm/boot/dts/rk3288-veyron-pinky.dts
index f420499..71e6629 100644
--- a/arch/arm/boot/dts/rk3288-veyron-pinky.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-pinky.dts
@@ -64,6 +64,28 @@ &panel {
 };
 
 &pinctrl {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <
+		/* Common for sleep and wake, but no owners */
+		&ddr0_retention
+		&ddrio_pwroff
+		&global_pwroff
+
+		/* Wake only */
+		&suspend_l_wake
+		&bt_dev_wake_awake
+	>;
+	pinctrl-1 = <
+		/* Common for sleep and wake, but no owners */
+		&ddr0_retention
+		&ddrio_pwroff
+		&global_pwroff
+
+		/* Sleep only */
+		&suspend_l_sleep
+		&bt_dev_wake_sleep
+	>;
+
 	/delete-node/ lcd;
 
 	backlight {
diff --git a/arch/arm/boot/dts/rk3288-veyron-speedy.dts b/arch/arm/boot/dts/rk3288-veyron-speedy.dts
index 2f2989b..e354c61 100644
--- a/arch/arm/boot/dts/rk3288-veyron-speedy.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-speedy.dts
@@ -7,6 +7,7 @@
 
 /dts-v1/;
 #include "rk3288-veyron-chromebook.dtsi"
+#include "rk3288-veyron-broadcom-bluetooth.dtsi"
 #include "cros-ec-sbs.dtsi"
 
 / {
@@ -279,6 +280,26 @@ &gpio8 {
 };
 
 &pinctrl {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <
+		/* Common for sleep and wake, but no owners */
+		&ddr0_retention
+		&ddrio_pwroff
+		&global_pwroff
+
+		/* Wake only */
+		&suspend_l_wake
+	>;
+	pinctrl-1 = <
+		/* Common for sleep and wake, but no owners */
+		&ddr0_retention
+		&ddrio_pwroff
+		&global_pwroff
+
+		/* Sleep only */
+		&suspend_l_sleep
+	>;
+
 	buck-5v {
 		drv_5v: drv-5v {
 			rockchip,pins = <7 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
diff --git a/arch/arm/boot/dts/rk3288-veyron-tiger.dts b/arch/arm/boot/dts/rk3288-veyron-tiger.dts
index bebb230..069f0c2c 100644
--- a/arch/arm/boot/dts/rk3288-veyron-tiger.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-tiger.dts
@@ -53,7 +53,7 @@ touchscreen@10 {
 };
 
 &panel {
-	compatible = "auo,b101ean01", "simple-panel";
+	compatible = "auo,b101ean01";
 
 	/delete-node/ panel-timing;
 
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 7525e3d..54a6838 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -23,30 +23,6 @@ memory {
 		reg = <0x0 0x0 0x0 0x80000000>;
 	};
 
-	bt_activity: bt-activity {
-		compatible = "gpio-keys";
-		pinctrl-names = "default";
-		pinctrl-0 = <&bt_host_wake>;
-
-		/*
-		 * HACK: until we have an LPM driver, we'll use an
-		 * ugly GPIO key to allow Bluetooth to wake from S3.
-		 * This is expected to only be used by BT modules that
-		 * use UART for comms.  For BT modules that talk over
-		 * SDIO we should use a wakeup mechanism related to SDIO.
-		 *
-		 * Use KEY_RESERVED here since that will work as a wakeup but
-		 * doesn't get reported to higher levels (so doesn't confuse
-		 * Chrome).
-		 */
-		bt-wake {
-			label = "BT Wakeup";
-			gpios = <&gpio4 RK_PD7 GPIO_ACTIVE_HIGH>;
-			linux,code = <KEY_RESERVED>;
-			wakeup-source;
-		};
-
-	};
 
 	power_button: power-button {
 		compatible = "gpio-keys";
@@ -82,22 +58,17 @@ sdio_pwrseq: sdio-pwrseq {
 		clocks = <&rk808 RK808_CLKOUT1>;
 		clock-names = "ext_clock";
 		pinctrl-names = "default";
-		pinctrl-0 = <&bt_enable_l>, <&wifi_enable_h>;
+		pinctrl-0 = <&wifi_enable_h>;
 
 		/*
-		 * Depending on the actual card populated GPIO4 D4 and D5
+		 * Depending on the actual card populated GPIO4 D4
 		 * correspond to one of these signals on the module:
 		 *
 		 * D4:
 		 * - SDIO_RESET_L_WL_REG_ON
 		 * - PDN (power down when low)
-		 *
-		 * D5:
-		 * - BT_I2S_WS_BT_RFDISABLE_L
-		 * - No connect
 		 */
-		reset-gpios = <&gpio4 RK_PD4 GPIO_ACTIVE_LOW>,
-			      <&gpio4 RK_PD5 GPIO_ACTIVE_LOW>;
+		reset-gpios = <&gpio4 RK_PD4 GPIO_ACTIVE_LOW>;
 	};
 
 	vcc_5v: vcc-5v {
@@ -481,26 +452,6 @@ &wdt {
 };
 
 &pinctrl {
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <
-		/* Common for sleep and wake, but no owners */
-		&ddr0_retention
-		&ddrio_pwroff
-		&global_pwroff
-
-		/* Wake only */
-		&bt_dev_wake_awake
-	>;
-	pinctrl-1 = <
-		/* Common for sleep and wake, but no owners */
-		&ddr0_retention
-		&ddrio_pwroff
-		&global_pwroff
-
-		/* Sleep only */
-		&bt_dev_wake_sleep
-	>;
-
 	pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
 		bias-disable;
 		drive-strength = <8>;
@@ -622,6 +573,10 @@ bt_dev_wake_sleep: bt-dev-wake-sleep {
 		bt_dev_wake_awake: bt-dev-wake-awake {
 			rockchip,pins = <4 RK_PD2 RK_FUNC_GPIO &pcfg_output_high>;
 		};
+
+		bt_dev_wake: bt-dev-wake {
+			rockchip,pins = <4 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
 	};
 
 	tpm {
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 415c75f..9beb662 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -247,7 +247,7 @@ display-subsystem {
 		ports = <&vopl_out>, <&vopb_out>;
 	};
 
-	sdmmc: dwmmc@ff0c0000 {
+	sdmmc: mmc@ff0c0000 {
 		compatible = "rockchip,rk3288-dw-mshc";
 		max-frequency = <150000000>;
 		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
@@ -261,7 +261,7 @@ sdmmc: dwmmc@ff0c0000 {
 		status = "disabled";
 	};
 
-	sdio0: dwmmc@ff0d0000 {
+	sdio0: mmc@ff0d0000 {
 		compatible = "rockchip,rk3288-dw-mshc";
 		max-frequency = <150000000>;
 		clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>,
@@ -275,7 +275,7 @@ sdio0: dwmmc@ff0d0000 {
 		status = "disabled";
 	};
 
-	sdio1: dwmmc@ff0e0000 {
+	sdio1: mmc@ff0e0000 {
 		compatible = "rockchip,rk3288-dw-mshc";
 		max-frequency = <150000000>;
 		clocks = <&cru HCLK_SDIO1>, <&cru SCLK_SDIO1>,
@@ -289,7 +289,7 @@ sdio1: dwmmc@ff0e0000 {
 		status = "disabled";
 	};
 
-	emmc: dwmmc@ff0f0000 {
+	emmc: mmc@ff0f0000 {
 		compatible = "rockchip,rk3288-dw-mshc";
 		max-frequency = <150000000>;
 		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index 97307a4..241f43e 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -231,7 +231,7 @@ emac: ethernet@10204000 {
 		status = "disabled";
 	};
 
-	mmc0: dwmmc@10214000 {
+	mmc0: mmc@10214000 {
 		compatible = "rockchip,rk2928-dw-mshc";
 		reg = <0x10214000 0x1000>;
 		interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
@@ -245,7 +245,7 @@ mmc0: dwmmc@10214000 {
 		status = "disabled";
 	};
 
-	mmc1: dwmmc@10218000 {
+	mmc1: mmc@10218000 {
 		compatible = "rockchip,rk2928-dw-mshc";
 		reg = <0x10218000 0x1000>;
 		interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
@@ -259,7 +259,7 @@ mmc1: dwmmc@10218000 {
 		status = "disabled";
 	};
 
-	emmc: dwmmc@1021c000 {
+	emmc: mmc@1021c000 {
 		compatible = "rockchip,rk2928-dw-mshc";
 		reg = <0x1021c000 0x1000>;
 		interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/rockchip-radxa-dalang-carrier.dtsi b/arch/arm/boot/dts/rockchip-radxa-dalang-carrier.dtsi
new file mode 100644
index 0000000..df3712a
--- /dev/null
+++ b/arch/arm/boot/dts/rockchip-radxa-dalang-carrier.dtsi
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
+ * Copyright (c) 2019 Radxa Limited
+ * Copyright (c) 2019 Amarula Solutions(India)
+ */
+
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+	chosen {
+		stdout-path = "serial2:1500000n8";
+	};
+};
+
+&gmac {
+	status = "okay";
+};
+
+&i2c1 {
+	status = "okay";
+	i2c-scl-rising-time-ns = <140>;
+	i2c-scl-falling-time-ns = <30>;
+};
+
+&i2c2 {
+	status = "okay";
+	clock-frequency = <400000>;
+
+	hym8563: hym8563@51 {
+		compatible = "haoyu,hym8563";
+		reg = <0x51>;
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+		clock-output-names = "hym8563";
+		pinctrl-names = "default";
+		pinctrl-0 = <&hym8563_int>;
+		interrupt-parent = <&gpio4>;
+		interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
+	};
+};
+
+&pwm0 {
+	status = "okay";
+};
+
+&pwm2 {
+	status = "okay";
+};
+
+&sdmmc {
+	bus-width = <4>;
+	cap-mmc-highspeed;
+	cap-sd-highspeed;
+	cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
+	disable-wp;
+	vqmmc-supply = <&vccio_sd>;
+	max-frequency = <150000000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
+	status = "okay";
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_xfer &uart0_cts>;
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&pinctrl {
+	hym8563 {
+		hym8563_int: hym8563-int {
+			rockchip,pins =
+				<4 RK_PD6 0 &pcfg_pull_up>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/rv1108.dtsi b/arch/arm/boot/dts/rv1108.dtsi
index 5876690e..1fd06e7 100644
--- a/arch/arm/boot/dts/rv1108.dtsi
+++ b/arch/arm/boot/dts/rv1108.dtsi
@@ -456,7 +456,7 @@ cru: clock-controller@20200000 {
 		#reset-cells = <1>;
 	};
 
-	emmc: dwmmc@30110000 {
+	emmc: mmc@30110000 {
 		compatible = "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x30110000 0x4000>;
 		interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
@@ -468,7 +468,7 @@ emmc: dwmmc@30110000 {
 		status = "disabled";
 	};
 
-	sdio: dwmmc@30120000 {
+	sdio: mmc@30120000 {
 		compatible = "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x30120000 0x4000>;
 		interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
@@ -480,7 +480,7 @@ sdio: dwmmc@30120000 {
 		status = "disabled";
 	};
 
-	sdmmc: dwmmc@30130000 {
+	sdmmc: mmc@30130000 {
 		compatible = "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x30130000 0x4000>;
 		interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/s3c2416-smdk2416.dts b/arch/arm/boot/dts/s3c2416-smdk2416.dts
index cb371bf..811bfde 100644
--- a/arch/arm/boot/dts/s3c2416-smdk2416.dts
+++ b/arch/arm/boot/dts/s3c2416-smdk2416.dts
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG SMDK2416 board device tree source
+ * Samsung SMDK2416 board device tree source
  *
  * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
  */
diff --git a/arch/arm/boot/dts/s3c6410-smdk6410.dts b/arch/arm/boot/dts/s3c6410-smdk6410.dts
index 3bf6c45..96267f5 100644
--- a/arch/arm/boot/dts/s3c6410-smdk6410.dts
+++ b/arch/arm/boot/dts/s3c6410-smdk6410.dts
@@ -4,7 +4,7 @@
  *
  * Copyright (c) 2013 Tomasz Figa <tomasz.figa@gmail.com>
  *
- * Device tree source file for SAMSUNG SMDK6410 board which is based on
+ * Device tree source file for Samsung SMDK6410 board which is based on
  * Samsung's S3C6410 SoC.
  */
 
@@ -16,7 +16,7 @@
 #include "s3c6410.dtsi"
 
 / {
-	model = "SAMSUNG SMDK6410 board based on S3C6410";
+	model = "Samsung SMDK6410 board based on S3C6410";
 	compatible = "samsung,mini6410", "samsung,s3c6410";
 
 	memory@50000000 {
diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi
new file mode 100644
index 0000000..326b393
--- /dev/null
+++ b/arch/arm/boot/dts/sam9x60.dtsi
@@ -0,0 +1,691 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * sam9x60.dtsi - Device Tree Include file for Microchip SAM9X60 SoC
+ *
+ * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Sandeep Sheriker M <sandeepsheriker.mallikarjun@microchip.com>
+ */
+
+#include <dt-bindings/dma/at91.h>
+#include <dt-bindings/pinctrl/at91.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/at91.h>
+#include <dt-bindings/mfd/atmel-flexcom.h>
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	model = "Microchip SAM9X60 SoC";
+	compatible = "microchip,sam9x60";
+	interrupt-parent = <&aic>;
+
+	aliases {
+		serial0 = &dbgu;
+		gpio0 = &pioA;
+		gpio1 = &pioB;
+		gpio2 = &pioC;
+		gpio3 = &pioD;
+		tcb0 = &tcb0;
+		tcb1 = &tcb1;
+	};
+
+	cpus {
+		#address-cells = <0>;
+		#size-cells = <0>;
+
+		cpu {
+			compatible = "arm,arm926ej-s";
+			device_type = "cpu";
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x20000000 0x10000000>;
+	};
+
+	clocks {
+		slow_xtal: slow_xtal {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+		};
+
+		main_xtal: main_xtal {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+		};
+	};
+
+	sram: sram@300000 {
+		compatible = "mmio-sram";
+		reg = <0x00300000 0x100000>;
+	};
+
+	ahb {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		usb1: ohci@600000 {
+			compatible = "atmel,at91rm9200-ohci", "usb-ohci";
+			reg = <0x00600000 0x100000>;
+			interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
+			clocks = <&pmc PMC_TYPE_PERIPHERAL 22>, <&pmc PMC_TYPE_PERIPHERAL 22>, <&pmc PMC_TYPE_SYSTEM 6>;
+			clock-names = "ohci_clk", "hclk", "uhpck";
+			status = "disabled";
+		};
+
+		usb2: ehci@700000 {
+			compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
+			reg = <0x00700000 0x100000>;
+			interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
+			clocks = <&pmc PMC_TYPE_CORE PMC_UTMI>, <&pmc PMC_TYPE_PERIPHERAL 22>;
+			clock-names = "usb_clk", "ehci_clk";
+			assigned-clocks = <&pmc PMC_TYPE_CORE PMC_UTMI>;
+			assigned-clock-rates = <480000000>;
+			status = "disabled";
+		};
+
+		ebi: ebi@10000000 {
+			compatible = "microchip,sam9x60-ebi";
+			#address-cells = <2>;
+			#size-cells = <1>;
+			atmel,smc = <&smc>;
+			microchip,sfr = <&sfr>;
+			reg = <0x10000000 0x60000000>;
+			ranges = <0x0 0x0 0x10000000 0x10000000
+				  0x1 0x0 0x20000000 0x10000000
+				  0x2 0x0 0x30000000 0x10000000
+				  0x3 0x0 0x40000000 0x10000000
+				  0x4 0x0 0x50000000 0x10000000
+				  0x5 0x0 0x60000000 0x10000000>;
+			clocks = <&pmc PMC_TYPE_CORE PMC_MCK>;
+			status = "disabled";
+
+			nand_controller: nand-controller {
+				compatible = "microchip,sam9x60-nand-controller";
+				ecc-engine = <&pmecc>;
+				#address-cells = <2>;
+				#size-cells = <1>;
+				ranges;
+				status = "disabled";
+			};
+		};
+
+		sdmmc0: sdio-host@80000000 {
+			compatible = "microchip,sam9x60-sdhci";
+			reg = <0x80000000 0x300>;
+			interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>;
+			clocks = <&pmc PMC_TYPE_PERIPHERAL 12>, <&pmc PMC_TYPE_GCK 12>;
+			clock-names = "hclock", "multclk";
+			assigned-clocks = <&pmc PMC_TYPE_GCK 12>;
+			assigned-clock-rates = <100000000>;
+			status = "disabled";
+		};
+
+		sdmmc1: sdio-host@90000000 {
+			compatible = "microchip,sam9x60-sdhci";
+			reg = <0x90000000 0x300>;
+			interrupts = <26 IRQ_TYPE_LEVEL_HIGH 0>;
+			clocks = <&pmc PMC_TYPE_PERIPHERAL 26>, <&pmc PMC_TYPE_GCK 26>;
+			clock-names = "hclock", "multclk";
+			assigned-clocks = <&pmc PMC_TYPE_GCK 26>;
+			assigned-clock-rates = <100000000>;
+			status = "disabled";
+		};
+
+		apb {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			flx4: flexcom@f0000000 {
+				compatible = "atmel,sama5d2-flexcom";
+				reg = <0xf0000000 0x200>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 13>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0x0 0xf0000000 0x800>;
+				status = "disabled";
+			};
+
+			flx5: flexcom@f0004000 {
+				compatible = "atmel,sama5d2-flexcom";
+				reg = <0xf0004000 0x200>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 14>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0x0 0xf0004000 0x800>;
+				status = "disabled";
+			};
+
+			dma0: dma-controller@f0008000 {
+				compatible = "microchip,sam9x60-dma", "atmel,sama5d4-dma";
+				reg = <0xf0008000 0x1000>;
+				interrupts = <20 IRQ_TYPE_LEVEL_HIGH 0>;
+				#dma-cells = <1>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 20>;
+				clock-names = "dma_clk";
+			};
+
+			ssc: ssc@f0010000 {
+				compatible = "atmel,at91sam9g45-ssc";
+				reg = <0xf0010000 0x4000>;
+				interrupts = <28 IRQ_TYPE_LEVEL_HIGH 5>;
+				dmas = <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(38))>,
+				       <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(39))>;
+				dma-names = "tx", "rx";
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 28>;
+				clock-names = "pclk";
+				status = "disabled";
+			};
+
+			qspi: spi@f0014000 {
+				compatible = "microchip,sam9x60-qspi";
+				reg = <0xf0014000 0x100>, <0x70000000 0x10000000>;
+				reg-names = "qspi_base", "qspi_mmap";
+				interrupts = <35 IRQ_TYPE_LEVEL_HIGH 7>;
+				dmas = <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(26))>,
+				       <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(27))>;
+				dma-names = "tx", "rx";
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 35>, <&pmc PMC_TYPE_SYSTEM 19>;
+				clock-names = "pclk", "qspick";
+				atmel,pmc = <&pmc>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			i2s: i2s@f001c000 {
+				compatible = "microchip,sam9x60-i2smcc";
+				reg = <0xf001c000 0x100>;
+				interrupts = <34 IRQ_TYPE_LEVEL_HIGH 7>;
+				dmas = <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(36))>,
+				       <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(37))>;
+				dma-names = "tx", "rx";
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 34>, <&pmc PMC_TYPE_GCK 34>;
+				clock-names = "pclk", "gclk";
+				status = "disabled";
+			};
+
+			flx11: flexcom@f0020000 {
+				compatible = "atmel,sama5d2-flexcom";
+				reg = <0xf0020000 0x200>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 32>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0x0 0xf0020000 0x800>;
+				status = "disabled";
+			};
+
+			flx12: flexcom@f0024000 {
+				compatible = "atmel,sama5d2-flexcom";
+				reg = <0xf0024000 0x200>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 33>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0x0 0xf0024000 0x800>;
+				status = "disabled";
+			};
+
+			pit64b: timer@f0028000 {
+				compatible = "microchip,sam9x60-pit64b";
+				reg = <0xf0028000 0x100>;
+				interrupts = <37 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 37>, <&pmc PMC_TYPE_GCK 37>;
+				clock-names = "pclk", "gclk";
+			};
+
+			sha: sha@f002c000 {
+				compatible = "atmel,at91sam9g46-sha";
+				reg = <0xf002c000 0x100>;
+				interrupts = <41 IRQ_TYPE_LEVEL_HIGH 0>;
+				dmas = <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(34))>;
+				dma-names = "tx";
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 41>;
+				clock-names = "sha_clk";
+				status = "okay";
+			};
+
+			trng: trng@f0030000 {
+				compatible = "microchip,sam9x60-trng";
+				reg = <0xf0030000 0x100>;
+				interrupts = <38 IRQ_TYPE_LEVEL_HIGH 0>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 38>;
+				status = "okay";
+			};
+
+			aes: aes@f0034000 {
+				compatible = "atmel,at91sam9g46-aes";
+				reg = <0xf0034000 0x100>;
+				interrupts = <39 IRQ_TYPE_LEVEL_HIGH 0>;
+				dmas = <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(32))>,
+				       <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(33))>;
+				dma-names = "tx", "rx";
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 39>;
+				clock-names = "aes_clk";
+				status = "okay";
+			};
+
+			tdes: tdes@f0038000 {
+				compatible = "atmel,at91sam9g46-tdes";
+				reg = <0xf0038000 0x100>;
+				interrupts = <40 IRQ_TYPE_LEVEL_HIGH 0>;
+				dmas = <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(31))>,
+				       <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(30))>;
+				dma-names = "tx", "rx";
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 40>;
+				clock-names = "tdes_clk";
+				status = "okay";
+			};
+
+			classd: classd@f003c000 {
+				compatible = "atmel,sama5d2-classd";
+				reg = <0xf003c000 0x100>;
+				interrupts = <42 IRQ_TYPE_LEVEL_HIGH 7>;
+				dmas = <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(35))>;
+				dma-names = "tx";
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 42>, <&pmc PMC_TYPE_GCK 42>;
+				clock-names = "pclk", "gclk";
+				status = "disabled";
+			};
+
+			can0: can@f8000000 {
+				compatible = "microchip,sam9x60-can", "atmel,at91sam9x5-can";
+				reg = <0xf8000000 0x300>;
+				interrupts = <29 IRQ_TYPE_LEVEL_HIGH 3>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 29>;
+				clock-names = "can_clk";
+				status = "disabled";
+			};
+
+			can1: can@f8004000 {
+				compatible = "microchip,sam9x60-can", "atmel,at91sam9x5-can";
+				reg = <0xf8004000 0x300>;
+				interrupts = <30 IRQ_TYPE_LEVEL_HIGH 3>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 30>;
+				clock-names = "can_clk";
+				status = "disabled";
+			};
+
+			tcb0: timer@f8008000 {
+				compatible = "microchip,sam9x60-tcb", "atmel,at91sam9x5-tcb", "simple-mfd", "syscon";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				reg = <0xf8008000 0x100>;
+				interrupts = <17 IRQ_TYPE_LEVEL_HIGH 0>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 17>, <&clk32k 0>;
+				clock-names = "t0_clk", "slow_clk";
+			};
+
+			tcb1: timer@f800c000 {
+				compatible = "microchip,sam9x60-tcb", "atmel,at91sam9x5-tcb", "simple-mfd", "syscon";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				reg = <0xf800c000 0x100>;
+				interrupts = <45 IRQ_TYPE_LEVEL_HIGH 0>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 45>, <&clk32k 0>;
+				clock-names = "t0_clk", "slow_clk";
+			};
+
+			flx6: flexcom@f8010000 {
+				compatible = "atmel,sama5d2-flexcom";
+				reg = <0xf8010000 0x200>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 9>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0x0 0xf8010000 0x800>;
+				status = "disabled";
+			};
+
+			flx7: flexcom@f8014000 {
+				compatible = "atmel,sama5d2-flexcom";
+				reg = <0xf8014000 0x200>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 10>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0x0 0xf8014000 0x800>;
+				status = "disabled";
+			};
+
+			flx8: flexcom@f8018000 {
+				compatible = "atmel,sama5d2-flexcom";
+				reg = <0xf8018000 0x200>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 11>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0x0 0xf8018000 0x800>;
+				status = "disabled";
+			};
+
+			flx0: flexcom@f801c000 {
+				compatible = "atmel,sama5d2-flexcom";
+				reg = <0xf801c000 0x200>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 5>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0x0 0xf801c000 0x800>;
+				status = "disabled";
+			};
+
+			flx1: flexcom@f8020000 {
+				compatible = "atmel,sama5d2-flexcom";
+				reg = <0xf8020000 0x200>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 6>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0x0 0xf8020000 0x800>;
+				status = "disabled";
+			};
+
+			flx2: flexcom@f8024000 {
+				compatible = "atmel,sama5d2-flexcom";
+				reg = <0xf8024000 0x200>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 7>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0x0 0xf8024000 0x800>;
+				status = "disabled";
+			};
+
+			flx3: flexcom@f8028000 {
+				compatible = "atmel,sama5d2-flexcom";
+				reg = <0xf8028000 0x200>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 8>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0x0 0xf8028000 0x800>;
+				status = "disabled";
+			};
+
+			macb0: ethernet@f802c000 {
+				compatible = "cdns,sam9x60-macb", "cdns,macb";
+				reg = <0xf802c000 0x1000>;
+				interrupts = <24 IRQ_TYPE_LEVEL_HIGH 3>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 24>, <&pmc PMC_TYPE_PERIPHERAL 24>;
+				clock-names = "hclk", "pclk";
+				status = "disabled";
+			};
+
+			macb1: ethernet@f8030000 {
+				compatible = "cdns,sam9x60-macb", "cdns,macb";
+				reg = <0xf8030000 0x1000>;
+				interrupts = <27 IRQ_TYPE_LEVEL_HIGH 3>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 27>, <&pmc PMC_TYPE_PERIPHERAL 27>;
+				clock-names = "hclk", "pclk";
+				status = "disabled";
+			};
+
+			pwm0: pwm@f8034000 {
+				compatible = "microchip,sam9x60-pwm";
+				reg = <0xf8034000 0x300>;
+				interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 18>;
+				#pwm-cells = <3>;
+				status="disabled";
+			};
+
+			hlcdc: hlcdc@f8038000 {
+				compatible = "microchip,sam9x60-hlcdc";
+				reg = <0xf8038000 0x4000>;
+				interrupts = <25 IRQ_TYPE_LEVEL_HIGH 0>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 25>, <&pmc PMC_TYPE_GCK 25>, <&clk32k 1>;
+				clock-names = "periph_clk","sys_clk", "slow_clk";
+				assigned-clocks = <&pmc PMC_TYPE_GCK 25>;
+				assigned-clock-parents = <&pmc PMC_TYPE_CORE PMC_MCK>;
+				status = "disabled";
+
+				hlcdc-display-controller {
+					compatible = "atmel,hlcdc-display-controller";
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					port@0 {
+						#address-cells = <1>;
+						#size-cells = <0>;
+						reg = <0>;
+					};
+				};
+
+				hlcdc_pwm: hlcdc-pwm {
+					compatible = "atmel,hlcdc-pwm";
+					#pwm-cells = <3>;
+				};
+			};
+
+			flx9: flexcom@f8040000 {
+				compatible = "atmel,sama5d2-flexcom";
+				reg = <0xf8040000 0x200>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 15>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0x0 0xf8040000 0x800>;
+				status = "disabled";
+			};
+
+			flx10: flexcom@f8044000 {
+				compatible = "atmel,sama5d2-flexcom";
+				reg = <0xf8044000 0x200>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 16>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0x0 0xf8044000 0x800>;
+				status = "disabled";
+			};
+
+			isi: isi@f8048000 {
+				compatible = "microchip,sam9x60-isi", "atmel,at91sam9g45-isi";
+				reg = <0xf8048000 0x100>;
+				interrupts = <43 IRQ_TYPE_LEVEL_HIGH 5>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 43>;
+				clock-names = "isi_clk";
+				status = "disabled";
+				port {
+					#address-cells = <1>;
+					#size-cells = <0>;
+				};
+			};
+
+			adc: adc@f804c000 {
+				compatible = "microchip,sam9x60-adc", "atmel,sama5d2-adc";
+				reg = <0xf804c000 0x100>;
+				interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
+				clock-names = "adc_clk";
+				dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | AT91_XDMAC_DT_PERID(40))>;
+				dma-names = "rx";
+				atmel,min-sample-rate-hz = <200000>;
+				atmel,max-sample-rate-hz = <20000000>;
+				atmel,startup-time-ms = <4>;
+				atmel,trigger-edge-type = <IRQ_TYPE_EDGE_RISING>;
+				#io-channel-cells = <1>;
+				status = "disabled";
+			};
+
+			sfr: sfr@f8050000 {
+				compatible = "microchip,sam9x60-sfr", "syscon";
+				reg = <0xf8050000 0x100>;
+			};
+
+			matrix: matrix@ffffde00 {
+				compatible = "microchip,sam9x60-matrix", "atmel,at91sam9x5-matrix", "syscon";
+				reg = <0xffffde00 0x200>;
+			};
+
+			pmecc: ecc-engine@ffffe000 {
+				compatible = "microchip,sam9x60-pmecc", "atmel,at91sam9g45-pmecc";
+				reg = <0xffffe000 0x300>,
+				      <0xffffe600 0x100>;
+			};
+
+			mpddrc: mpddrc@ffffe800 {
+				compatible = "microchip,sam9x60-ddramc", "atmel,sama5d3-ddramc";
+				reg = <0xffffe800 0x200>;
+				clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_CORE PMC_MCK>;
+				clock-names = "ddrck", "mpddr";
+			};
+
+			smc: smc@ffffea00 {
+				compatible = "microchip,sam9x60-smc", "atmel,at91sam9260-smc", "syscon";
+				reg = <0xffffea00 0x100>;
+			};
+
+			aic: interrupt-controller@fffff100 {
+				compatible = "microchip,sam9x60-aic";
+				#interrupt-cells = <3>;
+				interrupt-controller;
+				reg = <0xfffff100 0x100>;
+				atmel,external-irqs = <31>;
+			};
+
+			dbgu: serial@fffff200 {
+				compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+				reg = <0xfffff200 0x200>;
+				interrupts = <47 IRQ_TYPE_LEVEL_HIGH 7>;
+				dmas = <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(28))>,
+				       <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(29))>;
+				dma-names = "tx", "rx";
+				clocks = <&pmc PMC_TYPE_PERIPHERAL 47>;
+				clock-names = "usart";
+				status = "disabled";
+			};
+
+			pinctrl: pinctrl@fffff400 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				compatible = "microchip,sam9x60-pinctrl", "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
+				ranges = <0xfffff400 0xfffff400 0x800>;
+
+				pioA: gpio@fffff400 {
+					compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
+					reg = <0xfffff400 0x200>;
+					interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>;
+					#gpio-cells = <2>;
+					gpio-controller;
+					interrupt-controller;
+					#interrupt-cells = <2>;
+					clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
+				};
+
+				pioB: gpio@fffff600 {
+					compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
+					reg = <0xfffff600 0x200>;
+					interrupts = <3 IRQ_TYPE_LEVEL_HIGH 1>;
+					#gpio-cells = <2>;
+					gpio-controller;
+					#gpio-lines = <26>;
+					interrupt-controller;
+					#interrupt-cells = <2>;
+					clocks = <&pmc PMC_TYPE_PERIPHERAL 3>;
+				};
+
+				pioC: gpio@fffff800 {
+					compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
+					reg = <0xfffff800 0x200>;
+					interrupts = <4 IRQ_TYPE_LEVEL_HIGH 1>;
+					#gpio-cells = <2>;
+					gpio-controller;
+					interrupt-controller;
+					#interrupt-cells = <2>;
+					clocks = <&pmc PMC_TYPE_PERIPHERAL 4>;
+				};
+
+				pioD: gpio@fffffa00 {
+					compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
+					reg = <0xfffffa00 0x200>;
+					interrupts = <44 IRQ_TYPE_LEVEL_HIGH 1>;
+					#gpio-cells = <2>;
+					gpio-controller;
+					#gpio-lines = <22>;
+					interrupt-controller;
+					#interrupt-cells = <2>;
+					clocks = <&pmc PMC_TYPE_PERIPHERAL 44>;
+				};
+			};
+
+			pmc: pmc@fffffc00 {
+				compatible = "microchip,sam9x60-pmc", "syscon";
+				reg = <0xfffffc00 0x200>;
+				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				#clock-cells = <2>;
+				clocks = <&clk32k 1>, <&clk32k 0>, <&main_xtal>;
+				clock-names = "td_slck", "md_slck", "main_xtal";
+			};
+
+			reset_controller: rstc@fffffe00 {
+				compatible = "microchip,sam9x60-rstc";
+				reg = <0xfffffe00 0x10>;
+				clocks = <&clk32k 0>;
+			};
+
+			shutdown_controller: shdwc@fffffe10 {
+				compatible = "microchip,sam9x60-shdwc";
+				reg = <0xfffffe10 0x10>;
+				clocks = <&clk32k 0>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				atmel,wakeup-rtc-timer;
+				atmel,wakeup-rtt-timer;
+				status = "disabled";
+			};
+
+			pit: timer@fffffe40 {
+				compatible = "atmel,at91sam9260-pit";
+				reg = <0xfffffe40 0x10>;
+				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&pmc PMC_TYPE_CORE PMC_MCK>;
+			};
+
+			clk32k: sckc@fffffe50 {
+				compatible = "microchip,sam9x60-sckc";
+				reg = <0xfffffe50 0x4>;
+				clocks = <&slow_xtal>;
+				#clock-cells = <1>;
+			};
+
+			gpbr: syscon@fffffe60 {
+				compatible = "microchip,sam9x60-gpbr", "atmel,at91sam9260-gpbr", "syscon";
+				reg = <0xfffffe60 0x10>;
+			};
+
+			rtc: rtc@fffffea8 {
+				compatible = "microchip,sam9x60-rtc", "atmel,at91sam9x5-rtc";
+				reg = <0xfffffea8 0x100>;
+				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k 0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 5652048..2012b74 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -300,6 +300,8 @@ sdmmc0: sdio-host@a0000000 {
 			interrupts = <31 IRQ_TYPE_LEVEL_HIGH 0>;
 			clocks = <&pmc PMC_TYPE_PERIPHERAL 31>, <&pmc PMC_TYPE_GCK 31>, <&pmc PMC_TYPE_CORE PMC_MAIN>;
 			clock-names = "hclock", "multclk", "baseclk";
+			assigned-clocks = <&pmc PMC_TYPE_GCK 31>;
+			assigned-clock-rates = <480000000>;
 			status = "disabled";
 		};
 
@@ -309,6 +311,8 @@ sdmmc1: sdio-host@b0000000 {
 			interrupts = <32 IRQ_TYPE_LEVEL_HIGH 0>;
 			clocks = <&pmc PMC_TYPE_PERIPHERAL 32>, <&pmc PMC_TYPE_GCK 32>, <&pmc PMC_TYPE_CORE PMC_MAIN>;
 			clock-names = "hclock", "multclk", "baseclk";
+			assigned-clocks = <&pmc PMC_TYPE_GCK 32>;
+			assigned-clock-rates = <480000000>;
 			status = "disabled";
 		};
 
@@ -615,6 +619,7 @@ pwm0: pwm@f802c000 {
 				interrupts = <38 IRQ_TYPE_LEVEL_HIGH 7>;
 				#pwm-cells = <3>;
 				clocks = <&pmc PMC_TYPE_PERIPHERAL 38>;
+				status = "disabled";
 			};
 
 			sfr: sfr@f8030000 {
@@ -884,9 +889,12 @@ pioA: pinctrl@fc038000 {
 				clocks = <&pmc PMC_TYPE_PERIPHERAL 18>;
 			};
 
-			secumod@fc040000 {
+			pioBU: secumod@fc040000 {
 				compatible = "atmel,sama5d2-secumod", "syscon";
 				reg = <0xfc040000 0x100>;
+
+				gpio-controller;
+				#gpio-cells = <2>;
 			};
 
 			tdes@fc044000 {
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index f770aac..203d40b 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -1188,49 +1188,49 @@ pioE_clk: pioE_clk {
 					usart0_clk: usart0_clk {
 						#clock-cells = <0>;
 						reg = <12>;
-						atmel,clk-output-range = <0 66000000>;
+						atmel,clk-output-range = <0 83000000>;
 					};
 
 					usart1_clk: usart1_clk {
 						#clock-cells = <0>;
 						reg = <13>;
-						atmel,clk-output-range = <0 66000000>;
+						atmel,clk-output-range = <0 83000000>;
 					};
 
 					usart2_clk: usart2_clk {
 						#clock-cells = <0>;
 						reg = <14>;
-						atmel,clk-output-range = <0 66000000>;
+						atmel,clk-output-range = <0 83000000>;
 					};
 
 					usart3_clk: usart3_clk {
 						#clock-cells = <0>;
 						reg = <15>;
-						atmel,clk-output-range = <0 66000000>;
+						atmel,clk-output-range = <0 83000000>;
 					};
 
 					uart0_clk: uart0_clk {
 						#clock-cells = <0>;
 						reg = <16>;
-						atmel,clk-output-range = <0 66000000>;
+						atmel,clk-output-range = <0 83000000>;
 					};
 
 					twi0_clk: twi0_clk {
 						reg = <18>;
 						#clock-cells = <0>;
-						atmel,clk-output-range = <0 16625000>;
+						atmel,clk-output-range = <0 41500000>;
 					};
 
 					twi1_clk: twi1_clk {
 						#clock-cells = <0>;
 						reg = <19>;
-						atmel,clk-output-range = <0 16625000>;
+						atmel,clk-output-range = <0 41500000>;
 					};
 
 					twi2_clk: twi2_clk {
 						#clock-cells = <0>;
 						reg = <20>;
-						atmel,clk-output-range = <0 16625000>;
+						atmel,clk-output-range = <0 41500000>;
 					};
 
 					mci0_clk: mci0_clk {
@@ -1246,19 +1246,19 @@ mci1_clk: mci1_clk {
 					spi0_clk: spi0_clk {
 						#clock-cells = <0>;
 						reg = <24>;
-						atmel,clk-output-range = <0 133000000>;
+						atmel,clk-output-range = <0 166000000>;
 					};
 
 					spi1_clk: spi1_clk {
 						#clock-cells = <0>;
 						reg = <25>;
-						atmel,clk-output-range = <0 133000000>;
+						atmel,clk-output-range = <0 166000000>;
 					};
 
 					tcb0_clk: tcb0_clk {
 						#clock-cells = <0>;
 						reg = <26>;
-						atmel,clk-output-range = <0 133000000>;
+						atmel,clk-output-range = <0 166000000>;
 					};
 
 					pwm_clk: pwm_clk {
@@ -1269,7 +1269,7 @@ pwm_clk: pwm_clk {
 					adc_clk: adc_clk {
 						#clock-cells = <0>;
 						reg = <29>;
-						atmel,clk-output-range = <0 66000000>;
+						atmel,clk-output-range = <0 83000000>;
 					};
 
 					dma0_clk: dma0_clk {
@@ -1300,13 +1300,13 @@ isi_clk: isi_clk {
 					ssc0_clk: ssc0_clk {
 						#clock-cells = <0>;
 						reg = <38>;
-						atmel,clk-output-range = <0 66000000>;
+						atmel,clk-output-range = <0 83000000>;
 					};
 
 					ssc1_clk: ssc1_clk {
 						#clock-cells = <0>;
 						reg = <39>;
-						atmel,clk-output-range = <0 66000000>;
+						atmel,clk-output-range = <0 83000000>;
 					};
 
 					sha_clk: sha_clk {
diff --git a/arch/arm/boot/dts/sama5d3_can.dtsi b/arch/arm/boot/dts/sama5d3_can.dtsi
index cf06a01..2470dd3 100644
--- a/arch/arm/boot/dts/sama5d3_can.dtsi
+++ b/arch/arm/boot/dts/sama5d3_can.dtsi
@@ -36,13 +36,13 @@ periphck {
 					can0_clk: can0_clk {
 						#clock-cells = <0>;
 						reg = <40>;
-						atmel,clk-output-range = <0 66000000>;
+						atmel,clk-output-range = <0 83000000>;
 					};
 
 					can1_clk: can1_clk {
 						#clock-cells = <0>;
 						reg = <41>;
-						atmel,clk-output-range = <0 66000000>;
+						atmel,clk-output-range = <0 83000000>;
 					};
 				};
 			};
diff --git a/arch/arm/boot/dts/sama5d3_tcb1.dtsi b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
index 1584035..215802b8 100644
--- a/arch/arm/boot/dts/sama5d3_tcb1.dtsi
+++ b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
@@ -22,6 +22,7 @@ periphck {
 					tcb1_clk: tcb1_clk {
 						#clock-cells = <0>;
 						reg = <27>;
+						atmel,clk-output-range = <0 166000000>;
 					};
 				};
 			};
diff --git a/arch/arm/boot/dts/sama5d3_uart.dtsi b/arch/arm/boot/dts/sama5d3_uart.dtsi
index 4316bdb..cb62adb 100644
--- a/arch/arm/boot/dts/sama5d3_uart.dtsi
+++ b/arch/arm/boot/dts/sama5d3_uart.dtsi
@@ -41,13 +41,13 @@ periphck {
 					uart0_clk: uart0_clk {
 						#clock-cells = <0>;
 						reg = <16>;
-						atmel,clk-output-range = <0 66000000>;
+						atmel,clk-output-range = <0 83000000>;
 					};
 
 					uart1_clk: uart1_clk {
 						#clock-cells = <0>;
 						reg = <17>;
-						atmel,clk-output-range = <0 66000000>;
+						atmel,clk-output-range = <0 83000000>;
 					};
 				};
 			};
diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
index 3383699..c134154 100644
--- a/arch/arm/boot/dts/sh73a0.dtsi
+++ b/arch/arm/boot/dts/sh73a0.dtsi
@@ -39,11 +39,18 @@ cpu1: cpu@1 {
 		};
 	};
 
+	timer@f0000200 {
+		compatible = "arm,cortex-a9-global-timer";
+		reg = <0xf0000200 0x100>;
+		interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
+		clocks = <&periph_clk>;
+	};
+
 	timer@f0000600 {
 		compatible = "arm,cortex-a9-twd-timer";
 		reg = <0xf0000600 0x20>;
 		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
-		clocks = <&twd_clk>;
+		clocks = <&periph_clk>;
 	};
 
 	gic: interrupt-controller@f0001000 {
@@ -110,14 +117,14 @@ irqpin0: interrupt-controller@e6900000 {
 			<0xe6900020 1>,
 			<0xe6900040 1>,
 			<0xe6900060 1>;
-		interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
 		power-domains = <&pd_a4s>;
 		control-parent;
@@ -132,14 +139,14 @@ irqpin1: interrupt-controller@e6900004 {
 			<0xe6900024 1>,
 			<0xe6900044 1>,
 			<0xe6900064 1>;
-		interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI  9 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
 		power-domains = <&pd_a4s>;
 		control-parent;
@@ -154,14 +161,14 @@ irqpin2: interrupt-controller@e6900008 {
 			<0xe6900028 1>,
 			<0xe6900048 1>,
 			<0xe6900068 1>;
-		interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
 		power-domains = <&pd_a4s>;
 		control-parent;
@@ -176,14 +183,14 @@ irqpin3: interrupt-controller@e690000c {
 			<0xe690002c 1>,
 			<0xe690004c 1>,
 			<0xe690006c 1>;
-		interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
 		power-domains = <&pd_a4s>;
 		control-parent;
@@ -194,10 +201,10 @@ i2c0: i2c@e6820000 {
 		#size-cells = <0>;
 		compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
 		reg = <0xe6820000 0x425>;
-		interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks SH73A0_CLK_IIC0>;
 		power-domains = <&pd_a3sp>;
 		status = "disabled";
@@ -208,10 +215,10 @@ i2c1: i2c@e6822000 {
 		#size-cells = <0>;
 		compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
 		reg = <0xe6822000 0x425>;
-		interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks SH73A0_CLK_IIC1>;
 		power-domains = <&pd_a3sp>;
 		status = "disabled";
@@ -222,10 +229,10 @@ i2c2: i2c@e6824000 {
 		#size-cells = <0>;
 		compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
 		reg = <0xe6824000 0x425>;
-		interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks SH73A0_CLK_IIC2>;
 		power-domains = <&pd_a3sp>;
 		status = "disabled";
@@ -236,10 +243,10 @@ i2c3: i2c@e6826000 {
 		#size-cells = <0>;
 		compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
 		reg = <0xe6826000 0x425>;
-		interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks SH73A0_CLK_IIC3>;
 		power-domains = <&pd_a3sp>;
 		status = "disabled";
@@ -250,10 +257,10 @@ i2c4: i2c@e6828000 {
 		#size-cells = <0>;
 		compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
 		reg = <0xe6828000 0x425>;
-		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks SH73A0_CLK_IIC4>;
 		power-domains = <&pd_c5>;
 		status = "disabled";
@@ -262,8 +269,8 @@ GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH
 	mmcif: mmc@e6bd0000 {
 		compatible = "renesas,mmcif-sh73a0", "renesas,sh-mmcif";
 		reg = <0xe6bd0000 0x100>;
-		interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks SH73A0_CLK_MMCIF0>;
 		power-domains = <&pd_a3sp>;
 		reg-io-width = <4>;
@@ -317,9 +324,9 @@ msiof3: spi@e6c90000 {
 	sdhi0: sd@ee100000 {
 		compatible = "renesas,sdhi-sh73a0";
 		reg = <0xee100000 0x100>;
-		interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks SH73A0_CLK_SDHI0>;
 		power-domains = <&pd_a3sp>;
 		cap-sd-highspeed;
@@ -330,8 +337,8 @@ GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH
 	sdhi1: sd@ee120000 {
 		compatible = "renesas,sdhi-sh73a0";
 		reg = <0xee120000 0x100>;
-		interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks SH73A0_CLK_SDHI1>;
 		power-domains = <&pd_a3sp>;
 		disable-wp;
@@ -342,8 +349,8 @@ sdhi1: sd@ee120000 {
 	sdhi2: sd@ee140000 {
 		compatible = "renesas,sdhi-sh73a0";
 		reg = <0xee140000 0x100>;
-		interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH
-			      GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks SH73A0_CLK_SDHI2>;
 		power-domains = <&pd_a3sp>;
 		disable-wp;
@@ -612,19 +619,25 @@ extal1_clk: extal1 {
 		extal2_clk: extal2 {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
+			/* This value must be overridden by the board. */
+			clock-frequency = <0>;
 		};
 		extcki_clk: extcki {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
+			/* This value can be overridden by the board. */
+			clock-frequency = <0>;
 		};
 		fsiack_clk: fsiack {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
+			/* This value can be overridden by the board. */
 			clock-frequency = <0>;
 		};
 		fsibck_clk: fsibck {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
+			/* This value can be overridden by the board. */
 			clock-frequency = <0>;
 		};
 
@@ -812,7 +825,7 @@ pll1_div13_clk: pll1_div13 {
 			clock-div = <13>;
 			clock-mult = <1>;
 		};
-		twd_clk: twd {
+		periph_clk: periph {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks SH73A0_CLK_Z>;
 			#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/ste-ab8500.dtsi b/arch/arm/boot/dts/ste-ab8500.dtsi
index 55fff4d..14d4d86 100644
--- a/arch/arm/boot/dts/ste-ab8500.dtsi
+++ b/arch/arm/boot/dts/ste-ab8500.dtsi
@@ -6,6 +6,20 @@
 #include <dt-bindings/clock/ste-ab8500.h>
 
 / {
+	/* Essential housekeeping hardware monitors */
+	iio-hwmon {
+		compatible = "iio-hwmon";
+		io-channels = <&gpadc 0x02>, /* Battery temperature */
+			    <&gpadc 0x03>, /* Main charger voltage */
+			    <&gpadc 0x08>, /* Main battery voltage */
+			    <&gpadc 0x09>, /* VBUS */
+			    <&gpadc 0x0a>, /* Main charger current */
+			    <&gpadc 0x0b>, /* USB charger current */
+			    <&gpadc 0x0c>, /* Backup battery voltage */
+			    <&gpadc 0x0d>, /* Die temperature */
+			    <&gpadc 0x12>; /* Crystal temperature */
+	};
+
 	soc {
 		prcmu@80157000 {
 			ab8500 {
@@ -33,12 +47,84 @@ ab8500-rtc {
 					interrupt-names = "60S", "ALARM";
 				};
 
-				ab8500-gpadc {
+				gpadc: ab8500-gpadc {
 					compatible = "stericsson,ab8500-gpadc";
 					interrupts = <32 IRQ_TYPE_LEVEL_HIGH
 						      39 IRQ_TYPE_LEVEL_HIGH>;
 					interrupt-names = "HW_CONV_END", "SW_CONV_END";
 					vddadc-supply = <&ab8500_ldo_tvout_reg>;
+					#address-cells = <1>;
+					#size-cells = <0>;
+					#io-channel-cells = <1>;
+
+					/* GPADC channels */
+					bat_ctrl: channel@01 {
+						reg = <0x01>;
+					};
+					btemp_ball: channel@02 {
+						reg = <0x02>;
+					};
+					main_charger_v: channel@03 {
+						reg = <0x03>;
+					};
+					acc_detect1: channel@04 {
+						reg = <0x04>;
+					};
+					acc_detect2: channel@05 {
+						reg = <0x05>;
+					};
+					adc_aux1: channel@06 {
+						reg = <0x06>;
+					};
+					adc_aux2: channel@07 {
+						reg = <0x07>;
+					};
+					main_batt_v: channel@08 {
+						reg = <0x08>;
+					};
+					vbus_v: channel@09 {
+						reg = <0x09>;
+					};
+					main_charger_c: channel@0a {
+						reg = <0x0a>;
+					};
+					usb_charger_c: channel@0b {
+						reg = <0x0b>;
+					};
+					bk_bat_v: channel@0c {
+						reg = <0x0c>;
+					};
+					die_temp: channel@0d {
+						reg = <0x0d>;
+					};
+					usb_id: channel@0e {
+						reg = <0x0e>;
+					};
+					xtal_temp: channel@12 {
+						reg = <0x12>;
+					};
+					vbat_true_meas: channel@13 {
+						reg = <0x13>;
+					};
+					bat_ctrl_and_ibat: channel@1c {
+						reg = <0x1c>;
+					};
+					vbat_meas_and_ibat: channel@1d {
+						reg = <0x1d>;
+					};
+					vbat_true_meas_and_ibat: channel@1e {
+						reg = <0x1e>;
+					};
+					bat_temp_and_ibat: channel@1f {
+						reg = <0x1f>;
+					};
+				};
+
+				ab8500_temp {
+					compatible = "stericsson,abx500-temp";
+					io-channels = <&gpadc 0x06>,
+						      <&gpadc 0x07>;
+					io-channel-name = "aux1", "aux2";
 				};
 
 				ab8500_battery: ab8500_battery {
@@ -49,17 +135,31 @@ ab8500_battery: ab8500_battery {
 				ab8500_fg {
 					compatible = "stericsson,ab8500-fg";
 					battery	   = <&ab8500_battery>;
+					io-channels = <&gpadc 0x08>;
+					io-channel-name = "main_bat_v";
 				};
 
 				ab8500_btemp {
 					compatible = "stericsson,ab8500-btemp";
 					battery	   = <&ab8500_battery>;
+					io-channels = <&gpadc 0x02>,
+						      <&gpadc 0x01>;
+					io-channel-name = "btemp_ball",
+							"bat_ctrl";
 				};
 
 				ab8500_charger {
 					compatible	= "stericsson,ab8500-charger";
 					battery		= <&ab8500_battery>;
 					vddadc-supply	= <&ab8500_ldo_tvout_reg>;
+					io-channels = <&gpadc 0x03>,
+						      <&gpadc 0x0a>,
+						      <&gpadc 0x09>,
+						      <&gpadc 0x0b>;
+					io-channel-name = "main_charger_v",
+							"main_charger_c",
+							"vbus_v",
+							"usb_charger_c";
 				};
 
 				ab8500_chargalg {
diff --git a/arch/arm/boot/dts/ste-ab8505.dtsi b/arch/arm/boot/dts/ste-ab8505.dtsi
new file mode 100644
index 0000000..c72aa25
--- /dev/null
+++ b/arch/arm/boot/dts/ste-ab8505.dtsi
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2012 Linaro Ltd
+ */
+
+#include <dt-bindings/clock/ste-ab8500.h>
+
+/ {
+	/* Essential housekeeping hardware monitors */
+	iio-hwmon {
+		compatible = "iio-hwmon";
+		io-channels = <&gpadc 0x02>, /* Battery temperature */
+			      <&gpadc 0x08>, /* Main battery voltage */
+			      <&gpadc 0x09>, /* VBUS */
+			      <&gpadc 0x0b>, /* Charger current */
+			      <&gpadc 0x0c>; /* Backup battery voltage */
+	};
+
+	soc {
+		prcmu@80157000 {
+			ab8505 {
+				compatible = "stericsson,ab8505";
+				interrupt-parent = <&intc>;
+				interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+
+				ab8500_clock: clock-controller {
+					compatible = "stericsson,ab8500-clk";
+					#clock-cells = <1>;
+				};
+
+				ab8505_gpio: ab8505-gpio {
+					compatible = "stericsson,ab8505-gpio";
+					gpio-controller;
+					#gpio-cells = <2>;
+				};
+
+				ab8500-rtc {
+					compatible = "stericsson,ab8500-rtc";
+					interrupts = <17 IRQ_TYPE_LEVEL_HIGH
+						      18 IRQ_TYPE_LEVEL_HIGH>;
+					interrupt-names = "60S", "ALARM";
+				};
+
+				gpadc: ab8500-gpadc {
+					compatible = "stericsson,ab8500-gpadc";
+					interrupts = <32 IRQ_TYPE_LEVEL_HIGH
+						      39 IRQ_TYPE_LEVEL_HIGH>;
+					interrupt-names = "HW_CONV_END", "SW_CONV_END";
+					vddadc-supply = <&ab8500_ldo_adc_reg>;
+					#address-cells = <1>;
+					#size-cells = <0>;
+					#io-channel-cells = <1>;
+
+					/* GPADC channels */
+					bat_ctrl: channel@01 {
+						reg = <0x01>;
+					};
+					btemp_ball: channel@02 {
+						reg = <0x02>;
+					};
+					acc_detect1: channel@04 {
+						reg = <0x04>;
+					};
+					acc_detect2: channel@05 {
+						reg = <0x05>;
+					};
+					adc_aux1: channel@06 {
+						reg = <0x06>;
+					};
+					adc_aux2: channel@07 {
+						reg = <0x07>;
+					};
+					main_batt_v: channel@08 {
+						reg = <0x08>;
+					};
+					vbus_v: channel@09 {
+						reg = <0x09>;
+					};
+					charger_c: channel@0b {
+						reg = <0x0b>;
+					};
+					bk_bat_v: channel@0c {
+						reg = <0x0c>;
+					};
+					usb_id: channel@0e {
+						reg = <0x0e>;
+					};
+				};
+
+				ab8500_battery: ab8500_battery {
+					status = "disabled";
+					thermistor-on-batctrl;
+				};
+
+				ab8500_fg {
+					status = "disabled";
+					compatible = "stericsson,ab8500-fg";
+					battery = <&ab8500_battery>;
+					io-channels = <&gpadc 0x08>;
+					io-channel-name = "main_bat_v";
+				};
+
+				ab8500_btemp {
+					status = "disabled";
+					compatible = "stericsson,ab8500-btemp";
+					battery = <&ab8500_battery>;
+					io-channels = <&gpadc 0x02>,
+						      <&gpadc 0x01>;
+					io-channel-name = "btemp_ball",
+							  "bat_ctrl";
+				};
+
+				ab8500_charger {
+					status = "disabled";
+					compatible = "stericsson,ab8500-charger";
+					battery = <&ab8500_battery>;
+					vddadc-supply = <&ab8500_ldo_adc_reg>;
+					io-channels = <&gpadc 0x09>,
+						      <&gpadc 0x0b>;
+					io-channel-name = "vbus_v",
+							  "usb_charger_c";
+				};
+
+				ab8500_chargalg {
+					status = "disabled";
+					compatible = "stericsson,ab8500-chargalg";
+					battery = <&ab8500_battery>;
+				};
+
+				ab8500_usb: ab8500_usb {
+					compatible = "stericsson,ab8500-usb";
+					interrupts = < 90 IRQ_TYPE_LEVEL_HIGH
+						       96 IRQ_TYPE_LEVEL_HIGH
+						       14 IRQ_TYPE_LEVEL_HIGH
+						       15 IRQ_TYPE_LEVEL_HIGH
+						       79 IRQ_TYPE_LEVEL_HIGH
+						       74 IRQ_TYPE_LEVEL_HIGH
+						       75 IRQ_TYPE_LEVEL_HIGH>;
+					interrupt-names = "ID_WAKEUP_R",
+							  "ID_WAKEUP_F",
+							  "VBUS_DET_F",
+							  "VBUS_DET_R",
+							  "USB_LINK_STATUS",
+							  "USB_ADP_PROBE_PLUG",
+							  "USB_ADP_PROBE_UNPLUG";
+					vddulpivio18-supply = <&ab8500_ldo_intcore_reg>;
+					v-ape-supply = <&db8500_vape_reg>;
+					musb_1v8-supply = <&db8500_vsmps2_reg>;
+					clocks = <&prcmu_clk PRCMU_SYSCLK>;
+					clock-names = "sysclk";
+				};
+
+				ab8500-ponkey {
+					compatible = "stericsson,ab8500-poweron-key";
+					interrupts = <6 IRQ_TYPE_LEVEL_HIGH
+						      7 IRQ_TYPE_LEVEL_HIGH>;
+					interrupt-names = "ONKEY_DBF", "ONKEY_DBR";
+				};
+
+				ab8500-sysctrl {
+					compatible = "stericsson,ab8500-sysctrl";
+				};
+
+				ab8500-pwm {
+					compatible = "stericsson,ab8500-pwm";
+					clocks = <&ab8500_clock AB8500_SYSCLK_INT>;
+					clock-names = "intclk";
+				};
+
+				ab8500-debugfs {
+					compatible = "stericsson,ab8500-debug";
+				};
+
+				codec: ab8500-codec {
+					compatible = "stericsson,ab8500-codec";
+
+					V-AUD-supply = <&ab8500_ldo_audio_reg>;
+					V-AMIC1-supply = <&ab8500_ldo_anamic1_reg>;
+					V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>;
+
+					clocks = <&ab8500_clock AB8500_SYSCLK_AUDIO>;
+					clock-names = "audioclk";
+
+					stericsson,earpeice-cmv = <950>; /* Units in mV. */
+				};
+
+				ab8505-regulators {
+					compatible = "stericsson,ab8505-regulator";
+
+					ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
+						regulator-min-microvolt = <2800000>;
+						regulator-max-microvolt = <3300000>;
+					};
+
+					ab8500_ldo_aux2_reg: ab8500_ldo_aux2 {
+						regulator-min-microvolt = <1100000>;
+						regulator-max-microvolt = <3300000>;
+					};
+
+					ab8500_ldo_aux3_reg: ab8500_ldo_aux3 {
+						regulator-min-microvolt = <1100000>;
+						regulator-max-microvolt = <3300000>;
+					};
+
+					ab8500_ldo_aux4_reg: ab8500_ldo_aux4 {
+						regulator-min-microvolt = <1100000>;
+						regulator-max-microvolt = <3300000>;
+					};
+
+					ab8500_ldo_aux5_reg: ab8500_ldo_aux5 {
+						regulator-min-microvolt = <1050000>;
+						regulator-max-microvolt = <2790000>;
+					};
+
+					ab8500_ldo_aux6_reg: ab8500_ldo_aux6 {
+						regulator-min-microvolt = <1050000>;
+						regulator-max-microvolt = <2790000>;
+					};
+
+					// supply for v-intcore12; VINTCORE12 LDO
+					ab8500_ldo_intcore_reg: ab8500_ldo_intcore {
+						regulator-min-microvolt = <1250000>;
+						regulator-max-microvolt = <1350000>;
+					};
+
+					// supply for gpadc; ADC LDO
+					ab8500_ldo_adc_reg: ab8500_ldo_adc {
+					};
+
+					// supply for ab8500-vaudio; VAUDIO LDO
+					ab8500_ldo_audio_reg: ab8500_ldo_audio {
+					};
+
+					// supply for v-anamic1 VAMIC1 LDO
+					ab8500_ldo_anamic1_reg: ab8500_ldo_anamic1 {
+					};
+
+					// supply for v-amic2; VAMIC2 LDO; reuse constants for AMIC1
+					ab8500_ldo_anamic2_reg: ab8500_ldo_anamic2 {
+					};
+
+					// supply for v-aux8; VAUX8 LDO
+					ab8500_ldo_aux8_reg: ab8500_ldo_aux8 {
+					};
+
+					// supply for U8500 CSI/DSI; VANA LDO
+					ab8500_ldo_ana_reg: ab8500_ldo_ana {
+					};
+				};
+			};
+		};
+
+		sound {
+			stericsson,audio-codec = <&codec>;
+			clocks = <&prcmu_clk PRCMU_SYSCLK>, <&ab8500_clock AB8500_SYSCLK_ULP>, <&ab8500_clock AB8500_SYSCLK_INT>;
+			clock-names = "sysclk", "ulpclk", "intclk";
+		};
+
+		mcde@a0350000 {
+			vana-supply = <&ab8500_ldo_ana_reg>;
+
+			dsi@a0351000 {
+				vana-supply = <&ab8500_ldo_ana_reg>;
+			};
+			dsi@a0352000 {
+				vana-supply = <&ab8500_ldo_ana_reg>;
+			};
+			dsi@a0353000 {
+				vana-supply = <&ab8500_ldo_ana_reg>;
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/ste-db8500.dtsi b/arch/arm/boot/dts/ste-db8500.dtsi
new file mode 100644
index 0000000..d309fad
--- /dev/null
+++ b/arch/arm/boot/dts/ste-db8500.dtsi
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "ste-dbx5x0.dtsi"
+
+/ {
+	cpus {
+		cpu@300 {
+			/* cpufreq controls */
+			operating-points = <998400 0
+					    800000 0
+					    400000 0
+					    200000 0>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/ste-db8520.dtsi b/arch/arm/boot/dts/ste-db8520.dtsi
new file mode 100644
index 0000000..48bd872
--- /dev/null
+++ b/arch/arm/boot/dts/ste-db8520.dtsi
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "ste-dbx5x0.dtsi"
+
+/ {
+	cpus {
+		cpu@300 {
+			/* cpufreq controls */
+			operating-points = <1152000 0
+					    800000 0
+					    400000 0
+					    200000 0>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/ste-dbx5x0-pinctrl.dtsi b/arch/arm/boot/dts/ste-dbx5x0-pinctrl.dtsi
new file mode 100644
index 0000000..7bf7a2d
--- /dev/null
+++ b/arch/arm/boot/dts/ste-dbx5x0-pinctrl.dtsi
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2013 Linaro Ltd.
+ */
+
+#include "ste-nomadik-pinctrl.dtsi"
+
+&pinctrl {
+	/* Settings for all UART default and sleep states */
+	uart0 {
+		u0_a_1_default: u0_a_1_default {
+			default_mux {
+				function = "u0";
+				groups = "u0_a_1";
+			};
+			default_cfg1 {
+				pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */
+				ste,config = <&in_pu>;
+			};
+			default_cfg2 {
+				pins = "GPIO1_AJ3", "GPIO3_AH3"; /* RTS+TXD */
+				ste,config = <&out_hi>;
+			};
+		};
+
+		u0_a_1_sleep: u0_a_1_sleep {
+			sleep_cfg1 {
+				pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */
+				ste,config = <&slpm_in_wkup_pdis>;
+			};
+			sleep_cfg2 {
+				pins = "GPIO1_AJ3"; /* RTS */
+				ste,config = <&slpm_out_hi_wkup_pdis>;
+			};
+			sleep_cfg3 {
+				pins = "GPIO3_AH3"; /* TXD */
+				ste,config = <&slpm_out_wkup_pdis>;
+			};
+		};
+	};
+
+	uart1 {
+		u1rxtx_a_1_default: u1rxtx_a_1_default {
+			default_mux {
+				function = "u1";
+				groups = "u1rxtx_a_1";
+			};
+			default_cfg1 {
+				pins = "GPIO4_AH6"; /* RXD */
+				ste,config = <&in_pu>;
+			};
+			default_cfg2 {
+				pins = "GPIO5_AG6"; /* TXD */
+				ste,config = <&out_hi>;
+			};
+		};
+
+		u1rxtx_a_1_sleep: u1rxtx_a_1_sleep {
+			sleep_cfg1 {
+				pins = "GPIO4_AH6"; /* RXD */
+				ste,config = <&slpm_in_wkup_pdis>;
+			};
+			sleep_cfg2 {
+				pins = "GPIO5_AG6"; /* TXD */
+				ste,config = <&slpm_out_wkup_pdis>;
+			};
+		};
+
+		u1ctsrts_a_1_default: u1ctsrts_a_1_default {
+			default_mux {
+				function = "u1";
+				groups = "u1ctsrts_a_1";
+			};
+			default_cfg1 {
+				pins = "GPIO6_AF6"; /* CTS */
+				ste,config = <&in_pu>;
+			};
+			default_cfg2 {
+				pins = "GPIO7_AG5"; /* RTS */
+				ste,config = <&out_hi>;
+			};
+		};
+
+		u1ctsrts_a_1_sleep: u1ctsrts_a_1_sleep {
+			sleep_cfg1 {
+				pins = "GPIO6_AF6"; /* CTS */
+				ste,config = <&slpm_in_wkup_pdis>;
+			};
+			sleep_cfg2 {
+				pins = "GPIO7_AG5"; /* RTS */
+				ste,config = <&slpm_out_hi_wkup_pdis>;
+			};
+		};
+	};
+
+	uart2 {
+		u2rxtx_c_1_default: u2rxtx_c_1_default {
+			default_mux {
+				function = "u2";
+				groups = "u2rxtx_c_1";
+			};
+			default_cfg1 {
+				pins = "GPIO29_W2"; /* RXD */
+				ste,config = <&in_pu>;
+			};
+			default_cfg2 {
+				pins = "GPIO30_W3"; /* TXD */
+				ste,config = <&out_hi>;
+			};
+		};
+
+		u2rxtx_c_1_sleep: u2rxtx_c_1_sleep {
+			sleep_cfg1 {
+				pins = "GPIO29_W2"; /* RXD */
+				ste,config = <&in_wkup_pdis>;
+			};
+			sleep_cfg2 {
+				pins = "GPIO30_W3"; /* TXD */
+				ste,config = <&out_wkup_pdis>;
+			};
+		};
+	};
+
+	/* Settings for all I2C default and sleep states */
+	i2c0 {
+		i2c0_a_1_default: i2c0_a_1_default {
+			default_mux {
+				function = "i2c0";
+				groups = "i2c0_a_1";
+			};
+			default_cfg1 {
+				pins = "GPIO147_C15", "GPIO148_B16"; /* SDA/SCL */
+				ste,config = <&in_nopull>;
+			};
+		};
+
+		i2c0_a_1_sleep: i2c0_a_1_sleep {
+			sleep_cfg1 {
+				pins = "GPIO147_C15", "GPIO148_B16"; /* SDA/SCL */
+				ste,config = <&slpm_in_wkup_pdis>;
+			};
+		};
+	};
+
+	i2c1 {
+		i2c1_b_2_default: i2c1_b_2_default {
+			default_mux {
+				function = "i2c1";
+				groups = "i2c1_b_2";
+			};
+			default_cfg1 {
+				pins = "GPIO16_AD3", "GPIO17_AD4"; /* SDA/SCL */
+				ste,config = <&in_nopull>;
+			};
+		};
+
+		i2c1_b_2_sleep: i2c1_b_2_sleep {
+			sleep_cfg1 {
+				pins = "GPIO16_AD3", "GPIO17_AD4"; /* SDA/SCL */
+				ste,config = <&slpm_in_wkup_pdis>;
+			};
+		};
+	};
+
+	i2c2 {
+		i2c2_b_2_default: i2c2_b_2_default {
+			default_mux {
+				function = "i2c2";
+				groups = "i2c2_b_2";
+			};
+			default_cfg1 {
+				pins = "GPIO10_AF5", "GPIO11_AG4"; /* SDA/SCL */
+				ste,config = <&in_nopull>;
+			};
+		};
+
+		i2c2_b_2_sleep: i2c2_b_2_sleep {
+			sleep_cfg1 {
+				pins = "GPIO10_AF5", "GPIO11_AG4"; /* SDA/SCL */
+				ste,config = <&slpm_in_wkup_pdis>;
+			};
+		};
+	};
+
+	i2c3 {
+		i2c3_c_2_default: i2c3_c_2_default {
+			default_mux {
+				function = "i2c3";
+				groups = "i2c3_c_2";
+			};
+			default_cfg1 {
+				pins = "GPIO229_AG7", "GPIO230_AF7"; /* SDA/SCL */
+				ste,config = <&in_nopull>;
+			};
+		};
+
+		i2c3_c_2_sleep: i2c3_c_2_sleep {
+			sleep_cfg1 {
+				pins = "GPIO229_AG7", "GPIO230_AF7"; /* SDA/SCL */
+				ste,config = <&slpm_in_wkup_pdis>;
+			};
+		};
+	};
+
+	/*
+	 * Activating I2C4 conflicts with UART1 over the same pins, so do not
+	 * enable I2C4 and UART1 at the same time.
+	 */
+	i2c4 {
+		i2c4_b_1_default: i2c4_b_1_default {
+			default_mux {
+				function = "i2c4";
+				groups = "i2c4_b_1";
+			};
+			default_cfg1 {
+				pins = "GPIO4_AH6", "GPIO5_AG6"; /* SDA/SCL */
+				ste,config = <&in_nopull>;
+			};
+		};
+
+		i2c4_b_1_sleep: i2c4_b_1_sleep {
+			sleep_cfg1 {
+				pins = "GPIO4_AH6", "GPIO5_AG6"; /* SDA/SCL */
+				ste,config = <&slpm_in_wkup_pdis>;
+			};
+		};
+	};
+
+	/* Settings for all MMC/SD/SDIO default and sleep states */
+	sdi0 {
+		/* This is the external SD card slot, 4 bits wide */
+		mc0_a_1_default: mc0_a_1_default {
+			default_mux {
+				function = "mc0";
+				groups = "mc0_a_1";
+			};
+			default_cfg1 {
+				pins =
+				"GPIO18_AC2", /* CMDDIR */
+				"GPIO19_AC1", /* DAT0DIR */
+				"GPIO20_AB4"; /* DAT2DIR */
+				ste,config = <&out_hi>;
+			};
+			default_cfg2 {
+				pins = "GPIO22_AA3"; /* FBCLK */
+				ste,config = <&in_nopull>;
+			};
+			default_cfg3 {
+				pins = "GPIO23_AA4"; /* CLK */
+				ste,config = <&out_lo>;
+			};
+			default_cfg4 {
+				pins =
+				"GPIO24_AB2", /* CMD */
+				"GPIO25_Y4", /* DAT0 */
+				"GPIO26_Y2", /* DAT1 */
+				"GPIO27_AA2", /* DAT2 */
+				"GPIO28_AA1"; /* DAT3 */
+				ste,config = <&in_pu>;
+			};
+		};
+
+		mc0_a_1_sleep: mc0_a_1_sleep {
+			sleep_cfg1 {
+				pins =
+				"GPIO18_AC2", /* CMDDIR */
+				"GPIO19_AC1", /* DAT0DIR */
+				"GPIO20_AB4"; /* DAT2DIR */
+				ste,config = <&slpm_out_hi_wkup_pdis>;
+			};
+			sleep_cfg2 {
+				pins =
+				"GPIO22_AA3", /* FBCLK */
+				"GPIO24_AB2", /* CMD */
+				"GPIO25_Y4", /* DAT0 */
+				"GPIO26_Y2", /* DAT1 */
+				"GPIO27_AA2", /* DAT2 */
+				"GPIO28_AA1"; /* DAT3 */
+				ste,config = <&slpm_in_wkup_pdis>;
+			};
+			sleep_cfg3 {
+				pins = "GPIO23_AA4"; /* CLK */
+				ste,config = <&slpm_out_lo_wkup_pdis>;
+			};
+		};
+
+		mc0_a_2_default: mc0_a_2_default {
+			default_mux {
+				function = "mc0";
+				groups = "mc0_a_2";
+			};
+			default_cfg1 {
+				pins = "GPIO22_AA3"; /* FBCLK */
+				ste,config = <&in_nopull>;
+			};
+			default_cfg2 {
+				pins = "GPIO23_AA4"; /* CLK */
+				ste,config = <&out_lo>;
+			};
+			default_cfg3 {
+				pins =
+				"GPIO24_AB2", /* CMD */
+				"GPIO25_Y4", /* DAT0 */
+				"GPIO26_Y2", /* DAT1 */
+				"GPIO27_AA2", /* DAT2 */
+				"GPIO28_AA1"; /* DAT3 */
+				ste,config = <&in_pu>;
+			};
+		};
+
+		mc0_a_2_sleep: mc0_a_2_sleep {
+			sleep_cfg1 {
+				pins =
+				"GPIO22_AA3", /* FBCLK */
+				"GPIO24_AB2", /* CMD */
+				"GPIO25_Y4", /* DAT0 */
+				"GPIO26_Y2", /* DAT1 */
+				"GPIO27_AA2", /* DAT2 */
+				"GPIO28_AA1"; /* DAT3 */
+				ste,config = <&slpm_in_wkup_pdis>;
+			};
+			sleep_cfg2 {
+				pins = "GPIO23_AA4"; /* CLK */
+				ste,config = <&slpm_out_lo_wkup_pdis>;
+			};
+		};
+	};
+
+	sdi1 {
+		/* This is the WLAN SDIO, 4 bits wide */
+		mc1_a_1_default: mc1_a_1_default {
+			default_mux {
+				function = "mc1";
+				groups = "mc1_a_1";
+			};
+			default_cfg1 {
+				pins = "GPIO208_AH16"; /* CLK */
+				ste,config = <&out_lo>;
+			};
+			default_cfg2 {
+				pins = "GPIO209_AG15"; /* FBCLK */
+				ste,config = <&in_nopull>;
+			};
+			default_cfg3 {
+				pins =
+				"GPIO210_AJ15", /* CMD */
+				"GPIO211_AG14", /* DAT0 */
+				"GPIO212_AF13", /* DAT1 */
+				"GPIO213_AG13", /* DAT2 */
+				"GPIO214_AH15"; /* DAT3 */
+				ste,config = <&in_pu>;
+			};
+		};
+
+		mc1_a_1_sleep: mc1_a_1_sleep {
+			sleep_cfg1 {
+				pins = "GPIO208_AH16"; /* CLK */
+				ste,config = <&slpm_out_lo_wkup_pdis>;
+			};
+			sleep_cfg2 {
+				pins =
+				"GPIO209_AG15", /* FBCLK */
+				"GPIO210_AJ15", /* CMD */
+				"GPIO211_AG14", /* DAT0 */
+				"GPIO212_AF13", /* DAT1 */
+				"GPIO213_AG13", /* DAT2 */
+				"GPIO214_AH15"; /* DAT3 */
+				ste,config = <&slpm_in_wkup_pdis>;
+			};
+		};
+
+		mc1_a_2_default: mc1_a_2_default {
+			default_mux {
+				function = "mc1";
+				groups = "mc1_a_2";
+			};
+			default_cfg1 {
+				pins = "GPIO208_AH16"; /* CLK */
+				ste,config = <&out_lo>;
+			};
+			default_cfg2 {
+				pins =
+				"GPIO210_AJ15", /* CMD */
+				"GPIO211_AG14", /* DAT0 */
+				"GPIO212_AF13", /* DAT1 */
+				"GPIO213_AG13", /* DAT2 */
+				"GPIO214_AH15"; /* DAT3 */
+				ste,config = <&in_pu>;
+			};
+		};
+
+		mc1_a_2_sleep: mc1_a_2_sleep {
+			sleep_cfg1 {
+				pins = "GPIO208_AH16"; /* CLK */
+				ste,config = <&slpm_out_lo_wkup_pdis>;
+			};
+			sleep_cfg2 {
+				pins =
+				"GPIO210_AJ15", /* CMD */
+				"GPIO211_AG14", /* DAT0 */
+				"GPIO212_AF13", /* DAT1 */
+				"GPIO213_AG13", /* DAT2 */
+				"GPIO214_AH15"; /* DAT3 */
+				ste,config = <&slpm_in_wkup_pdis>;
+			};
+		};
+	};
+
+	sdi2 {
+		/* This is the eMMC, 8 bits wide, usually PoP eMMC */
+		mc2_a_1_default: mc2_a_1_default {
+			default_mux {
+				function = "mc2";
+				groups = "mc2_a_1";
+			};
+			default_cfg1 {
+				pins = "GPIO128_A5"; /* CLK */
+				ste,config = <&out_lo>;
+			};
+			default_cfg2 {
+				pins = "GPIO130_C8"; /* FBCLK */
+				ste,config = <&in_nopull>;
+			};
+			default_cfg3 {
+				pins =
+				"GPIO129_B4", /* CMD */
+				"GPIO131_A12", /* DAT0 */
+				"GPIO132_C10", /* DAT1 */
+				"GPIO133_B10", /* DAT2 */
+				"GPIO134_B9", /* DAT3 */
+				"GPIO135_A9", /* DAT4 */
+				"GPIO136_C7", /* DAT5 */
+				"GPIO137_A7", /* DAT6 */
+				"GPIO138_C5"; /* DAT7 */
+				ste,config = <&in_pu>;
+			};
+		};
+
+		mc2_a_1_sleep: mc2_a_1_sleep {
+			sleep_cfg1 {
+				pins = "GPIO128_A5"; /* CLK */
+				ste,config = <&out_lo_wkup_pdis>;
+			};
+			sleep_cfg2 {
+				pins =
+				"GPIO130_C8", /* FBCLK */
+				"GPIO129_B4"; /* CMD */
+				ste,config = <&in_wkup_pdis_en>;
+			};
+			sleep_cfg3 {
+				pins =
+				"GPIO131_A12", /* DAT0 */
+				"GPIO132_C10", /* DAT1 */
+				"GPIO133_B10", /* DAT2 */
+				"GPIO134_B9", /* DAT3 */
+				"GPIO135_A9", /* DAT4 */
+				"GPIO136_C7", /* DAT5 */
+				"GPIO137_A7", /* DAT6 */
+				"GPIO138_C5"; /* DAT7 */
+				ste,config = <&in_wkup_pdis>;
+			};
+		};
+	};
+
+	sdi4 {
+		/* This is the eMMC, 8 bits wide, usually PCB-mounted eMMC */
+		mc4_a_1_default: mc4_a_1_default {
+			default_mux {
+				function = "mc4";
+				groups = "mc4_a_1";
+			};
+			default_cfg1 {
+				pins = "GPIO203_AE23"; /* CLK */
+				ste,config = <&out_lo>;
+			};
+			default_cfg2 {
+				pins = "GPIO202_AF25"; /* FBCLK */
+				ste,config = <&in_nopull>;
+			};
+			default_cfg3 {
+				pins =
+				"GPIO201_AF24", /* CMD */
+				"GPIO200_AH26", /* DAT0 */
+				"GPIO199_AH23", /* DAT1 */
+				"GPIO198_AG25", /* DAT2 */
+				"GPIO197_AH24", /* DAT3 */
+				"GPIO207_AJ23", /* DAT4 */
+				"GPIO206_AG24", /* DAT5 */
+				"GPIO205_AG23", /* DAT6 */
+				"GPIO204_AF23"; /* DAT7 */
+				ste,config = <&in_pu>;
+			};
+		};
+
+		mc4_a_1_sleep: mc4_a_1_sleep {
+			sleep_cfg1 {
+				pins = "GPIO203_AE23"; /* CLK */
+				ste,config = <&out_lo_wkup_pdis>;
+			};
+			sleep_cfg2 {
+				pins =
+				"GPIO202_AF25", /* FBCLK */
+				"GPIO201_AF24", /* CMD */
+				"GPIO200_AH26", /* DAT0 */
+				"GPIO199_AH23", /* DAT1 */
+				"GPIO198_AG25", /* DAT2 */
+				"GPIO197_AH24", /* DAT3 */
+				"GPIO207_AJ23", /* DAT4 */
+				"GPIO206_AG24", /* DAT5 */
+				"GPIO205_AG23", /* DAT6 */
+				"GPIO204_AF23"; /* DAT7 */
+				ste,config = <&slpm_in_wkup_pdis>;
+			};
+		};
+	};
+
+	/*
+	 * Multi-rate serial ports (MSPs) - MSP3 output is internal and
+	 * cannot be muxed onto any pins.
+	 */
+	msp0 {
+		msp0txrxtfstck_a_1_default: msp0txrxtfstck_a_1_default {
+			default_msp0_mux {
+				function = "msp0";
+				groups = "msp0txrx_a_1", "msp0tfstck_a_1";
+			};
+			default_msp0_cfg {
+				pins =
+				"GPIO12_AC4", /* TXD */
+				"GPIO15_AC3", /* RXD */
+				"GPIO13_AF3", /* TFS */
+				"GPIO14_AE3"; /* TCK */
+				ste,config = <&in_nopull>;
+			};
+		};
+	};
+
+	msp1 {
+		msp1txrx_a_1_default: msp1txrx_a_1_default {
+			default_mux {
+				function = "msp1";
+				groups = "msp1txrx_a_1", "msp1_a_1";
+			};
+			default_cfg1 {
+				pins = "GPIO33_AF2";
+				ste,config = <&out_lo>;
+			};
+			default_cfg2 {
+				pins =
+				"GPIO34_AE1",
+				"GPIO35_AE2",
+				"GPIO36_AG2";
+				ste,config = <&in_nopull>;
+			};
+		};
+	};
+
+	msp2 {
+		msp2_a_1_default: msp2_a_1_default {
+			/* MSP2 usually used for HDMI audio */
+			default_mux {
+				function = "msp2";
+				groups = "msp2_a_1";
+			};
+			default_cfg1 {
+				pins =
+				"GPIO193_AH27", /* TXD */
+				"GPIO194_AF27", /* TCK */
+				"GPIO195_AG28"; /* TFS */
+				ste,config = <&in_pd>;
+			};
+			default_cfg2 {
+				pins = "GPIO196_AG26"; /* RXD */
+				ste,config = <&out_lo>;
+			};
+		};
+	};
+
+	musb {
+		usb_a_1_default: usb_a_1_default {
+			default_mux {
+				function = "usb";
+				groups = "usb_a_1";
+			};
+			default_cfg1 {
+				pins =
+				"GPIO256_AF28", /* NXT */
+				"GPIO258_AD29", /* XCLK */
+				"GPIO259_AC29", /* DIR */
+				"GPIO260_AD28", /* DAT7 */
+				"GPIO261_AD26", /* DAT6 */
+				"GPIO262_AE26", /* DAT5 */
+				"GPIO263_AG29", /* DAT4 */
+				"GPIO264_AE27", /* DAT3 */
+				"GPIO265_AD27", /* DAT2 */
+				"GPIO266_AC28", /* DAT1 */
+				"GPIO267_AC27"; /* DAT0 */
+				ste,config = <&in_nopull>;
+			};
+			default_cfg2 {
+				pins = "GPIO257_AE29"; /* STP */
+				ste,config = <&out_hi>;
+			};
+		};
+
+		usb_a_1_sleep: usb_a_1_sleep {
+			sleep_cfg1 {
+				pins =
+				"GPIO256_AF28", /* NXT */
+				"GPIO258_AD29", /* XCLK */
+				"GPIO259_AC29"; /* DIR */
+				ste,config = <&slpm_wkup_pdis_en>;
+			};
+			sleep_cfg2 {
+				pins = "GPIO257_AE29"; /* STP */
+				ste,config = <&slpm_out_hi_wkup_pdis>;
+			};
+			sleep_cfg3 {
+				pins =
+				"GPIO260_AD28", /* DAT7 */
+				"GPIO261_AD26", /* DAT6 */
+				"GPIO262_AE26", /* DAT5 */
+				"GPIO263_AG29", /* DAT4 */
+				"GPIO264_AE27", /* DAT3 */
+				"GPIO265_AD27", /* DAT2 */
+				"GPIO266_AC28", /* DAT1 */
+				"GPIO267_AC27"; /* DAT0 */
+				ste,config = <&slpm_in_wkup_pdis_en>;
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index bda454d..6671f74 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -14,6 +14,22 @@ / {
 	#address-cells = <1>;
 	#size-cells = <1>;
 
+	/* This stabilizes the device enumeration */
+	aliases {
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		i2c2 = &i2c2;
+		i2c3 = &i2c3;
+		i2c4 = &i2c4;
+		spi0 = &spi0;
+		spi1 = &spi1;
+		spi2 = &spi2;
+		spi3 = &spi3;
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+	};
+
 	chosen {
 	};
 
@@ -36,11 +52,6 @@ CPU0: cpu@300 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0x300>;
-			/* cpufreq controls */
-			operating-points = <998400 0
-					    800000 0
-					    400000 0
-					    200000 0>;
 			clocks = <&prcmu_clk PRCMU_ARMSS>;
 			clock-names = "cpu";
 			clock-latency = <20000>;
@@ -93,7 +104,7 @@ cooling-maps {
 	soc {
 		#address-cells = <1>;
 		#size-cells = <1>;
-		compatible = "stericsson,db8500";
+		compatible = "stericsson,db8500", "simple-bus";
 		interrupt-parent = <&intc>;
 		ranges;
 
@@ -324,7 +335,7 @@ watchdog@a0410620 {
 		};
 
 		rtc@80154000 {
-			compatible = "arm,rtc-pl031", "arm,primecell";
+			compatible = "arm,pl031", "arm,primecell";
 			reg = <0x80154000 0x1000>;
 			interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
 
@@ -638,7 +649,7 @@ db8500_esram34_ret_reg: db8500_esram34_ret {
 			};
 		};
 
-		i2c@80004000 {
+		i2c0: i2c@80004000 {
 			compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
 			reg = <0x80004000 0x1000>;
 			interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
@@ -651,9 +662,11 @@ i2c@80004000 {
 			clocks = <&prcc_kclk 3 3>, <&prcc_pclk 3 3>;
 			clock-names = "i2cclk", "apb_pclk";
 			power-domains = <&pm_domains DOMAIN_VAPE>;
+
+			status = "disabled";
 		};
 
-		i2c@80122000 {
+		i2c1: i2c@80122000 {
 			compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
 			reg = <0x80122000 0x1000>;
 			interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
@@ -667,9 +680,11 @@ i2c@80122000 {
 			clocks = <&prcc_kclk 1 2>, <&prcc_pclk 1 2>;
 			clock-names = "i2cclk", "apb_pclk";
 			power-domains = <&pm_domains DOMAIN_VAPE>;
+
+			status = "disabled";
 		};
 
-		i2c@80128000 {
+		i2c2: i2c@80128000 {
 			compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
 			reg = <0x80128000 0x1000>;
 			interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
@@ -683,9 +698,11 @@ i2c@80128000 {
 			clocks = <&prcc_kclk 1 6>, <&prcc_pclk 1 6>;
 			clock-names = "i2cclk", "apb_pclk";
 			power-domains = <&pm_domains DOMAIN_VAPE>;
+
+			status = "disabled";
 		};
 
-		i2c@80110000 {
+		i2c3: i2c@80110000 {
 			compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
 			reg = <0x80110000 0x1000>;
 			interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
@@ -699,9 +716,11 @@ i2c@80110000 {
 			clocks = <&prcc_kclk 2 0>, <&prcc_pclk 2 0>;
 			clock-names = "i2cclk", "apb_pclk";
 			power-domains = <&pm_domains DOMAIN_VAPE>;
+
+			status = "disabled";
 		};
 
-		i2c@8012a000 {
+		i2c4: i2c@8012a000 {
 			compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
 			reg = <0x8012a000 0x1000>;
 			interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
@@ -715,9 +734,11 @@ i2c@8012a000 {
 			clocks = <&prcc_kclk 1 9>, <&prcc_pclk 1 10>;
 			clock-names = "i2cclk", "apb_pclk";
 			power-domains = <&pm_domains DOMAIN_VAPE>;
+
+			status = "disabled";
 		};
 
-		spi@80002000 {
+		ssp0: spi@80002000 {
 			compatible = "arm,pl022", "arm,primecell";
 			reg = <0x80002000 0x1000>;
 			interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
@@ -729,9 +750,11 @@ spi@80002000 {
 			       <&dma 8 0 0x0>; /* Logical - MemToDev */
 			dma-names = "rx", "tx";
 			power-domains = <&pm_domains DOMAIN_VAPE>;
+
+			status = "disabled";
 		};
 
-		spi@80003000 {
+		ssp1: spi@80003000 {
 			compatible = "arm,pl022", "arm,primecell";
 			reg = <0x80003000 0x1000>;
 			interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
@@ -743,9 +766,11 @@ spi@80003000 {
 			       <&dma 9 0 0x0>; /* Logical - MemToDev */
 			dma-names = "rx", "tx";
 			power-domains = <&pm_domains DOMAIN_VAPE>;
+
+			status = "disabled";
 		};
 
-		spi@8011a000 {
+		spi0: spi@8011a000 {
 			compatible = "arm,pl022", "arm,primecell";
 			reg = <0x8011a000 0x1000>;
 			interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
@@ -758,9 +783,11 @@ spi@8011a000 {
 			       <&dma 0 0 0x0>; /* Logical - MemToDev */
 			dma-names = "rx", "tx";
 			power-domains = <&pm_domains DOMAIN_VAPE>;
+
+			status = "disabled";
 		};
 
-		spi@80112000 {
+		spi1: spi@80112000 {
 			compatible = "arm,pl022", "arm,primecell";
 			reg = <0x80112000 0x1000>;
 			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
@@ -773,9 +800,11 @@ spi@80112000 {
 			       <&dma 35 0 0x0>; /* Logical - MemToDev */
 			dma-names = "rx", "tx";
 			power-domains = <&pm_domains DOMAIN_VAPE>;
+
+			status = "disabled";
 		};
 
-		spi@80111000 {
+		spi2: spi@80111000 {
 			compatible = "arm,pl022", "arm,primecell";
 			reg = <0x80111000 0x1000>;
 			interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
@@ -788,9 +817,11 @@ spi@80111000 {
 			       <&dma 33 0 0x0>; /* Logical - MemToDev */
 			dma-names = "rx", "tx";
 			power-domains = <&pm_domains DOMAIN_VAPE>;
+
+			status = "disabled";
 		};
 
-		spi@80129000 {
+		spi3: spi@80129000 {
 			compatible = "arm,pl022", "arm,primecell";
 			reg = <0x80129000 0x1000>;
 			interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
@@ -803,9 +834,11 @@ spi@80129000 {
 			       <&dma 40 0 0x0>; /* Logical - MemToDev */
 			dma-names = "rx", "tx";
 			power-domains = <&pm_domains DOMAIN_VAPE>;
+
+			status = "disabled";
 		};
 
-		ux500_serial0: uart@80120000 {
+		serial0: uart@80120000 {
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0x80120000 0x1000>;
 			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
@@ -820,7 +853,7 @@ ux500_serial0: uart@80120000 {
 			status = "disabled";
 		};
 
-		ux500_serial1: uart@80121000 {
+		serial1: uart@80121000 {
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0x80121000 0x1000>;
 			interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
@@ -835,7 +868,7 @@ ux500_serial1: uart@80121000 {
 			status = "disabled";
 		};
 
-		ux500_serial2: uart@80007000 {
+		serial2: uart@80007000 {
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0x80007000 0x1000>;
 			interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/ste-href-ab8505.dtsi b/arch/arm/boot/dts/ste-href-ab8505.dtsi
deleted file mode 100644
index 95cf38a..0000000
--- a/arch/arm/boot/dts/ste-href-ab8505.dtsi
+++ /dev/null
@@ -1,234 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 Linaro Ltd.
- */
-
-/ {
-	soc {
-		prcmu@80157000 {
-			ab8505 {
-				ab8505-gpio {
-					/* Hog a few default settings */
-					pinctrl-names = "default";
-					pinctrl-0 = <&gpio2_default_mode>,
-						    <&gpio10_default_mode>,
-						    <&gpio11_default_mode>,
-						    <&gpio13_default_mode>,
-						    <&gpio34_default_mode>,
-						    <&gpio50_default_mode>,
-						    <&pwm_default_mode>,
-						    <&adi2_default_mode>,
-						    <&modsclsda_default_mode>,
-						    <&resethw_default_mode>,
-						    <&service_default_mode>;
-
-					/*
-					 * Pins 2, 10, 11, 13, 34 and 50
-					 * are muxed in as GPIO, and configured as INPUT PULL DOWN
-					 */
-					gpio2 {
-						gpio2_default_mode: gpio2_default {
-							default_mux {
-								function = "gpio";
-								groups = "gpio2_a_1";
-							};
-							default_cfg {
-								pins = "GPIO2_R5";
-								input-enable;
-								bias-pull-down;
-							};
-						};
-					};
-					gpio10 {
-						gpio10_default_mode: gpio10_default {
-							default_mux {
-								function = "gpio";
-								groups = "gpio10_d_1";
-							};
-							default_cfg {
-								pins = "GPIO10_B16";
-								input-enable;
-								bias-pull-down;
-							};
-						};
-					};
-					gpio11 {
-						gpio11_default_mode: gpio11_default {
-							default_mux {
-								function = "gpio";
-								groups = "gpio11_d_1";
-							};
-							default_cfg {
-								pins = "GPIO11_B17";
-								input-enable;
-								bias-pull-down;
-							};
-						};
-					};
-					gpio13 {
-						gpio13_default_mode: gpio13_default {
-							default_mux {
-								function = "gpio";
-								groups = "gpio13_d_1";
-							};
-							default_cfg {
-								pins = "GPIO13_D17";
-								input-enable;
-								bias-disable;
-							};
-						};
-					};
-					gpio34 {
-						gpio34_default_mode: gpio34_default {
-							default_mux {
-								function = "gpio";
-								groups = "gpio34_a_1";
-							};
-							default_cfg {
-								pins = "GPIO34_H14";
-								input-enable;
-								bias-pull-down;
-							};
-						};
-					};
-					gpio50 {
-						gpio50_default_mode: gpio50_default {
-							default_mux {
-								function = "gpio";
-								groups = "gpio50_d_1";
-							};
-							default_cfg {
-								pins = "GPIO50_L4";
-								input-enable;
-								bias-disable;
-							};
-						};
-					};
-					/* This sets up the PWM pin 14 */
-					pwm {
-						pwm_default_mode: pwm_default {
-							default_mux {
-								function = "pwmout";
-								groups = "pwmout1_d_1";
-							};
-							default_cfg {
-								pins = "GPIO14_C16";
-								input-enable;
-								bias-pull-down;
-							};
-						};
-					};
-					/* This sets up audio interface 2 */
-					adi2 {
-						adi2_default_mode: adi2_default {
-							default_mux {
-								function = "adi2";
-								groups = "adi2_d_1";
-							};
-							default_cfg {
-								pins = "GPIO17_P2",
-									 "GPIO18_N3",
-									 "GPIO19_T1",
-									 "GPIO20_P3";
-								input-enable;
-								bias-pull-down;
-							};
-						};
-					};
-					/* Modem I2C setup (SCL and SDA pins) */
-					modsclsda {
-						modsclsda_default_mode: modsclsda_default {
-							default_mux {
-								function = "modsclsda";
-								groups = "modsclsda_d_1";
-							};
-							default_cfg {
-								pins = "GPIO40_J15",
-									"GPIO41_J14";
-								input-enable;
-								bias-pull-down;
-							};
-						};
-					};
-					resethw {
-						resethw_default_mode: resethw_default {
-							default_mux {
-								function = "resethw";
-								groups = "resethw_d_1";
-							};
-							default_cfg {
-								pins = "GPIO52_D16";
-								input-enable;
-								bias-pull-down;
-							};
-						};
-					};
-					service {
-						service_default_mode: service_default {
-							default_mux {
-								function = "service";
-								groups = "service_d_1";
-							};
-							default_cfg {
-								pins = "GPIO53_D15";
-								input-enable;
-								bias-pull-down;
-							};
-						};
-					};
-					/*
-					 * Clock output pins associated with regulators.
-					 */
-					sysclkreq2 {
-						sysclkreq2_default_mode: sysclkreq2_default {
-							default_mux {
-								function = "sysclkreq";
-								groups = "sysclkreq2_d_1";
-							};
-							default_cfg {
-								pins = "GPIO1_N4";
-								input-enable;
-								bias-disable;
-							};
-						};
-						sysclkreq2_sleep_mode: sysclkreq2_sleep {
-							default_mux {
-								function = "gpio";
-								groups = "gpio1_a_1";
-							};
-							default_cfg {
-								pins = "GPIO1_N4";
-								input-enable;
-								bias-pull-down;
-							};
-						};
-					};
-					sysclkreq4 {
-						sysclkreq4_default_mode: sysclkreq4_default {
-							default_mux {
-								function = "sysclkreq";
-								groups = "sysclkreq4_d_1";
-							};
-							default_cfg {
-								pins = "GPIO3_P5";
-								input-enable;
-								bias-disable;
-							};
-						};
-						sysclkreq4_sleep_mode: sysclkreq4_sleep {
-							default_mux {
-								function = "gpio";
-								groups = "gpio3_a_1";
-							};
-							default_cfg {
-								pins = "GPIO3_P5";
-								input-enable;
-								bias-pull-down;
-							};
-						};
-					};
-				};
-			};
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi b/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi
index 2c382d2..434fa6b 100644
--- a/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi
+++ b/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi
@@ -3,212 +3,11 @@
  * Copyright 2013 Linaro Ltd.
  */
 
-#include "ste-nomadik-pinctrl.dtsi"
+#include "ste-dbx5x0-pinctrl.dtsi"
 
 / {
 	soc {
 		pinctrl {
-			/* Settings for all UART default and sleep states */
-			uart0 {
-				uart0_default_mode: uart0_default {
-					default_mux {
-						function = "u0";
-						groups = "u0_a_1";
-					};
-					default_cfg1 {
-						pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */
-						ste,config = <&in_pu>;
-					};
-
-					default_cfg2 {
-						pins = "GPIO1_AJ3", "GPIO3_AH3"; /* RTS+TXD */
-						ste,config = <&out_hi>;
-					};
-				};
-
-				uart0_sleep_mode: uart0_sleep {
-					sleep_cfg1 {
-						pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */
-						ste,config = <&slpm_in_wkup_pdis>;
-					};
-
-					sleep_cfg2 {
-						pins = "GPIO1_AJ3"; /* RTS */
-						ste,config = <&slpm_out_hi_wkup_pdis>;
-					};
-
-					sleep_cfg3 {
-						pins = "GPIO3_AH3"; /* TXD */
-						ste,config = <&slpm_out_wkup_pdis>;
-					};
-				};
-			};
-
-			uart1 {
-				uart1_default_mode: uart1_default {
-					default_mux {
-						function = "u1";
-						groups = "u1rxtx_a_1";
-					};
-					default_cfg1 {
-						pins = "GPIO4_AH6"; /* RXD */
-						ste,config = <&in_pu>;
-					};
-
-					default_cfg2 {
-						pins = "GPIO5_AG6"; /* TXD */
-						ste,config = <&out_hi>;
-					};
-				};
-
-				uart1_sleep_mode: uart1_sleep {
-					sleep_cfg1 {
-						pins = "GPIO4_AH6"; /* RXD */
-						ste,config = <&slpm_in_wkup_pdis>;
-					};
-
-					sleep_cfg2 {
-						pins = "GPIO5_AG6"; /* TXD */
-						ste,config = <&slpm_out_wkup_pdis>;
-					};
-				};
-			};
-
-			uart2 {
-				uart2_default_mode: uart2_default {
-					default_mux {
-						function = "u2";
-						groups = "u2rxtx_c_1";
-					};
-					default_cfg1 {
-						pins = "GPIO29_W2"; /* RXD */
-						ste,config = <&in_pu>;
-					};
-
-					default_cfg2 {
-						pins = "GPIO30_W3"; /* TXD */
-						ste,config = <&out_hi>;
-					};
-				};
-
-				uart2_sleep_mode: uart2_sleep {
-					sleep_cfg1 {
-						pins = "GPIO29_W2"; /* RXD */
-						ste,config = <&in_wkup_pdis>;
-					};
-
-					sleep_cfg2 {
-						pins = "GPIO30_W3"; /* TXD */
-						ste,config = <&out_wkup_pdis>;
-					};
-				};
-			};
-
-			/* Settings for all I2C default and sleep states */
-			i2c0 {
-				i2c0_default_mode: i2c_default {
-					default_mux {
-						function = "i2c0";
-						groups = "i2c0_a_1";
-					};
-					default_cfg1 {
-						pins = "GPIO147_C15", "GPIO148_B16"; /* SDA/SCL */
-						ste,config = <&in_pu>;
-					};
-				};
-
-				i2c0_sleep_mode: i2c_sleep {
-					sleep_cfg1 {
-						pins = "GPIO147_C15", "GPIO148_B16"; /* SDA/SCL */
-						ste,config = <&slpm_in_wkup_pdis>;
-					};
-				};
-			};
-
-			i2c1 {
-				i2c1_default_mode: i2c_default {
-					default_mux {
-						function = "i2c1";
-						groups = "i2c1_b_2";
-					};
-					default_cfg1 {
-						pins = "GPIO16_AD3", "GPIO17_AD4"; /* SDA/SCL */
-						ste,config = <&in_pu>;
-					};
-				};
-
-				i2c1_sleep_mode: i2c_sleep {
-					sleep_cfg1 {
-						pins = "GPIO16_AD3", "GPIO17_AD4"; /* SDA/SCL */
-						ste,config = <&slpm_in_wkup_pdis>;
-					};
-				};
-			};
-
-			i2c2 {
-				i2c2_default_mode: i2c_default {
-					default_mux {
-						function = "i2c2";
-						groups = "i2c2_b_2";
-					};
-					default_cfg1 {
-						pins = "GPIO10_AF5", "GPIO11_AG4"; /* SDA/SCL */
-						ste,config = <&in_pu>;
-					};
-				};
-
-				i2c2_sleep_mode: i2c_sleep {
-					sleep_cfg1 {
-						pins = "GPIO10_AF5", "GPIO11_AG4"; /* SDA/SCL */
-						ste,config = <&slpm_in_wkup_pdis>;
-					};
-				};
-			};
-
-			i2c3 {
-				i2c3_default_mode: i2c_default {
-					default_mux {
-						function = "i2c3";
-						groups = "i2c3_c_2";
-					};
-					default_cfg1 {
-						pins = "GPIO229_AG7", "GPIO230_AF7"; /* SDA/SCL */
-						ste,config = <&in_pu>;
-					};
-				};
-
-				i2c3_sleep_mode: i2c_sleep {
-					sleep_cfg1 {
-						pins = "GPIO229_AG7", "GPIO230_AF7"; /* SDA/SCL */
-						ste,config = <&slpm_in_wkup_pdis>;
-					};
-				};
-			};
-
-			/*
-			 * Activating I2C4 will conflict with UART1 about the same pins so do not
-			 * enable I2C4 and UART1 at the same time.
-			 */
-			i2c4 {
-				i2c4_default_mode: i2c_default {
-					default_mux {
-						function = "i2c4";
-						groups = "i2c4_b_1";
-					};
-					default_cfg1 {
-						pins = "GPIO4_AH6", "GPIO5_AG6"; /* SDA/SCL */
-						ste,config = <&in_pu>;
-					};
-				};
-
-				i2c4_sleep_mode: i2c_sleep {
-					sleep_cfg1 {
-						pins = "GPIO4_AH6", "GPIO5_AG6"; /* SDA/SCL */
-						ste,config = <&slpm_in_wkup_pdis>;
-					};
-				};
-			};
-
 			/* Settings for all SPI default and sleep states */
 			spi2 {
 				spi2_default_mode: spi_default {
@@ -270,335 +69,6 @@ sleep_cfg3 {
 				};
 			};
 
-			/* Settings for all MMC/SD/SDIO default and sleep states */
-			sdi0 {
-				/* This is the external SD card slot, 4 bits wide */
-				sdi0_default_mode: sdi0_default {
-					default_mux {
-						function = "mc0";
-						groups = "mc0_a_1";
-					};
-					default_cfg1 {
-						pins =
-						"GPIO18_AC2", /* CMDDIR */
-						"GPIO19_AC1", /* DAT0DIR */
-						"GPIO20_AB4"; /* DAT2DIR */
-						ste,config = <&out_hi>;
-					};
-					default_cfg2 {
-						pins = "GPIO22_AA3"; /* FBCLK */
-						ste,config = <&in_nopull>;
-					};
-					default_cfg3 {
-						pins = "GPIO23_AA4"; /* CLK */
-						ste,config = <&out_lo>;
-					};
-					default_cfg4 {
-						pins =
-						"GPIO24_AB2", /* CMD */
-						"GPIO25_Y4", /* DAT0 */
-						"GPIO26_Y2", /* DAT1 */
-						"GPIO27_AA2", /* DAT2 */
-						"GPIO28_AA1"; /* DAT3 */
-						ste,config = <&in_pu>;
-					};
-				};
-
-				sdi0_sleep_mode: sdi0_sleep {
-					sleep_cfg1 {
-						pins =
-						"GPIO18_AC2", /* CMDDIR */
-						"GPIO19_AC1", /* DAT0DIR */
-						"GPIO20_AB4"; /* DAT2DIR */
-						ste,config = <&slpm_out_hi_wkup_pdis>;
-					};
-					sleep_cfg2 {
-						pins =
-						"GPIO22_AA3", /* FBCLK */
-						"GPIO24_AB2", /* CMD */
-						"GPIO25_Y4", /* DAT0 */
-						"GPIO26_Y2", /* DAT1 */
-						"GPIO27_AA2", /* DAT2 */
-						"GPIO28_AA1"; /* DAT3 */
-						ste,config = <&slpm_in_wkup_pdis>;
-					};
-					sleep_cfg3 {
-						pins = "GPIO23_AA4"; /* CLK */
-						ste,config = <&slpm_out_lo_wkup_pdis>;
-					};
-				};
-			};
-
-			sdi1 {
-				/* This is the WLAN SDIO 4 bits wide */
-				sdi1_default_mode: sdi1_default {
-					default_mux {
-						function = "mc1";
-						groups = "mc1_a_1";
-					};
-					default_cfg1 {
-						pins = "GPIO208_AH16"; /* CLK */
-						ste,config = <&out_lo>;
-					};
-					default_cfg2 {
-						pins = "GPIO209_AG15"; /* FBCLK */
-						ste,config = <&in_nopull>;
-					};
-					default_cfg3 {
-						pins =
-						"GPIO210_AJ15", /* CMD */
-						"GPIO211_AG14", /* DAT0 */
-						"GPIO212_AF13", /* DAT1 */
-						"GPIO213_AG13", /* DAT2 */
-						"GPIO214_AH15"; /* DAT3 */
-						ste,config = <&in_pu>;
-					};
-				};
-
-				sdi1_sleep_mode: sdi1_sleep {
-					sleep_cfg1 {
-						pins = "GPIO208_AH16"; /* CLK */
-						ste,config = <&slpm_out_lo_wkup_pdis>;
-					};
-					sleep_cfg2 {
-						pins =
-						"GPIO209_AG15", /* FBCLK */
-						"GPIO210_AJ15", /* CMD */
-						"GPIO211_AG14", /* DAT0 */
-						"GPIO212_AF13", /* DAT1 */
-						"GPIO213_AG13", /* DAT2 */
-						"GPIO214_AH15"; /* DAT3 */
-						ste,config = <&slpm_in_wkup_pdis>;
-					};
-				};
-			};
-
-			sdi2 {
-				/* This is the eMMC 8 bits wide, usually PoP eMMC */
-				sdi2_default_mode: sdi2_default {
-					default_mux {
-						function = "mc2";
-						groups = "mc2_a_1";
-					};
-					default_cfg1 {
-						pins = "GPIO128_A5"; /* CLK */
-						ste,config = <&out_lo>;
-					};
-					default_cfg2 {
-						pins = "GPIO130_C8"; /* FBCLK */
-						ste,config = <&in_nopull>;
-					};
-					default_cfg3 {
-						pins =
-						"GPIO129_B4", /* CMD */
-						"GPIO131_A12", /* DAT0 */
-						"GPIO132_C10", /* DAT1 */
-						"GPIO133_B10", /* DAT2 */
-						"GPIO134_B9", /* DAT3 */
-						"GPIO135_A9", /* DAT4 */
-						"GPIO136_C7", /* DAT5 */
-						"GPIO137_A7", /* DAT6 */
-						"GPIO138_C5"; /* DAT7 */
-						ste,config = <&in_pu>;
-					};
-				};
-
-				sdi2_sleep_mode: sdi2_sleep {
-					sleep_cfg1 {
-						pins = "GPIO128_A5"; /* CLK */
-						ste,config = <&out_lo_wkup_pdis>;
-					};
-					sleep_cfg2 {
-						pins =
-						"GPIO130_C8", /* FBCLK */
-						"GPIO129_B4"; /* CMD */
-						ste,config = <&in_wkup_pdis_en>;
-					};
-					sleep_cfg3 {
-						pins =
-						"GPIO131_A12", /* DAT0 */
-						"GPIO132_C10", /* DAT1 */
-						"GPIO133_B10", /* DAT2 */
-						"GPIO134_B9", /* DAT3 */
-						"GPIO135_A9", /* DAT4 */
-						"GPIO136_C7", /* DAT5 */
-						"GPIO137_A7", /* DAT6 */
-						"GPIO138_C5"; /* DAT7 */
-						ste,config = <&in_wkup_pdis>;
-					};
-				};
-			};
-
-			sdi4 {
-				/* This is the eMMC 8 bits wide, usually PCB-mounted eMMC */
-				sdi4_default_mode: sdi4_default {
-					default_mux {
-						function = "mc4";
-						groups = "mc4_a_1";
-					};
-					default_cfg1 {
-						pins = "GPIO203_AE23"; /* CLK */
-						ste,config = <&out_lo>;
-					};
-					default_cfg2 {
-						pins = "GPIO202_AF25"; /* FBCLK */
-						ste,config = <&in_nopull>;
-					};
-					default_cfg3 {
-						pins =
-						"GPIO201_AF24", /* CMD */
-						"GPIO200_AH26", /* DAT0 */
-						"GPIO199_AH23", /* DAT1 */
-						"GPIO198_AG25", /* DAT2 */
-						"GPIO197_AH24", /* DAT3 */
-						"GPIO207_AJ23", /* DAT4 */
-						"GPIO206_AG24", /* DAT5 */
-						"GPIO205_AG23", /* DAT6 */
-						"GPIO204_AF23"; /* DAT7 */
-						ste,config = <&in_pu>;
-					};
-				};
-
-				sdi4_sleep_mode: sdi4_sleep {
-					sleep_cfg1 {
-						pins = "GPIO203_AE23"; /* CLK */
-						ste,config = <&out_lo_wkup_pdis>;
-					};
-					sleep_cfg2 {
-						pins =
-						"GPIO202_AF25", /* FBCLK */
-						"GPIO201_AF24", /* CMD */
-						"GPIO200_AH26", /* DAT0 */
-						"GPIO199_AH23", /* DAT1 */
-						"GPIO198_AG25", /* DAT2 */
-						"GPIO197_AH24", /* DAT3 */
-						"GPIO207_AJ23", /* DAT4 */
-						"GPIO206_AG24", /* DAT5 */
-						"GPIO205_AG23", /* DAT6 */
-						"GPIO204_AF23"; /* DAT7 */
-						ste,config = <&slpm_in_wkup_pdis>;
-					};
-				};
-			};
-
-			/*
-			 * Multi-rate serial ports (MSPs) - MSP3 output is internal and
-			 * cannot be muxed onto any pins.
-			 */
-			msp0 {
-				msp0_default_mode: msp0_default {
-					default_msp0_mux {
-						function = "msp0";
-						groups = "msp0txrx_a_1", "msp0tfstck_a_1";
-					};
-					default_msp0_cfg {
-						pins =
-						"GPIO12_AC4", /* TXD */
-						"GPIO15_AC3", /* RXD */
-						"GPIO13_AF3", /* TFS */
-						"GPIO14_AE3"; /* TCK */
-						ste,config = <&in_nopull>;
-					};
-				};
-			};
-
-			msp1 {
-				msp1_default_mode: msp1_default {
-					default_mux {
-						function = "msp1";
-						groups = "msp1txrx_a_1", "msp1_a_1";
-					};
-					default_cfg1 {
-						pins = "GPIO33_AF2";
-						ste,config = <&out_lo>;
-					};
-					default_cfg2 {
-						pins =
-						"GPIO34_AE1",
-						"GPIO35_AE2",
-						"GPIO36_AG2";
-						ste,config = <&in_nopull>;
-					};
-
-				};
-			};
-
-			msp2 {
-				msp2_default_mode: msp2_default {
-					/* MSP2 usually used for HDMI audio */
-					default_mux {
-						function = "msp2";
-						groups = "msp2_a_1";
-					};
-					default_cfg1 {
-						pins =
-						"GPIO193_AH27", /* TXD */
-						"GPIO194_AF27", /* TCK */
-						"GPIO195_AG28"; /* TFS */
-						ste,config = <&in_pd>;
-					};
-					default_cfg2 {
-						pins = "GPIO196_AG26"; /* RXD */
-						ste,config = <&out_lo>;
-					};
-				};
-			};
-
-
-			musb {
-				musb_default_mode: musb_default {
-					default_mux {
-						function = "usb";
-						groups = "usb_a_1";
-					};
-					default_cfg1 {
-						pins =
-						"GPIO256_AF28", /* NXT */
-						"GPIO258_AD29", /* XCLK */
-						"GPIO259_AC29", /* DIR */
-						"GPIO260_AD28", /* DAT7 */
-						"GPIO261_AD26", /* DAT6 */
-						"GPIO262_AE26", /* DAT5 */
-						"GPIO263_AG29", /* DAT4 */
-						"GPIO264_AE27", /* DAT3 */
-						"GPIO265_AD27", /* DAT2 */
-						"GPIO266_AC28", /* DAT1 */
-						"GPIO267_AC27"; /* DAT0 */
-						ste,config = <&in_nopull>;
-					};
-					default_cfg2 {
-						pins = "GPIO257_AE29"; /* STP */
-						ste,config = <&out_hi>;
-					};
-				};
-
-				musb_sleep_mode: musb_sleep {
-					sleep_cfg1 {
-						pins =
-						"GPIO256_AF28", /* NXT */
-						"GPIO258_AD29", /* XCLK */
-						"GPIO259_AC29"; /* DIR */
-						ste,config = <&slpm_wkup_pdis_en>;
-					};
-					sleep_cfg2 {
-						pins = "GPIO257_AE29"; /* STP */
-						ste,config = <&slpm_out_hi_wkup_pdis>;
-					};
-					sleep_cfg3 {
-						pins =
-						"GPIO260_AD28", /* DAT7 */
-						"GPIO261_AD26", /* DAT6 */
-						"GPIO262_AE26", /* DAT5 */
-						"GPIO263_AG29", /* DAT4 */
-						"GPIO264_AE27", /* DAT3 */
-						"GPIO265_AD27", /* DAT2 */
-						"GPIO266_AC28", /* DAT1 */
-						"GPIO267_AC27"; /* DAT0 */
-						ste,config = <&slpm_in_wkup_pdis_en>;
-					};
-				};
-			};
-
 			mcde {
 				lcd_default_mode: lcd_default {
 					default_mux1 {
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi
new file mode 100644
index 0000000..e024520
--- /dev/null
+++ b/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Device Tree for the TVK1281618 R2 UIB
+ */
+
+#include "ste-href-tvk1281618.dtsi"
+
+/ {
+	soc {
+		i2c@80128000 {
+			lsm303dlh@18 {
+				/* Accelerometer */
+				compatible = "st,lsm303dlh-accel";
+				st,drdy-int-pin = <1>;
+				drive-open-drain;
+				reg = <0x18>;
+				vdd-supply = <&ab8500_ldo_aux1_reg>;
+				vddio-supply = <&db8500_vsmps2_reg>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&accel_tvk_mode>;
+				/*
+				 * These interrupts cannot be used: the other component
+				 * ST-Micro L3G4200D gyro that is connected to the same lines
+				 * cannot set its DRDY line to open drain, so it cannot be
+				 * shared with other peripherals. They should be defined for
+				 * the falling edge if they could be wired together.
+				 *
+				 * interrupts-extended =
+				 * <&gpio1 0 IRQ_TYPE_EDGE_FALLING>,
+				 * <&gpio2 19 IRQ_TYPE_EDGE_FALLING>;
+				 */
+			};
+			lsm303dlh@1e {
+				/* Magnetometer */
+				compatible = "st,lsm303dlh-magn";
+				reg = <0x1e>;
+				vdd-supply = <&ab8500_ldo_aux1_reg>;
+				vddio-supply = <&db8500_vsmps2_reg>;
+				/*
+				 * These interrupts cannot be used: the other component
+				 * ST-Micro L3G4200D gyro that is connected to the same lines
+				 * cannot set its DRDY line to open drain, so it cannot be
+				 * shared with other peripherals. They should be defined for
+				 * the falling edge if they could be wired together.
+				 *
+				 * interrupts-extended =
+				 * <&gpio1 0 IRQ_TYPE_EDGE_FALLING>,
+				 * <&gpio2 19 IRQ_TYPE_EDGE_FALLING>;
+				 */
+			};
+			lis331dl@1c {
+				/* Accelerometer */
+				compatible = "st,lis331dl-accel";
+				st,drdy-int-pin = <1>;
+				reg = <0x1c>;
+				vdd-supply = <&ab8500_ldo_aux1_reg>;
+				vddio-supply = <&db8500_vsmps2_reg>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&accel_tvk_mode>;
+				interrupt-parent = <&gpio2>;
+				/* INT2 would need to be open drain */
+				interrupts = <18 IRQ_TYPE_EDGE_RISING>,
+					     <19 IRQ_TYPE_EDGE_RISING>;
+			};
+		};
+		mcde@a0350000 {
+			status = "okay";
+
+			dsi@a0351000 {
+				panel {
+					compatible = "samsung,s6d16d0";
+					reg = <0>;
+					vdd1-supply = <&ab8500_ldo_aux1_reg>;
+					reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+				};
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
new file mode 100644
index 0000000..cb3677f
--- /dev/null
+++ b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Device Tree for the TVK1281618 R3 UIB
+ */
+
+#include "ste-href-tvk1281618.dtsi"
+
+/ {
+	soc {
+		i2c@80128000 {
+			/* Marked:
+			 * 129
+			 * M35
+			 * L3GD20
+			 */
+			l3gd20@6a {
+				/* Gyroscope */
+				compatible = "st,l3gd20";
+				status = "disabled";
+				st,drdy-int-pin = <1>;
+				drive-open-drain;
+				reg = <0x6a>; // 0x6a or 0x6b
+				vdd-supply = <&ab8500_ldo_aux1_reg>;
+				vddio-supply = <&db8500_vsmps2_reg>;
+			};
+			/*
+			 * Marked:
+			 * 2122
+			 * C3H
+			 * DQEEE
+			 * LIS3DH?
+			 */
+			lis3dh@18 {
+				/* Accelerometer */
+				compatible = "st,lis3dh-accel";
+				st,drdy-int-pin = <1>;
+				reg = <0x18>;
+				vdd-supply = <&ab8500_ldo_aux1_reg>;
+				vddio-supply = <&db8500_vsmps2_reg>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&accel_tvk_mode>;
+			};
+		};
+
+		mcde@a0350000 {
+			status = "okay";
+
+			dsi@a0351000 {
+				panel {
+					compatible = "sony,acx424akp";
+					reg = <0>;
+					vddi-supply = <&ab8500_ldo_aux1_reg>;
+					reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+				};
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
index 3bafd26..e1dbfae 100644
--- a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
+++ b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
@@ -2,7 +2,7 @@
 /*
  * Copyright 2012 ST-Ericsson AB
  *
- * Device Tree for the TVK1281618 UIB
+ * Device Tree for the TVK1281618 family of UIBs
  */
 
 #include <dt-bindings/interrupt-controller/irq.h>
@@ -81,62 +81,8 @@ tc3589x_keypad {
 				};
 			};
 		};
-		/* Sensors mounted on this board variant */
+		/* Sensors mounted on all board variants */
 		i2c@80128000 {
-			lsm303dlh@18 {
-				/* Accelerometer */
-				compatible = "st,lsm303dlh-accel";
-				st,drdy-int-pin = <1>;
-				drive-open-drain;
-				reg = <0x18>;
-				vdd-supply = <&ab8500_ldo_aux1_reg>;
-				vddio-supply = <&db8500_vsmps2_reg>;
-				pinctrl-names = "default";
-				pinctrl-0 = <&accel_tvk_mode>;
-				/*
-				 * These interrupts cannot be used: the other component
-				 * ST-Micro L3D4200D gyro that is connected to the same lines
-				 * cannot set its DRDY line to open drain, so it cannot be
-				 * shared with other peripherals. The should be defined for
-				 * the falling edge if they could be wired together.
-				 *
-				 * interrupts-extended =
-				 * <&gpio1 0 IRQ_TYPE_EDGE_FALLING>,
-				 * <&gpio2 19 IRQ_TYPE_EDGE_FALLING>;
-				 */
-			};
-			lsm303dlh@1e {
-				/* Magnetometer */
-				compatible = "st,lsm303dlh-magn";
-				reg = <0x1e>;
-				vdd-supply = <&ab8500_ldo_aux1_reg>;
-				vddio-supply = <&db8500_vsmps2_reg>;
-				/*
-				 * These interrupts cannot be used: the other component
-				 * ST-Micro L3D4200D gyro that is connected to the same lines
-				 * cannot set its DRDY line to open drain, so it cannot be
-				 * shared with other peripherals. The should be defined for
-				 * the falling edge if they could be wired together.
-				 *
-				 * interrupts-extended =
-				 * <&gpio1 0 IRQ_TYPE_EDGE_FALLING>,
-				 * <&gpio2 19 IRQ_TYPE_EDGE_FALLING>;
-				 */
-			};
-			lis331dl@1c {
-				/* Accelerometer */
-				compatible = "st,lis331dl-accel";
-				st,drdy-int-pin = <1>;
-				reg = <0x1c>;
-				vdd-supply = <&ab8500_ldo_aux1_reg>;
-				vddio-supply = <&db8500_vsmps2_reg>;
-				pinctrl-names = "default";
-				pinctrl-0 = <&accel_tvk_mode>;
-				interrupt-parent = <&gpio2>;
-				/* INT2 would need to be open drain */
-				interrupts = <18 IRQ_TYPE_EDGE_RISING>,
-					     <19 IRQ_TYPE_EDGE_RISING>;
-			};
 			ak8974@f {
 				/* Magnetometer */
 				compatible = "asahi-kasei,ak8974";
@@ -268,18 +214,5 @@ tvk_cfg1 {
 				};
 			};
 		};
-
-		mcde@a0350000 {
-			status = "okay";
-
-			dsi@a0351000 {
-				panel {
-					compatible = "samsung,s6d16d0";
-					reg = <0>;
-					vdd1-supply = <&ab8500_ldo_aux1_reg>;
-					reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
-				};
-			};
-		};
 	};
 };
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index 4f6acbd..33e3b0b 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -4,7 +4,6 @@
  */
 
 #include <dt-bindings/interrupt-controller/irq.h>
-#include "ste-dbx5x0.dtsi"
 #include "ste-href-family-pinctrl.dtsi"
 
 / {
@@ -16,41 +15,44 @@ memory {
 	soc {
 		uart@80120000 {
 			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&uart0_default_mode>;
-			pinctrl-1 = <&uart0_sleep_mode>;
+			pinctrl-0 = <&u0_a_1_default>;
+			pinctrl-1 = <&u0_a_1_sleep>;
 			status = "okay";
 		};
 
 		/* This UART is unused and thus left disabled */
 		uart@80121000 {
 			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&uart1_default_mode>;
-			pinctrl-1 = <&uart1_sleep_mode>;
+			pinctrl-0 = <&u1rxtx_a_1_default>;
+			pinctrl-1 = <&u1rxtx_a_1_sleep>;
 		};
 
 		uart@80007000 {
 			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&uart2_default_mode>;
-			pinctrl-1 = <&uart2_sleep_mode>;
+			pinctrl-0 = <&u2rxtx_c_1_default>;
+			pinctrl-1 = <&u2rxtx_c_1_sleep>;
 			status = "okay";
 		};
 
 		i2c@80004000 {
 			pinctrl-names = "default","sleep";
-			pinctrl-0 = <&i2c0_default_mode>;
-			pinctrl-1 = <&i2c0_sleep_mode>;
+			pinctrl-0 = <&i2c0_a_1_default>;
+			pinctrl-1 = <&i2c0_a_1_sleep>;
+			status = "okay";
 		};
 
 		i2c@80122000 {
 			pinctrl-names = "default","sleep";
-			pinctrl-0 = <&i2c1_default_mode>;
-			pinctrl-1 = <&i2c1_sleep_mode>;
+			pinctrl-0 = <&i2c1_b_2_default>;
+			pinctrl-1 = <&i2c1_b_2_sleep>;
+			status = "okay";
 		};
 
 		i2c@80128000 {
 			pinctrl-names = "default","sleep";
-			pinctrl-0 = <&i2c2_default_mode>;
-			pinctrl-1 = <&i2c2_sleep_mode>;
+			pinctrl-0 = <&i2c2_b_2_default>;
+			pinctrl-1 = <&i2c2_b_2_sleep>;
+			status = "okay";
 			lp5521@33 {
 				compatible = "national,lp5521";
 				reg = <0x33>;
@@ -96,8 +98,9 @@ bh1780@29 {
 
 		i2c@80110000 {
 			pinctrl-names = "default","sleep";
-			pinctrl-0 = <&i2c3_default_mode>;
-			pinctrl-1 = <&i2c3_sleep_mode>;
+			pinctrl-0 = <&i2c3_c_2_default>;
+			pinctrl-1 = <&i2c3_c_2_sleep>;
+			status = "okay";
 		};
 
 		/* ST6G3244ME level translator for 1.8/2.9 V */
@@ -132,8 +135,8 @@ sdi0_per1@80126000 {
 			vmmc-supply = <&ab8500_ldo_aux3_reg>;
 			vqmmc-supply = <&vmmci>;
 			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&sdi0_default_mode>;
-			pinctrl-1 = <&sdi0_sleep_mode>;
+			pinctrl-0 = <&mc0_a_1_default &sdi0_default_mode>;
+			pinctrl-1 = <&mc0_a_1_sleep>;
 
 			status = "okay";
 		};
@@ -145,8 +148,8 @@ sdi1_per2@80118000 {
 			bus-width = <4>;
 			non-removable;
 			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&sdi1_default_mode>;
-			pinctrl-1 = <&sdi1_sleep_mode>;
+			pinctrl-0 = <&mc1_a_1_default>;
+			pinctrl-1 = <&mc1_a_1_sleep>;
 
 			status = "okay";
 		};
@@ -160,8 +163,8 @@ sdi2_per3@80005000 {
 			non-removable;
 			vmmc-supply = <&db8500_vsmps2_reg>;
 			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&sdi2_default_mode>;
-			pinctrl-1 = <&sdi2_sleep_mode>;
+			pinctrl-0 = <&mc2_a_1_default>;
+			pinctrl-1 = <&mc2_a_1_sleep>;
 
 			status = "okay";
 		};
@@ -175,27 +178,27 @@ sdi4_per2@80114000 {
 			non-removable;
 			vmmc-supply = <&ab8500_ldo_aux2_reg>;
 			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&sdi4_default_mode>;
-			pinctrl-1 = <&sdi4_sleep_mode>;
+			pinctrl-0 = <&mc4_a_1_default>;
+			pinctrl-1 = <&mc4_a_1_sleep>;
 
 			status = "okay";
 		};
 
 		msp0: msp@80123000 {
 			pinctrl-names = "default";
-			pinctrl-0 = <&msp0_default_mode>;
+			pinctrl-0 = <&msp0txrxtfstck_a_1_default>;
 			status = "okay";
 		};
 
 		msp1: msp@80124000 {
 			pinctrl-names = "default";
-			pinctrl-0 = <&msp1_default_mode>;
+			pinctrl-0 = <&msp1txrx_a_1_default>;
 			status = "okay";
 		};
 
 		msp2: msp@80117000 {
 			pinctrl-names = "default";
-			pinctrl-0 = <&msp2_default_mode>;
+			pinctrl-0 = <&msp2_a_1_default>;
 		};
 
 		msp3: msp@80125000 {
@@ -209,8 +212,8 @@ ab8500-gpio {
 
 				ab8500_usb {
 					pinctrl-names = "default", "sleep";
-					pinctrl-0 = <&musb_default_mode>;
-					pinctrl-1 = <&musb_sleep_mode>;
+					pinctrl-0 = <&usb_a_1_default>;
+					pinctrl-1 = <&usb_a_1_sleep>;
 				};
 
 				ab8500-regulators {
@@ -257,6 +260,14 @@ ab8500_ldo_ana_reg: ab8500_ldo_ana {
 			};
 		};
 
+		pinctrl {
+			sdi0 {
+				sdi0_default_mode: sdi0_default {
+					/* Some boards add additional settings here */
+				};
+			};
+		};
+
 		mcde@a0350000 {
 			pinctrl-names = "default", "sleep";
 			pinctrl-0 = <&lcd_default_mode>;
diff --git a/arch/arm/boot/dts/ste-href520-tvk.dts b/arch/arm/boot/dts/ste-href520-tvk.dts
new file mode 100644
index 0000000..f8c0c1e
--- /dev/null
+++ b/arch/arm/boot/dts/ste-href520-tvk.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Device Tree for the HREF520 version with the TVK1281618 UIB
+ */
+
+/dts-v1/;
+#include "ste-db8520.dtsi"
+#include "ste-hrefv60plus.dtsi"
+#include "ste-href-tvk1281618-r3.dtsi"
+
+/ {
+	model = "ST-Ericsson HREF520 and TVK1281618 UIB";
+	compatible = "st-ericsson,href520", "st-ericsson,u8500";
+
+	soc {
+		vmmci: regulator-gpio {
+			gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+			enable-gpio = <&gpio2 14 GPIO_ACTIVE_HIGH>;
+			enable-active-high;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/ste-hrefprev60-stuib.dts b/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
index b78be5f..8ce6b72 100644
--- a/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
+++ b/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
@@ -4,8 +4,7 @@
  */
 
 /dts-v1/;
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/interrupt-controller/irq.h>
+#include "ste-db8500.dtsi"
 #include "ste-hrefprev60.dtsi"
 #include "ste-href-stuib.dtsi"
 
@@ -13,13 +12,6 @@ / {
 	model = "ST-Ericsson HREF (pre-v60) and ST UIB";
 	compatible = "st-ericsson,mop500", "st-ericsson,u8500";
 
-	/* This stablilizes the serial port enumeration */
-	aliases {
-		serial0 = &ux500_serial0;
-		serial1 = &ux500_serial1;
-		serial2 = &ux500_serial2;
-	};
-
 	soc {
 		/* Reset line for the BU21013 touchscreen */
 		i2c@80110000 {
diff --git a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
index 60eed26..142f547 100644
--- a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
+++ b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
@@ -4,17 +4,11 @@
  */
 
 /dts-v1/;
+#include "ste-db8500.dtsi"
 #include "ste-hrefprev60.dtsi"
-#include "ste-href-tvk1281618.dtsi"
+#include "ste-href-tvk1281618-r2.dtsi"
 
 / {
 	model = "ST-Ericsson HREF (pre-v60) and TVK1281618 UIB";
 	compatible = "st-ericsson,mop500", "st-ericsson,u8500";
-
-	/* This stablilizes the serial port enumeration */
-	aliases {
-		serial0 = &ux500_serial0;
-		serial1 = &ux500_serial1;
-		serial2 = &ux500_serial2;
-	};
 };
diff --git a/arch/arm/boot/dts/ste-hrefprev60.dtsi b/arch/arm/boot/dts/ste-hrefprev60.dtsi
index a036def..115495d 100644
--- a/arch/arm/boot/dts/ste-hrefprev60.dtsi
+++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi
@@ -5,7 +5,6 @@
  * Device Tree for the HREF+ prior to the v60 variant.
  */
 
-#include "ste-dbx5x0.dtsi"
 #include "ste-href-ab8500.dtsi"
 #include "ste-href.dtsi"
 
@@ -58,6 +57,7 @@ spi@80002000 {
 			 */
 			pinctrl-names = "default";
 			pinctrl-0 = <&ssp0_hrefprev60_mode>;
+			status = "okay";
 		};
 
 		// External Micro SD slot
diff --git a/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts b/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
index 9be513a..1316886 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
+++ b/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
@@ -6,8 +6,7 @@
  */
 
 /dts-v1/;
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/interrupt-controller/irq.h>
+#include "ste-db8500.dtsi"
 #include "ste-hrefv60plus.dtsi"
 #include "ste-href-stuib.dtsi"
 
@@ -15,13 +14,6 @@ / {
 	model = "ST-Ericsson HREF (v60+) and ST UIB";
 	compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
 
-	/* This stablilizes the serial port enumeration */
-	aliases {
-		serial0 = &ux500_serial0;
-		serial1 = &ux500_serial1;
-		serial2 = &ux500_serial2;
-	};
-
 	soc {
 		/* Reset line for the BU21013 touchscreen */
 		i2c@80110000 {
diff --git a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
index 73ea310..5d4b824 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
+++ b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
@@ -6,17 +6,11 @@
  */
 
 /dts-v1/;
+#include "ste-db8500.dtsi"
 #include "ste-hrefv60plus.dtsi"
-#include "ste-href-tvk1281618.dtsi"
+#include "ste-href-tvk1281618-r2.dtsi"
 
 / {
 	model = "ST-Ericsson HREF (v60+) and TVK1281618 UIB";
 	compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
-
-	/* This stablilizes the serial port enumeration */
-	aliases {
-		serial0 = &ux500_serial0;
-		serial1 = &ux500_serial1;
-		serial2 = &ux500_serial2;
-	};
 };
diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
index aece8eb..05b4fbb 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus.dtsi
+++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
@@ -3,7 +3,6 @@
  * Copyright 2012 ST-Ericsson AB
  */
 
-#include "ste-dbx5x0.dtsi"
 #include "ste-href-ab8500.dtsi"
 #include "ste-href.dtsi"
 
diff --git a/arch/arm/boot/dts/ste-nomadik-pinctrl.dtsi b/arch/arm/boot/dts/ste-nomadik-pinctrl.dtsi
index 5673a11..bfdb5d9a 100644
--- a/arch/arm/boot/dts/ste-nomadik-pinctrl.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-pinctrl.dtsi
@@ -25,6 +25,11 @@ out_lo: output_low {
 		ste,output = <OUTPUT_LOW>;
 	};
 
+	gpio_in_nopull: gpio_input_nopull {
+		ste,gpio = <GPIOMODE_ENABLED>;
+		ste,input = <INPUT_NOPULL>;
+	};
+
 	gpio_in_pu: gpio_input_pull_up {
 		ste,gpio = <GPIOMODE_ENABLED>;
 		ste,input = <INPUT_PULLUP>;
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index efbc446..be90e73 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -4,7 +4,7 @@
  */
 
 /dts-v1/;
-#include "ste-dbx5x0.dtsi"
+#include "ste-db8500.dtsi"
 #include "ste-href-ab8500.dtsi"
 #include "ste-href-family-pinctrl.dtsi"
 
@@ -12,13 +12,6 @@ / {
 	model = "Calao Systems Snowball platform with device tree";
 	compatible = "calaosystems,snowball-a9500", "st-ericsson,u9500";
 
-	/* This stablilizes the serial port enumeration */
-	aliases {
-		serial0 = &ux500_serial0;
-		serial1 = &ux500_serial1;
-		serial2 = &ux500_serial2;
-	};
-
 	memory {
 		device_type = "memory";
 		reg = <0x00000000 0x20000000>;
@@ -156,19 +149,19 @@ gpio@8011e080 {
 
 		msp0: msp@80123000 {
 			pinctrl-names = "default";
-			pinctrl-0 = <&msp0_default_mode>;
+			pinctrl-0 = <&msp0txrxtfstck_a_1_default>;
 			status = "okay";
 		};
 
 		msp1: msp@80124000 {
 			pinctrl-names = "default";
-			pinctrl-0 = <&msp1_default_mode>;
+			pinctrl-0 = <&msp1txrx_a_1_default>;
 			status = "okay";
 		};
 
 		msp2: msp@80117000 {
 			pinctrl-names = "default";
-			pinctrl-0 = <&msp2_default_mode>;
+			pinctrl-0 = <&msp2_a_1_default>;
 		};
 
 		msp3: msp@80125000 {
@@ -238,8 +231,8 @@ sdi0_per1@80126000 {
 			vmmc-supply = <&ab8500_ldo_aux3_reg>;
 			vqmmc-supply = <&vmmci>;
 			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&sdi0_default_mode>;
-			pinctrl-1 = <&sdi0_sleep_mode>;
+			pinctrl-0 = <&mc0_a_1_default &sdi0_default_mode>;
+			pinctrl-1 = <&mc0_a_1_sleep>;
 
 			/* GPIO218 MMC_CD */
 			cd-gpios  = <&gpio6 26 GPIO_ACTIVE_LOW>;
@@ -253,8 +246,8 @@ sdi1_per2@80118000 {
 			max-frequency = <100000000>;
 			bus-width = <4>;
 			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&sdi1_default_mode>;
-			pinctrl-1 = <&sdi1_sleep_mode>;
+			pinctrl-0 = <&mc1_a_1_default>;
+			pinctrl-1 = <&mc1_a_1_sleep>;
 
 			status = "okay";
 		};
@@ -263,7 +256,7 @@ sdi1_per2@80118000 {
 		sdi2_per3@80005000 {
 			arm,primecell-periphid = <0x10480180>;
 			pinctrl-names = "default";
-			pinctrl-0 = <&sdi2_sleep_mode>;
+			pinctrl-0 = <&mc2_a_1_sleep>;
 
 			status = "okay";
 		};
@@ -276,49 +269,52 @@ sdi4_per2@80114000 {
 			cap-mmc-highspeed;
 			vmmc-supply = <&ab8500_ldo_aux2_reg>;
 			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&sdi4_default_mode>;
-			pinctrl-1 = <&sdi4_sleep_mode>;
+			pinctrl-0 = <&mc4_a_1_default>;
+			pinctrl-1 = <&mc4_a_1_sleep>;
 
 			status = "okay";
 		};
 
 		uart@80120000 {
 			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&uart0_default_mode>;
-			pinctrl-1 = <&uart0_sleep_mode>;
+			pinctrl-0 = <&u0_a_1_default>;
+			pinctrl-1 = <&u0_a_1_sleep>;
 			status = "okay";
 		};
 
 		/* This UART is unused and thus left disabled */
 		uart@80121000 {
 			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&uart1_default_mode>;
-			pinctrl-1 = <&uart1_sleep_mode>;
+			pinctrl-0 = <&u1rxtx_a_1_default>;
+			pinctrl-1 = <&u1rxtx_a_1_sleep>;
 		};
 
 		uart@80007000 {
 			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&uart2_default_mode>;
-			pinctrl-1 = <&uart2_sleep_mode>;
+			pinctrl-0 = <&u2rxtx_c_1_default>;
+			pinctrl-1 = <&u2rxtx_c_1_sleep>;
 			status = "okay";
 		};
 
 		i2c@80004000 {
 			pinctrl-names = "default","sleep";
-			pinctrl-0 = <&i2c0_default_mode>;
-			pinctrl-1 = <&i2c0_sleep_mode>;
+			pinctrl-0 = <&i2c0_a_1_default>;
+			pinctrl-1 = <&i2c0_a_1_sleep>;
+			status = "okay";
 		};
 
 		i2c@80122000 {
 			pinctrl-names = "default","sleep";
-			pinctrl-0 = <&i2c1_default_mode>;
-			pinctrl-1 = <&i2c1_sleep_mode>;
+			pinctrl-0 = <&i2c1_b_2_default>;
+			pinctrl-1 = <&i2c1_b_2_sleep>;
+			status = "okay";
 		};
 
 		i2c@80128000 {
 			pinctrl-names = "default","sleep";
-			pinctrl-0 = <&i2c2_default_mode>;
-			pinctrl-1 = <&i2c2_sleep_mode>;
+			pinctrl-0 = <&i2c2_b_2_default>;
+			pinctrl-1 = <&i2c2_b_2_sleep>;
+			status = "okay";
 			lsm303dlh@18 {
 				/* Accelerometer */
 				compatible = "st,lsm303dlh-accel";
@@ -367,20 +363,18 @@ lsp001wm@5c {
 
 		i2c@80110000 {
 			pinctrl-names = "default","sleep";
-			pinctrl-0 = <&i2c3_default_mode>;
-			pinctrl-1 = <&i2c3_sleep_mode>;
+			pinctrl-0 = <&i2c3_c_2_default>;
+			pinctrl-1 = <&i2c3_c_2_sleep>;
+			status = "okay";
 		};
 
 		spi@80002000 {
 			pinctrl-names = "default";
 			pinctrl-0 = <&ssp0_snowball_mode>;
+			status = "okay";
 		};
 
 		prcmu@80157000 {
-			cpufreq {
-				status = "okay";
-			};
-
 			ab8500 {
 				ab8500-gpio {
 					/*
@@ -406,8 +400,8 @@ ab8500-gpio {
 
 				ab8500_usb {
 					pinctrl-names = "default", "sleep";
-					pinctrl-0 = <&musb_default_mode>;
-					pinctrl-1 = <&musb_sleep_mode>;
+					pinctrl-0 = <&usb_a_1_default>;
+					pinctrl-1 = <&usb_a_1_sleep>;
 				};
 
 				ext_regulators: ab8500-ext-regulators {
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-golden.dts b/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
new file mode 100644
index 0000000..313f0ab
--- /dev/null
+++ b/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/dts-v1/;
+
+#include "ste-db8500.dtsi"
+#include "ste-ab8505.dtsi"
+#include "ste-dbx5x0-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/*
+ * Note: This device tree cannot be booted directly with the Samsung bootloader.
+ * You need an intermediate, device-tree compatible bootloader
+ * that locks the L2 cache. Otherwise the kernel will crash after decompression.
+ *
+ * There is a port of (mainline) U-Boot, see
+ * https://wiki.postmarketos.org/wiki/ST-Ericsson_NovaThor_U8500#U-Boot
+ */
+/ {
+	model = "Samsung Galaxy S III mini (GT-I8190)";
+	compatible = "samsung,golden", "st-ericsson,u8500";
+
+	chosen {
+		stdout-path = &serial2;
+	};
+
+	soc {
+		/* External Micro SD card slot */
+		sdi0_per1@80126000 {
+			status = "okay";
+
+			arm,primecell-periphid = <0x10480180>;
+			max-frequency = <100000000>;
+			bus-width = <4>;
+
+			non-removable;
+			/*
+			 * Unfortunately, there is no way to enable the UHS
+			 * modes due to a limitation of the SD level translator:
+			 * It will either translate to 2.9V or disconnect the
+			 * DATA lines, so switching to 1.8V signal voltage fails.
+			 */
+			cap-sd-highspeed;
+			cap-mmc-highspeed;
+			st,sig-pin-fbclk;
+			full-pwr-cycle;
+
+			vmmc-supply = <&ab8500_ldo_aux3_reg>;
+			vqmmc-supply = <&sd_level_translator>;
+
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&mc0_a_2_default>;
+			pinctrl-1 = <&mc0_a_2_sleep>;
+		};
+
+		/* WLAN SDIO */
+		sdi1_per2@80118000 {
+			status = "okay";
+
+			arm,primecell-periphid = <0x10480180>;
+			max-frequency = <50000000>;
+			bus-width = <4>;
+
+			non-removable;
+			cap-sd-highspeed;
+
+			vmmc-supply = <&wl_reg_on>;
+
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&mc1_a_2_default>;
+			pinctrl-1 = <&mc1_a_2_sleep>;
+
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			wifi@1 {
+				compatible = "brcm,bcm4329-fmac";
+				reg = <1>;
+
+				/* GPIO216 (WLAN_HOST_WAKE) */
+				interrupt-parent = <&gpio6>;
+				interrupts = <24 IRQ_TYPE_EDGE_FALLING>;
+				interrupt-names = "host-wake";
+
+				pinctrl-names = "default";
+				pinctrl-0 = <&wlan_default>;
+			};
+		};
+
+		/* eMMC */
+		sdi2_per3@80005000 {
+			status = "okay";
+
+			arm,primecell-periphid = <0x10480180>;
+			max-frequency = <100000000>;
+			bus-width = <8>;
+
+			non-removable;
+			cap-mmc-highspeed;
+			mmc-ddr-1_8v;
+
+			vmmc-supply = <&vmem_3v3>;
+
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&mc2_a_1_default>;
+			pinctrl-1 = <&mc2_a_1_sleep>;
+		};
+
+		/* BT UART */
+		uart@80120000 {
+			status = "okay";
+
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&u0_a_1_default>;
+			pinctrl-1 = <&u0_a_1_sleep>;
+
+			bluetooth {
+				compatible = "brcm,bcm4330-bt";
+				/* GPIO222 (BT_VREG_ON) */
+				shutdown-gpios = <&gpio6 30 GPIO_ACTIVE_HIGH>;
+				/* GPIO199 (BT_WAKE) */
+				device-wakeup-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
+				/* GPIO97 (BT_HOST_WAKE) */
+				host-wakeup-gpios = <&gpio3 1 GPIO_ACTIVE_HIGH>;
+
+				pinctrl-names = "default";
+				pinctrl-0 = <&bluetooth_default>;
+			};
+		};
+
+		/* GPF UART */
+		uart@80121000 {
+			status = "okay";
+
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&u1rxtx_a_1_default &u1ctsrts_a_1_default>;
+			pinctrl-1 = <&u1rxtx_a_1_sleep &u1ctsrts_a_1_sleep>;
+		};
+
+		/* Debugging console UART */
+		uart@80007000 {
+			status = "okay";
+
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&u2rxtx_c_1_default>;
+			pinctrl-1 = <&u2rxtx_c_1_sleep>;
+		};
+
+		i2c@80128000 {
+			status = "okay";
+
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&i2c2_b_2_default>;
+			pinctrl-1 = <&i2c2_b_2_sleep>;
+
+			imu@68 {
+				compatible = "invensense,mpu6050";
+				reg = <0x68>;
+
+				/* GPIO206 (ACC_INT) */
+				interrupt-parent = <&gpio6>;
+				interrupts = <14 IRQ_TYPE_EDGE_RISING>;
+
+				mount-matrix = "0", "1", "0",
+					      "-1", "0", "0",
+					       "0", "0", "1";
+
+				vdd-supply = <&ab8500_ldo_aux1_reg>;
+				vddio-supply = <&ab8500_ldo_aux8_reg>;
+
+				pinctrl-names = "default";
+				pinctrl-0 = <&imu_default>;
+			};
+		};
+
+		i2c@80110000 {
+			status = "okay";
+
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&i2c3_c_2_default>;
+			pinctrl-1 = <&i2c3_c_2_sleep>;
+
+			touchscreen@4a {
+				compatible = "atmel,maxtouch";
+				reg = <0x4a>;
+
+				/* GPIO218 (TSP_INT_1V8) */
+				interrupt-parent = <&gpio6>;
+				interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
+
+				pinctrl-names = "default";
+				pinctrl-0 = <&tsp_default>;
+			};
+		};
+
+		prcmu@80157000 {
+			ab8505 {
+				ab8500_usb {
+					pinctrl-names = "default", "sleep";
+					pinctrl-0 = <&usb_a_1_default>;
+					pinctrl-1 = <&usb_a_1_sleep>;
+				};
+
+				ab8505-regulators {
+					ab8500_ldo_aux1 {
+						regulator-name = "sensor_3v";
+						regulator-min-microvolt = <3000000>;
+						regulator-max-microvolt = <3000000>;
+					};
+
+					ab8500_ldo_aux2 {
+						regulator-name = "vreg_tsp_a3v3";
+						regulator-min-microvolt = <3300000>;
+						regulator-max-microvolt = <3300000>;
+						regulator-always-on; /* FIXME */
+					};
+
+					ab8500_ldo_aux3 {
+						regulator-name = "vdd_tf_2v91";
+					};
+
+					ab8500_ldo_aux4 {
+						regulator-name = "key_led_3.3v";
+						regulator-min-microvolt = <3300000>;
+						regulator-max-microvolt = <3300000>;
+					};
+
+					ab8500_ldo_aux5 {
+						regulator-name = "vreg_tsp_1v8";
+						regulator-min-microvolt = <1800000>;
+						regulator-max-microvolt = <1800000>;
+						regulator-always-on; /* FIXME */
+					};
+
+					ab8500_ldo_aux6 {
+						regulator-name = "touch_key_2.2v";
+						regulator-min-microvolt = <2200000>;
+						regulator-max-microvolt = <2200000>;
+					};
+
+					ab8500_ldo_aux8 {
+						regulator-name = "sensor_1v8";
+					};
+				};
+			};
+		};
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&gpio_keys_default>;
+
+		label = "GPIO Buttons";
+
+		volume-up {
+			label = "Volume Up";
+			/* GPIO67 (VOL_UP) */
+			gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_VOLUMEUP>;
+		};
+
+		volume-down {
+			label = "Volume Down";
+			/* GPIO92 (VOL_DOWN) */
+			gpios = <&gpio2 28 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_VOLUMEDOWN>;
+		};
+
+		home {
+			label = "Home";
+			/* GPIO91 (HOME_KEY) */
+			gpios = <&gpio2 27 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_HOMEPAGE>;
+		};
+	};
+
+	vibrator {
+		compatible = "gpio-vibrator";
+		/* GPIO195 (MOT_EN) */
+		enable-gpios = <&gpio6 3 GPIO_ACTIVE_HIGH>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&vibrator_default>;
+	};
+
+	/* External LDO for eMMC */
+	vmem_3v3: regulator-vmem {
+		compatible = "regulator-fixed";
+
+		regulator-name = "vmem_3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+
+		startup-delay-us = <200>;
+
+		/* GPIO223 (MEM_LDO_EN) */
+		gpio = <&gpio6 31 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&mem_ldo_default>;
+	};
+
+	/* TI TXS0206-29 level translator for 2.9 V */
+	sd_level_translator: regulator-sd-level-translator {
+		compatible = "regulator-fixed";
+
+		regulator-name = "sd-level-translator";
+		regulator-min-microvolt = <2900000>;
+		regulator-max-microvolt = <2900000>;
+
+		startup-delay-us = <200>;
+
+		/* GPIO87 (TXS0206-29_EN) */
+		gpios = <&gpio2 23 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&sd_level_translator_default>;
+	};
+
+	/*
+	 * WL_REG_ON takes WLAN out of reset and enables the internal regulators.
+	 * The voltage specified here is only used to determine the OCR mask,
+	 * the BCM chip is actually connected directly to VBAT.
+	 */
+	wl_reg_on: regulator-wl-reg-on {
+		compatible = "regulator-fixed";
+
+		regulator-name = "wl-reg-on";
+		regulator-min-microvolt = <3000000>;
+		regulator-max-microvolt = <3000000>;
+
+		startup-delay-us = <100000>;
+
+		/* GPIO215 (WLAN_EN) */
+		gpio = <&gpio6 23 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&wlan_en_default>;
+	};
+};
+
+&pinctrl {
+	gpio-keys {
+		gpio_keys_default: gpio_keys_default {
+			golden_cfg1 {
+				pins = "GPIO67",	/* VOL_UP */
+				       "GPIO91",	/* HOME_KEY */
+				       "GPIO92";	/* VOL_DOWN */
+				ste,config = <&gpio_in_pu>;
+			};
+		};
+	};
+
+	sdi0 {
+		sd_level_translator_default: sd_level_translator_default {
+			golden_cfg1 {
+				pins = "GPIO87_B3";	/* TXS0206-29_EN */
+				ste,config = <&gpio_out_lo>;
+			};
+		};
+	};
+
+	sdi2 {
+		mem_ldo_default: mem_ldo_default {
+			golden_cfg1 {
+				pins = "GPIO223_AH9";	/* MEM_LDO_EN */
+				ste,config = <&gpio_out_hi>;
+			};
+		};
+	};
+
+	imu {
+		imu_default: imu_default {
+			golden_cfg1 {
+				pins = "GPIO206_AG24";	/* ACC_INT */
+				ste,config = <&gpio_in_pd>;
+			};
+		};
+	};
+
+	tsp {
+		tsp_default: tsp_default {
+			golden_cfg1 {
+				pins = "GPIO218_AH11";	/* TSP_INT_1V8 */
+				ste,config = <&gpio_in_nopull>;
+			};
+		};
+	};
+
+	wlan {
+		wlan_default: wlan_default {
+			golden_cfg1 {
+				pins = "GPIO216_AG12";	/* WLAN_HOST_WAKE */
+				ste,config = <&gpio_in_pd>;
+			};
+		};
+
+		wlan_en_default: wlan_en_default {
+			golden_cfg1 {
+				pins = "GPIO215_AH13";	/* WLAN_EN */
+				ste,config = <&gpio_out_lo>;
+			};
+		};
+	};
+
+	bluetooth {
+		bluetooth_default: bluetooth_default {
+			golden_cfg1 {
+				pins = "GPIO199_AH23",	/* BT_WAKE */
+				       "GPIO222_AJ9";	/* BT_VREG_ON */
+				ste,config = <&gpio_out_lo>;
+			};
+			golden_cfg2 {
+				pins = "GPIO97_D9";	/* BT_HOST_WAKE */
+				ste,config = <&gpio_in_nopull>;
+			};
+		};
+	};
+
+	vibrator {
+		vibrator_default: vibrator_default {
+			golden_cfg1 {
+				pins = "GPIO195_AG28";	/* MOT_EN */
+				ste,config = <&gpio_out_lo>;
+			};
+		};
+	};
+};
+
+&ab8505_gpio {
+	/* Hog a few default settings */
+	pinctrl-names = "default";
+	pinctrl-0 = <&gpio_default>;
+
+	gpio {
+		gpio_default: gpio_default {
+			golden_mux {
+				/* Change unused pins to GPIO mode */
+				function = "gpio";
+				groups = "gpio3_a_1",	/* default: SysClkReq4 */
+					 "gpio14_a_1";	/* default: PWMOut1 */
+			};
+			golden_cfg1 {
+				pins = "GPIO11_B17", "GPIO13_D17", "GPIO50_L4";
+				bias-disable;
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/stm32429i-eval.dts b/arch/arm/boot/dts/stm32429i-eval.dts
index 58288aa..c27fa35 100644
--- a/arch/arm/boot/dts/stm32429i-eval.dts
+++ b/arch/arm/boot/dts/stm32429i-eval.dts
@@ -95,6 +95,13 @@ vref: regulator-vref {
 		regulator-max-microvolt = <3300000>;
 	};
 
+	vdd_panel: vdd-panel {
+		compatible = "regulator-fixed";
+		regulator-name = "vdd_panel";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
 	leds {
 		compatible = "gpio-leds";
 		green {
@@ -138,6 +145,7 @@ usbotg_hs_phy: usbphy {
 
 	panel_rgb: panel-rgb {
 		compatible = "ampire,am-480272h3tmqw-t01h";
+		power-supply = <&vdd_panel>;
 		status = "okay";
 		port {
 			panel_in_rgb: endpoint {
diff --git a/arch/arm/boot/dts/stm32f4-pinctrl.dtsi b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi
index 3520289..392fa14 100644
--- a/arch/arm/boot/dts/stm32f4-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi
@@ -163,7 +163,7 @@ gpiok: gpio@40022800 {
 				st,bank-name = "GPIOK";
 			};
 
-			usart1_pins_a: usart1@0 {
+			usart1_pins_a: usart1-0 {
 				pins1 {
 					pinmux = <STM32_PINMUX('A', 9, AF7)>; /* USART1_TX */
 					bias-disable;
@@ -176,7 +176,7 @@ pins2 {
 				};
 			};
 
-			usart3_pins_a: usart3@0 {
+			usart3_pins_a: usart3-0 {
 				pins1 {
 					pinmux = <STM32_PINMUX('B', 10, AF7)>; /* USART3_TX */
 					bias-disable;
@@ -189,7 +189,7 @@ pins2 {
 				};
 			};
 
-			usbotg_fs_pins_a: usbotg_fs@0 {
+			usbotg_fs_pins_a: usbotg-fs-0 {
 				pins {
 					pinmux = <STM32_PINMUX('A', 10, AF10)>, /* OTG_FS_ID */
 						 <STM32_PINMUX('A', 11, AF10)>, /* OTG_FS_DM */
@@ -200,7 +200,7 @@ pins {
 				};
 			};
 
-			usbotg_fs_pins_b: usbotg_fs@1 {
+			usbotg_fs_pins_b: usbotg-fs-1 {
 				pins {
 					pinmux = <STM32_PINMUX('B', 12, AF12)>, /* OTG_HS_ID */
 						 <STM32_PINMUX('B', 14, AF12)>, /* OTG_HS_DM */
@@ -211,7 +211,7 @@ pins {
 				};
 			};
 
-			usbotg_hs_pins_a: usbotg_hs@0 {
+			usbotg_hs_pins_a: usbotg-hs-0 {
 				pins {
 					pinmux = <STM32_PINMUX('H', 4, AF10)>, /* OTG_HS_ULPI_NXT*/
 						 <STM32_PINMUX('I', 11, AF10)>, /* OTG_HS_ULPI_DIR */
@@ -231,7 +231,7 @@ pins {
 				};
 			};
 
-			ethernet_mii: mii@0 {
+			ethernet_mii: mii-0 {
 				pins {
 					pinmux = <STM32_PINMUX('G', 13, AF11)>, /* ETH_MII_TXD0_ETH_RMII_TXD0 */
 						 <STM32_PINMUX('G', 14, AF11)>, /* ETH_MII_TXD1_ETH_RMII_TXD1 */
@@ -251,13 +251,13 @@ pins {
 				};
 			};
 
-			adc3_in8_pin: adc@200 {
+			adc3_in8_pin: adc-200 {
 				pins {
 					pinmux = <STM32_PINMUX('F', 10, ANALOG)>;
 				};
 			};
 
-			pwm1_pins: pwm@1 {
+			pwm1_pins: pwm-1 {
 				pins {
 					pinmux = <STM32_PINMUX('A', 8, AF1)>, /* TIM1_CH1 */
 						 <STM32_PINMUX('B', 13, AF1)>, /* TIM1_CH1N */
@@ -265,14 +265,14 @@ pins {
 				};
 			};
 
-			pwm3_pins: pwm@3 {
+			pwm3_pins: pwm-3 {
 				pins {
 					pinmux = <STM32_PINMUX('B', 4, AF2)>, /* TIM3_CH1 */
 						 <STM32_PINMUX('B', 5, AF2)>; /* TIM3_CH2 */
 				};
 			};
 
-			i2c1_pins: i2c1@0 {
+			i2c1_pins: i2c1-0 {
 				pins {
 					pinmux = <STM32_PINMUX('B', 9, AF4)>, /* I2C1_SDA */
 						 <STM32_PINMUX('B', 6, AF4)>; /* I2C1_SCL */
@@ -282,7 +282,7 @@ pins {
 				};
 			};
 
-			ltdc_pins: ltdc@0 {
+			ltdc_pins: ltdc-0 {
 				pins {
 					pinmux = <STM32_PINMUX('I', 12, AF14)>, /* LCD_HSYNC */
 						 <STM32_PINMUX('I', 13, AF14)>, /* LCD_VSYNC */
@@ -316,7 +316,7 @@ pins {
 				};
 			};
 
-			dcmi_pins: dcmi@0 {
+			dcmi_pins: dcmi-0 {
 				pins {
 					pinmux = <STM32_PINMUX('A', 4, AF13)>, /* DCMI_HSYNC */
 						 <STM32_PINMUX('B', 7, AF13)>, /* DCMI_VSYNC */
@@ -339,7 +339,7 @@ pins {
 				};
 			};
 
-			sdio_pins: sdio_pins@0 {
+			sdio_pins: sdio-pins-0 {
 				pins {
 					pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDIO_D0 */
 						 <STM32_PINMUX('C', 9, AF12)>, /* SDIO_D1 */
@@ -352,7 +352,7 @@ pins {
 				};
 			};
 
-			sdio_pins_od: sdio_pins_od@0 {
+			sdio_pins_od: sdio-pins-od-0 {
 				pins1 {
 					pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDIO_D0 */
 						 <STM32_PINMUX('C', 9, AF12)>, /* SDIO_D1 */
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index 5c8a826..d777069 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -80,7 +80,7 @@ clk_i2s_ckin: i2s-ckin {
 	};
 
 	soc {
-		romem: nvmem@1fff7800 {
+		romem: efuse@1fff7800 {
 			compatible = "st,stm32f4-otp";
 			reg = <0x1fff7800 0x400>;
 			#address-cells = <1>;
@@ -318,7 +318,6 @@ rtc: rtc@40002800 {
 			compatible = "st,stm32-rtc";
 			reg = <0x40002800 0x400>;
 			clocks = <&rcc 1 CLK_RTC>;
-			clock-names = "ck_rtc";
 			assigned-clocks = <&rcc 1 CLK_RTC>;
 			assigned-clock-parents = <&rcc 1 CLK_LSE>;
 			interrupt-parent = <&exti>;
@@ -789,7 +788,6 @@ dcmi: dcmi@50050000 {
 		rng: rng@50060800 {
 			compatible = "st,stm32-rng";
 			reg = <0x50060800 0x400>;
-			interrupts = <80>;
 			clocks = <&rcc 0 STM32F4_AHB2_CLOCK(RNG)>;
 
 		};
diff --git a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts
index f3ce477..9397db0 100644
--- a/arch/arm/boot/dts/stm32f469-disco.dts
+++ b/arch/arm/boot/dts/stm32f469-disco.dts
@@ -76,6 +76,13 @@ mmc_vcard: mmc_vcard {
 		regulator-max-microvolt = <3300000>;
 	};
 
+	vdd_dsi: vdd-dsi {
+		compatible = "regulator-fixed";
+		regulator-name = "vdd_dsi";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
 	soc {
 		dma-ranges = <0xc0000000 0x0 0x10000000>;
 	};
@@ -155,6 +162,7 @@ panel-dsi@0 {
 		compatible = "orisetech,otm8009a";
 		reg = <0>; /* dsi virtual channel (0..3) */
 		reset-gpios = <&gpioh 7 GPIO_ACTIVE_LOW>;
+		power-supply = <&vdd_dsi>;
 		status = "okay";
 
 		port {
diff --git a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
index 9314128..fe4cfda 100644
--- a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
@@ -127,7 +127,7 @@ gpiok: gpio@40022800 {
 				st,bank-name = "GPIOK";
 			};
 
-			cec_pins_a: cec@0 {
+			cec_pins_a: cec-0 {
 				pins {
 					pinmux = <STM32_PINMUX('A', 15, AF4)>; /* HDMI CEC */
 					slew-rate = <0>;
@@ -136,7 +136,7 @@ pins {
 				};
 			};
 
-			usart1_pins_a: usart1@0 {
+			usart1_pins_a: usart1-0 {
 				pins1 {
 					pinmux = <STM32_PINMUX('A', 9, AF7)>; /* USART1_TX */
 					bias-disable;
@@ -149,7 +149,7 @@ pins2 {
 				};
 			};
 
-			usart1_pins_b: usart1@1 {
+			usart1_pins_b: usart1-1 {
 				pins1 {
 					pinmux = <STM32_PINMUX('A', 9, AF7)>; /* USART1_TX */
 					bias-disable;
@@ -162,7 +162,7 @@ pins2 {
 				};
 			};
 
-			i2c1_pins_b: i2c1@0 {
+			i2c1_pins_b: i2c1-0 {
 				pins {
 					pinmux = <STM32_PINMUX('B', 9, AF4)>, /* I2C1 SDA */
 						 <STM32_PINMUX('B', 8, AF4)>; /* I2C1 SCL */
@@ -172,7 +172,7 @@ pins {
 				};
 			};
 
-			usbotg_hs_pins_a: usbotg-hs@0 {
+			usbotg_hs_pins_a: usbotg-hs-0 {
 				pins {
 					pinmux = <STM32_PINMUX('H', 4, AF10)>, /* OTG_HS_ULPI_NXT */
 						 <STM32_PINMUX('I', 11, AF10)>, /* OTG_HS_ULPI_DIR */
@@ -192,7 +192,7 @@ pins {
 				};
 			};
 
-			usbotg_hs_pins_b: usbotg-hs@1 {
+			usbotg_hs_pins_b: usbotg-hs-1 {
 				pins {
 					pinmux = <STM32_PINMUX('H', 4, AF10)>, /* OTG_HS_ULPI_NXT */
 						 <STM32_PINMUX('C', 2, AF10)>, /* OTG_HS_ULPI_DIR */
@@ -212,7 +212,7 @@ pins {
 				};
 			};
 
-			usbotg_fs_pins_a: usbotg-fs@0 {
+			usbotg_fs_pins_a: usbotg-fs-0 {
 				pins {
 					pinmux = <STM32_PINMUX('A', 10, AF10)>, /* OTG_FS_ID */
 						 <STM32_PINMUX('A', 11, AF10)>, /* OTG_FS_DM */
@@ -223,7 +223,7 @@ pins {
 				};
 			};
 
-			sdio_pins_a: sdio_pins_a@0 {
+			sdio_pins_a: sdio-pins-a-0 {
 				pins {
 					pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1 D0 */
 						 <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1 D1 */
@@ -236,7 +236,7 @@ pins {
 				};
 			};
 
-			sdio_pins_od_a: sdio_pins_od_a@0 {
+			sdio_pins_od_a: sdio-pins-od-a-0 {
 				pins1 {
 					pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1 D0 */
 						 <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1 D1 */
@@ -254,7 +254,7 @@ pins2 {
 				};
 			};
 
-			sdio_pins_b: sdio_pins_b@0 {
+			sdio_pins_b: sdio-pins-b-0 {
 				pins {
 					pinmux = <STM32_PINMUX('G', 9, AF11)>, /* SDMMC2 D0 */
 						 <STM32_PINMUX('G', 10, AF11)>, /* SDMMC2 D1 */
@@ -267,7 +267,7 @@ pins {
 				};
 			};
 
-			sdio_pins_od_b: sdio_pins_od_b@0 {
+			sdio_pins_od_b: sdio-pins-od-b-0 {
 				pins1 {
 					pinmux = <STM32_PINMUX('G', 9, AF11)>, /* SDMMC2 D0 */
 						 <STM32_PINMUX('G', 10, AF11)>, /* SDMMC2 D1 */
diff --git a/arch/arm/boot/dts/stm32f746.dtsi b/arch/arm/boot/dts/stm32f746.dtsi
index d26f93f..93c0637 100644
--- a/arch/arm/boot/dts/stm32f746.dtsi
+++ b/arch/arm/boot/dts/stm32f746.dtsi
@@ -300,7 +300,6 @@ rtc: rtc@40002800 {
 			compatible = "st,stm32-rtc";
 			reg = <0x40002800 0x400>;
 			clocks = <&rcc 1 CLK_RTC>;
-			clock-names = "ck_rtc";
 			assigned-clocks = <&rcc 1 CLK_RTC>;
 			assigned-clock-parents = <&rcc 1 CLK_LSE>;
 			interrupt-parent = <&exti>;
@@ -587,7 +586,7 @@ rcc: rcc@40023800 {
 			assigned-clock-rates = <1000000>;
 		};
 
-		dma1: dma@40026000 {
+		dma1: dma-controller@40026000 {
 			compatible = "st,stm32-dma";
 			reg = <0x40026000 0x400>;
 			interrupts = <11>,
@@ -603,7 +602,7 @@ dma1: dma@40026000 {
 			status = "disabled";
 		};
 
-		dma2: dma@40026400 {
+		dma2: dma-controller@40026400 {
 			compatible = "st,stm32-dma";
 			reg = <0x40026400 0x400>;
 			interrupts = <56>,
diff --git a/arch/arm/boot/dts/stm32h743.dtsi b/arch/arm/boot/dts/stm32h743.dtsi
index c065266..05eb02e 100644
--- a/arch/arm/boot/dts/stm32h743.dtsi
+++ b/arch/arm/boot/dts/stm32h743.dtsi
@@ -231,7 +231,7 @@ spi5: spi@40015000 {
 			status = "disabled";
 		};
 
-		dma1: dma@40020000 {
+		dma1: dma-controller@40020000 {
 			compatible = "st,stm32-dma";
 			reg = <0x40020000 0x400>;
 			interrupts = <11>,
@@ -249,7 +249,7 @@ dma1: dma@40020000 {
 			status = "disabled";
 		};
 
-		dma2: dma@40020400 {
+		dma2: dma-controller@40020400 {
 			compatible = "st,stm32-dma";
 			reg = <0x40020400 0x400>;
 			interrupts = <56>,
@@ -329,7 +329,7 @@ usbotg_fs: usb@40080000 {
 			status = "disabled";
 		};
 
-		mdma1: dma@52000000 {
+		mdma1: dma-controller@52000000 {
 			compatible = "st,stm32h7-mdma";
 			reg = <0x52000000 0x1000>;
 			interrupts = <122>;
diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
new file mode 100644
index 0000000..0237d4d
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
@@ -0,0 +1,1092 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2017 - All Rights Reserved
+ * Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
+ */
+#include <dt-bindings/pinctrl/stm32-pinfunc.h>
+
+&pinctrl {
+	adc1_in6_pins_a: adc1-in6 {
+		pins {
+			pinmux = <STM32_PINMUX('F', 12, ANALOG)>;
+		};
+	};
+
+	adc12_ain_pins_a: adc12-ain-0 {
+		pins {
+			pinmux = <STM32_PINMUX('C', 3, ANALOG)>, /* ADC1 in13 */
+				 <STM32_PINMUX('F', 12, ANALOG)>, /* ADC1 in6 */
+				 <STM32_PINMUX('F', 13, ANALOG)>, /* ADC2 in2 */
+				 <STM32_PINMUX('F', 14, ANALOG)>; /* ADC2 in6 */
+		};
+	};
+
+	adc12_usb_cc_pins_a: adc12-usb-cc-pins-0 {
+		pins {
+			pinmux = <STM32_PINMUX('A', 4, ANALOG)>, /* ADC12 in18 */
+				 <STM32_PINMUX('A', 5, ANALOG)>; /* ADC12 in19 */
+		};
+	};
+
+	cec_pins_a: cec-0 {
+		pins {
+			pinmux = <STM32_PINMUX('A', 15, AF4)>;
+			bias-disable;
+			drive-open-drain;
+			slew-rate = <0>;
+		};
+	};
+
+	cec_pins_sleep_a: cec-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('A', 15, ANALOG)>; /* HDMI_CEC */
+		};
+	};
+
+	cec_pins_b: cec-1 {
+		pins {
+			pinmux = <STM32_PINMUX('B', 6, AF5)>;
+			bias-disable;
+			drive-open-drain;
+			slew-rate = <0>;
+		};
+	};
+
+	cec_pins_sleep_b: cec-sleep-1 {
+		pins {
+			pinmux = <STM32_PINMUX('B', 6, ANALOG)>; /* HDMI_CEC */
+		};
+	};
+
+	dac_ch1_pins_a: dac-ch1 {
+		pins {
+			pinmux = <STM32_PINMUX('A', 4, ANALOG)>;
+		};
+	};
+
+	dac_ch2_pins_a: dac-ch2 {
+		pins {
+			pinmux = <STM32_PINMUX('A', 5, ANALOG)>;
+		};
+	};
+
+	dcmi_pins_a: dcmi-0 {
+		pins {
+			pinmux = <STM32_PINMUX('H', 8,  AF13)>,/* DCMI_HSYNC */
+				 <STM32_PINMUX('B', 7,  AF13)>,/* DCMI_VSYNC */
+				 <STM32_PINMUX('A', 6,  AF13)>,/* DCMI_PIXCLK */
+				 <STM32_PINMUX('H', 9,  AF13)>,/* DCMI_D0 */
+				 <STM32_PINMUX('H', 10, AF13)>,/* DCMI_D1 */
+				 <STM32_PINMUX('H', 11, AF13)>,/* DCMI_D2 */
+				 <STM32_PINMUX('H', 12, AF13)>,/* DCMI_D3 */
+				 <STM32_PINMUX('H', 14, AF13)>,/* DCMI_D4 */
+				 <STM32_PINMUX('I', 4,  AF13)>,/* DCMI_D5 */
+				 <STM32_PINMUX('B', 8,  AF13)>,/* DCMI_D6 */
+				 <STM32_PINMUX('E', 6,  AF13)>,/* DCMI_D7 */
+				 <STM32_PINMUX('I', 1,  AF13)>,/* DCMI_D8 */
+				 <STM32_PINMUX('H', 7,  AF13)>,/* DCMI_D9 */
+				 <STM32_PINMUX('I', 3,  AF13)>,/* DCMI_D10 */
+				 <STM32_PINMUX('H', 15, AF13)>;/* DCMI_D11 */
+			bias-disable;
+		};
+	};
+
+	dcmi_sleep_pins_a: dcmi-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('H', 8,  ANALOG)>,/* DCMI_HSYNC */
+				 <STM32_PINMUX('B', 7,  ANALOG)>,/* DCMI_VSYNC */
+				 <STM32_PINMUX('A', 6,  ANALOG)>,/* DCMI_PIXCLK */
+				 <STM32_PINMUX('H', 9,  ANALOG)>,/* DCMI_D0 */
+				 <STM32_PINMUX('H', 10, ANALOG)>,/* DCMI_D1 */
+				 <STM32_PINMUX('H', 11, ANALOG)>,/* DCMI_D2 */
+				 <STM32_PINMUX('H', 12, ANALOG)>,/* DCMI_D3 */
+				 <STM32_PINMUX('H', 14, ANALOG)>,/* DCMI_D4 */
+				 <STM32_PINMUX('I', 4,  ANALOG)>,/* DCMI_D5 */
+				 <STM32_PINMUX('B', 8,  ANALOG)>,/* DCMI_D6 */
+				 <STM32_PINMUX('E', 6,  ANALOG)>,/* DCMI_D7 */
+				 <STM32_PINMUX('I', 1,  ANALOG)>,/* DCMI_D8 */
+				 <STM32_PINMUX('H', 7,  ANALOG)>,/* DCMI_D9 */
+				 <STM32_PINMUX('I', 3,  ANALOG)>,/* DCMI_D10 */
+				 <STM32_PINMUX('H', 15, ANALOG)>;/* DCMI_D11 */
+		};
+	};
+
+	ethernet0_rgmii_pins_a: rgmii-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('G', 5, AF11)>, /* ETH_RGMII_CLK125 */
+				 <STM32_PINMUX('G', 4, AF11)>, /* ETH_RGMII_GTX_CLK */
+				 <STM32_PINMUX('G', 13, AF11)>, /* ETH_RGMII_TXD0 */
+				 <STM32_PINMUX('G', 14, AF11)>, /* ETH_RGMII_TXD1 */
+				 <STM32_PINMUX('C', 2, AF11)>, /* ETH_RGMII_TXD2 */
+				 <STM32_PINMUX('E', 2, AF11)>, /* ETH_RGMII_TXD3 */
+				 <STM32_PINMUX('B', 11, AF11)>, /* ETH_RGMII_TX_CTL */
+				 <STM32_PINMUX('C', 1, AF11)>; /* ETH_MDC */
+			bias-disable;
+			drive-push-pull;
+			slew-rate = <2>;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('A', 2, AF11)>; /* ETH_MDIO */
+			bias-disable;
+			drive-push-pull;
+			slew-rate = <0>;
+		};
+		pins3 {
+			pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH_RGMII_RXD0 */
+				 <STM32_PINMUX('C', 5, AF11)>, /* ETH_RGMII_RXD1 */
+				 <STM32_PINMUX('B', 0, AF11)>, /* ETH_RGMII_RXD2 */
+				 <STM32_PINMUX('B', 1, AF11)>, /* ETH_RGMII_RXD3 */
+				 <STM32_PINMUX('A', 1, AF11)>, /* ETH_RGMII_RX_CLK */
+				 <STM32_PINMUX('A', 7, AF11)>; /* ETH_RGMII_RX_CTL */
+			bias-disable;
+		};
+	};
+
+	ethernet0_rgmii_pins_sleep_a: rgmii-sleep-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('G', 5, ANALOG)>, /* ETH_RGMII_CLK125 */
+				 <STM32_PINMUX('G', 4, ANALOG)>, /* ETH_RGMII_GTX_CLK */
+				 <STM32_PINMUX('G', 13, ANALOG)>, /* ETH_RGMII_TXD0 */
+				 <STM32_PINMUX('G', 14, ANALOG)>, /* ETH_RGMII_TXD1 */
+				 <STM32_PINMUX('C', 2, ANALOG)>, /* ETH_RGMII_TXD2 */
+				 <STM32_PINMUX('E', 2, ANALOG)>, /* ETH_RGMII_TXD3 */
+				 <STM32_PINMUX('B', 11, ANALOG)>, /* ETH_RGMII_TX_CTL */
+				 <STM32_PINMUX('A', 2, ANALOG)>, /* ETH_MDIO */
+				 <STM32_PINMUX('C', 1, ANALOG)>, /* ETH_MDC */
+				 <STM32_PINMUX('C', 4, ANALOG)>, /* ETH_RGMII_RXD0 */
+				 <STM32_PINMUX('C', 5, ANALOG)>, /* ETH_RGMII_RXD1 */
+				 <STM32_PINMUX('B', 0, ANALOG)>, /* ETH_RGMII_RXD2 */
+				 <STM32_PINMUX('B', 1, ANALOG)>, /* ETH_RGMII_RXD3 */
+				 <STM32_PINMUX('A', 1, ANALOG)>, /* ETH_RGMII_RX_CLK */
+				 <STM32_PINMUX('A', 7, ANALOG)>; /* ETH_RGMII_RX_CTL */
+		};
+	};
+
+	fmc_pins_a: fmc-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('D', 4, AF12)>, /* FMC_NOE */
+				 <STM32_PINMUX('D', 5, AF12)>, /* FMC_NWE */
+				 <STM32_PINMUX('D', 11, AF12)>, /* FMC_A16_FMC_CLE */
+				 <STM32_PINMUX('D', 12, AF12)>, /* FMC_A17_FMC_ALE */
+				 <STM32_PINMUX('D', 14, AF12)>, /* FMC_D0 */
+				 <STM32_PINMUX('D', 15, AF12)>, /* FMC_D1 */
+				 <STM32_PINMUX('D', 0, AF12)>, /* FMC_D2 */
+				 <STM32_PINMUX('D', 1, AF12)>, /* FMC_D3 */
+				 <STM32_PINMUX('E', 7, AF12)>, /* FMC_D4 */
+				 <STM32_PINMUX('E', 8, AF12)>, /* FMC_D5 */
+				 <STM32_PINMUX('E', 9, AF12)>, /* FMC_D6 */
+				 <STM32_PINMUX('E', 10, AF12)>, /* FMC_D7 */
+				 <STM32_PINMUX('G', 9, AF12)>; /* FMC_NE2_FMC_NCE */
+			bias-disable;
+			drive-push-pull;
+			slew-rate = <1>;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('D', 6, AF12)>; /* FMC_NWAIT */
+			bias-pull-up;
+		};
+	};
+
+	fmc_sleep_pins_a: fmc-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('D', 4, ANALOG)>, /* FMC_NOE */
+				 <STM32_PINMUX('D', 5, ANALOG)>, /* FMC_NWE */
+				 <STM32_PINMUX('D', 11, ANALOG)>, /* FMC_A16_FMC_CLE */
+				 <STM32_PINMUX('D', 12, ANALOG)>, /* FMC_A17_FMC_ALE */
+				 <STM32_PINMUX('D', 14, ANALOG)>, /* FMC_D0 */
+				 <STM32_PINMUX('D', 15, ANALOG)>, /* FMC_D1 */
+				 <STM32_PINMUX('D', 0, ANALOG)>, /* FMC_D2 */
+				 <STM32_PINMUX('D', 1, ANALOG)>, /* FMC_D3 */
+				 <STM32_PINMUX('E', 7, ANALOG)>, /* FMC_D4 */
+				 <STM32_PINMUX('E', 8, ANALOG)>, /* FMC_D5 */
+				 <STM32_PINMUX('E', 9, ANALOG)>, /* FMC_D6 */
+				 <STM32_PINMUX('E', 10, ANALOG)>, /* FMC_D7 */
+				 <STM32_PINMUX('D', 6, ANALOG)>, /* FMC_NWAIT */
+				 <STM32_PINMUX('G', 9, ANALOG)>; /* FMC_NE2_FMC_NCE */
+		};
+	};
+
+	i2c1_pins_a: i2c1-0 {
+		pins {
+			pinmux = <STM32_PINMUX('D', 12, AF5)>, /* I2C1_SCL */
+				 <STM32_PINMUX('F', 15, AF5)>; /* I2C1_SDA */
+			bias-disable;
+			drive-open-drain;
+			slew-rate = <0>;
+		};
+	};
+
+	i2c1_pins_sleep_a: i2c1-1 {
+		pins {
+			pinmux = <STM32_PINMUX('D', 12, ANALOG)>, /* I2C1_SCL */
+				 <STM32_PINMUX('F', 15, ANALOG)>; /* I2C1_SDA */
+		};
+	};
+
+	i2c1_pins_b: i2c1-2 {
+		pins {
+			pinmux = <STM32_PINMUX('F', 14, AF5)>, /* I2C1_SCL */
+				 <STM32_PINMUX('F', 15, AF5)>; /* I2C1_SDA */
+			bias-disable;
+			drive-open-drain;
+			slew-rate = <0>;
+		};
+	};
+
+	i2c1_pins_sleep_b: i2c1-3 {
+		pins {
+			pinmux = <STM32_PINMUX('F', 14, ANALOG)>, /* I2C1_SCL */
+				 <STM32_PINMUX('F', 15, ANALOG)>; /* I2C1_SDA */
+		};
+	};
+
+	i2c2_pins_a: i2c2-0 {
+		pins {
+			pinmux = <STM32_PINMUX('H', 4, AF4)>, /* I2C2_SCL */
+				 <STM32_PINMUX('H', 5, AF4)>; /* I2C2_SDA */
+			bias-disable;
+			drive-open-drain;
+			slew-rate = <0>;
+		};
+	};
+
+	i2c2_pins_sleep_a: i2c2-1 {
+		pins {
+			pinmux = <STM32_PINMUX('H', 4, ANALOG)>, /* I2C2_SCL */
+				 <STM32_PINMUX('H', 5, ANALOG)>; /* I2C2_SDA */
+		};
+	};
+
+	i2c2_pins_b1: i2c2-2 {
+		pins {
+			pinmux = <STM32_PINMUX('H', 5, AF4)>; /* I2C2_SDA */
+			bias-disable;
+			drive-open-drain;
+			slew-rate = <0>;
+		};
+	};
+
+	i2c2_pins_sleep_b1: i2c2-3 {
+		pins {
+			pinmux = <STM32_PINMUX('H', 5, ANALOG)>; /* I2C2_SDA */
+		};
+	};
+
+	i2c5_pins_a: i2c5-0 {
+		pins {
+			pinmux = <STM32_PINMUX('A', 11, AF4)>, /* I2C5_SCL */
+				 <STM32_PINMUX('A', 12, AF4)>; /* I2C5_SDA */
+			bias-disable;
+			drive-open-drain;
+			slew-rate = <0>;
+		};
+	};
+
+	i2c5_pins_sleep_a: i2c5-1 {
+		pins {
+			pinmux = <STM32_PINMUX('A', 11, ANALOG)>, /* I2C5_SCL */
+				 <STM32_PINMUX('A', 12, ANALOG)>; /* I2C5_SDA */
+
+		};
+	};
+
+	i2s2_pins_a: i2s2-0 {
+		pins {
+			pinmux = <STM32_PINMUX('I', 3, AF5)>, /* I2S2_SDO */
+				 <STM32_PINMUX('B', 9, AF5)>, /* I2S2_WS */
+				 <STM32_PINMUX('A', 9, AF5)>; /* I2S2_CK */
+			slew-rate = <1>;
+			drive-push-pull;
+			bias-disable;
+		};
+	};
+
+	i2s2_pins_sleep_a: i2s2-1 {
+		pins {
+			pinmux = <STM32_PINMUX('I', 3, ANALOG)>, /* I2S2_SDO */
+				 <STM32_PINMUX('B', 9, ANALOG)>, /* I2S2_WS */
+				 <STM32_PINMUX('A', 9, ANALOG)>; /* I2S2_CK */
+		};
+	};
+
+	ltdc_pins_a: ltdc-a-0 {
+		pins {
+			pinmux = <STM32_PINMUX('G',  7, AF14)>, /* LCD_CLK */
+				 <STM32_PINMUX('I', 10, AF14)>, /* LCD_HSYNC */
+				 <STM32_PINMUX('I',  9, AF14)>, /* LCD_VSYNC */
+				 <STM32_PINMUX('F', 10, AF14)>, /* LCD_DE */
+				 <STM32_PINMUX('H',  2, AF14)>, /* LCD_R0 */
+				 <STM32_PINMUX('H',  3, AF14)>, /* LCD_R1 */
+				 <STM32_PINMUX('H',  8, AF14)>, /* LCD_R2 */
+				 <STM32_PINMUX('H',  9, AF14)>, /* LCD_R3 */
+				 <STM32_PINMUX('H', 10, AF14)>, /* LCD_R4 */
+				 <STM32_PINMUX('C',  0, AF14)>, /* LCD_R5 */
+				 <STM32_PINMUX('H', 12, AF14)>, /* LCD_R6 */
+				 <STM32_PINMUX('E', 15, AF14)>, /* LCD_R7 */
+				 <STM32_PINMUX('E',  5, AF14)>, /* LCD_G0 */
+				 <STM32_PINMUX('E',  6, AF14)>, /* LCD_G1 */
+				 <STM32_PINMUX('H', 13, AF14)>, /* LCD_G2 */
+				 <STM32_PINMUX('H', 14, AF14)>, /* LCD_G3 */
+				 <STM32_PINMUX('H', 15, AF14)>, /* LCD_G4 */
+				 <STM32_PINMUX('I',  0, AF14)>, /* LCD_G5 */
+				 <STM32_PINMUX('I',  1, AF14)>, /* LCD_G6 */
+				 <STM32_PINMUX('I',  2, AF14)>, /* LCD_G7 */
+				 <STM32_PINMUX('D',  9, AF14)>, /* LCD_B0 */
+				 <STM32_PINMUX('G', 12, AF14)>, /* LCD_B1 */
+				 <STM32_PINMUX('G', 10, AF14)>, /* LCD_B2 */
+				 <STM32_PINMUX('D', 10, AF14)>, /* LCD_B3 */
+				 <STM32_PINMUX('I',  4, AF14)>, /* LCD_B4 */
+				 <STM32_PINMUX('A',  3, AF14)>, /* LCD_B5 */
+				 <STM32_PINMUX('B',  8, AF14)>, /* LCD_B6 */
+				 <STM32_PINMUX('D',  8, AF14)>; /* LCD_B7 */
+			bias-disable;
+			drive-push-pull;
+			slew-rate = <1>;
+		};
+	};
+
+	ltdc_pins_sleep_a: ltdc-a-1 {
+		pins {
+			pinmux = <STM32_PINMUX('G',  7, ANALOG)>, /* LCD_CLK */
+				 <STM32_PINMUX('I', 10, ANALOG)>, /* LCD_HSYNC */
+				 <STM32_PINMUX('I',  9, ANALOG)>, /* LCD_VSYNC */
+				 <STM32_PINMUX('F', 10, ANALOG)>, /* LCD_DE */
+				 <STM32_PINMUX('H',  2, ANALOG)>, /* LCD_R0 */
+				 <STM32_PINMUX('H',  3, ANALOG)>, /* LCD_R1 */
+				 <STM32_PINMUX('H',  8, ANALOG)>, /* LCD_R2 */
+				 <STM32_PINMUX('H',  9, ANALOG)>, /* LCD_R3 */
+				 <STM32_PINMUX('H', 10, ANALOG)>, /* LCD_R4 */
+				 <STM32_PINMUX('C',  0, ANALOG)>, /* LCD_R5 */
+				 <STM32_PINMUX('H', 12, ANALOG)>, /* LCD_R6 */
+				 <STM32_PINMUX('E', 15, ANALOG)>, /* LCD_R7 */
+				 <STM32_PINMUX('E',  5, ANALOG)>, /* LCD_G0 */
+				 <STM32_PINMUX('E',  6, ANALOG)>, /* LCD_G1 */
+				 <STM32_PINMUX('H', 13, ANALOG)>, /* LCD_G2 */
+				 <STM32_PINMUX('H', 14, ANALOG)>, /* LCD_G3 */
+				 <STM32_PINMUX('H', 15, ANALOG)>, /* LCD_G4 */
+				 <STM32_PINMUX('I',  0, ANALOG)>, /* LCD_G5 */
+				 <STM32_PINMUX('I',  1, ANALOG)>, /* LCD_G6 */
+				 <STM32_PINMUX('I',  2, ANALOG)>, /* LCD_G7 */
+				 <STM32_PINMUX('D',  9, ANALOG)>, /* LCD_B0 */
+				 <STM32_PINMUX('G', 12, ANALOG)>, /* LCD_B1 */
+				 <STM32_PINMUX('G', 10, ANALOG)>, /* LCD_B2 */
+				 <STM32_PINMUX('D', 10, ANALOG)>, /* LCD_B3 */
+				 <STM32_PINMUX('I',  4, ANALOG)>, /* LCD_B4 */
+				 <STM32_PINMUX('A',  3, ANALOG)>, /* LCD_B5 */
+				 <STM32_PINMUX('B',  8, ANALOG)>, /* LCD_B6 */
+				 <STM32_PINMUX('D',  8, ANALOG)>; /* LCD_B7 */
+		};
+	};
+
+	ltdc_pins_b: ltdc-b-0 {
+		pins {
+			pinmux = <STM32_PINMUX('I', 14, AF14)>, /* LCD_CLK */
+				 <STM32_PINMUX('I', 12, AF14)>, /* LCD_HSYNC */
+				 <STM32_PINMUX('I', 13, AF14)>, /* LCD_VSYNC */
+				 <STM32_PINMUX('K',  7, AF14)>, /* LCD_DE */
+				 <STM32_PINMUX('I', 15, AF14)>, /* LCD_R0 */
+				 <STM32_PINMUX('J',  0, AF14)>, /* LCD_R1 */
+				 <STM32_PINMUX('J',  1, AF14)>, /* LCD_R2 */
+				 <STM32_PINMUX('J',  2, AF14)>, /* LCD_R3 */
+				 <STM32_PINMUX('J',  3, AF14)>, /* LCD_R4 */
+				 <STM32_PINMUX('J',  4, AF14)>, /* LCD_R5 */
+				 <STM32_PINMUX('J',  5, AF14)>, /* LCD_R6 */
+				 <STM32_PINMUX('J',  6, AF14)>, /* LCD_R7 */
+				 <STM32_PINMUX('J',  7, AF14)>, /* LCD_G0 */
+				 <STM32_PINMUX('J',  8, AF14)>, /* LCD_G1 */
+				 <STM32_PINMUX('J',  9, AF14)>, /* LCD_G2 */
+				 <STM32_PINMUX('J', 10, AF14)>, /* LCD_G3 */
+				 <STM32_PINMUX('J', 11, AF14)>, /* LCD_G4 */
+				 <STM32_PINMUX('K',  0, AF14)>, /* LCD_G5 */
+				 <STM32_PINMUX('K',  1, AF14)>, /* LCD_G6 */
+				 <STM32_PINMUX('K',  2, AF14)>, /* LCD_G7 */
+				 <STM32_PINMUX('J', 12, AF14)>, /* LCD_B0 */
+				 <STM32_PINMUX('J', 13, AF14)>, /* LCD_B1 */
+				 <STM32_PINMUX('J', 14, AF14)>, /* LCD_B2 */
+				 <STM32_PINMUX('J', 15, AF14)>, /* LCD_B3 */
+				 <STM32_PINMUX('K',  3, AF14)>, /* LCD_B4 */
+				 <STM32_PINMUX('K',  4, AF14)>, /* LCD_B5 */
+				 <STM32_PINMUX('K',  5, AF14)>, /* LCD_B6 */
+				 <STM32_PINMUX('K',  6, AF14)>; /* LCD_B7 */
+			bias-disable;
+			drive-push-pull;
+			slew-rate = <1>;
+		};
+	};
+
+	ltdc_pins_sleep_b: ltdc-b-1 {
+		pins {
+			pinmux = <STM32_PINMUX('I', 14, ANALOG)>, /* LCD_CLK */
+				 <STM32_PINMUX('I', 12, ANALOG)>, /* LCD_HSYNC */
+				 <STM32_PINMUX('I', 13, ANALOG)>, /* LCD_VSYNC */
+				 <STM32_PINMUX('K',  7, ANALOG)>, /* LCD_DE */
+				 <STM32_PINMUX('I', 15, ANALOG)>, /* LCD_R0 */
+				 <STM32_PINMUX('J',  0, ANALOG)>, /* LCD_R1 */
+				 <STM32_PINMUX('J',  1, ANALOG)>, /* LCD_R2 */
+				 <STM32_PINMUX('J',  2, ANALOG)>, /* LCD_R3 */
+				 <STM32_PINMUX('J',  3, ANALOG)>, /* LCD_R4 */
+				 <STM32_PINMUX('J',  4, ANALOG)>, /* LCD_R5 */
+				 <STM32_PINMUX('J',  5, ANALOG)>, /* LCD_R6 */
+				 <STM32_PINMUX('J',  6, ANALOG)>, /* LCD_R7 */
+				 <STM32_PINMUX('J',  7, ANALOG)>, /* LCD_G0 */
+				 <STM32_PINMUX('J',  8, ANALOG)>, /* LCD_G1 */
+				 <STM32_PINMUX('J',  9, ANALOG)>, /* LCD_G2 */
+				 <STM32_PINMUX('J', 10, ANALOG)>, /* LCD_G3 */
+				 <STM32_PINMUX('J', 11, ANALOG)>, /* LCD_G4 */
+				 <STM32_PINMUX('K',  0, ANALOG)>, /* LCD_G5 */
+				 <STM32_PINMUX('K',  1, ANALOG)>, /* LCD_G6 */
+				 <STM32_PINMUX('K',  2, ANALOG)>, /* LCD_G7 */
+				 <STM32_PINMUX('J', 12, ANALOG)>, /* LCD_B0 */
+				 <STM32_PINMUX('J', 13, ANALOG)>, /* LCD_B1 */
+				 <STM32_PINMUX('J', 14, ANALOG)>, /* LCD_B2 */
+				 <STM32_PINMUX('J', 15, ANALOG)>, /* LCD_B3 */
+				 <STM32_PINMUX('K',  3, ANALOG)>, /* LCD_B4 */
+				 <STM32_PINMUX('K',  4, ANALOG)>, /* LCD_B5 */
+				 <STM32_PINMUX('K',  5, ANALOG)>, /* LCD_B6 */
+				 <STM32_PINMUX('K',  6, ANALOG)>; /* LCD_B7 */
+		};
+	};
+
+	m_can1_pins_a: m-can1-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('H', 13, AF9)>; /* CAN1_TX */
+			slew-rate = <1>;
+			drive-push-pull;
+			bias-disable;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('I', 9, AF9)>; /* CAN1_RX */
+			bias-disable;
+		};
+	};
+
+	m_can1_sleep_pins_a: m-can1-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('H', 13, ANALOG)>, /* CAN1_TX */
+				 <STM32_PINMUX('I', 9, ANALOG)>; /* CAN1_RX */
+		};
+	};
+
+	pwm1_pins_a: pwm1-0 {
+		pins {
+			pinmux = <STM32_PINMUX('E', 9, AF1)>, /* TIM1_CH1 */
+				 <STM32_PINMUX('E', 11, AF1)>, /* TIM1_CH2 */
+				 <STM32_PINMUX('E', 14, AF1)>; /* TIM1_CH4 */
+			bias-pull-down;
+			drive-push-pull;
+			slew-rate = <0>;
+		};
+	};
+
+	pwm1_sleep_pins_a: pwm1-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('E', 9, ANALOG)>, /* TIM1_CH1 */
+				 <STM32_PINMUX('E', 11, ANALOG)>, /* TIM1_CH2 */
+				 <STM32_PINMUX('E', 14, ANALOG)>; /* TIM1_CH4 */
+		};
+	};
+
+	pwm2_pins_a: pwm2-0 {
+		pins {
+			pinmux = <STM32_PINMUX('A', 3, AF1)>; /* TIM2_CH4 */
+			bias-pull-down;
+			drive-push-pull;
+			slew-rate = <0>;
+		};
+	};
+
+	pwm2_sleep_pins_a: pwm2-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('A', 3, ANALOG)>; /* TIM2_CH4 */
+		};
+	};
+
+	pwm3_pins_a: pwm3-0 {
+		pins {
+			pinmux = <STM32_PINMUX('C', 7, AF2)>; /* TIM3_CH2 */
+			bias-pull-down;
+			drive-push-pull;
+			slew-rate = <0>;
+		};
+	};
+
+	pwm3_sleep_pins_a: pwm3-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('C', 7, ANALOG)>; /* TIM3_CH2 */
+		};
+	};
+
+	pwm4_pins_a: pwm4-0 {
+		pins {
+			pinmux = <STM32_PINMUX('D', 14, AF2)>, /* TIM4_CH3 */
+				 <STM32_PINMUX('D', 15, AF2)>; /* TIM4_CH4 */
+			bias-pull-down;
+			drive-push-pull;
+			slew-rate = <0>;
+		};
+	};
+
+	pwm4_sleep_pins_a: pwm4-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('D', 14, ANALOG)>, /* TIM4_CH3 */
+				 <STM32_PINMUX('D', 15, ANALOG)>; /* TIM4_CH4 */
+		};
+	};
+
+	pwm4_pins_b: pwm4-1 {
+		pins {
+			pinmux = <STM32_PINMUX('D', 13, AF2)>; /* TIM4_CH2 */
+			bias-pull-down;
+			drive-push-pull;
+			slew-rate = <0>;
+		};
+	};
+
+	pwm4_sleep_pins_b: pwm4-sleep-1 {
+		pins {
+			pinmux = <STM32_PINMUX('D', 13, ANALOG)>; /* TIM4_CH2 */
+		};
+	};
+
+	pwm5_pins_a: pwm5-0 {
+		pins {
+			pinmux = <STM32_PINMUX('H', 11, AF2)>; /* TIM5_CH2 */
+			bias-pull-down;
+			drive-push-pull;
+			slew-rate = <0>;
+		};
+	};
+
+	pwm5_sleep_pins_a: pwm5-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('H', 11, ANALOG)>; /* TIM5_CH2 */
+		};
+	};
+
+	pwm8_pins_a: pwm8-0 {
+		pins {
+			pinmux = <STM32_PINMUX('I', 2, AF3)>; /* TIM8_CH4 */
+			bias-pull-down;
+			drive-push-pull;
+			slew-rate = <0>;
+		};
+	};
+
+	pwm8_sleep_pins_a: pwm8-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('I', 2, ANALOG)>; /* TIM8_CH4 */
+		};
+	};
+
+	pwm12_pins_a: pwm12-0 {
+		pins {
+			pinmux = <STM32_PINMUX('H', 6, AF2)>; /* TIM12_CH1 */
+			bias-pull-down;
+			drive-push-pull;
+			slew-rate = <0>;
+		};
+	};
+
+	pwm12_sleep_pins_a: pwm12-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('H', 6, ANALOG)>; /* TIM12_CH1 */
+		};
+	};
+
+	qspi_clk_pins_a: qspi-clk-0 {
+		pins {
+			pinmux = <STM32_PINMUX('F', 10, AF9)>; /* QSPI_CLK */
+			bias-disable;
+			drive-push-pull;
+			slew-rate = <3>;
+		};
+	};
+
+	qspi_clk_sleep_pins_a: qspi-clk-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('F', 10, ANALOG)>; /* QSPI_CLK */
+		};
+	};
+
+	qspi_bk1_pins_a: qspi-bk1-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('F', 8, AF10)>, /* QSPI_BK1_IO0 */
+				 <STM32_PINMUX('F', 9, AF10)>, /* QSPI_BK1_IO1 */
+				 <STM32_PINMUX('F', 7, AF9)>, /* QSPI_BK1_IO2 */
+				 <STM32_PINMUX('F', 6, AF9)>; /* QSPI_BK1_IO3 */
+			bias-disable;
+			drive-push-pull;
+			slew-rate = <1>;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('B', 6, AF10)>; /* QSPI_BK1_NCS */
+			bias-pull-up;
+			drive-push-pull;
+			slew-rate = <1>;
+		};
+	};
+
+	qspi_bk1_sleep_pins_a: qspi-bk1-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('F', 8, ANALOG)>, /* QSPI_BK1_IO0 */
+				 <STM32_PINMUX('F', 9, ANALOG)>, /* QSPI_BK1_IO1 */
+				 <STM32_PINMUX('F', 7, ANALOG)>, /* QSPI_BK1_IO2 */
+				 <STM32_PINMUX('F', 6, ANALOG)>, /* QSPI_BK1_IO3 */
+				 <STM32_PINMUX('B', 6, ANALOG)>; /* QSPI_BK1_NCS */
+		};
+	};
+
+	qspi_bk2_pins_a: qspi-bk2-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('H', 2, AF9)>, /* QSPI_BK2_IO0 */
+				 <STM32_PINMUX('H', 3, AF9)>, /* QSPI_BK2_IO1 */
+				 <STM32_PINMUX('G', 10, AF11)>, /* QSPI_BK2_IO2 */
+				 <STM32_PINMUX('G', 7, AF11)>; /* QSPI_BK2_IO3 */
+			bias-disable;
+			drive-push-pull;
+			slew-rate = <1>;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('C', 0, AF10)>; /* QSPI_BK2_NCS */
+			bias-pull-up;
+			drive-push-pull;
+			slew-rate = <1>;
+		};
+	};
+
+	qspi_bk2_sleep_pins_a: qspi-bk2-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('H', 2, ANALOG)>, /* QSPI_BK2_IO0 */
+				 <STM32_PINMUX('H', 3, ANALOG)>, /* QSPI_BK2_IO1 */
+				 <STM32_PINMUX('G', 10, ANALOG)>, /* QSPI_BK2_IO2 */
+				 <STM32_PINMUX('G', 7, ANALOG)>, /* QSPI_BK2_IO3 */
+				 <STM32_PINMUX('C', 0, ANALOG)>; /* QSPI_BK2_NCS */
+		};
+	};
+
+	sai2a_pins_a: sai2a-0 {
+		pins {
+			pinmux = <STM32_PINMUX('I', 5, AF10)>, /* SAI2_SCK_A */
+				 <STM32_PINMUX('I', 6, AF10)>, /* SAI2_SD_A */
+				 <STM32_PINMUX('I', 7, AF10)>, /* SAI2_FS_A */
+				 <STM32_PINMUX('E', 0, AF10)>; /* SAI2_MCLK_A */
+			slew-rate = <0>;
+			drive-push-pull;
+			bias-disable;
+		};
+	};
+
+	sai2a_sleep_pins_a: sai2a-1 {
+		pins {
+			pinmux = <STM32_PINMUX('I', 5, ANALOG)>, /* SAI2_SCK_A */
+				 <STM32_PINMUX('I', 6, ANALOG)>, /* SAI2_SD_A */
+				 <STM32_PINMUX('I', 7, ANALOG)>, /* SAI2_FS_A */
+				 <STM32_PINMUX('E', 0, ANALOG)>; /* SAI2_MCLK_A */
+		};
+	};
+
+	sai2b_pins_a: sai2b-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('E', 12, AF10)>, /* SAI2_SCK_B */
+				 <STM32_PINMUX('E', 13, AF10)>, /* SAI2_FS_B */
+				 <STM32_PINMUX('E', 14, AF10)>; /* SAI2_MCLK_B */
+			slew-rate = <0>;
+			drive-push-pull;
+			bias-disable;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('F', 11, AF10)>; /* SAI2_SD_B */
+			bias-disable;
+		};
+	};
+
+	sai2b_sleep_pins_a: sai2b-1 {
+		pins {
+			pinmux = <STM32_PINMUX('F', 11, ANALOG)>, /* SAI2_SD_B */
+				 <STM32_PINMUX('E', 12, ANALOG)>, /* SAI2_SCK_B */
+				 <STM32_PINMUX('E', 13, ANALOG)>, /* SAI2_FS_B */
+				 <STM32_PINMUX('E', 14, ANALOG)>; /* SAI2_MCLK_B */
+		};
+	};
+
+	sai2b_pins_b: sai2b-2 {
+		pins {
+			pinmux = <STM32_PINMUX('F', 11, AF10)>; /* SAI2_SD_B */
+			bias-disable;
+		};
+	};
+
+	sai2b_sleep_pins_b: sai2b-3 {
+		pins {
+			pinmux = <STM32_PINMUX('F', 11, ANALOG)>; /* SAI2_SD_B */
+		};
+	};
+
+	sai4a_pins_a: sai4a-0 {
+		pins {
+			pinmux = <STM32_PINMUX('B', 5, AF10)>; /* SAI4_SD_A */
+			slew-rate = <0>;
+			drive-push-pull;
+			bias-disable;
+		};
+	};
+
+	sai4a_sleep_pins_a: sai4a-1 {
+		pins {
+			pinmux = <STM32_PINMUX('B', 5, ANALOG)>; /* SAI4_SD_A */
+		};
+	};
+
+	sdmmc1_b4_pins_a: sdmmc1-b4-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
+				 <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
+				 <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */
+				 <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */
+				 <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
+			slew-rate = <1>;
+			drive-push-pull;
+			bias-disable;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('C', 12, AF12)>; /* SDMMC1_CK */
+			slew-rate = <2>;
+			drive-push-pull;
+			bias-disable;
+		};
+	};
+
+	sdmmc1_b4_od_pins_a: sdmmc1-b4-od-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
+				 <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
+				 <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */
+				 <STM32_PINMUX('C', 11, AF12)>; /* SDMMC1_D3 */
+			slew-rate = <1>;
+			drive-push-pull;
+			bias-disable;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('C', 12, AF12)>; /* SDMMC1_CK */
+			slew-rate = <2>;
+			drive-push-pull;
+			bias-disable;
+		};
+		pins3 {
+			pinmux = <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
+			slew-rate = <1>;
+			drive-open-drain;
+			bias-disable;
+		};
+	};
+
+	sdmmc1_b4_sleep_pins_a: sdmmc1-b4-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('C', 8, ANALOG)>, /* SDMMC1_D0 */
+				 <STM32_PINMUX('C', 9, ANALOG)>, /* SDMMC1_D1 */
+				 <STM32_PINMUX('C', 10, ANALOG)>, /* SDMMC1_D2 */
+				 <STM32_PINMUX('C', 11, ANALOG)>, /* SDMMC1_D3 */
+				 <STM32_PINMUX('C', 12, ANALOG)>, /* SDMMC1_CK */
+				 <STM32_PINMUX('D', 2, ANALOG)>; /* SDMMC1_CMD */
+		};
+	};
+
+	sdmmc1_dir_pins_a: sdmmc1-dir-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('F', 2, AF11)>, /* SDMMC1_D0DIR */
+				 <STM32_PINMUX('C', 7, AF8)>, /* SDMMC1_D123DIR */
+				 <STM32_PINMUX('B', 9, AF11)>; /* SDMMC1_CDIR */
+			slew-rate = <1>;
+			drive-push-pull;
+			bias-pull-up;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('E', 4, AF8)>; /* SDMMC1_CKIN */
+			bias-pull-up;
+		};
+	};
+
+	sdmmc1_dir_sleep_pins_a: sdmmc1-dir-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('F', 2, ANALOG)>, /* SDMMC1_D0DIR */
+				 <STM32_PINMUX('C', 7, ANALOG)>, /* SDMMC1_D123DIR */
+				 <STM32_PINMUX('B', 9, ANALOG)>, /* SDMMC1_CDIR */
+				 <STM32_PINMUX('E', 4, ANALOG)>; /* SDMMC1_CKIN */
+		};
+	};
+
+	sdmmc2_b4_pins_a: sdmmc2-b4-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
+				 <STM32_PINMUX('B', 15, AF9)>, /* SDMMC2_D1 */
+				 <STM32_PINMUX('B', 3, AF9)>, /* SDMMC2_D2 */
+				 <STM32_PINMUX('B', 4, AF9)>, /* SDMMC2_D3 */
+				 <STM32_PINMUX('G', 6, AF10)>; /* SDMMC2_CMD */
+			slew-rate = <1>;
+			drive-push-pull;
+			bias-pull-up;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('E', 3, AF9)>; /* SDMMC2_CK */
+			slew-rate = <2>;
+			drive-push-pull;
+			bias-pull-up;
+		};
+	};
+
+	sdmmc2_b4_od_pins_a: sdmmc2-b4-od-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
+				 <STM32_PINMUX('B', 15, AF9)>, /* SDMMC2_D1 */
+				 <STM32_PINMUX('B', 3, AF9)>, /* SDMMC2_D2 */
+				 <STM32_PINMUX('B', 4, AF9)>; /* SDMMC2_D3 */
+			slew-rate = <1>;
+			drive-push-pull;
+			bias-pull-up;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('E', 3, AF9)>; /* SDMMC2_CK */
+			slew-rate = <2>;
+			drive-push-pull;
+			bias-pull-up;
+		};
+		pins3 {
+			pinmux = <STM32_PINMUX('G', 6, AF10)>; /* SDMMC2_CMD */
+			slew-rate = <1>;
+			drive-open-drain;
+			bias-pull-up;
+		};
+	};
+
+	sdmmc2_b4_sleep_pins_a: sdmmc2-b4-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('B', 14, ANALOG)>, /* SDMMC2_D0 */
+				 <STM32_PINMUX('B', 15, ANALOG)>, /* SDMMC2_D1 */
+				 <STM32_PINMUX('B', 3, ANALOG)>, /* SDMMC2_D2 */
+				 <STM32_PINMUX('B', 4, ANALOG)>, /* SDMMC2_D3 */
+				 <STM32_PINMUX('E', 3, ANALOG)>, /* SDMMC2_CK */
+				 <STM32_PINMUX('G', 6, ANALOG)>; /* SDMMC2_CMD */
+		};
+	};
+
+	sdmmc2_b4_pins_b: sdmmc2-b4-1 {
+		pins1 {
+			pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
+				 <STM32_PINMUX('B', 15, AF9)>, /* SDMMC2_D1 */
+				 <STM32_PINMUX('B', 3, AF9)>, /* SDMMC2_D2 */
+				 <STM32_PINMUX('B', 4, AF9)>, /* SDMMC2_D3 */
+				 <STM32_PINMUX('G', 6, AF10)>; /* SDMMC2_CMD */
+			slew-rate = <1>;
+			drive-push-pull;
+			bias-disable;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('E', 3, AF9)>; /* SDMMC2_CK */
+			slew-rate = <2>;
+			drive-push-pull;
+			bias-disable;
+		};
+	};
+
+	sdmmc2_b4_od_pins_b: sdmmc2-b4-od-1 {
+		pins1 {
+			pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
+				 <STM32_PINMUX('B', 15, AF9)>, /* SDMMC2_D1 */
+				 <STM32_PINMUX('B', 3, AF9)>, /* SDMMC2_D2 */
+				 <STM32_PINMUX('B', 4, AF9)>; /* SDMMC2_D3 */
+			slew-rate = <1>;
+			drive-push-pull;
+			bias-disable;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('E', 3, AF9)>; /* SDMMC2_CK */
+			slew-rate = <2>;
+			drive-push-pull;
+			bias-disable;
+		};
+		pins3 {
+			pinmux = <STM32_PINMUX('G', 6, AF10)>; /* SDMMC2_CMD */
+			slew-rate = <1>;
+			drive-open-drain;
+			bias-disable;
+		};
+	};
+
+	sdmmc2_d47_pins_a: sdmmc2-d47-0 {
+		pins {
+			pinmux = <STM32_PINMUX('A', 8, AF9)>, /* SDMMC2_D4 */
+				 <STM32_PINMUX('A', 9, AF10)>, /* SDMMC2_D5 */
+				 <STM32_PINMUX('E', 5, AF9)>, /* SDMMC2_D6 */
+				 <STM32_PINMUX('D', 3, AF9)>; /* SDMMC2_D7 */
+			slew-rate = <1>;
+			drive-push-pull;
+			bias-pull-up;
+		};
+	};
+
+	sdmmc2_d47_sleep_pins_a: sdmmc2-d47-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('A', 8, ANALOG)>, /* SDMMC2_D4 */
+				 <STM32_PINMUX('A', 9, ANALOG)>, /* SDMMC2_D5 */
+				 <STM32_PINMUX('E', 5, ANALOG)>, /* SDMMC2_D6 */
+				 <STM32_PINMUX('D', 3, ANALOG)>; /* SDMMC2_D7 */
+		};
+	};
+
+	sdmmc3_b4_pins_a: sdmmc3-b4-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('F', 0, AF9)>, /* SDMMC3_D0 */
+				 <STM32_PINMUX('F', 4, AF9)>, /* SDMMC3_D1 */
+				 <STM32_PINMUX('F', 5, AF9)>, /* SDMMC3_D2 */
+				 <STM32_PINMUX('D', 7, AF10)>, /* SDMMC3_D3 */
+				 <STM32_PINMUX('F', 1, AF9)>; /* SDMMC3_CMD */
+			slew-rate = <1>;
+			drive-push-pull;
+			bias-pull-up;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('G', 15, AF10)>; /* SDMMC3_CK */
+			slew-rate = <2>;
+			drive-push-pull;
+			bias-pull-up;
+		};
+	};
+
+	sdmmc3_b4_od_pins_a: sdmmc3-b4-od-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('F', 0, AF9)>, /* SDMMC3_D0 */
+				 <STM32_PINMUX('F', 4, AF9)>, /* SDMMC3_D1 */
+				 <STM32_PINMUX('F', 5, AF9)>, /* SDMMC3_D2 */
+				 <STM32_PINMUX('D', 7, AF10)>; /* SDMMC3_D3 */
+			slew-rate = <1>;
+			drive-push-pull;
+			bias-pull-up;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('G', 15, AF10)>; /* SDMMC3_CK */
+			slew-rate = <2>;
+			drive-push-pull;
+			bias-pull-up;
+		};
+		pins3 {
+			pinmux = <STM32_PINMUX('F', 1, AF9)>; /* SDMMC3_CMD */
+			slew-rate = <1>;
+			drive-open-drain;
+			bias-pull-up;
+		};
+	};
+
+	sdmmc3_b4_sleep_pins_a: sdmmc3-b4-sleep-0 {
+		pins {
+			pinmux = <STM32_PINMUX('F', 0, ANALOG)>, /* SDMMC3_D0 */
+				 <STM32_PINMUX('F', 4, ANALOG)>, /* SDMMC3_D1 */
+				 <STM32_PINMUX('F', 5, ANALOG)>, /* SDMMC3_D2 */
+				 <STM32_PINMUX('D', 7, ANALOG)>, /* SDMMC3_D3 */
+				 <STM32_PINMUX('G', 15, ANALOG)>, /* SDMMC3_CK */
+				 <STM32_PINMUX('F', 1, ANALOG)>; /* SDMMC3_CMD */
+		};
+	};
+
+	spdifrx_pins_a: spdifrx-0 {
+		pins {
+			pinmux = <STM32_PINMUX('G', 12, AF8)>; /* SPDIF_IN1 */
+			bias-disable;
+		};
+	};
+
+	spdifrx_sleep_pins_a: spdifrx-1 {
+		pins {
+			pinmux = <STM32_PINMUX('G', 12, ANALOG)>; /* SPDIF_IN1 */
+		};
+	};
+
+	uart4_pins_a: uart4-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('G', 11, AF6)>; /* UART4_TX */
+			bias-disable;
+			drive-push-pull;
+			slew-rate = <0>;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('B', 2, AF8)>; /* UART4_RX */
+			bias-disable;
+		};
+	};
+
+	uart4_pins_b: uart4-1 {
+		pins1 {
+			pinmux = <STM32_PINMUX('D', 1, AF8)>; /* UART4_TX */
+			bias-disable;
+			drive-push-pull;
+			slew-rate = <0>;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('B', 2, AF8)>; /* UART4_RX */
+			bias-disable;
+		};
+	};
+
+	uart7_pins_a: uart7-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('E', 8, AF7)>; /* UART7_TX */
+			bias-disable;
+			drive-push-pull;
+			slew-rate = <0>;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('E', 7, AF7)>, /* UART7_RX */
+				 <STM32_PINMUX('E', 10, AF7)>, /* UART7_CTS */
+				 <STM32_PINMUX('E', 9, AF7)>; /* UART7_RTS */
+			bias-disable;
+		};
+	};
+};
+
+&pinctrl_z {
+	i2c2_pins_b2: i2c2-0 {
+		pins {
+			pinmux = <STM32_PINMUX('Z', 0, AF3)>; /* I2C2_SCL */
+			bias-disable;
+			drive-open-drain;
+			slew-rate = <0>;
+		};
+	};
+
+	i2c2_pins_sleep_b2: i2c2-1 {
+		pins {
+			pinmux = <STM32_PINMUX('Z', 0, ANALOG)>; /* I2C2_SCL */
+		};
+	};
+
+	i2c4_pins_a: i2c4-0 {
+		pins {
+			pinmux = <STM32_PINMUX('Z', 4, AF6)>, /* I2C4_SCL */
+				 <STM32_PINMUX('Z', 5, AF6)>; /* I2C4_SDA */
+			bias-disable;
+			drive-open-drain;
+			slew-rate = <0>;
+		};
+	};
+
+	i2c4_pins_sleep_a: i2c4-1 {
+		pins {
+			pinmux = <STM32_PINMUX('Z', 4, ANALOG)>, /* I2C4_SCL */
+				 <STM32_PINMUX('Z', 5, ANALOG)>; /* I2C4_SDA */
+		};
+	};
+
+	spi1_pins_a: spi1-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('Z', 0, AF5)>, /* SPI1_SCK */
+				 <STM32_PINMUX('Z', 2, AF5)>; /* SPI1_MOSI */
+			bias-disable;
+			drive-push-pull;
+			slew-rate = <1>;
+		};
+
+		pins2 {
+			pinmux = <STM32_PINMUX('Z', 1, AF5)>; /* SPI1_MISO */
+			bias-disable;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/stm32mp151.dtsi b/arch/arm/boot/dts/stm32mp151.dtsi
new file mode 100644
index 0000000..fb41d07
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp151.dtsi
@@ -0,0 +1,1692 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2017 - All Rights Reserved
+ * Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
+ */
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/stm32mp1-clks.h>
+#include <dt-bindings/reset/stm32mp1-resets.h>
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			compatible = "arm,cortex-a7";
+			device_type = "cpu";
+			reg = <0>;
+		};
+	};
+
+	psci {
+		compatible = "arm,psci";
+		method = "smc";
+		cpu_off = <0x84000002>;
+		cpu_on = <0x84000003>;
+	};
+
+	intc: interrupt-controller@a0021000 {
+		compatible = "arm,cortex-a7-gic";
+		#interrupt-cells = <3>;
+		interrupt-controller;
+		reg = <0xa0021000 0x1000>,
+		      <0xa0022000 0x2000>;
+	};
+
+	timer {
+		compatible = "arm,armv7-timer";
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+		interrupt-parent = <&intc>;
+	};
+
+	clocks {
+		clk_hse: clk-hse {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <24000000>;
+		};
+
+		clk_hsi: clk-hsi {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <64000000>;
+		};
+
+		clk_lse: clk-lse {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <32768>;
+		};
+
+		clk_lsi: clk-lsi {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <32000>;
+		};
+
+		clk_csi: clk-csi {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <4000000>;
+		};
+	};
+
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&dts>;
+
+			trips {
+				cpu_alert1: cpu-alert1 {
+					temperature = <85000>;
+					hysteresis = <0>;
+					type = "passive";
+				};
+
+				cpu-crit {
+					temperature = <120000>;
+					hysteresis = <0>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+			};
+		};
+	};
+
+	booster: regulator-booster {
+		compatible = "st,stm32mp1-booster";
+		st,syscfg = <&syscfg>;
+		status = "disabled";
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		interrupt-parent = <&intc>;
+		ranges;
+
+		timers2: timer@40000000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-timers";
+			reg = <0x40000000 0x400>;
+			clocks = <&rcc TIM2_K>;
+			clock-names = "int";
+			dmas = <&dmamux1 18 0x400 0x1>,
+			       <&dmamux1 19 0x400 0x1>,
+			       <&dmamux1 20 0x400 0x1>,
+			       <&dmamux1 21 0x400 0x1>,
+			       <&dmamux1 22 0x400 0x1>;
+			dma-names = "ch1", "ch2", "ch3", "ch4", "up";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			timer@1 {
+				compatible = "st,stm32h7-timer-trigger";
+				reg = <1>;
+				status = "disabled";
+			};
+
+			counter {
+				compatible = "st,stm32-timer-counter";
+				status = "disabled";
+			};
+		};
+
+		timers3: timer@40001000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-timers";
+			reg = <0x40001000 0x400>;
+			clocks = <&rcc TIM3_K>;
+			clock-names = "int";
+			dmas = <&dmamux1 23 0x400 0x1>,
+			       <&dmamux1 24 0x400 0x1>,
+			       <&dmamux1 25 0x400 0x1>,
+			       <&dmamux1 26 0x400 0x1>,
+			       <&dmamux1 27 0x400 0x1>,
+			       <&dmamux1 28 0x400 0x1>;
+			dma-names = "ch1", "ch2", "ch3", "ch4", "up", "trig";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			timer@2 {
+				compatible = "st,stm32h7-timer-trigger";
+				reg = <2>;
+				status = "disabled";
+			};
+
+			counter {
+				compatible = "st,stm32-timer-counter";
+				status = "disabled";
+			};
+		};
+
+		timers4: timer@40002000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-timers";
+			reg = <0x40002000 0x400>;
+			clocks = <&rcc TIM4_K>;
+			clock-names = "int";
+			dmas = <&dmamux1 29 0x400 0x1>,
+			       <&dmamux1 30 0x400 0x1>,
+			       <&dmamux1 31 0x400 0x1>,
+			       <&dmamux1 32 0x400 0x1>;
+			dma-names = "ch1", "ch2", "ch3", "ch4";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			timer@3 {
+				compatible = "st,stm32h7-timer-trigger";
+				reg = <3>;
+				status = "disabled";
+			};
+
+			counter {
+				compatible = "st,stm32-timer-counter";
+				status = "disabled";
+			};
+		};
+
+		timers5: timer@40003000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-timers";
+			reg = <0x40003000 0x400>;
+			clocks = <&rcc TIM5_K>;
+			clock-names = "int";
+			dmas = <&dmamux1 55 0x400 0x1>,
+			       <&dmamux1 56 0x400 0x1>,
+			       <&dmamux1 57 0x400 0x1>,
+			       <&dmamux1 58 0x400 0x1>,
+			       <&dmamux1 59 0x400 0x1>,
+			       <&dmamux1 60 0x400 0x1>;
+			dma-names = "ch1", "ch2", "ch3", "ch4", "up", "trig";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			timer@4 {
+				compatible = "st,stm32h7-timer-trigger";
+				reg = <4>;
+				status = "disabled";
+			};
+
+			counter {
+				compatible = "st,stm32-timer-counter";
+				status = "disabled";
+			};
+		};
+
+		timers6: timer@40004000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-timers";
+			reg = <0x40004000 0x400>;
+			clocks = <&rcc TIM6_K>;
+			clock-names = "int";
+			dmas = <&dmamux1 69 0x400 0x1>;
+			dma-names = "up";
+			status = "disabled";
+
+			timer@5 {
+				compatible = "st,stm32h7-timer-trigger";
+				reg = <5>;
+				status = "disabled";
+			};
+		};
+
+		timers7: timer@40005000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-timers";
+			reg = <0x40005000 0x400>;
+			clocks = <&rcc TIM7_K>;
+			clock-names = "int";
+			dmas = <&dmamux1 70 0x400 0x1>;
+			dma-names = "up";
+			status = "disabled";
+
+			timer@6 {
+				compatible = "st,stm32h7-timer-trigger";
+				reg = <6>;
+				status = "disabled";
+			};
+		};
+
+		timers12: timer@40006000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-timers";
+			reg = <0x40006000 0x400>;
+			clocks = <&rcc TIM12_K>;
+			clock-names = "int";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			timer@11 {
+				compatible = "st,stm32h7-timer-trigger";
+				reg = <11>;
+				status = "disabled";
+			};
+		};
+
+		timers13: timer@40007000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-timers";
+			reg = <0x40007000 0x400>;
+			clocks = <&rcc TIM13_K>;
+			clock-names = "int";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			timer@12 {
+				compatible = "st,stm32h7-timer-trigger";
+				reg = <12>;
+				status = "disabled";
+			};
+		};
+
+		timers14: timer@40008000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-timers";
+			reg = <0x40008000 0x400>;
+			clocks = <&rcc TIM14_K>;
+			clock-names = "int";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			timer@13 {
+				compatible = "st,stm32h7-timer-trigger";
+				reg = <13>;
+				status = "disabled";
+			};
+		};
+
+		lptimer1: timer@40009000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-lptimer";
+			reg = <0x40009000 0x400>;
+			clocks = <&rcc LPTIM1_K>;
+			clock-names = "mux";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm-lp";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			trigger@0 {
+				compatible = "st,stm32-lptimer-trigger";
+				reg = <0>;
+				status = "disabled";
+			};
+
+			counter {
+				compatible = "st,stm32-lptimer-counter";
+				status = "disabled";
+			};
+		};
+
+		spi2: spi@4000b000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32h7-spi";
+			reg = <0x4000b000 0x400>;
+			interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc SPI2_K>;
+			resets = <&rcc SPI2_R>;
+			dmas = <&dmamux1 39 0x400 0x05>,
+			       <&dmamux1 40 0x400 0x05>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
+		i2s2: audio-controller@4000b000 {
+			compatible = "st,stm32h7-i2s";
+			#sound-dai-cells = <0>;
+			reg = <0x4000b000 0x400>;
+			interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&dmamux1 39 0x400 0x01>,
+			       <&dmamux1 40 0x400 0x01>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
+		spi3: spi@4000c000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32h7-spi";
+			reg = <0x4000c000 0x400>;
+			interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc SPI3_K>;
+			resets = <&rcc SPI3_R>;
+			dmas = <&dmamux1 61 0x400 0x05>,
+			       <&dmamux1 62 0x400 0x05>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
+		i2s3: audio-controller@4000c000 {
+			compatible = "st,stm32h7-i2s";
+			#sound-dai-cells = <0>;
+			reg = <0x4000c000 0x400>;
+			interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&dmamux1 61 0x400 0x01>,
+			       <&dmamux1 62 0x400 0x01>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
+		spdifrx: audio-controller@4000d000 {
+			compatible = "st,stm32h7-spdifrx";
+			#sound-dai-cells = <0>;
+			reg = <0x4000d000 0x400>;
+			clocks = <&rcc SPDIF_K>;
+			clock-names = "kclk";
+			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&dmamux1 93 0x400 0x01>,
+			       <&dmamux1 94 0x400 0x01>;
+			dma-names = "rx", "rx-ctrl";
+			status = "disabled";
+		};
+
+		usart2: serial@4000e000 {
+			compatible = "st,stm32h7-uart";
+			reg = <0x4000e000 0x400>;
+			interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc USART2_K>;
+			status = "disabled";
+		};
+
+		usart3: serial@4000f000 {
+			compatible = "st,stm32h7-uart";
+			reg = <0x4000f000 0x400>;
+			interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc USART3_K>;
+			status = "disabled";
+		};
+
+		uart4: serial@40010000 {
+			compatible = "st,stm32h7-uart";
+			reg = <0x40010000 0x400>;
+			interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc UART4_K>;
+			status = "disabled";
+		};
+
+		uart5: serial@40011000 {
+			compatible = "st,stm32h7-uart";
+			reg = <0x40011000 0x400>;
+			interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc UART5_K>;
+			status = "disabled";
+		};
+
+		i2c1: i2c@40012000 {
+			compatible = "st,stm32f7-i2c";
+			reg = <0x40012000 0x400>;
+			interrupt-names = "event", "error";
+			interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc I2C1_K>;
+			resets = <&rcc I2C1_R>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c2: i2c@40013000 {
+			compatible = "st,stm32f7-i2c";
+			reg = <0x40013000 0x400>;
+			interrupt-names = "event", "error";
+			interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc I2C2_K>;
+			resets = <&rcc I2C2_R>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c3: i2c@40014000 {
+			compatible = "st,stm32f7-i2c";
+			reg = <0x40014000 0x400>;
+			interrupt-names = "event", "error";
+			interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc I2C3_K>;
+			resets = <&rcc I2C3_R>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c5: i2c@40015000 {
+			compatible = "st,stm32f7-i2c";
+			reg = <0x40015000 0x400>;
+			interrupt-names = "event", "error";
+			interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc I2C5_K>;
+			resets = <&rcc I2C5_R>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		cec: cec@40016000 {
+			compatible = "st,stm32-cec";
+			reg = <0x40016000 0x400>;
+			interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc CEC_K>, <&clk_lse>;
+			clock-names = "cec", "hdmi-cec";
+			status = "disabled";
+		};
+
+		dac: dac@40017000 {
+			compatible = "st,stm32h7-dac-core";
+			reg = <0x40017000 0x400>;
+			clocks = <&rcc DAC12>;
+			clock-names = "pclk";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+
+			dac1: dac@1 {
+				compatible = "st,stm32-dac";
+				#io-channel-cells = <1>;
+				reg = <1>;
+				status = "disabled";
+			};
+
+			dac2: dac@2 {
+				compatible = "st,stm32-dac";
+				#io-channel-cells = <1>;
+				reg = <2>;
+				status = "disabled";
+			};
+		};
+
+		uart7: serial@40018000 {
+			compatible = "st,stm32h7-uart";
+			reg = <0x40018000 0x400>;
+			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc UART7_K>;
+			status = "disabled";
+		};
+
+		uart8: serial@40019000 {
+			compatible = "st,stm32h7-uart";
+			reg = <0x40019000 0x400>;
+			interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc UART8_K>;
+			status = "disabled";
+		};
+
+		timers1: timer@44000000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-timers";
+			reg = <0x44000000 0x400>;
+			clocks = <&rcc TIM1_K>;
+			clock-names = "int";
+			dmas = <&dmamux1 11 0x400 0x1>,
+			       <&dmamux1 12 0x400 0x1>,
+			       <&dmamux1 13 0x400 0x1>,
+			       <&dmamux1 14 0x400 0x1>,
+			       <&dmamux1 15 0x400 0x1>,
+			       <&dmamux1 16 0x400 0x1>,
+			       <&dmamux1 17 0x400 0x1>;
+			dma-names = "ch1", "ch2", "ch3", "ch4",
+				    "up", "trig", "com";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			timer@0 {
+				compatible = "st,stm32h7-timer-trigger";
+				reg = <0>;
+				status = "disabled";
+			};
+
+			counter {
+				compatible = "st,stm32-timer-counter";
+				status = "disabled";
+			};
+		};
+
+		timers8: timer@44001000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-timers";
+			reg = <0x44001000 0x400>;
+			clocks = <&rcc TIM8_K>;
+			clock-names = "int";
+			dmas = <&dmamux1 47 0x400 0x1>,
+			       <&dmamux1 48 0x400 0x1>,
+			       <&dmamux1 49 0x400 0x1>,
+			       <&dmamux1 50 0x400 0x1>,
+			       <&dmamux1 51 0x400 0x1>,
+			       <&dmamux1 52 0x400 0x1>,
+			       <&dmamux1 53 0x400 0x1>;
+			dma-names = "ch1", "ch2", "ch3", "ch4",
+				    "up", "trig", "com";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			timer@7 {
+				compatible = "st,stm32h7-timer-trigger";
+				reg = <7>;
+				status = "disabled";
+			};
+
+			counter {
+				compatible = "st,stm32-timer-counter";
+				status = "disabled";
+			};
+		};
+
+		usart6: serial@44003000 {
+			compatible = "st,stm32h7-uart";
+			reg = <0x44003000 0x400>;
+			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc USART6_K>;
+			status = "disabled";
+		};
+
+		spi1: spi@44004000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32h7-spi";
+			reg = <0x44004000 0x400>;
+			interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc SPI1_K>;
+			resets = <&rcc SPI1_R>;
+			dmas = <&dmamux1 37 0x400 0x05>,
+			       <&dmamux1 38 0x400 0x05>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
+		i2s1: audio-controller@44004000 {
+			compatible = "st,stm32h7-i2s";
+			#sound-dai-cells = <0>;
+			reg = <0x44004000 0x400>;
+			interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&dmamux1 37 0x400 0x01>,
+			       <&dmamux1 38 0x400 0x01>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
+		spi4: spi@44005000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32h7-spi";
+			reg = <0x44005000 0x400>;
+			interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc SPI4_K>;
+			resets = <&rcc SPI4_R>;
+			dmas = <&dmamux1 83 0x400 0x05>,
+			       <&dmamux1 84 0x400 0x05>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
+		timers15: timer@44006000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-timers";
+			reg = <0x44006000 0x400>;
+			clocks = <&rcc TIM15_K>;
+			clock-names = "int";
+			dmas = <&dmamux1 105 0x400 0x1>,
+			       <&dmamux1 106 0x400 0x1>,
+			       <&dmamux1 107 0x400 0x1>,
+			       <&dmamux1 108 0x400 0x1>;
+			dma-names = "ch1", "up", "trig", "com";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			timer@14 {
+				compatible = "st,stm32h7-timer-trigger";
+				reg = <14>;
+				status = "disabled";
+			};
+		};
+
+		timers16: timer@44007000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-timers";
+			reg = <0x44007000 0x400>;
+			clocks = <&rcc TIM16_K>;
+			clock-names = "int";
+			dmas = <&dmamux1 109 0x400 0x1>,
+			       <&dmamux1 110 0x400 0x1>;
+			dma-names = "ch1", "up";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+			timer@15 {
+				compatible = "st,stm32h7-timer-trigger";
+				reg = <15>;
+				status = "disabled";
+			};
+		};
+
+		timers17: timer@44008000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-timers";
+			reg = <0x44008000 0x400>;
+			clocks = <&rcc TIM17_K>;
+			clock-names = "int";
+			dmas = <&dmamux1 111 0x400 0x1>,
+			       <&dmamux1 112 0x400 0x1>;
+			dma-names = "ch1", "up";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			timer@16 {
+				compatible = "st,stm32h7-timer-trigger";
+				reg = <16>;
+				status = "disabled";
+			};
+		};
+
+		spi5: spi@44009000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32h7-spi";
+			reg = <0x44009000 0x400>;
+			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc SPI5_K>;
+			resets = <&rcc SPI5_R>;
+			dmas = <&dmamux1 85 0x400 0x05>,
+			       <&dmamux1 86 0x400 0x05>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
+		sai1: sai@4400a000 {
+			compatible = "st,stm32h7-sai";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x4400a000 0x400>;
+			reg = <0x4400a000 0x4>, <0x4400a3f0 0x10>;
+			interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&rcc SAI1_R>;
+			status = "disabled";
+
+			sai1a: audio-controller@4400a004 {
+				#sound-dai-cells = <0>;
+
+				compatible = "st,stm32-sai-sub-a";
+				reg = <0x4 0x1c>;
+				clocks = <&rcc SAI1_K>;
+				clock-names = "sai_ck";
+				dmas = <&dmamux1 87 0x400 0x01>;
+				status = "disabled";
+			};
+
+			sai1b: audio-controller@4400a024 {
+				#sound-dai-cells = <0>;
+				compatible = "st,stm32-sai-sub-b";
+				reg = <0x24 0x1c>;
+				clocks = <&rcc SAI1_K>;
+				clock-names = "sai_ck";
+				dmas = <&dmamux1 88 0x400 0x01>;
+				status = "disabled";
+			};
+		};
+
+		sai2: sai@4400b000 {
+			compatible = "st,stm32h7-sai";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x4400b000 0x400>;
+			reg = <0x4400b000 0x4>, <0x4400b3f0 0x10>;
+			interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&rcc SAI2_R>;
+			status = "disabled";
+
+			sai2a: audio-controller@4400b004 {
+				#sound-dai-cells = <0>;
+				compatible = "st,stm32-sai-sub-a";
+				reg = <0x4 0x1c>;
+				clocks = <&rcc SAI2_K>;
+				clock-names = "sai_ck";
+				dmas = <&dmamux1 89 0x400 0x01>;
+				status = "disabled";
+			};
+
+			sai2b: audio-controller@4400b024 {
+				#sound-dai-cells = <0>;
+				compatible = "st,stm32-sai-sub-b";
+				reg = <0x24 0x1c>;
+				clocks = <&rcc SAI2_K>;
+				clock-names = "sai_ck";
+				dmas = <&dmamux1 90 0x400 0x01>;
+				status = "disabled";
+			};
+		};
+
+		sai3: sai@4400c000 {
+			compatible = "st,stm32h7-sai";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x4400c000 0x400>;
+			reg = <0x4400c000 0x4>, <0x4400c3f0 0x10>;
+			interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&rcc SAI3_R>;
+			status = "disabled";
+
+			sai3a: audio-controller@4400c004 {
+				#sound-dai-cells = <0>;
+				compatible = "st,stm32-sai-sub-a";
+				reg = <0x04 0x1c>;
+				clocks = <&rcc SAI3_K>;
+				clock-names = "sai_ck";
+				dmas = <&dmamux1 113 0x400 0x01>;
+				status = "disabled";
+			};
+
+			sai3b: audio-controller@4400c024 {
+				#sound-dai-cells = <0>;
+				compatible = "st,stm32-sai-sub-b";
+				reg = <0x24 0x1c>;
+				clocks = <&rcc SAI3_K>;
+				clock-names = "sai_ck";
+				dmas = <&dmamux1 114 0x400 0x01>;
+				status = "disabled";
+			};
+		};
+
+		dfsdm: dfsdm@4400d000 {
+			compatible = "st,stm32mp1-dfsdm";
+			reg = <0x4400d000 0x800>;
+			clocks = <&rcc DFSDM_K>;
+			clock-names = "dfsdm";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+
+			dfsdm0: filter@0 {
+				compatible = "st,stm32-dfsdm-adc";
+				#io-channel-cells = <1>;
+				reg = <0>;
+				interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&dmamux1 101 0x400 0x01>;
+				dma-names = "rx";
+				status = "disabled";
+			};
+
+			dfsdm1: filter@1 {
+				compatible = "st,stm32-dfsdm-adc";
+				#io-channel-cells = <1>;
+				reg = <1>;
+				interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&dmamux1 102 0x400 0x01>;
+				dma-names = "rx";
+				status = "disabled";
+			};
+
+			dfsdm2: filter@2 {
+				compatible = "st,stm32-dfsdm-adc";
+				#io-channel-cells = <1>;
+				reg = <2>;
+				interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&dmamux1 103 0x400 0x01>;
+				dma-names = "rx";
+				status = "disabled";
+			};
+
+			dfsdm3: filter@3 {
+				compatible = "st,stm32-dfsdm-adc";
+				#io-channel-cells = <1>;
+				reg = <3>;
+				interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&dmamux1 104 0x400 0x01>;
+				dma-names = "rx";
+				status = "disabled";
+			};
+
+			dfsdm4: filter@4 {
+				compatible = "st,stm32-dfsdm-adc";
+				#io-channel-cells = <1>;
+				reg = <4>;
+				interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&dmamux1 91 0x400 0x01>;
+				dma-names = "rx";
+				status = "disabled";
+			};
+
+			dfsdm5: filter@5 {
+				compatible = "st,stm32-dfsdm-adc";
+				#io-channel-cells = <1>;
+				reg = <5>;
+				interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
+				dmas = <&dmamux1 92 0x400 0x01>;
+				dma-names = "rx";
+				status = "disabled";
+			};
+		};
+
+		dma1: dma-controller@48000000 {
+			compatible = "st,stm32-dma";
+			reg = <0x48000000 0x400>;
+			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc DMA1>;
+			#dma-cells = <4>;
+			st,mem2mem;
+			dma-requests = <8>;
+		};
+
+		dma2: dma-controller@48001000 {
+			compatible = "st,stm32-dma";
+			reg = <0x48001000 0x400>;
+			interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc DMA2>;
+			#dma-cells = <4>;
+			st,mem2mem;
+			dma-requests = <8>;
+		};
+
+		dmamux1: dma-router@48002000 {
+			compatible = "st,stm32h7-dmamux";
+			reg = <0x48002000 0x1c>;
+			#dma-cells = <3>;
+			dma-requests = <128>;
+			dma-masters = <&dma1 &dma2>;
+			dma-channels = <16>;
+			clocks = <&rcc DMAMUX>;
+		};
+
+		adc: adc@48003000 {
+			compatible = "st,stm32mp1-adc-core";
+			reg = <0x48003000 0x400>;
+			interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc ADC12>, <&rcc ADC12_K>;
+			clock-names = "bus", "adc";
+			interrupt-controller;
+			st,syscfg = <&syscfg>;
+			#interrupt-cells = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+
+			adc1: adc@0 {
+				compatible = "st,stm32mp1-adc";
+				#io-channel-cells = <1>;
+				reg = <0x0>;
+				interrupt-parent = <&adc>;
+				interrupts = <0>;
+				dmas = <&dmamux1 9 0x400 0x01>;
+				dma-names = "rx";
+				status = "disabled";
+			};
+
+			adc2: adc@100 {
+				compatible = "st,stm32mp1-adc";
+				#io-channel-cells = <1>;
+				reg = <0x100>;
+				interrupt-parent = <&adc>;
+				interrupts = <1>;
+				dmas = <&dmamux1 10 0x400 0x01>;
+				dma-names = "rx";
+				status = "disabled";
+			};
+		};
+
+		sdmmc3: sdmmc@48004000 {
+			compatible = "arm,pl18x", "arm,primecell";
+			arm,primecell-periphid = <0x10153180>;
+			reg = <0x48004000 0x400>;
+			interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "cmd_irq";
+			clocks = <&rcc SDMMC3_K>;
+			clock-names = "apb_pclk";
+			resets = <&rcc SDMMC3_R>;
+			cap-sd-highspeed;
+			cap-mmc-highspeed;
+			max-frequency = <120000000>;
+			status = "disabled";
+		};
+
+		usbotg_hs: usb-otg@49000000 {
+			compatible = "snps,dwc2";
+			reg = <0x49000000 0x10000>;
+			clocks = <&rcc USBO_K>;
+			clock-names = "otg";
+			resets = <&rcc USBO_R>;
+			reset-names = "dwc2";
+			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+			g-rx-fifo-size = <256>;
+			g-np-tx-fifo-size = <32>;
+			g-tx-fifo-size = <128 128 64 64 64 64 32 32>;
+			dr_mode = "otg";
+			status = "disabled";
+		};
+
+		ipcc: mailbox@4c001000 {
+			compatible = "st,stm32mp1-ipcc";
+			#mbox-cells = <1>;
+			reg = <0x4c001000 0x400>;
+			st,proc-id = <0>;
+			interrupts-extended =
+				<&intc GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+				<&intc GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+				<&exti 61 1>;
+			interrupt-names = "rx", "tx", "wakeup";
+			clocks = <&rcc IPCC>;
+			wakeup-source;
+			status = "disabled";
+		};
+
+		dcmi: dcmi@4c006000 {
+			compatible = "st,stm32-dcmi";
+			reg = <0x4c006000 0x400>;
+			interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&rcc CAMITF_R>;
+			clocks = <&rcc DCMI>;
+			clock-names = "mclk";
+			dmas = <&dmamux1 75 0x400 0x0d>;
+			dma-names = "tx";
+			status = "disabled";
+		};
+
+		rcc: rcc@50000000 {
+			compatible = "st,stm32mp1-rcc", "syscon";
+			reg = <0x50000000 0x1000>;
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+		};
+
+		pwr_regulators: pwr@50001000 {
+			compatible = "st,stm32mp1,pwr-reg";
+			reg = <0x50001000 0x10>;
+
+			reg11: reg11 {
+				regulator-name = "reg11";
+				regulator-min-microvolt = <1100000>;
+				regulator-max-microvolt = <1100000>;
+			};
+
+			reg18: reg18 {
+				regulator-name = "reg18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			usb33: usb33 {
+				regulator-name = "usb33";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+			};
+		};
+
+		exti: interrupt-controller@5000d000 {
+			compatible = "st,stm32mp1-exti", "syscon";
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			reg = <0x5000d000 0x400>;
+		};
+
+		syscfg: syscon@50020000 {
+			compatible = "st,stm32mp157-syscfg", "syscon";
+			reg = <0x50020000 0x400>;
+			clocks = <&rcc SYSCFG>;
+		};
+
+		lptimer2: timer@50021000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-lptimer";
+			reg = <0x50021000 0x400>;
+			clocks = <&rcc LPTIM2_K>;
+			clock-names = "mux";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm-lp";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			trigger@1 {
+				compatible = "st,stm32-lptimer-trigger";
+				reg = <1>;
+				status = "disabled";
+			};
+
+			counter {
+				compatible = "st,stm32-lptimer-counter";
+				status = "disabled";
+			};
+		};
+
+		lptimer3: timer@50022000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32-lptimer";
+			reg = <0x50022000 0x400>;
+			clocks = <&rcc LPTIM3_K>;
+			clock-names = "mux";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm-lp";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			trigger@2 {
+				compatible = "st,stm32-lptimer-trigger";
+				reg = <2>;
+				status = "disabled";
+			};
+		};
+
+		lptimer4: timer@50023000 {
+			compatible = "st,stm32-lptimer";
+			reg = <0x50023000 0x400>;
+			clocks = <&rcc LPTIM4_K>;
+			clock-names = "mux";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm-lp";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+		};
+
+		lptimer5: timer@50024000 {
+			compatible = "st,stm32-lptimer";
+			reg = <0x50024000 0x400>;
+			clocks = <&rcc LPTIM5_K>;
+			clock-names = "mux";
+			status = "disabled";
+
+			pwm {
+				compatible = "st,stm32-pwm-lp";
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+		};
+
+		vrefbuf: vrefbuf@50025000 {
+			compatible = "st,stm32-vrefbuf";
+			reg = <0x50025000 0x8>;
+			regulator-min-microvolt = <1500000>;
+			regulator-max-microvolt = <2500000>;
+			clocks = <&rcc VREF>;
+			status = "disabled";
+		};
+
+		sai4: sai@50027000 {
+			compatible = "st,stm32h7-sai";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x50027000 0x400>;
+			reg = <0x50027000 0x4>, <0x500273f0 0x10>;
+			interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&rcc SAI4_R>;
+			status = "disabled";
+
+			sai4a: audio-controller@50027004 {
+				#sound-dai-cells = <0>;
+				compatible = "st,stm32-sai-sub-a";
+				reg = <0x04 0x1c>;
+				clocks = <&rcc SAI4_K>;
+				clock-names = "sai_ck";
+				dmas = <&dmamux1 99 0x400 0x01>;
+				status = "disabled";
+			};
+
+			sai4b: audio-controller@50027024 {
+				#sound-dai-cells = <0>;
+				compatible = "st,stm32-sai-sub-b";
+				reg = <0x24 0x1c>;
+				clocks = <&rcc SAI4_K>;
+				clock-names = "sai_ck";
+				dmas = <&dmamux1 100 0x400 0x01>;
+				status = "disabled";
+			};
+		};
+
+		dts: thermal@50028000 {
+			compatible = "st,stm32-thermal";
+			reg = <0x50028000 0x100>;
+			interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc TMPSENS>;
+			clock-names = "pclk";
+			#thermal-sensor-cells = <0>;
+			status = "disabled";
+		};
+
+		hash1: hash@54002000 {
+			compatible = "st,stm32f756-hash";
+			reg = <0x54002000 0x400>;
+			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc HASH1>;
+			resets = <&rcc HASH1_R>;
+			dmas = <&mdma1 31 0x10 0x1000A02 0x0 0x0>;
+			dma-names = "in";
+			dma-maxburst = <2>;
+			status = "disabled";
+		};
+
+		rng1: rng@54003000 {
+			compatible = "st,stm32-rng";
+			reg = <0x54003000 0x400>;
+			clocks = <&rcc RNG1_K>;
+			resets = <&rcc RNG1_R>;
+			status = "disabled";
+		};
+
+		mdma1: dma-controller@58000000 {
+			compatible = "st,stm32h7-mdma";
+			reg = <0x58000000 0x1000>;
+			interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc MDMA>;
+			#dma-cells = <5>;
+			dma-channels = <32>;
+			dma-requests = <48>;
+		};
+
+		fmc: nand-controller@58002000 {
+			compatible = "st,stm32mp15-fmc2";
+			reg = <0x58002000 0x1000>,
+			      <0x80000000 0x1000>,
+			      <0x88010000 0x1000>,
+			      <0x88020000 0x1000>,
+			      <0x81000000 0x1000>,
+			      <0x89010000 0x1000>,
+			      <0x89020000 0x1000>;
+			interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&mdma1 20 0x10 0x12000a02 0x0 0x0>,
+			       <&mdma1 20 0x10 0x12000a08 0x0 0x0>,
+			       <&mdma1 21 0x10 0x12000a0a 0x0 0x0>;
+			dma-names = "tx", "rx", "ecc";
+			clocks = <&rcc FMC_K>;
+			resets = <&rcc FMC_R>;
+			status = "disabled";
+		};
+
+		qspi: spi@58003000 {
+			compatible = "st,stm32f469-qspi";
+			reg = <0x58003000 0x1000>, <0x70000000 0x10000000>;
+			reg-names = "qspi", "qspi_mm";
+			interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&mdma1 22 0x10 0x100002 0x0 0x0>,
+			       <&mdma1 22 0x10 0x100008 0x0 0x0>;
+			dma-names = "tx", "rx";
+			clocks = <&rcc QSPI_K>;
+			resets = <&rcc QSPI_R>;
+			status = "disabled";
+		};
+
+		sdmmc1: sdmmc@58005000 {
+			compatible = "arm,pl18x", "arm,primecell";
+			arm,primecell-periphid = <0x10153180>;
+			reg = <0x58005000 0x1000>;
+			interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "cmd_irq";
+			clocks = <&rcc SDMMC1_K>;
+			clock-names = "apb_pclk";
+			resets = <&rcc SDMMC1_R>;
+			cap-sd-highspeed;
+			cap-mmc-highspeed;
+			max-frequency = <120000000>;
+			status = "disabled";
+		};
+
+		sdmmc2: sdmmc@58007000 {
+			compatible = "arm,pl18x", "arm,primecell";
+			arm,primecell-periphid = <0x10153180>;
+			reg = <0x58007000 0x1000>;
+			interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "cmd_irq";
+			clocks = <&rcc SDMMC2_K>;
+			clock-names = "apb_pclk";
+			resets = <&rcc SDMMC2_R>;
+			cap-sd-highspeed;
+			cap-mmc-highspeed;
+			max-frequency = <120000000>;
+			status = "disabled";
+		};
+
+		crc1: crc@58009000 {
+			compatible = "st,stm32f7-crc";
+			reg = <0x58009000 0x400>;
+			clocks = <&rcc CRC1>;
+			status = "disabled";
+		};
+
+		stmmac_axi_config_0: stmmac-axi-config {
+			snps,wr_osr_lmt = <0x7>;
+			snps,rd_osr_lmt = <0x7>;
+			snps,blen = <0 0 0 0 16 8 4>;
+		};
+
+		ethernet0: ethernet@5800a000 {
+			compatible = "st,stm32mp1-dwmac", "snps,dwmac-4.20a";
+			reg = <0x5800a000 0x2000>;
+			reg-names = "stmmaceth";
+			interrupts-extended = <&intc GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "macirq";
+			clock-names = "stmmaceth",
+				      "mac-clk-tx",
+				      "mac-clk-rx",
+				      "ethstp";
+			clocks = <&rcc ETHMAC>,
+				 <&rcc ETHTX>,
+				 <&rcc ETHRX>,
+				 <&rcc ETHSTP>;
+			st,syscon = <&syscfg 0x4>;
+			snps,mixed-burst;
+			snps,pbl = <2>;
+			snps,en-tx-lpi-clockgating;
+			snps,axi-config = <&stmmac_axi_config_0>;
+			snps,tso;
+			status = "disabled";
+		};
+
+		usbh_ohci: usbh-ohci@5800c000 {
+			compatible = "generic-ohci";
+			reg = <0x5800c000 0x1000>;
+			clocks = <&rcc USBH>;
+			resets = <&rcc USBH_R>;
+			interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		usbh_ehci: usbh-ehci@5800d000 {
+			compatible = "generic-ehci";
+			reg = <0x5800d000 0x1000>;
+			clocks = <&rcc USBH>;
+			resets = <&rcc USBH_R>;
+			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+			companion = <&usbh_ohci>;
+			status = "disabled";
+		};
+
+		ltdc: display-controller@5a001000 {
+			compatible = "st,stm32-ltdc";
+			reg = <0x5a001000 0x400>;
+			interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc LTDC_PX>;
+			clock-names = "lcd";
+			resets = <&rcc LTDC_R>;
+			status = "disabled";
+		};
+
+		iwdg2: watchdog@5a002000 {
+			compatible = "st,stm32mp1-iwdg";
+			reg = <0x5a002000 0x400>;
+			clocks = <&rcc IWDG2>, <&rcc CK_LSI>;
+			clock-names = "pclk", "lsi";
+			status = "disabled";
+		};
+
+		usbphyc: usbphyc@5a006000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32mp1-usbphyc";
+			reg = <0x5a006000 0x1000>;
+			clocks = <&rcc USBPHY_K>;
+			resets = <&rcc USBPHY_R>;
+			status = "disabled";
+
+			usbphyc_port0: usb-phy@0 {
+				#phy-cells = <0>;
+				reg = <0>;
+			};
+
+			usbphyc_port1: usb-phy@1 {
+				#phy-cells = <1>;
+				reg = <1>;
+			};
+		};
+
+		usart1: serial@5c000000 {
+			compatible = "st,stm32h7-uart";
+			reg = <0x5c000000 0x400>;
+			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc USART1_K>;
+			status = "disabled";
+		};
+
+		spi6: spi@5c001000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "st,stm32h7-spi";
+			reg = <0x5c001000 0x400>;
+			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc SPI6_K>;
+			resets = <&rcc SPI6_R>;
+			dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
+			       <&mdma1 35 0x0 0x40002 0x0 0x0>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
+		i2c4: i2c@5c002000 {
+			compatible = "st,stm32f7-i2c";
+			reg = <0x5c002000 0x400>;
+			interrupt-names = "event", "error";
+			interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc I2C4_K>;
+			resets = <&rcc I2C4_R>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		rtc: rtc@5c004000 {
+			compatible = "st,stm32mp1-rtc";
+			reg = <0x5c004000 0x400>;
+			clocks = <&rcc RTCAPB>, <&rcc RTC>;
+			clock-names = "pclk", "rtc_ck";
+			interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		bsec: efuse@5c005000 {
+			compatible = "st,stm32mp15-bsec";
+			reg = <0x5c005000 0x400>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ts_cal1: calib@5c {
+				reg = <0x5c 0x2>;
+			};
+			ts_cal2: calib@5e {
+				reg = <0x5e 0x2>;
+			};
+		};
+
+		i2c6: i2c@5c009000 {
+			compatible = "st,stm32f7-i2c";
+			reg = <0x5c009000 0x400>;
+			interrupt-names = "event", "error";
+			interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc I2C6_K>;
+			resets = <&rcc I2C6_R>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		/*
+		 * Break node order to solve dependency probe issue between
+		 * pinctrl and exti.
+		 */
+		pinctrl: pin-controller@50002000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "st,stm32mp157-pinctrl";
+			ranges = <0 0x50002000 0xa400>;
+			interrupt-parent = <&exti>;
+			st,syscfg = <&exti 0x60 0xff>;
+			pins-are-numbered;
+
+			gpioa: gpio@50002000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				reg = <0x0 0x400>;
+				clocks = <&rcc GPIOA>;
+				st,bank-name = "GPIOA";
+				status = "disabled";
+			};
+
+			gpiob: gpio@50003000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				reg = <0x1000 0x400>;
+				clocks = <&rcc GPIOB>;
+				st,bank-name = "GPIOB";
+				status = "disabled";
+			};
+
+			gpioc: gpio@50004000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				reg = <0x2000 0x400>;
+				clocks = <&rcc GPIOC>;
+				st,bank-name = "GPIOC";
+				status = "disabled";
+			};
+
+			gpiod: gpio@50005000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				reg = <0x3000 0x400>;
+				clocks = <&rcc GPIOD>;
+				st,bank-name = "GPIOD";
+				status = "disabled";
+			};
+
+			gpioe: gpio@50006000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				reg = <0x4000 0x400>;
+				clocks = <&rcc GPIOE>;
+				st,bank-name = "GPIOE";
+				status = "disabled";
+			};
+
+			gpiof: gpio@50007000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				reg = <0x5000 0x400>;
+				clocks = <&rcc GPIOF>;
+				st,bank-name = "GPIOF";
+				status = "disabled";
+			};
+
+			gpiog: gpio@50008000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				reg = <0x6000 0x400>;
+				clocks = <&rcc GPIOG>;
+				st,bank-name = "GPIOG";
+				status = "disabled";
+			};
+
+			gpioh: gpio@50009000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				reg = <0x7000 0x400>;
+				clocks = <&rcc GPIOH>;
+				st,bank-name = "GPIOH";
+				status = "disabled";
+			};
+
+			gpioi: gpio@5000a000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				reg = <0x8000 0x400>;
+				clocks = <&rcc GPIOI>;
+				st,bank-name = "GPIOI";
+				status = "disabled";
+			};
+
+			gpioj: gpio@5000b000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				reg = <0x9000 0x400>;
+				clocks = <&rcc GPIOJ>;
+				st,bank-name = "GPIOJ";
+				status = "disabled";
+			};
+
+			gpiok: gpio@5000c000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				reg = <0xa000 0x400>;
+				clocks = <&rcc GPIOK>;
+				st,bank-name = "GPIOK";
+				status = "disabled";
+			};
+		};
+
+		pinctrl_z: pin-controller-z@54004000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "st,stm32mp157-z-pinctrl";
+			ranges = <0 0x54004000 0x400>;
+			pins-are-numbered;
+			interrupt-parent = <&exti>;
+			st,syscfg = <&exti 0x60 0xff>;
+
+			gpioz: gpio@54004000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				reg = <0 0x400>;
+				clocks = <&rcc GPIOZ>;
+				st,bank-name = "GPIOZ";
+				st,bank-ioport = <11>;
+				status = "disabled";
+			};
+		};
+	};
+
+	mlahb: ahb {
+		compatible = "st,mlahb", "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		dma-ranges = <0x00000000 0x38000000 0x10000>,
+			     <0x10000000 0x10000000 0x60000>,
+			     <0x30000000 0x30000000 0x60000>;
+
+		m4_rproc: m4@10000000 {
+			compatible = "st,stm32mp1-m4";
+			reg = <0x10000000 0x40000>,
+			      <0x30000000 0x40000>,
+			      <0x38000000 0x10000>;
+			resets = <&rcc MCU_R>;
+			st,syscfg-holdboot = <&rcc 0x10C 0x1>;
+			st,syscfg-tz = <&rcc 0x000 0x1>;
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/stm32mp153.dtsi b/arch/arm/boot/dts/stm32mp153.dtsi
new file mode 100644
index 0000000..2d759fc
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp153.dtsi
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
+ */
+
+#include "stm32mp151.dtsi"
+
+/ {
+	cpus {
+		cpu1: cpu@1 {
+			compatible = "arm,cortex-a7";
+			device_type = "cpu";
+			reg = <1>;
+		};
+	};
+
+	soc {
+		m_can1: can@4400e000 {
+			compatible = "bosch,m_can";
+			reg = <0x4400e000 0x400>, <0x44011000 0x1400>;
+			reg-names = "m_can", "message_ram";
+			interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "int0", "int1";
+			clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
+			clock-names = "hclk", "cclk";
+			bosch,mram-cfg = <0x0 0 0 32 0 0 2 2>;
+			status = "disabled";
+		};
+
+		m_can2: can@4400f000 {
+			compatible = "bosch,m_can";
+			reg = <0x4400f000 0x400>, <0x44011000 0x2800>;
+			reg-names = "m_can", "message_ram";
+			interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "int0", "int1";
+			clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
+			clock-names = "hclk", "cclk";
+			bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>;
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi
deleted file mode 100644
index 3d1ecb4..0000000
--- a/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi
+++ /dev/null
@@ -1,953 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/*
- * Copyright (C) STMicroelectronics 2017 - All Rights Reserved
- * Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
- */
-#include <dt-bindings/pinctrl/stm32-pinfunc.h>
-
-/ {
-	soc {
-		pinctrl: pin-controller@50002000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "st,stm32mp157-pinctrl";
-			ranges = <0 0x50002000 0xa400>;
-			interrupt-parent = <&exti>;
-			st,syscfg = <&exti 0x60 0xff>;
-			pins-are-numbered;
-
-			gpioa: gpio@50002000 {
-				gpio-controller;
-				#gpio-cells = <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg = <0x0 0x400>;
-				clocks = <&rcc GPIOA>;
-				st,bank-name = "GPIOA";
-				status = "disabled";
-			};
-
-			gpiob: gpio@50003000 {
-				gpio-controller;
-				#gpio-cells = <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg = <0x1000 0x400>;
-				clocks = <&rcc GPIOB>;
-				st,bank-name = "GPIOB";
-				status = "disabled";
-			};
-
-			gpioc: gpio@50004000 {
-				gpio-controller;
-				#gpio-cells = <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg = <0x2000 0x400>;
-				clocks = <&rcc GPIOC>;
-				st,bank-name = "GPIOC";
-				status = "disabled";
-			};
-
-			gpiod: gpio@50005000 {
-				gpio-controller;
-				#gpio-cells = <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg = <0x3000 0x400>;
-				clocks = <&rcc GPIOD>;
-				st,bank-name = "GPIOD";
-				status = "disabled";
-			};
-
-			gpioe: gpio@50006000 {
-				gpio-controller;
-				#gpio-cells = <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg = <0x4000 0x400>;
-				clocks = <&rcc GPIOE>;
-				st,bank-name = "GPIOE";
-				status = "disabled";
-			};
-
-			gpiof: gpio@50007000 {
-				gpio-controller;
-				#gpio-cells = <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg = <0x5000 0x400>;
-				clocks = <&rcc GPIOF>;
-				st,bank-name = "GPIOF";
-				status = "disabled";
-			};
-
-			gpiog: gpio@50008000 {
-				gpio-controller;
-				#gpio-cells = <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg = <0x6000 0x400>;
-				clocks = <&rcc GPIOG>;
-				st,bank-name = "GPIOG";
-				status = "disabled";
-			};
-
-			gpioh: gpio@50009000 {
-				gpio-controller;
-				#gpio-cells = <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg = <0x7000 0x400>;
-				clocks = <&rcc GPIOH>;
-				st,bank-name = "GPIOH";
-				status = "disabled";
-			};
-
-			gpioi: gpio@5000a000 {
-				gpio-controller;
-				#gpio-cells = <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg = <0x8000 0x400>;
-				clocks = <&rcc GPIOI>;
-				st,bank-name = "GPIOI";
-				status = "disabled";
-			};
-
-			gpioj: gpio@5000b000 {
-				gpio-controller;
-				#gpio-cells = <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg = <0x9000 0x400>;
-				clocks = <&rcc GPIOJ>;
-				st,bank-name = "GPIOJ";
-				status = "disabled";
-			};
-
-			gpiok: gpio@5000c000 {
-				gpio-controller;
-				#gpio-cells = <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg = <0xa000 0x400>;
-				clocks = <&rcc GPIOK>;
-				st,bank-name = "GPIOK";
-				status = "disabled";
-			};
-
-			adc12_ain_pins_a: adc12-ain-0 {
-				pins {
-					pinmux = <STM32_PINMUX('C', 3, ANALOG)>, /* ADC1 in13 */
-						 <STM32_PINMUX('F', 12, ANALOG)>, /* ADC1 in6 */
-						 <STM32_PINMUX('F', 13, ANALOG)>, /* ADC2 in2 */
-						 <STM32_PINMUX('F', 14, ANALOG)>; /* ADC2 in6 */
-				};
-			};
-
-			adc12_usb_cc_pins_a: adc12-usb-cc-pins-0 {
-				pins {
-					pinmux = <STM32_PINMUX('A', 4, ANALOG)>, /* ADC12 in18 */
-						 <STM32_PINMUX('A', 5, ANALOG)>; /* ADC12 in19 */
-				};
-			};
-
-			cec_pins_a: cec-0 {
-				pins {
-					pinmux = <STM32_PINMUX('A', 15, AF4)>;
-					bias-disable;
-					drive-open-drain;
-					slew-rate = <0>;
-				};
-			};
-
-			cec_pins_sleep_a: cec-sleep-0 {
-				pins {
-					pinmux = <STM32_PINMUX('A', 15, ANALOG)>; /* HDMI_CEC */
-				};
-			};
-
-			cec_pins_b: cec-1 {
-				pins {
-					pinmux = <STM32_PINMUX('B', 6, AF5)>;
-					bias-disable;
-					drive-open-drain;
-					slew-rate = <0>;
-				};
-			};
-
-			cec_pins_sleep_b: cec-sleep-1 {
-				pins {
-					pinmux = <STM32_PINMUX('B', 6, ANALOG)>; /* HDMI_CEC */
-				};
-			};
-
-			dac_ch1_pins_a: dac-ch1 {
-				pins {
-					pinmux = <STM32_PINMUX('A', 4, ANALOG)>;
-				};
-			};
-
-			dac_ch2_pins_a: dac-ch2 {
-				pins {
-					pinmux = <STM32_PINMUX('A', 5, ANALOG)>;
-				};
-			};
-
-			dcmi_pins_a: dcmi-0 {
-				pins {
-					pinmux = <STM32_PINMUX('H', 8,  AF13)>,/* DCMI_HSYNC */
-						 <STM32_PINMUX('B', 7,  AF13)>,/* DCMI_VSYNC */
-						 <STM32_PINMUX('A', 6,  AF13)>,/* DCMI_PIXCLK */
-						 <STM32_PINMUX('H', 9,  AF13)>,/* DCMI_D0 */
-						 <STM32_PINMUX('H', 10, AF13)>,/* DCMI_D1 */
-						 <STM32_PINMUX('H', 11, AF13)>,/* DCMI_D2 */
-						 <STM32_PINMUX('H', 12, AF13)>,/* DCMI_D3 */
-						 <STM32_PINMUX('H', 14, AF13)>,/* DCMI_D4 */
-						 <STM32_PINMUX('I', 4,  AF13)>,/* DCMI_D5 */
-						 <STM32_PINMUX('B', 8,  AF13)>,/* DCMI_D6 */
-						 <STM32_PINMUX('E', 6,  AF13)>,/* DCMI_D7 */
-						 <STM32_PINMUX('I', 1,  AF13)>,/* DCMI_D8 */
-						 <STM32_PINMUX('H', 7,  AF13)>,/* DCMI_D9 */
-						 <STM32_PINMUX('I', 3,  AF13)>,/* DCMI_D10 */
-						 <STM32_PINMUX('H', 15, AF13)>;/* DCMI_D11 */
-					bias-disable;
-				};
-			};
-
-			dcmi_sleep_pins_a: dcmi-sleep-0 {
-				pins {
-					pinmux = <STM32_PINMUX('H', 8,  ANALOG)>,/* DCMI_HSYNC */
-						 <STM32_PINMUX('B', 7,  ANALOG)>,/* DCMI_VSYNC */
-						 <STM32_PINMUX('A', 6,  ANALOG)>,/* DCMI_PIXCLK */
-						 <STM32_PINMUX('H', 9,  ANALOG)>,/* DCMI_D0 */
-						 <STM32_PINMUX('H', 10, ANALOG)>,/* DCMI_D1 */
-						 <STM32_PINMUX('H', 11, ANALOG)>,/* DCMI_D2 */
-						 <STM32_PINMUX('H', 12, ANALOG)>,/* DCMI_D3 */
-						 <STM32_PINMUX('H', 14, ANALOG)>,/* DCMI_D4 */
-						 <STM32_PINMUX('I', 4,  ANALOG)>,/* DCMI_D5 */
-						 <STM32_PINMUX('B', 8,  ANALOG)>,/* DCMI_D6 */
-						 <STM32_PINMUX('E', 6,  ANALOG)>,/* DCMI_D7 */
-						 <STM32_PINMUX('I', 1,  ANALOG)>,/* DCMI_D8 */
-						 <STM32_PINMUX('H', 7,  ANALOG)>,/* DCMI_D9 */
-						 <STM32_PINMUX('I', 3,  ANALOG)>,/* DCMI_D10 */
-						 <STM32_PINMUX('H', 15, ANALOG)>;/* DCMI_D11 */
-				};
-			};
-
-			ethernet0_rgmii_pins_a: rgmii-0 {
-				pins1 {
-					pinmux = <STM32_PINMUX('G', 5, AF11)>, /* ETH_RGMII_CLK125 */
-						 <STM32_PINMUX('G', 4, AF11)>, /* ETH_RGMII_GTX_CLK */
-						 <STM32_PINMUX('G', 13, AF11)>, /* ETH_RGMII_TXD0 */
-						 <STM32_PINMUX('G', 14, AF11)>, /* ETH_RGMII_TXD1 */
-						 <STM32_PINMUX('C', 2, AF11)>, /* ETH_RGMII_TXD2 */
-						 <STM32_PINMUX('E', 2, AF11)>, /* ETH_RGMII_TXD3 */
-						 <STM32_PINMUX('B', 11, AF11)>, /* ETH_RGMII_TX_CTL */
-						 <STM32_PINMUX('A', 2, AF11)>, /* ETH_MDIO */
-						 <STM32_PINMUX('C', 1, AF11)>; /* ETH_MDC */
-					bias-disable;
-					drive-push-pull;
-					slew-rate = <3>;
-				};
-				pins2 {
-					pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH_RGMII_RXD0 */
-						 <STM32_PINMUX('C', 5, AF11)>, /* ETH_RGMII_RXD1 */
-						 <STM32_PINMUX('B', 0, AF11)>, /* ETH_RGMII_RXD2 */
-						 <STM32_PINMUX('B', 1, AF11)>, /* ETH_RGMII_RXD3 */
-						 <STM32_PINMUX('A', 1, AF11)>, /* ETH_RGMII_RX_CLK */
-						 <STM32_PINMUX('A', 7, AF11)>; /* ETH_RGMII_RX_CTL */
-					bias-disable;
-				};
-			};
-
-			ethernet0_rgmii_pins_sleep_a: rgmii-sleep-0 {
-				pins1 {
-					pinmux = <STM32_PINMUX('G', 5, ANALOG)>, /* ETH_RGMII_CLK125 */
-						 <STM32_PINMUX('G', 4, ANALOG)>, /* ETH_RGMII_GTX_CLK */
-						 <STM32_PINMUX('G', 13, ANALOG)>, /* ETH_RGMII_TXD0 */
-						 <STM32_PINMUX('G', 14, ANALOG)>, /* ETH_RGMII_TXD1 */
-						 <STM32_PINMUX('C', 2, ANALOG)>, /* ETH_RGMII_TXD2 */
-						 <STM32_PINMUX('E', 2, ANALOG)>, /* ETH_RGMII_TXD3 */
-						 <STM32_PINMUX('B', 11, ANALOG)>, /* ETH_RGMII_TX_CTL */
-						 <STM32_PINMUX('A', 2, ANALOG)>, /* ETH_MDIO */
-						 <STM32_PINMUX('C', 1, ANALOG)>, /* ETH_MDC */
-						 <STM32_PINMUX('C', 4, ANALOG)>, /* ETH_RGMII_RXD0 */
-						 <STM32_PINMUX('C', 5, ANALOG)>, /* ETH_RGMII_RXD1 */
-						 <STM32_PINMUX('B', 0, ANALOG)>, /* ETH_RGMII_RXD2 */
-						 <STM32_PINMUX('B', 1, ANALOG)>, /* ETH_RGMII_RXD3 */
-						 <STM32_PINMUX('A', 1, ANALOG)>, /* ETH_RGMII_RX_CLK */
-						 <STM32_PINMUX('A', 7, ANALOG)>; /* ETH_RGMII_RX_CTL */
-				};
-			};
-
-			fmc_pins_a: fmc-0 {
-				pins1 {
-					pinmux = <STM32_PINMUX('D', 4, AF12)>, /* FMC_NOE */
-						 <STM32_PINMUX('D', 5, AF12)>, /* FMC_NWE */
-						 <STM32_PINMUX('D', 11, AF12)>, /* FMC_A16_FMC_CLE */
-						 <STM32_PINMUX('D', 12, AF12)>, /* FMC_A17_FMC_ALE */
-						 <STM32_PINMUX('D', 14, AF12)>, /* FMC_D0 */
-						 <STM32_PINMUX('D', 15, AF12)>, /* FMC_D1 */
-						 <STM32_PINMUX('D', 0, AF12)>, /* FMC_D2 */
-						 <STM32_PINMUX('D', 1, AF12)>, /* FMC_D3 */
-						 <STM32_PINMUX('E', 7, AF12)>, /* FMC_D4 */
-						 <STM32_PINMUX('E', 8, AF12)>, /* FMC_D5 */
-						 <STM32_PINMUX('E', 9, AF12)>, /* FMC_D6 */
-						 <STM32_PINMUX('E', 10, AF12)>, /* FMC_D7 */
-						 <STM32_PINMUX('G', 9, AF12)>; /* FMC_NE2_FMC_NCE */
-					bias-disable;
-					drive-push-pull;
-					slew-rate = <1>;
-				};
-				pins2 {
-					pinmux = <STM32_PINMUX('D', 6, AF12)>; /* FMC_NWAIT */
-					bias-pull-up;
-				};
-			};
-
-			fmc_sleep_pins_a: fmc-sleep-0 {
-				pins {
-					pinmux = <STM32_PINMUX('D', 4, ANALOG)>, /* FMC_NOE */
-						 <STM32_PINMUX('D', 5, ANALOG)>, /* FMC_NWE */
-						 <STM32_PINMUX('D', 11, ANALOG)>, /* FMC_A16_FMC_CLE */
-						 <STM32_PINMUX('D', 12, ANALOG)>, /* FMC_A17_FMC_ALE */
-						 <STM32_PINMUX('D', 14, ANALOG)>, /* FMC_D0 */
-						 <STM32_PINMUX('D', 15, ANALOG)>, /* FMC_D1 */
-						 <STM32_PINMUX('D', 0, ANALOG)>, /* FMC_D2 */
-						 <STM32_PINMUX('D', 1, ANALOG)>, /* FMC_D3 */
-						 <STM32_PINMUX('E', 7, ANALOG)>, /* FMC_D4 */
-						 <STM32_PINMUX('E', 8, ANALOG)>, /* FMC_D5 */
-						 <STM32_PINMUX('E', 9, ANALOG)>, /* FMC_D6 */
-						 <STM32_PINMUX('E', 10, ANALOG)>, /* FMC_D7 */
-						 <STM32_PINMUX('D', 6, ANALOG)>, /* FMC_NWAIT */
-						 <STM32_PINMUX('G', 9, ANALOG)>; /* FMC_NE2_FMC_NCE */
-				};
-			};
-
-			i2c1_pins_a: i2c1-0 {
-				pins {
-					pinmux = <STM32_PINMUX('D', 12, AF5)>, /* I2C1_SCL */
-						 <STM32_PINMUX('F', 15, AF5)>; /* I2C1_SDA */
-					bias-disable;
-					drive-open-drain;
-					slew-rate = <0>;
-				};
-			};
-
-			i2c1_pins_sleep_a: i2c1-1 {
-				pins {
-					pinmux = <STM32_PINMUX('D', 12, ANALOG)>, /* I2C1_SCL */
-						 <STM32_PINMUX('F', 15, ANALOG)>; /* I2C1_SDA */
-				};
-			};
-
-			i2c1_pins_b: i2c1-2 {
-				pins {
-					pinmux = <STM32_PINMUX('F', 14, AF5)>, /* I2C1_SCL */
-						 <STM32_PINMUX('F', 15, AF5)>; /* I2C1_SDA */
-					bias-disable;
-					drive-open-drain;
-					slew-rate = <0>;
-				};
-			};
-
-			i2c1_pins_sleep_b: i2c1-3 {
-				pins {
-					pinmux = <STM32_PINMUX('F', 14, ANALOG)>, /* I2C1_SCL */
-						 <STM32_PINMUX('F', 15, ANALOG)>; /* I2C1_SDA */
-				};
-			};
-
-			i2c2_pins_a: i2c2-0 {
-				pins {
-					pinmux = <STM32_PINMUX('H', 4, AF4)>, /* I2C2_SCL */
-						 <STM32_PINMUX('H', 5, AF4)>; /* I2C2_SDA */
-					bias-disable;
-					drive-open-drain;
-					slew-rate = <0>;
-				};
-			};
-
-			i2c2_pins_sleep_a: i2c2-1 {
-				pins {
-					pinmux = <STM32_PINMUX('H', 4, ANALOG)>, /* I2C2_SCL */
-						 <STM32_PINMUX('H', 5, ANALOG)>; /* I2C2_SDA */
-				};
-			};
-
-			i2c2_pins_b1: i2c2-2 {
-				pins {
-					pinmux = <STM32_PINMUX('H', 5, AF4)>; /* I2C2_SDA */
-					bias-disable;
-					drive-open-drain;
-					slew-rate = <0>;
-				};
-			};
-
-			i2c2_pins_sleep_b1: i2c2-3 {
-				pins {
-					pinmux = <STM32_PINMUX('H', 5, ANALOG)>; /* I2C2_SDA */
-				};
-			};
-
-			i2c5_pins_a: i2c5-0 {
-				pins {
-					pinmux = <STM32_PINMUX('A', 11, AF4)>, /* I2C5_SCL */
-						 <STM32_PINMUX('A', 12, AF4)>; /* I2C5_SDA */
-					bias-disable;
-					drive-open-drain;
-					slew-rate = <0>;
-				};
-			};
-
-			i2c5_pins_sleep_a: i2c5-1 {
-				pins {
-					pinmux = <STM32_PINMUX('A', 11, ANALOG)>, /* I2C5_SCL */
-						 <STM32_PINMUX('A', 12, ANALOG)>; /* I2C5_SDA */
-
-				};
-			};
-
-			i2s2_pins_a: i2s2-0 {
-				pins {
-					pinmux = <STM32_PINMUX('I', 3, AF5)>, /* I2S2_SDO */
-						 <STM32_PINMUX('B', 9, AF5)>, /* I2S2_WS */
-						 <STM32_PINMUX('A', 9, AF5)>; /* I2S2_CK */
-					slew-rate = <1>;
-					drive-push-pull;
-					bias-disable;
-				};
-			};
-
-			i2s2_pins_sleep_a: i2s2-1 {
-				pins {
-					pinmux = <STM32_PINMUX('I', 3, ANALOG)>, /* I2S2_SDO */
-						 <STM32_PINMUX('B', 9, ANALOG)>, /* I2S2_WS */
-						 <STM32_PINMUX('A', 9, ANALOG)>; /* I2S2_CK */
-				};
-			};
-
-			ltdc_pins_a: ltdc-a-0 {
-				pins {
-					pinmux = <STM32_PINMUX('G',  7, AF14)>, /* LCD_CLK */
-						 <STM32_PINMUX('I', 10, AF14)>, /* LCD_HSYNC */
-						 <STM32_PINMUX('I',  9, AF14)>, /* LCD_VSYNC */
-						 <STM32_PINMUX('F', 10, AF14)>, /* LCD_DE */
-						 <STM32_PINMUX('H',  2, AF14)>, /* LCD_R0 */
-						 <STM32_PINMUX('H',  3, AF14)>, /* LCD_R1 */
-						 <STM32_PINMUX('H',  8, AF14)>, /* LCD_R2 */
-						 <STM32_PINMUX('H',  9, AF14)>, /* LCD_R3 */
-						 <STM32_PINMUX('H', 10, AF14)>, /* LCD_R4 */
-						 <STM32_PINMUX('C',  0, AF14)>, /* LCD_R5 */
-						 <STM32_PINMUX('H', 12, AF14)>, /* LCD_R6 */
-						 <STM32_PINMUX('E', 15, AF14)>, /* LCD_R7 */
-						 <STM32_PINMUX('E',  5, AF14)>, /* LCD_G0 */
-						 <STM32_PINMUX('E',  6, AF14)>, /* LCD_G1 */
-						 <STM32_PINMUX('H', 13, AF14)>, /* LCD_G2 */
-						 <STM32_PINMUX('H', 14, AF14)>, /* LCD_G3 */
-						 <STM32_PINMUX('H', 15, AF14)>, /* LCD_G4 */
-						 <STM32_PINMUX('I',  0, AF14)>, /* LCD_G5 */
-						 <STM32_PINMUX('I',  1, AF14)>, /* LCD_G6 */
-						 <STM32_PINMUX('I',  2, AF14)>, /* LCD_G7 */
-						 <STM32_PINMUX('D',  9, AF14)>, /* LCD_B0 */
-						 <STM32_PINMUX('G', 12, AF14)>, /* LCD_B1 */
-						 <STM32_PINMUX('G', 10, AF14)>, /* LCD_B2 */
-						 <STM32_PINMUX('D', 10, AF14)>, /* LCD_B3 */
-						 <STM32_PINMUX('I',  4, AF14)>, /* LCD_B4 */
-						 <STM32_PINMUX('A',  3, AF14)>, /* LCD_B5 */
-						 <STM32_PINMUX('B',  8, AF14)>, /* LCD_B6 */
-						 <STM32_PINMUX('D',  8, AF14)>; /* LCD_B7 */
-					bias-disable;
-					drive-push-pull;
-					slew-rate = <1>;
-				};
-			};
-
-			ltdc_pins_sleep_a: ltdc-a-1 {
-				pins {
-					pinmux = <STM32_PINMUX('G',  7, ANALOG)>, /* LCD_CLK */
-						 <STM32_PINMUX('I', 10, ANALOG)>, /* LCD_HSYNC */
-						 <STM32_PINMUX('I',  9, ANALOG)>, /* LCD_VSYNC */
-						 <STM32_PINMUX('F', 10, ANALOG)>, /* LCD_DE */
-						 <STM32_PINMUX('H',  2, ANALOG)>, /* LCD_R0 */
-						 <STM32_PINMUX('H',  3, ANALOG)>, /* LCD_R1 */
-						 <STM32_PINMUX('H',  8, ANALOG)>, /* LCD_R2 */
-						 <STM32_PINMUX('H',  9, ANALOG)>, /* LCD_R3 */
-						 <STM32_PINMUX('H', 10, ANALOG)>, /* LCD_R4 */
-						 <STM32_PINMUX('C',  0, ANALOG)>, /* LCD_R5 */
-						 <STM32_PINMUX('H', 12, ANALOG)>, /* LCD_R6 */
-						 <STM32_PINMUX('E', 15, ANALOG)>, /* LCD_R7 */
-						 <STM32_PINMUX('E',  5, ANALOG)>, /* LCD_G0 */
-						 <STM32_PINMUX('E',  6, ANALOG)>, /* LCD_G1 */
-						 <STM32_PINMUX('H', 13, ANALOG)>, /* LCD_G2 */
-						 <STM32_PINMUX('H', 14, ANALOG)>, /* LCD_G3 */
-						 <STM32_PINMUX('H', 15, ANALOG)>, /* LCD_G4 */
-						 <STM32_PINMUX('I',  0, ANALOG)>, /* LCD_G5 */
-						 <STM32_PINMUX('I',  1, ANALOG)>, /* LCD_G6 */
-						 <STM32_PINMUX('I',  2, ANALOG)>, /* LCD_G7 */
-						 <STM32_PINMUX('D',  9, ANALOG)>, /* LCD_B0 */
-						 <STM32_PINMUX('G', 12, ANALOG)>, /* LCD_B1 */
-						 <STM32_PINMUX('G', 10, ANALOG)>, /* LCD_B2 */
-						 <STM32_PINMUX('D', 10, ANALOG)>, /* LCD_B3 */
-						 <STM32_PINMUX('I',  4, ANALOG)>, /* LCD_B4 */
-						 <STM32_PINMUX('A',  3, ANALOG)>, /* LCD_B5 */
-						 <STM32_PINMUX('B',  8, ANALOG)>, /* LCD_B6 */
-						 <STM32_PINMUX('D',  8, ANALOG)>; /* LCD_B7 */
-				};
-			};
-
-			ltdc_pins_b: ltdc-b-0 {
-				pins {
-					pinmux = <STM32_PINMUX('I', 14, AF14)>, /* LCD_CLK */
-						 <STM32_PINMUX('I', 12, AF14)>, /* LCD_HSYNC */
-						 <STM32_PINMUX('I', 13, AF14)>, /* LCD_VSYNC */
-						 <STM32_PINMUX('K',  7, AF14)>, /* LCD_DE */
-						 <STM32_PINMUX('I', 15, AF14)>, /* LCD_R0 */
-						 <STM32_PINMUX('J',  0, AF14)>, /* LCD_R1 */
-						 <STM32_PINMUX('J',  1, AF14)>, /* LCD_R2 */
-						 <STM32_PINMUX('J',  2, AF14)>, /* LCD_R3 */
-						 <STM32_PINMUX('J',  3, AF14)>, /* LCD_R4 */
-						 <STM32_PINMUX('J',  4, AF14)>, /* LCD_R5 */
-						 <STM32_PINMUX('J',  5, AF14)>, /* LCD_R6 */
-						 <STM32_PINMUX('J',  6, AF14)>, /* LCD_R7 */
-						 <STM32_PINMUX('J',  7, AF14)>, /* LCD_G0 */
-						 <STM32_PINMUX('J',  8, AF14)>, /* LCD_G1 */
-						 <STM32_PINMUX('J',  9, AF14)>, /* LCD_G2 */
-						 <STM32_PINMUX('J', 10, AF14)>, /* LCD_G3 */
-						 <STM32_PINMUX('J', 11, AF14)>, /* LCD_G4 */
-						 <STM32_PINMUX('K',  0, AF14)>, /* LCD_G5 */
-						 <STM32_PINMUX('K',  1, AF14)>, /* LCD_G6 */
-						 <STM32_PINMUX('K',  2, AF14)>, /* LCD_G7 */
-						 <STM32_PINMUX('J', 12, AF14)>, /* LCD_B0 */
-						 <STM32_PINMUX('J', 13, AF14)>, /* LCD_B1 */
-						 <STM32_PINMUX('J', 14, AF14)>, /* LCD_B2 */
-						 <STM32_PINMUX('J', 15, AF14)>, /* LCD_B3 */
-						 <STM32_PINMUX('K',  3, AF14)>, /* LCD_B4 */
-						 <STM32_PINMUX('K',  4, AF14)>, /* LCD_B5 */
-						 <STM32_PINMUX('K',  5, AF14)>, /* LCD_B6 */
-						 <STM32_PINMUX('K',  6, AF14)>; /* LCD_B7 */
-					bias-disable;
-					drive-push-pull;
-					slew-rate = <1>;
-				};
-			};
-
-			ltdc_pins_sleep_b: ltdc-b-1 {
-				pins {
-					pinmux = <STM32_PINMUX('I', 14, ANALOG)>, /* LCD_CLK */
-						 <STM32_PINMUX('I', 12, ANALOG)>, /* LCD_HSYNC */
-						 <STM32_PINMUX('I', 13, ANALOG)>, /* LCD_VSYNC */
-						 <STM32_PINMUX('K',  7, ANALOG)>, /* LCD_DE */
-						 <STM32_PINMUX('I', 15, ANALOG)>, /* LCD_R0 */
-						 <STM32_PINMUX('J',  0, ANALOG)>, /* LCD_R1 */
-						 <STM32_PINMUX('J',  1, ANALOG)>, /* LCD_R2 */
-						 <STM32_PINMUX('J',  2, ANALOG)>, /* LCD_R3 */
-						 <STM32_PINMUX('J',  3, ANALOG)>, /* LCD_R4 */
-						 <STM32_PINMUX('J',  4, ANALOG)>, /* LCD_R5 */
-						 <STM32_PINMUX('J',  5, ANALOG)>, /* LCD_R6 */
-						 <STM32_PINMUX('J',  6, ANALOG)>, /* LCD_R7 */
-						 <STM32_PINMUX('J',  7, ANALOG)>, /* LCD_G0 */
-						 <STM32_PINMUX('J',  8, ANALOG)>, /* LCD_G1 */
-						 <STM32_PINMUX('J',  9, ANALOG)>, /* LCD_G2 */
-						 <STM32_PINMUX('J', 10, ANALOG)>, /* LCD_G3 */
-						 <STM32_PINMUX('J', 11, ANALOG)>, /* LCD_G4 */
-						 <STM32_PINMUX('K',  0, ANALOG)>, /* LCD_G5 */
-						 <STM32_PINMUX('K',  1, ANALOG)>, /* LCD_G6 */
-						 <STM32_PINMUX('K',  2, ANALOG)>, /* LCD_G7 */
-						 <STM32_PINMUX('J', 12, ANALOG)>, /* LCD_B0 */
-						 <STM32_PINMUX('J', 13, ANALOG)>, /* LCD_B1 */
-						 <STM32_PINMUX('J', 14, ANALOG)>, /* LCD_B2 */
-						 <STM32_PINMUX('J', 15, ANALOG)>, /* LCD_B3 */
-						 <STM32_PINMUX('K',  3, ANALOG)>, /* LCD_B4 */
-						 <STM32_PINMUX('K',  4, ANALOG)>, /* LCD_B5 */
-						 <STM32_PINMUX('K',  5, ANALOG)>, /* LCD_B6 */
-						 <STM32_PINMUX('K',  6, ANALOG)>; /* LCD_B7 */
-				};
-			};
-
-			m_can1_pins_a: m-can1-0 {
-				pins1 {
-					pinmux = <STM32_PINMUX('H', 13, AF9)>; /* CAN1_TX */
-					slew-rate = <1>;
-					drive-push-pull;
-					bias-disable;
-				};
-				pins2 {
-					pinmux = <STM32_PINMUX('I', 9, AF9)>; /* CAN1_RX */
-					bias-disable;
-				};
-			};
-
-			m_can1_sleep_pins_a: m_can1-sleep-0 {
-				pins {
-					pinmux = <STM32_PINMUX('H', 13, ANALOG)>, /* CAN1_TX */
-						 <STM32_PINMUX('I', 9, ANALOG)>; /* CAN1_RX */
-				};
-			};
-
-			pwm2_pins_a: pwm2-0 {
-				pins {
-					pinmux = <STM32_PINMUX('A', 3, AF1)>; /* TIM2_CH4 */
-					bias-pull-down;
-					drive-push-pull;
-					slew-rate = <0>;
-				};
-			};
-
-			pwm8_pins_a: pwm8-0 {
-				pins {
-					pinmux = <STM32_PINMUX('I', 2, AF3)>; /* TIM8_CH4 */
-					bias-pull-down;
-					drive-push-pull;
-					slew-rate = <0>;
-				};
-			};
-
-			pwm12_pins_a: pwm12-0 {
-				pins {
-					pinmux = <STM32_PINMUX('H', 6, AF2)>; /* TIM12_CH1 */
-					bias-pull-down;
-					drive-push-pull;
-					slew-rate = <0>;
-				};
-			};
-
-			qspi_clk_pins_a: qspi-clk-0 {
-				pins {
-					pinmux = <STM32_PINMUX('F', 10, AF9)>; /* QSPI_CLK */
-					bias-disable;
-					drive-push-pull;
-					slew-rate = <3>;
-				};
-			};
-
-			qspi_clk_sleep_pins_a: qspi-clk-sleep-0 {
-				pins {
-					pinmux = <STM32_PINMUX('F', 10, ANALOG)>; /* QSPI_CLK */
-				};
-			};
-
-			qspi_bk1_pins_a: qspi-bk1-0 {
-				pins1 {
-					pinmux = <STM32_PINMUX('F', 8, AF10)>, /* QSPI_BK1_IO0 */
-						 <STM32_PINMUX('F', 9, AF10)>, /* QSPI_BK1_IO1 */
-						 <STM32_PINMUX('F', 7, AF9)>, /* QSPI_BK1_IO2 */
-						 <STM32_PINMUX('F', 6, AF9)>; /* QSPI_BK1_IO3 */
-					bias-disable;
-					drive-push-pull;
-					slew-rate = <1>;
-				};
-				pins2 {
-					pinmux = <STM32_PINMUX('B', 6, AF10)>; /* QSPI_BK1_NCS */
-					bias-pull-up;
-					drive-push-pull;
-					slew-rate = <1>;
-				};
-			};
-
-			qspi_bk1_sleep_pins_a: qspi-bk1-sleep-0 {
-				pins {
-					pinmux = <STM32_PINMUX('F', 8, ANALOG)>, /* QSPI_BK1_IO0 */
-						 <STM32_PINMUX('F', 9, ANALOG)>, /* QSPI_BK1_IO1 */
-						 <STM32_PINMUX('F', 7, ANALOG)>, /* QSPI_BK1_IO2 */
-						 <STM32_PINMUX('F', 6, ANALOG)>, /* QSPI_BK1_IO3 */
-						 <STM32_PINMUX('B', 6, ANALOG)>; /* QSPI_BK1_NCS */
-				};
-			};
-
-			qspi_bk2_pins_a: qspi-bk2-0 {
-				pins1 {
-					pinmux = <STM32_PINMUX('H', 2, AF9)>, /* QSPI_BK2_IO0 */
-						 <STM32_PINMUX('H', 3, AF9)>, /* QSPI_BK2_IO1 */
-						 <STM32_PINMUX('G', 10, AF11)>, /* QSPI_BK2_IO2 */
-						 <STM32_PINMUX('G', 7, AF11)>; /* QSPI_BK2_IO3 */
-					bias-disable;
-					drive-push-pull;
-					slew-rate = <1>;
-				};
-				pins2 {
-					pinmux = <STM32_PINMUX('C', 0, AF10)>; /* QSPI_BK2_NCS */
-					bias-pull-up;
-					drive-push-pull;
-					slew-rate = <1>;
-				};
-			};
-
-			qspi_bk2_sleep_pins_a: qspi-bk2-sleep-0 {
-				pins {
-					pinmux = <STM32_PINMUX('H', 2, ANALOG)>, /* QSPI_BK2_IO0 */
-						 <STM32_PINMUX('H', 3, ANALOG)>, /* QSPI_BK2_IO1 */
-						 <STM32_PINMUX('G', 10, ANALOG)>, /* QSPI_BK2_IO2 */
-						 <STM32_PINMUX('G', 7, ANALOG)>, /* QSPI_BK2_IO3 */
-						 <STM32_PINMUX('C', 0, ANALOG)>; /* QSPI_BK2_NCS */
-				};
-			};
-
-			sai2a_pins_a: sai2a-0 {
-				pins {
-					pinmux = <STM32_PINMUX('I', 5, AF10)>, /* SAI2_SCK_A */
-						 <STM32_PINMUX('I', 6, AF10)>, /* SAI2_SD_A */
-						 <STM32_PINMUX('I', 7, AF10)>, /* SAI2_FS_A */
-						 <STM32_PINMUX('E', 0, AF10)>; /* SAI2_MCLK_A */
-					slew-rate = <0>;
-					drive-push-pull;
-					bias-disable;
-				};
-			};
-
-			sai2a_sleep_pins_a: sai2a-1 {
-				pins {
-					pinmux = <STM32_PINMUX('I', 5, ANALOG)>, /* SAI2_SCK_A */
-						 <STM32_PINMUX('I', 6, ANALOG)>, /* SAI2_SD_A */
-						 <STM32_PINMUX('I', 7, ANALOG)>, /* SAI2_FS_A */
-						 <STM32_PINMUX('E', 0, ANALOG)>; /* SAI2_MCLK_A */
-				};
-			};
-
-			sai2b_pins_a: sai2b-0 {
-				pins1 {
-					pinmux = <STM32_PINMUX('E', 12, AF10)>, /* SAI2_SCK_B */
-						 <STM32_PINMUX('E', 13, AF10)>, /* SAI2_FS_B */
-						 <STM32_PINMUX('E', 14, AF10)>; /* SAI2_MCLK_B */
-					slew-rate = <0>;
-					drive-push-pull;
-					bias-disable;
-				};
-				pins2 {
-					pinmux = <STM32_PINMUX('F', 11, AF10)>; /* SAI2_SD_B */
-					bias-disable;
-				};
-			};
-
-			sai2b_sleep_pins_a: sai2b-1 {
-				pins {
-					pinmux = <STM32_PINMUX('F', 11, ANALOG)>, /* SAI2_SD_B */
-						 <STM32_PINMUX('E', 12, ANALOG)>, /* SAI2_SCK_B */
-						 <STM32_PINMUX('E', 13, ANALOG)>, /* SAI2_FS_B */
-						 <STM32_PINMUX('E', 14, ANALOG)>; /* SAI2_MCLK_B */
-				};
-			};
-
-			sai2b_pins_b: sai2b-2 {
-				pins {
-					pinmux = <STM32_PINMUX('F', 11, AF10)>; /* SAI2_SD_B */
-					bias-disable;
-				};
-			};
-
-			sai2b_sleep_pins_b: sai2b-3 {
-				pins {
-					pinmux = <STM32_PINMUX('F', 11, ANALOG)>; /* SAI2_SD_B */
-				};
-			};
-
-			sai4a_pins_a: sai4a-0 {
-				pins {
-					pinmux = <STM32_PINMUX('B', 5, AF10)>; /* SAI4_SD_A */
-					slew-rate = <0>;
-					drive-push-pull;
-					bias-disable;
-				};
-			};
-
-			sai4a_sleep_pins_a: sai4a-1 {
-				pins {
-					pinmux = <STM32_PINMUX('B', 5, ANALOG)>; /* SAI4_SD_A */
-				};
-			};
-
-			sdmmc1_b4_pins_a: sdmmc1-b4-0 {
-				pins {
-					pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
-						 <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
-						 <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */
-						 <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */
-						 <STM32_PINMUX('C', 12, AF12)>, /* SDMMC1_CK */
-						 <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
-					slew-rate = <3>;
-					drive-push-pull;
-					bias-disable;
-				};
-			};
-
-			sdmmc1_b4_od_pins_a: sdmmc1-b4-od-0 {
-				pins1 {
-					pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
-						 <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
-						 <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */
-						 <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */
-						 <STM32_PINMUX('C', 12, AF12)>; /* SDMMC1_CK */
-					slew-rate = <3>;
-					drive-push-pull;
-					bias-disable;
-				};
-				pins2{
-					pinmux = <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
-					slew-rate = <3>;
-					drive-open-drain;
-					bias-disable;
-				};
-			};
-
-			sdmmc1_b4_sleep_pins_a: sdmmc1-b4-sleep-0 {
-				pins {
-					pinmux = <STM32_PINMUX('C', 8, ANALOG)>, /* SDMMC1_D0 */
-						 <STM32_PINMUX('C', 9, ANALOG)>, /* SDMMC1_D1 */
-						 <STM32_PINMUX('C', 10, ANALOG)>, /* SDMMC1_D2 */
-						 <STM32_PINMUX('C', 11, ANALOG)>, /* SDMMC1_D3 */
-						 <STM32_PINMUX('C', 12, ANALOG)>, /* SDMMC1_CK */
-						 <STM32_PINMUX('D', 2, ANALOG)>; /* SDMMC1_CMD */
-				};
-			};
-
-			sdmmc1_dir_pins_a: sdmmc1-dir-0 {
-				pins1 {
-					pinmux = <STM32_PINMUX('F', 2, AF11)>, /* SDMMC1_D0DIR */
-						 <STM32_PINMUX('C', 7, AF8)>, /* SDMMC1_D123DIR */
-						 <STM32_PINMUX('B', 9, AF11)>; /* SDMMC1_CDIR */
-					slew-rate = <3>;
-					drive-push-pull;
-					bias-pull-up;
-				};
-				pins2{
-					pinmux = <STM32_PINMUX('E', 4, AF8)>; /* SDMMC1_CKIN */
-					bias-pull-up;
-				};
-			};
-
-			sdmmc1_dir_sleep_pins_a: sdmmc1-dir-sleep-0 {
-				pins {
-					pinmux = <STM32_PINMUX('F', 2, ANALOG)>, /* SDMMC1_D0DIR */
-						 <STM32_PINMUX('C', 7, ANALOG)>, /* SDMMC1_D123DIR */
-						 <STM32_PINMUX('B', 9, ANALOG)>, /* SDMMC1_CDIR */
-						 <STM32_PINMUX('E', 4, ANALOG)>; /* SDMMC1_CKIN */
-				};
-			};
-
-			spdifrx_pins_a: spdifrx-0 {
-				pins {
-					pinmux = <STM32_PINMUX('G', 12, AF8)>; /* SPDIF_IN1 */
-					bias-disable;
-				};
-			};
-
-			spdifrx_sleep_pins_a: spdifrx-1 {
-				pins {
-					pinmux = <STM32_PINMUX('G', 12, ANALOG)>; /* SPDIF_IN1 */
-				};
-			};
-
-			uart4_pins_a: uart4-0 {
-				pins1 {
-					pinmux = <STM32_PINMUX('G', 11, AF6)>; /* UART4_TX */
-					bias-disable;
-					drive-push-pull;
-					slew-rate = <0>;
-				};
-				pins2 {
-					pinmux = <STM32_PINMUX('B', 2, AF8)>; /* UART4_RX */
-					bias-disable;
-				};
-			};
-
-			uart4_pins_b: uart4-1 {
-				pins1 {
-					pinmux = <STM32_PINMUX('D', 1, AF8)>; /* UART4_TX */
-					bias-disable;
-					drive-push-pull;
-					slew-rate = <0>;
-				};
-				pins2 {
-					pinmux = <STM32_PINMUX('B', 2, AF8)>; /* UART4_RX */
-					bias-disable;
-				};
-			};
-
-			uart7_pins_a: uart7-0 {
-				pins1 {
-					pinmux = <STM32_PINMUX('E', 8, AF7)>; /* UART4_TX */
-					bias-disable;
-					drive-push-pull;
-					slew-rate = <0>;
-				};
-				pins2 {
-					pinmux = <STM32_PINMUX('E', 7, AF7)>, /* UART4_RX */
-						 <STM32_PINMUX('E', 10, AF7)>, /* UART4_CTS */
-						 <STM32_PINMUX('E', 9, AF7)>; /* UART4_RTS */
-					bias-disable;
-				};
-			};
-		};
-
-		pinctrl_z: pin-controller-z@54004000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "st,stm32mp157-z-pinctrl";
-			ranges = <0 0x54004000 0x400>;
-			pins-are-numbered;
-			interrupt-parent = <&exti>;
-			st,syscfg = <&exti 0x60 0xff>;
-
-			gpioz: gpio@54004000 {
-				gpio-controller;
-				#gpio-cells = <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg = <0 0x400>;
-				clocks = <&rcc GPIOZ>;
-				st,bank-name = "GPIOZ";
-				st,bank-ioport = <11>;
-				status = "disabled";
-			};
-
-			i2c2_pins_b2: i2c2-0 {
-				pins {
-					pinmux = <STM32_PINMUX('Z', 0, AF3)>; /* I2C2_SCL */
-					bias-disable;
-					drive-open-drain;
-					slew-rate = <0>;
-				};
-			};
-
-			i2c2_pins_sleep_b2: i2c2-1 {
-				pins {
-					pinmux = <STM32_PINMUX('Z', 0, ANALOG)>; /* I2C2_SCL */
-				};
-			};
-
-			i2c4_pins_a: i2c4-0 {
-				pins {
-					pinmux = <STM32_PINMUX('Z', 4, AF6)>, /* I2C4_SCL */
-						 <STM32_PINMUX('Z', 5, AF6)>; /* I2C4_SDA */
-					bias-disable;
-					drive-open-drain;
-					slew-rate = <0>;
-				};
-			};
-
-			i2c4_pins_sleep_a: i2c4-1 {
-				pins {
-					pinmux = <STM32_PINMUX('Z', 4, ANALOG)>, /* I2C4_SCL */
-						 <STM32_PINMUX('Z', 5, ANALOG)>; /* I2C4_SDA */
-				};
-			};
-
-			spi1_pins_a: spi1-0 {
-				pins1 {
-					pinmux = <STM32_PINMUX('Z', 0, AF5)>, /* SPI1_SCK */
-						 <STM32_PINMUX('Z', 2, AF5)>; /* SPI1_MOSI */
-					bias-disable;
-					drive-push-pull;
-					slew-rate = <1>;
-				};
-
-				pins2 {
-					pinmux = <STM32_PINMUX('Z', 1, AF5)>; /* SPI1_MISO */
-					bias-disable;
-				};
-			};
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stm32mp157.dtsi b/arch/arm/boot/dts/stm32mp157.dtsi
new file mode 100644
index 0000000..3f0a4a9
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157.dtsi
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
+ */
+
+#include "stm32mp153.dtsi"
+
+/ {
+	soc {
+		gpu: gpu@59000000 {
+			compatible = "vivante,gc";
+			reg = <0x59000000 0x800>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc GPU>, <&rcc GPU_K>;
+			clock-names = "bus" ,"core";
+			resets = <&rcc GPU_R>;
+			status = "disabled";
+		};
+
+		dsi: dsi@5a000000 {
+			compatible = "st,stm32-dsi";
+			reg = <0x5a000000 0x800>;
+			clocks = <&rcc DSI_K>, <&clk_hse>, <&rcc DSI_PX>;
+			clock-names = "pclk", "ref", "px_clk";
+			resets = <&rcc DSI_R>;
+			reset-names = "apb";
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/stm32mp157a-avenger96.dts b/arch/arm/boot/dts/stm32mp157a-avenger96.dts
index 628c74a..cbfa407 100644
--- a/arch/arm/boot/dts/stm32mp157a-avenger96.dts
+++ b/arch/arm/boot/dts/stm32mp157a-avenger96.dts
@@ -6,8 +6,9 @@
 
 /dts-v1/;
 
-#include "stm32mp157c.dtsi"
-#include "stm32mp157xac-pinctrl.dtsi"
+#include "stm32mp157.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxac-pinctrl.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/mfd/st,stpmic1.h>
 
diff --git a/arch/arm/boot/dts/stm32mp157a-dk1.dts b/arch/arm/boot/dts/stm32mp157a-dk1.dts
index 984a47c..d03d4cd 100644
--- a/arch/arm/boot/dts/stm32mp157a-dk1.dts
+++ b/arch/arm/boot/dts/stm32mp157a-dk1.dts
@@ -6,10 +6,10 @@
 
 /dts-v1/;
 
-#include "stm32mp157c.dtsi"
-#include "stm32mp157xac-pinctrl.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/mfd/st,stpmic1.h>
+#include "stm32mp157.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxac-pinctrl.dtsi"
+#include "stm32mp15xx-dkx.dtsi"
 
 / {
 	model = "STMicroelectronics STM32MP157A-DK1 Discovery Board";
@@ -23,494 +23,4 @@ aliases {
 	chosen {
 		stdout-path = "serial0:115200n8";
 	};
-
-	memory@c0000000 {
-		device_type = "memory";
-		reg = <0xc0000000 0x20000000>;
-	};
-
-	reserved-memory {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		mcuram2: mcuram2@10000000 {
-			compatible = "shared-dma-pool";
-			reg = <0x10000000 0x40000>;
-			no-map;
-		};
-
-		vdev0vring0: vdev0vring0@10040000 {
-			compatible = "shared-dma-pool";
-			reg = <0x10040000 0x1000>;
-			no-map;
-		};
-
-		vdev0vring1: vdev0vring1@10041000 {
-			compatible = "shared-dma-pool";
-			reg = <0x10041000 0x1000>;
-			no-map;
-		};
-
-		vdev0buffer: vdev0buffer@10042000 {
-			compatible = "shared-dma-pool";
-			reg = <0x10042000 0x4000>;
-			no-map;
-		};
-
-		mcuram: mcuram@30000000 {
-			compatible = "shared-dma-pool";
-			reg = <0x30000000 0x40000>;
-			no-map;
-		};
-
-		retram: retram@38000000 {
-			compatible = "shared-dma-pool";
-			reg = <0x38000000 0x10000>;
-			no-map;
-		};
-
-		gpu_reserved: gpu@d4000000 {
-			reg = <0xd4000000 0x4000000>;
-			no-map;
-		};
-	};
-
-	led {
-		compatible = "gpio-leds";
-		blue {
-			label = "heartbeat";
-			gpios = <&gpiod 11 GPIO_ACTIVE_HIGH>;
-			linux,default-trigger = "heartbeat";
-			default-state = "off";
-		};
-	};
-
-	sound {
-		compatible = "audio-graph-card";
-		label = "STM32MP1-DK";
-		routing =
-			"Playback" , "MCLK",
-			"Capture" , "MCLK",
-			"MICL" , "Mic Bias";
-		dais = <&sai2a_port &sai2b_port &i2s2_port>;
-		status = "okay";
-	};
-};
-
-&adc {
-	pinctrl-names = "default";
-	pinctrl-0 = <&adc12_ain_pins_a>, <&adc12_usb_cc_pins_a>;
-	vdd-supply = <&vdd>;
-	vdda-supply = <&vdd>;
-	vref-supply = <&vrefbuf>;
-	status = "disabled";
-	adc1: adc@0 {
-		/*
-		 * Type-C USB_PWR_CC1 & USB_PWR_CC2 on in18 & in19.
-		 * Use at least 5 * RC time, e.g. 5 * (Rp + Rd) * C:
-		 * 5 * (56 + 47kOhms) * 5pF => 2.5us.
-		 * Use arbitrary margin here (e.g. 5us).
-		 */
-		st,min-sample-time-nsecs = <5000>;
-		/* AIN connector, USB Type-C CC1 & CC2 */
-		st,adc-channels = <0 1 6 13 18 19>;
-		status = "okay";
-	};
-	adc2: adc@100 {
-		/* AIN connector, USB Type-C CC1 & CC2 */
-		st,adc-channels = <0 1 2 6 18 19>;
-		st,min-sample-time-nsecs = <5000>;
-		status = "okay";
-	};
-};
-
-&cec {
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&cec_pins_b>;
-	pinctrl-1 = <&cec_pins_sleep_b>;
-	status = "okay";
-};
-
-&ethernet0 {
-	status = "okay";
-	pinctrl-0 = <&ethernet0_rgmii_pins_a>;
-	pinctrl-1 = <&ethernet0_rgmii_pins_sleep_a>;
-	pinctrl-names = "default", "sleep";
-	phy-mode = "rgmii-id";
-	max-speed = <1000>;
-	phy-handle = <&phy0>;
-
-	mdio0 {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		compatible = "snps,dwmac-mdio";
-		phy0: ethernet-phy@0 {
-			reg = <0>;
-		};
-	};
-};
-
-&gpu {
-	contiguous-area = <&gpu_reserved>;
-	status = "okay";
-};
-
-&i2c1 {
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&i2c1_pins_a>;
-	pinctrl-1 = <&i2c1_pins_sleep_a>;
-	i2c-scl-rising-time-ns = <100>;
-	i2c-scl-falling-time-ns = <7>;
-	status = "okay";
-	/delete-property/dmas;
-	/delete-property/dma-names;
-
-	hdmi-transmitter@39 {
-		compatible = "sil,sii9022";
-		reg = <0x39>;
-		iovcc-supply = <&v3v3_hdmi>;
-		cvcc12-supply = <&v1v2_hdmi>;
-		reset-gpios = <&gpioa 10 GPIO_ACTIVE_LOW>;
-		interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
-		interrupt-parent = <&gpiog>;
-		#sound-dai-cells = <0>;
-		status = "okay";
-
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				reg = <0>;
-				sii9022_in: endpoint {
-					remote-endpoint = <&ltdc_ep0_out>;
-				};
-			};
-
-			port@3 {
-				reg = <3>;
-				sii9022_tx_endpoint: endpoint {
-					remote-endpoint = <&i2s2_endpoint>;
-				};
-			};
-		};
-	};
-
-	cs42l51: cs42l51@4a {
-		compatible = "cirrus,cs42l51";
-		reg = <0x4a>;
-		#sound-dai-cells = <0>;
-		VL-supply = <&v3v3>;
-		VD-supply = <&v1v8_audio>;
-		VA-supply = <&v1v8_audio>;
-		VAHP-supply = <&v1v8_audio>;
-		reset-gpios = <&gpiog 9 GPIO_ACTIVE_LOW>;
-		clocks = <&sai2a>;
-		clock-names = "MCLK";
-		status = "okay";
-
-		cs42l51_port: port {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			cs42l51_tx_endpoint: endpoint@0 {
-				reg = <0>;
-				remote-endpoint = <&sai2a_endpoint>;
-				frame-master;
-				bitclock-master;
-			};
-
-			cs42l51_rx_endpoint: endpoint@1 {
-				reg = <1>;
-				remote-endpoint = <&sai2b_endpoint>;
-				frame-master;
-				bitclock-master;
-			};
-		};
-	};
-};
-
-&i2c4 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&i2c4_pins_a>;
-	i2c-scl-rising-time-ns = <185>;
-	i2c-scl-falling-time-ns = <20>;
-	status = "okay";
-	/* spare dmas for other usage */
-	/delete-property/dmas;
-	/delete-property/dma-names;
-
-	pmic: stpmic@33 {
-		compatible = "st,stpmic1";
-		reg = <0x33>;
-		interrupts-extended = <&gpioa 0 IRQ_TYPE_EDGE_FALLING>;
-		interrupt-controller;
-		#interrupt-cells = <2>;
-		status = "okay";
-
-		regulators {
-			compatible = "st,stpmic1-regulators";
-			ldo1-supply = <&v3v3>;
-			ldo3-supply = <&vdd_ddr>;
-			ldo6-supply = <&v3v3>;
-			pwr_sw1-supply = <&bst_out>;
-			pwr_sw2-supply = <&bst_out>;
-
-			vddcore: buck1 {
-				regulator-name = "vddcore";
-				regulator-min-microvolt = <1200000>;
-				regulator-max-microvolt = <1350000>;
-				regulator-always-on;
-				regulator-initial-mode = <0>;
-				regulator-over-current-protection;
-			};
-
-			vdd_ddr: buck2 {
-				regulator-name = "vdd_ddr";
-				regulator-min-microvolt = <1350000>;
-				regulator-max-microvolt = <1350000>;
-				regulator-always-on;
-				regulator-initial-mode = <0>;
-				regulator-over-current-protection;
-			};
-
-			vdd: buck3 {
-				regulator-name = "vdd";
-				regulator-min-microvolt = <3300000>;
-				regulator-max-microvolt = <3300000>;
-				regulator-always-on;
-				st,mask-reset;
-				regulator-initial-mode = <0>;
-				regulator-over-current-protection;
-			};
-
-			v3v3: buck4 {
-				regulator-name = "v3v3";
-				regulator-min-microvolt = <3300000>;
-				regulator-max-microvolt = <3300000>;
-				regulator-always-on;
-				regulator-over-current-protection;
-				regulator-initial-mode = <0>;
-			};
-
-			v1v8_audio: ldo1 {
-				regulator-name = "v1v8_audio";
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <1800000>;
-				regulator-always-on;
-				interrupts = <IT_CURLIM_LDO1 0>;
-			};
-
-			v3v3_hdmi: ldo2 {
-				regulator-name = "v3v3_hdmi";
-				regulator-min-microvolt = <3300000>;
-				regulator-max-microvolt = <3300000>;
-				regulator-always-on;
-				interrupts = <IT_CURLIM_LDO2 0>;
-			};
-
-			vtt_ddr: ldo3 {
-				regulator-name = "vtt_ddr";
-				regulator-min-microvolt = <500000>;
-				regulator-max-microvolt = <750000>;
-				regulator-always-on;
-				regulator-over-current-protection;
-			};
-
-			vdd_usb: ldo4 {
-				regulator-name = "vdd_usb";
-				regulator-min-microvolt = <3300000>;
-				regulator-max-microvolt = <3300000>;
-				interrupts = <IT_CURLIM_LDO4 0>;
-			};
-
-			vdda: ldo5 {
-				regulator-name = "vdda";
-				regulator-min-microvolt = <2900000>;
-				regulator-max-microvolt = <2900000>;
-				interrupts = <IT_CURLIM_LDO5 0>;
-				regulator-boot-on;
-			};
-
-			v1v2_hdmi: ldo6 {
-				regulator-name = "v1v2_hdmi";
-				regulator-min-microvolt = <1200000>;
-				regulator-max-microvolt = <1200000>;
-				regulator-always-on;
-				interrupts = <IT_CURLIM_LDO6 0>;
-			};
-
-			vref_ddr: vref_ddr {
-				regulator-name = "vref_ddr";
-				regulator-always-on;
-				regulator-over-current-protection;
-			};
-
-			 bst_out: boost {
-				regulator-name = "bst_out";
-				interrupts = <IT_OCP_BOOST 0>;
-			 };
-
-			vbus_otg: pwr_sw1 {
-				regulator-name = "vbus_otg";
-				interrupts = <IT_OCP_OTG 0>;
-			 };
-
-			 vbus_sw: pwr_sw2 {
-				regulator-name = "vbus_sw";
-				interrupts = <IT_OCP_SWOUT 0>;
-				regulator-active-discharge = <1>;
-			 };
-		};
-
-		onkey {
-			compatible = "st,stpmic1-onkey";
-			interrupts = <IT_PONKEY_F 0>, <IT_PONKEY_R 0>;
-			interrupt-names = "onkey-falling", "onkey-rising";
-			power-off-time-sec = <10>;
-			status = "okay";
-		};
-
-		watchdog {
-			compatible = "st,stpmic1-wdt";
-			status = "disabled";
-		};
-	};
-};
-
-&i2s2 {
-	clocks = <&rcc SPI2>, <&rcc SPI2_K>, <&rcc PLL3_Q>, <&rcc PLL3_R>;
-	clock-names = "pclk", "i2sclk", "x8k", "x11k";
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&i2s2_pins_a>;
-	pinctrl-1 = <&i2s2_pins_sleep_a>;
-	status = "okay";
-
-	i2s2_port: port {
-		i2s2_endpoint: endpoint {
-			remote-endpoint = <&sii9022_tx_endpoint>;
-			format = "i2s";
-			mclk-fs = <256>;
-		};
-	};
-};
-
-&ipcc {
-	status = "okay";
-};
-
-&iwdg2 {
-	timeout-sec = <32>;
-	status = "okay";
-};
-
-&ltdc {
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&ltdc_pins_a>;
-	pinctrl-1 = <&ltdc_pins_sleep_a>;
-	status = "okay";
-
-	port {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		ltdc_ep0_out: endpoint@0 {
-			reg = <0>;
-			remote-endpoint = <&sii9022_in>;
-		};
-	};
-};
-
-&m4_rproc {
-	memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
-			<&vdev0vring1>, <&vdev0buffer>;
-	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
-	mbox-names = "vq0", "vq1", "shutdown";
-	interrupt-parent = <&exti>;
-	interrupts = <68 1>;
-	status = "okay";
-};
-
-&pwr_regulators {
-	vdd-supply = <&vdd>;
-	vdd_3v3_usbfs-supply = <&vdd_usb>;
-};
-
-&rng1 {
-	status = "okay";
-};
-
-&rtc {
-	status = "okay";
-};
-
-&sai2 {
-	clocks = <&rcc SAI2>, <&rcc PLL3_Q>, <&rcc PLL3_R>;
-	clock-names = "pclk", "x8k", "x11k";
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&sai2a_pins_a>, <&sai2b_pins_b>;
-	pinctrl-1 = <&sai2a_sleep_pins_a>, <&sai2b_sleep_pins_b>;
-	status = "okay";
-
-	sai2a: audio-controller@4400b004 {
-		#clock-cells = <0>;
-		dma-names = "tx";
-		clocks = <&rcc SAI2_K>;
-		clock-names = "sai_ck";
-		status = "okay";
-
-		sai2a_port: port {
-			sai2a_endpoint: endpoint {
-				remote-endpoint = <&cs42l51_tx_endpoint>;
-				format = "i2s";
-				mclk-fs = <256>;
-				dai-tdm-slot-num = <2>;
-				dai-tdm-slot-width = <32>;
-			};
-		};
-	};
-
-	sai2b: audio-controller@4400b024 {
-		dma-names = "rx";
-		st,sync = <&sai2a 2>;
-		clocks = <&rcc SAI2_K>, <&sai2a>;
-		clock-names = "sai_ck", "MCLK";
-		status = "okay";
-
-		sai2b_port: port {
-			sai2b_endpoint: endpoint {
-				remote-endpoint = <&cs42l51_rx_endpoint>;
-				format = "i2s";
-				mclk-fs = <256>;
-				dai-tdm-slot-num = <2>;
-				dai-tdm-slot-width = <32>;
-			};
-		};
-	};
-};
-
-&sdmmc1 {
-	pinctrl-names = "default", "opendrain", "sleep";
-	pinctrl-0 = <&sdmmc1_b4_pins_a>;
-	pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
-	pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
-	broken-cd;
-	st,neg-edge;
-	bus-width = <4>;
-	vmmc-supply = <&v3v3>;
-	status = "okay";
-};
-
-&uart4 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&uart4_pins_a>;
-	status = "okay";
-};
-
-&vrefbuf {
-	regulator-min-microvolt = <2500000>;
-	regulator-max-microvolt = <2500000>;
-	vdda-supply = <&vdd>;
-	status = "okay";
 };
diff --git a/arch/arm/boot/dts/stm32mp157c-dk2.dts b/arch/arm/boot/dts/stm32mp157c-dk2.dts
index d26adcb..7985b80 100644
--- a/arch/arm/boot/dts/stm32mp157c-dk2.dts
+++ b/arch/arm/boot/dts/stm32mp157c-dk2.dts
@@ -6,11 +6,24 @@
 
 /dts-v1/;
 
-#include "stm32mp157a-dk1.dts"
+#include "stm32mp157.dtsi"
+#include "stm32mp15xc.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxac-pinctrl.dtsi"
+#include "stm32mp15xx-dkx.dtsi"
 
 / {
 	model = "STMicroelectronics STM32MP157C-DK2 Discovery Board";
 	compatible = "st,stm32mp157c-dk2", "st,stm32mp157";
+
+	aliases {
+		ethernet0 = &ethernet0;
+		serial0 = &uart4;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
 };
 
 &dsi {
diff --git a/arch/arm/boot/dts/stm32mp157c-ed1.dts b/arch/arm/boot/dts/stm32mp157c-ed1.dts
index b8cc0fb..1fc4325 100644
--- a/arch/arm/boot/dts/stm32mp157c-ed1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ed1.dts
@@ -5,8 +5,10 @@
  */
 /dts-v1/;
 
-#include "stm32mp157c.dtsi"
-#include "stm32mp157xaa-pinctrl.dtsi"
+#include "stm32mp157.dtsi"
+#include "stm32mp15xc.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxaa-pinctrl.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/mfd/st,stpmic1.h>
 
@@ -89,6 +91,22 @@ sd_switch: regulator-sd_switch {
 	};
 };
 
+&adc {
+	/* ANA0, ANA1 are dedicated pins and don't need pinctrl: only in6. */
+	pinctrl-0 = <&adc1_in6_pins_a>;
+	pinctrl-names = "default";
+	vdd-supply = <&vdd>;
+	vdda-supply = <&vdda>;
+	vref-supply = <&vdda>;
+	status = "disabled";
+	adc1: adc@0 {
+		st,adc-channels = <0 1 6>;
+		/* 16.5 ck_cycles sampling time */
+		st,min-sample-time-nsecs = <400>;
+		status = "okay";
+	};
+};
+
 &dac {
 	pinctrl-names = "default";
 	pinctrl-0 = <&dac_ch1_pins_a &dac_ch2_pins_a>;
@@ -305,6 +323,22 @@ &sdmmc1 {
 	status = "okay";
 };
 
+&sdmmc2 {
+	pinctrl-names = "default", "opendrain", "sleep";
+	pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_a>;
+	pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_a>;
+	pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_a>;
+	non-removable;
+	no-sd;
+	no-sdio;
+	st,neg-edge;
+	bus-width = <8>;
+	vmmc-supply = <&v3v3>;
+	vqmmc-supply = <&v3v3>;
+	mmc-ddr-3_3v;
+	status = "okay";
+};
+
 &timers6 {
 	status = "okay";
 	/* spare dmas for other usage */
diff --git a/arch/arm/boot/dts/stm32mp157c-ev1.dts b/arch/arm/boot/dts/stm32mp157c-ev1.dts
index 3789312..228e35e 100644
--- a/arch/arm/boot/dts/stm32mp157c-ev1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ev1.dts
@@ -283,6 +283,18 @@ flash1: mx66l51235l@1 {
 	};
 };
 
+&sdmmc3 {
+	pinctrl-names = "default", "opendrain", "sleep";
+	pinctrl-0 = <&sdmmc3_b4_pins_a>;
+	pinctrl-1 = <&sdmmc3_b4_od_pins_a>;
+	pinctrl-2 = <&sdmmc3_b4_sleep_pins_a>;
+	broken-cd;
+	st,neg-edge;
+	bus-width = <4>;
+	vmmc-supply = <&v3v3>;
+	status = "disabled";
+};
+
 &spi1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&spi1_pins_a>;
@@ -296,7 +308,8 @@ &timers2 {
 	status = "disabled";
 	pwm {
 		pinctrl-0 = <&pwm2_pins_a>;
-		pinctrl-names = "default";
+		pinctrl-1 = <&pwm2_sleep_pins_a>;
+		pinctrl-names = "default", "sleep";
 		status = "okay";
 	};
 	timer@1 {
@@ -310,7 +323,8 @@ &timers8 {
 	status = "disabled";
 	pwm {
 		pinctrl-0 = <&pwm8_pins_a>;
-		pinctrl-names = "default";
+		pinctrl-1 = <&pwm8_sleep_pins_a>;
+		pinctrl-names = "default", "sleep";
 		status = "okay";
 	};
 	timer@7 {
@@ -324,7 +338,8 @@ &timers12 {
 	status = "disabled";
 	pwm {
 		pinctrl-0 = <&pwm12_pins_a>;
-		pinctrl-names = "default";
+		pinctrl-1 = <&pwm12_sleep_pins_a>;
+		pinctrl-names = "default", "sleep";
 		status = "okay";
 	};
 	timer@11 {
@@ -340,6 +355,7 @@ &usbh_ehci {
 &usbotg_hs {
 	dr_mode = "peripheral";
 	phys = <&usbphyc_port1 0>;
+	phy-names = "usb2-phy";
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/stm32mp157c.dtsi b/arch/arm/boot/dts/stm32mp157c.dtsi
deleted file mode 100644
index ed8b258..0000000
--- a/arch/arm/boot/dts/stm32mp157c.dtsi
+++ /dev/null
@@ -1,1535 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/*
- * Copyright (C) STMicroelectronics 2017 - All Rights Reserved
- * Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
- */
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/clock/stm32mp1-clks.h>
-#include <dt-bindings/reset/stm32mp1-resets.h>
-
-/ {
-	#address-cells = <1>;
-	#size-cells = <1>;
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		cpu0: cpu@0 {
-			compatible = "arm,cortex-a7";
-			device_type = "cpu";
-			reg = <0>;
-		};
-
-		cpu1: cpu@1 {
-			compatible = "arm,cortex-a7";
-			device_type = "cpu";
-			reg = <1>;
-		};
-	};
-
-	psci {
-		compatible = "arm,psci";
-		method = "smc";
-		cpu_off = <0x84000002>;
-		cpu_on = <0x84000003>;
-	};
-
-	intc: interrupt-controller@a0021000 {
-		compatible = "arm,cortex-a7-gic";
-		#interrupt-cells = <3>;
-		interrupt-controller;
-		reg = <0xa0021000 0x1000>,
-		      <0xa0022000 0x2000>;
-	};
-
-	timer {
-		compatible = "arm,armv7-timer";
-		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
-		interrupt-parent = <&intc>;
-	};
-
-	clocks {
-		clk_hse: clk-hse {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <24000000>;
-		};
-
-		clk_hsi: clk-hsi {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <64000000>;
-		};
-
-		clk_lse: clk-lse {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <32768>;
-		};
-
-		clk_lsi: clk-lsi {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <32000>;
-		};
-
-		clk_csi: clk-csi {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <4000000>;
-		};
-	};
-
-	thermal-zones {
-		cpu_thermal: cpu-thermal {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&dts>;
-
-			trips {
-				cpu_alert1: cpu-alert1 {
-					temperature = <85000>;
-					hysteresis = <0>;
-					type = "passive";
-				};
-
-				cpu-crit {
-					temperature = <120000>;
-					hysteresis = <0>;
-					type = "critical";
-				};
-			};
-
-			cooling-maps {
-			};
-		};
-	};
-
-	booster: regulator-booster {
-		compatible = "st,stm32mp1-booster";
-		st,syscfg = <&syscfg>;
-		status = "disabled";
-	};
-
-	soc {
-		compatible = "simple-bus";
-		#address-cells = <1>;
-		#size-cells = <1>;
-		interrupt-parent = <&intc>;
-		ranges;
-
-		timers2: timer@40000000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-timers";
-			reg = <0x40000000 0x400>;
-			clocks = <&rcc TIM2_K>;
-			clock-names = "int";
-			dmas = <&dmamux1 18 0x400 0x1>,
-			       <&dmamux1 19 0x400 0x1>,
-			       <&dmamux1 20 0x400 0x1>,
-			       <&dmamux1 21 0x400 0x1>,
-			       <&dmamux1 22 0x400 0x1>;
-			dma-names = "ch1", "ch2", "ch3", "ch4", "up";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			timer@1 {
-				compatible = "st,stm32h7-timer-trigger";
-				reg = <1>;
-				status = "disabled";
-			};
-		};
-
-		timers3: timer@40001000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-timers";
-			reg = <0x40001000 0x400>;
-			clocks = <&rcc TIM3_K>;
-			clock-names = "int";
-			dmas = <&dmamux1 23 0x400 0x1>,
-			       <&dmamux1 24 0x400 0x1>,
-			       <&dmamux1 25 0x400 0x1>,
-			       <&dmamux1 26 0x400 0x1>,
-			       <&dmamux1 27 0x400 0x1>,
-			       <&dmamux1 28 0x400 0x1>;
-			dma-names = "ch1", "ch2", "ch3", "ch4", "up", "trig";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			timer@2 {
-				compatible = "st,stm32h7-timer-trigger";
-				reg = <2>;
-				status = "disabled";
-			};
-		};
-
-		timers4: timer@40002000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-timers";
-			reg = <0x40002000 0x400>;
-			clocks = <&rcc TIM4_K>;
-			clock-names = "int";
-			dmas = <&dmamux1 29 0x400 0x1>,
-			       <&dmamux1 30 0x400 0x1>,
-			       <&dmamux1 31 0x400 0x1>,
-			       <&dmamux1 32 0x400 0x1>;
-			dma-names = "ch1", "ch2", "ch3", "ch4";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			timer@3 {
-				compatible = "st,stm32h7-timer-trigger";
-				reg = <3>;
-				status = "disabled";
-			};
-		};
-
-		timers5: timer@40003000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-timers";
-			reg = <0x40003000 0x400>;
-			clocks = <&rcc TIM5_K>;
-			clock-names = "int";
-			dmas = <&dmamux1 55 0x400 0x1>,
-			       <&dmamux1 56 0x400 0x1>,
-			       <&dmamux1 57 0x400 0x1>,
-			       <&dmamux1 58 0x400 0x1>,
-			       <&dmamux1 59 0x400 0x1>,
-			       <&dmamux1 60 0x400 0x1>;
-			dma-names = "ch1", "ch2", "ch3", "ch4", "up", "trig";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			timer@4 {
-				compatible = "st,stm32h7-timer-trigger";
-				reg = <4>;
-				status = "disabled";
-			};
-		};
-
-		timers6: timer@40004000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-timers";
-			reg = <0x40004000 0x400>;
-			clocks = <&rcc TIM6_K>;
-			clock-names = "int";
-			dmas = <&dmamux1 69 0x400 0x1>;
-			dma-names = "up";
-			status = "disabled";
-
-			timer@5 {
-				compatible = "st,stm32h7-timer-trigger";
-				reg = <5>;
-				status = "disabled";
-			};
-		};
-
-		timers7: timer@40005000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-timers";
-			reg = <0x40005000 0x400>;
-			clocks = <&rcc TIM7_K>;
-			clock-names = "int";
-			dmas = <&dmamux1 70 0x400 0x1>;
-			dma-names = "up";
-			status = "disabled";
-
-			timer@6 {
-				compatible = "st,stm32h7-timer-trigger";
-				reg = <6>;
-				status = "disabled";
-			};
-		};
-
-		timers12: timer@40006000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-timers";
-			reg = <0x40006000 0x400>;
-			clocks = <&rcc TIM12_K>;
-			clock-names = "int";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			timer@11 {
-				compatible = "st,stm32h7-timer-trigger";
-				reg = <11>;
-				status = "disabled";
-			};
-		};
-
-		timers13: timer@40007000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-timers";
-			reg = <0x40007000 0x400>;
-			clocks = <&rcc TIM13_K>;
-			clock-names = "int";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			timer@12 {
-				compatible = "st,stm32h7-timer-trigger";
-				reg = <12>;
-				status = "disabled";
-			};
-		};
-
-		timers14: timer@40008000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-timers";
-			reg = <0x40008000 0x400>;
-			clocks = <&rcc TIM14_K>;
-			clock-names = "int";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			timer@13 {
-				compatible = "st,stm32h7-timer-trigger";
-				reg = <13>;
-				status = "disabled";
-			};
-		};
-
-		lptimer1: timer@40009000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-lptimer";
-			reg = <0x40009000 0x400>;
-			clocks = <&rcc LPTIM1_K>;
-			clock-names = "mux";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm-lp";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			trigger@0 {
-				compatible = "st,stm32-lptimer-trigger";
-				reg = <0>;
-				status = "disabled";
-			};
-
-			counter {
-				compatible = "st,stm32-lptimer-counter";
-				status = "disabled";
-			};
-		};
-
-		spi2: spi@4000b000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32h7-spi";
-			reg = <0x4000b000 0x400>;
-			interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc SPI2_K>;
-			resets = <&rcc SPI2_R>;
-			dmas = <&dmamux1 39 0x400 0x05>,
-			       <&dmamux1 40 0x400 0x05>;
-			dma-names = "rx", "tx";
-			status = "disabled";
-		};
-
-		i2s2: audio-controller@4000b000 {
-			compatible = "st,stm32h7-i2s";
-			#sound-dai-cells = <0>;
-			reg = <0x4000b000 0x400>;
-			interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&dmamux1 39 0x400 0x01>,
-			       <&dmamux1 40 0x400 0x01>;
-			dma-names = "rx", "tx";
-			status = "disabled";
-		};
-
-		spi3: spi@4000c000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32h7-spi";
-			reg = <0x4000c000 0x400>;
-			interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc SPI3_K>;
-			resets = <&rcc SPI3_R>;
-			dmas = <&dmamux1 61 0x400 0x05>,
-			       <&dmamux1 62 0x400 0x05>;
-			dma-names = "rx", "tx";
-			status = "disabled";
-		};
-
-		i2s3: audio-controller@4000c000 {
-			compatible = "st,stm32h7-i2s";
-			#sound-dai-cells = <0>;
-			reg = <0x4000c000 0x400>;
-			interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&dmamux1 61 0x400 0x01>,
-			       <&dmamux1 62 0x400 0x01>;
-			dma-names = "rx", "tx";
-			status = "disabled";
-		};
-
-		spdifrx: audio-controller@4000d000 {
-			compatible = "st,stm32h7-spdifrx";
-			#sound-dai-cells = <0>;
-			reg = <0x4000d000 0x400>;
-			clocks = <&rcc SPDIF_K>;
-			clock-names = "kclk";
-			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&dmamux1 93 0x400 0x01>,
-			       <&dmamux1 94 0x400 0x01>;
-			dma-names = "rx", "rx-ctrl";
-			status = "disabled";
-		};
-
-		usart2: serial@4000e000 {
-			compatible = "st,stm32h7-uart";
-			reg = <0x4000e000 0x400>;
-			interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc USART2_K>;
-			status = "disabled";
-		};
-
-		usart3: serial@4000f000 {
-			compatible = "st,stm32h7-uart";
-			reg = <0x4000f000 0x400>;
-			interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc USART3_K>;
-			status = "disabled";
-		};
-
-		uart4: serial@40010000 {
-			compatible = "st,stm32h7-uart";
-			reg = <0x40010000 0x400>;
-			interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc UART4_K>;
-			status = "disabled";
-		};
-
-		uart5: serial@40011000 {
-			compatible = "st,stm32h7-uart";
-			reg = <0x40011000 0x400>;
-			interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc UART5_K>;
-			status = "disabled";
-		};
-
-		i2c1: i2c@40012000 {
-			compatible = "st,stm32f7-i2c";
-			reg = <0x40012000 0x400>;
-			interrupt-names = "event", "error";
-			interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc I2C1_K>;
-			resets = <&rcc I2C1_R>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		i2c2: i2c@40013000 {
-			compatible = "st,stm32f7-i2c";
-			reg = <0x40013000 0x400>;
-			interrupt-names = "event", "error";
-			interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc I2C2_K>;
-			resets = <&rcc I2C2_R>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		i2c3: i2c@40014000 {
-			compatible = "st,stm32f7-i2c";
-			reg = <0x40014000 0x400>;
-			interrupt-names = "event", "error";
-			interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc I2C3_K>;
-			resets = <&rcc I2C3_R>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		i2c5: i2c@40015000 {
-			compatible = "st,stm32f7-i2c";
-			reg = <0x40015000 0x400>;
-			interrupt-names = "event", "error";
-			interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc I2C5_K>;
-			resets = <&rcc I2C5_R>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		cec: cec@40016000 {
-			compatible = "st,stm32-cec";
-			reg = <0x40016000 0x400>;
-			interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc CEC_K>, <&clk_lse>;
-			clock-names = "cec", "hdmi-cec";
-			status = "disabled";
-		};
-
-		dac: dac@40017000 {
-			compatible = "st,stm32h7-dac-core";
-			reg = <0x40017000 0x400>;
-			clocks = <&rcc DAC12>;
-			clock-names = "pclk";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-
-			dac1: dac@1 {
-				compatible = "st,stm32-dac";
-				#io-channels-cells = <1>;
-				reg = <1>;
-				status = "disabled";
-			};
-
-			dac2: dac@2 {
-				compatible = "st,stm32-dac";
-				#io-channels-cells = <1>;
-				reg = <2>;
-				status = "disabled";
-			};
-		};
-
-		uart7: serial@40018000 {
-			compatible = "st,stm32h7-uart";
-			reg = <0x40018000 0x400>;
-			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc UART7_K>;
-			status = "disabled";
-		};
-
-		uart8: serial@40019000 {
-			compatible = "st,stm32h7-uart";
-			reg = <0x40019000 0x400>;
-			interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc UART8_K>;
-			status = "disabled";
-		};
-
-		timers1: timer@44000000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-timers";
-			reg = <0x44000000 0x400>;
-			clocks = <&rcc TIM1_K>;
-			clock-names = "int";
-			dmas = <&dmamux1 11 0x400 0x1>,
-			       <&dmamux1 12 0x400 0x1>,
-			       <&dmamux1 13 0x400 0x1>,
-			       <&dmamux1 14 0x400 0x1>,
-			       <&dmamux1 15 0x400 0x1>,
-			       <&dmamux1 16 0x400 0x1>,
-			       <&dmamux1 17 0x400 0x1>;
-			dma-names = "ch1", "ch2", "ch3", "ch4",
-				    "up", "trig", "com";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			timer@0 {
-				compatible = "st,stm32h7-timer-trigger";
-				reg = <0>;
-				status = "disabled";
-			};
-		};
-
-		timers8: timer@44001000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-timers";
-			reg = <0x44001000 0x400>;
-			clocks = <&rcc TIM8_K>;
-			clock-names = "int";
-			dmas = <&dmamux1 47 0x400 0x1>,
-			       <&dmamux1 48 0x400 0x1>,
-			       <&dmamux1 49 0x400 0x1>,
-			       <&dmamux1 50 0x400 0x1>,
-			       <&dmamux1 51 0x400 0x1>,
-			       <&dmamux1 52 0x400 0x1>,
-			       <&dmamux1 53 0x400 0x1>;
-			dma-names = "ch1", "ch2", "ch3", "ch4",
-				    "up", "trig", "com";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			timer@7 {
-				compatible = "st,stm32h7-timer-trigger";
-				reg = <7>;
-				status = "disabled";
-			};
-		};
-
-		usart6: serial@44003000 {
-			compatible = "st,stm32h7-uart";
-			reg = <0x44003000 0x400>;
-			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc USART6_K>;
-			status = "disabled";
-		};
-
-		spi1: spi@44004000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32h7-spi";
-			reg = <0x44004000 0x400>;
-			interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc SPI1_K>;
-			resets = <&rcc SPI1_R>;
-			dmas = <&dmamux1 37 0x400 0x05>,
-			       <&dmamux1 38 0x400 0x05>;
-			dma-names = "rx", "tx";
-			status = "disabled";
-		};
-
-		i2s1: audio-controller@44004000 {
-			compatible = "st,stm32h7-i2s";
-			#sound-dai-cells = <0>;
-			reg = <0x44004000 0x400>;
-			interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&dmamux1 37 0x400 0x01>,
-			       <&dmamux1 38 0x400 0x01>;
-			dma-names = "rx", "tx";
-			status = "disabled";
-		};
-
-		spi4: spi@44005000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32h7-spi";
-			reg = <0x44005000 0x400>;
-			interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc SPI4_K>;
-			resets = <&rcc SPI4_R>;
-			dmas = <&dmamux1 83 0x400 0x05>,
-			       <&dmamux1 84 0x400 0x05>;
-			dma-names = "rx", "tx";
-			status = "disabled";
-		};
-
-		timers15: timer@44006000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-timers";
-			reg = <0x44006000 0x400>;
-			clocks = <&rcc TIM15_K>;
-			clock-names = "int";
-			dmas = <&dmamux1 105 0x400 0x1>,
-			       <&dmamux1 106 0x400 0x1>,
-			       <&dmamux1 107 0x400 0x1>,
-			       <&dmamux1 108 0x400 0x1>;
-			dma-names = "ch1", "up", "trig", "com";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			timer@14 {
-				compatible = "st,stm32h7-timer-trigger";
-				reg = <14>;
-				status = "disabled";
-			};
-		};
-
-		timers16: timer@44007000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-timers";
-			reg = <0x44007000 0x400>;
-			clocks = <&rcc TIM16_K>;
-			clock-names = "int";
-			dmas = <&dmamux1 109 0x400 0x1>,
-			       <&dmamux1 110 0x400 0x1>;
-			dma-names = "ch1", "up";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-			timer@15 {
-				compatible = "st,stm32h7-timer-trigger";
-				reg = <15>;
-				status = "disabled";
-			};
-		};
-
-		timers17: timer@44008000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-timers";
-			reg = <0x44008000 0x400>;
-			clocks = <&rcc TIM17_K>;
-			clock-names = "int";
-			dmas = <&dmamux1 111 0x400 0x1>,
-			       <&dmamux1 112 0x400 0x1>;
-			dma-names = "ch1", "up";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			timer@16 {
-				compatible = "st,stm32h7-timer-trigger";
-				reg = <16>;
-				status = "disabled";
-			};
-		};
-
-		spi5: spi@44009000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32h7-spi";
-			reg = <0x44009000 0x400>;
-			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc SPI5_K>;
-			resets = <&rcc SPI5_R>;
-			dmas = <&dmamux1 85 0x400 0x05>,
-			       <&dmamux1 86 0x400 0x05>;
-			dma-names = "rx", "tx";
-			status = "disabled";
-		};
-
-		sai1: sai@4400a000 {
-			compatible = "st,stm32h7-sai";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges = <0 0x4400a000 0x400>;
-			reg = <0x4400a000 0x4>, <0x4400a3f0 0x10>;
-			interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
-			resets = <&rcc SAI1_R>;
-			status = "disabled";
-
-			sai1a: audio-controller@4400a004 {
-				#sound-dai-cells = <0>;
-
-				compatible = "st,stm32-sai-sub-a";
-				reg = <0x4 0x1c>;
-				clocks = <&rcc SAI1_K>;
-				clock-names = "sai_ck";
-				dmas = <&dmamux1 87 0x400 0x01>;
-				status = "disabled";
-			};
-
-			sai1b: audio-controller@4400a024 {
-				#sound-dai-cells = <0>;
-				compatible = "st,stm32-sai-sub-b";
-				reg = <0x24 0x1c>;
-				clocks = <&rcc SAI1_K>;
-				clock-names = "sai_ck";
-				dmas = <&dmamux1 88 0x400 0x01>;
-				status = "disabled";
-			};
-		};
-
-		sai2: sai@4400b000 {
-			compatible = "st,stm32h7-sai";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges = <0 0x4400b000 0x400>;
-			reg = <0x4400b000 0x4>, <0x4400b3f0 0x10>;
-			interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
-			resets = <&rcc SAI2_R>;
-			status = "disabled";
-
-			sai2a: audio-controller@4400b004 {
-				#sound-dai-cells = <0>;
-				compatible = "st,stm32-sai-sub-a";
-				reg = <0x4 0x1c>;
-				clocks = <&rcc SAI2_K>;
-				clock-names = "sai_ck";
-				dmas = <&dmamux1 89 0x400 0x01>;
-				status = "disabled";
-			};
-
-			sai2b: audio-controller@4400b024 {
-				#sound-dai-cells = <0>;
-				compatible = "st,stm32-sai-sub-b";
-				reg = <0x24 0x1c>;
-				clocks = <&rcc SAI2_K>;
-				clock-names = "sai_ck";
-				dmas = <&dmamux1 90 0x400 0x01>;
-				status = "disabled";
-			};
-		};
-
-		sai3: sai@4400c000 {
-			compatible = "st,stm32h7-sai";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges = <0 0x4400c000 0x400>;
-			reg = <0x4400c000 0x4>, <0x4400c3f0 0x10>;
-			interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
-			resets = <&rcc SAI3_R>;
-			status = "disabled";
-
-			sai3a: audio-controller@4400c004 {
-				#sound-dai-cells = <0>;
-				compatible = "st,stm32-sai-sub-a";
-				reg = <0x04 0x1c>;
-				clocks = <&rcc SAI3_K>;
-				clock-names = "sai_ck";
-				dmas = <&dmamux1 113 0x400 0x01>;
-				status = "disabled";
-			};
-
-			sai3b: audio-controller@4400c024 {
-				#sound-dai-cells = <0>;
-				compatible = "st,stm32-sai-sub-b";
-				reg = <0x24 0x1c>;
-				clocks = <&rcc SAI3_K>;
-				clock-names = "sai_ck";
-				dmas = <&dmamux1 114 0x400 0x01>;
-				status = "disabled";
-			};
-		};
-
-		dfsdm: dfsdm@4400d000 {
-			compatible = "st,stm32mp1-dfsdm";
-			reg = <0x4400d000 0x800>;
-			clocks = <&rcc DFSDM_K>;
-			clock-names = "dfsdm";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-
-			dfsdm0: filter@0 {
-				compatible = "st,stm32-dfsdm-adc";
-				#io-channel-cells = <1>;
-				reg = <0>;
-				interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
-				dmas = <&dmamux1 101 0x400 0x01>;
-				dma-names = "rx";
-				status = "disabled";
-			};
-
-			dfsdm1: filter@1 {
-				compatible = "st,stm32-dfsdm-adc";
-				#io-channel-cells = <1>;
-				reg = <1>;
-				interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
-				dmas = <&dmamux1 102 0x400 0x01>;
-				dma-names = "rx";
-				status = "disabled";
-			};
-
-			dfsdm2: filter@2 {
-				compatible = "st,stm32-dfsdm-adc";
-				#io-channel-cells = <1>;
-				reg = <2>;
-				interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
-				dmas = <&dmamux1 103 0x400 0x01>;
-				dma-names = "rx";
-				status = "disabled";
-			};
-
-			dfsdm3: filter@3 {
-				compatible = "st,stm32-dfsdm-adc";
-				#io-channel-cells = <1>;
-				reg = <3>;
-				interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
-				dmas = <&dmamux1 104 0x400 0x01>;
-				dma-names = "rx";
-				status = "disabled";
-			};
-
-			dfsdm4: filter@4 {
-				compatible = "st,stm32-dfsdm-adc";
-				#io-channel-cells = <1>;
-				reg = <4>;
-				interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
-				dmas = <&dmamux1 91 0x400 0x01>;
-				dma-names = "rx";
-				status = "disabled";
-			};
-
-			dfsdm5: filter@5 {
-				compatible = "st,stm32-dfsdm-adc";
-				#io-channel-cells = <1>;
-				reg = <5>;
-				interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
-				dmas = <&dmamux1 92 0x400 0x01>;
-				dma-names = "rx";
-				status = "disabled";
-			};
-		};
-
-		m_can1: can@4400e000 {
-			compatible = "bosch,m_can";
-			reg = <0x4400e000 0x400>, <0x44011000 0x1400>;
-			reg-names = "m_can", "message_ram";
-			interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "int0", "int1";
-			clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
-			clock-names = "hclk", "cclk";
-			bosch,mram-cfg = <0x0 0 0 32 0 0 2 2>;
-			status = "disabled";
-		};
-
-		m_can2: can@4400f000 {
-			compatible = "bosch,m_can";
-			reg = <0x4400f000 0x400>, <0x44011000 0x2800>;
-			reg-names = "m_can", "message_ram";
-			interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "int0", "int1";
-			clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
-			clock-names = "hclk", "cclk";
-			bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>;
-			status = "disabled";
-		};
-
-		dma1: dma@48000000 {
-			compatible = "st,stm32-dma";
-			reg = <0x48000000 0x400>;
-			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc DMA1>;
-			#dma-cells = <4>;
-			st,mem2mem;
-			dma-requests = <8>;
-		};
-
-		dma2: dma@48001000 {
-			compatible = "st,stm32-dma";
-			reg = <0x48001000 0x400>;
-			interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc DMA2>;
-			#dma-cells = <4>;
-			st,mem2mem;
-			dma-requests = <8>;
-		};
-
-		dmamux1: dma-router@48002000 {
-			compatible = "st,stm32h7-dmamux";
-			reg = <0x48002000 0x1c>;
-			#dma-cells = <3>;
-			dma-requests = <128>;
-			dma-masters = <&dma1 &dma2>;
-			dma-channels = <16>;
-			clocks = <&rcc DMAMUX>;
-		};
-
-		adc: adc@48003000 {
-			compatible = "st,stm32mp1-adc-core";
-			reg = <0x48003000 0x400>;
-			interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc ADC12>, <&rcc ADC12_K>;
-			clock-names = "bus", "adc";
-			interrupt-controller;
-			st,syscfg = <&syscfg>;
-			#interrupt-cells = <1>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-
-			adc1: adc@0 {
-				compatible = "st,stm32mp1-adc";
-				#io-channel-cells = <1>;
-				reg = <0x0>;
-				interrupt-parent = <&adc>;
-				interrupts = <0>;
-				dmas = <&dmamux1 9 0x400 0x01>;
-				dma-names = "rx";
-				status = "disabled";
-			};
-
-			adc2: adc@100 {
-				compatible = "st,stm32mp1-adc";
-				#io-channel-cells = <1>;
-				reg = <0x100>;
-				interrupt-parent = <&adc>;
-				interrupts = <1>;
-				dmas = <&dmamux1 10 0x400 0x01>;
-				dma-names = "rx";
-				status = "disabled";
-			};
-		};
-
-		usbotg_hs: usb-otg@49000000 {
-			compatible = "snps,dwc2";
-			reg = <0x49000000 0x10000>;
-			clocks = <&rcc USBO_K>;
-			clock-names = "otg";
-			resets = <&rcc USBO_R>;
-			reset-names = "dwc2";
-			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
-			g-rx-fifo-size = <256>;
-			g-np-tx-fifo-size = <32>;
-			g-tx-fifo-size = <128 128 64 64 64 64 32 32>;
-			dr_mode = "otg";
-			status = "disabled";
-		};
-
-		ipcc: mailbox@4c001000 {
-			compatible = "st,stm32mp1-ipcc";
-			#mbox-cells = <1>;
-			reg = <0x4c001000 0x400>;
-			st,proc-id = <0>;
-			interrupts-extended =
-				<&intc GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
-				<&intc GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
-				<&exti 61 1>;
-			interrupt-names = "rx", "tx", "wakeup";
-			clocks = <&rcc IPCC>;
-			wakeup-source;
-			status = "disabled";
-		};
-
-		dcmi: dcmi@4c006000 {
-			compatible = "st,stm32-dcmi";
-			reg = <0x4c006000 0x400>;
-			interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
-			resets = <&rcc CAMITF_R>;
-			clocks = <&rcc DCMI>;
-			clock-names = "mclk";
-			dmas = <&dmamux1 75 0x400 0x0d>;
-			dma-names = "tx";
-			status = "disabled";
-		};
-
-		rcc: rcc@50000000 {
-			compatible = "st,stm32mp1-rcc", "syscon";
-			reg = <0x50000000 0x1000>;
-			#clock-cells = <1>;
-			#reset-cells = <1>;
-		};
-
-		pwr_regulators: pwr@50001000 {
-			compatible = "st,stm32mp1,pwr-reg";
-			reg = <0x50001000 0x10>;
-
-			reg11: reg11 {
-				regulator-name = "reg11";
-				regulator-min-microvolt = <1100000>;
-				regulator-max-microvolt = <1100000>;
-			};
-
-			reg18: reg18 {
-				regulator-name = "reg18";
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <1800000>;
-			};
-
-			usb33: usb33 {
-				regulator-name = "usb33";
-				regulator-min-microvolt = <3300000>;
-				regulator-max-microvolt = <3300000>;
-			};
-		};
-
-		exti: interrupt-controller@5000d000 {
-			compatible = "st,stm32mp1-exti", "syscon";
-			interrupt-controller;
-			#interrupt-cells = <2>;
-			reg = <0x5000d000 0x400>;
-		};
-
-		syscfg: syscon@50020000 {
-			compatible = "st,stm32mp157-syscfg", "syscon";
-			reg = <0x50020000 0x400>;
-			clocks = <&rcc SYSCFG>;
-		};
-
-		lptimer2: timer@50021000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-lptimer";
-			reg = <0x50021000 0x400>;
-			clocks = <&rcc LPTIM2_K>;
-			clock-names = "mux";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm-lp";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			trigger@1 {
-				compatible = "st,stm32-lptimer-trigger";
-				reg = <1>;
-				status = "disabled";
-			};
-
-			counter {
-				compatible = "st,stm32-lptimer-counter";
-				status = "disabled";
-			};
-		};
-
-		lptimer3: timer@50022000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32-lptimer";
-			reg = <0x50022000 0x400>;
-			clocks = <&rcc LPTIM3_K>;
-			clock-names = "mux";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm-lp";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			trigger@2 {
-				compatible = "st,stm32-lptimer-trigger";
-				reg = <2>;
-				status = "disabled";
-			};
-		};
-
-		lptimer4: timer@50023000 {
-			compatible = "st,stm32-lptimer";
-			reg = <0x50023000 0x400>;
-			clocks = <&rcc LPTIM4_K>;
-			clock-names = "mux";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm-lp";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-		};
-
-		lptimer5: timer@50024000 {
-			compatible = "st,stm32-lptimer";
-			reg = <0x50024000 0x400>;
-			clocks = <&rcc LPTIM5_K>;
-			clock-names = "mux";
-			status = "disabled";
-
-			pwm {
-				compatible = "st,stm32-pwm-lp";
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-		};
-
-		vrefbuf: vrefbuf@50025000 {
-			compatible = "st,stm32-vrefbuf";
-			reg = <0x50025000 0x8>;
-			regulator-min-microvolt = <1500000>;
-			regulator-max-microvolt = <2500000>;
-			clocks = <&rcc VREF>;
-			status = "disabled";
-		};
-
-		sai4: sai@50027000 {
-			compatible = "st,stm32h7-sai";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges = <0 0x50027000 0x400>;
-			reg = <0x50027000 0x4>, <0x500273f0 0x10>;
-			interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
-			resets = <&rcc SAI4_R>;
-			status = "disabled";
-
-			sai4a: audio-controller@50027004 {
-				#sound-dai-cells = <0>;
-				compatible = "st,stm32-sai-sub-a";
-				reg = <0x04 0x1c>;
-				clocks = <&rcc SAI4_K>;
-				clock-names = "sai_ck";
-				dmas = <&dmamux1 99 0x400 0x01>;
-				status = "disabled";
-			};
-
-			sai4b: audio-controller@50027024 {
-				#sound-dai-cells = <0>;
-				compatible = "st,stm32-sai-sub-b";
-				reg = <0x24 0x1c>;
-				clocks = <&rcc SAI4_K>;
-				clock-names = "sai_ck";
-				dmas = <&dmamux1 100 0x400 0x01>;
-				status = "disabled";
-			};
-		};
-
-		dts: thermal@50028000 {
-			compatible = "st,stm32-thermal";
-			reg = <0x50028000 0x100>;
-			interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc TMPSENS>;
-			clock-names = "pclk";
-			#thermal-sensor-cells = <0>;
-			status = "disabled";
-		};
-
-		cryp1: cryp@54001000 {
-			compatible = "st,stm32mp1-cryp";
-			reg = <0x54001000 0x400>;
-			interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc CRYP1>;
-			resets = <&rcc CRYP1_R>;
-			status = "disabled";
-		};
-
-		hash1: hash@54002000 {
-			compatible = "st,stm32f756-hash";
-			reg = <0x54002000 0x400>;
-			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc HASH1>;
-			resets = <&rcc HASH1_R>;
-			dmas = <&mdma1 31 0x10 0x1000A02 0x0 0x0>;
-			dma-names = "in";
-			dma-maxburst = <2>;
-			status = "disabled";
-		};
-
-		rng1: rng@54003000 {
-			compatible = "st,stm32-rng";
-			reg = <0x54003000 0x400>;
-			clocks = <&rcc RNG1_K>;
-			resets = <&rcc RNG1_R>;
-			status = "disabled";
-		};
-
-		mdma1: dma@58000000 {
-			compatible = "st,stm32h7-mdma";
-			reg = <0x58000000 0x1000>;
-			interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc MDMA>;
-			#dma-cells = <5>;
-			dma-channels = <32>;
-			dma-requests = <48>;
-		};
-
-		fmc: nand-controller@58002000 {
-			compatible = "st,stm32mp15-fmc2";
-			reg = <0x58002000 0x1000>,
-			      <0x80000000 0x1000>,
-			      <0x88010000 0x1000>,
-			      <0x88020000 0x1000>,
-			      <0x81000000 0x1000>,
-			      <0x89010000 0x1000>,
-			      <0x89020000 0x1000>;
-			interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&mdma1 20 0x10 0x12000a02 0x0 0x0>,
-			       <&mdma1 20 0x10 0x12000a08 0x0 0x0>,
-			       <&mdma1 21 0x10 0x12000a0a 0x0 0x0>;
-			dma-names = "tx", "rx", "ecc";
-			clocks = <&rcc FMC_K>;
-			resets = <&rcc FMC_R>;
-			status = "disabled";
-		};
-
-		qspi: spi@58003000 {
-			compatible = "st,stm32f469-qspi";
-			reg = <0x58003000 0x1000>, <0x70000000 0x10000000>;
-			reg-names = "qspi", "qspi_mm";
-			interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&mdma1 22 0x10 0x100002 0x0 0x0>,
-			       <&mdma1 22 0x10 0x100008 0x0 0x0>;
-			dma-names = "tx", "rx";
-			clocks = <&rcc QSPI_K>;
-			resets = <&rcc QSPI_R>;
-			status = "disabled";
-		};
-
-		sdmmc1: sdmmc@58005000 {
-			compatible = "arm,pl18x", "arm,primecell";
-			arm,primecell-periphid = <0x10153180>;
-			reg = <0x58005000 0x1000>;
-			interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names	= "cmd_irq";
-			clocks = <&rcc SDMMC1_K>;
-			clock-names = "apb_pclk";
-			resets = <&rcc SDMMC1_R>;
-			cap-sd-highspeed;
-			cap-mmc-highspeed;
-			max-frequency = <120000000>;
-		};
-
-		crc1: crc@58009000 {
-			compatible = "st,stm32f7-crc";
-			reg = <0x58009000 0x400>;
-			clocks = <&rcc CRC1>;
-			status = "disabled";
-		};
-
-		stmmac_axi_config_0: stmmac-axi-config {
-			snps,wr_osr_lmt = <0x7>;
-			snps,rd_osr_lmt = <0x7>;
-			snps,blen = <0 0 0 0 16 8 4>;
-		};
-
-		ethernet0: ethernet@5800a000 {
-			compatible = "st,stm32mp1-dwmac", "snps,dwmac-4.20a";
-			reg = <0x5800a000 0x2000>;
-			reg-names = "stmmaceth";
-			interrupts-extended = <&intc GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "macirq";
-			clock-names = "stmmaceth",
-				      "mac-clk-tx",
-				      "mac-clk-rx",
-				      "ethstp",
-				      "syscfg-clk";
-			clocks = <&rcc ETHMAC>,
-				 <&rcc ETHTX>,
-				 <&rcc ETHRX>,
-				 <&rcc ETHSTP>,
-				 <&rcc SYSCFG>;
-			st,syscon = <&syscfg 0x4>;
-			snps,mixed-burst;
-			snps,pbl = <2>;
-			snps,axi-config = <&stmmac_axi_config_0>;
-			snps,tso;
-			status = "disabled";
-		};
-
-		usbh_ohci: usbh-ohci@5800c000 {
-			compatible = "generic-ohci";
-			reg = <0x5800c000 0x1000>;
-			clocks = <&rcc USBH>;
-			resets = <&rcc USBH_R>;
-			interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
-			status = "disabled";
-		};
-
-		usbh_ehci: usbh-ehci@5800d000 {
-			compatible = "generic-ehci";
-			reg = <0x5800d000 0x1000>;
-			clocks = <&rcc USBH>;
-			resets = <&rcc USBH_R>;
-			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
-			companion = <&usbh_ohci>;
-			status = "disabled";
-		};
-
-		gpu: gpu@59000000 {
-			compatible = "vivante,gc";
-			reg = <0x59000000 0x800>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc GPU>, <&rcc GPU_K>;
-			clock-names = "bus" ,"core";
-			resets = <&rcc GPU_R>;
-			status = "disabled";
-		};
-
-		dsi: dsi@5a000000 {
-			compatible = "st,stm32-dsi";
-			reg = <0x5a000000 0x800>;
-			clocks = <&rcc DSI_K>, <&clk_hse>, <&rcc DSI_PX>;
-			clock-names = "pclk", "ref", "px_clk";
-			resets = <&rcc DSI_R>;
-			reset-names = "apb";
-			status = "disabled";
-		};
-
-		ltdc: display-controller@5a001000 {
-			compatible = "st,stm32-ltdc";
-			reg = <0x5a001000 0x400>;
-			interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc LTDC_PX>;
-			clock-names = "lcd";
-			resets = <&rcc LTDC_R>;
-			status = "disabled";
-		};
-
-		iwdg2: watchdog@5a002000 {
-			compatible = "st,stm32mp1-iwdg";
-			reg = <0x5a002000 0x400>;
-			clocks = <&rcc IWDG2>, <&rcc CK_LSI>;
-			clock-names = "pclk", "lsi";
-			status = "disabled";
-		};
-
-		usbphyc: usbphyc@5a006000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32mp1-usbphyc";
-			reg = <0x5a006000 0x1000>;
-			clocks = <&rcc USBPHY_K>;
-			resets = <&rcc USBPHY_R>;
-			status = "disabled";
-
-			usbphyc_port0: usb-phy@0 {
-				#phy-cells = <0>;
-				reg = <0>;
-			};
-
-			usbphyc_port1: usb-phy@1 {
-				#phy-cells = <1>;
-				reg = <1>;
-			};
-		};
-
-		usart1: serial@5c000000 {
-			compatible = "st,stm32h7-uart";
-			reg = <0x5c000000 0x400>;
-			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc USART1_K>;
-			status = "disabled";
-		};
-
-		spi6: spi@5c001000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "st,stm32h7-spi";
-			reg = <0x5c001000 0x400>;
-			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc SPI6_K>;
-			resets = <&rcc SPI6_R>;
-			dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
-			       <&mdma1 35 0x0 0x40002 0x0 0x0>;
-			dma-names = "rx", "tx";
-			status = "disabled";
-		};
-
-		i2c4: i2c@5c002000 {
-			compatible = "st,stm32f7-i2c";
-			reg = <0x5c002000 0x400>;
-			interrupt-names = "event", "error";
-			interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc I2C4_K>;
-			resets = <&rcc I2C4_R>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		rtc: rtc@5c004000 {
-			compatible = "st,stm32mp1-rtc";
-			reg = <0x5c004000 0x400>;
-			clocks = <&rcc RTCAPB>, <&rcc RTC>;
-			clock-names = "pclk", "rtc_ck";
-			interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
-			status = "disabled";
-		};
-
-		bsec: nvmem@5c005000 {
-			compatible = "st,stm32mp15-bsec";
-			reg = <0x5c005000 0x400>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ts_cal1: calib@5c {
-				reg = <0x5c 0x2>;
-			};
-			ts_cal2: calib@5e {
-				reg = <0x5e 0x2>;
-			};
-		};
-
-		i2c6: i2c@5c009000 {
-			compatible = "st,stm32f7-i2c";
-			reg = <0x5c009000 0x400>;
-			interrupt-names = "event", "error";
-			interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&rcc I2C6_K>;
-			resets = <&rcc I2C6_R>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-	};
-
-	mlahb {
-		compatible = "simple-bus";
-		#address-cells = <1>;
-		#size-cells = <1>;
-		dma-ranges = <0x00000000 0x38000000 0x10000>,
-			     <0x10000000 0x10000000 0x60000>,
-			     <0x30000000 0x30000000 0x60000>;
-
-		m4_rproc: m4@10000000 {
-			compatible = "st,stm32mp1-m4";
-			reg = <0x10000000 0x40000>,
-			      <0x30000000 0x40000>,
-			      <0x38000000 0x10000>;
-			resets = <&rcc MCU_R>;
-			st,syscfg-holdboot = <&rcc 0x10C 0x1>;
-			st,syscfg-tz = <&rcc 0x000 0x1>;
-			status = "disabled";
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stm32mp157xaa-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp157xaa-pinctrl.dtsi
deleted file mode 100644
index 875adf5..0000000
--- a/arch/arm/boot/dts/stm32mp157xaa-pinctrl.dtsi
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/*
- * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
- * Author: Alexandre Torgue <alexandre.torgue@st.com>
- */
-
-#include "stm32mp157-pinctrl.dtsi"
-/ {
-	soc {
-		pinctrl: pin-controller@50002000 {
-			st,package = <STM32MP_PKG_AA>;
-
-			gpioa: gpio@50002000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 0 16>;
-			};
-
-			gpiob: gpio@50003000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 16 16>;
-			};
-
-			gpioc: gpio@50004000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 32 16>;
-			};
-
-			gpiod: gpio@50005000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 48 16>;
-			};
-
-			gpioe: gpio@50006000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 64 16>;
-			};
-
-			gpiof: gpio@50007000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 80 16>;
-			};
-
-			gpiog: gpio@50008000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 96 16>;
-			};
-
-			gpioh: gpio@50009000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 112 16>;
-			};
-
-			gpioi: gpio@5000a000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 128 16>;
-			};
-
-			gpioj: gpio@5000b000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 144 16>;
-			};
-
-			gpiok: gpio@5000c000 {
-				status = "okay";
-				ngpios = <8>;
-				gpio-ranges = <&pinctrl 0 160 8>;
-			};
-		};
-
-		pinctrl_z: pin-controller-z@54004000 {
-			st,package = <STM32MP_PKG_AA>;
-
-			gpioz: gpio@54004000 {
-				status = "okay";
-				ngpios = <8>;
-				gpio-ranges = <&pinctrl_z 0 400 8>;
-			};
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stm32mp157xab-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp157xab-pinctrl.dtsi
deleted file mode 100644
index 961fa12..0000000
--- a/arch/arm/boot/dts/stm32mp157xab-pinctrl.dtsi
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/*
- * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
- * Author: Alexandre Torgue <alexandre.torgue@st.com>
- */
-
-#include "stm32mp157-pinctrl.dtsi"
-/ {
-	soc {
-		pinctrl: pin-controller@50002000 {
-			st,package = <STM32MP_PKG_AB>;
-
-			gpioa: gpio@50002000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 0 16>;
-			};
-
-			gpiob: gpio@50003000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 16 16>;
-			};
-
-			gpioc: gpio@50004000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 32 16>;
-			};
-
-			gpiod: gpio@50005000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 48 16>;
-			};
-
-			gpioe: gpio@50006000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 64 16>;
-			};
-
-			gpiof: gpio@50007000 {
-				status = "okay";
-				ngpios = <6>;
-				gpio-ranges = <&pinctrl 6 86 6>;
-			};
-
-			gpiog: gpio@50008000 {
-				status = "okay";
-				ngpios = <10>;
-				gpio-ranges = <&pinctrl 6 102 10>;
-			};
-
-			gpioh: gpio@50009000 {
-				status = "okay";
-				ngpios = <2>;
-				gpio-ranges = <&pinctrl 0 112 2>;
-			};
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stm32mp157xac-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp157xac-pinctrl.dtsi
deleted file mode 100644
index 26600f1..0000000
--- a/arch/arm/boot/dts/stm32mp157xac-pinctrl.dtsi
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/*
- * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
- * Author: Alexandre Torgue <alexandre.torgue@st.com>
- */
-
-#include "stm32mp157-pinctrl.dtsi"
-/ {
-	soc {
-		pinctrl: pin-controller@50002000 {
-			st,package = <STM32MP_PKG_AC>;
-
-			gpioa: gpio@50002000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 0 16>;
-			};
-
-			gpiob: gpio@50003000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 16 16>;
-			};
-
-			gpioc: gpio@50004000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 32 16>;
-			};
-
-			gpiod: gpio@50005000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 48 16>;
-			};
-
-			gpioe: gpio@50006000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 64 16>;
-			};
-
-			gpiof: gpio@50007000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 80 16>;
-			};
-
-			gpiog: gpio@50008000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 96 16>;
-			};
-
-			gpioh: gpio@50009000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 112 16>;
-			};
-
-			gpioi: gpio@5000a000 {
-				status = "okay";
-				ngpios = <12>;
-				gpio-ranges = <&pinctrl 0 128 12>;
-			};
-		};
-
-		pinctrl_z: pin-controller-z@54004000 {
-			st,package = <STM32MP_PKG_AC>;
-
-			gpioz: gpio@54004000 {
-				status = "okay";
-				ngpios = <8>;
-				gpio-ranges = <&pinctrl_z 0 400 8>;
-			};
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stm32mp157xad-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp157xad-pinctrl.dtsi
deleted file mode 100644
index 910113f..0000000
--- a/arch/arm/boot/dts/stm32mp157xad-pinctrl.dtsi
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/*
- * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
- * Author: Alexandre Torgue <alexandre.torgue@st.com>
- */
-
-#include "stm32mp157-pinctrl.dtsi"
-/ {
-	soc {
-		pinctrl: pin-controller@50002000 {
-			st,package = <STM32MP_PKG_AD>;
-
-			gpioa: gpio@50002000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 0 16>;
-			};
-
-			gpiob: gpio@50003000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 16 16>;
-			};
-
-			gpioc: gpio@50004000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 32 16>;
-			};
-
-			gpiod: gpio@50005000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 48 16>;
-			};
-
-			gpioe: gpio@50006000 {
-				status = "okay";
-				ngpios = <16>;
-				gpio-ranges = <&pinctrl 0 64 16>;
-			};
-
-			gpiof: gpio@50007000 {
-				status = "okay";
-				ngpios = <6>;
-				gpio-ranges = <&pinctrl 6 86 6>;
-			};
-
-			gpiog: gpio@50008000 {
-				status = "okay";
-				ngpios = <10>;
-				gpio-ranges = <&pinctrl 6 102 10>;
-			};
-
-			gpioh: gpio@50009000 {
-				status = "okay";
-				ngpios = <2>;
-				gpio-ranges = <&pinctrl 0 112 2>;
-			};
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stm32mp15xc.dtsi b/arch/arm/boot/dts/stm32mp15xc.dtsi
new file mode 100644
index 0000000..b06a55a
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp15xc.dtsi
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
+ */
+
+/ {
+	soc {
+		cryp1: cryp@54001000 {
+			compatible = "st,stm32mp1-cryp";
+			reg = <0x54001000 0x400>;
+			interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&rcc CRYP1>;
+			resets = <&rcc CRYP1_R>;
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
new file mode 100644
index 0000000..f6672e8
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/mfd/st,stpmic1.h>
+
+/ {
+	memory@c0000000 {
+		device_type = "memory";
+		reg = <0xc0000000 0x20000000>;
+	};
+
+	reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		mcuram2: mcuram2@10000000 {
+			compatible = "shared-dma-pool";
+			reg = <0x10000000 0x40000>;
+			no-map;
+		};
+
+		vdev0vring0: vdev0vring0@10040000 {
+			compatible = "shared-dma-pool";
+			reg = <0x10040000 0x1000>;
+			no-map;
+		};
+
+		vdev0vring1: vdev0vring1@10041000 {
+			compatible = "shared-dma-pool";
+			reg = <0x10041000 0x1000>;
+			no-map;
+		};
+
+		vdev0buffer: vdev0buffer@10042000 {
+			compatible = "shared-dma-pool";
+			reg = <0x10042000 0x4000>;
+			no-map;
+		};
+
+		mcuram: mcuram@30000000 {
+			compatible = "shared-dma-pool";
+			reg = <0x30000000 0x40000>;
+			no-map;
+		};
+
+		retram: retram@38000000 {
+			compatible = "shared-dma-pool";
+			reg = <0x38000000 0x10000>;
+			no-map;
+		};
+
+		gpu_reserved: gpu@d4000000 {
+			reg = <0xd4000000 0x4000000>;
+			no-map;
+		};
+	};
+
+	led {
+		compatible = "gpio-leds";
+		blue {
+			label = "heartbeat";
+			gpios = <&gpiod 11 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+			default-state = "off";
+		};
+	};
+
+	sound {
+		compatible = "audio-graph-card";
+		label = "STM32MP1-DK";
+		routing =
+			"Playback" , "MCLK",
+			"Capture" , "MCLK",
+			"MICL" , "Mic Bias";
+		dais = <&sai2a_port &sai2b_port &i2s2_port>;
+		status = "okay";
+	};
+};
+
+&adc {
+	pinctrl-names = "default";
+	pinctrl-0 = <&adc12_ain_pins_a>, <&adc12_usb_cc_pins_a>;
+	vdd-supply = <&vdd>;
+	vdda-supply = <&vdd>;
+	vref-supply = <&vrefbuf>;
+	status = "disabled";
+	adc1: adc@0 {
+		/*
+		 * Type-C USB_PWR_CC1 & USB_PWR_CC2 on in18 & in19.
+		 * Use at least 5 * RC time, e.g. 5 * (Rp + Rd) * C:
+		 * 5 * (56 + 47kOhms) * 5pF => 2.5us.
+		 * Use arbitrary margin here (e.g. 5us).
+		 */
+		st,min-sample-time-nsecs = <5000>;
+		/* AIN connector, USB Type-C CC1 & CC2 */
+		st,adc-channels = <0 1 6 13 18 19>;
+		status = "okay";
+	};
+	adc2: adc@100 {
+		/* AIN connector, USB Type-C CC1 & CC2 */
+		st,adc-channels = <0 1 2 6 18 19>;
+		st,min-sample-time-nsecs = <5000>;
+		status = "okay";
+	};
+};
+
+&cec {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&cec_pins_b>;
+	pinctrl-1 = <&cec_pins_sleep_b>;
+	status = "okay";
+};
+
+&ethernet0 {
+	status = "okay";
+	pinctrl-0 = <&ethernet0_rgmii_pins_a>;
+	pinctrl-1 = <&ethernet0_rgmii_pins_sleep_a>;
+	pinctrl-names = "default", "sleep";
+	phy-mode = "rgmii-id";
+	max-speed = <1000>;
+	phy-handle = <&phy0>;
+
+	mdio0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "snps,dwmac-mdio";
+		phy0: ethernet-phy@0 {
+			reg = <0>;
+		};
+	};
+};
+
+&gpu {
+	contiguous-area = <&gpu_reserved>;
+	status = "okay";
+};
+
+&i2c1 {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&i2c1_pins_a>;
+	pinctrl-1 = <&i2c1_pins_sleep_a>;
+	i2c-scl-rising-time-ns = <100>;
+	i2c-scl-falling-time-ns = <7>;
+	status = "okay";
+	/delete-property/dmas;
+	/delete-property/dma-names;
+
+	hdmi-transmitter@39 {
+		compatible = "sil,sii9022";
+		reg = <0x39>;
+		iovcc-supply = <&v3v3_hdmi>;
+		cvcc12-supply = <&v1v2_hdmi>;
+		reset-gpios = <&gpioa 10 GPIO_ACTIVE_LOW>;
+		interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+		interrupt-parent = <&gpiog>;
+		#sound-dai-cells = <0>;
+		status = "okay";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				sii9022_in: endpoint {
+					remote-endpoint = <&ltdc_ep0_out>;
+				};
+			};
+
+			port@3 {
+				reg = <3>;
+				sii9022_tx_endpoint: endpoint {
+					remote-endpoint = <&i2s2_endpoint>;
+				};
+			};
+		};
+	};
+
+	cs42l51: cs42l51@4a {
+		compatible = "cirrus,cs42l51";
+		reg = <0x4a>;
+		#sound-dai-cells = <0>;
+		VL-supply = <&v3v3>;
+		VD-supply = <&v1v8_audio>;
+		VA-supply = <&v1v8_audio>;
+		VAHP-supply = <&v1v8_audio>;
+		reset-gpios = <&gpiog 9 GPIO_ACTIVE_LOW>;
+		clocks = <&sai2a>;
+		clock-names = "MCLK";
+		status = "okay";
+
+		cs42l51_port: port {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			cs42l51_tx_endpoint: endpoint@0 {
+				reg = <0>;
+				remote-endpoint = <&sai2a_endpoint>;
+				frame-master;
+				bitclock-master;
+			};
+
+			cs42l51_rx_endpoint: endpoint@1 {
+				reg = <1>;
+				remote-endpoint = <&sai2b_endpoint>;
+				frame-master;
+				bitclock-master;
+			};
+		};
+	};
+};
+
+&i2c4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c4_pins_a>;
+	i2c-scl-rising-time-ns = <185>;
+	i2c-scl-falling-time-ns = <20>;
+	status = "okay";
+	/* spare dmas for other usage */
+	/delete-property/dmas;
+	/delete-property/dma-names;
+
+	pmic: stpmic@33 {
+		compatible = "st,stpmic1";
+		reg = <0x33>;
+		interrupts-extended = <&gpioa 0 IRQ_TYPE_EDGE_FALLING>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		status = "okay";
+
+		regulators {
+			compatible = "st,stpmic1-regulators";
+			ldo1-supply = <&v3v3>;
+			ldo3-supply = <&vdd_ddr>;
+			ldo6-supply = <&v3v3>;
+			pwr_sw1-supply = <&bst_out>;
+			pwr_sw2-supply = <&bst_out>;
+
+			vddcore: buck1 {
+				regulator-name = "vddcore";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-always-on;
+				regulator-initial-mode = <0>;
+				regulator-over-current-protection;
+			};
+
+			vdd_ddr: buck2 {
+				regulator-name = "vdd_ddr";
+				regulator-min-microvolt = <1350000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-always-on;
+				regulator-initial-mode = <0>;
+				regulator-over-current-protection;
+			};
+
+			vdd: buck3 {
+				regulator-name = "vdd";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+				st,mask-reset;
+				regulator-initial-mode = <0>;
+				regulator-over-current-protection;
+			};
+
+			v3v3: buck4 {
+				regulator-name = "v3v3";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+				regulator-over-current-protection;
+				regulator-initial-mode = <0>;
+			};
+
+			v1v8_audio: ldo1 {
+				regulator-name = "v1v8_audio";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				interrupts = <IT_CURLIM_LDO1 0>;
+			};
+
+			v3v3_hdmi: ldo2 {
+				regulator-name = "v3v3_hdmi";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+				interrupts = <IT_CURLIM_LDO2 0>;
+			};
+
+			vtt_ddr: ldo3 {
+				regulator-name = "vtt_ddr";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <750000>;
+				regulator-always-on;
+				regulator-over-current-protection;
+			};
+
+			vdd_usb: ldo4 {
+				regulator-name = "vdd_usb";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				interrupts = <IT_CURLIM_LDO4 0>;
+			};
+
+			vdda: ldo5 {
+				regulator-name = "vdda";
+				regulator-min-microvolt = <2900000>;
+				regulator-max-microvolt = <2900000>;
+				interrupts = <IT_CURLIM_LDO5 0>;
+				regulator-boot-on;
+			};
+
+			v1v2_hdmi: ldo6 {
+				regulator-name = "v1v2_hdmi";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-always-on;
+				interrupts = <IT_CURLIM_LDO6 0>;
+			};
+
+			vref_ddr: vref_ddr {
+				regulator-name = "vref_ddr";
+				regulator-always-on;
+				regulator-over-current-protection;
+			};
+
+			bst_out: boost {
+				regulator-name = "bst_out";
+				interrupts = <IT_OCP_BOOST 0>;
+			};
+
+			vbus_otg: pwr_sw1 {
+				regulator-name = "vbus_otg";
+				interrupts = <IT_OCP_OTG 0>;
+			};
+
+			vbus_sw: pwr_sw2 {
+				regulator-name = "vbus_sw";
+				interrupts = <IT_OCP_SWOUT 0>;
+				regulator-active-discharge = <1>;
+			};
+		};
+
+		onkey {
+			compatible = "st,stpmic1-onkey";
+			interrupts = <IT_PONKEY_F 0>, <IT_PONKEY_R 0>;
+			interrupt-names = "onkey-falling", "onkey-rising";
+			power-off-time-sec = <10>;
+			status = "okay";
+		};
+
+		watchdog {
+			compatible = "st,stpmic1-wdt";
+			status = "disabled";
+		};
+	};
+};
+
+&i2s2 {
+	clocks = <&rcc SPI2>, <&rcc SPI2_K>, <&rcc PLL3_Q>, <&rcc PLL3_R>;
+	clock-names = "pclk", "i2sclk", "x8k", "x11k";
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&i2s2_pins_a>;
+	pinctrl-1 = <&i2s2_pins_sleep_a>;
+	status = "okay";
+
+	i2s2_port: port {
+		i2s2_endpoint: endpoint {
+			remote-endpoint = <&sii9022_tx_endpoint>;
+			format = "i2s";
+			mclk-fs = <256>;
+		};
+	};
+};
+
+&ipcc {
+	status = "okay";
+};
+
+&iwdg2 {
+	timeout-sec = <32>;
+	status = "okay";
+};
+
+&ltdc {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&ltdc_pins_a>;
+	pinctrl-1 = <&ltdc_pins_sleep_a>;
+	status = "okay";
+
+	port {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ltdc_ep0_out: endpoint@0 {
+			reg = <0>;
+			remote-endpoint = <&sii9022_in>;
+		};
+	};
+};
+
+&m4_rproc {
+	memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
+			<&vdev0vring1>, <&vdev0buffer>;
+	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
+	mbox-names = "vq0", "vq1", "shutdown";
+	interrupt-parent = <&exti>;
+	interrupts = <68 1>;
+	status = "okay";
+};
+
+&pwr_regulators {
+	vdd-supply = <&vdd>;
+	vdd_3v3_usbfs-supply = <&vdd_usb>;
+};
+
+&rng1 {
+	status = "okay";
+};
+
+&rtc {
+	status = "okay";
+};
+
+&sai2 {
+	clocks = <&rcc SAI2>, <&rcc PLL3_Q>, <&rcc PLL3_R>;
+	clock-names = "pclk", "x8k", "x11k";
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&sai2a_pins_a>, <&sai2b_pins_b>;
+	pinctrl-1 = <&sai2a_sleep_pins_a>, <&sai2b_sleep_pins_b>;
+	status = "okay";
+
+	sai2a: audio-controller@4400b004 {
+		#clock-cells = <0>;
+		dma-names = "tx";
+		clocks = <&rcc SAI2_K>;
+		clock-names = "sai_ck";
+		status = "okay";
+
+		sai2a_port: port {
+			sai2a_endpoint: endpoint {
+				remote-endpoint = <&cs42l51_tx_endpoint>;
+				format = "i2s";
+				mclk-fs = <256>;
+				dai-tdm-slot-num = <2>;
+				dai-tdm-slot-width = <32>;
+			};
+		};
+	};
+
+	sai2b: audio-controller@4400b024 {
+		dma-names = "rx";
+		st,sync = <&sai2a 2>;
+		clocks = <&rcc SAI2_K>, <&sai2a>;
+		clock-names = "sai_ck", "MCLK";
+		status = "okay";
+
+		sai2b_port: port {
+			sai2b_endpoint: endpoint {
+				remote-endpoint = <&cs42l51_rx_endpoint>;
+				format = "i2s";
+				mclk-fs = <256>;
+				dai-tdm-slot-num = <2>;
+				dai-tdm-slot-width = <32>;
+			};
+		};
+	};
+};
+
+&sdmmc1 {
+	pinctrl-names = "default", "opendrain", "sleep";
+	pinctrl-0 = <&sdmmc1_b4_pins_a>;
+	pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
+	pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+	broken-cd;
+	st,neg-edge;
+	bus-width = <4>;
+	vmmc-supply = <&v3v3>;
+	status = "okay";
+};
+
+&sdmmc3 {
+	pinctrl-names = "default", "opendrain", "sleep";
+	pinctrl-0 = <&sdmmc3_b4_pins_a>;
+	pinctrl-1 = <&sdmmc3_b4_od_pins_a>;
+	pinctrl-2 = <&sdmmc3_b4_sleep_pins_a>;
+	broken-cd;
+	st,neg-edge;
+	bus-width = <4>;
+	vmmc-supply = <&v3v3>;
+	status = "disabled";
+};
+
+&timers1 {
+	/* spare dmas for other usage */
+	/delete-property/dmas;
+	/delete-property/dma-names;
+	status = "disabled";
+	pwm {
+		pinctrl-0 = <&pwm1_pins_a>;
+		pinctrl-1 = <&pwm1_sleep_pins_a>;
+		pinctrl-names = "default", "sleep";
+		status = "okay";
+	};
+	timer@0 {
+		status = "okay";
+	};
+};
+
+&timers3 {
+	/delete-property/dmas;
+	/delete-property/dma-names;
+	status = "disabled";
+	pwm {
+		pinctrl-0 = <&pwm3_pins_a>;
+		pinctrl-1 = <&pwm3_sleep_pins_a>;
+		pinctrl-names = "default", "sleep";
+		status = "okay";
+	};
+	timer@2 {
+		status = "okay";
+	};
+};
+
+&timers4 {
+	/delete-property/dmas;
+	/delete-property/dma-names;
+	status = "disabled";
+	pwm {
+		pinctrl-0 = <&pwm4_pins_a &pwm4_pins_b>;
+		pinctrl-1 = <&pwm4_sleep_pins_a &pwm4_sleep_pins_b>;
+		pinctrl-names = "default", "sleep";
+		status = "okay";
+	};
+	timer@3 {
+		status = "okay";
+	};
+};
+
+&timers5 {
+	/delete-property/dmas;
+	/delete-property/dma-names;
+	status = "disabled";
+	pwm {
+		pinctrl-0 = <&pwm5_pins_a>;
+		pinctrl-1 = <&pwm5_sleep_pins_a>;
+		pinctrl-names = "default", "sleep";
+		status = "okay";
+	};
+	timer@4 {
+		status = "okay";
+	};
+};
+
+&timers6 {
+	/delete-property/dmas;
+	/delete-property/dma-names;
+	status = "disabled";
+	timer@5 {
+		status = "okay";
+	};
+};
+
+&timers12 {
+	/delete-property/dmas;
+	/delete-property/dma-names;
+	status = "disabled";
+	pwm {
+		pinctrl-0 = <&pwm12_pins_a>;
+		pinctrl-1 = <&pwm12_sleep_pins_a>;
+		pinctrl-names = "default", "sleep";
+		status = "okay";
+	};
+	timer@11 {
+		status = "okay";
+	};
+};
+
+&uart4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart4_pins_a>;
+	status = "okay";
+};
+
+&usbh_ehci {
+	phys = <&usbphyc_port0>;
+	status = "okay";
+};
+
+&usbotg_hs {
+	dr_mode = "peripheral";
+	phys = <&usbphyc_port1 0>;
+	phy-names = "usb2-phy";
+	status = "okay";
+};
+
+&usbphyc {
+	status = "okay";
+};
+
+&usbphyc_port0 {
+	phy-supply = <&vdd_usb>;
+	vdda1v1-supply = <&reg11>;
+	vdda1v8-supply = <&reg18>;
+};
+
+&usbphyc_port1 {
+	phy-supply = <&vdd_usb>;
+	vdda1v1-supply = <&reg11>;
+	vdda1v8-supply = <&reg18>;
+};
+
+&vrefbuf {
+	regulator-min-microvolt = <2500000>;
+	regulator-max-microvolt = <2500000>;
+	vdda-supply = <&vdd>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32mp15xxaa-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15xxaa-pinctrl.dtsi
new file mode 100644
index 0000000..04f7a43
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp15xxaa-pinctrl.dtsi
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
+ */
+
+&pinctrl {
+	st,package = <STM32MP_PKG_AA>;
+
+	gpioa: gpio@50002000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 0 16>;
+	};
+
+	gpiob: gpio@50003000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 16 16>;
+	};
+
+	gpioc: gpio@50004000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 32 16>;
+	};
+
+	gpiod: gpio@50005000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 48 16>;
+	};
+
+	gpioe: gpio@50006000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 64 16>;
+	};
+
+	gpiof: gpio@50007000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 80 16>;
+	};
+
+	gpiog: gpio@50008000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 96 16>;
+	};
+
+	gpioh: gpio@50009000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 112 16>;
+	};
+
+	gpioi: gpio@5000a000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 128 16>;
+	};
+
+	gpioj: gpio@5000b000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 144 16>;
+	};
+
+	gpiok: gpio@5000c000 {
+		status = "okay";
+		ngpios = <8>;
+		gpio-ranges = <&pinctrl 0 160 8>;
+	};
+};
+
+&pinctrl_z {
+	st,package = <STM32MP_PKG_AA>;
+
+	gpioz: gpio@54004000 {
+		status = "okay";
+		ngpios = <8>;
+		gpio-ranges = <&pinctrl_z 0 400 8>;
+	};
+};
diff --git a/arch/arm/boot/dts/stm32mp15xxab-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15xxab-pinctrl.dtsi
new file mode 100644
index 0000000..328dad1
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp15xxab-pinctrl.dtsi
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
+ */
+
+&pinctrl {
+	st,package = <STM32MP_PKG_AB>;
+
+	gpioa: gpio@50002000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 0 16>;
+	};
+
+	gpiob: gpio@50003000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 16 16>;
+	};
+
+	gpioc: gpio@50004000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 32 16>;
+	};
+
+	gpiod: gpio@50005000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 48 16>;
+	};
+
+	gpioe: gpio@50006000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 64 16>;
+	};
+
+	gpiof: gpio@50007000 {
+		status = "okay";
+		ngpios = <6>;
+		gpio-ranges = <&pinctrl 6 86 6>;
+	};
+
+	gpiog: gpio@50008000 {
+		status = "okay";
+		ngpios = <10>;
+		gpio-ranges = <&pinctrl 6 102 10>;
+	};
+
+	gpioh: gpio@50009000 {
+		status = "okay";
+		ngpios = <2>;
+		gpio-ranges = <&pinctrl 0 112 2>;
+	};
+};
diff --git a/arch/arm/boot/dts/stm32mp15xxac-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15xxac-pinctrl.dtsi
new file mode 100644
index 0000000..7eaa245
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp15xxac-pinctrl.dtsi
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
+ */
+
+&pinctrl {
+	st,package = <STM32MP_PKG_AC>;
+
+	gpioa: gpio@50002000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 0 16>;
+	};
+
+	gpiob: gpio@50003000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 16 16>;
+	};
+
+	gpioc: gpio@50004000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 32 16>;
+	};
+
+	gpiod: gpio@50005000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 48 16>;
+	};
+
+	gpioe: gpio@50006000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 64 16>;
+	};
+
+	gpiof: gpio@50007000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 80 16>;
+	};
+
+	gpiog: gpio@50008000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 96 16>;
+	};
+
+	gpioh: gpio@50009000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 112 16>;
+	};
+
+	gpioi: gpio@5000a000 {
+		status = "okay";
+		ngpios = <12>;
+		gpio-ranges = <&pinctrl 0 128 12>;
+	};
+};
+
+&pinctrl_z {
+	st,package = <STM32MP_PKG_AC>;
+
+	gpioz: gpio@54004000 {
+		status = "okay";
+		ngpios = <8>;
+		gpio-ranges = <&pinctrl_z 0 400 8>;
+	};
+};
diff --git a/arch/arm/boot/dts/stm32mp15xxad-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15xxad-pinctrl.dtsi
new file mode 100644
index 0000000..b63e207
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp15xxad-pinctrl.dtsi
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
+ */
+
+&pinctrl {
+	st,package = <STM32MP_PKG_AD>;
+
+	gpioa: gpio@50002000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 0 16>;
+	};
+
+	gpiob: gpio@50003000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 16 16>;
+	};
+
+	gpioc: gpio@50004000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 32 16>;
+	};
+
+	gpiod: gpio@50005000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 48 16>;
+	};
+
+	gpioe: gpio@50006000 {
+		status = "okay";
+		ngpios = <16>;
+		gpio-ranges = <&pinctrl 0 64 16>;
+	};
+
+	gpiof: gpio@50007000 {
+		status = "okay";
+		ngpios = <6>;
+		gpio-ranges = <&pinctrl 6 86 6>;
+	};
+
+	gpiog: gpio@50008000 {
+		status = "okay";
+		ngpios = <10>;
+		gpio-ranges = <&pinctrl 6 102 10>;
+	};
+
+	gpioh: gpio@50009000 {
+		status = "okay";
+		ngpios = <2>;
+		gpio-ranges = <&pinctrl 0 112 2>;
+	};
+};
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 4c268b7..bf531ef 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -624,6 +624,16 @@ ohci1: usb@1c1c400 {
 			status = "disabled";
 		};
 
+		csi1: csi@1c1d000 {
+			compatible = "allwinner,sun4i-a10-csi1";
+			reg = <0x01c1d000 0x1000>;
+			interrupts = <43>;
+			clocks = <&ccu CLK_AHB_CSI1>, <&ccu CLK_DRAM_CSI1>;
+			clock-names = "bus", "ram";
+			resets = <&ccu RST_CSI1>;
+			status = "disabled";
+		};
+
 		spi3: spi@1c1f000 {
 			compatible = "allwinner,sun4i-a10-spi";
 			reg = <0x01c1f000 0x1000>;
@@ -670,6 +680,31 @@ can0_ph_pins: can0-ph-pins {
 				function = "can";
 			};
 
+			/omit-if-no-ref/
+			csi1_8bits_pg_pins: csi1-8bits-pg-pins {
+				pins = "PG0", "PG2", "PG3", "PG4", "PG5",
+				       "PG6", "PG7", "PG8", "PG9", "PG10",
+				       "PG11";
+				function = "csi1";
+			};
+
+			/omit-if-no-ref/
+			csi1_24bits_ph_pins: csi1-24bits-ph-pins {
+				pins = "PH0", "PH1", "PH2", "PH3", "PH4",
+				       "PH5", "PH6", "PH7", "PH8", "PH9",
+				       "PH10", "PH11", "PH12", "PH13", "PH14",
+				       "PH15", "PH16", "PH17", "PH18", "PH19",
+				       "PH20", "PH21", "PH22", "PH23", "PH24",
+				       "PH25", "PH26", "PH27";
+				function = "csi1";
+			};
+
+			/omit-if-no-ref/
+			csi1_clk_pg_pin: csi1-clk-pg-pin {
+				pins = "PG1";
+				function = "csi1";
+			};
+
 			emac_pins: emac0-pins {
 				pins = "PA0", "PA1", "PA2",
 				       "PA3", "PA4", "PA5", "PA6",
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
index 6befa23..0b526e6e 100644
--- a/arch/arm/boot/dts/sun5i.dtsi
+++ b/arch/arm/boot/dts/sun5i.dtsi
@@ -185,7 +185,7 @@ ve_sram: sram-section@0 {
 		mbus: dram-controller@1c01000 {
 			compatible = "allwinner,sun5i-a13-mbus";
 			reg = <0x01c01000 0x1000>;
-			clocks = <&ccu 99>;
+			clocks = <&ccu CLK_MBUS>;
 			dma-ranges = <0x00000000 0x40000000 0x20000000>;
 			#interconnect-cells = <1>;
 		};
@@ -275,6 +275,7 @@ tcon0: lcd-controller@1c0c000 {
 			compatible = "allwinner,sun5i-a13-tcon";
 			reg = <0x01c0c000 0x1000>;
 			interrupts = <44>;
+			dmas = <&dma SUN4I_DMA_DEDICATED 14>;
 			resets = <&ccu RST_LCD>;
 			reset-names = "lcd";
 			clocks = <&ccu CLK_AHB_LCD>,
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 2cf34ae..7762fbd 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -285,14 +285,19 @@ tcon0: lcd-controller@1c0c000 {
 			compatible = "allwinner,sun6i-a31-tcon";
 			reg = <0x01c0c000 0x1000>;
 			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
-			resets = <&ccu RST_AHB1_LCD0>;
-			reset-names = "lcd";
+			dmas = <&dma 11>;
+			resets = <&ccu RST_AHB1_LCD0>,
+				 <&ccu RST_AHB1_LVDS>;
+			reset-names = "lcd",
+				      "lvds";
 			clocks = <&ccu CLK_AHB1_LCD0>,
 				 <&ccu CLK_LCD0_CH0>,
-				 <&ccu CLK_LCD0_CH1>;
+				 <&ccu CLK_LCD0_CH1>,
+				 <&ccu 15>;
 			clock-names = "ahb",
 				      "tcon-ch0",
-				      "tcon-ch1";
+				      "tcon-ch1",
+				      "lvds-alt";
 			clock-output-names = "tcon0-pixel-clock";
 			#clock-cells = <0>;
 
@@ -334,14 +339,18 @@ tcon1: lcd-controller@1c0d000 {
 			compatible = "allwinner,sun6i-a31-tcon";
 			reg = <0x01c0d000 0x1000>;
 			interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
-			resets = <&ccu RST_AHB1_LCD1>;
-			reset-names = "lcd";
+			dmas = <&dma 12>;
+			resets = <&ccu RST_AHB1_LCD1>,
+				 <&ccu RST_AHB1_LVDS>;
+			reset-names = "lcd", "lvds";
 			clocks = <&ccu CLK_AHB1_LCD1>,
 				 <&ccu CLK_LCD1_CH0>,
-				 <&ccu CLK_LCD1_CH1>;
+				 <&ccu CLK_LCD1_CH1>,
+				 <&ccu 15>;
 			clock-names = "ahb",
 				      "tcon-ch0",
-				      "tcon-ch1";
+				      "tcon-ch1",
+				      "lvds-alt";
 			clock-output-names = "tcon1-pixel-clock";
 			#clock-cells = <0>;
 
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 8aebefd..92b5be9 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -729,6 +729,17 @@ ohci1: usb@1c1c400 {
 			status = "disabled";
 		};
 
+		csi1: csi@1c1d000 {
+			compatible = "allwinner,sun7i-a20-csi1",
+				     "allwinner,sun4i-a10-csi1";
+			reg = <0x01c1d000 0x1000>;
+			interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_AHB_CSI1>, <&ccu CLK_DRAM_CSI1>;
+			clock-names = "bus", "ram";
+			resets = <&ccu RST_CSI1>;
+			status = "disabled";
+		};
+
 		spi3: spi@1c1f000 {
 			compatible = "allwinner,sun4i-a10-spi";
 			reg = <0x01c1f000 0x1000>;
@@ -803,6 +814,31 @@ csi0_clk_pin: csi-clk-pin {
 			};
 
 			/omit-if-no-ref/
+			csi1_8bits_pg_pins: csi1-8bits-pg-pins {
+				pins = "PG0", "PG2", "PG3", "PG4", "PG5",
+				       "PG6", "PG7", "PG8", "PG9", "PG10",
+				       "PG11";
+				function = "csi1";
+			};
+
+			/omit-if-no-ref/
+			csi1_24bits_ph_pins: csi1-24bits-ph-pins {
+				pins = "PH0", "PH1", "PH2", "PH3", "PH4",
+				       "PH5", "PH6", "PH7", "PH8", "PH9",
+				       "PH10", "PH11", "PH12", "PH13", "PH14",
+				       "PH15", "PH16", "PH17", "PH18", "PH19",
+				       "PH20", "PH21", "PH22", "PH23", "PH24",
+				       "PH25", "PH26", "PH27";
+				function = "csi1";
+			};
+
+			/omit-if-no-ref/
+			csi1_clk_pg_pin: csi1-clk-pg-pin {
+				pins = "PG1";
+				function = "csi1";
+			};
+
+			/omit-if-no-ref/
 			emac_pa_pins: emac-pa-pins {
 				pins = "PA0", "PA1", "PA2",
 				       "PA3", "PA4", "PA5", "PA6",
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index f292f96..48487f6 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -182,14 +182,19 @@ tcon0: lcd-controller@1c0c000 {
 			/* compatible gets set in SoC specific dtsi file */
 			reg = <0x01c0c000 0x1000>;
 			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&dma 12>;
 			clocks = <&ccu CLK_BUS_LCD>,
-				 <&ccu CLK_LCD_CH0>;
+				 <&ccu CLK_LCD_CH0>,
+				 <&ccu 13>;
 			clock-names = "ahb",
-				      "tcon-ch0";
+				      "tcon-ch0",
+				      "lvds-alt";
 			clock-output-names = "tcon-pixel-clock";
 			#clock-cells = <0>;
-			resets = <&ccu RST_BUS_LCD>;
-			reset-names = "lcd";
+			resets = <&ccu RST_BUS_LCD>,
+				 <&ccu RST_BUS_LVDS>;
+			reset-names = "lcd",
+				      "lvds";
 			status = "disabled";
 
 			ports {
diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi
index 53c38de..74ac7ee 100644
--- a/arch/arm/boot/dts/sun8i-a83t.dtsi
+++ b/arch/arm/boot/dts/sun8i-a83t.dtsi
@@ -50,6 +50,7 @@
 #include <dt-bindings/reset/sun8i-a83t-ccu.h>
 #include <dt-bindings/reset/sun8i-de2.h>
 #include <dt-bindings/reset/sun8i-r-ccu.h>
+#include <dt-bindings/thermal/thermal.h>
 
 / {
 	interrupt-parent = <&gic>;
@@ -581,6 +582,12 @@ mmc2: mmc@1c11000 {
 		sid: eeprom@1c14000 {
 			compatible = "allwinner,sun8i-a83t-sid";
 			reg = <0x1c14000 0x400>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			ths_calibration: thermal-sensor-calibration@34 {
+				reg = <0x34 8>;
+			};
 		};
 
 		crypto: crypto@1c15000 {
@@ -999,9 +1006,9 @@ emac: ethernet@1c30000 {
 			reg = <0x01c30000 0x104>;
 			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "macirq";
-			resets = <&ccu 13>;
+			resets = <&ccu RST_BUS_EMAC>;
 			reset-names = "stmmaceth";
-			clocks = <&ccu 27>;
+			clocks = <&ccu CLK_BUS_EMAC>;
 			clock-names = "stmmaceth";
 			status = "disabled";
 
@@ -1095,7 +1102,7 @@ r_ccu: clock@1f01400 {
 			compatible = "allwinner,sun8i-a83t-r-ccu";
 			reg = <0x01f01400 0x400>;
 			clocks = <&osc24M>, <&osc16Md512>, <&osc16M>,
-				 <&ccu 6>;
+				 <&ccu CLK_PLL_PERIPH>;
 			clock-names = "hosc", "losc", "iosc", "pll-periph";
 			#clock-cells = <1>;
 			#reset-cells = <1>;
@@ -1165,5 +1172,34 @@ r_rsb: rsb@1f03400 {
 			#address-cells = <1>;
 			#size-cells = <0>;
 		};
+
+		ths: thermal-sensor@1f04000 {
+			compatible = "allwinner,sun8i-a83t-ths";
+			reg = <0x01f04000 0x100>;
+			interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+			nvmem-cells = <&ths_calibration>;
+			nvmem-cell-names = "calibration";
+			#thermal-sensor-cells = <1>;
+		};
+	};
+
+	thermal-zones {
+		cpu0_thermal: cpu0-thermal {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&ths 0>;
+		};
+
+		cpu1_thermal: cpu1-thermal {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&ths 1>;
+		};
+
+		gpu_thermal: gpu-thermal {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&ths 2>;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
index ac9e26b..45a2444 100644
--- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
@@ -143,6 +143,7 @@ hdmi_out_con: endpoint {
 };
 
 &ir {
+	linux,rc-map-name = "rc-tanix-tx3mini";
 	pinctrl-names = "default";
 	pinctrl-0 = <&r_ir_rx_pin>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/sun8i-h3-emlid-neutis-n5h3-devboard.dts b/arch/arm/boot/dts/sun8i-h3-emlid-neutis-n5h3-devboard.dts
new file mode 100644
index 0000000..02fbe00
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-h3-emlid-neutis-n5h3-devboard.dts
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * DTS for Emlid Neutis N5 Dev board.
+ *
+ * Copyright (C) 2019 Georgii Staroselskii <georgii.staroselskii@emlid.com>
+ */
+
+/dts-v1/;
+
+#include "sun8i-h3-emlid-neutis-n5h3.dtsi"
+
+/ {
+	model = "Emlid Neutis N5H3 Developer board";
+	compatible = "emlid,neutis-n5h3-devboard",
+		     "emlid,neutis-n5h3",
+		     "allwinner,sun8i-h3";
+
+	vdd_cpux: gpio-regulator {
+		compatible = "regulator-gpio";
+		regulator-name = "vdd-cpux";
+		regulator-type = "voltage";
+		regulator-boot-on;
+		regulator-always-on;
+		regulator-min-microvolt = <1100000>;
+		regulator-max-microvolt = <1300000>;
+		regulator-ramp-delay = <50>; /* 4ms */
+		gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6 */
+		gpios-states = <0x1>;
+		states = <1100000 0x0>, <1300000 0x1>;
+	};
+
+	connector {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_con_in: endpoint {
+				remote-endpoint = <&hdmi_out_con>;
+			};
+		};
+	};
+
+};
+
+&cpu0 {
+	cpu-supply = <&vdd_cpux>;
+};
+
+&codec {
+	status = "okay";
+};
+
+&emac {
+	phy-handle = <&int_mii_phy>;
+	phy-mode = "mii";
+	allwinner,leds-active-low;
+	status = "okay";
+};
+
+&hdmi {
+	status = "okay";
+};
+
+&hdmi_out {
+	hdmi_out_con: endpoint {
+		remote-endpoint = <&hdmi_con_in>;
+	};
+};
+
+&i2c1 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3-emlid-neutis-n5h3.dtsi b/arch/arm/boot/dts/sun8i-h3-emlid-neutis-n5h3.dtsi
new file mode 100644
index 0000000..eedd5da
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-h3-emlid-neutis-n5h3.dtsi
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * DTSI for Emlid Neutis N5 SoM.
+ *
+ * Copyright (C) 2019 Georgii Staroselskii <georgii.staroselskii@emlid.com>
+ */
+
+/dts-v1/;
+
+#include "sun8i-h3.dtsi"
+#include <arm/sunxi-h3-h5-emlid-neutis.dtsi>
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts
index c73f599..6b14927 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts
@@ -60,8 +60,7 @@ reg_vdd_cpux: vdd-cpux-regulator {
 		enable-gpio = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */
 		gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6 */
 		gpios-states = <0x1>;
-		states = <1100000 0x0
-			  1300000 0x1>;
+		states = <1100000 0>, <1300000 1>;
 	};
 
 	reg_vcc_dram: vcc-dram {
diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
index fe773c7..20217e2 100644
--- a/arch/arm/boot/dts/sun8i-h3.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3.dtsi
@@ -80,7 +80,7 @@ cpu0: cpu@0 {
 			#cooling-cells = <2>;
 		};
 
-		cpu@1 {
+		cpu1: cpu@1 {
 			compatible = "arm,cortex-a7";
 			device_type = "cpu";
 			reg = <1>;
@@ -90,7 +90,7 @@ cpu@1 {
 			#cooling-cells = <2>;
 		};
 
-		cpu@2 {
+		cpu2: cpu@2 {
 			compatible = "arm,cortex-a7";
 			device_type = "cpu";
 			reg = <2>;
@@ -100,7 +100,7 @@ cpu@2 {
 			#cooling-cells = <2>;
 		};
 
-		cpu@3 {
+		cpu3: cpu@3 {
 			compatible = "arm,cortex-a7";
 			device_type = "cpu";
 			reg = <3>;
@@ -111,6 +111,15 @@ cpu@3 {
 		};
 	};
 
+	pmu {
+		compatible = "arm,cortex-a7-pmu";
+		interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+	};
+
 	timer {
 		compatible = "arm,armv7-timer";
 		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
@@ -199,6 +208,26 @@ mali: gpu@1c40000 {
 			assigned-clocks = <&ccu CLK_GPU>;
 			assigned-clock-rates = <384000000>;
 		};
+
+		ths: thermal-sensor@1c25000 {
+			compatible = "allwinner,sun8i-h3-ths";
+			reg = <0x01c25000 0x400>;
+			interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&ccu RST_BUS_THS>;
+			clocks = <&ccu CLK_BUS_THS>, <&ccu CLK_THS>;
+			clock-names = "bus", "mod";
+			nvmem-cells = <&ths_calibration>;
+			nvmem-cell-names = "calibration";
+			#thermal-sensor-cells = <0>;
+		};
+	};
+
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&ths 0>;
+		};
 	};
 };
 
diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi
index 421dfbb..8f09a24 100644
--- a/arch/arm/boot/dts/sun8i-r40.dtsi
+++ b/arch/arm/boot/dts/sun8i-r40.dtsi
@@ -44,6 +44,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/clock/sun8i-de2.h>
 #include <dt-bindings/clock/sun8i-r40-ccu.h>
+#include <dt-bindings/clock/sun8i-tcon-top.h>
 #include <dt-bindings/reset/sun8i-r40-ccu.h>
 #include <dt-bindings/reset/sun8i-de2.h>
 
@@ -78,25 +79,25 @@ cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		cpu@0 {
+		cpu0: cpu@0 {
 			compatible = "arm,cortex-a7";
 			device_type = "cpu";
 			reg = <0>;
 		};
 
-		cpu@1 {
+		cpu1: cpu@1 {
 			compatible = "arm,cortex-a7";
 			device_type = "cpu";
 			reg = <1>;
 		};
 
-		cpu@2 {
+		cpu2: cpu@2 {
 			compatible = "arm,cortex-a7";
 			device_type = "cpu";
 			reg = <2>;
 		};
 
-		cpu@3 {
+		cpu3: cpu@3 {
 			compatible = "arm,cortex-a7";
 			device_type = "cpu";
 			reg = <3>;
@@ -180,6 +181,20 @@ nmi_intc: interrupt-controller@1c00030 {
 			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
+		csi0: csi@1c09000 {
+			compatible = "allwinner,sun8i-r40-csi0",
+				     "allwinner,sun7i-a20-csi0";
+			reg = <0x01c09000 0x1000>;
+			interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_BUS_CSI0>, <&ccu CLK_CSI_SCLK>,
+				 <&ccu CLK_DRAM_CSI0>;
+			clock-names = "bus", "isp", "ram";
+			resets = <&ccu RST_BUS_CSI0>;
+			interconnects = <&mbus 5>;
+			interconnect-names = "dma-mem";
+			status = "disabled";
+		};
+
 		mmc0: mmc@1c0f000 {
 			compatible = "allwinner,sun8i-r40-mmc",
 				     "allwinner,sun50i-a64-mmc";
@@ -355,6 +370,20 @@ clk_out_a_pin: clk-out-a-pin {
 				function = "clk_out_a";
 			};
 
+			/omit-if-no-ref/
+			csi0_8bits_pins: csi0-8bits-pins {
+				pins = "PE0", "PE2", "PE3", "PE4", "PE5",
+				       "PE6", "PE7", "PE8", "PE9", "PE10",
+				       "PE11";
+				function = "csi0";
+			};
+
+			/omit-if-no-ref/
+			csi0_mclk_pin: csi0-mclk-pin {
+				pins = "PE1";
+				function = "csi0";
+			};
+
 			gmac_rgmii_pins: gmac-rgmii-pins {
 				pins = "PA0", "PA1", "PA2", "PA3",
 				       "PA4", "PA5", "PA6", "PA7",
@@ -373,6 +402,26 @@ i2c0_pins: i2c0-pins {
 				function = "i2c0";
 			};
 
+			i2c1_pins: i2c1-pins {
+				pins = "PB18", "PB19";
+				function = "i2c1";
+			};
+
+			i2c2_pins: i2c2-pins {
+				pins = "PB20", "PB21";
+				function = "i2c2";
+			};
+
+			i2c3_pins: i2c3-pins {
+				pins = "PI0", "PI1";
+				function = "i2c3";
+			};
+
+			i2c4_pins: i2c4-pins {
+				pins = "PI2", "PI3";
+				function = "i2c4";
+			};
+
 			mmc0_pins: mmc0-pins {
 				pins = "PF0", "PF1", "PF2",
 				       "PF3", "PF4", "PF5";
@@ -398,6 +447,36 @@ mmc2_pins: mmc2-pins {
 				bias-pull-up;
 			};
 
+			/omit-if-no-ref/
+			spi0_pc_pins: spi0-pc-pins {
+				pins = "PC0", "PC1", "PC2";
+				function = "spi0";
+			};
+
+			/omit-if-no-ref/
+			spi0_cs0_pc_pin: spi0-cs0-pc-pin {
+				pins = "PC23";
+				function = "spi0";
+			};
+
+			/omit-if-no-ref/
+			spi1_pi_pins: spi1-pi-pins {
+				pins = "PI17", "PI18", "PI19";
+				function = "spi1";
+			};
+
+			/omit-if-no-ref/
+			spi1_cs0_pi_pin: spi1-cs0-pi-pin {
+				pins = "PI16";
+				function = "spi1";
+			};
+
+			/omit-if-no-ref/
+			spi1_cs1_pi_pin: spi1-cs1-pi-pin {
+				pins = "PI15";
+				function = "spi1";
+			};
+
 			uart0_pb_pins: uart0-pb-pins {
 				pins = "PB22", "PB23";
 				function = "uart0";
@@ -528,6 +607,8 @@ i2c1: i2c@1c2b000 {
 			interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&ccu CLK_BUS_I2C1>;
 			resets = <&ccu RST_BUS_I2C1>;
+			pinctrl-0 = <&i2c1_pins>;
+			pinctrl-names = "default";
 			status = "disabled";
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -539,6 +620,8 @@ i2c2: i2c@1c2b400 {
 			interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&ccu CLK_BUS_I2C2>;
 			resets = <&ccu RST_BUS_I2C2>;
+			pinctrl-0 = <&i2c2_pins>;
+			pinctrl-names = "default";
 			status = "disabled";
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -550,6 +633,8 @@ i2c3: i2c@1c2b800 {
 			interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&ccu CLK_BUS_I2C3>;
 			resets = <&ccu RST_BUS_I2C3>;
+			pinctrl-0 = <&i2c3_pins>;
+			pinctrl-names = "default";
 			status = "disabled";
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -561,6 +646,60 @@ i2c4: i2c@1c2c000 {
 			interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&ccu CLK_BUS_I2C4>;
 			resets = <&ccu RST_BUS_I2C4>;
+			pinctrl-0 = <&i2c4_pins>;
+			pinctrl-names = "default";
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		spi0: spi@1c05000 {
+			compatible = "allwinner,sun8i-r40-spi",
+				     "allwinner,sun8i-h3-spi";
+			reg = <0x01c05000 0x1000>;
+			interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>;
+			clock-names = "ahb", "mod";
+			resets = <&ccu RST_BUS_SPI0>;
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		spi1: spi@1c06000 {
+			compatible = "allwinner,sun8i-r40-spi",
+				     "allwinner,sun8i-h3-spi";
+			reg = <0x01c06000 0x1000>;
+			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_BUS_SPI1>, <&ccu CLK_SPI1>;
+			clock-names = "ahb", "mod";
+			resets = <&ccu RST_BUS_SPI1>;
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		spi2: spi@1c07000 {
+			compatible = "allwinner,sun8i-r40-spi",
+				     "allwinner,sun8i-h3-spi";
+			reg = <0x01c07000 0x1000>;
+			interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_BUS_SPI2>, <&ccu CLK_SPI2>;
+			clock-names = "ahb", "mod";
+			resets = <&ccu RST_BUS_SPI2>;
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		spi3: spi@1c0f000 {
+			compatible = "allwinner,sun8i-r40-spi",
+				     "allwinner,sun8i-h3-spi";
+			reg = <0x01c0f000 0x1000>;
+			interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_BUS_SPI3>, <&ccu CLK_SPI3>;
+			clock-names = "ahb", "mod";
+			resets = <&ccu RST_BUS_SPI3>;
 			status = "disabled";
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -596,6 +735,14 @@ gmac_mdio: mdio {
 			};
 		};
 
+		mbus: dram-controller@1c62000 {
+			compatible = "allwinner,sun8i-r40-mbus";
+			reg = <0x01c62000 0x1000>;
+			clocks = <&ccu 155>;
+			dma-ranges = <0x00000000 0x40000000 0x80000000>;
+			#interconnect-cells = <1>;
+		};
+
 		tcon_top: tcon-top@1c70000 {
 			compatible = "allwinner,sun8i-r40-tcon-top";
 			reg = <0x01c70000 0x1000>;
@@ -718,7 +865,7 @@ tcon_tv0: lcd-controller@1c73000 {
 			compatible = "allwinner,sun8i-r40-tcon-tv";
 			reg = <0x01c73000 0x1000>;
 			interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&ccu CLK_BUS_TCON_TV0>, <&tcon_top 0>;
+			clocks = <&ccu CLK_BUS_TCON_TV0>, <&tcon_top CLK_TCON_TOP_TV0>;
 			clock-names = "ahb", "tcon-ch1";
 			resets = <&ccu RST_BUS_TCON_TV0>;
 			reset-names = "lcd";
@@ -761,7 +908,7 @@ tcon_tv1: lcd-controller@1c74000 {
 			compatible = "allwinner,sun8i-r40-tcon-tv";
 			reg = <0x01c74000 0x1000>;
 			interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&ccu CLK_BUS_TCON_TV1>, <&tcon_top 1>;
+			clocks = <&ccu CLK_BUS_TCON_TV1>, <&tcon_top CLK_TCON_TOP_TV1>;
 			clock-names = "ahb", "tcon-ch1";
 			resets = <&ccu RST_BUS_TCON_TV1>;
 			reset-names = "lcd";
@@ -803,7 +950,7 @@ tcon_tv1_out_tcon_top: endpoint@1 {
 		gic: interrupt-controller@1c81000 {
 			compatible = "arm,gic-400";
 			reg = <0x01c81000 0x1000>,
-			      <0x01c82000 0x1000>,
+			      <0x01c82000 0x2000>,
 			      <0x01c84000 0x2000>,
 			      <0x01c86000 0x2000>;
 			interrupt-controller;
@@ -848,7 +995,7 @@ hdmi_phy: hdmi-phy@1ef0000 {
 			compatible = "allwinner,sun8i-r40-hdmi-phy";
 			reg = <0x01ef0000 0x10000>;
 			clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
-				 <&ccu 7>, <&ccu 16>;
+				 <&ccu CLK_PLL_VIDEO0>, <&ccu CLK_PLL_VIDEO1>;
 			clock-names = "bus", "mod", "pll-0", "pll-1";
 			resets = <&ccu RST_BUS_HDMI0>;
 			reset-names = "phy";
@@ -856,6 +1003,15 @@ hdmi_phy: hdmi-phy@1ef0000 {
 		};
 	};
 
+	pmu {
+		compatible = "arm,cortex-a7-pmu";
+		interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+	};
+
 	timer {
 		compatible = "arm,armv7-timer";
 		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi
index 23ba56d..81ea508 100644
--- a/arch/arm/boot/dts/sun8i-v3s.dtsi
+++ b/arch/arm/boot/dts/sun8i-v3s.dtsi
@@ -123,8 +123,6 @@ mixer0: mixer@1100000 {
 			clock-names = "bus",
 				      "mod";
 			resets = <&display_clocks 0>;
-			assigned-clocks = <&display_clocks 6>;
-			assigned-clock-rates = <150000000>;
 
 			ports {
 				#address-cells = <1>;
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index 1d900f5..ce4fa67 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -387,16 +387,16 @@ ehci1: usb@a01000 {
 		usbphy2: phy@a01800 {
 			compatible = "allwinner,sun9i-a80-usb-phy";
 			reg = <0x00a01800 0x4>;
-			clocks = <&usb_clocks CLK_USB1_HSIC>,
+			clocks = <&usb_clocks CLK_USB1_PHY>,
 				 <&usb_clocks CLK_USB_HSIC>,
-				 <&usb_clocks CLK_USB1_PHY>;
-			clock-names = "hsic_480M",
+				 <&usb_clocks CLK_USB1_HSIC>;
+			clock-names = "phy",
 				      "hsic_12M",
-				      "phy";
-			resets = <&usb_clocks RST_USB1_HSIC>,
-				 <&usb_clocks RST_USB1_PHY>;
-			reset-names = "hsic",
-				      "phy";
+				      "hsic_480M";
+			resets = <&usb_clocks RST_USB1_PHY>,
+				 <&usb_clocks RST_USB1_HSIC>;
+			reset-names = "phy",
+				      "hsic";
 			status = "disabled";
 			#phy-cells = <0>;
 			/* usb1 is always used with HSIC */
@@ -429,16 +429,16 @@ ohci2: usb@a02400 {
 		usbphy3: phy@a02800 {
 			compatible = "allwinner,sun9i-a80-usb-phy";
 			reg = <0x00a02800 0x4>;
-			clocks = <&usb_clocks CLK_USB2_HSIC>,
+			clocks = <&usb_clocks CLK_USB2_PHY>,
 				 <&usb_clocks CLK_USB_HSIC>,
-				 <&usb_clocks CLK_USB2_PHY>;
-			clock-names = "hsic_480M",
+				 <&usb_clocks CLK_USB2_HSIC>;
+			clock-names = "phy",
 				      "hsic_12M",
-				      "phy";
-			resets = <&usb_clocks RST_USB2_HSIC>,
-				 <&usb_clocks RST_USB2_PHY>;
-			reset-names = "hsic",
-				      "phy";
+				      "hsic_480M";
+			resets = <&usb_clocks RST_USB2_PHY>,
+				 <&usb_clocks RST_USB2_HSIC>;
+			reset-names = "phy",
+				      "hsic";
 			status = "disabled";
 			#phy-cells = <0>;
 		};
@@ -530,9 +530,7 @@ mmc_config_clk: clk@1c13000 {
 			compatible = "allwinner,sun9i-a80-mmc-config-clk";
 			reg = <0x01c13000 0x10>;
 			clocks = <&ccu CLK_BUS_MMC>;
-			clock-names = "ahb";
 			resets = <&ccu RST_BUS_MMC>;
-			reset-names = "ahb";
 			#clock-cells = <1>;
 			#reset-cells = <1>;
 			clock-output-names = "mmc0_config", "mmc1_config",
@@ -880,8 +878,12 @@ tcon0: lcd-controller@3c00000 {
 			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&ccu CLK_BUS_LCD0>, <&ccu CLK_LCD0>;
 			clock-names = "ahb", "tcon-ch0";
-			resets = <&ccu RST_BUS_LCD0>, <&ccu RST_BUS_EDP>;
-			reset-names = "lcd", "edp";
+			resets = <&ccu RST_BUS_LCD0>,
+				 <&ccu RST_BUS_EDP>,
+				 <&ccu RST_BUS_LVDS>;
+			reset-names = "lcd",
+				      "edp",
+				      "lvds";
 			clock-output-names = "tcon0-pixel-clock";
 			#clock-cells = <0>;
 
diff --git a/arch/arm/boot/dts/sunxi-h3-h5-emlid-neutis.dtsi b/arch/arm/boot/dts/sunxi-h3-h5-emlid-neutis.dtsi
new file mode 100644
index 0000000..fc67e30
--- /dev/null
+++ b/arch/arm/boot/dts/sunxi-h3-h5-emlid-neutis.dtsi
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * DTSI for Emlid Neutis SoMs.
+ *
+ * Copyright (C) 2019 Georgii Staroselskii <georgii.staroselskii@emlid.com>
+ */
+
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	wifi_pwrseq: wifi_pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		reset-gpios = <&pio 2 7 GPIO_ACTIVE_LOW>; /* PC7 */
+		post-power-on-delay-ms = <200>;
+		clocks = <&rtc 1>;
+		clock-names = "ext_clock";
+	};
+};
+
+&cpu0 {
+	cpu-supply = <&vdd_cpux>;
+};
+
+&reg_usb0_vbus {
+	gpio = <&r_pio 0 9 GPIO_ACTIVE_HIGH>;   /* PL9 */
+	status = "okay";
+};
+
+
+&de {
+	status = "okay";
+};
+
+&ohci0 {
+	status = "okay";
+};
+
+&ohci1 {
+	status = "okay";
+};
+
+&ohci2 {
+	status = "okay";
+};
+
+&ohci3 {
+	status = "okay";
+};
+
+
+&ehci0 {
+	status = "okay";
+};
+
+&ehci1 {
+	status = "okay";
+};
+
+&ehci2 {
+	status = "okay";
+};
+
+&ehci3 {
+	status = "okay";
+};
+
+&mmc0 {
+	vmmc-supply = <&reg_vcc3v3>;
+	bus-width = <4>;
+	cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+	status = "okay";
+};
+
+
+&mmc1 {
+	vmmc-supply = <&reg_vcc3v3>;
+	vqmmc-supply = <&reg_vcc3v3>;
+	mmc-pwrseq = <&wifi_pwrseq>;
+	bus-width = <4>;
+	non-removable;
+	status = "okay";
+
+	brcmf: wifi@1 {
+		reg = <1>;
+		compatible = "brcm,bcm4329-fmac";
+		interrupt-parent = <&r_pio>;
+		interrupts = <0 5 IRQ_TYPE_LEVEL_LOW>;	/* PL5 */
+		interrupt-names = "host-wake";
+	};
+};
+
+&mmc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc2_8bit_pins>;
+	vmmc-supply = <&reg_vcc3v3>;
+	bus-width = <8>;
+	non-removable;
+	cap-mmc-hw-reset;
+	status = "okay";
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pa_pins>;
+	status = "okay";
+};
+
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
+	uart-has-rtscts;
+	status = "okay";
+
+	bluetooth {
+		compatible = "brcm,bcm43438-bt";
+		clocks = <&rtc 1>;
+		clock-names = "lpo";
+		vbat-supply = <&reg_vcc3v3>;
+		vddio-supply = <&reg_vcc3v3>;
+		shutdown-gpios = <&pio 2 4 GPIO_ACTIVE_HIGH>; /* PC4 */
+		device-wakeup-gpios = <&r_pio 0 7 GPIO_ACTIVE_HIGH>; /* PL7 */
+	};
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart2_pins>;
+	status = "okay";
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart3_pins>;
+	status = "okay";
+};
+
+&usbphy {
+	usb0_id_det-gpios = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */
+	usb0_vbus-supply = <&reg_usb0_vbus>;
+	status = "okay";
+};
+
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
+&codec {
+	allwinner,audio-routing =
+		"Line Out", "LINEOUT",
+		"LINEIN", "Line In",
+		"MIC1", "Mic",
+		"MIC2", "Mic",
+		"Mic",  "MBIAS";
+};
+
+&i2c0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
index 0afea59..5e9c306 100644
--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
@@ -231,6 +231,12 @@ mmc2: mmc@1c11000 {
 		sid: eeprom@1c14000 {
 			/* compatible is in per SoC .dtsi file */
 			reg = <0x1c14000 0x400>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			ths_calibration: thermal-sensor-calibration@34 {
+				reg = <0x34 4>;
+			};
 		};
 
 		usb_otg: usb@1c19000 {
@@ -553,7 +559,7 @@ external_mdio: mdio@2 {
 		mbus: dram-controller@1c62000 {
 			compatible = "allwinner,sun8i-h3-mbus";
 			reg = <0x01c62000 0x1000>;
-			clocks = <&ccu 113>;
+			clocks = <&ccu CLK_MBUS>;
 			dma-ranges = <0x00000000 0x40000000 0xc0000000>;
 			#interconnect-cells = <1>;
 		};
@@ -811,7 +817,7 @@ hdmi_phy: hdmi-phy@1ef0000 {
 			compatible = "allwinner,sun8i-h3-hdmi-phy";
 			reg = <0x01ef0000 0x10000>;
 			clocks = <&ccu CLK_BUS_HDMI>, <&ccu CLK_HDMI_DDC>,
-				 <&ccu 6>;
+				 <&ccu CLK_PLL_VIDEO>;
 			clock-names = "bus", "mod", "pll-0";
 			resets = <&ccu RST_BUS_HDMI0>;
 			reset-names = "phy";
@@ -831,7 +837,8 @@ rtc: rtc@1f00000 {
 		r_ccu: clock@1f01400 {
 			compatible = "allwinner,sun8i-h3-r-ccu";
 			reg = <0x01f01400 0x100>;
-			clocks = <&osc24M>, <&rtc 0>, <&rtc 2>, <&ccu 9>;
+			clocks = <&osc24M>, <&rtc 0>, <&rtc 2>,
+				 <&ccu CLK_PLL_PERIPH0>;
 			clock-names = "hosc", "losc", "iosc", "pll-periph";
 			#clock-cells = <1>;
 			#reset-cells = <1>;
diff --git a/arch/arm/boot/dts/sunxi-libretech-all-h3-it.dtsi b/arch/arm/boot/dts/sunxi-libretech-all-h3-it.dtsi
new file mode 100644
index 0000000..204fba3
--- /dev/null
+++ b/arch/arm/boot/dts/sunxi-libretech-all-h3-it.dtsi
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2019 Chen-Yu Tsai <wens@csie.org>
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+	aliases {
+		serial0 = &uart0;
+		spi0 = &spi0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	connector {
+		compatible = "hdmi-connector";
+		type = "d";
+
+		port {
+			hdmi_con_in: endpoint {
+				remote-endpoint = <&hdmi_out_con>;
+			};
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		status_led {
+			label = "librecomputer:blue:status";
+			gpios = <&pio 0 7 GPIO_ACTIVE_HIGH>; /* PA7 */
+		};
+	};
+
+	reg_vcc3v3: vcc3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&reg_vcc5v0>;
+	};
+
+	/* This represents the board's 5V input */
+	reg_vcc5v0: vcc5v0 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc5v0";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
+	reg_vcc_dram: vcc-dram {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc-dram";
+		regulator-min-microvolt = <1500000>;
+		regulator-max-microvolt = <1500000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&reg_vcc5v0>;
+		gpio = <&r_pio 0 9 GPIO_ACTIVE_HIGH>; /* PL9 */
+		enable-active-high;
+	};
+
+	reg_vcc_io: vcc-io {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc-io";
+		/* This is simply a MOSFET switch */
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&reg_vcc3v3>;
+		gpio = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL5 */
+	};
+
+	reg_vcc_usbwifi: vcc-usbwifi {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc-usbwifi";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		vin-supply = <&reg_vcc5v0>;
+		gpio = <&pio 6 4 GPIO_ACTIVE_HIGH>; /* PG4 */
+		enable-active-high;
+	};
+
+	reg_vdd_cpux: vdd-cpux {
+		compatible = "regulator-fixed";
+		regulator-name = "vdd-cpux";
+		regulator-min-microvolt = <1100000>;
+		regulator-max-microvolt = <1100000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&reg_vcc5v0>;
+		gpio = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */
+		enable-active-high;
+	};
+};
+
+&cpu0 {
+	cpu-supply = <&reg_vdd_cpux>;
+};
+
+&cpu1 {
+	cpu-supply = <&reg_vdd_cpux>;
+};
+
+&cpu2 {
+	cpu-supply = <&reg_vdd_cpux>;
+};
+
+&cpu3 {
+	cpu-supply = <&reg_vdd_cpux>;
+};
+
+&de {
+	status = "okay";
+};
+
+&ehci1 {
+	status = "okay";
+};
+
+&hdmi {
+	status = "okay";
+};
+
+&hdmi_out {
+	hdmi_out_con: endpoint {
+		remote-endpoint = <&hdmi_con_in>;
+	};
+};
+
+&mmc0 {
+	vmmc-supply = <&reg_vcc_io>;
+	bus-width = <4>;
+	cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+	status = "okay";
+};
+
+&pio {
+	vcc-pa-supply = <&reg_vcc_io>;
+	vcc-pc-supply = <&reg_vcc_io>;
+	vcc-pd-supply = <&reg_vcc_io>;
+	vcc-pe-supply = <&reg_vcc_io>;
+	vcc-pf-supply = <&reg_vcc_io>;
+	vcc-pg-supply = <&reg_vcc_io>;
+};
+
+&r_pio {
+	vcc-pl-supply = <&reg_vcc3v3>;
+};
+
+&spi0 {
+	status = "okay";
+
+	spiflash@0 {
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <50000000>;
+	};
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pa_pins>;
+	status = "okay";
+};
+
+&usb_otg {
+	dr_mode = "peripheral";
+	status = "okay";
+};
+
+&usbphy {
+	usb1_vbus-supply = <&reg_vcc_usbwifi>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/tegra124-apalis-emc.dtsi b/arch/arm/boot/dts/tegra124-apalis-emc.dtsi
index d18eaf4..3240145 100644
--- a/arch/arm/boot/dts/tegra124-apalis-emc.dtsi
+++ b/arch/arm/boot/dts/tegra124-apalis-emc.dtsi
@@ -84,7 +84,7 @@ timing-924000000 {
 		};
 	};
 
-	emc@7001b000 {
+	external-memory-controller@7001b000 {
 		emc-timings-1 {
 			nvidia,ram-code = <1>;
 
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1-emc.dtsi b/arch/arm/boot/dts/tegra124-jetson-tk1-emc.dtsi
index 784a529..861d3f2 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1-emc.dtsi
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1-emc.dtsi
@@ -79,7 +79,7 @@ timing-924000000 {
 		};
 	};
 
-	emc@7001b000 {
+	external-memory-controller@7001b000 {
 		emc-timings-3 {
 			nvidia,ram-code = <3>;
 
diff --git a/arch/arm/boot/dts/tegra124-nyan-big-emc.dtsi b/arch/arm/boot/dts/tegra124-nyan-big-emc.dtsi
index fb6b3e1..c91647d 100644
--- a/arch/arm/boot/dts/tegra124-nyan-big-emc.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan-big-emc.dtsi
@@ -219,7 +219,7 @@ timing-792000000 {
 		};
 	};
 
-	emc@7001b000 {
+	external-memory-controller@7001b000 {
 		emc-timings-1 {
 			nvidia,ram-code = <1>;
 
diff --git a/arch/arm/boot/dts/tegra124-nyan-blaze-emc.dtsi b/arch/arm/boot/dts/tegra124-nyan-blaze-emc.dtsi
index c7c31d4..d2beea0 100644
--- a/arch/arm/boot/dts/tegra124-nyan-blaze-emc.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan-blaze-emc.dtsi
@@ -68,7 +68,7 @@ timing-792000000 {
 		};
 	};
 
-	emc@7001b000 {
+	external-memory-controller@7001b000 {
 		emc-timings-1 {
 			nvidia,ram-code = <1>;
 
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index 413bfb9..7f330b1 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -622,9 +622,11 @@ mc: memory-controller@70019000 {
 		#iommu-cells = <1>;
 	};
 
-	emc: emc@7001b000 {
+	emc: external-memory-controller@7001b000 {
 		compatible = "nvidia,tegra124-emc";
 		reg = <0x0 0x7001b000 0x0 0x1000>;
+		clocks = <&tegra_car TEGRA124_CLK_EMC>;
+		clock-names = "emc";
 
 		nvidia,memory-controller = <&mc>;
 	};
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index 85fce5b..be0ab9b 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -311,6 +311,52 @@ nvec@7000c500 {
 		reset-names = "i2c";
 	};
 
+	memory-controller@7000f400 {
+		nvidia,use-ram-code;
+
+		emc-tables@hynix {
+			nvidia,ram-code = <0x0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			emc-table@166500 {
+				reg = <166500>;
+				compatible = "nvidia,tegra20-emc-table";
+				clock-frequency = <166500>;
+				nvidia,emc-registers = <0x0000000a 0x00000016
+					0x00000008 0x00000003 0x00000004 0x00000004
+					0x00000002 0x0000000c 0x00000003 0x00000003
+					0x00000002 0x00000001 0x00000004 0x00000005
+					0x00000004 0x00000009 0x0000000d 0x000004df
+					0x00000000 0x00000003 0x00000003 0x00000003
+					0x00000003 0x00000001 0x0000000a 0x000000c8
+					0x00000003 0x00000006 0x00000004 0x00000008
+					0x00000002 0x00000000 0x00000000 0x00000002
+					0x00000000 0x00000000 0x00000083 0xe03b0323
+					0x007fe010 0x00001414 0x00000000 0x00000000
+					0x00000000 0x00000000 0x00000000 0x00000000>;
+			};
+
+			emc-table@333000 {
+				reg = <333000>;
+				compatible = "nvidia,tegra20-emc-table";
+				clock-frequency = <333000>;
+				nvidia,emc-registers = <0x00000018 0x00000033
+					0x00000012 0x00000004 0x00000004 0x00000005
+					0x00000003 0x0000000c 0x00000006 0x00000006
+					0x00000003 0x00000001 0x00000004 0x00000005
+					0x00000004 0x00000009 0x0000000d 0x00000bff
+					0x00000000 0x00000003 0x00000003 0x00000006
+					0x00000006 0x00000001 0x00000011 0x000000c8
+					0x00000003 0x0000000e 0x00000007 0x00000008
+					0x00000002 0x00000000 0x00000000 0x00000002
+					0x00000000 0x00000000 0x00000083 0xf0440303
+					0x007fe010 0x00001414 0x00000000 0x00000000
+					0x00000000 0x00000000 0x00000000 0x00000000>;
+			};
+		};
+	};
+
 	i2c@7000d000 {
 		status = "okay";
 		clock-frequency = <400000>;
diff --git a/arch/arm/boot/dts/uniphier-ld4.dtsi b/arch/arm/boot/dts/uniphier-ld4.dtsi
index 58cd4e8..64ec46c 100644
--- a/arch/arm/boot/dts/uniphier-ld4.dtsi
+++ b/arch/arm/boot/dts/uniphier-ld4.dtsi
@@ -410,7 +410,8 @@ nand: nand@68000000 {
 			pinctrl-0 = <&pinctrl_nand>;
 			clock-names = "nand", "nand_x", "ecc";
 			clocks = <&sys_clk 2>, <&sys_clk 3>, <&sys_clk 3>;
-			resets = <&sys_rst 2>;
+			reset-names = "nand", "reg";
+			resets = <&sys_rst 2>, <&sys_rst 2>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/uniphier-pinctrl.dtsi b/arch/arm/boot/dts/uniphier-pinctrl.dtsi
index 1fee5ff..bfdfb76 100644
--- a/arch/arm/boot/dts/uniphier-pinctrl.dtsi
+++ b/arch/arm/boot/dts/uniphier-pinctrl.dtsi
@@ -106,6 +106,16 @@ pinctrl_i2c4: i2c4 {
 		function = "i2c4";
 	};
 
+	pinctrl_i2c5: i2c5 {
+		groups = "i2c5";
+		function = "i2c5";
+	};
+
+	pinctrl_i2c6: i2c6 {
+		groups = "i2c6";
+		function = "i2c6";
+	};
+
 	pinctrl_nand: nand {
 		groups = "nand";
 		function = "nand";
diff --git a/arch/arm/boot/dts/uniphier-pro4.dtsi b/arch/arm/boot/dts/uniphier-pro4.dtsi
index 7f64e5a..2ec04d7 100644
--- a/arch/arm/boot/dts/uniphier-pro4.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro4.dtsi
@@ -600,7 +600,8 @@ nand: nand@68000000 {
 			pinctrl-0 = <&pinctrl_nand>;
 			clock-names = "nand", "nand_x", "ecc";
 			clocks = <&sys_clk 2>, <&sys_clk 3>, <&sys_clk 3>;
-			resets = <&sys_rst 2>;
+			reset-names = "nand", "reg";
+			resets = <&sys_rst 2>, <&sys_rst 2>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/uniphier-pro5.dtsi b/arch/arm/boot/dts/uniphier-pro5.dtsi
index eff7471..ea3961f 100644
--- a/arch/arm/boot/dts/uniphier-pro5.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro5.dtsi
@@ -465,7 +465,8 @@ nand: nand@68000000 {
 			pinctrl-0 = <&pinctrl_nand>;
 			clock-names = "nand", "nand_x", "ecc";
 			clocks = <&sys_clk 2>, <&sys_clk 3>, <&sys_clk 3>;
-			resets = <&sys_rst 2>;
+			reset-names = "nand", "reg";
+			resets = <&sys_rst 2>, <&sys_rst 2>;
 		};
 
 		emmc: sdhc@68400000 {
diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
index 4eddbb8..13b0d4a7 100644
--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
@@ -773,7 +773,8 @@ nand: nand@68000000 {
 			pinctrl-0 = <&pinctrl_nand>;
 			clock-names = "nand", "nand_x", "ecc";
 			clocks = <&sys_clk 2>, <&sys_clk 3>, <&sys_clk 3>;
-			resets = <&sys_rst 2>;
+			reset-names = "nand", "reg";
+			resets = <&sys_rst 2>, <&sys_rst 2>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/uniphier-sld8.dtsi b/arch/arm/boot/dts/uniphier-sld8.dtsi
index cbebb6e..4fc6676 100644
--- a/arch/arm/boot/dts/uniphier-sld8.dtsi
+++ b/arch/arm/boot/dts/uniphier-sld8.dtsi
@@ -414,7 +414,8 @@ nand: nand@68000000 {
 			pinctrl-0 = <&pinctrl_nand>;
 			clock-names = "nand", "nand_x", "ecc";
 			clocks = <&sys_clk 2>, <&sys_clk 3>, <&sys_clk 3>;
-			resets = <&sys_rst 2>;
+			reset-names = "nand", "reg";
+			resets = <&sys_rst 2>, <&sys_rst 2>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
index 48086c5..e500911 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
@@ -323,11 +323,6 @@ at93c46d@1 {
 };
 
 &i2c0 {
-	clock-frequency = <100000>;
-	pinctrl-names = "default";
-	pinctrl-0 = <&pinctrl_i2c0>;
-	status = "okay";
-
 	gpio5: io-expander@20 {
 		compatible = "nxp,pca9554";
 		reg = <0x20>;
@@ -350,11 +345,6 @@ gpio6: io-expander@22 {
 };
 
 &i2c2 {
-	clock-frequency = <100000>;
-	pinctrl-names = "default";
-	pinctrl-0 = <&pinctrl_i2c2>;
-	status = "okay";
-
 	tca9548@70 {
 		compatible = "nxp,pca9548";
 		pinctrl-0 = <&pinctrl_i2c_mux_reset>;
diff --git a/arch/arm/boot/dts/vf610-zii-scu4-aib.dts b/arch/arm/boot/dts/vf610-zii-scu4-aib.dts
index d7caf61..b642520 100644
--- a/arch/arm/boot/dts/vf610-zii-scu4-aib.dts
+++ b/arch/arm/boot/dts/vf610-zii-scu4-aib.dts
@@ -407,7 +407,7 @@ &dspi1 {
 	pinctrl-0 = <&pinctrl_dspi1>;
 	status = "okay";
 
-	spi-flash@0 {
+	flash@0 {
 		#address-cells = <1>;
 		#size-cells = <1>;
 		compatible = "jedec,spi-nor";
@@ -420,7 +420,7 @@ partition@0 {
 		};
 	};
 
-	spi-flash@1 {
+	flash@1 {
 		#address-cells = <1>;
 		#size-cells = <1>;
 		compatible = "jedec,spi-nor";
@@ -509,7 +509,7 @@ gpio6: io-expander@22 {
 		#gpio-cells = <2>;
 	};
 
-	lm75@48 {
+	temp-sensor@48 {
 		compatible = "national,lm75";
 		reg = <0x48>;
 	};
@@ -524,7 +524,7 @@ eeprom@52 {
 		reg = <0x52>;
 	};
 
-	ds1682@6b {
+	elapsed-time-recorder@6b {
 		compatible = "dallas,ds1682";
 		reg = <0x6b>;
 	};
@@ -536,7 +536,12 @@ &i2c1 {
 	pinctrl-0 = <&pinctrl_i2c1>;
 	status = "okay";
 
-	adt7411@4a {
+	watchdog@38 {
+		compatible = "zii,rave-wdt";
+		reg = <0x38>;
+	};
+
+	adc@4a {
 		compatible = "adi,adt7411";
 		reg = <0x4a>;
 	};
@@ -548,7 +553,7 @@ &i2c2 {
 	pinctrl-0 = <&pinctrl_i2c2>;
 	status = "okay";
 
-	gpio9: sx1503q@20 {
+	gpio9: io-expander@20 {
 		compatible = "semtech,sx1503q";
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_sx1503_20>;
@@ -559,12 +564,12 @@ gpio9: sx1503q@20 {
 		interrupts = <31 IRQ_TYPE_EDGE_FALLING>;
 	};
 
-	lm75@4e {
+	temp-sensor@4e {
 		compatible = "national,lm75";
 		reg = <0x4e>;
 	};
 
-	lm75@4f {
+	temp-sensor@4f {
 		compatible = "national,lm75";
 		reg = <0x4f>;
 	};
@@ -576,17 +581,17 @@ gpio7: io-expander@23 {
 		reg = <0x23>;
 	};
 
-	adt7411@4a {
+	adc@4a {
 		compatible = "adi,adt7411";
 		reg = <0x4a>;
 	};
 
-	at24c08@54 {
+	eeprom@54 {
 		compatible = "atmel,24c08";
 		reg = <0x54>;
 	};
 
-	tca9548@70 {
+	i2c-mux@70 {
 		compatible = "nxp,pca9548";
 		pinctrl-names = "default";
 		#address-cells = <1>;
@@ -625,7 +630,7 @@ sff4_i2c: i2c@5 {
 		};
 	};
 
-	tca9548@71 {
+	i2c-mux@71 {
 		compatible = "nxp,pca9548";
 		pinctrl-names = "default";
 		reg = <0x71>;
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi
index ca6425a..db3899b 100644
--- a/arch/arm/boot/dts/zynq-7000.dtsi
+++ b/arch/arm/boot/dts/zynq-7000.dtsi
@@ -59,6 +59,39 @@ regulator_vccpint: fixedregulator {
 		regulator-always-on;
 	};
 
+	replicator {
+		compatible = "arm,coresight-static-replicator";
+		clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+		clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+
+		out-ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			/* replicator output ports */
+			port@0 {
+				reg = <0>;
+				replicator_out_port0: endpoint {
+					remote-endpoint = <&tpiu_in_port>;
+				};
+			};
+			port@1 {
+				reg = <1>;
+				replicator_out_port1: endpoint {
+					remote-endpoint = <&etb_in_port>;
+				};
+			};
+		};
+		in-ports {
+			/* replicator input port */
+			port {
+				replicator_in_port0: endpoint {
+					remote-endpoint = <&funnel_out_port>;
+				};
+			};
+		};
+	};
+
 	amba: amba {
 		compatible = "simple-bus";
 		#address-cells = <1>;
@@ -365,5 +398,107 @@ watchdog0: watchdog@f8005000 {
 			reg = <0xf8005000 0x1000>;
 			timeout-sec = <10>;
 		};
+
+		etb@f8801000 {
+			compatible = "arm,coresight-etb10", "arm,primecell";
+			reg = <0xf8801000 0x1000>;
+			clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+			clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+			in-ports {
+				port {
+					etb_in_port: endpoint {
+						remote-endpoint = <&replicator_out_port1>;
+					};
+				};
+			};
+		};
+
+		tpiu@f8803000 {
+			compatible = "arm,coresight-tpiu", "arm,primecell";
+			reg = <0xf8803000 0x1000>;
+			clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+			clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+			in-ports {
+				port {
+					tpiu_in_port: endpoint {
+						remote-endpoint = <&replicator_out_port0>;
+					};
+				};
+			};
+		};
+
+		funnel@f8804000 {
+			compatible = "arm,coresight-static-funnel", "arm,primecell";
+			reg = <0xf8804000 0x1000>;
+			clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+			clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+
+			/* funnel output ports */
+			out-ports {
+				port {
+					funnel_out_port: endpoint {
+						remote-endpoint =
+							<&replicator_in_port0>;
+					};
+				};
+			};
+
+			in-ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				/* funnel input ports */
+				port@0 {
+					reg = <0>;
+					funnel0_in_port0: endpoint {
+						remote-endpoint = <&ptm0_out_port>;
+					};
+				};
+
+				port@1 {
+					reg = <1>;
+					funnel0_in_port1: endpoint {
+						remote-endpoint = <&ptm1_out_port>;
+					};
+				};
+
+				port@2 {
+					reg = <2>;
+					funnel0_in_port2: endpoint {
+					};
+				};
+				/* The other input ports are not connected to anything */
+			};
+		};
+
+		ptm@f889c000 {
+			compatible = "arm,coresight-etm3x", "arm,primecell";
+			reg = <0xf889c000 0x1000>;
+			clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+			clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+			cpu = <&cpu0>;
+			out-ports {
+				port {
+					ptm0_out_port: endpoint {
+						remote-endpoint = <&funnel0_in_port0>;
+					};
+				};
+			};
+		};
+
+		ptm@f889d000 {
+			compatible = "arm,coresight-etm3x", "arm,primecell";
+			reg = <0xf889d000 0x1000>;
+			clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+			clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+			cpu = <&cpu1>;
+			out-ports {
+				port {
+					ptm1_out_port: endpoint {
+						remote-endpoint = <&funnel0_in_port1>;
+					};
+				};
+			};
+		};
 	};
 };
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index 3729a6e..f66bb98 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -7,17 +7,13 @@
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARCH_MULTI_V4T=y
 CONFIG_ARCH_MULTI_V5=y
 # CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_AT91=y
 CONFIG_SOC_AT91RM9200=y
 CONFIG_SOC_AT91SAM9=y
+CONFIG_SOC_SAM9X60=y
 # CONFIG_ATMEL_CLOCKSOURCE_PIT is not set
 CONFIG_AEABI=y
 CONFIG_UACCESS_WITH_MEMCPY=y
@@ -27,6 +23,9 @@
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CMDLINE="console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw"
 CONFIG_KEXEC=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -37,13 +36,7 @@
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_INET_DIAG is not set
-# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET6_XFRM_MODE_BEET is not set
 CONFIG_IPV6_SIT_6RD=y
 CONFIG_CFG80211=y
 CONFIG_MAC80211=y
@@ -57,6 +50,7 @@
 CONFIG_MTD_DATAFLASH=y
 CONFIG_MTD_RAW_NAND=y
 CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_SPI_NOR=y
 CONFIG_MTD_UBI=y
 CONFIG_MTD_UBI_GLUEBI=y
 CONFIG_BLK_DEV_LOOP=y
@@ -69,8 +63,8 @@
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_NETDEVICES=y
-CONFIG_MACB=y
 # CONFIG_NET_VENDOR_BROADCOM is not set
+CONFIG_MACB=y
 CONFIG_DM9000=y
 # CONFIG_NET_VENDOR_FARADAY is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -82,10 +76,12 @@
 # CONFIG_NET_VENDOR_STMICRO is not set
 CONFIG_DAVICOM_PHY=y
 CONFIG_MICREL_PHY=y
-CONFIG_RTL8187=m
 CONFIG_LIBERTAS=m
 CONFIG_LIBERTAS_SDIO=m
 CONFIG_LIBERTAS_SPI=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
+CONFIG_MWIFIEX_USB=m
 CONFIG_RT2X00=m
 CONFIG_RT2500USB=m
 CONFIG_RT73USB=m
@@ -93,15 +89,10 @@
 CONFIG_RT2800USB_RT53XX=y
 CONFIG_RT2800USB_RT55XX=y
 CONFIG_RT2800USB_UNKNOWN=y
+CONFIG_RTL8187=m
 CONFIG_RTL8192CU=m
 # CONFIG_RTLWIFI_DEBUG is not set
-CONFIG_MWIFIEX=m
-CONFIG_MWIFIEX_SDIO=m
-CONFIG_MWIFIEX_USB=m
 CONFIG_INPUT_POLLDEV=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=480
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=272
 CONFIG_INPUT_JOYDEV=y
 CONFIG_INPUT_EVDEV=y
 # CONFIG_KEYBOARD_ATKBD is not set
@@ -110,6 +101,7 @@
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ADS7846=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=y
 # CONFIG_SERIO is not set
 CONFIG_LEGACY_PTY_COUNT=4
 CONFIG_SERIAL_ATMEL=y
@@ -119,25 +111,25 @@
 CONFIG_I2C_GPIO=y
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=y
-CONFIG_POWER_SUPPLY=y
+CONFIG_SPI_ATMEL_QUADSPI=y
 CONFIG_POWER_RESET=y
+# CONFIG_POWER_RESET_AT91_SAMA5D2_SHDWC is not set
+CONFIG_POWER_SUPPLY=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_AT91SAM9X_WATCHDOG=y
+CONFIG_MFD_ATMEL_FLEXCOM=y
 CONFIG_MFD_ATMEL_HLCDC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_SOC_CAMERA=y
 CONFIG_VIDEO_ATMEL_ISI=y
-CONFIG_SOC_CAMERA_OV2640=m
 CONFIG_DRM=y
 CONFIG_DRM_ATMEL_HLCDC=y
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_FB_ATMEL=y
-# CONFIG_LCD_CLASS_DEVICE is not set
 CONFIG_BACKLIGHT_ATMEL_LCDC=y
 # CONFIG_BACKLIGHT_GENERIC is not set
 CONFIG_BACKLIGHT_PWM=y
@@ -150,6 +142,9 @@
 CONFIG_SND_AT91_SOC_SAM9G20_WM8731=y
 CONFIG_SND_ATMEL_SOC_WM8904=y
 CONFIG_SND_AT91_SOC_SAM9X5_WM8731=y
+CONFIG_SND_ATMEL_SOC_CLASSD=y
+CONFIG_SND_SOC_MIKROE_PROTO=m
+CONFIG_SND_MCHP_SOC_I2S_MCC=y
 CONFIG_USB=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_EHCI_HCD=y
@@ -165,6 +160,9 @@
 CONFIG_USB_ATMEL_USBA=y
 CONFIG_USB_G_SERIAL=y
 CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_AT91=y
 CONFIG_MMC_ATMELMCI=y
 CONFIG_MMC_SPI=y
 CONFIG_NEW_LEDS=y
@@ -181,9 +179,12 @@
 CONFIG_RTC_DRV_AT91SAM9=y
 CONFIG_DMADEVICES=y
 CONFIG_AT_HDMAC=y
+CONFIG_AT_XDMAC=y
+CONFIG_MICROCHIP_PIT64B=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_IIO=y
 CONFIG_AT91_ADC=y
+CONFIG_AT91_SAMA5D2_ADC=y
 CONFIG_PWM=y
 CONFIG_PWM_ATMEL=y
 CONFIG_PWM_ATMEL_HLCDC_PWM=y
@@ -200,12 +201,6 @@
 CONFIG_NLS_CODEPAGE_850=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=y
-CONFIG_STRIP_ASM_SYMS=y
-CONFIG_DEBUG_FS=y
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_FTRACE is not set
-CONFIG_DEBUG_USER=y
 CONFIG_CRYPTO_ECB=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
@@ -215,3 +210,9 @@
 CONFIG_FONT_8x8=y
 CONFIG_FONT_ACORN_8x8=y
 CONFIG_FONT_MINI_4x6=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+CONFIG_DEBUG_USER=y
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index fde84f12..c8e0c14 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -38,6 +38,7 @@
 CONFIG_CRYPTO_SHA512_ARM=m
 CONFIG_CRYPTO_AES_ARM_BS=m
 CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_KALLSYMS_ALL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_PARTITION_ADVANCED=y
@@ -92,6 +93,7 @@
 CONFIG_BLK_DEV_CRYPTOLOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_ATA=y
@@ -154,6 +156,7 @@
 CONFIG_SENSORS_PWM_FAN=y
 CONFIG_SENSORS_INA2XX=y
 CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
@@ -291,6 +294,7 @@
 CONFIG_COMMON_CLK_MAX77686=y
 CONFIG_COMMON_CLK_S2MPS11=y
 CONFIG_EXYNOS_IOMMU=y
+CONFIG_PM_DEVFREQ=y
 CONFIG_DEVFREQ_GOV_PERFORMANCE=y
 CONFIG_DEVFREQ_GOV_POWERSAVE=y
 CONFIG_DEVFREQ_GOV_USERSPACE=y
@@ -319,6 +323,8 @@
 CONFIG_ROMFS_FS=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
 CONFIG_ROOT_NFS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
@@ -356,4 +362,7 @@
 # CONFIG_DETECT_HUNG_TASK is not set
 CONFIG_PROVE_LOCKING=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_USER=y
diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig
index f012e81..d2d5f1c 100644
--- a/arch/arm/configs/gemini_defconfig
+++ b/arch/arm/configs/gemini_defconfig
@@ -2,28 +2,36 @@
 CONFIG_SYSVIPC=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_USER_NS=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARCH_MULTI_V4=y
 # CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_GEMINI=y
-CONFIG_PCI=y
-CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
-CONFIG_CMA=y
 CONFIG_CMDLINE="console=ttyS0,115200n8"
 CONFIG_KEXEC=y
-CONFIG_BINFMT_MISC=y
 CONFIG_PM=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BINFMT_MISC=y
+CONFIG_CMA=y
 CONFIG_NET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_NET_DSA=y
+CONFIG_VLAN_8021Q=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_PCI=y
 CONFIG_DEVTMPFS=y
 CONFIG_MTD=y
+CONFIG_MTD_REDBOOT_PARTS=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -40,10 +48,12 @@
 CONFIG_ATA=y
 CONFIG_PATA_FTIDE010=y
 CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_NET_DSA_REALTEK_SMI=y
 CONFIG_GEMINI_ETHERNET=y
 CONFIG_MDIO_BITBANG=y
 CONFIG_MDIO_GPIO=y
-CONFIG_REALTEK_PHY=y
+CONFIG_MARVELL_PHY=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
@@ -69,7 +79,9 @@
 CONFIG_LOGO=y
 CONFIG_USB=y
 CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_FOTG210_HCD=y
+CONFIG_USB_UHCI_HCD=y
 CONFIG_USB_STORAGE=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 3608e55..8f216a5 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -182,6 +182,7 @@
 CONFIG_TOUCHSCREEN_DA9052=y
 CONFIG_TOUCHSCREEN_EGALAX=y
 CONFIG_TOUCHSCREEN_GOODIX=y
+CONFIG_TOUCHSCREEN_ILI210X=y
 CONFIG_TOUCHSCREEN_MAX11801=y
 CONFIG_TOUCHSCREEN_IMX6UL_TSC=y
 CONFIG_TOUCHSCREEN_EDT_FT5X06=y
@@ -282,6 +283,7 @@
 CONFIG_DRM_PANEL_LVDS=y
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
+CONFIG_DRM_TI_TFP410=y
 CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
 CONFIG_DRM_DW_HDMI_CEC=y
 CONFIG_DRM_IMX=y
@@ -323,6 +325,7 @@
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_MXC=y
+CONFIG_USB_ACM=m
 CONFIG_USB_STORAGE=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
@@ -442,6 +445,8 @@
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
 CONFIG_ROOT_NFS=y
 CONFIG_NLS_DEFAULT="cp437"
 CONFIG_NLS_CODEPAGE_437=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index f5d19cc..017d65f 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -492,6 +492,7 @@
 CONFIG_SENSORS_RASPBERRYPI_HWMON=m
 CONFIG_SENSORS_INA2XX=m
 CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_IMX_THERMAL=y
 CONFIG_ROCKCHIP_THERMAL=y
 CONFIG_RCAR_THERMAL=y
@@ -592,6 +593,7 @@
 CONFIG_REGULATOR_S5M8767=y
 CONFIG_REGULATOR_STM32_BOOSTER=m
 CONFIG_REGULATOR_STM32_VREFBUF=m
+CONFIG_REGULATOR_STM32_PWR=y
 CONFIG_REGULATOR_STPMIC1=y
 CONFIG_REGULATOR_TI_ABB=y
 CONFIG_REGULATOR_TPS51632=y
@@ -1093,6 +1095,8 @@
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
 CONFIG_ROOT_NFS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index 4dd1d8c..cdc75ef 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -148,6 +148,7 @@
 CONFIG_DRM=y
 CONFIG_DRM_MSM=m
 CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_ANALOGIX_ANX78XX=m
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 # CONFIG_LCD_CLASS_DEVICE is not set
@@ -251,6 +252,9 @@
 CONFIG_PHY_QCOM_USB_HS=y
 CONFIG_PHY_QCOM_USB_HSIC=y
 CONFIG_QCOM_QFPROM=y
+CONFIG_INTERCONNECT=y
+CONFIG_INTERCONNECT_QCOM=y
+CONFIG_INTERCONNECT_QCOM_MSM8974=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT3_FS=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index de38304..64fa849 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -64,6 +64,7 @@
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_EDT_FT5X06=y
 CONFIG_TOUCHSCREEN_ST1232=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_ADXL34X=y
@@ -123,7 +124,9 @@
 CONFIG_VIDEO_ML86V7667=y
 CONFIG_DRM=y
 CONFIG_DRM_RCAR_DU=y
+CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_DRM_DUMB_VGA_DAC=y
+CONFIG_DRM_LVDS_CODEC=y
 CONFIG_DRM_SII902X=y
 CONFIG_DRM_I2C_ADV7511=y
 CONFIG_DRM_I2C_ADV7511_AUDIO=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 822cddf..4dd5c92 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -12,7 +12,9 @@
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CMDLINE="root=/dev/ram0 console=ttyAMA2,115200n8"
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPUFREQ_DT=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_U8500_CPUIDLE=y
 CONFIG_VFP=y
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index b5752f0..c815477 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -326,16 +326,16 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
 #define gits_write_cwriter(v, c)	__gic_writeq_nonatomic(v, c)
 
 /*
- * GITS_VPROPBASER - hi and lo bits may be accessed independently.
+ * GICR_VPROPBASER - hi and lo bits may be accessed independently.
  */
-#define gits_read_vpropbaser(c)		__gic_readq_nonatomic(c)
-#define gits_write_vpropbaser(v, c)	__gic_writeq_nonatomic(v, c)
+#define gicr_read_vpropbaser(c)		__gic_readq_nonatomic(c)
+#define gicr_write_vpropbaser(v, c)	__gic_writeq_nonatomic(v, c)
 
 /*
- * GITS_VPENDBASER - the Valid bit must be cleared before changing
+ * GICR_VPENDBASER - the Valid bit must be cleared before changing
  * anything else.
  */
-static inline void gits_write_vpendbaser(u64 val, void __iomem *addr)
+static inline void gicr_write_vpendbaser(u64 val, void __iomem *addr)
 {
 	u32 tmp;
 
@@ -352,7 +352,7 @@ static inline void gits_write_vpendbaser(u64 val, void __iomem *addr)
 	__gic_writeq_nonatomic(val, addr);
 }
 
-#define gits_read_vpendbaser(c)		__gic_readq_nonatomic(c)
+#define gicr_read_vpendbaser(c)		__gic_readq_nonatomic(c)
 
 static inline bool gic_prio_masking_enabled(void)
 {
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 9b11851..3944305 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -9,18 +9,29 @@
 
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
-#include <asm/kvm_mmio.h>
 #include <asm/kvm_arm.h>
 #include <asm/cputype.h>
 
 /* arm64 compatibility macros */
+#define PSR_AA32_MODE_FIQ	FIQ_MODE
+#define PSR_AA32_MODE_SVC	SVC_MODE
 #define PSR_AA32_MODE_ABT	ABT_MODE
 #define PSR_AA32_MODE_UND	UND_MODE
 #define PSR_AA32_T_BIT		PSR_T_BIT
+#define PSR_AA32_F_BIT		PSR_F_BIT
 #define PSR_AA32_I_BIT		PSR_I_BIT
 #define PSR_AA32_A_BIT		PSR_A_BIT
 #define PSR_AA32_E_BIT		PSR_E_BIT
 #define PSR_AA32_IT_MASK	PSR_IT_MASK
+#define PSR_AA32_GE_MASK	0x000f0000
+#define PSR_AA32_DIT_BIT	0x00200000
+#define PSR_AA32_PAN_BIT	0x00400000
+#define PSR_AA32_SSBS_BIT	0x00800000
+#define PSR_AA32_Q_BIT		PSR_Q_BIT
+#define PSR_AA32_V_BIT		PSR_V_BIT
+#define PSR_AA32_C_BIT		PSR_C_BIT
+#define PSR_AA32_Z_BIT		PSR_Z_BIT
+#define PSR_AA32_N_BIT		PSR_N_BIT
 
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
 
@@ -41,6 +52,11 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
 	*__vcpu_spsr(vcpu) = v;
 }
 
+static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
+{
+	return spsr;
+}
+
 static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
 					 u8 reg_num)
 {
@@ -182,6 +198,11 @@ static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
 }
 
+static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
 {
 	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
@@ -198,7 +219,7 @@ static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
 }
 
 /* Get Access Size from a data abort */
-static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+static inline unsigned int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
 {
 	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
 	case 0:
@@ -209,7 +230,7 @@ static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
 		return 4;
 	default:
 		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
-		return -EFAULT;
+		return 4;
 	}
 }
 
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 556cd81..c3314b2 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -14,7 +14,6 @@
 #include <asm/cputype.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
-#include <asm/kvm_mmio.h>
 #include <asm/fpstate.h>
 #include <kvm/arm_arch_timer.h>
 
@@ -202,9 +201,6 @@ struct kvm_vcpu_arch {
 	 /* Don't run the guest (internal implementation need) */
 	bool pause;
 
-	/* IO related fields */
-	struct kvm_decode mmio_decode;
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
@@ -284,8 +280,6 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 
-struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
@@ -300,6 +294,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 				     int exception_index) {}
 
+/* MMIO helpers */
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		 phys_addr_t fault_ipa);
+
 static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 				       unsigned long hyp_stack_ptr,
 				       unsigned long vector_ptr)
@@ -363,9 +365,9 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 static inline bool kvm_arch_requires_vhe(void) { return false; }
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) {}
 
 static inline void kvm_arm_init_debug(void) {}
 static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 40e9034..3c1b55e 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
 #include <asm/cp15.h>
+#include <asm/kvm_arm.h>
 #include <asm/vfp.h>
 
 #define __hyp_text __section(.hyp.text) notrace
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
deleted file mode 100644
index 7c0eddb..0000000
--- a/arch/arm/include/asm/kvm_mmio.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_MMIO_H__
-#define __ARM_KVM_MMIO_H__
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-
-struct kvm_decode {
-	unsigned long rt;
-	bool sign_extend;
-};
-
-void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
-unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
-
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		 phys_addr_t fault_ipa);
-
-#endif	/* __ARM_KVM_MMIO_H__ */
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 51beec4..0d3ea35 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -189,6 +189,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 }
 
 #define pmd_large(pmd)		(pmd_val(pmd) & 2)
+#define pmd_leaf(pmd)		(pmd_val(pmd) & 2)
 #define pmd_bad(pmd)		(pmd_val(pmd) & 2)
 #define pmd_present(pmd)	(pmd_val(pmd))
 
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 5b18295..ad55ab0 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -134,6 +134,7 @@
 #define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
 						 PMD_TYPE_SECT)
 #define pmd_large(pmd)		pmd_sect(pmd)
+#define pmd_leaf(pmd)		pmd_sect(pmd)
 
 #define pud_clear(pudp)			\
 	do {				\
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 010fa1a..30fb233 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -42,12 +42,6 @@
 
 #define swapper_pg_dir ((pgd_t *) 0)
 
-#define __swp_type(x)		(0)
-#define __swp_offset(x)		(0)
-#define __swp_entry(typ,off)	((swp_entry_t) { ((typ) | ((off) << 7)) })
-#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
-
 
 typedef pte_t *pte_addr_t;
 
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 669474a..4d4e7b6 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -37,10 +37,6 @@ static inline void __tlb_remove_table(void *_table)
 
 #include <asm-generic/tlb.h>
 
-#ifndef CONFIG_HAVE_RCU_TABLE_FREE
-#define tlb_remove_table(tlb, entry) tlb_remove_page(tlb, entry)
-#endif
-
 static inline void
 __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
 {
diff --git a/arch/arm/include/debug/brcmstb.S b/arch/arm/include/debug/brcmstb.S
index bf8702e..132a20c 100644
--- a/arch/arm/include/debug/brcmstb.S
+++ b/arch/arm/include/debug/brcmstb.S
@@ -31,6 +31,7 @@
 #define UARTA_7268		UARTA_7255
 #define UARTA_7271		UARTA_7268
 #define UARTA_7278		REG_PHYS_ADDR_V7(0x40c000)
+#define UARTA_7216		UARTA_7278
 #define UARTA_7364		REG_PHYS_ADDR(0x40b000)
 #define UARTA_7366		UARTA_7364
 #define UARTA_74371		REG_PHYS_ADDR(0x406b00)
@@ -82,17 +83,18 @@
 
 		/* Chip specific detection starts here */
 20:		checkuart(\rp, \rv, 0x33900000, 3390)
-21:		checkuart(\rp, \rv, 0x72500000, 7250)
-22:		checkuart(\rp, \rv, 0x72550000, 7255)
-23:		checkuart(\rp, \rv, 0x72600000, 7260)
-24:		checkuart(\rp, \rv, 0x72680000, 7268)
-25:		checkuart(\rp, \rv, 0x72710000, 7271)
-26:		checkuart(\rp, \rv, 0x72780000, 7278)
-27:		checkuart(\rp, \rv, 0x73640000, 7364)
-28:		checkuart(\rp, \rv, 0x73660000, 7366)
-29:		checkuart(\rp, \rv, 0x07437100, 74371)
-30:		checkuart(\rp, \rv, 0x74390000, 7439)
-31:		checkuart(\rp, \rv, 0x74450000, 7445)
+21:		checkuart(\rp, \rv, 0x72160000, 7216)
+22:		checkuart(\rp, \rv, 0x72500000, 7250)
+23:		checkuart(\rp, \rv, 0x72550000, 7255)
+24:		checkuart(\rp, \rv, 0x72600000, 7260)
+25:		checkuart(\rp, \rv, 0x72680000, 7268)
+26:		checkuart(\rp, \rv, 0x72710000, 7271)
+27:		checkuart(\rp, \rv, 0x72780000, 7278)
+28:		checkuart(\rp, \rv, 0x73640000, 7364)
+29:		checkuart(\rp, \rv, 0x73660000, 7366)
+30:		checkuart(\rp, \rv, 0x07437100, 74371)
+31:		checkuart(\rp, \rv, 0x74390000, 7439)
+32:		checkuart(\rp, \rv, 0x74450000, 7445)
 
 		/* No valid UART found */
 90:		mov	\rp, #0
diff --git a/arch/arm/kernel/atags_proc.c b/arch/arm/kernel/atags_proc.c
index 312cb89..4247ebf 100644
--- a/arch/arm/kernel/atags_proc.c
+++ b/arch/arm/kernel/atags_proc.c
@@ -17,9 +17,9 @@ static ssize_t atags_read(struct file *file, char __user *buf,
 	return simple_read_from_buffer(buf, count, ppos, b->data, b->size);
 }
 
-static const struct file_operations atags_fops = {
-	.read = atags_read,
-	.llseek = default_llseek,
+static const struct proc_ops atags_proc_ops = {
+	.proc_read	= atags_read,
+	.proc_lseek	= default_llseek,
 };
 
 #define BOOT_PARAMS_SIZE 1536
@@ -61,7 +61,7 @@ static int __init init_atags_procfs(void)
 	b->size = size;
 	memcpy(b->data, atags_copy, size);
 
-	tags_entry = proc_create_data("atags", 0400, NULL, &atags_fops, b);
+	tags_entry = proc_create_data("atags", 0400, NULL, &atags_proc_ops, b);
 	if (!tags_entry)
 		goto nomem;
 
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index d0a464e..d8e18cd 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1164,8 +1164,6 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
 	conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
-	conswitchp = &dummy_con;
 #endif
 #endif
 
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 71778bb..cc726af 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -92,6 +92,8 @@ static int save_trace(struct stackframe *frame, void *d)
 		return 0;
 
 	regs = (struct pt_regs *)frame->sp;
+	if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
+		return 0;
 
 	trace->entries[trace->nr_entries++] = regs->ARM_pc;
 
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index abb7dd7..1e70e72 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -64,14 +64,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
 
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
+	unsigned long end = frame + 4 + sizeof(struct pt_regs);
+
 #ifdef CONFIG_KALLSYMS
 	printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
 #else
 	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
 
-	if (in_entry_text(from))
-		dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
+	if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
+		dump_mem("", "Exception stack", frame + 4, end);
 }
 
 void dump_backtrace_stm(u32 *stack, u32 instruction)
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 0e6f235..9f7ae0d 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -34,11 +34,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ NULL }
 };
 
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 static u64 core_reg_offset_from_id(u64 id)
 {
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index af41725..7979420 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -105,11 +105,28 @@
 	    AT91SAM9X35
 	    AT91SAM9XE
 
+config SOC_SAM9X60
+	bool "SAM9X60"
+	depends on ARCH_MULTI_V5
+	select ATMEL_AIC5_IRQ
+	select ATMEL_PM if PM
+	select ATMEL_SDRAMC
+	select CPU_ARM926T
+	select HAVE_AT91_USB_CLK
+	select HAVE_AT91_GENERATED_CLK
+	select HAVE_AT91_SAM9X60_PLL
+	select MEMORY
+	select PINCTRL_AT91
+	select SOC_SAM_V4_V5
+	select SRAM if PM
+	help
+	  Select this if you are using Microchip's SAM9X60 SoC
+
 comment "Clocksource driver selection"
 
 config ATMEL_CLOCKSOURCE_PIT
 	bool "Periodic Interval Timer (PIT) support"
-	depends on SOC_AT91SAM9 || SOC_SAMA5
+	depends on SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAMA5
 	default SOC_AT91SAM9 || SOC_SAMA5
 	select ATMEL_PIT
 	help
@@ -119,7 +136,7 @@
 
 config ATMEL_CLOCKSOURCE_TCB
 	bool "Timer Counter Blocks (TCB) support"
-	default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5
+	default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAMA5
 	select ATMEL_TCB_CLKSRC
 	help
 	  Select this to get a high precision clocksource based on a
@@ -154,6 +171,9 @@
 config HAVE_AT91_I2S_MUX_CLK
 	bool
 
+config HAVE_AT91_SAM9X60_PLL
+	bool
+
 config SOC_SAM_V4_V5
 	bool
 
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index de64301..f565490 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -6,6 +6,7 @@
 # CPU-specific support
 obj-$(CONFIG_SOC_AT91RM9200)	+= at91rm9200.o
 obj-$(CONFIG_SOC_AT91SAM9)	+= at91sam9.o
+obj-$(CONFIG_SOC_SAM9X60)	+= sam9x60.o
 obj-$(CONFIG_SOC_SAMA5)		+= sama5.o
 obj-$(CONFIG_SOC_SAMV7)		+= samv7.o
 
diff --git a/arch/arm/mach-at91/at91sam9.c b/arch/arm/mach-at91/at91sam9.c
index bf629c9..7e57218 100644
--- a/arch/arm/mach-at91/at91sam9.c
+++ b/arch/arm/mach-at91/at91sam9.c
@@ -31,21 +31,3 @@ DT_MACHINE_START(at91sam_dt, "Atmel AT91SAM9")
 	.init_machine	= at91sam9_init,
 	.dt_compat	= at91_dt_board_compat,
 MACHINE_END
-
-static void __init sam9x60_init(void)
-{
-	of_platform_default_populate(NULL, NULL, NULL);
-
-	sam9x60_pm_init();
-}
-
-static const char *const sam9x60_dt_board_compat[] __initconst = {
-	"microchip,sam9x60",
-	NULL
-};
-
-DT_MACHINE_START(sam9x60_dt, "Microchip SAM9X60")
-	/* Maintainer: Microchip */
-	.init_machine	= sam9x60_init,
-	.dt_compat	= sam9x60_dt_board_compat,
-MACHINE_END
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index d5af6ae..374b9d1 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -691,6 +691,12 @@ static void __init at91_pm_use_default_mode(int pm_mode)
 		soc_pm.data.suspend_mode = AT91_PM_ULP0;
 }
 
+static const struct of_device_id atmel_shdwc_ids[] = {
+	{ .compatible = "atmel,sama5d2-shdwc" },
+	{ .compatible = "microchip,sam9x60-shdwc" },
+	{ /* sentinel. */ }
+};
+
 static void __init at91_pm_modes_init(void)
 {
 	struct device_node *np;
@@ -700,7 +706,7 @@ static void __init at91_pm_modes_init(void)
 	    !at91_is_pm_mode_active(AT91_PM_ULP1))
 		return;
 
-	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-shdwc");
+	np = of_find_matching_node(NULL, atmel_shdwc_ids);
 	if (!np) {
 		pr_warn("%s: failed to find shdwc!\n", __func__);
 		goto ulp1_default;
@@ -751,6 +757,7 @@ static const struct of_device_id atmel_pmc_ids[] __initconst = {
 	{ .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
 	{ .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
 	{ .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
+	{ .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[1] },
 	{ /* sentinel */ },
 };
 
@@ -805,7 +812,7 @@ void __init at91rm9200_pm_init(void)
 
 void __init sam9x60_pm_init(void)
 {
-	if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
+	if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
 		return;
 
 	at91_pm_modes_init();
diff --git a/arch/arm/mach-at91/sam9x60.c b/arch/arm/mach-at91/sam9x60.c
new file mode 100644
index 0000000..d8c739d
--- /dev/null
+++ b/arch/arm/mach-at91/sam9x60.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Setup code for SAM9X60.
+ *
+ * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include <asm/mach/arch.h>
+#include <asm/system_misc.h>
+
+#include "generic.h"
+
+static void __init sam9x60_init(void)
+{
+	of_platform_default_populate(NULL, NULL, NULL);
+
+	sam9x60_pm_init();
+}
+
+static const char *const sam9x60_dt_board_compat[] __initconst = {
+	"microchip,sam9x60",
+	NULL
+};
+
+DT_MACHINE_START(sam9x60_dt, "Microchip SAM9X60")
+	/* Maintainer: Microchip */
+	.init_machine	= sam9x60_init,
+	.dt_compat	= sam9x60_dt_board_compat,
+MACHINE_END
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index e4e25f2..fcfe2a0 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -211,6 +211,7 @@
 	bool "Broadcom BCM7XXX based boards"
 	depends on ARCH_MULTI_V7
 	select ARCH_HAS_RESET_CONTROLLER
+	select ARM_AMBA
 	select ARM_GIC
 	select ARM_ERRATA_798181 if SMP
 	select HAVE_ARM_ARCH_TIMER
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index a03d844..58838a9 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -7,8 +7,7 @@
 ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include
 
 # Common objects
-obj-y 					:= time.o serial.o usb.o \
-					   common.o sram.o
+obj-y 					:= serial.o usb.o common.o sram.o
 
 obj-$(CONFIG_DAVINCI_MUX)		+= mux.o
 
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 150a36f..2328b15 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -30,6 +30,8 @@
 #include <linux/spi/eeprom.h>
 #include <linux/v4l2-dv-timings.h>
 #include <linux/platform_data/ti-aemif.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -245,6 +247,19 @@ static struct davinci_i2c_platform_data i2c_pdata = {
 	.bus_delay	= 0	/* usec */,
 };
 
+/* Fixed regulator support */
+static struct regulator_consumer_supply fixed_supplies_3_3v[] = {
+	/* Baseboard 3.3V: 5V -> TPS767D301 -> 3.3V */
+	REGULATOR_SUPPLY("AVDD", "1-0018"),
+	REGULATOR_SUPPLY("DRVDD", "1-0018"),
+	REGULATOR_SUPPLY("IOVDD", "1-0018"),
+};
+
+static struct regulator_consumer_supply fixed_supplies_1_8v[] = {
+	/* Baseboard 1.8V: 5V -> TPS767D301 -> 1.8V */
+	REGULATOR_SUPPLY("DVDD", "1-0018"),
+};
+
 static int dm365evm_keyscan_enable(struct device *dev)
 {
 	return davinci_cfg_reg(DM365_KEYSCAN);
@@ -800,6 +815,11 @@ static __init void dm365_evm_init(void)
 	if (ret)
 		pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
 
+	regulator_register_always_on(0, "fixed-dummy", fixed_supplies_1_8v,
+				     ARRAY_SIZE(fixed_supplies_1_8v), 1800000);
+	regulator_register_always_on(1, "fixed-dummy", fixed_supplies_3_3v,
+				     ARRAY_SIZE(fixed_supplies_3_3v), 3300000);
+
 	nvmem_add_cell_table(&davinci_nvmem_cell_table);
 	nvmem_add_cell_lookups(&davinci_nvmem_cell_lookup, 1);
 
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 9d87d4e..3461d12 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -29,6 +29,8 @@
 #include <linux/v4l2-dv-timings.h>
 #include <linux/export.h>
 #include <linux/leds.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
 
 #include <media/i2c/tvp514x.h>
 
@@ -653,6 +655,19 @@ static struct i2c_board_info __initdata i2c_info[] =  {
 	},
 };
 
+/* Fixed regulator support */
+static struct regulator_consumer_supply fixed_supplies_3_3v[] = {
+	/* Baseboard 3.3V: 5V -> TPS54310PWP -> 3.3V */
+	REGULATOR_SUPPLY("AVDD", "1-001b"),
+	REGULATOR_SUPPLY("DRVDD", "1-001b"),
+};
+
+static struct regulator_consumer_supply fixed_supplies_1_8v[] = {
+	/* Baseboard 1.8V: 5V -> TPS54310PWP -> 1.8V */
+	REGULATOR_SUPPLY("IOVDD", "1-001b"),
+	REGULATOR_SUPPLY("DVDD", "1-001b"),
+};
+
 #define DM644X_I2C_SDA_PIN	GPIO_TO_PIN(2, 12)
 #define DM644X_I2C_SCL_PIN	GPIO_TO_PIN(2, 11)
 
@@ -823,6 +838,17 @@ static int davinci_phy_fixup(struct phy_device *phydev)
 
 #define HAS_NAND	IS_ENABLED(CONFIG_MTD_NAND_DAVINCI)
 
+#define GPIO_nVBUS_DRV		160
+
+static struct gpiod_lookup_table dm644evm_usb_gpio_table = {
+	.dev_id = "musb-davinci",
+	.table = {
+		GPIO_LOOKUP("davinci_gpio", GPIO_nVBUS_DRV, NULL,
+			    GPIO_ACTIVE_HIGH),
+		{ }
+	},
+};
+
 static __init void davinci_evm_init(void)
 {
 	int ret;
@@ -831,6 +857,11 @@ static __init void davinci_evm_init(void)
 
 	dm644x_register_clocks();
 
+	regulator_register_always_on(0, "fixed-dummy", fixed_supplies_1_8v,
+				     ARRAY_SIZE(fixed_supplies_1_8v), 1800000);
+	regulator_register_always_on(1, "fixed-dummy", fixed_supplies_3_3v,
+				     ARRAY_SIZE(fixed_supplies_3_3v), 3300000);
+
 	dm644x_init_devices();
 
 	ret = dm644x_gpio_register();
@@ -875,6 +906,7 @@ static __init void davinci_evm_init(void)
 	dm644x_init_asp();
 
 	/* irlml6401 switches over 1A, in under 8 msec */
+	gpiod_add_lookup_table(&dm644evm_usb_gpio_table);
 	davinci_setup_usb(1000, 8);
 
 	if (IS_BUILTIN(CONFIG_PHYLIB)) {
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 2d69e70..feb206b 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -21,7 +21,6 @@
 #include <mach/common.h>
 #include <mach/cputype.h>
 #include <mach/da8xx.h>
-#include <mach/time.h>
 
 #include "asp.h"
 #include "cpuidle.h"
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index e650131..849e811 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -17,7 +17,6 @@
 #include <mach/hardware.h>
 #include <mach/cputype.h>
 #include <mach/mux.h>
-#include <mach/time.h>
 
 #include "davinci.h"
 #include "irqs.h"
@@ -303,21 +302,3 @@ int davinci_gpio_register(struct resource *res, int size, void *pdata)
 	davinci_gpio_device.dev.platform_data = pdata;
 	return platform_device_register(&davinci_gpio_device);
 }
-
-/*-------------------------------------------------------------------------*/
-
-/*-------------------------------------------------------------------------*/
-
-struct davinci_timer_instance davinci_timer_instance[2] = {
-	{
-		.base		= DAVINCI_TIMER0_BASE,
-		.bottom_irq	= DAVINCI_INTC_IRQ(IRQ_TINT0_TINT12),
-		.top_irq	= DAVINCI_INTC_IRQ(IRQ_TINT0_TINT34),
-	},
-	{
-		.base		= DAVINCI_TIMER1_BASE,
-		.bottom_irq	= DAVINCI_INTC_IRQ(IRQ_TINT1_TINT12),
-		.top_irq	= DAVINCI_INTC_IRQ(IRQ_TINT1_TINT34),
-	},
-};
-
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 9fc5c73..c1e0d46 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -35,7 +35,8 @@
 #include <mach/cputype.h>
 #include <mach/mux.h>
 #include <mach/serial.h>
-#include <mach/time.h>
+
+#include <clocksource/timer-davinci.h>
 
 #include "asp.h"
 #include "davinci.h"
@@ -660,10 +661,16 @@ static struct davinci_id dm365_ids[] = {
 	},
 };
 
-static struct davinci_timer_info dm365_timer_info = {
-	.timers		= davinci_timer_instance,
-	.clockevent_id	= T0_BOT,
-	.clocksource_id	= T0_TOP,
+/*
+ * Bottom half of timer0 is used for clockevent, top half is used for
+ * clocksource.
+ */
+static const struct davinci_timer_cfg dm365_timer_cfg = {
+	.reg = DEFINE_RES_IO(DAVINCI_TIMER0_BASE, SZ_128),
+	.irq = {
+		DEFINE_RES_IRQ(DAVINCI_INTC_IRQ(IRQ_TINT0_TINT12)),
+		DEFINE_RES_IRQ(DAVINCI_INTC_IRQ(IRQ_TINT0_TINT34)),
+	},
 };
 
 #define DM365_UART1_BASE	(IO_PHYS + 0x106000)
@@ -723,7 +730,6 @@ static const struct davinci_soc_info davinci_soc_info_dm365 = {
 	.pinmux_base		= DAVINCI_SYSTEM_MODULE_BASE,
 	.pinmux_pins		= dm365_pins,
 	.pinmux_pins_num	= ARRAY_SIZE(dm365_pins),
-	.timer_info		= &dm365_timer_info,
 	.emac_pdata		= &dm365_emac_pdata,
 	.sram_dma		= 0x00010000,
 	.sram_len		= SZ_32K,
@@ -771,6 +777,7 @@ void __init dm365_init_time(void)
 {
 	void __iomem *pll1, *pll2, *psc;
 	struct clk *clk;
+	int rv;
 
 	clk_register_fixed_rate(NULL, "ref_clk", NULL, 0, DM365_REF_FREQ);
 
@@ -789,7 +796,8 @@ void __init dm365_init_time(void)
 		return;
 	}
 
-	davinci_timer_init(clk);
+	rv = davinci_timer_register(clk, &dm365_timer_cfg);
+	WARN(rv, "Unable to register the timer: %d\n", rv);
 }
 
 void __init dm365_register_clocks(void)
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
index 9526e5d..139b83de 100644
--- a/arch/arm/mach-davinci/include/mach/common.h
+++ b/arch/arm/mach-davinci/include/mach/common.h
@@ -22,22 +22,6 @@
 #define DAVINCI_INTC_START		NR_IRQS
 #define DAVINCI_INTC_IRQ(_irqnum)	(DAVINCI_INTC_START + (_irqnum))
 
-void davinci_timer_init(struct clk *clk);
-
-struct davinci_timer_instance {
-	u32		base;
-	u32		bottom_irq;
-	u32		top_irq;
-	unsigned long	cmp_off;
-	unsigned int	cmp_irq;
-};
-
-struct davinci_timer_info {
-	struct davinci_timer_instance	*timers;
-	unsigned int			clockevent_id;
-	unsigned int			clocksource_id;
-};
-
 struct davinci_gpio_controller;
 
 /*
@@ -58,7 +42,6 @@ struct davinci_soc_info {
 	u32				pinmux_base;
 	const struct mux_config		*pinmux_pins;
 	unsigned long			pinmux_pins_num;
-	struct davinci_timer_info	*timer_info;
 	int				gpio_type;
 	u32				gpio_base;
 	unsigned			gpio_num;
diff --git a/arch/arm/mach-davinci/include/mach/time.h b/arch/arm/mach-davinci/include/mach/time.h
deleted file mode 100644
index ba91373..0000000
--- a/arch/arm/mach-davinci/include/mach/time.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Local header file for DaVinci time code.
- *
- * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#ifndef __ARCH_ARM_MACH_DAVINCI_TIME_H
-#define __ARCH_ARM_MACH_DAVINCI_TIME_H
-
-#define DAVINCI_TIMER1_BASE		(IO_PHYS + 0x21800)
-
-enum {
-	T0_BOT,
-	T0_TOP,
-	T1_BOT,
-	T1_TOP,
-	NUM_TIMERS
-};
-
-#define IS_TIMER1(id)		(id & 0x2)
-#define IS_TIMER0(id)		(!IS_TIMER1(id))
-#define IS_TIMER_TOP(id)	((id & 0x1))
-#define IS_TIMER_BOT(id)	(!IS_TIMER_TOP(id))
-
-#define ID_TO_TIMER(id)		(IS_TIMER1(id) != 0)
-
-extern struct davinci_timer_instance davinci_timer_instance[];
-
-#endif /* __ARCH_ARM_MACH_DAVINCI_TIME_H */
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
deleted file mode 100644
index 740410a..0000000
--- a/arch/arm/mach-davinci/time.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * DaVinci timer subsystem
- *
- * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/sched_clock.h>
-
-#include <asm/mach/irq.h>
-#include <asm/mach/time.h>
-
-#include <mach/cputype.h>
-#include <mach/hardware.h>
-#include <mach/time.h>
-
-static struct clock_event_device clockevent_davinci;
-static unsigned int davinci_clock_tick_rate;
-
-/*
- * This driver configures the 2 64-bit count-up timers as 4 independent
- * 32-bit count-up timers used as follows:
- */
-
-enum {
-	TID_CLOCKEVENT,
-	TID_CLOCKSOURCE,
-};
-
-/* Timer register offsets */
-#define PID12			0x0
-#define TIM12			0x10
-#define TIM34			0x14
-#define PRD12			0x18
-#define PRD34			0x1c
-#define TCR			0x20
-#define TGCR			0x24
-#define WDTCR			0x28
-
-/* Offsets of the 8 compare registers */
-#define	CMP12_0			0x60
-#define	CMP12_1			0x64
-#define	CMP12_2			0x68
-#define	CMP12_3			0x6c
-#define	CMP12_4			0x70
-#define	CMP12_5			0x74
-#define	CMP12_6			0x78
-#define	CMP12_7			0x7c
-
-/* Timer register bitfields */
-#define TCR_ENAMODE_DISABLE          0x0
-#define TCR_ENAMODE_ONESHOT          0x1
-#define TCR_ENAMODE_PERIODIC         0x2
-#define TCR_ENAMODE_MASK             0x3
-
-#define TGCR_TIMMODE_SHIFT           2
-#define TGCR_TIMMODE_64BIT_GP        0x0
-#define TGCR_TIMMODE_32BIT_UNCHAINED 0x1
-#define TGCR_TIMMODE_64BIT_WDOG      0x2
-#define TGCR_TIMMODE_32BIT_CHAINED   0x3
-
-#define TGCR_TIM12RS_SHIFT           0
-#define TGCR_TIM34RS_SHIFT           1
-#define TGCR_RESET                   0x0
-#define TGCR_UNRESET                 0x1
-#define TGCR_RESET_MASK              0x3
-
-struct timer_s {
-	char *name;
-	unsigned int id;
-	unsigned long period;
-	unsigned long opts;
-	unsigned long flags;
-	void __iomem *base;
-	unsigned long tim_off;
-	unsigned long prd_off;
-	unsigned long enamode_shift;
-	struct irqaction irqaction;
-};
-static struct timer_s timers[];
-
-/* values for 'opts' field of struct timer_s */
-#define TIMER_OPTS_DISABLED		0x01
-#define TIMER_OPTS_ONESHOT		0x02
-#define TIMER_OPTS_PERIODIC		0x04
-#define TIMER_OPTS_STATE_MASK		0x07
-
-#define TIMER_OPTS_USE_COMPARE		0x80000000
-#define USING_COMPARE(t)		((t)->opts & TIMER_OPTS_USE_COMPARE)
-
-static char *id_to_name[] = {
-	[T0_BOT]	= "timer0_0",
-	[T0_TOP]	= "timer0_1",
-	[T1_BOT]	= "timer1_0",
-	[T1_TOP]	= "timer1_1",
-};
-
-static int timer32_config(struct timer_s *t)
-{
-	u32 tcr;
-	struct davinci_soc_info *soc_info = &davinci_soc_info;
-
-	if (USING_COMPARE(t)) {
-		struct davinci_timer_instance *dtip =
-				soc_info->timer_info->timers;
-		int event_timer = ID_TO_TIMER(timers[TID_CLOCKEVENT].id);
-
-		/*
-		 * Next interrupt should be the current time reg value plus
-		 * the new period (using 32-bit unsigned addition/wrapping
-		 * to 0 on overflow).  This assumes that the clocksource
-		 * is setup to count to 2^32-1 before wrapping around to 0.
-		 */
-		__raw_writel(__raw_readl(t->base + t->tim_off) + t->period,
-			t->base + dtip[event_timer].cmp_off);
-	} else {
-		tcr = __raw_readl(t->base + TCR);
-
-		/* disable timer */
-		tcr &= ~(TCR_ENAMODE_MASK << t->enamode_shift);
-		__raw_writel(tcr, t->base + TCR);
-
-		/* reset counter to zero, set new period */
-		__raw_writel(0, t->base + t->tim_off);
-		__raw_writel(t->period, t->base + t->prd_off);
-
-		/* Set enable mode */
-		if (t->opts & TIMER_OPTS_ONESHOT)
-			tcr |= TCR_ENAMODE_ONESHOT << t->enamode_shift;
-		else if (t->opts & TIMER_OPTS_PERIODIC)
-			tcr |= TCR_ENAMODE_PERIODIC << t->enamode_shift;
-
-		__raw_writel(tcr, t->base + TCR);
-	}
-	return 0;
-}
-
-static inline u32 timer32_read(struct timer_s *t)
-{
-	return __raw_readl(t->base + t->tim_off);
-}
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
-	struct clock_event_device *evt = &clockevent_davinci;
-
-	evt->event_handler(evt);
-	return IRQ_HANDLED;
-}
-
-/* called when 32-bit counter wraps */
-static irqreturn_t freerun_interrupt(int irq, void *dev_id)
-{
-	return IRQ_HANDLED;
-}
-
-static struct timer_s timers[] = {
-	[TID_CLOCKEVENT] = {
-		.name      = "clockevent",
-		.opts      = TIMER_OPTS_DISABLED,
-		.irqaction = {
-			.flags   = IRQF_TIMER,
-			.handler = timer_interrupt,
-		}
-	},
-	[TID_CLOCKSOURCE] = {
-		.name       = "free-run counter",
-		.period     = ~0,
-		.opts       = TIMER_OPTS_PERIODIC,
-		.irqaction = {
-			.flags   = IRQF_TIMER,
-			.handler = freerun_interrupt,
-		}
-	},
-};
-
-static void __init timer_init(void)
-{
-	struct davinci_soc_info *soc_info = &davinci_soc_info;
-	struct davinci_timer_instance *dtip = soc_info->timer_info->timers;
-	void __iomem *base[2];
-	int i;
-
-	/* Global init of each 64-bit timer as a whole */
-	for(i=0; i<2; i++) {
-		u32 tgcr;
-
-		base[i] = ioremap(dtip[i].base, SZ_4K);
-		if (WARN_ON(!base[i]))
-			continue;
-
-		/* Disabled, Internal clock source */
-		__raw_writel(0, base[i] + TCR);
-
-		/* reset both timers, no pre-scaler for timer34 */
-		tgcr = 0;
-		__raw_writel(tgcr, base[i] + TGCR);
-
-		/* Set both timers to unchained 32-bit */
-		tgcr = TGCR_TIMMODE_32BIT_UNCHAINED << TGCR_TIMMODE_SHIFT;
-		__raw_writel(tgcr, base[i] + TGCR);
-
-		/* Unreset timers */
-		tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
-			(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
-		__raw_writel(tgcr, base[i] + TGCR);
-
-		/* Init both counters to zero */
-		__raw_writel(0, base[i] + TIM12);
-		__raw_writel(0, base[i] + TIM34);
-	}
-
-	/* Init of each timer as a 32-bit timer */
-	for (i=0; i< ARRAY_SIZE(timers); i++) {
-		struct timer_s *t = &timers[i];
-		int timer = ID_TO_TIMER(t->id);
-		u32 irq;
-
-		t->base = base[timer];
-		if (!t->base)
-			continue;
-
-		if (IS_TIMER_BOT(t->id)) {
-			t->enamode_shift = 6;
-			t->tim_off = TIM12;
-			t->prd_off = PRD12;
-			irq = dtip[timer].bottom_irq;
-		} else {
-			t->enamode_shift = 22;
-			t->tim_off = TIM34;
-			t->prd_off = PRD34;
-			irq = dtip[timer].top_irq;
-		}
-
-		/* Register interrupt */
-		t->irqaction.name = t->name;
-		t->irqaction.dev_id = (void *)t;
-
-		if (t->irqaction.handler != NULL) {
-			irq = USING_COMPARE(t) ? dtip[i].cmp_irq : irq;
-			setup_irq(irq, &t->irqaction);
-		}
-	}
-}
-
-/*
- * clocksource
- */
-static u64 read_cycles(struct clocksource *cs)
-{
-	struct timer_s *t = &timers[TID_CLOCKSOURCE];
-
-	return (cycles_t)timer32_read(t);
-}
-
-static struct clocksource clocksource_davinci = {
-	.rating		= 300,
-	.read		= read_cycles,
-	.mask		= CLOCKSOURCE_MASK(32),
-	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-/*
- * Overwrite weak default sched_clock with something more precise
- */
-static u64 notrace davinci_read_sched_clock(void)
-{
-	return timer32_read(&timers[TID_CLOCKSOURCE]);
-}
-
-/*
- * clockevent
- */
-static int davinci_set_next_event(unsigned long cycles,
-				  struct clock_event_device *evt)
-{
-	struct timer_s *t = &timers[TID_CLOCKEVENT];
-
-	t->period = cycles;
-	timer32_config(t);
-	return 0;
-}
-
-static int davinci_shutdown(struct clock_event_device *evt)
-{
-	struct timer_s *t = &timers[TID_CLOCKEVENT];
-
-	t->opts &= ~TIMER_OPTS_STATE_MASK;
-	t->opts |= TIMER_OPTS_DISABLED;
-	return 0;
-}
-
-static int davinci_set_oneshot(struct clock_event_device *evt)
-{
-	struct timer_s *t = &timers[TID_CLOCKEVENT];
-
-	t->opts &= ~TIMER_OPTS_STATE_MASK;
-	t->opts |= TIMER_OPTS_ONESHOT;
-	return 0;
-}
-
-static int davinci_set_periodic(struct clock_event_device *evt)
-{
-	struct timer_s *t = &timers[TID_CLOCKEVENT];
-
-	t->period = davinci_clock_tick_rate / (HZ);
-	t->opts &= ~TIMER_OPTS_STATE_MASK;
-	t->opts |= TIMER_OPTS_PERIODIC;
-	timer32_config(t);
-	return 0;
-}
-
-static struct clock_event_device clockevent_davinci = {
-	.features		= CLOCK_EVT_FEAT_PERIODIC |
-				  CLOCK_EVT_FEAT_ONESHOT,
-	.set_next_event		= davinci_set_next_event,
-	.set_state_shutdown	= davinci_shutdown,
-	.set_state_periodic	= davinci_set_periodic,
-	.set_state_oneshot	= davinci_set_oneshot,
-};
-
-void __init davinci_timer_init(struct clk *timer_clk)
-{
-	struct davinci_soc_info *soc_info = &davinci_soc_info;
-	unsigned int clockevent_id;
-	unsigned int clocksource_id;
-	int i;
-
-	clockevent_id = soc_info->timer_info->clockevent_id;
-	clocksource_id = soc_info->timer_info->clocksource_id;
-
-	timers[TID_CLOCKEVENT].id = clockevent_id;
-	timers[TID_CLOCKSOURCE].id = clocksource_id;
-
-	/*
-	 * If using same timer for both clock events & clocksource,
-	 * a compare register must be used to generate an event interrupt.
-	 * This is equivalent to a oneshot timer only (not periodic).
-	 */
-	if (clockevent_id == clocksource_id) {
-		struct davinci_timer_instance *dtip =
-				soc_info->timer_info->timers;
-		int event_timer = ID_TO_TIMER(clockevent_id);
-
-		/* Only bottom timers can use compare regs */
-		if (IS_TIMER_TOP(clockevent_id))
-			pr_warn("%s: Invalid use of system timers.  Results unpredictable.\n",
-				__func__);
-		else if ((dtip[event_timer].cmp_off == 0)
-				|| (dtip[event_timer].cmp_irq == 0))
-			pr_warn("%s: Invalid timer instance setup.  Results unpredictable.\n",
-				__func__);
-		else {
-			timers[TID_CLOCKEVENT].opts |= TIMER_OPTS_USE_COMPARE;
-			clockevent_davinci.features = CLOCK_EVT_FEAT_ONESHOT;
-		}
-	}
-
-	BUG_ON(IS_ERR(timer_clk));
-	clk_prepare_enable(timer_clk);
-
-	/* init timer hw */
-	timer_init();
-
-	davinci_clock_tick_rate = clk_get_rate(timer_clk);
-
-	/* setup clocksource */
-	clocksource_davinci.name = id_to_name[clocksource_id];
-	if (clocksource_register_hz(&clocksource_davinci,
-				    davinci_clock_tick_rate))
-		pr_err("%s: can't register clocksource!\n",
-		       clocksource_davinci.name);
-
-	sched_clock_register(davinci_read_sched_clock, 32,
-			  davinci_clock_tick_rate);
-
-	/* setup clockevent */
-	clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];
-
-	clockevent_davinci.cpumask = cpumask_of(0);
-	clockevents_config_and_register(&clockevent_davinci,
-					davinci_clock_tick_rate, 1, 0xfffffffe);
-
-	for (i=0; i< ARRAY_SIZE(timers); i++)
-		timer32_config(&timers[i]);
-}
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 6e7f10c..cbbe03e 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -3,10 +3,10 @@
 # Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 #		http://www.samsung.com/
 
-# Configuration options for the EXYNOS
+# Configuration options for the Samsung Exynos
 
 menuconfig ARCH_EXYNOS
-	bool "Samsung EXYNOS"
+	bool "Samsung Exynos"
 	depends on ARCH_MULTI_V7
 	select ARCH_HAS_HOLES_MEMORYMODEL
 	select ARCH_SUPPORTS_BIG_ENDIAN
@@ -42,7 +42,7 @@
 	select POWER_RESET_SYSCON
 	select POWER_RESET_SYSCON_POWEROFF
 	help
-	  Support for SAMSUNG EXYNOS SoCs (EXYNOS4/5)
+	  Support for Samsung Exynos SoCs
 
 if ARCH_EXYNOS
 
@@ -52,63 +52,62 @@
 	  Compile in setup memory (init) code for MFC
 
 config ARCH_EXYNOS3
-	bool "SAMSUNG EXYNOS3"
+	bool "Samsung Exynos3"
 	default y
 	select ARM_CPU_SUSPEND if PM
 	help
-	  Samsung EXYNOS3 (Cortex-A7) SoC based systems
+	  Samsung Exynos3 (Cortex-A7) SoC based systems
 
 config ARCH_EXYNOS4
-	bool "SAMSUNG EXYNOS4"
+	bool "Samsung Exynos4"
 	default y
 	select ARM_CPU_SUSPEND if PM_SLEEP
 	select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210
 	select CPU_EXYNOS4210
 	select GIC_NON_BANKED
-	select MIGHT_HAVE_CACHE_L2X0
 	help
-	  Samsung EXYNOS4 (Cortex-A9) SoC based systems
+	  Samsung Exynos4 (Cortex-A9) SoC based systems
 
 config ARCH_EXYNOS5
-	bool "SAMSUNG EXYNOS5"
+	bool "Samsung Exynos5"
 	default y
 	help
-	  Samsung EXYNOS5 (Cortex-A15/A7) SoC based systems
+	  Samsung Exynos5 (Cortex-A15/A7) SoC based systems
 
-comment "EXYNOS SoCs"
+comment "Exynos SoCs"
 
 config SOC_EXYNOS3250
-	bool "SAMSUNG EXYNOS3250"
+	bool "Samsung Exynos3250"
 	default y
 	depends on ARCH_EXYNOS3
 
 config CPU_EXYNOS4210
-	bool "SAMSUNG EXYNOS4210"
+	bool "Samsung Exynos4210"
 	default y
 	depends on ARCH_EXYNOS4
 
 config SOC_EXYNOS4412
-	bool "SAMSUNG EXYNOS4412"
+	bool "Samsung Exynos4412"
 	default y
 	depends on ARCH_EXYNOS4
 
 config SOC_EXYNOS5250
-	bool "SAMSUNG EXYNOS5250"
+	bool "Samsung Exynos5250"
 	default y
 	depends on ARCH_EXYNOS5
 
 config SOC_EXYNOS5260
-	bool "SAMSUNG EXYNOS5260"
+	bool "Samsung Exynos5260"
 	default y
 	depends on ARCH_EXYNOS5
 
 config SOC_EXYNOS5410
-	bool "SAMSUNG EXYNOS5410"
+	bool "Samsung Exynos5410"
 	default y
 	depends on ARCH_EXYNOS5
 
 config SOC_EXYNOS5420
-	bool "SAMSUNG EXYNOS5420"
+	bool "Samsung Exynos5420"
 	default y
 	depends on ARCH_EXYNOS5
 	select EXYNOS_MCPM if SMP
@@ -116,7 +115,7 @@
 	select ARM_CPU_SUSPEND
 
 config SOC_EXYNOS5800
-	bool "SAMSUNG EXYNOS5800"
+	bool "Samsung Exynos5800"
 	default y
 	depends on SOC_EXYNOS5420
 
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 56411bb..afd988a 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -3,7 +3,7 @@
  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
- * Common Header for EXYNOS machines
+ * Common Header for Exynos machines
  */
 
 #ifndef __ARCH_ARM_MACH_EXYNOS_COMMON_H
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 9aa4833..7a8d155 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 //
-// SAMSUNG EXYNOS Flattened Device Tree enabled machine
+// Samsung Exynos Flattened Device Tree enabled machine
 //
 // Copyright (c) 2010-2014 Samsung Electronics Co., Ltd.
 //		http://www.samsung.com
@@ -192,7 +192,7 @@ static void __init exynos_dt_fixup(void)
 	of_fdt_limit_memory(8);
 }
 
-DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
+DT_MACHINE_START(EXYNOS_DT, "Samsung Exynos (Flattened Device Tree)")
 	.l2c_aux_val	= 0x3c400001,
 	.l2c_aux_mask	= 0xc20fffff,
 	.smp		= smp_ops(exynos_smp_ops),
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index 22ebe36..8d58faa 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -3,7 +3,7 @@
  * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com/
  *
- * EXYNOS - Memory map definitions
+ * Exynos - Memory map definitions
  */
 
 #ifndef __ASM_ARCH_MAP_H
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 48e7fb3..78af34c 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -3,7 +3,7 @@
 // Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
 //		http://www.samsung.com
 //
-// EXYNOS - Power Management support
+// Exynos - Power Management support
 //
 // Based on arch/arm/mach-s3c2410/pm.c
 // Copyright (c) 2006 Simtec Electronics
diff --git a/arch/arm/mach-exynos/smc.h b/arch/arm/mach-exynos/smc.h
index 98832e5..5c30feb 100644
--- a/arch/arm/mach-exynos/smc.h
+++ b/arch/arm/mach-exynos/smc.h
@@ -2,7 +2,7 @@
 /*
  *  Copyright (c) 2012 Samsung Electronics.
  *
- * EXYNOS - SMC Call
+ * Exynos - SMC Call
  */
 
 #ifndef __ASM_ARCH_EXYNOS_SMC_H
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 6a0d344..3bf14ca 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -3,7 +3,7 @@
 // Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
 //		http://www.samsung.com
 //
-// EXYNOS - Suspend support
+// Exynos - Suspend support
 //
 // Based on arch/arm/mach-s3c2410/pm.c
 // Copyright (c) 2006 Simtec Electronics
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 593bf15..95584ee 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -520,6 +520,7 @@
 	bool "i.MX6 UltraLite support"
 	select PINCTRL_IMX6UL
 	select SOC_IMX6
+	select ARM_ERRATA_814220
 
 	help
 	  This enables support for Freescale i.MX6 UltraLite processor.
@@ -556,6 +557,7 @@
 	select PINCTRL_IMX7D
 	select SOC_IMX7D_CA7 if ARCH_MULTI_V7
 	select SOC_IMX7D_CM4 if ARM_SINGLE_ARMV7M
+	select ARM_ERRATA_814220 if ARCH_MULTI_V7
 	help
 		This enables support for Freescale i.MX7 Dual processor.
 
diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c
index 871f983..06f8d64 100644
--- a/arch/arm/mach-imx/cpu.c
+++ b/arch/arm/mach-imx/cpu.c
@@ -15,6 +15,11 @@
 #define OCOTP_UID_H	0x420
 #define OCOTP_UID_L	0x410
 
+#define OCOTP_ULP_UID_1		0x4b0
+#define OCOTP_ULP_UID_2		0x4c0
+#define OCOTP_ULP_UID_3		0x4d0
+#define OCOTP_ULP_UID_4		0x4e0
+
 unsigned int __mxc_cpu_type;
 static unsigned int imx_soc_revision;
 
@@ -164,6 +169,7 @@ struct device * __init imx_soc_device_init(void)
 		soc_id = "i.MX7D";
 		break;
 	case MXC_CPU_IMX7ULP:
+		ocotp_compat = "fsl,imx7ulp-ocotp";
 		soc_id = "i.MX7ULP";
 		break;
 	default:
@@ -178,11 +184,25 @@ struct device * __init imx_soc_device_init(void)
 	}
 
 	if (!IS_ERR_OR_NULL(ocotp)) {
-		regmap_read(ocotp, OCOTP_UID_H, &val);
-		soc_uid = val;
-		regmap_read(ocotp, OCOTP_UID_L, &val);
-		soc_uid <<= 32;
-		soc_uid |= val;
+		if (__mxc_cpu_type == MXC_CPU_IMX7ULP) {
+			regmap_read(ocotp, OCOTP_ULP_UID_4, &val);
+			soc_uid = val & 0xffff;
+			regmap_read(ocotp, OCOTP_ULP_UID_3, &val);
+			soc_uid <<= 16;
+			soc_uid |= val & 0xffff;
+			regmap_read(ocotp, OCOTP_ULP_UID_2, &val);
+			soc_uid <<= 16;
+			soc_uid |= val & 0xffff;
+			regmap_read(ocotp, OCOTP_ULP_UID_1, &val);
+			soc_uid <<= 16;
+			soc_uid |= val & 0xffff;
+		} else {
+			regmap_read(ocotp, OCOTP_UID_H, &val);
+			soc_uid = val;
+			regmap_read(ocotp, OCOTP_UID_L, &val);
+			soc_uid <<= 32;
+			soc_uid |= val;
+		}
 	}
 
 	soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d.%d",
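
For reference, a quick user-space sketch of the UID assembly the new i.MX7ULP branch above performs: each of the four OCOTP words contributes only its low 16 bits, with OCOTP_ULP_UID_4 supplying the most significant halfword. The helper name and the fuse values below are made up for illustration; only the shift/mask pattern mirrors the hunk.

/*
 * Minimal user-space sketch of the i.MX7ULP UID assembly. Fuse values
 * are invented; fuse[0]..fuse[3] stand for OCOTP_ULP_UID_1..UID_4.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t assemble_ulp_uid(const uint32_t fuse[4])
{
	uint64_t uid;

	uid = fuse[3] & 0xffff;			/* OCOTP_ULP_UID_4 */
	uid = (uid << 16) | (fuse[2] & 0xffff);	/* OCOTP_ULP_UID_3 */
	uid = (uid << 16) | (fuse[1] & 0xffff);	/* OCOTP_ULP_UID_2 */
	uid = (uid << 16) | (fuse[0] & 0xffff);	/* OCOTP_ULP_UID_1 */

	return uid;
}

int main(void)
{
	/* Hypothetical fuse readings, UID_1 first. */
	const uint32_t fuse[4] = { 0xdead0d0d, 0xbeef0c0c, 0xcafe0b0b, 0xf00d0a0a };

	/* Prints 0a0a0b0b0c0c0d0d: only the low halfwords survive. */
	printf("SoC UID: %016llx\n", (unsigned long long)assemble_ulp_uid(fuse));
	return 0;
}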
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index f07cfda..e1135b9 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -16,11 +16,11 @@
 clock-common				= clock.o
 secure-common				= omap-smc.o omap-secure.o
 
-obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
+obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
 obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
 obj-$(CONFIG_ARCH_OMAP4) += $(hwmod-common) $(secure-common)
-obj-$(CONFIG_SOC_AM33XX) += $(hwmod-common)
-obj-$(CONFIG_SOC_OMAP5)	 += $(hwmod-common) $(secure-common)
+obj-$(CONFIG_SOC_AM33XX) += $(hwmod-common) $(secure-common)
+obj-$(CONFIG_SOC_OMAP5)  += $(hwmod-common) $(secure-common)
 obj-$(CONFIG_SOC_AM43XX) += $(hwmod-common) $(secure-common)
 obj-$(CONFIG_SOC_DRA7XX) += $(hwmod-common) $(secure-common)
 
diff --git a/arch/arm/mach-omap2/clockdomains43xx_data.c b/arch/arm/mach-omap2/clockdomains43xx_data.c
index 751708d..c96a2b1 100644
--- a/arch/arm/mach-omap2/clockdomains43xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains43xx_data.c
@@ -84,6 +84,15 @@ static struct clockdomain l3s_tsc_43xx_clkdm = {
 	.flags		  = CLKDM_CAN_SWSUP,
 };
 
+static struct clockdomain lcdc_43xx_clkdm = {
+	.name		  = "lcdc_clkdm",
+	.pwrdm		  = { .name = "per_pwrdm" },
+	.prcm_partition	  = AM43XX_CM_PARTITION,
+	.cm_inst	  = AM43XX_CM_PER_INST,
+	.clkdm_offs	  = AM43XX_CM_PER_LCDC_CDOFFS,
+	.flags		  = CLKDM_CAN_SWSUP,
+};
+
 static struct clockdomain dss_43xx_clkdm = {
 	.name		  = "dss_clkdm",
 	.pwrdm		  = { .name = "per_pwrdm" },
@@ -173,6 +182,7 @@ static struct clockdomain *clockdomains_am43xx[] __initdata = {
 	&pruss_ocp_43xx_clkdm,
 	&ocpwp_l3_43xx_clkdm,
 	&l3s_tsc_43xx_clkdm,
+	&lcdc_43xx_clkdm,
 	&dss_43xx_clkdm,
 	&l3_aon_43xx_clkdm,
 	&emif_43xx_clkdm,
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
index 3068802..27835c4 100644
--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
@@ -606,7 +606,7 @@ static struct clockdomain cam_7xx_clkdm = {
 	.dep_bit	  = DRA7XX_CAM_STATDEP_SHIFT,
 	.wkdep_srcs	  = cam_wkup_sleep_deps,
 	.sleepdep_srcs	  = cam_wkup_sleep_deps,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.flags		  = CLKDM_CAN_SWSUP,
 };
 
 static struct clockdomain l4per_7xx_clkdm = {
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 223b37c..75d7299 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -255,7 +255,7 @@ extern void gic_dist_disable(void);
 extern void gic_dist_enable(void);
 extern bool gic_dist_disabled(void);
 extern void gic_timer_retrigger(void);
-extern void omap_smc1(u32 fn, u32 arg);
+extern void _omap_smc1(u32 fn, u32 arg);
 extern void omap4_sar_ram_init(void);
 extern void __iomem *omap4_get_sar_ram_base(void);
 extern void omap4_mpuss_early_init(void);
@@ -345,9 +345,12 @@ static inline int dra7xx_pciess_reset(struct omap_hwmod *oh)
 }
 #endif
 
+struct omap_system_dma_plat_info;
+
 void pdata_quirks_init(const struct of_device_id *);
 void omap_auxdata_legacy_init(struct device *dev);
 void omap_pcs_legacy_init(int irq, void (*rearm)(void));
+extern struct omap_system_dma_plat_info dma_plat_info;
 
 struct omap_sdrc_params;
 extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index 0c105ba..8cc109cc 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -30,10 +30,6 @@
 #include <linux/omap-dma.h>
 
 #include "soc.h"
-#include "omap_hwmod.h"
-#include "omap_device.h"
-
-static enum omap_reg_offsets dma_common_ch_end;
 
 static const struct omap_dma_reg reg_map[] = {
 	[REVISION]	= { 0x0000, 0x00, OMAP_DMA_REG_32BIT },
@@ -81,42 +77,6 @@ static const struct omap_dma_reg reg_map[] = {
 	[CCDN]		= { 0x00d8, 0x60, OMAP_DMA_REG_32BIT },
 };
 
-static void __iomem *dma_base;
-static inline void dma_write(u32 val, int reg, int lch)
-{
-	void __iomem *addr = dma_base;
-
-	addr += reg_map[reg].offset;
-	addr += reg_map[reg].stride * lch;
-
-	writel_relaxed(val, addr);
-}
-
-static inline u32 dma_read(int reg, int lch)
-{
-	void __iomem *addr = dma_base;
-
-	addr += reg_map[reg].offset;
-	addr += reg_map[reg].stride * lch;
-
-	return readl_relaxed(addr);
-}
-
-static void omap2_clear_dma(int lch)
-{
-	int i;
-
-	for (i = CSDP; i <= dma_common_ch_end; i += 1)
-		dma_write(0, i, lch);
-}
-
-static void omap2_show_dma_caps(void)
-{
-	u8 revision = dma_read(REVISION, 0) & 0xff;
-	printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
-				revision >> 4, revision & 0xf);
-}
-
 static unsigned configure_dma_errata(void)
 {
 	unsigned errata = 0;
@@ -211,82 +171,35 @@ static const struct dma_slave_map omap24xx_sdma_dt_map[] = {
 	{ "musb-hdrc.1.auto", "dmareq5", SDMA_FILTER_PARAM(64) }, /* OMAP2420 only */
 };
 
-static struct omap_system_dma_plat_info dma_plat_info __initdata = {
-	.reg_map	= reg_map,
-	.channel_stride	= 0x60,
-	.show_dma_caps	= omap2_show_dma_caps,
-	.clear_dma	= omap2_clear_dma,
-	.dma_write	= dma_write,
-	.dma_read	= dma_read,
+static struct omap_dma_dev_attr dma_attr = {
+	.dev_caps = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
+		    IS_CSSA_32 | IS_CDSA_32,
+	.lch_count = 32,
 };
 
-static struct platform_device_info omap_dma_dev_info __initdata = {
-	.name = "omap-dma-engine",
-	.id = -1,
-	.dma_mask = DMA_BIT_MASK(32),
+struct omap_system_dma_plat_info dma_plat_info = {
+	.reg_map	= reg_map,
+	.channel_stride	= 0x60,
+	.dma_attr	= &dma_attr,
 };
 
 /* One time initializations */
-static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
+static int __init omap2_system_dma_init(void)
 {
-	struct platform_device			*pdev;
-	struct omap_system_dma_plat_info	p;
-	struct omap_dma_dev_attr		*d;
-	struct resource				*mem;
-	char					*name = "omap_dma_system";
-
-	p = dma_plat_info;
-	p.dma_attr = (struct omap_dma_dev_attr *)oh->dev_attr;
-	p.errata = configure_dma_errata();
+	dma_plat_info.errata = configure_dma_errata();
 
 	if (soc_is_omap24xx()) {
 		/* DMA slave map for drivers not yet converted to DT */
-		p.slave_map = omap24xx_sdma_dt_map;
-		p.slavecnt = ARRAY_SIZE(omap24xx_sdma_dt_map);
+		dma_plat_info.slave_map = omap24xx_sdma_dt_map;
+		dma_plat_info.slavecnt = ARRAY_SIZE(omap24xx_sdma_dt_map);
 	}
 
-	pdev = omap_device_build(name, 0, oh, &p, sizeof(p));
-	if (IS_ERR(pdev)) {
-		pr_err("%s: Can't build omap_device for %s:%s.\n",
-			__func__, name, oh->name);
-		return PTR_ERR(pdev);
-	}
+	if (!soc_is_omap242x())
+		dma_attr.dev_caps |= IS_RW_PRIORITY;
 
-	omap_dma_dev_info.res = pdev->resource;
-	omap_dma_dev_info.num_res = pdev->num_resources;
-
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem) {
-		dev_err(&pdev->dev, "%s: no mem resource\n", __func__);
-		return -EINVAL;
-	}
-
-	dma_base = ioremap(mem->start, resource_size(mem));
-	if (!dma_base) {
-		dev_err(&pdev->dev, "%s: ioremap fail\n", __func__);
-		return -ENOMEM;
-	}
-
-	d = oh->dev_attr;
-
-	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
-		d->dev_caps |= HS_CHANNELS_RESERVED;
-
-	if (platform_get_irq_byname(pdev, "0") < 0)
-		d->dev_caps |= DMA_ENGINE_HANDLE_IRQ;
-
-	/* Check the capabilities register for descriptor loading feature */
-	if (dma_read(CAPS_0, 0) & DMA_HAS_DESCRIPTOR_CAPS)
-		dma_common_ch_end = CCDN;
-	else
-		dma_common_ch_end = CCFN;
+	if (soc_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
+		dma_attr.dev_caps |= HS_CHANNELS_RESERVED;
 
 	return 0;
 }
-
-static int __init omap2_system_dma_init(void)
-{
-	return omap_hwmod_for_each_by_class("dma",
-			omap2_system_dma_init_dev, NULL);
-}
 omap_arch_initcall(omap2_system_dma_init);
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 349e480..f280472 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -51,6 +51,7 @@
 #include "prm33xx.h"
 #include "prm44xx.h"
 #include "opp2xxx.h"
+#include "omap-secure.h"
 
 /*
  * omap_clk_soc_init: points to a function that does the SoC-specific
@@ -430,6 +431,7 @@ void __init omap2420_init_early(void)
 	omap_hwmod_init_postsetup();
 	omap_clk_soc_init = omap2420_dt_clk_init;
 	rate_table = omap2420_rate_table;
+	omap_secure_init();
 }
 
 void __init omap2420_init_late(void)
@@ -454,6 +456,7 @@ void __init omap2430_init_early(void)
 	omap_hwmod_init_postsetup();
 	omap_clk_soc_init = omap2430_dt_clk_init;
 	rate_table = omap2430_rate_table;
+	omap_secure_init();
 }
 
 void __init omap2430_init_late(void)
@@ -481,6 +484,7 @@ void __init omap3_init_early(void)
 	omap3xxx_clockdomains_init();
 	omap3xxx_hwmod_init();
 	omap_hwmod_init_postsetup();
+	omap_secure_init();
 }
 
 void __init omap3430_init_early(void)
@@ -533,6 +537,7 @@ void __init ti814x_init_early(void)
 	dm814x_hwmod_init();
 	omap_hwmod_init_postsetup();
 	omap_clk_soc_init = dm814x_dt_clk_init;
+	omap_secure_init();
 }
 
 void __init ti816x_init_early(void)
@@ -549,6 +554,7 @@ void __init ti816x_init_early(void)
 	dm816x_hwmod_init();
 	omap_hwmod_init_postsetup();
 	omap_clk_soc_init = dm816x_dt_clk_init;
+	omap_secure_init();
 }
 #endif
 
@@ -566,6 +572,7 @@ void __init am33xx_init_early(void)
 	am33xx_hwmod_init();
 	omap_hwmod_init_postsetup();
 	omap_clk_soc_init = am33xx_dt_clk_init;
+	omap_secure_init();
 }
 
 void __init am33xx_init_late(void)
@@ -589,6 +596,7 @@ void __init am43xx_init_early(void)
 	omap_hwmod_init_postsetup();
 	omap_l2_cache_init();
 	omap_clk_soc_init = am43xx_dt_clk_init;
+	omap_secure_init();
 }
 
 void __init am43xx_init_late(void)
@@ -617,6 +625,7 @@ void __init omap4430_init_early(void)
 	omap_hwmod_init_postsetup();
 	omap_l2_cache_init();
 	omap_clk_soc_init = omap4xxx_dt_clk_init;
+	omap_secure_init();
 }
 
 void __init omap4430_init_late(void)
@@ -643,6 +652,7 @@ void __init omap5_init_early(void)
 	omap54xx_hwmod_init();
 	omap_hwmod_init_postsetup();
 	omap_clk_soc_init = omap5xxx_dt_clk_init;
+	omap_secure_init();
 }
 
 void __init omap5_init_late(void)
@@ -666,6 +676,7 @@ void __init dra7xx_init_early(void)
 	dra7xx_hwmod_init();
 	omap_hwmod_init_postsetup();
 	omap_clk_soc_init = dra7xx_dt_clk_init;
+	omap_secure_init();
 }
 
 void __init dra7xx_init_late(void)
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index f1a6ece..54aff33 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -8,36 +8,134 @@
 
 #include <linux/platform_device.h>
 #include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/list.h>
 
-#include "omap_hwmod.h"
-#include "omap_device.h"
+#include "clockdomain.h"
 #include "powerdomain.h"
 
+struct pwrdm_link {
+	struct device *dev;
+	struct powerdomain *pwrdm;
+	struct list_head node;
+};
+
+static DEFINE_SPINLOCK(iommu_lock);
+static struct clockdomain *emu_clkdm;
+static atomic_t emu_count;
+
+static void omap_iommu_dra7_emu_swsup_config(struct platform_device *pdev,
+					     bool enable)
+{
+	struct device_node *np = pdev->dev.of_node;
+	unsigned long flags;
+
+	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
+		return;
+
+	if (!emu_clkdm) {
+		emu_clkdm = clkdm_lookup("emu_clkdm");
+		if (WARN_ON_ONCE(!emu_clkdm))
+			return;
+	}
+
+	spin_lock_irqsave(&iommu_lock, flags);
+
+	if (enable && (atomic_inc_return(&emu_count) == 1))
+		clkdm_deny_idle(emu_clkdm);
+	else if (!enable && (atomic_dec_return(&emu_count) == 0))
+		clkdm_allow_idle(emu_clkdm);
+
+	spin_unlock_irqrestore(&iommu_lock, flags);
+}
+
+static struct powerdomain *_get_pwrdm(struct device *dev)
+{
+	struct clk *clk;
+	struct clk_hw_omap *hwclk;
+	struct clockdomain *clkdm;
+	struct powerdomain *pwrdm = NULL;
+	struct pwrdm_link *entry;
+	unsigned long flags;
+	static LIST_HEAD(cache);
+
+	spin_lock_irqsave(&iommu_lock, flags);
+
+	list_for_each_entry(entry, &cache, node) {
+		if (entry->dev == dev) {
+			pwrdm = entry->pwrdm;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&iommu_lock, flags);
+
+	if (pwrdm)
+		return pwrdm;
+
+	clk = of_clk_get(dev->of_node->parent, 0);
+	if (!clk) {
+		dev_err(dev, "no fck found\n");
+		return NULL;
+	}
+
+	hwclk = to_clk_hw_omap(__clk_get_hw(clk));
+	clk_put(clk);
+	if (!hwclk || !hwclk->clkdm_name) {
+		dev_err(dev, "no hwclk data\n");
+		return NULL;
+	}
+
+	clkdm = clkdm_lookup(hwclk->clkdm_name);
+	if (!clkdm) {
+		dev_err(dev, "clkdm not found: %s\n", hwclk->clkdm_name);
+		return NULL;
+	}
+
+	pwrdm = clkdm_get_pwrdm(clkdm);
+	if (!pwrdm) {
+		dev_err(dev, "pwrdm not found: %s\n", clkdm->name);
+		return NULL;
+	}
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (entry) {
+		entry->dev = dev;
+		entry->pwrdm = pwrdm;
+		spin_lock_irqsave(&iommu_lock, flags);
+		list_add(&entry->node, &cache);
+		spin_unlock_irqrestore(&iommu_lock, flags);
+	}
+
+	return pwrdm;
+}
+
 int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request,
 				    u8 *pwrst)
 {
 	struct powerdomain *pwrdm;
-	struct omap_device *od;
 	u8 next_pwrst;
+	int ret = 0;
 
-	od = to_omap_device(pdev);
-	if (!od)
+	pwrdm = _get_pwrdm(&pdev->dev);
+	if (!pwrdm)
 		return -ENODEV;
 
-	if (od->hwmods_cnt != 1)
-		return -EINVAL;
-
-	pwrdm = omap_hwmod_get_pwrdm(od->hwmods[0]);
-	if (!pwrdm)
-		return -EINVAL;
-
-	if (request)
+	if (request) {
 		*pwrst = pwrdm_read_next_pwrst(pwrdm);
+		omap_iommu_dra7_emu_swsup_config(pdev, true);
+	}
 
 	if (*pwrst > PWRDM_POWER_RET)
-		return 0;
+		goto out;
 
 	next_pwrst = request ? PWRDM_POWER_ON : *pwrst;
 
-	return pwrdm_set_next_pwrst(pwrdm, next_pwrst);
+	ret = pwrdm_set_next_pwrst(pwrdm, next_pwrst);
+
+out:
+	if (!request)
+		omap_iommu_dra7_emu_swsup_config(pdev, false);
+
+	return ret;
 }
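
The _get_pwrdm() helper added above resolves a device's power domain once and serves later calls from a small lock-protected list. A simplified user-space analogue of that caching pattern, with invented names and a pthread mutex standing in for the kernel spinlock and list_head machinery, might look like this (a sketch, not the kernel code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cache_entry {
	int key;			/* stands in for struct device * */
	int value;			/* stands in for struct powerdomain * */
	struct cache_entry *next;
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cache_entry *cache;

static int slow_resolve(int key)
{
	/* Pretend this walks clk -> clkdm -> pwrdm as in the hunk above. */
	return key * 100;
}

static int cached_lookup(int key)
{
	struct cache_entry *e;
	int value;

	pthread_mutex_lock(&cache_lock);
	for (e = cache; e; e = e->next) {
		if (e->key == key) {
			value = e->value;
			pthread_mutex_unlock(&cache_lock);
			return value;
		}
	}
	pthread_mutex_unlock(&cache_lock);

	value = slow_resolve(key);

	/* Cache the result; a failed allocation only costs a later re-lookup. */
	e = malloc(sizeof(*e));
	if (e) {
		e->key = key;
		e->value = value;
		pthread_mutex_lock(&cache_lock);
		e->next = cache;
		cache = e;
		pthread_mutex_unlock(&cache_lock);
	}
	return value;
}

int main(void)
{
	/* Second call is served from the cache. */
	printf("%d %d\n", cached_lookup(7), cached_lookup(7));
	return 0;
}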
diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
index 24298e47..d00e3c7 100644
--- a/arch/arm/mach-omap2/omap-secure.c
+++ b/arch/arm/mach-omap2/omap-secure.c
@@ -8,18 +8,42 @@
  * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/memblock.h>
+#include <linux/of.h>
 
 #include <asm/cacheflush.h>
 #include <asm/memblock.h>
 
+#include "common.h"
 #include "omap-secure.h"
 
 static phys_addr_t omap_secure_memblock_base;
 
+bool optee_available;
+
+#define OMAP_SIP_SMC_STD_CALL_VAL(func_num) \
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \
+	ARM_SMCCC_OWNER_SIP, (func_num))
+
+static void __init omap_optee_init_check(void)
+{
+	struct device_node *np;
+
+	/*
+	 * We only check that the OP-TEE node is present and available. The
+	 * OP-TEE kernel driver is not needed for the type of interaction made
+	 * with OP-TEE here so the driver's status is not checked.
+	 */
+	np = of_find_node_by_path("/firmware/optee");
+	if (np && of_device_is_available(np))
+		optee_available = true;
+	of_node_put(np);
+}
+
 /**
  * omap_sec_dispatcher: Routine to dispatch low power secure
  * service routines
@@ -53,6 +77,27 @@ u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
 	return ret;
 }
 
+void omap_smccc_smc(u32 fn, u32 arg)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_smc(OMAP_SIP_SMC_STD_CALL_VAL(fn), arg,
+		      0, 0, 0, 0, 0, 0, &res);
+	WARN(res.a0, "Secure function call 0x%08x failed\n", fn);
+}
+
+void omap_smc1(u32 fn, u32 arg)
+{
+	/*
+	 * If this platform has OP-TEE installed we use ARM SMC calls
+	 * otherwise fall back to the OMAP ROM style calls.
+	 */
+	if (optee_available)
+		omap_smccc_smc(fn, arg);
+	else
+		_omap_smc1(fn, arg);
+}
+
 /* Allocate the memory to save secure ram */
 int __init omap_secure_ram_reserve_memblock(void)
 {
@@ -163,3 +208,8 @@ u32 rx51_secure_rng_call(u32 ptr, u32 count, u32 flag)
 				      NO_FLAG,
 				      3, ptr, count, flag, 0);
 }
+
+void __init omap_secure_init(void)
+{
+	omap_optee_init_check();
+}
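
OMAP_SIP_SMC_STD_CALL_VAL() above packs the function number into an SMC Calling Convention function identifier. Assuming the conventional layout — call type in bit 31 (0 for a standard/yielding call), SMC32/SMC64 selection in bit 30, service owner in bits 29:24 (SIP is owner 2) and the function number in bits 15:0 — a standalone sketch of the same computation, with a made-up function number, is:

/*
 * Illustrative only: recomputes a SiP standard-call SMC function ID
 * under the assumed SMCCC bit layout described above.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sip_std_call_val(uint32_t func_num)
{
	const uint32_t type = 0;	/* standard (yielding) call */
	const uint32_t conv = 0;	/* SMC32 calling convention */
	const uint32_t owner = 2;	/* SiP service owner */

	return (type << 31) | (conv << 30) | ((owner & 0x3f) << 24) |
	       (func_num & 0xffff);
}

int main(void)
{
	/* Hypothetical function number 0x25; prints 0x02000025. */
	printf("0x%08x\n", (unsigned int)sip_std_call_val(0x25));
	return 0;
}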
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index 20046e8..ba8c486 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -10,6 +10,8 @@
 #ifndef OMAP_ARCH_OMAP_SECURE_H
 #define OMAP_ARCH_OMAP_SECURE_H
 
+#include <linux/types.h>
+
 /* Monitor error code */
 #define  API_HAL_RET_VALUE_NS2S_CONVERSION_ERROR	0xFFFFFFFE
 #define  API_HAL_RET_VALUE_SERVICE_UNKNWON		0xFFFFFFFF
@@ -51,6 +53,9 @@
 #define OMAP4_PPA_L2_POR_INDEX		0x23
 #define OMAP4_PPA_CPU_ACTRL_SMP_INDEX	0x25
 
+#define AM43xx_PPA_SVC_PM_SUSPEND	0x71
+#define AM43xx_PPA_SVC_PM_RESUME	0x72
+
 /* Secure RX-51 PPA (Primary Protected Application) APIs */
 #define RX51_PPA_HWRNG			29
 #define RX51_PPA_L2_INVAL		40
@@ -60,6 +65,8 @@
 
 extern u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs,
 				u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+extern void omap_smccc_smc(u32 fn, u32 arg);
+extern void omap_smc1(u32 fn, u32 arg);
 extern u32 omap_smc2(u32 id, u32 falg, u32 pargs);
 extern u32 omap_smc3(u32 id, u32 process, u32 flag, u32 pargs);
 extern phys_addr_t omap_secure_ram_mempool_base(void);
@@ -72,6 +79,9 @@ extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
 extern u32 rx51_secure_update_aux_cr(u32 set_bits, u32 clear_bits);
 extern u32 rx51_secure_rng_call(u32 ptr, u32 count, u32 flag);
 
+extern bool optee_available;
+void omap_secure_init(void);
+
 #ifdef CONFIG_SOC_HAS_REALTIME_COUNTER
 void set_cntfreq(void);
 #else
diff --git a/arch/arm/mach-omap2/omap-smc.S b/arch/arm/mach-omap2/omap-smc.S
index fd2bcd91..d483284 100644
--- a/arch/arm/mach-omap2/omap-smc.S
+++ b/arch/arm/mach-omap2/omap-smc.S
@@ -18,18 +18,18 @@
  * the monitor API number. It uses few CPU registers
  * internally and hence they need be backed up including
  * link register "lr".
- * Function signature : void omap_smc1(u32 fn, u32 arg)
+ * Function signature : void _omap_smc1(u32 fn, u32 arg)
  */
 	.arch armv7-a
 	.arch_extension sec
-ENTRY(omap_smc1)
+ENTRY(_omap_smc1)
 	stmfd   sp!, {r2-r12, lr}
 	mov	r12, r0
 	mov 	r0, r1
 	dsb
 	smc	#0
 	ldmfd   sp!, {r2-r12, pc}
-ENDPROC(omap_smc1)
+ENDPROC(_omap_smc1)
 
 /**
  * u32 omap_smc2(u32 id, u32 falg, u32 pargs)
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 1d55602..6b4548f 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -373,176 +373,6 @@ void omap_device_delete(struct omap_device *od)
 	kfree(od);
 }
 
-/**
- * omap_device_copy_resources - Add legacy IO and IRQ resources
- * @oh: interconnect target module
- * @pdev: platform device to copy resources to
- *
- * We still have legacy DMA and smartreflex needing resources.
- * Let's populate what they need until we can eventually just
- * remove this function. Note that there should be no need to
- * call this from omap_device_build_from_dt(), nor should there
- * be any need to call it for other devices.
- */
-static int
-omap_device_copy_resources(struct omap_hwmod *oh,
-			   struct platform_device *pdev)
-{
-	struct device_node *np, *child;
-	struct property *prop;
-	struct resource *res;
-	const char *name;
-	int error, irq = 0;
-
-	if (!oh || !oh->od || !oh->od->pdev)
-		return -EINVAL;
-
-	np = oh->od->pdev->dev.of_node;
-	if (!np) {
-		error = -ENODEV;
-		goto error;
-	}
-
-	res = kcalloc(2, sizeof(*res), GFP_KERNEL);
-	if (!res)
-		return -ENOMEM;
-
-	/* Do we have a dts range for the interconnect target module? */
-	error = omap_hwmod_parse_module_range(oh, np, res);
-
-	/* No ranges, rely on device reg entry */
-	if (error)
-		error = of_address_to_resource(np, 0, res);
-	if (error)
-		goto free;
-
-	/* SmartReflex needs first IO resource name to be "mpu" */
-	res[0].name = "mpu";
-
-	/*
-	 * We may have a configured "ti,sysc" interconnect target with a
-	 * dts child with the interrupt. If so use the first child's
-	 * first interrupt for "ti-hwmods" legacy support.
-	 */
-	of_property_for_each_string(np, "compatible", prop, name)
-		if (!strncmp("ti,sysc-", name, 8))
-			break;
-
-	child = of_get_next_available_child(np, NULL);
-
-	if (name)
-		irq = irq_of_parse_and_map(child, 0);
-	if (!irq)
-		irq = irq_of_parse_and_map(np, 0);
-	if (!irq) {
-		error = -EINVAL;
-		goto free;
-	}
-
-	/* Legacy DMA code needs interrupt name to be "0" */
-	res[1].start = irq;
-	res[1].end = irq;
-	res[1].flags = IORESOURCE_IRQ;
-	res[1].name = "0";
-
-	error = platform_device_add_resources(pdev, res, 2);
-
-free:
-	kfree(res);
-
-error:
-	WARN(error, "%s: %s device %s failed: %i\n",
-	     __func__, oh->name, dev_name(&pdev->dev),
-	     error);
-
-	return error;
-}
-
-/**
- * omap_device_build - build and register an omap_device with one omap_hwmod
- * @pdev_name: name of the platform_device driver to use
- * @pdev_id: this platform_device's connection ID
- * @oh: ptr to the single omap_hwmod that backs this omap_device
- * @pdata: platform_data ptr to associate with the platform_device
- * @pdata_len: amount of memory pointed to by @pdata
- *
- * Convenience function for building and registering a single
- * omap_device record, which in turn builds and registers a
- * platform_device record.  See omap_device_build_ss() for more
- * information.  Returns ERR_PTR(-EINVAL) if @oh is NULL; otherwise,
- * passes along the return value of omap_device_build_ss().
- */
-struct platform_device __init *omap_device_build(const char *pdev_name,
-						 int pdev_id,
-						 struct omap_hwmod *oh,
-						 void *pdata, int pdata_len)
-{
-	int ret = -ENOMEM;
-	struct platform_device *pdev;
-	struct omap_device *od;
-
-	if (!oh || !pdev_name)
-		return ERR_PTR(-EINVAL);
-
-	if (!pdata && pdata_len > 0)
-		return ERR_PTR(-EINVAL);
-
-	if (strncmp(oh->name, "smartreflex", 11) &&
-	    strncmp(oh->name, "dma", 3)) {
-		pr_warn("%s need to update %s to probe with dt\na",
-			__func__, pdev_name);
-		ret = -ENODEV;
-		goto odbs_exit;
-	}
-
-	pdev = platform_device_alloc(pdev_name, pdev_id);
-	if (!pdev) {
-		ret = -ENOMEM;
-		goto odbs_exit;
-	}
-
-	/* Set the dev_name early to allow dev_xxx in omap_device_alloc */
-	if (pdev->id != -1)
-		dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
-	else
-		dev_set_name(&pdev->dev, "%s", pdev->name);
-
-	/*
-	 * Must be called before omap_device_alloc() as oh->od
-	 * only contains the currently registered omap_device
-	 * and will get overwritten by omap_device_alloc().
-	 */
-	ret = omap_device_copy_resources(oh, pdev);
-	if (ret)
-		goto odbs_exit1;
-
-	od = omap_device_alloc(pdev, &oh, 1);
-	if (IS_ERR(od)) {
-		ret = PTR_ERR(od);
-		goto odbs_exit1;
-	}
-
-	ret = platform_device_add_data(pdev, pdata, pdata_len);
-	if (ret)
-		goto odbs_exit2;
-
-	ret = omap_device_register(pdev);
-	if (ret)
-		goto odbs_exit2;
-
-	return pdev;
-
-odbs_exit2:
-	omap_device_delete(od);
-odbs_exit1:
-	platform_device_put(pdev);
-odbs_exit:
-
-	pr_err("omap_device: %s: build failed (%d)\n", pdev_name, ret);
-
-	return ERR_PTR(ret);
-}
-
 #ifdef CONFIG_PM
 static int _od_runtime_suspend(struct device *dev)
 {
diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
index ced775e..f77e76a 100644
--- a/arch/arm/mach-omap2/omap_device.h
+++ b/arch/arm/mach-omap2/omap_device.h
@@ -68,10 +68,6 @@ int omap_device_idle(struct platform_device *pdev);
 
 /* Core code interface */
 
-struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
-					  struct omap_hwmod *oh, void *pdata,
-					  int pdata_len);
-
 struct omap_device *omap_device_alloc(struct platform_device *pdev,
 				      struct omap_hwmod **ohs, int oh_cnt);
 void omap_device_delete(struct omap_device *od);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index a136788..17d337e 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1853,23 +1853,6 @@ static int _omap4_get_context_lost(struct omap_hwmod *oh)
 }
 
 /**
- * _enable_preprogram - Pre-program an IP block during the _enable() process
- * @oh: struct omap_hwmod *
- *
- * Some IP blocks (such as AESS) require some additional programming
- * after enable before they can enter idle.  If a function pointer to
- * do so is present in the hwmod data, then call it and pass along the
- * return value; otherwise, return 0.
- */
-static int _enable_preprogram(struct omap_hwmod *oh)
-{
-	if (!oh->class->enable_preprogram)
-		return 0;
-
-	return oh->class->enable_preprogram(oh);
-}
-
-/**
  * _enable - enable an omap_hwmod
  * @oh: struct omap_hwmod *
  *
@@ -1952,7 +1935,6 @@ static int _enable(struct omap_hwmod *oh)
 				_update_sysc_cache(oh);
 			_enable_sysc(oh);
 		}
-		r = _enable_preprogram(oh);
 	} else {
 		if (soc_ops.disable_module)
 			soc_ops.disable_module(oh);
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 2d0fd99..eebf2fd 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -501,7 +501,6 @@ struct omap_hwmod_omap4_prcm {
  * @sysc: device SYSCONFIG/SYSSTATUS register data
  * @pre_shutdown: ptr to fn to be executed immediately prior to device shutdown
  * @reset: ptr to fn to be executed in place of the standard hwmod reset fn
- * @enable_preprogram:  ptr to fn to be executed during device enable
  * @lock: ptr to fn to be executed to lock IP registers
  * @unlock: ptr to fn to be executed to unlock IP registers
  *
@@ -526,7 +525,6 @@ struct omap_hwmod_class {
 	struct omap_hwmod_class_sysconfig	*sysc;
 	int					(*pre_shutdown)(struct omap_hwmod *oh);
 	int					(*reset)(struct omap_hwmod *oh);
-	int					(*enable_preprogram)(struct omap_hwmod *oh);
 	void					(*lock)(struct omap_hwmod *oh);
 	void					(*unlock)(struct omap_hwmod *oh);
 };
@@ -662,7 +660,6 @@ const char *omap_hwmod_get_main_clk(struct omap_hwmod *oh);
  *
  */
 
-extern int omap_hwmod_aess_preprogram(struct omap_hwmod *oh);
 void omap_hwmod_rtc_unlock(struct omap_hwmod *oh);
 void omap_hwmod_rtc_lock(struct omap_hwmod *oh);
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index d49df96..b14442c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -11,7 +11,6 @@
  */
 
 #include <linux/platform_data/i2c-omap.h>
-#include <linux/omap-dma.h>
 
 #include "omap_hwmod.h"
 #include "l3_2xxx.h"
@@ -126,21 +125,6 @@ static struct omap_hwmod omap2420_i2c2_hwmod = {
 	.flags		= HWMOD_16BIT_REG,
 };
 
-/* dma attributes */
-static struct omap_dma_dev_attr dma_dev_attr = {
-	.dev_caps  = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
-						IS_CSSA_32 | IS_CDSA_32,
-	.lch_count = 32,
-};
-
-static struct omap_hwmod omap2420_dma_system_hwmod = {
-	.name		= "dma",
-	.class		= &omap2xxx_dma_hwmod_class,
-	.main_clk	= "core_l3_ck",
-	.dev_attr	= &dma_dev_attr,
-	.flags		= HWMOD_NO_IDLEST,
-};
-
 /* mailbox */
 static struct omap_hwmod omap2420_mailbox_hwmod = {
 	.name		= "mailbox",
@@ -328,22 +312,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio4 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* dma_system -> L3 */
-static struct omap_hwmod_ocp_if omap2420_dma_system__l3 = {
-	.master		= &omap2420_dma_system_hwmod,
-	.slave		= &omap2xxx_l3_main_hwmod,
-	.clk		= "core_l3_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_core -> dma_system */
-static struct omap_hwmod_ocp_if omap2420_l4_core__dma_system = {
-	.master		= &omap2xxx_l4_core_hwmod,
-	.slave		= &omap2420_dma_system_hwmod,
-	.clk		= "sdma_ick",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l4_core -> mailbox */
 static struct omap_hwmod_ocp_if omap2420_l4_core__mailbox = {
 	.master		= &omap2xxx_l4_core_hwmod,
@@ -435,8 +403,6 @@ static struct omap_hwmod_ocp_if *omap2420_hwmod_ocp_ifs[] __initdata = {
 	&omap2420_l4_wkup__gpio2,
 	&omap2420_l4_wkup__gpio3,
 	&omap2420_l4_wkup__gpio4,
-	&omap2420_dma_system__l3,
-	&omap2420_l4_core__dma_system,
 	&omap2420_l4_core__mailbox,
 	&omap2420_l4_core__mcbsp1,
 	&omap2420_l4_core__mcbsp2,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index c51ef84..41a37c7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -12,7 +12,6 @@
 
 #include <linux/platform_data/i2c-omap.h>
 #include <linux/platform_data/hsmmc-omap.h>
-#include <linux/omap-dma.h>
 
 #include "omap_hwmod.h"
 #include "l3_2xxx.h"
@@ -121,21 +120,6 @@ static struct omap_hwmod omap2430_gpio5_hwmod = {
 	.class		= &omap2xxx_gpio_hwmod_class,
 };
 
-/* dma attributes */
-static struct omap_dma_dev_attr dma_dev_attr = {
-	.dev_caps  = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
-				IS_CSSA_32 | IS_CDSA_32 | IS_RW_PRIORITY,
-	.lch_count = 32,
-};
-
-static struct omap_hwmod omap2430_dma_system_hwmod = {
-	.name		= "dma",
-	.class		= &omap2xxx_dma_hwmod_class,
-	.main_clk	= "core_l3_ck",
-	.dev_attr	= &dma_dev_attr,
-	.flags		= HWMOD_NO_IDLEST,
-};
-
 /* mailbox */
 static struct omap_hwmod omap2430_mailbox_hwmod = {
 	.name		= "mailbox",
@@ -508,22 +492,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__gpio5 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* dma_system -> L3 */
-static struct omap_hwmod_ocp_if omap2430_dma_system__l3 = {
-	.master		= &omap2430_dma_system_hwmod,
-	.slave		= &omap2xxx_l3_main_hwmod,
-	.clk		= "core_l3_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_core -> dma_system */
-static struct omap_hwmod_ocp_if omap2430_l4_core__dma_system = {
-	.master		= &omap2xxx_l4_core_hwmod,
-	.slave		= &omap2430_dma_system_hwmod,
-	.clk		= "sdma_ick",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l4_core -> mailbox */
 static struct omap_hwmod_ocp_if omap2430_l4_core__mailbox = {
 	.master		= &omap2xxx_l4_core_hwmod,
@@ -635,8 +603,6 @@ static struct omap_hwmod_ocp_if *omap2430_hwmod_ocp_ifs[] __initdata = {
 	&omap2430_l4_wkup__gpio3,
 	&omap2430_l4_wkup__gpio4,
 	&omap2430_l4_core__gpio5,
-	&omap2430_dma_system__l3,
-	&omap2430_l4_core__dma_system,
 	&omap2430_l4_core__mailbox,
 	&omap2430_l4_core__mcbsp1,
 	&omap2430_l4_core__mcbsp2,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index f767524..a445704 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -7,7 +7,6 @@
  */
 
 #include <linux/types.h>
-#include <linux/omap-dma.h>
 
 #include "omap_hwmod.h"
 #include "omap_hwmod_common_data.h"
@@ -95,23 +94,6 @@ struct omap_hwmod_class omap2xxx_gpio_hwmod_class = {
 	.sysc = &omap2xxx_gpio_sysc,
 };
 
-/* system dma */
-static struct omap_hwmod_class_sysconfig omap2xxx_dma_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x002c,
-	.syss_offs	= 0x0028,
-	.sysc_flags	= (SYSC_HAS_SOFTRESET | SYSC_HAS_MIDLEMODE |
-			   SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_EMUFREE |
-			   SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-struct omap_hwmod_class omap2xxx_dma_hwmod_class = {
-	.name	= "dma",
-	.sysc	= &omap2xxx_dma_sysc,
-};
-
 /*
  * 'mailbox' class
  * mailbox module allowing communication between the on-chip processors
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
index 26e13d4..5ef76fe 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
@@ -28,29 +28,13 @@ extern struct omap_hwmod_ocp_if am33xx_pruss__l3_main;
 extern struct omap_hwmod_ocp_if am33xx_gfx__l3_main;
 extern struct omap_hwmod_ocp_if am33xx_l3_main__gfx;
 extern struct omap_hwmod_ocp_if am33xx_l4_wkup__rtc;
-extern struct omap_hwmod_ocp_if am33xx_l4_per__dcan0;
-extern struct omap_hwmod_ocp_if am33xx_l4_per__dcan1;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__elm;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss0;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss1;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss2;
 extern struct omap_hwmod_ocp_if am33xx_l3_s__gpmc;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__spinlock;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__mcspi0;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__mcspi1;
 extern struct omap_hwmod_ocp_if am33xx_l4_ls__timer2;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__timer3;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__timer4;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__timer5;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__timer6;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__timer7;
 extern struct omap_hwmod_ocp_if am33xx_l3_main__tpcc;
 extern struct omap_hwmod_ocp_if am33xx_l3_main__tptc0;
 extern struct omap_hwmod_ocp_if am33xx_l3_main__tptc1;
 extern struct omap_hwmod_ocp_if am33xx_l3_main__tptc2;
 extern struct omap_hwmod_ocp_if am33xx_l3_main__ocmc;
-extern struct omap_hwmod_ocp_if am33xx_l3_main__sha0;
-extern struct omap_hwmod_ocp_if am33xx_l3_main__aes0;
 
 extern struct omap_hwmod am33xx_l3_main_hwmod;
 extern struct omap_hwmod am33xx_l3_s_hwmod;
@@ -61,29 +45,13 @@ extern struct omap_hwmod am33xx_mpu_hwmod;
 extern struct omap_hwmod am33xx_pruss_hwmod;
 extern struct omap_hwmod am33xx_gfx_hwmod;
 extern struct omap_hwmod am33xx_prcm_hwmod;
-extern struct omap_hwmod am33xx_aes0_hwmod;
-extern struct omap_hwmod am33xx_sha0_hwmod;
 extern struct omap_hwmod am33xx_ocmcram_hwmod;
 extern struct omap_hwmod am33xx_smartreflex0_hwmod;
 extern struct omap_hwmod am33xx_smartreflex1_hwmod;
-extern struct omap_hwmod am33xx_dcan0_hwmod;
-extern struct omap_hwmod am33xx_dcan1_hwmod;
-extern struct omap_hwmod am33xx_elm_hwmod;
-extern struct omap_hwmod am33xx_epwmss0_hwmod;
-extern struct omap_hwmod am33xx_epwmss1_hwmod;
-extern struct omap_hwmod am33xx_epwmss2_hwmod;
 extern struct omap_hwmod am33xx_gpmc_hwmod;
 extern struct omap_hwmod am33xx_rtc_hwmod;
-extern struct omap_hwmod am33xx_spi0_hwmod;
-extern struct omap_hwmod am33xx_spi1_hwmod;
-extern struct omap_hwmod am33xx_spinlock_hwmod;
 extern struct omap_hwmod am33xx_timer1_hwmod;
 extern struct omap_hwmod am33xx_timer2_hwmod;
-extern struct omap_hwmod am33xx_timer3_hwmod;
-extern struct omap_hwmod am33xx_timer4_hwmod;
-extern struct omap_hwmod am33xx_timer5_hwmod;
-extern struct omap_hwmod am33xx_timer6_hwmod;
-extern struct omap_hwmod am33xx_timer7_hwmod;
 extern struct omap_hwmod am33xx_tpcc_hwmod;
 extern struct omap_hwmod am33xx_tptc0_hwmod;
 extern struct omap_hwmod am33xx_tptc1_hwmod;
@@ -94,7 +62,6 @@ extern struct omap_hwmod_class am33xx_l4_hwmod_class;
 extern struct omap_hwmod_class am33xx_wkup_m3_hwmod_class;
 extern struct omap_hwmod_class am33xx_control_hwmod_class;
 extern struct omap_hwmod_class am33xx_timer_hwmod_class;
-extern struct omap_hwmod_class am33xx_epwmss_hwmod_class;
 extern struct omap_hwmod_class am33xx_ehrpwm_hwmod_class;
 extern struct omap_hwmod_class am33xx_spi_hwmod_class;
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
index 7123c455..ac7d5bb 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
@@ -106,50 +106,6 @@ struct omap_hwmod_ocp_if am33xx_l4_wkup__rtc = {
 	.user		= OCP_USER_MPU,
 };
 
-/* l4 per/ls -> DCAN0 */
-struct omap_hwmod_ocp_if am33xx_l4_per__dcan0 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_dcan0_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4 per/ls -> DCAN1 */
-struct omap_hwmod_ocp_if am33xx_l4_per__dcan1 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_dcan1_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-struct omap_hwmod_ocp_if am33xx_l4_ls__elm = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_elm_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss0 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_epwmss0_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss1 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_epwmss1_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss2 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_epwmss2_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
 /* l3s cfg -> gpmc */
 struct omap_hwmod_ocp_if am33xx_l3_s__gpmc = {
 	.master		= &am33xx_l3_s_hwmod,
@@ -158,30 +114,6 @@ struct omap_hwmod_ocp_if am33xx_l3_s__gpmc = {
 	.user		= OCP_USER_MPU,
 };
 
-/* l4 ls -> spinlock */
-struct omap_hwmod_ocp_if am33xx_l4_ls__spinlock = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_spinlock_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-/* l4 ls -> mcspi0 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__mcspi0 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_spi0_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-/* l4 ls -> mcspi1 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__mcspi1 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_spi1_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
 /* l4 per -> timer2 */
 struct omap_hwmod_ocp_if am33xx_l4_ls__timer2 = {
 	.master		= &am33xx_l4_ls_hwmod,
@@ -190,46 +122,6 @@ struct omap_hwmod_ocp_if am33xx_l4_ls__timer2 = {
 	.user		= OCP_USER_MPU,
 };
 
-/* l4 per -> timer3 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__timer3 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_timer3_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-/* l4 per -> timer4 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__timer4 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_timer4_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-/* l4 per -> timer5 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__timer5 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_timer5_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-/* l4 per -> timer6 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__timer6 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_timer6_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-/* l4 per -> timer7 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__timer7 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_timer7_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
 /* l3 main -> tpcc */
 struct omap_hwmod_ocp_if am33xx_l3_main__tpcc = {
 	.master		= &am33xx_l3_main_hwmod,
@@ -268,19 +160,3 @@ struct omap_hwmod_ocp_if am33xx_l3_main__ocmc = {
 	.slave		= &am33xx_ocmcram_hwmod,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
-
-/* l3 main -> sha0 HIB2 */
-struct omap_hwmod_ocp_if am33xx_l3_main__sha0 = {
-	.master		= &am33xx_l3_main_hwmod,
-	.slave		= &am33xx_sha0_hwmod,
-	.clk		= "sha0_fck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3 main -> AES0 HIB2 */
-struct omap_hwmod_ocp_if am33xx_l3_main__aes0 = {
-	.master		= &am33xx_l3_main_hwmod,
-	.slave		= &am33xx_aes0_hwmod,
-	.clk		= "aes0_fck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
index 2df86596..78ec1bc 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
@@ -213,57 +213,7 @@ struct omap_hwmod_class am33xx_emif_hwmod_class = {
 	.sysc		= &am33xx_emif_sysc,
 };
 
-/*
- * 'aes0' class
- */
-static struct omap_hwmod_class_sysconfig am33xx_aes0_sysc = {
-	.rev_offs	= 0x80,
-	.sysc_offs	= 0x84,
-	.syss_offs	= 0x88,
-	.sysc_flags	= SYSS_HAS_RESET_STATUS,
-};
 
-static struct omap_hwmod_class am33xx_aes0_hwmod_class = {
-	.name		= "aes0",
-	.sysc		= &am33xx_aes0_sysc,
-};
-
-struct omap_hwmod am33xx_aes0_hwmod = {
-	.name		= "aes",
-	.class		= &am33xx_aes0_hwmod_class,
-	.clkdm_name	= "l3_clkdm",
-	.main_clk	= "aes0_fck",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* sha0 HIB2 (the 'P' (public) device) */
-static struct omap_hwmod_class_sysconfig am33xx_sha0_sysc = {
-	.rev_offs	= 0x100,
-	.sysc_offs	= 0x110,
-	.syss_offs	= 0x114,
-	.sysc_flags	= SYSS_HAS_RESET_STATUS,
-};
-
-static struct omap_hwmod_class am33xx_sha0_hwmod_class = {
-	.name		= "sha0",
-	.sysc		= &am33xx_sha0_sysc,
-};
-
-struct omap_hwmod am33xx_sha0_hwmod = {
-	.name		= "sham",
-	.class		= &am33xx_sha0_hwmod_class,
-	.clkdm_name	= "l3_clkdm",
-	.main_clk	= "l3_gclk",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
 
 /* ocmcram */
 static struct omap_hwmod_class am33xx_ocmcram_hwmod_class = {
@@ -321,122 +271,6 @@ struct omap_hwmod_class am33xx_control_hwmod_class = {
 	.name		= "control",
 };
 
-/*
- * dcan class
- */
-static struct omap_hwmod_class am33xx_dcan_hwmod_class = {
-	.name = "d_can",
-};
-
-/* dcan0 */
-struct omap_hwmod am33xx_dcan0_hwmod = {
-	.name		= "d_can0",
-	.class		= &am33xx_dcan_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "dcan0_fck",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* dcan1 */
-struct omap_hwmod am33xx_dcan1_hwmod = {
-	.name		= "d_can1",
-	.class		= &am33xx_dcan_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "dcan1_fck",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* elm */
-static struct omap_hwmod_class_sysconfig am33xx_elm_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
-			SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
-			SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class am33xx_elm_hwmod_class = {
-	.name		= "elm",
-	.sysc		= &am33xx_elm_sysc,
-};
-
-struct omap_hwmod am33xx_elm_hwmod = {
-	.name		= "elm",
-	.class		= &am33xx_elm_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "l4ls_gclk",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* pwmss  */
-static struct omap_hwmod_class_sysconfig am33xx_epwmss_sysc = {
-	.rev_offs	= 0x0,
-	.sysc_offs	= 0x4,
-	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
-			MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
-	.sysc_fields	= &omap_hwmod_sysc_type2,
-};
-
-struct omap_hwmod_class am33xx_epwmss_hwmod_class = {
-	.name		= "epwmss",
-	.sysc		= &am33xx_epwmss_sysc,
-};
-
-/* epwmss0 */
-struct omap_hwmod am33xx_epwmss0_hwmod = {
-	.name		= "epwmss0",
-	.class		= &am33xx_epwmss_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "l4ls_gclk",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* epwmss1 */
-struct omap_hwmod am33xx_epwmss1_hwmod = {
-	.name		= "epwmss1",
-	.class		= &am33xx_epwmss_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "l4ls_gclk",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* epwmss2 */
-struct omap_hwmod am33xx_epwmss2_hwmod = {
-	.name		= "epwmss2",
-	.class		= &am33xx_epwmss_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "l4ls_gclk",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
 
 /* gpmc */
 static struct omap_hwmod_class_sysconfig gpmc_sysc = {
@@ -501,83 +335,6 @@ struct omap_hwmod am33xx_rtc_hwmod = {
 	},
 };
 
-/* 'spi' class */
-static struct omap_hwmod_class_sysconfig am33xx_mcspi_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0110,
-	.syss_offs	= 0x0114,
-	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
-			  SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
-			  SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-struct omap_hwmod_class am33xx_spi_hwmod_class = {
-	.name		= "mcspi",
-	.sysc		= &am33xx_mcspi_sysc,
-};
-
-/* spi0 */
-struct omap_hwmod am33xx_spi0_hwmod = {
-	.name		= "spi0",
-	.class		= &am33xx_spi_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "dpll_per_m2_div4_ck",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* spi1 */
-struct omap_hwmod am33xx_spi1_hwmod = {
-	.name		= "spi1",
-	.class		= &am33xx_spi_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "dpll_per_m2_div4_ck",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/*
- * 'spinlock' class
- * spinlock provides hardware assistance for synchronizing the
- * processes running on multiple processors
- */
-
-static struct omap_hwmod_class_sysconfig am33xx_spinlock_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
-			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
-			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class am33xx_spinlock_hwmod_class = {
-	.name		= "spinlock",
-	.sysc		= &am33xx_spinlock_sysc,
-};
-
-struct omap_hwmod am33xx_spinlock_hwmod = {
-	.name		= "spinlock",
-	.class		= &am33xx_spinlock_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "l4ls_gclk",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
 /* 'timer 2-7' class */
 static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
 	.rev_offs	= 0x0000,
@@ -636,66 +393,6 @@ struct omap_hwmod am33xx_timer2_hwmod = {
 	},
 };
 
-struct omap_hwmod am33xx_timer3_hwmod = {
-	.name		= "timer3",
-	.class		= &am33xx_timer_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "timer3_fck",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-struct omap_hwmod am33xx_timer4_hwmod = {
-	.name		= "timer4",
-	.class		= &am33xx_timer_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "timer4_fck",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-struct omap_hwmod am33xx_timer5_hwmod = {
-	.name		= "timer5",
-	.class		= &am33xx_timer_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "timer5_fck",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-struct omap_hwmod am33xx_timer6_hwmod = {
-	.name		= "timer6",
-	.class		= &am33xx_timer_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "timer6_fck",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-struct omap_hwmod am33xx_timer7_hwmod = {
-	.name		= "timer7",
-	.class		= &am33xx_timer_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "timer7_fck",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
 /* tpcc */
 static struct omap_hwmod_class am33xx_tpcc_hwmod_class = {
 	.name		= "tpcc",
@@ -772,21 +469,7 @@ struct omap_hwmod am33xx_tptc2_hwmod = {
 
 static void omap_hwmod_am33xx_clkctrl(void)
 {
-	CLKCTRL(am33xx_dcan0_hwmod, AM33XX_CM_PER_DCAN0_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_dcan1_hwmod, AM33XX_CM_PER_DCAN1_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_elm_hwmod, AM33XX_CM_PER_ELM_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_epwmss0_hwmod, AM33XX_CM_PER_EPWMSS0_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_epwmss1_hwmod, AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_epwmss2_hwmod, AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_spi0_hwmod, AM33XX_CM_PER_SPI0_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_spi1_hwmod, AM33XX_CM_PER_SPI1_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_spinlock_hwmod, AM33XX_CM_PER_SPINLOCK_CLKCTRL_OFFSET);
 	CLKCTRL(am33xx_timer2_hwmod, AM33XX_CM_PER_TIMER2_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_timer3_hwmod, AM33XX_CM_PER_TIMER3_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_timer4_hwmod, AM33XX_CM_PER_TIMER4_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_timer5_hwmod, AM33XX_CM_PER_TIMER5_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_timer6_hwmod, AM33XX_CM_PER_TIMER6_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_timer7_hwmod, AM33XX_CM_PER_TIMER7_CLKCTRL_OFFSET);
 	CLKCTRL(am33xx_smartreflex0_hwmod,
 		AM33XX_CM_WKUP_SMARTREFLEX0_CLKCTRL_OFFSET);
 	CLKCTRL(am33xx_smartreflex1_hwmod,
@@ -807,8 +490,6 @@ static void omap_hwmod_am33xx_clkctrl(void)
 	CLKCTRL(am33xx_mpu_hwmod , AM33XX_CM_MPU_MPU_CLKCTRL_OFFSET);
 	CLKCTRL(am33xx_l3_instr_hwmod , AM33XX_CM_PER_L3_INSTR_CLKCTRL_OFFSET);
 	CLKCTRL(am33xx_ocmcram_hwmod , AM33XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_sha0_hwmod , AM33XX_CM_PER_SHA0_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_aes0_hwmod , AM33XX_CM_PER_AES0_CLKCTRL_OFFSET);
 }
 
 static void omap_hwmod_am33xx_rst(void)
@@ -826,21 +507,7 @@ void omap_hwmod_am33xx_reg(void)
 
 static void omap_hwmod_am43xx_clkctrl(void)
 {
-	CLKCTRL(am33xx_dcan0_hwmod, AM43XX_CM_PER_DCAN0_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_dcan1_hwmod, AM43XX_CM_PER_DCAN1_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_elm_hwmod, AM43XX_CM_PER_ELM_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_epwmss0_hwmod, AM43XX_CM_PER_EPWMSS0_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_epwmss1_hwmod, AM43XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_epwmss2_hwmod, AM43XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_spi0_hwmod, AM43XX_CM_PER_SPI0_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_spi1_hwmod, AM43XX_CM_PER_SPI1_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_spinlock_hwmod, AM43XX_CM_PER_SPINLOCK_CLKCTRL_OFFSET);
 	CLKCTRL(am33xx_timer2_hwmod, AM43XX_CM_PER_TIMER2_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_timer3_hwmod, AM43XX_CM_PER_TIMER3_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_timer4_hwmod, AM43XX_CM_PER_TIMER4_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_timer5_hwmod, AM43XX_CM_PER_TIMER5_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_timer6_hwmod, AM43XX_CM_PER_TIMER6_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_timer7_hwmod, AM43XX_CM_PER_TIMER7_CLKCTRL_OFFSET);
 	CLKCTRL(am33xx_smartreflex0_hwmod,
 		AM43XX_CM_WKUP_SMARTREFLEX0_CLKCTRL_OFFSET);
 	CLKCTRL(am33xx_smartreflex1_hwmod,
@@ -860,8 +527,6 @@ static void omap_hwmod_am43xx_clkctrl(void)
 	CLKCTRL(am33xx_mpu_hwmod , AM43XX_CM_MPU_MPU_CLKCTRL_OFFSET);
 	CLKCTRL(am33xx_l3_instr_hwmod , AM43XX_CM_PER_L3_INSTR_CLKCTRL_OFFSET);
 	CLKCTRL(am33xx_ocmcram_hwmod , AM43XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_sha0_hwmod , AM43XX_CM_PER_SHA0_CLKCTRL_OFFSET);
-	CLKCTRL(am33xx_aes0_hwmod , AM43XX_CM_PER_AES0_CLKCTRL_OFFSET);
 }
 
 static void omap_hwmod_am43xx_rst(void)
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index c63f664..f1ea8c6 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -81,36 +81,6 @@ static struct omap_hwmod am33xx_wkup_m3_hwmod = {
 	.rst_lines_cnt	= ARRAY_SIZE(am33xx_wkup_m3_resets),
 };
 
-/*
- * 'adc/tsc' class
- * TouchScreen Controller (Anolog-To-Digital Converter)
- */
-static struct omap_hwmod_class_sysconfig am33xx_adc_tsc_sysc = {
-	.rev_offs	= 0x00,
-	.sysc_offs	= 0x10,
-	.sysc_flags	= SYSC_HAS_SIDLEMODE,
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			SIDLE_SMART_WKUP),
-	.sysc_fields	= &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class am33xx_adc_tsc_hwmod_class = {
-	.name		= "adc_tsc",
-	.sysc		= &am33xx_adc_tsc_sysc,
-};
-
-static struct omap_hwmod am33xx_adc_tsc_hwmod = {
-	.name		= "adc_tsc",
-	.class		= &am33xx_adc_tsc_hwmod_class,
-	.clkdm_name	= "l4_wkup_clkdm",
-	.main_clk	= "adc_tsc_fck",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs	= AM33XX_CM_WKUP_ADC_TSC_CLKCTRL_OFFSET,
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
 
 /*
  * Modules omap_hwmod structures
@@ -226,34 +196,6 @@ static struct omap_hwmod am33xx_control_hwmod = {
 	},
 };
 
-/* lcdc */
-static struct omap_hwmod_class_sysconfig lcdc_sysc = {
-	.rev_offs	= 0x0,
-	.sysc_offs	= 0x54,
-	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE,
-	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			  MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART,
-	.sysc_fields	= &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class am33xx_lcdc_hwmod_class = {
-	.name		= "lcdc",
-	.sysc		= &lcdc_sysc,
-};
-
-static struct omap_hwmod am33xx_lcdc_hwmod = {
-	.name		= "lcdc",
-	.class		= &am33xx_lcdc_hwmod_class,
-	.clkdm_name	= "lcdc_clkdm",
-	.flags		= HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
-	.main_clk	= "lcd_gclk",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs	= AM33XX_CM_PER_LCDC_CLKCTRL_OFFSET,
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
 
 /*
  * Interfaces
@@ -331,21 +273,6 @@ static struct omap_hwmod_ocp_if am33xx_l4_wkup__control = {
 	.user		= OCP_USER_MPU,
 };
 
-/* L4 WKUP -> ADC_TSC */
-static struct omap_hwmod_ocp_if am33xx_l4_wkup__adc_tsc = {
-	.master		= &am33xx_l4_wkup_hwmod,
-	.slave		= &am33xx_adc_tsc_hwmod,
-	.clk		= "dpll_core_m4_div2_ck",
-	.user		= OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am33xx_l3_main__lcdc = {
-	.master		= &am33xx_l3_main_hwmod,
-	.slave		= &am33xx_lcdc_hwmod,
-	.clk		= "dpll_core_m4_ck",
-	.user		= OCP_USER_MPU,
-};
-
 /* l4 wkup -> timer1 */
 static struct omap_hwmod_ocp_if am33xx_l4_wkup__timer1 = {
 	.master		= &am33xx_l4_wkup_hwmod,
@@ -375,32 +302,14 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
 	&am33xx_l4_wkup__smartreflex1,
 	&am33xx_l4_wkup__timer1,
 	&am33xx_l4_wkup__rtc,
-	&am33xx_l4_wkup__adc_tsc,
 	&am33xx_l4_hs__pruss,
-	&am33xx_l4_per__dcan0,
-	&am33xx_l4_per__dcan1,
 	&am33xx_l4_ls__timer2,
-	&am33xx_l4_ls__timer3,
-	&am33xx_l4_ls__timer4,
-	&am33xx_l4_ls__timer5,
-	&am33xx_l4_ls__timer6,
-	&am33xx_l4_ls__timer7,
 	&am33xx_l3_main__tpcc,
-	&am33xx_l4_ls__spinlock,
-	&am33xx_l4_ls__elm,
-	&am33xx_l4_ls__epwmss0,
-	&am33xx_l4_ls__epwmss1,
-	&am33xx_l4_ls__epwmss2,
 	&am33xx_l3_s__gpmc,
-	&am33xx_l3_main__lcdc,
-	&am33xx_l4_ls__mcspi0,
-	&am33xx_l4_ls__mcspi1,
 	&am33xx_l3_main__tptc0,
 	&am33xx_l3_main__tptc1,
 	&am33xx_l3_main__tptc2,
 	&am33xx_l3_main__ocmc,
-	&am33xx_l3_main__sha0,
-	&am33xx_l3_main__aes0,
 	NULL,
 };
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index f52438b..3c8d2b6 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -16,7 +16,6 @@
 #include <linux/power/smartreflex.h>
 #include <linux/platform_data/hsmmc-omap.h>
 
-#include <linux/omap-dma.h>
 #include "l3_3xxx.h"
 #include "l4_3xxx.h"
 
@@ -833,47 +832,6 @@ static struct omap_hwmod omap3xxx_gpio6_hwmod = {
 	.class		= &omap3xxx_gpio_hwmod_class,
 };
 
-/* dma attributes */
-static struct omap_dma_dev_attr dma_dev_attr = {
-	.dev_caps  = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
-				IS_CSSA_32 | IS_CDSA_32 | IS_RW_PRIORITY,
-	.lch_count = 32,
-};
-
-static struct omap_hwmod_class_sysconfig omap3xxx_dma_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x002c,
-	.syss_offs	= 0x0028,
-	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
-			   SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
-			   SYSC_HAS_EMUFREE | SYSC_HAS_AUTOIDLE |
-			   SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap3xxx_dma_hwmod_class = {
-	.name = "dma",
-	.sysc = &omap3xxx_dma_sysc,
-};
-
-/* dma_system */
-static struct omap_hwmod omap3xxx_dma_system_hwmod = {
-	.name		= "dma",
-	.class		= &omap3xxx_dma_hwmod_class,
-	.main_clk	= "core_l3_ick",
-	.prcm = {
-		.omap2 = {
-			.module_offs		= CORE_MOD,
-			.idlest_reg_id		= 1,
-			.idlest_idle_bit	= OMAP3430_ST_SDMA_SHIFT,
-		},
-	},
-	.dev_attr	= &dma_dev_attr,
-	.flags		= HWMOD_NO_IDLEST,
-};
-
 /*
  * 'mcbsp' class
  * multi channel buffered serial port controller
@@ -2233,23 +2191,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio6 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* dma_system -> L3 */
-static struct omap_hwmod_ocp_if omap3xxx_dma_system__l3 = {
-	.master		= &omap3xxx_dma_system_hwmod,
-	.slave		= &omap3xxx_l3_main_hwmod,
-	.clk		= "core_l3_ick",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> dma_system */
-static struct omap_hwmod_ocp_if omap3xxx_l4_core__dma_system = {
-	.master		= &omap3xxx_l4_core_hwmod,
-	.slave		= &omap3xxx_dma_system_hwmod,
-	.clk		= "core_l4_ick",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-
 /* l4_core -> mcbsp1 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
@@ -2628,8 +2569,6 @@ static struct omap_hwmod_ocp_if *omap3xxx_hwmod_ocp_ifs[] __initdata = {
 	&omap3xxx_l4_per__gpio4,
 	&omap3xxx_l4_per__gpio5,
 	&omap3xxx_l4_per__gpio6,
-	&omap3xxx_dma_system__l3,
-	&omap3xxx_l4_core__dma_system,
 	&omap3xxx_l4_core__mcbsp1,
 	&omap3xxx_l4_per__mcbsp2,
 	&omap3xxx_l4_per__mcbsp3,
diff --git a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
index b81f834..d0867db 100644
--- a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
@@ -112,165 +112,6 @@ static struct omap_hwmod am43xx_synctimer_hwmod = {
 	},
 };
 
-static struct omap_hwmod am43xx_timer8_hwmod = {
-	.name		= "timer8",
-	.class		= &am33xx_timer_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "timer8_fck",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs	= AM43XX_CM_PER_TIMER8_CLKCTRL_OFFSET,
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod am43xx_timer9_hwmod = {
-	.name		= "timer9",
-	.class		= &am33xx_timer_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "timer9_fck",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs	= AM43XX_CM_PER_TIMER9_CLKCTRL_OFFSET,
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod am43xx_timer10_hwmod = {
-	.name		= "timer10",
-	.class		= &am33xx_timer_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "timer10_fck",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs	= AM43XX_CM_PER_TIMER10_CLKCTRL_OFFSET,
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod am43xx_timer11_hwmod = {
-	.name		= "timer11",
-	.class		= &am33xx_timer_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "timer11_fck",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs	= AM43XX_CM_PER_TIMER11_CLKCTRL_OFFSET,
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod am43xx_epwmss3_hwmod = {
-	.name		= "epwmss3",
-	.class		= &am33xx_epwmss_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "l4ls_gclk",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs = AM43XX_CM_PER_EPWMSS3_CLKCTRL_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod am43xx_epwmss4_hwmod = {
-	.name		= "epwmss4",
-	.class		= &am33xx_epwmss_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "l4ls_gclk",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs = AM43XX_CM_PER_EPWMSS4_CLKCTRL_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod am43xx_epwmss5_hwmod = {
-	.name		= "epwmss5",
-	.class		= &am33xx_epwmss_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "l4ls_gclk",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs = AM43XX_CM_PER_EPWMSS5_CLKCTRL_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod am43xx_spi2_hwmod = {
-	.name		= "spi2",
-	.class		= &am33xx_spi_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "dpll_per_m2_div4_ck",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs = AM43XX_CM_PER_SPI2_CLKCTRL_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod am43xx_spi3_hwmod = {
-	.name		= "spi3",
-	.class		= &am33xx_spi_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "dpll_per_m2_div4_ck",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs = AM43XX_CM_PER_SPI3_CLKCTRL_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod am43xx_spi4_hwmod = {
-	.name		= "spi4",
-	.class		= &am33xx_spi_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "dpll_per_m2_div4_ck",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs = AM43XX_CM_PER_SPI4_CLKCTRL_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod_class am43xx_ocp2scp_hwmod_class = {
-	.name	= "ocp2scp",
-};
-
-static struct omap_hwmod am43xx_ocp2scp0_hwmod = {
-	.name		= "ocp2scp0",
-	.class		= &am43xx_ocp2scp_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "l4ls_gclk",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = AM43XX_CM_PER_USBPHYOCP2SCP0_CLKCTRL_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod am43xx_ocp2scp1_hwmod = {
-	.name		= "ocp2scp1",
-	.class		= &am43xx_ocp2scp_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.main_clk	= "l4ls_gclk",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs	= AM43XX_CM_PER_USBPHYOCP2SCP1_CLKCTRL_OFFSET,
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
 
 static struct omap_hwmod_class_sysconfig am43xx_usb_otg_ss_sysc = {
 	.rev_offs	= 0x0000,
@@ -315,89 +156,6 @@ static struct omap_hwmod am43xx_usb_otg_ss1_hwmod = {
 	},
 };
 
-static struct omap_hwmod_class_sysconfig am43xx_qspi_sysc = {
-	.rev_offs	= 0,
-	.sysc_offs      = 0x0010,
-	.sysc_flags     = SYSC_HAS_SIDLEMODE,
-	.idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-				SIDLE_SMART_WKUP),
-	.sysc_fields    = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class am43xx_qspi_hwmod_class = {
-	.name   = "qspi",
-	.sysc   = &am43xx_qspi_sysc,
-};
-
-static struct omap_hwmod am43xx_qspi_hwmod = {
-	.name           = "qspi",
-	.class          = &am43xx_qspi_hwmod_class,
-	.clkdm_name     = "l3s_clkdm",
-	.main_clk       = "l3s_gclk",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = AM43XX_CM_PER_QSPI_CLKCTRL_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/*
- * 'adc/tsc' class
- * TouchScreen Controller (Analog-To-Digital Converter)
- */
-static struct omap_hwmod_class_sysconfig am43xx_adc_tsc_sysc = {
-	.rev_offs	= 0x00,
-	.sysc_offs	= 0x10,
-	.sysc_flags	= SYSC_HAS_SIDLEMODE,
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			  SIDLE_SMART_WKUP),
-	.sysc_fields	= &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class am43xx_adc_tsc_hwmod_class = {
-	.name		= "adc_tsc",
-	.sysc		= &am43xx_adc_tsc_sysc,
-};
-
-static struct omap_hwmod am43xx_adc_tsc_hwmod = {
-	.name		= "adc_tsc",
-	.class		= &am43xx_adc_tsc_hwmod_class,
-	.clkdm_name	= "l3s_tsc_clkdm",
-	.main_clk	= "adc_tsc_fck",
-	.prcm		= {
-		.omap4  = {
-			.clkctrl_offs   = AM43XX_CM_WKUP_ADC_TSC_CLKCTRL_OFFSET,
-			.modulemode     = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod_class_sysconfig am43xx_des_sysc = {
-	.rev_offs	= 0x30,
-	.sysc_offs	= 0x34,
-	.syss_offs	= 0x38,
-	.sysc_flags	= SYSS_HAS_RESET_STATUS,
-};
-
-static struct omap_hwmod_class am43xx_des_hwmod_class = {
-	.name		= "des",
-	.sysc		= &am43xx_des_sysc,
-};
-
-static struct omap_hwmod am43xx_des_hwmod = {
-	.name		= "des",
-	.class		= &am43xx_des_hwmod_class,
-	.clkdm_name	= "l3_clkdm",
-	.main_clk	= "l3_gclk",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs	= AM43XX_CM_PER_DES_CLKCTRL_OFFSET,
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
 /* dss */
 
 static struct omap_hwmod am43xx_dss_core_hwmod = {
@@ -467,44 +225,6 @@ static struct omap_hwmod am43xx_dss_rfbi_hwmod = {
 };
 
 
-static struct omap_hwmod_class_sysconfig am43xx_vpfe_sysc = {
-	.rev_offs       = 0x0,
-	.sysc_offs      = 0x104,
-	.sysc_flags     = SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE,
-	.idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-				MSTANDBY_FORCE | MSTANDBY_SMART | MSTANDBY_NO),
-	.sysc_fields    = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class am43xx_vpfe_hwmod_class = {
-	.name           = "vpfe",
-	.sysc           = &am43xx_vpfe_sysc,
-};
-
-static struct omap_hwmod am43xx_vpfe0_hwmod = {
-	.name           = "vpfe0",
-	.class          = &am43xx_vpfe_hwmod_class,
-	.clkdm_name     = "l3s_clkdm",
-	.prcm           = {
-		.omap4  = {
-			.modulemode     = MODULEMODE_SWCTRL,
-			.clkctrl_offs   = AM43XX_CM_PER_VPFE0_CLKCTRL_OFFSET,
-		},
-	},
-};
-
-static struct omap_hwmod am43xx_vpfe1_hwmod = {
-	.name           = "vpfe1",
-	.class          = &am43xx_vpfe_hwmod_class,
-	.clkdm_name     = "l3s_clkdm",
-	.prcm           = {
-		.omap4  = {
-			.modulemode     = MODULEMODE_SWCTRL,
-			.clkctrl_offs   = AM43XX_CM_PER_VPFE1_CLKCTRL_OFFSET,
-		},
-	},
-};
-
 /* Interfaces */
 static struct omap_hwmod_ocp_if am43xx_l3_main__emif = {
 	.master		= &am33xx_l3_main_hwmod,
@@ -562,13 +282,6 @@ static struct omap_hwmod_ocp_if am43xx_l4_wkup__control = {
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod_ocp_if am43xx_l4_wkup__adc_tsc = {
-	.master         = &am33xx_l4_wkup_hwmod,
-	.slave          = &am43xx_adc_tsc_hwmod,
-	.clk            = "dpll_core_m4_div2_ck",
-	.user           = OCP_USER_MPU,
-};
-
 static struct omap_hwmod_ocp_if am43xx_l4_wkup__timer1 = {
 	.master		= &am33xx_l4_wkup_hwmod,
 	.slave		= &am33xx_timer1_hwmod,
@@ -583,90 +296,6 @@ static struct omap_hwmod_ocp_if am33xx_l4_wkup__synctimer = {
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod_ocp_if am43xx_l4_ls__timer8 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am43xx_timer8_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__timer9 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am43xx_timer9_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__timer10 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am43xx_timer10_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__timer11 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am43xx_timer11_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__epwmss3 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am43xx_epwmss3_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__epwmss4 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am43xx_epwmss4_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__epwmss5 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am43xx_epwmss5_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__mcspi2 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am43xx_spi2_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__mcspi3 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am43xx_spi3_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__mcspi4 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am43xx_spi4_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__ocp2scp0 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am43xx_ocp2scp0_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__ocp2scp1 = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am43xx_ocp2scp1_hwmod,
-	.clk		= "l4ls_gclk",
-	.user		= OCP_USER_MPU,
-};
-
 static struct omap_hwmod_ocp_if am43xx_l3_s__usbotgss0 = {
 	.master         = &am33xx_l3_s_hwmod,
 	.slave          = &am43xx_usb_otg_ss0_hwmod,
@@ -681,13 +310,6 @@ static struct omap_hwmod_ocp_if am43xx_l3_s__usbotgss1 = {
 	.user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_ocp_if am43xx_l3_s__qspi = {
-	.master         = &am33xx_l3_s_hwmod,
-	.slave          = &am43xx_qspi_hwmod,
-	.clk            = "l3s_gclk",
-	.user           = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 static struct omap_hwmod_ocp_if am43xx_dss__l3_main = {
 	.master		= &am43xx_dss_core_hwmod,
 	.slave		= &am33xx_l3_main_hwmod,
@@ -716,53 +338,8 @@ static struct omap_hwmod_ocp_if am43xx_l4_ls__dss_rfbi = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_ocp_if am43xx_l3__vpfe0 = {
-	.master         = &am43xx_vpfe0_hwmod,
-	.slave          = &am33xx_l3_main_hwmod,
-	.clk            = "l3_gclk",
-	.user           = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l3__vpfe1 = {
-	.master         = &am43xx_vpfe1_hwmod,
-	.slave          = &am33xx_l3_main_hwmod,
-	.clk            = "l3_gclk",
-	.user           = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__vpfe0 = {
-	.master         = &am33xx_l4_ls_hwmod,
-	.slave          = &am43xx_vpfe0_hwmod,
-	.clk            = "l4ls_gclk",
-	.user           = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__vpfe1 = {
-	.master         = &am33xx_l4_ls_hwmod,
-	.slave          = &am43xx_vpfe1_hwmod,
-	.clk            = "l4ls_gclk",
-	.user           = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l3_main__des = {
-	.master		= &am33xx_l3_main_hwmod,
-	.slave		= &am43xx_des_hwmod,
-	.clk		= "l3_gclk",
-	.user		= OCP_USER_MPU,
-};
-
 static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
 	&am33xx_l4_wkup__synctimer,
-	&am43xx_l4_ls__timer8,
-	&am43xx_l4_ls__timer9,
-	&am43xx_l4_ls__timer10,
-	&am43xx_l4_ls__timer11,
-	&am43xx_l4_ls__epwmss3,
-	&am43xx_l4_ls__epwmss4,
-	&am43xx_l4_ls__epwmss5,
-	&am43xx_l4_ls__mcspi2,
-	&am43xx_l4_ls__mcspi3,
-	&am43xx_l4_ls__mcspi4,
 	&am43xx_l3_main__pruss,
 	&am33xx_mpu__l3_main,
 	&am33xx_mpu__prcm,
@@ -782,44 +359,19 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
 	&am43xx_l4_wkup__smartreflex0,
 	&am43xx_l4_wkup__smartreflex1,
 	&am43xx_l4_wkup__timer1,
-	&am43xx_l4_wkup__adc_tsc,
-	&am43xx_l3_s__qspi,
-	&am33xx_l4_per__dcan0,
-	&am33xx_l4_per__dcan1,
 	&am33xx_l4_ls__timer2,
-	&am33xx_l4_ls__timer3,
-	&am33xx_l4_ls__timer4,
-	&am33xx_l4_ls__timer5,
-	&am33xx_l4_ls__timer6,
-	&am33xx_l4_ls__timer7,
 	&am33xx_l3_main__tpcc,
-	&am33xx_l4_ls__spinlock,
-	&am33xx_l4_ls__elm,
-	&am33xx_l4_ls__epwmss0,
-	&am33xx_l4_ls__epwmss1,
-	&am33xx_l4_ls__epwmss2,
 	&am33xx_l3_s__gpmc,
-	&am33xx_l4_ls__mcspi0,
-	&am33xx_l4_ls__mcspi1,
 	&am33xx_l3_main__tptc0,
 	&am33xx_l3_main__tptc1,
 	&am33xx_l3_main__tptc2,
 	&am33xx_l3_main__ocmc,
-	&am33xx_l3_main__sha0,
-	&am33xx_l3_main__aes0,
-	&am43xx_l3_main__des,
-	&am43xx_l4_ls__ocp2scp0,
-	&am43xx_l4_ls__ocp2scp1,
 	&am43xx_l3_s__usbotgss0,
 	&am43xx_l3_s__usbotgss1,
 	&am43xx_dss__l3_main,
 	&am43xx_l4_ls__dss,
 	&am43xx_l4_ls__dss_dispc,
 	&am43xx_l4_ls__dss_rfbi,
-	&am43xx_l3__vpfe0,
-	&am43xx_l3__vpfe1,
-	&am43xx_l4_ls__vpfe0,
-	&am43xx_l4_ls__vpfe1,
 	NULL,
 };
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 292f544..b7c51ea 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -18,9 +18,6 @@
  */
 
 #include <linux/io.h>
-#include <linux/power/smartreflex.h>
-
-#include <linux/omap-dma.h>
 
 #include "omap_hwmod.h"
 #include "omap_hwmod_common_data.h"
@@ -32,9 +29,6 @@
 /* Base offset for all OMAP4 interrupts external to MPUSS */
 #define OMAP44XX_IRQ_GIC_START	32
 
-/* Base offset for all OMAP4 dma requests */
-#define OMAP44XX_DMA_REQ_START	1
-
 /*
  * IP blocks
  */
@@ -238,43 +232,6 @@ static struct omap_hwmod omap44xx_ocp_wp_noc_hwmod = {
  */
 
 /*
- * 'aess' class
- * audio engine sub system
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_aess_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.sysc_flags	= (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART |
-			   MSTANDBY_SMART_WKUP),
-	.sysc_fields	= &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap44xx_aess_hwmod_class = {
-	.name	= "aess",
-	.sysc	= &omap44xx_aess_sysc,
-	.enable_preprogram = omap_hwmod_aess_preprogram,
-};
-
-/* aess */
-static struct omap_hwmod omap44xx_aess_hwmod = {
-	.name		= "aess",
-	.class		= &omap44xx_aess_hwmod_class,
-	.clkdm_name	= "abe_clkdm",
-	.main_clk	= "aess_fclk",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM1_ABE_AESS_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_ABE_AESS_CONTEXT_OFFSET,
-			.lostcontext_mask = OMAP4430_LOSTCONTEXT_DFF_MASK,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/*
  * 'counter' class
  * 32-bit ordinary counter, clocked by the falling edge of the 32 khz clock
  */
@@ -399,87 +356,6 @@ static struct omap_hwmod omap44xx_debugss_hwmod = {
 };
 
 /*
- * 'dma' class
- * dma controller for data exchange between memory to memory (i.e. internal or
- * external memory) and gp peripherals to memory or memory to gp peripherals
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_dma_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x002c,
-	.syss_offs	= 0x0028,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
-			   SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
-			   SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_dma_hwmod_class = {
-	.name	= "dma",
-	.sysc	= &omap44xx_dma_sysc,
-};
-
-/* dma dev_attr */
-static struct omap_dma_dev_attr dma_dev_attr = {
-	.dev_caps	= RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
-			  IS_CSSA_32 | IS_CDSA_32 | IS_RW_PRIORITY,
-	.lch_count	= 32,
-};
-
-/* dma_system */
-static struct omap_hwmod omap44xx_dma_system_hwmod = {
-	.name		= "dma_system",
-	.class		= &omap44xx_dma_hwmod_class,
-	.clkdm_name	= "l3_dma_clkdm",
-	.main_clk	= "l3_div_ck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_SDMA_SDMA_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_SDMA_SDMA_CONTEXT_OFFSET,
-		},
-	},
-	.dev_attr	= &dma_dev_attr,
-};
-
-/*
- * 'dmic' class
- * digital microphone controller
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_dmic_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
-	.sysc_fields	= &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap44xx_dmic_hwmod_class = {
-	.name	= "dmic",
-	.sysc	= &omap44xx_dmic_sysc,
-};
-
-/* dmic */
-static struct omap_hwmod omap44xx_dmic_hwmod = {
-	.name		= "dmic",
-	.class		= &omap44xx_dmic_hwmod_class,
-	.clkdm_name	= "abe_clkdm",
-	.main_clk	= "func_dmic_abe_gfclk",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM1_ABE_DMIC_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_ABE_DMIC_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/*
  * 'dsp' class
  * dsp sub-system
  */
@@ -777,66 +653,7 @@ static struct omap_hwmod omap44xx_dss_venc_hwmod = {
 	.opt_clks_cnt	= ARRAY_SIZE(dss_venc_opt_clks),
 };
 
-/* sha0 HIB2 (the 'P' (public) device) */
-static struct omap_hwmod_class_sysconfig omap44xx_sha0_sysc = {
-	.rev_offs	= 0x100,
-	.sysc_offs	= 0x110,
-	.syss_offs	= 0x114,
-	.sysc_flags	= SYSS_HAS_RESET_STATUS,
-};
 
-static struct omap_hwmod_class omap44xx_sha0_hwmod_class = {
-	.name		= "sham",
-	.sysc		= &omap44xx_sha0_sysc,
-};
-
-static struct omap_hwmod omap44xx_sha0_hwmod = {
-	.name		= "sham",
-	.class		= &omap44xx_sha0_hwmod_class,
-	.clkdm_name	= "l4_secure_clkdm",
-	.main_clk	= "l3_div_ck",
-	.prcm		= {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_L4SEC_SHA2MD51_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_L4SEC_SHA2MD51_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/*
- * 'elm' class
- * bch error location module
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_elm_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
-			   SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_elm_hwmod_class = {
-	.name	= "elm",
-	.sysc	= &omap44xx_elm_sysc,
-};
-
-/* elm */
-static struct omap_hwmod omap44xx_elm_hwmod = {
-	.name		= "elm",
-	.class		= &omap44xx_elm_hwmod_class,
-	.clkdm_name	= "l4_per_clkdm",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_L4PER_ELM_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_L4PER_ELM_CONTEXT_OFFSET,
-		},
-	},
-};
 
 /*
  * 'emif' class
@@ -885,147 +702,6 @@ static struct omap_hwmod omap44xx_emif2_hwmod = {
 };
 
 /*
-    Crypto modules AES0/1 belong to:
-	PD_L4_PER power domain
-	CD_L4_SEC clock domain
-	On the L3, the AES modules are mapped to
-	L3_CLK2: Peripherals and multimedia sub clock domain
-*/
-static struct omap_hwmod_class_sysconfig omap44xx_aes_sysc = {
-	.rev_offs	= 0x80,
-	.sysc_offs	= 0x84,
-	.syss_offs	= 0x88,
-	.sysc_flags	= SYSS_HAS_RESET_STATUS,
-};
-
-static struct omap_hwmod_class omap44xx_aes_hwmod_class = {
-	.name		= "aes",
-	.sysc		= &omap44xx_aes_sysc,
-};
-
-static struct omap_hwmod omap44xx_aes1_hwmod = {
-	.name		= "aes1",
-	.class		= &omap44xx_aes_hwmod_class,
-	.clkdm_name	= "l4_secure_clkdm",
-	.main_clk	= "l3_div_ck",
-	.prcm		= {
-		.omap4	= {
-			.context_offs	= OMAP4_RM_L4SEC_AES1_CONTEXT_OFFSET,
-			.clkctrl_offs	= OMAP4_CM_L4SEC_AES1_CLKCTRL_OFFSET,
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__aes1 = {
-	.master		= &omap44xx_l4_per_hwmod,
-	.slave		= &omap44xx_aes1_hwmod,
-	.clk		= "l3_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod omap44xx_aes2_hwmod = {
-	.name		= "aes2",
-	.class		= &omap44xx_aes_hwmod_class,
-	.clkdm_name	= "l4_secure_clkdm",
-	.main_clk	= "l3_div_ck",
-	.prcm		= {
-		.omap4	= {
-			.context_offs	= OMAP4_RM_L4SEC_AES2_CONTEXT_OFFSET,
-			.clkctrl_offs	= OMAP4_CM_L4SEC_AES2_CLKCTRL_OFFSET,
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__aes2 = {
-	.master		= &omap44xx_l4_per_hwmod,
-	.slave		= &omap44xx_aes2_hwmod,
-	.clk		= "l3_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/*
- * 'des' class for DES3DES module
- */
-static struct omap_hwmod_class_sysconfig omap44xx_des_sysc = {
-	.rev_offs	= 0x30,
-	.sysc_offs	= 0x34,
-	.syss_offs	= 0x38,
-	.sysc_flags	= SYSS_HAS_RESET_STATUS,
-};
-
-static struct omap_hwmod_class omap44xx_des_hwmod_class = {
-	.name		= "des",
-	.sysc		= &omap44xx_des_sysc,
-};
-
-static struct omap_hwmod omap44xx_des_hwmod = {
-	.name		= "des",
-	.class		= &omap44xx_des_hwmod_class,
-	.clkdm_name	= "l4_secure_clkdm",
-	.main_clk	= "l3_div_ck",
-	.prcm		= {
-		.omap4	= {
-			.context_offs	= OMAP4_RM_L4SEC_DES3DES_CONTEXT_OFFSET,
-			.clkctrl_offs	= OMAP4_CM_L4SEC_DES3DES_CLKCTRL_OFFSET,
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__des = {
-	.master		= &omap44xx_l3_main_2_hwmod,
-	.slave		= &omap44xx_des_hwmod,
-	.clk		= "l3_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/*
- * 'fdif' class
- * face detection hw accelerator module
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_fdif_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	/*
-	 * FDIF needs 100 OCP clk cycles delay after a softreset before
-	 * accessing sysconfig again.
-	 * The lowest frequency at the moment for L3 bus is 100 MHz, so
-	 * 1usec delay is needed. Add an x2 margin to be safe (2 usecs).
-	 *
-	 * TODO: Indicate errata when available.
-	 */
-	.srst_udelay	= 2,
-	.sysc_flags	= (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap44xx_fdif_hwmod_class = {
-	.name	= "fdif",
-	.sysc	= &omap44xx_fdif_sysc,
-};
-
-/* fdif */
-static struct omap_hwmod omap44xx_fdif_hwmod = {
-	.name		= "fdif",
-	.class		= &omap44xx_fdif_hwmod_class,
-	.clkdm_name	= "iss_clkdm",
-	.main_clk	= "fdif_fck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_CAM_FDIF_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_CAM_FDIF_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/*
  * 'gpmc' class
  * general purpose memory controller
  */
@@ -1063,45 +739,6 @@ static struct omap_hwmod omap44xx_gpmc_hwmod = {
 
 
 /*
- * 'hsi' class
- * mipi high-speed synchronous serial interface (multichannel and full-duplex
- * serial if)
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_hsi_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_EMUFREE |
-			   SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE |
-			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
-			   MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_hsi_hwmod_class = {
-	.name	= "hsi",
-	.sysc	= &omap44xx_hsi_sysc,
-};
-
-/* hsi */
-static struct omap_hwmod omap44xx_hsi_hwmod = {
-	.name		= "hsi",
-	.class		= &omap44xx_hsi_hwmod_class,
-	.clkdm_name	= "l3_init_clkdm",
-	.main_clk	= "hsi_fck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_L3INIT_HSI_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_L3INIT_HSI_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_HWCTRL,
-		},
-	},
-};
-
-/*
  * 'ipu' class
  * imaging processor unit
  */
@@ -1218,177 +855,6 @@ static struct omap_hwmod omap44xx_iva_hwmod = {
 };
 
 /*
- * 'kbd' class
- * keyboard controller
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_kbd_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
-			   SYSC_HAS_EMUFREE | SYSC_HAS_ENAWAKEUP |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
-			   SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_kbd_hwmod_class = {
-	.name	= "kbd",
-	.sysc	= &omap44xx_kbd_sysc,
-};
-
-/* kbd */
-static struct omap_hwmod omap44xx_kbd_hwmod = {
-	.name		= "kbd",
-	.class		= &omap44xx_kbd_hwmod_class,
-	.clkdm_name	= "l4_wkup_clkdm",
-	.main_clk	= "sys_32k_ck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_WKUP_KEYBOARD_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_WKUP_KEYBOARD_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-
-/*
- * 'mcpdm' class
- * multi channel pdm controller (proprietary interface with phoenix power
- * ic)
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_mcpdm_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
-	.sysc_fields	= &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap44xx_mcpdm_hwmod_class = {
-	.name	= "mcpdm",
-	.sysc	= &omap44xx_mcpdm_sysc,
-};
-
-/* mcpdm */
-static struct omap_hwmod omap44xx_mcpdm_hwmod = {
-	.name		= "mcpdm",
-	.class		= &omap44xx_mcpdm_hwmod_class,
-	.clkdm_name	= "abe_clkdm",
-	/*
-	 * It's suspected that the McPDM requires an off-chip main
-	 * functional clock, controlled via I2C.  This IP block is
-	 * currently reset very early during boot, before I2C is
-	 * available, so it doesn't seem that we have any choice in
-	 * the kernel other than to avoid resetting it.
-	 *
-	 * Also, McPDM needs to be configured to NO_IDLE mode when it
-	 * is in used otherwise vital clocks will be gated which
-	 * results 'slow motion' audio playback.
-	 */
-	.flags		= HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE,
-	.main_clk	= "pad_clks_ck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM1_ABE_PDM_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_ABE_PDM_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/*
- * 'mmu' class
- * The memory management unit performs virtual to physical address translation
- * for its requestors.
- */
-
-static struct omap_hwmod_class_sysconfig mmu_sysc = {
-	.rev_offs	= 0x000,
-	.sysc_offs	= 0x010,
-	.syss_offs	= 0x014,
-	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
-			   SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_mmu_hwmod_class = {
-	.name = "mmu",
-	.sysc = &mmu_sysc,
-};
-
-/* mmu ipu */
-
-static struct omap_hwmod omap44xx_mmu_ipu_hwmod;
-static struct omap_hwmod_rst_info omap44xx_mmu_ipu_resets[] = {
-	{ .name = "mmu_cache", .rst_shift = 2 },
-};
-
-/* l3_main_2 -> mmu_ipu */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__mmu_ipu = {
-	.master		= &omap44xx_l3_main_2_hwmod,
-	.slave		= &omap44xx_mmu_ipu_hwmod,
-	.clk		= "l3_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod omap44xx_mmu_ipu_hwmod = {
-	.name		= "mmu_ipu",
-	.class		= &omap44xx_mmu_hwmod_class,
-	.clkdm_name	= "ducati_clkdm",
-	.rst_lines	= omap44xx_mmu_ipu_resets,
-	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_mmu_ipu_resets),
-	.main_clk	= "ducati_clk_mux_ck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_DUCATI_DUCATI_CLKCTRL_OFFSET,
-			.rstctrl_offs = OMAP4_RM_DUCATI_RSTCTRL_OFFSET,
-			.context_offs = OMAP4_RM_DUCATI_DUCATI_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_HWCTRL,
-		},
-	},
-};
-
-/* mmu dsp */
-
-static struct omap_hwmod omap44xx_mmu_dsp_hwmod;
-static struct omap_hwmod_rst_info omap44xx_mmu_dsp_resets[] = {
-	{ .name = "mmu_cache", .rst_shift = 1 },
-};
-
-/* l4_cfg -> dsp */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__mmu_dsp = {
-	.master		= &omap44xx_l4_cfg_hwmod,
-	.slave		= &omap44xx_mmu_dsp_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod omap44xx_mmu_dsp_hwmod = {
-	.name		= "mmu_dsp",
-	.class		= &omap44xx_mmu_hwmod_class,
-	.clkdm_name	= "tesla_clkdm",
-	.rst_lines	= omap44xx_mmu_dsp_resets,
-	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_mmu_dsp_resets),
-	.main_clk	= "dpll_iva_m4x2_ck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_TESLA_TESLA_CLKCTRL_OFFSET,
-			.rstctrl_offs = OMAP4_RM_TESLA_RSTCTRL_OFFSET,
-			.context_offs = OMAP4_RM_TESLA_TESLA_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_HWCTRL,
-		},
-	},
-};
-
-/*
  * 'mpu' class
  * mpu sub-system
  */
@@ -1434,51 +900,6 @@ static struct omap_hwmod omap44xx_ocmc_ram_hwmod = {
 	},
 };
 
-/*
- * 'ocp2scp' class
- * bridge to transform ocp interface protocol to scp (serial control port)
- * protocol
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_ocp2scp_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
-			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_ocp2scp_hwmod_class = {
-	.name	= "ocp2scp",
-	.sysc	= &omap44xx_ocp2scp_sysc,
-};
-
-/* ocp2scp_usb_phy */
-static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
-	.name		= "ocp2scp_usb_phy",
-	.class		= &omap44xx_ocp2scp_hwmod_class,
-	.clkdm_name	= "l3_init_clkdm",
-	/*
-	 * ocp2scp_usb_phy_phy_48m is provided by the OMAP4 PRCM IP
-	 * block as an "optional clock," and normally should never be
-	 * specified as the main_clk for an OMAP IP block.  However it
-	 * turns out that this clock is actually the main clock for
-	 * the ocp2scp_usb_phy IP block:
-	 * http://lists.infradead.org/pipermail/linux-arm-kernel/2012-September/119943.html
-	 * So listing ocp2scp_usb_phy_phy_48m as a main_clk here seems
-	 * to be the best workaround.
-	 */
-	.main_clk	= "ocp2scp_usb_phy_phy_48m",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_L3INIT_USBPHYOCP2SCP_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_HWCTRL,
-		},
-	},
-};
 
 /*
  * 'prcm' class
@@ -1585,189 +1006,6 @@ static struct omap_hwmod omap44xx_sl2if_hwmod = {
 };
 
 /*
- * 'slimbus' class
- * bidirectional, multi-drop, multi-channel two-line serial interface between
- * the device and external components
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_slimbus_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.sysc_flags	= (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
-			   SYSC_HAS_SOFTRESET),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
-	.sysc_fields	= &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap44xx_slimbus_hwmod_class = {
-	.name	= "slimbus",
-	.sysc	= &omap44xx_slimbus_sysc,
-};
-
-/* slimbus1 */
-static struct omap_hwmod_opt_clk slimbus1_opt_clks[] = {
-	{ .role = "fclk_1", .clk = "slimbus1_fclk_1" },
-	{ .role = "fclk_0", .clk = "slimbus1_fclk_0" },
-	{ .role = "fclk_2", .clk = "slimbus1_fclk_2" },
-	{ .role = "slimbus_clk", .clk = "slimbus1_slimbus_clk" },
-};
-
-static struct omap_hwmod omap44xx_slimbus1_hwmod = {
-	.name		= "slimbus1",
-	.class		= &omap44xx_slimbus_hwmod_class,
-	.clkdm_name	= "abe_clkdm",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM1_ABE_SLIMBUS_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_ABE_SLIMBUS_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-	.opt_clks	= slimbus1_opt_clks,
-	.opt_clks_cnt	= ARRAY_SIZE(slimbus1_opt_clks),
-};
-
-/* slimbus2 */
-static struct omap_hwmod_opt_clk slimbus2_opt_clks[] = {
-	{ .role = "fclk_1", .clk = "slimbus2_fclk_1" },
-	{ .role = "fclk_0", .clk = "slimbus2_fclk_0" },
-	{ .role = "slimbus_clk", .clk = "slimbus2_slimbus_clk" },
-};
-
-static struct omap_hwmod omap44xx_slimbus2_hwmod = {
-	.name		= "slimbus2",
-	.class		= &omap44xx_slimbus_hwmod_class,
-	.clkdm_name	= "l4_per_clkdm",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_L4PER_SLIMBUS2_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_L4PER_SLIMBUS2_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-	.opt_clks	= slimbus2_opt_clks,
-	.opt_clks_cnt	= ARRAY_SIZE(slimbus2_opt_clks),
-};
-
-/*
- * 'smartreflex' class
- * smartreflex module (monitor silicon performance and outputs a measure of
- * performance error)
- */
-
-/* The IP is not compliant to type1 / type2 scheme */
-static struct omap_hwmod_class_sysconfig omap44xx_smartreflex_sysc = {
-	.rev_offs	= -ENODEV,
-	.sysc_offs	= 0x0038,
-	.sysc_flags	= (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
-	.sysc_fields	= &omap36xx_sr_sysc_fields,
-};
-
-static struct omap_hwmod_class omap44xx_smartreflex_hwmod_class = {
-	.name	= "smartreflex",
-	.sysc	= &omap44xx_smartreflex_sysc,
-};
-
-/* smartreflex_core */
-static struct omap_smartreflex_dev_attr smartreflex_core_dev_attr = {
-	.sensor_voltdm_name   = "core",
-};
-
-static struct omap_hwmod omap44xx_smartreflex_core_hwmod = {
-	.name		= "smartreflex_core",
-	.class		= &omap44xx_smartreflex_hwmod_class,
-	.clkdm_name	= "l4_ao_clkdm",
-
-	.main_clk	= "smartreflex_core_fck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_ALWON_SR_CORE_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_ALWON_SR_CORE_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-	.dev_attr	= &smartreflex_core_dev_attr,
-};
-
-/* smartreflex_iva */
-static struct omap_smartreflex_dev_attr smartreflex_iva_dev_attr = {
-	.sensor_voltdm_name	= "iva",
-};
-
-static struct omap_hwmod omap44xx_smartreflex_iva_hwmod = {
-	.name		= "smartreflex_iva",
-	.class		= &omap44xx_smartreflex_hwmod_class,
-	.clkdm_name	= "l4_ao_clkdm",
-	.main_clk	= "smartreflex_iva_fck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_ALWON_SR_IVA_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_ALWON_SR_IVA_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-	.dev_attr	= &smartreflex_iva_dev_attr,
-};
-
-/* smartreflex_mpu */
-static struct omap_smartreflex_dev_attr smartreflex_mpu_dev_attr = {
-	.sensor_voltdm_name	= "mpu",
-};
-
-static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod = {
-	.name		= "smartreflex_mpu",
-	.class		= &omap44xx_smartreflex_hwmod_class,
-	.clkdm_name	= "l4_ao_clkdm",
-	.main_clk	= "smartreflex_mpu_fck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_ALWON_SR_MPU_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_ALWON_SR_MPU_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-	.dev_attr	= &smartreflex_mpu_dev_attr,
-};
-
-/*
- * 'spinlock' class
- * spinlock provides hardware assistance for synchronizing the processes
- * running on multiple processors
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_spinlock_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
-			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
-			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_spinlock_hwmod_class = {
-	.name	= "spinlock",
-	.sysc	= &omap44xx_spinlock_sysc,
-};
-
-/* spinlock */
-static struct omap_hwmod omap44xx_spinlock_hwmod = {
-	.name		= "spinlock",
-	.class		= &omap44xx_spinlock_hwmod_class,
-	.clkdm_name	= "l4_cfg_clkdm",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_L4CFG_HW_SEM_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_L4CFG_HW_SEM_CONTEXT_OFFSET,
-		},
-	},
-};
-
-/*
  * 'timer' class
  * general purpose timer module with accurate 1ms tick
  * This class contains several variants: ['timer_1ms', 'timer']
@@ -1790,21 +1028,6 @@ static struct omap_hwmod_class omap44xx_timer_1ms_hwmod_class = {
 	.sysc	= &omap44xx_timer_1ms_sysc,
 };
 
-static struct omap_hwmod_class_sysconfig omap44xx_timer_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
-	.sysc_fields	= &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap44xx_timer_hwmod_class = {
-	.name	= "timer",
-	.sysc	= &omap44xx_timer_sysc,
-};
-
 /* timer1 */
 static struct omap_hwmod omap44xx_timer1_hwmod = {
 	.name		= "timer1",
@@ -1821,158 +1044,6 @@ static struct omap_hwmod omap44xx_timer1_hwmod = {
 	},
 };
 
-/* timer2 */
-static struct omap_hwmod omap44xx_timer2_hwmod = {
-	.name		= "timer2",
-	.class		= &omap44xx_timer_1ms_hwmod_class,
-	.clkdm_name	= "l4_per_clkdm",
-	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
-	.main_clk	= "cm2_dm2_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER2_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_L4PER_DMTIMER2_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer3 */
-static struct omap_hwmod omap44xx_timer3_hwmod = {
-	.name		= "timer3",
-	.class		= &omap44xx_timer_hwmod_class,
-	.clkdm_name	= "l4_per_clkdm",
-	.main_clk	= "cm2_dm3_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER3_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_L4PER_DMTIMER3_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer4 */
-static struct omap_hwmod omap44xx_timer4_hwmod = {
-	.name		= "timer4",
-	.class		= &omap44xx_timer_hwmod_class,
-	.clkdm_name	= "l4_per_clkdm",
-	.main_clk	= "cm2_dm4_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER4_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_L4PER_DMTIMER4_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer5 */
-static struct omap_hwmod omap44xx_timer5_hwmod = {
-	.name		= "timer5",
-	.class		= &omap44xx_timer_hwmod_class,
-	.clkdm_name	= "abe_clkdm",
-	.main_clk	= "timer5_sync_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM1_ABE_TIMER5_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_ABE_TIMER5_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer6 */
-static struct omap_hwmod omap44xx_timer6_hwmod = {
-	.name		= "timer6",
-	.class		= &omap44xx_timer_hwmod_class,
-	.clkdm_name	= "abe_clkdm",
-	.main_clk	= "timer6_sync_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM1_ABE_TIMER6_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_ABE_TIMER6_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer7 */
-static struct omap_hwmod omap44xx_timer7_hwmod = {
-	.name		= "timer7",
-	.class		= &omap44xx_timer_hwmod_class,
-	.clkdm_name	= "abe_clkdm",
-	.main_clk	= "timer7_sync_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM1_ABE_TIMER7_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_ABE_TIMER7_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer8 */
-static struct omap_hwmod omap44xx_timer8_hwmod = {
-	.name		= "timer8",
-	.class		= &omap44xx_timer_hwmod_class,
-	.clkdm_name	= "abe_clkdm",
-	.main_clk	= "timer8_sync_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM1_ABE_TIMER8_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_ABE_TIMER8_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer9 */
-static struct omap_hwmod omap44xx_timer9_hwmod = {
-	.name		= "timer9",
-	.class		= &omap44xx_timer_hwmod_class,
-	.clkdm_name	= "l4_per_clkdm",
-	.main_clk	= "cm2_dm9_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER9_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_L4PER_DMTIMER9_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer10 */
-static struct omap_hwmod omap44xx_timer10_hwmod = {
-	.name		= "timer10",
-	.class		= &omap44xx_timer_1ms_hwmod_class,
-	.clkdm_name	= "l4_per_clkdm",
-	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
-	.main_clk	= "cm2_dm10_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER10_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_L4PER_DMTIMER10_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer11 */
-static struct omap_hwmod omap44xx_timer11_hwmod = {
-	.name		= "timer11",
-	.class		= &omap44xx_timer_hwmod_class,
-	.clkdm_name	= "l4_per_clkdm",
-	.main_clk	= "cm2_dm11_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER11_CLKCTRL_OFFSET,
-			.context_offs = OMAP4_RM_L4PER_DMTIMER11_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
 /*
  * 'usb_host_fs' class
  * full-speed usb host controller
@@ -2213,30 +1284,6 @@ static struct omap_hwmod_ocp_if omap44xx_debugss__l3_main_2 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* dma_system -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_dma_system__l3_main_2 = {
-	.master		= &omap44xx_dma_system_hwmod,
-	.slave		= &omap44xx_l3_main_2_hwmod,
-	.clk		= "l3_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* fdif -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_fdif__l3_main_2 = {
-	.master		= &omap44xx_fdif_hwmod,
-	.slave		= &omap44xx_l3_main_2_hwmod,
-	.clk		= "l3_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* hsi -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_hsi__l3_main_2 = {
-	.master		= &omap44xx_hsi_hwmod,
-	.slave		= &omap44xx_l3_main_2_hwmod,
-	.clk		= "l3_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* ipu -> l3_main_2 */
 static struct omap_hwmod_ocp_if omap44xx_ipu__l3_main_2 = {
 	.master		= &omap44xx_ipu_hwmod,
@@ -2317,14 +1364,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* aess -> l4_abe */
-static struct omap_hwmod_ocp_if __maybe_unused omap44xx_aess__l4_abe = {
-	.master		= &omap44xx_aess_hwmod,
-	.slave		= &omap44xx_l4_abe_hwmod,
-	.clk		= "ocp_abe_iclk",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* dsp -> l4_abe */
 static struct omap_hwmod_ocp_if omap44xx_dsp__l4_abe = {
 	.master		= &omap44xx_dsp_hwmod,
@@ -2389,22 +1428,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__ocp_wp_noc = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_abe -> aess */
-static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess = {
-	.master		= &omap44xx_l4_abe_hwmod,
-	.slave		= &omap44xx_aess_hwmod,
-	.clk		= "ocp_abe_iclk",
-	.user		= OCP_USER_MPU,
-};
-
-/* l4_abe -> aess (dma) */
-static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess_dma = {
-	.master		= &omap44xx_l4_abe_hwmod,
-	.slave		= &omap44xx_aess_hwmod,
-	.clk		= "ocp_abe_iclk",
-	.user		= OCP_USER_SDMA,
-};
-
 /* l4_wkup -> counter_32k */
 static struct omap_hwmod_ocp_if omap44xx_l4_wkup__counter_32k = {
 	.master		= &omap44xx_l4_wkup_hwmod,
@@ -2453,22 +1476,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_instr__debugss = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_cfg -> dma_system */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__dma_system = {
-	.master		= &omap44xx_l4_cfg_hwmod,
-	.slave		= &omap44xx_dma_system_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_abe -> dmic */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic = {
-	.master		= &omap44xx_l4_abe_hwmod,
-	.slave		= &omap44xx_dmic_hwmod,
-	.clk		= "ocp_abe_iclk",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* dsp -> iva */
 static struct omap_hwmod_ocp_if omap44xx_dsp__iva = {
 	.master		= &omap44xx_dsp_hwmod,
@@ -2605,30 +1612,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_venc = {
 	.user		= OCP_USER_MPU,
 };
 
-/* l3_main_2 -> sham */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sha0 = {
-	.master		= &omap44xx_l3_main_2_hwmod,
-	.slave		= &omap44xx_sha0_hwmod,
-	.clk		= "l3_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> elm */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__elm = {
-	.master		= &omap44xx_l4_per_hwmod,
-	.slave		= &omap44xx_elm_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> fdif */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__fdif = {
-	.master		= &omap44xx_l4_cfg_hwmod,
-	.slave		= &omap44xx_fdif_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l3_main_2 -> gpmc */
 static struct omap_hwmod_ocp_if omap44xx_l3_main_2__gpmc = {
 	.master		= &omap44xx_l3_main_2_hwmod,
@@ -2637,14 +1620,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__gpmc = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_cfg -> hsi */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__hsi = {
-	.master		= &omap44xx_l4_cfg_hwmod,
-	.slave		= &omap44xx_hsi_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l3_main_2 -> ipu */
 static struct omap_hwmod_ocp_if omap44xx_l3_main_2__ipu = {
 	.master		= &omap44xx_l3_main_2_hwmod,
@@ -2677,22 +1652,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iva = {
 	.user		= OCP_USER_MPU,
 };
 
-/* l4_wkup -> kbd */
-static struct omap_hwmod_ocp_if omap44xx_l4_wkup__kbd = {
-	.master		= &omap44xx_l4_wkup_hwmod,
-	.slave		= &omap44xx_kbd_hwmod,
-	.clk		= "l4_wkup_clk_mux_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_abe -> mcpdm */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcpdm = {
-	.master		= &omap44xx_l4_abe_hwmod,
-	.slave		= &omap44xx_mcpdm_hwmod,
-	.clk		= "ocp_abe_iclk",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l3_main_2 -> ocmc_ram */
 static struct omap_hwmod_ocp_if omap44xx_l3_main_2__ocmc_ram = {
 	.master		= &omap44xx_l3_main_2_hwmod,
@@ -2701,14 +1660,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__ocmc_ram = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_cfg -> ocp2scp_usb_phy */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__ocp2scp_usb_phy = {
-	.master		= &omap44xx_l4_cfg_hwmod,
-	.slave		= &omap44xx_ocp2scp_usb_phy_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* mpu_private -> prcm_mpu */
 static struct omap_hwmod_ocp_if omap44xx_mpu_private__prcm_mpu = {
 	.master		= &omap44xx_mpu_private_hwmod,
@@ -2757,62 +1708,6 @@ static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_abe -> slimbus1 */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__slimbus1 = {
-	.master		= &omap44xx_l4_abe_hwmod,
-	.slave		= &omap44xx_slimbus1_hwmod,
-	.clk		= "ocp_abe_iclk",
-	.user		= OCP_USER_MPU,
-};
-
-/* l4_abe -> slimbus1 (dma) */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__slimbus1_dma = {
-	.master		= &omap44xx_l4_abe_hwmod,
-	.slave		= &omap44xx_slimbus1_hwmod,
-	.clk		= "ocp_abe_iclk",
-	.user		= OCP_USER_SDMA,
-};
-
-/* l4_per -> slimbus2 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__slimbus2 = {
-	.master		= &omap44xx_l4_per_hwmod,
-	.slave		= &omap44xx_slimbus2_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> smartreflex_core */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_core = {
-	.master		= &omap44xx_l4_cfg_hwmod,
-	.slave		= &omap44xx_smartreflex_core_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> smartreflex_iva */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_iva = {
-	.master		= &omap44xx_l4_cfg_hwmod,
-	.slave		= &omap44xx_smartreflex_iva_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> smartreflex_mpu */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_mpu = {
-	.master		= &omap44xx_l4_cfg_hwmod,
-	.slave		= &omap44xx_smartreflex_mpu_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> spinlock */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__spinlock = {
-	.master		= &omap44xx_l4_cfg_hwmod,
-	.slave		= &omap44xx_spinlock_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l4_wkup -> timer1 */
 static struct omap_hwmod_ocp_if omap44xx_l4_wkup__timer1 = {
 	.master		= &omap44xx_l4_wkup_hwmod,
@@ -2821,86 +1716,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__timer1 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_per -> timer2 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__timer2 = {
-	.master		= &omap44xx_l4_per_hwmod,
-	.slave		= &omap44xx_timer2_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> timer3 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__timer3 = {
-	.master		= &omap44xx_l4_per_hwmod,
-	.slave		= &omap44xx_timer3_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> timer4 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__timer4 = {
-	.master		= &omap44xx_l4_per_hwmod,
-	.slave		= &omap44xx_timer4_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_abe -> timer5 */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer5 = {
-	.master		= &omap44xx_l4_abe_hwmod,
-	.slave		= &omap44xx_timer5_hwmod,
-	.clk		= "ocp_abe_iclk",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_abe -> timer6 */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer6 = {
-	.master		= &omap44xx_l4_abe_hwmod,
-	.slave		= &omap44xx_timer6_hwmod,
-	.clk		= "ocp_abe_iclk",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_abe -> timer7 */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer7 = {
-	.master		= &omap44xx_l4_abe_hwmod,
-	.slave		= &omap44xx_timer7_hwmod,
-	.clk		= "ocp_abe_iclk",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_abe -> timer8 */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer8 = {
-	.master		= &omap44xx_l4_abe_hwmod,
-	.slave		= &omap44xx_timer8_hwmod,
-	.clk		= "ocp_abe_iclk",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> timer9 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__timer9 = {
-	.master		= &omap44xx_l4_per_hwmod,
-	.slave		= &omap44xx_timer9_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> timer10 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__timer10 = {
-	.master		= &omap44xx_l4_per_hwmod,
-	.slave		= &omap44xx_timer10_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> timer11 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__timer11 = {
-	.master		= &omap44xx_l4_per_hwmod,
-	.slave		= &omap44xx_timer11_hwmod,
-	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l4_cfg -> usb_host_fs */
 static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_cfg__usb_host_fs = {
 	.master		= &omap44xx_l4_cfg_hwmod,
@@ -2953,9 +1768,6 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_cfg__l3_main_1,
 	&omap44xx_mpu__l3_main_1,
 	&omap44xx_debugss__l3_main_2,
-	&omap44xx_dma_system__l3_main_2,
-	&omap44xx_fdif__l3_main_2,
-	&omap44xx_hsi__l3_main_2,
 	&omap44xx_ipu__l3_main_2,
 	&omap44xx_iss__l3_main_2,
 	&omap44xx_iva__l3_main_2,
@@ -2966,7 +1778,6 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l3_main_1__l3_main_3,
 	&omap44xx_l3_main_2__l3_main_3,
 	&omap44xx_l4_cfg__l3_main_3,
-	&omap44xx_aess__l4_abe,
 	&omap44xx_dsp__l4_abe,
 	&omap44xx_l3_main_1__l4_abe,
 	&omap44xx_mpu__l4_abe,
@@ -2975,16 +1786,12 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_cfg__l4_wkup,
 	&omap44xx_mpu__mpu_private,
 	&omap44xx_l4_cfg__ocp_wp_noc,
-	&omap44xx_l4_abe__aess,
-	&omap44xx_l4_abe__aess_dma,
 	&omap44xx_l4_wkup__counter_32k,
 	&omap44xx_l4_cfg__ctrl_module_core,
 	&omap44xx_l4_cfg__ctrl_module_pad_core,
 	&omap44xx_l4_wkup__ctrl_module_wkup,
 	&omap44xx_l4_wkup__ctrl_module_pad_wkup,
 	&omap44xx_l3_instr__debugss,
-	&omap44xx_l4_cfg__dma_system,
-	&omap44xx_l4_abe__dmic,
 	&omap44xx_dsp__iva,
 	/* &omap44xx_dsp__sl2if, */
 	&omap44xx_l4_cfg__dsp,
@@ -3002,53 +1809,24 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_per__dss_rfbi,
 	&omap44xx_l3_main_2__dss_venc,
 	&omap44xx_l4_per__dss_venc,
-	&omap44xx_l4_per__elm,
-	&omap44xx_l4_cfg__fdif,
 	&omap44xx_l3_main_2__gpmc,
-	&omap44xx_l4_cfg__hsi,
 	&omap44xx_l3_main_2__ipu,
 	&omap44xx_l3_main_2__iss,
 	/* &omap44xx_iva__sl2if, */
 	&omap44xx_l3_main_2__iva,
-	&omap44xx_l4_wkup__kbd,
-	&omap44xx_l4_abe__mcpdm,
-	&omap44xx_l3_main_2__mmu_ipu,
-	&omap44xx_l4_cfg__mmu_dsp,
 	&omap44xx_l3_main_2__ocmc_ram,
-	&omap44xx_l4_cfg__ocp2scp_usb_phy,
 	&omap44xx_mpu_private__prcm_mpu,
 	&omap44xx_l4_wkup__cm_core_aon,
 	&omap44xx_l4_cfg__cm_core,
 	&omap44xx_l4_wkup__prm,
 	&omap44xx_l4_wkup__scrm,
 	/* &omap44xx_l3_main_2__sl2if, */
-	&omap44xx_l4_abe__slimbus1,
-	&omap44xx_l4_abe__slimbus1_dma,
-	&omap44xx_l4_per__slimbus2,
-	&omap44xx_l4_cfg__smartreflex_core,
-	&omap44xx_l4_cfg__smartreflex_iva,
-	&omap44xx_l4_cfg__smartreflex_mpu,
-	&omap44xx_l4_cfg__spinlock,
 	&omap44xx_l4_wkup__timer1,
-	&omap44xx_l4_per__timer2,
-	&omap44xx_l4_per__timer3,
-	&omap44xx_l4_per__timer4,
-	&omap44xx_l4_abe__timer5,
-	&omap44xx_l4_abe__timer6,
-	&omap44xx_l4_abe__timer7,
-	&omap44xx_l4_abe__timer8,
-	&omap44xx_l4_per__timer9,
-	&omap44xx_l4_per__timer10,
-	&omap44xx_l4_per__timer11,
 	/* &omap44xx_l4_cfg__usb_host_fs, */
 	&omap44xx_l4_cfg__usb_host_hs,
 	&omap44xx_l4_cfg__usb_tll_hs,
 	&omap44xx_mpu__emif1,
 	&omap44xx_mpu__emif2,
-	&omap44xx_l3_main_2__aes1,
-	&omap44xx_l3_main_2__aes2,
-	&omap44xx_l3_main_2__des,
-	&omap44xx_l3_main_2__sha0,
 	NULL,
 };
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index cc5ad6a..ad398f6 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -17,8 +17,6 @@
 #include <linux/io.h>
 #include <linux/power/smartreflex.h>
 
-#include <linux/omap-dma.h>
-
 #include "omap_hwmod.h"
 #include "omap_hwmod_common_data.h"
 #include "cm1_54xx.h"
@@ -28,10 +26,6 @@
 /* Base offset for all OMAP5 interrupts external to MPUSS */
 #define OMAP54XX_IRQ_GIC_START	32
 
-/* Base offset for all OMAP5 dma requests */
-#define OMAP54XX_DMA_REQ_START	1
-
-
 /*
  * IP blocks
  */
@@ -233,87 +227,6 @@ static struct omap_hwmod omap54xx_counter_32k_hwmod = {
 };
 
 /*
- * 'dma' class
- * dma controller for data exchange between memory to memory (i.e. internal or
- * external memory) and gp peripherals to memory or memory to gp peripherals
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_dma_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x002c,
-	.syss_offs	= 0x0028,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
-			   SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
-			   SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap54xx_dma_hwmod_class = {
-	.name	= "dma",
-	.sysc	= &omap54xx_dma_sysc,
-};
-
-/* dma dev_attr */
-static struct omap_dma_dev_attr dma_dev_attr = {
-	.dev_caps	= RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
-			  IS_CSSA_32 | IS_CDSA_32 | IS_RW_PRIORITY,
-	.lch_count	= 32,
-};
-
-/* dma_system */
-static struct omap_hwmod omap54xx_dma_system_hwmod = {
-	.name		= "dma_system",
-	.class		= &omap54xx_dma_hwmod_class,
-	.clkdm_name	= "dma_clkdm",
-	.main_clk	= "l3_iclk_div",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_DMA_DMA_SYSTEM_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_DMA_DMA_SYSTEM_CONTEXT_OFFSET,
-		},
-	},
-	.dev_attr	= &dma_dev_attr,
-};
-
-/*
- * 'dmic' class
- * digital microphone controller
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_dmic_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
-	.sysc_fields	= &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap54xx_dmic_hwmod_class = {
-	.name	= "dmic",
-	.sysc	= &omap54xx_dmic_sysc,
-};
-
-/* dmic */
-static struct omap_hwmod omap54xx_dmic_hwmod = {
-	.name		= "dmic",
-	.class		= &omap54xx_dmic_hwmod_class,
-	.clkdm_name	= "abe_clkdm",
-	.main_clk	= "dmic_gfclk",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_ABE_DMIC_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_ABE_DMIC_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/*
  * 'dss' class
  * display sub-system
  */
@@ -593,154 +506,8 @@ static struct omap_hwmod omap54xx_emif2_hwmod = {
 	},
 };
 
-/*
- * 'kbd' class
- * keyboard controller
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_kbd_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_SIDLEMODE |
-			   SYSC_HAS_SOFTRESET),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap54xx_kbd_hwmod_class = {
-	.name	= "kbd",
-	.sysc	= &omap54xx_kbd_sysc,
-};
-
-/* kbd */
-static struct omap_hwmod omap54xx_kbd_hwmod = {
-	.name		= "kbd",
-	.class		= &omap54xx_kbd_hwmod_class,
-	.clkdm_name	= "wkupaon_clkdm",
-	.main_clk	= "sys_32k_ck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_WKUPAON_KBD_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_WKUPAON_KBD_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/*
- * 'mcpdm' class
- * multi channel pdm controller (proprietary interface with phoenix power
- * ic)
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_mcpdm_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
-	.sysc_fields	= &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap54xx_mcpdm_hwmod_class = {
-	.name	= "mcpdm",
-	.sysc	= &omap54xx_mcpdm_sysc,
-};
-
-/* mcpdm */
-static struct omap_hwmod omap54xx_mcpdm_hwmod = {
-	.name		= "mcpdm",
-	.class		= &omap54xx_mcpdm_hwmod_class,
-	.clkdm_name	= "abe_clkdm",
-	/*
-	 * It's suspected that the McPDM requires an off-chip main
-	 * functional clock, controlled via I2C.  This IP block is
-	 * currently reset very early during boot, before I2C is
-	 * available, so it doesn't seem that we have any choice in
-	 * the kernel other than to avoid resetting it.  XXX This is
-	 * really a hardware issue workaround: every IP block should
-	 * be able to source its main functional clock from either
-	 * on-chip or off-chip sources.  McPDM seems to be the only
-	 * current exception.
-	 */
-
-	.flags		= HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE,
-	.main_clk	= "pad_clks_ck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_ABE_MCPDM_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_ABE_MCPDM_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
 
 
-/*
- * 'mmu' class
- * The memory management unit performs virtual to physical address translation
- * for its requestors.
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_mmu_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
-			   SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap54xx_mmu_hwmod_class = {
-	.name = "mmu",
-	.sysc = &omap54xx_mmu_sysc,
-};
-
-static struct omap_hwmod_rst_info omap54xx_mmu_dsp_resets[] = {
-	{ .name = "mmu_cache", .rst_shift = 1 },
-};
-
-static struct omap_hwmod omap54xx_mmu_dsp_hwmod = {
-	.name		= "mmu_dsp",
-	.class		= &omap54xx_mmu_hwmod_class,
-	.clkdm_name	= "dsp_clkdm",
-	.rst_lines	= omap54xx_mmu_dsp_resets,
-	.rst_lines_cnt	= ARRAY_SIZE(omap54xx_mmu_dsp_resets),
-	.main_clk	= "dpll_iva_h11x2_ck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_DSP_DSP_CLKCTRL_OFFSET,
-			.rstctrl_offs = OMAP54XX_RM_DSP_RSTCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_DSP_DSP_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_HWCTRL,
-		},
-	},
-};
-
-/* mmu ipu */
-static struct omap_hwmod_rst_info omap54xx_mmu_ipu_resets[] = {
-	{ .name = "mmu_cache", .rst_shift = 2 },
-};
-
-static struct omap_hwmod omap54xx_mmu_ipu_hwmod = {
-	.name		= "mmu_ipu",
-	.class		= &omap54xx_mmu_hwmod_class,
-	.clkdm_name	= "ipu_clkdm",
-	.rst_lines	= omap54xx_mmu_ipu_resets,
-	.rst_lines_cnt	= ARRAY_SIZE(omap54xx_mmu_ipu_resets),
-	.main_clk	= "dpll_core_h22x2_ck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_IPU_IPU_CLKCTRL_OFFSET,
-			.rstctrl_offs = OMAP54XX_RM_IPU_RSTCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_IPU_IPU_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_HWCTRL,
-		},
-	},
-};
 
 /*
  * 'mpu' class
@@ -766,76 +533,6 @@ static struct omap_hwmod omap54xx_mpu_hwmod = {
 	},
 };
 
-/*
- * 'spinlock' class
- * spinlock provides hardware assistance for synchronizing the processes
- * running on multiple processors
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_spinlock_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
-			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
-			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap54xx_spinlock_hwmod_class = {
-	.name	= "spinlock",
-	.sysc	= &omap54xx_spinlock_sysc,
-};
-
-/* spinlock */
-static struct omap_hwmod omap54xx_spinlock_hwmod = {
-	.name		= "spinlock",
-	.class		= &omap54xx_spinlock_hwmod_class,
-	.clkdm_name	= "l4cfg_clkdm",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_L4CFG_SPINLOCK_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_L4CFG_SPINLOCK_CONTEXT_OFFSET,
-		},
-	},
-};
-
-/*
- * 'ocp2scp' class
- * bridge to transform ocp interface protocol to scp (serial control port)
- * protocol
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_ocp2scp_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
-			SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap54xx_ocp2scp_hwmod_class = {
-	.name	= "ocp2scp",
-	.sysc	= &omap54xx_ocp2scp_sysc,
-};
-
-/* ocp2scp1 */
-static struct omap_hwmod omap54xx_ocp2scp1_hwmod = {
-	.name		= "ocp2scp1",
-	.class		= &omap54xx_ocp2scp_hwmod_class,
-	.clkdm_name	= "l3init_clkdm",
-	.main_clk	= "l4_root_clk_div",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_L3INIT_OCP2SCP1_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_L3INIT_OCP2SCP1_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_HWCTRL,
-		},
-	},
-};
 
 /*
  * 'timer' class
@@ -858,21 +555,6 @@ static struct omap_hwmod_class omap54xx_timer_1ms_hwmod_class = {
 	.sysc	= &omap54xx_timer_1ms_sysc,
 };
 
-static struct omap_hwmod_class_sysconfig omap54xx_timer_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
-	.sysc_fields	= &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap54xx_timer_hwmod_class = {
-	.name	= "timer",
-	.sysc	= &omap54xx_timer_sysc,
-};
-
 /* timer1 */
 static struct omap_hwmod omap54xx_timer1_hwmod = {
 	.name		= "timer1",
@@ -889,158 +571,6 @@ static struct omap_hwmod omap54xx_timer1_hwmod = {
 	},
 };
 
-/* timer2 */
-static struct omap_hwmod omap54xx_timer2_hwmod = {
-	.name		= "timer2",
-	.class		= &omap54xx_timer_1ms_hwmod_class,
-	.clkdm_name	= "l4per_clkdm",
-	.main_clk	= "timer2_gfclk_mux",
-	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_L4PER_TIMER2_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_L4PER_TIMER2_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer3 */
-static struct omap_hwmod omap54xx_timer3_hwmod = {
-	.name		= "timer3",
-	.class		= &omap54xx_timer_hwmod_class,
-	.clkdm_name	= "l4per_clkdm",
-	.main_clk	= "timer3_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_L4PER_TIMER3_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_L4PER_TIMER3_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer4 */
-static struct omap_hwmod omap54xx_timer4_hwmod = {
-	.name		= "timer4",
-	.class		= &omap54xx_timer_hwmod_class,
-	.clkdm_name	= "l4per_clkdm",
-	.main_clk	= "timer4_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_L4PER_TIMER4_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_L4PER_TIMER4_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer5 */
-static struct omap_hwmod omap54xx_timer5_hwmod = {
-	.name		= "timer5",
-	.class		= &omap54xx_timer_hwmod_class,
-	.clkdm_name	= "abe_clkdm",
-	.main_clk	= "timer5_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_ABE_TIMER5_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_ABE_TIMER5_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer6 */
-static struct omap_hwmod omap54xx_timer6_hwmod = {
-	.name		= "timer6",
-	.class		= &omap54xx_timer_hwmod_class,
-	.clkdm_name	= "abe_clkdm",
-	.main_clk	= "timer6_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_ABE_TIMER6_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_ABE_TIMER6_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer7 */
-static struct omap_hwmod omap54xx_timer7_hwmod = {
-	.name		= "timer7",
-	.class		= &omap54xx_timer_hwmod_class,
-	.clkdm_name	= "abe_clkdm",
-	.main_clk	= "timer7_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_ABE_TIMER7_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_ABE_TIMER7_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer8 */
-static struct omap_hwmod omap54xx_timer8_hwmod = {
-	.name		= "timer8",
-	.class		= &omap54xx_timer_hwmod_class,
-	.clkdm_name	= "abe_clkdm",
-	.main_clk	= "timer8_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_ABE_TIMER8_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_ABE_TIMER8_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer9 */
-static struct omap_hwmod omap54xx_timer9_hwmod = {
-	.name		= "timer9",
-	.class		= &omap54xx_timer_hwmod_class,
-	.clkdm_name	= "l4per_clkdm",
-	.main_clk	= "timer9_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_L4PER_TIMER9_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_L4PER_TIMER9_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer10 */
-static struct omap_hwmod omap54xx_timer10_hwmod = {
-	.name		= "timer10",
-	.class		= &omap54xx_timer_1ms_hwmod_class,
-	.clkdm_name	= "l4per_clkdm",
-	.main_clk	= "timer10_gfclk_mux",
-	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_L4PER_TIMER10_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_L4PER_TIMER10_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer11 */
-static struct omap_hwmod omap54xx_timer11_hwmod = {
-	.name		= "timer11",
-	.class		= &omap54xx_timer_hwmod_class,
-	.clkdm_name	= "l4per_clkdm",
-	.main_clk	= "timer11_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_L4PER_TIMER11_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_L4PER_TIMER11_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
 /*
  * 'usb_host_hs' class
  * high-speed multi-port usb host controller
@@ -1193,35 +723,6 @@ static struct omap_hwmod omap54xx_usb_otg_ss_hwmod = {
 	.opt_clks_cnt	= ARRAY_SIZE(usb_otg_ss_opt_clks),
 };
 
-
-/*
- * 'ocp2scp' class
- * bridge to transform ocp interface protocol to scp (serial control port)
- * protocol
- */
-/* ocp2scp3 */
-static struct omap_hwmod omap54xx_ocp2scp3_hwmod;
-/* l4_cfg -> ocp2scp3 */
-static struct omap_hwmod_ocp_if omap54xx_l4_cfg__ocp2scp3 = {
-	.master		= &omap54xx_l4_cfg_hwmod,
-	.slave		= &omap54xx_ocp2scp3_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod omap54xx_ocp2scp3_hwmod = {
-	.name		= "ocp2scp3",
-	.class		= &omap54xx_ocp2scp_hwmod_class,
-	.clkdm_name	= "l3init_clkdm",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = OMAP54XX_CM_L3INIT_OCP2SCP3_CLKCTRL_OFFSET,
-			.context_offs = OMAP54XX_RM_L3INIT_OCP2SCP3_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_HWCTRL,
-		},
-	},
-};
-
 /*
  * 'sata' class
  * sata:  serial ata interface  gen2 compliant   ( 1 rx/ 1 tx)
@@ -1303,14 +804,6 @@ static struct omap_hwmod_ocp_if omap54xx_l4_cfg__l3_main_1 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_cfg -> mmu_dsp */
-static struct omap_hwmod_ocp_if omap54xx_l4_cfg__mmu_dsp = {
-	.master		= &omap54xx_l4_cfg_hwmod,
-	.slave		= &omap54xx_mmu_dsp_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* mpu -> l3_main_1 */
 static struct omap_hwmod_ocp_if omap54xx_mpu__l3_main_1 = {
 	.master		= &omap54xx_mpu_hwmod,
@@ -1335,14 +828,6 @@ static struct omap_hwmod_ocp_if omap54xx_l4_cfg__l3_main_2 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l3_main_2 -> mmu_ipu */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_2__mmu_ipu = {
-	.master		= &omap54xx_l3_main_2_hwmod,
-	.slave		= &omap54xx_mmu_ipu_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l3_main_1 -> l3_main_3 */
 static struct omap_hwmod_ocp_if omap54xx_l3_main_1__l3_main_3 = {
 	.master		= &omap54xx_l3_main_1_hwmod,
@@ -1423,22 +908,6 @@ static struct omap_hwmod_ocp_if omap54xx_l4_wkup__counter_32k = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_cfg -> dma_system */
-static struct omap_hwmod_ocp_if omap54xx_l4_cfg__dma_system = {
-	.master		= &omap54xx_l4_cfg_hwmod,
-	.slave		= &omap54xx_dma_system_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_abe -> dmic */
-static struct omap_hwmod_ocp_if omap54xx_l4_abe__dmic = {
-	.master		= &omap54xx_l4_abe_hwmod,
-	.slave		= &omap54xx_dmic_hwmod,
-	.clk		= "abe_iclk",
-	.user		= OCP_USER_MPU,
-};
-
 /* l3_main_2 -> dss */
 static struct omap_hwmod_ocp_if omap54xx_l3_main_2__dss = {
 	.master		= &omap54xx_l3_main_2_hwmod,
@@ -1503,22 +972,6 @@ static struct omap_hwmod_ocp_if omap54xx_mpu__emif2 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_wkup -> kbd */
-static struct omap_hwmod_ocp_if omap54xx_l4_wkup__kbd = {
-	.master		= &omap54xx_l4_wkup_hwmod,
-	.slave		= &omap54xx_kbd_hwmod,
-	.clk		= "wkupaon_iclk_mux",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_abe -> mcpdm */
-static struct omap_hwmod_ocp_if omap54xx_l4_abe__mcpdm = {
-	.master		= &omap54xx_l4_abe_hwmod,
-	.slave		= &omap54xx_mcpdm_hwmod,
-	.clk		= "abe_iclk",
-	.user		= OCP_USER_MPU,
-};
-
 /* l4_cfg -> mpu */
 static struct omap_hwmod_ocp_if omap54xx_l4_cfg__mpu = {
 	.master		= &omap54xx_l4_cfg_hwmod,
@@ -1527,22 +980,6 @@ static struct omap_hwmod_ocp_if omap54xx_l4_cfg__mpu = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_cfg -> spinlock */
-static struct omap_hwmod_ocp_if omap54xx_l4_cfg__spinlock = {
-	.master		= &omap54xx_l4_cfg_hwmod,
-	.slave		= &omap54xx_spinlock_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> ocp2scp1 */
-static struct omap_hwmod_ocp_if omap54xx_l4_cfg__ocp2scp1 = {
-	.master		= &omap54xx_l4_cfg_hwmod,
-	.slave		= &omap54xx_ocp2scp1_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l4_wkup -> timer1 */
 static struct omap_hwmod_ocp_if omap54xx_l4_wkup__timer1 = {
 	.master		= &omap54xx_l4_wkup_hwmod,
@@ -1551,86 +988,6 @@ static struct omap_hwmod_ocp_if omap54xx_l4_wkup__timer1 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_per -> timer2 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__timer2 = {
-	.master		= &omap54xx_l4_per_hwmod,
-	.slave		= &omap54xx_timer2_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> timer3 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__timer3 = {
-	.master		= &omap54xx_l4_per_hwmod,
-	.slave		= &omap54xx_timer3_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> timer4 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__timer4 = {
-	.master		= &omap54xx_l4_per_hwmod,
-	.slave		= &omap54xx_timer4_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_abe -> timer5 */
-static struct omap_hwmod_ocp_if omap54xx_l4_abe__timer5 = {
-	.master		= &omap54xx_l4_abe_hwmod,
-	.slave		= &omap54xx_timer5_hwmod,
-	.clk		= "abe_iclk",
-	.user		= OCP_USER_MPU,
-};
-
-/* l4_abe -> timer6 */
-static struct omap_hwmod_ocp_if omap54xx_l4_abe__timer6 = {
-	.master		= &omap54xx_l4_abe_hwmod,
-	.slave		= &omap54xx_timer6_hwmod,
-	.clk		= "abe_iclk",
-	.user		= OCP_USER_MPU,
-};
-
-/* l4_abe -> timer7 */
-static struct omap_hwmod_ocp_if omap54xx_l4_abe__timer7 = {
-	.master		= &omap54xx_l4_abe_hwmod,
-	.slave		= &omap54xx_timer7_hwmod,
-	.clk		= "abe_iclk",
-	.user		= OCP_USER_MPU,
-};
-
-/* l4_abe -> timer8 */
-static struct omap_hwmod_ocp_if omap54xx_l4_abe__timer8 = {
-	.master		= &omap54xx_l4_abe_hwmod,
-	.slave		= &omap54xx_timer8_hwmod,
-	.clk		= "abe_iclk",
-	.user		= OCP_USER_MPU,
-};
-
-/* l4_per -> timer9 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__timer9 = {
-	.master		= &omap54xx_l4_per_hwmod,
-	.slave		= &omap54xx_timer9_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> timer10 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__timer10 = {
-	.master		= &omap54xx_l4_per_hwmod,
-	.slave		= &omap54xx_timer10_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> timer11 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__timer11 = {
-	.master		= &omap54xx_l4_per_hwmod,
-	.slave		= &omap54xx_timer11_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l4_cfg -> usb_host_hs */
 static struct omap_hwmod_ocp_if omap54xx_l4_cfg__usb_host_hs = {
 	.master		= &omap54xx_l4_cfg_hwmod,
@@ -1673,9 +1030,6 @@ static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = {
 	&omap54xx_l3_main_1__l4_wkup,
 	&omap54xx_mpu__mpu_private,
 	&omap54xx_l4_wkup__counter_32k,
-	&omap54xx_l4_cfg__dma_system,
-	&omap54xx_l4_abe__dmic,
-	&omap54xx_l4_cfg__mmu_dsp,
 	&omap54xx_l3_main_2__dss,
 	&omap54xx_l3_main_2__dss_dispc,
 	&omap54xx_l3_main_2__dss_dsi1_a,
@@ -1684,27 +1038,11 @@ static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = {
 	&omap54xx_l3_main_2__dss_rfbi,
 	&omap54xx_mpu__emif1,
 	&omap54xx_mpu__emif2,
-	&omap54xx_l3_main_2__mmu_ipu,
-	&omap54xx_l4_wkup__kbd,
-	&omap54xx_l4_abe__mcpdm,
 	&omap54xx_l4_cfg__mpu,
-	&omap54xx_l4_cfg__spinlock,
-	&omap54xx_l4_cfg__ocp2scp1,
 	&omap54xx_l4_wkup__timer1,
-	&omap54xx_l4_per__timer2,
-	&omap54xx_l4_per__timer3,
-	&omap54xx_l4_per__timer4,
-	&omap54xx_l4_abe__timer5,
-	&omap54xx_l4_abe__timer6,
-	&omap54xx_l4_abe__timer7,
-	&omap54xx_l4_abe__timer8,
-	&omap54xx_l4_per__timer9,
-	&omap54xx_l4_per__timer10,
-	&omap54xx_l4_per__timer11,
 	&omap54xx_l4_cfg__usb_host_hs,
 	&omap54xx_l4_cfg__usb_tll_hs,
 	&omap54xx_l4_cfg__usb_otg_ss,
-	&omap54xx_l4_cfg__ocp2scp3,
 	&omap54xx_l4_cfg__sata,
 	NULL,
 };
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index f8715bd..acef373 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -15,9 +15,6 @@
  */
 
 #include <linux/io.h>
-#include <linux/power/smartreflex.h>
-
-#include <linux/omap-dma.h>
 
 #include "omap_hwmod.h"
 #include "omap_hwmod_common_data.h"
@@ -29,10 +26,6 @@
 /* Base offset for all DRA7XX interrupts external to MPUSS */
 #define DRA7XX_IRQ_GIC_START	32
 
-/* Base offset for all DRA7XX dma requests */
-#define DRA7XX_DMA_REQ_START	1
-
-
 /*
  * IP blocks
  */
@@ -284,156 +277,6 @@ static struct omap_hwmod dra7xx_ctrl_module_wkup_hwmod = {
 };
 
 /*
- * 'dcan' class
- *
- */
-
-static struct omap_hwmod_class dra7xx_dcan_hwmod_class = {
-	.name	= "dcan",
-};
-
-/* dcan1 */
-static struct omap_hwmod dra7xx_dcan1_hwmod = {
-	.name		= "dcan1",
-	.class		= &dra7xx_dcan_hwmod_class,
-	.clkdm_name	= "wkupaon_clkdm",
-	.main_clk	= "dcan1_sys_clk_mux",
-	.flags		= HWMOD_CLKDM_NOAUTO,
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_WKUPAON_DCAN1_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_WKUPAON_DCAN1_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* dcan2 */
-static struct omap_hwmod dra7xx_dcan2_hwmod = {
-	.name		= "dcan2",
-	.class		= &dra7xx_dcan_hwmod_class,
-	.clkdm_name	= "l4per2_clkdm",
-	.main_clk	= "sys_clkin1",
-	.flags		= HWMOD_CLKDM_NOAUTO,
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L4PER2_DCAN2_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L4PER2_DCAN2_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* pwmss  */
-static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
-	.rev_offs	= 0x0,
-	.sysc_offs	= 0x4,
-	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
-			  SYSC_HAS_RESET_STATUS,
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type2,
-};
-
-/*
- * epwmss class
- */
-static struct omap_hwmod_class dra7xx_epwmss_hwmod_class = {
-	.name		= "epwmss",
-	.sysc		= &dra7xx_epwmss_sysc,
-};
-
-/* epwmss0 */
-static struct omap_hwmod dra7xx_epwmss0_hwmod = {
-	.name		= "epwmss0",
-	.class		= &dra7xx_epwmss_hwmod_class,
-	.clkdm_name	= "l4per2_clkdm",
-	.main_clk	= "l4_root_clk_div",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-			.clkctrl_offs	= DRA7XX_CM_L4PER2_PWMSS1_CLKCTRL_OFFSET,
-			.context_offs	= DRA7XX_RM_L4PER2_PWMSS1_CONTEXT_OFFSET,
-		},
-	},
-};
-
-/* epwmss1 */
-static struct omap_hwmod dra7xx_epwmss1_hwmod = {
-	.name		= "epwmss1",
-	.class		= &dra7xx_epwmss_hwmod_class,
-	.clkdm_name	= "l4per2_clkdm",
-	.main_clk	= "l4_root_clk_div",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-			.clkctrl_offs	= DRA7XX_CM_L4PER2_PWMSS2_CLKCTRL_OFFSET,
-			.context_offs	= DRA7XX_RM_L4PER2_PWMSS2_CONTEXT_OFFSET,
-		},
-	},
-};
-
-/* epwmss2 */
-static struct omap_hwmod dra7xx_epwmss2_hwmod = {
-	.name		= "epwmss2",
-	.class		= &dra7xx_epwmss_hwmod_class,
-	.clkdm_name	= "l4per2_clkdm",
-	.main_clk	= "l4_root_clk_div",
-	.prcm		= {
-		.omap4	= {
-			.modulemode	= MODULEMODE_SWCTRL,
-			.clkctrl_offs	= DRA7XX_CM_L4PER2_PWMSS3_CLKCTRL_OFFSET,
-			.context_offs	= DRA7XX_RM_L4PER2_PWMSS3_CONTEXT_OFFSET,
-		},
-	},
-};
-
-/*
- * 'dma' class
- *
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_dma_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x002c,
-	.syss_offs	= 0x0028,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
-			   SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
-			   SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
-			   MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class dra7xx_dma_hwmod_class = {
-	.name	= "dma",
-	.sysc	= &dra7xx_dma_sysc,
-};
-
-/* dma dev_attr */
-static struct omap_dma_dev_attr dma_dev_attr = {
-	.dev_caps	= RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
-			  IS_CSSA_32 | IS_CDSA_32 | IS_RW_PRIORITY,
-	.lch_count	= 32,
-};
-
-/* dma_system */
-static struct omap_hwmod dra7xx_dma_system_hwmod = {
-	.name		= "dma_system",
-	.class		= &dra7xx_dma_hwmod_class,
-	.clkdm_name	= "dma_clkdm",
-	.main_clk	= "l3_iclk_div",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_DMA_DMA_SYSTEM_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_DMA_DMA_SYSTEM_CONTEXT_OFFSET,
-		},
-	},
-	.dev_attr	= &dma_dev_attr,
-};
-
-/*
  * 'tpcc' class
  *
  */
@@ -626,112 +469,9 @@ static struct omap_hwmod dra7xx_dss_hdmi_hwmod = {
 	.parent_hwmod	= &dra7xx_dss_hwmod,
 };
 
-/* AES (the 'P' (public) device) */
-static struct omap_hwmod_class_sysconfig dra7xx_aes_sysc = {
-	.rev_offs	= 0x0080,
-	.sysc_offs	= 0x0084,
-	.syss_offs	= 0x0088,
-	.sysc_flags	= SYSS_HAS_RESET_STATUS,
-};
 
-static struct omap_hwmod_class dra7xx_aes_hwmod_class = {
-	.name	= "aes",
-	.sysc	= &dra7xx_aes_sysc,
-};
 
-/* AES1 */
-static struct omap_hwmod dra7xx_aes1_hwmod = {
-	.name		= "aes1",
-	.class		= &dra7xx_aes_hwmod_class,
-	.clkdm_name	= "l4sec_clkdm",
-	.main_clk	= "l3_iclk_div",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L4SEC_AES1_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L4SEC_AES1_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_HWCTRL,
-		},
-	},
-};
 
-/* AES2 */
-static struct omap_hwmod dra7xx_aes2_hwmod = {
-	.name		= "aes2",
-	.class		= &dra7xx_aes_hwmod_class,
-	.clkdm_name	= "l4sec_clkdm",
-	.main_clk	= "l3_iclk_div",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L4SEC_AES2_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L4SEC_AES2_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_HWCTRL,
-		},
-	},
-};
-
-/* sha0 HIB2 (the 'P' (public) device) */
-static struct omap_hwmod_class_sysconfig dra7xx_sha0_sysc = {
-	.rev_offs	= 0x100,
-	.sysc_offs	= 0x110,
-	.syss_offs	= 0x114,
-	.sysc_flags	= SYSS_HAS_RESET_STATUS,
-};
-
-static struct omap_hwmod_class dra7xx_sha0_hwmod_class = {
-	.name		= "sham",
-	.sysc		= &dra7xx_sha0_sysc,
-};
-
-static struct omap_hwmod dra7xx_sha0_hwmod = {
-	.name		= "sham",
-	.class		= &dra7xx_sha0_hwmod_class,
-	.clkdm_name	= "l4sec_clkdm",
-	.main_clk	= "l3_iclk_div",
-	.prcm		= {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L4SEC_SHA2MD51_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L4SEC_SHA2MD51_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_HWCTRL,
-		},
-	},
-};
-
-/*
- * 'elm' class
- *
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_elm_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
-			   SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class dra7xx_elm_hwmod_class = {
-	.name	= "elm",
-	.sysc	= &dra7xx_elm_sysc,
-};
-
-/* elm */
-
-static struct omap_hwmod dra7xx_elm_hwmod = {
-	.name		= "elm",
-	.class		= &dra7xx_elm_hwmod_class,
-	.clkdm_name	= "l4per_clkdm",
-	.main_clk	= "l3_iclk_div",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L4PER_ELM_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L4PER_ELM_CONTEXT_OFFSET,
-		},
-	},
-};
 
 /*
  * 'gpmc' class
@@ -797,55 +537,6 @@ static struct omap_hwmod dra7xx_mpu_hwmod = {
 	},
 };
 
-/*
- * 'ocp2scp' class
- *
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_ocp2scp_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
-			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class dra7xx_ocp2scp_hwmod_class = {
-	.name	= "ocp2scp",
-	.sysc	= &dra7xx_ocp2scp_sysc,
-};
-
-/* ocp2scp1 */
-static struct omap_hwmod dra7xx_ocp2scp1_hwmod = {
-	.name		= "ocp2scp1",
-	.class		= &dra7xx_ocp2scp_hwmod_class,
-	.clkdm_name	= "l3init_clkdm",
-	.main_clk	= "l4_root_clk_div",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L3INIT_OCP2SCP1_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L3INIT_OCP2SCP1_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_HWCTRL,
-		},
-	},
-};
-
-/* ocp2scp3 */
-static struct omap_hwmod dra7xx_ocp2scp3_hwmod = {
-	.name		= "ocp2scp3",
-	.class		= &dra7xx_ocp2scp_hwmod_class,
-	.clkdm_name	= "l3init_clkdm",
-	.main_clk	= "l4_root_clk_div",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L3INIT_OCP2SCP3_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L3INIT_OCP2SCP3_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_HWCTRL,
-		},
-	},
-};
 
 /*
  * 'PCIE' class
@@ -1032,103 +723,6 @@ static struct omap_hwmod dra7xx_sata_hwmod = {
 };
 
 /*
- * 'smartreflex' class
- *
- */
-
-/* The IP is not compliant to type1 / type2 scheme */
-static struct omap_hwmod_class_sysconfig dra7xx_smartreflex_sysc = {
-	.rev_offs	= -ENODEV,
-	.sysc_offs	= 0x0038,
-	.sysc_flags	= (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
-	.sysc_fields	= &omap36xx_sr_sysc_fields,
-};
-
-static struct omap_hwmod_class dra7xx_smartreflex_hwmod_class = {
-	.name	= "smartreflex",
-	.sysc	= &dra7xx_smartreflex_sysc,
-};
-
-/* smartreflex_core */
-/* smartreflex_core dev_attr */
-static struct omap_smartreflex_dev_attr smartreflex_core_dev_attr = {
-	.sensor_voltdm_name	= "core",
-};
-
-static struct omap_hwmod dra7xx_smartreflex_core_hwmod = {
-	.name		= "smartreflex_core",
-	.class		= &dra7xx_smartreflex_hwmod_class,
-	.clkdm_name	= "coreaon_clkdm",
-	.main_clk	= "wkupaon_iclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_COREAON_SMARTREFLEX_CORE_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_COREAON_SMARTREFLEX_CORE_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-	.dev_attr	= &smartreflex_core_dev_attr,
-};
-
-/* smartreflex_mpu */
-/* smartreflex_mpu dev_attr */
-static struct omap_smartreflex_dev_attr smartreflex_mpu_dev_attr = {
-	.sensor_voltdm_name	= "mpu",
-};
-
-static struct omap_hwmod dra7xx_smartreflex_mpu_hwmod = {
-	.name		= "smartreflex_mpu",
-	.class		= &dra7xx_smartreflex_hwmod_class,
-	.clkdm_name	= "coreaon_clkdm",
-	.main_clk	= "wkupaon_iclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_COREAON_SMARTREFLEX_MPU_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_COREAON_SMARTREFLEX_MPU_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-	.dev_attr	= &smartreflex_mpu_dev_attr,
-};
-
-/*
- * 'spinlock' class
- *
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_spinlock_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
-			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
-			   SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class dra7xx_spinlock_hwmod_class = {
-	.name	= "spinlock",
-	.sysc	= &dra7xx_spinlock_sysc,
-};
-
-/* spinlock */
-static struct omap_hwmod dra7xx_spinlock_hwmod = {
-	.name		= "spinlock",
-	.class		= &dra7xx_spinlock_hwmod_class,
-	.clkdm_name	= "l4cfg_clkdm",
-	.main_clk	= "l3_iclk_div",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L4CFG_SPINLOCK_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L4CFG_SPINLOCK_CONTEXT_OFFSET,
-		},
-	},
-};
-
-/*
  * 'timer' class
  *
  * This class contains several variants: ['timer_1ms', 'timer_secure',
@@ -1225,213 +819,6 @@ static struct omap_hwmod dra7xx_timer4_hwmod = {
 	},
 };
 
-/* timer5 */
-static struct omap_hwmod dra7xx_timer5_hwmod = {
-	.name		= "timer5",
-	.class		= &dra7xx_timer_hwmod_class,
-	.clkdm_name	= "ipu_clkdm",
-	.main_clk	= "timer5_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_IPU_TIMER5_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_IPU_TIMER5_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer6 */
-static struct omap_hwmod dra7xx_timer6_hwmod = {
-	.name		= "timer6",
-	.class		= &dra7xx_timer_hwmod_class,
-	.clkdm_name	= "ipu_clkdm",
-	.main_clk	= "timer6_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_IPU_TIMER6_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_IPU_TIMER6_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer7 */
-static struct omap_hwmod dra7xx_timer7_hwmod = {
-	.name		= "timer7",
-	.class		= &dra7xx_timer_hwmod_class,
-	.clkdm_name	= "ipu_clkdm",
-	.main_clk	= "timer7_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_IPU_TIMER7_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_IPU_TIMER7_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer8 */
-static struct omap_hwmod dra7xx_timer8_hwmod = {
-	.name		= "timer8",
-	.class		= &dra7xx_timer_hwmod_class,
-	.clkdm_name	= "ipu_clkdm",
-	.main_clk	= "timer8_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_IPU_TIMER8_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_IPU_TIMER8_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer9 */
-static struct omap_hwmod dra7xx_timer9_hwmod = {
-	.name		= "timer9",
-	.class		= &dra7xx_timer_hwmod_class,
-	.clkdm_name	= "l4per_clkdm",
-	.main_clk	= "timer9_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L4PER_TIMER9_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L4PER_TIMER9_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer10 */
-static struct omap_hwmod dra7xx_timer10_hwmod = {
-	.name		= "timer10",
-	.class		= &dra7xx_timer_1ms_hwmod_class,
-	.clkdm_name	= "l4per_clkdm",
-	.main_clk	= "timer10_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L4PER_TIMER10_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L4PER_TIMER10_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer11 */
-static struct omap_hwmod dra7xx_timer11_hwmod = {
-	.name		= "timer11",
-	.class		= &dra7xx_timer_hwmod_class,
-	.clkdm_name	= "l4per_clkdm",
-	.main_clk	= "timer11_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L4PER_TIMER11_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L4PER_TIMER11_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer12 */
-static struct omap_hwmod dra7xx_timer12_hwmod = {
-	.name		= "timer12",
-	.class		= &dra7xx_timer_hwmod_class,
-	.clkdm_name	= "wkupaon_clkdm",
-	.main_clk	= "secure_32k_clk_src_ck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_WKUPAON_TIMER12_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_WKUPAON_TIMER12_CONTEXT_OFFSET,
-		},
-	},
-};
-
-/* timer13 */
-static struct omap_hwmod dra7xx_timer13_hwmod = {
-	.name		= "timer13",
-	.class		= &dra7xx_timer_hwmod_class,
-	.clkdm_name	= "l4per3_clkdm",
-	.main_clk	= "timer13_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L4PER3_TIMER13_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L4PER3_TIMER13_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer14 */
-static struct omap_hwmod dra7xx_timer14_hwmod = {
-	.name		= "timer14",
-	.class		= &dra7xx_timer_hwmod_class,
-	.clkdm_name	= "l4per3_clkdm",
-	.main_clk	= "timer14_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L4PER3_TIMER14_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L4PER3_TIMER14_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer15 */
-static struct omap_hwmod dra7xx_timer15_hwmod = {
-	.name		= "timer15",
-	.class		= &dra7xx_timer_hwmod_class,
-	.clkdm_name	= "l4per3_clkdm",
-	.main_clk	= "timer15_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L4PER3_TIMER15_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L4PER3_TIMER15_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* timer16 */
-static struct omap_hwmod dra7xx_timer16_hwmod = {
-	.name		= "timer16",
-	.class		= &dra7xx_timer_hwmod_class,
-	.clkdm_name	= "l4per3_clkdm",
-	.main_clk	= "timer16_gfclk_mux",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L4PER3_TIMER16_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L4PER3_TIMER16_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* DES (the 'P' (public) device) */
-static struct omap_hwmod_class_sysconfig dra7xx_des_sysc = {
-	.rev_offs	= 0x0030,
-	.sysc_offs	= 0x0034,
-	.syss_offs	= 0x0038,
-	.sysc_flags	= SYSS_HAS_RESET_STATUS,
-};
-
-static struct omap_hwmod_class dra7xx_des_hwmod_class = {
-	.name	= "des",
-	.sysc	= &dra7xx_des_sysc,
-};
-
-/* DES */
-static struct omap_hwmod dra7xx_des_hwmod = {
-	.name		= "des",
-	.class		= &dra7xx_des_hwmod_class,
-	.clkdm_name	= "l4sec_clkdm",
-	.main_clk	= "l3_iclk_div",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_L4SEC_DES3DES_CLKCTRL_OFFSET,
-			.context_offs = DRA7XX_RM_L4SEC_DES3DES_CONTEXT_OFFSET,
-			.modulemode   = MODULEMODE_HWCTRL,
-		},
-	},
-};
-
 /*
  * 'usb_otg_ss' class
  *
@@ -1690,30 +1077,6 @@ static struct omap_hwmod_ocp_if dra7xx_l4_wkup__ctrl_module_wkup = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_wkup -> dcan1 */
-static struct omap_hwmod_ocp_if dra7xx_l4_wkup__dcan1 = {
-	.master		= &dra7xx_l4_wkup_hwmod,
-	.slave		= &dra7xx_dcan1_hwmod,
-	.clk		= "wkupaon_iclk_mux",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per2 -> dcan2 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per2__dcan2 = {
-	.master		= &dra7xx_l4_per2_hwmod,
-	.slave		= &dra7xx_dcan2_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> dma_system */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__dma_system = {
-	.master		= &dra7xx_l4_cfg_hwmod,
-	.slave		= &dra7xx_dma_system_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l3_main_1 -> tpcc */
 static struct omap_hwmod_ocp_if dra7xx_l3_main_1__tpcc = {
 	.master		= &dra7xx_l3_main_1_hwmod,
@@ -1762,38 +1125,6 @@ static struct omap_hwmod_ocp_if dra7xx_l3_main_1__hdmi = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l3_main_1 -> aes1 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__aes1 = {
-	.master		= &dra7xx_l3_main_1_hwmod,
-	.slave		= &dra7xx_aes1_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> aes2 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__aes2 = {
-	.master		= &dra7xx_l3_main_1_hwmod,
-	.slave		= &dra7xx_aes2_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> sha0 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__sha0 = {
-	.master		= &dra7xx_l3_main_1_hwmod,
-	.slave		= &dra7xx_sha0_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> elm */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = {
-	.master		= &dra7xx_l4_per1_hwmod,
-	.slave		= &dra7xx_elm_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l3_main_1 -> gpmc */
 static struct omap_hwmod_ocp_if dra7xx_l3_main_1__gpmc = {
 	.master		= &dra7xx_l3_main_1_hwmod,
@@ -1810,22 +1141,6 @@ static struct omap_hwmod_ocp_if dra7xx_l4_cfg__mpu = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_cfg -> ocp2scp1 */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__ocp2scp1 = {
-	.master		= &dra7xx_l4_cfg_hwmod,
-	.slave		= &dra7xx_ocp2scp1_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> ocp2scp3 */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__ocp2scp3 = {
-	.master		= &dra7xx_l4_cfg_hwmod,
-	.slave		= &dra7xx_ocp2scp3_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l3_main_1 -> pciess1 */
 static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess1 = {
 	.master		= &dra7xx_l3_main_1_hwmod,
@@ -1882,30 +1197,6 @@ static struct omap_hwmod_ocp_if dra7xx_l4_cfg__sata = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_cfg -> smartreflex_core */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__smartreflex_core = {
-	.master		= &dra7xx_l4_cfg_hwmod,
-	.slave		= &dra7xx_smartreflex_core_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> smartreflex_mpu */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__smartreflex_mpu = {
-	.master		= &dra7xx_l4_cfg_hwmod,
-	.slave		= &dra7xx_smartreflex_mpu_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> spinlock */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__spinlock = {
-	.master		= &dra7xx_l4_cfg_hwmod,
-	.slave		= &dra7xx_spinlock_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l4_wkup -> timer1 */
 static struct omap_hwmod_ocp_if dra7xx_l4_wkup__timer1 = {
 	.master		= &dra7xx_l4_wkup_hwmod,
@@ -1938,110 +1229,6 @@ static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer4 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_per3 -> timer5 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer5 = {
-	.master		= &dra7xx_l4_per3_hwmod,
-	.slave		= &dra7xx_timer5_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per3 -> timer6 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer6 = {
-	.master		= &dra7xx_l4_per3_hwmod,
-	.slave		= &dra7xx_timer6_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per3 -> timer7 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer7 = {
-	.master		= &dra7xx_l4_per3_hwmod,
-	.slave		= &dra7xx_timer7_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per3 -> timer8 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer8 = {
-	.master		= &dra7xx_l4_per3_hwmod,
-	.slave		= &dra7xx_timer8_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> timer9 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer9 = {
-	.master		= &dra7xx_l4_per1_hwmod,
-	.slave		= &dra7xx_timer9_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> timer10 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer10 = {
-	.master		= &dra7xx_l4_per1_hwmod,
-	.slave		= &dra7xx_timer10_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> timer11 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer11 = {
-	.master		= &dra7xx_l4_per1_hwmod,
-	.slave		= &dra7xx_timer11_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_wkup -> timer12 */
-static struct omap_hwmod_ocp_if dra7xx_l4_wkup__timer12 = {
-	.master		= &dra7xx_l4_wkup_hwmod,
-	.slave		= &dra7xx_timer12_hwmod,
-	.clk		= "wkupaon_iclk_mux",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per3 -> timer13 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer13 = {
-	.master		= &dra7xx_l4_per3_hwmod,
-	.slave		= &dra7xx_timer13_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per3 -> timer14 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer14 = {
-	.master		= &dra7xx_l4_per3_hwmod,
-	.slave		= &dra7xx_timer14_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per3 -> timer15 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer15 = {
-	.master		= &dra7xx_l4_per3_hwmod,
-	.slave		= &dra7xx_timer15_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per3 -> timer16 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer16 = {
-	.master		= &dra7xx_l4_per3_hwmod,
-	.slave		= &dra7xx_timer16_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> des */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__des = {
-	.master		= &dra7xx_l4_per1_hwmod,
-	.slave		= &dra7xx_des_hwmod,
-	.clk		= "l3_iclk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
 /* l4_per3 -> usb_otg_ss1 */
 static struct omap_hwmod_ocp_if dra7xx_l4_per3__usb_otg_ss1 = {
 	.master		= &dra7xx_l4_per3_hwmod,
@@ -2106,30 +1293,6 @@ static struct omap_hwmod_ocp_if dra7xx_l4_per2__vcp2 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_per2 -> epwmss0 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per2__epwmss0 = {
-	.master		= &dra7xx_l4_per2_hwmod,
-	.slave		= &dra7xx_epwmss0_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU,
-};
-
-/* l4_per2 -> epwmss1 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per2__epwmss1 = {
-	.master		= &dra7xx_l4_per2_hwmod,
-	.slave		= &dra7xx_epwmss1_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU,
-};
-
-/* l4_per2 -> epwmss2 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per2__epwmss2 = {
-	.master		= &dra7xx_l4_per2_hwmod,
-	.slave		= &dra7xx_epwmss2_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU,
-};
-
 static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
 	&dra7xx_l3_main_1__dmm,
 	&dra7xx_l3_main_2__l3_instr,
@@ -2146,48 +1309,24 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
 	&dra7xx_l3_main_1__bb2d,
 	&dra7xx_l4_wkup__counter_32k,
 	&dra7xx_l4_wkup__ctrl_module_wkup,
-	&dra7xx_l4_wkup__dcan1,
-	&dra7xx_l4_per2__dcan2,
-	&dra7xx_l4_cfg__dma_system,
 	&dra7xx_l3_main_1__tpcc,
 	&dra7xx_l3_main_1__tptc0,
 	&dra7xx_l3_main_1__tptc1,
 	&dra7xx_l3_main_1__dss,
 	&dra7xx_l3_main_1__dispc,
 	&dra7xx_l3_main_1__hdmi,
-	&dra7xx_l3_main_1__aes1,
-	&dra7xx_l3_main_1__aes2,
-	&dra7xx_l3_main_1__sha0,
-	&dra7xx_l4_per1__elm,
 	&dra7xx_l3_main_1__gpmc,
 	&dra7xx_l4_cfg__mpu,
-	&dra7xx_l4_cfg__ocp2scp1,
-	&dra7xx_l4_cfg__ocp2scp3,
 	&dra7xx_l3_main_1__pciess1,
 	&dra7xx_l4_cfg__pciess1,
 	&dra7xx_l3_main_1__pciess2,
 	&dra7xx_l4_cfg__pciess2,
 	&dra7xx_l3_main_1__qspi,
 	&dra7xx_l4_cfg__sata,
-	&dra7xx_l4_cfg__smartreflex_core,
-	&dra7xx_l4_cfg__smartreflex_mpu,
-	&dra7xx_l4_cfg__spinlock,
 	&dra7xx_l4_wkup__timer1,
 	&dra7xx_l4_per1__timer2,
 	&dra7xx_l4_per1__timer3,
 	&dra7xx_l4_per1__timer4,
-	&dra7xx_l4_per3__timer5,
-	&dra7xx_l4_per3__timer6,
-	&dra7xx_l4_per3__timer7,
-	&dra7xx_l4_per3__timer8,
-	&dra7xx_l4_per1__timer9,
-	&dra7xx_l4_per1__timer10,
-	&dra7xx_l4_per1__timer11,
-	&dra7xx_l4_per3__timer13,
-	&dra7xx_l4_per3__timer14,
-	&dra7xx_l4_per3__timer15,
-	&dra7xx_l4_per3__timer16,
-	&dra7xx_l4_per1__des,
 	&dra7xx_l4_per3__usb_otg_ss1,
 	&dra7xx_l4_per3__usb_otg_ss2,
 	&dra7xx_l4_per3__usb_otg_ss3,
@@ -2195,15 +1334,6 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
 	&dra7xx_l4_per2__vcp1,
 	&dra7xx_l3_main_1__vcp2,
 	&dra7xx_l4_per2__vcp2,
-	&dra7xx_l4_per2__epwmss0,
-	&dra7xx_l4_per2__epwmss1,
-	&dra7xx_l4_per2__epwmss2,
-	NULL,
-};
-
-/* GP-only hwmod links */
-static struct omap_hwmod_ocp_if *dra7xx_gp_hwmod_ocp_ifs[] __initdata = {
-	&dra7xx_l4_wkup__timer12,
 	NULL,
 };
 
@@ -2256,8 +1386,5 @@ int __init dra7xx_hwmod_init(void)
 		}
 	}
 
-	if (!ret && omap_type() == OMAP2_DEVICE_TYPE_GP)
-		ret = omap_hwmod_register_links(dra7xx_gp_hwmod_ocp_ifs);
-
 	return ret;
 }
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.h b/arch/arm/mach-omap2/omap_hwmod_common_data.h
index ca56563..c85cb8b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.h
@@ -98,7 +98,6 @@ extern struct omap_hwmod_class omap2_hdq1w_class;
 extern struct omap_hwmod_class omap2xxx_timer_hwmod_class;
 extern struct omap_hwmod_class omap2xxx_wd_timer_hwmod_class;
 extern struct omap_hwmod_class omap2xxx_gpio_hwmod_class;
-extern struct omap_hwmod_class omap2xxx_dma_hwmod_class;
 extern struct omap_hwmod_class omap2xxx_mailbox_hwmod_class;
 extern struct omap_hwmod_class omap2xxx_mcspi_class;
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_reset.c b/arch/arm/mach-omap2/omap_hwmod_reset.c
index d5ddba0..143623b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_reset.c
+++ b/arch/arm/mach-omap2/omap_hwmod_reset.c
@@ -26,8 +26,6 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 
-#include <sound/aess.h>
-
 #include "omap_hwmod.h"
 #include "common.h"
 
@@ -41,28 +39,6 @@
 #define OMAP_RTC_MAX_READY_TIME	50
 
 /**
- * omap_hwmod_aess_preprogram - enable AESS internal autogating
- * @oh: struct omap_hwmod *
- *
- * The AESS will not IdleAck to the PRCM until its internal autogating
- * is enabled.  Since internal autogating is disabled by default after
- * AESS reset, we must enable autogating after the hwmod code resets
- * the AESS.  Returns 0.
- */
-int omap_hwmod_aess_preprogram(struct omap_hwmod *oh)
-{
-	void __iomem *va;
-
-	va = omap_hwmod_get_mpu_rt_va(oh);
-	if (!va)
-		return -EINVAL;
-
-	aess_enable_autogating(va);
-
-	return 0;
-}
-
-/**
  * omap_rtc_wait_not_busy - Wait for the RTC BUSY flag
  * @oh: struct omap_hwmod *
  *
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index e95c224..dbb7c2a 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -23,6 +23,7 @@
 #include <linux/platform_data/ti-sysc.h>
 #include <linux/platform_data/wkup_m3.h>
 #include <linux/platform_data/asoc-ti-mcbsp.h>
+#include <linux/platform_data/ti-prm.h>
 
 #include "clockdomain.h"
 #include "common.h"
@@ -42,6 +43,17 @@ struct pdata_init {
 static struct of_dev_auxdata omap_auxdata_lookup[];
 static struct twl4030_gpio_platform_data twl_gpio_auxdata;
 
+#if IS_ENABLED(CONFIG_OMAP_IOMMU)
+int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request,
+				    u8 *pwrst);
+#else
+static inline int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev,
+						  bool request, u8 *pwrst)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_MACH_NOKIA_N8X0
 static void __init omap2420_n8x0_legacy_init(void)
 {
@@ -260,16 +272,6 @@ static void __init omap3_pandora_legacy_init(void)
 }
 #endif /* CONFIG_ARCH_OMAP3 */
 
-#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
-static struct iommu_platform_data omap4_iommu_pdata = {
-	.reset_name = "mmu_cache",
-	.assert_reset = omap_device_assert_hardreset,
-	.deassert_reset = omap_device_deassert_hardreset,
-	.device_enable = omap_device_enable,
-	.device_idle = omap_device_idle,
-};
-#endif
-
 #if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
 static struct wkup_m3_platform_data wkup_m3_data = {
 	.reset_name = "wkup_m3",
@@ -285,6 +287,10 @@ static void __init omap5_uevm_legacy_init(void)
 #endif
 
 #ifdef CONFIG_SOC_DRA7XX
+static struct iommu_platform_data dra7_ipu1_dsp_iommu_pdata = {
+	.set_pwrdm_constraint = omap_iommu_set_pwrdm_constraint,
+};
+
 static struct omap_hsmmc_platform_data dra7_hsmmc_data_mmc1;
 static struct omap_hsmmc_platform_data dra7_hsmmc_data_mmc2;
 static struct omap_hsmmc_platform_data dra7_hsmmc_data_mmc3;
@@ -412,6 +418,12 @@ void omap_pcs_legacy_init(int irq, void (*rearm)(void))
 	pcs_pdata.rearm = rearm;
 }
 
+static struct ti_prm_platform_data ti_prm_pdata = {
+	.clkdm_deny_idle = clkdm_deny_idle,
+	.clkdm_allow_idle = clkdm_allow_idle,
+	.clkdm_lookup = clkdm_lookup,
+};
+
 /*
  * GPIOs for TWL are initialized by the I2C bus and need custom
  * handing until DSS has device tree bindings.
@@ -492,10 +504,6 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
 		       &wkup_m3_data),
 #endif
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
-	OF_DEV_AUXDATA("ti,omap4-iommu", 0x4a066000, "4a066000.mmu",
-		       &omap4_iommu_pdata),
-	OF_DEV_AUXDATA("ti,omap4-iommu", 0x55082000, "55082000.mmu",
-		       &omap4_iommu_pdata),
 	OF_DEV_AUXDATA("ti,omap4-smartreflex-iva", 0x4a0db000,
 		       "4a0db000.smartreflex", &omap_sr_pdata[OMAP_SR_IVA]),
 	OF_DEV_AUXDATA("ti,omap4-smartreflex-core", 0x4a0dd000,
@@ -510,10 +518,18 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
 		       &dra7_hsmmc_data_mmc2),
 	OF_DEV_AUXDATA("ti,dra7-hsmmc", 0x480ad000, "480ad000.mmc",
 		       &dra7_hsmmc_data_mmc3),
+	OF_DEV_AUXDATA("ti,dra7-dsp-iommu", 0x40d01000, "40d01000.mmu",
+		       &dra7_ipu1_dsp_iommu_pdata),
+	OF_DEV_AUXDATA("ti,dra7-dsp-iommu", 0x41501000, "41501000.mmu",
+		       &dra7_ipu1_dsp_iommu_pdata),
+	OF_DEV_AUXDATA("ti,dra7-iommu", 0x58882000, "58882000.mmu",
+		       &dra7_ipu1_dsp_iommu_pdata),
 #endif
 	/* Common auxdata */
 	OF_DEV_AUXDATA("ti,sysc", 0, NULL, &ti_sysc_pdata),
 	OF_DEV_AUXDATA("pinctrl-single", 0, NULL, &pcs_pdata),
+	OF_DEV_AUXDATA("ti,omap-prm-inst", 0, NULL, &ti_prm_pdata),
+	OF_DEV_AUXDATA("ti,omap-sdma", 0, NULL, &dma_plat_info),
 	{ /* sentinel */ },
 };
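
For reference, the auxdata entries added above rely on OF_DEV_AUXDATA() matching a device-tree node by compatible string plus unit address (or by compatible alone when address and name are left 0/NULL) and attaching legacy platform data to the resulting platform device. A minimal sketch of the mechanism; the compatible strings, address and pdata below are made up:

#include <linux/init.h>
#include <linux/of_platform.h>

struct example_pdata {
	int flags;
};

static struct example_pdata example_pdata = { .flags = 1 };

static struct of_dev_auxdata example_auxdata_lookup[] __initdata = {
	/* Match "vendor,example-ip" at unit address 0x48000000 and hand it pdata */
	OF_DEV_AUXDATA("vendor,example-ip", 0x48000000, "48000000.example",
		       &example_pdata),
	/* Address 0 and NULL name: match any "vendor,example-bus" node */
	OF_DEV_AUXDATA("vendor,example-bus", 0, NULL, NULL),
	{ /* sentinel */ },
};

static void __init example_init_machine(void)
{
	of_platform_populate(NULL, of_default_bus_match_table,
			     example_auxdata_lookup, NULL);
}
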
 
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index 1581b6a..6953c47 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -83,8 +83,6 @@ static int omap2_enter_full_retention(void)
 	l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
 	omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);
 
-	cpu_cluster_pm_enter();
-
 	/* One last check for pending IRQs to avoid extra latency due
 	 * to sleeping unnecessarily. */
 	if (omap_irq_pending())
@@ -96,8 +94,6 @@ static int omap2_enter_full_retention(void)
 			   OMAP_SDRC_REGADDR(SDRC_POWER));
 
 no_sleep:
-	cpu_cluster_pm_exit();
-
 	clk_enable(osc_ck);
 
 	/* clear CORE wake-up events */
@@ -162,25 +158,27 @@ static int omap2_can_sleep(void)
 		return 0;
 	if (__clk_is_enabled(osc_ck))
 		return 0;
-	if (omap_dma_running())
-		return 0;
 
 	return 1;
 }
 
 static void omap2_pm_idle(void)
 {
-	if (!omap2_can_sleep()) {
-		if (omap_irq_pending())
-			return;
-		omap2_enter_mpu_retention();
-		return;
-	}
+	int error;
 
 	if (omap_irq_pending())
 		return;
 
+	error = cpu_cluster_pm_enter();
+	if (error || !omap2_can_sleep()) {
+		omap2_enter_mpu_retention();
+		goto out_cpu_cluster_pm;
+	}
+
 	omap2_enter_full_retention();
+
+out_cpu_cluster_pm:
+	cpu_cluster_pm_exit();
 }
 
 static void __init prcm_setup_regs(void)
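
The omap2_pm_idle() rework above follows a common shape: cpu_cluster_pm_enter() can fail when a CPU PM notifier refuses the transition, so the idle path falls back to a shallower state and still balances the call with cpu_cluster_pm_exit(), as in the hunk. A hedged sketch of that shape with hypothetical idle helpers:

#include <linux/cpu_pm.h>

static void example_enter_shallow_idle(void) { }	/* hypothetical fallback state */
static void example_enter_deep_idle(void) { }		/* hypothetical deep state */

static void example_pm_idle(void)
{
	int error;

	error = cpu_cluster_pm_enter();
	if (error) {
		/* A notifier vetoed the cluster transition: use the shallow state */
		example_enter_shallow_idle();
		goto out;
	}

	example_enter_deep_idle();
out:
	/* Balanced on both paths, mirroring the omap2_pm_idle() change above */
	cpu_cluster_pm_exit();
}
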
diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c
index f11442e..7461b03 100644
--- a/arch/arm/mach-omap2/pm33xx-core.c
+++ b/arch/arm/mach-omap2/pm33xx-core.c
@@ -28,6 +28,7 @@
 #include "prm33xx.h"
 #include "soc.h"
 #include "sram.h"
+#include "omap-secure.h"
 
 static struct powerdomain *cefuse_pwrdm, *gfx_pwrdm, *per_pwrdm, *mpu_pwrdm;
 static struct clockdomain *gfx_l4ls_clkdm;
@@ -166,6 +167,16 @@ static int am43xx_suspend(unsigned int state, int (*fn)(unsigned long),
 {
 	int ret = 0;
 
+	/* Suspend secure side on HS devices */
+	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
+		if (optee_available)
+			omap_smccc_smc(AM43xx_PPA_SVC_PM_SUSPEND, 0);
+		else
+			omap_secure_dispatcher(AM43xx_PPA_SVC_PM_SUSPEND,
+					       FLAG_START_CRITICAL,
+					       0, 0, 0, 0, 0);
+	}
+
 	amx3_pre_suspend_common();
 	scu_power_mode(scu_base, SCU_PM_POWEROFF);
 	ret = cpu_suspend(args, fn);
@@ -174,6 +185,19 @@ static int am43xx_suspend(unsigned int state, int (*fn)(unsigned long),
 	if (!am43xx_check_off_mode_enable())
 		amx3_post_suspend_common();
 
+	/*
+	 * Resume secure side on HS devices.
+	 *
+	 * Note that even on systems with OP-TEE available this resume call is
+	 * issued to the ROM. This is because upon waking from suspend the ROM
+	 * is restored as the secure monitor. On systems with OP-TEE ROM will
+	 * restore OP-TEE during this call.
+	 */
+	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
+		omap_secure_dispatcher(AM43xx_PPA_SVC_PM_RESUME,
+				       FLAG_START_CRITICAL,
+				       0, 0, 0, 0, 0);
+
 	return ret;
 }
 
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 54254fc..e66e994 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -25,7 +25,6 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <linux/omap-dma.h>
 #include <linux/omap-gpmc.h>
 
 #include <trace/events/power.h>
@@ -85,7 +84,6 @@ static void omap3_core_save_context(void)
 	omap3_gpmc_save_context();
 	/* Save the system control module context, padconf already save above*/
 	omap3_control_save_context();
-	omap_dma_global_context_save();
 }
 
 static void omap3_core_restore_context(void)
@@ -96,7 +94,6 @@ static void omap3_core_restore_context(void)
 	omap3_gpmc_restore_context();
 	/* Restore the interrupt controller context */
 	omap_intc_restore_context();
-	omap_dma_global_context_restore();
 }
 
 /*
@@ -547,9 +544,7 @@ int __init omap3_pm_init(void)
 
 		local_irq_disable();
 
-		omap_dma_global_context_save();
 		omap3_save_secure_ram_context();
-		omap_dma_global_context_restore();
 
 		local_irq_enable();
 	}
diff --git a/arch/arm/mach-omap2/prcm43xx.h b/arch/arm/mach-omap2/prcm43xx.h
index e2ad14e..7078a61 100644
--- a/arch/arm/mach-omap2/prcm43xx.h
+++ b/arch/arm/mach-omap2/prcm43xx.h
@@ -68,6 +68,7 @@
 #define AM43XX_CM_PER_ICSS_CDOFFS			0x0300
 #define AM43XX_CM_PER_L4LS_CDOFFS			0x0400
 #define AM43XX_CM_PER_EMIF_CDOFFS			0x0700
+#define AM43XX_CM_PER_LCDC_CDOFFS			0x0800
 #define AM43XX_CM_PER_DSS_CDOFFS			0x0a00
 #define AM43XX_CM_PER_CPSW_CDOFFS			0x0b00
 #define AM43XX_CM_PER_OCPWP_L3_CDOFFS			0x0c00
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index eba917d..35dd3ad 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -11,9 +11,9 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
+#include <linux/gpio/machine.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
-#include <linux/usb/gpio_vbus.h>
 
 #include <asm/mach-types.h>
 #include <linux/sizes.h>
@@ -144,17 +144,18 @@ static inline void __init colibri_pxa320_init_eth(void) {}
 #endif /* CONFIG_AX88796 */
 
 #if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
-static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = {
-	.gpio_vbus		= mfp_to_gpio(MFP_PIN_GPIO96),
-	.gpio_pullup		= -1,
+static struct gpiod_lookup_table gpio_vbus_gpiod_table = {
+	.dev_id = "gpio-vbus",
+	.table = {
+		GPIO_LOOKUP("gpio-pxa", MFP_PIN_GPIO96,
+			    "vbus", GPIO_ACTIVE_HIGH),
+		{ },
+	},
 };
 
 static struct platform_device colibri_pxa320_gpio_vbus = {
 	.name	= "gpio-vbus",
 	.id	= -1,
-	.dev	= {
-		.platform_data	= &colibri_pxa320_gpio_vbus_info,
-	},
 };
 
 static void colibri_pxa320_udc_command(int cmd)
@@ -173,6 +174,7 @@ static struct pxa2xx_udc_mach_info colibri_pxa320_udc_info __initdata = {
 static void __init colibri_pxa320_init_udc(void)
 {
 	pxa_set_udc_info(&colibri_pxa320_udc_info);
+	gpiod_add_lookup_table(&gpio_vbus_gpiod_table);
 	platform_device_register(&colibri_pxa320_gpio_vbus);
 }
 #else
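
The same conversion from gpio_vbus_mach_info platform data to a gpiod_lookup_table repeats across the PXA and S3C board files that follow. As a hedged sketch of the pattern (the chip label, pin numbers and function names here are made up, not taken from any of these boards):

#include <linux/init.h>
#include <linux/gpio/machine.h>
#include <linux/platform_device.h>

static struct gpiod_lookup_table example_vbus_gpiod_table = {
	.dev_id = "gpio-vbus",		/* consumer: the gpio-vbus transceiver */
	.table = {
		/* "vbus" and "pullup" are the con_ids the gpio-vbus driver looks up */
		GPIO_LOOKUP("gpio-pxa", 42, "vbus", GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP("gpio-pxa", 43, "pullup", GPIO_ACTIVE_LOW),
		{ },
	},
};

static struct platform_device example_gpio_vbus = {
	.name	= "gpio-vbus",
	.id	= -1,
};

static void __init example_udc_init(void)
{
	/* Register the lookup table before the gpio-vbus device can probe */
	gpiod_add_lookup_table(&example_vbus_gpiod_table);
	platform_device_register(&example_gpio_vbus);
}
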
diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c
index 91f7c3e..f37c44b 100644
--- a/arch/arm/mach-pxa/eseries.c
+++ b/arch/arm/mach-pxa/eseries.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/clk-provider.h>
+#include <linux/gpio/machine.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
@@ -22,7 +23,6 @@
 #include <linux/mfd/t7l66xb.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
-#include <linux/usb/gpio_vbus.h>
 #include <linux/memblock.h>
 
 #include <video/w100fb.h>
@@ -51,18 +51,20 @@ void __init eseries_fixup(struct tag *tags, char **cmdline)
 		memblock_add(0xa0000000, SZ_64M);
 }
 
-struct gpio_vbus_mach_info e7xx_udc_info = {
-	.gpio_vbus   = GPIO_E7XX_USB_DISC,
-	.gpio_pullup = GPIO_E7XX_USB_PULLUP,
-	.gpio_pullup_inverted = 1
+static struct gpiod_lookup_table e7xx_gpio_vbus_gpiod_table __maybe_unused = {
+	.dev_id = "gpio-vbus",
+	.table = {
+		GPIO_LOOKUP("gpio-pxa", GPIO_E7XX_USB_DISC,
+			    "vbus", GPIO_ACTIVE_HIGH),
+		GPIO_LOOKUP("gpio-pxa", GPIO_E7XX_USB_PULLUP,
+			    "pullup", GPIO_ACTIVE_LOW),
+		{ },
+	},
 };
 
 static struct platform_device e7xx_gpio_vbus __maybe_unused = {
 	.name	= "gpio-vbus",
 	.id	= -1,
-	.dev	= {
-		.platform_data	= &e7xx_udc_info,
-	},
 };
 
 struct pxaficp_platform_data e7xx_ficp_platform_data = {
@@ -165,6 +167,7 @@ static void __init e330_init(void)
 	pxa_set_stuart_info(NULL);
 	eseries_register_clks();
 	eseries_get_tmio_gpios();
+	gpiod_add_lookup_table(&e7xx_gpio_vbus_gpiod_table);
 	platform_add_devices(ARRAY_AND_SIZE(e330_devices));
 }
 
@@ -216,6 +219,7 @@ static void __init e350_init(void)
 	pxa_set_stuart_info(NULL);
 	eseries_register_clks();
 	eseries_get_tmio_gpios();
+	gpiod_add_lookup_table(&e7xx_gpio_vbus_gpiod_table);
 	platform_add_devices(ARRAY_AND_SIZE(e350_devices));
 }
 
@@ -340,6 +344,7 @@ static void __init e400_init(void)
 	eseries_register_clks();
 	eseries_get_tmio_gpios();
 	pxa_set_fb_info(NULL, &e400_pxafb_mach_info);
+	gpiod_add_lookup_table(&e7xx_gpio_vbus_gpiod_table);
 	platform_add_devices(ARRAY_AND_SIZE(e400_devices));
 }
 
@@ -534,6 +539,7 @@ static void __init e740_init(void)
 	clk_add_alias("CLK_CK48M", e740_t7l66xb_device.name,
 			"UDCCLK", &pxa25x_device_udc.dev),
 	eseries_get_tmio_gpios();
+	gpiod_add_lookup_table(&e7xx_gpio_vbus_gpiod_table);
 	platform_add_devices(ARRAY_AND_SIZE(e740_devices));
 	pxa_set_ac97_info(NULL);
 	pxa_set_ficp_info(&e7xx_ficp_platform_data);
@@ -733,6 +739,7 @@ static void __init e750_init(void)
 	clk_add_alias("CLK_CK3P6MI", e750_tc6393xb_device.name,
 			"GPIO11_CLK", NULL),
 	eseries_get_tmio_gpios();
+	gpiod_add_lookup_table(&e7xx_gpio_vbus_gpiod_table);
 	platform_add_devices(ARRAY_AND_SIZE(e750_devices));
 	pxa_set_ac97_info(NULL);
 	pxa_set_ficp_info(&e7xx_ficp_platform_data);
@@ -888,18 +895,20 @@ static struct platform_device e800_fb_device = {
 
 /* --------------------------- UDC definitions --------------------------- */
 
-static struct gpio_vbus_mach_info e800_udc_info = {
-	.gpio_vbus   = GPIO_E800_USB_DISC,
-	.gpio_pullup = GPIO_E800_USB_PULLUP,
-	.gpio_pullup_inverted = 1
+static struct gpiod_lookup_table e800_gpio_vbus_gpiod_table = {
+	.dev_id = "gpio-vbus",
+	.table = {
+		GPIO_LOOKUP("gpio-pxa", GPIO_E800_USB_DISC,
+			    "vbus", GPIO_ACTIVE_HIGH),
+		GPIO_LOOKUP("gpio-pxa", GPIO_E800_USB_PULLUP,
+			    "pullup", GPIO_ACTIVE_LOW),
+		{ },
+	},
 };
 
 static struct platform_device e800_gpio_vbus = {
 	.name	= "gpio-vbus",
 	.id	= -1,
-	.dev	= {
-		.platform_data	= &e800_udc_info,
-	},
 };
 
 
@@ -949,6 +958,7 @@ static void __init e800_init(void)
 	clk_add_alias("CLK_CK3P6MI", e800_tc6393xb_device.name,
 			"GPIO11_CLK", NULL),
 	eseries_get_tmio_gpios();
+	gpiod_add_lookup_table(&e800_gpio_vbus_gpiod_table);
 	platform_add_devices(ARRAY_AND_SIZE(e800_devices));
 	pxa_set_ac97_info(NULL);
 }
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index 4b4589c..49dd618 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -20,10 +20,10 @@
 #include <linux/delay.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
+#include <linux/gpio/machine.h>
 #include <linux/gpio.h>
 #include <linux/err.h>
 #include <linux/clk.h>
-#include <linux/usb/gpio_vbus.h>
 
 #include <asm/setup.h>
 #include <asm/memory.h>
@@ -101,21 +101,25 @@ static void __init gumstix_mmc_init(void)
 #endif
 
 #ifdef CONFIG_USB_PXA25X
-static struct gpio_vbus_mach_info gumstix_udc_info = {
-	.gpio_vbus		= GPIO_GUMSTIX_USB_GPIOn,
-	.gpio_pullup		= GPIO_GUMSTIX_USB_GPIOx,
+static struct gpiod_lookup_table gumstix_gpio_vbus_gpiod_table = {
+	.dev_id = "gpio-vbus",
+	.table = {
+		GPIO_LOOKUP("gpio-pxa", GPIO_GUMSTIX_USB_GPIOn,
+			    "vbus", GPIO_ACTIVE_HIGH),
+		GPIO_LOOKUP("gpio-pxa", GPIO_GUMSTIX_USB_GPIOx,
+			    "pullup", GPIO_ACTIVE_HIGH),
+		{ },
+	},
 };
 
 static struct platform_device gumstix_gpio_vbus = {
 	.name	= "gpio-vbus",
 	.id	= -1,
-	.dev	= {
-		.platform_data	= &gumstix_udc_info,
-	},
 };
 
 static void __init gumstix_udc_init(void)
 {
+	gpiod_add_lookup_table(&gumstix_gpio_vbus_gpiod_table);
 	platform_device_register(&gumstix_gpio_vbus);
 }
 #else
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index 311268d..238a751 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -34,7 +34,6 @@
 #include <linux/spi/ads7846.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/pxa2xx_spi.h>
-#include <linux/usb/gpio_vbus.h>
 #include <linux/platform_data/i2c-pxa.h>
 
 #include <mach/hardware.h>
@@ -578,18 +577,24 @@ static struct pwm_lookup hx4700_pwm_lookup[] = {
  * USB "Transceiver"
  */
 
-static struct gpio_vbus_mach_info gpio_vbus_info = {
-	.gpio_pullup        = GPIO76_HX4700_USBC_PUEN,
-	.gpio_vbus          = GPIOD14_nUSBC_DETECT,
-	.gpio_vbus_inverted = 1,
+static struct gpiod_lookup_table gpio_vbus_gpiod_table = {
+	.dev_id = "gpio-vbus",
+	.table = {
+		/* This GPIO is on ASIC3 */
+		GPIO_LOOKUP("asic3",
+			    /* Convert to a local offset on the ASIC3 */
+			    GPIOD14_nUSBC_DETECT - HX4700_ASIC3_GPIO_BASE,
+			    "vbus", GPIO_ACTIVE_LOW),
+		/* This one is on the primary SOC GPIO */
+		GPIO_LOOKUP("gpio-pxa", GPIO76_HX4700_USBC_PUEN,
+			    "pullup", GPIO_ACTIVE_HIGH),
+		{ },
+	},
 };
 
 static struct platform_device gpio_vbus = {
 	.name          = "gpio-vbus",
 	.id            = -1,
-	.dev = {
-		.platform_data = &gpio_vbus_info,
-	},
 };
 
 static struct pxa2xx_udc_mach_info hx4700_udc_info;
@@ -883,6 +888,7 @@ static void __init hx4700_init(void)
 	pxa_set_stuart_info(NULL);
 
 	gpiod_add_lookup_table(&bq24022_gpiod_table);
+	gpiod_add_lookup_table(&gpio_vbus_gpiod_table);
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 	pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup));
 
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index 868dc0c..5d0591f 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -27,7 +27,6 @@
 #include <linux/regulator/fixed.h>
 #include <linux/regulator/gpio-regulator.h>
 #include <linux/regulator/machine.h>
-#include <linux/usb/gpio_vbus.h>
 #include <linux/platform_data/i2c-pxa.h>
 
 #include <mach/hardware.h>
@@ -506,9 +505,20 @@ static struct resource gpio_vbus_resource = {
 	.end	= IRQ_MAGICIAN_VBUS,
 };
 
-static struct gpio_vbus_mach_info gpio_vbus_info = {
-	.gpio_pullup	= GPIO27_MAGICIAN_USBC_PUEN,
-	.gpio_vbus	= EGPIO_MAGICIAN_CABLE_VBUS,
+static struct gpiod_lookup_table gpio_vbus_gpiod_table = {
+	.dev_id = "gpio-vbus",
+	.table = {
+		/*
+		 * EGPIO on register 4 index 1, the second EGPIO chip
+		 * starts at register 4 so this will be at index 1 on that
+		 * chip.
+		 */
+		GPIO_LOOKUP("htc-egpio-1", 1,
+			    "vbus", GPIO_ACTIVE_HIGH),
+		GPIO_LOOKUP("gpio-pxa", GPIO27_MAGICIAN_USBC_PUEN,
+			    "pullup", GPIO_ACTIVE_HIGH),
+		{ },
+	},
 };
 
 static struct platform_device gpio_vbus = {
@@ -516,9 +526,6 @@ static struct platform_device gpio_vbus = {
 	.id		= -1,
 	.num_resources	= 1,
 	.resource	= &gpio_vbus_resource,
-	.dev = {
-		.platform_data = &gpio_vbus_info,
-	},
 };
 
 /*
@@ -1032,6 +1039,7 @@ static void __init magician_init(void)
 		ARRAY_SIZE(pwm_backlight_supply), 5000000);
 
 	gpiod_add_lookup_table(&bq24022_gpiod_table);
+	gpiod_add_lookup_table(&gpio_vbus_gpiod_table);
 	platform_add_devices(ARRAY_AND_SIZE(devices));
 }
 
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index c360023..0b8bae9 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -24,7 +24,6 @@
 #include <linux/power_supply.h>
 #include <linux/wm97xx.h>
 #include <linux/mtd/physmap.h>
-#include <linux/usb/gpio_vbus.h>
 #include <linux/reboot.h>
 #include <linux/regulator/fixed.h>
 #include <linux/regulator/max1586.h>
@@ -368,10 +367,13 @@ static struct pxa2xx_udc_mach_info mioa701_udc_info = {
 	.gpio_pullup	  = GPIO22_USB_ENABLE,
 };
 
-struct gpio_vbus_mach_info gpio_vbus_data = {
-	.gpio_vbus = GPIO13_nUSB_DETECT,
-	.gpio_vbus_inverted = 1,
-	.gpio_pullup = -1,
+static struct gpiod_lookup_table gpio_vbus_gpiod_table = {
+	.dev_id = "gpio-vbus",
+	.table = {
+		GPIO_LOOKUP("gpio-pxa", GPIO13_nUSB_DETECT,
+			    "vbus", GPIO_ACTIVE_LOW),
+		{ },
+	},
 };
 
 /*
@@ -677,7 +679,7 @@ MIO_SIMPLE_DEV(mioa701_led,	  "leds-gpio",	    &gpio_led_info)
 MIO_SIMPLE_DEV(pxa2xx_pcm,	  "pxa2xx-pcm",	    NULL)
 MIO_SIMPLE_DEV(mioa701_sound,	  "mioa701-wm9713", NULL)
 MIO_SIMPLE_DEV(mioa701_board,	  "mioa701-board",  NULL)
-MIO_SIMPLE_DEV(gpio_vbus,	  "gpio-vbus",      &gpio_vbus_data);
+MIO_SIMPLE_DEV(gpio_vbus,	  "gpio-vbus",      NULL);
 
 static struct platform_device *devices[] __initdata = {
 	&mioa701_gpio_keys,
@@ -750,6 +752,7 @@ static void __init mioa701_machine_init(void)
 	pxa_set_ac97_info(&mioa701_ac97_info);
 	pm_power_off = mioa701_poweroff;
 	pwm_add_table(mioa701_pwm_lookup, ARRAY_SIZE(mioa701_pwm_lookup));
+	gpiod_add_lookup_table(&gpio_vbus_gpiod_table);
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 	gsm_init();
 
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 3ad0b39..b600b63 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -13,10 +13,10 @@
 #include <linux/pda_power.h>
 #include <linux/pwm.h>
 #include <linux/pwm_backlight.h>
+#include <linux/gpio/machine.h>
 #include <linux/gpio.h>
 #include <linux/wm97xx.h>
 #include <linux/power_supply.h>
-#include <linux/usb/gpio_vbus.h>
 #include <linux/regulator/max1586.h>
 #include <linux/platform_data/i2c-pxa.h>
 
@@ -159,32 +159,32 @@ void __init palm27x_lcd_init(int power, struct pxafb_mode_info *mode)
  ******************************************************************************/
 #if	defined(CONFIG_USB_PXA27X) || \
 	defined(CONFIG_USB_PXA27X_MODULE)
-static struct gpio_vbus_mach_info palm27x_udc_info = {
-	.gpio_vbus_inverted	= 1,
+
+/* The actual GPIO offsets get filled in in the palm27x_udc_init() call */
+static struct gpiod_lookup_table palm27x_udc_gpiod_table = {
+	.dev_id = "gpio-vbus",
+	.table = {
+		GPIO_LOOKUP("gpio-pxa", 0,
+			    "vbus", GPIO_ACTIVE_HIGH),
+		GPIO_LOOKUP("gpio-pxa", 0,
+			    "pullup", GPIO_ACTIVE_HIGH),
+		{ },
+	},
 };
 
 static struct platform_device palm27x_gpio_vbus = {
 	.name	= "gpio-vbus",
 	.id	= -1,
-	.dev	= {
-		.platform_data	= &palm27x_udc_info,
-	},
 };
 
 void __init palm27x_udc_init(int vbus, int pullup, int vbus_inverted)
 {
-	palm27x_udc_info.gpio_vbus	= vbus;
-	palm27x_udc_info.gpio_pullup	= pullup;
+	palm27x_udc_gpiod_table.table[0].chip_hwnum = vbus;
+	palm27x_udc_gpiod_table.table[1].chip_hwnum = pullup;
+	if (vbus_inverted)
+		palm27x_udc_gpiod_table.table[0].flags = GPIO_ACTIVE_LOW;
 
-	palm27x_udc_info.gpio_vbus_inverted = vbus_inverted;
-
-	if (!gpio_request(pullup, "USB Pullup")) {
-		gpio_direction_output(pullup,
-			palm27x_udc_info.gpio_vbus_inverted);
-		gpio_free(pullup);
-	} else
-		return;
-
+	gpiod_add_lookup_table(&palm27x_udc_gpiod_table);
 	platform_device_register(&palm27x_gpio_vbus);
 }
 #endif
diff --git a/arch/arm/mach-pxa/palmt5.c b/arch/arm/mach-pxa/palmt5.c
index 9024033..7c7cbb4 100644
--- a/arch/arm/mach-pxa/palmt5.c
+++ b/arch/arm/mach-pxa/palmt5.c
@@ -23,7 +23,6 @@
 #include <linux/gpio.h>
 #include <linux/wm97xx.h>
 #include <linux/power_supply.h>
-#include <linux/usb/gpio_vbus.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index f52bd15..fda9dea 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -23,7 +23,6 @@
 #include <linux/power_supply.h>
 #include <linux/gpio_keys.h>
 #include <linux/mtd/physmap.h>
-#include <linux/usb/gpio_vbus.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -319,22 +318,25 @@ static inline void palmtc_mkp_init(void) {}
  * UDC
  ******************************************************************************/
 #if defined(CONFIG_USB_PXA25X)||defined(CONFIG_USB_PXA25X_MODULE)
-static struct gpio_vbus_mach_info palmtc_udc_info = {
-	.gpio_vbus		= GPIO_NR_PALMTC_USB_DETECT_N,
-	.gpio_vbus_inverted	= 1,
-	.gpio_pullup		= GPIO_NR_PALMTC_USB_POWER,
+static struct gpiod_lookup_table palmtc_udc_gpiod_table = {
+	.dev_id = "gpio-vbus",
+	.table = {
+		GPIO_LOOKUP("gpio-pxa", GPIO_NR_PALMTC_USB_DETECT_N,
+			    "vbus", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("gpio-pxa", GPIO_NR_PALMTC_USB_POWER,
+			    "pullup", GPIO_ACTIVE_HIGH),
+		{ },
+	},
 };
 
 static struct platform_device palmtc_gpio_vbus = {
 	.name	= "gpio-vbus",
 	.id	= -1,
-	.dev	= {
-		.platform_data	= &palmtc_udc_info,
-	},
 };
 
 static void __init palmtc_udc_init(void)
 {
+	gpiod_add_lookup_table(&palmtc_udc_gpiod_table);
 	platform_device_register(&palmtc_gpio_vbus);
 };
 #else
diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c
index a92b966..7171014 100644
--- a/arch/arm/mach-pxa/palmte2.c
+++ b/arch/arm/mach-pxa/palmte2.c
@@ -23,7 +23,6 @@
 #include <linux/gpio.h>
 #include <linux/wm97xx.h>
 #include <linux/power_supply.h>
-#include <linux/usb/gpio_vbus.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -201,18 +200,20 @@ static struct pxaficp_platform_data palmte2_ficp_platform_data = {
 /******************************************************************************
  * UDC
  ******************************************************************************/
-static struct gpio_vbus_mach_info palmte2_udc_info = {
-	.gpio_vbus		= GPIO_NR_PALMTE2_USB_DETECT_N,
-	.gpio_vbus_inverted	= 1,
-	.gpio_pullup		= GPIO_NR_PALMTE2_USB_PULLUP,
+static struct gpiod_lookup_table palmte2_udc_gpiod_table = {
+	.dev_id = "gpio-vbus",
+	.table = {
+		GPIO_LOOKUP("gpio-pxa", GPIO_NR_PALMTE2_USB_DETECT_N,
+			    "vbus", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("gpio-pxa", GPIO_NR_PALMTE2_USB_PULLUP,
+			    "pullup", GPIO_ACTIVE_HIGH),
+		{ },
+	},
 };
 
 static struct platform_device palmte2_gpio_vbus = {
 	.name	= "gpio-vbus",
 	.id	= -1,
-	.dev	= {
-		.platform_data	= &palmte2_udc_info,
-	},
 };
 
 /******************************************************************************
@@ -368,6 +369,7 @@ static void __init palmte2_init(void)
 	pxa_set_ficp_info(&palmte2_ficp_platform_data);
 
 	pwm_add_table(palmte2_pwm_lookup, ARRAY_SIZE(palmte2_pwm_lookup));
+	gpiod_add_lookup_table(&palmte2_udc_gpiod_table);
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 }
 
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c
index 926593e..07332c9 100644
--- a/arch/arm/mach-pxa/palmtx.c
+++ b/arch/arm/mach-pxa/palmtx.c
@@ -23,7 +23,6 @@
 #include <linux/gpio.h>
 #include <linux/wm97xx.h>
 #include <linux/power_supply.h>
-#include <linux/usb/gpio_vbus.h>
 #include <linux/mtd/platnand.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/physmap.h>
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index 77fe2e3..4df4439 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -25,7 +25,6 @@
 #include <linux/gpio.h>
 #include <linux/wm97xx.h>
 #include <linux/power_supply.h>
-#include <linux/usb/gpio_vbus.h>
 #include <linux/platform_data/i2c-gpio.h>
 #include <linux/gpio/machine.h>
 
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 4e13893..3d2c108 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -33,7 +33,6 @@
 #include <linux/spi/pxa2xx_spi.h>
 #include <linux/input/matrix_keypad.h>
 #include <linux/platform_data/i2c-pxa.h>
-#include <linux/usb/gpio_vbus.h>
 #include <linux/reboot.h>
 #include <linux/memblock.h>
 
@@ -240,18 +239,20 @@ static struct scoop_pcmcia_config tosa_pcmcia_config = {
 /*
  * USB Device Controller
  */
-static struct gpio_vbus_mach_info tosa_udc_info = {
-	.gpio_pullup		= TOSA_GPIO_USB_PULLUP,
-	.gpio_vbus		= TOSA_GPIO_USB_IN,
-	.gpio_vbus_inverted	= 1,
+static struct gpiod_lookup_table tosa_udc_gpiod_table = {
+	.dev_id = "gpio-vbus",
+	.table = {
+		GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_USB_IN,
+			    "vbus", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_USB_PULLUP,
+			    "pullup", GPIO_ACTIVE_HIGH),
+		{ },
+	},
 };
 
 static struct platform_device tosa_gpio_vbus = {
 	.name	= "gpio-vbus",
 	.id	= -1,
-	.dev	= {
-		.platform_data	= &tosa_udc_info,
-	},
 };
 
 /*
@@ -949,6 +950,7 @@ static void __init tosa_init(void)
 
 	clk_add_alias("CLK_CK3P6MI", tc6393xb_device.name, "GPIO11_CLK", NULL);
 
+	gpiod_add_lookup_table(&tosa_udc_gpiod_table);
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 }
 
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index 26a5ebc..14505e8 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -14,7 +14,6 @@
 #include <linux/leds.h>
 #include <linux/gpio.h>
 #include <linux/gpio/machine.h>
-#include <linux/usb/gpio_vbus.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
@@ -352,17 +351,18 @@ static inline void vpac270_uhc_init(void) {}
  * USB Gadget
  ******************************************************************************/
 #if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
-static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = {
-	.gpio_vbus		= GPIO41_VPAC270_UDC_DETECT,
-	.gpio_pullup		= -1,
+static struct gpiod_lookup_table vpac270_gpio_vbus_gpiod_table = {
+	.dev_id = "gpio-vbus",
+	.table = {
+		GPIO_LOOKUP("gpio-pxa", GPIO41_VPAC270_UDC_DETECT,
+			    "vbus", GPIO_ACTIVE_HIGH),
+		{ },
+	},
 };
 
 static struct platform_device vpac270_gpio_vbus = {
 	.name	= "gpio-vbus",
 	.id	= -1,
-	.dev	= {
-		.platform_data	= &vpac270_gpio_vbus_info,
-	},
 };
 
 static void vpac270_udc_command(int cmd)
@@ -381,6 +381,7 @@ static struct pxa2xx_udc_mach_info vpac270_udc_info __initdata = {
 static void __init vpac270_udc_init(void)
 {
 	pxa_set_udc_info(&vpac270_udc_info);
+	gpiod_add_lookup_table(&vpac270_gpio_vbus_gpiod_table);
 	platform_device_register(&vpac270_gpio_vbus);
 }
 #else
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index 686f0bb..c5c06d9 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -19,12 +19,12 @@
 
 
 
-menu "SAMSUNG S3C24XX SoCs Support"
+menu "Samsung S3C24XX SoCs Support"
 
 comment "S3C24XX SoCs"
 
 config CPU_S3C2410
-	bool "SAMSUNG S3C2410"
+	bool "Samsung S3C2410"
 	default y
 	select CPU_ARM920T
 	select S3C2410_COMMON_CLK
@@ -35,7 +35,7 @@
 	  of Samsung Mobile CPUs.
 
 config CPU_S3C2412
-	bool "SAMSUNG S3C2412"
+	bool "Samsung S3C2412"
 	select CPU_ARM926T
 	select S3C2412_COMMON_CLK
 	select S3C2412_PM if PM_SLEEP
@@ -43,7 +43,7 @@
 	  Support for the S3C2412 and S3C2413 SoCs from the S3C24XX line
 
 config CPU_S3C2416
-	bool "SAMSUNG S3C2416/S3C2450"
+	bool "Samsung S3C2416/S3C2450"
 	select CPU_ARM926T
 	select S3C2416_PM if PM_SLEEP
 	select S3C2443_COMMON_CLK
@@ -51,7 +51,7 @@
 	  Support for the S3C2416 SoC from the S3C24XX line
 
 config CPU_S3C2440
-	bool "SAMSUNG S3C2440"
+	bool "Samsung S3C2440"
 	select CPU_ARM920T
 	select S3C2410_COMMON_CLK
 	select S3C2410_PM if PM_SLEEP
@@ -59,7 +59,7 @@
 	  Support for S3C2440 Samsung Mobile CPU based systems.
 
 config CPU_S3C2442
-	bool "SAMSUNG S3C2442"
+	bool "Samsung S3C2442"
 	select CPU_ARM920T
 	select S3C2410_COMMON_CLK
 	select S3C2410_PM if PM_SLEEP
@@ -71,7 +71,7 @@
 	depends on CPU_S3C2440 || CPU_S3C2442
 
 config CPU_S3C2443
-	bool "SAMSUNG S3C2443"
+	bool "Samsung S3C2443"
 	select CPU_ARM920T
 	select S3C2443_COMMON_CLK
 	help
@@ -591,6 +591,6 @@
 	help
 	  Internal node for H1940 and related PM
 
-endmenu	# SAMSUNG S3C24XX SoCs Support
+endmenu	# Samsung S3C24XX SoCs Support
 
 endif	# ARCH_S3C24XX
diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c
index 29f9b34..03d8f27 100644
--- a/arch/arm/mach-s3c24xx/mach-rx1950.c
+++ b/arch/arm/mach-s3c24xx/mach-rx1950.c
@@ -377,6 +377,7 @@ static struct pwm_lookup rx1950_pwm_lookup[] = {
 };
 
 static struct pwm_device *lcd_pwm;
+static struct pwm_state lcd_pwm_state;
 
 static void rx1950_lcd_power(int enable)
 {
@@ -429,15 +430,16 @@ static void rx1950_lcd_power(int enable)
 
 		/* GPB1->OUTPUT, GPB1->0 */
 		gpio_direction_output(S3C2410_GPB(1), 0);
-		pwm_config(lcd_pwm, 0, LCD_PWM_PERIOD);
-		pwm_disable(lcd_pwm);
+
+		lcd_pwm_state.enabled = false;
+		pwm_apply_state(lcd_pwm, &lcd_pwm_state);
 
 		/* GPC0->0, GPC10->0 */
 		gpio_direction_output(S3C2410_GPC(0), 0);
 		gpio_direction_output(S3C2410_GPC(10), 0);
 	} else {
-		pwm_config(lcd_pwm, LCD_PWM_DUTY, LCD_PWM_PERIOD);
-		pwm_enable(lcd_pwm);
+		lcd_pwm_state.enabled = true;
+		pwm_apply_state(lcd_pwm, &lcd_pwm_state);
 
 		gpio_direction_output(S3C2410_GPC(0), 1);
 		gpio_direction_output(S3C2410_GPC(5), 1);
@@ -493,10 +495,13 @@ static int rx1950_backlight_init(struct device *dev)
 	}
 
 	/*
-	 * FIXME: pwm_apply_args() should be removed when switching to
-	 * the atomic PWM API.
+	 * This is only required to initialize .polarity; all other values are
+	 * fixed in this driver.
 	 */
-	pwm_apply_args(lcd_pwm);
+	pwm_init_state(lcd_pwm, &lcd_pwm_state);
+
+	lcd_pwm_state.period = LCD_PWM_PERIOD;
+	lcd_pwm_state.duty_cycle = LCD_PWM_DUTY;
 
 	rx1950_lcd_power(1);
 	rx1950_bl_power(1);
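
The rx1950 change above moves from pwm_config()/pwm_enable() to the atomic PWM API: the state is initialized once with pwm_init_state() and then mutated and applied as a whole. A minimal sketch with placeholder period/duty values:

#include <linux/pwm.h>

static void example_lcd_set_power(struct pwm_device *pwm, bool on)
{
	struct pwm_state state;

	pwm_init_state(pwm, &state);	/* period and polarity from the PWM args */
	state.period = 1000000;		/* ns, placeholder */
	state.duty_cycle = on ? 500000 : 0;
	state.enabled = on;
	pwm_apply_state(pwm, &state);	/* one atomic update */
}
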
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index 5700822..ac3e356 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -336,7 +336,6 @@
 
 config MACH_S3C64XX_DT
 	bool "Samsung S3C6400/S3C6410 machine using Device Tree"
-	select TIMER_OF
 	select CPU_S3C6400
 	select CPU_S3C6410
 	select PINCTRL
diff --git a/arch/arm/mach-s3c64xx/mach-smartq.c b/arch/arm/mach-s3c64xx/mach-smartq.c
index 951208f..829d5db 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq.c
@@ -13,7 +13,6 @@
 #include <linux/serial_core.h>
 #include <linux/serial_s3c.h>
 #include <linux/spi/spi_gpio.h>
-#include <linux/usb/gpio_vbus.h>
 #include <linux/platform_data/s3c-hsotg.h>
 
 #include <asm/mach-types.h>
@@ -124,15 +123,16 @@ static struct s3c2410_hcd_info smartq_usb_host_info = {
 	.enable_oc	= smartq_usb_host_enableoc,
 };
 
-static struct gpio_vbus_mach_info smartq_usb_otg_vbus_pdata = {
-	.gpio_vbus		= S3C64XX_GPL(9),
-	.gpio_pullup		= -1,
-	.gpio_vbus_inverted	= true,
+static struct gpiod_lookup_table smartq_usb_otg_vbus_gpiod_table = {
+	.dev_id = "gpio-vbus",
+	.table = {
+		GPIO_LOOKUP("GPL", 9, "vbus", GPIO_ACTIVE_LOW),
+		{ },
+	},
 };
 
 static struct platform_device smartq_usb_otg_vbus_dev = {
 	.name			= "gpio-vbus",
-	.dev.platform_data	= &smartq_usb_otg_vbus_pdata,
 };
 
 static struct pwm_lookup smartq_pwm_lookup[] = {
@@ -418,6 +418,7 @@ void __init smartq_machine_init(void)
 
 	pwm_add_table(smartq_pwm_lookup, ARRAY_SIZE(smartq_pwm_lookup));
 	gpiod_add_lookup_table(&smartq_lcd_control_gpiod_table);
+	gpiod_add_lookup_table(&smartq_usb_otg_vbus_gpiod_table);
 	platform_add_devices(smartq_devices, ARRAY_SIZE(smartq_devices));
 
 	gpiod_add_lookup_table(&smartq_audio_gpios);
diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S
index 3341a12..02cc6ff9 100644
--- a/arch/arm/mach-tegra/sleep-tegra30.S
+++ b/arch/arm/mach-tegra/sleep-tegra30.S
@@ -59,6 +59,9 @@
 #define CLK_RESET_PLLX_MISC3_IDDQ	3
 #define CLK_RESET_PLLM_MISC_IDDQ	5
 #define CLK_RESET_PLLC_MISC_IDDQ	26
+#define CLK_RESET_PLLP_RESHIFT		0x528
+#define CLK_RESET_PLLP_RESHIFT_DEFAULT	0x3b
+#define CLK_RESET_PLLP_RESHIFT_ENABLE	0x3
 
 #define CLK_RESET_CLK_SOURCE_MSELECT	0x3b4
 
@@ -370,6 +373,18 @@
 	pll_locked r1, r0, CLK_RESET_PLLC_BASE
 	pll_locked r1, r0, CLK_RESET_PLLX_BASE
 
+	tegra_get_soc_id TEGRA_APB_MISC_BASE, r1
+	cmp	r1, #TEGRA30
+	beq	1f
+
+	ldr	r1, [r0, #CLK_RESET_PLLP_BASE]
+	bic	r1, r1, #(1<<31)	@ disable PllP bypass
+	str	r1, [r0, #CLK_RESET_PLLP_BASE]
+
+	mov	r1, #CLK_RESET_PLLP_RESHIFT_DEFAULT
+	str	r1, [r0, #CLK_RESET_PLLP_RESHIFT]
+1:
+
 	mov32	r7, TEGRA_TMRUS_BASE
 	ldr	r1, [r7]
 	add	r1, r1, #LOCK_DELAY
@@ -630,9 +645,16 @@
 	str	r0, [r4, #PMC_PLLP_WB0_OVERRIDE]
 
 	/* disable PLLP, PLLA, PLLC and PLLX */
+	tegra_get_soc_id TEGRA_APB_MISC_BASE, r1
+	cmp	r1, #TEGRA30
 	ldr	r0, [r5, #CLK_RESET_PLLP_BASE]
+	orrne	r0, r0, #(1 << 31)	@ enable PllP bypass on fast cluster
 	bic	r0, r0, #(1 << 30)
 	str	r0, [r5, #CLK_RESET_PLLP_BASE]
+	beq	1f
+	mov	r0, #CLK_RESET_PLLP_RESHIFT_ENABLE
+	str	r0, [r5, #CLK_RESET_PLLP_RESHIFT]
+1:
 	ldr	r0, [r5, #CLK_RESET_PLLA_BASE]
 	bic	r0, r0, #(1 << 30)
 	str	r0, [r5, #CLK_RESET_PLLA_BASE]
@@ -648,8 +670,12 @@
 	pll_iddq_entry r1, r5, CLK_RESET_PLLX_MISC3, CLK_RESET_PLLX_MISC3_IDDQ
 _no_pll_in_iddq:
 
-	/* switch to CLKS */
-	mov	r0, #0	/* brust policy = 32KHz */
+	/*
+	 * Switch to clk_s (32KHz); bits 28:31=0
+	 * Enable burst on CPU IRQ; bit 24=1
+	 * Set IRQ burst clock source to clk_m; bits 10:8=0
+	 */
+	mov	r0, #(1 << 24)
 	str	r0, [r5, #CLK_RESET_SCLK_BURST]
 
 	ret	lr
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index a79fa3b..a1694d9 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -201,7 +201,7 @@ static unsigned long pin_highz_conf[] = {
 };
 
 /* Pin control settings */
-static struct pinctrl_map __initdata u300_pinmux_map[] = {
+static const struct pinctrl_map u300_pinmux_map[] = {
 	/* anonymous maps for chip power and EMIFs */
 	PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-u300", NULL, "power"),
 	PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-u300", NULL, "emif0"),
diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
index a10085b..68ec303 100644
--- a/arch/arm/mach-zynq/platsmp.c
+++ b/arch/arm/mach-zynq/platsmp.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
 #include <linux/irqchip/arm-gic.h>
 #include "common.h"
@@ -30,6 +31,7 @@ int zynq_cpun_start(u32 address, int cpu)
 {
 	u32 trampoline_code_size = &zynq_secondary_trampoline_end -
 						&zynq_secondary_trampoline;
+	u32 phy_cpuid = cpu_logical_map(cpu);
 
 	/* MS: Expectation that SLCR are directly map and accessible */
 	/* Not possible to jump to non aligned address */
@@ -39,7 +41,7 @@ int zynq_cpun_start(u32 address, int cpu)
 		u32 trampoline_size = &zynq_secondary_trampoline_jump -
 						&zynq_secondary_trampoline;
 
-		zynq_slcr_cpu_stop(cpu);
+		zynq_slcr_cpu_stop(phy_cpuid);
 		if (address) {
 			if (__pa(PAGE_OFFSET)) {
 				zero = ioremap(0, trampoline_code_size);
@@ -68,7 +70,7 @@ int zynq_cpun_start(u32 address, int cpu)
 			if (__pa(PAGE_OFFSET))
 				iounmap(zero);
 		}
-		zynq_slcr_cpu_start(cpu);
+		zynq_slcr_cpu_start(phy_cpuid);
 
 		return 0;
 	}
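
The zynq change above distinguishes the kernel's logical CPU number from the hardware CPU id: the SLCR start/stop helpers want the physical id, which cpu_logical_map() provides. A small illustrative helper (the printout is only an example):

#include <linux/kernel.h>
#include <asm/smp_plat.h>

static void example_report_cpu_mapping(unsigned int cpu)
{
	/* cpu is the kernel's logical index; cpu_logical_map() returns the
	 * hardware id that SoC-level per-CPU registers typically expect */
	pr_info("logical CPU%u -> hw id 0x%x\n", cpu,
		(unsigned int)cpu_logical_map(cpu));
}
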
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 788c5cf..84718ed 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -162,12 +162,12 @@ static ssize_t alignment_proc_write(struct file *file, const char __user *buffer
 	return count;
 }
 
-static const struct file_operations alignment_proc_fops = {
-	.open		= alignment_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= alignment_proc_write,
+static const struct proc_ops alignment_proc_ops = {
+	.proc_open	= alignment_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= alignment_proc_write,
 };
 #endif /* CONFIG_PROC_FS */
 
@@ -1016,7 +1016,7 @@ static int __init alignment_init(void)
 	struct proc_dir_entry *res;
 
 	res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
-			  &alignment_proc_fops);
+			  &alignment_proc_ops);
 	if (!res)
 		return -ENOMEM;
 #endif
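
The alignment.c hunk above is one instance of the procfs switch from file_operations to the dedicated proc_ops. A self-contained sketch of the new style for a read-only single_open() file; names and the proc path are illustrative:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example\n");
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct proc_ops example_proc_ops = {
	.proc_open	= example_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init example_proc_init(void)
{
	/* Create /proc/example with 0444 permissions */
	return proc_create("example", 0444, NULL, &example_proc_ops) ? 0 : -ENOMEM;
}
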
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e822af0..9414d72f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -221,7 +221,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
 
 static int __dma_supported(struct device *dev, u64 mask, bool warn)
 {
-	unsigned long max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
 
 	/*
 	 * Translate the device's DMA mask to a PFN limit.  This
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3ef2041..054be44 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -324,7 +324,7 @@ static inline void poison_init_mem(void *s, size_t count)
 		*p++ = 0xe7fddef0;
 }
 
-static inline void
+static inline void __init
 free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct page *start_pg, *end_pg;
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 08c9941..7d85999 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -65,18 +65,9 @@ enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
 static struct omap_system_dma_plat_info *p;
 static struct omap_dma_dev_attr *d;
 static void omap_clear_dma(int lch);
-static int omap_dma_set_prio_lch(int lch, unsigned char read_prio,
-				 unsigned char write_prio);
 static int enable_1510_mode;
 static u32 errata;
 
-static struct omap_dma_global_context_registers {
-	u32 dma_irqenable_l0;
-	u32 dma_irqenable_l1;
-	u32 dma_ocp_sysconfig;
-	u32 dma_gcr;
-} omap_dma_global_context;
-
 struct dma_link_info {
 	int *linked_dmach_q;
 	int no_of_lchs_linked;
@@ -90,42 +81,6 @@ struct dma_link_info {
 
 };
 
-static struct dma_link_info *dma_linked_lch;
-
-#ifndef CONFIG_ARCH_OMAP1
-
-/* Chain handling macros */
-#define OMAP_DMA_CHAIN_QINIT(chain_id)					\
-	do {								\
-		dma_linked_lch[chain_id].q_head =			\
-		dma_linked_lch[chain_id].q_tail =			\
-		dma_linked_lch[chain_id].q_count = 0;			\
-	} while (0)
-#define OMAP_DMA_CHAIN_QFULL(chain_id)					\
-		(dma_linked_lch[chain_id].no_of_lchs_linked ==		\
-		dma_linked_lch[chain_id].q_count)
-#define OMAP_DMA_CHAIN_QLAST(chain_id)					\
-	do {								\
-		((dma_linked_lch[chain_id].no_of_lchs_linked-1) ==	\
-		dma_linked_lch[chain_id].q_count)			\
-	} while (0)
-#define OMAP_DMA_CHAIN_QEMPTY(chain_id)					\
-		(0 == dma_linked_lch[chain_id].q_count)
-#define __OMAP_DMA_CHAIN_INCQ(end)					\
-	((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)
-#define OMAP_DMA_CHAIN_INCQHEAD(chain_id)				\
-	do {								\
-		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head);	\
-		dma_linked_lch[chain_id].q_count--;			\
-	} while (0)
-
-#define OMAP_DMA_CHAIN_INCQTAIL(chain_id)				\
-	do {								\
-		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail);	\
-		dma_linked_lch[chain_id].q_count++; \
-	} while (0)
-#endif
-
 static int dma_lch_count;
 static int dma_chan_count;
 static int omap_dma_reserve_channels;
@@ -137,9 +92,6 @@ static inline void disable_lnk(int lch);
 static void omap_disable_channel_irq(int lch);
 static inline void omap_enable_channel_irq(int lch);
 
-#define REVISIT_24XX()		printk(KERN_ERR "FIXME: no %s on 24xx\n", \
-						__func__);
-
 #ifdef CONFIG_ARCH_OMAP15XX
 /* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
 static int omap_dma_in_1510_mode(void)
@@ -278,19 +230,6 @@ void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
 }
 EXPORT_SYMBOL(omap_set_dma_transfer_params);
 
-void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
-{
-	if (dma_omap2plus()) {
-		u32 csdp;
-
-		csdp = p->dma_read(CSDP, lch);
-		csdp &= ~(0x3 << 16);
-		csdp |= (mode << 16);
-		p->dma_write(csdp, CSDP, lch);
-	}
-}
-EXPORT_SYMBOL(omap_set_dma_write_mode);
-
 void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
 {
 	if (dma_omap1() && !dma_omap15xx()) {
@@ -332,25 +271,6 @@ void omap_set_dma_src_params(int lch, int src_port, int src_amode,
 }
 EXPORT_SYMBOL(omap_set_dma_src_params);
 
-void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
-{
-	omap_set_dma_transfer_params(lch, params->data_type,
-				     params->elem_count, params->frame_count,
-				     params->sync_mode, params->trigger,
-				     params->src_or_dst_synch);
-	omap_set_dma_src_params(lch, params->src_port,
-				params->src_amode, params->src_start,
-				params->src_ei, params->src_fi);
-
-	omap_set_dma_dest_params(lch, params->dst_port,
-				 params->dst_amode, params->dst_start,
-				 params->dst_ei, params->dst_fi);
-	if (params->read_prio || params->write_prio)
-		omap_dma_set_prio_lch(lch, params->read_prio,
-				      params->write_prio);
-}
-EXPORT_SYMBOL(omap_set_dma_params);
-
 void omap_set_dma_src_data_pack(int lch, int enable)
 {
 	u32 l;
@@ -507,12 +427,6 @@ static inline void omap_disable_channel_irq(int lch)
 		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
 }
 
-void omap_enable_dma_irq(int lch, u16 bits)
-{
-	dma_chan[lch].enabled_irqs |= bits;
-}
-EXPORT_SYMBOL(omap_enable_dma_irq);
-
 void omap_disable_dma_irq(int lch, u16 bits)
 {
 	dma_chan[lch].enabled_irqs &= ~bits;
@@ -532,12 +446,6 @@ static inline void enable_lnk(int lch)
 	if (dma_chan[lch].next_lch != -1)
 		l = dma_chan[lch].next_lch | (1 << 15);
 
-#ifndef CONFIG_ARCH_OMAP1
-	if (dma_omap2plus())
-		if (dma_chan[lch].next_linked_ch != -1)
-			l = dma_chan[lch].next_linked_ch | (1 << 15);
-#endif
-
 	p->dma_write(l, CLNK_CTRL, lch);
 }
 
@@ -564,42 +472,6 @@ static inline void disable_lnk(int lch)
 	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
 }
 
-static inline void omap2_enable_irq_lch(int lch)
-{
-	u32 val;
-	unsigned long flags;
-
-	if (dma_omap1())
-		return;
-
-	spin_lock_irqsave(&dma_chan_lock, flags);
-	/* clear IRQ STATUS */
-	p->dma_write(1 << lch, IRQSTATUS_L0, lch);
-	/* Enable interrupt */
-	val = p->dma_read(IRQENABLE_L0, lch);
-	val |= 1 << lch;
-	p->dma_write(val, IRQENABLE_L0, lch);
-	spin_unlock_irqrestore(&dma_chan_lock, flags);
-}
-
-static inline void omap2_disable_irq_lch(int lch)
-{
-	u32 val;
-	unsigned long flags;
-
-	if (dma_omap1())
-		return;
-
-	spin_lock_irqsave(&dma_chan_lock, flags);
-	/* Disable interrupt */
-	val = p->dma_read(IRQENABLE_L0, lch);
-	val &= ~(1 << lch);
-	p->dma_write(val, IRQENABLE_L0, lch);
-	/* clear IRQ STATUS */
-	p->dma_write(1 << lch, IRQSTATUS_L0, lch);
-	spin_unlock_irqrestore(&dma_chan_lock, flags);
-}
-
 int omap_request_dma(int dev_id, const char *dev_name,
 		     void (*callback)(int lch, u16 ch_status, void *data),
 		     void *data, int *dma_ch_out)
@@ -628,9 +500,6 @@ int omap_request_dma(int dev_id, const char *dev_name,
 	if (p->clear_lch_regs)
 		p->clear_lch_regs(free_ch);
 
-	if (dma_omap2plus())
-		omap_clear_dma(free_ch);
-
 	spin_unlock_irqrestore(&dma_chan_lock, flags);
 
 	chan->dev_name = dev_name;
@@ -638,20 +507,10 @@ int omap_request_dma(int dev_id, const char *dev_name,
 	chan->data = data;
 	chan->flags = 0;
 
-#ifndef CONFIG_ARCH_OMAP1
-	if (dma_omap2plus()) {
-		chan->chain_id = -1;
-		chan->next_linked_ch = -1;
-	}
-#endif
-
 	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
 
 	if (dma_omap1())
 		chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
-	else if (dma_omap2plus())
-		chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
-			OMAP2_DMA_TRANS_ERR_IRQ;
 
 	if (dma_omap16xx()) {
 		/* If the sync device is set, configure it dynamically. */
@@ -668,11 +527,6 @@ int omap_request_dma(int dev_id, const char *dev_name,
 		p->dma_write(dev_id, CCR, free_ch);
 	}
 
-	if (dma_omap2plus()) {
-		omap_enable_channel_irq(free_ch);
-		omap2_enable_irq_lch(free_ch);
-	}
-
 	*dma_ch_out = free_ch;
 
 	return 0;
@@ -689,20 +543,12 @@ void omap_free_dma(int lch)
 		return;
 	}
 
-	/* Disable interrupt for logical channel */
-	if (dma_omap2plus())
-		omap2_disable_irq_lch(lch);
-
 	/* Disable all DMA interrupts for the channel. */
 	omap_disable_channel_irq(lch);
 
 	/* Make sure the DMA transfer is stopped. */
 	p->dma_write(0, CCR, lch);
 
-	/* Clear registers */
-	if (dma_omap2plus())
-		omap_clear_dma(lch);
-
 	spin_lock_irqsave(&dma_chan_lock, flags);
 	dma_chan[lch].dev_id = -1;
 	dma_chan[lch].next_lch = -1;
@@ -711,71 +557,6 @@ void omap_free_dma(int lch)
 }
 EXPORT_SYMBOL(omap_free_dma);
 
-/**
- * @brief omap_dma_set_global_params : Set global priority settings for dma
- *
- * @param arb_rate
- * @param max_fifo_depth
- * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
- * 						   DMA_THREAD_RESERVE_ONET
- * 						   DMA_THREAD_RESERVE_TWOT
- * 						   DMA_THREAD_RESERVE_THREET
- */
-void
-omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
-{
-	u32 reg;
-
-	if (dma_omap1()) {
-		printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
-		return;
-	}
-
-	if (max_fifo_depth == 0)
-		max_fifo_depth = 1;
-	if (arb_rate == 0)
-		arb_rate = 1;
-
-	reg = 0xff & max_fifo_depth;
-	reg |= (0x3 & tparams) << 12;
-	reg |= (arb_rate & 0xff) << 16;
-
-	p->dma_write(reg, GCR, 0);
-}
-EXPORT_SYMBOL(omap_dma_set_global_params);
-
-/**
- * @brief omap_dma_set_prio_lch : Set channel wise priority settings
- *
- * @param lch
- * @param read_prio - Read priority
- * @param write_prio - Write priority
- * Both of the above can be set with one of the following values :
- * 	DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
- */
-static int
-omap_dma_set_prio_lch(int lch, unsigned char read_prio,
-		      unsigned char write_prio)
-{
-	u32 l;
-
-	if (unlikely((lch < 0 || lch >= dma_lch_count))) {
-		printk(KERN_ERR "Invalid channel id\n");
-		return -EINVAL;
-	}
-	l = p->dma_read(CCR, lch);
-	l &= ~((1 << 6) | (1 << 26));
-	if (d->dev_caps & IS_RW_PRIORITY)
-		l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
-	else
-		l |= ((read_prio & 0x1) << 6);
-
-	p->dma_write(l, CCR, lch);
-
-	return 0;
-}
-
-
 /*
  * Clears any DMA state so the DMA engine is ready to restart with new buffers
  * through omap_start_dma(). Any buffers in flight are discarded.
@@ -926,29 +707,6 @@ EXPORT_SYMBOL(omap_stop_dma);
  * Allows changing the DMA callback function or data. This may be needed if
  * the driver shares a single DMA channel for multiple dma triggers.
  */
-int omap_set_dma_callback(int lch,
-			  void (*callback)(int lch, u16 ch_status, void *data),
-			  void *data)
-{
-	unsigned long flags;
-
-	if (lch < 0)
-		return -ENODEV;
-
-	spin_lock_irqsave(&dma_chan_lock, flags);
-	if (dma_chan[lch].dev_id == -1) {
-		printk(KERN_ERR "DMA callback for not set for free channel\n");
-		spin_unlock_irqrestore(&dma_chan_lock, flags);
-		return -EINVAL;
-	}
-	dma_chan[lch].callback = callback;
-	dma_chan[lch].data = data;
-	spin_unlock_irqrestore(&dma_chan_lock, flags);
-
-	return 0;
-}
-EXPORT_SYMBOL(omap_set_dma_callback);
-
 /*
  * Returns current physical source address for the given DMA channel.
  * If the channel is running the caller must disable interrupts prior calling
@@ -1048,34 +806,6 @@ int omap_dma_running(void)
 	return 0;
 }
 
-/*
- * lch_queue DMA will start right after lch_head one is finished.
- * For this DMA link to start, you still need to start (see omap_start_dma)
- * the first one. That will fire up the entire queue.
- */
-void omap_dma_link_lch(int lch_head, int lch_queue)
-{
-	if (omap_dma_in_1510_mode()) {
-		if (lch_head == lch_queue) {
-			p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
-								CCR, lch_head);
-			return;
-		}
-		printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
-		BUG();
-		return;
-	}
-
-	if ((dma_chan[lch_head].dev_id == -1) ||
-	    (dma_chan[lch_queue].dev_id == -1)) {
-		pr_err("omap_dma: trying to link non requested channels\n");
-		dump_stack();
-	}
-
-	dma_chan[lch_head].next_lch = lch_queue;
-}
-EXPORT_SYMBOL(omap_dma_link_lch);
-
 /*----------------------------------------------------------------------------*/
 
 #ifdef CONFIG_ARCH_OMAP1
@@ -1136,145 +866,6 @@ static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
 #define omap1_dma_irq_handler	NULL
 #endif
 
-#ifdef CONFIG_ARCH_OMAP2PLUS
-
-static int omap2_dma_handle_ch(int ch)
-{
-	u32 status = p->dma_read(CSR, ch);
-
-	if (!status) {
-		if (printk_ratelimit())
-			pr_warn("Spurious DMA IRQ for lch %d\n", ch);
-		p->dma_write(1 << ch, IRQSTATUS_L0, ch);
-		return 0;
-	}
-	if (unlikely(dma_chan[ch].dev_id == -1)) {
-		if (printk_ratelimit())
-			pr_warn("IRQ %04x for non-allocated DMA channel %d\n",
-				status, ch);
-		return 0;
-	}
-	if (unlikely(status & OMAP_DMA_DROP_IRQ))
-		pr_info("DMA synchronization event drop occurred with device %d\n",
-			dma_chan[ch].dev_id);
-	if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
-		printk(KERN_INFO "DMA transaction error with device %d\n",
-		       dma_chan[ch].dev_id);
-		if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
-			u32 ccr;
-
-			ccr = p->dma_read(CCR, ch);
-			ccr &= ~OMAP_DMA_CCR_EN;
-			p->dma_write(ccr, CCR, ch);
-			dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
-		}
-	}
-	if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
-		printk(KERN_INFO "DMA secure error with device %d\n",
-		       dma_chan[ch].dev_id);
-	if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
-		printk(KERN_INFO "DMA misaligned error with device %d\n",
-		       dma_chan[ch].dev_id);
-
-	p->dma_write(status, CSR, ch);
-	p->dma_write(1 << ch, IRQSTATUS_L0, ch);
-	/* read back the register to flush the write */
-	p->dma_read(IRQSTATUS_L0, ch);
-
-	/* If the ch is not chained then chain_id will be -1 */
-	if (dma_chan[ch].chain_id != -1) {
-		int chain_id = dma_chan[ch].chain_id;
-		dma_chan[ch].state = DMA_CH_NOTSTARTED;
-		if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
-			dma_chan[dma_chan[ch].next_linked_ch].state =
-							DMA_CH_STARTED;
-		if (dma_linked_lch[chain_id].chain_mode ==
-						OMAP_DMA_DYNAMIC_CHAIN)
-			disable_lnk(ch);
-
-		if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
-			OMAP_DMA_CHAIN_INCQHEAD(chain_id);
-
-		status = p->dma_read(CSR, ch);
-		p->dma_write(status, CSR, ch);
-	}
-
-	if (likely(dma_chan[ch].callback != NULL))
-		dma_chan[ch].callback(ch, status, dma_chan[ch].data);
-
-	return 0;
-}
-
-/* STATUS register count is from 1-32 while our is 0-31 */
-static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
-{
-	u32 val, enable_reg;
-	int i;
-
-	val = p->dma_read(IRQSTATUS_L0, 0);
-	if (val == 0) {
-		if (printk_ratelimit())
-			printk(KERN_WARNING "Spurious DMA IRQ\n");
-		return IRQ_HANDLED;
-	}
-	enable_reg = p->dma_read(IRQENABLE_L0, 0);
-	val &= enable_reg; /* Dispatch only relevant interrupts */
-	for (i = 0; i < dma_lch_count && val != 0; i++) {
-		if (val & 1)
-			omap2_dma_handle_ch(i);
-		val >>= 1;
-	}
-
-	return IRQ_HANDLED;
-}
-
-static struct irqaction omap24xx_dma_irq = {
-	.name = "DMA",
-	.handler = omap2_dma_irq_handler,
-};
-
-#else
-static struct irqaction omap24xx_dma_irq;
-#endif
-
-/*----------------------------------------------------------------------------*/
-
-/*
- * Note that we are currently using only IRQENABLE_L0 and L1.
- * As the DSP may be using IRQENABLE_L2 and L3, let's not
- * touch those for now.
- */
-void omap_dma_global_context_save(void)
-{
-	omap_dma_global_context.dma_irqenable_l0 =
-		p->dma_read(IRQENABLE_L0, 0);
-	omap_dma_global_context.dma_irqenable_l1 =
-		p->dma_read(IRQENABLE_L1, 0);
-	omap_dma_global_context.dma_ocp_sysconfig =
-		p->dma_read(OCP_SYSCONFIG, 0);
-	omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
-}
-
-void omap_dma_global_context_restore(void)
-{
-	int ch;
-
-	p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
-	p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
-		OCP_SYSCONFIG, 0);
-	p->dma_write(omap_dma_global_context.dma_irqenable_l0,
-		IRQENABLE_L0, 0);
-	p->dma_write(omap_dma_global_context.dma_irqenable_l1,
-		IRQENABLE_L1, 0);
-
-	if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
-		p->dma_write(0x3 , IRQSTATUS_L0, 0);
-
-	for (ch = 0; ch < dma_chan_count; ch++)
-		if (dma_chan[ch].dev_id != -1)
-			omap_clear_dma(ch);
-}
-
 struct omap_system_dma_plat_info *omap_get_plat_info(void)
 {
 	return p;
@@ -1286,7 +877,6 @@ static int omap_system_dma_probe(struct platform_device *pdev)
 	int ch, ret = 0;
 	int dma_irq;
 	char irq_name[4];
-	int irq_rel;
 
 	p = pdev->dev.platform_data;
 	if (!p) {
@@ -1312,21 +902,9 @@ static int omap_system_dma_probe(struct platform_device *pdev)
 	if (!dma_chan)
 		return -ENOMEM;
 
-	if (dma_omap2plus()) {
-		dma_linked_lch = kcalloc(dma_lch_count,
-					 sizeof(*dma_linked_lch),
-					 GFP_KERNEL);
-		if (!dma_linked_lch) {
-			ret = -ENOMEM;
-			goto exit_dma_lch_fail;
-		}
-	}
-
 	spin_lock_init(&dma_chan_lock);
 	for (ch = 0; ch < dma_chan_count; ch++) {
 		omap_clear_dma(ch);
-		if (dma_omap2plus())
-			omap2_disable_irq_lch(ch);
 
 		dma_chan[ch].dev_id = -1;
 		dma_chan[ch].next_lch = -1;
@@ -1359,26 +937,6 @@ static int omap_system_dma_probe(struct platform_device *pdev)
 		}
 	}
 
-	if (d->dev_caps & IS_RW_PRIORITY)
-		omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
-				DMA_DEFAULT_FIFO_DEPTH, 0);
-
-	if (dma_omap2plus() && !(d->dev_caps & DMA_ENGINE_HANDLE_IRQ)) {
-		strcpy(irq_name, "0");
-		dma_irq = platform_get_irq_byname(pdev, irq_name);
-		if (dma_irq < 0) {
-			dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
-			ret = dma_irq;
-			goto exit_dma_lch_fail;
-		}
-		ret = setup_irq(dma_irq, &omap24xx_dma_irq);
-		if (ret) {
-			dev_err(&pdev->dev, "set_up failed for IRQ %d for DMA (error %d)\n",
-				dma_irq, ret);
-			goto exit_dma_lch_fail;
-		}
-	}
-
 	/* reserve dma channels 0 and 1 in high security devices on 34xx */
 	if (d->dev_caps & HS_CHANNELS_RESERVED) {
 		pr_info("Reserving DMA channels 0 and 1 for HS ROM code\n");
@@ -1389,34 +947,21 @@ static int omap_system_dma_probe(struct platform_device *pdev)
 	return 0;
 
 exit_dma_irq_fail:
-	dev_err(&pdev->dev, "unable to request IRQ %d for DMA (error %d)\n",
-		dma_irq, ret);
-	for (irq_rel = 0; irq_rel < ch;	irq_rel++) {
-		dma_irq = platform_get_irq(pdev, irq_rel);
-		free_irq(dma_irq, (void *)(irq_rel + 1));
-	}
-
-exit_dma_lch_fail:
 	return ret;
 }
 
 static int omap_system_dma_remove(struct platform_device *pdev)
 {
-	int dma_irq;
+	int dma_irq, irq_rel = 0;
 
-	if (dma_omap2plus()) {
-		char irq_name[4];
-		strcpy(irq_name, "0");
-		dma_irq = platform_get_irq_byname(pdev, irq_name);
-		if (dma_irq >= 0)
-			remove_irq(dma_irq, &omap24xx_dma_irq);
-	} else {
-		int irq_rel = 0;
-		for ( ; irq_rel < dma_chan_count; irq_rel++) {
-			dma_irq = platform_get_irq(pdev, irq_rel);
-			free_irq(dma_irq, (void *)(irq_rel + 1));
-		}
+	if (dma_omap2plus())
+		return 0;
+
+	for ( ; irq_rel < dma_chan_count; irq_rel++) {
+		dma_irq = platform_get_irq(pdev, irq_rel);
+		free_irq(dma_irq, (void *)(irq_rel + 1));
 	}
+
 	return 0;
 }
 
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index ee3d5c9..4f7b272 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -40,7 +40,7 @@ enum s3c_cpu_type {
 	TYPE_ADCV11, /* S3C2443 */
 	TYPE_ADCV12, /* S3C2416, S3C2450 */
 	TYPE_ADCV2, /* S3C64XX */
-	TYPE_ADCV3, /* S5PV210, S5PC110, EXYNOS4210 */
+	TYPE_ADCV3, /* S5PV210, S5PC110, Exynos4210 */
 };
 
 struct s3c_adc_client {
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 1602f6d..089a176 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -3,7 +3,7 @@
 // Copyright (c) 2011 Samsung Electronics Co., Ltd.
 //		http://www.samsung.com
 //
-// Base SAMSUNG platform device definitions
+// Base Samsung platform device definitions
 
 #include <linux/kernel.h>
 #include <linux/types.h>
diff --git a/arch/arm/plat-samsung/gpio-samsung.c b/arch/arm/plat-samsung/gpio-samsung.c
index f66c820..8955fd6 100644
--- a/arch/arm/plat-samsung/gpio-samsung.c
+++ b/arch/arm/plat-samsung/gpio-samsung.c
@@ -8,7 +8,7 @@
 //      Ben Dooks <ben@simtec.co.uk>
 //      http://armlinux.simtec.co.uk/
 //
-// SAMSUNG - GPIOlib support
+// Samsung - GPIOlib support
 
 #include <linux/kernel.h>
 #include <linux/irq.h>
diff --git a/arch/arm/plat-samsung/include/plat/samsung-time.h b/arch/arm/plat-samsung/include/plat/samsung-time.h
index d16eefe..32ab086 100644
--- a/arch/arm/plat-samsung/include/plat/samsung-time.h
+++ b/arch/arm/plat-samsung/include/plat/samsung-time.h
@@ -9,7 +9,7 @@
 #ifndef __ASM_PLAT_SAMSUNG_TIME_H
 #define __ASM_PLAT_SAMSUNG_TIME_H __FILE__
 
-/* SAMSUNG HR-Timer Clock mode */
+/* Samsung HR-Timer Clock mode */
 enum samsung_timer_mode {
 	SAMSUNG_PWM0,
 	SAMSUNG_PWM1,
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 6da7dc4..4d1cf74 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -449,3 +449,5 @@
 433	common	fspick				sys_fspick
 434	common	pidfd_open			sys_pidfd_open
 435	common	clone3				sys_clone3
+437	common	openat2				sys_openat2
+438	common	pidfd_getfd			sys_pidfd_getfd
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index de238b5..0b30e88 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -104,6 +104,7 @@
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IRQ_SHOW_LEVEL
 	select GENERIC_PCI_IOMAP
+	select GENERIC_PTDUMP
 	select GENERIC_SCHED_CLOCK
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_STRNCPY_FROM_USER
@@ -164,7 +165,7 @@
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_FUTEX_CMPXCHG if FUTEX
-	select HAVE_RCU_TABLE_FREE
+	select MMU_GATHER_RCU_TABLE_FREE
 	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index cf09010..1c906d9 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -1,22 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-config ARM64_PTDUMP_CORE
-	def_bool n
-
-config ARM64_PTDUMP_DEBUGFS
-	bool "Export kernel pagetable layout to userspace via debugfs"
-	depends on DEBUG_KERNEL
-	select ARM64_PTDUMP_CORE
-	select DEBUG_FS
-        help
-	  Say Y here if you want to show the kernel pagetable layout in a
-	  debugfs file. This information is only useful for kernel developers
-	  who are working in architecture specific areas of the kernel.
-	  It is probably not a good idea to enable this feature in a production
-	  kernel.
-
-	  If in doubt, say N.
-
 config PID_IN_CONTEXTIDR
 	bool "Write the current PID to the CONTEXTIDR register"
 	help
@@ -42,7 +25,7 @@
 
 config DEBUG_WX
 	bool "Warn on W+X mappings at boot"
-	select ARM64_PTDUMP_CORE
+	select PTDUMP_CORE
 	---help---
 	  Generate a warning if any W+X mappings are found at boot.
 
diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
index d241802..cf4f786 100644
--- a/arch/arm64/boot/dts/allwinner/Makefile
+++ b/arch/arm64/boot/dts/allwinner/Makefile
@@ -15,6 +15,8 @@
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-bananapi-m2-plus-v1.2.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-emlid-neutis-n5-devboard.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-libretech-all-h3-cc.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-libretech-all-h3-it.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-libretech-all-h5-cc.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-nanopi-neo2.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-nanopi-neo-plus2.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-pc2.dtb
@@ -26,4 +28,5 @@
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h6-orangepi-lite2.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h6-orangepi-one-plus.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h6-pine-h64.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h6-pine-h64-model-b.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h6-tanix-tx6.dtb
diff --git a/arch/arm64/boot/dts/allwinner/axp803.dtsi b/arch/arm64/boot/dts/allwinner/axp803.dtsi
index f0349ef..10e9186 100644
--- a/arch/arm64/boot/dts/allwinner/axp803.dtsi
+++ b/arch/arm64/boot/dts/allwinner/axp803.dtsi
@@ -1,44 +1,5 @@
-/*
- * Copyright 2017 Icenowy Zheng <icenowy@aosc.xyz>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright 2017 Icenowy Zheng <icenowy@aosc.xyz>
 
 /*
  * AXP803 Integrated Power Management Chip
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts
index 5634245..c7bd73f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts
@@ -1,12 +1,11 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-/*
- * Copyright (C) 2018 Amarula Solutions B.V.
- * Author: Jagan Teki <jagan@amarulasolutions.com>
- */
+// Copyright (C) 2018 Amarula Solutions B.V.
+// Author: Jagan Teki <jagan@amarulasolutions.com>
 
 /dts-v1/;
 
 #include "sun50i-a64.dtsi"
+#include "sun50i-a64-cpu-opp.dtsi"
 
 #include <dt-bindings/gpio/gpio.h>
 
@@ -65,6 +64,22 @@ wifi_pwrseq: wifi-pwrseq {
 	};
 };
 
+&cpu0 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu1 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu2 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu3 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
 &csi {
 	status = "okay";
 
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
index 208373e..883f217 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
@@ -1,48 +1,10 @@
-/*
- * Copyright (c) 2016 ARM Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This library is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This library is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2016 ARM Ltd.
 
 /dts-v1/;
 
 #include "sun50i-a64.dtsi"
+#include "sun50i-a64-cpu-opp.dtsi"
 
 #include <dt-bindings/gpio/gpio.h>
 
@@ -108,6 +70,22 @@ &codec_analog {
 	status = "okay";
 };
 
+&cpu0 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu1 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu2 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu3 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
 &dai {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-cpu-opp.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-cpu-opp.dtsi
new file mode 100644
index 0000000..578c374
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-cpu-opp.dtsi
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Vasily Khoruzhick <anarsoul@gmail.com>
+ */
+
+/ {
+	cpu0_opp_table: opp_table0 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp-648000000 {
+			opp-hz = /bits/ 64 <648000000>;
+			opp-microvolt = <1040000>;
+			clock-latency-ns = <244144>; /* 8 32k periods */
+		};
+
+		opp-816000000 {
+			opp-hz = /bits/ 64 <816000000>;
+			opp-microvolt = <1100000>;
+			clock-latency-ns = <244144>; /* 8 32k periods */
+		};
+
+		opp-912000000 {
+			opp-hz = /bits/ 64 <912000000>;
+			opp-microvolt = <1120000>;
+			clock-latency-ns = <244144>; /* 8 32k periods */
+		};
+
+		opp-960000000 {
+			opp-hz = /bits/ 64 <960000000>;
+			opp-microvolt = <1160000>;
+			clock-latency-ns = <244144>; /* 8 32k periods */
+		};
+
+		opp-1008000000 {
+			opp-hz = /bits/ 64 <1008000000>;
+			opp-microvolt = <1200000>;
+			clock-latency-ns = <244144>; /* 8 32k periods */
+		};
+
+		opp-1056000000 {
+			opp-hz = /bits/ 64 <1056000000>;
+			opp-microvolt = <1240000>;
+			clock-latency-ns = <244144>; /* 8 32k periods */
+		};
+
+		opp-1104000000 {
+			opp-hz = /bits/ 64 <1104000000>;
+			opp-microvolt = <1260000>;
+			clock-latency-ns = <244144>; /* 8 32k periods */
+		};
+
+		opp-1152000000 {
+			opp-hz = /bits/ 64 <1152000000>;
+			opp-microvolt = <1300000>;
+			clock-latency-ns = <244144>; /* 8 32k periods */
+		};
+	};
+};
+
+&cpu0 {
+	operating-points-v2 = <&cpu0_opp_table>;
+};
+
+&cpu1 {
+	operating-points-v2 = <&cpu0_opp_table>;
+};
+
+&cpu2 {
+	operating-points-v2 = <&cpu0_opp_table>;
+};
+
+&cpu3 {
+	operating-points-v2 = <&cpu0_opp_table>;
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
index 9b9d915..e58db8a 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
@@ -1,48 +1,10 @@
-/*
- * Copyright (C) 2017 Jagan Teki <jteki@openedev.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This library is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This library is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2017 Jagan Teki <jteki@openedev.com>
 
 /dts-v1/;
 
 #include "sun50i-a64.dtsi"
+#include "sun50i-a64-cpu-opp.dtsi"
 
 #include <dt-bindings/gpio/gpio.h>
 
@@ -87,6 +49,22 @@ wifi_pwrseq: wifi_pwrseq {
 	};
 };
 
+&cpu0 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu1 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu2 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu3 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
 &de {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-oceanic-5205-5inmfd.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-oceanic-5205-5inmfd.dts
index 787ebd8..577f9e1 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-oceanic-5205-5inmfd.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-oceanic-5205-5inmfd.dts
@@ -1,9 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-/*
- * Copyright (C) 2019 Oceanic Systems (UK) Ltd.
- * Copyright (C) 2019 Amarula Solutions B.V.
- * Author: Jagan Teki <jagan@amarulasolutions.com>
- */
+// Copyright (C) 2019 Oceanic Systems (UK) Ltd.
+// Copyright (C) 2019 Amarula Solutions B.V.
+// Author: Jagan Teki <jagan@amarulasolutions.com>
 
 /dts-v1/;
 
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts
index 121e6cc..efb2084 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts
@@ -1,8 +1,6 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-/*
- * Copyright (C) 2018 Martin Ayotte <martinayotte@gmail.com>
- * Copyright (C) 2019 Sunil Mohan Adapa <sunil@medhas.org>
- */
+// Copyright (C) 2018 Martin Ayotte <martinayotte@gmail.com>
+// Copyright (C) 2019 Sunil Mohan Adapa <sunil@medhas.org>
 
 #include "sun50i-a64-olinuxino.dts"
 
@@ -21,3 +19,7 @@ &mmc2 {
 	cap-mmc-hw-reset;
 	status = "okay";
 };
+
+&pio {
+	vcc-pc-supply = <&reg_eldo1>;
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
index 393c194..6dfafa1 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
@@ -1,48 +1,10 @@
-/*
- * Copyright (C) 2017 Jagan Teki <jteki@openedev.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This library is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This library is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2017 Jagan Teki <jteki@openedev.com>
 
 /dts-v1/;
 
 #include "sun50i-a64.dtsi"
+#include "sun50i-a64-cpu-opp.dtsi"
 
 #include <dt-bindings/gpio/gpio.h>
 
@@ -87,6 +49,22 @@ wifi_pwrseq: wifi_pwrseq {
 	};
 };
 
+&cpu0 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu1 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu2 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu3 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
 &de {
 	status = "okay";
 };
@@ -163,6 +141,23 @@ &ohci1 {
 	status = "okay";
 };
 
+&pio {
+	vcc-pc-supply = <&reg_dcdc1>;
+	vcc-pd-supply = <&reg_dcdc1>;
+	vcc-pe-supply = <&reg_aldo1>;
+	vcc-pg-supply = <&reg_dldo4>;
+};
+
+&r_pio {
+	/*
+	 * FIXME: We can't add that supply for now since it would
+	 * create a circular dependency between pinctrl, the regulator
+	 * and the RSB Bus.
+	 *
+	 * vcc-pl-supply = <&reg_aldo2>;
+	 */
+};
+
 &r_rsb {
 	status = "okay";
 
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
index f54a415..fde9c7a 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
@@ -1,49 +1,11 @@
-/*
- * Copyright (C) 2017 Jagan Teki <jteki@openedev.com>
- * Copyright (C) 2017-2018 Samuel Holland <samuel@sholland.org>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This library is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This library is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2017 Jagan Teki <jteki@openedev.com>
+// Copyright (C) 2017-2018 Samuel Holland <samuel@sholland.org>
 
 /dts-v1/;
 
 #include "sun50i-a64.dtsi"
+#include "sun50i-a64-cpu-opp.dtsi"
 
 #include <dt-bindings/gpio/gpio.h>
 
@@ -123,6 +85,22 @@ &codec_analog {
 	status = "okay";
 };
 
+&cpu0 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu1 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu2 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu3 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
 &dai {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
index 72d6961..302e24b 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
@@ -1,8 +1,5 @@
-/*
- * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- *
- * Copyright (c) 2018 ARM Ltd.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2018 ARM Ltd.
 
 #include "sun50i-a64-sopine-baseboard.dts"
 
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
index d5b6e81..b26181c 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
@@ -1,44 +1,5 @@
-/*
- * Copyright (c) 2016 ARM Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This library is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This library is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2016 ARM Ltd.
 
 #include "sun50i-a64-pine64.dts"
 
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index 409523c..2165f23 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -1,48 +1,10 @@
-/*
- * Copyright (c) 2016 ARM Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This library is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This library is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2016 ARM Ltd.
 
 /dts-v1/;
 
 #include "sun50i-a64.dtsi"
+#include "sun50i-a64-cpu-opp.dtsi"
 
 #include <dt-bindings/gpio/gpio.h>
 
@@ -84,6 +46,22 @@ &codec_analog {
 	status = "okay";
 };
 
+&cpu0 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu1 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu2 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu3 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
 &dai {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
index 78c82a6..3d894b2 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
@@ -1,15 +1,14 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-/*
- * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.xyz>
- * Copyright (C) 2018 Vasily Khoruzhick <anarsoul@gmail.com>
- *
- */
+// Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.xyz>
+// Copyright (C) 2018 Vasily Khoruzhick <anarsoul@gmail.com>
 
 /dts-v1/;
 
 #include "sun50i-a64.dtsi"
+#include "sun50i-a64-cpu-opp.dtsi"
 
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/gpio-keys.h>
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/pwm/pwm.h>
 
@@ -60,6 +59,7 @@ lid_switch {
 			linux,code = <SW_LID>;
 			linux,can-disable;
 			wakeup-source;
+			wakeup-event-action = <EV_ACT_DEASSERTED>;
 		};
 	};
 
@@ -98,6 +98,22 @@ &codec_analog {
 	status = "okay";
 };
 
+&cpu0 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu1 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu2 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu3 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
 &dai {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
index 920103e..2f6ea9f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
@@ -1,47 +1,7 @@
-/*
- * Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.xyz>
- *
- * Based on sun50i-a64-pine64.dts, which is:
- *   Copyright (c) 2016 ARM Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This library is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This library is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.xyz>
+// Based on sun50i-a64-pine64.dts, which is:
+//   Copyright (c) 2016 ARM Ltd.
 
 /dts-v1/;
 
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
index 9d20e13..c48692b 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
@@ -1,49 +1,10 @@
-/*
- * Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.xyz>
- *
- * Based on sun50i-a64-pine64.dts, which is:
- *   Copyright (c) 2016 ARM Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This library is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This library is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.xyz>
+// Based on sun50i-a64-pine64.dts, which is:
+//   Copyright (c) 2016 ARM Ltd.
 
 #include "sun50i-a64.dtsi"
+#include "sun50i-a64-cpu-opp.dtsi"
 
 #include <dt-bindings/gpio/gpio.h>
 
@@ -51,6 +12,22 @@ &codec_analog {
 	cpvdd-supply = <&reg_eldo1>;
 };
 
+&cpu0 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu1 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu2 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu3 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
 &mmc0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc0_pins>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
index 9704151..f5df5f7 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
@@ -1,13 +1,11 @@
-/*
- * Copyright (C) Harald Geyer <harald@ccbib.org>
- * based on sun50i-a64-olinuxino.dts by Jagan Teki <jteki@openedev.com>
- *
- * SPDX-License-Identifier: (GPL-2.0 OR MIT)
- */
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+// Copyright (C) Harald Geyer <harald@ccbib.org>
+// based on sun50i-a64-olinuxino.dts by Jagan Teki <jteki@openedev.com>
 
 /dts-v1/;
 
 #include "sun50i-a64.dtsi"
+#include "sun50i-a64-cpu-opp.dtsi"
 
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
@@ -104,6 +102,22 @@ &de {
 	status = "okay";
 };
 
+&cpu0 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu1 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu2 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu3 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
 &ehci1 {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 27e4823..862b47d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -1,46 +1,7 @@
-/*
- * Copyright (C) 2016 ARM Ltd.
- * based on the Allwinner H3 dtsi:
- *    Copyright (C) 2015 Jens Kuske <jenskuske@gmail.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2016 ARM Ltd.
+// based on the Allwinner H3 dtsi:
+//    Copyright (C) 2015 Jens Kuske <jenskuske@gmail.com>
 
 #include <dt-bindings/clock/sun50i-a64-ccu.h>
 #include <dt-bindings/clock/sun8i-de2.h>
@@ -49,6 +10,7 @@
 #include <dt-bindings/reset/sun50i-a64-ccu.h>
 #include <dt-bindings/reset/sun8i-de2.h>
 #include <dt-bindings/reset/sun8i-r-ccu.h>
+#include <dt-bindings/thermal/thermal.h>
 
 / {
 	interrupt-parent = <&gic>;
@@ -89,6 +51,9 @@ cpu0: cpu@0 {
 			reg = <0>;
 			enable-method = "psci";
 			next-level-cache = <&L2>;
+			clocks = <&ccu 21>;
+			clock-names = "cpu";
+			#cooling-cells = <2>;
 		};
 
 		cpu1: cpu@1 {
@@ -97,6 +62,9 @@ cpu1: cpu@1 {
 			reg = <1>;
 			enable-method = "psci";
 			next-level-cache = <&L2>;
+			clocks = <&ccu 21>;
+			clock-names = "cpu";
+			#cooling-cells = <2>;
 		};
 
 		cpu2: cpu@2 {
@@ -105,6 +73,9 @@ cpu2: cpu@2 {
 			reg = <2>;
 			enable-method = "psci";
 			next-level-cache = <&L2>;
+			clocks = <&ccu 21>;
+			clock-names = "cpu";
+			#cooling-cells = <2>;
 		};
 
 		cpu3: cpu@3 {
@@ -113,6 +84,9 @@ cpu3: cpu@3 {
 			reg = <3>;
 			enable-method = "psci";
 			next-level-cache = <&L2>;
+			clocks = <&ccu 21>;
+			clock-names = "cpu";
+			#cooling-cells = <2>;
 		};
 
 		L2: l2-cache {
@@ -211,6 +185,69 @@ timer {
 			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
 	};
 
+	thermal-zones {
+		cpu_thermal: cpu0-thermal {
+			/* milliseconds */
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&ths 0>;
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_alert0>;
+					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+				map1 {
+					trip = <&cpu_alert1>;
+					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+
+			trips {
+				cpu_alert0: cpu_alert0 {
+					/* milliCelsius */
+					temperature = <75000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu_alert1: cpu_alert1 {
+					/* milliCelsius */
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+
+				cpu_crit: cpu_crit {
+					/* milliCelsius */
+					temperature = <110000>;
+					hysteresis = <2000>;
+					type = "critical";
+				};
+			};
+		};
+
+		gpu0_thermal: gpu0-thermal {
+			/* milliseconds */
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&ths 1>;
+		};
+
+		gpu1_thermal: gpu1-thermal {
+			/* milliseconds */
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&ths 2>;
+		};
+	};
+
 	soc {
 		compatible = "simple-bus";
 		#address-cells = <1>;
@@ -382,6 +419,12 @@ tcon0_out: port@1 {
 					#address-cells = <1>;
 					#size-cells = <0>;
 					reg = <1>;
+
+					tcon0_out_dsi: endpoint@1 {
+						reg = <1>;
+						remote-endpoint = <&dsi_in_tcon0>;
+						allwinner,tcon-channel = <1>;
+					};
 				};
 			};
 		};
@@ -485,6 +528,12 @@ mmc2: mmc@1c11000 {
 		sid: eeprom@1c14000 {
 			compatible = "allwinner,sun50i-a64-sid";
 			reg = <0x1c14000 0x400>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			ths_calibration: thermal-sensor-calibration@34 {
+				reg = <0x34 0x8>;
+			};
 		};
 
 		crypto: crypto@1c15000 {
@@ -593,7 +642,7 @@ pio: pinctrl@1c20800 {
 			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&ccu 58>, <&osc24M>, <&rtc 0>;
+			clocks = <&ccu CLK_BUS_PIO>, <&osc24M>, <&rtc 0>;
 			clock-names = "apb", "hosc", "losc";
 			gpio-controller;
 			#gpio-cells = <3>;
@@ -810,6 +859,18 @@ codec: codec@1c22e00 {
 			status = "disabled";
 		};
 
+		ths: thermal-sensor@1c25000 {
+			compatible = "allwinner,sun50i-a64-ths";
+			reg = <0x01c25000 0x100>;
+			clocks = <&ccu CLK_BUS_THS>, <&ccu CLK_THS>;
+			clock-names = "bus", "mod";
+			interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&ccu RST_BUS_THS>;
+			nvmem-cells = <&ths_calibration>;
+			nvmem-cell-names = "calibration";
+			#thermal-sensor-cells = <1>;
+		};
+
 		uart0: serial@1c28000 {
 			compatible = "snps,dw-apb-uart";
 			reg = <0x01c28000 0x400>;
@@ -1014,6 +1075,37 @@ csi: csi@1cb0000 {
 			status = "disabled";
 		};
 
+		dsi: dsi@1ca0000 {
+			compatible = "allwinner,sun50i-a64-mipi-dsi";
+			reg = <0x01ca0000 0x1000>;
+			interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_BUS_MIPI_DSI>;
+			resets = <&ccu RST_BUS_MIPI_DSI>;
+			phys = <&dphy>;
+			phy-names = "dphy";
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port {
+				dsi_in_tcon0: endpoint {
+					remote-endpoint = <&tcon0_out_dsi>;
+				};
+			};
+		};
+
+		dphy: d-phy@1ca1000 {
+			compatible = "allwinner,sun50i-a64-mipi-dphy",
+				     "allwinner,sun6i-a31-mipi-dphy";
+			reg = <0x01ca1000 0x1000>;
+			clocks = <&ccu CLK_BUS_MIPI_DSI>,
+				 <&ccu CLK_DSI_DPHY>;
+			clock-names = "bus", "mod";
+			resets = <&ccu RST_BUS_MIPI_DSI>;
+			status = "disabled";
+			#phy-cells = <0>;
+		};
+
 		hdmi: hdmi@1ee0000 {
 			compatible = "allwinner,sun50i-a64-dw-hdmi",
 				     "allwinner,sun8i-a83t-dw-hdmi";
@@ -1051,7 +1143,7 @@ hdmi_phy: hdmi-phy@1ef0000 {
 			compatible = "allwinner,sun50i-a64-hdmi-phy";
 			reg = <0x01ef0000 0x10000>;
 			clocks = <&ccu CLK_BUS_HDMI>, <&ccu CLK_HDMI_DDC>,
-				 <&ccu 7>;
+				 <&ccu CLK_PLL_VIDEO0>;
 			clock-names = "bus", "mod", "pll-0";
 			resets = <&ccu RST_BUS_HDMI0>;
 			reset-names = "phy";
@@ -1081,7 +1173,8 @@ r_intc: interrupt-controller@1f00c00 {
 		r_ccu: clock@1f01400 {
 			compatible = "allwinner,sun50i-a64-r-ccu";
 			reg = <0x01f01400 0x100>;
-			clocks = <&osc24M>, <&rtc 0>, <&rtc 2>, <&ccu 11>;
+			clocks = <&osc24M>, <&rtc 0>, <&rtc 2>,
+				 <&ccu CLK_PLL_PERIPH0>;
 			clock-names = "hosc", "losc", "iosc", "pll-periph";
 			#clock-cells = <1>;
 			#reset-cells = <1>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5-devboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5-devboard.dts
index c924090..076a0b9 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5-devboard.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5-devboard.dts
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+// Copyright (C) 2018 Aleksandr Aleksandrov <aleksandr.aleksandrov@emlid.com>
+
 /*
  * DTS for Emlid Neutis N5 Dev board.
- *
- * Copyright (C) 2018 Aleksandr Aleksandrov <aleksandr.aleksandrov@emlid.com>
  */
 
 /dts-v1/;
@@ -15,14 +15,6 @@ / {
 		     "emlid,neutis-n5",
 		     "allwinner,sun50i-h5";
 
-	aliases {
-		serial0 = &uart0;
-	};
-
-	chosen {
-		stdout-path = "serial0:115200n8";
-	};
-
 	connector {
 		compatible = "hdmi-connector";
 		type = "a";
@@ -34,16 +26,6 @@ hdmi_con_in: endpoint {
 		};
 	};
 
-	reg_usb0_vbus: usb0-vbus {
-		compatible = "regulator-fixed";
-		regulator-name = "usb0-vbus";
-		regulator-min-microvolt = <5000000>;
-		regulator-max-microvolt = <5000000>;
-		enable-active-high;
-		gpio = <&r_pio 0 9 GPIO_ACTIVE_HIGH>;   /* PL9 */
-		status = "okay";
-	};
-
 	vdd_cpux: gpio-regulator {
 		compatible = "regulator-gpio";
 		regulator-name = "vdd-cpux";
@@ -59,33 +41,11 @@ vdd_cpux: gpio-regulator {
 	};
 };
 
+&cpu0 {
+	cpu-supply = <&vdd_cpux>;
+};
+
 &codec {
-	allwinner,audio-routing =
-		"Line Out", "LINEOUT",
-		"LINEIN", "Line In",
-		"MIC1", "Mic",
-		"MIC2", "Mic",
-		"Mic",  "MBIAS";
-	status = "okay";
-};
-
-&de {
-	status = "okay";
-};
-
-&ehci0 {
-	status = "okay";
-};
-
-&ehci1 {
-	status = "okay";
-};
-
-&ehci2 {
-	status = "okay";
-};
-
-&ehci3 {
 	status = "okay";
 };
 
@@ -106,42 +66,6 @@ hdmi_out_con: endpoint {
 	};
 };
 
-&mmc0 {
-	vmmc-supply = <&reg_vcc3v3>;
-	bus-width = <4>;
-	cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
-	status = "okay";
-};
-
-&ohci0 {
-	status = "okay";
-};
-
-&ohci1 {
-	status = "okay";
-};
-
-&ohci2 {
-	status = "okay";
-};
-
-&ohci3 {
-	status = "okay";
-};
-
-&uart0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&uart0_pa_pins>;
-	status = "okay";
-};
-
-&usb_otg {
-	dr_mode = "otg";
-	status = "okay";
-};
-
-&usbphy {
-	usb0_id_det-gpios = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */
-	usb0_vbus-supply = <&reg_usb0_vbus>;
+&i2c1 {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5.dtsi
index 5bec574..fc57001 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5.dtsi
@@ -1,73 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+// Copyright (C) 2018 Aleksandr Aleksandrov <aleksandr.aleksandrov@emlid.com>
+
 /*
  * DTSI for Emlid Neutis N5 SoM.
- *
- * Copyright (C) 2018 Aleksandr Aleksandrov <aleksandr.aleksandrov@emlid.com>
  */
 
 /dts-v1/;
 
 #include "sun50i-h5.dtsi"
-
-#include <dt-bindings/gpio/gpio.h>
-
-/ {
-	reg_vcc3v3: vcc3v3 {
-		compatible = "regulator-fixed";
-		regulator-name = "vcc3v3";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-	};
-
-	wifi_pwrseq: wifi_pwrseq {
-		compatible = "mmc-pwrseq-simple";
-		reset-gpios = <&pio 2 7 GPIO_ACTIVE_LOW>; /* PC7 */
-		post-power-on-delay-ms = <200>;
-		clocks = <&rtc 1>;
-		clock-names = "ext_clock";
-	};
-};
-
-&mmc1 {
-	vmmc-supply = <&reg_vcc3v3>;
-	vqmmc-supply = <&reg_vcc3v3>;
-	mmc-pwrseq = <&wifi_pwrseq>;
-	bus-width = <4>;
-	non-removable;
-	status = "okay";
-
-	brcmf: wifi@1 {
-		reg = <1>;
-		compatible = "brcm,bcm4329-fmac";
-		interrupt-parent = <&r_pio>;
-		interrupts = <0 5 IRQ_TYPE_LEVEL_LOW>;	/* PL5 */
-		interrupt-names = "host-wake";
-	};
-};
-
-&mmc2 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&mmc2_8bit_pins>;
-	vmmc-supply = <&reg_vcc3v3>;
-	bus-width = <8>;
-	non-removable;
-	cap-mmc-hw-reset;
-	status = "okay";
-};
-
-&uart1 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
-	uart-has-rtscts;
-	status = "okay";
-
-	bluetooth {
-		compatible = "brcm,bcm43438-bt";
-		clocks = <&rtc 1>;
-		clock-names = "lpo";
-		vbat-supply = <&reg_vcc3v3>;
-		vddio-supply = <&reg_vcc3v3>;
-		shutdown-gpios = <&pio 2 4 GPIO_ACTIVE_HIGH>; /* PC4 */
-		device-wakeup-gpios = <&r_pio 0 7 GPIO_ACTIVE_HIGH>; /* PL7 */
-	};
-};
+#include <arm/sunxi-h3-h5-emlid-neutis.dtsi>
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-cc.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-cc.dts
index d68bdfe..64d35da 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-cc.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-cc.dts
@@ -1,8 +1,6 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-/*
- * Copyright (C) 2018 BayLibre, SAS
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- */
+// Copyright (C) 2018 BayLibre, SAS
+// Author: Neil Armstrong <narmstrong@baylibre.com>
 
 /dts-v1/;
 #include "sun50i-h5.dtsi"
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-it.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-it.dts
new file mode 100644
index 0000000..e59d68b
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-it.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2019 Chen-Yu Tsai <wens@csie.org>
+
+/dts-v1/;
+#include "sun50i-h5.dtsi"
+#include <arm/sunxi-libretech-all-h3-it.dtsi>
+
+/ {
+	model = "Libre Computer Board ALL-H3-IT H5";
+	compatible = "libretech,all-h3-it-h5", "allwinner,sun50i-h5";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h5-cc.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h5-cc.dts
new file mode 100644
index 0000000..df1b926
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h5-cc.dts
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2020 Chen-Yu Tsai <wens@csie.org>
+
+#include "sun50i-h5-libretech-all-h3-cc.dts"
+
+/ {
+	model = "Libre Computer Board ALL-H5-CC H5";
+	compatible = "libretech,all-h5-cc-h5", "allwinner,sun50i-h5";
+
+	aliases {
+		spi0 = &spi0;
+	};
+
+	reg_gmac_3v3: gmac-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "gmac-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		startup-delay-us = <5000>;
+		enable-active-high;
+		gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>;
+		vin-supply = <&reg_vcc5v0>;
+	};
+};
+
+&codec {
+	/* No line out; only onboard microphone */
+	allwinner,audio-routing =
+		"MIC1", "Mic",
+		"Mic",  "MBIAS";
+};
+
+/* This board has external PHY */
+&emac {
+	pinctrl-names = "default";
+	pinctrl-0 = <&emac_rgmii_pins>;
+	phy-supply = <&reg_gmac_3v3>;
+	phy-handle = <&ext_rgmii_phy>;
+	phy-mode = "rgmii";
+	/delete-property/ allwinner,leds-active-low;
+	status = "okay";
+};
+
+&external_mdio {
+	ext_rgmii_phy: ethernet-phy@1 {
+		compatible = "ethernet-phy-ieee802.3-c22";
+		reg = <1>;
+	};
+};
+
+&spi0  {
+	status = "okay";
+
+	flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <50000000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
index 1c7dde8..4f9ba53 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
@@ -1,45 +1,6 @@
-/*
- * Copyright (C) 2017 Antony Antony <antony@phenome.org>
- * Copyright (C) 2016 ARM Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2017 Antony Antony <antony@phenome.org>
+// Copyright (C) 2016 ARM Ltd.
 
 /dts-v1/;
 #include "sun50i-h5.dtsi"
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
index 57a6f45..b059e20 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
@@ -1,44 +1,5 @@
-/*
- * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
 
 /dts-v1/;
 #include "sun50i-h5.dtsi"
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
index e126c1c..70b5f099 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
@@ -1,44 +1,5 @@
-/*
- * Copyright (C) 2016 ARM Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2016 ARM Ltd.
 
 /dts-v1/;
 #include "sun50i-h5.dtsi"
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
index d9b3ed2..cb44bfa 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
@@ -1,47 +1,7 @@
-/*
- * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.xyz>
- *
- * Based on sun50i-h5-orangepi-pc2.dts, which is:
- *   Copyright (C) 2016 ARM Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.xyz>
+// Based on sun50i-h5-orangepi-pc2.dts, which is:
+//   Copyright (C) 2016 ARM Ltd.
 
 /dts-v1/;
 #include "sun50i-h5.dtsi"
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
index db6ea7b..ef5ca64 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
@@ -1,9 +1,6 @@
-/*
- * Copyright (C) 2016 ARM Ltd.
- * Copyright (C) 2018 Hauke Mehrtens <hauke@hauke-m.de>
- *
- * SPDX-License-Identifier: (GPL-2.0+ OR X11)
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2016 ARM Ltd.
+// Copyright (C) 2018 Hauke Mehrtens <hauke@hauke-m.de>
 
 /dts-v1/;
 #include "sun50i-h5.dtsi"
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
index dacf613..c95a685 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
@@ -1,44 +1,5 @@
-/*
- * Copyright (C) 2017 Jagan Teki <jteki@openedev.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This library is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This library is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2017 Jagan Teki <jteki@openedev.com>
 
 /dts-v1/;
 
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
index e92c4de..9893aa6 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
@@ -1,44 +1,5 @@
-/*
- * Copyright (C) 2016 ARM Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2016 ARM Ltd.
 
 #include <arm/sunxi-h3-h5.dtsi>
 
@@ -54,21 +15,21 @@ cpu0: cpu@0 {
 			enable-method = "psci";
 		};
 
-		cpu@1 {
+		cpu1: cpu@1 {
 			compatible = "arm,cortex-a53";
 			device_type = "cpu";
 			reg = <1>;
 			enable-method = "psci";
 		};
 
-		cpu@2 {
+		cpu2: cpu@2 {
 			compatible = "arm,cortex-a53";
 			device_type = "cpu";
 			reg = <2>;
 			enable-method = "psci";
 		};
 
-		cpu@3 {
+		cpu3: cpu@3 {
 			compatible = "arm,cortex-a53";
 			device_type = "cpu";
 			reg = <3>;
@@ -76,6 +37,16 @@ cpu@3 {
 		};
 	};
 
+	pmu {
+		compatible = "arm,cortex-a53-pmu",
+			     "arm,armv8-pmuv3";
+		interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+	};
+
 	psci {
 		compatible = "arm,psci-0.2";
 		method = "smc";
@@ -176,6 +147,32 @@ mali: gpu@1e80000 {
 			assigned-clocks = <&ccu CLK_GPU>;
 			assigned-clock-rates = <384000000>;
 		};
+
+		ths: thermal-sensor@1c25000 {
+			compatible = "allwinner,sun50i-h5-ths";
+			reg = <0x01c25000 0x400>;
+			interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&ccu RST_BUS_THS>;
+			clocks = <&ccu CLK_BUS_THS>, <&ccu CLK_THS>;
+			clock-names = "bus", "mod";
+			nvmem-cells = <&ths_calibration>;
+			nvmem-cell-names = "calibration";
+			#thermal-sensor-cells = <1>;
+		};
+	};
+
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&ths 0>;
+		};
+
+		gpu_thermal {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&ths 1>;
+		};
 	};
 };
 
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
index f335f74..df6d872 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
@@ -1,7 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
-/*
- * Copyright (C) 2019 Clément Péron <peron.clem@gmail.com>
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2019 Clément Péron <peron.clem@gmail.com>
 
 /dts-v1/;
 
@@ -76,6 +74,10 @@ &de {
 	status = "okay";
 };
 
+&dwc3 {
+	status = "okay";
+};
+
 &ehci0 {
 	status = "okay";
 };
@@ -292,3 +294,7 @@ &usb2phy {
 	usb0_vbus-supply = <&reg_vcc5v>;
 	status = "okay";
 };
+
+&usb3phy {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
index 4ed3fc2..c311eee 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
@@ -1,7 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
-/*
- * Copyright (C) 2019 Ondřej Jirman <megous@megous.com>
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2019 Ondřej Jirman <megous@megous.com>
 
 /dts-v1/;
 
@@ -274,6 +272,10 @@ sw {
 	};
 };
 
+&r_ir {
+	status = "okay";
+};
+
 &uart0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart0_ph_pins>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts
index e098a24..e7ca75c 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts
@@ -1,7 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
-/*
- * Copyright (C) 2018 Jagan Teki <jagan@openedev.com>
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2018 Jagan Teki <jagan@openedev.com>
 
 #include "sun50i-h6-orangepi.dtsi"
 
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts
index 12e1756..83aab73 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts
@@ -1,8 +1,6 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
-/*
- * Copyright (C) 2018 Amarula Solutions
- * Author: Jagan Teki <jagan@amarulasolutions.com>
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2018 Amarula Solutions
+// Author: Jagan Teki <jagan@amarulasolutions.com>
 
 #include "sun50i-h6-orangepi.dtsi"
 
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
index df4cbd7..37f4c57 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
@@ -1,8 +1,6 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
-/*
- * Copyright (C) 2018 Amarula Solutions
- * Author: Jagan Teki <jagan@amarulasolutions.com>
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2018 Amarula Solutions
+// Author: Jagan Teki <jagan@amarulasolutions.com>
 
 /dts-v1/;
 
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts
new file mode 100644
index 0000000..f4c8966
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: (GPL-2.0+ or MIT)
+/*
+ * Copyright (C) 2019 Corentin LABBE <clabbe@baylibre.com>
+ */
+
+#include "sun50i-h6-pine-h64.dts"
+
+/ {
+	model = "Pine H64 model B";
+	compatible = "pine64,pine-h64-model-b", "allwinner,sun50i-h6";
+
+	/delete-node/ reg_gmac_3v3;
+};
+
+&hdmi_connector {
+	/delete-property/ ddc-en-gpios;
+};
+
+&emac {
+	phy-supply = <&reg_aldo2>;
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
index 74899ed..d1c2aa5 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
@@ -1,7 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
-/*
- * Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.io>
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.io>
 
 /dts-v1/;
 
@@ -10,7 +8,7 @@
 #include <dt-bindings/gpio/gpio.h>
 
 / {
-	model = "Pine H64";
+	model = "Pine H64 model A";
 	compatible = "pine64,pine-h64", "allwinner,sun50i-h6";
 
 	aliases {
@@ -22,9 +20,10 @@ chosen {
 		stdout-path = "serial0:115200n8";
 	};
 
-	connector {
+	hdmi_connector: connector {
 		compatible = "hdmi-connector";
 		type = "a";
+		ddc-en-gpios = <&pio 7 2 GPIO_ACTIVE_HIGH>; /* PH2 */
 
 		port {
 			hdmi_con_in: endpoint {
@@ -52,6 +51,16 @@ status {
 		};
 	};
 
+	reg_gmac_3v3: gmac-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc-gmac-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		startup-delay-us = <100000>;
+		gpio = <&pio 2 16 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
 	reg_usb_vbus: vbus {
 		compatible = "regulator-fixed";
 		regulator-name = "usb-vbus";
@@ -68,7 +77,7 @@ &emac {
 	pinctrl-0 = <&ext_rgmii_pins>;
 	phy-mode = "rgmii";
 	phy-handle = <&ext_rgmii_phy>;
-	phy-supply = <&reg_aldo2>;
+	phy-supply = <&reg_gmac_3v3>;
 	allwinner,rx-delay-ps = <200>;
 	allwinner,tx-delay-ps = <200>;
 	status = "okay";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
index bccfe1e..83e6cb0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
@@ -1,7 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
-/*
- * Copyright (c) 2019 Jernej Skrabec <jernej.skrabec@siol.net>
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2019 Jernej Skrabec <jernej.skrabec@siol.net>
 
 /dts-v1/;
 
@@ -45,6 +43,10 @@ &de {
 	status = "okay";
 };
 
+&dwc3 {
+	status = "okay";
+};
+
 &ehci0 {
 	status = "okay";
 };
@@ -85,6 +87,7 @@ &ohci3 {
 };
 
 &r_ir {
+	linux,rc-map-name = "rc-tanix-tx5max";
 	status = "okay";
 };
 
@@ -102,3 +105,7 @@ &usb2otg {
 &usb2phy {
 	status = "okay";
 };
+
+&usb3phy {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index 2982408..3329283 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -1,7 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
-/*
- * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
- */
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
 
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/clock/sun50i-h6-ccu.h>
@@ -11,6 +9,7 @@
 #include <dt-bindings/reset/sun50i-h6-ccu.h>
 #include <dt-bindings/reset/sun50i-h6-r-ccu.h>
 #include <dt-bindings/reset/sun8i-de2.h>
+#include <dt-bindings/thermal/thermal.h>
 
 / {
 	interrupt-parent = <&gic>;
@@ -70,6 +69,16 @@ ext_osc32k: ext_osc32k_clk {
 		clock-output-names = "ext_osc32k";
 	};
 
+	pmu {
+		compatible = "arm,cortex-a53-pmu",
+			     "arm,armv8-pmuv3";
+		interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+	};
+
 	psci {
 		compatible = "arm,psci-0.2";
 		method = "smc";
@@ -233,6 +242,12 @@ dma: dma-controller@3002000 {
 		sid: efuse@3006000 {
 			compatible = "allwinner,sun50i-h6-sid";
 			reg = <0x03006000 0x400>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			ths_calibration: thermal-sensor-calibration@14 {
+				reg = <0x14 0x8>;
+			};
 		};
 
 		watchdog: watchdog@30090a0 {
@@ -245,6 +260,16 @@ watchdog: watchdog@30090a0 {
 			status = "disabled";
 		};
 
+		pwm: pwm@300a000 {
+			compatible = "allwinner,sun50i-h6-pwm";
+			reg = <0x0300a000 0x400>;
+			clocks = <&osc24M>, <&ccu CLK_BUS_PWM>;
+			clock-names = "mod", "bus";
+			resets = <&ccu RST_BUS_PWM>;
+			#pwm-cells = <3>;
+			status = "disabled";
+		};
+
 		pio: pinctrl@300b000 {
 			compatible = "allwinner,sun50i-h6-pinctrl";
 			reg = <0x0300b000 0x400>;
@@ -856,5 +881,31 @@ r_i2c: i2c@7081400 {
 			#address-cells = <1>;
 			#size-cells = <0>;
 		};
+
+		ths: thermal-sensor@5070400 {
+			compatible = "allwinner,sun50i-h6-ths";
+			reg = <0x05070400 0x100>;
+			interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_BUS_THS>;
+			clock-names = "bus";
+			resets = <&ccu RST_BUS_THS>;
+			nvmem-cells = <&ths_calibration>;
+			nvmem-cell-names = "calibration";
+			#thermal-sensor-cells = <1>;
+		};
+	};
+
+	thermal-zones {
+		cpu-thermal {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&ths 0>;
+		};
+
+		gpu-thermal {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&ths 1>;
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/altera/Makefile b/arch/arm64/boot/dts/altera/Makefile
index 27bb925..10119c7 100644
--- a/arch/arm64/boot/dts/altera/Makefile
+++ b/arch/arm64/boot/dts/altera/Makefile
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
-dtb-$(CONFIG_ARCH_STRATIX10) += socfpga_stratix10_socdk.dtb
+dtb-$(CONFIG_ARCH_STRATIX10) += socfpga_stratix10_socdk.dtb \
+				socfpga_stratix10_socdk_nand.dtb
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
new file mode 100644
index 0000000..9946515
--- /dev/null
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright Altera Corporation (C) 2015. All rights reserved.
+ */
+
+#include "socfpga_stratix10.dtsi"
+
+/ {
+	model = "SoCFPGA Stratix 10 SoCDK";
+
+	aliases {
+		serial0 = &uart0;
+		ethernet0 = &gmac0;
+		ethernet1 = &gmac1;
+		ethernet2 = &gmac2;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		hps0 {
+			label = "hps_led0";
+			gpios = <&portb 20 GPIO_ACTIVE_HIGH>;
+		};
+
+		hps1 {
+			label = "hps_led1";
+			gpios = <&portb 19 GPIO_ACTIVE_HIGH>;
+		};
+
+		hps2 {
+			label = "hps_led2";
+			gpios = <&portb 21 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		/* We expect the bootloader to fill in the reg */
+		reg = <0 0 0 0>;
+	};
+
+	ref_033v: 033-v-ref {
+		compatible = "regulator-fixed";
+		regulator-name = "0.33V";
+		regulator-min-microvolt = <330000>;
+		regulator-max-microvolt = <330000>;
+	};
+
+	soc {
+		clocks {
+			osc1 {
+				clock-frequency = <25000000>;
+			};
+		};
+
+		eccmgr {
+			sdmmca-ecc@ff8c8c00 {
+				compatible = "altr,socfpga-s10-sdmmc-ecc",
+					     "altr,socfpga-sdmmc-ecc";
+				reg = <0xff8c8c00 0x100>;
+				altr,ecc-parent = <&mmc>;
+				interrupts = <14 4>,
+					     <15 4>;
+			};
+		};
+	};
+};
+
+&gpio1 {
+	status = "okay";
+};
+
+&gmac2 {
+	status = "okay";
+	phy-mode = "rgmii";
+	phy-handle = <&phy0>;
+
+	max-frame-size = <9000>;
+
+	mdio0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "snps,dwmac-mdio";
+		phy0: ethernet-phy@0 {
+			reg = <4>;
+
+			txd0-skew-ps = <0>; /* -420ps */
+			txd1-skew-ps = <0>; /* -420ps */
+			txd2-skew-ps = <0>; /* -420ps */
+			txd3-skew-ps = <0>; /* -420ps */
+			rxd0-skew-ps = <420>; /* 0ps */
+			rxd1-skew-ps = <420>; /* 0ps */
+			rxd2-skew-ps = <420>; /* 0ps */
+			rxd3-skew-ps = <420>; /* 0ps */
+			txen-skew-ps = <0>; /* -420ps */
+			txc-skew-ps = <900>; /* 0ps */
+			rxdv-skew-ps = <420>; /* 0ps */
+			rxc-skew-ps = <1680>; /* 780ps */
+		};
+	};
+};
+
+&nand {
+	status = "okay";
+
+	flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0>;
+		nand-bus-width = <16>;
+
+		partition@0 {
+			label = "u-boot";
+			reg = <0 0x200000>;
+		};
+
+		partition@200000 {
+			label = "env";
+			reg = <0x200000 0x40000>;
+		};
+
+		partition@240000 {
+			label = "dtb";
+			reg = <0x240000 0x40000>;
+		};
+
+		partition@280000 {
+			label = "kernel";
+			reg = <0x280000 0x2000000>;
+		};
+
+		partition@2280000 {
+			label = "misc";
+			reg = <0x2280000 0x2000000>;
+		};
+
+		partition@4280000 {
+			label = "rootfs";
+			reg = <0x4280000 0x3bd80000>;
+		};
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+	disable-over-current;
+};
+
+&watchdog0 {
+	status = "okay";
+};
+
+&i2c2 {
+	status = "okay";
+	clock-frequency = <100000>;
+	i2c-sda-falling-time-ns = <890>;  /* hcnt */
+	i2c-sdl-falling-time-ns = <890>;  /* lcnt */
+
+	adc@14 {
+		compatible = "lltc,ltc2497";
+		reg = <0x14>;
+		vref-supply = <&ref_033v>;
+	};
+
+	temp@4c {
+		compatible = "maxim,max1619";
+		reg = <0x4c>;
+	};
+
+	eeprom@51 {
+		compatible = "atmel,24c32";
+		reg = <0x51>;
+		pagesize = <32>;
+	};
+
+	rtc@68 {
+		compatible = "dallas,ds1339";
+		reg = <0x68>;
+	};
+};
+
+&qspi {
+	flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "n25q00a";
+		reg = <0>;
+		spi-max-frequency = <100000000>;
+
+		m25p,fast-read;
+		cdns,page-size = <256>;
+		cdns,block-size = <16>;
+		cdns,read-delay = <1>;
+		cdns,tshsl-ns = <50>;
+		cdns,tsd2d-ns = <50>;
+		cdns,tchsh-ns = <4>;
+		cdns,tslch-ns = <4>;
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			qspi_boot: partition@0 {
+				label = "Boot and fpga data";
+				reg = <0x0 0x034B0000>;
+			};
+
+			qspi_rootfs: partition@4000000 {
+				label = "Root Filesystem - JFFS2";
+				reg = <0x034B0000 0x0EB50000>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index 6340053..eef0045 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -7,6 +7,7 @@
 dtb-$(CONFIG_ARCH_MESON) += meson-g12b-s922x-khadas-vim3.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12b-odroid-n2.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12b-ugoos-am6.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-kii-pro.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-nanopi-k2.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-nexbox-a95x.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-odroidc2.dtb
@@ -29,11 +30,13 @@
 dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s805x-p241.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905w-p281.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905w-tx3-mini.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-libretech-pc.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxm-khadas-vim2.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxm-nexbox-a1.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q200.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q201.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxm-rbox-pro.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxm-s912-libretech-pc.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxm-vega-s96.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-sm1-sei610.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-sm1-khadas-vim3l.dtb
diff --git a/arch/arm64/boot/dts/amlogic/meson-a1.dtsi b/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
index 7210ad0..4dec518 100644
--- a/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
@@ -5,6 +5,7 @@
 
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/gpio/meson-a1-gpio.h>
 
 / {
 	compatible = "amlogic,a1";
@@ -74,6 +75,30 @@ apb: bus@fe000000 {
 			#size-cells = <2>;
 			ranges = <0x0 0x0 0x0 0xfe000000 0x0 0x1000000>;
 
+
+			reset: reset-controller@0 {
+				compatible = "amlogic,meson-a1-reset";
+				reg = <0x0 0x0 0x0 0x8c>;
+				#reset-cells = <1>;
+			};
+
+			periphs_pinctrl: pinctrl@0400 {
+				compatible = "amlogic,meson-a1-periphs-pinctrl";
+				#address-cells = <2>;
+				#size-cells = <2>;
+				ranges;
+
+				gpio: bank@0400 {
+					reg = <0x0 0x0400 0x0 0x003c>,
+					      <0x0 0x0480 0x0 0x0118>;
+					reg-names = "mux", "gpio";
+					gpio-controller;
+					#gpio-cells = <2>;
+					gpio-ranges = <&periphs_pinctrl 0 0 62>;
+				};
+
+			};
+
 			uart_AO: serial@1c00 {
 				compatible = "amlogic,meson-gx-uart",
 					     "amlogic,meson-ao-uart";
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index 04803c3..aace3d3 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
@@ -1169,6 +1169,7 @@ toddr_a: audio-controller@100 {
 				interrupts = <GIC_SPI 84 IRQ_TYPE_EDGE_RISING>;
 				clocks = <&clkc_audio AUD_CLKID_TODDR_A>;
 				resets = <&arb AXG_ARB_TODDR_A>;
+				amlogic,fifo-depth = <512>;
 				status = "disabled";
 			};
 
@@ -1180,6 +1181,7 @@ toddr_b: audio-controller@140 {
 				interrupts = <GIC_SPI 85 IRQ_TYPE_EDGE_RISING>;
 				clocks = <&clkc_audio AUD_CLKID_TODDR_B>;
 				resets = <&arb AXG_ARB_TODDR_B>;
+				amlogic,fifo-depth = <256>;
 				status = "disabled";
 			};
 
@@ -1191,6 +1193,7 @@ toddr_c: audio-controller@180 {
 				interrupts = <GIC_SPI 86 IRQ_TYPE_EDGE_RISING>;
 				clocks = <&clkc_audio AUD_CLKID_TODDR_C>;
 				resets = <&arb AXG_ARB_TODDR_C>;
+				amlogic,fifo-depth = <256>;
 				status = "disabled";
 			};
 
@@ -1202,6 +1205,7 @@ frddr_a: audio-controller@1c0 {
 				interrupts = <GIC_SPI 88 IRQ_TYPE_EDGE_RISING>;
 				clocks = <&clkc_audio AUD_CLKID_FRDDR_A>;
 				resets = <&arb AXG_ARB_FRDDR_A>;
+				amlogic,fifo-depth = <512>;
 				status = "disabled";
 			};
 
@@ -1213,6 +1217,7 @@ frddr_b: audio-controller@200 {
 				interrupts = <GIC_SPI 89 IRQ_TYPE_EDGE_RISING>;
 				clocks = <&clkc_audio AUD_CLKID_FRDDR_B>;
 				resets = <&arb AXG_ARB_FRDDR_B>;
+				amlogic,fifo-depth = <256>;
 				status = "disabled";
 			};
 
@@ -1224,6 +1229,7 @@ frddr_c: audio-controller@240 {
 				interrupts = <GIC_SPI 90 IRQ_TYPE_EDGE_RISING>;
 				clocks = <&clkc_audio AUD_CLKID_FRDDR_C>;
 				resets = <&arb AXG_ARB_FRDDR_C>;
+				amlogic,fifo-depth = <256>;
 				status = "disabled";
 			};
 
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index 7fabc8d..abe04f4 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -1968,6 +1968,29 @@ saradc: adc@9000 {
 			};
 		};
 
+		vdec: video-decoder@ff620000 {
+			compatible = "amlogic,g12a-vdec";
+			reg = <0x0 0xff620000 0x0 0x10000>,
+			      <0x0 0xffd0e180 0x0 0xe4>;
+			reg-names = "dos", "esparser";
+			interrupts = <GIC_SPI 44 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 32 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "vdec", "esparser";
+
+			amlogic,ao-sysctrl = <&rti>;
+			amlogic,canvas = <&canvas>;
+
+			clocks = <&clkc CLKID_PARSER>,
+				 <&clkc CLKID_DOS>,
+				 <&clkc CLKID_VDEC_1>,
+				 <&clkc CLKID_VDEC_HEVC>,
+				 <&clkc CLKID_VDEC_HEVCF>;
+			clock-names = "dos_parser", "dos", "vdec_1",
+				      "vdec_hevc", "vdec_hevcf";
+			resets = <&reset RESET_PARSER>;
+			reset-names = "esparser";
+		};
+
 		vpu: vpu@ff900000 {
 			compatible = "amlogic,meson-g12a-vpu";
 			reg = <0x0 0xff900000 0x0 0x100000>,
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
index b3ba2fd..03054c4 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
@@ -106,6 +106,7 @@ toddr_a: audio-controller@100 {
 			resets = <&arb AXG_ARB_TODDR_A>,
 				 <&clkc_audio AUD_RESET_TODDR_A>;
 			reset-names = "arb", "rst";
+			amlogic,fifo-depth = <512>;
 			status = "disabled";
 		};
 
@@ -120,6 +121,7 @@ toddr_b: audio-controller@140 {
 			resets = <&arb AXG_ARB_TODDR_B>,
 				 <&clkc_audio AUD_RESET_TODDR_B>;
 			reset-names = "arb", "rst";
+			amlogic,fifo-depth = <256>;
 			status = "disabled";
 		};
 
@@ -134,6 +136,7 @@ toddr_c: audio-controller@180 {
 			resets = <&arb AXG_ARB_TODDR_C>,
 				 <&clkc_audio AUD_RESET_TODDR_C>;
 			reset-names = "arb", "rst";
+			amlogic,fifo-depth = <256>;
 			status = "disabled";
 		};
 
@@ -148,6 +151,7 @@ frddr_a: audio-controller@1c0 {
 			resets = <&arb AXG_ARB_FRDDR_A>,
 				 <&clkc_audio AUD_RESET_FRDDR_A>;
 			reset-names = "arb", "rst";
+			amlogic,fifo-depth = <512>;
 			status = "disabled";
 		};
 
@@ -162,6 +166,7 @@ frddr_b: audio-controller@200 {
 			resets = <&arb AXG_ARB_FRDDR_B>,
 				 <&clkc_audio AUD_RESET_FRDDR_B>;
 			reset-names = "arb", "rst";
+			amlogic,fifo-depth = <256>;
 			status = "disabled";
 		};
 
@@ -176,6 +181,7 @@ frddr_c: audio-controller@240 {
 			resets = <&arb AXG_ARB_FRDDR_C>,
 				 <&clkc_audio AUD_RESET_FRDDR_C>;
 			reset-names = "arb", "rst";
+			amlogic,fifo-depth = <256>;
 			status = "disabled";
 		};
 
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
new file mode 100644
index 0000000..248b018
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 BayLibre SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+/* Libretech Amlogic GX PC form factor - AKA: Tartiflette */
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+	adc-keys {
+		compatible = "adc-keys";
+		io-channels = <&saradc 0>;
+		io-channel-names = "buttons";
+		keyup-threshold-microvolt = <1800000>;
+
+		update-button {
+			label = "update";
+			linux,code = <KEY_VENDOR>;
+			press-threshold-microvolt = <1300000>;
+		};
+	};
+
+	aliases {
+		serial0 = &uart_AO;
+		ethernet0 = &ethmac;
+		spi0 = &spifc;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	cvbs-connector {
+		compatible = "composite-video-connector";
+		status = "disabled";
+
+		port {
+			cvbs_connector_in: endpoint {
+				remote-endpoint = <&cvbs_vdac_out>;
+			};
+		};
+	};
+
+	emmc_pwrseq: emmc-pwrseq {
+		compatible = "mmc-pwrseq-emmc";
+		reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
+	};
+
+	hdmi-connector {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_connector_in: endpoint {
+				remote-endpoint = <&hdmi_tx_tmds_out>;
+			};
+		};
+	};
+
+	gpio-keys-polled {
+		compatible = "gpio-keys-polled";
+		poll-interval = <100>;
+
+		power-button {
+			label = "power";
+			linux,code = <KEY_POWER>;
+			gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x80000000>;
+	};
+
+	ao_5v: regulator-ao_5v {
+		compatible = "regulator-fixed";
+		regulator-name = "AO_5V";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		vin-supply = <&dc_in>;
+		regulator-always-on;
+	};
+
+	dc_in: regulator-dc_in {
+		compatible = "regulator-fixed";
+		regulator-name = "DC_IN";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		green {
+			color = <LED_COLOR_ID_GREEN>;
+			function = LED_FUNCTION_DISK_ACTIVITY;
+			gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "disk-activity";
+		};
+
+		blue {
+			color = <LED_COLOR_ID_BLUE>;
+			function = LED_FUNCTION_STATUS;
+			gpios = <&gpio GPIODV_28 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+			panic-indicator;
+		};
+	};
+
+	vcc_card: regulator-vcc_card {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC_CARD";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		vin-supply = <&vddio_ao3v3>;
+
+		gpio = <&gpio GPIODV_4 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	vcc5v: regulator-vcc5v {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC5V";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		vin-supply = <&ao_5v>;
+
+		gpio = <&gpio GPIOH_3 GPIO_OPEN_DRAIN>;
+	};
+
+	vddio_ao18: regulator-vddio_ao18 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDIO_AO18";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		vin-supply = <&ao_5v>;
+		regulator-always-on;
+	};
+
+	vddio_ao3v3: regulator-vddio_ao3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDIO_AO3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		vin-supply = <&ao_5v>;
+		regulator-always-on;
+	};
+
+	vddio_boot: regulator-vddio_boot {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDIO_BOOT";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		vin-supply = <&vddio_ao3v3>;
+		regulator-always-on;
+	};
+
+	vddio_card: regulator-vddio-card {
+		compatible = "regulator-gpio";
+		regulator-name = "VDDIO_CARD";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpios = <&gpio GPIODV_5 GPIO_ACTIVE_HIGH>;
+		gpios-states = <0>;
+
+		states = <3300000 0>,
+			 <1800000 1>;
+
+		regulator-settling-time-up-us = <200>;
+		regulator-settling-time-down-us = <50000>;
+	};
+};
+
+&cec_AO {
+	pinctrl-0 = <&ao_cec_pins>;
+	pinctrl-names = "default";
+	hdmi-phandle = <&hdmi_tx>;
+	status = "okay";
+};
+
+&cvbs_vdac_port {
+	cvbs_vdac_out: endpoint {
+		remote-endpoint = <&cvbs_connector_in>;
+	};
+};
+
+&ethmac {
+	pinctrl-0 = <&eth_pins>, <&eth_phy_irq_pins>;
+	pinctrl-names = "default";
+	phy-handle = <&external_phy>;
+	amlogic,tx-delay-ns = <2>;
+	phy-mode = "rgmii";
+	status = "okay";
+};
+
+&external_mdio {
+	external_phy: ethernet-phy@0 {
+		reg = <0>;
+		max-speed = <1000>;
+		reset-assert-us = <10000>;
+		reset-deassert-us = <30000>;
+		reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>;
+		interrupt-parent = <&gpio_intc>;
+		interrupts = <25 IRQ_TYPE_LEVEL_LOW>;
+	};
+};
+
+&pinctrl_periphs {
+	/*
+	 * Make sure the reset pin of the usb HUB is driven high to take
+	 * it out of reset.
+	 */
+	usb1_rst_pins: usb1_rst_irq {
+		mux {
+			groups = "GPIODV_3";
+			function = "gpio_periphs";
+			bias-disable;
+			output-high;
+		};
+	};
+
+	/* Make sure the phy irq pin is properly configured as input */
+	eth_phy_irq_pins: eth_phy_irq {
+		mux {
+			groups = "GPIOZ_15";
+			function = "gpio_periphs";
+			bias-disable;
+			output-disable;
+		};
+	};
+};
+
+&hdmi_tx {
+	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+	pinctrl-names = "default";
+	hdmi-supply = <&vcc5v>;
+	status = "okay";
+};
+
+&hdmi_tx_tmds_port {
+	hdmi_tx_tmds_out: endpoint {
+		remote-endpoint = <&hdmi_connector_in>;
+	};
+};
+
+&ir {
+	pinctrl-0 = <&remote_input_ao_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&i2c_C {
+	pinctrl-0 = <&i2c_c_dv18_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+
+	rtc: rtc@51 {
+		reg = <0x51>;
+		compatible = "nxp,pcf8563";
+		#clock-cells = <0>;
+		clock-output-names = "rtc_clkout";
+	};
+};
+
+&pwm_AO_ab {
+	pinctrl-0 = <&pwm_ao_a_3_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
+	status = "okay";
+};
+
+&pwm_ab {
+	pinctrl-0 = <&pwm_b_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
+	status = "okay";
+};
+
+&pwm_ef {
+	pinctrl-0 = <&pwm_e_pins>, <&pwm_f_clk_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
+	status = "okay";
+};
+
+&saradc {
+	vref-supply = <&vddio_ao18>;
+	status = "okay";
+};
+
+/* SD card */
+&sd_emmc_b {
+	pinctrl-0 = <&sdcard_pins>;
+	pinctrl-1 = <&sdcard_clk_gate_pins>;
+	pinctrl-names = "default", "clk-gate";
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	sd-uhs-sdr12;
+	sd-uhs-sdr25;
+	sd-uhs-sdr50;
+	sd-uhs-ddr50;
+	max-frequency = <200000000>;
+	disable-wp;
+
+	cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
+
+	vmmc-supply = <&vcc_card>;
+	vqmmc-supply = <&vddio_card>;
+
+	status = "okay";
+};
+
+/* eMMC */
+&sd_emmc_c {
+	pinctrl-0 = <&emmc_pins>;
+	pinctrl-1 = <&emmc_clk_gate_pins>;
+	pinctrl-names = "default", "clk-gate";
+
+	bus-width = <8>;
+	cap-mmc-highspeed;
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
+	max-frequency = <200000000>;
+	disable-wp;
+
+	mmc-pwrseq = <&emmc_pwrseq>;
+	vmmc-supply = <&vddio_ao3v3>;
+	vqmmc-supply = <&vddio_boot>;
+
+	status = "okay";
+};
+
+&spifc {
+	pinctrl-0 = <&nor_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+
+	gd25lq128: spi-flash@0 {
+		compatible = "jedec,spi-nor";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0>;
+		spi-max-frequency = <12000000>;
+	};
+};
+
+&uart_AO {
+	pinctrl-0 = <&uart_ao_a_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+};
+
+&usb2_phy0 {
+	pinctrl-0 = <&usb1_rst_pins>;
+	pinctrl-names = "default";
+	phy-supply = <&vcc5v>;
+};
+
+&usb2_phy1 {
+	phy-supply = <&vcc5v>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts
new file mode 100644
index 0000000..2f1f829
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Mohammad Rasim <mohammad.rasim96@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-gxbb-p20x.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+/ {
+	compatible = "videostrong,kii-pro", "amlogic,p201", "amlogic,s905", "amlogic,meson-gxbb";
+	model = "Videostrong KII Pro";
+
+	leds {
+		compatible = "gpio-leds";
+		status {
+			gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_LOW>;
+			default-state = "off";
+			color = <LED_COLOR_ID_RED>;
+			function = LED_FUNCTION_STATUS;
+		};
+	};
+
+	gpio-keys-polled {
+		compatible = "gpio-keys-polled";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		poll-interval = <20>;
+
+		button-reset {
+			label = "reset";
+			linux,code = <KEY_POWER>;
+			gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+};
+
+
+
+&uart_A {
+	status = "okay";
+	pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
+	pinctrl-names = "default";
+	uart-has-rtscts;
+
+	bluetooth {
+		compatible = "brcm,bcm4335a0";
+	};
+};
+
+
+
+&ethmac {
+	status = "okay";
+	pinctrl-0 = <&eth_rmii_pins>;
+	pinctrl-names = "default";
+
+	phy-handle = <&eth_phy0>;
+	phy-mode = "rmii";
+
+	mdio {
+		compatible = "snps,dwmac-mdio";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		eth_phy0: ethernet-phy@0 {
+			/* IC Plus IP101GR (0x02430c54) */
+			reg = <0>;
+			reset-assert-us = <10000>;
+			reset-deassert-us = <10000>;
+			reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-libretech-pc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-libretech-pc.dts
new file mode 100644
index 0000000..100a1cf
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-libretech-pc.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 BayLibre SAS. All rights reserved.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+/dts-v1/;
+
+#include "meson-gxl-s905d.dtsi"
+#include "meson-gx-libretech-pc.dtsi"
+
+/ {
+	compatible = "libretech,aml-s905d-pc", "amlogic,s905d",
+		     "amlogic,meson-gxl";
+	model = "Libre Computer AML-S905D-PC";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index ed33d8e..259d863 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -533,6 +533,15 @@ mux {
 			};
 		};
 
+		i2c_c_dv18_pins: i2c_c_dv18 {
+			mux {
+				groups = "i2c_sck_c_dv19",
+				      "i2c_sda_c_dv18";
+				function = "i2c_c";
+				bias-disable;
+			};
+		};
+
 		eth_pins: eth_c {
 			mux {
 				groups = "eth_mdio",
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-s912-libretech-pc.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-s912-libretech-pc.dts
new file mode 100644
index 0000000..444c249
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-s912-libretech-pc.dts
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 BayLibre SAS. All rights reserved.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+/dts-v1/;
+
+#include "meson-gxm.dtsi"
+#include "meson-gx-libretech-pc.dtsi"
+
+/ {
+	compatible = "libretech,aml-s912-pc", "amlogic,s912",
+		     "amlogic,meson-gxm";
+	model = "Libre Computer AML-S912-PC";
+
+	typec2_vbus: regulator-typec2_vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "TYPEC2_VBUS";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		vin-supply = <&vcc5v>;
+
+		gpio = <&gpio GPIODV_1 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+};
+
+&pinctrl_periphs {
+	/*
+	 * Make sure the irq pin of the TYPE C controller is not driven
+	 * by the SoC.
+	 */
+	fusb302_irq_pins: fusb302_irq {
+		mux {
+			groups = "GPIODV_0";
+			function = "gpio_periphs";
+			bias-pull-up;
+			output-disable;
+		};
+	};
+};
+
+&i2c_C {
+	fusb302@22 {
+		compatible = "fcs,fusb302";
+		reg = <0x22>;
+
+		pinctrl-0 = <&fusb302_irq_pins>;
+		pinctrl-names = "default";
+		interrupt-parent = <&gpio_intc>;
+		interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+
+		vbus-supply = <&typec2_vbus>;
+
+		status = "okay";
+	};
+};
+
+&usb2_phy2 {
+	phy-supply = <&typec2_vbus>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
index 7894a54..d847a3f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
@@ -201,6 +201,7 @@ toddr_a: audio-controller@100 {
 			resets = <&arb AXG_ARB_TODDR_A>,
 				 <&clkc_audio AUD_RESET_TODDR_A>;
 			reset-names = "arb", "rst";
+			amlogic,fifo-depth = <8192>;
 			status = "disabled";
 		};
 
@@ -215,6 +216,7 @@ toddr_b: audio-controller@140 {
 			resets = <&arb AXG_ARB_TODDR_B>,
 				 <&clkc_audio AUD_RESET_TODDR_B>;
 			reset-names = "arb", "rst";
+			amlogic,fifo-depth = <256>;
 			status = "disabled";
 		};
 
@@ -229,6 +231,7 @@ toddr_c: audio-controller@180 {
 			resets = <&arb AXG_ARB_TODDR_C>,
 				 <&clkc_audio AUD_RESET_TODDR_C>;
 			reset-names = "arb", "rst";
+			amlogic,fifo-depth = <256>;
 			status = "disabled";
 		};
 
@@ -243,6 +246,7 @@ frddr_a: audio-controller@1c0 {
 			resets = <&arb AXG_ARB_FRDDR_A>,
 				 <&clkc_audio AUD_RESET_FRDDR_A>;
 			reset-names = "arb", "rst";
+			amlogic,fifo-depth = <512>;
 			status = "disabled";
 		};
 
@@ -257,6 +261,7 @@ frddr_b: audio-controller@200 {
 			resets = <&arb AXG_ARB_FRDDR_B>,
 				 <&clkc_audio AUD_RESET_FRDDR_B>;
 			reset-names = "arb", "rst";
+			amlogic,fifo-depth = <256>;
 			status = "disabled";
 		};
 
@@ -271,6 +276,7 @@ frddr_c: audio-controller@240 {
 			resets = <&arb AXG_ARB_FRDDR_C>,
 				 <&clkc_audio AUD_RESET_FRDDR_C>;
 			reset-names = "arb", "rst";
+			amlogic,fifo-depth = <256>;
 			status = "disabled";
 		};
 
@@ -412,6 +418,7 @@ toddr_d: audio-controller@840 {
 			resets = <&arb AXG_ARB_TODDR_D>,
 				 <&clkc_audio AUD_RESET_TODDR_D>;
 			reset-names = "arb", "rst";
+			amlogic,fifo-depth = <256>;
 			status = "disabled";
 		};
 
@@ -426,6 +433,7 @@ frddr_d: audio-controller@880 {
 			resets = <&arb AXG_ARB_FRDDR_D>,
 				 <&clkc_audio AUD_RESET_FRDDR_D>;
 			reset-names = "arb", "rst";
+			amlogic,fifo-depth = <256>;
 			status = "disabled";
 		};
 	};
@@ -482,6 +490,10 @@ &simplefb_hdmi {
 	power-domains = <&pwrc PWRC_SM1_VPU_ID>;
 };
 
+&vdec {
+	compatible = "amlogic,sm1-vdec";
+};
+
 &vpu {
 	power-domains = <&pwrc PWRC_SM1_VPU_ID>;
 };
diff --git a/arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts b/arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts
index 3e8c707..7a2c7f9 100644
--- a/arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts
+++ b/arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts
@@ -49,12 +49,6 @@ memory@0 {
 		reg = <0x1 0x00000000 0x0 0x40000000>; // 1GB
 	};
 
-	uart_clk: uart-clk {
-		compatible = "fixed-clock";
-		clock-frequency = <500000000>;
-		#clock-cells = <0>;
-	};
-
 	soc {
 		gpio0: gpio@50027000 {
 			porta: gpio-controller@0 {
@@ -173,21 +167,18 @@ pinmux {
 
 &uart0 {
 	status = "okay";
-	clocks = <&uart_clk>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_uart0_default>;
 };
 
 &uart1 {
 	status = "okay";
-	clocks = <&uart_clk>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_uart1_default>;
 };
 
 &uart2 {
 	status = "okay";
-	clocks = <&uart_clk>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_uart2_default>;
 };
diff --git a/arch/arm64/boot/dts/bitmain/bm1880.dtsi b/arch/arm64/boot/dts/bitmain/bm1880.dtsi
index d65453f..fa6e690 100644
--- a/arch/arm64/boot/dts/bitmain/bm1880.dtsi
+++ b/arch/arm64/boot/dts/bitmain/bm1880.dtsi
@@ -4,6 +4,7 @@
  * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
  */
 
+#include <dt-bindings/clock/bm1880-clock.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/reset/bitmain,bm1880-reset.h>
 
@@ -66,6 +67,12 @@ timer {
 			     <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
 	};
 
+	osc: osc {
+		compatible = "fixed-clock";
+		clock-frequency = <25000000>;
+		#clock-cells = <0>;
+	};
+
 	soc {
 		compatible = "simple-bus";
 		#address-cells = <2>;
@@ -94,6 +101,15 @@ pinctrl: pinctrl@400 {
 				reg = <0x400 0x120>;
 			};
 
+			clk: clock-controller@e8 {
+				compatible = "bitmain,bm1880-clk";
+				reg = <0xe8 0x0c>, <0x800 0xb0>;
+				reg-names = "pll", "sys";
+				clocks = <&osc>;
+				clock-names = "osc";
+				#clock-cells = <1>;
+			};
+
 			rst: reset-controller@c00 {
 				compatible = "bitmain,bm1880-reset";
 				reg = <0xc00 0x8>;
@@ -158,6 +174,9 @@ portc: gpio-controller@0 {
 		uart0: serial@58018000 {
 			compatible = "snps,dw-apb-uart";
 			reg = <0x0 0x58018000 0x0 0x2000>;
+			clocks = <&clk BM1880_CLK_UART_500M>,
+				 <&clk BM1880_CLK_APB_UART>;
+			clock-names = "baudclk", "apb_pclk";
 			interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
@@ -168,6 +187,9 @@ uart0: serial@58018000 {
 		uart1: serial@5801A000 {
 			compatible = "snps,dw-apb-uart";
 			reg = <0x0 0x5801a000 0x0 0x2000>;
+			clocks = <&clk BM1880_CLK_UART_500M>,
+				 <&clk BM1880_CLK_APB_UART>;
+			clock-names = "baudclk", "apb_pclk";
 			interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
@@ -178,6 +200,9 @@ uart1: serial@5801A000 {
 		uart2: serial@5801C000 {
 			compatible = "snps,dw-apb-uart";
 			reg = <0x0 0x5801c000 0x0 0x2000>;
+			clocks = <&clk BM1880_CLK_UART_500M>,
+				 <&clk BM1880_CLK_APB_UART>;
+			clock-names = "baudclk", "apb_pclk";
 			interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
@@ -188,6 +213,9 @@ uart2: serial@5801C000 {
 		uart3: serial@5801E000 {
 			compatible = "snps,dw-apb-uart";
 			reg = <0x0 0x5801e000 0x0 0x2000>;
+			clocks = <&clk BM1880_CLK_UART_500M>,
+				 <&clk BM1880_CLK_APB_UART>;
+			clock-names = "baudclk", "apb_pclk";
 			interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
index 6f90b0e..250fc01 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG Exynos5433 TM2 board device tree source
+ * Samsung Exynos5433 TM2 board device tree source
  *
  * Copyright (c) 2016 Samsung Electronics Co., Ltd.
  *
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts b/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts
index dda5d27..fdd0796 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG Exynos5433 TM2 board device tree source
+ * Samsung Exynos5433 TM2 board device tree source
  *
  * Copyright (c) 2016 Samsung Electronics Co., Ltd.
  *
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts b/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts
index 1e207ce8..089fc7a 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG Exynos5433 TM2E board device tree source
+ * Samsung Exynos5433 TM2E board device tree source
  *
  * Copyright (c) 2016 Samsung Electronics Co., Ltd.
  *
diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
index 080e0f5..7af288f 100644
--- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG Exynos7 Espresso board device tree source
+ * Samsung Exynos7 Espresso board device tree source
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
@@ -13,7 +13,7 @@
 #include <dt-bindings/gpio/gpio.h>
 
 / {
-	model = "Samsung Exynos7 Espresso board based on EXYNOS7";
+	model = "Samsung Exynos7 Espresso board based on Exynos7";
 	compatible = "samsung,exynos7-espresso", "samsung,exynos7";
 
 	aliases {
diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
index 3a00ef0..5558045 100644
--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * SAMSUNG EXYNOS7 SoC device tree source
+ * Samsung Exynos7 SoC device tree source
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index 38e344a..bac293e 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -18,6 +18,8 @@
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-clearfog-cx.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-honeycomb.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb
 
@@ -28,7 +30,9 @@
 dtb-$(CONFIG_ARCH_MXC) += imx8mq-hummingboard-pulse.dtb
 dtb-$(CONFIG_ARCH_MXC) += imx8mq-librem5-devkit.dtb
 dtb-$(CONFIG_ARCH_MXC) += imx8mq-nitrogen.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mq-phanbell.dtb
 dtb-$(CONFIG_ARCH_MXC) += imx8mq-pico-pi.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mq-thor96.dtb
 dtb-$(CONFIG_ARCH_MXC) += imx8mq-zii-ultra-rmb3.dtb
 dtb-$(CONFIG_ARCH_MXC) += imx8mq-zii-ultra-zest.dtb
 dtb-$(CONFIG_ARCH_MXC) += imx8qxp-ai_ml.dtb
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
index 5b9d4b3..ca409d9 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
@@ -123,6 +123,21 @@ &esdhc1 {
 	status = "okay";
 };
 
+&fspi {
+	status = "okay";
+
+	mt35xu02g0: flash@0 {
+		compatible = "jedec,spi-nor";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		spi-max-frequency = <50000000>;
+		/* The following setting enables 1-1-8 (CMD-ADDR-DATA) mode */
+		spi-rx-bus-width = <8>; /* 8 SPI Rx lines */
+		spi-tx-bus-width = <1>; /* 1 SPI Tx line */
+		reg = <0>;
+	};
+};
+
 &i2c0 {
 	status = "okay";
 
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
index 9720a19..afb5565 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
@@ -93,9 +93,26 @@ &esdhc {
 
 &esdhc1 {
 	mmc-hs200-1_8v;
+	mmc-hs400-1_8v;
+	bus-width = <8>;
 	status = "okay";
 };
 
+&fspi {
+	status = "okay";
+
+	mt35xu02g0: flash@0 {
+		compatible = "jedec,spi-nor";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		spi-max-frequency = <50000000>;
+		/* The following setting enables 1-1-8 (CMD-ADDR-DATA) mode */
+		spi-rx-bus-width = <8>; /* 8 SPI Rx lines */
+		spi-tx-bus-width = <1>; /* 1 SPI Tx line */
+		reg = <0>;
+	};
+};
+
 &i2c0 {
 	status = "okay";
 
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index a6f9b77..0bf375e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -277,6 +277,19 @@ i2c7: i2c@2070000 {
 			status = "disabled";
 		};
 
+		fspi: spi@20c0000 {
+			compatible = "nxp,lx2160a-fspi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x0 0x20c0000 0x0 0x10000>,
+			      <0x0 0x20000000 0x0 0x10000000>;
+			reg-names = "fspi_base", "fspi_mmap";
+			interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+			clock-names = "fspi_en", "fspi";
+			status = "disabled";
+		};
+
 		esdhc: mmc@2140000 {
 			compatible = "fsl,ls1028a-esdhc", "fsl,esdhc";
 			reg = <0x0 0x2140000 0x0 0x10000>;
@@ -322,7 +335,7 @@ duart1: serial@21c0600 {
 
 		edma0: dma-controller@22c0000 {
 			#dma-cells = <2>;
-			compatible = "fsl,vf610-edma";
+			compatible = "fsl,ls1028a-edma";
 			reg = <0x0 0x22c0000 0x0 0x10000>,
 			      <0x0 0x22d0000 0x0 0x10000>,
 			      <0x0 0x22e0000 0x0 0x10000>;
@@ -534,6 +547,7 @@ sai1: audio-controller@f100000 {
 			dma-names = "tx", "rx";
 			dmas = <&edma0 1 4>,
 			       <&edma0 1 3>;
+			fsl,sai-asynchronous;
 			status = "disabled";
 		};
 
@@ -548,6 +562,22 @@ sai2: audio-controller@f110000 {
 			dma-names = "tx", "rx";
 			dmas = <&edma0 1 6>,
 			       <&edma0 1 5>;
+			fsl,sai-asynchronous;
+			status = "disabled";
+		};
+
+		sai3: audio-controller@f120000 {
+			#sound-dai-cells = <0>;
+			compatible = "fsl,vf610-sai";
+			reg = <0x0 0xf120000 0x0 0x10000>;
+			interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>, <&clockgen 4 1>,
+				 <&clockgen 4 1>, <&clockgen 4 1>;
+			clock-names = "bus", "mclk1", "mclk2", "mclk3";
+			dma-names = "tx", "rx";
+			dmas = <&edma0 1 8>,
+			       <&edma0 1 7>;
+			fsl,sai-asynchronous;
 			status = "disabled";
 		};
 
@@ -562,6 +592,37 @@ sai4: audio-controller@f130000 {
 			dma-names = "tx", "rx";
 			dmas = <&edma0 1 10>,
 			       <&edma0 1 9>;
+			fsl,sai-asynchronous;
+			status = "disabled";
+		};
+
+		sai5: audio-controller@f140000 {
+			#sound-dai-cells = <0>;
+			compatible = "fsl,vf610-sai";
+			reg = <0x0 0xf140000 0x0 0x10000>;
+			interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>, <&clockgen 4 1>,
+				 <&clockgen 4 1>, <&clockgen 4 1>;
+			clock-names = "bus", "mclk1", "mclk2", "mclk3";
+			dma-names = "tx", "rx";
+			dmas = <&edma0 1 12>,
+			       <&edma0 1 11>;
+			fsl,sai-asynchronous;
+			status = "disabled";
+		};
+
+		sai6: audio-controller@f150000 {
+			#sound-dai-cells = <0>;
+			compatible = "fsl,vf610-sai";
+			reg = <0x0 0xf150000 0x0 0x10000>;
+			interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>, <&clockgen 4 1>,
+				 <&clockgen 4 1>, <&clockgen 4 1>;
+			clock-names = "bus", "mclk1", "mclk2", "mclk3";
+			dma-names = "tx", "rx";
+			dmas = <&edma0 1 14>,
+			       <&edma0 1 13>;
+			fsl,sai-asynchronous;
 			status = "disabled";
 		};
 
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
index 3595be0..db3d303 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
@@ -112,6 +112,20 @@ nand@0,0 {
 
 };
 
+&qspi {
+	status = "okay";
+
+	mt25qu512a0: flash@0 {
+		compatible = "jedec,spi-nor";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		spi-max-frequency = <50000000>;
+		spi-rx-bus-width = <4>;
+		spi-tx-bus-width = <1>;
+		reg = <0>;
+	};
+};
+
 #include "fsl-ls1046-post.dtsi"
 
 &fman0 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
index 0c742be..dbc23d6 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
@@ -101,23 +101,23 @@ cpld: board-control@2,0 {
 &qspi {
 	status = "okay";
 
-	qflash0: flash@0 {
-		compatible = "spansion,m25p80";
+	s25fs512s0: flash@0 {
+		compatible = "jedec,spi-nor";
 		#address-cells = <1>;
 		#size-cells = <1>;
-		spi-max-frequency = <20000000>;
+		spi-max-frequency = <50000000>;
 		spi-rx-bus-width = <4>;
-		spi-tx-bus-width = <4>;
+		spi-tx-bus-width = <1>;
 		reg = <0>;
 	};
 
-	qflash1: flash@1 {
-		compatible = "spansion,m25p80";
+	s25fs512s1: flash@1 {
+		compatible = "jedec,spi-nor";
 		#address-cells = <1>;
 		#size-cells = <1>;
-		spi-max-frequency = <20000000>;
+		spi-max-frequency = <50000000>;
 		spi-rx-bus-width = <4>;
-		spi-tx-bus-width = <4>;
+		spi-tx-bus-width = <1>;
 		reg = <1>;
 	};
 };
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
index 120e62d..41d8b15 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
@@ -143,6 +143,30 @@ &esdhc {
 	status = "okay";
 };
 
+&qspi {
+	status = "okay";
+
+	s25fs512s0: flash@0 {
+		compatible = "jedec,spi-nor";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		spi-max-frequency = <50000000>;
+		spi-rx-bus-width = <4>;
+		spi-tx-bus-width = <1>;
+		reg = <0>;
+	};
+
+	s25fs512s1: flash@1 {
+		compatible = "jedec,spi-nor";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		spi-max-frequency = <50000000>;
+		spi-rx-bus-width = <4>;
+		spi-tx-bus-width = <1>;
+		reg = <1>;
+	};
+};
+
 &sata {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
index 90b1989..4d77b34 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
@@ -86,6 +86,30 @@ &esdhc {
 	status = "okay";
 };
 
+&qspi {
+	status = "okay";
+
+	s25fs512s0: flash@0 {
+		compatible = "jedec,spi-nor";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		spi-max-frequency = <50000000>;
+		spi-rx-bus-width = <4>;
+		spi-tx-bus-width = <1>;
+		reg = <0>;
+	};
+
+	s25fs512s1: flash@1 {
+		compatible = "jedec,spi-nor";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		spi-max-frequency = <50000000>;
+		spi-rx-bus-width = <4>;
+		spi-tx-bus-width = <1>;
+		reg = <1>;
+	};
+};
+
 &sata {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
index c676d07..5945662 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
@@ -375,6 +375,19 @@ i2c3: i2c@2030000 {
 			status = "disabled";
 		};
 
+		qspi: spi@20c0000 {
+			compatible = "fsl,ls2080a-qspi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x0 0x20c0000 0x0 0x10000>,
+			      <0x0 0x20000000 0x0 0x10000000>;
+			reg-names = "QuadSPI", "QuadSPI-memory";
+			interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "qspi_en", "qspi";
+			clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+			status = "disabled";
+		};
+
 		esdhc: esdhc@2140000 {
 			compatible = "fsl,ls1088a-esdhc", "fsl,esdhc";
 			reg = <0x0 0x2140000 0x0 0x10000>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
index 6fd7f63..d0d6702 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
@@ -108,7 +108,15 @@ dflash0: n25q512a@0 {
 };
 
 &qspi {
-	status = "disabled";
+	status = "okay";
+
+	s25fs512s0: flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "jedec,spi-nor";
+		spi-max-frequency = <50000000>;
+		reg = <0>;
+	};
 };
 
 &sata0 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
index 7a0be8e..f96d06d 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
@@ -618,16 +618,16 @@ ifc: ifc@2240000 {
 		};
 
 		qspi: spi@20c0000 {
-			status = "disabled";
-			compatible = "fsl,ls2080a-qspi", "fsl,ls1021a-qspi";
+			compatible = "fsl,ls2080a-qspi";
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x0 0x20c0000 0x0 0x10000>,
 			      <0x0 0x20000000 0x0 0x10000000>;
 			reg-names = "QuadSPI", "QuadSPI-memory";
-			interrupts = <0 25 0x4>; /* Level high type */
+			interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clockgen 4 3>, <&clockgen 4 3>;
 			clock-names = "qspi_en", "qspi";
+			status = "disabled";
 		};
 
 		pcie1: pcie@3400000 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
new file mode 100644
index 0000000..071e216
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Device Tree file for LX2160A-CEx7
+//
+// Copyright 2019 SolidRun Ltd.
+
+/dts-v1/;
+
+#include "fsl-lx2160a.dtsi"
+
+/ {
+	model = "SolidRun LX2160A COM Express Type 7 module";
+	compatible = "solidrun,lx2160a-cex7", "fsl,lx2160a";
+
+	aliases {
+		crypto = &crypto;
+	};
+
+	sb_3v3: regulator-sb3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "RT7290";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+};
+
+&crypto {
+	status = "okay";
+};
+
+&dpmac17 {
+	phy-handle = <&rgmii_phy1>;
+	phy-connection-type = "rgmii-id";
+};
+
+&emdio1 {
+	status = "okay";
+
+	rgmii_phy1: ethernet-phy@1 {
+		reg = <1>;
+	};
+};
+
+&esdhc1 {
+	mmc-hs200-1_8v;
+	mmc-hs400-1_8v;
+	bus-width = <8>;
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+
+	i2c-switch@77 {
+		compatible = "nxp,pca9547";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x77>;
+
+		i2c@1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <1>;
+
+			fan-temperature-ctrlr@18 {
+				compatible = "ti,amc6821";
+				reg = <0x18>;
+				cooling-min-state = <0>;
+				cooling-max-state = <9>;
+				#cooling-cells = <2>;
+			};
+		};
+
+		i2c@3 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <3>;
+
+			temperature-sensor@48 {
+				compatible = "nxp,sa56004";
+				reg = <0x48>;
+				vcc-supply = <&sb_3v3>;
+			};
+		};
+	};
+};
+
+&i2c2 {
+	status = "okay";
+};
+
+&i2c4 {
+	status = "okay";
+
+	rtc@51 {
+		compatible = "nxp,pcf2129";
+		reg = <0x51>;
+		// IRQ10_B
+		interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+	};
+};
+
+&fspi {
+	status = "okay";
+
+	flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "micron,m25p80";
+		m25p,fast-read;
+		spi-max-frequency = <50000000>;
+		reg = <0>;
+		/* The following setting enables 1-1-8 (CMD-ADDR-DATA) mode */
+		spi-rx-bus-width = <8>;
+		spi-tx-bus-width = <1>;
+	};
+};
+
+&usb0 {
+	status = "okay";
+};
+
+&usb1 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-cx.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-cx.dts
new file mode 100644
index 0000000..86a9b77
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-cx.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Device Tree file for LX2160A Clearfog CX board
+//
+// Copyright 2019 SolidRun Ltd.
+
+/dts-v1/;
+
+#include "fsl-lx2160a-clearfog-itx.dtsi"
+
+/ {
+	model = "SolidRun LX2160A Clearfog CX";
+	compatible = "solidrun,clearfog-cx",
+		"solidrun,lx2160a-cex7", "fsl,lx2160a";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
new file mode 100644
index 0000000..f3741a3
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Device Tree file for LX2160A Clearfog ITX board; this contains the
+// common parts shared between the Clearfog CX and Honeycomb builds.
+//
+// Copyright 2019 SolidRun Ltd.
+
+/dts-v1/;
+
+#include "fsl-lx2160a-cex7.dtsi"
+
+/ {
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+};
+
+&emdio2 {
+	status = "okay";
+};
+
+&esdhc0 {
+	sd-uhs-sdr104;
+	sd-uhs-sdr50;
+	sd-uhs-sdr25;
+	sd-uhs-sdr12;
+	status = "okay";
+};
+
+&sata0 {
+	status = "okay";
+};
+
+&sata1 {
+	status = "okay";
+};
+
+&sata2 {
+	status = "okay";
+};
+
+&sata3 {
+	status = "okay";
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&uart1 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-honeycomb.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-honeycomb.dts
new file mode 100644
index 0000000..fe19f30
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-honeycomb.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Device Tree file for LX2160A Honeycomb board
+//
+// Copyright 2019 SolidRun Ltd.
+
+/dts-v1/;
+
+#include "fsl-lx2160a-clearfog-itx.dtsi"
+
+/ {
+	model = "SolidRun LX2160A Honeycomb";
+	compatible = "solidrun,honeycomb",
+		"solidrun,lx2160a-cex7", "fsl,lx2160a";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
index c2817b7..51615de 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
@@ -35,6 +35,34 @@ &crypto {
 	status = "okay";
 };
 
+&dpmac17 {
+	phy-handle = <&rgmii_phy1>;
+	phy-connection-type = "rgmii-id";
+};
+
+&dpmac18 {
+	phy-handle = <&rgmii_phy2>;
+	phy-connection-type = "rgmii-id";
+};
+
+&emdio1 {
+	status = "okay";
+
+	rgmii_phy1: ethernet-phy@1 {
+		/* AR8035 PHY */
+		compatible = "ethernet-phy-id004d.d072";
+		reg = <0x1>;
+		eee-broken-1000t;
+	};
+
+	rgmii_phy2: ethernet-phy@2 {
+		/* AR8035 PHY */
+		compatible = "ethernet-phy-id004d.d072";
+		reg = <0x2>;
+		eee-broken-1000t;
+	};
+};
+
 &esdhc0 {
 	sd-uhs-sdr104;
 	sd-uhs-sdr50;
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index e883fe0..e5ee559 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -939,6 +939,27 @@ ptp-timer@8b95000 {
 			fsl,extts-fifo;
 		};
 
+		/* WRIOP0: 0x8b8_0000, E-MDIO1: 0x1_6000 */
+		emdio1: mdio@8b96000 {
+			compatible = "fsl,fman-memac-mdio";
+			reg = <0x0 0x8b96000 0x0 0x1000>;
+			interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			little-endian;
+			status = "disabled";
+		};
+
+		emdio2: mdio@8b97000 {
+			compatible = "fsl,fman-memac-mdio";
+			reg = <0x0 0x8b97000 0x0 0x1000>;
+			interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+			little-endian;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
 		fsl_mc: fsl-mc@80c000000 {
 			compatible = "fsl,qoriq-mc";
 			reg = <0x00000008 0x0c000000 0 0x40>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
index 28ab17a..9e54747 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
@@ -16,6 +16,11 @@ chosen {
 		stdout-path = &uart2;
 	};
 
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0x0 0x40000000 0 0x80000000>;
+	};
+
 	leds {
 		compatible = "gpio-leds";
 		pinctrl-names = "default";
@@ -77,6 +82,26 @@ &A53_0 {
 	cpu-supply = <&buck2_reg>;
 };
 
+&ddrc {
+	operating-points-v2 = <&ddrc_opp_table>;
+
+	ddrc_opp_table: opp-table {
+		compatible = "operating-points-v2";
+
+		opp-25M {
+			opp-hz = /bits/ 64 <25000000>;
+		};
+
+		opp-100M {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+
+		opp-750M {
+			opp-hz = /bits/ 64 <750000000>;
+		};
+	};
+};
+
 &fec1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_fec1>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
index cffa899..5ccc4cc 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
@@ -430,18 +430,26 @@
 #define MX8MM_IOMUXC_SAI1_MCLK_SIM_M_HRESP                                  0x1AC 0x414 0x000 0x7 0x0
 #define MX8MM_IOMUXC_SAI2_RXFS_SAI2_RX_SYNC                                 0x1B0 0x418 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SAI2_RXFS_SAI5_TX_SYNC                                 0x1B0 0x418 0x4EC 0x1 0x2
+#define MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX                                 0x1B0 0x418 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI2_RXFS_UART1_DTE_RX                                 0x1B0 0x418 0x4F4 0x4 0x2
 #define MX8MM_IOMUXC_SAI2_RXFS_GPIO4_IO21                                   0x1B0 0x418 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SAI2_RXFS_SIM_M_HSIZE0                                 0x1B0 0x418 0x000 0x7 0x0
 #define MX8MM_IOMUXC_SAI2_RXC_SAI2_RX_BCLK                                  0x1B4 0x41C 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SAI2_RXC_SAI5_TX_BCLK                                  0x1B4 0x41C 0x4E8 0x1 0x2
+#define MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX                                  0x1B4 0x41C 0x4F4 0x4 0x3
+#define MX8MM_IOMUXC_SAI2_RXC_UART1_DTE_TX                                  0x1B4 0x41C 0x000 0x4 0x0
 #define MX8MM_IOMUXC_SAI2_RXC_GPIO4_IO22                                    0x1B4 0x41C 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SAI2_RXC_SIM_M_HSIZE1                                  0x1B4 0x41C 0x000 0x7 0x0
 #define MX8MM_IOMUXC_SAI2_RXD0_SAI2_RX_DATA0                                0x1B8 0x420 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SAI2_RXD0_SAI5_TX_DATA0                                0x1B8 0x420 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B                              0x1B8 0x420 0x4F0 0x4 0x2
+#define MX8MM_IOMUXC_SAI2_RXD0_UART1_DTE_CTS_B                              0x1B8 0x420 0x000 0x4 0x0
 #define MX8MM_IOMUXC_SAI2_RXD0_GPIO4_IO23                                   0x1B8 0x420 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SAI2_RXD0_SIM_M_HSIZE2                                 0x1B8 0x420 0x000 0x7 0x0
 #define MX8MM_IOMUXC_SAI2_TXFS_SAI2_TX_SYNC                                 0x1BC 0x424 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SAI2_TXFS_SAI5_TX_DATA1                                0x1BC 0x424 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B                              0x1BC 0x424 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI2_TXFS_UART1_DTE_RTS_B                              0x1BC 0x424 0x4F0 0x4 0x3
 #define MX8MM_IOMUXC_SAI2_TXFS_GPIO4_IO24                                   0x1BC 0x424 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SAI2_TXFS_SIM_M_HWRITE                                 0x1BC 0x424 0x000 0x7 0x0
 #define MX8MM_IOMUXC_SAI2_TXC_SAI2_TX_BCLK                                  0x1C0 0x428 0x000 0x0 0x0
@@ -464,21 +472,29 @@
 #define MX8MM_IOMUXC_SAI3_RXC_SAI3_RX_BCLK                                  0x1D0 0x438 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SAI3_RXC_GPT1_CLK                                      0x1D0 0x438 0x000 0x1 0x0
 #define MX8MM_IOMUXC_SAI3_RXC_SAI5_RX_BCLK                                  0x1D0 0x438 0x4D0 0x2 0x2
+#define MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B                               0x1D0 0x438 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI3_RXC_UART2_DTE_RTS_B                               0x1D0 0x438 0x4F8 0x4 0x2
 #define MX8MM_IOMUXC_SAI3_RXC_GPIO4_IO29                                    0x1D0 0x438 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SAI3_RXC_TPSMP_HTRANS1                                 0x1D0 0x438 0x000 0x7 0x0
 #define MX8MM_IOMUXC_SAI3_RXD_SAI3_RX_DATA0                                 0x1D4 0x43C 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SAI3_RXD_GPT1_COMPARE1                                 0x1D4 0x43C 0x000 0x1 0x0
 #define MX8MM_IOMUXC_SAI3_RXD_SAI5_RX_DATA0                                 0x1D4 0x43C 0x4D4 0x2 0x2
+#define MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B                               0x1D4 0x43C 0x4F8 0x4 0x3
+#define MX8MM_IOMUXC_SAI3_RXD_UART2_DTE_CTS_B                               0x1D4 0x43C 0x000 0x4 0x0
 #define MX8MM_IOMUXC_SAI3_RXD_GPIO4_IO30                                    0x1D4 0x43C 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SAI3_RXD_TPSMP_HDATA0                                  0x1D4 0x43C 0x000 0x7 0x0
 #define MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC                                 0x1D8 0x440 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CAPTURE2                                0x1D8 0x440 0x000 0x1 0x0
 #define MX8MM_IOMUXC_SAI3_TXFS_SAI5_RX_DATA1                                0x1D8 0x440 0x4D8 0x2 0x2
+#define MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX                                 0x1D8 0x440 0x4FC 0x4 0x2
+#define MX8MM_IOMUXC_SAI3_TXFS_UART2_DTE_TX                                 0x1D8 0x440 0x000 0x4 0x0
 #define MX8MM_IOMUXC_SAI3_TXFS_GPIO4_IO31                                   0x1D8 0x440 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SAI3_TXFS_TPSMP_HDATA1                                 0x1D8 0x440 0x000 0x7 0x0
 #define MX8MM_IOMUXC_SAI3_TXC_SAI3_TX_BCLK                                  0x1DC 0x444 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SAI3_TXC_GPT1_COMPARE2                                 0x1DC 0x444 0x000 0x1 0x0
 #define MX8MM_IOMUXC_SAI3_TXC_SAI5_RX_DATA2                                 0x1DC 0x444 0x4DC 0x2 0x2
+#define MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX                                  0x1DC 0x444 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI3_TXC_UART2_DTE_RX                                  0x1DC 0x444 0x4FC 0x4 0x3
 #define MX8MM_IOMUXC_SAI3_TXC_GPIO5_IO0                                     0x1DC 0x444 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SAI3_TXC_TPSMP_HDATA2                                  0x1DC 0x444 0x000 0x7 0x0
 #define MX8MM_IOMUXC_SAI3_TXD_SAI3_TX_DATA0                                 0x1E0 0x448 0x000 0x0 0x0
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index 3d95b66..1e5e115 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -140,11 +140,6 @@ opp-1800000000 {
 		};
 	};
 
-	memory@40000000 {
-		device_type = "memory";
-		reg = <0x0 0x40000000 0 0x80000000>;
-	};
-
 	osc_32k: clock-osc-32k {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
@@ -232,7 +227,7 @@ soc@0 {
 		ranges = <0x0 0x0 0x0 0x3e000000>;
 
 		aips1: bus@30000000 {
-			compatible = "fsl,aips-bus", "simple-bus";
+			compatible = "simple-bus";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x30000000 0x30000000 0x400000>;
@@ -438,7 +433,7 @@ cpu_speed_grade: speed-grade@10 {
 			};
 
 			anatop: anatop@30360000 {
-				compatible = "fsl,imx8mm-anatop", "syscon", "simple-bus";
+				compatible = "fsl,imx8mm-anatop", "syscon";
 				reg = <0x30360000 0x10000>;
 			};
 
@@ -501,7 +496,7 @@ src: reset-controller@30390000 {
 		};
 
 		aips2: bus@30400000 {
-			compatible = "fsl,aips-bus", "simple-bus";
+			compatible = "simple-bus";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x30400000 0x30400000 0x400000>;
@@ -560,7 +555,7 @@ system_counter: timer@306a0000 {
 		};
 
 		aips3: bus@30800000 {
-			compatible = "fsl,aips-bus", "simple-bus";
+			compatible = "simple-bus";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x30800000 0x30800000 0x400000>;
@@ -641,6 +636,36 @@ uart2: serial@30890000 {
 				status = "disabled";
 			};
 
+			crypto: crypto@30900000 {
+				compatible = "fsl,sec-v4.0";
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0x30900000 0x40000>;
+				ranges = <0 0x30900000 0x40000>;
+				interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clk IMX8MM_CLK_AHB>,
+					 <&clk IMX8MM_CLK_IPG_ROOT>;
+				clock-names = "aclk", "ipg";
+
+				sec_jr0: jr@1000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x1000 0x1000>;
+					interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+				};
+
+				sec_jr1: jr@2000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x2000 0x1000>;
+					interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+				};
+
+				sec_jr2: jr@3000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x3000 0x1000>;
+					interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+				};
+			};
+
 			i2c1: i2c@30a20000 {
 				compatible = "fsl,imx8mm-i2c", "fsl,imx21-i2c";
 				#address-cells = <1>;
@@ -775,7 +800,7 @@ fec1: ethernet@30be0000 {
 		};
 
 		aips4: bus@32c00000 {
-			compatible = "fsl,aips-bus", "simple-bus";
+			compatible = "simple-bus";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x32c00000 0x32c00000 0x400000>;
@@ -858,6 +883,16 @@ gic: interrupt-controller@38800000 {
 			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
+		ddrc: memory-controller@3d400000 {
+			compatible = "fsl,imx8mm-ddrc", "fsl,imx8m-ddrc";
+			reg = <0x3d400000 0x400000>;
+			clock-names = "core", "pll", "alt", "apb";
+			clocks = <&clk IMX8MM_CLK_DRAM_CORE>,
+				 <&clk IMX8MM_DRAM_PLL>,
+				 <&clk IMX8MM_CLK_DRAM_ALT>,
+				 <&clk IMX8MM_CLK_DRAM_APB>;
+		};
+
 		ddr-pmu@3d800000 {
 			compatible = "fsl,imx8mm-ddr-pmu", "fsl,imx8m-ddr-pmu";
 			reg = <0x3d800000 0x400000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
index 0719494..2497eeb 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
@@ -17,6 +17,26 @@ &A53_0 {
 	cpu-supply = <&buck2_reg>;
 };
 
+&ddrc {
+	operating-points-v2 = <&ddrc_opp_table>;
+
+	ddrc_opp_table: opp-table {
+		compatible = "operating-points-v2";
+
+		opp-25M {
+			opp-hz = /bits/ 64 <25000000>;
+		};
+
+		opp-100M {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+
+		opp-600M {
+			opp-hz = /bits/ 64 <600000000>;
+		};
+	};
+};
+
 &i2c1 {
 	pmic@4b {
 		compatible = "rohm,bd71847";
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
index 2a74330..0d2ec4a 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
@@ -3,6 +3,7 @@
  * Copyright 2019 NXP
  */
 
+#include <dt-bindings/usb/pd.h>
 #include "imx8mn.dtsi"
 
 / {
@@ -22,6 +23,11 @@ status {
 		};
 	};
 
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0x0 0x40000000 0 0x80000000>;
+	};
+
 	reg_usdhc2_vmmc: regulator-usdhc2 {
 		compatible = "regulator-fixed";
 		pinctrl-names = "default";
@@ -60,6 +66,42 @@ &i2c1 {
 	status = "okay";
 };
 
+&i2c2 {
+	clock-frequency = <400000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+
+	ptn5110: tcpc@50 {
+		compatible = "nxp,ptn5110";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_typec1>;
+		reg = <0x50>;
+		interrupt-parent = <&gpio2>;
+		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+		status = "okay";
+
+		port {
+			typec1_dr_sw: endpoint {
+				remote-endpoint = <&usb1_drd_sw>;
+			};
+		};
+
+		typec1_con: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C";
+			power-role = "dual";
+			data-role = "dual";
+			try-power-role = "sink";
+			source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+			sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)
+				     PDO_VAR(5000, 20000, 3000)>;
+			op-sink-microwatt = <15000000>;
+			self-powered;
+		};
+	};
+};
+
 &snvs_pwrkey {
 	status = "okay";
 };
@@ -70,6 +112,21 @@ &uart2 { /* console */
 	status = "okay";
 };
 
+&usbotg1 {
+	dr_mode = "otg";
+	hnp-disable;
+	srp-disable;
+	adp-disable;
+	usb-role-switch;
+	status = "okay";
+
+	port {
+		usb1_drd_sw: endpoint {
+			remote-endpoint = <&typec1_dr_sw>;
+		};
+	};
+};
+
 &usdhc2 {
 	assigned-clocks = <&clk IMX8MN_CLK_USDHC2>;
 	assigned-clock-rates = <200000000>;
@@ -138,12 +195,25 @@ MX8MN_IOMUXC_I2C1_SDA_I2C1_SDA		0x400001c3
 		>;
 	};
 
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX8MN_IOMUXC_I2C2_SCL_I2C2_SCL		0x400001c3
+			MX8MN_IOMUXC_I2C2_SDA_I2C2_SDA		0x400001c3
+		>;
+	};
+
 	pinctrl_reg_usdhc2_vmmc: regusdhc2vmmc {
 		fsl,pins = <
 			MX8MN_IOMUXC_SD2_RESET_B_GPIO2_IO19	0x41
 		>;
 	};
 
+	pinctrl_typec1: typec1grp {
+		fsl,pins = <
+			MX8MN_IOMUXC_SD1_STROBE_GPIO2_IO11	0x159
+		>;
+	};
+
 	pinctrl_uart2: uart2grp {
 		fsl,pins = <
 			MX8MN_IOMUXC_UART2_RXD_UART2_DCE_RX	0x140
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index e916250..a44b543 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -139,11 +139,6 @@ opp-1500000000 {
 		};
 	};
 
-	memory@40000000 {
-		device_type = "memory";
-		reg = <0x0 0x40000000 0 0x80000000>;
-	};
-
 	osc_32k: clock-osc-32k {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
@@ -208,7 +203,7 @@ soc@0 {
 		ranges = <0x0 0x0 0x0 0x3e000000>;
 
 		aips1: bus@30000000 {
-			compatible = "fsl,aips-bus", "simple-bus";
+			compatible = "simple-bus";
 			reg = <0x30000000 0x400000>;
 			#address-cells = <1>;
 			#size-cells = <1>;
@@ -349,7 +344,7 @@ cpu_speed_grade: speed-grade@10 {
 
 			anatop: anatop@30360000 {
 				compatible = "fsl,imx8mn-anatop", "fsl,imx8mm-anatop",
-					     "syscon", "simple-bus";
+					     "syscon";
 				reg = <0x30360000 0x10000>;
 			};
 
@@ -395,7 +390,7 @@ src: reset-controller@30390000 {
 		};
 
 		aips2: bus@30400000 {
-			compatible = "fsl,aips-bus", "simple-bus";
+			compatible = "simple-bus";
 			reg = <0x30400000 0x400000>;
 			#address-cells = <1>;
 			#size-cells = <1>;
@@ -455,7 +450,7 @@ system_counter: timer@306a0000 {
 		};
 
 		aips3: bus@30800000 {
-			compatible = "fsl,aips-bus", "simple-bus";
+			compatible = "simple-bus";
 			reg = <0x30800000 0x400000>;
 			#address-cells = <1>;
 			#size-cells = <1>;
@@ -537,6 +532,36 @@ uart2: serial@30890000 {
 				status = "disabled";
 			};
 
+			crypto: crypto@30900000 {
+				compatible = "fsl,sec-v4.0";
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0x30900000 0x40000>;
+				ranges = <0 0x30900000 0x40000>;
+				interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clk IMX8MN_CLK_AHB>,
+					 <&clk IMX8MN_CLK_IPG_ROOT>;
+				clock-names = "aclk", "ipg";
+
+				sec_jr0: jr0@1000 {
+					 compatible = "fsl,sec-v4.0-job-ring";
+					 reg = <0x1000 0x1000>;
+					 interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+				};
+
+				sec_jr1: jr1@2000 {
+					 compatible = "fsl,sec-v4.0-job-ring";
+					 reg = <0x2000 0x1000>;
+					 interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+				};
+
+				sec_jr2: jr2@3000 {
+					 compatible = "fsl,sec-v4.0-job-ring";
+					 reg = <0x3000 0x1000>;
+					 interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+				};
+			};
+
 			i2c1: i2c@30a20000 {
 				compatible = "fsl,imx8mn-i2c", "fsl,imx21-i2c";
 				#address-cells = <1>;
@@ -671,7 +696,7 @@ fec1: ethernet@30be0000 {
 		};
 
 		aips4: bus@32c00000 {
-			compatible = "fsl,aips-bus", "simple-bus";
+			compatible = "simple-bus";
 			reg = <0x32c00000 0x400000>;
 			#address-cells = <1>;
 			#size-cells = <1>;
@@ -683,10 +708,8 @@ usbotg1: usb@32e40000 {
 				interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk IMX8MN_CLK_USB1_CTRL_ROOT>;
 				clock-names = "usb1_ctrl_root_clk";
-				assigned-clocks = <&clk IMX8MN_CLK_USB_BUS>,
-						  <&clk IMX8MN_CLK_USB_CORE_REF>;
-				assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_500M>,
-							 <&clk IMX8MN_SYS_PLL1_100M>;
+				assigned-clocks = <&clk IMX8MN_CLK_USB_BUS>;
+				assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_500M>;
 				fsl,usbphy = <&usbphynop1>;
 				fsl,usbmisc = <&usbmisc1 0>;
 				status = "disabled";
@@ -759,6 +782,16 @@ gic: interrupt-controller@38800000 {
 			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
+		ddrc: memory-controller@3d400000 {
+			compatible = "fsl,imx8mn-ddrc", "fsl,imx8m-ddrc";
+			reg = <0x3d400000 0x400000>;
+			clock-names = "core", "pll", "alt", "apb";
+			clocks = <&clk IMX8MN_CLK_DRAM_CORE>,
+				 <&clk IMX8MN_DRAM_PLL>,
+				 <&clk IMX8MN_CLK_DRAM_ALT>,
+				 <&clk IMX8MN_CLK_DRAM_APB>;
+		};
+
 		ddr-pmu@3d800000 {
 			compatible = "fsl,imx8mn-ddr-pmu", "fsl,imx8m-ddr-pmu";
 			reg = <0x3d800000 0x400000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
index c366859..94066d4 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
@@ -105,6 +105,33 @@ &A53_3 {
 	cpu-supply = <&buck2_reg>;
 };
 
+&ddrc {
+	operating-points-v2 = <&ddrc_opp_table>;
+
+	ddrc_opp_table: opp-table {
+		compatible = "operating-points-v2";
+
+		opp-25M {
+			opp-hz = /bits/ 64 <25000000>;
+		};
+
+		opp-100M {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+
+		/*
+		 * On i.MX8MQ B0 the PLL can't be bypassed, so the lowest bus rate is 166M
+		 */
+		opp-166M {
+			opp-hz = /bits/ 64 <166935483>;
+		};
+
+		opp-800M {
+			opp-hz = /bits/ 64 <800000000>;
+		};
+	};
+};
+
 &fec1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_fec1>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-hummingboard-pulse.dts b/arch/arm64/boot/dts/freescale/imx8mq-hummingboard-pulse.dts
index b8cb20c..bfd91c1 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-hummingboard-pulse.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-hummingboard-pulse.dts
@@ -84,6 +84,12 @@ &i2c3 {
 	clock-frequency = <100000>;
 	status = "okay";
 
+	eeprom@57 {
+		compatible = "atmel,24c02";
+		reg = <0x57>;
+		status = "okay";
+	};
+
 	rtc@69 {
 		compatible = "abracon,ab1805";
 		reg = <0x69>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
index 596bc65..764a4cb 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
@@ -440,6 +440,13 @@ touchscreen@5d {
 		AVDD28-supply = <&reg_2v8_p>;
 		VDDIO-supply = <&reg_1v8_p>;
 	};
+
+	accel-gyro@6a {
+		compatible = "st,lsm9ds1-imu";
+		reg = <0x6a>;
+		vdd-supply = <&reg_3v3_p>;
+		vddio-supply = <&reg_3v3_p>;
+	};
 };
 
 &iomuxc {
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-phanbell.dts b/arch/arm64/boot/dts/freescale/imx8mq-phanbell.dts
new file mode 100644
index 0000000..3f2a489
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mq-phanbell.dts
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2017-2019 NXP
+ */
+
+/dts-v1/;
+
+#include "imx8mq.dtsi"
+
+/ {
+	model = "Google i.MX8MQ Phanbell";
+	compatible = "google,imx8mq-phanbell", "fsl,imx8mq";
+
+	chosen {
+		stdout-path = &uart1;
+	};
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0x00000000 0x40000000 0 0x40000000>;
+	};
+
+	pmic_osc: clock-pmic {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+		clock-output-names = "pmic_osc";
+	};
+
+	reg_usdhc2_vmmc: regulator-usdhc2-vmmc {
+		compatible = "regulator-fixed";
+		regulator-name = "VSD_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+};
+
+&A53_0 {
+	cpu-supply = <&buck2>;
+};
+
+&A53_1 {
+	cpu-supply = <&buck2>;
+};
+
+&A53_2 {
+	cpu-supply = <&buck2>;
+};
+
+&A53_3 {
+	cpu-supply = <&buck2>;
+};
+
+&i2c1 {
+	clock-frequency = <400000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+
+	pmic: pmic@4b {
+		compatible = "rohm,bd71837";
+		reg = <0x4b>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_pmic>;
+		#clock-cells = <0>;
+		clocks = <&pmic_osc>;
+		clock-output-names = "pmic_clk";
+		interrupt-parent = <&gpio1>;
+		interrupts = <3 GPIO_ACTIVE_LOW>;
+
+		regulators {
+			buck1: BUCK1 {
+				regulator-name = "buck1";
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1300000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-ramp-delay = <1250>;
+				rohm,dvs-run-voltage = <900000>;
+				rohm,dvs-idle-voltage = <900000>;
+				rohm,dvs-suspend-voltage = <800000>;
+			};
+
+			buck2: BUCK2 {
+				regulator-name = "buck2";
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-boot-on;
+				regulator-always-on;
+				rohm,dvs-run-voltage = <1000000>;
+				rohm,dvs-idle-voltage = <900000>;
+			};
+
+			buck3: BUCK3 {
+				regulator-name = "buck3";
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1300000>;
+				regulator-boot-on;
+				rohm,dvs-run-voltage = <900000>;
+			};
+
+			buck4: BUCK4 {
+				regulator-name = "buck4";
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1300000>;
+				regulator-boot-on;
+				regulator-always-on;
+				rohm,dvs-run-voltage = <900000>;
+			};
+
+			buck5: BUCK5 {
+				regulator-name = "buck5";
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			buck6: BUCK6 {
+				regulator-name = "buck6";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			buck7: BUCK7 {
+				regulator-name = "buck7";
+				regulator-min-microvolt = <1605000>;
+				regulator-max-microvolt = <1995000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			buck8: BUCK8 {
+				regulator-name = "buck8";
+				regulator-min-microvolt = <800000>;
+				regulator-max-microvolt = <1400000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			ldo1: LDO1 {
+				regulator-name = "ldo1";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			ldo2: LDO2 {
+				regulator-name = "ldo2";
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <900000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			ldo3: LDO3 {
+				regulator-name = "ldo3";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			ldo4: LDO4 {
+				regulator-name = "ldo4";
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			ldo5: LDO5 {
+				regulator-name = "ldo5";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			ldo6: LDO6 {
+				regulator-name = "ldo6";
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			ldo7: LDO7 {
+				regulator-name = "ldo7";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+		};
+	};
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	status = "okay";
+};
+
+&usdhc1 {
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
+	pinctrl-0 = <&pinctrl_usdhc1>;
+	pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+	pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+	bus-width = <8>;
+	non-removable;
+	status = "okay";
+};
+
+&usdhc2 {
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
+	pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+	pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+	pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+	bus-width = <4>;
+	cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+	vmmc-supply = <&reg_usdhc2_vmmc>;
+	status = "okay";
+};
+
+&usb3_phy0 {
+	status = "okay";
+};
+
+&usb_dwc3_0 {
+	dr_mode = "otg";
+	status = "okay";
+};
+
+&usb3_phy1 {
+	status = "okay";
+};
+
+&usb_dwc3_1 {
+	dr_mode = "host";
+	status = "okay";
+};
+
+&wdog1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_wdog>;
+	fsl,ext-reset-output;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_I2C1_SCL_I2C1_SCL			0x4000007f
+			MX8MQ_IOMUXC_I2C1_SDA_I2C1_SDA			0x4000007f
+		>;
+	};
+
+	pinctrl_pmic: pmicirq {
+		fsl,pins = <
+			MX8MQ_IOMUXC_GPIO1_IO03_GPIO1_IO3	0x41
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_UART1_RXD_UART1_DCE_RX		0x49
+			MX8MQ_IOMUXC_UART1_TXD_UART1_DCE_TX		0x49
+		>;
+	};
+
+	pinctrl_usdhc1: usdhc1grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK			0x83
+			MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD			0xc3
+			MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0		0xc3
+			MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1		0xc3
+			MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2		0xc3
+			MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3		0xc3
+			MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4		0xc3
+			MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5		0xc3
+			MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6		0xc3
+			MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7		0xc3
+			MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE		0x83
+			MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B		0xc1
+		>;
+	};
+
+	pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK			0x85
+			MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD			0xc5
+			MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0		0xc5
+			MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1		0xc5
+			MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2		0xc5
+			MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3		0xc5
+			MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4		0xc5
+			MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5		0xc5
+			MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6		0xc5
+			MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7		0xc5
+			MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE		0x85
+			MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B		0xc1
+		>;
+	};
+
+	pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK			0x87
+			MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD			0xc7
+			MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0		0xc7
+			MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1		0xc7
+			MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2		0xc7
+			MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3		0xc7
+			MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4		0xc7
+			MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5		0xc7
+			MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6		0xc7
+			MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7		0xc7
+			MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE		0x87
+			MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B		0xc1
+		>;
+	};
+
+	pinctrl_usdhc2_gpio: usdhc2grpgpio {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD2_CD_B_GPIO2_IO12	0x41
+			MX8MQ_IOMUXC_SD2_RESET_B_GPIO2_IO19	0x41
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK			0x83
+			MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD			0xc3
+			MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0		0xc3
+			MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1		0xc3
+			MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2		0xc3
+			MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3		0xc3
+			MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0xc1
+		>;
+	};
+
+	pinctrl_usdhc2_100mhz: usdhc2grp100mhz {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK			0x85
+			MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD			0xc5
+			MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0		0xc5
+			MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1		0xc5
+			MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2		0xc5
+			MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3		0xc5
+			MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0xc1
+		>;
+	};
+
+	pinctrl_usdhc2_200mhz: usdhc2grp200mhz {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK			0x87
+			MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD			0xc7
+			MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0		0xc7
+			MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1		0xc7
+			MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2		0xc7
+			MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3		0xc7
+			MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0xc1
+		>;
+	};
+
+	pinctrl_wdog: wdoggrp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0xc6
+		>;
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-sr-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-sr-som.dtsi
index 3dc44114..602c870 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-sr-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-sr-som.dtsi
@@ -125,6 +125,12 @@ vgen6_reg: vgen6 {
 			};
 		};
 	};
+
+	eeprom@50 {
+		compatible = "atmel,24c01";
+		reg = <0x50>;
+		status = "okay";
+	};
 };
 
 &pgc_gpu{
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts b/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts
new file mode 100644
index 0000000..b4795a0
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 Einfochips
+ * Copyright 2019 Linaro Ltd.
+ */
+
+/dts-v1/;
+
+#include "imx8mq.dtsi"
+
+/ {
+	model = "Einfochips i.MX8MQ Thor96";
+	compatible = "einfochips,imx8mq-thor96", "fsl,imx8mq";
+
+	chosen {
+		stdout-path = &uart1;
+	};
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0x00000000 0x40000000 0 0x80000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_leds>;
+
+		user-led1 {
+			label = "green:user1";
+			gpios = <&gpio4 21 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+		};
+
+		user-led2 {
+			label = "green:user2";
+			gpios = <&gpio4 22 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "none";
+		};
+
+		user-led3 {
+			label = "green:user3";
+			gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "mmc1";
+			default-state = "off";
+		};
+
+		user-led4 {
+			label = "green:user4";
+			gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
+			panic-indicator;
+			linux,default-trigger = "none";
+		};
+
+		wlan-active-led {
+			label = "yellow:wlan";
+			gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "phy0tx";
+			default-state = "off";
+		};
+
+		bt-active-led {
+			label = "blue:bt";
+			gpios = <&gpio4 0 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "hci0-power";
+			default-state = "off";
+		};
+	};
+
+	reg_usdhc1_vmmc: reg-usdhc1-vmmc {
+		compatible = "regulator-fixed";
+		regulator-name = "VDD_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	reg_usdhc1_vqmmc: reg-usdhc1-vqmmc {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC_1V8_EXT";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-always-on;
+	};
+
+	reg_usdhc2_vmmc: reg-usdhc2-vmmc {
+		compatible = "regulator-fixed";
+		regulator-name = "VSD_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_reg_usdhc2>;
+		gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	reg_usdhc2_vqmmc: reg-usdhc2-vqmmc {
+		compatible = "regulator-fixed";
+		regulator-name = "NVCC_SD2";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_wifi_reg_on>;
+		gpio = <&gpio3 3 GPIO_ACTIVE_HIGH>;
+	};
+};
+
+/* LS-SPI0 */
+&ecspi2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_ecspi2>;
+	status = "okay";
+};
+
+&fec1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_fec1>;
+	phy-mode = "rgmii-id";
+	phy-reset-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
+	phy-handle = <&ethphy>;
+	fsl,magic-packet;
+	status = "okay";
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ethphy: ethernet-phy@3 {
+			compatible = "ethernet-phy-ieee802.3-c22";
+			reg = <3>;
+		};
+	};
+};
+
+/* LS-I2C0 */
+&i2c1 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+
+	pmic@8 {
+		compatible = "fsl,pfuze100";
+		reg = <0x8>;
+
+		regulators {
+			sw1a_reg: sw1ab {
+				regulator-min-microvolt = <300000>;
+				regulator-max-microvolt = <1875000>;
+			};
+
+			sw1c_reg: sw1c {
+				regulator-min-microvolt = <300000>;
+				regulator-max-microvolt = <1875000>;
+			};
+
+			sw2_reg: sw2 {
+				regulator-min-microvolt = <800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			sw3a_reg: sw3ab {
+				regulator-min-microvolt = <400000>;
+				regulator-max-microvolt = <1975000>;
+				regulator-always-on;
+			};
+
+			sw4_reg: sw4 {
+				regulator-min-microvolt = <800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			swbst_reg: swbst {
+				regulator-min-microvolt = <5000000>;
+				regulator-max-microvolt = <5150000>;
+			};
+
+			snvs_reg: vsnvs {
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-always-on;
+			};
+
+			vref_reg: vrefddr {
+				regulator-always-on;
+			};
+
+			vgen1_reg: vgen1 {
+				regulator-min-microvolt = <800000>;
+				regulator-max-microvolt = <1550000>;
+			};
+
+			vgen2_reg: vgen2 {
+				regulator-min-microvolt = <800000>;
+				regulator-max-microvolt = <1550000>;
+				regulator-always-on;
+			};
+
+			vgen3_reg: vgen3 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen4_reg: vgen4 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen5_reg: vgen5 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen6_reg: vgen6 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+			};
+		};
+	};
+};
+
+/* LS-I2C1 */
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+
+	eeprom: eeprom@50 {
+		compatible = "atmel,24c256";
+		reg = <0x50>;
+	};
+};
+
+/* HS-I2C2 */
+&i2c3 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	status = "okay";
+};
+
+/* HS-I2C3 */
+&i2c4 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c4>;
+	status = "okay";
+};
+
+&pgc_gpu {
+	power-supply = <&sw1a_reg>;
+};
+
+&pgc_vpu {
+	power-supply = <&sw1c_reg>;
+};
+
+&qspi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_qspi0>;
+	status = "okay";
+
+	flash@0 {
+		compatible = "jedec,spi-nor";
+		spi-max-frequency = <100000000>;
+		reg = <0>;
+	};
+};
+
+/* Debug UART */
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	assigned-clocks = <&clk IMX8MQ_CLK_UART1>;
+	assigned-clock-parents = <&clk IMX8MQ_CLK_25M>;
+	status = "okay";
+};
+
+/* LS-UART0 */
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	assigned-clocks = <&clk IMX8MQ_CLK_UART2>;
+	assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_80M>;
+	uart-has-rtscts;
+	status = "okay";
+
+	bluetooth {
+		compatible = "brcm,bcm43438-bt";
+		device-wakeup-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+		host-wakeup-gpios = <&gpio3 14 GPIO_ACTIVE_HIGH>;
+		shutdown-gpios = <&gpio3 5 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_bt_gpios>;
+	};
+};
+
+/* LS-UART1 */
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart3>;
+	assigned-clocks = <&clk IMX8MQ_CLK_UART3>;
+	assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_80M>;
+	status = "okay";
+};
+
+&usb3_phy1 {
+	status = "okay";
+};
+
+&usb_dwc3_1 {
+	dr_mode = "host";
+	status = "okay";
+};
+
+/* SDIO */
+&usdhc1 {
+	#address-cells = <0x1>;
+	#size-cells = <0x0>;
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
+	pinctrl-0 = <&pinctrl_usdhc1>;
+	pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+	pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+	vmmc-supply = <&reg_usdhc1_vmmc>;
+	vqmmc-supply = <&reg_usdhc1_vqmmc>;
+	mmc-pwrseq = <&sdio_pwrseq>;
+	bus-width = <4>;
+	non-removable;
+	no-sd;
+	no-emmc;
+	status = "okay";
+
+	brcmf: wifi@1 {
+		reg = <1>;
+		compatible = "brcm,bcm4329-fmac";
+	};
+};
+
+/* uSD */
+&usdhc2 {
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
+	pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+	pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+	pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+	vmmc-supply = <&reg_usdhc2_vmmc>;
+	vqmmc-supply = <&reg_usdhc2_vqmmc>;
+	cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+	bus-width = <4>;
+	no-sdio;
+	no-emmc;
+	disable-wp;
+	status = "okay";
+};
+
+&wdog1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_wdog>;
+	fsl,ext-reset-output;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl_bt_gpios: btgpiosgrp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SAI5_RXD1_GPIO3_IO22		0x19
+			MX8MQ_IOMUXC_NAND_DQS_GPIO3_IO14		0x19
+			MX8MQ_IOMUXC_NAND_CLE_GPIO3_IO5			0x19
+		>;
+	};
+
+	pinctrl_ecspi2: ecspi2grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK		0x16
+			MX8MQ_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI		0x16
+			MX8MQ_IOMUXC_ECSPI2_MISO_ECSPI2_MISO		0x16
+			MX8MQ_IOMUXC_ECSPI2_SS0_ECSPI2_SS0		0x16
+		>;
+	};
+
+	pinctrl_fec1: fec1grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_ENET_MDC_ENET1_MDC			0x4
+			MX8MQ_IOMUXC_ENET_MDIO_ENET1_MDIO		0x24
+			MX8MQ_IOMUXC_ENET_TD3_ENET1_RGMII_TD3		0x1c
+			MX8MQ_IOMUXC_ENET_TD2_ENET1_RGMII_TD2		0x1c
+			MX8MQ_IOMUXC_ENET_TD1_ENET1_RGMII_TD1		0x1c
+			MX8MQ_IOMUXC_ENET_TD0_ENET1_RGMII_TD0		0x1c
+			MX8MQ_IOMUXC_ENET_RD3_ENET1_RGMII_RD3		0x91
+			MX8MQ_IOMUXC_ENET_RD2_ENET1_RGMII_RD2		0x91
+			MX8MQ_IOMUXC_ENET_RD1_ENET1_RGMII_RD1		0x91
+			MX8MQ_IOMUXC_ENET_RD0_ENET1_RGMII_RD0		0x91
+			MX8MQ_IOMUXC_ENET_TXC_ENET1_RGMII_TXC		0x1c
+			MX8MQ_IOMUXC_ENET_RXC_ENET1_RGMII_RXC		0x91
+			MX8MQ_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL	0x91
+			MX8MQ_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL	0x1c
+			MX8MQ_IOMUXC_GPIO1_IO09_GPIO1_IO9		0x19
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_I2C1_SCL_I2C1_SCL			0x4000007f
+			MX8MQ_IOMUXC_I2C1_SDA_I2C1_SDA			0x4000007f
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_I2C2_SCL_I2C2_SCL			0x4000007f
+			MX8MQ_IOMUXC_I2C2_SDA_I2C2_SDA			0x4000007f
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_I2C3_SCL_I2C3_SCL			0x4000007f
+			MX8MQ_IOMUXC_I2C3_SDA_I2C3_SDA			0x4000007f
+		>;
+	};
+
+	pinctrl_i2c4: i2c4grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_I2C4_SCL_I2C4_SCL			0x4000007f
+			MX8MQ_IOMUXC_I2C4_SDA_I2C4_SDA			0x4000007f
+		>;
+	};
+
+	pinctrl_leds: ledsgrp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SAI2_RXFS_GPIO4_IO21		0x19
+			MX8MQ_IOMUXC_SAI2_RXC_GPIO4_IO22		0x19
+			MX8MQ_IOMUXC_SAI3_RXFS_GPIO4_IO28		0x19
+			MX8MQ_IOMUXC_SAI3_RXC_GPIO4_IO29		0x19
+			MX8MQ_IOMUXC_SAI1_RXC_GPIO4_IO1			0x19
+			MX8MQ_IOMUXC_SAI1_RXFS_GPIO4_IO0		0x19
+		>;
+	};
+
+	pinctrl_qspi0: qspi0grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_NAND_ALE_QSPI_A_SCLK		0x82
+			MX8MQ_IOMUXC_NAND_CE0_B_QSPI_A_SS0_B		0x82
+			MX8MQ_IOMUXC_NAND_DATA00_QSPI_A_DATA0		0x82
+			MX8MQ_IOMUXC_NAND_DATA01_QSPI_A_DATA1		0x82
+			MX8MQ_IOMUXC_NAND_DATA02_QSPI_A_DATA2		0x82
+			MX8MQ_IOMUXC_NAND_DATA03_QSPI_A_DATA3		0x82
+
+		>;
+	};
+
+	pinctrl_reg_usdhc2: regusdhc2grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD2_RESET_B_GPIO2_IO19		0x41
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_UART1_RXD_UART1_DCE_RX		0x49
+			MX8MQ_IOMUXC_UART1_TXD_UART1_DCE_TX		0x49
+		>;
+	};
+
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_UART2_RXD_UART2_DCE_RX		0x49
+			MX8MQ_IOMUXC_UART2_TXD_UART2_DCE_TX		0x49
+			MX8MQ_IOMUXC_UART4_RXD_UART2_DCE_CTS_B		0x49
+			MX8MQ_IOMUXC_UART4_TXD_UART2_DCE_RTS_B		0x49
+		>;
+	};
+
+	pinctrl_uart3: uart3grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_UART3_TXD_UART3_DCE_TX		0x49
+			MX8MQ_IOMUXC_UART3_RXD_UART3_DCE_RX		0x49
+		>;
+	};
+
+	pinctrl_usdhc1: usdhc1grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK			0x83
+			MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD			0xc3
+			MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0		0xc3
+			MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1		0xc3
+			MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2		0xc3
+			MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3		0xc3
+			MX8MQ_IOMUXC_GPIO1_IO00_ANAMIX_REF_CLK_32K	0x85
+		>;
+	};
+
+	pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK			0x8d
+			MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD			0xcd
+			MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0		0xcd
+			MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1		0xcd
+			MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2		0xcd
+			MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3		0xcd
+			MX8MQ_IOMUXC_GPIO1_IO00_ANAMIX_REF_CLK_32K	0x85
+		>;
+	};
+
+	pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK			0x9f
+			MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD			0xdf
+			MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0		0xdf
+			MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1		0xdf
+			MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2		0xdf
+			MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3		0xdf
+			MX8MQ_IOMUXC_GPIO1_IO00_ANAMIX_REF_CLK_32K	0x85
+		>;
+	};
+
+	pinctrl_usdhc2_gpio: usdhc2gpiogrp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD2_CD_B_GPIO2_IO12		0x41
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK			0x83
+			MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD			0xc3
+			MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0		0xc3
+			MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1		0xc3
+			MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2		0xc3
+			MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3		0xc3
+			MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0xc1
+		>;
+	};
+
+	pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK			0x8c
+			MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD			0xcc
+			MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0		0xcc
+			MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1		0xcc
+			MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2		0xcc
+			MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3		0xcc
+			MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0xc1
+		>;
+	};
+
+	pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK			0x9c
+			MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD			0xdc
+			MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0		0xdc
+			MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1		0xdc
+			MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2		0xdc
+			MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3		0xdc
+			MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0xcc
+		>;
+	};
+
+	pinctrl_wdog: wdoggrp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B		0xc6
+		>;
+	};
+
+	pinctrl_wifi_reg_on: wifiregongrp {
+		fsl,pins = <
+			MX8MQ_IOMUXC_NAND_CE2_B_GPIO3_IO3		0x17059
+		>;
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
index d2a6da4..6b35813 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
@@ -8,7 +8,7 @@
 #include "imx8mq-zii-ultra.dtsi"
 
 / {
-	model = "ZII i.MX8MQ Ultra RMB3 Board";
+	model = "ZII Ultra RMB3 Board";
 	compatible = "zii,imx8mq-ultra-rmb3", "zii,imx8mq-ultra", "fsl,imx8mq";
 };
 
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-zest.dts b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-zest.dts
index 1084d93..173b9e9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-zest.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-zest.dts
@@ -8,7 +8,7 @@
 #include "imx8mq-zii-ultra.dtsi"
 
 / {
-	model = "ZII i.MX8MQ Ultra Zest Board";
+	model = "ZII Ultra Zest Board";
 	compatible = "zii,imx8mq-ultra-zest", "zii,imx8mq-ultra", "fsl,imx8mq";
 };
 
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 7f93194..6a1e839 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -290,11 +290,67 @@ soc@0 {
 		dma-ranges = <0x40000000 0x0 0x40000000 0xc0000000>;
 
 		bus@30000000 { /* AIPS1 */
-			compatible = "fsl,imx8mq-aips-bus", "simple-bus";
+			compatible = "simple-bus";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x30000000 0x30000000 0x400000>;
 
+			sai1: sai@30010000 {
+				#sound-dai-cells = <0>;
+				compatible = "fsl,imx8mq-sai";
+				reg = <0x30010000 0x10000>;
+				interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clk IMX8MQ_CLK_SAI1_IPG>,
+				         <&clk IMX8MQ_CLK_SAI1_ROOT>,
+				         <&clk IMX8MQ_CLK_DUMMY>, <&clk IMX8MQ_CLK_DUMMY>;
+				clock-names = "bus", "mclk1", "mclk2", "mclk3";
+				dmas = <&sdma2 8 24 0>, <&sdma1 9 24 0>;
+				dma-names = "rx", "tx";
+				status = "disabled";
+			};
+
+			sai6: sai@30030000 {
+				#sound-dai-cells = <0>;
+				compatible = "fsl,imx8mq-sai";
+				reg = <0x30030000 0x10000>;
+				interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clk IMX8MQ_CLK_SAI6_IPG>,
+				         <&clk IMX8MQ_CLK_SAI6_ROOT>,
+				         <&clk IMX8MQ_CLK_DUMMY>, <&clk IMX8MQ_CLK_DUMMY>;
+				clock-names = "bus", "mclk1", "mclk2", "mclk3";
+				dmas = <&sdma2 4 24 0>, <&sdma2 5 24 0>;
+				dma-names = "rx", "tx";
+				status = "disabled";
+			};
+
+			sai5: sai@30040000 {
+				#sound-dai-cells = <0>;
+				compatible = "fsl,imx8mq-sai";
+				reg = <0x30040000 0x10000>;
+				interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clk IMX8MQ_CLK_SAI5_IPG>,
+				         <&clk IMX8MQ_CLK_SAI5_ROOT>,
+				         <&clk IMX8MQ_CLK_DUMMY>, <&clk IMX8MQ_CLK_DUMMY>;
+				clock-names = "bus", "mclk1", "mclk2", "mclk3";
+				dmas = <&sdma2 2 24 0>, <&sdma2 3 24 0>;
+				dma-names = "rx", "tx";
+				status = "disabled";
+			};
+
+			sai4: sai@30050000 {
+				#sound-dai-cells = <0>;
+				compatible = "fsl,imx8mq-sai";
+				reg = <0x30050000 0x10000>;
+				interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clk IMX8MQ_CLK_SAI4_IPG>,
+				         <&clk IMX8MQ_CLK_SAI4_ROOT>,
+				         <&clk IMX8MQ_CLK_DUMMY>, <&clk IMX8MQ_CLK_DUMMY>;
+				clock-names = "bus", "mclk1", "mclk2", "mclk3";
+				dmas = <&sdma2 0 24 0>, <&sdma2 1 24 0>;
+				dma-names = "rx", "tx";
+				status = "disabled";
+			};
+
 			gpio1: gpio@30200000 {
 				compatible = "fsl,imx8mq-gpio", "fsl,imx35-gpio";
 				reg = <0x30200000 0x10000>;
@@ -448,6 +504,23 @@ sdma2: sdma@302c0000 {
 				fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
 			};
 
+			lcdif: lcd-controller@30320000 {
+				compatible = "fsl,imx8mq-lcdif", "fsl,imx28-lcdif";
+				reg = <0x30320000 0x10000>;
+				interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clk IMX8MQ_CLK_LCDIF_PIXEL>;
+				clock-names = "pix";
+				assigned-clocks = <&clk IMX8MQ_VIDEO_PLL1_REF_SEL>,
+						  <&clk IMX8MQ_VIDEO_PLL1_BYPASS>,
+						  <&clk IMX8MQ_CLK_LCDIF_PIXEL>,
+						  <&clk IMX8MQ_VIDEO_PLL1>;
+				assigned-clock-parents = <&clk IMX8MQ_CLK_25M>,
+						  <&clk IMX8MQ_VIDEO_PLL1>,
+						  <&clk IMX8MQ_VIDEO_PLL1_OUT>;
+				assigned-clock-rates = <0>, <0>, <0>, <594000000>;
+				status = "disabled";
+			};
+
 			iomuxc: iomuxc@30330000 {
 				compatible = "fsl,imx8mq-iomuxc";
 				reg = <0x30330000 0x10000>;
@@ -519,6 +592,8 @@ clk: clock-controller@30380000 {
 				clock-names = "ckil", "osc_25m", "osc_27m",
 				              "clk_ext1", "clk_ext2",
 				              "clk_ext3", "clk_ext4";
+				assigned-clocks = <&clk IMX8MQ_CLK_NOC>;
+				assigned-clock-rates = <800000000>;
 			};
 
 			src: reset-controller@30390000 {
@@ -617,7 +692,7 @@ pgc_pcie2: power-domain@a {
 		};
 
 		bus@30400000 { /* AIPS2 */
-			compatible = "fsl,imx8mq-aips-bus", "simple-bus";
+			compatible = "simple-bus";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x30400000 0x30400000 0x400000>;
@@ -676,7 +751,7 @@ system_counter: timer@306a0000 {
 		};
 
 		bus@30800000 { /* AIPS3 */
-			compatible = "fsl,imx8mq-aips-bus", "simple-bus";
+			compatible = "simple-bus";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x30800000 0x30800000 0x400000>,
@@ -765,6 +840,20 @@ sai2: sai@308b0000 {
 				status = "disabled";
 			};
 
+			sai3: sai@308c0000 {
+				#sound-dai-cells = <0>;
+				compatible = "fsl,imx8mq-sai";
+				reg = <0x308c0000 0x10000>;
+				interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clk IMX8MQ_CLK_SAI3_IPG>,
+				         <&clk IMX8MQ_CLK_SAI3_ROOT>,
+				         <&clk IMX8MQ_CLK_DUMMY>, <&clk IMX8MQ_CLK_DUMMY>;
+				clock-names = "bus", "mclk1", "mclk2", "mclk3";
+				dmas = <&sdma1 12 24 0>, <&sdma1 13 24 0>;
+				dma-names = "rx", "tx";
+				status = "disabled";
+			};
+
 			crypto: crypto@30900000 {
 				compatible = "fsl,sec-v4.0";
 				#address-cells = <1>;
@@ -934,7 +1023,7 @@ fec1: ethernet@30be0000 {
 		};
 
 		bus@32c00000 { /* AIPS4 */
-			compatible = "fsl,imx8mq-aips-bus", "simple-bus";
+			compatible = "simple-bus";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x32c00000 0x32c00000 0x400000>;
@@ -1113,6 +1202,16 @@ gic: interrupt-controller@38800000 {
 			interrupt-parent = <&gic>;
 		};
 
+		ddrc: memory-controller@3d400000 {
+			compatible = "fsl,imx8mq-ddrc", "fsl,imx8m-ddrc";
+			reg = <0x3d400000 0x400000>;
+			clock-names = "core", "pll", "alt", "apb";
+			clocks = <&clk IMX8MQ_CLK_DRAM_CORE>,
+				 <&clk IMX8MQ_DRAM_PLL_OUT>,
+				 <&clk IMX8MQ_CLK_DRAM_ALT>,
+				 <&clk IMX8MQ_CLK_DRAM_APB>;
+		};
+
 		ddr-pmu@3d800000 {
 			compatible = "fsl,imx8mq-ddr-pmu", "fsl,imx8m-ddr-pmu";
 			reg = <0x3d800000 0x400000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
index 9646a41..fb5f752 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
@@ -250,7 +250,6 @@ adma_lpuart0: serial@5a060000 {
 			compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
 			reg = <0x5a060000 0x1000>;
 			interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-parent = <&gic>;
 			clocks = <&adma_lpcg IMX_ADMA_LPCG_UART0_IPG_CLK>,
 				 <&adma_lpcg IMX_ADMA_LPCG_UART0_BAUD_CLK>;
 			clock-names = "ipg", "baud";
@@ -262,7 +261,6 @@ adma_lpuart1: serial@5a070000 {
 			compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
 			reg = <0x5a070000 0x1000>;
 			interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-parent = <&gic>;
 			clocks = <&adma_lpcg IMX_ADMA_LPCG_UART1_IPG_CLK>,
 				 <&adma_lpcg IMX_ADMA_LPCG_UART1_BAUD_CLK>;
 			clock-names = "ipg", "baud";
@@ -274,7 +272,6 @@ adma_lpuart2: serial@5a080000 {
 			compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
 			reg = <0x5a080000 0x1000>;
 			interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-parent = <&gic>;
 			clocks = <&adma_lpcg IMX_ADMA_LPCG_UART2_IPG_CLK>,
 				 <&adma_lpcg IMX_ADMA_LPCG_UART2_BAUD_CLK>;
 			clock-names = "ipg", "baud";
@@ -286,7 +283,6 @@ adma_lpuart3: serial@5a090000 {
 			compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
 			reg = <0x5a090000 0x1000>;
 			interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-parent = <&gic>;
 			clocks = <&adma_lpcg IMX_ADMA_LPCG_UART3_IPG_CLK>,
 				 <&adma_lpcg IMX_ADMA_LPCG_UART3_BAUD_CLK>;
 			clock-names = "ipg", "baud";
@@ -298,7 +294,6 @@ adma_i2c0: i2c@5a800000 {
 			compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
 			reg = <0x5a800000 0x4000>;
 			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-parent = <&gic>;
 			clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C0_CLK>;
 			clock-names = "per";
 			assigned-clocks = <&clk IMX_ADMA_I2C0_CLK>;
@@ -311,7 +306,6 @@ adma_i2c1: i2c@5a810000 {
 			compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
 			reg = <0x5a810000 0x4000>;
 			interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-parent = <&gic>;
 			clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C1_CLK>;
 			clock-names = "per";
 			assigned-clocks = <&clk IMX_ADMA_I2C1_CLK>;
@@ -324,7 +318,6 @@ adma_i2c2: i2c@5a820000 {
 			compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
 			reg = <0x5a820000 0x4000>;
 			interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-parent = <&gic>;
 			clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C2_CLK>;
 			clock-names = "per";
 			assigned-clocks = <&clk IMX_ADMA_I2C2_CLK>;
@@ -337,7 +330,6 @@ adma_i2c3: i2c@5a830000 {
 			compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
 			reg = <0x5a830000 0x4000>;
 			interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-parent = <&gic>;
 			clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C3_CLK>;
 			clock-names = "per";
 			assigned-clocks = <&clk IMX_ADMA_I2C3_CLK>;
@@ -361,7 +353,6 @@ conn_lpcg: clock-controller@5b200000 {
 
 		usdhc1: mmc@5b010000 {
 			compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
-			interrupt-parent = <&gic>;
 			interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
 			reg = <0x5b010000 0x10000>;
 			clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC0_IPG_CLK>,
@@ -374,7 +365,6 @@ usdhc1: mmc@5b010000 {
 
 		usdhc2: mmc@5b020000 {
 			compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
-			interrupt-parent = <&gic>;
 			interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
 			reg = <0x5b020000 0x10000>;
 			clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC1_IPG_CLK>,
@@ -389,7 +379,6 @@ usdhc2: mmc@5b020000 {
 
 		usdhc3: mmc@5b030000 {
 			compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
-			interrupt-parent = <&gic>;
 			interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
 			reg = <0x5b030000 0x10000>;
 			clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC2_IPG_CLK>,
@@ -446,7 +435,6 @@ ddr_subsyss: bus@5c000000 {
 		ddr-pmu@5c020000 {
 			compatible = "fsl,imx8-ddr-pmu";
 			reg = <0x5c020000 0x10000>;
-			interrupt-parent = <&gic>;
 			interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
 		};
 	};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts b/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts
index c563d3e..7d370da 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts
@@ -170,6 +170,7 @@ &i2c2 {
 };
 
 &ir {
+	linux,rc-map-name = "rc-hisi-poplar";
 	status = "okay";
 };
 
diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
index 13821a0..12bc1d3 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
@@ -564,7 +564,7 @@ pcie: pcie@9860000 {
 			#address-cells = <3>;
 			#size-cells = <2>;
 			device_type = "pci";
-			bus-range = <0 15>;
+			bus-range = <0x00 0xff>;
 			num-lanes = <1>;
 			ranges = <0x81000000 0x0 0x00000000 0x4f00000 0x0 0x100000
 				  0x82000000 0x0 0x3000000 0x3000000 0x0 0x01f00000>;
diff --git a/arch/arm64/boot/dts/intel/Makefile b/arch/arm64/boot/dts/intel/Makefile
index 1253af3..40cb16e 100644
--- a/arch/arm64/boot/dts/intel/Makefile
+++ b/arch/arm64/boot/dts/intel/Makefile
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
-dtb-$(CONFIG_ARCH_AGILEX) += socfpga_agilex_socdk.dtb
+dtb-$(CONFIG_ARCH_AGILEX) += socfpga_agilex_socdk.dtb \
+			     socfpga_agilex_socdk_nand.dtb
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
index d43e129..e1d357e 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
@@ -113,6 +113,7 @@ gmac0: ethernet@ff800000 {
 			rx-fifo-depth = <16384>;
 			snps,multicast-filter-bins = <256>;
 			iommus = <&smmu 1>;
+			altr,sysmgr-syscon = <&sysmgr 0x44 0>;
 			status = "disabled";
 		};
 
@@ -128,6 +129,7 @@ gmac1: ethernet@ff802000 {
 			rx-fifo-depth = <16384>;
 			snps,multicast-filter-bins = <256>;
 			iommus = <&smmu 2>;
+			altr,sysmgr-syscon = <&sysmgr 0x48 8>;
 			status = "disabled";
 		};
 
@@ -143,6 +145,7 @@ gmac2: ethernet@ff804000 {
 			rx-fifo-depth = <16384>;
 			snps,multicast-filter-bins = <256>;
 			iommus = <&smmu 3>;
+			altr,sysmgr-syscon = <&sysmgr 0x4c 16>;
 			status = "disabled";
 		};
 
@@ -249,6 +252,18 @@ mmc: dwmmc0@ff808000 {
 			status = "disabled";
 		};
 
+		nand: nand@ffb90000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "altr,socfpga-denali-nand";
+			reg = <0xffb90000 0x10000>,
+			      <0xffb80000 0x1000>;
+			reg-names = "nand_data", "denali_reg";
+			interrupts = <0 97 4>;
+			resets = <&rst NAND_RESET>, <&rst NAND_OCP_RESET>;
+			status = "disabled";
+		};
+
 		ocram: sram@ffe00000 {
 			compatible = "mmio-sram";
 			reg = <0xffe00000 0x40000>;
@@ -325,7 +340,7 @@ spi1: spi@ffda5000 {
 		};
 
 		sysmgr: sysmgr@ffd12000 {
-			compatible = "altr,sys-mgr", "syscon";
+			compatible = "altr,sys-mgr-s10","altr,sys-mgr";
 			reg = <0xffd12000 0x500>;
 		};
 
@@ -449,6 +464,65 @@ sdr: sdr@f8011100 {
 			reg = <0xf8011100 0xc0>;
 		};
 
+		eccmgr {
+			compatible = "altr,socfpga-s10-ecc-manager",
+				     "altr,socfpga-a10-ecc-manager";
+			altr,sysmgr-syscon = <&sysmgr>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			interrupts = <0 15 4>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			ranges;
+
+			sdramedac {
+				compatible = "altr,sdram-edac-s10";
+				altr,sdr-syscon = <&sdr>;
+				interrupts = <16 4>;
+			};
+
+			ocram-ecc@ff8cc000 {
+				compatible = "altr,socfpga-s10-ocram-ecc",
+					     "altr,socfpga-a10-ocram-ecc";
+				reg = <0xff8cc000 0x100>;
+				altr,ecc-parent = <&ocram>;
+				interrupts = <1 4>;
+			};
+
+			usb0-ecc@ff8c4000 {
+				compatible = "altr,socfpga-s10-usb-ecc",
+					     "altr,socfpga-usb-ecc";
+				reg = <0xff8c4000 0x100>;
+				altr,ecc-parent = <&usb0>;
+				interrupts = <2 4>;
+			};
+
+			emac0-rx-ecc@ff8c0000 {
+				compatible = "altr,socfpga-s10-eth-mac-ecc",
+					     "altr,socfpga-eth-mac-ecc";
+				reg = <0xff8c0000 0x100>;
+				altr,ecc-parent = <&gmac0>;
+				interrupts = <4 4>;
+			};
+
+			emac0-tx-ecc@ff8c0400 {
+				compatible = "altr,socfpga-s10-eth-mac-ecc",
+					     "altr,socfpga-eth-mac-ecc";
+				reg = <0xff8c0400 0x100>;
+				altr,ecc-parent = <&gmac0>;
+				interrupts = <5 4>;
+			};
+
+			sdmmca-ecc@ff8c8c00 {
+				compatible = "altr,socfpga-s10-sdmmc-ecc",
+					     "altr,socfpga-sdmmc-ecc";
+				reg = <0xff8c8c00 0x100>;
+				altr,ecc-parent = <&mmc>;
+				interrupts = <14 4>,
+					     <15 4>;
+			};
+		};
+
 		qspi: spi@ff8d2000 {
 			compatible = "cdns,qspi-nor";
 			#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts
new file mode 100644
index 0000000..979aa59
--- /dev/null
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier:     GPL-2.0
+/*
+ * Copyright (C) 2019, Intel Corporation
+ */
+#include "socfpga_agilex.dtsi"
+
+/ {
+	model = "SoCFPGA Agilex SoCDK";
+
+	aliases {
+		serial0 = &uart0;
+		ethernet0 = &gmac0;
+		ethernet1 = &gmac1;
+		ethernet2 = &gmac2;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		hps0 {
+			label = "hps_led0";
+			gpios = <&portb 20 GPIO_ACTIVE_HIGH>;
+		};
+
+		hps1 {
+			label = "hps_led1";
+			gpios = <&portb 19 GPIO_ACTIVE_HIGH>;
+		};
+
+		hps2 {
+			label = "hps_led2";
+			gpios = <&portb 21 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		/* We expect the bootloader to fill in the reg */
+		reg = <0 0 0 0>;
+	};
+
+	soc {
+		clocks {
+			osc1 {
+				clock-frequency = <25000000>;
+			};
+		};
+	};
+};
+
+&gpio1 {
+	status = "okay";
+};
+
+&gmac2 {
+	status = "okay";
+	phy-mode = "rgmii";
+	phy-handle = <&phy0>;
+
+	max-frame-size = <9000>;
+
+	mdio0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "snps,dwmac-mdio";
+		phy0: ethernet-phy@0 {
+			reg = <4>;
+
+			txd0-skew-ps = <0>; /* -420ps */
+			txd1-skew-ps = <0>; /* -420ps */
+			txd2-skew-ps = <0>; /* -420ps */
+			txd3-skew-ps = <0>; /* -420ps */
+			rxd0-skew-ps = <420>; /* 0ps */
+			rxd1-skew-ps = <420>; /* 0ps */
+			rxd2-skew-ps = <420>; /* 0ps */
+			rxd3-skew-ps = <420>; /* 0ps */
+			txen-skew-ps = <0>; /* -420ps */
+			txc-skew-ps = <900>; /* 0ps */
+			rxdv-skew-ps = <420>; /* 0ps */
+			rxc-skew-ps = <1680>; /* 780ps */
+		};
+	};
+};
+
+&nand {
+	status = "okay";
+
+	flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0>;
+		nand-bus-width = <16>;
+
+		partition@0 {
+			label = "u-boot";
+			reg = <0 0x200000>;
+		};
+		partition@200000 {
+			label = "env";
+			reg = <0x200000 0x40000>;
+		};
+		partition@240000 {
+			label = "dtb";
+			reg = <0x240000 0x40000>;
+		};
+		partition@280000 {
+			label = "kernel";
+			reg = <0x280000 0x2000000>;
+		};
+		partition@2280000 {
+			label = "misc";
+			reg = <0x2280000 0x2000000>;
+		};
+		partition@4280000 {
+			label = "rootfs";
+			reg = <0x4280000 0x3bd80000>;
+		};
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+	disable-over-current;
+};
+
+&watchdog0 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
index bd4aab6..7eb6c17 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
@@ -69,6 +69,7 @@ sfp_eth0: sfp-eth0 {
 		mod-def0-gpio = <&gpiosb 3 GPIO_ACTIVE_LOW>;
 		tx-disable-gpio = <&gpiosb 4 GPIO_ACTIVE_HIGH>;
 		tx-fault-gpio = <&gpiosb 5 GPIO_ACTIVE_HIGH>;
+		maximum-power-milliwatt = <3000>;
 	};
 
 	sfp_eth1: sfp-eth1 {
@@ -78,6 +79,7 @@ sfp_eth1: sfp-eth1 {
 		mod-def0-gpio = <&gpiosb 8 GPIO_ACTIVE_LOW>;
 		tx-disable-gpio = <&gpiosb 9 GPIO_ACTIVE_HIGH>;
 		tx-fault-gpio = <&gpiosb 10 GPIO_ACTIVE_HIGH>;
+		maximum-power-milliwatt = <3000>;
 	};
 };
 
@@ -119,12 +121,14 @@ &i2c0 {
 	status = "okay";
 	pinctrl-names = "default";
 	pinctrl-0 = <&i2c1_pins>;
+	/delete-property/mrvl,i2c-fast-mode;
 };
 
 &i2c1 {
 	status = "okay";
 	pinctrl-names = "default";
 	pinctrl-0 = <&i2c2_pins>;
+	/delete-property/mrvl,i2c-fast-mode;
 
 	lm75@48 {
 		status = "okay";
@@ -143,6 +147,7 @@ &eth0 {
 	phy-mode = "sgmii";
 	status = "okay";
 	managed = "in-band-status";
+	phys = <&comphy1 0>;
 	sfp = <&sfp_eth0>;
 };
 
@@ -150,11 +155,14 @@ &eth1 {
 	phy-mode = "sgmii";
 	status = "okay";
 	managed = "in-band-status";
+	phys = <&comphy0 1>;
 	sfp = <&sfp_eth1>;
 };
 
 &usb3 {
 	status = "okay";
+	phys = <&usb2_utmi_otg_phy>;
+	phy-names = "usb2-utmi-otg-phy";
 };
 
 &uart0 {
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
index bd88149..a211a04 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
@@ -408,6 +408,8 @@ port@5 {
 				reg = <5>;
 				label = "cpu";
 				ethernet = <&cp1_eth2>;
+				phy-mode = "2500base-x";
+				managed = "in-band-status";
 			};
 		};
 
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 15f1842..8b4e806 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -157,6 +157,7 @@ cpu0: cpu@0 {
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0>;
 			#cooling-cells = <2>;
+			dynamic-power-coefficient = <263>;
 			clocks = <&infracfg CLK_INFRA_CA53SEL>,
 				 <&apmixedsys CLK_APMIXED_MAINPLL>;
 			clock-names = "cpu", "intermediate";
@@ -170,6 +171,7 @@ cpu1: cpu@1 {
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0>;
 			#cooling-cells = <2>;
+			dynamic-power-coefficient = <263>;
 			clocks = <&infracfg CLK_INFRA_CA53SEL>,
 				 <&apmixedsys CLK_APMIXED_MAINPLL>;
 			clock-names = "cpu", "intermediate";
@@ -183,6 +185,7 @@ cpu2: cpu@100 {
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0>;
 			#cooling-cells = <2>;
+			dynamic-power-coefficient = <530>;
 			clocks = <&infracfg CLK_INFRA_CA72SEL>,
 				 <&apmixedsys CLK_APMIXED_MAINPLL>;
 			clock-names = "cpu", "intermediate";
@@ -196,6 +199,7 @@ cpu3: cpu@101 {
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0>;
 			#cooling-cells = <2>;
+			dynamic-power-coefficient = <530>;
 			clocks = <&infracfg CLK_INFRA_CA72SEL>,
 				 <&apmixedsys CLK_APMIXED_MAINPLL>;
 			clock-names = "cpu", "intermediate";
@@ -1401,6 +1405,20 @@ vcodec_enc: vcodec@18002000 {
 						 <&topckgen CLK_TOP_UNIVPLL1_D2>;
 		};
 
+		jpegdec: jpegdec@18004000 {
+			compatible = "mediatek,mt8173-jpgdec";
+			reg = <0 0x18004000 0 0x1000>;
+			interrupts = <GIC_SPI 203 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&vencsys CLK_VENC_CKE0>,
+				 <&vencsys CLK_VENC_CKE3>;
+			clock-names = "jpgdec-smi",
+				      "jpgdec";
+			power-domains = <&scpsys MT8173_POWER_DOMAIN_VENC>;
+			mediatek,larb = <&larb3>;
+			iommus = <&iommu M4U_PORT_JPGDEC_WDMA>,
+				 <&iommu M4U_PORT_JPGDEC_BSDMA>;
+		};
+
 		vencltsys: clock-controller@19000000 {
 			compatible = "mediatek,mt8173-vencltsys", "syscon";
 			reg = <0 0x19000000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index 10b3247..124f9d3 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -8,6 +8,7 @@
 #include <dt-bindings/clock/mt8183-clk.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/reset-controller/mt8183-resets.h>
 #include "mt8183-pinfunc.h"
 
 / {
@@ -227,6 +228,7 @@ infracfg: syscon@10001000 {
 			compatible = "mediatek,mt8183-infracfg", "syscon";
 			reg = <0 0x10001000 0 0x1000>;
 			#clock-cells = <1>;
+			#reset-cells = <1>;
 		};
 
 		pio: pinctrl@10005000 {
@@ -278,6 +280,15 @@ systimer: timer@10017000 {
 			clock-names = "clk13m";
 		};
 
+		gce: mailbox@10238000 {
+			compatible = "mediatek,mt8183-gce";
+			reg = <0 0x10238000 0 0x4000>;
+			interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_LOW>;
+			#mbox-cells = <3>;
+			clocks = <&infracfg CLK_INFRA_GCE>;
+			clock-names = "gce";
+		};
+
 		auxadc: auxadc@11001000 {
 			compatible = "mediatek,mt8183-auxadc",
 				     "mediatek,mt8173-auxadc";
diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
index 631a7f7..6238e6e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
@@ -604,9 +604,11 @@ mc: memory-controller@70019000 {
 		#iommu-cells = <1>;
 	};
 
-	emc: emc@7001b000 {
+	emc: external-memory-controller@7001b000 {
 		compatible = "nvidia,tegra132-emc", "nvidia,tegra124-emc";
 		reg = <0x0 0x7001b000 0x0 0x1000>;
+		clocks = <&tegra_car TEGRA124_CLK_EMC>;
+		clock-names = "emc";
 
 		nvidia,memory-controller = <&mc>;
 	};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
index 5e18acf..947744d 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
@@ -8,6 +8,7 @@ / {
 	compatible = "nvidia,p3310", "nvidia,tegra186";
 
 	aliases {
+		ethernet0 = "/ethernet@2490000";
 		sdhci0 = "/sdhci@3460000";
 		sdhci1 = "/sdhci@3400000";
 		serial0 = &uarta;
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index 7893d78..c905527 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -142,7 +142,29 @@ agic: interrupt-controller@2a40000 {
 	memory-controller@2c00000 {
 		compatible = "nvidia,tegra186-mc";
 		reg = <0x0 0x02c00000 0x0 0xb0000>;
+		interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
 		status = "disabled";
+
+		#address-cells = <2>;
+		#size-cells = <2>;
+
+		ranges = <0x0 0x02c00000 0x0 0x02c00000 0x0 0xb0000>;
+
+		/*
+		 * Memory clients have access to all 40 bits that the memory
+		 * controller can address.
+		 */
+		dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
+
+		emc: external-memory-controller@2c60000 {
+			compatible = "nvidia,tegra186-emc";
+			reg = <0x0 0x02c60000 0x0 0x50000>;
+			interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&bpmp TEGRA186_CLK_EMC>;
+			clock-names = "emc";
+
+			nvidia,bpmp = <&bpmp>;
+		};
 	};
 
 	uarta: serial@3100000 {
@@ -524,12 +546,9 @@ usb@3530000 {
 		reg = <0x0 0x03530000 0x0 0x8000>,
 		      <0x0 0x03538000 0x0 0x1000>;
 		reg-names = "hcd", "fpci";
-
-		iommus = <&smmu TEGRA186_SID_XUSB_HOST>;
 		interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
-
 		clocks = <&bpmp TEGRA186_CLK_XUSB_HOST>,
 			 <&bpmp TEGRA186_CLK_XUSB_FALCON>,
 			 <&bpmp TEGRA186_CLK_XUSB_SS>,
@@ -542,16 +561,15 @@ usb@3530000 {
 		clock-names = "xusb_host", "xusb_falcon_src", "xusb_ss",
 			      "xusb_ss_src", "xusb_hs_src", "xusb_fs_src",
 			      "pll_u_480m", "clk_m", "pll_e";
-
 		power-domains = <&bpmp TEGRA186_POWER_DOMAIN_XUSBC>,
 				<&bpmp TEGRA186_POWER_DOMAIN_XUSBA>;
 		power-domain-names = "xusb_host", "xusb_ss";
-		nvidia,xusb-padctl = <&padctl>;
-
-		status = "disabled";
-
+		iommus = <&smmu TEGRA186_SID_XUSB_HOST>;
 		#address-cells = <1>;
 		#size-cells = <0>;
+		status = "disabled";
+
+		nvidia,xusb-padctl = <&padctl>;
 	};
 
 	fuse@3820000 {
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
index c7f2a20..bdd33ff 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
@@ -48,6 +48,10 @@ phy: phy@0 {
 			};
 		};
 
+		memory-controller@2c00000 {
+			status = "okay";
+		};
+
 		serial@3110000 {
 			status = "okay";
 		};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
index 353a6a2..985e7d8 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
@@ -152,7 +152,7 @@ force-recovery {
 			gpios = <&gpio TEGRA194_MAIN_GPIO(G, 0)
 				       GPIO_ACTIVE_LOW>;
 			linux,input-type = <EV_KEY>;
-			linux,code = <BTN_1>;
+			linux,code = <KEY_SLEEP>;
 			debounce-interval = <10>;
 		};
 
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index 11220d9..ccac43b 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -7,6 +7,7 @@
 #include <dt-bindings/power/tegra194-powergate.h>
 #include <dt-bindings/reset/tegra194-reset.h>
 #include <dt-bindings/thermal/tegra194-bpmp-thermal.h>
+#include <dt-bindings/memory/tegra194-mc.h>
 
 / {
 	compatible = "nvidia,tegra194";
@@ -21,6 +22,12 @@ cbb@0 {
 		#size-cells = <1>;
 		ranges = <0x0 0x0 0x0 0x40000000>;
 
+		misc@100000 {
+			compatible = "nvidia,tegra194-misc";
+			reg = <0x00100000 0xf000>,
+			      <0x0010f000 0x1000>;
+		};
+
 		gpio: gpio@2200000 {
 			compatible = "nvidia,tegra194-gpio";
 			reg-names = "security", "gpio";
@@ -164,6 +171,48 @@ clkreq {
 			};
 		};
 
+		mc: memory-controller@2c00000 {
+			compatible = "nvidia,tegra194-mc";
+			reg = <0x02c00000 0x100000>,
+			      <0x02b80000 0x040000>,
+			      <0x01700000 0x100000>;
+			status = "disabled";
+
+			#address-cells = <2>;
+			#size-cells = <2>;
+
+			ranges = <0x01700000 0x0 0x01700000 0x0 0x100000>,
+				 <0x02b80000 0x0 0x02b80000 0x0 0x040000>,
+				 <0x02c00000 0x0 0x02c00000 0x0 0x100000>;
+
+			/*
+			 * Bit 39 of addresses passing through the memory
+			 * controller selects the XBAR format used when memory
+			 * is accessed. This is used to transparently access
+			 * memory in the XBAR format used by the discrete GPU
+			 * (bit 39 set) or Tegra (bit 39 clear).
+			 *
+			 * As a consequence, the operating system must ensure
+			 * that bit 39 is never used implicitly, for example
+			 * via an I/O virtual address mapping of an IOMMU. If
+			 * devices require access to the XBAR switch, their
+			 * drivers must set this bit explicitly.
+			 *
+			 * Limit the DMA range for memory clients to [38:0].
+			 */
+			dma-ranges = <0x0 0x0 0x0 0x80 0x0>;
+
+			emc: external-memory-controller@2c60000 {
+				compatible = "nvidia,tegra194-emc";
+				reg = <0x0 0x02c60000 0x0 0x90000>,
+				      <0x0 0x01780000 0x0 0x80000>;
+				clocks = <&bpmp TEGRA194_CLK_EMC>;
+				clock-names = "emc";
+
+				nvidia,bpmp = <&bpmp>;
+			};
+		};
+
 		uarta: serial@3100000 {
 			compatible = "nvidia,tegra194-uart", "nvidia,tegra20-uart";
 			reg = <0x03100000 0x40>;
@@ -488,6 +537,13 @@ hda@3510000 {
 			status = "disabled";
 		};
 
+		fuse@3820000 {
+			compatible = "nvidia,tegra194-efuse";
+			reg = <0x03820000 0x10000>;
+			clocks = <&bpmp TEGRA194_CLK_FUSE>;
+			clock-names = "fuse";
+		};
+
 		gic: interrupt-controller@3881000 {
 			compatible = "arm,gic-400";
 			#interrupt-cells = <3>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
index 90381d5..9101d3a 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
@@ -536,6 +536,19 @@ sdhci@700b0000 {
 		vmmc-supply = <&vdd_3v3_sd>;
 	};
 
+	sdhci@700b0400 {
+		status = "okay";
+		bus-width = <4>;
+
+		vqmmc-supply = <&vdd_1v8>;
+		vmmc-supply = <&vdd_3v3_sys>;
+
+		non-removable;
+		cap-sdio-irq;
+		keep-power-in-suspend;
+		wakeup-source;
+	};
+
 	clocks {
 		compatible = "simple-bus";
 		#address-cells = <1>;
@@ -573,6 +586,66 @@ cpu-sleep {
 		};
 	};
 
+	fan: fan {
+		compatible = "pwm-fan";
+		pwms = <&pwm 3 45334>;
+
+		cooling-levels = <0 64 128 255>;
+		#cooling-cells = <2>;
+	};
+
+	thermal-zones {
+		cpu {
+			trips {
+				cpu_trip_critical: critical {
+					temperature = <96500>;
+					hysteresis = <0>;
+					type = "critical";
+				};
+
+				cpu_trip_hot: hot {
+					temperature = <70000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+
+				cpu_trip_active: active {
+					temperature = <50000>;
+					hysteresis = <2000>;
+					type = "active";
+				};
+
+				cpu_trip_passive: passive {
+					temperature = <30000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+			};
+
+			cooling-maps {
+				cpu-critical {
+					cooling-device = <&fan 3 3>;
+					trip = <&cpu_trip_critical>;
+				};
+
+				cpu-hot {
+					cooling-device = <&fan 2 2>;
+					trip = <&cpu_trip_hot>;
+				};
+
+				cpu-active {
+					cooling-device = <&fan 1 1>;
+					trip = <&cpu_trip_active>;
+				};
+
+				cpu-passive {
+					cooling-device = <&fan 0 0>;
+					trip = <&cpu_trip_passive>;
+				};
+			};
+		};
+	};
+
 	gpio-keys {
 		compatible = "gpio-keys";
 
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 6498a1e..973c0f0 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 dtb-$(CONFIG_ARCH_QCOM)	+= apq8016-sbc.dtb
 dtb-$(CONFIG_ARCH_QCOM)	+= apq8096-db820c.dtb
+dtb-$(CONFIG_ARCH_QCOM) += apq8096-ifc6640.dtb
 dtb-$(CONFIG_ARCH_QCOM)	+= ipq8074-hk01.dtb
 dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-mtp.dtb
 dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-longcheer-l8150.dtb
@@ -13,6 +14,7 @@
 dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-hp-envy-x2.dtb
 dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-lenovo-miix-630.dtb
 dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-idp.dtb
 dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r1.dtb
 dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r2.dtb
 dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r3.dtb
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
index ec2f0de..aff218c 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
@@ -8,6 +8,15 @@ usb_hub_reset_pm: usb_hub_reset_pm {
 		pinconf {
 			pins = "gpio3";
 			function = PMIC_GPIO_FUNC_NORMAL;
+			input-disable;
+			output-high;
+		};
+	};
+
+	usb_hub_reset_pm_device: usb_hub_reset_pm_device {
+		pinconf {
+			pins = "gpio3";
+			function = PMIC_GPIO_FUNC_NORMAL;
 			output-low;
 		};
 	};
@@ -22,6 +31,16 @@ pinconf {
 		};
 	};
 
+	usb_sw_sel_pm_device: usb_sw_sel_pm_device {
+		pinconf {
+			pins = "gpio4";
+			function = PMIC_GPIO_FUNC_NORMAL;
+			power-source = <PM8916_GPIO_VPH>;
+			input-disable;
+			output-low;
+		};
+	};
+
 	pm8916_gpios_leds: pm8916_gpios_leds {
 		pinconf {
 			pins = "gpio1", "gpio2";
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index e12a36c..037e26b 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -358,14 +358,15 @@ sdhci@7864000 {
 		};
 
 		usb@78d9000 {
-			extcon = <&usb_id>;
+			extcon = <&usb_id>, <&usb_id>;
 			status = "okay";
 			adp-disable;
 			hnp-disable;
 			srp-disable;
-			dr_mode = "host";
-			pinctrl-names = "default";
-			pinctrl-0 = <&usb_sw_sel_pm>;
+			dr_mode = "otg";
+			pinctrl-names = "default", "device";
+			pinctrl-0 = <&usb_sw_sel_pm &usb_hub_reset_pm>;
+			pinctrl-1 = <&usb_sw_sel_pm_device &usb_hub_reset_pm_device>;
 			ulpi {
 				phy {
 					v1p8-supply = <&pm8916_l7>;
@@ -504,7 +505,7 @@ usb2513 {
 
 	usb_id: usb-id {
 		compatible = "linux,extcon-usb-gpio";
-		vbus-gpio = <&msmgpio 121 GPIO_ACTIVE_HIGH>;
+		id-gpio = <&msmgpio 121 GPIO_ACTIVE_HIGH>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&usb_id_default>;
 	};
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi
deleted file mode 100644
index a5cc80d..0000000
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi
+++ /dev/null
@@ -1,109 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- */
-&msmgpio {
-	sdc2_cd_on: sdc2_cd_on {
-		mux {
-			pins = "gpio38";
-			function = "gpio";
-		};
-
-		config {
-			pins = "gpio38";
-			bias-pull-up;		/* pull up */
-			drive-strength = <16>;	/* 16 MA */
-		};
-	};
-
-	sdc2_cd_off: sdc2_cd_off {
-		mux {
-			pins = "gpio38";
-			function = "gpio";
-		};
-
-		config {
-			pins = "gpio38";
-			bias-pull-up;		/* pull up */
-			drive-strength = <2>;	/* 2 MA */
-		};
-	};
-
-	blsp1_uart1_default: blsp1_uart1_default {
-		mux {
-			pins = "gpio41", "gpio42", "gpio43", "gpio44";
-			function = "blsp_uart2";
-		};
-
-		config {
-			pins = "gpio41", "gpio42", "gpio43", "gpio44";
-			drive-strength = <16>;
-			bias-disable;
-		};
-	};
-
-	blsp1_uart1_sleep: blsp1_uart1_sleep {
-		mux {
-			pins = "gpio41", "gpio42", "gpio43", "gpio44";
-			function = "gpio";
-		};
-
-		config {
-			pins = "gpio41", "gpio42", "gpio43", "gpio44";
-			drive-strength = <2>;
-			bias-disable;
-		};
-	};
-
-	hdmi_hpd_active: hdmi_hpd_active {
-		mux {
-			pins = "gpio34";
-			function = "hdmi_hot";
-		};
-
-		config {
-			pins = "gpio34";
-			bias-pull-down;
-			drive-strength = <16>;
-		};
-	};
-
-	hdmi_hpd_suspend: hdmi_hpd_suspend {
-		mux {
-			pins = "gpio34";
-			function = "hdmi_hot";
-		};
-
-		config {
-			pins = "gpio34";
-			bias-pull-down;
-			drive-strength = <2>;
-		};
-	};
-
-	hdmi_ddc_active: hdmi_ddc_active {
-		mux {
-			pins = "gpio32", "gpio33";
-			function = "hdmi_ddc";
-		};
-
-		config {
-			pins = "gpio32", "gpio33";
-			drive-strength = <2>;
-			bias-pull-up;
-		};
-	};
-
-	hdmi_ddc_suspend: hdmi_ddc_suspend {
-		mux {
-			pins = "gpio32", "gpio33";
-			function = "hdmi_ddc";
-		};
-
-		config {
-			pins = "gpio32", "gpio33";
-			drive-strength = <2>;
-			bias-pull-down;
-		};
-	};
-};
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi
deleted file mode 100644
index 31a3e33..0000000
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi
+++ /dev/null
@@ -1,92 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
-&pm8994_gpios {
-
-	pinctrl-names = "default";
-	pinctrl-0 = <&ls_exp_gpio_f &bt_en_gpios>;
-
-	ls_exp_gpio_f: pm8994_gpio5 {
-		pinconf {
-			pins = "gpio5";
-			output-low;
-			power-source = <2>; // PM8994_GPIO_S4, 1.8V
-		};
-	};
-
-	bt_en_gpios: bt_en_gpios {
-		pinconf {
-			pins = "gpio19";
-			function = PMIC_GPIO_FUNC_NORMAL;
-			output-low;
-			power-source = <PM8994_GPIO_S4>; // 1.8V
-			qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
-			bias-pull-down;
-		};
-	};
-
-	wlan_en_gpios: wlan_en_gpios {
-		pinconf {
-			pins = "gpio8";
-			function = PMIC_GPIO_FUNC_NORMAL;
-			output-low;
-			power-source = <PM8994_GPIO_S4>; // 1.8V
-			qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
-			bias-pull-down;
-		};
-	};
-
-	audio_mclk: clk_div1 {
-		pinconf {
-			pins = "gpio15";
-			function = "func1";
-			power-source = <PM8994_GPIO_S4>; // 1.8V
-		};
-	};
-
-	volume_up_gpio: pm8996_gpio2 {
-		pinconf {
-			pins = "gpio2";
-			function = "normal";
-			input-enable;
-			drive-push-pull;
-			bias-pull-up;
-			qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
-			power-source = <PM8994_GPIO_S4>; // 1.8V
-		};
-	};
-
-	divclk4_pin_a: divclk4 {
-		pinconf {
-			pins = "gpio18";
-			function = PMIC_GPIO_FUNC_FUNC2;
-
-			bias-disable;
-			power-source = <PM8994_GPIO_S4>;
-		};
-	};
-
-	usb3_vbus_det_gpio: pm8996_gpio22 {
-		pinconf {
-			pins = "gpio22";
-			function = PMIC_GPIO_FUNC_NORMAL;
-			input-enable;
-			bias-pull-down;
-			qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
-			power-source = <PM8994_GPIO_S4>; // 1.8V
-		};
-	};
-};
-
-&pmi8994_gpios {
-	usb2_vbus_det_gpio: pmi8996_gpio6 {
-		pinconf {
-			pins = "gpio6";
-			function = PMIC_GPIO_FUNC_NORMAL;
-			input-enable;
-			bias-pull-down;
-			qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
-			power-source = <PM8994_GPIO_S4>; // 1.8V
-		};
-	};
-};
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
index dba3488..fff6115 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
@@ -6,10 +6,9 @@
 #include "msm8996.dtsi"
 #include "pm8994.dtsi"
 #include "pmi8994.dtsi"
-#include "apq8096-db820c-pins.dtsi"
-#include "apq8096-db820c-pmic-pins.dtsi"
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
 #include <dt-bindings/sound/qcom,q6afe.h>
 #include <dt-bindings/sound/qcom,q6asm.h>
 
@@ -78,424 +77,6 @@ div1_mclk: divclk1 {
 		};
 	};
 
-	soc {
-		serial@7570000 {
-			label = "BT-UART";
-			status = "okay";
-			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&blsp1_uart1_default>;
-			pinctrl-1 = <&blsp1_uart1_sleep>;
-
-			bluetooth {
-				compatible = "qcom,qca6174-bt";
-
-				/* bt_disable_n gpio */
-				enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
-
-				clocks = <&divclk4>;
-			};
-		};
-
-		serial@75b0000 {
-			label = "LS-UART1";
-			status = "okay";
-			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&blsp2_uart1_2pins_default>;
-			pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
-		};
-
-		serial@75b1000 {
-			label = "LS-UART0";
-			status = "disabled";
-			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&blsp2_uart2_4pins_default>;
-			pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
-		};
-
-		i2c@7577000 {
-		/* On Low speed expansion */
-			label = "LS-I2C0";
-			status = "okay";
-		};
-
-		i2c@75b6000 {
-		/* On Low speed expansion */
-			label = "LS-I2C1";
-			status = "okay";
-		};
-
-		spi@7575000 {
-		/* On Low speed expansion */
-			label = "LS-SPI0";
-			status = "okay";
-		};
-
-		i2c@75b5000 {
-		/* On High speed expansion */
-			label = "HS-I2C2";
-			status = "okay";
-		};
-
-		spi@75ba000{
-		/* On High speed expansion */
-			label = "HS-SPI1";
-			status = "okay";
-		};
-
-		sdhci@74a4900 {
-		/* External SD card */
-			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
-			pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
-			cd-gpios = <&msmgpio 38 0x1>;
-			vmmc-supply = <&pm8994_l21>;
-			vqmmc-supply = <&pm8994_l13>;
-			status = "okay";
-		};
-
-		phy@627000 {
-			status = "okay";
-		};
-
-		ufshc@624000 {
-			status = "okay";
-		};
-
-		pinctrl@1010000 {
-			gpio-line-names =
-				"[SPI0_DOUT]", /* GPIO_0, BLSP1_SPI_MOSI, LSEC pin 14 */
-				"[SPI0_DIN]", /* GPIO_1, BLSP1_SPI_MISO, LSEC pin 10 */
-				"[SPI0_CS]", /* GPIO_2, BLSP1_SPI_CS_N, LSEC pin 12 */
-				"[SPI0_SCLK]", /* GPIO_3, BLSP1_SPI_CLK, LSEC pin 8 */
-				"[UART1_TxD]", /* GPIO_4, BLSP8_UART_TX, LSEC pin 11 */
-				"[UART1_RxD]", /* GPIO_5, BLSP8_UART_RX, LSEC pin 13 */
-				"[I2C1_SDA]", /* GPIO_6, BLSP8_I2C_SDA, LSEC pin 21 */
-				"[I2C1_SCL]", /* GPIO_7, BLSP8_I2C_SCL, LSEC pin 19 */
-				"GPIO-H", /* GPIO_8, LCD0_RESET_N, LSEC pin 30 */
-				"TP93", /* GPIO_9 */
-				"GPIO-G", /* GPIO_10, MDP_VSYNC_P, LSEC pin 29 */
-				"[MDP_VSYNC_S]", /* GPIO_11, S HSEC pin 55 */
-				"NC", /* GPIO_12 */
-				"[CSI0_MCLK]", /* GPIO_13, CAM_MCLK0, P HSEC pin 15 */
-				"[CAM_MCLK1]", /* GPIO_14, J14 pin 11 */
-				"[CSI1_MCLK]", /* GPIO_15, CAM_MCLK2, P HSEC pin 17 */
-				"TP99", /* GPIO_16 */
-				"[I2C2_SDA]", /* GPIO_17, CCI_I2C_SDA0, P HSEC pin 34 */
-				"[I2C2_SCL]", /* GPIO_18, CCI_I2C_SCL0, P HSEC pin 32 */
-				"[CCI_I2C_SDA1]", /* GPIO_19, S HSEC pin 38 */
-				"[CCI_I2C_SCL1]", /* GPIO_20, S HSEC pin 36 */
-				"FLASH_STROBE_EN", /* GPIO_21, S HSEC pin 5 */
-				"FLASH_STROBE_TRIG", /* GPIO_22, S HSEC pin 1 */
-				"GPIO-K", /* GPIO_23, CAM2_RST_N, LSEC pin 33 */
-				"GPIO-D", /* GPIO_24, LSEC pin 26 */
-				"GPIO-I", /* GPIO_25, CAM0_RST_N, LSEC pin 31 */
-				"GPIO-J", /* GPIO_26, CAM0_STANDBY_N, LSEC pin 32 */
-				"BLSP6_I2C_SDA", /* GPIO_27 */
-				"BLSP6_I2C_SCL", /* GPIO_28 */
-				"GPIO-B", /* GPIO_29, TS0_RESET_N, LSEC pin 24 */
-				"GPIO30", /* GPIO_30, S HSEC pin 4 */
-				"HDMI_CEC", /* GPIO_31 */
-				"HDMI_DDC_CLOCK", /* GPIO_32 */
-				"HDMI_DDC_DATA", /* GPIO_33 */
-				"HDMI_HOT_PLUG_DETECT", /* GPIO_34 */
-				"PCIE0_RST_N", /* GPIO_35 */
-				"PCIE0_CLKREQ_N", /* GPIO_36 */
-				"PCIE0_WAKE", /* GPIO_37 */
-				"SD_CARD_DET_N", /* GPIO_38 */
-				"TSIF1_SYNC", /* GPIO_39, S HSEC pin 48 */
-				"W_DISABLE_N", /* GPIO_40 */
-				"[BLSP9_UART_TX]", /* GPIO_41 */
-				"[BLSP9_UART_RX]", /* GPIO_42 */
-				"[BLSP2_UART_CTS_N]", /* GPIO_43 */
-				"[BLSP2_UART_RFR_N]", /* GPIO_44 */
-				"[BLSP3_UART_TX]", /* GPIO_45 */
-				"[BLSP3_UART_RX]", /* GPIO_46 */
-				"[I2C0_SDA]", /* GPIO_47, LS_I2C0_SDA, LSEC pin 17 */
-				"[I2C0_SCL]", /* GPIO_48, LS_I2C0_SCL, LSEC pin 15 */
-				"[UART0_TxD]", /* GPIO_49, BLSP9_UART_TX, LSEC pin 5 */
-				"[UART0_RxD]", /* GPIO_50, BLSP9_UART_RX, LSEC pin 7 */
-				"[UART0_CTS]", /* GPIO_51, BLSP9_UART_CTS_N, LSEC pin 3 */
-				"[UART0_RTS]", /* GPIO_52, BLSP9_UART_RFR_N, LSEC pin 9 */
-				"[CODEC_INT1_N]", /* GPIO_53 */
-				"[CODEC_INT2_N]", /* GPIO_54 */
-				"[BLSP7_I2C_SDA]", /* GPIO_55 */
-				"[BLSP7_I2C_SCL]", /* GPIO_56 */
-				"MI2S_MCLK", /* GPIO_57, S HSEC pin 3 */
-				"[PCM_CLK]", /* GPIO_58, QUA_MI2S_SCK, LSEC pin 18 */
-				"[PCM_FS]", /* GPIO_59, QUA_MI2S_WS, LSEC pin 16 */
-				"[PCM_DO]", /* GPIO_60, QUA_MI2S_DATA0, LSEC pin 20 */
-				"[PCM_DI]", /* GPIO_61, QUA_MI2S_DATA1, LSEC pin 22 */
-				"GPIO-E", /* GPIO_62, LSEC pin 27 */
-				"TP87", /* GPIO_63 */
-				"[CODEC_RST_N]", /* GPIO_64 */
-				"[PCM1_CLK]", /* GPIO_65 */
-				"[PCM1_SYNC]", /* GPIO_66 */
-				"[PCM1_DIN]", /* GPIO_67 */
-				"[PCM1_DOUT]", /* GPIO_68 */
-				"AUDIO_REF_CLK", /* GPIO_69 */
-				"SLIMBUS_CLK", /* GPIO_70 */
-				"SLIMBUS_DATA0", /* GPIO_71 */
-				"SLIMBUS_DATA1", /* GPIO_72 */
-				"NC", /* GPIO_73 */
-				"NC", /* GPIO_74 */
-				"NC", /* GPIO_75 */
-				"NC", /* GPIO_76 */
-				"TP94", /* GPIO_77 */
-				"NC", /* GPIO_78 */
-				"TP95", /* GPIO_79 */
-				"GPIO-A", /* GPIO_80, MEMS_RESET_N, LSEC pin 23 */
-				"TP88", /* GPIO_81 */
-				"TP89", /* GPIO_82 */
-				"TP90", /* GPIO_83 */
-				"TP91", /* GPIO_84 */
-				"[SD_DAT0]", /* GPIO_85, BLSP12_SPI_MOSI, P HSEC pin 1 */
-				"[SD_CMD]", /* GPIO_86, BLSP12_SPI_MISO, P HSEC pin 11 */
-				"[SD_DAT3]", /* GPIO_87, BLSP12_SPI_CS_N, P HSEC pin 7 */
-				"[SD_SCLK]", /* GPIO_88, BLSP12_SPI_CLK, P HSEC pin 9 */
-				"TSIF1_CLK", /* GPIO_89, S HSEC pin 42 */
-				"TSIF1_EN", /* GPIO_90, S HSEC pin 46 */
-				"TSIF1_DATA", /* GPIO_91, S HSEC pin 44 */
-				"NC", /* GPIO_92 */
-				"TSIF2_CLK", /* GPIO_93, S HSEC pin 52 */
-				"TSIF2_EN", /* GPIO_94, S HSEC pin 56 */
-				"TSIF2_DATA", /* GPIO_95, S HSEC pin 54 */
-				"TSIF2_SYNC", /* GPIO_96, S HSEC pin 58 */
-				"NC", /* GPIO_97 */
-				"CAM1_STANDBY_N", /* GPIO_98 */
-				"NC", /* GPIO_99 */
-				"NC", /* GPIO_100 */
-				"[LCD1_RESET_N]", /* GPIO_101, S HSEC pin 51 */
-				"BOOT_CONFIG1", /* GPIO_102 */
-				"USB_HUB_RESET", /* GPIO_103 */
-				"CAM1_RST_N", /* GPIO_104 */
-				"NC", /* GPIO_105 */
-				"NC", /* GPIO_106 */
-				"NC", /* GPIO_107 */
-				"NC", /* GPIO_108 */
-				"NC", /* GPIO_109 */
-				"NC", /* GPIO_110 */
-				"NC", /* GPIO_111 */
-				"NC", /* GPIO_112 */
-				"PMI8994_BUA", /* GPIO_113 */
-				"PCIE2_RST_N", /* GPIO_114 */
-				"PCIE2_CLKREQ_N", /* GPIO_115 */
-				"PCIE2_WAKE", /* GPIO_116 */
-				"SSC_IRQ_0", /* GPIO_117 */
-				"SSC_IRQ_1", /* GPIO_118 */
-				"SSC_IRQ_2", /* GPIO_119 */
-				"NC", /* GPIO_120 */
-				"GPIO121", /* GPIO_121, S HSEC pin 2 */
-				"NC", /* GPIO_122 */
-				"SSC_IRQ_6", /* GPIO_123 */
-				"SSC_IRQ_7", /* GPIO_124 */
-				"GPIO-C", /* GPIO_125, TS_INT0, LSEC pin 25 */
-				"BOOT_CONFIG5", /* GPIO_126 */
-				"NC", /* GPIO_127 */
-				"NC", /* GPIO_128 */
-				"BOOT_CONFIG7", /* GPIO_129 */
-				"PCIE1_RST_N", /* GPIO_130 */
-				"PCIE1_CLKREQ_N", /* GPIO_131 */
-				"PCIE1_WAKE", /* GPIO_132 */
-				"GPIO-L", /* GPIO_133, CAM2_STANDBY_N, LSEC pin 34 */
-				"NC", /* GPIO_134 */
-				"NC", /* GPIO_135 */
-				"BOOT_CONFIG8", /* GPIO_136 */
-				"NC", /* GPIO_137 */
-				"NC", /* GPIO_138 */
-				"GPS_SSBI2", /* GPIO_139 */
-				"GPS_SSBI1", /* GPIO_140 */
-				"NC", /* GPIO_141 */
-				"NC", /* GPIO_142 */
-				"NC", /* GPIO_143 */
-				"BOOT_CONFIG6", /* GPIO_144 */
-				"NC", /* GPIO_145 */
-				"NC", /* GPIO_146 */
-				"NC", /* GPIO_147 */
-				"NC", /* GPIO_148 */
-				"NC"; /* GPIO_149 */
-		};
-
-		qcom,spmi@400f000 {
-			pmic@0 {
-				gpios@c000 {
-					gpio-line-names =
-						"NC",
-						"KEY_VOLP_N",
-						"NC",
-						"BL1_PWM",
-						"GPIO-F", /* BL0_PWM, LSEC pin 28 */
-						"BL1_EN",
-						"NC",
-						"WLAN_EN",
-						"NC",
-						"NC",
-						"NC",
-						"NC",
-						"NC",
-						"NC",
-						"DIVCLK1",
-						"DIVCLK2",
-						"DIVCLK3",
-						"DIVCLK4",
-						"BT_EN",
-						"PMIC_SLB",
-						"PMIC_BUA",
-						"USB_VBUS_DET";
-				};
-
-				mpps@a000 {
-					gpio-line-names =
-						"VDDPX_BIAS",
-						"WIFI_LED",
-						"NC",
-						"BT_LED",
-						"PM_MPP05",
-						"PM_MPP06",
-						"PM_MPP07",
-						"NC";
-				};
-			};
-
-			pmic@2 {
-				gpios@c000 {
-					gpio-line-names =
-						"NC",
-						"SPKR_AMP_EN1",
-						"SPKR_AMP_EN2",
-						"TP61",
-						"NC",
-						"USB2_VBUS_DET",
-						"NC",
-						"NC",
-						"NC",
-						"NC";
-				};
-			};
-		};
-
-		phy@34000 {
-			status = "okay";
-		};
-
-		phy@7410000 {
-			status = "okay";
-		};
-
-		phy@7411000 {
-			status = "okay";
-		};
-
-		phy@7412000 {
-			status = "okay";
-		};
-
-		usb@6af8800 {
-			status = "okay";
-			extcon = <&usb3_id>;
-
-			dwc3@6a00000 {
-				extcon = <&usb3_id>;
-				dr_mode = "otg";
-			};
-		};
-
-		usb3_id: usb3-id {
-			compatible = "linux,extcon-usb-gpio";
-			id-gpio = <&pm8994_gpios 22 GPIO_ACTIVE_HIGH>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&usb3_vbus_det_gpio>;
-		};
-
-		usb@76f8800 {
-			status = "okay";
-			extcon = <&usb2_id>;
-
-			dwc3@7600000 {
-				extcon = <&usb2_id>;
-				dr_mode = "otg";
-				maximum-speed = "high-speed";
-			};
-		};
-
-		usb2_id: usb2-id {
-			compatible = "linux,extcon-usb-gpio";
-			id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&usb2_vbus_det_gpio>;
-		};
-
-		wlan_en: wlan-en-1-8v {
-			pinctrl-names = "default";
-			pinctrl-0 = <&wlan_en_gpios>;
-			compatible = "regulator-fixed";
-			regulator-name = "wlan-en-regulator";
-			regulator-min-microvolt = <1800000>;
-			regulator-max-microvolt = <1800000>;
-
-			gpio = <&pm8994_gpios 8 0>;
-
-			/* WLAN card specific delay */
-			startup-delay-us = <70000>;
-			enable-active-high;
-		};
-
-		agnoc@0 {
-			pcie@600000 {
-				status = "okay";
-				perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
-				vddpe-3v3-supply = <&wlan_en>;
-			};
-
-			pcie@608000 {
-				status = "okay";
-				perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
-			};
-
-			pcie@610000 {
-				status = "okay";
-				perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
-			};
-		};
-
-		slim_msm: slim@91c0000 {
-			ngd@1 {
-				wcd9335: codec@1{
-					clock-names = "mclk", "slimbus";
-					clocks = <&div1_mclk>,
-						 <&rpmcc RPM_SMD_BB_CLK1>;
-				};
-			};
-		};
-
-		mdss@900000 {
-			status = "okay";
-
-			mdp@901000 {
-				status = "okay";
-			};
-
-			hdmi-phy@9a0600 {
-				status = "okay";
-
-				vddio-supply = <&pm8994_l12>;
-				vcca-supply = <&pm8994_l28>;
-				#phy-cells = <0>;
-			};
-
-			hdmi-tx@9a0000 {
-				status = "okay";
-
-				pinctrl-names = "default", "sleep";
-				pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
-				pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
-
-				core-vdda-supply = <&pm8994_l12>;
-				core-vcc-supply = <&pm8994_s4>;
-			};
-		};
-	};
-
-
 	gpio_keys {
 		compatible = "gpio-keys";
 		#address-cells = <1>;
@@ -512,175 +93,819 @@ button@0 {
 		};
 	};
 
-	rpm-glink {
-		rpm_requests {
-			pm8994-regulators {
-				vdd_l1-supply = <&pm8994_s3>;
-				vdd_l2_l26_l28-supply = <&pm8994_s3>;
-				vdd_l3_l11-supply = <&pm8994_s3>;
-				vdd_l4_l27_l31-supply = <&pm8994_s3>;
-				vdd_l5_l7-supply = <&pm8994_s5>;
-				vdd_l14_l15-supply = <&pm8994_s5>;
-				vdd_l20_l21-supply = <&pm8994_s5>;
-				vdd_l25-supply = <&pm8994_s3>;
+	usb2_id: usb2-id {
+		compatible = "linux,extcon-usb-gpio";
+		id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&usb2_vbus_det_gpio>;
+	};
 
-				s3 {
-					regulator-min-microvolt = <1300000>;
-					regulator-max-microvolt = <1300000>;
-				};
+	usb3_id: usb3-id {
+		compatible = "linux,extcon-usb-gpio";
+		id-gpio = <&pm8994_gpios 22 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&usb3_vbus_det_gpio>;
+	};
 
-				/**
-				 * 1.8v required on LS expansion
-				 * for mezzanine boards
-				 */
-				s4 {
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-					regulator-always-on;
-				};
-				s5 {
-					regulator-min-microvolt = <2150000>;
-					regulator-max-microvolt = <2150000>;
-				};
-				s7 {
-					regulator-min-microvolt = <800000>;
-					regulator-max-microvolt = <800000>;
-				};
+	vph_pwr: vph-pwr-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vph_pwr";
+		regulator-always-on;
+		regulator-boot-on;
 
-				l1 {
-					regulator-min-microvolt = <1000000>;
-					regulator-max-microvolt = <1000000>;
-				};
-				l2 {
-					regulator-min-microvolt = <1250000>;
-					regulator-max-microvolt = <1250000>;
-				};
-				l3 {
-					regulator-min-microvolt = <850000>;
-					regulator-max-microvolt = <850000>;
-				};
-				l4 {
-					regulator-min-microvolt = <1225000>;
-					regulator-max-microvolt = <1225000>;
-				};
-				l6 {
-					regulator-min-microvolt = <1200000>;
-					regulator-max-microvolt = <1200000>;
-				};
-				l8 {
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-				};
-				l9 {
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-				};
-				l10 {
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-				};
-				l11 {
-					regulator-min-microvolt = <1150000>;
-					regulator-max-microvolt = <1150000>;
-				};
-				l12 {
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-				};
-				l13 {
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <2950000>;
-				};
-				l14 {
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-				};
-				l15 {
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-				};
-				l16 {
-					regulator-min-microvolt = <2700000>;
-					regulator-max-microvolt = <2700000>;
-				};
-				l17 {
-					regulator-min-microvolt = <2500000>;
-					regulator-max-microvolt = <2500000>;
-				};
-				l18 {
-					regulator-min-microvolt = <2700000>;
-					regulator-max-microvolt = <2900000>;
-				};
-				l19 {
-					regulator-min-microvolt = <3000000>;
-					regulator-max-microvolt = <3000000>;
-				};
-				l20 {
-					regulator-min-microvolt = <2950000>;
-					regulator-max-microvolt = <2950000>;
-					regulator-allow-set-load;
-				};
-				l21 {
-					regulator-min-microvolt = <2950000>;
-					regulator-max-microvolt = <2950000>;
-					regulator-allow-set-load;
-					regulator-system-load = <200000>;
-				};
-				l22 {
-					regulator-min-microvolt = <3300000>;
-					regulator-max-microvolt = <3300000>;
-				};
-				l23 {
-					regulator-min-microvolt = <2800000>;
-					regulator-max-microvolt = <2800000>;
-				};
-				l24 {
-					regulator-min-microvolt = <3075000>;
-					regulator-max-microvolt = <3075000>;
-				};
-				l25 {
-					regulator-min-microvolt = <1200000>;
-					regulator-max-microvolt = <1200000>;
-					regulator-allow-set-load;
-				};
-				l27 {
-					regulator-min-microvolt = <1000000>;
-					regulator-max-microvolt = <1000000>;
-				};
-				l28 {
-					regulator-min-microvolt = <925000>;
-					regulator-max-microvolt = <925000>;
-					regulator-allow-set-load;
-				};
-				l29 {
-					regulator-min-microvolt = <2800000>;
-					regulator-max-microvolt = <2800000>;
-				};
-				l30 {
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-				};
-				l32 {
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-				};
-			};
+		regulator-min-microvolt = <3700000>;
+		regulator-max-microvolt = <3700000>;
+	};
+
+	vreg_s8a_l3a_input: vreg-s8a-l3a-input {
+		compatible = "regulator-fixed";
+		regulator-name = "vreg_s8a_l3a_input";
+		regulator-always-on;
+		regulator-boot-on;
+
+		regulator-min-microvolt = <0>;
+		regulator-max-microvolt = <0>;
+	};
+
+	wlan_en: wlan-en-1-8v {
+		pinctrl-names = "default";
+		pinctrl-0 = <&wlan_en_gpios>;
+		compatible = "regulator-fixed";
+		regulator-name = "wlan-en-regulator";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+
+		gpio = <&pm8994_gpios 8 0>;
+
+		/* WLAN card specific delay */
+		startup-delay-us = <70000>;
+		enable-active-high;
+	};
+};
+
+&blsp1_i2c2 {
+	/* On Low speed expansion */
+	label = "LS-I2C0";
+	status = "okay";
+};
+
+&blsp1_spi0 {
+	/* On Low speed expansion */
+	label = "LS-SPI0";
+	status = "okay";
+};
+
+&blsp1_uart1 {
+	label = "BT-UART";
+	status = "okay";
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&blsp1_uart1_default>;
+	pinctrl-1 = <&blsp1_uart1_sleep>;
+
+	bluetooth {
+		compatible = "qcom,qca6174-bt";
+
+		/* bt_disable_n gpio */
+		enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
+
+		clocks = <&divclk4>;
+	};
+};
+
+&blsp2_i2c0 {
+	/* On High speed expansion */
+	label = "HS-I2C2";
+	status = "okay";
+};
+
+&blsp2_i2c1 {
+	/* On Low speed expansion */
+	label = "LS-I2C1";
+	status = "okay";
+};
+
+&blsp2_spi5 {
+	/* On High speed expansion */
+	label = "HS-SPI1";
+	status = "okay";
+};
+
+&blsp2_uart1 {
+	label = "LS-UART1";
+	status = "okay";
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&blsp2_uart1_2pins_default>;
+	pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
+};
+
+&blsp2_uart2 {
+	label = "LS-UART0";
+	status = "disabled";
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&blsp2_uart2_4pins_default>;
+	pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
+};
+
+&camss {
+	vdda-supply = <&vreg_l2a_1p25>;
+};
+
+&hdmi {
+	status = "okay";
+
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
+	pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
+
+	core-vdda-supply = <&vreg_l12a_1p8>;
+	core-vcc-supply = <&vreg_s4a_1p8>;
+};
+
+&hdmi_phy {
+	status = "okay";
+
+	vddio-supply = <&vreg_l12a_1p8>;
+	vcca-supply = <&vreg_l28a_0p925>;
+	#phy-cells = <0>;
+};
+
+&hsusb_phy1 {
+	status = "okay";
+
+	vdda-pll-supply = <&vreg_l12a_1p8>;
+	vdda-phy-dpdm-supply = <&vreg_l24a_3p075>;
+};
+
+&hsusb_phy2 {
+	status = "okay";
+
+	vdda-pll-supply = <&vreg_l12a_1p8>;
+	vdda-phy-dpdm-supply = <&vreg_l24a_3p075>;
+};
+
+&mdp {
+	status = "okay";
+};
+
+&mdss {
+	status = "okay";
+};
+
+&msmgpio {
+	gpio-line-names =
+		"[SPI0_DOUT]", /* GPIO_0, BLSP1_SPI_MOSI, LSEC pin 14 */
+		"[SPI0_DIN]", /* GPIO_1, BLSP1_SPI_MISO, LSEC pin 10 */
+		"[SPI0_CS]", /* GPIO_2, BLSP1_SPI_CS_N, LSEC pin 12 */
+		"[SPI0_SCLK]", /* GPIO_3, BLSP1_SPI_CLK, LSEC pin 8 */
+		"[UART1_TxD]", /* GPIO_4, BLSP8_UART_TX, LSEC pin 11 */
+		"[UART1_RxD]", /* GPIO_5, BLSP8_UART_RX, LSEC pin 13 */
+		"[I2C1_SDA]", /* GPIO_6, BLSP8_I2C_SDA, LSEC pin 21 */
+		"[I2C1_SCL]", /* GPIO_7, BLSP8_I2C_SCL, LSEC pin 19 */
+		"GPIO-H", /* GPIO_8, LCD0_RESET_N, LSEC pin 30 */
+		"TP93", /* GPIO_9 */
+		"GPIO-G", /* GPIO_10, MDP_VSYNC_P, LSEC pin 29 */
+		"[MDP_VSYNC_S]", /* GPIO_11, S HSEC pin 55 */
+		"NC", /* GPIO_12 */
+		"[CSI0_MCLK]", /* GPIO_13, CAM_MCLK0, P HSEC pin 15 */
+		"[CAM_MCLK1]", /* GPIO_14, J14 pin 11 */
+		"[CSI1_MCLK]", /* GPIO_15, CAM_MCLK2, P HSEC pin 17 */
+		"TP99", /* GPIO_16 */
+		"[I2C2_SDA]", /* GPIO_17, CCI_I2C_SDA0, P HSEC pin 34 */
+		"[I2C2_SCL]", /* GPIO_18, CCI_I2C_SCL0, P HSEC pin 32 */
+		"[CCI_I2C_SDA1]", /* GPIO_19, S HSEC pin 38 */
+		"[CCI_I2C_SCL1]", /* GPIO_20, S HSEC pin 36 */
+		"FLASH_STROBE_EN", /* GPIO_21, S HSEC pin 5 */
+		"FLASH_STROBE_TRIG", /* GPIO_22, S HSEC pin 1 */
+		"GPIO-K", /* GPIO_23, CAM2_RST_N, LSEC pin 33 */
+		"GPIO-D", /* GPIO_24, LSEC pin 26 */
+		"GPIO-I", /* GPIO_25, CAM0_RST_N, LSEC pin 31 */
+		"GPIO-J", /* GPIO_26, CAM0_STANDBY_N, LSEC pin 32 */
+		"BLSP6_I2C_SDA", /* GPIO_27 */
+		"BLSP6_I2C_SCL", /* GPIO_28 */
+		"GPIO-B", /* GPIO_29, TS0_RESET_N, LSEC pin 24 */
+		"GPIO30", /* GPIO_30, S HSEC pin 4 */
+		"HDMI_CEC", /* GPIO_31 */
+		"HDMI_DDC_CLOCK", /* GPIO_32 */
+		"HDMI_DDC_DATA", /* GPIO_33 */
+		"HDMI_HOT_PLUG_DETECT", /* GPIO_34 */
+		"PCIE0_RST_N", /* GPIO_35 */
+		"PCIE0_CLKREQ_N", /* GPIO_36 */
+		"PCIE0_WAKE", /* GPIO_37 */
+		"SD_CARD_DET_N", /* GPIO_38 */
+		"TSIF1_SYNC", /* GPIO_39, S HSEC pin 48 */
+		"W_DISABLE_N", /* GPIO_40 */
+		"[BLSP9_UART_TX]", /* GPIO_41 */
+		"[BLSP9_UART_RX]", /* GPIO_42 */
+		"[BLSP2_UART_CTS_N]", /* GPIO_43 */
+		"[BLSP2_UART_RFR_N]", /* GPIO_44 */
+		"[BLSP3_UART_TX]", /* GPIO_45 */
+		"[BLSP3_UART_RX]", /* GPIO_46 */
+		"[I2C0_SDA]", /* GPIO_47, LS_I2C0_SDA, LSEC pin 17 */
+		"[I2C0_SCL]", /* GPIO_48, LS_I2C0_SCL, LSEC pin 15 */
+		"[UART0_TxD]", /* GPIO_49, BLSP9_UART_TX, LSEC pin 5 */
+		"[UART0_RxD]", /* GPIO_50, BLSP9_UART_RX, LSEC pin 7 */
+		"[UART0_CTS]", /* GPIO_51, BLSP9_UART_CTS_N, LSEC pin 3 */
+		"[UART0_RTS]", /* GPIO_52, BLSP9_UART_RFR_N, LSEC pin 9 */
+		"[CODEC_INT1_N]", /* GPIO_53 */
+		"[CODEC_INT2_N]", /* GPIO_54 */
+		"[BLSP7_I2C_SDA]", /* GPIO_55 */
+		"[BLSP7_I2C_SCL]", /* GPIO_56 */
+		"MI2S_MCLK", /* GPIO_57, S HSEC pin 3 */
+		"[PCM_CLK]", /* GPIO_58, QUA_MI2S_SCK, LSEC pin 18 */
+		"[PCM_FS]", /* GPIO_59, QUA_MI2S_WS, LSEC pin 16 */
+		"[PCM_DO]", /* GPIO_60, QUA_MI2S_DATA0, LSEC pin 20 */
+		"[PCM_DI]", /* GPIO_61, QUA_MI2S_DATA1, LSEC pin 22 */
+		"GPIO-E", /* GPIO_62, LSEC pin 27 */
+		"TP87", /* GPIO_63 */
+		"[CODEC_RST_N]", /* GPIO_64 */
+		"[PCM1_CLK]", /* GPIO_65 */
+		"[PCM1_SYNC]", /* GPIO_66 */
+		"[PCM1_DIN]", /* GPIO_67 */
+		"[PCM1_DOUT]", /* GPIO_68 */
+		"AUDIO_REF_CLK", /* GPIO_69 */
+		"SLIMBUS_CLK", /* GPIO_70 */
+		"SLIMBUS_DATA0", /* GPIO_71 */
+		"SLIMBUS_DATA1", /* GPIO_72 */
+		"NC", /* GPIO_73 */
+		"NC", /* GPIO_74 */
+		"NC", /* GPIO_75 */
+		"NC", /* GPIO_76 */
+		"TP94", /* GPIO_77 */
+		"NC", /* GPIO_78 */
+		"TP95", /* GPIO_79 */
+		"GPIO-A", /* GPIO_80, MEMS_RESET_N, LSEC pin 23 */
+		"TP88", /* GPIO_81 */
+		"TP89", /* GPIO_82 */
+		"TP90", /* GPIO_83 */
+		"TP91", /* GPIO_84 */
+		"[SD_DAT0]", /* GPIO_85, BLSP12_SPI_MOSI, P HSEC pin 1 */
+		"[SD_CMD]", /* GPIO_86, BLSP12_SPI_MISO, P HSEC pin 11 */
+		"[SD_DAT3]", /* GPIO_87, BLSP12_SPI_CS_N, P HSEC pin 7 */
+		"[SD_SCLK]", /* GPIO_88, BLSP12_SPI_CLK, P HSEC pin 9 */
+		"TSIF1_CLK", /* GPIO_89, S HSEC pin 42 */
+		"TSIF1_EN", /* GPIO_90, S HSEC pin 46 */
+		"TSIF1_DATA", /* GPIO_91, S HSEC pin 44 */
+		"NC", /* GPIO_92 */
+		"TSIF2_CLK", /* GPIO_93, S HSEC pin 52 */
+		"TSIF2_EN", /* GPIO_94, S HSEC pin 56 */
+		"TSIF2_DATA", /* GPIO_95, S HSEC pin 54 */
+		"TSIF2_SYNC", /* GPIO_96, S HSEC pin 58 */
+		"NC", /* GPIO_97 */
+		"CAM1_STANDBY_N", /* GPIO_98 */
+		"NC", /* GPIO_99 */
+		"NC", /* GPIO_100 */
+		"[LCD1_RESET_N]", /* GPIO_101, S HSEC pin 51 */
+		"BOOT_CONFIG1", /* GPIO_102 */
+		"USB_HUB_RESET", /* GPIO_103 */
+		"CAM1_RST_N", /* GPIO_104 */
+		"NC", /* GPIO_105 */
+		"NC", /* GPIO_106 */
+		"NC", /* GPIO_107 */
+		"NC", /* GPIO_108 */
+		"NC", /* GPIO_109 */
+		"NC", /* GPIO_110 */
+		"NC", /* GPIO_111 */
+		"NC", /* GPIO_112 */
+		"PMI8994_BUA", /* GPIO_113 */
+		"PCIE2_RST_N", /* GPIO_114 */
+		"PCIE2_CLKREQ_N", /* GPIO_115 */
+		"PCIE2_WAKE", /* GPIO_116 */
+		"SSC_IRQ_0", /* GPIO_117 */
+		"SSC_IRQ_1", /* GPIO_118 */
+		"SSC_IRQ_2", /* GPIO_119 */
+		"NC", /* GPIO_120 */
+		"GPIO121", /* GPIO_121, S HSEC pin 2 */
+		"NC", /* GPIO_122 */
+		"SSC_IRQ_6", /* GPIO_123 */
+		"SSC_IRQ_7", /* GPIO_124 */
+		"GPIO-C", /* GPIO_125, TS_INT0, LSEC pin 25 */
+		"BOOT_CONFIG5", /* GPIO_126 */
+		"NC", /* GPIO_127 */
+		"NC", /* GPIO_128 */
+		"BOOT_CONFIG7", /* GPIO_129 */
+		"PCIE1_RST_N", /* GPIO_130 */
+		"PCIE1_CLKREQ_N", /* GPIO_131 */
+		"PCIE1_WAKE", /* GPIO_132 */
+		"GPIO-L", /* GPIO_133, CAM2_STANDBY_N, LSEC pin 34 */
+		"NC", /* GPIO_134 */
+		"NC", /* GPIO_135 */
+		"BOOT_CONFIG8", /* GPIO_136 */
+		"NC", /* GPIO_137 */
+		"NC", /* GPIO_138 */
+		"GPS_SSBI2", /* GPIO_139 */
+		"GPS_SSBI1", /* GPIO_140 */
+		"NC", /* GPIO_141 */
+		"NC", /* GPIO_142 */
+		"NC", /* GPIO_143 */
+		"BOOT_CONFIG6", /* GPIO_144 */
+		"NC", /* GPIO_145 */
+		"NC", /* GPIO_146 */
+		"NC", /* GPIO_147 */
+		"NC", /* GPIO_148 */
+		"NC"; /* GPIO_149 */
+
+	sdc2_cd_on: sdc2_cd_on {
+		mux {
+			pins = "gpio38";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio38";
+			bias-pull-up;		/* pull up */
+			drive-strength = <16>;	/* 16 MA */
+		};
+	};
+
+	sdc2_cd_off: sdc2_cd_off {
+		mux {
+			pins = "gpio38";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio38";
+			bias-pull-up;		/* pull up */
+			drive-strength = <2>;	/* 2 MA */
+		};
+	};
+
+	blsp1_uart1_default: blsp1_uart1_default {
+		mux {
+			pins = "gpio41", "gpio42", "gpio43", "gpio44";
+			function = "blsp_uart2";
+		};
+
+		config {
+			pins = "gpio41", "gpio42", "gpio43", "gpio44";
+			drive-strength = <16>;
+			bias-disable;
+		};
+	};
+
+	blsp1_uart1_sleep: blsp1_uart1_sleep {
+		mux {
+			pins = "gpio41", "gpio42", "gpio43", "gpio44";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio41", "gpio42", "gpio43", "gpio44";
+			drive-strength = <2>;
+			bias-disable;
+		};
+	};
+
+	hdmi_hpd_active: hdmi_hpd_active {
+		mux {
+			pins = "gpio34";
+			function = "hdmi_hot";
+		};
+
+		config {
+			pins = "gpio34";
+			bias-pull-down;
+			drive-strength = <16>;
+		};
+	};
+
+	hdmi_hpd_suspend: hdmi_hpd_suspend {
+		mux {
+			pins = "gpio34";
+			function = "hdmi_hot";
+		};
+
+		config {
+			pins = "gpio34";
+			bias-pull-down;
+			drive-strength = <2>;
+		};
+	};
+
+	hdmi_ddc_active: hdmi_ddc_active {
+		mux {
+			pins = "gpio32", "gpio33";
+			function = "hdmi_ddc";
+		};
+
+		config {
+			pins = "gpio32", "gpio33";
+			drive-strength = <2>;
+			bias-pull-up;
+		};
+	};
+
+	hdmi_ddc_suspend: hdmi_ddc_suspend {
+		mux {
+			pins = "gpio32", "gpio33";
+			function = "hdmi_ddc";
+		};
+
+		config {
+			pins = "gpio32", "gpio33";
+			drive-strength = <2>;
+			bias-pull-down;
 		};
 	};
 };
 
-&spmi_bus {
-	pmic@0 {
-		pon@800 {
-			resin {
-				compatible = "qcom,pm8941-resin";
-				interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
-				debounce = <15625>;
-				bias-pull-up;
-				linux,code = <KEY_VOLUMEDOWN>;
-			};
+&pcie0 {
+	status = "okay";
+	perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+	vddpe-3v3-supply = <&wlan_en>;
+	vdda-supply = <&vreg_l28a_0p925>;
+};
+
+&pcie1 {
+	status = "okay";
+	perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
+	vdda-supply = <&vreg_l28a_0p925>;
+};
+
+&pcie2 {
+	status = "okay";
+	perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
+	vdda-supply = <&vreg_l28a_0p925>;
+};
+
+&pcie_phy {
+	status = "okay";
+
+	vdda-phy-supply = <&vreg_l28a_0p925>;
+	vdda-pll-supply = <&vreg_l12a_1p8>;
+};
+
+&pm8994_gpios {
+	gpio-line-names =
+		"NC",
+		"KEY_VOLP_N",
+		"NC",
+		"BL1_PWM",
+		"GPIO-F", /* BL0_PWM, LSEC pin 28 */
+		"BL1_EN",
+		"NC",
+		"WLAN_EN",
+		"NC",
+		"NC",
+		"NC",
+		"NC",
+		"NC",
+		"NC",
+		"DIVCLK1",
+		"DIVCLK2",
+		"DIVCLK3",
+		"DIVCLK4",
+		"BT_EN",
+		"PMIC_SLB",
+		"PMIC_BUA",
+		"USB_VBUS_DET";
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&ls_exp_gpio_f &bt_en_gpios>;
+
+	ls_exp_gpio_f: pm8994_gpio5 {
+		pinconf {
+			pins = "gpio5";
+			output-low;
+			power-source = <2>; // PM8994_GPIO_S4, 1.8V
 		};
 	};
+
+	bt_en_gpios: bt_en_gpios {
+		pinconf {
+			pins = "gpio19";
+			function = PMIC_GPIO_FUNC_NORMAL;
+			output-low;
+			power-source = <PM8994_GPIO_S4>; // 1.8V
+			qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+			bias-pull-down;
+		};
+	};
+
+	wlan_en_gpios: wlan_en_gpios {
+		pinconf {
+			pins = "gpio8";
+			function = PMIC_GPIO_FUNC_NORMAL;
+			output-low;
+			power-source = <PM8994_GPIO_S4>; // 1.8V
+			qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+			bias-pull-down;
+		};
+	};
+
+	audio_mclk: clk_div1 {
+		pinconf {
+			pins = "gpio15";
+			function = "func1";
+			power-source = <PM8994_GPIO_S4>; // 1.8V
+		};
+	};
+
+	volume_up_gpio: pm8996_gpio2 {
+		pinconf {
+			pins = "gpio2";
+			function = "normal";
+			input-enable;
+			drive-push-pull;
+			bias-pull-up;
+			qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+			power-source = <PM8994_GPIO_S4>; // 1.8V
+		};
+	};
+
+	divclk4_pin_a: divclk4 {
+		pinconf {
+			pins = "gpio18";
+			function = PMIC_GPIO_FUNC_FUNC2;
+
+			bias-disable;
+			power-source = <PM8994_GPIO_S4>;
+		};
+	};
+
+	usb3_vbus_det_gpio: pm8996_gpio22 {
+		pinconf {
+			pins = "gpio22";
+			function = PMIC_GPIO_FUNC_NORMAL;
+			input-enable;
+			bias-pull-down;
+			qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+			power-source = <PM8994_GPIO_S4>; // 1.8V
+		};
+	};
+};
+
+&pm8994_mpps {
+	gpio-line-names =
+		"VDDPX_BIAS",
+		"WIFI_LED",
+		"NC",
+		"BT_LED",
+		"PM_MPP05",
+		"PM_MPP06",
+		"PM_MPP07",
+		"NC";
+};
+
+&pm8994_spmi_regulators {
+	qcom,saw-reg = <&saw3>;
+	s9 {
+		qcom,saw-slave;
+	};
+	s10 {
+		qcom,saw-slave;
+	};
+	s11 {
+		qcom,saw-leader;
+		regulator-always-on;
+		regulator-min-microvolt = <1230000>;
+		regulator-max-microvolt = <1230000>;
+	};
+};
+
+&pmi8994_gpios {
+	gpio-line-names =
+		"NC",
+		"SPKR_AMP_EN1",
+		"SPKR_AMP_EN2",
+		"TP61",
+		"NC",
+		"USB2_VBUS_DET",
+		"NC",
+		"NC",
+		"NC",
+		"NC";
+
+	usb2_vbus_det_gpio: pmi8996_gpio6 {
+		pinconf {
+			pins = "gpio6";
+			function = PMIC_GPIO_FUNC_NORMAL;
+			input-enable;
+			bias-pull-down;
+			qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+			power-source = <PM8994_GPIO_S4>; // 1.8V
+		};
+	};
+};
+
+&rpm_requests {
+	pm8994-regulators {
+		compatible = "qcom,rpm-pm8994-regulators";
+
+		vdd_s1-supply = <&vph_pwr>;
+		vdd_s2-supply = <&vph_pwr>;
+		vdd_s3-supply = <&vph_pwr>;
+		vdd_s4-supply = <&vph_pwr>;
+		vdd_s5-supply = <&vph_pwr>;
+		vdd_s6-supply = <&vph_pwr>;
+		vdd_s7-supply = <&vph_pwr>;
+		vdd_s8-supply = <&vph_pwr>;
+		vdd_s9-supply = <&vph_pwr>;
+		vdd_s10-supply = <&vph_pwr>;
+		vdd_s11-supply = <&vph_pwr>;
+		vdd_s12-supply = <&vph_pwr>;
+		vdd_l2_l26_l28-supply = <&vreg_s3a_1p3>;
+		vdd_l3_l11-supply = <&vreg_s8a_l3a_input>;
+		vdd_l4_l27_l31-supply = <&vreg_s3a_1p3>;
+		vdd_l5_l7-supply = <&vreg_s5a_2p15>;
+		vdd_l6_l12_l32-supply = <&vreg_s5a_2p15>;
+		vdd_l8_l16_l30-supply = <&vph_pwr>;
+		vdd_l14_l15-supply = <&vreg_s5a_2p15>;
+		vdd_l25-supply = <&vreg_s3a_1p3>;
+		vdd_lvs1_2-supply = <&vreg_s4a_1p8>;
+
+		vreg_s3a_1p3: s3 {
+			regulator-name = "vreg_s3a_1p3";
+			regulator-min-microvolt = <1300000>;
+			regulator-max-microvolt = <1300000>;
+		};
+
+		/**
+		 * 1.8v required on LS expansion
+		 * for mezzanine boards
+		 */
+		vreg_s4a_1p8: s4 {
+			regulator-name = "vreg_s4a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			regulator-always-on;
+		};
+		vreg_s5a_2p15: s5 {
+			regulator-name = "vreg_s5a_2p15";
+			regulator-min-microvolt = <2150000>;
+			regulator-max-microvolt = <2150000>;
+		};
+		vreg_s7a_1p0: s7 {
+			regulator-name = "vreg_s7a_1p0";
+			regulator-min-microvolt = <800000>;
+			regulator-max-microvolt = <800000>;
+		};
+
+		vreg_l1a_1p0: l1 {
+			regulator-name = "vreg_l1a_1p0";
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1000000>;
+		};
+		vreg_l2a_1p25: l2 {
+			regulator-name = "vreg_l2a_1p25";
+			regulator-min-microvolt = <1250000>;
+			regulator-max-microvolt = <1250000>;
+		};
+		vreg_l3a_0p875: l3 {
+			regulator-name = "vreg_l3a_0p875";
+			regulator-min-microvolt = <850000>;
+			regulator-max-microvolt = <850000>;
+		};
+		vreg_l4a_1p225: l4 {
+			regulator-name = "vreg_l4a_1p225";
+			regulator-min-microvolt = <1225000>;
+			regulator-max-microvolt = <1225000>;
+		};
+		vreg_l6a_1p2: l6 {
+			regulator-name = "vreg_l6a_1p2";
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+		};
+		vreg_l8a_1p8: l8 {
+			regulator-name = "vreg_l8a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+		vreg_l9a_1p8: l9 {
+			regulator-name = "vreg_l9a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+		vreg_l10a_1p8: l10 {
+			regulator-name = "vreg_l10a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+		vreg_l11a_1p15: l11 {
+			regulator-name = "vreg_l11a_1p15";
+			regulator-min-microvolt = <1150000>;
+			regulator-max-microvolt = <1150000>;
+		};
+		vreg_l12a_1p8: l12 {
+			regulator-name = "vreg_l12a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+		vreg_l13a_2p95: l13 {
+			regulator-name = "vreg_l13a_2p95";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <2950000>;
+		};
+		vreg_l14a_1p8: l14 {
+			regulator-name = "vreg_l14a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+		vreg_l15a_1p8: l15 {
+			regulator-name = "vreg_l15a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+		vreg_l16a_2p7: l16 {
+			regulator-name = "vreg_l16a_2p7";
+			regulator-min-microvolt = <2700000>;
+			regulator-max-microvolt = <2700000>;
+		};
+		vreg_l17a_2p8: l17 {
+			regulator-name = "vreg_l17a_2p8";
+			regulator-min-microvolt = <2500000>;
+			regulator-max-microvolt = <2500000>;
+		};
+		vreg_l18a_2p85: l18 {
+			regulator-name = "vreg_l18a_2p85";
+			regulator-min-microvolt = <2700000>;
+			regulator-max-microvolt = <2900000>;
+		};
+		vreg_l19a_2p8: l19 {
+			regulator-name = "vreg_l19a_2p8";
+			regulator-min-microvolt = <3000000>;
+			regulator-max-microvolt = <3000000>;
+		};
+		vreg_l20a_2p95: l20 {
+			regulator-name = "vreg_l20a_2p95";
+			regulator-min-microvolt = <2950000>;
+			regulator-max-microvolt = <2950000>;
+			regulator-allow-set-load;
+		};
+		vreg_l21a_2p95: l21 {
+			regulator-name = "vreg_l21a_2p95";
+			regulator-min-microvolt = <2950000>;
+			regulator-max-microvolt = <2950000>;
+			regulator-allow-set-load;
+			regulator-system-load = <200000>;
+		};
+		vreg_l22a_3p0: l22 {
+			regulator-name = "vreg_l22a_3p0";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+		};
+		vreg_l23a_2p8: l23 {
+			regulator-name = "vreg_l23a_2p8";
+			regulator-min-microvolt = <2800000>;
+			regulator-max-microvolt = <2800000>;
+		};
+		vreg_l24a_3p075: l24 {
+			regulator-name = "vreg_l24a_3p075";
+			regulator-min-microvolt = <3075000>;
+			regulator-max-microvolt = <3075000>;
+		};
+		vreg_l25a_1p2: l25 {
+			regulator-name = "vreg_l25a_1p2";
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			regulator-allow-set-load;
+		};
+		vreg_l26a_0p8: l27 {
+			regulator-name = "vreg_l26a_0p8";
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1000000>;
+		};
+		vreg_l28a_0p925: l28 {
+			regulator-name = "vreg_l28a_0p925";
+			regulator-min-microvolt = <925000>;
+			regulator-max-microvolt = <925000>;
+			regulator-allow-set-load;
+		};
+		vreg_l29a_2p8: l29 {
+			regulator-name = "vreg_l29a_2p8";
+			regulator-min-microvolt = <2800000>;
+			regulator-max-microvolt = <2800000>;
+		};
+		vreg_l30a_1p8: l30 {
+			regulator-name = "vreg_l30a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+		vreg_l32a_1p8: l32 {
+			regulator-name = "vreg_l32a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+
+		vreg_lvs1a_1p8: lvs1 {
+			regulator-name = "vreg_lvs1a_1p8";
+		};
+
+		vreg_lvs2a_1p8: lvs2 {
+			regulator-name = "vreg_lvs2a_1p8";
+		};
+	};
+};
+
+&sdhc2 {
+	/* External SD card */
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+	cd-gpios = <&msmgpio 38 0x1>;
+	vmmc-supply = <&vreg_l21a_2p95>;
+	vqmmc-supply = <&vreg_l13a_2p95>;
+	status = "okay";
 };
 
 &sound {
@@ -754,3 +979,84 @@ codec {
 		};
 	};
 };
+
+&spmi_bus {
+	pmic@0 {
+		pon@800 {
+			resin {
+				compatible = "qcom,pm8941-resin";
+				interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+				debounce = <15625>;
+				bias-pull-up;
+				linux,code = <KEY_VOLUMEDOWN>;
+			};
+		};
+	};
+};
+
+&ufsphy {
+	status = "okay";
+
+	vdda-phy-supply = <&vreg_l28a_0p925>;
+	vdda-pll-supply = <&vreg_l12a_1p8>;
+
+	vdda-phy-max-microamp = <18380>;
+	vdda-pll-max-microamp = <9440>;
+
+	vddp-ref-clk-supply = <&vreg_l25a_1p2>;
+	vddp-ref-clk-max-microamp = <100>;
+	vddp-ref-clk-always-on;
+};
+
+&ufshc {
+	status = "okay";
+
+	vcc-supply = <&vreg_l20a_2p95>;
+	vccq-supply = <&vreg_l25a_1p2>;
+	vccq2-supply = <&vreg_s4a_1p8>;
+
+	vcc-max-microamp = <600000>;
+	vccq-max-microamp = <450000>;
+	vccq2-max-microamp = <450000>;
+};
+
+&usb2 {
+	status = "okay";
+	extcon = <&usb2_id>;
+
+	dwc3@7600000 {
+		extcon = <&usb2_id>;
+		dr_mode = "otg";
+		maximum-speed = "high-speed";
+	};
+};
+
+&usb3 {
+	status = "okay";
+	extcon = <&usb3_id>;
+
+	dwc3@6a00000 {
+		extcon = <&usb3_id>;
+		dr_mode = "otg";
+	};
+};
+
+&usb3phy {
+	status = "okay";
+
+	vdda-phy-supply = <&vreg_l28a_0p925>;
+	vdda-pll-supply = <&vreg_l12a_1p8>;
+
+};
+
+&wcd9335 {
+	clock-names = "mclk", "slimbus";
+	clocks = <&div1_mclk>,
+		 <&rpmcc RPM_SMD_BB_CLK1>;
+
+	vdd-buck-supply = <&vreg_s4a_1p8>;
+	vdd-buck-sido-supply = <&vreg_s4a_1p8>;
+	vdd-tx-supply = <&vreg_s4a_1p8>;
+	vdd-rx-supply = <&vreg_s4a_1p8>;
+	vdd-io-supply = <&vreg_s4a_1p8>;
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts b/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts
new file mode 100644
index 0000000..f6ddf17a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: BSD-3-Clause
+
+/dts-v1/;
+
+#include "msm8996.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+/ {
+	model = "Inforce 6640 Single Board Computer";
+	compatible = "inforce,ifc6640", "qcom,apq8096-sbc", "qcom,apq8096";
+
+	qcom,msm-id = <291 0x00030001>;
+	qcom,board-id = <0x00010018 0>;
+
+	aliases {
+		serial0 = &blsp2_uart1;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	v1p05: v1p05-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "v1p05";
+		regulator-always-on;
+		regulator-boot-on;
+
+		regulator-min-microvolt = <1050000>;
+		regulator-max-microvolt = <1050000>;
+
+		vin-supply = <&v5p0>;
+	};
+
+	v12_poe: v12-poe-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "v12_poe";
+		regulator-always-on;
+		regulator-boot-on;
+
+		regulator-min-microvolt = <12000000>;
+		regulator-max-microvolt = <12000000>;
+	};
+
+	v3p3: v3p3-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "v3p3";
+		regulator-always-on;
+		regulator-boot-on;
+
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+
+		vin-supply = <&v12_poe>;
+	};
+
+	v5p0: v5p0-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "v5p0";
+		regulator-always-on;
+		regulator-boot-on;
+
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+
+		vin-supply = <&v12_poe>;
+	};
+
+	vph_pwr: vph-pwr-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vph_pwr";
+		regulator-always-on;
+		regulator-boot-on;
+
+		regulator-min-microvolt = <3800000>;
+		regulator-max-microvolt = <3800000>;
+	};
+};
+
+&blsp2_uart1 {
+	status = "okay";
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&blsp2_uart1_2pins_default>;
+	pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
+};
+
+&msmgpio {
+	sdc2_pins_default: sdc2-pins-default {
+		clk {
+			pins = "sdc2_clk";
+			bias-disable;
+			drive-strength = <16>;
+		};
+
+		cmd {
+			pins = "sdc2_cmd";
+			bias-pull-up;
+			drive-strength = <10>;
+		};
+
+		data {
+			pins = "sdc2_data";
+			bias-pull-up;
+			drive-strength = <10>;
+		};
+
+		cd {
+			pins = "gpio38";
+			function = "gpio";
+
+			bias-pull-up;
+			drive-strength = <16>;
+		};
+	};
+
+	sdc2_pins_sleep: sdc2-pins-sleep {
+		clk {
+			pins = "sdc2_clk";
+			bias-disable;
+			drive-strength = <2>;
+		};
+
+		cmd {
+			pins = "sdc2_cmd";
+			bias-pull-up;
+			drive-strength = <2>;
+		};
+
+		data {
+			pins = "sdc2_data";
+			bias-pull-up;
+			drive-strength = <2>;
+		};
+
+		cd {
+			pins = "gpio38";
+			function = "gpio";
+			bias-pull-up;
+			drive-strength = <2>;
+		};
+	};
+};
+
+&rpm_requests {
+	pm8994-regulators {
+		compatible = "qcom,rpm-pm8994-regulators";
+
+		vdd_s1-supply = <&vph_pwr>;
+		vdd_s2-supply = <&vph_pwr>;
+		vdd_s3-supply = <&vph_pwr>;
+		vdd_s4-supply = <&vph_pwr>;
+		vdd_s5-supply = <&vph_pwr>;
+		vdd_s6-supply = <&vph_pwr>;
+		vdd_s7-supply = <&vph_pwr>;
+		vdd_s8-supply = <&vph_pwr>;
+		vdd_s9-supply = <&vph_pwr>;
+		vdd_s10-supply = <&vph_pwr>;
+		vdd_s11-supply = <&vph_pwr>;
+		vdd_s12-supply = <&vph_pwr>;
+		vdd_l2_l26_l28-supply = <&vreg_s3a_1p3>;
+		vdd_l3_l11-supply = <&vreg_s3a_1p3>;
+		vdd_l4_l27_l31-supply = <&vreg_s3a_1p3>;
+		vdd_l5_l7-supply = <&vreg_s5a_2p15>;
+		vdd_l6_l12_l32-supply = <&vreg_s5a_2p15>;
+		vdd_l8_l16_l30-supply = <&vph_pwr>;
+		vdd_l25-supply = <&vreg_s3a_1p3>;
+		vdd_lvs1_2-supply = <&vreg_s4a_1p8>;
+
+		vreg_s3a_1p3: s3 {
+			regulator-name = "vreg_s3a_1p3";
+			regulator-min-microvolt = <1300000>;
+			regulator-max-microvolt = <1300000>;
+		};
+
+		vreg_s4a_1p8: s4 {
+			regulator-name = "vreg_s4a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			regulator-always-on;
+		};
+		vreg_s5a_2p15: s5 {
+			regulator-name = "vreg_s5a_2p15";
+			regulator-min-microvolt = <2150000>;
+			regulator-max-microvolt = <2150000>;
+		};
+		vreg_s7a_1p0: s7 {
+			regulator-name = "vreg_s7a_1p0";
+			regulator-min-microvolt = <800000>;
+			regulator-max-microvolt = <800000>;
+		};
+
+		vreg_l1a_1p0: l1 {
+			regulator-name = "vreg_l1a_1p0";
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1000000>;
+		};
+		vreg_l2a_1p25: l2 {
+			regulator-name = "vreg_l2a_1p25";
+			regulator-min-microvolt = <1250000>;
+			regulator-max-microvolt = <1250000>;
+		};
+		vreg_l3a_0p875: l3 {
+			regulator-name = "vreg_l3a_0p875";
+			regulator-min-microvolt = <850000>;
+			regulator-max-microvolt = <850000>;
+		};
+		vreg_l4a_1p225: l4 {
+			regulator-name = "vreg_l4a_1p225";
+			regulator-min-microvolt = <1225000>;
+			regulator-max-microvolt = <1225000>;
+		};
+		vreg_l6a_1p2: l6 {
+			regulator-name = "vreg_l6a_1p2";
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+		};
+		vreg_l8a_1p8: l8 {
+			regulator-name = "vreg_l8a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+		vreg_l9a_1p8: l9 {
+			regulator-name = "vreg_l9a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+		vreg_l10a_1p8: l10 {
+			regulator-name = "vreg_l10a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+		vreg_l11a_1p15: l11 {
+			regulator-name = "vreg_l11a_1p15";
+			regulator-min-microvolt = <1150000>;
+			regulator-max-microvolt = <1150000>;
+		};
+		vreg_l12a_1p8: l12 {
+			regulator-name = "vreg_l12a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+		vreg_l13a_2p95: l13 {
+			regulator-name = "vreg_l13a_2p95";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <2950000>;
+		};
+		vreg_l14a_1p8: l14 {
+			regulator-name = "vreg_l14a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+		vreg_l15a_1p8: l15 {
+			regulator-name = "vreg_l15a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+		vreg_l16a_2p7: l16 {
+			regulator-name = "vreg_l16a_2p7";
+			regulator-min-microvolt = <2700000>;
+			regulator-max-microvolt = <2700000>;
+		};
+		vreg_l17a_2p8: l17 {
+			regulator-name = "vreg_l17a_2p8";
+			regulator-min-microvolt = <2500000>;
+			regulator-max-microvolt = <2500000>;
+		};
+		vreg_l18a_2p85: l18 {
+			regulator-name = "vreg_l18a_2p85";
+			regulator-min-microvolt = <2700000>;
+			regulator-max-microvolt = <2900000>;
+		};
+		vreg_l19a_2p8: l19 {
+			regulator-name = "vreg_l19a_2p8";
+			regulator-min-microvolt = <3000000>;
+			regulator-max-microvolt = <3000000>;
+		};
+		vreg_l20a_2p95: l20 {
+			regulator-name = "vreg_l20a_2p95";
+			regulator-min-microvolt = <2950000>;
+			regulator-max-microvolt = <2950000>;
+			regulator-allow-set-load;
+		};
+		vreg_l21a_2p95: l21 {
+			regulator-name = "vreg_l21a_2p95";
+			regulator-min-microvolt = <2950000>;
+			regulator-max-microvolt = <2950000>;
+		};
+		vreg_l22a_3p0: l22 {
+			regulator-name = "vreg_l22a_3p0";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+		};
+		vreg_l23a_2p8: l23 {
+			regulator-name = "vreg_l23a_2p8";
+			regulator-min-microvolt = <2800000>;
+			regulator-max-microvolt = <2800000>;
+		};
+		vreg_l24a_3p075: l24 {
+			regulator-name = "vreg_l24a_3p075";
+			regulator-min-microvolt = <3075000>;
+			regulator-max-microvolt = <3075000>;
+		};
+		vreg_l25a_1p2: l25 {
+			regulator-name = "vreg_l25a_1p2";
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			regulator-allow-set-load;
+		};
+		vreg_l26a_0p8: l27 {
+			regulator-name = "vreg_l26a_0p8";
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1000000>;
+		};
+		vreg_l28a_0p925: l28 {
+			regulator-name = "vreg_l28a_0p925";
+			regulator-min-microvolt = <925000>;
+			regulator-max-microvolt = <925000>;
+			regulator-allow-set-load;
+		};
+		vreg_l29a_2p8: l29 {
+			regulator-name = "vreg_l29a_2p8";
+			regulator-min-microvolt = <2800000>;
+			regulator-max-microvolt = <2800000>;
+		};
+		vreg_l30a_1p8: l30 {
+			regulator-name = "vreg_l30a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+		vreg_l32a_1p8: l32 {
+			regulator-name = "vreg_l32a_1p8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+
+		vreg_lvs1a_1p8: lvs1 {
+			regulator-name = "vreg_lvs1a_1p8";
+		};
+
+		vreg_lvs2a_1p8: lvs2 {
+			regulator-name = "vreg_lvs2a_1p8";
+		};
+	};
+};
+
+&sdhc2 {
+	status = "okay";
+
+	bus-width = <4>;
+
+	cd-gpios = <&msmgpio 38 0x1>;
+
+	vmmc-supply = <&vreg_l21a_2p95>;
+	vqmmc-supply = <&vreg_l13a_2p95>;
+
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&sdc2_pins_default>;
+	pinctrl-1 = <&sdc2_pins_sleep>;
+};
+
+&ufshc {
+	status = "okay";
+
+	vcc-supply = <&vreg_l20a_2p95>;
+	vccq-supply = <&vreg_l25a_1p2>;
+	vccq2-supply = <&vreg_s4a_1p8>;
+
+	vcc-max-microamp = <600000>;
+	vccq-max-microamp = <450000>;
+	vccq2-max-microamp = <450000>;
+};
+
+&ufsphy {
+	status = "okay";
+
+	vdda-phy-supply = <&vreg_l28a_0p925>;
+	vdda-pll-supply = <&vreg_l12a_1p8>;
+
+	vdda-phy-max-microamp = <18380>;
+	vdda-pll-max-microamp = <9440>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 8686e10..9f31064 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -102,10 +102,11 @@ CPU0: cpu@0 {
 			reg = <0x0>;
 			next-level-cache = <&L2_0>;
 			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0>;
 			clocks = <&apcs>;
 			operating-points-v2 = <&cpu_opp_table>;
 			#cooling-cells = <2>;
+			power-domains = <&CPU_PD0>;
+			power-domain-names = "psci";
 		};
 
 		CPU1: cpu@1 {
@@ -114,10 +115,11 @@ CPU1: cpu@1 {
 			reg = <0x1>;
 			next-level-cache = <&L2_0>;
 			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0>;
 			clocks = <&apcs>;
 			operating-points-v2 = <&cpu_opp_table>;
 			#cooling-cells = <2>;
+			power-domains = <&CPU_PD1>;
+			power-domain-names = "psci";
 		};
 
 		CPU2: cpu@2 {
@@ -126,10 +128,11 @@ CPU2: cpu@2 {
 			reg = <0x2>;
 			next-level-cache = <&L2_0>;
 			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0>;
 			clocks = <&apcs>;
 			operating-points-v2 = <&cpu_opp_table>;
 			#cooling-cells = <2>;
+			power-domains = <&CPU_PD2>;
+			power-domain-names = "psci";
 		};
 
 		CPU3: cpu@3 {
@@ -138,10 +141,11 @@ CPU3: cpu@3 {
 			reg = <0x3>;
 			next-level-cache = <&L2_0>;
 			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0>;
 			clocks = <&apcs>;
 			operating-points-v2 = <&cpu_opp_table>;
 			#cooling-cells = <2>;
+			power-domains = <&CPU_PD3>;
+			power-domain-names = "psci";
 		};
 
 		L2_0: l2-cache {
@@ -161,12 +165,57 @@ CPU_SLEEP_0: cpu-sleep-0 {
 				min-residency-us = <2000>;
 				local-timer-stop;
 			};
+
+			CLUSTER_RET: cluster-retention {
+				compatible = "domain-idle-state";
+				arm,psci-suspend-param = <0x41000012>;
+				entry-latency-us = <500>;
+				exit-latency-us = <500>;
+				min-residency-us = <2000>;
+			};
+
+			CLUSTER_PWRDN: cluster-gdhs {
+				compatible = "domain-idle-state";
+				arm,psci-suspend-param = <0x41000032>;
+				entry-latency-us = <2000>;
+				exit-latency-us = <2000>;
+				min-residency-us = <6000>;
+			};
 		};
 	};
 
 	psci {
 		compatible = "arm,psci-1.0";
 		method = "smc";
+
+		CPU_PD0: cpu-pd0 {
+			#power-domain-cells = <0>;
+			power-domains = <&CLUSTER_PD>;
+			domain-idle-states = <&CPU_SLEEP_0>;
+		};
+
+		CPU_PD1: cpu-pd1 {
+			#power-domain-cells = <0>;
+			power-domains = <&CLUSTER_PD>;
+			domain-idle-states = <&CPU_SLEEP_0>;
+		};
+
+		CPU_PD2: cpu-pd2 {
+			#power-domain-cells = <0>;
+			power-domains = <&CLUSTER_PD>;
+			domain-idle-states = <&CPU_SLEEP_0>;
+		};
+
+		CPU_PD3: cpu-pd3 {
+			#power-domain-cells = <0>;
+			power-domains = <&CLUSTER_PD>;
+			domain-idle-states = <&CPU_SLEEP_0>;
+		};
+
+		CLUSTER_PD: cluster-pd {
+			#power-domain-cells = <0>;
+			domain-idle-states = <&CLUSTER_RET>, <&CLUSTER_PWRDN>;
+		};
 	};
 
 	pmu {
@@ -429,7 +478,8 @@ apcs: mailbox@b011000 {
 			compatible = "qcom,msm8916-apcs-kpss-global", "syscon";
 			reg = <0xb011000 0x1000>;
 			#mbox-cells = <1>;
-			clocks = <&a53pll>;
+			clocks = <&a53pll>, <&gcc GPLL0_VOTE>;
+			clock-names = "pll", "aux";
 			#clock-cells = <0>;
 		};
 
@@ -816,6 +866,8 @@ tsens: thermal-sensor@4a9000 {
 			nvmem-cells = <&tsens_caldata>, <&tsens_calsel>;
 			nvmem-cell-names = "calib", "calib_sel";
 			#qcom,sensors = <5>;
+			interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "uplow";
 			#thermal-sensor-cells = <1>;
 		};
 
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 4ca2e7b..7ae082e 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -16,72 +16,19 @@ / {
 
 	chosen { };
 
-	memory {
-		device_type = "memory";
-		/* We expect the bootloader to fill in the reg */
-		reg = <0 0 0 0>;
-	};
-
-	reserved-memory {
-		#address-cells = <2>;
-		#size-cells = <2>;
-		ranges;
-
-		mba_region: mba@91500000 {
-			reg = <0x0 0x91500000 0x0 0x200000>;
-			no-map;
+	clocks {
+		xo_board: xo_board {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <19200000>;
+			clock-output-names = "xo_board";
 		};
 
-		slpi_region: slpi@90b00000 {
-			reg = <0x0 0x90b00000 0x0 0xa00000>;
-			no-map;
-		};
-
-		venus_region: venus@90400000 {
-			reg = <0x0 0x90400000 0x0 0x700000>;
-			no-map;
-		};
-
-		adsp_region: adsp@8ea00000 {
-			reg = <0x0 0x8ea00000 0x0 0x1a00000>;
-			no-map;
-		};
-
-		mpss_region: mpss@88800000 {
-			reg = <0x0 0x88800000 0x0 0x6200000>;
-			no-map;
-		};
-
-		smem_mem: smem-mem@86000000 {
-			reg = <0x0 0x86000000 0x0 0x200000>;
-			no-map;
-		};
-
-		memory@85800000 {
-			reg = <0x0 0x85800000 0x0 0x800000>;
-			no-map;
-		};
-
-		memory@86200000 {
-			reg = <0x0 0x86200000 0x0 0x2600000>;
-			no-map;
-		};
-
-		rmtfs@86700000 {
-			compatible = "qcom,rmtfs-mem";
-
-			size = <0x0 0x200000>;
-			alloc-ranges = <0x0 0xa0000000 0x0 0x2000000>;
-			no-map;
-
-			qcom,client-id = <1>;
-			qcom,vmid = <15>;
-		};
-
-		zap_shader_region: gpu@8f200000 {
-			compatible = "shared-dma-pool";
-			reg = <0x0 0x90b00000 0x0 0xa00000>;
-			no-map;
+		sleep_clk: sleep_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <32764>;
+			clock-output-names = "sleep_clk";
 		};
 	};
 
@@ -173,275 +120,9 @@ CPU_SLEEP_0: cpu-sleep-0 {
 		};
 	};
 
-	thermal-zones {
-		cpu0-thermal {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-
-			thermal-sensors = <&tsens0 3>;
-
-			trips {
-				cpu0_alert0: trip-point@0 {
-					temperature = <75000>;
-					hysteresis = <2000>;
-					type = "passive";
-				};
-
-				cpu0_crit: cpu_crit {
-					temperature = <110000>;
-					hysteresis = <2000>;
-					type = "critical";
-				};
-			};
-		};
-
-		cpu1-thermal {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-
-			thermal-sensors = <&tsens0 5>;
-
-			trips {
-				cpu1_alert0: trip-point@0 {
-					temperature = <75000>;
-					hysteresis = <2000>;
-					type = "passive";
-				};
-
-				cpu1_crit: cpu_crit {
-					temperature = <110000>;
-					hysteresis = <2000>;
-					type = "critical";
-				};
-			};
-		};
-
-		cpu2-thermal {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-
-			thermal-sensors = <&tsens0 8>;
-
-			trips {
-				cpu2_alert0: trip-point@0 {
-					temperature = <75000>;
-					hysteresis = <2000>;
-					type = "passive";
-				};
-
-				cpu2_crit: cpu_crit {
-					temperature = <110000>;
-					hysteresis = <2000>;
-					type = "critical";
-				};
-			};
-		};
-
-		cpu3-thermal {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-
-			thermal-sensors = <&tsens0 10>;
-
-			trips {
-				cpu3_alert0: trip-point@0 {
-					temperature = <75000>;
-					hysteresis = <2000>;
-					type = "passive";
-				};
-
-				cpu3_crit: cpu_crit {
-					temperature = <110000>;
-					hysteresis = <2000>;
-					type = "critical";
-				};
-			};
-		};
-
-		gpu-thermal-top {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-
-			thermal-sensors = <&tsens1 6>;
-
-			trips {
-				gpu1_alert0: trip-point@0 {
-					temperature = <90000>;
-					hysteresis = <2000>;
-					type = "hot";
-				};
-			};
-		};
-
-		gpu-thermal-bottom {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-
-			thermal-sensors = <&tsens1 7>;
-
-			trips {
-				gpu2_alert0: trip-point@0 {
-					temperature = <90000>;
-					hysteresis = <2000>;
-					type = "hot";
-				};
-			};
-		};
-
-		m4m-thermal {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-
-			thermal-sensors = <&tsens0 1>;
-
-			trips {
-				m4m_alert0: trip-point@0 {
-					temperature = <90000>;
-					hysteresis = <2000>;
-					type = "hot";
-				};
-			};
-		};
-
-		l3-or-venus-thermal {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-
-			thermal-sensors = <&tsens0 2>;
-
-			trips {
-				l3_or_venus_alert0: trip-point@0 {
-					temperature = <90000>;
-					hysteresis = <2000>;
-					type = "hot";
-				};
-			};
-		};
-
-		cluster0-l2-thermal {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-
-			thermal-sensors = <&tsens0 7>;
-
-			trips {
-				cluster0_l2_alert0: trip-point@0 {
-					temperature = <90000>;
-					hysteresis = <2000>;
-					type = "hot";
-				};
-			};
-		};
-
-		cluster1-l2-thermal {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-
-			thermal-sensors = <&tsens0 12>;
-
-			trips {
-				cluster1_l2_alert0: trip-point@0 {
-					temperature = <90000>;
-					hysteresis = <2000>;
-					type = "hot";
-				};
-			};
-		};
-
-		camera-thermal {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-
-			thermal-sensors = <&tsens1 1>;
-
-			trips {
-				camera_alert0: trip-point@0 {
-					temperature = <90000>;
-					hysteresis = <2000>;
-					type = "hot";
-				};
-			};
-		};
-
-		q6-dsp-thermal {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-
-			thermal-sensors = <&tsens1 2>;
-
-			trips {
-				q6_dsp_alert0: trip-point@0 {
-					temperature = <90000>;
-					hysteresis = <2000>;
-					type = "hot";
-				};
-			};
-		};
-
-		mem-thermal {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-
-			thermal-sensors = <&tsens1 3>;
-
-			trips {
-				mem_alert0: trip-point@0 {
-					temperature = <90000>;
-					hysteresis = <2000>;
-					type = "hot";
-				};
-			};
-		};
-
-		modemtx-thermal {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-
-			thermal-sensors = <&tsens1 4>;
-
-			trips {
-				modemtx_alert0: trip-point@0 {
-					temperature = <90000>;
-					hysteresis = <2000>;
-					type = "hot";
-				};
-			};
-		};
-	};
-
-	timer {
-		compatible = "arm,armv8-timer";
-		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
-			     <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
-			     <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
-			     <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
-	};
-
-	clocks {
-		xo_board: xo_board {
-			compatible = "fixed-clock";
-			#clock-cells = <0>;
-			clock-frequency = <19200000>;
-			clock-output-names = "xo_board";
-		};
-
-		sleep_clk: sleep_clk {
-			compatible = "fixed-clock";
-			#clock-cells = <0>;
-			clock-frequency = <32764>;
-			clock-output-names = "sleep_clk";
-		};
-	};
-
-	psci {
-		compatible = "arm,psci-1.0";
-		method = "smc";
-	};
-
 	firmware {
 		scm {
 			compatible = "qcom,scm-msm8996";
-
 			qcom,dload-mode = <&tcsr 0x13000>;
 		};
 	};
@@ -452,10 +133,78 @@ tcsr_mutex: hwlock {
 		#hwlock-cells = <1>;
 	};
 
-	smem {
-		compatible = "qcom,smem";
-		memory-region = <&smem_mem>;
-		hwlocks = <&tcsr_mutex 3>;
+	memory {
+		device_type = "memory";
+		/* We expect the bootloader to fill in the reg */
+		reg = <0 0 0 0>;
+	};
+
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		mba_region: mba@91500000 {
+			reg = <0x0 0x91500000 0x0 0x200000>;
+			no-map;
+		};
+
+		slpi_region: slpi@90b00000 {
+			reg = <0x0 0x90b00000 0x0 0xa00000>;
+			no-map;
+		};
+
+		venus_region: venus@90400000 {
+			reg = <0x0 0x90400000 0x0 0x700000>;
+			no-map;
+		};
+
+		adsp_region: adsp@8ea00000 {
+			reg = <0x0 0x8ea00000 0x0 0x1a00000>;
+			no-map;
+		};
+
+		mpss_region: mpss@88800000 {
+			reg = <0x0 0x88800000 0x0 0x6200000>;
+			no-map;
+		};
+
+		smem_mem: smem-mem@86000000 {
+			reg = <0x0 0x86000000 0x0 0x200000>;
+			no-map;
+		};
+
+		memory@85800000 {
+			reg = <0x0 0x85800000 0x0 0x800000>;
+			no-map;
+		};
+
+		memory@86200000 {
+			reg = <0x0 0x86200000 0x0 0x2600000>;
+			no-map;
+		};
+
+		rmtfs@86700000 {
+			compatible = "qcom,rmtfs-mem";
+
+			size = <0x0 0x200000>;
+			alloc-ranges = <0x0 0xa0000000 0x0 0x2000000>;
+			no-map;
+
+			qcom,client-id = <1>;
+			qcom,vmid = <15>;
+		};
+
+		zap_shader_region: gpu@8f200000 {
+			compatible = "shared-dma-pool";
+			reg = <0x0 0x90b00000 0x0 0xa00000>;
+			no-map;
+		};
 	};
 
 	rpm-glink {
@@ -467,7 +216,7 @@ rpm-glink {
 
 		mboxes = <&apcs_glb 0>;
 
-		rpm_requests {
+		rpm_requests: rpm-requests {
 			compatible = "qcom,rpm-msm8996";
 			qcom,glink-channels = "rpm_requests";
 
@@ -509,57 +258,83 @@ rpmpd_opp6: opp6 {
 					};
 				};
 			};
+		};
+	};
 
-			pm8994-regulators {
-				compatible = "qcom,rpm-pm8994-regulators";
+	smem {
+		compatible = "qcom,smem";
+		memory-region = <&smem_mem>;
+		hwlocks = <&tcsr_mutex 3>;
+	};
 
-				pm8994_s1: s1 {};
-				pm8994_s2: s2 {};
-				pm8994_s3: s3 {};
-				pm8994_s4: s4 {};
-				pm8994_s5: s5 {};
-				pm8994_s6: s6 {};
-				pm8994_s7: s7 {};
-				pm8994_s8: s8 {};
-				pm8994_s9: s9 {};
-				pm8994_s10: s10 {};
-				pm8994_s11: s11 {};
-				pm8994_s12: s12 {};
+	smp2p-adsp {
+		compatible = "qcom,smp2p";
+		qcom,smem = <443>, <429>;
 
-				pm8994_l1: l1 {};
-				pm8994_l2: l2 {};
-				pm8994_l3: l3 {};
-				pm8994_l4: l4 {};
-				pm8994_l5: l5 {};
-				pm8994_l6: l6 {};
-				pm8994_l7: l7 {};
-				pm8994_l8: l8 {};
-				pm8994_l9: l9 {};
-				pm8994_l10: l10 {};
-				pm8994_l11: l11 {};
-				pm8994_l12: l12 {};
-				pm8994_l13: l13 {};
-				pm8994_l14: l14 {};
-				pm8994_l15: l15 {};
-				pm8994_l16: l16 {};
-				pm8994_l17: l17 {};
-				pm8994_l18: l18 {};
-				pm8994_l19: l19 {};
-				pm8994_l20: l20 {};
-				pm8994_l21: l21 {};
-				pm8994_l22: l22 {};
-				pm8994_l23: l23 {};
-				pm8994_l24: l24 {};
-				pm8994_l25: l25 {};
-				pm8994_l26: l26 {};
-				pm8994_l27: l27 {};
-				pm8994_l28: l28 {};
-				pm8994_l29: l29 {};
-				pm8994_l30: l30 {};
-				pm8994_l31: l31 {};
-				pm8994_l32: l32 {};
-			};
+		interrupts = <0 158 IRQ_TYPE_EDGE_RISING>;
 
+		mboxes = <&apcs_glb 10>;
+
+		qcom,local-pid = <0>;
+		qcom,remote-pid = <2>;
+
+		smp2p_adsp_out: master-kernel {
+			qcom,entry-name = "master-kernel";
+			#qcom,smem-state-cells = <1>;
+		};
+
+		smp2p_adsp_in: slave-kernel {
+			qcom,entry-name = "slave-kernel";
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+	};
+
+	smp2p-modem {
+		compatible = "qcom,smp2p";
+		qcom,smem = <435>, <428>;
+
+		interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
+
+		mboxes = <&apcs_glb 14>;
+
+		qcom,local-pid = <0>;
+		qcom,remote-pid = <1>;
+
+		modem_smp2p_out: master-kernel {
+			qcom,entry-name = "master-kernel";
+			#qcom,smem-state-cells = <1>;
+		};
+
+		modem_smp2p_in: slave-kernel {
+			qcom,entry-name = "slave-kernel";
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+	};
+
+	smp2p-slpi {
+		compatible = "qcom,smp2p";
+		qcom,smem = <481>, <430>;
+
+		interrupts = <GIC_SPI 178 IRQ_TYPE_EDGE_RISING>;
+
+		mboxes = <&apcs_glb 26>;
+
+		qcom,local-pid = <0>;
+		qcom,remote-pid = <3>;
+
+		smp2p_slpi_in: slave-kernel {
+			qcom,entry-name = "slave-kernel";
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		smp2p_slpi_out: master-kernel {
+			qcom,entry-name = "master-kernel";
+			#qcom,smem-state-cells = <1>;
 		};
 	};
 
@@ -569,9 +344,90 @@ soc: soc {
 		ranges = <0 0 0 0xffffffff>;
 		compatible = "simple-bus";
 
+		pcie_phy: phy@34000 {
+			compatible = "qcom,msm8996-qmp-pcie-phy";
+			reg = <0x00034000 0x488>;
+			#clock-cells = <1>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+				<&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
+				<&gcc GCC_PCIE_CLKREF_CLK>;
+			clock-names = "aux", "cfg_ahb", "ref";
+
+			resets = <&gcc GCC_PCIE_PHY_BCR>,
+				<&gcc GCC_PCIE_PHY_COM_BCR>,
+				<&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
+			reset-names = "phy", "common", "cfg";
+			status = "disabled";
+
+			pciephy_0: lane@35000 {
+				reg = <0x00035000 0x130>,
+				      <0x00035200 0x200>,
+				      <0x00035400 0x1dc>;
+				#phy-cells = <0>;
+
+				clock-output-names = "pcie_0_pipe_clk_src";
+				clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+				clock-names = "pipe0";
+				resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+				reset-names = "lane0";
+			};
+
+			pciephy_1: lane@36000 {
+				reg = <0x00036000 0x130>,
+				      <0x00036200 0x200>,
+				      <0x00036400 0x1dc>;
+				#phy-cells = <0>;
+
+				clock-output-names = "pcie_1_pipe_clk_src";
+				clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
+				clock-names = "pipe1";
+				resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+				reset-names = "lane1";
+			};
+
+			pciephy_2: lane@37000 {
+				reg = <0x00037000 0x130>,
+				      <0x00037200 0x200>,
+				      <0x00037400 0x1dc>;
+				#phy-cells = <0>;
+
+				clock-output-names = "pcie_2_pipe_clk_src";
+				clocks = <&gcc GCC_PCIE_2_PIPE_CLK>;
+				clock-names = "pipe2";
+				resets = <&gcc GCC_PCIE_2_PHY_BCR>;
+				reset-names = "lane2";
+			};
+		};
+
 		rpm_msg_ram: memory@68000 {
 			compatible = "qcom,rpm-msg-ram";
-			reg = <0x68000 0x6000>;
+			reg = <0x00068000 0x6000>;
+		};
+
+		qfprom@74000 {
+			compatible = "qcom,qfprom";
+			reg = <0x00074000 0x8ff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			qusb2p_hstx_trim: hstx_trim@24e {
+				reg = <0x24e 0x2>;
+				bits = <5 4>;
+			};
+
+			qusb2s_hstx_trim: hstx_trim@24f {
+				reg = <0x24f 0x1>;
+				bits = <1 4>;
+			};
+
+			gpu_speed_bin: gpu_speed_bin@133 {
+				reg = <0x133 0x1>;
+				bits = <5 3>;
+			};
 		};
 
 		rng: rng@83000 {
@@ -581,60 +437,790 @@ rng: rng@83000 {
 			clock-names = "core";
 		};
 
-		tcsr_mutex_regs: syscon@740000 {
-			compatible = "syscon";
-			reg = <0x740000 0x20000>;
-		};
-
-		tsens0: thermal-sensor@4a9000 {
-			compatible = "qcom,msm8996-tsens";
-			reg = <0x4a9000 0x1000>, /* TM */
-			      <0x4a8000 0x1000>; /* SROT */
-			#qcom,sensors = <13>;
-			interrupts = <GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "uplow";
-			#thermal-sensor-cells = <1>;
-		};
-
-		tsens1: thermal-sensor@4ad000 {
-			compatible = "qcom,msm8996-tsens";
-			reg = <0x4ad000 0x1000>, /* TM */
-			      <0x4ac000 0x1000>; /* SROT */
-			#qcom,sensors = <8>;
-			interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "uplow";
-			#thermal-sensor-cells = <1>;
-		};
-
-		tcsr: syscon@7a0000 {
-			compatible = "qcom,tcsr-msm8996", "syscon";
-			reg = <0x7a0000 0x18000>;
-		};
-
-		intc: interrupt-controller@9bc0000 {
-			compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
-			#interrupt-cells = <3>;
-			interrupt-controller;
-			#redistributor-regions = <1>;
-			redistributor-stride = <0x0 0x40000>;
-			reg = <0x09bc0000 0x10000>,
-			      <0x09c00000 0x100000>;
-			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
-		};
-
-		apcs_glb: mailbox@9820000 {
-			compatible = "qcom,msm8996-apcs-hmss-global";
-			reg = <0x9820000 0x1000>;
-
-			#mbox-cells = <1>;
-		};
-
 		gcc: clock-controller@300000 {
 			compatible = "qcom,gcc-msm8996";
 			#clock-cells = <1>;
 			#reset-cells = <1>;
 			#power-domain-cells = <1>;
-			reg = <0x300000 0x90000>;
+			reg = <0x00300000 0x90000>;
+		};
+
+		tsens0: thermal-sensor@4a9000 {
+			compatible = "qcom,msm8996-tsens";
+			reg = <0x004a9000 0x1000>, /* TM */
+			      <0x004a8000 0x1000>; /* SROT */
+			#qcom,sensors = <13>;
+			interrupts = <GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "uplow", "critical";
+			#thermal-sensor-cells = <1>;
+		};
+
+		tsens1: thermal-sensor@4ad000 {
+			compatible = "qcom,msm8996-tsens";
+			reg = <0x004ad000 0x1000>, /* TM */
+			      <0x004ac000 0x1000>; /* SROT */
+			#qcom,sensors = <8>;
+			interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "uplow", "critical";
+			#thermal-sensor-cells = <1>;
+		};
+
+		tcsr_mutex_regs: syscon@740000 {
+			compatible = "syscon";
+			reg = <0x00740000 0x20000>;
+		};
+
+		tcsr: syscon@7a0000 {
+			compatible = "qcom,tcsr-msm8996", "syscon";
+			reg = <0x007a0000 0x18000>;
+		};
+
+		mmcc: clock-controller@8c0000 {
+			compatible = "qcom,mmcc-msm8996";
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+			#power-domain-cells = <1>;
+			reg = <0x008c0000 0x40000>;
+			assigned-clocks = <&mmcc MMPLL9_PLL>,
+					  <&mmcc MMPLL1_PLL>,
+					  <&mmcc MMPLL3_PLL>,
+					  <&mmcc MMPLL4_PLL>,
+					  <&mmcc MMPLL5_PLL>;
+			assigned-clock-rates = <624000000>,
+					       <810000000>,
+					       <980000000>,
+					       <960000000>,
+					       <825000000>;
+		};
+
+		mdss: mdss@900000 {
+			compatible = "qcom,mdss";
+
+			reg = <0x00900000 0x1000>,
+			      <0x009b0000 0x1040>,
+			      <0x009b8000 0x1040>;
+			reg-names = "mdss_phys",
+				    "vbif_phys",
+				    "vbif_nrt_phys";
+
+			power-domains = <&mmcc MDSS_GDSC>;
+			interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			clocks = <&mmcc MDSS_AHB_CLK>;
+			clock-names = "iface";
+
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			mdp: mdp@901000 {
+				compatible = "qcom,mdp5";
+				reg = <0x00901000 0x90000>;
+				reg-names = "mdp_phys";
+
+				interrupt-parent = <&mdss>;
+				interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+				clocks = <&mmcc MDSS_AHB_CLK>,
+					 <&mmcc MDSS_AXI_CLK>,
+					 <&mmcc MDSS_MDP_CLK>,
+					 <&mmcc SMMU_MDP_AXI_CLK>,
+					 <&mmcc MDSS_VSYNC_CLK>;
+				clock-names = "iface",
+					      "bus",
+					      "core",
+					      "iommu",
+					      "vsync";
+
+				iommus = <&mdp_smmu 0>;
+
+				ports {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					port@0 {
+						reg = <0>;
+						mdp5_intf3_out: endpoint {
+							remote-endpoint = <&hdmi_in>;
+						};
+					};
+				};
+			};
+
+			hdmi: hdmi-tx@9a0000 {
+				compatible = "qcom,hdmi-tx-8996";
+				reg =	<0x009a0000 0x50c>,
+					<0x00070000 0x6158>,
+					<0x009e0000 0xfff>;
+				reg-names = "core_physical",
+					    "qfprom_physical",
+					    "hdcp_physical";
+
+				interrupt-parent = <&mdss>;
+				interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+
+				clocks = <&mmcc MDSS_MDP_CLK>,
+					 <&mmcc MDSS_AHB_CLK>,
+					 <&mmcc MDSS_HDMI_CLK>,
+					 <&mmcc MDSS_HDMI_AHB_CLK>,
+					 <&mmcc MDSS_EXTPCLK_CLK>;
+				clock-names =
+					"mdp_core",
+					"iface",
+					"core",
+					"alt_iface",
+					"extp";
+
+				phys = <&hdmi_phy>;
+				phy-names = "hdmi_phy";
+				#sound-dai-cells = <1>;
+
+				ports {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					port@0 {
+						reg = <0>;
+						hdmi_in: endpoint {
+							remote-endpoint = <&mdp5_intf3_out>;
+						};
+					};
+				};
+			};
+
+			hdmi_phy: hdmi-phy@9a0600 {
+				#phy-cells = <0>;
+				compatible = "qcom,hdmi-phy-8996";
+				reg = <0x009a0600 0x1c4>,
+				      <0x009a0a00 0x124>,
+				      <0x009a0c00 0x124>,
+				      <0x009a0e00 0x124>,
+				      <0x009a1000 0x124>,
+				      <0x009a1200 0x0c8>;
+				reg-names = "hdmi_pll",
+					    "hdmi_tx_l0",
+					    "hdmi_tx_l1",
+					    "hdmi_tx_l2",
+					    "hdmi_tx_l3",
+					    "hdmi_phy";
+
+				clocks = <&mmcc MDSS_AHB_CLK>,
+					 <&gcc GCC_HDMI_CLKREF_CLK>;
+				clock-names = "iface",
+					      "ref";
+			};
+		};
+		gpu@b00000 {
+			compatible = "qcom,adreno-530.2", "qcom,adreno";
+			#stream-id-cells = <16>;
+
+			reg = <0x00b00000 0x3f000>;
+			reg-names = "kgsl_3d0_reg_memory";
+
+			interrupts = <0 300 IRQ_TYPE_LEVEL_HIGH>;
+
+			clocks = <&mmcc GPU_GX_GFX3D_CLK>,
+				<&mmcc GPU_AHB_CLK>,
+				<&mmcc GPU_GX_RBBMTIMER_CLK>,
+				<&gcc GCC_BIMC_GFX_CLK>,
+				<&gcc GCC_MMSS_BIMC_GFX_CLK>;
+
+			clock-names = "core",
+				"iface",
+				"rbbmtimer",
+				"mem",
+				"mem_iface";
+
+			power-domains = <&mmcc GPU_GDSC>;
+			iommus = <&adreno_smmu 0>;
+
+			nvmem-cells = <&gpu_speed_bin>;
+			nvmem-cell-names = "speed_bin";
+
+			qcom,gpu-quirk-two-pass-use-wfi;
+			qcom,gpu-quirk-fault-detect-mask;
+
+			operating-points-v2 = <&gpu_opp_table>;
+
+			gpu_opp_table: opp-table {
+				compatible = "operating-points-v2";
+
+				/*
+				 * 624Mhz and 560Mhz are only available on speed
+				 * bin (1 << 0). All the rest are available on
+				 * all bins of the hardware
+				 */
+				opp-624000000 {
+					opp-hz = /bits/ 64 <624000000>;
+					opp-supported-hw = <0x01>;
+				};
+				opp-560000000 {
+					opp-hz = /bits/ 64 <560000000>;
+					opp-supported-hw = <0x01>;
+				};
+				opp-510000000 {
+					opp-hz = /bits/ 64 <510000000>;
+					opp-supported-hw = <0xFF>;
+				};
+				opp-401800000 {
+					opp-hz = /bits/ 64 <401800000>;
+					opp-supported-hw = <0xFF>;
+				};
+				opp-315000000 {
+					opp-hz = /bits/ 64 <315000000>;
+					opp-supported-hw = <0xFF>;
+				};
+				opp-214000000 {
+					opp-hz = /bits/ 64 <214000000>;
+					opp-supported-hw = <0xFF>;
+				};
+				opp-133000000 {
+					opp-hz = /bits/ 64 <133000000>;
+					opp-supported-hw = <0xFF>;
+				};
+			};
+
+			zap-shader {
+				memory-region = <&zap_shader_region>;
+			};
+		};
+
+		msmgpio: pinctrl@1010000 {
+			compatible = "qcom,msm8996-pinctrl";
+			reg = <0x01010000 0x300000>;
+			interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		spmi_bus: qcom,spmi@400f000 {
+			compatible = "qcom,spmi-pmic-arb";
+			reg = <0x0400f000 0x1000>,
+			      <0x04400000 0x800000>,
+			      <0x04c00000 0x800000>,
+			      <0x05800000 0x200000>,
+			      <0x0400a000 0x002100>;
+			reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+			interrupt-names = "periph_irq";
+			interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
+			qcom,ee = <0>;
+			qcom,channel = <0>;
+			#address-cells = <2>;
+			#size-cells = <0>;
+			interrupt-controller;
+			#interrupt-cells = <4>;
+		};
+
+		agnoc@0 {
+			power-domains = <&gcc AGGRE0_NOC_GDSC>;
+			compatible = "simple-pm-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			pcie0: pcie@600000 {
+				compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+				status = "disabled";
+				power-domains = <&gcc PCIE0_GDSC>;
+				bus-range = <0x00 0xff>;
+				num-lanes = <1>;
+
+				reg = <0x00600000 0x2000>,
+				      <0x0c000000 0xf1d>,
+				      <0x0c000f20 0xa8>,
+				      <0x0c100000 0x100000>;
+				reg-names = "parf", "dbi", "elbi", "config";
+
+				phys = <&pciephy_0>;
+				phy-names = "pciephy";
+
+				#address-cells = <3>;
+				#size-cells = <2>;
+				ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>,
+					<0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
+
+				interrupts = <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "msi";
+				#interrupt-cells = <1>;
+				interrupt-map-mask = <0 0 0 0x7>;
+				interrupt-map = <0 0 0 1 &intc 0 244 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+						<0 0 0 2 &intc 0 245 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+						<0 0 0 3 &intc 0 247 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+						<0 0 0 4 &intc 0 248 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+				pinctrl-names = "default", "sleep";
+				pinctrl-0 = <&pcie0_clkreq_default &pcie0_perst_default &pcie0_wake_default>;
+				pinctrl-1 = <&pcie0_clkreq_sleep &pcie0_perst_default &pcie0_wake_sleep>;
+
+				linux,pci-domain = <0>;
+
+				clocks = <&gcc GCC_PCIE_0_PIPE_CLK>,
+					<&gcc GCC_PCIE_0_AUX_CLK>,
+					<&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+					<&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+					<&gcc GCC_PCIE_0_SLV_AXI_CLK>;
+
+				clock-names =  "pipe",
+						"aux",
+						"cfg",
+						"bus_master",
+						"bus_slave";
+
+			};
+
+			pcie1: pcie@608000 {
+				compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+				power-domains = <&gcc PCIE1_GDSC>;
+				bus-range = <0x00 0xff>;
+				num-lanes = <1>;
+
+				status = "disabled";
+
+				reg = <0x00608000 0x2000>,
+				      <0x0d000000 0xf1d>,
+				      <0x0d000f20 0xa8>,
+				      <0x0d100000 0x100000>;
+
+				reg-names = "parf", "dbi", "elbi", "config";
+
+				phys = <&pciephy_1>;
+				phy-names = "pciephy";
+
+				#address-cells = <3>;
+				#size-cells = <2>;
+				ranges = <0x01000000 0x0 0x0d200000 0x0d200000 0x0 0x100000>,
+					<0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
+
+				interrupts = <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "msi";
+				#interrupt-cells = <1>;
+				interrupt-map-mask = <0 0 0 0x7>;
+				interrupt-map = <0 0 0 1 &intc 0 272 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+						<0 0 0 2 &intc 0 273 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+						<0 0 0 3 &intc 0 274 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+						<0 0 0 4 &intc 0 275 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+				pinctrl-names = "default", "sleep";
+				pinctrl-0 = <&pcie1_clkreq_default &pcie1_perst_default &pcie1_wake_default>;
+				pinctrl-1 = <&pcie1_clkreq_sleep &pcie1_perst_default &pcie1_wake_sleep>;
+
+				linux,pci-domain = <1>;
+
+				clocks = <&gcc GCC_PCIE_1_PIPE_CLK>,
+					<&gcc GCC_PCIE_1_AUX_CLK>,
+					<&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+					<&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
+					<&gcc GCC_PCIE_1_SLV_AXI_CLK>;
+
+				clock-names =  "pipe",
+						"aux",
+						"cfg",
+						"bus_master",
+						"bus_slave";
+			};
+
+			pcie2: pcie@610000 {
+				compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
+				power-domains = <&gcc PCIE2_GDSC>;
+				bus-range = <0x00 0xff>;
+				num-lanes = <1>;
+				status = "disabled";
+				reg = <0x00610000 0x2000>,
+				      <0x0e000000 0xf1d>,
+				      <0x0e000f20 0xa8>,
+				      <0x0e100000 0x100000>;
+
+				reg-names = "parf", "dbi", "elbi", "config";
+
+				phys = <&pciephy_2>;
+				phy-names = "pciephy";
+
+				#address-cells = <3>;
+				#size-cells = <2>;
+				ranges = <0x01000000 0x0 0x0e200000 0x0e200000 0x0 0x100000>,
+					<0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;
+
+				device_type = "pci";
+
+				interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "msi";
+				#interrupt-cells = <1>;
+				interrupt-map-mask = <0 0 0 0x7>;
+				interrupt-map = <0 0 0 1 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+						<0 0 0 2 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+						<0 0 0 3 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+						<0 0 0 4 &intc 0 145 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+				pinctrl-names = "default", "sleep";
+				pinctrl-0 = <&pcie2_clkreq_default &pcie2_perst_default &pcie2_wake_default>;
+				pinctrl-1 = <&pcie2_clkreq_sleep &pcie2_perst_default &pcie2_wake_sleep>;
+
+				linux,pci-domain = <2>;
+				clocks = <&gcc GCC_PCIE_2_PIPE_CLK>,
+					<&gcc GCC_PCIE_2_AUX_CLK>,
+					<&gcc GCC_PCIE_2_CFG_AHB_CLK>,
+					<&gcc GCC_PCIE_2_MSTR_AXI_CLK>,
+					<&gcc GCC_PCIE_2_SLV_AXI_CLK>;
+
+				clock-names =  "pipe",
+						"aux",
+						"cfg",
+						"bus_master",
+						"bus_slave";
+			};
+		};
+
+		ufshc: ufshc@624000 {
+			compatible = "qcom,ufshc";
+			reg = <0x00624000 0x2500>;
+			interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+
+			phys = <&ufsphy>;
+			phy-names = "ufsphy";
+
+			power-domains = <&gcc UFS_GDSC>;
+
+			clock-names =
+				"core_clk_src",
+				"core_clk",
+				"bus_clk",
+				"bus_aggr_clk",
+				"iface_clk",
+				"core_clk_unipro_src",
+				"core_clk_unipro",
+				"core_clk_ice",
+				"ref_clk",
+				"tx_lane0_sync_clk",
+				"rx_lane0_sync_clk";
+			clocks =
+				<&gcc UFS_AXI_CLK_SRC>,
+				<&gcc GCC_UFS_AXI_CLK>,
+				<&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
+				<&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+				<&gcc GCC_UFS_AHB_CLK>,
+				<&gcc UFS_ICE_CORE_CLK_SRC>,
+				<&gcc GCC_UFS_UNIPRO_CORE_CLK>,
+				<&gcc GCC_UFS_ICE_CORE_CLK>,
+				<&rpmcc RPM_SMD_LN_BB_CLK>,
+				<&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
+				<&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+			freq-table-hz =
+				<100000000 200000000>,
+				<0 0>,
+				<0 0>,
+				<0 0>,
+				<0 0>,
+				<150000000 300000000>,
+				<0 0>,
+				<0 0>,
+				<0 0>,
+				<0 0>,
+				<0 0>;
+
+			lanes-per-direction = <1>;
+			#reset-cells = <1>;
+			status = "disabled";
+
+			ufs_variant {
+				compatible = "qcom,ufs_variant";
+			};
+		};
+
+		ufsphy: phy@627000 {
+			compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
+			reg = <0x00627000 0xda8>;
+			reg-names = "phy_mem";
+			#phy-cells = <0>;
+
+			clock-names = "ref_clk_src", "ref_clk";
+			clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
+				 <&gcc GCC_UFS_CLKREF_CLK>;
+			resets = <&ufshc 0>;
+			status = "disabled";
+		};
+
+		camss: camss@a00000 {
+			compatible = "qcom,msm8996-camss";
+			reg = <0x00a34000 0x1000>,
+			      <0x00a00030 0x4>,
+			      <0x00a35000 0x1000>,
+			      <0x00a00038 0x4>,
+			      <0x00a36000 0x1000>,
+			      <0x00a00040 0x4>,
+			      <0x00a30000 0x100>,
+			      <0x00a30400 0x100>,
+			      <0x00a30800 0x100>,
+			      <0x00a30c00 0x100>,
+			      <0x00a31000 0x500>,
+			      <0x00a00020 0x10>,
+			      <0x00a10000 0x1000>,
+			      <0x00a14000 0x1000>;
+			reg-names = "csiphy0",
+				"csiphy0_clk_mux",
+				"csiphy1",
+				"csiphy1_clk_mux",
+				"csiphy2",
+				"csiphy2_clk_mux",
+				"csid0",
+				"csid1",
+				"csid2",
+				"csid3",
+				"ispif",
+				"csi_clk_mux",
+				"vfe0",
+				"vfe1";
+			interrupts = <GIC_SPI 78 0>,
+				<GIC_SPI 79 0>,
+				<GIC_SPI 80 0>,
+				<GIC_SPI 296 0>,
+				<GIC_SPI 297 0>,
+				<GIC_SPI 298 0>,
+				<GIC_SPI 299 0>,
+				<GIC_SPI 309 0>,
+				<GIC_SPI 314 0>,
+				<GIC_SPI 315 0>;
+			interrupt-names = "csiphy0",
+				"csiphy1",
+				"csiphy2",
+				"csid0",
+				"csid1",
+				"csid2",
+				"csid3",
+				"ispif",
+				"vfe0",
+				"vfe1";
+			power-domains = <&mmcc VFE0_GDSC>;
+			clocks = <&mmcc CAMSS_TOP_AHB_CLK>,
+				<&mmcc CAMSS_ISPIF_AHB_CLK>,
+				<&mmcc CAMSS_CSI0PHYTIMER_CLK>,
+				<&mmcc CAMSS_CSI1PHYTIMER_CLK>,
+				<&mmcc CAMSS_CSI2PHYTIMER_CLK>,
+				<&mmcc CAMSS_CSI0_AHB_CLK>,
+				<&mmcc CAMSS_CSI0_CLK>,
+				<&mmcc CAMSS_CSI0PHY_CLK>,
+				<&mmcc CAMSS_CSI0PIX_CLK>,
+				<&mmcc CAMSS_CSI0RDI_CLK>,
+				<&mmcc CAMSS_CSI1_AHB_CLK>,
+				<&mmcc CAMSS_CSI1_CLK>,
+				<&mmcc CAMSS_CSI1PHY_CLK>,
+				<&mmcc CAMSS_CSI1PIX_CLK>,
+				<&mmcc CAMSS_CSI1RDI_CLK>,
+				<&mmcc CAMSS_CSI2_AHB_CLK>,
+				<&mmcc CAMSS_CSI2_CLK>,
+				<&mmcc CAMSS_CSI2PHY_CLK>,
+				<&mmcc CAMSS_CSI2PIX_CLK>,
+				<&mmcc CAMSS_CSI2RDI_CLK>,
+				<&mmcc CAMSS_CSI3_AHB_CLK>,
+				<&mmcc CAMSS_CSI3_CLK>,
+				<&mmcc CAMSS_CSI3PHY_CLK>,
+				<&mmcc CAMSS_CSI3PIX_CLK>,
+				<&mmcc CAMSS_CSI3RDI_CLK>,
+				<&mmcc CAMSS_AHB_CLK>,
+				<&mmcc CAMSS_VFE0_CLK>,
+				<&mmcc CAMSS_CSI_VFE0_CLK>,
+				<&mmcc CAMSS_VFE0_AHB_CLK>,
+				<&mmcc CAMSS_VFE0_STREAM_CLK>,
+				<&mmcc CAMSS_VFE1_CLK>,
+				<&mmcc CAMSS_CSI_VFE1_CLK>,
+				<&mmcc CAMSS_VFE1_AHB_CLK>,
+				<&mmcc CAMSS_VFE1_STREAM_CLK>,
+				<&mmcc CAMSS_VFE_AHB_CLK>,
+				<&mmcc CAMSS_VFE_AXI_CLK>;
+			clock-names = "top_ahb",
+				"ispif_ahb",
+				"csiphy0_timer",
+				"csiphy1_timer",
+				"csiphy2_timer",
+				"csi0_ahb",
+				"csi0",
+				"csi0_phy",
+				"csi0_pix",
+				"csi0_rdi",
+				"csi1_ahb",
+				"csi1",
+				"csi1_phy",
+				"csi1_pix",
+				"csi1_rdi",
+				"csi2_ahb",
+				"csi2",
+				"csi2_phy",
+				"csi2_pix",
+				"csi2_rdi",
+				"csi3_ahb",
+				"csi3",
+				"csi3_phy",
+				"csi3_pix",
+				"csi3_rdi",
+				"ahb",
+				"vfe0",
+				"csi_vfe0",
+				"vfe0_ahb",
+				"vfe0_stream",
+				"vfe1",
+				"csi_vfe1",
+				"vfe1_ahb",
+				"vfe1_stream",
+				"vfe_ahb",
+				"vfe_axi";
+			iommus = <&vfe_smmu 0>,
+				 <&vfe_smmu 1>,
+				 <&vfe_smmu 2>,
+				 <&vfe_smmu 3>;
+			status = "disabled";
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+			};
+		};
+
+		adreno_smmu: iommu@b40000 {
+			compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
+			reg = <0x00b40000 0x10000>;
+
+			#global-interrupts = <1>;
+			interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>;
+			#iommu-cells = <1>;
+
+			clocks = <&mmcc GPU_AHB_CLK>,
+				 <&gcc GCC_MMSS_BIMC_GFX_CLK>;
+			clock-names = "iface", "bus";
+
+			power-domains = <&mmcc GPU_GDSC>;
+		};
+
+		video-codec@c00000 {
+			compatible = "qcom,msm8996-venus";
+			reg = <0x00c00000 0xff000>;
+			interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&mmcc VENUS_GDSC>;
+			clocks = <&mmcc VIDEO_CORE_CLK>,
+				 <&mmcc VIDEO_AHB_CLK>,
+				 <&mmcc VIDEO_AXI_CLK>,
+				 <&mmcc VIDEO_MAXI_CLK>;
+			clock-names = "core", "iface", "bus", "mbus";
+			iommus = <&venus_smmu 0x00>,
+				 <&venus_smmu 0x01>,
+				 <&venus_smmu 0x0a>,
+				 <&venus_smmu 0x07>,
+				 <&venus_smmu 0x0e>,
+				 <&venus_smmu 0x0f>,
+				 <&venus_smmu 0x08>,
+				 <&venus_smmu 0x09>,
+				 <&venus_smmu 0x0b>,
+				 <&venus_smmu 0x0c>,
+				 <&venus_smmu 0x0d>,
+				 <&venus_smmu 0x10>,
+				 <&venus_smmu 0x11>,
+				 <&venus_smmu 0x21>,
+				 <&venus_smmu 0x28>,
+				 <&venus_smmu 0x29>,
+				 <&venus_smmu 0x2b>,
+				 <&venus_smmu 0x2c>,
+				 <&venus_smmu 0x2d>,
+				 <&venus_smmu 0x31>;
+			memory-region = <&venus_region>;
+			status = "okay";
+
+			video-decoder {
+				compatible = "venus-decoder";
+				clocks = <&mmcc VIDEO_SUBCORE0_CLK>;
+				clock-names = "core";
+				power-domains = <&mmcc VENUS_CORE0_GDSC>;
+			};
+
+			video-encoder {
+				compatible = "venus-encoder";
+				clocks = <&mmcc VIDEO_SUBCORE1_CLK>;
+				clock-names = "core";
+				power-domains = <&mmcc VENUS_CORE1_GDSC>;
+			};
+		};
+
+		mdp_smmu: iommu@d00000 {
+			compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
+			reg = <0x00d00000 0x10000>;
+
+			#global-interrupts = <1>;
+			interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
+			#iommu-cells = <1>;
+			clocks = <&mmcc SMMU_MDP_AHB_CLK>,
+				 <&mmcc SMMU_MDP_AXI_CLK>;
+			clock-names = "iface", "bus";
+
+			power-domains = <&mmcc MDSS_GDSC>;
+		};
+
+		venus_smmu: iommu@d40000 {
+			compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
+			reg = <0x00d40000 0x20000>;
+			#global-interrupts = <1>;
+			interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&mmcc MMAGIC_VIDEO_GDSC>;
+			clocks = <&mmcc SMMU_VIDEO_AHB_CLK>,
+				 <&mmcc SMMU_VIDEO_AXI_CLK>;
+			clock-names = "iface", "bus";
+			#iommu-cells = <1>;
+			status = "okay";
+		};
+
+		vfe_smmu: iommu@da0000 {
+			compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
+			reg = <0x00da0000 0x10000>;
+
+			#global-interrupts = <1>;
+			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&mmcc MMAGIC_CAMSS_GDSC>;
+			clocks = <&mmcc SMMU_VFE_AHB_CLK>,
+				 <&mmcc SMMU_VFE_AXI_CLK>;
+			clock-names = "iface",
+				      "bus";
+			#iommu-cells = <1>;
+		};
+
+		lpass_q6_smmu: iommu@1600000 {
+			compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
+			reg = <0x01600000 0x20000>;
+			#iommu-cells = <1>;
+			power-domains = <&gcc HLOS1_VOTE_LPASS_CORE_GDSC>;
+
+			#global-interrupts = <1>;
+			interrupts = <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+		                <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
+		                <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
+		                <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>,
+		                <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+		                <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+		                <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+		                <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+		                <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+		                <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+		                <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+		                <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+		                <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>;
+
+			clocks = <&gcc GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK>,
+				 <&gcc GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK>;
+			clock-names = "iface", "bus";
 		};
 
 		stm@3002000 {
@@ -1104,13 +1690,119 @@ apss_merge_funnel_out: endpoint {
 				};
 			};
 		};
-
 		kryocc: clock-controller@6400000 {
 			compatible = "qcom,apcc-msm8996";
-			reg = <0x6400000 0x90000>;
+			reg = <0x06400000 0x90000>;
 			#clock-cells = <1>;
 		};
 
+		usb3: usb@6af8800 {
+			compatible = "qcom,msm8996-dwc3", "qcom,dwc3";
+			reg = <0x06af8800 0x400>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
+				<&gcc GCC_USB30_MASTER_CLK>,
+				<&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+				<&gcc GCC_USB30_MOCK_UTMI_CLK>,
+				<&gcc GCC_USB30_SLEEP_CLK>,
+				<&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+			assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+					  <&gcc GCC_USB30_MASTER_CLK>;
+			assigned-clock-rates = <19200000>, <120000000>;
+
+			power-domains = <&gcc USB30_GDSC>;
+			status = "disabled";
+
+			dwc3@6a00000 {
+				compatible = "snps,dwc3";
+				reg = <0x06a00000 0xcc00>;
+				interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
+				phys = <&hsusb_phy1>, <&ssusb_phy_0>;
+				phy-names = "usb2-phy", "usb3-phy";
+				snps,dis_u2_susphy_quirk;
+				snps,dis_enblslpm_quirk;
+			};
+		};
+
+		usb3phy: phy@7410000 {
+			compatible = "qcom,msm8996-qmp-usb3-phy";
+			reg = <0x07410000 0x1c4>;
+			#clock-cells = <1>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			clocks = <&gcc GCC_USB3_PHY_AUX_CLK>,
+				<&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+				<&gcc GCC_USB3_CLKREF_CLK>;
+			clock-names = "aux", "cfg_ahb", "ref";
+
+			resets = <&gcc GCC_USB3_PHY_BCR>,
+				<&gcc GCC_USB3PHY_PHY_BCR>;
+			reset-names = "phy", "common";
+			status = "disabled";
+
+			ssusb_phy_0: lane@7410200 {
+				reg = <0x07410200 0x200>,
+				      <0x07410400 0x130>,
+				      <0x07410600 0x1a8>;
+				#phy-cells = <0>;
+
+				clock-output-names = "usb3_phy_pipe_clk_src";
+				clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>;
+				clock-names = "pipe0";
+			};
+		};
+
+		hsusb_phy1: phy@7411000 {
+			compatible = "qcom,msm8996-qusb2-phy";
+			reg = <0x07411000 0x180>;
+			#phy-cells = <0>;
+
+			clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+				<&gcc GCC_RX1_USB2_CLKREF_CLK>;
+			clock-names = "cfg_ahb", "ref";
+
+			resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+			nvmem-cells = <&qusb2p_hstx_trim>;
+			status = "disabled";
+		};
+
+		hsusb_phy2: phy@7412000 {
+			compatible = "qcom,msm8996-qusb2-phy";
+			reg = <0x07412000 0x180>;
+			#phy-cells = <0>;
+
+			clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+				<&gcc GCC_RX2_USB2_CLKREF_CLK>;
+			clock-names = "cfg_ahb", "ref";
+
+			resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
+			nvmem-cells = <&qusb2s_hstx_trim>;
+			status = "disabled";
+		};
+
+		sdhc2: sdhci@74a4900 {
+			 status = "disabled";
+			 compatible = "qcom,sdhci-msm-v4";
+			 reg = <0x074a4900 0x314>, <0x074a4000 0x800>;
+			 reg-names = "hc_mem", "core_mem";
+
+			 interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>,
+				      <0 221 IRQ_TYPE_LEVEL_HIGH>;
+			 interrupt-names = "hc_irq", "pwr_irq";
+
+			 clock-names = "iface", "core", "xo";
+			 clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+			 <&gcc GCC_SDCC2_APPS_CLK>,
+			 <&xo_board>;
+			 bus-width = <4>;
+		 };
+
 		blsp1_uart1: serial@7570000 {
 			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
 			reg = <0x07570000 0x1000>;
@@ -1136,6 +1828,41 @@ blsp1_spi0: spi@7575000 {
 			status = "disabled";
 		};
 
+		blsp1_i2c2: i2c@7577000 {
+			compatible = "qcom,i2c-qup-v2.2.1";
+			reg = <0x07577000 0x1000>;
+			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+				<&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
+			clock-names = "iface", "core";
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&blsp1_i2c2_default>;
+			pinctrl-1 = <&blsp1_i2c2_sleep>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		blsp2_uart1: serial@75b0000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0x075b0000 0x1000>;
+			interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
+				 <&gcc GCC_BLSP2_AHB_CLK>;
+			clock-names = "core", "iface";
+			status = "disabled";
+		};
+
+		blsp2_uart2: serial@75b1000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0x075b1000 0x1000>;
+			interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP2_UART3_APPS_CLK>,
+				 <&gcc GCC_BLSP2_AHB_CLK>;
+			clock-names = "core", "iface";
+			status = "disabled";
+		};
+
 		blsp2_i2c0: i2c@75b5000 {
 			compatible = "qcom,i2c-qup-v2.2.1";
 			reg = <0x075b5000 0x1000>;
@@ -1151,16 +1878,6 @@ blsp2_i2c0: i2c@75b5000 {
 			status = "disabled";
 		};
 
-		blsp2_uart1: serial@75b0000 {
-			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
-			reg = <0x75b0000 0x1000>;
-			interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
-				 <&gcc GCC_BLSP2_AHB_CLK>;
-			clock-names = "core", "iface";
-			status = "disabled";
-		};
-
 		blsp2_i2c1: i2c@75b6000 {
 			compatible = "qcom,i2c-qup-v2.2.1";
 			reg = <0x075b6000 0x1000>;
@@ -1176,31 +1893,6 @@ blsp2_i2c1: i2c@75b6000 {
 			status = "disabled";
 		};
 
-		blsp2_uart2: serial@75b1000 {
-			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
-			reg = <0x075b1000 0x1000>;
-			interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&gcc GCC_BLSP2_UART3_APPS_CLK>,
-				 <&gcc GCC_BLSP2_AHB_CLK>;
-			clock-names = "core", "iface";
-			status = "disabled";
-		};
-
-		blsp1_i2c2: i2c@7577000 {
-			compatible = "qcom,i2c-qup-v2.2.1";
-			reg = <0x07577000 0x1000>;
-			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&gcc GCC_BLSP1_AHB_CLK>,
-				<&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
-			clock-names = "iface", "core";
-			pinctrl-names = "default", "sleep";
-			pinctrl-0 = <&blsp1_i2c2_default>;
-			pinctrl-1 = <&blsp1_i2c2_sleep>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
 		blsp2_spi5: spi@75ba000{
 			compatible = "qcom,spi-qup-v2.2.1";
 			reg = <0x075ba000 0x600>;
@@ -1216,31 +1908,174 @@ blsp2_spi5: spi@75ba000{
 			status = "disabled";
 		};
 
-		sdhc2: sdhci@74a4900 {
-			 status = "disabled";
-			 compatible = "qcom,sdhci-msm-v4";
-			 reg = <0x74a4900 0x314>, <0x74a4000 0x800>;
-			 reg-names = "hc_mem", "core_mem";
+		usb2: usb@76f8800 {
+			compatible = "qcom,msm8996-dwc3", "qcom,dwc3";
+			reg = <0x076f8800 0x400>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
 
-			 interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>,
-				      <0 221 IRQ_TYPE_LEVEL_HIGH>;
-			 interrupt-names = "hc_irq", "pwr_irq";
+			clocks = <&gcc GCC_PERIPH_NOC_USB20_AHB_CLK>,
+				<&gcc GCC_USB20_MASTER_CLK>,
+				<&gcc GCC_USB20_MOCK_UTMI_CLK>,
+				<&gcc GCC_USB20_SLEEP_CLK>,
+				<&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
 
-			 clock-names = "iface", "core", "xo";
-			 clocks = <&gcc GCC_SDCC2_AHB_CLK>,
-			 <&gcc GCC_SDCC2_APPS_CLK>,
-			 <&xo_board>;
-			 bus-width = <4>;
-		 };
+			assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+					  <&gcc GCC_USB20_MASTER_CLK>;
+			assigned-clock-rates = <19200000>, <60000000>;
 
-		msmgpio: pinctrl@1010000 {
-			compatible = "qcom,msm8996-pinctrl";
-			reg = <0x01010000 0x300000>;
-			interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
-			gpio-controller;
-			#gpio-cells = <2>;
-			interrupt-controller;
-			#interrupt-cells = <2>;
+			power-domains = <&gcc USB30_GDSC>;
+			status = "disabled";
+
+			dwc3@7600000 {
+				compatible = "snps,dwc3";
+				reg = <0x07600000 0xcc00>;
+				interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
+				phys = <&hsusb_phy2>;
+				phy-names = "usb2-phy";
+				snps,dis_u2_susphy_quirk;
+				snps,dis_enblslpm_quirk;
+			};
+		};
+
+		slimbam: dma@9184000 {
+			compatible = "qcom,bam-v1.7.0";
+			qcom,controlled-remotely;
+			reg = <0x09184000 0x32000>;
+			num-channels  = <31>;
+			interrupts = <0 164 IRQ_TYPE_LEVEL_HIGH>;
+			#dma-cells = <1>;
+			qcom,ee = <1>;
+			qcom,num-ees = <2>;
+		};
+
+		slim_msm: slim@91c0000 {
+			compatible = "qcom,slim-ngd-v1.5.0";
+			reg = <0x091c0000 0x2C000>;
+			reg-names = "ctrl";
+			interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>;
+			dmas =	<&slimbam 3>, <&slimbam 4>,
+				<&slimbam 5>, <&slimbam 6>;
+			dma-names = "rx", "tx", "tx2", "rx2";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			ngd@1 {
+				reg = <1>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				tasha_ifd: tas-ifd {
+					compatible = "slim217,1a0";
+					reg  = <0 0>;
+				};
+
+				wcd9335: codec@1{
+					pinctrl-0 = <&cdc_reset_active &wcd_intr_default>;
+					pinctrl-names = "default";
+
+					compatible = "slim217,1a0";
+					reg  = <1 0>;
+
+					interrupt-parent = <&msmgpio>;
+					interrupts = <54 IRQ_TYPE_LEVEL_HIGH>,
+						     <53 IRQ_TYPE_LEVEL_HIGH>;
+					interrupt-names  = "intr1", "intr2";
+					interrupt-controller;
+					#interrupt-cells = <1>;
+					reset-gpios = <&msmgpio 64 0>;
+
+					slim-ifc-dev  = <&tasha_ifd>;
+
+					#sound-dai-cells = <1>;
+				};
+			};
+		};
+
+		adsp_pil: remoteproc@9300000 {
+			compatible = "qcom,msm8996-adsp-pil";
+			reg = <0x09300000 0x80000>;
+
+			interrupts-extended = <&intc 0 162 IRQ_TYPE_EDGE_RISING>,
+					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog", "fatal", "ready",
+					  "handover", "stop-ack";
+
+			clocks = <&xo_board>;
+			clock-names = "xo";
+
+			memory-region = <&adsp_region>;
+
+			qcom,smem-states = <&smp2p_adsp_out 0>;
+			qcom,smem-state-names = "stop";
+
+			smd-edge {
+				interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+
+				label = "lpass";
+				mboxes = <&apcs_glb 8>;
+				qcom,smd-edge = <1>;
+				qcom,remote-pid = <2>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				apr {
+					power-domains = <&gcc HLOS1_VOTE_LPASS_ADSP_GDSC>;
+					compatible = "qcom,apr-v2";
+					qcom,smd-channels = "apr_audio_svc";
+					qcom,apr-domain = <APR_DOMAIN_ADSP>;
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					q6core {
+						reg = <APR_SVC_ADSP_CORE>;
+						compatible = "qcom,q6core";
+					};
+
+					q6afe: q6afe {
+						compatible = "qcom,q6afe";
+						reg = <APR_SVC_AFE>;
+						q6afedai: dais {
+							compatible = "qcom,q6afe-dais";
+							#address-cells = <1>;
+							#size-cells = <0>;
+							#sound-dai-cells = <1>;
+							hdmi@1 {
+								reg = <1>;
+							};
+						};
+					};
+
+					q6asm: q6asm {
+						compatible = "qcom,q6asm";
+						reg = <APR_SVC_ASM>;
+						q6asmdai: dais {
+							compatible = "qcom,q6asm-dais";
+							#sound-dai-cells = <1>;
+							iommus = <&lpass_q6_smmu 1>;
+						};
+					};
+
+					q6adm: q6adm {
+						compatible = "qcom,q6adm";
+						reg = <APR_SVC_ADM>;
+						q6routing: routing {
+							compatible = "qcom,q6adm-routing";
+							#sound-dai-cells = <0>;
+						};
+					};
+				};
+
+			};
+		};
+
+		apcs_glb: mailbox@9820000 {
+			compatible = "qcom,msm8996-apcs-hmss-global";
+			reg = <0x09820000 0x1000>;
+
+			#mbox-cells = <1>;
 		};
 
 		timer@9840000 {
@@ -1302,1191 +2137,268 @@ frame@98c0000 {
 			};
 		};
 
-		spmi_bus: qcom,spmi@400f000 {
-			compatible = "qcom,spmi-pmic-arb";
-			reg = <0x400f000 0x1000>,
-			      <0x4400000 0x800000>,
-			      <0x4c00000 0x800000>,
-			      <0x5800000 0x200000>,
-			      <0x400a000 0x002100>;
-			reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
-			interrupt-names = "periph_irq";
-			interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
-			qcom,ee = <0>;
-			qcom,channel = <0>;
-			#address-cells = <2>;
-			#size-cells = <0>;
+		saw3: syscon@9a10000 {
+			compatible = "syscon";
+			reg = <0x09a10000 0x1000>;
+		};
+
+		intc: interrupt-controller@9bc0000 {
+			compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
+			#interrupt-cells = <3>;
 			interrupt-controller;
-			#interrupt-cells = <4>;
-		};
-
-		ufsphy: phy@627000 {
-			compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
-			reg = <0x627000 0xda8>;
-			reg-names = "phy_mem";
-			#phy-cells = <0>;
-
-			vdda-phy-supply = <&pm8994_l28>;
-			vdda-pll-supply = <&pm8994_l12>;
-
-			vdda-phy-max-microamp = <18380>;
-			vdda-pll-max-microamp = <9440>;
-
-			vddp-ref-clk-supply = <&pm8994_l25>;
-			vddp-ref-clk-max-microamp = <100>;
-			vddp-ref-clk-always-on;
-
-			clock-names = "ref_clk_src", "ref_clk";
-			clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
-				 <&gcc GCC_UFS_CLKREF_CLK>;
-			resets = <&ufshc 0>;
-			status = "disabled";
-		};
-
-		ufshc: ufshc@624000 {
-			compatible = "qcom,ufshc";
-			reg = <0x624000 0x2500>;
-			interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
-
-			phys = <&ufsphy>;
-			phy-names = "ufsphy";
-
-			vcc-supply = <&pm8994_l20>;
-			vccq-supply = <&pm8994_l25>;
-			vccq2-supply = <&pm8994_s4>;
-
-			vcc-max-microamp = <600000>;
-			vccq-max-microamp = <450000>;
-			vccq2-max-microamp = <450000>;
-
-			power-domains = <&gcc UFS_GDSC>;
-
-			clock-names =
-				"core_clk_src",
-				"core_clk",
-				"bus_clk",
-				"bus_aggr_clk",
-				"iface_clk",
-				"core_clk_unipro_src",
-				"core_clk_unipro",
-				"core_clk_ice",
-				"ref_clk",
-				"tx_lane0_sync_clk",
-				"rx_lane0_sync_clk";
-			clocks =
-				<&gcc UFS_AXI_CLK_SRC>,
-				<&gcc GCC_UFS_AXI_CLK>,
-				<&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
-				<&gcc GCC_AGGRE2_UFS_AXI_CLK>,
-				<&gcc GCC_UFS_AHB_CLK>,
-				<&gcc UFS_ICE_CORE_CLK_SRC>,
-				<&gcc GCC_UFS_UNIPRO_CORE_CLK>,
-				<&gcc GCC_UFS_ICE_CORE_CLK>,
-				<&rpmcc RPM_SMD_LN_BB_CLK>,
-				<&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
-				<&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
-			freq-table-hz =
-				<100000000 200000000>,
-				<0 0>,
-				<0 0>,
-				<0 0>,
-				<0 0>,
-				<150000000 300000000>,
-				<0 0>,
-				<0 0>,
-				<0 0>,
-				<0 0>,
-				<0 0>;
-
-			lanes-per-direction = <1>;
-			#reset-cells = <1>;
-			status = "disabled";
-
-			ufs_variant {
-				compatible = "qcom,ufs_variant";
-			};
-		};
-
-		mmcc: clock-controller@8c0000 {
-			compatible = "qcom,mmcc-msm8996";
-			#clock-cells = <1>;
-			#reset-cells = <1>;
-			#power-domain-cells = <1>;
-			reg = <0x8c0000 0x40000>;
-			assigned-clocks = <&mmcc MMPLL9_PLL>,
-					  <&mmcc MMPLL1_PLL>,
-					  <&mmcc MMPLL3_PLL>,
-					  <&mmcc MMPLL4_PLL>,
-					  <&mmcc MMPLL5_PLL>;
-			assigned-clock-rates = <624000000>,
-					       <810000000>,
-					       <980000000>,
-					       <960000000>,
-					       <825000000>;
-		};
-
-		qfprom@74000 {
-			compatible = "qcom,qfprom";
-			reg = <0x74000 0x8ff>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-
-			qusb2p_hstx_trim: hstx_trim@24e {
-				reg = <0x24e 0x2>;
-				bits = <5 4>;
-			};
-
-			qusb2s_hstx_trim: hstx_trim@24f {
-				reg = <0x24f 0x1>;
-				bits = <1 4>;
-			};
-
-			gpu_speed_bin: gpu_speed_bin@133 {
-				reg = <0x133 0x1>;
-				bits = <5 3>;
-			};
-		};
-
-		phy@34000 {
-			compatible = "qcom,msm8996-qmp-pcie-phy";
-			reg = <0x34000 0x488>;
-			#clock-cells = <1>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges;
-
-			clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
-				<&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
-				<&gcc GCC_PCIE_CLKREF_CLK>;
-			clock-names = "aux", "cfg_ahb", "ref";
-
-			vdda-phy-supply = <&pm8994_l28>;
-			vdda-pll-supply = <&pm8994_l12>;
-
-			resets = <&gcc GCC_PCIE_PHY_BCR>,
-				<&gcc GCC_PCIE_PHY_COM_BCR>,
-				<&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
-			reset-names = "phy", "common", "cfg";
-			status = "disabled";
-
-			pciephy_0: lane@35000 {
-				reg = <0x035000 0x130>,
-					<0x035200 0x200>,
-					<0x035400 0x1dc>;
-				#phy-cells = <0>;
-
-				clock-output-names = "pcie_0_pipe_clk_src";
-				clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
-				clock-names = "pipe0";
-				resets = <&gcc GCC_PCIE_0_PHY_BCR>;
-				reset-names = "lane0";
-			};
-
-			pciephy_1: lane@36000 {
-				reg = <0x036000 0x130>,
-					<0x036200 0x200>,
-					<0x036400 0x1dc>;
-				#phy-cells = <0>;
-
-				clock-output-names = "pcie_1_pipe_clk_src";
-				clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
-				clock-names = "pipe1";
-				resets = <&gcc GCC_PCIE_1_PHY_BCR>;
-				reset-names = "lane1";
-			};
-
-			pciephy_2: lane@37000 {
-				reg = <0x037000 0x130>,
-					<0x037200 0x200>,
-					<0x037400 0x1dc>;
-				#phy-cells = <0>;
-
-				clock-output-names = "pcie_2_pipe_clk_src";
-				clocks = <&gcc GCC_PCIE_2_PIPE_CLK>;
-				clock-names = "pipe2";
-				resets = <&gcc GCC_PCIE_2_PHY_BCR>;
-				reset-names = "lane2";
-			};
-		};
-
-		phy@7410000 {
-			compatible = "qcom,msm8996-qmp-usb3-phy";
-			reg = <0x7410000 0x1c4>;
-			#clock-cells = <1>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges;
-
-			clocks = <&gcc GCC_USB3_PHY_AUX_CLK>,
-				<&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
-				<&gcc GCC_USB3_CLKREF_CLK>;
-			clock-names = "aux", "cfg_ahb", "ref";
-
-			vdda-phy-supply = <&pm8994_l28>;
-			vdda-pll-supply = <&pm8994_l12>;
-
-			resets = <&gcc GCC_USB3_PHY_BCR>,
-				<&gcc GCC_USB3PHY_PHY_BCR>;
-			reset-names = "phy", "common";
-			status = "disabled";
-
-			ssusb_phy_0: lane@7410200 {
-				reg = <0x7410200 0x200>,
-					<0x7410400 0x130>,
-					<0x7410600 0x1a8>;
-				#phy-cells = <0>;
-
-				clock-output-names = "usb3_phy_pipe_clk_src";
-				clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>;
-				clock-names = "pipe0";
-			};
-		};
-
-		hsusb_phy1: phy@7411000 {
-			compatible = "qcom,msm8996-qusb2-phy";
-			reg = <0x7411000 0x180>;
-			#phy-cells = <0>;
-
-			clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
-				<&gcc GCC_RX1_USB2_CLKREF_CLK>;
-			clock-names = "cfg_ahb", "ref";
-
-			vdda-pll-supply = <&pm8994_l12>;
-			vdda-phy-dpdm-supply = <&pm8994_l24>;
-
-			resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
-			nvmem-cells = <&qusb2p_hstx_trim>;
-			status = "disabled";
-		};
-
-		hsusb_phy2: phy@7412000 {
-			compatible = "qcom,msm8996-qusb2-phy";
-			reg = <0x7412000 0x180>;
-			#phy-cells = <0>;
-
-			clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
-				<&gcc GCC_RX2_USB2_CLKREF_CLK>;
-			clock-names = "cfg_ahb", "ref";
-
-			vdda-pll-supply = <&pm8994_l12>;
-			vdda-phy-dpdm-supply = <&pm8994_l24>;
-
-			resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
-			nvmem-cells = <&qusb2s_hstx_trim>;
-			status = "disabled";
-		};
-
-		usb2: usb@76f8800 {
-			compatible = "qcom,msm8996-dwc3", "qcom,dwc3";
-			reg = <0x76f8800 0x400>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges;
-
-			clocks = <&gcc GCC_PERIPH_NOC_USB20_AHB_CLK>,
-				<&gcc GCC_USB20_MASTER_CLK>,
-				<&gcc GCC_USB20_MOCK_UTMI_CLK>,
-				<&gcc GCC_USB20_SLEEP_CLK>,
-				<&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
-
-			assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>,
-					  <&gcc GCC_USB20_MASTER_CLK>;
-			assigned-clock-rates = <19200000>, <60000000>;
-
-			power-domains = <&gcc USB30_GDSC>;
-			status = "disabled";
-
-			dwc3@7600000 {
-				compatible = "snps,dwc3";
-				reg = <0x7600000 0xcc00>;
-				interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
-				phys = <&hsusb_phy2>;
-				phy-names = "usb2-phy";
-			};
-		};
-
-		usb3: usb@6af8800 {
-			compatible = "qcom,msm8996-dwc3", "qcom,dwc3";
-			reg = <0x6af8800 0x400>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges;
-
-			clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
-				<&gcc GCC_USB30_MASTER_CLK>,
-				<&gcc GCC_AGGRE2_USB3_AXI_CLK>,
-				<&gcc GCC_USB30_MOCK_UTMI_CLK>,
-				<&gcc GCC_USB30_SLEEP_CLK>,
-				<&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
-
-			assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
-					  <&gcc GCC_USB30_MASTER_CLK>;
-			assigned-clock-rates = <19200000>, <120000000>;
-
-			power-domains = <&gcc USB30_GDSC>;
-			status = "disabled";
-
-			dwc3@6a00000 {
-				compatible = "snps,dwc3";
-				reg = <0x6a00000 0xcc00>;
-				interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
-				phys = <&hsusb_phy1>, <&ssusb_phy_0>;
-				phy-names = "usb2-phy", "usb3-phy";
-			};
-		};
-
-		vfe_smmu: iommu@da0000 {
-			compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
-			reg = <0xda0000 0x10000>;
-
-			#global-interrupts = <1>;
-			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>;
-			power-domains = <&mmcc MMAGIC_CAMSS_GDSC>;
-			clocks = <&mmcc SMMU_VFE_AHB_CLK>,
-				 <&mmcc SMMU_VFE_AXI_CLK>;
-			clock-names = "iface",
-				      "bus";
-			#iommu-cells = <1>;
-		};
-
-		camss: camss@a00000 {
-			compatible = "qcom,msm8996-camss";
-			reg = <0xa34000 0x1000>,
-				<0xa00030 0x4>,
-				<0xa35000 0x1000>,
-				<0xa00038 0x4>,
-				<0xa36000 0x1000>,
-				<0xa00040 0x4>,
-				<0xa30000 0x100>,
-				<0xa30400 0x100>,
-				<0xa30800 0x100>,
-				<0xa30c00 0x100>,
-				<0xa31000 0x500>,
-				<0xa00020 0x10>,
-				<0xa10000 0x1000>,
-				<0xa14000 0x1000>;
-			reg-names = "csiphy0",
-				"csiphy0_clk_mux",
-				"csiphy1",
-				"csiphy1_clk_mux",
-				"csiphy2",
-				"csiphy2_clk_mux",
-				"csid0",
-				"csid1",
-				"csid2",
-				"csid3",
-				"ispif",
-				"csi_clk_mux",
-				"vfe0",
-				"vfe1";
-			interrupts = <GIC_SPI 78 0>,
-				<GIC_SPI 79 0>,
-				<GIC_SPI 80 0>,
-				<GIC_SPI 296 0>,
-				<GIC_SPI 297 0>,
-				<GIC_SPI 298 0>,
-				<GIC_SPI 299 0>,
-				<GIC_SPI 309 0>,
-				<GIC_SPI 314 0>,
-				<GIC_SPI 315 0>;
-			interrupt-names = "csiphy0",
-				"csiphy1",
-				"csiphy2",
-				"csid0",
-				"csid1",
-				"csid2",
-				"csid3",
-				"ispif",
-				"vfe0",
-				"vfe1";
-			power-domains = <&mmcc VFE0_GDSC>;
-			clocks = <&mmcc CAMSS_TOP_AHB_CLK>,
-				<&mmcc CAMSS_ISPIF_AHB_CLK>,
-				<&mmcc CAMSS_CSI0PHYTIMER_CLK>,
-				<&mmcc CAMSS_CSI1PHYTIMER_CLK>,
-				<&mmcc CAMSS_CSI2PHYTIMER_CLK>,
-				<&mmcc CAMSS_CSI0_AHB_CLK>,
-				<&mmcc CAMSS_CSI0_CLK>,
-				<&mmcc CAMSS_CSI0PHY_CLK>,
-				<&mmcc CAMSS_CSI0PIX_CLK>,
-				<&mmcc CAMSS_CSI0RDI_CLK>,
-				<&mmcc CAMSS_CSI1_AHB_CLK>,
-				<&mmcc CAMSS_CSI1_CLK>,
-				<&mmcc CAMSS_CSI1PHY_CLK>,
-				<&mmcc CAMSS_CSI1PIX_CLK>,
-				<&mmcc CAMSS_CSI1RDI_CLK>,
-				<&mmcc CAMSS_CSI2_AHB_CLK>,
-				<&mmcc CAMSS_CSI2_CLK>,
-				<&mmcc CAMSS_CSI2PHY_CLK>,
-				<&mmcc CAMSS_CSI2PIX_CLK>,
-				<&mmcc CAMSS_CSI2RDI_CLK>,
-				<&mmcc CAMSS_CSI3_AHB_CLK>,
-				<&mmcc CAMSS_CSI3_CLK>,
-				<&mmcc CAMSS_CSI3PHY_CLK>,
-				<&mmcc CAMSS_CSI3PIX_CLK>,
-				<&mmcc CAMSS_CSI3RDI_CLK>,
-				<&mmcc CAMSS_AHB_CLK>,
-				<&mmcc CAMSS_VFE0_CLK>,
-				<&mmcc CAMSS_CSI_VFE0_CLK>,
-				<&mmcc CAMSS_VFE0_AHB_CLK>,
-				<&mmcc CAMSS_VFE0_STREAM_CLK>,
-				<&mmcc CAMSS_VFE1_CLK>,
-				<&mmcc CAMSS_CSI_VFE1_CLK>,
-				<&mmcc CAMSS_VFE1_AHB_CLK>,
-				<&mmcc CAMSS_VFE1_STREAM_CLK>,
-				<&mmcc CAMSS_VFE_AHB_CLK>,
-				<&mmcc CAMSS_VFE_AXI_CLK>;
-			clock-names = "top_ahb",
-				"ispif_ahb",
-				"csiphy0_timer",
-				"csiphy1_timer",
-				"csiphy2_timer",
-				"csi0_ahb",
-				"csi0",
-				"csi0_phy",
-				"csi0_pix",
-				"csi0_rdi",
-				"csi1_ahb",
-				"csi1",
-				"csi1_phy",
-				"csi1_pix",
-				"csi1_rdi",
-				"csi2_ahb",
-				"csi2",
-				"csi2_phy",
-				"csi2_pix",
-				"csi2_rdi",
-				"csi3_ahb",
-				"csi3",
-				"csi3_phy",
-				"csi3_pix",
-				"csi3_rdi",
-				"ahb",
-				"vfe0",
-				"csi_vfe0",
-				"vfe0_ahb",
-				"vfe0_stream",
-				"vfe1",
-				"csi_vfe1",
-				"vfe1_ahb",
-				"vfe1_stream",
-				"vfe_ahb",
-				"vfe_axi";
-			vdda-supply = <&pm8994_l2>;
-			iommus = <&vfe_smmu 0>,
-				 <&vfe_smmu 1>,
-				 <&vfe_smmu 2>,
-				 <&vfe_smmu 3>;
-			status = "disabled";
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-			};
-		};
-
-		adreno_smmu: iommu@b40000 {
-			compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
-			reg = <0xb40000 0x10000>;
-
-			#global-interrupts = <1>;
-			interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>;
-			#iommu-cells = <1>;
-
-			clocks = <&mmcc GPU_AHB_CLK>,
-				 <&gcc GCC_MMSS_BIMC_GFX_CLK>;
-			clock-names = "iface", "bus";
-
-			power-domains = <&mmcc GPU_GDSC>;
-		};
-
-		mdp_smmu: iommu@d00000 {
-			compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
-			reg = <0xd00000 0x10000>;
-
-			#global-interrupts = <1>;
-			interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
-			#iommu-cells = <1>;
-			clocks = <&mmcc SMMU_MDP_AHB_CLK>,
-				 <&mmcc SMMU_MDP_AXI_CLK>;
-			clock-names = "iface", "bus";
-
-			power-domains = <&mmcc MDSS_GDSC>;
-		};
-
-		lpass_q6_smmu: iommu@1600000 {
-			compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
-			reg = <0x1600000 0x20000>;
-			#iommu-cells = <1>;
-			power-domains = <&gcc HLOS1_VOTE_LPASS_CORE_GDSC>;
-
-			#global-interrupts = <1>;
-			interrupts = <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
-		                <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
-		                <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
-		                <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>,
-		                <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
-		                <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
-		                <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
-		                <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
-		                <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
-		                <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
-		                <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
-		                <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
-		                <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>;
-
-			clocks = <&gcc GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK>,
-				 <&gcc GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK>;
-			clock-names = "iface", "bus";
-		};
-
-		agnoc@0 {
-			power-domains = <&gcc AGGRE0_NOC_GDSC>;
-			compatible = "simple-pm-bus";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges;
-
-			pcie0: pcie@600000 {
-				compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
-				status = "disabled";
-				power-domains = <&gcc PCIE0_GDSC>;
-				bus-range = <0x00 0xff>;
-				num-lanes = <1>;
-
-				reg = <0x00600000 0x2000>,
-				      <0x0c000000 0xf1d>,
-				      <0x0c000f20 0xa8>,
-				      <0x0c100000 0x100000>;
-				reg-names = "parf", "dbi", "elbi","config";
-
-				phys = <&pciephy_0>;
-				phy-names = "pciephy";
-
-				#address-cells = <3>;
-				#size-cells = <2>;
-				ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>,
-					<0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
-
-				interrupts = <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
-				interrupt-names = "msi";
-				#interrupt-cells = <1>;
-				interrupt-map-mask = <0 0 0 0x7>;
-				interrupt-map = <0 0 0 1 &intc 0 244 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
-						<0 0 0 2 &intc 0 245 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
-						<0 0 0 3 &intc 0 247 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
-						<0 0 0 4 &intc 0 248 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
-
-				pinctrl-names = "default", "sleep";
-				pinctrl-0 = <&pcie0_clkreq_default &pcie0_perst_default &pcie0_wake_default>;
-				pinctrl-1 = <&pcie0_clkreq_sleep &pcie0_perst_default &pcie0_wake_sleep>;
-
-
-				vdda-supply = <&pm8994_l28>;
-
-				linux,pci-domain = <0>;
-
-				clocks = <&gcc GCC_PCIE_0_PIPE_CLK>,
-					<&gcc GCC_PCIE_0_AUX_CLK>,
-					<&gcc GCC_PCIE_0_CFG_AHB_CLK>,
-					<&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
-					<&gcc GCC_PCIE_0_SLV_AXI_CLK>;
-
-				clock-names =  "pipe",
-						"aux",
-						"cfg",
-						"bus_master",
-						"bus_slave";
-
-			};
-
-			pcie1: pcie@608000 {
-				compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
-				power-domains = <&gcc PCIE1_GDSC>;
-				bus-range = <0x00 0xff>;
-				num-lanes = <1>;
-
-				status  = "disabled";
-
-				reg = <0x00608000 0x2000>,
-				      <0x0d000000 0xf1d>,
-				      <0x0d000f20 0xa8>,
-				      <0x0d100000 0x100000>;
-
-				reg-names = "parf", "dbi", "elbi","config";
-
-				phys = <&pciephy_1>;
-				phy-names = "pciephy";
-
-				#address-cells = <3>;
-				#size-cells = <2>;
-				ranges = <0x01000000 0x0 0x0d200000 0x0d200000 0x0 0x100000>,
-					<0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
-
-				interrupts = <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>;
-				interrupt-names = "msi";
-				#interrupt-cells = <1>;
-				interrupt-map-mask = <0 0 0 0x7>;
-				interrupt-map = <0 0 0 1 &intc 0 272 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
-						<0 0 0 2 &intc 0 273 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
-						<0 0 0 3 &intc 0 274 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
-						<0 0 0 4 &intc 0 275 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
-
-				pinctrl-names = "default", "sleep";
-				pinctrl-0 = <&pcie1_clkreq_default &pcie1_perst_default &pcie1_wake_default>;
-				pinctrl-1 = <&pcie1_clkreq_sleep &pcie1_perst_default &pcie1_wake_sleep>;
-
-
-				vdda-supply = <&pm8994_l28>;
-				linux,pci-domain = <1>;
-
-				clocks = <&gcc GCC_PCIE_1_PIPE_CLK>,
-					<&gcc GCC_PCIE_1_AUX_CLK>,
-					<&gcc GCC_PCIE_1_CFG_AHB_CLK>,
-					<&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
-					<&gcc GCC_PCIE_1_SLV_AXI_CLK>;
-
-				clock-names =  "pipe",
-						"aux",
-						"cfg",
-						"bus_master",
-						"bus_slave";
-			};
-
-			pcie2: pcie@610000 {
-				compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
-				power-domains = <&gcc PCIE2_GDSC>;
-				bus-range = <0x00 0xff>;
-				num-lanes = <1>;
-				status = "disabled";
-				reg = <0x00610000 0x2000>,
-				      <0x0e000000 0xf1d>,
-				      <0x0e000f20 0xa8>,
-				      <0x0e100000 0x100000>;
-
-				reg-names = "parf", "dbi", "elbi","config";
-
-				phys = <&pciephy_2>;
-				phy-names = "pciephy";
-
-				#address-cells = <3>;
-				#size-cells = <2>;
-				ranges = <0x01000000 0x0 0x0e200000 0x0e200000 0x0 0x100000>,
-					<0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;
-
-				device_type = "pci";
-
-				interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
-				interrupt-names = "msi";
-				#interrupt-cells = <1>;
-				interrupt-map-mask = <0 0 0 0x7>;
-				interrupt-map = <0 0 0 1 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
-						<0 0 0 2 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
-						<0 0 0 3 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
-						<0 0 0 4 &intc 0 145 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
-
-				pinctrl-names = "default", "sleep";
-				pinctrl-0 = <&pcie2_clkreq_default &pcie2_perst_default &pcie2_wake_default>;
-				pinctrl-1 = <&pcie2_clkreq_sleep &pcie2_perst_default &pcie2_wake_sleep >;
-
-				vdda-supply = <&pm8994_l28>;
-
-				linux,pci-domain = <2>;
-				clocks = <&gcc GCC_PCIE_2_PIPE_CLK>,
-					<&gcc GCC_PCIE_2_AUX_CLK>,
-					<&gcc GCC_PCIE_2_CFG_AHB_CLK>,
-					<&gcc GCC_PCIE_2_MSTR_AXI_CLK>,
-					<&gcc GCC_PCIE_2_SLV_AXI_CLK>;
-
-				clock-names =  "pipe",
-						"aux",
-						"cfg",
-						"bus_master",
-						"bus_slave";
-			};
-		};
-
-		slimbam:dma@9184000
-		{
-			compatible = "qcom,bam-v1.7.0";
-			qcom,controlled-remotely;
-			reg = <0x9184000 0x32000>;
-			num-channels  = <31>;
-			interrupts = <0 164 IRQ_TYPE_LEVEL_HIGH>;
-			#dma-cells = <1>;
-			qcom,ee = <1>;
-			qcom,num-ees = <2>;
-		};
-
-		slim_msm: slim@91c0000 {
-			compatible = "qcom,slim-ngd-v1.5.0";
-			reg = <0x91c0000 0x2C000>;
-			reg-names = "ctrl";
-			interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>;
-			dmas =	<&slimbam 3>, <&slimbam 4>,
-				<&slimbam 5>, <&slimbam 6>;
-			dma-names = "rx", "tx", "tx2", "rx2";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			ngd@1 {
-				reg = <1>;
-				#address-cells = <1>;
-				#size-cells = <1>;
-
-				tasha_ifd: tas-ifd {
-					compatible = "slim217,1a0";
-					reg  = <0 0>;
-				};
-
-				wcd9335: codec@1{
-					pinctrl-0 = <&cdc_reset_active &wcd_intr_default>;
-					pinctrl-names = "default";
-
-					compatible = "slim217,1a0";
-					reg  = <1 0>;
-
-					interrupt-parent = <&msmgpio>;
-					interrupts = <54 IRQ_TYPE_LEVEL_HIGH>,
-						     <53 IRQ_TYPE_LEVEL_HIGH>;
-					interrupt-names  = "intr1", "intr2";
-					interrupt-controller;
-					#interrupt-cells = <1>;
-					reset-gpios = <&msmgpio 64 0>;
-
-					slim-ifc-dev  = <&tasha_ifd>;
-
-					vdd-buck-supply = <&pm8994_s4>;
-					vdd-buck-sido-supply = <&pm8994_s4>;
-					vdd-tx-supply = <&pm8994_s4>;
-					vdd-rx-supply = <&pm8994_s4>;
-					vdd-io-supply = <&pm8994_s4>;
-
-					#sound-dai-cells = <1>;
-				};
-			};
-		};
-
-		gpu@b00000 {
-			compatible = "qcom,adreno-530.2", "qcom,adreno";
-			#stream-id-cells = <16>;
-
-			reg = <0xb00000 0x3f000>;
-			reg-names = "kgsl_3d0_reg_memory";
-
-			interrupts = <0 300 IRQ_TYPE_LEVEL_HIGH>;
-
-			clocks = <&mmcc GPU_GX_GFX3D_CLK>,
-				<&mmcc GPU_AHB_CLK>,
-				<&mmcc GPU_GX_RBBMTIMER_CLK>,
-				<&gcc GCC_BIMC_GFX_CLK>,
-				<&gcc GCC_MMSS_BIMC_GFX_CLK>;
-
-			clock-names = "core",
-				"iface",
-				"rbbmtimer",
-				"mem",
-				"mem_iface";
-
-			power-domains = <&mmcc GPU_GDSC>;
-			iommus = <&adreno_smmu 0>;
-
-			nvmem-cells = <&gpu_speed_bin>;
-			nvmem-cell-names = "speed_bin";
-
-			qcom,gpu-quirk-two-pass-use-wfi;
-			qcom,gpu-quirk-fault-detect-mask;
-
-			operating-points-v2 = <&gpu_opp_table>;
-
-			gpu_opp_table: opp-table {
-				compatible  ="operating-points-v2";
-
-				/*
-				 * 624Mhz and 560Mhz are only available on speed
-				 * bin (1 << 0). All the rest are available on
-				 * all bins of the hardware
-				 */
-				opp-624000000 {
-					opp-hz = /bits/ 64 <624000000>;
-					opp-supported-hw = <0x01>;
-				};
-				opp-560000000 {
-					opp-hz = /bits/ 64 <560000000>;
-					opp-supported-hw = <0x01>;
-				};
-				opp-510000000 {
-					opp-hz = /bits/ 64 <510000000>;
-					opp-supported-hw = <0xFF>;
-				};
-				opp-401800000 {
-					opp-hz = /bits/ 64 <401800000>;
-					opp-supported-hw = <0xFF>;
-				};
-				opp-315000000 {
-					opp-hz = /bits/ 64 <315000000>;
-					opp-supported-hw = <0xFF>;
-				};
-				opp-214000000 {
-					opp-hz = /bits/ 64 <214000000>;
-					opp-supported-hw = <0xFF>;
-				};
-				opp-133000000 {
-					opp-hz = /bits/ 64 <133000000>;
-					opp-supported-hw = <0xFF>;
-				};
-			};
-
-			zap-shader {
-				memory-region = <&zap_shader_region>;
-			};
-		};
-
-		mdss: mdss@900000 {
-			compatible = "qcom,mdss";
-
-			reg = <0x900000 0x1000>,
-			      <0x9b0000 0x1040>,
-			      <0x9b8000 0x1040>;
-			reg-names = "mdss_phys",
-				    "vbif_phys",
-				    "vbif_nrt_phys";
-
-			power-domains = <&mmcc MDSS_GDSC>;
-			interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
-
-			interrupt-controller;
-			#interrupt-cells = <1>;
-
-			clocks = <&mmcc MDSS_AHB_CLK>;
-			clock-names = "iface";
-
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges;
-
-			mdp: mdp@901000 {
-				compatible = "qcom,mdp5";
-				reg = <0x901000 0x90000>;
-				reg-names = "mdp_phys";
-
-				interrupt-parent = <&mdss>;
-				interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
-
-				clocks = <&mmcc MDSS_AHB_CLK>,
-					 <&mmcc MDSS_AXI_CLK>,
-					 <&mmcc MDSS_MDP_CLK>,
-					 <&mmcc SMMU_MDP_AXI_CLK>,
-					 <&mmcc MDSS_VSYNC_CLK>;
-				clock-names = "iface",
-					      "bus",
-					      "core",
-					      "iommu",
-					      "vsync";
-
-				iommus = <&mdp_smmu 0>;
-
-				ports {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					port@0 {
-						reg = <0>;
-						mdp5_intf3_out: endpoint {
-							remote-endpoint = <&hdmi_in>;
-						};
-					};
-				};
-			};
-
-			hdmi: hdmi-tx@9a0000 {
-				compatible = "qcom,hdmi-tx-8996";
-				reg =	<0x009a0000 0x50c>,
-					<0x00070000 0x6158>,
-					<0x009e0000 0xfff>;
-				reg-names = "core_physical",
-					    "qfprom_physical",
-					    "hdcp_physical";
-
-				interrupt-parent = <&mdss>;
-				interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
-
-				clocks = <&mmcc MDSS_MDP_CLK>,
-					 <&mmcc MDSS_AHB_CLK>,
-					 <&mmcc MDSS_HDMI_CLK>,
-					 <&mmcc MDSS_HDMI_AHB_CLK>,
-					 <&mmcc MDSS_EXTPCLK_CLK>;
-				clock-names =
-					"mdp_core",
-					"iface",
-					"core",
-					"alt_iface",
-					"extp";
-
-				phys = <&hdmi_phy>;
-				phy-names = "hdmi_phy";
-				#sound-dai-cells = <1>;
-
-				ports {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					port@0 {
-						reg = <0>;
-						hdmi_in: endpoint {
-							remote-endpoint = <&mdp5_intf3_out>;
-						};
-					};
-				};
-			};
-
-			hdmi_phy: hdmi-phy@9a0600 {
-				#phy-cells = <0>;
-				compatible = "qcom,hdmi-phy-8996";
-				reg = <0x9a0600 0x1c4>,
-				      <0x9a0a00 0x124>,
-				      <0x9a0c00 0x124>,
-				      <0x9a0e00 0x124>,
-				      <0x9a1000 0x124>,
-				      <0x9a1200 0x0c8>;
-				reg-names = "hdmi_pll",
-					    "hdmi_tx_l0",
-					    "hdmi_tx_l1",
-					    "hdmi_tx_l2",
-					    "hdmi_tx_l3",
-					    "hdmi_phy";
-
-				clocks = <&mmcc MDSS_AHB_CLK>,
-					 <&gcc GCC_HDMI_CLKREF_CLK>;
-				clock-names = "iface",
-					      "ref";
-			};
-		};
-
-		venus_smmu: arm,smmu-venus@d40000 {
-			compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
-			reg = <0xd40000 0x20000>;
-			#global-interrupts = <1>;
-			interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
-			power-domains = <&mmcc MMAGIC_VIDEO_GDSC>;
-			clocks = <&mmcc SMMU_VIDEO_AHB_CLK>,
-				 <&mmcc SMMU_VIDEO_AXI_CLK>;
-			clock-names = "iface", "bus";
-			#iommu-cells = <1>;
-			status = "okay";
-		};
-
-		video-codec@c00000 {
-			compatible = "qcom,msm8996-venus";
-			reg = <0x00c00000 0xff000>;
-			interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
-			power-domains = <&mmcc VENUS_GDSC>;
-			clocks = <&mmcc VIDEO_CORE_CLK>,
-				 <&mmcc VIDEO_AHB_CLK>,
-				 <&mmcc VIDEO_AXI_CLK>,
-				 <&mmcc VIDEO_MAXI_CLK>;
-			clock-names = "core", "iface", "bus", "mbus";
-			iommus = <&venus_smmu 0x00>,
-				 <&venus_smmu 0x01>,
-				 <&venus_smmu 0x0a>,
-				 <&venus_smmu 0x07>,
-				 <&venus_smmu 0x0e>,
-				 <&venus_smmu 0x0f>,
-				 <&venus_smmu 0x08>,
-				 <&venus_smmu 0x09>,
-				 <&venus_smmu 0x0b>,
-				 <&venus_smmu 0x0c>,
-				 <&venus_smmu 0x0d>,
-				 <&venus_smmu 0x10>,
-				 <&venus_smmu 0x11>,
-				 <&venus_smmu 0x21>,
-				 <&venus_smmu 0x28>,
-				 <&venus_smmu 0x29>,
-				 <&venus_smmu 0x2b>,
-				 <&venus_smmu 0x2c>,
-				 <&venus_smmu 0x2d>,
-				 <&venus_smmu 0x31>;
-			memory-region = <&venus_region>;
-			status = "okay";
-
-			video-decoder {
-				compatible = "venus-decoder";
-				clocks = <&mmcc VIDEO_SUBCORE0_CLK>;
-				clock-names = "core";
-				power-domains = <&mmcc VENUS_CORE0_GDSC>;
-			};
-
-			video-encoder {
-				compatible = "venus-encoder";
-				clocks = <&mmcc VIDEO_SUBCORE1_CLK>;
-				clock-names = "core";
-				power-domains = <&mmcc VENUS_CORE1_GDSC>;
-			};
+			#redistributor-regions = <1>;
+			redistributor-stride = <0x0 0x40000>;
+			reg = <0x09bc0000 0x10000>,
+			      <0x09c00000 0x100000>;
+			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
 		};
 	};
 
 	sound: sound {
 	};
 
-	adsp-pil {
-		compatible = "qcom,msm8996-adsp-pil";
+	thermal-zones {
+		cpu0-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
 
-		interrupts-extended = <&intc 0 162 IRQ_TYPE_EDGE_RISING>,
-				      <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
-				      <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
-				      <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
-				      <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
-		interrupt-names = "wdog", "fatal", "ready",
-				  "handover", "stop-ack";
+			thermal-sensors = <&tsens0 3>;
 
-		clocks = <&xo_board>;
-		clock-names = "xo";
-
-		memory-region = <&adsp_region>;
-
-		qcom,smem-states = <&adsp_smp2p_out 0>;
-		qcom,smem-state-names = "stop";
-
-		smd-edge {
-			interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
-
-			label = "lpass";
-			mboxes = <&apcs_glb 8>;
-			qcom,smd-edge = <1>;
-			qcom,remote-pid = <2>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			apr {
-				power-domains = <&gcc HLOS1_VOTE_LPASS_ADSP_GDSC>;
-				compatible = "qcom,apr-v2";
-				qcom,smd-channels = "apr_audio_svc";
-				qcom,apr-domain = <APR_DOMAIN_ADSP>;
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				q6core {
-					reg = <APR_SVC_ADSP_CORE>;
-					compatible = "qcom,q6core";
+			trips {
+				cpu0_alert0: trip-point@0 {
+					temperature = <75000>;
+					hysteresis = <2000>;
+					type = "passive";
 				};
 
-				q6afe: q6afe {
-					compatible = "qcom,q6afe";
-					reg = <APR_SVC_AFE>;
-					q6afedai: dais {
-						compatible = "qcom,q6afe-dais";
-						#address-cells = <1>;
-						#size-cells = <0>;
-						#sound-dai-cells = <1>;
-						hdmi@1 {
-							reg = <1>;
-						};
-					};
-				};
-
-				q6asm: q6asm {
-					compatible = "qcom,q6asm";
-					reg = <APR_SVC_ASM>;
-					q6asmdai: dais {
-						compatible = "qcom,q6asm-dais";
-						#sound-dai-cells = <1>;
-						iommus = <&lpass_q6_smmu 1>;
-					};
-				};
-
-				q6adm: q6adm {
-					compatible = "qcom,q6adm";
-					reg = <APR_SVC_ADM>;
-					q6routing: routing {
-						compatible = "qcom,q6adm-routing";
-						#sound-dai-cells = <0>;
-					};
+				cpu0_crit: cpu_crit {
+					temperature = <110000>;
+					hysteresis = <2000>;
+					type = "critical";
 				};
 			};
+		};
 
+		cpu1-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 5>;
+
+			trips {
+				cpu1_alert0: trip-point@0 {
+					temperature = <75000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu1_crit: cpu_crit {
+					temperature = <110000>;
+					hysteresis = <2000>;
+					type = "critical";
+				};
+			};
+		};
+
+		cpu2-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 8>;
+
+			trips {
+				cpu2_alert0: trip-point@0 {
+					temperature = <75000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu2_crit: cpu_crit {
+					temperature = <110000>;
+					hysteresis = <2000>;
+					type = "critical";
+				};
+			};
+		};
+
+		cpu3-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 10>;
+
+			trips {
+				cpu3_alert0: trip-point@0 {
+					temperature = <75000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu3_crit: cpu_crit {
+					temperature = <110000>;
+					hysteresis = <2000>;
+					type = "critical";
+				};
+			};
+		};
+
+		gpu-thermal-top {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 6>;
+
+			trips {
+				gpu1_alert0: trip-point@0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		gpu-thermal-bottom {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 7>;
+
+			trips {
+				gpu2_alert0: trip-point@0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		m4m-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 1>;
+
+			trips {
+				m4m_alert0: trip-point@0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		l3-or-venus-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 2>;
+
+			trips {
+				l3_or_venus_alert0: trip-point@0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		cluster0-l2-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 7>;
+
+			trips {
+				cluster0_l2_alert0: trip-point@0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		cluster1-l2-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 12>;
+
+			trips {
+				cluster1_l2_alert0: trip-point@0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		camera-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 1>;
+
+			trips {
+				camera_alert0: trip-point@0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		q6-dsp-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 2>;
+
+			trips {
+				q6_dsp_alert0: trip-point@0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		mem-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 3>;
+
+			trips {
+				mem_alert0: trip-point@0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		modemtx-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 4>;
+
+			trips {
+				modemtx_alert0: trip-point@0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
 		};
 	};
 
-	adsp-smp2p {
-		compatible = "qcom,smp2p";
-		qcom,smem = <443>, <429>;
-
-		interrupts = <0 158 IRQ_TYPE_EDGE_RISING>;
-
-		mboxes = <&apcs_glb 10>;
-
-		qcom,local-pid = <0>;
-		qcom,remote-pid = <2>;
-
-		adsp_smp2p_out: master-kernel {
-			qcom,entry-name = "master-kernel";
-			#qcom,smem-state-cells = <1>;
-		};
-
-		adsp_smp2p_in: slave-kernel {
-			qcom,entry-name = "slave-kernel";
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
 	};
-
-	modem-smp2p {
-		compatible = "qcom,smp2p";
-		qcom,smem = <435>, <428>;
-
-		interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
-
-		mboxes = <&apcs_glb 14>;
-
-		qcom,local-pid = <0>;
-		qcom,remote-pid = <1>;
-
-		modem_smp2p_out: master-kernel {
-			qcom,entry-name = "master-kernel";
-			#qcom,smem-state-cells = <1>;
-		};
-
-		modem_smp2p_in: slave-kernel {
-			qcom,entry-name = "slave-kernel";
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-	};
-
-	smp2p-slpi {
-		compatible = "qcom,smp2p";
-		qcom,smem = <481>, <430>;
-
-		interrupts = <GIC_SPI 178 IRQ_TYPE_EDGE_RISING>;
-
-		mboxes = <&apcs_glb 26>;
-
-		qcom,local-pid = <0>;
-		qcom,remote-pid = <3>;
-
-		slpi_smp2p_in: slave-kernel {
-			qcom,entry-name = "slave-kernel";
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		slpi_smp2p_out: master-kernel {
-			qcom,entry-name = "master-kernel";
-			#qcom,smem-state-cells = <1>;
-		};
-	};
-
 };
 #include "msm8996-pins.dtsi"
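
The msm8996.dtsi rework above leaves the newly added PCIe, UFS, SDHC and USB nodes disabled and drops the PM8994 supply properties from the SoC include, so each board file is expected to enable the nodes it uses and wire up the regulators itself. A minimal sketch of such a board fragment follows; the supply handle (&pm8994_l28, taken from the removed lines) and the choice of nodes are assumptions for illustration, not part of this patch.

	/* Hypothetical board .dts fragment -- illustration only */
	&pcie0 {
		status = "okay";
		vdda-supply = <&pm8994_l28>;	/* supply property now belongs in the board file */
	};

	&sdhc2 {
		status = "okay";	/* board-specific vmmc/vqmmc supplies and pinctrl would follow */
	};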
diff --git a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi
index 6138b58..6ab830d 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi
@@ -74,6 +74,23 @@ &CPU7 {
 	cpu-idle-states = <&BIG_CPU_SLEEP_1>;
 };
 
+&pm8005_lsid1 {
+	pm8005-regulators {
+		compatible = "qcom,pm8005-regulators";
+
+		vdd_s1-supply = <&vph_pwr>;
+
+		pm8005_s1: s1 { /* VDD_GFX supply */
+			regulator-min-microvolt = <524000>;
+			regulator-max-microvolt = <1100000>;
+			regulator-enable-ramp-delay = <500>;
+
+			/* hack until we rig up the gpu consumer */
+			regulator-always-on;
+		};
+	};
+};
+
 &qusb2phy {
 	status = "okay";
 
@@ -292,3 +309,35 @@ &usb3phy {
 	vdda-phy-supply = <&vreg_l1a_0p875>;
 	vdda-pll-supply = <&vreg_l2a_1p2>;
 };
+
+&wifi {
+	status = "okay";
+
+	vdd-0.8-cx-mx-supply = <&vreg_l5a_0p8>;
+	vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+	vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+	vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
+};
+
+/* PINCTRL - board-specific pinctrl */
+&blsp1_uart3_on {
+	rx {
+		/delete-property/ bias-disable;
+		/*
+		 * Configure a pull-up on 45 (RX). This is needed to
+		 * avoid garbage data when the TX pin of the Bluetooth
+		 * module is in tri-state (module powered off or not
+		 * driving the signal yet).
+		 */
+		bias-pull-up;
+	};
+
+	cts {
+		/delete-property/ bias-disable;
+		/*
+		 * Configure a pull-down on 47 (CTS) to match the pull
+		 * of the Bluetooth module.
+		 */
+		bias-pull-down;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
index 5f101a2..0e0b9bc 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
@@ -9,6 +9,7 @@
 / {
 	aliases {
 		serial0 = &blsp2_uart1;
+		serial1 = &blsp1_uart3;
 	};
 
 	chosen {
@@ -311,6 +312,14 @@ vreg_bob: bob {
 	};
 };
 
+&remoteproc_adsp {
+	status = "okay";
+};
+
+&remoteproc_slpi {
+	status = "okay";
+};
+
 &tlmm {
 	gpio-reserved-ranges = <0 4>, <81 4>;
 };
@@ -364,3 +373,35 @@ &usb3phy {
 	vdda-phy-supply = <&vreg_l1a_0p875>;
 	vdda-pll-supply = <&vreg_l2a_1p2>;
 };
+
+&wifi {
+	status = "okay";
+
+	vdd-0.8-cx-mx-supply = <&vreg_l5a_0p8>;
+	vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+	vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+	vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
+};
+
+/* PINCTRL - board-specific pinctrl */
+&blsp1_uart3_on {
+	rx {
+		/delete-property/ bias-disable;
+		/*
+		 * Configure a pull-up on 45 (RX). This is needed to
+		 * avoid garbage data when the TX pin of the Bluetooth
+		 * module is in tri-state (module powered off or not
+		 * driving the signal yet).
+		 */
+		bias-pull-up;
+	};
+
+	cts {
+		/delete-property/ bias-disable;
+		/*
+		 * Configure a pull-down on 47 (CTS) to match the pull
+		 * of the Bluetooth module.
+		 */
+		bias-pull-down;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8998-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8998-pins.dtsi
index e32d3ab..7c222cb 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-pins.dtsi
@@ -77,13 +77,30 @@ config {
 	};
 
 	blsp1_uart3_on: blsp1_uart3_on {
-		mux {
-			pins = "gpio45", "gpio46", "gpio47", "gpio48";
+		tx {
+			pins = "gpio45";
 			function = "blsp_uart3_a";
+			drive-strength = <2>;
+			bias-disable;
 		};
 
-		config {
-			pins = "gpio45", "gpio46", "gpio47", "gpio48";
+		rx {
+			pins = "gpio46";
+			function = "blsp_uart3_a";
+			drive-strength = <2>;
+			bias-disable;
+		};
+
+		cts {
+			pins = "gpio47";
+			function = "blsp_uart3_a";
+			drive-strength = <2>;
+			bias-disable;
+		};
+
+		rfr {
+			pins = "gpio48";
+			function = "blsp_uart3_a";
 			drive-strength = <2>;
 			bias-disable;
 		};
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index fc7838ea..91f7f2d 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -3,6 +3,7 @@
 
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/clock/qcom,gcc-msm8998.h>
+#include <dt-bindings/clock/qcom,gpucc-msm8998.h>
 #include <dt-bindings/clock/qcom,rpmcc.h>
 #include <dt-bindings/power/qcom-rpmpd.h>
 #include <dt-bindings/gpio/gpio.h>
@@ -28,8 +29,13 @@ reserved-memory {
 		#size-cells = <2>;
 		ranges;
 
-		memory@85800000 {
-			reg = <0x0 0x85800000 0x0 0x800000>;
+		hyp_mem: memory@85800000 {
+			reg = <0x0 0x85800000 0x0 0x600000>;
+			no-map;
+		};
+
+		xbl_mem: memory@85e00000 {
+			reg = <0x0 0x85e00000 0x0 0x100000>;
 			no-map;
 		};
 
@@ -38,21 +44,69 @@ smem_mem: smem-mem@86000000 {
 			no-map;
 		};
 
-		memory@86200000 {
+		tz_mem: memory@86200000 {
 			reg = <0x0 0x86200000 0x0 0x2d00000>;
 			no-map;
 		};
 
-		rmtfs {
+		rmtfs_mem: memory@88f00000 {
 			compatible = "qcom,rmtfs-mem";
-
-			size = <0x0 0x200000>;
-			alloc-ranges = <0x0 0xa0000000 0x0 0x2000000>;
+			reg = <0x0 0x88f00000 0x0 0x200000>;
 			no-map;
 
 			qcom,client-id = <1>;
 			qcom,vmid = <15>;
 		};
+
+		spss_mem: memory@8ab00000 {
+			reg = <0x0 0x8ab00000 0x0 0x700000>;
+			no-map;
+		};
+
+		adsp_mem: memory@8b200000 {
+			reg = <0x0 0x8b200000 0x0 0x1a00000>;
+			no-map;
+		};
+
+		mpss_mem: memory@8cc00000 {
+			reg = <0x0 0x8cc00000 0x0 0x7000000>;
+			no-map;
+		};
+
+		venus_mem: memory@93c00000 {
+			reg = <0x0 0x93c00000 0x0 0x500000>;
+			no-map;
+		};
+
+		mba_mem: memory@94100000 {
+			reg = <0x0 0x94100000 0x0 0x200000>;
+			no-map;
+		};
+
+		slpi_mem: memory@94300000 {
+			reg = <0x0 0x94300000 0x0 0xf00000>;
+			no-map;
+		};
+
+		ipa_fw_mem: memory@95200000 {
+			reg = <0x0 0x95200000 0x0 0x10000>;
+			no-map;
+		};
+
+		ipa_gsi_mem: memory@95210000 {
+			reg = <0x0 0x95210000 0x0 0x5000>;
+			no-map;
+		};
+
+		gpu_mem: memory@95600000 {
+			reg = <0x0 0x95600000 0x0 0x100000>;
+			no-map;
+		};
+
+		wlan_msa_mem: memory@95700000 {
+			reg = <0x0 0x95700000 0x0 0x100000>;
+			no-map;
+		};
 	};
 
 	clocks {
@@ -817,8 +871,9 @@ tsens0: thermal@10ab000 {
 			reg = <0x010ab000 0x1000>, /* TM */
 			      <0x010aa000 0x1000>; /* SROT */
 			#qcom,sensors = <14>;
-			interrupts = <GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "uplow";
+			interrupts = <GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "uplow", "critical";
 			#thermal-sensor-cells = <1>;
 		};
 
@@ -827,8 +882,9 @@ tsens1: thermal@10ae000 {
 			reg = <0x010ae000 0x1000>, /* TM */
 			      <0x010ad000 0x1000>; /* SROT */
 			#qcom,sensors = <8>;
-			interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "uplow";
+			interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "uplow", "critical";
 			#thermal-sensor-cells = <1>;
 		};
 
@@ -847,6 +903,25 @@ anoc1_smmu: iommu@1680000 {
 				<GIC_SPI 369 IRQ_TYPE_EDGE_RISING>;
 		};
 
+		anoc2_smmu: iommu@16c0000 {
+			compatible = "qcom,msm8998-smmu-v2", "qcom,smmu-v2";
+			reg = <0x016c0000 0x40000>;
+			#iommu-cells = <1>;
+
+			#global-interrupts = <0>;
+			interrupts =
+				<GIC_SPI 373 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 374 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 375 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 376 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 377 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 378 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 462 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 463 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 465 IRQ_TYPE_EDGE_RISING>;
+		};
+
 		pcie0: pci@1c00000 {
 			compatible = "qcom,pcie-msm8996";
 			reg =	<0x01c00000 0x2000>,
@@ -987,7 +1062,7 @@ ufsphy_lanes: lanes@1da7400 {
 
 		tcsr_mutex_regs: syscon@1f40000 {
 			compatible = "syscon";
-			reg = <0x01f40000 0x20000>;
+			reg = <0x01f40000 0x40000>;
 		};
 
 		tlmm: pinctrl@3400000 {
@@ -1000,6 +1075,110 @@ tlmm: pinctrl@3400000 {
 			#interrupt-cells = <0x2>;
 		};
 
+		remoteproc_mss: remoteproc@4080000 {
+			compatible = "qcom,msm8998-mss-pil";
+			reg = <0x04080000 0x100>, <0x04180000 0x20>;
+			reg-names = "qdsp6", "rmb";
+
+			interrupts-extended =
+				<&intc GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
+				<&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+				<&modem_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+				<&modem_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+				<&modem_smp2p_in 3 IRQ_TYPE_EDGE_RISING>,
+				<&modem_smp2p_in 7 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog", "fatal", "ready",
+					  "handover", "stop-ack",
+					  "shutdown-ack";
+
+			clocks = <&gcc GCC_MSS_CFG_AHB_CLK>,
+				 <&gcc GCC_BIMC_MSS_Q6_AXI_CLK>,
+				 <&gcc GCC_BOOT_ROM_AHB_CLK>,
+				 <&gcc GCC_MSS_GPLL0_DIV_CLK_SRC>,
+				 <&gcc GCC_MSS_SNOC_AXI_CLK>,
+				 <&gcc GCC_MSS_MNOC_BIMC_AXI_CLK>,
+				 <&rpmcc RPM_SMD_QDSS_CLK>,
+				 <&rpmcc RPM_SMD_XO_CLK_SRC>;
+			clock-names = "iface", "bus", "mem", "gpll0_mss",
+				      "snoc_axi", "mnoc_axi", "qdss", "xo";
+
+			qcom,smem-states = <&modem_smp2p_out 0>;
+			qcom,smem-state-names = "stop";
+
+			resets = <&gcc GCC_MSS_RESTART>;
+			reset-names = "mss_restart";
+
+			qcom,halt-regs = <&tcsr_mutex_regs 0x23000 0x25000 0x24000>;
+
+			power-domains = <&rpmpd MSM8998_VDDCX>,
+					<&rpmpd MSM8998_VDDMX>;
+			power-domain-names = "cx", "mx";
+
+			mba {
+				memory-region = <&mba_mem>;
+			};
+
+			mpss {
+				memory-region = <&mpss_mem>;
+			};
+
+			glink-edge {
+				interrupts = <GIC_SPI 452 IRQ_TYPE_EDGE_RISING>;
+				label = "modem";
+				qcom,remote-pid = <1>;
+				mboxes = <&apcs_glb 15>;
+			};
+		};
+
+		gpucc: clock-controller@5065000 {
+			compatible = "qcom,msm8998-gpucc";
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+			#power-domain-cells = <1>;
+			reg = <0x05065000 0x9000>;
+
+			clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
+				 <&gcc GPLL0_OUT_MAIN>;
+			clock-names = "xo",
+				      "gpll0";
+		};
+
+		remoteproc_slpi: remoteproc@5800000 {
+			compatible = "qcom,msm8998-slpi-pas";
+			reg = <0x05800000 0x4040>;
+
+			interrupts-extended = <&intc GIC_SPI 390 IRQ_TYPE_EDGE_RISING>,
+					      <&slpi_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+					      <&slpi_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+					      <&slpi_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+					      <&slpi_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog", "fatal", "ready",
+					  "handover", "stop-ack";
+
+			px-supply = <&vreg_lvs2a_1p8>;
+
+			clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
+				 <&rpmcc RPM_SMD_AGGR2_NOC_CLK>;
+			clock-names = "xo", "aggre2";
+
+			memory-region = <&slpi_mem>;
+
+			qcom,smem-states = <&slpi_smp2p_out 0>;
+			qcom,smem-state-names = "stop";
+
+			power-domains = <&rpmpd MSM8998_SSCCX>;
+			power-domain-names = "ssc_cx";
+
+			status = "disabled";
+
+			glink-edge {
+				interrupts = <GIC_SPI 179 IRQ_TYPE_EDGE_RISING>;
+				label = "dsps";
+				qcom,remote-pid = <3>;
+				mboxes = <&apcs_glb 27>;
+			};
+		};
+
 		stm: stm@6002000 {
 			compatible = "arm,coresight-stm", "arm,primecell";
 			reg = <0x06002000 0x1000>,
@@ -1792,6 +1971,39 @@ blsp2_i2c5: i2c@c1ba000 {
 			#size-cells = <0>;
 		};
 
+		remoteproc_adsp: remoteproc@17300000 {
+			compatible = "qcom,msm8998-adsp-pas";
+			reg = <0x17300000 0x4040>;
+
+			interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
+					      <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+					      <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+					      <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+					      <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog", "fatal", "ready",
+					  "handover", "stop-ack";
+
+			clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>;
+			clock-names = "xo";
+
+			memory-region = <&adsp_mem>;
+
+			qcom,smem-states = <&adsp_smp2p_out 0>;
+			qcom,smem-state-names = "stop";
+
+			power-domains = <&rpmpd MSM8998_VDDCX>;
+			power-domain-names = "cx";
+
+			status = "disabled";
+
+			glink-edge {
+				interrupts = <GIC_SPI 157 IRQ_TYPE_EDGE_RISING>;
+				label = "lpass";
+				qcom,remote-pid = <2>;
+				mboxes = <&apcs_glb 9>;
+			};
+		};
+
 		apcs_glb: mailbox@17911000 {
 			compatible = "qcom,msm8998-apcs-hmss-global";
 			reg = <0x17911000 0x1000>;
@@ -1870,6 +2082,32 @@ intc: interrupt-controller@17a00000 {
 			redistributor-stride = <0x0 0x20000>;
 			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
 		};
+
+		wifi: wifi@18800000 {
+			compatible = "qcom,wcn3990-wifi";
+			status = "disabled";
+			reg = <0x18800000 0x800000>;
+			reg-names = "membase";
+			memory-region = <&wlan_msa_mem>;
+			clocks = <&rpmcc RPM_SMD_RF_CLK2_PIN>;
+			clock-names = "cxo_ref_clk_pin";
+			interrupts =
+				<GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 414 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 415 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>;
+			iommus = <&anoc2_smmu 0x1900>,
+				 <&anoc2_smmu 0x1901>;
+			qcom,snoc-host-cap-8bit-quirk;
+		};
 	};
 };
 
diff --git a/arch/arm64/boot/dts/qcom/pm6150.dtsi b/arch/arm64/boot/dts/qcom/pm6150.dtsi
new file mode 100644
index 0000000..2353463
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pm6150.dtsi
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: BSD-3-Clause
+// Copyright (c) 2019, The Linux Foundation. All rights reserved.
+
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/thermal/thermal.h>
+
+&spmi_bus {
+	pm6150_lsid0: pmic@0 {
+		compatible = "qcom,pm6150", "qcom,spmi-pmic";
+		reg = <0x0 SPMI_USID>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		pm6150_pon: pon@800 {
+			compatible = "qcom,pm8998-pon";
+			reg = <0x800>;
+			mode-bootloader = <0x2>;
+			mode-recovery = <0x1>;
+
+			pwrkey {
+				compatible = "qcom,pm8941-pwrkey";
+				interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
+				debounce = <15625>;
+				bias-pull-up;
+				linux,code = <KEY_POWER>;
+			};
+		};
+
+		pm6150_temp: temp-alarm@2400 {
+			compatible = "qcom,spmi-temp-alarm";
+			reg = <0x2400>;
+			interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
+			io-channels = <&pm6150_adc ADC5_DIE_TEMP>;
+			io-channel-names = "thermal";
+			#thermal-sensor-cells = <0>;
+		};
+
+		pm6150_adc: adc@3100 {
+			compatible = "qcom,spmi-adc5";
+			reg = <0x3100>;
+			interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			#io-channel-cells = <1>;
+
+			adc-chan@6 {
+				reg = <ADC5_DIE_TEMP>;
+				label = "die_temp";
+			};
+		};
+
+		pm6150_gpio: gpios@c000 {
+			compatible = "qcom,pm6150-gpio", "qcom,spmi-gpio";
+			reg = <0xc000>;
+			gpio-controller;
+			gpio-ranges = <&pm6150_gpio 0 0 10>;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+	};
+
+	pm6150_lsid1: pmic@1 {
+		compatible = "qcom,pm6150", "qcom,spmi-pmic";
+		reg = <0x1 SPMI_USID>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pm6150l.dtsi b/arch/arm64/boot/dts/qcom/pm6150l.dtsi
new file mode 100644
index 0000000..f84027b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pm6150l.dtsi
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: BSD-3-Clause
+// Copyright (c) 2019, The Linux Foundation. All rights reserved.
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+	pm6150l_lsid4: pmic@4 {
+		compatible = "qcom,pm6150l", "qcom,spmi-pmic";
+		reg = <0x4 SPMI_USID>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		pm6150l_gpio: gpios@c000 {
+			compatible = "qcom,pm6150l-gpio", "qcom,spmi-gpio";
+			reg = <0xc000>;
+			gpio-controller;
+			gpio-ranges = <&pm6150l_gpio 0 0 12>;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+	};
+
+	pm6150l_lsid5: pmic@5 {
+		compatible = "qcom,pm6150l", "qcom,spmi-pmic";
+		reg = <0x5 SPMI_USID>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8004.dtsi b/arch/arm64/boot/dts/qcom/pm8004.dtsi
index 297b57b..0abd1ab 100644
--- a/arch/arm64/boot/dts/qcom/pm8004.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8004.dtsi
@@ -4,17 +4,23 @@
 
 &spmi_bus {
 
-	pmic@4 {
+	pm8004_lsid4: pmic@4 {
 		compatible = "qcom,pm8004", "qcom,spmi-pmic";
 		reg = <0x4 SPMI_USID>;
 		#address-cells = <1>;
 		#size-cells = <0>;
+		status = "disabled";
 	};
 
-	pmic@5 {
+	pm8004_lsid5: pmic@5 {
 		compatible = "qcom,pm8004", "qcom,spmi-pmic";
 		reg = <0x5 SPMI_USID>;
 		#address-cells = <1>;
 		#size-cells = <0>;
+		status = "disabled";
+
+		pm8004_spmi_regulators: regulators {
+			compatible = "qcom,pm8004-regulators";
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
index 9dd2df1..0bcdf04 100644
--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
@@ -111,6 +111,12 @@ pm8916_1: pm8916@1 {
 		#address-cells = <1>;
 		#size-cells = <0>;
 
+		pm8916_vib: vibrator@c000 {
+			compatible = "qcom,pm8916-vib";
+			reg = <0xc000>;
+			status = "disabled";
+		};
+
 		wcd_codec: codec@f000 {
 			compatible = "qcom,pm8916-wcd-analog-codec";
 			reg = <0xf000 0x200>;
diff --git a/arch/arm64/boot/dts/qcom/pm8994.dtsi b/arch/arm64/boot/dts/qcom/pm8994.dtsi
index 76b5a3e..7e4f777 100644
--- a/arch/arm64/boot/dts/qcom/pm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8994.dtsi
@@ -85,5 +85,9 @@ pmic@1 {
 		reg = <0x1 SPMI_USID>;
 		#address-cells = <1>;
 		#size-cells = <0>;
+
+		pm8994_spmi_regulators: regulators {
+			compatible = "qcom,pm8994-regulators";
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
index 501a733..522d3ef 100644
--- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
@@ -73,6 +73,7 @@ pms405_s3: s3 {
 		regulator-always-on;
 		regulator-boot-on;
 		regulator-name = "vdd_apc";
+		regulator-initial-mode = <1>;
 		regulator-min-microvolt = <1048000>;
 		regulator-max-microvolt = <1384000>;
 	};
diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
index f5f0c4c..4ee1e3d 100644
--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
@@ -42,6 +42,10 @@ CPU0: cpu@100 {
 			cpu-idle-states = <&CPU_SLEEP_0>;
 			next-level-cache = <&L2_0>;
 			#cooling-cells = <2>;
+			clocks = <&apcs_glb>;
+			operating-points-v2 = <&cpu_opp_table>;
+			power-domains = <&cpr>;
+			power-domain-names = "cpr";
 		};
 
 		CPU1: cpu@101 {
@@ -52,6 +56,10 @@ CPU1: cpu@101 {
 			cpu-idle-states = <&CPU_SLEEP_0>;
 			next-level-cache = <&L2_0>;
 			#cooling-cells = <2>;
+			clocks = <&apcs_glb>;
+			operating-points-v2 = <&cpu_opp_table>;
+			power-domains = <&cpr>;
+			power-domain-names = "cpr";
 		};
 
 		CPU2: cpu@102 {
@@ -62,6 +70,10 @@ CPU2: cpu@102 {
 			cpu-idle-states = <&CPU_SLEEP_0>;
 			next-level-cache = <&L2_0>;
 			#cooling-cells = <2>;
+			clocks = <&apcs_glb>;
+			operating-points-v2 = <&cpu_opp_table>;
+			power-domains = <&cpr>;
+			power-domain-names = "cpr";
 		};
 
 		CPU3: cpu@103 {
@@ -72,6 +84,10 @@ CPU3: cpu@103 {
 			cpu-idle-states = <&CPU_SLEEP_0>;
 			next-level-cache = <&L2_0>;
 			#cooling-cells = <2>;
+			clocks = <&apcs_glb>;
+			operating-points-v2 = <&cpu_opp_table>;
+			power-domains = <&cpr>;
+			power-domain-names = "cpr";
 		};
 
 		L2_0: l2-cache {
@@ -94,6 +110,41 @@ CPU_SLEEP_0: cpu-sleep-0 {
 		};
 	};
 
+	cpu_opp_table: cpu-opp-table {
+		compatible = "operating-points-v2-kryo-cpu";
+		opp-shared;
+
+		opp-1094400000 {
+			opp-hz = /bits/ 64 <1094400000>;
+			required-opps = <&cpr_opp1>;
+		};
+		opp-1248000000 {
+			opp-hz = /bits/ 64 <1248000000>;
+			required-opps = <&cpr_opp2>;
+		};
+		opp-1401600000 {
+			opp-hz = /bits/ 64 <1401600000>;
+			required-opps = <&cpr_opp3>;
+		};
+	};
+
+	cpr_opp_table: cpr-opp-table {
+		compatible = "operating-points-v2-qcom-level";
+
+		cpr_opp1: opp1 {
+			opp-level = <1>;
+			qcom,opp-fuse-level = <1>;
+		};
+		cpr_opp2: opp2 {
+			opp-level = <2>;
+			qcom,opp-fuse-level = <2>;
+		};
+		cpr_opp3: opp3 {
+			opp-level = <3>;
+			qcom,opp-fuse-level = <3>;
+		};
+	};
+
 	firmware {
 		scm: scm {
 			compatible = "qcom,scm-qcs404", "qcom,scm";
@@ -280,6 +331,62 @@ qfprom: qfprom@a4000 {
 			tsens_caldata: caldata@d0 {
 				reg = <0x1f8 0x14>;
 			};
+			cpr_efuse_speedbin: speedbin@13c {
+				reg = <0x13c 0x4>;
+				bits = <2 3>;
+			};
+			cpr_efuse_quot_offset1: qoffset1@231 {
+				reg = <0x231 0x4>;
+				bits = <4 7>;
+			};
+			cpr_efuse_quot_offset2: qoffset2@232 {
+				reg = <0x232 0x4>;
+				bits = <3 7>;
+			};
+			cpr_efuse_quot_offset3: qoffset3@233 {
+				reg = <0x233 0x4>;
+				bits = <2 7>;
+			};
+			cpr_efuse_init_voltage1: ivoltage1@229 {
+				reg = <0x229 0x4>;
+				bits = <4 6>;
+			};
+			cpr_efuse_init_voltage2: ivoltage2@22a {
+				reg = <0x22a 0x4>;
+				bits = <2 6>;
+			};
+			cpr_efuse_init_voltage3: ivoltage3@22b {
+				reg = <0x22b 0x4>;
+				bits = <0 6>;
+			};
+			cpr_efuse_quot1: quot1@22b {
+				reg = <0x22b 0x4>;
+				bits = <6 12>;
+			};
+			cpr_efuse_quot2: quot2@22d {
+				reg = <0x22d 0x4>;
+				bits = <2 12>;
+			};
+			cpr_efuse_quot3: quot3@230 {
+				reg = <0x230 0x4>;
+				bits = <0 12>;
+			};
+			cpr_efuse_ring1: ring1@228 {
+				reg = <0x228 0x4>;
+				bits = <0 3>;
+			};
+			cpr_efuse_ring2: ring2@228 {
+				reg = <0x228 0x4>;
+				bits = <4 3>;
+			};
+			cpr_efuse_ring3: ring3@229 {
+				reg = <0x229 0x4>;
+				bits = <0 3>;
+			};
+			cpr_efuse_revision: revision@218 {
+				reg = <0x218 0x4>;
+				bits = <3 3>;
+			};
 		};
 
 		rng: rng@e3000 {
@@ -902,14 +1009,65 @@ apcs_glb: mailbox@b011000 {
 			compatible = "qcom,qcs404-apcs-apps-global", "syscon";
 			reg = <0x0b011000 0x1000>;
 			#mbox-cells = <1>;
+			clocks = <&apcs_hfpll>, <&gcc GCC_GPLL0_AO_OUT_MAIN>;
+			clock-names = "pll", "aux";
+			#clock-cells = <0>;
+		};
+
+		apcs_hfpll: clock-controller@b016000 {
+			compatible = "qcom,hfpll";
+			reg = <0x0b016000 0x30>;
+			#clock-cells = <0>;
+			clock-output-names = "apcs_hfpll";
+			clocks = <&xo_board>;
+			clock-names = "xo";
 		};
 
 		watchdog@b017000 {
-			compatible = "qcom,kpss-wdt";
+			compatible = "qcom,apss-wdt-qcs404", "qcom,kpss-wdt";
 			reg = <0x0b017000 0x1000>;
 			clocks = <&sleep_clk>;
 		};
 
+		cpr: power-controller@b018000 {
+			compatible = "qcom,qcs404-cpr", "qcom,cpr";
+			reg = <0x0b018000 0x1000>;
+			interrupts = <0 15 IRQ_TYPE_EDGE_RISING>;
+			clocks = <&xo_board>;
+			clock-names = "ref";
+			vdd-apc-supply = <&pms405_s3>;
+			#power-domain-cells = <0>;
+			operating-points-v2 = <&cpr_opp_table>;
+			acc-syscon = <&tcsr>;
+
+			nvmem-cells = <&cpr_efuse_quot_offset1>,
+				<&cpr_efuse_quot_offset2>,
+				<&cpr_efuse_quot_offset3>,
+				<&cpr_efuse_init_voltage1>,
+				<&cpr_efuse_init_voltage2>,
+				<&cpr_efuse_init_voltage3>,
+				<&cpr_efuse_quot1>,
+				<&cpr_efuse_quot2>,
+				<&cpr_efuse_quot3>,
+				<&cpr_efuse_ring1>,
+				<&cpr_efuse_ring2>,
+				<&cpr_efuse_ring3>,
+				<&cpr_efuse_revision>;
+			nvmem-cell-names = "cpr_quotient_offset1",
+				"cpr_quotient_offset2",
+				"cpr_quotient_offset3",
+				"cpr_init_voltage1",
+				"cpr_init_voltage2",
+				"cpr_init_voltage3",
+				"cpr_quotient1",
+				"cpr_quotient2",
+				"cpr_quotient3",
+				"cpr_ring_osc1",
+				"cpr_ring_osc2",
+				"cpr_ring_osc3",
+				"cpr_fuse_revision";
+		};
+
 		timer@b120000 {
 			#address-cells = <1>;
 			#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sc7180-idp.dts b/arch/arm64/boot/dts/qcom/sc7180-idp.dts
new file mode 100644
index 0000000..388f50a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-idp.dts
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * SC7180 IDP board device tree source
+ *
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "sc7180.dtsi"
+#include "pm6150.dtsi"
+#include "pm6150l.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SC7180 IDP";
+	compatible = "qcom,sc7180-idp", "qcom,sc7180";
+
+	aliases {
+		hsuart0 = &uart3;
+		serial0 = &uart8;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+};
+
+&apps_rsc {
+	pm6150-rpmh-regulators {
+		compatible = "qcom,pm6150-rpmh-regulators";
+		qcom,pmic-id = "a";
+
+		vreg_s1a_1p1: smps1 {
+			regulator-min-microvolt = <1128000>;
+			regulator-max-microvolt = <1128000>;
+		};
+
+		vreg_s4a_1p0: smps4 {
+			regulator-min-microvolt = <824000>;
+			regulator-max-microvolt = <1120000>;
+		};
+
+		vreg_s5a_2p0: smps5 {
+			regulator-min-microvolt = <1744000>;
+			regulator-max-microvolt = <2040000>;
+		};
+
+		vreg_l1a_1p2: ldo1 {
+			regulator-min-microvolt = <1178000>;
+			regulator-max-microvolt = <1256000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l2a_1p0: ldo2 {
+			regulator-min-microvolt = <944000>;
+			regulator-max-microvolt = <1056000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l3a_1p0: ldo3 {
+			regulator-min-microvolt = <968000>;
+			regulator-max-microvolt = <1064000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l4a_0p8: ldo4 {
+			regulator-min-microvolt = <824000>;
+			regulator-max-microvolt = <928000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l5a_2p7: ldo5 {
+			regulator-min-microvolt = <2496000>;
+			regulator-max-microvolt = <3000000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l6a_0p6: ldo6 {
+			regulator-min-microvolt = <568000>;
+			regulator-max-microvolt = <648000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l9a_0p6: ldo9 {
+			regulator-min-microvolt = <488000>;
+			regulator-max-microvolt = <800000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l10a_1p8: ldo10 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1832000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l11a_1p8: ldo11 {
+			regulator-min-microvolt = <1696000>;
+			regulator-max-microvolt = <1904000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l12a_1p8: ldo12 {
+			regulator-min-microvolt = <1696000>;
+			regulator-max-microvolt = <1952000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l13a_1p8: ldo13 {
+			regulator-min-microvolt = <1696000>;
+			regulator-max-microvolt = <1904000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l14a_1p8: ldo14 {
+			regulator-min-microvolt = <1728000>;
+			regulator-max-microvolt = <1832000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l15a_1p8: ldo15 {
+			regulator-min-microvolt = <1696000>;
+			regulator-max-microvolt = <1904000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l16a_2p7: ldo16 {
+			regulator-min-microvolt = <2496000>;
+			regulator-max-microvolt = <3304000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l17a_3p0: ldo17 {
+			regulator-min-microvolt = <2920000>;
+			regulator-max-microvolt = <3232000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l18a_2p8: ldo18 {
+			regulator-min-microvolt = <2496000>;
+			regulator-max-microvolt = <3304000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l19a_2p9: ldo19 {
+			regulator-min-microvolt = <2696000>;
+			regulator-max-microvolt = <3304000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+	};
+
+	pm6150l-rpmh-regulators {
+		compatible = "qcom,pm6150l-rpmh-regulators";
+		qcom,pmic-id = "c";
+
+		vreg_s8c_1p3: smps8 {
+			regulator-min-microvolt = <1120000>;
+			regulator-max-microvolt = <1408000>;
+		};
+
+		vreg_l1c_1p8: ldo1 {
+			regulator-min-microvolt = <1616000>;
+			regulator-max-microvolt = <1984000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l2c_1p3: ldo2 {
+			regulator-min-microvolt = <1168000>;
+			regulator-max-microvolt = <1304000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l3c_1p2: ldo3 {
+			regulator-min-microvolt = <1144000>;
+			regulator-max-microvolt = <1304000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l4c_1p8: ldo4 {
+			regulator-min-microvolt = <1648000>;
+			regulator-max-microvolt = <3304000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l5c_1p8: ldo5 {
+			regulator-min-microvolt = <1648000>;
+			regulator-max-microvolt = <3304000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l6c_2p9: ldo6 {
+			regulator-min-microvolt = <2696000>;
+			regulator-max-microvolt = <3304000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l7c_3p0: ldo7 {
+			regulator-min-microvolt = <3000000>;
+			regulator-max-microvolt = <3312000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l8c_1p8: ldo8 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1904000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l9c_2p9: ldo9 {
+			regulator-min-microvolt = <2952000>;
+			regulator-max-microvolt = <3304000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l10c_3p3: ldo10 {
+			regulator-min-microvolt = <3000000>;
+			regulator-max-microvolt = <3400000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_l11c_3p3: ldo11 {
+			regulator-min-microvolt = <3000000>;
+			regulator-max-microvolt = <3400000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+		};
+
+		vreg_bob: bob {
+			regulator-min-microvolt = <3008000>;
+			regulator-max-microvolt = <3960000>;
+			regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+		};
+	};
+};
+
+&qspi {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&qspi_clk &qspi_cs0 &qspi_data01>;
+
+	flash@0 {
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <25000000>;
+		spi-tx-bus-width = <2>;
+		spi-rx-bus-width = <2>;
+	};
+};
+
+&qupv3_id_0 {
+	status = "okay";
+};
+
+&qupv3_id_1 {
+	status = "okay";
+};
+
+&uart3 {
+	status = "okay";
+};
+
+&uart8 {
+	status = "okay";
+};
+
+&usb_1 {
+	status = "okay";
+};
+
+&usb_1_dwc3 {
+	dr_mode = "host";
+};
+
+&usb_1_hsphy {
+	status = "okay";
+	vdd-supply = <&vreg_l4a_0p8>;
+	vdda-pll-supply = <&vreg_l11a_1p8>;
+	vdda-phy-dpdm-supply = <&vreg_l17a_3p0>;
+	qcom,imp-res-offset-value = <8>;
+	qcom,hstx-trim-value = <QUSB2_V2_HSTX_TRIM_21_6_MA>;
+	qcom,preemphasis-level = <QUSB2_V2_PREEMPHASIS_5_PERCENT>;
+	qcom,preemphasis-width = <QUSB2_V2_PREEMPHASIS_WIDTH_HALF_BIT>;
+};
+
+&usb_1_qmpphy {
+	status = "okay";
+	vdda-phy-supply = <&vreg_l3c_1p2>;
+	vdda-pll-supply = <&vreg_l4a_0p8>;
+};
+
+/* PINCTRL - additions to nodes defined in sc7180.dtsi */
+
+&qspi_clk {
+	pinconf {
+		pins = "gpio63";
+		bias-disable;
+	};
+};
+
+&qspi_cs0 {
+	pinconf {
+		pins = "gpio68";
+		bias-disable;
+	};
+};
+
+&qspi_data01 {
+	pinconf {
+		pins = "gpio64", "gpio65";
+
+		/* High-Z when no transfers; nice to park the lines */
+		bias-pull-up;
+	};
+};
+
+&qup_i2c2_default {
+	pinconf {
+		pins = "gpio15", "gpio16";
+		drive-strength = <2>;
+
+		/* Has external pullup */
+		bias-disable;
+	};
+};
+
+&qup_i2c4_default {
+	pinconf {
+		pins = "gpio115", "gpio116";
+		drive-strength = <2>;
+
+		/* Has external pullup */
+		bias-disable;
+	};
+};
+
+&qup_i2c7_default {
+	pinconf {
+		pins = "gpio6", "gpio7";
+		drive-strength = <2>;
+
+		/* Has external pullup */
+		bias-disable;
+	};
+};
+
+&qup_i2c9_default {
+	pinconf {
+		pins = "gpio46", "gpio47";
+		drive-strength = <2>;
+
+		/* Has external pullup */
+		bias-disable;
+	};
+};
+
+&qup_uart3_default {
+	pinconf-cts {
+		/*
+		 * Configure a pull-down on 38 (CTS) to match the pull of
+		 * the Bluetooth module.
+		 */
+		pins = "gpio38";
+		bias-pull-down;
+		output-high;
+	};
+
+	pinconf-rts {
+		/* We'll drive 39 (RTS), so no pull */
+		pins = "gpio39";
+		drive-strength = <2>;
+		bias-disable;
+	};
+
+	pinconf-tx {
+		/* We'll drive 40 (TX), so no pull */
+		pins = "gpio40";
+		drive-strength = <2>;
+		bias-disable;
+		output-high;
+	};
+
+	pinconf-rx {
+		/*
+		 * Configure a pull-up on 41 (RX). This is needed to avoid
+		 * garbage data when the TX pin of the Bluetooth module is
+		 * in tri-state (module powered off or not driving the
+		 * signal yet).
+		 */
+		pins = "gpio41";
+		bias-pull-up;
+	};
+};
+
+&qup_uart8_default {
+	pinconf-tx {
+		pins = "gpio44";
+		drive-strength = <2>;
+		bias-disable;
+	};
+
+	pinconf-rx {
+		pins = "gpio45";
+		drive-strength = <2>;
+		bias-pull-up;
+	};
+};
+
+&qup_spi0_default {
+	pinconf {
+		pins = "gpio34", "gpio35", "gpio36", "gpio37";
+		drive-strength = <2>;
+		bias-disable;
+	};
+};
+
+&qup_spi6_default {
+	pinconf {
+		pins = "gpio59", "gpio60", "gpio61", "gpio62";
+		drive-strength = <2>;
+		bias-disable;
+	};
+};
+
+&qup_spi10_default {
+	pinconf {
+		pins = "gpio86", "gpio87", "gpio88", "gpio89";
+		drive-strength = <2>;
+		bias-disable;
+	};
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
new file mode 100644
index 0000000..8011c5f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -0,0 +1,2187 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * SC7180 SoC device tree source
+ *
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/clock/qcom,gcc-sc7180.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/phy/phy-qcom-qusb2.h>
+#include <dt-bindings/power/qcom-aoss-qmp.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+#include <dt-bindings/reset/qcom,sdm845-aoss.h>
+#include <dt-bindings/reset/qcom,sdm845-pdc.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+	interrupt-parent = <&intc>;
+
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	chosen { };
+
+	aliases {
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		i2c2 = &i2c2;
+		i2c3 = &i2c3;
+		i2c4 = &i2c4;
+		i2c5 = &i2c5;
+		i2c6 = &i2c6;
+		i2c7 = &i2c7;
+		i2c8 = &i2c8;
+		i2c9 = &i2c9;
+		i2c10 = &i2c10;
+		i2c11 = &i2c11;
+		spi0 = &spi0;
+		spi1 = &spi1;
+		spi3 = &spi3;
+		spi5 = &spi5;
+		spi6 = &spi6;
+		spi8 = &spi8;
+		spi10 = &spi10;
+		spi11 = &spi11;
+	};
+
+	clocks {
+		xo_board: xo-board {
+			compatible = "fixed-clock";
+			clock-frequency = <38400000>;
+			#clock-cells = <0>;
+		};
+
+		sleep_clk: sleep-clk {
+			compatible = "fixed-clock";
+			clock-frequency = <32764>;
+			#clock-cells = <0>;
+		};
+	};
+
+	reserved_memory: reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		aop_cmd_db_mem: memory@80820000 {
+			reg = <0x0 0x80820000 0x0 0x20000>;
+			compatible = "qcom,cmd-db";
+		};
+
+		smem_mem: memory@80900000 {
+			reg = <0x0 0x80900000 0x0 0x200000>;
+			no-map;
+		};
+	};
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		CPU0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x0>;
+			enable-method = "psci";
+			next-level-cache = <&L2_0>;
+			#cooling-cells = <2>;
+			qcom,freq-domain = <&cpufreq_hw 0>;
+			L2_0: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+				L3_0: l3-cache {
+					compatible = "cache";
+				};
+			};
+		};
+
+		CPU1: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x100>;
+			enable-method = "psci";
+			next-level-cache = <&L2_100>;
+			#cooling-cells = <2>;
+			qcom,freq-domain = <&cpufreq_hw 0>;
+			L2_100: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU2: cpu@200 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x200>;
+			enable-method = "psci";
+			next-level-cache = <&L2_200>;
+			#cooling-cells = <2>;
+			qcom,freq-domain = <&cpufreq_hw 0>;
+			L2_200: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU3: cpu@300 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x300>;
+			enable-method = "psci";
+			next-level-cache = <&L2_300>;
+			#cooling-cells = <2>;
+			qcom,freq-domain = <&cpufreq_hw 0>;
+			L2_300: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU4: cpu@400 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x400>;
+			enable-method = "psci";
+			next-level-cache = <&L2_400>;
+			#cooling-cells = <2>;
+			qcom,freq-domain = <&cpufreq_hw 0>;
+			L2_400: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU5: cpu@500 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x500>;
+			enable-method = "psci";
+			next-level-cache = <&L2_500>;
+			#cooling-cells = <2>;
+			qcom,freq-domain = <&cpufreq_hw 0>;
+			L2_500: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU6: cpu@600 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x600>;
+			enable-method = "psci";
+			next-level-cache = <&L2_600>;
+			#cooling-cells = <2>;
+			qcom,freq-domain = <&cpufreq_hw 1>;
+			L2_600: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU7: cpu@700 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x700>;
+			enable-method = "psci";
+			next-level-cache = <&L2_700>;
+			#cooling-cells = <2>;
+			qcom,freq-domain = <&cpufreq_hw 1>;
+			L2_700: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+			};
+		};
+	};
+
+	memory@80000000 {
+		device_type = "memory";
+		/* We expect the bootloader to fill in the size */
+		reg = <0 0x80000000 0 0>;
+	};
+
+	pmu {
+		compatible = "arm,armv8-pmuv3";
+		interrupts = <GIC_PPI 5 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	firmware {
+		scm {
+			compatible = "qcom,scm-sc7180", "qcom,scm";
+		};
+	};
+
+	tcsr_mutex: hwlock {
+		compatible = "qcom,tcsr-mutex";
+		syscon = <&tcsr_mutex_regs 0 0x1000>;
+		#hwlock-cells = <1>;
+	};
+
+	smem {
+		compatible = "qcom,smem";
+		memory-region = <&smem_mem>;
+		hwlocks = <&tcsr_mutex 3>;
+	};
+
+	smp2p-cdsp {
+		compatible = "qcom,smp2p";
+		qcom,smem = <94>, <432>;
+
+		interrupts = <GIC_SPI 576 IRQ_TYPE_EDGE_RISING>;
+
+		mboxes = <&apss_shared 6>;
+
+		qcom,local-pid = <0>;
+		qcom,remote-pid = <5>;
+
+		cdsp_smp2p_out: master-kernel {
+			qcom,entry-name = "master-kernel";
+			#qcom,smem-state-cells = <1>;
+		};
+
+		cdsp_smp2p_in: slave-kernel {
+			qcom,entry-name = "slave-kernel";
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+	};
+
+	smp2p-lpass {
+		compatible = "qcom,smp2p";
+		qcom,smem = <443>, <429>;
+
+		interrupts = <GIC_SPI 158 IRQ_TYPE_EDGE_RISING>;
+
+		mboxes = <&apss_shared 10>;
+
+		qcom,local-pid = <0>;
+		qcom,remote-pid = <2>;
+
+		adsp_smp2p_out: master-kernel {
+			qcom,entry-name = "master-kernel";
+			#qcom,smem-state-cells = <1>;
+		};
+
+		adsp_smp2p_in: slave-kernel {
+			qcom,entry-name = "slave-kernel";
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+	};
+
+	smp2p-mpss {
+		compatible = "qcom,smp2p";
+		qcom,smem = <435>, <428>;
+		interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
+		mboxes = <&apss_shared 14>;
+		qcom,local-pid = <0>;
+		qcom,remote-pid = <1>;
+
+		modem_smp2p_out: master-kernel {
+			qcom,entry-name = "master-kernel";
+			#qcom,smem-state-cells = <1>;
+		};
+
+		modem_smp2p_in: slave-kernel {
+			qcom,entry-name = "slave-kernel";
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+	};
+
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+
+	soc: soc {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges = <0 0 0 0 0x10 0>;
+		dma-ranges = <0 0 0 0 0x10 0>;
+		compatible = "simple-bus";
+
+		gcc: clock-controller@100000 {
+			compatible = "qcom,gcc-sc7180";
+			reg = <0 0x00100000 0 0x1f0000>;
+			clocks = <&rpmhcc RPMH_CXO_CLK>,
+				 <&rpmhcc RPMH_CXO_CLK_A>;
+			clock-names = "bi_tcxo", "bi_tcxo_ao";
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+			#power-domain-cells = <1>;
+		};
+
+		qfprom@784000 {
+			compatible = "qcom,qfprom";
+			reg = <0 0x00784000 0 0x8ff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			qusb2p_hstx_trim: hstx-trim-primary@25b {
+				reg = <0x25b 0x1>;
+				bits = <1 3>;
+			};
+		};
+
+		qupv3_id_0: geniqup@8c0000 {
+			compatible = "qcom,geni-se-qup";
+			reg = <0 0x008c0000 0 0x6000>;
+			clock-names = "m-ahb", "s-ahb";
+			clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+				 <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			status = "disabled";
+
+			i2c0: i2c@880000 {
+				compatible = "qcom,geni-i2c";
+				reg = <0 0x00880000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_i2c0_default>;
+				interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			spi0: spi@880000 {
+				compatible = "qcom,geni-spi";
+				reg = <0 0x00880000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_spi0_default>;
+				interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			uart0: serial@880000 {
+				compatible = "qcom,geni-uart";
+				reg = <0 0x00880000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_uart0_default>;
+				interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+				status = "disabled";
+			};
+
+			i2c1: i2c@884000 {
+				compatible = "qcom,geni-i2c";
+				reg = <0 0x00884000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_i2c1_default>;
+				interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			spi1: spi@884000 {
+				compatible = "qcom,geni-spi";
+				reg = <0 0x00884000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_spi1_default>;
+				interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			uart1: serial@884000 {
+				compatible = "qcom,geni-uart";
+				reg = <0 0x00884000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_uart1_default>;
+				interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
+				status = "disabled";
+			};
+
+			i2c2: i2c@888000 {
+				compatible = "qcom,geni-i2c";
+				reg = <0 0x00888000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_i2c2_default>;
+				interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			uart2: serial@888000 {
+				compatible = "qcom,geni-uart";
+				reg = <0 0x00888000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_uart2_default>;
+				interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+				status = "disabled";
+			};
+
+			i2c3: i2c@88c000 {
+				compatible = "qcom,geni-i2c";
+				reg = <0 0x0088c000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_i2c3_default>;
+				interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			spi3: spi@88c000 {
+				compatible = "qcom,geni-spi";
+				reg = <0 0x0088c000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_spi3_default>;
+				interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			uart3: serial@88c000 {
+				compatible = "qcom,geni-uart";
+				reg = <0 0x0088c000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_uart3_default>;
+				interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+				status = "disabled";
+			};
+
+			i2c4: i2c@890000 {
+				compatible = "qcom,geni-i2c";
+				reg = <0 0x00890000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_i2c4_default>;
+				interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			uart4: serial@890000 {
+				compatible = "qcom,geni-uart";
+				reg = <0 0x00890000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_uart4_default>;
+				interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+				status = "disabled";
+			};
+
+			i2c5: i2c@894000 {
+				compatible = "qcom,geni-i2c";
+				reg = <0 0x00894000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_i2c5_default>;
+				interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			spi5: spi@894000 {
+				compatible = "qcom,geni-spi";
+				reg = <0 0x00894000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_spi5_default>;
+				interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			uart5: serial@894000 {
+				compatible = "qcom,geni-uart";
+				reg = <0 0x00894000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_uart5_default>;
+				interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
+				status = "disabled";
+			};
+		};
+
+		qupv3_id_1: geniqup@ac0000 {
+			compatible = "qcom,geni-se-qup";
+			reg = <0 0x00ac0000 0 0x6000>;
+			clock-names = "m-ahb", "s-ahb";
+			clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+				 <&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			status = "disabled";
+
+			i2c6: i2c@a80000 {
+				compatible = "qcom,geni-i2c";
+				reg = <0 0x00a80000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_i2c6_default>;
+				interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			spi6: spi@a80000 {
+				compatible = "qcom,geni-spi";
+				reg = <0 0x00a80000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_spi6_default>;
+				interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			uart6: serial@a80000 {
+				compatible = "qcom,geni-uart";
+				reg = <0 0x00a80000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_uart6_default>;
+				interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+				status = "disabled";
+			};
+
+			i2c7: i2c@a84000 {
+				compatible = "qcom,geni-i2c";
+				reg = <0 0x00a84000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_i2c7_default>;
+				interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			uart7: serial@a84000 {
+				compatible = "qcom,geni-uart";
+				reg = <0 0x00a84000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_uart7_default>;
+				interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+				status = "disabled";
+			};
+
+			i2c8: i2c@a88000 {
+				compatible = "qcom,geni-i2c";
+				reg = <0 0x00a88000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_i2c8_default>;
+				interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			spi8: spi@a88000 {
+				compatible = "qcom,geni-spi";
+				reg = <0 0x00a88000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_spi8_default>;
+				interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			uart8: serial@a88000 {
+				compatible = "qcom,geni-debug-uart";
+				reg = <0 0x00a88000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_uart8_default>;
+				interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+				status = "disabled";
+			};
+
+			i2c9: i2c@a8c000 {
+				compatible = "qcom,geni-i2c";
+				reg = <0 0x00a8c000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_i2c9_default>;
+				interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			uart9: serial@a8c000 {
+				compatible = "qcom,geni-uart";
+				reg = <0 0x00a8c000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_uart9_default>;
+				interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+				status = "disabled";
+			};
+
+			i2c10: i2c@a90000 {
+				compatible = "qcom,geni-i2c";
+				reg = <0 0x00a90000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_i2c10_default>;
+				interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			spi10: spi@a90000 {
+				compatible = "qcom,geni-spi";
+				reg = <0 0x00a90000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_spi10_default>;
+				interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			uart10: serial@a90000 {
+				compatible = "qcom,geni-uart";
+				reg = <0 0x00a90000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_uart10_default>;
+				interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+				status = "disabled";
+			};
+
+			i2c11: i2c@a94000 {
+				compatible = "qcom,geni-i2c";
+				reg = <0 0x00a94000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_i2c11_default>;
+				interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			spi11: spi@a94000 {
+				compatible = "qcom,geni-spi";
+				reg = <0 0x00a94000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_spi11_default>;
+				interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			uart11: serial@a94000 {
+				compatible = "qcom,geni-uart";
+				reg = <0 0x00a94000 0 0x4000>;
+				clock-names = "se";
+				clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&qup_uart11_default>;
+				interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+				status = "disabled";
+			};
+		};
+
+		tcsr_mutex_regs: syscon@1f40000 {
+			compatible = "syscon";
+			reg = <0 0x01f40000 0 0x40000>;
+		};
+
+		tlmm: pinctrl@3500000 {
+			compatible = "qcom,sc7180-pinctrl";
+			reg = <0 0x03500000 0 0x300000>,
+			      <0 0x03900000 0 0x300000>,
+			      <0 0x03d00000 0 0x300000>;
+			reg-names = "west", "north", "south";
+			interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			gpio-ranges = <&tlmm 0 0 120>;
+			wakeup-parent = <&pdc>;
+
+			qspi_clk: qspi-clk {
+				pinmux {
+					pins = "gpio63";
+					function = "qspi_clk";
+				};
+			};
+
+			qspi_cs0: qspi-cs0 {
+				pinmux {
+					pins = "gpio68";
+					function = "qspi_cs";
+				};
+			};
+
+			qspi_cs1: qspi-cs1 {
+				pinmux {
+					pins = "gpio72";
+					function = "qspi_cs";
+				};
+			};
+
+			qspi_data01: qspi-data01 {
+				pinmux-data {
+					pins = "gpio64", "gpio65";
+					function = "qspi_data";
+				};
+			};
+
+			qspi_data12: qspi-data12 {
+				pinmux-data {
+					pins = "gpio66", "gpio67";
+					function = "qspi_data";
+				};
+			};
+
+			qup_i2c0_default: qup-i2c0-default {
+				pinmux {
+					pins = "gpio34", "gpio35";
+					function = "qup00";
+				};
+			};
+
+			qup_i2c1_default: qup-i2c1-default {
+				pinmux {
+					pins = "gpio0", "gpio1";
+					function = "qup01";
+				};
+			};
+
+			qup_i2c2_default: qup-i2c2-default {
+				pinmux {
+					pins = "gpio15", "gpio16";
+					function = "qup02_i2c";
+				};
+			};
+
+			qup_i2c3_default: qup-i2c3-default {
+				pinmux {
+					pins = "gpio38", "gpio39";
+					function = "qup03";
+				};
+			};
+
+			qup_i2c4_default: qup-i2c4-default {
+				pinmux {
+					pins = "gpio115", "gpio116";
+					function = "qup04_i2c";
+				};
+			};
+
+			qup_i2c5_default: qup-i2c5-default {
+				pinmux {
+					pins = "gpio25", "gpio26";
+					function = "qup05";
+				};
+			};
+
+			qup_i2c6_default: qup-i2c6-default {
+				pinmux {
+					pins = "gpio59", "gpio60";
+					function = "qup10";
+				};
+			};
+
+			qup_i2c7_default: qup-i2c7-default {
+				pinmux {
+					pins = "gpio6", "gpio7";
+					function = "qup11_i2c";
+				};
+			};
+
+			qup_i2c8_default: qup-i2c8-default {
+				pinmux {
+					pins = "gpio42", "gpio43";
+					function = "qup12";
+				};
+			};
+
+			qup_i2c9_default: qup-i2c9-default {
+				pinmux {
+					pins = "gpio46", "gpio47";
+					function = "qup13_i2c";
+				};
+			};
+
+			qup_i2c10_default: qup-i2c10-default {
+				pinmux {
+					pins = "gpio86", "gpio87";
+					function = "qup14";
+				};
+			};
+
+			qup_i2c11_default: qup-i2c11-default {
+				pinmux {
+					pins = "gpio53", "gpio54";
+					function = "qup15";
+				};
+			};
+
+			qup_spi0_default: qup-spi0-default {
+				pinmux {
+					pins = "gpio34", "gpio35",
+					       "gpio36", "gpio37";
+					function = "qup00";
+				};
+			};
+
+			qup_spi1_default: qup-spi1-default {
+				pinmux {
+					pins = "gpio0", "gpio1",
+					       "gpio2", "gpio3";
+					function = "qup01";
+				};
+			};
+
+			qup_spi3_default: qup-spi3-default {
+				pinmux {
+					pins = "gpio38", "gpio39",
+					       "gpio40", "gpio41";
+					function = "qup03";
+				};
+			};
+
+			qup_spi5_default: qup-spi5-default {
+				pinmux {
+					pins = "gpio25", "gpio26",
+					       "gpio27", "gpio28";
+					function = "qup05";
+				};
+			};
+
+			qup_spi6_default: qup-spi6-default {
+				pinmux {
+					pins = "gpio59", "gpio60",
+					       "gpio61", "gpio62";
+					function = "qup10";
+				};
+			};
+
+			qup_spi8_default: qup-spi8-default {
+				pinmux {
+					pins = "gpio42", "gpio43",
+					       "gpio44", "gpio45";
+					function = "qup12";
+				};
+			};
+
+			qup_spi10_default: qup-spi10-default {
+				pinmux {
+					pins = "gpio86", "gpio87",
+					       "gpio88", "gpio89";
+					function = "qup14";
+				};
+			};
+
+			qup_spi11_default: qup-spi11-default {
+				pinmux {
+					pins = "gpio53", "gpio54",
+					       "gpio55", "gpio56";
+					function = "qup15";
+				};
+			};
+
+			qup_uart0_default: qup-uart0-default {
+				pinmux {
+					pins = "gpio34", "gpio35",
+					       "gpio36", "gpio37";
+					function = "qup00";
+				};
+			};
+
+			qup_uart1_default: qup-uart1-default {
+				pinmux {
+					pins = "gpio0", "gpio1",
+					       "gpio2", "gpio3";
+					function = "qup01";
+				};
+			};
+
+			qup_uart2_default: qup-uart2-default {
+				pinmux {
+					pins = "gpio15", "gpio16";
+					function = "qup02_uart";
+				};
+			};
+
+			qup_uart3_default: qup-uart3-default {
+				pinmux {
+					pins = "gpio38", "gpio39",
+					       "gpio40", "gpio41";
+					function = "qup03";
+				};
+			};
+
+			qup_uart4_default: qup-uart4-default {
+				pinmux {
+					pins = "gpio115", "gpio116";
+					function = "qup04_uart";
+				};
+			};
+
+			qup_uart5_default: qup-uart5-default {
+				pinmux {
+					pins = "gpio25", "gpio26",
+					       "gpio27", "gpio28";
+					function = "qup05";
+				};
+			};
+
+			qup_uart6_default: qup-uart6-default {
+				pinmux {
+					pins = "gpio59", "gpio60",
+					       "gpio61", "gpio62";
+					function = "qup10";
+				};
+			};
+
+			qup_uart7_default: qup-uart7-default {
+				pinmux {
+					pins = "gpio6", "gpio7";
+					function = "qup11_uart";
+				};
+			};
+
+			qup_uart8_default: qup-uart8-default {
+				pinmux {
+					pins = "gpio44", "gpio45";
+					function = "qup12";
+				};
+			};
+
+			qup_uart9_default: qup-uart9-default {
+				pinmux {
+					pins = "gpio46", "gpio47";
+					function = "qup13_uart";
+				};
+			};
+
+			qup_uart10_default: qup-uart10-default {
+				pinmux {
+					pins = "gpio86", "gpio87",
+					       "gpio88", "gpio89";
+					function = "qup14";
+				};
+			};
+
+			qup_uart11_default: qup-uart11-default {
+				pinmux {
+					pins = "gpio53", "gpio54",
+					       "gpio55", "gpio56";
+					function = "qup15";
+				};
+			};
+		};
+
+		qspi: spi@88dc000 {
+			compatible = "qcom,qspi-v1";
+			reg = <0 0x088dc000 0 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+				 <&gcc GCC_QSPI_CORE_CLK>;
+			clock-names = "iface", "core";
+			status = "disabled";
+		};
+
+		usb_1_hsphy: phy@88e3000 {
+			compatible = "qcom,sc7180-qusb2-phy";
+			reg = <0 0x088e3000 0 0x400>;
+			status = "disabled";
+			#phy-cells = <0>;
+			clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+				 <&rpmhcc RPMH_CXO_CLK>;
+			clock-names = "cfg_ahb", "ref";
+			resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+
+			nvmem-cells = <&qusb2p_hstx_trim>;
+		};
+
+		usb_1_qmpphy: phy-wrapper@88e9000 {
+			compatible = "qcom,sc7180-qmp-usb3-phy";
+			reg = <0 0x088e9000 0 0x18c>,
+			      <0 0x088e8000 0 0x38>;
+			reg-names = "reg-base", "dp_com";
+			status = "disabled";
+			#clock-cells = <1>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+
+			clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+				 <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+				 <&gcc GCC_USB3_PRIM_CLKREF_CLK>,
+				 <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
+			clock-names = "aux", "cfg_ahb", "ref", "com_aux";
+
+			resets = <&gcc GCC_USB3_DP_PHY_PRIM_BCR>,
+				 <&gcc GCC_USB3_PHY_PRIM_BCR>;
+			reset-names = "phy", "common";
+
+			usb_1_ssphy: phy@88e9200 {
+				reg = <0 0x088e9200 0 0x128>,
+				      <0 0x088e9400 0 0x200>,
+				      <0 0x088e9c00 0 0x218>,
+				      <0 0x088e9600 0 0x128>,
+				      <0 0x088e9800 0 0x200>,
+				      <0 0x088e9a00 0 0x18>;
+				#clock-cells = <0>;
+				#phy-cells = <0>;
+				clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+				clock-names = "pipe0";
+				clock-output-names = "usb3_phy_pipe_clk_src";
+			};
+		};
+
+		system-cache-controller@9200000 {
+			compatible = "qcom,sc7180-llcc";
+			reg = <0 0x09200000 0 0x200000>, <0 0x09600000 0 0x50000>;
+			reg-names = "llcc_base", "llcc_broadcast_base";
+			interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		usb_1: usb@a6f8800 {
+			compatible = "qcom,sc7180-dwc3", "qcom,dwc3";
+			reg = <0 0x0a6f8800 0 0x400>;
+			status = "disabled";
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			dma-ranges;
+
+			clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
+				 <&gcc GCC_USB30_PRIM_MASTER_CLK>,
+				 <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
+				 <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+				 <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
+			clock-names = "cfg_noc", "core", "iface", "mock_utmi",
+				      "sleep";
+
+			assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+					  <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+			assigned-clock-rates = <19200000>, <150000000>;
+
+			interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "hs_phy_irq", "ss_phy_irq",
+					  "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+			power-domains = <&gcc USB30_PRIM_GDSC>;
+
+			resets = <&gcc GCC_USB30_PRIM_BCR>;
+
+			usb_1_dwc3: dwc3@a600000 {
+				compatible = "snps,dwc3";
+				reg = <0 0x0a600000 0 0xe000>;
+				interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+				iommus = <&apps_smmu 0x540 0>;
+				snps,dis_u2_susphy_quirk;
+				snps,dis_enblslpm_quirk;
+				phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
+				phy-names = "usb2-phy", "usb3-phy";
+			};
+		};
+
+		pdc: interrupt-controller@b220000 {
+			compatible = "qcom,sc7180-pdc", "qcom,pdc";
+			reg = <0 0x0b220000 0 0x30000>;
+			qcom,pdc-ranges = <0 480 15>, <17 497 98>,
+					  <119 634 4>, <124 639 1>;
+			#interrupt-cells = <2>;
+			interrupt-parent = <&intc>;
+			interrupt-controller;
+		};
+
+		pdc_reset: reset-controller@b2e0000 {
+			compatible = "qcom,sc7180-pdc-global", "qcom,sdm845-pdc-global";
+			reg = <0 0x0b2e0000 0 0x20000>;
+			#reset-cells = <1>;
+		};
+
+		tsens0: thermal-sensor@c263000 {
+			compatible = "qcom,sc7180-tsens","qcom,tsens-v2";
+			reg = <0 0x0c263000 0 0x1ff>, /* TM */
+				<0 0x0c222000 0 0x1ff>; /* SROT */
+			#qcom,sensors = <15>;
+			interrupts = <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "uplow","critical";
+			#thermal-sensor-cells = <1>;
+		};
+
+		tsens1: thermal-sensor@c265000 {
+			compatible = "qcom,sc7180-tsens","qcom,tsens-v2";
+			reg = <0 0x0c265000 0 0x1ff>, /* TM */
+				<0 0x0c223000 0 0x1ff>; /* SROT */
+			#qcom,sensors = <10>;
+			interrupts = <GIC_SPI 507 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 509 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "uplow","critical";
+			#thermal-sensor-cells = <1>;
+		};
+
+		aoss_reset: reset-controller@c2a0000 {
+			compatible = "qcom,sc7180-aoss-cc", "qcom,sdm845-aoss-cc";
+			reg = <0 0x0c2a0000 0 0x31000>;
+			#reset-cells = <1>;
+		};
+
+		aoss_qmp: qmp@c300000 {
+			compatible = "qcom,sc7180-aoss-qmp";
+			reg = <0 0x0c300000 0 0x100000>;
+			interrupts = <GIC_SPI 389 IRQ_TYPE_EDGE_RISING>;
+			mboxes = <&apss_shared 0>;
+
+			#clock-cells = <0>;
+			#power-domain-cells = <1>;
+		};
+
+		spmi_bus: spmi@c440000 {
+			compatible = "qcom,spmi-pmic-arb";
+			reg = <0 0x0c440000 0 0x1100>,
+			      <0 0x0c600000 0 0x2000000>,
+			      <0 0x0e600000 0 0x100000>,
+			      <0 0x0e700000 0 0xa0000>,
+			      <0 0x0c40a000 0 0x26000>;
+			reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+			interrupt-names = "periph_irq";
+			interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+			qcom,ee = <0>;
+			qcom,channel = <0>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			interrupt-controller;
+			#interrupt-cells = <4>;
+			cell-index = <0>;
+		};
+
+		apps_smmu: iommu@15000000 {
+			compatible = "qcom,sc7180-smmu-500", "arm,mmu-500";
+			reg = <0 0x15000000 0 0x100000>;
+			#iommu-cells = <2>;
+			#global-interrupts = <1>;
+			interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 410 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 411 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		intc: interrupt-controller@17a00000 {
+			compatible = "arm,gic-v3";
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			#interrupt-cells = <3>;
+			interrupt-controller;
+			reg = <0 0x17a00000 0 0x10000>,     /* GICD */
+			      <0 0x17a60000 0 0x100000>;    /* GICR * 8 */
+			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+			msi-controller@17a40000 {
+				compatible = "arm,gic-v3-its";
+				msi-controller;
+				#msi-cells = <1>;
+				reg = <0 0x17a40000 0 0x20000>;
+				status = "disabled";
+			};
+		};
+
+		apss_shared: mailbox@17c00000 {
+			compatible = "qcom,sc7180-apss-shared";
+			reg = <0 0x17c00000 0 0x10000>;
+			#mbox-cells = <1>;
+		};
+
+		watchdog@17c10000 {
+			compatible = "qcom,apss-wdt-sc7180", "qcom,kpss-wdt";
+			reg = <0 0x17c10000 0 0x1000>;
+			clocks = <&sleep_clk>;
+		};
+
+		timer@17c20000{
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			compatible = "arm,armv7-timer-mem";
+			reg = <0 0x17c20000 0 0x1000>;
+
+			frame@17c21000 {
+				frame-number = <0>;
+				interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0 0x17c21000 0 0x1000>,
+				      <0 0x17c22000 0 0x1000>;
+			};
+
+			frame@17c23000 {
+				frame-number = <1>;
+				interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0 0x17c23000 0 0x1000>;
+				status = "disabled";
+			};
+
+			frame@17c25000 {
+				frame-number = <2>;
+				interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0 0x17c25000 0 0x1000>;
+				status = "disabled";
+			};
+
+			frame@17c27000 {
+				frame-number = <3>;
+				interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0 0x17c27000 0 0x1000>;
+				status = "disabled";
+			};
+
+			frame@17c29000 {
+				frame-number = <4>;
+				interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0 0x17c29000 0 0x1000>;
+				status = "disabled";
+			};
+
+			frame@17c2b000 {
+				frame-number = <5>;
+				interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0 0x17c2b000 0 0x1000>;
+				status = "disabled";
+			};
+
+			frame@17c2d000 {
+				frame-number = <6>;
+				interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0 0x17c2d000 0 0x1000>;
+				status = "disabled";
+			};
+		};
+
+		apps_rsc: rsc@18200000 {
+			compatible = "qcom,rpmh-rsc";
+			reg = <0 0x18200000 0 0x10000>,
+			      <0 0x18210000 0 0x10000>,
+			      <0 0x18220000 0 0x10000>;
+			reg-names = "drv-0", "drv-1", "drv-2";
+			interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+			qcom,tcs-offset = <0xd00>;
+			qcom,drv-id = <2>;
+			qcom,tcs-config = <ACTIVE_TCS  2>,
+					  <SLEEP_TCS   3>,
+					  <WAKE_TCS    3>,
+					  <CONTROL_TCS 1>;
+
+			rpmhcc: clock-controller {
+				compatible = "qcom,sc7180-rpmh-clk";
+				clocks = <&xo_board>;
+				clock-names = "xo";
+				#clock-cells = <1>;
+			};
+
+			rpmhpd: power-controller {
+				compatible = "qcom,sc7180-rpmhpd";
+				#power-domain-cells = <1>;
+				operating-points-v2 = <&rpmhpd_opp_table>;
+
+				rpmhpd_opp_table: opp-table {
+					compatible = "operating-points-v2";
+
+					rpmhpd_opp_ret: opp1 {
+						opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
+					};
+
+					rpmhpd_opp_min_svs: opp2 {
+						opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+					};
+
+					rpmhpd_opp_low_svs: opp3 {
+						opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+					};
+
+					rpmhpd_opp_svs: opp4 {
+						opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+					};
+
+					rpmhpd_opp_svs_l1: opp5 {
+						opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+					};
+
+					rpmhpd_opp_svs_l2: opp6 {
+						opp-level = <224>;
+					};
+
+					rpmhpd_opp_nom: opp7 {
+						opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+					};
+
+					rpmhpd_opp_nom_l1: opp8 {
+						opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+					};
+
+					rpmhpd_opp_nom_l2: opp9 {
+						opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
+					};
+
+					rpmhpd_opp_turbo: opp10 {
+						opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+					};
+
+					rpmhpd_opp_turbo_l1: opp11 {
+						opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+					};
+				};
+			};
+		};
+
+		cpufreq_hw: cpufreq@18323000 {
+			compatible = "qcom,cpufreq-hw";
+			reg = <0 0x18323000 0 0x1400>, <0 0x18325800 0 0x1400>;
+			reg-names = "freq-domain0", "freq-domain1";
+
+			clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>;
+			clock-names = "xo", "alternate";
+
+			#freq-domain-cells = <1>;
+		};
+	};
+
+	thermal-zones {
+		cpu0-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 1>;
+
+			trips {
+				cpu0_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu0_alert1: trip-point1 {
+					temperature = <95000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu0_crit: cpu_crit {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu0_alert0>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+				map1 {
+					trip = <&cpu0_alert1>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		cpu1-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 2>;
+
+			trips {
+				cpu1_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu1_alert1: trip-point1 {
+					temperature = <95000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu1_crit: cpu_crit {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu1_alert0>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+				map1 {
+					trip = <&cpu1_alert1>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		cpu2-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 3>;
+
+			trips {
+				cpu2_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu2_alert1: trip-point1 {
+					temperature = <95000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu2_crit: cpu_crit {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu2_alert0>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+				map1 {
+					trip = <&cpu2_alert1>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		cpu3-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 4>;
+
+			trips {
+				cpu3_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu3_alert1: trip-point1 {
+					temperature = <95000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu3_crit: cpu_crit {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu3_alert0>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+				map1 {
+					trip = <&cpu3_alert1>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		cpu4-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 5>;
+
+			trips {
+				cpu4_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu4_alert1: trip-point1 {
+					temperature = <95000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu4_crit: cpu_crit {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu4_alert0>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+				map1 {
+					trip = <&cpu4_alert1>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		cpu5-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 6>;
+
+			trips {
+				cpu5_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu5_alert1: trip-point1 {
+					temperature = <95000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu5_crit: cpu_crit {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu5_alert0>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+				map1 {
+					trip = <&cpu5_alert1>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		cpu6-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 9>;
+
+			trips {
+				cpu6_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu6_alert1: trip-point1 {
+					temperature = <95000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu6_crit: cpu_crit {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu6_alert0>;
+					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+				map1 {
+					trip = <&cpu6_alert1>;
+					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		cpu7-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 10>;
+
+			trips {
+				cpu7_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu7_alert1: trip-point1 {
+					temperature = <95000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu7_crit: cpu_crit {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu7_alert0>;
+					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+				map1 {
+					trip = <&cpu7_alert1>;
+					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		cpu8-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 11>;
+
+			trips {
+				cpu8_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu8_alert1: trip-point1 {
+					temperature = <95000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu8_crit: cpu_crit {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu8_alert0>;
+					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+				map1 {
+					trip = <&cpu8_alert1>;
+					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		cpu9-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 12>;
+
+			trips {
+				cpu9_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu9_alert1: trip-point1 {
+					temperature = <95000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu9_crit: cpu_crit {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu9_alert0>;
+					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+				map1 {
+					trip = <&cpu9_alert1>;
+					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		aoss0-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 0>;
+
+			trips {
+				aoss0_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		cpuss0-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 7>;
+
+			trips {
+				cpuss0_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+				cpuss0_crit: cluster0_crit {
+					temperature = <110000>;
+					hysteresis = <2000>;
+					type = "critical";
+				};
+			};
+		};
+
+		cpuss1-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 8>;
+
+			trips {
+				cpuss1_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+				cpuss1_crit: cluster0_crit {
+					temperature = <110000>;
+					hysteresis = <2000>;
+					type = "critical";
+				};
+			};
+		};
+
+		gpuss0-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 13>;
+
+			trips {
+				gpuss0_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		gpuss1-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens0 14>;
+
+			trips {
+				gpuss1_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		aoss1-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 0>;
+
+			trips {
+				aoss1_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		cwlan-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 1>;
+
+			trips {
+				cwlan_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		audio-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 2>;
+
+			trips {
+				audio_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		ddr-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 3>;
+
+			trips {
+				ddr_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		q6-hvx-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 4>;
+
+			trips {
+				q6_hvx_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		camera-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 5>;
+
+			trips {
+				camera_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		mdm-core-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 6>;
+
+			trips {
+				mdm_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		mdm-dsp-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 7>;
+
+			trips {
+				mdm_dsp_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		npu-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 8>;
+
+			trips {
+				npu_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+
+		video-thermal {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&tsens1 9>;
+
+			trips {
+				video_alert0: trip-point0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "hot";
+				};
+			};
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 1 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 2 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 3 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 0 IRQ_TYPE_LEVEL_LOW>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
index 9a4ff57..7b53b3c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
@@ -165,7 +165,6 @@ panel_in_edp: endpoint {
 /delete-node/ &venus_mem;
 /delete-node/ &cdsp_mem;
 /delete-node/ &cdsp_pas;
-/delete-node/ &zap_shader;
 /delete-node/ &gpu_mem;
 
 /* Increase the size from 120 MB to 128 MB */
@@ -651,6 +650,20 @@ &spi0 {
 	status = "okay";
 };
 
+&spi5 {
+	status = "okay";
+
+	tpm@0 {
+		compatible = "google,cr50";
+		reg = <0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&h1_ap_int_odl>;
+		spi-max-frequency = <800000>;
+		interrupt-parent = <&tlmm>;
+		interrupts = <129 IRQ_TYPE_EDGE_RISING>;
+	};
+};
+
 &spi10 {
 	status = "okay";
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index d100f46..eb77aaa 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -197,7 +197,7 @@ vph_pwr: vph-pwr-regulator {
 &adsp_pas {
 	status = "okay";
 
-	firmware-name = "qcom/db845c/adsp.mdt";
+	firmware-name = "qcom/sdm845/adsp.mdt";
 };
 
 &apps_rsc {
@@ -343,7 +343,7 @@ vreg_bob: bob {
 
 &cdsp_pas {
 	status = "okay";
-	firmware-name = "qcom/db845c/cdsp.mdt";
+	firmware-name = "qcom/sdm845/cdsp.mdt";
 };
 
 &gcc {
@@ -352,6 +352,18 @@ &gcc {
 			   <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
 };
 
+&gpu {
+	zap-shader {
+		memory-region = <&gpu_mem>;
+		firmware-name = "qcom/sdm845/a630_zap.mbn";
+	};
+};
+
+&mss_pil {
+	status = "okay";
+	firmware-name = "qcom/sdm845/mba.mbn", "qcom/sdm845/modem.mbn";
+};
+
 &pm8998_gpio {
 	vol_up_pin_a: vol-up-active {
 		pins = "gpio6";
@@ -529,6 +541,8 @@ &wifi {
 	vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
 	vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
 	vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
+
+	qcom,snoc-host-cap-8bit-quirk;
 };
 
 /* PINCTRL - additions to nodes defined in sdm845.dtsi */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index c57548b..09ad37b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -360,6 +360,13 @@ &gcc {
 			   <GCC_LPASS_SWAY_CLK>;
 };
 
+&gpu {
+	zap-shader {
+		memory-region = <&gpu_mem>;
+		firmware-name = "qcom/sdm845/a630_zap.mbn";
+	};
+};
+
 &i2c10 {
 	status = "okay";
 	clock-frequency = <400000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index ddb1f23..d42302b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -1357,7 +1357,7 @@ uart15: serial@a9c000 {
 			};
 		};
 
-		cache-controller@1100000 {
+		system-cache-controller@1100000 {
 			compatible = "qcom,sdm845-llcc";
 			reg = <0 0x01100000 0 0x200000>, <0 0x01300000 0 0x50000>;
 			reg-names = "llcc_base", "llcc_broadcast_base";
@@ -1374,6 +1374,8 @@ ufs_mem_hc: ufshc@1d84000 {
 			lanes-per-direction = <2>;
 			power-domains = <&gcc UFS_PHY_GDSC>;
 			#reset-cells = <1>;
+			resets = <&gcc GCC_UFS_PHY_BCR>;
+			reset-names = "rst";
 
 			iommus = <&apps_smmu 0x100 0xf>;
 
@@ -1447,6 +1449,7 @@ tlmm: pinctrl@3400000 {
 			interrupt-controller;
 			#interrupt-cells = <2>;
 			gpio-ranges = <&tlmm 0 0 150>;
+			wakeup-parent = <&pdc_intc>;
 
 			qspi_clk: qspi-clk {
 				pinmux {
@@ -2804,7 +2807,7 @@ dsi1_phy: dsi-phy@ae96400 {
 			};
 		};
 
-		gpu@5000000 {
+		gpu: gpu@5000000 {
 			compatible = "qcom,adreno-630.2", "qcom,adreno";
 			#stream-id-cells = <16>;
 
@@ -2824,10 +2827,6 @@ gpu@5000000 {
 
 			qcom,gmu = <&gmu>;
 
-			zap_shader: zap-shader {
-				memory-region = <&gpu_mem>;
-			};
-
 			gpu_opp_table: opp-table {
 				compatible = "operating-points-v2";
 
@@ -2939,6 +2938,15 @@ dispcc: clock-controller@af00000 {
 			#power-domain-cells = <1>;
 		};
 
+		pdc_intc: interrupt-controller@b220000 {
+			compatible = "qcom,sdm845-pdc", "qcom,pdc";
+			reg = <0 0x0b220000 0 0x30000>;
+			qcom,pdc-ranges = <0 480 94>, <94 609 15>, <115 630 7>;
+			#interrupt-cells = <2>;
+			interrupt-parent = <&intc>;
+			interrupt-controller;
+		};
+
 		pdc_reset: reset-controller@b2e0000 {
 			compatible = "qcom,sdm845-pdc-global";
 			reg = <0 0x0b2e0000 0 0x20000>;
@@ -2950,8 +2958,9 @@ tsens0: thermal-sensor@c263000 {
 			reg = <0 0x0c263000 0 0x1ff>, /* TM */
 			      <0 0x0c222000 0 0x1ff>; /* SROT */
 			#qcom,sensors = <13>;
-			interrupts = <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "uplow";
+			interrupts = <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "uplow", "critical";
 			#thermal-sensor-cells = <1>;
 		};
 
@@ -2960,8 +2969,9 @@ tsens1: thermal-sensor@c265000 {
 			reg = <0 0x0c265000 0 0x1ff>, /* TM */
 			      <0 0x0c223000 0 0x1ff>; /* SROT */
 			#qcom,sensors = <8>;
-			interrupts = <GIC_SPI 507 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "uplow";
+			interrupts = <GIC_SPI 507 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 509 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "uplow", "critical";
 			#thermal-sensor-cells = <1>;
 		};
 
@@ -3191,7 +3201,7 @@ intc: interrupt-controller@17a00000 {
 			      <0 0x17a60000 0 0x100000>;    /* GICR * 8 */
 			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
 
-			gic-its@17a40000 {
+			msi-controller@17a40000 {
 				compatible = "arm,gic-v3-its";
 				msi-controller;
 				#msi-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
index 13dc619..b255be3 100644
--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
@@ -245,6 +245,13 @@ &gcc {
 			   <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
 };
 
+&gpu {
+	zap-shader {
+		memory-region = <&gpu_mem>;
+		firmware-name = "qcom/LENOVO/81JL/qcdxkmsuc850.mbn";
+	};
+};
+
 &i2c1 {
 	status = "okay";
 	clock-frequency = <400000>;
diff --git a/arch/arm64/boot/dts/qcom/sm8150-mtp.dts b/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
index aa5de42..8ab1661 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
@@ -7,6 +7,7 @@
 /dts-v1/;
 
 #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/gpio/gpio.h>
 #include "sm8150.dtsi"
 #include "pm8150.dtsi"
 #include "pm8150b.dtsi"
@@ -366,6 +367,18 @@ resin {
 	};
 };
 
+&remoteproc_adsp {
+	status = "okay";
+};
+
+&remoteproc_cdsp {
+	status = "okay";
+};
+
+&remoteproc_slpi {
+	status = "okay";
+};
+
 &tlmm {
 	gpio-reserved-ranges = <0 4>, <126 4>;
 };
@@ -373,3 +386,25 @@ &tlmm {
 &uart2 {
 	status = "okay";
 };
+
+&ufs_mem_hc {
+	status = "okay";
+
+	reset-gpios = <&tlmm 175 GPIO_ACTIVE_LOW>;
+
+	vcc-supply = <&vreg_l10a_2p5>;
+	vcc-max-microamp = <750000>;
+	vccq-supply = <&vreg_l9a_1p2>;
+	vccq-max-microamp = <700000>;
+	vccq2-supply = <&vreg_s4a_1p8>;
+	vccq2-max-microamp = <750000>;
+};
+
+&ufs_mem_phy {
+	status = "okay";
+
+	vdda-phy-supply = <&vdda_ufs_2ln_core_1>;
+	vdda-max-microamp = <90200>;
+	vdda-pll-supply = <&vreg_l3c_1p2>;
+	vdda-pll-max-microamp = <19000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index 8f23fca..141c21d 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -5,8 +5,11 @@
  */
 
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/power/qcom-aoss-qmp.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
 #include <dt-bindings/soc/qcom,rpmh-rsc.h>
 #include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/clock/qcom,gcc-sm8150.h>
 
 / {
 	interrupt-parent = <&intc>;
@@ -42,6 +45,7 @@ CPU0: cpu@0 {
 			reg = <0x0 0x0>;
 			enable-method = "psci";
 			next-level-cache = <&L2_0>;
+			qcom,freq-domain = <&cpufreq_hw 0>;
 			L2_0: l2-cache {
 				compatible = "cache";
 				next-level-cache = <&L3_0>;
@@ -57,6 +61,7 @@ CPU1: cpu@100 {
 			reg = <0x0 0x100>;
 			enable-method = "psci";
 			next-level-cache = <&L2_100>;
+			qcom,freq-domain = <&cpufreq_hw 0>;
 			L2_100: l2-cache {
 				compatible = "cache";
 				next-level-cache = <&L3_0>;
@@ -70,6 +75,7 @@ CPU2: cpu@200 {
 			reg = <0x0 0x200>;
 			enable-method = "psci";
 			next-level-cache = <&L2_200>;
+			qcom,freq-domain = <&cpufreq_hw 0>;
 			L2_200: l2-cache {
 				compatible = "cache";
 				next-level-cache = <&L3_0>;
@@ -82,6 +88,7 @@ CPU3: cpu@300 {
 			reg = <0x0 0x300>;
 			enable-method = "psci";
 			next-level-cache = <&L2_300>;
+			qcom,freq-domain = <&cpufreq_hw 0>;
 			L2_300: l2-cache {
 				compatible = "cache";
 				next-level-cache = <&L3_0>;
@@ -94,6 +101,7 @@ CPU4: cpu@400 {
 			reg = <0x0 0x400>;
 			enable-method = "psci";
 			next-level-cache = <&L2_400>;
+			qcom,freq-domain = <&cpufreq_hw 1>;
 			L2_400: l2-cache {
 				compatible = "cache";
 				next-level-cache = <&L3_0>;
@@ -106,6 +114,7 @@ CPU5: cpu@500 {
 			reg = <0x0 0x500>;
 			enable-method = "psci";
 			next-level-cache = <&L2_500>;
+			qcom,freq-domain = <&cpufreq_hw 1>;
 			L2_500: l2-cache {
 				compatible = "cache";
 				next-level-cache = <&L3_0>;
@@ -118,6 +127,7 @@ CPU6: cpu@600 {
 			reg = <0x0 0x600>;
 			enable-method = "psci";
 			next-level-cache = <&L2_600>;
+			qcom,freq-domain = <&cpufreq_hw 1>;
 			L2_600: l2-cache {
 				compatible = "cache";
 				next-level-cache = <&L3_0>;
@@ -130,6 +140,7 @@ CPU7: cpu@700 {
 			reg = <0x0 0x700>;
 			enable-method = "psci";
 			next-level-cache = <&L2_700>;
+			qcom,freq-domain = <&cpufreq_hw 2>;
 			L2_700: l2-cache {
 				compatible = "cache";
 				next-level-cache = <&L3_0>;
@@ -283,6 +294,102 @@ smem {
 		hwlocks = <&tcsr_mutex 3>;
 	};
 
+	smp2p-cdsp {
+		compatible = "qcom,smp2p";
+		qcom,smem = <94>, <432>;
+
+		interrupts = <GIC_SPI 576 IRQ_TYPE_EDGE_RISING>;
+
+		mboxes = <&apss_shared 6>;
+
+		qcom,local-pid = <0>;
+		qcom,remote-pid = <5>;
+
+		cdsp_smp2p_out: master-kernel {
+			qcom,entry-name = "master-kernel";
+			#qcom,smem-state-cells = <1>;
+		};
+
+		cdsp_smp2p_in: slave-kernel {
+			qcom,entry-name = "slave-kernel";
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+	};
+
+	smp2p-lpass {
+		compatible = "qcom,smp2p";
+		qcom,smem = <443>, <429>;
+
+		interrupts = <GIC_SPI 158 IRQ_TYPE_EDGE_RISING>;
+
+		mboxes = <&apss_shared 10>;
+
+		qcom,local-pid = <0>;
+		qcom,remote-pid = <2>;
+
+		adsp_smp2p_out: master-kernel {
+			qcom,entry-name = "master-kernel";
+			#qcom,smem-state-cells = <1>;
+		};
+
+		adsp_smp2p_in: slave-kernel {
+			qcom,entry-name = "slave-kernel";
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+	};
+
+	smp2p-mpss {
+		compatible = "qcom,smp2p";
+		qcom,smem = <435>, <428>;
+
+		interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
+
+		mboxes = <&apss_shared 14>;
+
+		qcom,local-pid = <0>;
+		qcom,remote-pid = <1>;
+
+		modem_smp2p_out: master-kernel {
+			qcom,entry-name = "master-kernel";
+			#qcom,smem-state-cells = <1>;
+		};
+
+		modem_smp2p_in: slave-kernel {
+			qcom,entry-name = "slave-kernel";
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+	};
+
+	smp2p-slpi {
+		compatible = "qcom,smp2p";
+		qcom,smem = <481>, <430>;
+
+		interrupts = <GIC_SPI 172 IRQ_TYPE_EDGE_RISING>;
+
+		mboxes = <&apss_shared 26>;
+
+		qcom,local-pid = <0>;
+		qcom,remote-pid = <3>;
+
+		slpi_smp2p_out: master-kernel {
+			qcom,entry-name = "master-kernel";
+			#qcom,smem-state-cells = <1>;
+		};
+
+		slpi_smp2p_in: slave-kernel {
+			qcom,entry-name = "slave-kernel";
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+	};
+
 	soc: soc@0 {
 		#address-cells = <2>;
 		#size-cells = <2>;
@@ -306,8 +413,8 @@ qupv3_id_1: geniqup@ac0000 {
 			compatible = "qcom,geni-se-qup";
 			reg = <0x0 0x00ac0000 0x0 0x6000>;
 			clock-names = "m-ahb", "s-ahb";
-			clocks = <&gcc 123>,
-				 <&gcc 124>;
+			clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+				 <&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
 			#address-cells = <2>;
 			#size-cells = <2>;
 			ranges;
@@ -317,17 +424,120 @@ uart2: serial@a90000 {
 				compatible = "qcom,geni-debug-uart";
 				reg = <0x0 0x00a90000 0x0 0x4000>;
 				clock-names = "se";
-				clocks = <&gcc 105>;
+				clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
 				interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
 				status = "disabled";
 			};
 		};
 
+		ufs_mem_hc: ufshc@1d84000 {
+			compatible = "qcom,sm8150-ufshc", "qcom,ufshc",
+				     "jedec,ufs-2.0";
+			reg = <0 0x01d84000 0 0x2500>;
+			interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+			phys = <&ufs_mem_phy_lanes>;
+			phy-names = "ufsphy";
+			lanes-per-direction = <2>;
+			#reset-cells = <1>;
+			resets = <&gcc GCC_UFS_PHY_BCR>;
+			reset-names = "rst";
+
+			clock-names =
+				"core_clk",
+				"bus_aggr_clk",
+				"iface_clk",
+				"core_clk_unipro",
+				"ref_clk",
+				"tx_lane0_sync_clk",
+				"rx_lane0_sync_clk",
+				"rx_lane1_sync_clk";
+			clocks =
+				<&gcc GCC_UFS_PHY_AXI_CLK>,
+				<&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+				<&gcc GCC_UFS_PHY_AHB_CLK>,
+				<&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+				<&rpmhcc RPMH_CXO_CLK>,
+				<&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+				<&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
+				<&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
+			freq-table-hz =
+				<37500000 300000000>,
+				<0 0>,
+				<0 0>,
+				<37500000 300000000>,
+				<0 0>,
+				<0 0>,
+				<0 0>,
+				<0 0>;
+
+			status = "disabled";
+		};
+
+		ufs_mem_phy: phy@1d87000 {
+			compatible = "qcom,sm8150-qmp-ufs-phy";
+			reg = <0 0x01d87000 0 0x1c0>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			clock-names = "ref",
+				      "ref_aux";
+			clocks = <&gcc GCC_UFS_MEM_CLKREF_CLK>,
+				 <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+
+			resets = <&ufs_mem_hc 0>;
+			reset-names = "ufsphy";
+			status = "disabled";
+
+			ufs_mem_phy_lanes: lanes@1d87400 {
+				reg = <0 0x01d87400 0 0x108>,
+				      <0 0x01d87600 0 0x1e0>,
+				      <0 0x01d87c00 0 0x1dc>,
+				      <0 0x01d87800 0 0x108>,
+				      <0 0x01d87a00 0 0x1e0>;
+				#phy-cells = <0>;
+			};
+		};
+
 		tcsr_mutex_regs: syscon@1f40000 {
 			compatible = "syscon";
 			reg = <0x0 0x01f40000 0x0 0x40000>;
 		};
 
+		remoteproc_slpi: remoteproc@2400000 {
+			compatible = "qcom,sm8150-slpi-pas";
+			reg = <0x0 0x02400000 0x0 0x4040>;
+
+			interrupts-extended = <&intc GIC_SPI 494 IRQ_TYPE_EDGE_RISING>,
+					      <&slpi_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+					      <&slpi_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+					      <&slpi_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+					      <&slpi_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog", "fatal", "ready",
+					  "handover", "stop-ack";
+
+			clocks = <&rpmhcc RPMH_CXO_CLK>;
+			clock-names = "xo";
+
+			power-domains = <&aoss_qmp AOSS_QMP_LS_SLPI>,
+					<&rpmhpd 3>,
+					<&rpmhpd 2>;
+			power-domain-names = "load_state", "lcx", "lmx";
+
+			memory-region = <&slpi_mem>;
+
+			qcom,smem-states = <&slpi_smp2p_out 0>;
+			qcom,smem-state-names = "stop";
+
+			status = "disabled";
+
+			glink-edge {
+				interrupts = <GIC_SPI 170 IRQ_TYPE_EDGE_RISING>;
+				label = "dsps";
+				qcom,remote-pid = <3>;
+				mboxes = <&apss_shared 24>;
+			};
+		};
+
 		tlmm: pinctrl@3100000 {
 			compatible = "qcom,sm8150-pinctrl";
 			reg = <0x0 0x03100000 0x0 0x300000>,
@@ -343,6 +553,74 @@ tlmm: pinctrl@3100000 {
 			#interrupt-cells = <2>;
 		};
 
+		remoteproc_mpss: remoteproc@4080000 {
+			compatible = "qcom,sm8150-mpss-pas";
+			reg = <0x0 0x04080000 0x0 0x4040>;
+
+			interrupts-extended = <&intc GIC_SPI 266 IRQ_TYPE_EDGE_RISING>,
+					      <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+					      <&modem_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+					      <&modem_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+					      <&modem_smp2p_in 3 IRQ_TYPE_EDGE_RISING>,
+					      <&modem_smp2p_in 7 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog", "fatal", "ready", "handover",
+					  "stop-ack", "shutdown-ack";
+
+			clocks = <&rpmhcc RPMH_CXO_CLK>;
+			clock-names = "xo";
+
+			power-domains = <&aoss_qmp AOSS_QMP_LS_MODEM>,
+					<&rpmhpd 7>,
+					<&rpmhpd 0>;
+			power-domain-names = "load_state", "cx", "mss";
+
+			memory-region = <&mpss_mem>;
+
+			qcom,smem-states = <&modem_smp2p_out 0>;
+			qcom,smem-state-names = "stop";
+
+			glink-edge {
+				interrupts = <GIC_SPI 449 IRQ_TYPE_EDGE_RISING>;
+				label = "modem";
+				qcom,remote-pid = <1>;
+				mboxes = <&apss_shared 12>;
+			};
+		};
+
+		remoteproc_cdsp: remoteproc@8300000 {
+			compatible = "qcom,sm8150-cdsp-pas";
+			reg = <0x0 0x08300000 0x0 0x4040>;
+
+			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+					      <&cdsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+					      <&cdsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+					      <&cdsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+					      <&cdsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog", "fatal", "ready",
+					  "handover", "stop-ack";
+
+			clocks = <&rpmhcc RPMH_CXO_CLK>;
+			clock-names = "xo";
+
+			power-domains = <&aoss_qmp AOSS_QMP_LS_CDSP>,
+					<&rpmhpd 7>;
+			power-domain-names = "load_state", "cx";
+
+			memory-region = <&cdsp_mem>;
+
+			qcom,smem-states = <&cdsp_smp2p_out 0>;
+			qcom,smem-state-names = "stop";
+
+			status = "disabled";
+
+			glink-edge {
+				interrupts = <GIC_SPI 574 IRQ_TYPE_EDGE_RISING>;
+				label = "cdsp";
+				qcom,remote-pid = <5>;
+				mboxes = <&apss_shared 4>;
+			};
+		};
+
 		aoss_qmp: power-controller@c300000 {
 			compatible = "qcom,sm8150-aoss-qmp";
 			reg = <0x0 0x0c300000 0x0 0x100000>;
@@ -372,6 +650,40 @@ spmi_bus: spmi@c440000 {
 			cell-index = <0>;
 		};
 
+		remoteproc_adsp: remoteproc@17300000 {
+			compatible = "qcom,sm8150-adsp-pas";
+			reg = <0x0 0x17300000 0x0 0x4040>;
+
+			interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
+					      <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+					      <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+					      <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+					      <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog", "fatal", "ready",
+					  "handover", "stop-ack";
+
+			clocks = <&rpmhcc RPMH_CXO_CLK>;
+			clock-names = "xo";
+
+			power-domains = <&aoss_qmp AOSS_QMP_LS_LPASS>,
+					<&rpmhpd 7>;
+			power-domain-names = "load_state", "cx";
+
+			memory-region = <&adsp_mem>;
+
+			qcom,smem-states = <&adsp_smp2p_out 0>;
+			qcom,smem-state-names = "stop";
+
+			status = "disabled";
+
+			glink-edge {
+				interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+				label = "lpass";
+				qcom,remote-pid = <2>;
+				mboxes = <&apss_shared 8>;
+			};
+		};
+
 		intc: interrupt-controller@17a00000 {
 			compatible = "arm,gic-v3";
 			interrupt-controller;
@@ -387,6 +699,12 @@ apss_shared: mailbox@17c00000 {
 			#mbox-cells = <1>;
 		};
 
+		watchdog@17c10000 {
+			compatible = "qcom,apss-wdt-sm8150", "qcom,kpss-wdt";
+			reg = <0 0x17c10000 0 0x1000>;
+			clocks = <&sleep_clk>;
+		};
+
 		timer@17c20000 {
 			#address-cells = <2>;
 			#size-cells = <2>;
@@ -469,6 +787,73 @@ rpmhcc: clock-controller {
 				clock-names = "xo";
 				clocks = <&xo_board>;
 			};
+
+			rpmhpd: power-controller {
+				compatible = "qcom,sm8150-rpmhpd";
+				#power-domain-cells = <1>;
+				operating-points-v2 = <&rpmhpd_opp_table>;
+
+				rpmhpd_opp_table: opp-table {
+					compatible = "operating-points-v2";
+
+					rpmhpd_opp_ret: opp1 {
+						opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
+					};
+
+					rpmhpd_opp_min_svs: opp2 {
+						opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+					};
+
+					rpmhpd_opp_low_svs: opp3 {
+						opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+					};
+
+					rpmhpd_opp_svs: opp4 {
+						opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+					};
+
+					rpmhpd_opp_svs_l1: opp5 {
+						opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+					};
+
+					rpmhpd_opp_svs_l2: opp6 {
+						opp-level = <224>;
+					};
+
+					rpmhpd_opp_nom: opp7 {
+						opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+					};
+
+					rpmhpd_opp_nom_l1: opp8 {
+						opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+					};
+
+					rpmhpd_opp_nom_l2: opp9 {
+						opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
+					};
+
+					rpmhpd_opp_turbo: opp10 {
+						opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+					};
+
+					rpmhpd_opp_turbo_l1: opp11 {
+						opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+					};
+				};
+			};
+		};
+
+		cpufreq_hw: cpufreq@18323000 {
+			compatible = "qcom,cpufreq-hw";
+			reg = <0 0x18323000 0 0x1400>, <0 0x18325800 0 0x1400>,
+			      <0 0x18327800 0 0x1400>;
+			reg-names = "freq-domain0", "freq-domain1",
+				    "freq-domain2";
+
+			clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>;
+			clock-names = "xo", "alternate";
+
+			#freq-domain-cells = <1>;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index 8fdbd22..2153842 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -3,22 +3,21 @@
 dtb-$(CONFIG_ARCH_R8A774A1) += r8a774a1-hihope-rzg2m-ex.dtb
 dtb-$(CONFIG_ARCH_R8A774B1) += r8a774b1-hihope-rzg2n.dtb
 dtb-$(CONFIG_ARCH_R8A774B1) += r8a774b1-hihope-rzg2n-ex.dtb
-dtb-$(CONFIG_ARCH_R8A774C0) += r8a774c0-cat874.dtb r8a774c0-ek874.dtb
-dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-x.dtb r8a7795-h3ulcb.dtb
-dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-h3ulcb-kf.dtb
-dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-xs.dtb
-dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-es1-salvator-x.dtb r8a7795-es1-h3ulcb.dtb
-dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-es1-h3ulcb-kf.dtb
-dtb-$(CONFIG_ARCH_R8A7796) += r8a7796-salvator-x.dtb r8a7796-m3ulcb.dtb
-dtb-$(CONFIG_ARCH_R8A7796) += r8a7796-m3ulcb-kf.dtb
-dtb-$(CONFIG_ARCH_R8A7796) += r8a7796-salvator-xs.dtb
-dtb-$(CONFIG_ARCH_R8A77960) += r8a7796-salvator-x.dtb r8a7796-m3ulcb.dtb
-dtb-$(CONFIG_ARCH_R8A77960) += r8a7796-m3ulcb-kf.dtb
-dtb-$(CONFIG_ARCH_R8A77960) += r8a7796-salvator-xs.dtb
+dtb-$(CONFIG_ARCH_R8A774C0) += r8a774c0-cat874.dtb r8a774c0-ek874.dtb \
+			       r8a774c0-ek874-idk-2121wr.dtb
+dtb-$(CONFIG_ARCH_R8A7795) += r8a77950-salvator-x.dtb
+dtb-$(CONFIG_ARCH_R8A7795) += r8a77950-ulcb.dtb r8a77950-ulcb-kf.dtb
+dtb-$(CONFIG_ARCH_R8A7795) += r8a77951-salvator-x.dtb r8a77951-salvator-xs.dtb
+dtb-$(CONFIG_ARCH_R8A7795) += r8a77951-ulcb.dtb r8a77951-ulcb-kf.dtb
+dtb-$(CONFIG_ARCH_R8A77950) += r8a77950-salvator-x.dtb
+dtb-$(CONFIG_ARCH_R8A77950) += r8a77950-ulcb.dtb r8a77950-ulcb-kf.dtb
+dtb-$(CONFIG_ARCH_R8A77951) += r8a77951-salvator-x.dtb r8a77951-salvator-xs.dtb
+dtb-$(CONFIG_ARCH_R8A77951) += r8a77951-ulcb.dtb r8a77951-ulcb-kf.dtb
+dtb-$(CONFIG_ARCH_R8A77960) += r8a77960-salvator-x.dtb r8a77960-salvator-xs.dtb
+dtb-$(CONFIG_ARCH_R8A77960) += r8a77960-ulcb.dtb r8a77960-ulcb-kf.dtb
 dtb-$(CONFIG_ARCH_R8A77961) += r8a77961-salvator-xs.dtb
 dtb-$(CONFIG_ARCH_R8A77965) += r8a77965-salvator-x.dtb r8a77965-salvator-xs.dtb
-dtb-$(CONFIG_ARCH_R8A77965) += r8a77965-m3nulcb.dtb
-dtb-$(CONFIG_ARCH_R8A77965) += r8a77965-m3nulcb-kf.dtb
+dtb-$(CONFIG_ARCH_R8A77965) += r8a77965-ulcb.dtb r8a77965-ulcb-kf.dtb
 dtb-$(CONFIG_ARCH_R8A77970) += r8a77970-eagle.dtb r8a77970-v3msk.dtb
 dtb-$(CONFIG_ARCH_R8A77980) += r8a77980-condor.dtb r8a77980-v3hsk.dtb
 dtb-$(CONFIG_ARCH_R8A77990) += r8a77990-ebisu.dtb
diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
index 2c942a7..bd05690 100644
--- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
@@ -109,8 +109,7 @@ vccq_sdhi0: regulator-vccq-sdhi0 {
 
 		gpios = <&gpio6 30 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	wlan_en_reg: regulator-wlan_en {
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index 34a9f47..8f950da 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -521,12 +521,12 @@ intc_ex: interrupt-controller@e61c0000 {
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			reg = <0 0xe61c0000 0 0x200>;
-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 407>;
 			power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
 			resets = <&cpg 407>;
@@ -837,8 +837,8 @@ usb_dmac0: dma-controller@e65a0000 {
 			compatible = "renesas,r8a774a1-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65a0000 0 0x100>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 330>;
 			power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
@@ -851,8 +851,8 @@ usb_dmac1: dma-controller@e65b0000 {
 			compatible = "renesas,r8a774a1-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65b0000 0 0x100>;
-			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 331>;
 			power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
@@ -878,23 +878,23 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a774a1",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x10000>;
-			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -920,23 +920,23 @@ dmac1: dma-controller@e7300000 {
 			compatible = "renesas,dmac-r8a774a1",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7300000 0 0x10000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -962,23 +962,23 @@ dmac2: dma-controller@e7310000 {
 			compatible = "renesas,dmac-r8a774a1",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7310000 0 0x10000>;
-			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -2075,23 +2075,23 @@ audma0: dma-controller@ec700000 {
 			compatible = "renesas,dmac-r8a774a1",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec700000 0 0x10000>;
-			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -2117,23 +2117,23 @@ audma1: dma-controller@ec720000 {
 			compatible = "renesas,dmac-r8a774a1",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec720000 0 0x10000>;
-			interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -2323,10 +2323,10 @@ pciec0: pcie@fe000000 {
 			#size-cells = <2>;
 			bus-range = <0x00 0xff>;
 			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
-				0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
-				0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
-				0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000>,
+				 <0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000>,
+				 <0x02000000 0 0x30000000 0 0x30000000 0 0x08000000>,
+				 <0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
 			/* Map all possible DDR as inbound ranges */
 			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
 			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
@@ -2350,10 +2350,10 @@ pciec1: pcie@ee800000 {
 			#size-cells = <2>;
 			bus-range = <0x00 0xff>;
 			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xee900000 0 0x00100000
-				0x02000000 0 0xeea00000 0 0xeea00000 0 0x00200000
-				0x02000000 0 0xc0000000 0 0xc0000000 0 0x08000000
-				0x42000000 0 0xc8000000 0 0xc8000000 0 0x08000000>;
+			ranges = <0x01000000 0 0x00000000 0 0xee900000 0 0x00100000>,
+				 <0x02000000 0 0xeea00000 0 0xeea00000 0 0x00200000>,
+				 <0x02000000 0 0xc0000000 0 0xc0000000 0 0x08000000>,
+				 <0x42000000 0 0xc8000000 0 0xc8000000 0 0x08000000>;
 			/* Map all possible DDR as inbound ranges */
 			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
 			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
index fe78387..c40ea30 100644
--- a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
@@ -395,12 +395,12 @@ intc_ex: interrupt-controller@e61c0000 {
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			reg = <0 0xe61c0000 0 0x200>;
-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 407>;
 			power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>;
 			resets = <&cpg 407>;
@@ -711,8 +711,8 @@ usb_dmac0: dma-controller@e65a0000 {
 			compatible = "renesas,r8a774b1-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65a0000 0 0x100>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 330>;
 			power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>;
@@ -725,8 +725,8 @@ usb_dmac1: dma-controller@e65b0000 {
 			compatible = "renesas,r8a774b1-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65b0000 0 0x100>;
-			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 331>;
 			power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>;
@@ -752,23 +752,23 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a774b1",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x10000>;
-			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -794,23 +794,23 @@ dmac1: dma-controller@e7300000 {
 			compatible = "renesas,dmac-r8a774b1",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7300000 0 0x10000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -836,23 +836,23 @@ dmac2: dma-controller@e7310000 {
 			compatible = "renesas,dmac-r8a774b1",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7310000 0 0x10000>;
-			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -1949,23 +1949,23 @@ audma0: dma-controller@ec700000 {
 			compatible = "renesas,dmac-r8a774b1",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec700000 0 0x10000>;
-			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -1983,23 +1983,23 @@ audma1: dma-controller@ec720000 {
 			compatible = "renesas,dmac-r8a774b1",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec720000 0 0x10000>;
-			interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -2192,10 +2192,10 @@ pciec0: pcie@fe000000 {
 			#size-cells = <2>;
 			bus-range = <0x00 0xff>;
 			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
-				  0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
-				  0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
-				  0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000>,
+				 <0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000>,
+				 <0x02000000 0 0x30000000 0 0x30000000 0 0x08000000>,
+				 <0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
 			/* Map all possible DDR as inbound ranges */
 			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
 			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
@@ -2219,10 +2219,10 @@ pciec1: pcie@ee800000 {
 			#size-cells = <2>;
 			bus-range = <0x00 0xff>;
 			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xee900000 0 0x00100000
-				  0x02000000 0 0xeea00000 0 0xeea00000 0 0x00200000
-				  0x02000000 0 0xc0000000 0 0xc0000000 0 0x08000000
-				  0x42000000 0 0xc8000000 0 0xc8000000 0 0x08000000>;
+			ranges = <0x01000000 0 0x00000000 0 0xee900000 0 0x00100000>,
+				 <0x02000000 0 0xeea00000 0 0xeea00000 0 0x00200000>,
+				 <0x02000000 0 0xc0000000 0 0xc0000000 0 0x08000000>,
+				 <0x42000000 0 0xc8000000 0 0xc8000000 0 0x08000000>;
 			/* Map all possible DDR as inbound ranges */
 			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
 			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
index c99b1dec..26aee00 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
+++ b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
@@ -110,8 +110,7 @@ vccq_sdhi0: regulator-vccq-sdhi0 {
 
 		gpios = <&gpio3 13 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	wlan_en_reg: fixedregulator {
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-ek874-idk-2121wr.dts b/arch/arm64/boot/dts/renesas/r8a774c0-ek874-idk-2121wr.dts
new file mode 100644
index 0000000..a7b27d0
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a774c0-ek874-idk-2121wr.dts
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the Silicon Linux RZ/G2E evaluation kit (EK874),
+ * connected to an Advantech IDK-2121WR 21.5" LVDS panel
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+
+#include "r8a774c0-ek874.dts"
+
+/ {
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pwms = <&pwm5 0 50000>;
+
+		brightness-levels = <0 4 8 16 32 64 128 255>;
+		default-brightness-level = <6>;
+
+		power-supply = <&reg_12p0v>;
+		enable-gpios = <&gpio6 12 GPIO_ACTIVE_HIGH>;
+	};
+
+	panel-lvds {
+		compatible = "advantech,idk-2121wr", "panel-lvds";
+
+		width-mm = <476>;
+		height-mm = <268>;
+
+		data-mapping = "vesa-24";
+
+		panel-timing {
+			clock-frequency = <148500000>;
+			hactive = <1920>;
+			vactive = <1080>;
+			hsync-len = <44>;
+			hfront-porch = <88>;
+			hback-porch = <148>;
+			vfront-porch = <4>;
+			vback-porch = <36>;
+			vsync-len = <5>;
+		};
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				dual-lvds-odd-pixels;
+				panel_in0: endpoint {
+					remote-endpoint = <&lvds0_out>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				dual-lvds-even-pixels;
+				panel_in1: endpoint {
+					remote-endpoint = <&lvds1_out>;
+				};
+			};
+		};
+	};
+};
+
+&gpio0 {
+	/*
+	 * When GP0_17 is low LVDS[01] are connected to the LVDS connector
+	 * When GP0_17 is high LVDS[01] are connected to the LT8918L
+	 */
+	lvds-connector-en-gpio{
+		gpio-hog;
+		gpios = <17 GPIO_ACTIVE_HIGH>;
+		output-low;
+		line-name = "lvds-connector-en-gpio";
+	};
+};
+
+&lvds0 {
+	ports {
+		port@1 {
+			lvds0_out: endpoint {
+				remote-endpoint = <&panel_in0>;
+			};
+		};
+	};
+};
+
+&lvds1 {
+	status = "okay";
+
+	clocks = <&cpg CPG_MOD 727>, <&x13_clk>, <&extal_clk>;
+	clock-names = "fck", "dclkin.0", "extal";
+
+	ports {
+		port@1 {
+			lvds1_out: endpoint {
+				remote-endpoint = <&panel_in1>;
+			};
+		};
+	};
+};
+
+&pfc {
+	pwm5_pins: pwm5 {
+		groups = "pwm5_a";
+		function = "pwm5";
+	};
+};
+
+&pwm5 {
+	pinctrl-0 = <&pwm5_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index c7bdc36..a53cd5f 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -369,12 +369,12 @@ intc_ex: interrupt-controller@e61c0000 {
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			reg = <0 0xe61c0000 0 0x200>;
-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 407>;
 			power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
 			resets = <&cpg 407>;
@@ -697,8 +697,8 @@ usb_dmac0: dma-controller@e65a0000 {
 			compatible = "renesas,r8a774c0-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65a0000 0 0x100>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 330>;
 			power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
@@ -711,8 +711,8 @@ usb_dmac1: dma-controller@e65b0000 {
 			compatible = "renesas,r8a774c0-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65b0000 0 0x100>;
-			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 331>;
 			power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
@@ -725,23 +725,23 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a774c0",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x10000>;
-			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -767,23 +767,23 @@ dmac1: dma-controller@e7300000 {
 			compatible = "renesas,dmac-r8a774c0",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7300000 0 0x10000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -809,23 +809,23 @@ dmac2: dma-controller@e7310000 {
 			compatible = "renesas,dmac-r8a774c0",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7310000 0 0x10000>;
-			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -1521,23 +1521,23 @@ audma0: dma-controller@ec700000 {
 			compatible = "renesas,dmac-r8a774c0",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec700000 0 0x10000>;
-			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -1679,10 +1679,10 @@ pciec0: pcie@fe000000 {
 			#size-cells = <2>;
 			bus-range = <0x00 0xff>;
 			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
-				  0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
-				  0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
-				  0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000>,
+				 <0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000>,
+				 <0x02000000 0 0x30000000 0 0x30000000 0 0x08000000>,
+				 <0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
 			/* Map all possible DDR as inbound ranges */
 			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>;
 			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-es1-h3ulcb-kf.dts b/arch/arm64/boot/dts/renesas/r8a7795-es1-h3ulcb-kf.dts
deleted file mode 100644
index 2f24dfc..0000000
--- a/arch/arm64/boot/dts/renesas/r8a7795-es1-h3ulcb-kf.dts
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the H3ULCB Kingfisher board
- *
- * Copyright (C) 2017 Renesas Electronics Corp.
- * Copyright (C) 2017 Cogent Embedded, Inc.
- */
-
-#include "r8a7795-es1-h3ulcb.dts"
-#include "ulcb-kf.dtsi"
-
-/ {
-	model = "Renesas H3ULCB Kingfisher board based on r8a7795 ES1.x";
-	compatible = "shimafuji,kingfisher", "renesas,h3ulcb",
-		     "renesas,r8a7795";
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-es1-h3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7795-es1-h3ulcb.dts
deleted file mode 100644
index 598b981..0000000
--- a/arch/arm64/boot/dts/renesas/r8a7795-es1-h3ulcb.dts
+++ /dev/null
@@ -1,37 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the H3ULCB (R-Car Starter Kit Premier) board
- *
- * Copyright (C) 2016 Renesas Electronics Corp.
- * Copyright (C) 2016 Cogent Embedded, Inc.
- */
-
-/dts-v1/;
-#include "r8a7795-es1.dtsi"
-#include "ulcb.dtsi"
-
-/ {
-	model = "Renesas H3ULCB board based on r8a7795 ES1.x";
-	compatible = "renesas,h3ulcb", "renesas,r8a7795";
-
-	memory@48000000 {
-		device_type = "memory";
-		/* first 128MB is reserved for secure area. */
-		reg = <0x0 0x48000000 0x0 0x38000000>;
-	};
-
-	memory@500000000 {
-		device_type = "memory";
-		reg = <0x5 0x00000000 0x0 0x40000000>;
-	};
-
-	memory@600000000 {
-		device_type = "memory";
-		reg = <0x6 0x00000000 0x0 0x40000000>;
-	};
-
-	memory@700000000 {
-		device_type = "memory";
-		reg = <0x7 0x00000000 0x0 0x40000000>;
-	};
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-es1-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7795-es1-salvator-x.dts
deleted file mode 100644
index c729686..0000000
--- a/arch/arm64/boot/dts/renesas/r8a7795-es1-salvator-x.dts
+++ /dev/null
@@ -1,157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the Salvator-X board with R-Car H3 ES1.x
- *
- * Copyright (C) 2015 Renesas Electronics Corp.
- */
-
-/dts-v1/;
-#include "r8a7795-es1.dtsi"
-#include "salvator-x.dtsi"
-
-/ {
-	model = "Renesas Salvator-X board based on r8a7795 ES1.x";
-	compatible = "renesas,salvator-x", "renesas,r8a7795";
-
-	memory@48000000 {
-		device_type = "memory";
-		/* first 128MB is reserved for secure area. */
-		reg = <0x0 0x48000000 0x0 0x38000000>;
-	};
-
-	memory@500000000 {
-		device_type = "memory";
-		reg = <0x5 0x00000000 0x0 0x40000000>;
-	};
-
-	memory@600000000 {
-		device_type = "memory";
-		reg = <0x6 0x00000000 0x0 0x40000000>;
-	};
-
-	memory@700000000 {
-		device_type = "memory";
-		reg = <0x7 0x00000000 0x0 0x40000000>;
-	};
-};
-
-&du {
-	clocks = <&cpg CPG_MOD 724>,
-		 <&cpg CPG_MOD 723>,
-		 <&cpg CPG_MOD 722>,
-		 <&cpg CPG_MOD 721>,
-		 <&versaclock5 1>,
-		 <&x21_clk>,
-		 <&x22_clk>,
-		 <&versaclock5 2>;
-	clock-names = "du.0", "du.1", "du.2", "du.3",
-		      "dclkin.0", "dclkin.1", "dclkin.2", "dclkin.3";
-};
-
-&ehci2 {
-	status = "okay";
-};
-
-&hdmi0 {
-	status = "okay";
-
-	ports {
-		port@1 {
-			reg = <1>;
-			rcar_dw_hdmi0_out: endpoint {
-				remote-endpoint = <&hdmi0_con>;
-			};
-		};
-		port@2 {
-			reg = <2>;
-			dw_hdmi0_snd_in: endpoint {
-				remote-endpoint = <&rsnd_endpoint1>;
-			};
-		};
-	};
-};
-
-&hdmi0_con {
-	remote-endpoint = <&rcar_dw_hdmi0_out>;
-};
-
-&hdmi1 {
-	status = "okay";
-
-	ports {
-		port@1 {
-			reg = <1>;
-			rcar_dw_hdmi1_out: endpoint {
-				remote-endpoint = <&hdmi1_con>;
-			};
-		};
-		port@2 {
-			reg = <2>;
-			dw_hdmi1_snd_in: endpoint {
-				remote-endpoint = <&rsnd_endpoint2>;
-			};
-		};
-	};
-};
-
-&hdmi1_con {
-	remote-endpoint = <&rcar_dw_hdmi1_out>;
-};
-
-&ohci2 {
-	status = "okay";
-};
-
-&pfc {
-	usb2_pins: usb2 {
-		groups = "usb2";
-		function = "usb2";
-	};
-};
-
-&rcar_sound {
-	ports {
-		/* rsnd_port0 is on salvator-common */
-		rsnd_port1: port@1 {
-			reg = <1>;
-			rsnd_endpoint1: endpoint {
-				remote-endpoint = <&dw_hdmi0_snd_in>;
-
-				dai-format = "i2s";
-				bitclock-master = <&rsnd_endpoint1>;
-				frame-master = <&rsnd_endpoint1>;
-
-				playback = <&ssi2>;
-			};
-		};
-		rsnd_port2: port@2 {
-			reg = <2>;
-			rsnd_endpoint2: endpoint {
-				remote-endpoint = <&dw_hdmi1_snd_in>;
-
-				dai-format = "i2s";
-				bitclock-master = <&rsnd_endpoint2>;
-				frame-master = <&rsnd_endpoint2>;
-
-				playback = <&ssi3>;
-			};
-		};
-	};
-};
-
-&sata {
-	status = "okay";
-};
-
-&sound_card {
-	dais = <&rsnd_port0	/* ak4613 */
-		&rsnd_port1	/* HDMI0  */
-		&rsnd_port2>;	/* HDMI1  */
-};
-
-&usb2_phy2 {
-	pinctrl-0 = <&usb2_pins>;
-	pinctrl-names = "default";
-
-	status = "okay";
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-es1.dtsi b/arch/arm64/boot/dts/renesas/r8a7795-es1.dtsi
deleted file mode 100644
index 14d8513..0000000
--- a/arch/arm64/boot/dts/renesas/r8a7795-es1.dtsi
+++ /dev/null
@@ -1,319 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the R-Car H3 (R8A77950) ES1.x SoC
- *
- * Copyright (C) 2015 Renesas Electronics Corp.
- */
-
-#include "r8a7795.dtsi"
-
-&audma0 {
-	iommus = <&ipmmu_mp1 0>, <&ipmmu_mp1 1>,
-	       <&ipmmu_mp1 2>, <&ipmmu_mp1 3>,
-	       <&ipmmu_mp1 4>, <&ipmmu_mp1 5>,
-	       <&ipmmu_mp1 6>, <&ipmmu_mp1 7>,
-	       <&ipmmu_mp1 8>, <&ipmmu_mp1 9>,
-	       <&ipmmu_mp1 10>, <&ipmmu_mp1 11>,
-	       <&ipmmu_mp1 12>, <&ipmmu_mp1 13>,
-	       <&ipmmu_mp1 14>, <&ipmmu_mp1 15>;
-};
-
-&audma1 {
-	iommus = <&ipmmu_mp1 16>, <&ipmmu_mp1 17>,
-	       <&ipmmu_mp1 18>, <&ipmmu_mp1 19>,
-	       <&ipmmu_mp1 20>, <&ipmmu_mp1 21>,
-	       <&ipmmu_mp1 22>, <&ipmmu_mp1 23>,
-	       <&ipmmu_mp1 24>, <&ipmmu_mp1 25>,
-	       <&ipmmu_mp1 26>, <&ipmmu_mp1 27>,
-	       <&ipmmu_mp1 28>, <&ipmmu_mp1 29>,
-	       <&ipmmu_mp1 30>, <&ipmmu_mp1 31>;
-};
-
-&du {
-	vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>, <&vspd3 0>;
-};
-
-&fcpvb1 {
-	iommus = <&ipmmu_vp0 7>;
-};
-
-&fcpf1 {
-	iommus = <&ipmmu_vp0 1>;
-};
-
-&fcpvi1 {
-	iommus = <&ipmmu_vp0 9>;
-};
-
-&fcpvd2 {
-	iommus = <&ipmmu_vi0 10>;
-};
-
-&gpio1 {
-	gpio-ranges = <&pfc 0 32 28>;
-};
-
-&ipmmu_vi0 {
-	renesas,ipmmu-main = <&ipmmu_mm 11>;
-};
-
-&ipmmu_vp0 {
-	renesas,ipmmu-main = <&ipmmu_mm 12>;
-};
-
-&ipmmu_vc0 {
-	renesas,ipmmu-main = <&ipmmu_mm 9>;
-};
-
-&ipmmu_vc1 {
-	renesas,ipmmu-main = <&ipmmu_mm 10>;
-};
-
-&ipmmu_rt {
-	renesas,ipmmu-main = <&ipmmu_mm 7>;
-};
-
-&soc {
-	/delete-node/ dma-controller@e6460000;
-	/delete-node/ dma-controller@e6470000;
-
-	ipmmu_mp1: mmu@ec680000 {
-		compatible = "renesas,ipmmu-r8a7795";
-		reg = <0 0xec680000 0 0x1000>;
-		renesas,ipmmu-main = <&ipmmu_mm 5>;
-		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-		#iommu-cells = <1>;
-	};
-
-	ipmmu_sy: mmu@e7730000 {
-		compatible = "renesas,ipmmu-r8a7795";
-		reg = <0 0xe7730000 0 0x1000>;
-		renesas,ipmmu-main = <&ipmmu_mm 8>;
-		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-		#iommu-cells = <1>;
-	};
-
-	/delete-node/ mmu@fd950000;
-	/delete-node/ mmu@fd960000;
-	/delete-node/ mmu@fd970000;
-	/delete-node/ mmu@febe0000;
-	/delete-node/ mmu@fe980000;
-
-	xhci1: usb@ee040000 {
-		compatible = "renesas,xhci-r8a7795", "renesas,rcar-gen3-xhci";
-		reg = <0 0xee040000 0 0xc00>;
-		interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&cpg CPG_MOD 327>;
-		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-		resets = <&cpg 327>;
-		status = "disabled";
-	};
-
-	/delete-node/ usb@e659c000;
-	/delete-node/ usb@ee0e0000;
-	/delete-node/ usb@ee0e0100;
-
-	/delete-node/ usb-phy@ee0e0200;
-
-	fdp1@fe948000 {
-		compatible = "renesas,fdp1";
-		reg = <0 0xfe948000 0 0x2400>;
-		interrupts = <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&cpg CPG_MOD 117>;
-		power-domains = <&sysc R8A7795_PD_A3VP>;
-		resets = <&cpg 117>;
-		renesas,fcp = <&fcpf2>;
-	};
-
-	fcpf2: fcp@fe952000 {
-		compatible = "renesas,fcpf";
-		reg = <0 0xfe952000 0 0x200>;
-		clocks = <&cpg CPG_MOD 613>;
-		power-domains = <&sysc R8A7795_PD_A3VP>;
-		resets = <&cpg 613>;
-		iommus = <&ipmmu_vp0 2>;
-	};
-
-	fcpvd3: fcp@fea3f000 {
-		compatible = "renesas,fcpv";
-		reg = <0 0xfea3f000 0 0x200>;
-		clocks = <&cpg CPG_MOD 600>;
-		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-		resets = <&cpg 600>;
-		iommus = <&ipmmu_vi0 11>;
-	};
-
-	fcpvi2: fcp@fe9cf000 {
-		compatible = "renesas,fcpv";
-		reg = <0 0xfe9cf000 0 0x200>;
-		clocks = <&cpg CPG_MOD 609>;
-		power-domains = <&sysc R8A7795_PD_A3VP>;
-		resets = <&cpg 609>;
-		iommus = <&ipmmu_vp0 10>;
-	};
-
-	vspd3: vsp@fea38000 {
-		compatible = "renesas,vsp2";
-		reg = <0 0xfea38000 0 0x5000>;
-		interrupts = <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&cpg CPG_MOD 620>;
-		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-		resets = <&cpg 620>;
-
-		renesas,fcp = <&fcpvd3>;
-	};
-
-	vspi2: vsp@fe9c0000 {
-		compatible = "renesas,vsp2";
-		reg = <0 0xfe9c0000 0 0x8000>;
-		interrupts = <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&cpg CPG_MOD 629>;
-		power-domains = <&sysc R8A7795_PD_A3VP>;
-		resets = <&cpg 629>;
-
-		renesas,fcp = <&fcpvi2>;
-	};
-
-	csi21: csi2@fea90000 {
-		compatible = "renesas,r8a7795-csi2";
-		reg = <0 0xfea90000 0 0x10000>;
-		interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&cpg CPG_MOD 713>;
-		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-		resets = <&cpg 713>;
-		status = "disabled";
-
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@1 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				reg = <1>;
-
-				csi21vin0: endpoint@0 {
-					reg = <0>;
-					remote-endpoint = <&vin0csi21>;
-				};
-				csi21vin1: endpoint@1 {
-					reg = <1>;
-					remote-endpoint = <&vin1csi21>;
-				};
-				csi21vin2: endpoint@2 {
-					reg = <2>;
-					remote-endpoint = <&vin2csi21>;
-				};
-				csi21vin3: endpoint@3 {
-					reg = <3>;
-					remote-endpoint = <&vin3csi21>;
-				};
-				csi21vin4: endpoint@4 {
-					reg = <4>;
-					remote-endpoint = <&vin4csi21>;
-				};
-				csi21vin5: endpoint@5 {
-					reg = <5>;
-					remote-endpoint = <&vin5csi21>;
-				};
-				csi21vin6: endpoint@6 {
-					reg = <6>;
-					remote-endpoint = <&vin6csi21>;
-				};
-				csi21vin7: endpoint@7 {
-					reg = <7>;
-					remote-endpoint = <&vin7csi21>;
-				};
-			};
-		};
-	};
-};
-
-&vin0 {
-	ports {
-		port@1 {
-			vin0csi21: endpoint@1 {
-				reg = <1>;
-				remote-endpoint = <&csi21vin0>;
-			};
-		};
-	};
-};
-
-&vin1 {
-	ports {
-		port@1 {
-			vin1csi21: endpoint@1 {
-				reg = <1>;
-				remote-endpoint = <&csi21vin1>;
-			};
-		};
-	};
-};
-
-&vin2 {
-	ports {
-		port@1 {
-			vin2csi21: endpoint@1 {
-				reg = <1>;
-				remote-endpoint = <&csi21vin2>;
-			};
-		};
-	};
-};
-
-&vin3 {
-	ports {
-		port@1 {
-			vin3csi21: endpoint@1 {
-				reg = <1>;
-				remote-endpoint = <&csi21vin3>;
-			};
-		};
-	};
-};
-
-&vin4 {
-	ports {
-		port@1 {
-			vin4csi21: endpoint@1 {
-				reg = <1>;
-				remote-endpoint = <&csi21vin4>;
-			};
-		};
-	};
-};
-
-&vin5 {
-	ports {
-		port@1 {
-			vin5csi21: endpoint@1 {
-				reg = <1>;
-				remote-endpoint = <&csi21vin5>;
-			};
-		};
-	};
-};
-
-&vin6 {
-	ports {
-		port@1 {
-			vin6csi21: endpoint@1 {
-				reg = <1>;
-				remote-endpoint = <&csi21vin6>;
-			};
-		};
-	};
-};
-
-&vin7 {
-	ports {
-		port@1 {
-			vin7csi21: endpoint@1 {
-				reg = <1>;
-				remote-endpoint = <&csi21vin7>;
-			};
-		};
-	};
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb-kf.dts b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb-kf.dts
deleted file mode 100644
index 80791ed..0000000
--- a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb-kf.dts
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the H3ULCB Kingfisher board
- *
- * Copyright (C) 2017 Renesas Electronics Corp.
- * Copyright (C) 2017 Cogent Embedded, Inc.
- */
-
-#include "r8a7795-h3ulcb.dts"
-#include "ulcb-kf.dtsi"
-
-/ {
-	model = "Renesas H3ULCB Kingfisher board based on r8a7795 ES2.0+";
-	compatible = "shimafuji,kingfisher", "renesas,h3ulcb",
-		     "renesas,r8a7795";
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
deleted file mode 100644
index 54515ea..0000000
--- a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
+++ /dev/null
@@ -1,50 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the H3ULCB (R-Car Starter Kit Premier) board
- *
- * Copyright (C) 2016 Renesas Electronics Corp.
- * Copyright (C) 2016 Cogent Embedded, Inc.
- */
-
-/dts-v1/;
-#include "r8a7795.dtsi"
-#include "ulcb.dtsi"
-
-/ {
-	model = "Renesas H3ULCB board based on r8a7795 ES2.0+";
-	compatible = "renesas,h3ulcb", "renesas,r8a7795";
-
-	memory@48000000 {
-		device_type = "memory";
-		/* first 128MB is reserved for secure area. */
-		reg = <0x0 0x48000000 0x0 0x38000000>;
-	};
-
-	memory@500000000 {
-		device_type = "memory";
-		reg = <0x5 0x00000000 0x0 0x40000000>;
-	};
-
-	memory@600000000 {
-		device_type = "memory";
-		reg = <0x6 0x00000000 0x0 0x40000000>;
-	};
-
-	memory@700000000 {
-		device_type = "memory";
-		reg = <0x7 0x00000000 0x0 0x40000000>;
-	};
-};
-
-&du {
-	clocks = <&cpg CPG_MOD 724>,
-		 <&cpg CPG_MOD 723>,
-		 <&cpg CPG_MOD 722>,
-		 <&cpg CPG_MOD 721>,
-		 <&versaclock5 1>,
-		 <&versaclock5 3>,
-		 <&versaclock5 4>,
-		 <&versaclock5 2>;
-	clock-names = "du.0", "du.1", "du.2", "du.3",
-		      "dclkin.0", "dclkin.1", "dclkin.2", "dclkin.3";
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
deleted file mode 100644
index 72874f6..0000000
--- a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
+++ /dev/null
@@ -1,157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the Salvator-X board with R-Car H3 ES2.0
- *
- * Copyright (C) 2015 Renesas Electronics Corp.
- */
-
-/dts-v1/;
-#include "r8a7795.dtsi"
-#include "salvator-x.dtsi"
-
-/ {
-	model = "Renesas Salvator-X board based on r8a7795 ES2.0+";
-	compatible = "renesas,salvator-x", "renesas,r8a7795";
-
-	memory@48000000 {
-		device_type = "memory";
-		/* first 128MB is reserved for secure area. */
-		reg = <0x0 0x48000000 0x0 0x38000000>;
-	};
-
-	memory@500000000 {
-		device_type = "memory";
-		reg = <0x5 0x00000000 0x0 0x40000000>;
-	};
-
-	memory@600000000 {
-		device_type = "memory";
-		reg = <0x6 0x00000000 0x0 0x40000000>;
-	};
-
-	memory@700000000 {
-		device_type = "memory";
-		reg = <0x7 0x00000000 0x0 0x40000000>;
-	};
-};
-
-&du {
-	clocks = <&cpg CPG_MOD 724>,
-		 <&cpg CPG_MOD 723>,
-		 <&cpg CPG_MOD 722>,
-		 <&cpg CPG_MOD 721>,
-		 <&versaclock5 1>,
-		 <&x21_clk>,
-		 <&x22_clk>,
-		 <&versaclock5 2>;
-	clock-names = "du.0", "du.1", "du.2", "du.3",
-		      "dclkin.0", "dclkin.1", "dclkin.2", "dclkin.3";
-};
-
-&ehci2 {
-	status = "okay";
-};
-
-&hdmi0 {
-	status = "okay";
-
-	ports {
-		port@1 {
-			reg = <1>;
-			rcar_dw_hdmi0_out: endpoint {
-				remote-endpoint = <&hdmi0_con>;
-			};
-		};
-		port@2 {
-			reg = <2>;
-			dw_hdmi0_snd_in: endpoint {
-				remote-endpoint = <&rsnd_endpoint1>;
-			};
-		};
-	};
-};
-
-&hdmi0_con {
-	remote-endpoint = <&rcar_dw_hdmi0_out>;
-};
-
-&hdmi1 {
-	status = "okay";
-
-	ports {
-		port@1 {
-			reg = <1>;
-			rcar_dw_hdmi1_out: endpoint {
-				remote-endpoint = <&hdmi1_con>;
-			};
-		};
-		port@2 {
-			reg = <2>;
-			dw_hdmi1_snd_in: endpoint {
-				remote-endpoint = <&rsnd_endpoint2>;
-			};
-		};
-	};
-};
-
-&hdmi1_con {
-	remote-endpoint = <&rcar_dw_hdmi1_out>;
-};
-
-&ohci2 {
-	status = "okay";
-};
-
-&pfc {
-	usb2_pins: usb2 {
-		groups = "usb2";
-		function = "usb2";
-	};
-};
-
-&rcar_sound {
-	ports {
-		/* rsnd_port0 is on salvator-common */
-		rsnd_port1: port@1 {
-			reg = <1>;
-			rsnd_endpoint1: endpoint {
-				remote-endpoint = <&dw_hdmi0_snd_in>;
-
-				dai-format = "i2s";
-				bitclock-master = <&rsnd_endpoint1>;
-				frame-master = <&rsnd_endpoint1>;
-
-				playback = <&ssi2>;
-			};
-		};
-		rsnd_port2: port@2 {
-			reg = <2>;
-			rsnd_endpoint2: endpoint {
-				remote-endpoint = <&dw_hdmi1_snd_in>;
-
-				dai-format = "i2s";
-				bitclock-master = <&rsnd_endpoint2>;
-				frame-master = <&rsnd_endpoint2>;
-
-				playback = <&ssi3>;
-			};
-		};
-	};
-};
-
-&sata {
-	status = "okay";
-};
-
-&sound_card {
-	dais = <&rsnd_port0	/* ak4613 */
-		&rsnd_port1	/* HDMI0  */
-		&rsnd_port2>;	/* HDMI1  */
-};
-
-&usb2_phy2 {
-	pinctrl-0 = <&usb2_pins>;
-	pinctrl-names = "default";
-
-	status = "okay";
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a7795-salvator-xs.dts
deleted file mode 100644
index 36667c8..0000000
--- a/arch/arm64/boot/dts/renesas/r8a7795-salvator-xs.dts
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the Salvator-X 2nd version board with R-Car H3 ES2.0
- *
- * Copyright (C) 2015-2017 Renesas Electronics Corp.
- */
-
-/dts-v1/;
-#include "r8a7795.dtsi"
-#include "salvator-xs.dtsi"
-
-/ {
-	model = "Renesas Salvator-X 2nd version board based on r8a7795 ES2.0+";
-	compatible = "renesas,salvator-xs", "renesas,r8a7795";
-
-	memory@48000000 {
-		device_type = "memory";
-		/* first 128MB is reserved for secure area. */
-		reg = <0x0 0x48000000 0x0 0x38000000>;
-	};
-
-	memory@500000000 {
-		device_type = "memory";
-		reg = <0x5 0x00000000 0x0 0x40000000>;
-	};
-
-	memory@600000000 {
-		device_type = "memory";
-		reg = <0x6 0x00000000 0x0 0x40000000>;
-	};
-
-	memory@700000000 {
-		device_type = "memory";
-		reg = <0x7 0x00000000 0x0 0x40000000>;
-	};
-};
-
-&du {
-	clocks = <&cpg CPG_MOD 724>,
-		 <&cpg CPG_MOD 723>,
-		 <&cpg CPG_MOD 722>,
-		 <&cpg CPG_MOD 721>,
-		 <&versaclock6 1>,
-		 <&x21_clk>,
-		 <&x22_clk>,
-		 <&versaclock6 2>;
-	clock-names = "du.0", "du.1", "du.2", "du.3",
-		      "dclkin.0", "dclkin.1", "dclkin.2", "dclkin.3";
-};
-
-&ehci2 {
-	status = "okay";
-};
-
-&ehci3 {
-	dr_mode = "otg";
-	status = "okay";
-};
-
-&hdmi0 {
-	status = "okay";
-
-	ports {
-		port@1 {
-			reg = <1>;
-			rcar_dw_hdmi0_out: endpoint {
-				remote-endpoint = <&hdmi0_con>;
-			};
-		};
-		port@2 {
-			reg = <2>;
-			dw_hdmi0_snd_in: endpoint {
-				remote-endpoint = <&rsnd_endpoint1>;
-			};
-		};
-	};
-};
-
-&hdmi0_con {
-	remote-endpoint = <&rcar_dw_hdmi0_out>;
-};
-
-&hdmi1 {
-	status = "okay";
-
-	ports {
-		port@1 {
-			reg = <1>;
-			rcar_dw_hdmi1_out: endpoint {
-				remote-endpoint = <&hdmi1_con>;
-			};
-		};
-		port@2 {
-			reg = <2>;
-			dw_hdmi1_snd_in: endpoint {
-				remote-endpoint = <&rsnd_endpoint2>;
-			};
-		};
-	};
-};
-
-&hdmi1_con {
-	remote-endpoint = <&rcar_dw_hdmi1_out>;
-};
-
-&hsusb3 {
-	dr_mode = "otg";
-	status = "okay";
-};
-
-&ohci2 {
-	status = "okay";
-};
-
-&ohci3 {
-	dr_mode = "otg";
-	status = "okay";
-};
-
-&pca9654 {
-	pcie_sata_switch {
-		gpio-hog;
-		gpios = <7 GPIO_ACTIVE_HIGH>;
-		output-low; /* enable SATA by default */
-		line-name = "PCIE/SATA switch";
-	};
-};
-
-&pfc {
-	usb2_pins: usb2 {
-		groups = "usb2";
-		function = "usb2";
-	};
-
-	/*
-	 * - On Salvator-X[S], GP6_3[01] are connected to ADV7482 as irq pins
-	 *   (when SW31 is the default setting on Salvator-XS).
-	 * - If SW31 is the default setting, you cannot use USB2.0 ch3 on
-	 *   r8a7795 with Salvator-XS.
-	 *   Hence the SW31 setting must be changed like 2) below.
-	 *   1) Default setting of SW31: ON-ON-OFF-OFF-OFF-OFF:
-	 *	- Connect GP6_3[01] to ADV7842.
-	 *   2) Changed setting of SW31: OFF-OFF-ON-ON-ON-ON:
-	 *	- Connect GP6_3[01] to BD082065 (USB2.0 ch3's host power).
-	 *	- Connect GP6_{04,21} to ADV7842.
-	 */
-	usb2_ch3_pins: usb2_ch3 {
-		groups = "usb2_ch3";
-		function = "usb2_ch3";
-	};
-};
-
-&rcar_sound {
-	ports {
-		/* rsnd_port0 is on salvator-common */
-		rsnd_port1: port@1 {
-			reg = <1>;
-			rsnd_endpoint1: endpoint {
-				remote-endpoint = <&dw_hdmi0_snd_in>;
-
-				dai-format = "i2s";
-				bitclock-master = <&rsnd_endpoint1>;
-				frame-master = <&rsnd_endpoint1>;
-
-				playback = <&ssi2>;
-			};
-		};
-		rsnd_port2: port@2 {
-			reg = <2>;
-			rsnd_endpoint2: endpoint {
-				remote-endpoint = <&dw_hdmi1_snd_in>;
-
-				dai-format = "i2s";
-				bitclock-master = <&rsnd_endpoint2>;
-				frame-master = <&rsnd_endpoint2>;
-
-				playback = <&ssi3>;
-			};
-		};
-	};
-};
-
-/* SW12-7 must be set 'Off' (MD12 set to 1) which is not the default! */
-&sata {
-	status = "okay";
-};
-
-&sound_card {
-	dais = <&rsnd_port0	/* ak4613 */
-		&rsnd_port1	/* HDMI0  */
-		&rsnd_port2>;	/* HDMI1  */
-};
-
-&usb2_phy2 {
-	pinctrl-0 = <&usb2_pins>;
-	pinctrl-names = "default";
-
-	status = "okay";
-};
-
-&usb2_phy3 {
-	pinctrl-0 = <&usb2_ch3_pins>;
-	pinctrl-names = "default";
-
-	status = "okay";
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
deleted file mode 100644
index fde6ec1..0000000
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ /dev/null
@@ -1,3339 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the R-Car H3 (R8A77950) SoC
- *
- * Copyright (C) 2015 Renesas Electronics Corp.
- */
-
-#include <dt-bindings/clock/r8a7795-cpg-mssr.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/power/r8a7795-sysc.h>
-
-#define CPG_AUDIO_CLK_I		R8A7795_CLK_S0D4
-
-/ {
-	compatible = "renesas,r8a7795";
-	#address-cells = <2>;
-	#size-cells = <2>;
-
-	aliases {
-		i2c0 = &i2c0;
-		i2c1 = &i2c1;
-		i2c2 = &i2c2;
-		i2c3 = &i2c3;
-		i2c4 = &i2c4;
-		i2c5 = &i2c5;
-		i2c6 = &i2c6;
-		i2c7 = &i2c_dvfs;
-	};
-
-	/*
-	 * The external audio clocks are configured as 0 Hz fixed frequency
-	 * clocks by default.
-	 * Boards that provide audio clocks should override them.
-	 */
-	audio_clk_a: audio_clk_a {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-
-	audio_clk_b: audio_clk_b {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-
-	audio_clk_c: audio_clk_c {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-
-	/* External CAN clock - to be overridden by boards that provide it */
-	can_clk: can {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-
-	cluster0_opp: opp_table0 {
-		compatible = "operating-points-v2";
-		opp-shared;
-
-		opp-500000000 {
-			opp-hz = /bits/ 64 <500000000>;
-			opp-microvolt = <830000>;
-			clock-latency-ns = <300000>;
-		};
-		opp-1000000000 {
-			opp-hz = /bits/ 64 <1000000000>;
-			opp-microvolt = <830000>;
-			clock-latency-ns = <300000>;
-		};
-		opp-1500000000 {
-			opp-hz = /bits/ 64 <1500000000>;
-			opp-microvolt = <830000>;
-			clock-latency-ns = <300000>;
-			opp-suspend;
-		};
-		opp-1600000000 {
-			opp-hz = /bits/ 64 <1600000000>;
-			opp-microvolt = <900000>;
-			clock-latency-ns = <300000>;
-			turbo-mode;
-		};
-		opp-1700000000 {
-			opp-hz = /bits/ 64 <1700000000>;
-			opp-microvolt = <960000>;
-			clock-latency-ns = <300000>;
-			turbo-mode;
-		};
-	};
-
-	cluster1_opp: opp_table1 {
-		compatible = "operating-points-v2";
-		opp-shared;
-
-		opp-800000000 {
-			opp-hz = /bits/ 64 <800000000>;
-			opp-microvolt = <820000>;
-			clock-latency-ns = <300000>;
-		};
-		opp-1000000000 {
-			opp-hz = /bits/ 64 <1000000000>;
-			opp-microvolt = <820000>;
-			clock-latency-ns = <300000>;
-		};
-		opp-1200000000 {
-			opp-hz = /bits/ 64 <1200000000>;
-			opp-microvolt = <820000>;
-			clock-latency-ns = <300000>;
-		};
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		cpu-map {
-			cluster0 {
-				core0 {
-					cpu = <&a57_0>;
-				};
-				core1 {
-					cpu = <&a57_1>;
-				};
-				core2 {
-					cpu = <&a57_2>;
-				};
-				core3 {
-					cpu = <&a57_3>;
-				};
-			};
-
-			cluster1 {
-				core0 {
-					cpu = <&a53_0>;
-				};
-				core1 {
-					cpu = <&a53_1>;
-				};
-				core2 {
-					cpu = <&a53_2>;
-				};
-				core3 {
-					cpu = <&a53_3>;
-				};
-			};
-		};
-
-		a57_0: cpu@0 {
-			compatible = "arm,cortex-a57";
-			reg = <0x0>;
-			device_type = "cpu";
-			power-domains = <&sysc R8A7795_PD_CA57_CPU0>;
-			next-level-cache = <&L2_CA57>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0>;
-			dynamic-power-coefficient = <854>;
-			clocks = <&cpg CPG_CORE R8A7795_CLK_Z>;
-			operating-points-v2 = <&cluster0_opp>;
-			capacity-dmips-mhz = <1024>;
-			#cooling-cells = <2>;
-		};
-
-		a57_1: cpu@1 {
-			compatible = "arm,cortex-a57";
-			reg = <0x1>;
-			device_type = "cpu";
-			power-domains = <&sysc R8A7795_PD_CA57_CPU1>;
-			next-level-cache = <&L2_CA57>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0>;
-			clocks = <&cpg CPG_CORE R8A7795_CLK_Z>;
-			operating-points-v2 = <&cluster0_opp>;
-			capacity-dmips-mhz = <1024>;
-			#cooling-cells = <2>;
-		};
-
-		a57_2: cpu@2 {
-			compatible = "arm,cortex-a57";
-			reg = <0x2>;
-			device_type = "cpu";
-			power-domains = <&sysc R8A7795_PD_CA57_CPU2>;
-			next-level-cache = <&L2_CA57>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0>;
-			clocks = <&cpg CPG_CORE R8A7795_CLK_Z>;
-			operating-points-v2 = <&cluster0_opp>;
-			capacity-dmips-mhz = <1024>;
-			#cooling-cells = <2>;
-		};
-
-		a57_3: cpu@3 {
-			compatible = "arm,cortex-a57";
-			reg = <0x3>;
-			device_type = "cpu";
-			power-domains = <&sysc R8A7795_PD_CA57_CPU3>;
-			next-level-cache = <&L2_CA57>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0>;
-			clocks = <&cpg CPG_CORE R8A7795_CLK_Z>;
-			operating-points-v2 = <&cluster0_opp>;
-			capacity-dmips-mhz = <1024>;
-			#cooling-cells = <2>;
-		};
-
-		a53_0: cpu@100 {
-			compatible = "arm,cortex-a53";
-			reg = <0x100>;
-			device_type = "cpu";
-			power-domains = <&sysc R8A7795_PD_CA53_CPU0>;
-			next-level-cache = <&L2_CA53>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_1>;
-			#cooling-cells = <2>;
-			dynamic-power-coefficient = <277>;
-			clocks = <&cpg CPG_CORE R8A7795_CLK_Z2>;
-			operating-points-v2 = <&cluster1_opp>;
-			capacity-dmips-mhz = <535>;
-		};
-
-		a53_1: cpu@101 {
-			compatible = "arm,cortex-a53";
-			reg = <0x101>;
-			device_type = "cpu";
-			power-domains = <&sysc R8A7795_PD_CA53_CPU1>;
-			next-level-cache = <&L2_CA53>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_1>;
-			clocks = <&cpg CPG_CORE R8A7795_CLK_Z2>;
-			operating-points-v2 = <&cluster1_opp>;
-			capacity-dmips-mhz = <535>;
-		};
-
-		a53_2: cpu@102 {
-			compatible = "arm,cortex-a53";
-			reg = <0x102>;
-			device_type = "cpu";
-			power-domains = <&sysc R8A7795_PD_CA53_CPU2>;
-			next-level-cache = <&L2_CA53>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_1>;
-			clocks = <&cpg CPG_CORE R8A7795_CLK_Z2>;
-			operating-points-v2 = <&cluster1_opp>;
-			capacity-dmips-mhz = <535>;
-		};
-
-		a53_3: cpu@103 {
-			compatible = "arm,cortex-a53";
-			reg = <0x103>;
-			device_type = "cpu";
-			power-domains = <&sysc R8A7795_PD_CA53_CPU3>;
-			next-level-cache = <&L2_CA53>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_1>;
-			clocks = <&cpg CPG_CORE R8A7795_CLK_Z2>;
-			operating-points-v2 = <&cluster1_opp>;
-			capacity-dmips-mhz = <535>;
-		};
-
-		L2_CA57: cache-controller-0 {
-			compatible = "cache";
-			power-domains = <&sysc R8A7795_PD_CA57_SCU>;
-			cache-unified;
-			cache-level = <2>;
-		};
-
-		L2_CA53: cache-controller-1 {
-			compatible = "cache";
-			power-domains = <&sysc R8A7795_PD_CA53_SCU>;
-			cache-unified;
-			cache-level = <2>;
-		};
-
-		idle-states {
-			entry-method = "psci";
-
-			CPU_SLEEP_0: cpu-sleep-0 {
-				compatible = "arm,idle-state";
-				arm,psci-suspend-param = <0x0010000>;
-				local-timer-stop;
-				entry-latency-us = <400>;
-				exit-latency-us = <500>;
-				min-residency-us = <4000>;
-			};
-
-			CPU_SLEEP_1: cpu-sleep-1 {
-				compatible = "arm,idle-state";
-				arm,psci-suspend-param = <0x0010000>;
-				local-timer-stop;
-				entry-latency-us = <700>;
-				exit-latency-us = <700>;
-				min-residency-us = <5000>;
-			};
-		};
-	};
-
-	extal_clk: extal {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		/* This value must be overridden by the board */
-		clock-frequency = <0>;
-	};
-
-	extalr_clk: extalr {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		/* This value must be overridden by the board */
-		clock-frequency = <0>;
-	};
-
-	/* External PCIe clock - can be overridden by the board */
-	pcie_bus_clk: pcie_bus {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-
-	pmu_a53 {
-		compatible = "arm,cortex-a53-pmu";
-		interrupts-extended = <&gic GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
-				      <&gic GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
-				      <&gic GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
-				      <&gic GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
-		interrupt-affinity = <&a53_0>,
-				     <&a53_1>,
-				     <&a53_2>,
-				     <&a53_3>;
-	};
-
-	pmu_a57 {
-		compatible = "arm,cortex-a57-pmu";
-		interrupts-extended = <&gic GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
-				      <&gic GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
-				      <&gic GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
-				      <&gic GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
-		interrupt-affinity = <&a57_0>,
-				     <&a57_1>,
-				     <&a57_2>,
-				     <&a57_3>;
-	};
-
-	psci {
-		compatible = "arm,psci-1.0", "arm,psci-0.2";
-		method = "smc";
-	};
-
-	/* External SCIF clock - to be overridden by boards that provide it */
-	scif_clk: scif {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-
-	soc: soc {
-		compatible = "simple-bus";
-		interrupt-parent = <&gic>;
-
-		#address-cells = <2>;
-		#size-cells = <2>;
-		ranges;
-
-		rwdt: watchdog@e6020000 {
-			compatible = "renesas,r8a7795-wdt", "renesas,rcar-gen3-wdt";
-			reg = <0 0xe6020000 0 0x0c>;
-			clocks = <&cpg CPG_MOD 402>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 402>;
-			status = "disabled";
-		};
-
-		gpio0: gpio@e6050000 {
-			compatible = "renesas,gpio-r8a7795",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6050000 0 0x50>;
-			interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 0 16>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 912>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 912>;
-		};
-
-		gpio1: gpio@e6051000 {
-			compatible = "renesas,gpio-r8a7795",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6051000 0 0x50>;
-			interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 32 29>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 911>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 911>;
-		};
-
-		gpio2: gpio@e6052000 {
-			compatible = "renesas,gpio-r8a7795",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6052000 0 0x50>;
-			interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 64 15>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 910>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 910>;
-		};
-
-		gpio3: gpio@e6053000 {
-			compatible = "renesas,gpio-r8a7795",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6053000 0 0x50>;
-			interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 96 16>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 909>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 909>;
-		};
-
-		gpio4: gpio@e6054000 {
-			compatible = "renesas,gpio-r8a7795",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6054000 0 0x50>;
-			interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 128 18>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 908>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 908>;
-		};
-
-		gpio5: gpio@e6055000 {
-			compatible = "renesas,gpio-r8a7795",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6055000 0 0x50>;
-			interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 160 26>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 907>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 907>;
-		};
-
-		gpio6: gpio@e6055400 {
-			compatible = "renesas,gpio-r8a7795",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6055400 0 0x50>;
-			interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 192 32>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 906>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 906>;
-		};
-
-		gpio7: gpio@e6055800 {
-			compatible = "renesas,gpio-r8a7795",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6055800 0 0x50>;
-			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 224 4>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 905>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 905>;
-		};
-
-		pfc: pin-controller@e6060000 {
-			compatible = "renesas,pfc-r8a7795";
-			reg = <0 0xe6060000 0 0x50c>;
-		};
-
-		cmt0: timer@e60f0000 {
-			compatible = "renesas,r8a7795-cmt0",
-				     "renesas,rcar-gen3-cmt0";
-			reg = <0 0xe60f0000 0 0x1004>;
-			interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 303>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 303>;
-			status = "disabled";
-		};
-
-		cmt1: timer@e6130000 {
-			compatible = "renesas,r8a7795-cmt1",
-				     "renesas,rcar-gen3-cmt1";
-			reg = <0 0xe6130000 0 0x1004>;
-			interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 302>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 302>;
-			status = "disabled";
-		};
-
-		cmt2: timer@e6140000 {
-			compatible = "renesas,r8a7795-cmt1",
-				     "renesas,rcar-gen3-cmt1";
-			reg = <0 0xe6140000 0 0x1004>;
-			interrupts = <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 301>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 301>;
-			status = "disabled";
-		};
-
-		cmt3: timer@e6148000 {
-			compatible = "renesas,r8a7795-cmt1",
-				     "renesas,rcar-gen3-cmt1";
-			reg = <0 0xe6148000 0 0x1004>;
-			interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 300>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 300>;
-			status = "disabled";
-		};
-
-		cpg: clock-controller@e6150000 {
-			compatible = "renesas,r8a7795-cpg-mssr";
-			reg = <0 0xe6150000 0 0x1000>;
-			clocks = <&extal_clk>, <&extalr_clk>;
-			clock-names = "extal", "extalr";
-			#clock-cells = <2>;
-			#power-domain-cells = <0>;
-			#reset-cells = <1>;
-		};
-
-		rst: reset-controller@e6160000 {
-			compatible = "renesas,r8a7795-rst";
-			reg = <0 0xe6160000 0 0x0200>;
-		};
-
-		sysc: system-controller@e6180000 {
-			compatible = "renesas,r8a7795-sysc";
-			reg = <0 0xe6180000 0 0x0400>;
-			#power-domain-cells = <1>;
-		};
-
-		tsc: thermal@e6198000 {
-			compatible = "renesas,r8a7795-thermal";
-			reg = <0 0xe6198000 0 0x100>,
-			      <0 0xe61a0000 0 0x100>,
-			      <0 0xe61a8000 0 0x100>;
-			interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 522>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 522>;
-			#thermal-sensor-cells = <1>;
-		};
-
-		intc_ex: interrupt-controller@e61c0000 {
-			compatible = "renesas,intc-ex-r8a7795", "renesas,irqc";
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			reg = <0 0xe61c0000 0 0x200>;
-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 407>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 407>;
-		};
-
-		i2c0: i2c@e6500000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,i2c-r8a7795",
-				     "renesas,rcar-gen3-i2c";
-			reg = <0 0xe6500000 0 0x40>;
-			interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 931>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 931>;
-			dmas = <&dmac1 0x91>, <&dmac1 0x90>,
-			       <&dmac2 0x91>, <&dmac2 0x90>;
-			dma-names = "tx", "rx", "tx", "rx";
-			i2c-scl-internal-delay-ns = <110>;
-			status = "disabled";
-		};
-
-		i2c1: i2c@e6508000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,i2c-r8a7795",
-				     "renesas,rcar-gen3-i2c";
-			reg = <0 0xe6508000 0 0x40>;
-			interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 930>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 930>;
-			dmas = <&dmac1 0x93>, <&dmac1 0x92>,
-			       <&dmac2 0x93>, <&dmac2 0x92>;
-			dma-names = "tx", "rx", "tx", "rx";
-			i2c-scl-internal-delay-ns = <6>;
-			status = "disabled";
-		};
-
-		i2c2: i2c@e6510000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,i2c-r8a7795",
-				     "renesas,rcar-gen3-i2c";
-			reg = <0 0xe6510000 0 0x40>;
-			interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 929>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 929>;
-			dmas = <&dmac1 0x95>, <&dmac1 0x94>,
-			       <&dmac2 0x95>, <&dmac2 0x94>;
-			dma-names = "tx", "rx", "tx", "rx";
-			i2c-scl-internal-delay-ns = <6>;
-			status = "disabled";
-		};
-
-		i2c3: i2c@e66d0000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,i2c-r8a7795",
-				     "renesas,rcar-gen3-i2c";
-			reg = <0 0xe66d0000 0 0x40>;
-			interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 928>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 928>;
-			dmas = <&dmac0 0x97>, <&dmac0 0x96>;
-			dma-names = "tx", "rx";
-			i2c-scl-internal-delay-ns = <110>;
-			status = "disabled";
-		};
-
-		i2c4: i2c@e66d8000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,i2c-r8a7795",
-				     "renesas,rcar-gen3-i2c";
-			reg = <0 0xe66d8000 0 0x40>;
-			interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 927>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 927>;
-			dmas = <&dmac0 0x99>, <&dmac0 0x98>;
-			dma-names = "tx", "rx";
-			i2c-scl-internal-delay-ns = <110>;
-			status = "disabled";
-		};
-
-		i2c5: i2c@e66e0000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,i2c-r8a7795",
-				     "renesas,rcar-gen3-i2c";
-			reg = <0 0xe66e0000 0 0x40>;
-			interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 919>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 919>;
-			dmas = <&dmac0 0x9b>, <&dmac0 0x9a>;
-			dma-names = "tx", "rx";
-			i2c-scl-internal-delay-ns = <110>;
-			status = "disabled";
-		};
-
-		i2c6: i2c@e66e8000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,i2c-r8a7795",
-				     "renesas,rcar-gen3-i2c";
-			reg = <0 0xe66e8000 0 0x40>;
-			interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 918>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 918>;
-			dmas = <&dmac0 0x9d>, <&dmac0 0x9c>;
-			dma-names = "tx", "rx";
-			i2c-scl-internal-delay-ns = <6>;
-			status = "disabled";
-		};
-
-		i2c_dvfs: i2c@e60b0000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,iic-r8a7795",
-				     "renesas,rcar-gen3-iic",
-				     "renesas,rmobile-iic";
-			reg = <0 0xe60b0000 0 0x425>;
-			interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 926>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 926>;
-			dmas = <&dmac0 0x11>, <&dmac0 0x10>;
-			dma-names = "tx", "rx";
-			status = "disabled";
-		};
-
-		hscif0: serial@e6540000 {
-			compatible = "renesas,hscif-r8a7795",
-				     "renesas,rcar-gen3-hscif",
-				     "renesas,hscif";
-			reg = <0 0xe6540000 0 96>;
-			interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 520>,
-				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac1 0x31>, <&dmac1 0x30>,
-			       <&dmac2 0x31>, <&dmac2 0x30>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 520>;
-			status = "disabled";
-		};
-
-		hscif1: serial@e6550000 {
-			compatible = "renesas,hscif-r8a7795",
-				     "renesas,rcar-gen3-hscif",
-				     "renesas,hscif";
-			reg = <0 0xe6550000 0 96>;
-			interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 519>,
-				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac1 0x33>, <&dmac1 0x32>,
-			       <&dmac2 0x33>, <&dmac2 0x32>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 519>;
-			status = "disabled";
-		};
-
-		hscif2: serial@e6560000 {
-			compatible = "renesas,hscif-r8a7795",
-				     "renesas,rcar-gen3-hscif",
-				     "renesas,hscif";
-			reg = <0 0xe6560000 0 96>;
-			interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 518>,
-				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac1 0x35>, <&dmac1 0x34>,
-			       <&dmac2 0x35>, <&dmac2 0x34>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 518>;
-			status = "disabled";
-		};
-
-		hscif3: serial@e66a0000 {
-			compatible = "renesas,hscif-r8a7795",
-				     "renesas,rcar-gen3-hscif",
-				     "renesas,hscif";
-			reg = <0 0xe66a0000 0 96>;
-			interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 517>,
-				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac0 0x37>, <&dmac0 0x36>;
-			dma-names = "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 517>;
-			status = "disabled";
-		};
-
-		hscif4: serial@e66b0000 {
-			compatible = "renesas,hscif-r8a7795",
-				     "renesas,rcar-gen3-hscif",
-				     "renesas,hscif";
-			reg = <0 0xe66b0000 0 96>;
-			interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 516>,
-				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac0 0x39>, <&dmac0 0x38>;
-			dma-names = "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 516>;
-			status = "disabled";
-		};
-
-		hsusb: usb@e6590000 {
-			compatible = "renesas,usbhs-r8a7795",
-				     "renesas,rcar-gen3-usbhs";
-			reg = <0 0xe6590000 0 0x200>;
-			interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 704>, <&cpg CPG_MOD 703>;
-			dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
-			       <&usb_dmac1 0>, <&usb_dmac1 1>;
-			dma-names = "ch0", "ch1", "ch2", "ch3";
-			renesas,buswait = <11>;
-			phys = <&usb2_phy0 3>;
-			phy-names = "usb";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 704>, <&cpg 703>;
-			status = "disabled";
-		};
-
-		hsusb3: usb@e659c000 {
-			compatible = "renesas,usbhs-r8a7795",
-				     "renesas,rcar-gen3-usbhs";
-			reg = <0 0xe659c000 0 0x200>;
-			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 705>, <&cpg CPG_MOD 700>;
-			dmas = <&usb_dmac2 0>, <&usb_dmac2 1>,
-			       <&usb_dmac3 0>, <&usb_dmac3 1>;
-			dma-names = "ch0", "ch1", "ch2", "ch3";
-			renesas,buswait = <11>;
-			phys = <&usb2_phy3 3>;
-			phy-names = "usb";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 705>, <&cpg 700>;
-			status = "disabled";
-		};
-
-		usb_dmac0: dma-controller@e65a0000 {
-			compatible = "renesas,r8a7795-usb-dmac",
-				     "renesas,usb-dmac";
-			reg = <0 0xe65a0000 0 0x100>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "ch0", "ch1";
-			clocks = <&cpg CPG_MOD 330>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 330>;
-			#dma-cells = <1>;
-			dma-channels = <2>;
-		};
-
-		usb_dmac1: dma-controller@e65b0000 {
-			compatible = "renesas,r8a7795-usb-dmac",
-				     "renesas,usb-dmac";
-			reg = <0 0xe65b0000 0 0x100>;
-			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "ch0", "ch1";
-			clocks = <&cpg CPG_MOD 331>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 331>;
-			#dma-cells = <1>;
-			dma-channels = <2>;
-		};
-
-		usb_dmac2: dma-controller@e6460000 {
-			compatible = "renesas,r8a7795-usb-dmac",
-				     "renesas,usb-dmac";
-			reg = <0 0xe6460000 0 0x100>;
-			interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "ch0", "ch1";
-			clocks = <&cpg CPG_MOD 326>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 326>;
-			#dma-cells = <1>;
-			dma-channels = <2>;
-		};
-
-		usb_dmac3: dma-controller@e6470000 {
-			compatible = "renesas,r8a7795-usb-dmac",
-				     "renesas,usb-dmac";
-			reg = <0 0xe6470000 0 0x100>;
-			interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "ch0", "ch1";
-			clocks = <&cpg CPG_MOD 329>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 329>;
-			#dma-cells = <1>;
-			dma-channels = <2>;
-		};
-
-		usb3_phy0: usb-phy@e65ee000 {
-			compatible = "renesas,r8a7795-usb3-phy",
-				     "renesas,rcar-gen3-usb3-phy";
-			reg = <0 0xe65ee000 0 0x90>;
-			clocks = <&cpg CPG_MOD 328>, <&usb3s0_clk>,
-				 <&usb_extal_clk>;
-			clock-names = "usb3-if", "usb3s_clk", "usb_extal";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 328>;
-			#phy-cells = <0>;
-			status = "disabled";
-		};
-
-		arm_cc630p: crypto@e6601000 {
-			compatible = "arm,cryptocell-630p-ree";
-			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
-			reg = <0x0 0xe6601000 0 0x1000>;
-			clocks = <&cpg CPG_MOD 229>;
-			resets = <&cpg 229>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-		};
-
-		dmac0: dma-controller@e6700000 {
-			compatible = "renesas,dmac-r8a7795",
-				     "renesas,rcar-dmac";
-			reg = <0 0xe6700000 0 0x10000>;
-			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "error",
-					"ch0", "ch1", "ch2", "ch3",
-					"ch4", "ch5", "ch6", "ch7",
-					"ch8", "ch9", "ch10", "ch11",
-					"ch12", "ch13", "ch14", "ch15";
-			clocks = <&cpg CPG_MOD 219>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 219>;
-			#dma-cells = <1>;
-			dma-channels = <16>;
-			iommus = <&ipmmu_ds0 0>, <&ipmmu_ds0 1>,
-			       <&ipmmu_ds0 2>, <&ipmmu_ds0 3>,
-			       <&ipmmu_ds0 4>, <&ipmmu_ds0 5>,
-			       <&ipmmu_ds0 6>, <&ipmmu_ds0 7>,
-			       <&ipmmu_ds0 8>, <&ipmmu_ds0 9>,
-			       <&ipmmu_ds0 10>, <&ipmmu_ds0 11>,
-			       <&ipmmu_ds0 12>, <&ipmmu_ds0 13>,
-			       <&ipmmu_ds0 14>, <&ipmmu_ds0 15>;
-		};
-
-		dmac1: dma-controller@e7300000 {
-			compatible = "renesas,dmac-r8a7795",
-				     "renesas,rcar-dmac";
-			reg = <0 0xe7300000 0 0x10000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "error",
-					"ch0", "ch1", "ch2", "ch3",
-					"ch4", "ch5", "ch6", "ch7",
-					"ch8", "ch9", "ch10", "ch11",
-					"ch12", "ch13", "ch14", "ch15";
-			clocks = <&cpg CPG_MOD 218>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 218>;
-			#dma-cells = <1>;
-			dma-channels = <16>;
-			iommus = <&ipmmu_ds1 0>, <&ipmmu_ds1 1>,
-			       <&ipmmu_ds1 2>, <&ipmmu_ds1 3>,
-			       <&ipmmu_ds1 4>, <&ipmmu_ds1 5>,
-			       <&ipmmu_ds1 6>, <&ipmmu_ds1 7>,
-			       <&ipmmu_ds1 8>, <&ipmmu_ds1 9>,
-			       <&ipmmu_ds1 10>, <&ipmmu_ds1 11>,
-			       <&ipmmu_ds1 12>, <&ipmmu_ds1 13>,
-			       <&ipmmu_ds1 14>, <&ipmmu_ds1 15>;
-		};
-
-		dmac2: dma-controller@e7310000 {
-			compatible = "renesas,dmac-r8a7795",
-				     "renesas,rcar-dmac";
-			reg = <0 0xe7310000 0 0x10000>;
-			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "error",
-					"ch0", "ch1", "ch2", "ch3",
-					"ch4", "ch5", "ch6", "ch7",
-					"ch8", "ch9", "ch10", "ch11",
-					"ch12", "ch13", "ch14", "ch15";
-			clocks = <&cpg CPG_MOD 217>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 217>;
-			#dma-cells = <1>;
-			dma-channels = <16>;
-			iommus = <&ipmmu_ds1 16>, <&ipmmu_ds1 17>,
-			       <&ipmmu_ds1 18>, <&ipmmu_ds1 19>,
-			       <&ipmmu_ds1 20>, <&ipmmu_ds1 21>,
-			       <&ipmmu_ds1 22>, <&ipmmu_ds1 23>,
-			       <&ipmmu_ds1 24>, <&ipmmu_ds1 25>,
-			       <&ipmmu_ds1 26>, <&ipmmu_ds1 27>,
-			       <&ipmmu_ds1 28>, <&ipmmu_ds1 29>,
-			       <&ipmmu_ds1 30>, <&ipmmu_ds1 31>;
-		};
-
-		ipmmu_ds0: mmu@e6740000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xe6740000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 0>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_ds1: mmu@e7740000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xe7740000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 1>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_hc: mmu@e6570000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xe6570000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 2>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_ir: mmu@ff8b0000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xff8b0000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 3>;
-			power-domains = <&sysc R8A7795_PD_A3IR>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_mm: mmu@e67b0000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xe67b0000 0 0x1000>;
-			interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_mp0: mmu@ec670000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xec670000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 4>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_pv0: mmu@fd800000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xfd800000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 6>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_pv1: mmu@fd950000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xfd950000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 7>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_pv2: mmu@fd960000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xfd960000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 8>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_pv3: mmu@fd970000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xfd970000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 9>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_rt: mmu@ffc80000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xffc80000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 10>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_vc0: mmu@fe6b0000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xfe6b0000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 12>;
-			power-domains = <&sysc R8A7795_PD_A3VC>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_vc1: mmu@fe6f0000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xfe6f0000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 13>;
-			power-domains = <&sysc R8A7795_PD_A3VC>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_vi0: mmu@febd0000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xfebd0000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 14>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_vi1: mmu@febe0000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xfebe0000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 15>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_vp0: mmu@fe990000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xfe990000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 16>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_vp1: mmu@fe980000 {
-			compatible = "renesas,ipmmu-r8a7795";
-			reg = <0 0xfe980000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 17>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			#iommu-cells = <1>;
-		};
-
-		avb: ethernet@e6800000 {
-			compatible = "renesas,etheravb-r8a7795",
-				     "renesas,etheravb-rcar-gen3";
-			reg = <0 0xe6800000 0 0x800>, <0 0xe6a00000 0 0x10000>;
-			interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "ch0", "ch1", "ch2", "ch3",
-					  "ch4", "ch5", "ch6", "ch7",
-					  "ch8", "ch9", "ch10", "ch11",
-					  "ch12", "ch13", "ch14", "ch15",
-					  "ch16", "ch17", "ch18", "ch19",
-					  "ch20", "ch21", "ch22", "ch23",
-					  "ch24";
-			clocks = <&cpg CPG_MOD 812>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 812>;
-			phy-mode = "rgmii";
-			iommus = <&ipmmu_ds0 16>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		can0: can@e6c30000 {
-			compatible = "renesas,can-r8a7795",
-				     "renesas,rcar-gen3-can";
-			reg = <0 0xe6c30000 0 0x1000>;
-			interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 916>,
-			       <&cpg CPG_CORE R8A7795_CLK_CANFD>,
-			       <&can_clk>;
-			clock-names = "clkp1", "clkp2", "can_clk";
-			assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
-			assigned-clock-rates = <40000000>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 916>;
-			status = "disabled";
-		};
-
-		can1: can@e6c38000 {
-			compatible = "renesas,can-r8a7795",
-				     "renesas,rcar-gen3-can";
-			reg = <0 0xe6c38000 0 0x1000>;
-			interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 915>,
-			       <&cpg CPG_CORE R8A7795_CLK_CANFD>,
-			       <&can_clk>;
-			clock-names = "clkp1", "clkp2", "can_clk";
-			assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
-			assigned-clock-rates = <40000000>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 915>;
-			status = "disabled";
-		};
-
-		canfd: can@e66c0000 {
-			compatible = "renesas,r8a7795-canfd",
-				     "renesas,rcar-gen3-canfd";
-			reg = <0 0xe66c0000 0 0x8000>;
-			interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
-				   <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 914>,
-			       <&cpg CPG_CORE R8A7795_CLK_CANFD>,
-			       <&can_clk>;
-			clock-names = "fck", "canfd", "can_clk";
-			assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
-			assigned-clock-rates = <40000000>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 914>;
-			status = "disabled";
-
-			channel0 {
-				status = "disabled";
-			};
-
-			channel1 {
-				status = "disabled";
-			};
-		};
-
-		pwm0: pwm@e6e30000 {
-			compatible = "renesas,pwm-r8a7795", "renesas,pwm-rcar";
-			reg = <0 0xe6e30000 0 0x8>;
-			clocks = <&cpg CPG_MOD 523>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 523>;
-			#pwm-cells = <2>;
-			status = "disabled";
-		};
-
-		pwm1: pwm@e6e31000 {
-			compatible = "renesas,pwm-r8a7795", "renesas,pwm-rcar";
-			reg = <0 0xe6e31000 0 0x8>;
-			clocks = <&cpg CPG_MOD 523>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 523>;
-			#pwm-cells = <2>;
-			status = "disabled";
-		};
-
-		pwm2: pwm@e6e32000 {
-			compatible = "renesas,pwm-r8a7795", "renesas,pwm-rcar";
-			reg = <0 0xe6e32000 0 0x8>;
-			clocks = <&cpg CPG_MOD 523>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 523>;
-			#pwm-cells = <2>;
-			status = "disabled";
-		};
-
-		pwm3: pwm@e6e33000 {
-			compatible = "renesas,pwm-r8a7795", "renesas,pwm-rcar";
-			reg = <0 0xe6e33000 0 0x8>;
-			clocks = <&cpg CPG_MOD 523>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 523>;
-			#pwm-cells = <2>;
-			status = "disabled";
-		};
-
-		pwm4: pwm@e6e34000 {
-			compatible = "renesas,pwm-r8a7795", "renesas,pwm-rcar";
-			reg = <0 0xe6e34000 0 0x8>;
-			clocks = <&cpg CPG_MOD 523>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 523>;
-			#pwm-cells = <2>;
-			status = "disabled";
-		};
-
-		pwm5: pwm@e6e35000 {
-			compatible = "renesas,pwm-r8a7795", "renesas,pwm-rcar";
-			reg = <0 0xe6e35000 0 0x8>;
-			clocks = <&cpg CPG_MOD 523>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 523>;
-			#pwm-cells = <2>;
-			status = "disabled";
-		};
-
-		pwm6: pwm@e6e36000 {
-			compatible = "renesas,pwm-r8a7795", "renesas,pwm-rcar";
-			reg = <0 0xe6e36000 0 0x8>;
-			clocks = <&cpg CPG_MOD 523>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 523>;
-			#pwm-cells = <2>;
-			status = "disabled";
-		};
-
-		scif0: serial@e6e60000 {
-			compatible = "renesas,scif-r8a7795",
-				     "renesas,rcar-gen3-scif", "renesas,scif";
-			reg = <0 0xe6e60000 0 64>;
-			interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 207>,
-				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac1 0x51>, <&dmac1 0x50>,
-			       <&dmac2 0x51>, <&dmac2 0x50>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 207>;
-			status = "disabled";
-		};
-
-		scif1: serial@e6e68000 {
-			compatible = "renesas,scif-r8a7795",
-				     "renesas,rcar-gen3-scif", "renesas,scif";
-			reg = <0 0xe6e68000 0 64>;
-			interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 206>,
-				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac1 0x53>, <&dmac1 0x52>,
-			       <&dmac2 0x53>, <&dmac2 0x52>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 206>;
-			status = "disabled";
-		};
-
-		scif2: serial@e6e88000 {
-			compatible = "renesas,scif-r8a7795",
-				     "renesas,rcar-gen3-scif", "renesas,scif";
-			reg = <0 0xe6e88000 0 64>;
-			interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 310>,
-				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac1 0x13>, <&dmac1 0x12>,
-			       <&dmac2 0x13>, <&dmac2 0x12>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 310>;
-			status = "disabled";
-		};
-
-		scif3: serial@e6c50000 {
-			compatible = "renesas,scif-r8a7795",
-				     "renesas,rcar-gen3-scif", "renesas,scif";
-			reg = <0 0xe6c50000 0 64>;
-			interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 204>,
-				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac0 0x57>, <&dmac0 0x56>;
-			dma-names = "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 204>;
-			status = "disabled";
-		};
-
-		scif4: serial@e6c40000 {
-			compatible = "renesas,scif-r8a7795",
-				     "renesas,rcar-gen3-scif", "renesas,scif";
-			reg = <0 0xe6c40000 0 64>;
-			interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 203>,
-				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac0 0x59>, <&dmac0 0x58>;
-			dma-names = "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 203>;
-			status = "disabled";
-		};
-
-		scif5: serial@e6f30000 {
-			compatible = "renesas,scif-r8a7795",
-				     "renesas,rcar-gen3-scif", "renesas,scif";
-			reg = <0 0xe6f30000 0 64>;
-			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 202>,
-				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
-			       <&dmac2 0x5b>, <&dmac2 0x5a>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 202>;
-			status = "disabled";
-		};
-
-		tpu: pwm@e6e80000 {
-			compatible = "renesas,tpu-r8a7795", "renesas,tpu";
-			reg = <0 0xe6e80000 0 0x148>;
-			interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 304>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 304>;
-			#pwm-cells = <3>;
-			status = "disabled";
-		};
-
-		msiof0: spi@e6e90000 {
-			compatible = "renesas,msiof-r8a7795",
-				     "renesas,rcar-gen3-msiof";
-			reg = <0 0xe6e90000 0 0x0064>;
-			interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 211>;
-			dmas = <&dmac1 0x41>, <&dmac1 0x40>,
-			       <&dmac2 0x41>, <&dmac2 0x40>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 211>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		msiof1: spi@e6ea0000 {
-			compatible = "renesas,msiof-r8a7795",
-				     "renesas,rcar-gen3-msiof";
-			reg = <0 0xe6ea0000 0 0x0064>;
-			interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 210>;
-			dmas = <&dmac1 0x43>, <&dmac1 0x42>,
-			       <&dmac2 0x43>, <&dmac2 0x42>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 210>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		msiof2: spi@e6c00000 {
-			compatible = "renesas,msiof-r8a7795",
-				     "renesas,rcar-gen3-msiof";
-			reg = <0 0xe6c00000 0 0x0064>;
-			interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 209>;
-			dmas = <&dmac0 0x45>, <&dmac0 0x44>;
-			dma-names = "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 209>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		msiof3: spi@e6c10000 {
-			compatible = "renesas,msiof-r8a7795",
-				     "renesas,rcar-gen3-msiof";
-			reg = <0 0xe6c10000 0 0x0064>;
-			interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 208>;
-			dmas = <&dmac0 0x47>, <&dmac0 0x46>;
-			dma-names = "tx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 208>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		vin0: video@e6ef0000 {
-			compatible = "renesas,vin-r8a7795";
-			reg = <0 0xe6ef0000 0 0x1000>;
-			interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 811>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 811>;
-			renesas,id = <0>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin0csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin0>;
-					};
-					vin0csi40: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&csi40vin0>;
-					};
-				};
-			};
-		};
-
-		vin1: video@e6ef1000 {
-			compatible = "renesas,vin-r8a7795";
-			reg = <0 0xe6ef1000 0 0x1000>;
-			interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 810>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 810>;
-			renesas,id = <1>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin1csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin1>;
-					};
-					vin1csi40: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&csi40vin1>;
-					};
-				};
-			};
-		};
-
-		vin2: video@e6ef2000 {
-			compatible = "renesas,vin-r8a7795";
-			reg = <0 0xe6ef2000 0 0x1000>;
-			interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 809>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 809>;
-			renesas,id = <2>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin2csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin2>;
-					};
-					vin2csi40: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&csi40vin2>;
-					};
-				};
-			};
-		};
-
-		vin3: video@e6ef3000 {
-			compatible = "renesas,vin-r8a7795";
-			reg = <0 0xe6ef3000 0 0x1000>;
-			interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 808>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 808>;
-			renesas,id = <3>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin3csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin3>;
-					};
-					vin3csi40: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&csi40vin3>;
-					};
-				};
-			};
-		};
-
-		vin4: video@e6ef4000 {
-			compatible = "renesas,vin-r8a7795";
-			reg = <0 0xe6ef4000 0 0x1000>;
-			interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 807>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 807>;
-			renesas,id = <4>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin4csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin4>;
-					};
-					vin4csi41: endpoint@3 {
-						reg = <3>;
-						remote-endpoint = <&csi41vin4>;
-					};
-				};
-			};
-		};
-
-		vin5: video@e6ef5000 {
-			compatible = "renesas,vin-r8a7795";
-			reg = <0 0xe6ef5000 0 0x1000>;
-			interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 806>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 806>;
-			renesas,id = <5>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin5csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin5>;
-					};
-					vin5csi41: endpoint@3 {
-						reg = <3>;
-						remote-endpoint = <&csi41vin5>;
-					};
-				};
-			};
-		};
-
-		vin6: video@e6ef6000 {
-			compatible = "renesas,vin-r8a7795";
-			reg = <0 0xe6ef6000 0 0x1000>;
-			interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 805>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 805>;
-			renesas,id = <6>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin6csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin6>;
-					};
-					vin6csi41: endpoint@3 {
-						reg = <3>;
-						remote-endpoint = <&csi41vin6>;
-					};
-				};
-			};
-		};
-
-		vin7: video@e6ef7000 {
-			compatible = "renesas,vin-r8a7795";
-			reg = <0 0xe6ef7000 0 0x1000>;
-			interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 804>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 804>;
-			renesas,id = <7>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin7csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin7>;
-					};
-					vin7csi41: endpoint@3 {
-						reg = <3>;
-						remote-endpoint = <&csi41vin7>;
-					};
-				};
-			};
-		};
-
-		drif00: rif@e6f40000 {
-			compatible = "renesas,r8a7795-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6f40000 0 0x64>;
-			interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 515>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x20>, <&dmac2 0x20>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 515>;
-			renesas,bonding = <&drif01>;
-			status = "disabled";
-		};
-
-		drif01: rif@e6f50000 {
-			compatible = "renesas,r8a7795-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6f50000 0 0x64>;
-			interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 514>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x22>, <&dmac2 0x22>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 514>;
-			renesas,bonding = <&drif00>;
-			status = "disabled";
-		};
-
-		drif10: rif@e6f60000 {
-			compatible = "renesas,r8a7795-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6f60000 0 0x64>;
-			interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 513>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x24>, <&dmac2 0x24>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 513>;
-			renesas,bonding = <&drif11>;
-			status = "disabled";
-		};
-
-		drif11: rif@e6f70000 {
-			compatible = "renesas,r8a7795-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6f70000 0 0x64>;
-			interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 512>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x26>, <&dmac2 0x26>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 512>;
-			renesas,bonding = <&drif10>;
-			status = "disabled";
-		};
-
-		drif20: rif@e6f80000 {
-			compatible = "renesas,r8a7795-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6f80000 0 0x64>;
-			interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 511>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x28>, <&dmac2 0x28>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 511>;
-			renesas,bonding = <&drif21>;
-			status = "disabled";
-		};
-
-		drif21: rif@e6f90000 {
-			compatible = "renesas,r8a7795-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6f90000 0 0x64>;
-			interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 510>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x2a>, <&dmac2 0x2a>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 510>;
-			renesas,bonding = <&drif20>;
-			status = "disabled";
-		};
-
-		drif30: rif@e6fa0000 {
-			compatible = "renesas,r8a7795-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6fa0000 0 0x64>;
-			interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 509>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x2c>, <&dmac2 0x2c>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 509>;
-			renesas,bonding = <&drif31>;
-			status = "disabled";
-		};
-
-		drif31: rif@e6fb0000 {
-			compatible = "renesas,r8a7795-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6fb0000 0 0x64>;
-			interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 508>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x2e>, <&dmac2 0x2e>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 508>;
-			renesas,bonding = <&drif30>;
-			status = "disabled";
-		};
-
-		rcar_sound: sound@ec500000 {
-			/*
-			 * #sound-dai-cells is required
-			 *
-			 * Single DAI : #sound-dai-cells = <0>;	<&rcar_sound>;
-			 * Multi  DAI : #sound-dai-cells = <1>;	<&rcar_sound N>;
-			 */
-			/*
-			 * #clock-cells is required for audio_clkout0/1/2/3
-			 *
-			 * clkout	: #clock-cells = <0>;	<&rcar_sound>;
-			 * clkout0/1/2/3: #clock-cells = <1>;	<&rcar_sound N>;
-			 */
-			compatible =  "renesas,rcar_sound-r8a7795", "renesas,rcar_sound-gen3";
-			reg =	<0 0xec500000 0 0x1000>, /* SCU */
-				<0 0xec5a0000 0 0x100>,  /* ADG */
-				<0 0xec540000 0 0x1000>, /* SSIU */
-				<0 0xec541000 0 0x280>,  /* SSI */
-				<0 0xec760000 0 0x200>;  /* Audio DMAC peri peri*/
-			reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
-
-			clocks = <&cpg CPG_MOD 1005>,
-				 <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
-				 <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
-				 <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
-				 <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
-				 <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
-				 <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
-				 <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
-				 <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
-				 <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
-				 <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
-				 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
-				 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
-				 <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
-				 <&audio_clk_a>, <&audio_clk_b>,
-				 <&audio_clk_c>,
-				 <&cpg CPG_CORE R8A7795_CLK_S0D4>;
-			clock-names = "ssi-all",
-				      "ssi.9", "ssi.8", "ssi.7", "ssi.6",
-				      "ssi.5", "ssi.4", "ssi.3", "ssi.2",
-				      "ssi.1", "ssi.0",
-				      "src.9", "src.8", "src.7", "src.6",
-				      "src.5", "src.4", "src.3", "src.2",
-				      "src.1", "src.0",
-				      "mix.1", "mix.0",
-				      "ctu.1", "ctu.0",
-				      "dvc.0", "dvc.1",
-				      "clk_a", "clk_b", "clk_c", "clk_i";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 1005>,
-				 <&cpg 1006>, <&cpg 1007>,
-				 <&cpg 1008>, <&cpg 1009>,
-				 <&cpg 1010>, <&cpg 1011>,
-				 <&cpg 1012>, <&cpg 1013>,
-				 <&cpg 1014>, <&cpg 1015>;
-			reset-names = "ssi-all",
-				      "ssi.9", "ssi.8", "ssi.7", "ssi.6",
-				      "ssi.5", "ssi.4", "ssi.3", "ssi.2",
-				      "ssi.1", "ssi.0";
-			status = "disabled";
-
-			rcar_sound,dvc {
-				dvc0: dvc-0 {
-					dmas = <&audma1 0xbc>;
-					dma-names = "tx";
-				};
-				dvc1: dvc-1 {
-					dmas = <&audma1 0xbe>;
-					dma-names = "tx";
-				};
-			};
-
-			rcar_sound,mix {
-				mix0: mix-0 { };
-				mix1: mix-1 { };
-			};
-
-			rcar_sound,ctu {
-				ctu00: ctu-0 { };
-				ctu01: ctu-1 { };
-				ctu02: ctu-2 { };
-				ctu03: ctu-3 { };
-				ctu10: ctu-4 { };
-				ctu11: ctu-5 { };
-				ctu12: ctu-6 { };
-				ctu13: ctu-7 { };
-			};
-
-			rcar_sound,src {
-				src0: src-0 {
-					interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x85>, <&audma1 0x9a>;
-					dma-names = "rx", "tx";
-				};
-				src1: src-1 {
-					interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x87>, <&audma1 0x9c>;
-					dma-names = "rx", "tx";
-				};
-				src2: src-2 {
-					interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x89>, <&audma1 0x9e>;
-					dma-names = "rx", "tx";
-				};
-				src3: src-3 {
-					interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x8b>, <&audma1 0xa0>;
-					dma-names = "rx", "tx";
-				};
-				src4: src-4 {
-					interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x8d>, <&audma1 0xb0>;
-					dma-names = "rx", "tx";
-				};
-				src5: src-5 {
-					interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x8f>, <&audma1 0xb2>;
-					dma-names = "rx", "tx";
-				};
-				src6: src-6 {
-					interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x91>, <&audma1 0xb4>;
-					dma-names = "rx", "tx";
-				};
-				src7: src-7 {
-					interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x93>, <&audma1 0xb6>;
-					dma-names = "rx", "tx";
-				};
-				src8: src-8 {
-					interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x95>, <&audma1 0xb8>;
-					dma-names = "rx", "tx";
-				};
-				src9: src-9 {
-					interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x97>, <&audma1 0xba>;
-					dma-names = "rx", "tx";
-				};
-			};
-
-			rcar_sound,ssiu {
-				ssiu00: ssiu-0 {
-					dmas = <&audma0 0x15>, <&audma1 0x16>;
-					dma-names = "rx", "tx";
-				};
-				ssiu01: ssiu-1 {
-					dmas = <&audma0 0x35>, <&audma1 0x36>;
-					dma-names = "rx", "tx";
-				};
-				ssiu02: ssiu-2 {
-					dmas = <&audma0 0x37>, <&audma1 0x38>;
-					dma-names = "rx", "tx";
-				};
-				ssiu03: ssiu-3 {
-					dmas = <&audma0 0x47>, <&audma1 0x48>;
-					dma-names = "rx", "tx";
-				};
-				ssiu04: ssiu-4 {
-					dmas = <&audma0 0x3F>, <&audma1 0x40>;
-					dma-names = "rx", "tx";
-				};
-				ssiu05: ssiu-5 {
-					dmas = <&audma0 0x43>, <&audma1 0x44>;
-					dma-names = "rx", "tx";
-				};
-				ssiu06: ssiu-6 {
-					dmas = <&audma0 0x4F>, <&audma1 0x50>;
-					dma-names = "rx", "tx";
-				};
-				ssiu07: ssiu-7 {
-					dmas = <&audma0 0x53>, <&audma1 0x54>;
-					dma-names = "rx", "tx";
-				};
-				ssiu10: ssiu-8 {
-					dmas = <&audma0 0x49>, <&audma1 0x4a>;
-					dma-names = "rx", "tx";
-				};
-				ssiu11: ssiu-9 {
-					dmas = <&audma0 0x4B>, <&audma1 0x4C>;
-					dma-names = "rx", "tx";
-				};
-				ssiu12: ssiu-10 {
-					dmas = <&audma0 0x57>, <&audma1 0x58>;
-					dma-names = "rx", "tx";
-				};
-				ssiu13: ssiu-11 {
-					dmas = <&audma0 0x59>, <&audma1 0x5A>;
-					dma-names = "rx", "tx";
-				};
-				ssiu14: ssiu-12 {
-					dmas = <&audma0 0x5F>, <&audma1 0x60>;
-					dma-names = "rx", "tx";
-				};
-				ssiu15: ssiu-13 {
-					dmas = <&audma0 0xC3>, <&audma1 0xC4>;
-					dma-names = "rx", "tx";
-				};
-				ssiu16: ssiu-14 {
-					dmas = <&audma0 0xC7>, <&audma1 0xC8>;
-					dma-names = "rx", "tx";
-				};
-				ssiu17: ssiu-15 {
-					dmas = <&audma0 0xCB>, <&audma1 0xCC>;
-					dma-names = "rx", "tx";
-				};
-				ssiu20: ssiu-16 {
-					dmas = <&audma0 0x63>, <&audma1 0x64>;
-					dma-names = "rx", "tx";
-				};
-				ssiu21: ssiu-17 {
-					dmas = <&audma0 0x67>, <&audma1 0x68>;
-					dma-names = "rx", "tx";
-				};
-				ssiu22: ssiu-18 {
-					dmas = <&audma0 0x6B>, <&audma1 0x6C>;
-					dma-names = "rx", "tx";
-				};
-				ssiu23: ssiu-19 {
-					dmas = <&audma0 0x6D>, <&audma1 0x6E>;
-					dma-names = "rx", "tx";
-				};
-				ssiu24: ssiu-20 {
-					dmas = <&audma0 0xCF>, <&audma1 0xCE>;
-					dma-names = "rx", "tx";
-				};
-				ssiu25: ssiu-21 {
-					dmas = <&audma0 0xEB>, <&audma1 0xEC>;
-					dma-names = "rx", "tx";
-				};
-				ssiu26: ssiu-22 {
-					dmas = <&audma0 0xED>, <&audma1 0xEE>;
-					dma-names = "rx", "tx";
-				};
-				ssiu27: ssiu-23 {
-					dmas = <&audma0 0xEF>, <&audma1 0xF0>;
-					dma-names = "rx", "tx";
-				};
-				ssiu30: ssiu-24 {
-					dmas = <&audma0 0x6f>, <&audma1 0x70>;
-					dma-names = "rx", "tx";
-				};
-				ssiu31: ssiu-25 {
-					dmas = <&audma0 0x21>, <&audma1 0x22>;
-					dma-names = "rx", "tx";
-				};
-				ssiu32: ssiu-26 {
-					dmas = <&audma0 0x23>, <&audma1 0x24>;
-					dma-names = "rx", "tx";
-				};
-				ssiu33: ssiu-27 {
-					dmas = <&audma0 0x25>, <&audma1 0x26>;
-					dma-names = "rx", "tx";
-				};
-				ssiu34: ssiu-28 {
-					dmas = <&audma0 0x27>, <&audma1 0x28>;
-					dma-names = "rx", "tx";
-				};
-				ssiu35: ssiu-29 {
-					dmas = <&audma0 0x29>, <&audma1 0x2A>;
-					dma-names = "rx", "tx";
-				};
-				ssiu36: ssiu-30 {
-					dmas = <&audma0 0x2B>, <&audma1 0x2C>;
-					dma-names = "rx", "tx";
-				};
-				ssiu37: ssiu-31 {
-					dmas = <&audma0 0x2D>, <&audma1 0x2E>;
-					dma-names = "rx", "tx";
-				};
-				ssiu40: ssiu-32 {
-					dmas =	<&audma0 0x71>, <&audma1 0x72>;
-					dma-names = "rx", "tx";
-				};
-				ssiu41: ssiu-33 {
-					dmas = <&audma0 0x17>, <&audma1 0x18>;
-					dma-names = "rx", "tx";
-				};
-				ssiu42: ssiu-34 {
-					dmas = <&audma0 0x19>, <&audma1 0x1A>;
-					dma-names = "rx", "tx";
-				};
-				ssiu43: ssiu-35 {
-					dmas = <&audma0 0x1B>, <&audma1 0x1C>;
-					dma-names = "rx", "tx";
-				};
-				ssiu44: ssiu-36 {
-					dmas = <&audma0 0x1D>, <&audma1 0x1E>;
-					dma-names = "rx", "tx";
-				};
-				ssiu45: ssiu-37 {
-					dmas = <&audma0 0x1F>, <&audma1 0x20>;
-					dma-names = "rx", "tx";
-				};
-				ssiu46: ssiu-38 {
-					dmas = <&audma0 0x31>, <&audma1 0x32>;
-					dma-names = "rx", "tx";
-				};
-				ssiu47: ssiu-39 {
-					dmas = <&audma0 0x33>, <&audma1 0x34>;
-					dma-names = "rx", "tx";
-				};
-				ssiu50: ssiu-40 {
-					dmas = <&audma0 0x73>, <&audma1 0x74>;
-					dma-names = "rx", "tx";
-				};
-				ssiu60: ssiu-41 {
-					dmas = <&audma0 0x75>, <&audma1 0x76>;
-					dma-names = "rx", "tx";
-				};
-				ssiu70: ssiu-42 {
-					dmas = <&audma0 0x79>, <&audma1 0x7a>;
-					dma-names = "rx", "tx";
-				};
-				ssiu80: ssiu-43 {
-					dmas = <&audma0 0x7b>, <&audma1 0x7c>;
-					dma-names = "rx", "tx";
-				};
-				ssiu90: ssiu-44 {
-					dmas = <&audma0 0x7d>, <&audma1 0x7e>;
-					dma-names = "rx", "tx";
-				};
-				ssiu91: ssiu-45 {
-					dmas = <&audma0 0x7F>, <&audma1 0x80>;
-					dma-names = "rx", "tx";
-				};
-				ssiu92: ssiu-46 {
-					dmas = <&audma0 0x81>, <&audma1 0x82>;
-					dma-names = "rx", "tx";
-				};
-				ssiu93: ssiu-47 {
-					dmas = <&audma0 0x83>, <&audma1 0x84>;
-					dma-names = "rx", "tx";
-				};
-				ssiu94: ssiu-48 {
-					dmas = <&audma0 0xA3>, <&audma1 0xA4>;
-					dma-names = "rx", "tx";
-				};
-				ssiu95: ssiu-49 {
-					dmas = <&audma0 0xA5>, <&audma1 0xA6>;
-					dma-names = "rx", "tx";
-				};
-				ssiu96: ssiu-50 {
-					dmas = <&audma0 0xA7>, <&audma1 0xA8>;
-					dma-names = "rx", "tx";
-				};
-				ssiu97: ssiu-51 {
-					dmas = <&audma0 0xA9>, <&audma1 0xAA>;
-					dma-names = "rx", "tx";
-				};
-			};
-
-			rcar_sound,ssi {
-				ssi0: ssi-0 {
-					interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x01>, <&audma1 0x02>;
-					dma-names = "rx", "tx";
-				};
-				ssi1: ssi-1 {
-					 interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x03>, <&audma1 0x04>;
-					dma-names = "rx", "tx";
-				};
-				ssi2: ssi-2 {
-					interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x05>, <&audma1 0x06>;
-					dma-names = "rx", "tx";
-				};
-				ssi3: ssi-3 {
-					interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x07>, <&audma1 0x08>;
-					dma-names = "rx", "tx";
-				};
-				ssi4: ssi-4 {
-					interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x09>, <&audma1 0x0a>;
-					dma-names = "rx", "tx";
-				};
-				ssi5: ssi-5 {
-					interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x0b>, <&audma1 0x0c>;
-					dma-names = "rx", "tx";
-				};
-				ssi6: ssi-6 {
-					interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x0d>, <&audma1 0x0e>;
-					dma-names = "rx", "tx";
-				};
-				ssi7: ssi-7 {
-					interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x0f>, <&audma1 0x10>;
-					dma-names = "rx", "tx";
-				};
-				ssi8: ssi-8 {
-					interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x11>, <&audma1 0x12>;
-					dma-names = "rx", "tx";
-				};
-				ssi9: ssi-9 {
-					interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x13>, <&audma1 0x14>;
-					dma-names = "rx", "tx";
-				};
-			};
-		};
-
-		audma0: dma-controller@ec700000 {
-			compatible = "renesas,dmac-r8a7795",
-				     "renesas,rcar-dmac";
-			reg = <0 0xec700000 0 0x10000>;
-			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "error",
-					"ch0", "ch1", "ch2", "ch3",
-					"ch4", "ch5", "ch6", "ch7",
-					"ch8", "ch9", "ch10", "ch11",
-					"ch12", "ch13", "ch14", "ch15";
-			clocks = <&cpg CPG_MOD 502>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 502>;
-			#dma-cells = <1>;
-			dma-channels = <16>;
-			iommus = <&ipmmu_mp0 0>, <&ipmmu_mp0 1>,
-			       <&ipmmu_mp0 2>, <&ipmmu_mp0 3>,
-			       <&ipmmu_mp0 4>, <&ipmmu_mp0 5>,
-			       <&ipmmu_mp0 6>, <&ipmmu_mp0 7>,
-			       <&ipmmu_mp0 8>, <&ipmmu_mp0 9>,
-			       <&ipmmu_mp0 10>, <&ipmmu_mp0 11>,
-			       <&ipmmu_mp0 12>, <&ipmmu_mp0 13>,
-			       <&ipmmu_mp0 14>, <&ipmmu_mp0 15>;
-		};
-
-		audma1: dma-controller@ec720000 {
-			compatible = "renesas,dmac-r8a7795",
-				     "renesas,rcar-dmac";
-			reg = <0 0xec720000 0 0x10000>;
-			interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "error",
-					"ch0", "ch1", "ch2", "ch3",
-					"ch4", "ch5", "ch6", "ch7",
-					"ch8", "ch9", "ch10", "ch11",
-					"ch12", "ch13", "ch14", "ch15";
-			clocks = <&cpg CPG_MOD 501>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 501>;
-			#dma-cells = <1>;
-			dma-channels = <16>;
-			iommus = <&ipmmu_mp0 16>, <&ipmmu_mp0 17>,
-			       <&ipmmu_mp0 18>, <&ipmmu_mp0 19>,
-			       <&ipmmu_mp0 20>, <&ipmmu_mp0 21>,
-			       <&ipmmu_mp0 22>, <&ipmmu_mp0 23>,
-			       <&ipmmu_mp0 24>, <&ipmmu_mp0 25>,
-			       <&ipmmu_mp0 26>, <&ipmmu_mp0 27>,
-			       <&ipmmu_mp0 28>, <&ipmmu_mp0 29>,
-			       <&ipmmu_mp0 30>, <&ipmmu_mp0 31>;
-		};
-
-		xhci0: usb@ee000000 {
-			compatible = "renesas,xhci-r8a7795", "renesas,rcar-gen3-xhci";
-			reg = <0 0xee000000 0 0xc00>;
-			interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 328>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 328>;
-			status = "disabled";
-		};
-
-		usb3_peri0: usb@ee020000 {
-			compatible = "renesas,r8a7795-usb3-peri",
-				     "renesas,rcar-gen3-usb3-peri";
-			reg = <0 0xee020000 0 0x400>;
-			interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 328>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 328>;
-			status = "disabled";
-		};
-
-		ohci0: usb@ee080000 {
-			compatible = "generic-ohci";
-			reg = <0 0xee080000 0 0x100>;
-			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
-			phys = <&usb2_phy0 1>;
-			phy-names = "usb";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 703>, <&cpg 704>;
-			status = "disabled";
-		};
-
-		ohci1: usb@ee0a0000 {
-			compatible = "generic-ohci";
-			reg = <0 0xee0a0000 0 0x100>;
-			interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 702>;
-			phys = <&usb2_phy1 1>;
-			phy-names = "usb";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 702>;
-			status = "disabled";
-		};
-
-		ohci2: usb@ee0c0000 {
-			compatible = "generic-ohci";
-			reg = <0 0xee0c0000 0 0x100>;
-			interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 701>;
-			phys = <&usb2_phy2 1>;
-			phy-names = "usb";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 701>;
-			status = "disabled";
-		};
-
-		ohci3: usb@ee0e0000 {
-			compatible = "generic-ohci";
-			reg = <0 0xee0e0000 0 0x100>;
-			interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 700>, <&cpg CPG_MOD 705>;
-			phys = <&usb2_phy3 1>;
-			phy-names = "usb";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 700>, <&cpg 705>;
-			status = "disabled";
-		};
-
-		ehci0: usb@ee080100 {
-			compatible = "generic-ehci";
-			reg = <0 0xee080100 0 0x100>;
-			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
-			phys = <&usb2_phy0 2>;
-			phy-names = "usb";
-			companion = <&ohci0>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 703>, <&cpg 704>;
-			status = "disabled";
-		};
-
-		ehci1: usb@ee0a0100 {
-			compatible = "generic-ehci";
-			reg = <0 0xee0a0100 0 0x100>;
-			interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 702>;
-			phys = <&usb2_phy1 2>;
-			phy-names = "usb";
-			companion = <&ohci1>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 702>;
-			status = "disabled";
-		};
-
-		ehci2: usb@ee0c0100 {
-			compatible = "generic-ehci";
-			reg = <0 0xee0c0100 0 0x100>;
-			interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 701>;
-			phys = <&usb2_phy2 2>;
-			phy-names = "usb";
-			companion = <&ohci2>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 701>;
-			status = "disabled";
-		};
-
-		ehci3: usb@ee0e0100 {
-			compatible = "generic-ehci";
-			reg = <0 0xee0e0100 0 0x100>;
-			interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 700>, <&cpg CPG_MOD 705>;
-			phys = <&usb2_phy3 2>;
-			phy-names = "usb";
-			companion = <&ohci3>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 700>, <&cpg 705>;
-			status = "disabled";
-		};
-
-		usb2_phy0: usb-phy@ee080200 {
-			compatible = "renesas,usb2-phy-r8a7795",
-				     "renesas,rcar-gen3-usb2-phy";
-			reg = <0 0xee080200 0 0x700>;
-			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 703>, <&cpg 704>;
-			#phy-cells = <1>;
-			status = "disabled";
-		};
-
-		usb2_phy1: usb-phy@ee0a0200 {
-			compatible = "renesas,usb2-phy-r8a7795",
-				     "renesas,rcar-gen3-usb2-phy";
-			reg = <0 0xee0a0200 0 0x700>;
-			clocks = <&cpg CPG_MOD 702>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 702>;
-			#phy-cells = <1>;
-			status = "disabled";
-		};
-
-		usb2_phy2: usb-phy@ee0c0200 {
-			compatible = "renesas,usb2-phy-r8a7795",
-				     "renesas,rcar-gen3-usb2-phy";
-			reg = <0 0xee0c0200 0 0x700>;
-			clocks = <&cpg CPG_MOD 701>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 701>;
-			#phy-cells = <1>;
-			status = "disabled";
-		};
-
-		usb2_phy3: usb-phy@ee0e0200 {
-			compatible = "renesas,usb2-phy-r8a7795",
-				     "renesas,rcar-gen3-usb2-phy";
-			reg = <0 0xee0e0200 0 0x700>;
-			interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 700>, <&cpg CPG_MOD 705>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 700>, <&cpg 705>;
-			#phy-cells = <1>;
-			status = "disabled";
-		};
-
-		sdhi0: sd@ee100000 {
-			compatible = "renesas,sdhi-r8a7795",
-				     "renesas,rcar-gen3-sdhi";
-			reg = <0 0xee100000 0 0x2000>;
-			interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 314>;
-			max-frequency = <200000000>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 314>;
-			iommus = <&ipmmu_ds1 32>;
-			status = "disabled";
-		};
-
-		sdhi1: sd@ee120000 {
-			compatible = "renesas,sdhi-r8a7795",
-				     "renesas,rcar-gen3-sdhi";
-			reg = <0 0xee120000 0 0x2000>;
-			interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 313>;
-			max-frequency = <200000000>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 313>;
-			iommus = <&ipmmu_ds1 33>;
-			status = "disabled";
-		};
-
-		sdhi2: sd@ee140000 {
-			compatible = "renesas,sdhi-r8a7795",
-				     "renesas,rcar-gen3-sdhi";
-			reg = <0 0xee140000 0 0x2000>;
-			interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 312>;
-			max-frequency = <200000000>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 312>;
-			iommus = <&ipmmu_ds1 34>;
-			status = "disabled";
-		};
-
-		sdhi3: sd@ee160000 {
-			compatible = "renesas,sdhi-r8a7795",
-				     "renesas,rcar-gen3-sdhi";
-			reg = <0 0xee160000 0 0x2000>;
-			interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 311>;
-			max-frequency = <200000000>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 311>;
-			iommus = <&ipmmu_ds1 35>;
-			status = "disabled";
-		};
-
-		sata: sata@ee300000 {
-			compatible = "renesas,sata-r8a7795",
-				     "renesas,rcar-gen3-sata";
-			reg = <0 0xee300000 0 0x200000>;
-			interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 815>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 815>;
-			status = "disabled";
-			iommus = <&ipmmu_hc 2>;
-		};
-
-		gic: interrupt-controller@f1010000 {
-			compatible = "arm,gic-400";
-			#interrupt-cells = <3>;
-			#address-cells = <0>;
-			interrupt-controller;
-			reg = <0x0 0xf1010000 0 0x1000>,
-			      <0x0 0xf1020000 0 0x20000>,
-			      <0x0 0xf1040000 0 0x20000>,
-			      <0x0 0xf1060000 0 0x20000>;
-			interrupts = <GIC_PPI 9
-					(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
-			clocks = <&cpg CPG_MOD 408>;
-			clock-names = "clk";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 408>;
-		};
-
-		pciec0: pcie@fe000000 {
-			compatible = "renesas,pcie-r8a7795",
-				     "renesas,pcie-rcar-gen3";
-			reg = <0 0xfe000000 0 0x80000>;
-			#address-cells = <3>;
-			#size-cells = <2>;
-			bus-range = <0x00 0xff>;
-			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
-				0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
-				0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
-				0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
-			/* Map all possible DDR as inbound ranges */
-			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>;
-			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
-			#interrupt-cells = <1>;
-			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>;
-			clock-names = "pcie", "pcie_bus";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 319>;
-			status = "disabled";
-		};
-
-		pciec1: pcie@ee800000 {
-			compatible = "renesas,pcie-r8a7795",
-				     "renesas,pcie-rcar-gen3";
-			reg = <0 0xee800000 0 0x80000>;
-			#address-cells = <3>;
-			#size-cells = <2>;
-			bus-range = <0x00 0xff>;
-			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xee900000 0 0x00100000
-				0x02000000 0 0xeea00000 0 0xeea00000 0 0x00200000
-				0x02000000 0 0xc0000000 0 0xc0000000 0 0x08000000
-				0x42000000 0 0xc8000000 0 0xc8000000 0 0x08000000>;
-			/* Map all possible DDR as inbound ranges */
-			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>;
-			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
-			#interrupt-cells = <1>;
-			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 318>, <&pcie_bus_clk>;
-			clock-names = "pcie", "pcie_bus";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 318>;
-			status = "disabled";
-		};
-
-		imr-lx4@fe860000 {
-			compatible = "renesas,r8a7795-imr-lx4",
-				     "renesas,imr-lx4";
-			reg = <0 0xfe860000 0 0x2000>;
-			interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 823>;
-			power-domains = <&sysc R8A7795_PD_A3VC>;
-			resets = <&cpg 823>;
-		};
-
-		imr-lx4@fe870000 {
-			compatible = "renesas,r8a7795-imr-lx4",
-				     "renesas,imr-lx4";
-			reg = <0 0xfe870000 0 0x2000>;
-			interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 822>;
-			power-domains = <&sysc R8A7795_PD_A3VC>;
-			resets = <&cpg 822>;
-		};
-
-		imr-lx4@fe880000 {
-			compatible = "renesas,r8a7795-imr-lx4",
-				     "renesas,imr-lx4";
-			reg = <0 0xfe880000 0 0x2000>;
-			interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 821>;
-			power-domains = <&sysc R8A7795_PD_A3VC>;
-			resets = <&cpg 821>;
-		};
-
-		imr-lx4@fe890000 {
-			compatible = "renesas,r8a7795-imr-lx4",
-				     "renesas,imr-lx4";
-			reg = <0 0xfe890000 0 0x2000>;
-			interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 820>;
-			power-domains = <&sysc R8A7795_PD_A3VC>;
-			resets = <&cpg 820>;
-		};
-
-		vspbc: vsp@fe920000 {
-			compatible = "renesas,vsp2";
-			reg = <0 0xfe920000 0 0x8000>;
-			interrupts = <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 624>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 624>;
-
-			renesas,fcp = <&fcpvb1>;
-		};
-
-		vspbd: vsp@fe960000 {
-			compatible = "renesas,vsp2";
-			reg = <0 0xfe960000 0 0x8000>;
-			interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 626>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 626>;
-
-			renesas,fcp = <&fcpvb0>;
-		};
-
-		vspd0: vsp@fea20000 {
-			compatible = "renesas,vsp2";
-			reg = <0 0xfea20000 0 0x5000>;
-			interrupts = <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 623>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 623>;
-
-			renesas,fcp = <&fcpvd0>;
-		};
-
-		vspd1: vsp@fea28000 {
-			compatible = "renesas,vsp2";
-			reg = <0 0xfea28000 0 0x5000>;
-			interrupts = <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 622>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 622>;
-
-			renesas,fcp = <&fcpvd1>;
-		};
-
-		vspd2: vsp@fea30000 {
-			compatible = "renesas,vsp2";
-			reg = <0 0xfea30000 0 0x5000>;
-			interrupts = <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 621>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 621>;
-
-			renesas,fcp = <&fcpvd2>;
-		};
-
-		vspi0: vsp@fe9a0000 {
-			compatible = "renesas,vsp2";
-			reg = <0 0xfe9a0000 0 0x8000>;
-			interrupts = <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 631>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 631>;
-
-			renesas,fcp = <&fcpvi0>;
-		};
-
-		vspi1: vsp@fe9b0000 {
-			compatible = "renesas,vsp2";
-			reg = <0 0xfe9b0000 0 0x8000>;
-			interrupts = <GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 630>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 630>;
-
-			renesas,fcp = <&fcpvi1>;
-		};
-
-		fdp1@fe940000 {
-			compatible = "renesas,fdp1";
-			reg = <0 0xfe940000 0 0x2400>;
-			interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 119>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 119>;
-			renesas,fcp = <&fcpf0>;
-		};
-
-		fdp1@fe944000 {
-			compatible = "renesas,fdp1";
-			reg = <0 0xfe944000 0 0x2400>;
-			interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 118>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 118>;
-			renesas,fcp = <&fcpf1>;
-		};
-
-		fcpf0: fcp@fe950000 {
-			compatible = "renesas,fcpf";
-			reg = <0 0xfe950000 0 0x200>;
-			clocks = <&cpg CPG_MOD 615>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 615>;
-			iommus = <&ipmmu_vp0 0>;
-		};
-
-		fcpf1: fcp@fe951000 {
-			compatible = "renesas,fcpf";
-			reg = <0 0xfe951000 0 0x200>;
-			clocks = <&cpg CPG_MOD 614>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 614>;
-			iommus = <&ipmmu_vp1 1>;
-		};
-
-		fcpvb0: fcp@fe96f000 {
-			compatible = "renesas,fcpv";
-			reg = <0 0xfe96f000 0 0x200>;
-			clocks = <&cpg CPG_MOD 607>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 607>;
-			iommus = <&ipmmu_vp0 5>;
-		};
-
-		fcpvb1: fcp@fe92f000 {
-			compatible = "renesas,fcpv";
-			reg = <0 0xfe92f000 0 0x200>;
-			clocks = <&cpg CPG_MOD 606>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 606>;
-			iommus = <&ipmmu_vp1 7>;
-		};
-
-		fcpvi0: fcp@fe9af000 {
-			compatible = "renesas,fcpv";
-			reg = <0 0xfe9af000 0 0x200>;
-			clocks = <&cpg CPG_MOD 611>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 611>;
-			iommus = <&ipmmu_vp0 8>;
-		};
-
-		fcpvi1: fcp@fe9bf000 {
-			compatible = "renesas,fcpv";
-			reg = <0 0xfe9bf000 0 0x200>;
-			clocks = <&cpg CPG_MOD 610>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 610>;
-			iommus = <&ipmmu_vp1 9>;
-		};
-
-		fcpvd0: fcp@fea27000 {
-			compatible = "renesas,fcpv";
-			reg = <0 0xfea27000 0 0x200>;
-			clocks = <&cpg CPG_MOD 603>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 603>;
-			iommus = <&ipmmu_vi0 8>;
-		};
-
-		fcpvd1: fcp@fea2f000 {
-			compatible = "renesas,fcpv";
-			reg = <0 0xfea2f000 0 0x200>;
-			clocks = <&cpg CPG_MOD 602>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 602>;
-			iommus = <&ipmmu_vi0 9>;
-		};
-
-		fcpvd2: fcp@fea37000 {
-			compatible = "renesas,fcpv";
-			reg = <0 0xfea37000 0 0x200>;
-			clocks = <&cpg CPG_MOD 601>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 601>;
-			iommus = <&ipmmu_vi1 10>;
-		};
-
-		cmm0: cmm@fea40000 {
-			compatible = "renesas,r8a7795-cmm",
-				     "renesas,rcar-gen3-cmm";
-			reg = <0 0xfea40000 0 0x1000>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			clocks = <&cpg CPG_MOD 711>;
-			resets = <&cpg 711>;
-		};
-
-		cmm1: cmm@fea50000 {
-			compatible = "renesas,r8a7795-cmm",
-				     "renesas,rcar-gen3-cmm";
-			reg = <0 0xfea50000 0 0x1000>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			clocks = <&cpg CPG_MOD 710>;
-			resets = <&cpg 710>;
-		};
-
-		cmm2: cmm@fea60000 {
-			compatible = "renesas,r8a7795-cmm",
-				     "renesas,rcar-gen3-cmm";
-			reg = <0 0xfea60000 0 0x1000>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			clocks = <&cpg CPG_MOD 709>;
-			resets = <&cpg 709>;
-		};
-
-		cmm3: cmm@fea70000 {
-			compatible = "renesas,r8a7795-cmm",
-				     "renesas,rcar-gen3-cmm";
-			reg = <0 0xfea70000 0 0x1000>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			clocks = <&cpg CPG_MOD 708>;
-			resets = <&cpg 708>;
-		};
-
-		csi20: csi2@fea80000 {
-			compatible = "renesas,r8a7795-csi2";
-			reg = <0 0xfea80000 0 0x10000>;
-			interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 714>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 714>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					csi20vin0: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&vin0csi20>;
-					};
-					csi20vin1: endpoint@1 {
-						reg = <1>;
-						remote-endpoint = <&vin1csi20>;
-					};
-					csi20vin2: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&vin2csi20>;
-					};
-					csi20vin3: endpoint@3 {
-						reg = <3>;
-						remote-endpoint = <&vin3csi20>;
-					};
-					csi20vin4: endpoint@4 {
-						reg = <4>;
-						remote-endpoint = <&vin4csi20>;
-					};
-					csi20vin5: endpoint@5 {
-						reg = <5>;
-						remote-endpoint = <&vin5csi20>;
-					};
-					csi20vin6: endpoint@6 {
-						reg = <6>;
-						remote-endpoint = <&vin6csi20>;
-					};
-					csi20vin7: endpoint@7 {
-						reg = <7>;
-						remote-endpoint = <&vin7csi20>;
-					};
-				};
-			};
-		};
-
-		csi40: csi2@feaa0000 {
-			compatible = "renesas,r8a7795-csi2";
-			reg = <0 0xfeaa0000 0 0x10000>;
-			interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 716>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 716>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					csi40vin0: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&vin0csi40>;
-					};
-					csi40vin1: endpoint@1 {
-						reg = <1>;
-						remote-endpoint = <&vin1csi40>;
-					};
-					csi40vin2: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&vin2csi40>;
-					};
-					csi40vin3: endpoint@3 {
-						reg = <3>;
-						remote-endpoint = <&vin3csi40>;
-					};
-				};
-			};
-		};
-
-		csi41: csi2@feab0000 {
-			compatible = "renesas,r8a7795-csi2";
-			reg = <0 0xfeab0000 0 0x10000>;
-			interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 715>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 715>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					csi41vin4: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&vin4csi41>;
-					};
-					csi41vin5: endpoint@1 {
-						reg = <1>;
-						remote-endpoint = <&vin5csi41>;
-					};
-					csi41vin6: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&vin6csi41>;
-					};
-					csi41vin7: endpoint@3 {
-						reg = <3>;
-						remote-endpoint = <&vin7csi41>;
-					};
-				};
-			};
-		};
-
-		hdmi0: hdmi@fead0000 {
-			compatible = "renesas,r8a7795-hdmi", "renesas,rcar-gen3-hdmi";
-			reg = <0 0xfead0000 0 0x10000>;
-			interrupts = <GIC_SPI 389 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 729>, <&cpg CPG_CORE R8A7795_CLK_HDMI>;
-			clock-names = "iahb", "isfr";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 729>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				port@0 {
-					reg = <0>;
-					dw_hdmi0_in: endpoint {
-						remote-endpoint = <&du_out_hdmi0>;
-					};
-				};
-				port@1 {
-					reg = <1>;
-				};
-				port@2 {
-					/* HDMI sound */
-					reg = <2>;
-				};
-			};
-		};
-
-		hdmi1: hdmi@feae0000 {
-			compatible = "renesas,r8a7795-hdmi", "renesas,rcar-gen3-hdmi";
-			reg = <0 0xfeae0000 0 0x10000>;
-			interrupts = <GIC_SPI 436 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 728>, <&cpg CPG_CORE R8A7795_CLK_HDMI>;
-			clock-names = "iahb", "isfr";
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 728>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				port@0 {
-					reg = <0>;
-					dw_hdmi1_in: endpoint {
-						remote-endpoint = <&du_out_hdmi1>;
-					};
-				};
-				port@1 {
-					reg = <1>;
-				};
-				port@2 {
-					/* HDMI sound */
-					reg = <2>;
-				};
-			};
-		};
-
-		du: display@feb00000 {
-			compatible = "renesas,du-r8a7795";
-			reg = <0 0xfeb00000 0 0x80000>;
-			interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 724>,
-				 <&cpg CPG_MOD 723>,
-				 <&cpg CPG_MOD 722>,
-				 <&cpg CPG_MOD 721>;
-			clock-names = "du.0", "du.1", "du.2", "du.3";
-
-			renesas,cmms = <&cmm0>, <&cmm1>, <&cmm2>, <&cmm3>;
-			vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>, <&vspd0 1>;
-
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@0 {
-					reg = <0>;
-					du_out_rgb: endpoint {
-					};
-				};
-				port@1 {
-					reg = <1>;
-					du_out_hdmi0: endpoint {
-						remote-endpoint = <&dw_hdmi0_in>;
-					};
-				};
-				port@2 {
-					reg = <2>;
-					du_out_hdmi1: endpoint {
-						remote-endpoint = <&dw_hdmi1_in>;
-					};
-				};
-				port@3 {
-					reg = <3>;
-					du_out_lvds0: endpoint {
-						remote-endpoint = <&lvds0_in>;
-					};
-				};
-			};
-		};
-
-		lvds0: lvds@feb90000 {
-			compatible = "renesas,r8a7795-lvds";
-			reg = <0 0xfeb90000 0 0x14>;
-			clocks = <&cpg CPG_MOD 727>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 727>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@0 {
-					reg = <0>;
-					lvds0_in: endpoint {
-						remote-endpoint = <&du_out_lvds0>;
-					};
-				};
-				port@1 {
-					reg = <1>;
-					lvds0_out: endpoint {
-					};
-				};
-			};
-		};
-
-		prr: chipid@fff00044 {
-			compatible = "renesas,prr";
-			reg = <0 0xfff00044 0 4>;
-		};
-	};
-
-	thermal-zones {
-		sensor_thermal1: sensor-thermal1 {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-			thermal-sensors = <&tsc 0>;
-			sustainable-power = <6313>;
-
-			trips {
-				sensor1_crit: sensor1-crit {
-					temperature = <120000>;
-					hysteresis = <1000>;
-					type = "critical";
-				};
-			};
-		};
-
-		sensor_thermal2: sensor-thermal2 {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-			thermal-sensors = <&tsc 1>;
-			sustainable-power = <6313>;
-
-			trips {
-				sensor2_crit: sensor2-crit {
-					temperature = <120000>;
-					hysteresis = <1000>;
-					type = "critical";
-				};
-			};
-		};
-
-		sensor_thermal3: sensor-thermal3 {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-			thermal-sensors = <&tsc 2>;
-
-			trips {
-				target: trip-point1 {
-					temperature = <100000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-
-				sensor3_crit: sensor3-crit {
-					temperature = <120000>;
-					hysteresis = <1000>;
-					type = "critical";
-				};
-			};
-
-			cooling-maps {
-				map0 {
-					trip = <&target>;
-					cooling-device = <&a57_0 2 4>;
-					contribution = <1024>;
-				};
-
-				map1 {
-					trip = <&target>;
-					cooling-device = <&a53_0 0 2>;
-					contribution = <1024>;
-				};
-			};
-		};
-	};
-
-	timer {
-		compatible = "arm,armv8-timer";
-		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
-				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
-				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
-				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
-	};
-
-	/* External USB clocks - can be overridden by the board */
-	usb3s0_clk: usb3s0 {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-
-	usb_extal_clk: usb_extal {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a77950-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a77950-salvator-x.dts
new file mode 100644
index 0000000..2438825
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77950-salvator-x.dts
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the Salvator-X board with R-Car H3 ES1.x
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+#include "r8a77950.dtsi"
+#include "salvator-x.dtsi"
+
+/ {
+	model = "Renesas Salvator-X board based on r8a77950";
+	compatible = "renesas,salvator-x", "renesas,r8a7795";
+
+	memory@48000000 {
+		device_type = "memory";
+		/* first 128MB is reserved for secure area. */
+		reg = <0x0 0x48000000 0x0 0x38000000>;
+	};
+
+	memory@500000000 {
+		device_type = "memory";
+		reg = <0x5 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@600000000 {
+		device_type = "memory";
+		reg = <0x6 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@700000000 {
+		device_type = "memory";
+		reg = <0x7 0x00000000 0x0 0x40000000>;
+	};
+};
+
+&du {
+	clocks = <&cpg CPG_MOD 724>,
+		 <&cpg CPG_MOD 723>,
+		 <&cpg CPG_MOD 722>,
+		 <&cpg CPG_MOD 721>,
+		 <&versaclock5 1>,
+		 <&x21_clk>,
+		 <&x22_clk>,
+		 <&versaclock5 2>;
+	clock-names = "du.0", "du.1", "du.2", "du.3",
+		      "dclkin.0", "dclkin.1", "dclkin.2", "dclkin.3";
+};
+
+&ehci2 {
+	status = "okay";
+};
+
+&hdmi0 {
+	status = "okay";
+
+	ports {
+		port@1 {
+			reg = <1>;
+			rcar_dw_hdmi0_out: endpoint {
+				remote-endpoint = <&hdmi0_con>;
+			};
+		};
+		port@2 {
+			reg = <2>;
+			dw_hdmi0_snd_in: endpoint {
+				remote-endpoint = <&rsnd_endpoint1>;
+			};
+		};
+	};
+};
+
+&hdmi0_con {
+	remote-endpoint = <&rcar_dw_hdmi0_out>;
+};
+
+&hdmi1 {
+	status = "okay";
+
+	ports {
+		port@1 {
+			reg = <1>;
+			rcar_dw_hdmi1_out: endpoint {
+				remote-endpoint = <&hdmi1_con>;
+			};
+		};
+		port@2 {
+			reg = <2>;
+			dw_hdmi1_snd_in: endpoint {
+				remote-endpoint = <&rsnd_endpoint2>;
+			};
+		};
+	};
+};
+
+&hdmi1_con {
+	remote-endpoint = <&rcar_dw_hdmi1_out>;
+};
+
+&ohci2 {
+	status = "okay";
+};
+
+&pfc {
+	usb2_pins: usb2 {
+		groups = "usb2";
+		function = "usb2";
+	};
+};
+
+&rcar_sound {
+	ports {
+		/* rsnd_port0 is on salvator-common */
+		rsnd_port1: port@1 {
+			reg = <1>;
+			rsnd_endpoint1: endpoint {
+				remote-endpoint = <&dw_hdmi0_snd_in>;
+
+				dai-format = "i2s";
+				bitclock-master = <&rsnd_endpoint1>;
+				frame-master = <&rsnd_endpoint1>;
+
+				playback = <&ssi2>;
+			};
+		};
+		rsnd_port2: port@2 {
+			reg = <2>;
+			rsnd_endpoint2: endpoint {
+				remote-endpoint = <&dw_hdmi1_snd_in>;
+
+				dai-format = "i2s";
+				bitclock-master = <&rsnd_endpoint2>;
+				frame-master = <&rsnd_endpoint2>;
+
+				playback = <&ssi3>;
+			};
+		};
+	};
+};
+
+&sata {
+	status = "okay";
+};
+
+&sound_card {
+	dais = <&rsnd_port0	/* ak4613 */
+		&rsnd_port1	/* HDMI0  */
+		&rsnd_port2>;	/* HDMI1  */
+};
+
+&usb2_phy2 {
+	pinctrl-0 = <&usb2_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77950-ulcb-kf.dts b/arch/arm64/boot/dts/renesas/r8a77950-ulcb-kf.dts
new file mode 100644
index 0000000..dcaaf12
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77950-ulcb-kf.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the H3ULCB Kingfisher board
+ *
+ * Copyright (C) 2017 Renesas Electronics Corp.
+ * Copyright (C) 2017 Cogent Embedded, Inc.
+ */
+
+#include "r8a77950-ulcb.dts"
+#include "ulcb-kf.dtsi"
+
+/ {
+	model = "Renesas H3ULCB Kingfisher board based on r8a77950";
+	compatible = "shimafuji,kingfisher", "renesas,h3ulcb",
+		     "renesas,r8a7795";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77950-ulcb.dts b/arch/arm64/boot/dts/renesas/r8a77950-ulcb.dts
new file mode 100644
index 0000000..38a6d6a
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77950-ulcb.dts
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the H3ULCB (R-Car Starter Kit Premier) board
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2016 Cogent Embedded, Inc.
+ */
+
+/dts-v1/;
+#include "r8a77950.dtsi"
+#include "ulcb.dtsi"
+
+/ {
+	model = "Renesas H3ULCB board based on r8a77950";
+	compatible = "renesas,h3ulcb", "renesas,r8a7795";
+
+	memory@48000000 {
+		device_type = "memory";
+		/* first 128MB is reserved for secure area. */
+		reg = <0x0 0x48000000 0x0 0x38000000>;
+	};
+
+	memory@500000000 {
+		device_type = "memory";
+		reg = <0x5 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@600000000 {
+		device_type = "memory";
+		reg = <0x6 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@700000000 {
+		device_type = "memory";
+		reg = <0x7 0x00000000 0x0 0x40000000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77950.dtsi b/arch/arm64/boot/dts/renesas/r8a77950.dtsi
new file mode 100644
index 0000000..1521649
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77950.dtsi
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the R-Car H3 (R8A77950) SoC
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+#include "r8a77951.dtsi"
+
+&audma0 {
+	iommus = <&ipmmu_mp1 0>, <&ipmmu_mp1 1>,
+	       <&ipmmu_mp1 2>, <&ipmmu_mp1 3>,
+	       <&ipmmu_mp1 4>, <&ipmmu_mp1 5>,
+	       <&ipmmu_mp1 6>, <&ipmmu_mp1 7>,
+	       <&ipmmu_mp1 8>, <&ipmmu_mp1 9>,
+	       <&ipmmu_mp1 10>, <&ipmmu_mp1 11>,
+	       <&ipmmu_mp1 12>, <&ipmmu_mp1 13>,
+	       <&ipmmu_mp1 14>, <&ipmmu_mp1 15>;
+};
+
+&audma1 {
+	iommus = <&ipmmu_mp1 16>, <&ipmmu_mp1 17>,
+	       <&ipmmu_mp1 18>, <&ipmmu_mp1 19>,
+	       <&ipmmu_mp1 20>, <&ipmmu_mp1 21>,
+	       <&ipmmu_mp1 22>, <&ipmmu_mp1 23>,
+	       <&ipmmu_mp1 24>, <&ipmmu_mp1 25>,
+	       <&ipmmu_mp1 26>, <&ipmmu_mp1 27>,
+	       <&ipmmu_mp1 28>, <&ipmmu_mp1 29>,
+	       <&ipmmu_mp1 30>, <&ipmmu_mp1 31>;
+};
+
+&du {
+	vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>, <&vspd3 0>;
+};
+
+&fcpvb1 {
+	iommus = <&ipmmu_vp0 7>;
+};
+
+&fcpf1 {
+	iommus = <&ipmmu_vp0 1>;
+};
+
+&fcpvi1 {
+	iommus = <&ipmmu_vp0 9>;
+};
+
+&fcpvd2 {
+	iommus = <&ipmmu_vi0 10>;
+};
+
+&gpio1 {
+	gpio-ranges = <&pfc 0 32 28>;
+};
+
+&ipmmu_vi0 {
+	renesas,ipmmu-main = <&ipmmu_mm 11>;
+};
+
+&ipmmu_vp0 {
+	renesas,ipmmu-main = <&ipmmu_mm 12>;
+};
+
+&ipmmu_vc0 {
+	renesas,ipmmu-main = <&ipmmu_mm 9>;
+};
+
+&ipmmu_vc1 {
+	renesas,ipmmu-main = <&ipmmu_mm 10>;
+};
+
+&ipmmu_rt {
+	renesas,ipmmu-main = <&ipmmu_mm 7>;
+};
+
+&soc {
+	/delete-node/ dma-controller@e6460000;
+	/delete-node/ dma-controller@e6470000;
+
+	ipmmu_mp1: mmu@ec680000 {
+		compatible = "renesas,ipmmu-r8a7795";
+		reg = <0 0xec680000 0 0x1000>;
+		renesas,ipmmu-main = <&ipmmu_mm 5>;
+		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+		#iommu-cells = <1>;
+	};
+
+	ipmmu_sy: mmu@e7730000 {
+		compatible = "renesas,ipmmu-r8a7795";
+		reg = <0 0xe7730000 0 0x1000>;
+		renesas,ipmmu-main = <&ipmmu_mm 8>;
+		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+		#iommu-cells = <1>;
+	};
+
+	/delete-node/ mmu@fd950000;
+	/delete-node/ mmu@fd960000;
+	/delete-node/ mmu@fd970000;
+	/delete-node/ mmu@febe0000;
+	/delete-node/ mmu@fe980000;
+
+	xhci1: usb@ee040000 {
+		compatible = "renesas,xhci-r8a7795", "renesas,rcar-gen3-xhci";
+		reg = <0 0xee040000 0 0xc00>;
+		interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cpg CPG_MOD 327>;
+		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+		resets = <&cpg 327>;
+		status = "disabled";
+	};
+
+	/delete-node/ usb@e659c000;
+	/delete-node/ usb@ee0e0000;
+	/delete-node/ usb@ee0e0100;
+
+	/delete-node/ usb-phy@ee0e0200;
+
+	fdp1@fe948000 {
+		compatible = "renesas,fdp1";
+		reg = <0 0xfe948000 0 0x2400>;
+		interrupts = <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cpg CPG_MOD 117>;
+		power-domains = <&sysc R8A7795_PD_A3VP>;
+		resets = <&cpg 117>;
+		renesas,fcp = <&fcpf2>;
+	};
+
+	fcpf2: fcp@fe952000 {
+		compatible = "renesas,fcpf";
+		reg = <0 0xfe952000 0 0x200>;
+		clocks = <&cpg CPG_MOD 613>;
+		power-domains = <&sysc R8A7795_PD_A3VP>;
+		resets = <&cpg 613>;
+		iommus = <&ipmmu_vp0 2>;
+	};
+
+	fcpvd3: fcp@fea3f000 {
+		compatible = "renesas,fcpv";
+		reg = <0 0xfea3f000 0 0x200>;
+		clocks = <&cpg CPG_MOD 600>;
+		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+		resets = <&cpg 600>;
+		iommus = <&ipmmu_vi0 11>;
+	};
+
+	fcpvi2: fcp@fe9cf000 {
+		compatible = "renesas,fcpv";
+		reg = <0 0xfe9cf000 0 0x200>;
+		clocks = <&cpg CPG_MOD 609>;
+		power-domains = <&sysc R8A7795_PD_A3VP>;
+		resets = <&cpg 609>;
+		iommus = <&ipmmu_vp0 10>;
+	};
+
+	vspd3: vsp@fea38000 {
+		compatible = "renesas,vsp2";
+		reg = <0 0xfea38000 0 0x5000>;
+		interrupts = <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cpg CPG_MOD 620>;
+		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+		resets = <&cpg 620>;
+
+		renesas,fcp = <&fcpvd3>;
+	};
+
+	vspi2: vsp@fe9c0000 {
+		compatible = "renesas,vsp2";
+		reg = <0 0xfe9c0000 0 0x8000>;
+		interrupts = <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cpg CPG_MOD 629>;
+		power-domains = <&sysc R8A7795_PD_A3VP>;
+		resets = <&cpg 629>;
+
+		renesas,fcp = <&fcpvi2>;
+	};
+
+	csi21: csi2@fea90000 {
+		compatible = "renesas,r8a7795-csi2";
+		reg = <0 0xfea90000 0 0x10000>;
+		interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cpg CPG_MOD 713>;
+		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+		resets = <&cpg 713>;
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@1 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				reg = <1>;
+
+				csi21vin0: endpoint@0 {
+					reg = <0>;
+					remote-endpoint = <&vin0csi21>;
+				};
+				csi21vin1: endpoint@1 {
+					reg = <1>;
+					remote-endpoint = <&vin1csi21>;
+				};
+				csi21vin2: endpoint@2 {
+					reg = <2>;
+					remote-endpoint = <&vin2csi21>;
+				};
+				csi21vin3: endpoint@3 {
+					reg = <3>;
+					remote-endpoint = <&vin3csi21>;
+				};
+				csi21vin4: endpoint@4 {
+					reg = <4>;
+					remote-endpoint = <&vin4csi21>;
+				};
+				csi21vin5: endpoint@5 {
+					reg = <5>;
+					remote-endpoint = <&vin5csi21>;
+				};
+				csi21vin6: endpoint@6 {
+					reg = <6>;
+					remote-endpoint = <&vin6csi21>;
+				};
+				csi21vin7: endpoint@7 {
+					reg = <7>;
+					remote-endpoint = <&vin7csi21>;
+				};
+			};
+		};
+	};
+};
+
+&vin0 {
+	ports {
+		port@1 {
+			vin0csi21: endpoint@1 {
+				reg = <1>;
+				remote-endpoint = <&csi21vin0>;
+			};
+		};
+	};
+};
+
+&vin1 {
+	ports {
+		port@1 {
+			vin1csi21: endpoint@1 {
+				reg = <1>;
+				remote-endpoint = <&csi21vin1>;
+			};
+		};
+	};
+};
+
+&vin2 {
+	ports {
+		port@1 {
+			vin2csi21: endpoint@1 {
+				reg = <1>;
+				remote-endpoint = <&csi21vin2>;
+			};
+		};
+	};
+};
+
+&vin3 {
+	ports {
+		port@1 {
+			vin3csi21: endpoint@1 {
+				reg = <1>;
+				remote-endpoint = <&csi21vin3>;
+			};
+		};
+	};
+};
+
+&vin4 {
+	ports {
+		port@1 {
+			vin4csi21: endpoint@1 {
+				reg = <1>;
+				remote-endpoint = <&csi21vin4>;
+			};
+		};
+	};
+};
+
+&vin5 {
+	ports {
+		port@1 {
+			vin5csi21: endpoint@1 {
+				reg = <1>;
+				remote-endpoint = <&csi21vin5>;
+			};
+		};
+	};
+};
+
+&vin6 {
+	ports {
+		port@1 {
+			vin6csi21: endpoint@1 {
+				reg = <1>;
+				remote-endpoint = <&csi21vin6>;
+			};
+		};
+	};
+};
+
+&vin7 {
+	ports {
+		port@1 {
+			vin7csi21: endpoint@1 {
+				reg = <1>;
+				remote-endpoint = <&csi21vin7>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77951-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a77951-salvator-x.dts
new file mode 100644
index 0000000..a402a2f
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77951-salvator-x.dts
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the Salvator-X board with R-Car H3 ES2.0
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+#include "r8a77951.dtsi"
+#include "salvator-x.dtsi"
+
+/ {
+	model = "Renesas Salvator-X board based on r8a77951";
+	compatible = "renesas,salvator-x", "renesas,r8a7795";
+
+	memory@48000000 {
+		device_type = "memory";
+		/* first 128MB is reserved for secure area. */
+		reg = <0x0 0x48000000 0x0 0x38000000>;
+	};
+
+	memory@500000000 {
+		device_type = "memory";
+		reg = <0x5 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@600000000 {
+		device_type = "memory";
+		reg = <0x6 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@700000000 {
+		device_type = "memory";
+		reg = <0x7 0x00000000 0x0 0x40000000>;
+	};
+};
+
+&du {
+	clocks = <&cpg CPG_MOD 724>,
+		 <&cpg CPG_MOD 723>,
+		 <&cpg CPG_MOD 722>,
+		 <&cpg CPG_MOD 721>,
+		 <&versaclock5 1>,
+		 <&x21_clk>,
+		 <&x22_clk>,
+		 <&versaclock5 2>;
+	clock-names = "du.0", "du.1", "du.2", "du.3",
+		      "dclkin.0", "dclkin.1", "dclkin.2", "dclkin.3";
+};
+
+&ehci2 {
+	status = "okay";
+};
+
+&hdmi0 {
+	status = "okay";
+
+	ports {
+		port@1 {
+			reg = <1>;
+			rcar_dw_hdmi0_out: endpoint {
+				remote-endpoint = <&hdmi0_con>;
+			};
+		};
+		port@2 {
+			reg = <2>;
+			dw_hdmi0_snd_in: endpoint {
+				remote-endpoint = <&rsnd_endpoint1>;
+			};
+		};
+	};
+};
+
+&hdmi0_con {
+	remote-endpoint = <&rcar_dw_hdmi0_out>;
+};
+
+&hdmi1 {
+	status = "okay";
+
+	ports {
+		port@1 {
+			reg = <1>;
+			rcar_dw_hdmi1_out: endpoint {
+				remote-endpoint = <&hdmi1_con>;
+			};
+		};
+		port@2 {
+			reg = <2>;
+			dw_hdmi1_snd_in: endpoint {
+				remote-endpoint = <&rsnd_endpoint2>;
+			};
+		};
+	};
+};
+
+&hdmi1_con {
+	remote-endpoint = <&rcar_dw_hdmi1_out>;
+};
+
+&ohci2 {
+	status = "okay";
+};
+
+&pfc {
+	usb2_pins: usb2 {
+		groups = "usb2";
+		function = "usb2";
+	};
+};
+
+&rcar_sound {
+	ports {
+		/* rsnd_port0 is on salvator-common */
+		rsnd_port1: port@1 {
+			reg = <1>;
+			rsnd_endpoint1: endpoint {
+				remote-endpoint = <&dw_hdmi0_snd_in>;
+
+				dai-format = "i2s";
+				bitclock-master = <&rsnd_endpoint1>;
+				frame-master = <&rsnd_endpoint1>;
+
+				playback = <&ssi2>;
+			};
+		};
+		rsnd_port2: port@2 {
+			reg = <2>;
+			rsnd_endpoint2: endpoint {
+				remote-endpoint = <&dw_hdmi1_snd_in>;
+
+				dai-format = "i2s";
+				bitclock-master = <&rsnd_endpoint2>;
+				frame-master = <&rsnd_endpoint2>;
+
+				playback = <&ssi3>;
+			};
+		};
+	};
+};
+
+&sata {
+	status = "okay";
+};
+
+&sound_card {
+	dais = <&rsnd_port0	/* ak4613 */
+		&rsnd_port1	/* HDMI0  */
+		&rsnd_port2>;	/* HDMI1  */
+};
+
+&usb2_phy2 {
+	pinctrl-0 = <&usb2_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77951-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a77951-salvator-xs.dts
new file mode 100644
index 0000000..cef9da4
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77951-salvator-xs.dts
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the Salvator-X 2nd version board with R-Car H3 ES2.0+
+ *
+ * Copyright (C) 2015-2017 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+#include "r8a77951.dtsi"
+#include "salvator-xs.dtsi"
+
+/ {
+	model = "Renesas Salvator-X 2nd version board based on r8a77951";
+	compatible = "renesas,salvator-xs", "renesas,r8a7795";
+
+	memory@48000000 {
+		device_type = "memory";
+		/* first 128MB is reserved for secure area. */
+		reg = <0x0 0x48000000 0x0 0x38000000>;
+	};
+
+	memory@500000000 {
+		device_type = "memory";
+		reg = <0x5 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@600000000 {
+		device_type = "memory";
+		reg = <0x6 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@700000000 {
+		device_type = "memory";
+		reg = <0x7 0x00000000 0x0 0x40000000>;
+	};
+};
+
+&du {
+	clocks = <&cpg CPG_MOD 724>,
+		 <&cpg CPG_MOD 723>,
+		 <&cpg CPG_MOD 722>,
+		 <&cpg CPG_MOD 721>,
+		 <&versaclock6 1>,
+		 <&x21_clk>,
+		 <&x22_clk>,
+		 <&versaclock6 2>;
+	clock-names = "du.0", "du.1", "du.2", "du.3",
+		      "dclkin.0", "dclkin.1", "dclkin.2", "dclkin.3";
+};
+
+&ehci2 {
+	status = "okay";
+};
+
+&ehci3 {
+	dr_mode = "otg";
+	status = "okay";
+};
+
+&hdmi0 {
+	status = "okay";
+
+	ports {
+		port@1 {
+			reg = <1>;
+			rcar_dw_hdmi0_out: endpoint {
+				remote-endpoint = <&hdmi0_con>;
+			};
+		};
+		port@2 {
+			reg = <2>;
+			dw_hdmi0_snd_in: endpoint {
+				remote-endpoint = <&rsnd_endpoint1>;
+			};
+		};
+	};
+};
+
+&hdmi0_con {
+	remote-endpoint = <&rcar_dw_hdmi0_out>;
+};
+
+&hdmi1 {
+	status = "okay";
+
+	ports {
+		port@1 {
+			reg = <1>;
+			rcar_dw_hdmi1_out: endpoint {
+				remote-endpoint = <&hdmi1_con>;
+			};
+		};
+		port@2 {
+			reg = <2>;
+			dw_hdmi1_snd_in: endpoint {
+				remote-endpoint = <&rsnd_endpoint2>;
+			};
+		};
+	};
+};
+
+&hdmi1_con {
+	remote-endpoint = <&rcar_dw_hdmi1_out>;
+};
+
+&hsusb3 {
+	dr_mode = "otg";
+	status = "okay";
+};
+
+&ohci2 {
+	status = "okay";
+};
+
+&ohci3 {
+	dr_mode = "otg";
+	status = "okay";
+};
+
+&pca9654 {
+	pcie_sata_switch {
+		gpio-hog;
+		gpios = <7 GPIO_ACTIVE_HIGH>;
+		output-low; /* enable SATA by default */
+		line-name = "PCIE/SATA switch";
+	};
+};
+
+&pfc {
+	usb2_pins: usb2 {
+		groups = "usb2";
+		function = "usb2";
+	};
+
+	/*
+	 * - On Salvator-X[S], GP6_3[01] are connected to ADV7482 as irq pins
+	 *   (when SW31 is the default setting on Salvator-XS).
+	 * - If SW31 is the default setting, you cannot use USB2.0 ch3 on
+	 *   r8a77951 with Salvator-XS.
+	 *   Hence the SW31 setting must be changed like 2) below.
+	 *   1) Default setting of SW31: ON-ON-OFF-OFF-OFF-OFF:
+	 *	- Connect GP6_3[01] to ADV7842.
+	 *   2) Changed setting of SW31: OFF-OFF-ON-ON-ON-ON:
+	 *	- Connect GP6_3[01] to BD082065 (USB2.0 ch3's host power).
+	 *	- Connect GP6_{04,21} to ADV7842.
+	 */
+	usb2_ch3_pins: usb2_ch3 {
+		groups = "usb2_ch3";
+		function = "usb2_ch3";
+	};
+};
+
+&rcar_sound {
+	ports {
+		/* rsnd_port0 is on salvator-common */
+		rsnd_port1: port@1 {
+			reg = <1>;
+			rsnd_endpoint1: endpoint {
+				remote-endpoint = <&dw_hdmi0_snd_in>;
+
+				dai-format = "i2s";
+				bitclock-master = <&rsnd_endpoint1>;
+				frame-master = <&rsnd_endpoint1>;
+
+				playback = <&ssi2>;
+			};
+		};
+		rsnd_port2: port@2 {
+			reg = <2>;
+			rsnd_endpoint2: endpoint {
+				remote-endpoint = <&dw_hdmi1_snd_in>;
+
+				dai-format = "i2s";
+				bitclock-master = <&rsnd_endpoint2>;
+				frame-master = <&rsnd_endpoint2>;
+
+				playback = <&ssi3>;
+			};
+		};
+	};
+};
+
+/* SW12-7 must be set 'Off' (MD12 set to 1) which is not the default! */
+&sata {
+	status = "okay";
+};
+
+&sound_card {
+	dais = <&rsnd_port0	/* ak4613 */
+		&rsnd_port1	/* HDMI0  */
+		&rsnd_port2>;	/* HDMI1  */
+};
+
+&usb2_phy2 {
+	pinctrl-0 = <&usb2_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
+
+&usb2_phy3 {
+	pinctrl-0 = <&usb2_ch3_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77951-ulcb-kf.dts b/arch/arm64/boot/dts/renesas/r8a77951-ulcb-kf.dts
new file mode 100644
index 0000000..11f943a
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77951-ulcb-kf.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the H3ULCB Kingfisher board
+ *
+ * Copyright (C) 2017 Renesas Electronics Corp.
+ * Copyright (C) 2017 Cogent Embedded, Inc.
+ */
+
+#include "r8a77951-ulcb.dts"
+#include "ulcb-kf.dtsi"
+
+/ {
+	model = "Renesas H3ULCB Kingfisher board based on r8a77951";
+	compatible = "shimafuji,kingfisher", "renesas,h3ulcb",
+		     "renesas,r8a7795";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77951-ulcb.dts b/arch/arm64/boot/dts/renesas/r8a77951-ulcb.dts
new file mode 100644
index 0000000..8ad8f2a
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77951-ulcb.dts
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the H3ULCB (R-Car Starter Kit Premier) board
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2016 Cogent Embedded, Inc.
+ */
+
+/dts-v1/;
+#include "r8a77951.dtsi"
+#include "ulcb.dtsi"
+
+/ {
+	model = "Renesas H3ULCB board based on r8a77951";
+	compatible = "renesas,h3ulcb", "renesas,r8a7795";
+
+	memory@48000000 {
+		device_type = "memory";
+		/* first 128MB is reserved for secure area. */
+		reg = <0x0 0x48000000 0x0 0x38000000>;
+	};
+
+	memory@500000000 {
+		device_type = "memory";
+		reg = <0x5 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@600000000 {
+		device_type = "memory";
+		reg = <0x6 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@700000000 {
+		device_type = "memory";
+		reg = <0x7 0x00000000 0x0 0x40000000>;
+	};
+};
+
+&du {
+	clocks = <&cpg CPG_MOD 724>,
+		 <&cpg CPG_MOD 723>,
+		 <&cpg CPG_MOD 722>,
+		 <&cpg CPG_MOD 721>,
+		 <&versaclock5 1>,
+		 <&versaclock5 3>,
+		 <&versaclock5 4>,
+		 <&versaclock5 2>;
+	clock-names = "du.0", "du.1", "du.2", "du.3",
+		      "dclkin.0", "dclkin.1", "dclkin.2", "dclkin.3";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77951.dtsi b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
new file mode 100644
index 0000000..a8729eb7
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
@@ -0,0 +1,3339 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the R-Car H3 (R8A77951) SoC
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+#include <dt-bindings/clock/r8a7795-cpg-mssr.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/power/r8a7795-sysc.h>
+
+#define CPG_AUDIO_CLK_I		R8A7795_CLK_S0D4
+
+/ {
+	compatible = "renesas,r8a7795";
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	aliases {
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		i2c2 = &i2c2;
+		i2c3 = &i2c3;
+		i2c4 = &i2c4;
+		i2c5 = &i2c5;
+		i2c6 = &i2c6;
+		i2c7 = &i2c_dvfs;
+	};
+
+	/*
+	 * The external audio clocks are configured as 0 Hz fixed frequency
+	 * clocks by default.
+	 * Boards that provide audio clocks should override them.
+	 */
+	audio_clk_a: audio_clk_a {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	audio_clk_b: audio_clk_b {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	audio_clk_c: audio_clk_c {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	/* External CAN clock - to be overridden by boards that provide it */
+	can_clk: can {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	cluster0_opp: opp_table0 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp-500000000 {
+			opp-hz = /bits/ 64 <500000000>;
+			opp-microvolt = <830000>;
+			clock-latency-ns = <300000>;
+		};
+		opp-1000000000 {
+			opp-hz = /bits/ 64 <1000000000>;
+			opp-microvolt = <830000>;
+			clock-latency-ns = <300000>;
+		};
+		opp-1500000000 {
+			opp-hz = /bits/ 64 <1500000000>;
+			opp-microvolt = <830000>;
+			clock-latency-ns = <300000>;
+			opp-suspend;
+		};
+		opp-1600000000 {
+			opp-hz = /bits/ 64 <1600000000>;
+			opp-microvolt = <900000>;
+			clock-latency-ns = <300000>;
+			turbo-mode;
+		};
+		opp-1700000000 {
+			opp-hz = /bits/ 64 <1700000000>;
+			opp-microvolt = <960000>;
+			clock-latency-ns = <300000>;
+			turbo-mode;
+		};
+	};
+
+	cluster1_opp: opp_table1 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp-800000000 {
+			opp-hz = /bits/ 64 <800000000>;
+			opp-microvolt = <820000>;
+			clock-latency-ns = <300000>;
+		};
+		opp-1000000000 {
+			opp-hz = /bits/ 64 <1000000000>;
+			opp-microvolt = <820000>;
+			clock-latency-ns = <300000>;
+		};
+		opp-1200000000 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-microvolt = <820000>;
+			clock-latency-ns = <300000>;
+		};
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&a57_0>;
+				};
+				core1 {
+					cpu = <&a57_1>;
+				};
+				core2 {
+					cpu = <&a57_2>;
+				};
+				core3 {
+					cpu = <&a57_3>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&a53_0>;
+				};
+				core1 {
+					cpu = <&a53_1>;
+				};
+				core2 {
+					cpu = <&a53_2>;
+				};
+				core3 {
+					cpu = <&a53_3>;
+				};
+			};
+		};
+
+		a57_0: cpu@0 {
+			compatible = "arm,cortex-a57";
+			reg = <0x0>;
+			device_type = "cpu";
+			power-domains = <&sysc R8A7795_PD_CA57_CPU0>;
+			next-level-cache = <&L2_CA57>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0>;
+			dynamic-power-coefficient = <854>;
+			clocks = <&cpg CPG_CORE R8A7795_CLK_Z>;
+			operating-points-v2 = <&cluster0_opp>;
+			capacity-dmips-mhz = <1024>;
+			#cooling-cells = <2>;
+		};
+
+		a57_1: cpu@1 {
+			compatible = "arm,cortex-a57";
+			reg = <0x1>;
+			device_type = "cpu";
+			power-domains = <&sysc R8A7795_PD_CA57_CPU1>;
+			next-level-cache = <&L2_CA57>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0>;
+			clocks = <&cpg CPG_CORE R8A7795_CLK_Z>;
+			operating-points-v2 = <&cluster0_opp>;
+			capacity-dmips-mhz = <1024>;
+			#cooling-cells = <2>;
+		};
+
+		a57_2: cpu@2 {
+			compatible = "arm,cortex-a57";
+			reg = <0x2>;
+			device_type = "cpu";
+			power-domains = <&sysc R8A7795_PD_CA57_CPU2>;
+			next-level-cache = <&L2_CA57>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0>;
+			clocks = <&cpg CPG_CORE R8A7795_CLK_Z>;
+			operating-points-v2 = <&cluster0_opp>;
+			capacity-dmips-mhz = <1024>;
+			#cooling-cells = <2>;
+		};
+
+		a57_3: cpu@3 {
+			compatible = "arm,cortex-a57";
+			reg = <0x3>;
+			device_type = "cpu";
+			power-domains = <&sysc R8A7795_PD_CA57_CPU3>;
+			next-level-cache = <&L2_CA57>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0>;
+			clocks = <&cpg CPG_CORE R8A7795_CLK_Z>;
+			operating-points-v2 = <&cluster0_opp>;
+			capacity-dmips-mhz = <1024>;
+			#cooling-cells = <2>;
+		};
+
+		a53_0: cpu@100 {
+			compatible = "arm,cortex-a53";
+			reg = <0x100>;
+			device_type = "cpu";
+			power-domains = <&sysc R8A7795_PD_CA53_CPU0>;
+			next-level-cache = <&L2_CA53>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_1>;
+			#cooling-cells = <2>;
+			dynamic-power-coefficient = <277>;
+			clocks = <&cpg CPG_CORE R8A7795_CLK_Z2>;
+			operating-points-v2 = <&cluster1_opp>;
+			capacity-dmips-mhz = <535>;
+		};
+
+		a53_1: cpu@101 {
+			compatible = "arm,cortex-a53";
+			reg = <0x101>;
+			device_type = "cpu";
+			power-domains = <&sysc R8A7795_PD_CA53_CPU1>;
+			next-level-cache = <&L2_CA53>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_1>;
+			clocks = <&cpg CPG_CORE R8A7795_CLK_Z2>;
+			operating-points-v2 = <&cluster1_opp>;
+			capacity-dmips-mhz = <535>;
+		};
+
+		a53_2: cpu@102 {
+			compatible = "arm,cortex-a53";
+			reg = <0x102>;
+			device_type = "cpu";
+			power-domains = <&sysc R8A7795_PD_CA53_CPU2>;
+			next-level-cache = <&L2_CA53>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_1>;
+			clocks = <&cpg CPG_CORE R8A7795_CLK_Z2>;
+			operating-points-v2 = <&cluster1_opp>;
+			capacity-dmips-mhz = <535>;
+		};
+
+		a53_3: cpu@103 {
+			compatible = "arm,cortex-a53";
+			reg = <0x103>;
+			device_type = "cpu";
+			power-domains = <&sysc R8A7795_PD_CA53_CPU3>;
+			next-level-cache = <&L2_CA53>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_1>;
+			clocks = <&cpg CPG_CORE R8A7795_CLK_Z2>;
+			operating-points-v2 = <&cluster1_opp>;
+			capacity-dmips-mhz = <535>;
+		};
+
+		L2_CA57: cache-controller-0 {
+			compatible = "cache";
+			power-domains = <&sysc R8A7795_PD_CA57_SCU>;
+			cache-unified;
+			cache-level = <2>;
+		};
+
+		L2_CA53: cache-controller-1 {
+			compatible = "cache";
+			power-domains = <&sysc R8A7795_PD_CA53_SCU>;
+			cache-unified;
+			cache-level = <2>;
+		};
+
+		idle-states {
+			entry-method = "psci";
+
+			CPU_SLEEP_0: cpu-sleep-0 {
+				compatible = "arm,idle-state";
+				arm,psci-suspend-param = <0x0010000>;
+				local-timer-stop;
+				entry-latency-us = <400>;
+				exit-latency-us = <500>;
+				min-residency-us = <4000>;
+			};
+
+			CPU_SLEEP_1: cpu-sleep-1 {
+				compatible = "arm,idle-state";
+				arm,psci-suspend-param = <0x0010000>;
+				local-timer-stop;
+				entry-latency-us = <700>;
+				exit-latency-us = <700>;
+				min-residency-us = <5000>;
+			};
+		};
+	};
+
+	extal_clk: extal {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		/* This value must be overridden by the board */
+		clock-frequency = <0>;
+	};
+
+	extalr_clk: extalr {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		/* This value must be overridden by the board */
+		clock-frequency = <0>;
+	};
+
+	/* External PCIe clock - can be overridden by the board */
+	pcie_bus_clk: pcie_bus {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	pmu_a53 {
+		compatible = "arm,cortex-a53-pmu";
+		interrupts-extended = <&gic GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
+				      <&gic GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
+				      <&gic GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
+				      <&gic GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&a53_0>,
+				     <&a53_1>,
+				     <&a53_2>,
+				     <&a53_3>;
+	};
+
+	pmu_a57 {
+		compatible = "arm,cortex-a57-pmu";
+		interrupts-extended = <&gic GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+				      <&gic GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+				      <&gic GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
+				      <&gic GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&a57_0>,
+				     <&a57_1>,
+				     <&a57_2>,
+				     <&a57_3>;
+	};
+
+	psci {
+		compatible = "arm,psci-1.0", "arm,psci-0.2";
+		method = "smc";
+	};
+
+	/* External SCIF clock - to be overridden by boards that provide it */
+	scif_clk: scif {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	soc: soc {
+		compatible = "simple-bus";
+		interrupt-parent = <&gic>;
+
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		rwdt: watchdog@e6020000 {
+			compatible = "renesas,r8a7795-wdt", "renesas,rcar-gen3-wdt";
+			reg = <0 0xe6020000 0 0x0c>;
+			clocks = <&cpg CPG_MOD 402>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 402>;
+			status = "disabled";
+		};
+
+		gpio0: gpio@e6050000 {
+			compatible = "renesas,gpio-r8a7795",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6050000 0 0x50>;
+			interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 0 16>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 912>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 912>;
+		};
+
+		gpio1: gpio@e6051000 {
+			compatible = "renesas,gpio-r8a7795",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6051000 0 0x50>;
+			interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 32 29>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 911>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 911>;
+		};
+
+		gpio2: gpio@e6052000 {
+			compatible = "renesas,gpio-r8a7795",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6052000 0 0x50>;
+			interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 64 15>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 910>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 910>;
+		};
+
+		gpio3: gpio@e6053000 {
+			compatible = "renesas,gpio-r8a7795",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6053000 0 0x50>;
+			interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 96 16>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 909>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 909>;
+		};
+
+		gpio4: gpio@e6054000 {
+			compatible = "renesas,gpio-r8a7795",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6054000 0 0x50>;
+			interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 128 18>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 908>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 908>;
+		};
+
+		gpio5: gpio@e6055000 {
+			compatible = "renesas,gpio-r8a7795",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6055000 0 0x50>;
+			interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 160 26>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 907>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 907>;
+		};
+
+		gpio6: gpio@e6055400 {
+			compatible = "renesas,gpio-r8a7795",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6055400 0 0x50>;
+			interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 192 32>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 906>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 906>;
+		};
+
+		gpio7: gpio@e6055800 {
+			compatible = "renesas,gpio-r8a7795",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6055800 0 0x50>;
+			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 224 4>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 905>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 905>;
+		};
+
+		pfc: pin-controller@e6060000 {
+			compatible = "renesas,pfc-r8a7795";
+			reg = <0 0xe6060000 0 0x50c>;
+		};
+
+		cmt0: timer@e60f0000 {
+			compatible = "renesas,r8a7795-cmt0",
+				     "renesas,rcar-gen3-cmt0";
+			reg = <0 0xe60f0000 0 0x1004>;
+			interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 303>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 303>;
+			status = "disabled";
+		};
+
+		cmt1: timer@e6130000 {
+			compatible = "renesas,r8a7795-cmt1",
+				     "renesas,rcar-gen3-cmt1";
+			reg = <0 0xe6130000 0 0x1004>;
+			interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 302>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 302>;
+			status = "disabled";
+		};
+
+		cmt2: timer@e6140000 {
+			compatible = "renesas,r8a7795-cmt1",
+				     "renesas,rcar-gen3-cmt1";
+			reg = <0 0xe6140000 0 0x1004>;
+			interrupts = <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 301>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 301>;
+			status = "disabled";
+		};
+
+		cmt3: timer@e6148000 {
+			compatible = "renesas,r8a7795-cmt1",
+				     "renesas,rcar-gen3-cmt1";
+			reg = <0 0xe6148000 0 0x1004>;
+			interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 300>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 300>;
+			status = "disabled";
+		};
+
+		cpg: clock-controller@e6150000 {
+			compatible = "renesas,r8a7795-cpg-mssr";
+			reg = <0 0xe6150000 0 0x1000>;
+			clocks = <&extal_clk>, <&extalr_clk>;
+			clock-names = "extal", "extalr";
+			#clock-cells = <2>;
+			#power-domain-cells = <0>;
+			#reset-cells = <1>;
+		};
+
+		rst: reset-controller@e6160000 {
+			compatible = "renesas,r8a7795-rst";
+			reg = <0 0xe6160000 0 0x0200>;
+		};
+
+		sysc: system-controller@e6180000 {
+			compatible = "renesas,r8a7795-sysc";
+			reg = <0 0xe6180000 0 0x0400>;
+			#power-domain-cells = <1>;
+		};
+
+		tsc: thermal@e6198000 {
+			compatible = "renesas,r8a7795-thermal";
+			reg = <0 0xe6198000 0 0x100>,
+			      <0 0xe61a0000 0 0x100>,
+			      <0 0xe61a8000 0 0x100>;
+			interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 522>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 522>;
+			#thermal-sensor-cells = <1>;
+		};
+
+		intc_ex: interrupt-controller@e61c0000 {
+			compatible = "renesas,intc-ex-r8a7795", "renesas,irqc";
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			reg = <0 0xe61c0000 0 0x200>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 407>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 407>;
+		};
+
+		i2c0: i2c@e6500000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7795",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe6500000 0 0x40>;
+			interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 931>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 931>;
+			dmas = <&dmac1 0x91>, <&dmac1 0x90>,
+			       <&dmac2 0x91>, <&dmac2 0x90>;
+			dma-names = "tx", "rx", "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c1: i2c@e6508000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7795",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe6508000 0 0x40>;
+			interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 930>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 930>;
+			dmas = <&dmac1 0x93>, <&dmac1 0x92>,
+			       <&dmac2 0x93>, <&dmac2 0x92>;
+			dma-names = "tx", "rx", "tx", "rx";
+			i2c-scl-internal-delay-ns = <6>;
+			status = "disabled";
+		};
+
+		i2c2: i2c@e6510000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7795",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe6510000 0 0x40>;
+			interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 929>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 929>;
+			dmas = <&dmac1 0x95>, <&dmac1 0x94>,
+			       <&dmac2 0x95>, <&dmac2 0x94>;
+			dma-names = "tx", "rx", "tx", "rx";
+			i2c-scl-internal-delay-ns = <6>;
+			status = "disabled";
+		};
+
+		i2c3: i2c@e66d0000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7795",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe66d0000 0 0x40>;
+			interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 928>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 928>;
+			dmas = <&dmac0 0x97>, <&dmac0 0x96>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c4: i2c@e66d8000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7795",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe66d8000 0 0x40>;
+			interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 927>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 927>;
+			dmas = <&dmac0 0x99>, <&dmac0 0x98>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c5: i2c@e66e0000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7795",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe66e0000 0 0x40>;
+			interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 919>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 919>;
+			dmas = <&dmac0 0x9b>, <&dmac0 0x9a>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c6: i2c@e66e8000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7795",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe66e8000 0 0x40>;
+			interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 918>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 918>;
+			dmas = <&dmac0 0x9d>, <&dmac0 0x9c>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <6>;
+			status = "disabled";
+		};
+
+		i2c_dvfs: i2c@e60b0000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,iic-r8a7795",
+				     "renesas,rcar-gen3-iic",
+				     "renesas,rmobile-iic";
+			reg = <0 0xe60b0000 0 0x425>;
+			interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 926>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 926>;
+			dmas = <&dmac0 0x11>, <&dmac0 0x10>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		hscif0: serial@e6540000 {
+			compatible = "renesas,hscif-r8a7795",
+				     "renesas,rcar-gen3-hscif",
+				     "renesas,hscif";
+			reg = <0 0xe6540000 0 96>;
+			interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 520>,
+				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x31>, <&dmac1 0x30>,
+			       <&dmac2 0x31>, <&dmac2 0x30>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 520>;
+			status = "disabled";
+		};
+
+		hscif1: serial@e6550000 {
+			compatible = "renesas,hscif-r8a7795",
+				     "renesas,rcar-gen3-hscif",
+				     "renesas,hscif";
+			reg = <0 0xe6550000 0 96>;
+			interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 519>,
+				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x33>, <&dmac1 0x32>,
+			       <&dmac2 0x33>, <&dmac2 0x32>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 519>;
+			status = "disabled";
+		};
+
+		hscif2: serial@e6560000 {
+			compatible = "renesas,hscif-r8a7795",
+				     "renesas,rcar-gen3-hscif",
+				     "renesas,hscif";
+			reg = <0 0xe6560000 0 96>;
+			interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 518>,
+				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x35>, <&dmac1 0x34>,
+			       <&dmac2 0x35>, <&dmac2 0x34>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 518>;
+			status = "disabled";
+		};
+
+		hscif3: serial@e66a0000 {
+			compatible = "renesas,hscif-r8a7795",
+				     "renesas,rcar-gen3-hscif",
+				     "renesas,hscif";
+			reg = <0 0xe66a0000 0 96>;
+			interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 517>,
+				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x37>, <&dmac0 0x36>;
+			dma-names = "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 517>;
+			status = "disabled";
+		};
+
+		hscif4: serial@e66b0000 {
+			compatible = "renesas,hscif-r8a7795",
+				     "renesas,rcar-gen3-hscif",
+				     "renesas,hscif";
+			reg = <0 0xe66b0000 0 96>;
+			interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 516>,
+				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x39>, <&dmac0 0x38>;
+			dma-names = "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 516>;
+			status = "disabled";
+		};
+
+		hsusb: usb@e6590000 {
+			compatible = "renesas,usbhs-r8a7795",
+				     "renesas,rcar-gen3-usbhs";
+			reg = <0 0xe6590000 0 0x200>;
+			interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 704>, <&cpg CPG_MOD 703>;
+			dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
+			       <&usb_dmac1 0>, <&usb_dmac1 1>;
+			dma-names = "ch0", "ch1", "ch2", "ch3";
+			renesas,buswait = <11>;
+			phys = <&usb2_phy0 3>;
+			phy-names = "usb";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 704>, <&cpg 703>;
+			status = "disabled";
+		};
+
+		hsusb3: usb@e659c000 {
+			compatible = "renesas,usbhs-r8a7795",
+				     "renesas,rcar-gen3-usbhs";
+			reg = <0 0xe659c000 0 0x200>;
+			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 705>, <&cpg CPG_MOD 700>;
+			dmas = <&usb_dmac2 0>, <&usb_dmac2 1>,
+			       <&usb_dmac3 0>, <&usb_dmac3 1>;
+			dma-names = "ch0", "ch1", "ch2", "ch3";
+			renesas,buswait = <11>;
+			phys = <&usb2_phy3 3>;
+			phy-names = "usb";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 705>, <&cpg 700>;
+			status = "disabled";
+		};
+
+		usb_dmac0: dma-controller@e65a0000 {
+			compatible = "renesas,r8a7795-usb-dmac",
+				     "renesas,usb-dmac";
+			reg = <0 0xe65a0000 0 0x100>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "ch0", "ch1";
+			clocks = <&cpg CPG_MOD 330>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 330>;
+			#dma-cells = <1>;
+			dma-channels = <2>;
+		};
+
+		usb_dmac1: dma-controller@e65b0000 {
+			compatible = "renesas,r8a7795-usb-dmac",
+				     "renesas,usb-dmac";
+			reg = <0 0xe65b0000 0 0x100>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "ch0", "ch1";
+			clocks = <&cpg CPG_MOD 331>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 331>;
+			#dma-cells = <1>;
+			dma-channels = <2>;
+		};
+
+		usb_dmac2: dma-controller@e6460000 {
+			compatible = "renesas,r8a7795-usb-dmac",
+				     "renesas,usb-dmac";
+			reg = <0 0xe6460000 0 0x100>;
+			interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "ch0", "ch1";
+			clocks = <&cpg CPG_MOD 326>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 326>;
+			#dma-cells = <1>;
+			dma-channels = <2>;
+		};
+
+		usb_dmac3: dma-controller@e6470000 {
+			compatible = "renesas,r8a7795-usb-dmac",
+				     "renesas,usb-dmac";
+			reg = <0 0xe6470000 0 0x100>;
+			interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "ch0", "ch1";
+			clocks = <&cpg CPG_MOD 329>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 329>;
+			#dma-cells = <1>;
+			dma-channels = <2>;
+		};
+
+		usb3_phy0: usb-phy@e65ee000 {
+			compatible = "renesas,r8a7795-usb3-phy",
+				     "renesas,rcar-gen3-usb3-phy";
+			reg = <0 0xe65ee000 0 0x90>;
+			clocks = <&cpg CPG_MOD 328>, <&usb3s0_clk>,
+				 <&usb_extal_clk>;
+			clock-names = "usb3-if", "usb3s_clk", "usb_extal";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 328>;
+			#phy-cells = <0>;
+			status = "disabled";
+		};
+
+		arm_cc630p: crypto@e6601000 {
+			compatible = "arm,cryptocell-630p-ree";
+			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+			reg = <0x0 0xe6601000 0 0x1000>;
+			clocks = <&cpg CPG_MOD 229>;
+			resets = <&cpg 229>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+		};
+
+		dmac0: dma-controller@e6700000 {
+			compatible = "renesas,dmac-r8a7795",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe6700000 0 0x10000>;
+			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 219>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 219>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+			iommus = <&ipmmu_ds0 0>, <&ipmmu_ds0 1>,
+			       <&ipmmu_ds0 2>, <&ipmmu_ds0 3>,
+			       <&ipmmu_ds0 4>, <&ipmmu_ds0 5>,
+			       <&ipmmu_ds0 6>, <&ipmmu_ds0 7>,
+			       <&ipmmu_ds0 8>, <&ipmmu_ds0 9>,
+			       <&ipmmu_ds0 10>, <&ipmmu_ds0 11>,
+			       <&ipmmu_ds0 12>, <&ipmmu_ds0 13>,
+			       <&ipmmu_ds0 14>, <&ipmmu_ds0 15>;
+		};
+
+		dmac1: dma-controller@e7300000 {
+			compatible = "renesas,dmac-r8a7795",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe7300000 0 0x10000>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 218>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 218>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+			iommus = <&ipmmu_ds1 0>, <&ipmmu_ds1 1>,
+			       <&ipmmu_ds1 2>, <&ipmmu_ds1 3>,
+			       <&ipmmu_ds1 4>, <&ipmmu_ds1 5>,
+			       <&ipmmu_ds1 6>, <&ipmmu_ds1 7>,
+			       <&ipmmu_ds1 8>, <&ipmmu_ds1 9>,
+			       <&ipmmu_ds1 10>, <&ipmmu_ds1 11>,
+			       <&ipmmu_ds1 12>, <&ipmmu_ds1 13>,
+			       <&ipmmu_ds1 14>, <&ipmmu_ds1 15>;
+		};
+
+		dmac2: dma-controller@e7310000 {
+			compatible = "renesas,dmac-r8a7795",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe7310000 0 0x10000>;
+			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 217>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 217>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+			iommus = <&ipmmu_ds1 16>, <&ipmmu_ds1 17>,
+			       <&ipmmu_ds1 18>, <&ipmmu_ds1 19>,
+			       <&ipmmu_ds1 20>, <&ipmmu_ds1 21>,
+			       <&ipmmu_ds1 22>, <&ipmmu_ds1 23>,
+			       <&ipmmu_ds1 24>, <&ipmmu_ds1 25>,
+			       <&ipmmu_ds1 26>, <&ipmmu_ds1 27>,
+			       <&ipmmu_ds1 28>, <&ipmmu_ds1 29>,
+			       <&ipmmu_ds1 30>, <&ipmmu_ds1 31>;
+		};
+
+		ipmmu_ds0: mmu@e6740000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xe6740000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 0>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_ds1: mmu@e7740000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xe7740000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 1>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_hc: mmu@e6570000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xe6570000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 2>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_ir: mmu@ff8b0000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xff8b0000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 3>;
+			power-domains = <&sysc R8A7795_PD_A3IR>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_mm: mmu@e67b0000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xe67b0000 0 0x1000>;
+			interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_mp0: mmu@ec670000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xec670000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 4>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_pv0: mmu@fd800000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xfd800000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 6>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_pv1: mmu@fd950000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xfd950000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 7>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_pv2: mmu@fd960000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xfd960000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 8>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_pv3: mmu@fd970000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xfd970000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 9>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_rt: mmu@ffc80000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xffc80000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 10>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_vc0: mmu@fe6b0000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xfe6b0000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 12>;
+			power-domains = <&sysc R8A7795_PD_A3VC>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_vc1: mmu@fe6f0000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xfe6f0000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 13>;
+			power-domains = <&sysc R8A7795_PD_A3VC>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_vi0: mmu@febd0000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xfebd0000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 14>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_vi1: mmu@febe0000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xfebe0000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 15>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_vp0: mmu@fe990000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xfe990000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 16>;
+			power-domains = <&sysc R8A7795_PD_A3VP>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_vp1: mmu@fe980000 {
+			compatible = "renesas,ipmmu-r8a7795";
+			reg = <0 0xfe980000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 17>;
+			power-domains = <&sysc R8A7795_PD_A3VP>;
+			#iommu-cells = <1>;
+		};
+
+		avb: ethernet@e6800000 {
+			compatible = "renesas,etheravb-r8a7795",
+				     "renesas,etheravb-rcar-gen3";
+			reg = <0 0xe6800000 0 0x800>, <0 0xe6a00000 0 0x10000>;
+			interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "ch0", "ch1", "ch2", "ch3",
+					  "ch4", "ch5", "ch6", "ch7",
+					  "ch8", "ch9", "ch10", "ch11",
+					  "ch12", "ch13", "ch14", "ch15",
+					  "ch16", "ch17", "ch18", "ch19",
+					  "ch20", "ch21", "ch22", "ch23",
+					  "ch24";
+			clocks = <&cpg CPG_MOD 812>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 812>;
+			phy-mode = "rgmii";
+			iommus = <&ipmmu_ds0 16>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		can0: can@e6c30000 {
+			compatible = "renesas,can-r8a7795",
+				     "renesas,rcar-gen3-can";
+			reg = <0 0xe6c30000 0 0x1000>;
+			interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 916>,
+			       <&cpg CPG_CORE R8A7795_CLK_CANFD>,
+			       <&can_clk>;
+			clock-names = "clkp1", "clkp2", "can_clk";
+			assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
+			assigned-clock-rates = <40000000>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 916>;
+			status = "disabled";
+		};
+
+		can1: can@e6c38000 {
+			compatible = "renesas,can-r8a7795",
+				     "renesas,rcar-gen3-can";
+			reg = <0 0xe6c38000 0 0x1000>;
+			interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 915>,
+			       <&cpg CPG_CORE R8A7795_CLK_CANFD>,
+			       <&can_clk>;
+			clock-names = "clkp1", "clkp2", "can_clk";
+			assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
+			assigned-clock-rates = <40000000>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 915>;
+			status = "disabled";
+		};
+
+		canfd: can@e66c0000 {
+			compatible = "renesas,r8a7795-canfd",
+				     "renesas,rcar-gen3-canfd";
+			reg = <0 0xe66c0000 0 0x8000>;
+			interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+				   <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 914>,
+			       <&cpg CPG_CORE R8A7795_CLK_CANFD>,
+			       <&can_clk>;
+			clock-names = "fck", "canfd", "can_clk";
+			assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
+			assigned-clock-rates = <40000000>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 914>;
+			status = "disabled";
+
+			channel0 {
+				status = "disabled";
+			};
+
+			channel1 {
+				status = "disabled";
+			};
+		};
+
+		pwm0: pwm@e6e30000 {
+			compatible = "renesas,pwm-r8a7795", "renesas,pwm-rcar";
+			reg = <0 0xe6e30000 0 0x8>;
+			clocks = <&cpg CPG_MOD 523>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 523>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		pwm1: pwm@e6e31000 {
+			compatible = "renesas,pwm-r8a7795", "renesas,pwm-rcar";
+			reg = <0 0xe6e31000 0 0x8>;
+			clocks = <&cpg CPG_MOD 523>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 523>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		pwm2: pwm@e6e32000 {
+			compatible = "renesas,pwm-r8a7795", "renesas,pwm-rcar";
+			reg = <0 0xe6e32000 0 0x8>;
+			clocks = <&cpg CPG_MOD 523>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 523>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		pwm3: pwm@e6e33000 {
+			compatible = "renesas,pwm-r8a7795", "renesas,pwm-rcar";
+			reg = <0 0xe6e33000 0 0x8>;
+			clocks = <&cpg CPG_MOD 523>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 523>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		pwm4: pwm@e6e34000 {
+			compatible = "renesas,pwm-r8a7795", "renesas,pwm-rcar";
+			reg = <0 0xe6e34000 0 0x8>;
+			clocks = <&cpg CPG_MOD 523>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 523>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		pwm5: pwm@e6e35000 {
+			compatible = "renesas,pwm-r8a7795", "renesas,pwm-rcar";
+			reg = <0 0xe6e35000 0 0x8>;
+			clocks = <&cpg CPG_MOD 523>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 523>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		pwm6: pwm@e6e36000 {
+			compatible = "renesas,pwm-r8a7795", "renesas,pwm-rcar";
+			reg = <0 0xe6e36000 0 0x8>;
+			clocks = <&cpg CPG_MOD 523>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 523>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		scif0: serial@e6e60000 {
+			compatible = "renesas,scif-r8a7795",
+				     "renesas,rcar-gen3-scif", "renesas,scif";
+			reg = <0 0xe6e60000 0 64>;
+			interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 207>,
+				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x51>, <&dmac1 0x50>,
+			       <&dmac2 0x51>, <&dmac2 0x50>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 207>;
+			status = "disabled";
+		};
+
+		scif1: serial@e6e68000 {
+			compatible = "renesas,scif-r8a7795",
+				     "renesas,rcar-gen3-scif", "renesas,scif";
+			reg = <0 0xe6e68000 0 64>;
+			interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 206>,
+				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x53>, <&dmac1 0x52>,
+			       <&dmac2 0x53>, <&dmac2 0x52>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 206>;
+			status = "disabled";
+		};
+
+		scif2: serial@e6e88000 {
+			compatible = "renesas,scif-r8a7795",
+				     "renesas,rcar-gen3-scif", "renesas,scif";
+			reg = <0 0xe6e88000 0 64>;
+			interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 310>,
+				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+			       <&dmac2 0x13>, <&dmac2 0x12>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 310>;
+			status = "disabled";
+		};
+
+		scif3: serial@e6c50000 {
+			compatible = "renesas,scif-r8a7795",
+				     "renesas,rcar-gen3-scif", "renesas,scif";
+			reg = <0 0xe6c50000 0 64>;
+			interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 204>,
+				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x57>, <&dmac0 0x56>;
+			dma-names = "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 204>;
+			status = "disabled";
+		};
+
+		scif4: serial@e6c40000 {
+			compatible = "renesas,scif-r8a7795",
+				     "renesas,rcar-gen3-scif", "renesas,scif";
+			reg = <0 0xe6c40000 0 64>;
+			interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 203>,
+				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x59>, <&dmac0 0x58>;
+			dma-names = "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 203>;
+			status = "disabled";
+		};
+
+		scif5: serial@e6f30000 {
+			compatible = "renesas,scif-r8a7795",
+				     "renesas,rcar-gen3-scif", "renesas,scif";
+			reg = <0 0xe6f30000 0 64>;
+			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 202>,
+				 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
+			       <&dmac2 0x5b>, <&dmac2 0x5a>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 202>;
+			status = "disabled";
+		};
+
+		tpu: pwm@e6e80000 {
+			compatible = "renesas,tpu-r8a7795", "renesas,tpu";
+			reg = <0 0xe6e80000 0 0x148>;
+			interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 304>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 304>;
+			#pwm-cells = <3>;
+			status = "disabled";
+		};
+
+		msiof0: spi@e6e90000 {
+			compatible = "renesas,msiof-r8a7795",
+				     "renesas,rcar-gen3-msiof";
+			reg = <0 0xe6e90000 0 0x0064>;
+			interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 211>;
+			dmas = <&dmac1 0x41>, <&dmac1 0x40>,
+			       <&dmac2 0x41>, <&dmac2 0x40>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 211>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		msiof1: spi@e6ea0000 {
+			compatible = "renesas,msiof-r8a7795",
+				     "renesas,rcar-gen3-msiof";
+			reg = <0 0xe6ea0000 0 0x0064>;
+			interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 210>;
+			dmas = <&dmac1 0x43>, <&dmac1 0x42>,
+			       <&dmac2 0x43>, <&dmac2 0x42>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 210>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		msiof2: spi@e6c00000 {
+			compatible = "renesas,msiof-r8a7795",
+				     "renesas,rcar-gen3-msiof";
+			reg = <0 0xe6c00000 0 0x0064>;
+			interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 209>;
+			dmas = <&dmac0 0x45>, <&dmac0 0x44>;
+			dma-names = "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 209>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		msiof3: spi@e6c10000 {
+			compatible = "renesas,msiof-r8a7795",
+				     "renesas,rcar-gen3-msiof";
+			reg = <0 0xe6c10000 0 0x0064>;
+			interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 208>;
+			dmas = <&dmac0 0x47>, <&dmac0 0x46>;
+			dma-names = "tx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 208>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		vin0: video@e6ef0000 {
+			compatible = "renesas,vin-r8a7795";
+			reg = <0 0xe6ef0000 0 0x1000>;
+			interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 811>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 811>;
+			renesas,id = <0>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin0csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin0>;
+					};
+					vin0csi40: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&csi40vin0>;
+					};
+				};
+			};
+		};
+
+		vin1: video@e6ef1000 {
+			compatible = "renesas,vin-r8a7795";
+			reg = <0 0xe6ef1000 0 0x1000>;
+			interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 810>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 810>;
+			renesas,id = <1>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin1csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin1>;
+					};
+					vin1csi40: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&csi40vin1>;
+					};
+				};
+			};
+		};
+
+		vin2: video@e6ef2000 {
+			compatible = "renesas,vin-r8a7795";
+			reg = <0 0xe6ef2000 0 0x1000>;
+			interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 809>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 809>;
+			renesas,id = <2>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin2csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin2>;
+					};
+					vin2csi40: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&csi40vin2>;
+					};
+				};
+			};
+		};
+
+		vin3: video@e6ef3000 {
+			compatible = "renesas,vin-r8a7795";
+			reg = <0 0xe6ef3000 0 0x1000>;
+			interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 808>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 808>;
+			renesas,id = <3>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin3csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin3>;
+					};
+					vin3csi40: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&csi40vin3>;
+					};
+				};
+			};
+		};
+
+		vin4: video@e6ef4000 {
+			compatible = "renesas,vin-r8a7795";
+			reg = <0 0xe6ef4000 0 0x1000>;
+			interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 807>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 807>;
+			renesas,id = <4>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin4csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin4>;
+					};
+					vin4csi41: endpoint@3 {
+						reg = <3>;
+						remote-endpoint = <&csi41vin4>;
+					};
+				};
+			};
+		};
+
+		vin5: video@e6ef5000 {
+			compatible = "renesas,vin-r8a7795";
+			reg = <0 0xe6ef5000 0 0x1000>;
+			interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 806>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 806>;
+			renesas,id = <5>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin5csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin5>;
+					};
+					vin5csi41: endpoint@3 {
+						reg = <3>;
+						remote-endpoint = <&csi41vin5>;
+					};
+				};
+			};
+		};
+
+		vin6: video@e6ef6000 {
+			compatible = "renesas,vin-r8a7795";
+			reg = <0 0xe6ef6000 0 0x1000>;
+			interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 805>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 805>;
+			renesas,id = <6>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin6csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin6>;
+					};
+					vin6csi41: endpoint@3 {
+						reg = <3>;
+						remote-endpoint = <&csi41vin6>;
+					};
+				};
+			};
+		};
+
+		vin7: video@e6ef7000 {
+			compatible = "renesas,vin-r8a7795";
+			reg = <0 0xe6ef7000 0 0x1000>;
+			interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 804>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 804>;
+			renesas,id = <7>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin7csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin7>;
+					};
+					vin7csi41: endpoint@3 {
+						reg = <3>;
+						remote-endpoint = <&csi41vin7>;
+					};
+				};
+			};
+		};
+
+		drif00: rif@e6f40000 {
+			compatible = "renesas,r8a7795-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6f40000 0 0x64>;
+			interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 515>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x20>, <&dmac2 0x20>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 515>;
+			renesas,bonding = <&drif01>;
+			status = "disabled";
+		};
+
+		drif01: rif@e6f50000 {
+			compatible = "renesas,r8a7795-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6f50000 0 0x64>;
+			interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 514>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x22>, <&dmac2 0x22>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 514>;
+			renesas,bonding = <&drif00>;
+			status = "disabled";
+		};
+
+		drif10: rif@e6f60000 {
+			compatible = "renesas,r8a7795-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6f60000 0 0x64>;
+			interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 513>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x24>, <&dmac2 0x24>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 513>;
+			renesas,bonding = <&drif11>;
+			status = "disabled";
+		};
+
+		drif11: rif@e6f70000 {
+			compatible = "renesas,r8a7795-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6f70000 0 0x64>;
+			interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 512>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x26>, <&dmac2 0x26>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 512>;
+			renesas,bonding = <&drif10>;
+			status = "disabled";
+		};
+
+		drif20: rif@e6f80000 {
+			compatible = "renesas,r8a7795-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6f80000 0 0x64>;
+			interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 511>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x28>, <&dmac2 0x28>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 511>;
+			renesas,bonding = <&drif21>;
+			status = "disabled";
+		};
+
+		drif21: rif@e6f90000 {
+			compatible = "renesas,r8a7795-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6f90000 0 0x64>;
+			interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 510>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x2a>, <&dmac2 0x2a>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 510>;
+			renesas,bonding = <&drif20>;
+			status = "disabled";
+		};
+
+		drif30: rif@e6fa0000 {
+			compatible = "renesas,r8a7795-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6fa0000 0 0x64>;
+			interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 509>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x2c>, <&dmac2 0x2c>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 509>;
+			renesas,bonding = <&drif31>;
+			status = "disabled";
+		};
+
+		drif31: rif@e6fb0000 {
+			compatible = "renesas,r8a7795-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6fb0000 0 0x64>;
+			interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 508>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x2e>, <&dmac2 0x2e>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 508>;
+			renesas,bonding = <&drif30>;
+			status = "disabled";
+		};
+
+		rcar_sound: sound@ec500000 {
+			/*
+			 * #sound-dai-cells is required
+			 *
+			 * Single DAI : #sound-dai-cells = <0>;	<&rcar_sound>;
+			 * Multi  DAI : #sound-dai-cells = <1>;	<&rcar_sound N>;
+			 */
+			/*
+			 * #clock-cells is required for audio_clkout0/1/2/3
+			 *
+			 * clkout	: #clock-cells = <0>;	<&rcar_sound>;
+			 * clkout0/1/2/3: #clock-cells = <1>;	<&rcar_sound N>;
+			 */
+			compatible = "renesas,rcar_sound-r8a7795", "renesas,rcar_sound-gen3";
+			reg =	<0 0xec500000 0 0x1000>, /* SCU */
+				<0 0xec5a0000 0 0x100>,  /* ADG */
+				<0 0xec540000 0 0x1000>, /* SSIU */
+				<0 0xec541000 0 0x280>,  /* SSI */
+				<0 0xec760000 0 0x200>;  /* Audio DMAC peri peri */
+			reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
+
+			clocks = <&cpg CPG_MOD 1005>,
+				 <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
+				 <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
+				 <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
+				 <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
+				 <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
+				 <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
+				 <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
+				 <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
+				 <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
+				 <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
+				 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+				 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+				 <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
+				 <&audio_clk_a>, <&audio_clk_b>,
+				 <&audio_clk_c>,
+				 <&cpg CPG_CORE R8A7795_CLK_S0D4>;
+			clock-names = "ssi-all",
+				      "ssi.9", "ssi.8", "ssi.7", "ssi.6",
+				      "ssi.5", "ssi.4", "ssi.3", "ssi.2",
+				      "ssi.1", "ssi.0",
+				      "src.9", "src.8", "src.7", "src.6",
+				      "src.5", "src.4", "src.3", "src.2",
+				      "src.1", "src.0",
+				      "mix.1", "mix.0",
+				      "ctu.1", "ctu.0",
+				      "dvc.0", "dvc.1",
+				      "clk_a", "clk_b", "clk_c", "clk_i";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 1005>,
+				 <&cpg 1006>, <&cpg 1007>,
+				 <&cpg 1008>, <&cpg 1009>,
+				 <&cpg 1010>, <&cpg 1011>,
+				 <&cpg 1012>, <&cpg 1013>,
+				 <&cpg 1014>, <&cpg 1015>;
+			reset-names = "ssi-all",
+				      "ssi.9", "ssi.8", "ssi.7", "ssi.6",
+				      "ssi.5", "ssi.4", "ssi.3", "ssi.2",
+				      "ssi.1", "ssi.0";
+			status = "disabled";
+
+			rcar_sound,dvc {
+				dvc0: dvc-0 {
+					dmas = <&audma1 0xbc>;
+					dma-names = "tx";
+				};
+				dvc1: dvc-1 {
+					dmas = <&audma1 0xbe>;
+					dma-names = "tx";
+				};
+			};
+
+			rcar_sound,mix {
+				mix0: mix-0 { };
+				mix1: mix-1 { };
+			};
+
+			rcar_sound,ctu {
+				ctu00: ctu-0 { };
+				ctu01: ctu-1 { };
+				ctu02: ctu-2 { };
+				ctu03: ctu-3 { };
+				ctu10: ctu-4 { };
+				ctu11: ctu-5 { };
+				ctu12: ctu-6 { };
+				ctu13: ctu-7 { };
+			};
+
+			rcar_sound,src {
+				src0: src-0 {
+					interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x85>, <&audma1 0x9a>;
+					dma-names = "rx", "tx";
+				};
+				src1: src-1 {
+					interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x87>, <&audma1 0x9c>;
+					dma-names = "rx", "tx";
+				};
+				src2: src-2 {
+					interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x89>, <&audma1 0x9e>;
+					dma-names = "rx", "tx";
+				};
+				src3: src-3 {
+					interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x8b>, <&audma1 0xa0>;
+					dma-names = "rx", "tx";
+				};
+				src4: src-4 {
+					interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x8d>, <&audma1 0xb0>;
+					dma-names = "rx", "tx";
+				};
+				src5: src-5 {
+					interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x8f>, <&audma1 0xb2>;
+					dma-names = "rx", "tx";
+				};
+				src6: src-6 {
+					interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x91>, <&audma1 0xb4>;
+					dma-names = "rx", "tx";
+				};
+				src7: src-7 {
+					interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x93>, <&audma1 0xb6>;
+					dma-names = "rx", "tx";
+				};
+				src8: src-8 {
+					interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x95>, <&audma1 0xb8>;
+					dma-names = "rx", "tx";
+				};
+				src9: src-9 {
+					interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x97>, <&audma1 0xba>;
+					dma-names = "rx", "tx";
+				};
+			};
+
+			rcar_sound,ssiu {
+				ssiu00: ssiu-0 {
+					dmas = <&audma0 0x15>, <&audma1 0x16>;
+					dma-names = "rx", "tx";
+				};
+				ssiu01: ssiu-1 {
+					dmas = <&audma0 0x35>, <&audma1 0x36>;
+					dma-names = "rx", "tx";
+				};
+				ssiu02: ssiu-2 {
+					dmas = <&audma0 0x37>, <&audma1 0x38>;
+					dma-names = "rx", "tx";
+				};
+				ssiu03: ssiu-3 {
+					dmas = <&audma0 0x47>, <&audma1 0x48>;
+					dma-names = "rx", "tx";
+				};
+				ssiu04: ssiu-4 {
+					dmas = <&audma0 0x3F>, <&audma1 0x40>;
+					dma-names = "rx", "tx";
+				};
+				ssiu05: ssiu-5 {
+					dmas = <&audma0 0x43>, <&audma1 0x44>;
+					dma-names = "rx", "tx";
+				};
+				ssiu06: ssiu-6 {
+					dmas = <&audma0 0x4F>, <&audma1 0x50>;
+					dma-names = "rx", "tx";
+				};
+				ssiu07: ssiu-7 {
+					dmas = <&audma0 0x53>, <&audma1 0x54>;
+					dma-names = "rx", "tx";
+				};
+				ssiu10: ssiu-8 {
+					dmas = <&audma0 0x49>, <&audma1 0x4a>;
+					dma-names = "rx", "tx";
+				};
+				ssiu11: ssiu-9 {
+					dmas = <&audma0 0x4B>, <&audma1 0x4C>;
+					dma-names = "rx", "tx";
+				};
+				ssiu12: ssiu-10 {
+					dmas = <&audma0 0x57>, <&audma1 0x58>;
+					dma-names = "rx", "tx";
+				};
+				ssiu13: ssiu-11 {
+					dmas = <&audma0 0x59>, <&audma1 0x5A>;
+					dma-names = "rx", "tx";
+				};
+				ssiu14: ssiu-12 {
+					dmas = <&audma0 0x5F>, <&audma1 0x60>;
+					dma-names = "rx", "tx";
+				};
+				ssiu15: ssiu-13 {
+					dmas = <&audma0 0xC3>, <&audma1 0xC4>;
+					dma-names = "rx", "tx";
+				};
+				ssiu16: ssiu-14 {
+					dmas = <&audma0 0xC7>, <&audma1 0xC8>;
+					dma-names = "rx", "tx";
+				};
+				ssiu17: ssiu-15 {
+					dmas = <&audma0 0xCB>, <&audma1 0xCC>;
+					dma-names = "rx", "tx";
+				};
+				ssiu20: ssiu-16 {
+					dmas = <&audma0 0x63>, <&audma1 0x64>;
+					dma-names = "rx", "tx";
+				};
+				ssiu21: ssiu-17 {
+					dmas = <&audma0 0x67>, <&audma1 0x68>;
+					dma-names = "rx", "tx";
+				};
+				ssiu22: ssiu-18 {
+					dmas = <&audma0 0x6B>, <&audma1 0x6C>;
+					dma-names = "rx", "tx";
+				};
+				ssiu23: ssiu-19 {
+					dmas = <&audma0 0x6D>, <&audma1 0x6E>;
+					dma-names = "rx", "tx";
+				};
+				ssiu24: ssiu-20 {
+					dmas = <&audma0 0xCF>, <&audma1 0xCE>;
+					dma-names = "rx", "tx";
+				};
+				ssiu25: ssiu-21 {
+					dmas = <&audma0 0xEB>, <&audma1 0xEC>;
+					dma-names = "rx", "tx";
+				};
+				ssiu26: ssiu-22 {
+					dmas = <&audma0 0xED>, <&audma1 0xEE>;
+					dma-names = "rx", "tx";
+				};
+				ssiu27: ssiu-23 {
+					dmas = <&audma0 0xEF>, <&audma1 0xF0>;
+					dma-names = "rx", "tx";
+				};
+				ssiu30: ssiu-24 {
+					dmas = <&audma0 0x6f>, <&audma1 0x70>;
+					dma-names = "rx", "tx";
+				};
+				ssiu31: ssiu-25 {
+					dmas = <&audma0 0x21>, <&audma1 0x22>;
+					dma-names = "rx", "tx";
+				};
+				ssiu32: ssiu-26 {
+					dmas = <&audma0 0x23>, <&audma1 0x24>;
+					dma-names = "rx", "tx";
+				};
+				ssiu33: ssiu-27 {
+					dmas = <&audma0 0x25>, <&audma1 0x26>;
+					dma-names = "rx", "tx";
+				};
+				ssiu34: ssiu-28 {
+					dmas = <&audma0 0x27>, <&audma1 0x28>;
+					dma-names = "rx", "tx";
+				};
+				ssiu35: ssiu-29 {
+					dmas = <&audma0 0x29>, <&audma1 0x2A>;
+					dma-names = "rx", "tx";
+				};
+				ssiu36: ssiu-30 {
+					dmas = <&audma0 0x2B>, <&audma1 0x2C>;
+					dma-names = "rx", "tx";
+				};
+				ssiu37: ssiu-31 {
+					dmas = <&audma0 0x2D>, <&audma1 0x2E>;
+					dma-names = "rx", "tx";
+				};
+				ssiu40: ssiu-32 {
+					dmas = <&audma0 0x71>, <&audma1 0x72>;
+					dma-names = "rx", "tx";
+				};
+				ssiu41: ssiu-33 {
+					dmas = <&audma0 0x17>, <&audma1 0x18>;
+					dma-names = "rx", "tx";
+				};
+				ssiu42: ssiu-34 {
+					dmas = <&audma0 0x19>, <&audma1 0x1A>;
+					dma-names = "rx", "tx";
+				};
+				ssiu43: ssiu-35 {
+					dmas = <&audma0 0x1B>, <&audma1 0x1C>;
+					dma-names = "rx", "tx";
+				};
+				ssiu44: ssiu-36 {
+					dmas = <&audma0 0x1D>, <&audma1 0x1E>;
+					dma-names = "rx", "tx";
+				};
+				ssiu45: ssiu-37 {
+					dmas = <&audma0 0x1F>, <&audma1 0x20>;
+					dma-names = "rx", "tx";
+				};
+				ssiu46: ssiu-38 {
+					dmas = <&audma0 0x31>, <&audma1 0x32>;
+					dma-names = "rx", "tx";
+				};
+				ssiu47: ssiu-39 {
+					dmas = <&audma0 0x33>, <&audma1 0x34>;
+					dma-names = "rx", "tx";
+				};
+				ssiu50: ssiu-40 {
+					dmas = <&audma0 0x73>, <&audma1 0x74>;
+					dma-names = "rx", "tx";
+				};
+				ssiu60: ssiu-41 {
+					dmas = <&audma0 0x75>, <&audma1 0x76>;
+					dma-names = "rx", "tx";
+				};
+				ssiu70: ssiu-42 {
+					dmas = <&audma0 0x79>, <&audma1 0x7a>;
+					dma-names = "rx", "tx";
+				};
+				ssiu80: ssiu-43 {
+					dmas = <&audma0 0x7b>, <&audma1 0x7c>;
+					dma-names = "rx", "tx";
+				};
+				ssiu90: ssiu-44 {
+					dmas = <&audma0 0x7d>, <&audma1 0x7e>;
+					dma-names = "rx", "tx";
+				};
+				ssiu91: ssiu-45 {
+					dmas = <&audma0 0x7F>, <&audma1 0x80>;
+					dma-names = "rx", "tx";
+				};
+				ssiu92: ssiu-46 {
+					dmas = <&audma0 0x81>, <&audma1 0x82>;
+					dma-names = "rx", "tx";
+				};
+				ssiu93: ssiu-47 {
+					dmas = <&audma0 0x83>, <&audma1 0x84>;
+					dma-names = "rx", "tx";
+				};
+				ssiu94: ssiu-48 {
+					dmas = <&audma0 0xA3>, <&audma1 0xA4>;
+					dma-names = "rx", "tx";
+				};
+				ssiu95: ssiu-49 {
+					dmas = <&audma0 0xA5>, <&audma1 0xA6>;
+					dma-names = "rx", "tx";
+				};
+				ssiu96: ssiu-50 {
+					dmas = <&audma0 0xA7>, <&audma1 0xA8>;
+					dma-names = "rx", "tx";
+				};
+				ssiu97: ssiu-51 {
+					dmas = <&audma0 0xA9>, <&audma1 0xAA>;
+					dma-names = "rx", "tx";
+				};
+			};
+
+			rcar_sound,ssi {
+				ssi0: ssi-0 {
+					interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x01>, <&audma1 0x02>;
+					dma-names = "rx", "tx";
+				};
+				ssi1: ssi-1 {
+					interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x03>, <&audma1 0x04>;
+					dma-names = "rx", "tx";
+				};
+				ssi2: ssi-2 {
+					interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x05>, <&audma1 0x06>;
+					dma-names = "rx", "tx";
+				};
+				ssi3: ssi-3 {
+					interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x07>, <&audma1 0x08>;
+					dma-names = "rx", "tx";
+				};
+				ssi4: ssi-4 {
+					interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x09>, <&audma1 0x0a>;
+					dma-names = "rx", "tx";
+				};
+				ssi5: ssi-5 {
+					interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x0b>, <&audma1 0x0c>;
+					dma-names = "rx", "tx";
+				};
+				ssi6: ssi-6 {
+					interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x0d>, <&audma1 0x0e>;
+					dma-names = "rx", "tx";
+				};
+				ssi7: ssi-7 {
+					interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x0f>, <&audma1 0x10>;
+					dma-names = "rx", "tx";
+				};
+				ssi8: ssi-8 {
+					interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x11>, <&audma1 0x12>;
+					dma-names = "rx", "tx";
+				};
+				ssi9: ssi-9 {
+					interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x13>, <&audma1 0x14>;
+					dma-names = "rx", "tx";
+				};
+			};
+		};
+
+		audma0: dma-controller@ec700000 {
+			compatible = "renesas,dmac-r8a7795",
+				     "renesas,rcar-dmac";
+			reg = <0 0xec700000 0 0x10000>;
+			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 502>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 502>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+			iommus = <&ipmmu_mp0 0>, <&ipmmu_mp0 1>,
+			       <&ipmmu_mp0 2>, <&ipmmu_mp0 3>,
+			       <&ipmmu_mp0 4>, <&ipmmu_mp0 5>,
+			       <&ipmmu_mp0 6>, <&ipmmu_mp0 7>,
+			       <&ipmmu_mp0 8>, <&ipmmu_mp0 9>,
+			       <&ipmmu_mp0 10>, <&ipmmu_mp0 11>,
+			       <&ipmmu_mp0 12>, <&ipmmu_mp0 13>,
+			       <&ipmmu_mp0 14>, <&ipmmu_mp0 15>;
+		};
+
+		audma1: dma-controller@ec720000 {
+			compatible = "renesas,dmac-r8a7795",
+				     "renesas,rcar-dmac";
+			reg = <0 0xec720000 0 0x10000>;
+			interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 501>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 501>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+			iommus = <&ipmmu_mp0 16>, <&ipmmu_mp0 17>,
+			       <&ipmmu_mp0 18>, <&ipmmu_mp0 19>,
+			       <&ipmmu_mp0 20>, <&ipmmu_mp0 21>,
+			       <&ipmmu_mp0 22>, <&ipmmu_mp0 23>,
+			       <&ipmmu_mp0 24>, <&ipmmu_mp0 25>,
+			       <&ipmmu_mp0 26>, <&ipmmu_mp0 27>,
+			       <&ipmmu_mp0 28>, <&ipmmu_mp0 29>,
+			       <&ipmmu_mp0 30>, <&ipmmu_mp0 31>;
+		};
+
+		xhci0: usb@ee000000 {
+			compatible = "renesas,xhci-r8a7795", "renesas,rcar-gen3-xhci";
+			reg = <0 0xee000000 0 0xc00>;
+			interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 328>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 328>;
+			status = "disabled";
+		};
+
+		usb3_peri0: usb@ee020000 {
+			compatible = "renesas,r8a7795-usb3-peri",
+				     "renesas,rcar-gen3-usb3-peri";
+			reg = <0 0xee020000 0 0x400>;
+			interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 328>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 328>;
+			status = "disabled";
+		};
+
+		ohci0: usb@ee080000 {
+			compatible = "generic-ohci";
+			reg = <0 0xee080000 0 0x100>;
+			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
+			phys = <&usb2_phy0 1>;
+			phy-names = "usb";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 703>, <&cpg 704>;
+			status = "disabled";
+		};
+
+		ohci1: usb@ee0a0000 {
+			compatible = "generic-ohci";
+			reg = <0 0xee0a0000 0 0x100>;
+			interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 702>;
+			phys = <&usb2_phy1 1>;
+			phy-names = "usb";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 702>;
+			status = "disabled";
+		};
+
+		ohci2: usb@ee0c0000 {
+			compatible = "generic-ohci";
+			reg = <0 0xee0c0000 0 0x100>;
+			interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 701>;
+			phys = <&usb2_phy2 1>;
+			phy-names = "usb";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 701>;
+			status = "disabled";
+		};
+
+		ohci3: usb@ee0e0000 {
+			compatible = "generic-ohci";
+			reg = <0 0xee0e0000 0 0x100>;
+			interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 700>, <&cpg CPG_MOD 705>;
+			phys = <&usb2_phy3 1>;
+			phy-names = "usb";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 700>, <&cpg 705>;
+			status = "disabled";
+		};
+
+		ehci0: usb@ee080100 {
+			compatible = "generic-ehci";
+			reg = <0 0xee080100 0 0x100>;
+			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
+			phys = <&usb2_phy0 2>;
+			phy-names = "usb";
+			companion = <&ohci0>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 703>, <&cpg 704>;
+			status = "disabled";
+		};
+
+		ehci1: usb@ee0a0100 {
+			compatible = "generic-ehci";
+			reg = <0 0xee0a0100 0 0x100>;
+			interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 702>;
+			phys = <&usb2_phy1 2>;
+			phy-names = "usb";
+			companion = <&ohci1>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 702>;
+			status = "disabled";
+		};
+
+		ehci2: usb@ee0c0100 {
+			compatible = "generic-ehci";
+			reg = <0 0xee0c0100 0 0x100>;
+			interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 701>;
+			phys = <&usb2_phy2 2>;
+			phy-names = "usb";
+			companion = <&ohci2>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 701>;
+			status = "disabled";
+		};
+
+		ehci3: usb@ee0e0100 {
+			compatible = "generic-ehci";
+			reg = <0 0xee0e0100 0 0x100>;
+			interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 700>, <&cpg CPG_MOD 705>;
+			phys = <&usb2_phy3 2>;
+			phy-names = "usb";
+			companion = <&ohci3>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 700>, <&cpg 705>;
+			status = "disabled";
+		};
+
+		usb2_phy0: usb-phy@ee080200 {
+			compatible = "renesas,usb2-phy-r8a7795",
+				     "renesas,rcar-gen3-usb2-phy";
+			reg = <0 0xee080200 0 0x700>;
+			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 703>, <&cpg 704>;
+			#phy-cells = <1>;
+			status = "disabled";
+		};
+
+		usb2_phy1: usb-phy@ee0a0200 {
+			compatible = "renesas,usb2-phy-r8a7795",
+				     "renesas,rcar-gen3-usb2-phy";
+			reg = <0 0xee0a0200 0 0x700>;
+			clocks = <&cpg CPG_MOD 702>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 702>;
+			#phy-cells = <1>;
+			status = "disabled";
+		};
+
+		usb2_phy2: usb-phy@ee0c0200 {
+			compatible = "renesas,usb2-phy-r8a7795",
+				     "renesas,rcar-gen3-usb2-phy";
+			reg = <0 0xee0c0200 0 0x700>;
+			clocks = <&cpg CPG_MOD 701>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 701>;
+			#phy-cells = <1>;
+			status = "disabled";
+		};
+
+		usb2_phy3: usb-phy@ee0e0200 {
+			compatible = "renesas,usb2-phy-r8a7795",
+				     "renesas,rcar-gen3-usb2-phy";
+			reg = <0 0xee0e0200 0 0x700>;
+			interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 700>, <&cpg CPG_MOD 705>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 700>, <&cpg 705>;
+			#phy-cells = <1>;
+			status = "disabled";
+		};
+
+		sdhi0: sd@ee100000 {
+			compatible = "renesas,sdhi-r8a7795",
+				     "renesas,rcar-gen3-sdhi";
+			reg = <0 0xee100000 0 0x2000>;
+			interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 314>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 314>;
+			iommus = <&ipmmu_ds1 32>;
+			status = "disabled";
+		};
+
+		sdhi1: sd@ee120000 {
+			compatible = "renesas,sdhi-r8a7795",
+				     "renesas,rcar-gen3-sdhi";
+			reg = <0 0xee120000 0 0x2000>;
+			interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 313>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 313>;
+			iommus = <&ipmmu_ds1 33>;
+			status = "disabled";
+		};
+
+		sdhi2: sd@ee140000 {
+			compatible = "renesas,sdhi-r8a7795",
+				     "renesas,rcar-gen3-sdhi";
+			reg = <0 0xee140000 0 0x2000>;
+			interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 312>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 312>;
+			iommus = <&ipmmu_ds1 34>;
+			status = "disabled";
+		};
+
+		sdhi3: sd@ee160000 {
+			compatible = "renesas,sdhi-r8a7795",
+				     "renesas,rcar-gen3-sdhi";
+			reg = <0 0xee160000 0 0x2000>;
+			interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 311>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 311>;
+			iommus = <&ipmmu_ds1 35>;
+			status = "disabled";
+		};
+
+		sata: sata@ee300000 {
+			compatible = "renesas,sata-r8a7795",
+				     "renesas,rcar-gen3-sata";
+			reg = <0 0xee300000 0 0x200000>;
+			interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 815>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 815>;
+			status = "disabled";
+			iommus = <&ipmmu_hc 2>;
+		};
+
+		gic: interrupt-controller@f1010000 {
+			compatible = "arm,gic-400";
+			#interrupt-cells = <3>;
+			#address-cells = <0>;
+			interrupt-controller;
+			reg = <0x0 0xf1010000 0 0x1000>,
+			      <0x0 0xf1020000 0 0x20000>,
+			      <0x0 0xf1040000 0 0x20000>,
+			      <0x0 0xf1060000 0 0x20000>;
+			interrupts = <GIC_PPI 9
+					(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
+			clocks = <&cpg CPG_MOD 408>;
+			clock-names = "clk";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 408>;
+		};
+
+		pciec0: pcie@fe000000 {
+			compatible = "renesas,pcie-r8a7795",
+				     "renesas,pcie-rcar-gen3";
+			reg = <0 0xfe000000 0 0x80000>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+			bus-range = <0x00 0xff>;
+			device_type = "pci";
+			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000>,
+				 <0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000>,
+				 <0x02000000 0 0x30000000 0 0x30000000 0 0x08000000>,
+				 <0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+			/* Map all possible DDR as inbound ranges */
+			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>;
+			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>;
+			clock-names = "pcie", "pcie_bus";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 319>;
+			status = "disabled";
+		};
+
+		pciec1: pcie@ee800000 {
+			compatible = "renesas,pcie-r8a7795",
+				     "renesas,pcie-rcar-gen3";
+			reg = <0 0xee800000 0 0x80000>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+			bus-range = <0x00 0xff>;
+			device_type = "pci";
+			ranges = <0x01000000 0 0x00000000 0 0xee900000 0 0x00100000>,
+				 <0x02000000 0 0xeea00000 0 0xeea00000 0 0x00200000>,
+				 <0x02000000 0 0xc0000000 0 0xc0000000 0 0x08000000>,
+				 <0x42000000 0 0xc8000000 0 0xc8000000 0 0x08000000>;
+			/* Map all possible DDR as inbound ranges */
+			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>;
+			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 318>, <&pcie_bus_clk>;
+			clock-names = "pcie", "pcie_bus";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 318>;
+			status = "disabled";
+		};
+
+		imr-lx4@fe860000 {
+			compatible = "renesas,r8a7795-imr-lx4",
+				     "renesas,imr-lx4";
+			reg = <0 0xfe860000 0 0x2000>;
+			interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 823>;
+			power-domains = <&sysc R8A7795_PD_A3VC>;
+			resets = <&cpg 823>;
+		};
+
+		imr-lx4@fe870000 {
+			compatible = "renesas,r8a7795-imr-lx4",
+				     "renesas,imr-lx4";
+			reg = <0 0xfe870000 0 0x2000>;
+			interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 822>;
+			power-domains = <&sysc R8A7795_PD_A3VC>;
+			resets = <&cpg 822>;
+		};
+
+		imr-lx4@fe880000 {
+			compatible = "renesas,r8a7795-imr-lx4",
+				     "renesas,imr-lx4";
+			reg = <0 0xfe880000 0 0x2000>;
+			interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 821>;
+			power-domains = <&sysc R8A7795_PD_A3VC>;
+			resets = <&cpg 821>;
+		};
+
+		imr-lx4@fe890000 {
+			compatible = "renesas,r8a7795-imr-lx4",
+				     "renesas,imr-lx4";
+			reg = <0 0xfe890000 0 0x2000>;
+			interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 820>;
+			power-domains = <&sysc R8A7795_PD_A3VC>;
+			resets = <&cpg 820>;
+		};
+
+		vspbc: vsp@fe920000 {
+			compatible = "renesas,vsp2";
+			reg = <0 0xfe920000 0 0x8000>;
+			interrupts = <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 624>;
+			power-domains = <&sysc R8A7795_PD_A3VP>;
+			resets = <&cpg 624>;
+
+			renesas,fcp = <&fcpvb1>;
+		};
+
+		vspbd: vsp@fe960000 {
+			compatible = "renesas,vsp2";
+			reg = <0 0xfe960000 0 0x8000>;
+			interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 626>;
+			power-domains = <&sysc R8A7795_PD_A3VP>;
+			resets = <&cpg 626>;
+
+			renesas,fcp = <&fcpvb0>;
+		};
+
+		vspd0: vsp@fea20000 {
+			compatible = "renesas,vsp2";
+			reg = <0 0xfea20000 0 0x5000>;
+			interrupts = <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 623>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 623>;
+
+			renesas,fcp = <&fcpvd0>;
+		};
+
+		vspd1: vsp@fea28000 {
+			compatible = "renesas,vsp2";
+			reg = <0 0xfea28000 0 0x5000>;
+			interrupts = <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 622>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 622>;
+
+			renesas,fcp = <&fcpvd1>;
+		};
+
+		vspd2: vsp@fea30000 {
+			compatible = "renesas,vsp2";
+			reg = <0 0xfea30000 0 0x5000>;
+			interrupts = <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 621>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 621>;
+
+			renesas,fcp = <&fcpvd2>;
+		};
+
+		vspi0: vsp@fe9a0000 {
+			compatible = "renesas,vsp2";
+			reg = <0 0xfe9a0000 0 0x8000>;
+			interrupts = <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 631>;
+			power-domains = <&sysc R8A7795_PD_A3VP>;
+			resets = <&cpg 631>;
+
+			renesas,fcp = <&fcpvi0>;
+		};
+
+		vspi1: vsp@fe9b0000 {
+			compatible = "renesas,vsp2";
+			reg = <0 0xfe9b0000 0 0x8000>;
+			interrupts = <GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 630>;
+			power-domains = <&sysc R8A7795_PD_A3VP>;
+			resets = <&cpg 630>;
+
+			renesas,fcp = <&fcpvi1>;
+		};
+
+		fdp1@fe940000 {
+			compatible = "renesas,fdp1";
+			reg = <0 0xfe940000 0 0x2400>;
+			interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 119>;
+			power-domains = <&sysc R8A7795_PD_A3VP>;
+			resets = <&cpg 119>;
+			renesas,fcp = <&fcpf0>;
+		};
+
+		fdp1@fe944000 {
+			compatible = "renesas,fdp1";
+			reg = <0 0xfe944000 0 0x2400>;
+			interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 118>;
+			power-domains = <&sysc R8A7795_PD_A3VP>;
+			resets = <&cpg 118>;
+			renesas,fcp = <&fcpf1>;
+		};
+
+		fcpf0: fcp@fe950000 {
+			compatible = "renesas,fcpf";
+			reg = <0 0xfe950000 0 0x200>;
+			clocks = <&cpg CPG_MOD 615>;
+			power-domains = <&sysc R8A7795_PD_A3VP>;
+			resets = <&cpg 615>;
+			iommus = <&ipmmu_vp0 0>;
+		};
+
+		fcpf1: fcp@fe951000 {
+			compatible = "renesas,fcpf";
+			reg = <0 0xfe951000 0 0x200>;
+			clocks = <&cpg CPG_MOD 614>;
+			power-domains = <&sysc R8A7795_PD_A3VP>;
+			resets = <&cpg 614>;
+			iommus = <&ipmmu_vp1 1>;
+		};
+
+		fcpvb0: fcp@fe96f000 {
+			compatible = "renesas,fcpv";
+			reg = <0 0xfe96f000 0 0x200>;
+			clocks = <&cpg CPG_MOD 607>;
+			power-domains = <&sysc R8A7795_PD_A3VP>;
+			resets = <&cpg 607>;
+			iommus = <&ipmmu_vp0 5>;
+		};
+
+		fcpvb1: fcp@fe92f000 {
+			compatible = "renesas,fcpv";
+			reg = <0 0xfe92f000 0 0x200>;
+			clocks = <&cpg CPG_MOD 606>;
+			power-domains = <&sysc R8A7795_PD_A3VP>;
+			resets = <&cpg 606>;
+			iommus = <&ipmmu_vp1 7>;
+		};
+
+		fcpvi0: fcp@fe9af000 {
+			compatible = "renesas,fcpv";
+			reg = <0 0xfe9af000 0 0x200>;
+			clocks = <&cpg CPG_MOD 611>;
+			power-domains = <&sysc R8A7795_PD_A3VP>;
+			resets = <&cpg 611>;
+			iommus = <&ipmmu_vp0 8>;
+		};
+
+		fcpvi1: fcp@fe9bf000 {
+			compatible = "renesas,fcpv";
+			reg = <0 0xfe9bf000 0 0x200>;
+			clocks = <&cpg CPG_MOD 610>;
+			power-domains = <&sysc R8A7795_PD_A3VP>;
+			resets = <&cpg 610>;
+			iommus = <&ipmmu_vp1 9>;
+		};
+
+		fcpvd0: fcp@fea27000 {
+			compatible = "renesas,fcpv";
+			reg = <0 0xfea27000 0 0x200>;
+			clocks = <&cpg CPG_MOD 603>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 603>;
+			iommus = <&ipmmu_vi0 8>;
+		};
+
+		fcpvd1: fcp@fea2f000 {
+			compatible = "renesas,fcpv";
+			reg = <0 0xfea2f000 0 0x200>;
+			clocks = <&cpg CPG_MOD 602>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 602>;
+			iommus = <&ipmmu_vi0 9>;
+		};
+
+		fcpvd2: fcp@fea37000 {
+			compatible = "renesas,fcpv";
+			reg = <0 0xfea37000 0 0x200>;
+			clocks = <&cpg CPG_MOD 601>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 601>;
+			iommus = <&ipmmu_vi1 10>;
+		};
+
+		cmm0: cmm@fea40000 {
+			compatible = "renesas,r8a7795-cmm",
+				     "renesas,rcar-gen3-cmm";
+			reg = <0 0xfea40000 0 0x1000>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			clocks = <&cpg CPG_MOD 711>;
+			resets = <&cpg 711>;
+		};
+
+		cmm1: cmm@fea50000 {
+			compatible = "renesas,r8a7795-cmm",
+				     "renesas,rcar-gen3-cmm";
+			reg = <0 0xfea50000 0 0x1000>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			clocks = <&cpg CPG_MOD 710>;
+			resets = <&cpg 710>;
+		};
+
+		cmm2: cmm@fea60000 {
+			compatible = "renesas,r8a7795-cmm",
+				     "renesas,rcar-gen3-cmm";
+			reg = <0 0xfea60000 0 0x1000>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			clocks = <&cpg CPG_MOD 709>;
+			resets = <&cpg 709>;
+		};
+
+		cmm3: cmm@fea70000 {
+			compatible = "renesas,r8a7795-cmm",
+				     "renesas,rcar-gen3-cmm";
+			reg = <0 0xfea70000 0 0x1000>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			clocks = <&cpg CPG_MOD 708>;
+			resets = <&cpg 708>;
+		};
+
+		csi20: csi2@fea80000 {
+			compatible = "renesas,r8a7795-csi2";
+			reg = <0 0xfea80000 0 0x10000>;
+			interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 714>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 714>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					csi20vin0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&vin0csi20>;
+					};
+					csi20vin1: endpoint@1 {
+						reg = <1>;
+						remote-endpoint = <&vin1csi20>;
+					};
+					csi20vin2: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&vin2csi20>;
+					};
+					csi20vin3: endpoint@3 {
+						reg = <3>;
+						remote-endpoint = <&vin3csi20>;
+					};
+					csi20vin4: endpoint@4 {
+						reg = <4>;
+						remote-endpoint = <&vin4csi20>;
+					};
+					csi20vin5: endpoint@5 {
+						reg = <5>;
+						remote-endpoint = <&vin5csi20>;
+					};
+					csi20vin6: endpoint@6 {
+						reg = <6>;
+						remote-endpoint = <&vin6csi20>;
+					};
+					csi20vin7: endpoint@7 {
+						reg = <7>;
+						remote-endpoint = <&vin7csi20>;
+					};
+				};
+			};
+		};
+
+		csi40: csi2@feaa0000 {
+			compatible = "renesas,r8a7795-csi2";
+			reg = <0 0xfeaa0000 0 0x10000>;
+			interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 716>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 716>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					csi40vin0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&vin0csi40>;
+					};
+					csi40vin1: endpoint@1 {
+						reg = <1>;
+						remote-endpoint = <&vin1csi40>;
+					};
+					csi40vin2: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&vin2csi40>;
+					};
+					csi40vin3: endpoint@3 {
+						reg = <3>;
+						remote-endpoint = <&vin3csi40>;
+					};
+				};
+			};
+		};
+
+		csi41: csi2@feab0000 {
+			compatible = "renesas,r8a7795-csi2";
+			reg = <0 0xfeab0000 0 0x10000>;
+			interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 715>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 715>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					csi41vin4: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&vin4csi41>;
+					};
+					csi41vin5: endpoint@1 {
+						reg = <1>;
+						remote-endpoint = <&vin5csi41>;
+					};
+					csi41vin6: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&vin6csi41>;
+					};
+					csi41vin7: endpoint@3 {
+						reg = <3>;
+						remote-endpoint = <&vin7csi41>;
+					};
+				};
+			};
+		};
+
+		hdmi0: hdmi@fead0000 {
+			compatible = "renesas,r8a7795-hdmi", "renesas,rcar-gen3-hdmi";
+			reg = <0 0xfead0000 0 0x10000>;
+			interrupts = <GIC_SPI 389 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 729>, <&cpg CPG_CORE R8A7795_CLK_HDMI>;
+			clock-names = "iahb", "isfr";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 729>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					dw_hdmi0_in: endpoint {
+						remote-endpoint = <&du_out_hdmi0>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+				};
+				port@2 {
+					/* HDMI sound */
+					reg = <2>;
+				};
+			};
+		};
+
+		hdmi1: hdmi@feae0000 {
+			compatible = "renesas,r8a7795-hdmi", "renesas,rcar-gen3-hdmi";
+			reg = <0 0xfeae0000 0 0x10000>;
+			interrupts = <GIC_SPI 436 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 728>, <&cpg CPG_CORE R8A7795_CLK_HDMI>;
+			clock-names = "iahb", "isfr";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 728>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					dw_hdmi1_in: endpoint {
+						remote-endpoint = <&du_out_hdmi1>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+				};
+				port@2 {
+					/* HDMI sound */
+					reg = <2>;
+				};
+			};
+		};
+
+		du: display@feb00000 {
+			compatible = "renesas,du-r8a7795";
+			reg = <0 0xfeb00000 0 0x80000>;
+			interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 724>,
+				 <&cpg CPG_MOD 723>,
+				 <&cpg CPG_MOD 722>,
+				 <&cpg CPG_MOD 721>;
+			clock-names = "du.0", "du.1", "du.2", "du.3";
+
+			renesas,cmms = <&cmm0>, <&cmm1>, <&cmm2>, <&cmm3>;
+			vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>, <&vspd0 1>;
+
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					du_out_rgb: endpoint {
+					};
+				};
+				port@1 {
+					reg = <1>;
+					du_out_hdmi0: endpoint {
+						remote-endpoint = <&dw_hdmi0_in>;
+					};
+				};
+				port@2 {
+					reg = <2>;
+					du_out_hdmi1: endpoint {
+						remote-endpoint = <&dw_hdmi1_in>;
+					};
+				};
+				port@3 {
+					reg = <3>;
+					du_out_lvds0: endpoint {
+						remote-endpoint = <&lvds0_in>;
+					};
+				};
+			};
+		};
+
+		lvds0: lvds@feb90000 {
+			compatible = "renesas,r8a7795-lvds";
+			reg = <0 0xfeb90000 0 0x14>;
+			clocks = <&cpg CPG_MOD 727>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 727>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					lvds0_in: endpoint {
+						remote-endpoint = <&du_out_lvds0>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					lvds0_out: endpoint {
+					};
+				};
+			};
+		};
+
+		prr: chipid@fff00044 {
+			compatible = "renesas,prr";
+			reg = <0 0xfff00044 0 4>;
+		};
+	};
+
+	thermal-zones {
+		sensor_thermal1: sensor-thermal1 {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+			thermal-sensors = <&tsc 0>;
+			sustainable-power = <6313>;
+
+			trips {
+				sensor1_crit: sensor1-crit {
+					temperature = <120000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		sensor_thermal2: sensor-thermal2 {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+			thermal-sensors = <&tsc 1>;
+			sustainable-power = <6313>;
+
+			trips {
+				sensor2_crit: sensor2-crit {
+					temperature = <120000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		sensor_thermal3: sensor-thermal3 {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+			thermal-sensors = <&tsc 2>;
+
+			trips {
+				target: trip-point1 {
+					temperature = <100000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+
+				sensor3_crit: sensor3-crit {
+					temperature = <120000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&target>;
+					cooling-device = <&a57_0 2 4>;
+					contribution = <1024>;
+				};
+
+				map1 {
+					trip = <&target>;
+					cooling-device = <&a53_0 0 2>;
+					contribution = <1024>;
+				};
+			};
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+	/* External USB clocks - can be overridden by the board */
+	usb3s0_clk: usb3s0 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	usb_extal_clk: usb_extal {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb-kf.dts b/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb-kf.dts
deleted file mode 100644
index 2df50eb..0000000
--- a/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb-kf.dts
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the M3ULCB Kingfisher board
- *
- * Copyright (C) 2017 Renesas Electronics Corp.
- * Copyright (C) 2017 Cogent Embedded, Inc.
- */
-
-#include "r8a7796-m3ulcb.dts"
-#include "ulcb-kf.dtsi"
-
-/ {
-	model = "Renesas M3ULCB Kingfisher board based on r8a7796";
-	compatible = "shimafuji,kingfisher", "renesas,m3ulcb",
-		     "renesas,r8a7796";
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts
deleted file mode 100644
index 9e4594c..0000000
--- a/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts
+++ /dev/null
@@ -1,38 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the M3ULCB (R-Car Starter Kit Pro) board
- *
- * Copyright (C) 2016 Renesas Electronics Corp.
- * Copyright (C) 2016 Cogent Embedded, Inc.
- */
-
-/dts-v1/;
-#include "r8a7796.dtsi"
-#include "ulcb.dtsi"
-
-/ {
-	model = "Renesas M3ULCB board based on r8a7796";
-	compatible = "renesas,m3ulcb", "renesas,r8a7796";
-
-	memory@48000000 {
-		device_type = "memory";
-		/* first 128MB is reserved for secure area. */
-		reg = <0x0 0x48000000 0x0 0x38000000>;
-	};
-
-	memory@600000000 {
-		device_type = "memory";
-		reg = <0x6 0x00000000 0x0 0x40000000>;
-	};
-};
-
-&du {
-	clocks = <&cpg CPG_MOD 724>,
-		 <&cpg CPG_MOD 723>,
-		 <&cpg CPG_MOD 722>,
-		 <&versaclock5 1>,
-		 <&versaclock5 3>,
-		 <&versaclock5 2>;
-	clock-names = "du.0", "du.1", "du.2",
-		      "dclkin.0", "dclkin.1", "dclkin.2";
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
deleted file mode 100644
index de37e91..0000000
--- a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
+++ /dev/null
@@ -1,83 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the Salvator-X board with R-Car M3-W
- *
- * Copyright (C) 2016 Renesas Electronics Corp.
- */
-
-/dts-v1/;
-#include "r8a7796.dtsi"
-#include "salvator-x.dtsi"
-
-/ {
-	model = "Renesas Salvator-X board based on r8a7796";
-	compatible = "renesas,salvator-x", "renesas,r8a7796";
-
-	memory@48000000 {
-		device_type = "memory";
-		/* first 128MB is reserved for secure area. */
-		reg = <0x0 0x48000000 0x0 0x78000000>;
-	};
-
-	memory@600000000 {
-		device_type = "memory";
-		reg = <0x6 0x00000000 0x0 0x80000000>;
-	};
-};
-
-&du {
-	clocks = <&cpg CPG_MOD 724>,
-		 <&cpg CPG_MOD 723>,
-		 <&cpg CPG_MOD 722>,
-		 <&versaclock5 1>,
-		 <&x21_clk>,
-		 <&versaclock5 2>;
-	clock-names = "du.0", "du.1", "du.2",
-		      "dclkin.0", "dclkin.1", "dclkin.2";
-};
-
-&hdmi0 {
-	status = "okay";
-
-	ports {
-		port@1 {
-			reg = <1>;
-			rcar_dw_hdmi0_out: endpoint {
-				remote-endpoint = <&hdmi0_con>;
-			};
-		};
-		port@2 {
-			reg = <2>;
-			dw_hdmi0_snd_in: endpoint {
-				remote-endpoint = <&rsnd_endpoint1>;
-			};
-		};
-	};
-};
-
-&hdmi0_con {
-	remote-endpoint = <&rcar_dw_hdmi0_out>;
-};
-
-&rcar_sound {
-	ports {
-		/* rsnd_port0 is on salvator-common */
-		rsnd_port1: port@1 {
-			reg = <1>;
-			rsnd_endpoint1: endpoint {
-				remote-endpoint = <&dw_hdmi0_snd_in>;
-
-				dai-format = "i2s";
-				bitclock-master = <&rsnd_endpoint1>;
-				frame-master = <&rsnd_endpoint1>;
-
-				playback = <&ssi2>;
-			};
-		};
-	};
-};
-
-&sound_card {
-	dais = <&rsnd_port0	/* ak4613 */
-		&rsnd_port1>;	/* HDMI0  */
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts
deleted file mode 100644
index a1cbfef..0000000
--- a/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts
+++ /dev/null
@@ -1,83 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the Salvator-X 2nd version board with R-Car M3-W
- *
- * Copyright (C) 2015-2017 Renesas Electronics Corp.
- */
-
-/dts-v1/;
-#include "r8a7796.dtsi"
-#include "salvator-xs.dtsi"
-
-/ {
-	model = "Renesas Salvator-X 2nd version board based on r8a7796";
-	compatible = "renesas,salvator-xs", "renesas,r8a7796";
-
-	memory@48000000 {
-		device_type = "memory";
-		/* first 128MB is reserved for secure area. */
-		reg = <0x0 0x48000000 0x0 0x78000000>;
-	};
-
-	memory@600000000 {
-		device_type = "memory";
-		reg = <0x6 0x00000000 0x0 0x80000000>;
-	};
-};
-
-&du {
-	clocks = <&cpg CPG_MOD 724>,
-		 <&cpg CPG_MOD 723>,
-		 <&cpg CPG_MOD 722>,
-		 <&versaclock6 1>,
-		 <&x21_clk>,
-		 <&versaclock6 2>;
-	clock-names = "du.0", "du.1", "du.2",
-		      "dclkin.0", "dclkin.1", "dclkin.2";
-};
-
-&hdmi0 {
-	status = "okay";
-
-	ports {
-		port@1 {
-			reg = <1>;
-			rcar_dw_hdmi0_out: endpoint {
-				remote-endpoint = <&hdmi0_con>;
-			};
-		};
-		port@2 {
-			reg = <2>;
-			dw_hdmi0_snd_in: endpoint {
-				remote-endpoint = <&rsnd_endpoint1>;
-			};
-		};
-	};
-};
-
-&hdmi0_con {
-	remote-endpoint = <&rcar_dw_hdmi0_out>;
-};
-
-&rcar_sound {
-	ports {
-		/* rsnd_port0 is on salvator-common */
-		rsnd_port1: port@1 {
-			reg = <1>;
-			rsnd_endpoint1: endpoint {
-				remote-endpoint = <&dw_hdmi0_snd_in>;
-
-				dai-format = "i2s";
-				bitclock-master = <&rsnd_endpoint1>;
-				frame-master = <&rsnd_endpoint1>;
-
-				playback = <&ssi2>;
-			};
-		};
-	};
-};
-
-&sound_card {
-	dais = <&rsnd_port0	/* ak4613 */
-		&rsnd_port1>;	/* HDMI0  */
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
deleted file mode 100644
index b9db882..0000000
--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+++ /dev/null
@@ -1,2972 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the R-Car M3-W (R8A77960) SoC
- *
- * Copyright (C) 2016-2017 Renesas Electronics Corp.
- */
-
-#include <dt-bindings/clock/r8a7796-cpg-mssr.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/power/r8a7796-sysc.h>
-
-#define CPG_AUDIO_CLK_I		R8A7796_CLK_S0D4
-
-/ {
-	compatible = "renesas,r8a7796";
-	#address-cells = <2>;
-	#size-cells = <2>;
-
-	aliases {
-		i2c0 = &i2c0;
-		i2c1 = &i2c1;
-		i2c2 = &i2c2;
-		i2c3 = &i2c3;
-		i2c4 = &i2c4;
-		i2c5 = &i2c5;
-		i2c6 = &i2c6;
-		i2c7 = &i2c_dvfs;
-	};
-
-	/*
-	 * The external audio clocks are configured as 0 Hz fixed frequency
-	 * clocks by default.
-	 * Boards that provide audio clocks should override them.
-	 */
-	audio_clk_a: audio_clk_a {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-
-	audio_clk_b: audio_clk_b {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-
-	audio_clk_c: audio_clk_c {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-
-	/* External CAN clock - to be overridden by boards that provide it */
-	can_clk: can {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-
-	cluster0_opp: opp_table0 {
-		compatible = "operating-points-v2";
-		opp-shared;
-
-		opp-500000000 {
-			opp-hz = /bits/ 64 <500000000>;
-			opp-microvolt = <820000>;
-			clock-latency-ns = <300000>;
-		};
-		opp-1000000000 {
-			opp-hz = /bits/ 64 <1000000000>;
-			opp-microvolt = <820000>;
-			clock-latency-ns = <300000>;
-		};
-		opp-1500000000 {
-			opp-hz = /bits/ 64 <1500000000>;
-			opp-microvolt = <820000>;
-			clock-latency-ns = <300000>;
-		};
-		opp-1600000000 {
-			opp-hz = /bits/ 64 <1600000000>;
-			opp-microvolt = <900000>;
-			clock-latency-ns = <300000>;
-			turbo-mode;
-		};
-		opp-1700000000 {
-			opp-hz = /bits/ 64 <1700000000>;
-			opp-microvolt = <900000>;
-			clock-latency-ns = <300000>;
-			turbo-mode;
-		};
-		opp-1800000000 {
-			opp-hz = /bits/ 64 <1800000000>;
-			opp-microvolt = <960000>;
-			clock-latency-ns = <300000>;
-			turbo-mode;
-		};
-	};
-
-	cluster1_opp: opp_table1 {
-		compatible = "operating-points-v2";
-		opp-shared;
-
-		opp-800000000 {
-			opp-hz = /bits/ 64 <800000000>;
-			opp-microvolt = <820000>;
-			clock-latency-ns = <300000>;
-		};
-		opp-1000000000 {
-			opp-hz = /bits/ 64 <1000000000>;
-			opp-microvolt = <820000>;
-			clock-latency-ns = <300000>;
-		};
-		opp-1200000000 {
-			opp-hz = /bits/ 64 <1200000000>;
-			opp-microvolt = <820000>;
-			clock-latency-ns = <300000>;
-		};
-		opp-1300000000 {
-			opp-hz = /bits/ 64 <1300000000>;
-			opp-microvolt = <820000>;
-			clock-latency-ns = <300000>;
-			turbo-mode;
-		};
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		cpu-map {
-			cluster0 {
-				core0 {
-					cpu = <&a57_0>;
-				};
-				core1 {
-					cpu = <&a57_1>;
-				};
-			};
-
-			cluster1 {
-				core0 {
-					cpu = <&a53_0>;
-				};
-				core1 {
-					cpu = <&a53_1>;
-				};
-				core2 {
-					cpu = <&a53_2>;
-				};
-				core3 {
-					cpu = <&a53_3>;
-				};
-			};
-		};
-
-		a57_0: cpu@0 {
-			compatible = "arm,cortex-a57";
-			reg = <0x0>;
-			device_type = "cpu";
-			power-domains = <&sysc R8A7796_PD_CA57_CPU0>;
-			next-level-cache = <&L2_CA57>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0>;
-			dynamic-power-coefficient = <854>;
-			clocks = <&cpg CPG_CORE R8A7796_CLK_Z>;
-			operating-points-v2 = <&cluster0_opp>;
-			capacity-dmips-mhz = <1024>;
-			#cooling-cells = <2>;
-		};
-
-		a57_1: cpu@1 {
-			compatible = "arm,cortex-a57";
-			reg = <0x1>;
-			device_type = "cpu";
-			power-domains = <&sysc R8A7796_PD_CA57_CPU1>;
-			next-level-cache = <&L2_CA57>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0>;
-			clocks = <&cpg CPG_CORE R8A7796_CLK_Z>;
-			operating-points-v2 = <&cluster0_opp>;
-			capacity-dmips-mhz = <1024>;
-			#cooling-cells = <2>;
-		};
-
-		a53_0: cpu@100 {
-			compatible = "arm,cortex-a53";
-			reg = <0x100>;
-			device_type = "cpu";
-			power-domains = <&sysc R8A7796_PD_CA53_CPU0>;
-			next-level-cache = <&L2_CA53>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_1>;
-			#cooling-cells = <2>;
-			dynamic-power-coefficient = <277>;
-			clocks = <&cpg CPG_CORE R8A7796_CLK_Z2>;
-			operating-points-v2 = <&cluster1_opp>;
-			capacity-dmips-mhz = <535>;
-		};
-
-		a53_1: cpu@101 {
-			compatible = "arm,cortex-a53";
-			reg = <0x101>;
-			device_type = "cpu";
-			power-domains = <&sysc R8A7796_PD_CA53_CPU1>;
-			next-level-cache = <&L2_CA53>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_1>;
-			clocks = <&cpg CPG_CORE R8A7796_CLK_Z2>;
-			operating-points-v2 = <&cluster1_opp>;
-			capacity-dmips-mhz = <535>;
-		};
-
-		a53_2: cpu@102 {
-			compatible = "arm,cortex-a53";
-			reg = <0x102>;
-			device_type = "cpu";
-			power-domains = <&sysc R8A7796_PD_CA53_CPU2>;
-			next-level-cache = <&L2_CA53>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_1>;
-			clocks = <&cpg CPG_CORE R8A7796_CLK_Z2>;
-			operating-points-v2 = <&cluster1_opp>;
-			capacity-dmips-mhz = <535>;
-		};
-
-		a53_3: cpu@103 {
-			compatible = "arm,cortex-a53";
-			reg = <0x103>;
-			device_type = "cpu";
-			power-domains = <&sysc R8A7796_PD_CA53_CPU3>;
-			next-level-cache = <&L2_CA53>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_1>;
-			clocks = <&cpg CPG_CORE R8A7796_CLK_Z2>;
-			operating-points-v2 = <&cluster1_opp>;
-			capacity-dmips-mhz = <535>;
-		};
-
-		L2_CA57: cache-controller-0 {
-			compatible = "cache";
-			power-domains = <&sysc R8A7796_PD_CA57_SCU>;
-			cache-unified;
-			cache-level = <2>;
-		};
-
-		L2_CA53: cache-controller-1 {
-			compatible = "cache";
-			power-domains = <&sysc R8A7796_PD_CA53_SCU>;
-			cache-unified;
-			cache-level = <2>;
-		};
-
-		idle-states {
-			entry-method = "psci";
-
-			CPU_SLEEP_0: cpu-sleep-0 {
-				compatible = "arm,idle-state";
-				arm,psci-suspend-param = <0x0010000>;
-				local-timer-stop;
-				entry-latency-us = <400>;
-				exit-latency-us = <500>;
-				min-residency-us = <4000>;
-			};
-
-			CPU_SLEEP_1: cpu-sleep-1 {
-				compatible = "arm,idle-state";
-				arm,psci-suspend-param = <0x0010000>;
-				local-timer-stop;
-				entry-latency-us = <700>;
-				exit-latency-us = <700>;
-				min-residency-us = <5000>;
-			};
-		};
-	};
-
-	extal_clk: extal {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		/* This value must be overridden by the board */
-		clock-frequency = <0>;
-	};
-
-	extalr_clk: extalr {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		/* This value must be overridden by the board */
-		clock-frequency = <0>;
-	};
-
-	/* External PCIe clock - can be overridden by the board */
-	pcie_bus_clk: pcie_bus {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-
-	pmu_a53 {
-		compatible = "arm,cortex-a53-pmu";
-		interrupts-extended = <&gic GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
-				      <&gic GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
-				      <&gic GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
-				      <&gic GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
-		interrupt-affinity = <&a53_0>, <&a53_1>, <&a53_2>, <&a53_3>;
-	};
-
-	pmu_a57 {
-		compatible = "arm,cortex-a57-pmu";
-		interrupts-extended = <&gic GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
-				      <&gic GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
-		interrupt-affinity = <&a57_0>, <&a57_1>;
-	};
-
-	psci {
-		compatible = "arm,psci-1.0", "arm,psci-0.2";
-		method = "smc";
-	};
-
-	/* External SCIF clock - to be overridden by boards that provide it */
-	scif_clk: scif {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-
-	soc {
-		compatible = "simple-bus";
-		interrupt-parent = <&gic>;
-		#address-cells = <2>;
-		#size-cells = <2>;
-		ranges;
-
-		rwdt: watchdog@e6020000 {
-			compatible = "renesas,r8a7796-wdt",
-				     "renesas,rcar-gen3-wdt";
-			reg = <0 0xe6020000 0 0x0c>;
-			clocks = <&cpg CPG_MOD 402>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 402>;
-			status = "disabled";
-		};
-
-		gpio0: gpio@e6050000 {
-			compatible = "renesas,gpio-r8a7796",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6050000 0 0x50>;
-			interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 0 16>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 912>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 912>;
-		};
-
-		gpio1: gpio@e6051000 {
-			compatible = "renesas,gpio-r8a7796",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6051000 0 0x50>;
-			interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 32 29>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 911>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 911>;
-		};
-
-		gpio2: gpio@e6052000 {
-			compatible = "renesas,gpio-r8a7796",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6052000 0 0x50>;
-			interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 64 15>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 910>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 910>;
-		};
-
-		gpio3: gpio@e6053000 {
-			compatible = "renesas,gpio-r8a7796",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6053000 0 0x50>;
-			interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 96 16>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 909>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 909>;
-		};
-
-		gpio4: gpio@e6054000 {
-			compatible = "renesas,gpio-r8a7796",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6054000 0 0x50>;
-			interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 128 18>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 908>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 908>;
-		};
-
-		gpio5: gpio@e6055000 {
-			compatible = "renesas,gpio-r8a7796",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6055000 0 0x50>;
-			interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 160 26>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 907>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 907>;
-		};
-
-		gpio6: gpio@e6055400 {
-			compatible = "renesas,gpio-r8a7796",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6055400 0 0x50>;
-			interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 192 32>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 906>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 906>;
-		};
-
-		gpio7: gpio@e6055800 {
-			compatible = "renesas,gpio-r8a7796",
-				     "renesas,rcar-gen3-gpio";
-			reg = <0 0xe6055800 0 0x50>;
-			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
-			#gpio-cells = <2>;
-			gpio-controller;
-			gpio-ranges = <&pfc 0 224 4>;
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			clocks = <&cpg CPG_MOD 905>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 905>;
-		};
-
-		pfc: pin-controller@e6060000 {
-			compatible = "renesas,pfc-r8a7796";
-			reg = <0 0xe6060000 0 0x50c>;
-		};
-
-		cmt0: timer@e60f0000 {
-			compatible = "renesas,r8a7796-cmt0",
-				     "renesas,rcar-gen3-cmt0";
-			reg = <0 0xe60f0000 0 0x1004>;
-			interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 303>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 303>;
-			status = "disabled";
-		};
-
-		cmt1: timer@e6130000 {
-			compatible = "renesas,r8a7796-cmt1",
-				     "renesas,rcar-gen3-cmt1";
-			reg = <0 0xe6130000 0 0x1004>;
-			interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 302>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 302>;
-			status = "disabled";
-		};
-
-		cmt2: timer@e6140000 {
-			compatible = "renesas,r8a7796-cmt1",
-				     "renesas,rcar-gen3-cmt1";
-			reg = <0 0xe6140000 0 0x1004>;
-			interrupts = <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 301>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 301>;
-			status = "disabled";
-		};
-
-		cmt3: timer@e6148000 {
-			compatible = "renesas,r8a7796-cmt1",
-				     "renesas,rcar-gen3-cmt1";
-			reg = <0 0xe6148000 0 0x1004>;
-			interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 300>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 300>;
-			status = "disabled";
-		};
-
-		cpg: clock-controller@e6150000 {
-			compatible = "renesas,r8a7796-cpg-mssr";
-			reg = <0 0xe6150000 0 0x1000>;
-			clocks = <&extal_clk>, <&extalr_clk>;
-			clock-names = "extal", "extalr";
-			#clock-cells = <2>;
-			#power-domain-cells = <0>;
-			#reset-cells = <1>;
-		};
-
-		rst: reset-controller@e6160000 {
-			compatible = "renesas,r8a7796-rst";
-			reg = <0 0xe6160000 0 0x0200>;
-		};
-
-		sysc: system-controller@e6180000 {
-			compatible = "renesas,r8a7796-sysc";
-			reg = <0 0xe6180000 0 0x0400>;
-			#power-domain-cells = <1>;
-		};
-
-		tsc: thermal@e6198000 {
-			compatible = "renesas,r8a7796-thermal";
-			reg = <0 0xe6198000 0 0x100>,
-			      <0 0xe61a0000 0 0x100>,
-			      <0 0xe61a8000 0 0x100>;
-			interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 522>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 522>;
-			#thermal-sensor-cells = <1>;
-		};
-
-		intc_ex: interrupt-controller@e61c0000 {
-			compatible = "renesas,intc-ex-r8a7796", "renesas,irqc";
-			#interrupt-cells = <2>;
-			interrupt-controller;
-			reg = <0 0xe61c0000 0 0x200>;
-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 407>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 407>;
-		};
-
-		i2c0: i2c@e6500000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,i2c-r8a7796",
-				     "renesas,rcar-gen3-i2c";
-			reg = <0 0xe6500000 0 0x40>;
-			interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 931>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 931>;
-			dmas = <&dmac1 0x91>, <&dmac1 0x90>,
-			       <&dmac2 0x91>, <&dmac2 0x90>;
-			dma-names = "tx", "rx", "tx", "rx";
-			i2c-scl-internal-delay-ns = <110>;
-			status = "disabled";
-		};
-
-		i2c1: i2c@e6508000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,i2c-r8a7796",
-				     "renesas,rcar-gen3-i2c";
-			reg = <0 0xe6508000 0 0x40>;
-			interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 930>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 930>;
-			dmas = <&dmac1 0x93>, <&dmac1 0x92>,
-			       <&dmac2 0x93>, <&dmac2 0x92>;
-			dma-names = "tx", "rx", "tx", "rx";
-			i2c-scl-internal-delay-ns = <6>;
-			status = "disabled";
-		};
-
-		i2c2: i2c@e6510000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,i2c-r8a7796",
-				     "renesas,rcar-gen3-i2c";
-			reg = <0 0xe6510000 0 0x40>;
-			interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 929>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 929>;
-			dmas = <&dmac1 0x95>, <&dmac1 0x94>,
-			       <&dmac2 0x95>, <&dmac2 0x94>;
-			dma-names = "tx", "rx", "tx", "rx";
-			i2c-scl-internal-delay-ns = <6>;
-			status = "disabled";
-		};
-
-		i2c3: i2c@e66d0000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,i2c-r8a7796",
-				     "renesas,rcar-gen3-i2c";
-			reg = <0 0xe66d0000 0 0x40>;
-			interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 928>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 928>;
-			dmas = <&dmac0 0x97>, <&dmac0 0x96>;
-			dma-names = "tx", "rx";
-			i2c-scl-internal-delay-ns = <110>;
-			status = "disabled";
-		};
-
-		i2c4: i2c@e66d8000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,i2c-r8a7796",
-				     "renesas,rcar-gen3-i2c";
-			reg = <0 0xe66d8000 0 0x40>;
-			interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 927>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 927>;
-			dmas = <&dmac0 0x99>, <&dmac0 0x98>;
-			dma-names = "tx", "rx";
-			i2c-scl-internal-delay-ns = <110>;
-			status = "disabled";
-		};
-
-		i2c5: i2c@e66e0000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,i2c-r8a7796",
-				     "renesas,rcar-gen3-i2c";
-			reg = <0 0xe66e0000 0 0x40>;
-			interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 919>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 919>;
-			dmas = <&dmac0 0x9b>, <&dmac0 0x9a>;
-			dma-names = "tx", "rx";
-			i2c-scl-internal-delay-ns = <110>;
-			status = "disabled";
-		};
-
-		i2c6: i2c@e66e8000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,i2c-r8a7796",
-				     "renesas,rcar-gen3-i2c";
-			reg = <0 0xe66e8000 0 0x40>;
-			interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 918>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 918>;
-			dmas = <&dmac0 0x9d>, <&dmac0 0x9c>;
-			dma-names = "tx", "rx";
-			i2c-scl-internal-delay-ns = <6>;
-			status = "disabled";
-		};
-
-		i2c_dvfs: i2c@e60b0000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "renesas,iic-r8a7796",
-				     "renesas,rcar-gen3-iic",
-				     "renesas,rmobile-iic";
-			reg = <0 0xe60b0000 0 0x425>;
-			interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 926>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 926>;
-			dmas = <&dmac0 0x11>, <&dmac0 0x10>;
-			dma-names = "tx", "rx";
-			status = "disabled";
-		};
-
-		hscif0: serial@e6540000 {
-			compatible = "renesas,hscif-r8a7796",
-				     "renesas,rcar-gen3-hscif",
-				     "renesas,hscif";
-			reg = <0 0xe6540000 0 0x60>;
-			interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 520>,
-				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac1 0x31>, <&dmac1 0x30>,
-			       <&dmac2 0x31>, <&dmac2 0x30>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 520>;
-			status = "disabled";
-		};
-
-		hscif1: serial@e6550000 {
-			compatible = "renesas,hscif-r8a7796",
-				     "renesas,rcar-gen3-hscif",
-				     "renesas,hscif";
-			reg = <0 0xe6550000 0 0x60>;
-			interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 519>,
-				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac1 0x33>, <&dmac1 0x32>,
-			       <&dmac2 0x33>, <&dmac2 0x32>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 519>;
-			status = "disabled";
-		};
-
-		hscif2: serial@e6560000 {
-			compatible = "renesas,hscif-r8a7796",
-				     "renesas,rcar-gen3-hscif",
-				     "renesas,hscif";
-			reg = <0 0xe6560000 0 0x60>;
-			interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 518>,
-				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac1 0x35>, <&dmac1 0x34>,
-			       <&dmac2 0x35>, <&dmac2 0x34>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 518>;
-			status = "disabled";
-		};
-
-		hscif3: serial@e66a0000 {
-			compatible = "renesas,hscif-r8a7796",
-				     "renesas,rcar-gen3-hscif",
-				     "renesas,hscif";
-			reg = <0 0xe66a0000 0 0x60>;
-			interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 517>,
-				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac0 0x37>, <&dmac0 0x36>;
-			dma-names = "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 517>;
-			status = "disabled";
-		};
-
-		hscif4: serial@e66b0000 {
-			compatible = "renesas,hscif-r8a7796",
-				     "renesas,rcar-gen3-hscif",
-				     "renesas,hscif";
-			reg = <0 0xe66b0000 0 0x60>;
-			interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 516>,
-				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac0 0x39>, <&dmac0 0x38>;
-			dma-names = "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 516>;
-			status = "disabled";
-		};
-
-		hsusb: usb@e6590000 {
-			compatible = "renesas,usbhs-r8a7796",
-				     "renesas,rcar-gen3-usbhs";
-			reg = <0 0xe6590000 0 0x200>;
-			interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 704>, <&cpg CPG_MOD 703>;
-			dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
-			       <&usb_dmac1 0>, <&usb_dmac1 1>;
-			dma-names = "ch0", "ch1", "ch2", "ch3";
-			renesas,buswait = <11>;
-			phys = <&usb2_phy0 3>;
-			phy-names = "usb";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 704>, <&cpg 703>;
-			status = "disabled";
-		};
-
-		usb_dmac0: dma-controller@e65a0000 {
-			compatible = "renesas,r8a7796-usb-dmac",
-				     "renesas,usb-dmac";
-			reg = <0 0xe65a0000 0 0x100>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "ch0", "ch1";
-			clocks = <&cpg CPG_MOD 330>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 330>;
-			#dma-cells = <1>;
-			dma-channels = <2>;
-		};
-
-		usb_dmac1: dma-controller@e65b0000 {
-			compatible = "renesas,r8a7796-usb-dmac",
-				     "renesas,usb-dmac";
-			reg = <0 0xe65b0000 0 0x100>;
-			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "ch0", "ch1";
-			clocks = <&cpg CPG_MOD 331>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 331>;
-			#dma-cells = <1>;
-			dma-channels = <2>;
-		};
-
-		usb3_phy0: usb-phy@e65ee000 {
-			compatible = "renesas,r8a7796-usb3-phy",
-				     "renesas,rcar-gen3-usb3-phy";
-			reg = <0 0xe65ee000 0 0x90>;
-			clocks = <&cpg CPG_MOD 328>, <&usb3s0_clk>,
-				 <&usb_extal_clk>;
-			clock-names = "usb3-if", "usb3s_clk", "usb_extal";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 328>;
-			#phy-cells = <0>;
-			status = "disabled";
-		};
-
-		dmac0: dma-controller@e6700000 {
-			compatible = "renesas,dmac-r8a7796",
-				     "renesas,rcar-dmac";
-			reg = <0 0xe6700000 0 0x10000>;
-			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "error",
-					"ch0", "ch1", "ch2", "ch3",
-					"ch4", "ch5", "ch6", "ch7",
-					"ch8", "ch9", "ch10", "ch11",
-					"ch12", "ch13", "ch14", "ch15";
-			clocks = <&cpg CPG_MOD 219>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 219>;
-			#dma-cells = <1>;
-			dma-channels = <16>;
-			iommus = <&ipmmu_ds0 0>, <&ipmmu_ds0 1>,
-			       <&ipmmu_ds0 2>, <&ipmmu_ds0 3>,
-			       <&ipmmu_ds0 4>, <&ipmmu_ds0 5>,
-			       <&ipmmu_ds0 6>, <&ipmmu_ds0 7>,
-			       <&ipmmu_ds0 8>, <&ipmmu_ds0 9>,
-			       <&ipmmu_ds0 10>, <&ipmmu_ds0 11>,
-			       <&ipmmu_ds0 12>, <&ipmmu_ds0 13>,
-			       <&ipmmu_ds0 14>, <&ipmmu_ds0 15>;
-		};
-
-		dmac1: dma-controller@e7300000 {
-			compatible = "renesas,dmac-r8a7796",
-				     "renesas,rcar-dmac";
-			reg = <0 0xe7300000 0 0x10000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "error",
-					"ch0", "ch1", "ch2", "ch3",
-					"ch4", "ch5", "ch6", "ch7",
-					"ch8", "ch9", "ch10", "ch11",
-					"ch12", "ch13", "ch14", "ch15";
-			clocks = <&cpg CPG_MOD 218>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 218>;
-			#dma-cells = <1>;
-			dma-channels = <16>;
-			iommus = <&ipmmu_ds1 0>, <&ipmmu_ds1 1>,
-			       <&ipmmu_ds1 2>, <&ipmmu_ds1 3>,
-			       <&ipmmu_ds1 4>, <&ipmmu_ds1 5>,
-			       <&ipmmu_ds1 6>, <&ipmmu_ds1 7>,
-			       <&ipmmu_ds1 8>, <&ipmmu_ds1 9>,
-			       <&ipmmu_ds1 10>, <&ipmmu_ds1 11>,
-			       <&ipmmu_ds1 12>, <&ipmmu_ds1 13>,
-			       <&ipmmu_ds1 14>, <&ipmmu_ds1 15>;
-		};
-
-		dmac2: dma-controller@e7310000 {
-			compatible = "renesas,dmac-r8a7796",
-				     "renesas,rcar-dmac";
-			reg = <0 0xe7310000 0 0x10000>;
-			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "error",
-					"ch0", "ch1", "ch2", "ch3",
-					"ch4", "ch5", "ch6", "ch7",
-					"ch8", "ch9", "ch10", "ch11",
-					"ch12", "ch13", "ch14", "ch15";
-			clocks = <&cpg CPG_MOD 217>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 217>;
-			#dma-cells = <1>;
-			dma-channels = <16>;
-			iommus = <&ipmmu_ds1 16>, <&ipmmu_ds1 17>,
-			       <&ipmmu_ds1 18>, <&ipmmu_ds1 19>,
-			       <&ipmmu_ds1 20>, <&ipmmu_ds1 21>,
-			       <&ipmmu_ds1 22>, <&ipmmu_ds1 23>,
-			       <&ipmmu_ds1 24>, <&ipmmu_ds1 25>,
-			       <&ipmmu_ds1 26>, <&ipmmu_ds1 27>,
-			       <&ipmmu_ds1 28>, <&ipmmu_ds1 29>,
-			       <&ipmmu_ds1 30>, <&ipmmu_ds1 31>;
-		};
-
-		ipmmu_ds0: mmu@e6740000 {
-			compatible = "renesas,ipmmu-r8a7796";
-			reg = <0 0xe6740000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 0>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_ds1: mmu@e7740000 {
-			compatible = "renesas,ipmmu-r8a7796";
-			reg = <0 0xe7740000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 1>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_hc: mmu@e6570000 {
-			compatible = "renesas,ipmmu-r8a7796";
-			reg = <0 0xe6570000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 2>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_ir: mmu@ff8b0000 {
-			compatible = "renesas,ipmmu-r8a7796";
-			reg = <0 0xff8b0000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 3>;
-			power-domains = <&sysc R8A7796_PD_A3IR>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_mm: mmu@e67b0000 {
-			compatible = "renesas,ipmmu-r8a7796";
-			reg = <0 0xe67b0000 0 0x1000>;
-			interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_mp: mmu@ec670000 {
-			compatible = "renesas,ipmmu-r8a7796";
-			reg = <0 0xec670000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 4>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_pv0: mmu@fd800000 {
-			compatible = "renesas,ipmmu-r8a7796";
-			reg = <0 0xfd800000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 5>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_pv1: mmu@fd950000 {
-			compatible = "renesas,ipmmu-r8a7796";
-			reg = <0 0xfd950000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 6>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_rt: mmu@ffc80000 {
-			compatible = "renesas,ipmmu-r8a7796";
-			reg = <0 0xffc80000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 7>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_vc0: mmu@fe6b0000 {
-			compatible = "renesas,ipmmu-r8a7796";
-			reg = <0 0xfe6b0000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 8>;
-			power-domains = <&sysc R8A7796_PD_A3VC>;
-			#iommu-cells = <1>;
-		};
-
-		ipmmu_vi0: mmu@febd0000 {
-			compatible = "renesas,ipmmu-r8a7796";
-			reg = <0 0xfebd0000 0 0x1000>;
-			renesas,ipmmu-main = <&ipmmu_mm 9>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			#iommu-cells = <1>;
-		};
-
-		avb: ethernet@e6800000 {
-			compatible = "renesas,etheravb-r8a7796",
-				     "renesas,etheravb-rcar-gen3";
-			reg = <0 0xe6800000 0 0x800>, <0 0xe6a00000 0 0x10000>;
-			interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "ch0", "ch1", "ch2", "ch3",
-					  "ch4", "ch5", "ch6", "ch7",
-					  "ch8", "ch9", "ch10", "ch11",
-					  "ch12", "ch13", "ch14", "ch15",
-					  "ch16", "ch17", "ch18", "ch19",
-					  "ch20", "ch21", "ch22", "ch23",
-					  "ch24";
-			clocks = <&cpg CPG_MOD 812>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 812>;
-			phy-mode = "rgmii";
-			iommus = <&ipmmu_ds0 16>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		can0: can@e6c30000 {
-			compatible = "renesas,can-r8a7796",
-				     "renesas,rcar-gen3-can";
-			reg = <0 0xe6c30000 0 0x1000>;
-			interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 916>,
-			       <&cpg CPG_CORE R8A7796_CLK_CANFD>,
-			       <&can_clk>;
-			clock-names = "clkp1", "clkp2", "can_clk";
-			assigned-clocks = <&cpg CPG_CORE R8A7796_CLK_CANFD>;
-			assigned-clock-rates = <40000000>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 916>;
-			status = "disabled";
-		};
-
-		can1: can@e6c38000 {
-			compatible = "renesas,can-r8a7796",
-				     "renesas,rcar-gen3-can";
-			reg = <0 0xe6c38000 0 0x1000>;
-			interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 915>,
-			       <&cpg CPG_CORE R8A7796_CLK_CANFD>,
-			       <&can_clk>;
-			clock-names = "clkp1", "clkp2", "can_clk";
-			assigned-clocks = <&cpg CPG_CORE R8A7796_CLK_CANFD>;
-			assigned-clock-rates = <40000000>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 915>;
-			status = "disabled";
-		};
-
-		canfd: can@e66c0000 {
-			compatible = "renesas,r8a7796-canfd",
-				     "renesas,rcar-gen3-canfd";
-			reg = <0 0xe66c0000 0 0x8000>;
-			interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
-				   <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 914>,
-			       <&cpg CPG_CORE R8A7796_CLK_CANFD>,
-			       <&can_clk>;
-			clock-names = "fck", "canfd", "can_clk";
-			assigned-clocks = <&cpg CPG_CORE R8A7796_CLK_CANFD>;
-			assigned-clock-rates = <40000000>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 914>;
-			status = "disabled";
-
-			channel0 {
-				status = "disabled";
-			};
-
-			channel1 {
-				status = "disabled";
-			};
-		};
-
-		pwm0: pwm@e6e30000 {
-			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
-			reg = <0 0xe6e30000 0 8>;
-			#pwm-cells = <2>;
-			clocks = <&cpg CPG_MOD 523>;
-			resets = <&cpg 523>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			status = "disabled";
-		};
-
-		pwm1: pwm@e6e31000 {
-			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
-			reg = <0 0xe6e31000 0 8>;
-			#pwm-cells = <2>;
-			clocks = <&cpg CPG_MOD 523>;
-			resets = <&cpg 523>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			status = "disabled";
-		};
-
-		pwm2: pwm@e6e32000 {
-			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
-			reg = <0 0xe6e32000 0 8>;
-			#pwm-cells = <2>;
-			clocks = <&cpg CPG_MOD 523>;
-			resets = <&cpg 523>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			status = "disabled";
-		};
-
-		pwm3: pwm@e6e33000 {
-			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
-			reg = <0 0xe6e33000 0 8>;
-			#pwm-cells = <2>;
-			clocks = <&cpg CPG_MOD 523>;
-			resets = <&cpg 523>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			status = "disabled";
-		};
-
-		pwm4: pwm@e6e34000 {
-			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
-			reg = <0 0xe6e34000 0 8>;
-			#pwm-cells = <2>;
-			clocks = <&cpg CPG_MOD 523>;
-			resets = <&cpg 523>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			status = "disabled";
-		};
-
-		pwm5: pwm@e6e35000 {
-			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
-			reg = <0 0xe6e35000 0 8>;
-			#pwm-cells = <2>;
-			clocks = <&cpg CPG_MOD 523>;
-			resets = <&cpg 523>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			status = "disabled";
-		};
-
-		pwm6: pwm@e6e36000 {
-			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
-			reg = <0 0xe6e36000 0 8>;
-			#pwm-cells = <2>;
-			clocks = <&cpg CPG_MOD 523>;
-			resets = <&cpg 523>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			status = "disabled";
-		};
-
-		scif0: serial@e6e60000 {
-			compatible = "renesas,scif-r8a7796",
-				     "renesas,rcar-gen3-scif", "renesas,scif";
-			reg = <0 0xe6e60000 0 64>;
-			interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 207>,
-				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac1 0x51>, <&dmac1 0x50>,
-			       <&dmac2 0x51>, <&dmac2 0x50>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 207>;
-			status = "disabled";
-		};
-
-		scif1: serial@e6e68000 {
-			compatible = "renesas,scif-r8a7796",
-				     "renesas,rcar-gen3-scif", "renesas,scif";
-			reg = <0 0xe6e68000 0 64>;
-			interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 206>,
-				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac1 0x53>, <&dmac1 0x52>,
-			       <&dmac2 0x53>, <&dmac2 0x52>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 206>;
-			status = "disabled";
-		};
-
-		scif2: serial@e6e88000 {
-			compatible = "renesas,scif-r8a7796",
-				     "renesas,rcar-gen3-scif", "renesas,scif";
-			reg = <0 0xe6e88000 0 64>;
-			interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 310>,
-				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac1 0x13>, <&dmac1 0x12>,
-			       <&dmac2 0x13>, <&dmac2 0x12>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 310>;
-			status = "disabled";
-		};
-
-		scif3: serial@e6c50000 {
-			compatible = "renesas,scif-r8a7796",
-				     "renesas,rcar-gen3-scif", "renesas,scif";
-			reg = <0 0xe6c50000 0 64>;
-			interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 204>,
-				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac0 0x57>, <&dmac0 0x56>;
-			dma-names = "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 204>;
-			status = "disabled";
-		};
-
-		scif4: serial@e6c40000 {
-			compatible = "renesas,scif-r8a7796",
-				     "renesas,rcar-gen3-scif", "renesas,scif";
-			reg = <0 0xe6c40000 0 64>;
-			interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 203>,
-				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac0 0x59>, <&dmac0 0x58>;
-			dma-names = "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 203>;
-			status = "disabled";
-		};
-
-		scif5: serial@e6f30000 {
-			compatible = "renesas,scif-r8a7796",
-				     "renesas,rcar-gen3-scif", "renesas,scif";
-			reg = <0 0xe6f30000 0 64>;
-			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 202>,
-				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
-				 <&scif_clk>;
-			clock-names = "fck", "brg_int", "scif_clk";
-			dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
-			       <&dmac2 0x5b>, <&dmac2 0x5a>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 202>;
-			status = "disabled";
-		};
-
-		tpu: pwm@e6e80000 {
-			compatible = "renesas,tpu-r8a7796", "renesas,tpu";
-			reg = <0 0xe6e80000 0 0x148>;
-			interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 304>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 304>;
-			#pwm-cells = <3>;
-			status = "disabled";
-		};
-
-		msiof0: spi@e6e90000 {
-			compatible = "renesas,msiof-r8a7796",
-				     "renesas,rcar-gen3-msiof";
-			reg = <0 0xe6e90000 0 0x0064>;
-			interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 211>;
-			dmas = <&dmac1 0x41>, <&dmac1 0x40>,
-			       <&dmac2 0x41>, <&dmac2 0x40>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 211>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		msiof1: spi@e6ea0000 {
-			compatible = "renesas,msiof-r8a7796",
-				     "renesas,rcar-gen3-msiof";
-			reg = <0 0xe6ea0000 0 0x0064>;
-			interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 210>;
-			dmas = <&dmac1 0x43>, <&dmac1 0x42>,
-			       <&dmac2 0x43>, <&dmac2 0x42>;
-			dma-names = "tx", "rx", "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 210>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		msiof2: spi@e6c00000 {
-			compatible = "renesas,msiof-r8a7796",
-				     "renesas,rcar-gen3-msiof";
-			reg = <0 0xe6c00000 0 0x0064>;
-			interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 209>;
-			dmas = <&dmac0 0x45>, <&dmac0 0x44>;
-			dma-names = "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 209>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		msiof3: spi@e6c10000 {
-			compatible = "renesas,msiof-r8a7796",
-				     "renesas,rcar-gen3-msiof";
-			reg = <0 0xe6c10000 0 0x0064>;
-			interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 208>;
-			dmas = <&dmac0 0x47>, <&dmac0 0x46>;
-			dma-names = "tx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 208>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		vin0: video@e6ef0000 {
-			compatible = "renesas,vin-r8a7796";
-			reg = <0 0xe6ef0000 0 0x1000>;
-			interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 811>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 811>;
-			renesas,id = <0>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin0csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin0>;
-					};
-					vin0csi40: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&csi40vin0>;
-					};
-				};
-			};
-		};
-
-		vin1: video@e6ef1000 {
-			compatible = "renesas,vin-r8a7796";
-			reg = <0 0xe6ef1000 0 0x1000>;
-			interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 810>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 810>;
-			renesas,id = <1>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin1csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin1>;
-					};
-					vin1csi40: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&csi40vin1>;
-					};
-				};
-			};
-		};
-
-		vin2: video@e6ef2000 {
-			compatible = "renesas,vin-r8a7796";
-			reg = <0 0xe6ef2000 0 0x1000>;
-			interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 809>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 809>;
-			renesas,id = <2>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin2csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin2>;
-					};
-					vin2csi40: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&csi40vin2>;
-					};
-				};
-			};
-		};
-
-		vin3: video@e6ef3000 {
-			compatible = "renesas,vin-r8a7796";
-			reg = <0 0xe6ef3000 0 0x1000>;
-			interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 808>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 808>;
-			renesas,id = <3>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin3csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin3>;
-					};
-					vin3csi40: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&csi40vin3>;
-					};
-				};
-			};
-		};
-
-		vin4: video@e6ef4000 {
-			compatible = "renesas,vin-r8a7796";
-			reg = <0 0xe6ef4000 0 0x1000>;
-			interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 807>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 807>;
-			renesas,id = <4>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin4csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin4>;
-					};
-					vin4csi40: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&csi40vin4>;
-					};
-				};
-			};
-		};
-
-		vin5: video@e6ef5000 {
-			compatible = "renesas,vin-r8a7796";
-			reg = <0 0xe6ef5000 0 0x1000>;
-			interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 806>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 806>;
-			renesas,id = <5>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin5csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin5>;
-					};
-					vin5csi40: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&csi40vin5>;
-					};
-				};
-			};
-		};
-
-		vin6: video@e6ef6000 {
-			compatible = "renesas,vin-r8a7796";
-			reg = <0 0xe6ef6000 0 0x1000>;
-			interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 805>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 805>;
-			renesas,id = <6>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin6csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin6>;
-					};
-					vin6csi40: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&csi40vin6>;
-					};
-				};
-			};
-		};
-
-		vin7: video@e6ef7000 {
-			compatible = "renesas,vin-r8a7796";
-			reg = <0 0xe6ef7000 0 0x1000>;
-			interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 804>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 804>;
-			renesas,id = <7>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					vin7csi20: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&csi20vin7>;
-					};
-					vin7csi40: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&csi40vin7>;
-					};
-				};
-			};
-		};
-
-		drif00: rif@e6f40000 {
-			compatible = "renesas,r8a7796-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6f40000 0 0x64>;
-			interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 515>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x20>, <&dmac2 0x20>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 515>;
-			renesas,bonding = <&drif01>;
-			status = "disabled";
-		};
-
-		drif01: rif@e6f50000 {
-			compatible = "renesas,r8a7796-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6f50000 0 0x64>;
-			interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 514>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x22>, <&dmac2 0x22>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 514>;
-			renesas,bonding = <&drif00>;
-			status = "disabled";
-		};
-
-		drif10: rif@e6f60000 {
-			compatible = "renesas,r8a7796-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6f60000 0 0x64>;
-			interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 513>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x24>, <&dmac2 0x24>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 513>;
-			renesas,bonding = <&drif11>;
-			status = "disabled";
-		};
-
-		drif11: rif@e6f70000 {
-			compatible = "renesas,r8a7796-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6f70000 0 0x64>;
-			interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 512>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x26>, <&dmac2 0x26>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 512>;
-			renesas,bonding = <&drif10>;
-			status = "disabled";
-		};
-
-		drif20: rif@e6f80000 {
-			compatible = "renesas,r8a7796-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6f80000 0 0x64>;
-			interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 511>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x28>, <&dmac2 0x28>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 511>;
-			renesas,bonding = <&drif21>;
-			status = "disabled";
-		};
-
-		drif21: rif@e6f90000 {
-			compatible = "renesas,r8a7796-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6f90000 0 0x64>;
-			interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 510>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x2a>, <&dmac2 0x2a>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 510>;
-			renesas,bonding = <&drif20>;
-			status = "disabled";
-		};
-
-		drif30: rif@e6fa0000 {
-			compatible = "renesas,r8a7796-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6fa0000 0 0x64>;
-			interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 509>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x2c>, <&dmac2 0x2c>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 509>;
-			renesas,bonding = <&drif31>;
-			status = "disabled";
-		};
-
-		drif31: rif@e6fb0000 {
-			compatible = "renesas,r8a7796-drif",
-				     "renesas,rcar-gen3-drif";
-			reg = <0 0xe6fb0000 0 0x64>;
-			interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 508>;
-			clock-names = "fck";
-			dmas = <&dmac1 0x2e>, <&dmac2 0x2e>;
-			dma-names = "rx", "rx";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 508>;
-			renesas,bonding = <&drif30>;
-			status = "disabled";
-		};
-
-		rcar_sound: sound@ec500000 {
-			/*
-			 * #sound-dai-cells is required
-			 *
-			 * Single DAI : #sound-dai-cells = <0>;	<&rcar_sound>;
-			 * Multi  DAI : #sound-dai-cells = <1>;	<&rcar_sound N>;
-			 */
-			/*
-			 * #clock-cells is required for audio_clkout0/1/2/3
-			 *
-			 * clkout	: #clock-cells = <0>;	<&rcar_sound>;
-			 * clkout0/1/2/3: #clock-cells = <1>;	<&rcar_sound N>;
-			 */
-			compatible =  "renesas,rcar_sound-r8a7796", "renesas,rcar_sound-gen3";
-			reg =	<0 0xec500000 0 0x1000>, /* SCU */
-				<0 0xec5a0000 0 0x100>,  /* ADG */
-				<0 0xec540000 0 0x1000>, /* SSIU */
-				<0 0xec541000 0 0x280>,  /* SSI */
-				<0 0xec760000 0 0x200>;  /* Audio DMAC peri peri*/
-			reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
-
-			clocks = <&cpg CPG_MOD 1005>,
-				 <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
-				 <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
-				 <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
-				 <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
-				 <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
-				 <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
-				 <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
-				 <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
-				 <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
-				 <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
-				 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
-				 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
-				 <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
-				 <&audio_clk_a>, <&audio_clk_b>,
-				 <&audio_clk_c>,
-				 <&cpg CPG_CORE R8A7796_CLK_S0D4>;
-			clock-names = "ssi-all",
-				      "ssi.9", "ssi.8", "ssi.7", "ssi.6",
-				      "ssi.5", "ssi.4", "ssi.3", "ssi.2",
-				      "ssi.1", "ssi.0",
-				      "src.9", "src.8", "src.7", "src.6",
-				      "src.5", "src.4", "src.3", "src.2",
-				      "src.1", "src.0",
-				      "mix.1", "mix.0",
-				      "ctu.1", "ctu.0",
-				      "dvc.0", "dvc.1",
-				      "clk_a", "clk_b", "clk_c", "clk_i";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 1005>,
-				 <&cpg 1006>, <&cpg 1007>,
-				 <&cpg 1008>, <&cpg 1009>,
-				 <&cpg 1010>, <&cpg 1011>,
-				 <&cpg 1012>, <&cpg 1013>,
-				 <&cpg 1014>, <&cpg 1015>;
-			reset-names = "ssi-all",
-				      "ssi.9", "ssi.8", "ssi.7", "ssi.6",
-				      "ssi.5", "ssi.4", "ssi.3", "ssi.2",
-				      "ssi.1", "ssi.0";
-			status = "disabled";
-
-			rcar_sound,ctu {
-				ctu00: ctu-0 { };
-				ctu01: ctu-1 { };
-				ctu02: ctu-2 { };
-				ctu03: ctu-3 { };
-				ctu10: ctu-4 { };
-				ctu11: ctu-5 { };
-				ctu12: ctu-6 { };
-				ctu13: ctu-7 { };
-			};
-
-			rcar_sound,dvc {
-				dvc0: dvc-0 {
-					dmas = <&audma1 0xbc>;
-					dma-names = "tx";
-				};
-				dvc1: dvc-1 {
-					dmas = <&audma1 0xbe>;
-					dma-names = "tx";
-				};
-			};
-
-			rcar_sound,mix {
-				mix0: mix-0 { };
-				mix1: mix-1 { };
-			};
-
-			rcar_sound,src {
-				src0: src-0 {
-					interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x85>, <&audma1 0x9a>;
-					dma-names = "rx", "tx";
-				};
-				src1: src-1 {
-					interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x87>, <&audma1 0x9c>;
-					dma-names = "rx", "tx";
-				};
-				src2: src-2 {
-					interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x89>, <&audma1 0x9e>;
-					dma-names = "rx", "tx";
-				};
-				src3: src-3 {
-					interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x8b>, <&audma1 0xa0>;
-					dma-names = "rx", "tx";
-				};
-				src4: src-4 {
-					interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x8d>, <&audma1 0xb0>;
-					dma-names = "rx", "tx";
-				};
-				src5: src-5 {
-					interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x8f>, <&audma1 0xb2>;
-					dma-names = "rx", "tx";
-				};
-				src6: src-6 {
-					interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x91>, <&audma1 0xb4>;
-					dma-names = "rx", "tx";
-				};
-				src7: src-7 {
-					interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x93>, <&audma1 0xb6>;
-					dma-names = "rx", "tx";
-				};
-				src8: src-8 {
-					interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x95>, <&audma1 0xb8>;
-					dma-names = "rx", "tx";
-				};
-				src9: src-9 {
-					interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x97>, <&audma1 0xba>;
-					dma-names = "rx", "tx";
-				};
-			};
-
-			rcar_sound,ssi {
-				ssi0: ssi-0 {
-					interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x01>, <&audma1 0x02>;
-					dma-names = "rx", "tx";
-				};
-				ssi1: ssi-1 {
-					interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x03>, <&audma1 0x04>;
-					dma-names = "rx", "tx";
-				};
-				ssi2: ssi-2 {
-					interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x05>, <&audma1 0x06>;
-					dma-names = "rx", "tx";
-				};
-				ssi3: ssi-3 {
-					interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x07>, <&audma1 0x08>;
-					dma-names = "rx", "tx";
-				};
-				ssi4: ssi-4 {
-					interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x09>, <&audma1 0x0a>;
-					dma-names = "rx", "tx";
-				};
-				ssi5: ssi-5 {
-					interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x0b>, <&audma1 0x0c>;
-					dma-names = "rx", "tx";
-				};
-				ssi6: ssi-6 {
-					interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x0d>, <&audma1 0x0e>;
-					dma-names = "rx", "tx";
-				};
-				ssi7: ssi-7 {
-					interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x0f>, <&audma1 0x10>;
-					dma-names = "rx", "tx";
-				};
-				ssi8: ssi-8 {
-					interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x11>, <&audma1 0x12>;
-					dma-names = "rx", "tx";
-				};
-				ssi9: ssi-9 {
-					interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
-					dmas = <&audma0 0x13>, <&audma1 0x14>;
-					dma-names = "rx", "tx";
-				};
-			};
-
-			rcar_sound,ssiu {
-				ssiu00: ssiu-0 {
-					dmas = <&audma0 0x15>, <&audma1 0x16>;
-					dma-names = "rx", "tx";
-				};
-				ssiu01: ssiu-1 {
-					dmas = <&audma0 0x35>, <&audma1 0x36>;
-					dma-names = "rx", "tx";
-				};
-				ssiu02: ssiu-2 {
-					dmas = <&audma0 0x37>, <&audma1 0x38>;
-					dma-names = "rx", "tx";
-				};
-				ssiu03: ssiu-3 {
-					dmas = <&audma0 0x47>, <&audma1 0x48>;
-					dma-names = "rx", "tx";
-				};
-				ssiu04: ssiu-4 {
-					dmas = <&audma0 0x3F>, <&audma1 0x40>;
-					dma-names = "rx", "tx";
-				};
-				ssiu05: ssiu-5 {
-					dmas = <&audma0 0x43>, <&audma1 0x44>;
-					dma-names = "rx", "tx";
-				};
-				ssiu06: ssiu-6 {
-					dmas = <&audma0 0x4F>, <&audma1 0x50>;
-					dma-names = "rx", "tx";
-				};
-				ssiu07: ssiu-7 {
-					dmas = <&audma0 0x53>, <&audma1 0x54>;
-					dma-names = "rx", "tx";
-				};
-				ssiu10: ssiu-8 {
-					dmas = <&audma0 0x49>, <&audma1 0x4a>;
-					dma-names = "rx", "tx";
-				};
-				ssiu11: ssiu-9 {
-					dmas = <&audma0 0x4B>, <&audma1 0x4C>;
-					dma-names = "rx", "tx";
-				};
-				ssiu12: ssiu-10 {
-					dmas = <&audma0 0x57>, <&audma1 0x58>;
-					dma-names = "rx", "tx";
-				};
-				ssiu13: ssiu-11 {
-					dmas = <&audma0 0x59>, <&audma1 0x5A>;
-					dma-names = "rx", "tx";
-				};
-				ssiu14: ssiu-12 {
-					dmas = <&audma0 0x5F>, <&audma1 0x60>;
-					dma-names = "rx", "tx";
-				};
-				ssiu15: ssiu-13 {
-					dmas = <&audma0 0xC3>, <&audma1 0xC4>;
-					dma-names = "rx", "tx";
-				};
-				ssiu16: ssiu-14 {
-					dmas = <&audma0 0xC7>, <&audma1 0xC8>;
-					dma-names = "rx", "tx";
-				};
-				ssiu17: ssiu-15 {
-					dmas = <&audma0 0xCB>, <&audma1 0xCC>;
-					dma-names = "rx", "tx";
-				};
-				ssiu20: ssiu-16 {
-					dmas = <&audma0 0x63>, <&audma1 0x64>;
-					dma-names = "rx", "tx";
-				};
-				ssiu21: ssiu-17 {
-					dmas = <&audma0 0x67>, <&audma1 0x68>;
-					dma-names = "rx", "tx";
-				};
-				ssiu22: ssiu-18 {
-					dmas = <&audma0 0x6B>, <&audma1 0x6C>;
-					dma-names = "rx", "tx";
-				};
-				ssiu23: ssiu-19 {
-					dmas = <&audma0 0x6D>, <&audma1 0x6E>;
-					dma-names = "rx", "tx";
-				};
-				ssiu24: ssiu-20 {
-					dmas = <&audma0 0xCF>, <&audma1 0xCE>;
-					dma-names = "rx", "tx";
-				};
-				ssiu25: ssiu-21 {
-					dmas = <&audma0 0xEB>, <&audma1 0xEC>;
-					dma-names = "rx", "tx";
-				};
-				ssiu26: ssiu-22 {
-					dmas = <&audma0 0xED>, <&audma1 0xEE>;
-					dma-names = "rx", "tx";
-				};
-				ssiu27: ssiu-23 {
-					dmas = <&audma0 0xEF>, <&audma1 0xF0>;
-					dma-names = "rx", "tx";
-				};
-				ssiu30: ssiu-24 {
-					dmas = <&audma0 0x6f>, <&audma1 0x70>;
-					dma-names = "rx", "tx";
-				};
-				ssiu31: ssiu-25 {
-					dmas = <&audma0 0x21>, <&audma1 0x22>;
-					dma-names = "rx", "tx";
-				};
-				ssiu32: ssiu-26 {
-					dmas = <&audma0 0x23>, <&audma1 0x24>;
-					dma-names = "rx", "tx";
-				};
-				ssiu33: ssiu-27 {
-					dmas = <&audma0 0x25>, <&audma1 0x26>;
-					dma-names = "rx", "tx";
-				};
-				ssiu34: ssiu-28 {
-					dmas = <&audma0 0x27>, <&audma1 0x28>;
-					dma-names = "rx", "tx";
-				};
-				ssiu35: ssiu-29 {
-					dmas = <&audma0 0x29>, <&audma1 0x2A>;
-					dma-names = "rx", "tx";
-				};
-				ssiu36: ssiu-30 {
-					dmas = <&audma0 0x2B>, <&audma1 0x2C>;
-					dma-names = "rx", "tx";
-				};
-				ssiu37: ssiu-31 {
-					dmas = <&audma0 0x2D>, <&audma1 0x2E>;
-					dma-names = "rx", "tx";
-				};
-				ssiu40: ssiu-32 {
-					dmas =	<&audma0 0x71>, <&audma1 0x72>;
-					dma-names = "rx", "tx";
-				};
-				ssiu41: ssiu-33 {
-					dmas = <&audma0 0x17>, <&audma1 0x18>;
-					dma-names = "rx", "tx";
-				};
-				ssiu42: ssiu-34 {
-					dmas = <&audma0 0x19>, <&audma1 0x1A>;
-					dma-names = "rx", "tx";
-				};
-				ssiu43: ssiu-35 {
-					dmas = <&audma0 0x1B>, <&audma1 0x1C>;
-					dma-names = "rx", "tx";
-				};
-				ssiu44: ssiu-36 {
-					dmas = <&audma0 0x1D>, <&audma1 0x1E>;
-					dma-names = "rx", "tx";
-				};
-				ssiu45: ssiu-37 {
-					dmas = <&audma0 0x1F>, <&audma1 0x20>;
-					dma-names = "rx", "tx";
-				};
-				ssiu46: ssiu-38 {
-					dmas = <&audma0 0x31>, <&audma1 0x32>;
-					dma-names = "rx", "tx";
-				};
-				ssiu47: ssiu-39 {
-					dmas = <&audma0 0x33>, <&audma1 0x34>;
-					dma-names = "rx", "tx";
-				};
-				ssiu50: ssiu-40 {
-					dmas = <&audma0 0x73>, <&audma1 0x74>;
-					dma-names = "rx", "tx";
-				};
-				ssiu60: ssiu-41 {
-					dmas = <&audma0 0x75>, <&audma1 0x76>;
-					dma-names = "rx", "tx";
-				};
-				ssiu70: ssiu-42 {
-					dmas = <&audma0 0x79>, <&audma1 0x7a>;
-					dma-names = "rx", "tx";
-				};
-				ssiu80: ssiu-43 {
-					dmas = <&audma0 0x7b>, <&audma1 0x7c>;
-					dma-names = "rx", "tx";
-				};
-				ssiu90: ssiu-44 {
-					dmas = <&audma0 0x7d>, <&audma1 0x7e>;
-					dma-names = "rx", "tx";
-				};
-				ssiu91: ssiu-45 {
-					dmas = <&audma0 0x7F>, <&audma1 0x80>;
-					dma-names = "rx", "tx";
-				};
-				ssiu92: ssiu-46 {
-					dmas = <&audma0 0x81>, <&audma1 0x82>;
-					dma-names = "rx", "tx";
-				};
-				ssiu93: ssiu-47 {
-					dmas = <&audma0 0x83>, <&audma1 0x84>;
-					dma-names = "rx", "tx";
-				};
-				ssiu94: ssiu-48 {
-					dmas = <&audma0 0xA3>, <&audma1 0xA4>;
-					dma-names = "rx", "tx";
-				};
-				ssiu95: ssiu-49 {
-					dmas = <&audma0 0xA5>, <&audma1 0xA6>;
-					dma-names = "rx", "tx";
-				};
-				ssiu96: ssiu-50 {
-					dmas = <&audma0 0xA7>, <&audma1 0xA8>;
-					dma-names = "rx", "tx";
-				};
-				ssiu97: ssiu-51 {
-					dmas = <&audma0 0xA9>, <&audma1 0xAA>;
-					dma-names = "rx", "tx";
-				};
-			};
-		};
-
-		audma0: dma-controller@ec700000 {
-			compatible = "renesas,dmac-r8a7796",
-				     "renesas,rcar-dmac";
-			reg = <0 0xec700000 0 0x10000>;
-			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "error",
-					"ch0", "ch1", "ch2", "ch3",
-					"ch4", "ch5", "ch6", "ch7",
-					"ch8", "ch9", "ch10", "ch11",
-					"ch12", "ch13", "ch14", "ch15";
-			clocks = <&cpg CPG_MOD 502>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 502>;
-			#dma-cells = <1>;
-			dma-channels = <16>;
-			iommus = <&ipmmu_mp 0>, <&ipmmu_mp 1>,
-			       <&ipmmu_mp 2>, <&ipmmu_mp 3>,
-			       <&ipmmu_mp 4>, <&ipmmu_mp 5>,
-			       <&ipmmu_mp 6>, <&ipmmu_mp 7>,
-			       <&ipmmu_mp 8>, <&ipmmu_mp 9>,
-			       <&ipmmu_mp 10>, <&ipmmu_mp 11>,
-			       <&ipmmu_mp 12>, <&ipmmu_mp 13>,
-			       <&ipmmu_mp 14>, <&ipmmu_mp 15>;
-		};
-
-		audma1: dma-controller@ec720000 {
-			compatible = "renesas,dmac-r8a7796",
-				     "renesas,rcar-dmac";
-			reg = <0 0xec720000 0 0x10000>;
-			interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names = "error",
-					"ch0", "ch1", "ch2", "ch3",
-					"ch4", "ch5", "ch6", "ch7",
-					"ch8", "ch9", "ch10", "ch11",
-					"ch12", "ch13", "ch14", "ch15";
-			clocks = <&cpg CPG_MOD 501>;
-			clock-names = "fck";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 501>;
-			#dma-cells = <1>;
-			dma-channels = <16>;
-			iommus = <&ipmmu_mp 16>, <&ipmmu_mp 17>,
-			       <&ipmmu_mp 18>, <&ipmmu_mp 19>,
-			       <&ipmmu_mp 20>, <&ipmmu_mp 21>,
-			       <&ipmmu_mp 22>, <&ipmmu_mp 23>,
-			       <&ipmmu_mp 24>, <&ipmmu_mp 25>,
-			       <&ipmmu_mp 26>, <&ipmmu_mp 27>,
-			       <&ipmmu_mp 28>, <&ipmmu_mp 29>,
-			       <&ipmmu_mp 30>, <&ipmmu_mp 31>;
-		};
-
-		xhci0: usb@ee000000 {
-			compatible = "renesas,xhci-r8a7796",
-				     "renesas,rcar-gen3-xhci";
-			reg = <0 0xee000000 0 0xc00>;
-			interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 328>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 328>;
-			status = "disabled";
-		};
-
-		usb3_peri0: usb@ee020000 {
-			compatible = "renesas,r8a7796-usb3-peri",
-				     "renesas,rcar-gen3-usb3-peri";
-			reg = <0 0xee020000 0 0x400>;
-			interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 328>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 328>;
-			status = "disabled";
-		};
-
-		ohci0: usb@ee080000 {
-			compatible = "generic-ohci";
-			reg = <0 0xee080000 0 0x100>;
-			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
-			phys = <&usb2_phy0 1>;
-			phy-names = "usb";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 703>, <&cpg 704>;
-			status = "disabled";
-		};
-
-		ohci1: usb@ee0a0000 {
-			compatible = "generic-ohci";
-			reg = <0 0xee0a0000 0 0x100>;
-			interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 702>;
-			phys = <&usb2_phy1 1>;
-			phy-names = "usb";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 702>;
-			status = "disabled";
-		};
-
-		ehci0: usb@ee080100 {
-			compatible = "generic-ehci";
-			reg = <0 0xee080100 0 0x100>;
-			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
-			phys = <&usb2_phy0 2>;
-			phy-names = "usb";
-			companion = <&ohci0>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 703>, <&cpg 704>;
-			status = "disabled";
-		};
-
-		ehci1: usb@ee0a0100 {
-			compatible = "generic-ehci";
-			reg = <0 0xee0a0100 0 0x100>;
-			interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 702>;
-			phys = <&usb2_phy1 2>;
-			phy-names = "usb";
-			companion = <&ohci1>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 702>;
-			status = "disabled";
-		};
-
-		usb2_phy0: usb-phy@ee080200 {
-			compatible = "renesas,usb2-phy-r8a7796",
-				     "renesas,rcar-gen3-usb2-phy";
-			reg = <0 0xee080200 0 0x700>;
-			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 703>, <&cpg 704>;
-			#phy-cells = <1>;
-			status = "disabled";
-		};
-
-		usb2_phy1: usb-phy@ee0a0200 {
-			compatible = "renesas,usb2-phy-r8a7796",
-				     "renesas,rcar-gen3-usb2-phy";
-			reg = <0 0xee0a0200 0 0x700>;
-			clocks = <&cpg CPG_MOD 702>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 702>;
-			#phy-cells = <1>;
-			status = "disabled";
-		};
-
-		sdhi0: sd@ee100000 {
-			compatible = "renesas,sdhi-r8a7796",
-				     "renesas,rcar-gen3-sdhi";
-			reg = <0 0xee100000 0 0x2000>;
-			interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 314>;
-			max-frequency = <200000000>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 314>;
-			iommus = <&ipmmu_ds1 32>;
-			status = "disabled";
-		};
-
-		sdhi1: sd@ee120000 {
-			compatible = "renesas,sdhi-r8a7796",
-				     "renesas,rcar-gen3-sdhi";
-			reg = <0 0xee120000 0 0x2000>;
-			interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 313>;
-			max-frequency = <200000000>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 313>;
-			iommus = <&ipmmu_ds1 33>;
-			status = "disabled";
-		};
-
-		sdhi2: sd@ee140000 {
-			compatible = "renesas,sdhi-r8a7796",
-				     "renesas,rcar-gen3-sdhi";
-			reg = <0 0xee140000 0 0x2000>;
-			interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 312>;
-			max-frequency = <200000000>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 312>;
-			iommus = <&ipmmu_ds1 34>;
-			status = "disabled";
-		};
-
-		sdhi3: sd@ee160000 {
-			compatible = "renesas,sdhi-r8a7796",
-				     "renesas,rcar-gen3-sdhi";
-			reg = <0 0xee160000 0 0x2000>;
-			interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 311>;
-			max-frequency = <200000000>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 311>;
-			iommus = <&ipmmu_ds1 35>;
-			status = "disabled";
-		};
-
-		gic: interrupt-controller@f1010000 {
-			compatible = "arm,gic-400";
-			#interrupt-cells = <3>;
-			#address-cells = <0>;
-			interrupt-controller;
-			reg = <0x0 0xf1010000 0 0x1000>,
-			      <0x0 0xf1020000 0 0x20000>,
-			      <0x0 0xf1040000 0 0x20000>,
-			      <0x0 0xf1060000 0 0x20000>;
-			interrupts = <GIC_PPI 9
-					(GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_HIGH)>;
-			clocks = <&cpg CPG_MOD 408>;
-			clock-names = "clk";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 408>;
-		};
-
-		pciec0: pcie@fe000000 {
-			compatible = "renesas,pcie-r8a7796",
-				     "renesas,pcie-rcar-gen3";
-			reg = <0 0xfe000000 0 0x80000>;
-			#address-cells = <3>;
-			#size-cells = <2>;
-			bus-range = <0x00 0xff>;
-			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
-				0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
-				0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
-				0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
-			/* Map all possible DDR as inbound ranges */
-			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
-			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
-			#interrupt-cells = <1>;
-			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>;
-			clock-names = "pcie", "pcie_bus";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 319>;
-			status = "disabled";
-		};
-
-		pciec1: pcie@ee800000 {
-			compatible = "renesas,pcie-r8a7796",
-				     "renesas,pcie-rcar-gen3";
-			reg = <0 0xee800000 0 0x80000>;
-			#address-cells = <3>;
-			#size-cells = <2>;
-			bus-range = <0x00 0xff>;
-			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xee900000 0 0x00100000
-				0x02000000 0 0xeea00000 0 0xeea00000 0 0x00200000
-				0x02000000 0 0xc0000000 0 0xc0000000 0 0x08000000
-				0x42000000 0 0xc8000000 0 0xc8000000 0 0x08000000>;
-			/* Map all possible DDR as inbound ranges */
-			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
-			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
-			#interrupt-cells = <1>;
-			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 318>, <&pcie_bus_clk>;
-			clock-names = "pcie", "pcie_bus";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 318>;
-			status = "disabled";
-		};
-
-		imr-lx4@fe860000 {
-			compatible = "renesas,r8a7796-imr-lx4",
-				     "renesas,imr-lx4";
-			reg = <0 0xfe860000 0 0x2000>;
-			interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 823>;
-			power-domains = <&sysc R8A7796_PD_A3VC>;
-			resets = <&cpg 823>;
-		};
-
-		imr-lx4@fe870000 {
-			compatible = "renesas,r8a7796-imr-lx4",
-				     "renesas,imr-lx4";
-			reg = <0 0xfe870000 0 0x2000>;
-			interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 822>;
-			power-domains = <&sysc R8A7796_PD_A3VC>;
-			resets = <&cpg 822>;
-		};
-
-		fdp1@fe940000 {
-			compatible = "renesas,fdp1";
-			reg = <0 0xfe940000 0 0x2400>;
-			interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 119>;
-			power-domains = <&sysc R8A7796_PD_A3VC>;
-			resets = <&cpg 119>;
-			renesas,fcp = <&fcpf0>;
-		};
-
-		fcpf0: fcp@fe950000 {
-			compatible = "renesas,fcpf";
-			reg = <0 0xfe950000 0 0x200>;
-			clocks = <&cpg CPG_MOD 615>;
-			power-domains = <&sysc R8A7796_PD_A3VC>;
-			resets = <&cpg 615>;
-		};
-
-		fcpvb0: fcp@fe96f000 {
-			compatible = "renesas,fcpv";
-			reg = <0 0xfe96f000 0 0x200>;
-			clocks = <&cpg CPG_MOD 607>;
-			power-domains = <&sysc R8A7796_PD_A3VC>;
-			resets = <&cpg 607>;
-		};
-
-		fcpvi0: fcp@fe9af000 {
-			compatible = "renesas,fcpv";
-			reg = <0 0xfe9af000 0 0x200>;
-			clocks = <&cpg CPG_MOD 611>;
-			power-domains = <&sysc R8A7796_PD_A3VC>;
-			resets = <&cpg 611>;
-			iommus = <&ipmmu_vc0 19>;
-		};
-
-		fcpvd0: fcp@fea27000 {
-			compatible = "renesas,fcpv";
-			reg = <0 0xfea27000 0 0x200>;
-			clocks = <&cpg CPG_MOD 603>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 603>;
-			iommus = <&ipmmu_vi0 8>;
-		};
-
-		fcpvd1: fcp@fea2f000 {
-			compatible = "renesas,fcpv";
-			reg = <0 0xfea2f000 0 0x200>;
-			clocks = <&cpg CPG_MOD 602>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 602>;
-			iommus = <&ipmmu_vi0 9>;
-		};
-
-		fcpvd2: fcp@fea37000 {
-			compatible = "renesas,fcpv";
-			reg = <0 0xfea37000 0 0x200>;
-			clocks = <&cpg CPG_MOD 601>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 601>;
-			iommus = <&ipmmu_vi0 10>;
-		};
-
-		vspb: vsp@fe960000 {
-			compatible = "renesas,vsp2";
-			reg = <0 0xfe960000 0 0x8000>;
-			interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 626>;
-			power-domains = <&sysc R8A7796_PD_A3VC>;
-			resets = <&cpg 626>;
-
-			renesas,fcp = <&fcpvb0>;
-		};
-
-		vspd0: vsp@fea20000 {
-			compatible = "renesas,vsp2";
-			reg = <0 0xfea20000 0 0x5000>;
-			interrupts = <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 623>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 623>;
-
-			renesas,fcp = <&fcpvd0>;
-		};
-
-		vspd1: vsp@fea28000 {
-			compatible = "renesas,vsp2";
-			reg = <0 0xfea28000 0 0x5000>;
-			interrupts = <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 622>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 622>;
-
-			renesas,fcp = <&fcpvd1>;
-		};
-
-		vspd2: vsp@fea30000 {
-			compatible = "renesas,vsp2";
-			reg = <0 0xfea30000 0 0x5000>;
-			interrupts = <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 621>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 621>;
-
-			renesas,fcp = <&fcpvd2>;
-		};
-
-		vspi0: vsp@fe9a0000 {
-			compatible = "renesas,vsp2";
-			reg = <0 0xfe9a0000 0 0x8000>;
-			interrupts = <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 631>;
-			power-domains = <&sysc R8A7796_PD_A3VC>;
-			resets = <&cpg 631>;
-
-			renesas,fcp = <&fcpvi0>;
-		};
-
-		cmm0: cmm@fea40000 {
-			compatible = "renesas,r8a7796-cmm",
-				     "renesas,rcar-gen3-cmm";
-			reg = <0 0xfea40000 0 0x1000>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			clocks = <&cpg CPG_MOD 711>;
-			resets = <&cpg 711>;
-		};
-
-		cmm1: cmm@fea50000 {
-			compatible = "renesas,r8a7796-cmm",
-				     "renesas,rcar-gen3-cmm";
-			reg = <0 0xfea50000 0 0x1000>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			clocks = <&cpg CPG_MOD 710>;
-			resets = <&cpg 710>;
-		};
-
-		cmm2: cmm@fea60000 {
-			compatible = "renesas,r8a7796-cmm",
-				     "renesas,rcar-gen3-cmm";
-			reg = <0 0xfea60000 0 0x1000>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			clocks = <&cpg CPG_MOD 709>;
-			resets = <&cpg 709>;
-		};
-
-		csi20: csi2@fea80000 {
-			compatible = "renesas,r8a7796-csi2";
-			reg = <0 0xfea80000 0 0x10000>;
-			interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 714>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 714>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					csi20vin0: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&vin0csi20>;
-					};
-					csi20vin1: endpoint@1 {
-						reg = <1>;
-						remote-endpoint = <&vin1csi20>;
-					};
-					csi20vin2: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&vin2csi20>;
-					};
-					csi20vin3: endpoint@3 {
-						reg = <3>;
-						remote-endpoint = <&vin3csi20>;
-					};
-					csi20vin4: endpoint@4 {
-						reg = <4>;
-						remote-endpoint = <&vin4csi20>;
-					};
-					csi20vin5: endpoint@5 {
-						reg = <5>;
-						remote-endpoint = <&vin5csi20>;
-					};
-					csi20vin6: endpoint@6 {
-						reg = <6>;
-						remote-endpoint = <&vin6csi20>;
-					};
-					csi20vin7: endpoint@7 {
-						reg = <7>;
-						remote-endpoint = <&vin7csi20>;
-					};
-				};
-			};
-		};
-
-		csi40: csi2@feaa0000 {
-			compatible = "renesas,r8a7796-csi2";
-			reg = <0 0xfeaa0000 0 0x10000>;
-			interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 716>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 716>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@1 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					reg = <1>;
-
-					csi40vin0: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&vin0csi40>;
-					};
-					csi40vin1: endpoint@1 {
-						reg = <1>;
-						remote-endpoint = <&vin1csi40>;
-					};
-					csi40vin2: endpoint@2 {
-						reg = <2>;
-						remote-endpoint = <&vin2csi40>;
-					};
-					csi40vin3: endpoint@3 {
-						reg = <3>;
-						remote-endpoint = <&vin3csi40>;
-					};
-					csi40vin4: endpoint@4 {
-						reg = <4>;
-						remote-endpoint = <&vin4csi40>;
-					};
-					csi40vin5: endpoint@5 {
-						reg = <5>;
-						remote-endpoint = <&vin5csi40>;
-					};
-					csi40vin6: endpoint@6 {
-						reg = <6>;
-						remote-endpoint = <&vin6csi40>;
-					};
-					csi40vin7: endpoint@7 {
-						reg = <7>;
-						remote-endpoint = <&vin7csi40>;
-					};
-				};
-
-			};
-		};
-
-		hdmi0: hdmi@fead0000 {
-			compatible = "renesas,r8a7796-hdmi", "renesas,rcar-gen3-hdmi";
-			reg = <0 0xfead0000 0 0x10000>;
-			interrupts = <GIC_SPI 389 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 729>, <&cpg CPG_CORE R8A7796_CLK_HDMI>;
-			clock-names = "iahb", "isfr";
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 729>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				port@0 {
-					reg = <0>;
-					dw_hdmi0_in: endpoint {
-						remote-endpoint = <&du_out_hdmi0>;
-					};
-				};
-				port@1 {
-					reg = <1>;
-				};
-				port@2 {
-					/* HDMI sound */
-					reg = <2>;
-				};
-			};
-		};
-
-		du: display@feb00000 {
-			compatible = "renesas,du-r8a7796";
-			reg = <0 0xfeb00000 0 0x70000>;
-			interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 724>,
-				 <&cpg CPG_MOD 723>,
-				 <&cpg CPG_MOD 722>;
-			clock-names = "du.0", "du.1", "du.2";
-
-			renesas,cmms = <&cmm0>, <&cmm1>, <&cmm2>;
-			vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>;
-
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@0 {
-					reg = <0>;
-					du_out_rgb: endpoint {
-					};
-				};
-				port@1 {
-					reg = <1>;
-					du_out_hdmi0: endpoint {
-						remote-endpoint = <&dw_hdmi0_in>;
-					};
-				};
-				port@2 {
-					reg = <2>;
-					du_out_lvds0: endpoint {
-						remote-endpoint = <&lvds0_in>;
-					};
-				};
-			};
-		};
-
-		lvds0: lvds@feb90000 {
-			compatible = "renesas,r8a7796-lvds";
-			reg = <0 0xfeb90000 0 0x14>;
-			clocks = <&cpg CPG_MOD 727>;
-			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
-			resets = <&cpg 727>;
-			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@0 {
-					reg = <0>;
-					lvds0_in: endpoint {
-						remote-endpoint = <&du_out_lvds0>;
-					};
-				};
-				port@1 {
-					reg = <1>;
-					lvds0_out: endpoint {
-					};
-				};
-			};
-		};
-
-		prr: chipid@fff00044 {
-			compatible = "renesas,prr";
-			reg = <0 0xfff00044 0 4>;
-		};
-	};
-
-	thermal-zones {
-		sensor_thermal1: sensor-thermal1 {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-			thermal-sensors = <&tsc 0>;
-			sustainable-power = <3874>;
-
-			trips {
-				sensor1_crit: sensor1-crit {
-					temperature = <120000>;
-					hysteresis = <1000>;
-					type = "critical";
-				};
-			};
-		};
-
-		sensor_thermal2: sensor-thermal2 {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-			thermal-sensors = <&tsc 1>;
-			sustainable-power = <3874>;
-
-			trips {
-				sensor2_crit: sensor2-crit {
-					temperature = <120000>;
-					hysteresis = <1000>;
-					type = "critical";
-				};
-			};
-		};
-
-		sensor_thermal3: sensor-thermal3 {
-			polling-delay-passive = <250>;
-			polling-delay = <1000>;
-			thermal-sensors = <&tsc 2>;
-			sustainable-power = <3874>;
-
-			cooling-maps {
-				map0 {
-					trip = <&target>;
-					cooling-device = <&a57_0 2 4>;
-					contribution = <1024>;
-				};
-				map1 {
-					trip = <&target>;
-					cooling-device = <&a53_0 0 2>;
-					contribution = <1024>;
-				};
-			};
-			trips {
-				target: trip-point1 {
-					temperature = <100000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-
-				sensor3_crit: sensor3-crit {
-					temperature = <120000>;
-					hysteresis = <1000>;
-					type = "critical";
-				};
-			};
-		};
-	};
-
-	timer {
-		compatible = "arm,armv8-timer";
-		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
-				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
-				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
-				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>;
-	};
-
-	/* External USB clocks - can be overridden by the board */
-	usb3s0_clk: usb3s0 {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-
-	usb_extal_clk: usb_extal {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <0>;
-	};
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a77960-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a77960-salvator-x.dts
new file mode 100644
index 0000000..ecfbeaf
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77960-salvator-x.dts
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the Salvator-X board with R-Car M3-W
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+#include "r8a77960.dtsi"
+#include "salvator-x.dtsi"
+
+/ {
+	model = "Renesas Salvator-X board based on r8a77960";
+	compatible = "renesas,salvator-x", "renesas,r8a7796";
+
+	memory@48000000 {
+		device_type = "memory";
+		/* first 128MB is reserved for secure area. */
+		reg = <0x0 0x48000000 0x0 0x78000000>;
+	};
+
+	memory@600000000 {
+		device_type = "memory";
+		reg = <0x6 0x00000000 0x0 0x80000000>;
+	};
+};
+
+&du {
+	clocks = <&cpg CPG_MOD 724>,
+		 <&cpg CPG_MOD 723>,
+		 <&cpg CPG_MOD 722>,
+		 <&versaclock5 1>,
+		 <&x21_clk>,
+		 <&versaclock5 2>;
+	clock-names = "du.0", "du.1", "du.2",
+		      "dclkin.0", "dclkin.1", "dclkin.2";
+};
+
+&hdmi0 {
+	status = "okay";
+
+	ports {
+		port@1 {
+			reg = <1>;
+			rcar_dw_hdmi0_out: endpoint {
+				remote-endpoint = <&hdmi0_con>;
+			};
+		};
+		port@2 {
+			reg = <2>;
+			dw_hdmi0_snd_in: endpoint {
+				remote-endpoint = <&rsnd_endpoint1>;
+			};
+		};
+	};
+};
+
+&hdmi0_con {
+	remote-endpoint = <&rcar_dw_hdmi0_out>;
+};
+
+&rcar_sound {
+	ports {
+		/* rsnd_port0 is on salvator-common */
+		rsnd_port1: port@1 {
+			reg = <1>;
+			rsnd_endpoint1: endpoint {
+				remote-endpoint = <&dw_hdmi0_snd_in>;
+
+				dai-format = "i2s";
+				bitclock-master = <&rsnd_endpoint1>;
+				frame-master = <&rsnd_endpoint1>;
+
+				playback = <&ssi2>;
+			};
+		};
+	};
+};
+
+&sound_card {
+	dais = <&rsnd_port0	/* ak4613 */
+		&rsnd_port1>;	/* HDMI0  */
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77960-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a77960-salvator-xs.dts
new file mode 100644
index 0000000..249896a
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77960-salvator-xs.dts
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the Salvator-X 2nd version board with R-Car M3-W
+ *
+ * Copyright (C) 2015-2017 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+#include "r8a77960.dtsi"
+#include "salvator-xs.dtsi"
+
+/ {
+	model = "Renesas Salvator-X 2nd version board based on r8a77960";
+	compatible = "renesas,salvator-xs", "renesas,r8a7796";
+
+	memory@48000000 {
+		device_type = "memory";
+		/* first 128MB is reserved for secure area. */
+		reg = <0x0 0x48000000 0x0 0x78000000>;
+	};
+
+	memory@600000000 {
+		device_type = "memory";
+		reg = <0x6 0x00000000 0x0 0x80000000>;
+	};
+};
+
+&du {
+	clocks = <&cpg CPG_MOD 724>,
+		 <&cpg CPG_MOD 723>,
+		 <&cpg CPG_MOD 722>,
+		 <&versaclock6 1>,
+		 <&x21_clk>,
+		 <&versaclock6 2>;
+	clock-names = "du.0", "du.1", "du.2",
+		      "dclkin.0", "dclkin.1", "dclkin.2";
+};
+
+&hdmi0 {
+	status = "okay";
+
+	ports {
+		port@1 {
+			reg = <1>;
+			rcar_dw_hdmi0_out: endpoint {
+				remote-endpoint = <&hdmi0_con>;
+			};
+		};
+		port@2 {
+			reg = <2>;
+			dw_hdmi0_snd_in: endpoint {
+				remote-endpoint = <&rsnd_endpoint1>;
+			};
+		};
+	};
+};
+
+&hdmi0_con {
+	remote-endpoint = <&rcar_dw_hdmi0_out>;
+};
+
+&rcar_sound {
+	ports {
+		/* rsnd_port0 is on salvator-common */
+		rsnd_port1: port@1 {
+			reg = <1>;
+			rsnd_endpoint1: endpoint {
+				remote-endpoint = <&dw_hdmi0_snd_in>;
+
+				dai-format = "i2s";
+				bitclock-master = <&rsnd_endpoint1>;
+				frame-master = <&rsnd_endpoint1>;
+
+				playback = <&ssi2>;
+			};
+		};
+	};
+};
+
+&sound_card {
+	dais = <&rsnd_port0	/* ak4613 */
+		&rsnd_port1>;	/* HDMI0  */
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77960-ulcb-kf.dts b/arch/arm64/boot/dts/renesas/r8a77960-ulcb-kf.dts
new file mode 100644
index 0000000..2151c37
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77960-ulcb-kf.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the M3ULCB Kingfisher board
+ *
+ * Copyright (C) 2017 Renesas Electronics Corp.
+ * Copyright (C) 2017 Cogent Embedded, Inc.
+ */
+
+#include "r8a77960-ulcb.dts"
+#include "ulcb-kf.dtsi"
+
+/ {
+	model = "Renesas M3ULCB Kingfisher board based on r8a77960";
+	compatible = "shimafuji,kingfisher", "renesas,m3ulcb",
+		     "renesas,r8a7796";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77960-ulcb.dts b/arch/arm64/boot/dts/renesas/r8a77960-ulcb.dts
new file mode 100644
index 0000000..d041042
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77960-ulcb.dts
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the M3ULCB (R-Car Starter Kit Pro) board
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2016 Cogent Embedded, Inc.
+ */
+
+/dts-v1/;
+#include "r8a77960.dtsi"
+#include "ulcb.dtsi"
+
+/ {
+	model = "Renesas M3ULCB board based on r8a77960";
+	compatible = "renesas,m3ulcb", "renesas,r8a7796";
+
+	memory@48000000 {
+		device_type = "memory";
+		/* first 128MB is reserved for secure area. */
+		reg = <0x0 0x48000000 0x0 0x38000000>;
+	};
+
+	memory@600000000 {
+		device_type = "memory";
+		reg = <0x6 0x00000000 0x0 0x40000000>;
+	};
+};
+
+&du {
+	clocks = <&cpg CPG_MOD 724>,
+		 <&cpg CPG_MOD 723>,
+		 <&cpg CPG_MOD 722>,
+		 <&versaclock5 1>,
+		 <&versaclock5 3>,
+		 <&versaclock5 2>;
+	clock-names = "du.0", "du.1", "du.2",
+		      "dclkin.0", "dclkin.1", "dclkin.2";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
new file mode 100644
index 0000000..60f156c
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
@@ -0,0 +1,2972 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the R-Car M3-W (R8A77960) SoC
+ *
+ * Copyright (C) 2016-2017 Renesas Electronics Corp.
+ */
+
+#include <dt-bindings/clock/r8a7796-cpg-mssr.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/power/r8a7796-sysc.h>
+
+#define CPG_AUDIO_CLK_I		R8A7796_CLK_S0D4
+
+/ {
+	compatible = "renesas,r8a7796";
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	aliases {
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		i2c2 = &i2c2;
+		i2c3 = &i2c3;
+		i2c4 = &i2c4;
+		i2c5 = &i2c5;
+		i2c6 = &i2c6;
+		i2c7 = &i2c_dvfs;
+	};
+
+	/*
+	 * The external audio clocks are configured as 0 Hz fixed frequency
+	 * clocks by default.
+	 * Boards that provide audio clocks should override them.
+	 */
+	audio_clk_a: audio_clk_a {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	audio_clk_b: audio_clk_b {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	audio_clk_c: audio_clk_c {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	/* External CAN clock - to be overridden by boards that provide it */
+	can_clk: can {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	cluster0_opp: opp_table0 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp-500000000 {
+			opp-hz = /bits/ 64 <500000000>;
+			opp-microvolt = <820000>;
+			clock-latency-ns = <300000>;
+		};
+		opp-1000000000 {
+			opp-hz = /bits/ 64 <1000000000>;
+			opp-microvolt = <820000>;
+			clock-latency-ns = <300000>;
+		};
+		opp-1500000000 {
+			opp-hz = /bits/ 64 <1500000000>;
+			opp-microvolt = <820000>;
+			clock-latency-ns = <300000>;
+		};
+		opp-1600000000 {
+			opp-hz = /bits/ 64 <1600000000>;
+			opp-microvolt = <900000>;
+			clock-latency-ns = <300000>;
+			turbo-mode;
+		};
+		opp-1700000000 {
+			opp-hz = /bits/ 64 <1700000000>;
+			opp-microvolt = <900000>;
+			clock-latency-ns = <300000>;
+			turbo-mode;
+		};
+		opp-1800000000 {
+			opp-hz = /bits/ 64 <1800000000>;
+			opp-microvolt = <960000>;
+			clock-latency-ns = <300000>;
+			turbo-mode;
+		};
+	};
+
+	cluster1_opp: opp_table1 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp-800000000 {
+			opp-hz = /bits/ 64 <800000000>;
+			opp-microvolt = <820000>;
+			clock-latency-ns = <300000>;
+		};
+		opp-1000000000 {
+			opp-hz = /bits/ 64 <1000000000>;
+			opp-microvolt = <820000>;
+			clock-latency-ns = <300000>;
+		};
+		opp-1200000000 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-microvolt = <820000>;
+			clock-latency-ns = <300000>;
+		};
+		opp-1300000000 {
+			opp-hz = /bits/ 64 <1300000000>;
+			opp-microvolt = <820000>;
+			clock-latency-ns = <300000>;
+			turbo-mode;
+		};
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&a57_0>;
+				};
+				core1 {
+					cpu = <&a57_1>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&a53_0>;
+				};
+				core1 {
+					cpu = <&a53_1>;
+				};
+				core2 {
+					cpu = <&a53_2>;
+				};
+				core3 {
+					cpu = <&a53_3>;
+				};
+			};
+		};
+
+		a57_0: cpu@0 {
+			compatible = "arm,cortex-a57";
+			reg = <0x0>;
+			device_type = "cpu";
+			power-domains = <&sysc R8A7796_PD_CA57_CPU0>;
+			next-level-cache = <&L2_CA57>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0>;
+			dynamic-power-coefficient = <854>;
+			clocks = <&cpg CPG_CORE R8A7796_CLK_Z>;
+			operating-points-v2 = <&cluster0_opp>;
+			capacity-dmips-mhz = <1024>;
+			#cooling-cells = <2>;
+		};
+
+		a57_1: cpu@1 {
+			compatible = "arm,cortex-a57";
+			reg = <0x1>;
+			device_type = "cpu";
+			power-domains = <&sysc R8A7796_PD_CA57_CPU1>;
+			next-level-cache = <&L2_CA57>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0>;
+			clocks = <&cpg CPG_CORE R8A7796_CLK_Z>;
+			operating-points-v2 = <&cluster0_opp>;
+			capacity-dmips-mhz = <1024>;
+			#cooling-cells = <2>;
+		};
+
+		a53_0: cpu@100 {
+			compatible = "arm,cortex-a53";
+			reg = <0x100>;
+			device_type = "cpu";
+			power-domains = <&sysc R8A7796_PD_CA53_CPU0>;
+			next-level-cache = <&L2_CA53>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_1>;
+			#cooling-cells = <2>;
+			dynamic-power-coefficient = <277>;
+			clocks = <&cpg CPG_CORE R8A7796_CLK_Z2>;
+			operating-points-v2 = <&cluster1_opp>;
+			capacity-dmips-mhz = <535>;
+		};
+
+		a53_1: cpu@101 {
+			compatible = "arm,cortex-a53";
+			reg = <0x101>;
+			device_type = "cpu";
+			power-domains = <&sysc R8A7796_PD_CA53_CPU1>;
+			next-level-cache = <&L2_CA53>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_1>;
+			clocks = <&cpg CPG_CORE R8A7796_CLK_Z2>;
+			operating-points-v2 = <&cluster1_opp>;
+			capacity-dmips-mhz = <535>;
+		};
+
+		a53_2: cpu@102 {
+			compatible = "arm,cortex-a53";
+			reg = <0x102>;
+			device_type = "cpu";
+			power-domains = <&sysc R8A7796_PD_CA53_CPU2>;
+			next-level-cache = <&L2_CA53>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_1>;
+			clocks = <&cpg CPG_CORE R8A7796_CLK_Z2>;
+			operating-points-v2 = <&cluster1_opp>;
+			capacity-dmips-mhz = <535>;
+		};
+
+		a53_3: cpu@103 {
+			compatible = "arm,cortex-a53";
+			reg = <0x103>;
+			device_type = "cpu";
+			power-domains = <&sysc R8A7796_PD_CA53_CPU3>;
+			next-level-cache = <&L2_CA53>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_1>;
+			clocks = <&cpg CPG_CORE R8A7796_CLK_Z2>;
+			operating-points-v2 = <&cluster1_opp>;
+			capacity-dmips-mhz = <535>;
+		};
+
+		L2_CA57: cache-controller-0 {
+			compatible = "cache";
+			power-domains = <&sysc R8A7796_PD_CA57_SCU>;
+			cache-unified;
+			cache-level = <2>;
+		};
+
+		L2_CA53: cache-controller-1 {
+			compatible = "cache";
+			power-domains = <&sysc R8A7796_PD_CA53_SCU>;
+			cache-unified;
+			cache-level = <2>;
+		};
+
+		idle-states {
+			entry-method = "psci";
+
+			CPU_SLEEP_0: cpu-sleep-0 {
+				compatible = "arm,idle-state";
+				arm,psci-suspend-param = <0x0010000>;
+				local-timer-stop;
+				entry-latency-us = <400>;
+				exit-latency-us = <500>;
+				min-residency-us = <4000>;
+			};
+
+			CPU_SLEEP_1: cpu-sleep-1 {
+				compatible = "arm,idle-state";
+				arm,psci-suspend-param = <0x0010000>;
+				local-timer-stop;
+				entry-latency-us = <700>;
+				exit-latency-us = <700>;
+				min-residency-us = <5000>;
+			};
+		};
+	};
+
+	extal_clk: extal {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		/* This value must be overridden by the board */
+		clock-frequency = <0>;
+	};
+
+	extalr_clk: extalr {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		/* This value must be overridden by the board */
+		clock-frequency = <0>;
+	};
+
+	/* External PCIe clock - can be overridden by the board */
+	pcie_bus_clk: pcie_bus {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	pmu_a53 {
+		compatible = "arm,cortex-a53-pmu";
+		interrupts-extended = <&gic GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
+				      <&gic GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
+				      <&gic GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
+				      <&gic GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&a53_0>, <&a53_1>, <&a53_2>, <&a53_3>;
+	};
+
+	pmu_a57 {
+		compatible = "arm,cortex-a57-pmu";
+		interrupts-extended = <&gic GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+				      <&gic GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&a57_0>, <&a57_1>;
+	};
+
+	psci {
+		compatible = "arm,psci-1.0", "arm,psci-0.2";
+		method = "smc";
+	};
+
+	/* External SCIF clock - to be overridden by boards that provide it */
+	scif_clk: scif {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	soc {
+		compatible = "simple-bus";
+		interrupt-parent = <&gic>;
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		rwdt: watchdog@e6020000 {
+			compatible = "renesas,r8a7796-wdt",
+				     "renesas,rcar-gen3-wdt";
+			reg = <0 0xe6020000 0 0x0c>;
+			clocks = <&cpg CPG_MOD 402>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 402>;
+			status = "disabled";
+		};
+
+		gpio0: gpio@e6050000 {
+			compatible = "renesas,gpio-r8a7796",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6050000 0 0x50>;
+			interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 0 16>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 912>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 912>;
+		};
+
+		gpio1: gpio@e6051000 {
+			compatible = "renesas,gpio-r8a7796",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6051000 0 0x50>;
+			interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 32 29>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 911>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 911>;
+		};
+
+		gpio2: gpio@e6052000 {
+			compatible = "renesas,gpio-r8a7796",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6052000 0 0x50>;
+			interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 64 15>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 910>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 910>;
+		};
+
+		gpio3: gpio@e6053000 {
+			compatible = "renesas,gpio-r8a7796",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6053000 0 0x50>;
+			interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 96 16>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 909>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 909>;
+		};
+
+		gpio4: gpio@e6054000 {
+			compatible = "renesas,gpio-r8a7796",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6054000 0 0x50>;
+			interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 128 18>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 908>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 908>;
+		};
+
+		gpio5: gpio@e6055000 {
+			compatible = "renesas,gpio-r8a7796",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6055000 0 0x50>;
+			interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 160 26>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 907>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 907>;
+		};
+
+		gpio6: gpio@e6055400 {
+			compatible = "renesas,gpio-r8a7796",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6055400 0 0x50>;
+			interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 192 32>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 906>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 906>;
+		};
+
+		gpio7: gpio@e6055800 {
+			compatible = "renesas,gpio-r8a7796",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6055800 0 0x50>;
+			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 224 4>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 905>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 905>;
+		};
+
+		pfc: pin-controller@e6060000 {
+			compatible = "renesas,pfc-r8a7796";
+			reg = <0 0xe6060000 0 0x50c>;
+		};
+
+		cmt0: timer@e60f0000 {
+			compatible = "renesas,r8a7796-cmt0",
+				     "renesas,rcar-gen3-cmt0";
+			reg = <0 0xe60f0000 0 0x1004>;
+			interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 303>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 303>;
+			status = "disabled";
+		};
+
+		cmt1: timer@e6130000 {
+			compatible = "renesas,r8a7796-cmt1",
+				     "renesas,rcar-gen3-cmt1";
+			reg = <0 0xe6130000 0 0x1004>;
+			interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 302>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 302>;
+			status = "disabled";
+		};
+
+		cmt2: timer@e6140000 {
+			compatible = "renesas,r8a7796-cmt1",
+				     "renesas,rcar-gen3-cmt1";
+			reg = <0 0xe6140000 0 0x1004>;
+			interrupts = <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 301>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 301>;
+			status = "disabled";
+		};
+
+		cmt3: timer@e6148000 {
+			compatible = "renesas,r8a7796-cmt1",
+				     "renesas,rcar-gen3-cmt1";
+			reg = <0 0xe6148000 0 0x1004>;
+			interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 300>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 300>;
+			status = "disabled";
+		};
+
+		cpg: clock-controller@e6150000 {
+			compatible = "renesas,r8a7796-cpg-mssr";
+			reg = <0 0xe6150000 0 0x1000>;
+			clocks = <&extal_clk>, <&extalr_clk>;
+			clock-names = "extal", "extalr";
+			#clock-cells = <2>;
+			#power-domain-cells = <0>;
+			#reset-cells = <1>;
+		};
+
+		rst: reset-controller@e6160000 {
+			compatible = "renesas,r8a7796-rst";
+			reg = <0 0xe6160000 0 0x0200>;
+		};
+
+		sysc: system-controller@e6180000 {
+			compatible = "renesas,r8a7796-sysc";
+			reg = <0 0xe6180000 0 0x0400>;
+			#power-domain-cells = <1>;
+		};
+
+		tsc: thermal@e6198000 {
+			compatible = "renesas,r8a7796-thermal";
+			reg = <0 0xe6198000 0 0x100>,
+			      <0 0xe61a0000 0 0x100>,
+			      <0 0xe61a8000 0 0x100>;
+			interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 522>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 522>;
+			#thermal-sensor-cells = <1>;
+		};
+
+		intc_ex: interrupt-controller@e61c0000 {
+			compatible = "renesas,intc-ex-r8a7796", "renesas,irqc";
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			reg = <0 0xe61c0000 0 0x200>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 407>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 407>;
+		};
+
+		i2c0: i2c@e6500000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7796",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe6500000 0 0x40>;
+			interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 931>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 931>;
+			dmas = <&dmac1 0x91>, <&dmac1 0x90>,
+			       <&dmac2 0x91>, <&dmac2 0x90>;
+			dma-names = "tx", "rx", "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c1: i2c@e6508000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7796",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe6508000 0 0x40>;
+			interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 930>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 930>;
+			dmas = <&dmac1 0x93>, <&dmac1 0x92>,
+			       <&dmac2 0x93>, <&dmac2 0x92>;
+			dma-names = "tx", "rx", "tx", "rx";
+			i2c-scl-internal-delay-ns = <6>;
+			status = "disabled";
+		};
+
+		i2c2: i2c@e6510000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7796",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe6510000 0 0x40>;
+			interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 929>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 929>;
+			dmas = <&dmac1 0x95>, <&dmac1 0x94>,
+			       <&dmac2 0x95>, <&dmac2 0x94>;
+			dma-names = "tx", "rx", "tx", "rx";
+			i2c-scl-internal-delay-ns = <6>;
+			status = "disabled";
+		};
+
+		i2c3: i2c@e66d0000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7796",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe66d0000 0 0x40>;
+			interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 928>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 928>;
+			dmas = <&dmac0 0x97>, <&dmac0 0x96>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c4: i2c@e66d8000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7796",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe66d8000 0 0x40>;
+			interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 927>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 927>;
+			dmas = <&dmac0 0x99>, <&dmac0 0x98>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c5: i2c@e66e0000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7796",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe66e0000 0 0x40>;
+			interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 919>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 919>;
+			dmas = <&dmac0 0x9b>, <&dmac0 0x9a>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c6: i2c@e66e8000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7796",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe66e8000 0 0x40>;
+			interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 918>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 918>;
+			dmas = <&dmac0 0x9d>, <&dmac0 0x9c>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <6>;
+			status = "disabled";
+		};
+
+		i2c_dvfs: i2c@e60b0000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,iic-r8a7796",
+				     "renesas,rcar-gen3-iic",
+				     "renesas,rmobile-iic";
+			reg = <0 0xe60b0000 0 0x425>;
+			interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 926>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 926>;
+			dmas = <&dmac0 0x11>, <&dmac0 0x10>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		hscif0: serial@e6540000 {
+			compatible = "renesas,hscif-r8a7796",
+				     "renesas,rcar-gen3-hscif",
+				     "renesas,hscif";
+			reg = <0 0xe6540000 0 0x60>;
+			interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 520>,
+				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x31>, <&dmac1 0x30>,
+			       <&dmac2 0x31>, <&dmac2 0x30>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 520>;
+			status = "disabled";
+		};
+
+		hscif1: serial@e6550000 {
+			compatible = "renesas,hscif-r8a7796",
+				     "renesas,rcar-gen3-hscif",
+				     "renesas,hscif";
+			reg = <0 0xe6550000 0 0x60>;
+			interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 519>,
+				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x33>, <&dmac1 0x32>,
+			       <&dmac2 0x33>, <&dmac2 0x32>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 519>;
+			status = "disabled";
+		};
+
+		hscif2: serial@e6560000 {
+			compatible = "renesas,hscif-r8a7796",
+				     "renesas,rcar-gen3-hscif",
+				     "renesas,hscif";
+			reg = <0 0xe6560000 0 0x60>;
+			interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 518>,
+				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x35>, <&dmac1 0x34>,
+			       <&dmac2 0x35>, <&dmac2 0x34>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 518>;
+			status = "disabled";
+		};
+
+		hscif3: serial@e66a0000 {
+			compatible = "renesas,hscif-r8a7796",
+				     "renesas,rcar-gen3-hscif",
+				     "renesas,hscif";
+			reg = <0 0xe66a0000 0 0x60>;
+			interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 517>,
+				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x37>, <&dmac0 0x36>;
+			dma-names = "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 517>;
+			status = "disabled";
+		};
+
+		hscif4: serial@e66b0000 {
+			compatible = "renesas,hscif-r8a7796",
+				     "renesas,rcar-gen3-hscif",
+				     "renesas,hscif";
+			reg = <0 0xe66b0000 0 0x60>;
+			interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 516>,
+				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x39>, <&dmac0 0x38>;
+			dma-names = "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 516>;
+			status = "disabled";
+		};
+
+		hsusb: usb@e6590000 {
+			compatible = "renesas,usbhs-r8a7796",
+				     "renesas,rcar-gen3-usbhs";
+			reg = <0 0xe6590000 0 0x200>;
+			interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 704>, <&cpg CPG_MOD 703>;
+			dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
+			       <&usb_dmac1 0>, <&usb_dmac1 1>;
+			dma-names = "ch0", "ch1", "ch2", "ch3";
+			renesas,buswait = <11>;
+			phys = <&usb2_phy0 3>;
+			phy-names = "usb";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 704>, <&cpg 703>;
+			status = "disabled";
+		};
+
+		usb_dmac0: dma-controller@e65a0000 {
+			compatible = "renesas,r8a7796-usb-dmac",
+				     "renesas,usb-dmac";
+			reg = <0 0xe65a0000 0 0x100>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "ch0", "ch1";
+			clocks = <&cpg CPG_MOD 330>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 330>;
+			#dma-cells = <1>;
+			dma-channels = <2>;
+		};
+
+		usb_dmac1: dma-controller@e65b0000 {
+			compatible = "renesas,r8a7796-usb-dmac",
+				     "renesas,usb-dmac";
+			reg = <0 0xe65b0000 0 0x100>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "ch0", "ch1";
+			clocks = <&cpg CPG_MOD 331>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 331>;
+			#dma-cells = <1>;
+			dma-channels = <2>;
+		};
+
+		usb3_phy0: usb-phy@e65ee000 {
+			compatible = "renesas,r8a7796-usb3-phy",
+				     "renesas,rcar-gen3-usb3-phy";
+			reg = <0 0xe65ee000 0 0x90>;
+			clocks = <&cpg CPG_MOD 328>, <&usb3s0_clk>,
+				 <&usb_extal_clk>;
+			clock-names = "usb3-if", "usb3s_clk", "usb_extal";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 328>;
+			#phy-cells = <0>;
+			status = "disabled";
+		};
+
+		dmac0: dma-controller@e6700000 {
+			compatible = "renesas,dmac-r8a7796",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe6700000 0 0x10000>;
+			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 219>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 219>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+			iommus = <&ipmmu_ds0 0>, <&ipmmu_ds0 1>,
+			       <&ipmmu_ds0 2>, <&ipmmu_ds0 3>,
+			       <&ipmmu_ds0 4>, <&ipmmu_ds0 5>,
+			       <&ipmmu_ds0 6>, <&ipmmu_ds0 7>,
+			       <&ipmmu_ds0 8>, <&ipmmu_ds0 9>,
+			       <&ipmmu_ds0 10>, <&ipmmu_ds0 11>,
+			       <&ipmmu_ds0 12>, <&ipmmu_ds0 13>,
+			       <&ipmmu_ds0 14>, <&ipmmu_ds0 15>;
+		};
+
+		dmac1: dma-controller@e7300000 {
+			compatible = "renesas,dmac-r8a7796",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe7300000 0 0x10000>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 218>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 218>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+			iommus = <&ipmmu_ds1 0>, <&ipmmu_ds1 1>,
+			       <&ipmmu_ds1 2>, <&ipmmu_ds1 3>,
+			       <&ipmmu_ds1 4>, <&ipmmu_ds1 5>,
+			       <&ipmmu_ds1 6>, <&ipmmu_ds1 7>,
+			       <&ipmmu_ds1 8>, <&ipmmu_ds1 9>,
+			       <&ipmmu_ds1 10>, <&ipmmu_ds1 11>,
+			       <&ipmmu_ds1 12>, <&ipmmu_ds1 13>,
+			       <&ipmmu_ds1 14>, <&ipmmu_ds1 15>;
+		};
+
+		dmac2: dma-controller@e7310000 {
+			compatible = "renesas,dmac-r8a7796",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe7310000 0 0x10000>;
+			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 217>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 217>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+			iommus = <&ipmmu_ds1 16>, <&ipmmu_ds1 17>,
+			       <&ipmmu_ds1 18>, <&ipmmu_ds1 19>,
+			       <&ipmmu_ds1 20>, <&ipmmu_ds1 21>,
+			       <&ipmmu_ds1 22>, <&ipmmu_ds1 23>,
+			       <&ipmmu_ds1 24>, <&ipmmu_ds1 25>,
+			       <&ipmmu_ds1 26>, <&ipmmu_ds1 27>,
+			       <&ipmmu_ds1 28>, <&ipmmu_ds1 29>,
+			       <&ipmmu_ds1 30>, <&ipmmu_ds1 31>;
+		};
+
+		ipmmu_ds0: mmu@e6740000 {
+			compatible = "renesas,ipmmu-r8a7796";
+			reg = <0 0xe6740000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 0>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_ds1: mmu@e7740000 {
+			compatible = "renesas,ipmmu-r8a7796";
+			reg = <0 0xe7740000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 1>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_hc: mmu@e6570000 {
+			compatible = "renesas,ipmmu-r8a7796";
+			reg = <0 0xe6570000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 2>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_ir: mmu@ff8b0000 {
+			compatible = "renesas,ipmmu-r8a7796";
+			reg = <0 0xff8b0000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 3>;
+			power-domains = <&sysc R8A7796_PD_A3IR>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_mm: mmu@e67b0000 {
+			compatible = "renesas,ipmmu-r8a7796";
+			reg = <0 0xe67b0000 0 0x1000>;
+			interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_mp: mmu@ec670000 {
+			compatible = "renesas,ipmmu-r8a7796";
+			reg = <0 0xec670000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 4>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_pv0: mmu@fd800000 {
+			compatible = "renesas,ipmmu-r8a7796";
+			reg = <0 0xfd800000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 5>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_pv1: mmu@fd950000 {
+			compatible = "renesas,ipmmu-r8a7796";
+			reg = <0 0xfd950000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 6>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_rt: mmu@ffc80000 {
+			compatible = "renesas,ipmmu-r8a7796";
+			reg = <0 0xffc80000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 7>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_vc0: mmu@fe6b0000 {
+			compatible = "renesas,ipmmu-r8a7796";
+			reg = <0 0xfe6b0000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 8>;
+			power-domains = <&sysc R8A7796_PD_A3VC>;
+			#iommu-cells = <1>;
+		};
+
+		ipmmu_vi0: mmu@febd0000 {
+			compatible = "renesas,ipmmu-r8a7796";
+			reg = <0 0xfebd0000 0 0x1000>;
+			renesas,ipmmu-main = <&ipmmu_mm 9>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			#iommu-cells = <1>;
+		};
+
+		avb: ethernet@e6800000 {
+			compatible = "renesas,etheravb-r8a7796",
+				     "renesas,etheravb-rcar-gen3";
+			reg = <0 0xe6800000 0 0x800>, <0 0xe6a00000 0 0x10000>;
+			interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "ch0", "ch1", "ch2", "ch3",
+					  "ch4", "ch5", "ch6", "ch7",
+					  "ch8", "ch9", "ch10", "ch11",
+					  "ch12", "ch13", "ch14", "ch15",
+					  "ch16", "ch17", "ch18", "ch19",
+					  "ch20", "ch21", "ch22", "ch23",
+					  "ch24";
+			clocks = <&cpg CPG_MOD 812>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 812>;
+			phy-mode = "rgmii";
+			iommus = <&ipmmu_ds0 16>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		can0: can@e6c30000 {
+			compatible = "renesas,can-r8a7796",
+				     "renesas,rcar-gen3-can";
+			reg = <0 0xe6c30000 0 0x1000>;
+			interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 916>,
+			       <&cpg CPG_CORE R8A7796_CLK_CANFD>,
+			       <&can_clk>;
+			clock-names = "clkp1", "clkp2", "can_clk";
+			assigned-clocks = <&cpg CPG_CORE R8A7796_CLK_CANFD>;
+			assigned-clock-rates = <40000000>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 916>;
+			status = "disabled";
+		};
+
+		can1: can@e6c38000 {
+			compatible = "renesas,can-r8a7796",
+				     "renesas,rcar-gen3-can";
+			reg = <0 0xe6c38000 0 0x1000>;
+			interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 915>,
+			       <&cpg CPG_CORE R8A7796_CLK_CANFD>,
+			       <&can_clk>;
+			clock-names = "clkp1", "clkp2", "can_clk";
+			assigned-clocks = <&cpg CPG_CORE R8A7796_CLK_CANFD>;
+			assigned-clock-rates = <40000000>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 915>;
+			status = "disabled";
+		};
+
+		canfd: can@e66c0000 {
+			compatible = "renesas,r8a7796-canfd",
+				     "renesas,rcar-gen3-canfd";
+			reg = <0 0xe66c0000 0 0x8000>;
+			interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+				   <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 914>,
+			       <&cpg CPG_CORE R8A7796_CLK_CANFD>,
+			       <&can_clk>;
+			clock-names = "fck", "canfd", "can_clk";
+			assigned-clocks = <&cpg CPG_CORE R8A7796_CLK_CANFD>;
+			assigned-clock-rates = <40000000>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 914>;
+			status = "disabled";
+
+			channel0 {
+				status = "disabled";
+			};
+
+			channel1 {
+				status = "disabled";
+			};
+		};
+
+		pwm0: pwm@e6e30000 {
+			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
+			reg = <0 0xe6e30000 0 8>;
+			#pwm-cells = <2>;
+			clocks = <&cpg CPG_MOD 523>;
+			resets = <&cpg 523>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		pwm1: pwm@e6e31000 {
+			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
+			reg = <0 0xe6e31000 0 8>;
+			#pwm-cells = <2>;
+			clocks = <&cpg CPG_MOD 523>;
+			resets = <&cpg 523>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		pwm2: pwm@e6e32000 {
+			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
+			reg = <0 0xe6e32000 0 8>;
+			#pwm-cells = <2>;
+			clocks = <&cpg CPG_MOD 523>;
+			resets = <&cpg 523>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		pwm3: pwm@e6e33000 {
+			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
+			reg = <0 0xe6e33000 0 8>;
+			#pwm-cells = <2>;
+			clocks = <&cpg CPG_MOD 523>;
+			resets = <&cpg 523>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		pwm4: pwm@e6e34000 {
+			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
+			reg = <0 0xe6e34000 0 8>;
+			#pwm-cells = <2>;
+			clocks = <&cpg CPG_MOD 523>;
+			resets = <&cpg 523>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		pwm5: pwm@e6e35000 {
+			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
+			reg = <0 0xe6e35000 0 8>;
+			#pwm-cells = <2>;
+			clocks = <&cpg CPG_MOD 523>;
+			resets = <&cpg 523>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		pwm6: pwm@e6e36000 {
+			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
+			reg = <0 0xe6e36000 0 8>;
+			#pwm-cells = <2>;
+			clocks = <&cpg CPG_MOD 523>;
+			resets = <&cpg 523>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scif0: serial@e6e60000 {
+			compatible = "renesas,scif-r8a7796",
+				     "renesas,rcar-gen3-scif", "renesas,scif";
+			reg = <0 0xe6e60000 0 64>;
+			interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 207>,
+				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x51>, <&dmac1 0x50>,
+			       <&dmac2 0x51>, <&dmac2 0x50>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 207>;
+			status = "disabled";
+		};
+
+		scif1: serial@e6e68000 {
+			compatible = "renesas,scif-r8a7796",
+				     "renesas,rcar-gen3-scif", "renesas,scif";
+			reg = <0 0xe6e68000 0 64>;
+			interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 206>,
+				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x53>, <&dmac1 0x52>,
+			       <&dmac2 0x53>, <&dmac2 0x52>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 206>;
+			status = "disabled";
+		};
+
+		scif2: serial@e6e88000 {
+			compatible = "renesas,scif-r8a7796",
+				     "renesas,rcar-gen3-scif", "renesas,scif";
+			reg = <0 0xe6e88000 0 64>;
+			interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 310>,
+				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+			       <&dmac2 0x13>, <&dmac2 0x12>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 310>;
+			status = "disabled";
+		};
+
+		scif3: serial@e6c50000 {
+			compatible = "renesas,scif-r8a7796",
+				     "renesas,rcar-gen3-scif", "renesas,scif";
+			reg = <0 0xe6c50000 0 64>;
+			interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 204>,
+				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x57>, <&dmac0 0x56>;
+			dma-names = "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 204>;
+			status = "disabled";
+		};
+
+		scif4: serial@e6c40000 {
+			compatible = "renesas,scif-r8a7796",
+				     "renesas,rcar-gen3-scif", "renesas,scif";
+			reg = <0 0xe6c40000 0 64>;
+			interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 203>,
+				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x59>, <&dmac0 0x58>;
+			dma-names = "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 203>;
+			status = "disabled";
+		};
+
+		scif5: serial@e6f30000 {
+			compatible = "renesas,scif-r8a7796",
+				     "renesas,rcar-gen3-scif", "renesas,scif";
+			reg = <0 0xe6f30000 0 64>;
+			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 202>,
+				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+				 <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
+			       <&dmac2 0x5b>, <&dmac2 0x5a>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 202>;
+			status = "disabled";
+		};
+
+		tpu: pwm@e6e80000 {
+			compatible = "renesas,tpu-r8a7796", "renesas,tpu";
+			reg = <0 0xe6e80000 0 0x148>;
+			interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 304>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 304>;
+			#pwm-cells = <3>;
+			status = "disabled";
+		};
+
+		msiof0: spi@e6e90000 {
+			compatible = "renesas,msiof-r8a7796",
+				     "renesas,rcar-gen3-msiof";
+			reg = <0 0xe6e90000 0 0x0064>;
+			interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 211>;
+			dmas = <&dmac1 0x41>, <&dmac1 0x40>,
+			       <&dmac2 0x41>, <&dmac2 0x40>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 211>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		msiof1: spi@e6ea0000 {
+			compatible = "renesas,msiof-r8a7796",
+				     "renesas,rcar-gen3-msiof";
+			reg = <0 0xe6ea0000 0 0x0064>;
+			interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 210>;
+			dmas = <&dmac1 0x43>, <&dmac1 0x42>,
+			       <&dmac2 0x43>, <&dmac2 0x42>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 210>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		msiof2: spi@e6c00000 {
+			compatible = "renesas,msiof-r8a7796",
+				     "renesas,rcar-gen3-msiof";
+			reg = <0 0xe6c00000 0 0x0064>;
+			interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 209>;
+			dmas = <&dmac0 0x45>, <&dmac0 0x44>;
+			dma-names = "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 209>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		msiof3: spi@e6c10000 {
+			compatible = "renesas,msiof-r8a7796",
+				     "renesas,rcar-gen3-msiof";
+			reg = <0 0xe6c10000 0 0x0064>;
+			interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 208>;
+			dmas = <&dmac0 0x47>, <&dmac0 0x46>;
+			dma-names = "tx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 208>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		vin0: video@e6ef0000 {
+			compatible = "renesas,vin-r8a7796";
+			reg = <0 0xe6ef0000 0 0x1000>;
+			interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 811>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 811>;
+			renesas,id = <0>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin0csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin0>;
+					};
+					vin0csi40: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&csi40vin0>;
+					};
+				};
+			};
+		};
+
+		vin1: video@e6ef1000 {
+			compatible = "renesas,vin-r8a7796";
+			reg = <0 0xe6ef1000 0 0x1000>;
+			interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 810>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 810>;
+			renesas,id = <1>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin1csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin1>;
+					};
+					vin1csi40: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&csi40vin1>;
+					};
+				};
+			};
+		};
+
+		vin2: video@e6ef2000 {
+			compatible = "renesas,vin-r8a7796";
+			reg = <0 0xe6ef2000 0 0x1000>;
+			interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 809>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 809>;
+			renesas,id = <2>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin2csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin2>;
+					};
+					vin2csi40: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&csi40vin2>;
+					};
+				};
+			};
+		};
+
+		vin3: video@e6ef3000 {
+			compatible = "renesas,vin-r8a7796";
+			reg = <0 0xe6ef3000 0 0x1000>;
+			interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 808>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 808>;
+			renesas,id = <3>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin3csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin3>;
+					};
+					vin3csi40: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&csi40vin3>;
+					};
+				};
+			};
+		};
+
+		vin4: video@e6ef4000 {
+			compatible = "renesas,vin-r8a7796";
+			reg = <0 0xe6ef4000 0 0x1000>;
+			interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 807>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 807>;
+			renesas,id = <4>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin4csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin4>;
+					};
+					vin4csi40: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&csi40vin4>;
+					};
+				};
+			};
+		};
+
+		vin5: video@e6ef5000 {
+			compatible = "renesas,vin-r8a7796";
+			reg = <0 0xe6ef5000 0 0x1000>;
+			interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 806>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 806>;
+			renesas,id = <5>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin5csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin5>;
+					};
+					vin5csi40: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&csi40vin5>;
+					};
+				};
+			};
+		};
+
+		vin6: video@e6ef6000 {
+			compatible = "renesas,vin-r8a7796";
+			reg = <0 0xe6ef6000 0 0x1000>;
+			interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 805>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 805>;
+			renesas,id = <6>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin6csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin6>;
+					};
+					vin6csi40: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&csi40vin6>;
+					};
+				};
+			};
+		};
+
+		vin7: video@e6ef7000 {
+			compatible = "renesas,vin-r8a7796";
+			reg = <0 0xe6ef7000 0 0x1000>;
+			interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 804>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 804>;
+			renesas,id = <7>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					vin7csi20: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&csi20vin7>;
+					};
+					vin7csi40: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&csi40vin7>;
+					};
+				};
+			};
+		};
+
+		drif00: rif@e6f40000 {
+			compatible = "renesas,r8a7796-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6f40000 0 0x64>;
+			interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 515>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x20>, <&dmac2 0x20>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 515>;
+			renesas,bonding = <&drif01>;
+			status = "disabled";
+		};
+
+		drif01: rif@e6f50000 {
+			compatible = "renesas,r8a7796-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6f50000 0 0x64>;
+			interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 514>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x22>, <&dmac2 0x22>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 514>;
+			renesas,bonding = <&drif00>;
+			status = "disabled";
+		};
+
+		drif10: rif@e6f60000 {
+			compatible = "renesas,r8a7796-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6f60000 0 0x64>;
+			interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 513>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x24>, <&dmac2 0x24>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 513>;
+			renesas,bonding = <&drif11>;
+			status = "disabled";
+		};
+
+		drif11: rif@e6f70000 {
+			compatible = "renesas,r8a7796-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6f70000 0 0x64>;
+			interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 512>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x26>, <&dmac2 0x26>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 512>;
+			renesas,bonding = <&drif10>;
+			status = "disabled";
+		};
+
+		drif20: rif@e6f80000 {
+			compatible = "renesas,r8a7796-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6f80000 0 0x64>;
+			interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 511>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x28>, <&dmac2 0x28>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 511>;
+			renesas,bonding = <&drif21>;
+			status = "disabled";
+		};
+
+		drif21: rif@e6f90000 {
+			compatible = "renesas,r8a7796-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6f90000 0 0x64>;
+			interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 510>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x2a>, <&dmac2 0x2a>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 510>;
+			renesas,bonding = <&drif20>;
+			status = "disabled";
+		};
+
+		drif30: rif@e6fa0000 {
+			compatible = "renesas,r8a7796-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6fa0000 0 0x64>;
+			interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 509>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x2c>, <&dmac2 0x2c>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 509>;
+			renesas,bonding = <&drif31>;
+			status = "disabled";
+		};
+
+		drif31: rif@e6fb0000 {
+			compatible = "renesas,r8a7796-drif",
+				     "renesas,rcar-gen3-drif";
+			reg = <0 0xe6fb0000 0 0x64>;
+			interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 508>;
+			clock-names = "fck";
+			dmas = <&dmac1 0x2e>, <&dmac2 0x2e>;
+			dma-names = "rx", "rx";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 508>;
+			renesas,bonding = <&drif30>;
+			status = "disabled";
+		};
+
+		rcar_sound: sound@ec500000 {
+			/*
+			 * #sound-dai-cells is required
+			 *
+			 * Single DAI : #sound-dai-cells = <0>;	<&rcar_sound>;
+			 * Multi  DAI : #sound-dai-cells = <1>;	<&rcar_sound N>;
+			 */
+			/*
+			 * #clock-cells is required for audio_clkout0/1/2/3
+			 *
+			 * clkout	: #clock-cells = <0>;	<&rcar_sound>;
+			 * clkout0/1/2/3: #clock-cells = <1>;	<&rcar_sound N>;
+			 */
+			compatible =  "renesas,rcar_sound-r8a7796", "renesas,rcar_sound-gen3";
+			reg =	<0 0xec500000 0 0x1000>, /* SCU */
+				<0 0xec5a0000 0 0x100>,  /* ADG */
+				<0 0xec540000 0 0x1000>, /* SSIU */
+				<0 0xec541000 0 0x280>,  /* SSI */
+				<0 0xec760000 0 0x200>;  /* Audio DMAC peri peri*/
+			reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
+
+			clocks = <&cpg CPG_MOD 1005>,
+				 <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
+				 <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
+				 <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
+				 <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
+				 <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
+				 <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
+				 <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
+				 <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
+				 <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
+				 <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
+				 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+				 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+				 <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
+				 <&audio_clk_a>, <&audio_clk_b>,
+				 <&audio_clk_c>,
+				 <&cpg CPG_CORE R8A7796_CLK_S0D4>;
+			clock-names = "ssi-all",
+				      "ssi.9", "ssi.8", "ssi.7", "ssi.6",
+				      "ssi.5", "ssi.4", "ssi.3", "ssi.2",
+				      "ssi.1", "ssi.0",
+				      "src.9", "src.8", "src.7", "src.6",
+				      "src.5", "src.4", "src.3", "src.2",
+				      "src.1", "src.0",
+				      "mix.1", "mix.0",
+				      "ctu.1", "ctu.0",
+				      "dvc.0", "dvc.1",
+				      "clk_a", "clk_b", "clk_c", "clk_i";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 1005>,
+				 <&cpg 1006>, <&cpg 1007>,
+				 <&cpg 1008>, <&cpg 1009>,
+				 <&cpg 1010>, <&cpg 1011>,
+				 <&cpg 1012>, <&cpg 1013>,
+				 <&cpg 1014>, <&cpg 1015>;
+			reset-names = "ssi-all",
+				      "ssi.9", "ssi.8", "ssi.7", "ssi.6",
+				      "ssi.5", "ssi.4", "ssi.3", "ssi.2",
+				      "ssi.1", "ssi.0";
+			status = "disabled";
+
+			rcar_sound,ctu {
+				ctu00: ctu-0 { };
+				ctu01: ctu-1 { };
+				ctu02: ctu-2 { };
+				ctu03: ctu-3 { };
+				ctu10: ctu-4 { };
+				ctu11: ctu-5 { };
+				ctu12: ctu-6 { };
+				ctu13: ctu-7 { };
+			};
+
+			rcar_sound,dvc {
+				dvc0: dvc-0 {
+					dmas = <&audma1 0xbc>;
+					dma-names = "tx";
+				};
+				dvc1: dvc-1 {
+					dmas = <&audma1 0xbe>;
+					dma-names = "tx";
+				};
+			};
+
+			rcar_sound,mix {
+				mix0: mix-0 { };
+				mix1: mix-1 { };
+			};
+
+			rcar_sound,src {
+				src0: src-0 {
+					interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x85>, <&audma1 0x9a>;
+					dma-names = "rx", "tx";
+				};
+				src1: src-1 {
+					interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x87>, <&audma1 0x9c>;
+					dma-names = "rx", "tx";
+				};
+				src2: src-2 {
+					interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x89>, <&audma1 0x9e>;
+					dma-names = "rx", "tx";
+				};
+				src3: src-3 {
+					interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x8b>, <&audma1 0xa0>;
+					dma-names = "rx", "tx";
+				};
+				src4: src-4 {
+					interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x8d>, <&audma1 0xb0>;
+					dma-names = "rx", "tx";
+				};
+				src5: src-5 {
+					interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x8f>, <&audma1 0xb2>;
+					dma-names = "rx", "tx";
+				};
+				src6: src-6 {
+					interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x91>, <&audma1 0xb4>;
+					dma-names = "rx", "tx";
+				};
+				src7: src-7 {
+					interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x93>, <&audma1 0xb6>;
+					dma-names = "rx", "tx";
+				};
+				src8: src-8 {
+					interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x95>, <&audma1 0xb8>;
+					dma-names = "rx", "tx";
+				};
+				src9: src-9 {
+					interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x97>, <&audma1 0xba>;
+					dma-names = "rx", "tx";
+				};
+			};
+
+			rcar_sound,ssi {
+				ssi0: ssi-0 {
+					interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x01>, <&audma1 0x02>;
+					dma-names = "rx", "tx";
+				};
+				ssi1: ssi-1 {
+					interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x03>, <&audma1 0x04>;
+					dma-names = "rx", "tx";
+				};
+				ssi2: ssi-2 {
+					interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x05>, <&audma1 0x06>;
+					dma-names = "rx", "tx";
+				};
+				ssi3: ssi-3 {
+					interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x07>, <&audma1 0x08>;
+					dma-names = "rx", "tx";
+				};
+				ssi4: ssi-4 {
+					interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x09>, <&audma1 0x0a>;
+					dma-names = "rx", "tx";
+				};
+				ssi5: ssi-5 {
+					interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x0b>, <&audma1 0x0c>;
+					dma-names = "rx", "tx";
+				};
+				ssi6: ssi-6 {
+					interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x0d>, <&audma1 0x0e>;
+					dma-names = "rx", "tx";
+				};
+				ssi7: ssi-7 {
+					interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x0f>, <&audma1 0x10>;
+					dma-names = "rx", "tx";
+				};
+				ssi8: ssi-8 {
+					interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x11>, <&audma1 0x12>;
+					dma-names = "rx", "tx";
+				};
+				ssi9: ssi-9 {
+					interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x13>, <&audma1 0x14>;
+					dma-names = "rx", "tx";
+				};
+			};
+
+			rcar_sound,ssiu {
+				ssiu00: ssiu-0 {
+					dmas = <&audma0 0x15>, <&audma1 0x16>;
+					dma-names = "rx", "tx";
+				};
+				ssiu01: ssiu-1 {
+					dmas = <&audma0 0x35>, <&audma1 0x36>;
+					dma-names = "rx", "tx";
+				};
+				ssiu02: ssiu-2 {
+					dmas = <&audma0 0x37>, <&audma1 0x38>;
+					dma-names = "rx", "tx";
+				};
+				ssiu03: ssiu-3 {
+					dmas = <&audma0 0x47>, <&audma1 0x48>;
+					dma-names = "rx", "tx";
+				};
+				ssiu04: ssiu-4 {
+					dmas = <&audma0 0x3F>, <&audma1 0x40>;
+					dma-names = "rx", "tx";
+				};
+				ssiu05: ssiu-5 {
+					dmas = <&audma0 0x43>, <&audma1 0x44>;
+					dma-names = "rx", "tx";
+				};
+				ssiu06: ssiu-6 {
+					dmas = <&audma0 0x4F>, <&audma1 0x50>;
+					dma-names = "rx", "tx";
+				};
+				ssiu07: ssiu-7 {
+					dmas = <&audma0 0x53>, <&audma1 0x54>;
+					dma-names = "rx", "tx";
+				};
+				ssiu10: ssiu-8 {
+					dmas = <&audma0 0x49>, <&audma1 0x4a>;
+					dma-names = "rx", "tx";
+				};
+				ssiu11: ssiu-9 {
+					dmas = <&audma0 0x4B>, <&audma1 0x4C>;
+					dma-names = "rx", "tx";
+				};
+				ssiu12: ssiu-10 {
+					dmas = <&audma0 0x57>, <&audma1 0x58>;
+					dma-names = "rx", "tx";
+				};
+				ssiu13: ssiu-11 {
+					dmas = <&audma0 0x59>, <&audma1 0x5A>;
+					dma-names = "rx", "tx";
+				};
+				ssiu14: ssiu-12 {
+					dmas = <&audma0 0x5F>, <&audma1 0x60>;
+					dma-names = "rx", "tx";
+				};
+				ssiu15: ssiu-13 {
+					dmas = <&audma0 0xC3>, <&audma1 0xC4>;
+					dma-names = "rx", "tx";
+				};
+				ssiu16: ssiu-14 {
+					dmas = <&audma0 0xC7>, <&audma1 0xC8>;
+					dma-names = "rx", "tx";
+				};
+				ssiu17: ssiu-15 {
+					dmas = <&audma0 0xCB>, <&audma1 0xCC>;
+					dma-names = "rx", "tx";
+				};
+				ssiu20: ssiu-16 {
+					dmas = <&audma0 0x63>, <&audma1 0x64>;
+					dma-names = "rx", "tx";
+				};
+				ssiu21: ssiu-17 {
+					dmas = <&audma0 0x67>, <&audma1 0x68>;
+					dma-names = "rx", "tx";
+				};
+				ssiu22: ssiu-18 {
+					dmas = <&audma0 0x6B>, <&audma1 0x6C>;
+					dma-names = "rx", "tx";
+				};
+				ssiu23: ssiu-19 {
+					dmas = <&audma0 0x6D>, <&audma1 0x6E>;
+					dma-names = "rx", "tx";
+				};
+				ssiu24: ssiu-20 {
+					dmas = <&audma0 0xCF>, <&audma1 0xCE>;
+					dma-names = "rx", "tx";
+				};
+				ssiu25: ssiu-21 {
+					dmas = <&audma0 0xEB>, <&audma1 0xEC>;
+					dma-names = "rx", "tx";
+				};
+				ssiu26: ssiu-22 {
+					dmas = <&audma0 0xED>, <&audma1 0xEE>;
+					dma-names = "rx", "tx";
+				};
+				ssiu27: ssiu-23 {
+					dmas = <&audma0 0xEF>, <&audma1 0xF0>;
+					dma-names = "rx", "tx";
+				};
+				ssiu30: ssiu-24 {
+					dmas = <&audma0 0x6f>, <&audma1 0x70>;
+					dma-names = "rx", "tx";
+				};
+				ssiu31: ssiu-25 {
+					dmas = <&audma0 0x21>, <&audma1 0x22>;
+					dma-names = "rx", "tx";
+				};
+				ssiu32: ssiu-26 {
+					dmas = <&audma0 0x23>, <&audma1 0x24>;
+					dma-names = "rx", "tx";
+				};
+				ssiu33: ssiu-27 {
+					dmas = <&audma0 0x25>, <&audma1 0x26>;
+					dma-names = "rx", "tx";
+				};
+				ssiu34: ssiu-28 {
+					dmas = <&audma0 0x27>, <&audma1 0x28>;
+					dma-names = "rx", "tx";
+				};
+				ssiu35: ssiu-29 {
+					dmas = <&audma0 0x29>, <&audma1 0x2A>;
+					dma-names = "rx", "tx";
+				};
+				ssiu36: ssiu-30 {
+					dmas = <&audma0 0x2B>, <&audma1 0x2C>;
+					dma-names = "rx", "tx";
+				};
+				ssiu37: ssiu-31 {
+					dmas = <&audma0 0x2D>, <&audma1 0x2E>;
+					dma-names = "rx", "tx";
+				};
+				ssiu40: ssiu-32 {
+					dmas =	<&audma0 0x71>, <&audma1 0x72>;
+					dma-names = "rx", "tx";
+				};
+				ssiu41: ssiu-33 {
+					dmas = <&audma0 0x17>, <&audma1 0x18>;
+					dma-names = "rx", "tx";
+				};
+				ssiu42: ssiu-34 {
+					dmas = <&audma0 0x19>, <&audma1 0x1A>;
+					dma-names = "rx", "tx";
+				};
+				ssiu43: ssiu-35 {
+					dmas = <&audma0 0x1B>, <&audma1 0x1C>;
+					dma-names = "rx", "tx";
+				};
+				ssiu44: ssiu-36 {
+					dmas = <&audma0 0x1D>, <&audma1 0x1E>;
+					dma-names = "rx", "tx";
+				};
+				ssiu45: ssiu-37 {
+					dmas = <&audma0 0x1F>, <&audma1 0x20>;
+					dma-names = "rx", "tx";
+				};
+				ssiu46: ssiu-38 {
+					dmas = <&audma0 0x31>, <&audma1 0x32>;
+					dma-names = "rx", "tx";
+				};
+				ssiu47: ssiu-39 {
+					dmas = <&audma0 0x33>, <&audma1 0x34>;
+					dma-names = "rx", "tx";
+				};
+				ssiu50: ssiu-40 {
+					dmas = <&audma0 0x73>, <&audma1 0x74>;
+					dma-names = "rx", "tx";
+				};
+				ssiu60: ssiu-41 {
+					dmas = <&audma0 0x75>, <&audma1 0x76>;
+					dma-names = "rx", "tx";
+				};
+				ssiu70: ssiu-42 {
+					dmas = <&audma0 0x79>, <&audma1 0x7a>;
+					dma-names = "rx", "tx";
+				};
+				ssiu80: ssiu-43 {
+					dmas = <&audma0 0x7b>, <&audma1 0x7c>;
+					dma-names = "rx", "tx";
+				};
+				ssiu90: ssiu-44 {
+					dmas = <&audma0 0x7d>, <&audma1 0x7e>;
+					dma-names = "rx", "tx";
+				};
+				ssiu91: ssiu-45 {
+					dmas = <&audma0 0x7F>, <&audma1 0x80>;
+					dma-names = "rx", "tx";
+				};
+				ssiu92: ssiu-46 {
+					dmas = <&audma0 0x81>, <&audma1 0x82>;
+					dma-names = "rx", "tx";
+				};
+				ssiu93: ssiu-47 {
+					dmas = <&audma0 0x83>, <&audma1 0x84>;
+					dma-names = "rx", "tx";
+				};
+				ssiu94: ssiu-48 {
+					dmas = <&audma0 0xA3>, <&audma1 0xA4>;
+					dma-names = "rx", "tx";
+				};
+				ssiu95: ssiu-49 {
+					dmas = <&audma0 0xA5>, <&audma1 0xA6>;
+					dma-names = "rx", "tx";
+				};
+				ssiu96: ssiu-50 {
+					dmas = <&audma0 0xA7>, <&audma1 0xA8>;
+					dma-names = "rx", "tx";
+				};
+				ssiu97: ssiu-51 {
+					dmas = <&audma0 0xA9>, <&audma1 0xAA>;
+					dma-names = "rx", "tx";
+				};
+			};
+		};
+
+		audma0: dma-controller@ec700000 {
+			compatible = "renesas,dmac-r8a7796",
+				     "renesas,rcar-dmac";
+			reg = <0 0xec700000 0 0x10000>;
+			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 502>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 502>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+			iommus = <&ipmmu_mp 0>, <&ipmmu_mp 1>,
+			       <&ipmmu_mp 2>, <&ipmmu_mp 3>,
+			       <&ipmmu_mp 4>, <&ipmmu_mp 5>,
+			       <&ipmmu_mp 6>, <&ipmmu_mp 7>,
+			       <&ipmmu_mp 8>, <&ipmmu_mp 9>,
+			       <&ipmmu_mp 10>, <&ipmmu_mp 11>,
+			       <&ipmmu_mp 12>, <&ipmmu_mp 13>,
+			       <&ipmmu_mp 14>, <&ipmmu_mp 15>;
+		};
+
+		audma1: dma-controller@ec720000 {
+			compatible = "renesas,dmac-r8a7796",
+				     "renesas,rcar-dmac";
+			reg = <0 0xec720000 0 0x10000>;
+			interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 501>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 501>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+			iommus = <&ipmmu_mp 16>, <&ipmmu_mp 17>,
+			       <&ipmmu_mp 18>, <&ipmmu_mp 19>,
+			       <&ipmmu_mp 20>, <&ipmmu_mp 21>,
+			       <&ipmmu_mp 22>, <&ipmmu_mp 23>,
+			       <&ipmmu_mp 24>, <&ipmmu_mp 25>,
+			       <&ipmmu_mp 26>, <&ipmmu_mp 27>,
+			       <&ipmmu_mp 28>, <&ipmmu_mp 29>,
+			       <&ipmmu_mp 30>, <&ipmmu_mp 31>;
+		};
+
+		xhci0: usb@ee000000 {
+			compatible = "renesas,xhci-r8a7796",
+				     "renesas,rcar-gen3-xhci";
+			reg = <0 0xee000000 0 0xc00>;
+			interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 328>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 328>;
+			status = "disabled";
+		};
+
+		usb3_peri0: usb@ee020000 {
+			compatible = "renesas,r8a7796-usb3-peri",
+				     "renesas,rcar-gen3-usb3-peri";
+			reg = <0 0xee020000 0 0x400>;
+			interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 328>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 328>;
+			status = "disabled";
+		};
+
+		ohci0: usb@ee080000 {
+			compatible = "generic-ohci";
+			reg = <0 0xee080000 0 0x100>;
+			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
+			phys = <&usb2_phy0 1>;
+			phy-names = "usb";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 703>, <&cpg 704>;
+			status = "disabled";
+		};
+
+		ohci1: usb@ee0a0000 {
+			compatible = "generic-ohci";
+			reg = <0 0xee0a0000 0 0x100>;
+			interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 702>;
+			phys = <&usb2_phy1 1>;
+			phy-names = "usb";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 702>;
+			status = "disabled";
+		};
+
+		ehci0: usb@ee080100 {
+			compatible = "generic-ehci";
+			reg = <0 0xee080100 0 0x100>;
+			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
+			phys = <&usb2_phy0 2>;
+			phy-names = "usb";
+			companion = <&ohci0>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 703>, <&cpg 704>;
+			status = "disabled";
+		};
+
+		ehci1: usb@ee0a0100 {
+			compatible = "generic-ehci";
+			reg = <0 0xee0a0100 0 0x100>;
+			interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 702>;
+			phys = <&usb2_phy1 2>;
+			phy-names = "usb";
+			companion = <&ohci1>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 702>;
+			status = "disabled";
+		};
+
+		usb2_phy0: usb-phy@ee080200 {
+			compatible = "renesas,usb2-phy-r8a7796",
+				     "renesas,rcar-gen3-usb2-phy";
+			reg = <0 0xee080200 0 0x700>;
+			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 703>, <&cpg 704>;
+			#phy-cells = <1>;
+			status = "disabled";
+		};
+
+		usb2_phy1: usb-phy@ee0a0200 {
+			compatible = "renesas,usb2-phy-r8a7796",
+				     "renesas,rcar-gen3-usb2-phy";
+			reg = <0 0xee0a0200 0 0x700>;
+			clocks = <&cpg CPG_MOD 702>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 702>;
+			#phy-cells = <1>;
+			status = "disabled";
+		};
+
+		sdhi0: sd@ee100000 {
+			compatible = "renesas,sdhi-r8a7796",
+				     "renesas,rcar-gen3-sdhi";
+			reg = <0 0xee100000 0 0x2000>;
+			interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 314>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 314>;
+			iommus = <&ipmmu_ds1 32>;
+			status = "disabled";
+		};
+
+		sdhi1: sd@ee120000 {
+			compatible = "renesas,sdhi-r8a7796",
+				     "renesas,rcar-gen3-sdhi";
+			reg = <0 0xee120000 0 0x2000>;
+			interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 313>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 313>;
+			iommus = <&ipmmu_ds1 33>;
+			status = "disabled";
+		};
+
+		sdhi2: sd@ee140000 {
+			compatible = "renesas,sdhi-r8a7796",
+				     "renesas,rcar-gen3-sdhi";
+			reg = <0 0xee140000 0 0x2000>;
+			interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 312>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 312>;
+			iommus = <&ipmmu_ds1 34>;
+			status = "disabled";
+		};
+
+		sdhi3: sd@ee160000 {
+			compatible = "renesas,sdhi-r8a7796",
+				     "renesas,rcar-gen3-sdhi";
+			reg = <0 0xee160000 0 0x2000>;
+			interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 311>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 311>;
+			iommus = <&ipmmu_ds1 35>;
+			status = "disabled";
+		};
+
+		gic: interrupt-controller@f1010000 {
+			compatible = "arm,gic-400";
+			#interrupt-cells = <3>;
+			#address-cells = <0>;
+			interrupt-controller;
+			reg = <0x0 0xf1010000 0 0x1000>,
+			      <0x0 0xf1020000 0 0x20000>,
+			      <0x0 0xf1040000 0 0x20000>,
+			      <0x0 0xf1060000 0 0x20000>;
+			interrupts = <GIC_PPI 9
+					(GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_HIGH)>;
+			clocks = <&cpg CPG_MOD 408>;
+			clock-names = "clk";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 408>;
+		};
+
+		pciec0: pcie@fe000000 {
+			compatible = "renesas,pcie-r8a7796",
+				     "renesas,pcie-rcar-gen3";
+			reg = <0 0xfe000000 0 0x80000>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+			bus-range = <0x00 0xff>;
+			device_type = "pci";
+			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000>,
+				 <0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000>,
+				 <0x02000000 0 0x30000000 0 0x30000000 0 0x08000000>,
+				 <0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+			/* Map all possible DDR as inbound ranges */
+			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
+			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>;
+			clock-names = "pcie", "pcie_bus";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 319>;
+			status = "disabled";
+		};
+
+		pciec1: pcie@ee800000 {
+			compatible = "renesas,pcie-r8a7796",
+				     "renesas,pcie-rcar-gen3";
+			reg = <0 0xee800000 0 0x80000>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+			bus-range = <0x00 0xff>;
+			device_type = "pci";
+			ranges = <0x01000000 0 0x00000000 0 0xee900000 0 0x00100000>,
+				 <0x02000000 0 0xeea00000 0 0xeea00000 0 0x00200000>,
+				 <0x02000000 0 0xc0000000 0 0xc0000000 0 0x08000000>,
+				 <0x42000000 0 0xc8000000 0 0xc8000000 0 0x08000000>;
+			/* Map all possible DDR as inbound ranges */
+			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
+			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 318>, <&pcie_bus_clk>;
+			clock-names = "pcie", "pcie_bus";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 318>;
+			status = "disabled";
+		};
+
+		imr-lx4@fe860000 {
+			compatible = "renesas,r8a7796-imr-lx4",
+				     "renesas,imr-lx4";
+			reg = <0 0xfe860000 0 0x2000>;
+			interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 823>;
+			power-domains = <&sysc R8A7796_PD_A3VC>;
+			resets = <&cpg 823>;
+		};
+
+		imr-lx4@fe870000 {
+			compatible = "renesas,r8a7796-imr-lx4",
+				     "renesas,imr-lx4";
+			reg = <0 0xfe870000 0 0x2000>;
+			interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 822>;
+			power-domains = <&sysc R8A7796_PD_A3VC>;
+			resets = <&cpg 822>;
+		};
+
+		fdp1@fe940000 {
+			compatible = "renesas,fdp1";
+			reg = <0 0xfe940000 0 0x2400>;
+			interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 119>;
+			power-domains = <&sysc R8A7796_PD_A3VC>;
+			resets = <&cpg 119>;
+			renesas,fcp = <&fcpf0>;
+		};
+
+		fcpf0: fcp@fe950000 {
+			compatible = "renesas,fcpf";
+			reg = <0 0xfe950000 0 0x200>;
+			clocks = <&cpg CPG_MOD 615>;
+			power-domains = <&sysc R8A7796_PD_A3VC>;
+			resets = <&cpg 615>;
+		};
+
+		fcpvb0: fcp@fe96f000 {
+			compatible = "renesas,fcpv";
+			reg = <0 0xfe96f000 0 0x200>;
+			clocks = <&cpg CPG_MOD 607>;
+			power-domains = <&sysc R8A7796_PD_A3VC>;
+			resets = <&cpg 607>;
+		};
+
+		fcpvi0: fcp@fe9af000 {
+			compatible = "renesas,fcpv";
+			reg = <0 0xfe9af000 0 0x200>;
+			clocks = <&cpg CPG_MOD 611>;
+			power-domains = <&sysc R8A7796_PD_A3VC>;
+			resets = <&cpg 611>;
+			iommus = <&ipmmu_vc0 19>;
+		};
+
+		fcpvd0: fcp@fea27000 {
+			compatible = "renesas,fcpv";
+			reg = <0 0xfea27000 0 0x200>;
+			clocks = <&cpg CPG_MOD 603>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 603>;
+			iommus = <&ipmmu_vi0 8>;
+		};
+
+		fcpvd1: fcp@fea2f000 {
+			compatible = "renesas,fcpv";
+			reg = <0 0xfea2f000 0 0x200>;
+			clocks = <&cpg CPG_MOD 602>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 602>;
+			iommus = <&ipmmu_vi0 9>;
+		};
+
+		fcpvd2: fcp@fea37000 {
+			compatible = "renesas,fcpv";
+			reg = <0 0xfea37000 0 0x200>;
+			clocks = <&cpg CPG_MOD 601>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 601>;
+			iommus = <&ipmmu_vi0 10>;
+		};
+
+		vspb: vsp@fe960000 {
+			compatible = "renesas,vsp2";
+			reg = <0 0xfe960000 0 0x8000>;
+			interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 626>;
+			power-domains = <&sysc R8A7796_PD_A3VC>;
+			resets = <&cpg 626>;
+
+			renesas,fcp = <&fcpvb0>;
+		};
+
+		vspd0: vsp@fea20000 {
+			compatible = "renesas,vsp2";
+			reg = <0 0xfea20000 0 0x5000>;
+			interrupts = <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 623>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 623>;
+
+			renesas,fcp = <&fcpvd0>;
+		};
+
+		vspd1: vsp@fea28000 {
+			compatible = "renesas,vsp2";
+			reg = <0 0xfea28000 0 0x5000>;
+			interrupts = <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 622>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 622>;
+
+			renesas,fcp = <&fcpvd1>;
+		};
+
+		vspd2: vsp@fea30000 {
+			compatible = "renesas,vsp2";
+			reg = <0 0xfea30000 0 0x5000>;
+			interrupts = <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 621>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 621>;
+
+			renesas,fcp = <&fcpvd2>;
+		};
+
+		vspi0: vsp@fe9a0000 {
+			compatible = "renesas,vsp2";
+			reg = <0 0xfe9a0000 0 0x8000>;
+			interrupts = <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 631>;
+			power-domains = <&sysc R8A7796_PD_A3VC>;
+			resets = <&cpg 631>;
+
+			renesas,fcp = <&fcpvi0>;
+		};
+
+		cmm0: cmm@fea40000 {
+			compatible = "renesas,r8a7796-cmm",
+				     "renesas,rcar-gen3-cmm";
+			reg = <0 0xfea40000 0 0x1000>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			clocks = <&cpg CPG_MOD 711>;
+			resets = <&cpg 711>;
+		};
+
+		cmm1: cmm@fea50000 {
+			compatible = "renesas,r8a7796-cmm",
+				     "renesas,rcar-gen3-cmm";
+			reg = <0 0xfea50000 0 0x1000>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			clocks = <&cpg CPG_MOD 710>;
+			resets = <&cpg 710>;
+		};
+
+		cmm2: cmm@fea60000 {
+			compatible = "renesas,r8a7796-cmm",
+				     "renesas,rcar-gen3-cmm";
+			reg = <0 0xfea60000 0 0x1000>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			clocks = <&cpg CPG_MOD 709>;
+			resets = <&cpg 709>;
+		};
+
+		csi20: csi2@fea80000 {
+			compatible = "renesas,r8a7796-csi2";
+			reg = <0 0xfea80000 0 0x10000>;
+			interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 714>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 714>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					csi20vin0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&vin0csi20>;
+					};
+					csi20vin1: endpoint@1 {
+						reg = <1>;
+						remote-endpoint = <&vin1csi20>;
+					};
+					csi20vin2: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&vin2csi20>;
+					};
+					csi20vin3: endpoint@3 {
+						reg = <3>;
+						remote-endpoint = <&vin3csi20>;
+					};
+					csi20vin4: endpoint@4 {
+						reg = <4>;
+						remote-endpoint = <&vin4csi20>;
+					};
+					csi20vin5: endpoint@5 {
+						reg = <5>;
+						remote-endpoint = <&vin5csi20>;
+					};
+					csi20vin6: endpoint@6 {
+						reg = <6>;
+						remote-endpoint = <&vin6csi20>;
+					};
+					csi20vin7: endpoint@7 {
+						reg = <7>;
+						remote-endpoint = <&vin7csi20>;
+					};
+				};
+			};
+		};
+
+		csi40: csi2@feaa0000 {
+			compatible = "renesas,r8a7796-csi2";
+			reg = <0 0xfeaa0000 0 0x10000>;
+			interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 716>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 716>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					reg = <1>;
+
+					csi40vin0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&vin0csi40>;
+					};
+					csi40vin1: endpoint@1 {
+						reg = <1>;
+						remote-endpoint = <&vin1csi40>;
+					};
+					csi40vin2: endpoint@2 {
+						reg = <2>;
+						remote-endpoint = <&vin2csi40>;
+					};
+					csi40vin3: endpoint@3 {
+						reg = <3>;
+						remote-endpoint = <&vin3csi40>;
+					};
+					csi40vin4: endpoint@4 {
+						reg = <4>;
+						remote-endpoint = <&vin4csi40>;
+					};
+					csi40vin5: endpoint@5 {
+						reg = <5>;
+						remote-endpoint = <&vin5csi40>;
+					};
+					csi40vin6: endpoint@6 {
+						reg = <6>;
+						remote-endpoint = <&vin6csi40>;
+					};
+					csi40vin7: endpoint@7 {
+						reg = <7>;
+						remote-endpoint = <&vin7csi40>;
+					};
+				};
+
+			};
+		};
+
+		hdmi0: hdmi@fead0000 {
+			compatible = "renesas,r8a7796-hdmi", "renesas,rcar-gen3-hdmi";
+			reg = <0 0xfead0000 0 0x10000>;
+			interrupts = <GIC_SPI 389 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 729>, <&cpg CPG_CORE R8A7796_CLK_HDMI>;
+			clock-names = "iahb", "isfr";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 729>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					dw_hdmi0_in: endpoint {
+						remote-endpoint = <&du_out_hdmi0>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+				};
+				port@2 {
+					/* HDMI sound */
+					reg = <2>;
+				};
+			};
+		};
+
+		du: display@feb00000 {
+			compatible = "renesas,du-r8a7796";
+			reg = <0 0xfeb00000 0 0x70000>;
+			interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 724>,
+				 <&cpg CPG_MOD 723>,
+				 <&cpg CPG_MOD 722>;
+			clock-names = "du.0", "du.1", "du.2";
+
+			renesas,cmms = <&cmm0>, <&cmm1>, <&cmm2>;
+			vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>;
+
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					du_out_rgb: endpoint {
+					};
+				};
+				port@1 {
+					reg = <1>;
+					du_out_hdmi0: endpoint {
+						remote-endpoint = <&dw_hdmi0_in>;
+					};
+				};
+				port@2 {
+					reg = <2>;
+					du_out_lvds0: endpoint {
+						remote-endpoint = <&lvds0_in>;
+					};
+				};
+			};
+		};
+
+		lvds0: lvds@feb90000 {
+			compatible = "renesas,r8a7796-lvds";
+			reg = <0 0xfeb90000 0 0x14>;
+			clocks = <&cpg CPG_MOD 727>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 727>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					lvds0_in: endpoint {
+						remote-endpoint = <&du_out_lvds0>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					lvds0_out: endpoint {
+					};
+				};
+			};
+		};
+
+		prr: chipid@fff00044 {
+			compatible = "renesas,prr";
+			reg = <0 0xfff00044 0 4>;
+		};
+	};
+
+	thermal-zones {
+		sensor_thermal1: sensor-thermal1 {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+			thermal-sensors = <&tsc 0>;
+			sustainable-power = <3874>;
+
+			trips {
+				sensor1_crit: sensor1-crit {
+					temperature = <120000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		sensor_thermal2: sensor-thermal2 {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+			thermal-sensors = <&tsc 1>;
+			sustainable-power = <3874>;
+
+			trips {
+				sensor2_crit: sensor2-crit {
+					temperature = <120000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		sensor_thermal3: sensor-thermal3 {
+			polling-delay-passive = <250>;
+			polling-delay = <1000>;
+			thermal-sensors = <&tsc 2>;
+			sustainable-power = <3874>;
+
+			cooling-maps {
+				map0 {
+					trip = <&target>;
+					cooling-device = <&a57_0 2 4>;
+					contribution = <1024>;
+				};
+				map1 {
+					trip = <&target>;
+					cooling-device = <&a53_0 0 2>;
+					contribution = <1024>;
+				};
+			};
+			trips {
+				target: trip-point1 {
+					temperature = <100000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+
+				sensor3_crit: sensor3-crit {
+					temperature = <120000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
+				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
+				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
+				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+	/* External USB clocks - can be overridden by the board */
+	usb3s0_clk: usb3s0 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	usb_extal_clk: usb_extal {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
index 64466c8..be3824b 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
@@ -319,53 +319,133 @@ soc {
 		ranges;
 
 		rwdt: watchdog@e6020000 {
+			compatible = "renesas,r8a77961-wdt",
+				     "renesas,rcar-gen3-wdt";
 			reg = <0 0xe6020000 0 0x0c>;
-			/* placeholder */
+			clocks = <&cpg CPG_MOD 402>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 402>;
+			status = "disabled";
+		};
+
+		gpio0: gpio@e6050000 {
+			compatible = "renesas,gpio-r8a77961",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6050000 0 0x50>;
+			interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 0 16>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 912>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 912>;
+		};
+
+		gpio1: gpio@e6051000 {
+			compatible = "renesas,gpio-r8a77961",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6051000 0 0x50>;
+			interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 32 29>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 911>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 911>;
 		};
 
 		gpio2: gpio@e6052000 {
+			compatible = "renesas,gpio-r8a77961",
+				     "renesas,rcar-gen3-gpio";
 			reg = <0 0xe6052000 0 0x50>;
+			interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
 			#gpio-cells = <2>;
 			gpio-controller;
+			gpio-ranges = <&pfc 0 64 15>;
 			#interrupt-cells = <2>;
 			interrupt-controller;
-			/* placeholder */
+			clocks = <&cpg CPG_MOD 910>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 910>;
 		};
 
 		gpio3: gpio@e6053000 {
+			compatible = "renesas,gpio-r8a77961",
+				     "renesas,rcar-gen3-gpio";
 			reg = <0 0xe6053000 0 0x50>;
+			interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
 			#gpio-cells = <2>;
 			gpio-controller;
+			gpio-ranges = <&pfc 0 96 16>;
 			#interrupt-cells = <2>;
 			interrupt-controller;
-			/* placeholder */
+			clocks = <&cpg CPG_MOD 909>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 909>;
 		};
 
 		gpio4: gpio@e6054000 {
+			compatible = "renesas,gpio-r8a77961",
+				     "renesas,rcar-gen3-gpio";
 			reg = <0 0xe6054000 0 0x50>;
+			interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
 			#gpio-cells = <2>;
 			gpio-controller;
+			gpio-ranges = <&pfc 0 128 18>;
 			#interrupt-cells = <2>;
 			interrupt-controller;
-			/* placeholder */
+			clocks = <&cpg CPG_MOD 908>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 908>;
 		};
 
 		gpio5: gpio@e6055000 {
+			compatible = "renesas,gpio-r8a77961",
+				     "renesas,rcar-gen3-gpio";
 			reg = <0 0xe6055000 0 0x50>;
+			interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
 			#gpio-cells = <2>;
 			gpio-controller;
+			gpio-ranges = <&pfc 0 160 26>;
 			#interrupt-cells = <2>;
 			interrupt-controller;
-			/* placeholder */
+			clocks = <&cpg CPG_MOD 907>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 907>;
 		};
 
 		gpio6: gpio@e6055400 {
+			compatible = "renesas,gpio-r8a77961",
+				     "renesas,rcar-gen3-gpio";
 			reg = <0 0xe6055400 0 0x50>;
+			interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
 			#gpio-cells = <2>;
 			gpio-controller;
+			gpio-ranges = <&pfc 0 192 32>;
 			#interrupt-cells = <2>;
 			interrupt-controller;
-			/* placeholder */
+			clocks = <&cpg CPG_MOD 906>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 906>;
+		};
+
+		gpio7: gpio@e6055800 {
+			compatible = "renesas,gpio-r8a77961",
+				     "renesas,rcar-gen3-gpio";
+			reg = <0 0xe6055800 0 0x50>;
+			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			gpio-ranges = <&pfc 0 224 4>;
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			clocks = <&cpg CPG_MOD 905>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 905>;
 		};
 
 		pfc: pin-controller@e6060000 {
@@ -401,27 +481,138 @@ intc_ex: interrupt-controller@e61c0000 {
 			/* placeholder */
 		};
 
+		i2c0: i2c@e6500000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a77961",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe6500000 0 0x40>;
+			interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 931>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 931>;
+			dmas = <&dmac1 0x91>, <&dmac1 0x90>,
+			       <&dmac2 0x91>, <&dmac2 0x90>;
+			dma-names = "tx", "rx", "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c1: i2c@e6508000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a77961",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe6508000 0 0x40>;
+			interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 930>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 930>;
+			dmas = <&dmac1 0x93>, <&dmac1 0x92>,
+			       <&dmac2 0x93>, <&dmac2 0x92>;
+			dma-names = "tx", "rx", "tx", "rx";
+			i2c-scl-internal-delay-ns = <6>;
+			status = "disabled";
+		};
+
 		i2c2: i2c@e6510000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a77961",
+				     "renesas,rcar-gen3-i2c";
 			reg = <0 0xe6510000 0 0x40>;
-			/* placeholder */
+			interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 929>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 929>;
+			dmas = <&dmac1 0x95>, <&dmac1 0x94>,
+			       <&dmac2 0x95>, <&dmac2 0x94>;
+			dma-names = "tx", "rx", "tx", "rx";
+			i2c-scl-internal-delay-ns = <6>;
+			status = "disabled";
+		};
+
+		i2c3: i2c@e66d0000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a77961",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe66d0000 0 0x40>;
+			interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 928>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 928>;
+			dmas = <&dmac0 0x97>, <&dmac0 0x96>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
 		};
 
 		i2c4: i2c@e66d8000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a77961",
+				     "renesas,rcar-gen3-i2c";
 			reg = <0 0xe66d8000 0 0x40>;
-			/* placeholder */
+			interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 927>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 927>;
+			dmas = <&dmac0 0x99>, <&dmac0 0x98>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c5: i2c@e66e0000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a77961",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe66e0000 0 0x40>;
+			interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 919>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 919>;
+			dmas = <&dmac0 0x9b>, <&dmac0 0x9a>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c6: i2c@e66e8000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a77961",
+				     "renesas,rcar-gen3-i2c";
+			reg = <0 0xe66e8000 0 0x40>;
+			interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 918>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 918>;
+			dmas = <&dmac0 0x9d>, <&dmac0 0x9c>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <6>;
+			status = "disabled";
 		};
 
 		i2c_dvfs: i2c@e60b0000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
+			compatible = "renesas,iic-r8a77961",
+				     "renesas,rcar-gen3-iic",
+				     "renesas,rmobile-iic";
 			reg = <0 0xe60b0000 0 0x425>;
-			/* placeholder */
+			interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 926>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 926>;
+			dmas = <&dmac0 0x11>, <&dmac0 0x10>;
+			dma-names = "tx", "rx";
+			status = "disabled";
 		};
 
 		hscif1: serial@e6550000 {
 			reg = <0 0xe6550000 0 0x60>;
 			/* placeholder */
@@ -438,11 +629,151 @@ usb3_phy0: usb-phy@e65ee000 {
 			/* placeholder */
 		};
 
+		dmac0: dma-controller@e6700000 {
+			compatible = "renesas,dmac-r8a77961",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe6700000 0 0x10000>;
+			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 219>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 219>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+		};
+
+		dmac1: dma-controller@e7300000 {
+			compatible = "renesas,dmac-r8a77961",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe7300000 0 0x10000>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 218>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 218>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+		};
+
+		dmac2: dma-controller@e7310000 {
+			compatible = "renesas,dmac-r8a77961",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe7310000 0 0x10000>;
+			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 217>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 217>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+		};
+
 		avb: ethernet@e6800000 {
+			compatible = "renesas,etheravb-r8a77961",
+				     "renesas,etheravb-rcar-gen3";
 			reg = <0 0xe6800000 0 0x800>, <0 0xe6a00000 0 0x10000>;
+			interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "ch0", "ch1", "ch2", "ch3",
+					  "ch4", "ch5", "ch6", "ch7",
+					  "ch8", "ch9", "ch10", "ch11",
+					  "ch12", "ch13", "ch14", "ch15",
+					  "ch16", "ch17", "ch18", "ch19",
+					  "ch20", "ch21", "ch22", "ch23",
+					  "ch24";
+			clocks = <&cpg CPG_MOD 812>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 812>;
+			phy-mode = "rgmii";
 			#address-cells = <1>;
 			#size-cells = <0>;
-			/* placeholder */
+			status = "disabled";
 		};
 
 		pwm1: pwm@e6e31000 {
@@ -574,18 +905,51 @@ usb2_phy1: usb-phy@ee0a0200 {
 		};
 
 		sdhi0: sd@ee100000 {
+			compatible = "renesas,sdhi-r8a77961",
+				     "renesas,rcar-gen3-sdhi";
 			reg = <0 0xee100000 0 0x2000>;
-			/* placeholder */
+			interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 314>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 314>;
+			status = "disabled";
+		};
+
+		sdhi1: sd@ee120000 {
+			compatible = "renesas,sdhi-r8a77961",
+				     "renesas,rcar-gen3-sdhi";
+			reg = <0 0xee120000 0 0x2000>;
+			interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 313>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 313>;
+			status = "disabled";
 		};
 
 		sdhi2: sd@ee140000 {
+			compatible = "renesas,sdhi-r8a77961",
+				     "renesas,rcar-gen3-sdhi";
 			reg = <0 0xee140000 0 0x2000>;
-			/* placeholder */
+			interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 312>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 312>;
+			status = "disabled";
 		};
 
 		sdhi3: sd@ee160000 {
+			compatible = "renesas,sdhi-r8a77961",
+				     "renesas,rcar-gen3-sdhi";
 			reg = <0 0xee160000 0 0x2000>;
-			/* placeholder */
+			interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 311>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+			resets = <&cpg 311>;
+			status = "disabled";
 		};
 
 		gic: interrupt-controller@f1010000 {
diff --git a/arch/arm64/boot/dts/renesas/r8a77965-m3nulcb-kf.dts b/arch/arm64/boot/dts/renesas/r8a77965-m3nulcb-kf.dts
deleted file mode 100644
index dadad97..0000000
--- a/arch/arm64/boot/dts/renesas/r8a77965-m3nulcb-kf.dts
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree Source for the M3NULCB Kingfisher board
- *
- * Copyright (C) 2018 Renesas Electronics Corp.
- * Copyright (C) 2018 Cogent Embedded, Inc.
- */
-
-#include "r8a77965-m3nulcb.dts"
-#include "ulcb-kf.dtsi"
-
-/ {
-	model = "Renesas M3NULCB Kingfisher board based on r8a77965";
-	compatible = "shimafuji,kingfisher", "renesas,m3nulcb",
-		     "renesas,r8a77965";
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a77965-ulcb-kf.dts b/arch/arm64/boot/dts/renesas/r8a77965-ulcb-kf.dts
new file mode 100644
index 0000000..12aa08f
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77965-ulcb-kf.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the M3NULCB Kingfisher board
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018 Cogent Embedded, Inc.
+ */
+
+#include "r8a77965-ulcb.dts"
+#include "ulcb-kf.dtsi"
+
+/ {
+	model = "Renesas M3NULCB Kingfisher board based on r8a77965";
+	compatible = "shimafuji,kingfisher", "renesas,m3nulcb",
+		     "renesas,r8a77965";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77965-m3nulcb.dts b/arch/arm64/boot/dts/renesas/r8a77965-ulcb.dts
similarity index 100%
rename from arch/arm64/boot/dts/renesas/r8a77965-m3nulcb.dts
rename to arch/arm64/boot/dts/renesas/r8a77965-ulcb.dts
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index bdbe197..c17d90b 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -429,12 +429,12 @@ intc_ex: interrupt-controller@e61c0000 {
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			reg = <0 0xe61c0000 0 0x200>;
-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 407>;
 			power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
 			resets = <&cpg 407>;
@@ -680,8 +680,8 @@ usb_dmac0: dma-controller@e65a0000 {
 			compatible = "renesas,r8a77965-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65a0000 0 0x100>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 330>;
 			power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
@@ -694,8 +694,8 @@ usb_dmac1: dma-controller@e65b0000 {
 			compatible = "renesas,r8a77965-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65b0000 0 0x100>;
-			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 331>;
 			power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
@@ -721,23 +721,23 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a77965",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x10000>;
-			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -763,23 +763,23 @@ dmac1: dma-controller@e7300000 {
 			compatible = "renesas,dmac-r8a77965",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7300000 0 0x10000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -805,23 +805,23 @@ dmac2: dma-controller@e7310000 {
 			compatible = "renesas,dmac-r8a77965",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7310000 0 0x10000>;
-			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -1937,23 +1937,23 @@ audma0: dma-controller@ec700000 {
 			compatible = "renesas,dmac-r8a77965",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec700000 0 0x10000>;
-			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -1971,23 +1971,23 @@ audma1: dma-controller@ec720000 {
 			compatible = "renesas,dmac-r8a77965",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec720000 0 0x10000>;
-			interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -2184,10 +2184,10 @@ pciec0: pcie@fe000000 {
 			#size-cells = <2>;
 			bus-range = <0x00 0xff>;
 			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
-				0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
-				0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
-				0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000>,
+				 <0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000>,
+				 <0x02000000 0 0x30000000 0 0x30000000 0 0x08000000>,
+				 <0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
 			/* Map all possible DDR as inbound ranges */
 			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
 			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
@@ -2211,10 +2211,10 @@ pciec1: pcie@ee800000 {
 			#size-cells = <2>;
 			bus-range = <0x00 0xff>;
 			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xee900000 0 0x00100000
-				0x02000000 0 0xeea00000 0 0xeea00000 0 0x00200000
-				0x02000000 0 0xc0000000 0 0xc0000000 0 0x08000000
-				0x42000000 0 0xc8000000 0 0xc8000000 0 0x08000000>;
+			ranges = <0x01000000 0 0x00000000 0 0xee900000 0 0x00100000>,
+				 <0x02000000 0 0xeea00000 0 0xeea00000 0 0x00200000>,
+				 <0x02000000 0 0xc0000000 0 0xc0000000 0 0x08000000>,
+				 <0x42000000 0 0xc8000000 0 0xc8000000 0 0x08000000>;
 			/* Map all possible DDR as inbound ranges */
 			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
 			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
index 0d0558e..664a73a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
@@ -302,8 +302,8 @@ sysc: system-controller@e6180000 {
 
 		thermal: thermal@e6190000 {
 			compatible = "renesas,thermal-r8a77970";
-			reg =  <0 0xe6190000 0 0x10
-				0 0xe6190100 0 0x120>;
+			reg = <0 0xe6190000 0 0x10>,
+			      <0 0xe6190100 0 0x120>;
 			interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
@@ -318,12 +318,12 @@ intc_ex: interrupt-controller@e61c0000 {
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			reg = <0 0xe61c0000 0 0x200>;
-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 407>;
 			power-domains = <&sysc R8A77970_PD_ALWAYS_ON>;
 			resets = <&cpg 407>;
@@ -933,15 +933,15 @@ dmac1: dma-controller@e7300000 {
 			compatible = "renesas,dmac-r8a77970",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7300000 0 0x10000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7";
@@ -961,15 +961,15 @@ dmac2: dma-controller@e7310000 {
 			compatible = "renesas,dmac-r8a77970",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7310000 0 0x10000>;
-			interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7";
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index 4d86669..b340fb4 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -348,12 +348,12 @@ intc_ex: interrupt-controller@e61c0000 {
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			reg = <0 0xe61c0000 0 0x200>;
-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 407>;
 			power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
 			resets = <&cpg 407>;
@@ -1174,23 +1174,23 @@ dmac1: dma-controller@e7300000 {
 			compatible = "renesas,dmac-r8a77980",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7300000 0 0x10000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1216,23 +1216,23 @@ dmac2: dma-controller@e7310000 {
 			compatible = "renesas,dmac-r8a77980",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7310000 0 0x10000>;
-			interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 362 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 364 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 365 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 366 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 368 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 362 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 364 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 365 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 366 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 368 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					  "ch0", "ch1", "ch2", "ch3",
 					  "ch4", "ch5", "ch6", "ch7",
@@ -1367,21 +1367,17 @@ pciec: pcie@fe000000 {
 			#size-cells = <2>;
 			bus-range = <0x00 0xff>;
 			device_type = "pci";
-			ranges = <
-				0x01000000 0 0x00000000 0 0xfe100000 0 0x0100000
-				0x02000000 0 0xfe200000 0 0xfe200000 0 0x0200000
-				0x02000000 0 0x30000000 0 0x30000000 0 0x8000000
-				0x42000000 0 0x38000000 0 0x38000000 0 0x8000000
-			>;
-			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000
-				      0 0x80000000>;
+			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x0100000>,
+				 <0x02000000 0 0xfe200000 0 0xfe200000 0 0x0200000>,
+				 <0x02000000 0 0x30000000 0 0x30000000 0 0x8000000>,
+				 <0x42000000 0 0x38000000 0 0x38000000 0 0x8000000>;
+			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
 			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 148
-					 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>;
 			clock-names = "pcie", "pcie_bus";
 			power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
index b38f9d4..4fd2b14 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
@@ -182,8 +182,7 @@ vccq_sdhi0: regulator-vccq-sdhi0 {
 
 		gpios = <&gpio5 18 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	vcc_sdhi1: regulator-vcc-sdhi1 {
@@ -206,8 +205,7 @@ vccq_sdhi1: regulator-vccq-sdhi1 {
 
 		gpios = <&gpio3 15 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	vga {
@@ -636,7 +634,6 @@ &rcar_sound {
 	/* audio_clkout0/1/2/3 */
 	#clock-cells = <1>;
 	clock-frequency = <12288000 11289600>;
-	clkout-lr-synchronous;
 
 	status = "okay";
 
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index 67a6824..32d91f2 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -394,12 +394,12 @@ intc_ex: interrupt-controller@e61c0000 {
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			reg = <0 0xe61c0000 0 0x200>;
-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 407>;
 			power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
 			resets = <&cpg 407>;
@@ -643,8 +643,8 @@ usb_dmac0: dma-controller@e65a0000 {
 			compatible = "renesas,r8a77990-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65a0000 0 0x100>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 330>;
 			power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
@@ -657,8 +657,8 @@ usb_dmac1: dma-controller@e65b0000 {
 			compatible = "renesas,r8a77990-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65b0000 0 0x100>;
-			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 331>;
 			power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
@@ -671,23 +671,23 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a77990",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x10000>;
-			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -713,23 +713,23 @@ dmac1: dma-controller@e7300000 {
 			compatible = "renesas,dmac-r8a77990",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7300000 0 0x10000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -755,23 +755,23 @@ dmac2: dma-controller@e7310000 {
 			compatible = "renesas,dmac-r8a77990",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7310000 0 0x10000>;
-			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -1474,23 +1474,23 @@ audma0: dma-controller@ec700000 {
 			compatible = "renesas,dmac-r8a77990",
 				     "renesas,rcar-dmac";
 			reg = <0 0xec700000 0 0x10000>;
-			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7",
@@ -1635,10 +1635,10 @@ pciec0: pcie@fe000000 {
 			#size-cells = <2>;
 			bus-range = <0x00 0xff>;
 			device_type = "pci";
-			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
-				  0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
-				  0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
-				  0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000>,
+				 <0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000>,
+				 <0x02000000 0 0x30000000 0 0x30000000 0 0x08000000>,
+				 <0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
 			/* Map all possible DDR as inbound ranges */
 			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>;
 			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/renesas/r8a77995.dtsi b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
index e6ee2b7..9503007 100644
--- a/arch/arm64/boot/dts/renesas/r8a77995.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
@@ -231,12 +231,12 @@ intc_ex: interrupt-controller@e61c0000 {
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			reg = <0 0xe61c0000 0 0x200>;
-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 407>;
 			power-domains = <&sysc R8A77995_PD_ALWAYS_ON>;
 			resets = <&cpg 407>;
@@ -365,8 +365,8 @@ usb_dmac0: dma-controller@e65a0000 {
 			compatible = "renesas,r8a77995-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65a0000 0 0x100>;
-			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 330>;
 			power-domains = <&sysc R8A77995_PD_ALWAYS_ON>;
@@ -379,8 +379,8 @@ usb_dmac1: dma-controller@e65b0000 {
 			compatible = "renesas,r8a77995-usb-dmac",
 				     "renesas,usb-dmac";
 			reg = <0 0xe65b0000 0 0x100>;
-			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 331>;
 			power-domains = <&sysc R8A77995_PD_ALWAYS_ON>;
@@ -418,15 +418,15 @@ dmac0: dma-controller@e6700000 {
 			compatible = "renesas,dmac-r8a77995",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe6700000 0 0x10000>;
-			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7";
@@ -446,15 +446,15 @@ dmac1: dma-controller@e7300000 {
 			compatible = "renesas,dmac-r8a77995",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7300000 0 0x10000>;
-			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7";
@@ -474,15 +474,15 @@ dmac2: dma-controller@e7310000 {
 			compatible = "renesas,dmac-r8a77995",
 				     "renesas,rcar-dmac";
 			reg = <0 0xe7310000 0 0x10000>;
-			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH
-				      GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "error",
 					"ch0", "ch1", "ch2", "ch3",
 					"ch4", "ch5", "ch6", "ch7";
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index 21e0105..98bbcaf 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -232,8 +232,7 @@ vccq_sdhi0: regulator-vccq-sdhi0 {
 
 		gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	vcc_sdhi3: regulator-vcc-sdhi3 {
@@ -256,8 +255,7 @@ vccq_sdhi3: regulator-vccq-sdhi3 {
 
 		gpios = <&gpio3 14 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	vga {
diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
index 3ef8917..ff88af8 100644
--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
@@ -120,8 +120,7 @@ vccq_sdhi0: regulator-vccq-sdhi0 {
 
 		gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
 		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
+		states = <3300000 1>, <1800000 0>;
 	};
 
 	x12_clk: x12 {
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index 48fb631..60d9437 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -33,6 +33,8 @@
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-roc-pc-mezzanine.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-rock-pi-4.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-rock960.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-rockpro64-v2.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-rockpro64.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-sapphire.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-sapphire-excavator.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399pro-rock-pi-n10.dtb
diff --git a/arch/arm64/boot/dts/rockchip/px30-evb.dts b/arch/arm64/boot/dts/rockchip/px30-evb.dts
index 936ed7d..0a68025 100644
--- a/arch/arm64/boot/dts/rockchip/px30-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/px30-evb.dts
@@ -112,6 +112,38 @@ &display_subsystem {
 	status = "okay";
 };
 
+&dsi {
+	status = "okay";
+
+	ports {
+		mipi_out: port@1 {
+			reg = <1>;
+
+			mipi_out_panel: endpoint {
+				remote-endpoint = <&mipi_in_panel>;
+			};
+		};
+	};
+
+	panel@0 {
+		compatible = "xinpeng,xpp055c272";
+		reg = <0>;
+		backlight = <&backlight>;
+		iovcc-supply = <&vcc_1v8>;
+		vci-supply = <&vcc3v3_lcd>;
+
+		port {
+			mipi_in_panel: endpoint {
+				remote-endpoint = <&mipi_out_panel>;
+			};
+		};
+	};
+};
+
+&dsi_dphy {
+	status = "okay";
+};
+
 &emmc {
 	bus-width = <8>;
 	cap-mmc-highspeed;
@@ -132,6 +164,11 @@ &gmac {
 	status = "okay";
 };
 
+&gpu {
+	mali-supply = <&vdd_log>;
+	status = "okay";
+};
+
 &i2c0 {
 	status = "okay";
 
@@ -485,6 +522,12 @@ &sdio {
 	status = "okay";
 };
 
+&tsadc {
+	rockchip,hw-tshut-mode = <1>;
+	rockchip,hw-tshut-polarity = <1>;
+	status = "okay";
+};
+
 &u2phy {
 	status = "okay";
 
diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
index 8812b70..75908c5 100644
--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
@@ -10,6 +10,7 @@
 #include <dt-bindings/pinctrl/rockchip.h>
 #include <dt-bindings/power/px30-power.h>
 #include <dt-bindings/soc/rockchip,boot-mode.h>
+#include <dt-bindings/thermal/thermal.h>
 
 / {
 	compatible = "rockchip,px30";
@@ -113,16 +114,11 @@ cpu0_opp_table: cpu0-opp-table {
 		compatible = "operating-points-v2";
 		opp-shared;
 
-		opp-408000000 {
-			opp-hz = /bits/ 64 <408000000>;
-			opp-microvolt = <950000 950000 1350000>;
-			clock-latency-ns = <40000>;
-			opp-suspend;
-		};
 		opp-600000000 {
 			opp-hz = /bits/ 64 <600000000>;
 			opp-microvolt = <950000 950000 1350000>;
 			clock-latency-ns = <40000>;
+			opp-suspend;
 		};
 		opp-816000000 {
 			opp-hz = /bits/ 64 <816000000>;
@@ -181,6 +177,55 @@ timer {
 			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
 	};
 
+	thermal_zones: thermal-zones {
+		soc_thermal: soc-thermal {
+			polling-delay-passive = <20>;
+			polling-delay = <1000>;
+			sustainable-power = <750>;
+			thermal-sensors = <&tsadc 0>;
+
+			trips {
+				threshold: trip-point-0 {
+					temperature = <70000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				target: trip-point-1 {
+					temperature = <85000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				soc_crit: soc-crit {
+					temperature = <115000>;
+					hysteresis = <2000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&target>;
+					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+					contribution = <4096>;
+				};
+
+				map1 {
+					trip = <&target>;
+					cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+					contribution = <4096>;
+				};
+			};
+		};
+
+		gpu_thermal: gpu-thermal {
+			polling-delay-passive = <100>; /* milliseconds */
+			polling-delay = <1000>; /* milliseconds */
+			thermal-sensors = <&tsadc 1>;
+		};
+	};
+
 	xin24m: xin24m {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
@@ -365,6 +410,33 @@ io_domains: io-domains {
 			compatible = "rockchip,px30-io-voltage-domain";
 			status = "disabled";
 		};
+
+		lvds: lvds {
+			compatible = "rockchip,px30-lvds";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			phys = <&dsi_dphy>;
+			phy-names = "dphy";
+			rockchip,grf = <&grf>;
+			rockchip,output = "lvds";
+			status = "disabled";
+
+			port@0 {
+				reg = <0>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				lvds_vopb_in: endpoint@0 {
+					reg = <0>;
+					remote-endpoint = <&vopb_out_lvds>;
+				};
+
+				lvds_vopl_in: endpoint@1 {
+					reg = <1>;
+					remote-endpoint = <&vopl_out_lvds>;
+				};
+			};
+		};
 	};
 
 	uart1: serial@ff158000 {
@@ -645,6 +717,26 @@ dmac: dmac@ff240000 {
 		};
 	};
 
+	tsadc: tsadc@ff280000 {
+		compatible = "rockchip,px30-tsadc";
+		reg = <0x0 0xff280000 0x0 0x100>;
+		interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+		assigned-clocks = <&cru SCLK_TSADC>;
+		assigned-clock-rates = <50000>;
+		clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
+		clock-names = "tsadc", "apb_pclk";
+		resets = <&cru SRST_TSADC>;
+		reset-names = "tsadc-apb";
+		rockchip,grf = <&grf>;
+		rockchip,hw-tshut-temp = <120000>;
+		pinctrl-names = "init", "default", "sleep";
+		pinctrl-0 = <&tsadc_otp_gpio>;
+		pinctrl-1 = <&tsadc_otp_out>;
+		pinctrl-2 = <&tsadc_otp_gpio>;
+		#thermal-sensor-cells = <1>;
+		status = "disabled";
+	};
+
 	saradc: saradc@ff288000 {
 		compatible = "rockchip,px30-saradc", "rockchip,rk3399-saradc";
 		reg = <0x0 0xff288000 0x0 0x100>;
@@ -755,6 +847,18 @@ u2phy_otg: otg-port {
 		};
 	};
 
+	dsi_dphy: phy@ff2e0000 {
+		compatible = "rockchip,px30-dsi-dphy";
+		reg = <0x0 0xff2e0000 0x0 0x10000>;
+		clocks = <&pmucru SCLK_MIPIDSIPHY_REF>, <&cru PCLK_MIPIDSIPHY>;
+		clock-names = "ref", "pclk";
+		resets = <&cru SRST_MIPIDSIPHY_P>;
+		reset-names = "apb";
+		#phy-cells = <0>;
+		power-domains = <&power PX30_PD_VO>;
+		status = "disabled";
+	};
+
 	usb20_otg: usb@ff300000 {
 		compatible = "rockchip,px30-usb", "rockchip,rk3066-usb",
 			     "snps,dwc2";
@@ -820,13 +924,13 @@ gmac: ethernet@ff360000 {
 		status = "disabled";
 	};
 
-	sdmmc: dwmmc@ff370000 {
+	sdmmc: mmc@ff370000 {
 		compatible = "rockchip,px30-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff370000 0x0 0x4000>;
 		interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
 			 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
-		clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
+		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
 		fifo-depth = <0x100>;
 		max-frequency = <150000000>;
 		pinctrl-names = "default";
@@ -835,13 +939,13 @@ sdmmc: dwmmc@ff370000 {
 		status = "disabled";
 	};
 
-	sdio: dwmmc@ff380000 {
+	sdio: mmc@ff380000 {
 		compatible = "rockchip,px30-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff380000 0x0 0x4000>;
 		interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
 			 <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
-		clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
+		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
 		fifo-depth = <0x100>;
 		max-frequency = <150000000>;
 		pinctrl-names = "default";
@@ -850,13 +954,13 @@ sdio: dwmmc@ff380000 {
 		status = "disabled";
 	};
 
-	emmc: dwmmc@ff390000 {
+	emmc: mmc@ff390000 {
 		compatible = "rockchip,px30-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff390000 0x0 0x4000>;
 		interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
 			 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
-		clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
+		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
 		fifo-depth = <0x100>;
 		max-frequency = <150000000>;
 		pinctrl-names = "default";
@@ -865,6 +969,57 @@ emmc: dwmmc@ff390000 {
 		status = "disabled";
 	};
 
+	gpu: gpu@ff400000 {
+		compatible = "rockchip,px30-mali", "arm,mali-bifrost";
+		reg = <0x0 0xff400000 0x0 0x4000>;
+		interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "job", "mmu", "gpu";
+		clocks = <&cru SCLK_GPU>;
+		#cooling-cells = <2>;
+		power-domains = <&power PX30_PD_GPU>;
+		status = "disabled";
+	};
+
+	dsi: dsi@ff450000 {
+		compatible = "rockchip,px30-mipi-dsi";
+		reg = <0x0 0xff450000 0x0 0x10000>;
+		interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru PCLK_MIPI_DSI>;
+		clock-names = "pclk";
+		phys = <&dsi_dphy>;
+		phy-names = "dphy";
+		power-domains = <&power PX30_PD_VO>;
+		resets = <&cru SRST_MIPIDSI_HOST_P>;
+		reset-names = "apb";
+		rockchip,grf = <&grf>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				dsi_in_vopb: endpoint@0 {
+					reg = <0>;
+					remote-endpoint = <&vopb_out_dsi>;
+				};
+
+				dsi_in_vopl: endpoint@1 {
+					reg = <1>;
+					remote-endpoint = <&vopl_out_dsi>;
+				};
+			};
+		};
+	};
+
 	vopb: vop@ff460000 {
 		compatible = "rockchip,px30-vop-big";
 		reg = <0x0 0xff460000 0x0 0xefc>;
@@ -882,6 +1037,16 @@ vopb: vop@ff460000 {
 		vopb_out: port {
 			#address-cells = <1>;
 			#size-cells = <0>;
+
+			vopb_out_dsi: endpoint@0 {
+				reg = <0>;
+				remote-endpoint = <&dsi_in_vopb>;
+			};
+
+			vopb_out_lvds: endpoint@1 {
+				reg = <1>;
+				remote-endpoint = <&lvds_vopb_in>;
+			};
 		};
 	};
 
@@ -914,6 +1079,16 @@ vopl: vop@ff470000 {
 		vopl_out: port {
 			#address-cells = <1>;
 			#size-cells = <0>;
+
+			vopl_out_dsi: endpoint@0 {
+				reg = <0>;
+				remote-endpoint = <&dsi_in_vopl>;
+			};
+
+			vopl_out_lvds: endpoint@1 {
+				reg = <1>;
+				remote-endpoint = <&lvds_vopl_in>;
+			};
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/rockchip/rk3308.dtsi b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
index 8bdc66c..116f190 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
@@ -584,14 +584,14 @@ spdif_tx: spdif-tx@ff3a0000 {
 		status = "disabled";
 	};
 
-	sdmmc: dwmmc@ff480000 {
+	sdmmc: mmc@ff480000 {
 		compatible = "rockchip,rk3308-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff480000 0x0 0x4000>;
 		interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
 		bus-width = <4>;
 		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
 			 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
-		clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
+		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
 		fifo-depth = <0x100>;
 		max-frequency = <150000000>;
 		pinctrl-names = "default";
@@ -599,27 +599,27 @@ sdmmc: dwmmc@ff480000 {
 		status = "disabled";
 	};
 
-	emmc: dwmmc@ff490000 {
+	emmc: mmc@ff490000 {
 		compatible = "rockchip,rk3308-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff490000 0x0 0x4000>;
 		interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
 		bus-width = <8>;
 		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
 			 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
-		clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
+		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
 		fifo-depth = <0x100>;
 		max-frequency = <150000000>;
 		status = "disabled";
 	};
 
-	sdio: dwmmc@ff4a0000 {
+	sdio: mmc@ff4a0000 {
 		compatible = "rockchip,rk3308-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff4a0000 0x0 0x4000>;
 		interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
 		bus-width = <4>;
 		clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
 			 <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
-		clock-names = "biu", "ciu", "ciu-drv", "ciu-sample";
+		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
 		fifo-depth = <0x100>;
 		max-frequency = <150000000>;
 		pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 91306eb..1f53ead 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -41,6 +41,7 @@ cpu0: cpu@0 {
 			reg = <0x0 0x0>;
 			clocks = <&cru ARMCLK>;
 			#cooling-cells = <2>;
+			cpu-idle-states = <&CPU_SLEEP>;
 			dynamic-power-coefficient = <120>;
 			enable-method = "psci";
 			next-level-cache = <&l2>;
@@ -53,6 +54,7 @@ cpu1: cpu@1 {
 			reg = <0x0 0x1>;
 			clocks = <&cru ARMCLK>;
 			#cooling-cells = <2>;
+			cpu-idle-states = <&CPU_SLEEP>;
 			dynamic-power-coefficient = <120>;
 			enable-method = "psci";
 			next-level-cache = <&l2>;
@@ -65,6 +67,7 @@ cpu2: cpu@2 {
 			reg = <0x0 0x2>;
 			clocks = <&cru ARMCLK>;
 			#cooling-cells = <2>;
+			cpu-idle-states = <&CPU_SLEEP>;
 			dynamic-power-coefficient = <120>;
 			enable-method = "psci";
 			next-level-cache = <&l2>;
@@ -77,12 +80,26 @@ cpu3: cpu@3 {
 			reg = <0x0 0x3>;
 			clocks = <&cru ARMCLK>;
 			#cooling-cells = <2>;
+			cpu-idle-states = <&CPU_SLEEP>;
 			dynamic-power-coefficient = <120>;
 			enable-method = "psci";
 			next-level-cache = <&l2>;
 			operating-points-v2 = <&cpu0_opp_table>;
 		};
 
+		idle-states {
+			entry-method = "psci";
+
+			CPU_SLEEP: cpu-sleep {
+				compatible = "arm,idle-state";
+				local-timer-stop;
+				arm,psci-suspend-param = <0x0010000>;
+				entry-latency-us = <120>;
+				exit-latency-us = <250>;
+				min-residency-us = <900>;
+			};
+		};
+
 		l2: l2-cache0 {
 			compatible = "cache";
 		};
@@ -837,7 +854,7 @@ u2phy_host: host-port {
 		};
 	};
 
-	sdmmc: dwmmc@ff500000 {
+	sdmmc: mmc@ff500000 {
 		compatible = "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff500000 0x0 0x4000>;
 		interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
@@ -849,7 +866,7 @@ sdmmc: dwmmc@ff500000 {
 		status = "disabled";
 	};
 
-	sdio: dwmmc@ff510000 {
+	sdio: mmc@ff510000 {
 		compatible = "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff510000 0x0 0x4000>;
 		interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
@@ -861,7 +878,7 @@ sdio: dwmmc@ff510000 {
 		status = "disabled";
 	};
 
-	emmc: dwmmc@ff520000 {
+	emmc: mmc@ff520000 {
 		compatible = "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff520000 0x0 0x4000>;
 		interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts
index 8251f3c..cbde279 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts
@@ -83,12 +83,6 @@ &spi2 {
 	status = "okay";
 };
 
-&uart0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
-	status = "okay";
-};
-
 &usb_otg {
 	dr_mode = "otg";
 	status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index fd86188..a0df61c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -204,7 +204,7 @@ xin24m: oscillator {
 		#clock-cells = <0>;
 	};
 
-	sdmmc: dwmmc@ff0c0000 {
+	sdmmc: mmc@ff0c0000 {
 		compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff0c0000 0x0 0x4000>;
 		max-frequency = <150000000>;
@@ -218,7 +218,7 @@ sdmmc: dwmmc@ff0c0000 {
 		status = "disabled";
 	};
 
-	sdio0: dwmmc@ff0d0000 {
+	sdio0: mmc@ff0d0000 {
 		compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff0d0000 0x0 0x4000>;
 		max-frequency = <150000000>;
@@ -232,7 +232,7 @@ sdio0: dwmmc@ff0d0000 {
 		status = "disabled";
 	};
 
-	emmc: dwmmc@ff0f0000 {
+	emmc: mmc@ff0f0000 {
 		compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff0f0000 0x0 0x4000>;
 		max-frequency = <150000000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
index c706db0..d63faf3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
@@ -206,7 +206,7 @@ vdd_log: vdd-log {
 		regulator-name = "vdd_log";
 		regulator-always-on;
 		regulator-boot-on;
-		regulator-min-microvolt = <800000>;
+		regulator-min-microvolt = <430000>;
 		regulator-max-microvolt = <1400000>;
 		vin-supply = <&vcc_sys>;
 	};
@@ -660,7 +660,6 @@ &sdio0 {
 	keep-power-in-suspend;
 	mmc-pwrseq = <&sdio_pwrseq>;
 	non-removable;
-	num-slots = <1>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>;
 	sd-uhs-sdr104;
@@ -669,9 +668,12 @@ &sdio0 {
 	vqmmc-supply = &vcc1v8_s3;	/* IO line */
 	vmmc-supply = &vcc_sdio;	/* card's power */
 
+	#address-cells = <1>;
+	#size-cells = <0>;
 	status = "okay";
 
 	brcmf: wifi@1 {
+		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 		interrupt-parent = <&gpio0>;
 		interrupts = <RK_PA3 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
index 9dd3b17..e6c1c94 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
@@ -18,7 +18,7 @@ / {
 		     "google,bob", "google,gru", "rockchip,rk3399";
 
 	edp_panel: edp-panel {
-		compatible = "boe,nv101wxmn51", "simple-panel";
+		compatible = "boe,nv101wxmn51";
 		backlight = <&backlight>;
 		power-supply = <&pp3300_disp>;
 
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
index b806686..2bbef9f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
@@ -39,7 +39,7 @@ p3_3v_dig: p3-3v-dig {
 	};
 
 	edp_panel: edp-panel {
-		compatible = "sharp,lq123p1jx31", "simple-panel";
+		compatible = "sharp,lq123p1jx31";
 		backlight = <&backlight>;
 		power-supply = <&pp3300_disp>;
 
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts b/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
index c133e8d..d69a613 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
@@ -556,7 +556,6 @@ &saradc {
 &sdmmc {
 	clock-frequency = <150000000>;
 	clock-freq-min-max = <200000 150000000>;
-	supports-sd;
 	bus-width = <4>;
 	cap-mmc-highspeed;
 	cap-sd-highspeed;
@@ -572,7 +571,6 @@ &sdhci {
 	bus-width = <8>;
 	mmc-hs400-1_8v;
 	mmc-hs400-enhanced-strobe;
-	supports-emmc;
 	non-removable;
 	keep-power-in-suspend;
 	status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
index 4944d78a..e87a044 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
@@ -654,9 +654,12 @@ &sdio0 {
 	sd-uhs-sdr104;
 	vqmmc-supply = <&vcc1v8_s3>;
 	vmmc-supply = <&vccio_sd>;
+	#address-cells = <1>;
+	#size-cells = <0>;
 	status = "okay";
 
 	brcmf: wifi@1 {
+		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 		interrupt-parent = <&gpio0>;
 		interrupts = <RK_PA3 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
index 2a12798..e0d7561 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
@@ -94,31 +94,9 @@ map3 {
 	};
 };
 
-&gpu_thermal {
-	trips {
-		gpu_warm: gpu_warm {
-			temperature = <55000>;
-			hysteresis = <2000>;
-			type = "active";
-		};
-
-		gpu_hot: gpu_hot {
-			temperature = <65000>;
-			hysteresis = <2000>;
-			type = "active";
-		};
-	};
-	cooling-maps {
-		map1 {
-			trip = <&gpu_warm>;
-			cooling-device = <&fan THERMAL_NO_LIMIT 1>;
-		};
-
-		map2 {
-			trip = <&gpu_hot>;
-			cooling-device = <&fan 2 THERMAL_NO_LIMIT>;
-		};
-	};
+&pcie0 {
+	num-lanes = <4>;
+	vpcie3v3-supply = <&vcc3v3_sys>;
 };
 
 &pinctrl {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
index b788ae4..c88018a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
@@ -48,7 +48,7 @@ vcc5v0_sys: vcc5v0-sys {
 	};
 
 	/* switched by pmic_sleep */
-	vcc1v8_s3: vcca1v8_s3: vcc1v8-s3 {
+	vcc1v8_s3: vcc1v8-s3 {
 		compatible = "regulator-fixed";
 		regulator-always-on;
 		regulator-boot-on;
@@ -71,6 +71,27 @@ vcc3v0_sd: vcc3v0-sd {
 		vin-supply = <&vcc3v3_sys>;
 	};
 
+	/*
+	 * Really, this is supplied by vcc_1v8, and vcc1v8_s3 only
+	 * drives the enable pin, but we can't quite model that.
+	 */
+	vcca0v9_s3: vcca0v9-s3 {
+		compatible = "regulator-fixed";
+		regulator-min-microvolt = <900000>;
+		regulator-max-microvolt = <900000>;
+		regulator-name = "vcca0v9_s3";
+		vin-supply = <&vcc1v8_s3>;
+	};
+
+	/* As above, actually supplied by vcc3v3_sys */
+	vcca1v8_s3: vcca1v8-s3 {
+		compatible = "regulator-fixed";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-name = "vcca1v8_s3";
+		vin-supply = <&vcc1v8_s3>;
+	};
+
 	vbus_typec: vbus-typec {
 		compatible = "regulator-fixed";
 		regulator-min-microvolt = <5000000>;
@@ -485,7 +506,9 @@ &pcie_phy {
 &pcie0 {
 	ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>;
 	max-link-speed = <2>;
-	num-lanes = <4>;
+	num-lanes = <2>;
+	vpcie0v9-supply = <&vcca0v9_s3>;
+	vpcie1v8-supply = <&vcca1v8_s3>;
 	status = "okay";
 };
 
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
index 0541dfc..9c659f3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
@@ -648,9 +648,12 @@ &sdio0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>;
 	sd-uhs-sdr104;
+	#address-cells = <1>;
+	#size-cells = <0>;
 	status = "okay";
 
 	brcmf: wifi@1 {
+		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 		interrupt-parent = <&gpio0>;
 		interrupts = <RK_PA3 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-mezzanine.dts b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-mezzanine.dts
index d6b3042..2acb3d5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-mezzanine.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-mezzanine.dts
@@ -32,8 +32,6 @@ vcc3v3_pcie: vcc3v3-pcie {
 		gpio = <&gpio1 RK_PC1 GPIO_ACTIVE_HIGH>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&vcc3v3_pcie_en>;
-		regulator-always-on;
-		regulator-boot-on;
 		regulator-min-microvolt = <3300000>;
 		regulator-max-microvolt = <3300000>;
 		vin-supply = <&dc_12v>;
@@ -50,6 +48,8 @@ &pcie0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pcie_perst>;
 	vpcie3v3-supply = <&vcc3v3_pcie>;
+	vpcie1v8-supply = <&vcc1v8_pmu>;
+	vpcie0v9-supply = <&vcca_0v9>;
 	status = "okay";
 };
 
@@ -70,3 +70,24 @@ pcie_perst: pcie-perst {
 		};
 	};
 };
+
+&sdio0 {
+	bus-width = <4>;
+	cap-sd-highspeed;
+	cap-sdio-irq;
+	keep-power-in-suspend;
+	mmc-pwrseq = <&sdio_pwrseq>;
+	non-removable;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>;
+	sd-uhs-sdr104;
+	vmmc-supply = <&vcc3v3_ngff>;
+	vqmmc-supply = <&vcc_1v8>;
+	status = "okay";
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
index 7e07dae..9f225e9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
@@ -110,20 +110,6 @@ vcc_vbus_typec0: vcc-vbus-typec0 {
 		regulator-max-microvolt = <5000000>;
 	};
 
-	/*
-	 * should be placed inside mp8859, but not until mp8859 has
-	 * its own dt-binding.
-	 */
-	dc_12v: mp8859-dcdc1 {
-		compatible = "regulator-fixed";
-		regulator-name = "dc_12v";
-		regulator-always-on;
-		regulator-boot-on;
-		regulator-min-microvolt = <12000000>;
-		regulator-max-microvolt = <12000000>;
-		vin-supply = <&vcc_vbus_typec0>;
-	};
-
 	/* switched by pmic_sleep */
 	vcc1v8_s3: vcca1v8_s3: vcc1v8-s3 {
 		compatible = "regulator-fixed";
@@ -135,6 +121,19 @@ vcc1v8_s3: vcca1v8_s3: vcc1v8-s3 {
 		vin-supply = <&vcc_1v8>;
 	};
 
+	vcc3v0_sd: vcc3v0-sd {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio4 RK_PD6 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&vcc3v0_sd_en>;
+		regulator-name = "vcc3v0_sd";
+		regulator-boot-on;
+		regulator-min-microvolt = <3000000>;
+		regulator-max-microvolt = <3000000>;
+		vin-supply = <&vcc3v3_sys>;
+	};
+
 	vcc3v3_sys: vcc3v3-sys {
 		compatible = "regulator-fixed";
 		regulator-name = "vcc3v3_sys";
@@ -145,6 +144,16 @@ vcc3v3_sys: vcc3v3-sys {
 		vin-supply = <&dc_12v>;
 	};
 
+	vcca_0v9: vcca-0v9 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcca_0v9";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <900000>;
+		regulator-max-microvolt = <900000>;
+		vin-supply = <&vcc3v3_sys>;
+	};
+
 	/* Actually 3 regulators (host0, 1, 2) controlled by the same gpio */
 	vcc5v0_host: vcc5v0-host-regulator {
 		compatible = "regulator-fixed";
@@ -153,7 +162,6 @@ vcc5v0_host: vcc5v0-host-regulator {
 		pinctrl-names = "default";
 		pinctrl-0 = <&vcc5v0_host_en &hub_rst>;
 		regulator-name = "vcc5v0_host";
-		regulator-always-on;
 		vin-supply = <&vcc_sys>;
 	};
 
@@ -175,7 +183,6 @@ vcc_sys: vcc-sys {
 		pinctrl-names = "default";
 		pinctrl-0 = <&vcc_sys_en>;
 		regulator-name = "vcc_sys";
-		regulator-always-on;
 		regulator-boot-on;
 		regulator-min-microvolt = <5000000>;
 		regulator-max-microvolt = <5000000>;
@@ -188,9 +195,9 @@ vdd_log: vdd-log {
 		regulator-name = "vdd_log";
 		regulator-always-on;
 		regulator-boot-on;
-		regulator-min-microvolt = <800000>;
+		regulator-min-microvolt = <450000>;
 		regulator-max-microvolt = <1400000>;
-		vin-supply = <&vcc3v3_sys>;
+		pwm-supply = <&vcc3v3_sys>;
 	};
 };
 
@@ -238,6 +245,11 @@ &gmac {
 	status = "okay";
 };
 
+&gpu {
+	mali-supply = <&vdd_gpu>;
+	status = "okay";
+};
+
 &hdmi {
 	ddc-i2c-bus = <&i2c3>;
 	pinctrl-names = "default";
@@ -245,6 +257,10 @@ &hdmi {
 	status = "okay";
 };
 
+&hdmi_sound {
+	status = "okay";
+};
+
 &i2c0 {
 	clock-frequency = <400000>;
 	i2c-scl-rising-time-ns = <168>;
@@ -360,7 +376,6 @@ regulator-state-mem {
 
 			vcc_sdio: LDO_REG4 {
 				regulator-name = "vcc_sdio";
-				regulator-always-on;
 				regulator-boot-on;
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <3000000>;
@@ -465,8 +480,6 @@ vdd_gpu: regulator@41 {
 		regulator-min-microvolt = <712500>;
 		regulator-max-microvolt = <1500000>;
 		regulator-ramp-delay = <1000>;
-		regulator-always-on;
-		regulator-boot-on;
 		vin-supply = <&vcc3v3_sys>;
 
 		regulator-state-mem {
@@ -519,6 +532,24 @@ fusb0: usb-typec@22 {
 		vbus-supply = <&vcc_vbus_typec0>;
 		status = "okay";
 	};
+
+	mp8859: regulator@66 {
+		compatible = "mps,mp8859";
+		reg = <0x66>;
+		dc_12v: mp8859_dcdc {
+			regulator-name = "dc_12v";
+			regulator-min-microvolt = <12000000>;
+			regulator-max-microvolt = <12000000>;
+			regulator-always-on;
+			regulator-boot-on;
+			vin-supply = <&vcc_vbus_typec0>;
+
+			regulator-state-mem {
+				regulator-on-in-suspend;
+				regulator-suspend-microvolt = <12000000>;
+			};
+		};
+	};
 };
 
 &i2s0 {
@@ -559,7 +590,7 @@ pwr_key_l: pwr-key-l {
 
 	lcd-panel {
 		lcd_panel_reset: lcd-panel-reset {
-			rockchip,pins = <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>;
+			rockchip,pins = <4 RK_PD5 RK_FUNC_GPIO &pcfg_pull_up>;
 		};
 	};
 
@@ -593,6 +624,12 @@ wifi_enable_h: wifi-enable-h {
 		};
 	};
 
+	sdmmc {
+		vcc3v0_sd_en: vcc3v0-sd-en {
+			rockchip,pins = <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
 	pmic {
 		pmic_int_l: pmic-int-l {
 			rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
@@ -645,24 +682,34 @@ &saradc {
 
 &sdmmc {
 	bus-width = <4>;
-	cap-mmc-highspeed;
 	cap-sd-highspeed;
 	cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
 	disable-wp;
 	max-frequency = <150000000>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
+	sd-uhs-sdr104;
+	vmmc-supply = <&vcc3v0_sd>;
+	vqmmc-supply = <&vcc_sdio>;
 	status = "okay";
 };
 
 &sdhci {
 	bus-width = <8>;
-	mmc-hs400-1_8v;
-	mmc-hs400-enhanced-strobe;
 	non-removable;
 	status = "okay";
 };
 
+&spi1 {
+	status = "okay";
+
+	flash@0 {
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <10000000>;
+	};
+};
+
 &tcphy0 {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
index 188d9df..3923ec0 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
@@ -53,6 +53,16 @@ vcc5v0_sys: vcc-sys {
 		vin-supply = <&vcc12v_dcin>;
 	};
 
+	vcc_0v9: vcc-0v9 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_0v9";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <900000>;
+		regulator-max-microvolt = <900000>;
+		vin-supply = <&vcc3v3_sys>;
+	};
+
 	vcc3v3_pcie: vcc3v3-pcie-regulator {
 		compatible = "regulator-fixed";
 		enable-active-high;
@@ -463,6 +473,22 @@ &pmu_io_domains {
 	pmu1830-supply = <&vcc_3v0>;
 };
 
+&pcie_phy {
+	status = "okay";
+};
+
+&pcie0 {
+	ep-gpios = <&gpio4 RK_PD3 GPIO_ACTIVE_HIGH>;
+	max-link-speed = <2>;
+	num-lanes = <4>;
+	pinctrl-0 = <&pcie_clkreqnb_cpm>;
+	pinctrl-names = "default";
+	vpcie0v9-supply = <&vcc_0v9>;
+	vpcie1v8-supply = <&vcc_1v8>;
+	vpcie3v3-supply = <&vcc3v3_pcie>;
+	status = "okay";
+};
+
 &pinctrl {
 	bt {
 		bt_enable_h: bt-enable-h {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
index c7d48d4..b69f0f2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
@@ -76,6 +76,15 @@ vcc5v0_host: vcc5v0-host-regulator {
 		regulator-always-on;
 		vin-supply = <&vcc5v0_sys>;
 	};
+
+	vcc_0v9: vcc-0v9 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_0v9";
+		regulator-always-on;
+		regulator-min-microvolt = <900000>;
+		regulator-max-microvolt = <900000>;
+		vin-supply = <&vcc3v3_sys>;
+	};
 };
 
 &cpu_l0 {
@@ -384,6 +393,8 @@ &pcie0 {
 	num-lanes = <4>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pcie_clkreqn_cpm>;
+	vpcie0v9-supply = <&vcc_0v9>;
+	vpcie1v8-supply = <&vcca_1v8>;
 	vpcie3v3-supply = <&vcc3v3_pcie>;
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64-v2.dts b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64-v2.dts
new file mode 100644
index 0000000..304e3c51
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64-v2.dts
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2017 Fuzhou Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2018 Akash Gajjar <Akash_Gajjar@mentor.com>
+ * Copyright (c) 2019 Katsuhiro Suzuki <katsuhiro@katsuster.net>
+ */
+
+/dts-v1/;
+#include "rk3399-rockpro64.dtsi"
+
+/ {
+	model = "Pine64 RockPro64 v2.0";
+	compatible = "pine64,rockpro64-v2.0", "pine64,rockpro64", "rockchip,rk3399";
+};
+
+&i2c1 {
+	es8316: codec@10 {
+		compatible = "everest,es8316";
+		reg = <0x10>;
+		clocks = <&cru SCLK_I2S_8CH_OUT>;
+		clock-names = "mclk";
+		#sound-dai-cells = <0>;
+
+		port {
+			es8316_p0_0: endpoint {
+				remote-endpoint = <&i2s1_p0_0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
index 7f4b2eb..4b42717 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
@@ -2,481 +2,18 @@
 /*
  * Copyright (c) 2017 Fuzhou Rockchip Electronics Co., Ltd.
  * Copyright (c) 2018 Akash Gajjar <Akash_Gajjar@mentor.com>
+ * Copyright (c) 2019 Katsuhiro Suzuki <katsuhiro@katsuster.net>
  */
 
 /dts-v1/;
-#include <dt-bindings/input/linux-event-codes.h>
-#include <dt-bindings/pwm/pwm.h>
-#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
+#include "rk3399-rockpro64.dtsi"
 
 / {
-	model = "Pine64 RockPro64";
-	compatible = "pine64,rockpro64", "rockchip,rk3399";
-
-	chosen {
-		stdout-path = "serial2:1500000n8";
-	};
-
-	clkin_gmac: external-gmac-clock {
-		compatible = "fixed-clock";
-		clock-frequency = <125000000>;
-		clock-output-names = "clkin_gmac";
-		#clock-cells = <0>;
-	};
-
-	gpio-keys {
-		compatible = "gpio-keys";
-		autorepeat;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pwrbtn>;
-
-		power {
-			debounce-interval = <100>;
-			gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>;
-			label = "GPIO Key Power";
-			linux,code = <KEY_POWER>;
-			wakeup-source;
-		};
-	};
-
-	leds {
-		compatible = "gpio-leds";
-		pinctrl-names = "default";
-		pinctrl-0 = <&work_led_gpio>, <&diy_led_gpio>;
-
-		work-led {
-			label = "work";
-			default-state = "on";
-			gpios = <&gpio0 RK_PB3 GPIO_ACTIVE_HIGH>;
-		};
-
-		diy-led {
-			label = "diy";
-			default-state = "off";
-			gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
-		};
-	};
-
-	fan: pwm-fan {
-		compatible = "pwm-fan";
-		#cooling-cells = <2>;
-		fan-supply = <&vcc12v_dcin>;
-		pwms = <&pwm1 0 50000 0>;
-	};
-
-	sdio_pwrseq: sdio-pwrseq {
-		compatible = "mmc-pwrseq-simple";
-		clocks = <&rk808 1>;
-		clock-names = "ext_clock";
-		pinctrl-names = "default";
-		pinctrl-0 = <&wifi_enable_h>;
-
-		/*
-		 * On the module itself this is one of these (depending
-		 * on the actual card populated):
-		 * - SDIO_RESET_L_WL_REG_ON
-		 * - PDN (power down when low)
-		 */
-		reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
-	};
-
-	sound {
-		compatible = "audio-graph-card";
-		label = "rockchip,rk3399";
-		dais = <&i2s1_p0>;
-	};
-
-	vcc12v_dcin: vcc12v-dcin {
-		compatible = "regulator-fixed";
-		regulator-name = "vcc12v_dcin";
-		regulator-always-on;
-		regulator-boot-on;
-		regulator-min-microvolt = <12000000>;
-		regulator-max-microvolt = <12000000>;
-	};
-
-	/* switched by pmic_sleep */
-	vcc1v8_s3: vcca1v8_s3: vcc1v8-s3 {
-		compatible = "regulator-fixed";
-		regulator-name = "vcc1v8_s3";
-		regulator-always-on;
-		regulator-boot-on;
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <1800000>;
-		vin-supply = <&vcc_1v8>;
-	};
-
-	vcc3v3_pcie: vcc3v3-pcie-regulator {
-		compatible = "regulator-fixed";
-		enable-active-high;
-		gpio = <&gpio1 RK_PD0 GPIO_ACTIVE_HIGH>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pcie_pwr_en>;
-		regulator-name = "vcc3v3_pcie";
-		regulator-always-on;
-		regulator-boot-on;
-		vin-supply = <&vcc12v_dcin>;
-	};
-
-	vcc3v3_sys: vcc3v3-sys {
-		compatible = "regulator-fixed";
-		regulator-name = "vcc3v3_sys";
-		regulator-always-on;
-		regulator-boot-on;
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-		vin-supply = <&vcc5v0_sys>;
-	};
-
-	/* Actually 3 regulators (host0, 1, 2) controlled by the same gpio */
-	vcc5v0_host: vcc5v0-host-regulator {
-		compatible = "regulator-fixed";
-		enable-active-high;
-		gpio = <&gpio4 RK_PD2 GPIO_ACTIVE_HIGH>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&vcc5v0_host_en>;
-		regulator-name = "vcc5v0_host";
-		regulator-always-on;
-		vin-supply = <&vcc5v0_usb>;
-	};
-
-	vcc5v0_typec: vcc5v0-typec-regulator {
-		compatible = "regulator-fixed";
-		enable-active-high;
-		gpio = <&gpio1 RK_PA3 GPIO_ACTIVE_HIGH>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&vcc5v0_typec_en>;
-		regulator-name = "vcc5v0_typec";
-		regulator-always-on;
-		vin-supply = <&vcc5v0_usb>;
-	};
-
-	vcc5v0_sys: vcc5v0-sys {
-		compatible = "regulator-fixed";
-		regulator-name = "vcc5v0_sys";
-		regulator-always-on;
-		regulator-boot-on;
-		regulator-min-microvolt = <5000000>;
-		regulator-max-microvolt = <5000000>;
-		vin-supply = <&vcc12v_dcin>;
-	};
-
-	vcc5v0_usb: vcc5v0-usb {
-		compatible = "regulator-fixed";
-		regulator-name = "vcc5v0_usb";
-		regulator-always-on;
-		regulator-boot-on;
-		regulator-min-microvolt = <5000000>;
-		regulator-max-microvolt = <5000000>;
-		vin-supply = <&vcc12v_dcin>;
-	};
-
-	vdd_log: vdd-log {
-		compatible = "pwm-regulator";
-		pwms = <&pwm2 0 25000 1>;
-		regulator-name = "vdd_log";
-		regulator-always-on;
-		regulator-boot-on;
-		regulator-min-microvolt = <800000>;
-		regulator-max-microvolt = <1700000>;
-		vin-supply = <&vcc5v0_sys>;
-	};
-};
-
-&cpu_l0 {
-	cpu-supply = <&vdd_cpu_l>;
-};
-
-&cpu_l1 {
-	cpu-supply = <&vdd_cpu_l>;
-};
-
-&cpu_l2 {
-	cpu-supply = <&vdd_cpu_l>;
-};
-
-&cpu_l3 {
-	cpu-supply = <&vdd_cpu_l>;
-};
-
-&cpu_b0 {
-	cpu-supply = <&vdd_cpu_b>;
-};
-
-&cpu_b1 {
-	cpu-supply = <&vdd_cpu_b>;
-};
-
-&emmc_phy {
-	status = "okay";
-};
-
-&gmac {
-	assigned-clocks = <&cru SCLK_RMII_SRC>;
-	assigned-clock-parents = <&clkin_gmac>;
-	clock_in_out = "input";
-	phy-supply = <&vcc_lan>;
-	phy-mode = "rgmii";
-	pinctrl-names = "default";
-	pinctrl-0 = <&rgmii_pins>;
-	snps,reset-gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>;
-	snps,reset-active-low;
-	snps,reset-delays-us = <0 10000 50000>;
-	tx_delay = <0x28>;
-	rx_delay = <0x11>;
-	status = "okay";
-};
-
-&hdmi {
-	ddc-i2c-bus = <&i2c3>;
-	pinctrl-names = "default";
-	pinctrl-0 = <&hdmi_cec>;
-	status = "okay";
-};
-
-&hdmi_sound {
-	status = "okay";
-};
-
-&gpu {
-	mali-supply = <&vdd_gpu>;
-	status = "okay";
-};
-
-&i2c0 {
-	clock-frequency = <400000>;
-	i2c-scl-rising-time-ns = <168>;
-	i2c-scl-falling-time-ns = <4>;
-	status = "okay";
-
-	rk808: pmic@1b {
-		compatible = "rockchip,rk808";
-		reg = <0x1b>;
-		interrupt-parent = <&gpio3>;
-		interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
-		#clock-cells = <1>;
-		clock-output-names = "xin32k", "rk808-clkout2";
-		pinctrl-names = "default";
-		pinctrl-0 = <&pmic_int_l>;
-		rockchip,system-power-controller;
-		wakeup-source;
-
-		vcc1-supply = <&vcc5v0_sys>;
-		vcc2-supply = <&vcc5v0_sys>;
-		vcc3-supply = <&vcc5v0_sys>;
-		vcc4-supply = <&vcc5v0_sys>;
-		vcc6-supply = <&vcc5v0_sys>;
-		vcc7-supply = <&vcc5v0_sys>;
-		vcc8-supply = <&vcc3v3_sys>;
-		vcc9-supply = <&vcc5v0_sys>;
-		vcc10-supply = <&vcc5v0_sys>;
-		vcc11-supply = <&vcc5v0_sys>;
-		vcc12-supply = <&vcc3v3_sys>;
-		vddio-supply = <&vcca_1v8>;
-
-		regulators {
-			vdd_center: DCDC_REG1 {
-				regulator-name = "vdd_center";
-				regulator-always-on;
-				regulator-boot-on;
-				regulator-min-microvolt = <750000>;
-				regulator-max-microvolt = <1350000>;
-				regulator-ramp-delay = <6001>;
-				regulator-state-mem {
-					regulator-off-in-suspend;
-				};
-			};
-
-			vdd_cpu_l: DCDC_REG2 {
-				regulator-name = "vdd_cpu_l";
-				regulator-always-on;
-				regulator-boot-on;
-				regulator-min-microvolt = <750000>;
-				regulator-max-microvolt = <1350000>;
-				regulator-ramp-delay = <6001>;
-				regulator-state-mem {
-					regulator-off-in-suspend;
-				};
-			};
-
-			vcc_ddr: DCDC_REG3 {
-				regulator-name = "vcc_ddr";
-				regulator-always-on;
-				regulator-boot-on;
-				regulator-state-mem {
-					regulator-on-in-suspend;
-				};
-			};
-
-			vcc_1v8: DCDC_REG4 {
-				regulator-name = "vcc_1v8";
-				regulator-always-on;
-				regulator-boot-on;
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <1800000>;
-				regulator-state-mem {
-					regulator-on-in-suspend;
-					regulator-suspend-microvolt = <1800000>;
-				};
-			};
-
-			vcc1v8_dvp: LDO_REG1 {
-				regulator-name = "vcc1v8_dvp";
-				regulator-always-on;
-				regulator-boot-on;
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <1800000>;
-				regulator-state-mem {
-					regulator-off-in-suspend;
-				};
-			};
-
-			vcc3v0_touch: LDO_REG2 {
-				regulator-name = "vcc3v0_touch";
-				regulator-always-on;
-				regulator-boot-on;
-				regulator-min-microvolt = <3000000>;
-				regulator-max-microvolt = <3000000>;
-				regulator-state-mem {
-					regulator-off-in-suspend;
-				};
-			};
-
-			vcca_1v8: LDO_REG3 {
-				regulator-name = "vcca_1v8";
-				regulator-always-on;
-				regulator-boot-on;
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <1800000>;
-				regulator-state-mem {
-					regulator-on-in-suspend;
-					regulator-suspend-microvolt = <1800000>;
-				};
-			};
-
-			vcc_sdio: LDO_REG4 {
-				regulator-name = "vcc_sdio";
-				regulator-always-on;
-				regulator-boot-on;
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <3000000>;
-				regulator-state-mem {
-					regulator-on-in-suspend;
-					regulator-suspend-microvolt = <3000000>;
-				};
-			};
-
-			vcca3v0_codec: LDO_REG5 {
-				regulator-name = "vcca3v0_codec";
-				regulator-always-on;
-				regulator-boot-on;
-				regulator-min-microvolt = <3000000>;
-				regulator-max-microvolt = <3000000>;
-				regulator-state-mem {
-					regulator-off-in-suspend;
-				};
-			};
-
-			vcc_1v5: LDO_REG6 {
-				regulator-name = "vcc_1v5";
-				regulator-always-on;
-				regulator-boot-on;
-				regulator-min-microvolt = <1500000>;
-				regulator-max-microvolt = <1500000>;
-				regulator-state-mem {
-					regulator-on-in-suspend;
-					regulator-suspend-microvolt = <1500000>;
-				};
-			};
-
-			vcca1v8_codec: LDO_REG7 {
-				regulator-name = "vcca1v8_codec";
-				regulator-always-on;
-				regulator-boot-on;
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <1800000>;
-				regulator-state-mem {
-					regulator-off-in-suspend;
-				};
-			};
-
-			vcc_3v0: LDO_REG8 {
-				regulator-name = "vcc_3v0";
-				regulator-always-on;
-				regulator-boot-on;
-				regulator-min-microvolt = <3000000>;
-				regulator-max-microvolt = <3000000>;
-				regulator-state-mem {
-					regulator-on-in-suspend;
-					regulator-suspend-microvolt = <3000000>;
-				};
-			};
-
-			vcc3v3_s3: vcc_lan: SWITCH_REG1 {
-				regulator-name = "vcc3v3_s3";
-				regulator-always-on;
-				regulator-boot-on;
-				regulator-state-mem {
-					regulator-off-in-suspend;
-				};
-			};
-
-			vcc3v3_s0: SWITCH_REG2 {
-				regulator-name = "vcc3v3_s0";
-				regulator-always-on;
-				regulator-boot-on;
-				regulator-state-mem {
-					regulator-off-in-suspend;
-				};
-			};
-		};
-	};
-
-	vdd_cpu_b: regulator@40 {
-		compatible = "silergy,syr827";
-		reg = <0x40>;
-		fcs,suspend-voltage-selector = <1>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&vsel1_gpio>;
-		regulator-name = "vdd_cpu_b";
-		regulator-min-microvolt = <712500>;
-		regulator-max-microvolt = <1500000>;
-		regulator-ramp-delay = <1000>;
-		regulator-always-on;
-		regulator-boot-on;
-		vin-supply = <&vcc5v0_sys>;
-
-		regulator-state-mem {
-			regulator-off-in-suspend;
-		};
-	};
-
-	vdd_gpu: regulator@41 {
-		compatible = "silergy,syr828";
-		reg = <0x41>;
-		fcs,suspend-voltage-selector = <1>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&vsel2_gpio>;
-		regulator-name = "vdd_gpu";
-		regulator-min-microvolt = <712500>;
-		regulator-max-microvolt = <1500000>;
-		regulator-ramp-delay = <1000>;
-		regulator-always-on;
-		regulator-boot-on;
-		vin-supply = <&vcc5v0_sys>;
-
-		regulator-state-mem {
-			regulator-off-in-suspend;
-		};
-	};
+	model = "Pine64 RockPro64 v2.1";
+	compatible = "pine64,rockpro64-v2.1", "pine64,rockpro64", "rockchip,rk3399";
 };
 
 &i2c1 {
-	i2c-scl-rising-time-ns = <300>;
-	i2c-scl-falling-time-ns = <15>;
-	status = "okay";
-
 	es8316: codec@11 {
 		compatible = "everest,es8316";
 		reg = <0x11>;
@@ -491,291 +28,3 @@ es8316_p0_0: endpoint {
 		};
 	};
 };
-
-&i2c3 {
-	i2c-scl-rising-time-ns = <450>;
-	i2c-scl-falling-time-ns = <15>;
-	status = "okay";
-};
-
-&i2c4 {
-	i2c-scl-rising-time-ns = <600>;
-	i2c-scl-falling-time-ns = <20>;
-	status = "okay";
-
-	fusb0: typec-portc@22 {
-		compatible = "fcs,fusb302";
-		reg = <0x22>;
-		interrupt-parent = <&gpio1>;
-		interrupts = <RK_PA2 IRQ_TYPE_LEVEL_LOW>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&fusb0_int>;
-		vbus-supply = <&vcc5v0_typec>;
-		status = "okay";
-	};
-};
-
-&i2s0 {
-	rockchip,playback-channels = <8>;
-	rockchip,capture-channels = <8>;
-	status = "okay";
-};
-
-&i2s1 {
-	rockchip,playback-channels = <2>;
-	rockchip,capture-channels = <2>;
-	status = "okay";
-
-	i2s1_p0: port {
-		i2s1_p0_0: endpoint {
-			dai-format = "i2s";
-			mclk-fs = <256>;
-			remote-endpoint = <&es8316_p0_0>;
-		};
-	};
-};
-
-&i2s2 {
-	status = "okay";
-};
-
-&io_domains {
-	status = "okay";
-
-	bt656-supply = <&vcc1v8_dvp>;
-	audio-supply = <&vcc_3v0>;
-	sdmmc-supply = <&vcc_sdio>;
-	gpio1830-supply = <&vcc_3v0>;
-};
-
-&pcie0 {
-	ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
-	num-lanes = <4>;
-	pinctrl-names = "default";
-	pinctrl-0 = <&pcie_perst>;
-	vpcie12v-supply = <&vcc12v_dcin>;
-	vpcie3v3-supply = <&vcc3v3_pcie>;
-	status = "okay";
-};
-
-&pcie_phy {
-	status = "okay";
-};
-
-&pmu_io_domains {
-	pmu1830-supply = <&vcc_3v0>;
-	status = "okay";
-};
-
-&pinctrl {
-	buttons {
-		pwrbtn: pwrbtn {
-			rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
-		};
-	};
-
-	fusb302x {
-		fusb0_int: fusb0-int {
-			rockchip,pins = <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
-		};
-	};
-
-	leds {
-		work_led_gpio: work_led-gpio {
-			rockchip,pins = <0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
-		};
-
-		diy_led_gpio: diy_led-gpio {
-			rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
-		};
-	};
-
-	pcie {
-		pcie_perst: pcie-perst {
-			rockchip,pins = <2 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
-		};
-
-		pcie_pwr_en: pcie-pwr-en {
-			rockchip,pins = <1 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
-		};
-	};
-
-	pmic {
-		pmic_int_l: pmic-int-l {
-			rockchip,pins = <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
-		};
-
-		vsel1_gpio: vsel1-gpio {
-			rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>;
-		};
-
-		vsel2_gpio: vsel2-gpio {
-			rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>;
-		};
-	};
-
-	sdio-pwrseq {
-		wifi_enable_h: wifi-enable-h {
-			rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
-		};
-	};
-
-	usb-typec {
-		vcc5v0_typec_en: vcc5v0_typec_en {
-			rockchip,pins = <1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
-		};
-	};
-
-	usb2 {
-		vcc5v0_host_en: vcc5v0-host-en {
-			rockchip,pins = <4 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
-		};
-	};
-};
-
-&pwm0 {
-	status = "okay";
-};
-
-&pwm1 {
-	status = "okay";
-};
-
-&pwm2 {
-	status = "okay";
-};
-
-&saradc {
-	vref-supply = <&vcca1v8_s3>;
-	status = "okay";
-};
-
-&sdmmc {
-	bus-width = <4>;
-	cap-sd-highspeed;
-	cd-gpios = <&gpio0 7 GPIO_ACTIVE_LOW>;
-	disable-wp;
-	max-frequency = <150000000>;
-	pinctrl-names = "default";
-	pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
-	status = "okay";
-};
-
-&sdhci {
-	bus-width = <8>;
-	mmc-hs200-1_8v;
-	non-removable;
-	status = "okay";
-};
-
-&spi1 {
-	status = "okay";
-
-	flash@0 {
-		compatible = "jedec,spi-nor";
-		reg = <0>;
-		spi-max-frequency = <10000000>;
-	};
-};
-
-&tcphy0 {
-	status = "okay";
-};
-
-&tcphy1 {
-	status = "okay";
-};
-
-&tsadc {
-	/* tshut mode 0:CRU 1:GPIO */
-	rockchip,hw-tshut-mode = <1>;
-	/* tshut polarity 0:LOW 1:HIGH */
-	rockchip,hw-tshut-polarity = <1>;
-	status = "okay";
-};
-
-&u2phy0 {
-	status = "okay";
-
-	u2phy0_otg: otg-port {
-		status = "okay";
-	};
-
-	u2phy0_host: host-port {
-		phy-supply = <&vcc5v0_host>;
-		status = "okay";
-	};
-};
-
-&u2phy1 {
-	status = "okay";
-
-	u2phy1_otg: otg-port {
-		status = "okay";
-	};
-
-	u2phy1_host: host-port {
-		phy-supply = <&vcc5v0_host>;
-		status = "okay";
-	};
-};
-
-&uart0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&uart0_xfer &uart0_cts>;
-	status = "okay";
-};
-
-&uart2 {
-	status = "okay";
-};
-
-&usb_host0_ehci {
-	status = "okay";
-};
-
-&usb_host0_ohci {
-	status = "okay";
-};
-
-&usb_host1_ehci {
-	status = "okay";
-};
-
-&usb_host1_ohci {
-	status = "okay";
-};
-
-&usbdrd3_0 {
-	status = "okay";
-};
-
-&usbdrd_dwc3_0 {
-	status = "okay";
-	dr_mode = "otg";
-};
-
-&usbdrd3_1 {
-	status = "okay";
-};
-
-&usbdrd_dwc3_1 {
-	status = "okay";
-	dr_mode = "host";
-};
-
-&vopb {
-	status = "okay";
-};
-
-&vopb_mmu {
-	status = "okay";
-};
-
-&vopl {
-	status = "okay";
-};
-
-&vopl_mmu {
-	status = "okay";
-};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
new file mode 100644
index 0000000..9bca258
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
@@ -0,0 +1,797 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2017 Fuzhou Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2018 Akash Gajjar <Akash_Gajjar@mentor.com>
+ */
+
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/pwm/pwm.h>
+#include "rk3399.dtsi"
+#include "rk3399-opp.dtsi"
+
+/ {
+	chosen {
+		stdout-path = "serial2:1500000n8";
+	};
+
+	clkin_gmac: external-gmac-clock {
+		compatible = "fixed-clock";
+		clock-frequency = <125000000>;
+		clock-output-names = "clkin_gmac";
+		#clock-cells = <0>;
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		autorepeat;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pwrbtn>;
+
+		power {
+			debounce-interval = <100>;
+			gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>;
+			label = "GPIO Key Power";
+			linux,code = <KEY_POWER>;
+			wakeup-source;
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&work_led_gpio>, <&diy_led_gpio>;
+
+		work-led {
+			label = "work";
+			default-state = "on";
+			gpios = <&gpio0 RK_PB3 GPIO_ACTIVE_HIGH>;
+		};
+
+		diy-led {
+			label = "diy";
+			default-state = "off";
+			gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	fan: pwm-fan {
+		compatible = "pwm-fan";
+		#cooling-cells = <2>;
+		fan-supply = <&vcc12v_dcin>;
+		pwms = <&pwm1 0 50000 0>;
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		clocks = <&rk808 1>;
+		clock-names = "ext_clock";
+		pinctrl-names = "default";
+		pinctrl-0 = <&wifi_enable_h>;
+		reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+	};
+
+	sound {
+		compatible = "audio-graph-card";
+		label = "rockchip,rk3399";
+		dais = <&i2s1_p0>;
+	};
+
+	vcc12v_dcin: vcc12v-dcin {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc12v_dcin";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <12000000>;
+		regulator-max-microvolt = <12000000>;
+	};
+
+	/* switched by pmic_sleep */
+	vcc1v8_s3: vcca1v8_s3: vcc1v8-s3 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc1v8_s3";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		vin-supply = <&vcc_1v8>;
+	};
+
+	vcc3v3_pcie: vcc3v3-pcie-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio1 RK_PD0 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pcie_pwr_en>;
+		regulator-name = "vcc3v3_pcie";
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc12v_dcin>;
+	};
+
+	vcc3v3_sys: vcc3v3-sys {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc3v3_sys";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		vin-supply = <&vcc5v0_sys>;
+	};
+
+	/* Actually 3 regulators (host0, 1, 2) controlled by the same gpio */
+	vcc5v0_host: vcc5v0-host-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio4 RK_PD2 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&vcc5v0_host_en>;
+		regulator-name = "vcc5v0_host";
+		regulator-always-on;
+		vin-supply = <&vcc5v0_usb>;
+	};
+
+	vcc5v0_typec: vcc5v0-typec-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio1 RK_PA3 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&vcc5v0_typec_en>;
+		regulator-name = "vcc5v0_typec";
+		regulator-always-on;
+		vin-supply = <&vcc5v0_usb>;
+	};
+
+	vcc5v0_sys: vcc5v0-sys {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc5v0_sys";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		vin-supply = <&vcc12v_dcin>;
+	};
+
+	vcc5v0_usb: vcc5v0-usb {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc5v0_usb";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		vin-supply = <&vcc12v_dcin>;
+	};
+
+	vdd_log: vdd-log {
+		compatible = "pwm-regulator";
+		pwms = <&pwm2 0 25000 1>;
+		regulator-name = "vdd_log";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <800000>;
+		regulator-max-microvolt = <1700000>;
+		vin-supply = <&vcc5v0_sys>;
+	};
+};
+
+&cpu_l0 {
+	cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l1 {
+	cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l2 {
+	cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l3 {
+	cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_b0 {
+	cpu-supply = <&vdd_cpu_b>;
+};
+
+&cpu_b1 {
+	cpu-supply = <&vdd_cpu_b>;
+};
+
+&emmc_phy {
+	status = "okay";
+};
+
+&gmac {
+	assigned-clocks = <&cru SCLK_RMII_SRC>;
+	assigned-clock-parents = <&clkin_gmac>;
+	clock_in_out = "input";
+	phy-supply = <&vcc_lan>;
+	phy-mode = "rgmii";
+	pinctrl-names = "default";
+	pinctrl-0 = <&rgmii_pins>;
+	snps,reset-gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>;
+	snps,reset-active-low;
+	snps,reset-delays-us = <0 10000 50000>;
+	tx_delay = <0x28>;
+	rx_delay = <0x11>;
+	status = "okay";
+};
+
+&hdmi {
+	ddc-i2c-bus = <&i2c3>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&hdmi_cec>;
+	status = "okay";
+};
+
+&hdmi_sound {
+	status = "okay";
+};
+
+&gpu {
+	mali-supply = <&vdd_gpu>;
+	status = "okay";
+};
+
+&i2c0 {
+	clock-frequency = <400000>;
+	i2c-scl-rising-time-ns = <168>;
+	i2c-scl-falling-time-ns = <4>;
+	status = "okay";
+
+	rk808: pmic@1b {
+		compatible = "rockchip,rk808";
+		reg = <0x1b>;
+		interrupt-parent = <&gpio3>;
+		interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
+		#clock-cells = <1>;
+		clock-output-names = "xin32k", "rk808-clkout2";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pmic_int_l>;
+		rockchip,system-power-controller;
+		wakeup-source;
+
+		vcc1-supply = <&vcc5v0_sys>;
+		vcc2-supply = <&vcc5v0_sys>;
+		vcc3-supply = <&vcc5v0_sys>;
+		vcc4-supply = <&vcc5v0_sys>;
+		vcc6-supply = <&vcc5v0_sys>;
+		vcc7-supply = <&vcc5v0_sys>;
+		vcc8-supply = <&vcc3v3_sys>;
+		vcc9-supply = <&vcc5v0_sys>;
+		vcc10-supply = <&vcc5v0_sys>;
+		vcc11-supply = <&vcc5v0_sys>;
+		vcc12-supply = <&vcc3v3_sys>;
+		vddio-supply = <&vcca_1v8>;
+
+		regulators {
+			vdd_center: DCDC_REG1 {
+				regulator-name = "vdd_center";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <6001>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vdd_cpu_l: DCDC_REG2 {
+				regulator-name = "vdd_cpu_l";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <6001>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc_ddr: DCDC_REG3 {
+				regulator-name = "vcc_ddr";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+				};
+			};
+
+			vcc_1v8: DCDC_REG4 {
+				regulator-name = "vcc_1v8";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1800000>;
+				};
+			};
+
+			vcc1v8_dvp: LDO_REG1 {
+				regulator-name = "vcc1v8_dvp";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc3v0_touch: LDO_REG2 {
+				regulator-name = "vcc3v0_touch";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcca_1v8: LDO_REG3 {
+				regulator-name = "vcca_1v8";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1800000>;
+				};
+			};
+
+			vcc_sdio: LDO_REG4 {
+				regulator-name = "vcc_sdio";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <3000000>;
+				};
+			};
+
+			vcca3v0_codec: LDO_REG5 {
+				regulator-name = "vcca3v0_codec";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc_1v5: LDO_REG6 {
+				regulator-name = "vcc_1v5";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1500000>;
+				};
+			};
+
+			vcca1v8_codec: LDO_REG7 {
+				regulator-name = "vcca1v8_codec";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc_3v0: LDO_REG8 {
+				regulator-name = "vcc_3v0";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <3000000>;
+				};
+			};
+
+			vcc3v3_s3: vcc_lan: SWITCH_REG1 {
+				regulator-name = "vcc3v3_s3";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc3v3_s0: SWITCH_REG2 {
+				regulator-name = "vcc3v3_s0";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+		};
+	};
+
+	vdd_cpu_b: regulator@40 {
+		compatible = "silergy,syr827";
+		reg = <0x40>;
+		fcs,suspend-voltage-selector = <1>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&vsel1_gpio>;
+		regulator-name = "vdd_cpu_b";
+		regulator-min-microvolt = <712500>;
+		regulator-max-microvolt = <1500000>;
+		regulator-ramp-delay = <1000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc5v0_sys>;
+
+		regulator-state-mem {
+			regulator-off-in-suspend;
+		};
+	};
+
+	vdd_gpu: regulator@41 {
+		compatible = "silergy,syr828";
+		reg = <0x41>;
+		fcs,suspend-voltage-selector = <1>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&vsel2_gpio>;
+		regulator-name = "vdd_gpu";
+		regulator-min-microvolt = <712500>;
+		regulator-max-microvolt = <1500000>;
+		regulator-ramp-delay = <1000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc5v0_sys>;
+
+		regulator-state-mem {
+			regulator-off-in-suspend;
+		};
+	};
+};
+
+&i2c1 {
+	i2c-scl-rising-time-ns = <300>;
+	i2c-scl-falling-time-ns = <15>;
+	status = "okay";
+};
+
+&i2c3 {
+	i2c-scl-rising-time-ns = <450>;
+	i2c-scl-falling-time-ns = <15>;
+	status = "okay";
+};
+
+&i2c4 {
+	i2c-scl-rising-time-ns = <600>;
+	i2c-scl-falling-time-ns = <20>;
+	status = "okay";
+
+	fusb0: typec-portc@22 {
+		compatible = "fcs,fusb302";
+		reg = <0x22>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <RK_PA2 IRQ_TYPE_LEVEL_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&fusb0_int>;
+		vbus-supply = <&vcc5v0_typec>;
+		status = "okay";
+	};
+};
+
+&i2s0 {
+	rockchip,playback-channels = <8>;
+	rockchip,capture-channels = <8>;
+	status = "okay";
+};
+
+&i2s1 {
+	rockchip,playback-channels = <2>;
+	rockchip,capture-channels = <2>;
+	status = "okay";
+
+	i2s1_p0: port {
+		i2s1_p0_0: endpoint {
+			dai-format = "i2s";
+			mclk-fs = <256>;
+			remote-endpoint = <&es8316_p0_0>;
+		};
+	};
+};
+
+&i2s2 {
+	status = "okay";
+};
+
+&io_domains {
+	status = "okay";
+
+	bt656-supply = <&vcc1v8_dvp>;
+	audio-supply = <&vcc_3v0>;
+	sdmmc-supply = <&vcc_sdio>;
+	gpio1830-supply = <&vcc_3v0>;
+};
+
+&pcie0 {
+	ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
+	num-lanes = <4>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pcie_perst>;
+	vpcie12v-supply = <&vcc12v_dcin>;
+	vpcie3v3-supply = <&vcc3v3_pcie>;
+	status = "okay";
+};
+
+&pcie_phy {
+	status = "okay";
+};
+
+&pmu_io_domains {
+	pmu1830-supply = <&vcc_3v0>;
+	status = "okay";
+};
+
+&pinctrl {
+	bt {
+		bt_enable_h: bt-enable-h {
+			rockchip,pins = <0 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		bt_host_wake_l: bt-host-wake-l {
+			rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_down>;
+		};
+
+		bt_wake_l: bt-wake-l {
+			rockchip,pins = <2 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	buttons {
+		pwrbtn: pwrbtn {
+			rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	fusb302x {
+		fusb0_int: fusb0-int {
+			rockchip,pins = <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	leds {
+		work_led_gpio: work_led-gpio {
+			rockchip,pins = <0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		diy_led_gpio: diy_led-gpio {
+			rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	pcie {
+		pcie_perst: pcie-perst {
+			rockchip,pins = <2 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		pcie_pwr_en: pcie-pwr-en {
+			rockchip,pins = <1 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	pmic {
+		pmic_int_l: pmic-int-l {
+			rockchip,pins = <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+
+		vsel1_gpio: vsel1-gpio {
+			rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>;
+		};
+
+		vsel2_gpio: vsel2-gpio {
+			rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>;
+		};
+	};
+
+	sdio-pwrseq {
+		wifi_enable_h: wifi-enable-h {
+			rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	usb-typec {
+		vcc5v0_typec_en: vcc5v0_typec_en {
+			rockchip,pins = <1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	usb2 {
+		vcc5v0_host_en: vcc5v0-host-en {
+			rockchip,pins = <4 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+};
+
+&pwm0 {
+	status = "okay";
+};
+
+&pwm1 {
+	status = "okay";
+};
+
+&pwm2 {
+	status = "okay";
+};
+
+&saradc {
+	vref-supply = <&vcca1v8_s3>;
+	status = "okay";
+};
+
+&sdio0 {
+	bus-width = <4>;
+	cap-sd-highspeed;
+	cap-sdio-irq;
+	disable-wp;
+	keep-power-in-suspend;
+	mmc-pwrseq = <&sdio_pwrseq>;
+	non-removable;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>;
+	sd-uhs-sdr104;
+	status = "okay";
+};
+
+&sdmmc {
+	bus-width = <4>;
+	cap-sd-highspeed;
+	cd-gpios = <&gpio0 7 GPIO_ACTIVE_LOW>;
+	disable-wp;
+	max-frequency = <150000000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
+	status = "okay";
+};
+
+&sdhci {
+	bus-width = <8>;
+	mmc-hs200-1_8v;
+	non-removable;
+	status = "okay";
+};
+
+&spi1 {
+	status = "okay";
+
+	flash@0 {
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <10000000>;
+	};
+};
+
+&tcphy0 {
+	status = "okay";
+};
+
+&tcphy1 {
+	status = "okay";
+};
+
+&tsadc {
+	/* tshut mode 0:CRU 1:GPIO */
+	rockchip,hw-tshut-mode = <1>;
+	/* tshut polarity 0:LOW 1:HIGH */
+	rockchip,hw-tshut-polarity = <1>;
+	status = "okay";
+};
+
+&u2phy0 {
+	status = "okay";
+
+	u2phy0_otg: otg-port {
+		status = "okay";
+	};
+
+	u2phy0_host: host-port {
+		phy-supply = <&vcc5v0_host>;
+		status = "okay";
+	};
+};
+
+&u2phy1 {
+	status = "okay";
+
+	u2phy1_otg: otg-port {
+		status = "okay";
+	};
+
+	u2phy1_host: host-port {
+		phy-supply = <&vcc5v0_host>;
+		status = "okay";
+	};
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
+	status = "okay";
+
+	bluetooth {
+		compatible = "brcm,bcm43438-bt";
+		clocks = <&rk808 1>;
+		clock-names = "lpo";
+		device-wakeup-gpios = <&gpio2 RK_PD3 GPIO_ACTIVE_HIGH>;
+		host-wakeup-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_HIGH>;
+		shutdown-gpios = <&gpio0 RK_PB1 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&bt_host_wake_l &bt_wake_l &bt_enable_h>;
+		vbat-supply = <&vcc3v3_sys>;
+		vddio-supply = <&vcc_1v8>;
+	};
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&usb_host0_ehci {
+	status = "okay";
+};
+
+&usb_host0_ohci {
+	status = "okay";
+};
+
+&usb_host1_ehci {
+	status = "okay";
+};
+
+&usb_host1_ohci {
+	status = "okay";
+};
+
+&usbdrd3_0 {
+	status = "okay";
+};
+
+&usbdrd_dwc3_0 {
+	status = "okay";
+	dr_mode = "otg";
+};
+
+&usbdrd3_1 {
+	status = "okay";
+};
+
+&usbdrd_dwc3_1 {
+	status = "okay";
+	dr_mode = "host";
+};
+
+&vopb {
+	status = "okay";
+};
+
+&vopb_mmu {
+	status = "okay";
+};
+
+&vopl {
+	status = "okay";
+};
+
+&vopl_mmu {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
index 808ea77..b4d8f60 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
@@ -84,7 +84,7 @@ backlight: backlight {
 	};
 
 	edp_panel: edp-panel {
-		compatible ="lg,lp079qx1-sp0v", "simple-panel";
+		compatible = "lg,lp079qx1-sp0v";
 		backlight = <&backlight>;
 		enable-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_HIGH>;
 		pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index e62ea0e..33cc21f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -291,7 +291,7 @@ gmac: ethernet@fe300000 {
 		status = "disabled";
 	};
 
-	sdio0: dwmmc@fe310000 {
+	sdio0: mmc@fe310000 {
 		compatible = "rockchip,rk3399-dw-mshc",
 			     "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xfe310000 0x0 0x4000>;
@@ -307,7 +307,7 @@ sdio0: dwmmc@fe310000 {
 		status = "disabled";
 	};
 
-	sdmmc: dwmmc@fe320000 {
+	sdmmc: mmc@fe320000 {
 		compatible = "rockchip,rk3399-dw-mshc",
 			     "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xfe320000 0x0 0x4000>;
@@ -828,6 +828,14 @@ gpu_crit: gpu_crit {
 					type = "critical";
 				};
 			};
+
+			cooling-maps {
+				map0 {
+					trip = <&gpu_alert0>;
+					cooling-device =
+						<&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
 		};
 	};
 
@@ -1887,6 +1895,7 @@ gpu: gpu@ff9a0000 {
 			     <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH 0>;
 		interrupt-names = "gpu", "job", "mmu";
 		clocks = <&cru ACLK_GPU>;
+		#cooling-cells = <2>;
 		power-domains = <&power RK3399_PD_GPU>;
 		status = "disabled";
 	};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts b/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts
new file mode 100644
index 0000000..b42f941
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
+ * Copyright (c) 2019 Radxa Limited
+ * Copyright (c) 2019 Amarula Solutions(India)
+ */
+
+/dts-v1/;
+#include "rk3399.dtsi"
+#include "rk3399-opp.dtsi"
+#include "rk3399pro-vmarc-som.dtsi"
+#include <arm/rockchip-radxa-dalang-carrier.dtsi>
+
+/ {
+	model = "Radxa ROCK Pi N10";
+	compatible = "radxa,rockpi-n10", "rockchip,rk3399pro";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi b/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi
new file mode 100644
index 0000000..0a51633
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
+ * Copyright (c) 2019 Vamrs Limited
+ * Copyright (c) 2019 Amarula Solutions(India)
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+	compatible = "vamrs,rk3399pro-vmarc-som", "rockchip,rk3399pro";
+
+	clkin_gmac: external-gmac-clock {
+		compatible = "fixed-clock";
+		clock-frequency = <125000000>;
+		clock-output-names = "clkin_gmac";
+		#clock-cells = <0>;
+	};
+
+	vcc12v_dcin: vcc12v-dcin-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc12v_dcin";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <12000000>;
+		regulator-max-microvolt = <12000000>;
+	};
+
+	vcc5v0_sys: vcc5v0-sys-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc5v0_sys";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		vin-supply = <&vcc12v_dcin>;
+	};
+};
+
+&cpu_l0 {
+	cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l1 {
+	cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l2 {
+	cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l3 {
+	cpu-supply = <&vdd_cpu_l>;
+};
+
+&emmc_phy {
+	status = "okay";
+};
+
+&gmac {
+	assigned-clocks = <&cru SCLK_RMII_SRC>;
+	assigned-clock-parents = <&clkin_gmac>;
+	clock_in_out = "input";
+	phy-supply = <&vcc_lan>;
+	phy-mode = "rgmii";
+	pinctrl-names = "default";
+	pinctrl-0 = <&rgmii_pins>;
+	snps,reset-gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>;
+	snps,reset-active-low;
+	snps,reset-delays-us = <0 10000 50000>;
+	tx_delay = <0x28>;
+	rx_delay = <0x11>;
+};
+
+&i2c0 {
+	clock-frequency = <400000>;
+	i2c-scl-rising-time-ns = <180>;
+	i2c-scl-falling-time-ns = <30>;
+	status = "okay";
+
+	rk809: pmic@20 {
+		compatible = "rockchip,rk809";
+		reg = <0x20>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <RK_PC2 IRQ_TYPE_LEVEL_LOW>;
+		#clock-cells = <1>;
+		clock-output-names = "rk808-clkout1", "rk808-clkout2";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pmic_int_l>;
+		rockchip,system-power-controller;
+		wakeup-source;
+
+		vcc1-supply = <&vcc5v0_sys>;
+		vcc2-supply = <&vcc5v0_sys>;
+		vcc3-supply = <&vcc5v0_sys>;
+		vcc4-supply = <&vcc5v0_sys>;
+		vcc5-supply = <&vcc_buck5>;
+		vcc6-supply = <&vcc_buck5>;
+		vcc7-supply = <&vcc5v0_sys>;
+		vcc8-supply = <&vcc3v3_sys>;
+		vcc9-supply = <&vcc5v0_sys>;
+
+		regulators {
+			vdd_log: DCDC_REG1 {
+				regulator-name = "vdd_log";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-initial-mode = <0x2>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+					regulator-suspend-microvolt = <900000>;
+				};
+			};
+
+			vdd_cpu_l: DCDC_REG2 {
+				regulator-name = "vdd_cpu_l";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <6001>;
+				regulator-initial-mode = <0x2>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc_ddr: DCDC_REG3 {
+				regulator-name = "vcc_ddr";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-initial-mode = <0x2>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+				};
+			};
+
+			vcc3v3_sys: DCDC_REG4 {
+				regulator-name = "vcc3v3_sys";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-initial-mode = <0x2>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <3300000>;
+				};
+			};
+
+			vcc_buck5: DCDC_REG5 {
+				regulator-name = "vcc_buck5";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <2200000>;
+				regulator-max-microvolt = <2200000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <2200000>;
+				};
+			};
+
+			vcca_0v9: LDO_REG1 {
+				regulator-name = "vcca_0v9";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <900000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc_1v8: LDO_REG2 {
+				regulator-name = "vcc_1v8";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1800000>;
+				};
+			};
+
+			vcc_0v9: LDO_REG3 {
+				regulator-name = "vcc_0v9";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <900000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <900000>;
+				};
+			};
+
+			vcca_1v8: LDO_REG4 {
+				regulator-name = "vcca_1v8";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1850000>;
+				regulator-max-microvolt = <1850000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			/*
+			 * Present as per the BSP, but the schematic does not show
+			 * any regulator pin for LDO5.
+			 */
+			vdd1v5_dvp: LDO_REG5 {
+				regulator-name = "vdd1v5_dvp";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc_1v5: LDO_REG6 {
+				regulator-name = "vcc_1v5";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vccio_3v0: LDO_REG7 {
+				regulator-name = "vccio_3v0";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vccio_sd: LDO_REG8 {
+				regulator-name = "vccio_sd";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			/*
+			 * Present as per the BSP, but the schematic does not show
+			 * any regulator pin for LDO9.
+			 */
+			vcc_sd: LDO_REG9 {
+				regulator-name = "vcc_sd";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc5v0_usb2: SWITCH_REG1 {
+				regulator-name = "vcc5v0_usb2";
+				regulator-min-microvolt = <5000000>;
+				regulator-max-microvolt = <5000000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <5000000>;
+				};
+			};
+
+			vccio_3v3: vcc_lan: SWITCH_REG2 {
+				regulator-name = "vccio_3v3";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+		};
+	};
+};
+
+&io_domains {
+	status = "okay";
+	bt656-supply = <&vcca_1v8>;
+	sdmmc-supply = <&vccio_sd>;
+	gpio1830-supply = <&vccio_3v0>;
+};
+
+&pmu_io_domains {
+	status = "okay";
+	pmu1830-supply = <&vcc_1v8>;
+};
+
+&sdhci {
+	bus-width = <8>;
+	mmc-hs400-1_8v;
+	mmc-hs400-enhanced-strobe;
+	non-removable;
+	status = "okay";
+};
+
+&tsadc {
+	status = "okay";
+	rockchip,hw-tshut-mode = <1>;
+	rockchip,hw-tshut-polarity = <1>;
+};
+
+&pinctrl {
+	pmic {
+		pmic_int_l: pmic-int-l {
+			rockchip,pins =
+				<1 RK_PC2 0 &pcfg_pull_up>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
index 8ec40a0..5b18bda 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
@@ -633,7 +633,8 @@ nand: nand@68000000 {
 			pinctrl-0 = <&pinctrl_nand>;
 			clock-names = "nand", "nand_x", "ecc";
 			clocks = <&sys_clk 2>, <&sys_clk 3>, <&sys_clk 3>;
-			resets = <&sys_rst 2>;
+			reset-names = "nand", "reg";
+			resets = <&sys_rst 2>, <&sys_rst 2>;
 		};
 	};
 };
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
index b658f2b..f2dc5f6 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
@@ -937,7 +937,8 @@ nand: nand@68000000 {
 			pinctrl-0 = <&pinctrl_nand>;
 			clock-names = "nand", "nand_x", "ecc";
 			clocks = <&sys_clk 2>, <&sys_clk 3>, <&sys_clk 3>;
-			resets = <&sys_rst 2>;
+			reset-names = "nand", "reg";
+			resets = <&sys_rst 2>, <&sys_rst 2>;
 		};
 	};
 };
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
index d6f6cee..73e7e12 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
@@ -795,7 +795,8 @@ nand: nand@68000000 {
 			pinctrl-0 = <&pinctrl_nand>;
 			clock-names = "nand", "nand_x", "ecc";
 			clocks = <&sys_clk 2>, <&sys_clk 3>, <&sys_clk 3>;
-			resets = <&sys_rst 2>;
+			reset-names = "nand", "reg";
+			resets = <&sys_rst 2>, <&sys_rst 2>;
 		};
 	};
 };
diff --git a/arch/arm64/boot/dts/sprd/Makefile b/arch/arm64/boot/dts/sprd/Makefile
index 2bdc238..f4f1f51 100644
--- a/arch/arm64/boot/dts/sprd/Makefile
+++ b/arch/arm64/boot/dts/sprd/Makefile
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 dtb-$(CONFIG_ARCH_SPRD) += sc9836-openphone.dtb \
-			sp9860g-1h10.dtb
+			sp9860g-1h10.dtb	\
+			sp9863a-1h10.dtb
diff --git a/arch/arm64/boot/dts/sprd/sc9863a.dtsi b/arch/arm64/boot/dts/sprd/sc9863a.dtsi
new file mode 100644
index 0000000..cd80756
--- /dev/null
+++ b/arch/arm64/boot/dts/sprd/sc9863a.dtsi
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Unisoc SC9863A SoC DTS file
+ *
+ * Copyright (C) 2019, Unisoc Inc.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "sharkl3.dtsi"
+
+/ {
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+				core1 {
+					cpu = <&CPU1>;
+				};
+				core2 {
+					cpu = <&CPU2>;
+				};
+				core3 {
+					cpu = <&CPU3>;
+				};
+				core4 {
+					cpu = <&CPU4>;
+				};
+				core5 {
+					cpu = <&CPU5>;
+				};
+				core6 {
+					cpu = <&CPU6>;
+				};
+				core7 {
+					cpu = <&CPU7>;
+				};
+			};
+		};
+
+		CPU0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a55";
+			reg = <0x0 0x0>;
+			enable-method = "psci";
+			cpu-idle-states = <&CORE_PD>;
+		};
+
+		CPU1: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a55";
+			reg = <0x0 0x100>;
+			enable-method = "psci";
+			cpu-idle-states = <&CORE_PD>;
+		};
+
+		CPU2: cpu@200 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a55";
+			reg = <0x0 0x200>;
+			enable-method = "psci";
+			cpu-idle-states = <&CORE_PD>;
+		};
+
+		CPU3: cpu@300 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a55";
+			reg = <0x0 0x300>;
+			enable-method = "psci";
+			cpu-idle-states = <&CORE_PD>;
+		};
+
+		CPU4: cpu@400 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a55";
+			reg = <0x0 0x400>;
+			enable-method = "psci";
+			cpu-idle-states = <&CORE_PD>;
+		};
+
+		CPU5: cpu@500 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a55";
+			reg = <0x0 0x500>;
+			enable-method = "psci";
+			cpu-idle-states = <&CORE_PD>;
+		};
+
+		CPU6: cpu@600 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a55";
+			reg = <0x0 0x600>;
+			enable-method = "psci";
+			cpu-idle-states = <&CORE_PD>;
+		};
+
+		CPU7: cpu@700 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a55";
+			reg = <0x0 0x700>;
+			enable-method = "psci";
+			cpu-idle-states = <&CORE_PD>;
+		};
+	};
+
+	idle-states {
+		entry-method = "arm,psci";
+		CORE_PD: core-pd {
+			compatible = "arm,idle-state";
+			entry-latency-us = <4000>;
+			exit-latency-us = <4000>;
+			min-residency-us = <10000>;
+			local-timer-stop;
+			arm,psci-suspend-param = <0x00010000>;
+		};
+	};
+
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>, /* Physical Secure PPI */
+			     <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>, /* Physical Non-Secure PPI */
+			     <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>, /* Virtual PPI */
+			     <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>; /* Hypervisor PPI */
+	};
+
+	pmu {
+		compatible = "arm,armv8-pmuv3";
+		interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	soc {
+		gic: interrupt-controller@14000000 {
+			compatible = "arm,gic-v3";
+			#interrupt-cells = <3>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			redistributor-stride = <0x0 0x20000>;	/* 128KB stride */
+			#redistributor-regions = <1>;
+			interrupt-controller;
+			reg = <0x0 0x14000000 0 0x20000>,	/* GICD */
+			      <0x0 0x14040000 0 0x100000>;	/* GICR */
+			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		funnel@10001000 {
+			compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+			reg = <0 0x10001000 0 0x1000>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			out-ports {
+				port {
+					funnel_soc_out_port: endpoint {
+						remote-endpoint = <&etb_in>;
+					};
+				};
+			};
+
+			in-ports {
+				port {
+					funnel_soc_in_port: endpoint {
+						remote-endpoint =
+						<&funnel_ca55_out_port>;
+					};
+				};
+			};
+		};
+
+		etb@10003000 {
+			compatible = "arm,coresight-tmc", "arm,primecell";
+			reg = <0 0x10003000 0 0x1000>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			in-ports {
+				port {
+					etb_in: endpoint {
+						remote-endpoint =
+						<&funnel_soc_out_port>;
+					};
+				};
+			};
+		};
+
+		funnel@12001000 {
+			compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+			reg = <0 0x12001000 0 0x1000>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			out-ports {
+				port {
+					funnel_little_out_port: endpoint {
+						remote-endpoint =
+						<&etf_little_in>;
+					};
+				};
+			};
+
+			in-ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					funnel_little_in_port0: endpoint {
+						remote-endpoint = <&etm0_out>;
+					};
+				};
+
+				port@1 {
+					reg = <1>;
+					funnel_little_in_port1: endpoint {
+						remote-endpoint = <&etm1_out>;
+					};
+				};
+
+				port@2 {
+					reg = <2>;
+					funnel_little_in_port2: endpoint {
+						remote-endpoint = <&etm2_out>;
+					};
+				};
+
+				port@3 {
+					reg = <3>;
+					funnel_little_in_port3: endpoint {
+						remote-endpoint = <&etm3_out>;
+					};
+				};
+			};
+		};
+
+		etf@12002000 {
+			compatible = "arm,coresight-tmc", "arm,primecell";
+			reg = <0 0x12002000 0 0x1000>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			out-ports {
+				port {
+					etf_little_out: endpoint {
+						remote-endpoint =
+						<&funnel_ca55_in_port0>;
+					};
+				};
+			};
+
+			in-port {
+				port {
+					etf_little_in: endpoint {
+						remote-endpoint =
+						<&funnel_little_out_port>;
+					};
+				};
+			};
+		};
+
+		etf@12003000 {
+			compatible = "arm,coresight-tmc", "arm,primecell";
+			reg = <0 0x12003000 0 0x1000>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			out-ports {
+				port {
+					etf_big_out: endpoint {
+						remote-endpoint =
+						<&funnel_ca55_in_port1>;
+					};
+				};
+			};
+
+			in-ports {
+				port {
+					etf_big_in: endpoint {
+						remote-endpoint =
+						<&funnel_big_out_port>;
+					};
+				};
+			};
+		};
+
+		funnel@12004000 {
+			compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+			reg = <0 0x12004000 0 0x1000>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			out-ports {
+				port {
+					funnel_ca55_out_port: endpoint {
+						remote-endpoint =
+						<&funnel_soc_in_port>;
+					};
+				};
+			};
+
+			in-ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					funnel_ca55_in_port0: endpoint {
+						remote-endpoint =
+						<&etf_little_out>;
+					};
+				};
+
+				port@1 {
+					reg = <1>;
+					funnel_ca55_in_port1: endpoint {
+						remote-endpoint =
+						<&etf_big_out>;
+					};
+				};
+			};
+		};
+
+		funnel@12005000 {
+			compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+			reg = <0 0x12005000 0 0x1000>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			out-ports {
+				port {
+					funnel_big_out_port: endpoint {
+						remote-endpoint =
+						<&etf_big_in>;
+					};
+				};
+			};
+
+			in-ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					funnel_big_in_port0: endpoint {
+						remote-endpoint = <&etm4_out>;
+					};
+				};
+
+				port@1 {
+					reg = <1>;
+					funnel_big_in_port1: endpoint {
+						remote-endpoint = <&etm5_out>;
+					};
+				};
+
+				port@2 {
+					reg = <2>;
+					funnel_big_in_port2: endpoint {
+						remote-endpoint = <&etm6_out>;
+					};
+				};
+
+				port@3 {
+					reg = <3>;
+					funnel_big_in_port3: endpoint {
+						remote-endpoint = <&etm7_out>;
+					};
+				};
+			};
+		};
+
+		etm@13040000 {
+			compatible = "arm,coresight-etm4x", "arm,primecell";
+			reg = <0 0x13040000 0 0x1000>;
+			cpu = <&CPU0>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			out-ports {
+				port {
+					etm0_out: endpoint {
+						remote-endpoint =
+						<&funnel_little_in_port0>;
+					};
+				};
+			};
+		};
+
+		etm@13140000 {
+			compatible = "arm,coresight-etm4x", "arm,primecell";
+			reg = <0 0x13140000 0 0x1000>;
+			cpu = <&CPU1>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			out-ports {
+				port {
+					etm1_out: endpoint {
+						remote-endpoint =
+						<&funnel_little_in_port1>;
+					};
+				};
+			};
+		};
+
+		etm@13240000 {
+			compatible = "arm,coresight-etm4x", "arm,primecell";
+			reg = <0 0x13240000 0 0x1000>;
+			cpu = <&CPU2>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			out-ports {
+				port {
+					etm2_out: endpoint {
+						remote-endpoint =
+						<&funnel_little_in_port2>;
+					};
+				};
+			};
+		};
+
+		etm@13340000 {
+			compatible = "arm,coresight-etm4x", "arm,primecell";
+			reg = <0 0x13340000 0 0x1000>;
+			cpu = <&CPU3>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			out-ports {
+				port {
+					etm3_out: endpoint {
+						remote-endpoint =
+						<&funnel_little_in_port3>;
+					};
+				};
+			};
+		};
+
+		etm@13440000 {
+			compatible = "arm,coresight-etm4x", "arm,primecell";
+			reg = <0 0x13440000 0 0x1000>;
+			cpu = <&CPU4>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			out-ports {
+				port {
+					etm4_out: endpoint {
+						remote-endpoint =
+						<&funnel_big_in_port0>;
+					};
+				};
+			};
+		};
+
+		etm@13540000 {
+			compatible = "arm,coresight-etm4x", "arm,primecell";
+			reg = <0 0x13540000 0 0x1000>;
+			cpu = <&CPU5>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			out-ports {
+				port {
+					etm5_out: endpoint {
+						remote-endpoint =
+						<&funnel_big_in_port1>;
+					};
+				};
+			};
+		};
+
+		etm@13640000 {
+			compatible = "arm,coresight-etm4x", "arm,primecell";
+			reg = <0 0x13640000 0 0x1000>;
+			cpu = <&CPU6>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			out-ports {
+				port {
+					etm6_out: endpoint {
+						remote-endpoint =
+						<&funnel_big_in_port2>;
+					};
+				};
+			};
+		};
+
+		etm@13740000 {
+			compatible = "arm,coresight-etm4x", "arm,primecell";
+			reg = <0 0x13740000 0 0x1000>;
+			cpu = <&CPU7>;
+			clocks = <&ext_26m>;
+			clock-names = "apb_pclk";
+
+			out-ports {
+				port {
+					etm7_out: endpoint {
+						remote-endpoint =
+						<&funnel_big_in_port3>;
+					};
+				};
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/sprd/sharkl3.dtsi b/arch/arm64/boot/dts/sprd/sharkl3.dtsi
new file mode 100644
index 0000000..0222128
--- /dev/null
+++ b/arch/arm64/boot/dts/sprd/sharkl3.dtsi
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Unisoc Sharkl3 platform DTS file
+ *
+ * Copyright (C) 2019, Unisoc Inc.
+ */
+
+/ {
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	soc: soc {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		apb@70000000 {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x0 0x70000000 0x10000000>;
+
+			uart0: serial@0 {
+				compatible = "sprd,sc9863a-uart",
+					     "sprd,sc9836-uart";
+				reg = <0x0 0x100>;
+				interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&ext_26m>;
+				status = "disabled";
+			};
+
+			uart1: serial@100000 {
+				compatible = "sprd,sc9863a-uart",
+					     "sprd,sc9836-uart";
+				reg = <0x100000 0x100>;
+				interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&ext_26m>;
+				status = "disabled";
+			};
+
+			uart2: serial@200000 {
+				compatible = "sprd,sc9863a-uart",
+					     "sprd,sc9836-uart";
+				reg = <0x200000 0x100>;
+				interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&ext_26m>;
+				status = "disabled";
+			};
+
+			uart3: serial@300000 {
+				compatible = "sprd,sc9863a-uart",
+					     "sprd,sc9836-uart";
+				reg = <0x300000 0x100>;
+				interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&ext_26m>;
+				status = "disabled";
+			};
+
+			uart4: serial@400000 {
+				compatible = "sprd,sc9863a-uart",
+					     "sprd,sc9836-uart";
+				reg = <0x400000 0x100>;
+				interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&ext_26m>;
+				status = "disabled";
+			};
+		};
+	};
+
+	ext_26m: ext-26m {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <26000000>;
+		clock-output-names = "ext-26m";
+	};
+};
diff --git a/arch/arm64/boot/dts/sprd/sp9863a-1h10.dts b/arch/arm64/boot/dts/sprd/sp9863a-1h10.dts
new file mode 100644
index 0000000..5c32c15
--- /dev/null
+++ b/arch/arm64/boot/dts/sprd/sp9863a-1h10.dts
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Unisoc SP9863A-1h10 boards DTS file
+ *
+ * Copyright (C) 2019, Unisoc Inc.
+ */
+
+/dts-v1/;
+
+#include "sc9863a.dtsi"
+
+/ {
+	model = "Spreadtrum SP9863A-1H10 Board";
+
+	compatible = "sprd,sp9863a-1h10", "sprd,sc9863a";
+
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+	};
+
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0x0 0x80000000 0x0 0x80000000>;
+	};
+
+	chosen {
+		stdout-path = "serial1:115200n8";
+		bootargs = "earlycon";
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&uart1 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index efb2457..e5df20a 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -51,17 +51,6 @@ gic_its: gic-its@1820000 {
 		};
 	};
 
-	secure_proxy_main: mailbox@32c00000 {
-		compatible = "ti,am654-secure-proxy";
-		#mbox-cells = <1>;
-		reg-names = "target_data", "rt", "scfg";
-		reg = <0x00 0x32c00000 0x00 0x100000>,
-		      <0x00 0x32400000 0x00 0x100000>,
-		      <0x00 0x32800000 0x00 0x100000>;
-		interrupt-names = "rx_011";
-		interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
-	};
-
 	serdes0: serdes@900000 {
 		compatible = "ti,phy-am654-serdes";
 		reg = <0x0 0x900000 0x0 0x2000>;
@@ -385,11 +374,15 @@ intr_main_gpio: interrupt-controller0 {
 		ti,sci-rm-range-girq = <0x1>;
 	};
 
-	cbass_main_navss: interconnect0 {
-		compatible = "simple-bus";
+	main_navss {
+		compatible = "simple-mfd";
 		#address-cells = <2>;
 		#size-cells = <2>;
 		ranges;
+		dma-coherent;
+		dma-ranges;
+
+		ti,sci-dev-id = <118>;
 
 		intr_main_navss: interrupt-controller1 {
 			compatible = "ti,sci-intr";
@@ -414,6 +407,17 @@ inta_main_udmass: interrupt-controller@33d00000 {
 			ti,sci-rm-range-global-event = <0x1>;
 		};
 
+		secure_proxy_main: mailbox@32c00000 {
+			compatible = "ti,am654-secure-proxy";
+			#mbox-cells = <1>;
+			reg-names = "target_data", "rt", "scfg";
+			reg = <0x00 0x32c00000 0x00 0x100000>,
+			      <0x00 0x32400000 0x00 0x100000>,
+			      <0x00 0x32800000 0x00 0x100000>;
+			interrupt-names = "rx_011";
+			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
 		hwspinlock: spinlock@30e00000 {
 			compatible = "ti,am654-hwspinlock";
 			reg = <0x00 0x30e00000 0x00 0x1000>;
@@ -527,6 +531,41 @@ mailbox0_cluster11: mailbox@31f8b000 {
 			ti,mbox-num-fifos = <16>;
 			interrupt-parent = <&intr_main_navss>;
 		};
+
+		ringacc: ringacc@3c000000 {
+			compatible = "ti,am654-navss-ringacc";
+			reg =	<0x0 0x3c000000 0x0 0x400000>,
+				<0x0 0x38000000 0x0 0x400000>,
+				<0x0 0x31120000 0x0 0x100>,
+				<0x0 0x33000000 0x0 0x40000>;
+			reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
+			ti,num-rings = <818>;
+			ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+			ti,dma-ring-reset-quirk;
+			ti,sci = <&dmsc>;
+			ti,sci-dev-id = <187>;
+			msi-parent = <&inta_main_udmass>;
+		};
+
+		main_udmap: dma-controller@31150000 {
+			compatible = "ti,am654-navss-main-udmap";
+			reg =	<0x0 0x31150000 0x0 0x100>,
+				<0x0 0x34000000 0x0 0x100000>,
+				<0x0 0x35000000 0x0 0x100000>;
+			reg-names = "gcfg", "rchanrt", "tchanrt";
+			msi-parent = <&inta_main_udmass>;
+			#dma-cells = <1>;
+
+			ti,sci = <&dmsc>;
+			ti,sci-dev-id = <188>;
+			ti,ringacc = <&ringacc>;
+
+			ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */
+						<0x2>; /* TX_CHAN */
+			ti,sci-rm-range-rchan = <0x4>, /* RX_HCHAN */
+						<0x5>; /* RX_CHAN */
+			ti,sci-rm-range-rflow = <0x6>; /* GP RFLOW */
+		};
 	};
 
 	main_gpio0:  main_gpio0@600000 {
@@ -624,4 +663,83 @@ pcie1_ep: pcie-ep@5600000 {
 		dma-coherent;
 		interrupts = <GIC_SPI 355 IRQ_TYPE_EDGE_RISING>;
 	};
+
+	mcasp0: mcasp@2b00000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02b00000 0x0 0x2000>,
+			<0x0 0x02b08000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 104 0>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 104 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
+
+	mcasp1: mcasp@2b10000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02b10000 0x0 0x2000>,
+			<0x0 0x02b18000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc401>, <&main_udmap 0x4401>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 105 0>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 105 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
+
+	mcasp2: mcasp@2b20000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02b20000 0x0 0x2000>,
+			<0x0 0x02b28000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc402>, <&main_udmap 0x4402>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 106 0>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 106 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
+
+	cal: cal@6f03000 {
+		compatible = "ti,am654-cal";
+		reg = <0x0 0x06f03000 0x0 0x400>,
+		      <0x0 0x06f03800 0x0 0x40>;
+		reg-names = "cal_top",
+			    "cal_rx_core0";
+		interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+		ti,camerrx-control = <&scm_conf 0x40c0>;
+		clock-names = "fck";
+		clocks = <&k3_clks 2 0>;
+		power-domains = <&k3_pds 2 TI_SCI_PD_EXCLUSIVE>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			csi2_0: port@0 {
+				reg = <0>;
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index 7bdf534..92629cb 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -6,6 +6,14 @@
  */
 
 &cbass_mcu {
+	mcu_conf: scm_conf@40f00000 {
+		compatible = "syscon", "simple-mfd";
+		reg = <0x0 0x40f00000 0x0 0x20000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0x0 0x0 0x40f00000 0x20000>;
+	};
+
 	mcu_uart0: serial@40a00000 {
 		compatible = "ti,am654-uart";
 			reg = <0x00 0x40a00000 0x00 0x100>;
@@ -95,4 +103,88 @@ adc {
 			compatible = "ti,am654-adc", "ti,am3359-adc";
 		};
 	};
+
+	mcu_navss {
+		compatible = "simple-mfd";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		dma-coherent;
+		dma-ranges;
+
+		ti,sci-dev-id = <119>;
+
+		mcu_ringacc: ringacc@2b800000 {
+			compatible = "ti,am654-navss-ringacc";
+			reg =	<0x0 0x2b800000 0x0 0x400000>,
+				<0x0 0x2b000000 0x0 0x400000>,
+				<0x0 0x28590000 0x0 0x100>,
+				<0x0 0x2a500000 0x0 0x40000>;
+			reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
+			ti,num-rings = <286>;
+			ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+			ti,dma-ring-reset-quirk;
+			ti,sci = <&dmsc>;
+			ti,sci-dev-id = <195>;
+			msi-parent = <&inta_main_udmass>;
+		};
+
+		mcu_udmap: dma-controller@285c0000 {
+			compatible = "ti,am654-navss-mcu-udmap";
+			reg =	<0x0 0x285c0000 0x0 0x100>,
+				<0x0 0x2a800000 0x0 0x40000>,
+				<0x0 0x2aa00000 0x0 0x40000>;
+			reg-names = "gcfg", "rchanrt", "tchanrt";
+			msi-parent = <&inta_main_udmass>;
+			#dma-cells = <1>;
+
+			ti,sci = <&dmsc>;
+			ti,sci-dev-id = <194>;
+			ti,ringacc = <&mcu_ringacc>;
+
+			ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */
+						<0x2>; /* TX_CHAN */
+			ti,sci-rm-range-rchan = <0x3>, /* RX_HCHAN */
+						<0x4>; /* RX_CHAN */
+			ti,sci-rm-range-rflow = <0x5>; /* GP RFLOW */
+		};
+	};
+
+	fss: fss@47000000 {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		ospi0: spi@47040000 {
+			compatible = "ti,am654-ospi", "cdns,qspi-nor";
+			reg = <0x0 0x47040000 0x0 0x100>,
+				<0x5 0x00000000 0x1 0x0000000>;
+			interrupts = <GIC_SPI 552 IRQ_TYPE_LEVEL_HIGH>;
+			cdns,fifo-depth = <256>;
+			cdns,fifo-width = <4>;
+			cdns,trigger-address = <0x0>;
+			clocks = <&k3_clks 248 0>;
+			assigned-clocks = <&k3_clks 248 0>;
+			assigned-clock-parents = <&k3_clks 248 2>;
+			assigned-clock-rates = <166666666>;
+			power-domains = <&k3_pds 248 TI_SCI_PD_EXCLUSIVE>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		ospi1: spi@47050000 {
+			compatible = "ti,am654-ospi", "cdns,qspi-nor";
+			reg = <0x0 0x47050000 0x0 0x100>,
+				<0x7 0x00000000 0x1 0x00000000>;
+			interrupts = <GIC_SPI 553 IRQ_TYPE_LEVEL_HIGH>;
+			cdns,fifo-depth = <256>;
+			cdns,fifo-width = <4>;
+			cdns,trigger-address = <0x0>;
+			clocks = <&k3_clks 249 6>;
+			power-domains = <&k3_pds 249 TI_SCI_PD_EXCLUSIVE>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/ti/k3-am65.dtsi b/arch/arm64/boot/dts/ti/k3-am65.dtsi
index 6dfccd5..aea36e2 100644
--- a/arch/arm64/boot/dts/ti/k3-am65.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65.dtsi
@@ -74,13 +74,18 @@ cbass_main: interconnect@100000 {
 			 /* MCUSS Range */
 			 <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>,
 			 <0x00 0x40200000 0x00 0x40200000 0x00 0x00900100>,
+			 <0x00 0x40f00000 0x00 0x40f00000 0x00 0x00020000>, /* CTRL_MMR0 */
 			 <0x00 0x41000000 0x00 0x41000000 0x00 0x00020000>,
 			 <0x00 0x41400000 0x00 0x41400000 0x00 0x00020000>,
 			 <0x00 0x41c00000 0x00 0x41c00000 0x00 0x00080000>,
 			 <0x00 0x42040000 0x00 0x42040000 0x00 0x03ac2400>,
 			 <0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>,
 			 <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>,
-			 <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>;
+			 <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>,
+			 <0x00 0x50000000 0x00 0x50000000 0x00 0x8000000>,
+			 <0x00 0x70000000 0x00 0x70000000 0x00 0x200000>,
+			 <0x05 0x00000000 0x05 0x00000000 0x01 0x0000000>,
+			 <0x07 0x00000000 0x07 0x00000000 0x01 0x0000000>;
 
 		cbass_mcu: interconnect@28380000 {
 			compatible = "simple-bus";
@@ -88,13 +93,17 @@ cbass_mcu: interconnect@28380000 {
 			#size-cells = <2>;
 			ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>, /* MCU NAVSS*/
 				 <0x00 0x40200000 0x00 0x40200000 0x00 0x00900100>, /* First peripheral window */
+				 <0x00 0x40f00000 0x00 0x40f00000 0x00 0x00020000>, /* CTRL_MMR0 */
 				 <0x00 0x41000000 0x00 0x41000000 0x00 0x00020000>, /* MCU R5F Core0 */
 				 <0x00 0x41400000 0x00 0x41400000 0x00 0x00020000>, /* MCU R5F Core1 */
 				 <0x00 0x41c00000 0x00 0x41c00000 0x00 0x00080000>, /* MCU SRAM */
 				 <0x00 0x42040000 0x00 0x42040000 0x00 0x03ac2400>, /* WKUP */
 				 <0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, /* MMRs, remaining NAVSS */
 				 <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, /* CPSW */
-				 <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>; /* OSPI space 1 */
+				 <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, /* OSPI space 1 */
+				 <0x00 0x50000000 0x00 0x50000000 0x00 0x8000000>, /* FSS OSPI0 data region 1 */
+				 <0x05 0x00000000 0x05 0x00000000 0x01 0x0000000>, /* FSS OSPI0 data region 3 */
+				 <0x07 0x00000000 0x07 0x00000000 0x01 0x0000000>; /* FSS OSPI1 data region 3 */
 
 			cbass_wakeup: interconnect@42040000 {
 				compatible = "simple-bus";
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index 8a85b48..1700996 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -53,6 +53,12 @@ sw6 {
 			gpios = <&wkup_gpio0 27 GPIO_ACTIVE_LOW>;
 		};
 	};
+
+	clk_ov5640_fixed: clock {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <24000000>;
+	};
 };
 
 &wkup_pmx0 {
@@ -69,6 +75,28 @@ AM65X_WKUP_IOPAD(0x0030, PIN_INPUT, 7) /* (R5) WKUP_GPIO0_24 */
 			AM65X_WKUP_IOPAD(0x003c, PIN_INPUT, 7) /* (P2) WKUP_GPIO0_27 */
 		>;
 	};
+
+	mcu_fss0_ospi0_pins_default: mcu-fss0-ospi0-pins-default {
+		pinctrl-single,pins = <
+			AM65X_WKUP_IOPAD(0x0000, PIN_OUTPUT, 0) /* (V1) MCU_OSPI0_CLK */
+			AM65X_WKUP_IOPAD(0x0008, PIN_INPUT, 0)	 /* (U2) MCU_OSPI0_DQS */
+			AM65X_WKUP_IOPAD(0x000c, PIN_INPUT, 0)  /* (U4) MCU_OSPI0_D0 */
+			AM65X_WKUP_IOPAD(0x0010, PIN_INPUT, 0)  /* (U5) MCU_OSPI0_D1 */
+			AM65X_WKUP_IOPAD(0x0014, PIN_INPUT, 0)  /* (T2) MCU_OSPI0_D2 */
+			AM65X_WKUP_IOPAD(0x0018, PIN_INPUT, 0)  /* (T3) MCU_OSPI0_D3 */
+			AM65X_WKUP_IOPAD(0x001c, PIN_INPUT, 0)  /* (T4) MCU_OSPI0_D4 */
+			AM65X_WKUP_IOPAD(0x0020, PIN_INPUT, 0)  /* (T5) MCU_OSPI0_D5 */
+			AM65X_WKUP_IOPAD(0x0024, PIN_INPUT, 0)  /* (R2) MCU_OSPI0_D6 */
+			AM65X_WKUP_IOPAD(0x0028, PIN_INPUT, 0)  /* (R3) MCU_OSPI0_D7 */
+			AM65X_WKUP_IOPAD(0x002c, PIN_OUTPUT, 0) /* (R4) MCU_OSPI0_CSn0 */
+		>;
+	};
+
+	wkup_pca554_default: wkup_pca554_default {
+		pinctrl-single,pins = <
+			AM65X_WKUP_IOPAD(0x0034, PIN_INPUT, 7) /* (T1) MCU_OSPI1_CLK.WKUP_GPIO0_25 */
+		>;
+	};
 };
 
 &main_pmx0 {
@@ -164,6 +193,12 @@ pca9554: gpio@39 {
 		reg = <0x39>;
 		gpio-controller;
 		#gpio-cells = <2>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&wkup_pca554_default>;
+		interrupt-parent = <&wkup_gpio0>;
+		interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
 	};
 };
 
@@ -184,6 +219,23 @@ &main_i2c1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&main_i2c1_pins_default>;
 	clock-frequency = <400000>;
+
+	ov5640@3c {
+		compatible = "ovti,ov5640";
+		reg = <0x3c>;
+
+		clocks = <&clk_ov5640_fixed>;
+		clock-names = "xclk";
+
+		port {
+			csi2_cam0: endpoint {
+				remote-endpoint = <&csi2_phy0>;
+				clock-lanes = <0>;
+				data-lanes = <1 2>;
+			};
+		};
+	};
+
 };
 
 &main_i2c2 {
@@ -339,3 +391,31 @@ &mailbox0_cluster10 {
 &mailbox0_cluster11 {
 	status = "disabled";
 };
+
+&ospi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mcu_fss0_ospi0_pins_default>;
+
+	flash@0 {
+		compatible = "jedec,spi-nor";
+		reg = <0x0>;
+		spi-tx-bus-width = <1>;
+		spi-rx-bus-width = <8>;
+		spi-max-frequency = <40000000>;
+		cdns,tshsl-ns = <60>;
+		cdns,tsd2d-ns = <60>;
+		cdns,tchsh-ns = <60>;
+		cdns,tslch-ns = <60>;
+		cdns,read-delay = <0>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+	};
+};
+
+&csi2_0 {
+	csi2_phy0: endpoint {
+		remote-endpoint = <&csi2_cam0>;
+		clock-lanes = <0>;
+		data-lanes = <1 2>;
+	};
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
index 2a3cd61..7a5c3d4 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
@@ -67,6 +67,40 @@ main_usbss1_pins_default: main_usbss1_pins_default {
 			J721E_IOPAD(0x214, PIN_OUTPUT, 4) /* (V4) MCAN1_TX.USB1_DRVVBUS */
 		>;
 	};
+
+	main_i2c1_exp4_pins_default: main-i2c1-exp4-pins-default {
+		pinctrl-single,pins = <
+			J721E_IOPAD(0x230, PIN_INPUT, 7) /* (U2) ECAP0_IN_APWM_OUT.GPIO1_11 */
+		>;
+	};
+
+	main_i2c0_pins_default: main-i2c0-pins-default {
+		pinctrl-single,pins = <
+			J721E_IOPAD(0x220, PIN_INPUT_PULLUP, 0) /* (AC5) I2C0_SCL */
+			J721E_IOPAD(0x224, PIN_INPUT_PULLUP, 0) /* (AA5) I2C0_SDA */
+		>;
+	};
+
+	main_i2c1_pins_default: main-i2c1-pins-default {
+		pinctrl-single,pins = <
+			J721E_IOPAD(0x228, PIN_INPUT_PULLUP, 0) /* (Y6) I2C1_SCL */
+			J721E_IOPAD(0x22c, PIN_INPUT_PULLUP, 0) /* (AA6) I2C1_SDA */
+		>;
+	};
+
+	main_i2c3_pins_default: main-i2c3-pins-default {
+		pinctrl-single,pins = <
+			J721E_IOPAD(0x270, PIN_INPUT_PULLUP, 4) /* (T26) MMC2_CLK.I2C3_SCL */
+			J721E_IOPAD(0x274, PIN_INPUT_PULLUP, 4) /* (T25) MMC2_CMD.I2C3_SDA */
+		>;
+	};
+
+	main_i2c6_pins_default: main-i2c6-pins-default {
+		pinctrl-single,pins = <
+			J721E_IOPAD(0x1d0, PIN_INPUT_PULLUP, 2) /* (AA3) SPI0_D1.I2C6_SCL */
+			J721E_IOPAD(0x1e4, PIN_INPUT_PULLUP, 2) /* (Y2) SPI1_D1.I2C6_SDA */
+		>;
+	};
 };
 
 &wkup_pmx0 {
@@ -75,6 +115,19 @@ sw11_button_pins_default: sw11_button_pins_default {
 			J721E_WKUP_IOPAD(0xcc, PIN_INPUT, 7) /* (G28) WKUP_GPIO0_7 */
 		>;
 	};
+
+	mcu_fss0_ospi1_pins_default: mcu-fss0-ospi1-pins-default {
+		pinctrl-single,pins = <
+			J721E_WKUP_IOPAD(0x34, PIN_OUTPUT, 0) /* (F22) MCU_OSPI1_CLK */
+			J721E_WKUP_IOPAD(0x50, PIN_OUTPUT, 0) /* (C22) MCU_OSPI1_CSn0 */
+			J721E_WKUP_IOPAD(0x40, PIN_INPUT, 0) /* (D22) MCU_OSPI1_D0 */
+			J721E_WKUP_IOPAD(0x44, PIN_INPUT, 0) /* (G22) MCU_OSPI1_D1 */
+			J721E_WKUP_IOPAD(0x48, PIN_INPUT, 0) /* (D23) MCU_OSPI1_D2 */
+			J721E_WKUP_IOPAD(0x4c, PIN_INPUT, 0) /* (C23) MCU_OSPI1_D3 */
+			J721E_WKUP_IOPAD(0x3c, PIN_INPUT, 0) /* (B23) MCU_OSPI1_DQS */
+			J721E_WKUP_IOPAD(0x38, PIN_INPUT, 0) /* (A23) MCU_OSPI1_LBCLKO */
+		>;
+	};
 };
 
 &wkup_uart0 {
@@ -279,3 +332,100 @@ &usb1 {
 	dr_mode = "host";
 	maximum-speed = "high-speed";
 };
+
+&ospi1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mcu_fss0_ospi1_pins_default>;
+
+	flash@0 {
+		compatible = "jedec,spi-nor";
+		reg = <0x0>;
+		spi-tx-bus-width = <1>;
+		spi-rx-bus-width = <4>;
+		spi-max-frequency = <40000000>;
+		cdns,tshsl-ns = <60>;
+		cdns,tsd2d-ns = <60>;
+		cdns,tchsh-ns = <60>;
+		cdns,tslch-ns = <60>;
+		cdns,read-delay = <2>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+	};
+};
+
+&tscadc0 {
+	adc {
+		ti,adc-channels = <0 1 2 3 4 5 6 7>;
+	};
+};
+
+&tscadc1 {
+	adc {
+		ti,adc-channels = <0 1 2 3 4 5 6 7>;
+	};
+};
+
+&main_i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&main_i2c0_pins_default>;
+	clock-frequency = <400000>;
+
+	exp1: gpio@20 {
+		compatible = "ti,tca6416";
+		reg = <0x20>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	exp2: gpio@22 {
+		compatible = "ti,tca6424";
+		reg = <0x22>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+};
+
+&main_i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&main_i2c1_pins_default>;
+	clock-frequency = <400000>;
+
+	exp4: gpio@20 {
+		compatible = "ti,tca6408";
+		reg = <0x20>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&main_i2c1_exp4_pins_default>;
+		interrupt-parent = <&main_gpio1>;
+		interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+};
+
+&main_i2c3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&main_i2c3_pins_default>;
+	clock-frequency = <400000>;
+
+	exp3: gpio@20 {
+		compatible = "ti,tca6408";
+		reg = <0x20>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+};
+
+&main_i2c6 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&main_i2c6_pins_default>;
+	clock-frequency = <400000>;
+
+	exp5: gpio@20 {
+		compatible = "ti,tca6408";
+		reg = <0x20>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index 1e4c2b7..0b9d14b 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -40,16 +40,6 @@ gic_its: gic-its@1820000 {
 		};
 	};
 
-	smmu0: smmu@36600000 {
-		compatible = "arm,smmu-v3";
-		reg = <0x0 0x36600000 0x0 0x100000>;
-		interrupt-parent = <&gic500>;
-		interrupts = <GIC_SPI 772 IRQ_TYPE_EDGE_RISING>,
-			     <GIC_SPI 768 IRQ_TYPE_EDGE_RISING>;
-		interrupt-names = "eventq", "gerror";
-		#iommu-cells = <1>;
-	};
-
 	main_gpio_intr: interrupt-controller0 {
 		compatible = "ti,sci-intr";
 		ti,intr-trigger-type = <1>;
@@ -61,11 +51,15 @@ main_gpio_intr: interrupt-controller0 {
 		ti,sci-rm-range-girq = <0x1>;
 	};
 
-	cbass_main_navss: interconnect0 {
-		compatible = "simple-bus";
+	main_navss {
+		compatible = "simple-mfd";
 		#address-cells = <2>;
 		#size-cells = <2>;
 		ranges;
+		dma-coherent;
+		dma-ranges;
+
+		ti,sci-dev-id = <199>;
 
 		main_navss_intr: interrupt-controller1 {
 			compatible = "ti,sci-intr";
@@ -90,6 +84,27 @@ main_udmass_inta: interrupt-controller@33d00000 {
 			ti,sci-rm-range-global-event = <0xd>;
 		};
 
+		secure_proxy_main: mailbox@32c00000 {
+			compatible = "ti,am654-secure-proxy";
+			#mbox-cells = <1>;
+			reg-names = "target_data", "rt", "scfg";
+			reg = <0x00 0x32c00000 0x00 0x100000>,
+			      <0x00 0x32400000 0x00 0x100000>,
+			      <0x00 0x32800000 0x00 0x100000>;
+			interrupt-names = "rx_011";
+			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		smmu0: smmu@36600000 {
+			compatible = "arm,smmu-v3";
+			reg = <0x0 0x36600000 0x0 0x100000>;
+			interrupt-parent = <&gic500>;
+			interrupts = <GIC_SPI 772 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 768 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eventq", "gerror";
+			#iommu-cells = <1>;
+		};
+
 		hwspinlock: spinlock@30e00000 {
 			compatible = "ti,am654-hwspinlock";
 			reg = <0x00 0x30e00000 0x00 0x1000>;
@@ -203,17 +218,42 @@ mailbox0_cluster11: mailbox@31f8b000 {
 			ti,mbox-num-fifos = <16>;
 			interrupt-parent = <&main_navss_intr>;
 		};
-	};
 
-	secure_proxy_main: mailbox@32c00000 {
-		compatible = "ti,am654-secure-proxy";
-		#mbox-cells = <1>;
-		reg-names = "target_data", "rt", "scfg";
-		reg = <0x00 0x32c00000 0x00 0x100000>,
-		      <0x00 0x32400000 0x00 0x100000>,
-		      <0x00 0x32800000 0x00 0x100000>;
-		interrupt-names = "rx_011";
-		interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+		main_ringacc: ringacc@3c000000 {
+			compatible = "ti,am654-navss-ringacc";
+			reg =	<0x0 0x3c000000 0x0 0x400000>,
+				<0x0 0x38000000 0x0 0x400000>,
+				<0x0 0x31120000 0x0 0x100>,
+				<0x0 0x33000000 0x0 0x40000>;
+			reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
+			ti,num-rings = <1024>;
+			ti,sci-rm-range-gp-rings = <0x1>; /* GP ring range */
+			ti,sci = <&dmsc>;
+			ti,sci-dev-id = <211>;
+			msi-parent = <&main_udmass_inta>;
+		};
+
+		main_udmap: dma-controller@31150000 {
+			compatible = "ti,j721e-navss-main-udmap";
+			reg =	<0x0 0x31150000 0x0 0x100>,
+				<0x0 0x34000000 0x0 0x100000>,
+				<0x0 0x35000000 0x0 0x100000>;
+			reg-names = "gcfg", "rchanrt", "tchanrt";
+			msi-parent = <&main_udmass_inta>;
+			#dma-cells = <1>;
+
+			ti,sci = <&dmsc>;
+			ti,sci-dev-id = <212>;
+			ti,ringacc = <&main_ringacc>;
+
+			ti,sci-rm-range-tchan = <0x0d>, /* TX_CHAN */
+						<0x0f>, /* TX_HCHAN */
+						<0x10>; /* TX_UHCHAN */
+			ti,sci-rm-range-rchan = <0x0a>, /* RX_CHAN */
+						<0x0b>, /* RX_HCHAN */
+						<0x0c>; /* RX_UHCHAN */
+			ti,sci-rm-range-rflow = <0x00>; /* GP RFLOW */
+		};
 	};
 
 	main_pmx0: pinmux@11c000 {
@@ -596,4 +636,331 @@ usb1: usb@6400000 {
 			dr_mode = "otg";
 		};
 	};
+
+	main_i2c0: i2c@2000000 {
+		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+		reg = <0x0 0x2000000 0x0 0x100>;
+		interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "fck";
+		clocks = <&k3_clks 187 0>;
+		power-domains = <&k3_pds 187 TI_SCI_PD_SHARED>;
+	};
+
+	main_i2c1: i2c@2010000 {
+		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+		reg = <0x0 0x2010000 0x0 0x100>;
+		interrupts = <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "fck";
+		clocks = <&k3_clks 188 0>;
+		power-domains = <&k3_pds 188 TI_SCI_PD_EXCLUSIVE>;
+	};
+
+	main_i2c2: i2c@2020000 {
+		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+		reg = <0x0 0x2020000 0x0 0x100>;
+		interrupts = <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "fck";
+		clocks = <&k3_clks 189 0>;
+		power-domains = <&k3_pds 189 TI_SCI_PD_EXCLUSIVE>;
+	};
+
+	main_i2c3: i2c@2030000 {
+		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+		reg = <0x0 0x2030000 0x0 0x100>;
+		interrupts = <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "fck";
+		clocks = <&k3_clks 190 0>;
+		power-domains = <&k3_pds 190 TI_SCI_PD_EXCLUSIVE>;
+	};
+
+	main_i2c4: i2c@2040000 {
+		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+		reg = <0x0 0x2040000 0x0 0x100>;
+		interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "fck";
+		clocks = <&k3_clks 191 0>;
+		power-domains = <&k3_pds 191 TI_SCI_PD_EXCLUSIVE>;
+	};
+
+	main_i2c5: i2c@2050000 {
+		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+		reg = <0x0 0x2050000 0x0 0x100>;
+		interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "fck";
+		clocks = <&k3_clks 192 0>;
+		power-domains = <&k3_pds 192 TI_SCI_PD_EXCLUSIVE>;
+	};
+
+	main_i2c6: i2c@2060000 {
+		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+		reg = <0x0 0x2060000 0x0 0x100>;
+		interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "fck";
+		clocks = <&k3_clks 193 0>;
+		power-domains = <&k3_pds 193 TI_SCI_PD_EXCLUSIVE>;
+	};
+
+	ufs_wrapper: ufs-wrapper@4e80000 {
+		compatible = "ti,j721e-ufs";
+		reg = <0x0 0x4e80000 0x0 0x100>;
+		power-domains = <&k3_pds 277 TI_SCI_PD_EXCLUSIVE>;
+		clocks = <&k3_clks 277 1>;
+		assigned-clocks = <&k3_clks 277 1>;
+		assigned-clock-parents = <&k3_clks 277 4>;
+		ranges;
+		#address-cells = <2>;
+		#size-cells = <2>;
+
+		ufs@4e84000 {
+			compatible = "cdns,ufshc-m31-16nm", "jedec,ufs-2.0";
+			reg = <0x0 0x4e84000 0x0 0x10000>;
+			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+			freq-table-hz = <250000000 250000000>, <19200000 19200000>, <19200000 19200000>;
+			clocks = <&k3_clks 277 0>, <&k3_clks 277 1>, <&k3_clks 277 1>;
+			clock-names = "core_clk", "phy_clk", "ref_clk";
+			dma-coherent;
+		};
+	};
+
+	mcasp0: mcasp@2b00000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02b00000 0x0 0x2000>,
+			<0x0 0x02b08000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 544 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 545 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 174 1>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 174 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
+
+	mcasp1: mcasp@2b10000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02b10000 0x0 0x2000>,
+			<0x0 0x02b18000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 546 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 547 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc401>, <&main_udmap 0x4401>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 175 1>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 175 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
+
+	mcasp2: mcasp@2b20000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02b20000 0x0 0x2000>,
+			<0x0 0x02b28000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 548 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 549 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc402>, <&main_udmap 0x4402>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 176 1>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 176 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
+
+	mcasp3: mcasp@2b30000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02b30000 0x0 0x2000>,
+			<0x0 0x02b38000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 550 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 551 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc500>, <&main_udmap 0x4500>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 177 1>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 177 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
+
+	mcasp4: mcasp@2b40000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02b40000 0x0 0x2000>,
+			<0x0 0x02b48000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 552 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 553 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc501>, <&main_udmap 0x4501>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 178 1>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 178 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
+
+	mcasp5: mcasp@2b50000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02b50000 0x0 0x2000>,
+			<0x0 0x02b58000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 554 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 555 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc502>, <&main_udmap 0x4502>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 179 1>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 179 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
+
+	mcasp6: mcasp@2b60000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02b60000 0x0 0x2000>,
+			<0x0 0x02b68000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 556 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 557 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc503>, <&main_udmap 0x4503>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 180 1>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 180 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
+
+	mcasp7: mcasp@2b70000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02b70000 0x0 0x2000>,
+			<0x0 0x02b78000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 558 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 559 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc504>, <&main_udmap 0x4504>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 181 1>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 181 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
+
+	mcasp8: mcasp@2b80000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02b80000 0x0 0x2000>,
+			<0x0 0x02b88000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 560 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 561 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc505>, <&main_udmap 0x4505>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 182 1>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 182 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
+
+	mcasp9: mcasp@2b90000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02b90000 0x0 0x2000>,
+			<0x0 0x02b98000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 562 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 563 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc506>, <&main_udmap 0x4506>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 183 1>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 183 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
+
+	mcasp10: mcasp@2ba0000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02ba0000 0x0 0x2000>,
+			<0x0 0x02ba8000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 564 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 565 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc507>, <&main_udmap 0x4507>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 184 1>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 184 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
+
+	mcasp11: mcasp@2bb0000 {
+		compatible = "ti,am33xx-mcasp-audio";
+		reg = <0x0 0x02bb0000 0x0 0x2000>,
+			<0x0 0x02bb8000 0x0 0x1000>;
+		reg-names = "mpu","dat";
+		interrupts = <GIC_SPI 566 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 567 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tx", "rx";
+
+		dmas = <&main_udmap 0xc508>, <&main_udmap 0x4508>;
+		dma-names = "tx", "rx";
+
+		clocks = <&k3_clks 185 1>;
+		clock-names = "fck";
+		power-domains = <&k3_pds 185 TI_SCI_PD_EXCLUSIVE>;
+
+		status = "disabled";
+	};
 };
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index 555dc7b..16c874b 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -121,4 +121,153 @@ wkup_gpio1: gpio@42100000 {
 		clocks = <&k3_clks 114 0>;
 		clock-names = "gpio";
 	};
+
+	mcu_i2c0: i2c@40b00000 {
+		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+		reg = <0x0 0x40b00000 0x0 0x100>;
+		interrupts = <GIC_SPI 852 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "fck";
+		clocks = <&k3_clks 194 0>;
+		power-domains = <&k3_pds 194 TI_SCI_PD_EXCLUSIVE>;
+	};
+
+	mcu_i2c1: i2c@40b10000 {
+		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+		reg = <0x0 0x40b10000 0x0 0x100>;
+		interrupts = <GIC_SPI 853 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "fck";
+		clocks = <&k3_clks 195 0>;
+		power-domains = <&k3_pds 195 TI_SCI_PD_EXCLUSIVE>;
+	};
+
+	wkup_i2c0: i2c@42120000 {
+		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+		reg = <0x0 0x42120000 0x0 0x100>;
+		interrupts = <GIC_SPI 896 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "fck";
+		clocks = <&k3_clks 197 0>;
+		power-domains = <&k3_pds 197 TI_SCI_PD_SHARED>;
+	};
+
+	fss: fss@47000000 {
+		compatible = "simple-bus";
+		reg = <0x0 0x47000000 0x0 0x100>;
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		ospi0: spi@47040000 {
+			compatible = "ti,am654-ospi";
+			reg = <0x0 0x47040000 0x0 0x100>,
+				<0x5 0x00000000 0x1 0x0000000>;
+			interrupts = <GIC_SPI 840 IRQ_TYPE_LEVEL_HIGH>;
+			cdns,fifo-depth = <256>;
+			cdns,fifo-width = <4>;
+			cdns,trigger-address = <0x0>;
+			clocks = <&k3_clks 103 0>;
+			assigned-clocks = <&k3_clks 103 0>;
+			assigned-clock-parents = <&k3_clks 103 2>;
+			assigned-clock-rates = <166666666>;
+			power-domains = <&k3_pds 103 TI_SCI_PD_EXCLUSIVE>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		ospi1: spi@47050000 {
+			compatible = "ti,am654-ospi";
+			reg = <0x0 0x47050000 0x0 0x100>,
+				<0x7 0x00000000 0x1 0x00000000>;
+			interrupts = <GIC_SPI 841 IRQ_TYPE_LEVEL_HIGH>;
+			cdns,fifo-depth = <256>;
+			cdns,fifo-width = <4>;
+			cdns,trigger-address = <0x0>;
+			clocks = <&k3_clks 104 0>;
+			power-domains = <&k3_pds 104 TI_SCI_PD_EXCLUSIVE>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+	};
+
+	tscadc0: tscadc@40200000 {
+		compatible = "ti,am3359-tscadc";
+		reg = <0x0 0x40200000 0x0 0x1000>;
+		interrupts = <GIC_SPI 860 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&k3_pds 0 TI_SCI_PD_EXCLUSIVE>;
+		clocks = <&k3_clks 0 1>;
+		assigned-clocks = <&k3_clks 0 3>;
+		assigned-clock-rates = <60000000>;
+		clock-names = "adc_tsc_fck";
+
+		adc {
+			#io-channel-cells = <1>;
+			compatible = "ti,am3359-adc";
+		};
+	};
+
+	tscadc1: tscadc@40210000 {
+		compatible = "ti,am3359-tscadc";
+		reg = <0x0 0x40210000 0x0 0x1000>;
+		interrupts = <GIC_SPI 861 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&k3_pds 1 TI_SCI_PD_EXCLUSIVE>;
+		clocks = <&k3_clks 1 1>;
+		assigned-clocks = <&k3_clks 1 3>;
+		assigned-clock-rates = <60000000>;
+		clock-names = "adc_tsc_fck";
+
+		adc {
+			#io-channel-cells = <1>;
+			compatible = "ti,am3359-adc";
+		};
+	};
+
+	mcu_navss {
+		compatible = "simple-mfd";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		dma-coherent;
+		dma-ranges;
+
+		ti,sci-dev-id = <232>;
+
+		mcu_ringacc: ringacc@2b800000 {
+			compatible = "ti,am654-navss-ringacc";
+			reg =	<0x0 0x2b800000 0x0 0x400000>,
+				<0x0 0x2b000000 0x0 0x400000>,
+				<0x0 0x28590000 0x0 0x100>,
+				<0x0 0x2a500000 0x0 0x40000>;
+			reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
+			ti,num-rings = <286>;
+			ti,sci-rm-range-gp-rings = <0x1>; /* GP ring range */
+			ti,sci = <&dmsc>;
+			ti,sci-dev-id = <235>;
+			msi-parent = <&main_udmass_inta>;
+		};
+
+		mcu_udmap: dma-controller@285c0000 {
+			compatible = "ti,j721e-navss-mcu-udmap";
+			reg =	<0x0 0x285c0000 0x0 0x100>,
+				<0x0 0x2a800000 0x0 0x40000>,
+				<0x0 0x2aa00000 0x0 0x40000>;
+			reg-names = "gcfg", "rchanrt", "tchanrt";
+			msi-parent = <&main_udmass_inta>;
+			#dma-cells = <1>;
+
+			ti,sci = <&dmsc>;
+			ti,sci-dev-id = <236>;
+			ti,ringacc = <&mcu_ringacc>;
+
+			ti,sci-rm-range-tchan = <0x0d>, /* TX_CHAN */
+						<0x0f>; /* TX_HCHAN */
+			ti,sci-rm-range-rchan = <0x0a>, /* RX_CHAN */
+						<0x0b>; /* RX_HCHAN */
+			ti,sci-rm-range-rflow = <0x00>; /* GP RFLOW */
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
index 1884fc7..7680109 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
@@ -27,3 +27,48 @@ secure_ddr: optee@9e800000 {
 		};
 	};
 };
+
+&wkup_pmx0 {
+	wkup_i2c0_pins_default: wkup_i2c0_pins_default {
+		pinctrl-single,pins = <
+			J721E_WKUP_IOPAD(0xf8, PIN_INPUT_PULLUP, 0) /* (J25) WKUP_I2C0_SCL */
+			J721E_WKUP_IOPAD(0xfc, PIN_INPUT_PULLUP, 0) /* (H24) WKUP_I2C0_SDA */
+		>;
+	};
+
+	mcu_fss0_ospi0_pins_default: mcu-fss0-ospi0-pins-default {
+		pinctrl-single,pins = <
+			J721E_WKUP_IOPAD(0x0000, PIN_OUTPUT, 0) /* MCU_OSPI0_CLK */
+			J721E_WKUP_IOPAD(0x0008, PIN_INPUT, 0)  /* MCU_OSPI0_DQS */
+			J721E_WKUP_IOPAD(0x000c, PIN_INPUT, 0)  /* MCU_OSPI0_D0 */
+			J721E_WKUP_IOPAD(0x0010, PIN_INPUT, 0)  /* MCU_OSPI0_D1 */
+			J721E_WKUP_IOPAD(0x0014, PIN_INPUT, 0)  /* MCU_OSPI0_D2 */
+			J721E_WKUP_IOPAD(0x0018, PIN_INPUT, 0)  /* MCU_OSPI0_D3 */
+			J721E_WKUP_IOPAD(0x001c, PIN_INPUT, 0)  /* MCU_OSPI0_D4 */
+			J721E_WKUP_IOPAD(0x0020, PIN_INPUT, 0)  /* MCU_OSPI0_D5 */
+			J721E_WKUP_IOPAD(0x0024, PIN_INPUT, 0)  /* MCU_OSPI0_D6 */
+			J721E_WKUP_IOPAD(0x0028, PIN_INPUT, 0)  /* MCU_OSPI0_D7 */
+			J721E_WKUP_IOPAD(0x002c, PIN_OUTPUT, 0) /* MCU_OSPI0_CSn0 */
+		>;
+	};
+};
+
+&ospi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mcu_fss0_ospi0_pins_default>;
+
+	flash@0{
+		compatible = "jedec,spi-nor";
+		reg = <0x0>;
+		spi-tx-bus-width = <1>;
+		spi-rx-bus-width = <8>;
+		spi-max-frequency = <40000000>;
+		cdns,tshsl-ns = <60>;
+		cdns,tsd2d-ns = <60>;
+		cdns,tchsh-ns = <60>;
+		cdns,tslch-ns = <60>;
+		cdns,read-delay = <0>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+	};
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e.dtsi b/arch/arm64/boot/dts/ti/k3-j721e.dtsi
index ee5470e..027bd1f 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e.dtsi
@@ -130,7 +130,7 @@ cbass_main: interconnect@100000 {
 			 <0x00 0x06000000 0x00 0x06000000 0x00 0x00400000>, /* USBSS0 */
 			 <0x00 0x06400000 0x00 0x06400000 0x00 0x00400000>, /* USBSS1 */
 			 <0x00 0x01000000 0x00 0x01000000 0x00 0x0af02400>, /* Most peripherals */
-			 <0x00 0x30800000 0x00 0x30800000 0x00 0x0bc00000>, /* MAIN NAVSS */
+			 <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>, /* MAIN NAVSS */
 			 <0x00 0x0d000000 0x00 0x0d000000 0x00 0x01000000>, /* PCIe Core*/
 			 <0x00 0x10000000 0x00 0x10000000 0x00 0x10000000>, /* PCIe DAT */
 			 <0x00 0x64800000 0x00 0x64800000 0x00 0x00800000>, /* C71 */
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
new file mode 100644
index 0000000..9868ca1
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Clock specification for Xilinx ZynqMP
+ *
+ * (C) Copyright 2017 - 2019, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ */
+
+#include <dt-bindings/clock/xlnx-zynqmp-clk.h>
+/ {
+	pss_ref_clk: pss_ref_clk {
+		u-boot,dm-pre-reloc;
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <33333333>;
+	};
+
+	video_clk: video_clk {
+		u-boot,dm-pre-reloc;
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <27000000>;
+	};
+
+	pss_alt_ref_clk: pss_alt_ref_clk {
+		u-boot,dm-pre-reloc;
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	gt_crx_ref_clk: gt_crx_ref_clk {
+		u-boot,dm-pre-reloc;
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <108000000>;
+	};
+
+	aux_ref_clk: aux_ref_clk {
+		u-boot,dm-pre-reloc;
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <27000000>;
+	};
+};
+
+&can0 {
+	clocks = <&zynqmp_clk CAN0_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&can1 {
+	clocks = <&zynqmp_clk CAN1_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&cpu0 {
+	clocks = <&zynqmp_clk ACPU>;
+};
+
+&fpd_dma_chan1 {
+	clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan2 {
+	clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan3 {
+	clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan4 {
+	clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan5 {
+	clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan6 {
+	clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan7 {
+	clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan8 {
+	clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan1 {
+	clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan2 {
+	clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan3 {
+	clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan4 {
+	clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan5 {
+	clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan6 {
+	clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan7 {
+	clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan8 {
+	clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&gem0 {
+	clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM0_REF>,
+		 <&zynqmp_clk GEM0_TX>, <&zynqmp_clk GEM0_RX>,
+		 <&zynqmp_clk GEM_TSU>;
+	clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
+};
+
+&gem1 {
+	clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM1_REF>,
+		 <&zynqmp_clk GEM1_TX>, <&zynqmp_clk GEM1_RX>,
+		 <&zynqmp_clk GEM_TSU>;
+	clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
+};
+
+&gem2 {
+	clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM2_REF>,
+		 <&zynqmp_clk GEM2_TX>, <&zynqmp_clk GEM2_RX>,
+		 <&zynqmp_clk GEM_TSU>;
+	clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
+};
+
+&gem3 {
+	clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM3_REF>,
+		 <&zynqmp_clk GEM3_TX>, <&zynqmp_clk GEM3_RX>,
+		 <&zynqmp_clk GEM_TSU>;
+	clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
+};
+
+&gpio {
+	clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&i2c0 {
+	clocks = <&zynqmp_clk I2C0_REF>;
+};
+
+&i2c1 {
+	clocks = <&zynqmp_clk I2C1_REF>;
+};
+
+&pcie {
+	clocks = <&zynqmp_clk PCIE_REF>;
+};
+
+&sata {
+	clocks = <&zynqmp_clk SATA_REF>;
+};
+
+&sdhci0 {
+	clocks = <&zynqmp_clk SDIO0_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&sdhci1 {
+	clocks = <&zynqmp_clk SDIO1_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&spi0 {
+	clocks = <&zynqmp_clk SPI0_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&spi1 {
+	clocks = <&zynqmp_clk SPI1_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&ttc0 {
+	clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&ttc1 {
+	clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&ttc2 {
+	clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&ttc3 {
+	clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&uart0 {
+	clocks = <&zynqmp_clk UART0_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&uart1 {
+	clocks = <&zynqmp_clk UART1_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&usb0 {
+	clocks = <&zynqmp_clk USB0_BUS_REF>, <&zynqmp_clk USB3_DUAL_REF>;
+};
+
+&usb1 {
+	clocks = <&zynqmp_clk USB1_BUS_REF>, <&zynqmp_clk USB3_DUAL_REF>;
+};
+
+&watchdog0 {
+	clocks = <&zynqmp_clk WDT>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi
deleted file mode 100644
index 306ad21..0000000
--- a/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi
+++ /dev/null
@@ -1,213 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Clock specification for Xilinx ZynqMP
- *
- * (C) Copyright 2015 - 2018, Xilinx, Inc.
- *
- * Michal Simek <michal.simek@xilinx.com>
- */
-
-/ {
-	clk100: clk100 {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <100000000>;
-	};
-
-	clk125: clk125 {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <125000000>;
-	};
-
-	clk200: clk200 {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <200000000>;
-	};
-
-	clk250: clk250 {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <250000000>;
-	};
-
-	clk300: clk300 {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <300000000>;
-	};
-
-	clk600: clk600 {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <600000000>;
-	};
-
-	dp_aclk: clock0 {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <100000000>;
-		clock-accuracy = <100>;
-	};
-
-	dp_aud_clk: clock1 {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <24576000>;
-		clock-accuracy = <100>;
-	};
-
-	dpdma_clk: dpdma-clk {
-		compatible = "fixed-clock";
-		#clock-cells = <0x0>;
-		clock-frequency = <533000000>;
-	};
-
-	drm_clock: drm-clock {
-		compatible = "fixed-clock";
-		#clock-cells = <0x0>;
-		clock-frequency = <262750000>;
-		clock-accuracy = <0x64>;
-	};
-};
-
-&can0 {
-	clocks = <&clk100 &clk100>;
-};
-
-&can1 {
-	clocks = <&clk100 &clk100>;
-};
-
-&fpd_dma_chan1 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&fpd_dma_chan2 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&fpd_dma_chan3 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&fpd_dma_chan4 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&fpd_dma_chan5 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&fpd_dma_chan6 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&fpd_dma_chan7 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&fpd_dma_chan8 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&lpd_dma_chan1 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&lpd_dma_chan2 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&lpd_dma_chan3 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&lpd_dma_chan4 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&lpd_dma_chan5 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&lpd_dma_chan6 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&lpd_dma_chan7 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&lpd_dma_chan8 {
-	clocks = <&clk600>, <&clk100>;
-};
-
-&gem0 {
-	clocks = <&clk125>, <&clk125>, <&clk125>;
-};
-
-&gem1 {
-	clocks = <&clk125>, <&clk125>, <&clk125>;
-};
-
-&gem2 {
-	clocks = <&clk125>, <&clk125>, <&clk125>;
-};
-
-&gem3 {
-	clocks = <&clk125>, <&clk125>, <&clk125>;
-};
-
-&gpio {
-	clocks = <&clk100>;
-};
-
-&i2c0 {
-	clocks = <&clk100>;
-};
-
-&i2c1 {
-	clocks = <&clk100>;
-};
-
-&sata {
-	clocks = <&clk250>;
-};
-
-&sdhci0 {
-	clocks = <&clk200 &clk200>;
-};
-
-&sdhci1 {
-	clocks = <&clk200 &clk200>;
-};
-
-&spi0 {
-	clocks = <&clk200 &clk200>;
-};
-
-&spi1 {
-	clocks = <&clk200 &clk200>;
-};
-
-&uart0 {
-	clocks = <&clk100 &clk100>;
-};
-
-&uart1 {
-	clocks = <&clk100 &clk100>;
-};
-
-&usb0 {
-	clocks = <&clk250>, <&clk250>;
-};
-
-&usb1 {
-	clocks = <&clk250>, <&clk250>;
-};
-
-&watchdog0 {
-	clocks = <&clk250>;
-};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts
index 0f7b4cf..2e05fa4 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts
@@ -2,7 +2,7 @@
 /*
  * dts file for Xilinx ZynqMP ZC1232
  *
- * (C) Copyright 2017 - 2018, Xilinx, Inc.
+ * (C) Copyright 2017 - 2019, Xilinx, Inc.
  *
  * Michal Simek <michal.simek@xilinx.com>
  */
@@ -10,7 +10,7 @@
 /dts-v1/;
 
 #include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
 
 / {
 	model = "ZynqMP ZC1232 RevA";
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts
index 9092828..3d0aaa0 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts
@@ -2,7 +2,7 @@
 /*
  * dts file for Xilinx ZynqMP ZC1254
  *
- * (C) Copyright 2015 - 2018, Xilinx, Inc.
+ * (C) Copyright 2015 - 2019, Xilinx, Inc.
  *
  * Michal Simek <michal.simek@xilinx.com>
  * Siva Durga Prasad Paladugu <sivadur@xilinx.com>
@@ -11,7 +11,7 @@
 /dts-v1/;
 
 #include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
 
 / {
 	model = "ZynqMP ZC1254 RevA";
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts
index 4f404c58..66a9048 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts
@@ -2,7 +2,7 @@
 /*
  * dts file for Xilinx ZynqMP ZC1275
  *
- * (C) Copyright 2017 - 2018, Xilinx, Inc.
+ * (C) Copyright 2017 - 2019, Xilinx, Inc.
  *
  * Michal Simek <michal.simek@xilinx.com>
  * Siva Durga Prasad Paladugu <sivadur@xilinx.com>
@@ -11,7 +11,7 @@
 /dts-v1/;
 
 #include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
 
 / {
 	model = "ZynqMP ZC1275 RevA";
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
index 9a3e39d..69f6e46 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
@@ -2,7 +2,7 @@
 /*
  * dts file for Xilinx ZynqMP zc1751-xm015-dc1
  *
- * (C) Copyright 2015 - 2018, Xilinx, Inc.
+ * (C) Copyright 2015 - 2019, Xilinx, Inc.
  *
  * Michal Simek <michal.simek@xilinx.com>
  */
@@ -10,7 +10,7 @@
 /dts-v1/;
 
 #include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 
 / {
@@ -73,7 +73,7 @@ &gem3 {
 	status = "okay";
 	phy-handle = <&phy0>;
 	phy-mode = "rgmii-id";
-	phy0: phy@0 {
+	phy0: ethernet-phy@0 {
 		reg = <0>;
 	};
 };
@@ -128,4 +128,5 @@ &uart0 {
 /* ULPI SMSC USB3320 */
 &usb0 {
 	status = "okay";
+	dr_mode = "host";
 };
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
index 2421ec7..4a86efa 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
@@ -2,7 +2,7 @@
 /*
  * dts file for Xilinx ZynqMP zc1751-xm016-dc2
  *
- * (C) Copyright 2015 - 2018, Xilinx, Inc.
+ * (C) Copyright 2015 - 2019, Xilinx, Inc.
  *
  * Michal Simek <michal.simek@xilinx.com>
  */
@@ -10,7 +10,7 @@
 /dts-v1/;
 
 #include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 
 / {
@@ -84,7 +84,7 @@ &gem2 {
 	status = "okay";
 	phy-handle = <&phy0>;
 	phy-mode = "rgmii-id";
-	phy0: phy@5 {
+	phy0: ethernet-phy@5 {
 		reg = <5>;
 		ti,rx-internal-delay = <0x8>;
 		ti,tx-internal-delay = <0xa>;
@@ -123,7 +123,7 @@ &spi0 {
 	status = "okay";
 	num-cs = <1>;
 
-	spi0_flash0: flash0@0 {
+	spi0_flash0: flash@0 {
 		#address-cells = <1>;
 		#size-cells = <1>;
 		compatible = "sst,sst25wf080", "jedec,spi-nor";
@@ -141,7 +141,7 @@ &spi1 {
 	status = "okay";
 	num-cs = <1>;
 
-	spi1_flash0: flash0@0 {
+	spi1_flash0: flash@0 {
 		#address-cells = <1>;
 		#size-cells = <1>;
 		compatible = "atmel,at45db041e", "atmel,at45", "atmel,dataflash";
@@ -158,6 +158,7 @@ partition@0 {
 /* ULPI SMSC USB3320 */
 &usb1 {
 	status = "okay";
+	dr_mode = "host";
 };
 
 &uart0 {
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts
index 7a49dee..4ea6ef5 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts
@@ -2,7 +2,7 @@
 /*
  * dts file for Xilinx ZynqMP zc1751-xm017-dc3
  *
- * (C) Copyright 2016 - 2018, Xilinx, Inc.
+ * (C) Copyright 2016 - 2019, Xilinx, Inc.
  *
  * Michal Simek <michal.simek@xilinx.com>
  */
@@ -10,7 +10,7 @@
 /dts-v1/;
 
 #include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
 
 / {
 	model = "ZynqMP zc1751-xm017-dc3 RevA";
@@ -73,7 +73,7 @@ &gem0 {
 	status = "okay";
 	phy-handle = <&phy0>;
 	phy-mode = "rgmii-id";
-	phy0: phy@0 { /* VSC8211 */
+	phy0: ethernet-phy@0 { /* VSC8211 */
 		reg = <0>;
 	};
 };
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
index 54c7b4f..2366cd9 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
@@ -2,7 +2,7 @@
 /*
  * dts file for Xilinx ZynqMP zc1751-xm018-dc4
  *
- * (C) Copyright 2015 - 2018, Xilinx, Inc.
+ * (C) Copyright 2015 - 2019, Xilinx, Inc.
  *
  * Michal Simek <michal.simek@xilinx.com>
  */
@@ -10,7 +10,7 @@
 /dts-v1/;
 
 #include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
 
 / {
 	model = "ZynqMP zc1751-xm018-dc4";
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts
index b8b5ff1..41934e3 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts
@@ -2,7 +2,7 @@
 /*
  * dts file for Xilinx ZynqMP zc1751-xm019-dc5
  *
- * (C) Copyright 2015 - 2018, Xilinx, Inc.
+ * (C) Copyright 2015 - 2019, Xilinx, Inc.
  *
  * Siva Durga Prasad <siva.durga.paladugu@xilinx.com>
  * Michal Simek <michal.simek@xilinx.com>
@@ -11,7 +11,7 @@
 /dts-v1/;
 
 #include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 
 / {
@@ -74,7 +74,7 @@ &gem1 {
 	status = "okay";
 	phy-handle = <&phy0>;
 	phy-mode = "rgmii-id";
-	phy0: phy@0 {
+	phy0: ethernet-phy@0 {
 		reg = <0>;
 	};
 };
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
index e5699d0..d60110a 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
@@ -2,7 +2,7 @@
 /*
  * dts file for Xilinx ZynqMP ZCU100 revC
  *
- * (C) Copyright 2016 - 2018, Xilinx, Inc.
+ * (C) Copyright 2016 - 2019, Xilinx, Inc.
  *
  * Michal Simek <michal.simek@xilinx.com>
  * Nathalie Chan King Choy
@@ -11,7 +11,7 @@
 /dts-v1/;
 
 #include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/gpio/gpio.h>
@@ -103,6 +103,11 @@ sdio_pwrseq: sdio-pwrseq {
 		reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
 		post-power-on-delay-ms = <10>;
 	};
+
+	ina226 {
+		compatible = "iio-hwmon";
+		io-channels = <&u35 0>, <&u35 1>, <&u35 2>, <&u35 3>;
+	};
 };
 
 &dcc {
@@ -191,8 +196,9 @@ i2csw_5: i2c@5 {
 			#size-cells = <0>;
 			reg = <5>;
 			/* PS_PMBUS */
-			ina226@40 { /* u35 */
+			u35: ina226@40 { /* u35 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
 				reg = <0x40>;
 				shunt-resistor = <10000>;
 				/* MIO31 is alert which should be routed to PMUFW */
@@ -226,7 +232,6 @@ &rtc {
 &sdhci0 {
 	status = "okay";
 	no-1-8-v;
-	broken-cd; /* CD has to be enabled by default */
 	disable-wp;
 };
 
@@ -251,11 +256,13 @@ wlcore: wifi@2 {
 &spi0 { /* Low Speed connector */
 	status = "okay";
 	label = "LS-SPI0";
+	num-cs = <1>;
 };
 
 &spi1 { /* High Speed connector */
 	status = "okay";
 	label = "HS-SPI1";
+	num-cs = <1>;
 };
 
 &uart0 {
@@ -274,11 +281,13 @@ &uart1 {
 /* ULPI SMSC USB3320 */
 &usb0 {
 	status = "okay";
+	dr_mode = "peripheral";
 };
 
 /* ULPI SMSC USB3320 */
 &usb1 {
 	status = "okay";
+	dr_mode = "host";
 };
 
 &watchdog0 {
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
index 2a3b665..4f80172 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
@@ -2,7 +2,7 @@
 /*
  * dts file for Xilinx ZynqMP ZCU102 RevA
  *
- * (C) Copyright 2015 - 2018, Xilinx, Inc.
+ * (C) Copyright 2015 - 2019, Xilinx, Inc.
  *
  * Michal Simek <michal.simek@xilinx.com>
  */
@@ -10,7 +10,7 @@
 /dts-v1/;
 
 #include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/gpio/gpio.h>
 
@@ -59,6 +59,79 @@ heartbeat-led {
 			linux,default-trigger = "heartbeat";
 		};
 	};
+
+	ina226-u76 {
+		compatible = "iio-hwmon";
+		io-channels = <&u76 0>, <&u76 1>, <&u76 2>, <&u76 3>;
+	};
+	ina226-u77 {
+		compatible = "iio-hwmon";
+		io-channels = <&u77 0>, <&u77 1>, <&u77 2>, <&u77 3>;
+	};
+	ina226-u78 {
+		compatible = "iio-hwmon";
+		io-channels = <&u78 0>, <&u78 1>, <&u78 2>, <&u78 3>;
+	};
+	ina226-u87 {
+		compatible = "iio-hwmon";
+		io-channels = <&u87 0>, <&u87 1>, <&u87 2>, <&u87 3>;
+	};
+	ina226-u85 {
+		compatible = "iio-hwmon";
+		io-channels = <&u85 0>, <&u85 1>, <&u85 2>, <&u85 3>;
+	};
+	ina226-u86 {
+		compatible = "iio-hwmon";
+		io-channels = <&u86 0>, <&u86 1>, <&u86 2>, <&u86 3>;
+	};
+	ina226-u93 {
+		compatible = "iio-hwmon";
+		io-channels = <&u93 0>, <&u93 1>, <&u93 2>, <&u93 3>;
+	};
+	ina226-u88 {
+		compatible = "iio-hwmon";
+		io-channels = <&u88 0>, <&u88 1>, <&u88 2>, <&u88 3>;
+	};
+	ina226-u15 {
+		compatible = "iio-hwmon";
+		io-channels = <&u15 0>, <&u15 1>, <&u15 2>, <&u15 3>;
+	};
+	ina226-u92 {
+		compatible = "iio-hwmon";
+		io-channels = <&u92 0>, <&u92 1>, <&u92 2>, <&u92 3>;
+	};
+	ina226-u79 {
+		compatible = "iio-hwmon";
+		io-channels = <&u79 0>, <&u79 1>, <&u79 2>, <&u79 3>;
+	};
+	ina226-u81 {
+		compatible = "iio-hwmon";
+		io-channels = <&u81 0>, <&u81 1>, <&u81 2>, <&u81 3>;
+	};
+	ina226-u80 {
+		compatible = "iio-hwmon";
+		io-channels = <&u80 0>, <&u80 1>, <&u80 2>, <&u80 3>;
+	};
+	ina226-u84 {
+		compatible = "iio-hwmon";
+		io-channels = <&u84 0>, <&u84 1>, <&u84 2>, <&u84 3>;
+	};
+	ina226-u16 {
+		compatible = "iio-hwmon";
+		io-channels = <&u16 0>, <&u16 1>, <&u16 2>, <&u16 3>;
+	};
+	ina226-u65 {
+		compatible = "iio-hwmon";
+		io-channels = <&u65 0>, <&u65 1>, <&u65 2>, <&u65 3>;
+	};
+	ina226-u74 {
+		compatible = "iio-hwmon";
+		io-channels = <&u74 0>, <&u74 1>, <&u74 2>, <&u74 3>;
+	};
+	ina226-u75 {
+		compatible = "iio-hwmon";
+		io-channels = <&u75 0>, <&u75 1>, <&u75 2>, <&u75 3>;
+	};
 };
 
 &can1 {
@@ -105,7 +178,7 @@ &gem3 {
 	status = "okay";
 	phy-handle = <&phy0>;
 	phy-mode = "rgmii-id";
-	phy0: phy@21 {
+	phy0: ethernet-phy@21 {
 		reg = <21>;
 		ti,rx-internal-delay = <0x8>;
 		ti,tx-internal-delay = <0xa>;
@@ -125,21 +198,11 @@ &i2c0 {
 	tca6416_u97: gpio@20 {
 		compatible = "ti,tca6416";
 		reg = <0x20>;
-		gpio-controller;
+		gpio-controller; /* IRQ not connected */
 		#gpio-cells = <2>;
-		/*
-		 * IRQ not connected
-		 * Lines:
-		 * 0 - PS_GTR_LAN_SEL0
-		 * 1 - PS_GTR_LAN_SEL1
-		 * 2 - PS_GTR_LAN_SEL2
-		 * 3 - PS_GTR_LAN_SEL3
-		 * 4 - PCI_CLK_DIR_SEL
-		 * 5 - IIC_MUX_RESET_B
-		 * 6 - GEM3_EXP_RESET_B
-		 * 7, 10 - 17 - not connected
-		 */
-
+		gpio-line-names = "PS_GTR_LAN_SEL0", "PS_GTR_LAN_SEL1", "PS_GTR_LAN_SEL2", "PS_GTR_LAN_SEL3",
+				"PCI_CLK_DIR_SEL", "IIC_MUX_RESET_B", "GEM3_EXP_RESET_B",
+				"", "", "", "", "", "", "", "", "";
 		gtr-sel0 {
 			gpio-hog;
 			gpios = <0 0>;
@@ -169,27 +232,12 @@ gtr-sel3 {
 	tca6416_u61: gpio@21 {
 		compatible = "ti,tca6416";
 		reg = <0x21>;
-		gpio-controller;
+		gpio-controller; /* IRQ not connected */
 		#gpio-cells = <2>;
-		/*
-		 * IRQ not connected
-		 * Lines:
-		 * 0 - VCCPSPLL_EN
-		 * 1 - MGTRAVCC_EN
-		 * 2 - MGTRAVTT_EN
-		 * 3 - VCCPSDDRPLL_EN
-		 * 4 - MIO26_PMU_INPUT_LS
-		 * 5 - PL_PMBUS_ALERT
-		 * 6 - PS_PMBUS_ALERT
-		 * 7 - MAXIM_PMBUS_ALERT
-		 * 10 - PL_DDR4_VTERM_EN
-		 * 11 - PL_DDR4_VPP_2V5_EN
-		 * 12 - PS_DIMM_VDDQ_TO_PSVCCO_ON
-		 * 13 - PS_DIMM_SUSPEND_EN
-		 * 14 - PS_DDR4_VTERM_EN
-		 * 15 - PS_DDR4_VPP_2V5_EN
-		 * 16 - 17 - not connected
-		 */
+		gpio-line-names = "VCCPSPLL_EN", "MGTRAVCC_EN", "MGTRAVTT_EN", "VCCPSDDRPLL_EN", "MIO26_PMU_INPUT_LS",
+				"PL_PMBUS_ALERT", "PS_PMBUS_ALERT", "MAXIM_PMBUS_ALERT", "PL_DDR4_VTERM_EN",
+				"PL_DDR4_VPP_2V5_EN", "PS_DIMM_VDDQ_TO_PSVCCO_ON", "PS_DIMM_SUSPEND_EN",
+				"PS_DDR4_VTERM_EN", "PS_DDR4_VPP_2V5_EN", "", "";
 	};
 
 	i2c-mux@75 { /* u60 */
@@ -202,53 +250,73 @@ i2c@0 {
 			#size-cells = <0>;
 			reg = <0>;
 			/* PS_PMBUS */
-			ina226@40 { /* u76 */
+			u76: ina226@40 { /* u76 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u76";
 				reg = <0x40>;
 				shunt-resistor = <5000>;
 			};
-			ina226@41 { /* u77 */
+			u77: ina226@41 { /* u77 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u77";
 				reg = <0x41>;
 				shunt-resistor = <5000>;
 			};
-			ina226@42 { /* u78 */
+			u78: ina226@42 { /* u78 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u78";
 				reg = <0x42>;
 				shunt-resistor = <5000>;
 			};
-			ina226@43 { /* u87 */
+			u87: ina226@43 { /* u87 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u87";
 				reg = <0x43>;
 				shunt-resistor = <5000>;
 			};
-			ina226@44 { /* u85 */
+			u85: ina226@44 { /* u85 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u85";
 				reg = <0x44>;
 				shunt-resistor = <5000>;
 			};
-			ina226@45 { /* u86 */
+			u86: ina226@45 { /* u86 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u86";
 				reg = <0x45>;
 				shunt-resistor = <5000>;
 			};
-			ina226@46 { /* u93 */
+			u93: ina226@46 { /* u93 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u93";
 				reg = <0x46>;
 				shunt-resistor = <5000>;
 			};
-			ina226@47 { /* u88 */
+			u88: ina226@47 { /* u88 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u88";
 				reg = <0x47>;
 				shunt-resistor = <5000>;
 			};
-			ina226@4a { /* u15 */
+			u15: ina226@4a { /* u15 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u15";
 				reg = <0x4a>;
 				shunt-resistor = <5000>;
 			};
-			ina226@4b { /* u92 */
+			u92: ina226@4b { /* u92 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u92";
 				reg = <0x4b>;
 				shunt-resistor = <5000>;
 			};
@@ -258,43 +326,59 @@ i2c@1 {
 			#size-cells = <0>;
 			reg = <1>;
 			/* PL_PMBUS */
-			ina226@40 { /* u79 */
+			u79: ina226@40 { /* u79 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u79";
 				reg = <0x40>;
 				shunt-resistor = <2000>;
 			};
-			ina226@41 { /* u81 */
+			u81: ina226@41 { /* u81 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u81";
 				reg = <0x41>;
 				shunt-resistor = <5000>;
 			};
-			ina226@42 { /* u80 */
+			u80: ina226@42 { /* u80 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u80";
 				reg = <0x42>;
 				shunt-resistor = <5000>;
 			};
-			ina226@43 { /* u84 */
+			u84: ina226@43 { /* u84 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u84";
 				reg = <0x43>;
 				shunt-resistor = <5000>;
 			};
-			ina226@44 { /* u16 */
+			u16: ina226@44 { /* u16 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u16";
 				reg = <0x44>;
 				shunt-resistor = <5000>;
 			};
-			ina226@45 { /* u65 */
+			u65: ina226@45 { /* u65 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u65";
 				reg = <0x45>;
 				shunt-resistor = <5000>;
 			};
-			ina226@46 { /* u74 */
+			u74: ina226@46 { /* u74 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u74";
 				reg = <0x46>;
 				shunt-resistor = <5000>;
 			};
-			ina226@47 { /* u75 */
+			u75: ina226@47 { /* u75 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u75";
 				reg = <0x47>;
 				shunt-resistor = <5000>;
 			};
@@ -414,6 +498,7 @@ si570_1: clock-generator@5d { /* USER SI570 - u42 */
 				temperature-stability = <50>;
 				factory-fout = <300000000>;
 				clock-frequency = <300000000>;
+				clock-output-names = "si570_user";
 			};
 		};
 		i2c@3 {
@@ -427,6 +512,7 @@ si570_2: clock-generator@5d { /* USER MGT SI570 - u56 */
 				temperature-stability = <50>; /* copy from zc702 */
 				factory-fout = <156250000>;
 				clock-frequency = <148500000>;
+				clock-output-names = "si570_mgt";
 			};
 		};
 		i2c@4 {
@@ -540,6 +626,7 @@ &uart1 {
 /* ULPI SMSC USB3320 */
 &usb0 {
 	status = "okay";
+	dr_mode = "host";
 };
 
 &watchdog0 {
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
index 1780ed2..d9ad8a4 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
@@ -16,7 +16,7 @@ / {
 
 &gem3 {
 	phy-handle = <&phyc>;
-	phyc: phy@c {
+	phyc: ethernet-phy@c {
 		reg = <0xc>;
 		ti,rx-internal-delay = <0x8>;
 		ti,tx-internal-delay = <0xa>;
@@ -24,7 +24,7 @@ phyc: phy@c {
 		ti,dp83867-rxctrl-strap-quirk;
 	};
 	/* Cleanup from RevA */
-	/delete-node/ phy@21;
+	/delete-node/ ethernet-phy@21;
 };
 
 /* Fix collision with u61 */
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
index 8f45614..7a4614e 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
@@ -2,7 +2,7 @@
 /*
  * dts file for Xilinx ZynqMP ZCU104
  *
- * (C) Copyright 2017 - 2018, Xilinx, Inc.
+ * (C) Copyright 2017 - 2019, Xilinx, Inc.
  *
  * Michal Simek <michal.simek@xilinx.com>
  */
@@ -10,7 +10,7 @@
 /dts-v1/;
 
 #include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 
 / {
@@ -50,7 +50,7 @@ &gem3 {
 	status = "okay";
 	phy-handle = <&phy0>;
 	phy-mode = "rgmii-id";
-	phy0: phy@c {
+	phy0: ethernet-phy@c {
 		reg = <0xc>;
 		ti,rx-internal-delay = <0x8>;
 		ti,tx-internal-delay = <0xa>;
@@ -118,9 +118,9 @@ i2c@4 {
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <4>;
-			tca6416_u97: gpio@21 {
+			tca6416_u97: gpio@20 {
 				compatible = "ti,tca6416";
-				reg = <0x21>;
+				reg = <0x20>;
 				gpio-controller;
 				#gpio-cells = <2>;
 				/*
@@ -189,6 +189,7 @@ &uart1 {
 /* ULPI SMSC USB3320 */
 &usb0 {
 	status = "okay";
+	dr_mode = "host";
 };
 
 &watchdog0 {
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
index 93ce7eb..6e9efe2 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
@@ -2,7 +2,7 @@
 /*
  * dts file for Xilinx ZynqMP ZCU106
  *
- * (C) Copyright 2016, Xilinx, Inc.
+ * (C) Copyright 2016 - 2019, Xilinx, Inc.
  *
  * Michal Simek <michal.simek@xilinx.com>
  */
@@ -10,7 +10,7 @@
 /dts-v1/;
 
 #include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/gpio/gpio.h>
 
@@ -59,6 +59,79 @@ heartbeat-led {
 			linux,default-trigger = "heartbeat";
 		};
 	};
+
+	ina226-u76 {
+		compatible = "iio-hwmon";
+		io-channels = <&u76 0>, <&u76 1>, <&u76 2>, <&u76 3>;
+	};
+	ina226-u77 {
+		compatible = "iio-hwmon";
+		io-channels = <&u77 0>, <&u77 1>, <&u77 2>, <&u77 3>;
+	};
+	ina226-u78 {
+		compatible = "iio-hwmon";
+		io-channels = <&u78 0>, <&u78 1>, <&u78 2>, <&u78 3>;
+	};
+	ina226-u87 {
+		compatible = "iio-hwmon";
+		io-channels = <&u87 0>, <&u87 1>, <&u87 2>, <&u87 3>;
+	};
+	ina226-u85 {
+		compatible = "iio-hwmon";
+		io-channels = <&u85 0>, <&u85 1>, <&u85 2>, <&u85 3>;
+	};
+	ina226-u86 {
+		compatible = "iio-hwmon";
+		io-channels = <&u86 0>, <&u86 1>, <&u86 2>, <&u86 3>;
+	};
+	ina226-u93 {
+		compatible = "iio-hwmon";
+		io-channels = <&u93 0>, <&u93 1>, <&u93 2>, <&u93 3>;
+	};
+	ina226-u88 {
+		compatible = "iio-hwmon";
+		io-channels = <&u88 0>, <&u88 1>, <&u88 2>, <&u88 3>;
+	};
+	ina226-u15 {
+		compatible = "iio-hwmon";
+		io-channels = <&u15 0>, <&u15 1>, <&u15 2>, <&u15 3>;
+	};
+	ina226-u92 {
+		compatible = "iio-hwmon";
+		io-channels = <&u92 0>, <&u92 1>, <&u92 2>, <&u92 3>;
+	};
+	ina226-u79 {
+		compatible = "iio-hwmon";
+		io-channels = <&u79 0>, <&u79 1>, <&u79 2>, <&u79 3>;
+	};
+	ina226-u81 {
+		compatible = "iio-hwmon";
+		io-channels = <&u81 0>, <&u81 1>, <&u81 2>, <&u81 3>;
+	};
+	ina226-u80 {
+		compatible = "iio-hwmon";
+		io-channels = <&u80 0>, <&u80 1>, <&u80 2>, <&u80 3>;
+	};
+	ina226-u84 {
+		compatible = "iio-hwmon";
+		io-channels = <&u84 0>, <&u84 1>, <&u84 2>, <&u84 3>;
+	};
+	ina226-u16 {
+		compatible = "iio-hwmon";
+		io-channels = <&u16 0>, <&u16 1>, <&u16 2>, <&u16 3>;
+	};
+	ina226-u65 {
+		compatible = "iio-hwmon";
+		io-channels = <&u65 0>, <&u65 1>, <&u65 2>, <&u65 3>;
+	};
+	ina226-u74 {
+		compatible = "iio-hwmon";
+		io-channels = <&u74 0>, <&u74 1>, <&u74 2>, <&u74 3>;
+	};
+	ina226-u75 {
+		compatible = "iio-hwmon";
+		io-channels = <&u75 0>, <&u75 1>, <&u75 2>, <&u75 3>;
+	};
 };
 
 &can1 {
@@ -106,7 +179,7 @@ &gem3 {
 	status = "okay";
 	phy-handle = <&phy0>;
 	phy-mode = "rgmii-id";
-	phy0: phy@c {
+	phy0: ethernet-phy@c {
 		reg = <0xc>;
 		ti,rx-internal-delay = <0x8>;
 		ti,tx-internal-delay = <0xa>;
@@ -177,53 +250,73 @@ i2c@0 {
 			#size-cells = <0>;
 			reg = <0>;
 			/* PS_PMBUS */
-			ina226@40 { /* u76 */
+			u76: ina226@40 { /* u76 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u76";
 				reg = <0x40>;
 				shunt-resistor = <5000>;
 			};
-			ina226@41 { /* u77 */
+			u77: ina226@41 { /* u77 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u77";
 				reg = <0x41>;
 				shunt-resistor = <5000>;
 			};
-			ina226@42 { /* u78 */
+			u78: ina226@42 { /* u78 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u78";
 				reg = <0x42>;
 				shunt-resistor = <5000>;
 			};
-			ina226@43 { /* u87 */
+			u87: ina226@43 { /* u87 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u87";
 				reg = <0x43>;
 				shunt-resistor = <5000>;
 			};
-			ina226@44 { /* u85 */
+			u85: ina226@44 { /* u85 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u85";
 				reg = <0x44>;
 				shunt-resistor = <5000>;
 			};
-			ina226@45 { /* u86 */
+			u86: ina226@45 { /* u86 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u86";
 				reg = <0x45>;
 				shunt-resistor = <5000>;
 			};
-			ina226@46 { /* u93 */
+			u93: ina226@46 { /* u93 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u93";
 				reg = <0x46>;
 				shunt-resistor = <5000>;
 			};
-			ina226@47 { /* u88 */
+			u88: ina226@47 { /* u88 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u88";
 				reg = <0x47>;
 				shunt-resistor = <5000>;
 			};
-			ina226@4a { /* u15 */
+			u15: ina226@4a { /* u15 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u15";
 				reg = <0x4a>;
 				shunt-resistor = <5000>;
 			};
-			ina226@4b { /* u92 */
+			u92: ina226@4b { /* u92 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u92";
 				reg = <0x4b>;
 				shunt-resistor = <5000>;
 			};
@@ -233,43 +326,59 @@ i2c@1 {
 			#size-cells = <0>;
 			reg = <1>;
 			/* PL_PMBUS */
-			ina226@40 { /* u79 */
+			u79: ina226@40 { /* u79 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u79";
 				reg = <0x40>;
 				shunt-resistor = <2000>;
 			};
-			ina226@41 { /* u81 */
+			u81: ina226@41 { /* u81 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u81";
 				reg = <0x41>;
 				shunt-resistor = <5000>;
 			};
-			ina226@42 { /* u80 */
+			u80: ina226@42 { /* u80 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u80";
 				reg = <0x42>;
 				shunt-resistor = <5000>;
 			};
-			ina226@43 { /* u84 */
+			u84: ina226@43 { /* u84 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u84";
 				reg = <0x43>;
 				shunt-resistor = <5000>;
 			};
-			ina226@44 { /* u16 */
+			u16: ina226@44 { /* u16 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u16";
 				reg = <0x44>;
 				shunt-resistor = <5000>;
 			};
-			ina226@45 { /* u65 */
+			u65: ina226@45 { /* u65 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u65";
 				reg = <0x45>;
 				shunt-resistor = <5000>;
 			};
-			ina226@46 { /* u74 */
+			u74: ina226@46 { /* u74 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u74";
 				reg = <0x46>;
 				shunt-resistor = <5000>;
 			};
-			ina226@47 { /* u75 */
+			u75: ina226@47 { /* u75 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u75";
 				reg = <0x47>;
 				shunt-resistor = <5000>;
 			};
@@ -388,6 +497,7 @@ si570_1: clock-generator@5d { /* USER SI570 - u42 */
 				temperature-stability = <50>;
 				factory-fout = <300000000>;
 				clock-frequency = <300000000>;
+				clock-output-names = "si570_user";
 			};
 		};
 		i2c@3 {
@@ -401,6 +511,7 @@ si570_2: clock-generator@5d { /* USER MGT SI570 - u56 */
 				temperature-stability = <50>; /* copy from zc702 */
 				factory-fout = <156250000>;
 				clock-frequency = <148500000>;
+				clock-output-names = "si570_mgt";
 			};
 		};
 		i2c@4 {
@@ -514,6 +625,7 @@ &uart1 {
 /* ULPI SMSC USB3320 */
 &usb0 {
 	status = "okay";
+	dr_mode = "host";
 };
 
 &watchdog0 {
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
index 8bb0001..2e92634 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
@@ -2,7 +2,7 @@
 /*
  * dts file for Xilinx ZynqMP ZCU111
  *
- * (C) Copyright 2017 - 2018, Xilinx, Inc.
+ * (C) Copyright 2017 - 2019, Xilinx, Inc.
  *
  * Michal Simek <michal.simek@xilinx.com>
  */
@@ -10,7 +10,7 @@
 /dts-v1/;
 
 #include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/gpio/gpio.h>
 
@@ -59,6 +59,63 @@ heartbeat-led {
 			linux,default-trigger = "heartbeat";
 		};
 	};
+
+	ina226-u67 {
+		compatible = "iio-hwmon";
+		io-channels = <&u67 0>, <&u67 1>, <&u67 2>, <&u67 3>;
+	};
+	ina226-u59 {
+		compatible = "iio-hwmon";
+		io-channels = <&u59 0>, <&u59 1>, <&u59 2>, <&u59 3>;
+	};
+	ina226-u61 {
+		compatible = "iio-hwmon";
+		io-channels = <&u61 0>, <&u61 1>, <&u61 2>, <&u61 3>;
+	};
+	ina226-u60 {
+		compatible = "iio-hwmon";
+		io-channels = <&u60 0>, <&u60 1>, <&u60 2>, <&u60 3>;
+	};
+	ina226-u64 {
+		compatible = "iio-hwmon";
+		io-channels = <&u64 0>, <&u64 1>, <&u64 2>, <&u64 3>;
+	};
+	ina226-u69 {
+		compatible = "iio-hwmon";
+		io-channels = <&u69 0>, <&u69 1>, <&u69 2>, <&u69 3>;
+	};
+	ina226-u66 {
+		compatible = "iio-hwmon";
+		io-channels = <&u66 0>, <&u66 1>, <&u66 2>, <&u66 3>;
+	};
+	ina226-u65 {
+		compatible = "iio-hwmon";
+		io-channels = <&u65 0>, <&u65 1>, <&u65 2>, <&u65 3>;
+	};
+	ina226-u63 {
+		compatible = "iio-hwmon";
+		io-channels = <&u63 0>, <&u63 1>, <&u63 2>, <&u63 3>;
+	};
+	ina226-u3 {
+		compatible = "iio-hwmon";
+		io-channels = <&u3 0>, <&u3 1>, <&u3 2>, <&u3 3>;
+	};
+	ina226-u71 {
+		compatible = "iio-hwmon";
+		io-channels = <&u71 0>, <&u71 1>, <&u71 2>, <&u71 3>;
+	};
+	ina226-u77 {
+		compatible = "iio-hwmon";
+		io-channels = <&u77 0>, <&u77 1>, <&u77 2>, <&u77 3>;
+	};
+	ina226-u73 {
+		compatible = "iio-hwmon";
+		io-channels = <&u73 0>, <&u73 1>, <&u73 2>, <&u73 3>;
+	};
+	ina226-u79 {
+		compatible = "iio-hwmon";
+		io-channels = <&u79 0>, <&u79 1>, <&u79 2>, <&u79 3>;
+	};
 };
 
 &dcc {
@@ -101,7 +158,7 @@ &gem3 {
 	status = "okay";
 	phy-handle = <&phy0>;
 	phy-mode = "rgmii-id";
-	phy0: phy@c {
+	phy0: ethernet-phy@c {
 		reg = <0xc>;
 		ti,rx-internal-delay = <0x8>;
 		ti,tx-internal-delay = <0xa>;
@@ -152,73 +209,101 @@ i2c@0 {
 			reg = <0>;
 			/* PS_PMBUS */
 			/* PMBUS_ALERT done via pca9544 */
-			ina226@40 { /* u67 */
+			u67: ina226@40 { /* u67 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u67";
 				reg = <0x40>;
 				shunt-resistor = <2000>;
 			};
-			ina226@41 { /* u59 */
+			u59: ina226@41 { /* u59 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u59";
 				reg = <0x41>;
 				shunt-resistor = <5000>;
 			};
-			ina226@42 { /* u61 */
+			u61: ina226@42 { /* u61 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u61";
 				reg = <0x42>;
 				shunt-resistor = <5000>;
 			};
-			ina226@43 { /* u60 */
+			u60: ina226@43 { /* u60 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u60";
 				reg = <0x43>;
 				shunt-resistor = <5000>;
 			};
-			ina226@45 { /* u64 */
+			u64: ina226@45 { /* u64 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u64";
 				reg = <0x45>;
 				shunt-resistor = <5000>;
 			};
-			ina226@46 { /* u69 */
+			u69: ina226@46 { /* u69 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u69";
 				reg = <0x46>;
 				shunt-resistor = <2000>;
 			};
-			ina226@47 { /* u66 */
+			u66: ina226@47 { /* u66 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u66";
 				reg = <0x47>;
 				shunt-resistor = <5000>;
 			};
-			ina226@48 { /* u65 */
+			u65: ina226@48 { /* u65 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u65";
 				reg = <0x48>;
 				shunt-resistor = <5000>;
 			};
-			ina226@49 { /* u63 */
+			u63: ina226@49 { /* u63 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u63";
 				reg = <0x49>;
 				shunt-resistor = <5000>;
 			};
-			ina226@4a { /* u3 */
+			u3: ina226@4a { /* u3 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u3";
 				reg = <0x4a>;
 				shunt-resistor = <5000>;
 			};
-			ina226@4b { /* u71 */
+			u71: ina226@4b { /* u71 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u71";
 				reg = <0x4b>;
 				shunt-resistor = <5000>;
 			};
-			ina226@4c { /* u77 */
+			u77: ina226@4c { /* u77 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u77";
 				reg = <0x4c>;
 				shunt-resistor = <5000>;
 			};
-			ina226@4d { /* u73 */
+			u73: ina226@4d { /* u73 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u73";
 				reg = <0x4d>;
 				shunt-resistor = <5000>;
 			};
-			ina226@4e { /* u79 */
+			u79: ina226@4e { /* u79 */
 				compatible = "ti,ina226";
+				#io-channel-cells = <1>;
+				label = "ina226-u79";
 				reg = <0x4e>;
 				shunt-resistor = <5000>;
 			};
@@ -304,6 +389,7 @@ si570_1: clock-generator@5d { /* USER SI570 - u47 */
 				temperature-stability = <50>;
 				factory-fout = <300000000>;
 				clock-frequency = <300000000>;
+				clock-output-names = "si570_user";
 			};
 		};
 		i2c@3 {
@@ -316,7 +402,8 @@ si570_2: clock-generator@5d { /* USER MGT SI570 - u49 */
 				reg = <0x5d>;
 				temperature-stability = <50>;
 				factory-fout = <156250000>;
-				clock-frequency = <148500000>;
+				clock-frequency = <156250000>;
+				clock-output-names = "si570_mgt";
 			};
 		};
 		i2c@4 {
@@ -440,4 +527,5 @@ &uart0 {
 /* ULPI SMSC USB3320 */
 &usb0 {
 	status = "okay";
+	dr_mode = "host";
 };
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 3c731e7..26d926e 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -2,7 +2,7 @@
 /*
  * dts file for Xilinx ZynqMP
  *
- * (C) Copyright 2014 - 2015, Xilinx, Inc.
+ * (C) Copyright 2014 - 2019, Xilinx, Inc.
  *
  * Michal Simek <michal.simek@xilinx.com>
  *
@@ -12,6 +12,8 @@
  * the License, or (at your option) any later version.
  */
 
+#include <dt-bindings/power/xlnx-zynqmp-power.h>
+
 / {
 	compatible = "xlnx,zynqmp";
 	#address-cells = <2>;
@@ -118,8 +120,31 @@ psci {
 	firmware {
 		zynqmp_firmware: zynqmp-firmware {
 			compatible = "xlnx,zynqmp-firmware";
+			#power-domain-cells = <1>;
 			method = "smc";
 
+			zynqmp_power: zynqmp-power {
+				compatible = "xlnx,zynqmp-power";
+				interrupt-parent = <&gic>;
+				interrupts = <0 35 4>;
+			};
+
+			zynqmp_clk: clock-controller {
+				u-boot,dm-pre-reloc;
+				#clock-cells = <1>;
+				compatible = "xlnx,zynqmp-clk";
+				clocks = <&pss_ref_clk>,
+					 <&video_clk>,
+					 <&pss_alt_ref_clk>,
+					 <&aux_ref_clk>,
+					 <&gt_crx_ref_clk>;
+				clock-names = "pss_ref_clk",
+					      "video_clk",
+					      "pss_alt_ref_clk",
+					      "aux_ref_clk",
+					      "gt_crx_ref_clk";
+			};
+
 			nvmem_firmware {
 				compatible = "xlnx,zynqmp-nvmem-fw";
 				#address-cells = <1>;
@@ -187,6 +212,7 @@ can0: can@ff060000 {
 			interrupt-parent = <&gic>;
 			tx-fifo-depth = <0x40>;
 			rx-fifo-depth = <0x40>;
+			power-domains = <&zynqmp_firmware PD_CAN_0>;
 		};
 
 		can1: can@ff070000 {
@@ -198,6 +224,7 @@ can1: can@ff070000 {
 			interrupt-parent = <&gic>;
 			tx-fifo-depth = <0x40>;
 			rx-fifo-depth = <0x40>;
+			power-domains = <&zynqmp_firmware PD_CAN_1>;
 		};
 
 		cci: cci@fd6e0000 {
@@ -228,6 +255,7 @@ fpd_dma_chan1: dma@fd500000 {
 			interrupts = <0 124 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <128>;
+			power-domains = <&zynqmp_firmware PD_GDMA>;
 		};
 
 		fpd_dma_chan2: dma@fd510000 {
@@ -238,6 +266,7 @@ fpd_dma_chan2: dma@fd510000 {
 			interrupts = <0 125 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <128>;
+			power-domains = <&zynqmp_firmware PD_GDMA>;
 		};
 
 		fpd_dma_chan3: dma@fd520000 {
@@ -248,6 +277,7 @@ fpd_dma_chan3: dma@fd520000 {
 			interrupts = <0 126 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <128>;
+			power-domains = <&zynqmp_firmware PD_GDMA>;
 		};
 
 		fpd_dma_chan4: dma@fd530000 {
@@ -258,6 +288,7 @@ fpd_dma_chan4: dma@fd530000 {
 			interrupts = <0 127 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <128>;
+			power-domains = <&zynqmp_firmware PD_GDMA>;
 		};
 
 		fpd_dma_chan5: dma@fd540000 {
@@ -268,6 +299,7 @@ fpd_dma_chan5: dma@fd540000 {
 			interrupts = <0 128 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <128>;
+			power-domains = <&zynqmp_firmware PD_GDMA>;
 		};
 
 		fpd_dma_chan6: dma@fd550000 {
@@ -278,6 +310,7 @@ fpd_dma_chan6: dma@fd550000 {
 			interrupts = <0 129 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <128>;
+			power-domains = <&zynqmp_firmware PD_GDMA>;
 		};
 
 		fpd_dma_chan7: dma@fd560000 {
@@ -288,6 +321,7 @@ fpd_dma_chan7: dma@fd560000 {
 			interrupts = <0 130 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <128>;
+			power-domains = <&zynqmp_firmware PD_GDMA>;
 		};
 
 		fpd_dma_chan8: dma@fd570000 {
@@ -298,6 +332,7 @@ fpd_dma_chan8: dma@fd570000 {
 			interrupts = <0 131 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <128>;
+			power-domains = <&zynqmp_firmware PD_GDMA>;
 		};
 
 		/* LPDDMA default allows only secured access. inorder to enable
@@ -312,6 +347,7 @@ lpd_dma_chan1: dma@ffa80000 {
 			interrupts = <0 77 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <64>;
+			power-domains = <&zynqmp_firmware PD_ADMA>;
 		};
 
 		lpd_dma_chan2: dma@ffa90000 {
@@ -322,6 +358,7 @@ lpd_dma_chan2: dma@ffa90000 {
 			interrupts = <0 78 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <64>;
+			power-domains = <&zynqmp_firmware PD_ADMA>;
 		};
 
 		lpd_dma_chan3: dma@ffaa0000 {
@@ -332,6 +369,7 @@ lpd_dma_chan3: dma@ffaa0000 {
 			interrupts = <0 79 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <64>;
+			power-domains = <&zynqmp_firmware PD_ADMA>;
 		};
 
 		lpd_dma_chan4: dma@ffab0000 {
@@ -342,6 +380,7 @@ lpd_dma_chan4: dma@ffab0000 {
 			interrupts = <0 80 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <64>;
+			power-domains = <&zynqmp_firmware PD_ADMA>;
 		};
 
 		lpd_dma_chan5: dma@ffac0000 {
@@ -352,6 +391,7 @@ lpd_dma_chan5: dma@ffac0000 {
 			interrupts = <0 81 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <64>;
+			power-domains = <&zynqmp_firmware PD_ADMA>;
 		};
 
 		lpd_dma_chan6: dma@ffad0000 {
@@ -362,6 +402,7 @@ lpd_dma_chan6: dma@ffad0000 {
 			interrupts = <0 82 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <64>;
+			power-domains = <&zynqmp_firmware PD_ADMA>;
 		};
 
 		lpd_dma_chan7: dma@ffae0000 {
@@ -372,6 +413,7 @@ lpd_dma_chan7: dma@ffae0000 {
 			interrupts = <0 83 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <64>;
+			power-domains = <&zynqmp_firmware PD_ADMA>;
 		};
 
 		lpd_dma_chan8: dma@ffaf0000 {
@@ -382,6 +424,7 @@ lpd_dma_chan8: dma@ffaf0000 {
 			interrupts = <0 84 4>;
 			clock-names = "clk_main", "clk_apb";
 			xlnx,bus-width = <64>;
+			power-domains = <&zynqmp_firmware PD_ADMA>;
 		};
 
 		mc: memory-controller@fd070000 {
@@ -400,6 +443,7 @@ gem0: ethernet@ff0b0000 {
 			clock-names = "pclk", "hclk", "tx_clk";
 			#address-cells = <1>;
 			#size-cells = <0>;
+			power-domains = <&zynqmp_firmware PD_ETH_0>;
 		};
 
 		gem1: ethernet@ff0c0000 {
@@ -411,6 +455,7 @@ gem1: ethernet@ff0c0000 {
 			clock-names = "pclk", "hclk", "tx_clk";
 			#address-cells = <1>;
 			#size-cells = <0>;
+			power-domains = <&zynqmp_firmware PD_ETH_1>;
 		};
 
 		gem2: ethernet@ff0d0000 {
@@ -422,6 +467,7 @@ gem2: ethernet@ff0d0000 {
 			clock-names = "pclk", "hclk", "tx_clk";
 			#address-cells = <1>;
 			#size-cells = <0>;
+			power-domains = <&zynqmp_firmware PD_ETH_2>;
 		};
 
 		gem3: ethernet@ff0e0000 {
@@ -433,6 +479,7 @@ gem3: ethernet@ff0e0000 {
 			clock-names = "pclk", "hclk", "tx_clk";
 			#address-cells = <1>;
 			#size-cells = <0>;
+			power-domains = <&zynqmp_firmware PD_ETH_3>;
 		};
 
 		gpio: gpio@ff0a0000 {
@@ -445,6 +492,7 @@ gpio: gpio@ff0a0000 {
 			interrupt-controller;
 			#interrupt-cells = <2>;
 			reg = <0x0 0xff0a0000 0x0 0x1000>;
+			power-domains = <&zynqmp_firmware PD_GPIO>;
 		};
 
 		i2c0: i2c@ff020000 {
@@ -455,6 +503,7 @@ i2c0: i2c@ff020000 {
 			reg = <0x0 0xff020000 0x0 0x1000>;
 			#address-cells = <1>;
 			#size-cells = <0>;
+			power-domains = <&zynqmp_firmware PD_I2C_0>;
 		};
 
 		i2c1: i2c@ff030000 {
@@ -465,6 +514,7 @@ i2c1: i2c@ff030000 {
 			reg = <0x0 0xff030000 0x0 0x1000>;
 			#address-cells = <1>;
 			#size-cells = <0>;
+			power-domains = <&zynqmp_firmware PD_I2C_1>;
 		};
 
 		pcie: pcie@fd0e0000 {
@@ -496,6 +546,7 @@ pcie: pcie@fd0e0000 {
 					<0x0 0x0 0x0 0x2 &pcie_intc 0x2>,
 					<0x0 0x0 0x0 0x3 &pcie_intc 0x3>,
 					<0x0 0x0 0x0 0x4 &pcie_intc 0x4>;
+			power-domains = <&zynqmp_firmware PD_PCIE>;
 			pcie_intc: legacy-interrupt-controller {
 				interrupt-controller;
 				#address-cells = <0>;
@@ -519,24 +570,31 @@ sata: ahci@fd0c0000 {
 			reg = <0x0 0xfd0c0000 0x0 0x2000>;
 			interrupt-parent = <&gic>;
 			interrupts = <0 133 4>;
+			power-domains = <&zynqmp_firmware PD_SATA>;
 		};
 
 		sdhci0: mmc@ff160000 {
-			compatible = "arasan,sdhci-8.9a";
+			compatible = "xlnx,zynqmp-8.9a", "arasan,sdhci-8.9a";
 			status = "disabled";
 			interrupt-parent = <&gic>;
 			interrupts = <0 48 4>;
 			reg = <0x0 0xff160000 0x0 0x1000>;
 			clock-names = "clk_xin", "clk_ahb";
+			#clock-cells = <1>;
+			clock-output-names = "clk_out_sd0", "clk_in_sd0";
+			power-domains = <&zynqmp_firmware PD_SD_0>;
 		};
 
 		sdhci1: mmc@ff170000 {
-			compatible = "arasan,sdhci-8.9a";
+			compatible = "xlnx,zynqmp-8.9a", "arasan,sdhci-8.9a";
 			status = "disabled";
 			interrupt-parent = <&gic>;
 			interrupts = <0 49 4>;
 			reg = <0x0 0xff170000 0x0 0x1000>;
 			clock-names = "clk_xin", "clk_ahb";
+			#clock-cells = <1>;
+			clock-output-names = "clk_out_sd1", "clk_in_sd1";
+			power-domains = <&zynqmp_firmware PD_SD_1>;
 		};
 
 		smmu: smmu@fd800000 {
@@ -561,6 +619,7 @@ spi0: spi@ff040000 {
 			clock-names = "ref_clk", "pclk";
 			#address-cells = <1>;
 			#size-cells = <0>;
+			power-domains = <&zynqmp_firmware PD_SPI_0>;
 		};
 
 		spi1: spi@ff050000 {
@@ -572,6 +631,7 @@ spi1: spi@ff050000 {
 			clock-names = "ref_clk", "pclk";
 			#address-cells = <1>;
 			#size-cells = <0>;
+			power-domains = <&zynqmp_firmware PD_SPI_1>;
 		};
 
 		ttc0: timer@ff110000 {
@@ -581,6 +641,7 @@ ttc0: timer@ff110000 {
 			interrupts = <0 36 4>, <0 37 4>, <0 38 4>;
 			reg = <0x0 0xff110000 0x0 0x1000>;
 			timer-width = <32>;
+			power-domains = <&zynqmp_firmware PD_TTC_0>;
 		};
 
 		ttc1: timer@ff120000 {
@@ -590,6 +651,7 @@ ttc1: timer@ff120000 {
 			interrupts = <0 39 4>, <0 40 4>, <0 41 4>;
 			reg = <0x0 0xff120000 0x0 0x1000>;
 			timer-width = <32>;
+			power-domains = <&zynqmp_firmware PD_TTC_1>;
 		};
 
 		ttc2: timer@ff130000 {
@@ -599,6 +661,7 @@ ttc2: timer@ff130000 {
 			interrupts = <0 42 4>, <0 43 4>, <0 44 4>;
 			reg = <0x0 0xff130000 0x0 0x1000>;
 			timer-width = <32>;
+			power-domains = <&zynqmp_firmware PD_TTC_2>;
 		};
 
 		ttc3: timer@ff140000 {
@@ -608,6 +671,7 @@ ttc3: timer@ff140000 {
 			interrupts = <0 45 4>, <0 46 4>, <0 47 4>;
 			reg = <0x0 0xff140000 0x0 0x1000>;
 			timer-width = <32>;
+			power-domains = <&zynqmp_firmware PD_TTC_3>;
 		};
 
 		uart0: serial@ff000000 {
@@ -617,6 +681,7 @@ uart0: serial@ff000000 {
 			interrupts = <0 21 4>;
 			reg = <0x0 0xff000000 0x0 0x1000>;
 			clock-names = "uart_clk", "pclk";
+			power-domains = <&zynqmp_firmware PD_UART_0>;
 		};
 
 		uart1: serial@ff010000 {
@@ -626,6 +691,7 @@ uart1: serial@ff010000 {
 			interrupts = <0 22 4>;
 			reg = <0x0 0xff010000 0x0 0x1000>;
 			clock-names = "uart_clk", "pclk";
+			power-domains = <&zynqmp_firmware PD_UART_1>;
 		};
 
 		usb0: usb@fe200000 {
@@ -635,6 +701,7 @@ usb0: usb@fe200000 {
 			interrupts = <0 65 4>;
 			reg = <0x0 0xfe200000 0x0 0x40000>;
 			clock-names = "clk_xin", "clk_ahb";
+			power-domains = <&zynqmp_firmware PD_USB_0>;
 		};
 
 		usb1: usb@fe300000 {
@@ -644,6 +711,7 @@ usb1: usb@fe300000 {
 			interrupts = <0 70 4>;
 			reg = <0x0 0xfe300000 0x0 0x40000>;
 			clock-names = "clk_xin", "clk_ahb";
+			power-domains = <&zynqmp_firmware PD_USB_1>;
 		};
 
 		watchdog0: watchdog@fd4d0000 {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index b2f6673..0f21288 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -86,6 +86,8 @@
 CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
 CONFIG_ARM_SCPI_CPUFREQ=y
 CONFIG_ARM_IMX_CPUFREQ_DT=m
+CONFIG_ARM_QCOM_CPUFREQ_NVMEM=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
 CONFIG_ARM_RASPBERRYPI_CPUFREQ=m
 CONFIG_ARM_TEGRA186_CPUFREQ=y
 CONFIG_ARM_SCPI_PROTOCOL=y
@@ -169,6 +171,7 @@
 CONFIG_BT_HCIUART=m
 CONFIG_BT_HCIUART_LL=y
 CONFIG_BT_HCIUART_BCM=y
+CONFIG_BT_HCIUART_QCA=y
 CONFIG_CFG80211=m
 CONFIG_MAC80211=m
 CONFIG_MAC80211_LEDS=y
@@ -190,6 +193,7 @@
 CONFIG_PCI_HOST_THUNDER_PEM=y
 CONFIG_PCI_HOST_THUNDER_ECAM=y
 CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCIE_BRCMSTB=m
 CONFIG_PCI_LAYERSCAPE=y
 CONFIG_PCI_HISI=y
 CONFIG_PCIE_QCOM=y
@@ -252,6 +256,7 @@
 CONFIG_AMD_XGBE=y
 CONFIG_NET_XGENE=y
 CONFIG_ATL1C=m
+CONFIG_BCMGENET=m
 CONFIG_BNX2X=m
 CONFIG_MACB=y
 CONFIG_THUNDER_NIC_PF=y
@@ -299,6 +304,7 @@
 CONFIG_USB_NET_MCS7830=m
 CONFIG_ATH10K=m
 CONFIG_ATH10K_PCI=m
+CONFIG_ATH10K_SNOC=m
 CONFIG_BRCMFMAC=m
 CONFIG_MWIFIEX=m
 CONFIG_MWIFIEX_PCIE=m
@@ -350,6 +356,7 @@
 CONFIG_SERIAL_FSL_LINFLEXUART=y
 CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE=y
 CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_OWL=y
 CONFIG_SERIAL_DEV_BUS=y
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_IPMI_HANDLER=m
@@ -367,6 +374,7 @@
 CONFIG_I2C_IMX_LPI2C=y
 CONFIG_I2C_MESON=y
 CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_OWL=y
 CONFIG_I2C_PXA=y
 CONFIG_I2C_QCOM_GENI=m
 CONFIG_I2C_QUP=y
@@ -387,13 +395,18 @@
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_QCOM_QSPI=m
 CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=m
 CONFIG_SPI_S3C64XX=y
 CONFIG_SPI_SUN6I=y
 CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y
 CONFIG_PINCTRL_SINGLE=y
 CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_OWL=y
+CONFIG_PINCTRL_S700=y
+CONFIG_PINCTRL_S900=y
 CONFIG_PINCTRL_IMX8MM=y
 CONFIG_PINCTRL_IMX8MN=y
 CONFIG_PINCTRL_IMX8MQ=y
@@ -421,6 +434,7 @@
 CONFIG_GPIO_PCA953X_IRQ=y
 CONFIG_GPIO_MAX77620=y
 CONFIG_POWER_AVS=y
+CONFIG_QCOM_CPR=y
 CONFIG_ROCKCHIP_IODOMAIN=y
 CONFIG_POWER_RESET_MSM=y
 CONFIG_POWER_RESET_XGENE=y
@@ -448,6 +462,7 @@
 CONFIG_EXYNOS_THERMAL=y
 CONFIG_TEGRA_BPMP_THERMAL=m
 CONFIG_QCOM_TSENS=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=m
 CONFIG_UNIPHIER_THERMAL=y
 CONFIG_WATCHDOG=y
 CONFIG_ARM_SP805_WATCHDOG=y
@@ -458,6 +473,7 @@
 CONFIG_IMX_SC_WDT=m
 CONFIG_MESON_GXBB_WATCHDOG=m
 CONFIG_MESON_WATCHDOG=m
+CONFIG_QCOM_WDT=m
 CONFIG_RENESAS_WDT=y
 CONFIG_UNIPHIER_WATCHDOG=y
 CONFIG_BCM2835_WDT=y
@@ -537,6 +553,7 @@
 CONFIG_DRM_TEGRA=m
 CONFIG_DRM_PANEL_SIMPLE=m
 CONFIG_DRM_SII902X=m
+CONFIG_DRM_TI_SN65DSI86=m
 CONFIG_DRM_I2C_ADV7511=m
 CONFIG_DRM_VC4=m
 CONFIG_DRM_ETNAVIV=m
@@ -578,6 +595,7 @@
 CONFIG_SND_AUDIO_GRAPH_CARD=m
 CONFIG_I2C_HID=m
 CONFIG_USB=y
+CONFIG_USB_CONN_GPIO=m
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_TEGRA=y
@@ -603,7 +621,10 @@
 CONFIG_USB_GADGET=y
 CONFIG_USB_RENESAS_USBHS_UDC=m
 CONFIG_USB_RENESAS_USB3=m
+CONFIG_USB_TEGRA_XUDC=m
 CONFIG_TYPEC=m
+CONFIG_TYPEC_TCPM=m
+CONFIG_TYPEC_FUSB302=m
 CONFIG_TYPEC_HD3SS3220=m
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=32
@@ -630,6 +651,7 @@
 CONFIG_MMC_SUNXI=y
 CONFIG_MMC_BCM2835=y
 CONFIG_MMC_SDHCI_XENON=y
+CONFIG_MMC_OWL=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
@@ -666,6 +688,7 @@
 CONFIG_K3_DMA=y
 CONFIG_MV_XOR=y
 CONFIG_MV_XOR_V2=y
+CONFIG_OWL_DMA=y
 CONFIG_PL330_DMA=y
 CONFIG_TEGRA20_APB_DMA=y
 CONFIG_QCOM_BAM_DMA=y
@@ -692,6 +715,7 @@
 CONFIG_CLK_RASPBERRYPI=m
 CONFIG_CLK_IMX8MM=y
 CONFIG_CLK_IMX8MN=y
+CONFIG_CLK_IMX8MP=y
 CONFIG_CLK_IMX8MQ=y
 CONFIG_CLK_IMX8QXP=y
 CONFIG_TI_SCI_CLK=y
@@ -708,6 +732,7 @@
 CONFIG_QCS_GCC_404=y
 CONFIG_SDM_GCC_845=y
 CONFIG_SM_GCC_8150=y
+CONFIG_QCOM_HFPLL=y
 CONFIG_HWSPINLOCK=y
 CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_ARM_MHU=y
@@ -727,15 +752,21 @@
 CONFIG_RPMSG_QCOM_GLINK_RPM=y
 CONFIG_RPMSG_QCOM_GLINK_SMEM=m
 CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_OWL_PM_DOMAINS=y
 CONFIG_RASPBERRYPI_POWER=y
 CONFIG_IMX_SCU_SOC=y
+CONFIG_QCOM_AOSS_QMP=y
+CONFIG_QCOM_COMMAND_DB=y
 CONFIG_QCOM_GENI_SE=y
 CONFIG_QCOM_GLINK_SSR=m
+CONFIG_QCOM_RMTFS_MEM=m
 CONFIG_QCOM_RPMH=y
+CONFIG_QCOM_RPMHPD=y
 CONFIG_QCOM_SMEM=y
 CONFIG_QCOM_SMD_RPM=y
 CONFIG_QCOM_SMP2P=y
 CONFIG_QCOM_SMSM=y
+CONFIG_QCOM_SOCINFO=m
 CONFIG_ARCH_R8A774A1=y
 CONFIG_ARCH_R8A774B1=y
 CONFIG_ARCH_R8A774C0=y
@@ -747,6 +778,7 @@
 CONFIG_ARCH_R8A77980=y
 CONFIG_ARCH_R8A77990=y
 CONFIG_ARCH_R8A77995=y
+CONFIG_QCOM_PDC=y
 CONFIG_ROCKCHIP_PM_DOMAINS=y
 CONFIG_ARCH_TEGRA_132_SOC=y
 CONFIG_ARCH_TEGRA_210_SOC=y
@@ -760,6 +792,7 @@
 CONFIG_MEMORY=y
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
+CONFIG_QCOM_SPMI_ADC5=m
 CONFIG_ROCKCHIP_SARADC=m
 CONFIG_IIO_CROS_EC_SENSORS_CORE=m
 CONFIG_IIO_CROS_EC_SENSORS=m
@@ -776,6 +809,8 @@
 CONFIG_PWM_SAMSUNG=y
 CONFIG_PWM_SUN4I=m
 CONFIG_PWM_TEGRA=m
+CONFIG_RESET_QCOM_AOSS=y
+CONFIG_RESET_QCOM_PDC=m
 CONFIG_RESET_TI_SCI=y
 CONFIG_PHY_XGENE=y
 CONFIG_PHY_SUN4I_USB=y
@@ -846,7 +881,10 @@
 CONFIG_CRYPTO_ECHAINIV=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_CRYPTO_DEV_SUN8I_CE=m
+CONFIG_CRYPTO_DEV_FSL_CAAM=m
 CONFIG_CRYPTO_DEV_HISI_ZIP=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_DEV_QCOM_RNG=m
 CONFIG_CMA_SIZE_MBYTES=32
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index bd23f87..d3077c9 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -3,7 +3,6 @@
 generic-y += delay.h
 generic-y += div64.h
 generic-y += dma.h
-generic-y += dma-contiguous.h
 generic-y += dma-mapping.h
 generic-y += early_ioremap.h
 generic-y += emergency-restart.h
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 4750fc8..25fec4b 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -140,11 +140,11 @@ static inline u32 gic_read_rpr(void)
 #define gicr_write_pendbaser(v, c)	writeq_relaxed(v, c)
 #define gicr_read_pendbaser(c)		readq_relaxed(c)
 
-#define gits_write_vpropbaser(v, c)	writeq_relaxed(v, c)
-#define gits_read_vpropbaser(c)		readq_relaxed(c)
+#define gicr_write_vpropbaser(v, c)	writeq_relaxed(v, c)
+#define gicr_read_vpropbaser(c)		readq_relaxed(c)
 
-#define gits_write_vpendbaser(v, c)	writeq_relaxed(v, c)
-#define gits_read_vpendbaser(c)		readq_relaxed(c)
+#define gicr_write_vpendbaser(v, c)	writeq_relaxed(v, c)
+#define gicr_read_vpendbaser(c)		readq_relaxed(c)
 
 static inline bool gic_prio_masking_enabled(void)
 {
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index b0d53a2..935d2aa 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -4,6 +4,9 @@
  */
 #ifndef __ASM_COMPAT_H
 #define __ASM_COMPAT_H
+
+#include <asm-generic/compat.h>
+
 #ifdef CONFIG_COMPAT
 
 /*
@@ -13,8 +16,6 @@
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 
-#include <asm-generic/compat.h>
-
 #define COMPAT_USER_HZ		100
 #ifdef __AARCH64EB__
 #define COMPAT_UTS_MACHINE	"armv8b\0\0"
@@ -113,23 +114,6 @@ typedef u32		compat_sigset_word;
 
 #define COMPAT_OFF_T_MAX	0x7fffffff
 
-/*
- * A pointer passed in from user mode. This should not
- * be used for syscall parameters, just declare them
- * as pointers because the syscall entry code will have
- * appropriately converted them already.
- */
-
-static inline void __user *compat_ptr(compat_uptr_t uptr)
-{
-	return (void __user *)(unsigned long)uptr;
-}
-
-static inline compat_uptr_t ptr_to_compat(void __user *uptr)
-{
-	return (u32)(unsigned long)uptr;
-}
-
 #define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
 #define COMPAT_MINSIGSTKSZ	2048
 
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 5efe5ca..688c634 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -17,7 +17,6 @@
 #include <asm/esr.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_hyp.h>
-#include <asm/kvm_mmio.h>
 #include <asm/ptrace.h>
 #include <asm/cputype.h>
 #include <asm/virt.h>
@@ -219,6 +218,38 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
 		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
 }
 
+/*
+ * The layout of SPSR for an AArch32 state is different when observed from an
+ * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
+ * view given an AArch64 view.
+ *
+ * In ARM DDI 0487E.a see:
+ *
+ * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
+ * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
+ * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
+ *
+ * Which show the following differences:
+ *
+ * | Bit | AA64 | AA32 | Notes                       |
+ * +-----+------+------+-----------------------------|
+ * | 24  | DIT  | J    | J is RES0 in ARMv8          |
+ * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
+ *
+ * ... and all other bits are (currently) common.
+ */
+static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
+{
+	const unsigned long overlap = BIT(24) | BIT(21);
+	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);
+
+	spsr &= ~overlap;
+
+	spsr |= dit << 21;
+
+	return spsr;
+}
+
 static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 {
 	u32 mode;
@@ -283,6 +314,11 @@ static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
 }
 
+static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+}
+
 static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
 	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
@@ -304,7 +340,7 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
 }
 
-static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
 	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
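
A note on the kvm_emulate.h hunk above: the new host_spsr_to_spsr32() helper only has to relocate DIT, since (per the table in the comment) every other bit lines up between the AArch64 and AArch32 SPSR views. The standalone sketch below is not part of the patch; the userspace scaffolding and the sample value are purely illustrative, and the constant mirrors the kernel's PSR_AA32_DIT_BIT definition shown later in this diff.

/*
 * Illustration of the DIT relocation done by host_spsr_to_spsr32():
 * bit 24 (DIT in the AArch64 view) is cleared, and bit 21 (SS in
 * AArch64, DIT in AArch32) is rewritten from the DIT value.
 */
#include <stdio.h>

#define BIT(n)			(1UL << (n))
#define PSR_AA32_DIT_BIT	0x01000000UL	/* bit 24 in the AArch64 view */

static unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;	/* drop AArch64 DIT (24) and SS (21) */
	spsr |= dit << 21;	/* re-insert DIT where AArch32 expects it */

	return spsr;
}

int main(void)
{
	/* Example: Z and C flags set, plus DIT in the AArch64 view. */
	unsigned long aa64 = 0x60000000UL | PSR_AA32_DIT_BIT;

	printf("AArch64 view: %#010lx\n", aa64);
	printf("AArch32 view: %#010lx\n", host_spsr_to_spsr32(aa64));
	/* 0x61000000 -> 0x60200000: DIT moves from bit 24 to bit 21. */
	return 0;
}

Running it shows the flags untouched while DIT shifts down three bits, which is exactly the asymmetry the comment's table documents.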
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f5acdde..d87aa60 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -24,7 +24,6 @@
 #include <asm/fpsimd.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
-#include <asm/kvm_mmio.h>
 #include <asm/thread_info.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -53,7 +52,7 @@ int kvm_arm_init_sve(void);
 
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
+void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
 int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
 void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
 
@@ -325,9 +324,6 @@ struct kvm_vcpu_arch {
 	/* Don't run the guest (internal implementation need) */
 	bool pause;
 
-	/* IO related fields */
-	struct kvm_decode mmio_decode;
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
@@ -446,8 +442,6 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 
-struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
-struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
@@ -491,6 +485,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		       int exception_index);
 
+/* MMIO helpers */
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		 phys_addr_t fault_ipa);
+
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
deleted file mode 100644
index 02b5c48..0000000
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM64_KVM_MMIO_H__
-#define __ARM64_KVM_MMIO_H__
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_arm.h>
-
-/*
- * This is annoying. The mmio code requires this, even if we don't
- * need any decoding. To be fixed.
- */
-struct kvm_decode {
-	unsigned long rt;
-	bool sign_extend;
-};
-
-void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
-unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
-
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		 phys_addr_t fault_ipa);
-
-#endif	/* __ARM64_KVM_MMIO_H__ */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index cd5de0e..538c85e 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -441,6 +441,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				 PMD_TYPE_TABLE)
 #define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
 				 PMD_TYPE_SECT)
+#define pmd_leaf(pmd)		pmd_sect(pmd)
 
 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
 static inline bool pud_sect(pud_t pud) { return false; }
@@ -525,6 +526,7 @@ static inline void pte_unmap(pte_t *pte) { }
 #define pud_none(pud)		(!pud_val(pud))
 #define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
 #define pud_present(pud)	pte_present(pud_pte(pud))
+#define pud_leaf(pud)		pud_sect(pud)
 #define pud_valid(pud)		pte_valid(pud_pte(pud))
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index 0b8e726..38187f7 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -5,7 +5,7 @@
 #ifndef __ASM_PTDUMP_H
 #define __ASM_PTDUMP_H
 
-#ifdef CONFIG_ARM64_PTDUMP_CORE
+#ifdef CONFIG_PTDUMP_CORE
 
 #include <linux/mm_types.h>
 #include <linux/seq_file.h>
@@ -21,15 +21,15 @@ struct ptdump_info {
 	unsigned long			base_addr;
 };
 
-void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
-#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
+void ptdump_walk(struct seq_file *s, struct ptdump_info *info);
+#ifdef CONFIG_PTDUMP_DEBUGFS
 void ptdump_debugfs_register(struct ptdump_info *info, const char *name);
 #else
 static inline void ptdump_debugfs_register(struct ptdump_info *info,
 					   const char *name) { }
 #endif
 void ptdump_check_wx(void);
-#endif /* CONFIG_ARM64_PTDUMP_CORE */
+#endif /* CONFIG_PTDUMP_CORE */
 
 #ifdef CONFIG_DEBUG_WX
 #define debug_checkwx()	ptdump_check_wx()
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index fbebb41..bf57308f 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -62,6 +62,7 @@
 #define PSR_AA32_I_BIT		0x00000080
 #define PSR_AA32_A_BIT		0x00000100
 #define PSR_AA32_E_BIT		0x00000200
+#define PSR_AA32_PAN_BIT	0x00400000
 #define PSR_AA32_SSBS_BIT	0x00800000
 #define PSR_AA32_DIT_BIT	0x01000000
 #define PSR_AA32_Q_BIT		0x08000000
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 5af8258..1dd22da 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -38,7 +38,7 @@
 #define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE + 5)
 #define __ARM_NR_COMPAT_END		(__ARM_NR_COMPAT_BASE + 0x800)
 
-#define __NR_compat_syscalls		436
+#define __NR_compat_syscalls		439
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 94ab29c..c1c6163 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -879,6 +879,10 @@ __SYSCALL(__NR_fspick, sys_fspick)
 __SYSCALL(__NR_pidfd_open, sys_pidfd_open)
 #define __NR_clone3 435
 __SYSCALL(__NR_clone3, sys_clone3)
+#define __NR_openat2 437
+__SYSCALL(__NR_openat2, sys_openat2)
+#define __NR_pidfd_getfd 438
+__SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd)
 
 /*
  * Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 820e575..ba85bb2 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -220,10 +220,18 @@ struct kvm_vcpu_events {
 #define KVM_REG_ARM_PTIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 2, 2)
 #define KVM_REG_ARM_PTIMER_CNT		ARM64_SYS_REG(3, 3, 14, 0, 1)
 
-/* EL0 Virtual Timer Registers */
+/*
+ * EL0 Virtual Timer Registers
+ *
+ * WARNING:
+ *      KVM_REG_ARM_TIMER_CVAL and KVM_REG_ARM_TIMER_CNT are not defined
+ *      with the appropriate register encodings.  Their values have been
+ *      accidentally swapped.  As this is set API, the definitions here
+ *      must be used, rather than ones derived from the encodings.
+ */
 #define KVM_REG_ARM_TIMER_CTL		ARM64_SYS_REG(3, 3, 14, 3, 1)
-#define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 0, 2)
+#define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)
 
 /* KVM-as-firmware specific pseudo-registers */
 #define KVM_REG_ARM_FW			(0x0014 << KVM_REG_ARM_COPROC_SHIFT)
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 7ed9294..d1bb5b6 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -49,6 +49,7 @@
 #define PSR_SSBS_BIT	0x00001000
 #define PSR_PAN_BIT	0x00400000
 #define PSR_UAO_BIT	0x00800000
+#define PSR_DIT_BIT	0x01000000
 #define PSR_V_BIT	0x10000000
 #define PSR_C_BIT	0x20000000
 #define PSR_Z_BIT	0x40000000
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index b6f9455..a34890b 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -360,9 +360,6 @@ void __init setup_arch(char **cmdline_p)
 	init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
 #endif
 
-#ifdef CONFIG_VT
-	conswitchp = &dummy_con;
-#endif
 	if (boot_args[1] || boot_args[2] || boot_args[3]) {
 		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
 			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 43487f0..7a7e425 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -101,7 +101,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
 {
 	bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
-	unsigned long mdscr;
+	unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
 
 	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
 
@@ -197,6 +197,10 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
 	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
 
+	/* Write mdcr_el2 changes since vcpu_load on VHE systems */
+	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
+		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
+
 	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
 	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
 }
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 2fff061..2bd9230 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -47,11 +47,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ NULL }
 };
 
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 static bool core_reg_offset_is_vreg(u64 off)
 {
 	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 0c6832e..d22d053 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -51,7 +51,7 @@
  * u64 __guest_enter(struct kvm_vcpu *vcpu,
  *		     struct kvm_cpu_context *host_ctxt);
  */
-ENTRY(__guest_enter)
+SYM_FUNC_START(__guest_enter)
 	// x0: vcpu
 	// x1: host context
 	// x2-x17: clobbered by macros
@@ -100,9 +100,8 @@
 	// Do not touch any register after this!
 	eret
 	sb
-ENDPROC(__guest_enter)
 
-ENTRY(__guest_exit)
+SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	// x0: return code
 	// x1: vcpu
 	// x2-x29,lr: vcpu regs
@@ -195,4 +194,4 @@
 	msr	spsr_el2, x4
 	orr	x0, x0, x5
 1:	ret
-ENDPROC(__guest_exit)
+SYM_FUNC_END(__guest_enter)
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index ccdb6a0..6aafc28 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -14,9 +14,6 @@
 #include <asm/kvm_emulate.h>
 #include <asm/esr.h>
 
-#define PSTATE_FAULT_BITS_64 	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
-				 PSR_I_BIT | PSR_D_BIT)
-
 #define CURRENT_EL_SP_EL0_VECTOR	0x0
 #define CURRENT_EL_SP_ELx_VECTOR	0x200
 #define LOWER_EL_AArch64_VECTOR		0x400
@@ -50,6 +47,69 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
 	return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
 }
 
+/*
+ * When an exception is taken, most PSTATE fields are left unchanged in the
+ * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
+ * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
+ * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
+ *
+ * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
+ * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
+ *
+ * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
+ * MSB to LSB.
+ */
+static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
+{
+	unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
+	unsigned long old, new;
+
+	old = *vcpu_cpsr(vcpu);
+	new = 0;
+
+	new |= (old & PSR_N_BIT);
+	new |= (old & PSR_Z_BIT);
+	new |= (old & PSR_C_BIT);
+	new |= (old & PSR_V_BIT);
+
+	// TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)
+
+	new |= (old & PSR_DIT_BIT);
+
+	// PSTATE.UAO is set to zero upon any exception to AArch64
+	// See ARM DDI 0487E.a, page D5-2579.
+
+	// PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
+	// SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
+	// See ARM DDI 0487E.a, page D5-2578.
+	new |= (old & PSR_PAN_BIT);
+	if (!(sctlr & SCTLR_EL1_SPAN))
+		new |= PSR_PAN_BIT;
+
+	// PSTATE.SS is set to zero upon any exception to AArch64
+	// See ARM DDI 0487E.a, page D2-2452.
+
+	// PSTATE.IL is set to zero upon any exception to AArch64
+	// See ARM DDI 0487E.a, page D1-2306.
+
+	// PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
+	// See ARM DDI 0487E.a, page D13-3258
+	if (sctlr & SCTLR_ELx_DSSBS)
+		new |= PSR_SSBS_BIT;
+
+	// PSTATE.BTYPE is set to zero upon any exception to AArch64
+	// See ARM DDI 0487E.a, pages D1-2293 to D1-2294.
+
+	new |= PSR_D_BIT;
+	new |= PSR_A_BIT;
+	new |= PSR_I_BIT;
+	new |= PSR_F_BIT;
+
+	new |= PSR_MODE_EL1h;
+
+	return new;
+}
+
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -59,7 +119,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
 	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 
-	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
+	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
 	vcpu_write_spsr(vcpu, cpsr);
 
 	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
@@ -94,7 +154,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
 	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 
-	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
+	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
 	vcpu_write_spsr(vcpu, cpsr);
 
 	/*
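
For reference, the PSTATE rules encoded by get_except64_pstate() above reduce to a small pure function. The sketch below is a simplification, not the kernel code: SCTLR_EL1 is collapsed into two booleans (SPAN and DSSBS), and the PSR_* numeric values are copied from the arm64 uapi headers (several of them appear in the ptrace.h hunks earlier in this diff).

/*
 * Simplified model of the PSTATE computed when KVM injects an
 * exception: NZCV and DIT are inherited, PAN is inherited and
 * additionally forced when SCTLR_EL1.SPAN == 0, SSBS follows
 * SCTLR_ELx.DSSBS, and DAIF plus the EL1h mode are always set.
 */
#include <stdbool.h>
#include <stdio.h>

#define PSR_MODE_EL1h	0x00000005UL
#define PSR_F_BIT	0x00000040UL
#define PSR_I_BIT	0x00000080UL
#define PSR_A_BIT	0x00000100UL
#define PSR_D_BIT	0x00000200UL
#define PSR_SSBS_BIT	0x00001000UL
#define PSR_PAN_BIT	0x00400000UL
#define PSR_DIT_BIT	0x01000000UL
#define PSR_V_BIT	0x10000000UL
#define PSR_C_BIT	0x20000000UL
#define PSR_Z_BIT	0x40000000UL
#define PSR_N_BIT	0x80000000UL

static unsigned long except64_pstate(unsigned long old, bool span, bool dssbs)
{
	unsigned long new = 0;

	/* Flags and DIT are inherited from the interrupted context. */
	new |= old & (PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT);
	new |= old & PSR_DIT_BIT;

	/* PAN is inherited, and forced when SCTLR_EL1.SPAN is clear. */
	new |= old & PSR_PAN_BIT;
	if (!span)
		new |= PSR_PAN_BIT;

	/* SSBS tracks SCTLR_ELx.DSSBS. */
	if (dssbs)
		new |= PSR_SSBS_BIT;

	/* Exceptions to AArch64 mask DAIF and target EL1 with SP_EL1. */
	return new | PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL1h;
}

int main(void)
{
	/* Guest had Z and C set; SPAN clear, DSSBS clear. */
	unsigned long old = PSR_Z_BIT | PSR_C_BIT;

	printf("%#010lx -> %#010lx\n", old, except64_pstate(old, false, false));
	/* 0x60000000 -> 0x604003c5: Z/C kept, PAN forced, DAIF + EL1h set. */
	return 0;
}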
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index f4a8ae9..30b7ea6 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -204,7 +204,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
 	return true;
 }
 
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	kfree(vcpu->arch.sve_state);
 }
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index dab1fea..a4f48c1 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -13,52 +13,46 @@
 #include <asm/kvm_mmu.h>
 
 /*
- * The LSB of the random hyp VA tag or 0 if no randomization is used.
+ * The LSB of the HYP VA tag
  */
 static u8 tag_lsb;
 /*
- * The random hyp VA tag value with the region bit if hyp randomization is used
+ * The HYP VA tag value with the region bit
  */
 static u64 tag_val;
 static u64 va_mask;
 
+/*
+ * We want to generate a hyp VA with the following format (with V ==
+ * vabits_actual):
+ *
+ *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
+ *  ---------------------------------------------------------
+ * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
+ *           |--------- tag_val -----------|----- va_mask ---|
+ *
+ * which does not conflict with the idmap regions.
+ */
 __init void kvm_compute_layout(void)
 {
 	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
 	u64 hyp_va_msb;
-	int kva_msb;
 
 	/* Where is my RAM region? */
 	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
 	hyp_va_msb ^= BIT(vabits_actual - 1);
 
-	kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
+	tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
 			(u64)(high_memory - 1));
 
-	if (kva_msb == (vabits_actual - 1)) {
-		/*
-		 * No space in the address, let's compute the mask so
-		 * that it covers (vabits_actual - 1) bits, and the region
-		 * bit. The tag stays set to zero.
-		 */
-		va_mask  = BIT(vabits_actual - 1) - 1;
-		va_mask |= hyp_va_msb;
-	} else {
-		/*
-		 * We do have some free bits to insert a random tag.
-		 * Hyp VAs are now created from kernel linear map VAs
-		 * using the following formula (with V == vabits_actual):
-		 *
-		 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
-		 *  ---------------------------------------------------------
-		 * | 0000000 | hyp_va_msb |    random tag  |  kern linear VA |
-		 */
-		tag_lsb = kva_msb;
-		va_mask = GENMASK_ULL(tag_lsb - 1, 0);
-		tag_val = get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
-		tag_val |= hyp_va_msb;
-		tag_val >>= tag_lsb;
+	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
+	tag_val = hyp_va_msb;
+
+	if (tag_lsb != (vabits_actual - 1)) {
+		/* We have some free bits to insert a random tag. */
+		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
 	}
+	tag_val >>= tag_lsb;
 }
 
 static u32 compute_instruction(int n, u32 rd, u32 rn)
@@ -117,11 +111,11 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 		 * VHE doesn't need any address translation, let's NOP
 		 * everything.
 		 *
-		 * Alternatively, if we don't have any spare bits in
-		 * the address, NOP everything after masking that
-		 * kernel VA.
+		 * Alternatively, if the tag is zero (because the layout
+		 * dictates it and we don't have any spare bits in the
+		 * address), NOP everything after masking the kernel VA.
 		 */
-		if (has_vhe() || (!tag_lsb && i > 0)) {
+		if (has_vhe() || (!tag_val && i > 0)) {
 			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
 			continue;
 		}
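
The layout comment added to kvm_compute_layout() above pins down how a kernel linear VA turns into a hyp VA once tag_lsb, va_mask and tag_val are chosen: keep the low va_mask bits and insert the (region bit | random tag) value at tag_lsb. The sketch below is an illustration of that net effect, not the patched instruction sequence: GENMASK_ULL() is reimplemented locally, and the 48-bit VA size, tag_lsb and the "random" tag are made-up example values.

/*
 * Sketch of the hyp VA layout described above, with invented values.
 */
#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))	/* local reimplementation */

int main(void)
{
	unsigned int vabits_actual = 48;		/* example VA size */
	unsigned int tag_lsb = 40;			/* first bit differing across RAM */
	uint64_t va_mask = GENMASK_ULL(tag_lsb - 1, 0);	/* kernel linear VA bits kept */
	uint64_t hyp_va_msb = 1ULL << (vabits_actual - 1);
	uint64_t random_tag = 0x2aULL << tag_lsb;	/* pretend random bits */
	uint64_t tag_val = (hyp_va_msb | random_tag) >> tag_lsb;

	uint64_t kern_va = 0xffff00c012345678ULL & GENMASK_ULL(vabits_actual - 1, 0);
	uint64_t hyp_va = (kern_va & va_mask) | (tag_val << tag_lsb);

	printf("kern VA (low %u bits): %#018llx\n", vabits_actual,
	       (unsigned long long)kern_va);
	printf("hyp  VA:               %#018llx\n", (unsigned long long)hyp_va);
	/* Bits [39:0] of the kernel VA are kept; 0xaa lands in bits [47:40]. */
	return 0;
}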
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 849c1df..d91030f 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -4,8 +4,8 @@
 				   ioremap.o mmap.o pgd.o mmu.o \
 				   context.o proc.o pageattr.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
-obj-$(CONFIG_ARM64_PTDUMP_CORE)	+= dump.o
-obj-$(CONFIG_ARM64_PTDUMP_DEBUGFS)	+= ptdump_debugfs.o
+obj-$(CONFIG_PTDUMP_CORE)	+= dump.o
+obj-$(CONFIG_PTDUMP_DEBUGFS)	+= ptdump_debugfs.o
 obj-$(CONFIG_NUMA)		+= numa.o
 obj-$(CONFIG_DEBUG_VIRTUAL)	+= physaddr.o
 KASAN_SANITIZE_physaddr.o	+= n
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 0a920b53..860c00e 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -15,6 +15,7 @@
 #include <linux/io.h>
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/ptdump.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 
@@ -75,10 +76,11 @@ static struct addr_marker address_markers[] = {
  * dumps out a description of the range.
  */
 struct pg_state {
+	struct ptdump_state ptdump;
 	struct seq_file *seq;
 	const struct addr_marker *marker;
 	unsigned long start_address;
-	unsigned level;
+	int level;
 	u64 current_prot;
 	bool check_wx;
 	unsigned long wx_pages;
@@ -174,11 +176,14 @@ struct pg_level {
 };
 
 static struct pg_level pg_level[] = {
-	{
-	}, { /* pgd */
+	{ /* pgd */
 		.name	= "PGD",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
+	}, { /* p4d */
+		.name	= "P4D",
+		.bits	= pte_bits,
+		.num	= ARRAY_SIZE(pte_bits),
 	}, { /* pud */
 		.name	= (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
 		.bits	= pte_bits,
@@ -241,13 +246,17 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
 	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
 }
 
-static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
-				u64 val)
+static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
+		      unsigned long val)
 {
+	struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
 	static const char units[] = "KMGTPE";
-	u64 prot = val & pg_level[level].mask;
+	u64 prot = 0;
 
-	if (!st->level) {
+	if (level >= 0)
+		prot = val & pg_level[level].mask;
+
+	if (st->level == -1) {
 		st->level = level;
 		st->current_prot = prot;
 		st->start_address = addr;
@@ -260,21 +269,22 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 		if (st->current_prot) {
 			note_prot_uxn(st, addr);
 			note_prot_wx(st, addr);
-			pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx   ",
+		}
+
+		pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx   ",
 				   st->start_address, addr);
 
-			delta = (addr - st->start_address) >> 10;
-			while (!(delta & 1023) && unit[1]) {
-				delta >>= 10;
-				unit++;
-			}
-			pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
-				   pg_level[st->level].name);
-			if (pg_level[st->level].bits)
-				dump_prot(st, pg_level[st->level].bits,
-					  pg_level[st->level].num);
-			pt_dump_seq_puts(st->seq, "\n");
+		delta = (addr - st->start_address) >> 10;
+		while (!(delta & 1023) && unit[1]) {
+			delta >>= 10;
+			unit++;
 		}
+		pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+				   pg_level[st->level].name);
+		if (st->current_prot && pg_level[st->level].bits)
+			dump_prot(st, pg_level[st->level].bits,
+				  pg_level[st->level].num);
+		pt_dump_seq_puts(st->seq, "\n");
 
 		if (addr >= st->marker[1].start_address) {
 			st->marker++;
@@ -293,85 +303,27 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 }
 
-static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start,
-		     unsigned long end)
+void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
 {
-	unsigned long addr = start;
-	pte_t *ptep = pte_offset_kernel(pmdp, start);
+	unsigned long end = ~0UL;
+	struct pg_state st;
 
-	do {
-		note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
-	} while (ptep++, addr += PAGE_SIZE, addr != end);
-}
+	if (info->base_addr < TASK_SIZE_64)
+		end = TASK_SIZE_64;
 
-static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start,
-		     unsigned long end)
-{
-	unsigned long next, addr = start;
-	pmd_t *pmdp = pmd_offset(pudp, start);
-
-	do {
-		pmd_t pmd = READ_ONCE(*pmdp);
-		next = pmd_addr_end(addr, end);
-
-		if (pmd_none(pmd) || pmd_sect(pmd)) {
-			note_page(st, addr, 3, pmd_val(pmd));
-		} else {
-			BUG_ON(pmd_bad(pmd));
-			walk_pte(st, pmdp, addr, next);
-		}
-	} while (pmdp++, addr = next, addr != end);
-}
-
-static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start,
-		     unsigned long end)
-{
-	unsigned long next, addr = start;
-	pud_t *pudp = pud_offset(pgdp, start);
-
-	do {
-		pud_t pud = READ_ONCE(*pudp);
-		next = pud_addr_end(addr, end);
-
-		if (pud_none(pud) || pud_sect(pud)) {
-			note_page(st, addr, 2, pud_val(pud));
-		} else {
-			BUG_ON(pud_bad(pud));
-			walk_pmd(st, pudp, addr, next);
-		}
-	} while (pudp++, addr = next, addr != end);
-}
-
-static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
-		     unsigned long start)
-{
-	unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0;
-	unsigned long next, addr = start;
-	pgd_t *pgdp = pgd_offset(mm, start);
-
-	do {
-		pgd_t pgd = READ_ONCE(*pgdp);
-		next = pgd_addr_end(addr, end);
-
-		if (pgd_none(pgd)) {
-			note_page(st, addr, 1, pgd_val(pgd));
-		} else {
-			BUG_ON(pgd_bad(pgd));
-			walk_pud(st, pgdp, addr, next);
-		}
-	} while (pgdp++, addr = next, addr != end);
-}
-
-void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
-{
-	struct pg_state st = {
-		.seq = m,
+	st = (struct pg_state){
+		.seq = s,
 		.marker = info->markers,
+		.ptdump = {
+			.note_page = note_page,
+			.range = (struct ptdump_range[]){
+				{info->base_addr, end},
+				{0, 0}
+			}
+		}
 	};
 
-	walk_pgd(&st, info->mm, info->base_addr);
-
-	note_page(&st, 0, 0, 0);
+	ptdump_walk_pgd(&st.ptdump, info->mm, NULL);
 }
 
 static void ptdump_initialize(void)
@@ -398,11 +350,19 @@ void ptdump_check_wx(void)
 			{ 0, NULL},
 			{ -1, NULL},
 		},
+		.level = -1,
 		.check_wx = true,
+		.ptdump = {
+			.note_page = note_page,
+			.range = (struct ptdump_range[]) {
+				{PAGE_OFFSET, ~0UL},
+				{0, 0}
+			}
+		}
 	};
 
-	walk_pgd(&st, &init_mm, PAGE_OFFSET);
-	note_page(&st, 0, 0, 0);
+	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
+
 	if (st.wx_pages || st.uxn_pages)
 		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
 			st.wx_pages, st.uxn_pages);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 40797cb..128f708 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -943,13 +943,13 @@ int __init arch_ioremap_pud_supported(void)
 	 * SW table walks can't handle removal of intermediate entries.
 	 */
 	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
-	       !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
+	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }
 
 int __init arch_ioremap_pmd_supported(void)
 {
 	/* See arch_ioremap_pud_supported() */
-	return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
+	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }
 
 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c
index 064163f..1f2eae3 100644
--- a/arch/arm64/mm/ptdump_debugfs.c
+++ b/arch/arm64/mm/ptdump_debugfs.c
@@ -7,7 +7,7 @@
 static int ptdump_show(struct seq_file *m, void *v)
 {
 	struct ptdump_info *info = m->private;
-	ptdump_walk_pgd(m, info);
+	ptdump_walk(m, info);
 	return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(ptdump);
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 4d4754e..bc15a26 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -7,7 +7,6 @@
 generic-y += device.h
 generic-y += div64.h
 generic-y += dma.h
-generic-y += dma-contiguous.h
 generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += exec.h
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
index 23ee604..52eaf31 100644
--- a/arch/csky/kernel/setup.c
+++ b/arch/csky/kernel/setup.c
@@ -136,10 +136,6 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_HIGHMEM
 	kmap_init();
 #endif
-
-#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
-	conswitchp = &dummy_con;
-#endif
 }
 
 unsigned long va_pa_offset;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index b392c0a..a25ab9b 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -331,10 +331,10 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t
 	return size;
 }
 
-static const struct file_operations salinfo_event_fops = {
-	.open  = salinfo_event_open,
-	.read  = salinfo_event_read,
-	.llseek = noop_llseek,
+static const struct proc_ops salinfo_event_proc_ops = {
+	.proc_open	= salinfo_event_open,
+	.proc_read	= salinfo_event_read,
+	.proc_lseek	= noop_llseek,
 };
 
 static int
@@ -534,12 +534,12 @@ salinfo_log_write(struct file *file, const char __user *buffer, size_t count, lo
 	return count;
 }
 
-static const struct file_operations salinfo_data_fops = {
-	.open    = salinfo_log_open,
-	.release = salinfo_log_release,
-	.read    = salinfo_log_read,
-	.write   = salinfo_log_write,
-	.llseek  = default_llseek,
+static const struct proc_ops salinfo_data_proc_ops = {
+	.proc_open	= salinfo_log_open,
+	.proc_release	= salinfo_log_release,
+	.proc_read	= salinfo_log_read,
+	.proc_write	= salinfo_log_write,
+	.proc_lseek	= default_llseek,
 };
 
 static int salinfo_cpu_online(unsigned int cpu)
@@ -617,13 +617,13 @@ salinfo_init(void)
 			continue;
 
 		entry = proc_create_data("event", S_IRUSR, dir,
-					 &salinfo_event_fops, data);
+					 &salinfo_event_proc_ops, data);
 		if (!entry)
 			continue;
 		*sdir++ = entry;
 
 		entry = proc_create_data("data", S_IRUSR | S_IWUSR, dir,
-					 &salinfo_data_fops, data);
+					 &salinfo_data_proc_ops, data);
 		if (!entry)
 			continue;
 		*sdir++ = entry;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c49fcef..4009383 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -608,9 +608,6 @@ setup_arch (char **cmdline_p)
 
 #ifdef CONFIG_VT
 	if (!conswitchp) {
-# if defined(CONFIG_DUMMY_CONSOLE)
-		conswitchp = &dummy_con;
-# endif
 # if defined(CONFIG_VGA_CONSOLE)
 		/*
 		 * Non-legacy systems may route legacy VGA MMIO range to system
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index 36d5faf..042911e 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -356,3 +356,5 @@
 433	common	fspick				sys_fspick
 434	common	pidfd_open			sys_pidfd_open
 # 435 reserved for clone3
+437	common	openat2				sys_openat2
+438	common	pidfd_getfd			sys_pidfd_getfd
diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig
index d5e683d..3a84f24 100644
--- a/arch/m68k/configs/amcore_defconfig
+++ b/arch/m68k/configs/amcore_defconfig
@@ -13,7 +13,6 @@
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_MMU is not set
 CONFIG_M5307=y
 CONFIG_AMCORE=y
diff --git a/arch/m68k/configs/m5208evb_defconfig b/arch/m68k/configs/m5208evb_defconfig
index a3102ff..0ee3079 100644
--- a/arch/m68k/configs/m5208evb_defconfig
+++ b/arch/m68k/configs/m5208evb_defconfig
@@ -10,8 +10,6 @@
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_MMU is not set
 # CONFIG_4KSTACKS is not set
 CONFIG_RAMBASE=0x40000000
diff --git a/arch/m68k/configs/m5249evb_defconfig b/arch/m68k/configs/m5249evb_defconfig
index f7bb9ed..f84f68c 100644
--- a/arch/m68k/configs/m5249evb_defconfig
+++ b/arch/m68k/configs/m5249evb_defconfig
@@ -10,8 +10,6 @@
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_MMU is not set
 CONFIG_M5249=y
 CONFIG_M5249C3=y
diff --git a/arch/m68k/configs/m5272c3_defconfig b/arch/m68k/configs/m5272c3_defconfig
index 1e679f6..eca6502 100644
--- a/arch/m68k/configs/m5272c3_defconfig
+++ b/arch/m68k/configs/m5272c3_defconfig
@@ -10,8 +10,6 @@
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_MMU is not set
 CONFIG_M5272=y
 CONFIG_M5272C3=y
diff --git a/arch/m68k/configs/m5275evb_defconfig b/arch/m68k/configs/m5275evb_defconfig
index d2987b4..9402c7a 100644
--- a/arch/m68k/configs/m5275evb_defconfig
+++ b/arch/m68k/configs/m5275evb_defconfig
@@ -10,8 +10,6 @@
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_MMU is not set
 CONFIG_M5275=y
 # CONFIG_4KSTACKS is not set
diff --git a/arch/m68k/configs/m5307c3_defconfig b/arch/m68k/configs/m5307c3_defconfig
index 97a78c9..bb8b0eb 100644
--- a/arch/m68k/configs/m5307c3_defconfig
+++ b/arch/m68k/configs/m5307c3_defconfig
@@ -10,8 +10,6 @@
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_MMU is not set
 CONFIG_M5307=y
 CONFIG_M5307C3=y
diff --git a/arch/m68k/configs/m5407c3_defconfig b/arch/m68k/configs/m5407c3_defconfig
index 766a97f..ce9ccf1 100644
--- a/arch/m68k/configs/m5407c3_defconfig
+++ b/arch/m68k/configs/m5407c3_defconfig
@@ -11,8 +11,6 @@
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_MMU is not set
 CONFIG_M5407=y
 CONFIG_M5407C3=y
diff --git a/arch/m68k/configs/m5475evb_defconfig b/arch/m68k/configs/m5475evb_defconfig
index 579fd98..93f7c7a 100644
--- a/arch/m68k/configs/m5475evb_defconfig
+++ b/arch/m68k/configs/m5475evb_defconfig
@@ -11,8 +11,6 @@
 CONFIG_EMBEDDED=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_COLDFIRE=y
 # CONFIG_4KSTACKS is not set
 CONFIG_RAMBASE=0x0
diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h
index 0134008..6bc80c3 100644
--- a/arch/m68k/include/asm/uaccess_no.h
+++ b/arch/m68k/include/asm/uaccess_no.h
@@ -71,26 +71,29 @@ extern int __put_user_bad(void);
 #define get_user(x, ptr)					\
 ({								\
     int __gu_err = 0;						\
-    typeof(x) __gu_val = 0;					\
     switch (sizeof(*(ptr))) {					\
     case 1:							\
-	__get_user_asm(__gu_err, __gu_val, ptr, b, "=d");	\
+	__get_user_asm(__gu_err, x, ptr, b, "=d");		\
 	break;							\
     case 2:							\
-	__get_user_asm(__gu_err, __gu_val, ptr, w, "=r");	\
+	__get_user_asm(__gu_err, x, ptr, w, "=r");		\
 	break;							\
     case 4:							\
-	__get_user_asm(__gu_err, __gu_val, ptr, l, "=r");	\
+	__get_user_asm(__gu_err, x, ptr, l, "=r");		\
 	break;							\
-    case 8:							\
-	memcpy((void *) &__gu_val, ptr, sizeof (*(ptr)));	\
+    case 8: {							\
+	union {							\
+	    u64 l;						\
+	    __typeof__(*(ptr)) t;				\
+	} __gu_val;						\
+	memcpy(&__gu_val.l, ptr, sizeof(__gu_val.l));		\
+	(x) = __gu_val.t;					\
 	break;							\
+    }								\
     default:							\
-	__gu_val = 0;						\
 	__gu_err = __get_user_bad();				\
 	break;							\
     }								\
-    (x) = (typeof(*(ptr))) __gu_val;				\
     __gu_err;							\
 })
 #define __get_user(x, ptr) get_user(x, ptr)
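
The reworked 8-byte case of get_user() above copies the raw bits through a union of u64 and the pointed-to type, so sizeof(*(ptr)) bytes are never memcpy()'d into a possibly narrower typeof(x) temporary. The standalone sketch below shows the same pattern; get_u64_as() is a hypothetical name for illustration, not a kernel macro, and it relies on the same GNU C extensions (statement expressions, __typeof__) the kernel uses.

/*
 * Union + memcpy pattern from the 8-byte get_user() case: the 64-bit
 * value is staged in a correctly sized union member and only then
 * assigned to x, instead of being copied over a typeof(x) temporary.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define get_u64_as(x, ptr)					\
({								\
	union {							\
		uint64_t l;					\
		__typeof__(*(ptr)) t;				\
	} __gu_val;						\
	memcpy(&__gu_val.l, (ptr), sizeof(__gu_val.l));		\
	(x) = __gu_val.t;					\
	0;	/* stands in for __gu_err */			\
})

int main(void)
{
	unsigned long long src = 0x1122334455667788ULL;
	unsigned long long dst = 0;

	get_u64_as(dst, &src);
	printf("%#llx\n", dst);	/* 0x1122334455667788, nothing truncated */
	return 0;
}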
diff --git a/arch/m68k/kernel/bootinfo_proc.c b/arch/m68k/kernel/bootinfo_proc.c
index 3b9cab8..857fa2a 100644
--- a/arch/m68k/kernel/bootinfo_proc.c
+++ b/arch/m68k/kernel/bootinfo_proc.c
@@ -26,9 +26,9 @@ static ssize_t bootinfo_read(struct file *file, char __user *buf,
 				       bootinfo_size);
 }
 
-static const struct file_operations bootinfo_fops = {
-	.read = bootinfo_read,
-	.llseek = default_llseek,
+static const struct proc_ops bootinfo_proc_ops = {
+	.proc_read	= bootinfo_read,
+	.proc_lseek	= default_llseek,
 };
 
 void __init save_bootinfo(const struct bi_record *bi)
@@ -67,7 +67,7 @@ static int __init init_bootinfo_procfs(void)
 	if (!bootinfo_copy)
 		return -ENOMEM;
 
-	pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_fops, NULL);
+	pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_proc_ops, NULL);
 	if (!pde) {
 		kfree(bootinfo_copy);
 		return -ENOMEM;
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 528484f..ab8aa7b 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -274,10 +274,6 @@ void __init setup_arch(char **cmdline_p)
 
 	parse_early_param();
 
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
-
 	switch (m68k_machtype) {
 #ifdef CONFIG_AMIGA
 	case MACH_AMIGA:
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 3c5def1..a63483d 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -146,10 +146,6 @@ void __init setup_arch(char **cmdline_p)
 	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
 	boot_command_line[COMMAND_LINE_SIZE-1] = 0;
 
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_DUMMY_CONSOLE)
-	conswitchp = &dummy_con;
-#endif
-
 	/*
 	 * Give all the memory to the bootmap allocator, tell it to put the
 	 * boot mem_map at the start of memory.
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index a00a5d0..f4f49fc 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -435,3 +435,5 @@
 433	common	fspick				sys_fspick
 434	common	pidfd_open			sys_pidfd_open
 435	common	clone3				__sys_clone3
+437	common	openat2				sys_openat2
+438	common	pidfd_getfd			sys_pidfd_getfd
diff --git a/arch/m68k/sun3x/config.c b/arch/m68k/sun3x/config.c
index 03ce7f9..d806dee 100644
--- a/arch/m68k/sun3x/config.c
+++ b/arch/m68k/sun3x/config.c
@@ -70,7 +70,6 @@ void __init config_sun3x(void)
 		break;
 	default:
 		serial_console = 0;
-		conswitchp = &dummy_con;
 		break;
 	}
 #endif
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index a105f11..6a331bd 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -27,6 +27,7 @@
 	select HAVE_ARCH_HASH
 	select HAVE_ARCH_KGDB
 	select HAVE_DEBUG_KMEMLEAK
+	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index b3b433d..9b8a50f 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -26,6 +26,7 @@
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_EFI_PARTITION is not set
+CONFIG_CMA=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -73,7 +74,7 @@
 CONFIG_UIO=y
 CONFIG_UIO_PDRV_GENIRQ=y
 CONFIG_UIO_DMEM_GENIRQ=y
-CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
 # CONFIG_DNOTIFY is not set
 CONFIG_TMPFS=y
 CONFIG_CRAMFS=y
@@ -82,10 +83,11 @@
 CONFIG_CIFS=y
 CONFIG_CIFS_STATS2=y
 CONFIG_ENCRYPTED_KEYS=y
+CONFIG_DMA_CMA=y
 CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_SPINLOCK=y
 CONFIG_KGDB=y
 CONFIG_KGDB_TESTS=y
 CONFIG_KGDB_KDB=y
+CONFIG_DEBUG_SLAB=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_SPINLOCK=y
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index 377de39c..8c42078 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -70,7 +70,7 @@
 CONFIG_FB=y
 CONFIG_FB_XILINX=y
 # CONFIG_USB_SUPPORT is not set
-CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
 # CONFIG_DNOTIFY is not set
 CONFIG_CRAMFS=y
 CONFIG_ROMFS_FS=y
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index 0bde47e..dcba538 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -92,7 +92,8 @@ static inline void __disable_dcache_nomsr(void)
 #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
 do {									\
 	int align = ~(cache_line_length - 1);				\
-	end = min(start + cache_size, end);				\
+	if (start <  UINT_MAX - cache_size)				\
+		end = min(start + cache_size, end);			\
 	start &= align;							\
 } while (0)
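
The cache.c change above guards the 'start + cache_size' clamp against unsigned wrap-around. A minimal sketch of why the check matters follows; clamp_end() and the plain min() here are illustrative stand-ins, not the kernel's macros.

/*
 * When 'start' is near UINT_MAX, 'start + cache_size' wraps and an
 * unguarded min() would clamp 'end' down to a tiny value; the added
 * check simply skips the clamp in that case.
 */
#include <stdio.h>
#include <limits.h>

#define min(a, b) ((a) < (b) ? (a) : (b))	/* simple double-evaluating min */

static unsigned int clamp_end(unsigned int start, unsigned int end,
			      unsigned int cache_size)
{
	if (start < UINT_MAX - cache_size)	/* the added overflow check */
		end = min(start + cache_size, end);
	return end;
}

int main(void)
{
	unsigned int cache_size = 0x8000;	/* 32 KiB cache */
	unsigned int start = UINT_MAX - 0x100;	/* near the top of the range */
	unsigned int end = UINT_MAX;

	printf("unguarded: %#x\n", min(start + cache_size, end));	/* wraps to 0x7eff */
	printf("guarded:   %#x\n", clamp_end(start, end, cache_size));	/* stays 0xffffffff */
	return 0;
}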
 
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index ef2f494..cd9b445 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -51,6 +51,7 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
 	{"9.5", 0x22},
 	{"9.6", 0x23},
 	{"10.0", 0x24},
+	{"11.0", 0x25},
 	{NULL, 0},
 };
 
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 7d28944..14b2764 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -121,10 +121,10 @@
 	tophys(r4,r4)			/* convert to phys address */
 	ori	r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
 _copy_command_line:
-	/* r2=r5+r6 - r5 contain pointer to command line */
+	/* r2=r5+r11 - r5 contain pointer to command line */
 	lbu	r2, r5, r11
 	beqid	r2, skip		/* Skip if no data */
-	sb	r2, r4, r11		/* addr[r4+r6]= r2 */
+	sb	r2, r4, r11		/* addr[r4+r11]= r2 */
 	addik	r11, r11, 1		/* increment counting */
 	bgtid	r3, _copy_command_line	/* loop for all entries       */
 	addik	r3, r3, -1		/* decrement loop */
@@ -139,8 +139,8 @@
 	ori	r4, r0, TOPHYS(_bram_load_start)	/* save bram context */
 	ori	r3, r0, (LMB_SIZE - 4)
 _copy_bram:
-	lw	r7, r0, r11		/* r7 = r0 + r6 */
-	sw	r7, r4, r11		/* addr[r4 + r6] = r7 */
+	lw	r7, r0, r11		/* r7 = r0 + r11 */
+	sw	r7, r4, r11		/* addr[r4 + r11] = r7 */
 	addik	r11, r11, 4		/* increment counting */
 	bgtid	r3, _copy_bram		/* loop for all entries */
 	addik	r3, r3, -4		/* descrement loop */
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 522a0c5..511c1ab 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -65,10 +65,6 @@ void __init setup_arch(char **cmdline_p)
 	microblaze_cache_init();
 
 	xilinx_pci_init();
-
-#if defined(CONFIG_DUMMY_CONSOLE)
-	conswitchp = &dummy_con;
-#endif
 }
 
 #ifdef CONFIG_MTD_UCLINUX
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 09b0cd7..4c67b11 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -441,3 +441,5 @@
 433	common	fspick				sys_fspick
 434	common	pidfd_open			sys_pidfd_open
 435	common	clone3				sys_clone3
+437	common	openat2				sys_openat2
+438	common	pidfd_getfd			sys_pidfd_getfd
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 050fc62..1056f16 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -7,6 +7,7 @@
  * for more details.
  */
 
+#include <linux/dma-contiguous.h>
 #include <linux/memblock.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -345,6 +346,9 @@ asmlinkage void __init mmu_init(void)
 	/* This will also cause that unflatten device tree will be allocated
 	 * inside 768MB limit */
 	memblock_set_current_limit(memory_start + lowmem_size - 1);
+
+	/* CMA initialization */
+	dma_contiguous_reserve(memory_start + lowmem_size - 1);
 }
 
 /* This is only called until mem_init is done. */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a2739a3..797d7f1 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -5,9 +5,11 @@
 	select ARCH_32BIT_OFF_T if !64BIT
 	select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
 	select ARCH_CLOCKSOURCE_DATA
+	select ARCH_HAS_FORTIFY_SOURCE
+	select ARCH_HAS_KCOV
+	select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
-	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_SUPPORTS_UPROBES
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
@@ -47,7 +49,7 @@
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES
 	select HAVE_ASM_MODVERSIONS
-	select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2
+	select HAVE_CBPF_JIT if !64BIT && !CPU_MICROMIPS
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_COPY_THREAD_TLS
 	select HAVE_C_RECORDMCOUNT
@@ -55,11 +57,14 @@
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
+	select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2
 	select HAVE_EXIT_THREAD
 	select HAVE_FAST_GUP
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
+	select HAVE_GCC_PLUGINS
+	select HAVE_GENERIC_VDSO
 	select HAVE_IDE
 	select HAVE_IOREMAP_PROT
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
@@ -78,18 +83,14 @@
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
-	select HAVE_GENERIC_VDSO
 	select IRQ_FORCED_THREADING
 	select ISA if EISA
-	select MODULES_USE_ELF_RELA if MODULES && 64BIT
 	select MODULES_USE_ELF_REL if MODULES
+	select MODULES_USE_ELF_RELA if MODULES && 64BIT
 	select PERF_USE_VMALLOC
 	select RTC_LIB
 	select SYSCTL_EXCEPTION_TRACE
 	select VIRT_TO_BUS
-	select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
-	select ARCH_HAS_KCOV
-	select HAVE_GCC_PLUGINS
 
 menu "Machine selection"
 
@@ -104,20 +105,18 @@
 	select CEVT_R4K
 	select CLKSRC_MIPS_GIC
 	select COMMON_CLK
-	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
+	select CPU_MIPSR2_IRQ_VI
 	select CSRC_R4K
 	select DMA_PERDEV_COHERENT
 	select HAVE_PCI
 	select IRQ_MIPS_CPU
-	select LIBFDT
 	select MIPS_AUTO_PFN_OFFSET
 	select MIPS_CPU_SCACHE
 	select MIPS_GIC
 	select MIPS_L1_CACHE_SHIFT_7
 	select NO_EXCEPT_FILL
 	select PCI_DRIVERS_GENERIC
-	select PINCTRL
 	select SMP_UP if SMP
 	select SWAP_IO_SPACE
 	select SYS_HAS_CPU_MIPS32_R1
@@ -132,11 +131,12 @@
 	select SYS_SUPPORTS_HIGHMEM
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_MICROMIPS
-	select SYS_SUPPORTS_MIPS_CPS
 	select SYS_SUPPORTS_MIPS16
+	select SYS_SUPPORTS_MIPS_CPS
 	select SYS_SUPPORTS_MULTITHREADING
 	select SYS_SUPPORTS_RELOCATABLE
 	select SYS_SUPPORTS_SMARTMIPS
+	select UHI_BOOT
 	select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
 	select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
 	select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
@@ -144,7 +144,6 @@
 	select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
 	select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
 	select USE_OF
-	select UHI_BOOT
 	help
 	  Select this to build a kernel which aims to support multiple boards,
 	  generally using a flattened device tree passed from the bootloader
@@ -403,7 +402,6 @@
 	select GENERIC_IRQ_CHIP
 	select BUILTIN_DTB if MIPS_NO_APPENDED_DTB
 	select USE_OF
-	select LIBFDT
 
 config LANTIQ
 	bool "Lantiq based platforms"
@@ -510,7 +508,6 @@
 	select DMA_NONCOHERENT
 	select GPIOLIB
 	select IRQ_MIPS_CPU
-	select LIBFDT
 	select MFD_SYSCON
 	select MIPS_CPU_SCACHE
 	select MIPS_GIC
@@ -548,7 +545,6 @@
 	select I8253
 	select I8259
 	select IRQ_MIPS_CPU
-	select LIBFDT
 	select MIPS_BONITO64
 	select MIPS_CPU_SCACHE
 	select MIPS_GIC
@@ -980,7 +976,6 @@
 	select ZONE_DMA32
 	select HOLES_IN_ZONE
 	select GPIOLIB
-	select LIBFDT
 	select USE_OF
 	select ARCH_SPARSEMEM_ENABLE
 	select SYS_SUPPORTS_SMP
@@ -1223,8 +1218,7 @@
 	def_bool n
 
 config GENERIC_CSUM
-	bool
-	default y if !CPU_HAS_LOAD_STORE_LR
+	def_bool CPU_NO_LOAD_STORE_LR
 
 config GENERIC_ISA_DMA
 	bool
@@ -1442,11 +1436,14 @@
 	bool "Loongson 64-bit CPU"
 	depends on SYS_HAS_CPU_LOONGSON64
 	select ARCH_HAS_PHYS_TO_DMA
+	select CPU_MIPSR2
+	select CPU_HAS_PREFETCH
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
 	select CPU_SUPPORTS_HUGEPAGES
 	select CPU_SUPPORTS_MSA
-	select CPU_HAS_LOAD_STORE_LR
+	select CPU_DIEI_BROKEN if !LOONGSON3_ENHANCEMENT
+	select CPU_MIPSR2_IRQ_VI
 	select WEAK_ORDERING
 	select WEAK_REORDERING_BEYOND_LLSC
 	select MIPS_ASID_BITS_VARIABLE
@@ -1464,8 +1461,6 @@
 config LOONGSON3_ENHANCEMENT
 	bool "New Loongson-3 CPU Enhancements"
 	default n
-	select CPU_MIPSR2
-	select CPU_HAS_PREFETCH
 	depends on CPU_LOONGSON64
 	help
 	  New Loongson-3 cores (since Loongson-3A R2, as opposed to Loongson-3A
@@ -1542,7 +1537,6 @@
 	bool "MIPS32 Release 1"
 	depends on SYS_HAS_CPU_MIPS32_R1
 	select CPU_HAS_PREFETCH
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
 	help
@@ -1560,7 +1554,6 @@
 	bool "MIPS32 Release 2"
 	depends on SYS_HAS_CPU_MIPS32_R2
 	select CPU_HAS_PREFETCH
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
 	select CPU_SUPPORTS_MSA
@@ -1576,6 +1569,7 @@
 	bool "MIPS32 Release 6"
 	depends on SYS_HAS_CPU_MIPS32_R6
 	select CPU_HAS_PREFETCH
+	select CPU_NO_LOAD_STORE_LR
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
 	select CPU_SUPPORTS_MSA
@@ -1591,7 +1585,6 @@
 	bool "MIPS64 Release 1"
 	depends on SYS_HAS_CPU_MIPS64_R1
 	select CPU_HAS_PREFETCH
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
@@ -1611,7 +1604,6 @@
 	bool "MIPS64 Release 2"
 	depends on SYS_HAS_CPU_MIPS64_R2
 	select CPU_HAS_PREFETCH
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
@@ -1629,6 +1621,7 @@
 	bool "MIPS64 Release 6"
 	depends on SYS_HAS_CPU_MIPS64_R6
 	select CPU_HAS_PREFETCH
+	select CPU_NO_LOAD_STORE_LR
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
@@ -1646,7 +1639,6 @@
 	bool "R3000"
 	depends on SYS_HAS_CPU_R3000
 	select CPU_HAS_WB
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_R3K_TLB
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
@@ -1662,7 +1654,6 @@
 	bool "R39XX"
 	depends on SYS_HAS_CPU_TX39XX
 	select CPU_SUPPORTS_32BIT_KERNEL
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_R3K_TLB
 
 config CPU_VR41XX
@@ -1670,7 +1661,6 @@
 	depends on SYS_HAS_CPU_VR41XX
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
-	select CPU_HAS_LOAD_STORE_LR
 	help
 	  This option selects support for the NEC VR4100 series of processors.
 	  Only choose this option if you have one of these processors as a
@@ -1683,7 +1673,6 @@
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HUGEPAGES
-	select CPU_HAS_LOAD_STORE_LR
 	help
 	  MIPS Technologies R4000-series processors other than 4300, including
 	  the R4000, R4400, R4600, and 4700.
@@ -1692,7 +1681,6 @@
 	bool "R49XX"
 	depends on SYS_HAS_CPU_TX49XX
 	select CPU_HAS_PREFETCH
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HUGEPAGES
@@ -1703,7 +1691,6 @@
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HUGEPAGES
-	select CPU_HAS_LOAD_STORE_LR
 	help
 	  MIPS Technologies R5000-series processors other than the Nevada.
 
@@ -1713,7 +1700,6 @@
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HUGEPAGES
-	select CPU_HAS_LOAD_STORE_LR
 	help
 	  NEC VR5500 and VR5500A series processors implement 64-bit MIPS IV
 	  instruction set.
@@ -1724,7 +1710,6 @@
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HUGEPAGES
-	select CPU_HAS_LOAD_STORE_LR
 	help
 	  QED / PMC-Sierra RM52xx-series ("Nevada") processors.
 
@@ -1732,7 +1717,6 @@
 	bool "R10000"
 	depends on SYS_HAS_CPU_R10000
 	select CPU_HAS_PREFETCH
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
@@ -1744,7 +1728,6 @@
 	bool "RM7000"
 	depends on SYS_HAS_CPU_RM7000
 	select CPU_HAS_PREFETCH
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
@@ -1753,7 +1736,6 @@
 config CPU_SB1
 	bool "SB1"
 	depends on SYS_HAS_CPU_SB1
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
@@ -1764,7 +1746,6 @@
 	bool "Cavium Octeon processor"
 	depends on SYS_HAS_CPU_CAVIUM_OCTEON
 	select CPU_HAS_PREFETCH
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select WEAK_ORDERING
 	select CPU_SUPPORTS_HIGHMEM
@@ -1794,7 +1775,6 @@
 	select WEAK_ORDERING
 	select CPU_SUPPORTS_HIGHMEM
 	select CPU_HAS_PREFETCH
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_SUPPORTS_CPUFREQ
 	select MIPS_EXTERNAL_TIMER
 	help
@@ -1803,7 +1783,6 @@
 config CPU_XLR
 	bool "Netlogic XLR SoC"
 	depends on SYS_HAS_CPU_XLR
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
@@ -1822,7 +1801,6 @@
 	select WEAK_ORDERING
 	select WEAK_REORDERING_BEYOND_LLSC
 	select CPU_HAS_PREFETCH
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_MIPSR2
 	select CPU_SUPPORTS_HUGEPAGES
 	select MIPS_ASID_BITS_VARIABLE
@@ -1928,14 +1906,12 @@
 	select CPU_SUPPORTS_HIGHMEM
 	select CPU_SUPPORTS_HUGEPAGES
 	select ARCH_HAS_PHYS_TO_DMA
-	select CPU_HAS_LOAD_STORE_LR
 
 config CPU_LOONGSON32
 	bool
 	select CPU_MIPS32
 	select CPU_MIPSR2
 	select CPU_HAS_PREFETCH
-	select CPU_HAS_LOAD_STORE_LR
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
 	select CPU_SUPPORTS_CPUFREQ
@@ -2110,12 +2086,14 @@
 	bool
 	default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
 	select CPU_HAS_RIXI
+	select CPU_HAS_DIEI if !CPU_DIEI_BROKEN
 	select MIPS_SPRAM
 
 config CPU_MIPSR6
 	bool
 	default y if CPU_MIPS32_R6 || CPU_MIPS64_R6
 	select CPU_HAS_RIXI
+	select CPU_HAS_DIEI if !CPU_DIEI_BROKEN
 	select HAVE_ARCH_BITREVERSE
 	select MIPS_ASID_BITS_VARIABLE
 	select MIPS_CRC_SUPPORT
@@ -2575,15 +2553,23 @@
 config XKS01
 	bool
 
+config CPU_HAS_DIEI
+	depends on !CPU_DIEI_BROKEN
+	bool
+
+config CPU_DIEI_BROKEN
+	bool
+
 config CPU_HAS_RIXI
 	bool
 
-config CPU_HAS_LOAD_STORE_LR
+config CPU_NO_LOAD_STORE_LR
 	bool
 	help
-	  CPU has support for unaligned load and store instructions:
+	  CPU lacks support for unaligned load and store instructions:
 	  LWL, LWR, SWL, SWR (Load/store word left/right).
-	  LDL, LDR, SDL, SDR (Load/store doubleword left/right, for 64bit systems).
+	  LDL, LDR, SDL, SDR (Load/store doubleword left/right, for 64bit
+	  systems).
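In practice this means code for these CPUs cannot rely on LWL/LWR/SWL/SWR partial-word accesses; unaligned data is either handled natively by the ordinary load/store instructions (as the R6 ISA expects) or accessed byte-wise. A minimal C sketch of such a byte-wise-safe access (illustrative only, not the kernel's actual get_unaligned() implementation):

    #include <stdint.h>
    #include <string.h>

    /* Read a 32-bit value (host byte order) from a possibly unaligned
     * address.  memcpy() lets the compiler emit whatever the target ISA
     * supports: an LWL/LWR pair, a plain lw, or individual byte loads. */
    static inline uint32_t read_unaligned_u32(const void *p)
    {
        uint32_t v;

        memcpy(&v, p, sizeof(v));
        return v;
    }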
 
 #
 # Vectored interrupt mode is an R2 feature
@@ -2696,6 +2682,14 @@
 config SYS_SUPPORTS_NUMA
 	bool
 
+config HAVE_SETUP_PER_CPU_AREA
+	def_bool y
+	depends on NUMA
+
+config NEED_PER_CPU_EMBED_FIRST_CHUNK
+	def_bool y
+	depends on NUMA
+
 config RELOCATABLE
 	bool "Relocatable kernel"
 	depends on SYS_SUPPORTS_RELOCATABLE && (CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_MIPS32_R6 || CPU_MIPS64_R6 || CAVIUM_OCTEON_SOC)
diff --git a/arch/mips/Makefile.postlink b/arch/mips/Makefile.postlink
index f03fdc9..4b1d3ba 100644
--- a/arch/mips/Makefile.postlink
+++ b/arch/mips/Makefile.postlink
@@ -17,7 +17,7 @@
       cmd_ls3_llsc = $(CMD_LS3_LLSC) $@
 
 CMD_RELOCS = arch/mips/boot/tools/relocs
-quiet_cmd_relocs = RELOCS $@
+quiet_cmd_relocs = RELOCS  $@
       cmd_relocs = $(CMD_RELOCS) $@
 
 # `@true` prevents complaint when there is nothing to be done
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 528bd73..4ed45ad 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -123,7 +123,7 @@
 targets += vmlinux.its
 targets += vmlinux.gz.its
 targets += vmlinux.bz2.its
-targets += vmlinux.lzmo.its
+targets += vmlinux.lzma.its
 targets += vmlinux.lzo.its
 
 quiet_cmd_cpp_its_S = ITS     $@
diff --git a/arch/mips/boot/dts/ingenic/Makefile b/arch/mips/boot/dts/ingenic/Makefile
index 9cc4844..e165429 100644
--- a/arch/mips/boot/dts/ingenic/Makefile
+++ b/arch/mips/boot/dts/ingenic/Makefile
@@ -2,5 +2,6 @@
 dtb-$(CONFIG_JZ4740_QI_LB60)	+= qi_lb60.dtb
 dtb-$(CONFIG_JZ4770_GCW0)	+= gcw0.dtb
 dtb-$(CONFIG_JZ4780_CI20)	+= ci20.dtb
+dtb-$(CONFIG_X1000_CU1000_NEO)	+= cu1000-neo.dtb
 
 obj-$(CONFIG_BUILTIN_DTB)	+= $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/ingenic/cu1000-neo.dts b/arch/mips/boot/dts/ingenic/cu1000-neo.dts
new file mode 100644
index 0000000..03abd94
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/cu1000-neo.dts
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include "x1000.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/ingenic,tcu.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+	compatible = "yna,cu1000-neo", "ingenic,x1000";
+	model = "YSH & ATIL General Board CU Neo";
+
+	aliases {
+		serial2 = &uart2;
+	};
+
+	chosen {
+		stdout-path = "serial2:115200n8";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x04000000>;
+	};
+
+	wlan_pwrseq: msc1-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+
+		clocks = <&lpoclk>;
+		clock-names = "ext_clock";
+
+		reset-gpios = <&gpc 17 GPIO_ACTIVE_LOW>;
+		post-power-on-delay-ms = <200>;
+
+		lpoclk: ap6212a {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <32768>;
+		};
+	};
+};
+
+&exclk {
+	clock-frequency = <24000000>;
+};
+
+&tcu {
+	/* 1500 kHz for the system timer and clocksource */
+	assigned-clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER2>;
+	assigned-clock-rates = <1500000>, <1500000>;
+
+	/* Use channel #0 for the system timer and channel #2 for the clocksource */
+	ingenic,pwm-channels-mask = <0xfa>;
+};
+
+&i2c0 {
+	status = "okay";
+
+	clock-frequency = <400000>;
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&pins_i2c0>;
+
+	ads7830@48 {
+		compatible = "ti,ads7830";
+		reg = <0x48>;
+	};
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pins_uart2>;
+
+	status = "okay";
+};
+
+&mac {
+	phy-mode = "rmii";
+	phy-handle = <&lan8720a>;
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&pins_mac>;
+
+	snps,reset-gpio = <&gpc 23 GPIO_ACTIVE_LOW>; /* PC23 */
+	snps,reset-active-low;
+	snps,reset-delays-us = <0 10000 30000>;
+
+	status = "okay";
+};
+
+&mdio {
+	status = "okay";
+
+	lan8720a: ethernet-phy@0 {
+		compatible = "ethernet-phy-id0007.c0f0", "ethernet-phy-ieee802.3-c22";
+		reg = <0>;
+	};
+};
+
+&msc0 {
+	bus-width = <8>;
+	max-frequency = <50000000>;
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&pins_msc0>;
+
+	non-removable;
+
+	status = "okay";
+};
+
+&msc1 {
+	bus-width = <4>;
+	max-frequency = <50000000>;
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&pins_msc1>;
+
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	non-removable;
+
+	mmc-pwrseq = <&wlan_pwrseq>;
+
+	status = "okay";
+
+	ap6212a: wifi@1 {
+		compatible = "brcm,bcm4329-fmac";
+		reg = <1>;
+
+		interrupt-parent = <&gpc>;
+		interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
+		interrupt-names = "host-wake";
+
+		brcm,drive-strength = <10>;
+	};
+};
+
+&pinctrl {
+	pins_i2c0: i2c0 {
+		function = "i2c0";
+		groups = "i2c0-data";
+		bias-disable;
+	};
+
+	pins_uart2: uart2 {
+		function = "uart2";
+		groups = "uart2-data-d";
+		bias-disable;
+	};
+
+	pins_mac: mac {
+		function = "mac";
+		groups = "mac";
+		bias-disable;
+	};
+
+	pins_msc0: msc0 {
+		function = "mmc0";
+		groups = "mmc0-1bit", "mmc0-4bit", "mmc0-8bit";
+		bias-disable;
+	};
+
+	pins_msc1: msc1 {
+		function = "mmc1";
+		groups = "mmc1-1bit", "mmc1-4bit";
+		bias-disable;
+	};
+};
diff --git a/arch/mips/boot/dts/ingenic/x1000.dtsi b/arch/mips/boot/dts/ingenic/x1000.dtsi
new file mode 100644
index 0000000..4994c69
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/x1000.dtsi
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <dt-bindings/clock/x1000-cgu.h>
+#include <dt-bindings/dma/x1000-dma.h>
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "ingenic,x1000", "ingenic,x1000e";
+
+	cpuintc: interrupt-controller {
+		#address-cells = <0>;
+		#interrupt-cells = <1>;
+		interrupt-controller;
+		compatible = "mti,cpu-interrupt-controller";
+	};
+
+	intc: interrupt-controller@10001000 {
+		compatible = "ingenic,x1000-intc", "ingenic,jz4780-intc";
+		reg = <0x10001000 0x50>;
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+
+		interrupt-parent = <&cpuintc>;
+		interrupts = <2>;
+	};
+
+	exclk: ext {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+	};
+
+	rtclk: rtc {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+	};
+
+	cgu: x1000-cgu@10000000 {
+		compatible = "ingenic,x1000-cgu";
+		reg = <0x10000000 0x100>;
+
+		#clock-cells = <1>;
+
+		clocks = <&exclk>, <&rtclk>;
+		clock-names = "ext", "rtc";
+	};
+
+	tcu: timer@10002000 {
+		compatible = "ingenic,x1000-tcu",
+				 "ingenic,jz4770-tcu",
+				 "simple-mfd";
+		reg = <0x10002000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0x0 0x10002000 0x1000>;
+
+		#clock-cells = <1>;
+
+		clocks = <&cgu X1000_CLK_RTCLK
+			  &cgu X1000_CLK_EXCLK
+			  &cgu X1000_CLK_PCLK>;
+		clock-names = "rtc", "ext", "pclk";
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+
+		interrupt-parent = <&intc>;
+		interrupts = <27 26 25>;
+
+		wdt: watchdog@0 {
+			compatible = "ingenic,x1000-watchdog", "ingenic,jz4780-watchdog";
+			reg = <0x0 0x10>;
+
+			clocks = <&cgu X1000_CLK_RTCLK>;
+			clock-names = "wdt";
+		};
+	};
+
+	rtc: rtc@10003000 {
+		compatible = "ingenic,x1000-rtc", "ingenic,jz4780-rtc";
+		reg = <0x10003000 0x4c>;
+
+		interrupt-parent = <&intc>;
+		interrupts = <32>;
+
+		clocks = <&cgu X1000_CLK_RTCLK>;
+		clock-names = "rtc";
+	};
+
+	pinctrl: pin-controller@10010000 {
+		compatible = "ingenic,x1000-pinctrl";
+		reg = <0x10010000 0x800>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		gpa: gpio@0 {
+			compatible = "ingenic,x1000-gpio";
+			reg = <0>;
+
+			gpio-controller;
+			gpio-ranges = <&pinctrl 0 0 32>;
+			#gpio-cells = <2>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+
+			interrupt-parent = <&intc>;
+			interrupts = <17>;
+		};
+
+		gpb: gpio@1 {
+			compatible = "ingenic,x1000-gpio";
+			reg = <1>;
+
+			gpio-controller;
+			gpio-ranges = <&pinctrl 0 32 32>;
+			#gpio-cells = <2>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+
+			interrupt-parent = <&intc>;
+			interrupts = <16>;
+		};
+
+		gpc: gpio@2 {
+			compatible = "ingenic,x1000-gpio";
+			reg = <2>;
+
+			gpio-controller;
+			gpio-ranges = <&pinctrl 0 64 32>;
+			#gpio-cells = <2>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+
+			interrupt-parent = <&intc>;
+			interrupts = <15>;
+		};
+
+		gpd: gpio@3 {
+			compatible = "ingenic,x1000-gpio";
+			reg = <3>;
+
+			gpio-controller;
+			gpio-ranges = <&pinctrl 0 96 32>;
+			#gpio-cells = <2>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+
+			interrupt-parent = <&intc>;
+			interrupts = <14>;
+		};
+	};
+
+	i2c0: i2c-controller@10050000 {
+		compatible = "ingenic,x1000-i2c";
+		reg = <0x10050000 0x1000>;
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		interrupt-parent = <&intc>;
+		interrupts = <60>;
+
+		clocks = <&cgu X1000_CLK_I2C0>;
+
+		status = "disabled";
+	};
+
+	i2c1: i2c-controller@10051000 {
+		compatible = "ingenic,x1000-i2c";
+		reg = <0x10051000 0x1000>;
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		interrupt-parent = <&intc>;
+		interrupts = <59>;
+
+		clocks = <&cgu X1000_CLK_I2C1>;
+
+		status = "disabled";
+	};
+
+	i2c2: i2c-controller@10052000 {
+		compatible = "ingenic,x1000-i2c";
+		reg = <0x10052000 0x1000>;
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		interrupt-parent = <&intc>;
+		interrupts = <58>;
+
+		clocks = <&cgu X1000_CLK_I2C2>;
+
+		status = "disabled";
+	};
+
+	uart0: serial@10030000 {
+		compatible = "ingenic,x1000-uart";
+		reg = <0x10030000 0x100>;
+
+		interrupt-parent = <&intc>;
+		interrupts = <51>;
+
+		clocks = <&exclk>, <&cgu X1000_CLK_UART0>;
+		clock-names = "baud", "module";
+
+		status = "disabled";
+	};
+
+	uart1: serial@10031000 {
+		compatible = "ingenic,x1000-uart";
+		reg = <0x10031000 0x100>;
+
+		interrupt-parent = <&intc>;
+		interrupts = <50>;
+
+		clocks = <&exclk>, <&cgu X1000_CLK_UART1>;
+		clock-names = "baud", "module";
+
+		status = "disabled";
+	};
+
+	uart2: serial@10032000 {
+		compatible = "ingenic,x1000-uart";
+		reg = <0x10032000 0x100>;
+
+		interrupt-parent = <&intc>;
+		interrupts = <49>;
+
+		clocks = <&exclk>, <&cgu X1000_CLK_UART2>;
+		clock-names = "baud", "module";
+
+		status = "disabled";
+	};
+
+	pdma: dma-controller@13420000 {
+		compatible = "ingenic,x1000-dma";
+		reg = <0x13420000 0x400
+			   0x13421000 0x40>;
+		#dma-cells = <2>;
+
+		interrupt-parent = <&intc>;
+		interrupts = <10>;
+
+		clocks = <&cgu X1000_CLK_PDMA>;
+	};
+
+	mac: ethernet@134b0000 {
+		compatible = "ingenic,x1000-mac", "snps,dwmac";
+		reg = <0x134b0000 0x2000>;
+
+		interrupt-parent = <&intc>;
+		interrupts = <55>;
+		interrupt-names = "macirq";
+
+		clocks = <&cgu X1000_CLK_MAC>;
+		clock-names = "stmmaceth";
+
+		status = "disabled";
+
+		mdio: mdio {
+			compatible = "snps,dwmac-mdio";
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			status = "disabled";
+		};
+	};
+
+	msc0: mmc@13450000 {
+		compatible = "ingenic,x1000-mmc";
+		reg = <0x13450000 0x1000>;
+
+		interrupt-parent = <&intc>;
+		interrupts = <37>;
+
+		clocks = <&cgu X1000_CLK_MSC0>;
+		clock-names = "mmc";
+
+		cap-sd-highspeed;
+		cap-mmc-highspeed;
+		cap-sdio-irq;
+
+		dmas = <&pdma X1000_DMA_MSC0_RX 0xffffffff>,
+			   <&pdma X1000_DMA_MSC0_TX 0xffffffff>;
+		dma-names = "rx", "tx";
+
+		status = "disabled";
+	};
+
+	msc1: mmc@13460000 {
+		compatible = "ingenic,x1000-mmc";
+		reg = <0x13460000 0x1000>;
+
+		interrupt-parent = <&intc>;
+		interrupts = <36>;
+
+		clocks = <&cgu X1000_CLK_MSC1>;
+		clock-names = "mmc";
+
+		cap-sd-highspeed;
+		cap-mmc-highspeed;
+		cap-sdio-irq;
+
+		dmas = <&pdma X1000_DMA_MSC1_RX 0xffffffff>,
+			   <&pdma X1000_DMA_MSC1_TX 0xffffffff>;
+		dma-names = "rx", "tx";
+
+		status = "disabled";
+	};
+};
diff --git a/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
index aa5caaa..6069b33 100644
--- a/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
+++ b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
@@ -177,6 +177,9 @@ &uart1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinmux_i2s_gpio>;		/* GPIO0..3 */
 
+	fifo-size = <8>;
+	tx-threshold = <8>;
+
 	rts-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
 	cts-gpios = <&gpio 2 GPIO_ACTIVE_LOW>;
 };
@@ -195,3 +198,8 @@ &uart2 {
 &watchdog {
 	status = "okay";
 };
+
+&wmac {
+	status = "okay";
+	mediatek,mtd-eeprom = <&factory 0x0000>;
+};
diff --git a/arch/mips/boot/dts/ralink/mt7628a.dtsi b/arch/mips/boot/dts/ralink/mt7628a.dtsi
index 742bcc1..892e8ab 100644
--- a/arch/mips/boot/dts/ralink/mt7628a.dtsi
+++ b/arch/mips/boot/dts/ralink/mt7628a.dtsi
@@ -285,4 +285,14 @@ ehci@101c0000 {
 		interrupt-parent = <&intc>;
 		interrupts = <18>;
 	};
+
+	wmac: wmac@10300000 {
+		compatible = "mediatek,mt7628-wmac";
+		reg = <0x10300000 0x100000>;
+
+		interrupt-parent = <&cpuintc>;
+		interrupts = <6>;
+
+		status = "disabled";
+	};
 };
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index f97be32..6bd1e97 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -2193,7 +2193,7 @@ static int octeon_irq_cib_map(struct irq_domain *d,
 	struct octeon_irq_cib_chip_data *cd;
 
 	if (hw >= host_data->max_bits) {
-		pr_err("ERROR: %s mapping %u is to big!\n",
+		pr_err("ERROR: %s mapping %u is too big!\n",
 		       irq_domain_get_of_node(d)->name, (unsigned)hw);
 		return -EINVAL;
 	}
diff --git a/arch/mips/configs/cu1000-neo_defconfig b/arch/mips/configs/cu1000-neo_defconfig
new file mode 100644
index 0000000..9b05a8f
--- /dev/null
+++ b/arch/mips/configs/cu1000-neo_defconfig
@@ -0,0 +1,117 @@
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_KERNEL_GZIP=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_KMEM=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MACH_INGENIC=y
+CONFIG_X1000_CU1000_NEO=y
+CONFIG_HIGHMEM=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+# CONFIG_SUSPEND is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+CONFIG_CMA=y
+CONFIG_CMA_AREAS=7
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_CFG80211=y
+CONFIG_UEVENT_HELPER=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_NETDEVICES=y
+CONFIG_STMMAC_ETH=y
+CONFIG_SMSC_PHY=y
+CONFIG_BRCMFMAC=y
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=2
+CONFIG_SERIAL_EARLYCON=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=3
+CONFIG_SERIAL_8250_RUNTIME_UARTS=3
+CONFIG_SERIAL_8250_INGENIC=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_JZ4780=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_SENSORS_ADS7828=y
+CONFIG_WATCHDOG=y
+CONFIG_JZ4740_WDT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_JZ4740=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_JZ4740=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_JZ4780=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_NVMEM=y
+CONFIG_NVMEM_SYSFS=y
+CONFIG_EXT4_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NLS=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_PRINTK_TIME=y
+CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
+CONFIG_CONSOLE_LOGLEVEL_QUIET=15
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=7
+CONFIG_DEBUG_INFO=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_STACKTRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon clk_ignore_unused"
diff --git a/arch/mips/configs/generic/board-ocelot.config b/arch/mips/configs/generic/board-ocelot.config
index 1134fbb..7626f2a 100644
--- a/arch/mips/configs/generic/board-ocelot.config
+++ b/arch/mips/configs/generic/board-ocelot.config
@@ -41,6 +41,7 @@
 CONFIG_SPI_DW_MMIO=y
 CONFIG_SPI_SPIDEV=y
 
+CONFIG_PINCTRL=y
 CONFIG_PINCTRL_OCELOT=y
 
 CONFIG_GPIO_SYSFS=y
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 61b0fc2..4ebd8ce 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -6,7 +6,6 @@
 generated-y += syscall_table_64_o32.h
 generic-y += current.h
 generic-y += device.h
-generic-y += dma-contiguous.h
 generic-y += emergency-restart.h
 generic-y += export.h
 generic-y += irq_work.h
@@ -19,6 +18,7 @@
 generic-y += qrwlock.h
 generic-y += qspinlock.h
 generic-y += sections.h
+generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
 generic-y += user.h
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index d41a505..6172778 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -81,6 +81,7 @@ enum loongson2ef_machine_type {
 #define  MACH_INGENIC_JZ4770	2	/* JZ4770 SOC		*/
 #define  MACH_INGENIC_JZ4780	3	/* JZ4780 SOC		*/
 #define  MACH_INGENIC_X1000		4	/* X1000 SOC		*/
+#define  MACH_INGENIC_X1830		5	/* X1830 SOC		*/
 
 extern char *system_type;
 const char *get_system_type(void);
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index c99166e..255afcd 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -100,24 +100,6 @@ typedef u32		compat_sigset_word;
 
 #define COMPAT_OFF_T_MAX	0x7fffffff
 
-/*
- * A pointer passed in from user mode. This should not
- * be used for syscall parameters, just declare them
- * as pointers because the syscall entry code will have
- * appropriately converted them already.
- */
-
-static inline void __user *compat_ptr(compat_uptr_t uptr)
-{
-	/* cast to a __user pointer via "unsigned long" makes sparse happy */
-	return (void __user *)(unsigned long)(long)uptr;
-}
-
-static inline compat_uptr_t ptr_to_compat(void __user *uptr)
-{
-	return (u32)(unsigned long)uptr;
-}
-
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = (struct pt_regs *)
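The pointer-conversion helpers are removed from the MIPS header here; compat code keeps calling them and presumably picks them up from a generic header after this series. A hedged sketch of the usual pattern in a compat syscall handler (hypothetical function, not taken from this patch):

    /* A 32-bit userspace pointer arrives as compat_uptr_t and is widened
     * into a real __user pointer before being dereferenced. */
    static long do_compat_example(compat_uptr_t uptr)
    {
        u32 __user *arg = compat_ptr(uptr);
        u32 val;

        if (get_user(val, arg))
            return -EFAULT;
        return val;
    }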
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 983a6a7..de44c92 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -555,6 +555,10 @@
 # define cpu_has_perf		__opt(MIPS_CPU_PERF)
 #endif
 
+#ifndef cpu_has_mac2008_only
+# define cpu_has_mac2008_only	__opt(MIPS_CPU_MAC_2008_ONLY)
+#endif
+
 #ifdef CONFIG_SMP
 /*
  * Some systems share FTLB RAMs between threads within a core (siblings in
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index ea83078..216a229 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -46,7 +46,7 @@
 #define PRID_COMP_NETLOGIC	0x0c0000
 #define PRID_COMP_CAVIUM	0x0d0000
 #define PRID_COMP_LOONGSON	0x140000
-#define PRID_COMP_INGENIC_D0	0xd00000	/* JZ4740, JZ4750 */
+#define PRID_COMP_INGENIC_D0	0xd00000	/* JZ4740, JZ4750, X1830 */
 #define PRID_COMP_INGENIC_D1	0xd10000	/* JZ4770, JZ4775, X1000 */
 #define PRID_COMP_INGENIC_E1	0xe10000	/* JZ4780 */
 
@@ -185,7 +185,8 @@
  * These are the PRID's for when 23:16 == PRID_COMP_INGENIC_*
  */
 
-#define PRID_IMP_XBURST	       0x0200
+#define PRID_IMP_XBURST_REV1	0x0200	/* XBurst with MXU SIMD ISA		*/
+#define PRID_IMP_XBURST_REV2	0x0100	/* XBurst with MXU2 SIMD ISA	*/
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_NETLOGIC
@@ -415,6 +416,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_MT_PER_TC_PERF_COUNTERS \
 				BIT_ULL(56)	/* CPU has perf counters implemented per TC (MIPSMT ASE) */
 #define MIPS_CPU_MMID		BIT_ULL(57)	/* CPU supports MemoryMapIDs */
+#define MIPS_CPU_MAC_2008_ONLY	BIT_ULL(58)	/* CPU only supports the MAC2008 fused multiply-add instruction */
 
 /*
  * CPU ASE encodings
diff --git a/arch/mips/include/asm/gio_device.h b/arch/mips/include/asm/gio_device.h
index c52948f..159087f 100644
--- a/arch/mips/include/asm/gio_device.h
+++ b/arch/mips/include/asm/gio_device.h
@@ -32,8 +32,6 @@ struct gio_driver {
 };
 #define to_gio_driver(drv) container_of(drv, struct gio_driver, driver)
 
-extern const struct gio_device_id *gio_match_device(const struct gio_device_id *,
-						    const struct gio_device *);
 extern struct gio_device *gio_dev_get(struct gio_device *);
 extern void gio_dev_put(struct gio_device *);
 
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index a4f48b0..a0b9220 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -23,7 +23,7 @@
  * TLB hazards
  */
 #if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && \
-	!defined(CONFIG_CPU_CAVIUM_OCTEON) && !defined(CONFIG_LOONGSON3_ENHANCEMENT)
+	!defined(CONFIG_CPU_CAVIUM_OCTEON) && !defined(CONFIG_CPU_LOONGSON64)
 
 /*
  * MIPSR2 defines ehb for hazard avoidance
@@ -158,7 +158,7 @@ do {									\
 } while (0)
 
 #elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
-	defined(CONFIG_CPU_LOONGSON2EF) || defined(CONFIG_LOONGSON3_ENHANCEMENT) || \
+	defined(CONFIG_CPU_LOONGSON2EF) || defined(CONFIG_CPU_LOONGSON64) || \
 	defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)
 
 /*
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index c4728bb..47a8ffc 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -18,7 +18,7 @@
 #include <asm/compiler.h>
 #include <asm/hazards.h>
 
-#if defined(CONFIG_CPU_MIPSR2) || defined (CONFIG_CPU_MIPSR6)
+#if defined(CONFIG_CPU_HAS_DIEI)
 
 static inline void arch_local_irq_disable(void)
 {
@@ -94,7 +94,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
 void arch_local_irq_disable(void);
 unsigned long arch_local_irq_save(void);
 void arch_local_irq_restore(unsigned long flags);
-#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
+#endif /* CONFIG_CPU_HAS_DIEI */
 
 static inline void arch_local_irq_enable(void)
 {
@@ -102,7 +102,7 @@ static inline void arch_local_irq_enable(void)
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
-#if   defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+#if defined(CONFIG_CPU_HAS_DIEI)
 	"	ei							\n"
 #else
 	"	mfc0	$1,$12						\n"
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 02783e1..fef0fda 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -37,6 +37,7 @@ static __inline__ long local_add_return(long i, local_t * l)
 		__asm__ __volatile__(
 		"	.set	push					\n"
 		"	.set	arch=r4000				\n"
+			__SYNC(full, loongson3_war) "			\n"
 		"1:"	__LL	"%1, %2		# local_add_return	\n"
 		"	addu	%0, %1, %3				\n"
 			__SC	"%0, %2					\n"
@@ -52,6 +53,7 @@ static __inline__ long local_add_return(long i, local_t * l)
 		__asm__ __volatile__(
 		"	.set	push					\n"
 		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
+			__SYNC(full, loongson3_war) "			\n"
 		"1:"	__LL	"%1, %2		# local_add_return	\n"
 		"	addu	%0, %1, %3				\n"
 			__SC	"%0, %2					\n"
@@ -84,6 +86,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
 		__asm__ __volatile__(
 		"	.set	push					\n"
 		"	.set	arch=r4000				\n"
+			__SYNC(full, loongson3_war) "			\n"
 		"1:"	__LL	"%1, %2		# local_sub_return	\n"
 		"	subu	%0, %1, %3				\n"
 			__SC	"%0, %2					\n"
@@ -99,6 +102,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
 		__asm__ __volatile__(
 		"	.set	push					\n"
 		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
+			__SYNC(full, loongson3_war) "			\n"
 		"1:"	__LL	"%1, %2		# local_sub_return	\n"
 		"	subu	%0, %1, %3				\n"
 			__SC	"%0, %2					\n"
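The added __SYNC(full, loongson3_war) barrier strengthens the LL/SC sequences without changing the local_t API. For context, a short usage sketch (hypothetical caller, assuming the usual kernel headers including <asm/local.h>):

    static local_t hits = LOCAL_INIT(0);

    static void record_hit(void)
    {
        /* Atomic with respect to this CPU (safe against local interrupts);
         * returns the value after the addition. */
        long n = local_add_return(1, &hits);

        if (n % 1000 == 0)
            pr_debug("hit counter reached %ld\n", n);
    }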
diff --git a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
index f992c1d..3e54f60 100644
--- a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
@@ -10,20 +10,10 @@
 #define __ASM_MACH_IP27_KERNEL_ENTRY_H
 
 #include <asm/sn/addrs.h>
-#include <asm/sn/sn0/hubni.h>
+#include <asm/sn/agent.h>
 #include <asm/sn/klkernvars.h>
 
 /*
- * Returns the local nasid into res.
- */
-	.macro GET_NASID_ASM res
-	dli	\res, LOCAL_HUB_ADDR(NI_STATUS_REV_ID)
-	ld	\res, (\res)
-	and	\res, NSRI_NODEID_MASK
-	dsrl	\res, NSRI_NODEID_SHFT
-	.endm
-
-/*
  * TLB bits
  */
 #define PAGE_GLOBAL		(1 << 6)
diff --git a/arch/mips/include/asm/mach-ip27/mangle-port.h b/arch/mips/include/asm/mach-ip27/mangle-port.h
index f6e4912..27c56ef 100644
--- a/arch/mips/include/asm/mach-ip27/mangle-port.h
+++ b/arch/mips/include/asm/mach-ip27/mangle-port.h
@@ -8,7 +8,7 @@
 #ifndef __ASM_MACH_IP27_MANGLE_PORT_H
 #define __ASM_MACH_IP27_MANGLE_PORT_H
 
-#define __swizzle_addr_b(port)	(port)
+#define __swizzle_addr_b(port)	((port) ^ 3)
 #define __swizzle_addr_w(port)	((port) ^ 2)
 #define __swizzle_addr_l(port)	(port)
 #define __swizzle_addr_q(port)	(port)
@@ -20,6 +20,6 @@
 # define ioswabl(a, x)		(x)
 # define __mem_ioswabl(a, x)	cpu_to_le32(x)
 # define ioswabq(a, x)		(x)
-# define __mem_ioswabq(a, x)	cpu_to_le32(x)
+# define __mem_ioswabq(a, x)	cpu_to_le64(x)
 
 #endif /* __ASM_MACH_IP27_MANGLE_PORT_H */
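The byte swizzle is a plain XOR of the low address bits, so the remapping is easy to work through by hand. An illustrative sketch of what the macros above do for sub-word port accesses:

    /* byte accesses:     offset 0 -> 3, 1 -> 2, 2 -> 1, 3 -> 0   (addr ^ 3)
     * halfword accesses: offset 0 -> 2, 2 -> 0                   (addr ^ 2)
     * word and qword accesses are left unchanged.
     * So a byte read at bus offset 0x10 now actually touches 0x13. */
    static inline unsigned long swizzle_byte_port(unsigned long port)
    {
        return port ^ 3;
    }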
diff --git a/arch/mips/include/asm/mach-ip27/mmzone.h b/arch/mips/include/asm/mach-ip27/mmzone.h
index f463826..08c36e5 100644
--- a/arch/mips/include/asm/mach-ip27/mmzone.h
+++ b/arch/mips/include/asm/mach-ip27/mmzone.h
@@ -4,7 +4,8 @@
 
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
-#include <asm/sn/hub.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/klkernvars.h>
 
 #define pa_to_nid(addr)		NASID_GET(addr)
 
@@ -12,7 +13,6 @@ struct hub_data {
 	kern_vars_t	kern_vars;
 	DECLARE_BITMAP(h_bigwin_used, HUB_NUM_BIG_WINDOW);
 	cpumask_t	h_cpus;
-	unsigned long slice_map;
 };
 
 struct node_data {
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index be61ddc..d66cc53 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -2,12 +2,12 @@
 #ifndef _ASM_MACH_TOPOLOGY_H
 #define _ASM_MACH_TOPOLOGY_H	1
 
-#include <asm/sn/hub.h>
 #include <asm/sn/types.h>
 #include <asm/mmzone.h>
 
 struct cpuinfo_ip27 {
 	nasid_t		p_nasid;	/* my node ID in numa-as-id-space */
+	unsigned short	p_speed;	/* cpu speed in MHz */
 	unsigned char	p_slice;	/* Physical position on node board */
 };
 
diff --git a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
index 7dc8d75..4fab38c 100644
--- a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
@@ -46,5 +46,7 @@
 #define cpu_has_wsbh		1
 #define cpu_has_ic_fills_f_dc	1
 #define cpu_hwrena_impl_bits	0xc0000000
+#define cpu_has_mac2008_only	1
+#define cpu_has_mips_r2_exec_hazard 0
 
 #endif /* __ASM_MACH_LOONGSON64_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 0d5a309..796fe47 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1101,9 +1101,12 @@
 /*
  * Bits 22:20 of the FPU Status Register will be read as 0,
  * and should be written as zero.
+ * MAC2008 was removed in Release 5 so we still treat it as
+ * reserved.
  */
 #define FPU_CSR_RSVD	(_ULCAST_(7) << 20)
 
+#define FPU_CSR_MAC2008	(_ULCAST_(1) << 20)
 #define FPU_CSR_ABS2008	(_ULCAST_(1) << 19)
 #define FPU_CSR_NAN2008	(_ULCAST_(1) << 18)
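FPU_CSR_MAC2008 occupies one of the previously reserved FCSR bits, next to the existing ABS2008/NAN2008 flags. A hedged sketch of how probe code might report such flags (hypothetical helper, assuming the usual printk headers):

    static void report_fpu_2008_modes(unsigned int fcsr)
    {
        if (fcsr & FPU_CSR_MAC2008)
            pr_info("FPU: 2008-style fused multiply-add\n");
        if (fcsr & FPU_CSR_ABS2008)
            pr_info("FPU: 2008-style abs/neg\n");
        if (fcsr & FPU_CSR_NAN2008)
            pr_info("FPU: 2008 NaN encoding\n");
    }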
 
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index 3bc630f..9c476a0 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -806,7 +806,8 @@ struct bridge_controller {
 	unsigned long		baddr;
 	unsigned long		intr_addr;
 	struct irq_domain	*domain;
-	unsigned int		pci_int[8];
+	unsigned int		pci_int[8][2];
+	unsigned int		int_mapping[8][2];
 	u32			ioc3_sid[8];
 	nasid_t			nasid;
 };
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 91b89aa..aef5378f 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -639,6 +639,11 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+#ifdef _PAGE_HUGE
+#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
+#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
+#endif
+
 #define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)
 
 #include <asm-generic/pgtable.h>
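pmd_leaf()/pud_leaf() let generic page-table walkers stop descending when an entry maps a huge page directly instead of pointing at a lower-level table. A minimal sketch of the pattern (hypothetical walker, assuming the usual pgtable accessors):

    /* Decide whether a walker has to descend below this PMD entry. */
    static bool pmd_needs_pte_walk(pmd_t pmd)
    {
        if (pmd_none(pmd))
            return false;   /* nothing mapped here */
        if (pmd_leaf(pmd))
            return false;   /* huge mapping, no PTE level below */
        return true;        /* points at a PTE table: keep walking */
    }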
diff --git a/arch/mips/include/asm/serial.h b/arch/mips/include/asm/serial.h
deleted file mode 100644
index 2777148..0000000
--- a/arch/mips/include/asm/serial.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2017 MIPS Tech, LLC
- */
-#ifndef __ASM__SERIAL_H
-#define __ASM__SERIAL_H
-
-#ifdef CONFIG_MIPS_GENERIC
-/*
- * Generic kernels cannot know a correct value for all platforms at
- * compile time. Set it to 0 to prevent 8250_early using it
- */
-#define BASE_BAUD 0
-#else
-#include <asm-generic/serial.h>
-#endif
-
-#endif /* __ASM__SERIAL_H */
diff --git a/arch/mips/include/asm/sn/arch.h b/arch/mips/include/asm/sn/arch.h
index f7d3273..9a96825 100644
--- a/arch/mips/include/asm/sn/arch.h
+++ b/arch/mips/include/asm/sn/arch.h
@@ -25,7 +25,4 @@
 #define INVALID_MODULE		(moduleid_t)-1
 #define INVALID_PARTID		(partid_t)-1
 
-extern nasid_t get_nasid(void);
-extern int get_cpu_slice(cpuid_t);
-
 #endif /* _ASM_SN_ARCH_H */
diff --git a/arch/mips/include/asm/sn/hub.h b/arch/mips/include/asm/sn/hub.h
deleted file mode 100644
index 45878fb..0000000
--- a/arch/mips/include/asm/sn/hub.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SN_HUB_H
-#define __ASM_SN_HUB_H
-
-#include <linux/types.h>
-#include <linux/cpumask.h>
-#include <asm/sn/types.h>
-#include <asm/sn/io.h>
-#include <asm/sn/klkernvars.h>
-#include <asm/xtalk/xtalk.h>
-
-/* ip27-hubio.c */
-extern unsigned long hub_pio_map(nasid_t nasid, xwidgetnum_t widget,
-			  unsigned long xtalk_addr, size_t size);
-extern void hub_pio_init(nasid_t nasid);
-
-#endif /* __ASM_SN_HUB_H */
diff --git a/arch/mips/include/asm/sn/intr.h b/arch/mips/include/asm/sn/intr.h
index fc13481..3d6954d3 100644
--- a/arch/mips/include/asm/sn/intr.h
+++ b/arch/mips/include/asm/sn/intr.h
@@ -8,15 +8,6 @@
 #ifndef __ASM_SN_INTR_H
 #define __ASM_SN_INTR_H
 
-/* Number of interrupt levels associated with each interrupt register. */
-#define N_INTPEND_BITS		64
-
-#define INT_PEND0_BASELVL	0
-#define INT_PEND1_BASELVL	64
-
-#define N_INTPENDJUNK_BITS	8
-#define INTPENDJUNK_CLRBIT	0x80
-
 /*
  * Macros to manipulate the interrupt register on the calling hub chip.
  */
@@ -84,14 +75,6 @@ do {								\
 #define CPU_RESCHED_B_IRQ	 8
 #define CPU_CALL_A_IRQ		 9
 #define CPU_CALL_B_IRQ		10
-#define MSC_MESG_INTR		11
-#define BASE_PCI_IRQ		12
-
-/*
- * INT_PEND0 again, bits determined by hardware / hardcoded:
- */
-#define SDISK_INTR		63	/* SABLE name */
-#define IP_PEND0_6_63		63	/* What is this bit? */
 
 /*
  * INT_PEND1 hard-coded bits:
diff --git a/arch/mips/include/asm/sn/ioc3.h b/arch/mips/include/asm/sn/ioc3.h
index 78ef760..2c09c17 100644
--- a/arch/mips/include/asm/sn/ioc3.h
+++ b/arch/mips/include/asm/sn/ioc3.h
@@ -21,50 +21,50 @@ struct ioc3_serialregs {
 
 /* SUPERIO uart register map */
 struct ioc3_uartregs {
+	u8	iu_lcr;
 	union {
-		u8	iu_rbr;	/* read only, DLAB == 0 */
-		u8	iu_thr;	/* write only, DLAB == 0 */
-		u8	iu_dll;	/* DLAB == 1 */
+		u8	iu_iir;	/* read only */
+		u8	iu_fcr;	/* write only */
 	};
 	union {
 		u8	iu_ier;	/* DLAB == 0 */
 		u8	iu_dlm;	/* DLAB == 1 */
 	};
 	union {
-		u8	iu_iir;	/* read only */
-		u8	iu_fcr;	/* write only */
+		u8	iu_rbr;	/* read only, DLAB == 0 */
+		u8	iu_thr;	/* write only, DLAB == 0 */
+		u8	iu_dll;	/* DLAB == 1 */
 	};
-	u8	iu_lcr;
-	u8	iu_mcr;
-	u8	iu_lsr;
-	u8	iu_msr;
 	u8	iu_scr;
+	u8	iu_msr;
+	u8	iu_lsr;
+	u8	iu_mcr;
 };
 
 struct ioc3_sioregs {
 	u8	fill[0x141];	/* starts at 0x141 */
 
-	u8	uartc;
 	u8	kbdcg;
+	u8	uartc;
 
-	u8	fill0[0x150 - 0x142 - 1];
+	u8	fill0[0x151 - 0x142 - 1];
 
-	u8	pp_data;
-	u8	pp_dsr;
 	u8	pp_dcr;
+	u8	pp_dsr;
+	u8	pp_data;
 
-	u8	fill1[0x158 - 0x152 - 1];
+	u8	fill1[0x159 - 0x153 - 1];
 
-	u8	pp_fifa;
-	u8	pp_cfgb;
 	u8	pp_ecr;
+	u8	pp_cfgb;
+	u8	pp_fifa;
 
-	u8	fill2[0x168 - 0x15a - 1];
+	u8	fill2[0x16a - 0x15b - 1];
 
-	u8	rtcad;
 	u8	rtcdat;
+	u8	rtcad;
 
-	u8	fill3[0x170 - 0x169 - 1];
+	u8	fill3[0x170 - 0x16b - 1];
 
 	struct ioc3_uartregs	uartb;	/* 0x20170  */
 	struct ioc3_uartregs	uarta;	/* 0x20178  */
@@ -598,5 +598,9 @@ struct ioc3_etxd {
 #define	IOC3_SUBSYS_IP30_SYSBOARD	0xc304
 #define	IOC3_SUBSYS_MENET		0xc305
 #define	IOC3_SUBSYS_MENET4		0xc306
+#define	IOC3_SUBSYS_IO7			0xc307
+#define	IOC3_SUBSYS_IO8			0xc308
+#define	IOC3_SUBSYS_IO9			0xc309
+#define	IOC3_SUBSYS_IP34_SYSBOARD	0xc30A
 
 #endif /* MIPS_SN_IOC3_H */
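With the register layout reordered, the SuperIO UARTs can be driven with the usual byte accessors. A hedged sketch (hypothetical helper, with uart pointing at a mapped struct ioc3_uartregs):

    static bool ioc3_uart_tx_ready(struct ioc3_uartregs __iomem *uart)
    {
        /* bit 5 of LSR is the standard 16550 THR-empty flag */
        return readb(&uart->iu_lsr) & 0x20;
    }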
diff --git a/arch/mips/include/asm/sn/klconfig.h b/arch/mips/include/asm/sn/klconfig.h
index 467c313..117f85e4 100644
--- a/arch/mips/include/asm/sn/klconfig.h
+++ b/arch/mips/include/asm/sn/klconfig.h
@@ -889,10 +889,6 @@ typedef union {
 extern lboard_t *find_lboard(lboard_t *start, unsigned char type);
 extern klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char type);
 extern klinfo_t *find_first_component(lboard_t *brd, unsigned char type);
-extern klcpu_t *nasid_slice_to_cpuinfo(nasid_t, int);
 extern lboard_t *find_lboard_class(lboard_t *start, unsigned char brd_class);
 
-
-extern klcpu_t *sn_get_cpuinfo(cpuid_t cpu);
-
 #endif /* _ASM_SN_KLCONFIG_H */
diff --git a/arch/mips/include/asm/sn/kldir.h b/arch/mips/include/asm/sn/kldir.h
index bfb3aec..245f59b 100644
--- a/arch/mips/include/asm/sn/kldir.h
+++ b/arch/mips/include/asm/sn/kldir.h
@@ -1,201 +1,16 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Derived from IRIX <sys/SN/kldir.h>, revision 1.21.
- *
- * Copyright (C) 1992 - 1997, 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 1999, 2000 by Ralf Baechle
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+
 #ifndef _ASM_SN_KLDIR_H
 #define _ASM_SN_KLDIR_H
 
-
-/*
- * The kldir memory area resides at a fixed place in each node's memory and
- * provides pointers to most other IP27 memory areas.  This allows us to
- * resize and/or relocate memory areas at a later time without breaking all
- * firmware and kernels that use them.	Indices in the array are
- * permanently dedicated to areas listed below.	 Some memory areas (marked
- * below) reside at a permanently fixed location, but are included in the
- * directory for completeness.
- */
-
 #define KLDIR_MAGIC		0x434d5f53505f5357
 
-/*
- * The upper portion of the memory map applies during boot
- * only and is overwritten by IRIX/SYMMON.
- *
- *				      MEMORY MAP PER NODE
- *
- * 0x2000000 (32M)	   +-----------------------------------------+
- *			   |	  IO6 BUFFERS FOR FLASH ENET IOC3    |
- * 0x1F80000 (31.5M)	   +-----------------------------------------+
- *			   |	  IO6 TEXT/DATA/BSS/stack	     |
- * 0x1C00000 (30M)	   +-----------------------------------------+
- *			   |	  IO6 PROM DEBUG TEXT/DATA/BSS/stack |
- * 0x0800000 (28M)	   +-----------------------------------------+
- *			   |	  IP27 PROM TEXT/DATA/BSS/stack	     |
- * 0x1B00000 (27M)	   +-----------------------------------------+
- *			   |	  IP27 CFG			     |
- * 0x1A00000 (26M)	   +-----------------------------------------+
- *			   |	  Graphics PROM			     |
- * 0x1800000 (24M)	   +-----------------------------------------+
- *			   |	  3rd Party PROM drivers	     |
- * 0x1600000 (22M)	   +-----------------------------------------+
- *			   |					     |
- *			   |	  Free				     |
- *			   |					     |
- *			   +-----------------------------------------+
- *			   |	  UNIX DEBUG Version		     |
- * 0x190000 (2M--)	   +-----------------------------------------+
- *			   |	  SYMMON			     |
- *			   |	  (For UNIX Debug only)		     |
- * 0x34000 (208K)	   +-----------------------------------------+
- *			   |	  SYMMON STACK [NUM_CPU_PER_NODE]    |
- *			   |	  (For UNIX Debug only)		     |
- * 0x25000 (148K)	   +-----------------------------------------+
- *			   |	  KLCONFIG - II (temp)		     |
- *			   |					     |
- *			   |	----------------------------	     |
- *			   |					     |
- *			   |	  UNIX NON-DEBUG Version	     |
- * 0x19000 (100K)	   +-----------------------------------------+
- *
- *
- * The lower portion of the memory map contains information that is
- * permanent and is used by the IP27PROM, IO6PROM and IRIX.
- *
- * 0x19000 (100K)	   +-----------------------------------------+
- *			   |					     |
- *			   |	  PI Error Spools (32K)		     |
- *			   |					     |
- * 0x12000 (72K)	   +-----------------------------------------+
- *			   |	  Unused			     |
- * 0x11c00 (71K)	   +-----------------------------------------+
- *			   |	  CPU 1 NMI Eframe area		     |
- * 0x11a00 (70.5K)	   +-----------------------------------------+
- *			   |	  CPU 0 NMI Eframe area		     |
- * 0x11800 (70K)	   +-----------------------------------------+
- *			   |	  CPU 1 NMI Register save area	     |
- * 0x11600 (69.5K)	   +-----------------------------------------+
- *			   |	  CPU 0 NMI Register save area	     |
- * 0x11400 (69K)	   +-----------------------------------------+
- *			   |	  GDA (1k)			     |
- * 0x11000 (68K)	   +-----------------------------------------+
- *			   |	  Early cache Exception stack	     |
- *			   |		 and/or			     |
- *			   |	  kernel/io6prom nmi registers	     |
- * 0x10800  (66k)	   +-----------------------------------------+
- *			   |	  cache error eframe		     |
- * 0x10400 (65K)	   +-----------------------------------------+
- *			   |	  Exception Handlers (UALIAS copy)   |
- * 0x10000 (64K)	   +-----------------------------------------+
- *			   |					     |
- *			   |					     |
- *			   |	  KLCONFIG - I (permanent) (48K)     |
- *			   |					     |
- *			   |					     |
- *			   |					     |
- * 0x4000 (16K)		   +-----------------------------------------+
- *			   |	  NMI Handler (Protected Page)	     |
- * 0x3000 (12K)		   +-----------------------------------------+
- *			   |	  ARCS PVECTORS (master node only)   |
- * 0x2c00 (11K)		   +-----------------------------------------+
- *			   |	  ARCS TVECTORS (master node only)   |
- * 0x2800 (10K)		   +-----------------------------------------+
- *			   |	  LAUNCH [NUM_CPU]		     |
- * 0x2400 (9K)		   +-----------------------------------------+
- *			   |	  Low memory directory (KLDIR)	     |
- * 0x2000 (8K)		   +-----------------------------------------+
- *			   |	  ARCS SPB (1K)			     |
- * 0x1000 (4K)		   +-----------------------------------------+
- *			   |	  Early cache Exception stack	     |
- *			   |		 and/or			     |
- *			   |	  kernel/io6prom nmi registers	     |
- * 0x800  (2k)		   +-----------------------------------------+
- *			   |	  cache error eframe		     |
- * 0x400 (1K)		   +-----------------------------------------+
- *			   |	  Exception Handlers		     |
- * 0x0	 (0K)		   +-----------------------------------------+
- */
-
-#ifdef __ASSEMBLY__
 #define KLDIR_OFF_MAGIC			0x00
 #define KLDIR_OFF_OFFSET		0x08
 #define KLDIR_OFF_POINTER		0x10
 #define KLDIR_OFF_SIZE			0x18
 #define KLDIR_OFF_COUNT			0x20
 #define KLDIR_OFF_STRIDE		0x28
-#endif /* __ASSEMBLY__ */
-
-/*
- * This is defined here because IP27_SYMMON_STK_SIZE must be at least what
- * we define here.  Since it's set up in the prom.  We can't redefine it later
- * and expect more space to be allocated.  The way to find out the true size
- * of the symmon stacks is to divide SYMMON_STK_SIZE by SYMMON_STK_STRIDE
- * for a particular node.
- */
-#define SYMMON_STACK_SIZE		0x8000
-
-#if defined(PROM)
-
-/*
- * These defines are prom version dependent.  No code other than the IP27
- * prom should attempt to use these values.
- */
-#define IP27_LAUNCH_OFFSET		0x2400
-#define IP27_LAUNCH_SIZE		0x400
-#define IP27_LAUNCH_COUNT		2
-#define IP27_LAUNCH_STRIDE		0x200
-
-#define IP27_KLCONFIG_OFFSET		0x4000
-#define IP27_KLCONFIG_SIZE		0xc000
-#define IP27_KLCONFIG_COUNT		1
-#define IP27_KLCONFIG_STRIDE		0
-
-#define IP27_NMI_OFFSET			0x3000
-#define IP27_NMI_SIZE			0x40
-#define IP27_NMI_COUNT			2
-#define IP27_NMI_STRIDE			0x40
-
-#define IP27_PI_ERROR_OFFSET		0x12000
-#define IP27_PI_ERROR_SIZE		0x4000
-#define IP27_PI_ERROR_COUNT		1
-#define IP27_PI_ERROR_STRIDE		0
-
-#define IP27_SYMMON_STK_OFFSET		0x25000
-#define IP27_SYMMON_STK_SIZE		0xe000
-#define IP27_SYMMON_STK_COUNT		2
-/* IP27_SYMMON_STK_STRIDE must be >= SYMMON_STACK_SIZE */
-#define IP27_SYMMON_STK_STRIDE		0x7000
-
-#define IP27_FREEMEM_OFFSET		0x19000
-#define IP27_FREEMEM_SIZE		-1
-#define IP27_FREEMEM_COUNT		1
-#define IP27_FREEMEM_STRIDE		0
-
-#endif /* PROM */
-/*
- * There will be only one of these in a partition so the IO6 must set it up.
- */
-#define IO6_GDA_OFFSET			0x11000
-#define IO6_GDA_SIZE			0x400
-#define IO6_GDA_COUNT			1
-#define IO6_GDA_STRIDE			0
-
-/*
- * save area of kernel nmi regs in the prom format
- */
-#define IP27_NMI_KREGS_OFFSET		0x11400
-#define IP27_NMI_KREGS_CPU_SIZE		0x200
-/*
- * save area of kernel nmi regs in eframe format
- */
-#define IP27_NMI_EFRAME_OFFSET		0x11800
-#define IP27_NMI_EFRAME_SIZE		0x200
 
 #define KLDIR_ENT_SIZE			0x40
 #define KLDIR_MAX_ENTRIES		(0x400 / 0x40)
@@ -214,4 +29,8 @@ typedef struct kldir_ent_s {
 } kldir_ent_t;
 #endif /* !__ASSEMBLY__ */
 
+#ifdef CONFIG_SGI_IP27
+#include <asm/sn/sn0/kldir.h>
+#endif
+
 #endif /* _ASM_SN_KLDIR_H */
diff --git a/arch/mips/include/asm/sn/sn0/hub.h b/arch/mips/include/asm/sn/sn0/hub.h
index d78dd76..c84adde3 100644
--- a/arch/mips/include/asm/sn/sn0/hub.h
+++ b/arch/mips/include/asm/sn/sn0/hub.h
@@ -37,4 +37,26 @@
 #define UATTR_MSPEC	2
 #define UATTR_UNCAC	3
 
+#ifdef __ASSEMBLY__
+/*
+ * Returns the local nasid into res.
+ */
+	.macro GET_NASID_ASM res
+	dli	\res, LOCAL_HUB_ADDR(NI_STATUS_REV_ID)
+	ld	\res, (\res)
+	and	\res, NSRI_NODEID_MASK
+	dsrl	\res, NSRI_NODEID_SHFT
+	.endm
+#else
+
+/*
+ * get_nasid() returns the physical node id number of the caller.
+ */
+static inline nasid_t get_nasid(void)
+{
+	return (nasid_t)((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_NODEID_MASK)
+			 >> NSRI_NODEID_SHFT);
+}
+#endif
+
 #endif /* _ASM_SN_SN0_HUB_H */
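A short usage sketch of the new inline (hypothetical caller, assuming <asm/sn/types.h> and this header are in scope):

    static void announce_node(void)
    {
        nasid_t nid = get_nasid();

        pr_info("running on node (nasid) %d\n", nid);
    }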
diff --git a/arch/mips/include/asm/sn/sn0/hubni.h b/arch/mips/include/asm/sn/sn0/hubni.h
index b73c4be..b825314 100644
--- a/arch/mips/include/asm/sn/sn0/hubni.h
+++ b/arch/mips/include/asm/sn/sn0/hubni.h
@@ -250,6 +250,14 @@ typedef union	hubni_port_error_u {
 #define NI_LLP_CB_MAX		0xff
 #define NI_LLP_SN_MAX		0xff
 
+static inline int get_region_shift(void)
+{
+	if (LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
+		return NASID_TO_FINEREG_SHFT;
+
+	return NASID_TO_COARSEREG_SHFT;
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_SGI_SN0_HUBNI_H */
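get_region_shift() reads the hub's NI status register to pick between fine and coarse region numbering; a region number is then the nasid shifted down by that amount. A hedged sketch of the conventional use (illustrative only):

    static inline int nasid_to_region(nasid_t nasid)
    {
        return nasid >> get_region_shift();
    }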
diff --git a/arch/mips/include/asm/sn/sn0/ip27.h b/arch/mips/include/asm/sn/sn0/ip27.h
deleted file mode 100644
index 3b5efee..0000000
--- a/arch/mips/include/asm/sn/sn0/ip27.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Derived from IRIX <sys/SN/SN0/IP27.h>.
- *
- * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
- * Copyright (C) 1999, 2006 by Ralf Baechle
- */
-#ifndef _ASM_SN_SN0_IP27_H
-#define _ASM_SN_SN0_IP27_H
-
-#include <asm/mipsregs.h>
-
-/*
- * Simple definitions for the masks which remove SW bits from pte.
- */
-
-#define TLBLO_HWBITSHIFT	0		/* Shift value, for masking */
-
-#ifndef __ASSEMBLY__
-
-#define CAUSE_BERRINTR		IE_IRQ5
-
-#define ECCF_CACHE_ERR	0
-#define ECCF_TAGLO	1
-#define ECCF_ECC	2
-#define ECCF_ERROREPC	3
-#define ECCF_PADDR	4
-#define ECCF_SIZE	(5 * sizeof(long))
-
-#endif /* !__ASSEMBLY__ */
-
-#ifdef __ASSEMBLY__
-
-/*
- * KL_GET_CPUNUM (similar to EV_GET_SPNUM for EVEREST platform) reads
- * the processor number of the calling processor.  The proc parameters
- * must be a register.
- */
-#define KL_GET_CPUNUM(proc)				\
-	dli	proc, LOCAL_HUB(0);			\
-	ld	proc, PI_CPU_NUM(proc)
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * R10000 status register interrupt bit mask usage for IP27.
- */
-#define SRB_SWTIMO	IE_SW0		/* 0x0100 */
-#define SRB_NET		IE_SW1		/* 0x0200 */
-#define SRB_DEV0	IE_IRQ0		/* 0x0400 */
-#define SRB_DEV1	IE_IRQ1		/* 0x0800 */
-#define SRB_TIMOCLK	IE_IRQ2		/* 0x1000 */
-#define SRB_PROFCLK	IE_IRQ3		/* 0x2000 */
-#define SRB_ERR		IE_IRQ4		/* 0x4000 */
-#define SRB_SCHEDCLK	IE_IRQ5		/* 0x8000 */
-
-#define SR_IBIT_HI	SRB_DEV0
-#define SR_IBIT_PROF	SRB_PROFCLK
-
-#define SRB_SWTIMO_IDX		0
-#define SRB_NET_IDX		1
-#define SRB_DEV0_IDX		2
-#define SRB_DEV1_IDX		3
-#define SRB_TIMOCLK_IDX		4
-#define SRB_PROFCLK_IDX		5
-#define SRB_ERR_IDX		6
-#define SRB_SCHEDCLK_IDX	7
-
-#define NUM_CAUSE_INTRS		8
-
-#define SCACHE_LINESIZE 128
-#define SCACHE_LINEMASK (SCACHE_LINESIZE - 1)
-
-#include <asm/sn/addrs.h>
-
-#define LED_CYCLE_MASK	0x0f
-#define LED_CYCLE_SHFT	4
-
-#define SEND_NMI(_nasid, _slice)	\
-	  REMOTE_HUB_S((_nasid),  (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)
-
-#endif /* _ASM_SN_SN0_IP27_H */
diff --git a/arch/mips/include/asm/sn/sn0/kldir.h b/arch/mips/include/asm/sn/sn0/kldir.h
new file mode 100644
index 0000000..1b10af6
--- /dev/null
+++ b/arch/mips/include/asm/sn/sn0/kldir.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Derived from IRIX <sys/SN/kldir.h>, revision 1.21.
+ *
+ * Copyright (C) 1992 - 1997, 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1999, 2000 by Ralf Baechle
+ */
+#ifndef _ASM_SN_SN0_KLDIR_H
+#define _ASM_SN_SN0_KLDIR_H
+
+
+/*
+ * The kldir memory area resides at a fixed place in each node's memory and
+ * provides pointers to most other IP27 memory areas.  This allows us to
+ * resize and/or relocate memory areas at a later time without breaking all
+ * firmware and kernels that use them.	Indices in the array are
+ * permanently dedicated to areas listed below.	 Some memory areas (marked
+ * below) reside at a permanently fixed location, but are included in the
+ * directory for completeness.
+ */
+
+/*
+ * The upper portion of the memory map applies during boot
+ * only and is overwritten by IRIX/SYMMON.
+ *
+ *				      MEMORY MAP PER NODE
+ *
+ * 0x2000000 (32M)	   +-----------------------------------------+
+ *			   |	  IO6 BUFFERS FOR FLASH ENET IOC3    |
+ * 0x1F80000 (31.5M)	   +-----------------------------------------+
+ *			   |	  IO6 TEXT/DATA/BSS/stack	     |
+ * 0x1C00000 (30M)	   +-----------------------------------------+
+ *			   |	  IO6 PROM DEBUG TEXT/DATA/BSS/stack |
+ * 0x0800000 (28M)	   +-----------------------------------------+
+ *			   |	  IP27 PROM TEXT/DATA/BSS/stack	     |
+ * 0x1B00000 (27M)	   +-----------------------------------------+
+ *			   |	  IP27 CFG			     |
+ * 0x1A00000 (26M)	   +-----------------------------------------+
+ *			   |	  Graphics PROM			     |
+ * 0x1800000 (24M)	   +-----------------------------------------+
+ *			   |	  3rd Party PROM drivers	     |
+ * 0x1600000 (22M)	   +-----------------------------------------+
+ *			   |					     |
+ *			   |	  Free				     |
+ *			   |					     |
+ *			   +-----------------------------------------+
+ *			   |	  UNIX DEBUG Version		     |
+ * 0x190000 (2M--)	   +-----------------------------------------+
+ *			   |	  SYMMON			     |
+ *			   |	  (For UNIX Debug only)		     |
+ * 0x34000 (208K)	   +-----------------------------------------+
+ *			   |	  SYMMON STACK [NUM_CPU_PER_NODE]    |
+ *			   |	  (For UNIX Debug only)		     |
+ * 0x25000 (148K)	   +-----------------------------------------+
+ *			   |	  KLCONFIG - II (temp)		     |
+ *			   |					     |
+ *			   |	----------------------------	     |
+ *			   |					     |
+ *			   |	  UNIX NON-DEBUG Version	     |
+ * 0x19000 (100K)	   +-----------------------------------------+
+ *
+ *
+ * The lower portion of the memory map contains information that is
+ * permanent and is used by the IP27PROM, IO6PROM and IRIX.
+ *
+ * 0x19000 (100K)	   +-----------------------------------------+
+ *			   |					     |
+ *			   |	  PI Error Spools (32K)		     |
+ *			   |					     |
+ * 0x12000 (72K)	   +-----------------------------------------+
+ *			   |	  Unused			     |
+ * 0x11c00 (71K)	   +-----------------------------------------+
+ *			   |	  CPU 1 NMI Eframe area		     |
+ * 0x11a00 (70.5K)	   +-----------------------------------------+
+ *			   |	  CPU 0 NMI Eframe area		     |
+ * 0x11800 (70K)	   +-----------------------------------------+
+ *			   |	  CPU 1 NMI Register save area	     |
+ * 0x11600 (69.5K)	   +-----------------------------------------+
+ *			   |	  CPU 0 NMI Register save area	     |
+ * 0x11400 (69K)	   +-----------------------------------------+
+ *			   |	  GDA (1k)			     |
+ * 0x11000 (68K)	   +-----------------------------------------+
+ *			   |	  Early cache Exception stack	     |
+ *			   |		 and/or			     |
+ *			   |	  kernel/io6prom nmi registers	     |
+ * 0x10800  (66k)	   +-----------------------------------------+
+ *			   |	  cache error eframe		     |
+ * 0x10400 (65K)	   +-----------------------------------------+
+ *			   |	  Exception Handlers (UALIAS copy)   |
+ * 0x10000 (64K)	   +-----------------------------------------+
+ *			   |					     |
+ *			   |					     |
+ *			   |	  KLCONFIG - I (permanent) (48K)     |
+ *			   |					     |
+ *			   |					     |
+ *			   |					     |
+ * 0x4000 (16K)		   +-----------------------------------------+
+ *			   |	  NMI Handler (Protected Page)	     |
+ * 0x3000 (12K)		   +-----------------------------------------+
+ *			   |	  ARCS PVECTORS (master node only)   |
+ * 0x2c00 (11K)		   +-----------------------------------------+
+ *			   |	  ARCS TVECTORS (master node only)   |
+ * 0x2800 (10K)		   +-----------------------------------------+
+ *			   |	  LAUNCH [NUM_CPU]		     |
+ * 0x2400 (9K)		   +-----------------------------------------+
+ *			   |	  Low memory directory (KLDIR)	     |
+ * 0x2000 (8K)		   +-----------------------------------------+
+ *			   |	  ARCS SPB (1K)			     |
+ * 0x1000 (4K)		   +-----------------------------------------+
+ *			   |	  Early cache Exception stack	     |
+ *			   |		 and/or			     |
+ *			   |	  kernel/io6prom nmi registers	     |
+ * 0x800  (2k)		   +-----------------------------------------+
+ *			   |	  cache error eframe		     |
+ * 0x400 (1K)		   +-----------------------------------------+
+ *			   |	  Exception Handlers		     |
+ * 0x0	 (0K)		   +-----------------------------------------+
+ */
+
+/*
+ * This is defined here because IP27_SYMMON_STK_SIZE must be at least what
+ * we define here.  Since it's set up in the prom, we can't redefine it later
+ * and expect more space to be allocated.  The way to find out the true size
+ * of the symmon stacks is to divide SYMMON_STK_SIZE by SYMMON_STK_STRIDE
+ * for a particular node.
+ */
+#define SYMMON_STACK_SIZE		0x8000
+
+#if defined(PROM)
+
+/*
+ * These defines are prom version dependent.  No code other than the IP27
+ * prom should attempt to use these values.
+ */
+#define IP27_LAUNCH_OFFSET		0x2400
+#define IP27_LAUNCH_SIZE		0x400
+#define IP27_LAUNCH_COUNT		2
+#define IP27_LAUNCH_STRIDE		0x200
+
+#define IP27_KLCONFIG_OFFSET		0x4000
+#define IP27_KLCONFIG_SIZE		0xc000
+#define IP27_KLCONFIG_COUNT		1
+#define IP27_KLCONFIG_STRIDE		0
+
+#define IP27_NMI_OFFSET			0x3000
+#define IP27_NMI_SIZE			0x40
+#define IP27_NMI_COUNT			2
+#define IP27_NMI_STRIDE			0x40
+
+#define IP27_PI_ERROR_OFFSET		0x12000
+#define IP27_PI_ERROR_SIZE		0x4000
+#define IP27_PI_ERROR_COUNT		1
+#define IP27_PI_ERROR_STRIDE		0
+
+#define IP27_SYMMON_STK_OFFSET		0x25000
+#define IP27_SYMMON_STK_SIZE		0xe000
+#define IP27_SYMMON_STK_COUNT		2
+/* IP27_SYMMON_STK_STRIDE must be >= SYMMON_STACK_SIZE */
+#define IP27_SYMMON_STK_STRIDE		0x7000
+
+#define IP27_FREEMEM_OFFSET		0x19000
+#define IP27_FREEMEM_SIZE		-1
+#define IP27_FREEMEM_COUNT		1
+#define IP27_FREEMEM_STRIDE		0
+
+#endif /* PROM */
+/*
+ * There will be only one of these in a partition so the IO6 must set it up.
+ */
+#define IO6_GDA_OFFSET			0x11000
+#define IO6_GDA_SIZE			0x400
+#define IO6_GDA_COUNT			1
+#define IO6_GDA_STRIDE			0
+
+/*
+ * save area of kernel nmi regs in the prom format
+ */
+#define IP27_NMI_KREGS_OFFSET		0x11400
+#define IP27_NMI_KREGS_CPU_SIZE		0x200
+/*
+ * save area of kernel nmi regs in eframe format
+ */
+#define IP27_NMI_EFRAME_OFFSET		0x11800
+#define IP27_NMI_EFRAME_SIZE		0x200
+
+#endif /* _ASM_SN_SN0_KLDIR_H */
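The OFFSET/SIZE/COUNT/STRIDE quadruples above describe small per-node arrays: entry i of an area sits at the node's base address plus OFFSET plus i * STRIDE. A minimal sketch of that addressing, assuming a hypothetical helper name and a caller that already knows the node's base address (only meaningful where the PROM-only constants are visible):

/* Hypothetical helper, not part of the patch: locate the symmon stack
 * for a given CPU slice inside one node's low memory. */
static inline unsigned long ip27_symmon_stack(unsigned long node_base,
					      int slice)
{
	return node_base + IP27_SYMMON_STK_OFFSET +
	       slice * IP27_SYMMON_STK_STRIDE;
}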
diff --git a/arch/mips/include/asm/sn/sn_private.h b/arch/mips/include/asm/sn/sn_private.h
deleted file mode 100644
index 63a2c30..0000000
--- a/arch/mips/include/asm/sn/sn_private.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SN_SN_PRIVATE_H
-#define __ASM_SN_SN_PRIVATE_H
-
-#include <asm/sn/types.h>
-
-extern nasid_t master_nasid;
-
-extern void cpu_node_probe(void);
-extern void hub_rtc_init(nasid_t nasid);
-extern void cpu_time_init(void);
-extern void per_cpu_init(void);
-extern void install_cpu_nmi_handler(int slice);
-extern void install_ipi(void);
-extern void setup_replication_mask(void);
-extern void replicate_kernel_text(void);
-extern unsigned long node_getfirstfree(nasid_t nasid);
-
-#endif /* __ASM_SN_SN_PRIVATE_H */
diff --git a/arch/mips/include/asm/sn/types.h b/arch/mips/include/asm/sn/types.h
index 203c927..451ba1e 100644
--- a/arch/mips/include/asm/sn/types.h
+++ b/arch/mips/include/asm/sn/types.h
@@ -11,6 +11,8 @@
 
 #include <linux/types.h>
 
+#ifndef __ASSEMBLY__
+
 typedef unsigned long	cpuid_t;
 typedef signed short	nasid_t;	/* node id in numa-as-id space */
 typedef signed char	partid_t;	/* partition ID type */
@@ -18,4 +20,6 @@ typedef signed short	moduleid_t;	/* user-visible module number type */
 
 typedef dev_t		vertex_hdl_t;	/* hardware graph vertex handle */
 
+#endif
+
 #endif /* _ASM_SN_TYPES_H */
diff --git a/arch/mips/jz4740/Kconfig b/arch/mips/jz4740/Kconfig
index 4dd0c44..412d2fa 100644
--- a/arch/mips/jz4740/Kconfig
+++ b/arch/mips/jz4740/Kconfig
@@ -16,6 +16,10 @@
 	bool "MIPS Creator CI20"
 	select MACH_JZ4780
 
+config X1000_CU1000_NEO
+	bool "YSH & ATIL CU1000 Module with Neo backplane"
+	select MACH_X1000
+
 endchoice
 
 config MACH_JZ4740
@@ -33,3 +37,9 @@
 	select MIPS_CPU_SCACHE
 	select SYS_HAS_CPU_MIPS32_R2
 	select SYS_SUPPORTS_HIGHMEM
+
+config MACH_X1000
+	bool
+	select MIPS_CPU_SCACHE
+	select SYS_HAS_CPU_MIPS32_R2
+	select SYS_SUPPORTS_HIGHMEM
diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c
index dc8ee21..880c268 100644
--- a/arch/mips/jz4740/setup.c
+++ b/arch/mips/jz4740/setup.c
@@ -44,6 +44,8 @@ static void __init jz4740_detect_mem(void)
 
 static unsigned long __init get_board_mach_type(const void *fdt)
 {
+	if (!fdt_node_check_compatible(fdt, 0, "ingenic,x1830"))
+		return MACH_INGENIC_X1830;
 	if (!fdt_node_check_compatible(fdt, 0, "ingenic,x1000"))
 		return MACH_INGENIC_X1000;
 	if (!fdt_node_check_compatible(fdt, 0, "ingenic,jz4780"))
@@ -86,6 +88,8 @@ void __init device_tree_init(void)
 const char *get_system_type(void)
 {
 	switch (mips_machtype) {
+	case MACH_INGENIC_X1830:
+		return "X1830";
 	case MACH_INGENIC_X1000:
 		return "X1000";
 	case MACH_INGENIC_JZ4780:
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c543326..6ab6b03 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -102,7 +102,12 @@ static void cpu_set_fpu_2008(struct cpuinfo_mips *c)
 		if (fir & MIPS_FPIR_HAS2008) {
 			fcsr = read_32bit_cp1_register(CP1_STATUS);
 
-			fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
+			/*
+			 * The MAC2008 toolchain never landed in the real world, so we
+			 * only test whether it can be disabled and don't try to enable
+			 * it.
+			 */
+			fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008 | FPU_CSR_MAC2008);
 			write_32bit_cp1_register(CP1_STATUS, fcsr0);
 			fcsr0 = read_32bit_cp1_register(CP1_STATUS);
 
@@ -112,6 +117,15 @@ static void cpu_set_fpu_2008(struct cpuinfo_mips *c)
 
 			write_32bit_cp1_register(CP1_STATUS, fcsr);
 
+			if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2)) {
+				/*
+				 * The bit for MAC2008 might be reused by R6 in future,
+				 * so we only test for R2-R5.
+				 */
+				if (fcsr0 & FPU_CSR_MAC2008)
+					c->options |= MIPS_CPU_MAC_2008_ONLY;
+			}
+
 			if (!(fcsr0 & FPU_CSR_NAN2008))
 				c->options |= MIPS_CPU_NAN_LEGACY;
 			if (fcsr1 & FPU_CSR_NAN2008)
@@ -1960,10 +1974,8 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
 	BUG_ON(!__builtin_constant_p(cpu_has_counter) || cpu_has_counter);
 
 	switch (c->processor_id & PRID_IMP_MASK) {
-	case PRID_IMP_XBURST:
-		c->cputype = CPU_XBURST;
-		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
-		__cpu_name[cpu] = "Ingenic JZRISC";
+	case PRID_IMP_XBURST_REV1:
+
 		/*
 		 * The XBurst core by default attempts to avoid branch target
 		 * buffer lookups by detecting & special casing loops. This
@@ -1971,36 +1983,45 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
 		 * Set cp0 config7 bit 4 to disable this feature.
 		 */
 		set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
+
+		switch (c->processor_id & PRID_COMP_MASK) {
+
+		/*
+		 * XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D0
+		 * report themselves as MIPS32r2 compatible in their config0
+		 * register, but they don't actually support this ISA.
+		 */
+		case PRID_COMP_INGENIC_D0:
+			c->isa_level &= ~MIPS_CPU_ISA_M32R2;
+			break;
+
+		/*
+		 * The config0 register in XBurst CPUs with a processor ID of
+		 * PRID_COMP_INGENIC_D1 has an abandoned huge page TLB mode.
+		 * This mode is not compatible with the MIPS standard and makes
+		 * the TLB miss handler enter an infinite loop (line 21 in
+		 * tlb-funcs.S) when starting the init process. After chip reset
+		 * the default is HPTLB mode, so write 0xa9000000 to cp0
+		 * register 5 sel 4 to switch back to VTLB mode.
+		 */
+		case PRID_COMP_INGENIC_D1:
+			write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);
+			break;
+
+		default:
+			break;
+		}
+	/* fall-through */
+	case PRID_IMP_XBURST_REV2:
+		c->cputype = CPU_XBURST;
+		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+		__cpu_name[cpu] = "Ingenic XBurst";
 		break;
+
 	default:
 		panic("Unknown Ingenic Processor ID!");
 		break;
 	}
-
-	switch (c->processor_id & PRID_COMP_MASK) {
-	/*
-	 * The config0 register in the XBurst CPUs with a processor ID of
-	 * PRID_COMP_INGENIC_D1 has an abandoned huge page tlb mode, this
-	 * mode is not compatible with the MIPS standard, it will cause
-	 * tlbmiss and into an infinite loop (line 21 in the tlb-funcs.S)
-	 * when starting the init process. After chip reset, the default
-	 * is HPTLB mode, Write 0xa9000000 to cp0 register 5 sel 4 to
-	 * switch back to VTLB mode to prevent getting stuck.
-	 */
-	case PRID_COMP_INGENIC_D1:
-		write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);
-		break;
-	/*
-	 * The config0 register in the XBurst CPUs with a processor ID of
-	 * PRID_COMP_INGENIC_D0 report themselves as MIPS32r2 compatible,
-	 * but they don't actually support this ISA.
-	 */
-	case PRID_COMP_INGENIC_D0:
-		c->isa_level &= ~MIPS_CPU_ISA_M32R2;
-		break;
-	default:
-		break;
-	}
 }
 
 static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
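The MAC2008 probe above relies on a write-and-read-back pattern: clear the candidate FCSR bits, read FCSR back, and treat a bit that stays set as one the core forces on. A minimal sketch of that pattern, using the same CP1 accessors as cpu_set_fpu_2008(); the helper name is hypothetical and not part of the patch:

/* Hypothetical sketch of the FCSR read-back probe used above. */
static bool fcsr_bit_is_forced_on(unsigned int fcsr, unsigned int bit)
{
	unsigned int probe;

	write_32bit_cp1_register(CP1_STATUS, fcsr & ~bit);
	probe = read_32bit_cp1_register(CP1_STATUS);
	write_32bit_cp1_register(CP1_STATUS, fcsr);	/* restore caller's FCSR */

	return probe & bit;	/* still set => cannot be disabled */
}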
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index c3d4212..1ac2752 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -515,8 +515,7 @@ static void __init request_crashkernel(struct resource *res)
 	ret = request_resource(res, &crashk_res);
 	if (!ret)
 		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
-			(unsigned long)((crashk_res.end -
-					 crashk_res.start + 1) >> 20),
+			(unsigned long)(resource_size(&crashk_res) >> 20),
 			(unsigned long)(crashk_res.start  >> 20));
 }
 #else /* !defined(CONFIG_KEXEC)		*/
@@ -698,8 +697,7 @@ static void __init arch_mem_init(char **cmdline_p)
 	mips_parse_crashkernel();
 #ifdef CONFIG_KEXEC
 	if (crashk_res.start != crashk_res.end)
-		memblock_reserve(crashk_res.start,
-				 crashk_res.end - crashk_res.start + 1);
+		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
 #endif
 	device_tree_init();
 	sparse_init();
@@ -796,8 +794,6 @@ void __init setup_arch(char **cmdline_p)
 #if defined(CONFIG_VT)
 #if defined(CONFIG_VGA_CONSOLE)
 	conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
-	conswitchp = &dummy_con;
 #endif
 #endif
 
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index f2973ce..abdd7aa 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -90,6 +90,9 @@ void synchronise_count_master(int cpu)
 void synchronise_count_slave(int cpu)
 {
 	int i;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	/*
 	 * Not every cpu is online at the time this gets called,
@@ -113,5 +116,7 @@ void synchronise_count_slave(int cpu)
 	}
 	/* Arrange for an interrupt in a short while */
 	write_c0_compare(read_c0_count() + COUNTON);
+
+	local_irq_restore(flags);
 }
 #undef NR_LOOPS
diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile
index a3d4bec..6efb2f6 100644
--- a/arch/mips/kernel/syscalls/Makefile
+++ b/arch/mips/kernel/syscalls/Makefile
@@ -18,7 +18,7 @@
 		   '$(syshdr_pfx_$(basetarget))'		\
 		   '$(syshdr_offset_$(basetarget))'
 
-quiet_cmd_sysnr = SYSNR  $@
+quiet_cmd_sysnr = SYSNR   $@
       cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@'		\
 		  '$(sysnr_abis_$(basetarget))'			\
 		  '$(sysnr_pfx_$(basetarget))'			\
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index e7c5ab3..1f9e8ad 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -374,3 +374,5 @@
 433	n32	fspick				sys_fspick
 434	n32	pidfd_open			sys_pidfd_open
 435	n32	clone3				__sys_clone3
+437	n32	openat2				sys_openat2
+438	n32	pidfd_getfd			sys_pidfd_getfd
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index 13cd665..c0b9d80 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -350,3 +350,5 @@
 433	n64	fspick				sys_fspick
 434	n64	pidfd_open			sys_pidfd_open
 435	n64	clone3				__sys_clone3
+437	n64	openat2				sys_openat2
+438	n64	pidfd_getfd			sys_pidfd_getfd
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 353539e..ac58677 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -423,3 +423,5 @@
 433	o32	fspick				sys_fspick
 434	o32	pidfd_open			sys_pidfd_open
 435	o32	clone3				__sys_clone3
+437	o32	openat2				sys_openat2
+438	o32	pidfd_getfd			sys_pidfd_getfd
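With all three ABI tables updated, openat2(2) and pidfd_getfd(2) become reachable from MIPS userspace through the numbers wired up above. A small illustrative userspace check, assuming 5.6-era UAPI headers and a libc that does not yet provide a wrapper; the program is not part of the patch:

/* Illustrative userspace test: call the newly wired openat2(2) directly. */
#include <fcntl.h>
#include <linux/openat2.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct open_how how;
	long fd;

	memset(&how, 0, sizeof(how));
	how.flags = O_RDONLY | O_DIRECTORY;

	fd = syscall(__NR_openat2, AT_FDCWD, ".", &how, sizeof(how));
	if (fd < 0) {
		perror("openat2");
		return 1;
	}
	close(fd);
	return 0;
}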
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 83f2a43..31968cb 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -210,11 +210,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 			regs.regs[29] = task->thread.reg29;
 			regs.regs[31] = 0;
 			regs.cp0_epc = task->thread.reg31;
-#ifdef CONFIG_KGDB_KDB
-		} else if (atomic_read(&kgdb_active) != -1 &&
-			   kdb_current_regs) {
-			memcpy(&regs, kdb_current_regs, sizeof(regs));
-#endif /* CONFIG_KGDB_KDB */
 		} else {
 			prepare_frametrace(&regs);
 		}
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 92bd2b0..ca6fc47 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -131,7 +131,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
 #define     _LoadW(addr, value, res, type)   \
 do {                                                        \
 		__asm__ __volatile__ (                      \
@@ -152,7 +152,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
 /* For CPUs without lwl instruction */
 #define     _LoadW(addr, value, res, type) \
 do {                                                        \
@@ -187,7 +187,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 
 #define     _LoadHWU(addr, value, res, type) \
 do {                                                        \
@@ -213,7 +213,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
 #define     _LoadWU(addr, value, res, type)  \
 do {                                                        \
 		__asm__ __volatile__ (                      \
@@ -256,7 +256,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
 /* For CPUs without lwl and ldl instructions */
 #define	    _LoadWU(addr, value, res, type) \
 do {                                                        \
@@ -340,7 +340,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 
 
 #define     _StoreHW(addr, value, res, type) \
@@ -366,7 +366,7 @@ do {                                                        \
 			: "r" (value), "r" (addr), "i" (-EFAULT));\
 } while(0)
 
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
 #define     _StoreW(addr, value, res, type)  \
 do {                                                        \
 		__asm__ __volatile__ (                      \
@@ -407,7 +407,7 @@ do {                                                        \
 		: "r" (value), "r" (addr), "i" (-EFAULT));  \
 } while(0)
 
-#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
 #define     _StoreW(addr, value, res, type)  \
 do {                                                        \
 		__asm__ __volatile__ (                      \
@@ -483,7 +483,7 @@ do {                                                        \
 		: "memory");                                \
 } while(0)
 
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 
 #else /* __BIG_ENDIAN */
 
@@ -509,7 +509,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
 #define     _LoadW(addr, value, res, type)   \
 do {                                                        \
 		__asm__ __volatile__ (                      \
@@ -530,7 +530,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#else  /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
 /* For CPUs without lwl instruction */
 #define     _LoadW(addr, value, res, type) \
 do {                                                        \
@@ -565,7 +565,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 
 
 #define     _LoadHWU(addr, value, res, type) \
@@ -592,7 +592,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
 #define     _LoadWU(addr, value, res, type)  \
 do {                                                        \
 		__asm__ __volatile__ (                      \
@@ -635,7 +635,7 @@ do {                                                        \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
 
-#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
 /* For CPUs without lwl and ldl instructions */
 #define	    _LoadWU(addr, value, res, type) \
 do {                                                        \
@@ -718,7 +718,7 @@ do {                                                        \
 			: "=&r" (value), "=r" (res)	    \
 			: "r" (addr), "i" (-EFAULT));       \
 } while(0)
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 
 #define     _StoreHW(addr, value, res, type) \
 do {                                                        \
@@ -743,7 +743,7 @@ do {                                                        \
 			: "r" (value), "r" (addr), "i" (-EFAULT));\
 } while(0)
 
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
 #define     _StoreW(addr, value, res, type)  \
 do {                                                        \
 		__asm__ __volatile__ (                      \
@@ -784,7 +784,7 @@ do {                                                        \
 		: "r" (value), "r" (addr), "i" (-EFAULT));  \
 } while(0)
 
-#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
 /* For CPUs without swl and sdl instructions */
 #define     _StoreW(addr, value, res, type)  \
 do {                                                        \
@@ -861,7 +861,7 @@ do {                                                        \
 		: "memory");                                \
 } while(0)
 
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 #endif
 
 #define LoadHWU(addr, value, res)	_LoadHWU(addr, value, res, kernel)
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 1109924..71244bf 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -156,7 +156,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		kvm_arch_vcpu_free(vcpu);
+		kvm_vcpu_destroy(vcpu);
 	}
 
 	mutex_lock(&kvm->lock);
@@ -280,25 +280,43 @@ static inline void dump_handler(const char *symbol, void *start, void *end)
 	pr_debug("\tEND(%s)\n", symbol);
 }
 
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+/* low level hrtimer wake routine */
+static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+	struct kvm_vcpu *vcpu;
+
+	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+
+	kvm_mips_callbacks->queue_timer_int(vcpu);
+
+	vcpu->arch.wait = 0;
+	if (swq_has_sleeper(&vcpu->wq))
+		swake_up_one(&vcpu->wq);
+
+	return kvm_mips_count_timeout(vcpu);
+}
+
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+	return 0;
+}
+
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	int err, size;
 	void *gebase, *p, *handler, *refill_start, *refill_end;
 	int i;
 
-	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+	kvm_debug("kvm @ %p: create cpu %d at %p\n",
+		  vcpu->kvm, vcpu->vcpu_id, vcpu);
 
-	if (!vcpu) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	err = kvm_vcpu_init(vcpu, kvm, id);
-
+	err = kvm_mips_callbacks->vcpu_init(vcpu);
 	if (err)
-		goto out_free_cpu;
+		return err;
 
-	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+		     HRTIMER_MODE_REL);
+	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
 
 	/*
 	 * Allocate space for host mode exception handlers that handle
@@ -313,7 +331,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 
 	if (!gebase) {
 		err = -ENOMEM;
-		goto out_uninit_cpu;
+		goto out_uninit_vcpu;
 	}
 	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
 		  ALIGN(size, PAGE_SIZE), gebase);
@@ -392,38 +410,33 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	vcpu->arch.last_sched_cpu = -1;
 	vcpu->arch.last_exec_cpu = -1;
 
-	return vcpu;
+	/* Initial guest state */
+	err = kvm_mips_callbacks->vcpu_setup(vcpu);
+	if (err)
+		goto out_free_commpage;
 
+	return 0;
+
+out_free_commpage:
+	kfree(vcpu->arch.kseg0_commpage);
 out_free_gebase:
 	kfree(gebase);
-
-out_uninit_cpu:
-	kvm_vcpu_uninit(vcpu);
-
-out_free_cpu:
-	kfree(vcpu);
-
-out:
-	return ERR_PTR(err);
+out_uninit_vcpu:
+	kvm_mips_callbacks->vcpu_uninit(vcpu);
+	return err;
 }
 
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	hrtimer_cancel(&vcpu->arch.comparecount_timer);
 
-	kvm_vcpu_uninit(vcpu);
-
 	kvm_mips_dump_stats(vcpu);
 
 	kvm_mmu_free_memory_caches(vcpu);
 	kfree(vcpu->arch.guest_ebase);
 	kfree(vcpu->arch.kseg0_commpage);
-	kfree(vcpu);
-}
 
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
-	kvm_arch_vcpu_free(vcpu);
+	kvm_mips_callbacks->vcpu_uninit(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -1212,58 +1225,12 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	return 0;
 }
 
-static void kvm_mips_comparecount_func(unsigned long data)
-{
-	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
-	kvm_mips_callbacks->queue_timer_int(vcpu);
-
-	vcpu->arch.wait = 0;
-	if (swq_has_sleeper(&vcpu->wq))
-		swake_up_one(&vcpu->wq);
-}
-
-/* low level hrtimer wake routine */
-static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
-{
-	struct kvm_vcpu *vcpu;
-
-	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
-	kvm_mips_comparecount_func((unsigned long) vcpu);
-	return kvm_mips_count_timeout(vcpu);
-}
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	int err;
-
-	err = kvm_mips_callbacks->vcpu_init(vcpu);
-	if (err)
-		return err;
-
-	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
-		     HRTIMER_MODE_REL);
-	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
-	return 0;
-}
-
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-	kvm_mips_callbacks->vcpu_uninit(vcpu);
-}
-
 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 				  struct kvm_translation *tr)
 {
 	return 0;
 }
 
-/* Initial guest state */
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-	return kvm_mips_callbacks->vcpu_setup(vcpu);
-}
-
 static void kvm_mips_set_c0_status(void)
 {
 	u32 status = read_c0_status();
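The net effect of the hunks above is that architecture code no longer allocates or frees struct kvm_vcpu; that moved to generic KVM as part of the 5.6 vcpu-creation rework. A hedged summary of the resulting ordering, inferred from this diff rather than quoted from the generic code:

/*
 * create:   kvm_arch_vcpu_precreate()   - early per-VM checks, no allocation
 *           generic KVM allocates struct kvm_vcpu
 *           kvm_arch_vcpu_create(vcpu)  - callbacks->vcpu_init(), hrtimer
 *                                         setup, gebase/handler copy,
 *                                         commpage, callbacks->vcpu_setup()
 *
 * destroy:  kvm_arch_vcpu_destroy(vcpu) - cancel hrtimer, free gebase and
 *                                         commpage, callbacks->vcpu_uninit()
 *           generic KVM frees struct kvm_vcpu
 */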
diff --git a/arch/mips/lasat/picvue_proc.c b/arch/mips/lasat/picvue_proc.c
index 8126f15b..61c0334 100644
--- a/arch/mips/lasat/picvue_proc.c
+++ b/arch/mips/lasat/picvue_proc.c
@@ -89,13 +89,12 @@ static ssize_t pvc_line_proc_write(struct file *file, const char __user *buf,
 	return count;
 }
 
-static const struct file_operations pvc_line_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= pvc_line_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= pvc_line_proc_write,
+static const struct proc_ops pvc_line_proc_ops = {
+	.proc_open	= pvc_line_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= pvc_line_proc_write,
 };
 
 static ssize_t pvc_scroll_proc_write(struct file *file, const char __user *buf,
@@ -148,13 +147,12 @@ static int pvc_scroll_proc_open(struct inode *inode, struct file *file)
 	return single_open(file, pvc_scroll_proc_show, NULL);
 }
 
-static const struct file_operations pvc_scroll_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= pvc_scroll_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= pvc_scroll_proc_write,
+static const struct proc_ops pvc_scroll_proc_ops = {
+	.proc_open	= pvc_scroll_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= pvc_scroll_proc_write,
 };
 
 void pvc_proc_timerfunc(struct timer_list *unused)
@@ -189,12 +187,11 @@ static int __init pvc_proc_init(void)
 	}
 	for (i = 0; i < PVC_NLINES; i++) {
 		proc_entry = proc_create_data(pvc_linename[i], 0644, dir,
-					&pvc_line_proc_fops, &pvc_linedata[i]);
+					&pvc_line_proc_ops, &pvc_linedata[i]);
 		if (proc_entry == NULL)
 			goto error;
 	}
-	proc_entry = proc_create("scroll", 0644, dir,
-				 &pvc_scroll_proc_fops);
+	proc_entry = proc_create("scroll", 0644, dir, &pvc_scroll_proc_ops);
 	if (proc_entry == NULL)
 		goto error;
 
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index cdd19d85..f7994d9 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -301,14 +301,14 @@
 	 and	t0, src, ADDRMASK
 	PREFS(	0, 2*32(src) )
 	PREFD(	1, 2*32(dst) )
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
 	bnez	t1, .Ldst_unaligned\@
 	 nop
 	bnez	t0, .Lsrc_unaligned_dst_aligned\@
-#else
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
 	or	t0, t0, t1
 	bnez	t0, .Lcopy_unaligned_bytes\@
-#endif
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 	/*
 	 * use delay slot for fall-through
 	 * src and dst are aligned; need to compute rem
@@ -389,7 +389,7 @@
 	bne	rem, len, 1b
 	.set	noreorder
 
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
 	/*
 	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
 	 * A loop would do only a byte at a time with possible branch
@@ -491,7 +491,7 @@
 	bne	len, rem, 1b
 	.set	noreorder
 
-#endif /* CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* !CONFIG_CPU_NO_LOAD_STORE_LR */
 .Lcopy_bytes_checklen\@:
 	beqz	len, .Ldone\@
 	 nop
@@ -520,7 +520,7 @@
 	jr	ra
 	 nop
 
-#ifndef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifdef CONFIG_CPU_NO_LOAD_STORE_LR
 .Lcopy_unaligned_bytes\@:
 1:
 	COPY_BYTE(0)
@@ -534,7 +534,7 @@
 	ADD	src, src, 8
 	b	1b
 	 ADD	dst, dst, 8
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 	.if __memcpy == 1
 	END(memcpy)
 	.set __memcpy, 0
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 418611e..d5449e8 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -115,7 +115,7 @@
 #endif
 	.set		reorder
 
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
 	R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
 	EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@)	/* make word/dword aligned */
@@ -125,7 +125,7 @@
 	PTR_SUBU	a0, t0			/* long align ptr */
 	PTR_ADDU	a2, t0			/* correct size */
 
-#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
 #define STORE_BYTE(N)				\
 	EX(sb, a1, N(a0), .Lbyte_fixup\@);	\
 	.set		noreorder;		\
@@ -150,7 +150,7 @@
 	ori		a0, STORMASK
 	xori		a0, STORMASK
 	PTR_ADDIU	a0, STORSIZE
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 1:	ori		t1, a2, 0x3f		/* # of full blocks */
 	xori		t1, 0x3f
 	andi		t0, a2, 0x40-STORSIZE
@@ -185,7 +185,7 @@
 
 	.set		noreorder
 	beqz		a2, 1f
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
 	 PTR_ADDU	a0, a2			/* What's left */
 	.set		reorder
 	R10KCBARRIER(0(ra))
@@ -194,7 +194,7 @@
 #else
 	EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
 #endif
-#else
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
 	 PTR_SUBU	t0, $0, a2
 	.set		reorder
 	move		a2, zero		/* No remaining longs */
@@ -211,7 +211,7 @@
 	EX(sb, a1, 6(a0), .Lbyte_fixup\@)
 #endif
 0:
-#endif
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 1:	move		a2, zero
 	jr		ra
 
@@ -234,7 +234,7 @@
 	.hidden __memset
 	.endif
 
-#ifndef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifdef CONFIG_CPU_NO_LOAD_STORE_LR
 .Lbyte_fixup\@:
 	/*
 	 * unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1
@@ -243,7 +243,7 @@
 	PTR_SUBU	a2, t0
 	PTR_ADDIU	a2, 1
 	jr		ra
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 
 .Lfirst_fixup\@:
 	/* unset_bytes already in a2 */
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index 5530070..de03838 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -15,7 +15,7 @@
 #include <linux/export.h>
 #include <linux/stringify.h>
 
-#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
+#if !defined(CONFIG_CPU_HAS_DIEI)
 
 /*
  * For cli() we have to insert nops to make sure that the new value
@@ -110,4 +110,4 @@ notrace void arch_local_irq_restore(unsigned long flags)
 }
 EXPORT_SYMBOL(arch_local_irq_restore);
 
-#endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_DIEI */
diff --git a/arch/mips/loongson2ef/common/pm.c b/arch/mips/loongson2ef/common/pm.c
index 11f4cfd5..bcb7ae9 100644
--- a/arch/mips/loongson2ef/common/pm.c
+++ b/arch/mips/loongson2ef/common/pm.c
@@ -91,7 +91,7 @@ static inline void stop_perf_counters(void)
 
 static void loongson_suspend_enter(void)
 {
-	static unsigned int cached_cpu_freq;
+	unsigned int cached_cpu_freq;
 
 	/* setup wakeup events via enabling the IRQs */
 	setup_wakeup_events();
diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
index ef94a22..e5b40c5 100644
--- a/arch/mips/loongson64/numa.c
+++ b/arch/mips/loongson64/numa.c
@@ -75,7 +75,7 @@ static int __init compute_node_distance(int row, int col)
 				loongson_sysconf.cores_per_package;
 
 	if (col == row)
-		return 0;
+		return LOCAL_DISTANCE;
 	else if (package_row == package_col)
 		return 40;
 	else
diff --git a/arch/mips/loongson64/platform.c b/arch/mips/loongson64/platform.c
index 13f3404..9674ae1 100644
--- a/arch/mips/loongson64/platform.c
+++ b/arch/mips/loongson64/platform.c
@@ -27,6 +27,9 @@ static int __init loongson3_platform_init(void)
 			continue;
 
 		pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
+		if (!pdev)
+			return -ENOMEM;
+
 		pdev->name = loongson_sysconf.sensors[i].name;
 		pdev->id = loongson_sysconf.sensors[i].id;
 		pdev->dev.platform_data = &loongson_sysconf.sensors[i];
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 710e1f8..9701c89 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1514,16 +1514,28 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			break;
 
 		case madd_s_op:
-			handler = fpemu_sp_madd;
+			if (cpu_has_mac2008_only)
+				handler = ieee754sp_madd;
+			else
+				handler = fpemu_sp_madd;
 			goto scoptop;
 		case msub_s_op:
-			handler = fpemu_sp_msub;
+			if (cpu_has_mac2008_only)
+				handler = ieee754sp_msub;
+			else
+				handler = fpemu_sp_msub;
 			goto scoptop;
 		case nmadd_s_op:
-			handler = fpemu_sp_nmadd;
+			if (cpu_has_mac2008_only)
+				handler = ieee754sp_nmadd;
+			else
+				handler = fpemu_sp_nmadd;
 			goto scoptop;
 		case nmsub_s_op:
-			handler = fpemu_sp_nmsub;
+			if (cpu_has_mac2008_only)
+				handler = ieee754sp_nmsub;
+			else
+				handler = fpemu_sp_nmsub;
 			goto scoptop;
 
 		      scoptop:
@@ -1610,15 +1622,27 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			break;
 
 		case madd_d_op:
-			handler = fpemu_dp_madd;
+			if (cpu_has_mac2008_only)
+				handler = ieee754dp_madd;
+			else
+				handler = fpemu_dp_madd;
 			goto dcoptop;
 		case msub_d_op:
-			handler = fpemu_dp_msub;
+			if (cpu_has_mac2008_only)
+				handler = ieee754dp_msub;
+			else
+				handler = fpemu_dp_msub;
 			goto dcoptop;
 		case nmadd_d_op:
-			handler = fpemu_dp_nmadd;
+			if (cpu_has_mac2008_only)
+				handler = ieee754dp_nmadd;
+			else
+				handler = fpemu_dp_nmadd;
 			goto dcoptop;
 		case nmsub_d_op:
-			handler = fpemu_dp_nmsub;
+			if (cpu_has_mac2008_only)
+				handler = ieee754dp_nmsub;
+			else
+				handler = fpemu_dp_nmsub;
 			goto dcoptop;
 
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
index 3da0ce4..e24ef37 100644
--- a/arch/mips/math-emu/dp_maddf.c
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -68,6 +68,12 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 
 	ieee754_clearcx();
 
+	rs = xs ^ ys;
+	if (flags & MADDF_NEGATE_PRODUCT)
+		rs ^= 1;
+	if (flags & MADDF_NEGATE_ADDITION)
+		zs ^= 1;
+
 	/*
 	 * Handle the cases when at least one of x, y or z is a NaN.
 	 * Order of precedence is sNaN, qNaN and z, x, y.
@@ -104,9 +110,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
-		if ((zc == IEEE754_CLASS_INF) &&
-		    ((!(flags & MADDF_NEGATE_PRODUCT) && (zs != (xs ^ ys))) ||
-		     ((flags & MADDF_NEGATE_PRODUCT) && (zs == (xs ^ ys))))) {
+		if ((zc == IEEE754_CLASS_INF) && (zs != rs)) {
 			/*
 			 * Cases of addition of infinities with opposite signs
 			 * or subtraction of infinities with same signs.
@@ -116,15 +120,10 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 		}
 		/*
 		 * z is here either not an infinity, or an infinity having the
-		 * same sign as product (x*y) (in case of MADDF.D instruction)
-		 * or product -(x*y) (in MSUBF.D case). The result must be an
-		 * infinity, and its sign is determined only by the value of
-		 * (flags & MADDF_NEGATE_PRODUCT) and the signs of x and y.
+		 * same sign as product (x*y). The result must be an infinity,
+		 * and its sign is determined only by the sign of product (x*y).
 		 */
-		if (flags & MADDF_NEGATE_PRODUCT)
-			return ieee754dp_inf(1 ^ (xs ^ ys));
-		else
-			return ieee754dp_inf(xs ^ ys);
+		return ieee754dp_inf(rs);
 
 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
@@ -135,10 +134,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 			return ieee754dp_inf(zs);
 		if (zc == IEEE754_CLASS_ZERO) {
 			/* Handle cases +0 + (-0) and similar ones. */
-			if ((!(flags & MADDF_NEGATE_PRODUCT)
-					&& (zs == (xs ^ ys))) ||
-			    ((flags & MADDF_NEGATE_PRODUCT)
-					&& (zs != (xs ^ ys))))
+			if (zs == rs)
 				/*
 				 * Cases of addition of zeros of equal signs
 				 * or subtraction of zeroes of opposite signs.
@@ -187,9 +183,6 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 	assert(ym & DP_HIDDEN_BIT);
 
 	re = xe + ye;
-	rs = xs ^ ys;
-	if (flags & MADDF_NEGATE_PRODUCT)
-		rs ^= 1;
 
 	/* shunt to top of word */
 	xm <<= 64 - (DP_FBITS + 1);
@@ -340,3 +333,27 @@ union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
 {
 	return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
 }
+
+union ieee754dp ieee754dp_madd(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y)
+{
+	return _dp_maddf(z, x, y, 0);
+}
+
+union ieee754dp ieee754dp_msub(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y)
+{
+	return _dp_maddf(z, x, y, MADDF_NEGATE_ADDITION);
+}
+
+union ieee754dp ieee754dp_nmadd(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y)
+{
+	return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT|MADDF_NEGATE_ADDITION);
+}
+
+union ieee754dp ieee754dp_nmsub(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y)
+{
+	return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
+}
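The four wrappers just added map the legacy MIPS fused multiply-add instructions onto the two negation flags; the single-precision counterparts in sp_maddf.c below use the same mapping. A compact summary, derived from the wrappers above (the table itself is not part of the patch):

/*
 * madd:  fd =  (fs * ft) + fr    -> no flags
 * msub:  fd =  (fs * ft) - fr    -> MADDF_NEGATE_ADDITION
 * nmadd: fd = -((fs * ft) + fr)  -> MADDF_NEGATE_PRODUCT | MADDF_NEGATE_ADDITION
 * nmsub: fd = -((fs * ft) - fr)  -> MADDF_NEGATE_PRODUCT
 *
 * With rs = xs ^ ys (flipped by MADDF_NEGATE_PRODUCT) and zs flipped by
 * MADDF_NEGATE_ADDITION, the infinity and zero special cases reduce to
 * comparing rs and zs directly.
 */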
diff --git a/arch/mips/math-emu/ieee754.h b/arch/mips/math-emu/ieee754.h
index b9167bd..090caa7 100644
--- a/arch/mips/math-emu/ieee754.h
+++ b/arch/mips/math-emu/ieee754.h
@@ -68,6 +68,14 @@ union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
 				union ieee754sp y);
 union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
 				union ieee754sp y);
+union ieee754sp ieee754sp_madd(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y);
+union ieee754sp ieee754sp_msub(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y);
+union ieee754sp ieee754sp_nmadd(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y);
+union ieee754sp ieee754sp_nmsub(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y);
 int ieee754sp_2008class(union ieee754sp x);
 union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y);
 union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y);
@@ -103,6 +111,14 @@ union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
 				union ieee754dp y);
 union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
 				union ieee754dp y);
+union ieee754dp ieee754dp_madd(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y);
+union ieee754dp ieee754dp_msub(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y);
+union ieee754dp ieee754dp_nmadd(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y);
+union ieee754dp ieee754dp_nmsub(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y);
 int ieee754dp_2008class(union ieee754dp x);
 union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y);
 union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y);
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 52b2011..2c3b135 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -16,6 +16,7 @@
 
 enum maddf_flags {
 	MADDF_NEGATE_PRODUCT	= 1 << 0,
+	MADDF_NEGATE_ADDITION	= 1 << 1,
 };
 
 static inline void ieee754_clearcx(void)
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
index d638354..1b85b1a 100644
--- a/arch/mips/math-emu/sp_maddf.c
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -36,6 +36,12 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 
 	ieee754_clearcx();
 
+	rs = xs ^ ys;
+	if (flags & MADDF_NEGATE_PRODUCT)
+		rs ^= 1;
+	if (flags & MADDF_NEGATE_ADDITION)
+		zs ^= 1;
+
 	/*
 	 * Handle the cases when at least one of x, y or z is a NaN.
 	 * Order of precedence is sNaN, qNaN and z, x, y.
@@ -73,9 +79,7 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
-		if ((zc == IEEE754_CLASS_INF) &&
-		    ((!(flags & MADDF_NEGATE_PRODUCT) && (zs != (xs ^ ys))) ||
-		     ((flags & MADDF_NEGATE_PRODUCT) && (zs == (xs ^ ys))))) {
+		if ((zc == IEEE754_CLASS_INF) && (zs != rs)) {
 			/*
 			 * Cases of addition of infinities with opposite signs
 			 * or subtraction of infinities with same signs.
@@ -85,15 +89,10 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 		}
 		/*
 		 * z is here either not an infinity, or an infinity having the
-		 * same sign as product (x*y) (in case of MADDF.D instruction)
-		 * or product -(x*y) (in MSUBF.D case). The result must be an
-		 * infinity, and its sign is determined only by the value of
-		 * (flags & MADDF_NEGATE_PRODUCT) and the signs of x and y.
+		 * same sign as product (x*y). The result must be an infinity,
+		 * and its sign is determined only by the sign of product (x*y).
 		 */
-		if (flags & MADDF_NEGATE_PRODUCT)
-			return ieee754sp_inf(1 ^ (xs ^ ys));
-		else
-			return ieee754sp_inf(xs ^ ys);
+		return ieee754sp_inf(rs);
 
 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
@@ -104,10 +103,7 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 			return ieee754sp_inf(zs);
 		if (zc == IEEE754_CLASS_ZERO) {
 			/* Handle cases +0 + (-0) and similar ones. */
-			if ((!(flags & MADDF_NEGATE_PRODUCT)
-					&& (zs == (xs ^ ys))) ||
-			    ((flags & MADDF_NEGATE_PRODUCT)
-					&& (zs != (xs ^ ys))))
+			if (zs == rs)
 				/*
 				 * Cases of addition of zeros of equal signs
 				 * or subtraction of zeroes of opposite signs.
@@ -158,9 +154,6 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 	assert(ym & SP_HIDDEN_BIT);
 
 	re = xe + ye;
-	rs = xs ^ ys;
-	if (flags & MADDF_NEGATE_PRODUCT)
-		rs ^= 1;
 
 	/* Multiple 24 bit xm and ym to give 48 bit results */
 	rm64 = (uint64_t)xm * ym;
@@ -260,3 +253,27 @@ union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
 {
 	return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
 }
+
+union ieee754sp ieee754sp_madd(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y)
+{
+	return _sp_maddf(z, x, y, 0);
+}
+
+union ieee754sp ieee754sp_msub(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y)
+{
+	return _sp_maddf(z, x, y, MADDF_NEGATE_ADDITION);
+}
+
+union ieee754sp ieee754sp_nmadd(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y)
+{
+	return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT|MADDF_NEGATE_ADDITION);
+}
+
+union ieee754sp ieee754sp_nmsub(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y)
+{
+	return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
+}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 50f9ed8..7968400 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -508,6 +508,51 @@ void __ref free_initmem(void)
 		free_initmem_default(POISON_FREE_INITMEM);
 }
 
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+	return node_distance(cpu_to_node(from), cpu_to_node(to));
+}
+
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
+				       size_t align)
+{
+	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
+				      MEMBLOCK_ALLOC_ACCESSIBLE,
+				      cpu_to_node(cpu));
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+	memblock_free_early(__pa(ptr), size);
+}
+
+void __init setup_per_cpu_areas(void)
+{
+	unsigned long delta;
+	unsigned int cpu;
+	int rc;
+
+	/*
+	 * Always reserve area for module percpu variables.  That's
+	 * what the legacy allocator did.
+	 */
+	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+				    pcpu_cpu_distance,
+				    pcpu_fc_alloc, pcpu_fc_free);
+	if (rc < 0)
+		panic("Failed to initialize percpu areas.");
+
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu)
+		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 unsigned long pgd_current[NR_CPUS];
 #endif
diff --git a/arch/mips/net/Makefile b/arch/mips/net/Makefile
index 2d03af7..d559123 100644
--- a/arch/mips/net/Makefile
+++ b/arch/mips/net/Makefile
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
 # MIPS networking code
 
+obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o
 obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
new file mode 100644
index 0000000..0af8862
--- /dev/null
+++ b/arch/mips/net/bpf_jit.c
@@ -0,0 +1,1270 @@
+/*
+ * Just-In-Time compiler for BPF filters on MIPS
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/filter.h>
+#include <linux/if_vlan.h>
+#include <linux/moduleloader.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <asm/asm.h>
+#include <asm/bitops.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-features.h>
+#include <asm/uasm.h>
+
+#include "bpf_jit.h"
+
+/* ABI
+ * r_skb_hl	SKB header length
+ * r_data	SKB data pointer
+ * r_off	Offset
+ * r_A		BPF register A
+ * r_X		BPF register X
+ * r_skb	*skb
+ * r_M		*scratch memory
+ * r_skb_len	SKB length
+ *
+ * On entry (*bpf_func)(*skb, *filter)
+ * a0 = MIPS_R_A0 = skb;
+ * a1 = MIPS_R_A1 = filter;
+ *
+ * Stack
+ * ...
+ * M[15]
+ * M[14]
+ * M[13]
+ * ...
+ * M[0] <-- r_M
+ * saved reg k-1
+ * saved reg k-2
+ * ...
+ * saved reg 0 <-- r_sp
+ * <no argument area>
+ *
+ *                     Packet layout
+ *
+ * <--------------------- len ------------------------>
+ * <--skb-len(r_skb_hl)-->< ----- skb->data_len ------>
+ * ----------------------------------------------------
+ * |                  skb->data                       |
+ * ----------------------------------------------------
+ */
+
+#define ptr typeof(unsigned long)
+
+#define SCRATCH_OFF(k)		(4 * (k))
+
+/* JIT flags */
+#define SEEN_CALL		(1 << BPF_MEMWORDS)
+#define SEEN_SREG_SFT		(BPF_MEMWORDS + 1)
+#define SEEN_SREG_BASE		(1 << SEEN_SREG_SFT)
+#define SEEN_SREG(x)		(SEEN_SREG_BASE << (x))
+#define SEEN_OFF		SEEN_SREG(2)
+#define SEEN_A			SEEN_SREG(3)
+#define SEEN_X			SEEN_SREG(4)
+#define SEEN_SKB		SEEN_SREG(5)
+#define SEEN_MEM		SEEN_SREG(6)
+/* SEEN_SKB_DATA also implies skb_hl and skb_len */
+#define SEEN_SKB_DATA		(SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0))
+
+/* Arguments used by JIT */
+#define ARGS_USED_BY_JIT	2 /* only applicable to 64-bit */
+
+#define SBIT(x)			(1 << (x)) /* Signed version of BIT() */
+
+/**
+ * struct jit_ctx - JIT context
+ * @skf:		The sk_filter
+ * @prologue_bytes:	Number of bytes for prologue
+ * @idx:		Instruction index
+ * @flags:		JIT flags
+ * @offsets:		Instruction offsets
+ * @target:		Memory location for the compiled filter
+ */
+struct jit_ctx {
+	const struct bpf_prog *skf;
+	unsigned int prologue_bytes;
+	u32 idx;
+	u32 flags;
+	u32 *offsets;
+	u32 *target;
+};
+
+
+static inline int optimize_div(u32 *k)
+{
+	/* power of 2 divides can be implemented with right shift */
+	if (!(*k & (*k-1))) {
+		*k = ilog2(*k);
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
+
+/* Simply emit the instruction if the JIT memory space has been allocated */
+#define emit_instr(ctx, func, ...)			\
+do {							\
+	if ((ctx)->target != NULL) {			\
+		u32 *p = &(ctx)->target[ctx->idx];	\
+		uasm_i_##func(&p, ##__VA_ARGS__);	\
+	}						\
+	(ctx)->idx++;					\
+} while (0)
+
+/*
+ * Similar to emit_instr but it must be used when we need to emit
+ * 32-bit or 64-bit instructions
+ */
+#define emit_long_instr(ctx, func, ...)			\
+do {							\
+	if ((ctx)->target != NULL) {			\
+		u32 *p = &(ctx)->target[ctx->idx];	\
+		UASM_i_##func(&p, ##__VA_ARGS__);	\
+	}						\
+	(ctx)->idx++;					\
+} while (0)
+
+/* Determine if immediate is within the 16-bit signed range */
+static inline bool is_range16(s32 imm)
+{
+	return !(imm >= SBIT(15) || imm < -SBIT(15));
+}
+
+static inline void emit_addu(unsigned int dst, unsigned int src1,
+			     unsigned int src2, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, addu, dst, src1, src2);
+}
+
+static inline void emit_nop(struct jit_ctx *ctx)
+{
+	emit_instr(ctx, nop);
+}
+
+/* Load a u32 immediate to a register */
+static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
+{
+	if (ctx->target != NULL) {
+		/* addiu can only handle s16 */
+		if (!is_range16(imm)) {
+			u32 *p = &ctx->target[ctx->idx];
+			uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
+			p = &ctx->target[ctx->idx + 1];
+			uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
+		} else {
+			u32 *p = &ctx->target[ctx->idx];
+			uasm_i_addiu(&p, dst, r_zero, imm);
+		}
+	}
+	ctx->idx++;
+
+	if (!is_range16(imm))
+		ctx->idx++;
+}
+
+static inline void emit_or(unsigned int dst, unsigned int src1,
+			   unsigned int src2, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, or, dst, src1, src2);
+}
+
+static inline void emit_ori(unsigned int dst, unsigned src, u32 imm,
+			    struct jit_ctx *ctx)
+{
+	if (imm >= BIT(16)) {
+		emit_load_imm(r_tmp, imm, ctx);
+		emit_or(dst, src, r_tmp, ctx);
+	} else {
+		emit_instr(ctx, ori, dst, src, imm);
+	}
+}
+
+static inline void emit_daddiu(unsigned int dst, unsigned int src,
+			       int imm, struct jit_ctx *ctx)
+{
+	/*
+	 * Only used for stack, so the imm is relatively small
+	 * and it fits in 15-bits
+	 */
+	emit_instr(ctx, daddiu, dst, src, imm);
+}
+
+static inline void emit_addiu(unsigned int dst, unsigned int src,
+			      u32 imm, struct jit_ctx *ctx)
+{
+	if (!is_range16(imm)) {
+		emit_load_imm(r_tmp, imm, ctx);
+		emit_addu(dst, r_tmp, src, ctx);
+	} else {
+		emit_instr(ctx, addiu, dst, src, imm);
+	}
+}
+
+static inline void emit_and(unsigned int dst, unsigned int src1,
+			    unsigned int src2, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, and, dst, src1, src2);
+}
+
+static inline void emit_andi(unsigned int dst, unsigned int src,
+			     u32 imm, struct jit_ctx *ctx)
+{
+	/* If imm does not fit in u16 then load it to register */
+	if (imm >= BIT(16)) {
+		emit_load_imm(r_tmp, imm, ctx);
+		emit_and(dst, src, r_tmp, ctx);
+	} else {
+		emit_instr(ctx, andi, dst, src, imm);
+	}
+}
+
+static inline void emit_xor(unsigned int dst, unsigned int src1,
+			    unsigned int src2, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, xor, dst, src1, src2);
+}
+
+static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
+{
+	/* If imm does not fit in u16 then load it to register */
+	if (imm >= BIT(16)) {
+		emit_load_imm(r_tmp, imm, ctx);
+		emit_xor(dst, src, r_tmp, ctx);
+	} else {
+		emit_instr(ctx, xori, dst, src, imm);
+	}
+}
+
+static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
+{
+	emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
+}
+
+static inline void emit_subu(unsigned int dst, unsigned int src1,
+			     unsigned int src2, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, subu, dst, src1, src2);
+}
+
+static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
+{
+	emit_subu(reg, r_zero, reg, ctx);
+}
+
+static inline void emit_sllv(unsigned int dst, unsigned int src,
+			     unsigned int sa, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, sllv, dst, src, sa);
+}
+
+static inline void emit_sll(unsigned int dst, unsigned int src,
+			    unsigned int sa, struct jit_ctx *ctx)
+{
+	/* sa is 5-bits long */
+	if (sa >= BIT(5))
+		/* Shifting >= 32 results in zero */
+		emit_jit_reg_move(dst, r_zero, ctx);
+	else
+		emit_instr(ctx, sll, dst, src, sa);
+}
+
+static inline void emit_srlv(unsigned int dst, unsigned int src,
+			     unsigned int sa, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, srlv, dst, src, sa);
+}
+
+static inline void emit_srl(unsigned int dst, unsigned int src,
+			    unsigned int sa, struct jit_ctx *ctx)
+{
+	/* sa is 5-bits long */
+	if (sa >= BIT(5))
+		/* Shifting >= 32 results in zero */
+		emit_jit_reg_move(dst, r_zero, ctx);
+	else
+		emit_instr(ctx, srl, dst, src, sa);
+}
+
+static inline void emit_slt(unsigned int dst, unsigned int src1,
+			    unsigned int src2, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, slt, dst, src1, src2);
+}
+
+static inline void emit_sltu(unsigned int dst, unsigned int src1,
+			     unsigned int src2, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, sltu, dst, src1, src2);
+}
+
+static inline void emit_sltiu(unsigned dst, unsigned int src,
+			      unsigned int imm, struct jit_ctx *ctx)
+{
+	/* 16 bit immediate */
+	if (!is_range16((s32)imm)) {
+		emit_load_imm(r_tmp, imm, ctx);
+		emit_sltu(dst, src, r_tmp, ctx);
+	} else {
+		emit_instr(ctx, sltiu, dst, src, imm);
+	}
+
+}
+
+/* Store register on the stack */
+static inline void emit_store_stack_reg(ptr reg, ptr base,
+					unsigned int offset,
+					struct jit_ctx *ctx)
+{
+	emit_long_instr(ctx, SW, reg, offset, base);
+}
+
+static inline void emit_store(ptr reg, ptr base, unsigned int offset,
+			      struct jit_ctx *ctx)
+{
+	emit_instr(ctx, sw, reg, offset, base);
+}
+
+static inline void emit_load_stack_reg(ptr reg, ptr base,
+				       unsigned int offset,
+				       struct jit_ctx *ctx)
+{
+	emit_long_instr(ctx, LW, reg, offset, base);
+}
+
+static inline void emit_load(unsigned int reg, unsigned int base,
+			     unsigned int offset, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, lw, reg, offset, base);
+}
+
+static inline void emit_load_byte(unsigned int reg, unsigned int base,
+				  unsigned int offset, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, lb, reg, offset, base);
+}
+
+static inline void emit_half_load(unsigned int reg, unsigned int base,
+				  unsigned int offset, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, lh, reg, offset, base);
+}
+
+static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base,
+					   unsigned int offset, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, lhu, reg, offset, base);
+}
+
+static inline void emit_mul(unsigned int dst, unsigned int src1,
+			    unsigned int src2, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, mul, dst, src1, src2);
+}
+
+static inline void emit_div(unsigned int dst, unsigned int src,
+			    struct jit_ctx *ctx)
+{
+	if (ctx->target != NULL) {
+		u32 *p = &ctx->target[ctx->idx];
+		uasm_i_divu(&p, dst, src);
+		p = &ctx->target[ctx->idx + 1];
+		uasm_i_mflo(&p, dst);
+	}
+	ctx->idx += 2; /* 2 insts */
+}
+
+static inline void emit_mod(unsigned int dst, unsigned int src,
+			    struct jit_ctx *ctx)
+{
+	if (ctx->target != NULL) {
+		u32 *p = &ctx->target[ctx->idx];
+		uasm_i_divu(&p, dst, src);
+		p = &ctx->target[ctx->idx + 1];
+		uasm_i_mfhi(&p, dst);
+	}
+	ctx->idx += 2; /* 2 insts */
+}
+
+static inline void emit_dsll(unsigned int dst, unsigned int src,
+			     unsigned int sa, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, dsll, dst, src, sa);
+}
+
+static inline void emit_dsrl32(unsigned int dst, unsigned int src,
+			       unsigned int sa, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, dsrl32, dst, src, sa);
+}
+
+static inline void emit_wsbh(unsigned int dst, unsigned int src,
+			     struct jit_ctx *ctx)
+{
+	emit_instr(ctx, wsbh, dst, src);
+}
+
+/* load pointer to register */
+static inline void emit_load_ptr(unsigned int dst, unsigned int src,
+				     int imm, struct jit_ctx *ctx)
+{
+	/* src contains the base addr of the 32/64-bit pointer */
+	emit_long_instr(ctx, LW, dst, imm, src);
+}
+
+/* load a function pointer to register */
+static inline void emit_load_func(unsigned int reg, ptr imm,
+				  struct jit_ctx *ctx)
+{
+	if (IS_ENABLED(CONFIG_64BIT)) {
+		/* At this point imm is always 64-bit */
+		emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
+		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
+		emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
+		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
+		emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
+	} else {
+		emit_load_imm(reg, imm, ctx);
+	}
+}
+
+/* Move to real MIPS register */
+static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
+{
+	emit_long_instr(ctx, ADDU, dst, src, r_zero);
+}
+
+/* Move to JIT (32-bit) register */
+static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
+{
+	emit_addu(dst, src, r_zero, ctx);
+}
+
+/* Compute the immediate value for PC-relative branches. */
+static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
+{
+	if (ctx->target == NULL)
+		return 0;
+
+	/*
+	 * We want a pc-relative branch. We only do forward branches
+	 * so tgt is always after pc. tgt is the instruction offset
+	 * we want to jump to.
+	 *
+	 * Branch on MIPS:
+	 * I: target_offset <- sign_extend(offset)
+	 * I+1: PC += target_offset (delay slot)
+	 *
+	 * ctx->idx currently points to the branch instruction
+	 * but the offset is added to the delay slot so we need
+	 * to subtract 4.
+	 */
+	return ctx->offsets[tgt] -
+		(ctx->idx * 4 - ctx->prologue_bytes) - 4;
+}
+
+static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
+			     unsigned int imm, struct jit_ctx *ctx)
+{
+	if (ctx->target != NULL) {
+		u32 *p = &ctx->target[ctx->idx];
+
+		switch (cond) {
+		case MIPS_COND_EQ:
+			uasm_i_beq(&p, reg1, reg2, imm);
+			break;
+		case MIPS_COND_NE:
+			uasm_i_bne(&p, reg1, reg2, imm);
+			break;
+		case MIPS_COND_ALL:
+			uasm_i_b(&p, imm);
+			break;
+		default:
+			pr_warn("%s: Unhandled branch conditional: %d\n",
+				__func__, cond);
+		}
+	}
+	ctx->idx++;
+}
+
+static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
+{
+	emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
+}
+
+static inline void emit_jalr(unsigned int link, unsigned int reg,
+			     struct jit_ctx *ctx)
+{
+	emit_instr(ctx, jalr, link, reg);
+}
+
+static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, jr, reg);
+}
+
+static inline u16 align_sp(unsigned int num)
+{
+	/* Double word alignment for 32-bit, quadword for 64-bit */
+	unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
+	num = (num + (align - 1)) & -align;
+	return num;
+}
+
+static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
+{
+	int i = 0, real_off = 0;
+	u32 sflags, tmp_flags;
+
+	/* Adjust the stack pointer */
+	if (offset)
+		emit_stack_offset(-align_sp(offset), ctx);
+
+	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
+	/* sflags is essentially a bitmap */
+	while (tmp_flags) {
+		if ((sflags >> i) & 0x1) {
+			emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
+					     ctx);
+			real_off += SZREG;
+		}
+		i++;
+		tmp_flags >>= 1;
+	}
+
+	/* save return address */
+	if (ctx->flags & SEEN_CALL) {
+		emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
+		real_off += SZREG;
+	}
+
+	/* Setup r_M leaving the alignment gap if necessary */
+	if (ctx->flags & SEEN_MEM) {
+		if (real_off % (SZREG * 2))
+			real_off += SZREG;
+		emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
+	}
+}
+
+static void restore_bpf_jit_regs(struct jit_ctx *ctx,
+				 unsigned int offset)
+{
+	int i, real_off = 0;
+	u32 sflags, tmp_flags;
+
+	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
+	/* sflags is a bitmap */
+	i = 0;
+	while (tmp_flags) {
+		if ((sflags >> i) & 0x1) {
+			emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
+					    ctx);
+			real_off += SZREG;
+		}
+		i++;
+		tmp_flags >>= 1;
+	}
+
+	/* restore return address */
+	if (ctx->flags & SEEN_CALL)
+		emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
+
+	/* Restore the sp and discard the scratch memory */
+	if (offset)
+		emit_stack_offset(align_sp(offset), ctx);
+}
+
+static unsigned int get_stack_depth(struct jit_ctx *ctx)
+{
+	int sp_off = 0;
+
+
+	/* How many s* regs do we need to preserve? */
+	sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;
+
+	if (ctx->flags & SEEN_MEM)
+		sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */
+
+	if (ctx->flags & SEEN_CALL)
+		sp_off += SZREG; /* Space for our ra register */
+
+	return sp_off;
+}
+
+static void build_prologue(struct jit_ctx *ctx)
+{
+	int sp_off;
+
+	/* Calculate the total offset for the stack pointer */
+	sp_off = get_stack_depth(ctx);
+	save_bpf_jit_regs(ctx, sp_off);
+
+	if (ctx->flags & SEEN_SKB)
+		emit_reg_move(r_skb, MIPS_R_A0, ctx);
+
+	if (ctx->flags & SEEN_SKB_DATA) {
+		/* Load packet length */
+		emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len),
+			  ctx);
+		emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len),
+			  ctx);
+		/* Load the data pointer */
+		emit_load_ptr(r_skb_data, r_skb,
+			      offsetof(struct sk_buff, data), ctx);
+		/* Load the header length */
+		emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx);
+	}
+
+	if (ctx->flags & SEEN_X)
+		emit_jit_reg_move(r_X, r_zero, ctx);
+
+	/*
+	 * Do not leak kernel data to userspace, we only need to clear
+	 * r_A if it is ever used.  In fact if it is never used, we
+	 * will not save/restore it, so clearing it in this case would
+	 * corrupt the state of the caller.
+	 */
+	if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
+	    (ctx->flags & SEEN_A))
+		emit_jit_reg_move(r_A, r_zero, ctx);
+}
+
+static void build_epilogue(struct jit_ctx *ctx)
+{
+	unsigned int sp_off;
+
+	/* Calculate the total offset for the stack pointer */
+
+	sp_off = get_stack_depth(ctx);
+	restore_bpf_jit_regs(ctx, sp_off);
+
+	/* Return */
+	emit_jr(r_ra, ctx);
+	emit_nop(ctx);
+}
+
+#define CHOOSE_LOAD_FUNC(K, func) \
+	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
+	 func##_positive)
+
+static int build_body(struct jit_ctx *ctx)
+{
+	const struct bpf_prog *prog = ctx->skf;
+	const struct sock_filter *inst;
+	unsigned int i, off, condt;
+	u32 k, b_off __maybe_unused;
+	u8 (*sk_load_func)(unsigned long *skb, int offset);
+
+	for (i = 0; i < prog->len; i++) {
+		u16 code;
+
+		inst = &(prog->insns[i]);
+		pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
+			 __func__, inst->code, inst->jt, inst->jf, inst->k);
+		k = inst->k;
+		code = bpf_anc_helper(inst);
+
+		if (ctx->target == NULL)
+			ctx->offsets[i] = ctx->idx * 4;
+
+		switch (code) {
+		case BPF_LD | BPF_IMM:
+			/* A <- k ==> li r_A, k */
+			ctx->flags |= SEEN_A;
+			emit_load_imm(r_A, k, ctx);
+			break;
+		case BPF_LD | BPF_W | BPF_LEN:
+			BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4);
+			/* A <- len ==> lw r_A, offset(skb) */
+			ctx->flags |= SEEN_SKB | SEEN_A;
+			off = offsetof(struct sk_buff, len);
+			emit_load(r_A, r_skb, off, ctx);
+			break;
+		case BPF_LD | BPF_MEM:
+			/* A <- M[k] ==> lw r_A, offset(M) */
+			ctx->flags |= SEEN_MEM | SEEN_A;
+			emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
+			break;
+		case BPF_LD | BPF_W | BPF_ABS:
+			/* A <- P[k:4] */
+			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word);
+			goto load;
+		case BPF_LD | BPF_H | BPF_ABS:
+			/* A <- P[k:2] */
+			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half);
+			goto load;
+		case BPF_LD | BPF_B | BPF_ABS:
+			/* A <- P[k:1] */
+			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte);
+load:
+			emit_load_imm(r_off, k, ctx);
+load_common:
+			ctx->flags |= SEEN_CALL | SEEN_OFF |
+				SEEN_SKB | SEEN_A | SEEN_SKB_DATA;
+
+			emit_load_func(r_s0, (ptr)sk_load_func, ctx);
+			emit_reg_move(MIPS_R_A0, r_skb, ctx);
+			emit_jalr(MIPS_R_RA, r_s0, ctx);
+			/* Load second argument to delay slot */
+			emit_reg_move(MIPS_R_A1, r_off, ctx);
+			/* Check the error value */
+			emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx),
+				   ctx);
+			/* Load return register on DS for failures */
+			emit_reg_move(r_ret, r_zero, ctx);
+			/* Return with error */
+			emit_b(b_imm(prog->len, ctx), ctx);
+			emit_nop(ctx);
+			break;
+		case BPF_LD | BPF_W | BPF_IND:
+			/* A <- P[X + k:4] */
+			sk_load_func = sk_load_word;
+			goto load_ind;
+		case BPF_LD | BPF_H | BPF_IND:
+			/* A <- P[X + k:2] */
+			sk_load_func = sk_load_half;
+			goto load_ind;
+		case BPF_LD | BPF_B | BPF_IND:
+			/* A <- P[X + k:1] */
+			sk_load_func = sk_load_byte;
+load_ind:
+			ctx->flags |= SEEN_OFF | SEEN_X;
+			emit_addiu(r_off, r_X, k, ctx);
+			goto load_common;
+		case BPF_LDX | BPF_IMM:
+			/* X <- k */
+			ctx->flags |= SEEN_X;
+			emit_load_imm(r_X, k, ctx);
+			break;
+		case BPF_LDX | BPF_MEM:
+			/* X <- M[k] */
+			ctx->flags |= SEEN_X | SEEN_MEM;
+			emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
+			break;
+		case BPF_LDX | BPF_W | BPF_LEN:
+			/* X <- len */
+			ctx->flags |= SEEN_X | SEEN_SKB;
+			off = offsetof(struct sk_buff, len);
+			emit_load(r_X, r_skb, off, ctx);
+			break;
+		case BPF_LDX | BPF_B | BPF_MSH:
+			/* X <- 4 * (P[k:1] & 0xf) */
+			ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
+			/* Load offset to a1 */
+			emit_load_func(r_s0, (ptr)sk_load_byte, ctx);
+			/*
+			 * This may emit two instructions so it may not fit
+			 * in the delay slot. So use a0 in the delay slot.
+			 */
+			emit_load_imm(MIPS_R_A1, k, ctx);
+			emit_jalr(MIPS_R_RA, r_s0, ctx);
+			emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
+			/* Check the error value */
+			emit_bcond(MIPS_COND_NE, r_ret, 0,
+				   b_imm(prog->len, ctx), ctx);
+			emit_reg_move(r_ret, r_zero, ctx);
+			/* We are good */
+			/* X <- P[k:1] & 0xf */
+			emit_andi(r_X, r_A, 0xf, ctx);
+			/* X << 2 */
+			emit_b(b_imm(i + 1, ctx), ctx);
+			emit_sll(r_X, r_X, 2, ctx); /* delay slot */
+			break;
+		case BPF_ST:
+			/* M[k] <- A */
+			ctx->flags |= SEEN_MEM | SEEN_A;
+			emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
+			break;
+		case BPF_STX:
+			/* M[k] <- X */
+			ctx->flags |= SEEN_MEM | SEEN_X;
+			emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
+			break;
+		case BPF_ALU | BPF_ADD | BPF_K:
+			/* A += K */
+			ctx->flags |= SEEN_A;
+			emit_addiu(r_A, r_A, k, ctx);
+			break;
+		case BPF_ALU | BPF_ADD | BPF_X:
+			/* A += X */
+			ctx->flags |= SEEN_A | SEEN_X;
+			emit_addu(r_A, r_A, r_X, ctx);
+			break;
+		case BPF_ALU | BPF_SUB | BPF_K:
+			/* A -= K */
+			ctx->flags |= SEEN_A;
+			emit_addiu(r_A, r_A, -k, ctx);
+			break;
+		case BPF_ALU | BPF_SUB | BPF_X:
+			/* A -= X */
+			ctx->flags |= SEEN_A | SEEN_X;
+			emit_subu(r_A, r_A, r_X, ctx);
+			break;
+		case BPF_ALU | BPF_MUL | BPF_K:
+			/* A *= K */
+			/* Load K to scratch register before MUL */
+			ctx->flags |= SEEN_A;
+			emit_load_imm(r_s0, k, ctx);
+			emit_mul(r_A, r_A, r_s0, ctx);
+			break;
+		case BPF_ALU | BPF_MUL | BPF_X:
+			/* A *= X */
+			ctx->flags |= SEEN_A | SEEN_X;
+			emit_mul(r_A, r_A, r_X, ctx);
+			break;
+		case BPF_ALU | BPF_DIV | BPF_K:
+			/* A /= k */
+			if (k == 1)
+				break;
+			if (optimize_div(&k)) {
+				ctx->flags |= SEEN_A;
+				emit_srl(r_A, r_A, k, ctx);
+				break;
+			}
+			ctx->flags |= SEEN_A;
+			emit_load_imm(r_s0, k, ctx);
+			emit_div(r_A, r_s0, ctx);
+			break;
+		case BPF_ALU | BPF_MOD | BPF_K:
+			/* A %= k */
+			if (k == 1) {
+				ctx->flags |= SEEN_A;
+				emit_jit_reg_move(r_A, r_zero, ctx);
+			} else {
+				ctx->flags |= SEEN_A;
+				emit_load_imm(r_s0, k, ctx);
+				emit_mod(r_A, r_s0, ctx);
+			}
+			break;
+		case BPF_ALU | BPF_DIV | BPF_X:
+			/* A /= X */
+			ctx->flags |= SEEN_X | SEEN_A;
+			/* Check if r_X is zero */
+			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
+				   b_imm(prog->len, ctx), ctx);
+			emit_load_imm(r_ret, 0, ctx); /* delay slot */
+			emit_div(r_A, r_X, ctx);
+			break;
+		case BPF_ALU | BPF_MOD | BPF_X:
+			/* A %= X */
+			ctx->flags |= SEEN_X | SEEN_A;
+			/* Check if r_X is zero */
+			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
+				   b_imm(prog->len, ctx), ctx);
+			emit_load_imm(r_ret, 0, ctx); /* delay slot */
+			emit_mod(r_A, r_X, ctx);
+			break;
+		case BPF_ALU | BPF_OR | BPF_K:
+			/* A |= K */
+			ctx->flags |= SEEN_A;
+			emit_ori(r_A, r_A, k, ctx);
+			break;
+		case BPF_ALU | BPF_OR | BPF_X:
+			/* A |= X */
+			ctx->flags |= SEEN_A | SEEN_X;
+			emit_or(r_A, r_A, r_X, ctx);
+			break;
+		case BPF_ALU | BPF_XOR | BPF_K:
+			/* A ^= k */
+			ctx->flags |= SEEN_A;
+			emit_xori(r_A, r_A, k, ctx);
+			break;
+		case BPF_ANC | SKF_AD_ALU_XOR_X:
+		case BPF_ALU | BPF_XOR | BPF_X:
+			/* A ^= X */
+			ctx->flags |= SEEN_A;
+			emit_xor(r_A, r_A, r_X, ctx);
+			break;
+		case BPF_ALU | BPF_AND | BPF_K:
+			/* A &= K */
+			ctx->flags |= SEEN_A;
+			emit_andi(r_A, r_A, k, ctx);
+			break;
+		case BPF_ALU | BPF_AND | BPF_X:
+			/* A &= X */
+			ctx->flags |= SEEN_A | SEEN_X;
+			emit_and(r_A, r_A, r_X, ctx);
+			break;
+		case BPF_ALU | BPF_LSH | BPF_K:
+			/* A <<= K */
+			ctx->flags |= SEEN_A;
+			emit_sll(r_A, r_A, k, ctx);
+			break;
+		case BPF_ALU | BPF_LSH | BPF_X:
+			/* A <<= X */
+			ctx->flags |= SEEN_A | SEEN_X;
+			emit_sllv(r_A, r_A, r_X, ctx);
+			break;
+		case BPF_ALU | BPF_RSH | BPF_K:
+			/* A >>= K */
+			ctx->flags |= SEEN_A;
+			emit_srl(r_A, r_A, k, ctx);
+			break;
+		case BPF_ALU | BPF_RSH | BPF_X:
+			ctx->flags |= SEEN_A | SEEN_X;
+			emit_srlv(r_A, r_A, r_X, ctx);
+			break;
+		case BPF_ALU | BPF_NEG:
+			/* A = -A */
+			ctx->flags |= SEEN_A;
+			emit_neg(r_A, ctx);
+			break;
+		case BPF_JMP | BPF_JA:
+			/* pc += K */
+			emit_b(b_imm(i + k + 1, ctx), ctx);
+			emit_nop(ctx);
+			break;
+		case BPF_JMP | BPF_JEQ | BPF_K:
+			/* pc += ( A == K ) ? pc->jt : pc->jf */
+			condt = MIPS_COND_EQ | MIPS_COND_K;
+			goto jmp_cmp;
+		case BPF_JMP | BPF_JEQ | BPF_X:
+			ctx->flags |= SEEN_X;
+			/* pc += ( A == X ) ? pc->jt : pc->jf */
+			condt = MIPS_COND_EQ | MIPS_COND_X;
+			goto jmp_cmp;
+		case BPF_JMP | BPF_JGE | BPF_K:
+			/* pc += ( A >= K ) ? pc->jt : pc->jf */
+			condt = MIPS_COND_GE | MIPS_COND_K;
+			goto jmp_cmp;
+		case BPF_JMP | BPF_JGE | BPF_X:
+			ctx->flags |= SEEN_X;
+			/* pc += ( A >= X ) ? pc->jt : pc->jf */
+			condt = MIPS_COND_GE | MIPS_COND_X;
+			goto jmp_cmp;
+		case BPF_JMP | BPF_JGT | BPF_K:
+			/* pc += ( A > K ) ? pc->jt : pc->jf */
+			condt = MIPS_COND_GT | MIPS_COND_K;
+			goto jmp_cmp;
+		case BPF_JMP | BPF_JGT | BPF_X:
+			ctx->flags |= SEEN_X;
+			/* pc += ( A > X ) ? pc->jt : pc->jf */
+			condt = MIPS_COND_GT | MIPS_COND_X;
+jmp_cmp:
+			/* Greater or Equal */
+			if ((condt & MIPS_COND_GE) ||
+			    (condt & MIPS_COND_GT)) {
+				if (condt & MIPS_COND_K) { /* K */
+					ctx->flags |= SEEN_A;
+					emit_sltiu(r_s0, r_A, k, ctx);
+				} else { /* X */
+					ctx->flags |= SEEN_A |
+						SEEN_X;
+					emit_sltu(r_s0, r_A, r_X, ctx);
+				}
+				/* A < (K|X) ? r_scratch = 1 */
+				b_off = b_imm(i + inst->jf + 1, ctx);
+				emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
+					   ctx);
+				emit_nop(ctx);
+				/* A > (K|X) ? scratch = 0 */
+				if (condt & MIPS_COND_GT) {
+					/* Checking for equality */
+					ctx->flags |= SEEN_A | SEEN_X;
+					if (condt & MIPS_COND_K)
+						emit_load_imm(r_s0, k, ctx);
+					else
+						emit_jit_reg_move(r_s0, r_X,
+								  ctx);
+					b_off = b_imm(i + inst->jf + 1, ctx);
+					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
+						   b_off, ctx);
+					emit_nop(ctx);
+					/* Finally, A > K|X */
+					b_off = b_imm(i + inst->jt + 1, ctx);
+					emit_b(b_off, ctx);
+					emit_nop(ctx);
+				} else {
+					/* A >= (K|X) so jump */
+					b_off = b_imm(i + inst->jt + 1, ctx);
+					emit_b(b_off, ctx);
+					emit_nop(ctx);
+				}
+			} else {
+				/* A == K|X */
+				if (condt & MIPS_COND_K) { /* K */
+					ctx->flags |= SEEN_A;
+					emit_load_imm(r_s0, k, ctx);
+					/* jump true */
+					b_off = b_imm(i + inst->jt + 1, ctx);
+					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
+						   b_off, ctx);
+					emit_nop(ctx);
+					/* jump false */
+					b_off = b_imm(i + inst->jf + 1,
+						      ctx);
+					emit_bcond(MIPS_COND_NE, r_A, r_s0,
+						   b_off, ctx);
+					emit_nop(ctx);
+				} else { /* X */
+					/* jump true */
+					ctx->flags |= SEEN_A | SEEN_X;
+					b_off = b_imm(i + inst->jt + 1,
+						      ctx);
+					emit_bcond(MIPS_COND_EQ, r_A, r_X,
+						   b_off, ctx);
+					emit_nop(ctx);
+					/* jump false */
+					b_off = b_imm(i + inst->jf + 1, ctx);
+					emit_bcond(MIPS_COND_NE, r_A, r_X,
+						   b_off, ctx);
+					emit_nop(ctx);
+				}
+			}
+			break;
+		case BPF_JMP | BPF_JSET | BPF_K:
+			ctx->flags |= SEEN_A;
+			/* pc += (A & K) ? pc -> jt : pc -> jf */
+			emit_load_imm(r_s1, k, ctx);
+			emit_and(r_s0, r_A, r_s1, ctx);
+			/* jump true */
+			b_off = b_imm(i + inst->jt + 1, ctx);
+			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
+			emit_nop(ctx);
+			/* jump false */
+			b_off = b_imm(i + inst->jf + 1, ctx);
+			emit_b(b_off, ctx);
+			emit_nop(ctx);
+			break;
+		case BPF_JMP | BPF_JSET | BPF_X:
+			ctx->flags |= SEEN_X | SEEN_A;
+			/* pc += (A & X) ? pc -> jt : pc -> jf */
+			emit_and(r_s0, r_A, r_X, ctx);
+			/* jump true */
+			b_off = b_imm(i + inst->jt + 1, ctx);
+			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
+			emit_nop(ctx);
+			/* jump false */
+			b_off = b_imm(i + inst->jf + 1, ctx);
+			emit_b(b_off, ctx);
+			emit_nop(ctx);
+			break;
+		case BPF_RET | BPF_A:
+			ctx->flags |= SEEN_A;
+			if (i != prog->len - 1)
+				/*
+				 * If this is not the last instruction
+				 * then jump to the epilogue
+				 */
+				emit_b(b_imm(prog->len, ctx), ctx);
+			emit_reg_move(r_ret, r_A, ctx); /* delay slot */
+			break;
+		case BPF_RET | BPF_K:
+			/*
+			 * It can emit two instructions, so it does not fit in
+			 * the delay slot.
+			 */
+			emit_load_imm(r_ret, k, ctx);
+			if (i != prog->len - 1) {
+				/*
+				 * If this is not the last instruction
+				 * then jump to the epilogue
+				 */
+				emit_b(b_imm(prog->len, ctx), ctx);
+				emit_nop(ctx);
+			}
+			break;
+		case BPF_MISC | BPF_TAX:
+			/* X = A */
+			ctx->flags |= SEEN_X | SEEN_A;
+			emit_jit_reg_move(r_X, r_A, ctx);
+			break;
+		case BPF_MISC | BPF_TXA:
+			/* A = X */
+			ctx->flags |= SEEN_A | SEEN_X;
+			emit_jit_reg_move(r_A, r_X, ctx);
+			break;
+		/* AUX */
+		case BPF_ANC | SKF_AD_PROTOCOL:
+			/* A = ntohs(skb->protocol) */
+			ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
+			BUILD_BUG_ON(sizeof_field(struct sk_buff,
+						  protocol) != 2);
+			off = offsetof(struct sk_buff, protocol);
+			emit_half_load(r_A, r_skb, off, ctx);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+			/* This needs little endian fixup */
+			if (cpu_has_wsbh) {
+				/* R2 and later have the wsbh instruction */
+				emit_wsbh(r_A, r_A, ctx);
+			} else {
+				/* Get first byte */
+				emit_andi(r_tmp_imm, r_A, 0xff, ctx);
+				/* Shift it */
+				emit_sll(r_tmp, r_tmp_imm, 8, ctx);
+				/* Get second byte */
+				emit_srl(r_tmp_imm, r_A, 8, ctx);
+				emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
+				/* Put everything together in r_A */
+				emit_or(r_A, r_tmp, r_tmp_imm, ctx);
+			}
+#endif
+			break;
+		case BPF_ANC | SKF_AD_CPU:
+			ctx->flags |= SEEN_A | SEEN_OFF;
+			/* A = current_thread_info()->cpu */
+			BUILD_BUG_ON(sizeof_field(struct thread_info,
+						  cpu) != 4);
+			off = offsetof(struct thread_info, cpu);
+			/* $28/gp points to the thread_info struct */
+			emit_load(r_A, 28, off, ctx);
+			break;
+		case BPF_ANC | SKF_AD_IFINDEX:
+			/* A = skb->dev->ifindex */
+		case BPF_ANC | SKF_AD_HATYPE:
+			/* A = skb->dev->type */
+			ctx->flags |= SEEN_SKB | SEEN_A;
+			off = offsetof(struct sk_buff, dev);
+			/* Load *dev pointer */
+			emit_load_ptr(r_s0, r_skb, off, ctx);
+			/* error (0) in the delay slot */
+			emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
+				   b_imm(prog->len, ctx), ctx);
+			emit_reg_move(r_ret, r_zero, ctx);
+			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
+				BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
+				off = offsetof(struct net_device, ifindex);
+				emit_load(r_A, r_s0, off, ctx);
+			} else { /* (code == (BPF_ANC | SKF_AD_HATYPE) */
+				BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);
+				off = offsetof(struct net_device, type);
+				emit_half_load_unsigned(r_A, r_s0, off, ctx);
+			}
+			break;
+		case BPF_ANC | SKF_AD_MARK:
+			ctx->flags |= SEEN_SKB | SEEN_A;
+			BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);
+			off = offsetof(struct sk_buff, mark);
+			emit_load(r_A, r_skb, off, ctx);
+			break;
+		case BPF_ANC | SKF_AD_RXHASH:
+			ctx->flags |= SEEN_SKB | SEEN_A;
+			BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);
+			off = offsetof(struct sk_buff, hash);
+			emit_load(r_A, r_skb, off, ctx);
+			break;
+		case BPF_ANC | SKF_AD_VLAN_TAG:
+			ctx->flags |= SEEN_SKB | SEEN_A;
+			BUILD_BUG_ON(sizeof_field(struct sk_buff,
+						  vlan_tci) != 2);
+			off = offsetof(struct sk_buff, vlan_tci);
+			emit_half_load_unsigned(r_A, r_skb, off, ctx);
+			break;
+		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
+			ctx->flags |= SEEN_SKB | SEEN_A;
+			emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx);
+			if (PKT_VLAN_PRESENT_BIT)
+				emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx);
+			if (PKT_VLAN_PRESENT_BIT < 7)
+				emit_andi(r_A, r_A, 1, ctx);
+			break;
+		case BPF_ANC | SKF_AD_PKTTYPE:
+			ctx->flags |= SEEN_SKB;
+
+			emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
+			/* Keep only the last 3 bits */
+			emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
+#ifdef __BIG_ENDIAN_BITFIELD
+			/* Get the actual packet type to the lower 3 bits */
+			emit_srl(r_A, r_A, 5, ctx);
+#endif
+			break;
+		case BPF_ANC | SKF_AD_QUEUE:
+			ctx->flags |= SEEN_SKB | SEEN_A;
+			BUILD_BUG_ON(sizeof_field(struct sk_buff,
+						  queue_mapping) != 2);
+			BUILD_BUG_ON(offsetof(struct sk_buff,
+					      queue_mapping) > 0xff);
+			off = offsetof(struct sk_buff, queue_mapping);
+			emit_half_load_unsigned(r_A, r_skb, off, ctx);
+			break;
+		default:
+			pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
+				 inst->code);
+			return -1;
+		}
+	}
+
+	/* compute offsets only during the first pass */
+	if (ctx->target == NULL)
+		ctx->offsets[i] = ctx->idx * 4;
+
+	return 0;
+}
+
+void bpf_jit_compile(struct bpf_prog *fp)
+{
+	struct jit_ctx ctx;
+	unsigned int alloc_size, tmp_idx;
+
+	if (!bpf_jit_enable)
+		return;
+
+	memset(&ctx, 0, sizeof(ctx));
+
+	ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
+	if (ctx.offsets == NULL)
+		return;
+
+	ctx.skf = fp;
+
+	if (build_body(&ctx))
+		goto out;
+
+	tmp_idx = ctx.idx;
+	build_prologue(&ctx);
+	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
+	/* just to complete the ctx.idx count */
+	build_epilogue(&ctx);
+
+	alloc_size = 4 * ctx.idx;
+	ctx.target = module_alloc(alloc_size);
+	if (ctx.target == NULL)
+		goto out;
+
+	/* Clean it */
+	memset(ctx.target, 0, alloc_size);
+
+	ctx.idx = 0;
+
+	/* Generate the actual JIT code */
+	build_prologue(&ctx);
+	build_body(&ctx);
+	build_epilogue(&ctx);
+
+	/* Update the icache */
+	flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));
+
+	if (bpf_jit_enable > 1)
+		/* Dump JIT code */
+		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
+
+	fp->bpf_func = (void *)ctx.target;
+	fp->jited = 1;
+
+out:
+	kfree(ctx.offsets);
+}
+
+void bpf_jit_free(struct bpf_prog *fp)
+{
+	if (fp->jited)
+		module_memfree(fp->bpf_func);
+
+	bpf_prog_unlock_free(fp);
+}
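
The bpf_jit_compile() function above uses a common two-pass layout: a sizing
pass run with ctx.target == NULL that only counts instructions and records
per-instruction offsets, followed by an emission pass into memory obtained
from module_alloc(). A minimal, self-contained sketch of that pattern is
shown below; it is illustrative only, with a made-up "instruction" encoding,
and is not the kernel JIT.

/* Two-pass emitter sketch: pass 1 sizes, pass 2 emits. */
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	unsigned int idx;	/* instructions emitted so far */
	unsigned int *offsets;	/* per-source-insn instruction offsets */
	unsigned int *target;	/* NULL during the sizing pass */
};

static void emit(struct ctx *c, unsigned int word)
{
	if (c->target)		/* pass 2: actually store the word */
		c->target[c->idx] = word;
	c->idx++;		/* both passes: count it */
}

static void build(struct ctx *c, const unsigned int *prog, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (!c->target)	/* record offsets on the sizing pass only */
			c->offsets[i] = c->idx;
		emit(c, prog[i] * 2);	/* pretend every insn emits one word */
		if (prog[i] & 1)
			emit(c, 0);	/* odd insns need a filler word too */
	}
}

int main(void)
{
	const unsigned int prog[] = { 1, 2, 3 };
	const unsigned int len = sizeof(prog) / sizeof(prog[0]);
	struct ctx c = { 0, NULL, NULL };
	unsigned int i;

	c.offsets = calloc(len + 1, sizeof(*c.offsets));
	build(&c, prog, len);			/* pass 1: sizing */

	c.target = calloc(c.idx, sizeof(*c.target));
	c.idx = 0;
	build(&c, prog, len);			/* pass 2: emission */

	for (i = 0; i < len; i++)
		printf("insn %u starts at word %u\n", i, c.offsets[i]);

	free(c.offsets);
	free(c.target);
	return 0;
}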
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
new file mode 100644
index 0000000..57154c5
--- /dev/null
+++ b/arch/mips/net/bpf_jit_asm.S
@@ -0,0 +1,285 @@
+/*
+ * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
+ * compiler.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <asm/asm.h>
+#include <asm/isa-rev.h>
+#include <asm/regdef.h>
+#include "bpf_jit.h"
+
+/* ABI
+ *
+ * r_skb_hl	skb header length
+ * r_skb_data	skb data
+ * r_off(a1)	offset register
+ * r_A		BPF register A
+ * r_X		BPF register X
+ * r_skb(a0)	*skb
+ * r_M		*scratch memory
+ * r_skb_len	skb length
+ * r_s0		Scratch register 0
+ * r_s1		Scratch register 1
+ *
+ * On entry:
+ * a0: *skb
+ * a1: offset (imm or imm + X)
+ *
+ * All non-BPF-ABI registers are free for use. On return, we only
+ * care about r_ret. The BPF-ABI registers are assumed to remain
+ * unmodified during the entire filter operation.
+ */
+
+#define skb	a0
+#define offset	a1
+#define SKF_LL_OFF  (-0x200000) /* Can't include linux/filter.h in assembly */
+
+	/* We know better :) so prevent assembler reordering etc */
+	.set 	noreorder
+
+#define is_offset_negative(TYPE)				\
+	/* If offset is negative we have more work to do */	\
+	slti	t0, offset, 0;					\
+	bgtz	t0, bpf_slow_path_##TYPE##_neg;			\
+	/* Be careful what follows in DS. */
+
+#define is_offset_in_header(SIZE, TYPE)				\
+	/* Reading from header? */				\
+	addiu	$r_s0, $r_skb_hl, -SIZE;			\
+	slt	t0, $r_s0, offset;				\
+	bgtz	t0, bpf_slow_path_##TYPE;			\
+
+LEAF(sk_load_word)
+	is_offset_negative(word)
+FEXPORT(sk_load_word_positive)
+	is_offset_in_header(4, word)
+	/* Offset within header boundaries */
+	PTR_ADDU t1, $r_skb_data, offset
+	.set	reorder
+	lw	$r_A, 0(t1)
+	.set	noreorder
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if MIPS_ISA_REV >= 2
+	wsbh	t0, $r_A
+	rotr	$r_A, t0, 16
+# else
+	sll	t0, $r_A, 24
+	srl	t1, $r_A, 24
+	srl	t2, $r_A, 8
+	or	t0, t0, t1
+	andi	t2, t2, 0xff00
+	andi	t1, $r_A, 0xff00
+	or	t0, t0, t2
+	sll	t1, t1, 8
+	or	$r_A, t0, t1
+# endif
+#endif
+	jr	$r_ra
+	 move	$r_ret, zero
+	END(sk_load_word)
+
+LEAF(sk_load_half)
+	is_offset_negative(half)
+FEXPORT(sk_load_half_positive)
+	is_offset_in_header(2, half)
+	/* Offset within header boundaries */
+	PTR_ADDU t1, $r_skb_data, offset
+	lhu	$r_A, 0(t1)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if MIPS_ISA_REV >= 2
+	wsbh	$r_A, $r_A
+# else
+	sll	t0, $r_A, 8
+	srl	t1, $r_A, 8
+	andi	t0, t0, 0xff00
+	or	$r_A, t0, t1
+# endif
+#endif
+	jr	$r_ra
+	 move	$r_ret, zero
+	END(sk_load_half)
+
+LEAF(sk_load_byte)
+	is_offset_negative(byte)
+FEXPORT(sk_load_byte_positive)
+	is_offset_in_header(1, byte)
+	/* Offset within header boundaries */
+	PTR_ADDU t1, $r_skb_data, offset
+	lbu	$r_A, 0(t1)
+	jr	$r_ra
+	 move	$r_ret, zero
+	END(sk_load_byte)
+
+/*
+ * call skb_copy_bits:
+ * (prototype in linux/skbuff.h)
+ *
+ * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
+ *
+ * o32 mandates we leave 4 spaces for argument registers in case
+ * the callee needs to use them. Even though we don't care about
+ * the argument registers ourselves, we need to allocate that space
+ * to remain ABI compliant since the callee may want to use that space.
+ * We also allocate 2 more spaces for $r_ra and our return register (*to).
+ *
+ * n64 is a bit different. The *caller* will allocate the space to preserve
+ * the arguments. So in 64-bit kernels we allocate the 4-arg space even
+ * though it is not strictly needed; it does not matter much in practice.
+ *
+ * (void *to) is returned in r_s0
+ *
+ */
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define DS_OFFSET(SIZE) (4 * SZREG)
+#else
+#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
+#endif
+#define bpf_slow_path_common(SIZE)				\
+	/* Quick check. Are we within reasonable boundaries? */ \
+	LONG_ADDIU	$r_s1, $r_skb_len, -SIZE;		\
+	sltu		$r_s0, offset, $r_s1;			\
+	beqz		$r_s0, fault;				\
+	/* Load 4th argument in DS */				\
+	 LONG_ADDIU	a3, zero, SIZE;				\
+	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG);		\
+	PTR_LA		t0, skb_copy_bits;			\
+	PTR_S		$r_ra, (5 * SZREG)($r_sp);		\
+	/* Assign low slot to a2 */				\
+	PTR_ADDIU	a2, $r_sp, DS_OFFSET(SIZE);		\
+	jalr		t0;					\
+	/* Reset our destination slot (DS but it's ok) */	\
+	 INT_S		zero, (4 * SZREG)($r_sp);		\
+	/*							\
+	 * skb_copy_bits returns 0 on success and -EFAULT	\
+	 * on error. Our data live in a2. Do not bother with	\
+	 * our data if an error has been returned.		\
+	 */							\
+	/* Restore our frame */					\
+	PTR_L		$r_ra, (5 * SZREG)($r_sp);		\
+	INT_L		$r_s0, (4 * SZREG)($r_sp);		\
+	bltz		v0, fault;				\
+	 PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG;		\
+	move		$r_ret, zero;				\
+
+NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
+	bpf_slow_path_common(4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if MIPS_ISA_REV >= 2
+	wsbh	t0, $r_s0
+	jr	$r_ra
+	 rotr	$r_A, t0, 16
+# else
+	sll	t0, $r_s0, 24
+	srl	t1, $r_s0, 24
+	srl	t2, $r_s0, 8
+	or	t0, t0, t1
+	andi	t2, t2, 0xff00
+	andi	t1, $r_s0, 0xff00
+	or	t0, t0, t2
+	sll	t1, t1, 8
+	jr	$r_ra
+	 or	$r_A, t0, t1
+# endif
+#else
+	jr	$r_ra
+	 move	$r_A, $r_s0
+#endif
+
+	END(bpf_slow_path_word)
+
+NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
+	bpf_slow_path_common(2)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if MIPS_ISA_REV >= 2
+	jr	$r_ra
+	 wsbh	$r_A, $r_s0
+# else
+	sll	t0, $r_s0, 8
+	andi	t1, $r_s0, 0xff00
+	andi	t0, t0, 0xff00
+	srl	t1, t1, 8
+	jr	$r_ra
+	 or	$r_A, t0, t1
+# endif
+#else
+	jr	$r_ra
+	 move	$r_A, $r_s0
+#endif
+
+	END(bpf_slow_path_half)
+
+NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
+	bpf_slow_path_common(1)
+	jr	$r_ra
+	 move	$r_A, $r_s0
+
+	END(bpf_slow_path_byte)
+
+/*
+ * Negative entry points
+ */
+	.macro bpf_is_end_of_data
+	li	t0, SKF_LL_OFF
+	/* Reading link layer data? */
+	slt	t1, offset, t0
+	bgtz	t1, fault
+	/* Be careful what follows in DS. */
+	.endm
+/*
+ * call bpf_internal_load_pointer_neg_helper:
+ * (prototype in linux/filter.h)
+ *
+ * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
+ *                                            int k, unsigned int size)
+ *
+ * see above (bpf_slow_path_common) for ABI restrictions
+ */
+#define bpf_negative_common(SIZE)					\
+	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG);			\
+	PTR_LA		t0, bpf_internal_load_pointer_neg_helper;	\
+	PTR_S		$r_ra, (5 * SZREG)($r_sp);			\
+	jalr		t0;						\
+	 li		a2, SIZE;					\
+	PTR_L		$r_ra, (5 * SZREG)($r_sp);			\
+	/* Check return pointer */					\
+	beqz		v0, fault;					\
+	 PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG;			\
+	/* Preserve our pointer */					\
+	move		$r_s0, v0;					\
+	/* Set return value */						\
+	move		$r_ret, zero;					\
+
+bpf_slow_path_word_neg:
+	bpf_is_end_of_data
+NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
+	bpf_negative_common(4)
+	jr	$r_ra
+	 lw	$r_A, 0($r_s0)
+	END(sk_load_word_negative)
+
+bpf_slow_path_half_neg:
+	bpf_is_end_of_data
+NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
+	bpf_negative_common(2)
+	jr	$r_ra
+	 lhu	$r_A, 0($r_s0)
+	END(sk_load_half_negative)
+
+bpf_slow_path_byte_neg:
+	bpf_is_end_of_data
+NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
+	bpf_negative_common(1)
+	jr	$r_ra
+	 lbu	$r_A, 0($r_s0)
+	END(sk_load_byte_negative)
+
+fault:
+	jr	$r_ra
+	 addiu $r_ret, zero, 1
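
The little-endian fixups in sk_load_word and bpf_slow_path_word above
(wsbh/rotr on MIPS32r2 and later, otherwise the sll/srl/andi/or chain) are an
ordinary 32-bit byte swap that brings network-order packet data into host
order. A minimal C sketch of the same computation, for reference only:

#include <stdint.h>
#include <stdio.h>

/* Equivalent of the MIPS sll/srl/andi/or sequence: swap all four bytes. */
static uint32_t bswap32(uint32_t x)
{
	return (x << 24) |			/* byte 0 -> byte 3 */
	       ((x & 0x0000ff00u) << 8) |	/* byte 1 -> byte 2 */
	       ((x >> 8) & 0x0000ff00u) |	/* byte 2 -> byte 1 */
	       (x >> 24);			/* byte 3 -> byte 0 */
}

int main(void)
{
	printf("0x%08x\n", bswap32(0x11223344u));	/* prints 0x44332211 */
	return 0;
}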
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 8e26b12..d85cbf8 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -10,7 +10,7 @@
 #include <asm/sn/addrs.h>
 #include <asm/sn/types.h>
 #include <asm/sn/klconfig.h>
-#include <asm/sn/hub.h>
+#include <asm/sn/agent.h>
 #include <asm/sn/ioc3.h>
 #include <asm/pci/bridge.h>
 
diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c
index 5c1a196..3b2552f 100644
--- a/arch/mips/pci/pci-xtalk-bridge.c
+++ b/arch/mips/pci/pci-xtalk-bridge.c
@@ -437,17 +437,28 @@ static int bridge_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	struct irq_alloc_info info;
 	int irq;
 
-	irq = bc->pci_int[slot];
+	switch (pin) {
+	case PCI_INTERRUPT_UNKNOWN:
+	case PCI_INTERRUPT_INTA:
+	case PCI_INTERRUPT_INTC:
+		pin = 0;
+		break;
+	case PCI_INTERRUPT_INTB:
+	case PCI_INTERRUPT_INTD:
+		pin = 1;
+	}
+
+	irq = bc->pci_int[slot][pin];
 	if (irq == -1) {
 		info.ctrl = bc;
 		info.nasid = bc->nasid;
-		info.pin = slot;
+		info.pin = bc->int_mapping[slot][pin];
 
 		irq = irq_domain_alloc_irqs(bc->domain, 1, bc->nasid, &info);
 		if (irq < 0)
 			return irq;
 
-		bc->pci_int[slot] = irq;
+		bc->pci_int[slot][pin] = irq;
 	}
 	return irq;
 }
@@ -458,21 +469,26 @@ static void bridge_setup_ip27_baseio6g(struct bridge_controller *bc)
 {
 	bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP27_BASEIO6G);
 	bc->ioc3_sid[6] = IOC3_SID(IOC3_SUBSYS_IP27_MIO);
+	bc->int_mapping[2][1] = 4;
+	bc->int_mapping[6][1] = 6;
 }
 
 static void bridge_setup_ip27_baseio(struct bridge_controller *bc)
 {
 	bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP27_BASEIO);
+	bc->int_mapping[2][1] = 4;
 }
 
 static void bridge_setup_ip29_baseio(struct bridge_controller *bc)
 {
 	bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP29_SYSBOARD);
+	bc->int_mapping[2][1] = 3;
 }
 
 static void bridge_setup_ip30_sysboard(struct bridge_controller *bc)
 {
 	bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP30_SYSBOARD);
+	bc->int_mapping[2][1] = 4;
 }
 
 static void bridge_setup_menet(struct bridge_controller *bc)
@@ -483,6 +499,26 @@ static void bridge_setup_menet(struct bridge_controller *bc)
 	bc->ioc3_sid[3] = IOC3_SID(IOC3_SUBSYS_MENET4);
 }
 
+static void bridge_setup_io7(struct bridge_controller *bc)
+{
+	bc->ioc3_sid[4] = IOC3_SID(IOC3_SUBSYS_IO7);
+}
+
+static void bridge_setup_io8(struct bridge_controller *bc)
+{
+	bc->ioc3_sid[4] = IOC3_SID(IOC3_SUBSYS_IO8);
+}
+
+static void bridge_setup_io9(struct bridge_controller *bc)
+{
+	bc->ioc3_sid[1] = IOC3_SID(IOC3_SUBSYS_IO9);
+}
+
+static void bridge_setup_ip34_fuel_sysboard(struct bridge_controller *bc)
+{
+	bc->ioc3_sid[4] = IOC3_SID(IOC3_SUBSYS_IP34_SYSBOARD);
+}
+
 #define BRIDGE_BOARD_SETUP(_partno, _setup)	\
 	{ .match = _partno, .setup = _setup }
 
@@ -500,6 +536,10 @@ static const struct {
 	BRIDGE_BOARD_SETUP("030-0887-", bridge_setup_ip30_sysboard),
 	BRIDGE_BOARD_SETUP("030-1467-", bridge_setup_ip30_sysboard),
 	BRIDGE_BOARD_SETUP("030-0873-", bridge_setup_menet),
+	BRIDGE_BOARD_SETUP("030-1557-", bridge_setup_io7),
+	BRIDGE_BOARD_SETUP("030-1673-", bridge_setup_io8),
+	BRIDGE_BOARD_SETUP("030-1771-", bridge_setup_io9),
+	BRIDGE_BOARD_SETUP("030-1707-", bridge_setup_ip34_fuel_sysboard),
 };
 
 static void bridge_setup_board(struct bridge_controller *bc, char *partnum)
@@ -655,7 +695,11 @@ static int bridge_probe(struct platform_device *pdev)
 
 	for (slot = 0; slot < 8; slot++) {
 		bridge_set(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
-		bc->pci_int[slot] = -1;
+		bc->pci_int[slot][0] = -1;
+		bc->pci_int[slot][1] = -1;
+		/* default interrupt pin mapping */
+		bc->int_mapping[slot][0] = slot;
+		bc->int_mapping[slot][1] = slot ^ 4;
 	}
 	bridge_read(bc, b_wid_tflush);	  /* wait until Bridge PIO complete */
 
diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c
index 0ddeb31..bdf5380 100644
--- a/arch/mips/ralink/ill_acc.c
+++ b/arch/mips/ralink/ill_acc.c
@@ -67,11 +67,13 @@ static int __init ill_acc_of_setup(void)
 	irq = irq_of_parse_and_map(np, 0);
 	if (!irq) {
 		dev_err(&pdev->dev, "failed to get irq\n");
+		put_device(&pdev->dev);
 		return -EINVAL;
 	}
 
 	if (request_irq(irq, ill_acc_irq_handler, 0, "ill_acc", &pdev->dev)) {
 		dev_err(&pdev->dev, "failed to request irq\n");
+		put_device(&pdev->dev);
 		return -EINVAL;
 	}
 
diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
index 282b47c..de0768a 100644
--- a/arch/mips/sgi-ip22/ip22-gio.c
+++ b/arch/mips/sgi-ip22/ip22-gio.c
@@ -47,8 +47,9 @@ static struct device gio_bus = {
  * Used by a driver to check whether an of_device present in the
  * system is in its list of supported devices.
  */
-const struct gio_device_id *gio_match_device(const struct gio_device_id *match,
-		     const struct gio_device *dev)
+static const struct gio_device_id *
+gio_match_device(const struct gio_device_id *match,
+		 const struct gio_device *dev)
 {
 	const struct gio_device_id *ids;
 
@@ -58,7 +59,6 @@ const struct gio_device_id *gio_match_device(const struct gio_device_id *match,
 
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(gio_match_device);
 
 struct gio_device *gio_dev_get(struct gio_device *dev)
 {
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
index 73ad29b..5a38ae6 100644
--- a/arch/mips/sgi-ip27/ip27-berr.c
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -16,8 +16,8 @@
 
 #include <asm/ptrace.h>
 #include <asm/sn/addrs.h>
+#include <asm/sn/agent.h>
 #include <asm/sn/arch.h>
-#include <asm/sn/sn0/hub.h>
 #include <asm/tlbdebug.h>
 #include <asm/traps.h>
 #include <linux/uaccess.h>
@@ -30,29 +30,31 @@ static void dump_hub_information(unsigned long errst0, unsigned long errst1)
 		{ "WERR", "Uncached Partial Write", "PWERR", "Write Timeout",
 		  NULL, NULL, NULL, NULL }
 	};
-	int wrb = errst1 & PI_ERR_ST1_WRBRRB_MASK;
+	union pi_err_stat0 st0;
+	union pi_err_stat1 st1;
 
-	if (!(errst0 & PI_ERR_ST0_VALID_MASK)) {
-		printk("Hub does not contain valid error information\n");
+	st0.pi_stat0_word = errst0;
+	st1.pi_stat1_word = errst1;
+
+	if (!st0.pi_stat0_fmt.s0_valid) {
+		pr_info("Hub does not contain valid error information\n");
 		return;
 	}
 
-
-	printk("Hub has valid error information:\n");
-	if (errst0 & PI_ERR_ST0_OVERRUN_MASK)
-		printk("Overrun is set.	 Error stack may contain additional "
+	pr_info("Hub has valid error information:\n");
+	if (st0.pi_stat0_fmt.s0_ovr_run)
+		pr_info("Overrun is set. Error stack may contain additional "
 		       "information.\n");
-	printk("Hub error address is %08lx\n",
-	       (errst0 & PI_ERR_ST0_ADDR_MASK) >> (PI_ERR_ST0_ADDR_SHFT - 3));
-	printk("Incoming message command 0x%lx\n",
-	       (errst0 & PI_ERR_ST0_CMD_MASK) >> PI_ERR_ST0_CMD_SHFT);
-	printk("Supplemental field of incoming message is 0x%lx\n",
-	       (errst0 & PI_ERR_ST0_SUPPL_MASK) >> PI_ERR_ST0_SUPPL_SHFT);
-	printk("T5 Rn (for RRB only) is 0x%lx\n",
-	       (errst0 & PI_ERR_ST0_REQNUM_MASK) >> PI_ERR_ST0_REQNUM_SHFT);
-	printk("Error type is %s\n", err_type[wrb]
-	       [(errst0 & PI_ERR_ST0_TYPE_MASK) >> PI_ERR_ST0_TYPE_SHFT]
-		? : "invalid");
+	pr_info("Hub error address is %08lx\n",
+		(unsigned long)st0.pi_stat0_fmt.s0_addr);
+	pr_info("Incoming message command 0x%lx\n",
+		(unsigned long)st0.pi_stat0_fmt.s0_cmd);
+	pr_info("Supplemental field of incoming message is 0x%lx\n",
+		(unsigned long)st0.pi_stat0_fmt.s0_supl);
+	pr_info("T5 Rn (for RRB only) is 0x%lx\n",
+		(unsigned long)st0.pi_stat0_fmt.s0_t5_req);
+	pr_info("Error type is %s\n", err_type[st1.pi_stat1_fmt.s1_rw_rb]
+	       [st0.pi_stat0_fmt.s0_err_type] ? : "invalid");
 }
 
 int ip27_be_handler(struct pt_regs *regs, int is_fixup)
diff --git a/arch/mips/sgi-ip27/ip27-common.h b/arch/mips/sgi-ip27/ip27-common.h
index 3ffbcf9..ed008a0 100644
--- a/arch/mips/sgi-ip27/ip27-common.h
+++ b/arch/mips/sgi-ip27/ip27-common.h
@@ -3,8 +3,18 @@
 #ifndef __IP27_COMMON_H
 #define __IP27_COMMON_H
 
-extern void ip27_reboot_setup(void);
+extern nasid_t master_nasid;
+
+extern void cpu_node_probe(void);
 extern void hub_rt_clock_event_init(void);
+extern void hub_rtc_init(nasid_t nasid);
+extern void install_cpu_nmi_handler(int slice);
+extern void install_ipi(void);
+extern void ip27_reboot_setup(void);
 extern const struct plat_smp_ops ip27_smp_ops;
+extern unsigned long node_getfirstfree(nasid_t nasid);
+extern void per_cpu_init(void);
+extern void replicate_kernel_text(void);
+extern void setup_replication_mask(void);
 
 #endif /* __IP27_COMMON_H */
diff --git a/arch/mips/sgi-ip27/ip27-console.c b/arch/mips/sgi-ip27/ip27-console.c
index 5886bee..7737a88 100644
--- a/arch/mips/sgi-ip27/ip27-console.c
+++ b/arch/mips/sgi-ip27/ip27-console.c
@@ -9,14 +9,15 @@
 #include <asm/page.h>
 #include <asm/setup.h>
 #include <asm/sn/addrs.h>
-#include <asm/sn/sn0/hub.h>
+#include <asm/sn/agent.h>
 #include <asm/sn/klconfig.h>
 #include <asm/sn/ioc3.h>
-#include <asm/sn/sn_private.h>
 
 #include <linux/serial.h>
 #include <linux/serial_core.h>
 
+#include "ip27-common.h"
+
 #define IOC3_CLK	(22000000 / 3)
 #define IOC3_FLAGS	(0)
 
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
index a538d0c..8352eb6 100644
--- a/arch/mips/sgi-ip27/ip27-hubio.c
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -11,7 +11,9 @@
 #include <linux/mmzone.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
-#include <asm/sn/hub.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/io.h>
+#include <asm/xtalk/xtalk.h>
 
 
 static int force_fire_and_forget = 1;
@@ -82,7 +84,7 @@ unsigned long hub_pio_map(nasid_t nasid, xwidgetnum_t widget,
  */
 static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
 {
-	iprb_t prb;
+	union iprb_u prb;
 	int prb_offset;
 
 	/*
@@ -135,7 +137,7 @@ static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
 static void hub_set_piomode(nasid_t nasid)
 {
 	u64 ii_iowa;
-	hubii_wcr_t ii_wcr;
+	union hubii_wcr_u ii_wcr;
 	unsigned i;
 
 	ii_iowa = REMOTE_HUB_L(nasid, IIO_OUTWIDGET_ACCESS);
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index f597e1e..32bcb8d 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -19,23 +19,18 @@
 #include <asm/pgtable.h>
 #include <asm/sgialib.h>
 #include <asm/time.h>
+#include <asm/sn/agent.h>
 #include <asm/sn/types.h>
-#include <asm/sn/sn0/addrs.h>
-#include <asm/sn/sn0/hubni.h>
-#include <asm/sn/sn0/hubio.h>
 #include <asm/sn/klconfig.h>
 #include <asm/sn/ioc3.h>
 #include <asm/mipsregs.h>
 #include <asm/sn/gda.h>
-#include <asm/sn/hub.h>
 #include <asm/sn/intr.h>
 #include <asm/current.h>
 #include <asm/processor.h>
 #include <asm/mmu_context.h>
 #include <asm/thread_info.h>
 #include <asm/sn/launch.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/sn0/ip27.h>
 #include <asm/sn/mapped_kernel.h>
 
 #include "ip27-common.h"
@@ -77,18 +72,14 @@ static void per_hub_init(nasid_t nasid)
 void per_cpu_init(void)
 {
 	int cpu = smp_processor_id();
-	int slice = LOCAL_HUB_L(PI_CPU_NUM);
 	nasid_t nasid = get_nasid();
-	struct hub_data *hub = hub_data(nasid);
-
-	if (test_and_set_bit(slice, &hub->slice_map))
-		return;
 
 	clear_c0_status(ST0_IM);
 
 	per_hub_init(nasid);
 
-	cpu_time_init();
+	pr_info("CPU %d clock is %dMHz.\n", cpu, sn_cpu_info[cpu].p_speed);
+
 	install_ipi();
 
 	/* Install our NMI handler if symmon hasn't installed one. */
@@ -98,16 +89,6 @@ void per_cpu_init(void)
 	enable_percpu_irq(IP27_HUB_PEND1_IRQ, IRQ_TYPE_NONE);
 }
 
-/*
- * get_nasid() returns the physical node id number of the caller.
- */
-nasid_t
-get_nasid(void)
-{
-	return (nasid_t)((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_NODEID_MASK)
-			 >> NSRI_NODEID_SHFT);
-}
-
 void __init plat_mem_setup(void)
 {
 	u64 p, e, n_mode;
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index c72ae33..42df9fa 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -19,7 +19,6 @@
 #include <asm/sn/addrs.h>
 #include <asm/sn/agent.h>
 #include <asm/sn/arch.h>
-#include <asm/sn/hub.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/irq_alloc.h>
 
@@ -288,11 +287,9 @@ void __init arch_init_irq(void)
 	 * Mark these as reserved right away so they won't be used accidentally
 	 * later.
 	 */
-	for (i = 0; i <= BASE_PCI_IRQ; i++)
+	for (i = 0; i <= CPU_CALL_B_IRQ; i++)
 		set_bit(i, hub_irq_map);
 
-	set_bit(IP_PEND0_6_63, hub_irq_map);
-
 	for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
 		set_bit(i, hub_irq_map);
 
diff --git a/arch/mips/sgi-ip27/ip27-klconfig.c b/arch/mips/sgi-ip27/ip27-klconfig.c
index 6cb2160..81a1646 100644
--- a/arch/mips/sgi-ip27/ip27-klconfig.c
+++ b/arch/mips/sgi-ip27/ip27-klconfig.c
@@ -72,54 +72,3 @@ lboard_t *find_lboard_class(lboard_t *start, unsigned char brd_type)
 	/* Didn't find it. */
 	return (lboard_t *)NULL;
 }
-
-klcpu_t *nasid_slice_to_cpuinfo(nasid_t nasid, int slice)
-{
-	lboard_t *brd;
-	klcpu_t *acpu;
-
-	if (!(brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27)))
-		return (klcpu_t *)NULL;
-
-	if (!(acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU)))
-		return (klcpu_t *)NULL;
-
-	do {
-		if ((acpu->cpu_info.physid) == slice)
-			return acpu;
-	} while ((acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu,
-								KLSTRUCT_CPU)));
-	return (klcpu_t *)NULL;
-}
-
-klcpu_t *sn_get_cpuinfo(cpuid_t cpu)
-{
-	nasid_t nasid;
-	int slice;
-	klcpu_t *acpu;
-
-	if (!(cpu < MAXCPUS)) {
-		printk("sn_get_cpuinfo: illegal cpuid 0x%lx\n", cpu);
-		return NULL;
-	}
-
-	nasid = cputonasid(cpu);
-	if (nasid  == INVALID_NASID)
-		return NULL;
-
-	for (slice = 0; slice < CPUS_PER_NODE; slice++) {
-		acpu = nasid_slice_to_cpuinfo(nasid, slice);
-		if (acpu && acpu->cpu_info.virtid == cpu)
-			return acpu;
-	}
-	return NULL;
-}
-
-int get_cpu_slice(cpuid_t cpu)
-{
-	klcpu_t *acpu;
-
-	if ((acpu = sn_get_cpuinfo(cpu)) == NULL)
-		return -1;
-	return acpu->cpu_info.physid;
-}
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index ee1c6ff..abd7a84 100644
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -16,11 +16,11 @@
 #include <asm/sn/types.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/gda.h>
-#include <asm/sn/hub.h>
 #include <asm/sn/mapped_kernel.h>
-#include <asm/sn/sn_private.h>
 
-static cpumask_t ktext_repmask;
+#include "ip27-common.h"
+
+static nodemask_t ktext_repmask;
 
 /*
  * XXX - This needs to be much smarter about where it puts copies of the
@@ -30,8 +30,8 @@ static cpumask_t ktext_repmask;
 void __init setup_replication_mask(void)
 {
 	/* Set only the master cnode's bit.  The master cnode is always 0. */
-	cpumask_clear(&ktext_repmask);
-	cpumask_set_cpu(0, &ktext_repmask);
+	nodes_clear(ktext_repmask);
+	node_set(0, ktext_repmask);
 
 #ifdef CONFIG_REPLICATE_KTEXT
 #ifndef CONFIG_MAPPED_KERNEL
@@ -44,7 +44,7 @@ void __init setup_replication_mask(void)
 			if (nasid == 0)
 				continue;
 			/* Advertise that we have a copy of the kernel */
-			cpumask_set_cpu(nasid, &ktext_repmask);
+			node_set(nasid, ktext_repmask);
 		}
 	}
 #endif
@@ -98,7 +98,7 @@ void __init replicate_kernel_text(void)
 			continue;
 
 		/* Check if this node should get a copy of the kernel */
-		if (cpumask_test_cpu(client_nasid, &ktext_repmask)) {
+		if (node_isset(client_nasid, ktext_repmask)) {
 			server_nasid = client_nasid;
 			copy_kernel(server_nasid);
 		}
@@ -122,7 +122,7 @@ unsigned long node_getfirstfree(nasid_t nasid)
 	loadbase += 16777216;
 #endif
 	offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase;
-	if ((nasid == 0) || (cpumask_test_cpu(nasid, &ktext_repmask)))
+	if ((nasid == 0) || (node_isset(nasid, ktext_repmask)))
 		return TO_NODE(nasid, offset) >> PAGE_SHIFT;
 	else
 		return KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT;
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 563aad5..a45691e 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -25,10 +25,10 @@
 #include <asm/sections.h>
 
 #include <asm/sn/arch.h>
-#include <asm/sn/hub.h>
+#include <asm/sn/agent.h>
 #include <asm/sn/klconfig.h>
-#include <asm/sn/sn_private.h>
 
+#include "ip27-common.h"
 
 #define SLOT_PFNSHIFT		(SLOT_SHIFT - PAGE_SHIFT)
 #define PFN_NASIDSHFT		(NASID_SHFT - PAGE_SHIFT)
@@ -37,31 +37,18 @@ struct node_data *__node_data[MAX_NUMNODES];
 
 EXPORT_SYMBOL(__node_data);
 
-static int fine_mode;
-
-static int is_fine_dirmode(void)
+static u64 gen_region_mask(void)
 {
-	return ((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK) >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE;
-}
-
-static u64 get_region(nasid_t nasid)
-{
-	if (fine_mode)
-		return nasid >> NASID_TO_FINEREG_SHFT;
-	else
-		return nasid >> NASID_TO_COARSEREG_SHFT;
-}
-
-static u64 region_mask;
-
-static void gen_region_mask(u64 *region_mask)
-{
+	int region_shift;
+	u64 region_mask;
 	nasid_t nasid;
 
-	(*region_mask) = 0;
-	for_each_online_node(nasid) {
-		(*region_mask) |= 1ULL << get_region(nasid);
-	}
+	region_shift = get_region_shift();
+	region_mask = 0;
+	for_each_online_node(nasid)
+		region_mask |= BIT_ULL(nasid >> region_shift);
+
+	return region_mask;
 }
 
 #define rou_rflag	rou_flags
@@ -148,25 +135,25 @@ static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
 		} while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
 	}
 
+	if (nasid_a == nasid_b)
+		return LOCAL_DISTANCE;
+
+	if (router_a == router_b)
+		return LOCAL_DISTANCE + 1;
+
 	if (router_a == NULL) {
 		pr_info("node_distance: router_a NULL\n");
-		return -1;
+		return 255;
 	}
 	if (router_b == NULL) {
 		pr_info("node_distance: router_b NULL\n");
-		return -1;
+		return 255;
 	}
 
-	if (nasid_a == nasid_b)
-		return 0;
-
-	if (router_a == router_b)
-		return 1;
-
 	router_distance = 100;
 	router_recurse(router_a, router_b, 2);
 
-	return router_distance;
+	return LOCAL_DISTANCE + router_distance;
 }
 
 static void __init init_topology_matrix(void)
@@ -281,10 +268,10 @@ static unsigned long __init slot_psize_compute(nasid_t nasid, int slot)
 
 static void __init mlreset(void)
 {
+	u64 region_mask;
 	nasid_t nasid;
 
 	master_nasid = get_nasid();
-	fine_mode = is_fine_dirmode();
 
 	/*
 	 * Probe for all CPUs - this creates the cpumask and sets up the
@@ -297,7 +284,7 @@ static void __init mlreset(void)
 	init_topology_matrix();
 	dump_topology();
 
-	gen_region_mask(&region_mask);
+	region_mask = gen_region_mask();
 
 	setup_replication_mask();
 
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
index daf3670..84889b5 100644
--- a/arch/mips/sgi-ip27/ip27-nmi.c
+++ b/arch/mips/sgi-ip27/ip27-nmi.c
@@ -9,7 +9,7 @@
 #include <asm/sn/addrs.h>
 #include <asm/sn/nmi.h>
 #include <asm/sn/arch.h>
-#include <asm/sn/sn0/hub.h>
+#include <asm/sn/agent.h>
 
 #if 0
 #define NODE_NUM_CPUS(n)	CNODE_NUM_CPUS(n)
@@ -17,6 +17,9 @@
 #define NODE_NUM_CPUS(n)	CPUS_PER_NODE
 #endif
 
+#define SEND_NMI(_nasid, _slice)	\
+	REMOTE_HUB_S((_nasid),  (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)
+
 typedef unsigned long machreg_t;
 
 static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
diff --git a/arch/mips/sgi-ip27/ip27-reset.c b/arch/mips/sgi-ip27/ip27-reset.c
index 74d0782..5ac5ad6 100644
--- a/arch/mips/sgi-ip27/ip27-reset.c
+++ b/arch/mips/sgi-ip27/ip27-reset.c
@@ -22,9 +22,9 @@
 #include <asm/reboot.h>
 #include <asm/sgialib.h>
 #include <asm/sn/addrs.h>
+#include <asm/sn/agent.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/gda.h>
-#include <asm/sn/sn0/hub.h>
 
 #include "ip27-common.h"
 
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index faa0244..5d2652a 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -15,36 +15,22 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
+#include <asm/sn/agent.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/gda.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/klconfig.h>
 #include <asm/sn/launch.h>
 #include <asm/sn/mapped_kernel.h>
-#include <asm/sn/sn_private.h>
 #include <asm/sn/types.h>
-#include <asm/sn/sn0/hubpi.h>
-#include <asm/sn/sn0/hubio.h>
-#include <asm/sn/sn0/ip27.h>
 
 #include "ip27-common.h"
 
-/*
- * Takes as first input the PROM assigned cpu id, and the kernel
- * assigned cpu id as the second.
- */
-static void alloc_cpupda(nasid_t nasid, cpuid_t cpu, int cpunum)
+static int node_scan_cpus(nasid_t nasid, int highest)
 {
-	cputonasid(cpunum) = nasid;
-	cputoslice(cpunum) = get_cpu_slice(cpu);
-}
-
-static int do_cpumask(nasid_t nasid, int highest)
-{
-	static int tot_cpus_found = 0;
+	static int cpus_found;
 	lboard_t *brd;
 	klcpu_t *acpu;
-	int cpus_found = 0;
 	cpuid_t cpuid;
 
 	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
@@ -55,13 +41,15 @@ static int do_cpumask(nasid_t nasid, int highest)
 			cpuid = acpu->cpu_info.virtid;
 			/* Only let it join in if it's marked enabled */
 			if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
-			    (tot_cpus_found != NR_CPUS)) {
+			    (cpus_found != NR_CPUS)) {
 				if (cpuid > highest)
 					highest = cpuid;
 				set_cpu_possible(cpuid, true);
-				alloc_cpupda(nasid, cpuid, tot_cpus_found);
+				cputonasid(cpus_found) = nasid;
+				cputoslice(cpus_found) = acpu->cpu_info.physid;
+				sn_cpu_info[cpus_found].p_speed =
+							acpu->cpu_speed;
 				cpus_found++;
-				tot_cpus_found++;
 			}
 			acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu,
 								KLSTRUCT_CPU);
@@ -87,7 +75,7 @@ void cpu_node_probe(void)
 		if (nasid == INVALID_NASID)
 			break;
 		node_set_online(nasid);
-		highest = do_cpumask(nasid, highest);
+		highest = node_scan_cpus(nasid, highest);
 	}
 
 	printk("Discovered %d cpus on %d nodes\n", highest + 1, num_online_nodes());
@@ -180,7 +168,8 @@ static void __init ip27_smp_setup(void)
 	/*
 	 * PROM sets up system, that boot cpu is always first CPU on nasid 0
 	 */
-	alloc_cpupda(0, 0, 0);
+	cputonasid(0) = 0;
+	cputoslice(0) = LOCAL_HUB_L(PI_CPU_NUM);
 }
 
 static void __init ip27_prepare_cpus(unsigned int max_cpus)
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 17302bb..61f3565 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -25,17 +25,14 @@
 #include <asm/sn/klconfig.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/addrs.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/sn0/ip27.h>
-#include <asm/sn/sn0/hub.h>
+#include <asm/sn/agent.h>
+
+#include "ip27-common.h"
 
 #define TICK_SIZE (tick_nsec / 1000)
 
 /* Includes for ioc3_init().  */
 #include <asm/sn/types.h>
-#include <asm/sn/sn0/addrs.h>
-#include <asm/sn/sn0/hubni.h>
-#include <asm/sn/sn0/hubio.h>
 #include <asm/pci/bridge.h>
 
 #include "ip27-common.h"
@@ -153,25 +150,6 @@ void __init plat_time_init(void)
 	hub_rt_clock_event_init();
 }
 
-void cpu_time_init(void)
-{
-	lboard_t *board;
-	klcpu_t *cpu;
-	int cpuid;
-
-	/* Don't use ARCS.  ARCS is fragile.  Klconfig is simple and sane.  */
-	board = find_lboard(KL_CONFIG_INFO(get_nasid()), KLTYPE_IP27);
-	if (!board)
-		panic("Can't find board info for myself.");
-
-	cpuid = LOCAL_HUB_L(PI_CPU_NUM) ? IP27_CPU0_INDEX : IP27_CPU1_INDEX;
-	cpu = (klcpu_t *) KLCF_COMP(board, cpuid);
-	if (!cpu)
-		panic("No information about myself?");
-
-	printk("CPU %d clock is %dMHz.\n", smp_processor_id(), cpu->cpu_speed);
-}
-
 void hub_rtc_init(nasid_t nasid)
 {
 
@@ -190,23 +168,3 @@ void hub_rtc_init(nasid_t nasid)
 		LOCAL_HUB_S(PI_RT_PEND_B, 0);
 	}
 }
-
-static int __init sgi_ip27_rtc_devinit(void)
-{
-	struct resource res;
-
-	memset(&res, 0, sizeof(res));
-	res.start = XPHYSADDR(KL_CONFIG_CH_CONS_INFO(master_nasid)->memory_base +
-			      IOC3_BYTEBUS_DEV0);
-	res.end = res.start + 32767;
-	res.flags = IORESOURCE_MEM;
-
-	return IS_ERR(platform_device_register_simple("rtc-m48t35", -1,
-						      &res, 1));
-}
-
-/*
- * kludge make this a device_initcall after ioc3 resource conflicts
- * are resolved
- */
-late_initcall(sgi_ip27_rtc_devinit);
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index 5218b90..000ede1 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -15,7 +15,6 @@
 #include <asm/sn/addrs.h>
 #include <asm/sn/types.h>
 #include <asm/sn/klconfig.h>
-#include <asm/sn/hub.h>
 #include <asm/pci/bridge.h>
 #include <asm/xtalk/xtalk.h>
 
diff --git a/arch/mips/sgi-ip30/ip30-irq.c b/arch/mips/sgi-ip30/ip30-irq.c
index d46655b..c2ffcb9 100644
--- a/arch/mips/sgi-ip30/ip30-irq.c
+++ b/arch/mips/sgi-ip30/ip30-irq.c
@@ -232,9 +232,10 @@ static void heart_domain_free(struct irq_domain *domain,
 		return;
 
 	irqd = irq_domain_get_irq_data(domain, virq);
-	clear_bit(irqd->hwirq, heart_irq_map);
-	if (irqd && irqd->chip_data)
+	if (irqd) {
+		clear_bit(irqd->hwirq, heart_irq_map);
 		kfree(irqd->chip_data);
+	}
 }
 
 static const struct irq_domain_ops heart_domain_ops = {
diff --git a/arch/mips/vdso/genvdso.c b/arch/mips/vdso/genvdso.c
index b66b6b1..be57b832 100644
--- a/arch/mips/vdso/genvdso.c
+++ b/arch/mips/vdso/genvdso.c
@@ -251,6 +251,18 @@ int main(int argc, char **argv)
 	fprintf(out_file, "#include <linux/linkage.h>\n");
 	fprintf(out_file, "#include <linux/mm.h>\n");
 	fprintf(out_file, "#include <asm/vdso.h>\n");
+	fprintf(out_file, "static int vdso_mremap(\n");
+	fprintf(out_file, "	const struct vm_special_mapping *sm,\n");
+	fprintf(out_file, "	struct vm_area_struct *new_vma)\n");
+	fprintf(out_file, "{\n");
+	fprintf(out_file, "	unsigned long new_size =\n");
+	fprintf(out_file, "	new_vma->vm_end - new_vma->vm_start;\n");
+	fprintf(out_file, "	if (vdso_image.size != new_size)\n");
+	fprintf(out_file, "		return -EINVAL;\n");
+	fprintf(out_file, "	current->mm->context.vdso =\n");
+	fprintf(out_file, "	(void __user *)(new_vma->vm_start);\n");
+	fprintf(out_file, "	return 0;\n");
+	fprintf(out_file, "}\n");
 
 	/* Write out the stripped VDSO data. */
 	fprintf(out_file,
@@ -275,6 +287,7 @@ int main(int argc, char **argv)
 	fprintf(out_file, "\t.mapping = {\n");
 	fprintf(out_file, "\t\t.name = \"[vdso]\",\n");
 	fprintf(out_file, "\t\t.pages = vdso_pages,\n");
+	fprintf(out_file, "\t\t.mremap = vdso_mremap,\n");
 	fprintf(out_file, "\t},\n");
 
 	/* Calculate and write symbol offsets to <output file> */
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
index 31d29d9..a066efb 100644
--- a/arch/nds32/kernel/setup.c
+++ b/arch/nds32/kernel/setup.c
@@ -317,11 +317,6 @@ void __init setup_arch(char **cmdline_p)
 
 	unflatten_and_copy_device_tree();
 
-	if(IS_ENABLED(CONFIG_VT)) {
-		if(IS_ENABLED(CONFIG_DUMMY_CONSOLE))
-			conswitchp = &dummy_con;
-	}
-
 	*cmdline_p = boot_command_line;
 	early_trap_init();
 }
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index 4cf35b0..3c6e3c8 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -196,8 +196,4 @@ void __init setup_arch(char **cmdline_p)
 	 * get kmalloc into gear
 	 */
 	paging_init();
-
-#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
-	conswitchp = &dummy_con;
-#endif
 }
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index d668f5b..c0a774b 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -308,11 +308,6 @@ void __init setup_arch(char **cmdline_p)
 	/* paging_init() sets up the MMU and marks all pages as reserved */
 	paging_init();
 
-#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
-	if (!conswitchp)
-		conswitchp = &dummy_con;
-#endif
-
 	*cmdline_p = boot_command_line;
 
 	printk(KERN_INFO "OpenRISC Linux -- http://openrisc.io\n");
diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
deleted file mode 100644
index d3e3d94..0000000
--- a/arch/parisc/configs/712_defconfig
+++ /dev/null
@@ -1,181 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_PA7100LC=y
-CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_GSC_LASI=y
-# CONFIG_PDC_CHASSIS is not set
-CONFIG_BINFMT_MISC=m
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_DIAG=m
-# CONFIG_IPV6 is not set
-CONFIG_NETFILTER=y
-CONFIG_LLC2=m
-CONFIG_NET_PKTGEN=m
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_PARPORT=y
-CONFIG_PARPORT_PC=m
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=6144
-CONFIG_ATA_OVER_ETH=m
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_ISCSI_ATTRS=m
-CONFIG_SCSI_LASI700=y
-CONFIG_SCSI_DEBUG=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_TUN=m
-CONFIG_LASI_82596=y
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-# CONFIG_KEYBOARD_HIL_OLD is not set
-CONFIG_MOUSE_SERIAL=m
-CONFIG_LEGACY_PTY_COUNT=64
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=17
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-# CONFIG_SERIAL_MUX is not set
-CONFIG_PDC_CONSOLE=y
-CONFIG_PRINTER=m
-CONFIG_PPDEV=m
-# CONFIG_HW_RANDOM is not set
-CONFIG_RAW_DRIVER=y
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-CONFIG_DUMMY_CONSOLE_COLUMNS=128
-CONFIG_DUMMY_CONSOLE_ROWS=48
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_HARMONY=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
-CONFIG_AUTOFS4_FS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_UFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V4=y
-CONFIG_CIFS=m
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_CRYPTO_HW is not set
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
deleted file mode 100644
index 3335734..0000000
--- a/arch/parisc/configs/a500_defconfig
+++ /dev/null
@@ -1,177 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_PA8X00=y
-CONFIG_64BIT=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=8
-# CONFIG_GSC is not set
-CONFIG_PCI=y
-CONFIG_PCI_LBA=y
-CONFIG_PCCARD=m
-# CONFIG_PCMCIA_LOAD_CIS is not set
-CONFIG_YENTA=m
-CONFIG_PD6729=m
-CONFIG_I82092=m
-# CONFIG_SUPERIO is not set
-# CONFIG_CHASSIS_LCD_LED is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
-CONFIG_LLC2=m
-CONFIG_NET_PKTGEN=m
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_BLK_DEV_UMEM=m
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=6144
-CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_ISCSI_ATTRS=m
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_QLOGIC_1280=m
-CONFIG_SCSI_DEBUG=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=y
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=m
-CONFIG_FUSION_CTL=m
-CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_TUN=m
-CONFIG_PCMCIA_3C574=m
-CONFIG_PCMCIA_3C589=m
-CONFIG_VORTEX=m
-CONFIG_TYPHOON=m
-CONFIG_ACENIC=m
-CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_PCNET32=m
-CONFIG_TIGON3=m
-CONFIG_NET_TULIP=y
-CONFIG_DE2104X=m
-CONFIG_TULIP=y
-CONFIG_TULIP_MMIO=y
-CONFIG_PCMCIA_XIRCOM=m
-CONFIG_HP100=m
-CONFIG_E100=m
-CONFIG_E1000=m
-CONFIG_PCMCIA_SMC91C92=m
-CONFIG_PCMCIA_XIRC2PS=m
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_CS=m
-CONFIG_SERIAL_8250_NR_UARTS=17
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_PDC_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_RAW_DRIVER=y
-# CONFIG_HWMON is not set
-CONFIG_AGP=y
-CONFIG_AGP_PARISC=y
-# CONFIG_STI_CONSOLE is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
-CONFIG_AUTOFS4_FS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_UFS_FS=m
-CONFIG_NFS_FS=m
-CONFIG_NFS_V4=m
-CONFIG_NFSD=m
-CONFIG_NFSD_V4=y
-CONFIG_CIFS=m
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_UTF8=m
-CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_INSTALL=y
-CONFIG_HEADERS_CHECK=y
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_BLOWFISH=m
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/parisc/configs/b180_defconfig b/arch/parisc/configs/b180_defconfig
deleted file mode 100644
index 07fde5b..0000000
--- a/arch/parisc/configs/b180_defconfig
+++ /dev/null
@@ -1,97 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODVERSIONS=y
-CONFIG_PA7100LC=y
-CONFIG_HPPB=y
-CONFIG_IOMMU_CCIO=y
-CONFIG_GSC_LASI=y
-CONFIG_GSC_WAX=y
-CONFIG_EISA=y
-CONFIG_ISA=y
-CONFIG_PCI=y
-CONFIG_GSC_DINO=y
-# CONFIG_PDC_CHASSIS is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_PARPORT=y
-CONFIG_PARPORT_PC=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=y
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_LASI700=y
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
-CONFIG_SCSI_ZALON=y
-CONFIG_SCSI_NCR53C8XX_SYNC=40
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=y
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_TULIP=y
-CONFIG_TULIP=y
-CONFIG_LASI_82596=y
-CONFIG_PPP=y
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_HIL_OLD is not set
-CONFIG_INPUT_MISC=y
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=13
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_PRINTER=y
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_HARMONY=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V3=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_UTF8=m
-CONFIG_HEADERS_INSTALL=y
-CONFIG_HEADERS_CHECK=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_SECURITY=y
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
deleted file mode 100644
index 64d45a8..0000000
--- a/arch/parisc/configs/c3000_defconfig
+++ /dev/null
@@ -1,151 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_PA8X00=y
-CONFIG_PREEMPT_VOLUNTARY=y
-# CONFIG_GSC is not set
-CONFIG_PCI=y
-CONFIG_PCI_LBA=y
-# CONFIG_PDC_CHASSIS is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_DIAG is not set
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_NETFILTER=y
-CONFIG_NET_PKTGEN=m
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_BLK_DEV_UMEM=m
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_NS87415=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_ISCSI_ATTRS=m
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
-CONFIG_SCSI_DEBUG=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=y
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-CONFIG_BLK_DEV_DM=m
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=m
-CONFIG_FUSION_CTL=m
-CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_TUN=m
-CONFIG_ACENIC=m
-CONFIG_TIGON3=m
-CONFIG_NET_TULIP=y
-CONFIG_DE2104X=m
-CONFIG_TULIP=y
-CONFIG_TULIP_MMIO=y
-CONFIG_E100=m
-CONFIG_E1000=m
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPPOE=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_MOUSE_PS2 is not set
-CONFIG_SERIO=m
-CONFIG_SERIO_LIBPS2=m
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=13
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_RAW_DRIVER=y
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_AD1889=y
-CONFIG_USB_HIDDEV=y
-CONFIG_USB=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_PRINTER=m
-CONFIG_USB_STORAGE=m
-CONFIG_USB_STORAGE_USBAT=m
-CONFIG_USB_STORAGE_SDDR09=m
-CONFIG_USB_STORAGE_SDDR55=m
-CONFIG_USB_STORAGE_JUMPSHOT=m
-CONFIG_USB_MDC800=m
-CONFIG_USB_MICROTEK=m
-CONFIG_USB_LEGOTOWER=m
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_XFS_FS=m
-CONFIG_AUTOFS4_FS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V3=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_UTF8=m
-CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_INSTALL=y
-CONFIG_HEADERS_CHECK=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_MUTEXES=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_MD5=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_DES=m
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
deleted file mode 100644
index db864b1..0000000
--- a/arch/parisc/configs/c8000_defconfig
+++ /dev/null
@@ -1,234 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-# CONFIG_CROSS_MEMORY_ATTACH is not set
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_BLK_DEV_INTEGRITY=y
-CONFIG_PA8X00=y
-CONFIG_64BIT=y
-CONFIG_SMP=y
-CONFIG_PREEMPT=y
-CONFIG_IOMMU_CCIO=y
-CONFIG_PCI=y
-CONFIG_PCI_LBA=y
-# CONFIG_SUPERIO is not set
-# CONFIG_CHASSIS_LCD_LED is not set
-# CONFIG_PDC_CHASSIS is not set
-# CONFIG_PDC_CHASSIS_WARN is not set
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_BINFMT_MISC=m
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_XFRM_SUB_POLICY=y
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=m
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_DIAG=m
-# CONFIG_IPV6 is not set
-CONFIG_IP_DCCP=m
-# CONFIG_IP_DCCP_CCID3 is not set
-CONFIG_TIPC=m
-CONFIG_LLC2=m
-CONFIG_DNS_RESOLVER=y
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_STANDALONE is not set
-CONFIG_PARPORT=y
-CONFIG_PARPORT_PC=y
-CONFIG_PARPORT_PC_FIFO=y
-CONFIG_BLK_DEV_UMEM=m
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_SX8=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=6144
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_CDROM_PKTCDVD_WCACHE=y
-CONFIG_ATA_OVER_ETH=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_CHR_DEV_SG=y
-CONFIG_CHR_DEV_SCH=m
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_FC_ATTRS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-CONFIG_ISCSI_TCP=m
-CONFIG_ISCSI_BOOT_SYSFS=m
-CONFIG_ATA=y
-CONFIG_PATA_SIL680=y
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=y
-CONFIG_FUSION_SAS=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_NETCONSOLE=m
-CONFIG_TUN=y
-CONFIG_E1000=y
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-# CONFIG_WLAN is not set
-CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_HIL_OLD is not set
-# CONFIG_KEYBOARD_HIL is not set
-# CONFIG_MOUSE_PS2 is not set
-CONFIG_INPUT_MISC=y
-CONFIG_SERIO_SERPORT=m
-CONFIG_SERIO_PARKBD=m
-CONFIG_SERIO_GSCPS2=m
-# CONFIG_HP_SDC is not set
-CONFIG_SERIO_PCIPS2=m
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIO_RAW=m
-CONFIG_SERIAL_8250=y
-# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=8
-CONFIG_SERIAL_8250_RUNTIME_UARTS=8
-CONFIG_SERIAL_8250_EXTENDED=y
-# CONFIG_SERIAL_MUX is not set
-CONFIG_SERIAL_JSM=m
-CONFIG_PRINTER=y
-CONFIG_HW_RANDOM=y
-CONFIG_RAW_DRIVER=m
-CONFIG_PTP_1588_CLOCK=y
-CONFIG_SSB=m
-CONFIG_SSB_DRIVER_PCICORE=y
-CONFIG_AGP=y
-CONFIG_AGP_PARISC=y
-CONFIG_DRM=y
-CONFIG_DRM_RADEON=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_FOREIGN_ENDIAN=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-# CONFIG_FB_STI is not set
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_GENERIC is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-# CONFIG_STI_CONSOLE is not set
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_SOUND=m
-CONFIG_SND=m
-CONFIG_SND_VERBOSE_PRINTK=y
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_AD1889=m
-# CONFIG_SND_USB is not set
-# CONFIG_SND_GSC is not set
-CONFIG_USB=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_XFS_FS=m
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_QUOTA=y
-CONFIG_QFMT_V1=m
-CONFIG_QFMT_V2=m
-CONFIG_AUTOFS4_FS=m
-CONFIG_FUSE_FS=m
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_XATTR=y
-CONFIG_NFS_FS=m
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_PANIC_ON_OOPS=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_BLOCK_EXT_DEVT=y
-CONFIG_LATENCYTOP=y
-CONFIG_KEYS=y
-# CONFIG_CRYPTO_HW is not set
-CONFIG_FONTS=y
diff --git a/arch/parisc/configs/defconfig b/arch/parisc/configs/defconfig
deleted file mode 100644
index 5b877ca..0000000
--- a/arch/parisc/configs/defconfig
+++ /dev/null
@@ -1,206 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PA7100LC=y
-CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_IOMMU_CCIO=y
-CONFIG_GSC_LASI=y
-CONFIG_GSC_WAX=y
-CONFIG_EISA=y
-CONFIG_PCI=y
-CONFIG_GSC_DINO=y
-CONFIG_PCI_LBA=y
-CONFIG_PCCARD=y
-CONFIG_YENTA=y
-CONFIG_PD6729=y
-CONFIG_I82092=y
-CONFIG_BINFMT_MISC=m
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_DIAG=m
-CONFIG_INET6_AH=y
-CONFIG_INET6_ESP=y
-CONFIG_INET6_IPCOMP=y
-CONFIG_LLC2=m
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_PARPORT=y
-CONFIG_PARPORT_PC=m
-CONFIG_PARPORT_PC_PCMCIA=m
-CONFIG_PARPORT_1284=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=6144
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECS=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_NS87415=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_LASI700=y
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_ZALON=y
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=y
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-CONFIG_MD_RAID10=y
-CONFIG_BLK_DEV_DM=y
-CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_TUN=m
-CONFIG_ACENIC=y
-CONFIG_TIGON3=y
-CONFIG_NET_TULIP=y
-CONFIG_TULIP=y
-CONFIG_LASI_82596=y
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPPOE=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-# CONFIG_KEYBOARD_HIL_OLD is not set
-CONFIG_MOUSE_SERIAL=y
-CONFIG_LEGACY_PTY_COUNT=64
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_CS=y
-CONFIG_SERIAL_8250_NR_UARTS=17
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_PRINTER=m
-CONFIG_PPDEV=m
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-CONFIG_DUMMY_CONSOLE_COLUMNS=128
-CONFIG_DUMMY_CONSOLE_ROWS=48
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_DYNAMIC_MINORS=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_AD1889=y
-CONFIG_SND_HARMONY=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_NTRIG=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_HID_TOPSEED=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_UHCI_HCD=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_VFAT_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V4=y
-CONFIG_CIFS=m
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=y
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=y
-CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_INSTALL=y
-CONFIG_HEADERS_CHECK=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_KEYS=y
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_HW is not set
-CONFIG_LIBCRC32C=m
-CONFIG_FONTS=y
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index c7a5726..61bac8f 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -10,23 +10,20 @@
 CONFIG_EXPERT=y
 CONFIG_PERF_EVENTS=y
 CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
 CONFIG_PA7100LC=y
 CONFIG_SMP=y
 CONFIG_HZ_100=y
 CONFIG_IOMMU_CCIO=y
 CONFIG_GSC_LASI=y
 CONFIG_GSC_WAX=y
-CONFIG_EISA=y
-CONFIG_PCI=y
 CONFIG_GSC_DINO=y
 CONFIG_PCI_LBA=y
-CONFIG_PCCARD=m
-CONFIG_YENTA=m
 # CONFIG_PDC_CHASSIS is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_UNUSED_SYMBOLS=y
+# CONFIG_BLK_DEV_BSG is not set
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_NET=y
@@ -35,17 +32,15 @@
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
 CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
 CONFIG_INET_DIAG=m
 CONFIG_LLC2=m
 # CONFIG_WIRELESS is not set
+CONFIG_EISA=y
+CONFIG_PCI=y
+CONFIG_PCCARD=m
+CONFIG_YENTA=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
@@ -88,7 +83,6 @@
 # CONFIG_NET_VENDOR_ALTEON is not set
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_BROCADE is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
@@ -97,8 +91,6 @@
 CONFIG_TULIP=y
 # CONFIG_NET_VENDOR_DLINK is not set
 # CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
-# CONFIG_NET_VENDOR_HP is not set
 CONFIG_LASI_82596=y
 # CONFIG_NET_VENDOR_MELLANOX is not set
 # CONFIG_NET_VENDOR_MICREL is not set
@@ -106,10 +98,9 @@
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NVIDIA is not set
 # CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
 # CONFIG_NET_VENDOR_QLOGIC is not set
-# CONFIG_NET_VENDOR_REALTEK is not set
 # CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SILAN is not set
 # CONFIG_NET_VENDOR_SIS is not set
@@ -142,7 +133,6 @@
 # CONFIG_HW_RANDOM is not set
 CONFIG_I2C=y
 # CONFIG_HWMON is not set
-CONFIG_AGP=y
 CONFIG_FB=y
 CONFIG_FB_FOREIGN_ENDIAN=y
 CONFIG_FB_MODE_HELPERS=y
@@ -230,63 +220,11 @@
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
 # CONFIG_CIFS_DEBUG is not set
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=y
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=y
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_SHIRQ=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_LATENCYTOP=y
-CONFIG_LKDTM=m
-CONFIG_KEYS=y
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -302,3 +240,14 @@
 CONFIG_CRC_CCITT=m
 CONFIG_CRC_T10DIF=y
 CONFIG_FONTS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_LATENCYTOP=y
+CONFIG_LKDTM=m
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index d39e7f8..59561e0 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -17,27 +17,24 @@
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_COMPAT_BRK is not set
+CONFIG_PA8X00=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+CONFIG_HPPB=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_GSC_DINO=y
+CONFIG_PCI_LBA=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
+CONFIG_UNUSED_SYMBOLS=y
 CONFIG_BLK_DEV_INTEGRITY=y
-# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_PA8X00=y
-CONFIG_64BIT=y
-CONFIG_SMP=y
-# CONFIG_COMPACTION is not set
-CONFIG_HPPB=y
-CONFIG_IOMMU_CCIO=y
-CONFIG_GSC_LASI=y
-CONFIG_GSC_WAX=y
-CONFIG_PCI=y
-CONFIG_PCI_STUB=m
-CONFIG_PCI_IOV=y
-CONFIG_GSC_DINO=y
-CONFIG_PCI_LBA=y
 CONFIG_BINFMT_MISC=m
+# CONFIG_COMPACTION is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -47,18 +44,17 @@
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_NETFILTER=y
 # CONFIG_NETFILTER_ADVANCED is not set
 CONFIG_NETFILTER_NETLINK_LOG=y
 CONFIG_DCB=y
 # CONFIG_WIRELESS is not set
+CONFIG_PCI=y
+CONFIG_PCI_STUB=m
+CONFIG_PCI_IOV=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_LOOP=y
@@ -102,7 +98,6 @@
 # CONFIG_NET_VENDOR_ALTEON is not set
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_BROCADE is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
@@ -111,10 +106,8 @@
 CONFIG_TULIP=y
 # CONFIG_NET_VENDOR_DLINK is not set
 # CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
-CONFIG_HP100=m
-CONFIG_E1000=y
 CONFIG_LASI_82596=y
+CONFIG_E1000=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MELLANOX is not set
 # CONFIG_NET_VENDOR_MICREL is not set
@@ -124,9 +117,8 @@
 # CONFIG_NET_VENDOR_OKI is not set
 CONFIG_QLA3XXX=m
 CONFIG_QLCNIC=m
-CONFIG_QLGE=m
-# CONFIG_NET_VENDOR_REALTEK is not set
 # CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SILAN is not set
 # CONFIG_NET_VENDOR_SIS is not set
@@ -153,9 +145,6 @@
 CONFIG_STE10XP=m
 CONFIG_VITESSE_PHY=m
 CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_KEYBOARD_HIL_OLD is not set
@@ -208,34 +197,18 @@
 CONFIG_FB_MODE_HELPERS=y
 # CONFIG_BACKLIGHT_GENERIC is not set
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
 CONFIG_HIDRAW=y
 CONFIG_HID_PID=y
 CONFIG_USB_HIDDEV=y
 CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_MON=m
-CONFIG_USB_WUSB_CBAF=m
-CONFIG_USB_XHCI_HCD=m
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_ONESHOT=y
-CONFIG_LEDS_TRIGGER_DISK=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_BACKLIGHT=m
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 CONFIG_UIO=y
 CONFIG_UIO_PDRV_GENIRQ=m
 CONFIG_UIO_AEC=m
 CONFIG_UIO_SERCOS3=m
 CONFIG_UIO_PCI_GENERIC=m
 CONFIG_STAGING=y
+CONFIG_QLGE=m
+CONFIG_HP100=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_SECURITY=y
@@ -272,14 +245,6 @@
 CONFIG_NLS_ISO8859_1=m
 CONFIG_NLS_ISO8859_2=m
 CONFIG_NLS_UTF8=m
-CONFIG_PRINTK_TIME=y
-CONFIG_STRIP_ASM_SYMS=y
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-# CONFIG_SCHED_DEBUG is not set
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
@@ -292,3 +257,10 @@
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC_CCITT=m
 CONFIG_LIBCRC32C=y
+CONFIG_PRINTK_TIME=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index e03e3c8..2f4f66a 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -173,23 +173,6 @@ struct compat_shmid64_ds {
 #define COMPAT_ELF_NGREG 80
 typedef compat_ulong_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
 
-/*
- * A pointer passed in from user mode. This should not
- * be used for syscall parameters, just declare them
- * as pointers because the syscall entry code will have
- * appropriately converted them already.
- */
-
-static inline void __user *compat_ptr(compat_uptr_t uptr)
-{
-	return (void __user *)(unsigned long)uptr;
-}
-
-static inline compat_uptr_t ptr_to_compat(void __user *uptr)
-{
-	return (u32)(unsigned long)uptr;
-}
-
 static __inline__ void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = &current->thread.regs;
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 53a21ce..e320bae 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -151,10 +151,6 @@ void __init setup_arch(char **cmdline_p)
 	dma_ops_init();
 #endif
 
-#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
-	conswitchp = &dummy_con;	/* we use do_take_over_console() later ! */
-#endif
-
 	clear_sched_clock_stable();
 }
 
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 285ff51..52a15f5 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -433,3 +433,5 @@
 433	common	fspick				sys_fspick
 434	common	pidfd_open			sys_pidfd_open
 435	common	clone3				sys_clone3_wrapper
+437	common	openat2				sys_openat2
+438	common	pidfd_getfd			sys_pidfd_getfd
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 354cf06..5224fb3 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -351,7 +351,6 @@ static void __init map_pages(unsigned long start_vaddr,
 			     unsigned long start_paddr, unsigned long size,
 			     pgprot_t pgprot, int force)
 {
-	pgd_t *pg_dir;
 	pmd_t *pmd;
 	pte_t *pg_table;
 	unsigned long end_paddr;
@@ -372,62 +371,37 @@ static void __init map_pages(unsigned long start_vaddr,
 
 	end_paddr = start_paddr + size;
 
-	pg_dir = pgd_offset_k(start_vaddr);
-
-#if PTRS_PER_PMD == 1
-	start_pmd = 0;
-#else
+	/* for 2-level configuration PTRS_PER_PMD is 0 so start_pmd will be 0 */
 	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-#endif
 	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 
 	address = start_paddr;
 	vaddr = start_vaddr;
 	while (address < end_paddr) {
-#if PTRS_PER_PMD == 1
-		pmd = (pmd_t *)__pa(pg_dir);
-#else
-		pmd = (pmd_t *)pgd_address(*pg_dir);
+		pgd_t *pgd = pgd_offset_k(vaddr);
+		p4d_t *p4d = p4d_offset(pgd, vaddr);
+		pud_t *pud = pud_offset(p4d, vaddr);
 
-		/*
-		 * pmd is physical at this point
-		 */
-
-		if (!pmd) {
+#if CONFIG_PGTABLE_LEVELS == 3
+		if (pud_none(*pud)) {
 			pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
 					     PAGE_SIZE << PMD_ORDER);
 			if (!pmd)
 				panic("pmd allocation failed.\n");
-			pmd = (pmd_t *) __pa(pmd);
+			pud_populate(NULL, pud, pmd);
 		}
-
-		pud_populate(NULL, (pud_t *)pg_dir, __va(pmd));
 #endif
-		pg_dir++;
 
-		/* now change pmd to kernel virtual addresses */
-
-		pmd = (pmd_t *)__va(pmd) + start_pmd;
+		pmd = pmd_offset(pud, vaddr);
 		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
-
-			/*
-			 * pg_table is physical at this point
-			 */
-
-			pg_table = (pte_t *)pmd_address(*pmd);
-			if (!pg_table) {
-				pg_table = memblock_alloc(PAGE_SIZE,
-							  PAGE_SIZE);
+			if (pmd_none(*pmd)) {
+				pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 				if (!pg_table)
 					panic("page table allocation failed\n");
-				pg_table = (pte_t *) __pa(pg_table);
+				pmd_populate_kernel(NULL, pmd, pg_table);
 			}
 
-			pmd_populate_kernel(NULL, pmd, __va(pg_table));
-
-			/* now change pg_table to kernel virtual addresses */
-
-			pg_table = (pte_t *) __va(pg_table) + start_pte;
+			pg_table = pte_offset_kernel(pmd, vaddr);
 			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
 				pte_t pte;
 				pgprot_t prot;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e7c6070..497b7d0b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -1,10 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 source "arch/powerpc/platforms/Kconfig.cputype"
 
-config PPC32
-	bool
-	default y if !PPC64
-
 config 32BIT
 	bool
 	default y if PPC32
@@ -133,7 +129,7 @@
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_MEMBARRIER_CALLBACKS
 	select ARCH_HAS_SCALED_CPUTIME		if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
-	select ARCH_HAS_STRICT_KERNEL_RWX	if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION)
+	select ARCH_HAS_STRICT_KERNEL_RWX	if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
 	select ARCH_HAS_TICK_BROADCAST		if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_UACCESS_FLUSHCACHE
 	select ARCH_HAS_UACCESS_MCSAFE		if PPC64
@@ -173,6 +169,7 @@
 	select HAVE_ARCH_HUGE_VMAP		if PPC_BOOK3S_64 && PPC_RADIX_MMU
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KASAN			if PPC32
+	select HAVE_ARCH_KASAN_VMALLOC		if PPC32
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if COMPAT
@@ -222,9 +219,8 @@
 	select HAVE_HARDLOCKUP_DETECTOR_PERF	if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
-	select HAVE_RCU_TABLE_FREE		if SMP
-	select HAVE_RCU_TABLE_NO_INVALIDATE	if HAVE_RCU_TABLE_FREE
-	select HAVE_MMU_GATHER_PAGE_SIZE
+	select MMU_GATHER_RCU_TABLE_FREE
+	select MMU_GATHER_PAGE_SIZE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE		if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
 	select HAVE_SYSCALL_TRACEPOINTS
@@ -238,6 +234,7 @@
 	select NEED_DMA_MAP_STATE		if PPC64 || NOT_COHERENT_CACHE
 	select NEED_SG_DMA_LENGTH
 	select OF
+	select OF_DMA_DEFAULT_COHERENT		if !NOT_COHERENT_CACHE
 	select OF_EARLY_FLATTREE
 	select OLD_SIGACTION			if PPC32
 	select OLD_SIGSUSPEND
@@ -482,7 +479,7 @@
 config HOTPLUG_CPU
 	bool "Support for enabling/disabling CPUs"
 	depends on SMP && (PPC_PSERIES || \
-	PPC_PMAC || PPC_POWERNV || FSL_SOC_BOOKE)
+		PPC_PMAC || PPC_POWERNV || FSL_SOC_BOOKE)
 	help
 	  Say Y here to be able to disable and re-enable individual
 	  CPUs at runtime on SMP machines.
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 4e1d398..0b06383 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -371,7 +371,7 @@
 
 config PPC_DEBUG_WX
 	bool "Warn on W+X mappings at boot"
-	depends on PPC_PTDUMP
+	depends on PPC_PTDUMP && STRICT_KERNEL_RWX
 	help
 	  Generate a warning if any W+X mappings are found at boot.
 
diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink
index 134f12f..2268396 100644
--- a/arch/powerpc/Makefile.postlink
+++ b/arch/powerpc/Makefile.postlink
@@ -17,11 +17,11 @@
 quiet_cmd_relocs_check = CHKREL  $@
 ifdef CONFIG_PPC_BOOK3S_64
       cmd_relocs_check =						\
-	$(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@" ; \
+	$(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@" ; \
 	$(BASH) $(srctree)/arch/powerpc/tools/unrel_branch_check.sh "$(OBJDUMP)" "$@"
 else
       cmd_relocs_check =						\
-	$(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@"
+	$(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@"
 endif
 
 # `@true` prevents complaint when there is nothing to be done
diff --git a/arch/powerpc/boot/4xx.c b/arch/powerpc/boot/4xx.c
index 1699e95..00c4d84 100644
--- a/arch/powerpc/boot/4xx.c
+++ b/arch/powerpc/boot/4xx.c
@@ -228,7 +228,7 @@ void ibm4xx_denali_fixup_memsize(void)
 		dpath = 8; /* 64 bits */
 
 	/* get address pins (rows) */
- 	val = SDRAM0_READ(DDR0_42);
+	val = SDRAM0_READ(DDR0_42);
 
 	row = DDR_GET_VAL(val, DDR_APIN, DDR_APIN_SHIFT);
 	if (row > max_row)
diff --git a/arch/powerpc/boot/dts/mgcoge.dts b/arch/powerpc/boot/dts/mgcoge.dts
index a2dd5f1..7de0689 100644
--- a/arch/powerpc/boot/dts/mgcoge.dts
+++ b/arch/powerpc/boot/dts/mgcoge.dts
@@ -224,7 +224,7 @@ spi@11aa0 {
 				reg = <0x11a80 0x40 0x89fc 0x2>;
 				interrupts = <2 8>;
 				interrupt-parent = <&PIC>;
-				gpios = < &cpm2_pio_d 19 0>;
+				cs-gpios = < &cpm2_pio_d 19 0>;
 				#address-cells = <1>;
 				#size-cells = <0>;
 				ds3106@1 {
diff --git a/arch/powerpc/boot/dts/mpc832x_rdb.dts b/arch/powerpc/boot/dts/mpc832x_rdb.dts
index b625718..ecebc27 100644
--- a/arch/powerpc/boot/dts/mpc832x_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc832x_rdb.dts
@@ -249,7 +249,7 @@ spi@4c0 {
 			reg = <0x4c0 0x40>;
 			interrupts = <2>;
 			interrupt-parent = <&qeic>;
-			gpios = <&qe_pio_d 13 0>;
+			cs-gpios = <&qe_pio_d 13 0>;
 			mode = "cpu-qe";
 
 			mmc-slot@0 {
diff --git a/arch/powerpc/boot/dts/mpc8610_hpcd.dts b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
index 1a8321a..33bbe58 100644
--- a/arch/powerpc/boot/dts/mpc8610_hpcd.dts
+++ b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
@@ -200,7 +200,7 @@ spi@7000 {
 			interrupts = <59 2>;
 			interrupt-parent = <&mpic>;
 			mode = "cpu";
-			gpios = <&sdcsr_pio 7 0>;
+			cs-gpios = <&sdcsr_pio 7 0>;
 			sleep = <&pmc 0x00000800 0>;
 
 			mmc-slot@0 {
diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig
index f0c8a07..7705a5c 100644
--- a/arch/powerpc/configs/44x/akebono_defconfig
+++ b/arch/powerpc/configs/44x/akebono_defconfig
@@ -59,7 +59,6 @@
 # CONFIG_NET_VENDOR_DLINK is not set
 # CONFIG_NET_VENDOR_EMULEX is not set
 # CONFIG_NET_VENDOR_EXAR is not set
-# CONFIG_NET_VENDOR_HP is not set
 CONFIG_IBM_EMAC=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MELLANOX is not set
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index ed02f12..22dc0da 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -10,8 +10,6 @@
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_AMIGA_PARTITION=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_EBONY is not set
 CONFIG_SAM440EP=y
 CONFIG_CMDLINE_BOOL=y
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index fdb11da..789622f 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -14,8 +14,6 @@
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_PPC_CHRP is not set
 CONFIG_PPC_MPC52xx=y
 CONFIG_PPC_MPC5200_SIMPLE=y
diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig
index 648c6b3..24bf1bd 100644
--- a/arch/powerpc/configs/83xx/kmeter1_defconfig
+++ b/arch/powerpc/configs/83xx/kmeter1_defconfig
@@ -11,8 +11,6 @@
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_MSDOS_PARTITION is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_PPC_CHRP is not set
 # CONFIG_PPC_PMAC is not set
 CONFIG_PPC_83xx=y
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index 510f7fd..f55e23c 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -9,7 +9,6 @@
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_PPC_ADDER875=y
 CONFIG_8xx_COPYBACK=y
 CONFIG_GEN_RTC=y
diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig
index 6e08d95..00d6996 100644
--- a/arch/powerpc/configs/ep8248e_defconfig
+++ b/arch/powerpc/configs/ep8248e_defconfig
@@ -6,7 +6,6 @@
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
 CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_PPC_CHRP is not set
 # CONFIG_PPC_PMAC is not set
 CONFIG_PPC_82xx=y
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index 9c1bf60..0e2e5e8 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -11,7 +11,6 @@
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_PPC_EP88XC=y
 CONFIG_8xx_COPYBACK=y
 CONFIG_GEN_RTC=y
diff --git a/arch/powerpc/configs/guest.config b/arch/powerpc/configs/guest.config
index 8b8cd18..209f585 100644
--- a/arch/powerpc/configs/guest.config
+++ b/arch/powerpc/configs/guest.config
@@ -1,5 +1,4 @@
 CONFIG_VIRTIO_BLK=y
-CONFIG_VIRTIO_BLK_SCSI=y
 CONFIG_SCSI_VIRTIO=y
 CONFIG_VIRTIO_NET=y
 CONFIG_NET_FAILOVER=y
diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig
index 6ce4f20..dcc8dcc 100644
--- a/arch/powerpc/configs/mgcoge_defconfig
+++ b/arch/powerpc/configs/mgcoge_defconfig
@@ -12,7 +12,6 @@
 CONFIG_EMBEDDED=y
 CONFIG_SLAB=y
 CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_PPC_PMAC is not set
 CONFIG_PPC_82xx=y
 CONFIG_MGCOGE=y
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig
index 1f3a045..e39346b 100644
--- a/arch/powerpc/configs/mpc512x_defconfig
+++ b/arch/powerpc/configs/mpc512x_defconfig
@@ -9,7 +9,6 @@
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_PPC_CHRP is not set
 CONFIG_PPC_MPC512x=y
 CONFIG_MPC512x_LPBFIFO=y
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 0327a32..82a008c 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -11,7 +11,6 @@
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_8xx_COPYBACK=y
 CONFIG_GEN_RTC=y
 CONFIG_HZ_100=y
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 3284145..7174937 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -181,7 +181,6 @@
 CONFIG_MLX5_CORE_EN=y
 CONFIG_MLX5_CORE_IPOIB=y
 CONFIG_MYRI10GE=m
-CONFIG_QLGE=m
 CONFIG_NETXEN_NIC=m
 CONFIG_USB_NET_DRIVERS=m
 # CONFIG_WLAN is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index b250e6f..7e68cb2 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -189,7 +189,6 @@
 CONFIG_MYRI10GE=m
 CONFIG_S2IO=m
 CONFIG_PASEMI_MAC=y
-CONFIG_QLGE=m
 CONFIG_NETXEN_NIC=m
 CONFIG_SUNGEM=y
 CONFIG_GELIC_NET=m
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 7e28919..3e2f44f 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -507,7 +507,6 @@
 CONFIG_HAMACHI=m
 CONFIG_YELLOWFIN=m
 CONFIG_QLA3XXX=m
-CONFIG_QLGE=m
 CONFIG_NETXEN_NIC=m
 CONFIG_8139CP=m
 CONFIG_8139TOO=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 26126b4..6b68109 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -169,7 +169,6 @@
 CONFIG_I40E=m
 CONFIG_MLX4_EN=m
 CONFIG_MYRI10GE=m
-CONFIG_QLGE=m
 CONFIG_NETXEN_NIC=m
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 069f67f..1b6bdad 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -1,8 +1,3 @@
-CONFIG_PPC64=y
-CONFIG_ALTIVEC=y
-CONFIG_VSX=y
-CONFIG_NR_CPUS=2048
-CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_KERNEL_XZ=y
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
@@ -28,17 +23,15 @@
 # CONFIG_AIO is not set
 CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
 CONFIG_SLAB_FREELIST_HARDENED=y
-CONFIG_JUMP_LABEL=y
-CONFIG_STRICT_KERNEL_RWX=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_SIG=y
-CONFIG_MODULE_SIG_FORCE=y
-CONFIG_MODULE_SIG_SHA512=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MQ_IOSCHED_DEADLINE is not set
-# CONFIG_MQ_IOSCHED_KYBER is not set
+CONFIG_PPC64=y
+CONFIG_ALTIVEC=y
+CONFIG_VSX=y
+CONFIG_NR_CPUS=2048
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_PANIC_TIMEOUT=30
 # CONFIG_PPC_VAS is not set
 # CONFIG_PPC_PSERIES is not set
 # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
@@ -46,17 +39,27 @@
 CONFIG_CPU_IDLE=y
 CONFIG_HZ_100=y
 CONFIG_KEXEC=y
+CONFIG_KEXEC_FILE=y
 CONFIG_PRESERVE_FA_DUMP=y
 CONFIG_IRQ_ALL_CPUS=y
 CONFIG_NUMA=y
-# CONFIG_COMPACTION is not set
-# CONFIG_MIGRATION is not set
 CONFIG_PPC_64K_PAGES=y
 CONFIG_SCHED_SMT=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="console=tty0 console=hvc0 ipr.fast_reboot=1 quiet"
 # CONFIG_SECCOMP is not set
 # CONFIG_PPC_MEM_KEYS is not set
+CONFIG_JUMP_LABEL=y
+CONFIG_STRICT_KERNEL_RWX=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MQ_IOSCHED_DEADLINE is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
+# CONFIG_COMPACTION is not set
+# CONFIG_MIGRATION is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -138,7 +141,6 @@
 CONFIG_BNX2X=m
 # CONFIG_NET_VENDOR_BROCADE is not set
 # CONFIG_NET_VENDOR_CADENCE is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CAVIUM is not set
 CONFIG_CHELSIO_T1=m
 # CONFIG_NET_VENDOR_CISCO is not set
@@ -147,7 +149,6 @@
 # CONFIG_NET_VENDOR_DLINK is not set
 CONFIG_BE2NET=m
 # CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 CONFIG_E1000=m
 CONFIG_E1000E=m
@@ -155,7 +156,6 @@
 CONFIG_IXGB=m
 CONFIG_IXGBE=m
 CONFIG_I40E=m
-CONFIG_S2IO=m
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 # CONFIG_MLX4_CORE_GEN2 is not set
@@ -166,12 +166,12 @@
 # CONFIG_NET_VENDOR_MICROSEMI is not set
 CONFIG_MYRI10GE=m
 # CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_S2IO=m
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
 # CONFIG_NET_VENDOR_NVIDIA is not set
 # CONFIG_NET_VENDOR_OKI is not set
 # CONFIG_NET_VENDOR_PACKET_ENGINES is not set
-CONFIG_QLGE=m
 CONFIG_NETXEN_NIC=m
 CONFIG_QED=m
 CONFIG_QEDE=m
@@ -238,7 +238,6 @@
 CONFIG_HID_EZKEY=y
 CONFIG_HID_ITE=y
 CONFIG_HID_KENSINGTON=y
-CONFIG_HID_LOGITECH=y
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_MONTEREY=y
 CONFIG_USB_HIDDEV=y
@@ -275,6 +274,18 @@
 CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=y
+CONFIG_ENCRYPTED_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+# CONFIG_HARDENED_USERCOPY_FALLBACK is not set
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_LOCKDOWN_LSM=y
+CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
+CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY=y
+# CONFIG_INTEGRITY is not set
+CONFIG_LSM="yama,loadpin,safesetid,integrity"
+# CONFIG_CRYPTO_HW is not set
 CONFIG_CRC16=y
 CONFIG_CRC_ITU_T=y
 CONFIG_LIBCRC32C=y
@@ -285,17 +296,20 @@
 # CONFIG_XZ_DEC_SPARC is not set
 CONFIG_PRINTK_TIME=y
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_PANIC_ON_OOPS=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
 CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
 CONFIG_WQ_WATCHDOG=y
 # CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_DEBUG_CREDENTIALS=y
 # CONFIG_FTRACE is not set
-# CONFIG_RUNTIME_TESTING_MENU is not set
 CONFIG_XMON=y
-CONFIG_XMON_DEFAULT=y
-CONFIG_ENCRYPTED_KEYS=y
-# CONFIG_CRYPTO_ECHAINIV is not set
-# CONFIG_CRYPTO_HW is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index 29b19ec..b964084 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -77,5 +77,4 @@
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=y
 CONFIG_CRC_T10DIF=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index ffed2b42..eda8bfb 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -14,7 +14,6 @@
 CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_TQM8XX=y
 CONFIG_8xx_COPYBACK=y
 # CONFIG_8xx_CPU15 is not set
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
index a09595f..9a53e296 100644
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -6,27 +6,28 @@
 
 #include <asm/machdep.h>
 
-static inline int arch_get_random_long(unsigned long *v)
+static inline bool __must_check arch_get_random_long(unsigned long *v)
 {
-	return 0;
+	return false;
 }
 
-static inline int arch_get_random_int(unsigned int *v)
+static inline bool __must_check arch_get_random_int(unsigned int *v)
 {
-	return 0;
+	return false;
 }
 
-static inline int arch_get_random_seed_long(unsigned long *v)
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
 	if (ppc_md.get_random_seed)
 		return ppc_md.get_random_seed(v);
 
-	return 0;
+	return false;
 }
-static inline int arch_get_random_seed_int(unsigned int *v)
+
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
 	unsigned long val;
-	int rc;
+	bool rc;
 
 	rc = arch_get_random_seed_long(&val);
 	if (rc)
@@ -34,16 +35,6 @@ static inline int arch_get_random_seed_int(unsigned int *v)
 
 	return rc;
 }
-
-static inline int arch_has_random(void)
-{
-	return 0;
-}
-
-static inline int arch_has_random_seed(void)
-{
-	return !!ppc_md.get_random_seed;
-}
 #endif /* CONFIG_ARCH_RANDOM */
 
 #ifdef CONFIG_PPC_POWERNV
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index f9dc597..3c0ba22 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -102,41 +102,91 @@ static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
 	isync();	/* Context sync required after mtsrin() */
 }
 
-static inline void allow_user_access(void __user *to, const void __user *from, u32 size)
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+					      u32 size, unsigned long dir)
 {
 	u32 addr, end;
 
-	if (__builtin_constant_p(to) && to == NULL)
+	BUILD_BUG_ON(!__builtin_constant_p(dir));
+	BUILD_BUG_ON(dir == KUAP_CURRENT);
+
+	if (!(dir & KUAP_WRITE))
 		return;
 
 	addr = (__force u32)to;
 
-	if (!addr || addr >= TASK_SIZE || !size)
+	if (unlikely(addr >= TASK_SIZE || !size))
 		return;
 
 	end = min(addr + size, TASK_SIZE);
+
 	current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
 	kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end);	/* Clear Ks */
 }
 
-static inline void prevent_user_access(void __user *to, const void __user *from, u32 size)
+static __always_inline void prevent_user_access(void __user *to, const void __user *from,
+						u32 size, unsigned long dir)
 {
-	u32 addr = (__force u32)to;
-	u32 end = min(addr + size, TASK_SIZE);
+	u32 addr, end;
 
-	if (!addr || addr >= TASK_SIZE || !size)
+	BUILD_BUG_ON(!__builtin_constant_p(dir));
+
+	if (dir == KUAP_CURRENT) {
+		u32 kuap = current->thread.kuap;
+
+		if (unlikely(!kuap))
+			return;
+
+		addr = kuap & 0xf0000000;
+		end = kuap << 28;
+	} else if (dir & KUAP_WRITE) {
+		addr = (__force u32)to;
+		end = min(addr + size, TASK_SIZE);
+
+		if (unlikely(addr >= TASK_SIZE || !size))
+			return;
+	} else {
 		return;
+	}
 
 	current->thread.kuap = 0;
 	kuap_update_sr(mfsrin(addr) | SR_KS, addr, end);	/* set Ks */
 }
 
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline unsigned long prevent_user_access_return(void)
 {
+	unsigned long flags = current->thread.kuap;
+	unsigned long addr = flags & 0xf0000000;
+	unsigned long end = flags << 28;
+	void __user *to = (__force void __user *)addr;
+
+	if (flags)
+		prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);
+
+	return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
+{
+	unsigned long addr = flags & 0xf0000000;
+	unsigned long end = flags << 28;
+	void __user *to = (__force void __user *)addr;
+
+	if (flags)
+		allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
+}
+
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+	unsigned long begin = regs->kuap & 0xf0000000;
+	unsigned long end = regs->kuap << 28;
+
 	if (!is_write)
 		return false;
 
-	return WARN(!regs->kuap, "Bug: write fault blocked by segment registers !");
+	return WARN(address < begin || address >= end,
+		    "Bug: write fault blocked by segment registers !");
 }
 
 #endif /* CONFIG_PPC_KUAP */
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 9983177..dc5c039 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -49,7 +49,6 @@ static inline void pgtable_free(void *table, unsigned index_size)
 
 #define get_hugepd_cache_index(x)  (x)
 
-#ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 				    void *table, int shift)
 {
@@ -66,13 +65,6 @@ static inline void __tlb_remove_table(void *_table)
 
 	pgtable_free(table, shift);
 }
-#else
-static inline void pgtable_free_tlb(struct mmu_gather *tlb,
-				    void *table, int shift)
-{
-	pgtable_free(table, shift);
-}
-#endif
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 0796533..5b39c11 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -193,7 +193,12 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 #else
 #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
 #endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END	_ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
 #define VMALLOC_END	ioremap_bot
+#endif
 
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
index f254de9..90dd3a3 100644
--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -63,6 +63,14 @@
  * because that would require an expensive read/modify write of the AMR.
  */
 
+static inline unsigned long get_kuap(void)
+{
+	if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
+		return 0;
+
+	return mfspr(SPRN_AMR);
+}
+
 static inline void set_kuap(unsigned long value)
 {
 	if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
@@ -77,25 +85,43 @@ static inline void set_kuap(unsigned long value)
 	isync();
 }
 
-static inline void allow_user_access(void __user *to, const void __user *from,
-				     unsigned long size)
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+					      unsigned long size, unsigned long dir)
 {
 	// This is written so we can resolve to a single case at build time
-	if (__builtin_constant_p(to) && to == NULL)
+	BUILD_BUG_ON(!__builtin_constant_p(dir));
+	if (dir == KUAP_READ)
 		set_kuap(AMR_KUAP_BLOCK_WRITE);
-	else if (__builtin_constant_p(from) && from == NULL)
+	else if (dir == KUAP_WRITE)
 		set_kuap(AMR_KUAP_BLOCK_READ);
-	else
+	else if (dir == KUAP_READ_WRITE)
 		set_kuap(0);
+	else
+		BUILD_BUG();
 }
 
 static inline void prevent_user_access(void __user *to, const void __user *from,
-				       unsigned long size)
+				       unsigned long size, unsigned long dir)
 {
 	set_kuap(AMR_KUAP_BLOCKED);
 }
 
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline unsigned long prevent_user_access_return(void)
+{
+	unsigned long flags = get_kuap();
+
+	set_kuap(AMR_KUAP_BLOCKED);
+
+	return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
+{
+	set_kuap(flags);
+}
+
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
 		    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index f6968c8..a41e91b 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -19,9 +19,7 @@ extern struct vmemmap_backing *vmemmap_list;
 extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
 extern void pmd_fragment_free(unsigned long *);
 extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
-#ifdef CONFIG_SMP
 extern void __tlb_remove_table(void *_table);
-#endif
 void pte_frag_destroy(void *pte_frag);
 
 static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index b01624e..201a69e 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1355,18 +1355,21 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
  * Like pmd_huge() and pmd_large(), but works regardless of config options
  */
 #define pmd_is_leaf pmd_is_leaf
+#define pmd_leaf pmd_is_leaf
 static inline bool pmd_is_leaf(pmd_t pmd)
 {
 	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
 }
 
 #define pud_is_leaf pud_is_leaf
+#define pud_leaf pud_is_leaf
 static inline bool pud_is_leaf(pud_t pud)
 {
 	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
 }
 
 #define pgd_is_leaf pgd_is_leaf
+#define pgd_leaf pgd_is_leaf
 static inline bool pgd_is_leaf(pgd_t pgd)
 {
 	return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 74d0db5..3e3cdfa 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -96,23 +96,6 @@ typedef u32		compat_sigset_word;
 
 #define COMPAT_OFF_T_MAX	0x7fffffff
 
-/*
- * A pointer passed in from user mode. This should not
- * be used for syscall parameters, just declare them
- * as pointers because the syscall entry code will have
- * appropriately converted them already.
- */
-
-static inline void __user *compat_ptr(compat_uptr_t uptr)
-{
-	return (void __user *)(unsigned long)uptr;
-}
-
-static inline compat_uptr_t ptr_to_compat(void __user *uptr)
-{
-	return (u32)(unsigned long)uptr;
-}
-
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = current->thread.regs;
diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h
index 4c24ea8..ce483b0f 100644
--- a/arch/powerpc/include/asm/cpm.h
+++ b/arch/powerpc/include/asm/cpm.h
@@ -1,171 +1 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __CPM_H
-#define __CPM_H
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/of.h>
-#include <soc/fsl/qe/qe.h>
-
-/*
- * SPI Parameter RAM common to QE and CPM.
- */
-struct spi_pram {
-	__be16	rbase;	/* Rx Buffer descriptor base address */
-	__be16	tbase;	/* Tx Buffer descriptor base address */
-	u8	rfcr;	/* Rx function code */
-	u8	tfcr;	/* Tx function code */
-	__be16	mrblr;	/* Max receive buffer length */
-	__be32	rstate;	/* Internal */
-	__be32	rdp;	/* Internal */
-	__be16	rbptr;	/* Internal */
-	__be16	rbc;	/* Internal */
-	__be32	rxtmp;	/* Internal */
-	__be32	tstate;	/* Internal */
-	__be32	tdp;	/* Internal */
-	__be16	tbptr;	/* Internal */
-	__be16	tbc;	/* Internal */
-	__be32	txtmp;	/* Internal */
-	__be32	res;	/* Tx temp. */
-	__be16  rpbase;	/* Relocation pointer (CPM1 only) */
-	__be16	res1;	/* Reserved */
-};
-
-/*
- * USB Controller pram common to QE and CPM.
- */
-struct usb_ctlr {
-	u8	usb_usmod;
-	u8	usb_usadr;
-	u8	usb_uscom;
-	u8	res1[1];
-	__be16	usb_usep[4];
-	u8	res2[4];
-	__be16	usb_usber;
-	u8	res3[2];
-	__be16	usb_usbmr;
-	u8	res4[1];
-	u8	usb_usbs;
-	/* Fields down below are QE-only */
-	__be16	usb_ussft;
-	u8	res5[2];
-	__be16	usb_usfrn;
-	u8	res6[0x22];
-} __attribute__ ((packed));
-
-/*
- * Function code bits, usually generic to devices.
- */
-#ifdef CONFIG_CPM1
-#define CPMFCR_GBL	((u_char)0x00)	/* Flag doesn't exist in CPM1 */
-#define CPMFCR_TC2	((u_char)0x00)	/* Flag doesn't exist in CPM1 */
-#define CPMFCR_DTB	((u_char)0x00)	/* Flag doesn't exist in CPM1 */
-#define CPMFCR_BDB	((u_char)0x00)	/* Flag doesn't exist in CPM1 */
-#else
-#define CPMFCR_GBL	((u_char)0x20)	/* Set memory snooping */
-#define CPMFCR_TC2	((u_char)0x04)	/* Transfer code 2 value */
-#define CPMFCR_DTB	((u_char)0x02)	/* Use local bus for data when set */
-#define CPMFCR_BDB	((u_char)0x01)	/* Use local bus for BD when set */
-#endif
-#define CPMFCR_EB	((u_char)0x10)	/* Set big endian byte order */
-
-/* Opcodes common to CPM1 and CPM2
-*/
-#define CPM_CR_INIT_TRX		((ushort)0x0000)
-#define CPM_CR_INIT_RX		((ushort)0x0001)
-#define CPM_CR_INIT_TX		((ushort)0x0002)
-#define CPM_CR_HUNT_MODE	((ushort)0x0003)
-#define CPM_CR_STOP_TX		((ushort)0x0004)
-#define CPM_CR_GRA_STOP_TX	((ushort)0x0005)
-#define CPM_CR_RESTART_TX	((ushort)0x0006)
-#define CPM_CR_CLOSE_RX_BD	((ushort)0x0007)
-#define CPM_CR_SET_GADDR	((ushort)0x0008)
-#define CPM_CR_SET_TIMER	((ushort)0x0008)
-#define CPM_CR_STOP_IDMA	((ushort)0x000b)
-
-/* Buffer descriptors used by many of the CPM protocols. */
-typedef struct cpm_buf_desc {
-	ushort	cbd_sc;		/* Status and Control */
-	ushort	cbd_datlen;	/* Data length in buffer */
-	uint	cbd_bufaddr;	/* Buffer address in host memory */
-} cbd_t;
-
-/* Buffer descriptor control/status used by serial
- */
-
-#define BD_SC_EMPTY	(0x8000)	/* Receive is empty */
-#define BD_SC_READY	(0x8000)	/* Transmit is ready */
-#define BD_SC_WRAP	(0x2000)	/* Last buffer descriptor */
-#define BD_SC_INTRPT	(0x1000)	/* Interrupt on change */
-#define BD_SC_LAST	(0x0800)	/* Last buffer in frame */
-#define BD_SC_TC	(0x0400)	/* Transmit CRC */
-#define BD_SC_CM	(0x0200)	/* Continuous mode */
-#define BD_SC_ID	(0x0100)	/* Rec'd too many idles */
-#define BD_SC_P		(0x0100)	/* xmt preamble */
-#define BD_SC_BR	(0x0020)	/* Break received */
-#define BD_SC_FR	(0x0010)	/* Framing error */
-#define BD_SC_PR	(0x0008)	/* Parity error */
-#define BD_SC_NAK	(0x0004)	/* NAK - did not respond */
-#define BD_SC_OV	(0x0002)	/* Overrun */
-#define BD_SC_UN	(0x0002)	/* Underrun */
-#define BD_SC_CD	(0x0001)	/* */
-#define BD_SC_CL	(0x0001)	/* Collision */
-
-/* Buffer descriptor control/status used by Ethernet receive.
- * Common to SCC and FCC.
- */
-#define BD_ENET_RX_EMPTY	(0x8000)
-#define BD_ENET_RX_WRAP		(0x2000)
-#define BD_ENET_RX_INTR		(0x1000)
-#define BD_ENET_RX_LAST		(0x0800)
-#define BD_ENET_RX_FIRST	(0x0400)
-#define BD_ENET_RX_MISS		(0x0100)
-#define BD_ENET_RX_BC		(0x0080)	/* FCC Only */
-#define BD_ENET_RX_MC		(0x0040)	/* FCC Only */
-#define BD_ENET_RX_LG		(0x0020)
-#define BD_ENET_RX_NO		(0x0010)
-#define BD_ENET_RX_SH		(0x0008)
-#define BD_ENET_RX_CR		(0x0004)
-#define BD_ENET_RX_OV		(0x0002)
-#define BD_ENET_RX_CL		(0x0001)
-#define BD_ENET_RX_STATS	(0x01ff)	/* All status bits */
-
-/* Buffer descriptor control/status used by Ethernet transmit.
- * Common to SCC and FCC.
- */
-#define BD_ENET_TX_READY	(0x8000)
-#define BD_ENET_TX_PAD		(0x4000)
-#define BD_ENET_TX_WRAP		(0x2000)
-#define BD_ENET_TX_INTR		(0x1000)
-#define BD_ENET_TX_LAST		(0x0800)
-#define BD_ENET_TX_TC		(0x0400)
-#define BD_ENET_TX_DEF		(0x0200)
-#define BD_ENET_TX_HB		(0x0100)
-#define BD_ENET_TX_LC		(0x0080)
-#define BD_ENET_TX_RL		(0x0040)
-#define BD_ENET_TX_RCMASK	(0x003c)
-#define BD_ENET_TX_UN		(0x0002)
-#define BD_ENET_TX_CSL		(0x0001)
-#define BD_ENET_TX_STATS	(0x03ff)	/* All status bits */
-
-/* Buffer descriptor control/status used by Transparent mode SCC.
- */
-#define BD_SCC_TX_LAST		(0x0800)
-
-/* Buffer descriptor control/status used by I2C.
- */
-#define BD_I2C_START		(0x0400)
-
-#ifdef CONFIG_CPM
-int cpm_command(u32 command, u8 opcode);
-#else
-static inline int cpm_command(u32 command, u8 opcode)
-{
-	return -ENOSYS;
-}
-#endif /* CONFIG_CPM */
-
-int cpm2_gpiochip_add32(struct device *dev);
-
-#endif
+#include <soc/fsl/cpm.h>
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index cf00ff0..40a4d3c 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -212,6 +212,7 @@ static inline void cpu_feature_keys_init(void) { }
 #define CPU_FTR_P9_TLBIE_STQ_BUG	LONG_ASM_CONST(0x0000400000000000)
 #define CPU_FTR_P9_TIDR			LONG_ASM_CONST(0x0000800000000000)
 #define CPU_FTR_P9_TLBIE_ERAT_BUG	LONG_ASM_CONST(0x0001000000000000)
+#define CPU_FTR_P9_RADIX_PREFETCH_BUG	LONG_ASM_CONST(0x0002000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -459,8 +460,10 @@ static inline void cpu_feature_keys_init(void) { }
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
 	    CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
 	    CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TLBIE_ERAT_BUG | CPU_FTR_P9_TIDR)
-#define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
-#define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
+#define CPU_FTRS_POWER9_DD2_0 (CPU_FTRS_POWER9 | CPU_FTR_P9_RADIX_PREFETCH_BUG)
+#define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | \
+			       CPU_FTR_P9_RADIX_PREFETCH_BUG | \
+			       CPU_FTR_POWER9_DD2_1)
 #define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
 			       CPU_FTR_P9_TM_HV_ASSIST | \
 			       CPU_FTR_P9_TM_XER_SO_BUG)
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index b3e214a..ca33f4e 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -33,7 +33,7 @@
 #define FW_FEATURE_LLAN		ASM_CONST(0x0000000000010000)
 #define FW_FEATURE_BULK_REMOVE	ASM_CONST(0x0000000000020000)
 #define FW_FEATURE_XDABR	ASM_CONST(0x0000000000040000)
-#define FW_FEATURE_MULTITCE	ASM_CONST(0x0000000000080000)
+#define FW_FEATURE_PUT_TCE_IND	ASM_CONST(0x0000000000080000)
 #define FW_FEATURE_SPLPAR	ASM_CONST(0x0000000000100000)
 #define FW_FEATURE_LPAR		ASM_CONST(0x0000000000400000)
 #define FW_FEATURE_PS3_LV1	ASM_CONST(0x0000000000800000)
@@ -51,6 +51,7 @@
 #define FW_FEATURE_BLOCK_REMOVE ASM_CONST(0x0000001000000000)
 #define FW_FEATURE_PAPR_SCM 	ASM_CONST(0x0000002000000000)
 #define FW_FEATURE_ULTRAVISOR	ASM_CONST(0x0000004000000000)
+#define FW_FEATURE_STUFF_TCE	ASM_CONST(0x0000008000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -63,7 +64,8 @@ enum {
 		FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
 		FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
 		FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
-		FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
+		FW_FEATURE_PUT_TCE_IND | FW_FEATURE_STUFF_TCE |
+		FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
 		FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
 		FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
 		FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN |
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index eea28ca..bc7d9d0 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -35,7 +35,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 {
 	int oldval = 0, ret;
 
-	allow_write_to_user(uaddr, sizeof(*uaddr));
+	allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
 	pagefault_disable();
 
 	switch (op) {
@@ -62,7 +62,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 
 	*oval = oldval;
 
-	prevent_write_to_user(uaddr, sizeof(*uaddr));
+	prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
 	return ret;
 }
 
@@ -76,7 +76,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	allow_write_to_user(uaddr, sizeof(*uaddr));
+	allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
+
         __asm__ __volatile__ (
         PPC_ATOMIC_ENTRY_BARRIER
 "1:     lwarx   %1,0,%3         # futex_atomic_cmpxchg_inatomic\n\
@@ -97,7 +98,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
         : "cc", "memory");
 
 	*uval = prev;
-	prevent_write_to_user(uaddr, sizeof(*uaddr));
+	prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
+
         return ret;
 }
 
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 13bd870..e90c073 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -350,6 +350,7 @@
 #define H_SVM_PAGE_OUT		0xEF04
 #define H_SVM_INIT_START	0xEF08
 #define H_SVM_INIT_DONE		0xEF0C
+#define H_SVM_INIT_ABORT	0xEF14
 
 /* Values for 2nd argument to H_SET_MODE */
 #define H_SET_MODE_RESOURCE_SET_CIABR		1
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 27ac6f5d..f2f8d8a 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -34,7 +34,11 @@ struct arch_hw_breakpoint {
 #define HW_BRK_TYPE_PRIV_ALL	(HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \
 				 HW_BRK_TYPE_HYP)
 
+#ifdef CONFIG_PPC_8xx
+#define HW_BREAKPOINT_ALIGN 0x3
+#else
 #define HW_BREAKPOINT_ALIGN 0x7
+#endif
 
 #define DABR_MAX_LEN	8
 #define DAWR_MAX_LEN	512
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
index 296e51c..fbff9ff 100644
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -31,9 +31,11 @@
 void kasan_early_init(void);
 void kasan_mmu_init(void);
 void kasan_init(void);
+void kasan_late_init(void);
 #else
 static inline void kasan_init(void) { }
 static inline void kasan_mmu_init(void) { }
+static inline void kasan_late_init(void) { }
 #endif
 
 #endif /* __ASSEMBLY */
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index 5b5e396..92bcd1a 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -2,6 +2,16 @@
 #ifndef _ASM_POWERPC_KUP_H_
 #define _ASM_POWERPC_KUP_H_
 
+#define KUAP_READ	1
+#define KUAP_WRITE	2
+#define KUAP_READ_WRITE	(KUAP_READ | KUAP_WRITE)
+/*
+ * For prevent_user_access() only.
+ * Use the current saved situation instead of the to/from/size params.
+ * Used on book3s/32
+ */
+#define KUAP_CURRENT	4
+
 #ifdef CONFIG_PPC64
 #include <asm/book3s/64/kup-radix.h>
 #endif
@@ -42,32 +52,55 @@ void setup_kuap(bool disabled);
 #else
 static inline void setup_kuap(bool disabled) { }
 static inline void allow_user_access(void __user *to, const void __user *from,
-				     unsigned long size) { }
+				     unsigned long size, unsigned long dir) { }
 static inline void prevent_user_access(void __user *to, const void __user *from,
-				       unsigned long size) { }
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write) { return false; }
+				       unsigned long size, unsigned long dir) { }
+static inline unsigned long prevent_user_access_return(void) { return 0UL; }
+static inline void restore_user_access(unsigned long flags) { }
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+	return false;
+}
 #endif /* CONFIG_PPC_KUAP */
 
 static inline void allow_read_from_user(const void __user *from, unsigned long size)
 {
-	allow_user_access(NULL, from, size);
+	allow_user_access(NULL, from, size, KUAP_READ);
 }
 
 static inline void allow_write_to_user(void __user *to, unsigned long size)
 {
-	allow_user_access(to, NULL, size);
+	allow_user_access(to, NULL, size, KUAP_WRITE);
+}
+
+static inline void allow_read_write_user(void __user *to, const void __user *from,
+					 unsigned long size)
+{
+	allow_user_access(to, from, size, KUAP_READ_WRITE);
 }
 
 static inline void prevent_read_from_user(const void __user *from, unsigned long size)
 {
-	prevent_user_access(NULL, from, size);
+	prevent_user_access(NULL, from, size, KUAP_READ);
 }
 
 static inline void prevent_write_to_user(void __user *to, unsigned long size)
 {
-	prevent_user_access(to, NULL, size);
+	prevent_user_access(to, NULL, size, KUAP_WRITE);
+}
+
+static inline void prevent_read_write_user(void __user *to, const void __user *from,
+					   unsigned long size)
+{
+	prevent_user_access(to, from, size, KUAP_READ_WRITE);
+}
+
+static inline void prevent_current_access_user(void)
+{
+	prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT);
 }
 
 #endif /* !__ASSEMBLY__ */
 
-#endif /* _ASM_POWERPC_KUP_H_ */
+#endif /* _ASM_POWERPC_KUAP_H_ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
index 50204e2..5a9834e 100644
--- a/arch/powerpc/include/asm/kvm_book3s_uvmem.h
+++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
@@ -19,8 +19,9 @@ unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
 unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
+unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm);
 void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
-			     struct kvm *kvm);
+			     struct kvm *kvm, bool skip_page_out);
 #else
 static inline int kvmppc_uvmem_init(void)
 {
@@ -62,6 +63,11 @@ static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
 	return H_UNSUPPORTED;
 }
 
+static inline unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
+{
+	return H_UNSUPPORTED;
+}
+
 static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
 {
 	return -EFAULT;
@@ -69,6 +75,6 @@ static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
 
 static inline void
 kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
-			struct kvm *kvm) { }
+			struct kvm *kvm, bool skip_page_out) { }
 #endif /* CONFIG_PPC_UV */
 #endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 0a398f2..6e8b8ff 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -278,6 +278,7 @@ struct kvm_resize_hpt;
 /* Flag values for kvm_arch.secure_guest */
 #define KVMPPC_SECURE_INIT_START 0x1 /* H_SVM_INIT_START has been called */
 #define KVMPPC_SECURE_INIT_DONE  0x2 /* H_SVM_INIT_DONE completed */
+#define KVMPPC_SECURE_INIT_ABORT 0x4 /* H_SVM_INIT_ABORT issued */
 
 struct kvm_arch {
 	unsigned int lpid;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 3d2f871..bc2494e 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -119,8 +119,7 @@ extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
 			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
 			struct kvmppc_pte *pte);
 
-extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
-                                                unsigned int id);
+extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_check_processor_compat(void);
@@ -274,7 +273,7 @@ struct kvmppc_ops {
 	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
 	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
 	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
-	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+	int (*vcpu_create)(struct kvm_vcpu *vcpu);
 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
 	int (*check_requests)(struct kvm_vcpu *vcpu);
 	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 58efca9..360367c 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -238,11 +238,6 @@ static inline void arch_unmap(struct mm_struct *mm,
 		mm->context.vdso_base = 0;
 }
 
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
-				     struct vm_area_struct *vma)
-{
-}
-
 #ifdef CONFIG_PPC_MEM_KEYS
 bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
 			       bool execute, bool foreign);
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 1006a42..85ed239 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -35,18 +35,33 @@
 #include <asm/reg.h>
 
 static inline void allow_user_access(void __user *to, const void __user *from,
-				     unsigned long size)
+				     unsigned long size, unsigned long dir)
 {
 	mtspr(SPRN_MD_AP, MD_APG_INIT);
 }
 
 static inline void prevent_user_access(void __user *to, const void __user *from,
-				       unsigned long size)
+				       unsigned long size, unsigned long dir)
 {
 	mtspr(SPRN_MD_AP, MD_APG_KUAP);
 }
 
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline unsigned long prevent_user_access_return(void)
+{
+	unsigned long flags = mfspr(SPRN_MD_AP);
+
+	mtspr(SPRN_MD_AP, MD_APG_KUAP);
+
+	return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
+{
+	mtspr(SPRN_MD_AP, flags);
+}
+
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xf0000000),
 		    "Bug: fault blocked by AP register !");
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 552b96e..60c4d82 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -114,7 +114,12 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 #else
 #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
 #endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END	_ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
 #define VMALLOC_END	ioremap_bot
+#endif
 
 /*
  * Bits in a linux-style PTE.  These match the bits in the
diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h
index 332b13b..29c4366 100644
--- a/arch/powerpc/include/asm/nohash/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/pgalloc.h
@@ -46,7 +46,6 @@ static inline void pgtable_free(void *table, int shift)
 
 #define get_hugepd_cache_index(x)	(x)
 
-#ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
 	unsigned long pgf = (unsigned long)table;
@@ -64,13 +63,6 @@ static inline void __tlb_remove_table(void *_table)
 	pgtable_free(table, shift);
 }
 
-#else
-static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
-{
-	pgtable_free(table, shift);
-}
-#endif
-
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 7f1fd41..8633208 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -209,7 +209,7 @@ static inline bool pfn_valid(unsigned long pfn)
  */
 #if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
-#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
+#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
 #else
 #ifdef CONFIG_PPC64
 /*
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index ea6ec65..69f4cb3 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -223,12 +223,15 @@ struct pci_dn {
 extern struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
 					   int devfn);
 extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
-extern struct pci_dn *add_dev_pci_data(struct pci_dev *pdev);
-extern void remove_dev_pci_data(struct pci_dev *pdev);
 extern struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
 					       struct device_node *dn);
 extern void pci_remove_device_node_info(struct device_node *dn);
 
+#ifdef CONFIG_PCI_IOV
+struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev);
+void remove_sriov_vf_pdns(struct pci_dev *pdev);
+#endif
+
 static inline int pci_device_from_OF_node(struct device_node *np,
 					  u8 *bus, u8 *devfn)
 {
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 327567b..63ed7e3 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -113,7 +113,6 @@ extern pgprot_t	pci_phys_mem_access_prot(struct file *file,
 					 pgprot_t prot);
 
 extern resource_size_t pcibios_io_space_offset(struct pci_controller *hose);
-extern void pcibios_setup_bus_devices(struct pci_bus *bus);
 extern void pcibios_setup_bus_self(struct pci_bus *bus);
 extern void pcibios_setup_phb_io_space(struct pci_controller *hose);
 extern void pcibios_scan_phb(struct pci_controller *hose);
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 0e4ec8c..8cc543e 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -94,12 +94,6 @@ void mark_initmem_nx(void);
 static inline void mark_initmem_nx(void) { }
 #endif
 
-#ifdef CONFIG_PPC_DEBUG_WX
-void ptdump_check_wx(void);
-#else
-static inline void ptdump_check_wx(void) { }
-#endif
-
 /*
  * When used, PTE_FRAG_NR is defined in subarch pgtable.h
  * so we are sure it is included when arriving here.
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index edcb1fc..d0ee0ed 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -15,6 +15,7 @@
 #define PCI_SLOT_ID_PREFIX	(1UL << 63)
 #define PCI_SLOT_ID(phb_id, bdfn)	\
 	(PCI_SLOT_ID_PREFIX | ((uint64_t)(bdfn) << 16) | (phb_id))
+#define PCI_PHB_SLOT_ID(phb_id)		(phb_id)
 
 extern int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id);
 extern int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len);
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index a9993e7..8387698 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -163,6 +163,12 @@ struct thread_struct {
 #if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
 	unsigned long	kuap;		/* opened segments for user access */
 #endif
+#ifdef CONFIG_VMAP_STACK
+	unsigned long	srr0;
+	unsigned long	srr1;
+	unsigned long	dar;
+	unsigned long	dsisr;
+#endif
 	/* Debug Registers */
 	struct debug_reg debug;
 	struct thread_fp_state	fp_state;
@@ -412,6 +418,9 @@ static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
 extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
 extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
 extern unsigned long isa206_idle_insn_mayloss(unsigned long type);
+#ifdef CONFIG_PPC_970_NAP
+extern void power4_idle_nap(void);
+#endif
 
 extern unsigned long cpuidle_disable;
 enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
index 07df35e..299ee7b 100644
--- a/arch/powerpc/include/asm/reg_8xx.h
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -35,7 +35,21 @@
 #define SPRN_CMPE	152
 #define SPRN_CMPF	153
 #define SPRN_LCTRL1	156
+#define   LCTRL1_CTE_GT		0xc0000000
+#define   LCTRL1_CTF_LT		0x14000000
+#define   LCTRL1_CRWE_RW	0x00000000
+#define   LCTRL1_CRWE_RO	0x00040000
+#define   LCTRL1_CRWE_WO	0x000c0000
+#define   LCTRL1_CRWF_RW	0x00000000
+#define   LCTRL1_CRWF_RO	0x00010000
+#define   LCTRL1_CRWF_WO	0x00030000
 #define SPRN_LCTRL2	157
+#define   LCTRL2_LW0EN		0x80000000
+#define   LCTRL2_LW0LA_E	0x00000000
+#define   LCTRL2_LW0LA_F	0x04000000
+#define   LCTRL2_LW0LA_EandF	0x08000000
+#define   LCTRL2_LW0LADC	0x02000000
+#define   LCTRL2_SLW0EN		0x00000002
 #ifdef CONFIG_PPC_8xx
 #define SPRN_ICTRL	158
 #endif
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 8e1d019..a227074 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -10,13 +10,31 @@
 #define _ASM_POWERPC_THREAD_INFO_H
 
 #include <asm/asm-const.h>
+#include <asm/page.h>
 
 #ifdef __KERNEL__
 
+#if defined(CONFIG_VMAP_STACK) && CONFIG_THREAD_SHIFT < PAGE_SHIFT
+#define THREAD_SHIFT		PAGE_SHIFT
+#else
 #define THREAD_SHIFT		CONFIG_THREAD_SHIFT
+#endif
 
 #define THREAD_SIZE		(1 << THREAD_SHIFT)
 
+/*
+ * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
+ * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
+ * assembly.
+ */
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN_SHIFT	(THREAD_SHIFT + 1)
+#else
+#define THREAD_ALIGN_SHIFT	THREAD_SHIFT
+#endif
+
+#define THREAD_ALIGN		(1 << THREAD_ALIGN_SHIFT)
+
 #ifndef __ASSEMBLY__
 #include <linux/cache.h>
 #include <asm/processor.h>
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index b2c0be9..7f3a8b9 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -26,6 +26,17 @@
 
 #define tlb_flush tlb_flush
 extern void tlb_flush(struct mmu_gather *tlb);
+/*
+ * book3s:
+ * Hash does not use the Linux page-tables, so we can avoid
+ * the TLB invalidate for page-table freeing. Radix, on the other hand,
+ * does use the page-tables and needs the TLBI.
+ *
+ * nohash:
+ * We still do TLB invalidate in the __pte_free_tlb routine before we
+ * add the page table pages to mmu gather table batch.
+ */
+#define tlb_needs_table_invalidate()	radix_enabled()
 
 /* Get the generic bits... */
 #include <asm-generic/tlb.h>
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index c92fe7f..2f500de 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -91,9 +91,14 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
 	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
 #define __get_user(x, ptr) \
-	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
 #define __put_user(x, ptr) \
-	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)
+
+#define __get_user_allowed(x, ptr) \
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
+#define __put_user_allowed(x, ptr) \
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)
 
 #define __get_user_inatomic(x, ptr) \
 	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
@@ -138,10 +143,9 @@ extern long __put_user_bad(void);
 		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
 #endif /* __powerpc64__ */
 
-#define __put_user_size(x, ptr, size, retval)			\
+#define __put_user_size_allowed(x, ptr, size, retval)		\
 do {								\
 	retval = 0;						\
-	allow_write_to_user(ptr, size);				\
 	switch (size) {						\
 	  case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
 	  case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
@@ -149,17 +153,26 @@ do {								\
 	  case 8: __put_user_asm2(x, ptr, retval); break;	\
 	  default: __put_user_bad();				\
 	}							\
+} while (0)
+
+#define __put_user_size(x, ptr, size, retval)			\
+do {								\
+	allow_write_to_user(ptr, size);				\
+	__put_user_size_allowed(x, ptr, size, retval);		\
 	prevent_write_to_user(ptr, size);			\
 } while (0)
 
-#define __put_user_nocheck(x, ptr, size)			\
+#define __put_user_nocheck(x, ptr, size, do_allow)			\
 ({								\
 	long __pu_err;						\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
 	if (!is_kernel_addr((unsigned long)__pu_addr))		\
 		might_fault();					\
 	__chk_user_ptr(ptr);					\
-	__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	if (do_allow)								\
+		__put_user_size((x), __pu_addr, (size), __pu_err);		\
+	else									\
+		__put_user_size_allowed((x), __pu_addr, (size), __pu_err);	\
 	__pu_err;						\
 })
 
@@ -236,13 +249,12 @@ extern long __get_user_bad(void);
 		: "b" (addr), "i" (-EFAULT), "0" (err))
 #endif /* __powerpc64__ */
 
-#define __get_user_size(x, ptr, size, retval)			\
+#define __get_user_size_allowed(x, ptr, size, retval)		\
 do {								\
 	retval = 0;						\
 	__chk_user_ptr(ptr);					\
 	if (size > sizeof(x))					\
 		(x) = __get_user_bad();				\
-	allow_read_from_user(ptr, size);			\
 	switch (size) {						\
 	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
 	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
@@ -250,6 +262,12 @@ do {								\
 	case 8: __get_user_asm2(x, ptr, retval);  break;	\
 	default: (x) = __get_user_bad();			\
 	}							\
+} while (0)
+
+#define __get_user_size(x, ptr, size, retval)			\
+do {								\
+	allow_read_from_user(ptr, size);			\
+	__get_user_size_allowed(x, ptr, size, retval);		\
 	prevent_read_from_user(ptr, size);			\
 } while (0)
 
@@ -260,7 +278,7 @@ do {								\
 #define __long_type(x) \
 	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 
-#define __get_user_nocheck(x, ptr, size)			\
+#define __get_user_nocheck(x, ptr, size, do_allow)			\
 ({								\
 	long __gu_err;						\
 	__long_type(*(ptr)) __gu_val;				\
@@ -269,7 +287,10 @@ do {								\
 	if (!is_kernel_addr((unsigned long)__gu_addr))		\
 		might_fault();					\
 	barrier_nospec();					\
-	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	if (do_allow)								\
+		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);		\
+	else									\
+		__get_user_size_allowed(__gu_val, __gu_addr, (size), __gu_err);	\
 	(x) = (__typeof__(*(ptr)))__gu_val;			\
 	__gu_err;						\
 })
@@ -313,9 +334,9 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 	unsigned long ret;
 
 	barrier_nospec();
-	allow_user_access(to, from, n);
+	allow_read_write_user(to, from, n);
 	ret = __copy_tofrom_user(to, from, n);
-	prevent_user_access(to, from, n);
+	prevent_read_write_user(to, from, n);
 	return ret;
 }
 #endif /* __powerpc64__ */
@@ -356,33 +377,40 @@ static inline unsigned long raw_copy_from_user(void *to,
 	return ret;
 }
 
-static inline unsigned long raw_copy_to_user(void __user *to,
-		const void *from, unsigned long n)
+static inline unsigned long
+raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
 {
-	unsigned long ret;
 	if (__builtin_constant_p(n) && (n <= 8)) {
-		ret = 1;
+		unsigned long ret = 1;
 
 		switch (n) {
 		case 1:
-			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
+			__put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
 			break;
 		case 2:
-			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
+			__put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
 			break;
 		case 4:
-			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
+			__put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
 			break;
 		case 8:
-			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
+			__put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
 			break;
 		}
 		if (ret == 0)
 			return 0;
 	}
 
+	return __copy_tofrom_user(to, (__force const void __user *)from, n);
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	unsigned long ret;
+
 	allow_write_to_user(to, n);
-	ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
+	ret = raw_copy_to_user_allowed(to, from, n);
 	prevent_write_to_user(to, n);
 	return ret;
 }
@@ -428,4 +456,22 @@ extern long __copy_from_user_flushcache(void *dst, const void __user *src,
 extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
 			   size_t len);
 
+static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+	if (unlikely(!access_ok(ptr, len)))
+		return false;
+	allow_read_write_user((void __user *)ptr, ptr, len);
+	return true;
+}
+#define user_access_begin	user_access_begin
+#define user_access_end		prevent_current_access_user
+#define user_access_save	prevent_user_access_return
+#define user_access_restore	restore_user_access
+
+#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
+#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
+#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
+#define unsafe_copy_to_user(d, s, l, e) \
+	unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
+
 #endif	/* _ARCH_POWERPC_UACCESS_H */
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index 40f13f3..b9ef6cf 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -108,16 +108,22 @@ struct vdso_data {
 	__u32 stamp_sec_fraction;	/* fractional seconds of stamp_xtime */
 	__u32 hrtimer_res;		/* hrtimer resolution */
    	__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
-	__u32 dcache_block_size;	/* L1 d-cache block size     */
-	__u32 icache_block_size;	/* L1 i-cache block size     */
-	__u32 dcache_log_block_size;	/* L1 d-cache log block size */
-	__u32 icache_log_block_size;	/* L1 i-cache log block size */
 };
 
 #endif /* CONFIG_PPC64 */
 
 extern struct vdso_data *vdso_data;
 
+#else /* __ASSEMBLY__ */
+
+.macro get_datapage ptr, tmp
+	bcl	20, 31, .+4
+	mflr	\ptr
+	addi	\ptr, \ptr, (__kernel_datapage_offset - (.-4))@l
+	lwz	\tmp, 0(\ptr)
+	add	\ptr, \tmp, \ptr
+.endm
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index 24cdf97..93f982db 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -87,56 +87,56 @@ extern bool __xive_enabled;
 
 static inline bool xive_enabled(void) { return __xive_enabled; }
 
-extern bool xive_spapr_init(void);
-extern bool xive_native_init(void);
-extern void xive_smp_probe(void);
-extern int  xive_smp_prepare_cpu(unsigned int cpu);
-extern void xive_smp_setup_cpu(void);
-extern void xive_smp_disable_cpu(void);
-extern void xive_teardown_cpu(void);
-extern void xive_shutdown(void);
-extern void xive_flush_interrupt(void);
+bool xive_spapr_init(void);
+bool xive_native_init(void);
+void xive_smp_probe(void);
+int  xive_smp_prepare_cpu(unsigned int cpu);
+void xive_smp_setup_cpu(void);
+void xive_smp_disable_cpu(void);
+void xive_teardown_cpu(void);
+void xive_shutdown(void);
+void xive_flush_interrupt(void);
 
 /* xmon hook */
-extern void xmon_xive_do_dump(int cpu);
-extern int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d);
+void xmon_xive_do_dump(int cpu);
+int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d);
 
 /* APIs used by KVM */
-extern u32 xive_native_default_eq_shift(void);
-extern u32 xive_native_alloc_vp_block(u32 max_vcpus);
-extern void xive_native_free_vp_block(u32 vp_base);
-extern int xive_native_populate_irq_data(u32 hw_irq,
-					 struct xive_irq_data *data);
-extern void xive_cleanup_irq_data(struct xive_irq_data *xd);
-extern u32 xive_native_alloc_irq(void);
-extern void xive_native_free_irq(u32 irq);
-extern int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
+u32 xive_native_default_eq_shift(void);
+u32 xive_native_alloc_vp_block(u32 max_vcpus);
+void xive_native_free_vp_block(u32 vp_base);
+int xive_native_populate_irq_data(u32 hw_irq,
+				  struct xive_irq_data *data);
+void xive_cleanup_irq_data(struct xive_irq_data *xd);
+u32 xive_native_alloc_irq(void);
+void xive_native_free_irq(u32 irq);
+int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
 
-extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
-				       __be32 *qpage, u32 order, bool can_escalate);
-extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
+int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
+				__be32 *qpage, u32 order, bool can_escalate);
+void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
 
-extern void xive_native_sync_source(u32 hw_irq);
-extern void xive_native_sync_queue(u32 hw_irq);
-extern bool is_xive_irq(struct irq_chip *chip);
-extern int xive_native_enable_vp(u32 vp_id, bool single_escalation);
-extern int xive_native_disable_vp(u32 vp_id);
-extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
-extern bool xive_native_has_single_escalation(void);
+void xive_native_sync_source(u32 hw_irq);
+void xive_native_sync_queue(u32 hw_irq);
+bool is_xive_irq(struct irq_chip *chip);
+int xive_native_enable_vp(u32 vp_id, bool single_escalation);
+int xive_native_disable_vp(u32 vp_id);
+int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
+bool xive_native_has_single_escalation(void);
 
-extern int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
-				      u64 *out_qpage,
-				      u64 *out_qsize,
-				      u64 *out_qeoi_page,
-				      u32 *out_escalate_irq,
-				      u64 *out_qflags);
+int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
+			       u64 *out_qpage,
+			       u64 *out_qsize,
+			       u64 *out_qeoi_page,
+			       u32 *out_escalate_irq,
+			       u64 *out_qflags);
 
-extern int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
-				       u32 *qindex);
-extern int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
-				       u32 qindex);
-extern int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
-extern bool xive_native_has_queue_state_support(void);
+int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
+				u32 *qindex);
+int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
+				u32 qindex);
+int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
+bool xive_native_has_queue_state_support(void);
 
 #else
 
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 157b014..78a1b22 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -62,8 +62,7 @@
 obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
 obj-$(CONFIG_PPC64)		+= vdso64/
 obj-$(CONFIG_ALTIVEC)		+= vecemu.o
-obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
-obj-$(CONFIG_PPC_P7_NAP)	+= idle_book3s.o
+obj-$(CONFIG_PPC_BOOK3S_IDLE)	+= idle_book3s.o
 procfs-y			:= proc_powerpc.o
 obj-$(CONFIG_PROC_FS)		+= $(procfs-y)
 rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI)	:= rtas_pci.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 3d47aec..c25e562 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -127,6 +127,12 @@ int main(void)
 	OFFSET(KSP_VSID, thread_struct, ksp_vsid);
 #else /* CONFIG_PPC64 */
 	OFFSET(PGDIR, thread_struct, pgdir);
+#ifdef CONFIG_VMAP_STACK
+	OFFSET(SRR0, thread_struct, srr0);
+	OFFSET(SRR1, thread_struct, srr1);
+	OFFSET(DAR, thread_struct, dar);
+	OFFSET(DSISR, thread_struct, dsisr);
+#endif
 #ifdef CONFIG_SPE
 	OFFSET(THREAD_EVR0, thread_struct, evr[0]);
 	OFFSET(THREAD_ACC, thread_struct, acc);
@@ -389,11 +395,11 @@ int main(void)
 	OFFSET(STAMP_XTIME_NSEC, vdso_data, stamp_xtime_nsec);
 	OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction);
 	OFFSET(CLOCK_HRTIMER_RES, vdso_data, hrtimer_res);
+#ifdef CONFIG_PPC64
 	OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size);
 	OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size);
 	OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_data, icache_log_block_size);
 	OFFSET(CFG_DCACHE_LOGBLOCKSZ, vdso_data, dcache_log_block_size);
-#ifdef CONFIG_PPC64
 	OFFSET(CFG_SYSCALL_MAP64, vdso_data, syscall_map_64);
 	OFFSET(TVAL64_TV_SEC, __kernel_old_timeval, tv_sec);
 	OFFSET(TVAL64_TV_USEC, __kernel_old_timeval, tv_usec);
@@ -413,7 +419,10 @@ int main(void)
 	DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
 	DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
 	DEFINE(CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
+	DEFINE(CLOCK_MAX, CLOCK_TAI);
 	DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
+	DEFINE(EINVAL, EINVAL);
+	DEFINE(KTIME_LOW_RES, KTIME_LOW_RES);
 
 #ifdef CONFIG_BUG
 	DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 180b3a5..182b404 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -727,17 +727,20 @@ static __init void cpufeatures_cpu_quirks(void)
 	/*
 	 * Not all quirks can be derived from the cpufeatures device tree.
 	 */
-	if ((version & 0xffffefff) == 0x004e0200)
-		; /* DD2.0 has no feature flag */
-	else if ((version & 0xffffefff) == 0x004e0201)
+	if ((version & 0xffffefff) == 0x004e0200) {
+		/* DD2.0 has no feature flag */
+		cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
+	} else if ((version & 0xffffefff) == 0x004e0201) {
 		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
-	else if ((version & 0xffffefff) == 0x004e0202) {
+		cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
+	} else if ((version & 0xffffefff) == 0x004e0202) {
 		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
 		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
 		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
-	} else if ((version & 0xffff0000) == 0x004e0000)
+	} else if ((version & 0xffff0000) == 0x004e0000) {
 		/* DD2.1 and up have DD2_1 */
 		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
+	}
 
 	if ((version & 0xffff0000) == 0x004e0000) {
 		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index bc8a551..17cb3e9 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -503,7 +503,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
 	rc = 1;
 	if (pe->state & EEH_PE_ISOLATED) {
 		pe->check_count++;
-		if (pe->check_count % EEH_MAX_FAILS == 0) {
+		if (pe->check_count == EEH_MAX_FAILS) {
 			dn = pci_device_to_OF_node(dev);
 			if (dn)
 				location = of_get_property(dn, "ibm,loc-code",
@@ -1191,7 +1191,6 @@ void eeh_add_device_late(struct pci_dev *dev)
 		eeh_rmv_from_parent_pe(edev);
 		eeh_addr_cache_rmv_dev(edev->pdev);
 		eeh_sysfs_remove_device(edev->pdev);
-		edev->mode &= ~EEH_DEV_SYSFS;
 
 		/*
 		 * We definitely should have the PCI device removed
@@ -1296,6 +1295,23 @@ void eeh_remove_device(struct pci_dev *dev)
 	edev->pdev = NULL;
 
 	/*
+	 * eeh_sysfs_remove_device() uses pci_dev_to_eeh_dev() so we need to
+	 * remove the sysfs files before clearing dev.archdata.edev
+	 */
+	if (edev->mode & EEH_DEV_SYSFS)
+		eeh_sysfs_remove_device(dev);
+
+	/*
+	 * We're removing from the PCI subsystem, that means
+	 * the PCI device driver can't support EEH or not
+	 * well. So we rely on hotplug completely to do recovery
+	 * for the specific PCI device.
+	 */
+	edev->mode |= EEH_DEV_NO_HANDLER;
+
+	eeh_addr_cache_rmv_dev(dev);
+
+	/*
 	 * The flag "in_error" is used to trace EEH devices for VFs
 	 * in error state or not. It's set in eeh_report_error(). If
 	 * it's not set, eeh_report_{reset,resume}() won't be called
@@ -1307,18 +1323,6 @@ void eeh_remove_device(struct pci_dev *dev)
 		eeh_rmv_from_parent_pe(edev);
 	else
 		edev->mode |= EEH_DEV_DISCONNECTED;
-
-	/*
-	 * We're removing from the PCI subsystem, that means
-	 * the PCI device driver can't support EEH or not
-	 * well. So we rely on hotplug completely to do recovery
-	 * for the specific PCI device.
-	 */
-	edev->mode |= EEH_DEV_NO_HANDLER;
-
-	eeh_addr_cache_rmv_dev(dev);
-	eeh_sysfs_remove_device(dev);
-	edev->mode &= ~EEH_DEV_SYSFS;
 }
 
 int eeh_unfreeze_pe(struct eeh_pe *pe)
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index cf11277..6b50bf1 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -159,18 +159,10 @@ eeh_addr_cache_insert(struct pci_dev *dev, resource_size_t alo,
 
 static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
 {
-	struct pci_dn *pdn;
 	struct eeh_dev *edev;
 	int i;
 
-	pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
-	if (!pdn) {
-		pr_warn("PCI: no pci dn found for dev=%s\n",
-			pci_name(dev));
-		return;
-	}
-
-	edev = pdn_to_eeh_dev(pdn);
+	edev = pci_dev_to_eeh_dev(dev);
 	if (!edev) {
 		pr_warn("PCI: no EEH dev found for %s\n",
 			pci_name(dev));
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 3dd1a42..a1eaffe 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -525,12 +525,6 @@ static void eeh_rmv_device(struct eeh_dev *edev, void *userdata)
 
 		pci_iov_remove_virtfn(edev->physfn, pdn->vf_index);
 		edev->pdev = NULL;
-
-		/*
-		 * We have to set the VF PE number to invalid one, which is
-		 * required to plug the VF successfully.
-		 */
-		pdn->pe_number = IODA_INVALID_PE;
 #endif
 		if (rmv_data)
 			list_add(&edev->rmv_entry, &rmv_data->removed_vf_list);
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index ab44d96..4fb0f1e 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -14,7 +14,7 @@
 /**
  * EEH_SHOW_ATTR -- Create sysfs entry for eeh statistic
  * @_name: name of file in sysfs directory
- * @_memb: name of member in struct pci_dn to access
+ * @_memb: name of member in struct eeh_dev to access
  * @_format: printf format for display
  *
  * All of the attributes look very similar, so just
@@ -75,7 +75,7 @@ static ssize_t eeh_pe_state_store(struct device *dev,
 
 static DEVICE_ATTR_RW(eeh_pe_state);
 
-#ifdef CONFIG_PCI_IOV
+#if defined(CONFIG_PCI_IOV) && defined(CONFIG_PPC_PSERIES)
 static ssize_t eeh_notify_resume_show(struct device *dev,
 				      struct device_attribute *attr, char *buf)
 {
@@ -86,7 +86,6 @@ static ssize_t eeh_notify_resume_show(struct device *dev,
 	if (!edev || !edev->pe)
 		return -ENODEV;
 
-	pdn = pci_get_pdn(pdev);
 	return sprintf(buf, "%d\n", pdn->last_allow_rc);
 }
 
@@ -132,7 +131,7 @@ static void eeh_notify_resume_remove(struct pci_dev *pdev)
 #else
 static inline int eeh_notify_resume_add(struct pci_dev *pdev) { return 0; }
 static inline void eeh_notify_resume_remove(struct pci_dev *pdev) { }
-#endif /* CONFIG_PCI_IOV */
+#endif /* CONFIG_PCI_IOV && CONFIG_PPC_PSERIES */
 
 void eeh_sysfs_add_device(struct pci_dev *pdev)
 {
@@ -160,22 +159,23 @@ void eeh_sysfs_remove_device(struct pci_dev *pdev)
 {
 	struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
 
+	if (!edev) {
+		WARN_ON(eeh_enabled());
+		return;
+	}
+
+	edev->mode &= ~EEH_DEV_SYSFS;
+
 	/*
 	 * The parent directory might have been removed. We needn't
 	 * continue for that case.
 	 */
-	if (!pdev->dev.kobj.sd) {
-		if (edev)
-			edev->mode &= ~EEH_DEV_SYSFS;
+	if (!pdev->dev.kobj.sd)
 		return;
-	}
 
 	device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
 	device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
 	device_remove_file(&pdev->dev, &dev_attr_eeh_pe_state);
 
 	eeh_notify_resume_remove(pdev);
-
-	if (edev)
-		edev->mode &= ~EEH_DEV_SYSFS;
 }
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index e1a4c39..0713daa 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -140,6 +140,7 @@
 	stw	r12,_CTR(r11)
 	stw	r2,_XER(r11)
 	mfspr	r12,SPRN_SPRG_THREAD
+	tovirt_vmstack r12, r12
 	beq	2f			/* if from user, fix up THREAD.regs */
 	addi	r2, r12, -THREAD
 	addi	r11,r1,STACK_FRAME_OVERHEAD
@@ -179,11 +180,13 @@
 2:	/* if from kernel, check interrupted DOZE/NAP mode and
          * check for stack overflow
          */
-	kuap_save_and_lock r11, r12, r9, r2, r0
+	kuap_save_and_lock r11, r12, r9, r2, r6
 	addi	r2, r12, -THREAD
+#ifndef CONFIG_VMAP_STACK
 	lwz	r9,KSP_LIMIT(r12)
 	cmplw	r1,r9			/* if r1 <= ksp_limit */
 	ble-	stack_ovf		/* then the kernel stack overflowed */
+#endif
 5:
 #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
 	lwz	r12,TI_LOCAL_FLAGS(r2)
@@ -195,7 +198,8 @@
 transfer_to_handler_cont:
 3:
 	mflr	r9
-	tovirt(r2, r2)			/* set r2 to current */
+	tovirt_novmstack r2, r2 	/* set r2 to current */
+	tovirt_vmstack r9, r9
 	lwz	r11,0(r9)		/* virtual address of handler */
 	lwz	r9,4(r9)		/* where to go when done */
 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
@@ -210,7 +214,7 @@
 	 * To speed up the syscall path where interrupts stay on, let's check
 	 * first if we are changing the MSR value at all.
 	 */
-	tophys(r12, r1)
+	tophys_novmstack r12, r1
 	lwz	r12,_MSR(r12)
 	andi.	r12,r12,MSR_EE
 	bne	1f
@@ -284,9 +288,11 @@
 	rlwinm	r9,r9,0,~MSR_EE
 	lwz	r12,_LINK(r11)		/* and return to address in LR */
 	kuap_restore r11, r2, r3, r4, r5
+	lwz	r2, GPR2(r11)
 	b	fast_exception_return
 #endif
 
+#ifndef CONFIG_VMAP_STACK
 /*
  * On kernel stack overflow, load up an initial stack pointer
  * and call StackOverflow(regs), which should not return.
@@ -312,6 +318,7 @@
 	mtspr	SPRN_SRR1,r10
 	SYNC
 	RFI
+#endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 trace_syscall_entry_irq_off:
@@ -397,7 +404,7 @@
 	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
 	/* Note: We don't bother telling lockdep about it */
 	SYNC
-	MTMSRD(r10)
+	mtmsr	r10
 	lwz	r9,TI_FLAGS(r2)
 	li	r8,-MAX_ERRNO
 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
@@ -554,7 +561,7 @@
 	 */
 	ori	r10,r10,MSR_EE
 	SYNC
-	MTMSRD(r10)
+	mtmsr	r10
 
 	/* Save NVGPRS if they're not saved already */
 	lwz	r4,_TRAP(r1)
@@ -621,7 +628,6 @@
  */
 	.globl	handle_page_fault
 handle_page_fault:
-	stw	r4,_DAR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_PPC_BOOK3S_32
 	andis.  r0,r5,DSISR_DABRMATCH@h
@@ -697,7 +703,7 @@
 	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
 	beq+	1f
 	andc	r11,r11,r0
-	MTMSRD(r11)
+	mtmsr	r11
 	isync
 1:	stw	r11,_MSR(r1)
 	mfcr	r10
@@ -831,7 +837,7 @@
 	/* Note: We don't bother telling lockdep about it */
 	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
 	SYNC			/* Some chip revs have problems here... */
-	MTMSRD(r10)		/* disable interrupts */
+	mtmsr	r10		/* disable interrupts */
 
 	lwz	r3,_MSR(r1)	/* Returning to user mode? */
 	andi.	r0,r3,MSR_PR
@@ -998,7 +1004,7 @@
 	 */
 	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
 	SYNC
-	MTMSRD(r10)		/* clear the RI bit */
+	mtmsr	r10		/* clear the RI bit */
 	.globl exc_exit_restart
 exc_exit_restart:
 	lwz	r12,_NIP(r1)
@@ -1234,7 +1240,7 @@
 #endif
 	ori	r10,r10,MSR_EE
 	SYNC
-	MTMSRD(r10)		/* hard-enable interrupts */
+	mtmsr	r10		/* hard-enable interrupts */
 	bl	schedule
 recheck:
 	/* Note: And we don't tell it we are disabling them again
@@ -1243,7 +1249,7 @@
 	 */
 	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
 	SYNC
-	MTMSRD(r10)		/* disable interrupts */
+	mtmsr	r10		/* disable interrupts */
 	lwz	r9,TI_FLAGS(r2)
 	andi.	r0,r9,_TIF_NEED_RESCHED
 	bne-	do_resched
@@ -1252,7 +1258,7 @@
 do_user_signal:			/* r10 contains MSR_KERNEL here */
 	ori	r10,r10,MSR_EE
 	SYNC
-	MTMSRD(r10)		/* hard-enable interrupts */
+	mtmsr	r10		/* hard-enable interrupts */
 	/* save r13-r31 in the exception frame, if not already done */
 	lwz	r3,_TRAP(r1)
 	andi.	r0,r3,1
@@ -1334,14 +1340,14 @@
 	lis	r6,1f@ha	/* physical return address for rtas */
 	addi	r6,r6,1f@l
 	tophys(r6,r6)
-	tophys(r7,r1)
+	tophys_novmstack r7, r1
 	lwz	r8,RTASENTRY(r4)
 	lwz	r4,RTASBASE(r4)
 	mfmsr	r9
 	stw	r9,8(r1)
 	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
 	SYNC			/* disable interrupts so SRR0/1 */
-	MTMSRD(r0)		/* don't get trashed */
+	mtmsr	r0		/* don't get trashed */
 	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
 	mtlr	r6
 	stw	r7, THREAD + RTAS_SP(r2)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index a9a1d3c..6ba675b 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -597,8 +597,7 @@
 	std	r0,16(r1)
 	stdu	r1,-SWITCH_FRAME_SIZE(r1)
 	/* r3-r13 are caller saved -- Cort */
-	SAVE_8GPRS(14, r1)
-	SAVE_10GPRS(22, r1)
+	SAVE_NVGPRS(r1)
 	std	r0,_NIP(r1)	/* Return to switch caller */
 	mfcr	r23
 	std	r23,_CCR(r1)
@@ -722,8 +721,7 @@
 	mtcrf	0xFF,r6
 
 	/* r3-r13 are destroyed -- Cort */
-	REST_8GPRS(14, r1)
-	REST_10GPRS(22, r1)
+	REST_NVGPRS(r1)
 
 	/* convert old thread to its task_struct for return value */
 	addi	r3,r3,-THREAD
@@ -1155,8 +1153,7 @@
    	 */
 	SAVE_GPR(2, r1)			/* Save the TOC */
 	SAVE_GPR(13, r1)		/* Save paca */
-	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
-	SAVE_10GPRS(22, r1)		/* ditto */
+	SAVE_NVGPRS(r1)			/* Save the non-volatiles */
 
 	mfcr	r4
 	std	r4,_CCR(r1)
@@ -1263,8 +1260,7 @@
 	/* relocation is on at this point */
 	REST_GPR(2, r1)			/* Restore the TOC */
 	REST_GPR(13, r1)		/* Restore paca */
-	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
-	REST_10GPRS(22, r1)		/* ditto */
+	REST_NVGPRS(r1)			/* Restore the non-volatiles */
 
 	GET_PACA(r13)
 
@@ -1298,8 +1294,7 @@
    	 */
 	SAVE_GPR(2, r1)
 	SAVE_GPR(13, r1)
-	SAVE_8GPRS(14, r1)
-	SAVE_10GPRS(22, r1)
+	SAVE_NVGPRS(r1)
 	mfcr	r10
 	mfmsr	r11
 	std	r10,_CCR(r1)
@@ -1343,8 +1338,7 @@
 	/* Restore other registers */
 	REST_GPR(2, r1)
 	REST_GPR(13, r1)
-	REST_8GPRS(14, r1)
-	REST_10GPRS(22, r1)
+	REST_NVGPRS(r1)
 	ld	r4,_CCR(r1)
 	mtcr	r4
 
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 46508b1..ffc15f4 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1408,22 +1408,9 @@
  *
  * Call convention:
  *
- * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
- *
- * For hypercalls, the register convention is as follows:
- * r0 volatile
- * r1-2 nonvolatile
- * r3 volatile parameter and return value for status
- * r4-r10 volatile input and output value
- * r11 volatile hypercall number and output value
- * r12 volatile input and output value
- * r13-r31 nonvolatile
- * LR nonvolatile
- * CTR volatile
- * XER volatile
- * CR0-1 CR5-7 volatile
- * CR2-4 nonvolatile
- * Other registers nonvolatile
+ * syscall and hypercalls register conventions are documented in
+ * Documentation/powerpc/syscall64-abi.rst and
+ * Documentation/powerpc/papr_hcalls.rst respectively.
  *
  * The intersection of volatile registers that don't contain possible
  * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
@@ -2208,11 +2195,20 @@
 DEFINE_FIXED_SYMBOL(__end_interrupts)
 
 #ifdef CONFIG_PPC_970_NAP
+	/*
+	 * Called by exception entry code if _TLF_NAPPING was set, this clears
+	 * the NAPPING flag, and redirects the exception exit to
+	 * power4_idle_nap_return.
+	 */
+	.globl power4_fixup_nap
 EXC_COMMON_BEGIN(power4_fixup_nap)
 	andc	r9,r9,r10
 	std	r9,TI_LOCAL_FLAGS(r11)
-	ld	r10,_LINK(r1)		/* make idle task do the */
-	std	r10,_NIP(r1)		/* equivalent of a blr */
+	LOAD_REG_ADDR(r10, power4_idle_nap_return)
+	std	r10,_NIP(r1)
+	blr
+
+power4_idle_nap_return:
 	blr
 #endif
 
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 0bb991d..3235a8d 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -94,6 +94,9 @@
 	/* enable use of FP after return */
 #ifdef CONFIG_PPC32
 	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
+#ifdef CONFIG_VMAP_STACK
+	tovirt(r5, r5)
+#endif
 	lwz	r4,THREAD_FPEXC_MODE(r5)
 	ori	r9,r9,MSR_FP		/* enable FP for current */
 	or	r9,r9,r4
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 4a24f8f0..0493fca 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -272,16 +272,21 @@
  */
 	. = 0x200
 	DO_KVM  0x200
-	mtspr	SPRN_SPRG_SCRATCH0,r10
-	mtspr	SPRN_SPRG_SCRATCH1,r11
-	mfcr	r10
+MachineCheck:
+	EXCEPTION_PROLOG_0
+#ifdef CONFIG_VMAP_STACK
+	li	r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
+	mtmsr	r11
+	isync
+#endif
 #ifdef CONFIG_PPC_CHRP
 	mfspr	r11, SPRN_SPRG_THREAD
+	tovirt_vmstack r11, r11
 	lwz	r11, RTAS_SP(r11)
 	cmpwi	cr1, r11, 0
 	bne	cr1, 7f
 #endif /* CONFIG_PPC_CHRP */
-	EXCEPTION_PROLOG_1
+	EXCEPTION_PROLOG_1 for_rtas=1
 7:	EXCEPTION_PROLOG_2
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_PPC_CHRP
@@ -296,24 +301,21 @@
 	. = 0x300
 	DO_KVM  0x300
 DataAccess:
-	EXCEPTION_PROLOG
-	mfspr	r10,SPRN_DSISR
-	stw	r10,_DSISR(r11)
-#ifdef CONFIG_PPC_KUAP
-	andis.	r0,r10,(DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
-#else
-	andis.	r0,r10,(DSISR_BAD_FAULT_32S|DSISR_DABRMATCH)@h
-#endif
-	bne	1f			/* if not, try to put a PTE */
-	mfspr	r4,SPRN_DAR		/* into the hash table */
-	rlwinm	r3,r10,32-15,21,21	/* DSISR_STORE -> _PAGE_RW */
+	EXCEPTION_PROLOG handle_dar_dsisr=1
+	get_and_save_dar_dsisr_on_stack	r4, r5, r11
 BEGIN_MMU_FTR_SECTION
+#ifdef CONFIG_PPC_KUAP
+	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
+#else
+	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
+#endif
+	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
+	rlwinm	r3, r5, 32 - 15, 21, 21		/* DSISR_STORE -> _PAGE_RW */
 	bl	hash_page
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
-1:	lwz	r5,_DSISR(r11)		/* get DSISR value */
-	mfspr	r4,SPRN_DAR
-	EXC_XFER_LITE(0x300, handle_page_fault)
-
+	b	handle_page_fault_tramp_1
+FTR_SECTION_ELSE
+	b	handle_page_fault_tramp_2
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
 
 /* Instruction access exception. */
 	. = 0x400
@@ -329,6 +331,7 @@
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
 1:	mr	r4,r12
 	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
+	stw	r4, _DAR(r11)
 	EXC_XFER_LITE(0x400, handle_page_fault)
 
 /* External interrupt */
@@ -338,11 +341,8 @@
 	. = 0x600
 	DO_KVM  0x600
 Alignment:
-	EXCEPTION_PROLOG
-	mfspr	r4,SPRN_DAR
-	stw	r4,_DAR(r11)
-	mfspr	r5,SPRN_DSISR
-	stw	r5,_DSISR(r11)
+	EXCEPTION_PROLOG handle_dar_dsisr=1
+	save_dar_dsisr_on_stack r4, r5, r11
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	EXC_XFER_STD(0x600, alignment_exception)
 
@@ -645,6 +645,16 @@
 
 	. = 0x3000
 
+handle_page_fault_tramp_1:
+	lwz	r4, _DAR(r11)
+	lwz	r5, _DSISR(r11)
+	/* fall through */
+handle_page_fault_tramp_2:
+	EXC_XFER_LITE(0x300, handle_page_fault)
+
+stack_overflow:
+	vmap_stack_overflow_exception
+
 AltiVecUnavailable:
 	EXCEPTION_PROLOG
 #ifdef CONFIG_ALTIVEC
@@ -917,6 +927,8 @@
 	ori	r4,r4,2f@l
 	tophys(r4,r4)
 	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
+
+	.align	4
 	mtspr	SPRN_SRR0,r4
 	mtspr	SPRN_SRR1,r3
 	SYNC
@@ -1058,6 +1070,8 @@
 	rlwinm	r0, r6, 0, ~MSR_RI
 	rlwinm	r0, r0, 0, ~MSR_EE
 	mtmsr	r0
+
+	.align	4
 	mtspr	SPRN_SRR0, r4
 	mtspr	SPRN_SRR1, r3
 	SYNC
@@ -1097,6 +1111,8 @@
 	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
 	beqlr
 	andc	r3,r3,r0
+
+	.align	4
 	mtspr	SPRN_SRR0,r4
 	mtspr	SPRN_SRR1,r3
 	sync
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index 8abc778..a6a5fbb 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -10,28 +10,60 @@
  * We assume sprg3 has the physical address of the current
  * task's thread_struct.
  */
+.macro EXCEPTION_PROLOG handle_dar_dsisr=0
+	EXCEPTION_PROLOG_0	handle_dar_dsisr=\handle_dar_dsisr
+	EXCEPTION_PROLOG_1
+	EXCEPTION_PROLOG_2	handle_dar_dsisr=\handle_dar_dsisr
+.endm
 
-.macro EXCEPTION_PROLOG
+.macro EXCEPTION_PROLOG_0 handle_dar_dsisr=0
 	mtspr	SPRN_SPRG_SCRATCH0,r10
 	mtspr	SPRN_SPRG_SCRATCH1,r11
+#ifdef CONFIG_VMAP_STACK
+	mfspr	r10, SPRN_SPRG_THREAD
+	.if	\handle_dar_dsisr
+	mfspr	r11, SPRN_DAR
+	stw	r11, DAR(r10)
+	mfspr	r11, SPRN_DSISR
+	stw	r11, DSISR(r10)
+	.endif
+	mfspr	r11, SPRN_SRR0
+	stw	r11, SRR0(r10)
+#endif
+	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
+#ifdef CONFIG_VMAP_STACK
+	stw	r11, SRR1(r10)
+#endif
 	mfcr	r10
-	EXCEPTION_PROLOG_1
-	EXCEPTION_PROLOG_2
+	andi.	r11, r11, MSR_PR
 .endm
 
-.macro EXCEPTION_PROLOG_1
-	mfspr	r11,SPRN_SRR1		/* check whether user or kernel */
-	andi.	r11,r11,MSR_PR
+.macro EXCEPTION_PROLOG_1 for_rtas=0
+#ifdef CONFIG_VMAP_STACK
+	.ifeq	\for_rtas
+	li	r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
+	mtmsr	r11
+	isync
+	.endif
+	subi	r11, r1, INT_FRAME_SIZE		/* use r1 if kernel */
+#else
 	tophys(r11,r1)			/* use tophys(r1) if kernel */
+	subi	r11, r11, INT_FRAME_SIZE	/* alloc exc. frame */
+#endif
 	beq	1f
 	mfspr	r11,SPRN_SPRG_THREAD
+	tovirt_vmstack r11, r11
 	lwz	r11,TASK_STACK-THREAD(r11)
-	addi	r11,r11,THREAD_SIZE
-	tophys(r11,r11)
-1:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */
+	addi	r11, r11, THREAD_SIZE - INT_FRAME_SIZE
+	tophys_novmstack r11, r11
+1:
+#ifdef CONFIG_VMAP_STACK
+	mtcrf	0x7f, r11
+	bt	32 - THREAD_ALIGN_SHIFT, stack_overflow
+#endif
 .endm
 
-.macro EXCEPTION_PROLOG_2
+.macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
 	stw	r10,_CCR(r11)		/* save registers */
 	stw	r12,GPR12(r11)
 	stw	r9,GPR9(r11)
@@ -41,16 +73,33 @@
 	stw	r12,GPR11(r11)
 	mflr	r10
 	stw	r10,_LINK(r11)
+#ifdef CONFIG_VMAP_STACK
+	mfspr	r12, SPRN_SPRG_THREAD
+	tovirt(r12, r12)
+	.if	\handle_dar_dsisr
+	lwz	r10, DAR(r12)
+	stw	r10, _DAR(r11)
+	lwz	r10, DSISR(r12)
+	stw	r10, _DSISR(r11)
+	.endif
+	lwz	r9, SRR1(r12)
+	lwz	r12, SRR0(r12)
+#else
 	mfspr	r12,SPRN_SRR0
 	mfspr	r9,SPRN_SRR1
+#endif
 	stw	r1,GPR1(r11)
 	stw	r1,0(r11)
-	tovirt(r1,r11)			/* set new kernel sp */
+	tovirt_novmstack r1, r11	/* set new kernel sp */
 #ifdef CONFIG_40x
 	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
 #else
+#ifdef CONFIG_VMAP_STACK
+	li	r10, MSR_KERNEL & ~MSR_IR /* can take exceptions */
+#else
 	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* can take exceptions */
-	MTMSRD(r10)			/* (except for mach check in rtas) */
+#endif
+	mtmsr	r10			/* (except for mach check in rtas) */
 #endif
 	stw	r0,GPR0(r11)
 	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
@@ -62,25 +111,46 @@
 
 .macro SYSCALL_ENTRY trapno
 	mfspr	r12,SPRN_SPRG_THREAD
+#ifdef CONFIG_VMAP_STACK
+	mfspr	r9, SPRN_SRR0
+	mfspr	r11, SPRN_SRR1
+	stw	r9, SRR0(r12)
+	stw	r11, SRR1(r12)
+#endif
 	mfcr	r10
 	lwz	r11,TASK_STACK-THREAD(r12)
-	mflr	r9
-	addi	r11,r11,THREAD_SIZE - INT_FRAME_SIZE
 	rlwinm	r10,r10,0,4,2	/* Clear SO bit in CR */
-	tophys(r11,r11)
+	addi	r11, r11, THREAD_SIZE - INT_FRAME_SIZE
+#ifdef CONFIG_VMAP_STACK
+	li	r9, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
+	mtmsr	r9
+	isync
+#endif
+	tovirt_vmstack r12, r12
+	tophys_novmstack r11, r11
+	mflr	r9
 	stw	r10,_CCR(r11)		/* save registers */
+	stw	r9, _LINK(r11)
+#ifdef CONFIG_VMAP_STACK
+	lwz	r10, SRR0(r12)
+	lwz	r9, SRR1(r12)
+#else
 	mfspr	r10,SPRN_SRR0
-	stw	r9,_LINK(r11)
 	mfspr	r9,SPRN_SRR1
+#endif
 	stw	r1,GPR1(r11)
 	stw	r1,0(r11)
-	tovirt(r1,r11)			/* set new kernel sp */
+	tovirt_novmstack r1, r11	/* set new kernel sp */
 	stw	r10,_NIP(r11)
 #ifdef CONFIG_40x
 	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
 #else
+#ifdef CONFIG_VMAP_STACK
+	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~MSR_IR) /* can take exceptions */
+#else
 	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
-	MTMSRD(r10)			/* (except for mach check in rtas) */
+#endif
+	mtmsr	r10			/* (except for mach check in rtas) */
 #endif
 	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
 	stw	r2,GPR2(r11)
@@ -118,7 +188,7 @@
 #endif
 
 3:
-	tovirt(r2, r2)			/* set r2 to current */
+	tovirt_novmstack r2, r2 	/* set r2 to current */
 	lis	r11, transfer_to_syscall@h
 	ori	r11, r11, transfer_to_syscall@l
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -141,6 +211,54 @@
 	RFI				/* jump to handler, enable MMU */
 .endm
 
+.macro save_dar_dsisr_on_stack reg1, reg2, sp
+#ifndef CONFIG_VMAP_STACK
+	mfspr	\reg1, SPRN_DAR
+	mfspr	\reg2, SPRN_DSISR
+	stw	\reg1, _DAR(\sp)
+	stw	\reg2, _DSISR(\sp)
+#endif
+.endm
+
+.macro get_and_save_dar_dsisr_on_stack reg1, reg2, sp
+#ifdef CONFIG_VMAP_STACK
+	lwz	\reg1, _DAR(\sp)
+	lwz	\reg2, _DSISR(\sp)
+#else
+	save_dar_dsisr_on_stack \reg1, \reg2, \sp
+#endif
+.endm
+
+.macro tovirt_vmstack dst, src
+#ifdef CONFIG_VMAP_STACK
+	tovirt(\dst, \src)
+#else
+	.ifnc	\dst, \src
+	mr	\dst, \src
+	.endif
+#endif
+.endm
+
+.macro tovirt_novmstack dst, src
+#ifndef CONFIG_VMAP_STACK
+	tovirt(\dst, \src)
+#else
+	.ifnc	\dst, \src
+	mr	\dst, \src
+	.endif
+#endif
+.endm
+
+.macro tophys_novmstack dst, src
+#ifndef CONFIG_VMAP_STACK
+	tophys(\dst, \src)
+#else
+	.ifnc	\dst, \src
+	mr	\dst, \src
+	.endif
+#endif
+.endm
+
 /*
  * Note: code which follows this uses cr0.eq (set if from kernel),
  * r11, r12 (SRR0), and r9 (SRR1).
@@ -187,4 +305,28 @@
 	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
 			  ret_from_except)
 
+.macro vmap_stack_overflow_exception
+#ifdef CONFIG_VMAP_STACK
+#ifdef CONFIG_SMP
+	mfspr	r11, SPRN_SPRG_THREAD
+	tovirt(r11, r11)
+	lwz	r11, TASK_CPU - THREAD(r11)
+	slwi	r11, r11, 3
+	addis	r11, r11, emergency_ctx@ha
+#else
+	lis	r11, emergency_ctx@ha
+#endif
+	lwz	r11, emergency_ctx@l(r11)
+	cmpwi	cr1, r11, 0
+	bne	cr1, 1f
+	lis	r11, init_thread_union@ha
+	addi	r11, r11, init_thread_union@l
+1:	addi	r11, r11, THREAD_SIZE - INT_FRAME_SIZE
+	EXCEPTION_PROLOG_2
+	SAVE_NVGPRS(r11)
+	addi	r3, r1, STACK_FRAME_OVERHEAD
+	EXC_XFER_STD(0, stack_overflow_exception)
+#endif
+.endm
+
 #endif /* __HEAD_32_H__ */
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 585ea19..9bb6639 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -313,6 +313,7 @@
 	START_EXCEPTION(0x0400, InstructionAccess)
 	EXCEPTION_PROLOG
 	mr	r4,r12			/* Pass SRR0 as arg2 */
+	stw	r4, _DEAR(r11)
 	li	r5,0			/* Pass zero as arg3 */
 	EXC_XFER_LITE(0x400, handle_page_fault)
 
@@ -676,6 +677,7 @@
 	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
 	stw	r5,_ESR(r11)
 	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
+	stw	r4, _DEAR(r11)
 	EXC_XFER_LITE(0x300, handle_page_fault)
 
 /* Other PowerPC processors, namely those derived from the 6xx-series
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 19f583e..9922306 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -127,56 +127,36 @@
 /* Machine check */
 	. = 0x200
 MachineCheck:
-	EXCEPTION_PROLOG
-	mfspr r4,SPRN_DAR
-	stw r4,_DAR(r11)
-	li r5,RPN_PATTERN
-	mtspr SPRN_DAR,r5	/* Tag DAR, to be used in DTLB Error */
-	mfspr r5,SPRN_DSISR
-	stw r5,_DSISR(r11)
+	EXCEPTION_PROLOG handle_dar_dsisr=1
+	save_dar_dsisr_on_stack r4, r5, r11
+	li	r6, RPN_PATTERN
+	mtspr	SPRN_DAR, r6	/* Tag DAR, to be used in DTLB Error */
 	addi r3,r1,STACK_FRAME_OVERHEAD
 	EXC_XFER_STD(0x200, machine_check_exception)
 
-/* Data access exception.
- * This is "never generated" by the MPC8xx.
- */
-	. = 0x300
-DataAccess:
-
-/* Instruction access exception.
- * This is "never generated" by the MPC8xx.
- */
-	. = 0x400
-InstructionAccess:
-
 /* External interrupt */
 	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
 
 /* Alignment exception */
 	. = 0x600
 Alignment:
-	EXCEPTION_PROLOG
-	mfspr	r4,SPRN_DAR
-	stw	r4,_DAR(r11)
-	li	r5,RPN_PATTERN
-	mtspr	SPRN_DAR,r5	/* Tag DAR, to be used in DTLB Error */
-	mfspr	r5,SPRN_DSISR
-	stw	r5,_DSISR(r11)
+	EXCEPTION_PROLOG handle_dar_dsisr=1
+	save_dar_dsisr_on_stack r4, r5, r11
+	li	r6, RPN_PATTERN
+	mtspr	SPRN_DAR, r6	/* Tag DAR, to be used in DTLB Error */
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	EXC_XFER_STD(0x600, alignment_exception)
+	b	.Lalignment_exception_ool
 
 /* Program check exception */
 	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
 
-/* No FPU on MPC8xx.  This exception is not supposed to happen.
-*/
-	EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)
-
 /* Decrementer */
 	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
 
-	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
-	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)
+	/* With VMAP_STACK there's not enough room for this at 0x600 */
+	. = 0xa00
+.Lalignment_exception_ool:
+	EXC_XFER_STD(0x600, alignment_exception)
 
 /* System call */
 	. = 0xc00
@@ -185,25 +165,12 @@
 
 /* Single step - not used on 601 */
 	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
-	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)
-	EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_STD)
 
 /* On the MPC8xx, this is a software emulation interrupt.  It occurs
  * for all unimplemented and illegal instructions.
  */
 	EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)
 
-/* Called from DataStoreTLBMiss when perf TLB misses events are activated */
-#ifdef CONFIG_PERF_EVENTS
-	patch_site	0f, patch__dtlbmiss_perf
-0:	lwz	r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
-	addi	r10, r10, 1
-	stw	r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
-	mfspr	r10, SPRN_SPRG_SCRATCH0
-	mfspr	r11, SPRN_SPRG_SCRATCH1
-	rfi
-#endif
-
 	. = 0x1100
 /*
  * For the MPC8xx, this is a software tablewalk to load the instruction
@@ -343,8 +310,8 @@
 
 	. = 0x1200
 DataStoreTLBMiss:
-	mtspr	SPRN_SPRG_SCRATCH0, r10
-	mtspr	SPRN_SPRG_SCRATCH1, r11
+	mtspr	SPRN_DAR, r10
+	mtspr	SPRN_M_TW, r11
 	mfcr	r11
 
 	/* If we are faulting a kernel address, we have to use the
@@ -409,10 +376,10 @@
 	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 
 	/* Restore registers */
-	mtspr	SPRN_DAR, r11	/* Tag DAR */
 
-0:	mfspr	r10, SPRN_SPRG_SCRATCH0
-	mfspr	r11, SPRN_SPRG_SCRATCH1
+0:	mfspr	r10, SPRN_DAR
+	mtspr	SPRN_DAR, r11	/* Tag DAR */
+	mfspr	r11, SPRN_M_TW
 	rfi
 	patch_site	0b, patch__dtlbmiss_exit_1
 
@@ -428,10 +395,10 @@
 	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 
 	li	r11, RPN_PATTERN
-	mtspr	SPRN_DAR, r11	/* Tag DAR */
 
-0:	mfspr	r10, SPRN_SPRG_SCRATCH0
-	mfspr	r11, SPRN_SPRG_SCRATCH1
+0:	mfspr	r10, SPRN_DAR
+	mtspr	SPRN_DAR, r11	/* Tag DAR */
+	mfspr	r11, SPRN_M_TW
 	rfi
 	patch_site	0b, patch__dtlbmiss_exit_2
 
@@ -465,10 +432,10 @@
 	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 
 	li	r11, RPN_PATTERN
-	mtspr	SPRN_DAR, r11	/* Tag DAR */
 
-0:	mfspr	r10, SPRN_SPRG_SCRATCH0
-	mfspr	r11, SPRN_SPRG_SCRATCH1
+0:	mfspr	r10, SPRN_DAR
+	mtspr	SPRN_DAR, r11	/* Tag DAR */
+	mfspr	r11, SPRN_M_TW
 	rfi
 	patch_site	0b, patch__dtlbmiss_exit_3
 
@@ -486,6 +453,7 @@
 	tlbie	r4
 	/* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
 .Litlbie:
+	stw	r4, _DAR(r11)
 	EXC_XFER_LITE(0x400, handle_page_fault)
 
 /* This is the data TLB error on the MPC8xx.  This could be due to
@@ -494,58 +462,69 @@
  */
 	. = 0x1400
 DataTLBError:
-	mtspr	SPRN_SPRG_SCRATCH0, r10
-	mtspr	SPRN_SPRG_SCRATCH1, r11
-	mfcr	r10
-
+	EXCEPTION_PROLOG_0 handle_dar_dsisr=1
 	mfspr	r11, SPRN_DAR
-	cmpwi	cr0, r11, RPN_PATTERN
-	beq-	FixupDAR	/* must be a buggy dcbX, icbi insn. */
+	cmpwi	cr1, r11, RPN_PATTERN
+	beq-	cr1, FixupDAR	/* must be a buggy dcbX, icbi insn. */
 DARFixed:/* Return from dcbx instruction bug workaround */
+#ifdef CONFIG_VMAP_STACK
+	li	r11, RPN_PATTERN
+	mtspr	SPRN_DAR, r11	/* Tag DAR, to be used in DTLB Error */
+#endif
 	EXCEPTION_PROLOG_1
-	EXCEPTION_PROLOG_2
-	mfspr	r5,SPRN_DSISR
-	stw	r5,_DSISR(r11)
-	mfspr	r4,SPRN_DAR
+	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
+	get_and_save_dar_dsisr_on_stack r4, r5, r11
 	andis.	r10,r5,DSISR_NOHPTE@h
 	beq+	.Ldtlbie
 	tlbie	r4
 .Ldtlbie:
+#ifndef CONFIG_VMAP_STACK
 	li	r10,RPN_PATTERN
 	mtspr	SPRN_DAR,r10	/* Tag DAR, to be used in DTLB Error */
+#endif
 	/* 0x300 is DataAccess exception, needed by bad_page_fault() */
 	EXC_XFER_LITE(0x300, handle_page_fault)
 
-	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
-	EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_STD)
-	EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_STD)
-	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
-	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
-	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
-	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
+/* Called from DataStoreTLBMiss when perf TLB miss events are activated */
+#ifdef CONFIG_PERF_EVENTS
+	patch_site	0f, patch__dtlbmiss_perf
+0:	lwz	r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
+	addi	r10, r10, 1
+	stw	r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
+	mfspr	r10, SPRN_DAR
+	mtspr	SPRN_DAR, r11	/* Tag DAR */
+	mfspr	r11, SPRN_M_TW
+	rfi
+#endif
+
+stack_overflow:
+	vmap_stack_overflow_exception
 
 /* On the MPC8xx, these next four traps are used for development
  * support of breakpoints and such.  Someday I will get around to
  * using them.
  */
-	. = 0x1c00
-DataBreakpoint:
-	mtspr	SPRN_SPRG_SCRATCH0, r10
-	mtspr	SPRN_SPRG_SCRATCH1, r11
-	mfcr	r10
-	mfspr	r11, SPRN_SRR0
-	cmplwi	cr0, r11, (.Ldtlbie - PAGE_OFFSET)@l
-	cmplwi	cr7, r11, (.Litlbie - PAGE_OFFSET)@l
-	beq-	cr0, 11f
-	beq-	cr7, 11f
+do_databreakpoint:
 	EXCEPTION_PROLOG_1
-	EXCEPTION_PROLOG_2
+	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	mfspr	r4,SPRN_BAR
 	stw	r4,_DAR(r11)
+#ifdef CONFIG_VMAP_STACK
+	lwz	r5,_DSISR(r11)
+#else
 	mfspr	r5,SPRN_DSISR
+#endif
 	EXC_XFER_STD(0x1c00, do_break)
-11:
+
+	. = 0x1c00
+DataBreakpoint:
+	EXCEPTION_PROLOG_0 handle_dar_dsisr=1
+	mfspr	r11, SPRN_SRR0
+	cmplwi	cr1, r11, (.Ldtlbie - PAGE_OFFSET)@l
+	cmplwi	cr7, r11, (.Litlbie - PAGE_OFFSET)@l
+	cror	4*cr1+eq, 4*cr1+eq, 4*cr7+eq
+	bne	cr1, do_databreakpoint
 	mtcr	r10
 	mfspr	r10, SPRN_SPRG_SCRATCH0
 	mfspr	r11, SPRN_SPRG_SCRATCH1
@@ -581,9 +560,9 @@
 	mfspr	r10, SPRN_SRR0
 	mtspr	SPRN_MD_EPN, r10
 	rlwinm	r11, r10, 16, 0xfff8
-	cmpli	cr0, r11, PAGE_OFFSET@h
+	cmpli	cr1, r11, PAGE_OFFSET@h
 	mfspr	r11, SPRN_M_TWB	/* Get level 1 table */
-	blt+	3f
+	blt+	cr1, 3f
 	rlwinm	r11, r10, 16, 0xfff8
 
 0:	cmpli	cr7, r11, (PAGE_OFFSET + 0x1800000)@h
@@ -598,7 +577,7 @@
 3:
 	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11)	/* Get the level 1 entry */
 	mtspr	SPRN_MD_TWC, r11
-	mtcr	r11
+	mtcrf	0x01, r11
 	mfspr	r11, SPRN_MD_TWC
 	lwz	r11, 0(r11)	/* Get the pte */
 	bt	28,200f		/* bit 28 = Large page (8M) */
@@ -611,16 +590,16 @@
  * no need to include them here */
 	xoris	r10, r11, 0x7c00	/* check if major OP code is 31 */
 	rlwinm	r10, r10, 0, 21, 5
-	cmpwi	cr0, r10, 2028	/* Is dcbz? */
-	beq+	142f
-	cmpwi	cr0, r10, 940	/* Is dcbi? */
-	beq+	142f
-	cmpwi	cr0, r10, 108	/* Is dcbst? */
-	beq+	144f		/* Fix up store bit! */
-	cmpwi	cr0, r10, 172	/* Is dcbf? */
-	beq+	142f
-	cmpwi	cr0, r10, 1964	/* Is icbi? */
-	beq+	142f
+	cmpwi	cr1, r10, 2028	/* Is dcbz? */
+	beq+	cr1, 142f
+	cmpwi	cr1, r10, 940	/* Is dcbi? */
+	beq+	cr1, 142f
+	cmpwi	cr1, r10, 108	/* Is dcbst? */
+	beq+	cr1, 144f		/* Fix up store bit! */
+	cmpwi	cr1, r10, 172	/* Is dcbf? */
+	beq+	cr1, 142f
+	cmpwi	cr1, r10, 1964	/* Is icbi? */
+	beq+	cr1, 142f
 141:	mfspr	r10,SPRN_M_TW
 	b	DARFixed	/* Nope, go back to normal TLB processing */
 
@@ -679,8 +658,9 @@
 	add	r10, r10, r30	;b	151f
 	add	r10, r10, r31
 151:
-	rlwinm. r11,r11,19,24,28	/* offset into jump table for reg RA */
-	beq	152f			/* if reg RA is zero, don't add it */
+	rlwinm	r11,r11,19,24,28	/* offset into jump table for reg RA */
+	cmpwi	cr1, r11, 0
+	beq	cr1, 152f		/* if reg RA is zero, don't add it */
 	addi	r11, r11, 150b@l	/* add start of table */
 	mtctr	r11			/* load ctr with jump address */
 	rlwinm	r11,r11,0,16,10		/* make sure we don't execute this more than once */
@@ -688,7 +668,14 @@
 152:
 	mfdar	r11
 	mtctr	r11			/* restore ctr reg from DAR */
+#ifdef CONFIG_VMAP_STACK
+	mfspr	r11, SPRN_SPRG_THREAD
+	stw	r10, DAR(r11)
+	mfspr	r10, SPRN_DSISR
+	stw	r10, DSISR(r11)
+#else
 	mtdar	r10			/* save fault EA to DAR */
+#endif
 	mfspr	r10,SPRN_M_TW
 	b	DARFixed		/* Go back to normal TLB handling */
 
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 2ae635d..37fc84e 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -467,6 +467,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 	mfspr	r5,SPRN_ESR;		/* Grab the ESR and save it */	      \
 	stw	r5,_ESR(r11);						      \
 	mfspr	r4,SPRN_DEAR;		/* Grab the DEAR */		      \
+	stw	r4, _DEAR(r11);						      \
 	EXC_XFER_LITE(0x0300, handle_page_fault)
 
 #define INSTRUCTION_STORAGE_EXCEPTION					      \
@@ -475,6 +476,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 	mfspr	r5,SPRN_ESR;		/* Grab the ESR and save it */	      \
 	stw	r5,_ESR(r11);						      \
 	mr      r4,r12;                 /* Pass SRR0 as arg2 */		      \
+	stw	r4, _DEAR(r11);						      \
 	li      r5,0;                   /* Pass zero as arg3 */		      \
 	EXC_XFER_LITE(0x0400, handle_page_fault)
 
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 6f7a3a7..840af00 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -378,6 +378,7 @@
 	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
 	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
 	bne	1f
+	stw	r4, _DEAR(r11)
 	EXC_XFER_LITE(0x0300, handle_page_fault)
 1:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 58ce3d3..2462cd7 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -160,6 +160,9 @@ static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
 		/* DAWR region can't cross 512 bytes boundary */
 		if ((start_addr >> 9) != (end_addr >> 9))
 			return -EINVAL;
+	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
+		/* 8xx can setup a range without limitation */
+		max_len = U16_MAX;
 	}
 
 	if (hw_len > max_len)
@@ -328,13 +331,11 @@ int hw_breakpoint_handler(struct die_args *args)
 	}
 
 	info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
-	if (IS_ENABLED(CONFIG_PPC_8xx)) {
-		if (!dar_within_range(regs->dar, info))
-			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
-	} else {
-		if (!stepping_handler(regs, bp, info))
-			goto out;
-	}
+	if (!dar_within_range(regs->dar, info))
+		info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+
+	if (!IS_ENABLED(CONFIG_PPC_8xx) && !stepping_handler(regs, bp, info))
+		goto out;
 
 	/*
 	 * As a policy, the callback is invoked in a 'trigger-after-execute'
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index a36fd05..422e31d2 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -77,6 +77,31 @@ void arch_cpu_idle(void)
 
 int powersave_nap;
 
+#ifdef CONFIG_PPC_970_NAP
+void power4_idle(void)
+{
+	if (!cpu_has_feature(CPU_FTR_CAN_NAP))
+		return;
+
+	if (!powersave_nap)
+		return;
+
+	if (!prep_irq_for_idle())
+		return;
+
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		asm volatile("DSSALL ; sync" ::: "memory");
+
+	power4_idle_nap();
+
+	/*
+	 * power4_idle_nap returns to our caller with interrupts enabled
+	 * (soft and hard). Our caller can cope with interrupts either
+	 * disabled or enabled upon return.
+	 */
+}
+#endif
+
 #ifdef CONFIG_SYSCTL
 /*
  * Register the sysctl to set/clear powersave_nap.
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index d3275199..22f249b 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -15,7 +15,9 @@
 #include <asm/asm-offsets.h>
 #include <asm/ppc-opcode.h>
 #include <asm/cpuidle.h>
+#include <asm/thread_info.h> /* TLF_NAPPING */
 
+#ifdef CONFIG_PPC_P7_NAP
 /*
  * Desired PSSCR in r3
  *
@@ -181,4 +183,22 @@
 	bne	2f
 	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)
 2:	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
+#endif
 
+#ifdef CONFIG_PPC_970_NAP
+_GLOBAL(power4_idle_nap)
+	LOAD_REG_IMMEDIATE(r7, MSR_KERNEL|MSR_EE|MSR_POW)
+	ld	r9,PACA_THREAD_INFO(r13)
+	ld	r8,TI_LOCAL_FLAGS(r9)
+	ori	r8,r8,_TLF_NAPPING
+	std	r8,TI_LOCAL_FLAGS(r9)
+	/*
+	 * The NAPPING bit is set; from this point onward power4_fixup_nap
+	 * will cause exceptions to return to power4_idle_nap_return.
+	 */
+1:	sync
+	isync
+	mtmsrd	r7
+	isync
+	b	1b
+#endif
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
deleted file mode 100644
index 33c62532..0000000
--- a/arch/powerpc/kernel/idle_power4.S
+++ /dev/null
@@ -1,83 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- *  This file contains the power_save function for 970-family CPUs.
- */
-
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/thread_info.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/irqflags.h>
-#include <asm/hw_irq.h>
-#include <asm/feature-fixups.h>
-
-#undef DEBUG
-
-	.text
-
-_GLOBAL(power4_idle)
-BEGIN_FTR_SECTION
-	blr
-END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
-	/* Now check if user or arch enabled NAP mode */
-	LOAD_REG_ADDRBASE(r3,powersave_nap)
-	lwz	r4,ADDROFF(powersave_nap)(r3)
-	cmpwi	0,r4,0
-	beqlr
-
-	/* This sequence is similar to prep_irq_for_idle() */
-
-	/* Hard disable interrupts */
-	mfmsr	r7
-	rldicl	r0,r7,48,1
-	rotldi	r0,r0,16
-	mtmsrd	r0,1
-
-	/* Check if something happened while soft-disabled */
-	lbz	r0,PACAIRQHAPPENED(r13)
-	cmpwi	cr0,r0,0
-	bne-	2f
-
-	/*
-	 * Soft-enable interrupts. This will make power4_fixup_nap return
-	 * to our caller with interrupts enabled (soft and hard). The caller
-	 * can cope with either interrupts disabled or enabled upon return.
-	 */
-#ifdef CONFIG_TRACE_IRQFLAGS
-	/* Tell the tracer interrupts are on, because idle responds to them. */
-	mflr	r0
-	std	r0,16(r1)
-	stdu    r1,-128(r1)
-	bl	trace_hardirqs_on
-	addi    r1,r1,128
-	ld	r0,16(r1)
-	mtlr	r0
-	mfmsr	r7
-#endif /* CONFIG_TRACE_IRQFLAGS */
-
-	li	r0,IRQS_ENABLED
-	stb	r0,PACAIRQSOFTMASK(r13)	/* we'll hard-enable shortly */
-BEGIN_FTR_SECTION
-	DSSALL
-	sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	ld	r9, PACA_THREAD_INFO(r13)
-	ld	r8,TI_LOCAL_FLAGS(r9)	/* set napping bit */
-	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
-	std	r8,TI_LOCAL_FLAGS(r9)	/* it will return to our caller */
-	ori	r7,r7,MSR_EE
-	oris	r7,r7,MSR_POW@h
-1:	sync
-	isync
-	mtmsrd	r7
-	isync
-	b	1b
-
-2:	/* Return if an interrupt had happened while soft disabled */
-	/* Set the HARD_DIS flag because interrupts are now hard disabled */
-	ori	r0,r0,PACA_IRQ_HARD_DIS
-	stb	r0,PACAIRQHAPPENED(r13)
-	blr
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index add6749..5c9b118 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -50,6 +50,7 @@
 #include <linux/debugfs.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/vmalloc.h>
 
 #include <linux/uaccess.h>
 #include <asm/io.h>
@@ -664,8 +665,29 @@ void do_IRQ(struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
+static void *__init alloc_vm_stack(void)
+{
+	return __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, VMALLOC_START,
+				    VMALLOC_END, THREADINFO_GFP, PAGE_KERNEL,
+				     0, NUMA_NO_NODE, (void*)_RET_IP_);
+}
+
+static void __init vmap_irqstack_init(void)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		softirq_ctx[i] = alloc_vm_stack();
+		hardirq_ctx[i] = alloc_vm_stack();
+	}
+}
+
+
 void __init init_IRQ(void)
 {
+	if (IS_ENABLED(CONFIG_VMAP_STACK))
+		vmap_irqstack_init();
+
 	if (ppc_md.init_IRQ)
 		ppc_md.init_IRQ();
 }
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 7cea597..f061e06 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -479,8 +479,10 @@ static void __init fixup_port_irq(int index,
 	port->irq = virq;
 
 #ifdef CONFIG_SERIAL_8250_FSL
-	if (of_device_is_compatible(np, "fsl,ns16550"))
+	if (of_device_is_compatible(np, "fsl,ns16550")) {
 		port->handle_irq = fsl8250_handle_irq;
+		port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+	}
 #endif
 }
 
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 1c448cf..c6c0341 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -261,12 +261,6 @@ int pcibios_sriov_disable(struct pci_dev *pdev)
 
 #endif /* CONFIG_PCI_IOV */
 
-void pcibios_bus_add_device(struct pci_dev *pdev)
-{
-	if (ppc_md.pcibios_bus_add_device)
-		ppc_md.pcibios_bus_add_device(pdev);
-}
-
 static resource_size_t pcibios_io_size(const struct pci_controller *hose)
 {
 #ifdef CONFIG_PPC64
@@ -964,7 +958,7 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
 		phb->controller_ops.dma_bus_setup(bus);
 }
 
-static void pcibios_setup_device(struct pci_dev *dev)
+void pcibios_bus_add_device(struct pci_dev *dev)
 {
 	struct pci_controller *phb;
 	/* Fixup NUMA node as it may not be setup yet by the generic
@@ -985,17 +979,13 @@ static void pcibios_setup_device(struct pci_dev *dev)
 	pci_read_irq_line(dev);
 	if (ppc_md.pci_irq_fixup)
 		ppc_md.pci_irq_fixup(dev);
+
+	if (ppc_md.pcibios_bus_add_device)
+		ppc_md.pcibios_bus_add_device(dev);
 }
 
 int pcibios_add_device(struct pci_dev *dev)
 {
-	/*
-	 * We can only call pcibios_setup_device() after bus setup is complete,
-	 * since some of the platform specific DMA setup code depends on it.
-	 */
-	if (dev->bus->is_added)
-		pcibios_setup_device(dev);
-
 #ifdef CONFIG_PCI_IOV
 	if (ppc_md.pcibios_fixup_sriov)
 		ppc_md.pcibios_fixup_sriov(dev);
@@ -1004,24 +994,6 @@ int pcibios_add_device(struct pci_dev *dev)
 	return 0;
 }
 
-void pcibios_setup_bus_devices(struct pci_bus *bus)
-{
-	struct pci_dev *dev;
-
-	pr_debug("PCI: Fixup bus devices %d (%s)\n",
-		 bus->number, bus->self ? pci_name(bus->self) : "PHB");
-
-	list_for_each_entry(dev, &bus->devices, bus_list) {
-		/* Cardbus can call us to add new devices to a bus, so ignore
-		 * those who are already fully discovered
-		 */
-		if (pci_dev_is_added(dev))
-			continue;
-
-		pcibios_setup_device(dev);
-	}
-}
-
 void pcibios_set_master(struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling */
@@ -1037,19 +1009,9 @@ void pcibios_fixup_bus(struct pci_bus *bus)
 
 	/* Now fixup the bus bus */
 	pcibios_setup_bus_self(bus);
-
-	/* Now fixup devices on that bus */
-	pcibios_setup_bus_devices(bus);
 }
 EXPORT_SYMBOL(pcibios_fixup_bus);
 
-void pci_fixup_cardbus(struct pci_bus *bus)
-{
-	/* Now fixup devices on that bus */
-	pcibios_setup_bus_devices(bus);
-}
-
-
 static int skip_isa_ioresource_align(struct pci_dev *dev)
 {
 	if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index fc62c4b..d6a67f8 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -134,7 +134,6 @@ void pci_hp_add_devices(struct pci_bus *bus)
 		 */
 		slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
 		pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
-		pcibios_setup_bus_devices(bus);
 		max = bus->busn_res.start;
 		/*
 		 * Scan bridges that are already configured. We don't touch
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 9524009..4e654df 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -125,7 +125,7 @@ struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
 }
 
 #ifdef CONFIG_PCI_IOV
-static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent,
+static struct pci_dn *add_one_sriov_vf_pdn(struct pci_dn *parent,
 					   int vf_index,
 					   int busno, int devfn)
 {
@@ -151,17 +151,15 @@ static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent,
 
 	return pdn;
 }
-#endif
 
-struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
+struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev)
 {
-#ifdef CONFIG_PCI_IOV
 	struct pci_dn *parent, *pdn;
 	int i;
 
 	/* Only support IOV for now */
-	if (!pdev->is_physfn)
-		return pci_get_pdn(pdev);
+	if (WARN_ON(!pdev->is_physfn))
+		return NULL;
 
 	/* Check if VFs have been populated */
 	pdn = pci_get_pdn(pdev);
@@ -176,7 +174,7 @@ struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
 	for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
 		struct eeh_dev *edev __maybe_unused;
 
-		pdn = add_one_dev_pci_data(parent, i,
+		pdn = add_one_sriov_vf_pdn(parent, i,
 					   pci_iov_virtfn_bus(pdev, i),
 					   pci_iov_virtfn_devfn(pdev, i));
 		if (!pdn) {
@@ -192,31 +190,17 @@ struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
 		edev->physfn = pdev;
 #endif /* CONFIG_EEH */
 	}
-#endif /* CONFIG_PCI_IOV */
-
 	return pci_get_pdn(pdev);
 }
 
-void remove_dev_pci_data(struct pci_dev *pdev)
+void remove_sriov_vf_pdns(struct pci_dev *pdev)
 {
-#ifdef CONFIG_PCI_IOV
 	struct pci_dn *parent;
 	struct pci_dn *pdn, *tmp;
 	int i;
 
-	/*
-	 * VF and VF PE are created/released dynamically, so we need to
-	 * bind/unbind them.  Otherwise the VF and VF PE would be mismatched
-	 * when re-enabling SR-IOV.
-	 */
-	if (pdev->is_virtfn) {
-		pdn = pci_get_pdn(pdev);
-		pdn->pe_number = IODA_INVALID_PE;
-		return;
-	}
-
 	/* Only support IOV PF for now */
-	if (!pdev->is_physfn)
+	if (WARN_ON(!pdev->is_physfn))
 		return;
 
 	/* Check if VFs have been populated */
@@ -244,9 +228,22 @@ void remove_dev_pci_data(struct pci_dev *pdev)
 				continue;
 
 #ifdef CONFIG_EEH
-			/* Release EEH device for the VF */
+			/*
+			 * Release EEH state for this VF. The PCI core
+			 * has already torn down the pci_dev for this VF, but
+			 * we're responsible for removing the eeh_dev since it
+			 * has the same lifetime as the pci_dn that spawned it.
+			 */
 			edev = pdn_to_eeh_dev(pdn);
 			if (edev) {
+				/*
+				 * We allocate pci_dn's for the totalvfs count,
+				 * but only only the vfs that were activated
+				 * have a configured PE.
+				 */
+				if (edev->pe)
+					eeh_rmv_from_parent_pe(edev);
+
 				pdn->edev = NULL;
 				kfree(edev);
 			}
@@ -258,8 +255,8 @@ void remove_dev_pci_data(struct pci_dev *pdev)
 			kfree(pdn);
 		}
 	}
-#endif /* CONFIG_PCI_IOV */
 }
+#endif /* CONFIG_PCI_IOV */
 
 struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
 					struct device_node *dn)
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index f91d7e9..c3024f1 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -414,7 +414,6 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
 	 */
 	if (!rescan_existing)
 		pcibios_setup_bus_self(bus);
-	pcibios_setup_bus_devices(bus);
 
 	/* Now scan child busses */
 	for_each_pci_bridge(dev, bus)
diff --git a/arch/powerpc/kernel/proc_powerpc.c b/arch/powerpc/kernel/proc_powerpc.c
index be3758d..8778174 100644
--- a/arch/powerpc/kernel/proc_powerpc.c
+++ b/arch/powerpc/kernel/proc_powerpc.c
@@ -39,10 +39,10 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
 	return 0;
 }
 
-static const struct file_operations page_map_fops = {
-	.llseek	= page_map_seek,
-	.read	= page_map_read,
-	.mmap	= page_map_mmap
+static const struct proc_ops page_map_proc_ops = {
+	.proc_lseek	= page_map_seek,
+	.proc_read	= page_map_read,
+	.proc_mmap	= page_map_mmap,
 };
 
 
@@ -51,7 +51,7 @@ static int __init proc_ppc64_init(void)
 	struct proc_dir_entry *pde;
 
 	pde = proc_create_data("powerpc/systemcfg", S_IFREG | 0444, NULL,
-			       &page_map_fops, vdso_data);
+			       &page_map_proc_ops, vdso_data);
 	if (!pde)
 		return 1;
 	proc_set_size(pde, PAGE_SIZE);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4df94b6..fad50db 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -740,28 +740,6 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
 		mtspr(SPRN_DABRX, dabrx);
 	return 0;
 }
-#elif defined(CONFIG_PPC_8xx)
-static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
-{
-	unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
-	unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */
-	unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */
-
-	if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
-		lctrl1 |= 0xa0000;
-	else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
-		lctrl1 |= 0xf0000;
-	else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
-		lctrl2 = 0;
-
-	mtspr(SPRN_LCTRL2, 0);
-	mtspr(SPRN_CMPE, addr);
-	mtspr(SPRN_CMPF, addr + 4);
-	mtspr(SPRN_LCTRL1, lctrl1);
-	mtspr(SPRN_LCTRL2, lctrl2);
-
-	return 0;
-}
 #else
 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
 {
@@ -782,6 +760,39 @@ static inline int set_dabr(struct arch_hw_breakpoint *brk)
 	return __set_dabr(dabr, dabrx);
 }
 
+static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
+{
+	unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
+			       LCTRL1_CRWF_RW;
+	unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
+	unsigned long start_addr = brk->address & ~HW_BREAKPOINT_ALIGN;
+	unsigned long end_addr = (brk->address + brk->len - 1) | HW_BREAKPOINT_ALIGN;
+
+	if (start_addr == 0)
+		lctrl2 |= LCTRL2_LW0LA_F;
+	else if (end_addr == ~0U)
+		lctrl2 |= LCTRL2_LW0LA_E;
+	else
+		lctrl2 |= LCTRL2_LW0LA_EandF;
+
+	mtspr(SPRN_LCTRL2, 0);
+
+	if ((brk->type & HW_BRK_TYPE_RDWR) == 0)
+		return 0;
+
+	if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
+		lctrl1 |= LCTRL1_CRWE_RO | LCTRL1_CRWF_RO;
+	if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
+		lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;
+
+	mtspr(SPRN_CMPE, start_addr - 1);
+	mtspr(SPRN_CMPF, end_addr + 1);
+	mtspr(SPRN_LCTRL1, lctrl1);
+	mtspr(SPRN_LCTRL2, lctrl2);
+
+	return 0;
+}
+
 void __set_breakpoint(struct arch_hw_breakpoint *brk)
 {
 	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
@@ -789,6 +800,8 @@ void __set_breakpoint(struct arch_hw_breakpoint *brk)
 	if (dawr_enabled())
 		// Power8 or later
 		set_dawr(brk);
+	else if (IS_ENABLED(CONFIG_PPC_8xx))
+		set_breakpoint_8xx(brk);
 	else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
 		// Power7 or earlier
 		set_dabr(brk);
@@ -1264,16 +1277,6 @@ void show_user_instructions(struct pt_regs *regs)
 
 	pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
 
-	/*
-	 * Make sure the NIP points at userspace, not kernel text/data or
-	 * elsewhere.
-	 */
-	if (!__access_ok(pc, NR_INSN_TO_PRINT * sizeof(int), USER_DS)) {
-		pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
-			current->comm, current->pid);
-		return;
-	}
-
 	seq_buf_init(&s, buf, sizeof(buf));
 
 	while (n) {
@@ -1284,7 +1287,7 @@ void show_user_instructions(struct pt_regs *regs)
 		for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
 			int instr;
 
-			if (probe_kernel_address((const void *)pc, instr)) {
+			if (probe_user_read(&instr, (void __user *)pc, sizeof(instr))) {
 				seq_buf_printf(&s, "XXXXXXXX ");
 				continue;
 			}
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index 487dcd8..2d33f34 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -159,12 +159,12 @@ static int poweron_open(struct inode *inode, struct file *file)
 	return single_open(file, ppc_rtas_poweron_show, NULL);
 }
 
-static const struct file_operations ppc_rtas_poweron_operations = {
-	.open		= poweron_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.write		= ppc_rtas_poweron_write,
-	.release	= single_release,
+static const struct proc_ops ppc_rtas_poweron_proc_ops = {
+	.proc_open	= poweron_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= ppc_rtas_poweron_write,
+	.proc_release	= single_release,
 };
 
 static int progress_open(struct inode *inode, struct file *file)
@@ -172,12 +172,12 @@ static int progress_open(struct inode *inode, struct file *file)
 	return single_open(file, ppc_rtas_progress_show, NULL);
 }
 
-static const struct file_operations ppc_rtas_progress_operations = {
-	.open		= progress_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.write		= ppc_rtas_progress_write,
-	.release	= single_release,
+static const struct proc_ops ppc_rtas_progress_proc_ops = {
+	.proc_open	= progress_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= ppc_rtas_progress_write,
+	.proc_release	= single_release,
 };
 
 static int clock_open(struct inode *inode, struct file *file)
@@ -185,12 +185,12 @@ static int clock_open(struct inode *inode, struct file *file)
 	return single_open(file, ppc_rtas_clock_show, NULL);
 }
 
-static const struct file_operations ppc_rtas_clock_operations = {
-	.open		= clock_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.write		= ppc_rtas_clock_write,
-	.release	= single_release,
+static const struct proc_ops ppc_rtas_clock_proc_ops = {
+	.proc_open	= clock_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= ppc_rtas_clock_write,
+	.proc_release	= single_release,
 };
 
 static int tone_freq_open(struct inode *inode, struct file *file)
@@ -198,12 +198,12 @@ static int tone_freq_open(struct inode *inode, struct file *file)
 	return single_open(file, ppc_rtas_tone_freq_show, NULL);
 }
 
-static const struct file_operations ppc_rtas_tone_freq_operations = {
-	.open		= tone_freq_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.write		= ppc_rtas_tone_freq_write,
-	.release	= single_release,
+static const struct proc_ops ppc_rtas_tone_freq_proc_ops = {
+	.proc_open	= tone_freq_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= ppc_rtas_tone_freq_write,
+	.proc_release	= single_release,
 };
 
 static int tone_volume_open(struct inode *inode, struct file *file)
@@ -211,12 +211,12 @@ static int tone_volume_open(struct inode *inode, struct file *file)
 	return single_open(file, ppc_rtas_tone_volume_show, NULL);
 }
 
-static const struct file_operations ppc_rtas_tone_volume_operations = {
-	.open		= tone_volume_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.write		= ppc_rtas_tone_volume_write,
-	.release	= single_release,
+static const struct proc_ops ppc_rtas_tone_volume_proc_ops = {
+	.proc_open	= tone_volume_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= ppc_rtas_tone_volume_write,
+	.proc_release	= single_release,
 };
 
 static int ppc_rtas_find_all_sensors(void);
@@ -238,17 +238,17 @@ static int __init proc_rtas_init(void)
 		return -ENODEV;
 
 	proc_create("powerpc/rtas/progress", 0644, NULL,
-		    &ppc_rtas_progress_operations);
+		    &ppc_rtas_progress_proc_ops);
 	proc_create("powerpc/rtas/clock", 0644, NULL,
-		    &ppc_rtas_clock_operations);
+		    &ppc_rtas_clock_proc_ops);
 	proc_create("powerpc/rtas/poweron", 0644, NULL,
-		    &ppc_rtas_poweron_operations);
+		    &ppc_rtas_poweron_proc_ops);
 	proc_create_single("powerpc/rtas/sensors", 0444, NULL,
 			ppc_rtas_sensors_show);
 	proc_create("powerpc/rtas/frequency", 0644, NULL,
-		    &ppc_rtas_tone_freq_operations);
+		    &ppc_rtas_tone_freq_proc_ops);
 	proc_create("powerpc/rtas/volume", 0644, NULL,
-		    &ppc_rtas_tone_volume_operations);
+		    &ppc_rtas_tone_volume_proc_ops);
 	proc_create_single("powerpc/rtas/rmo_buffer", 0400, NULL,
 			ppc_rtas_rmo_buf_show);
 	return 0;
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 84f7947..a99179d 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -655,7 +655,7 @@ struct rtas_flash_file {
 	const char *filename;
 	const char *rtas_call_name;
 	int *status;
-	const struct file_operations fops;
+	const struct proc_ops ops;
 };
 
 static const struct rtas_flash_file rtas_flash_files[] = {
@@ -663,36 +663,36 @@ static const struct rtas_flash_file rtas_flash_files[] = {
 		.filename	= "powerpc/rtas/" FIRMWARE_FLASH_NAME,
 		.rtas_call_name	= "ibm,update-flash-64-and-reboot",
 		.status		= &rtas_update_flash_data.status,
-		.fops.read	= rtas_flash_read_msg,
-		.fops.write	= rtas_flash_write,
-		.fops.release	= rtas_flash_release,
-		.fops.llseek	= default_llseek,
+		.ops.proc_read	= rtas_flash_read_msg,
+		.ops.proc_write	= rtas_flash_write,
+		.ops.proc_release = rtas_flash_release,
+		.ops.proc_lseek	= default_llseek,
 	},
 	{
 		.filename	= "powerpc/rtas/" FIRMWARE_UPDATE_NAME,
 		.rtas_call_name	= "ibm,update-flash-64-and-reboot",
 		.status		= &rtas_update_flash_data.status,
-		.fops.read	= rtas_flash_read_num,
-		.fops.write	= rtas_flash_write,
-		.fops.release	= rtas_flash_release,
-		.fops.llseek	= default_llseek,
+		.ops.proc_read	= rtas_flash_read_num,
+		.ops.proc_write	= rtas_flash_write,
+		.ops.proc_release = rtas_flash_release,
+		.ops.proc_lseek	= default_llseek,
 	},
 	{
 		.filename	= "powerpc/rtas/" VALIDATE_FLASH_NAME,
 		.rtas_call_name	= "ibm,validate-flash-image",
 		.status		= &rtas_validate_flash_data.status,
-		.fops.read	= validate_flash_read,
-		.fops.write	= validate_flash_write,
-		.fops.release	= validate_flash_release,
-		.fops.llseek	= default_llseek,
+		.ops.proc_read	= validate_flash_read,
+		.ops.proc_write	= validate_flash_write,
+		.ops.proc_release = validate_flash_release,
+		.ops.proc_lseek	= default_llseek,
 	},
 	{
 		.filename	= "powerpc/rtas/" MANAGE_FLASH_NAME,
 		.rtas_call_name	= "ibm,manage-flash-image",
 		.status		= &rtas_manage_flash_data.status,
-		.fops.read	= manage_flash_read,
-		.fops.write	= manage_flash_write,
-		.fops.llseek	= default_llseek,
+		.ops.proc_read	= manage_flash_read,
+		.ops.proc_write	= manage_flash_write,
+		.ops.proc_lseek	= default_llseek,
 	}
 };
 
@@ -723,7 +723,7 @@ static int __init rtas_flash_init(void)
 		const struct rtas_flash_file *f = &rtas_flash_files[i];
 		int token;
 
-		if (!proc_create(f->filename, 0600, NULL, &f->fops))
+		if (!proc_create(f->filename, 0600, NULL, &f->ops))
 			goto enomem;
 
 		/*
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 8d02e047..89b798f 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -385,12 +385,12 @@ static __poll_t rtas_log_poll(struct file *file, poll_table * wait)
 	return 0;
 }
 
-static const struct file_operations proc_rtas_log_operations = {
-	.read =		rtas_log_read,
-	.poll =		rtas_log_poll,
-	.open =		rtas_log_open,
-	.release =	rtas_log_release,
-	.llseek =	noop_llseek,
+static const struct proc_ops rtas_log_proc_ops = {
+	.proc_read	= rtas_log_read,
+	.proc_poll	= rtas_log_poll,
+	.proc_open	= rtas_log_open,
+	.proc_release	= rtas_log_release,
+	.proc_lseek	= noop_llseek,
 };
 
 static int enable_surveillance(int timeout)
@@ -572,7 +572,7 @@ static int __init rtas_init(void)
 		return -ENODEV;
 
 	entry = proc_create("powerpc/rtas/error_log", 0400, NULL,
-			    &proc_rtas_log_operations);
+			    &rtas_log_proc_ops);
 	if (!entry)
 		printk(KERN_ERR "Failed to create error_log proc entry\n");
 
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 488f1ee..7f8c890 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -949,9 +949,6 @@ void __init setup_arch(char **cmdline_p)
 
 	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
 
-	if (IS_ENABLED(CONFIG_DUMMY_CONSOLE))
-		conswitchp = &dummy_con;
-
 	if (ppc_md.setup_arch)
 		ppc_md.setup_arch();
 
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
index c82577c..2dd0d9c 100644
--- a/arch/powerpc/kernel/setup.h
+++ b/arch/powerpc/kernel/setup.h
@@ -35,7 +35,7 @@ void exc_lvl_early_init(void);
 static inline void exc_lvl_early_init(void) { };
 #endif
 
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) || defined(CONFIG_VMAP_STACK)
 void emergency_stack_init(void);
 #else
 static inline void emergency_stack_init(void) { };
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index dcffe92..5b49b26 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -140,7 +140,7 @@ arch_initcall(ppc_init);
 
 static void *__init alloc_stack(void)
 {
-	void *ptr = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+	void *ptr = memblock_alloc(THREAD_SIZE, THREAD_ALIGN);
 
 	if (!ptr)
 		panic("cannot allocate %d bytes for stack at %pS\n",
@@ -153,6 +153,9 @@ void __init irqstack_early_init(void)
 {
 	unsigned int i;
 
+	if (IS_ENABLED(CONFIG_VMAP_STACK))
+		return;
+
 	/* interrupt stacks must be in lowmem, we get that for free on ppc32
 	 * as the memblock is limited to lowmem by default */
 	for_each_possible_cpu(i) {
@@ -161,6 +164,18 @@ void __init irqstack_early_init(void)
 	}
 }
 
+#ifdef CONFIG_VMAP_STACK
+void *emergency_ctx[NR_CPUS] __ro_after_init;
+
+void __init emergency_stack_init(void)
+{
+	unsigned int i;
+
+	for_each_possible_cpu(i)
+		emergency_ctx[i] = alloc_stack();
+}
+#endif
+
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 void __init exc_lvl_early_init(void)
 {
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 6104917..e05e6dd 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -633,7 +633,7 @@ static void *__init alloc_stack(unsigned long limit, int cpu)
 
 	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
 
-	ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_SIZE,
+	ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_ALIGN,
 				     MEMBLOCK_LOW_LIMIT, limit,
 				     early_cpu_to_node(cpu));
 	if (!ptr)
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 43f736e..35b61bf 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -517,3 +517,5 @@
 433	common	fspick				sys_fspick
 434	common	pidfd_open			sys_pidfd_open
 435	nospu	clone3				ppc_clone3
+437	common	openat2				sys_openat2
+438	common	pidfd_getfd			sys_pidfd_getfd
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 014ff07..82a3438 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1637,6 +1637,15 @@ void StackOverflow(struct pt_regs *regs)
 	panic("kernel stack overflow");
 }
 
+void stack_overflow_exception(struct pt_regs *regs)
+{
+	enum ctx_state prev_state = exception_enter();
+
+	die("Kernel stack overflow", regs, SIGSEGV);
+
+	exception_exit(prev_state);
+}
+
 void kernel_fp_unavailable_exception(struct pt_regs *regs)
 {
 	enum ctx_state prev_state = exception_enter();
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index eae9dda..b9a1084 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -728,11 +728,6 @@ static int __init vdso_init(void)
 	 */
 	vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT;
 	DBG("vdso64_kbase: %p, 0x%x pages\n", vdso64_kbase, vdso64_pages);
-#else
-	vdso_data->dcache_block_size = L1_CACHE_BYTES;
-	vdso_data->dcache_log_block_size = L1_CACHE_SHIFT;
-	vdso_data->icache_block_size = L1_CACHE_BYTES;
-	vdso_data->icache_log_block_size = L1_CACHE_SHIFT;
 #endif /* CONFIG_PPC64 */
 
 
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index 06f54d9..e147bbd 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -2,9 +2,7 @@
 
 # List of files in the vdso, has to be asm only for now
 
-obj-vdso32-$(CONFIG_PPC64) = getcpu.o
-obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o \
-		$(obj-vdso32-y)
+obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o getcpu.o
 
 # Build rules
 
diff --git a/arch/powerpc/kernel/vdso32/cacheflush.S b/arch/powerpc/kernel/vdso32/cacheflush.S
index 7f882e7..3440ddf 100644
--- a/arch/powerpc/kernel/vdso32/cacheflush.S
+++ b/arch/powerpc/kernel/vdso32/cacheflush.S
@@ -8,7 +8,9 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
 #include <asm/asm-offsets.h>
+#include <asm/cache.h>
 
 	.text
 
@@ -22,42 +24,62 @@
  */
 V_FUNCTION_BEGIN(__kernel_sync_dicache)
   .cfi_startproc
+#ifdef CONFIG_PPC64
 	mflr	r12
   .cfi_register lr,r12
-	mr	r11,r3
-	bl	__get_datapage@local
+	get_datapage	r10, r0
 	mtlr	r12
-	mr	r10,r3
+#endif
 
+#ifdef CONFIG_PPC64
 	lwz	r7,CFG_DCACHE_BLOCKSZ(r10)
 	addi	r5,r7,-1
-	andc	r6,r11,r5		/* round low to line bdy */
+#else
+	li	r5, L1_CACHE_BYTES - 1
+#endif
+	andc	r6,r3,r5		/* round low to line bdy */
 	subf	r8,r6,r4		/* compute length */
 	add	r8,r8,r5		/* ensure we get enough */
+#ifdef CONFIG_PPC64
 	lwz	r9,CFG_DCACHE_LOGBLOCKSZ(r10)
 	srw.	r8,r8,r9		/* compute line count */
+#else
+	srwi.	r8, r8, L1_CACHE_SHIFT
+	mr	r7, r6
+#endif
 	crclr	cr0*4+so
 	beqlr				/* nothing to do? */
 	mtctr	r8
 1:	dcbst	0,r6
+#ifdef CONFIG_PPC64
 	add	r6,r6,r7
+#else
+	addi	r6, r6, L1_CACHE_BYTES
+#endif
 	bdnz	1b
 	sync
 
 /* Now invalidate the instruction cache */
 
+#ifdef CONFIG_PPC64
 	lwz	r7,CFG_ICACHE_BLOCKSZ(r10)
 	addi	r5,r7,-1
-	andc	r6,r11,r5		/* round low to line bdy */
+	andc	r6,r3,r5		/* round low to line bdy */
 	subf	r8,r6,r4		/* compute length */
 	add	r8,r8,r5
 	lwz	r9,CFG_ICACHE_LOGBLOCKSZ(r10)
 	srw.	r8,r8,r9		/* compute line count */
 	crclr	cr0*4+so
 	beqlr				/* nothing to do? */
+#endif
 	mtctr	r8
+#ifdef CONFIG_PPC64
 2:	icbi	0,r6
 	add	r6,r6,r7
+#else
+2:	icbi	0, r7
+	addi	r7, r7, L1_CACHE_BYTES
+#endif
 	bdnz	2b
 	isync
 	li	r3,0
diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S
index 6c7401b..217bb63 100644
--- a/arch/powerpc/kernel/vdso32/datapage.S
+++ b/arch/powerpc/kernel/vdso32/datapage.S
@@ -10,35 +10,13 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
 
 	.text
 	.global	__kernel_datapage_offset;
 __kernel_datapage_offset:
 	.long	0
 
-V_FUNCTION_BEGIN(__get_datapage)
-  .cfi_startproc
-	/* We don't want that exposed or overridable as we want other objects
-	 * to be able to bl directly to here
-	 */
-	.protected __get_datapage
-	.hidden __get_datapage
-
-	mflr	r0
-  .cfi_register lr,r0
-
-	bcl	20,31,data_page_branch
-data_page_branch:
-	mflr	r3
-	mtlr	r0
-	addi	r3, r3, __kernel_datapage_offset-data_page_branch
-	lwz	r0,0(r3)
-  .cfi_restore lr
-	add	r3,r0,r3
-	blr
-  .cfi_endproc
-V_FUNCTION_END(__get_datapage)
-
 /*
  * void *__kernel_get_syscall_map(unsigned int *syscall_count) ;
  *
@@ -52,11 +30,10 @@
   .cfi_startproc
 	mflr	r12
   .cfi_register lr,r12
-	mr	r4,r3
-	bl	__get_datapage@local
+	mr.	r4,r3
+	get_datapage	r3, r0
 	mtlr	r12
 	addi	r3,r3,CFG_SYSCALL_MAP32
-	cmpli	cr0,r4,0
 	beqlr
 	li	r0,NR_syscalls
 	stw	r0,0(r4)
@@ -75,7 +52,7 @@
   .cfi_startproc
 	mflr	r12
   .cfi_register lr,r12
-	bl	__get_datapage@local
+	get_datapage	r3, r0
 	lwz	r4,(CFG_TB_TICKS_PER_SEC + 4)(r3)
 	lwz	r3,CFG_TB_TICKS_PER_SEC(r3)
 	mtlr	r12
diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S
index 63e9145..ff5e214 100644
--- a/arch/powerpc/kernel/vdso32/getcpu.S
+++ b/arch/powerpc/kernel/vdso32/getcpu.S
@@ -15,6 +15,7 @@
  * int __kernel_getcpu(unsigned *cpu, unsigned *node);
  *
  */
+#if defined(CONFIG_PPC64)
 V_FUNCTION_BEGIN(__kernel_getcpu)
   .cfi_startproc
 	mfspr	r5,SPRN_SPRG_VDSO_READ
@@ -24,10 +25,26 @@
 	rlwinm  r7,r5,16,31-15,31-0
 	beq	cr0,1f
 	stw	r6,0(r3)
-1:	beq	cr1,2f
-	stw	r7,0(r4)
-2:	crclr	cr0*4+so
+1:	crclr	cr0*4+so
 	li	r3,0			/* always success */
+	beqlr	cr1
+	stw	r7,0(r4)
 	blr
   .cfi_endproc
 V_FUNCTION_END(__kernel_getcpu)
+#elif !defined(CONFIG_SMP)
+V_FUNCTION_BEGIN(__kernel_getcpu)
+  .cfi_startproc
+	cmpwi	cr0, r3, 0
+	cmpwi	cr1, r4, 0
+	li	r5, 0
+	beq	cr0, 1f
+	stw	r5, 0(r3)
+1:	li	r3, 0			/* always success */
+	crclr	cr0*4+so
+	beqlr	cr1
+	stw	r5, 0(r4)
+	blr
+  .cfi_endproc
+V_FUNCTION_END(__kernel_getcpu)
+#endif
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 3306672..a395156 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -9,6 +9,7 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 
@@ -31,28 +32,26 @@
 	mflr	r12
   .cfi_register lr,r12
 
-	mr	r10,r3			/* r10 saves tv */
+	mr.	r10,r3			/* r10 saves tv */
 	mr	r11,r4			/* r11 saves tz */
-	bl	__get_datapage@local	/* get data page */
-	mr	r9, r3			/* datapage ptr in r9 */
-	cmplwi	r10,0			/* check if tv is NULL */
+	get_datapage	r9, r0
 	beq	3f
-	lis	r7,1000000@ha		/* load up USEC_PER_SEC */
-	addi	r7,r7,1000000@l		/* so we get microseconds in r4 */
+	LOAD_REG_IMMEDIATE(r7, 1000000)	/* load up USEC_PER_SEC */
 	bl	__do_get_tspec@local	/* get sec/usec from tb & kernel */
 	stw	r3,TVAL32_TV_SEC(r10)
 	stw	r4,TVAL32_TV_USEC(r10)
 
 3:	cmplwi	r11,0			/* check if tz is NULL */
-	beq	1f
+	mtlr	r12
+	crclr	cr0*4+so
+	li	r3,0
+	beqlr
+
 	lwz	r4,CFG_TZ_MINUTEWEST(r9)/* fill tz */
 	lwz	r5,CFG_TZ_DSTTIME(r9)
 	stw	r4,TZONE_TZ_MINWEST(r11)
 	stw	r5,TZONE_TZ_DSTTIME(r11)
 
-1:	mtlr	r12
-	crclr	cr0*4+so
-	li	r3,0
 	blr
   .cfi_endproc
 V_FUNCTION_END(__kernel_gettimeofday)
@@ -69,17 +68,23 @@
 	cmpli	cr0,r3,CLOCK_REALTIME
 	cmpli	cr1,r3,CLOCK_MONOTONIC
 	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
-	bne	cr0,99f
+
+	cmpli	cr5,r3,CLOCK_REALTIME_COARSE
+	cmpli	cr6,r3,CLOCK_MONOTONIC_COARSE
+	cror	cr5*4+eq,cr5*4+eq,cr6*4+eq
+
+	cror	cr0*4+eq,cr0*4+eq,cr5*4+eq
+	bne	cr0, .Lgettime_fallback
 
 	mflr	r12			/* r12 saves lr */
   .cfi_register lr,r12
 	mr	r11,r4			/* r11 saves tp */
-	bl	__get_datapage@local	/* get data page */
-	mr	r9,r3			/* datapage ptr in r9 */
-	lis	r7,NSEC_PER_SEC@h	/* want nanoseconds */
-	ori	r7,r7,NSEC_PER_SEC@l
-50:	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel */
-	bne	cr1,80f			/* not monotonic -> all done */
+	get_datapage	r9, r0
+	LOAD_REG_IMMEDIATE(r7, NSEC_PER_SEC)	/* load up NSEC_PER_SEC */
+	beq	cr5, .Lcoarse_clocks
+.Lprecise_clocks:
+	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel */
+	bne	cr1, .Lfinish		/* not monotonic -> all done */
 
 	/*
 	 * CLOCK_MONOTONIC
@@ -103,12 +108,53 @@
 	add	r9,r9,r0
 	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
         cmpl    cr0,r8,r0		/* check if updated */
-	bne-	50b
+	bne-	.Lprecise_clocks
+	b	.Lfinish_monotonic
+
+	/*
+	 * For coarse clocks we get data directly from the vdso data page, so
+	 * we don't need to call __do_get_tspec, but we still need to do the
+	 * counter trick.
+	 */
+.Lcoarse_clocks:
+	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
+	andi.	r0,r8,1                 /* pending update ? loop */
+	bne-	.Lcoarse_clocks
+	add	r9,r9,r0		/* r0 is already 0 */
+
+	/*
+	 * CLOCK_REALTIME_COARSE, below values are needed for MONOTONIC_COARSE
+	 * too
+	 */
+	lwz	r3,STAMP_XTIME_SEC+LOPART(r9)
+	lwz	r4,STAMP_XTIME_NSEC+LOPART(r9)
+	bne	cr6,1f
+
+	/* CLOCK_MONOTONIC_COARSE */
+	lwz	r5,(WTOM_CLOCK_SEC+LOPART)(r9)
+	lwz	r6,WTOM_CLOCK_NSEC(r9)
+
+	/* check if counter has updated */
+	or	r0,r6,r5
+1:	or	r0,r0,r3
+	or	r0,r0,r4
+	xor	r0,r0,r0
+	add	r3,r3,r0
+	lwz	r0,CFG_TB_UPDATE_COUNT+LOPART(r9)
+	cmpl	cr0,r0,r8               /* check if updated */
+	bne-	.Lcoarse_clocks
+
+	/* Counter has not updated, so continue calculating proper values for
+	 * sec and nsec if monotonic coarse, or just return with the proper
+	 * values for realtime.
+	 */
+	bne	cr6, .Lfinish
 
 	/* Calculate and store result. Note that this mimics the C code,
 	 * which may cause funny results if nsec goes negative... is that
 	 * possible at all ?
 	 */
+.Lfinish_monotonic:
 	add	r3,r3,r5
 	add	r4,r4,r6
 	cmpw	cr0,r4,r7
@@ -116,11 +162,12 @@
 	blt	1f
 	subf	r4,r7,r4
 	addi	r3,r3,1
-1:	bge	cr1,80f
+1:	bge	cr1, .Lfinish
 	addi	r3,r3,-1
 	add	r4,r4,r7
 
-80:	stw	r3,TSPC32_TV_SEC(r11)
+.Lfinish:
+	stw	r3,TSPC32_TV_SEC(r11)
 	stw	r4,TSPC32_TV_NSEC(r11)
 
 	mtlr	r12
@@ -131,7 +178,7 @@
 	/*
 	 * syscall fallback
 	 */
-99:
+.Lgettime_fallback:
 	li	r0,__NR_clock_gettime
   .cfi_restore lr
 	sc
@@ -149,17 +196,20 @@
 V_FUNCTION_BEGIN(__kernel_clock_getres)
   .cfi_startproc
 	/* Check for supported clock IDs */
-	cmpwi	cr0,r3,CLOCK_REALTIME
-	cmpwi	cr1,r3,CLOCK_MONOTONIC
-	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
-	bne	cr0,99f
+	cmplwi	cr0, r3, CLOCK_MAX
+	cmpwi	cr1, r3, CLOCK_REALTIME_COARSE
+	cmpwi	cr7, r3, CLOCK_MONOTONIC_COARSE
+	bgt	cr0, 99f
+	LOAD_REG_IMMEDIATE(r5, KTIME_LOW_RES)
+	beq	cr1, 1f
+	beq	cr7, 1f
 
 	mflr	r12
   .cfi_register lr,r12
-	bl	__get_datapage@local	/* get data page */
+	get_datapage	r3, r0
 	lwz	r5, CLOCK_HRTIMER_RES(r3)
 	mtlr	r12
-	li	r3,0
+1:	li	r3,0
 	cmpli	cr0,r4,0
 	crclr	cr0*4+so
 	beqlr
@@ -168,11 +218,11 @@
 	blr
 
 	/*
-	 * syscall fallback
+	 * invalid clock
 	 */
 99:
-	li	r0,__NR_clock_getres
-	sc
+	li	r3, EINVAL
+	crset	so
 	blr
   .cfi_endproc
 V_FUNCTION_END(__kernel_clock_getres)
@@ -190,16 +240,15 @@
   .cfi_register lr,r12
 
 	mr	r11,r3			/* r11 holds t */
-	bl	__get_datapage@local
-	mr	r9, r3			/* datapage ptr in r9 */
+	get_datapage	r9, r0
 
 	lwz	r3,STAMP_XTIME_SEC+LOPART(r9)
 
 	cmplwi	r11,0			/* check if t is NULL */
-	beq	2f
-	stw	r3,0(r11)		/* store result at *t */
-2:	mtlr	r12
+	mtlr	r12
 	crclr	cr0*4+so
+	beqlr
+	stw	r3,0(r11)		/* store result at *t */
 	blr
   .cfi_endproc
 V_FUNCTION_END(__kernel_time)
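
The .Lcoarse_clocks path above reads seconds and nanoseconds straight out of the vdso data page and relies on the update counter to detect a concurrent update: an odd count means the kernel is mid-update, and a count that changes across the reads means the snapshot may be torn. A rough C sketch of that retry pattern — purely illustrative, the demo_vdso_data layout is invented, and the real assembly uses data dependencies in place of the barriers omitted here:

	#include <stdint.h>

	struct demo_vdso_data {
		uint32_t update_count;	/* odd while an update is in progress */
		uint32_t stamp_sec;
		uint32_t stamp_nsec;
	};

	static void demo_read_coarse(const volatile struct demo_vdso_data *d,
				     uint32_t *sec, uint32_t *nsec)
	{
		uint32_t count;

		do {
			do {
				count = d->update_count;
			} while (count & 1);		/* writer active: spin */

			*sec  = d->stamp_sec;
			*nsec = d->stamp_nsec;
			/* if the counter moved, the values may be inconsistent */
		} while (d->update_count != count);
	}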
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 00c025b..5206c2e 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -155,7 +155,7 @@
 		__kernel_sync_dicache_p5;
 		__kernel_sigtramp32;
 		__kernel_sigtramp_rt32;
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) || !defined(CONFIG_SMP)
 		__kernel_getcpu;
 #endif
 
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 8eb867d..25c14a0 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -67,6 +67,9 @@
 #ifdef CONFIG_PPC32
 	mfspr	r5,SPRN_SPRG_THREAD		/* current task's THREAD (phys) */
 	oris	r9,r9,MSR_VEC@h
+#ifdef CONFIG_VMAP_STACK
+	tovirt(r5, r5)
+#endif
 #else
 	ld	r4,PACACURRENT(r13)
 	addi	r5,r4,THREAD		/* Get THREAD */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 8834220..b4c89a1 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -323,7 +323,7 @@
 #endif
 
 	/* The initial task and kernel stack */
-	INIT_TASK_DATA_SECTION(THREAD_SIZE)
+	INIT_TASK_DATA_SECTION(THREAD_ALIGN)
 
 	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 58a59ee..d07a8e1 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -471,11 +471,6 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
 }
 EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
 
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	return 0;
@@ -789,9 +784,9 @@ void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
 	kvm_vcpu_kick(vcpu);
 }
 
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
 {
-	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
+	return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
 }
 
 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index d381526..6c372f5 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -284,7 +284,7 @@ static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
 	/* Protect linux PTE lookup from page table destruction */
 	rcu_read_lock_sched();	/* this disables preemption too */
 	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
-				current->mm->pgd, false, pte_idx_ret);
+				kvm->mm->pgd, false, pte_idx_ret);
 	rcu_read_unlock_sched();
 	if (ret == H_TOO_HARD) {
 		/* this can't happen */
@@ -573,7 +573,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	is_ci = false;
 	pfn = 0;
 	page = NULL;
-	mm = current->mm;
+	mm = kvm->mm;
 	pte_size = PAGE_SIZE;
 	writing = (dsisr & DSISR_ISSTORE) != 0;
 	/* If writing != 0, then the HPTE must allow writing, if we get here */
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index da857c8..803940d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -63,12 +63,10 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
 	}
 	isync();
 
-	pagefault_disable();
 	if (is_load)
-		ret = raw_copy_from_user(to, from, n);
+		ret = probe_user_read(to, (const void __user *)from, n);
 	else
-		ret = raw_copy_to_user(to, from, n);
-	pagefault_enable();
+		ret = probe_user_write((void __user *)to, from, n);
 
 	/* switch the pid first to avoid running host with unallocated pid */
 	if (quadrant == 1 && pid != old_pid)
@@ -1102,7 +1100,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
 	unsigned int shift;
 
 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
-		kvmppc_uvmem_drop_pages(memslot, kvm);
+		kvmppc_uvmem_drop_pages(memslot, kvm, true);
 
 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
 		return;
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 883a66e..ee6c103 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -253,10 +253,11 @@ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
 		}
 	}
 
+	account_locked_vm(kvm->mm,
+		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
+
 	kvm_put_kvm(stt->kvm);
 
-	account_locked_vm(current->mm,
-		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
 	call_rcu(&stt->rcu, release_spapr_tce_table);
 
 	return 0;
@@ -272,6 +273,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 {
 	struct kvmppc_spapr_tce_table *stt = NULL;
 	struct kvmppc_spapr_tce_table *siter;
+	struct mm_struct *mm = kvm->mm;
 	unsigned long npages, size = args->size;
 	int ret = -ENOMEM;
 
@@ -280,7 +282,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 		return -EINVAL;
 
 	npages = kvmppc_tce_pages(size);
-	ret = account_locked_vm(current->mm, kvmppc_stt_pages(npages), true);
+	ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
 	if (ret)
 		return ret;
 
@@ -326,7 +328,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 
 	kfree(stt);
  fail_acct:
-	account_locked_vm(current->mm, kvmppc_stt_pages(npages), false);
+	account_locked_vm(mm, kvmppc_stt_pages(npages), false);
 	return ret;
 }
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 6ff3f89..2cefd07 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1091,6 +1091,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 	case H_SVM_INIT_DONE:
 		ret = kvmppc_h_svm_init_done(vcpu->kvm);
 		break;
+	case H_SVM_INIT_ABORT:
+		ret = kvmppc_h_svm_init_abort(vcpu->kvm);
+		break;
 
 	default:
 		return RESUME_HOST;
@@ -2271,22 +2274,16 @@ static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
 }
 #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
 
-static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
-						   unsigned int id)
+static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu;
 	int err;
 	int core;
 	struct kvmppc_vcore *vcore;
+	struct kvm *kvm;
+	unsigned int id;
 
-	err = -ENOMEM;
-	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu)
-		goto out;
-
-	err = kvm_vcpu_init(vcpu, kvm, id);
-	if (err)
-		goto free_vcpu;
+	kvm = vcpu->kvm;
+	id = vcpu->vcpu_id;
 
 	vcpu->arch.shared = &vcpu->arch.shregs;
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -2368,7 +2365,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 	mutex_unlock(&kvm->lock);
 
 	if (!vcore)
-		goto free_vcpu;
+		return err;
 
 	spin_lock(&vcore->lock);
 	++vcore->num_threads;
@@ -2383,12 +2380,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 
 	debugfs_vcpu_init(vcpu, id);
 
-	return vcpu;
-
-free_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
-	return ERR_PTR(err);
+	return 0;
 }
 
 static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode,
@@ -2442,8 +2434,6 @@ static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
 	unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
 	unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
 	spin_unlock(&vcpu->arch.vpa_update_lock);
-	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
@@ -4285,7 +4275,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	user_vrsave = mfspr(SPRN_VRSAVE);
 
 	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
-	vcpu->arch.pgdir = current->mm->pgd;
+	vcpu->arch.pgdir = kvm->mm->pgd;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
 	do {
@@ -4640,14 +4630,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 
 	/* Look up the VMA for the start of this memory slot */
 	hva = memslot->userspace_addr;
-	down_read(&current->mm->mmap_sem);
-	vma = find_vma(current->mm, hva);
+	down_read(&kvm->mm->mmap_sem);
+	vma = find_vma(kvm->mm, hva);
 	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
 		goto up_out;
 
 	psize = vma_kernel_pagesize(vma);
 
-	up_read(&current->mm->mmap_sem);
+	up_read(&kvm->mm->mmap_sem);
 
 	/* We can handle 4k, 64k or 16M pages in the VRMA */
 	if (psize >= 0x1000000)
@@ -4680,7 +4670,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	return err;
 
  up_out:
-	up_read(&current->mm->mmap_sem);
+	up_read(&kvm->mm->mmap_sem);
 	goto out_srcu;
 }
 
@@ -5477,7 +5467,7 @@ static int kvmhv_svm_off(struct kvm *kvm)
 			continue;
 
 		kvm_for_each_memslot(memslot, slots) {
-			kvmppc_uvmem_drop_pages(memslot, kvm);
+			kvmppc_uvmem_drop_pages(memslot, kvm, true);
 			uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
 		}
 	}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index c6fbbd2..dbc2fec 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1801,6 +1801,7 @@
 	tlbsync
 	ptesync
 
+BEGIN_FTR_SECTION
 	/* Radix: Handle the case where the guest used an illegal PID */
 	LOAD_REG_ADDR(r4, mmu_base_pid)
 	lwz	r3, VCPU_GUEST_PID(r9)
@@ -1830,6 +1831,7 @@
 	addi	r7,r7,0x1000
 	bdnz	1b
 	ptesync
+END_FTR_SECTION_IFSET(CPU_FTR_P9_RADIX_PREFETCH_BUG)
 
 2:
 #endif /* CONFIG_PPC_RADIX_MMU */
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 2de264f..79b1202 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -258,7 +258,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
  * QEMU page table with normal PTEs from newly allocated pages.
  */
 void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
-			     struct kvm *kvm)
+			     struct kvm *kvm, bool skip_page_out)
 {
 	int i;
 	struct kvmppc_uvmem_page_pvt *pvt;
@@ -276,7 +276,7 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
 
 		uvmem_page = pfn_to_page(uvmem_pfn);
 		pvt = uvmem_page->zone_device_data;
-		pvt->skip_page_out = true;
+		pvt->skip_page_out = skip_page_out;
 		mutex_unlock(&kvm->arch.uvmem_lock);
 
 		pfn = gfn_to_pfn(kvm, gfn);
@@ -286,6 +286,34 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
 	}
 }
 
+unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
+{
+	int srcu_idx;
+	struct kvm_memory_slot *memslot;
+
+	/*
+	 * Expect to be called only after INIT_START and before INIT_DONE.
+	 * If INIT_DONE was completed, use normal VM termination sequence.
+	 */
+	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
+		return H_UNSUPPORTED;
+
+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+		return H_STATE;
+
+	srcu_idx = srcu_read_lock(&kvm->srcu);
+
+	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
+		kvmppc_uvmem_drop_pages(memslot, kvm, false);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	kvm->arch.secure_guest = 0;
+	uv_svm_terminate(kvm->arch.lpid);
+
+	return H_PARAMETER;
+}
+
 /*
  * Get a free device PFN from the pool
  *
@@ -543,7 +571,7 @@ kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
 
 	ret = migrate_vma_setup(&mig);
 	if (ret)
-		return ret;
+		goto out;
 
 	spage = migrate_pfn_to_page(*mig.src);
 	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index ce4fcf7..729a0f1 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1744,21 +1744,17 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 	return r;
 }
 
-static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
-						   unsigned int id)
+static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s;
-	struct kvm_vcpu *vcpu;
-	int err = -ENOMEM;
 	unsigned long p;
+	int err;
 
-	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu)
-		goto out;
+	err = -ENOMEM;
 
 	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
 	if (!vcpu_book3s)
-		goto free_vcpu;
+		goto out;
 	vcpu->arch.book3s = vcpu_book3s;
 
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -1768,14 +1764,9 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 		goto free_vcpu3s;
 #endif
 
-	err = kvm_vcpu_init(vcpu, kvm, id);
-	if (err)
-		goto free_shadow_vcpu;
-
-	err = -ENOMEM;
 	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
 	if (!p)
-		goto uninit_vcpu;
+		goto free_shadow_vcpu;
 	vcpu->arch.shared = (void *)p;
 #ifdef CONFIG_PPC_BOOK3S_64
 	/* Always start the shared struct in native endian mode */
@@ -1806,22 +1797,20 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 
 	err = kvmppc_mmu_init(vcpu);
 	if (err < 0)
-		goto uninit_vcpu;
+		goto free_shared_page;
 
-	return vcpu;
+	return 0;
 
-uninit_vcpu:
-	kvm_vcpu_uninit(vcpu);
+free_shared_page:
+	free_page((unsigned long)vcpu->arch.shared);
 free_shadow_vcpu:
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	kfree(vcpu->arch.shadow_vcpu);
 free_vcpu3s:
 #endif
 	vfree(vcpu_book3s);
-free_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
 out:
-	return ERR_PTR(err);
+	return err;
 }
 
 static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
@@ -1829,12 +1818,10 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
 	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
-	kvm_vcpu_uninit(vcpu);
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	kfree(vcpu->arch.shadow_vcpu);
 #endif
 	vfree(vcpu_book3s);
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
@@ -2030,6 +2017,7 @@ static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
 {
 	/* We should not get called */
 	BUG();
+	return 0;
 }
 #endif /* CONFIG_PPC64 */
 
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 66858b7..85215e7 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -484,7 +484,7 @@ static void xive_finish_unmask(struct kvmppc_xive *xive,
 	kvmppc_xive_select_irq(state, &hw_num, &xd);
 
 	/*
-	 * See command in xive_lock_and_mask() concerning masking
+	 * See comment in xive_lock_and_mask() concerning masking
 	 * via firmware.
 	 */
 	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index d83adb1..6ef0151 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -631,7 +631,7 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	gfn = gpa_to_gfn(kvm_eq.qaddr);
 
-	page_size = kvm_host_page_size(kvm, gfn);
+	page_size = kvm_host_page_size(vcpu, gfn);
 	if (1ull << kvm_eq.qshift > page_size) {
 		srcu_read_unlock(&kvm->srcu, srcu_idx);
 		pr_warn("Incompatible host page size %lx!\n", page_size);
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index be9a458..7b27604 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -775,7 +775,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	debug = current->thread.debug;
 	current->thread.debug = vcpu->arch.dbg_reg;
 
-	vcpu->arch.pgdir = current->mm->pgd;
+	vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
 	kvmppc_fix_ee_before_entry();
 
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
@@ -1377,36 +1377,6 @@ static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
 	update_timer_ints(vcpu);
 }
 
-/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-	int i;
-	int r;
-
-	vcpu->arch.regs.nip = 0;
-	vcpu->arch.shared->pir = vcpu->vcpu_id;
-	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
-	kvmppc_set_msr(vcpu, 0);
-
-#ifndef CONFIG_KVM_BOOKE_HV
-	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
-	vcpu->arch.shadow_pid = 1;
-	vcpu->arch.shared->msr = 0;
-#endif
-
-	/* Eye-catching numbers so we know if the guest takes an interrupt
-	 * before it's programmed its own IVPR/IVORs. */
-	vcpu->arch.ivpr = 0x55550000;
-	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
-		vcpu->arch.ivor[i] = 0x7700 | i * 4;
-
-	kvmppc_init_timing_stats(vcpu);
-
-	r = kvmppc_core_vcpu_setup(vcpu);
-	kvmppc_sanity_check(vcpu);
-	return r;
-}
-
 int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	/* setup watchdog timer once */
@@ -2114,9 +2084,40 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 	return kvm->arch.kvm_ops->init_vm(kvm);
 }
 
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
 {
-	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
+	int i;
+	int r;
+
+	r = vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
+	if (r)
+		return r;
+
+	/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
+	vcpu->arch.regs.nip = 0;
+	vcpu->arch.shared->pir = vcpu->vcpu_id;
+	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
+	kvmppc_set_msr(vcpu, 0);
+
+#ifndef CONFIG_KVM_BOOKE_HV
+	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
+	vcpu->arch.shadow_pid = 1;
+	vcpu->arch.shared->msr = 0;
+#endif
+
+	/* Eye-catching numbers so we know if the guest takes an interrupt
+	 * before it's programmed its own IVPR/IVORs. */
+	vcpu->arch.ivpr = 0x55550000;
+	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
+		vcpu->arch.ivor[i] = 0x7700 | i * 4;
+
+	kvmppc_init_timing_stats(vcpu);
+
+	r = kvmppc_core_vcpu_setup(vcpu);
+	if (r)
+		vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
+	kvmppc_sanity_check(vcpu);
+	return r;
 }
 
 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 00649ca..f2b4fea 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -433,31 +433,16 @@ static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
 	return r;
 }
 
-static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
-						     unsigned int id)
+static int kvmppc_core_vcpu_create_e500(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500;
-	struct kvm_vcpu *vcpu;
 	int err;
 
-	BUILD_BUG_ON_MSG(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0,
-		"struct kvm_vcpu must be at offset 0 for arch usercopy region");
+	BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0);
+	vcpu_e500 = to_e500(vcpu);
 
-	vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu_e500) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	vcpu = &vcpu_e500->vcpu;
-	err = kvm_vcpu_init(vcpu, kvm, id);
-	if (err)
-		goto free_vcpu;
-
-	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) {
-		err = -ENOMEM;
-		goto uninit_vcpu;
-	}
+	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
+		return -ENOMEM;
 
 	err = kvmppc_e500_tlb_init(vcpu_e500);
 	if (err)
@@ -469,18 +454,13 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
 		goto uninit_tlb;
 	}
 
-	return vcpu;
+	return 0;
 
 uninit_tlb:
 	kvmppc_e500_tlb_uninit(vcpu_e500);
 uninit_id:
 	kvmppc_e500_id_table_free(vcpu_e500);
-uninit_vcpu:
-	kvm_vcpu_uninit(vcpu);
-free_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
-out:
-	return ERR_PTR(err);
+	return err;
 }
 
 static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
@@ -490,8 +470,6 @@ static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
 	free_page((unsigned long)vcpu->arch.shared);
 	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kvmppc_e500_id_table_free(vcpu_e500);
-	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
 
 static int kvmppc_core_init_vm_e500(struct kvm *kvm)
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 318e65c..e6b06cb2 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -301,30 +301,20 @@ static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
 	return r;
 }
 
-static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
-						       unsigned int id)
+static int kvmppc_core_vcpu_create_e500mc(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500;
-	struct kvm_vcpu *vcpu;
 	int err;
 
-	vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu_e500) {
-		err = -ENOMEM;
-		goto out;
-	}
-	vcpu = &vcpu_e500->vcpu;
+	BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0);
+	vcpu_e500 = to_e500(vcpu);
 
 	/* Invalid PIR value -- this LPID dosn't have valid state on any cpu */
 	vcpu->arch.oldpir = 0xffffffff;
 
-	err = kvm_vcpu_init(vcpu, kvm, id);
-	if (err)
-		goto free_vcpu;
-
 	err = kvmppc_e500_tlb_init(vcpu_e500);
 	if (err)
-		goto uninit_vcpu;
+		return err;
 
 	vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 	if (!vcpu->arch.shared) {
@@ -332,17 +322,11 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
 		goto uninit_tlb;
 	}
 
-	return vcpu;
+	return 0;
 
 uninit_tlb:
 	kvmppc_e500_tlb_uninit(vcpu_e500);
-uninit_vcpu:
-	kvm_vcpu_uninit(vcpu);
-
-free_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
-out:
-	return ERR_PTR(err);
+	return err;
 }
 
 static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
@@ -351,8 +335,6 @@ static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
 
 	free_page((unsigned long)vcpu->arch.shared);
 	kvmppc_e500_tlb_uninit(vcpu_e500);
-	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
 
 static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 2e496eb..1139bc5 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -73,7 +73,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *run = vcpu->run;
 	u32 inst;
-	int ra, rs, rt;
 	enum emulation_result emulated = EMULATE_FAIL;
 	int advance = 1;
 	struct instruction_op op;
@@ -85,10 +84,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 	if (emulated != EMULATE_DONE)
 		return emulated;
 
-	ra = get_ra(inst);
-	rs = get_rs(inst);
-	rt = get_rt(inst);
-
 	vcpu->arch.mmio_vsx_copy_nums = 0;
 	vcpu->arch.mmio_vsx_offset = 0;
 	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 416fb3d..1af96fb5 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -475,7 +475,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 #endif
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_arch_vcpu_free(vcpu);
+		kvm_vcpu_destroy(vcpu);
 
 	mutex_lock(&kvm->lock);
 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
@@ -720,22 +720,55 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	kvmppc_core_flush_memslot(kvm, slot);
 }
 
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+	return 0;
+}
+
+static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu;
-	vcpu = kvmppc_core_vcpu_create(kvm, id);
-	if (!IS_ERR(vcpu)) {
-		vcpu->arch.wqp = &vcpu->wq;
-		kvmppc_create_vcpu_debugfs(vcpu, id);
-	}
-	return vcpu;
+
+	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
+	kvmppc_decrementer_func(vcpu);
+
+	return HRTIMER_NORESTART;
+}
+
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+{
+	int err;
+
+	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
+	vcpu->arch.dec_expires = get_tb();
+
+#ifdef CONFIG_KVM_EXIT_TIMING
+	mutex_init(&vcpu->arch.exit_timing_lock);
+#endif
+	err = kvmppc_subarch_vcpu_init(vcpu);
+	if (err)
+		return err;
+
+	err = kvmppc_core_vcpu_create(vcpu);
+	if (err)
+		goto out_vcpu_uninit;
+
+	vcpu->arch.wqp = &vcpu->wq;
+	kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
+	return 0;
+
+out_vcpu_uninit:
+	kvmppc_mmu_destroy(vcpu);
+	kvmppc_subarch_vcpu_uninit(vcpu);
+	return err;
 }
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
 }
 
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	/* Make sure we're not using the vcpu anymore */
 	hrtimer_cancel(&vcpu->arch.dec_timer);
@@ -758,11 +791,9 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 	}
 
 	kvmppc_core_vcpu_free(vcpu);
-}
 
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
-	kvm_arch_vcpu_free(vcpu);
+	kvmppc_mmu_destroy(vcpu);
+	kvmppc_subarch_vcpu_uninit(vcpu);
 }
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -770,37 +801,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 	return kvmppc_core_pending_dec(vcpu);
 }
 
-static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
-{
-	struct kvm_vcpu *vcpu;
-
-	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
-	kvmppc_decrementer_func(vcpu);
-
-	return HRTIMER_NORESTART;
-}
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	int ret;
-
-	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
-	vcpu->arch.dec_expires = get_tb();
-
-#ifdef CONFIG_KVM_EXIT_TIMING
-	mutex_init(&vcpu->arch.exit_timing_lock);
-#endif
-	ret = kvmppc_subarch_vcpu_init(vcpu);
-	return ret;
-}
-
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-	kvmppc_mmu_destroy(vcpu);
-	kvmppc_subarch_vcpu_uninit(vcpu);
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 #ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 8bbbd97..c11b0a0 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -25,6 +25,12 @@
 #include <asm/feature-fixups.h>
 #include <asm/code-patching-asm.h>
 
+#ifdef CONFIG_VMAP_STACK
+#define ADDR_OFFSET	0
+#else
+#define ADDR_OFFSET	PAGE_OFFSET
+#endif
+
 #ifdef CONFIG_SMP
 	.section .bss
 	.align	2
@@ -47,8 +53,8 @@
 	.text
 _GLOBAL(hash_page)
 #ifdef CONFIG_SMP
-	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@h
-	ori	r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
+	lis	r8, (mmu_hash_lock - ADDR_OFFSET)@h
+	ori	r8, r8, (mmu_hash_lock - ADDR_OFFSET)@l
 	lis	r0,0x0fff
 	b	10f
 11:	lwz	r6,0(r8)
@@ -66,9 +72,12 @@
 	cmplw	0,r4,r0
 	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
 	mfspr	r5, SPRN_SPRG_PGDIR	/* phys page-table root */
+#ifdef CONFIG_VMAP_STACK
+	tovirt(r5, r5)
+#endif
 	blt+	112f			/* assume user more likely */
-	lis	r5, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
-	addi	r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
+	lis	r5, (swapper_pg_dir - ADDR_OFFSET)@ha	/* if kernel address, use */
+	addi	r5 ,r5 ,(swapper_pg_dir - ADDR_OFFSET)@l	/* kernel page table */
 	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
 112:
 #ifndef CONFIG_PTE_64BIT
@@ -80,6 +89,9 @@
 	lwzx	r8,r8,r5		/* Get L1 entry */
 	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
 #endif
+#ifdef CONFIG_VMAP_STACK
+	tovirt(r8, r8)
+#endif
 #ifdef CONFIG_SMP
 	beq-	hash_page_out		/* return if no mapping */
 #else
@@ -137,9 +149,9 @@
 
 #ifdef CONFIG_SMP
 	eieio
-	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
+	lis	r8, (mmu_hash_lock - ADDR_OFFSET)@ha
 	li	r0,0
-	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
+	stw	r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
 #endif
 
 	/* Return from the exception */
@@ -152,9 +164,9 @@
 #ifdef CONFIG_SMP
 hash_page_out:
 	eieio
-	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
+	lis	r8, (mmu_hash_lock - ADDR_OFFSET)@ha
 	li	r0,0
-	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
+	stw	r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
 	blr
 #endif /* CONFIG_SMP */
 
@@ -329,7 +341,7 @@
 	patch_site	1f, patch__hash_page_A1
 	patch_site	2f, patch__hash_page_A2
 	/* Get the address of the primary PTE group in the hash table (r3) */
-0:	lis	r0, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
+0:	lis	r0, (Hash_base - ADDR_OFFSET)@h	/* base address of hash table */
 1:	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
 2:	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
 	xor	r3,r3,r0		/* make primary hash */
@@ -343,10 +355,10 @@
 	beq+	10f			/* no PTE: go look for an empty slot */
 	tlbie	r4
 
-	lis	r4, (htab_hash_searches - PAGE_OFFSET)@ha
-	lwz	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
+	lis	r4, (htab_hash_searches - ADDR_OFFSET)@ha
+	lwz	r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
 	addi	r6,r6,1			/* count how many searches we do */
-	stw	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
+	stw	r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
 
 	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
 	mtctr	r0
@@ -378,10 +390,10 @@
 	beq+	found_empty
 
 	/* update counter of times that the primary PTEG is full */
-	lis	r4, (primary_pteg_full - PAGE_OFFSET)@ha
-	lwz	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
+	lis	r4, (primary_pteg_full - ADDR_OFFSET)@ha
+	lwz	r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
 	addi	r6,r6,1
-	stw	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
+	stw	r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
 
 	patch_site	0f, patch__hash_page_C
 	/* Search the secondary PTEG for an empty slot */
@@ -415,8 +427,8 @@
 	 * lockup here but that shouldn't happen
 	 */
 
-1:	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
-	lwz	r6, (next_slot - PAGE_OFFSET)@l(r4)
+1:	lis	r4, (next_slot - ADDR_OFFSET)@ha	/* get next evict slot */
+	lwz	r6, (next_slot - ADDR_OFFSET)@l(r4)
 	addi	r6,r6,HPTE_SIZE			/* search for candidate */
 	andi.	r6,r6,7*HPTE_SIZE
 	stw	r6,next_slot@l(r4)
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 69b2419a..0a1c65a 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -413,6 +413,7 @@ void __init MMU_init_hw(void)
 void __init MMU_init_hw_patch(void)
 {
 	unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
+	unsigned int hash;
 
 	if (ppc_md.progress)
 		ppc_md.progress("hash:patch", 0x345);
@@ -424,8 +425,12 @@ void __init MMU_init_hw_patch(void)
 	/*
 	 * Patch up the instructions in hashtable.S:create_hpte
 	 */
-	modify_instruction_site(&patch__hash_page_A0, 0xffff,
-				((unsigned int)Hash - PAGE_OFFSET) >> 16);
+	if (IS_ENABLED(CONFIG_VMAP_STACK))
+		hash = (unsigned int)Hash;
+	else
+		hash = (unsigned int)Hash - PAGE_OFFSET;
+
+	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
 	modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
 	modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
 	modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index b30435c..523d4d3 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -652,6 +652,7 @@ static void init_hpte_page_sizes(void)
 
 static void __init htab_init_page_sizes(void)
 {
+	bool aligned = true;
 	init_hpte_page_sizes();
 
 	if (!debug_pagealloc_enabled()) {
@@ -659,7 +660,15 @@ static void __init htab_init_page_sizes(void)
 		 * Pick a size for the linear mapping. Currently, we only
 		 * support 16M, 1M and 4K which is the default
 		 */
-		if (mmu_psize_defs[MMU_PAGE_16M].shift)
+		if (IS_ENABLED(STRICT_KERNEL_RWX) &&
+		    (unsigned long)_stext % 0x1000000) {
+			if (mmu_psize_defs[MMU_PAGE_16M].shift)
+				pr_warn("Kernel not 16M aligned, "
+					"disabling 16M linear map alignment");
+			aligned = false;
+		}
+
+		if (mmu_psize_defs[MMU_PAGE_16M].shift && aligned)
 			mmu_linear_psize = MMU_PAGE_16M;
 		else if (mmu_psize_defs[MMU_PAGE_1M].shift)
 			mmu_linear_psize = MMU_PAGE_1M;
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index 56cc845..eba73eb 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -103,7 +103,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	for (entry = 0; entry < entries; entry += chunk) {
 		unsigned long n = min(entries - entry, chunk);
 
-		ret = get_user_pages(ua + (entry << PAGE_SHIFT), n,
+		ret = pin_user_pages(ua + (entry << PAGE_SHIFT), n,
 				FOLL_WRITE | FOLL_LONGTERM,
 				mem->hpages + entry, NULL);
 		if (ret == n) {
@@ -167,9 +167,8 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	return 0;
 
 free_exit:
-	/* free the reference taken */
-	for (i = 0; i < pinned; i++)
-		put_page(mem->hpages[i]);
+	/* free the references taken */
+	unpin_user_pages(mem->hpages, pinned);
 
 	vfree(mem->hpas);
 	kfree(mem);
@@ -215,7 +214,8 @@ static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
 		if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
 			SetPageDirty(page);
 
-		put_page(page);
+		unpin_user_page(page);
+
 		mem->hpas[i] = 0;
 	}
 }
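
mm_iommu_do_alloc() above switches from get_user_pages()/put_page() to the pin_user_pages()/unpin_user_pages() API for pages that stay pinned long-term for DMA-like access. A hedged sketch of the pairing — demo_pin_buffer() is hypothetical and, like the caller above, assumes mmap_sem is already held for read:

	#include <linux/mm.h>
	#include <linux/slab.h>

	static long demo_pin_buffer(unsigned long ua, unsigned long npages,
				    struct page ***pagesp)
	{
		struct page **pages;
		long pinned;

		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return -ENOMEM;

		/* FOLL_LONGTERM tells the MM this pin outlives the syscall */
		pinned = pin_user_pages(ua, npages, FOLL_WRITE | FOLL_LONGTERM,
					pages, NULL);
		if (pinned < 0) {
			kfree(pages);
			return pinned;
		}
		if (pinned != npages) {
			unpin_user_pages(pages, pinned);	/* drop partial pin */
			kfree(pages);
			return -EFAULT;
		}

		*pagesp = pages;
		return pinned;
	}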
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 75483b4..2bf7e1b 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -378,7 +378,6 @@ static inline void pgtable_free(void *table, int index)
 	}
 }
 
-#ifdef CONFIG_SMP
 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
 {
 	unsigned long pgf = (unsigned long)table;
@@ -395,12 +394,6 @@ void __tlb_remove_table(void *_table)
 
 	return pgtable_free(table, index);
 }
-#else
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
-{
-	return pgtable_free(table, index);
-}
-#endif
 
 #ifdef CONFIG_PROC_FS
 atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 974109b..dd1bea4 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -337,7 +337,11 @@ static void __init radix_init_pgtable(void)
 	}
 
 	/* Find out how many PID bits are supported */
-	if (cpu_has_feature(CPU_FTR_HVMODE)) {
+	if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
+		if (!mmu_pid_bits)
+			mmu_pid_bits = 20;
+		mmu_base_pid = 1;
+	} else if (cpu_has_feature(CPU_FTR_HVMODE)) {
 		if (!mmu_pid_bits)
 			mmu_pid_bits = 20;
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index a95175c..03f43c9 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -1161,6 +1161,9 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
 	if (unlikely(pid == MMU_NO_CONTEXT))
 		return;
 
+	if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+		return;
+
 	/*
 	 * If this context hasn't run on that CPU before and KVM is
 	 * around, there's a slim chance that the guest on another
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index b5047f9..8db05076 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -233,7 +233,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
 
 	// Read/write fault in a valid region (the exception table search passed
 	// above), but blocked by KUAP is bad, it can never succeed.
-	if (bad_kuap_fault(regs, is_write))
+	if (bad_kuap_fault(regs, address, is_write))
 		return true;
 
 	// What's left? Kernel fault on user in well defined regions (extable
@@ -279,12 +279,8 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
 		if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
 		    access_ok(nip, sizeof(*nip))) {
 			unsigned int inst;
-			int res;
 
-			pagefault_disable();
-			res = __get_user_inatomic(inst, nip);
-			pagefault_enable();
-			if (!res)
+			if (!probe_user_read(&inst, nip, sizeof(inst)))
 				return !store_updates_sp(inst);
 			*must_retry = true;
 		}
@@ -354,6 +350,9 @@ static void sanity_check_fault(bool is_write, bool is_user,
 	 * Userspace trying to access kernel address, we get PROTFAULT for that.
 	 */
 	if (is_user && address >= TASK_SIZE) {
+		if ((long)address == -1)
+			return;
+
 		pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
 				   current->comm, current->pid, address,
 				   from_kuid(&init_user_ns, current_uid()));
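
Both the stack-expansion check above and the KVM radix copy helpers earlier in this merge replace open-coded pagefault_disable()/__get_user_inatomic() sequences with probe_user_read()/probe_user_write(), which handle the fault-disabled access internally and return -EFAULT on failure. A minimal sketch of the read side — demo_read_user_insn() is made up:

	#include <linux/types.h>
	#include <linux/uaccess.h>

	/* Fetch one instruction word from user memory without risking a fault
	 * being taken; returns true only if the whole word was copied.
	 */
	static bool demo_read_user_insn(const void __user *nip, unsigned int *inst)
	{
		return probe_user_read(inst, nip, sizeof(*inst)) == 0;
	}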
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index 0e6ed44..16dd95b 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -12,7 +12,7 @@
 #include <asm/code-patching.h>
 #include <mm/mmu_decl.h>
 
-static pgprot_t kasan_prot_ro(void)
+static pgprot_t __init kasan_prot_ro(void)
 {
 	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
 		return PAGE_READONLY;
@@ -20,7 +20,7 @@ static pgprot_t kasan_prot_ro(void)
 	return PAGE_KERNEL_RO;
 }
 
-static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
+static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
 {
 	unsigned long va = (unsigned long)kasan_early_shadow_page;
 	phys_addr_t pa = __pa(kasan_early_shadow_page);
@@ -30,29 +30,25 @@ static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
 		__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
 }
 
-static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
+static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
 {
 	pmd_t *pmd;
 	unsigned long k_cur, k_next;
-	pgprot_t prot = slab_is_available() ? kasan_prot_ro() : PAGE_KERNEL;
+	pte_t *new = NULL;
 
 	pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
 
 	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
-		pte_t *new;
-
 		k_next = pgd_addr_end(k_cur, k_end);
 		if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
 			continue;
 
-		if (slab_is_available())
-			new = pte_alloc_one_kernel(&init_mm);
-		else
+		if (!new)
 			new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
 
 		if (!new)
 			return -ENOMEM;
-		kasan_populate_pte(new, prot);
+		kasan_populate_pte(new, PAGE_KERNEL);
 
 		smp_wmb(); /* See comment in __pte_alloc */
 
@@ -63,39 +59,27 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned l
 			new = NULL;
 		}
 		spin_unlock(&init_mm.page_table_lock);
-
-		if (new && slab_is_available())
-			pte_free_kernel(&init_mm, new);
 	}
 	return 0;
 }
 
-static void __ref *kasan_get_one_page(void)
-{
-	if (slab_is_available())
-		return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-
-	return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-}
-
-static int __ref kasan_init_region(void *start, size_t size)
+static int __init kasan_init_region(void *start, size_t size)
 {
 	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
 	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
 	unsigned long k_cur;
 	int ret;
-	void *block = NULL;
+	void *block;
 
 	ret = kasan_init_shadow_page_tables(k_start, k_end);
 	if (ret)
 		return ret;
 
-	if (!slab_is_available())
-		block = memblock_alloc(k_end - k_start, PAGE_SIZE);
+	block = memblock_alloc(k_end - k_start, PAGE_SIZE);
 
 	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
 		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
-		void *va = block ? block + k_cur - k_start : kasan_get_one_page();
+		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
 		if (!va)
@@ -129,6 +113,31 @@ static void __init kasan_remap_early_shadow_ro(void)
 	flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
 }
 
+static void __init kasan_unmap_early_shadow_vmalloc(void)
+{
+	unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START);
+	unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END);
+	unsigned long k_cur;
+	phys_addr_t pa = __pa(kasan_early_shadow_page);
+
+	if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+		int ret = kasan_init_shadow_page_tables(k_start, k_end);
+
+		if (ret)
+			panic("kasan: kasan_init_shadow_page_tables() failed");
+	}
+	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
+		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+		pte_t *ptep = pte_offset_kernel(pmd, k_cur);
+
+		if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
+			continue;
+
+		__set_pte_at(&init_mm, k_cur, ptep, __pte(0), 0);
+	}
+	flush_tlb_kernel_range(k_start, k_end);
+}
+
 void __init kasan_mmu_init(void)
 {
 	int ret;
@@ -165,34 +174,22 @@ void __init kasan_init(void)
 	pr_info("KASAN init done\n");
 }
 
-#ifdef CONFIG_MODULES
-void *module_alloc(unsigned long size)
+void __init kasan_late_init(void)
 {
-	void *base;
-
-	base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, VMALLOC_END,
-				    GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
-				    NUMA_NO_NODE, __builtin_return_address(0));
-
-	if (!base)
-		return NULL;
-
-	if (!kasan_init_region(base, size))
-		return base;
-
-	vfree(base);
-
-	return NULL;
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+		kasan_unmap_early_shadow_vmalloc();
 }
-#endif
 
 #ifdef CONFIG_PPC_BOOK3S_32
 u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};
 
 static void __init kasan_early_hash_table(void)
 {
-	modify_instruction_site(&patch__hash_page_A0, 0xffff, __pa(early_hash) >> 16);
-	modify_instruction_site(&patch__flush_hash_A0, 0xffff, __pa(early_hash) >> 16);
+	unsigned int hash = IS_ENABLED(CONFIG_VMAP_STACK) ? (unsigned int)early_hash :
+							    __pa(early_hash);
+
+	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
+	modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
 
 	Hash = (struct hash_pte *)early_hash;
 }
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index f5535ea..ef7b111 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -49,6 +49,7 @@
 #include <asm/fixmap.h>
 #include <asm/swiotlb.h>
 #include <asm/rtas.h>
+#include <asm/kasan.h>
 
 #include <mm/mmu_decl.h>
 
@@ -301,6 +302,9 @@ void __init mem_init(void)
 
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 	set_max_mapnr(max_pfn);
+
+	kasan_late_init();
+
 	memblock_free_all();
 
 #ifdef CONFIG_HIGHMEM
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 8e99649c..7097e07 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -181,3 +181,9 @@ void mmu_mark_rodata_ro(void);
 static inline void mmu_mark_initmem_nx(void) { }
 static inline void mmu_mark_rodata_ro(void) { }
 #endif
+
+#ifdef CONFIG_PPC_DEBUG_WX
+void ptdump_check_wx(void);
+#else
+static inline void ptdump_check_wx(void) { }
+#endif
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 96eb8e4..3189308 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -21,33 +21,34 @@ extern int __map_without_ltlbs;
 static unsigned long block_mapped_ram;
 
 /*
- * Return PA for this VA if it is in an area mapped with LTLBs.
+ * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
  * Otherwise, returns 0
  */
 phys_addr_t v_block_mapped(unsigned long va)
 {
 	unsigned long p = PHYS_IMMR_BASE;
 
-	if (__map_without_ltlbs)
-		return 0;
 	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
 		return p + va - VIRT_IMMR_BASE;
+	if (__map_without_ltlbs)
+		return 0;
 	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
 		return __pa(va);
 	return 0;
 }
 
 /*
- * Return VA for a given PA mapped with LTLBs or 0 if not mapped
+ * Return VA for a given PA mapped with LTLBs or fixmap
+ * Return 0 if not mapped
  */
 unsigned long p_block_mapped(phys_addr_t pa)
 {
 	unsigned long p = PHYS_IMMR_BASE;
 
-	if (__map_without_ltlbs)
-		return 0;
 	if (pa >= p && pa < p + IMMR_SIZE)
 		return VIRT_IMMR_BASE + pa - p;
+	if (__map_without_ltlbs)
+		return 0;
 	if (pa < block_mapped_ram)
 		return (unsigned long)__va(pa);
 	return 0;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 50d68d2..3c7dec7 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1616,11 +1616,11 @@ static ssize_t topology_write(struct file *file, const char __user *buf,
 	return count;
 }
 
-static const struct file_operations topology_ops = {
-	.read = seq_read,
-	.write = topology_write,
-	.open = topology_open,
-	.release = single_release
+static const struct proc_ops topology_proc_ops = {
+	.proc_read	= seq_read,
+	.proc_write	= topology_write,
+	.proc_open	= topology_open,
+	.proc_release	= single_release,
 };
 
 static int topology_update_init(void)
@@ -1630,7 +1630,7 @@ static int topology_update_init(void)
 	if (vphn_enabled)
 		topology_schedule_update();
 
-	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
+	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_proc_ops))
 		return -ENOMEM;
 
 	topology_inited = 1;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 73b8416..5fb90ed 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -218,6 +218,7 @@ void mark_rodata_ro(void)
 
 	if (v_block_mapped((unsigned long)_sinittext)) {
 		mmu_mark_rodata_ro();
+		ptdump_check_wx();
 		return;
 	}
 
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 2f9ddc2..2061562 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -24,6 +24,8 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 
+#include <mm/mmu_decl.h>
+
 #include "ptdump.h"
 
 /*
@@ -173,10 +175,12 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
 
 static void note_prot_wx(struct pg_state *st, unsigned long addr)
 {
+	pte_t pte = __pte(st->current_flags);
+
 	if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
 		return;
 
-	if (!((st->current_flags & pgprot_val(PAGE_KERNEL_X)) == pgprot_val(PAGE_KERNEL_X)))
+	if (!pte_write(pte) || !pte_exec(pte))
 		return;
 
 	WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c
index 43245f4..6f347fa 100644
--- a/arch/powerpc/oprofile/backtrace.c
+++ b/arch/powerpc/oprofile/backtrace.c
@@ -9,7 +9,7 @@
 #include <linux/sched.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
-#include <asm/compat.h>
+#include <linux/compat.h>
 #include <asm/oprofile_impl.h>
 
 #define STACK_SP(STACK)		*(STACK)
@@ -28,15 +28,12 @@ static unsigned int user_getsp32(unsigned int sp, int is_first)
 	unsigned int stack_frame[2];
 	void __user *p = compat_ptr(sp);
 
-	if (!access_ok(p, sizeof(stack_frame)))
-		return 0;
-
 	/*
 	 * The most likely reason for this is that we returned -EFAULT,
 	 * which means that we've done all that we can do from
 	 * interrupt context.
 	 */
-	if (__copy_from_user_inatomic(stack_frame, p, sizeof(stack_frame)))
+	if (probe_user_read(stack_frame, (void __user *)p, sizeof(stack_frame)))
 		return 0;
 
 	if (!is_first)
@@ -54,11 +51,7 @@ static unsigned long user_getsp64(unsigned long sp, int is_first)
 {
 	unsigned long stack_frame[3];
 
-	if (!access_ok((void __user *)sp, sizeof(stack_frame)))
-		return 0;
-
-	if (__copy_from_user_inatomic(stack_frame, (void __user *)sp,
-					sizeof(stack_frame)))
+	if (probe_user_read(stack_frame, (void __user *)sp, sizeof(stack_frame)))
 		return 0;
 
 	if (!is_first)
@@ -103,7 +96,6 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
 			first_frame = 0;
 		}
 	} else {
-		pagefault_disable();
 #ifdef CONFIG_PPC64
 		if (!is_32bit_task()) {
 			while (depth--) {
@@ -112,7 +104,6 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
 					break;
 				first_frame = 0;
 			}
-			pagefault_enable();
 			return;
 		}
 #endif
@@ -123,6 +114,5 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
 				break;
 			first_frame = 0;
 		}
-		pagefault_enable();
 	}
 }
diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c
index 19124b0..1ad03c5 100644
--- a/arch/powerpc/perf/8xx-pmu.c
+++ b/arch/powerpc/perf/8xx-pmu.c
@@ -157,10 +157,6 @@ static void mpc8xx_pmu_read(struct perf_event *event)
 
 static void mpc8xx_pmu_del(struct perf_event *event, int flags)
 {
-	/* mfspr r10, SPRN_SPRG_SCRATCH0 */
-	unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) |
-			    __PPC_SPR(SPRN_SPRG_SCRATCH0);
-
 	mpc8xx_pmu_read(event);
 
 	/* If it was the last user, stop counting to avoid useless overhead */
@@ -173,6 +169,10 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags)
 		break;
 	case PERF_8xx_ID_ITLB_LOAD_MISS:
 		if (atomic_dec_return(&itlb_miss_ref) == 0) {
+			/* mfspr r10, SPRN_SPRG_SCRATCH0 */
+			unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) |
+					    __PPC_SPR(SPRN_SPRG_SCRATCH0);
+
 			patch_instruction_site(&patch__itlbmiss_exit_1, insn);
 #ifndef CONFIG_PIN_TLB_TEXT
 			patch_instruction_site(&patch__itlbmiss_exit_2, insn);
@@ -181,6 +181,10 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags)
 		break;
 	case PERF_8xx_ID_DTLB_LOAD_MISS:
 		if (atomic_dec_return(&dtlb_miss_ref) == 0) {
+			/* mfspr r10, SPRN_DAR */
+			unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) |
+					    __PPC_SPR(SPRN_DAR);
+
 			patch_instruction_site(&patch__dtlbmiss_exit_1, insn);
 			patch_instruction_site(&patch__dtlbmiss_exit_2, insn);
 			patch_instruction_site(&patch__dtlbmiss_exit_3, insn);
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 35d5425..cbc2519 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -155,12 +155,8 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
 	    ((unsigned long)ptr & 7))
 		return -EFAULT;
 
-	pagefault_disable();
-	if (!__get_user_inatomic(*ret, ptr)) {
-		pagefault_enable();
+	if (!probe_user_read(ret, ptr, sizeof(*ret)))
 		return 0;
-	}
-	pagefault_enable();
 
 	return read_user_stack_slow(ptr, ret, 8);
 }
@@ -171,12 +167,8 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 	    ((unsigned long)ptr & 3))
 		return -EFAULT;
 
-	pagefault_disable();
-	if (!__get_user_inatomic(*ret, ptr)) {
-		pagefault_enable();
+	if (!probe_user_read(ret, ptr, sizeof(*ret)))
 		return 0;
-	}
-	pagefault_enable();
 
 	return read_user_stack_slow(ptr, ret, 4);
 }
@@ -293,17 +285,11 @@ static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
  */
 static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 {
-	int rc;
-
 	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
 	    ((unsigned long)ptr & 3))
 		return -EFAULT;
 
-	pagefault_disable();
-	rc = __get_user_inatomic(*ret, ptr);
-	pagefault_enable();
-
-	return rc;
+	return probe_user_read(ret, ptr, sizeof(*ret));
 }
 
 static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 4860462..3086055 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -415,7 +415,6 @@ static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
 static __u64 power_pmu_bhrb_to(u64 addr)
 {
 	unsigned int instr;
-	int ret;
 	__u64 target;
 
 	if (is_kernel_addr(addr)) {
@@ -426,13 +425,8 @@ static __u64 power_pmu_bhrb_to(u64 addr)
 	}
 
 	/* Userspace: need copy instruction here then translate it */
-	pagefault_disable();
-	ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
-	if (ret) {
-		pagefault_enable();
+	if (probe_user_read(&instr, (unsigned int __user *)addr, sizeof(instr)))
 		return 0;
-	}
-	pagefault_enable();
 
 	target = branch_target(&instr);
 	if ((!target) || (instr & BRANCH_ABSOLUTE))
diff --git a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
index 13631f3..04bf6ec 100644
--- a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
+++ b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
@@ -434,9 +434,9 @@ static int mpc512x_lpbfifo_probe(struct platform_device *pdev)
 	memset(&lpbfifo, 0, sizeof(struct lpbfifo_data));
 	spin_lock_init(&lpbfifo.lock);
 
-	lpbfifo.chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
-	if (lpbfifo.chan == NULL)
-		return -EPROBE_DEFER;
+	lpbfifo.chan = dma_request_chan(&pdev->dev, "rx-tx");
+	if (IS_ERR(lpbfifo.chan))
+		return PTR_ERR(lpbfifo.chan);
 
 	if (of_address_to_resource(pdev->dev.of_node, 0, &r) != 0) {
 		dev_err(&pdev->dev, "bad 'reg' in 'sclpc' device tree node\n");
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index 273145a..ada42f0 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -34,7 +34,6 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 #include <soc/fsl/qe/qe.h>
-#include <soc/fsl/qe/qe_ic.h>
 
 #include "mpc83xx.h"
 
@@ -64,7 +63,7 @@ static void quirk_mpc8360e_qe_enet10(void)
 		return;
 	}
 
-	base = ioremap(res.start, res.end - res.start + 1);
+	base = ioremap(res.start, resource_size(&res));
 
 	/*
 	 * set output delay adjustments to default values according
@@ -178,7 +177,7 @@ define_machine(mpc83xx_km) {
 	.name		= "mpc83xx-km-platform",
 	.probe		= mpc83xx_km_probe,
 	.setup_arch	= mpc83xx_km_setup_arch,
-	.init_IRQ	= mpc83xx_ipic_and_qe_init_IRQ,
+	.init_IRQ	= mpc83xx_ipic_init_IRQ,
 	.get_irq	= ipic_get_irq,
 	.restart	= mpc83xx_restart,
 	.time_init	= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index 6399865..a952e91 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -14,7 +14,6 @@
 #include <asm/io.h>
 #include <asm/hw_irq.h>
 #include <asm/ipic.h>
-#include <soc/fsl/qe/qe_ic.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
@@ -91,28 +90,6 @@ void __init mpc83xx_ipic_init_IRQ(void)
 	ipic_set_default_priority();
 }
 
-#ifdef CONFIG_QUICC_ENGINE
-void __init mpc83xx_qe_init_IRQ(void)
-{
-	struct device_node *np;
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
-	if (!np) {
-		np = of_find_node_by_type(NULL, "qeic");
-		if (!np)
-			return;
-	}
-	qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
-	of_node_put(np);
-}
-
-void __init mpc83xx_ipic_and_qe_init_IRQ(void)
-{
-	mpc83xx_ipic_init_IRQ();
-	mpc83xx_qe_init_IRQ();
-}
-#endif /* CONFIG_QUICC_ENGINE */
-
 static const struct of_device_id of_bus_ids[] __initconst = {
 	{ .type = "soc", },
 	{ .compatible = "soc", },
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index b428835..6fa5402e 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -33,7 +33,6 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 #include <soc/fsl/qe/qe.h>
-#include <soc/fsl/qe/qe_ic.h>
 
 #include "mpc83xx.h"
 
@@ -102,7 +101,7 @@ define_machine(mpc832x_mds) {
 	.name 		= "MPC832x MDS",
 	.probe 		= mpc832x_sys_probe,
 	.setup_arch 	= mpc832x_sys_setup_arch,
-	.init_IRQ	= mpc83xx_ipic_and_qe_init_IRQ,
+	.init_IRQ	= mpc83xx_ipic_init_IRQ,
 	.get_irq 	= ipic_get_irq,
 	.restart 	= mpc83xx_restart,
 	.time_init 	= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 4588ce6..622c625 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -22,7 +22,6 @@
 #include <asm/ipic.h>
 #include <asm/udbg.h>
 #include <soc/fsl/qe/qe.h>
-#include <soc/fsl/qe/qe_ic.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
@@ -220,7 +219,7 @@ define_machine(mpc832x_rdb) {
 	.name		= "MPC832x RDB",
 	.probe		= mpc832x_rdb_probe,
 	.setup_arch	= mpc832x_rdb_setup_arch,
-	.init_IRQ	= mpc83xx_ipic_and_qe_init_IRQ,
+	.init_IRQ	= mpc83xx_ipic_init_IRQ,
 	.get_irq	= ipic_get_irq,
 	.restart	= mpc83xx_restart,
 	.time_init	= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index 240a26d..90d9cbf 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -40,7 +40,6 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 #include <soc/fsl/qe/qe.h>
-#include <soc/fsl/qe/qe_ic.h>
 
 #include "mpc83xx.h"
 
@@ -202,7 +201,7 @@ define_machine(mpc836x_mds) {
 	.name		= "MPC836x MDS",
 	.probe		= mpc836x_mds_probe,
 	.setup_arch	= mpc836x_mds_setup_arch,
-	.init_IRQ	= mpc83xx_ipic_and_qe_init_IRQ,
+	.init_IRQ	= mpc83xx_ipic_init_IRQ,
 	.get_irq	= ipic_get_irq,
 	.restart	= mpc83xx_restart,
 	.time_init	= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index 9923059..b4aac2c 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -17,7 +17,6 @@
 #include <asm/ipic.h>
 #include <asm/udbg.h>
 #include <soc/fsl/qe/qe.h>
-#include <soc/fsl/qe/qe_ic.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
@@ -42,7 +41,7 @@ define_machine(mpc836x_rdk) {
 	.name		= "MPC836x RDK",
 	.probe		= mpc836x_rdk_probe,
 	.setup_arch	= mpc836x_rdk_setup_arch,
-	.init_IRQ	= mpc83xx_ipic_and_qe_init_IRQ,
+	.init_IRQ	= mpc83xx_ipic_init_IRQ,
 	.get_irq	= ipic_get_irq,
 	.restart	= mpc83xx_restart,
 	.time_init	= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 4591456..f37d043 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -72,13 +72,6 @@ extern int mpc837x_usb_cfg(void);
 extern int mpc834x_usb_cfg(void);
 extern int mpc831x_usb_cfg(void);
 extern void mpc83xx_ipic_init_IRQ(void);
-#ifdef CONFIG_QUICC_ENGINE
-extern void mpc83xx_qe_init_IRQ(void);
-extern void mpc83xx_ipic_and_qe_init_IRQ(void);
-#else
-static inline void __init mpc83xx_qe_init_IRQ(void) {}
-#define mpc83xx_ipic_and_qe_init_IRQ mpc83xx_ipic_init_IRQ
-#endif /* CONFIG_QUICC_ENGINE */
 
 #ifdef CONFIG_PCI
 extern void mpc83xx_setup_pci(void);
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index a328a74..27ac38f 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -24,7 +24,6 @@
 #include <asm/mpic.h>
 #include <asm/ehv_pic.h>
 #include <asm/swiotlb.h>
-#include <soc/fsl/qe/qe_ic.h>
 
 #include <linux/of_platform.h>
 #include <sysdev/fsl_soc.h>
@@ -38,8 +37,6 @@ void __init corenet_gen_pic_init(void)
 	unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU |
 		MPIC_NO_RESET;
 
-	struct device_node *np;
-
 	if (ppc_md.get_irq == mpic_get_coreint_irq)
 		flags |= MPIC_ENABLE_COREINT;
 
@@ -47,13 +44,6 @@ void __init corenet_gen_pic_init(void)
 	BUG_ON(mpic == NULL);
 
 	mpic_init(mpic);
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
-	if (np) {
-		qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
-				qe_ic_cascade_high_mpic);
-		of_node_put(np);
-	}
 }
 
 /*
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 381a6ac..7759eca 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -44,7 +44,6 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 #include <soc/fsl/qe/qe.h>
-#include <soc/fsl/qe/qe_ic.h>
 #include <asm/mpic.h>
 #include <asm/swiotlb.h>
 #include "smp.h"
@@ -268,33 +267,8 @@ static void __init mpc85xx_mds_qe_init(void)
 	}
 }
 
-static void __init mpc85xx_mds_qeic_init(void)
-{
-	struct device_node *np;
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,qe");
-	if (!of_device_is_available(np)) {
-		of_node_put(np);
-		return;
-	}
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
-	if (!np) {
-		np = of_find_node_by_type(NULL, "qeic");
-		if (!np)
-			return;
-	}
-
-	if (machine_is(p1021_mds))
-		qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
-				qe_ic_cascade_high_mpic);
-	else
-		qe_ic_init(np, 0, qe_ic_cascade_muxed_mpic, NULL);
-	of_node_put(np);
-}
 #else
 static void __init mpc85xx_mds_qe_init(void) { }
-static void __init mpc85xx_mds_qeic_init(void) { }
 #endif	/* CONFIG_QUICC_ENGINE */
 
 static void __init mpc85xx_mds_setup_arch(void)
@@ -364,7 +338,6 @@ static void __init mpc85xx_mds_pic_init(void)
 	BUG_ON(mpic == NULL);
 
 	mpic_init(mpic);
-	mpc85xx_mds_qeic_init();
 }
 
 static int __init mpc85xx_mds_probe(void)
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 7f9a84f..80a8017 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -23,7 +23,6 @@
 #include <asm/udbg.h>
 #include <asm/mpic.h>
 #include <soc/fsl/qe/qe.h>
-#include <soc/fsl/qe/qe_ic.h>
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
@@ -44,10 +43,6 @@ void __init mpc85xx_rdb_pic_init(void)
 {
 	struct mpic *mpic;
 
-#ifdef CONFIG_QUICC_ENGINE
-	struct device_node *np;
-#endif
-
 	if (of_machine_is_compatible("fsl,MPC85XXRDB-CAMP")) {
 		mpic = mpic_alloc(NULL, 0, MPIC_NO_RESET |
 			MPIC_BIG_ENDIAN |
@@ -62,18 +57,6 @@ void __init mpc85xx_rdb_pic_init(void)
 
 	BUG_ON(mpic == NULL);
 	mpic_init(mpic);
-
-#ifdef CONFIG_QUICC_ENGINE
-	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
-	if (np) {
-		qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
-				qe_ic_cascade_high_mpic);
-		of_node_put(np);
-
-	} else
-		pr_err("%s: Could not find qe-ic node\n", __func__);
-#endif
-
 }
 
 /*
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 8c7ea24..48f7d96 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -252,6 +252,15 @@ static int smp_85xx_start_cpu(int cpu)
 	out_be64((u64 *)(&spin_table->addr_h),
 		__pa(ppc_function_entry(generic_secondary_smp_init)));
 #else
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+	/*
+	 * We also need to write addr_h to the spin table for systems
+	 * whose physical memory start address is configured above 4G,
+	 * otherwise the secondary core cannot find the correct entry
+	 * point to start from.
+	 */
+	out_be32(&spin_table->addr_h, __pa(__early_start) >> 32);
+#endif
 	out_be32(&spin_table->addr_l, __pa(__early_start));
 #endif
 	flush_spin_table(spin_table);
diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c
index 6c3c0cd..eaec099 100644
--- a/arch/powerpc/platforms/85xx/twr_p102x.c
+++ b/arch/powerpc/platforms/85xx/twr_p102x.c
@@ -19,7 +19,6 @@
 #include <asm/udbg.h>
 #include <asm/mpic.h>
 #include <soc/fsl/qe/qe.h>
-#include <soc/fsl/qe/qe_ic.h>
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
@@ -31,26 +30,12 @@ static void __init twr_p1025_pic_init(void)
 {
 	struct mpic *mpic;
 
-#ifdef CONFIG_QUICC_ENGINE
-	struct device_node *np;
-#endif
-
 	mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
 			MPIC_SINGLE_DEST_CPU,
 			0, 256, " OpenPIC  ");
 
 	BUG_ON(mpic == NULL);
 	mpic_init(mpic);
-
-#ifdef CONFIG_QUICC_ENGINE
-	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
-	if (np) {
-		qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
-				qe_ic_cascade_high_mpic);
-		of_node_put(np);
-	} else
-		pr_err("Could not find qe-ic node\n");
-#endif
 }
 
 /* ************************************************************************
@@ -60,10 +45,6 @@ static void __init twr_p1025_pic_init(void)
  */
 static void __init twr_p1025_setup_arch(void)
 {
-#ifdef CONFIG_QUICC_ENGINE
-	struct device_node *np;
-#endif
-
 	if (ppc_md.progress)
 		ppc_md.progress("twr_p1025_setup_arch()", 0);
 
@@ -77,6 +58,7 @@ static void __init twr_p1025_setup_arch(void)
 #if IS_ENABLED(CONFIG_UCC_GETH) || IS_ENABLED(CONFIG_SERIAL_QE)
 	if (machine_is(twr_p1025)) {
 		struct ccsr_guts __iomem *guts;
+		struct device_node *np;
 
 		np = of_find_compatible_node(NULL, NULL, "fsl,p1021-guts");
 		if (np) {
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index e28df29..1f80253 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -177,6 +177,10 @@
 config PPC_P7_NAP
 	bool
 
+config PPC_BOOK3S_IDLE
+	def_bool y
+	depends on (PPC_970_NAP || PPC_P7_NAP)
+
 config PPC_INDIRECT_PIO
 	bool
 	select GENERIC_IOMAP
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 8d7f9c3..6caedc8 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -1,4 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
+config PPC32
+	bool
+	default y if !PPC64
+	select KASAN_VMALLOC if KASAN && MODULES
+
 config PPC64
 	bool "64-bit kernel"
 	select ZLIB_DEFLATE
@@ -31,12 +36,14 @@
 	select PPC_HAVE_PMU_SUPPORT
 	select PPC_HAVE_KUEP
 	select PPC_HAVE_KUAP
+	select HAVE_ARCH_VMAP_STACK
 
 config PPC_BOOK3S_601
 	bool "PowerPC 601"
 	select PPC_BOOK3S_32
 	select PPC_FPU
 	select PPC_HAVE_KUAP
+	select HAVE_ARCH_VMAP_STACK
 
 config PPC_85xx
 	bool "Freescale 85xx"
@@ -49,6 +56,7 @@
 	select PPC_HAVE_KUEP
 	select PPC_HAVE_KUAP
 	select PPC_MM_SLICES if HUGETLB_PAGE
+	select HAVE_ARCH_VMAP_STACK
 
 config 40x
 	bool "AMCC 40x"
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 9680d76..855eedb 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -240,9 +240,6 @@ static void __init cell_setup_arch(void)
 	init_pci_config_tokens();
 
 	cbe_pervasive_init();
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
 
 	mmio_nvram_init();
 }
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 9b1586b..2539056 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -583,7 +583,7 @@ enum {
 	Opt_uid, Opt_gid, Opt_mode, Opt_debug,
 };
 
-static const struct fs_parameter_spec spufs_param_specs[] = {
+static const struct fs_parameter_spec spufs_fs_parameters[] = {
 	fsparam_u32	("gid",				Opt_gid),
 	fsparam_u32oct	("mode",			Opt_mode),
 	fsparam_u32	("uid",				Opt_uid),
@@ -591,11 +591,6 @@ static const struct fs_parameter_spec spufs_param_specs[] = {
 	{}
 };
 
-static const struct fs_parameter_description spufs_fs_parameters = {
-	.name		= "spufs",
-	.specs		= spufs_param_specs,
-};
-
 static int spufs_show_options(struct seq_file *m, struct dentry *root)
 {
 	struct spufs_sb_info *sbi = spufs_get_sb_info(root->d_sb);
@@ -623,7 +618,7 @@ static int spufs_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	kgid_t gid;
 	int opt;
 
-	opt = fs_parse(fc, &spufs_fs_parameters, param, &result);
+	opt = fs_parse(fc, spufs_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
 
@@ -774,7 +769,7 @@ static struct file_system_type spufs_type = {
 	.owner = THIS_MODULE,
 	.name = "spufs",
 	.init_fs_context = spufs_init_fs_context,
-	.parameters	= &spufs_fs_parameters,
+	.parameters	= spufs_fs_parameters,
 	.kill_sb = kill_litter_super,
 };
 MODULE_ALIAS_FS("spufs");
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 9cd6f3e..6f019df 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -183,9 +183,6 @@ static void __init maple_setup_arch(void)
 	/* Lookup PCI hosts */
        	maple_pci_init();
 
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
 	maple_use_rtas_reboot_and_halt_if_present();
 
 	printk(KERN_DEBUG "Using native/NAP idle loop\n");
@@ -232,7 +229,7 @@ static void __init maple_init_IRQ(void)
 	root = of_find_node_by_path("/");
 	naddr = of_n_addr_cells(root);
 	opprop = of_get_property(root, "platform-open-pic", &opplen);
-	if (opprop != 0) {
+	if (opprop) {
 		openpic_addr = of_read_number(opprop, naddr);
 		has_isus = (opplen > naddr);
 		printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n",
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 05a52f1..b612474 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -147,10 +147,6 @@ static void __init pas_setup_arch(void)
 	/* Lookup PCI hosts */
 	pas_pci_init();
 
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
-
 	/* Remap SDC register for doing reset */
 	/* XXXOJN This should maybe come out of the device tree */
 	reset_reg = ioremap(0xfc101100, 4);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index a6ee080..2b3dfd0 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -790,42 +790,6 @@ static int opal_sysfs_init(void)
 	return 0;
 }
 
-static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
-			       struct bin_attribute *bin_attr,
-			       char *buf, loff_t off, size_t count)
-{
-	return memory_read_from_buffer(buf, count, &off, bin_attr->private,
-				       bin_attr->size);
-}
-
-static struct bin_attribute symbol_map_attr = {
-	.attr = {.name = "symbol_map", .mode = 0400},
-	.read = symbol_map_read
-};
-
-static void opal_export_symmap(void)
-{
-	const __be64 *syms;
-	unsigned int size;
-	struct device_node *fw;
-	int rc;
-
-	fw = of_find_node_by_path("/ibm,opal/firmware");
-	if (!fw)
-		return;
-	syms = of_get_property(fw, "symbol-map", &size);
-	if (!syms || size != 2 * sizeof(__be64))
-		return;
-
-	/* Setup attributes */
-	symbol_map_attr.private = __va(be64_to_cpu(syms[0]));
-	symbol_map_attr.size = be64_to_cpu(syms[1]);
-
-	rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr);
-	if (rc)
-		pr_warn("Error %d creating OPAL symbols file\n", rc);
-}
-
 static ssize_t export_attr_read(struct file *fp, struct kobject *kobj,
 				struct bin_attribute *bin_attr, char *buf,
 				loff_t off, size_t count)
@@ -834,6 +798,75 @@ static ssize_t export_attr_read(struct file *fp, struct kobject *kobj,
 				       bin_attr->size);
 }
 
+static int opal_add_one_export(struct kobject *parent, const char *export_name,
+			       struct device_node *np, const char *prop_name)
+{
+	struct bin_attribute *attr = NULL;
+	const char *name = NULL;
+	u64 vals[2];
+	int rc;
+
+	rc = of_property_read_u64_array(np, prop_name, &vals[0], 2);
+	if (rc)
+		goto out;
+
+	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+	name = kstrdup(export_name, GFP_KERNEL);
+	if (!attr || !name) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	sysfs_bin_attr_init(attr);
+	attr->attr.name = name;
+	attr->attr.mode = 0400;
+	attr->read = export_attr_read;
+	attr->private = __va(vals[0]);
+	attr->size = vals[1];
+
+	rc = sysfs_create_bin_file(parent, attr);
+out:
+	if (rc) {
+		kfree(name);
+		kfree(attr);
+	}
+
+	return rc;
+}
+
+static void opal_add_exported_attrs(struct device_node *np,
+				    struct kobject *kobj)
+{
+	struct device_node *child;
+	struct property *prop;
+
+	for_each_property_of_node(np, prop) {
+		int rc;
+
+		if (!strcmp(prop->name, "name") ||
+		    !strcmp(prop->name, "phandle"))
+			continue;
+
+		rc = opal_add_one_export(kobj, prop->name, np, prop->name);
+		if (rc) {
+			pr_warn("Unable to add export %pOF/%s, rc = %d!\n",
+				np, prop->name, rc);
+		}
+	}
+
+	for_each_child_of_node(np, child) {
+		struct kobject *child_kobj;
+
+		child_kobj = kobject_create_and_add(child->name, kobj);
+		if (!child_kobj) {
+			pr_err("Unable to create export dir for %pOF\n", child);
+			continue;
+		}
+
+		opal_add_exported_attrs(child, child_kobj);
+	}
+}
+
 /*
  * opal_export_attrs: creates a sysfs node for each property listed in
  * the device-tree under /ibm,opal/firmware/exports/
@@ -843,11 +876,8 @@ static ssize_t export_attr_read(struct file *fp, struct kobject *kobj,
  */
 static void opal_export_attrs(void)
 {
-	struct bin_attribute *attr;
 	struct device_node *np;
-	struct property *prop;
 	struct kobject *kobj;
-	u64 vals[2];
 	int rc;
 
 	np = of_find_node_by_path("/ibm,opal/firmware/exports");
@@ -861,41 +891,16 @@ static void opal_export_attrs(void)
 		return;
 	}
 
-	for_each_property_of_node(np, prop) {
-		if (!strcmp(prop->name, "name") || !strcmp(prop->name, "phandle"))
-			continue;
+	opal_add_exported_attrs(np, kobj);
 
-		if (of_property_read_u64_array(np, prop->name, &vals[0], 2))
-			continue;
-
-		attr = kzalloc(sizeof(*attr), GFP_KERNEL);
-
-		if (attr == NULL) {
-			pr_warn("Failed kmalloc for bin_attribute!");
-			continue;
-		}
-
-		sysfs_bin_attr_init(attr);
-		attr->attr.name = kstrdup(prop->name, GFP_KERNEL);
-		attr->attr.mode = 0400;
-		attr->read = export_attr_read;
-		attr->private = __va(vals[0]);
-		attr->size = vals[1];
-
-		if (attr->attr.name == NULL) {
-			pr_warn("Failed kstrdup for bin_attribute attr.name");
-			kfree(attr);
-			continue;
-		}
-
-		rc = sysfs_create_bin_file(kobj, attr);
-		if (rc) {
-			pr_warn("Error %d creating OPAL sysfs exports/%s file\n",
-				 rc, prop->name);
-			kfree(attr->attr.name);
-			kfree(attr);
-		}
-	}
+	/*
+	 * NB: symbol_map existed before the generic export interface, so it
+	 * lives under the top-level opal_kobj.
+	 */
+	rc = opal_add_one_export(opal_kobj, "symbol_map",
+				 np->parent, "symbol-map");
+	if (rc)
+		pr_warn("Error %d creating OPAL symbols file\n", rc);
 
 	of_node_put(np);
 }
@@ -1042,8 +1047,6 @@ static int __init opal_init(void)
 	/* Create "opal" kobject under /sys/firmware */
 	rc = opal_sysfs_init();
 	if (rc == 0) {
-		/* Export symbol map to userspace */
-		opal_export_symmap();
 		/* Setup dump region interface */
 		opal_dump_region_init();
 		/* Setup error log interface */
@@ -1056,11 +1059,10 @@ static int __init opal_init(void)
 		opal_sys_param_init();
 		/* Setup message log sysfs interface. */
 		opal_msglog_sysfs_init();
+		/* Add all export properties */
+		opal_export_attrs();
 	}
 
-	/* Export all properties */
-	opal_export_attrs();
-
 	/* Initialize platform devices: IPMI backend, PRD & flash interface */
 	opal_pdev_init("ibm,opal-ipmi");
 	opal_pdev_init("ibm,opal-flash");
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index da1068a..22c22cd 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -188,7 +188,7 @@ static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
 	unsigned int pe_num = pe->pe_number;
 
 	WARN_ON(pe->pdev);
-	WARN_ON(pe->npucomp); /* NPUs are not supposed to be freed */
+	WARN_ON(pe->npucomp); /* NPUs for nvlink are not supposed to be freed */
 	kfree(pe->npucomp);
 	memset(pe, 0, sizeof(struct pnv_ioda_pe));
 	clear_bit(pe_num, phb->ioda.pe_alloc);
@@ -777,6 +777,34 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
 	return 0;
 }
 
+static void pnv_ioda_unset_peltv(struct pnv_phb *phb,
+				 struct pnv_ioda_pe *pe,
+				 struct pci_dev *parent)
+{
+	int64_t rc;
+
+	while (parent) {
+		struct pci_dn *pdn = pci_get_pdn(parent);
+
+		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
+			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
+						pe->pe_number,
+						OPAL_REMOVE_PE_FROM_DOMAIN);
+			/* XXX What to do in case of error ? */
+		}
+		parent = parent->bus->self;
+	}
+
+	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
+				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+
+	/* Disassociate PE in PELT */
+	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
+				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
+	if (rc)
+		pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc);
+}
+
 static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
 {
 	struct pci_dev *parent;
@@ -792,7 +820,7 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
 		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
 		parent = pe->pbus->self;
 		if (pe->flags & PNV_IODA_PE_BUS_ALL)
-			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
+			count = resource_size(&pe->pbus->busn_res);
 		else
 			count = 1;
 
@@ -827,25 +855,13 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
 	for (rid = pe->rid; rid < rid_end; rid++)
 		phb->ioda.pe_rmap[rid] = IODA_INVALID_PE;
 
-	/* Release from all parents PELT-V */
-	while (parent) {
-		struct pci_dn *pdn = pci_get_pdn(parent);
-		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
-			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
-						pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
-			/* XXX What to do in case of error ? */
-		}
-		parent = parent->bus->self;
-	}
+	/*
+	 * Release the PE from all parents' PELT-V tables. NPUs don't
+	 * have a PELT-V table.
+	 */
+	if (phb->type != PNV_PHB_NPU_NVLINK && phb->type != PNV_PHB_NPU_OCAPI)
+		pnv_ioda_unset_peltv(phb, pe, parent);
 
-	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
-				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
-
-	/* Disassociate PE in PELT */
-	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
-				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
-	if (rc)
-		pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc);
 	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
 			     bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
 	if (rc)
@@ -874,7 +890,7 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
 		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
 		parent = pe->pbus->self;
 		if (pe->flags & PNV_IODA_PE_BUS_ALL)
-			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
+			count = resource_size(&pe->pbus->busn_res);
 		else
 			count = 1;
 
@@ -1062,20 +1078,20 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
 		return NULL;
 	}
 
-	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
-	 * pointer in the PE data structure, both should be destroyed at the
-	 * same time. However, this needs to be looked at more closely again
-	 * once we actually start removing things (Hotplug, SR-IOV, ...)
+	/* NOTE: We don't get a reference for the pointer in the PE
+	 * data structure, both the device and PE structures should be
+	 * destroyed at the same time. However, removing nvlink
+	 * devices will need some work.
 	 *
 	 * At some point we want to remove the PDN completely anyways
 	 */
-	pci_dev_get(dev);
 	pdn->pe_number = pe->pe_number;
 	pe->flags = PNV_IODA_PE_DEV;
 	pe->pdev = dev;
 	pe->pbus = NULL;
 	pe->mve_number = -1;
 	pe->rid = dev->bus->number << 8 | pdn->devfn;
+	pe->device_count++;
 
 	pe_info(pe, "Associated device to PE\n");
 
@@ -1084,13 +1100,13 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
 		pnv_ioda_free_pe(pe);
 		pdn->pe_number = IODA_INVALID_PE;
 		pe->pdev = NULL;
-		pci_dev_put(dev);
 		return NULL;
 	}
 
 	/* Put PE to the list */
+	mutex_lock(&phb->ioda.pe_list_mutex);
 	list_add_tail(&pe->list, &phb->ioda.pe_list);
-
+	mutex_unlock(&phb->ioda.pe_list_mutex);
 	return pe;
 }
 
@@ -1206,6 +1222,14 @@ static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
 	struct pnv_phb *phb = hose->private_data;
 
 	/*
+	 * Intentionally leak a reference on the npu device (for
+	 * nvlink only; this is not an opencapi path) to make sure it
+	 * never goes away, as has been the case all along; removing it
+	 * properly would need additional work.
+	 */
+	pci_dev_get(npu_pdev);
+
+	/*
 	 * Due to a hardware errata PE#0 on the NPU is reserved for
 	 * error handling. This means we only have three PEs remaining
 	 * which need to be assigned to four links, implying some
@@ -1228,11 +1252,11 @@ static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
 			 */
 			dev_info(&npu_pdev->dev,
 				"Associating to existing PE %x\n", pe_num);
-			pci_dev_get(npu_pdev);
 			npu_pdn = pci_get_pdn(npu_pdev);
 			rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
 			npu_pdn->pe_number = pe_num;
 			phb->ioda.pe_rmap[rid] = pe->pe_number;
+			pe->device_count++;
 
 			/* Map the PE to this link */
 			rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
@@ -1268,8 +1292,6 @@ static void pnv_pci_ioda_setup_PEs(void)
 {
 	struct pci_controller *hose;
 	struct pnv_phb *phb;
-	struct pci_bus *bus;
-	struct pci_dev *pdev;
 	struct pnv_ioda_pe *pe;
 
 	list_for_each_entry(hose, &hose_list, list_node) {
@@ -1281,11 +1303,6 @@ static void pnv_pci_ioda_setup_PEs(void)
 			if (phb->model == PNV_PHB_MODEL_NPU2)
 				WARN_ON_ONCE(pnv_npu2_init(hose));
 		}
-		if (phb->type == PNV_PHB_NPU_OCAPI) {
-			bus = hose->bus;
-			list_for_each_entry(pdev, &bus->devices, bus_list)
-				pnv_ioda_setup_dev_PE(pdev);
-		}
 	}
 	list_for_each_entry(hose, &hose_list, list_node) {
 		phb = hose->private_data;
@@ -1558,6 +1575,10 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
 
 	/* Reserve PE for each VF */
 	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
+		int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index);
+		int vf_bus = pci_iov_virtfn_bus(pdev, vf_index);
+		struct pci_dn *vf_pdn;
+
 		if (pdn->m64_single_mode)
 			pe_num = pdn->pe_num_map[vf_index];
 		else
@@ -1570,13 +1591,11 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
 		pe->pbus = NULL;
 		pe->parent_dev = pdev;
 		pe->mve_number = -1;
-		pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
-			   pci_iov_virtfn_devfn(pdev, vf_index);
+		pe->rid = (vf_bus << 8) | vf_devfn;
 
 		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
 			hose->global_number, pdev->bus->number,
-			PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
-			PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
+			PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);
 
 		if (pnv_ioda_configure_pe(phb, pe)) {
 			/* XXX What do we do here ? */
@@ -1590,6 +1609,15 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
 		list_add_tail(&pe->list, &phb->ioda.pe_list);
 		mutex_unlock(&phb->ioda.pe_list_mutex);
 
+		/* associate this PE with its pdn */
+		list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
+			if (vf_pdn->busno == vf_bus &&
+			    vf_pdn->devfn == vf_devfn) {
+				vf_pdn->pe_number = pe_num;
+				break;
+			}
+		}
+
 		pnv_pci_ioda2_setup_dma_pe(phb, pe);
 #ifdef CONFIG_IOMMU_API
 		iommu_register_group(&pe->table_group,
@@ -1719,21 +1747,23 @@ int pnv_pcibios_sriov_disable(struct pci_dev *pdev)
 	pnv_pci_sriov_disable(pdev);
 
 	/* Release PCI data */
-	remove_dev_pci_data(pdev);
+	remove_sriov_vf_pdns(pdev);
 	return 0;
 }
 
 int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
 {
 	/* Allocate PCI data */
-	add_dev_pci_data(pdev);
+	add_sriov_vf_pdns(pdev);
 
 	return pnv_pci_sriov_enable(pdev, num_vfs);
 }
 #endif /* CONFIG_PCI_IOV */
 
-static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
+static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev)
 {
+	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+	struct pnv_phb *phb = hose->private_data;
 	struct pci_dn *pdn = pci_get_pdn(pdev);
 	struct pnv_ioda_pe *pe;
 
@@ -2889,9 +2919,6 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
 	struct pci_dn *pdn;
 	int mul, total_vfs;
 
-	if (!pdev->is_physfn || pci_dev_is_added(pdev))
-		return;
-
 	pdn = pci_get_pdn(pdev);
 	pdn->vfs_expanded = 0;
 	pdn->m64_single_mode = false;
@@ -2966,6 +2993,30 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
 		res->end = res->start - 1;
 	}
 }
+
+static void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)
+{
+	if (WARN_ON(pci_dev_is_added(pdev)))
+		return;
+
+	if (pdev->is_virtfn) {
+		struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);
+
+		/*
+		 * VF PEs are single-device PEs so their pdev pointer needs to
+		 * be set. The pdev doesn't exist when the PE is allocated (in
+		 * pcibios_sriov_enable()), so we fix it up here.
+		 */
+		pe->pdev = pdev;
+		WARN_ON(!(pe->flags & PNV_IODA_PE_VF));
+	} else if (pdev->is_physfn) {
+		/*
+		 * For PFs, adjust their allocated IOV resources to match what
+		 * the PHB can support using its M64 BAR table.
+		 */
+		pnv_pci_ioda_fixup_iov_resources(pdev);
+	}
+}
 #endif /* CONFIG_PCI_IOV */
 
 static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
@@ -3062,19 +3113,9 @@ static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
 #ifdef CONFIG_DEBUG_FS
 static int pnv_pci_diag_data_set(void *data, u64 val)
 {
-	struct pci_controller *hose;
-	struct pnv_phb *phb;
+	struct pnv_phb *phb = data;
 	s64 ret;
 
-	if (val != 1ULL)
-		return -EINVAL;
-
-	hose = (struct pci_controller *)data;
-	if (!hose || !hose->private_data)
-		return -ENODEV;
-
-	phb = hose->private_data;
-
 	/* Retrieve the diag data from firmware */
 	ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
 					  phb->diag_data_size);
@@ -3089,6 +3130,33 @@ static int pnv_pci_diag_data_set(void *data, u64 val)
 DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_diag_data_fops, NULL, pnv_pci_diag_data_set,
 			 "%llu\n");
 
+static int pnv_pci_ioda_pe_dump(void *data, u64 val)
+{
+	struct pnv_phb *phb = data;
+	int pe_num;
+
+	for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
+		struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_num];
+
+		if (!test_bit(pe_num, phb->ioda.pe_alloc))
+			continue;
+
+		pe_warn(pe, "rid: %04x dev count: %2d flags: %s%s%s%s%s%s\n",
+			pe->rid, pe->device_count,
+			(pe->flags & PNV_IODA_PE_DEV) ? "dev " : "",
+			(pe->flags & PNV_IODA_PE_BUS) ? "bus " : "",
+			(pe->flags & PNV_IODA_PE_BUS_ALL) ? "all " : "",
+			(pe->flags & PNV_IODA_PE_MASTER) ? "master " : "",
+			(pe->flags & PNV_IODA_PE_SLAVE) ? "slave " : "",
+			(pe->flags & PNV_IODA_PE_VF) ? "vf " : "");
+	}
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_ioda_pe_dump_fops, NULL,
+			 pnv_pci_ioda_pe_dump, "%llu\n");
+
 #endif /* CONFIG_DEBUG_FS */
 
 static void pnv_pci_ioda_create_dbgfs(void)
@@ -3113,7 +3181,9 @@ static void pnv_pci_ioda_create_dbgfs(void)
 		}
 
 		debugfs_create_file_unsafe("dump_diag_regs", 0200, phb->dbgfs,
-					   hose, &pnv_pci_diag_data_fops);
+					   phb, &pnv_pci_diag_data_fops);
+		debugfs_create_file_unsafe("dump_ioda_pe_state", 0200, phb->dbgfs,
+					   phb, &pnv_pci_ioda_pe_dump_fops);
 	}
 #endif /* CONFIG_DEBUG_FS */
 }
@@ -3383,6 +3453,28 @@ static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
 	return true;
 }
 
+static bool pnv_ocapi_enable_device_hook(struct pci_dev *dev)
+{
+	struct pci_controller *hose = pci_bus_to_host(dev->bus);
+	struct pnv_phb *phb = hose->private_data;
+	struct pci_dn *pdn;
+	struct pnv_ioda_pe *pe;
+
+	if (!phb->initialized)
+		return true;
+
+	pdn = pci_get_pdn(dev);
+	if (!pdn)
+		return false;
+
+	if (pdn->pe_number == IODA_INVALID_PE) {
+		pe = pnv_ioda_setup_dev_PE(dev);
+		if (!pe)
+			return false;
+	}
+	return true;
+}
+
 static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group,
 				       int num)
 {
@@ -3512,7 +3604,10 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
 	struct pnv_phb *phb = pe->phb;
 	struct pnv_ioda_pe *slave, *tmp;
 
+	mutex_lock(&phb->ioda.pe_list_mutex);
 	list_del(&pe->list);
+	mutex_unlock(&phb->ioda.pe_list_mutex);
+
 	switch (phb->type) {
 	case PNV_PHB_IODA1:
 		pnv_pci_ioda1_release_pe_dma(pe);
@@ -3520,6 +3615,8 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
 	case PNV_PHB_IODA2:
 		pnv_pci_ioda2_release_pe_dma(pe);
 		break;
+	case PNV_PHB_NPU_OCAPI:
+		break;
 	default:
 		WARN_ON(1);
 	}
@@ -3594,9 +3691,29 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
 		       OPAL_ASSERT_RESET);
 }
 
+static void pnv_pci_ioda_dma_bus_setup(struct pci_bus *bus)
+{
+	struct pci_controller *hose = bus->sysdata;
+	struct pnv_phb *phb = hose->private_data;
+	struct pnv_ioda_pe *pe;
+
+	list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+		if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
+			continue;
+
+		if (!pe->pbus)
+			continue;
+
+		if (bus->number == ((pe->rid >> 8) & 0xFF)) {
+			pe->pbus = bus;
+			break;
+		}
+	}
+}
+
 static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
-	.dma_dev_setup		= pnv_pci_dma_dev_setup,
-	.dma_bus_setup		= pnv_pci_dma_bus_setup,
+	.dma_dev_setup		= pnv_pci_ioda_dma_dev_setup,
+	.dma_bus_setup		= pnv_pci_ioda_dma_bus_setup,
 	.iommu_bypass_supported	= pnv_pci_ioda_iommu_bypass_supported,
 	.setup_msi_irqs		= pnv_setup_msi_irqs,
 	.teardown_msi_irqs	= pnv_teardown_msi_irqs,
@@ -3609,7 +3726,6 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
 };
 
 static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
-	.dma_dev_setup		= pnv_pci_dma_dev_setup,
 	.setup_msi_irqs		= pnv_setup_msi_irqs,
 	.teardown_msi_irqs	= pnv_teardown_msi_irqs,
 	.enable_device_hook	= pnv_pci_enable_device_hook,
@@ -3620,7 +3736,8 @@ static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
 };
 
 static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = {
-	.enable_device_hook	= pnv_pci_enable_device_hook,
+	.enable_device_hook	= pnv_ocapi_enable_device_hook,
+	.release_device		= pnv_pci_release_device,
 	.window_alignment	= pnv_pci_window_alignment,
 	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
 	.shutdown		= pnv_pci_ioda_shutdown,
@@ -3855,14 +3972,13 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 		hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops;
 		break;
 	default:
-		phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
 		hose->controller_ops = pnv_pci_ioda_controller_ops;
 	}
 
 	ppc_md.pcibios_default_alignment = pnv_pci_default_alignment;
 
 #ifdef CONFIG_PCI_IOV
-	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
+	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov;
 	ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
 	ppc_md.pcibios_sriov_enable = pnv_pcibios_sriov_enable;
 	ppc_md.pcibios_sriov_disable = pnv_pcibios_sriov_disable;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index c0bea75a..5bf8182 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -38,7 +38,7 @@ static DEFINE_MUTEX(tunnel_mutex);
 
 int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
 {
-	struct device_node *parent = np;
+	struct device_node *node = np;
 	u32 bdfn;
 	u64 phbid;
 	int ret;
@@ -48,25 +48,29 @@ int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
 		return -ENXIO;
 
 	bdfn = ((bdfn & 0x00ffff00) >> 8);
-	while ((parent = of_get_parent(parent))) {
-		if (!PCI_DN(parent)) {
-			of_node_put(parent);
+	for (node = np; node; node = of_get_parent(node)) {
+		if (!PCI_DN(node)) {
+			of_node_put(node);
 			break;
 		}
 
-		if (!of_device_is_compatible(parent, "ibm,ioda2-phb") &&
-		    !of_device_is_compatible(parent, "ibm,ioda3-phb")) {
-			of_node_put(parent);
+		if (!of_device_is_compatible(node, "ibm,ioda2-phb") &&
+		    !of_device_is_compatible(node, "ibm,ioda3-phb") &&
+		    !of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb")) {
+			of_node_put(node);
 			continue;
 		}
 
-		ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid);
+		ret = of_property_read_u64(node, "ibm,opal-phbid", &phbid);
 		if (ret) {
-			of_node_put(parent);
+			of_node_put(node);
 			return -ENXIO;
 		}
 
-		*id = PCI_SLOT_ID(phbid, bdfn);
+		if (of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb"))
+			*id = PCI_PHB_SLOT_ID(phbid);
+		else
+			*id = PCI_SLOT_ID(phbid, bdfn);
 		return 0;
 	}
 
@@ -810,53 +814,6 @@ struct iommu_table *pnv_pci_table_alloc(int nid)
 	return tbl;
 }
 
-void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
-{
-	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
-	struct pnv_phb *phb = hose->private_data;
-#ifdef CONFIG_PCI_IOV
-	struct pnv_ioda_pe *pe;
-	struct pci_dn *pdn;
-
-	/* Fix the VF pdn PE number */
-	if (pdev->is_virtfn) {
-		pdn = pci_get_pdn(pdev);
-		WARN_ON(pdn->pe_number != IODA_INVALID_PE);
-		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
-			if (pe->rid == ((pdev->bus->number << 8) |
-			    (pdev->devfn & 0xff))) {
-				pdn->pe_number = pe->pe_number;
-				pe->pdev = pdev;
-				break;
-			}
-		}
-	}
-#endif /* CONFIG_PCI_IOV */
-
-	if (phb && phb->dma_dev_setup)
-		phb->dma_dev_setup(phb, pdev);
-}
-
-void pnv_pci_dma_bus_setup(struct pci_bus *bus)
-{
-	struct pci_controller *hose = bus->sysdata;
-	struct pnv_phb *phb = hose->private_data;
-	struct pnv_ioda_pe *pe;
-
-	list_for_each_entry(pe, &phb->ioda.pe_list, list) {
-		if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
-			continue;
-
-		if (!pe->pbus)
-			continue;
-
-		if (bus->number == ((pe->rid >> 8) & 0xFF)) {
-			pe->pbus = bus;
-			break;
-		}
-	}
-}
-
 struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
 {
 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index f914f0b..d3bbdea 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -108,7 +108,6 @@ struct pnv_phb {
 	int (*msi_setup)(struct pnv_phb *phb, struct pci_dev *dev,
 			 unsigned int hwirq, unsigned int virq,
 			 unsigned int is_64, struct msi_msg *msg);
-	void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
 	int (*init_m64)(struct pnv_phb *phb);
 	int (*get_pe_state)(struct pnv_phb *phb, int pe_no);
 	void (*freeze_pe)(struct pnv_phb *phb, int pe_no);
@@ -189,8 +188,6 @@ extern void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr);
 extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
 extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
 
-extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
-extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
 extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
 extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
 extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 8349860..11fdae8 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -233,6 +233,10 @@ static void  __noreturn pnv_restart(char *cmd)
 			rc = opal_cec_reboot();
 		else if (strcmp(cmd, "full") == 0)
 			rc = opal_cec_reboot2(OPAL_REBOOT_FULL_IPL, NULL);
+		else if (strcmp(cmd, "mpipl") == 0)
+			rc = opal_cec_reboot2(OPAL_REBOOT_MPIPL, NULL);
+		else if (strcmp(cmd, "error") == 0)
+			rc = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, NULL);
 		else
 			rc = OPAL_UNSUPPORTED;
 
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 8108b9b..b293689 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -200,10 +200,6 @@ static void __init ps3_setup_arch(void)
 	smp_init_ps3();
 #endif
 
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
-
 	prealloc_ps3fb_videomemory();
 	prealloc_ps3flash_bounce_buffer();
 
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 595e9f8..24c1836 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -21,7 +21,6 @@
 	select PPC_DOORBELL
 	select HOTPLUG_CPU
 	select ARCH_RANDOM
-	select PPC_DOORBELL
 	select FORCE_SMP
 	select SWIOTLB
 	default y
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index d4a8f17..3e49cc2 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -22,6 +22,7 @@
 #include <asm/firmware.h>
 #include <asm/prom.h>
 #include <asm/udbg.h>
+#include <asm/svm.h>
 
 #include "pseries.h"
 
@@ -55,7 +56,8 @@ hypertas_fw_features_table[] = {
 	{FW_FEATURE_LLAN,		"hcall-lLAN"},
 	{FW_FEATURE_BULK_REMOVE,	"hcall-bulk"},
 	{FW_FEATURE_XDABR,		"hcall-xdabr"},
-	{FW_FEATURE_MULTITCE,		"hcall-multi-tce"},
+	{FW_FEATURE_PUT_TCE_IND | FW_FEATURE_STUFF_TCE,
+					"hcall-multi-tce"},
 	{FW_FEATURE_SPLPAR,		"hcall-splpar"},
 	{FW_FEATURE_VPHN,		"hcall-vphn"},
 	{FW_FEATURE_SET_MODE,		"hcall-set-mode"},
@@ -100,6 +102,12 @@ static void __init fw_hypertas_feature_init(const char *hypertas,
 		}
 	}
 
+	if (is_secure_guest() &&
+	    (powerpc_firmware_features & FW_FEATURE_PUT_TCE_IND)) {
+		powerpc_firmware_features &= ~FW_FEATURE_PUT_TCE_IND;
+		pr_debug("SVM: disabling PUT_TCE_IND firmware feature\n");
+	}
+
 	pr_debug(" <- fw_hypertas_feature_init()\n");
 }
 
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index c126b94..a4d40a3 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -360,8 +360,10 @@ static bool lmb_is_removable(struct drmem_lmb *lmb)
 
 	for (i = 0; i < scns_per_block; i++) {
 		pfn = PFN_DOWN(phys_addr);
-		if (!pfn_present(pfn))
+		if (!pfn_present(pfn)) {
+			phys_addr += MIN_MEMORY_BLOCK_SIZE;
 			continue;
+		}
 
 		rc = rc && is_mem_section_removable(pfn, PAGES_PER_SECTION);
 		phys_addr += MIN_MEMORY_BLOCK_SIZE;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 6ba081d..2e0a8ea 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -36,7 +36,6 @@
 #include <asm/udbg.h>
 #include <asm/mmzone.h>
 #include <asm/plpar_wrappers.h>
-#include <asm/svm.h>
 
 #include "pseries.h"
 
@@ -133,10 +132,10 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
 	return be64_to_cpu(*tcep);
 }
 
-static void tce_free_pSeriesLP(struct iommu_table*, long, long);
+static void tce_free_pSeriesLP(unsigned long liobn, long, long);
 static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
 
-static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
+static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
 				long npages, unsigned long uaddr,
 				enum dma_data_direction direction,
 				unsigned long attrs)
@@ -147,25 +146,25 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	int ret = 0;
 	long tcenum_start = tcenum, npages_start = npages;
 
-	rpn = __pa(uaddr) >> TCE_SHIFT;
+	rpn = __pa(uaddr) >> tceshift;
 	proto_tce = TCE_PCI_READ;
 	if (direction != DMA_TO_DEVICE)
 		proto_tce |= TCE_PCI_WRITE;
 
 	while (npages--) {
-		tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
-		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);
+		tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
+		rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
 
 		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
 			ret = (int)rc;
-			tce_free_pSeriesLP(tbl, tcenum_start,
+			tce_free_pSeriesLP(liobn, tcenum_start,
 			                   (npages_start - (npages + 1)));
 			break;
 		}
 
 		if (rc && printk_ratelimit()) {
 			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
-			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
+			printk("\tindex   = 0x%llx\n", (u64)liobn);
 			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
 			printk("\ttce val = 0x%llx\n", tce );
 			dump_stack();
@@ -193,8 +192,9 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	int ret = 0;
 	unsigned long flags;
 
-	if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
-		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
+	if ((npages == 1) || !firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
+		return tce_build_pSeriesLP(tbl->it_index, tcenum,
+					   tbl->it_page_shift, npages, uaddr,
 		                           direction, attrs);
 	}
 
@@ -210,8 +210,9 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 		/* If allocation fails, fall back to the loop implementation */
 		if (!tcep) {
 			local_irq_restore(flags);
-			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
-					    direction, attrs);
+			return tce_build_pSeriesLP(tbl->it_index, tcenum,
+					tbl->it_page_shift,
+					npages, uaddr, direction, attrs);
 		}
 		__this_cpu_write(tce_page, tcep);
 	}
@@ -262,16 +263,16 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	return ret;
 }
 
-static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
+static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
 {
 	u64 rc;
 
 	while (npages--) {
-		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
+		rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);
 
 		if (rc && printk_ratelimit()) {
 			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
-			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
+			printk("\tindex   = 0x%llx\n", (u64)liobn);
 			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
 			dump_stack();
 		}
@@ -285,8 +286,8 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
 {
 	u64 rc;
 
-	if (!firmware_has_feature(FW_FEATURE_MULTITCE))
-		return tce_free_pSeriesLP(tbl, tcenum, npages);
+	if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
+		return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);
 
 	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
 
@@ -401,6 +402,19 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
 	u64 rc = 0;
 	long l, limit;
 
+	if (!firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
+		unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
+		unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
+				be64_to_cpu(maprange->dma_base);
+		unsigned long tcenum = dmastart >> tceshift;
+		unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
+		void *uaddr = __va(start_pfn << PAGE_SHIFT);
+
+		return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
+				tcenum, tceshift, npages, (unsigned long) uaddr,
+				DMA_BIDIRECTIONAL, 0);
+	}
+
 	local_irq_disable();	/* to protect tcep and the page behind it */
 	tcep = __this_cpu_read(tce_page);
 
@@ -1320,24 +1334,18 @@ void iommu_init_early_pSeries(void)
 	of_reconfig_notifier_register(&iommu_reconfig_nb);
 	register_memory_notifier(&iommu_mem_nb);
 
-	/*
-	 * Secure guest memory is inacessible to devices so regular DMA isn't
-	 * possible.
-	 *
-	 * In that case keep devices' dma_map_ops as NULL so that the generic
-	 * DMA code path will use SWIOTLB to bounce buffers for DMA.
-	 */
-	if (!is_secure_guest())
-		set_pci_dma_ops(&dma_iommu_ops);
+	set_pci_dma_ops(&dma_iommu_ops);
 }
 
 static int __init disable_multitce(char *str)
 {
 	if (strcmp(str, "off") == 0 &&
 	    firmware_has_feature(FW_FEATURE_LPAR) &&
-	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
+	    (firmware_has_feature(FW_FEATURE_PUT_TCE_IND) ||
+	     firmware_has_feature(FW_FEATURE_STUFF_TCE))) {
 		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
-		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
+		powerpc_firmware_features &=
+			~(FW_FEATURE_PUT_TCE_IND | FW_FEATURE_STUFF_TCE);
 	}
 	return 1;
 }
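
The iommu changes above split FW_FEATURE_MULTITCE into FW_FEATURE_PUT_TCE_IND (H_PUT_TCE_INDIRECT) and FW_FEATURE_STUFF_TCE (H_STUFF_TCE), and make tce_build_pSeriesLP() take the window handle (liobn) and page shift directly, so TCEs can be installed for a dynamic DMA window before its iommu_table exists. A hedged sketch of mapping one page from just those two values; the wrapper name is hypothetical, the hcall and masks are the ones used in the file above:

	static long demo_map_one_page(unsigned long liobn, unsigned long tceshift,
				      unsigned long tcenum, void *page)
	{
		u64 rpn = __pa(page) >> tceshift;
		u64 tce = TCE_PCI_READ | TCE_PCI_WRITE |
			  ((rpn & TCE_RPN_MASK) << tceshift);

		/* one H_PUT_TCE per page; no iommu_table needed */
		return plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
	}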
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 60cb29a..3c3da25 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -582,12 +582,12 @@ static int vcpudispatch_stats_open(struct inode *inode, struct file *file)
 	return single_open(file, vcpudispatch_stats_display, NULL);
 }
 
-static const struct file_operations vcpudispatch_stats_proc_ops = {
-	.open		= vcpudispatch_stats_open,
-	.read		= seq_read,
-	.write		= vcpudispatch_stats_write,
-	.llseek		= seq_lseek,
-	.release	= single_release,
+static const struct proc_ops vcpudispatch_stats_proc_ops = {
+	.proc_open	= vcpudispatch_stats_open,
+	.proc_read	= seq_read,
+	.proc_write	= vcpudispatch_stats_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
 };
 
 static ssize_t vcpudispatch_stats_freq_write(struct file *file,
@@ -626,12 +626,12 @@ static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file)
 	return single_open(file, vcpudispatch_stats_freq_display, NULL);
 }
 
-static const struct file_operations vcpudispatch_stats_freq_proc_ops = {
-	.open		= vcpudispatch_stats_freq_open,
-	.read		= seq_read,
-	.write		= vcpudispatch_stats_freq_write,
-	.llseek		= seq_lseek,
-	.release	= single_release,
+static const struct proc_ops vcpudispatch_stats_freq_proc_ops = {
+	.proc_open	= vcpudispatch_stats_freq_open,
+	.proc_read	= seq_read,
+	.proc_write	= vcpudispatch_stats_freq_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
 };
 
 static int __init vcpudispatch_stats_procfs_init(void)
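
The vcpudispatch_stats conversion above is the mechanical file_operations to proc_ops switch that runs through this series: the seq_file helpers stay the same, only the ops structure and its member names change. A minimal sketch of the pattern with hypothetical "demo" names:

	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int demo_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "hello\n");
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, NULL);
	}

	static const struct proc_ops demo_proc_ops = {
		.proc_open	= demo_open,
		.proc_read	= seq_read,
		.proc_lseek	= seq_lseek,
		.proc_release	= single_release,
	};

	/* registration: proc_create("powerpc/demo", 0444, NULL, &demo_proc_ops); */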
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index e33e8bc..b8d28ab 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -435,10 +435,10 @@ static void maxmem_data(struct seq_file *m)
 {
 	unsigned long maxmem = 0;
 
-	maxmem += drmem_info->n_lmbs * drmem_info->lmb_size;
+	maxmem += (unsigned long)drmem_info->n_lmbs * drmem_info->lmb_size;
 	maxmem += hugetlb_total_pages() * PAGE_SIZE;
 
-	seq_printf(m, "MaxMem=%ld\n", maxmem);
+	seq_printf(m, "MaxMem=%lu\n", maxmem);
 }
 
 static int pseries_lparcfg_data(struct seq_file *m, void *v)
@@ -698,12 +698,12 @@ static int lparcfg_open(struct inode *inode, struct file *file)
 	return single_open(file, lparcfg_data, NULL);
 }
 
-static const struct file_operations lparcfg_fops = {
-	.read		= seq_read,
-	.write		= lparcfg_write,
-	.open		= lparcfg_open,
-	.release	= single_release,
-	.llseek		= seq_lseek,
+static const struct proc_ops lparcfg_proc_ops = {
+	.proc_read	= seq_read,
+	.proc_write	= lparcfg_write,
+	.proc_open	= lparcfg_open,
+	.proc_release	= single_release,
+	.proc_lseek	= seq_lseek,
 };
 
 static int __init lparcfg_init(void)
@@ -714,7 +714,7 @@ static int __init lparcfg_init(void)
 	if (firmware_has_feature(FW_FEATURE_SPLPAR))
 		mode |= 0200;
 
-	if (!proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops)) {
+	if (!proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_proc_ops)) {
 		printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
 		return -EIO;
 	}
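
Besides the same proc_ops conversion, the maxmem_data() hunk widens the multiplication before it can wrap: with two 32-bit operands the product is computed in 32 bits and only then assigned to unsigned long, so large LMB counts were silently truncated. A standalone illustration (the values are made up) of why the cast matters on a 64-bit build:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t n_lmbs = 4096, lmb_size = 0x10000000;	/* 256 MiB LMBs, illustrative */
		unsigned long wrong = n_lmbs * lmb_size;		/* 32-bit multiply, truncates */
		unsigned long right = (unsigned long)n_lmbs * lmb_size;

		printf("wrong=%lu right=%lu\n", wrong, right);	/* wrong=0, right=2^40 */
		return 0;
	}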
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index c2ef320..0b4467e 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -69,7 +69,8 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
 		return rc;
 
 	p->bound_addr = saved;
-	dev_dbg(&p->pdev->dev, "bound drc 0x%x to %pR\n", p->drc_index, &p->res);
+	dev_dbg(&p->pdev->dev, "bound drc 0x%x to 0x%lx\n",
+		p->drc_index, (unsigned long)saved);
 	return rc;
 }
 
@@ -133,7 +134,7 @@ static int drc_pmem_query_n_bind(struct papr_scm_priv *p)
 		goto err_out;
 
 	p->bound_addr = start_addr;
-	dev_dbg(&p->pdev->dev, "bound drc 0x%x to %pR\n", p->drc_index, &p->res);
+	dev_dbg(&p->pdev->dev, "bound drc 0x%x to 0x%lx\n", p->drc_index, start_addr);
 	return rc;
 
 err_out:
@@ -322,6 +323,7 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
 	p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
 	if (!p->bus) {
 		dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
+		kfree(p->bus_desc.provider_name);
 		return -ENXIO;
 	}
 
@@ -356,7 +358,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
 	ndr_desc.mapping = &mapping;
 	ndr_desc.num_mappings = 1;
 	ndr_desc.nd_set = &p->nd_set;
-	set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
 
 	if (p->is_volatile)
 		p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
@@ -477,6 +478,7 @@ static int papr_scm_remove(struct platform_device *pdev)
 
 	nvdimm_bus_unregister(p->bus);
 	drc_pmem_unbind(p);
+	kfree(p->bus_desc.provider_name);
 	kfree(p);
 
 	return 0;
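
The papr_scm hunks plug a leak of the duplicated provider_name string: it is now freed both when nvdimm_bus_register() fails and on device removal. A hedged sketch of the ownership rule being enforced, with hypothetical names:

	#include <linux/errno.h>
	#include <linux/slab.h>

	struct demo_priv {
		char *provider_name;
		void *bus;
	};

	void *demo_bus_register(struct demo_priv *p);	/* hypothetical */

	static int demo_init(struct demo_priv *p, const char *name)
	{
		p->provider_name = kstrdup(name, GFP_KERNEL);
		if (!p->provider_name)
			return -ENOMEM;

		p->bus = demo_bus_register(p);
		if (!p->bus) {
			kfree(p->provider_name);	/* same cleanup as the error path above */
			return -ENXIO;
		}
		return 0;
	}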
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 7228309..911534b 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -192,7 +192,7 @@ int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
 int pseries_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
 {
 	/* Allocate PCI data */
-	add_dev_pci_data(pdev);
+	add_sriov_vf_pdns(pdev);
 	return pseries_pci_sriov_enable(pdev, num_vfs);
 }
 
@@ -204,7 +204,7 @@ int pseries_pcibios_sriov_disable(struct pci_dev *pdev)
 	/* Releasing pe_num_map */
 	kfree(pdn->pe_num_map);
 	/* Release PCI data */
-	remove_dev_pci_data(pdev);
+	remove_sriov_vf_pdns(pdev);
 	pci_vf_drivers_autoprobe(pdev, true);
 	return 0;
 }
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 8a9c4fb..7f7369f 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -391,9 +391,9 @@ static ssize_t ofdt_write(struct file *file, const char __user *buf, size_t coun
 	return rv ? rv : count;
 }
 
-static const struct file_operations ofdt_fops = {
-	.write = ofdt_write,
-	.llseek = noop_llseek,
+static const struct proc_ops ofdt_proc_ops = {
+	.proc_write	= ofdt_write,
+	.proc_lseek	= noop_llseek,
 };
 
 /* create /proc/powerpc/ofdt write-only by root */
@@ -401,7 +401,7 @@ static int proc_ppc64_create_ofdt(void)
 {
 	struct proc_dir_entry *ent;
 
-	ent = proc_create("powerpc/ofdt", 0200, NULL, &ofdt_fops);
+	ent = proc_create("powerpc/ofdt", 0200, NULL, &ofdt_proc_ops);
 	if (ent)
 		proc_set_size(ent, 0);
 
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
index a000128..2879c4f 100644
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ b/arch/powerpc/platforms/pseries/scanlog.c
@@ -152,13 +152,12 @@ static int scanlog_release(struct inode * inode, struct file * file)
 	return 0;
 }
 
-static const struct file_operations scanlog_fops = {
-	.owner		= THIS_MODULE,
-	.read		= scanlog_read,
-	.write		= scanlog_write,
-	.open		= scanlog_open,
-	.release	= scanlog_release,
-	.llseek		= noop_llseek,
+static const struct proc_ops scanlog_proc_ops = {
+	.proc_read	= scanlog_read,
+	.proc_write	= scanlog_write,
+	.proc_open	= scanlog_open,
+	.proc_release	= scanlog_release,
+	.proc_lseek	= noop_llseek,
 };
 
 static int __init scanlog_init(void)
@@ -176,7 +175,7 @@ static int __init scanlog_init(void)
 		goto err;
 
 	ent = proc_create("powerpc/rtas/scan-log-dump", 0400, NULL,
-			  &scanlog_fops);
+			  &scanlog_proc_ops);
 	if (!ent)
 		goto err;
 	return 0;
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 79e2287..f682b7b 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -1176,6 +1176,8 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
 	if (tbl == NULL)
 		return NULL;
 
+	kref_init(&tbl->it_kref);
+
 	of_parse_dma_window(dev->dev.of_node, dma_window,
 			    &tbl->it_index, &offset, &size);
 
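
vio_build_iommu_table() now initializes the table's kref, matching the reference counting the generic iommu_table code expects. A minimal sketch of the kref pattern, using hypothetical "demo" names:

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct demo_obj {
		struct kref kref;
	};

	static void demo_release(struct kref *kref)
	{
		kfree(container_of(kref, struct demo_obj, kref));
	}

	static struct demo_obj *demo_alloc(void)
	{
		struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (obj)
			kref_init(&obj->kref);	/* refcount starts at 1 */
		return obj;
	}

	/* callers: kref_get(&obj->kref); ... kref_put(&obj->kref, demo_release); */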
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 617a443..4a8874b 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -1065,13 +1065,11 @@ int fsl_pci_mcheck_exception(struct pt_regs *regs)
 	addr += mfspr(SPRN_MCAR);
 
 	if (is_in_pci_mem_space(addr)) {
-		if (user_mode(regs)) {
-			pagefault_disable();
-			ret = get_user(inst, (__u32 __user *)regs->nip);
-			pagefault_enable();
-		} else {
+		if (user_mode(regs))
+			ret = probe_user_read(&inst, (void __user *)regs->nip,
+					      sizeof(inst));
+		else
 			ret = probe_kernel_address((void *)regs->nip, inst);
-		}
 
 		if (!ret && mcheck_handle_load(regs, inst)) {
 			regs->nip += 4;
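
The fsl_pci hunk replaces the open-coded pagefault_disable()/get_user() sequence with probe_user_read(), which performs the same non-faulting read of the user instruction word; kernel-mode fetches keep using probe_kernel_address(). A hedged sketch of the resulting fetch helper, assuming the same headers as the file above (the wrapper name is hypothetical):

	static int demo_fetch_insn(struct pt_regs *regs, u32 *inst)
	{
		if (user_mode(regs))
			return probe_user_read(inst, (void __user *)regs->nip,
					       sizeof(*inst));
		return probe_kernel_address((void *)regs->nip, *inst);
	}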
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 934a773..a3a72b7 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -964,7 +964,7 @@ static struct irq_chip mpic_irq_chip = {
 };
 
 #ifdef CONFIG_SMP
-static struct irq_chip mpic_ipi_chip = {
+static const struct irq_chip mpic_ipi_chip = {
 	.irq_mask	= mpic_mask_ipi,
 	.irq_unmask	= mpic_unmask_ipi,
 	.irq_eoi	= mpic_end_ipi,
@@ -978,7 +978,7 @@ static struct irq_chip mpic_tm_chip = {
 };
 
 #ifdef CONFIG_MPIC_U3_HT_IRQS
-static struct irq_chip mpic_irq_ht_chip = {
+static const struct irq_chip mpic_irq_ht_chip = {
 	.irq_startup	= mpic_startup_ht_irq,
 	.irq_shutdown	= mpic_shutdown_ht_irq,
 	.irq_mask	= mpic_mask_irq,
diff --git a/arch/powerpc/tools/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh
index 7b9fe0a..014e00e 100755
--- a/arch/powerpc/tools/relocs_check.sh
+++ b/arch/powerpc/tools/relocs_check.sh
@@ -10,14 +10,21 @@
 # based on relocs_check.pl
 # Copyright © 2009 IBM Corporation
 
-if [ $# -lt 2 ]; then
-	echo "$0 [path to objdump] [path to vmlinux]" 1>&2
+if [ $# -lt 3 ]; then
+	echo "$0 [path to objdump] [path to nm] [path to vmlinux]" 1>&2
 	exit 1
 fi
 
-# Have Kbuild supply the path to objdump so we handle cross compilation.
+# Have Kbuild supply the path to objdump and nm so we handle cross compilation.
 objdump="$1"
-vmlinux="$2"
+nm="$2"
+vmlinux="$3"
+
+# Remove from the bad relocations those that match an undefined weak symbol
+# which will result in an absolute relocation to 0.
+# Weak unresolved symbols have the following form in nm output:
+# "                  w _binary__btf_vmlinux_bin_end"
+undef_weak_symbols=$($nm "$vmlinux" | awk '$1 ~ /w/ { print $2 }')
 
 bad_relocs=$(
 $objdump -R "$vmlinux" |
@@ -26,8 +33,6 @@
 	# These relocations are okay
 	# On PPC64:
 	#	R_PPC64_RELATIVE, R_PPC64_NONE
-	#	R_PPC64_ADDR64 mach_<name>
-	#	R_PPC64_ADDR64 __crc_<name>
 	# On PPC:
 	#	R_PPC_RELATIVE, R_PPC_ADDR16_HI,
 	#	R_PPC_ADDR16_HA,R_PPC_ADDR16_LO,
@@ -39,8 +44,7 @@
 R_PPC_ADDR16_HA
 R_PPC_RELATIVE
 R_PPC_NONE' |
-	grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_' |
-	grep -E -v '\<R_PPC64_ADDR64[[:space:]]+__crc_'
+	([ "$undef_weak_symbols" ] && grep -F -w -v "$undef_weak_symbols" || cat)
 )
 
 if [ -z "$bad_relocs" ]; then
diff --git a/arch/powerpc/xmon/dis-asm.h b/arch/powerpc/xmon/dis-asm.h
index c4d246e..c4c982d 100644
--- a/arch/powerpc/xmon/dis-asm.h
+++ b/arch/powerpc/xmon/dis-asm.h
@@ -13,13 +13,13 @@ extern int print_insn_spu(unsigned long insn, unsigned long memaddr);
 #else
 static inline int print_insn_powerpc(unsigned long insn, unsigned long memaddr)
 {
-	printf("%.8x", insn);
+	printf("%.8lx", insn);
 	return 0;
 }
 
 static inline int print_insn_spu(unsigned long insn, unsigned long memaddr)
 {
-	printf("%.8x", insn);
+	printf("%.8lx", insn);
 	return 0;
 }
 #endif
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a705604..e8c84d26 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1192,16 +1192,19 @@ static int do_step(struct pt_regs *regs)
 
 static void bootcmds(void)
 {
+	char tmp[64];
 	int cmd;
 
 	cmd = inchar();
-	if (cmd == 'r')
-		ppc_md.restart(NULL);
-	else if (cmd == 'h')
+	if (cmd == 'r') {
+		getstring(tmp, 64);
+		ppc_md.restart(tmp);
+	} else if (cmd == 'h') {
 		ppc_md.halt();
-	else if (cmd == 'p')
+	} else if (cmd == 'p') {
 		if (pm_power_off)
 			pm_power_off();
+	}
 }
 
 static int cpu_cmd(void)
@@ -1949,15 +1952,14 @@ static void dump_300_sprs(void)
 
 	printf("pidr   = %.16lx  tidr  = %.16lx\n",
 		mfspr(SPRN_PID), mfspr(SPRN_TIDR));
-	printf("asdr   = %.16lx  psscr = %.16lx\n",
-		mfspr(SPRN_ASDR), hv ? mfspr(SPRN_PSSCR)
-					: mfspr(SPRN_PSSCR_PR));
+	printf("psscr  = %.16lx\n",
+		hv ? mfspr(SPRN_PSSCR) : mfspr(SPRN_PSSCR_PR));
 
 	if (!hv)
 		return;
 
-	printf("ptcr   = %.16lx\n",
-		mfspr(SPRN_PTCR));
+	printf("ptcr   = %.16lx  asdr  = %.16lx\n",
+		mfspr(SPRN_PTCR), mfspr(SPRN_ASDR));
 #endif
 }
 
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index fa7dc03..73f029e 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -12,8 +12,6 @@
 
 config RISCV
 	def_bool y
-	# even on 32-bit, physical (and DMA) addresses are > 32-bits
-	select PHYS_ADDR_T_64BIT
 	select OF
 	select OF_EARLY_FLATTREE
 	select OF_IRQ
@@ -57,6 +55,7 @@
 	select GENERIC_ARCH_TOPOLOGY if SMP
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_MMIOWB
+	select ARCH_HAS_DEBUG_VIRTUAL
 	select HAVE_EBPF_JIT if 64BIT
 	select EDAC_SUPPORT
 	select ARCH_HAS_GIGANTIC_PAGE
@@ -66,6 +65,7 @@
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select HAVE_COPY_THREAD_TLS
+	select HAVE_ARCH_KASAN if MMU && 64BIT
 
 config ARCH_MMAP_RND_BITS_MIN
 	default 18 if 64BIT
diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
index a2e3d54..7db8610 100644
--- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
@@ -268,6 +268,19 @@ l2cache: cache-controller@2010000 {
 			interrupts = <1 2 3>;
 			reg = <0x0 0x2010000 0x0 0x1000>;
 		};
-
+		gpio: gpio@10060000 {
+			compatible = "sifive,fu540-c000-gpio", "sifive,gpio0";
+			interrupt-parent = <&plic0>;
+			interrupts = <7>, <8>, <9>, <10>, <11>, <12>, <13>,
+				     <14>, <15>, <16>, <17>, <18>, <19>, <20>,
+				     <21>, <22>;
+			reg = <0x0 0x10060000 0x0 0x1000>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&prci PRCI_CLK_TLCLK>;
+			status = "disabled";
+		};
 	};
 };
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
index 88cfcb9..609198c 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -94,3 +94,7 @@ &pwm0 {
 &pwm1 {
 	status = "okay";
 };
+
+&gpio {
+	status = "okay";
+};
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 1efaedd..ec0ca8c 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -7,7 +7,6 @@
 generic-y += extable.h
 generic-y += flat.h
 generic-y += dma.h
-generic-y += dma-contiguous.h
 generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += exec.h
diff --git a/arch/riscv/include/asm/image.h b/arch/riscv/include/asm/image.h
index 7b0f92b..e0b319a 100644
--- a/arch/riscv/include/asm/image.h
+++ b/arch/riscv/include/asm/image.h
@@ -42,7 +42,7 @@
  * @res2:		reserved
  * @magic:		Magic number (RISC-V specific; deprecated)
  * @magic2:		Magic number 2 (to match the ARM64 'magic' field pos)
- * @res4:		reserved (will be used for PE COFF offset)
+ * @res3:		reserved (will be used for PE COFF offset)
  *
  * The intention is for this header format to be shared between multiple
  * architectures to avoid a proliferation of image header formats.
@@ -59,7 +59,7 @@ struct riscv_image_header {
 	u64 res2;
 	u64 magic;
 	u32 magic2;
-	u32 res4;
+	u32 res3;
 };
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_RISCV_IMAGE_H */
diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h
new file mode 100644
index 0000000..eee6e65
--- /dev/null
+++ b/arch/riscv/include/asm/kasan.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 Andes Technology Corporation */
+
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <asm/pgtable.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT	3
+
+#define KASAN_SHADOW_SIZE	(UL(1) << (38 - KASAN_SHADOW_SCALE_SHIFT))
+#define KASAN_SHADOW_START	0xffffffc000000000 /* 2^64 - 2^38 */
+#define KASAN_SHADOW_END	(KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+
+#define KASAN_SHADOW_OFFSET	(KASAN_SHADOW_END - (1ULL << \
+					(64 - KASAN_SHADOW_SCALE_SHIFT)))
+
+void kasan_init(void);
+asmlinkage void kasan_early_init(void);
+
+#endif
+#endif
+#endif /* __ASM_KASAN_H */
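
These constants feed the generic KASAN shadow mapping: with a scale shift of 3 one shadow byte tracks 8 bytes of memory, and the offset is chosen so that the top of the virtual address space lands exactly at KASAN_SHADOW_END. A hedged restatement of the formula as code, assuming the header above and mirroring the generic kasan_mem_to_shadow() helper:

	/* shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET */
	static inline unsigned long demo_mem_to_shadow(unsigned long addr)
	{
		return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
	}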
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index ac69924..8ca1930 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -100,8 +100,20 @@ extern unsigned long pfn_base;
 extern unsigned long max_low_pfn;
 extern unsigned long min_low_pfn;
 
-#define __pa(x)		((unsigned long)(x) - va_pa_offset)
-#define __va(x)		((void *)((unsigned long) (x) + va_pa_offset))
+#define __pa_to_va_nodebug(x)	((void *)((unsigned long) (x) + va_pa_offset))
+#define __va_to_pa_nodebug(x)	((unsigned long)(x) - va_pa_offset)
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern phys_addr_t __virt_to_phys(unsigned long x);
+extern phys_addr_t __phys_addr_symbol(unsigned long x);
+#else
+#define __virt_to_phys(x)	__va_to_pa_nodebug(x)
+#define __phys_addr_symbol(x)	__va_to_pa_nodebug(x)
+#endif /* CONFIG_DEBUG_VIRTUAL */
+
+#define __pa_symbol(x)	__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
+#define __pa(x)		__virt_to_phys((unsigned long)(x))
+#define __va(x)		((void *)__pa_to_va_nodebug((phys_addr_t)(x)))
 
 #define phys_to_pfn(phys)	(PFN_DOWN(phys))
 #define pfn_to_phys(pfn)	(PFN_PHYS(pfn))
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 7463098..b15f70a 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -43,6 +43,13 @@ static inline int pud_bad(pud_t pud)
 	return !pud_present(pud);
 }
 
+#define pud_leaf	pud_leaf
+static inline int pud_leaf(pud_t pud)
+{
+	return pud_present(pud) &&
+	       (pud_val(pud) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+}
+
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
 	*pudp = pud;
@@ -58,6 +65,11 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
 	return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
 }
 
+static inline struct page *pud_page(pud_t pud)
+{
+	return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
+}
+
 #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index f66b873..e430415 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -130,6 +130,13 @@ static inline int pmd_bad(pmd_t pmd)
 	return !pmd_present(pmd);
 }
 
+#define pmd_leaf	pmd_leaf
+static inline int pmd_leaf(pmd_t pmd)
+{
+	return pmd_present(pmd) &&
+	       (pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+}
+
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 	*pmdp = pmd;
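
pmd_leaf()/pud_leaf() let generic page-table walkers stop descending at huge mappings: on RISC-V an entry is a leaf when it is present and has any of the R/W/X permission bits set, otherwise it points to the next level. A simplified sketch of that test, assuming the RISC-V pgtable headers (hypothetical name, present check reduced to _PAGE_PRESENT):

	static inline bool demo_entry_is_leaf(unsigned long entry)
	{
		return (entry & _PAGE_PRESENT) &&
		       (entry & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
	}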
diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
index 1b5d445..924af13 100644
--- a/arch/riscv/include/asm/string.h
+++ b/arch/riscv/include/asm/string.h
@@ -11,8 +11,17 @@
 
 #define __HAVE_ARCH_MEMSET
 extern asmlinkage void *memset(void *, int, size_t);
+extern asmlinkage void *__memset(void *, int, size_t);
 
 #define __HAVE_ARCH_MEMCPY
 extern asmlinkage void *memcpy(void *, const void *, size_t);
+extern asmlinkage void *__memcpy(void *, const void *, size_t);
 
+/* For those files which don't want to check by kasan. */
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#endif
 #endif /* _ASM_RISCV_STRING_H */
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index a4242be..271860f 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -121,6 +121,9 @@
 	sw zero, TASK_TI_CPU(tp)
 	la sp, init_thread_union + THREAD_SIZE
 
+#ifdef CONFIG_KASAN
+	call kasan_early_init
+#endif
 	/* Start the kernel */
 	call parse_dtb
 	tail start_kernel
diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c
index 2a02b7e..450492e 100644
--- a/arch/riscv/kernel/riscv_ksyms.c
+++ b/arch/riscv/kernel/riscv_ksyms.c
@@ -11,3 +11,5 @@
  */
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 365ff84..0a6d415 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -24,6 +24,7 @@
 #include <asm/smp.h>
 #include <asm/tlbflush.h>
 #include <asm/thread_info.h>
+#include <asm/kasan.h>
 
 #include "head.h"
 
@@ -74,12 +75,12 @@ void __init setup_arch(char **cmdline_p)
 	swiotlb_init(1);
 #endif
 
-#ifdef CONFIG_SMP
-	setup_smp();
+#ifdef CONFIG_KASAN
+	kasan_init();
 #endif
 
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
+#ifdef CONFIG_SMP
+	setup_smp();
 #endif
 
 	riscv_fill_hwcap();
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 12f42f9..1e0193d 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -46,6 +46,7 @@
 		KPROBES_TEXT
 		ENTRY_TEXT
 		IRQENTRY_TEXT
+		SOFTIRQENTRY_TEXT
 		*(.fixup)
 		_etext = .;
 	}
diff --git a/arch/riscv/lib/memcpy.S b/arch/riscv/lib/memcpy.S
index b4c4778..51ab716 100644
--- a/arch/riscv/lib/memcpy.S
+++ b/arch/riscv/lib/memcpy.S
@@ -7,7 +7,8 @@
 #include <asm/asm.h>
 
 /* void *memcpy(void *, const void *, size_t) */
-ENTRY(memcpy)
+ENTRY(__memcpy)
+WEAK(memcpy)
 	move t6, a0  /* Preserve return value */
 
 	/* Defer to byte-oriented copy for small sizes */
@@ -104,4 +105,4 @@
 	bltu a1, a3, 5b
 6:
 	ret
-END(memcpy)
+END(__memcpy)
diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S
index 5a7386b..34c5360 100644
--- a/arch/riscv/lib/memset.S
+++ b/arch/riscv/lib/memset.S
@@ -8,7 +8,8 @@
 #include <asm/asm.h>
 
 /* void *memset(void *, int, size_t) */
-ENTRY(memset)
+ENTRY(__memset)
+WEAK(memset)
 	move t0, a0  /* Preserve return value */
 
 	/* Defer to byte-oriented fill for small sizes */
@@ -109,4 +110,4 @@
 	bltu t0, a3, 5b
 6:
 	ret
-END(memset)
+END(__memset)
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index a1bd95c..50b7af5 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -15,3 +15,11 @@
 obj-$(CONFIG_SMP) += tlbflush.o
 endif
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_KASAN)   += kasan_init.o
+
+ifdef CONFIG_KASAN
+KASAN_SANITIZE_kasan_init.o := n
+KASAN_SANITIZE_init.o := n
+endif
+
+obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
new file mode 100644
index 0000000..f0cc860
--- /dev/null
+++ b/arch/riscv/mm/kasan_init.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Andes Technology Corporation
+
+#include <linux/pfn.h>
+#include <linux/init_task.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <asm/fixmap.h>
+
+extern pgd_t early_pg_dir[PTRS_PER_PGD];
+asmlinkage void __init kasan_early_init(void)
+{
+	uintptr_t i;
+	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);
+
+	for (i = 0; i < PTRS_PER_PTE; ++i)
+		set_pte(kasan_early_shadow_pte + i,
+			mk_pte(virt_to_page(kasan_early_shadow_page),
+			PAGE_KERNEL));
+
+	for (i = 0; i < PTRS_PER_PMD; ++i)
+		set_pmd(kasan_early_shadow_pmd + i,
+		 pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
+			__pgprot(_PAGE_TABLE)));
+
+	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
+	     i += PGDIR_SIZE, ++pgd)
+		set_pgd(pgd,
+		 pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
+			__pgprot(_PAGE_TABLE)));
+
+	/* init for swapper_pg_dir */
+	pgd = pgd_offset_k(KASAN_SHADOW_START);
+
+	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
+	     i += PGDIR_SIZE, ++pgd)
+		set_pgd(pgd,
+		 pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
+			__pgprot(_PAGE_TABLE)));
+
+	flush_tlb_all();
+}
+
+static void __init populate(void *start, void *end)
+{
+	unsigned long i;
+	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
+	unsigned long vend = PAGE_ALIGN((unsigned long)end);
+	unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
+	unsigned long n_pmds =
+		(n_pages % PTRS_PER_PTE) ? n_pages / PTRS_PER_PTE + 1 :
+						n_pages / PTRS_PER_PTE;
+	pgd_t *pgd = pgd_offset_k(vaddr);
+	pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE);
+	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+
+	for (i = 0; i < n_pages; i++) {
+		phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+
+		set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+	}
+
+	for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD)
+		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))),
+				__pgprot(_PAGE_TABLE)));
+
+	for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE)
+		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))),
+				__pgprot(_PAGE_TABLE)));
+
+	flush_tlb_all();
+	memset(start, 0, end - start);
+}
+
+void __init kasan_init(void)
+{
+	struct memblock_region *reg;
+	unsigned long i;
+
+	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
+			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+
+	for_each_memblock(memory, reg) {
+		void *start = (void *)__va(reg->base);
+		void *end = (void *)__va(reg->base + reg->size);
+
+		if (start >= end)
+			break;
+
+		populate(kasan_mem_to_shadow(start),
+			 kasan_mem_to_shadow(end));
+	};
+
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		set_pte(&kasan_early_shadow_pte[i],
+			mk_pte(virt_to_page(kasan_early_shadow_page),
+			__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)));
+
+	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+	init_task.kasan_depth = 0;
+}
diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c
new file mode 100644
index 0000000..e8e4dcd
--- /dev/null
+++ b/arch/riscv/mm/physaddr.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/types.h>
+#include <linux/mmdebug.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <asm/sections.h>
+
+phys_addr_t __virt_to_phys(unsigned long x)
+{
+	phys_addr_t y = x - PAGE_OFFSET;
+
+	/*
+	 * Boundary checking against the kernel linear mapping space.
+	 */
+	WARN(y >= KERN_VIRT_SIZE,
+	     "virt_to_phys used for non-linear address: %pK (%pS)\n",
+	     (void *)x, (void *)x);
+
+	return __va_to_pa_nodebug(x);
+}
+EXPORT_SYMBOL(__virt_to_phys);
+
+phys_addr_t __phys_addr_symbol(unsigned long x)
+{
+	unsigned long kernel_start = (unsigned long)PAGE_OFFSET;
+	unsigned long kernel_end = (unsigned long)_end;
+
+	/*
+	 * Boundary checking against the kernel image mapping.
+	 * __pa_symbol should only be used on kernel symbol addresses.
+	 */
+	VIRTUAL_BUG_ON(x < kernel_start || x > kernel_end);
+
+	return __va_to_pa_nodebug(x);
+}
+EXPORT_SYMBOL(__phys_addr_symbol);
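
With CONFIG_DEBUG_VIRTUAL enabled, __pa() now goes through the checked __virt_to_phys() above and WARNs on non-linear addresses, while kernel image symbols are expected to use __pa_symbol(). A hedged usage sketch (the function name is hypothetical):

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <asm/page.h>
	#include <asm/sections.h>

	static void demo_phys_lookups(void)
	{
		void *buf = kmalloc(64, GFP_KERNEL);
		phys_addr_t pbuf, ptext;

		if (!buf)
			return;
		pbuf = __pa(buf);		/* linear-mapped address: fine */
		ptext = __pa_symbol(_text);	/* image symbol: use __pa_symbol() */
		pr_info("buf=%pa _text=%pa\n", &pbuf, &ptext);
		kfree(buf);
	}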
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 287714d..8abe775 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -156,6 +156,7 @@
 	select HAVE_KERNEL_UNCOMPRESSED
 	select HAVE_KERNEL_XZ
 	select HAVE_KPROBES
+	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_KRETPROBES
 	select HAVE_KVM
 	select HAVE_LIVEPATCH
@@ -163,13 +164,13 @@
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_MEMBLOCK_PHYS_MAP
-	select HAVE_MMU_GATHER_NO_GATHER
+	select MMU_GATHER_NO_GATHER
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_NOP_MCOUNT
 	select HAVE_OPROFILE
 	select HAVE_PCI
 	select HAVE_PERF_EVENTS
-	select HAVE_RCU_TABLE_FREE
+	select MMU_GATHER_RCU_TABLE_FREE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE
 	select HAVE_RSEQ
diff --git a/arch/s390/boot/compressed/decompressor.c b/arch/s390/boot/compressed/decompressor.c
index 4504663..368fd37 100644
--- a/arch/s390/boot/compressed/decompressor.c
+++ b/arch/s390/boot/compressed/decompressor.c
@@ -30,13 +30,13 @@ extern unsigned char _compressed_start[];
 extern unsigned char _compressed_end[];
 
 #ifdef CONFIG_HAVE_KERNEL_BZIP2
-#define HEAP_SIZE	0x400000
+#define BOOT_HEAP_SIZE	0x400000
 #else
-#define HEAP_SIZE	0x10000
+#define BOOT_HEAP_SIZE	0x10000
 #endif
 
 static unsigned long free_mem_ptr = (unsigned long) _end;
-static unsigned long free_mem_end_ptr = (unsigned long) _end + HEAP_SIZE;
+static unsigned long free_mem_end_ptr = (unsigned long) _end + BOOT_HEAP_SIZE;
 
 #ifdef CONFIG_KERNEL_GZIP
 #include "../../../../lib/decompress_inflate.c"
@@ -62,7 +62,7 @@ static unsigned long free_mem_end_ptr = (unsigned long) _end + HEAP_SIZE;
 #include "../../../../lib/decompress_unxz.c"
 #endif
 
-#define decompress_offset ALIGN((unsigned long)_end + HEAP_SIZE, PAGE_SIZE)
+#define decompress_offset ALIGN((unsigned long)_end + BOOT_HEAP_SIZE, PAGE_SIZE)
 
 unsigned long mem_safe_offset(void)
 {
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 24ef67e..357adad 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -14,6 +14,7 @@
 char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
 struct ipl_parameter_block __bootdata_preserved(ipl_block);
 int __bootdata_preserved(ipl_block_valid);
+unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
 
 unsigned long __bootdata(vmalloc_size) = VMALLOC_DEFAULT_SIZE;
 unsigned long __bootdata(memory_end);
@@ -229,6 +230,19 @@ void parse_boot_command_line(void)
 		if (!strcmp(param, "vmalloc") && val)
 			vmalloc_size = round_up(memparse(val, NULL), PAGE_SIZE);
 
+		if (!strcmp(param, "dfltcc")) {
+			if (!strcmp(val, "off"))
+				zlib_dfltcc_support = ZLIB_DFLTCC_DISABLED;
+			else if (!strcmp(val, "on"))
+				zlib_dfltcc_support = ZLIB_DFLTCC_FULL;
+			else if (!strcmp(val, "def_only"))
+				zlib_dfltcc_support = ZLIB_DFLTCC_DEFLATE_ONLY;
+			else if (!strcmp(val, "inf_only"))
+				zlib_dfltcc_support = ZLIB_DFLTCC_INFLATE_ONLY;
+			else if (!strcmp(val, "always"))
+				zlib_dfltcc_support = ZLIB_DFLTCC_FULL_DEBUG;
+		}
+
 		if (!strcmp(param, "noexec")) {
 			rc = kstrtobool(val, &enabled);
 			if (!rc && !enabled)
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index e2a8578..f3caeb1 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -5,7 +5,7 @@
  * s390 implementation of the AES Cipher Algorithm with protected keys.
  *
  * s390 Version:
- *   Copyright IBM Corp. 2017,2019
+ *   Copyright IBM Corp. 2017,2020
  *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *		Harald Freudenberger <freude@de.ibm.com>
  */
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <linux/init.h>
+#include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/xts.h>
@@ -32,11 +33,11 @@
  * is called. As paes can handle different kinds of key blobs
  * and padding is also possible, the limits need to be generous.
  */
-#define PAES_MIN_KEYSIZE 64
-#define PAES_MAX_KEYSIZE 256
+#define PAES_MIN_KEYSIZE 16
+#define PAES_MAX_KEYSIZE 320
 
 static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
 
 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
 
@@ -53,19 +54,46 @@ struct key_blob {
 	unsigned int keylen;
 };
 
-static inline int _copy_key_to_kb(struct key_blob *kb,
-				  const u8 *key,
-				  unsigned int keylen)
+static inline int _key_to_kb(struct key_blob *kb,
+			     const u8 *key,
+			     unsigned int keylen)
 {
-	if (keylen <= sizeof(kb->keybuf))
+	struct clearkey_header {
+		u8  type;
+		u8  res0[3];
+		u8  version;
+		u8  res1[3];
+		u32 keytype;
+		u32 len;
+	} __packed * h;
+
+	switch (keylen) {
+	case 16:
+	case 24:
+	case 32:
+		/* clear key value, prepare pkey clear key token in keybuf */
+		memset(kb->keybuf, 0, sizeof(kb->keybuf));
+		h = (struct clearkey_header *) kb->keybuf;
+		h->version = 0x02; /* TOKVER_CLEAR_KEY */
+		h->keytype = (keylen - 8) >> 3;
+		h->len = keylen;
+		memcpy(kb->keybuf + sizeof(*h), key, keylen);
+		kb->keylen = sizeof(*h) + keylen;
 		kb->key = kb->keybuf;
-	else {
-		kb->key = kmalloc(keylen, GFP_KERNEL);
-		if (!kb->key)
-			return -ENOMEM;
+		break;
+	default:
+		/* other key material, let pkey handle this */
+		if (keylen <= sizeof(kb->keybuf))
+			kb->key = kb->keybuf;
+		else {
+			kb->key = kmalloc(keylen, GFP_KERNEL);
+			if (!kb->key)
+				return -ENOMEM;
+		}
+		memcpy(kb->key, key, keylen);
+		kb->keylen = keylen;
+		break;
 	}
-	memcpy(kb->key, key, keylen);
-	kb->keylen = keylen;
 
 	return 0;
 }
@@ -82,16 +110,18 @@ static inline void _free_kb_keybuf(struct key_blob *kb)
 struct s390_paes_ctx {
 	struct key_blob kb;
 	struct pkey_protkey pk;
+	spinlock_t pk_lock;
 	unsigned long fc;
 };
 
 struct s390_pxts_ctx {
 	struct key_blob kb[2];
 	struct pkey_protkey pk[2];
+	spinlock_t pk_lock;
 	unsigned long fc;
 };
 
-static inline int __paes_convert_key(struct key_blob *kb,
+static inline int __paes_keyblob2pkey(struct key_blob *kb,
 				     struct pkey_protkey *pk)
 {
 	int i, ret;
@@ -106,11 +136,42 @@ static inline int __paes_convert_key(struct key_blob *kb,
 	return ret;
 }
 
-static int __paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
+{
+	struct pkey_protkey pkey;
+
+	if (__paes_keyblob2pkey(&ctx->kb, &pkey))
+		return -EINVAL;
+
+	spin_lock_bh(&ctx->pk_lock);
+	memcpy(&ctx->pk, &pkey, sizeof(pkey));
+	spin_unlock_bh(&ctx->pk_lock);
+
+	return 0;
+}
+
+static int ecb_paes_init(struct crypto_skcipher *tfm)
+{
+	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	ctx->kb.key = NULL;
+	spin_lock_init(&ctx->pk_lock);
+
+	return 0;
+}
+
+static void ecb_paes_exit(struct crypto_skcipher *tfm)
+{
+	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	_free_kb_keybuf(&ctx->kb);
+}
+
+static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
 {
 	unsigned long fc;
 
-	if (__paes_convert_key(&ctx->kb, &ctx->pk))
+	if (__paes_convert_key(ctx))
 		return -EINVAL;
 
 	/* Pick the correct function code based on the protected key type */
@@ -124,22 +185,6 @@ static int __paes_set_key(struct s390_paes_ctx *ctx)
 	return ctx->fc ? 0 : -EINVAL;
 }
 
-static int ecb_paes_init(struct crypto_skcipher *tfm)
-{
-	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	ctx->kb.key = NULL;
-
-	return 0;
-}
-
-static void ecb_paes_exit(struct crypto_skcipher *tfm)
-{
-	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	_free_kb_keybuf(&ctx->kb);
-}
-
 static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 			    unsigned int key_len)
 {
@@ -147,11 +192,11 @@ static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	_free_kb_keybuf(&ctx->kb);
-	rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
+	rc = _key_to_kb(&ctx->kb, in_key, key_len);
 	if (rc)
 		return rc;
 
-	return __paes_set_key(ctx);
+	return __ecb_paes_set_key(ctx);
 }
 
 static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
@@ -161,18 +206,31 @@ static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 	struct skcipher_walk walk;
 	unsigned int nbytes, n, k;
 	int ret;
+	struct {
+		u8 key[MAXPROTKEYSIZE];
+	} param;
 
 	ret = skcipher_walk_virt(&walk, req, false);
+	if (ret)
+		return ret;
+
+	spin_lock_bh(&ctx->pk_lock);
+	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+	spin_unlock_bh(&ctx->pk_lock);
+
 	while ((nbytes = walk.nbytes) != 0) {
 		/* only use complete blocks */
 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
-		k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
+		k = cpacf_km(ctx->fc | modifier, &param,
 			     walk.dst.virt.addr, walk.src.virt.addr, n);
 		if (k)
 			ret = skcipher_walk_done(&walk, nbytes - k);
 		if (k < n) {
-			if (__paes_set_key(ctx) != 0)
+			if (__paes_convert_key(ctx))
 				return skcipher_walk_done(&walk, -EIO);
+			spin_lock_bh(&ctx->pk_lock);
+			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+			spin_unlock_bh(&ctx->pk_lock);
 		}
 	}
 	return ret;
@@ -210,6 +268,7 @@ static int cbc_paes_init(struct crypto_skcipher *tfm)
 	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	ctx->kb.key = NULL;
+	spin_lock_init(&ctx->pk_lock);
 
 	return 0;
 }
@@ -221,11 +280,11 @@ static void cbc_paes_exit(struct crypto_skcipher *tfm)
 	_free_kb_keybuf(&ctx->kb);
 }
 
-static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
 {
 	unsigned long fc;
 
-	if (__paes_convert_key(&ctx->kb, &ctx->pk))
+	if (__paes_convert_key(ctx))
 		return -EINVAL;
 
 	/* Pick the correct function code based on the protected key type */
@@ -246,7 +305,7 @@ static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	_free_kb_keybuf(&ctx->kb);
-	rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
+	rc = _key_to_kb(&ctx->kb, in_key, key_len);
 	if (rc)
 		return rc;
 
@@ -268,8 +327,12 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 	ret = skcipher_walk_virt(&walk, req, false);
 	if (ret)
 		return ret;
+
 	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
+	spin_lock_bh(&ctx->pk_lock);
 	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+	spin_unlock_bh(&ctx->pk_lock);
+
 	while ((nbytes = walk.nbytes) != 0) {
 		/* only use complete blocks */
 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
@@ -280,9 +343,11 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 			ret = skcipher_walk_done(&walk, nbytes - k);
 		}
 		if (k < n) {
-			if (__cbc_paes_set_key(ctx) != 0)
+			if (__paes_convert_key(ctx))
 				return skcipher_walk_done(&walk, -EIO);
+			spin_lock_bh(&ctx->pk_lock);
 			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+			spin_unlock_bh(&ctx->pk_lock);
 		}
 	}
 	return ret;
@@ -322,6 +387,7 @@ static int xts_paes_init(struct crypto_skcipher *tfm)
 
 	ctx->kb[0].key = NULL;
 	ctx->kb[1].key = NULL;
+	spin_lock_init(&ctx->pk_lock);
 
 	return 0;
 }
@@ -334,12 +400,27 @@ static void xts_paes_exit(struct crypto_skcipher *tfm)
 	_free_kb_keybuf(&ctx->kb[1]);
 }
 
-static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
+static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
+{
+	struct pkey_protkey pkey0, pkey1;
+
+	if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) ||
+	    __paes_keyblob2pkey(&ctx->kb[1], &pkey1))
+		return -EINVAL;
+
+	spin_lock_bh(&ctx->pk_lock);
+	memcpy(&ctx->pk[0], &pkey0, sizeof(pkey0));
+	memcpy(&ctx->pk[1], &pkey1, sizeof(pkey1));
+	spin_unlock_bh(&ctx->pk_lock);
+
+	return 0;
+}
+
+static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
 {
 	unsigned long fc;
 
-	if (__paes_convert_key(&ctx->kb[0], &ctx->pk[0]) ||
-	    __paes_convert_key(&ctx->kb[1], &ctx->pk[1]))
+	if (__xts_paes_convert_key(ctx))
 		return -EINVAL;
 
 	if (ctx->pk[0].type != ctx->pk[1].type)
@@ -371,10 +452,10 @@ static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 
 	_free_kb_keybuf(&ctx->kb[0]);
 	_free_kb_keybuf(&ctx->kb[1]);
-	rc = _copy_key_to_kb(&ctx->kb[0], in_key, key_len);
+	rc = _key_to_kb(&ctx->kb[0], in_key, key_len);
 	if (rc)
 		return rc;
-	rc = _copy_key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
+	rc = _key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
 	if (rc)
 		return rc;
 
@@ -416,15 +497,17 @@ static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 	ret = skcipher_walk_virt(&walk, req, false);
 	if (ret)
 		return ret;
+
 	keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
 	offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
-retry:
+
 	memset(&pcc_param, 0, sizeof(pcc_param));
 	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
+	spin_lock_bh(&ctx->pk_lock);
 	memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
-	cpacf_pcc(ctx->fc, pcc_param.key + offset);
-
 	memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
+	spin_unlock_bh(&ctx->pk_lock);
+	cpacf_pcc(ctx->fc, pcc_param.key + offset);
 	memcpy(xts_param.init, pcc_param.xts, 16);
 
 	while ((nbytes = walk.nbytes) != 0) {
@@ -435,11 +518,15 @@ static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 		if (k)
 			ret = skcipher_walk_done(&walk, nbytes - k);
 		if (k < n) {
-			if (__xts_paes_set_key(ctx) != 0)
+			if (__xts_paes_convert_key(ctx))
 				return skcipher_walk_done(&walk, -EIO);
-			goto retry;
+			spin_lock_bh(&ctx->pk_lock);
+			memcpy(xts_param.key + offset,
+			       ctx->pk[0].protkey, keylen);
+			spin_unlock_bh(&ctx->pk_lock);
 		}
 	}
+
 	return ret;
 }
 
@@ -476,6 +563,7 @@ static int ctr_paes_init(struct crypto_skcipher *tfm)
 	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	ctx->kb.key = NULL;
+	spin_lock_init(&ctx->pk_lock);
 
 	return 0;
 }
@@ -487,11 +575,11 @@ static void ctr_paes_exit(struct crypto_skcipher *tfm)
 	_free_kb_keybuf(&ctx->kb);
 }
 
-static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
 {
 	unsigned long fc;
 
-	if (__paes_convert_key(&ctx->kb, &ctx->pk))
+	if (__paes_convert_key(ctx))
 		return -EINVAL;
 
 	/* Pick the correct function code based on the protected key type */
@@ -513,7 +601,7 @@ static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	_free_kb_keybuf(&ctx->kb);
-	rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
+	rc = _key_to_kb(&ctx->kb, in_key, key_len);
 	if (rc)
 		return rc;
 
@@ -543,49 +631,65 @@ static int ctr_paes_crypt(struct skcipher_request *req)
 	struct skcipher_walk walk;
 	unsigned int nbytes, n, k;
 	int ret, locked;
-
-	locked = spin_trylock(&ctrblk_lock);
+	struct {
+		u8 key[MAXPROTKEYSIZE];
+	} param;
 
 	ret = skcipher_walk_virt(&walk, req, false);
+	if (ret)
+		return ret;
+
+	spin_lock_bh(&ctx->pk_lock);
+	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+	spin_unlock_bh(&ctx->pk_lock);
+
+	locked = mutex_trylock(&ctrblk_lock);
+
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 		n = AES_BLOCK_SIZE;
 		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
 			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
 		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
-		k = cpacf_kmctr(ctx->fc, ctx->pk.protkey, walk.dst.virt.addr,
+		k = cpacf_kmctr(ctx->fc, &param, walk.dst.virt.addr,
 				walk.src.virt.addr, n, ctrptr);
 		if (k) {
 			if (ctrptr == ctrblk)
 				memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
 				       AES_BLOCK_SIZE);
 			crypto_inc(walk.iv, AES_BLOCK_SIZE);
-			ret = skcipher_walk_done(&walk, nbytes - n);
+			ret = skcipher_walk_done(&walk, nbytes - k);
 		}
 		if (k < n) {
-			if (__ctr_paes_set_key(ctx) != 0) {
+			if (__paes_convert_key(ctx)) {
 				if (locked)
-					spin_unlock(&ctrblk_lock);
+					mutex_unlock(&ctrblk_lock);
 				return skcipher_walk_done(&walk, -EIO);
 			}
+			spin_lock_bh(&ctx->pk_lock);
+			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+			spin_unlock_bh(&ctx->pk_lock);
 		}
 	}
 	if (locked)
-		spin_unlock(&ctrblk_lock);
+		mutex_unlock(&ctrblk_lock);
 	/*
 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
 	 */
 	if (nbytes) {
 		while (1) {
-			if (cpacf_kmctr(ctx->fc, ctx->pk.protkey, buf,
+			if (cpacf_kmctr(ctx->fc, &param, buf,
 					walk.src.virt.addr, AES_BLOCK_SIZE,
 					walk.iv) == AES_BLOCK_SIZE)
 				break;
-			if (__ctr_paes_set_key(ctx) != 0)
+			if (__paes_convert_key(ctx))
 				return skcipher_walk_done(&walk, -EIO);
+			spin_lock_bh(&ctx->pk_lock);
+			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+			spin_unlock_bh(&ctx->pk_lock);
 		}
 		memcpy(walk.dst.virt.addr, buf, nbytes);
 		crypto_inc(walk.iv, AES_BLOCK_SIZE);
-		ret = skcipher_walk_done(&walk, 0);
+		ret = skcipher_walk_done(&walk, nbytes);
 	}
 
 	return ret;
@@ -618,12 +722,12 @@ static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
 
 static void paes_s390_fini(void)
 {
-	if (ctrblk)
-		free_page((unsigned long) ctrblk);
 	__crypto_unregister_skcipher(&ctr_paes_alg);
 	__crypto_unregister_skcipher(&xts_paes_alg);
 	__crypto_unregister_skcipher(&cbc_paes_alg);
 	__crypto_unregister_skcipher(&ecb_paes_alg);
+	if (ctrblk)
+		free_page((unsigned long) ctrblk);
 }
 
 static int __init paes_s390_init(void)
@@ -661,14 +765,14 @@ static int __init paes_s390_init(void)
 	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
 	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
 	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
-		ret = crypto_register_skcipher(&ctr_paes_alg);
-		if (ret)
-			goto out_err;
 		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
 		if (!ctrblk) {
 			ret = -ENOMEM;
 			goto out_err;
 		}
+		ret = crypto_register_skcipher(&ctr_paes_alg);
+		if (ret)
+			goto out_err;
 	}
 
 	return 0;
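
The paes rework keeps the protected key in the tfm context under a new spinlock, copies it into an on-stack parameter block for each request, and re-derives it from the key blob whenever CPACF processes fewer bytes than requested (the sign that the wrapping key changed, e.g. across suspend/resume); the shared ctr block moves from a spinlock to a mutex because that re-derivation can sleep. A hedged sketch of the per-request pattern, assuming the helpers defined above and hypothetical "demo" names:

	static int demo_process(struct s390_paes_ctx *ctx, u8 *dst, const u8 *src,
				unsigned int len)
	{
		struct {
			u8 key[MAXPROTKEYSIZE];
		} param;
		unsigned int done = 0, k;

		spin_lock_bh(&ctx->pk_lock);
		memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
		spin_unlock_bh(&ctx->pk_lock);

		while (done < len) {
			k = cpacf_km(ctx->fc, &param, dst + done, src + done,
				     len - done);
			done += k;
			if (done < len) {
				/* key rejected: re-derive and refresh the copy */
				if (__paes_convert_key(ctx))
					return -EIO;
				spin_lock_bh(&ctx->pk_lock);
				memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
				spin_unlock_bh(&ctx->pk_lock);
			}
		}
		return 0;
	}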
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 70139d0..5c97f48 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -209,17 +209,12 @@ static int hypfs_release(struct inode *inode, struct file *filp)
 
 enum { Opt_uid, Opt_gid, };
 
-static const struct fs_parameter_spec hypfs_param_specs[] = {
+static const struct fs_parameter_spec hypfs_fs_parameters[] = {
 	fsparam_u32("gid", Opt_gid),
 	fsparam_u32("uid", Opt_uid),
 	{}
 };
 
-static const struct fs_parameter_description hypfs_fs_parameters = {
-	.name		= "hypfs",
-	.specs		= hypfs_param_specs,
-};
-
 static int hypfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
 {
 	struct hypfs_sb_info *hypfs_info = fc->s_fs_info;
@@ -228,7 +223,7 @@ static int hypfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	kgid_t gid;
 	int opt;
 
-	opt = fs_parse(fc, &hypfs_fs_parameters, param, &result);
+	opt = fs_parse(fc, hypfs_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
 
@@ -455,7 +450,7 @@ static struct file_system_type hypfs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "s390_hypfs",
 	.init_fs_context = hypfs_init_fs_context,
-	.parameters	= &hypfs_fs_parameters,
+	.parameters	= hypfs_fs_parameters,
 	.kill_sb	= hypfs_kill_super
 };
 
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 2531f67..1832ae6 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -7,7 +7,6 @@
 generic-y += asm-offsets.h
 generic-y += cacheflush.h
 generic-y += device.h
-generic-y += dma-contiguous.h
 generic-y += dma-mapping.h
 generic-y += div64.h
 generic-y += emergency-restart.h
diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h
index c67b82d..de61ce5 100644
--- a/arch/s390/include/asm/archrandom.h
+++ b/arch/s390/include/asm/archrandom.h
@@ -21,29 +21,17 @@ extern atomic64_t s390_arch_random_counter;
 
 bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
 
-static inline bool arch_has_random(void)
+static inline bool __must_check arch_get_random_long(unsigned long *v)
 {
 	return false;
 }
 
-static inline bool arch_has_random_seed(void)
-{
-	if (static_branch_likely(&s390_arch_random_available))
-		return true;
-	return false;
-}
-
-static inline bool arch_get_random_long(unsigned long *v)
+static inline bool __must_check arch_get_random_int(unsigned int *v)
 {
 	return false;
 }
 
-static inline bool arch_get_random_int(unsigned int *v)
-{
-	return false;
-}
-
-static inline bool arch_get_random_seed_long(unsigned long *v)
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
 	if (static_branch_likely(&s390_arch_random_available)) {
 		return s390_arch_random_generate((u8 *)v, sizeof(*v));
@@ -51,7 +39,7 @@ static inline bool arch_get_random_seed_long(unsigned long *v)
 	return false;
 }
 
-static inline bool arch_get_random_seed_int(unsigned int *v)
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
 	if (static_branch_likely(&s390_arch_random_available)) {
 		return s390_arch_random_generate((u8 *)v, sizeof(*v));
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 63b46e3..9547cd5 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -177,11 +177,7 @@ static inline void __user *compat_ptr(compat_uptr_t uptr)
 {
 	return (void __user *)(unsigned long)(uptr & 0x7fffffffUL);
 }
-
-static inline compat_uptr_t ptr_to_compat(void __user *uptr)
-{
-	return (u32)(unsigned long)uptr;
-}
+#define compat_ptr(uptr) compat_ptr(uptr)
 
 #ifdef CONFIG_COMPAT
 
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index b106aa2..09cdb63 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -54,7 +54,6 @@ typedef u16 kprobe_opcode_t;
 struct arch_specific_insn {
 	/* copy of original instruction */
 	kprobe_opcode_t *insn;
-	unsigned int is_ftrace_insn : 1;
 };
 
 struct prev_kprobe {
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 02f4c21..1726224 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -122,6 +122,11 @@ struct mcck_volatile_info {
 	__u32 reserved;
 };
 
+#define CR0_INITIAL_MASK (CR0_UNUSED_56 | CR0_INTERRUPT_KEY_SUBMASK | \
+			  CR0_MEASUREMENT_ALERT_SUBMASK)
+#define CR14_INITIAL_MASK (CR14_UNUSED_32 | CR14_UNUSED_33 | \
+			   CR14_EXTERNAL_DAMAGE_SUBMASK)
+
 #define CPUSTAT_STOPPED    0x80000000
 #define CPUSTAT_WAIT       0x10000000
 #define CPUSTAT_ECALL_PEND 0x08000000
@@ -914,7 +919,6 @@ extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
 
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_free_memslot(struct kvm *kvm,
 		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index a4d3809..85e944f 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -33,6 +33,8 @@
 #define ARCH_HAS_PREPARE_HUGEPAGE
 #define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
 
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+
 #include <asm/setup.h>
 #ifndef __ASSEMBLY__
 
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 7b03037..137a392 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -673,6 +673,7 @@ static inline int pud_none(pud_t pud)
 	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
 }
 
+#define pud_leaf	pud_large
 static inline int pud_large(pud_t pud)
 {
 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
@@ -690,6 +691,7 @@ static inline unsigned long pud_pfn(pud_t pud)
 	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
 }
 
+#define pmd_leaf	pmd_large
 static inline int pmd_large(pmd_t pmd)
 {
 	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 69289e9..b241ddb 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -79,6 +79,13 @@ struct parmarea {
 	char command_line[ARCH_COMMAND_LINE_SIZE];	/* 0x10480 */
 };
 
+extern unsigned int zlib_dfltcc_support;
+#define ZLIB_DFLTCC_DISABLED		0
+#define ZLIB_DFLTCC_FULL		1
+#define ZLIB_DFLTCC_DEFLATE_ONLY	2
+#define ZLIB_DFLTCC_INFLATE_ONLY	3
+#define ZLIB_DFLTCC_FULL_DEBUG		4
+
 extern int noexec_disabled;
 extern int memory_end_set;
 extern unsigned long memory_end;
diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
index e22f072..d27d7d3 100644
--- a/arch/s390/include/uapi/asm/pkey.h
+++ b/arch/s390/include/uapi/asm/pkey.h
@@ -25,10 +25,11 @@
 #define MAXPROTKEYSIZE	64	/* a protected key blob may be up to 64 bytes */
 #define MAXCLRKEYSIZE	32	   /* a clear key value may be up to 32 bytes */
 #define MAXAESCIPHERKEYSIZE 136  /* our aes cipher keys have always 136 bytes */
+#define MINEP11AESKEYBLOBSIZE 256  /* min EP11 AES key blob size  */
+#define MAXEP11AESKEYBLOBSIZE 320  /* max EP11 AES key blob size */
 
-/* Minimum and maximum size of a key blob */
+/* Minimum size of a key blob */
 #define MINKEYBLOBSIZE	SECKEYBLOBSIZE
-#define MAXKEYBLOBSIZE	MAXAESCIPHERKEYSIZE
 
 /* defines for the type field within the pkey_protkey struct */
 #define PKEY_KEYTYPE_AES_128		      1
@@ -39,6 +40,7 @@
 enum pkey_key_type {
 	PKEY_TYPE_CCA_DATA   = (__u32) 1,
 	PKEY_TYPE_CCA_CIPHER = (__u32) 2,
+	PKEY_TYPE_EP11	     = (__u32) 3,
 };
 
 /* the newer ioctls use a pkey_key_size enum for key size information */
@@ -200,7 +202,7 @@ struct pkey_kblob2pkey {
 
 /*
  * Generate secure key, version 2.
- * Generate either a CCA AES secure key or a CCA AES cipher key.
+ * Generate CCA AES secure key, CCA AES cipher key or EP11 AES secure key.
  * There needs to be a list of apqns given with at least one entry in there.
  * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain
  * is not supported. The implementation walks through the list of apqns and
@@ -210,10 +212,13 @@ struct pkey_kblob2pkey {
  * (return -1 with errno ENODEV). You may use the PKEY_APQNS4KT ioctl to
  * generate a list of apqns based on the key type to generate.
  * The keygenflags argument is passed to the low level generation functions
- * individual for the key type and has a key type specific meaning. Currently
- * only CCA AES cipher keys react to this parameter: Use one or more of the
- * PKEY_KEYGEN_* flags to widen the export possibilities. By default a cipher
- * key is only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC).
+ * individual for the key type and has a key type specific meaning. When
+ * generating CCA cipher keys you can use one or more of the PKEY_KEYGEN_*
+ * flags to widen the export possibilities. By default a cipher key is
+ * only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC).
+ * The keygenflag argument for generating an EP11 AES key should either be 0
+ * to use the defaults which are XCP_BLOB_ENCRYPT, XCP_BLOB_DECRYPT and
+ * XCP_BLOB_PROTKEY_EXTRACTABLE or a valid combination of XCP_BLOB_* flags.
  */
 struct pkey_genseck2 {
 	struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets*/
@@ -229,8 +234,8 @@ struct pkey_genseck2 {
 
 /*
  * Generate secure key from clear key value, version 2.
- * Construct a CCA AES secure key or CCA AES cipher key from a given clear key
- * value.
+ * Construct a CCA AES secure key, CCA AES cipher key or EP11 AES secure
+ * key from a given clear key value.
  * There needs to be a list of apqns given with at least one entry in there.
  * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain
  * is not supported. The implementation walks through the list of apqns and
@@ -240,10 +245,13 @@ struct pkey_genseck2 {
  * (return -1 with errno ENODEV). You may use the PKEY_APQNS4KT ioctl to
  * generate a list of apqns based on the key type to generate.
  * The keygenflags argument is passed to the low level generation functions
- * individual for the key type and has a key type specific meaning. Currently
- * only CCA AES cipher keys react to this parameter: Use one or more of the
- * PKEY_KEYGEN_* flags to widen the export possibilities. By default a cipher
- * key is only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC).
+ * individual for the key type and has a key type specific meaning. When
+ * generating CCA cipher keys you can use one or more of the PKEY_KEYGEN_*
+ * flags to widen the export possibilities. By default a cipher key is
+ * only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC).
+ * The keygenflags argument for generating an EP11 AES key should either be 0
+ * to use the defaults, which are XCP_BLOB_ENCRYPT, XCP_BLOB_DECRYPT and
+ * XCP_BLOB_PROTKEY_EXTRACTABLE or a valid combination of XCP_BLOB_* flags.
  */
 struct pkey_clr2seck2 {
 	struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets */
@@ -266,14 +274,19 @@ struct pkey_clr2seck2 {
  * with one apqn able to handle this key.
  * The function also checks for the master key verification patterns
  * of the key matching to the current or alternate mkvp of the apqn.
- * Currently CCA AES secure keys and CCA AES cipher keys are supported.
- * The flags field is updated with some additional info about the apqn mkvp
+ * For CCA AES secure keys and CCA AES cipher keys this means checking
+ * the key's mkvp against the current or old mkvp of the apqns. The flags
+ * field is updated with some additional info about the apqn mkvp
  * match: If the current mkvp matches to the key's mkvp then the
  * PKEY_FLAGS_MATCH_CUR_MKVP bit is set, if the alternate mkvp matches to
  * the key's mkvp the PKEY_FLAGS_MATCH_ALT_MKVP is set. For CCA keys the
  * alternate mkvp is the old master key verification pattern.
  * CCA AES secure keys are also checked to have the CPACF export allowed
  * bit enabled (XPRTCPAC) in the kmf1 field.
+ * EP11 keys are also supported and the wkvp of the key is checked against
+ * the current wkvp of the apqns. There is no alternate for this type of
+ * key, so on a match the PKEY_FLAGS_MATCH_CUR_MKVP flag is always set.
+ * EP11 keys are also checked to have XCP_BLOB_PROTKEY_EXTRACTABLE set.
  * The ioctl returns 0 as long as the given or found apqn matches to
  * matches with the current or alternate mkvp to the key's mkvp. If the given
  * apqn does not match or there is no such apqn found, -1 with errno
@@ -313,16 +326,20 @@ struct pkey_kblob2pkey2 {
 /*
  * Build a list of APQNs based on a key blob given.
  * Is able to find out which type of secure key is given (CCA AES secure
- * key or CCA AES cipher key) and tries to find all matching crypto cards
- * based on the MKVP and maybe other criterias (like CCA AES cipher keys
- * need a CEX5C or higher). The list of APQNs is further filtered by the key's
- * mkvp which needs to match to either the current mkvp or the alternate mkvp
- * (which is the old mkvp on CCA adapters) of the apqns. The flags argument may
- * be used to limit the matching apqns. If the PKEY_FLAGS_MATCH_CUR_MKVP is
- * given, only the current mkvp of each apqn is compared. Likewise with the
- * PKEY_FLAGS_MATCH_ALT_MKVP. If both are given, it is assumed to
- * return apqns where either the current or the alternate mkvp
+ * key, CCA AES cipher key or EP11 AES key) and tries to find all matching
+ * crypto cards based on the MKVP and maybe other criteria (like CCA AES
+ * cipher keys need a CEX5C or higher, EP11 keys with BLOB_PKEY_EXTRACTABLE
+ * need a CEX7 and EP11 api version 4). The list of APQNs is further filtered
+ * by the key's mkvp which needs to match to either the current mkvp (CCA and
+ * EP11) or the alternate mkvp (old mkvp, CCA adapters only) of the apqns. The
+ * flags argument may be used to limit the matching apqns. If the
+ * PKEY_FLAGS_MATCH_CUR_MKVP is given, only the current mkvp of each apqn is
+ * compared. Likewise with the PKEY_FLAGS_MATCH_ALT_MKVP. If both are given, it
+ * is assumed to return apqns where either the current or the alternate mkvp
  * matches. At least one of the matching flags needs to be given.
+ * The flags argument for EP11 keys has no further effect and is currently
+ * ignored (but needs to be given as PKEY_FLAGS_MATCH_CUR_MKVP) as there is only
+ * the wkvp from the key to match against the apqn's wkvp.
  * The list of matching apqns is stored into the space given by the apqns
  * argument and the number of stored entries goes into apqn_entries. If the list
  * is empty (apqn_entries is 0) the apqn_entries field is updated to the number
@@ -356,6 +373,10 @@ struct pkey_apqns4key {
  * If both are given, it is assumed to return apqns where either the
  * current or the alternate mkvp matches. If no match flag is given
  * (flags is 0) the mkvp values are ignored for the match process.
+ * For EP11 keys there is only the current wkvp. So if the apqns should also
+ * match to a given wkvp, then the PKEY_FLAGS_MATCH_CUR_MKVP flag should be
+ * set. The wkvp value is 32 bytes but only the leftmost 16 bytes are compared
+ * against the leftmost 16 bytes of the wkvp of the apqn.
  * The list of matching apqns is stored into the space given by the apqns
  * argument and the number of stored entries goes into apqn_entries. If the list
  * is empty (apqn_entries is 0) the apqn_entries field is updated to the number
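For orientation, a user-space sketch of generating an EP11 AES secure key with the version-2 generate ioctl described above. Only struct pkey_genseck2, struct pkey_apqn, PKEY_TYPE_EP11 and MAXEP11AESKEYBLOBSIZE appear in this hunk; the PKEY_GENSECK2 ioctl name, the /dev/pkey node, the remaining struct fields and the PKEY_SIZE_AES_256 enum value are recalled from the full header and should be treated as assumptions.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/pkey.h>

/* Sketch: generate an EP11 AES-256 secure key on one explicit APQN. */
static int gen_ep11_key(struct pkey_apqn *apqn, unsigned char *blob, __u32 *bloblen)
{
	struct pkey_genseck2 gs;
	int fd, rc;

	memset(&gs, 0, sizeof(gs));
	gs.apqns = apqn;		/* exact card/domain, no 0xFFFF wildcards */
	gs.apqn_entries = 1;
	gs.type = PKEY_TYPE_EP11;
	gs.size = PKEY_SIZE_AES_256;	/* assumed enum value name */
	gs.keygenflags = 0;		/* 0 = default XCP_BLOB_* attributes */
	gs.key = blob;			/* buffer of at least MAXEP11AESKEYBLOBSIZE */
	gs.keylen = *bloblen;

	fd = open("/dev/pkey", O_RDWR);
	if (fd < 0)
		return -1;
	rc = ioctl(fd, PKEY_GENSECK2, &gs);	/* assumed ioctl name */
	if (!rc)
		*bloblen = gs.keylen;		/* out: actual blob size */
	close(fd);
	return rc;
}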
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index f9e5e1f..5a2177e 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -161,17 +161,17 @@ struct ica_xcRB {
  * @payload_len:	Payload length
  */
 struct ep11_cprb {
-	__u16		cprb_len;
-	unsigned char	cprb_ver_id;
-	unsigned char	pad_000[2];
-	unsigned char	flags;
-	unsigned char	func_id[2];
-	__u32		source_id;
-	__u32		target_id;
-	__u32		ret_code;
-	__u32		reserved1;
-	__u32		reserved2;
-	__u32		payload_len;
+	__u16	cprb_len;
+	__u8	cprb_ver_id;
+	__u8	pad_000[2];
+	__u8	flags;
+	__u8	func_id[2];
+	__u32	source_id;
+	__u32	target_id;
+	__u32	ret_code;
+	__u32	reserved1;
+	__u32	reserved2;
+	__u32	payload_len;
 } __attribute__((packed));
 
 /**
@@ -197,13 +197,13 @@ struct ep11_target_dev {
  */
 struct ep11_urb {
 	__u16		targets_num;
-	__u64		targets;
+	__u8 __user    *targets;
 	__u64		weight;
 	__u64		req_no;
 	__u64		req_len;
-	__u64		req;
+	__u8 __user    *req;
 	__u64		resp_len;
-	__u64		resp;
+	__u8 __user    *resp;
 } __attribute__((packed));
 
 /**
@@ -237,7 +237,9 @@ struct zcrypt_device_matrix_ext {
 	struct zcrypt_device_status_ext device[MAX_ZDEV_ENTRIES_EXT];
 };
 
-#define AUTOSELECT 0xFFFFFFFF
+#define AUTOSELECT  0xFFFFFFFF
+#define AUTOSEL_AP  ((__u16) 0xFFFF)
+#define AUTOSEL_DOM ((__u16) 0xFFFF)
 
 #define ZCRYPT_IOCTL_MAGIC 'z'
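A hedged user-space sketch of how the retyped ep11_urb pointer fields and the new AUTOSEL_AP/AUTOSEL_DOM defines are meant to be used. The ZSENDEP11CPRB ioctl name, the /dev/z90crypt node and the ep11_target_dev field names are not shown in this hunk and are assumptions.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/zcrypt.h>

/* Sketch: send an EP11 CPRB to any card/domain via the retyped ep11_urb. */
static int send_ep11_cprb(__u8 *req, __u64 req_len, __u8 *resp, __u64 resp_len)
{
	struct ep11_target_dev target = {
		.ap_id  = AUTOSEL_AP,	/* new 16-bit wildcard defines */
		.dom_id = AUTOSEL_DOM,
	};
	struct ep11_urb urb;
	int fd, rc;

	memset(&urb, 0, sizeof(urb));
	urb.targets_num = 1;
	urb.targets = (__u8 *)&target;	/* was a __u64 before this change */
	urb.req = req;
	urb.req_len = req_len;
	urb.resp = resp;
	urb.resp_len = resp_len;

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0)
		return -1;
	rc = ioctl(fd, ZSENDEP11CPRB, &urb);	/* assumed ioctl name */
	close(fd);
	return rc;
}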
 
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 1bb85f6..4cd9b1a 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -72,15 +72,6 @@ static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
 #endif
 }
 
-static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_KPROBES
-	if (insn->opc == BREAKPOINT_INSTRUCTION)
-		return 1;
-#endif
-	return 0;
-}
-
 static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
 {
 #ifdef CONFIG_KPROBES
@@ -114,16 +105,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		/* Initial code replacement */
 		ftrace_generate_orig_insn(&orig);
 		ftrace_generate_nop_insn(&new);
-	} else if (is_kprobe_on_ftrace(&old)) {
-		/*
-		 * If we find a breakpoint instruction, a kprobe has been
-		 * placed at the beginning of the function. We write the
-		 * constant KPROBE_ON_FTRACE_NOP into the remaining four
-		 * bytes of the original instruction so that the kprobes
-		 * handler can execute a nop, if it reaches this breakpoint.
-		 */
-		ftrace_generate_kprobe_call_insn(&orig);
-		ftrace_generate_kprobe_nop_insn(&new);
 	} else {
 		/* Replace ftrace call with a nop. */
 		ftrace_generate_call_insn(&orig, rec->ip);
@@ -142,21 +123,10 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
 		return -EFAULT;
-	if (is_kprobe_on_ftrace(&old)) {
-		/*
-		 * If we find a breakpoint instruction, a kprobe has been
-		 * placed at the beginning of the function. We write the
-		 * constant KPROBE_ON_FTRACE_CALL into the remaining four
-		 * bytes of the original instruction so that the kprobes
-		 * handler can execute a brasl if it reaches this breakpoint.
-		 */
-		ftrace_generate_kprobe_nop_insn(&orig);
-		ftrace_generate_kprobe_call_insn(&new);
-	} else {
-		/* Replace nop with an ftrace call. */
-		ftrace_generate_nop_insn(&orig);
-		ftrace_generate_call_insn(&new, rec->ip);
-	}
+	/* Replace nop with an ftrace call. */
+	ftrace_generate_nop_insn(&orig);
+	ftrace_generate_call_insn(&new, rec->ip);
+
 	/* Verify that the to be replaced code matches what we expect. */
 	if (memcmp(&orig, &old, sizeof(old)))
 		return -EINVAL;
@@ -241,3 +211,45 @@ int ftrace_disable_ftrace_graph_caller(void)
 }
 
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_KPROBES_ON_FTRACE
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+		struct ftrace_ops *ops, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb;
+	struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);
+
+	if (unlikely(!p) || kprobe_disabled(p))
+		return;
+
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+		return;
+	}
+
+	__this_cpu_write(current_kprobe, p);
+
+	kcb = get_kprobe_ctlblk();
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+	instruction_pointer_set(regs, ip);
+
+	if (!p->pre_handler || !p->pre_handler(p, regs)) {
+
+		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);
+
+		if (unlikely(p->post_handler)) {
+			kcb->kprobe_status = KPROBE_HIT_SSDONE;
+			p->post_handler(p, regs, 0);
+		}
+	}
+	__this_cpu_write(current_kprobe, NULL);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+	p->ainsn.insn = NULL;
+	return 0;
+}
+#endif
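With CONFIG_KPROBES_ON_FTRACE wired up here, a kprobe placed at a traced function entry is dispatched through kprobe_ftrace_handler() instead of a breakpoint. A minimal module sketch that exercises the path via the standard kprobes API; the probed symbol is just an example.

#include <linux/module.h>
#include <linux/kprobes.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s at %pS\n", p->symbol_name,
		(void *)instruction_pointer(regs));
	return 0;	/* let execution continue normally */
}

static struct kprobe demo_kp = {
	.symbol_name = "do_sys_open",	/* example target at a function entry */
	.pre_handler = demo_pre,
};

static int __init demo_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");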
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 6f13883..548d0ea 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -56,21 +56,10 @@ struct kprobe_insn_cache kprobe_s390_insn_slots = {
 
 static void copy_instruction(struct kprobe *p)
 {
-	unsigned long ip = (unsigned long) p->addr;
 	s64 disp, new_disp;
 	u64 addr, new_addr;
 
-	if (ftrace_location(ip) == ip) {
-		/*
-		 * If kprobes patches the instruction that is morphed by
-		 * ftrace make sure that kprobes always sees the branch
-		 * "jg .+24" that skips the mcount block or the "brcl 0,0"
-		 * in case of hotpatch.
-		 */
-		ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
-		p->ainsn.is_ftrace_insn = 1;
-	} else
-		memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
+	memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
 	p->opcode = p->ainsn.insn[0];
 	if (!probe_is_insn_relative_long(p->ainsn.insn))
 		return;
@@ -136,11 +125,6 @@ int arch_prepare_kprobe(struct kprobe *p)
 }
 NOKPROBE_SYMBOL(arch_prepare_kprobe);
 
-int arch_check_ftrace_location(struct kprobe *p)
-{
-	return 0;
-}
-
 struct swap_insn_args {
 	struct kprobe *p;
 	unsigned int arm_kprobe : 1;
@@ -149,28 +133,11 @@ struct swap_insn_args {
 static int swap_instruction(void *data)
 {
 	struct swap_insn_args *args = data;
-	struct ftrace_insn new_insn, *insn;
 	struct kprobe *p = args->p;
-	size_t len;
+	u16 opc;
 
-	new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
-	len = sizeof(new_insn.opc);
-	if (!p->ainsn.is_ftrace_insn)
-		goto skip_ftrace;
-	len = sizeof(new_insn);
-	insn = (struct ftrace_insn *) p->addr;
-	if (args->arm_kprobe) {
-		if (is_ftrace_nop(insn))
-			new_insn.disp = KPROBE_ON_FTRACE_NOP;
-		else
-			new_insn.disp = KPROBE_ON_FTRACE_CALL;
-	} else {
-		ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
-		if (insn->disp == KPROBE_ON_FTRACE_NOP)
-			ftrace_generate_nop_insn(&new_insn);
-	}
-skip_ftrace:
-	s390_kernel_write(p->addr, &new_insn, len);
+	opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
+	s390_kernel_write(p->addr, &opc, sizeof(opc));
 	return 0;
 }
 NOKPROBE_SYMBOL(swap_instruction);
@@ -464,24 +431,6 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs)
 	unsigned long ip = regs->psw.addr;
 	int fixup = probe_get_fixup_type(p->ainsn.insn);
 
-	/* Check if the kprobes location is an enabled ftrace caller */
-	if (p->ainsn.is_ftrace_insn) {
-		struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
-		struct ftrace_insn call_insn;
-
-		ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
-		/*
-		 * A kprobe on an enabled ftrace call site actually single
-		 * stepped an unconditional branch (ftrace nop equivalent).
-		 * Now we need to fixup things and pretend that a brasl r0,...
-		 * was executed instead.
-		 */
-		if (insn->disp == KPROBE_ON_FTRACE_CALL) {
-			ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
-			regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
-		}
-	}
-
 	if (fixup & FIXUP_PSW_NORMAL)
 		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
 
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index f942341..7458dcf 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -42,6 +42,9 @@
 	.globl	ftrace_regs_caller
 	.set	ftrace_regs_caller,ftrace_caller
 	stg	%r14,(__SF_GPRS+8*8)(%r15)	# save traced function caller
+	lghi	%r14,0				# save condition code
+	ipm	%r14				# don't put any instructions
+	sllg	%r14,%r14,16			# clobbering CC before this point
 	lgr	%r1,%r15
 #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT))
 	aghi	%r0,MCOUNT_RETURN_FIXUP
@@ -54,6 +57,9 @@
 	# allocate pt_regs and stack frame for ftrace_trace_function
 	aghi	%r15,-STACK_FRAME_SIZE
 	stg	%r1,(STACK_PTREGS_GPRS+15*8)(%r15)
+	stg	%r14,(STACK_PTREGS_PSW)(%r15)
+	lg	%r14,(__SF_GPRS+8*8)(%r1)	# restore original return address
+	stosm	(STACK_PTREGS_PSW)(%r15),0
 	aghi	%r1,-TRACED_FUNC_FRAME_SIZE
 	stg	%r1,__SF_BACKCHAIN(%r15)
 	stg	%r0,(STACK_PTREGS_PSW+8)(%r15)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index d5fbd75..b2c2f75 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -111,6 +111,8 @@ unsigned long __bootdata_preserved(__etext_dma);
 unsigned long __bootdata_preserved(__sdma);
 unsigned long __bootdata_preserved(__edma);
 unsigned long __bootdata_preserved(__kaslr_offset);
+unsigned int __bootdata_preserved(zlib_dfltcc_support);
+EXPORT_SYMBOL(zlib_dfltcc_support);
 
 unsigned long VMALLOC_START;
 EXPORT_SYMBOL(VMALLOC_START);
@@ -241,8 +243,6 @@ static void __init conmode_default(void)
 		SET_CONSOLE_SCLP;
 #endif
 	}
-	if (IS_ENABLED(CONFIG_VT) && IS_ENABLED(CONFIG_DUMMY_CONSOLE))
-		conswitchp = &dummy_con;
 }
 
 #ifdef CONFIG_CRASH_DUMP
@@ -761,14 +761,6 @@ static void __init free_mem_detect_info(void)
 		memblock_free(start, size);
 }
 
-static void __init memblock_physmem_add(phys_addr_t start, phys_addr_t size)
-{
-	memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
-		     start, start + size - 1);
-	memblock_add_range(&memblock.memory, start, size, 0, 0);
-	memblock_add_range(&memblock.physmem, start, size, 0, 0);
-}
-
 static const char * __init get_mem_info_source(void)
 {
 	switch (mem_detect.info_source) {
@@ -793,8 +785,10 @@ static void __init memblock_add_mem_detect_info(void)
 		     get_mem_info_source(), mem_detect.info_source);
 	/* keep memblock lists close to the kernel */
 	memblock_set_bottom_up(true);
-	for_each_mem_detect_block(i, &start, &end)
+	for_each_mem_detect_block(i, &start, &end) {
+		memblock_add(start, end - start);
 		memblock_physmem_add(start, end - start);
+	}
 	memblock_set_bottom_up(false);
 	memblock_dump_all();
 }
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 3054e9c0..bd7bd35 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -438,3 +438,5 @@
 433  common	fspick			sys_fspick			sys_fspick
 434  common	pidfd_open		sys_pidfd_open			sys_pidfd_open
 435  common	clone3			sys_clone3			sys_clone3
+437  common	openat2			sys_openat2			sys_openat2
+438  common	pidfd_getfd		sys_pidfd_getfd			sys_pidfd_getfd
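The two new table entries wire up openat2(2) and pidfd_getfd(2) on s390. A user-space sketch of the openat2 call using the raw syscall number from the table, since a libc wrapper may not exist yet; struct open_how and the RESOLVE_* flags come from <linux/openat2.h>.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/openat2.h>	/* struct open_how, RESOLVE_* flags */

/* Open "path" relative to dirfd, refusing to escape it via ".." or symlinks. */
static int open_beneath(int dirfd, const char *path)
{
	struct open_how how;

	memset(&how, 0, sizeof(how));
	how.flags = O_RDONLY;
	how.resolve = RESOLVE_BENEATH;

	/* 437 per the table above; prefer __NR_openat2 once headers provide it */
	return syscall(437, dirfd, path, &how, sizeof(how));
}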
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 165dea4..c06c89d 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2190,7 +2190,7 @@ static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
 		return -EINVAL;
 
 	if (!test_kvm_facility(kvm, 72))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&fi->ais_lock);
 	ais.simm = fi->simm;
@@ -2499,7 +2499,7 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
 	int ret = 0;
 
 	if (!test_kvm_facility(kvm, 72))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
 		return -EFAULT;
@@ -2579,7 +2579,7 @@ static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
 	struct kvm_s390_ais_all ais;
 
 	if (!test_kvm_facility(kvm, 72))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
 		return -EFAULT;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d9e6bf3..d7ff30e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -529,6 +529,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_S390_CMMA_MIGRATION:
 	case KVM_CAP_S390_AIS:
 	case KVM_CAP_S390_AIS_MIGRATION:
+	case KVM_CAP_S390_VCPU_RESETS:
 		r = 1;
 		break;
 	case KVM_CAP_S390_HPAGE_1M:
@@ -2530,9 +2531,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	if (vcpu->kvm->arch.use_cmma)
 		kvm_s390_vcpu_unsetup_cmma(vcpu);
 	free_page((unsigned long)(vcpu->arch.sie_block));
-
-	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 static void kvm_free_vcpus(struct kvm *kvm)
@@ -2541,7 +2539,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_arch_vcpu_destroy(vcpu);
+		kvm_vcpu_destroy(vcpu);
 
 	mutex_lock(&kvm->lock);
 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
@@ -2703,39 +2701,6 @@ static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
 }
 
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
-	kvm_clear_async_pf_completion_queue(vcpu);
-	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
-				    KVM_SYNC_GPRS |
-				    KVM_SYNC_ACRS |
-				    KVM_SYNC_CRS |
-				    KVM_SYNC_ARCH0 |
-				    KVM_SYNC_PFAULT;
-	kvm_s390_set_prefix(vcpu, 0);
-	if (test_kvm_facility(vcpu->kvm, 64))
-		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
-	if (test_kvm_facility(vcpu->kvm, 82))
-		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
-	if (test_kvm_facility(vcpu->kvm, 133))
-		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
-	if (test_kvm_facility(vcpu->kvm, 156))
-		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
-	/* fprs can be synchronized via vrs, even if the guest has no vx. With
-	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
-	 */
-	if (MACHINE_HAS_VX)
-		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
-	else
-		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
-
-	if (kvm_is_ucontrol(vcpu->kvm))
-		return __kvm_ucontrol_vcpu_init(vcpu);
-
-	return 0;
-}
-
 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
 {
@@ -2844,35 +2809,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
 }
 
-static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
-{
-	/* this equals initial cpu reset in pop, but we don't switch to ESA */
-	vcpu->arch.sie_block->gpsw.mask = 0UL;
-	vcpu->arch.sie_block->gpsw.addr = 0UL;
-	kvm_s390_set_prefix(vcpu, 0);
-	kvm_s390_set_cpu_timer(vcpu, 0);
-	vcpu->arch.sie_block->ckc       = 0UL;
-	vcpu->arch.sie_block->todpr     = 0;
-	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
-	vcpu->arch.sie_block->gcr[0]  = CR0_UNUSED_56 |
-					CR0_INTERRUPT_KEY_SUBMASK |
-					CR0_MEASUREMENT_ALERT_SUBMASK;
-	vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
-					CR14_UNUSED_33 |
-					CR14_EXTERNAL_DAMAGE_SUBMASK;
-	/* make sure the new fpc will be lazily loaded */
-	save_fpu_regs();
-	current->thread.fpu.fpc = 0;
-	vcpu->arch.sie_block->gbea = 1;
-	vcpu->arch.sie_block->pp = 0;
-	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
-	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
-	kvm_clear_async_pf_completion_queue(vcpu);
-	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
-		kvm_s390_vcpu_stop(vcpu);
-	kvm_s390_clear_local_irqs(vcpu);
-}
-
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
 	mutex_lock(&vcpu->kvm->lock);
@@ -2962,7 +2898,7 @@ static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
 		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
 }
 
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	int rc = 0;
 
@@ -3035,26 +2971,22 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	return rc;
 }
 
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
-				      unsigned int id)
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 {
-	struct kvm_vcpu *vcpu;
-	struct sie_page *sie_page;
-	int rc = -EINVAL;
-
 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
-		goto out;
+		return -EINVAL;
+	return 0;
+}
 
-	rc = -ENOMEM;
-
-	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu)
-		goto out;
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+{
+	struct sie_page *sie_page;
+	int rc;
 
 	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
 	if (!sie_page)
-		goto out_free_cpu;
+		return -ENOMEM;
 
 	vcpu->arch.sie_block = &sie_page->sie_block;
 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
@@ -3063,27 +2995,59 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	vcpu->arch.sie_block->mso = 0;
 	vcpu->arch.sie_block->msl = sclp.hamax;
 
-	vcpu->arch.sie_block->icpua = id;
+	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
 	spin_lock_init(&vcpu->arch.local_int.lock);
-	vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
+	vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
 	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
 		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
 	seqcount_init(&vcpu->arch.cputm_seqcount);
 
-	rc = kvm_vcpu_init(vcpu, kvm, id);
-	if (rc)
-		goto out_free_sie_block;
-	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
-		 vcpu->arch.sie_block);
-	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
+	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+	kvm_clear_async_pf_completion_queue(vcpu);
+	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
+				    KVM_SYNC_GPRS |
+				    KVM_SYNC_ACRS |
+				    KVM_SYNC_CRS |
+				    KVM_SYNC_ARCH0 |
+				    KVM_SYNC_PFAULT;
+	kvm_s390_set_prefix(vcpu, 0);
+	if (test_kvm_facility(vcpu->kvm, 64))
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+	if (test_kvm_facility(vcpu->kvm, 82))
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
+	if (test_kvm_facility(vcpu->kvm, 133))
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
+	if (test_kvm_facility(vcpu->kvm, 156))
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
+	/* fprs can be synchronized via vrs, even if the guest has no vx. With
+	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
+	 */
+	if (MACHINE_HAS_VX)
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
+	else
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
 
-	return vcpu;
+	if (kvm_is_ucontrol(vcpu->kvm)) {
+		rc = __kvm_ucontrol_vcpu_init(vcpu);
+		if (rc)
+			goto out_free_sie_block;
+	}
+
+	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
+		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
+	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
+
+	rc = kvm_s390_vcpu_setup(vcpu);
+	if (rc)
+		goto out_ucontrol_uninit;
+	return 0;
+
+out_ucontrol_uninit:
+	if (kvm_is_ucontrol(vcpu->kvm))
+		gmap_remove(vcpu->arch.gmap);
 out_free_sie_block:
 	free_page((unsigned long)(vcpu->arch.sie_block));
-out_free_cpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
-	return ERR_PTR(rc);
+	return rc;
 }
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
@@ -3287,10 +3251,53 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
 	return r;
 }
 
-static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
+static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
 {
-	kvm_s390_vcpu_initial_reset(vcpu);
-	return 0;
+	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
+	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
+
+	kvm_clear_async_pf_completion_queue(vcpu);
+	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
+		kvm_s390_vcpu_stop(vcpu);
+	kvm_s390_clear_local_irqs(vcpu);
+}
+
+static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
+{
+	/* Initial reset is a superset of the normal reset */
+	kvm_arch_vcpu_ioctl_normal_reset(vcpu);
+
+	/* this equals initial cpu reset in pop, but we don't switch to ESA */
+	vcpu->arch.sie_block->gpsw.mask = 0;
+	vcpu->arch.sie_block->gpsw.addr = 0;
+	kvm_s390_set_prefix(vcpu, 0);
+	kvm_s390_set_cpu_timer(vcpu, 0);
+	vcpu->arch.sie_block->ckc = 0;
+	vcpu->arch.sie_block->todpr = 0;
+	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
+	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
+	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
+	vcpu->run->s.regs.fpc = 0;
+	vcpu->arch.sie_block->gbea = 1;
+	vcpu->arch.sie_block->pp = 0;
+	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+}
+
+static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
+{
+	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
+
+	/* Clear reset is a superset of the initial reset */
+	kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+
+	memset(&regs->gprs, 0, sizeof(regs->gprs));
+	memset(&regs->vrs, 0, sizeof(regs->vrs));
+	memset(&regs->acrs, 0, sizeof(regs->acrs));
+	memset(&regs->gscb, 0, sizeof(regs->gscb));
+
+	regs->etoken = 0;
+	regs->etoken_extension = 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
@@ -4351,7 +4358,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	switch (ioctl) {
 	case KVM_S390_STORE_STATUS:
 		idx = srcu_read_lock(&vcpu->kvm->srcu);
-		r = kvm_s390_vcpu_store_status(vcpu, arg);
+		r = kvm_s390_store_status_unloaded(vcpu, arg);
 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	case KVM_S390_SET_INITIAL_PSW: {
@@ -4363,8 +4370,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
 		break;
 	}
+	case KVM_S390_CLEAR_RESET:
+		r = 0;
+		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
+		break;
 	case KVM_S390_INITIAL_RESET:
-		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+		r = 0;
+		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+		break;
+	case KVM_S390_NORMAL_RESET:
+		r = 0;
+		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
 		break;
 	case KVM_SET_ONE_REG:
 	case KVM_GET_ONE_REG: {
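User space can now ask for all three reset flavours directly on the vcpu fd, with KVM_CAP_S390_VCPU_RESETS advertising the two new ioctls. A short sketch of the usual capability check plus reset call; the ioctl names come from this hunk, the rest is the standard KVM pattern.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Prefer a clear reset when the kernel offers it, else fall back. */
static int reset_vcpu(int kvm_fd, int vcpu_fd)
{
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_VCPU_RESETS) > 0)
		return ioctl(vcpu_fd, KVM_S390_CLEAR_RESET, 0);
	return ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);
}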
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index b0246c7..5674710 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -2,7 +2,7 @@
 /*
  *  IBM System z Huge TLB Page Support for Kernel.
  *
- *    Copyright IBM Corp. 2007,2016
+ *    Copyright IBM Corp. 2007,2020
  *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
@@ -11,6 +11,9 @@
 
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+#include <linux/security.h>
 
 /*
  * If the bit selected by single-bit bitmask "a" is set within "x", move
@@ -267,3 +270,98 @@ static __init int setup_hugepagesz(char *opt)
 	return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
+
+static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+		unsigned long addr, unsigned long len,
+		unsigned long pgoff, unsigned long flags)
+{
+	struct hstate *h = hstate_file(file);
+	struct vm_unmapped_area_info info;
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = current->mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
+}
+
+static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+		unsigned long addr0, unsigned long len,
+		unsigned long pgoff, unsigned long flags)
+{
+	struct hstate *h = hstate_file(file);
+	struct vm_unmapped_area_info info;
+	unsigned long addr;
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+	info.high_limit = current->mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
+
+	return addr;
+}
+
+unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct hstate *h = hstate_file(file);
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int rc;
+
+	if (len & ~huge_page_mask(h))
+		return -EINVAL;
+	if (len > TASK_SIZE - mmap_min_addr)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED) {
+		if (prepare_hugepage_range(file, addr, len))
+			return -EINVAL;
+		goto check_asce_limit;
+	}
+
+	if (addr) {
+		addr = ALIGN(addr, huge_page_size(h));
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		    (!vma || addr + len <= vm_start_gap(vma)))
+			goto check_asce_limit;
+	}
+
+	if (mm->get_unmapped_area == arch_get_unmapped_area)
+		addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
+				pgoff, flags);
+	else
+		addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
+				pgoff, flags);
+	if (addr & ~PAGE_MASK)
+		return addr;
+
+check_asce_limit:
+	if (addr + len > current->mm->context.asce_limit &&
+	    addr + len <= TASK_SIZE) {
+		rc = crst_table_upgrade(mm, addr + len);
+		if (rc)
+			return (unsigned long) rc;
+	}
+	return addr;
+}
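The new hook is only reached when user space maps hugetlb memory without MAP_FIXED and lets the kernel pick the address. A small generic test that triggers it; nothing here is s390-specific beyond the 1 MiB default huge page size assumed for the length.

#include <stdio.h>
#include <sys/mman.h>

#define LEN (16UL << 20)	/* 16 MiB, a multiple of the huge page size */

int main(void)
{
	/* addr == NULL: the kernel chooses an aligned gap via
	 * hugetlb_get_unmapped_area(), top-down or bottom-up as above */
	void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	munmap(p, LEN);
	return 0;
}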
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7264.h b/arch/sh/include/cpu-sh2a/cpu/sh7264.h
index d12c191..8a1338a 100644
--- a/arch/sh/include/cpu-sh2a/cpu/sh7264.h
+++ b/arch/sh/include/cpu-sh2a/cpu/sh7264.h
@@ -112,12 +112,6 @@ enum {
 	GPIO_FN_TIOC0D, GPIO_FN_TIOC0C, GPIO_FN_TIOC0B, GPIO_FN_TIOC0A,
 	GPIO_FN_TCLKD, GPIO_FN_TCLKC, GPIO_FN_TCLKB, GPIO_FN_TCLKA,
 
-	/* SSU */
-	GPIO_FN_SCS0_PD, GPIO_FN_SSO0_PD, GPIO_FN_SSI0_PD, GPIO_FN_SSCK0_PD,
-	GPIO_FN_SCS0_PF, GPIO_FN_SSO0_PF, GPIO_FN_SSI0_PF, GPIO_FN_SSCK0_PF,
-	GPIO_FN_SCS1_PD, GPIO_FN_SSO1_PD, GPIO_FN_SSI1_PD, GPIO_FN_SSCK1_PD,
-	GPIO_FN_SCS1_PF, GPIO_FN_SSO1_PF, GPIO_FN_SSI1_PF, GPIO_FN_SSCK1_PF,
-
 	/* SCIF */
 	GPIO_FN_SCK0, GPIO_FN_SCK1, GPIO_FN_SCK2, GPIO_FN_SCK3,
 	GPIO_FN_RXD0, GPIO_FN_RXD1, GPIO_FN_RXD2, GPIO_FN_RXD3,
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
index d516e5d..fece521 100644
--- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h
+++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
@@ -78,8 +78,15 @@ enum {
 	GPIO_FN_WDTOVF,
 
 	/* CAN */
-	GPIO_FN_CTX1, GPIO_FN_CRX1, GPIO_FN_CTX0, GPIO_FN_CTX0_CTX1,
-	GPIO_FN_CRX0, GPIO_FN_CRX0_CRX1, GPIO_FN_CRX0_CRX1_CRX2,
+	GPIO_FN_CTX2, GPIO_FN_CRX2,
+	GPIO_FN_CTX1, GPIO_FN_CRX1,
+	GPIO_FN_CTX0, GPIO_FN_CRX0,
+	GPIO_FN_CTX0_CTX1, GPIO_FN_CRX0_CRX1,
+	GPIO_FN_CTX0_CTX1_CTX2, GPIO_FN_CRX0_CRX1_CRX2,
+	GPIO_FN_CTX2_PJ21, GPIO_FN_CRX2_PJ20,
+	GPIO_FN_CTX1_PJ23, GPIO_FN_CRX1_PJ22,
+	GPIO_FN_CTX0_CTX1_PJ23, GPIO_FN_CRX0_CRX1_PJ22,
+	GPIO_FN_CTX0_CTX1_CTX2_PJ21, GPIO_FN_CRX0_CRX1_CRX2_PJ20,
 
 	/* DMAC */
 	GPIO_FN_TEND0, GPIO_FN_DACK0, GPIO_FN_DREQ0,
@@ -119,12 +126,6 @@ enum {
 	GPIO_FN_TIOC0D, GPIO_FN_TIOC0C, GPIO_FN_TIOC0B, GPIO_FN_TIOC0A,
 	GPIO_FN_TCLKD, GPIO_FN_TCLKC, GPIO_FN_TCLKB, GPIO_FN_TCLKA,
 
-	/* SSU */
-	GPIO_FN_SCS0_PD, GPIO_FN_SSO0_PD, GPIO_FN_SSI0_PD, GPIO_FN_SSCK0_PD,
-	GPIO_FN_SCS0_PF, GPIO_FN_SSO0_PF, GPIO_FN_SSI0_PF, GPIO_FN_SSCK0_PF,
-	GPIO_FN_SCS1_PD, GPIO_FN_SSO1_PD, GPIO_FN_SSI1_PD, GPIO_FN_SSCK1_PD,
-	GPIO_FN_SCS1_PF, GPIO_FN_SSO1_PF, GPIO_FN_SSI1_PF, GPIO_FN_SSCK1_PF,
-
 	/* SCIF */
 	GPIO_FN_SCK0, GPIO_FN_RXD0, GPIO_FN_TXD0,
 	GPIO_FN_SCK1, GPIO_FN_RXD1, GPIO_FN_TXD1, GPIO_FN_RTS1, GPIO_FN_CTS1,
diff --git a/arch/sh/include/uapi/asm/sockios.h b/arch/sh/include/uapi/asm/sockios.h
index ef18a66..3da5614 100644
--- a/arch/sh/include/uapi/asm/sockios.h
+++ b/arch/sh/include/uapi/asm/sockios.h
@@ -10,7 +10,7 @@
 #define SIOCSPGRP	_IOW('s', 8, pid_t)
 #define SIOCGPGRP	_IOR('s', 9, pid_t)
 
-#define SIOCGSTAMP_OLD	_IOR('s', 100, struct timeval) /* Get stamp (timeval) */
-#define SIOCGSTAMPNS_OLD _IOR('s', 101, struct timespec) /* Get stamp (timespec) */
+#define SIOCGSTAMP_OLD	_IOR('s', 100, struct __kernel_old_timeval) /* Get stamp (timeval) */
+#define SIOCGSTAMPNS_OLD _IOR('s', 101, struct __kernel_old_timespec) /* Get stamp (timespec) */
 
 #endif /* __ASM_SH_SOCKIOS_H */
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index d232cfa..67f5a3b 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -341,10 +341,6 @@ void __init setup_arch(char **cmdline_p)
 
 	paging_init();
 
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
-
 	/* Perform the machine specific initialisation */
 	if (likely(sh_mv.mv_setup))
 		sh_mv.mv_setup(cmdline_p);
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index b5ed26c..c7a30fc 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -438,3 +438,5 @@
 433	common	fspick				sys_fspick
 434	common	pidfd_open			sys_pidfd_open
 # 435 reserved for clone3
+437	common	openat2				sys_openat2
+438	common	pidfd_getfd			sys_pidfd_getfd
diff --git a/arch/sh/mm/alignment.c b/arch/sh/mm/alignment.c
index ec2b253..fb517b8 100644
--- a/arch/sh/mm/alignment.c
+++ b/arch/sh/mm/alignment.c
@@ -152,13 +152,12 @@ static ssize_t alignment_proc_write(struct file *file,
 	return count;
 }
 
-static const struct file_operations alignment_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= alignment_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= alignment_proc_write,
+static const struct proc_ops alignment_proc_ops = {
+	.proc_open	= alignment_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= alignment_proc_write,
 };
 
 /*
@@ -176,12 +175,12 @@ static int __init alignment_init(void)
 		return -ENOMEM;
 
 	res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir,
-			       &alignment_proc_fops, &se_usermode);
+			       &alignment_proc_ops, &se_usermode);
 	if (!res)
 		return -ENOMEM;
 
         res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir,
-			       &alignment_proc_fops, &se_kernmode_warn);
+			       &alignment_proc_ops, &se_kernmode_warn);
         if (!res)
                 return -ENOMEM;
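This is part of the tree-wide switch from file_operations to the leaner proc_ops for /proc entries; the same conversion appears below for sparc's led driver and UML's mconsole. A minimal sketch of the pattern for a seq_file-backed entry:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

/* proc_ops drops .owner and prefixes the methods with proc_ */
static const struct proc_ops demo_proc_ops = {
	.proc_open	= demo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/* in an init path: proc_create("demo", 0444, NULL, &demo_proc_ops); */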
 
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index e8c3ea0..c1dd6dd 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -64,8 +64,7 @@
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_KRETPROBES
 	select HAVE_KPROBES
-	select HAVE_RCU_TABLE_FREE if SMP
-	select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
+	select MMU_GATHER_RCU_TABLE_FREE if SMP
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_DYNAMIC_FTRACE
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index 30b1763..40a267b 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -125,23 +125,6 @@ typedef u32		compat_sigset_word;
 
 #define COMPAT_OFF_T_MAX	0x7fffffff
 
-/*
- * A pointer passed in from user mode. This should not
- * be used for syscall parameters, just declare them
- * as pointers because the syscall entry code will have
- * appropriately converted them already.
- */
-
-static inline void __user *compat_ptr(compat_uptr_t uptr)
-{
-	return (void __user *)(unsigned long)uptr;
-}
-
-static inline compat_uptr_t ptr_to_compat(void __user *uptr)
-{
-	return (u32)(unsigned long)uptr;
-}
-
 #ifdef CONFIG_COMPAT
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index 9d3e5cc..264e76c 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -16,12 +16,12 @@
 
 extern struct kmem_cache *pgtable_cache;
 
-static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
+static inline void __p4d_populate(p4d_t *p4d, pud_t *pud)
 {
-	pgd_set(pgd, pud);
+	p4d_set(p4d, pud);
 }
 
-#define pgd_populate(MM, PGD, PUD)	__pgd_populate(PGD, PUD)
+#define p4d_populate(MM, P4D, PUD)	__p4d_populate(P4D, PUD)
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 6ae8016..65494c3 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -13,7 +13,7 @@
  * the SpitFire page tables.
  */
 
-#include <asm-generic/5level-fixup.h>
+#include <asm-generic/pgtable-nop4d.h>
 #include <linux/compiler.h>
 #include <linux/const.h>
 #include <asm/types.h>
@@ -683,6 +683,7 @@ static inline unsigned long pte_special(pte_t pte)
 	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
+#define pmd_leaf	pmd_large
 static inline unsigned long pmd_large(pmd_t pmd)
 {
 	pte_t pte = __pte(pmd_val(pmd));
@@ -810,9 +811,9 @@ static inline int pmd_present(pmd_t pmd)
 
 #define pud_bad(pud)			(pud_val(pud) & ~PAGE_MASK)
 
-#define pgd_none(pgd)			(!pgd_val(pgd))
+#define p4d_none(p4d)			(!p4d_val(p4d))
 
-#define pgd_bad(pgd)			(pgd_val(pgd) & ~PAGE_MASK)
+#define p4d_bad(p4d)			(p4d_val(p4d) & ~PAGE_MASK)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
@@ -859,14 +860,15 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
 #define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
 #define pud_present(pud)		(pud_val(pud) != 0U)
 #define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
-#define pgd_page_vaddr(pgd)		\
-	((unsigned long) __va(pgd_val(pgd)))
-#define pgd_present(pgd)		(pgd_val(pgd) != 0U)
-#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)
+#define p4d_page_vaddr(p4d)		\
+	((unsigned long) __va(p4d_val(p4d)))
+#define p4d_present(p4d)		(p4d_val(p4d) != 0U)
+#define p4d_clear(p4dp)			(p4d_val(*(p4dp)) = 0UL)
 
 /* only used by the stubbed out hugetlb gup code, should never be called */
-#define pgd_page(pgd)			NULL
+#define p4d_page(p4d)			NULL
 
+#define pud_leaf	pud_large
 static inline unsigned long pud_large(pud_t pud)
 {
 	pte_t pte = __pte(pud_val(pud));
@@ -884,8 +886,8 @@ static inline unsigned long pud_pfn(pud_t pud)
 /* Same in both SUN4V and SUN4U.  */
 #define pte_none(pte) 			(!pte_val(pte))
 
-#define pgd_set(pgdp, pudp)	\
-	(pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))
+#define p4d_set(p4dp, pudp)	\
+	(p4d_val(*(p4dp)) = (__pa((unsigned long) (pudp))))
 
 /* to find an entry in a page-table-directory. */
 #define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
@@ -896,8 +898,8 @@ static inline unsigned long pud_pfn(pud_t pud)
 
 /* Find an entry in the third-level page table.. */
 #define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-#define pud_offset(pgdp, address)	\
-	((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))
+#define pud_offset(p4dp, address)	\
+	((pud_t *) p4d_page_vaddr(*(p4dp)) + pud_index(address))
 
 /* Find an entry in the second-level page table.. */
 #define pmd_offset(pudp, address)	\
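After the switch to pgtable-nop4d, pud_offset() takes a p4d entry, so every open-coded walk gains one folded (zero-cost) level; the signal32 and fault_64 hunks below follow the same pattern. A generic sketch of the walk, assuming the usual kernel includes (<linux/mm.h>):

/* Walk down to the PTE for an address in mm; every *_offset/_none helper
 * used here is existing page-table API, only the function itself is a sketch. */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);	/* folds back onto the pgd on sparc64 */
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);	/* signature changed by this series */
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_map(pmd, addr);
}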
diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h
index a2f3fa6..6820d35 100644
--- a/arch/sparc/include/asm/tlb_64.h
+++ b/arch/sparc/include/asm/tlb_64.h
@@ -28,6 +28,15 @@ void flush_tlb_pending(void);
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 #define tlb_flush(tlb)	flush_tlb_pending()
 
+/*
+ * SPARC64's hardware TLB fill does not use the Linux page-tables
+ * and therefore we don't need a TLBI when freeing page-table pages.
+ */
+
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
+#define tlb_needs_table_invalidate()	(false)
+#endif
+
 #include <asm-generic/tlb.h>
 
 #endif /* _SPARC64_TLB_H */
diff --git a/arch/sparc/include/uapi/asm/ipcbuf.h b/arch/sparc/include/uapi/asm/ipcbuf.h
index 5b933a5..0ea1240 100644
--- a/arch/sparc/include/uapi/asm/ipcbuf.h
+++ b/arch/sparc/include/uapi/asm/ipcbuf.h
@@ -17,19 +17,19 @@
 
 struct ipc64_perm
 {
-	__kernel_key_t	key;
-	__kernel_uid_t	uid;
-	__kernel_gid_t	gid;
-	__kernel_uid_t	cuid;
-	__kernel_gid_t	cgid;
+	__kernel_key_t		key;
+	__kernel_uid32_t	uid;
+	__kernel_gid32_t	gid;
+	__kernel_uid32_t	cuid;
+	__kernel_gid32_t	cgid;
 #ifndef __arch64__
-	unsigned short	__pad0;
+	unsigned short		__pad0;
 #endif
-	__kernel_mode_t	mode;
-	unsigned short	__pad1;
-	unsigned short	seq;
-	unsigned long long __unused1;
-	unsigned long long __unused2;
+	__kernel_mode_t		mode;
+	unsigned short		__pad1;
+	unsigned short		seq;
+	unsigned long long	__unused1;
+	unsigned long long	__unused2;
 };
 
 #endif /* __SPARC_IPCBUF_H */
diff --git a/arch/sparc/include/uapi/asm/statfs.h b/arch/sparc/include/uapi/asm/statfs.h
deleted file mode 100644
index 20c8f5b..0000000
--- a/arch/sparc/include/uapi/asm/statfs.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef ___ASM_SPARC_STATFS_H
-#define ___ASM_SPARC_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif
diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c
index a6292f8..bd48575 100644
--- a/arch/sparc/kernel/led.c
+++ b/arch/sparc/kernel/led.c
@@ -104,13 +104,12 @@ static ssize_t led_proc_write(struct file *file, const char __user *buffer,
 	return count;
 }
 
-static const struct file_operations led_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= led_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= led_proc_write,
+static const struct proc_ops led_proc_ops = {
+	.proc_open	= led_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= led_proc_write,
 };
 
 static struct proc_dir_entry *led;
@@ -121,7 +120,7 @@ static int __init led_init(void)
 {
 	timer_setup(&led_blink_timer, led_blink, 0);
 
-	led = proc_create("led", 0, NULL, &led_proc_fops);
+	led = proc_create("led", 0, NULL, &led_proc_ops);
 	if (!led)
 		return -ENOMEM;
 
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index ec244d1..da89022 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -132,12 +132,13 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
 		regs->which_io, regs->phys_addr);
 }
 
-/* "name:vendor:device@irq,addrlo" */
+/* "name@irq,addrlo" */
 static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
 {
 	const char *name = of_get_property(dp, "name", NULL);
 	struct amba_prom_registers *regs;
-	unsigned int *intr, *device, *vendor, reg0;
+	unsigned int *intr;
+	unsigned int reg0;
 	struct property *prop;
 	int interrupt = 0;
 
@@ -159,18 +160,7 @@ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
 	else
 		intr = prop->value;
 
-	prop = of_find_property(dp, "vendor", NULL);
-	if (!prop)
-		return;
-	vendor = prop->value;
-	prop = of_find_property(dp, "device", NULL);
-	if (!prop)
-		return;
-	device = prop->value;
-
-	sprintf(tmp_buf, "%s:%d:%d@%x,%x",
-		name, *vendor, *device,
-		*intr, reg0);
+	sprintf(tmp_buf, "%s@%x,%x", name, *intr, reg0);
 }
 
 static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index afe1592..5d1bcfc 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -332,10 +332,6 @@ void __init setup_arch(char **cmdline_p)
 		break;
 	}
 
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
-
 	idprom_init();
 	load_mmu();
 
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index fd2182a..75e3992 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -653,10 +653,6 @@ void __init setup_arch(char **cmdline_p)
 	else
 		pr_info("ARCH: SUN4U\n");
 
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
-
 	idprom_init();
 
 	if (!root_flags)
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index a237810..2a734ec 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -299,6 +299,7 @@ static void flush_signal_insns(unsigned long address)
 	unsigned long pstate, paddr;
 	pte_t *ptep, pte;
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 
@@ -318,7 +319,10 @@ static void flush_signal_insns(unsigned long address)
 	pgdp = pgd_offset(current->mm, address);
 	if (pgd_none(*pgdp))
 		goto out_irqs_on;
-	pudp = pud_offset(pgdp, address);
+	p4dp = p4d_offset(pgdp, address);
+	if (p4d_none(*p4dp))
+		goto out_irqs_on;
+	pudp = pud_offset(p4dp, address);
 	if (pud_none(*pudp))
 		goto out_irqs_on;
 	pmdp = pmd_offset(pudp, address);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 9b45063..80f20b3 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1621,6 +1621,7 @@ static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
 static void __init pcpu_populate_pte(unsigned long addr)
 {
 	pgd_t *pgd = pgd_offset_k(addr);
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 
@@ -1633,7 +1634,17 @@ static void __init pcpu_populate_pte(unsigned long addr)
 		pgd_populate(&init_mm, pgd, new);
 	}
 
-	pud = pud_offset(pgd, addr);
+	p4d = p4d_offset(pgd, addr);
+	if (p4d_none(*p4d)) {
+		pud_t *new;
+
+		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		if (!new)
+			goto err_alloc;
+		p4d_populate(&init_mm, p4d, new);
+	}
+
+	pud = pud_offset(p4d, addr);
 	if (pud_none(*pud)) {
 		pmd_t *new;
 
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 9f41a6f..6b92fad 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -548,34 +548,35 @@ SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
 	return err;
 }
 
-SYSCALL_DEFINE1(sparc_adjtimex, struct timex __user *, txc_p)
+SYSCALL_DEFINE1(sparc_adjtimex, struct __kernel_timex __user *, txc_p)
 {
-	struct timex txc;		/* Local copy of parameter */
-	struct __kernel_timex *kt = (void *)&txc;
+	struct __kernel_timex txc;
+	struct __kernel_old_timeval *tv = (void *)&txc.time;
 	int ret;
 
 	/* Copy the user data space into the kernel copy
 	 * structure. But bear in mind that the structures
 	 * may change
 	 */
-	if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
+	if (copy_from_user(&txc, txc_p, sizeof(txc)))
 		return -EFAULT;
 
 	/*
 	 * override for sparc64 specific timeval type: tv_usec
 	 * is 32 bit wide instead of 64-bit in __kernel_timex
 	 */
-	kt->time.tv_usec = txc.time.tv_usec;
-	ret = do_adjtimex(kt);
-	txc.time.tv_usec = kt->time.tv_usec;
+	txc.time.tv_usec = tv->tv_usec;
+	ret = do_adjtimex(&txc);
+	tv->tv_usec = txc.time.tv_usec;
 
-	return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
+	return copy_to_user(txc_p, &txc, sizeof(txc)) ? -EFAULT : ret;
 }
 
-SYSCALL_DEFINE2(sparc_clock_adjtime, const clockid_t, which_clock,struct timex __user *, txc_p)
+SYSCALL_DEFINE2(sparc_clock_adjtime, const clockid_t, which_clock,
+		struct __kernel_timex __user *, txc_p)
 {
-	struct timex txc;		/* Local copy of parameter */
-	struct __kernel_timex *kt = (void *)&txc;
+	struct __kernel_timex txc;
+	struct __kernel_old_timeval *tv = (void *)&txc.time;
 	int ret;
 
 	if (!IS_ENABLED(CONFIG_POSIX_TIMERS)) {
@@ -590,18 +591,18 @@ SYSCALL_DEFINE2(sparc_clock_adjtime, const clockid_t, which_clock,struct timex _
 	 * structure. But bear in mind that the structures
 	 * may change
 	 */
-	if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
+	if (copy_from_user(&txc, txc_p, sizeof(txc)))
 		return -EFAULT;
 
 	/*
 	 * override for sparc64 specific timeval type: tv_usec
 	 * is 32 bit wide instead of 64-bit in __kernel_timex
 	 */
-	kt->time.tv_usec = txc.time.tv_usec;
-	ret = do_clock_adjtime(which_clock, kt);
-	txc.time.tv_usec = kt->time.tv_usec;
+	txc.time.tv_usec = tv->tv_usec;
+	ret = do_clock_adjtime(which_clock, &txc);
+	tv->tv_usec = txc.time.tv_usec;
 
-	return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
+	return copy_to_user(txc_p, &txc, sizeof(txc)) ? -EFAULT : ret;
 }
 
 SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index 8c8cc75..f13615e 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -481,3 +481,5 @@
 433	common	fspick				sys_fspick
 434	common	pidfd_open			sys_pidfd_open
 # 435 reserved for clone3
+437	common	openat2			sys_openat2
+438	common	pidfd_getfd			sys_pidfd_getfd
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 7ec7991..f99e99e 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -171,12 +171,14 @@
 	}
 	PERCPU_SECTION(SMP_CACHE_BYTES)
 
-#ifdef CONFIG_JUMP_LABEL
 	. = ALIGN(PAGE_SIZE);
 	.exit.text : {
 		EXIT_TEXT
 	}
-#endif
+
+	.exit.data : {
+		EXIT_DATA
+	}
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 2371fb6..8b7ddbd 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -80,6 +80,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
 static unsigned int get_user_insn(unsigned long tpc)
 {
 	pgd_t *pgdp = pgd_offset(current->mm, tpc);
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep, pte;
@@ -88,7 +89,10 @@ static unsigned int get_user_insn(unsigned long tpc)
 
 	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
 		goto out;
-	pudp = pud_offset(pgdp, tpc);
+	p4dp = p4d_offset(pgdp, tpc);
+	if (p4d_none(*p4dp) || unlikely(p4d_bad(*p4dp)))
+		goto out;
+	pudp = pud_offset(p4dp, tpc);
 	if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
 		goto out;
 
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index f78793a..7b9fa86 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -277,11 +277,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 
 	pgd = pgd_offset(mm, addr);
-	pud = pud_alloc(mm, pgd, addr);
+	p4d = p4d_offset(pgd, addr);
+	pud = pud_alloc(mm, p4d, addr);
 	if (!pud)
 		return NULL;
 	if (sz >= PUD_SIZE)
@@ -298,13 +300,17 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 		       unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 
 	pgd = pgd_offset(mm, addr);
 	if (pgd_none(*pgd))
 		return NULL;
-	pud = pud_offset(pgd, addr);
+	p4d = p4d_offset(pgd, addr);
+	if (p4d_none(*p4d))
+		return NULL;
+	pud = pud_offset(p4d, addr);
 	if (pud_none(*pud))
 		return NULL;
 	if (is_hugetlb_pud(*pud))
@@ -449,7 +455,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 	mm_dec_nr_pmds(tlb->mm);
 }
 
-static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
 				   unsigned long addr, unsigned long end,
 				   unsigned long floor, unsigned long ceiling)
 {
@@ -458,7 +464,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 	unsigned long start;
 
 	start = addr;
-	pud = pud_offset(pgd, addr);
+	pud = pud_offset(p4d, addr);
 	do {
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
@@ -481,8 +487,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 	if (end - 1 > ceiling - 1)
 		return;
 
-	pud = pud_offset(pgd, start);
-	pgd_clear(pgd);
+	pud = pud_offset(p4d, start);
+	p4d_clear(p4d);
 	pud_free_tlb(tlb, pud, start);
 	mm_dec_nr_puds(tlb->mm);
 }
@@ -492,6 +498,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 			    unsigned long floor, unsigned long ceiling)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	unsigned long next;
 
 	addr &= PMD_MASK;
@@ -511,10 +518,11 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 		return;
 
 	pgd = pgd_offset(tlb->mm, addr);
+	p4d = p4d_offset(pgd, addr);
 	do {
-		next = pgd_addr_end(addr, end);
-		if (pgd_none_or_clear_bad(pgd))
+		next = p4d_addr_end(addr, end);
+		if (p4d_none_or_clear_bad(p4d))
 			continue;
-		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
-	} while (pgd++, addr = next, addr != end);
+		hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
+	} while (p4d++, addr = next, addr != end);
 }
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index e6d9181..1cf0d66 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -530,7 +530,8 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
 				paddr = kaddr & mask;
 			else {
 				pgd_t *pgdp = pgd_offset_k(kaddr);
-				pud_t *pudp = pud_offset(pgdp, kaddr);
+				p4d_t *p4dp = p4d_offset(pgdp, kaddr);
+				pud_t *pudp = pud_offset(p4dp, kaddr);
 				pmd_t *pmdp = pmd_offset(pudp, kaddr);
 				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
 
@@ -1653,6 +1654,7 @@ static unsigned long max_phys_bits = 40;
 bool kern_addr_valid(unsigned long addr)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -1674,7 +1676,11 @@ bool kern_addr_valid(unsigned long addr)
 	if (pgd_none(*pgd))
 		return 0;
 
-	pud = pud_offset(pgd, addr);
+	p4d = p4d_offset(pgd, addr);
+	if (p4d_none(*p4d))
+		return 0;
+
+	pud = pud_offset(p4d, addr);
 	if (pud_none(*pud))
 		return 0;
 
@@ -1800,6 +1806,7 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 	while (vstart < vend) {
 		unsigned long this_end, paddr = __pa(vstart);
 		pgd_t *pgd = pgd_offset_k(vstart);
+		p4d_t *p4d;
 		pud_t *pud;
 		pmd_t *pmd;
 		pte_t *pte;
@@ -1814,7 +1821,20 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 			alloc_bytes += PAGE_SIZE;
 			pgd_populate(&init_mm, pgd, new);
 		}
-		pud = pud_offset(pgd, vstart);
+
+		p4d = p4d_offset(pgd, vstart);
+		if (p4d_none(*p4d)) {
+			pud_t *new;
+
+			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+						  PAGE_SIZE);
+			if (!new)
+				goto err_alloc;
+			alloc_bytes += PAGE_SIZE;
+			p4d_populate(&init_mm, p4d, new);
+		}
+
+		pud = pud_offset(p4d, vstart);
 		if (pud_none(*pud)) {
 			pmd_t *new;
 
@@ -2612,13 +2632,18 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
 	for (; vstart < vend; vstart += PMD_SIZE) {
 		pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
 		unsigned long pte;
+		p4d_t *p4d;
 		pud_t *pud;
 		pmd_t *pmd;
 
 		if (!pgd)
 			return -ENOMEM;
 
-		pud = vmemmap_pud_populate(pgd, vstart, node);
+		p4d = vmemmap_p4d_populate(pgd, vstart, node);
+		if (!p4d)
+			return -ENOMEM;
+
+		pud = vmemmap_pud_populate(p4d, vstart, node);
 		if (!pud)
 			return -ENOMEM;
 
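
The init_64.c changes also have to create the intermediate level when it is missing: kernel_map_range() now allocates a pud table and hooks it into the p4d entry, and vmemmap_populate() goes through vmemmap_p4d_populate() before the pud step. A reduced sketch of that populate-if-empty step, assuming the pgd entry is already populated and that an early memblock allocation is acceptable (ensure_pud_table() is a made-up name):

  static pud_t *ensure_pud_table(unsigned long addr)
  {
  	pgd_t *pgd = pgd_offset_k(addr);
  	p4d_t *p4d = p4d_offset(pgd, addr);

  	if (p4d_none(*p4d)) {
  		/* memblock_alloc() returns zeroed memory, i.e. an empty table */
  		pud_t *new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

  		if (!new)
  			return NULL;	/* early boot: caller decides how to fail */
  		p4d_populate(&init_mm, p4d, new);
  	}
  	return pud_offset(p4d, addr);
  }
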
diff --git a/arch/um/drivers/cow.h b/arch/um/drivers/cow.h
index 760c507..103adac 100644
--- a/arch/um/drivers/cow.h
+++ b/arch/um/drivers/cow.h
@@ -11,7 +11,7 @@ extern int init_cow_file(int fd, char *cow_file, char *backing_file,
 extern int file_reader(__u64 offset, char *buf, int len, void *arg);
 extern int read_cow_header(int (*reader)(__u64, char *, int, void *),
 			   void *arg, __u32 *version_out,
-			   char **backing_file_out, time_t *mtime_out,
+			   char **backing_file_out, long long *mtime_out,
 			   unsigned long long *size_out, int *sectorsize_out,
 			   __u32 *align_out, int *bitmap_offset_out);
 
diff --git a/arch/um/drivers/cow_user.c b/arch/um/drivers/cow_user.c
index 74b0c26..29b4658 100644
--- a/arch/um/drivers/cow_user.c
+++ b/arch/um/drivers/cow_user.c
@@ -17,6 +17,7 @@
 
 #define PATH_LEN_V1 256
 
+/* unsigned time_t works until year 2106 */
 typedef __u32 time32_t;
 
 struct cow_header_v1 {
@@ -197,7 +198,7 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
 		     int sectorsize, int alignment, unsigned long long *size)
 {
 	struct cow_header_v3 *header;
-	unsigned long modtime;
+	long long modtime;
 	int err;
 
 	err = cow_seek_file(fd, 0);
@@ -276,7 +277,7 @@ int file_reader(__u64 offset, char *buf, int len, void *arg)
 
 int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 		    __u32 *version_out, char **backing_file_out,
-		    time_t *mtime_out, unsigned long long *size_out,
+		    long long *mtime_out, unsigned long long *size_out,
 		    int *sectorsize_out, __u32 *align_out,
 		    int *bitmap_offset_out)
 {
@@ -363,7 +364,7 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 
 		/*
 		 * this was used until Dec2005 - 64bits are needed to represent
-		 * 2038+. I.e. we can safely do this truncating cast.
+		 * 2106+. I.e. we can safely do this truncating cast.
 		 *
 		 * Additionally, we must use be32toh() instead of be64toh(), since
 		 * the program used to use the former (tested - I got mtime
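
The cow.h/cow_user.c changes are a Y2038 cleanup for UML's COW header: the on-disk stamp stays a 32-bit time32_t, but because it is treated as unsigned it only wraps in 2106, and the value is now carried as long long (time64_t on the kernel side) instead of the host's time_t. A tiny sketch of the widening, with a hypothetical helper name:

  #include <stdint.h>

  typedef uint32_t time32_t;		/* on-disk mtime: unsigned, valid until 2106 */

  /* cow_mtime_to64() is illustrative only: zero-extend the stored stamp
   * into the 64-bit type now used throughout the driver. */
  static long long cow_mtime_to64(time32_t on_disk)
  {
  	return (long long)on_disk;	/* zero-extends, no sign flip at 2038 */
  }
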
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 0117489..b80a1d6 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -752,10 +752,9 @@ static ssize_t mconsole_proc_write(struct file *file,
 	return count;
 }
 
-static const struct file_operations mconsole_proc_fops = {
-	.owner		= THIS_MODULE,
-	.write		= mconsole_proc_write,
-	.llseek		= noop_llseek,
+static const struct proc_ops mconsole_proc_ops = {
+	.proc_write	= mconsole_proc_write,
+	.proc_lseek	= noop_llseek,
 };
 
 static int create_proc_mconsole(void)
@@ -765,7 +764,7 @@ static int create_proc_mconsole(void)
 	if (notify_socket == NULL)
 		return 0;
 
-	ent = proc_create("mconsole", 0200, NULL, &mconsole_proc_fops);
+	ent = proc_create("mconsole", 0200, NULL, &mconsole_proc_ops);
 	if (ent == NULL) {
 		printk(KERN_INFO "create_proc_mconsole : proc_create failed\n");
 		return 0;
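
mconsole is the first of several conversions in this series (exitcode, sysemu and mtrr follow) from struct file_operations to the proc-specific struct proc_ops: the .owner field goes away and the callbacks gain a proc_ prefix, while proc_create() itself is unchanged. Rough shape of a converted seq_file-backed entry, assuming a show routine called my_proc_show(); the entry and helper names are illustrative:

  static int my_proc_open(struct inode *inode, struct file *file)
  {
  	return single_open(file, my_proc_show, NULL);
  }

  static const struct proc_ops my_proc_ops = {
  	.proc_open	= my_proc_open,
  	.proc_read	= seq_read,
  	.proc_lseek	= seq_lseek,
  	.proc_release	= single_release,
  };

  static int __init my_proc_init(void)
  {
  	/* registration API is unchanged, only the ops structure differs */
  	return proc_create("my_entry", 0444, NULL, &my_proc_ops) ? 0 : -ENOMEM;
  }
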
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 6627d7c..247f95d 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -113,6 +113,7 @@ static const struct block_device_operations ubd_blops = {
         .open		= ubd_open,
         .release	= ubd_release,
         .ioctl		= ubd_ioctl,
+        .compat_ioctl	= blkdev_compat_ptr_ioctl,
 	.getgeo		= ubd_getgeo,
 };
 
@@ -561,7 +562,7 @@ static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out)
 	__u32 version;
 	__u32 align;
 	char *backing_file;
-	time_t mtime;
+	time64_t mtime;
 	unsigned long long size;
 	int sector_size;
 	int bitmap_offset;
@@ -600,9 +601,9 @@ static int read_cow_bitmap(int fd, void *buf, int offset, int len)
 	return 0;
 }
 
-static int backing_file_mismatch(char *file, __u64 size, time_t mtime)
+static int backing_file_mismatch(char *file, __u64 size, time64_t mtime)
 {
-	unsigned long modtime;
+	time64_t modtime;
 	unsigned long long actual;
 	int err;
 
@@ -628,7 +629,7 @@ static int backing_file_mismatch(char *file, __u64 size, time_t mtime)
 		return -EINVAL;
 	}
 	if (modtime != mtime) {
-		printk(KERN_ERR "mtime mismatch (%ld vs %ld) of COW header vs "
+		printk(KERN_ERR "mtime mismatch (%lld vs %lld) of COW header vs "
 		       "backing file\n", mtime, modtime);
 		return -EINVAL;
 	}
@@ -671,7 +672,7 @@ static int open_ubd_file(char *file, struct openflags *openflags, int shared,
 		  unsigned long *bitmap_len_out, int *data_offset_out,
 		  int *create_cow_out)
 {
-	time_t mtime;
+	time64_t mtime;
 	unsigned long long size;
 	__u32 version, align;
 	char *backing_file;
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 5aee062..b4deb1b 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -25,11 +25,6 @@ static inline void arch_unmap(struct mm_struct *mm,
 			unsigned long start, unsigned long end)
 {
 }
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
-				     struct vm_area_struct *vma)
-{
-}
-
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 		bool write, bool execute, bool foreign)
 {
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index 506bcd1..0f30204 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -150,7 +150,7 @@ extern int os_sync_file(int fd);
 extern int os_file_size(const char *file, unsigned long long *size_out);
 extern int os_pread_file(int fd, void *buf, int len, unsigned long long offset);
 extern int os_pwrite_file(int fd, const void *buf, int count, unsigned long long offset);
-extern int os_file_modtime(const char *file, unsigned long *modtime);
+extern int os_file_modtime(const char *file, long long *modtime);
 extern int os_pipe(int *fd, int stream, int close_on_exec);
 extern int os_set_fd_async(int fd);
 extern int os_clear_fd_async(int fd);
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
index 369fd84..43edc2a 100644
--- a/arch/um/kernel/exitcode.c
+++ b/arch/um/kernel/exitcode.c
@@ -55,20 +55,19 @@ static ssize_t exitcode_proc_write(struct file *file,
 	return count;
 }
 
-static const struct file_operations exitcode_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= exitcode_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= exitcode_proc_write,
+static const struct proc_ops exitcode_proc_ops = {
+	.proc_open	= exitcode_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= exitcode_proc_write,
 };
 
 static int make_proc_exitcode(void)
 {
 	struct proc_dir_entry *ent;
 
-	ent = proc_create("exitcode", 0600, NULL, &exitcode_proc_fops);
+	ent = proc_create("exitcode", 0600, NULL, &exitcode_proc_ops);
 	if (ent == NULL) {
 		printk(KERN_WARNING "make_proc_exitcode : Failed to register "
 		       "/proc/exitcode\n");
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 17045e7..56a0941 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -348,13 +348,12 @@ static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
 	return count;
 }
 
-static const struct file_operations sysemu_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= sysemu_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= sysemu_proc_write,
+static const struct proc_ops sysemu_proc_ops = {
+	.proc_open	= sysemu_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= sysemu_proc_write,
 };
 
 int __init make_proc_sysemu(void)
@@ -363,7 +362,7 @@ int __init make_proc_sysemu(void)
 	if (!sysemu_supported)
 		return 0;
 
-	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);
+	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_ops);
 
 	if (ent == NULL)
 	{
diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
index 5133e3a..fbda105 100644
--- a/arch/um/os-Linux/file.c
+++ b/arch/um/os-Linux/file.c
@@ -341,7 +341,7 @@ int os_file_size(const char *file, unsigned long long *size_out)
 	return 0;
 }
 
-int os_file_modtime(const char *file, unsigned long *modtime)
+int os_file_modtime(const char *file, long long *modtime)
 {
 	struct uml_stat buf;
 	int err;
diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
index 247a07a..388c0c8 100644
--- a/arch/unicore32/include/asm/mmu_context.h
+++ b/arch/unicore32/include/asm/mmu_context.h
@@ -89,11 +89,6 @@ static inline void arch_unmap(struct mm_struct *mm,
 {
 }
 
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
-				     struct vm_area_struct *vma)
-{
-}
-
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 		bool write, bool execute, bool foreign)
 {
diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c
index 95ae3b5..0c4242a 100644
--- a/arch/unicore32/kernel/setup.c
+++ b/arch/unicore32/kernel/setup.c
@@ -270,8 +270,6 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
 	conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
-	conswitchp = &dummy_con;
 #endif
 #endif
 	early_trap_init();
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a283336..beea770 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -120,6 +120,7 @@
 	select GENERIC_IRQ_RESERVATION_MODE
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PENDING_IRQ		if SMP
+	select GENERIC_PTDUMP
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
@@ -202,7 +203,7 @@
 	select HAVE_PCI
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
-	select HAVE_RCU_TABLE_FREE		if PARAVIRT
+	select MMU_GATHER_RCU_TABLE_FREE		if PARAVIRT
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE		if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
 	select HAVE_FUNCTION_ARG_ACCESS_API
@@ -1888,34 +1889,6 @@
 	  specific cases in protected and virtual-8086 modes. Emulated
 	  results are dummy.
 
-config X86_INTEL_MPX
-	prompt "Intel MPX (Memory Protection Extensions)"
-	def_bool n
-	# Note: only available in 64-bit mode due to VMA flags shortage
-	depends on CPU_SUP_INTEL && X86_64
-	select ARCH_USES_HIGH_VMA_FLAGS
-	---help---
-	  MPX provides hardware features that can be used in
-	  conjunction with compiler-instrumented code to check
-	  memory references.  It is designed to detect buffer
-	  overflow or underflow bugs.
-
-	  This option enables running applications which are
-	  instrumented or otherwise use MPX.  It does not use MPX
-	  itself inside the kernel or to protect the kernel
-	  against bad memory references.
-
-	  Enabling this option will make the kernel larger:
-	  ~8k of kernel text and 36 bytes of data on a 64-bit
-	  defconfig.  It adds a long to the 'mm_struct' which
-	  will increase the kernel memory overhead of each
-	  process and adds some branches to paths used during
-	  exec() and munmap().
-
-	  For details, see Documentation/x86/intel_mpx.rst
-
-	  If unsure, say N.
-
 config X86_INTEL_MEMORY_PROTECTION_KEYS
 	prompt "Intel Memory Protection Keys"
 	def_bool y
@@ -2959,9 +2932,6 @@
 	def_bool y
 	depends on X86_32
 
-config X86_DEV_DMA_OPS
-	bool
-
 source "drivers/firmware/Kconfig"
 
 source "arch/x86/kvm/Kconfig"
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index c4eab8e..2e74690 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -62,26 +62,10 @@
 config MCSAFE_TEST
 	def_bool n
 
-config X86_PTDUMP_CORE
-	def_bool n
-
-config X86_PTDUMP
-	tristate "Export kernel pagetable layout to userspace via debugfs"
-	depends on DEBUG_KERNEL
-	select DEBUG_FS
-	select X86_PTDUMP_CORE
-	---help---
-	  Say Y here if you want to show the kernel pagetable layout in a
-	  debugfs file. This information is only useful for kernel developers
-	  who are working in architecture specific areas of the kernel.
-	  It is probably not a good idea to enable this feature in a production
-	  kernel.
-	  If in doubt, say "N"
-
 config EFI_PGT_DUMP
 	bool "Dump the EFI pagetable"
 	depends on EFI
-	select X86_PTDUMP_CORE
+	select PTDUMP_CORE
 	---help---
 	  Enable this if you want to dump the EFI page table before
 	  enabling virtual mode. This can be used to debug miscellaneous
@@ -90,7 +74,7 @@
 
 config DEBUG_WX
 	bool "Warn on W+X mappings at boot"
-	select X86_PTDUMP_CORE
+	select PTDUMP_CORE
 	---help---
 	  Generate a warning if any W+X mappings are found at boot.
 
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 15908eb..c17cb77 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -440,3 +440,5 @@
 433	i386	fspick			sys_fspick			__ia32_sys_fspick
 434	i386	pidfd_open		sys_pidfd_open			__ia32_sys_pidfd_open
 435	i386	clone3			sys_clone3			__ia32_sys_clone3
+437	i386	openat2			sys_openat2			__ia32_sys_openat2
+438	i386	pidfd_getfd		sys_pidfd_getfd			__ia32_sys_pidfd_getfd
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index c29976e..44d510b 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -357,6 +357,8 @@
 433	common	fspick			__x64_sys_fspick
 434	common	pidfd_open		__x64_sys_pidfd_open
 435	common	clone3			__x64_sys_clone3/ptregs
+437	common	openat2			__x64_sys_openat2
+438	common	pidfd_getfd		__x64_sys_pidfd_getfd
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
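
Both tables wire up the two new system calls as 437 (openat2) and 438 (pidfd_getfd) for i386 and x86-64. Since glibc provides no wrapper at this point, userspace reaches openat2() through syscall(2); the sketch below assumes <linux/openat2.h> exposes struct open_how and the RESOLVE_* flags, falls back to the number added above if the unistd headers are older, and uses an arbitrary example path:

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/openat2.h>

  #ifndef __NR_openat2
  #define __NR_openat2 437		/* matches the table entries above */
  #endif

  int main(void)
  {
  	struct open_how how;
  	int fd;

  	memset(&how, 0, sizeof(how));
  	how.flags = O_RDONLY;
  	how.resolve = RESOLVE_BENEATH;	/* reject ".." escapes and absolute symlinks */

  	fd = syscall(__NR_openat2, AT_FDCWD, "data/config.txt", &how, sizeof(how));
  	if (fd < 0) {
  		perror("openat2");
  		return 1;
  	}
  	close(fd);
  	return 0;
  }
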
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 8b52bc5..ea34464 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -7,7 +7,6 @@
 generated-y += unistd_64_x32.h
 generated-y += xen-hypercalls.h
 
-generic-y += dma-contiguous.h
 generic-y += early_ioremap.h
 generic-y += export.h
 generic-y += mcs_spinlock.h
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h
index af45e14..7a4bb1b 100644
--- a/arch/x86/include/asm/archrandom.h
+++ b/arch/x86/include/asm/archrandom.h
@@ -27,7 +27,7 @@
 
 /* Unconditional execution of RDRAND and RDSEED */
 
-static inline bool rdrand_long(unsigned long *v)
+static inline bool __must_check rdrand_long(unsigned long *v)
 {
 	bool ok;
 	unsigned int retry = RDRAND_RETRY_LOOPS;
@@ -41,7 +41,7 @@ static inline bool rdrand_long(unsigned long *v)
 	return false;
 }
 
-static inline bool rdrand_int(unsigned int *v)
+static inline bool __must_check rdrand_int(unsigned int *v)
 {
 	bool ok;
 	unsigned int retry = RDRAND_RETRY_LOOPS;
@@ -55,7 +55,7 @@ static inline bool rdrand_int(unsigned int *v)
 	return false;
 }
 
-static inline bool rdseed_long(unsigned long *v)
+static inline bool __must_check rdseed_long(unsigned long *v)
 {
 	bool ok;
 	asm volatile(RDSEED_LONG
@@ -64,7 +64,7 @@ static inline bool rdseed_long(unsigned long *v)
 	return ok;
 }
 
-static inline bool rdseed_int(unsigned int *v)
+static inline bool __must_check rdseed_int(unsigned int *v)
 {
 	bool ok;
 	asm volatile(RDSEED_INT
@@ -73,10 +73,6 @@ static inline bool rdseed_int(unsigned int *v)
 	return ok;
 }
 
-/* Conditional execution based on CPU type */
-#define arch_has_random()	static_cpu_has(X86_FEATURE_RDRAND)
-#define arch_has_random_seed()	static_cpu_has(X86_FEATURE_RDSEED)
-
 /*
  * These are the generic interfaces; they must not be declared if the
  * stubs in <linux/random.h> are to be invoked,
@@ -84,24 +80,24 @@ static inline bool rdseed_int(unsigned int *v)
  */
 #ifdef CONFIG_ARCH_RANDOM
 
-static inline bool arch_get_random_long(unsigned long *v)
+static inline bool __must_check arch_get_random_long(unsigned long *v)
 {
-	return arch_has_random() ? rdrand_long(v) : false;
+	return static_cpu_has(X86_FEATURE_RDRAND) ? rdrand_long(v) : false;
 }
 
-static inline bool arch_get_random_int(unsigned int *v)
+static inline bool __must_check arch_get_random_int(unsigned int *v)
 {
-	return arch_has_random() ? rdrand_int(v) : false;
+	return static_cpu_has(X86_FEATURE_RDRAND) ? rdrand_int(v) : false;
 }
 
-static inline bool arch_get_random_seed_long(unsigned long *v)
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
-	return arch_has_random_seed() ? rdseed_long(v) : false;
+	return static_cpu_has(X86_FEATURE_RDSEED) ? rdseed_long(v) : false;
 }
 
-static inline bool arch_get_random_seed_int(unsigned int *v)
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
-	return arch_has_random_seed() ? rdseed_int(v) : false;
+	return static_cpu_has(X86_FEATURE_RDSEED) ? rdseed_int(v) : false;
 }
 
 extern void x86_init_rdrand(struct cpuinfo_x86 *c);
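
RDRAND and RDSEED are allowed to fail (they return with CF clear), which is why every helper in this header is now __must_check: a caller that ignores the boolean may consume an uninitialized *v. The expected calling pattern is a bounded retry with an explicit fallback, roughly as sketched below (the fallback is a placeholder, not a recommendation):

  static unsigned long get_random_long_or_fallback(void)
  {
  	unsigned long v;
  	int i;

  	for (i = 0; i < 10; i++) {
  		if (arch_get_random_long(&v))
  			return v;	/* hardware provided the value */
  	}
  	return jiffies;			/* placeholder fallback for illustration */
  }
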
diff --git a/arch/x86/include/asm/bugs.h b/arch/x86/include/asm/bugs.h
index 794eb21..92ae283 100644
--- a/arch/x86/include/asm/bugs.h
+++ b/arch/x86/include/asm/bugs.h
@@ -6,12 +6,6 @@
 
 extern void check_bugs(void);
 
-#if defined(CONFIG_CPU_SUP_INTEL)
-void check_mpx_erratum(struct cpuinfo_x86 *c);
-#else
-static inline void check_mpx_erratum(struct cpuinfo_x86 *c) {}
-#endif
-
 #if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
 int ppro_with_ram_bug(void);
 #else
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 22c4dfe..52e9f34 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -177,23 +177,6 @@ typedef struct user_regs_struct compat_elf_gregset_t;
 	(!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
 #endif
 
-/*
- * A pointer passed in from user mode. This should not
- * be used for syscall parameters, just declare them
- * as pointers because the syscall entry code will have
- * appropriately converted them already.
- */
-
-static inline void __user *compat_ptr(compat_uptr_t uptr)
-{
-	return (void __user *)(unsigned long)uptr;
-}
-
-static inline compat_uptr_t ptr_to_compat(void __user *uptr)
-{
-	return (u32)(unsigned long)uptr;
-}
-
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	compat_uptr_t sp;
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 5e12c63..7e31f7f 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -8,16 +8,6 @@ struct dev_archdata {
 #endif
 };
 
-#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
-struct dma_domain {
-	struct list_head node;
-	const struct dma_map_ops *dma_ops;
-	int domain_nr;
-};
-void add_dma_domain(struct dma_domain *domain);
-void del_dma_domain(struct dma_domain *domain);
-#endif
-
 struct pdev_archdata {
 };
 
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 8e1d0bb..4ea8584 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -10,12 +10,6 @@
  * cpu_feature_enabled().
  */
 
-#ifdef CONFIG_X86_INTEL_MPX
-# define DISABLE_MPX	0
-#else
-# define DISABLE_MPX	(1<<(X86_FEATURE_MPX & 31))
-#endif
-
 #ifdef CONFIG_X86_SMAP
 # define DISABLE_SMAP	0
 #else
@@ -74,7 +68,7 @@
 #define DISABLED_MASK6	0
 #define DISABLED_MASK7	(DISABLE_PTI)
 #define DISABLED_MASK8	0
-#define DISABLED_MASK9	(DISABLE_MPX|DISABLE_SMAP)
+#define DISABLED_MASK9	(DISABLE_SMAP)
 #define DISABLED_MASK10	0
 #define DISABLED_MASK11	0
 #define DISABLED_MASK12	0
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 5f10f7f..92abc1e 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -809,7 +809,8 @@ union hv_synic_sint {
 		u64 reserved1:8;
 		u64 masked:1;
 		u64 auto_eoi:1;
-		u64 reserved2:46;
+		u64 polling:1;
+		u64 reserved2:45;
 	} __packed;
 };
 
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 77cf6c1..03946eb 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -222,6 +222,10 @@ struct x86_emulate_ops {
 
 	bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx,
 			  u32 *ecx, u32 *edx, bool check_limit);
+	bool (*guest_has_long_mode)(struct x86_emulate_ctxt *ctxt);
+	bool (*guest_has_movbe)(struct x86_emulate_ctxt *ctxt);
+	bool (*guest_has_fxsr)(struct x86_emulate_ctxt *ctxt);
+
 	void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
 
 	unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b79cd6a..4dffbc1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -78,6 +78,8 @@
 #define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
 #define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
 #define KVM_REQ_GET_VMCS12_PAGES	KVM_ARCH_REQ(24)
+#define KVM_REQ_APICV_UPDATE \
+	KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 
 #define CR0_RESERVED_BITS                                               \
 	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -175,6 +177,11 @@ enum {
 	VCPU_SREG_LDTR,
 };
 
+enum exit_fastpath_completion {
+	EXIT_FASTPATH_NONE,
+	EXIT_FASTPATH_SKIP_EMUL_INS,
+};
+
 #include <asm/kvm_emulate.h>
 
 #define KVM_NR_MEM_OBJS 40
@@ -378,12 +385,12 @@ struct kvm_mmu {
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
 	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
-	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
+	int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
 			  bool prefault);
 	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
 				  struct x86_exception *fault);
-	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
-			    struct x86_exception *exception);
+	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa,
+			    u32 access, struct x86_exception *exception);
 	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
 			       struct x86_exception *exception);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
@@ -606,7 +613,7 @@ struct kvm_vcpu_arch {
 	 * Paging state of an L2 guest (used for nested npt)
 	 *
 	 * This context will save all necessary information to walk page tables
-	 * of the an L2 guest. This context is only initialized for page table
+	 * of an L2 guest. This context is only initialized for page table
 	 * walking and not for faulting since we never handle l2 page faults on
 	 * the host.
 	 */
@@ -685,10 +692,10 @@ struct kvm_vcpu_arch {
 	bool pvclock_set_guest_stopped_request;
 
 	struct {
+		u8 preempted;
 		u64 msr_val;
 		u64 last_steal;
-		struct gfn_to_hva_cache stime;
-		struct kvm_steal_time steal;
+		struct gfn_to_pfn_cache cache;
 	} st;
 
 	u64 tsc_offset;
@@ -868,6 +875,12 @@ enum kvm_irqchip_mode {
 	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
 };
 
+#define APICV_INHIBIT_REASON_DISABLE    0
+#define APICV_INHIBIT_REASON_HYPERV     1
+#define APICV_INHIBIT_REASON_NESTED     2
+#define APICV_INHIBIT_REASON_IRQWIN     3
+#define APICV_INHIBIT_REASON_PIT_REINJ  4
+
 struct kvm_arch {
 	unsigned long n_used_mmu_pages;
 	unsigned long n_requested_mmu_pages;
@@ -899,6 +912,7 @@ struct kvm_arch {
 	struct kvm_apic_map *apic_map;
 
 	bool apic_access_page_done;
+	unsigned long apicv_inhibit_reasons;
 
 	gpa_t wall_clock;
 
@@ -1022,6 +1036,11 @@ struct kvm_lapic_irq {
 	bool msi_redir_hint;
 };
 
+static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
+{
+	return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
+}
+
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void);          /* __init */
 	int (*disabled_by_bios)(void);             /* __init */
@@ -1040,7 +1059,7 @@ struct kvm_x86_ops {
 	void (*vm_destroy)(struct kvm *kvm);
 
 	/* Create, but do not attach this VCPU */
-	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
+	int (*vcpu_create)(struct kvm_vcpu *vcpu);
 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
 	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);
 
@@ -1090,7 +1109,8 @@ struct kvm_x86_ops {
 	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);
 
 	void (*run)(struct kvm_vcpu *vcpu);
-	int (*handle_exit)(struct kvm_vcpu *vcpu);
+	int (*handle_exit)(struct kvm_vcpu *vcpu,
+		enum exit_fastpath_completion exit_fastpath);
 	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
 	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
 	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
@@ -1107,7 +1127,8 @@ struct kvm_x86_ops {
 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
 	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
-	bool (*get_enable_apicv)(struct kvm *kvm);
+	bool (*check_apicv_inhibit_reasons)(ulong bit);
+	void (*pre_update_apicv_exec_ctrl)(struct kvm *kvm, bool activate);
 	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
 	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
@@ -1140,11 +1161,13 @@ struct kvm_x86_ops {
 	int (*check_intercept)(struct kvm_vcpu *vcpu,
 			       struct x86_instruction_info *info,
 			       enum x86_intercept_stage stage);
-	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
+	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu,
+		enum exit_fastpath_completion *exit_fastpath);
 	bool (*mpx_supported)(void);
 	bool (*xsaves_supported)(void);
 	bool (*umip_emulated)(void);
 	bool (*pt_supported)(void);
+	bool (*pku_supported)(void);
 
 	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
 	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
@@ -1464,11 +1487,15 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 				struct x86_exception *exception);
 
-void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
+bool kvm_apicv_activated(struct kvm *kvm);
+void kvm_apicv_init(struct kvm *kvm, bool enable);
+void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
+void kvm_request_apicv_update(struct kvm *kvm, bool activate,
+			      unsigned long bit);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
@@ -1614,7 +1641,6 @@ void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
 int kvm_is_in_guest(void);
 
 int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
-int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index e78c7db..bdeae929 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -50,10 +50,6 @@ typedef struct {
 	u16 pkey_allocation_map;
 	s16 execute_only_pkey;
 #endif
-#ifdef CONFIG_X86_INTEL_MPX
-	/* address of the bounds directory */
-	void __user *bd_addr;
-#endif
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(mm)						\
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index b243234..b538d9d 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
-#include <asm/mpx.h>
 #include <asm/debugreg.h>
 
 extern atomic64_t last_mm_ctx_id;
@@ -200,34 +199,9 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
 }
 #endif
 
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
-		struct vm_area_struct *vma)
-{
-	mpx_mm_init(mm);
-}
-
 static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
 			      unsigned long end)
 {
-	/*
-	 * mpx_notify_unmap() goes and reads a rarely-hot
-	 * cacheline in the mm_struct.  That can be expensive
-	 * enough to be seen in profiles.
-	 *
-	 * The mpx_notify_unmap() call and its contents have been
-	 * observed to affect munmap() performance on hardware
-	 * where MPX is not present.
-	 *
-	 * The unlikely() optimizes for the fast case: no MPX
-	 * in the CPU, or no MPX use in the process.  Even if
-	 * we get this wrong (in the unlikely event that MPX
-	 * is widely enabled on some system) the overhead of
-	 * MPX itself (reading bounds tables) is expected to
-	 * overwhelm the overhead of getting this unlikely()
-	 * consistently wrong.
-	 */
-	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
-		mpx_notify_unmap(mm, start, end);
 }
 
 /*
diff --git a/arch/x86/include/asm/mpx.h b/arch/x86/include/asm/mpx.h
deleted file mode 100644
index 143a5c1..0000000
--- a/arch/x86/include/asm/mpx.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_MPX_H
-#define _ASM_X86_MPX_H
-
-#include <linux/types.h>
-#include <linux/mm_types.h>
-
-#include <asm/ptrace.h>
-#include <asm/insn.h>
-
-/*
- * NULL is theoretically a valid place to put the bounds
- * directory, so point this at an invalid address.
- */
-#define MPX_INVALID_BOUNDS_DIR	((void __user *)-1)
-#define MPX_BNDCFG_ENABLE_FLAG	0x1
-#define MPX_BD_ENTRY_VALID_FLAG	0x1
-
-/*
- * The upper 28 bits [47:20] of the virtual address in 64-bit
- * are used to index into bounds directory (BD).
- *
- * The directory is 2G (2^31) in size, and with 8-byte entries
- * it has 2^28 entries.
- */
-#define MPX_BD_SIZE_BYTES_64	(1UL<<31)
-#define MPX_BD_ENTRY_BYTES_64	8
-#define MPX_BD_NR_ENTRIES_64	(MPX_BD_SIZE_BYTES_64/MPX_BD_ENTRY_BYTES_64)
-
-/*
- * The 32-bit directory is 4MB (2^22) in size, and with 4-byte
- * entries it has 2^20 entries.
- */
-#define MPX_BD_SIZE_BYTES_32	(1UL<<22)
-#define MPX_BD_ENTRY_BYTES_32	4
-#define MPX_BD_NR_ENTRIES_32	(MPX_BD_SIZE_BYTES_32/MPX_BD_ENTRY_BYTES_32)
-
-/*
- * A 64-bit table is 4MB total in size, and an entry is
- * 4 64-bit pointers in size.
- */
-#define MPX_BT_SIZE_BYTES_64	(1UL<<22)
-#define MPX_BT_ENTRY_BYTES_64	32
-#define MPX_BT_NR_ENTRIES_64	(MPX_BT_SIZE_BYTES_64/MPX_BT_ENTRY_BYTES_64)
-
-/*
- * A 32-bit table is 16kB total in size, and an entry is
- * 4 32-bit pointers in size.
- */
-#define MPX_BT_SIZE_BYTES_32	(1UL<<14)
-#define MPX_BT_ENTRY_BYTES_32	16
-#define MPX_BT_NR_ENTRIES_32	(MPX_BT_SIZE_BYTES_32/MPX_BT_ENTRY_BYTES_32)
-
-#define MPX_BNDSTA_TAIL		2
-#define MPX_BNDCFG_TAIL		12
-#define MPX_BNDSTA_ADDR_MASK	(~((1UL<<MPX_BNDSTA_TAIL)-1))
-#define MPX_BNDCFG_ADDR_MASK	(~((1UL<<MPX_BNDCFG_TAIL)-1))
-#define MPX_BNDSTA_ERROR_CODE	0x3
-
-struct mpx_fault_info {
-	void __user *addr;
-	void __user *lower;
-	void __user *upper;
-};
-
-#ifdef CONFIG_X86_INTEL_MPX
-
-extern int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
-extern int mpx_handle_bd_fault(void);
-
-static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
-{
-	return (mm->context.bd_addr != MPX_INVALID_BOUNDS_DIR);
-}
-
-static inline void mpx_mm_init(struct mm_struct *mm)
-{
-	/*
-	 * NULL is theoretically a valid place to put the bounds
-	 * directory, so point this at an invalid address.
-	 */
-	mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
-}
-
-extern void mpx_notify_unmap(struct mm_struct *mm, unsigned long start, unsigned long end);
-extern unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len, unsigned long flags);
-
-#else
-static inline int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
-{
-	return -EINVAL;
-}
-static inline int mpx_handle_bd_fault(void)
-{
-	return -EINVAL;
-}
-static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
-{
-	return 0;
-}
-static inline void mpx_mm_init(struct mm_struct *mm)
-{
-}
-static inline void mpx_notify_unmap(struct mm_struct *mm,
-				    unsigned long start, unsigned long end)
-{
-}
-
-static inline unsigned long mpx_unmapped_area_check(unsigned long addr,
-		unsigned long len, unsigned long flags)
-{
-	return addr;
-}
-#endif /* CONFIG_X86_INTEL_MPX */
-
-#endif /* _ASM_X86_MPX_H */
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index c1fdd43..7ccb338 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -25,7 +25,7 @@ struct pci_sysdata {
 	void		*fwnode;	/* IRQ domain for MSI assignment */
 #endif
 #if IS_ENABLED(CONFIG_VMD)
-	bool vmd_domain;		/* True if in Intel VMD domain */
+	struct pci_dev	*vmd_dev;	/* VMD Device if in Intel VMD domain */
 #endif
 };
 
@@ -33,14 +33,17 @@ extern int pci_routeirq;
 extern int noioapicquirk;
 extern int noioapicreroute;
 
+static inline struct pci_sysdata *to_pci_sysdata(const struct pci_bus *bus)
+{
+	return bus->sysdata;
+}
+
 #ifdef CONFIG_PCI
 
 #ifdef CONFIG_PCI_DOMAINS
 static inline int pci_domain_nr(struct pci_bus *bus)
 {
-	struct pci_sysdata *sd = bus->sysdata;
-
-	return sd->domain;
+	return to_pci_sysdata(bus)->domain;
 }
 
 static inline int pci_proc_domain(struct pci_bus *bus)
@@ -52,24 +55,20 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
 static inline void *_pci_root_bus_fwnode(struct pci_bus *bus)
 {
-	struct pci_sysdata *sd = bus->sysdata;
-
-	return sd->fwnode;
+	return to_pci_sysdata(bus)->fwnode;
 }
 
 #define pci_root_bus_fwnode	_pci_root_bus_fwnode
 #endif
 
+#if IS_ENABLED(CONFIG_VMD)
 static inline bool is_vmd(struct pci_bus *bus)
 {
-#if IS_ENABLED(CONFIG_VMD)
-	struct pci_sysdata *sd = bus->sysdata;
-
-	return sd->vmd_domain;
-#else
-	return false;
-#endif
+	return to_pci_sysdata(bus)->vmd_dev != NULL;
 }
+#else
+#define is_vmd(bus)		false
+#endif /* CONFIG_VMD */
 
 /* Can be used to override the logic in pci_scan_bus for skipping
    already-configured bus numbers - to be used for buggy BIOSes
@@ -124,9 +123,7 @@ void native_restore_msi_irqs(struct pci_dev *dev);
 /* Returns the node based on pci bus */
 static inline int __pcibus_to_node(const struct pci_bus *bus)
 {
-	const struct pci_sysdata *sd = bus->sysdata;
-
-	return sd->node;
+	return to_pci_sysdata(bus)->node;
 }
 
 static inline const struct cpumask *
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index ad97dc1..7e11866 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -29,8 +29,9 @@
 extern pgd_t early_top_pgt[PTRS_PER_PGD];
 int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
 
-void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
-void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
+void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
+void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
+				   bool user);
 void ptdump_walk_pgd_level_checkwx(void);
 void ptdump_walk_user_pgd_level_checkwx(void);
 
@@ -239,6 +240,7 @@ static inline unsigned long pgd_pfn(pgd_t pgd)
 	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
+#define p4d_leaf	p4d_large
 static inline int p4d_large(p4d_t p4d)
 {
 	/* No 512 GiB pages yet */
@@ -247,6 +249,7 @@ static inline int p4d_large(p4d_t p4d)
 
 #define pte_page(pte)	pfn_to_page(pte_pfn(pte))
 
+#define pmd_leaf	pmd_large
 static inline int pmd_large(pmd_t pte)
 {
 	return pmd_flags(pte) & _PAGE_PSE;
@@ -874,6 +877,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
 }
 
+#define pud_leaf	pud_large
 static inline int pud_large(pud_t pud)
 {
 	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
@@ -885,6 +889,7 @@ static inline int pud_bad(pud_t pud)
 	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
 }
 #else
+#define pud_leaf	pud_large
 static inline int pud_large(pud_t pud)
 {
 	return 0;
@@ -1233,6 +1238,7 @@ static inline bool pgdp_maps_userspace(void *__ptr)
 	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
 }
 
+#define pgd_leaf	pgd_large
 static inline int pgd_large(pgd_t pgd) { return 0; }
 
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
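
The new pgd_leaf/p4d_leaf/pud_leaf/pmd_leaf aliases give the generic page-table dumper (GENERIC_PTDUMP, selected in the Kconfig hunk earlier) an architecture-neutral way to ask whether an entry is a leaf (huge) mapping, instead of relying on the x86-only *_large() names. A dumper-style walker would use them roughly as below; note_page() stands in for whatever the dumper records and is hypothetical here:

  static void dump_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
  {
  	pud_t *pud = pud_offset(p4d, addr);
  	unsigned long next;

  	do {
  		next = pud_addr_end(addr, end);
  		if (pud_none(*pud))
  			continue;
  		if (pud_leaf(*pud)) {		/* e.g. a 1G mapping: no pmd below */
  			note_page(addr, pud_val(*pud));
  			continue;
  		}
  		/* otherwise descend into the pmd level ... */
  	} while (pud++, addr = next, addr != end);
  }
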
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index ea74007..0239998 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -566,6 +566,10 @@ static inline void update_page_count(int level, unsigned long pages) { }
 extern pte_t *lookup_address(unsigned long address, unsigned int *level);
 extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
 				    unsigned int *level);
+
+struct mm_struct;
+extern pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
+				   unsigned int *level);
 extern pmd_t *lookup_pmd_address(unsigned long address);
 extern phys_addr_t slow_virt_to_phys(void *__address);
 extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6fb4870..09705ccc 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -947,24 +947,6 @@ extern int set_tsc_mode(unsigned int val);
 
 DECLARE_PER_CPU(u64, msr_misc_features_shadow);
 
-/* Register/unregister a process' MPX related resource */
-#define MPX_ENABLE_MANAGEMENT()	mpx_enable_management()
-#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()
-
-#ifdef CONFIG_X86_INTEL_MPX
-extern int mpx_enable_management(void);
-extern int mpx_disable_management(void);
-#else
-static inline int mpx_enable_management(void)
-{
-	return -EINVAL;
-}
-static inline int mpx_disable_management(void)
-{
-	return -EINVAL;
-}
-#endif /* CONFIG_X86_INTEL_MPX */
-
 #ifdef CONFIG_CPU_SUP_AMD
 extern u16 amd_get_nb_id(int cpu);
 extern u32 amd_get_nodes_per_socket(void);
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index f23e7aa..820082b 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -29,8 +29,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
  * shootdown, enablement code for several hypervisors overrides
  * .flush_tlb_others hook in pv_mmu_ops and implements it by issuing
  * a hypercall. To keep software pagetable walkers safe in this case we
- * switch to RCU based table free (HAVE_RCU_TABLE_FREE). See the comment
- * below 'ifdef CONFIG_HAVE_RCU_TABLE_FREE' in include/asm-generic/tlb.h
+ * switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the comment
+ * below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
  * for more details.
  */
 static inline void __tlb_remove_table(void *table)
diff --git a/arch/x86/include/asm/trace/mpx.h b/arch/x86/include/asm/trace/mpx.h
deleted file mode 100644
index 5413301..0000000
--- a/arch/x86/include/asm/trace/mpx.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM mpx
-
-#if !defined(_TRACE_MPX_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_MPX_H
-
-#include <linux/tracepoint.h>
-
-#ifdef CONFIG_X86_INTEL_MPX
-
-TRACE_EVENT(mpx_bounds_register_exception,
-
-	TP_PROTO(void __user *addr_referenced,
-		 const struct mpx_bndreg *bndreg),
-	TP_ARGS(addr_referenced, bndreg),
-
-	TP_STRUCT__entry(
-		__field(void __user *, addr_referenced)
-		__field(u64, lower_bound)
-		__field(u64, upper_bound)
-	),
-
-	TP_fast_assign(
-		__entry->addr_referenced = addr_referenced;
-		__entry->lower_bound = bndreg->lower_bound;
-		__entry->upper_bound = bndreg->upper_bound;
-	),
-	/*
-	 * Note that we are printing out the '~' of the upper
-	 * bounds register here.  It is actually stored in its
-	 * one's complement form so that its 'init' state
-	 * corresponds to all 0's.  But, that looks like
-	 * gibberish when printed out, so print out the 1's
-	 * complement instead of the actual value here.  Note
-	 * though that you still need to specify filters for the
-	 * actual value, not the displayed one.
-	 */
-	TP_printk("address referenced: 0x%p bounds: lower: 0x%llx ~upper: 0x%llx",
-		__entry->addr_referenced,
-		__entry->lower_bound,
-		~__entry->upper_bound
-	)
-);
-
-TRACE_EVENT(bounds_exception_mpx,
-
-	TP_PROTO(const struct mpx_bndcsr *bndcsr),
-	TP_ARGS(bndcsr),
-
-	TP_STRUCT__entry(
-		__field(u64, bndcfgu)
-		__field(u64, bndstatus)
-	),
-
-	TP_fast_assign(
-		/* need to get rid of the 'const' on bndcsr */
-		__entry->bndcfgu   = (u64)bndcsr->bndcfgu;
-		__entry->bndstatus = (u64)bndcsr->bndstatus;
-	),
-
-	TP_printk("bndcfgu:0x%llx bndstatus:0x%llx",
-		__entry->bndcfgu,
-		__entry->bndstatus)
-);
-
-DECLARE_EVENT_CLASS(mpx_range_trace,
-
-	TP_PROTO(unsigned long start,
-		 unsigned long end),
-	TP_ARGS(start, end),
-
-	TP_STRUCT__entry(
-		__field(unsigned long, start)
-		__field(unsigned long, end)
-	),
-
-	TP_fast_assign(
-		__entry->start = start;
-		__entry->end   = end;
-	),
-
-	TP_printk("[0x%p:0x%p]",
-		(void *)__entry->start,
-		(void *)__entry->end
-	)
-);
-
-DEFINE_EVENT(mpx_range_trace, mpx_unmap_zap,
-	TP_PROTO(unsigned long start, unsigned long end),
-	TP_ARGS(start, end)
-);
-
-DEFINE_EVENT(mpx_range_trace, mpx_unmap_search,
-	TP_PROTO(unsigned long start, unsigned long end),
-	TP_ARGS(start, end)
-);
-
-TRACE_EVENT(mpx_new_bounds_table,
-
-	TP_PROTO(unsigned long table_vaddr),
-	TP_ARGS(table_vaddr),
-
-	TP_STRUCT__entry(
-		__field(unsigned long, table_vaddr)
-	),
-
-	TP_fast_assign(
-		__entry->table_vaddr = table_vaddr;
-	),
-
-	TP_printk("table vaddr:%p", (void *)__entry->table_vaddr)
-);
-
-#else
-
-/*
- * This gets used outside of MPX-specific code, so we need a stub.
- */
-static inline
-void trace_bounds_exception_mpx(const struct mpx_bndcsr *bndcsr)
-{
-}
-
-#endif /* CONFIG_X86_INTEL_MPX */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH asm/trace/
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE mpx
-#endif /* _TRACE_MPX_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 9fbba31..2a85287 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -22,8 +22,8 @@
 /*
  * Definitions of Primary Processor-Based VM-Execution Controls.
  */
-#define CPU_BASED_VIRTUAL_INTR_PENDING          VMCS_CONTROL_BIT(VIRTUAL_INTR_PENDING)
-#define CPU_BASED_USE_TSC_OFFSETING             VMCS_CONTROL_BIT(TSC_OFFSETTING)
+#define CPU_BASED_INTR_WINDOW_EXITING           VMCS_CONTROL_BIT(INTR_WINDOW_EXITING)
+#define CPU_BASED_USE_TSC_OFFSETTING            VMCS_CONTROL_BIT(USE_TSC_OFFSETTING)
 #define CPU_BASED_HLT_EXITING                   VMCS_CONTROL_BIT(HLT_EXITING)
 #define CPU_BASED_INVLPG_EXITING                VMCS_CONTROL_BIT(INVLPG_EXITING)
 #define CPU_BASED_MWAIT_EXITING                 VMCS_CONTROL_BIT(MWAIT_EXITING)
@@ -34,7 +34,7 @@
 #define CPU_BASED_CR8_LOAD_EXITING              VMCS_CONTROL_BIT(CR8_LOAD_EXITING)
 #define CPU_BASED_CR8_STORE_EXITING             VMCS_CONTROL_BIT(CR8_STORE_EXITING)
 #define CPU_BASED_TPR_SHADOW                    VMCS_CONTROL_BIT(VIRTUAL_TPR)
-#define CPU_BASED_VIRTUAL_NMI_PENDING		VMCS_CONTROL_BIT(VIRTUAL_NMI_PENDING)
+#define CPU_BASED_NMI_WINDOW_EXITING		VMCS_CONTROL_BIT(NMI_WINDOW_EXITING)
 #define CPU_BASED_MOV_DR_EXITING                VMCS_CONTROL_BIT(MOV_DR_EXITING)
 #define CPU_BASED_UNCOND_IO_EXITING             VMCS_CONTROL_BIT(UNCOND_IO_EXITING)
 #define CPU_BASED_USE_IO_BITMAPS                VMCS_CONTROL_BIT(USE_IO_BITMAPS)
diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h
index 0d04d8b..a50e4a0d 100644
--- a/arch/x86/include/asm/vmxfeatures.h
+++ b/arch/x86/include/asm/vmxfeatures.h
@@ -34,8 +34,8 @@
 #define VMX_FEATURE_EPTP_SWITCHING	( 0*32+ 28) /* EPTP switching (in guest) */
 
 /* Primary Processor-Based VM-Execution Controls, word 1 */
-#define VMX_FEATURE_VIRTUAL_INTR_PENDING ( 1*32+  2) /* "" VM-Exit if INTRs are unblocked in guest */
-#define VMX_FEATURE_TSC_OFFSETTING	( 1*32+  3) /* "tsc_offset" Offset hardware TSC when read in guest */
+#define VMX_FEATURE_INTR_WINDOW_EXITING ( 1*32+  2) /* "" VM-Exit if INTRs are unblocked in guest */
+#define VMX_FEATURE_USE_TSC_OFFSETTING	( 1*32+  3) /* "tsc_offset" Offset hardware TSC when read in guest */
 #define VMX_FEATURE_HLT_EXITING		( 1*32+  7) /* "" VM-Exit on HLT */
 #define VMX_FEATURE_INVLPG_EXITING	( 1*32+  9) /* "" VM-Exit on INVLPG */
 #define VMX_FEATURE_MWAIT_EXITING	( 1*32+ 10) /* "" VM-Exit on MWAIT */
@@ -46,7 +46,7 @@
 #define VMX_FEATURE_CR8_LOAD_EXITING	( 1*32+ 19) /* "" VM-Exit on writes to CR8 */
 #define VMX_FEATURE_CR8_STORE_EXITING	( 1*32+ 20) /* "" VM-Exit on reads from CR8 */
 #define VMX_FEATURE_VIRTUAL_TPR		( 1*32+ 21) /* "vtpr" TPR virtualization, a.k.a. TPR shadow */
-#define VMX_FEATURE_VIRTUAL_NMI_PENDING	( 1*32+ 22) /* "" VM-Exit if NMIs are unblocked in guest */
+#define VMX_FEATURE_NMI_WINDOW_EXITING	( 1*32+ 22) /* "" VM-Exit if NMIs are unblocked in guest */
 #define VMX_FEATURE_MOV_DR_EXITING	( 1*32+ 23) /* "" VM-Exit on accesses to debug registers */
 #define VMX_FEATURE_UNCOND_IO_EXITING	( 1*32+ 24) /* "" VM-Exit on *all* IN{S} and OUT{S}*/
 #define VMX_FEATURE_USE_IO_BITMAPS	( 1*32+ 25) /* "" VM-Exit based on I/O port */
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 3eb8411..e95b72e 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -33,7 +33,7 @@
 #define EXIT_REASON_TRIPLE_FAULT        2
 #define EXIT_REASON_INIT_SIGNAL			3
 
-#define EXIT_REASON_PENDING_INTERRUPT   7
+#define EXIT_REASON_INTERRUPT_WINDOW    7
 #define EXIT_REASON_NMI_WINDOW          8
 #define EXIT_REASON_TASK_SWITCH         9
 #define EXIT_REASON_CPUID               10
@@ -94,7 +94,7 @@
 	{ EXIT_REASON_EXTERNAL_INTERRUPT,    "EXTERNAL_INTERRUPT" }, \
 	{ EXIT_REASON_TRIPLE_FAULT,          "TRIPLE_FAULT" }, \
 	{ EXIT_REASON_INIT_SIGNAL,           "INIT_SIGNAL" }, \
-	{ EXIT_REASON_PENDING_INTERRUPT,     "PENDING_INTERRUPT" }, \
+	{ EXIT_REASON_INTERRUPT_WINDOW,      "INTERRUPT_WINDOW" }, \
 	{ EXIT_REASON_NMI_WINDOW,            "NMI_WINDOW" }, \
 	{ EXIT_REASON_TASK_SWITCH,           "TASK_SWITCH" }, \
 	{ EXIT_REASON_CPUID,                 "CPUID" }, \
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 6175e37..9b294c1 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -94,6 +94,7 @@
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
 obj-$(CONFIG_X86_TSC)		+= trace_clock.o
+obj-$(CONFIG_CRASH_CORE)	+= crash_core_$(BITS).o
 obj-$(CONFIG_KEXEC_CORE)	+= machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC_CORE)	+= relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_KEXEC_FILE)	+= kexec-bzimage64.o
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 34360ca..15ac0d5 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -23,6 +23,7 @@
 #include <asm/nmi.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/insn.h>
 #include <asm/io.h>
 #include <asm/fixmap.h>
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 86b8241..52c9bfb 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -164,22 +164,6 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
-static int __init x86_mpx_setup(char *s)
-{
-	/* require an exact match without trailing characters */
-	if (strlen(s))
-		return 0;
-
-	/* do not emit a message if the feature is not present */
-	if (!boot_cpu_has(X86_FEATURE_MPX))
-		return 1;
-
-	setup_clear_cpu_cap(X86_FEATURE_MPX);
-	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
-	return 1;
-}
-__setup("nompx", x86_mpx_setup);
-
 #ifdef CONFIG_X86_64
 static int __init x86_nopcid_setup(char *s)
 {
@@ -306,8 +290,6 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 static __init int setup_disable_smep(char *arg)
 {
 	setup_clear_cpu_cap(X86_FEATURE_SMEP);
-	/* Check for things that depend on SMEP being enabled: */
-	check_mpx_erratum(&boot_cpu_data);
 	return 1;
 }
 __setup("nosmep", setup_disable_smep);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 57473e2..be82cd5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -32,41 +32,6 @@
 #endif
 
 /*
- * Just in case our CPU detection goes bad, or you have a weird system,
- * allow a way to override the automatic disabling of MPX.
- */
-static int forcempx;
-
-static int __init forcempx_setup(char *__unused)
-{
-	forcempx = 1;
-
-	return 1;
-}
-__setup("intel-skd-046-workaround=disable", forcempx_setup);
-
-void check_mpx_erratum(struct cpuinfo_x86 *c)
-{
-	if (forcempx)
-		return;
-	/*
-	 * Turn off the MPX feature on CPUs where SMEP is not
-	 * available or disabled.
-	 *
-	 * Works around Intel Erratum SKD046: "Branch Instructions
-	 * May Initialize MPX Bound Registers Incorrectly".
-	 *
-	 * This might falsely disable MPX on systems without
-	 * SMEP, like Atom processors without SMEP.  But there
-	 * is no such hardware known at the moment.
-	 */
-	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
-		setup_clear_cpu_cap(X86_FEATURE_MPX);
-		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
-	}
-}
-
-/*
  * Processors which have self-snooping capability can handle conflicting
  * memory type across CPUs by snooping its own cache. However, there exists
  * CPU models in which having conflicting memory types still leads to
@@ -330,7 +295,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
 	}
 
-	check_mpx_erratum(c);
 	check_memory_type_self_snoop_errata(c);
 
 	/*
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index da532f6..a5c506f 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -396,15 +396,16 @@ static int mtrr_open(struct inode *inode, struct file *file)
 	return single_open(file, mtrr_seq_show, NULL);
 }
 
-static const struct file_operations mtrr_fops = {
-	.owner			= THIS_MODULE,
-	.open			= mtrr_open,
-	.read			= seq_read,
-	.llseek			= seq_lseek,
-	.write			= mtrr_write,
-	.unlocked_ioctl		= mtrr_ioctl,
-	.compat_ioctl		= mtrr_ioctl,
-	.release		= mtrr_close,
+static const struct proc_ops mtrr_proc_ops = {
+	.proc_open		= mtrr_open,
+	.proc_read		= seq_read,
+	.proc_lseek		= seq_lseek,
+	.proc_write		= mtrr_write,
+	.proc_ioctl		= mtrr_ioctl,
+#ifdef CONFIG_COMPAT
+	.proc_compat_ioctl	= mtrr_ioctl,
+#endif
+	.proc_release		= mtrr_close,
 };
 
 static int __init mtrr_if_init(void)
@@ -417,7 +418,7 @@ static int __init mtrr_if_init(void)
 	    (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
 		return -ENODEV;
 
-	proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops);
+	proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_proc_ops);
 	return 0;
 }
 arch_initcall(mtrr_if_init);
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 1504bca..064e9ef 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2060,7 +2060,7 @@ static int rdt_get_tree(struct fs_context *fc)
 
 	if (rdt_mon_capable) {
 		ret = mongroup_create_dir(rdtgroup_default.kn,
-					  NULL, "mon_groups",
+					  &rdtgroup_default, "mon_groups",
 					  &kn_mongrp);
 		if (ret < 0)
 			goto out_info;
@@ -2127,25 +2127,20 @@ enum rdt_param {
 	nr__rdt_params
 };
 
-static const struct fs_parameter_spec rdt_param_specs[] = {
+static const struct fs_parameter_spec rdt_fs_parameters[] = {
 	fsparam_flag("cdp",		Opt_cdp),
 	fsparam_flag("cdpl2",		Opt_cdpl2),
 	fsparam_flag("mba_MBps",	Opt_mba_mbps),
 	{}
 };
 
-static const struct fs_parameter_description rdt_fs_parameters = {
-	.name		= "rdt",
-	.specs		= rdt_param_specs,
-};
-
 static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
 {
 	struct rdt_fs_context *ctx = rdt_fc2context(fc);
 	struct fs_parse_result result;
 	int opt;
 
-	opt = fs_parse(fc, &rdt_fs_parameters, param, &result);
+	opt = fs_parse(fc, rdt_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
 
@@ -2295,7 +2290,11 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
 	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
 		free_rmid(sentry->mon.rmid);
 		list_del(&sentry->mon.crdtgrp_list);
-		kfree(sentry);
+
+		if (atomic_read(&sentry->waitcount) != 0)
+			sentry->flags = RDT_DELETED;
+		else
+			kfree(sentry);
 	}
 }
 
@@ -2333,7 +2332,11 @@ static void rmdir_all_sub(void)
 
 		kernfs_remove(rdtgrp->kn);
 		list_del(&rdtgrp->rdtgroup_list);
-		kfree(rdtgrp);
+
+		if (atomic_read(&rdtgrp->waitcount) != 0)
+			rdtgrp->flags = RDT_DELETED;
+		else
+			kfree(rdtgrp);
 	}
 	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
 	update_closid_rmid(cpu_online_mask, &rdtgroup_default);
@@ -2370,7 +2373,7 @@ static void rdt_kill_sb(struct super_block *sb)
 static struct file_system_type rdt_fs_type = {
 	.name			= "resctrl",
 	.init_fs_context	= rdt_init_fs_context,
-	.parameters		= &rdt_fs_parameters,
+	.parameters		= rdt_fs_parameters,
 	.kill_sb		= rdt_kill_sb,
 };
 
@@ -2536,7 +2539,7 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn,
 	/*
 	 * Create the mon_data directory first.
 	 */
-	ret = mongroup_create_dir(parent_kn, NULL, "mon_data", &kn);
+	ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
 	if (ret)
 		return ret;
 
@@ -2726,7 +2729,6 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 }
 
 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
-			     struct kernfs_node *prgrp_kn,
 			     const char *name, umode_t mode,
 			     enum rdt_group_type rtype, struct rdtgroup **r)
 {
@@ -2735,7 +2737,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 	uint files = 0;
 	int ret;
 
-	prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
+	prdtgrp = rdtgroup_kn_lock_live(parent_kn);
 	if (!prdtgrp) {
 		ret = -ENODEV;
 		goto out_unlock;
@@ -2808,7 +2810,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 	kernfs_activate(kn);
 
 	/*
-	 * The caller unlocks the prgrp_kn upon success.
+	 * The caller unlocks the parent_kn upon success.
 	 */
 	return 0;
 
@@ -2819,7 +2821,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 out_free_rgrp:
 	kfree(rdtgrp);
 out_unlock:
-	rdtgroup_kn_unlock(prgrp_kn);
+	rdtgroup_kn_unlock(parent_kn);
 	return ret;
 }
 
@@ -2836,15 +2838,12 @@ static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
  * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
  */
 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
-			      struct kernfs_node *prgrp_kn,
-			      const char *name,
-			      umode_t mode)
+			      const char *name, umode_t mode)
 {
 	struct rdtgroup *rdtgrp, *prgrp;
 	int ret;
 
-	ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTMON_GROUP,
-				&rdtgrp);
+	ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp);
 	if (ret)
 		return ret;
 
@@ -2857,7 +2856,7 @@ static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
 	 */
 	list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
 
-	rdtgroup_kn_unlock(prgrp_kn);
+	rdtgroup_kn_unlock(parent_kn);
 	return ret;
 }
 
@@ -2866,7 +2865,6 @@ static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
  * to allocate and monitor resources.
  */
 static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
-				   struct kernfs_node *prgrp_kn,
 				   const char *name, umode_t mode)
 {
 	struct rdtgroup *rdtgrp;
@@ -2874,8 +2872,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
 	u32 closid;
 	int ret;
 
-	ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTCTRL_GROUP,
-				&rdtgrp);
+	ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp);
 	if (ret)
 		return ret;
 
@@ -2900,7 +2897,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
 		 * Create an empty mon_groups directory to hold the subset
 		 * of tasks and cpus to monitor.
 		 */
-		ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
+		ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
 		if (ret) {
 			rdt_last_cmd_puts("kernfs subdir error\n");
 			goto out_del_list;
@@ -2916,7 +2913,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
 out_common_fail:
 	mkdir_rdt_prepare_clean(rdtgrp);
 out_unlock:
-	rdtgroup_kn_unlock(prgrp_kn);
+	rdtgroup_kn_unlock(parent_kn);
 	return ret;
 }
 
@@ -2949,14 +2946,14 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
 	 * subdirectory
 	 */
 	if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
-		return rdtgroup_mkdir_ctrl_mon(parent_kn, parent_kn, name, mode);
+		return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);
 
 	/*
 	 * If RDT monitoring is supported and the parent directory is a valid
 	 * "mon_groups" directory, add a monitoring subdirectory.
 	 */
 	if (rdt_mon_capable && is_mon_groups(parent_kn, name))
-		return rdtgroup_mkdir_mon(parent_kn, parent_kn->parent, name, mode);
+		return rdtgroup_mkdir_mon(parent_kn, name, mode);
 
 	return -EPERM;
 }
@@ -3042,13 +3039,13 @@ static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
 	closid_free(rdtgrp->closid);
 	free_rmid(rdtgrp->mon.rmid);
 
+	rdtgroup_ctrl_remove(kn, rdtgrp);
+
 	/*
 	 * Free all the child monitor group rmids.
 	 */
 	free_all_child_rdtgrp(rdtgrp);
 
-	rdtgroup_ctrl_remove(kn, rdtgrp);
-
 	return 0;
 }
 
diff --git a/arch/x86/kernel/crash_core_32.c b/arch/x86/kernel/crash_core_32.c
new file mode 100644
index 0000000..c0159a7
--- /dev/null
+++ b/arch/x86/kernel/crash_core_32.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/crash_core.h>
+
+#include <asm/pgtable.h>
+#include <asm/setup.h>
+
+void arch_crash_save_vmcoreinfo(void)
+{
+#ifdef CONFIG_NUMA
+	VMCOREINFO_SYMBOL(node_data);
+	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
+#endif
+#ifdef CONFIG_X86_PAE
+	VMCOREINFO_CONFIG(X86_PAE);
+#endif
+}
diff --git a/arch/x86/kernel/crash_core_64.c b/arch/x86/kernel/crash_core_64.c
new file mode 100644
index 0000000..845a57e
--- /dev/null
+++ b/arch/x86/kernel/crash_core_64.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/crash_core.h>
+
+#include <asm/pgtable.h>
+#include <asm/setup.h>
+
+void arch_crash_save_vmcoreinfo(void)
+{
+	u64 sme_mask = sme_me_mask;
+
+	VMCOREINFO_NUMBER(phys_base);
+	VMCOREINFO_SYMBOL(init_top_pgt);
+	vmcoreinfo_append_str("NUMBER(pgtable_l5_enabled)=%d\n",
+			      pgtable_l5_enabled());
+
+#ifdef CONFIG_NUMA
+	VMCOREINFO_SYMBOL(node_data);
+	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
+#endif
+	vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+	VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
+	VMCOREINFO_NUMBER(sme_mask);
+}
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 81045aab..d817f25 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -736,6 +736,9 @@ static __init int kvm_setup_pv_tlb_flush(void)
 {
 	int cpu;
 
+	if (!kvm_para_available() || nopv)
+		return 0;
+
 	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
 	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 7b45e8d..02bddfc 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -250,15 +250,3 @@ void machine_kexec(struct kimage *image)
 
 	__ftrace_enabled_restore(save_ftrace_enabled);
 }
-
-void arch_crash_save_vmcoreinfo(void)
-{
-#ifdef CONFIG_NUMA
-	VMCOREINFO_SYMBOL(node_data);
-	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
-#endif
-#ifdef CONFIG_X86_PAE
-	VMCOREINFO_CONFIG(X86_PAE);
-#endif
-}
-
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 16e125a..ad5cdd6 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -398,25 +398,6 @@ void machine_kexec(struct kimage *image)
 	__ftrace_enabled_restore(save_ftrace_enabled);
 }
 
-void arch_crash_save_vmcoreinfo(void)
-{
-	u64 sme_mask = sme_me_mask;
-
-	VMCOREINFO_NUMBER(phys_base);
-	VMCOREINFO_SYMBOL(init_top_pgt);
-	vmcoreinfo_append_str("NUMBER(pgtable_l5_enabled)=%d\n",
-			pgtable_l5_enabled());
-
-#ifdef CONFIG_NUMA
-	VMCOREINFO_SYMBOL(node_data);
-	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
-#endif
-	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
-			      kaslr_offset());
-	VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
-	VMCOREINFO_NUMBER(sme_mask);
-}
-
 /* arch-dependent functionality related to kexec file-based syscall */
 
 #ifdef CONFIG_KEXEC_FILE
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 2441b64..a74262c 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -893,8 +893,6 @@ void __init setup_arch(char **cmdline_p)
 	init_mm.end_data = (unsigned long) _edata;
 	init_mm.brk = _brk_end;
 
-	mpx_mm_init(&init_mm);
-
 	code_resource.start = __pa_symbol(_text);
 	code_resource.end = __pa_symbol(_etext)-1;
 	rodata_resource.start = __pa_symbol(__start_rodata);
@@ -1228,8 +1226,6 @@ void __init setup_arch(char **cmdline_p)
 #if defined(CONFIG_VGA_CONSOLE)
 	if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
 		conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
-	conswitchp = &dummy_con;
 #endif
 #endif
 	x86_init.oem.banner();
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index f7476ce..ca3c11a 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -22,7 +22,6 @@
 #include <asm/elf.h>
 #include <asm/ia32.h>
 #include <asm/syscalls.h>
-#include <asm/mpx.h>
 
 /*
  * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
@@ -137,10 +136,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct vm_unmapped_area_info info;
 	unsigned long begin, end;
 
-	addr = mpx_unmapped_area_check(addr, len, flags);
-	if (IS_ERR_VALUE(addr))
-		return addr;
-
 	if (flags & MAP_FIXED)
 		return addr;
 
@@ -180,10 +175,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
 
-	addr = mpx_unmapped_area_check(addr, len, flags);
-	if (IS_ERR_VALUE(addr))
-		return addr;
-
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
 		return -ENOMEM;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 9e6f822..6ef00eb 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -52,8 +52,6 @@
 #include <asm/mach_traps.h>
 #include <asm/alternative.h>
 #include <asm/fpu/xstate.h>
-#include <asm/trace/mpx.h>
-#include <asm/mpx.h>
 #include <asm/vm86.h>
 #include <asm/umip.h>
 #include <asm/insn.h>
@@ -436,8 +434,6 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
 
 dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 {
-	const struct mpx_bndcsr *bndcsr;
-
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
 			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
@@ -447,76 +443,6 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 	if (!user_mode(regs))
 		die("bounds", regs, error_code);
 
-	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
-		/* The exception is not from Intel MPX */
-		goto exit_trap;
-	}
-
-	/*
-	 * We need to look at BNDSTATUS to resolve this exception.
-	 * A NULL here might mean that it is in its 'init state',
-	 * which is all zeros which indicates MPX was not
-	 * responsible for the exception.
-	 */
-	bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
-	if (!bndcsr)
-		goto exit_trap;
-
-	trace_bounds_exception_mpx(bndcsr);
-	/*
-	 * The error code field of the BNDSTATUS register communicates status
-	 * information of a bound range exception #BR or operation involving
-	 * bound directory.
-	 */
-	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
-	case 2:	/* Bound directory has invalid entry. */
-		if (mpx_handle_bd_fault())
-			goto exit_trap;
-		break; /* Success, it was handled */
-	case 1: /* Bound violation. */
-	{
-		struct task_struct *tsk = current;
-		struct mpx_fault_info mpx;
-
-		if (mpx_fault_info(&mpx, regs)) {
-			/*
-			 * We failed to decode the MPX instruction.  Act as if
-			 * the exception was not caused by MPX.
-			 */
-			goto exit_trap;
-		}
-		/*
-		 * Success, we decoded the instruction and retrieved
-		 * an 'mpx' containing the address being accessed
-		 * which caused the exception.  This information
-		 * allows and application to possibly handle the
-		 * #BR exception itself.
-		 */
-		if (!do_trap_no_signal(tsk, X86_TRAP_BR, "bounds", regs,
-				       error_code))
-			break;
-
-		show_signal(tsk, SIGSEGV, "trap ", "bounds", regs, error_code);
-
-		force_sig_bnderr(mpx.addr, mpx.lower, mpx.upper);
-		break;
-	}
-	case 0: /* No exception caused by Intel MPX operations. */
-		goto exit_trap;
-	default:
-		die("bounds", regs, error_code);
-	}
-
-	return;
-
-exit_trap:
-	/*
-	 * This path out is for all the cases where we could not
-	 * handle the exception in some way (like allocating a
-	 * table or telling userspace about it.  We will also end
-	 * up here if the kernel has MPX turned off at compile
-	 * time..
-	 */
 	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, 0, NULL);
 }
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index cf55629..b1c4694 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -62,7 +62,7 @@ u64 kvm_supported_xcr0(void)
 	return xcr0;
 }
 
-#define F(x) bit(X86_FEATURE_##x)
+#define F feature_bit
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 {
@@ -281,8 +281,9 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
 	return r;
 }
 
-static void cpuid_mask(u32 *word, int wordnum)
+static __always_inline void cpuid_mask(u32 *word, int wordnum)
 {
+	reverse_cpuid_check(wordnum);
 	*word &= boot_cpu_data.x86_capability[wordnum];
 }
 
@@ -352,6 +353,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
 	unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
 	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
 	unsigned f_la57;
+	unsigned f_pku = kvm_x86_ops->pku_supported() ? F(PKU) : 0;
 
 	/* cpuid 7.0.ebx */
 	const u32 kvm_cpuid_7_0_ebx_x86_features =
@@ -363,7 +365,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
 
 	/* cpuid 7.0.ecx*/
 	const u32 kvm_cpuid_7_0_ecx_x86_features =
-		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
+		F(AVX512VBMI) | F(LA57) | 0 /*PKU*/ | 0 /*OSPKE*/ | F(RDPID) |
 		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
 		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
 		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/;
@@ -392,6 +394,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
 		/* Set LA57 based on hardware capability. */
 		entry->ecx |= f_la57;
 		entry->ecx |= f_umip;
+		entry->ecx |= f_pku;
 		/* PKU is not yet implemented for shadow paging. */
 		if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
 			entry->ecx &= ~F(PKU);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index d78a614..7366c61 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -53,15 +53,46 @@ static const struct cpuid_reg reverse_cpuid[] = {
 	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
 	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
 	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
+	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
 };
 
+/*
+ * Reverse CPUID and its derivatives can only be used for hardware-defined
+ * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
+ * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
+ * is nonsensical as the bit number/mask is an arbitrary software-defined value
+ * and can't be used by KVM to query/control guest capabilities.  And obviously
+ * the leaf being queried must have an entry in the lookup table.
+ */
+static __always_inline void reverse_cpuid_check(unsigned x86_leaf)
+{
+	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
+	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
+	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
+	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
+	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
+	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
+}
+
+/*
+ * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
+ * the hardware defined bit number (stored in bits 4:0) and a software defined
+ * "word" (stored in bits 31:5).  The word is used to index into arrays of
+ * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
+ */
+static __always_inline u32 __feature_bit(int x86_feature)
+{
+	reverse_cpuid_check(x86_feature / 32);
+	return 1 << (x86_feature & 31);
+}
+
+#define feature_bit(name)  __feature_bit(X86_FEATURE_##name)
+
 static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
 {
 	unsigned x86_leaf = x86_feature / 32;
 
-	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
-	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
-
+	reverse_cpuid_check(x86_leaf);
 	return reverse_cpuid[x86_leaf];
 }
 
@@ -93,15 +124,11 @@ static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_
 {
 	int *reg;
 
-	if (x86_feature == X86_FEATURE_XSAVE &&
-			!static_cpu_has(X86_FEATURE_XSAVE))
-		return false;
-
 	reg = guest_cpuid_get_register(vcpu, x86_feature);
 	if (!reg)
 		return false;
 
-	return *reg & bit(x86_feature);
+	return *reg & __feature_bit(x86_feature);
 }
 
 static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
@@ -110,7 +137,7 @@ static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x8
 
 	reg = guest_cpuid_get_register(vcpu, x86_feature);
 	if (reg)
-		*reg &= ~bit(x86_feature);
+		*reg &= ~__feature_bit(x86_feature);
 }
 
 static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 952d1a4..ddbc619 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -22,6 +22,7 @@
 #include "kvm_cache_regs.h"
 #include <asm/kvm_emulate.h>
 #include <linux/stringify.h>
+#include <asm/fpu/api.h>
 #include <asm/debugreg.h>
 #include <asm/nospec-branch.h>
 
@@ -310,7 +311,9 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
 #define ON64(x)
 #endif
 
-static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
+typedef void (*fastop_t)(struct fastop *);
+
+static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
 
 #define __FOP_FUNC(name) \
 	".align " __stringify(FASTOP_SIZE) " \n\t" \
@@ -1075,8 +1078,23 @@ static void fetch_register_operand(struct operand *op)
 	}
 }
 
-static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
+static void emulator_get_fpu(void)
 {
+	fpregs_lock();
+
+	fpregs_assert_state_consistent();
+	if (test_thread_flag(TIF_NEED_FPU_LOAD))
+		switch_fpu_return();
+}
+
+static void emulator_put_fpu(void)
+{
+	fpregs_unlock();
+}
+
+static void read_sse_reg(sse128_t *data, int reg)
+{
+	emulator_get_fpu();
 	switch (reg) {
 	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
 	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
@@ -1098,11 +1116,12 @@ static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
 #endif
 	default: BUG();
 	}
+	emulator_put_fpu();
 }
 
-static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
-			  int reg)
+static void write_sse_reg(sse128_t *data, int reg)
 {
+	emulator_get_fpu();
 	switch (reg) {
 	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
 	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
@@ -1124,10 +1143,12 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
 #endif
 	default: BUG();
 	}
+	emulator_put_fpu();
 }
 
-static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
+static void read_mmx_reg(u64 *data, int reg)
 {
+	emulator_get_fpu();
 	switch (reg) {
 	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
 	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
@@ -1139,10 +1160,12 @@ static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
 	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
 	default: BUG();
 	}
+	emulator_put_fpu();
 }
 
-static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
+static void write_mmx_reg(u64 *data, int reg)
 {
+	emulator_get_fpu();
 	switch (reg) {
 	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
 	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
@@ -1154,6 +1177,7 @@ static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
 	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
 	default: BUG();
 	}
+	emulator_put_fpu();
 }
 
 static int em_fninit(struct x86_emulate_ctxt *ctxt)
@@ -1161,7 +1185,9 @@ static int em_fninit(struct x86_emulate_ctxt *ctxt)
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
+	emulator_get_fpu();
 	asm volatile("fninit");
+	emulator_put_fpu();
 	return X86EMUL_CONTINUE;
 }
 
@@ -1172,7 +1198,9 @@ static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
+	emulator_get_fpu();
 	asm volatile("fnstcw %0": "+m"(fcw));
+	emulator_put_fpu();
 
 	ctxt->dst.val = fcw;
 
@@ -1186,7 +1214,9 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
+	emulator_get_fpu();
 	asm volatile("fnstsw %0": "+m"(fsw));
+	emulator_put_fpu();
 
 	ctxt->dst.val = fsw;
 
@@ -1205,7 +1235,7 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
 		op->type = OP_XMM;
 		op->bytes = 16;
 		op->addr.xmm = reg;
-		read_sse_reg(ctxt, &op->vec_val, reg);
+		read_sse_reg(&op->vec_val, reg);
 		return;
 	}
 	if (ctxt->d & Mmx) {
@@ -1256,7 +1286,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 			op->type = OP_XMM;
 			op->bytes = 16;
 			op->addr.xmm = ctxt->modrm_rm;
-			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
+			read_sse_reg(&op->vec_val, ctxt->modrm_rm);
 			return rc;
 		}
 		if (ctxt->d & Mmx) {
@@ -1833,10 +1863,10 @@ static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
 				       op->bytes * op->count);
 		break;
 	case OP_XMM:
-		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
+		write_sse_reg(&op->vec_val, op->addr.xmm);
 		break;
 	case OP_MM:
-		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
+		write_mmx_reg(&op->mm_val, op->addr.mm);
 		break;
 	case OP_NONE:
 		/* no writeback */
@@ -2348,12 +2378,7 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
 {
 #ifdef CONFIG_X86_64
-	u32 eax, ebx, ecx, edx;
-
-	eax = 0x80000001;
-	ecx = 0;
-	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
-	return edx & bit(X86_FEATURE_LM);
+	return ctxt->ops->guest_has_long_mode(ctxt);
 #else
 	return false;
 #endif
@@ -3618,18 +3643,11 @@ static int em_mov(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
-#define FFL(x) bit(X86_FEATURE_##x)
-
 static int em_movbe(struct x86_emulate_ctxt *ctxt)
 {
-	u32 ebx, ecx, edx, eax = 1;
 	u16 tmp;
 
-	/*
-	 * Check MOVBE is set in the guest-visible CPUID leaf.
-	 */
-	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
-	if (!(ecx & FFL(MOVBE)))
+	if (!ctxt->ops->guest_has_movbe(ctxt))
 		return emulate_ud(ctxt);
 
 	switch (ctxt->op_bytes) {
@@ -4027,10 +4045,7 @@ static int em_movsxd(struct x86_emulate_ctxt *ctxt)
 
 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
 {
-	u32 eax = 1, ebx, ecx = 0, edx;
-
-	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
-	if (!(edx & FFL(FXSR)))
+	if (!ctxt->ops->guest_has_fxsr(ctxt))
 		return emulate_ud(ctxt);
 
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
@@ -4092,8 +4107,12 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
+	emulator_get_fpu();
+
 	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
 
+	emulator_put_fpu();
+
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -4136,6 +4155,8 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
+	emulator_get_fpu();
+
 	if (size < __fxstate_size(16)) {
 		rc = fxregs_fixup(&fx_state, size);
 		if (rc != X86EMUL_CONTINUE)
@@ -4151,6 +4172,8 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
 
 out:
+	emulator_put_fpu();
+
 	return rc;
 }
 
@@ -5210,16 +5233,28 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 				ctxt->ad_bytes = def_ad_bytes ^ 6;
 			break;
 		case 0x26:	/* ES override */
+			has_seg_override = true;
+			ctxt->seg_override = VCPU_SREG_ES;
+			break;
 		case 0x2e:	/* CS override */
+			has_seg_override = true;
+			ctxt->seg_override = VCPU_SREG_CS;
+			break;
 		case 0x36:	/* SS override */
+			has_seg_override = true;
+			ctxt->seg_override = VCPU_SREG_SS;
+			break;
 		case 0x3e:	/* DS override */
 			has_seg_override = true;
-			ctxt->seg_override = (ctxt->b >> 3) & 3;
+			ctxt->seg_override = VCPU_SREG_DS;
 			break;
 		case 0x64:	/* FS override */
+			has_seg_override = true;
+			ctxt->seg_override = VCPU_SREG_FS;
+			break;
 		case 0x65:	/* GS override */
 			has_seg_override = true;
-			ctxt->seg_override = ctxt->b & 7;
+			ctxt->seg_override = VCPU_SREG_GS;
 			break;
 		case 0x40 ... 0x4f: /* REX */
 			if (mode != X86EMUL_MODE_PROT64)
@@ -5303,10 +5338,15 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 			}
 			break;
 		case Escape:
-			if (ctxt->modrm > 0xbf)
-				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
-			else
+			if (ctxt->modrm > 0xbf) {
+				size_t size = ARRAY_SIZE(opcode.u.esc->high);
+				u32 index = array_index_nospec(
+					ctxt->modrm - 0xc0, size);
+
+				opcode = opcode.u.esc->high[index];
+			} else {
 				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
+			}
 			break;
 		case InstrDual:
 			if ((ctxt->modrm >> 6) == 3)
@@ -5448,7 +5488,9 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
 {
 	int rc;
 
+	emulator_get_fpu();
 	rc = asm_safe("fwait");
+	emulator_put_fpu();
 
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
@@ -5456,14 +5498,13 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
-static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
-				       struct operand *op)
+static void fetch_possible_mmx_operand(struct operand *op)
 {
 	if (op->type == OP_MM)
-		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
+		read_mmx_reg(&op->mm_val, op->addr.mm);
 }
 
-static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
+static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
 {
 	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
 
@@ -5539,10 +5580,10 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 			 * Now that we know the fpu is exception safe, we can fetch
 			 * operands from it.
 			 */
-			fetch_possible_mmx_operand(ctxt, &ctxt->src);
-			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
+			fetch_possible_mmx_operand(&ctxt->src);
+			fetch_possible_mmx_operand(&ctxt->src2);
 			if (!(ctxt->d & Mov))
-				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
+				fetch_possible_mmx_operand(&ctxt->dst);
 		}
 
 		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
@@ -5641,14 +5682,10 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 		ctxt->eflags &= ~X86_EFLAGS_RF;
 
 	if (ctxt->execute) {
-		if (ctxt->d & Fastop) {
-			void (*fop)(struct fastop *) = (void *)ctxt->execute;
-			rc = fastop(ctxt, fop);
-			if (rc != X86EMUL_CONTINUE)
-				goto done;
-			goto writeback;
-		}
-		rc = ctxt->execute(ctxt);
+		if (ctxt->d & Fastop)
+			rc = fastop(ctxt, (fastop_t)ctxt->execute);
+		else
+			rc = ctxt->execute(ctxt);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
 		goto writeback;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 23ff655..a86fda7 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -33,6 +33,7 @@
 #include <trace/events/kvm.h>
 
 #include "trace.h"
+#include "irq.h"
 
 #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
 
@@ -775,9 +776,10 @@ int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
 
 	/*
 	 * Hyper-V SynIC auto EOI SINT's are
-	 * not compatible with APICV, so deactivate APICV
+	 * not compatible with APICV, so request
+	 * to deactivate APICV permanently.
 	 */
-	kvm_vcpu_deactivate_apicv(vcpu);
+	kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_HYPERV);
 	synic->active = true;
 	synic->dont_zero_synic_pages = dont_zero_synic_pages;
 	return 0;
@@ -809,11 +811,12 @@ static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
 				     u32 index, u64 *pdata)
 {
 	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+	size_t size = ARRAY_SIZE(hv->hv_crash_param);
 
-	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+	if (WARN_ON_ONCE(index >= size))
 		return -EINVAL;
 
-	*pdata = hv->hv_crash_param[index];
+	*pdata = hv->hv_crash_param[array_index_nospec(index, size)];
 	return 0;
 }
 
@@ -852,11 +855,12 @@ static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
 				     u32 index, u64 data)
 {
 	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+	size_t size = ARRAY_SIZE(hv->hv_crash_param);
 
-	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+	if (WARN_ON_ONCE(index >= size))
 		return -EINVAL;
 
-	hv->hv_crash_param[index] = data;
+	hv->hv_crash_param[array_index_nospec(index, size)] = data;
 	return 0;
 }
 
@@ -1058,7 +1062,7 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 			return 1;
 		break;
 	default:
-		vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
+		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
 			    msr, data);
 		return 1;
 	}
@@ -1121,7 +1125,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 			return 1;
 
 		/*
-		 * Clear apic_assist portion of f(struct hv_vp_assist_page
+		 * Clear apic_assist portion of struct hv_vp_assist_page
 		 * only, there can be valuable data in the rest which needs
 		 * to be preserved e.g. on migration.
 		 */
@@ -1178,7 +1182,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 			return 1;
 		break;
 	default:
-		vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
+		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
 			    msr, data);
 		return 1;
 	}
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 4a6dc54..b24c606 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -295,12 +295,24 @@ void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
 	if (atomic_read(&ps->reinject) == reinject)
 		return;
 
+	/*
+	 * AMD SVM AVIC accelerates EOI write and does not trap.
+	 * This causes in-kernel PIT re-inject mode to fail
+	 * since it checks ps->irq_ack before kvm_set_irq()
+	 * and relies on the ack notifier to timely queue
+	 * the pt->worker work item and reinject the missed tick.
+	 * So, deactivate APICv when PIT is in reinject mode.
+	 */
 	if (reinject) {
+		kvm_request_apicv_update(kvm, false,
+					 APICV_INHIBIT_REASON_PIT_REINJ);
 		/* The initial state is preserved while ps->reinject == 0. */
 		kvm_pit_reset_reinject(pit);
 		kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
 		kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
 	} else {
+		kvm_request_apicv_update(kvm, true,
+					 APICV_INHIBIT_REASON_PIT_REINJ);
 		kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
 		kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
 	}
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 8b38bb4..629a09c 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -460,10 +460,14 @@ static int picdev_write(struct kvm_pic *s,
 	switch (addr) {
 	case 0x20:
 	case 0x21:
+		pic_lock(s);
+		pic_ioport_write(&s->pics[0], addr, data);
+		pic_unlock(s);
+		break;
 	case 0xa0:
 	case 0xa1:
 		pic_lock(s);
-		pic_ioport_write(&s->pics[addr >> 7], addr, data);
+		pic_ioport_write(&s->pics[1], addr, data);
 		pic_unlock(s);
 		break;
 	case 0x4d0:
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 9fd2dd8..7668fed 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -36,6 +36,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/nospec.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/current.h>
@@ -48,6 +49,11 @@
 static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
 		bool line_status);
 
+static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
+				      struct kvm_ioapic *ioapic,
+				      int trigger_mode,
+				      int pin);
+
 static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 					  unsigned long addr,
 					  unsigned long length)
@@ -68,13 +74,14 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 	default:
 		{
 			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
-			u64 redir_content;
+			u64 redir_content = ~0ULL;
 
-			if (redir_index < IOAPIC_NUM_PINS)
-				redir_content =
-					ioapic->redirtbl[redir_index].bits;
-			else
-				redir_content = ~0ULL;
+			if (redir_index < IOAPIC_NUM_PINS) {
+				u32 index = array_index_nospec(
+					redir_index, IOAPIC_NUM_PINS);
+
+				redir_content = ioapic->redirtbl[index].bits;
+			}
 
 			result = (ioapic->ioregsel & 0x1) ?
 			    (redir_content >> 32) & 0xffffffff :
@@ -108,8 +115,9 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
 	union kvm_ioapic_redirect_entry *e;
 
 	e = &ioapic->redirtbl[RTC_GSI];
-	if (!kvm_apic_match_dest(vcpu, NULL, 0,	e->fields.dest_id,
-				e->fields.dest_mode))
+	if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
+				 e->fields.dest_id,
+				 kvm_lapic_irq_dest_mode(!!e->fields.dest_mode)))
 		return;
 
 	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
@@ -151,10 +159,16 @@ static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
 	    __rtc_irq_eoi_tracking_restore_one(vcpu);
 }
 
-static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
+static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu,
+			int vector)
 {
-	if (test_and_clear_bit(vcpu->vcpu_id,
-			       ioapic->rtc_status.dest_map.map)) {
+	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
+
+	/* RTC special handling */
+	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
+	    (vector == dest_map->vectors[vcpu->vcpu_id]) &&
+	    (test_and_clear_bit(vcpu->vcpu_id,
+				ioapic->rtc_status.dest_map.map))) {
 		--ioapic->rtc_status.pending_eoi;
 		rtc_status_pending_eoi_check_valid(ioapic);
 	}
@@ -168,6 +182,31 @@ static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
 	return false;
 }
 
+static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)
+{
+	int i;
+	struct kvm_vcpu *vcpu;
+	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
+
+	kvm_for_each_vcpu(i, vcpu, ioapic->kvm) {
+		if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
+					 entry->fields.dest_id,
+					 entry->fields.dest_mode) ||
+		    kvm_apic_pending_eoi(vcpu, entry->fields.vector))
+			continue;
+
+		/*
+		 * If the LAPIC no longer has a pending EOI, update
+		 * EOI for this vector.
+		 */
+		rtc_irq_eoi(ioapic, vcpu, entry->fields.vector);
+		kvm_ioapic_update_eoi_one(vcpu, ioapic,
+					  entry->fields.trig_mode,
+					  irq);
+		break;
+	}
+}
+
 static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
 		int irq_level, bool line_status)
 {
@@ -186,9 +225,18 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
 	}
 
 	/*
+	 * AMD SVM AVIC accelerates EOI write and does not trap,
+	 * so the in-kernel IOAPIC will not be able to receive the EOI.
+	 * In this case, we do a lazy update of the pending EOI when
+	 * trying to set an IOAPIC irq.
+	 */
+	if (kvm_apicv_activated(ioapic->kvm))
+		ioapic_lazy_update_eoi(ioapic, irq);
+
+	/*
 	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
 	 * this only happens if a previous edge has not been delivered due
-	 * do masking.  For level interrupts, the remote_irr field tells
+	 * to masking.  For level interrupts, the remote_irr field tells
 	 * us if the interrupt is waiting for an EOI.
 	 *
 	 * RTC is special: it is edge-triggered, but userspace likes to know
@@ -250,8 +298,10 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
 		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
 		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
 		    index == RTC_GSI) {
-			if (kvm_apic_match_dest(vcpu, NULL, 0,
-			             e->fields.dest_id, e->fields.dest_mode) ||
+			u16 dm = kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
+
+			if (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
+						e->fields.dest_id, dm) ||
 			    kvm_apic_pending_eoi(vcpu, e->fields.vector))
 				__set_bit(e->fields.vector,
 					  ioapic_handled_vectors);
@@ -292,6 +342,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 
 		if (index >= IOAPIC_NUM_PINS)
 			return;
+		index = array_index_nospec(index, IOAPIC_NUM_PINS);
 		e = &ioapic->redirtbl[index];
 		mask_before = e->fields.mask;
 		/* Preserve read-only fields */
@@ -327,11 +378,12 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 		if (e->fields.delivery_mode == APIC_DM_FIXED) {
 			struct kvm_lapic_irq irq;
 
-			irq.shorthand = 0;
+			irq.shorthand = APIC_DEST_NOSHORT;
 			irq.vector = e->fields.vector;
 			irq.delivery_mode = e->fields.delivery_mode << 8;
 			irq.dest_id = e->fields.dest_id;
-			irq.dest_mode = e->fields.dest_mode;
+			irq.dest_mode =
+			    kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
 			bitmap_zero(&vcpu_bitmap, 16);
 			kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
 						 &vcpu_bitmap);
@@ -343,7 +395,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 				 * keep ioapic_handled_vectors synchronized.
 				 */
 				irq.dest_id = old_dest_id;
-				irq.dest_mode = old_dest_mode;
+				irq.dest_mode =
+				    kvm_lapic_irq_dest_mode(
+					!!e->fields.dest_mode);
 				kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
 							 &vcpu_bitmap);
 			}
@@ -369,11 +423,11 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
 
 	irqe.dest_id = entry->fields.dest_id;
 	irqe.vector = entry->fields.vector;
-	irqe.dest_mode = entry->fields.dest_mode;
+	irqe.dest_mode = kvm_lapic_irq_dest_mode(!!entry->fields.dest_mode);
 	irqe.trig_mode = entry->fields.trig_mode;
 	irqe.delivery_mode = entry->fields.delivery_mode << 8;
 	irqe.level = 1;
-	irqe.shorthand = 0;
+	irqe.shorthand = APIC_DEST_NOSHORT;
 	irqe.msi_redir_hint = false;
 
 	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
@@ -445,72 +499,68 @@ static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
 }
 
 #define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
-
-static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
-			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
+static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
+				      struct kvm_ioapic *ioapic,
+				      int trigger_mode,
+				      int pin)
 {
-	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
 	struct kvm_lapic *apic = vcpu->arch.apic;
-	int i;
+	union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[pin];
 
-	/* RTC special handling */
-	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
-	    vector == dest_map->vectors[vcpu->vcpu_id])
-		rtc_irq_eoi(ioapic, vcpu);
+	/*
+	 * We are dropping lock while calling ack notifiers because ack
+	 * notifier callbacks for assigned devices call into IOAPIC
+	 * recursively. Since remote_irr is cleared only after call
+	 * to notifiers if the same vector will be delivered while lock
+	 * is dropped it will be put into irr and will be delivered
+	 * after ack notifier returns.
+	 */
+	spin_unlock(&ioapic->lock);
+	kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin);
+	spin_lock(&ioapic->lock);
 
-	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
-		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
+	if (trigger_mode != IOAPIC_LEVEL_TRIG ||
+	    kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
+		return;
 
-		if (ent->fields.vector != vector)
-			continue;
-
-		/*
-		 * We are dropping lock while calling ack notifiers because ack
-		 * notifier callbacks for assigned devices call into IOAPIC
-		 * recursively. Since remote_irr is cleared only after call
-		 * to notifiers if the same vector will be delivered while lock
-		 * is dropped it will be put into irr and will be delivered
-		 * after ack notifier returns.
-		 */
-		spin_unlock(&ioapic->lock);
-		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
-		spin_lock(&ioapic->lock);
-
-		if (trigger_mode != IOAPIC_LEVEL_TRIG ||
-		    kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
-			continue;
-
-		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
-		ent->fields.remote_irr = 0;
-		if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
-			++ioapic->irq_eoi[i];
-			if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
-				/*
-				 * Real hardware does not deliver the interrupt
-				 * immediately during eoi broadcast, and this
-				 * lets a buggy guest make slow progress
-				 * even if it does not correctly handle a
-				 * level-triggered interrupt.  Emulate this
-				 * behavior if we detect an interrupt storm.
-				 */
-				schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
-				ioapic->irq_eoi[i] = 0;
-				trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
-			} else {
-				ioapic_service(ioapic, i, false);
-			}
+	ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
+	ent->fields.remote_irr = 0;
+	if (!ent->fields.mask && (ioapic->irr & (1 << pin))) {
+		++ioapic->irq_eoi[pin];
+		if (ioapic->irq_eoi[pin] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
+			/*
+			 * Real hardware does not deliver the interrupt
+			 * immediately during eoi broadcast, and this
+			 * lets a buggy guest make slow progress
+			 * even if it does not correctly handle a
+			 * level-triggered interrupt.  Emulate this
+			 * behavior if we detect an interrupt storm.
+			 */
+			schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
+			ioapic->irq_eoi[pin] = 0;
+			trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
 		} else {
-			ioapic->irq_eoi[i] = 0;
+			ioapic_service(ioapic, pin, false);
 		}
+	} else {
+		ioapic->irq_eoi[pin] = 0;
 	}
 }
 
 void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
 {
+	int i;
 	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
 
 	spin_lock(&ioapic->lock);
-	__kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
+	rtc_irq_eoi(ioapic, vcpu, vector);
+	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
+
+		if (ent->fields.vector != vector)
+			continue;
+		kvm_ioapic_update_eoi_one(vcpu, ioapic, trigger_mode, i);
+	}
 	spin_unlock(&ioapic->lock);
 }
 
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index ea1a4e0..2fb2e3c 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -116,9 +116,6 @@ static inline int ioapic_in_kernel(struct kvm *kvm)
 }
 
 void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
-bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
-		int short_hand, unsigned int dest, int dest_mode);
-int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
 void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
 			int trigger_mode);
 int kvm_ioapic_init(struct kvm *kvm);
@@ -126,9 +123,6 @@ void kvm_ioapic_destroy(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
 		       int level, bool line_status);
 void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
-int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
-			     struct kvm_lapic_irq *irq,
-			     struct dest_map *dest_map);
 void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
 void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
 void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 7c6233d..f173ab6 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -113,5 +113,8 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu);
 
 int kvm_setup_default_irq_routing(struct kvm *kvm);
 int kvm_setup_empty_irq_routing(struct kvm *kvm);
+int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
+			     struct kvm_lapic_irq *irq,
+			     struct dest_map *dest_map);
 
 #endif
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 8ecd48d..79afa0b 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -52,15 +52,15 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 	unsigned long dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
 	unsigned int dest_vcpus = 0;
 
-	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
-			kvm_lowest_prio_delivery(irq)) {
+	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
+		return r;
+
+	if (irq->dest_mode == APIC_DEST_PHYSICAL &&
+	    irq->dest_id == 0xff && kvm_lowest_prio_delivery(irq)) {
 		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
 		irq->delivery_mode = APIC_DM_FIXED;
 	}
 
-	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
-		return r;
-
 	memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap));
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -114,13 +114,14 @@ void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
 		irq->dest_id |= MSI_ADDR_EXT_DEST_ID(e->msi.address_hi);
 	irq->vector = (e->msi.data &
 			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
-	irq->dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
+	irq->dest_mode = kvm_lapic_irq_dest_mode(
+	    !!((1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo));
 	irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
 	irq->delivery_mode = e->msi.data & 0x700;
 	irq->msi_redir_hint = ((e->msi.address_lo
 		& MSI_ADDR_REDIRECTION_LOWPRI) > 0);
 	irq->level = 1;
-	irq->shorthand = 0;
+	irq->shorthand = APIC_DEST_NOSHORT;
 }
 EXPORT_SYMBOL_GPL(kvm_set_msi_irq);
 
@@ -416,7 +417,8 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
 
 			kvm_set_msi_irq(vcpu->kvm, entry, &irq);
 
-			if (irq.level && kvm_apic_match_dest(vcpu, NULL, 0,
+			if (irq.level &&
+			    kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
 						irq.dest_id, irq.dest_mode))
 				__set_bit(irq.vector, ioapic_handled_vectors);
 		}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index cf9177b..eafc631 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -56,9 +56,6 @@
 #define APIC_VERSION			(0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
 #define LAPIC_MMIO_LENGTH		(1 << 12)
 /* followed define is not in apicdef.h */
-#define APIC_SHORT_MASK			0xc0000
-#define APIC_DEST_NOSHORT		0x0
-#define APIC_DEST_MASK			0x800
 #define MAX_APIC_VECTOR			256
 #define APIC_VECTORS_PER_REG		32
 
@@ -792,13 +789,13 @@ static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
 }
 
 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
-			   int short_hand, unsigned int dest, int dest_mode)
+			   int shorthand, unsigned int dest, int dest_mode)
 {
 	struct kvm_lapic *target = vcpu->arch.apic;
 	u32 mda = kvm_apic_mda(vcpu, dest, source, target);
 
 	ASSERT(target);
-	switch (short_hand) {
+	switch (shorthand) {
 	case APIC_DEST_NOSHORT:
 		if (dest_mode == APIC_DEST_PHYSICAL)
 			return kvm_apic_match_physical_addr(target, mda);
@@ -967,12 +964,12 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
 }
 
 /*
- * This routine tries to handler interrupts in posted mode, here is how
+ * This routine tries to handle interrupts in posted mode, here is how
  * it deals with different cases:
  * - For single-destination interrupts, handle it in posted mode
  * - Else if vector hashing is enabled and it is a lowest-priority
  *   interrupt, handle it in posted mode and use the following mechanism
- *   to find the destinaiton vCPU.
+ *   to find the destination vCPU.
  *	1. For lowest-priority interrupts, store all the possible
  *	   destination vCPUs in an array.
  *	2. Use "guest vector % max number of destination vCPUs" to find
@@ -1151,7 +1148,7 @@ void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
 			if (!kvm_apic_present(vcpu))
 				continue;
 			if (!kvm_apic_match_dest(vcpu, NULL,
-						 irq->delivery_mode,
+						 irq->shorthand,
 						 irq->dest_id,
 						 irq->dest_mode))
 				continue;
@@ -1574,9 +1571,9 @@ static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
 	struct kvm_timer *ktimer = &apic->lapic_timer;
 
 	kvm_apic_local_deliver(apic, APIC_LVTT);
-	if (apic_lvtt_tscdeadline(apic))
+	if (apic_lvtt_tscdeadline(apic)) {
 		ktimer->tscdeadline = 0;
-	if (apic_lvtt_oneshot(apic)) {
+	} else if (apic_lvtt_oneshot(apic)) {
 		ktimer->tscdeadline = 0;
 		ktimer->target_expiration = 0;
 	}
@@ -1963,15 +1960,20 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 	case APIC_LVTTHMR:
 	case APIC_LVTPC:
 	case APIC_LVT1:
-	case APIC_LVTERR:
+	case APIC_LVTERR: {
 		/* TODO: Check vector */
+		size_t size;
+		u32 index;
+
 		if (!kvm_apic_sw_enabled(apic))
 			val |= APIC_LVT_MASKED;
-
-		val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
+		size = ARRAY_SIZE(apic_lvt_mask);
+		index = array_index_nospec(
+				(reg - APIC_LVTT) >> 4, size);
+		val &= apic_lvt_mask[index];
 		kvm_lapic_set_reg(apic, reg, val);
-
 		break;
+	}
 
 	case APIC_LVTT:
 		if (!kvm_apic_sw_enabled(apic))
@@ -2185,6 +2187,21 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 		pr_warn_once("APIC base relocation is unsupported by KVM");
 }
 
+void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	if (vcpu->arch.apicv_active) {
+		/* irr_pending is always true when apicv is activated. */
+		apic->irr_pending = true;
+		apic->isr_count = 1;
+	} else {
+		apic->irr_pending = (apic_search_irr(apic) != -1);
+		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
+
 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
@@ -2227,8 +2244,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
 		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
 	}
-	apic->irr_pending = vcpu->arch.apicv_active;
-	apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
+	kvm_apic_update_apicv(vcpu);
 	apic->highest_isr_cache = -1;
 	update_divide_count(apic);
 	atomic_set(&apic->lapic_timer.pending, 0);
@@ -2373,14 +2389,13 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
 {
 	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
-	int r = 0;
 
 	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
-		r = 1;
+		return 1;
 	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
 	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
-		r = 1;
-	return r;
+		return 1;
+	return 0;
 }
 
 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
@@ -2486,9 +2501,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
 	update_divide_count(apic);
 	start_apic_timer(apic);
-	apic->irr_pending = true;
-	apic->isr_count = vcpu->arch.apicv_active ?
-				1 : count_vectors(apic->regs + APIC_ISR);
+	kvm_apic_update_apicv(vcpu);
 	apic->highest_isr_cache = -1;
 	if (vcpu->arch.apicv_active) {
 		kvm_x86_ops->apicv_post_state_restore(vcpu);
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 39925af..ec6fbfe 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -10,8 +10,9 @@
 #define KVM_APIC_SIPI		1
 #define KVM_APIC_LVT_NUM	6
 
-#define KVM_APIC_SHORT_MASK	0xc0000
-#define KVM_APIC_DEST_MASK	0x800
+#define APIC_SHORT_MASK			0xc0000
+#define APIC_DEST_NOSHORT		0x0
+#define APIC_DEST_MASK			0x800
 
 #define APIC_BUS_CYCLE_NS       1
 #define APIC_BUS_FREQUENCY      (1000000000ULL / APIC_BUS_CYCLE_NS)
@@ -82,14 +83,15 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val);
 int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
 		       void *data);
 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
-			   int short_hand, unsigned int dest, int dest_mode);
-
+			   int shorthand, unsigned int dest, int dest_mode);
+int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
 bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr);
 bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr);
 void kvm_apic_update_ppr(struct kvm_vcpu *vcpu);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 		     struct dest_map *dest_map);
 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);
+void kvm_apic_update_apicv(struct kvm_vcpu *vcpu);
 
 bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
 		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a32b847..7011a4e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -418,22 +418,24 @@ static inline bool is_access_track_spte(u64 spte)
  * requires a full MMU zap).  The flag is instead explicitly queried when
  * checking for MMIO spte cache hits.
  */
-#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(18, 0)
+#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(17, 0)
 
 #define MMIO_SPTE_GEN_LOW_START		3
 #define MMIO_SPTE_GEN_LOW_END		11
 #define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
 						    MMIO_SPTE_GEN_LOW_START)
 
-#define MMIO_SPTE_GEN_HIGH_START	52
-#define MMIO_SPTE_GEN_HIGH_END		61
+#define MMIO_SPTE_GEN_HIGH_START	PT64_SECOND_AVAIL_BITS_SHIFT
+#define MMIO_SPTE_GEN_HIGH_END		62
 #define MMIO_SPTE_GEN_HIGH_MASK		GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
 						    MMIO_SPTE_GEN_HIGH_START)
+
 static u64 generation_mmio_spte_mask(u64 gen)
 {
 	u64 mask;
 
 	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
+	BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK);
 
 	mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK;
 	mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK;
@@ -444,16 +446,14 @@ static u64 get_mmio_spte_generation(u64 spte)
 {
 	u64 gen;
 
-	spte &= ~shadow_mmio_mask;
-
 	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START;
 	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START;
 	return gen;
 }
 
-static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
-			   unsigned access)
+static u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
 {
+
 	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
 	u64 mask = generation_mmio_spte_mask(gen);
 	u64 gpa = gfn << PAGE_SHIFT;
@@ -464,6 +464,17 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
 		<< shadow_nonpresent_or_rsvd_mask_len;
 
+	return mask;
+}
+
+static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
+			   unsigned int access)
+{
+	u64 mask = make_mmio_spte(vcpu, gfn, access);
+	unsigned int gen = get_mmio_spte_generation(mask);
+
+	access = mask & ACC_ALL;
+
 	trace_mark_mmio_spte(sptep, gfn, access, gen);
 	mmu_spte_set(sptep, mask);
 }
@@ -484,7 +495,7 @@ static unsigned get_mmio_spte_access(u64 spte)
 }
 
 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
-			  kvm_pfn_t pfn, unsigned access)
+			  kvm_pfn_t pfn, unsigned int access)
 {
 	if (unlikely(is_noslot_pfn(pfn))) {
 		mark_mmio_spte(vcpu, sptep, gfn, access);
@@ -538,16 +549,20 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 static u8 kvm_get_shadow_phys_bits(void)
 {
 	/*
-	 * boot_cpu_data.x86_phys_bits is reduced when MKTME is detected
-	 * in CPU detection code, but MKTME treats those reduced bits as
-	 * 'keyID' thus they are not reserved bits. Therefore for MKTME
-	 * we should still return physical address bits reported by CPUID.
+	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
+	 * in CPU detection code, but the processor treats those reduced bits as
+	 * 'keyID' thus they are not reserved bits. Therefore KVM needs to look at
+	 * the physical address bits reported by CPUID.
 	 */
-	if (!boot_cpu_has(X86_FEATURE_TME) ||
-	    WARN_ON_ONCE(boot_cpu_data.extended_cpuid_level < 0x80000008))
-		return boot_cpu_data.x86_phys_bits;
+	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
+		return cpuid_eax(0x80000008) & 0xff;
 
-	return cpuid_eax(0x80000008) & 0xff;
+	/*
+	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
+	 * custom CPUID.  Proceed with whatever the kernel found since these features
+	 * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
+	 */
+	return boot_cpu_data.x86_phys_bits;
 }
 
 static void kvm_mmu_reset_all_pte_masks(void)
@@ -1260,56 +1275,6 @@ static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	list_del(&sp->lpage_disallowed_link);
 }
 
-static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
-					  struct kvm_memory_slot *slot)
-{
-	struct kvm_lpage_info *linfo;
-
-	if (slot) {
-		linfo = lpage_info_slot(gfn, slot, level);
-		return !!linfo->disallow_lpage;
-	}
-
-	return true;
-}
-
-static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
-					int level)
-{
-	struct kvm_memory_slot *slot;
-
-	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-	return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
-}
-
-static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
-{
-	unsigned long page_size;
-	int i, ret = 0;
-
-	page_size = kvm_host_page_size(kvm, gfn);
-
-	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-		if (page_size >= KVM_HPAGE_SIZE(i))
-			ret = i;
-		else
-			break;
-	}
-
-	return ret;
-}
-
-static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot,
-					  bool no_dirty_log)
-{
-	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
-		return false;
-	if (no_dirty_log && slot->dirty_bitmap)
-		return false;
-
-	return true;
-}
-
 static struct kvm_memory_slot *
 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 			    bool no_dirty_log)
@@ -1317,40 +1282,14 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 	struct kvm_memory_slot *slot;
 
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-	if (!memslot_valid_for_gpte(slot, no_dirty_log))
-		slot = NULL;
+	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
+		return NULL;
+	if (no_dirty_log && slot->dirty_bitmap)
+		return NULL;
 
 	return slot;
 }
 
-static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
-			 bool *force_pt_level)
-{
-	int host_level, level, max_level;
-	struct kvm_memory_slot *slot;
-
-	if (unlikely(*force_pt_level))
-		return PT_PAGE_TABLE_LEVEL;
-
-	slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
-	*force_pt_level = !memslot_valid_for_gpte(slot, true);
-	if (unlikely(*force_pt_level))
-		return PT_PAGE_TABLE_LEVEL;
-
-	host_level = host_mapping_level(vcpu->kvm, large_gfn);
-
-	if (host_level == PT_PAGE_TABLE_LEVEL)
-		return host_level;
-
-	max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
-
-	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
-		if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot))
-			break;
-
-	return level - 1;
-}
-
 /*
  * About rmap_head encoding:
  *
@@ -1410,7 +1349,7 @@ pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
 	if (j != 0)
 		return;
 	if (!prev_desc && !desc->more)
-		rmap_head->val = (unsigned long)desc->sptes[0];
+		rmap_head->val = 0;
 	else
 		if (prev_desc)
 			prev_desc->more = desc->more;
@@ -1525,7 +1464,7 @@ struct rmap_iterator {
 /*
  * Iteration must be started by this function.  This should also be used after
  * removing/dropping sptes from the rmap link because in such cases the
- * information in the itererator may not be valid.
+ * information in the iterator may not be valid.
  *
  * Returns sptep if found, NULL otherwise.
  */
@@ -2547,7 +2486,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     gva_t gaddr,
 					     unsigned level,
 					     int direct,
-					     unsigned access)
+					     unsigned int access)
 {
 	union kvm_mmu_page_role role;
 	unsigned quadrant;
@@ -2899,6 +2838,26 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
 	return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 }
 
+static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
+{
+	LIST_HEAD(invalid_list);
+
+	if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
+		return 0;
+
+	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
+		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
+			break;
+
+		++vcpu->kvm->stat.mmu_recycled;
+	}
+	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+
+	if (!kvm_mmu_available_pages(vcpu->kvm))
+		return -ENOSPC;
+	return 0;
+}
+
 /*
  * Changing the number of mmu pages allocated to the vm
  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
@@ -3042,7 +3001,7 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 #define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
 
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-		    unsigned pte_access, int level,
+		    unsigned int pte_access, int level,
 		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
 		    bool can_unsync, bool host_writable)
 {
@@ -3099,17 +3058,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	spte |= (u64)pfn << PAGE_SHIFT;
 
 	if (pte_access & ACC_WRITE_MASK) {
-
-		/*
-		 * Other vcpu creates new sp in the window between
-		 * mapping_level() and acquiring mmu-lock. We can
-		 * allow guest to retry the access, the mapping can
-		 * be fixed if guest refault.
-		 */
-		if (level > PT_PAGE_TABLE_LEVEL &&
-		    mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
-			goto done;
-
 		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
 
 		/*
@@ -3141,13 +3089,13 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 set_pte:
 	if (mmu_spte_update(sptep, spte))
 		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
-done:
 	return ret;
 }
 
-static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
-			int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
-		       	bool speculative, bool host_writable)
+static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
+			unsigned int pte_access, int write_fault, int level,
+			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
+			bool host_writable)
 {
 	int was_rmapped = 0;
 	int rmap_count;
@@ -3229,7 +3177,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 {
 	struct page *pages[PTE_PREFETCH_NUM];
 	struct kvm_memory_slot *slot;
-	unsigned access = sp->role.access;
+	unsigned int access = sp->role.access;
 	int i, ret;
 	gfn_t gfn;
 
@@ -3294,6 +3242,83 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 	__direct_pte_prefetch(vcpu, sp, sptep);
 }
 
+static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
+				  kvm_pfn_t pfn, struct kvm_memory_slot *slot)
+{
+	unsigned long hva;
+	pte_t *pte;
+	int level;
+
+	BUILD_BUG_ON(PT_PAGE_TABLE_LEVEL != (int)PG_LEVEL_4K ||
+		     PT_DIRECTORY_LEVEL != (int)PG_LEVEL_2M ||
+		     PT_PDPE_LEVEL != (int)PG_LEVEL_1G);
+
+	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
+		return PT_PAGE_TABLE_LEVEL;
+
+	/*
+	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
+	 * is not solely for performance, it's also necessary to avoid the
+	 * "writable" check in __gfn_to_hva_many(), which will always fail on
+	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
+	 * page fault steps have already verified the guest isn't writing a
+	 * read-only memslot.
+	 */
+	hva = __gfn_to_hva_memslot(slot, gfn);
+
+	pte = lookup_address_in_mm(vcpu->kvm->mm, hva, &level);
+	if (unlikely(!pte))
+		return PT_PAGE_TABLE_LEVEL;
+
+	return level;
+}
+
+static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
+				   int max_level, kvm_pfn_t *pfnp)
+{
+	struct kvm_memory_slot *slot;
+	struct kvm_lpage_info *linfo;
+	kvm_pfn_t pfn = *pfnp;
+	kvm_pfn_t mask;
+	int level;
+
+	if (unlikely(max_level == PT_PAGE_TABLE_LEVEL))
+		return PT_PAGE_TABLE_LEVEL;
+
+	if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
+		return PT_PAGE_TABLE_LEVEL;
+
+	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
+	if (!slot)
+		return PT_PAGE_TABLE_LEVEL;
+
+	max_level = min(max_level, kvm_x86_ops->get_lpage_level());
+	for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
+		linfo = lpage_info_slot(gfn, slot, max_level);
+		if (!linfo->disallow_lpage)
+			break;
+	}
+
+	if (max_level == PT_PAGE_TABLE_LEVEL)
+		return PT_PAGE_TABLE_LEVEL;
+
+	level = host_pfn_mapping_level(vcpu, gfn, pfn, slot);
+	if (level == PT_PAGE_TABLE_LEVEL)
+		return level;
+
+	level = min(level, max_level);
+
+	/*
+	 * mmu_notifier_retry() was successful and mmu_lock is held, so
+	 * the pmd can't be split from under us.
+	 */
+	mask = KVM_PAGES_PER_HPAGE(level) - 1;
+	VM_BUG_ON((gfn & mask) != (pfn & mask));
+	*pfnp = pfn & ~mask;
+
+	return level;
+}
+
 static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
 				       gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
 {
@@ -3318,18 +3343,20 @@ static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
 }
 
 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
-			int map_writable, int level, kvm_pfn_t pfn,
-			bool prefault, bool lpage_disallowed)
+			int map_writable, int max_level, kvm_pfn_t pfn,
+			bool prefault, bool account_disallowed_nx_lpage)
 {
 	struct kvm_shadow_walk_iterator it;
 	struct kvm_mmu_page *sp;
-	int ret;
+	int level, ret;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	gfn_t base_gfn = gfn;
 
-	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
+	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
 		return RET_PF_RETRY;
 
+	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn);
+
 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
 	for_each_shadow_entry(vcpu, gpa, it) {
 		/*
@@ -3348,7 +3375,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
 					      it.level - 1, true, ACC_ALL);
 
 			link_shadow_page(vcpu, it.sptep, sp);
-			if (lpage_disallowed)
+			if (account_disallowed_nx_lpage)
 				account_huge_nx_page(vcpu->kvm, sp);
 		}
 	}
@@ -3384,47 +3411,9 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 	return -EFAULT;
 }
 
-static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
-					gfn_t gfn, kvm_pfn_t *pfnp,
-					int *levelp)
-{
-	kvm_pfn_t pfn = *pfnp;
-	int level = *levelp;
-
-	/*
-	 * Check if it's a transparent hugepage. If this would be an
-	 * hugetlbfs page, level wouldn't be set to
-	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
-	 * here.
-	 */
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
-	    !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
-	    PageTransCompoundMap(pfn_to_page(pfn)) &&
-	    !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
-		unsigned long mask;
-		/*
-		 * mmu_notifier_retry was successful and we hold the
-		 * mmu_lock here, so the pmd can't become splitting
-		 * from under us, and in turn
-		 * __split_huge_page_refcount() can't run from under
-		 * us and we can safely transfer the refcount from
-		 * PG_tail to PG_head as we switch the pfn to tail to
-		 * head.
-		 */
-		*levelp = level = PT_DIRECTORY_LEVEL;
-		mask = KVM_PAGES_PER_HPAGE(level) - 1;
-		VM_BUG_ON((gfn & mask) != (pfn & mask));
-		if (pfn & mask) {
-			kvm_release_pfn_clean(pfn);
-			pfn &= ~mask;
-			kvm_get_pfn(pfn);
-			*pfnp = pfn;
-		}
-	}
-}
-
 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
-				kvm_pfn_t pfn, unsigned access, int *ret_val)
+				kvm_pfn_t pfn, unsigned int access,
+				int *ret_val)
 {
 	/* The pfn is invalid, report the error! */
 	if (unlikely(is_error_pfn(pfn))) {
@@ -3528,7 +3517,7 @@ static bool is_access_allowed(u32 fault_err_code, u64 spte)
  * - true: let the vcpu to access on the same address again.
  * - false: let the real page fault path to fix it.
  */
-static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
+static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 			    u32 error_code)
 {
 	struct kvm_shadow_walk_iterator iterator;
@@ -3537,9 +3526,6 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	u64 spte = 0ull;
 	uint retry_count = 0;
 
-	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
-		return false;
-
 	if (!page_fault_can_be_fast(error_code))
 		return false;
 
@@ -3548,9 +3534,8 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	do {
 		u64 new_spte;
 
-		for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
-			if (!is_shadow_present_pte(spte) ||
-			    iterator.level < level)
+		for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
+			if (!is_shadow_present_pte(spte))
 				break;
 
 		sp = page_header(__pa(iterator.sptep));
@@ -3626,71 +3611,13 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 
 	} while (true);
 
-	trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
+	trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
 			      spte, fault_handled);
 	walk_shadow_page_lockless_end(vcpu);
 
 	return fault_handled;
 }
 
-static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
-			 gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable);
-static int make_mmu_pages_available(struct kvm_vcpu *vcpu);
-
-static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
-			 gfn_t gfn, bool prefault)
-{
-	int r;
-	int level;
-	bool force_pt_level;
-	kvm_pfn_t pfn;
-	unsigned long mmu_seq;
-	bool map_writable, write = error_code & PFERR_WRITE_MASK;
-	bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
-				is_nx_huge_page_enabled();
-
-	force_pt_level = lpage_disallowed;
-	level = mapping_level(vcpu, gfn, &force_pt_level);
-	if (likely(!force_pt_level)) {
-		/*
-		 * This path builds a PAE pagetable - so we can map
-		 * 2mb pages at maximum. Therefore check if the level
-		 * is larger than that.
-		 */
-		if (level > PT_DIRECTORY_LEVEL)
-			level = PT_DIRECTORY_LEVEL;
-
-		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
-	}
-
-	if (fast_page_fault(vcpu, v, level, error_code))
-		return RET_PF_RETRY;
-
-	mmu_seq = vcpu->kvm->mmu_notifier_seq;
-	smp_rmb();
-
-	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
-		return RET_PF_RETRY;
-
-	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
-		return r;
-
-	r = RET_PF_RETRY;
-	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
-		goto out_unlock;
-	if (make_mmu_pages_available(vcpu) < 0)
-		goto out_unlock;
-	if (likely(!force_pt_level))
-		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
-	r = __direct_map(vcpu, v, write, map_writable, level, pfn,
-			 prefault, false);
-out_unlock:
-	spin_unlock(&vcpu->kvm->mmu_lock);
-	kvm_release_pfn_clean(pfn);
-	return r;
-}
-
 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
 			       struct list_head *invalid_list)
 {
@@ -3981,7 +3908,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
 
-static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
+static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
 				  u32 access, struct x86_exception *exception)
 {
 	if (exception)
@@ -3989,7 +3916,7 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
 	return vaddr;
 }
 
-static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
+static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
 					 u32 access,
 					 struct x86_exception *exception)
 {
@@ -4001,20 +3928,14 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
 static bool
 __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
 {
-	int bit7 = (pte >> 7) & 1, low6 = pte & 0x3f;
+	int bit7 = (pte >> 7) & 1;
 
-	return (pte & rsvd_check->rsvd_bits_mask[bit7][level-1]) |
-		((rsvd_check->bad_mt_xwr & (1ull << low6)) != 0);
+	return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
 }
 
-static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
+static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
 {
-	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level);
-}
-
-static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level)
-{
-	return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level);
+	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
 }
 
 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
@@ -4038,11 +3959,11 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull;
+	struct rsvd_bits_validate *rsvd_check;
 	int root, leaf;
 	bool reserved = false;
 
-	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
-		goto exit;
+	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
 
 	walk_shadow_page_lockless_begin(vcpu);
 
@@ -4058,8 +3979,13 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 		if (!is_shadow_present_pte(spte))
 			break;
 
-		reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte,
-						    iterator.level);
+		/*
+		 * Use a bitwise-OR instead of a logical-OR to aggregate the
+		 * reserved bit and EPT's invalid memtype/XWR checks to avoid
+		 * adding a Jcc in the loop.
+		 */
+		reserved |= __is_bad_mt_xwr(rsvd_check, spte) |
+			    __is_rsvd_bits_set(rsvd_check, spte, iterator.level);
 	}
 
 	walk_shadow_page_lockless_end(vcpu);
@@ -4073,7 +3999,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 			root--;
 		}
 	}
-exit:
+
 	*sptep = spte;
 	return reserved;
 }
@@ -4092,7 +4018,7 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 
 	if (is_mmio_spte(spte)) {
 		gfn_t gfn = get_mmio_spte_gfn(spte);
-		unsigned access = get_mmio_spte_access(spte);
+		unsigned int access = get_mmio_spte_access(spte);
 
 		if (!check_mmio_spte(vcpu, spte))
 			return RET_PF_INVALID;
@@ -4137,9 +4063,6 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
 	struct kvm_shadow_walk_iterator iterator;
 	u64 spte;
 
-	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
-		return;
-
 	walk_shadow_page_lockless_begin(vcpu);
 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
 		clear_sp_write_flooding_count(iterator.sptep);
@@ -4149,29 +4072,8 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
 	walk_shadow_page_lockless_end(vcpu);
 }
 
-static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-				u32 error_code, bool prefault)
-{
-	gfn_t gfn = gva >> PAGE_SHIFT;
-	int r;
-
-	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
-
-	if (page_fault_handle_page_track(vcpu, error_code, gfn))
-		return RET_PF_EMULATE;
-
-	r = mmu_topup_memory_caches(vcpu);
-	if (r)
-		return r;
-
-	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
-
-
-	return nonpaging_map(vcpu, gva & PAGE_MASK,
-			     error_code, gfn, prefault);
-}
-
-static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
+static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+				   gfn_t gfn)
 {
 	struct kvm_arch_async_pf arch;
 
@@ -4180,11 +4082,13 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 	arch.direct_map = vcpu->arch.mmu->direct_map;
 	arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);
 
-	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
+	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
+				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
-			 gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable)
+			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
+			 bool *writable)
 {
 	struct kvm_memory_slot *slot;
 	bool async;
@@ -4204,12 +4108,12 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 		return false; /* *pfn has correct page already */
 
 	if (!prefault && kvm_can_do_async_pf(vcpu)) {
-		trace_kvm_try_async_get_page(gva, gfn);
+		trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
-			trace_kvm_async_pf_doublefault(gva, gfn);
+			trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
 			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
 			return true;
-		} else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
+		} else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
 			return true;
 	}
 
@@ -4217,11 +4121,77 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 	return false;
 }
 
+static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
+			     bool prefault, int max_level, bool is_tdp)
+{
+	bool write = error_code & PFERR_WRITE_MASK;
+	bool exec = error_code & PFERR_FETCH_MASK;
+	bool lpage_disallowed = exec && is_nx_huge_page_enabled();
+	bool map_writable;
+
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	unsigned long mmu_seq;
+	kvm_pfn_t pfn;
+	int r;
+
+	if (page_fault_handle_page_track(vcpu, error_code, gfn))
+		return RET_PF_EMULATE;
+
+	r = mmu_topup_memory_caches(vcpu);
+	if (r)
+		return r;
+
+	if (lpage_disallowed)
+		max_level = PT_PAGE_TABLE_LEVEL;
+
+	if (fast_page_fault(vcpu, gpa, error_code))
+		return RET_PF_RETRY;
+
+	mmu_seq = vcpu->kvm->mmu_notifier_seq;
+	smp_rmb();
+
+	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
+		return RET_PF_RETRY;
+
+	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
+		return r;
+
+	r = RET_PF_RETRY;
+	spin_lock(&vcpu->kvm->mmu_lock);
+	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
+		goto out_unlock;
+	if (make_mmu_pages_available(vcpu) < 0)
+		goto out_unlock;
+	r = __direct_map(vcpu, gpa, write, map_writable, max_level, pfn,
+			 prefault, is_tdp && lpage_disallowed);
+
+out_unlock:
+	spin_unlock(&vcpu->kvm->mmu_lock);
+	kvm_release_pfn_clean(pfn);
+	return r;
+}
+
+static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
+				u32 error_code, bool prefault)
+{
+	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
+
+	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
+	return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
+				 PT_DIRECTORY_LEVEL, false);
+}
+
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 				u64 fault_address, char *insn, int insn_len)
 {
 	int r = 1;
 
+#ifndef CONFIG_X86_64
+	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
+	if (WARN_ON_ONCE(fault_address >> 32))
+		return -EFAULT;
+#endif
+
 	vcpu->arch.l1tf_flush_l1d = true;
 	switch (vcpu->arch.apf.host_apf_reason) {
 	default:
@@ -4249,76 +4219,23 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 }
 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
 
-static bool
-check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
-{
-	int page_num = KVM_PAGES_PER_HPAGE(level);
-
-	gfn &= ~(page_num - 1);
-
-	return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
-}
-
-static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
+static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 			  bool prefault)
 {
-	kvm_pfn_t pfn;
-	int r;
-	int level;
-	bool force_pt_level;
-	gfn_t gfn = gpa >> PAGE_SHIFT;
-	unsigned long mmu_seq;
-	int write = error_code & PFERR_WRITE_MASK;
-	bool map_writable;
-	bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
-				is_nx_huge_page_enabled();
+	int max_level;
 
-	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
+	for (max_level = PT_MAX_HUGEPAGE_LEVEL;
+	     max_level > PT_PAGE_TABLE_LEVEL;
+	     max_level--) {
+		int page_num = KVM_PAGES_PER_HPAGE(max_level);
+		gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);
 
-	if (page_fault_handle_page_track(vcpu, error_code, gfn))
-		return RET_PF_EMULATE;
-
-	r = mmu_topup_memory_caches(vcpu);
-	if (r)
-		return r;
-
-	force_pt_level =
-		lpage_disallowed ||
-		!check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL);
-	level = mapping_level(vcpu, gfn, &force_pt_level);
-	if (likely(!force_pt_level)) {
-		if (level > PT_DIRECTORY_LEVEL &&
-		    !check_hugepage_cache_consistency(vcpu, gfn, level))
-			level = PT_DIRECTORY_LEVEL;
-		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+		if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
+			break;
 	}
 
-	if (fast_page_fault(vcpu, gpa, level, error_code))
-		return RET_PF_RETRY;
-
-	mmu_seq = vcpu->kvm->mmu_notifier_seq;
-	smp_rmb();
-
-	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
-		return RET_PF_RETRY;
-
-	if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
-		return r;
-
-	r = RET_PF_RETRY;
-	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
-		goto out_unlock;
-	if (make_mmu_pages_available(vcpu) < 0)
-		goto out_unlock;
-	if (likely(!force_pt_level))
-		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
-	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
-			 prefault, lpage_disallowed);
-out_unlock:
-	spin_unlock(&vcpu->kvm->mmu_lock);
-	kvm_release_pfn_clean(pfn);
-	return r;
+	return direct_page_fault(vcpu, gpa, error_code, prefault,
+				 max_level, true);
 }
 
 static void nonpaging_init_context(struct kvm_vcpu *vcpu,
@@ -4445,7 +4362,7 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
 }
 
 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
-			   unsigned access, int *nr_present)
+			   unsigned int access, int *nr_present)
 {
 	if (unlikely(is_mmio_spte(*sptep))) {
 		if (gfn != get_mmio_spte_gfn(*sptep)) {
@@ -5496,47 +5413,30 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
-static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
-{
-	LIST_HEAD(invalid_list);
-
-	if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
-		return 0;
-
-	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
-		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
-			break;
-
-		++vcpu->kvm->stat.mmu_recycled;
-	}
-	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
-
-	if (!kvm_mmu_available_pages(vcpu->kvm))
-		return -ENOSPC;
-	return 0;
-}
-
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 		       void *insn, int insn_len)
 {
 	int r, emulation_type = 0;
 	bool direct = vcpu->arch.mmu->direct_map;
 
+	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
+		return RET_PF_RETRY;
+
 	/* With shadow page tables, fault_address contains a GVA or nGPA.  */
 	if (vcpu->arch.mmu->direct_map) {
 		vcpu->arch.gpa_available = true;
-		vcpu->arch.gpa_val = cr2;
+		vcpu->arch.gpa_val = cr2_or_gpa;
 	}
 
 	r = RET_PF_INVALID;
 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, cr2, direct);
+		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
 		if (r == RET_PF_EMULATE)
 			goto emulate;
 	}
 
 	if (r == RET_PF_INVALID) {
-		r = vcpu->arch.mmu->page_fault(vcpu, cr2,
+		r = vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa,
 					       lower_32_bits(error_code),
 					       false);
 		WARN_ON(r == RET_PF_INVALID);
@@ -5556,7 +5456,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 	 */
 	if (vcpu->arch.mmu->direct_map &&
 	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
-		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
+		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
 		return 1;
 	}
 
@@ -5571,7 +5471,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 	 * explicitly shadowing L1's page tables, i.e. unprotecting something
 	 * for L1 isn't going to magically fix whatever issue cause L2 to fail.
 	 */
-	if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
+	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
 		emulation_type = EMULTYPE_ALLOW_RETRY;
 emulate:
 	/*
@@ -5586,7 +5486,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 			return 1;
 	}
 
-	return x86_emulate_instruction(vcpu, cr2, emulation_type, insn,
+	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
 				       insn_len);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
@@ -6015,8 +5915,8 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 		 * mapping if the indirect sp has level = 1.
 		 */
 		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
-		    !kvm_is_zone_device_pfn(pfn) &&
-		    PageTransCompoundMap(pfn_to_page(pfn))) {
+		    (kvm_is_zone_device_pfn(pfn) ||
+		     PageCompound(pfn_to_page(pfn)))) {
 			pte_list_remove(rmap_head, sptep);
 
 			if (kvm_available_flush_tlb_with_range())
@@ -6249,7 +6149,7 @@ static void kvm_set_mmio_spte_mask(void)
 	 * If reserved bit is not supported, clear the present bit to disable
 	 * mmio page fault.
 	 */
-	if (IS_ENABLED(CONFIG_X86_64) && shadow_phys_bits == 52)
+	if (shadow_phys_bits == 52)
 		mask &= ~1ull;
 
 	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 97b21e7..4e1ef04 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -128,6 +128,21 @@ static inline int FNAME(is_present_gpte)(unsigned long pte)
 #endif
 }
 
+static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)
+{
+#if PTTYPE != PTTYPE_EPT
+	return false;
+#else
+	return __is_bad_mt_xwr(rsvd_check, gpte);
+#endif
+}
+
+static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
+{
+	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||
+	       FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
+}
+
 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			       pt_element_t __user *ptep_user, unsigned index,
 			       pt_element_t orig_pte, pt_element_t new_pte)
@@ -175,9 +190,6 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
 				  struct kvm_mmu_page *sp, u64 *spte,
 				  u64 gpte)
 {
-	if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
-		goto no_present;
-
 	if (!FNAME(is_present_gpte)(gpte))
 		goto no_present;
 
@@ -186,6 +198,9 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
 	    !(gpte & PT_GUEST_ACCESSED_MASK))
 		goto no_present;
 
+	if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+		goto no_present;
+
 	return false;
 
 no_present:
@@ -291,11 +306,11 @@ static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
 }
 
 /*
- * Fetch a guest pte for a guest virtual address
+ * Fetch a guest pte for a guest virtual address, or for an L2's GPA.
  */
 static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-				    gva_t addr, u32 access)
+				    gpa_t addr, u32 access)
 {
 	int ret;
 	pt_element_t pte;
@@ -400,7 +415,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 		if (unlikely(!FNAME(is_present_gpte)(pte)))
 			goto error;
 
-		if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
+		if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
 			errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
 			goto error;
 		}
@@ -496,7 +511,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 }
 
 static int FNAME(walk_addr)(struct guest_walker *walker,
-			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
+			    struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
 {
 	return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
 					access);
@@ -611,17 +626,17 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
  * If the guest tries to write a write-protected page, we need to
  * emulate this operation, return 1 to indicate this case.
  */
-static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
 			 struct guest_walker *gw,
-			 int write_fault, int hlevel,
+			 int write_fault, int max_level,
 			 kvm_pfn_t pfn, bool map_writable, bool prefault,
 			 bool lpage_disallowed)
 {
 	struct kvm_mmu_page *sp = NULL;
 	struct kvm_shadow_walk_iterator it;
 	unsigned direct_access, access = gw->pt_access;
-	int top_level, ret;
-	gfn_t gfn, base_gfn;
+	int top_level, hlevel, ret;
+	gfn_t base_gfn = gw->gfn;
 
 	direct_access = gw->pte_access;
 
@@ -637,7 +652,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (FNAME(gpte_changed)(vcpu, gw, top_level))
 		goto out_gpte_changed;
 
-	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
+	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
 		goto out_gpte_changed;
 
 	for (shadow_walk_init(&it, vcpu, addr);
@@ -666,12 +681,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			link_shadow_page(vcpu, it.sptep, sp);
 	}
 
-	/*
-	 * FNAME(page_fault) might have clobbered the bottom bits of
-	 * gw->gfn, restore them from the virtual address.
-	 */
-	gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT);
-	base_gfn = gfn;
+	hlevel = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn);
 
 	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
 
@@ -682,9 +692,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		 * We cannot overwrite existing page tables with an NX
 		 * large page, as the leaf could be executable.
 		 */
-		disallowed_hugepage_adjust(it, gfn, &pfn, &hlevel);
+		disallowed_hugepage_adjust(it, gw->gfn, &pfn, &hlevel);
 
-		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+		base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
 		if (it.level == hlevel)
 			break;
 
@@ -765,7 +775,7 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
  *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
  *           a negative value on error.
  */
-static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
+static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 			     bool prefault)
 {
 	int write_fault = error_code & PFERR_WRITE_MASK;
@@ -773,12 +783,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	struct guest_walker walker;
 	int r;
 	kvm_pfn_t pfn;
-	int level = PT_PAGE_TABLE_LEVEL;
 	unsigned long mmu_seq;
 	bool map_writable, is_self_change_mapping;
 	bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
 				is_nx_huge_page_enabled();
-	bool force_pt_level = lpage_disallowed;
+	int max_level;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
@@ -818,14 +827,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
 	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
 
-	if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) {
-		level = mapping_level(vcpu, walker.gfn, &force_pt_level);
-		if (likely(!force_pt_level)) {
-			level = min(walker.level, level);
-			walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
-		}
-	} else
-		force_pt_level = true;
+	if (lpage_disallowed || is_self_change_mapping)
+		max_level = PT_PAGE_TABLE_LEVEL;
+	else
+		max_level = walker.level;
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
@@ -865,10 +870,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
 	if (make_mmu_pages_available(vcpu) < 0)
 		goto out_unlock;
-	if (!force_pt_level)
-		transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level);
-	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
-			 level, pfn, map_writable, prefault, lpage_disallowed);
+	r = FNAME(fetch)(vcpu, addr, &walker, write_fault, max_level, pfn,
+			 map_writable, prefault, lpage_disallowed);
 	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 
 out_unlock:
@@ -945,18 +948,19 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 	spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
+/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
+static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
 			       struct x86_exception *exception)
 {
 	struct guest_walker walker;
 	gpa_t gpa = UNMAPPED_GVA;
 	int r;
 
-	r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);
+	r = FNAME(walk_addr)(&walker, vcpu, addr, access);
 
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
-		gpa |= vaddr & ~PAGE_MASK;
+		gpa |= addr & ~PAGE_MASK;
 	} else if (exception)
 		*exception = walker.fault;
 
@@ -964,7 +968,8 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
 }
 
 #if PTTYPE != PTTYPE_EPT
-static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
+/* Note, gva_to_gpa_nested() is only used to translate L2 GVAs. */
+static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
 				      u32 access,
 				      struct x86_exception *exception)
 {
@@ -972,6 +977,11 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
 	gpa_t gpa = UNMAPPED_GVA;
 	int r;
 
+#ifndef CONFIG_X86_64
+	/* A 64-bit GVA should be impossible on 32-bit KVM. */
+	WARN_ON_ONCE(vaddr >> 32);
+#endif
+
 	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
 
 	if (r) {
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 7ca8831..3c6522b 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -249,13 +249,13 @@ TRACE_EVENT(
 
 TRACE_EVENT(
 	fast_page_fault,
-	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+	TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
 		 u64 *sptep, u64 old_spte, bool retry),
-	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),
+	TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry),
 
 	TP_STRUCT__entry(
 		__field(int, vcpu_id)
-		__field(gva_t, gva)
+		__field(gpa_t, cr2_or_gpa)
 		__field(u32, error_code)
 		__field(u64 *, sptep)
 		__field(u64, old_spte)
@@ -265,7 +265,7 @@ TRACE_EVENT(
 
 	TP_fast_assign(
 		__entry->vcpu_id = vcpu->vcpu_id;
-		__entry->gva = gva;
+		__entry->cr2_or_gpa = cr2_or_gpa;
 		__entry->error_code = error_code;
 		__entry->sptep = sptep;
 		__entry->old_spte = old_spte;
@@ -273,9 +273,9 @@ TRACE_EVENT(
 		__entry->retry = retry;
 	),
 
-	TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
+	TP_printk("vcpu %d gva %llx error_code %s sptep %p old %#llx"
 		  " new %llx spurious %d fixed %d", __entry->vcpu_id,
-		  __entry->gva, __print_flags(__entry->error_code, "|",
+		  __entry->cr2_or_gpa, __print_flags(__entry->error_code, "|",
 		  kvm_mmu_trace_pferr_flags), __entry->sptep,
 		  __entry->old_spte, __entry->new_spte,
 		  __spte_satisfied(old_spte), __spte_satisfied(new_spte)
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 25ce3ed..7f0059a 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -192,11 +192,15 @@ static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
 		break;
 	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
 		*seg = 1;
-		*unit = msr - MSR_MTRRfix16K_80000;
+		*unit = array_index_nospec(
+			msr - MSR_MTRRfix16K_80000,
+			MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
 		break;
 	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
 		*seg = 2;
-		*unit = msr - MSR_MTRRfix4K_C0000;
+		*unit = array_index_nospec(
+			msr - MSR_MTRRfix4K_C0000,
+			MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
 		break;
 	default:
 		return false;
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 7ebb623..1333298 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -2,6 +2,8 @@
 #ifndef __KVM_X86_PMU_H
 #define __KVM_X86_PMU_H
 
+#include <linux/nospec.h>
+
 #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
 #define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
 #define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)
@@ -102,8 +104,12 @@ static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
 static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
 					 u32 base)
 {
-	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
-		return &pmu->gp_counters[msr - base];
+	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
+		u32 index = array_index_nospec(msr - base,
+					       pmu->nr_arch_gp_counters);
+
+		return &pmu->gp_counters[index];
+	}
 
 	return NULL;
 }
@@ -113,8 +119,12 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
 {
 	int base = MSR_CORE_PERF_FIXED_CTR0;
 
-	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
-		return &pmu->fixed_counters[msr - base];
+	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
+		u32 index = array_index_nospec(msr - base,
+					       pmu->nr_arch_fixed_counters);
+
+		return &pmu->fixed_counters[index];
+	}
 
 	return NULL;
 }
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 122d4ce..a3e32d6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -387,6 +387,8 @@ static u8 rsm_ins_bytes[] = "\x0f\xaa";
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
+static void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
+static inline void avic_post_state_restore(struct kvm_vcpu *vcpu);
 
 static int nested_svm_exit_handled(struct vcpu_svm *svm);
 static int nested_svm_intercept(struct vcpu_svm *svm);
@@ -1307,6 +1309,47 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
 	}
 }
 
+/*
+ * The default MMIO mask is a single bit (excluding the present bit),
+ * which could conflict with the memory encryption bit. Check for
+ * memory encryption support and override the default MMIO mask if
+ * memory encryption is enabled.
+ */
+static __init void svm_adjust_mmio_mask(void)
+{
+	unsigned int enc_bit, mask_bit;
+	u64 msr, mask;
+
+	/* If there is no memory encryption support, use existing mask */
+	if (cpuid_eax(0x80000000) < 0x8000001f)
+		return;
+
+	/* If memory encryption is not enabled, use existing mask */
+	rdmsrl(MSR_K8_SYSCFG, msr);
+	if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+		return;
+
+	enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
+	mask_bit = boot_cpu_data.x86_phys_bits;
+
+	/* Increment the mask bit if it is the same as the encryption bit */
+	if (enc_bit == mask_bit)
+		mask_bit++;
+
+	/*
+	 * If the mask bit location is below 52, then some bits above the
+	 * physical addressing limit will always be reserved, so use the
+	 * rsvd_bits() function to generate the mask. This mask, along with
+	 * the present bit, will be used to generate a page fault with
+	 * PFER.RSV = 1.
+	 *
+	 * If the mask bit location is 52 (or above), then clear the mask.
+	 */
+	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
+
+	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
+}
+
 static __init int svm_hardware_setup(void)
 {
 	int cpu;
@@ -1361,6 +1404,8 @@ static __init int svm_hardware_setup(void)
 		}
 	}
 
+	svm_adjust_mmio_mask();
+
 	for_each_possible_cpu(cpu) {
 		r = svm_cpu_init(cpu);
 		if (r)
@@ -1502,7 +1547,10 @@ static void avic_init_vmcb(struct vcpu_svm *svm)
 	vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
 	vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
 	vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
-	vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
+	if (kvm_apicv_activated(svm->vcpu.kvm))
+		vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
+	else
+		vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
 }
 
 static void init_vmcb(struct vcpu_svm *svm)
@@ -1686,23 +1734,28 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
  * field of the VMCB. Therefore, we set up the
  * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
  */
-static int avic_init_access_page(struct kvm_vcpu *vcpu)
+static int avic_update_access_page(struct kvm *kvm, bool activate)
 {
-	struct kvm *kvm = vcpu->kvm;
 	int ret = 0;
 
 	mutex_lock(&kvm->slots_lock);
-	if (kvm->arch.apic_access_page_done)
+	/*
+	 * During kvm_destroy_vm(), kvm_pit_set_reinject() could trigger
+	 * APICv mode change, which update APIC_ACCESS_PAGE_PRIVATE_MEMSLOT
+	 * memory region. So, we need to ensure that kvm->mm == current->mm.
+	 */
+	if ((kvm->arch.apic_access_page_done == activate) ||
+	    (kvm->mm != current->mm))
 		goto out;
 
 	ret = __x86_set_memory_region(kvm,
 				      APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
 				      APIC_DEFAULT_PHYS_BASE,
-				      PAGE_SIZE);
+				      activate ? PAGE_SIZE : 0);
 	if (ret)
 		goto out;
 
-	kvm->arch.apic_access_page_done = true;
+	kvm->arch.apic_access_page_done = activate;
 out:
 	mutex_unlock(&kvm->slots_lock);
 	return ret;
@@ -1710,21 +1763,24 @@ static int avic_init_access_page(struct kvm_vcpu *vcpu)
 
 static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 {
-	int ret;
 	u64 *entry, new_entry;
 	int id = vcpu->vcpu_id;
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	ret = avic_init_access_page(vcpu);
-	if (ret)
-		return ret;
-
 	if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
 		return -EINVAL;
 
 	if (!svm->vcpu.arch.apic->regs)
 		return -EINVAL;
 
+	if (kvm_apicv_activated(vcpu->kvm)) {
+		int ret;
+
+		ret = avic_update_access_page(vcpu->kvm, true);
+		if (ret)
+			return ret;
+	}
+
 	svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
 
 	/* Setting AVIC backing page address in the phy APIC ID table */
@@ -2009,6 +2065,18 @@ static int avic_vm_init(struct kvm *kvm)
 	return err;
 }
 
+static int svm_vm_init(struct kvm *kvm)
+{
+	if (avic) {
+		int ret = avic_vm_init(kvm);
+		if (ret)
+			return ret;
+	}
+
+	kvm_apicv_init(kvm, avic);
+	return 0;
+}
+
 static inline int
 avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
 {
@@ -2144,7 +2212,7 @@ static int avic_init_vcpu(struct vcpu_svm *svm)
 	return ret;
 }
 
-static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
+static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm;
 	struct page *page;
@@ -2153,39 +2221,13 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	struct page *nested_msrpm_pages;
 	int err;
 
-	BUILD_BUG_ON_MSG(offsetof(struct vcpu_svm, vcpu) != 0,
-		"struct kvm_vcpu must be at offset 0 for arch usercopy region");
-
-	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
-	if (!svm) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	svm->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
-						     GFP_KERNEL_ACCOUNT);
-	if (!svm->vcpu.arch.user_fpu) {
-		printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
-		err = -ENOMEM;
-		goto free_partial_svm;
-	}
-
-	svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
-						     GFP_KERNEL_ACCOUNT);
-	if (!svm->vcpu.arch.guest_fpu) {
-		printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
-		err = -ENOMEM;
-		goto free_user_fpu;
-	}
-
-	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
-	if (err)
-		goto free_svm;
+	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
+	svm = to_svm(vcpu);
 
 	err = -ENOMEM;
 	page = alloc_page(GFP_KERNEL_ACCOUNT);
 	if (!page)
-		goto uninit;
+		goto out;
 
 	msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
 	if (!msrpm_pages)
@@ -2206,7 +2248,8 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	/* We initialize this flag to true to make sure that the is_running
 	 * bit would be set the first time the vcpu is loaded.
 	 */
-	svm->avic_is_running = true;
+	if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm))
+		svm->avic_is_running = true;
 
 	svm->nested.hsave = page_address(hsave_page);
 
@@ -2222,9 +2265,9 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	svm->asid_generation = 0;
 	init_vmcb(svm);
 
-	svm_init_osvw(&svm->vcpu);
+	svm_init_osvw(vcpu);
 
-	return &svm->vcpu;
+	return 0;
 
 free_page4:
 	__free_page(hsave_page);
@@ -2234,16 +2277,8 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
 free_page1:
 	__free_page(page);
-uninit:
-	kvm_vcpu_uninit(&svm->vcpu);
-free_svm:
-	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
-free_user_fpu:
-	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
-free_partial_svm:
-	kmem_cache_free(kvm_vcpu_cache, svm);
 out:
-	return ERR_PTR(err);
+	return err;
 }
 
 static void svm_clear_current_vmcb(struct vmcb *vmcb)
@@ -2269,10 +2304,6 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
 	__free_page(virt_to_page(svm->nested.hsave));
 	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
-	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
-	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
-	kmem_cache_free(kvm_vcpu_cache, svm);
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -2343,6 +2374,8 @@ static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
 
 static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
+	if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
+		kvm_vcpu_update_apicv(vcpu);
 	avic_set_running(vcpu, true);
 }
 
@@ -4192,6 +4225,8 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
 			return 1;
@@ -4277,16 +4312,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
 			return 1;
 
-		/* The STIBP bit doesn't fault even if it's not advertised */
-		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
+		if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
 			return 1;
 
 		svm->spec_ctrl = data;
-
 		if (!data)
 			break;
 
@@ -4310,13 +4345,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
 		if (data & ~PRED_CMD_IBPB)
 			return 1;
-
+		if (!boot_cpu_has(X86_FEATURE_AMD_IBPB))
+			return 1;
 		if (!data)
 			break;
 
 		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
-		if (is_guest_mode(vcpu))
-			break;
 		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
 		break;
 	case MSR_AMD64_VIRT_SPEC_CTRL:
@@ -4438,6 +4472,14 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
 {
 	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 	svm_clear_vintr(svm);
+
+	/*
+	 * For AVIC, the only reason to end up here is ExtINTs.
+	 * In this case AVIC was temporarily disabled for
+	 * requesting the IRQ window and we have to re-enable it.
+	 */
+	svm_toggle_avic_for_irq_window(&svm->vcpu, true);
+
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 	mark_dirty(svm->vmcb, VMCB_INTR);
 	++svm->vcpu.stat.irq_window_exits;
@@ -4519,9 +4561,9 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
 		 */
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			bool m = kvm_apic_match_dest(vcpu, apic,
-						     icrl & KVM_APIC_SHORT_MASK,
+						     icrl & APIC_SHORT_MASK,
 						     GET_APIC_DEST_FIELD(icrh),
-						     icrl & KVM_APIC_DEST_MASK);
+						     icrl & APIC_DEST_MASK);
 
 			if (m && !avic_vcpu_is_running(vcpu))
 				kvm_vcpu_wake_up(vcpu);
@@ -4935,7 +4977,8 @@ static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
 	*info2 = control->exit_info_2;
 }
 
-static int handle_exit(struct kvm_vcpu *vcpu)
+static int handle_exit(struct kvm_vcpu *vcpu,
+	enum exit_fastpath_completion exit_fastpath)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_run *kvm_run = vcpu->run;
@@ -4993,7 +5036,10 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 		       __func__, svm->vmcb->control.exit_int_info,
 		       exit_code);
 
-	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
+	if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) {
+		kvm_skip_emulated_instruction(vcpu);
+		return 1;
+	} else if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
 	    || !svm_exit_handlers[exit_code]) {
 		vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code);
 		dump_vmcb(vcpu);
@@ -5129,11 +5175,6 @@ static void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
 	return;
 }
 
-static bool svm_get_enable_apicv(struct kvm *kvm)
-{
-	return avic && irqchip_split(kvm);
-}
-
 static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
 {
 }
@@ -5142,17 +5183,71 @@ static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
 {
 }
 
-/* Note: Currently only used by Hyper-V. */
+static void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate)
+{
+	if (!avic || !lapic_in_kernel(vcpu))
+		return;
+
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+	kvm_request_apicv_update(vcpu->kvm, activate,
+				 APICV_INHIBIT_REASON_IRQWIN);
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+}
+
+static int svm_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct amd_svm_iommu_ir *ir;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!kvm_arch_has_assigned_device(vcpu->kvm))
+		return 0;
+
+	/*
+	 * Here, we go through the per-vcpu ir_list to update all existing
+	 * interrupt remapping table entry targeting this vcpu.
+	 */
+	spin_lock_irqsave(&svm->ir_list_lock, flags);
+
+	if (list_empty(&svm->ir_list))
+		goto out;
+
+	list_for_each_entry(ir, &svm->ir_list, node) {
+		if (activate)
+			ret = amd_iommu_activate_guest_mode(ir->data);
+		else
+			ret = amd_iommu_deactivate_guest_mode(ir->data);
+		if (ret)
+			break;
+	}
+out:
+	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+	return ret;
+}
+
 static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb = svm->vmcb;
+	bool activated = kvm_vcpu_apicv_active(vcpu);
 
-	if (kvm_vcpu_apicv_active(vcpu))
+	if (activated) {
+		/**
+		 * During AVIC temporary deactivation, guest could update
+		 * APIC ID, DFR and LDR registers, which would not be trapped
+		 * by avic_unaccelerated_access_interception(). In this case,
+		 * we need to check and update the AVIC logical APIC ID table
+		 * accordingly before re-activating.
+		 */
+		avic_post_state_restore(vcpu);
 		vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
-	else
+	} else {
 		vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
+	}
 	mark_dirty(vmcb, VMCB_AVIC);
+
+	svm_set_pi_irte_mode(vcpu, activated);
 }
 
 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
@@ -5439,9 +5534,6 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (kvm_vcpu_apicv_active(vcpu))
-		return;
-
 	/*
 	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
 	 * 1, because that's a separate STGI/VMRUN intercept.  The next time we
@@ -5451,6 +5543,13 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 	 * window under the assumption that the hardware will set the GIF.
 	 */
 	if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) {
+		/*
+		 * IRQ window is not needed when AVIC is enabled,
+		 * unless we have pending ExtINT since it cannot be injected
+		 * via AVIC. In such case, we need to temporarily disable AVIC,
+		 * and fallback to injecting IRQ via V_IRQ.
+		 */
+		svm_toggle_avic_for_irq_window(vcpu, false);
 		svm_set_vintr(svm);
 		svm_inject_irq(svm, 0x0);
 	}
@@ -5913,6 +6012,7 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+				    boot_cpu_has(X86_FEATURE_XSAVE) &&
 				    boot_cpu_has(X86_FEATURE_XSAVES);
 
 	/* Update nrips enabled cache */
@@ -5922,16 +6022,24 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 		return;
 
 	guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
+
+	/*
+	 * Currently, AVIC does not work with nested virtualization.
+	 * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
+	 */
+	if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+		kvm_request_apicv_update(vcpu->kvm, false,
+					 APICV_INHIBIT_REASON_NESTED);
 }
 
-#define F(x) bit(X86_FEATURE_##x)
+#define F feature_bit
 
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 	switch (func) {
 	case 0x1:
 		if (avic)
-			entry->ecx &= ~bit(X86_FEATURE_X2APIC);
+			entry->ecx &= ~F(X2APIC);
 		break;
 	case 0x80000001:
 		if (nested)
@@ -6001,6 +6109,11 @@ static bool svm_has_wbinvd_exit(void)
 	return true;
 }
 
+static bool svm_pku_supported(void)
+{
+	return false;
+}
+
 #define PRE_EX(exit)  { .exit_code = (exit), \
 			.stage = X86_ICPT_PRE_EXCEPT, }
 #define POST_EX(exit) { .exit_code = (exit), \
@@ -6186,9 +6299,12 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
-static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu,
+	enum exit_fastpath_completion *exit_fastpath)
 {
-
+	if (!is_guest_mode(vcpu) &&
+		to_svm(vcpu)->vmcb->control.exit_code == EXIT_REASON_MSR_WRITE)
+		*exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
 }
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
@@ -7242,6 +7358,22 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
 		   (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
 }
 
+static bool svm_check_apicv_inhibit_reasons(ulong bit)
+{
+	ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+			  BIT(APICV_INHIBIT_REASON_HYPERV) |
+			  BIT(APICV_INHIBIT_REASON_NESTED) |
+			  BIT(APICV_INHIBIT_REASON_IRQWIN) |
+			  BIT(APICV_INHIBIT_REASON_PIT_REINJ);
+
+	return supported & BIT(bit);
+}
+
+static void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate)
+{
+	avic_update_access_page(kvm, activate);
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -7259,7 +7391,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 
 	.vm_alloc = svm_vm_alloc,
 	.vm_free = svm_vm_free,
-	.vm_init = avic_vm_init,
+	.vm_init = svm_vm_init,
 	.vm_destroy = svm_vm_destroy,
 
 	.prepare_guest_switch = svm_prepare_guest_switch,
@@ -7316,8 +7448,9 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
 	.set_virtual_apic_mode = svm_set_virtual_apic_mode,
-	.get_enable_apicv = svm_get_enable_apicv,
 	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
+	.check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons,
+	.pre_update_apicv_exec_ctrl = svm_pre_update_apicv_exec_ctrl,
 	.load_eoi_exitmap = svm_load_eoi_exitmap,
 	.hwapic_irr_update = svm_hwapic_irr_update,
 	.hwapic_isr_update = svm_hwapic_isr_update,
@@ -7341,6 +7474,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.xsaves_supported = svm_xsaves_supported,
 	.umip_emulated = svm_umip_emulated,
 	.pt_supported = svm_pt_supported,
+	.pku_supported = svm_pku_supported,
 
 	.set_supported_cpuid = svm_set_supported_cpuid,
 
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 7c741a0..f194dd0 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1291,6 +1291,25 @@ TRACE_EVENT(kvm_hv_stimer_cleanup,
 		  __entry->vcpu_id, __entry->timer_index)
 );
 
+TRACE_EVENT(kvm_apicv_update_request,
+	    TP_PROTO(bool activate, unsigned long bit),
+	    TP_ARGS(activate, bit),
+
+	TP_STRUCT__entry(
+		__field(bool, activate)
+		__field(unsigned long, bit)
+	),
+
+	TP_fast_assign(
+		__entry->activate = activate;
+		__entry->bit = bit;
+	),
+
+	TP_printk("%s bit=%lu",
+		  __entry->activate ? "activate" : "deactivate",
+		  __entry->bit)
+);
+
 /*
  * Tracepoint for AMD AVIC
  */
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 7aa6971..283bdb7 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -145,6 +145,11 @@ static inline bool vmx_umip_emulated(void)
 		SECONDARY_EXEC_DESC;
 }
 
+static inline bool vmx_pku_supported(void)
+{
+	return boot_cpu_has(X86_FEATURE_PKU);
+}
+
 static inline bool cpu_has_vmx_rdtscp(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
index 7235970..3038134 100644
--- a/arch/x86/kvm/vmx/evmcs.c
+++ b/arch/x86/kvm/vmx/evmcs.c
@@ -7,6 +7,7 @@
 #include "evmcs.h"
 #include "vmcs.h"
 #include "vmx.h"
+#include "trace.h"
 
 DEFINE_STATIC_KEY_FALSE(enable_evmcs);
 
@@ -346,26 +347,93 @@ uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata)
+{
+	u32 ctl_low = (u32)*pdata;
+	u32 ctl_high = (u32)(*pdata >> 32);
+
+	/*
+	 * Hyper-V 2016 and 2019 try using these features even when eVMCS
+	 * is enabled but there are no corresponding fields.
+	 */
+	switch (msr_index) {
+	case MSR_IA32_VMX_EXIT_CTLS:
+	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+		ctl_high &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+		break;
+	case MSR_IA32_VMX_ENTRY_CTLS:
+	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+		ctl_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+		break;
+	case MSR_IA32_VMX_PROCBASED_CTLS2:
+		ctl_high &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		break;
+	}
+
+	*pdata = ctl_low | ((u64)ctl_high << 32);
+}
+
+int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
+{
+	int ret = 0;
+	u32 unsupp_ctl;
+
+	unsupp_ctl = vmcs12->pin_based_vm_exec_control &
+		EVMCS1_UNSUPPORTED_PINCTRL;
+	if (unsupp_ctl) {
+		trace_kvm_nested_vmenter_failed(
+			"eVMCS: unsupported pin-based VM-execution controls",
+			unsupp_ctl);
+		ret = -EINVAL;
+	}
+
+	unsupp_ctl = vmcs12->secondary_vm_exec_control &
+		EVMCS1_UNSUPPORTED_2NDEXEC;
+	if (unsupp_ctl) {
+		trace_kvm_nested_vmenter_failed(
+			"eVMCS: unsupported secondary VM-execution controls",
+			unsupp_ctl);
+		ret = -EINVAL;
+	}
+
+	unsupp_ctl = vmcs12->vm_exit_controls &
+		EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
+	if (unsupp_ctl) {
+		trace_kvm_nested_vmenter_failed(
+			"eVMCS: unsupported VM-exit controls",
+			unsupp_ctl);
+		ret = -EINVAL;
+	}
+
+	unsupp_ctl = vmcs12->vm_entry_controls &
+		EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
+	if (unsupp_ctl) {
+		trace_kvm_nested_vmenter_failed(
+			"eVMCS: unsupported VM-entry controls",
+			unsupp_ctl);
+		ret = -EINVAL;
+	}
+
+	unsupp_ctl = vmcs12->vm_function_control & EVMCS1_UNSUPPORTED_VMFUNC;
+	if (unsupp_ctl) {
+		trace_kvm_nested_vmenter_failed(
+			"eVMCS: unsupported VM-function controls",
+			unsupp_ctl);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
 int nested_enable_evmcs(struct kvm_vcpu *vcpu,
 			uint16_t *vmcs_version)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	bool evmcs_already_enabled = vmx->nested.enlightened_vmcs_enabled;
 
 	vmx->nested.enlightened_vmcs_enabled = true;
 
 	if (vmcs_version)
 		*vmcs_version = nested_get_evmcs_version(vcpu);
 
-	/* We don't support disabling the feature for simplicity. */
-	if (evmcs_already_enabled)
-		return 0;
-
-	vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
-	vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
-	vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
-	vmx->nested.msrs.secondary_ctls_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
-	vmx->nested.msrs.vmfunc_controls &= ~EVMCS1_UNSUPPORTED_VMFUNC;
-
 	return 0;
 }
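
nested_evmcs_filter_control_msr() above relies on the layout of the VMX capability MSRs: the low 32 bits hold the allowed-0 settings and the high 32 bits the allowed-1 settings, so hiding an unsupported feature only requires clearing its bit in the high half. A stand-alone sketch of that masking, using one illustrative control bit rather than the real VMX definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit; the real masks come from the VMX/eVMCS definitions. */
#define LOAD_PERF_GLOBAL_CTRL (1u << 12)

/*
 * VMX capability MSRs pack "must-be-0" settings in the low 32 bits and
 * "may-be-1" settings in the high 32 bits; dropping a feature means
 * clearing its bit in the high half only.
 */
static void filter_control_msr(uint64_t *pdata, uint32_t unsupported)
{
	uint32_t lo = (uint32_t)*pdata;
	uint32_t hi = (uint32_t)(*pdata >> 32);

	hi &= ~unsupported;
	*pdata = lo | ((uint64_t)hi << 32);
}

int main(void)
{
	uint64_t msr = ((uint64_t)0xffffffffu << 32) | 0x16;

	filter_control_msr(&msr, LOAD_PERF_GLOBAL_CTRL);
	printf("filtered MSR: %#llx\n", (unsigned long long)msr);
	return 0;
}

nested_evmcs_check_controls() is the other half of the trade: instead of silently clearing bits at CPUID/MSR-read time for all cases, consistency checks now reject a vmcs12 that actually tries to use the unsupported controls.
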
diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
index 07ebf68..6de47f2 100644
--- a/arch/x86/kvm/vmx/evmcs.h
+++ b/arch/x86/kvm/vmx/evmcs.h
@@ -10,6 +10,7 @@
 
 #include "capabilities.h"
 #include "vmcs.h"
+#include "vmcs12.h"
 
 struct vmcs_config;
 
@@ -201,5 +202,7 @@ bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa);
 uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
 int nested_enable_evmcs(struct kvm_vcpu *vcpu,
 			uint16_t *vmcs_version);
+void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata);
+int nested_evmcs_check_controls(struct vmcs12 *vmcs12);
 
 #endif /* __KVM_X86_VMX_EVMCS_H */
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 6879966..657c2ed 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -28,16 +28,6 @@ module_param(nested_early_check, bool, S_IRUGO);
 	failed;								\
 })
 
-#define SET_MSR_OR_WARN(vcpu, idx, data)				\
-({									\
-	bool failed = kvm_set_msr(vcpu, idx, data);			\
-	if (failed)							\
-		pr_warn_ratelimited(					\
-				"%s cannot write MSR (0x%x, 0x%llx)\n",	\
-				__func__, idx, data);			\
-	failed;								\
-})
-
 /*
  * Hyper-V requires all of these, so mark them as supported even though
  * they are just treated the same as all-context.
@@ -1084,10 +1074,10 @@ static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
 }
 
 /*
- * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
- * emulating VM entry into a guest with EPT enabled.
- * Returns 0 on success, 1 on failure. Invalid state exit qualification code
- * is assigned to entry_failure_code on failure.
+ * Load guest's/host's cr3 at nested entry/exit.  @nested_ept is true if we are
+ * emulating VM-Entry into a guest with EPT enabled.  On failure, the expected
+ * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
+ * @entry_failure_code.
  */
 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
 			       u32 *entry_failure_code)
@@ -2172,8 +2162,8 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 	 * EXEC CONTROLS
 	 */
 	exec_control = vmx_exec_control(vmx); /* L0's desires */
-	exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
-	exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
+	exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
+	exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
 	exec_control &= ~CPU_BASED_TPR_SHADOW;
 	exec_control |= vmcs12->cpu_based_vm_exec_control;
 
@@ -2550,8 +2540,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
 
 	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
-	    SET_MSR_OR_WARN(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
-			    vmcs12->guest_ia32_perf_global_ctrl))
+	    WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
+				     vmcs12->guest_ia32_perf_global_ctrl)))
 		return -EINVAL;
 
 	kvm_rsp_write(vcpu, vmcs12->guest_rsp);
@@ -2566,7 +2556,7 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
 		return -EINVAL;
 
 	if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
-	       nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING)))
+	       nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
 		return -EINVAL;
 
 	return 0;
@@ -2767,6 +2757,9 @@ static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
 	    nested_check_vm_entry_controls(vcpu, vmcs12))
 		return -EINVAL;
 
+	if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled)
+		return nested_evmcs_check_controls(vmcs12);
+
 	return 0;
 }
 
@@ -2823,7 +2816,6 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
 	    CC(vmcs12->host_ss_selector == 0 && !ia32e))
 		return -EINVAL;
 
-#ifdef CONFIG_X86_64
 	if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
 	    CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
 	    CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
@@ -2831,7 +2823,6 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
 	    CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
 	    CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
 		return -EINVAL;
-#endif
 
 	/*
 	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
@@ -2899,6 +2890,10 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
 	    CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
 		return -EINVAL;
 
+	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
+	    CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
+		return -EINVAL;
+
 	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
 	    CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
 		return -EINVAL;
@@ -3048,9 +3043,6 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
-						 struct vmcs12 *vmcs12);
-
 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -3183,7 +3175,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	u32 exit_qual;
 
 	evaluate_pending_interrupts = exec_controls_get(vmx) &
-		(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
+		(CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
 	if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
 		evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
 
@@ -3230,7 +3222,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	}
 
 	enter_guest_mode(vcpu);
-	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
 		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
 
 	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
@@ -3294,7 +3286,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	 * 26.7 "VM-entry failures during or after loading guest state".
 	 */
 vmentry_fail_vmexit_guest_mode:
-	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
 		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
 	leave_guest_mode(vcpu);
 
@@ -3407,8 +3399,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 */
 	if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
 	    !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
-	    !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
-	    !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
+	    !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_NMI_WINDOW_EXITING) &&
+	    !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_INTR_WINDOW_EXITING) &&
 	      (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
 		vmx->nested.nested_run_pending = 0;
 		return kvm_vcpu_halt(vcpu);
@@ -3427,7 +3419,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 
 /*
  * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
- * because L2 may have changed some cr0 bits directly (CRO_GUEST_HOST_MASK).
+ * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
  * This function returns the new value we should put in vmcs12.guest_cr0.
  * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
  *  1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
@@ -3999,8 +3991,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 		vcpu->arch.pat = vmcs12->host_ia32_pat;
 	}
 	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
-		SET_MSR_OR_WARN(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
-				vmcs12->host_ia32_perf_global_ctrl);
+		WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
+					 vmcs12->host_ia32_perf_global_ctrl));
 
 	/* Set L1 segment info according to Intel SDM
 	    27.5.2 Loading Host Segment and Descriptor-Table Registers */
@@ -4209,7 +4201,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	if (nested_cpu_has_preemption_timer(vmcs12))
 		hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
 
-	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
 		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
 
 	if (likely(!vmx->fail)) {
@@ -4734,8 +4726,6 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	return nested_vmx_succeed(vcpu);
 }
 
-static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
-
 /* Emulate the VMLAUNCH instruction */
 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
 {
@@ -4751,36 +4741,32 @@ static int handle_vmresume(struct kvm_vcpu *vcpu)
 
 static int handle_vmread(struct kvm_vcpu *vcpu)
 {
-	unsigned long field;
-	u64 field_value;
+	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
+						    : get_vmcs12(vcpu);
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-	int len;
-	gva_t gva = 0;
-	struct vmcs12 *vmcs12;
+	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct x86_exception e;
+	unsigned long field;
+	u64 value;
+	gva_t gva = 0;
 	short offset;
+	int len;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
+	/*
+	 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
+	 * any VMREAD sets the ALU flags for VMfailInvalid.
+	 */
+	if (vmx->nested.current_vmptr == -1ull ||
+	    (is_guest_mode(vcpu) &&
+	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
 		return nested_vmx_failInvalid(vcpu);
 
-	if (!is_guest_mode(vcpu))
-		vmcs12 = get_vmcs12(vcpu);
-	else {
-		/*
-		 * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
-		 * to shadowed-field sets the ALU flags for VMfailInvalid.
-		 */
-		if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
-			return nested_vmx_failInvalid(vcpu);
-		vmcs12 = get_shadow_vmcs12(vcpu);
-	}
-
 	/* Decode instruction info and find the field to read */
-	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)
@@ -4790,25 +4776,26 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 	if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
 		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
 
-	/* Read the field, zero-extended to a u64 field_value */
-	field_value = vmcs12_read_any(vmcs12, field, offset);
+	/* Read the field, zero-extended to a u64 value */
+	value = vmcs12_read_any(vmcs12, field, offset);
 
 	/*
 	 * Now copy part of this value to register or memory, as requested.
 	 * Note that the number of bits actually copied is 32 or 64 depending
 	 * on the guest's mode (32 or 64 bit), not on the given field's length.
 	 */
-	if (vmx_instruction_info & (1u << 10)) {
-		kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
-			field_value);
+	if (instr_info & BIT(10)) {
+		kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value);
 	} else {
 		len = is_64_bit_mode(vcpu) ? 8 : 4;
 		if (get_vmx_mem_address(vcpu, exit_qualification,
-				vmx_instruction_info, true, len, &gva))
+					instr_info, true, len, &gva))
 			return 1;
 		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
-		if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
+		if (kvm_write_guest_virt_system(vcpu, gva, &value, len, &e)) {
 			kvm_inject_page_fault(vcpu, &e);
+			return 1;
+		}
 	}
 
 	return nested_vmx_succeed(vcpu);
@@ -4840,46 +4827,58 @@ static bool is_shadow_field_ro(unsigned long field)
 
 static int handle_vmwrite(struct kvm_vcpu *vcpu)
 {
-	unsigned long field;
-	int len;
-	gva_t gva;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
+						    : get_vmcs12(vcpu);
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct x86_exception e;
+	unsigned long field;
+	short offset;
+	gva_t gva;
+	int len;
 
-	/* The value to write might be 32 or 64 bits, depending on L1's long
+	/*
+	 * The value to write might be 32 or 64 bits, depending on L1's long
 	 * mode, and eventually we need to write that into a field of several
 	 * possible lengths. The code below first zero-extends the value to 64
-	 * bit (field_value), and then copies only the appropriate number of
+	 * bit (value), and then copies only the appropriate number of
 	 * bits into the vmcs12 field.
 	 */
-	u64 field_value = 0;
-	struct x86_exception e;
-	struct vmcs12 *vmcs12;
-	short offset;
+	u64 value = 0;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (vmx->nested.current_vmptr == -1ull)
+	/*
+	 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
+	 * any VMWRITE sets the ALU flags for VMfailInvalid.
+	 */
+	if (vmx->nested.current_vmptr == -1ull ||
+	    (is_guest_mode(vcpu) &&
+	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
 		return nested_vmx_failInvalid(vcpu);
 
-	if (vmx_instruction_info & (1u << 10))
-		field_value = kvm_register_readl(vcpu,
-			(((vmx_instruction_info) >> 3) & 0xf));
+	if (instr_info & BIT(10))
+		value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf));
 	else {
 		len = is_64_bit_mode(vcpu) ? 8 : 4;
 		if (get_vmx_mem_address(vcpu, exit_qualification,
-				vmx_instruction_info, false, len, &gva))
+					instr_info, false, len, &gva))
 			return 1;
-		if (kvm_read_guest_virt(vcpu, gva, &field_value, len, &e)) {
+		if (kvm_read_guest_virt(vcpu, gva, &value, len, &e)) {
 			kvm_inject_page_fault(vcpu, &e);
 			return 1;
 		}
 	}
 
+	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
 
-	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+	offset = vmcs_field_to_offset(field);
+	if (offset < 0)
+		return nested_vmx_failValid(vcpu,
+			VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+
 	/*
 	 * If the vCPU supports "VMWRITE to any supported field in the
 	 * VMCS," then the "read-only" fields are actually read/write.
@@ -4889,29 +4888,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 		return nested_vmx_failValid(vcpu,
 			VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
 
-	if (!is_guest_mode(vcpu)) {
-		vmcs12 = get_vmcs12(vcpu);
-
-		/*
-		 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
-		 * vmcs12, else we may crush a field or consume a stale value.
-		 */
-		if (!is_shadow_field_rw(field))
-			copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
-	} else {
-		/*
-		 * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
-		 * to shadowed-field sets the ALU flags for VMfailInvalid.
-		 */
-		if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
-			return nested_vmx_failInvalid(vcpu);
-		vmcs12 = get_shadow_vmcs12(vcpu);
-	}
-
-	offset = vmcs_field_to_offset(field);
-	if (offset < 0)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+	/*
+	 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
+	 * vmcs12, else we may crush a field or consume a stale value.
+	 */
+	if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
+		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
 
 	/*
 	 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
@@ -4922,9 +4904,9 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	 * the stripped down value, L2 sees the full value as stored by KVM).
 	 */
 	if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
-		field_value &= 0x1f0ff;
+		value &= 0x1f0ff;
 
-	vmcs12_write_any(vmcs12, field, offset, field_value);
+	vmcs12_write_any(vmcs12, field, offset, value);
 
 	/*
 	 * Do not track vmcs12 dirty-state if in guest-mode as we actually
@@ -4941,7 +4923,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 			preempt_disable();
 			vmcs_load(vmx->vmcs01.shadow_vmcs);
 
-			__vmcs_writel(field, field_value);
+			__vmcs_writel(field, value);
 
 			vmcs_clear(vmx->vmcs01.shadow_vmcs);
 			vmcs_load(vmx->loaded_vmcs->vmcs);
@@ -5524,10 +5506,10 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
 		return false;
 	case EXIT_REASON_TRIPLE_FAULT:
 		return true;
-	case EXIT_REASON_PENDING_INTERRUPT:
-		return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
+	case EXIT_REASON_INTERRUPT_WINDOW:
+		return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
 	case EXIT_REASON_NMI_WINDOW:
-		return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
+		return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
 	case EXIT_REASON_TASK_SWITCH:
 		return true;
 	case EXIT_REASON_CPUID:
@@ -6015,8 +5997,8 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
 	msrs->procbased_ctls_low =
 		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
 	msrs->procbased_ctls_high &=
-		CPU_BASED_VIRTUAL_INTR_PENDING |
-		CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
+		CPU_BASED_INTR_WINDOW_EXITING |
+		CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
 		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
 		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
 		CPU_BASED_CR3_STORE_EXITING |
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 7023138..fd21cdb 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -86,10 +86,14 @@ static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
 
 static unsigned intel_find_fixed_event(int idx)
 {
-	if (idx >= ARRAY_SIZE(fixed_pmc_events))
+	u32 event;
+	size_t size = ARRAY_SIZE(fixed_pmc_events);
+
+	if (idx >= size)
 		return PERF_COUNT_HW_MAX;
 
-	return intel_arch_events[fixed_pmc_events[idx]].event_type;
+	event = fixed_pmc_events[array_index_nospec(idx, size)];
+	return intel_arch_events[event].event_type;
 }
 
 /* check if a PMC is enabled by comparing it with globl_ctrl bits. */
@@ -130,16 +134,20 @@ static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	bool fixed = idx & (1u << 30);
 	struct kvm_pmc *counters;
+	unsigned int num_counters;
 
 	idx &= ~(3u << 30);
-	if (!fixed && idx >= pmu->nr_arch_gp_counters)
+	if (fixed) {
+		counters = pmu->fixed_counters;
+		num_counters = pmu->nr_arch_fixed_counters;
+	} else {
+		counters = pmu->gp_counters;
+		num_counters = pmu->nr_arch_gp_counters;
+	}
+	if (idx >= num_counters)
 		return NULL;
-	if (fixed && idx >= pmu->nr_arch_fixed_counters)
-		return NULL;
-	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
 	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
-
-	return &counters[idx];
+	return &counters[array_index_nospec(idx, num_counters)];
 }
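
Both pmu_intel.c hunks follow the same hardening recipe: validate the guest-supplied index against the real counter count, then clamp it with array_index_nospec() so a mispredicted bounds check cannot speculatively index out of range. A user-space sketch of the bounds-check-then-clamp idea, using a plain conditional in place of the kernel's branchless helper:

#include <stddef.h>
#include <stdio.h>

/*
 * Toy stand-in for array_index_nospec(): forces an out-of-range index to
 * 0 so speculation past the bounds check cannot read beyond the array.
 * The real helper does this without a conditional branch; this only
 * illustrates the idea.
 */
static size_t clamp_index(size_t idx, size_t size)
{
	return idx < size ? idx : 0;
}

int main(void)
{
	unsigned long counters[4] = { 10, 20, 30, 40 };
	size_t idx = 7;	/* guest-controlled in the KVM case */

	if (idx >= 4) {		/* architectural bounds check */
		puts("index rejected");
		return 1;
	}
	printf("counter=%lu\n", counters[clamp_index(idx, 4)]);
	return 0;
}
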
 
 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
@@ -252,13 +260,12 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
-			if (msr_info->host_initiated)
-				pmc->counter = data;
-			else
-				pmc->counter = (s32)data;
+			if (!msr_info->host_initiated)
+				data = (s64)(s32)data;
+			pmc->counter += data - pmc_read_counter(pmc);
 			return 0;
 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
-			pmc->counter = data;
+			pmc->counter += data - pmc_read_counter(pmc);
 			return 0;
 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
 			if (data == pmc->eventsel)
diff --git a/arch/x86/kvm/vmx/vmcs_shadow_fields.h b/arch/x86/kvm/vmx/vmcs_shadow_fields.h
index eb1ecd1..cad128d 100644
--- a/arch/x86/kvm/vmx/vmcs_shadow_fields.h
+++ b/arch/x86/kvm/vmx/vmcs_shadow_fields.h
@@ -23,12 +23,12 @@ BUILD_BUG_ON(1)
  *
  * When adding or removing fields here, note that shadowed
  * fields must always be synced by prepare_vmcs02, not just
- * prepare_vmcs02_full.
+ * prepare_vmcs02_rare.
  */
 
 /*
  * Keeping the fields ordered by size is an attempt at improving
- * branch prediction in vmcs_read_any and vmcs_write_any.
+ * branch prediction in vmcs12_read_any and vmcs12_write_any.
  */
 
 /* 16-bits */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index cdb4bf50..9a66648 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1057,6 +1057,12 @@ static unsigned long segment_base(u16 selector)
 }
 #endif
 
+static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
+{
+	return (pt_mode == PT_MODE_HOST_GUEST) &&
+	       !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
+}
+
 static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
 {
 	u32 i;
@@ -1422,8 +1428,6 @@ static bool emulation_required(struct kvm_vcpu *vcpu)
 	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
 }
 
-static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
-
 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -1716,7 +1720,7 @@ static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 
 	if (is_guest_mode(vcpu) &&
-	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
+	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
 		return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
 
 	return vcpu->arch.tsc_offset;
@@ -1734,7 +1738,7 @@ static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	 * to the newly set TSC to get L2's TSC.
 	 */
 	if (is_guest_mode(vcpu) &&
-	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
+	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
 		g_tsc_offset = vmcs12->tsc_offset;
 
 	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
@@ -1773,8 +1777,6 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
 	default:
 		return 1;
 	}
-
-	return 0;
 }
 
 /*
@@ -1849,8 +1851,20 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
 		if (!nested_vmx_allowed(vcpu))
 			return 1;
-		return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
-				       &msr_info->data);
+		if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
+				    &msr_info->data))
+			return 1;
+		/*
+		 * Enlightened VMCS v1 doesn't have certain fields, but buggy
+		 * Hyper-V versions are still trying to use corresponding
+		 * features when they are exposed. Filter out the essential
+		 * minimum.
+		 */
+		if (!msr_info->host_initiated &&
+		    vmx->nested.enlightened_vmcs_enabled)
+			nested_evmcs_filter_control_msr(msr_info->index,
+							&msr_info->data);
+		break;
 	case MSR_IA32_RTIT_CTL:
 		if (pt_mode != PT_MODE_HOST_GUEST)
 			return 1;
@@ -1916,7 +1930,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 }
 
 /*
- * Writes msr value into into the appropriate "register".
+ * Writes msr value into the appropriate "register".
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
@@ -1994,12 +2008,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
 			return 1;
 
-		/* The STIBP bit doesn't fault even if it's not advertised */
-		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
+		if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
 			return 1;
 
 		vmx->spec_ctrl = data;
-
 		if (!data)
 			break;
 
@@ -2010,7 +2022,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		 *
 		 * For nested:
 		 * The handling of the MSR bitmap for L2 guests is done in
-		 * nested_vmx_merge_msr_bitmap. We should not touch the
+		 * nested_vmx_prepare_msr_bitmap. We should not touch the
 		 * vmcs02.msr_bitmap here since it gets completely overwritten
 		 * in the merging. We update the vmcs01 here for L1 as well
 		 * since it will end up touching the MSR anyway now.
@@ -2033,7 +2045,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 		if (data & ~PRED_CMD_IBPB)
 			return 1;
-
+		if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+			return 1;
 		if (!data)
 			break;
 
@@ -2046,7 +2059,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		 *
 		 * For nested:
 		 * The handling of the MSR bitmap for L2 guests is done in
-		 * nested_vmx_merge_msr_bitmap. We should not touch the
+		 * nested_vmx_prepare_msr_bitmap. We should not touch the
 		 * vmcs02.msr_bitmap here since it gets completely overwritten
 		 * in the merging.
 		 */
@@ -2104,47 +2117,50 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		pt_update_intercept_for_msr(vmx);
 		break;
 	case MSR_IA32_RTIT_STATUS:
-		if ((pt_mode != PT_MODE_HOST_GUEST) ||
-			(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
-			(data & MSR_IA32_RTIT_STATUS_MASK))
+		if (!pt_can_write_msr(vmx))
+			return 1;
+		if (data & MSR_IA32_RTIT_STATUS_MASK)
 			return 1;
 		vmx->pt_desc.guest.status = data;
 		break;
 	case MSR_IA32_RTIT_CR3_MATCH:
-		if ((pt_mode != PT_MODE_HOST_GUEST) ||
-			(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
-			!intel_pt_validate_cap(vmx->pt_desc.caps,
-						PT_CAP_cr3_filtering))
+		if (!pt_can_write_msr(vmx))
+			return 1;
+		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
+					   PT_CAP_cr3_filtering))
 			return 1;
 		vmx->pt_desc.guest.cr3_match = data;
 		break;
 	case MSR_IA32_RTIT_OUTPUT_BASE:
-		if ((pt_mode != PT_MODE_HOST_GUEST) ||
-			(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
-			(!intel_pt_validate_cap(vmx->pt_desc.caps,
-					PT_CAP_topa_output) &&
-			 !intel_pt_validate_cap(vmx->pt_desc.caps,
-					PT_CAP_single_range_output)) ||
-			(data & MSR_IA32_RTIT_OUTPUT_BASE_MASK))
+		if (!pt_can_write_msr(vmx))
+			return 1;
+		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
+					   PT_CAP_topa_output) &&
+		    !intel_pt_validate_cap(vmx->pt_desc.caps,
+					   PT_CAP_single_range_output))
+			return 1;
+		if (data & MSR_IA32_RTIT_OUTPUT_BASE_MASK)
 			return 1;
 		vmx->pt_desc.guest.output_base = data;
 		break;
 	case MSR_IA32_RTIT_OUTPUT_MASK:
-		if ((pt_mode != PT_MODE_HOST_GUEST) ||
-			(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
-			(!intel_pt_validate_cap(vmx->pt_desc.caps,
-					PT_CAP_topa_output) &&
-			 !intel_pt_validate_cap(vmx->pt_desc.caps,
-					PT_CAP_single_range_output)))
+		if (!pt_can_write_msr(vmx))
+			return 1;
+		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
+					   PT_CAP_topa_output) &&
+		    !intel_pt_validate_cap(vmx->pt_desc.caps,
+					   PT_CAP_single_range_output))
 			return 1;
 		vmx->pt_desc.guest.output_mask = data;
 		break;
 	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
+		if (!pt_can_write_msr(vmx))
+			return 1;
 		index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
-		if ((pt_mode != PT_MODE_HOST_GUEST) ||
-			(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
-			(index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
-					PT_CAP_num_address_ranges)))
+		if (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
+						       PT_CAP_num_address_ranges))
+			return 1;
+		if (is_noncanonical_address(data, vcpu))
 			return 1;
 		if (index % 2)
 			vmx->pt_desc.guest.addr_b[index / 2] = data;
@@ -2322,7 +2338,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 	      CPU_BASED_CR3_STORE_EXITING |
 	      CPU_BASED_UNCOND_IO_EXITING |
 	      CPU_BASED_MOV_DR_EXITING |
-	      CPU_BASED_USE_TSC_OFFSETING |
+	      CPU_BASED_USE_TSC_OFFSETTING |
 	      CPU_BASED_MWAIT_EXITING |
 	      CPU_BASED_MONITOR_EXITING |
 	      CPU_BASED_INVLPG_EXITING |
@@ -2657,8 +2673,6 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 
 	vmx->rmode.vm86_active = 0;
 
-	vmx_segment_cache_clear(vmx);
-
 	vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
 
 	flags = vmcs_readl(GUEST_RFLAGS);
@@ -3444,7 +3458,7 @@ static int init_rmode_tss(struct kvm *kvm)
 static int init_rmode_identity_map(struct kvm *kvm)
 {
 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
-	int i, idx, r = 0;
+	int i, r = 0;
 	kvm_pfn_t identity_map_pfn;
 	u32 tmp;
 
@@ -3452,7 +3466,7 @@ static int init_rmode_identity_map(struct kvm *kvm)
 	mutex_lock(&kvm->slots_lock);
 
 	if (likely(kvm_vmx->ept_identity_pagetable_done))
-		goto out2;
+		goto out;
 
 	if (!kvm_vmx->ept_identity_map_addr)
 		kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
@@ -3461,9 +3475,8 @@ static int init_rmode_identity_map(struct kvm *kvm)
 	r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
 				    kvm_vmx->ept_identity_map_addr, PAGE_SIZE);
 	if (r < 0)
-		goto out2;
+		goto out;
 
-	idx = srcu_read_lock(&kvm->srcu);
 	r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
 	if (r < 0)
 		goto out;
@@ -3479,9 +3492,6 @@ static int init_rmode_identity_map(struct kvm *kvm)
 	kvm_vmx->ept_identity_pagetable_done = true;
 
 out:
-	srcu_read_unlock(&kvm->srcu, idx);
-
-out2:
 	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
@@ -3719,11 +3729,6 @@ void pt_update_intercept_for_msr(struct vcpu_vmx *vmx)
 	}
 }
 
-static bool vmx_get_enable_apicv(struct kvm *kvm)
-{
-	return enable_apicv;
-}
-
 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -4009,6 +4014,7 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
 	if (vmx_xsaves_supported()) {
 		/* Exposing XSAVES only when XSAVE is exposed */
 		bool xsaves_enabled =
+			boot_cpu_has(X86_FEATURE_XSAVE) &&
 			guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
 			guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);
 
@@ -4319,7 +4325,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
 static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
-	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING);
+	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
 }
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
@@ -4330,7 +4336,7 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 		return;
 	}
 
-	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING);
+	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
 }
 
 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
@@ -4455,8 +4461,11 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 	if (enable_unrestricted_guest)
 		return 0;
 
-	ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
-				    PAGE_SIZE * 3);
+	mutex_lock(&kvm->slots_lock);
+	ret = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
+				      PAGE_SIZE * 3);
+	mutex_unlock(&kvm->slots_lock);
+
 	if (ret)
 		return ret;
 	to_kvm_vmx(kvm)->tss_addr = addr;
@@ -4938,7 +4947,7 @@ static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
 
 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
 {
-	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING);
+	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
@@ -5151,7 +5160,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 static int handle_nmi_window(struct kvm_vcpu *vcpu)
 {
 	WARN_ON_ONCE(!enable_vnmi);
-	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING);
+	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
 	++vcpu->stat.nmi_window_exits;
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
@@ -5172,7 +5181,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 	WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending);
 
 	intr_window_requested = exec_controls_get(vmx) &
-				CPU_BASED_VIRTUAL_INTR_PENDING;
+				CPU_BASED_INTR_WINDOW_EXITING;
 
 	while (vmx->emulation_required && count-- != 0) {
 		if (intr_window_requested && vmx_interrupt_allowed(vcpu))
@@ -5496,7 +5505,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_CPUID]                   = kvm_emulate_cpuid,
 	[EXIT_REASON_MSR_READ]                = kvm_emulate_rdmsr,
 	[EXIT_REASON_MSR_WRITE]               = kvm_emulate_wrmsr,
-	[EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
+	[EXIT_REASON_INTERRUPT_WINDOW]        = handle_interrupt_window,
 	[EXIT_REASON_HLT]                     = kvm_emulate_halt,
 	[EXIT_REASON_INVD]		      = handle_invd,
 	[EXIT_REASON_INVLPG]		      = handle_invlpg,
@@ -5783,7 +5792,8 @@ void dump_vmcs(void)
  * The guest has exited.  See if we can fix it or if we need userspace
  * assistance.
  */
-static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+static int vmx_handle_exit(struct kvm_vcpu *vcpu,
+	enum exit_fastpath_completion exit_fastpath)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 exit_reason = vmx->exit_reason;
@@ -5869,34 +5879,44 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	if (exit_reason < kvm_vmx_max_exit_handlers
-	    && kvm_vmx_exit_handlers[exit_reason]) {
-#ifdef CONFIG_RETPOLINE
-		if (exit_reason == EXIT_REASON_MSR_WRITE)
-			return kvm_emulate_wrmsr(vcpu);
-		else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
-			return handle_preemption_timer(vcpu);
-		else if (exit_reason == EXIT_REASON_PENDING_INTERRUPT)
-			return handle_interrupt_window(vcpu);
-		else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
-			return handle_external_interrupt(vcpu);
-		else if (exit_reason == EXIT_REASON_HLT)
-			return kvm_emulate_halt(vcpu);
-		else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
-			return handle_ept_misconfig(vcpu);
-#endif
-		return kvm_vmx_exit_handlers[exit_reason](vcpu);
-	} else {
-		vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
-				exit_reason);
-		dump_vmcs();
-		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-		vcpu->run->internal.suberror =
-			KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
-		vcpu->run->internal.ndata = 1;
-		vcpu->run->internal.data[0] = exit_reason;
-		return 0;
+	if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) {
+		kvm_skip_emulated_instruction(vcpu);
+		return 1;
 	}
+
+	if (exit_reason >= kvm_vmx_max_exit_handlers)
+		goto unexpected_vmexit;
+#ifdef CONFIG_RETPOLINE
+	if (exit_reason == EXIT_REASON_MSR_WRITE)
+		return kvm_emulate_wrmsr(vcpu);
+	else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
+		return handle_preemption_timer(vcpu);
+	else if (exit_reason == EXIT_REASON_INTERRUPT_WINDOW)
+		return handle_interrupt_window(vcpu);
+	else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
+		return handle_external_interrupt(vcpu);
+	else if (exit_reason == EXIT_REASON_HLT)
+		return kvm_emulate_halt(vcpu);
+	else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
+		return handle_ept_misconfig(vcpu);
+#endif
+
+	exit_reason = array_index_nospec(exit_reason,
+					 kvm_vmx_max_exit_handlers);
+	if (!kvm_vmx_exit_handlers[exit_reason])
+		goto unexpected_vmexit;
+
+	return kvm_vmx_exit_handlers[exit_reason](vcpu);
+
+unexpected_vmexit:
+	vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+	dump_vmcs();
+	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+	vcpu->run->internal.suberror =
+			KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
+	vcpu->run->internal.ndata = 1;
+	vcpu->run->internal.data[0] = exit_reason;
+	return 0;
 }
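
The rewritten vmx_handle_exit() first honours the fastpath verdict, then bounds-checks exit_reason, clamps it with array_index_nospec(), and only then indexes the handler table, routing everything else to a single unexpected_vmexit label. A small sketch of that table-dispatch shape, with made-up handlers standing in for kvm_vmx_exit_handlers[]:

#include <stdio.h>

typedef int (*exit_handler_t)(void);

static int handle_hlt(void)   { puts("hlt");   return 1; }
static int handle_cpuid(void) { puts("cpuid"); return 1; }

static exit_handler_t handlers[] = { handle_hlt, handle_cpuid };
#define NR_HANDLERS (sizeof(handlers) / sizeof(handlers[0]))

static int dispatch(unsigned int exit_reason)
{
	if (exit_reason >= NR_HANDLERS || !handlers[exit_reason])
		return -1;	/* unexpected exit -> report to userspace */
	/* KVM additionally clamps exit_reason with array_index_nospec() */
	return handlers[exit_reason]();
}

int main(void)
{
	printf("known:   %d\n", dispatch(1));
	printf("unknown: %d\n", dispatch(42));
	return 0;
}
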
 
 /*
@@ -6217,7 +6237,8 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
 }
 STACK_FRAME_NON_STANDARD(handle_external_interrupt_irqoff);
 
-static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu,
+	enum exit_fastpath_completion *exit_fastpath)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
@@ -6225,6 +6246,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 		handle_external_interrupt_irqoff(vcpu);
 	else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)
 		handle_exception_nmi_irqoff(vmx);
+	else if (!is_guest_mode(vcpu) &&
+		vmx->exit_reason == EXIT_REASON_MSR_WRITE)
+		*exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
 }
 
 static bool vmx_has_emulated_msr(int index)
@@ -6633,60 +6657,31 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 	free_vpid(vmx->vpid);
 	nested_vmx_free_vcpu(vcpu);
 	free_loaded_vmcs(vmx->loaded_vmcs);
-	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
-	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
-	kmem_cache_free(kvm_vcpu_cache, vmx);
 }
 
-static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
+static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 {
-	int err;
 	struct vcpu_vmx *vmx;
 	unsigned long *msr_bitmap;
-	int i, cpu;
+	int i, cpu, err;
 
-	BUILD_BUG_ON_MSG(offsetof(struct vcpu_vmx, vcpu) != 0,
-		"struct kvm_vcpu must be at offset 0 for arch usercopy region");
-
-	vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
-	if (!vmx)
-		return ERR_PTR(-ENOMEM);
-
-	vmx->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
-			GFP_KERNEL_ACCOUNT);
-	if (!vmx->vcpu.arch.user_fpu) {
-		printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
-		err = -ENOMEM;
-		goto free_partial_vcpu;
-	}
-
-	vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
-			GFP_KERNEL_ACCOUNT);
-	if (!vmx->vcpu.arch.guest_fpu) {
-		printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
-		err = -ENOMEM;
-		goto free_user_fpu;
-	}
-
-	vmx->vpid = allocate_vpid();
-
-	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
-	if (err)
-		goto free_vcpu;
+	BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
+	vmx = to_vmx(vcpu);
 
 	err = -ENOMEM;
 
+	vmx->vpid = allocate_vpid();
+
 	/*
 	 * If PML is turned on, failure on enabling PML just results in failure
 	 * of creating the vcpu, therefore we can simplify PML logic (by
 	 * avoiding dealing with cases, such as enabling PML partially on vcpus
-	 * for the guest, etc.
+	 * for the guest), etc.
 	 */
 	if (enable_pml) {
 		vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 		if (!vmx->pml_pg)
-			goto uninit_vcpu;
+			goto free_vpid;
 	}
 
 	BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) != NR_SHARED_MSRS);
@@ -6731,7 +6726,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
 	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
 	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
-	if (kvm_cstate_in_guest(kvm)) {
+	if (kvm_cstate_in_guest(vcpu->kvm)) {
 		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C1_RES, MSR_TYPE_R);
 		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
 		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
@@ -6741,19 +6736,19 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	vmx->loaded_vmcs = &vmx->vmcs01;
 	cpu = get_cpu();
-	vmx_vcpu_load(&vmx->vcpu, cpu);
-	vmx->vcpu.cpu = cpu;
+	vmx_vcpu_load(vcpu, cpu);
+	vcpu->cpu = cpu;
 	init_vmcs(vmx);
-	vmx_vcpu_put(&vmx->vcpu);
+	vmx_vcpu_put(vcpu);
 	put_cpu();
-	if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
-		err = alloc_apic_access_page(kvm);
+	if (cpu_need_virtualize_apic_accesses(vcpu)) {
+		err = alloc_apic_access_page(vcpu->kvm);
 		if (err)
 			goto free_vmcs;
 	}
 
 	if (enable_ept && !enable_unrestricted_guest) {
-		err = init_rmode_identity_map(kvm);
+		err = init_rmode_identity_map(vcpu->kvm);
 		if (err)
 			goto free_vmcs;
 	}
@@ -6761,7 +6756,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (nested)
 		nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
 					   vmx_capability.ept,
-					   kvm_vcpu_apicv_active(&vmx->vcpu));
+					   kvm_vcpu_apicv_active(vcpu));
 	else
 		memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));
 
@@ -6779,22 +6774,15 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	vmx->ept_pointer = INVALID_PAGE;
 
-	return &vmx->vcpu;
+	return 0;
 
 free_vmcs:
 	free_loaded_vmcs(vmx->loaded_vmcs);
 free_pml:
 	vmx_destroy_pml_buffer(vmx);
-uninit_vcpu:
-	kvm_vcpu_uninit(&vmx->vcpu);
-free_vcpu:
+free_vpid:
 	free_vpid(vmx->vpid);
-	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
-free_user_fpu:
-	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
-free_partial_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vmx);
-	return ERR_PTR(err);
+	return err;
 }
 
 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
@@ -6830,6 +6818,7 @@ static int vmx_vm_init(struct kvm *kvm)
 			break;
 		}
 	}
+	kvm_apicv_init(kvm, enable_apicv);
 	return 0;
 }
 
@@ -6946,28 +6935,28 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
 } while (0)
 
 	entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
-	cr4_fixed1_update(X86_CR4_VME,        edx, bit(X86_FEATURE_VME));
-	cr4_fixed1_update(X86_CR4_PVI,        edx, bit(X86_FEATURE_VME));
-	cr4_fixed1_update(X86_CR4_TSD,        edx, bit(X86_FEATURE_TSC));
-	cr4_fixed1_update(X86_CR4_DE,         edx, bit(X86_FEATURE_DE));
-	cr4_fixed1_update(X86_CR4_PSE,        edx, bit(X86_FEATURE_PSE));
-	cr4_fixed1_update(X86_CR4_PAE,        edx, bit(X86_FEATURE_PAE));
-	cr4_fixed1_update(X86_CR4_MCE,        edx, bit(X86_FEATURE_MCE));
-	cr4_fixed1_update(X86_CR4_PGE,        edx, bit(X86_FEATURE_PGE));
-	cr4_fixed1_update(X86_CR4_OSFXSR,     edx, bit(X86_FEATURE_FXSR));
-	cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM));
-	cr4_fixed1_update(X86_CR4_VMXE,       ecx, bit(X86_FEATURE_VMX));
-	cr4_fixed1_update(X86_CR4_SMXE,       ecx, bit(X86_FEATURE_SMX));
-	cr4_fixed1_update(X86_CR4_PCIDE,      ecx, bit(X86_FEATURE_PCID));
-	cr4_fixed1_update(X86_CR4_OSXSAVE,    ecx, bit(X86_FEATURE_XSAVE));
+	cr4_fixed1_update(X86_CR4_VME,        edx, feature_bit(VME));
+	cr4_fixed1_update(X86_CR4_PVI,        edx, feature_bit(VME));
+	cr4_fixed1_update(X86_CR4_TSD,        edx, feature_bit(TSC));
+	cr4_fixed1_update(X86_CR4_DE,         edx, feature_bit(DE));
+	cr4_fixed1_update(X86_CR4_PSE,        edx, feature_bit(PSE));
+	cr4_fixed1_update(X86_CR4_PAE,        edx, feature_bit(PAE));
+	cr4_fixed1_update(X86_CR4_MCE,        edx, feature_bit(MCE));
+	cr4_fixed1_update(X86_CR4_PGE,        edx, feature_bit(PGE));
+	cr4_fixed1_update(X86_CR4_OSFXSR,     edx, feature_bit(FXSR));
+	cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, feature_bit(XMM));
+	cr4_fixed1_update(X86_CR4_VMXE,       ecx, feature_bit(VMX));
+	cr4_fixed1_update(X86_CR4_SMXE,       ecx, feature_bit(SMX));
+	cr4_fixed1_update(X86_CR4_PCIDE,      ecx, feature_bit(PCID));
+	cr4_fixed1_update(X86_CR4_OSXSAVE,    ecx, feature_bit(XSAVE));
 
 	entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
-	cr4_fixed1_update(X86_CR4_FSGSBASE,   ebx, bit(X86_FEATURE_FSGSBASE));
-	cr4_fixed1_update(X86_CR4_SMEP,       ebx, bit(X86_FEATURE_SMEP));
-	cr4_fixed1_update(X86_CR4_SMAP,       ebx, bit(X86_FEATURE_SMAP));
-	cr4_fixed1_update(X86_CR4_PKE,        ecx, bit(X86_FEATURE_PKU));
-	cr4_fixed1_update(X86_CR4_UMIP,       ecx, bit(X86_FEATURE_UMIP));
-	cr4_fixed1_update(X86_CR4_LA57,       ecx, bit(X86_FEATURE_LA57));
+	cr4_fixed1_update(X86_CR4_FSGSBASE,   ebx, feature_bit(FSGSBASE));
+	cr4_fixed1_update(X86_CR4_SMEP,       ebx, feature_bit(SMEP));
+	cr4_fixed1_update(X86_CR4_SMAP,       ebx, feature_bit(SMAP));
+	cr4_fixed1_update(X86_CR4_PKE,        ecx, feature_bit(PKU));
+	cr4_fixed1_update(X86_CR4_UMIP,       ecx, feature_bit(UMIP));
+	cr4_fixed1_update(X86_CR4_LA57,       ecx, feature_bit(LA57));
 
 #undef cr4_fixed1_update
 }
@@ -7101,7 +7090,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 	if (func == 1 && nested)
-		entry->ecx |= bit(X86_FEATURE_VMX);
+		entry->ecx |= feature_bit(VMX);
 }
 
 static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
@@ -7731,6 +7720,14 @@ static __exit void hardware_unsetup(void)
 	free_kvm_area();
 }
 
+static bool vmx_check_apicv_inhibit_reasons(ulong bit)
+{
+	ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+			  BIT(APICV_INHIBIT_REASON_HYPERV);
+
+	return supported & BIT(bit);
+}
+
 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
@@ -7803,10 +7800,10 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.update_cr8_intercept = update_cr8_intercept,
 	.set_virtual_apic_mode = vmx_set_virtual_apic_mode,
 	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
-	.get_enable_apicv = vmx_get_enable_apicv,
 	.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
 	.load_eoi_exitmap = vmx_load_eoi_exitmap,
 	.apicv_post_state_restore = vmx_apicv_post_state_restore,
+	.check_apicv_inhibit_reasons = vmx_check_apicv_inhibit_reasons,
 	.hwapic_irr_update = vmx_hwapic_irr_update,
 	.hwapic_isr_update = vmx_hwapic_isr_update,
 	.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
@@ -7843,6 +7840,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.xsaves_supported = vmx_xsaves_supported,
 	.umip_emulated = vmx_umip_emulated,
 	.pt_supported = vmx_pt_supported,
+	.pku_supported = vmx_pku_supported,
 
 	.request_immediate_exit = vmx_request_immediate_exit,
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 740d3ee..fbabb2f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -26,6 +26,7 @@
 #include "cpuid.h"
 #include "pmu.h"
 #include "hyperv.h"
+#include "lapic.h"
 
 #include <linux/clocksource.h>
 #include <linux/interrupt.h>
@@ -93,6 +94,8 @@ u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 #endif
 
+static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
+
 #define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
 #define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
 
@@ -879,30 +882,46 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
+#define __cr4_reserved_bits(__cpu_has, __c)		\
+({							\
+	u64 __reserved_bits = CR4_RESERVED_BITS;	\
+							\
+	if (!__cpu_has(__c, X86_FEATURE_XSAVE))		\
+		__reserved_bits |= X86_CR4_OSXSAVE;	\
+	if (!__cpu_has(__c, X86_FEATURE_SMEP))		\
+		__reserved_bits |= X86_CR4_SMEP;	\
+	if (!__cpu_has(__c, X86_FEATURE_SMAP))		\
+		__reserved_bits |= X86_CR4_SMAP;	\
+	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))	\
+		__reserved_bits |= X86_CR4_FSGSBASE;	\
+	if (!__cpu_has(__c, X86_FEATURE_PKU))		\
+		__reserved_bits |= X86_CR4_PKE;		\
+	if (!__cpu_has(__c, X86_FEATURE_LA57))		\
+		__reserved_bits |= X86_CR4_LA57;	\
+	if (!__cpu_has(__c, X86_FEATURE_UMIP))		\
+		__reserved_bits |= X86_CR4_UMIP;	\
+	__reserved_bits;				\
+})
+
+static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c)
+{
+	u64 reserved_bits = __cr4_reserved_bits(cpu_has, c);
+
+	if (cpuid_ecx(0x7) & feature_bit(LA57))
+		reserved_bits &= ~X86_CR4_LA57;
+
+	if (kvm_x86_ops->umip_emulated())
+		reserved_bits &= ~X86_CR4_UMIP;
+
+	return reserved_bits;
+}
+
 static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-	if (cr4 & CR4_RESERVED_BITS)
+	if (cr4 & cr4_reserved_bits)
 		return -EINVAL;
 
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
-		return -EINVAL;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
-		return -EINVAL;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
-		return -EINVAL;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
-		return -EINVAL;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
-		return -EINVAL;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
-		return -EINVAL;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
+	if (cr4 & __cr4_reserved_bits(guest_cpuid_has, vcpu))
 		return -EINVAL;
 
 	return 0;
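
The point of the __cr4_reserved_bits() macro is that the same bit-building logic is instantiated twice: once with the host capability check to populate the cached cr4_reserved_bits, and once with guest_cpuid_has()/vcpu inside kvm_valid_cr4(). A sketch of parameterising a reserved-bit computation by a capability predicate — the feature numbering and the host_has/guest_has helpers below are invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CR4_OSXSAVE (1ul << 18)
#define CR4_SMEP    (1ul << 20)

/*
 * Build a reserved-bit mask from whatever "has this feature?" predicate
 * the caller supplies - the same trick __cr4_reserved_bits() plays with
 * its __cpu_has argument.
 */
static uint64_t cr4_reserved(bool (*has)(int feat))
{
	uint64_t reserved = 0;

	if (!has(0))		/* XSAVE */
		reserved |= CR4_OSXSAVE;
	if (!has(1))		/* SMEP */
		reserved |= CR4_SMEP;
	return reserved;
}

static bool host_has(int feat)  { return true; }
static bool guest_has(int feat) { return feat == 0; } /* XSAVE only */

int main(void)
{
	printf("host reserved:  %#llx\n", (unsigned long long)cr4_reserved(host_has));
	printf("guest reserved: %#llx\n", (unsigned long long)cr4_reserved(guest_has));
	return 0;
}
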
@@ -1047,9 +1066,11 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
 
 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 {
+	size_t size = ARRAY_SIZE(vcpu->arch.db);
+
 	switch (dr) {
 	case 0 ... 3:
-		vcpu->arch.db[dr] = val;
+		vcpu->arch.db[array_index_nospec(dr, size)] = val;
 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
 			vcpu->arch.eff_db[dr] = val;
 		break;
@@ -1064,7 +1085,7 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 	case 5:
 		/* fall through */
 	default: /* 7 */
-		if (val & 0xffffffff00000000ULL)
+		if (!kvm_dr7_valid(val))
 			return -1; /* #GP */
 		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
 		kvm_update_dr7(vcpu);
@@ -1086,9 +1107,11 @@ EXPORT_SYMBOL_GPL(kvm_set_dr);
 
 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
 {
+	size_t size = ARRAY_SIZE(vcpu->arch.db);
+
 	switch (dr) {
 	case 0 ... 3:
-		*val = vcpu->arch.db[dr];
+		*val = vcpu->arch.db[array_index_nospec(dr, size)];
 		break;
 	case 4:
 		/* fall through */
@@ -1212,6 +1235,7 @@ static const u32 emulated_msrs_all[] = {
 	MSR_MISC_FEATURES_ENABLES,
 	MSR_AMD64_VIRT_SPEC_CTRL,
 	MSR_IA32_POWER_CTL,
+	MSR_IA32_UCODE_REV,
 
 	/*
 	 * The following list leaves out MSRs whose values are determined
@@ -1526,6 +1550,49 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
 
 /*
+ * The fast path for frequent and performance-sensitive wrmsr emulation,
+ * i.e. the sending of IPIs.  Handling the IPI send early in the VM-Exit
+ * flow reduces virtual-IPI latency by avoiding the expensive bits of
+ * transitioning from guest to host, e.g. reacquiring KVM's SRCU lock,
+ * in contrast to the other wrmsr cases, which must be handled after
+ * interrupts are enabled on the host.
+ */
+static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
+{
+	if (lapic_in_kernel(vcpu) && apic_x2apic_mode(vcpu->arch.apic) &&
+		((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
+		((data & APIC_MODE_MASK) == APIC_DM_FIXED)) {
+
+		kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR2, (u32)(data >> 32));
+		return kvm_lapic_reg_write(vcpu->arch.apic, APIC_ICR, (u32)data);
+	}
+
+	return 1;
+}
+
+enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
+{
+	u32 msr = kvm_rcx_read(vcpu);
+	u64 data = kvm_read_edx_eax(vcpu);
+	int ret = 0;
+
+	switch (msr) {
+	case APIC_BASE_MSR + (APIC_ICR >> 4):
+		ret = handle_fastpath_set_x2apic_icr_irqoff(vcpu, data);
+		break;
+	default:
+		return EXIT_FASTPATH_NONE;
+	}
+
+	if (!ret) {
+		trace_kvm_msr_write(msr, data);
+		return EXIT_FASTPATH_SKIP_EMUL_INS;
+	}
+
+	return EXIT_FASTPATH_NONE;
+}
+EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
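
handle_fastpath_set_msr_irqoff() deliberately handles only one MSR, the x2APIC ICR, and only for fixed-delivery, physical-destination IPIs; anything else returns EXIT_FASTPATH_NONE and takes the normal wrmsr exit path once interrupts are re-enabled. A sketch of that ICR decode, with illustrative masks that mirror the APIC_* constants used above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative field masks; the kernel uses the APIC_* definitions. */
#define DEST_MODE_LOGICAL  (1u << 11)
#define DELIVERY_MODE_MASK 0x700u
#define DELIVERY_FIXED     0x000u

/* Mirror of the fastpath test: physical destination, fixed delivery. */
static bool icr_write_is_fastpath(uint64_t icr)
{
	return !(icr & DEST_MODE_LOGICAL) &&
	       (icr & DELIVERY_MODE_MASK) == DELIVERY_FIXED;
}

int main(void)
{
	uint64_t fixed_phys = ((uint64_t)3 << 32) | 0xfe; /* vector 0xfe to APIC ID 3 */
	uint64_t logical    = fixed_phys | DEST_MODE_LOGICAL;

	printf("fixed/physical -> fastpath: %d\n", icr_write_is_fastpath(fixed_phys));
	printf("logical        -> fastpath: %d\n", icr_write_is_fastpath(logical));
	return 0;
}
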
+
+/*
  * Adapt set_msr() to msr_io()'s calling convention
  */
 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
@@ -1545,6 +1612,8 @@ struct pvclock_clock {
 	u64 mask;
 	u32 mult;
 	u32 shift;
+	u64 base_cycles;
+	u64 offset;
 };
 
 struct pvclock_gtod_data {
@@ -1553,11 +1622,8 @@ struct pvclock_gtod_data {
 	struct pvclock_clock clock; /* extract of a clocksource struct */
 	struct pvclock_clock raw_clock; /* extract of a clocksource struct */
 
-	u64		boot_ns_raw;
-	u64		boot_ns;
-	u64		nsec_base;
+	ktime_t		offs_boot;
 	u64		wall_time_sec;
-	u64		monotonic_raw_nsec;
 };
 
 static struct pvclock_gtod_data pvclock_gtod_data;
@@ -1565,10 +1631,6 @@ static struct pvclock_gtod_data pvclock_gtod_data;
 static void update_pvclock_gtod(struct timekeeper *tk)
 {
 	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
-	u64 boot_ns, boot_ns_raw;
-
-	boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
-	boot_ns_raw = ktime_to_ns(ktime_add(tk->tkr_raw.base, tk->offs_boot));
 
 	write_seqcount_begin(&vdata->seq);
 
@@ -1578,23 +1640,35 @@ static void update_pvclock_gtod(struct timekeeper *tk)
 	vdata->clock.mask		= tk->tkr_mono.mask;
 	vdata->clock.mult		= tk->tkr_mono.mult;
 	vdata->clock.shift		= tk->tkr_mono.shift;
+	vdata->clock.base_cycles	= tk->tkr_mono.xtime_nsec;
+	vdata->clock.offset		= tk->tkr_mono.base;
 
 	vdata->raw_clock.vclock_mode	= tk->tkr_raw.clock->archdata.vclock_mode;
 	vdata->raw_clock.cycle_last	= tk->tkr_raw.cycle_last;
 	vdata->raw_clock.mask		= tk->tkr_raw.mask;
 	vdata->raw_clock.mult		= tk->tkr_raw.mult;
 	vdata->raw_clock.shift		= tk->tkr_raw.shift;
-
-	vdata->boot_ns			= boot_ns;
-	vdata->nsec_base		= tk->tkr_mono.xtime_nsec;
+	vdata->raw_clock.base_cycles	= tk->tkr_raw.xtime_nsec;
+	vdata->raw_clock.offset		= tk->tkr_raw.base;
 
 	vdata->wall_time_sec            = tk->xtime_sec;
 
-	vdata->boot_ns_raw		= boot_ns_raw;
-	vdata->monotonic_raw_nsec	= tk->tkr_raw.xtime_nsec;
+	vdata->offs_boot		= tk->offs_boot;
 
 	write_seqcount_end(&vdata->seq);
 }
+
+static s64 get_kvmclock_base_ns(void)
+{
+	/* Count up from boot time, but with the frequency of the raw clock.  */
+	return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot));
+}
+#else
+static s64 get_kvmclock_base_ns(void)
+{
+	/* Master clock not used, so we can just use CLOCK_BOOTTIME.  */
+	return ktime_get_boottime_ns();
+}
 #endif
 
 void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
@@ -1608,7 +1682,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 	int version;
 	int r;
 	struct pvclock_wall_clock wc;
-	struct timespec64 boot;
+	u64 wall_nsec;
 
 	if (!wall_clock)
 		return;
@@ -1628,17 +1702,12 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 	/*
 	 * The guest calculates current wall clock time by adding
 	 * system time (updated by kvm_guest_time_update below) to the
-	 * wall clock specified here.  guest system time equals host
-	 * system time for us, thus we must fill in host boot time here.
+	 * wall clock specified here.  We do the reverse here.
 	 */
-	getboottime64(&boot);
+	wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);
 
-	if (kvm->arch.kvmclock_offset) {
-		struct timespec64 ts = ns_to_timespec64(kvm->arch.kvmclock_offset);
-		boot = timespec64_sub(boot, ts);
-	}
-	wc.sec = (u32)boot.tv_sec; /* overflow in 2106 guest time */
-	wc.nsec = boot.tv_nsec;
+	wc.nsec = do_div(wall_nsec, 1000000000);
+	wc.sec = (u32)wall_nsec; /* overflow in 2106 guest time */
 	wc.version = version;
 
 	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
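
kvm_write_wall_clock() now derives the guest's wall-clock epoch as "host real time minus current kvmclock" and splits that nanosecond count with do_div(): the remainder becomes wc.nsec and the truncated quotient becomes wc.sec. The same arithmetic in plain C, as a sketch rather than the kernel helper:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		uint64_t wall_nsec = 1700000000123456789ULL;		/* example value        */
		uint32_t nsec = (uint32_t)(wall_nsec % NSEC_PER_SEC);	/* do_div() remainder   */
		uint32_t sec  = (uint32_t)(wall_nsec / NSEC_PER_SEC);	/* quotient, cut to u32 */

		/* The 32-bit seconds field is what overflows in 2106. */
		printf("sec=%u nsec=%u\n", sec, nsec);
		return 0;
	}
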
@@ -1886,7 +1955,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = kvm_compute_tsc_offset(vcpu, data);
-	ns = ktime_get_boottime_ns();
+	ns = get_kvmclock_base_ns();
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 
 	if (vcpu->arch.virtual_tsc_khz) {
@@ -2061,10 +2130,10 @@ static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp)
 
 	do {
 		seq = read_seqcount_begin(&gtod->seq);
-		ns = gtod->monotonic_raw_nsec;
+		ns = gtod->raw_clock.base_cycles;
 		ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode);
-		ns >>= gtod->clock.shift;
-		ns += gtod->boot_ns_raw;
+		ns >>= gtod->raw_clock.shift;
+		ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot));
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 	*t = ns;
 
@@ -2081,7 +2150,7 @@ static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
 	do {
 		seq = read_seqcount_begin(&gtod->seq);
 		ts->tv_sec = gtod->wall_time_sec;
-		ns = gtod->nsec_base;
+		ns = gtod->clock.base_cycles;
 		ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
 		ns >>= gtod->clock.shift;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@@ -2224,7 +2293,7 @@ u64 get_kvmclock_ns(struct kvm *kvm)
 	spin_lock(&ka->pvclock_gtod_sync_lock);
 	if (!ka->use_master_clock) {
 		spin_unlock(&ka->pvclock_gtod_sync_lock);
-		return ktime_get_boottime_ns() + ka->kvmclock_offset;
+		return get_kvmclock_base_ns() + ka->kvmclock_offset;
 	}
 
 	hv_clock.tsc_timestamp = ka->master_cycle_now;
@@ -2240,7 +2309,7 @@ u64 get_kvmclock_ns(struct kvm *kvm)
 				   &hv_clock.tsc_to_system_mul);
 		ret = __pvclock_read_cycles(&hv_clock, rdtsc());
 	} else
-		ret = ktime_get_boottime_ns() + ka->kvmclock_offset;
+		ret = get_kvmclock_base_ns() + ka->kvmclock_offset;
 
 	put_cpu();
 
@@ -2339,7 +2408,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	}
 	if (!use_master_clock) {
 		host_tsc = rdtsc();
-		kernel_ns = ktime_get_boottime_ns();
+		kernel_ns = get_kvmclock_base_ns();
 	}
 
 	tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
@@ -2379,6 +2448,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
 	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
 	vcpu->last_guest_tsc = tsc_timestamp;
+	WARN_ON(vcpu->hv_clock.system_time < 0);
 
 	/* If the host uses TSC clocksource, then it is stable */
 	pvclock_flags = 0;
@@ -2485,7 +2555,10 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	default:
 		if (msr >= MSR_IA32_MC0_CTL &&
 		    msr < MSR_IA32_MCx_CTL(bank_num)) {
-			u32 offset = msr - MSR_IA32_MC0_CTL;
+			u32 offset = array_index_nospec(
+				msr - MSR_IA32_MC0_CTL,
+				MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);
+
 			/* only 0 or all 1s can be written to IA32_MCi_CTL
 			 * some Linux kernels though clear bit 10 in bank 4 to
 			 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
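
Both MCE MSR paths now pass the computed bank offset through array_index_nospec(), so a mispredicted range check cannot be used to speculatively index past mce_banks[]. The helper clamps an index against its bound: in-range values pass through, out-of-range values collapse to 0. A simplified model is shown below; the real kernel helper builds the mask branchlessly, which this sketch deliberately does not reproduce, and the bound used here is an assumed layout.

	#include <stdio.h>

	/* Clamp idx to [0, size): out-of-range indexes collapse to 0. */
	static unsigned long index_nospec_model(unsigned long idx, unsigned long size)
	{
		unsigned long mask = idx < size ? ~0UL : 0UL;	/* real helper avoids this branch */

		return idx & mask;
	}

	int main(void)
	{
		unsigned long bound = 32 * 4;	/* assumed: bank_num * 4 per-bank MSR slots */

		printf("%lu\n", index_nospec_model(17, bound));		/* in range: 17 */
		printf("%lu\n", index_nospec_model(999, bound));	/* clamped:  0  */
		return 0;
	}
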
@@ -2581,45 +2654,47 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
+	struct kvm_host_map map;
+	struct kvm_steal_time *st;
+
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
-	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
+	/* -EAGAIN is returned in atomic context so we can just return. */
+	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
+			&map, &vcpu->arch.st.cache, false))
 		return;
 
+	st = map.hva +
+		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+
 	/*
 	 * Doing a TLB flush here, on the guest's behalf, can avoid
 	 * expensive IPIs.
 	 */
 	trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
-		vcpu->arch.st.steal.preempted & KVM_VCPU_FLUSH_TLB);
-	if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
+		st->preempted & KVM_VCPU_FLUSH_TLB);
+	if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
 		kvm_vcpu_flush_tlb(vcpu, false);
 
-	if (vcpu->arch.st.steal.version & 1)
-		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */
+	vcpu->arch.st.preempted = 0;
 
-	vcpu->arch.st.steal.version += 1;
+	if (st->version & 1)
+		st->version += 1;  /* first time write, random junk */
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+	st->version += 1;
 
 	smp_wmb();
 
-	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
+	st->steal += current->sched_info.run_delay -
 		vcpu->arch.st.last_steal;
 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
-
 	smp_wmb();
 
-	vcpu->arch.st.steal.version += 1;
+	st->version += 1;
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
 }
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
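
record_steal_time() keeps the pvclock-style even/odd version protocol while switching from kvm_write_guest_cached() to a direct kernel mapping of the guest page: the version is bumped to an odd value before the fields are updated and back to an even value afterwards, so a guest reader retries on an odd or changing version. Below is a user-space model of the reader side of that convention; the field layout is simplified and memory barriers are omitted, so it is a sketch, not guest code.

	#include <stdint.h>
	#include <stdio.h>

	struct steal_time_model {
		uint32_t version;	/* odd while the writer is mid-update       */
		uint64_t steal;		/* nanoseconds of involuntary wait           */
	};

	static uint64_t read_steal(const volatile struct steal_time_model *st)
	{
		uint32_t v1, v2;
		uint64_t steal;

		do {
			v1 = st->version;
			steal = st->steal;
			v2 = st->version;
			/* Retry if an update was in flight or raced with us. */
		} while ((v1 & 1) || v1 != v2);

		return steal;
	}

	int main(void)
	{
		struct steal_time_model st = { .version = 2, .steal = 123456 };

		printf("steal=%llu ns\n", (unsigned long long)read_steal(&st));
		return 0;
	}
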
@@ -2786,11 +2861,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (data & KVM_STEAL_RESERVED_MASK)
 			return 1;
 
-		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-						data & KVM_STEAL_VALID_BITS,
-						sizeof(struct kvm_steal_time)))
-			return 1;
-
 		vcpu->arch.st.msr_val = data;
 
 		if (!(data & KVM_MSR_ENABLED))
@@ -2926,7 +2996,10 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 	default:
 		if (msr >= MSR_IA32_MC0_CTL &&
 		    msr < MSR_IA32_MCx_CTL(bank_num)) {
-			u32 offset = msr - MSR_IA32_MC0_CTL;
+			u32 offset = array_index_nospec(
+				msr - MSR_IA32_MC0_CTL,
+				MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);
+
 			data = vcpu->arch.mce_banks[offset];
 			break;
 		}
@@ -3458,10 +3531,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	kvm_x86_ops->vcpu_load(vcpu, cpu);
 
-	fpregs_assert_state_consistent();
-	if (test_thread_flag(TIF_NEED_FPU_LOAD))
-		switch_fpu_return();
-
 	/* Apply any externally detected TSC adjustments (due to suspend) */
 	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
 		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
@@ -3501,15 +3570,25 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
+	struct kvm_host_map map;
+	struct kvm_steal_time *st;
+
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
-	vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
+	if (vcpu->arch.st.preempted)
+		return;
 
-	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
-			&vcpu->arch.st.steal.preempted,
-			offsetof(struct kvm_steal_time, preempted),
-			sizeof(vcpu->arch.st.steal.preempted));
+	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
+			&vcpu->arch.st.cache, true))
+		return;
+
+	st = map.hva +
+		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+
+	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
+
+	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -4660,9 +4739,6 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 {
 	struct kvm_pit *pit = kvm->arch.vpit;
 
-	if (!pit)
-		return -ENXIO;
-
 	/* pit->pit_state.lock was overloaded to prevent userspace from getting
 	 * an inconsistent state after running multiple KVM_REINJECT_CONTROL
 	 * ioctls in parallel.  Use a separate lock if that ioctl isn't rare.
@@ -5029,6 +5105,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r =  -EFAULT;
 		if (copy_from_user(&control, argp, sizeof(control)))
 			goto out;
+		r = -ENXIO;
+		if (!kvm->arch.vpit)
+			goto out;
 		r = kvm_vm_ioctl_reinject(kvm, &control);
 		break;
 	}
@@ -6186,6 +6265,21 @@ static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
 	return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, check_limit);
 }
 
+static bool emulator_guest_has_long_mode(struct x86_emulate_ctxt *ctxt)
+{
+	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_LM);
+}
+
+static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt)
+{
+	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE);
+}
+
+static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
+{
+	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
+}
+
 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
 {
 	return kvm_register_read(emul_to_vcpu(ctxt), reg);
@@ -6263,6 +6357,9 @@ static const struct x86_emulate_ops emulate_ops = {
 	.fix_hypercall       = emulator_fix_hypercall,
 	.intercept           = emulator_intercept,
 	.get_cpuid           = emulator_get_cpuid,
+	.guest_has_long_mode = emulator_guest_has_long_mode,
+	.guest_has_movbe     = emulator_guest_has_movbe,
+	.guest_has_fxsr      = emulator_guest_has_fxsr,
 	.set_nmi_mask        = emulator_set_nmi_mask,
 	.get_hflags          = emulator_get_hflags,
 	.set_hflags          = emulator_set_hflags,
@@ -6379,11 +6476,11 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
 	return 1;
 }
 
-static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
+static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 				  bool write_fault_to_shadow_pgtable,
 				  int emulation_type)
 {
-	gpa_t gpa = cr2;
+	gpa_t gpa = cr2_or_gpa;
 	kvm_pfn_t pfn;
 
 	if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
@@ -6397,7 +6494,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
 		 * Write permission should be allowed since only
 		 * write access need to be emulated.
 		 */
-		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
+		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
 
 		/*
 		 * If the mapping is invalid in guest, let cpu retry
@@ -6454,10 +6551,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
 }
 
 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
-			      unsigned long cr2,  int emulation_type)
+			      gpa_t cr2_or_gpa,  int emulation_type)
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
+	unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;
 
 	last_retry_eip = vcpu->arch.last_retry_eip;
 	last_retry_addr = vcpu->arch.last_retry_addr;
@@ -6486,14 +6583,14 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	if (x86_page_table_writing_insn(ctxt))
 		return false;
 
-	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
+	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
 		return false;
 
 	vcpu->arch.last_retry_eip = ctxt->eip;
-	vcpu->arch.last_retry_addr = cr2;
+	vcpu->arch.last_retry_addr = cr2_or_gpa;
 
 	if (!vcpu->arch.mmu->direct_map)
-		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
+		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
 
 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
 
@@ -6639,11 +6736,8 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
 	return false;
 }
 
-int x86_emulate_instruction(struct kvm_vcpu *vcpu,
-			    unsigned long cr2,
-			    int emulation_type,
-			    void *insn,
-			    int insn_len)
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+			    int emulation_type, void *insn, int insn_len)
 {
 	int r;
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
@@ -6689,8 +6783,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 				kvm_queue_exception(vcpu, UD_VECTOR);
 				return 1;
 			}
-			if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
-						emulation_type))
+			if (reexecute_instruction(vcpu, cr2_or_gpa,
+						  write_fault_to_spt,
+						  emulation_type))
 				return 1;
 			if (ctxt->have_exception) {
 				/*
@@ -6724,7 +6819,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		return 1;
 	}
 
-	if (retry_instruction(ctxt, cr2, emulation_type))
+	if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
 		return 1;
 
 	/* this is needed for vmware backdoor interface to work since it
@@ -6736,7 +6831,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 
 restart:
 	/* Save the faulting GPA (cr2) in the address field */
-	ctxt->exception.address = cr2;
+	ctxt->exception.address = cr2_or_gpa;
 
 	r = x86_emulate_insn(ctxt);
 
@@ -6744,7 +6839,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		return 1;
 
 	if (r == EMULATION_FAILED) {
-		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
+		if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt,
 					emulation_type))
 			return 1;
 
@@ -7357,8 +7452,8 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
 {
 	struct kvm_lapic_irq lapic_irq;
 
-	lapic_irq.shorthand = 0;
-	lapic_irq.dest_mode = 0;
+	lapic_irq.shorthand = APIC_DEST_NOSHORT;
+	lapic_irq.dest_mode = APIC_DEST_PHYSICAL;
 	lapic_irq.level = 0;
 	lapic_irq.dest_id = apicid;
 	lapic_irq.msi_redir_hint = false;
@@ -7367,18 +7462,22 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
 	kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
 }
 
-void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
+bool kvm_apicv_activated(struct kvm *kvm)
 {
-	if (!lapic_in_kernel(vcpu)) {
-		WARN_ON_ONCE(vcpu->arch.apicv_active);
-		return;
-	}
-	if (!vcpu->arch.apicv_active)
-		return;
-
-	vcpu->arch.apicv_active = false;
-	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
+	return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
 }
+EXPORT_SYMBOL_GPL(kvm_apicv_activated);
+
+void kvm_apicv_init(struct kvm *kvm, bool enable)
+{
+	if (enable)
+		clear_bit(APICV_INHIBIT_REASON_DISABLE,
+			  &kvm->arch.apicv_inhibit_reasons);
+	else
+		set_bit(APICV_INHIBIT_REASON_DISABLE,
+			&kvm->arch.apicv_inhibit_reasons);
+}
+EXPORT_SYMBOL_GPL(kvm_apicv_init);
 
 static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
 {
@@ -7907,6 +8006,47 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm)
 	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
 }
 
+void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
+{
+	if (!lapic_in_kernel(vcpu))
+		return;
+
+	vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm);
+	kvm_apic_update_apicv(vcpu);
+	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
+
+/*
+ * NOTE: Do not hold any lock prior to calling this.
+ *
+ * In particular, kvm_request_apicv_update() expects kvm->srcu not to be
+ * locked, because it calls __x86_set_memory_region() which does
+ * synchronize_srcu(&kvm->srcu).
+ */
+void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
+{
+	if (!kvm_x86_ops->check_apicv_inhibit_reasons ||
+	    !kvm_x86_ops->check_apicv_inhibit_reasons(bit))
+		return;
+
+	if (activate) {
+		if (!test_and_clear_bit(bit, &kvm->arch.apicv_inhibit_reasons) ||
+		    !kvm_apicv_activated(kvm))
+			return;
+	} else {
+		if (test_and_set_bit(bit, &kvm->arch.apicv_inhibit_reasons) ||
+		    kvm_apicv_activated(kvm))
+			return;
+	}
+
+	trace_kvm_apicv_update_request(activate, bit);
+	if (kvm_x86_ops->pre_update_apicv_exec_ctrl)
+		kvm_x86_ops->pre_update_apicv_exec_ctrl(kvm, activate);
+	kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
+}
+EXPORT_SYMBOL_GPL(kvm_request_apicv_update);
+
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
 	if (!kvm_apic_present(vcpu))
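
APICv activation is now tracked as a bitmap of inhibit reasons: kvm_apicv_activated() is true only while the bitmap is zero, and kvm_request_apicv_update() propagates an update to vCPUs only when activation clears the last remaining reason, or when deactivation adds a reason that was not already set. A stripped-down, single-threaded model of that transition logic follows; it assumes nothing about the real inhibit-reason values and stands in for the atomic bitops used in the patch.

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned long inhibit_reasons;	/* one bit per reason; 0 == APICv active */

	/* Returns true when the request should be broadcast to all vCPUs. */
	static bool request_apicv_update(bool activate, unsigned int bit)
	{
		unsigned long mask = 1UL << bit;
		bool was_set = inhibit_reasons & mask;

		if (activate) {
			inhibit_reasons &= ~mask;
			/* Propagate only if this was the last inhibit reason. */
			return was_set && inhibit_reasons == 0;
		}
		inhibit_reasons |= mask;
		/* Propagate only if this reason was newly added. */
		return !was_set;
	}

	int main(void)
	{
		printf("set bit 0:   update=%d\n", request_apicv_update(false, 0));
		printf("set bit 1:   update=%d\n", request_apicv_update(false, 1));
		printf("clear bit 0: update=%d\n", request_apicv_update(true, 0));
		printf("clear bit 1: update=%d\n", request_apicv_update(true, 1));
		return 0;
	}
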
@@ -7997,6 +8137,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	bool req_int_win =
 		dm_request_for_irq_injection(vcpu) &&
 		kvm_cpu_accept_dm_intr(vcpu);
+	enum exit_fastpath_completion exit_fastpath = EXIT_FASTPATH_NONE;
 
 	bool req_immediate_exit = false;
 
@@ -8096,6 +8237,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		 */
 		if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
 			kvm_hv_process_stimers(vcpu);
+		if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
+			kvm_vcpu_update_apicv(vcpu);
 	}
 
 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
@@ -8198,8 +8341,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	trace_kvm_entry(vcpu->vcpu_id);
 	guest_enter_irqoff();
 
-	/* The preempt notifier should have taken care of the FPU already.  */
-	WARN_ON_ONCE(test_thread_flag(TIF_NEED_FPU_LOAD));
+	fpregs_assert_state_consistent();
+	if (test_thread_flag(TIF_NEED_FPU_LOAD))
+		switch_fpu_return();
 
 	if (unlikely(vcpu->arch.switch_db_regs)) {
 		set_debugreg(0, 7);
@@ -8243,7 +8387,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
 
-	kvm_x86_ops->handle_exit_irqoff(vcpu);
+	kvm_x86_ops->handle_exit_irqoff(vcpu, &exit_fastpath);
 
 	/*
 	 * Consume any pending interrupts, including the possible source of
@@ -8287,7 +8431,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		kvm_lapic_sync_from_vapic(vcpu);
 
 	vcpu->arch.gpa_available = false;
-	r = kvm_x86_ops->handle_exit(vcpu);
+	r = kvm_x86_ops->handle_exit(vcpu, exit_fastpath);
 	return r;
 
 cancel_injection:
@@ -8471,12 +8615,26 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static void kvm_save_current_fpu(struct fpu *fpu)
+{
+	/*
+	 * If the target FPU state is not resident in the CPU registers, just
+	 * memcpy() from current, else save CPU state directly to the target.
+	 */
+	if (test_thread_flag(TIF_NEED_FPU_LOAD))
+		memcpy(&fpu->state, &current->thread.fpu.state,
+		       fpu_kernel_xstate_size);
+	else
+		copy_fpregs_to_fpstate(fpu);
+}
+
 /* Swap (qemu) user FPU context for the guest FPU context. */
 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
 	fpregs_lock();
 
-	copy_fpregs_to_fpstate(vcpu->arch.user_fpu);
+	kvm_save_current_fpu(vcpu->arch.user_fpu);
+
 	/* PKRU is separately restored in kvm_x86_ops->run.  */
 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
 				~XFEATURE_MASK_PKRU);
@@ -8492,7 +8650,8 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
 	fpregs_lock();
 
-	copy_fpregs_to_fpstate(vcpu->arch.guest_fpu);
+	kvm_save_current_fpu(vcpu->arch.guest_fpu);
+
 	copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state);
 
 	fpregs_mark_activate();
@@ -8714,6 +8873,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
 	vcpu_load(vcpu);
+	if (kvm_mpx_supported())
+		kvm_load_guest_fpu(vcpu);
 
 	kvm_apic_accept_events(vcpu);
 	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
@@ -8722,6 +8883,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 	else
 		mp_state->mp_state = vcpu->arch.mp_state;
 
+	if (kvm_mpx_supported())
+		kvm_put_guest_fpu(vcpu);
 	vcpu_put(vcpu);
 	return 0;
 }
@@ -9082,33 +9245,91 @@ static void fx_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.cr0 |= X86_CR0_ET;
 }
 
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 {
-	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
-
-	kvmclock_reset(vcpu);
-
-	kvm_x86_ops->vcpu_free(vcpu);
-	free_cpumask_var(wbinvd_dirty_mask);
-}
-
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
-						unsigned int id)
-{
-	struct kvm_vcpu *vcpu;
-
 	if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
-		printk_once(KERN_WARNING
-		"kvm: SMP vm created on host with unstable TSC; "
-		"guest TSC will not be reliable\n");
+		pr_warn_once("kvm: SMP vm created on host with unstable TSC; "
+			     "guest TSC will not be reliable\n");
 
-	vcpu = kvm_x86_ops->vcpu_create(kvm, id);
-
-	return vcpu;
+	return 0;
 }
 
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
+	struct page *page;
+	int r;
+
+	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
+	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
+		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+	else
+		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
+
+	kvm_set_tsc_khz(vcpu, max_tsc_khz);
+
+	r = kvm_mmu_create(vcpu);
+	if (r < 0)
+		return r;
+
+	if (irqchip_in_kernel(vcpu->kvm)) {
+		r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
+		if (r < 0)
+			goto fail_mmu_destroy;
+		if (kvm_apicv_activated(vcpu->kvm))
+			vcpu->arch.apicv_active = true;
+	} else
+		static_key_slow_inc(&kvm_no_apic_vcpu);
+
+	r = -ENOMEM;
+
+	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!page)
+		goto fail_free_lapic;
+	vcpu->arch.pio_data = page_address(page);
+
+	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
+				       GFP_KERNEL_ACCOUNT);
+	if (!vcpu->arch.mce_banks)
+		goto fail_free_pio_data;
+	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
+
+	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
+				GFP_KERNEL_ACCOUNT))
+		goto fail_free_mce_banks;
+
+	vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
+						GFP_KERNEL_ACCOUNT);
+	if (!vcpu->arch.user_fpu) {
+		pr_err("kvm: failed to allocate userspace's fpu\n");
+		goto free_wbinvd_dirty_mask;
+	}
+
+	vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
+						 GFP_KERNEL_ACCOUNT);
+	if (!vcpu->arch.guest_fpu) {
+		pr_err("kvm: failed to allocate vcpu's fpu\n");
+		goto free_user_fpu;
+	}
+	fx_init(vcpu);
+
+	vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
+
+	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+
+	vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
+
+	kvm_async_pf_hash_reset(vcpu);
+	kvm_pmu_init(vcpu);
+
+	vcpu->arch.pending_external_vector = -1;
+	vcpu->arch.preempted_in_kernel = false;
+
+	kvm_hv_vcpu_init(vcpu);
+
+	r = kvm_x86_ops->vcpu_create(vcpu);
+	if (r)
+		goto free_guest_fpu;
+
 	vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
 	vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
 	kvm_vcpu_mtrr_init(vcpu);
@@ -9117,6 +9338,22 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	kvm_init_mmu(vcpu, false);
 	vcpu_put(vcpu);
 	return 0;
+
+free_guest_fpu:
+	kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
+free_user_fpu:
+	kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
+free_wbinvd_dirty_mask:
+	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
+fail_free_mce_banks:
+	kfree(vcpu->arch.mce_banks);
+fail_free_pio_data:
+	free_page((unsigned long)vcpu->arch.pio_data);
+fail_free_lapic:
+	kvm_free_lapic(vcpu);
+fail_mmu_destroy:
+	kvm_mmu_destroy(vcpu);
+	return r;
 }
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -9149,13 +9386,29 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.apf.msr_val = 0;
+	struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
+	int idx;
 
-	vcpu_load(vcpu);
-	kvm_mmu_unload(vcpu);
-	vcpu_put(vcpu);
+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
+
+	kvmclock_reset(vcpu);
 
 	kvm_x86_ops->vcpu_free(vcpu);
+
+	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
+	kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
+	kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
+
+	kvm_hv_vcpu_uninit(vcpu);
+	kvm_pmu_destroy(vcpu);
+	kfree(vcpu->arch.mce_banks);
+	kvm_free_lapic(vcpu);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+	kvm_mmu_destroy(vcpu);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	free_page((unsigned long)vcpu->arch.pio_data);
+	if (!lapic_in_kernel(vcpu))
+		static_key_slow_dec(&kvm_no_apic_vcpu);
 }
 
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -9171,7 +9424,6 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vcpu->arch.nmi_injected = false;
 	kvm_clear_interrupt_queue(vcpu);
 	kvm_clear_exception_queue(vcpu);
-	vcpu->arch.exception.pending = false;
 
 	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
 	kvm_update_dr0123(vcpu);
@@ -9347,6 +9599,8 @@ int kvm_arch_hardware_setup(void)
 	if (r != 0)
 		return r;
 
+	cr4_reserved_bits = kvm_host_cr4_reserved_bits(&boot_cpu_data);
+
 	if (kvm_has_tsc_control) {
 		/*
 		 * Make sure the user can only configure tsc_khz values that
@@ -9375,6 +9629,13 @@ void kvm_arch_hardware_unsetup(void)
 
 int kvm_arch_check_processor_compat(void)
 {
+	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+
+	WARN_ON(!irqs_disabled());
+
+	if (kvm_host_cr4_reserved_bits(c) != cr4_reserved_bits)
+		return -EIO;
+
 	return kvm_x86_ops->check_processor_compatibility();
 }
 
@@ -9392,98 +9653,6 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
 struct static_key kvm_no_apic_vcpu __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
 
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	struct page *page;
-	int r;
-
-	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
-	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
-		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-	else
-		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
-
-	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (!page) {
-		r = -ENOMEM;
-		goto fail;
-	}
-	vcpu->arch.pio_data = page_address(page);
-
-	kvm_set_tsc_khz(vcpu, max_tsc_khz);
-
-	r = kvm_mmu_create(vcpu);
-	if (r < 0)
-		goto fail_free_pio_data;
-
-	if (irqchip_in_kernel(vcpu->kvm)) {
-		vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu->kvm);
-		r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
-		if (r < 0)
-			goto fail_mmu_destroy;
-	} else
-		static_key_slow_inc(&kvm_no_apic_vcpu);
-
-	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
-				       GFP_KERNEL_ACCOUNT);
-	if (!vcpu->arch.mce_banks) {
-		r = -ENOMEM;
-		goto fail_free_lapic;
-	}
-	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
-
-	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
-				GFP_KERNEL_ACCOUNT)) {
-		r = -ENOMEM;
-		goto fail_free_mce_banks;
-	}
-
-	fx_init(vcpu);
-
-	vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
-
-	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
-
-	vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
-
-	kvm_async_pf_hash_reset(vcpu);
-	kvm_pmu_init(vcpu);
-
-	vcpu->arch.pending_external_vector = -1;
-	vcpu->arch.preempted_in_kernel = false;
-
-	kvm_hv_vcpu_init(vcpu);
-
-	return 0;
-
-fail_free_mce_banks:
-	kfree(vcpu->arch.mce_banks);
-fail_free_lapic:
-	kvm_free_lapic(vcpu);
-fail_mmu_destroy:
-	kvm_mmu_destroy(vcpu);
-fail_free_pio_data:
-	free_page((unsigned long)vcpu->arch.pio_data);
-fail:
-	return r;
-}
-
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-	int idx;
-
-	kvm_hv_vcpu_uninit(vcpu);
-	kvm_pmu_destroy(vcpu);
-	kfree(vcpu->arch.mce_banks);
-	kvm_free_lapic(vcpu);
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	kvm_mmu_destroy(vcpu);
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-	free_page((unsigned long)vcpu->arch.pio_data);
-	if (!lapic_in_kernel(vcpu))
-		static_key_slow_dec(&kvm_no_apic_vcpu);
-}
-
 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -9518,7 +9687,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	mutex_init(&kvm->arch.apic_map_lock);
 	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
 
-	kvm->arch.kvmclock_offset = -ktime_get_boottime_ns();
+	kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
 	pvclock_update_vm_gtod_copy(kvm);
 
 	kvm->arch.guest_can_read_msr_platform_info = true;
@@ -9558,7 +9727,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
 		kvm_unload_vcpu_mmu(vcpu);
 	}
 	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_arch_vcpu_free(vcpu);
+		kvm_vcpu_destroy(vcpu);
 
 	mutex_lock(&kvm->lock);
 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
@@ -9627,18 +9796,6 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 }
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 
-int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
-{
-	int r;
-
-	mutex_lock(&kvm->slots_lock);
-	r = __x86_set_memory_region(kvm, id, gpa, size);
-	mutex_unlock(&kvm->slots_lock);
-
-	return r;
-}
-EXPORT_SYMBOL_GPL(x86_set_memory_region);
-
 void kvm_arch_pre_destroy_vm(struct kvm *kvm)
 {
 	kvm_mmu_pre_destroy_vm(kvm);
@@ -9652,9 +9809,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
	 * unless the memory map has changed due to process exit
 		 * or fd copying.
 		 */
-		x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
-		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
-		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
+		mutex_lock(&kvm->slots_lock);
+		__x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+					0, 0);
+		__x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+					0, 0);
+		__x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
+		mutex_unlock(&kvm->slots_lock);
 	}
 	if (kvm_x86_ops->vm_destroy)
 		kvm_x86_ops->vm_destroy(kvm);
@@ -9758,11 +9919,18 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
+	struct kvm_vcpu *vcpu;
+	int i;
+
 	/*
 	 * memslots->generation has been incremented.
 	 * mmio generation may have reached its maximum value.
 	 */
 	kvm_mmu_invalidate_mmio_sptes(kvm, gen);
+
+	/* Force re-initialization of steal_time cache */
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_vcpu_kick(vcpu);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -9792,7 +9960,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
 	 *
 	 * The reason is, in case of PML, we need to set D-bit for any slots
 	 * with dirty logging disabled in order to eliminate unnecessary GPA
-	 * logging in PML buffer (and potential PML buffer full VMEXT). This
+	 * logging in PML buffer (and potential PML buffer full VMEXIT). This
 	 * guarantees leaving PML enabled during guest's lifetime won't have
 	 * any additional overhead from PML when guest is running with dirty
 	 * logging disabled for memory slots.
@@ -10014,7 +10182,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 	      work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
 		return;
 
-	vcpu->arch.mmu->page_fault(vcpu, work->gva, 0, true);
+	vcpu->arch.mmu->page_fault(vcpu, work->cr2_or_gpa, 0, true);
 }
 
 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
@@ -10127,7 +10295,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 {
 	struct x86_exception fault;
 
-	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
+	trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
 	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
 
 	if (kvm_can_deliver_async_pf(vcpu) &&
@@ -10162,7 +10330,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 		work->arch.token = ~0; /* broadcast wakeup */
 	else
 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
-	trace_kvm_async_pf_ready(work->arch.token, work->gva);
+	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
 
 	if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
 	    !apf_get_user(vcpu, &val)) {
@@ -10284,7 +10452,6 @@ bool kvm_vector_hashing_enabled(void)
 {
 	return vector_hashing;
 }
-EXPORT_SYMBOL_GPL(kvm_vector_hashing_enabled);
 
 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 {
@@ -10292,6 +10459,28 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
 
+u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
+{
+	uint64_t bits = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
+
+	/* The STIBP bit doesn't fault even if it's not advertised */
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+	    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
+		bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
+	if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
+	    !boot_cpu_has(X86_FEATURE_AMD_IBRS))
+		bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL_SSBD) &&
+	    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
+		bits &= ~SPEC_CTRL_SSBD;
+	if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
+		bits &= ~SPEC_CTRL_SSBD;
+
+	return bits;
+}
+EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
@@ -10313,3 +10502,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 29391af..3624665 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -144,11 +144,6 @@ static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
 	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
 }
 
-static inline u32 bit(int bitno)
-{
-	return 1 << (bitno & 31);
-}
-
 static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
@@ -166,21 +161,13 @@ static inline u64 get_canonical(u64 la, u8 vaddr_bits)
 
 static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
 {
-#ifdef CONFIG_X86_64
 	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
-#else
-	return false;
-#endif
 }
 
 static inline bool emul_is_noncanonical_address(u64 la,
 						struct x86_emulate_ctxt *ctxt)
 {
-#ifdef CONFIG_X86_64
 	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
-#else
-	return false;
-#endif
 }
 
 static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
@@ -289,8 +276,9 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
 					  int page_num);
 bool kvm_vector_hashing_enabled(void);
-int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 			    int emulation_type, void *insn, int insn_len);
+enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
 
 #define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
 				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
@@ -369,7 +357,14 @@ static inline bool kvm_pat_valid(u64 data)
 	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
 }
 
+static inline bool kvm_dr7_valid(u64 data)
+{
+	/* Bits [63:32] are reserved */
+	return !(data >> 32);
+}
+
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
+u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
 
 #endif
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 8908c58..53adc17 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -929,7 +929,7 @@
 
 GrpTable: Grp3_2
 0: TEST Ev,Iz
-1:
+1: TEST Ev,Iz
 2: NOT Ev
 3: NEG Ev
 4: MUL rAX,Ev
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 345848f..98f7c6f 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -28,8 +28,8 @@
 obj-$(CONFIG_X86_32)		+= pgtable_32.o iomap_32.o
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
-obj-$(CONFIG_X86_PTDUMP_CORE)	+= dump_pagetables.o
-obj-$(CONFIG_X86_PTDUMP)	+= debug_pagetables.o
+obj-$(CONFIG_PTDUMP_CORE)	+= dump_pagetables.o
+obj-$(CONFIG_PTDUMP_DEBUGFS)	+= debug_pagetables.o
 
 obj-$(CONFIG_HIGHMEM)		+= highmem_32.o
 
@@ -45,7 +45,6 @@
 obj-$(CONFIG_ACPI_NUMA)		+= srat.o
 obj-$(CONFIG_NUMA_EMU)		+= numa_emulation.o
 
-obj-$(CONFIG_X86_INTEL_MPX)			+= mpx.o
 obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)	+= pkeys.o
 obj-$(CONFIG_RANDOMIZE_MEMORY)			+= kaslr.o
 obj-$(CONFIG_PAGE_TABLE_ISOLATION)		+= pti.o
diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c
index 39001a4..4a3b62f 100644
--- a/arch/x86/mm/debug_pagetables.c
+++ b/arch/x86/mm/debug_pagetables.c
@@ -7,7 +7,7 @@
 
 static int ptdump_show(struct seq_file *m, void *v)
 {
-	ptdump_walk_pgd_level_debugfs(m, NULL, false);
+	ptdump_walk_pgd_level_debugfs(m, &init_mm, false);
 	return 0;
 }
 
@@ -15,11 +15,8 @@ DEFINE_SHOW_ATTRIBUTE(ptdump);
 
 static int ptdump_curknl_show(struct seq_file *m, void *v)
 {
-	if (current->mm->pgd) {
-		down_read(&current->mm->mmap_sem);
-		ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, false);
-		up_read(&current->mm->mmap_sem);
-	}
+	if (current->mm->pgd)
+		ptdump_walk_pgd_level_debugfs(m, current->mm, false);
 	return 0;
 }
 
@@ -28,11 +25,8 @@ DEFINE_SHOW_ATTRIBUTE(ptdump_curknl);
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
 static int ptdump_curusr_show(struct seq_file *m, void *v)
 {
-	if (current->mm->pgd) {
-		down_read(&current->mm->mmap_sem);
-		ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, true);
-		up_read(&current->mm->mmap_sem);
-	}
+	if (current->mm->pgd)
+		ptdump_walk_pgd_level_debugfs(m, current->mm, true);
 	return 0;
 }
 
@@ -43,7 +37,7 @@ DEFINE_SHOW_ATTRIBUTE(ptdump_curusr);
 static int ptdump_efi_show(struct seq_file *m, void *v)
 {
 	if (efi_mm.pgd)
-		ptdump_walk_pgd_level_debugfs(m, efi_mm.pgd, false);
+		ptdump_walk_pgd_level_debugfs(m, &efi_mm, false);
 	return 0;
 }
 
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index ab67822..64229da 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -16,6 +16,7 @@
 #include <linux/seq_file.h>
 #include <linux/highmem.h>
 #include <linux/pci.h>
+#include <linux/ptdump.h>
 
 #include <asm/e820/types.h>
 #include <asm/pgtable.h>
@@ -26,16 +27,18 @@
  * when a "break" in the continuity is found.
  */
 struct pg_state {
+	struct ptdump_state ptdump;
 	int level;
-	pgprot_t current_prot;
+	pgprotval_t current_prot;
 	pgprotval_t effective_prot;
+	pgprotval_t prot_levels[5];
 	unsigned long start_address;
-	unsigned long current_address;
 	const struct addr_marker *marker;
 	unsigned long lines;
 	bool to_dmesg;
 	bool check_wx;
 	unsigned long wx_pages;
+	struct seq_file *seq;
 };
 
 struct addr_marker {
@@ -174,11 +177,10 @@ static struct addr_marker address_markers[] = {
 /*
  * Print a readable form of a pgprot_t to the seq_file
  */
-static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
+static void printk_prot(struct seq_file *m, pgprotval_t pr, int level, bool dmsg)
 {
-	pgprotval_t pr = pgprot_val(prot);
 	static const char * const level_name[] =
-		{ "cr3", "pgd", "p4d", "pud", "pmd", "pte" };
+		{ "pgd", "p4d", "pud", "pmd", "pte" };
 
 	if (!(pr & _PAGE_PRESENT)) {
 		/* Not present */
@@ -202,12 +204,12 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
 			pt_dump_cont_printf(m, dmsg, "    ");
 
 		/* Bit 7 has a different meaning on level 3 vs 4 */
-		if (level <= 4 && pr & _PAGE_PSE)
+		if (level <= 3 && pr & _PAGE_PSE)
 			pt_dump_cont_printf(m, dmsg, "PSE ");
 		else
 			pt_dump_cont_printf(m, dmsg, "    ");
-		if ((level == 5 && pr & _PAGE_PAT) ||
-		    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
+		if ((level == 4 && pr & _PAGE_PAT) ||
+		    ((level == 3 || level == 2) && pr & _PAGE_PAT_LARGE))
 			pt_dump_cont_printf(m, dmsg, "PAT ");
 		else
 			pt_dump_cont_printf(m, dmsg, "    ");
@@ -223,24 +225,11 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
 	pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
 }
 
-/*
- * On 64 bits, sign-extend the 48 bit address to 64 bit
- */
-static unsigned long normalize_addr(unsigned long u)
-{
-	int shift;
-	if (!IS_ENABLED(CONFIG_X86_64))
-		return u;
-
-	shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
-	return (signed long)(u << shift) >> shift;
-}
-
-static void note_wx(struct pg_state *st)
+static void note_wx(struct pg_state *st, unsigned long addr)
 {
 	unsigned long npages;
 
-	npages = (st->current_address - st->start_address) / PAGE_SIZE;
+	npages = (addr - st->start_address) / PAGE_SIZE;
 
 #ifdef CONFIG_PCI_BIOS
 	/*
@@ -248,7 +237,7 @@ static void note_wx(struct pg_state *st)
 	 * Inform about it, but avoid the warning.
 	 */
 	if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
-	    st->current_address <= PAGE_OFFSET + BIOS_END) {
+	    addr <= PAGE_OFFSET + BIOS_END) {
 		pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
 		return;
 	}
@@ -260,27 +249,47 @@ static void note_wx(struct pg_state *st)
 		  (void *)st->start_address);
 }
 
+static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
+{
+	return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
+	       ((prot1 | prot2) & _PAGE_NX);
+}
+
 /*
  * This function gets called on a break in a continuous series
  * of PTE entries; the next one is different so we need to
  * print what we collected so far.
  */
-static void note_page(struct seq_file *m, struct pg_state *st,
-		      pgprot_t new_prot, pgprotval_t new_eff, int level)
+static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
+		      unsigned long val)
 {
-	pgprotval_t prot, cur, eff;
+	struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
+	pgprotval_t new_prot, new_eff;
+	pgprotval_t cur, eff;
 	static const char units[] = "BKMGTPE";
+	struct seq_file *m = st->seq;
+
+	new_prot = val & PTE_FLAGS_MASK;
+
+	if (level > 0) {
+		new_eff = effective_prot(st->prot_levels[level - 1],
+					 new_prot);
+	} else {
+		new_eff = new_prot;
+	}
+
+	if (level >= 0)
+		st->prot_levels[level] = new_eff;
 
 	/*
 	 * If we have a "break" in the series, we need to flush the state that
 	 * we have now. "break" is either changing perms, levels or
 	 * address space marker.
 	 */
-	prot = pgprot_val(new_prot);
-	cur = pgprot_val(st->current_prot);
+	cur = st->current_prot;
 	eff = st->effective_prot;
 
-	if (!st->level) {
+	if (st->level == -1) {
 		/* First entry */
 		st->current_prot = new_prot;
 		st->effective_prot = new_eff;
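
The ptdump rewrite keeps one effective protection value per page-table level: a mapping is effectively user-accessible or writable only if every level grants it, and effectively NX if any level sets NX, which is exactly what effective_prot() above computes. A small worked example of that combination rule is given below; the flag values are illustrative stand-ins, not the real _PAGE_* constants.

	#include <stdio.h>

	#define F_USER	0x1
	#define F_RW	0x2
	#define F_NX	0x4

	/* Same combination rule as effective_prot(): AND for USER/RW, OR for NX. */
	static unsigned int effective(unsigned int prot1, unsigned int prot2)
	{
		return (prot1 & prot2 & (F_USER | F_RW)) | ((prot1 | prot2) & F_NX);
	}

	int main(void)
	{
		unsigned int pmd = F_USER | F_RW;	/* upper level: user + writable */
		unsigned int pte = F_USER | F_NX;	/* leaf: user, read-only, NX    */
		unsigned int eff = effective(pmd, pte);

		/* Effective result: user yes, writable no, NX yes. */
		printf("user=%d rw=%d nx=%d\n",
		       !!(eff & F_USER), !!(eff & F_RW), !!(eff & F_NX));
		return 0;
	}
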
@@ -289,14 +298,14 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 		st->lines = 0;
 		pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
 				   st->marker->name);
-	} else if (prot != cur || new_eff != eff || level != st->level ||
-		   st->current_address >= st->marker[1].start_address) {
+	} else if (new_prot != cur || new_eff != eff || level != st->level ||
+		   addr >= st->marker[1].start_address) {
 		const char *unit = units;
 		unsigned long delta;
 		int width = sizeof(unsigned long) * 2;
 
 		if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
-			note_wx(st);
+			note_wx(st, addr);
 
 		/*
 		 * Now print the actual finished series
@@ -306,9 +315,9 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 			pt_dump_seq_printf(m, st->to_dmesg,
 					   "0x%0*lx-0x%0*lx   ",
 					   width, st->start_address,
-					   width, st->current_address);
+					   width, addr);
 
-			delta = st->current_address - st->start_address;
+			delta = addr - st->start_address;
 			while (!(delta & 1023) && unit[1]) {
 				delta >>= 10;
 				unit++;
@@ -325,7 +334,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 		 * such as the start of vmalloc space etc.
 		 * This helps in the interpretation.
 		 */
-		if (st->current_address >= st->marker[1].start_address) {
+		if (addr >= st->marker[1].start_address) {
 			if (st->marker->max_lines &&
 			    st->lines > st->marker->max_lines) {
 				unsigned long nskip =
@@ -341,222 +350,45 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 					   st->marker->name);
 		}
 
-		st->start_address = st->current_address;
+		st->start_address = addr;
 		st->current_prot = new_prot;
 		st->effective_prot = new_eff;
 		st->level = level;
 	}
 }
 
-static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
-{
-	return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
-	       ((prot1 | prot2) & _PAGE_NX);
-}
-
-static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
-			   pgprotval_t eff_in, unsigned long P)
-{
-	int i;
-	pte_t *pte;
-	pgprotval_t prot, eff;
-
-	for (i = 0; i < PTRS_PER_PTE; i++) {
-		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
-		pte = pte_offset_map(&addr, st->current_address);
-		prot = pte_flags(*pte);
-		eff = effective_prot(eff_in, prot);
-		note_page(m, st, __pgprot(prot), eff, 5);
-		pte_unmap(pte);
-	}
-}
-#ifdef CONFIG_KASAN
-
-/*
- * This is an optimization for KASAN=y case. Since all kasan page tables
- * eventually point to the kasan_early_shadow_page we could call note_page()
- * right away without walking through lower level page tables. This saves
- * us dozens of seconds (minutes for 5-level config) while checking for
- * W+X mapping or reading kernel_page_tables debugfs file.
- */
-static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
-				void *pt)
-{
-	if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
-	    (pgtable_l5_enabled() &&
-			__pa(pt) == __pa(kasan_early_shadow_p4d)) ||
-	    __pa(pt) == __pa(kasan_early_shadow_pud)) {
-		pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);
-		note_page(m, st, __pgprot(prot), 0, 5);
-		return true;
-	}
-	return false;
-}
-#else
-static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
-				void *pt)
-{
-	return false;
-}
-#endif
-
-#if PTRS_PER_PMD > 1
-
-static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
-			   pgprotval_t eff_in, unsigned long P)
-{
-	int i;
-	pmd_t *start, *pmd_start;
-	pgprotval_t prot, eff;
-
-	pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
-	for (i = 0; i < PTRS_PER_PMD; i++) {
-		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
-		if (!pmd_none(*start)) {
-			prot = pmd_flags(*start);
-			eff = effective_prot(eff_in, prot);
-			if (pmd_large(*start) || !pmd_present(*start)) {
-				note_page(m, st, __pgprot(prot), eff, 4);
-			} else if (!kasan_page_table(m, st, pmd_start)) {
-				walk_pte_level(m, st, *start, eff,
-					       P + i * PMD_LEVEL_MULT);
-			}
-		} else
-			note_page(m, st, __pgprot(0), 0, 4);
-		start++;
-	}
-}
-
-#else
-#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
-#define pud_large(a) pmd_large(__pmd(pud_val(a)))
-#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
-#endif
-
-#if PTRS_PER_PUD > 1
-
-static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
-			   pgprotval_t eff_in, unsigned long P)
-{
-	int i;
-	pud_t *start, *pud_start;
-	pgprotval_t prot, eff;
-
-	pud_start = start = (pud_t *)p4d_page_vaddr(addr);
-
-	for (i = 0; i < PTRS_PER_PUD; i++) {
-		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
-		if (!pud_none(*start)) {
-			prot = pud_flags(*start);
-			eff = effective_prot(eff_in, prot);
-			if (pud_large(*start) || !pud_present(*start)) {
-				note_page(m, st, __pgprot(prot), eff, 3);
-			} else if (!kasan_page_table(m, st, pud_start)) {
-				walk_pmd_level(m, st, *start, eff,
-					       P + i * PUD_LEVEL_MULT);
-			}
-		} else
-			note_page(m, st, __pgprot(0), 0, 3);
-
-		start++;
-	}
-}
-
-#else
-#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
-#define p4d_large(a) pud_large(__pud(p4d_val(a)))
-#define p4d_none(a)  pud_none(__pud(p4d_val(a)))
-#endif
-
-static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
-			   pgprotval_t eff_in, unsigned long P)
-{
-	int i;
-	p4d_t *start, *p4d_start;
-	pgprotval_t prot, eff;
-
-	if (PTRS_PER_P4D == 1)
-		return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);
-
-	p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);
-
-	for (i = 0; i < PTRS_PER_P4D; i++) {
-		st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
-		if (!p4d_none(*start)) {
-			prot = p4d_flags(*start);
-			eff = effective_prot(eff_in, prot);
-			if (p4d_large(*start) || !p4d_present(*start)) {
-				note_page(m, st, __pgprot(prot), eff, 2);
-			} else if (!kasan_page_table(m, st, p4d_start)) {
-				walk_pud_level(m, st, *start, eff,
-					       P + i * P4D_LEVEL_MULT);
-			}
-		} else
-			note_page(m, st, __pgprot(0), 0, 2);
-
-		start++;
-	}
-}
-
-#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
-#define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))
-
-static inline bool is_hypervisor_range(int idx)
-{
-#ifdef CONFIG_X86_64
-	/*
-	 * A hole in the beginning of kernel address space reserved
-	 * for a hypervisor.
-	 */
-	return	(idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
-		(idx <  pgd_index(GUARD_HOLE_END_ADDR));
-#else
-	return false;
-#endif
-}
-
-static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
+static void ptdump_walk_pgd_level_core(struct seq_file *m,
+				       struct mm_struct *mm, pgd_t *pgd,
 				       bool checkwx, bool dmesg)
 {
-	pgd_t *start = INIT_PGD;
-	pgprotval_t prot, eff;
-	int i;
-	struct pg_state st = {};
+	const struct ptdump_range ptdump_ranges[] = {
+#ifdef CONFIG_X86_64
 
-	if (pgd) {
-		start = pgd;
-		st.to_dmesg = dmesg;
-	}
+#define normalize_addr_shift (64 - (__VIRTUAL_MASK_SHIFT + 1))
+#define normalize_addr(u) ((signed long)((u) << normalize_addr_shift) >> \
+			   normalize_addr_shift)
 
-	st.check_wx = checkwx;
-	if (checkwx)
-		st.wx_pages = 0;
-
-	for (i = 0; i < PTRS_PER_PGD; i++) {
-		st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
-		if (!pgd_none(*start) && !is_hypervisor_range(i)) {
-			prot = pgd_flags(*start);
-#ifdef CONFIG_X86_PAE
-			eff = _PAGE_USER | _PAGE_RW;
+	{0, PTRS_PER_PGD * PGD_LEVEL_MULT / 2},
+	{normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT / 2), ~0UL},
 #else
-			eff = prot;
+	{0, ~0UL},
 #endif
-			if (pgd_large(*start) || !pgd_present(*start)) {
-				note_page(m, &st, __pgprot(prot), eff, 1);
-			} else {
-				walk_p4d_level(m, &st, *start, eff,
-					       i * PGD_LEVEL_MULT);
-			}
-		} else
-			note_page(m, &st, __pgprot(0), 0, 1);
+	{0, 0}
+};
 
-		cond_resched();
-		start++;
-	}
+	struct pg_state st = {
+		.ptdump = {
+			.note_page	= note_page,
+			.range		= ptdump_ranges
+		},
+		.level = -1,
+		.to_dmesg	= dmesg,
+		.check_wx	= checkwx,
+		.seq		= m
+	};
 
-	/* Flush out the last page */
-	st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
-	note_page(m, &st, __pgprot(0), 0, 0);
+	ptdump_walk_pgd(&st.ptdump, mm, pgd);
+
 	if (!checkwx)
 		return;
 	if (st.wx_pages)
@@ -566,18 +398,20 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
 		pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
 }
 
-void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
+void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm)
 {
-	ptdump_walk_pgd_level_core(m, pgd, false, true);
+	ptdump_walk_pgd_level_core(m, mm, mm->pgd, false, true);
 }
 
-void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
+void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
+				   bool user)
 {
+	pgd_t *pgd = mm->pgd;
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
 	if (user && boot_cpu_has(X86_FEATURE_PTI))
 		pgd = kernel_to_user_pgdp(pgd);
 #endif
-	ptdump_walk_pgd_level_core(m, pgd, false, false);
+	ptdump_walk_pgd_level_core(m, mm, pgd, false, false);
 }
 EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
 
@@ -592,13 +426,13 @@ void ptdump_walk_user_pgd_level_checkwx(void)
 
 	pr_info("x86/mm: Checking user space page tables\n");
 	pgd = kernel_to_user_pgdp(pgd);
-	ptdump_walk_pgd_level_core(NULL, pgd, true, false);
+	ptdump_walk_pgd_level_core(NULL, &init_mm, pgd, true, false);
 #endif
 }
 
 void ptdump_walk_pgd_level_checkwx(void)
 {
-	ptdump_walk_pgd_level_core(NULL, NULL, true, false);
+	ptdump_walk_pgd_level_core(NULL, &init_mm, INIT_PGD, true, false);
 }
 
 static int __init pt_dump_init(void)
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index fab0953..5bfd5ae 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -19,7 +19,6 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 #include <asm/elf.h>
-#include <asm/mpx.h>
 
 #if 0	/* This is just for testing */
 struct page *
@@ -151,10 +150,6 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	if (len & ~huge_page_mask(h))
 		return -EINVAL;
 
-	addr = mpx_unmapped_area_check(addr, len, flags);
-	if (IS_ERR_VALUE(addr))
-		return addr;
-
 	if (len > TASK_SIZE)
 		return -ENOMEM;
 
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index aae9a93..cb91ecc 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -163,8 +163,6 @@ unsigned long get_mmap_base(int is_legacy)
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-	if (vma->vm_flags & VM_MPX)
-		return "[mpx]";
 	return NULL;
 }
 
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
deleted file mode 100644
index 895fb7a..0000000
--- a/arch/x86/mm/mpx.c
+++ /dev/null
@@ -1,938 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * mpx.c - Memory Protection eXtensions
- *
- * Copyright (c) 2014, Intel Corporation.
- * Qiaowei Ren <qiaowei.ren@intel.com>
- * Dave Hansen <dave.hansen@intel.com>
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/mm_types.h>
-#include <linux/mman.h>
-#include <linux/syscalls.h>
-#include <linux/sched/sysctl.h>
-
-#include <asm/insn.h>
-#include <asm/insn-eval.h>
-#include <asm/mmu_context.h>
-#include <asm/mpx.h>
-#include <asm/processor.h>
-#include <asm/fpu/internal.h>
-
-#define CREATE_TRACE_POINTS
-#include <asm/trace/mpx.h>
-
-static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
-{
-	if (is_64bit_mm(mm))
-		return MPX_BD_SIZE_BYTES_64;
-	else
-		return MPX_BD_SIZE_BYTES_32;
-}
-
-static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
-{
-	if (is_64bit_mm(mm))
-		return MPX_BT_SIZE_BYTES_64;
-	else
-		return MPX_BT_SIZE_BYTES_32;
-}
-
-/*
- * This is really a simplified "vm_mmap". it only handles MPX
- * bounds tables (the bounds directory is user-allocated).
- */
-static unsigned long mpx_mmap(unsigned long len)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long addr, populate;
-
-	/* Only bounds table can be allocated here */
-	if (len != mpx_bt_size_bytes(mm))
-		return -EINVAL;
-
-	down_write(&mm->mmap_sem);
-	addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
-		       MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate, NULL);
-	up_write(&mm->mmap_sem);
-	if (populate)
-		mm_populate(addr, populate);
-
-	return addr;
-}
-
-static int mpx_insn_decode(struct insn *insn,
-			   struct pt_regs *regs)
-{
-	unsigned char buf[MAX_INSN_SIZE];
-	int x86_64 = !test_thread_flag(TIF_IA32);
-	int not_copied;
-	int nr_copied;
-
-	not_copied = copy_from_user(buf, (void __user *)regs->ip, sizeof(buf));
-	nr_copied = sizeof(buf) - not_copied;
-	/*
-	 * The decoder _should_ fail nicely if we pass it a short buffer.
-	 * But, let's not depend on that implementation detail.  If we
-	 * did not get anything, just error out now.
-	 */
-	if (!nr_copied)
-		return -EFAULT;
-	insn_init(insn, buf, nr_copied, x86_64);
-	insn_get_length(insn);
-	/*
-	 * copy_from_user() tries to get as many bytes as we could see in
-	 * the largest possible instruction.  If the instruction we are
-	 * after is shorter than that _and_ we attempt to copy from
-	 * something unreadable, we might get a short read.  This is OK
-	 * as long as the read did not stop in the middle of the
-	 * instruction.  Check to see if we got a partial instruction.
-	 */
-	if (nr_copied < insn->length)
-		return -EFAULT;
-
-	insn_get_opcode(insn);
-	/*
-	 * We only _really_ need to decode bndcl/bndcn/bndcu
-	 * Error out on anything else.
-	 */
-	if (insn->opcode.bytes[0] != 0x0f)
-		goto bad_opcode;
-	if ((insn->opcode.bytes[1] != 0x1a) &&
-	    (insn->opcode.bytes[1] != 0x1b))
-		goto bad_opcode;
-
-	return 0;
-bad_opcode:
-	return -EINVAL;
-}
-
-/*
- * If a bounds overflow occurs then a #BR is generated. This
- * function decodes MPX instructions to get violation address
- * and set this address into extended struct siginfo.
- *
- * Note that this is not a super precise way of doing this.
- * Userspace could have, by the time we get here, written
- * anything it wants in to the instructions.  We can not
- * trust anything about it.  They might not be valid
- * instructions or might encode invalid registers, etc...
- */
-int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
-{
-	const struct mpx_bndreg_state *bndregs;
-	const struct mpx_bndreg *bndreg;
-	struct insn insn;
-	uint8_t bndregno;
-	int err;
-
-	err = mpx_insn_decode(&insn, regs);
-	if (err)
-		goto err_out;
-
-	/*
-	 * We know at this point that we are only dealing with
-	 * MPX instructions.
-	 */
-	insn_get_modrm(&insn);
-	bndregno = X86_MODRM_REG(insn.modrm.value);
-	if (bndregno > 3) {
-		err = -EINVAL;
-		goto err_out;
-	}
-	/* get bndregs field from current task's xsave area */
-	bndregs = get_xsave_field_ptr(XFEATURE_BNDREGS);
-	if (!bndregs) {
-		err = -EINVAL;
-		goto err_out;
-	}
-	/* now go select the individual register in the set of 4 */
-	bndreg = &bndregs->bndreg[bndregno];
-
-	/*
-	 * The registers are always 64-bit, but the upper 32
-	 * bits are ignored in 32-bit mode.  Also, note that the
-	 * upper bounds are architecturally represented in 1's
-	 * complement form.
-	 *
-	 * The 'unsigned long' cast is because the compiler
-	 * complains when casting from integers to different-size
-	 * pointers.
-	 */
-	info->lower = (void __user *)(unsigned long)bndreg->lower_bound;
-	info->upper = (void __user *)(unsigned long)~bndreg->upper_bound;
-	info->addr  = insn_get_addr_ref(&insn, regs);
-
-	/*
-	 * We were not able to extract an address from the instruction,
-	 * probably because there was something invalid in it.
-	 */
-	if (info->addr == (void __user *)-1) {
-		err = -EINVAL;
-		goto err_out;
-	}
-	trace_mpx_bounds_register_exception(info->addr, bndreg);
-	return 0;
-err_out:
-	/* info might be NULL, but kfree() handles that */
-	return err;
-}
-
-static __user void *mpx_get_bounds_dir(void)
-{
-	const struct mpx_bndcsr *bndcsr;
-
-	if (!cpu_feature_enabled(X86_FEATURE_MPX))
-		return MPX_INVALID_BOUNDS_DIR;
-
-	/*
-	 * The bounds directory pointer is stored in a register
-	 * only accessible if we first do an xsave.
-	 */
-	bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
-	if (!bndcsr)
-		return MPX_INVALID_BOUNDS_DIR;
-
-	/*
-	 * Make sure the register looks valid by checking the
-	 * enable bit.
-	 */
-	if (!(bndcsr->bndcfgu & MPX_BNDCFG_ENABLE_FLAG))
-		return MPX_INVALID_BOUNDS_DIR;
-
-	/*
-	 * Lastly, mask off the low bits used for configuration
-	 * flags, and return the address of the bounds table.
-	 */
-	return (void __user *)(unsigned long)
-		(bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
-}
-
-int mpx_enable_management(void)
-{
-	void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
-	struct mm_struct *mm = current->mm;
-	int ret = 0;
-
-	/*
-	 * The runtime in userspace will be responsible for allocation of
-	 * the bounds directory. Then, it will save the base of the bounds
-	 * directory into XSAVE/XRSTOR Save Area and enable MPX through
-	 * XRSTOR instruction.
-	 *
-	 * The copy_xregs_to_kernel() beneath get_xsave_field_ptr() is
-	 * expected to be relatively expensive. Storing the bounds
-	 * directory here means that we do not have to do xsave in the
-	 * unmap path; we can just use mm->context.bd_addr instead.
-	 */
-	bd_base = mpx_get_bounds_dir();
-	down_write(&mm->mmap_sem);
-
-	/* MPX doesn't support addresses above 47 bits yet. */
-	if (find_vma(mm, DEFAULT_MAP_WINDOW)) {
-		pr_warn_once("%s (%d): MPX cannot handle addresses "
-				"above 47-bits. Disabling.",
-				current->comm, current->pid);
-		ret = -ENXIO;
-		goto out;
-	}
-	mm->context.bd_addr = bd_base;
-	if (mm->context.bd_addr == MPX_INVALID_BOUNDS_DIR)
-		ret = -ENXIO;
-out:
-	up_write(&mm->mmap_sem);
-	return ret;
-}
-
-int mpx_disable_management(void)
-{
-	struct mm_struct *mm = current->mm;
-
-	if (!cpu_feature_enabled(X86_FEATURE_MPX))
-		return -ENXIO;
-
-	down_write(&mm->mmap_sem);
-	mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
-	up_write(&mm->mmap_sem);
-	return 0;
-}
-
-static int mpx_cmpxchg_bd_entry(struct mm_struct *mm,
-		unsigned long *curval,
-		unsigned long __user *addr,
-		unsigned long old_val, unsigned long new_val)
-{
-	int ret;
-	/*
-	 * user_atomic_cmpxchg_inatomic() actually uses sizeof()
-	 * the pointer that we pass to it to figure out how much
-	 * data to cmpxchg.  We have to be careful here not to
-	 * pass a pointer to a 64-bit data type when we only want
-	 * a 32-bit copy.
-	 */
-	if (is_64bit_mm(mm)) {
-		ret = user_atomic_cmpxchg_inatomic(curval,
-				addr, old_val, new_val);
-	} else {
-		u32 uninitialized_var(curval_32);
-		u32 old_val_32 = old_val;
-		u32 new_val_32 = new_val;
-		u32 __user *addr_32 = (u32 __user *)addr;
-
-		ret = user_atomic_cmpxchg_inatomic(&curval_32,
-				addr_32, old_val_32, new_val_32);
-		*curval = curval_32;
-	}
-	return ret;
-}
-
-/*
- * With 32-bit mode, a bounds directory is 4MB, and the size of each
- * bounds table is 16KB. With 64-bit mode, a bounds directory is 2GB,
- * and the size of each bounds table is 4MB.
- */
-static int allocate_bt(struct mm_struct *mm, long __user *bd_entry)
-{
-	unsigned long expected_old_val = 0;
-	unsigned long actual_old_val = 0;
-	unsigned long bt_addr;
-	unsigned long bd_new_entry;
-	int ret = 0;
-
-	/*
-	 * Carve the virtual space out of userspace for the new
-	 * bounds table:
-	 */
-	bt_addr = mpx_mmap(mpx_bt_size_bytes(mm));
-	if (IS_ERR((void *)bt_addr))
-		return PTR_ERR((void *)bt_addr);
-	/*
-	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
-	 */
-	bd_new_entry = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
-
-	/*
-	 * Go poke the address of the new bounds table in to the
-	 * bounds directory entry out in userspace memory.  Note:
-	 * we may race with another CPU instantiating the same table.
-	 * In that case the cmpxchg will see an unexpected
-	 * 'actual_old_val'.
-	 *
-	 * This can fault, but that's OK because we do not hold
-	 * mmap_sem at this point, unlike some of the other parts
-	 * of the MPX code that have to pagefault_disable().
-	 */
-	ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,	bd_entry,
-				   expected_old_val, bd_new_entry);
-	if (ret)
-		goto out_unmap;
-
-	/*
-	 * The user_atomic_cmpxchg_inatomic() will only return nonzero
-	 * for faults, *not* if the cmpxchg itself fails.  Now we must
-	 * verify that the cmpxchg itself completed successfully.
-	 */
-	/*
-	 * We expected an empty 'expected_old_val', but instead found
-	 * an apparently valid entry.  Assume we raced with another
-	 * thread to instantiate this table and declare success.
-	 */
-	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
-		ret = 0;
-		goto out_unmap;
-	}
-	/*
-	 * We found a non-empty bd_entry but it did not have the
-	 * VALID_FLAG set.  Return an error which will result in
-	 * a SEGV since this probably means that somebody scribbled
-	 * some invalid data in to a bounds table.
-	 */
-	if (expected_old_val != actual_old_val) {
-		ret = -EINVAL;
-		goto out_unmap;
-	}
-	trace_mpx_new_bounds_table(bt_addr);
-	return 0;
-out_unmap:
-	vm_munmap(bt_addr, mpx_bt_size_bytes(mm));
-	return ret;
-}
-
-/*
- * When a BNDSTX instruction attempts to save bounds to a bounds
- * table, it will first attempt to look up the table in the
- * first-level bounds directory.  If it does not find a table in
- * the directory, a #BR is generated and we get here in order to
- * allocate a new table.
- *
- * With 32-bit mode, the size of BD is 4MB, and the size of each
- * bound table is 16KB. With 64-bit mode, the size of BD is 2GB,
- * and the size of each bound table is 4MB.
- */
-static int do_mpx_bt_fault(void)
-{
-	unsigned long bd_entry, bd_base;
-	const struct mpx_bndcsr *bndcsr;
-	struct mm_struct *mm = current->mm;
-
-	bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
-	if (!bndcsr)
-		return -EINVAL;
-	/*
-	 * Mask off the preserve and enable bits
-	 */
-	bd_base = bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK;
-	/*
-	 * The hardware provides the address of the missing or invalid
-	 * entry via BNDSTATUS, so we don't have to go look it up.
-	 */
-	bd_entry = bndcsr->bndstatus & MPX_BNDSTA_ADDR_MASK;
-	/*
-	 * Make sure the directory entry is within where we think
-	 * the directory is.
-	 */
-	if ((bd_entry < bd_base) ||
-	    (bd_entry >= bd_base + mpx_bd_size_bytes(mm)))
-		return -EINVAL;
-
-	return allocate_bt(mm, (long __user *)bd_entry);
-}
-
-int mpx_handle_bd_fault(void)
-{
-	/*
-	 * Userspace never asked us to manage the bounds tables,
-	 * so refuse to help.
-	 */
-	if (!kernel_managing_mpx_tables(current->mm))
-		return -EINVAL;
-
-	return do_mpx_bt_fault();
-}
-
-/*
- * A thin wrapper around get_user_pages().  Returns 0 if the
- * fault was resolved or -errno if not.
- */
-static int mpx_resolve_fault(long __user *addr, int write)
-{
-	long gup_ret;
-	int nr_pages = 1;
-
-	gup_ret = get_user_pages((unsigned long)addr, nr_pages,
-			write ? FOLL_WRITE : 0,	NULL, NULL);
-	/*
-	 * get_user_pages() returns number of pages gotten.
-	 * 0 means we failed to fault in and get anything,
-	 * probably because 'addr' is bad.
-	 */
-	if (!gup_ret)
-		return -EFAULT;
-	/* Other error, return it */
-	if (gup_ret < 0)
-		return gup_ret;
-	/* must have gup'd a page and gup_ret>0, success */
-	return 0;
-}
-
-static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
-					     unsigned long bd_entry)
-{
-	unsigned long bt_addr = bd_entry;
-	int align_to_bytes;
-	/*
-	 * Bit 0 in a bt_entry is always the valid bit.
-	 */
-	bt_addr &= ~MPX_BD_ENTRY_VALID_FLAG;
-	/*
-	 * Tables are naturally aligned at 8-byte boundaries
-	 * on 64-bit and 4-byte boundaries on 32-bit.  The
-	 * documentation makes it appear that the low bits
-	 * are ignored by the hardware, so we do the same.
-	 */
-	if (is_64bit_mm(mm))
-		align_to_bytes = 8;
-	else
-		align_to_bytes = 4;
-	bt_addr &= ~(align_to_bytes-1);
-	return bt_addr;
-}
-
-/*
- * We only want to do a 4-byte get_user() on 32-bit.  Otherwise,
- * we might run off the end of the bounds table if we are on
- * a 64-bit kernel and try to get 8 bytes.
- */
-static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
-		long __user *bd_entry_ptr)
-{
-	u32 bd_entry_32;
-	int ret;
-
-	if (is_64bit_mm(mm))
-		return get_user(*bd_entry_ret, bd_entry_ptr);
-
-	/*
-	 * Note that get_user() uses the type of the *pointer* to
-	 * establish the size of the get, not the destination.
-	 */
-	ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
-	*bd_entry_ret = bd_entry_32;
-	return ret;
-}
-
-/*
- * Get the base of bounds tables pointed by specific bounds
- * directory entry.
- */
-static int get_bt_addr(struct mm_struct *mm,
-			long __user *bd_entry_ptr,
-			unsigned long *bt_addr_result)
-{
-	int ret;
-	int valid_bit;
-	unsigned long bd_entry;
-	unsigned long bt_addr;
-
-	if (!access_ok((bd_entry_ptr), sizeof(*bd_entry_ptr)))
-		return -EFAULT;
-
-	while (1) {
-		int need_write = 0;
-
-		pagefault_disable();
-		ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
-		pagefault_enable();
-		if (!ret)
-			break;
-		if (ret == -EFAULT)
-			ret = mpx_resolve_fault(bd_entry_ptr, need_write);
-		/*
-		 * If we could not resolve the fault, consider it
-		 * userspace's fault and error out.
-		 */
-		if (ret)
-			return ret;
-	}
-
-	valid_bit = bd_entry & MPX_BD_ENTRY_VALID_FLAG;
-	bt_addr = mpx_bd_entry_to_bt_addr(mm, bd_entry);
-
-	/*
-	 * When the kernel is managing bounds tables, a bounds directory
-	 * entry will either have a valid address (plus the valid bit)
-	 * *OR* be completely empty. If we see a !valid entry *and* some
-	 * data in the address field, we know something is wrong. This
-	 * -EINVAL return will cause a SIGSEGV.
-	 */
-	if (!valid_bit && bt_addr)
-		return -EINVAL;
-	/*
-	 * Do we have a completely zeroed bt entry?  That is OK.  It
-	 * just means there was no bounds table for this memory.  Make
-	 * sure to distinguish this from -EINVAL, which will cause
-	 * a SEGV.
-	 */
-	if (!valid_bit)
-		return -ENOENT;
-
-	*bt_addr_result = bt_addr;
-	return 0;
-}
-
-static inline int bt_entry_size_bytes(struct mm_struct *mm)
-{
-	if (is_64bit_mm(mm))
-		return MPX_BT_ENTRY_BYTES_64;
-	else
-		return MPX_BT_ENTRY_BYTES_32;
-}
-
-/*
- * Take a virtual address and turn it into the offset in bytes
- * inside of the bounds table where the bounds table entry
- * controlling 'addr' can be found.
- */
-static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
-		unsigned long addr)
-{
-	unsigned long bt_table_nr_entries;
-	unsigned long offset = addr;
-
-	if (is_64bit_mm(mm)) {
-		/* Bottom 3 bits are ignored on 64-bit */
-		offset >>= 3;
-		bt_table_nr_entries = MPX_BT_NR_ENTRIES_64;
-	} else {
-		/* Bottom 2 bits are ignored on 32-bit */
-		offset >>= 2;
-		bt_table_nr_entries = MPX_BT_NR_ENTRIES_32;
-	}
-	/*
-	 * We know the size of the table in to which we are
-	 * indexing, and we have eliminated all the low bits
-	 * which are ignored for indexing.
-	 *
-	 * Mask out all the high bits which we do not need
-	 * to index in to the table.  Note that the tables
-	 * are always powers of two so this gives us a proper
-	 * mask.
-	 */
-	offset &= (bt_table_nr_entries-1);
-	/*
-	 * We now have an entry offset in terms of *entries* in
-	 * the table.  We need to scale it back up to bytes.
-	 */
-	offset *= bt_entry_size_bytes(mm);
-	return offset;
-}
-
-/*
- * How much virtual address space does a single bounds
- * directory entry cover?
- *
- * Note, we need a long long because 4GB doesn't fit in
- * to a long on 32-bit.
- */
-static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
-{
-	unsigned long long virt_space;
-	unsigned long long GB = (1ULL << 30);
-
-	/*
-	 * This covers 32-bit emulation as well as 32-bit kernels
-	 * running on 64-bit hardware.
-	 */
-	if (!is_64bit_mm(mm))
-		return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;
-
-	/*
-	 * 'x86_virt_bits' returns what the hardware is capable
-	 * of, and returns the full >32-bit address space when
-	 * running 32-bit kernels on 64-bit hardware.
-	 */
-	virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
-	return virt_space / MPX_BD_NR_ENTRIES_64;
-}
-
-/*
- * Free the backing physical pages of bounds table 'bt_addr'.
- * Assume start...end is within that bounds table.
- */
-static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
-		unsigned long bt_addr,
-		unsigned long start_mapping, unsigned long end_mapping)
-{
-	struct vm_area_struct *vma;
-	unsigned long addr, len;
-	unsigned long start;
-	unsigned long end;
-
-	/*
-	 * if we 'end' on a boundary, the offset will be 0 which
-	 * is not what we want.  Back it up a byte to get the
-	 * last bt entry.  Then once we have the entry itself,
-	 * move 'end' back up by the table entry size.
-	 */
-	start = bt_addr + mpx_get_bt_entry_offset_bytes(mm, start_mapping);
-	end   = bt_addr + mpx_get_bt_entry_offset_bytes(mm, end_mapping - 1);
-	/*
-	 * Move end back up by one entry.  Among other things
-	 * this ensures that it remains page-aligned and does
-	 * not screw up zap_page_range()
-	 */
-	end += bt_entry_size_bytes(mm);
-
-	/*
-	 * Find the first overlapping vma. If vma->vm_start > start, there
-	 * will be a hole in the bounds table. This -EINVAL return will
-	 * cause a SIGSEGV.
-	 */
-	vma = find_vma(mm, start);
-	if (!vma || vma->vm_start > start)
-		return -EINVAL;
-
-	/*
-	 * A NUMA policy on a VM_MPX VMA could cause this bounds table to
-	 * be split. So we need to look across the entire 'start -> end'
-	 * range of this bounds table, find all of the VM_MPX VMAs, and
-	 * zap only those.
-	 */
-	addr = start;
-	while (vma && vma->vm_start < end) {
-		/*
-		 * We followed a bounds directory entry down
-		 * here.  If we find a non-MPX VMA, that's bad,
-		 * so stop immediately and return an error.  This
-		 * probably results in a SIGSEGV.
-		 */
-		if (!(vma->vm_flags & VM_MPX))
-			return -EINVAL;
-
-		len = min(vma->vm_end, end) - addr;
-		zap_page_range(vma, addr, len);
-		trace_mpx_unmap_zap(addr, addr+len);
-
-		vma = vma->vm_next;
-		addr = vma->vm_start;
-	}
-	return 0;
-}
-
-static unsigned long mpx_get_bd_entry_offset(struct mm_struct *mm,
-		unsigned long addr)
-{
-	/*
-	 * There are several ways to derive the bd offsets.  We
-	 * use the following approach here:
-	 * 1. We know the size of the virtual address space
-	 * 2. We know the number of entries in a bounds table
-	 * 3. We know that each entry covers a fixed amount of
-	 *    virtual address space.
-	 * So, we can just divide the virtual address by the
-	 * virtual space used by one entry to determine which
-	 * entry "controls" the given virtual address.
-	 */
-	if (is_64bit_mm(mm)) {
-		int bd_entry_size = 8; /* 64-bit pointer */
-		/*
-		 * Take the 64-bit addressing hole in to account.
-		 */
-		addr &= ((1UL << boot_cpu_data.x86_virt_bits) - 1);
-		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
-	} else {
-		int bd_entry_size = 4; /* 32-bit pointer */
-		/*
-		 * 32-bit has no hole so this case needs no mask
-		 */
-		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
-	}
-	/*
-	 * The two return calls above are exact copies.  If we
-	 * pull out a single copy and put it in here, gcc won't
-	 * realize that we're doing a power-of-2 divide and use
-	 * shifts.  It uses a real divide.  If we put them up
-	 * there, it manages to figure it out (gcc 4.8.3).
-	 */
-}
-
-static int unmap_entire_bt(struct mm_struct *mm,
-		long __user *bd_entry, unsigned long bt_addr)
-{
-	unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
-	unsigned long uninitialized_var(actual_old_val);
-	int ret;
-
-	while (1) {
-		int need_write = 1;
-		unsigned long cleared_bd_entry = 0;
-
-		pagefault_disable();
-		ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,
-				bd_entry, expected_old_val, cleared_bd_entry);
-		pagefault_enable();
-		if (!ret)
-			break;
-		if (ret == -EFAULT)
-			ret = mpx_resolve_fault(bd_entry, need_write);
-		/*
-		 * If we could not resolve the fault, consider it
-		 * userspace's fault and error out.
-		 */
-		if (ret)
-			return ret;
-	}
-	/*
-	 * The cmpxchg was performed, check the results.
-	 */
-	if (actual_old_val != expected_old_val) {
-		/*
-		 * Someone else raced with us to unmap the table.
-		 * That is OK, since we were both trying to do
-		 * the same thing.  Declare success.
-		 */
-		if (!actual_old_val)
-			return 0;
-		/*
-		 * Something messed with the bounds directory
-		 * entry.  We hold mmap_sem for read or write
-		 * here, so it could not be a _new_ bounds table
-		 * that someone just allocated.  Something is
-		 * wrong, so pass up the error and SIGSEGV.
-		 */
-		return -EINVAL;
-	}
-	/*
-	 * Note, we are likely being called under do_munmap() already. To
-	 * avoid recursion, do_munmap() will check whether it comes
-	 * from one bounds table through VM_MPX flag.
-	 */
-	return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm), NULL);
-}
-
-static int try_unmap_single_bt(struct mm_struct *mm,
-	       unsigned long start, unsigned long end)
-{
-	struct vm_area_struct *next;
-	struct vm_area_struct *prev;
-	/*
-	 * "bta" == Bounds Table Area: the area controlled by the
-	 * bounds table that we are unmapping.
-	 */
-	unsigned long bta_start_vaddr = start & ~(bd_entry_virt_space(mm)-1);
-	unsigned long bta_end_vaddr = bta_start_vaddr + bd_entry_virt_space(mm);
-	unsigned long uninitialized_var(bt_addr);
-	void __user *bde_vaddr;
-	int ret;
-	/*
-	 * We already unlinked the VMAs from the mm's rbtree so 'start'
-	 * is guaranteed to be in a hole. This gets us the first VMA
-	 * before the hole in to 'prev' and the next VMA after the hole
-	 * in to 'next'.
-	 */
-	next = find_vma_prev(mm, start, &prev);
-	/*
-	 * Do not count other MPX bounds table VMAs as neighbors.
-	 * Although theoretically possible, we do not allow bounds
-	 * tables for bounds tables so our heads do not explode.
-	 * If we count them as neighbors here, we may end up with
-	 * lots of tables even though we have no actual table
-	 * entries in use.
-	 */
-	while (next && (next->vm_flags & VM_MPX))
-		next = next->vm_next;
-	while (prev && (prev->vm_flags & VM_MPX))
-		prev = prev->vm_prev;
-	/*
-	 * We know 'start' and 'end' lie within an area controlled
-	 * by a single bounds table.  See if there are any other
-	 * VMAs controlled by that bounds table.  If there are not
-	 * then we can "expand" the area we are unmapping to possibly
-	 * cover the entire table.
-	 */
-	next = find_vma_prev(mm, start, &prev);
-	if ((!prev || prev->vm_end <= bta_start_vaddr) &&
-	    (!next || next->vm_start >= bta_end_vaddr)) {
-		/*
-		 * No neighbor VMAs controlled by same bounds
-		 * table.  Try to unmap the whole thing
-		 */
-		start = bta_start_vaddr;
-		end = bta_end_vaddr;
-	}
-
-	bde_vaddr = mm->context.bd_addr + mpx_get_bd_entry_offset(mm, start);
-	ret = get_bt_addr(mm, bde_vaddr, &bt_addr);
-	/*
-	 * No bounds table there, so nothing to unmap.
-	 */
-	if (ret == -ENOENT) {
-		ret = 0;
-		return 0;
-	}
-	if (ret)
-		return ret;
-	/*
-	 * We are unmapping an entire table.  Either because the
-	 * unmap that started this whole process was large enough
-	 * to cover an entire table, or that the unmap was small
-	 * but was the area covered by a bounds table.
-	 */
-	if ((start == bta_start_vaddr) &&
-	    (end == bta_end_vaddr))
-		return unmap_entire_bt(mm, bde_vaddr, bt_addr);
-	return zap_bt_entries_mapping(mm, bt_addr, start, end);
-}
-
-static int mpx_unmap_tables(struct mm_struct *mm,
-		unsigned long start, unsigned long end)
-{
-	unsigned long one_unmap_start;
-	trace_mpx_unmap_search(start, end);
-
-	one_unmap_start = start;
-	while (one_unmap_start < end) {
-		int ret;
-		unsigned long next_unmap_start = ALIGN(one_unmap_start+1,
-						       bd_entry_virt_space(mm));
-		unsigned long one_unmap_end = end;
-		/*
-		 * if the end is beyond the current bounds table,
-		 * move it back so we only deal with a single one
-		 * at a time
-		 */
-		if (one_unmap_end > next_unmap_start)
-			one_unmap_end = next_unmap_start;
-		ret = try_unmap_single_bt(mm, one_unmap_start, one_unmap_end);
-		if (ret)
-			return ret;
-
-		one_unmap_start = next_unmap_start;
-	}
-	return 0;
-}
-
-/*
- * Free unused bounds tables covered in a virtual address region being
- * munmap()ed. Assume end > start.
- *
- * This function will be called by do_munmap(), and the VMAs covering
- * the virtual address region start...end have already been split if
- * necessary, and the 'vma' is the first vma in this range (start -> end).
- */
-void mpx_notify_unmap(struct mm_struct *mm, unsigned long start,
-		      unsigned long end)
-{
-	struct vm_area_struct *vma;
-	int ret;
-
-	/*
-	 * Refuse to do anything unless userspace has asked
-	 * the kernel to help manage the bounds tables,
-	 * the kernel to help manage the bounds tables.
-	if (!kernel_managing_mpx_tables(current->mm))
-		return;
-	/*
-	 * This will look across the entire 'start -> end' range,
-	 * and find all of the non-VM_MPX VMAs.
-	 *
-	 * To avoid recursion, if a VM_MPX vma is found in the range
-	 * (start->end), we will not continue follow-up work. This
-	 * recursion represents having bounds tables for bounds tables,
-	 * which should not occur normally. Being strict about it here
-	 * helps ensure that we do not have an exploitable stack overflow.
-	 */
-	vma = find_vma(mm, start);
-	while (vma && vma->vm_start < end) {
-		if (vma->vm_flags & VM_MPX)
-			return;
-		vma = vma->vm_next;
-	}
-
-	ret = mpx_unmap_tables(mm, start, end);
-	if (ret)
-		force_sig(SIGSEGV);
-}
-
-/* MPX cannot handle addresses above 47 bits yet. */
-unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
-		unsigned long flags)
-{
-	if (!kernel_managing_mpx_tables(current->mm))
-		return addr;
-	if (addr + len <= DEFAULT_MAP_WINDOW)
-		return addr;
-	if (flags & MAP_FIXED)
-		return -ENOMEM;
-
-	/*
-	 * Requested len is larger than the whole area we're allowed to map in.
-	 * Resetting hinting address wouldn't do much good -- fail early.
-	 */
-	if (len > DEFAULT_MAP_WINDOW)
-		return -ENOMEM;
-
-	/* Look for unmap area within DEFAULT_MAP_WINDOW */
-	return 0;
-}
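
The deleted comments above spell out the index math the MPX code relied on: a pointer's bounds-directory offset is the address divided by the virtual space covered per directory entry, and its bounds-table offset is the address with the ignored low bits dropped, masked to the table size and scaled by the entry size. A minimal user-space style restatement for the 64-bit case follows; the constants are illustrative assumptions, not values taken from mpx.h.

#include <stdint.h>

/* Assumed parameters for the 64-bit layout (illustrative only). */
#define VIRT_BITS	48ULL			/* assumed virtual address width   */
#define BD_NR_ENTRIES	(1ULL << 28)		/* 2 GiB directory, 8-byte entries */
#define BT_NR_ENTRIES	(1ULL << 17)		/* 4 MiB table, 32-byte entries    */
#define BD_ENTRY_BYTES	8ULL
#define BT_ENTRY_BYTES	32ULL

/* Byte offset of the directory entry that "controls" addr. */
static uint64_t bd_entry_offset(uint64_t addr)
{
	uint64_t virt_per_entry = (1ULL << VIRT_BITS) / BD_NR_ENTRIES;

	addr &= (1ULL << VIRT_BITS) - 1;	/* ignore the addressing hole */
	return (addr / virt_per_entry) * BD_ENTRY_BYTES;
}

/* Byte offset inside the bounds table for addr. */
static uint64_t bt_entry_offset(uint64_t addr)
{
	uint64_t idx = (addr >> 3) & (BT_NR_ENTRIES - 1);	/* low 3 bits ignored */

	return idx * BT_ENTRY_BYTES;
}

The masking only works because the entry counts are powers of two, which is also why the deleted code kept the divide duplicated per branch so the compiler could reduce it to a shift.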
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 62a8ebe..c4aedd0 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -618,6 +618,17 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
 }
 EXPORT_SYMBOL_GPL(lookup_address);
 
+/*
+ * Look up the page table entry for a virtual address in a given mm. Return a
+ * pointer to the entry and the level of the mapping.
+ */
+pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
+			    unsigned int *level)
+{
+	return lookup_address_in_pgd(pgd_offset(mm, address), address, level);
+}
+EXPORT_SYMBOL_GPL(lookup_address_in_mm);
+
 static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
 				  unsigned int *level)
 {
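
The new helper simply routes the caller's mm through lookup_address_in_pgd(). A hedged sketch of a possible caller (the wrapper and 'target_mm' are illustrative, not part of this patch):

#include <linux/mm.h>

/* Return true if 'addr' is mapped by a page larger than 4K in 'target_mm'. */
static bool addr_uses_large_page(struct mm_struct *target_mm, unsigned long addr)
{
	unsigned int level;
	pte_t *pte = lookup_address_in_mm(target_mm, addr, &level);

	return pte && !pte_none(*pte) && level > PG_LEVEL_4K;
}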
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 1e59df0..df1d959 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -625,43 +625,6 @@ unsigned int pcibios_assign_all_busses(void)
 	return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
 }
 
-#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
-static LIST_HEAD(dma_domain_list);
-static DEFINE_SPINLOCK(dma_domain_list_lock);
-
-void add_dma_domain(struct dma_domain *domain)
-{
-	spin_lock(&dma_domain_list_lock);
-	list_add(&domain->node, &dma_domain_list);
-	spin_unlock(&dma_domain_list_lock);
-}
-EXPORT_SYMBOL_GPL(add_dma_domain);
-
-void del_dma_domain(struct dma_domain *domain)
-{
-	spin_lock(&dma_domain_list_lock);
-	list_del(&domain->node);
-	spin_unlock(&dma_domain_list_lock);
-}
-EXPORT_SYMBOL_GPL(del_dma_domain);
-
-static void set_dma_domain_ops(struct pci_dev *pdev)
-{
-	struct dma_domain *domain;
-
-	spin_lock(&dma_domain_list_lock);
-	list_for_each_entry(domain, &dma_domain_list, node) {
-		if (pci_domain_nr(pdev->bus) == domain->domain_nr) {
-			pdev->dev.dma_ops = domain->dma_ops;
-			break;
-		}
-	}
-	spin_unlock(&dma_domain_list_lock);
-}
-#else
-static void set_dma_domain_ops(struct pci_dev *pdev) {}
-#endif
-
 static void set_dev_domain_options(struct pci_dev *pdev)
 {
 	if (is_vmd(pdev->bus))
@@ -697,7 +660,6 @@ int pcibios_add_device(struct pci_dev *dev)
 		pa_data = data->next;
 		memunmap(data);
 	}
-	set_dma_domain_ops(dev);
 	set_dev_domain_options(dev);
 	return 0;
 }
@@ -736,3 +698,13 @@ int pci_ext_cfg_avail(void)
 	else
 		return 0;
 }
+
+#if IS_ENABLED(CONFIG_VMD)
+struct pci_dev *pci_real_dma_dev(struct pci_dev *dev)
+{
+	if (is_vmd(dev->bus))
+		return to_pci_sysdata(dev->bus)->vmd_dev;
+
+	return dev;
+}
+#endif
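
pci_real_dma_dev() lets the DMA layer substitute the device that actually emits the bus transactions; for a device behind VMD that is the VMD endpoint itself. A hedged caller-side sketch (the wrapper below is illustrative; the real in-tree consumer is the DMA/IOMMU code):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static dma_addr_t map_buffer_for(struct pci_dev *pdev, void *buf, size_t len)
{
	/* Map through whichever device really performs the DMA. */
	struct pci_dev *dma_dev = pci_real_dma_dev(pdev);

	return dma_map_single(&dma_dev->dev, buf, len, DMA_TO_DEVICE);
}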
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 59f7f6d..ae923ee 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -308,7 +308,7 @@ static void __init efi_clean_memmap(void)
 			.phys_map = efi.memmap.phys_map,
 			.desc_version = efi.memmap.desc_version,
 			.desc_size = efi.memmap.desc_size,
-			.size = data.desc_size * (efi.memmap.nr_map - n_removal),
+			.size = efi.memmap.desc_size * (efi.memmap.nr_map - n_removal),
 			.flags = 0,
 		};
 
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 71dddd1..081d466 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -49,7 +49,7 @@ void efi_sync_low_kernel_mappings(void) {}
 void __init efi_dump_pagetable(void)
 {
 #ifdef CONFIG_EFI_PGT_DUMP
-	ptdump_walk_pgd_level(NULL, swapper_pg_dir);
+	ptdump_walk_pgd_level(NULL, &init_mm);
 #endif
 }
 
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index e2accfe..fa8506e 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -471,9 +471,9 @@ void __init efi_dump_pagetable(void)
 {
 #ifdef CONFIG_EFI_PGT_DUMP
 	if (efi_have_uv1_memmap())
-		ptdump_walk_pgd_level(NULL, swapper_pg_dir);
+		ptdump_walk_pgd_level(NULL, &init_mm);
 	else
-		ptdump_walk_pgd_level(NULL, efi_mm.pgd);
+		ptdump_walk_pgd_level(NULL, &efi_mm);
 #endif
 }
 
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c
index 44d1f88..139738b 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c
@@ -6,21 +6,31 @@
  * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
  */
 
-#include <linux/gpio.h>
-#include <linux/platform_data/tc35876x.h>
+#include <linux/gpio/machine.h>
 #include <asm/intel-mid.h>
 
+static struct gpiod_lookup_table tc35876x_gpio_table = {
+	.dev_id	= "i2c_disp_brig",
+	.table	= {
+		GPIO_LOOKUP("0000:00:0c.0", -1, "bridge-reset", GPIO_ACTIVE_HIGH),
+		GPIO_LOOKUP("0000:00:0c.0", -1, "bl-en", GPIO_ACTIVE_HIGH),
+		GPIO_LOOKUP("0000:00:0c.0", -1, "vadd", GPIO_ACTIVE_HIGH),
+		{ },
+	},
+};
+
 /*tc35876x DSI_LVDS bridge chip and panel platform data*/
 static void *tc35876x_platform_data(void *data)
 {
-	static struct tc35876x_platform_data pdata;
+	struct gpiod_lookup_table *table = &tc35876x_gpio_table;
+	struct gpiod_lookup *lookup = table->table;
 
-	/* gpio pins set to -1 will not be used by the driver */
-	pdata.gpio_bridge_reset = get_gpio_by_name("LCMB_RXEN");
-	pdata.gpio_panel_bl_en = get_gpio_by_name("6S6P_BL_EN");
-	pdata.gpio_panel_vadd = get_gpio_by_name("EN_VREG_LCD_V3P3");
+	lookup[0].chip_hwnum = get_gpio_by_name("LCMB_RXEN");
+	lookup[1].chip_hwnum = get_gpio_by_name("6S6P_BL_EN");
+	lookup[2].chip_hwnum = get_gpio_by_name("EN_VREG_LCD_V3P3");
+	gpiod_add_lookup_table(table);
 
-	return &pdata;
+	return NULL;
 }
 
 static const struct devs_id tc35876x_dev_id __initconst = {
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 5f0a96bf..1fd321f 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1668,12 +1668,12 @@ static int tunables_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static const struct file_operations proc_uv_ptc_operations = {
-	.open		= ptc_proc_open,
-	.read		= seq_read,
-	.write		= ptc_proc_write,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
+static const struct proc_ops uv_ptc_proc_ops = {
+	.proc_open	= ptc_proc_open,
+	.proc_read	= seq_read,
+	.proc_write	= ptc_proc_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
 };
 
 static const struct file_operations tunables_fops = {
@@ -1691,7 +1691,7 @@ static int __init uv_ptc_init(void)
 		return 0;
 
 	proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
-				  &proc_uv_ptc_operations);
+				  &uv_ptc_proc_ops);
 	if (!proc_uv_ptc) {
 		pr_err("unable to create %s proc entry\n",
 		       UV_PTC_BASENAME);
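
This hunk, and the simdisk change further down, are instances of the same mechanical conversion: procfs entries now register a struct proc_ops instead of a struct file_operations. A minimal sketch of the resulting pattern, with made-up names:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct proc_ops example_proc_ops = {
	.proc_open	= example_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/* Registered much like above: proc_create("example", 0444, NULL, &example_proc_ops); */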
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 1c64517..32ee759 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -180,11 +180,11 @@
 	depends on XTENSA_VARIANT_CUSTOM
 	select XTENSA_MX
 	help
-	  This option is use to indicate that the system-on-a-chip (SOC)
+	  This option is used to indicate that the system-on-a-chip (SOC)
 	  supports Multiprocessing. Multiprocessor support implemented above
 	  the CPU core definition and currently needs to be selected manually.
 
-	  Multiprocessor support in implemented with external cache and
+	  Multiprocessor support is implemented with external cache and
 	  interrupt controllers.
 
 	  The MX interrupt distributor adds Interprocessor Interrupts
@@ -572,15 +572,41 @@
 
 	  If unsure, leave the default value here.
 
-config VECTORS_OFFSET
-	hex "Kernel vectors offset"
-	default 0x00003000
-	depends on !XIP_KERNEL
+choice
+	prompt "Relocatable vectors location"
+	default XTENSA_VECTORS_IN_TEXT
 	help
-	  This is the offset of the kernel image from the relocatable vectors
-	  base.
+	  Choose whether relocatable vectors are merged into the kernel .text
+	  or placed separately at runtime. This option does not affect
+	  configurations without a VECBASE register, where vectors are always
+	  placed at their hardware-defined locations.
 
-	  If unsure, leave the default value here.
+config XTENSA_VECTORS_IN_TEXT
+	bool "Merge relocatable vectors into kernel text"
+	depends on !MTD_XIP
+	help
+	  This option puts relocatable vectors into the kernel .text section
+	  with proper alignment.
+	  This is a safe choice for most configurations.
+
+config XTENSA_VECTORS_SEPARATE
+	bool "Put relocatable vectors at fixed address"
+	help
+	  This option puts relocatable vectors at a specific virtual address.
+	  Vectors are merged with the .init data in the kernel image and
+	  are copied into their designated location during kernel startup.
+	  Use it to put vectors into IRAM or out of FLASH on kernels with
+	  XIP-aware MTD support.
+
+endchoice
+
+config VECTORS_ADDR
+	hex "Kernel vectors virtual address"
+	default 0x00000000
+	depends on XTENSA_VECTORS_SEPARATE
+	help
+	  This is the virtual address of the (relocatable) vectors base.
+	  It must be within KSEG if MMU is used.
 
 config XIP_DATA_ADDR
 	hex "XIP kernel data virtual address"
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
index b6367af..eeb4c53 100644
--- a/arch/xtensa/configs/audio_kc705_defconfig
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -21,8 +21,6 @@
 CONFIG_OPROFILE=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_XTENSA_VARIANT_CUSTOM=y
 CONFIG_XTENSA_VARIANT_CUSTOM_NAME="test_kc705_hifi"
 CONFIG_XTENSA_UNALIGNED_USER=y
diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig
index f4eef6d..fc24073 100644
--- a/arch/xtensa/configs/cadence_csp_defconfig
+++ b/arch/xtensa/configs/cadence_csp_defconfig
@@ -27,8 +27,6 @@
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_XTENSA_VARIANT_CUSTOM=y
 CONFIG_XTENSA_VARIANT_CUSTOM_NAME="csp"
 CONFIG_XTENSA_UNALIGNED_USER=y
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index c925165..412f611 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -21,8 +21,6 @@
 CONFIG_OPROFILE=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_XTENSA_VARIANT_DC233C=y
 CONFIG_XTENSA_UNALIGNED_USER=y
 CONFIG_PREEMPT=y
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index d1c0174..32ce8fb 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -1,8 +1,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_EXPERT=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 # CONFIG_PCI is not set
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="console=ttyS0,38400 eth0=tuntap,,tap0 ip=192.168.168.5:192.168.168.1 root=nfs nfsroot=192.168.168.1:/opt/montavista/pro/devkit/xtensa/linux_be/target memmap=128M@0"
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig
index 380e366..88b2e22 100644
--- a/arch/xtensa/configs/nommu_kc705_defconfig
+++ b/arch/xtensa/configs/nommu_kc705_defconfig
@@ -25,8 +25,6 @@
 CONFIG_PERF_EVENTS=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_XTENSA_VARIANT_CUSTOM=y
 CONFIG_XTENSA_VARIANT_CUSTOM_NAME="de212"
 # CONFIG_XTENSA_VARIANT_MMU is not set
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index d46b58f..4f1c799 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -21,8 +21,6 @@
 CONFIG_OPROFILE=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_XTENSA_VARIANT_CUSTOM=y
 CONFIG_XTENSA_VARIANT_CUSTOM_NAME="test_mmuhifi_c3"
 CONFIG_XTENSA_UNALIGNED_USER=y
@@ -32,7 +30,6 @@
 CONFIG_HOTPLUG_CPU=y
 # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
 # CONFIG_PCI is not set
-CONFIG_VECTORS_OFFSET=0x00002000
 CONFIG_XTENSA_PLATFORM_XTFPGA=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
diff --git a/arch/xtensa/configs/virt_defconfig b/arch/xtensa/configs/virt_defconfig
index 4fddd85..6d1387d 100644
--- a/arch/xtensa/configs/virt_defconfig
+++ b/arch/xtensa/configs/virt_defconfig
@@ -19,7 +19,6 @@
 CONFIG_PERF_EVENTS=y
 CONFIG_XTENSA_VARIANT_DC233C=y
 CONFIG_XTENSA_UNALIGNED_USER=y
-CONFIG_VECTORS_OFFSET=0x00002000
 CONFIG_XTENSA_KSEG_512M=y
 CONFIG_HIGHMEM=y
 CONFIG_CMDLINE_BOOL=y
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 3acc31e..271917c 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -4,7 +4,6 @@
 generic-y += compat.h
 generic-y += device.h
 generic-y += div64.h
-generic-y += dma-contiguous.h
 generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += exec.h
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h
index 71a7e84..bfc89e1 100644
--- a/arch/xtensa/include/asm/asmmacro.h
+++ b/arch/xtensa/include/asm/asmmacro.h
@@ -237,4 +237,6 @@
 #error Unsupported Xtensa ABI
 #endif
 
+#define __XTENSA_HANDLER	.section ".exception.text", "ax"
+
 #endif /* _XTENSA_ASMMACRO_H */
diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h
index 5b4acb7..5590b0f 100644
--- a/arch/xtensa/include/asm/core.h
+++ b/arch/xtensa/include/asm/core.h
@@ -10,10 +10,18 @@
 #define XCHAL_HAVE_EXCLUSIVE 0
 #endif
 
+#ifndef XCHAL_HAVE_EXTERN_REGS
+#define XCHAL_HAVE_EXTERN_REGS 0
+#endif
+
 #ifndef XCHAL_HAVE_MPU
 #define XCHAL_HAVE_MPU 0
 #endif
 
+#ifndef XCHAL_HAVE_VECBASE
+#define XCHAL_HAVE_VECBASE 0
+#endif
+
 #ifndef XCHAL_SPANNING_WAY
 #define XCHAL_SPANNING_WAY 0
 #endif
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
index f2c4852..354ca94 100644
--- a/arch/xtensa/include/asm/platform.h
+++ b/arch/xtensa/include/asm/platform.h
@@ -12,8 +12,6 @@
 #define _XTENSA_PLATFORM_H
 
 #include <linux/types.h>
-#include <linux/pci.h>
-
 #include <asm/bootparam.h>
 
 /*
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 6fa903d..7f63aca 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -237,10 +237,6 @@ extern unsigned long get_wchan(struct task_struct *p);
 	 v; \
 	 })
 
-#ifndef XCHAL_HAVE_EXTERN_REGS
-#define XCHAL_HAVE_EXTERN_REGS 0
-#endif
-
 #if XCHAL_HAVE_EXTERN_REGS
 
 static inline void set_er(unsigned long value, unsigned long addr)
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index fd99b25..704286c 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -34,13 +34,13 @@
 #endif
 
 #define RESET_VECTOR1_VADDR		(XCHAL_RESET_VECTOR1_VADDR)
-#ifdef CONFIG_VECTORS_OFFSET
-#define VECBASE_VADDR			(KERNELOFFSET - CONFIG_VECTORS_OFFSET)
+#ifdef CONFIG_VECTORS_ADDR
+#define VECBASE_VADDR			(CONFIG_VECTORS_ADDR)
 #else
 #define VECBASE_VADDR			_vecbase
 #endif
 
-#if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE
+#if XCHAL_HAVE_VECBASE
 
 #define VECTOR_VADDR(offset)		(VECBASE_VADDR + offset)
 
diff --git a/arch/xtensa/include/uapi/asm/setup.h b/arch/xtensa/include/uapi/asm/setup.h
index 57e6c21..5356a5f 100644
--- a/arch/xtensa/include/uapi/asm/setup.h
+++ b/arch/xtensa/include/uapi/asm/setup.h
@@ -14,6 +14,4 @@
 
 #define COMMAND_LINE_SIZE	256
 
-extern void set_except_vector(int n, void *addr);
-
 #endif
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index bb8e499..c426b84 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -58,6 +58,8 @@
 	.endif;								\
 	.long THREAD_XTREGS_CP##x
 
+	__XTENSA_HANDLER
+
 	SAVE_CP_REGS(0)
 	SAVE_CP_REGS(1)
 	SAVE_CP_REGS(2)
@@ -76,7 +78,6 @@
 	LOAD_CP_REGS(6)
 	LOAD_CP_REGS(7)
 
-	.section ".rodata", "a"
 	.align 4
 .Lsave_cp_regs_jump_table:
 	SAVE_CP_REGS_TAB(0)
@@ -98,8 +99,6 @@
 	LOAD_CP_REGS_TAB(6)
 	LOAD_CP_REGS_TAB(7)
 
-	.previous
-
 /*
  * coprocessor_flush(struct thread_info*, index)
  *                             a2        a3
@@ -145,13 +144,6 @@
  *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
  */
 
-ENTRY(fast_coprocessor_double)
-
-	wsr	a0, excsave1
-	call0	unrecoverable_exception
-
-ENDPROC(fast_coprocessor_double)
-
 ENTRY(fast_coprocessor)
 
 	/* Save remaining registers a1-a3 and SAR */
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 2c9e485..06fbb0a 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -939,6 +939,9 @@
 
 /* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
 
+	__XTENSA_HANDLER
+	.literal_position
+
 /*
  * Fast-handler for alloca exceptions
  *
@@ -1024,7 +1027,7 @@
 ENTRY(fast_illegal_instruction_user)
 
 	rsr	a0, ps
-	bbsi.l	a0, PS_WOE_BIT, user_exception
+	bbsi.l	a0, PS_WOE_BIT, 1f
 	s32i	a3, a2, PT_AREG3
 	movi	a3, PS_WOE_MASK
 	or	a0, a0, a3
@@ -1033,6 +1036,8 @@
 	l32i	a0, a2, PT_AREG0
 	rsr	a2, depc
 	rfe
+1:
+	call0	user_exception
 
 ENDPROC(fast_illegal_instruction_user)
 #endif
@@ -1071,7 +1076,7 @@
 	_beqz	a0, fast_syscall_spill_registers
 	_beqi	a0, __NR_xtensa, fast_syscall_xtensa
 
-	j	user_exception
+	call0	user_exception
 
 ENDPROC(fast_syscall_user)
 
@@ -1762,8 +1767,8 @@
 
 	rsr	a2, ps
 	bbsi.l	a2, PS_UM_BIT, 1f
-	j	_kernel_exception
-1:	j	_user_exception
+	call0	_kernel_exception
+1:	call0	_user_exception
 
 ENDPROC(fast_second_level_miss)
 
@@ -1859,13 +1864,14 @@
 
 	rsr	a2, ps
 	bbsi.l	a2, PS_UM_BIT, 1f
-	j	_kernel_exception
-1:	j	_user_exception
+	call0	_kernel_exception
+1:	call0	_user_exception
 
 ENDPROC(fast_store_prohibited)
 
 #endif /* CONFIG_MMU */
 
+	.text
 /*
  * System Calls.
  *
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
index a95ba05..ac1e0e5 100644
--- a/arch/xtensa/kernel/platform.c
+++ b/arch/xtensa/kernel/platform.c
@@ -12,12 +12,10 @@
  * Chris Zankel <chris@zankel.net>
  */
 
+#include <linux/printk.h>
 #include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/time.h>
 #include <asm/platform.h>
 #include <asm/timex.h>
-#include <asm/param.h>		/* HZ */
 
 #define _F(r,f,a,b)							\
 	r __platform_##f a b;                                   	\
@@ -28,6 +26,7 @@
  * (Please, refer to include/asm-xtensa/platform.h for more information)
  */
 
+_F(void, init, (bp_tag_t *first), { });
 _F(void, setup, (char** cmd), { });
 _F(void, restart, (void), { while(1); });
 _F(void, halt, (void), { while(1); });
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 0f93b67..3880c76 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -284,6 +284,8 @@ extern char _UserExceptionVector_text_start;
 extern char _UserExceptionVector_text_end;
 extern char _DoubleExceptionVector_text_start;
 extern char _DoubleExceptionVector_text_end;
+extern char _exception_text_start;
+extern char _exception_text_end;
 #if XCHAL_EXCM_LEVEL >= 2
 extern char _Level2InterruptVector_text_start;
 extern char _Level2InterruptVector_text_end;
@@ -347,7 +349,7 @@ void __init setup_arch(char **cmdline_p)
 	mem_reserve(__pa(_xip_start), __pa(_xip_end));
 #endif
 
-#ifdef CONFIG_VECTORS_OFFSET
+#ifdef CONFIG_VECTORS_ADDR
 	mem_reserve(__pa(&_WindowVectors_text_start),
 		    __pa(&_WindowVectors_text_end));
 
@@ -363,6 +365,8 @@ void __init setup_arch(char **cmdline_p)
 	mem_reserve(__pa(&_DoubleExceptionVector_text_start),
 		    __pa(&_DoubleExceptionVector_text_end));
 
+	mem_reserve(__pa(&_exception_text_start),
+		    __pa(&_exception_text_end));
 #if XCHAL_EXCM_LEVEL >= 2
 	mem_reserve(__pa(&_Level2InterruptVector_text_start),
 		    __pa(&_Level2InterruptVector_text_end));
@@ -384,7 +388,7 @@ void __init setup_arch(char **cmdline_p)
 		    __pa(&_Level6InterruptVector_text_end));
 #endif
 
-#endif /* CONFIG_VECTORS_OFFSET */
+#endif /* CONFIG_VECTORS_ADDR */
 
 #ifdef CONFIG_SMP
 	mem_reserve(__pa(&_SecondaryResetVector_text_start),
@@ -405,8 +409,6 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_VT
 # if defined(CONFIG_VGA_CONSOLE)
 	conswitchp = &vga_con;
-# elif defined(CONFIG_DUMMY_CONSOLE)
-	conswitchp = &dummy_con;
 # endif
 #endif
 }
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 25f4de7..85a9ab1 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -406,3 +406,5 @@
 433	common	fspick				sys_fspick
 434	common	pidfd_open			sys_pidfd_open
 435	common	clone3				sys_clone3
+437	common	openat2				sys_openat2
+438	common	pidfd_getfd			sys_pidfd_getfd
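
These two entries wire up openat2(2) and pidfd_getfd(2) for xtensa. A hedged user-space sketch of invoking the former through syscall(2), assuming libc headers that already provide SYS_openat2 and struct open_how:

#include <fcntl.h>
#include <linux/openat2.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open 'name' relative to 'dirfd', refusing to resolve outside of it. */
static int open_beneath(int dirfd, const char *name)
{
	struct open_how how = {
		.flags   = O_RDONLY,
		.resolve = RESOLVE_BENEATH,
	};

	return syscall(SYS_openat2, dirfd, name, &how, sizeof(how));
}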
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 841503d..95ad1e7 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -43,6 +43,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/asmmacro.h>
 #include <asm/ptrace.h>
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
@@ -477,7 +478,6 @@
 
 ENDPROC(_DoubleExceptionVector)
 
-	.text
 /*
  * Fixup handler for TLB miss in double exception handler for window overflow.
  * We get here with windowbase set to the window that was being spilled and
@@ -505,6 +505,7 @@
  * a3: exctable, original value in excsave1
  */
 
+	__XTENSA_HANDLER
 	.literal_position
 
 ENTRY(window_overflow_restore_a0_fixup)
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 409c05c..d23a6e3 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -47,15 +47,20 @@
 	LONG(sym ## _end);			\
 	LONG(LOADADDR(section))
 
+#if !defined(CONFIG_VECTORS_ADDR) && XCHAL_HAVE_VECBASE
+#define MERGED_VECTORS 1
+#else
+#define MERGED_VECTORS 0
+#endif
+
 /*
- * Macro to define a section for a vector. When CONFIG_VECTORS_OFFSET is
- * defined code for every vector is located with other init data. At startup
+ * Macro to define a section for a vector. When MERGED_VECTORS is 0,
+ * code for every vector is located with other init data. At startup
  * time head.S copies code for every vector to its final position according
  * to description recorded in the corresponding RELOCATE_ENTRY.
  */
 
-#ifdef CONFIG_VECTORS_OFFSET
-#define SECTION_VECTOR(sym, section, addr, prevsec)                         \
+#define SECTION_VECTOR4(sym, section, addr, prevsec)			    \
   section addr : AT(((LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3)      \
   {									    \
     . = ALIGN(4);							    \
@@ -63,11 +68,10 @@
     *(section)								    \
     sym ## _end = ABSOLUTE(.);						    \
   }
-#else
-#define SECTION_VECTOR(section, addr)					    \
+
+#define SECTION_VECTOR2(section, addr)					    \
   . = addr;								    \
   *(section)
-#endif
 
 /*
  *  Mapping of input sections to output sections when linking.
@@ -86,30 +90,32 @@
     /* The HEAD_TEXT section must be the first section! */
     HEAD_TEXT
 
-#ifndef CONFIG_VECTORS_OFFSET
-  . = ALIGN(PAGE_SIZE);
-  _vecbase = .;
+#if MERGED_VECTORS
+    . = ALIGN(PAGE_SIZE);
+    _vecbase = .;
 
-  SECTION_VECTOR (.WindowVectors.text, WINDOW_VECTORS_VADDR)
+    SECTION_VECTOR2 (.WindowVectors.text, WINDOW_VECTORS_VADDR)
 #if XCHAL_EXCM_LEVEL >= 2
-  SECTION_VECTOR (.Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR)
+    SECTION_VECTOR2 (.Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR)
 #endif
 #if XCHAL_EXCM_LEVEL >= 3
-  SECTION_VECTOR (.Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR)
+    SECTION_VECTOR2 (.Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR)
 #endif
 #if XCHAL_EXCM_LEVEL >= 4
-  SECTION_VECTOR (.Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR)
+    SECTION_VECTOR2 (.Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR)
 #endif
 #if XCHAL_EXCM_LEVEL >= 5
-  SECTION_VECTOR (.Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR)
+    SECTION_VECTOR2 (.Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR)
 #endif
 #if XCHAL_EXCM_LEVEL >= 6
-  SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR)
+    SECTION_VECTOR2 (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR)
 #endif
-  SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR)
-  SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
-  SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
-  SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
+    SECTION_VECTOR2 (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR)
+    SECTION_VECTOR2 (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
+    SECTION_VECTOR2 (.UserExceptionVector.text, USER_VECTOR_VADDR)
+    SECTION_VECTOR2 (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
+
+    *(.exception.text)
 #endif
 
     IRQENTRY_TEXT
@@ -159,7 +165,7 @@
     . = ALIGN(16);
     __boot_reloc_table_start = ABSOLUTE(.);
 
-#ifdef CONFIG_VECTORS_OFFSET
+#if !MERGED_VECTORS
     RELOCATE_ENTRY(_WindowVectors_text,
 		   .WindowVectors.text);
 #if XCHAL_EXCM_LEVEL >= 2
@@ -190,16 +196,17 @@
 		   .DoubleExceptionVector.text);
     RELOCATE_ENTRY(_DebugInterruptVector_text,
 		   .DebugInterruptVector.text);
+    RELOCATE_ENTRY(_exception_text,
+		   .exception.text);
 #endif
 #ifdef CONFIG_XIP_KERNEL
     RELOCATE_ENTRY(_xip_data, .data);
     RELOCATE_ENTRY(_xip_init_data, .init.data);
-#else
+#endif
 #if defined(CONFIG_SMP)
     RELOCATE_ENTRY(_SecondaryResetVector_text,
 		   .SecondaryResetVector.text);
 #endif
-#endif
 
     __boot_reloc_table_end = ABSOLUTE(.) ;
 
@@ -216,21 +223,24 @@
   . = ALIGN(4);
   .dummy : { LONG(0) }
 
-#ifdef CONFIG_VECTORS_OFFSET
+#undef LAST
+#define LAST	.dummy
+
+#if !MERGED_VECTORS
   /* The vectors are relocated to the real position at startup time */
 
-  SECTION_VECTOR (_WindowVectors_text,
+  SECTION_VECTOR4 (_WindowVectors_text,
 		  .WindowVectors.text,
 		  WINDOW_VECTORS_VADDR,
 		  .dummy)
-  SECTION_VECTOR (_DebugInterruptVector_text,
+  SECTION_VECTOR4 (_DebugInterruptVector_text,
 		  .DebugInterruptVector.text,
 		  DEBUG_VECTOR_VADDR,
 		  .WindowVectors.text)
 #undef LAST
 #define LAST	.DebugInterruptVector.text
 #if XCHAL_EXCM_LEVEL >= 2
-  SECTION_VECTOR (_Level2InterruptVector_text,
+  SECTION_VECTOR4 (_Level2InterruptVector_text,
 		  .Level2InterruptVector.text,
 		  INTLEVEL2_VECTOR_VADDR,
 		  LAST)
@@ -238,7 +248,7 @@
 # define LAST	.Level2InterruptVector.text
 #endif
 #if XCHAL_EXCM_LEVEL >= 3
-  SECTION_VECTOR (_Level3InterruptVector_text,
+  SECTION_VECTOR4 (_Level3InterruptVector_text,
 		  .Level3InterruptVector.text,
 		  INTLEVEL3_VECTOR_VADDR,
 		  LAST)
@@ -246,7 +256,7 @@
 # define LAST	.Level3InterruptVector.text
 #endif
 #if XCHAL_EXCM_LEVEL >= 4
-  SECTION_VECTOR (_Level4InterruptVector_text,
+  SECTION_VECTOR4 (_Level4InterruptVector_text,
 		  .Level4InterruptVector.text,
 		  INTLEVEL4_VECTOR_VADDR,
 		  LAST)
@@ -254,7 +264,7 @@
 # define LAST	.Level4InterruptVector.text
 #endif
 #if XCHAL_EXCM_LEVEL >= 5
-  SECTION_VECTOR (_Level5InterruptVector_text,
+  SECTION_VECTOR4 (_Level5InterruptVector_text,
 		  .Level5InterruptVector.text,
 		  INTLEVEL5_VECTOR_VADDR,
 		  LAST)
@@ -262,41 +272,51 @@
 # define LAST	.Level5InterruptVector.text
 #endif
 #if XCHAL_EXCM_LEVEL >= 6
-  SECTION_VECTOR (_Level6InterruptVector_text,
+  SECTION_VECTOR4 (_Level6InterruptVector_text,
 		  .Level6InterruptVector.text,
 		  INTLEVEL6_VECTOR_VADDR,
 		  LAST)
 # undef LAST
 # define LAST	.Level6InterruptVector.text
 #endif
-  SECTION_VECTOR (_KernelExceptionVector_text,
+  SECTION_VECTOR4 (_KernelExceptionVector_text,
 		  .KernelExceptionVector.text,
 		  KERNEL_VECTOR_VADDR,
 		  LAST)
 #undef LAST
-  SECTION_VECTOR (_UserExceptionVector_text,
+  SECTION_VECTOR4 (_UserExceptionVector_text,
 		  .UserExceptionVector.text,
 		  USER_VECTOR_VADDR,
 		  .KernelExceptionVector.text)
-  SECTION_VECTOR (_DoubleExceptionVector_text,
+  SECTION_VECTOR4 (_DoubleExceptionVector_text,
 		  .DoubleExceptionVector.text,
 		  DOUBLEEXC_VECTOR_VADDR,
 		  .UserExceptionVector.text)
-
-  . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
+#define LAST .DoubleExceptionVector.text
 
 #endif
-#if !defined(CONFIG_XIP_KERNEL) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP)
 
-  SECTION_VECTOR (_SecondaryResetVector_text,
+  SECTION_VECTOR4 (_SecondaryResetVector_text,
 		  .SecondaryResetVector.text,
 		  RESET_VECTOR1_VADDR,
-		  .DoubleExceptionVector.text)
-
-  . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text);
+		  LAST)
+#undef LAST
+#define LAST .SecondaryResetVector.text
 
 #endif
+#if !MERGED_VECTORS
+  SECTION_VECTOR4 (_exception_text,
+		  .exception.text,
+		  ,
+		  LAST)
+#undef LAST
+#define LAST .exception.text
 
+#endif
+  . = (LOADADDR(LAST) + SIZEOF(LAST) + 3) & ~ 3;
+
+  .dummy1 : AT(ADDR(.dummy1)) { LONG(0) }
   . = ALIGN(PAGE_SIZE);
 
 #ifndef CONFIG_XIP_KERNEL
@@ -314,7 +334,7 @@
 
 #undef LOAD_OFFSET
 #define LOAD_OFFSET \
-  (CONFIG_XIP_DATA_ADDR - (LOADADDR(.dummy) + SIZEOF(.dummy) + 3) & ~ 3)
+  (CONFIG_XIP_DATA_ADDR - (LOADADDR(.dummy1) + SIZEOF(.dummy1) + 3) & ~ 3)
 
   _xip_data_start = .;
   _sdata = .;
diff --git a/arch/xtensa/platforms/iss/include/platform/simcall.h b/arch/xtensa/platforms/iss/include/platform/simcall.h
index 2ba4585..f42870a 100644
--- a/arch/xtensa/platforms/iss/include/platform/simcall.h
+++ b/arch/xtensa/platforms/iss/include/platform/simcall.h
@@ -66,19 +66,17 @@ static int errno;
 
 static inline int __simc(int a, int b, int c, int d)
 {
-	int ret;
 	register int a1 asm("a2") = a;
 	register int b1 asm("a3") = b;
 	register int c1 asm("a4") = c;
 	register int d1 asm("a5") = d;
 	__asm__ __volatile__ (
 			"simcall\n"
-			"mov %0, a2\n"
-			"mov %1, a3\n"
-			: "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1)
+			: "+r"(a1), "+r"(b1)
 			: "r"(c1), "r"(d1)
 			: "memory");
-	return ret;
+	errno = b1;
+	return a1;
 }
 
 static inline int simc_exit(int exit_code)
@@ -113,9 +111,9 @@ static inline int simc_write(int fd, const void *buf, size_t count)
 
 static inline int simc_poll(int fd)
 {
-	struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
+	long timeval[2] = { 0, 0 };
 
-	return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv);
+	return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&timeval);
 }
 
 static inline int simc_lseek(int fd, uint32_t off, int whence)
diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c
index e28dd53..ed519ae 100644
--- a/arch/xtensa/platforms/iss/setup.c
+++ b/arch/xtensa/platforms/iss/setup.c
@@ -11,32 +11,18 @@
  * Copyright 2001 - 2005 Tensilica Inc.
  * Copyright 2017 Cadence Design Systems Inc.
  */
-#include <linux/memblock.h>
-#include <linux/stddef.h>
-#include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/reboot.h>
-#include <linux/kdev_t.h>
-#include <linux/types.h>
-#include <linux/major.h>
-#include <linux/blkdev.h>
-#include <linux/console.h>
-#include <linux/delay.h>
-#include <linux/stringify.h>
+#include <linux/kernel.h>
 #include <linux/notifier.h>
+#include <linux/printk.h>
+#include <linux/string.h>
 
 #include <asm/platform.h>
-#include <asm/bootparam.h>
 #include <asm/setup.h>
 
 #include <platform/simcall.h>
 
 
-void __init platform_init(bp_tag_t* bootparam)
-{
-}
-
 void platform_halt(void)
 {
 	pr_info(" ** Called platform_halt() **\n");
@@ -48,6 +34,7 @@ void platform_power_off(void)
 	pr_info(" ** Called platform_power_off() **\n");
 	simc_exit(0);
 }
+
 void platform_restart(void)
 {
 	/* Flush and reset the mmu, simulate a processor reset, and
@@ -56,10 +43,6 @@ void platform_restart(void)
 	/* control never gets here */
 }
 
-void platform_heartbeat(void)
-{
-}
-
 static int
 iss_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index f9cd458..8331098 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -251,10 +251,10 @@ static ssize_t proc_write_simdisk(struct file *file, const char __user *buf,
 	return err;
 }
 
-static const struct file_operations fops = {
-	.read = proc_read_simdisk,
-	.write = proc_write_simdisk,
-	.llseek = default_llseek,
+static const struct proc_ops simdisk_proc_ops = {
+	.proc_read	= proc_read_simdisk,
+	.proc_write	= proc_write_simdisk,
+	.proc_lseek	= default_llseek,
 };
 
 static int __init simdisk_setup(struct simdisk *dev, int which,
@@ -290,7 +290,7 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
 	set_capacity(dev->gd, 0);
 	add_disk(dev->gd);
 
-	dev->procfile = proc_create_data(tmp, 0644, procdir, &fops, dev);
+	dev->procfile = proc_create_data(tmp, 0644, procdir, &simdisk_proc_ops, dev);
 	return 0;
 
 out_alloc_disk:
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 829115b..4f7d6142 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -24,6 +24,7 @@
 #include <linux/of.h>
 #include <linux/clk-provider.h>
 #include <linux/of_address.h>
+#include <linux/slab.h>
 
 #include <asm/timex.h>
 #include <asm/processor.h>
@@ -56,22 +57,6 @@ void platform_restart(void)
 	/* control never gets here */
 }
 
-void __init platform_setup(char **cmdline)
-{
-}
-
-/* early initialization */
-
-void __init platform_init(bp_tag_t *first)
-{
-}
-
-/* Heartbeat. */
-
-void platform_heartbeat(void)
-{
-}
-
 #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
 
 void __init platform_calibrate_ccount(void)
diff --git a/block/Makefile b/block/Makefile
index f6cef6d..1a43750 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -25,7 +25,6 @@
 bfq-y				:= bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
 obj-$(CONFIG_IOSCHED_BFQ)	+= bfq.o
 
-obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
 obj-$(CONFIG_BLK_CMDLINE_PARSER)	+= cmdline-parser.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY_T10)	+= t10-pi.o
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index e1419ed..09b69a3 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg)
 		kfree(bfqg);
 }
 
-static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+void bfqg_and_blkg_get(struct bfq_group *bfqg)
 {
 	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
 	bfqg_get(bfqg);
@@ -651,9 +651,15 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
 				false, BFQQE_PREEMPTED);
 
+	/*
+	 * get extra reference to prevent bfqq from being freed in
+	 * next possible deactivate
+	 */
+	bfqq->ref++;
+
 	if (bfq_bfqq_busy(bfqq))
 		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
-	else if (entity->on_st)
+	else if (entity->on_st_or_in_serv)
 		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
 	bfqg_and_blkg_put(bfqq_group(bfqq));
 
@@ -670,6 +676,8 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 
 	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
 		bfq_schedule_dispatch(bfqd);
+	/* release extra ref taken above */
+	bfq_put_queue(bfqq);
 }
 
 /**
@@ -1398,6 +1406,10 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
 	return bfqq->bfqd->root_group;
 }
 
+void bfqg_and_blkg_get(struct bfq_group *bfqg) {}
+
+void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
+
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
 {
 	struct bfq_group *bfqg;
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 4686b68..8c436ab 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -613,6 +613,10 @@ bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 		bfqq->pos_root = NULL;
 	}
 
+	/* oom_bfqq does not participate in queue merging */
+	if (bfqq == &bfqd->oom_bfqq)
+		return;
+
 	/*
 	 * bfqq cannot be merged any longer (see comments in
 	 * bfq_setup_cooperator): no point in adding bfqq into the
@@ -1055,7 +1059,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
 
 static int bfqq_process_refs(struct bfq_queue *bfqq)
 {
-	return bfqq->ref - bfqq->allocated - bfqq->entity.on_st -
+	return bfqq->ref - bfqq->allocated - bfqq->entity.on_st_or_in_serv -
 		(bfqq->weight_counter != NULL);
 }
 
@@ -3443,6 +3447,10 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
 static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
 						 struct bfq_queue *bfqq)
 {
+	/* No point in idling for bfqq if it won't get requests any longer */
+	if (unlikely(!bfqq_process_refs(bfqq)))
+		return false;
+
 	return (bfqq->wr_coeff > 1 &&
 		(bfqd->wr_busy_queues <
 		 bfq_tot_busy_queues(bfqd) ||
@@ -4076,6 +4084,10 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
 		bfqq_sequential_and_IO_bound,
 		idling_boosts_thr;
 
+	/* No point in idling for bfqq if it won't get requests any longer */
+	if (unlikely(!bfqq_process_refs(bfqq)))
+		return false;
+
 	bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
 		bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
 
@@ -4169,6 +4181,10 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
 	struct bfq_data *bfqd = bfqq->bfqd;
 	bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
 
+	/* No point in idling for bfqq if it won't get requests any longer */
+	if (unlikely(!bfqq_process_refs(bfqq)))
+		return false;
+
 	if (unlikely(bfqd->strict_guarantees))
 		return true;
 
@@ -4809,9 +4825,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
 {
 	struct bfq_queue *item;
 	struct hlist_node *n;
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	struct bfq_group *bfqg = bfqq_group(bfqq);
-#endif
 
 	if (bfqq->bfqd)
 		bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
@@ -4884,9 +4898,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
 		bfqq->bfqd->last_completed_rq_bfqq = NULL;
 
 	kmem_cache_free(bfq_pool, bfqq);
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	bfqg_and_blkg_put(bfqg);
-#endif
 }
 
 static void bfq_put_cooperator(struct bfq_queue *bfqq)
@@ -5967,6 +5979,8 @@ static void bfq_finish_requeue_request(struct request *rq)
 }
 
 /*
+ * Removes the association between the current task and bfqq, assuming
+ * that bic points to the bfq iocontext of the task.
  * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
  * was the last process referring to that bfqq.
  */
@@ -6374,10 +6388,10 @@ static void bfq_exit_queue(struct elevator_queue *e)
 
 	hrtimer_cancel(&bfqd->idle_slice_timer);
 
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	/* release oom-queue reference to root group */
 	bfqg_and_blkg_put(bfqd->root_group);
 
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
 #else
 	spin_lock_irq(&bfqd->lock);
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 8526f20..d1233af 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -150,7 +150,7 @@ struct bfq_entity {
 	 * Flag, true if the entity is on a tree (either the active or
 	 * the idle one of its service_tree) or is in service.
 	 */
-	bool on_st;
+	bool on_st_or_in_serv;
 
 	/* B-WF2Q+ start and finish timestamps [sectors/weight] */
 	u64 start, finish;
@@ -921,6 +921,7 @@ struct bfq_group {
 
 #else
 struct bfq_group {
+	struct bfq_entity entity;
 	struct bfq_sched_data sched_data;
 
 	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
@@ -984,6 +985,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
+void bfqg_and_blkg_get(struct bfq_group *bfqg);
 void bfqg_and_blkg_put(struct bfq_group *bfqg);
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index ffe9ce9..eb0e2a6 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -533,7 +533,9 @@ static void bfq_get_entity(struct bfq_entity *entity)
 		bfqq->ref++;
 		bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
 			     bfqq, bfqq->ref);
-	}
+	} else
+		bfqg_and_blkg_get(container_of(entity, struct bfq_group,
+					       entity));
 }
 
 /**
@@ -645,10 +647,16 @@ static void bfq_forget_entity(struct bfq_service_tree *st,
 {
 	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
 
-	entity->on_st = false;
+	entity->on_st_or_in_serv = false;
 	st->wsum -= entity->weight;
-	if (bfqq && !is_in_service)
+	if (is_in_service)
+		return;
+
+	if (bfqq)
 		bfq_put_queue(bfqq);
+	else
+		bfqg_and_blkg_put(container_of(entity, struct bfq_group,
+					       entity));
 }
 
 /**
@@ -999,7 +1007,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
 		 */
 		bfq_get_entity(entity);
 
-		entity->on_st = true;
+		entity->on_st_or_in_serv = true;
 	}
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
@@ -1165,7 +1173,10 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
 	struct bfq_service_tree *st;
 	bool is_in_service;
 
-	if (!entity->on_st) /* entity never activated, or already inactive */
+	if (!entity->on_st_or_in_serv) /*
+					* entity never activated, or
+					* already inactive
+					*/
 		return false;
 
 	/*
@@ -1620,7 +1631,7 @@ bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
 	 * service tree either, then release the service reference to
 	 * the queue it represents (taken with bfq_get_entity).
 	 */
-	if (!in_serv_entity->on_st) {
+	if (!in_serv_entity->on_st_or_in_serv) {
 		/*
 		 * If no process is referencing in_serv_bfqq any
 		 * longer, then the service reference may be the only
diff --git a/block/bsg.c b/block/bsg.c
index 833c44b..d7bae94 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -382,6 +382,7 @@ static const struct file_operations bsg_fops = {
 	.open		=	bsg_open,
 	.release	=	bsg_release,
 	.unlocked_ioctl	=	bsg_ioctl,
+	.compat_ioctl	=	compat_ptr_ioctl,
 	.owner		=	THIS_MODULE,
 	.llseek		=	default_llseek,
 };
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
deleted file mode 100644
index 3ed7a0f..0000000
--- a/block/compat_ioctl.c
+++ /dev/null
@@ -1,427 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/blkdev.h>
-#include <linux/blkpg.h>
-#include <linux/blktrace_api.h>
-#include <linux/cdrom.h>
-#include <linux/compat.h>
-#include <linux/elevator.h>
-#include <linux/hdreg.h>
-#include <linux/pr.h>
-#include <linux/slab.h>
-#include <linux/syscalls.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-
-static int compat_put_ushort(unsigned long arg, unsigned short val)
-{
-	return put_user(val, (unsigned short __user *)compat_ptr(arg));
-}
-
-static int compat_put_int(unsigned long arg, int val)
-{
-	return put_user(val, (compat_int_t __user *)compat_ptr(arg));
-}
-
-static int compat_put_uint(unsigned long arg, unsigned int val)
-{
-	return put_user(val, (compat_uint_t __user *)compat_ptr(arg));
-}
-
-static int compat_put_long(unsigned long arg, long val)
-{
-	return put_user(val, (compat_long_t __user *)compat_ptr(arg));
-}
-
-static int compat_put_ulong(unsigned long arg, compat_ulong_t val)
-{
-	return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
-}
-
-static int compat_put_u64(unsigned long arg, u64 val)
-{
-	return put_user(val, (compat_u64 __user *)compat_ptr(arg));
-}
-
-struct compat_hd_geometry {
-	unsigned char heads;
-	unsigned char sectors;
-	unsigned short cylinders;
-	u32 start;
-};
-
-static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
-			struct compat_hd_geometry __user *ugeo)
-{
-	struct hd_geometry geo;
-	int ret;
-
-	if (!ugeo)
-		return -EINVAL;
-	if (!disk->fops->getgeo)
-		return -ENOTTY;
-
-	memset(&geo, 0, sizeof(geo));
-	/*
-	 * We need to set the startsect first, the driver may
-	 * want to override it.
-	 */
-	geo.start = get_start_sect(bdev);
-	ret = disk->fops->getgeo(bdev, &geo);
-	if (ret)
-		return ret;
-
-	ret = copy_to_user(ugeo, &geo, 4);
-	ret |= put_user(geo.start, &ugeo->start);
-	if (ret)
-		ret = -EFAULT;
-
-	return ret;
-}
-
-static int compat_hdio_ioctl(struct block_device *bdev, fmode_t mode,
-		unsigned int cmd, unsigned long arg)
-{
-	unsigned long __user *p;
-	int error;
-
-	p = compat_alloc_user_space(sizeof(unsigned long));
-	error = __blkdev_driver_ioctl(bdev, mode,
-				cmd, (unsigned long)p);
-	if (error == 0) {
-		unsigned int __user *uvp = compat_ptr(arg);
-		unsigned long v;
-		if (get_user(v, p) || put_user(v, uvp))
-			error = -EFAULT;
-	}
-	return error;
-}
-
-struct compat_cdrom_read_audio {
-	union cdrom_addr	addr;
-	u8			addr_format;
-	compat_int_t		nframes;
-	compat_caddr_t		buf;
-};
-
-struct compat_cdrom_generic_command {
-	unsigned char	cmd[CDROM_PACKET_SIZE];
-	compat_caddr_t	buffer;
-	compat_uint_t	buflen;
-	compat_int_t	stat;
-	compat_caddr_t	sense;
-	unsigned char	data_direction;
-	compat_int_t	quiet;
-	compat_int_t	timeout;
-	compat_caddr_t	reserved[1];
-};
-
-static int compat_cdrom_read_audio(struct block_device *bdev, fmode_t mode,
-		unsigned int cmd, unsigned long arg)
-{
-	struct cdrom_read_audio __user *cdread_audio;
-	struct compat_cdrom_read_audio __user *cdread_audio32;
-	__u32 data;
-	void __user *datap;
-
-	cdread_audio = compat_alloc_user_space(sizeof(*cdread_audio));
-	cdread_audio32 = compat_ptr(arg);
-
-	if (copy_in_user(&cdread_audio->addr,
-			 &cdread_audio32->addr,
-			 (sizeof(*cdread_audio32) -
-			  sizeof(compat_caddr_t))))
-		return -EFAULT;
-
-	if (get_user(data, &cdread_audio32->buf))
-		return -EFAULT;
-	datap = compat_ptr(data);
-	if (put_user(datap, &cdread_audio->buf))
-		return -EFAULT;
-
-	return __blkdev_driver_ioctl(bdev, mode, cmd,
-			(unsigned long)cdread_audio);
-}
-
-static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
-		unsigned int cmd, unsigned long arg)
-{
-	struct cdrom_generic_command __user *cgc;
-	struct compat_cdrom_generic_command __user *cgc32;
-	u32 data;
-	unsigned char dir;
-	int itmp;
-
-	cgc = compat_alloc_user_space(sizeof(*cgc));
-	cgc32 = compat_ptr(arg);
-
-	if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
-	    get_user(data, &cgc32->buffer) ||
-	    put_user(compat_ptr(data), &cgc->buffer) ||
-	    copy_in_user(&cgc->buflen, &cgc32->buflen,
-			 (sizeof(unsigned int) + sizeof(int))) ||
-	    get_user(data, &cgc32->sense) ||
-	    put_user(compat_ptr(data), &cgc->sense) ||
-	    get_user(dir, &cgc32->data_direction) ||
-	    put_user(dir, &cgc->data_direction) ||
-	    get_user(itmp, &cgc32->quiet) ||
-	    put_user(itmp, &cgc->quiet) ||
-	    get_user(itmp, &cgc32->timeout) ||
-	    put_user(itmp, &cgc->timeout) ||
-	    get_user(data, &cgc32->reserved[0]) ||
-	    put_user(compat_ptr(data), &cgc->reserved[0]))
-		return -EFAULT;
-
-	return __blkdev_driver_ioctl(bdev, mode, cmd, (unsigned long)cgc);
-}
-
-struct compat_blkpg_ioctl_arg {
-	compat_int_t op;
-	compat_int_t flags;
-	compat_int_t datalen;
-	compat_caddr_t data;
-};
-
-static int compat_blkpg_ioctl(struct block_device *bdev, fmode_t mode,
-		unsigned int cmd, struct compat_blkpg_ioctl_arg __user *ua32)
-{
-	struct blkpg_ioctl_arg __user *a = compat_alloc_user_space(sizeof(*a));
-	compat_caddr_t udata;
-	compat_int_t n;
-	int err;
-
-	err = get_user(n, &ua32->op);
-	err |= put_user(n, &a->op);
-	err |= get_user(n, &ua32->flags);
-	err |= put_user(n, &a->flags);
-	err |= get_user(n, &ua32->datalen);
-	err |= put_user(n, &a->datalen);
-	err |= get_user(udata, &ua32->data);
-	err |= put_user(compat_ptr(udata), &a->data);
-	if (err)
-		return err;
-
-	return blkdev_ioctl(bdev, mode, cmd, (unsigned long)a);
-}
-
-#define BLKBSZGET_32		_IOR(0x12, 112, int)
-#define BLKBSZSET_32		_IOW(0x12, 113, int)
-#define BLKGETSIZE64_32		_IOR(0x12, 114, int)
-
-static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
-			unsigned cmd, unsigned long arg)
-{
-	switch (cmd) {
-	case HDIO_GET_UNMASKINTR:
-	case HDIO_GET_MULTCOUNT:
-	case HDIO_GET_KEEPSETTINGS:
-	case HDIO_GET_32BIT:
-	case HDIO_GET_NOWERR:
-	case HDIO_GET_DMA:
-	case HDIO_GET_NICE:
-	case HDIO_GET_WCACHE:
-	case HDIO_GET_ACOUSTIC:
-	case HDIO_GET_ADDRESS:
-	case HDIO_GET_BUSSTATE:
-		return compat_hdio_ioctl(bdev, mode, cmd, arg);
-	case CDROMREADAUDIO:
-		return compat_cdrom_read_audio(bdev, mode, cmd, arg);
-	case CDROM_SEND_PACKET:
-		return compat_cdrom_generic_command(bdev, mode, cmd, arg);
-
-	/*
-	 * No handler required for the ones below, we just need to
-	 * convert arg to a 64 bit pointer.
-	 */
-	case BLKSECTSET:
-	/*
-	 * 0x03 -- HD/IDE ioctl's used by hdparm and friends.
-	 *         Some need translations, these do not.
-	 */
-	case HDIO_GET_IDENTITY:
-	case HDIO_DRIVE_TASK:
-	case HDIO_DRIVE_CMD:
-	/* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
-	case 0x330:
-	/* CDROM stuff */
-	case CDROMPAUSE:
-	case CDROMRESUME:
-	case CDROMPLAYMSF:
-	case CDROMPLAYTRKIND:
-	case CDROMREADTOCHDR:
-	case CDROMREADTOCENTRY:
-	case CDROMSTOP:
-	case CDROMSTART:
-	case CDROMEJECT:
-	case CDROMVOLCTRL:
-	case CDROMSUBCHNL:
-	case CDROMMULTISESSION:
-	case CDROM_GET_MCN:
-	case CDROMRESET:
-	case CDROMVOLREAD:
-	case CDROMSEEK:
-	case CDROMPLAYBLK:
-	case CDROMCLOSETRAY:
-	case CDROM_DISC_STATUS:
-	case CDROM_CHANGER_NSLOTS:
-	case CDROM_GET_CAPABILITY:
-	/* Ignore cdrom.h about these next 5 ioctls, they absolutely do
-	 * not take a struct cdrom_read, instead they take a struct cdrom_msf
-	 * which is compatible.
-	 */
-	case CDROMREADMODE2:
-	case CDROMREADMODE1:
-	case CDROMREADRAW:
-	case CDROMREADCOOKED:
-	case CDROMREADALL:
-	/* DVD ioctls */
-	case DVD_READ_STRUCT:
-	case DVD_WRITE_STRUCT:
-	case DVD_AUTH:
-		arg = (unsigned long)compat_ptr(arg);
-	/* These intepret arg as an unsigned long, not as a pointer,
-	 * so we must not do compat_ptr() conversion. */
-	case HDIO_SET_MULTCOUNT:
-	case HDIO_SET_UNMASKINTR:
-	case HDIO_SET_KEEPSETTINGS:
-	case HDIO_SET_32BIT:
-	case HDIO_SET_NOWERR:
-	case HDIO_SET_DMA:
-	case HDIO_SET_PIO_MODE:
-	case HDIO_SET_NICE:
-	case HDIO_SET_WCACHE:
-	case HDIO_SET_ACOUSTIC:
-	case HDIO_SET_BUSSTATE:
-	case HDIO_SET_ADDRESS:
-	case CDROMEJECT_SW:
-	case CDROM_SET_OPTIONS:
-	case CDROM_CLEAR_OPTIONS:
-	case CDROM_SELECT_SPEED:
-	case CDROM_SELECT_DISC:
-	case CDROM_MEDIA_CHANGED:
-	case CDROM_DRIVE_STATUS:
-	case CDROM_LOCKDOOR:
-	case CDROM_DEBUG:
-		break;
-	default:
-		/* unknown ioctl number */
-		return -ENOIOCTLCMD;
-	}
-
-	return __blkdev_driver_ioctl(bdev, mode, cmd, arg);
-}
-
-/* Most of the generic ioctls are handled in the normal fallback path.
-   This assumes the blkdev's low level compat_ioctl always returns
-   ENOIOCTLCMD for unknown ioctls. */
-long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
-{
-	int ret = -ENOIOCTLCMD;
-	struct inode *inode = file->f_mapping->host;
-	struct block_device *bdev = inode->i_bdev;
-	struct gendisk *disk = bdev->bd_disk;
-	fmode_t mode = file->f_mode;
-	loff_t size;
-	unsigned int max_sectors;
-
-	/*
-	 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
-	 * to updated it before every ioctl.
-	 */
-	if (file->f_flags & O_NDELAY)
-		mode |= FMODE_NDELAY;
-	else
-		mode &= ~FMODE_NDELAY;
-
-	switch (cmd) {
-	case HDIO_GETGEO:
-		return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
-	case BLKPBSZGET:
-		return compat_put_uint(arg, bdev_physical_block_size(bdev));
-	case BLKIOMIN:
-		return compat_put_uint(arg, bdev_io_min(bdev));
-	case BLKIOOPT:
-		return compat_put_uint(arg, bdev_io_opt(bdev));
-	case BLKALIGNOFF:
-		return compat_put_int(arg, bdev_alignment_offset(bdev));
-	case BLKDISCARDZEROES:
-		return compat_put_uint(arg, 0);
-	case BLKFLSBUF:
-	case BLKROSET:
-	case BLKDISCARD:
-	case BLKSECDISCARD:
-	case BLKZEROOUT:
-	/*
-	 * the ones below are implemented in blkdev_locked_ioctl,
-	 * but we call blkdev_ioctl, which gets the lock for us
-	 */
-	case BLKRRPART:
-	case BLKREPORTZONE:
-	case BLKRESETZONE:
-	case BLKOPENZONE:
-	case BLKCLOSEZONE:
-	case BLKFINISHZONE:
-	case BLKGETZONESZ:
-	case BLKGETNRZONES:
-		return blkdev_ioctl(bdev, mode, cmd,
-				(unsigned long)compat_ptr(arg));
-	case BLKBSZSET_32:
-		return blkdev_ioctl(bdev, mode, BLKBSZSET,
-				(unsigned long)compat_ptr(arg));
-	case BLKPG:
-		return compat_blkpg_ioctl(bdev, mode, cmd, compat_ptr(arg));
-	case BLKRAGET:
-	case BLKFRAGET:
-		if (!arg)
-			return -EINVAL;
-		return compat_put_long(arg,
-			       (bdev->bd_bdi->ra_pages * PAGE_SIZE) / 512);
-	case BLKROGET: /* compatible */
-		return compat_put_int(arg, bdev_read_only(bdev) != 0);
-	case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
-		return compat_put_int(arg, block_size(bdev));
-	case BLKSSZGET: /* get block device hardware sector size */
-		return compat_put_int(arg, bdev_logical_block_size(bdev));
-	case BLKSECTGET:
-		max_sectors = min_t(unsigned int, USHRT_MAX,
-				    queue_max_sectors(bdev_get_queue(bdev)));
-		return compat_put_ushort(arg, max_sectors);
-	case BLKROTATIONAL:
-		return compat_put_ushort(arg,
-					 !blk_queue_nonrot(bdev_get_queue(bdev)));
-	case BLKRASET: /* compatible, but no compat_ptr (!) */
-	case BLKFRASET:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EACCES;
-		bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
-		return 0;
-	case BLKGETSIZE:
-		size = i_size_read(bdev->bd_inode);
-		if ((size >> 9) > ~0UL)
-			return -EFBIG;
-		return compat_put_ulong(arg, size >> 9);
-
-	case BLKGETSIZE64_32:
-		return compat_put_u64(arg, i_size_read(bdev->bd_inode));
-
-	case BLKTRACESETUP32:
-	case BLKTRACESTART: /* compatible */
-	case BLKTRACESTOP:  /* compatible */
-	case BLKTRACETEARDOWN: /* compatible */
-		ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
-		return ret;
-	case IOC_PR_REGISTER:
-	case IOC_PR_RESERVE:
-	case IOC_PR_RELEASE:
-	case IOC_PR_PREEMPT:
-	case IOC_PR_PREEMPT_ABORT:
-	case IOC_PR_CLEAR:
-		return blkdev_ioctl(bdev, mode, cmd,
-				(unsigned long)compat_ptr(arg));
-	default:
-		if (disk->fops->compat_ioctl)
-			ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
-		if (ret == -ENOIOCTLCMD)
-			ret = compat_blkdev_driver_ioctl(bdev, mode, cmd, arg);
-		return ret;
-	}
-}
diff --git a/block/ioctl.c b/block/ioctl.c
index 5de98b9..127194b 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/capability.h>
+#include <linux/compat.h>
 #include <linux/blkdev.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
@@ -11,12 +12,12 @@
 #include <linux/pr.h>
 #include <linux/uaccess.h>
 
-static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
+static int blkpg_do_ioctl(struct block_device *bdev,
+			  struct blkpg_partition __user *upart, int op)
 {
 	struct block_device *bdevp;
 	struct gendisk *disk;
 	struct hd_struct *part, *lpart;
-	struct blkpg_ioctl_arg a;
 	struct blkpg_partition p;
 	struct disk_part_iter piter;
 	long long start, length;
@@ -24,9 +25,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
-	if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
-		return -EFAULT;
-	if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
+	if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
 		return -EFAULT;
 	disk = bdev->bd_disk;
 	if (bdev != bdev->bd_contains)
@@ -34,7 +33,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
 	partno = p.pno;
 	if (partno <= 0)
 		return -EINVAL;
-	switch (a.op) {
+	switch (op) {
 		case BLKPG_ADD_PARTITION:
 			start = p.start >> 9;
 			length = p.length >> 9;
@@ -155,6 +154,39 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
 	}
 }
 
+static int blkpg_ioctl(struct block_device *bdev,
+		       struct blkpg_ioctl_arg __user *arg)
+{
+	struct blkpg_partition __user *udata;
+	int op;
+
+	if (get_user(op, &arg->op) || get_user(udata, &arg->data))
+		return -EFAULT;
+
+	return blkpg_do_ioctl(bdev, udata, op);
+}
+
+#ifdef CONFIG_COMPAT
+struct compat_blkpg_ioctl_arg {
+	compat_int_t op;
+	compat_int_t flags;
+	compat_int_t datalen;
+	compat_caddr_t data;
+};
+
+static int compat_blkpg_ioctl(struct block_device *bdev,
+			      struct compat_blkpg_ioctl_arg __user *arg)
+{
+	compat_caddr_t udata;
+	int op;
+
+	if (get_user(op, &arg->op) || get_user(udata, &arg->data))
+		return -EFAULT;
+
+	return blkpg_do_ioctl(bdev, compat_ptr(udata), op);
+}
+#endif
+
 static int blkdev_reread_part(struct block_device *bdev)
 {
 	int ret;
@@ -238,36 +270,48 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
 			BLKDEV_ZERO_NOUNMAP);
 }
 
-static int put_ushort(unsigned long arg, unsigned short val)
+static int put_ushort(unsigned short __user *argp, unsigned short val)
 {
-	return put_user(val, (unsigned short __user *)arg);
+	return put_user(val, argp);
 }
 
-static int put_int(unsigned long arg, int val)
+static int put_int(int __user *argp, int val)
 {
-	return put_user(val, (int __user *)arg);
+	return put_user(val, argp);
 }
 
-static int put_uint(unsigned long arg, unsigned int val)
+static int put_uint(unsigned int __user *argp, unsigned int val)
 {
-	return put_user(val, (unsigned int __user *)arg);
+	return put_user(val, argp);
 }
 
-static int put_long(unsigned long arg, long val)
+static int put_long(long __user *argp, long val)
 {
-	return put_user(val, (long __user *)arg);
+	return put_user(val, argp);
 }
 
-static int put_ulong(unsigned long arg, unsigned long val)
+static int put_ulong(unsigned long __user *argp, unsigned long val)
 {
-	return put_user(val, (unsigned long __user *)arg);
+	return put_user(val, argp);
 }
 
-static int put_u64(unsigned long arg, u64 val)
+static int put_u64(u64 __user *argp, u64 val)
 {
-	return put_user(val, (u64 __user *)arg);
+	return put_user(val, argp);
 }
 
+#ifdef CONFIG_COMPAT
+static int compat_put_long(compat_long_t *argp, long val)
+{
+	return put_user(val, argp);
+}
+
+static int compat_put_ulong(compat_ulong_t *argp, compat_ulong_t val)
+{
+	return put_user(val, argp);
+}
+#endif
+
 int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned cmd, unsigned long arg)
 {
@@ -285,6 +329,26 @@ int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
  */
 EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
 
+#ifdef CONFIG_COMPAT
+/*
+ * This is the equivalent of compat_ptr_ioctl(), to be used by block
+ * drivers that implement only commands that are completely compatible
+ * between 32-bit and 64-bit user space
+ */
+int blkdev_compat_ptr_ioctl(struct block_device *bdev, fmode_t mode,
+			unsigned cmd, unsigned long arg)
+{
+	struct gendisk *disk = bdev->bd_disk;
+
+	if (disk->fops->ioctl)
+		return disk->fops->ioctl(bdev, mode, cmd,
+					 (unsigned long)compat_ptr(arg));
+
+	return -ENOIOCTLCMD;
+}
+EXPORT_SYMBOL(blkdev_compat_ptr_ioctl);
+#endif
+
 static int blkdev_pr_register(struct block_device *bdev,
 		struct pr_registration __user *arg)
 {
@@ -455,6 +519,45 @@ static int blkdev_getgeo(struct block_device *bdev,
 	return 0;
 }
 
+#ifdef CONFIG_COMPAT
+struct compat_hd_geometry {
+	unsigned char heads;
+	unsigned char sectors;
+	unsigned short cylinders;
+	u32 start;
+};
+
+static int compat_hdio_getgeo(struct block_device *bdev,
+			      struct compat_hd_geometry __user *ugeo)
+{
+	struct gendisk *disk = bdev->bd_disk;
+	struct hd_geometry geo;
+	int ret;
+
+	if (!ugeo)
+		return -EINVAL;
+	if (!disk->fops->getgeo)
+		return -ENOTTY;
+
+	memset(&geo, 0, sizeof(geo));
+	/*
+	 * We need to set the startsect first, the driver may
+	 * want to override it.
+	 */
+	geo.start = get_start_sect(bdev);
+	ret = disk->fops->getgeo(bdev, &geo);
+	if (ret)
+		return ret;
+
+	ret = copy_to_user(ugeo, &geo, 4);
+	ret |= put_user(geo.start, &ugeo->start);
+	if (ret)
+		ret = -EFAULT;
+
+	return ret;
+}
+#endif
+
 /* set the logical block size */
 static int blkdev_bszset(struct block_device *bdev, fmode_t mode,
 		int __user *argp)
@@ -481,13 +584,13 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode,
 }
 
 /*
- * always keep this in sync with compat_blkdev_ioctl()
+ * Common commands that are handled the same way on native and compat
+ * user space. Note the separate arg/argp parameters that are needed
+ * to deal with the compat_ptr() conversion.
  */
-int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
-			unsigned long arg)
+static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
+				unsigned cmd, unsigned long arg, void __user *argp)
 {
-	void __user *argp = (void __user *)arg;
-	loff_t size;
 	unsigned int max_sectors;
 
 	switch (cmd) {
@@ -510,60 +613,39 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case BLKFINISHZONE:
 		return blkdev_zone_mgmt_ioctl(bdev, mode, cmd, arg);
 	case BLKGETZONESZ:
-		return put_uint(arg, bdev_zone_sectors(bdev));
+		return put_uint(argp, bdev_zone_sectors(bdev));
 	case BLKGETNRZONES:
-		return put_uint(arg, blkdev_nr_zones(bdev->bd_disk));
-	case HDIO_GETGEO:
-		return blkdev_getgeo(bdev, argp);
-	case BLKRAGET:
-	case BLKFRAGET:
-		if (!arg)
-			return -EINVAL;
-		return put_long(arg, (bdev->bd_bdi->ra_pages*PAGE_SIZE) / 512);
+		return put_uint(argp, blkdev_nr_zones(bdev->bd_disk));
 	case BLKROGET:
-		return put_int(arg, bdev_read_only(bdev) != 0);
-	case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
-		return put_int(arg, block_size(bdev));
+		return put_int(argp, bdev_read_only(bdev) != 0);
 	case BLKSSZGET: /* get block device logical block size */
-		return put_int(arg, bdev_logical_block_size(bdev));
+		return put_int(argp, bdev_logical_block_size(bdev));
 	case BLKPBSZGET: /* get block device physical block size */
-		return put_uint(arg, bdev_physical_block_size(bdev));
+		return put_uint(argp, bdev_physical_block_size(bdev));
 	case BLKIOMIN:
-		return put_uint(arg, bdev_io_min(bdev));
+		return put_uint(argp, bdev_io_min(bdev));
 	case BLKIOOPT:
-		return put_uint(arg, bdev_io_opt(bdev));
+		return put_uint(argp, bdev_io_opt(bdev));
 	case BLKALIGNOFF:
-		return put_int(arg, bdev_alignment_offset(bdev));
+		return put_int(argp, bdev_alignment_offset(bdev));
 	case BLKDISCARDZEROES:
-		return put_uint(arg, 0);
+		return put_uint(argp, 0);
 	case BLKSECTGET:
 		max_sectors = min_t(unsigned int, USHRT_MAX,
 				    queue_max_sectors(bdev_get_queue(bdev)));
-		return put_ushort(arg, max_sectors);
+		return put_ushort(argp, max_sectors);
 	case BLKROTATIONAL:
-		return put_ushort(arg, !blk_queue_nonrot(bdev_get_queue(bdev)));
+		return put_ushort(argp, !blk_queue_nonrot(bdev_get_queue(bdev)));
 	case BLKRASET:
 	case BLKFRASET:
 		if(!capable(CAP_SYS_ADMIN))
 			return -EACCES;
 		bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
 		return 0;
-	case BLKBSZSET:
-		return blkdev_bszset(bdev, mode, argp);
-	case BLKPG:
-		return blkpg_ioctl(bdev, argp);
 	case BLKRRPART:
 		return blkdev_reread_part(bdev);
-	case BLKGETSIZE:
-		size = i_size_read(bdev->bd_inode);
-		if ((size >> 9) > ~0UL)
-			return -EFBIG;
-		return put_ulong(arg, size >> 9);
-	case BLKGETSIZE64:
-		return put_u64(arg, i_size_read(bdev->bd_inode));
 	case BLKTRACESTART:
 	case BLKTRACESTOP:
-	case BLKTRACESETUP:
 	case BLKTRACETEARDOWN:
 		return blk_trace_ioctl(bdev, cmd, argp);
 	case IOC_PR_REGISTER:
@@ -579,7 +661,132 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case IOC_PR_CLEAR:
 		return blkdev_pr_clear(bdev, argp);
 	default:
-		return __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+		return -ENOIOCTLCMD;
 	}
 }
-EXPORT_SYMBOL_GPL(blkdev_ioctl);
+
+/*
+ * Always keep this in sync with compat_blkdev_ioctl()
+ * to handle all incompatible commands in both functions.
+ *
+ * New commands must be compatible and go into blkdev_common_ioctl
+ */
+int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+			unsigned long arg)
+{
+	int ret;
+	loff_t size;
+	void __user *argp = (void __user *)arg;
+
+	switch (cmd) {
+	/* These need separate implementations for the data structure */
+	case HDIO_GETGEO:
+		return blkdev_getgeo(bdev, argp);
+	case BLKPG:
+		return blkpg_ioctl(bdev, argp);
+
+	/* Compat mode returns 32-bit data instead of 'long' */
+	case BLKRAGET:
+	case BLKFRAGET:
+		if (!argp)
+			return -EINVAL;
+		return put_long(argp, (bdev->bd_bdi->ra_pages*PAGE_SIZE) / 512);
+	case BLKGETSIZE:
+		size = i_size_read(bdev->bd_inode);
+		if ((size >> 9) > ~0UL)
+			return -EFBIG;
+		return put_ulong(argp, size >> 9);
+
+	/* The data is compatible, but the command number is different */
+	case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
+		return put_int(argp, block_size(bdev));
+	case BLKBSZSET:
+		return blkdev_bszset(bdev, mode, argp);
+	case BLKGETSIZE64:
+		return put_u64(argp, i_size_read(bdev->bd_inode));
+
+	/* Incompatible alignment on i386 */
+	case BLKTRACESETUP:
+		return blk_trace_ioctl(bdev, cmd, argp);
+	default:
+		break;
+	}
+
+	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
+	if (ret == -ENOIOCTLCMD)
+		return __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(blkdev_ioctl); /* for /dev/raw */
+
+#ifdef CONFIG_COMPAT
+
+#define BLKBSZGET_32		_IOR(0x12, 112, int)
+#define BLKBSZSET_32		_IOW(0x12, 113, int)
+#define BLKGETSIZE64_32		_IOR(0x12, 114, int)
+
+/* Most of the generic ioctls are handled in the normal fallback path.
+   This assumes the blkdev's low level compat_ioctl always returns
+   ENOIOCTLCMD for unknown ioctls. */
+long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+	int ret;
+	void __user *argp = compat_ptr(arg);
+	struct inode *inode = file->f_mapping->host;
+	struct block_device *bdev = inode->i_bdev;
+	struct gendisk *disk = bdev->bd_disk;
+	fmode_t mode = file->f_mode;
+	loff_t size;
+
+	/*
+	 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
+	 * to update it before every ioctl.
+	 */
+	if (file->f_flags & O_NDELAY)
+		mode |= FMODE_NDELAY;
+	else
+		mode &= ~FMODE_NDELAY;
+
+	switch (cmd) {
+	/* These need separate implementations for the data structure */
+	case HDIO_GETGEO:
+		return compat_hdio_getgeo(bdev, argp);
+	case BLKPG:
+		return compat_blkpg_ioctl(bdev, argp);
+
+	/* Compat mode returns 32-bit data instead of 'long' */
+	case BLKRAGET:
+	case BLKFRAGET:
+		if (!argp)
+			return -EINVAL;
+		return compat_put_long(argp,
+			       (bdev->bd_bdi->ra_pages * PAGE_SIZE) / 512);
+	case BLKGETSIZE:
+		size = i_size_read(bdev->bd_inode);
+		if ((size >> 9) > ~0UL)
+			return -EFBIG;
+		return compat_put_ulong(argp, size >> 9);
+
+	/* The data is compatible, but the command number is different */
+	case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
+		return put_int(argp, bdev_logical_block_size(bdev));
+	case BLKBSZSET_32:
+		return blkdev_bszset(bdev, mode, argp);
+	case BLKGETSIZE64_32:
+		return put_u64(argp, i_size_read(bdev->bd_inode));
+
+	/* Incompatible alignment on i386 */
+	case BLKTRACESETUP32:
+		return blk_trace_ioctl(bdev, cmd, argp);
+	default:
+		break;
+	}
+
+	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
+	if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
+		ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
+
+	return ret;
+}
+#endif
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 650bade..b4e73d5 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -20,6 +20,7 @@
 #include <scsi/scsi.h>
 #include <scsi/scsi_ioctl.h>
 #include <scsi/scsi_cmnd.h>
+#include <scsi/sg.h>
 
 struct blk_cmd_filter {
 	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
@@ -550,34 +551,6 @@ static inline int blk_send_start_stop(struct request_queue *q,
 	return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
 }
 
-#ifdef CONFIG_COMPAT
-struct compat_sg_io_hdr {
-	compat_int_t interface_id;	/* [i] 'S' for SCSI generic (required) */
-	compat_int_t dxfer_direction;	/* [i] data transfer direction  */
-	unsigned char cmd_len;		/* [i] SCSI command length ( <= 16 bytes) */
-	unsigned char mx_sb_len;	/* [i] max length to write to sbp */
-	unsigned short iovec_count;	/* [i] 0 implies no scatter gather */
-	compat_uint_t dxfer_len;	/* [i] byte count of data transfer */
-	compat_uint_t dxferp;		/* [i], [*io] points to data transfer memory
-						or scatter gather list */
-	compat_uptr_t cmdp;		/* [i], [*i] points to command to perform */
-	compat_uptr_t sbp;		/* [i], [*o] points to sense_buffer memory */
-	compat_uint_t timeout;		/* [i] MAX_UINT->no timeout (unit: millisec) */
-	compat_uint_t flags;		/* [i] 0 -> default, see SG_FLAG... */
-	compat_int_t pack_id;		/* [i->o] unused internally (normally) */
-	compat_uptr_t usr_ptr;		/* [i->o] unused internally */
-	unsigned char status;		/* [o] scsi status */
-	unsigned char masked_status;	/* [o] shifted, masked scsi status */
-	unsigned char msg_status;	/* [o] messaging level data (optional) */
-	unsigned char sb_len_wr;	/* [o] byte count actually written to sbp */
-	unsigned short host_status;	/* [o] errors from host adapter */
-	unsigned short driver_status;	/* [o] errors from software driver */
-	compat_int_t resid;		/* [o] dxfer_len - actual_transferred */
-	compat_uint_t duration;		/* [o] time taken by cmd (unit: millisec) */
-	compat_uint_t info;		/* [o] auxiliary information */
-};
-#endif
-
 int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp)
 {
 #ifdef CONFIG_COMPAT
@@ -666,6 +639,136 @@ int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp)
 }
 EXPORT_SYMBOL(get_sg_io_hdr);
 
+#ifdef CONFIG_COMPAT
+struct compat_cdrom_generic_command {
+	unsigned char	cmd[CDROM_PACKET_SIZE];
+	compat_caddr_t	buffer;
+	compat_uint_t	buflen;
+	compat_int_t	stat;
+	compat_caddr_t	sense;
+	unsigned char	data_direction;
+	compat_int_t	quiet;
+	compat_int_t	timeout;
+	compat_caddr_t	reserved[1];
+};
+#endif
+
+static int scsi_get_cdrom_generic_arg(struct cdrom_generic_command *cgc,
+				      const void __user *arg)
+{
+#ifdef CONFIG_COMPAT
+	if (in_compat_syscall()) {
+		struct compat_cdrom_generic_command cgc32;
+
+		if (copy_from_user(&cgc32, arg, sizeof(cgc32)))
+			return -EFAULT;
+
+		*cgc = (struct cdrom_generic_command) {
+			.buffer		= compat_ptr(cgc32.buffer),
+			.buflen		= cgc32.buflen,
+			.stat		= cgc32.stat,
+			.sense		= compat_ptr(cgc32.sense),
+			.data_direction	= cgc32.data_direction,
+			.quiet		= cgc32.quiet,
+			.timeout	= cgc32.timeout,
+			.reserved[0]	= compat_ptr(cgc32.reserved[0]),
+		};
+		memcpy(&cgc->cmd, &cgc32.cmd, CDROM_PACKET_SIZE);
+		return 0;
+	}
+#endif
+	if (copy_from_user(cgc, arg, sizeof(*cgc)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc,
+				      void __user *arg)
+{
+#ifdef CONFIG_COMPAT
+	if (in_compat_syscall()) {
+		struct compat_cdrom_generic_command cgc32 = {
+			.buffer		= (uintptr_t)(cgc->buffer),
+			.buflen		= cgc->buflen,
+			.stat		= cgc->stat,
+			.sense		= (uintptr_t)(cgc->sense),
+			.data_direction	= cgc->data_direction,
+			.quiet		= cgc->quiet,
+			.timeout	= cgc->timeout,
+			.reserved[0]	= (uintptr_t)(cgc->reserved[0]),
+		};
+		memcpy(&cgc32.cmd, &cgc->cmd, CDROM_PACKET_SIZE);
+
+		if (copy_to_user(arg, &cgc32, sizeof(cgc32)))
+			return -EFAULT;
+
+		return 0;
+	}
+#endif
+	if (copy_to_user(arg, cgc, sizeof(*cgc)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int scsi_cdrom_send_packet(struct request_queue *q,
+				  struct gendisk *bd_disk,
+				  fmode_t mode, void __user *arg)
+{
+	struct cdrom_generic_command cgc;
+	struct sg_io_hdr hdr;
+	int err;
+
+	err = scsi_get_cdrom_generic_arg(&cgc, arg);
+	if (err)
+		return err;
+
+	cgc.timeout = clock_t_to_jiffies(cgc.timeout);
+	memset(&hdr, 0, sizeof(hdr));
+	hdr.interface_id = 'S';
+	hdr.cmd_len = sizeof(cgc.cmd);
+	hdr.dxfer_len = cgc.buflen;
+	switch (cgc.data_direction) {
+		case CGC_DATA_UNKNOWN:
+			hdr.dxfer_direction = SG_DXFER_UNKNOWN;
+			break;
+		case CGC_DATA_WRITE:
+			hdr.dxfer_direction = SG_DXFER_TO_DEV;
+			break;
+		case CGC_DATA_READ:
+			hdr.dxfer_direction = SG_DXFER_FROM_DEV;
+			break;
+		case CGC_DATA_NONE:
+			hdr.dxfer_direction = SG_DXFER_NONE;
+			break;
+		default:
+			return -EINVAL;
+	}
+
+	hdr.dxferp = cgc.buffer;
+	hdr.sbp = cgc.sense;
+	if (hdr.sbp)
+		hdr.mx_sb_len = sizeof(struct request_sense);
+	hdr.timeout = jiffies_to_msecs(cgc.timeout);
+	hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
+	hdr.cmd_len = sizeof(cgc.cmd);
+
+	err = sg_io(q, bd_disk, &hdr, mode);
+	if (err == -EFAULT)
+		return -EFAULT;
+
+	if (hdr.status)
+		return -EIO;
+
+	cgc.stat = err;
+	cgc.buflen = hdr.resid;
+	if (scsi_put_cdrom_generic_arg(&cgc, arg))
+		return -EFAULT;
+
+	return err;
+}
+
 int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode,
 		   unsigned int cmd, void __user *arg)
 {
@@ -716,60 +819,9 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
 				err = -EFAULT;
 			break;
 		}
-		case CDROM_SEND_PACKET: {
-			struct cdrom_generic_command cgc;
-			struct sg_io_hdr hdr;
-
-			err = -EFAULT;
-			if (copy_from_user(&cgc, arg, sizeof(cgc)))
-				break;
-			cgc.timeout = clock_t_to_jiffies(cgc.timeout);
-			memset(&hdr, 0, sizeof(hdr));
-			hdr.interface_id = 'S';
-			hdr.cmd_len = sizeof(cgc.cmd);
-			hdr.dxfer_len = cgc.buflen;
-			err = 0;
-			switch (cgc.data_direction) {
-				case CGC_DATA_UNKNOWN:
-					hdr.dxfer_direction = SG_DXFER_UNKNOWN;
-					break;
-				case CGC_DATA_WRITE:
-					hdr.dxfer_direction = SG_DXFER_TO_DEV;
-					break;
-				case CGC_DATA_READ:
-					hdr.dxfer_direction = SG_DXFER_FROM_DEV;
-					break;
-				case CGC_DATA_NONE:
-					hdr.dxfer_direction = SG_DXFER_NONE;
-					break;
-				default:
-					err = -EINVAL;
-			}
-			if (err)
-				break;
-
-			hdr.dxferp = cgc.buffer;
-			hdr.sbp = cgc.sense;
-			if (hdr.sbp)
-				hdr.mx_sb_len = sizeof(struct request_sense);
-			hdr.timeout = jiffies_to_msecs(cgc.timeout);
-			hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
-			hdr.cmd_len = sizeof(cgc.cmd);
-
-			err = sg_io(q, bd_disk, &hdr, mode);
-			if (err == -EFAULT)
-				break;
-
-			if (hdr.status)
-				err = -EIO;
-
-			cgc.stat = err;
-			cgc.buflen = hdr.resid;
-			if (copy_to_user(arg, &cgc, sizeof(cgc)))
-				err = -EFAULT;
-
+		case CDROM_SEND_PACKET:
+			err = scsi_cdrom_send_packet(q, bd_disk, mode, arg);
 			break;
-		}
 
 		/*
 		 * old junk scsi send command ioctl
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 3d8e530..439367a 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -171,7 +171,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
 
 	type = alg_get_type(sa->salg_type);
-	if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) {
+	if (PTR_ERR(type) == -ENOENT) {
 		request_module("algif-%s", sa->salg_type);
 		type = alg_get_type(sa->salg_type);
 	}
diff --git a/drivers/Makefile b/drivers/Makefile
index aaef17c..31cf17d 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -171,7 +171,7 @@
 obj-$(CONFIG_MCB)		+= mcb/
 obj-$(CONFIG_PERF_EVENTS)	+= perf/
 obj-$(CONFIG_RAS)		+= ras/
-obj-$(CONFIG_THUNDERBOLT)	+= thunderbolt/
+obj-$(CONFIG_USB4)		+= thunderbolt/
 obj-$(CONFIG_CORESIGHT)		+= hwtracing/coresight/
 obj-y				+= hwtracing/intel_th/
 obj-$(CONFIG_STM)		+= hwtracing/stm/
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 71511ae..ba2612e 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -161,6 +161,11 @@ static const struct apd_device_desc hip08_i2c_desc = {
 	.fixed_clk_rate = 250000000,
 };
 
+static const struct apd_device_desc hip08_lite_i2c_desc = {
+	.setup = acpi_apd_setup,
+	.fixed_clk_rate = 125000000,
+};
+
 static const struct apd_device_desc thunderx2_i2c_desc = {
 	.setup = acpi_apd_setup,
 	.fixed_clk_rate = 125000000,
@@ -243,6 +248,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
 	{ "CAV9007",  APD_ADDR(thunderx2_i2c_desc) },
 	{ "HISI02A1", APD_ADDR(hip07_i2c_desc) },
 	{ "HISI02A2", APD_ADDR(hip08_i2c_desc) },
+	{ "HISI02A3", APD_ADDR(hip08_lite_i2c_desc) },
 	{ "HISI0173", APD_ADDR(hip08_spi_desc) },
 	{ "NXP0001", APD_ADDR(nxp_i2c_desc) },
 #endif
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 70f740b..db18df6 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -69,10 +69,6 @@ ACPI_MODULE_NAME("acpi_lpss");
 #define LPSS_SAVE_CTX			BIT(4)
 #define LPSS_NO_D3_DELAY		BIT(5)
 
-/* Crystal Cove PMIC shares same ACPI ID between different platforms */
-#define BYT_CRC_HRV			2
-#define CHT_CRC_HRV			3
-
 struct lpss_private_data;
 
 struct lpss_device_desc {
@@ -158,7 +154,7 @@ static void lpss_deassert_reset(struct lpss_private_data *pdata)
  */
 static struct pwm_lookup byt_pwm_lookup[] = {
 	PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
-			       "pwm_backlight", 0, PWM_POLARITY_NORMAL,
+			       "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
 			       "pwm-lpss-platform"),
 };
 
@@ -170,8 +166,7 @@ static void byt_pwm_setup(struct lpss_private_data *pdata)
 	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
 		return;
 
-	if (!acpi_dev_present("INT33FD", NULL, BYT_CRC_HRV))
-		pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
+	pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
 }
 
 #define LPSS_I2C_ENABLE			0x6c
@@ -204,7 +199,7 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
 /* BSW PWM used for backlight control by the i915 driver */
 static struct pwm_lookup bsw_pwm_lookup[] = {
 	PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
-			       "pwm_backlight", 0, PWM_POLARITY_NORMAL,
+			       "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
 			       "pwm-lpss-platform"),
 };
 
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 6078064..ed3d2d1 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -11,6 +11,7 @@
 #define pr_fmt(fmt)	"ACPI: IORT: " fmt
 
 #include <linux/acpi_iort.h>
+#include <linux/bitfield.h>
 #include <linux/iommu.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
@@ -902,9 +903,9 @@ static inline bool iort_iommu_driver_enabled(u8 type)
 {
 	switch (type) {
 	case ACPI_IORT_NODE_SMMU_V3:
-		return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
+		return IS_ENABLED(CONFIG_ARM_SMMU_V3);
 	case ACPI_IORT_NODE_SMMU:
-		return IS_BUILTIN(CONFIG_ARM_SMMU);
+		return IS_ENABLED(CONFIG_ARM_SMMU);
 	default:
 		pr_warn("IORT node type %u does not describe an SMMU\n", type);
 		return false;
@@ -976,6 +977,20 @@ static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
 	return iort_iommu_xlate(info->dev, parent, streamid);
 }
 
+static void iort_named_component_init(struct device *dev,
+				      struct acpi_iort_node *node)
+{
+	struct acpi_iort_named_component *nc;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+	if (!fwspec)
+		return;
+
+	nc = (struct acpi_iort_named_component *)node->node_data;
+	fwspec->num_pasid_bits = FIELD_GET(ACPI_IORT_NC_PASID_BITS,
+					   nc->node_flags);
+}
+
 /**
  * iort_iommu_configure - Set-up IOMMU configuration for a device.
  *
@@ -1030,6 +1045,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
 			if (parent)
 				err = iort_iommu_xlate(dev, parent, streamid);
 		} while (parent && !err);
+
+		if (!err)
+			iort_named_component_init(dev, node);
 	}
 
 	/*
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 15cc7d5..111a407 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1202,13 +1202,12 @@ static int acpi_battery_alarm_proc_open(struct inode *inode, struct file *file)
 	return single_open(file, acpi_battery_alarm_proc_show, PDE_DATA(inode));
 }
 
-static const struct file_operations acpi_battery_alarm_fops = {
-	.owner		= THIS_MODULE,
-	.open		= acpi_battery_alarm_proc_open,
-	.read		= seq_read,
-	.write		= acpi_battery_write_alarm,
-	.llseek		= seq_lseek,
-	.release	= single_release,
+static const struct proc_ops acpi_battery_alarm_proc_ops = {
+	.proc_open	= acpi_battery_alarm_proc_open,
+	.proc_read	= seq_read,
+	.proc_write	= acpi_battery_write_alarm,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
 };
 
 static int acpi_battery_add_fs(struct acpi_device *device)
@@ -1228,7 +1227,7 @@ static int acpi_battery_add_fs(struct acpi_device *device)
 			acpi_battery_state_proc_show, acpi_driver_data(device)))
 		return -ENODEV;
 	if (!proc_create_data("alarm", S_IFREG | S_IRUGO | S_IWUSR,
-			acpi_device_dir(device), &acpi_battery_alarm_fops,
+			acpi_device_dir(device), &acpi_battery_alarm_proc_ops,
 			acpi_driver_data(device)))
 		return -ENODEV;
 	return 0;
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 652f19e..0e62ef2 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -136,18 +136,17 @@ acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
 			   PDE_DATA(inode));
 }
 
-static const struct file_operations acpi_system_wakeup_device_fops = {
-	.owner = THIS_MODULE,
-	.open = acpi_system_wakeup_device_open_fs,
-	.read = seq_read,
-	.write = acpi_system_write_wakeup_device,
-	.llseek = seq_lseek,
-	.release = single_release,
+static const struct proc_ops acpi_system_wakeup_device_proc_ops = {
+	.proc_open	= acpi_system_wakeup_device_open_fs,
+	.proc_read	= seq_read,
+	.proc_write	= acpi_system_write_wakeup_device,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
 };
 
 void __init acpi_sleep_proc_init(void)
 {
 	/* 'wakeup device' [R/W] */
 	proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
-		    acpi_root_dir, &acpi_system_wakeup_device_fops);
+		    acpi_root_dir, &acpi_system_wakeup_device_proc_ops);
 }
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 915650b..6d34488 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1462,7 +1462,7 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
 	iort_dma_setup(dev, &dma_addr, &size);
 
 	iommu = iort_iommu_configure(dev);
-	if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+	if (PTR_ERR(iommu) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 
 	arch_setup_dma_ops(dev, dma_addr, size,
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index d831a61..19067a5 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -27,6 +27,7 @@
 #include <linux/acpi.h>
 #include <linux/workqueue.h>
 #include <linux/uaccess.h>
+#include <linux/units.h>
 
 #define PREFIX "ACPI: "
 
@@ -172,7 +173,7 @@ struct acpi_thermal {
 	struct acpi_handle_list devices;
 	struct thermal_zone_device *thermal_zone;
 	int tz_enabled;
-	int kelvin_offset;
+	int kelvin_offset;	/* in millidegrees */
 	struct work_struct thermal_check_work;
 };
 
@@ -297,7 +298,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
 			if (crt == -1) {
 				tz->trips.critical.flags.valid = 0;
 			} else if (crt > 0) {
-				unsigned long crt_k = CELSIUS_TO_DECI_KELVIN(crt);
+				unsigned long crt_k = celsius_to_deci_kelvin(crt);
+
 				/*
 				 * Allow override critical threshold
 				 */
@@ -333,7 +335,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
 		if (psv == -1) {
 			status = AE_SUPPORT;
 		} else if (psv > 0) {
-			tmp = CELSIUS_TO_DECI_KELVIN(psv);
+			tmp = celsius_to_deci_kelvin(psv);
 			status = AE_OK;
 		} else {
 			status = acpi_evaluate_integer(tz->device->handle,
@@ -413,7 +415,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
 					break;
 				if (i == 1)
 					tz->trips.active[0].temperature =
-						CELSIUS_TO_DECI_KELVIN(act);
+						celsius_to_deci_kelvin(act);
 				else
 					/*
 					 * Don't allow override higher than
@@ -421,9 +423,9 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
 					 */
 					tz->trips.active[i - 1].temperature =
 						(tz->trips.active[i - 2].temperature <
-						CELSIUS_TO_DECI_KELVIN(act) ?
+						celsius_to_deci_kelvin(act) ?
 						tz->trips.active[i - 2].temperature :
-						CELSIUS_TO_DECI_KELVIN(act));
+						celsius_to_deci_kelvin(act));
 				break;
 			} else {
 				tz->trips.active[i].temperature = tmp;
@@ -519,7 +521,7 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
 	if (result)
 		return result;
 
-	*temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(tz->temperature,
+	*temp = deci_kelvin_to_millicelsius_with_offset(tz->temperature,
 							tz->kelvin_offset);
 	return 0;
 }
@@ -624,7 +626,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
 
 	if (tz->trips.critical.flags.valid) {
 		if (!trip) {
-			*temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+			*temp = deci_kelvin_to_millicelsius_with_offset(
 				tz->trips.critical.temperature,
 				tz->kelvin_offset);
 			return 0;
@@ -634,7 +636,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
 
 	if (tz->trips.hot.flags.valid) {
 		if (!trip) {
-			*temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+			*temp = deci_kelvin_to_millicelsius_with_offset(
 				tz->trips.hot.temperature,
 				tz->kelvin_offset);
 			return 0;
@@ -644,7 +646,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
 
 	if (tz->trips.passive.flags.valid) {
 		if (!trip) {
-			*temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+			*temp = deci_kelvin_to_millicelsius_with_offset(
 				tz->trips.passive.temperature,
 				tz->kelvin_offset);
 			return 0;
@@ -655,7 +657,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
 	for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
 		tz->trips.active[i].flags.valid; i++) {
 		if (!trip) {
-			*temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+			*temp = deci_kelvin_to_millicelsius_with_offset(
 				tz->trips.active[i].temperature,
 				tz->kelvin_offset);
 			return 0;
@@ -672,7 +674,7 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
 	struct acpi_thermal *tz = thermal->devdata;
 
 	if (tz->trips.critical.flags.valid) {
-		*temperature = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+		*temperature = deci_kelvin_to_millicelsius_with_offset(
 				tz->trips.critical.temperature,
 				tz->kelvin_offset);
 		return 0;
@@ -692,7 +694,7 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
 
 	if (type == THERMAL_TRIP_ACTIVE) {
 		int trip_temp;
-		int temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+		int temp = deci_kelvin_to_millicelsius_with_offset(
 					tz->temperature, tz->kelvin_offset);
 		if (thermal_get_trip_temp(thermal, trip, &trip_temp))
 			return -EINVAL;
@@ -1043,9 +1045,9 @@ static void acpi_thermal_guess_offset(struct acpi_thermal *tz)
 {
 	if (tz->trips.critical.flags.valid &&
 	    (tz->trips.critical.temperature % 5) == 1)
-		tz->kelvin_offset = 2731;
+		tz->kelvin_offset = 273100;
 	else
-		tz->kelvin_offset = 2732;
+		tz->kelvin_offset = 273200;
 }
 
 static void acpi_thermal_check_fn(struct work_struct *work)
@@ -1087,7 +1089,7 @@ static int acpi_thermal_add(struct acpi_device *device)
 	INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
 
 	pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
-		acpi_device_bid(device), DECI_KELVIN_TO_CELSIUS(tz->temperature));
+		acpi_device_bid(device), deci_kelvin_to_celsius(tz->temperature));
 	goto end;
 
 free_memory:
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index b2dad43..a6b2082 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2249,10 +2249,12 @@ static void binder_deferred_fd_close(int fd)
 		return;
 	init_task_work(&twcb->twork, binder_do_fd_close);
 	__close_fd_get_file(fd, &twcb->file);
-	if (twcb->file)
+	if (twcb->file) {
+		filp_close(twcb->file, current->files);
 		task_work_add(current, &twcb->twork, true);
-	else
+	} else {
 		kfree(twcb);
+	}
 }
 
 static void binder_transaction_buffer_release(struct binder_proc *proc,
@@ -5199,10 +5201,11 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 
 static int binder_open(struct inode *nodp, struct file *filp)
 {
-	struct binder_proc *proc;
+	struct binder_proc *proc, *itr;
 	struct binder_device *binder_dev;
 	struct binderfs_info *info;
 	struct dentry *binder_binderfs_dir_entry_proc = NULL;
+	bool existing_pid = false;
 
 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
 		     current->group_leader->pid, current->pid);
@@ -5235,19 +5238,24 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	filp->private_data = proc;
 
 	mutex_lock(&binder_procs_lock);
+	hlist_for_each_entry(itr, &binder_procs, proc_node) {
+		if (itr->pid == proc->pid) {
+			existing_pid = true;
+			break;
+		}
+	}
 	hlist_add_head(&proc->proc_node, &binder_procs);
 	mutex_unlock(&binder_procs_lock);
 
-	if (binder_debugfs_dir_entry_proc) {
+	if (binder_debugfs_dir_entry_proc && !existing_pid) {
 		char strbuf[11];
 
 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
 		/*
-		 * proc debug entries are shared between contexts, so
-		 * this will fail if the process tries to open the driver
-		 * again with a different context. The priting code will
-		 * anyway print all contexts that a given PID has, so this
-		 * is not a problem.
+		 * proc debug entries are shared between contexts.
+		 * Only create for the first PID to avoid debugfs log spamming
+		 * The printing code will anyway print all contexts for a given
+		 * PID so this is not a problem.
 		 */
 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
 			binder_debugfs_dir_entry_proc,
@@ -5255,19 +5263,16 @@ static int binder_open(struct inode *nodp, struct file *filp)
 			&proc_fops);
 	}
 
-	if (binder_binderfs_dir_entry_proc) {
+	if (binder_binderfs_dir_entry_proc && !existing_pid) {
 		char strbuf[11];
 		struct dentry *binderfs_entry;
 
 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
 		/*
 		 * Similar to debugfs, the process specific log file is shared
-		 * between contexts. If the file has already been created for a
-		 * process, the following binderfs_create_file() call will
-		 * fail with error code EEXIST if another context of the same
-		 * process invoked binder_open(). This is ok since same as
-		 * debugfs, the log file will contain information on all
-		 * contexts of a given PID.
+		 * between contexts. Only create for the first PID.
+		 * This is ok since, as with debugfs, the log file will contain
+		 * information on all contexts of a given PID.
 		 */
 		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
 			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
@@ -5277,10 +5282,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
 			int error;
 
 			error = PTR_ERR(binderfs_entry);
-			if (error != -EEXIST) {
-				pr_warn("Unable to create file %s in binderfs (error %d)\n",
-					strbuf, error);
-			}
+			pr_warn("Unable to create file %s in binderfs (error %d)\n",
+				strbuf, error);
 		}
 	}
 
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 4bfd1b1..11ea1af 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -81,6 +81,7 @@ enum board_ids {
 
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void ahci_remove_one(struct pci_dev *dev);
+static void ahci_shutdown_one(struct pci_dev *dev);
 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
 				 unsigned long deadline);
 static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@@ -606,6 +607,7 @@ static struct pci_driver ahci_pci_driver = {
 	.id_table		= ahci_pci_tbl,
 	.probe			= ahci_init_one,
 	.remove			= ahci_remove_one,
+	.shutdown		= ahci_shutdown_one,
 	.driver = {
 		.pm		= &ahci_pci_pm_ops,
 	},
@@ -1877,6 +1879,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return 0;
 }
 
+static void ahci_shutdown_one(struct pci_dev *pdev)
+{
+	ata_pci_shutdown_one(pdev);
+}
+
 static void ahci_remove_one(struct pci_dev *pdev)
 {
 	pm_runtime_get_noresume(&pdev->dev);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6f4ab5c..42c8728 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6767,6 +6767,26 @@ void ata_pci_remove_one(struct pci_dev *pdev)
 	ata_host_detach(host);
 }
 
+void ata_pci_shutdown_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		ap->pflags |= ATA_PFLAG_FROZEN;
+
+		/* Disable port interrupts */
+		if (ap->ops->freeze)
+			ap->ops->freeze(ap);
+
+		/* Stop the port DMA engines */
+		if (ap->ops->port_stop)
+			ap->ops->port_stop(ap);
+	}
+}
+
 /* move to PCI subsystem */
 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
 {
@@ -7387,6 +7407,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
 
 #ifdef CONFIG_PCI
 EXPORT_SYMBOL_GPL(pci_test_config_bits);
+EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
 #ifdef CONFIG_PM
 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 58e09ff..eb2eb599 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -17,6 +17,7 @@
  *  - http://www.t13.org/
  */
 
+#include <linux/compat.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/blkdev.h>
@@ -761,6 +762,10 @@ static int ata_ioc32(struct ata_port *ap)
 	return 0;
 }
 
+/*
+ * This handles both native and compat commands, so anything added
+ * here must have a compatible argument, or check in_compat_syscall()
+ */
 int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
 		     unsigned int cmd, void __user *arg)
 {
@@ -773,6 +778,10 @@ int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
 		spin_lock_irqsave(ap->lock, flags);
 		val = ata_ioc32(ap);
 		spin_unlock_irqrestore(ap->lock, flags);
+#ifdef CONFIG_COMPAT
+		if (in_compat_syscall())
+			return put_user(val, (compat_ulong_t __user *)arg);
+#endif
 		return put_user(val, (unsigned long __user *)arg);
 
 	case HDIO_SET_32BIT:
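
Editor's note: the HDIO_GET_32BIT branch above copies the result back with a 32-bit put_user() when the caller is a 32-bit task, because the ioctl argument is an unsigned long in the native ABI but a compat_ulong_t in the compat ABI. A minimal sketch of that pattern, using a hypothetical helper name rather than the driver's code:

/* Sketch: copy a native 'unsigned long' result back to user space,
 * shrinking it to 32 bits for callers running in compat mode.
 */
static int copy_ulong_to_user(unsigned long val, void __user *arg)
{
#ifdef CONFIG_COMPAT
	if (in_compat_syscall())
		return put_user(val, (compat_ulong_t __user *)arg);
#endif
	return put_user(val, (unsigned long __user *)arg);
}
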
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 391dff0..e9cf31f 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -526,9 +526,10 @@ static void data_xfer(struct work_struct *work)
 
 	/* request dma channels */
 	/* dma_request_channel may sleep, so calling from process context */
-	acdev->dma_chan = dma_request_slave_channel(acdev->host->dev, "data");
-	if (!acdev->dma_chan) {
+	acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
+	if (IS_ERR(acdev->dma_chan)) {
 		dev_err(acdev->host->dev, "Unable to get dma_chan\n");
+		acdev->dma_chan = NULL;
 		goto chan_request_fail;
 	}
 
@@ -539,6 +540,7 @@ static void data_xfer(struct work_struct *work)
 	}
 
 	dma_release_channel(acdev->dma_chan);
+	acdev->dma_chan = NULL;
 
 	/* data xferred successfully */
 	if (!ret) {
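
Editor's note: unlike dma_request_slave_channel(), which returns NULL on any failure and hides the reason, dma_request_chan() returns an ERR_PTR() that preserves the error (including -EPROBE_DEFER). A hedged sketch of the calling convention, assuming a generic device pointer and the same "data" channel name:

static int request_data_channel(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan = dma_request_chan(dev, "data");

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER; never cache the error pointer */

	*out = chan;			/* release later with dma_release_channel() */
	return 0;
}
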
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 3fe0754..8eb066a 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -309,6 +309,7 @@ static const struct pcmcia_device_id pcmcia_devices[] = {
 	PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000),	/* Toshiba */
 	PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
 	PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000),	/* Samsung */
+	PCMCIA_DEVICE_MANF_CARD(0x00f1, 0x0101),	/* SanDisk High (>8G) CFA */
 	PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000),	/* Hitachi */
 	PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
 	PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100),	/* Viking CFA */
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 4074886..2002291 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -57,7 +57,7 @@ static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	return vm_map_pages_zero(vma, &pages, 1);
 }
 
-static struct fb_ops cfag12864bfb_ops = {
+static const struct fb_ops cfag12864bfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_read = fb_sys_read,
 	.fb_write = fb_sys_write,
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index a2fcde5..d951d54 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -228,7 +228,7 @@ static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	return vm_map_pages_zero(vma, &pages, 1);
 }
 
-static struct fb_ops ht16k33_fb_ops = {
+static const struct fb_ops ht16k33_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_read = fb_sys_read,
 	.fb_write = fb_sys_write,
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index c3b3b5c..5f0bc74 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -150,7 +150,7 @@
 
 config PM_QOS_KUNIT_TEST
 	bool "KUnit Test for PM QoS features"
-	depends on KUNIT
+	depends on KUNIT=y
 
 config HMEM_REPORTING
 	bool
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1eb81f11..6119e11 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -248,6 +248,16 @@ core_initcall(free_raw_capacity);
 #endif
 
 #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+/*
+ * This function returns the logical CPU number of the node.
+ * There are basically three kinds of return values:
+ * (1) logical CPU number, which is >= 0.
+ * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
+ * there is no possible logical CPU in the kernel to match. This happens
+ * when CONFIG_NR_CPUS is configured to be smaller than the number of
+ * CPU nodes in the DT. We need to just ignore this case.
+ * (3) -1 if the node does not exist in the device tree.
+ */
 static int __init get_cpu_for_node(struct device_node *node)
 {
 	struct device_node *cpu_node;
@@ -261,7 +271,8 @@ static int __init get_cpu_for_node(struct device_node *node)
 	if (cpu >= 0)
 		topology_parse_cpu_capacity(cpu_node, cpu);
 	else
-		pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
+		pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n",
+			cpu_node, cpumask_pr_args(cpu_possible_mask));
 
 	of_node_put(cpu_node);
 	return cpu;
@@ -286,9 +297,8 @@ static int __init parse_core(struct device_node *core, int package_id,
 				cpu_topology[cpu].package_id = package_id;
 				cpu_topology[cpu].core_id = core_id;
 				cpu_topology[cpu].thread_id = i;
-			} else {
-				pr_err("%pOF: Can't get CPU for thread\n",
-				       t);
+			} else if (cpu != -ENODEV) {
+				pr_err("%pOF: Can't get CPU for thread\n", t);
 				of_node_put(t);
 				return -EINVAL;
 			}
@@ -307,7 +317,7 @@ static int __init parse_core(struct device_node *core, int package_id,
 
 		cpu_topology[cpu].package_id = package_id;
 		cpu_topology[cpu].core_id = core_id;
-	} else if (leaf) {
+	} else if (leaf && cpu != -ENODEV) {
 		pr_err("%pOF: Can't get CPU for leaf core\n", core);
 		return -EINVAL;
 	}
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 20736aa..f7bd0f4 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -236,6 +236,109 @@ attribute_container_remove_device(struct device *dev,
 	mutex_unlock(&attribute_container_mutex);
 }
 
+static int
+do_attribute_container_device_trigger_safe(struct device *dev,
+					   struct attribute_container *cont,
+					   int (*fn)(struct attribute_container *,
+						     struct device *, struct device *),
+					   int (*undo)(struct attribute_container *,
+						       struct device *, struct device *))
+{
+	int ret;
+	struct internal_container *ic, *failed;
+	struct klist_iter iter;
+
+	if (attribute_container_no_classdevs(cont))
+		return fn(cont, dev, NULL);
+
+	klist_for_each_entry(ic, &cont->containers, node, &iter) {
+		if (dev == ic->classdev.parent) {
+			ret = fn(cont, dev, &ic->classdev);
+			if (ret) {
+				failed = ic;
+				klist_iter_exit(&iter);
+				goto fail;
+			}
+		}
+	}
+	return 0;
+
+fail:
+	if (!undo)
+		return ret;
+
+	/* Attempt to undo the work partially done. */
+	klist_for_each_entry(ic, &cont->containers, node, &iter) {
+		if (ic == failed) {
+			klist_iter_exit(&iter);
+			break;
+		}
+		if (dev == ic->classdev.parent)
+			undo(cont, dev, &ic->classdev);
+	}
+	return ret;
+}
+
+/**
+ * attribute_container_device_trigger_safe - execute a trigger for each
+ * matching classdev or fail all of them.
+ *
+ * @dev:  The generic device to run the trigger for
+ * @fn:   The function to execute for each classdev.
+ * @undo: A function to undo the work previously done in case of error
+ *
+ * This function is a safe version of
+ * attribute_container_device_trigger(). It stops on the first error and
+ * undoes the partial work that has been done on the previous classdevs.
+ * It is guaranteed that either they all succeed, or none of them do.
+ */
+int
+attribute_container_device_trigger_safe(struct device *dev,
+					int (*fn)(struct attribute_container *,
+						  struct device *,
+						  struct device *),
+					int (*undo)(struct attribute_container *,
+						    struct device *,
+						    struct device *))
+{
+	struct attribute_container *cont, *failed = NULL;
+	int ret = 0;
+
+	mutex_lock(&attribute_container_mutex);
+
+	list_for_each_entry(cont, &attribute_container_list, node) {
+
+		if (!cont->match(cont, dev))
+			continue;
+
+		ret = do_attribute_container_device_trigger_safe(dev, cont,
+								 fn, undo);
+		if (ret) {
+			failed = cont;
+			break;
+		}
+	}
+
+	if (ret && !WARN_ON(!undo)) {
+		list_for_each_entry(cont, &attribute_container_list, node) {
+
+			if (failed == cont)
+				break;
+
+			if (!cont->match(cont, dev))
+				continue;
+
+			do_attribute_container_device_trigger_safe(dev, cont,
+								   undo, NULL);
+		}
+	}
+
+	mutex_unlock(&attribute_container_mutex);
+	return ret;
+
+}
+
 /**
  * attribute_container_device_trigger - execute a trigger for each matching classdev
  *
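
Editor's note: attribute_container_device_trigger_safe() gives callers all-or-nothing semantics, running @fn for every matching classdev and, on the first failure, running @undo over the classdevs already processed. A hedged sketch of how a caller might pair the two callbacks (the names here are illustrative, not from the patch); the transport_add_device() change later in this diff is converted to exactly this shape:

static int my_add(struct attribute_container *cont,
		  struct device *dev, struct device *classdev)
{
	return device_add(classdev);		/* may fail */
}

static int my_remove(struct attribute_container *cont,
		     struct device *dev, struct device *classdev)
{
	device_del(classdev);			/* undo of my_add */
	return 0;
}

static int my_setup(struct device *dev)
{
	/* either every matching classdev gets added, or none stay added */
	return attribute_container_device_trigger_safe(dev, my_add, my_remove);
}
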
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 0d32544..40fb069 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -1,4 +1,15 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2012 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * Core driver model functions and structures that should not be
+ * shared outside of the drivers/base/ directory.
+ *
+ */
 #include <linux/notifier.h>
 
 /**
@@ -175,3 +186,11 @@ extern void device_links_unbind_consumers(struct device *dev);
 
 /* device pm support */
 void device_pm_move_to_tail(struct device *dev);
+
+#ifdef CONFIG_DEVTMPFS
+int devtmpfs_create_node(struct device *dev);
+int devtmpfs_delete_node(struct device *dev);
+#else
+static inline int devtmpfs_create_node(struct device *dev) { return 0; }
+static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
+#endif
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index a1d1e82..886e905 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/async.h>
+#include <linux/device/bus.h>
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/errno.h>
diff --git a/drivers/base/class.c b/drivers/base/class.c
index d8a6a58..bcd410e 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -8,6 +8,7 @@
  * Copyright (c) 2003-2004 IBM Corp.
  */
 
+#include <linux/device/class.h>
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/init.h>
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 532a3a5..c7879f5 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -11,7 +11,6 @@
 #include <linux/device.h>
 #include <linux/kref.h>
 #include <linux/list.h>
-#include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
@@ -102,11 +101,11 @@ static int component_devices_show(struct seq_file *s, void *data)
 	seq_printf(s, "%-40s %20s\n", "device name", "status");
 	seq_puts(s, "-------------------------------------------------------------\n");
 	for (i = 0; i < match->num; i++) {
-		struct device *d = (struct device *)match->compare[i].data;
+		struct component *component = match->compare[i].component;
 
-		seq_printf(s, "%-40s %20s\n", dev_name(d),
-			   match->compare[i].component ?
-			   "registered" : "not registered");
+		seq_printf(s, "%-40s %20s\n",
+			   component ? dev_name(component->dev) : "(unknown)",
+			   component ? (component->bound ? "bound" : "not bound") : "not registered");
 	}
 	mutex_unlock(&component_mutex);
 
@@ -775,5 +774,3 @@ void component_del(struct device *dev, const struct component_ops *ops)
 	kfree(component);
 }
 EXPORT_SYMBOL_GPL(component_del);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d811e60..b25bcab 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -516,7 +516,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
 	atomic_inc(&probe_count);
 	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
 		 drv->bus->name, __func__, drv->name, dev_name(dev));
-	WARN_ON(!list_empty(&dev->devres_head));
+	if (!list_empty(&dev->devres_head)) {
+		dev_crit(dev, "Resources present before probing\n");
+		return -EBUSY;
+	}
 
 re_probe:
 	dev->driver = drv;
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 6cdbf15..c9017e0 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -30,11 +30,7 @@
 
 static struct task_struct *thread;
 
-#if defined CONFIG_DEVTMPFS_MOUNT
-static int mount_dev = 1;
-#else
-static int mount_dev;
-#endif
+static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT);
 
 static DEFINE_SPINLOCK(req_lock);
 
@@ -71,10 +67,10 @@ static struct file_system_type internal_fs_type = {
 	.name = "devtmpfs",
 #ifdef CONFIG_TMPFS
 	.init_fs_context = shmem_init_fs_context,
-	.parameters	= &shmem_fs_parameters,
+	.parameters	= shmem_fs_parameters,
 #else
 	.init_fs_context = ramfs_init_fs_context,
-	.parameters	= &ramfs_fs_parameters,
+	.parameters	= ramfs_fs_parameters,
 #endif
 	.kill_sb = kill_litter_super,
 };
@@ -93,6 +89,23 @@ static inline int is_blockdev(struct device *dev)
 static inline int is_blockdev(struct device *dev) { return 0; }
 #endif
 
+static int devtmpfs_submit_req(struct req *req, const char *tmp)
+{
+	init_completion(&req->done);
+
+	spin_lock(&req_lock);
+	req->next = requests;
+	requests = req;
+	spin_unlock(&req_lock);
+
+	wake_up_process(thread);
+	wait_for_completion(&req->done);
+
+	kfree(tmp);
+
+	return req->err;
+}
+
 int devtmpfs_create_node(struct device *dev)
 {
 	const char *tmp = NULL;
@@ -117,19 +130,7 @@ int devtmpfs_create_node(struct device *dev)
 
 	req.dev = dev;
 
-	init_completion(&req.done);
-
-	spin_lock(&req_lock);
-	req.next = requests;
-	requests = &req;
-	spin_unlock(&req_lock);
-
-	wake_up_process(thread);
-	wait_for_completion(&req.done);
-
-	kfree(tmp);
-
-	return req.err;
+	return devtmpfs_submit_req(&req, tmp);
 }
 
 int devtmpfs_delete_node(struct device *dev)
@@ -147,18 +148,7 @@ int devtmpfs_delete_node(struct device *dev)
 	req.mode = 0;
 	req.dev = dev;
 
-	init_completion(&req.done);
-
-	spin_lock(&req_lock);
-	req.next = requests;
-	requests = &req;
-	spin_unlock(&req_lock);
-
-	wake_up_process(thread);
-	wait_for_completion(&req.done);
-
-	kfree(tmp);
-	return req.err;
+	return devtmpfs_submit_req(&req, tmp);
 }
 
 static int dev_mkdir(const char *name, umode_t mode)
@@ -359,7 +349,7 @@ static int handle_remove(const char *nodename, struct device *dev)
  * If configured, or requested by the commandline, devtmpfs will be
  * auto-mounted after the kernel mounted the root filesystem.
  */
-int devtmpfs_mount(void)
+int __init devtmpfs_mount(void)
 {
 	int err;
 
@@ -388,18 +378,30 @@ static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
 		return handle_remove(name, dev);
 }
 
-static int devtmpfsd(void *p)
+static int devtmpfs_setup(void *p)
 {
-	int *err = p;
-	*err = ksys_unshare(CLONE_NEWNS);
-	if (*err)
+	int err;
+
+	err = ksys_unshare(CLONE_NEWNS);
+	if (err)
 		goto out;
-	*err = do_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL);
-	if (*err)
+	err = do_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL);
+	if (err)
 		goto out;
 	ksys_chdir("/.."); /* will traverse into overmounted root */
 	ksys_chroot(".");
+out:
+	*(int *)p = err;
 	complete(&setup_done);
+	return err;
+}
+
+static int devtmpfsd(void *p)
+{
+	int err = devtmpfs_setup(p);
+
+	if (err)
+		return err;
 	while (1) {
 		spin_lock(&req_lock);
 		while (requests) {
@@ -420,9 +422,6 @@ static int devtmpfsd(void *p)
 		schedule();
 	}
 	return 0;
-out:
-	complete(&setup_done);
-	return *err;
 }
 
 /*
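
Editor's note: devtmpfs_submit_req() factors out the producer half of the devtmpfsd request queue: publish a request under the spinlock, wake the single worker thread, then sleep on a per-request completion that the worker signals once it has filled in req->err. A hedged, stripped-down sketch of that shape, assuming globals req_lock, requests and worker as in the driver (this is not the kernel's actual struct req):

struct my_req {
	struct my_req *next;
	struct completion done;
	int err;
	/* ... request payload ... */
};

static int submit(struct my_req *req)
{
	init_completion(&req->done);

	spin_lock(&req_lock);			/* publish onto the singly linked list */
	req->next = requests;
	requests = req;
	spin_unlock(&req_lock);

	wake_up_process(worker);		/* kick the worker thread */
	wait_for_completion(&req->done);	/* worker sets req->err, then completes */

	return req->err;
}
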
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 4e5ca63..57c6876 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -8,6 +8,7 @@
  * Copyright (c) 2007 Novell Inc.
  */
 
+#include <linux/device/driver.h>
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/errno.h>
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 62ee90b..8704e1b 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -606,7 +606,7 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
 		return false;
 	}
 
-	if ((opt_flags & FW_OPT_NOFALLBACK))
+	if ((opt_flags & FW_OPT_NOFALLBACK_SYSFS))
 		return false;
 
 	/* Also permit LSMs and IMA to fail firmware sysfs fallback */
@@ -630,10 +630,11 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
  * interface. Userspace is in charge of loading the firmware through the sysfs
  * loading interface. This sysfs fallback mechanism may be disabled completely
  * on a system by setting the proc sysctl value ignore_sysfs_fallback to true.
- * If this false we check if the internal API caller set the @FW_OPT_NOFALLBACK
- * flag, if so it would also disable the fallback mechanism. A system may want
- * to enfoce the sysfs fallback mechanism at all times, it can do this by
- * setting ignore_sysfs_fallback to false and force_sysfs_fallback to true.
+ * If this is false, we check whether the internal API caller set the
+ * @FW_OPT_NOFALLBACK_SYSFS flag; if so, the fallback mechanism is disabled
+ * as well. A system may want to enforce the sysfs fallback mechanism at all
+ * times; it can do this by setting ignore_sysfs_fallback to false and
+ * force_sysfs_fallback to true.
  * Enabling force_sysfs_fallback is functionally equivalent to build a kernel
  * with CONFIG_FW_LOADER_USER_HELPER_FALLBACK.
  **/
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 7ecd590..8656e52 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -27,16 +27,16 @@
  *	firmware file lookup on storage is avoided. Used for calls where the
  *	file may be too big, or where the driver takes charge of its own
  *	firmware caching mechanism.
- * @FW_OPT_NOFALLBACK: Disable the fallback mechanism. Takes precedence over
- *	&FW_OPT_UEVENT and &FW_OPT_USERHELPER.
+ * @FW_OPT_NOFALLBACK_SYSFS: Disable the sysfs fallback mechanism. Takes
+ *	precedence over &FW_OPT_UEVENT and &FW_OPT_USERHELPER.
  */
 enum fw_opt {
-	FW_OPT_UEVENT =         BIT(0),
-	FW_OPT_NOWAIT =         BIT(1),
-	FW_OPT_USERHELPER =     BIT(2),
-	FW_OPT_NO_WARN =        BIT(3),
-	FW_OPT_NOCACHE =        BIT(4),
-	FW_OPT_NOFALLBACK =     BIT(5),
+	FW_OPT_UEVENT			= BIT(0),
+	FW_OPT_NOWAIT			= BIT(1),
+	FW_OPT_USERHELPER		= BIT(2),
+	FW_OPT_NO_WARN			= BIT(3),
+	FW_OPT_NOCACHE			= BIT(4),
+	FW_OPT_NOFALLBACK_SYSFS		= BIT(5),
 };
 
 enum fw_status {
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 249add8..57133a9 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -877,7 +877,7 @@ int request_firmware_direct(const struct firmware **firmware_p,
 	__module_get(THIS_MODULE);
 	ret = _request_firmware(firmware_p, name, device, NULL, 0,
 				FW_OPT_UEVENT | FW_OPT_NO_WARN |
-				FW_OPT_NOFALLBACK);
+				FW_OPT_NOFALLBACK_SYSFS);
 	module_put(THIS_MODULE);
 	return ret;
 }
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 799b431..b9f474c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -70,20 +70,6 @@ void unregister_memory_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_memory_notifier);
 
-static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);
-
-int register_memory_isolate_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_register(&memory_isolate_chain, nb);
-}
-EXPORT_SYMBOL(register_memory_isolate_notifier);
-
-void unregister_memory_isolate_notifier(struct notifier_block *nb)
-{
-	atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
-}
-EXPORT_SYMBOL(unregister_memory_isolate_notifier);
-
 static void memory_block_release(struct device *dev)
 {
 	struct memory_block *mem = to_memory_block(dev);
@@ -175,11 +161,6 @@ int memory_notify(unsigned long val, void *v)
 	return blocking_notifier_call_chain(&memory_chain, val, v);
 }
 
-int memory_isolate_notify(unsigned long val, void *v)
-{
-	return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
-}
-
 /*
  * The probe routines leave the pages uninitialized, just as the bootmem code
  * does. Make sure we do not access them, but instead use only information from
@@ -225,7 +206,7 @@ static bool pages_correctly_probed(unsigned long start_pfn)
  */
 static int
 memory_block_action(unsigned long start_section_nr, unsigned long action,
-		    int online_type)
+		    int online_type, int nid)
 {
 	unsigned long start_pfn;
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
@@ -238,7 +219,7 @@ memory_block_action(unsigned long start_section_nr, unsigned long action,
 		if (!pages_correctly_probed(start_pfn))
 			return -EBUSY;
 
-		ret = online_pages(start_pfn, nr_pages, online_type);
+		ret = online_pages(start_pfn, nr_pages, online_type, nid);
 		break;
 	case MEM_OFFLINE:
 		ret = offline_pages(start_pfn, nr_pages);
@@ -264,7 +245,7 @@ static int memory_block_change_state(struct memory_block *mem,
 		mem->state = MEM_GOING_OFFLINE;
 
 	ret = memory_block_action(mem->start_section_nr, to_state,
-				mem->online_type);
+				  mem->online_type, mem->nid);
 
 	mem->state = ret ? from_state_req : to_state;
 
@@ -395,7 +376,6 @@ static ssize_t valid_zones_show(struct device *dev,
 	struct memory_block *mem = to_memory_block(dev);
 	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-	unsigned long valid_start_pfn, valid_end_pfn;
 	struct zone *default_zone;
 	int nid;
 
@@ -408,11 +388,11 @@ static ssize_t valid_zones_show(struct device *dev,
 		 * The block contains more than one zone can not be offlined.
 		 * This can happen e.g. for ZONE_DMA and ZONE_DMA32
 		 */
-		if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
-					  &valid_start_pfn, &valid_end_pfn))
+		default_zone = test_pages_in_a_zone(start_pfn,
+						    start_pfn + nr_pages);
+		if (!default_zone)
 			return sprintf(buf, "none\n");
-		start_pfn = valid_start_pfn;
-		strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
+		strcat(buf, default_zone->name);
 		goto out;
 	}
 
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index cf6b6b7..7fa654f 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -27,6 +27,7 @@
 #include <linux/limits.h>
 #include <linux/property.h>
 #include <linux/kmemleak.h>
+#include <linux/types.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -48,7 +49,7 @@ EXPORT_SYMBOL_GPL(platform_bus);
 struct resource *platform_get_resource(struct platform_device *dev,
 				       unsigned int type, unsigned int num)
 {
-	int i;
+	u32 i;
 
 	for (i = 0; i < dev->num_resources; i++) {
 		struct resource *r = &dev->resource[i];
@@ -255,7 +256,7 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
 					      unsigned int type,
 					      const char *name)
 {
-	int i;
+	u32 i;
 
 	for (i = 0; i < dev->num_resources; i++) {
 		struct resource *r = &dev->resource[i];
@@ -501,7 +502,8 @@ EXPORT_SYMBOL_GPL(platform_device_add_properties);
  */
 int platform_device_add(struct platform_device *pdev)
 {
-	int i, ret;
+	u32 i;
+	int ret;
 
 	if (!pdev)
 		return -EINVAL;
@@ -569,7 +571,7 @@ int platform_device_add(struct platform_device *pdev)
 		pdev->id = PLATFORM_DEVID_AUTO;
 	}
 
-	while (--i >= 0) {
+	while (i--) {
 		struct resource *r = &pdev->resource[i];
 		if (r->parent)
 			release_resource(r);
@@ -590,7 +592,7 @@ EXPORT_SYMBOL_GPL(platform_device_add);
  */
 void platform_device_del(struct platform_device *pdev)
 {
-	int i;
+	u32 i;
 
 	if (!IS_ERR_OR_NULL(pdev)) {
 		device_del(&pdev->dev);
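
Editor's note: the resource index in platform.c becomes u32 to match pdev->num_resources, which forces the error-unwind loop to change: with an unsigned counter, "--i >= 0" is always true, whereas "while (i--)" visits i-1 down to 0 and then stops. A small hedged illustration of the idiom with a hypothetical helper:

static void unwind_claimed(struct resource *res, u32 i)
{
	/* release the first i claimed resources, newest first; also safe for i == 0 */
	while (i--)
		release_resource(&res[i]);
}
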
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 8e5725b..959d6d5 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2303,6 +2303,44 @@ int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
 
 /**
+ * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
+ * @parent_spec: OF phandle args to use for parent PM domain look-up
+ * @subdomain_spec: OF phandle args to use for subdomain look-up
+ *
+ * Looks up a parent PM domain and subdomain based upon phandle args
+ * provided and removes the subdomain from the parent PM domain. Returns a
+ * negative error code on failure.
+ */
+int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
+			      struct of_phandle_args *subdomain_spec)
+{
+	struct generic_pm_domain *parent, *subdomain;
+	int ret;
+
+	mutex_lock(&gpd_list_lock);
+
+	parent = genpd_get_from_provider(parent_spec);
+	if (IS_ERR(parent)) {
+		ret = PTR_ERR(parent);
+		goto out;
+	}
+
+	subdomain = genpd_get_from_provider(subdomain_spec);
+	if (IS_ERR(subdomain)) {
+		ret = PTR_ERR(subdomain);
+		goto out;
+	}
+
+	ret = pm_genpd_remove_subdomain(parent, subdomain);
+
+out:
+	mutex_unlock(&gpd_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
+
+/**
  * of_genpd_remove_last - Remove the last PM domain registered for a provider
  * @provider: Pointer to device structure associated with provider
  *
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 134a8af..0e99a76 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -273,10 +273,38 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
 	device_links_read_unlock(idx);
 }
 
-static void dpm_wait_for_superior(struct device *dev, bool async)
+static bool dpm_wait_for_superior(struct device *dev, bool async)
 {
-	dpm_wait(dev->parent, async);
+	struct device *parent;
+
+	/*
+	 * If the device is resumed asynchronously and the parent's callback
+	 * deletes both the device and the parent itself, the parent object may
+	 * be freed while this function is running, so avoid that by reference
+	 * counting the parent once more unless the device has been deleted
+	 * already (in which case return right away).
+	 */
+	mutex_lock(&dpm_list_mtx);
+
+	if (!device_pm_initialized(dev)) {
+		mutex_unlock(&dpm_list_mtx);
+		return false;
+	}
+
+	parent = get_device(dev->parent);
+
+	mutex_unlock(&dpm_list_mtx);
+
+	dpm_wait(parent, async);
+	put_device(parent);
+
 	dpm_wait_for_suppliers(dev, async);
+
+	/*
+	 * If the parent's callback has deleted the device, attempting to resume
+	 * it would be invalid, so avoid doing that then.
+	 */
+	return device_pm_initialized(dev);
 }
 
 static void dpm_wait_for_consumers(struct device *dev, bool async)
@@ -621,7 +649,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
 	if (!dev->power.is_noirq_suspended)
 		goto Out;
 
-	dpm_wait_for_superior(dev, async);
+	if (!dpm_wait_for_superior(dev, async))
+		goto Out;
 
 	skip_resume = dev_pm_may_skip_resume(dev);
 
@@ -829,7 +858,8 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
 	if (!dev->power.is_late_suspended)
 		goto Out;
 
-	dpm_wait_for_superior(dev, async);
+	if (!dpm_wait_for_superior(dev, async))
+		goto Out;
 
 	callback = dpm_subsys_resume_early_cb(dev, state, &info);
 
@@ -944,7 +974,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 		goto Complete;
 	}
 
-	dpm_wait_for_superior(dev, async);
+	if (!dpm_wait_for_superior(dev, async))
+		goto Complete;
+
 	dpm_watchdog_set(&wd, dev);
 	device_lock(dev);
 
diff --git a/drivers/base/power/qos-test.c b/drivers/base/power/qos-test.c
index 3115db0..79fc6c4 100644
--- a/drivers/base/power/qos-test.c
+++ b/drivers/base/power/qos-test.c
@@ -114,4 +114,4 @@ static struct kunit_suite pm_qos_test_module = {
 	.name = "qos-kunit-test",
 	.test_cases = pm_qos_test_cases,
 };
-kunit_test_suite(pm_qos_test_module);
+kunit_test_suites(&pm_qos_test_module);
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index f4b1d8e..3bb7beb 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -44,7 +44,8 @@ static int test_probe(struct platform_device *pdev)
 	 * performing an async init on that node.
 	 */
 	if (dev->driver->probe_type == PROBE_PREFER_ASYNCHRONOUS) {
-		if (dev_to_node(dev) != numa_node_id()) {
+		if (IS_ENABLED(CONFIG_NUMA) &&
+		    dev_to_node(dev) != numa_node_id()) {
 			dev_warn(dev, "NUMA node mismatch %d != %d\n",
 				 dev_to_node(dev), numa_node_id());
 			atomic_inc(&warnings);
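
Editor's note: IS_ENABLED(CONFIG_NUMA) evaluates to a compile-time constant (1 for =y or =m, 0 otherwise), so on !NUMA kernels the compiler drops the warning branch while the code in it still gets type-checked, which an #ifdef block would not provide. A hedged sketch with a hypothetical wrapper around the same check:

static void maybe_warn_numa(struct device *dev)
{
	/* IS_ENABLED() folds to 0 or 1, so on !CONFIG_NUMA builds the branch
	 * is eliminated at compile time but still parsed and type-checked.
	 */
	if (IS_ENABLED(CONFIG_NUMA) && dev_to_node(dev) != numa_node_id())
		dev_warn(dev, "NUMA node mismatch %d != %d\n",
			 dev_to_node(dev), numa_node_id());
}
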
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
index 5ed86de..ccc8620 100644
--- a/drivers/base/transport_class.c
+++ b/drivers/base/transport_class.c
@@ -30,6 +30,10 @@
 #include <linux/attribute_container.h>
 #include <linux/transport_class.h>
 
+static int transport_remove_classdev(struct attribute_container *cont,
+				     struct device *dev,
+				     struct device *classdev);
+
 /**
  * transport_class_register - register an initial transport class
  *
@@ -172,10 +176,11 @@ static int transport_add_class_device(struct attribute_container *cont,
  * routine is simply a trigger point used to add the device to the
  * system and register attributes for it.
  */
-
-void transport_add_device(struct device *dev)
+int transport_add_device(struct device *dev)
 {
-	attribute_container_device_trigger(dev, transport_add_class_device);
+	return attribute_container_device_trigger_safe(dev,
+					transport_add_class_device,
+					transport_remove_classdev);
 }
 EXPORT_SYMBOL_GPL(transport_add_device);
 
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 1bb8ec5..025b1b7 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -432,16 +432,6 @@
 	  This is the virtual block driver for virtio.  It can be used with
           QEMU based VMMs (like KVM or Xen).  Say Y or M.
 
-config VIRTIO_BLK_SCSI
-	bool "SCSI passthrough request for the Virtio block driver"
-	depends on VIRTIO_BLK
-	select BLK_SCSI_REQUEST
-	---help---
-	  Enable support for SCSI passthrough (e.g. the SG_IO ioctl) on
-	  virtio-blk devices.  This is only supported for the legacy
-	  virtio protocol and not enabled by default by any hypervisor.
-	  You probably want to use virtio-scsi instead.
-
 config BLK_DEV_RBD
 	tristate "Rados block device (RBD)"
 	depends on INET && BLOCK
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index bd19f8a..7b32fb6 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -329,6 +329,7 @@ static const struct block_device_operations aoe_bdops = {
 	.open = aoeblk_open,
 	.release = aoeblk_release,
 	.ioctl = aoeblk_ioctl,
+	.compat_ioctl = blkdev_compat_ptr_ioctl,
 	.getgeo = aoeblk_getgeo,
 	.owner = THIS_MODULE,
 };
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index a8730cc..220c5e1 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -473,6 +473,25 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
 	return kobj;
 }
 
+static inline void brd_check_and_reset_par(void)
+{
+	if (unlikely(!max_part))
+		max_part = 1;
+
+	/*
+	 * make sure (1U << MINORBITS) can be divided exactly by 'max_part',
+	 * otherwise it is possible to get the same dev_t when adding partitions.
+	 */
+	if ((1U << MINORBITS) % max_part != 0)
+		max_part = 1UL << fls(max_part);
+
+	if (max_part > DISK_MAX_PARTS) {
+		pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
+			DISK_MAX_PARTS, DISK_MAX_PARTS);
+		max_part = DISK_MAX_PARTS;
+	}
+}
+
 static int __init brd_init(void)
 {
 	struct brd_device *brd, *next;
@@ -496,8 +515,7 @@ static int __init brd_init(void)
 	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
 		return -EIO;
 
-	if (unlikely(!max_part))
-		max_part = 1;
+	brd_check_and_reset_par();
 
 	for (i = 0; i < rd_nr; i++) {
 		brd = brd_alloc(i);
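
Editor's note: brd_check_and_reset_par() rounds max_part up to a power of two whenever it does not divide 1U << MINORBITS (MINORBITS is 20, i.e. 1048576 minors per major), then clamps it to DISK_MAX_PARTS. For example, max_part=12 fails the divisibility test and becomes 1UL << fls(12) == 16, while max_part=16 is left alone. A hedged, condensed restatement of that logic using the same kernel macros:

static unsigned int clamp_max_part(unsigned int max_part)
{
	if (!max_part)
		max_part = 1;
	if ((1U << MINORBITS) % max_part)	/* e.g. 12 does not divide 2^20 */
		max_part = 1U << fls(max_part);	/* 12 -> 16, the next power of two */
	return min_t(unsigned int, max_part, DISK_MAX_PARTS);
}
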
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index ddbf560..aae99a2 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -622,7 +622,7 @@ struct fifo_buffer {
 	int total; /* sum of all values */
 	int values[0];
 };
-extern struct fifo_buffer *fifo_alloc(int fifo_size);
+extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size);
 
 /* flag bits per connection */
 enum {
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index de2f94d..da4a3eb 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1575,7 +1575,8 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 	struct drbd_device *device;
 	struct disk_conf *new_disk_conf, *old_disk_conf;
 	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
-	int err, fifo_size;
+	int err;
+	unsigned int fifo_size;
 
 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
 	if (!adm_ctx.reply_skb)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 2b3103c..79e2164 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3887,7 +3887,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
 	struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
 	const int apv = connection->agreed_pro_version;
 	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
-	int fifo_size = 0;
+	unsigned int fifo_size = 0;
 	int err;
 
 	peer_device = conn_peer_device(connection, pi->vnr);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 5bdcc70..b7f605c 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -482,11 +482,11 @@ static void fifo_add_val(struct fifo_buffer *fb, int value)
 		fb->values[i] += value;
 }
 
-struct fifo_buffer *fifo_alloc(int fifo_size)
+struct fifo_buffer *fifo_alloc(unsigned int fifo_size)
 {
 	struct fifo_buffer *fb;
 
-	fb = kzalloc(sizeof(struct fifo_buffer) + sizeof(int) * fifo_size, GFP_NOIO);
+	fb = kzalloc(struct_size(fb, values, fifo_size), GFP_NOIO);
 	if (!fb)
 		return NULL;
 
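
Editor's note: struct_size(fb, values, fifo_size) expands to sizeof(*fb) plus fifo_size times sizeof(fb->values[0]) with overflow checking; on overflow the result saturates to SIZE_MAX so the allocation fails rather than coming back silently undersized. A hedged sketch with a hypothetical flexible-array struct:

struct sample_buf {
	unsigned int nr;
	int values[];				/* flexible array member */
};

static struct sample_buf *sample_buf_alloc(unsigned int nr)
{
	/* sizeof(*buf) + nr * sizeof(int), saturating to SIZE_MAX on overflow */
	struct sample_buf *buf = kzalloc(struct_size(buf, values, nr), GFP_KERNEL);

	if (buf)
		buf->nr = nr;
	return buf;
}
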
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 485865f..cd3612e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3879,6 +3879,9 @@ static int fd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
 {
 	int drive = (long)bdev->bd_disk->private_data;
 	switch (cmd) {
+	case CDROMEJECT: /* CD-ROM eject */
+	case 0x6470:	 /* SunOS floppy eject */
+
 	case FDMSGON:
 	case FDMSGOFF:
 	case FDSETEMSGTRESH:
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index b4607dd..7818190 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1265,6 +1265,16 @@ static int nbd_start_device(struct nbd_device *nbd)
 		args = kzalloc(sizeof(*args), GFP_KERNEL);
 		if (!args) {
 			sock_shutdown(nbd);
+			/*
+			 * If there are m connections (m > 2) and the first n
+			 * (1 < n < m) kzallocs succeed but allocation n + 1 fails,
+			 * we still have n recv threads running. Flush the
+			 * workqueue here to prevent those recv threads from
+			 * dropping the last config_refs and trying to destroy
+			 * the workqueue from inside the workqueue.
+			 */
+			if (i)
+				flush_workqueue(nbd->recv_workq);
 			return -ENOMEM;
 		}
 		sk_set_memalloc(config->socks[i]->sock->sk);
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index ae8d4bc..1651079 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -263,34 +263,34 @@ static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
 }
 
 /* The following macro should only be used with TYPE = {uint, ulong, bool}. */
-#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY)					\
-static ssize_t									\
-nullb_device_##NAME##_show(struct config_item *item, char *page)		\
-{										\
-	return nullb_device_##TYPE##_attr_show(					\
-				to_nullb_device(item)->NAME, page);		\
-}										\
-static ssize_t									\
-nullb_device_##NAME##_store(struct config_item *item, const char *page,		\
-			    size_t count)					\
-{										\
-	int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;	\
-	struct nullb_device *dev = to_nullb_device(item);			\
-	TYPE new_value;								\
-	int ret;								\
-										\
-	ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);	\
-	if (ret < 0)								\
-		return ret;							\
-	if (apply_fn)								\
-		ret = apply_fn(dev, new_value);					\
-	else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) 		\
-		ret = -EBUSY;							\
-	if (ret < 0)								\
-		return ret;							\
-	dev->NAME = new_value;							\
-	return count;								\
-}										\
+#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY)				\
+static ssize_t								\
+nullb_device_##NAME##_show(struct config_item *item, char *page)	\
+{									\
+	return nullb_device_##TYPE##_attr_show(				\
+				to_nullb_device(item)->NAME, page);	\
+}									\
+static ssize_t								\
+nullb_device_##NAME##_store(struct config_item *item, const char *page,	\
+			    size_t count)				\
+{									\
+	int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\
+	struct nullb_device *dev = to_nullb_device(item);		\
+	TYPE uninitialized_var(new_value);				\
+	int ret;							\
+									\
+	ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\
+	if (ret < 0)							\
+		return ret;						\
+	if (apply_fn)							\
+		ret = apply_fn(dev, new_value);				\
+	else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) 	\
+		ret = -EBUSY;						\
+	if (ret < 0)							\
+		return ret;						\
+	dev->NAME = new_value;						\
+	return count;							\
+}									\
 CONFIGFS_ATTR(nullb_device_, NAME);
 
 static int nullb_apply_submit_queues(struct nullb_device *dev,
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 636bfea..117cfc8 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -275,6 +275,9 @@ static const struct block_device_operations pcd_bdops = {
 	.open		= pcd_block_open,
 	.release	= pcd_block_release,
 	.ioctl		= pcd_block_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= blkdev_compat_ptr_ioctl,
+#endif
 	.check_events	= pcd_block_check_events,
 };
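
Editor's note: blkdev_compat_ptr_ioctl() is the generic compat handler for block drivers whose native ioctl already takes a pointer-sized argument: it converts the 32-bit user pointer with compat_ptr() and forwards to the driver's regular ->ioctl(). Roughly, as a hedged sketch of the helper's behaviour rather than a verbatim copy:

static int compat_ptr_forward(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	const struct block_device_operations *fops = bdev->bd_disk->fops;

	if (!fops->ioctl)
		return -ENOIOCTLCMD;
	/* widen the 32-bit user pointer and reuse the native handler */
	return fops->ioctl(bdev, mode, cmd, (unsigned long)compat_ptr(arg));
}
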
 
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 6f9ad3f..c096750 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -874,6 +874,7 @@ static const struct block_device_operations pd_fops = {
 	.open		= pd_open,
 	.release	= pd_release,
 	.ioctl		= pd_ioctl,
+	.compat_ioctl	= pd_ioctl,
 	.getgeo		= pd_getgeo,
 	.check_events	= pd_check_events,
 	.revalidate_disk= pd_revalidate
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 6b7d4ca..bb09f21 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -276,6 +276,7 @@ static const struct block_device_operations pf_fops = {
 	.open		= pf_open,
 	.release	= pf_release,
 	.ioctl		= pf_ioctl,
+	.compat_ioctl	= pf_ioctl,
 	.getgeo		= pf_getgeo,
 	.check_events	= pf_check_events,
 };
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 861fc65..5f970a7 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2663,28 +2663,6 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
 	return ret;
 }
 
-#ifdef CONFIG_COMPAT
-static int pkt_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
-{
-	switch (cmd) {
-	/* compatible */
-	case CDROMEJECT:
-	case CDROMMULTISESSION:
-	case CDROMREADTOCENTRY:
-	case SCSI_IOCTL_SEND_COMMAND:
-		return pkt_ioctl(bdev, mode, cmd, (unsigned long)compat_ptr(arg));
-
-
-	/* FIXME: no handler so far */
-	case CDROM_LAST_WRITTEN:
-	/* handled in compat_blkdev_driver_ioctl */
-	case CDROM_SEND_PACKET:
-	default:
-		return -ENOIOCTLCMD;
-	}
-}
-#endif
-
 static unsigned int pkt_check_events(struct gendisk *disk,
 				     unsigned int clearing)
 {
@@ -2706,9 +2684,7 @@ static const struct block_device_operations pktcdvd_ops = {
 	.open =			pkt_open,
 	.release =		pkt_close,
 	.ioctl =		pkt_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl =		pkt_compat_ioctl,
-#endif
+	.compat_ioctl =		blkdev_compat_ptr_ioctl,
 	.check_events =		pkt_check_events,
 };
 
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 2b18456..6343402 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -848,7 +848,7 @@ enum {
 	Opt_notrim,
 };
 
-static const struct fs_parameter_spec rbd_param_specs[] = {
+static const struct fs_parameter_spec rbd_parameters[] = {
 	fsparam_u32	("alloc_size",			Opt_alloc_size),
 	fsparam_flag	("exclusive",			Opt_exclusive),
 	fsparam_flag	("lock_on_read",		Opt_lock_on_read),
@@ -863,11 +863,6 @@ static const struct fs_parameter_spec rbd_param_specs[] = {
 	{}
 };
 
-static const struct fs_parameter_description rbd_parameters = {
-	.name		= "rbd",
-	.specs		= rbd_param_specs,
-};
-
 struct rbd_options {
 	int	queue_depth;
 	int	alloc_size;
@@ -2662,7 +2657,7 @@ static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
 			       u64 off, u64 len)
 {
 	struct ceph_file_extent ex = { off, len };
-	union rbd_img_fill_iter dummy;
+	union rbd_img_fill_iter dummy = {};
 	struct rbd_img_fill_ctx fctx = {
 		.pos_type = OBJ_REQUEST_NODATA,
 		.pos = &dummy,
@@ -6353,19 +6348,19 @@ static int rbd_parse_param(struct fs_parameter *param,
 {
 	struct rbd_options *opt = pctx->opts;
 	struct fs_parse_result result;
+	struct p_log log = {.prefix = "rbd"};
 	int token, ret;
 
 	ret = ceph_parse_param(param, pctx->copts, NULL);
 	if (ret != -ENOPARAM)
 		return ret;
 
-	token = fs_parse(NULL, &rbd_parameters, param, &result);
+	token = __fs_parse(&log, rbd_parameters, param, &result);
 	dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
 	if (token < 0) {
-		if (token == -ENOPARAM) {
-			return invalf(NULL, "rbd: Unknown parameter '%s'",
-				      param->key);
-		}
+		if (token == -ENOPARAM)
+			return inval_plog(&log, "Unknown parameter '%s'",
+					  param->key);
 		return token;
 	}
 
@@ -6378,9 +6373,8 @@ static int rbd_parse_param(struct fs_parameter *param,
 	case Opt_alloc_size:
 		if (result.uint_32 < SECTOR_SIZE)
 			goto out_of_range;
-		if (!is_power_of_2(result.uint_32)) {
-			return invalf(NULL, "rbd: alloc_size must be a power of 2");
-		}
+		if (!is_power_of_2(result.uint_32))
+			return inval_plog(&log, "alloc_size must be a power of 2");
 		opt->alloc_size = result.uint_32;
 		break;
 	case Opt_lock_timeout:
@@ -6416,7 +6410,7 @@ static int rbd_parse_param(struct fs_parameter *param,
 	return 0;
 
 out_of_range:
-	return invalf(NULL, "rbd: %s out of range", param->key);
+	return inval_plog(&log, "%s out of range", param->key);
 }
 
 /*
@@ -6433,7 +6427,7 @@ static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
 		if (*key) {
 			struct fs_parameter param = {
 				.key	= key,
-				.type	= fs_value_is_string,
+				.type	= fs_value_is_flag,
 			};
 			char *value = strchr(key, '=');
 			size_t v_len = 0;
@@ -6443,14 +6437,11 @@ static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
 					continue;
 				*value++ = 0;
 				v_len = strlen(value);
-			}
-
-
-			if (v_len > 0) {
 				param.string = kmemdup_nul(value, v_len,
 							   GFP_KERNEL);
 				if (!param.string)
 					return -ENOMEM;
+				param.type = fs_value_is_string;
 			}
 			param.size = v_len;
 
@@ -7143,7 +7134,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
 	if (rc)
 		goto err_out_image_lock;
 
-	add_disk(rbd_dev->disk);
+	device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
 	/* see rbd_init_disk() */
 	blk_put_queue(rbd_dev->disk->queue);
 
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 571612e..39aeebc 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -171,6 +171,7 @@ static const struct block_device_operations vdc_fops = {
 	.owner		= THIS_MODULE,
 	.getgeo		= vdc_getgeo,
 	.ioctl		= vdc_ioctl,
+	.compat_ioctl	= blkdev_compat_ptr_ioctl,
 };
 
 static void vdc_blk_queue_start(struct vdc_port *port)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7ffd719..5415876 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -11,7 +11,6 @@
 #include <linux/virtio_blk.h>
 #include <linux/scatterlist.h>
 #include <linux/string_helpers.h>
-#include <scsi/scsi_cmnd.h>
 #include <linux/idr.h>
 #include <linux/blk-mq.h>
 #include <linux/blk-mq-virtio.h>
@@ -56,11 +55,6 @@ struct virtio_blk {
 };
 
 struct virtblk_req {
-#ifdef CONFIG_VIRTIO_BLK_SCSI
-	struct scsi_request sreq;	/* for SCSI passthrough, must be first */
-	u8 sense[SCSI_SENSE_BUFFERSIZE];
-	struct virtio_scsi_inhdr in_hdr;
-#endif
 	struct virtio_blk_outhdr out_hdr;
 	u8 status;
 	struct scatterlist sg[];
@@ -78,80 +72,6 @@ static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
 	}
 }
 
-/*
- * If this is a packet command we need a couple of additional headers.  Behind
- * the normal outhdr we put a segment with the scsi command block, and before
- * the normal inhdr we put the sense data and the inhdr with additional status
- * information.
- */
-#ifdef CONFIG_VIRTIO_BLK_SCSI
-static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
-		struct scatterlist *data_sg, bool have_data)
-{
-	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
-	unsigned int num_out = 0, num_in = 0;
-
-	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
-	sgs[num_out++] = &hdr;
-	sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
-	sgs[num_out++] = &cmd;
-
-	if (have_data) {
-		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
-			sgs[num_out++] = data_sg;
-		else
-			sgs[num_out + num_in++] = data_sg;
-	}
-
-	sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
-	sgs[num_out + num_in++] = &sense;
-	sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
-	sgs[num_out + num_in++] = &inhdr;
-	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
-	sgs[num_out + num_in++] = &status;
-
-	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
-}
-
-static inline void virtblk_scsi_request_done(struct request *req)
-{
-	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
-	struct virtio_blk *vblk = req->q->queuedata;
-	struct scsi_request *sreq = &vbr->sreq;
-
-	sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
-	sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
-	sreq->result = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
-}
-
-static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
-			     unsigned int cmd, unsigned long data)
-{
-	struct gendisk *disk = bdev->bd_disk;
-	struct virtio_blk *vblk = disk->private_data;
-
-	/*
-	 * Only allow the generic SCSI ioctls if the host can support it.
-	 */
-	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
-		return -ENOTTY;
-
-	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
-				  (void __user *)data);
-}
-#else
-static inline int virtblk_add_req_scsi(struct virtqueue *vq,
-		struct virtblk_req *vbr, struct scatterlist *data_sg,
-		bool have_data)
-{
-	return -EIO;
-}
-static inline void virtblk_scsi_request_done(struct request *req)
-{
-}
-#define virtblk_ioctl	NULL
-#endif /* CONFIG_VIRTIO_BLK_SCSI */
-
 static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
 		struct scatterlist *data_sg, bool have_data)
 {
@@ -216,13 +136,6 @@ static inline void virtblk_request_done(struct request *req)
 		      req->special_vec.bv_offset);
 	}
 
-	switch (req_op(req)) {
-	case REQ_OP_SCSI_IN:
-	case REQ_OP_SCSI_OUT:
-		virtblk_scsi_request_done(req);
-		break;
-	}
-
 	blk_mq_end_request(req, virtblk_result(vbr));
 }
 
@@ -299,10 +212,6 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 		type = VIRTIO_BLK_T_WRITE_ZEROES;
 		unmap = !(req->cmd_flags & REQ_NOUNMAP);
 		break;
-	case REQ_OP_SCSI_IN:
-	case REQ_OP_SCSI_OUT:
-		type = VIRTIO_BLK_T_SCSI_CMD;
-		break;
 	case REQ_OP_DRV_IN:
 		type = VIRTIO_BLK_T_GET_ID;
 		break;
@@ -333,10 +242,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 	}
 
 	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
-	if (blk_rq_is_scsi(req))
-		err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
-	else
-		err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
+	err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
 	if (err) {
 		virtqueue_kick(vblk->vqs[qid].vq);
 		blk_mq_stop_hw_queue(hctx);
@@ -404,7 +310,6 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 }
 
 static const struct block_device_operations virtblk_fops = {
-	.ioctl  = virtblk_ioctl,
 	.owner  = THIS_MODULE,
 	.getgeo = virtblk_getgeo,
 };
@@ -683,9 +588,6 @@ static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
 	struct virtio_blk *vblk = set->driver_data;
 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
 
-#ifdef CONFIG_VIRTIO_BLK_SCSI
-	vbr->sreq.sense = vbr->sense;
-#endif
 	sg_init_table(vbr->sg, vblk->sg_elems);
 	return 0;
 }
@@ -698,23 +600,11 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
 					vblk->vdev, 0);
 }
 
-#ifdef CONFIG_VIRTIO_BLK_SCSI
-static void virtblk_initialize_rq(struct request *req)
-{
-	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
-
-	scsi_req_init(&vbr->sreq);
-}
-#endif
-
 static const struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
 	.commit_rqs	= virtio_commit_rqs,
 	.complete	= virtblk_request_done,
 	.init_request	= virtblk_init_request,
-#ifdef CONFIG_VIRTIO_BLK_SCSI
-	.initialize_rq_fn = virtblk_initialize_rq,
-#endif
 	.map_queues	= virtblk_map_queues,
 };
 
@@ -991,9 +881,6 @@ static const struct virtio_device_id id_table[] = {
 static unsigned int features_legacy[] = {
 	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
 	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
-#ifdef CONFIG_VIRTIO_BLK_SCSI
-	VIRTIO_BLK_F_SCSI,
-#endif
 	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
 	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
 }
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 716b99a..c2f7126 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -62,8 +62,8 @@
  * IO workloads.
  */
 
-static int xen_blkif_max_buffer_pages = 1024;
-module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
+static int max_buffer_pages = 1024;
+module_param_named(max_buffer_pages, max_buffer_pages, int, 0644);
 MODULE_PARM_DESC(max_buffer_pages,
 "Maximum number of free pages to keep in each block backend buffer");
 
@@ -78,8 +78,8 @@ MODULE_PARM_DESC(max_buffer_pages,
  * algorithm.
  */
 
-static int xen_blkif_max_pgrants = 1056;
-module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
+static int max_pgrants = 1056;
+module_param_named(max_persistent_grants, max_pgrants, int, 0644);
 MODULE_PARM_DESC(max_persistent_grants,
                  "Maximum number of grants to map persistently");
 
@@ -88,8 +88,8 @@ MODULE_PARM_DESC(max_persistent_grants,
  * use. The time is in seconds, 0 means indefinitely long.
  */
 
-static unsigned int xen_blkif_pgrant_timeout = 60;
-module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
+static unsigned int pgrant_timeout = 60;
+module_param_named(persistent_grant_unused_seconds, pgrant_timeout,
 		   uint, 0644);
 MODULE_PARM_DESC(persistent_grant_unused_seconds,
 		 "Time in seconds an unused persistent grant is allowed to "
@@ -137,9 +137,8 @@ module_param(log_stats, int, 0644);
 
 static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
 {
-	return xen_blkif_pgrant_timeout &&
-	       (jiffies - persistent_gnt->last_used >=
-		HZ * xen_blkif_pgrant_timeout);
+	return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
+			HZ * pgrant_timeout);
 }
 
 static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
@@ -234,7 +233,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring,
 	struct persistent_gnt *this;
 	struct xen_blkif *blkif = ring->blkif;
 
-	if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
+	if (ring->persistent_gnt_c >= max_pgrants) {
 		if (!blkif->vbd.overflow_max_grants)
 			blkif->vbd.overflow_max_grants = 1;
 		return -EBUSY;
@@ -397,14 +396,13 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
 		goto out;
 	}
 
-	if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
-	    (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
+	if (ring->persistent_gnt_c < max_pgrants ||
+	    (ring->persistent_gnt_c == max_pgrants &&
 	    !ring->blkif->vbd.overflow_max_grants)) {
 		num_clean = 0;
 	} else {
-		num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
-		num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
-			    num_clean;
+		num_clean = (max_pgrants / 100) * LRU_PERCENT_CLEAN;
+		num_clean = ring->persistent_gnt_c - max_pgrants + num_clean;
 		num_clean = min(ring->persistent_gnt_c, num_clean);
 		pr_debug("Going to purge at least %u persistent grants\n",
 			 num_clean);
@@ -599,8 +597,7 @@ static void print_stats(struct xen_blkif_ring *ring)
 		 current->comm, ring->st_oo_req,
 		 ring->st_rd_req, ring->st_wr_req,
 		 ring->st_f_req, ring->st_ds_req,
-		 ring->persistent_gnt_c,
-		 xen_blkif_max_pgrants);
+		 ring->persistent_gnt_c, max_pgrants);
 	ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
 	ring->st_rd_req = 0;
 	ring->st_wr_req = 0;
@@ -656,8 +653,11 @@ int xen_blkif_schedule(void *arg)
 			ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
 		}
 
-		/* Shrink if we have more than xen_blkif_max_buffer_pages */
-		shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);
+		/* Shrink the free pages pool if it is too large. */
+		if (time_before(jiffies, blkif->buffer_squeeze_end))
+			shrink_free_pagepool(ring, 0);
+		else
+			shrink_free_pagepool(ring, max_buffer_pages);
 
 		if (log_stats && time_after(jiffies, ring->st_print))
 			print_stats(ring);
@@ -884,7 +884,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
 			continue;
 		}
 		if (use_persistent_gnts &&
-		    ring->persistent_gnt_c < xen_blkif_max_pgrants) {
+		    ring->persistent_gnt_c < max_pgrants) {
 			/*
 			 * We are using persistent grants, the grant is
 			 * not mapped but we might have room for it.
@@ -911,7 +911,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
 			pages[seg_idx]->persistent_gnt = persistent_gnt;
 			pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
 				 persistent_gnt->gnt, ring->persistent_gnt_c,
-				 xen_blkif_max_pgrants);
+				 max_pgrants);
 			goto next;
 		}
 		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
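The renames above only touch the C identifiers; module_param_named() keeps the user-visible
parameter names (max_buffer_pages, max_persistent_grants, persistent_grant_unused_seconds)
stable, so existing tooling that reads them keeps working. A minimal userspace sketch of
reading one of them, assuming the xen-blkback module is loaded and the usual
/sys/module/<name>/parameters layout (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/module/xen_blkback/parameters/max_buffer_pages";
	FILE *f = fopen(path, "r");
	int val;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("max_buffer_pages = %d\n", val);
	fclose(f);
	return 0;
}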
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 49132b0..a3eeccf 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -319,6 +319,7 @@ struct xen_blkif {
 	/* All rings for this device. */
 	struct xen_blkif_ring	*rings;
 	unsigned int		nr_rings;
+	unsigned long		buffer_squeeze_end;
 };
 
 struct seg_buf {
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 4c5d99f..42944d4 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -467,7 +467,6 @@ static void xenvbd_sysfs_delif(struct xenbus_device *dev)
 	device_remove_file(&dev->dev, &dev_attr_physical_device);
 }
 
-
 static void xen_vbd_free(struct xen_vbd *vbd)
 {
 	if (vbd->bdev)
@@ -524,6 +523,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
 		handle, blkif->domid);
 	return 0;
 }
+
 static int xen_blkbk_remove(struct xenbus_device *dev)
 {
 	struct backend_info *be = dev_get_drvdata(&dev->dev);
@@ -607,6 +607,7 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
 	if (err)
 		dev_warn(&dev->dev, "writing feature-discard (%d)", err);
 }
+
 int xen_blkbk_barrier(struct xenbus_transaction xbt,
 		      struct backend_info *be, int state)
 {
@@ -691,7 +692,6 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
 	return err;
 }
 
-
 /*
  * Callback received when the hotplug scripts have placed the physical-device
  * node.  Read it and the mode node, and create a vbd.  If the frontend is
@@ -783,7 +783,6 @@ static void backend_changed(struct xenbus_watch *watch,
 	}
 }
 
-
 /*
  * Callback received when the frontend's state changes.
  */
@@ -858,10 +857,28 @@ static void frontend_changed(struct xenbus_device *dev,
 	}
 }
 
+/* Once memory pressure is detected, squeeze free page pools for a while. */
+static unsigned int buffer_squeeze_duration_ms = 10;
+module_param_named(buffer_squeeze_duration_ms,
+		buffer_squeeze_duration_ms, int, 0644);
+MODULE_PARM_DESC(buffer_squeeze_duration_ms,
+"Duration in ms to squeeze pages buffer when a memory pressure is detected");
+
+/*
+ * Callback received when the memory pressure is detected.
+ */
+static void reclaim_memory(struct xenbus_device *dev)
+{
+	struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+	if (!be)
+		return;
+	be->blkif->buffer_squeeze_end = jiffies +
+		msecs_to_jiffies(buffer_squeeze_duration_ms);
+}
 
 /* ** Connection ** */
 
-
 /*
  * Write the physical details regarding the block device to the store, and
  * switch to Connected state.
@@ -1152,6 +1169,7 @@ static struct xenbus_driver xen_blkbk_driver = {
 	.remove = xen_blkbk_remove,
 	.otherend_changed = frontend_changed,
 	.allow_rebind = true,
+	.reclaim_memory = reclaim_memory,
 };
 
 int xen_blkif_xenbus_init(void)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c02be06..e2ad6bb 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -151,9 +151,6 @@ MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the
 #define BLK_RING_SIZE(info)	\
 	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
 
-#define BLK_MAX_RING_SIZE	\
-	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS)
-
 /*
  * ring-ref%u i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
  * characters are enough. Define to 20 to keep consistent with backend.
@@ -177,12 +174,12 @@ struct blkfront_ring_info {
 	unsigned int evtchn, irq;
 	struct work_struct work;
 	struct gnttab_free_callback callback;
-	struct blk_shadow shadow[BLK_MAX_RING_SIZE];
 	struct list_head indirect_pages;
 	struct list_head grants;
 	unsigned int persistent_gnts_c;
 	unsigned long shadow_free;
 	struct blkfront_info *dev_info;
+	struct blk_shadow shadow[];
 };
 
 /*
@@ -1915,7 +1912,8 @@ static int negotiate_mq(struct blkfront_info *info)
 		info->nr_rings = 1;
 
 	info->rinfo = kvcalloc(info->nr_rings,
-			       sizeof(struct blkfront_ring_info),
+			       struct_size(info->rinfo, shadow,
+					   BLK_RING_SIZE(info)),
 			       GFP_KERNEL);
 	if (!info->rinfo) {
 		xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
@@ -2632,6 +2630,7 @@ static const struct block_device_operations xlvbd_block_fops =
 	.release = blkif_release,
 	.getgeo = blkif_getgeo,
 	.ioctl = blkif_ioctl,
+	.compat_ioctl = blkdev_compat_ptr_ioctl,
 };
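The shadow[] member becomes a flexible array sized per ring at allocation time, so memory
scales with the negotiated ring size instead of the worst-case XENBUS_MAX_RING_GRANTS.
A rough userspace sketch of the same pattern, with plain arithmetic standing in for the
kernel's struct_size() helper (which additionally checks for overflow); the struct names
below are stand-ins, not the real blkfront types:

#include <stdio.h>
#include <stdlib.h>

struct shadow {			/* stand-in for struct blk_shadow */
	unsigned long id;
};

struct ring_info {		/* stand-in for struct blkfront_ring_info */
	unsigned int ring_size;
	struct shadow shadow[];	/* flexible array member, must be last */
};

static struct ring_info *ring_alloc(unsigned int ring_size)
{
	/* Kernel code would use struct_size(rinfo, shadow, ring_size). */
	struct ring_info *rinfo =
		calloc(1, sizeof(*rinfo) + ring_size * sizeof(struct shadow));

	if (rinfo)
		rinfo->ring_size = ring_size;
	return rinfo;
}

int main(void)
{
	struct ring_info *rinfo = ring_alloc(32);

	if (!rinfo)
		return 1;
	rinfo->shadow[31].id = 42;
	printf("allocated %u shadow entries\n", rinfo->ring_size);
	free(rinfo);
	return 0;
}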
 
 
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 4285e75..1bdb579 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -207,14 +207,17 @@ static inline void zram_fill_page(void *ptr, unsigned long len,
 
 static bool page_same_filled(void *ptr, unsigned long *element)
 {
-	unsigned int pos;
 	unsigned long *page;
 	unsigned long val;
+	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
 
 	page = (unsigned long *)ptr;
 	val = page[0];
 
-	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
+	if (val != page[last_pos])
+		return false;
+
+	for (pos = 1; pos < last_pos; pos++) {
 		if (val != page[pos])
 			return false;
 	}
@@ -626,7 +629,7 @@ static ssize_t writeback_store(struct device *dev,
 	struct bio bio;
 	struct bio_vec bio_vec;
 	struct page *page;
-	ssize_t ret;
+	ssize_t ret = len;
 	int mode;
 	unsigned long blk_idx = 0;
 
@@ -762,7 +765,6 @@ static ssize_t writeback_store(struct device *dev,
 
 	if (blk_idx)
 		free_block_bdev(zram, blk_idx);
-	ret = len;
 	__free_page(page);
 release_init_lock:
 	up_read(&zram->init_lock);
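Comparing the last word of the page up front rejects most "almost same-filled" pages before
the full scan starts. A userspace sketch of the same check, mirroring the new loop bounds
(illustrative only, not the driver code):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* True when every word in the page equals the first one; checking the last
 * word first avoids scanning pages that differ only near the end. */
static bool page_same_filled(const void *ptr, unsigned long *element)
{
	const unsigned long *page = ptr;
	unsigned long val = page[0];
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	if (val != page[last_pos])
		return false;

	for (pos = 1; pos < last_pos; pos++)
		if (val != page[pos])
			return false;

	*element = val;
	return true;
}

int main(void)
{
	unsigned long buf[PAGE_SIZE / sizeof(unsigned long)] = { 0 };
	unsigned long element;

	printf("all zero:         %d\n", page_same_filled(buf, &element));
	buf[10] = 1;
	printf("one word differs: %d\n", page_same_filled(buf, &element));
	return 0;
}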
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 50200d1..6095b6d 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -139,7 +139,6 @@
 	tristate "Tegra ACONNECT Bus Driver"
 	depends on ARCH_TEGRA_210_SOC
 	depends on OF && PM
-	select PM_CLK
 	help
 	  Driver for the Tegra ACONNECT bus which is used to interface with
 	  the devices inside the Audio Processing Engine (APE) for Tegra210.
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index a07cc19..c78d10e 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -715,9 +715,9 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
 struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
 {
 	struct fsl_mc_device *mc_bus_dev, *endpoint;
-	struct fsl_mc_obj_desc endpoint_desc = { 0 };
-	struct dprc_endpoint endpoint1 = { 0 };
-	struct dprc_endpoint endpoint2 = { 0 };
+	struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
+	struct dprc_endpoint endpoint1 = {{ 0 }};
+	struct dprc_endpoint endpoint2 = {{ 0 }};
 	int state, err;
 
 	mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
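Both initializers zero the whole structure; the extra braces only make the initialization of
the aggregate first member explicit, which silences -Wmissing-braces on compilers that warn
about "{ 0 }" in that situation. A small standalone illustration with made-up struct names:

#include <stdio.h>

struct inner {
	int a;
	int b;
};

struct outer {
	struct inner first;	/* aggregate first member */
	int flags;
};

int main(void)
{
	/* "{ 0 }" zero-initializes everything but may trigger -Wmissing-braces,
	 * because the 0 syntactically initializes "first" without its own braces. */
	struct outer x = { 0 };

	/* "{{ 0 }}" spells out the nesting and keeps the warning quiet. */
	struct outer y = {{ 0 }};

	printf("%d %d %d\n", x.first.a, y.first.b, y.flags);
	return 0;
}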
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 36cf13e..15fa293 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -102,12 +102,11 @@ static int moxtet_match(struct device *dev, struct device_driver *drv)
 	return 0;
 }
 
-struct bus_type moxtet_bus_type = {
+static struct bus_type moxtet_bus_type = {
 	.name		= "moxtet",
 	.dev_groups	= moxtet_dev_groups,
 	.match		= moxtet_match,
 };
-EXPORT_SYMBOL_GPL(moxtet_bus_type);
 
 int __moxtet_register_driver(struct module *owner,
 			     struct moxtet_driver *mdrv)
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index ccb44fe..f702c85 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -479,7 +479,7 @@ static void sysc_clkdm_deny_idle(struct sysc *ddata)
 {
 	struct ti_sysc_platform_data *pdata;
 
-	if (ddata->legacy_mode)
+	if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
 		return;
 
 	pdata = dev_get_platdata(ddata->dev);
@@ -491,7 +491,7 @@ static void sysc_clkdm_allow_idle(struct sysc *ddata)
 {
 	struct ti_sysc_platform_data *pdata;
 
-	if (ddata->legacy_mode)
+	if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
 		return;
 
 	pdata = dev_get_platdata(ddata->dev);
@@ -509,10 +509,8 @@ static int sysc_init_resets(struct sysc *ddata)
 {
 	ddata->rsts =
 		devm_reset_control_get_optional_shared(ddata->dev, "rstctrl");
-	if (IS_ERR(ddata->rsts))
-		return PTR_ERR(ddata->rsts);
 
-	return 0;
+	return PTR_ERR_OR_ZERO(ddata->rsts);
 }
 
 /**
@@ -1216,10 +1214,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
 	/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
 	SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
 		   SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET),
-	SYSC_QUIRK("mmu", 0, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
-		   SYSC_QUIRK_LEGACY_IDLE),
-	SYSC_QUIRK("mmu", 0, 0, 0x10, 0x14, 0x00000030, 0xffffffff,
-		   SYSC_QUIRK_LEGACY_IDLE),
 	SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
 		   SYSC_QUIRK_LEGACY_IDLE),
 	SYSC_QUIRK("smartreflex", 0, -1, 0x24, -1, 0x00000000, 0xffffffff,
@@ -1251,6 +1245,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
 	/* Quirks that need to be set based on detected module */
 	SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff,
 		   SYSC_MODULE_QUIRK_AESS),
+	SYSC_QUIRK("dcan", 0x48480000, 0x20, -1, -1, 0xa3170504, 0xffffffff,
+		   SYSC_QUIRK_CLKDM_NOAUTO),
+	SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
+		   SYSC_QUIRK_CLKDM_NOAUTO),
+	SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
+		   SYSC_QUIRK_CLKDM_NOAUTO),
 	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
 		   SYSC_MODULE_QUIRK_HDQ1W),
 	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index eebdcbe..faca0f3 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3017,9 +3017,31 @@ static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi,
 	struct cdrom_read_audio ra;
 	int lba;
 
-	if (copy_from_user(&ra, (struct cdrom_read_audio __user *)arg,
-			   sizeof(ra)))
-		return -EFAULT;
+#ifdef CONFIG_COMPAT
+	if (in_compat_syscall()) {
+		struct compat_cdrom_read_audio {
+			union cdrom_addr	addr;
+			u8			addr_format;
+			compat_int_t		nframes;
+			compat_caddr_t		buf;
+		} ra32;
+
+		if (copy_from_user(&ra32, arg, sizeof(ra32)))
+			return -EFAULT;
+
+		ra = (struct cdrom_read_audio) {
+			.addr		= ra32.addr,
+			.addr_format	= ra32.addr_format,
+			.nframes	= ra32.nframes,
+			.buf		= compat_ptr(ra32.buf),
+		};
+	} else
+#endif
+	{
+		if (copy_from_user(&ra, (struct cdrom_read_audio __user *)arg,
+				   sizeof(ra)))
+			return -EFAULT;
+	}
 
 	if (ra.addr_format == CDROM_MSF)
 		lba = msf_to_lba(ra.addr.msf.minute,
@@ -3271,9 +3293,10 @@ static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi,
 	ret = cdrom_get_last_written(cdi, &last);
 	if (ret)
 		return ret;
-	if (copy_to_user((long __user *)arg, &last, sizeof(last)))
-		return -EFAULT;
-	return 0;
+	if (in_compat_syscall())
+		return put_user(last, (__s32 __user *)arg);
+
+	return put_user(last, (long __user *)arg);
 }
 
 static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
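The compat branch is needed because a 32-bit caller lays out struct cdrom_read_audio
differently: its buf member is a 32-bit value (compat_caddr_t), so the native 64-bit layout
cannot be copied from user space directly. A userspace sketch that only shows the size
difference; the structs below are simplified stand-ins, not the real uapi definitions:

#include <stdint.h>
#include <stdio.h>

union addr {			/* stand-in for union cdrom_addr */
	uint32_t lba;
	uint8_t msf[4];
};

/* Native 64-bit layout: buf is an 8-byte, 8-byte-aligned pointer. */
struct read_audio64 {
	union addr addr;
	uint8_t addr_format;
	int32_t nframes;
	void *buf;
};

/* What a 32-bit (compat) caller actually hands in: buf is a 32-bit value. */
struct read_audio32 {
	union addr addr;
	uint8_t addr_format;
	int32_t nframes;
	uint32_t buf;		/* compat_caddr_t in the kernel */
};

int main(void)
{
	printf("64-bit layout: %zu bytes\n", sizeof(struct read_audio64));
	printf("32-bit layout: %zu bytes\n", sizeof(struct read_audio32));
	return 0;
}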
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 5b21dc4..886b263 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -518,6 +518,9 @@ static const struct block_device_operations gdrom_bdops = {
 	.release		= gdrom_bdops_release,
 	.check_events		= gdrom_bdops_check_events,
 	.ioctl			= gdrom_bdops_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= blkdev_compat_ptr_ioctl,
+#endif
 };
 
 static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 9ac6671..ed3b7da 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -110,7 +110,7 @@ struct hpets {
 	unsigned long hp_delta;
 	unsigned int hp_ntimer;
 	unsigned int hp_which;
-	struct hpet_dev hp_dev[1];
+	struct hpet_dev hp_dev[];
 };
 
 static struct hpets *hpets;
@@ -855,7 +855,7 @@ int hpet_alloc(struct hpet_data *hdp)
 		return 0;
 	}
 
-	hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs - 1),
+	hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs),
 			GFP_KERNEL);
 
 	if (!hpetp)
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index d2a5791..cbf5eae 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -157,7 +157,7 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
 
 	/* Clock is optional on most platforms */
 	priv->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+	if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 
 	priv->rng.name = pdev->name;
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 0ed07d1..6595239 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -476,7 +476,7 @@ static int omap_rng_probe(struct platform_device *pdev)
 	}
 
 	priv->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+	if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 	if (!IS_ERR(priv->clk)) {
 		ret = clk_prepare_enable(priv->clk);
@@ -488,7 +488,7 @@ static int omap_rng_probe(struct platform_device *pdev)
 	}
 
 	priv->clk_reg = devm_clk_get(&pdev->dev, "reg");
-	if (IS_ERR(priv->clk_reg) && PTR_ERR(priv->clk_reg) == -EPROBE_DEFER)
+	if (PTR_ERR(priv->clk_reg) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 	if (!IS_ERR(priv->clk_reg)) {
 		ret = clk_prepare_enable(priv->clk_reg);
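devm_clk_get() returns either a valid pointer or an ERR_PTR()-encoded errno, and PTR_ERR()
of a valid pointer can never land in the errno range, so comparing directly against
-EPROBE_DEFER is sufficient without the IS_ERR() check. A userspace sketch with simplified
versions of the <linux/err.h> macros (re-implemented here for illustration, not the kernel's
exact definitions):

#include <stdio.h>

#define EPROBE_DEFER 517
#define MAX_ERRNO    4095

/* Simplified userspace mimics of the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int real_clk;			/* stands in for a real clk object   */
	void *ok = &real_clk;		/* valid pointer from devm_clk_get() */
	void *deferred = ERR_PTR(-EPROBE_DEFER);

	/* A valid pointer's PTR_ERR() value is just its address, which cannot
	 * fall in the [-MAX_ERRNO, -1] range, so the comparison alone is safe. */
	printf("ok:       PTR_ERR == -EPROBE_DEFER? %d\n",
	       PTR_ERR(ok) == -EPROBE_DEFER);
	printf("deferred: PTR_ERR == -EPROBE_DEFER? %d\n",
	       PTR_ERR(deferred) == -EPROBE_DEFER);
	printf("deferred: IS_ERR? %d\n", IS_ERR(deferred));
	return 0;
}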
diff --git a/drivers/char/random.c b/drivers/char/random.c
index cda1293..c7f9584 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -307,6 +307,8 @@
  * Eastlake, Steve Crocker, and Jeff Schiller.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/utsname.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -354,7 +356,6 @@
 #define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
 #define OUTPUT_POOL_SHIFT	10
 #define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
-#define SEC_XFER_SIZE		512
 #define EXTRACT_SIZE		10
 
 
@@ -371,12 +372,6 @@
 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
 
 /*
- * The minimum number of bits of entropy before we wake up a read on
- * /dev/random.  Should be enough to do a significant reseed.
- */
-static int random_read_wakeup_bits = 64;
-
-/*
  * If the entropy count falls under this number of bits, then we
  * should wake up processes which are selecting or polling on write
  * access to /dev/random.
@@ -436,42 +431,11 @@ static const struct poolinfo {
 	/* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
 	/* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
 	{ S(128),	104,	76,	51,	25,	1 },
-	/* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
-	/* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
-	{ S(32),	26,	19,	14,	7,	1 },
-#if 0
-	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
-	{ S(2048),	1638,	1231,	819,	411,	1 },
-
-	/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
-	{ S(1024),	817,	615,	412,	204,	1 },
-
-	/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
-	{ S(1024),	819,	616,	410,	207,	2 },
-
-	/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
-	{ S(512),	411,	308,	208,	104,	1 },
-
-	/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
-	{ S(512),	409,	307,	206,	102,	2 },
-	/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
-	{ S(512),	409,	309,	205,	103,	2 },
-
-	/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
-	{ S(256),	205,	155,	101,	52,	1 },
-
-	/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
-	{ S(128),	103,	78,	51,	27,	2 },
-
-	/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
-	{ S(64),	52,	39,	26,	14,	1 },
-#endif
 };
 
 /*
  * Static global variables
  */
-static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 static struct fasync_struct *fasync;
 
@@ -530,11 +494,8 @@ struct entropy_store {
 	const struct poolinfo *poolinfo;
 	__u32 *pool;
 	const char *name;
-	struct entropy_store *pull;
-	struct work_struct push_work;
 
 	/* read-write data: */
-	unsigned long last_pulled;
 	spinlock_t lock;
 	unsigned short add_ptr;
 	unsigned short input_rotate;
@@ -550,9 +511,7 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
 				size_t nbytes, int fips);
 
 static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
-static void push_to_pool(struct work_struct *work);
 static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
 
 static struct entropy_store input_pool = {
 	.poolinfo = &poolinfo_table[0],
@@ -561,16 +520,6 @@ static struct entropy_store input_pool = {
 	.pool = input_pool_data
 };
 
-static struct entropy_store blocking_pool = {
-	.poolinfo = &poolinfo_table[1],
-	.name = "blocking",
-	.pull = &input_pool,
-	.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
-	.pool = blocking_pool_data,
-	.push_work = __WORK_INITIALIZER(blocking_pool.push_work,
-					push_to_pool),
-};
-
 static __u32 const twist_table[8] = {
 	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
 	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
@@ -759,22 +708,17 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
 		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
 	}
 
-	if (unlikely(entropy_count < 0)) {
-		pr_warn("random: negative entropy/overflow: pool %s count %d\n",
+	if (WARN_ON(entropy_count < 0)) {
+		pr_warn("negative entropy/overflow: pool %s count %d\n",
 			r->name, entropy_count);
-		WARN_ON(1);
 		entropy_count = 0;
 	} else if (entropy_count > pool_size)
 		entropy_count = pool_size;
-	if ((r == &blocking_pool) && !r->initialized &&
-	    (entropy_count >> ENTROPY_SHIFT) > 128)
-		has_initialized = 1;
 	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
 		goto retry;
 
 	if (has_initialized) {
 		r->initialized = 1;
-		wake_up_interruptible(&random_read_wait);
 		kill_fasync(&fasync, SIGIO, POLL_IN);
 	}
 
@@ -783,36 +727,13 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
 
 	if (r == &input_pool) {
 		int entropy_bits = entropy_count >> ENTROPY_SHIFT;
-		struct entropy_store *other = &blocking_pool;
 
 		if (crng_init < 2) {
 			if (entropy_bits < 128)
 				return;
 			crng_reseed(&primary_crng, r);
-			entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
+			entropy_bits = ENTROPY_BITS(r);
 		}
-
-		/* initialize the blocking pool if necessary */
-		if (entropy_bits >= random_read_wakeup_bits &&
-		    !other->initialized) {
-			schedule_work(&other->push_work);
-			return;
-		}
-
-		/* should we wake readers? */
-		if (entropy_bits >= random_read_wakeup_bits &&
-		    wq_has_sleeper(&random_read_wait)) {
-			wake_up_interruptible(&random_read_wait);
-			kill_fasync(&fasync, SIGIO, POLL_IN);
-		}
-		/* If the input pool is getting full, and the blocking
-		 * pool has room, send some entropy to the blocking
-		 * pool.
-		 */
-		if (!work_pending(&other->push_work) &&
-		    (ENTROPY_BITS(r) > 6 * r->poolinfo->poolbytes) &&
-		    (ENTROPY_BITS(other) <= 6 * other->poolinfo->poolbytes))
-			schedule_work(&other->push_work);
 	}
 }
 
@@ -884,7 +805,7 @@ static void crng_initialize(struct crng_state *crng)
 		invalidate_batched_entropy();
 		numa_crng_init();
 		crng_init = 2;
-		pr_notice("random: crng done (trusting CPU's manufacturer)\n");
+		pr_notice("crng done (trusting CPU's manufacturer)\n");
 	}
 	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 }
@@ -946,8 +867,7 @@ static int crng_fast_load(const char *cp, size_t len)
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
 		invalidate_batched_entropy();
 		crng_init = 1;
-		wake_up_interruptible(&crng_init_wait);
-		pr_notice("random: fast init done\n");
+		pr_notice("fast init done\n");
 	}
 	return 1;
 }
@@ -1032,16 +952,15 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
-		pr_notice("random: crng init done\n");
+		kill_fasync(&fasync, SIGIO, POLL_IN);
+		pr_notice("crng init done\n");
 		if (unseeded_warning.missed) {
-			pr_notice("random: %d get_random_xx warning(s) missed "
-				  "due to ratelimiting\n",
+			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
 				  unseeded_warning.missed);
 			unseeded_warning.missed = 0;
 		}
 		if (urandom_warning.missed) {
-			pr_notice("random: %d urandom warning(s) missed "
-				  "due to ratelimiting\n",
+			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
 				  urandom_warning.missed);
 			urandom_warning.missed = 0;
 		}
@@ -1246,7 +1165,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 	/*
 	 * delta is now minimum absolute delta.
 	 * Round down by 1 bit on general principles,
-	 * and limit entropy entimate to 12 bits.
+	 * and limit entropy estimate to 12 bits.
 	 */
 	credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
 }
@@ -1390,57 +1309,6 @@ EXPORT_SYMBOL_GPL(add_disk_randomness);
  *********************************************************************/
 
 /*
- * This utility inline function is responsible for transferring entropy
- * from the primary pool to the secondary extraction pool. We make
- * sure we pull enough for a 'catastrophic reseed'.
- */
-static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
-static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
-{
-	if (!r->pull ||
-	    r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) ||
-	    r->entropy_count > r->poolinfo->poolfracbits)
-		return;
-
-	_xfer_secondary_pool(r, nbytes);
-}
-
-static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
-{
-	__u32	tmp[OUTPUT_POOL_WORDS];
-
-	int bytes = nbytes;
-
-	/* pull at least as much as a wakeup */
-	bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
-	/* but never more than the buffer size */
-	bytes = min_t(int, bytes, sizeof(tmp));
-
-	trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
-				  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
-	bytes = extract_entropy(r->pull, tmp, bytes,
-				random_read_wakeup_bits / 8, 0);
-	mix_pool_bytes(r, tmp, bytes);
-	credit_entropy_bits(r, bytes*8);
-}
-
-/*
- * Used as a workqueue function so that when the input pool is getting
- * full, we can "spill over" some entropy to the output pools.  That
- * way the output pools can store some of the excess entropy instead
- * of letting it go to waste.
- */
-static void push_to_pool(struct work_struct *work)
-{
-	struct entropy_store *r = container_of(work, struct entropy_store,
-					      push_work);
-	BUG_ON(!r);
-	_xfer_secondary_pool(r, random_read_wakeup_bits/8);
-	trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
-			   r->pull->entropy_count >> ENTROPY_SHIFT);
-}
-
-/*
  * This function decides how many bytes to actually take from the
  * given pool, and also debits the entropy count accordingly.
  */
@@ -1465,10 +1333,9 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 	if (ibytes < min)
 		ibytes = 0;
 
-	if (unlikely(entropy_count < 0)) {
-		pr_warn("random: negative entropy count: pool %s count %d\n",
+	if (WARN_ON(entropy_count < 0)) {
+		pr_warn("negative entropy count: pool %s count %d\n",
 			r->name, entropy_count);
-		WARN_ON(1);
 		entropy_count = 0;
 	}
 	nfrac = ibytes << (ENTROPY_SHIFT + 3);
@@ -1481,8 +1348,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 		goto retry;
 
 	trace_debit_entropy(r->name, 8 * ibytes);
-	if (ibytes &&
-	    (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
+	if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
 		wake_up_interruptible(&random_write_wait);
 		kill_fasync(&fasync, SIGIO, POLL_OUT);
 	}
@@ -1603,7 +1469,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 			spin_unlock_irqrestore(&r->lock, flags);
 			trace_extract_entropy(r->name, EXTRACT_SIZE,
 					      ENTROPY_BITS(r), _RET_IP_);
-			xfer_secondary_pool(r, EXTRACT_SIZE);
 			extract_buf(r, tmp);
 			spin_lock_irqsave(&r->lock, flags);
 			memcpy(r->last_data, tmp, EXTRACT_SIZE);
@@ -1612,60 +1477,11 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 	}
 
 	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
-	xfer_secondary_pool(r, nbytes);
 	nbytes = account(r, nbytes, min, reserved);
 
 	return _extract_entropy(r, buf, nbytes, fips_enabled);
 }
 
-/*
- * This function extracts randomness from the "entropy pool", and
- * returns it in a userspace buffer.
- */
-static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
-				    size_t nbytes)
-{
-	ssize_t ret = 0, i;
-	__u8 tmp[EXTRACT_SIZE];
-	int large_request = (nbytes > 256);
-
-	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
-	if (!r->initialized && r->pull) {
-		xfer_secondary_pool(r, ENTROPY_BITS(r->pull)/8);
-		if (!r->initialized)
-			return 0;
-	}
-	xfer_secondary_pool(r, nbytes);
-	nbytes = account(r, nbytes, 0, 0);
-
-	while (nbytes) {
-		if (large_request && need_resched()) {
-			if (signal_pending(current)) {
-				if (ret == 0)
-					ret = -ERESTARTSYS;
-				break;
-			}
-			schedule();
-		}
-
-		extract_buf(r, tmp);
-		i = min_t(int, nbytes, EXTRACT_SIZE);
-		if (copy_to_user(buf, tmp, i)) {
-			ret = -EFAULT;
-			break;
-		}
-
-		nbytes -= i;
-		buf += i;
-		ret += i;
-	}
-
-	/* Wipe data just returned from memory */
-	memzero_explicit(tmp, sizeof(tmp));
-
-	return ret;
-}
-
 #define warn_unseeded_randomness(previous) \
 	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
 
@@ -1687,8 +1503,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
 	print_once = true;
 #endif
 	if (__ratelimit(&unseeded_warning))
-		pr_notice("random: %s called from %pS with crng_init=%d\n",
-			  func_name, caller, crng_init);
+		printk_deferred(KERN_NOTICE "random: %s called from %pS "
+				"with crng_init=%d\n", func_name, caller,
+				crng_init);
 }
 
 /*
@@ -1931,7 +1748,6 @@ static void __init init_std_data(struct entropy_store *r)
 	ktime_t now = ktime_get_real();
 	unsigned long rv;
 
-	r->last_pulled = jiffies;
 	mix_pool_bytes(r, &now, sizeof(now));
 	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
 		if (!arch_get_random_seed_long(&rv) &&
@@ -1955,7 +1771,6 @@ static void __init init_std_data(struct entropy_store *r)
 int __init rand_initialize(void)
 {
 	init_std_data(&input_pool);
-	init_std_data(&blocking_pool);
 	crng_initialize(&primary_crng);
 	crng_global_init_time = jiffies;
 	if (ratelimit_disable) {
@@ -1983,40 +1798,15 @@ void rand_initialize_disk(struct gendisk *disk)
 #endif
 
 static ssize_t
-_random_read(int nonblock, char __user *buf, size_t nbytes)
+urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
+		    loff_t *ppos)
 {
-	ssize_t n;
+	int ret;
 
-	if (nbytes == 0)
-		return 0;
-
-	nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
-	while (1) {
-		n = extract_entropy_user(&blocking_pool, buf, nbytes);
-		if (n < 0)
-			return n;
-		trace_random_read(n*8, (nbytes-n)*8,
-				  ENTROPY_BITS(&blocking_pool),
-				  ENTROPY_BITS(&input_pool));
-		if (n > 0)
-			return n;
-
-		/* Pool is (near) empty.  Maybe wait and retry. */
-		if (nonblock)
-			return -EAGAIN;
-
-		wait_event_interruptible(random_read_wait,
-		    blocking_pool.initialized &&
-		    (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits));
-		if (signal_pending(current))
-			return -ERESTARTSYS;
-	}
-}
-
-static ssize_t
-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
-{
-	return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
+	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
+	ret = extract_crng_user(buf, nbytes);
+	trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
+	return ret;
 }
 
 static ssize_t
@@ -2024,22 +1814,29 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
 	unsigned long flags;
 	static int maxwarn = 10;
-	int ret;
 
 	if (!crng_ready() && maxwarn > 0) {
 		maxwarn--;
 		if (__ratelimit(&urandom_warning))
-			printk(KERN_NOTICE "random: %s: uninitialized "
-			       "urandom read (%zd bytes read)\n",
-			       current->comm, nbytes);
+			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
+				  current->comm, nbytes);
 		spin_lock_irqsave(&primary_crng.lock, flags);
 		crng_init_cnt = 0;
 		spin_unlock_irqrestore(&primary_crng.lock, flags);
 	}
-	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
-	ret = extract_crng_user(buf, nbytes);
-	trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
-	return ret;
+
+	return urandom_read_nowarn(file, buf, nbytes, ppos);
+}
+
+static ssize_t
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+{
+	int ret;
+
+	ret = wait_for_random_bytes();
+	if (ret != 0)
+		return ret;
+	return urandom_read_nowarn(file, buf, nbytes, ppos);
 }
 
 static __poll_t
@@ -2047,10 +1844,10 @@ random_poll(struct file *file, poll_table * wait)
 {
 	__poll_t mask;
 
-	poll_wait(file, &random_read_wait, wait);
+	poll_wait(file, &crng_init_wait, wait);
 	poll_wait(file, &random_write_wait, wait);
 	mask = 0;
-	if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
+	if (crng_ready())
 		mask |= EPOLLIN | EPOLLRDNORM;
 	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
 		mask |= EPOLLOUT | EPOLLWRNORM;
@@ -2141,7 +1938,6 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		input_pool.entropy_count = 0;
-		blocking_pool.entropy_count = 0;
 		return 0;
 	case RNDRESEEDCRNG:
 		if (!capable(CAP_SYS_ADMIN))
@@ -2185,23 +1981,27 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
 {
 	int ret;
 
-	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
+	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
+		return -EINVAL;
+
+	/*
+	 * Requesting insecure and blocking randomness at the same time makes
+	 * no sense.
+	 */
+	if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
 		return -EINVAL;
 
 	if (count > INT_MAX)
 		count = INT_MAX;
 
-	if (flags & GRND_RANDOM)
-		return _random_read(flags & GRND_NONBLOCK, buf, count);
-
-	if (!crng_ready()) {
+	if (!(flags & GRND_INSECURE) && !crng_ready()) {
 		if (flags & GRND_NONBLOCK)
 			return -EAGAIN;
 		ret = wait_for_random_bytes();
 		if (unlikely(ret))
 			return ret;
 	}
-	return urandom_read(NULL, buf, count, NULL);
+	return urandom_read_nowarn(NULL, buf, count, NULL);
 }
 
 /********************************************************************
@@ -2214,8 +2014,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
 
 #include <linux/sysctl.h>
 
-static int min_read_thresh = 8, min_write_thresh;
-static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
+static int min_write_thresh;
 static int max_write_thresh = INPUT_POOL_WORDS * 32;
 static int random_min_urandom_seed = 60;
 static char sysctl_bootid[16];
@@ -2291,15 +2090,6 @@ struct ctl_table random_table[] = {
 		.data		= &input_pool.entropy_count,
 	},
 	{
-		.procname	= "read_wakeup_threshold",
-		.data		= &random_read_wakeup_bits,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &min_read_thresh,
-		.extra2		= &max_read_thresh,
-	},
-	{
 		.procname	= "write_wakeup_threshold",
 		.data		= &random_write_wakeup_bits,
 		.maxlen		= sizeof(int),
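With the blocking pool gone, /dev/random reads and poll() key off CRNG readiness, and
getrandom() gains GRND_INSECURE for never-blocking best-effort reads while rejecting the
GRND_INSECURE|GRND_RANDOM combination. A small userspace sketch, assuming glibc's
getrandom() wrapper is available; GRND_INSECURE is defined locally in case the installed
uapi headers predate it (illustrative only):

#include <errno.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/random.h>

#ifndef GRND_INSECURE
#define GRND_INSECURE 0x0004	/* may be missing from older uapi headers */
#endif

int main(void)
{
	unsigned char buf[16];
	struct pollfd pfd;
	FILE *f;
	ssize_t n;

	/* Never blocks: returns whatever the CRNG has, even before it is
	 * fully seeded, and without the "uninitialized urandom" warning. */
	n = getrandom(buf, sizeof(buf), GRND_INSECURE);
	printf("getrandom(GRND_INSECURE) -> %zd\n", n);

	/* GRND_INSECURE and GRND_RANDOM together are rejected with EINVAL. */
	n = getrandom(buf, sizeof(buf), GRND_INSECURE | GRND_RANDOM);
	printf("conflicting flags -> %zd (%s)\n", n, strerror(errno));

	/* /dev/random now polls readable as soon as the CRNG is ready,
	 * instead of tracking an entropy-count watermark. */
	f = fopen("/dev/random", "r");
	if (!f)
		return 1;
	pfd.fd = fileno(f);
	pfd.events = POLLIN;
	printf("poll(/dev/random) -> %d\n", poll(&pfd, 1, 0));
	fclose(f);
	return 0;
}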
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index 4f24e46..56db949 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -15,10 +15,11 @@
 #include <linux/serial.h>
 #include <linux/tty.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 
 struct ttyprintk_port {
 	struct tty_port port;
-	struct mutex port_write_mutex;
+	spinlock_t spinlock;
 };
 
 static struct ttyprintk_port tpk_port;
@@ -99,11 +100,12 @@ static int tpk_open(struct tty_struct *tty, struct file *filp)
 static void tpk_close(struct tty_struct *tty, struct file *filp)
 {
 	struct ttyprintk_port *tpkp = tty->driver_data;
+	unsigned long flags;
 
-	mutex_lock(&tpkp->port_write_mutex);
+	spin_lock_irqsave(&tpkp->spinlock, flags);
 	/* flush tpk_printk buffer */
 	tpk_printk(NULL, 0);
-	mutex_unlock(&tpkp->port_write_mutex);
+	spin_unlock_irqrestore(&tpkp->spinlock, flags);
 
 	tty_port_close(&tpkp->port, tty, filp);
 }
@@ -115,13 +117,14 @@ static int tpk_write(struct tty_struct *tty,
 		const unsigned char *buf, int count)
 {
 	struct ttyprintk_port *tpkp = tty->driver_data;
+	unsigned long flags;
 	int ret;
 
 
 	/* exclusive use of tpk_printk within this tty */
-	mutex_lock(&tpkp->port_write_mutex);
+	spin_lock_irqsave(&tpkp->spinlock, flags);
 	ret = tpk_printk(buf, count);
-	mutex_unlock(&tpkp->port_write_mutex);
+	spin_unlock_irqrestore(&tpkp->spinlock, flags);
 
 	return ret;
 }
@@ -171,7 +174,7 @@ static int __init ttyprintk_init(void)
 {
 	int ret = -ENOMEM;
 
-	mutex_init(&tpk_port.port_write_mutex);
+	spin_lock_init(&tpk_port.spinlock);
 
 	ttyprintk_driver = tty_alloc_driver(1,
 			TTY_DRIVER_RESET_TERMIOS |
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 45653a0..bcb257b 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -27,7 +27,7 @@
 	tristate "Clock driver for WM831x/2x PMICs"
 	depends on MFD_WM831X
 	---help---
-          Supports the clocking subsystem of the WM831x/2x series of
+	  Supports the clocking subsystem of the WM831x/2x series of
 	  PMICs from Wolfson Microelectronics.
 
 source "drivers/clk/versatile/Kconfig"
@@ -174,6 +174,18 @@
 	help
 	  If you say yes here you get support for the CS2000 clock multiplier.
 
+config COMMON_CLK_FSL_SAI
+	bool "Clock driver for BCLK of Freescale SAI cores"
+	depends on ARCH_LAYERSCAPE || COMPILE_TEST
+	help
+	  This driver supports the Freescale SAI (Synchronous Audio Interface)
+	  to be used as a generic clock output. Some SoCs have restrictions
+	  regarding the possible pin multiplexer settings. E.g. on some SoCs
+	  two SAI interfaces can only be enabled together. If just one is
+	  needed, the BCLK pin of the second one can be used as general
+	  purpose clock output. Ideally, it can be used to drive an audio
+	  codec (sometimes known as MCLK).
+
 config COMMON_CLK_GEMINI
 	bool "Clock driver for Cortina Systems Gemini SoC"
 	depends on ARCH_GEMINI || COMPILE_TEST
@@ -225,6 +237,16 @@
 	  This adds the clock driver support for Freescale QorIQ platforms
 	  using common clock framework.
 
+config CLK_LS1028A_PLLDIG
+	tristate "Clock driver for LS1028A Display output"
+	depends on ARCH_LAYERSCAPE || COMPILE_TEST
+	default ARCH_LAYERSCAPE
+	help
+	  This driver supports the Display output interfaces (LCD, DPHY) pixel
+	  clocks of the QorIQ Layerscape LS1028A, as implemented by the TSMC
+	  CLN28HPM PLL. Not all features of the PLL are currently supported by
+	  the driver. By default, the PLL is configured in bypass mode.
+
 config COMMON_CLK_XGENE
 	bool "Clock driver for APM XGene SoC"
 	default ARCH_XGENE
@@ -305,10 +327,10 @@
 	  Support for Marvell MMP2 and MMP3 SoC clocks
 
 config COMMON_CLK_BD718XX
-	tristate "Clock driver for ROHM BD718x7 PMIC"
-	depends on MFD_ROHM_BD718XX || MFD_ROHM_BD70528
+	tristate "Clock driver for 32K clk gates on ROHM PMICs"
+	depends on MFD_ROHM_BD718XX || MFD_ROHM_BD70528 || MFD_ROHM_BD71828
 	help
-	  This driver supports ROHM BD71837, ROHM BD71847 and
+	  This driver supports ROHM BD71837, ROHM BD71847, ROHM BD71828 and
 	  ROHM BD70528 PMICs clock gates.
 
 config COMMON_CLK_FIXED_MMIO
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 0696a0c..f4169cc 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -29,6 +29,7 @@
 obj-$(CONFIG_COMMON_CLK_CS2000_CP)	+= clk-cs2000-cp.o
 obj-$(CONFIG_ARCH_EFM32)		+= clk-efm32gg.o
 obj-$(CONFIG_COMMON_CLK_FIXED_MMIO)	+= clk-fixed-mmio.o
+obj-$(CONFIG_COMMON_CLK_FSL_SAI)	+= clk-fsl-sai.o
 obj-$(CONFIG_COMMON_CLK_GEMINI)		+= clk-gemini.o
 obj-$(CONFIG_COMMON_CLK_ASPEED)		+= clk-aspeed.o
 obj-$(CONFIG_MACH_ASPEED_G6)		+= clk-ast2600.o
@@ -44,6 +45,7 @@
 obj-$(CONFIG_ARCH_NSPIRE)		+= clk-nspire.o
 obj-$(CONFIG_COMMON_CLK_OXNAS)		+= clk-oxnas.o
 obj-$(CONFIG_COMMON_CLK_PALMAS)		+= clk-palmas.o
+obj-$(CONFIG_CLK_LS1028A_PLLDIG)	+= clk-plldig.o
 obj-$(CONFIG_COMMON_CLK_PWM)		+= clk-pwm.o
 obj-$(CONFIG_CLK_QORIQ)			+= clk-qoriq.o
 obj-$(CONFIG_COMMON_CLK_RK808)		+= clk-rk808.o
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index 34b8178..dfb354a 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -25,7 +25,8 @@
 #define		PMC_PLL_CTRL1_MUL_MSK		GENMASK(30, 24)
 
 #define PMC_PLL_ACR	0x18
-#define		PMC_PLL_ACR_DEFAULT		0x1b040010UL
+#define		PMC_PLL_ACR_DEFAULT_UPLL	0x12020010UL
+#define		PMC_PLL_ACR_DEFAULT_PLLA	0x00020010UL
 #define		PMC_PLL_ACR_UTMIVR		BIT(12)
 #define		PMC_PLL_ACR_UTMIBG		BIT(13)
 #define		PMC_PLL_ACR_LOOP_FILTER_MSK	GENMASK(31, 24)
@@ -88,7 +89,10 @@ static int sam9x60_pll_prepare(struct clk_hw *hw)
 	}
 
 	/* Recommended value for PMC_PLL_ACR */
-	val = PMC_PLL_ACR_DEFAULT;
+	if (pll->characteristics->upll)
+		val = PMC_PLL_ACR_DEFAULT_UPLL;
+	else
+		val = PMC_PLL_ACR_DEFAULT_PLLA;
 	regmap_write(regmap, PMC_PLL_ACR, val);
 
 	regmap_write(regmap, PMC_PLL_CTRL1,
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index 86238d5..77398ae 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -47,6 +47,7 @@ static const struct clk_programmable_layout sam9x60_programmable_layout = {
 	.pres_shift = 8,
 	.css_mask = 0x1f,
 	.have_slck_mck = 0,
+	.is_pres_direct = 1,
 };
 
 static const struct clk_pcr_layout sam9x60_pcr_layout = {
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
index dd0f90c..536b59a 100644
--- a/drivers/clk/clk-asm9260.c
+++ b/drivers/clk/clk-asm9260.c
@@ -260,7 +260,6 @@ static void __init asm9260_acc_init(struct device_node *np)
 	const char *ref_clk, *pll_clk = "pll";
 	u32 rate;
 	int n;
-	u32 accuracy = 0;
 
 	clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
 	if (!clk_data)
@@ -275,10 +274,11 @@ static void __init asm9260_acc_init(struct device_node *np)
 	/* register pll */
 	rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000;
 
+	/* TODO: Convert to DT parent scheme */
 	ref_clk = of_clk_get_parent_name(np, 0);
-	accuracy = clk_get_accuracy(__clk_lookup(ref_clk));
-	hw = clk_hw_register_fixed_rate_with_accuracy(NULL, pll_clk,
-			ref_clk, 0, rate, accuracy);
+	hw = __clk_hw_register_fixed_rate_with_accuracy(NULL, NULL, pll_clk,
+			ref_clk, NULL, NULL, 0, rate, 0,
+			CLK_FIXED_RATE_PARENT_ACCURACY);
 
 	if (IS_ERR(hw))
 		panic("%pOFn: can't register REFCLK. Check DT!", np);
diff --git a/drivers/clk/clk-bd718x7.c b/drivers/clk/clk-bd718x7.c
index 00926c5..b52e8d6 100644
--- a/drivers/clk/clk-bd718x7.c
+++ b/drivers/clk/clk-bd718x7.c
@@ -7,12 +7,25 @@
 #include <linux/err.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <linux/mfd/rohm-bd718x7.h>
-#include <linux/mfd/rohm-bd70528.h>
+#include <linux/mfd/rohm-generic.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/regmap.h>
 
+/* clk control registers */
+/* BD70528 */
+#define BD70528_REG_OUT32K	0x2c
+/* BD71828 */
+#define BD71828_REG_OUT32K	0x4B
+/* BD71837 and BD71847 */
+#define BD718XX_REG_OUT32K	0x2E
+
+/*
+ * BD71837, BD71847, BD70528 and BD71828 all use bit [0] for clk output control
+ */
+#define CLK_OUT_EN_MASK		BIT(0)
+
+
 struct bd718xx_clk {
 	struct clk_hw hw;
 	u8 reg;
@@ -21,10 +34,8 @@ struct bd718xx_clk {
 	struct rohm_regmap_dev *mfd;
 };
 
-static int bd71837_clk_set(struct clk_hw *hw, int status)
+static int bd71837_clk_set(struct bd718xx_clk *c, unsigned int status)
 {
-	struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw);
-
 	return regmap_update_bits(c->mfd->regmap, c->reg, c->mask, status);
 }
 
@@ -33,14 +44,16 @@ static void bd71837_clk_disable(struct clk_hw *hw)
 	int rv;
 	struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw);
 
-	rv = bd71837_clk_set(hw, 0);
+	rv = bd71837_clk_set(c, 0);
 	if (rv)
 		dev_dbg(&c->pdev->dev, "Failed to disable 32K clk (%d)\n", rv);
 }
 
 static int bd71837_clk_enable(struct clk_hw *hw)
 {
-	return bd71837_clk_set(hw, 1);
+	struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw);
+
+	return bd71837_clk_set(c, 0xffffffff);
 }
 
 static int bd71837_clk_is_enabled(struct clk_hw *hw)
@@ -74,6 +87,7 @@ static int bd71837_clk_probe(struct platform_device *pdev)
 		.name = "bd718xx-32k-out",
 		.ops = &bd71837_clk_ops,
 	};
+	enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;
 
 	c = devm_kzalloc(&pdev->dev, sizeof(*c), GFP_KERNEL);
 	if (!c)
@@ -87,15 +101,19 @@ static int bd71837_clk_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "No parent clk found\n");
 		return -EINVAL;
 	}
-	switch (mfd->chip_type) {
+	switch (chip) {
 	case ROHM_CHIP_TYPE_BD71837:
 	case ROHM_CHIP_TYPE_BD71847:
 		c->reg = BD718XX_REG_OUT32K;
-		c->mask = BD718XX_OUT32K_EN;
+		c->mask = CLK_OUT_EN_MASK;
+		break;
+	case ROHM_CHIP_TYPE_BD71828:
+		c->reg = BD71828_REG_OUT32K;
+		c->mask = CLK_OUT_EN_MASK;
 		break;
 	case ROHM_CHIP_TYPE_BD70528:
-		c->reg = BD70528_REG_CLK_OUT;
-		c->mask = BD70528_CLK_OUT_EN_MASK;
+		c->reg = BD70528_REG_OUT32K;
+		c->mask = CLK_OUT_EN_MASK;
 		break;
 	default:
 		dev_err(&pdev->dev, "Unknown clk chip\n");
@@ -121,11 +139,21 @@ static int bd71837_clk_probe(struct platform_device *pdev)
 	return rval;
 }
 
+static const struct platform_device_id bd718x7_clk_id[] = {
+	{ "bd71837-clk", ROHM_CHIP_TYPE_BD71837 },
+	{ "bd71847-clk", ROHM_CHIP_TYPE_BD71847 },
+	{ "bd70528-clk", ROHM_CHIP_TYPE_BD70528 },
+	{ "bd71828-clk", ROHM_CHIP_TYPE_BD71828 },
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, bd718x7_clk_id);
+
 static struct platform_driver bd71837_clk = {
 	.driver = {
 		.name = "bd718xx-clk",
 	},
 	.probe = bd71837_clk_probe,
+	.id_table = bd718x7_clk_id,
 };
 
 module_platform_driver(bd71837_clk);
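The probe path now derives the chip variant from the platform_device_id table's driver_data
instead of reaching into MFD internals. A userspace analogue of that id-table lookup (the
table contents mirror the patch; the lookup helper itself is illustrative):

#include <stdio.h>
#include <string.h>

enum chip_type { BD71837, BD71847, BD70528, BD71828 };

/* Userspace analogue of a struct platform_device_id table. */
struct device_id {
	const char *name;
	enum chip_type driver_data;
};

static const struct device_id clk_ids[] = {
	{ "bd71837-clk", BD71837 },
	{ "bd71847-clk", BD71847 },
	{ "bd70528-clk", BD70528 },
	{ "bd71828-clk", BD71828 },
	{ NULL, 0 },
};

static const struct device_id *match_id(const char *name)
{
	const struct device_id *id;

	for (id = clk_ids; id->name; id++)
		if (!strcmp(id->name, name))
			return id;
	return NULL;
}

int main(void)
{
	const struct device_id *id = match_id("bd71828-clk");

	if (id)
		printf("%s -> chip type %d\n", id->name, id->driver_data);
	return 0;
}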
diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c
index 4cd175a..e6d6599 100644
--- a/drivers/clk/clk-bm1880.c
+++ b/drivers/clk/clk-bm1880.c
@@ -474,11 +474,10 @@ static struct bm1880_composite_clock bm1880_composite_clks[] = {
 static unsigned long bm1880_pll_rate_calc(u32 regval, unsigned long parent_rate)
 {
 	u64 numerator;
-	u32 fbdiv, fref, refdiv;
+	u32 fbdiv, refdiv;
 	u32 postdiv1, postdiv2, denominator;
 
 	fbdiv = (regval >> 16) & 0xfff;
-	fref = parent_rate;
 	refdiv = regval & 0x1f;
 	postdiv1 = (regval >> 8) & 0x7;
 	postdiv2 = (regval >> 12) & 0x7;
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 3e9c3e6..7376f57 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -199,8 +199,9 @@ static void clk_composite_disable(struct clk_hw *hw)
 	gate_ops->disable(gate_hw);
 }
 
-struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
-			const char * const *parent_names, int num_parents,
+static struct clk_hw *__clk_hw_register_composite(struct device *dev,
+			const char *name, const char * const *parent_names,
+			const struct clk_parent_data *pdata, int num_parents,
 			struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
 			struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
 			struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
@@ -218,7 +219,10 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
 
 	init.name = name;
 	init.flags = flags;
-	init.parent_names = parent_names;
+	if (parent_names)
+		init.parent_names = parent_names;
+	else
+		init.parent_data = pdata;
 	init.num_parents = num_parents;
 	hw = &composite->hw;
 
@@ -312,6 +316,34 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
 	return hw;
 }
 
+struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
+			const char * const *parent_names, int num_parents,
+			struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+			struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+			struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+			unsigned long flags)
+{
+	return __clk_hw_register_composite(dev, name, parent_names, NULL,
+					   num_parents, mux_hw, mux_ops,
+					   rate_hw, rate_ops, gate_hw,
+					   gate_ops, flags);
+}
+
+struct clk_hw *clk_hw_register_composite_pdata(struct device *dev,
+			const char *name,
+			const struct clk_parent_data *parent_data,
+			int num_parents,
+			struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+			struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+			struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+			unsigned long flags)
+{
+	return __clk_hw_register_composite(dev, name, NULL, parent_data,
+					   num_parents, mux_hw, mux_ops,
+					   rate_hw, rate_ops, gate_hw,
+					   gate_ops, flags);
+}
+
 struct clk *clk_register_composite(struct device *dev, const char *name,
 			const char * const *parent_names, int num_parents,
 			struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
@@ -329,6 +361,24 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
 	return hw->clk;
 }
 
+struct clk *clk_register_composite_pdata(struct device *dev, const char *name,
+			const struct clk_parent_data *parent_data,
+			int num_parents,
+			struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+			struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+			struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+			unsigned long flags)
+{
+	struct clk_hw *hw;
+
+	hw = clk_hw_register_composite_pdata(dev, name, parent_data,
+			num_parents, mux_hw, mux_ops, rate_hw, rate_ops,
+			gate_hw, gate_ops, flags);
+	if (IS_ERR(hw))
+		return ERR_CAST(hw);
+	return hw->clk;
+}
+
 void clk_unregister_composite(struct clk *clk)
 {
 	struct clk_composite *composite;
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 098b2b0..8de12cb 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -463,11 +463,12 @@ const struct clk_ops clk_divider_ro_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
 
-static struct clk_hw *_register_divider(struct device *dev, const char *name,
-		const char *parent_name, unsigned long flags,
-		void __iomem *reg, u8 shift, u8 width,
-		u8 clk_divider_flags, const struct clk_div_table *table,
-		spinlock_t *lock)
+struct clk_hw *__clk_hw_register_divider(struct device *dev,
+		struct device_node *np, const char *name,
+		const char *parent_name, const struct clk_hw *parent_hw,
+		const struct clk_parent_data *parent_data, unsigned long flags,
+		void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+		const struct clk_div_table *table, spinlock_t *lock)
 {
 	struct clk_divider *div;
 	struct clk_hw *hw;
@@ -514,55 +515,7 @@ static struct clk_hw *_register_divider(struct device *dev, const char *name,
 
 	return hw;
 }
-
-/**
- * clk_register_divider - register a divider clock with the clock framework
- * @dev: device registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @reg: register address to adjust divider
- * @shift: number of bits to shift the bitfield
- * @width: width of the bitfield
- * @clk_divider_flags: divider-specific flags for this clock
- * @lock: shared register lock for this clock
- */
-struct clk *clk_register_divider(struct device *dev, const char *name,
-		const char *parent_name, unsigned long flags,
-		void __iomem *reg, u8 shift, u8 width,
-		u8 clk_divider_flags, spinlock_t *lock)
-{
-	struct clk_hw *hw;
-
-	hw =  _register_divider(dev, name, parent_name, flags, reg, shift,
-			width, clk_divider_flags, NULL, lock);
-	if (IS_ERR(hw))
-		return ERR_CAST(hw);
-	return hw->clk;
-}
-EXPORT_SYMBOL_GPL(clk_register_divider);
-
-/**
- * clk_hw_register_divider - register a divider clock with the clock framework
- * @dev: device registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @reg: register address to adjust divider
- * @shift: number of bits to shift the bitfield
- * @width: width of the bitfield
- * @clk_divider_flags: divider-specific flags for this clock
- * @lock: shared register lock for this clock
- */
-struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name,
-		const char *parent_name, unsigned long flags,
-		void __iomem *reg, u8 shift, u8 width,
-		u8 clk_divider_flags, spinlock_t *lock)
-{
-	return _register_divider(dev, name, parent_name, flags, reg, shift,
-			width, clk_divider_flags, NULL, lock);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_divider);
+EXPORT_SYMBOL_GPL(__clk_hw_register_divider);
 
 /**
  * clk_register_divider_table - register a table based divider clock with
@@ -586,39 +539,15 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
 {
 	struct clk_hw *hw;
 
-	hw =  _register_divider(dev, name, parent_name, flags, reg, shift,
-			width, clk_divider_flags, table, lock);
+	hw =  __clk_hw_register_divider(dev, NULL, name, parent_name, NULL,
+			NULL, flags, reg, shift, width, clk_divider_flags,
+			table, lock);
 	if (IS_ERR(hw))
 		return ERR_CAST(hw);
 	return hw->clk;
 }
 EXPORT_SYMBOL_GPL(clk_register_divider_table);
 
-/**
- * clk_hw_register_divider_table - register a table based divider clock with
- * the clock framework
- * @dev: device registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @reg: register address to adjust divider
- * @shift: number of bits to shift the bitfield
- * @width: width of the bitfield
- * @clk_divider_flags: divider-specific flags for this clock
- * @table: array of divider/value pairs ending with a div set to 0
- * @lock: shared register lock for this clock
- */
-struct clk_hw *clk_hw_register_divider_table(struct device *dev,
-		const char *name, const char *parent_name, unsigned long flags,
-		void __iomem *reg, u8 shift, u8 width,
-		u8 clk_divider_flags, const struct clk_div_table *table,
-		spinlock_t *lock)
-{
-	return _register_divider(dev, name, parent_name, flags, reg, shift,
-			width, clk_divider_flags, table, lock);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_divider_table);
-
 void clk_unregister_divider(struct clk *clk)
 {
 	struct clk_divider *div;
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 2c4486c..77499a2 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -24,6 +24,8 @@
  * parent - fixed parent.  No clk_set_parent support
  */
 
+#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
+
 static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
 		unsigned long parent_rate)
 {
@@ -33,7 +35,12 @@ static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
 static unsigned long clk_fixed_rate_recalc_accuracy(struct clk_hw *hw,
 		unsigned long parent_accuracy)
 {
-	return to_clk_fixed_rate(hw)->fixed_accuracy;
+	struct clk_fixed_rate *fixed = to_clk_fixed_rate(hw);
+
+	if (fixed->flags & CLK_FIXED_RATE_PARENT_ACCURACY)
+		return parent_accuracy;
+
+	return fixed->fixed_accuracy;
 }
 
 const struct clk_ops clk_fixed_rate_ops = {
@@ -42,24 +49,17 @@ const struct clk_ops clk_fixed_rate_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
 
-/**
- * clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with
- * the clock framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @fixed_rate: non-adjustable clock rate
- * @fixed_accuracy: non-adjustable clock rate
- */
-struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
-		const char *name, const char *parent_name, unsigned long flags,
-		unsigned long fixed_rate, unsigned long fixed_accuracy)
+struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
+		struct device_node *np, const char *name,
+		const char *parent_name, const struct clk_hw *parent_hw,
+		const struct clk_parent_data *parent_data, unsigned long flags,
+		unsigned long fixed_rate, unsigned long fixed_accuracy,
+		unsigned long clk_fixed_flags)
 {
 	struct clk_fixed_rate *fixed;
 	struct clk_hw *hw;
 	struct clk_init_data init = {};
-	int ret;
+	int ret = -EINVAL;
 
 	/* allocate fixed-rate clock */
 	fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
@@ -69,17 +69,26 @@ struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
 	init.name = name;
 	init.ops = &clk_fixed_rate_ops;
 	init.flags = flags;
-	init.parent_names = (parent_name ? &parent_name: NULL);
-	init.num_parents = (parent_name ? 1 : 0);
+	init.parent_names = parent_name ? &parent_name : NULL;
+	init.parent_hws = parent_hw ? &parent_hw : NULL;
+	init.parent_data = parent_data;
+	if (parent_name || parent_hw || parent_data)
+		init.num_parents = 1;
+	else
+		init.num_parents = 0;
 
 	/* struct clk_fixed_rate assignments */
+	fixed->flags = clk_fixed_flags;
 	fixed->fixed_rate = fixed_rate;
 	fixed->fixed_accuracy = fixed_accuracy;
 	fixed->hw.init = &init;
 
 	/* register the clock */
 	hw = &fixed->hw;
-	ret = clk_hw_register(dev, hw);
+	if (dev || !np)
+		ret = clk_hw_register(dev, hw);
+	else if (np)
+		ret = of_clk_hw_register(np, hw);
 	if (ret) {
 		kfree(fixed);
 		hw = ERR_PTR(ret);
@@ -87,46 +96,19 @@ struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
 
 	return hw;
 }
-EXPORT_SYMBOL_GPL(clk_hw_register_fixed_rate_with_accuracy);
-
-struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
-		const char *name, const char *parent_name, unsigned long flags,
-		unsigned long fixed_rate, unsigned long fixed_accuracy)
-{
-	struct clk_hw *hw;
-
-	hw = clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name,
-			flags, fixed_rate, fixed_accuracy);
-	if (IS_ERR(hw))
-		return ERR_CAST(hw);
-	return hw->clk;
-}
-EXPORT_SYMBOL_GPL(clk_register_fixed_rate_with_accuracy);
-
-/**
- * clk_hw_register_fixed_rate - register fixed-rate clock with the clock
- * framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @fixed_rate: non-adjustable clock rate
- */
-struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name,
-		const char *parent_name, unsigned long flags,
-		unsigned long fixed_rate)
-{
-	return clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name,
-						     flags, fixed_rate, 0);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_fixed_rate);
+EXPORT_SYMBOL_GPL(__clk_hw_register_fixed_rate);
 
 struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
 		const char *parent_name, unsigned long flags,
 		unsigned long fixed_rate)
 {
-	return clk_register_fixed_rate_with_accuracy(dev, name, parent_name,
-						     flags, fixed_rate, 0);
+	struct clk_hw *hw;
+
+	hw = clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name,
+						      flags, fixed_rate, 0);
+	if (IS_ERR(hw))
+		return ERR_CAST(hw);
+	return hw->clk;
 }
 EXPORT_SYMBOL_GPL(clk_register_fixed_rate);
 
@@ -155,9 +137,9 @@ void clk_hw_unregister_fixed_rate(struct clk_hw *hw)
 EXPORT_SYMBOL_GPL(clk_hw_unregister_fixed_rate);
 
 #ifdef CONFIG_OF
-static struct clk *_of_fixed_clk_setup(struct device_node *node)
+static struct clk_hw *_of_fixed_clk_setup(struct device_node *node)
 {
-	struct clk *clk;
+	struct clk_hw *hw;
 	const char *clk_name = node->name;
 	u32 rate;
 	u32 accuracy = 0;
@@ -170,18 +152,18 @@ static struct clk *_of_fixed_clk_setup(struct device_node *node)
 
 	of_property_read_string(node, "clock-output-names", &clk_name);
 
-	clk = clk_register_fixed_rate_with_accuracy(NULL, clk_name, NULL,
+	hw = clk_hw_register_fixed_rate_with_accuracy(NULL, clk_name, NULL,
 						    0, rate, accuracy);
-	if (IS_ERR(clk))
-		return clk;
+	if (IS_ERR(hw))
+		return hw;
 
-	ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+	ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
 	if (ret) {
-		clk_unregister(clk);
+		clk_hw_unregister_fixed_rate(hw);
 		return ERR_PTR(ret);
 	}
 
-	return clk;
+	return hw;
 }
 
 /**
@@ -195,27 +177,27 @@ CLK_OF_DECLARE(fixed_clk, "fixed-clock", of_fixed_clk_setup);
 
 static int of_fixed_clk_remove(struct platform_device *pdev)
 {
-	struct clk *clk = platform_get_drvdata(pdev);
+	struct clk_hw *hw = platform_get_drvdata(pdev);
 
 	of_clk_del_provider(pdev->dev.of_node);
-	clk_unregister_fixed_rate(clk);
+	clk_hw_unregister_fixed_rate(hw);
 
 	return 0;
 }
 
 static int of_fixed_clk_probe(struct platform_device *pdev)
 {
-	struct clk *clk;
+	struct clk_hw *hw;
 
 	/*
 	 * This function is not executed when of_fixed_clk_setup
 	 * succeeded.
 	 */
-	clk = _of_fixed_clk_setup(pdev->dev.of_node);
-	if (IS_ERR(clk))
-		return PTR_ERR(clk);
+	hw = _of_fixed_clk_setup(pdev->dev.of_node);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
 
-	platform_set_drvdata(pdev, clk);
+	platform_set_drvdata(pdev, hw);
 
 	return 0;
 }
@@ -224,7 +206,6 @@ static const struct of_device_id of_fixed_clk_ids[] = {
 	{ .compatible = "fixed-clock" },
 	{ }
 };
-MODULE_DEVICE_TABLE(of, of_fixed_clk_ids);
 
 static struct platform_driver of_fixed_clk_driver = {
 	.driver = {
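
For context before moving on to the next file: the consolidation above funnels every
fixed-rate variant through __clk_hw_register_fixed_rate(), which takes either a struct
device or a device_node plus any one of parent_name, parent_hw or parent_data. Below is
a minimal sketch of the usual call site; the foo_* names and the 25 MHz rate are
invented, only the long-standing clk_hw_register_fixed_rate() wrapper and
of_clk_hw_simple_get() are assumed, and the remove/error paths are trimmed for brevity.

    #include <linux/clk-provider.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    /* Hypothetical provider registering a 25 MHz root clock from probe. */
    static int foo_clk_probe(struct platform_device *pdev)
    {
        struct clk_hw *hw;

        /* no parent, no framework flags, fixed 25 MHz */
        hw = clk_hw_register_fixed_rate(&pdev->dev, "foo-osc", NULL, 0,
                                        25000000);
        if (IS_ERR(hw))
            return PTR_ERR(hw);

        /* expose the clock to DT consumers of this node */
        return devm_of_clk_add_hw_provider(&pdev->dev,
                                           of_clk_hw_simple_get, hw);
    }
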
diff --git a/drivers/clk/clk-fsl-sai.c b/drivers/clk/clk-fsl-sai.c
new file mode 100644
index 0000000..0221180
--- /dev/null
+++ b/drivers/clk/clk-fsl-sai.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale SAI BCLK as a generic clock driver
+ *
+ * Copyright 2020 Michael Walle <michael@walle.cc>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#define I2S_CSR		0x00
+#define I2S_CR2		0x08
+#define CSR_BCE_BIT	28
+#define CR2_BCD		BIT(24)
+#define CR2_DIV_SHIFT	0
+#define CR2_DIV_WIDTH	8
+
+struct fsl_sai_clk {
+	struct clk_divider div;
+	struct clk_gate gate;
+	spinlock_t lock;
+};
+
+static int fsl_sai_clk_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct fsl_sai_clk *sai_clk;
+	struct clk_parent_data pdata = { .index = 0 };
+	void __iomem *base;
+	struct clk_hw *hw;
+	struct resource *res;
+
+	sai_clk = devm_kzalloc(dev, sizeof(*sai_clk), GFP_KERNEL);
+	if (!sai_clk)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	spin_lock_init(&sai_clk->lock);
+
+	sai_clk->gate.reg = base + I2S_CSR;
+	sai_clk->gate.bit_idx = CSR_BCE_BIT;
+	sai_clk->gate.lock = &sai_clk->lock;
+
+	sai_clk->div.reg = base + I2S_CR2;
+	sai_clk->div.shift = CR2_DIV_SHIFT;
+	sai_clk->div.width = CR2_DIV_WIDTH;
+	sai_clk->div.lock = &sai_clk->lock;
+
+	/* set clock direction, we are the BCLK master */
+	writel(CR2_BCD, base + I2S_CR2);
+
+	hw = clk_hw_register_composite_pdata(dev, dev->of_node->name,
+					     &pdata, 1, NULL, NULL,
+					     &sai_clk->div.hw,
+					     &clk_divider_ops,
+					     &sai_clk->gate.hw,
+					     &clk_gate_ops,
+					     CLK_SET_RATE_GATE);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
+
+	return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
+}
+
+static const struct of_device_id of_fsl_sai_clk_ids[] = {
+	{ .compatible = "fsl,vf610-sai-clock" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, of_fsl_sai_clk_ids);
+
+static struct platform_driver fsl_sai_clk_driver = {
+	.probe = fsl_sai_clk_probe,
+	.driver		= {
+		.name	= "fsl-sai-clk",
+		.of_match_table = of_fsl_sai_clk_ids,
+	},
+};
+module_platform_driver(fsl_sai_clk_driver);
+
+MODULE_DESCRIPTION("Freescale SAI bitclock-as-a-clock driver");
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:fsl-sai-clk");
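
One consumer-side note on the new SAI bit-clock provider above: because it registers the
composite with CLK_SET_RATE_GATE, the divider may only be reprogrammed while the clock is
still gated, so consumers must set the rate before preparing/enabling it. A rough sketch
follows; the bar_* names and the 12.288 MHz rate are made up, and only standard consumer
API calls are used.

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    /* Hypothetical audio driver consuming the SAI BCLK via its DT phandle. */
    static int bar_probe(struct platform_device *pdev)
    {
        struct clk *bclk;
        int ret;

        bclk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(bclk))
            return PTR_ERR(bclk);

        /* CLK_SET_RATE_GATE: program the rate while still gated */
        ret = clk_set_rate(bclk, 12288000);
        if (ret)
            return ret;

        return clk_prepare_enable(bclk);
    }
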
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 670053c..2ca1f2a 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -123,26 +123,18 @@ const struct clk_ops clk_gate_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_gate_ops);
 
-/**
- * clk_hw_register_gate - register a gate clock with the clock framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_name: name of this clock's parent
- * @flags: framework-specific flags for this clock
- * @reg: register address to control gating of this clock
- * @bit_idx: which bit in the register controls gating of this clock
- * @clk_gate_flags: gate-specific flags for this clock
- * @lock: shared register lock for this clock
- */
-struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
-		const char *parent_name, unsigned long flags,
+struct clk_hw *__clk_hw_register_gate(struct device *dev,
+		struct device_node *np, const char *name,
+		const char *parent_name, const struct clk_hw *parent_hw,
+		const struct clk_parent_data *parent_data,
+		unsigned long flags,
 		void __iomem *reg, u8 bit_idx,
 		u8 clk_gate_flags, spinlock_t *lock)
 {
 	struct clk_gate *gate;
 	struct clk_hw *hw;
 	struct clk_init_data init = {};
-	int ret;
+	int ret = -EINVAL;
 
 	if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
 		if (bit_idx > 15) {
@@ -160,7 +152,12 @@ struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
 	init.ops = &clk_gate_ops;
 	init.flags = flags;
 	init.parent_names = parent_name ? &parent_name : NULL;
-	init.num_parents = parent_name ? 1 : 0;
+	init.parent_hws = parent_hw ? &parent_hw : NULL;
+	init.parent_data = parent_data;
+	if (parent_name || parent_hw || parent_data)
+		init.num_parents = 1;
+	else
+		init.num_parents = 0;
 
 	/* struct clk_gate assignments */
 	gate->reg = reg;
@@ -170,15 +167,19 @@ struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
 	gate->hw.init = &init;
 
 	hw = &gate->hw;
-	ret = clk_hw_register(dev, hw);
+	if (dev || !np)
+		ret = clk_hw_register(dev, hw);
+	else if (np)
+		ret = of_clk_hw_register(np, hw);
 	if (ret) {
 		kfree(gate);
 		hw = ERR_PTR(ret);
 	}
 
 	return hw;
+
 }
-EXPORT_SYMBOL_GPL(clk_hw_register_gate);
+EXPORT_SYMBOL_GPL(__clk_hw_register_gate);
 
 struct clk *clk_register_gate(struct device *dev, const char *name,
 		const char *parent_name, unsigned long flags,
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 13304cf..70397b4 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -28,6 +28,26 @@
  * parent - fixed parent.  No clk_set_parent support
  */
 
+/**
+ * struct clk_gpio - gpio gated clock
+ *
+ * @hw:		handle between common and hardware-specific interfaces
+ * @gpiod:	gpio descriptor
+ *
+ * Clock with a gpio control for enabling and disabling the parent clock
+ * or switching between two parents by asserting or deasserting the gpio.
+ *
+ * Implements .enable, .disable and .is_enabled or
+ * .get_parent, .set_parent and .determine_rate depending on which clk_ops
+ * is used.
+ */
+struct clk_gpio {
+	struct clk_hw	hw;
+	struct gpio_desc *gpiod;
+};
+
+#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
+
 static int clk_gpio_gate_enable(struct clk_hw *hw)
 {
 	struct clk_gpio *clk = to_clk_gpio(hw);
@@ -51,12 +71,11 @@ static int clk_gpio_gate_is_enabled(struct clk_hw *hw)
 	return gpiod_get_value(clk->gpiod);
 }
 
-const struct clk_ops clk_gpio_gate_ops = {
+static const struct clk_ops clk_gpio_gate_ops = {
 	.enable = clk_gpio_gate_enable,
 	.disable = clk_gpio_gate_disable,
 	.is_enabled = clk_gpio_gate_is_enabled,
 };
-EXPORT_SYMBOL_GPL(clk_gpio_gate_ops);
 
 static int clk_sleeping_gpio_gate_prepare(struct clk_hw *hw)
 {
@@ -111,67 +130,49 @@ static int clk_gpio_mux_set_parent(struct clk_hw *hw, u8 index)
 	return 0;
 }
 
-const struct clk_ops clk_gpio_mux_ops = {
+static const struct clk_ops clk_gpio_mux_ops = {
 	.get_parent = clk_gpio_mux_get_parent,
 	.set_parent = clk_gpio_mux_set_parent,
 	.determine_rate = __clk_mux_determine_rate,
 };
-EXPORT_SYMBOL_GPL(clk_gpio_mux_ops);
 
-static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
-		const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
-		unsigned long flags, const struct clk_ops *clk_gpio_ops)
+static struct clk_hw *clk_register_gpio(struct device *dev, u8 num_parents,
+					struct gpio_desc *gpiod,
+					const struct clk_ops *clk_gpio_ops)
 {
 	struct clk_gpio *clk_gpio;
 	struct clk_hw *hw;
 	struct clk_init_data init = {};
 	int err;
+	const struct clk_parent_data gpio_parent_data[] = {
+		{ .index = 0 },
+		{ .index = 1 },
+	};
 
-	if (dev)
-		clk_gpio = devm_kzalloc(dev, sizeof(*clk_gpio),	GFP_KERNEL);
-	else
-		clk_gpio = kzalloc(sizeof(*clk_gpio), GFP_KERNEL);
-
+	clk_gpio = devm_kzalloc(dev, sizeof(*clk_gpio),	GFP_KERNEL);
 	if (!clk_gpio)
 		return ERR_PTR(-ENOMEM);
 
-	init.name = name;
+	init.name = dev->of_node->name;
 	init.ops = clk_gpio_ops;
-	init.flags = flags;
-	init.parent_names = parent_names;
+	init.parent_data = gpio_parent_data;
 	init.num_parents = num_parents;
+	init.flags = CLK_SET_RATE_PARENT;
 
 	clk_gpio->gpiod = gpiod;
 	clk_gpio->hw.init = &init;
 
 	hw = &clk_gpio->hw;
-	if (dev)
-		err = devm_clk_hw_register(dev, hw);
-	else
-		err = clk_hw_register(NULL, hw);
+	err = devm_clk_hw_register(dev, hw);
+	if (err)
+		return ERR_PTR(err);
 
-	if (!err)
-		return hw;
-
-	if (!dev) {
-		kfree(clk_gpio);
-	}
-
-	return ERR_PTR(err);
+	return hw;
 }
 
-/**
- * clk_hw_register_gpio_gate - register a gpio clock gate with the clock
- * framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_name: name of this clock's parent
- * @gpiod: gpio descriptor to gate this clock
- * @flags: clock flags
- */
-struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
-		const char *parent_name, struct gpio_desc *gpiod,
-		unsigned long flags)
+static struct clk_hw *clk_hw_register_gpio_gate(struct device *dev,
+						int num_parents,
+						struct gpio_desc *gpiod)
 {
 	const struct clk_ops *ops;
 
@@ -180,88 +181,36 @@ struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
 	else
 		ops = &clk_gpio_gate_ops;
 
-	return clk_register_gpio(dev, name,
-			(parent_name ? &parent_name : NULL),
-			(parent_name ? 1 : 0), gpiod, flags, ops);
+	return clk_register_gpio(dev, num_parents, gpiod, ops);
 }
-EXPORT_SYMBOL_GPL(clk_hw_register_gpio_gate);
 
-struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
-		const char *parent_name, struct gpio_desc *gpiod,
-		unsigned long flags)
+static struct clk_hw *clk_hw_register_gpio_mux(struct device *dev,
+					       struct gpio_desc *gpiod)
 {
-	struct clk_hw *hw;
-
-	hw = clk_hw_register_gpio_gate(dev, name, parent_name, gpiod, flags);
-	if (IS_ERR(hw))
-		return ERR_CAST(hw);
-	return hw->clk;
+	return clk_register_gpio(dev, 2, gpiod, &clk_gpio_mux_ops);
 }
-EXPORT_SYMBOL_GPL(clk_register_gpio_gate);
-
-/**
- * clk_hw_register_gpio_mux - register a gpio clock mux with the clock framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_names: names of this clock's parents
- * @num_parents: number of parents listed in @parent_names
- * @gpiod: gpio descriptor to gate this clock
- * @flags: clock flags
- */
-struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
-		const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
-		unsigned long flags)
-{
-	if (num_parents != 2) {
-		pr_err("mux-clock %s must have 2 parents\n", name);
-		return ERR_PTR(-EINVAL);
-	}
-
-	return clk_register_gpio(dev, name, parent_names, num_parents,
-			gpiod, flags, &clk_gpio_mux_ops);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_gpio_mux);
-
-struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
-		const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
-		unsigned long flags)
-{
-	struct clk_hw *hw;
-
-	hw = clk_hw_register_gpio_mux(dev, name, parent_names, num_parents,
-			gpiod, flags);
-	if (IS_ERR(hw))
-		return ERR_CAST(hw);
-	return hw->clk;
-}
-EXPORT_SYMBOL_GPL(clk_register_gpio_mux);
 
 static int gpio_clk_driver_probe(struct platform_device *pdev)
 {
-	struct device_node *node = pdev->dev.of_node;
-	const char **parent_names, *gpio_name;
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	const char *gpio_name;
 	unsigned int num_parents;
 	struct gpio_desc *gpiod;
-	struct clk *clk;
+	struct clk_hw *hw;
 	bool is_mux;
 	int ret;
 
-	num_parents = of_clk_get_parent_count(node);
-	if (num_parents) {
-		parent_names = devm_kcalloc(&pdev->dev, num_parents,
-					    sizeof(char *), GFP_KERNEL);
-		if (!parent_names)
-			return -ENOMEM;
-
-		of_clk_parent_fill(node, parent_names, num_parents);
-	} else {
-		parent_names = NULL;
-	}
-
 	is_mux = of_device_is_compatible(node, "gpio-mux-clock");
 
+	num_parents = of_clk_get_parent_count(node);
+	if (is_mux && num_parents != 2) {
+		dev_err(dev, "mux-clock must have 2 parents\n");
+		return -EINVAL;
+	}
+
 	gpio_name = is_mux ? "select" : "enable";
-	gpiod = devm_gpiod_get(&pdev->dev, gpio_name, GPIOD_OUT_LOW);
+	gpiod = devm_gpiod_get(dev, gpio_name, GPIOD_OUT_LOW);
 	if (IS_ERR(gpiod)) {
 		ret = PTR_ERR(gpiod);
 		if (ret == -EPROBE_DEFER)
@@ -275,16 +224,13 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
 	}
 
 	if (is_mux)
-		clk = clk_register_gpio_mux(&pdev->dev, node->name,
-				parent_names, num_parents, gpiod, 0);
+		hw = clk_hw_register_gpio_mux(dev, gpiod);
 	else
-		clk = clk_register_gpio_gate(&pdev->dev, node->name,
-				parent_names ?  parent_names[0] : NULL, gpiod,
-				CLK_SET_RATE_PARENT);
-	if (IS_ERR(clk))
-		return PTR_ERR(clk);
+		hw = clk_hw_register_gpio_gate(dev, num_parents, gpiod);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
 
-	return of_clk_add_provider(node, of_clk_src_simple_get, clk);
+	return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
 }
 
 static const struct of_device_id gpio_clk_match_table[] = {
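
The gpio clock rework above is a good illustration of the clk_parent_data scheme this
series moves towards: parents are taken from the consumer node's "clocks" property by
index instead of being referenced by global name strings. As a loose sketch with invented
names (baz_div2, the "ref" clock-names entry and the osc24m legacy fallback are purely
illustrative), a statically defined clock can describe its one parent the same way:

    #include <linux/clk-provider.h>

    /* parent: DT "clocks" entry 0 / clock-names "ref", legacy-name fallback */
    static const struct clk_parent_data baz_parent_data[] = {
        { .fw_name = "ref", .name = "osc24m", .index = 0 },
    };

    static struct clk_fixed_factor baz_div2 = {
        .mult = 1,
        .div = 2,
        .hw.init = CLK_HW_INIT_PARENTS_DATA("baz_div2", baz_parent_data,
                                            &clk_fixed_factor_ops, 0),
    };

    /* registered later with e.g. clk_hw_register(dev, &baz_div2.hw) */
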
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 570b6e5..e54e797 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -145,17 +145,19 @@ const struct clk_ops clk_mux_ro_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
 
-struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
-		const char * const *parent_names, u8 num_parents,
-		unsigned long flags,
-		void __iomem *reg, u8 shift, u32 mask,
+struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
+		const char *name, u8 num_parents,
+		const char * const *parent_names,
+		const struct clk_hw **parent_hws,
+		const struct clk_parent_data *parent_data,
+		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
 		u8 clk_mux_flags, u32 *table, spinlock_t *lock)
 {
 	struct clk_mux *mux;
 	struct clk_hw *hw;
 	struct clk_init_data init = {};
 	u8 width = 0;
-	int ret;
+	int ret = -EINVAL;
 
 	if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
 		width = fls(mask) - ffs(mask) + 1;
@@ -177,6 +179,8 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
 		init.ops = &clk_mux_ops;
 	init.flags = flags;
 	init.parent_names = parent_names;
+	init.parent_data = parent_data;
+	init.parent_hws = parent_hws;
 	init.num_parents = num_parents;
 
 	/* struct clk_mux assignments */
@@ -189,7 +193,10 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
 	mux->hw.init = &init;
 
 	hw = &mux->hw;
-	ret = clk_hw_register(dev, hw);
+	if (dev || !np)
+		ret = clk_hw_register(dev, hw);
+	else if (np)
+		ret = of_clk_hw_register(np, hw);
 	if (ret) {
 		kfree(mux);
 		hw = ERR_PTR(ret);
@@ -197,53 +204,24 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
 
 	return hw;
 }
-EXPORT_SYMBOL_GPL(clk_hw_register_mux_table);
+EXPORT_SYMBOL_GPL(__clk_hw_register_mux);
 
 struct clk *clk_register_mux_table(struct device *dev, const char *name,
 		const char * const *parent_names, u8 num_parents,
-		unsigned long flags,
-		void __iomem *reg, u8 shift, u32 mask,
+		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
 		u8 clk_mux_flags, u32 *table, spinlock_t *lock)
 {
 	struct clk_hw *hw;
 
-	hw = clk_hw_register_mux_table(dev, name, parent_names, num_parents,
-				       flags, reg, shift, mask, clk_mux_flags,
-				       table, lock);
+	hw = clk_hw_register_mux_table(dev, name, parent_names,
+				       num_parents, flags, reg, shift, mask,
+				       clk_mux_flags, table, lock);
 	if (IS_ERR(hw))
 		return ERR_CAST(hw);
 	return hw->clk;
 }
 EXPORT_SYMBOL_GPL(clk_register_mux_table);
 
-struct clk *clk_register_mux(struct device *dev, const char *name,
-		const char * const *parent_names, u8 num_parents,
-		unsigned long flags,
-		void __iomem *reg, u8 shift, u8 width,
-		u8 clk_mux_flags, spinlock_t *lock)
-{
-	u32 mask = BIT(width) - 1;
-
-	return clk_register_mux_table(dev, name, parent_names, num_parents,
-				      flags, reg, shift, mask, clk_mux_flags,
-				      NULL, lock);
-}
-EXPORT_SYMBOL_GPL(clk_register_mux);
-
-struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
-		const char * const *parent_names, u8 num_parents,
-		unsigned long flags,
-		void __iomem *reg, u8 shift, u8 width,
-		u8 clk_mux_flags, spinlock_t *lock)
-{
-	u32 mask = BIT(width) - 1;
-
-	return clk_hw_register_mux_table(dev, name, parent_names, num_parents,
-				      flags, reg, shift, mask, clk_mux_flags,
-				      NULL, lock);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_mux);
-
 void clk_unregister_mux(struct clk *clk)
 {
 	struct clk_mux *mux;
diff --git a/drivers/clk/clk-plldig.c b/drivers/clk/clk-plldig.c
new file mode 100644
index 0000000..2502016
--- /dev/null
+++ b/drivers/clk/clk-plldig.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ *
+ * Clock driver for the LS1028A Display output interfaces (LCD, DPHY).
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+
+/* PLLDIG register offsets and bit masks */
+#define PLLDIG_REG_PLLSR            0x24
+#define PLLDIG_LOCK_MASK            BIT(2)
+#define PLLDIG_REG_PLLDV            0x28
+#define PLLDIG_MFD_MASK             GENMASK(7, 0)
+#define PLLDIG_RFDPHI1_MASK         GENMASK(30, 25)
+#define PLLDIG_REG_PLLFM            0x2c
+#define PLLDIG_SSCGBYP_ENABLE       BIT(30)
+#define PLLDIG_REG_PLLFD            0x30
+#define PLLDIG_FDEN                 BIT(30)
+#define PLLDIG_FRAC_MASK            GENMASK(15, 0)
+#define PLLDIG_REG_PLLCAL1          0x38
+#define PLLDIG_REG_PLLCAL2          0x3c
+
+/* Range of the VCO frequencies, in Hz */
+#define PLLDIG_MIN_VCO_FREQ         650000000
+#define PLLDIG_MAX_VCO_FREQ         1300000000
+
+/* Range of the output frequencies, in Hz */
+#define PHI1_MIN_FREQ               27000000UL
+#define PHI1_MAX_FREQ               600000000UL
+
+/* Maximum value of the reduced frequency divider */
+#define MAX_RFDPHI1          63UL
+
+/* Best value of multiplication factor divider */
+#define PLLDIG_DEFAULT_MFD   44
+
+/*
+ * Denominator part of the fractional part of the
+ * loop multiplication factor.
+ */
+#define MFDEN          20480
+
+static const struct clk_parent_data parent_data[] = {
+	{ .index = 0 },
+};
+
+struct clk_plldig {
+	struct clk_hw hw;
+	void __iomem *regs;
+	unsigned int vco_freq;
+};
+
+#define to_clk_plldig(_hw)	container_of(_hw, struct clk_plldig, hw)
+
+static int plldig_enable(struct clk_hw *hw)
+{
+	struct clk_plldig *data = to_clk_plldig(hw);
+	u32 val;
+
+	val = readl(data->regs + PLLDIG_REG_PLLFM);
+	/*
+	 * Use Bypass mode with the PLL off by default; the frequency overshoot
+	 * detector output is disabled. SSCG Bypass mode should be enabled.
+	 */
+	val |= PLLDIG_SSCGBYP_ENABLE;
+	writel(val, data->regs + PLLDIG_REG_PLLFM);
+
+	return 0;
+}
+
+static void plldig_disable(struct clk_hw *hw)
+{
+	struct clk_plldig *data = to_clk_plldig(hw);
+	u32 val;
+
+	val = readl(data->regs + PLLDIG_REG_PLLFM);
+
+	val &= ~PLLDIG_SSCGBYP_ENABLE;
+	val |= FIELD_PREP(PLLDIG_SSCGBYP_ENABLE, 0x0);
+
+	writel(val, data->regs + PLLDIG_REG_PLLFM);
+}
+
+static int plldig_is_enabled(struct clk_hw *hw)
+{
+	struct clk_plldig *data = to_clk_plldig(hw);
+
+	return readl(data->regs + PLLDIG_REG_PLLFM) &
+			      PLLDIG_SSCGBYP_ENABLE;
+}
+
+static unsigned long plldig_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk_plldig *data = to_clk_plldig(hw);
+	u32 val, rfdphi1;
+
+	val = readl(data->regs + PLLDIG_REG_PLLDV);
+
+	/* Check if PLL is bypassed */
+	if (val & PLLDIG_SSCGBYP_ENABLE)
+		return parent_rate;
+
+	rfdphi1 = FIELD_GET(PLLDIG_RFDPHI1_MASK, val);
+
+	/*
+	 * An RFDPHI1 register value of 0 is treated as a divider of one, so
+	 * the VCO frequency is passed through undivided.
+	 */
+	if (!rfdphi1)
+		rfdphi1 = 1;
+
+	return DIV_ROUND_UP(data->vco_freq, rfdphi1);
+}
+
+static unsigned long plldig_calc_target_div(unsigned long vco_freq,
+					    unsigned long target_rate)
+{
+	unsigned long div;
+
+	div = DIV_ROUND_CLOSEST(vco_freq, target_rate);
+	div = clamp(div, 1UL, MAX_RFDPHI1);
+
+	return div;
+}
+
+static int plldig_determine_rate(struct clk_hw *hw,
+				 struct clk_rate_request *req)
+{
+	struct clk_plldig *data = to_clk_plldig(hw);
+	unsigned int div;
+
+	req->rate = clamp(req->rate, PHI1_MIN_FREQ, PHI1_MAX_FREQ);
+	div = plldig_calc_target_div(data->vco_freq, req->rate);
+	req->rate = DIV_ROUND_UP(data->vco_freq, div);
+
+	return 0;
+}
+
+static int plldig_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	struct clk_plldig *data = to_clk_plldig(hw);
+	unsigned int val, cond;
+	unsigned int rfdphi1;
+
+	rate = clamp(rate, PHI1_MIN_FREQ, PHI1_MAX_FREQ);
+	rfdphi1 = plldig_calc_target_div(data->vco_freq, rate);
+
+	/* update the divider value */
+	val = readl(data->regs + PLLDIG_REG_PLLDV);
+	val &= ~PLLDIG_RFDPHI1_MASK;
+	val |= FIELD_PREP(PLLDIG_RFDPHI1_MASK, rfdphi1);
+	writel(val, data->regs + PLLDIG_REG_PLLDV);
+
+	/* waiting for old lock state to clear */
+	udelay(200);
+
+	/* Wait until PLL is locked or timeout */
+	return readl_poll_timeout_atomic(data->regs + PLLDIG_REG_PLLSR, cond,
+					 cond & PLLDIG_LOCK_MASK, 0,
+					 USEC_PER_MSEC);
+}
+
+static const struct clk_ops plldig_clk_ops = {
+	.enable = plldig_enable,
+	.disable = plldig_disable,
+	.is_enabled = plldig_is_enabled,
+	.recalc_rate = plldig_recalc_rate,
+	.determine_rate = plldig_determine_rate,
+	.set_rate = plldig_set_rate,
+};
+
+static int plldig_init(struct clk_hw *hw)
+{
+	struct clk_plldig *data = to_clk_plldig(hw);
+	struct clk_hw *parent = clk_hw_get_parent(hw);
+	unsigned long parent_rate;
+	unsigned long val;
+	unsigned long long lltmp;
+	unsigned int mfd, fracdiv = 0;
+
+	if (!parent)
+		return -EINVAL;
+
+	parent_rate = clk_hw_get_rate(parent);
+
+	if (data->vco_freq) {
+		mfd = data->vco_freq / parent_rate;
+		lltmp = data->vco_freq % parent_rate;
+		lltmp *= MFDEN;
+		do_div(lltmp, parent_rate);
+		fracdiv = lltmp;
+	} else {
+		mfd = PLLDIG_DEFAULT_MFD;
+		data->vco_freq = parent_rate * mfd;
+	}
+
+	val = FIELD_PREP(PLLDIG_MFD_MASK, mfd);
+	writel(val, data->regs + PLLDIG_REG_PLLDV);
+
+	/* Enable fractional divider */
+	if (fracdiv) {
+		val = FIELD_PREP(PLLDIG_FRAC_MASK, fracdiv);
+		val |= PLLDIG_FDEN;
+		writel(val, data->regs + PLLDIG_REG_PLLFD);
+	}
+
+	return 0;
+}
+
+static int plldig_clk_probe(struct platform_device *pdev)
+{
+	struct clk_plldig *data;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->regs = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(data->regs))
+		return PTR_ERR(data->regs);
+
+	data->hw.init = CLK_HW_INIT_PARENTS_DATA("dpclk",
+						 parent_data,
+						 &plldig_clk_ops,
+						 0);
+
+	ret = devm_clk_hw_register(dev, &data->hw);
+	if (ret) {
+		dev_err(dev, "failed to register %s clock\n",
+						dev->of_node->name);
+		return ret;
+	}
+
+	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+					  &data->hw);
+	if (ret) {
+		dev_err(dev, "unable to add clk provider\n");
+		return ret;
+	}
+
+	/*
+	 * The frequency of the VCO cannot be changed during runtime.
+	 * Therefore, let the user specify a desired frequency.
+	 */
+	if (!of_property_read_u32(dev->of_node, "fsl,vco-hz",
+				  &data->vco_freq)) {
+		if (data->vco_freq < PLLDIG_MIN_VCO_FREQ ||
+		    data->vco_freq > PLLDIG_MAX_VCO_FREQ)
+			return -EINVAL;
+	}
+
+	return plldig_init(&data->hw);
+}
+
+static const struct of_device_id plldig_clk_id[] = {
+	{ .compatible = "fsl,ls1028a-plldig" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, plldig_clk_id);
+
+static struct platform_driver plldig_clk_driver = {
+	.driver = {
+		.name = "plldig-clock",
+		.of_match_table = plldig_clk_id,
+	},
+	.probe = plldig_clk_probe,
+};
+module_platform_driver(plldig_clk_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wen He <wen.he_1@nxp.com>");
+MODULE_DESCRIPTION("LS1028A Display output interface pixel clock driver");
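
To make the divider selection in plldig_determine_rate() and plldig_calc_target_div()
above concrete, here is one worked case; the 650 MHz VCO (the documented minimum) and the
74.25 MHz request are chosen purely for illustration:

    div  = DIV_ROUND_CLOSEST(650000000, 74250000) = 9    /* already within [1, 63] */
    rate = DIV_ROUND_UP(650000000, 9)             = 72222223 Hz (~72.2 MHz)

determine_rate() reports the achievable 72222223 Hz back to the core, and set_rate() then
programs the same divider into PLLDIG_RFDPHI1_MASK and polls PLLSR for lock.
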
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index bed140f..d5946f7 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -342,6 +342,32 @@ static const struct clockgen_muxinfo ls1046a_hwa2 = {
 	},
 };
 
+static const struct clockgen_muxinfo ls1088a_hwa1 = {
+	{
+		{},
+		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+		{},
+		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+	},
+};
+
+static const struct clockgen_muxinfo ls1088a_hwa2 = {
+	{
+		{},
+		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
+		{},
+		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+	},
+};
+
 static const struct clockgen_muxinfo ls1012a_cmux = {
 	{
 		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
@@ -607,6 +633,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
 		.cmux_groups = {
 			&clockgen2_cmux_cga12
 		},
+		.hwaccel = {
+			&ls1088a_hwa1, &ls1088a_hwa2
+		},
 		.cmux_to_group = {
 			0, 0, -1
 		},
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index 886f7c5..c491f5d 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -176,7 +176,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
 }
 
 static const struct scmi_device_id scmi_id_table[] = {
-	{ SCMI_PROTOCOL_CLOCK },
+	{ SCMI_PROTOCOL_CLOCK, "clocks" },
 	{ },
 };
 MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 772258d..f0f2b59 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -429,7 +429,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
 			parent = ERR_PTR(-EPROBE_DEFER);
 	} else {
 		parent = clk_core_get(core, index);
-		if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
+		if (PTR_ERR(parent) == -ENOENT && entry->name)
 			parent = clk_core_lookup(entry->name);
 	}
 
@@ -2996,6 +2996,41 @@ static int clk_dump_show(struct seq_file *s, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(clk_dump);
 
+#undef CLOCK_ALLOW_WRITE_DEBUGFS
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
+/*
+ * This can be dangerous, therefore don't provide any real compile time
+ * configuration option for this feature.
+ * People who want to use this will need to modify the source code directly.
+ */
+static int clk_rate_set(void *data, u64 val)
+{
+	struct clk_core *core = data;
+	int ret;
+
+	clk_prepare_lock();
+	ret = clk_core_set_rate_nolock(core, val);
+	clk_prepare_unlock();
+
+	return ret;
+}
+
+#define clk_rate_mode	0644
+#else
+#define clk_rate_set	NULL
+#define clk_rate_mode	0444
+#endif
+
+static int clk_rate_get(void *data, u64 *val)
+{
+	struct clk_core *core = data;
+
+	*val = core->rate;
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
+
 static const struct {
 	unsigned long flag;
 	const char *name;
@@ -3145,7 +3180,8 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
 	root = debugfs_create_dir(core->name, pdentry);
 	core->dentry = root;
 
-	debugfs_create_ulong("clk_rate", 0444, root, &core->rate);
+	debugfs_create_file("clk_rate", clk_rate_mode, root, core,
+			    &clk_rate_fops);
 	debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
 	debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
 	debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
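
A short usage note on the write path added above, since it deliberately has no Kconfig
switch: on a local development build the #undef guarding the block has to be turned into
a define by hand,

    #define CLOCK_ALLOW_WRITE_DEBUGFS

after which clk_rate is created with mode 0644 and a decimal rate in Hz can be written to
/sys/kernel/debug/clk/<name>/clk_rate (assuming debugfs is mounted there); the write is
handled by clk_rate_set(), i.e. by clk_core_set_rate_nolock() under the prepare lock.
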
@@ -3338,6 +3374,26 @@ static int __clk_core_init(struct clk_core *core)
 		goto out;
 	}
 
+	/*
+	 * optional platform-specific magic
+	 *
+	 * The .init callback is not used by any of the basic clock types, but
+	 * exists for weird hardware that must perform initialization magic so
+	 * that CCF gets an accurate view of the clock for any other callback.
+	 * It may also be used when the provider needs to perform dynamic
+	 * allocations; such allocations must be freed in the terminate()
+	 * callback. This callback shall not be used to initialize the clock's
+	 * parameter state, such as rate, parent, etc ...
+	 *
+	 * If it exists, this callback shall be called before any other
+	 * callback of the clock.
+	 */
+	if (core->ops->init) {
+		ret = core->ops->init(core->hw);
+		if (ret)
+			goto out;
+	}
+
 	core->parent = __clk_init_parent(core);
 
 	/*
@@ -3363,17 +3419,6 @@ static int __clk_core_init(struct clk_core *core)
 	}
 
 	/*
-	 * optional platform-specific magic
-	 *
-	 * The .init callback is not used by any of the basic clock types, but
-	 * exists for weird hardware that must perform initialization magic.
-	 * Please consider other ways of solving initialization problems before
-	 * using this callback, as its use is discouraged.
-	 */
-	if (core->ops->init)
-		core->ops->init(core->hw);
-
-	/*
 	 * Set clk's accuracy.  The preferred method is to use
 	 * .recalc_accuracy. For simple clocks and lazy developers the default
 	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
@@ -3427,13 +3472,18 @@ static int __clk_core_init(struct clk_core *core)
 		unsigned long flags;
 
 		ret = clk_core_prepare(core);
-		if (ret)
+		if (ret) {
+			pr_warn("%s: critical clk '%s' failed to prepare\n",
+			       __func__, core->name);
 			goto out;
+		}
 
 		flags = clk_enable_lock();
 		ret = clk_core_enable(core);
 		clk_enable_unlock(flags);
 		if (ret) {
+			pr_warn("%s: critical clk '%s' failed to enable\n",
+			       __func__, core->name);
 			clk_core_unprepare(core);
 			goto out;
 		}
@@ -3733,6 +3783,28 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 }
 
 /**
+ * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
+ * @dev: Device to get device node of
+ *
+ * Return: device node pointer of @dev, or the device node pointer of
+ * @dev->parent if @dev doesn't have a device node, or NULL if neither
+ * @dev nor @dev->parent has a device node.
+ */
+static struct device_node *dev_or_parent_of_node(struct device *dev)
+{
+	struct device_node *np;
+
+	if (!dev)
+		return NULL;
+
+	np = dev_of_node(dev);
+	if (!np)
+		np = dev_of_node(dev->parent);
+
+	return np;
+}
+
+/**
  * clk_register - allocate a new clock, register it and return an opaque cookie
  * @dev: device that is registering this clock
  * @hw: link to hardware-specific clock data
@@ -3747,7 +3819,7 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
  */
 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 {
-	return __clk_register(dev, dev_of_node(dev), hw);
+	return __clk_register(dev, dev_or_parent_of_node(dev), hw);
 }
 EXPORT_SYMBOL_GPL(clk_register);
 
@@ -3763,7 +3835,8 @@ EXPORT_SYMBOL_GPL(clk_register);
  */
 int clk_hw_register(struct device *dev, struct clk_hw *hw)
 {
-	return PTR_ERR_OR_ZERO(__clk_register(dev, dev_of_node(dev), hw));
+	return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
+			       hw));
 }
 EXPORT_SYMBOL_GPL(clk_hw_register);
 
@@ -3866,6 +3939,7 @@ static void clk_core_evict_parent_cache(struct clk_core *core)
 void clk_unregister(struct clk *clk)
 {
 	unsigned long flags;
+	const struct clk_ops *ops;
 
 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
 		return;
@@ -3874,7 +3948,8 @@ void clk_unregister(struct clk *clk)
 
 	clk_prepare_lock();
 
-	if (clk->core->ops == &clk_nodrv_ops) {
+	ops = clk->core->ops;
+	if (ops == &clk_nodrv_ops) {
 		pr_err("%s: unregistered clock: %s\n", __func__,
 		       clk->core->name);
 		goto unlock;
@@ -3887,6 +3962,9 @@ void clk_unregister(struct clk *clk)
 	clk->core->ops = &clk_nodrv_ops;
 	clk_enable_unlock(flags);
 
+	if (ops->terminate)
+		ops->terminate(clk->core->hw);
+
 	if (!hlist_empty(&clk->core->children)) {
 		struct clk_core *child;
 		struct hlist_node *t;
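
Rounding off the clk.c changes: the relocated .init hook now pairs with the new
.terminate hook invoked from clk_unregister() above. Below is a minimal sketch of a
provider using the pair; the foo_pll names and the calibration table are invented
stand-ins for the "dynamic allocations" the comment refers to, and the rate ops are
elided.

    #include <linux/clk-provider.h>
    #include <linux/slab.h>

    struct foo_pll {
        struct clk_hw hw;
        u32 *cal_table;           /* hypothetical calibration data */
    };

    #define to_foo_pll(_hw) container_of(_hw, struct foo_pll, hw)

    static int foo_pll_init(struct clk_hw *hw)
    {
        struct foo_pll *pll = to_foo_pll(hw);

        /* dynamic allocation done from .init ... */
        pll->cal_table = kcalloc(8, sizeof(*pll->cal_table), GFP_KERNEL);
        return pll->cal_table ? 0 : -ENOMEM;
    }

    static void foo_pll_terminate(struct clk_hw *hw)
    {
        /* ... must be released from .terminate on clk_unregister() */
        kfree(to_foo_pll(hw)->cal_table);
    }

    static const struct clk_ops foo_pll_ops = {
        .init       = foo_pll_init,
        .terminate  = foo_pll_terminate,
        /* .recalc_rate, .set_rate, ... left out of this sketch */
    };
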
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 1ac0c79..01eadee 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -20,6 +20,12 @@
 	help
 	    Build the driver for i.MX8MN CCM Clock Driver
 
+config CLK_IMX8MP
+	bool "IMX8MP CCM Clock Driver"
+	depends on ARCH_MXC && ARM64
+	help
+	    Build the driver for i.MX8MP CCM Clock Driver
+
 config CLK_IMX8MQ
 	bool "IMX8MQ CCM Clock Driver"
 	depends on ARCH_MXC && ARM64
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 77a3d71..928f874 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -18,7 +18,7 @@
 	clk-pllv2.o \
 	clk-pllv3.o \
 	clk-pllv4.o \
-	clk-sccg-pll.o \
+	clk-sscg-pll.o \
 	clk-pll14xx.o
 
 obj-$(CONFIG_MXC_CLK_SCU) += \
@@ -27,6 +27,7 @@
 
 obj-$(CONFIG_CLK_IMX8MM) += clk-imx8mm.o
 obj-$(CONFIG_CLK_IMX8MN) += clk-imx8mn.o
+obj-$(CONFIG_CLK_IMX8MP) += clk-imx8mp.o
 obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o
 obj-$(CONFIG_CLK_IMX8QXP) += clk-imx8qxp.o clk-imx8qxp-lpcg.o
 
diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c
index 060f860..b9efcc8 100644
--- a/drivers/clk/imx/clk-composite-7ulp.c
+++ b/drivers/clk/imx/clk-composite-7ulp.c
@@ -21,7 +21,7 @@
 #define PCG_PCD_WIDTH	3
 #define PCG_PCD_MASK	0x7
 
-struct clk_hw *imx7ulp_clk_composite(const char *name,
+struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
 				     const char * const *parent_names,
 				     int num_parents, bool mux_present,
 				     bool rate_present, bool gate_present,
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index d3486ee..20f7c91 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -123,7 +123,7 @@ static const struct clk_ops imx8m_clk_composite_divider_ops = {
 	.set_rate = imx8m_clk_composite_divider_set_rate,
 };
 
-struct clk *imx8m_clk_composite_flags(const char *name,
+struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
 					const char * const *parent_names,
 					int num_parents, void __iomem *reg,
 					unsigned long flags)
@@ -171,7 +171,7 @@ struct clk *imx8m_clk_composite_flags(const char *name,
 	if (IS_ERR(hw))
 		goto fail;
 
-	return hw->clk;
+	return hw;
 
 fail:
 	kfree(gate);
diff --git a/drivers/clk/imx/clk-divider-gate.c b/drivers/clk/imx/clk-divider-gate.c
index 2a8352a..0322a84 100644
--- a/drivers/clk/imx/clk-divider-gate.c
+++ b/drivers/clk/imx/clk-divider-gate.c
@@ -43,7 +43,7 @@ static unsigned long clk_divider_gate_recalc_rate(struct clk_hw *hw,
 {
 	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
 	struct clk_divider *div = to_clk_divider(hw);
-	unsigned long flags = 0;
+	unsigned long flags;
 	unsigned int val;
 
 	spin_lock_irqsave(div->lock, flags);
@@ -75,7 +75,7 @@ static int clk_divider_gate_set_rate(struct clk_hw *hw, unsigned long rate,
 {
 	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
 	struct clk_divider *div = to_clk_divider(hw);
-	unsigned long flags = 0;
+	unsigned long flags;
 	int value;
 	u32 val;
 
@@ -104,7 +104,7 @@ static int clk_divider_enable(struct clk_hw *hw)
 {
 	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
 	struct clk_divider *div = to_clk_divider(hw);
-	unsigned long flags = 0;
+	unsigned long flags;
 	u32 val;
 
 	if (!div_gate->cached_val) {
@@ -127,7 +127,7 @@ static void clk_divider_disable(struct clk_hw *hw)
 {
 	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
 	struct clk_divider *div = to_clk_divider(hw);
-	unsigned long flags = 0;
+	unsigned long flags;
 	u32 val;
 
 	spin_lock_irqsave(div->lock, flags);
@@ -167,13 +167,13 @@ static const struct clk_ops clk_divider_gate_ops = {
 };
 
 /*
- * NOTE: In order to resue the most code from the common divider,
+ * NOTE: In order to reuse the most code from the common divider,
  * we also design our divider following the way that provides an extra
  * clk_divider_flags, however it's fixed to CLK_DIVIDER_ONE_BASED by
  * default as our HW is. Besides that it supports only CLK_DIVIDER_READ_ONLY
  * flag which can be specified by user flexibly.
  */
-struct clk_hw *imx_clk_divider_gate(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
 				    unsigned long flags, void __iomem *reg,
 				    u8 shift, u8 width, u8 clk_divider_flags,
 				    const struct clk_div_table *table,
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index fece503..101e0a3 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -201,8 +201,9 @@ static const struct clk_ops clk_frac_pll_ops = {
 	.set_rate	= clk_pll_set_rate,
 };
 
-struct clk *imx_clk_frac_pll(const char *name, const char *parent_name,
-			     void __iomem *base)
+struct clk_hw *imx_clk_hw_frac_pll(const char *name,
+				   const char *parent_name,
+				   void __iomem *base)
 {
 	struct clk_init_data init;
 	struct clk_frac_pll *pll;
@@ -230,5 +231,5 @@ struct clk *imx_clk_frac_pll(const char *name, const char *parent_name,
 		return ERR_PTR(ret);
 	}
 
-	return hw->clk;
+	return hw;
 }
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 60f2de8..ba33c79 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -598,7 +598,10 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
 	}
 
 	hws[IMX6QDL_CLK_PLL4_POST_DIV] = clk_hw_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
-	hws[IMX6QDL_CLK_PLL4_AUDIO_DIV] = clk_hw_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock);
+	if (clk_on_imx6q() || clk_on_imx6qp())
+		hws[IMX6QDL_CLK_PLL4_AUDIO_DIV] = imx_clk_hw_fixed_factor("pll4_audio_div", "pll4_post_div", 1, 1);
+	else
+		hws[IMX6QDL_CLK_PLL4_AUDIO_DIV] = clk_hw_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock);
 	hws[IMX6QDL_CLK_PLL5_POST_DIV] = clk_hw_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock);
 	hws[IMX6QDL_CLK_PLL5_VIDEO_DIV] = clk_hw_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock);
 
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 281191b..0620d6c 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -59,7 +59,7 @@ static struct clk **pcc3_uart_clks[ARRAY_SIZE(pcc3_uart_clk_ids) + 1] __initdata
 static void __init imx7ulp_clk_scg1_init(struct device_node *np)
 {
 	struct clk_hw_onecell_data *clk_data;
-	struct clk_hw **clks;
+	struct clk_hw **hws;
 	void __iomem *base;
 
 	clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SCG1_END),
@@ -68,76 +68,76 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
 		return;
 
 	clk_data->num = IMX7ULP_CLK_SCG1_END;
-	clks = clk_data->hws;
+	hws = clk_data->hws;
 
-	clks[IMX7ULP_CLK_DUMMY]		= imx_clk_hw_fixed("dummy", 0);
+	hws[IMX7ULP_CLK_DUMMY]		= imx_clk_hw_fixed("dummy", 0);
 
-	clks[IMX7ULP_CLK_ROSC]		= imx_obtain_fixed_clk_hw(np, "rosc");
-	clks[IMX7ULP_CLK_SOSC]		= imx_obtain_fixed_clk_hw(np, "sosc");
-	clks[IMX7ULP_CLK_SIRC]		= imx_obtain_fixed_clk_hw(np, "sirc");
-	clks[IMX7ULP_CLK_FIRC]		= imx_obtain_fixed_clk_hw(np, "firc");
-	clks[IMX7ULP_CLK_UPLL]		= imx_obtain_fixed_clk_hw(np, "upll");
+	hws[IMX7ULP_CLK_ROSC]		= imx_obtain_fixed_clk_hw(np, "rosc");
+	hws[IMX7ULP_CLK_SOSC]		= imx_obtain_fixed_clk_hw(np, "sosc");
+	hws[IMX7ULP_CLK_SIRC]		= imx_obtain_fixed_clk_hw(np, "sirc");
+	hws[IMX7ULP_CLK_FIRC]		= imx_obtain_fixed_clk_hw(np, "firc");
+	hws[IMX7ULP_CLK_UPLL]		= imx_obtain_fixed_clk_hw(np, "upll");
 
 	/* SCG1 */
 	base = of_iomap(np, 0);
 	WARN_ON(!base);
 
 	/* NOTE: xPLL config can't be changed when xPLL is enabled */
-	clks[IMX7ULP_CLK_APLL_PRE_SEL]	= imx_clk_hw_mux_flags("apll_pre_sel", base + 0x508, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
-	clks[IMX7ULP_CLK_SPLL_PRE_SEL]	= imx_clk_hw_mux_flags("spll_pre_sel", base + 0x608, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+	hws[IMX7ULP_CLK_APLL_PRE_SEL]	= imx_clk_hw_mux_flags("apll_pre_sel", base + 0x508, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+	hws[IMX7ULP_CLK_SPLL_PRE_SEL]	= imx_clk_hw_mux_flags("spll_pre_sel", base + 0x608, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
 
 	/*							   name		    parent_name	   reg			shift	width	flags */
-	clks[IMX7ULP_CLK_APLL_PRE_DIV]	= imx_clk_hw_divider_flags("apll_pre_div", "apll_pre_sel", base + 0x508,	8,	3,	CLK_SET_RATE_GATE);
-	clks[IMX7ULP_CLK_SPLL_PRE_DIV]	= imx_clk_hw_divider_flags("spll_pre_div", "spll_pre_sel", base + 0x608,	8,	3,	CLK_SET_RATE_GATE);
+	hws[IMX7ULP_CLK_APLL_PRE_DIV]	= imx_clk_hw_divider_flags("apll_pre_div", "apll_pre_sel", base + 0x508,	8,	3,	CLK_SET_RATE_GATE);
+	hws[IMX7ULP_CLK_SPLL_PRE_DIV]	= imx_clk_hw_divider_flags("spll_pre_div", "spll_pre_sel", base + 0x608,	8,	3,	CLK_SET_RATE_GATE);
 
 	/*						name	 parent_name	 base */
-	clks[IMX7ULP_CLK_APLL]		= imx_clk_pllv4("apll",  "apll_pre_div", base + 0x500);
-	clks[IMX7ULP_CLK_SPLL]		= imx_clk_pllv4("spll",  "spll_pre_div", base + 0x600);
+	hws[IMX7ULP_CLK_APLL]		= imx_clk_hw_pllv4("apll",  "apll_pre_div", base + 0x500);
+	hws[IMX7ULP_CLK_SPLL]		= imx_clk_hw_pllv4("spll",  "spll_pre_div", base + 0x600);
 
 	/* APLL PFDs */
-	clks[IMX7ULP_CLK_APLL_PFD0]	= imx_clk_pfdv2("apll_pfd0", "apll", base + 0x50c, 0);
-	clks[IMX7ULP_CLK_APLL_PFD1]	= imx_clk_pfdv2("apll_pfd1", "apll", base + 0x50c, 1);
-	clks[IMX7ULP_CLK_APLL_PFD2]	= imx_clk_pfdv2("apll_pfd2", "apll", base + 0x50c, 2);
-	clks[IMX7ULP_CLK_APLL_PFD3]	= imx_clk_pfdv2("apll_pfd3", "apll", base + 0x50c, 3);
+	hws[IMX7ULP_CLK_APLL_PFD0]	= imx_clk_hw_pfdv2("apll_pfd0", "apll", base + 0x50c, 0);
+	hws[IMX7ULP_CLK_APLL_PFD1]	= imx_clk_hw_pfdv2("apll_pfd1", "apll", base + 0x50c, 1);
+	hws[IMX7ULP_CLK_APLL_PFD2]	= imx_clk_hw_pfdv2("apll_pfd2", "apll", base + 0x50c, 2);
+	hws[IMX7ULP_CLK_APLL_PFD3]	= imx_clk_hw_pfdv2("apll_pfd3", "apll", base + 0x50c, 3);
 
 	/* SPLL PFDs */
-	clks[IMX7ULP_CLK_SPLL_PFD0]	= imx_clk_pfdv2("spll_pfd0", "spll", base + 0x60C, 0);
-	clks[IMX7ULP_CLK_SPLL_PFD1]	= imx_clk_pfdv2("spll_pfd1", "spll", base + 0x60C, 1);
-	clks[IMX7ULP_CLK_SPLL_PFD2]	= imx_clk_pfdv2("spll_pfd2", "spll", base + 0x60C, 2);
-	clks[IMX7ULP_CLK_SPLL_PFD3]	= imx_clk_pfdv2("spll_pfd3", "spll", base + 0x60C, 3);
+	hws[IMX7ULP_CLK_SPLL_PFD0]	= imx_clk_hw_pfdv2("spll_pfd0", "spll", base + 0x60C, 0);
+	hws[IMX7ULP_CLK_SPLL_PFD1]	= imx_clk_hw_pfdv2("spll_pfd1", "spll", base + 0x60C, 1);
+	hws[IMX7ULP_CLK_SPLL_PFD2]	= imx_clk_hw_pfdv2("spll_pfd2", "spll", base + 0x60C, 2);
+	hws[IMX7ULP_CLK_SPLL_PFD3]	= imx_clk_hw_pfdv2("spll_pfd3", "spll", base + 0x60C, 3);
 
 	/* PLL Mux */
-	clks[IMX7ULP_CLK_APLL_PFD_SEL]	= imx_clk_hw_mux_flags("apll_pfd_sel", base + 0x508, 14, 2, apll_pfd_sels, ARRAY_SIZE(apll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
-	clks[IMX7ULP_CLK_SPLL_PFD_SEL]	= imx_clk_hw_mux_flags("spll_pfd_sel", base + 0x608, 14, 2, spll_pfd_sels, ARRAY_SIZE(spll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
-	clks[IMX7ULP_CLK_APLL_SEL]	= imx_clk_hw_mux_flags("apll_sel", base + 0x508, 1, 1, apll_sels, ARRAY_SIZE(apll_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
-	clks[IMX7ULP_CLK_SPLL_SEL]	= imx_clk_hw_mux_flags("spll_sel", base + 0x608, 1, 1, spll_sels, ARRAY_SIZE(spll_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+	hws[IMX7ULP_CLK_APLL_PFD_SEL]	= imx_clk_hw_mux_flags("apll_pfd_sel", base + 0x508, 14, 2, apll_pfd_sels, ARRAY_SIZE(apll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+	hws[IMX7ULP_CLK_SPLL_PFD_SEL]	= imx_clk_hw_mux_flags("spll_pfd_sel", base + 0x608, 14, 2, spll_pfd_sels, ARRAY_SIZE(spll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+	hws[IMX7ULP_CLK_APLL_SEL]	= imx_clk_hw_mux_flags("apll_sel", base + 0x508, 1, 1, apll_sels, ARRAY_SIZE(apll_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+	hws[IMX7ULP_CLK_SPLL_SEL]	= imx_clk_hw_mux_flags("spll_sel", base + 0x608, 1, 1, spll_sels, ARRAY_SIZE(spll_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
 
-	clks[IMX7ULP_CLK_SPLL_BUS_CLK]	= imx_clk_divider_gate("spll_bus_clk", "spll_sel", CLK_SET_RATE_GATE, base + 0x604, 8, 3, 0, ulp_div_table, &imx_ccm_lock);
+	hws[IMX7ULP_CLK_SPLL_BUS_CLK]	= imx_clk_hw_divider_gate("spll_bus_clk", "spll_sel", CLK_SET_RATE_GATE, base + 0x604, 8, 3, 0, ulp_div_table, &imx_ccm_lock);
 
 	/* scs/ddr/nic: selecting a different clock source requires that clock to be enabled first */
-	clks[IMX7ULP_CLK_SYS_SEL]	= imx_clk_hw_mux2("scs_sel", base + 0x14, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
-	clks[IMX7ULP_CLK_HSRUN_SYS_SEL] = imx_clk_hw_mux2("hsrun_scs_sel", base + 0x1c, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
-	clks[IMX7ULP_CLK_NIC_SEL]	= imx_clk_hw_mux2("nic_sel", base + 0x40, 28, 1, nic_sels, ARRAY_SIZE(nic_sels));
-	clks[IMX7ULP_CLK_DDR_SEL]	= imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 2, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+	hws[IMX7ULP_CLK_SYS_SEL]	= imx_clk_hw_mux2("scs_sel", base + 0x14, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
+	hws[IMX7ULP_CLK_HSRUN_SYS_SEL] = imx_clk_hw_mux2("hsrun_scs_sel", base + 0x1c, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
+	hws[IMX7ULP_CLK_NIC_SEL]	= imx_clk_hw_mux2("nic_sel", base + 0x40, 28, 1, nic_sels, ARRAY_SIZE(nic_sels));
+	hws[IMX7ULP_CLK_DDR_SEL]	= imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 2, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
 
-	clks[IMX7ULP_CLK_CORE_DIV]	= imx_clk_hw_divider_flags("divcore",	"scs_sel",  base + 0x14, 16, 4, CLK_SET_RATE_PARENT);
-	clks[IMX7ULP_CLK_HSRUN_CORE_DIV] = imx_clk_hw_divider_flags("hsrun_divcore", "hsrun_scs_sel", base + 0x1c, 16, 4, CLK_SET_RATE_PARENT);
+	hws[IMX7ULP_CLK_CORE_DIV]	= imx_clk_hw_divider_flags("divcore",	"scs_sel",  base + 0x14, 16, 4, CLK_SET_RATE_PARENT);
+	hws[IMX7ULP_CLK_HSRUN_CORE_DIV] = imx_clk_hw_divider_flags("hsrun_divcore", "hsrun_scs_sel", base + 0x1c, 16, 4, CLK_SET_RATE_PARENT);
 
-	clks[IMX7ULP_CLK_DDR_DIV]	= imx_clk_divider_gate("ddr_clk", "ddr_sel", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, base + 0x30, 0, 3,
+	hws[IMX7ULP_CLK_DDR_DIV]	= imx_clk_hw_divider_gate("ddr_clk", "ddr_sel", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, base + 0x30, 0, 3,
 							       0, ulp_div_table, &imx_ccm_lock);
 
-	clks[IMX7ULP_CLK_NIC0_DIV]	= imx_clk_hw_divider_flags("nic0_clk",		"nic_sel",  base + 0x40, 24, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
-	clks[IMX7ULP_CLK_NIC1_DIV]	= imx_clk_hw_divider_flags("nic1_clk",		"nic0_clk", base + 0x40, 16, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
-	clks[IMX7ULP_CLK_NIC1_BUS_DIV]	= imx_clk_hw_divider_flags("nic1_bus_clk",	"nic0_clk", base + 0x40, 4,  4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+	hws[IMX7ULP_CLK_NIC0_DIV]	= imx_clk_hw_divider_flags("nic0_clk",		"nic_sel",  base + 0x40, 24, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+	hws[IMX7ULP_CLK_NIC1_DIV]	= imx_clk_hw_divider_flags("nic1_clk",		"nic0_clk", base + 0x40, 16, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+	hws[IMX7ULP_CLK_NIC1_BUS_DIV]	= imx_clk_hw_divider_flags("nic1_bus_clk",	"nic0_clk", base + 0x40, 4,  4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
 
-	clks[IMX7ULP_CLK_GPU_DIV]	= imx_clk_hw_divider("gpu_clk", "nic0_clk", base + 0x40, 20, 4);
+	hws[IMX7ULP_CLK_GPU_DIV]	= imx_clk_hw_divider("gpu_clk", "nic0_clk", base + 0x40, 20, 4);
 
-	clks[IMX7ULP_CLK_SOSC_BUS_CLK]	= imx_clk_divider_gate("sosc_bus_clk", "sosc", 0, base + 0x104, 8, 3,
+	hws[IMX7ULP_CLK_SOSC_BUS_CLK]	= imx_clk_hw_divider_gate("sosc_bus_clk", "sosc", 0, base + 0x104, 8, 3,
 							       CLK_DIVIDER_READ_ONLY, ulp_div_table, &imx_ccm_lock);
-	clks[IMX7ULP_CLK_FIRC_BUS_CLK]	= imx_clk_divider_gate("firc_bus_clk", "firc", 0, base + 0x304, 8, 3,
+	hws[IMX7ULP_CLK_FIRC_BUS_CLK]	= imx_clk_hw_divider_gate("firc_bus_clk", "firc", 0, base + 0x304, 8, 3,
 							       CLK_DIVIDER_READ_ONLY, ulp_div_table, &imx_ccm_lock);
 
-	imx_check_clk_hws(clks, clk_data->num);
+	imx_check_clk_hws(hws, clk_data->num);
 
 	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
 }
@@ -146,7 +146,7 @@ CLK_OF_DECLARE(imx7ulp_clk_scg1, "fsl,imx7ulp-scg1", imx7ulp_clk_scg1_init);
 static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
 {
 	struct clk_hw_onecell_data *clk_data;
-	struct clk_hw **clks;
+	struct clk_hw **hws;
 	void __iomem *base;
 	int i;
 
@@ -156,42 +156,42 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
 		return;
 
 	clk_data->num = IMX7ULP_CLK_PCC2_END;
-	clks = clk_data->hws;
+	hws = clk_data->hws;
 
 	/* PCC2 */
 	base = of_iomap(np, 0);
 	WARN_ON(!base);
 
-	clks[IMX7ULP_CLK_DMA1]		= imx_clk_hw_gate("dma1", "nic1_clk", base + 0x20, 30);
-	clks[IMX7ULP_CLK_RGPIO2P1]	= imx_clk_hw_gate("rgpio2p1", "nic1_bus_clk", base + 0x3c, 30);
-	clks[IMX7ULP_CLK_DMA_MUX1]	= imx_clk_hw_gate("dma_mux1", "nic1_bus_clk", base + 0x84, 30);
-	clks[IMX7ULP_CLK_CAAM]		= imx_clk_hw_gate("caam", "nic1_clk", base + 0x90, 30);
-	clks[IMX7ULP_CLK_LPTPM4]	= imx7ulp_clk_composite("lptpm4",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
-	clks[IMX7ULP_CLK_LPTPM5]	= imx7ulp_clk_composite("lptpm5",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
-	clks[IMX7ULP_CLK_LPIT1]		= imx7ulp_clk_composite("lpit1",   periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x9c);
-	clks[IMX7ULP_CLK_LPSPI2]	= imx7ulp_clk_composite("lpspi2",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xa4);
-	clks[IMX7ULP_CLK_LPSPI3]	= imx7ulp_clk_composite("lpspi3",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xa8);
-	clks[IMX7ULP_CLK_LPI2C4]	= imx7ulp_clk_composite("lpi2c4",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xac);
-	clks[IMX7ULP_CLK_LPI2C5]	= imx7ulp_clk_composite("lpi2c5",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb0);
-	clks[IMX7ULP_CLK_LPUART4]	= imx7ulp_clk_composite("lpuart4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb4);
-	clks[IMX7ULP_CLK_LPUART5]	= imx7ulp_clk_composite("lpuart5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb8);
-	clks[IMX7ULP_CLK_FLEXIO1]	= imx7ulp_clk_composite("flexio1", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xc4);
-	clks[IMX7ULP_CLK_USB0]		= imx7ulp_clk_composite("usb0",    periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xcc);
-	clks[IMX7ULP_CLK_USB1]		= imx7ulp_clk_composite("usb1",    periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xd0);
-	clks[IMX7ULP_CLK_USB_PHY]	= imx_clk_hw_gate("usb_phy", "nic1_bus_clk", base + 0xd4, 30);
-	clks[IMX7ULP_CLK_USDHC0]	= imx7ulp_clk_composite("usdhc0",  periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xdc);
-	clks[IMX7ULP_CLK_USDHC1]	= imx7ulp_clk_composite("usdhc1",  periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xe0);
-	clks[IMX7ULP_CLK_WDG1]		= imx7ulp_clk_composite("wdg1",    periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true,  true, base + 0xf4);
-	clks[IMX7ULP_CLK_WDG2]		= imx7ulp_clk_composite("sdg2",    periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true,  true, base + 0x10c);
+	hws[IMX7ULP_CLK_DMA1]		= imx_clk_hw_gate("dma1", "nic1_clk", base + 0x20, 30);
+	hws[IMX7ULP_CLK_RGPIO2P1]	= imx_clk_hw_gate("rgpio2p1", "nic1_bus_clk", base + 0x3c, 30);
+	hws[IMX7ULP_CLK_DMA_MUX1]	= imx_clk_hw_gate("dma_mux1", "nic1_bus_clk", base + 0x84, 30);
+	hws[IMX7ULP_CLK_CAAM]		= imx_clk_hw_gate("caam", "nic1_clk", base + 0x90, 30);
+	hws[IMX7ULP_CLK_LPTPM4]		= imx7ulp_clk_hw_composite("lptpm4",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
+	hws[IMX7ULP_CLK_LPTPM5]		= imx7ulp_clk_hw_composite("lptpm5",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
+	hws[IMX7ULP_CLK_LPIT1]		= imx7ulp_clk_hw_composite("lpit1",   periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x9c);
+	hws[IMX7ULP_CLK_LPSPI2]		= imx7ulp_clk_hw_composite("lpspi2",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xa4);
+	hws[IMX7ULP_CLK_LPSPI3]		= imx7ulp_clk_hw_composite("lpspi3",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xa8);
+	hws[IMX7ULP_CLK_LPI2C4]		= imx7ulp_clk_hw_composite("lpi2c4",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xac);
+	hws[IMX7ULP_CLK_LPI2C5]		= imx7ulp_clk_hw_composite("lpi2c5",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb0);
+	hws[IMX7ULP_CLK_LPUART4]	= imx7ulp_clk_hw_composite("lpuart4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb4);
+	hws[IMX7ULP_CLK_LPUART5]	= imx7ulp_clk_hw_composite("lpuart5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb8);
+	hws[IMX7ULP_CLK_FLEXIO1]	= imx7ulp_clk_hw_composite("flexio1", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xc4);
+	hws[IMX7ULP_CLK_USB0]		= imx7ulp_clk_hw_composite("usb0",    periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xcc);
+	hws[IMX7ULP_CLK_USB1]		= imx7ulp_clk_hw_composite("usb1",    periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xd0);
+	hws[IMX7ULP_CLK_USB_PHY]	= imx_clk_hw_gate("usb_phy", "nic1_bus_clk", base + 0xd4, 30);
+	hws[IMX7ULP_CLK_USDHC0]		= imx7ulp_clk_hw_composite("usdhc0",  periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xdc);
+	hws[IMX7ULP_CLK_USDHC1]		= imx7ulp_clk_hw_composite("usdhc1",  periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xe0);
+	hws[IMX7ULP_CLK_WDG1]		= imx7ulp_clk_hw_composite("wdg1",    periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true,  true, base + 0xf4);
+	hws[IMX7ULP_CLK_WDG2]		= imx7ulp_clk_hw_composite("wdg2",    periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true,  true, base + 0x10c);
 
-	imx_check_clk_hws(clks, clk_data->num);
+	imx_check_clk_hws(hws, clk_data->num);
 
 	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
 
 	for (i = 0; i < ARRAY_SIZE(pcc2_uart_clk_ids); i++) {
 		int index = pcc2_uart_clk_ids[i];
 
-		pcc2_uart_clks[i] = &clks[index]->clk;
+		pcc2_uart_clks[i] = &hws[index]->clk;
 	}
 
 	imx_register_uart_clocks(pcc2_uart_clks);
@@ -201,7 +201,7 @@ CLK_OF_DECLARE(imx7ulp_clk_pcc2, "fsl,imx7ulp-pcc2", imx7ulp_clk_pcc2_init);
 static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
 {
 	struct clk_hw_onecell_data *clk_data;
-	struct clk_hw **clks;
+	struct clk_hw **hws;
 	void __iomem *base;
 	int i;
 
@@ -211,41 +211,41 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
 		return;
 
 	clk_data->num = IMX7ULP_CLK_PCC3_END;
-	clks = clk_data->hws;
+	hws = clk_data->hws;
 
 	/* PCC3 */
 	base = of_iomap(np, 0);
 	WARN_ON(!base);
 
-	clks[IMX7ULP_CLK_LPTPM6]	= imx7ulp_clk_composite("lptpm6",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x84);
-	clks[IMX7ULP_CLK_LPTPM7]	= imx7ulp_clk_composite("lptpm7",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x88);
+	hws[IMX7ULP_CLK_LPTPM6]	= imx7ulp_clk_hw_composite("lptpm6",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x84);
+	hws[IMX7ULP_CLK_LPTPM7]	= imx7ulp_clk_hw_composite("lptpm7",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x88);
 
-	clks[IMX7ULP_CLK_MMDC]		= clk_hw_register_gate(NULL, "mmdc", "nic1_clk", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+	hws[IMX7ULP_CLK_MMDC]		= clk_hw_register_gate(NULL, "mmdc", "nic1_clk", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
 							       base + 0xac, 30, 0, &imx_ccm_lock);
-	clks[IMX7ULP_CLK_LPI2C6]	= imx7ulp_clk_composite("lpi2c6",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x90);
-	clks[IMX7ULP_CLK_LPI2C7]	= imx7ulp_clk_composite("lpi2c7",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
-	clks[IMX7ULP_CLK_LPUART6]	= imx7ulp_clk_composite("lpuart6", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
-	clks[IMX7ULP_CLK_LPUART7]	= imx7ulp_clk_composite("lpuart7", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x9c);
-	clks[IMX7ULP_CLK_DSI]		= imx7ulp_clk_composite("dsi",     periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true,  true, base + 0xa4);
-	clks[IMX7ULP_CLK_LCDIF]		= imx7ulp_clk_composite("lcdif",   periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xa8);
+	hws[IMX7ULP_CLK_LPI2C6]	= imx7ulp_clk_hw_composite("lpi2c6",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x90);
+	hws[IMX7ULP_CLK_LPI2C7]	= imx7ulp_clk_hw_composite("lpi2c7",  periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
+	hws[IMX7ULP_CLK_LPUART6]	= imx7ulp_clk_hw_composite("lpuart6", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
+	hws[IMX7ULP_CLK_LPUART7]	= imx7ulp_clk_hw_composite("lpuart7", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x9c);
+	hws[IMX7ULP_CLK_DSI]		= imx7ulp_clk_hw_composite("dsi",     periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true,  true, base + 0xa4);
+	hws[IMX7ULP_CLK_LCDIF]		= imx7ulp_clk_hw_composite("lcdif",   periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true,  true, base + 0xa8);
 
-	clks[IMX7ULP_CLK_VIU]		= imx_clk_hw_gate("viu",   "nic1_clk",	   base + 0xa0, 30);
-	clks[IMX7ULP_CLK_PCTLC]		= imx_clk_hw_gate("pctlc", "nic1_bus_clk", base + 0xb8, 30);
-	clks[IMX7ULP_CLK_PCTLD]		= imx_clk_hw_gate("pctld", "nic1_bus_clk", base + 0xbc, 30);
-	clks[IMX7ULP_CLK_PCTLE]		= imx_clk_hw_gate("pctle", "nic1_bus_clk", base + 0xc0, 30);
-	clks[IMX7ULP_CLK_PCTLF]		= imx_clk_hw_gate("pctlf", "nic1_bus_clk", base + 0xc4, 30);
+	hws[IMX7ULP_CLK_VIU]		= imx_clk_hw_gate("viu",   "nic1_clk",	   base + 0xa0, 30);
+	hws[IMX7ULP_CLK_PCTLC]		= imx_clk_hw_gate("pctlc", "nic1_bus_clk", base + 0xb8, 30);
+	hws[IMX7ULP_CLK_PCTLD]		= imx_clk_hw_gate("pctld", "nic1_bus_clk", base + 0xbc, 30);
+	hws[IMX7ULP_CLK_PCTLE]		= imx_clk_hw_gate("pctle", "nic1_bus_clk", base + 0xc0, 30);
+	hws[IMX7ULP_CLK_PCTLF]		= imx_clk_hw_gate("pctlf", "nic1_bus_clk", base + 0xc4, 30);
 
-	clks[IMX7ULP_CLK_GPU3D]		= imx7ulp_clk_composite("gpu3d",   periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, false, true, base + 0x140);
-	clks[IMX7ULP_CLK_GPU2D]		= imx7ulp_clk_composite("gpu2d",   periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, false, true, base + 0x144);
+	hws[IMX7ULP_CLK_GPU3D]		= imx7ulp_clk_hw_composite("gpu3d",   periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, false, true, base + 0x140);
+	hws[IMX7ULP_CLK_GPU2D]		= imx7ulp_clk_hw_composite("gpu2d",   periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, false, true, base + 0x144);
 
-	imx_check_clk_hws(clks, clk_data->num);
+	imx_check_clk_hws(hws, clk_data->num);
 
 	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
 
 	for (i = 0; i < ARRAY_SIZE(pcc3_uart_clk_ids); i++) {
 		int index = pcc3_uart_clk_ids[i];
 
-		pcc3_uart_clks[i] = &clks[index]->clk;
+		pcc3_uart_clks[i] = &hws[index]->clk;
 	}
 
 	imx_register_uart_clocks(pcc3_uart_clks);
@@ -255,7 +255,7 @@ CLK_OF_DECLARE(imx7ulp_clk_pcc3, "fsl,imx7ulp-pcc3", imx7ulp_clk_pcc3_init);
 static void __init imx7ulp_clk_smc1_init(struct device_node *np)
 {
 	struct clk_hw_onecell_data *clk_data;
-	struct clk_hw **clks;
+	struct clk_hw **hws;
 	void __iomem *base;
 
 	clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SMC1_END),
@@ -264,15 +264,15 @@ static void __init imx7ulp_clk_smc1_init(struct device_node *np)
 		return;
 
 	clk_data->num = IMX7ULP_CLK_SMC1_END;
-	clks = clk_data->hws;
+	hws = clk_data->hws;
 
 	/* SMC1 */
 	base = of_iomap(np, 0);
 	WARN_ON(!base);
 
-	clks[IMX7ULP_CLK_ARM] = imx_clk_hw_mux_flags("arm", base + 0x10, 8, 2, arm_sels, ARRAY_SIZE(arm_sels), CLK_IS_CRITICAL);
+	hws[IMX7ULP_CLK_ARM] = imx_clk_hw_mux_flags("arm", base + 0x10, 8, 2, arm_sels, ARRAY_SIZE(arm_sels), CLK_IS_CRITICAL);
 
-	imx_check_clk_hws(clks, clk_data->num);
+	imx_check_clk_hws(hws, clk_data->num);
 
 	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
 }
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 030b15d..2ed93fc 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -12,6 +12,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include <linux/types.h>
 
 #include "clk.h"
@@ -285,118 +286,126 @@ static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }
 static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_out",
 					 "vpu_pll", "sys_pll1_80m", };
 
-static struct clk *clks[IMX8MM_CLK_END];
-static struct clk_onecell_data clk_data;
+static struct clk_hw_onecell_data *clk_hw_data;
+static struct clk_hw **hws;
 
-static struct clk ** const uart_clks[] = {
-	&clks[IMX8MM_CLK_UART1_ROOT],
-	&clks[IMX8MM_CLK_UART2_ROOT],
-	&clks[IMX8MM_CLK_UART3_ROOT],
-	&clks[IMX8MM_CLK_UART4_ROOT],
-	NULL
+static const int uart_clk_ids[] = {
+	IMX8MM_CLK_UART1_ROOT,
+	IMX8MM_CLK_UART2_ROOT,
+	IMX8MM_CLK_UART3_ROOT,
+	IMX8MM_CLK_UART4_ROOT,
 };
+static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
 
 static int imx8mm_clocks_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	void __iomem *base;
-	int ret;
+	int ret, i;
 
-	clks[IMX8MM_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
-	clks[IMX8MM_CLK_24M] = of_clk_get_by_name(np, "osc_24m");
-	clks[IMX8MM_CLK_32K] = of_clk_get_by_name(np, "osc_32k");
-	clks[IMX8MM_CLK_EXT1] = of_clk_get_by_name(np, "clk_ext1");
-	clks[IMX8MM_CLK_EXT2] = of_clk_get_by_name(np, "clk_ext2");
-	clks[IMX8MM_CLK_EXT3] = of_clk_get_by_name(np, "clk_ext3");
-	clks[IMX8MM_CLK_EXT4] = of_clk_get_by_name(np, "clk_ext4");
+	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+					  IMX8MM_CLK_END), GFP_KERNEL);
+	if (WARN_ON(!clk_hw_data))
+		return -ENOMEM;
+
+	clk_hw_data->num = IMX8MM_CLK_END;
+	hws = clk_hw_data->hws;
+
+	hws[IMX8MM_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+	hws[IMX8MM_CLK_24M] = imx_obtain_fixed_clk_hw(np, "osc_24m");
+	hws[IMX8MM_CLK_32K] = imx_obtain_fixed_clk_hw(np, "osc_32k");
+	hws[IMX8MM_CLK_EXT1] = imx_obtain_fixed_clk_hw(np, "clk_ext1");
+	hws[IMX8MM_CLK_EXT2] = imx_obtain_fixed_clk_hw(np, "clk_ext2");
+	hws[IMX8MM_CLK_EXT3] = imx_obtain_fixed_clk_hw(np, "clk_ext3");
+	hws[IMX8MM_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
 
 	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
 	base = of_iomap(np, 0);
 	if (WARN_ON(!base))
 		return -ENOMEM;
 
-	clks[IMX8MM_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MM_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MM_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MM_DRAM_PLL_REF_SEL] = imx_clk_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MM_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MM_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MM_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MM_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MM_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MM_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MM_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MM_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MM_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MM_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MM_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MM_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
 
-	clks[IMX8MM_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
-	clks[IMX8MM_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
-	clks[IMX8MM_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
-	clks[IMX8MM_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_pll);
-	clks[IMX8MM_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
-	clks[IMX8MM_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
-	clks[IMX8MM_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
-	clks[IMX8MM_SYS_PLL1] = imx_clk_fixed("sys_pll1", 800000000);
-	clks[IMX8MM_SYS_PLL2] = imx_clk_fixed("sys_pll2", 1000000000);
-	clks[IMX8MM_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
+	hws[IMX8MM_AUDIO_PLL1] = imx_clk_hw_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
+	hws[IMX8MM_AUDIO_PLL2] = imx_clk_hw_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
+	hws[IMX8MM_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
+	hws[IMX8MM_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_dram_pll);
+	hws[IMX8MM_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
+	hws[IMX8MM_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
+	hws[IMX8MM_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
+	hws[IMX8MM_SYS_PLL1] = imx_clk_hw_fixed("sys_pll1", 800000000);
+	hws[IMX8MM_SYS_PLL2] = imx_clk_hw_fixed("sys_pll2", 1000000000);
+	hws[IMX8MM_SYS_PLL3] = imx_clk_hw_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
 
 	/* PLL bypass out */
-	clks[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MM_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MM_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MM_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MM_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MM_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MM_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MM_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MM_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MM_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MM_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MM_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MM_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MM_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MM_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
 
 	/* PLL out gate */
-	clks[IMX8MM_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
-	clks[IMX8MM_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
-	clks[IMX8MM_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
-	clks[IMX8MM_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
-	clks[IMX8MM_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
-	clks[IMX8MM_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
-	clks[IMX8MM_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
-	clks[IMX8MM_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
+	hws[IMX8MM_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
+	hws[IMX8MM_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
+	hws[IMX8MM_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
+	hws[IMX8MM_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
+	hws[IMX8MM_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
+	hws[IMX8MM_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
+	hws[IMX8MM_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
+	hws[IMX8MM_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
 
 	/* SYS PLL1 fixed output */
-	clks[IMX8MM_SYS_PLL1_40M_CG] = imx_clk_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
-	clks[IMX8MM_SYS_PLL1_80M_CG] = imx_clk_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
-	clks[IMX8MM_SYS_PLL1_100M_CG] = imx_clk_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
-	clks[IMX8MM_SYS_PLL1_133M_CG] = imx_clk_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
-	clks[IMX8MM_SYS_PLL1_160M_CG] = imx_clk_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
-	clks[IMX8MM_SYS_PLL1_200M_CG] = imx_clk_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
-	clks[IMX8MM_SYS_PLL1_266M_CG] = imx_clk_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
-	clks[IMX8MM_SYS_PLL1_400M_CG] = imx_clk_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
-	clks[IMX8MM_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
+	hws[IMX8MM_SYS_PLL1_40M_CG] = imx_clk_hw_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
+	hws[IMX8MM_SYS_PLL1_80M_CG] = imx_clk_hw_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
+	hws[IMX8MM_SYS_PLL1_100M_CG] = imx_clk_hw_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
+	hws[IMX8MM_SYS_PLL1_133M_CG] = imx_clk_hw_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
+	hws[IMX8MM_SYS_PLL1_160M_CG] = imx_clk_hw_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
+	hws[IMX8MM_SYS_PLL1_200M_CG] = imx_clk_hw_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
+	hws[IMX8MM_SYS_PLL1_266M_CG] = imx_clk_hw_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
+	hws[IMX8MM_SYS_PLL1_400M_CG] = imx_clk_hw_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
+	hws[IMX8MM_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
 
-	clks[IMX8MM_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
-	clks[IMX8MM_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
-	clks[IMX8MM_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
-	clks[IMX8MM_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
-	clks[IMX8MM_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
-	clks[IMX8MM_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
-	clks[IMX8MM_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
-	clks[IMX8MM_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
-	clks[IMX8MM_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
+	hws[IMX8MM_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
+	hws[IMX8MM_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
+	hws[IMX8MM_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
+	hws[IMX8MM_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
+	hws[IMX8MM_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
+	hws[IMX8MM_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
+	hws[IMX8MM_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
+	hws[IMX8MM_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
+	hws[IMX8MM_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
 
 	/* SYS PLL2 fixed output */
-	clks[IMX8MM_SYS_PLL2_50M_CG] = imx_clk_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
-	clks[IMX8MM_SYS_PLL2_100M_CG] = imx_clk_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
-	clks[IMX8MM_SYS_PLL2_125M_CG] = imx_clk_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
-	clks[IMX8MM_SYS_PLL2_166M_CG] = imx_clk_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
-	clks[IMX8MM_SYS_PLL2_200M_CG] = imx_clk_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
-	clks[IMX8MM_SYS_PLL2_250M_CG] = imx_clk_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
-	clks[IMX8MM_SYS_PLL2_333M_CG] = imx_clk_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
-	clks[IMX8MM_SYS_PLL2_500M_CG] = imx_clk_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
-	clks[IMX8MM_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
+	hws[IMX8MM_SYS_PLL2_50M_CG] = imx_clk_hw_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
+	hws[IMX8MM_SYS_PLL2_100M_CG] = imx_clk_hw_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
+	hws[IMX8MM_SYS_PLL2_125M_CG] = imx_clk_hw_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
+	hws[IMX8MM_SYS_PLL2_166M_CG] = imx_clk_hw_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
+	hws[IMX8MM_SYS_PLL2_200M_CG] = imx_clk_hw_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
+	hws[IMX8MM_SYS_PLL2_250M_CG] = imx_clk_hw_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
+	hws[IMX8MM_SYS_PLL2_333M_CG] = imx_clk_hw_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
+	hws[IMX8MM_SYS_PLL2_500M_CG] = imx_clk_hw_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
+	hws[IMX8MM_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
 
-	clks[IMX8MM_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
-	clks[IMX8MM_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
-	clks[IMX8MM_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
-	clks[IMX8MM_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
-	clks[IMX8MM_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
-	clks[IMX8MM_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
-	clks[IMX8MM_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
-	clks[IMX8MM_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
-	clks[IMX8MM_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+	hws[IMX8MM_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
+	hws[IMX8MM_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
+	hws[IMX8MM_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
+	hws[IMX8MM_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
+	hws[IMX8MM_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
+	hws[IMX8MM_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
+	hws[IMX8MM_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
+	hws[IMX8MM_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
+	hws[IMX8MM_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
 
 	np = dev->of_node;
 	base = devm_platform_ioremap_resource(pdev, 0);
@@ -404,204 +413,213 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
 		return PTR_ERR(base);
 
 	/* Core Slice */
-	clks[IMX8MM_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels));
-	clks[IMX8MM_CLK_M4_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mm_m4_sels, ARRAY_SIZE(imx8mm_m4_sels));
-	clks[IMX8MM_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mm_vpu_sels, ARRAY_SIZE(imx8mm_vpu_sels));
-	clks[IMX8MM_CLK_GPU3D_SRC] = imx_clk_mux2("gpu3d_src", base + 0x8180, 24, 3,  imx8mm_gpu3d_sels, ARRAY_SIZE(imx8mm_gpu3d_sels));
-	clks[IMX8MM_CLK_GPU2D_SRC] = imx_clk_mux2("gpu2d_src", base + 0x8200, 24, 3, imx8mm_gpu2d_sels,  ARRAY_SIZE(imx8mm_gpu2d_sels));
-	clks[IMX8MM_CLK_A53_CG] = imx_clk_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
-	clks[IMX8MM_CLK_M4_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
-	clks[IMX8MM_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
-	clks[IMX8MM_CLK_GPU3D_CG] = imx_clk_gate3("gpu3d_cg", "gpu3d_src", base + 0x8180, 28);
-	clks[IMX8MM_CLK_GPU2D_CG] = imx_clk_gate3("gpu2d_cg", "gpu2d_src", base + 0x8200, 28);
-	clks[IMX8MM_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
-	clks[IMX8MM_CLK_M4_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
-	clks[IMX8MM_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
-	clks[IMX8MM_CLK_GPU3D_DIV] = imx_clk_divider2("gpu3d_div", "gpu3d_cg", base + 0x8180, 0, 3);
-	clks[IMX8MM_CLK_GPU2D_DIV] = imx_clk_divider2("gpu2d_div", "gpu2d_cg", base + 0x8200, 0, 3);
+	hws[IMX8MM_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels));
+	hws[IMX8MM_CLK_M4_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mm_m4_sels, ARRAY_SIZE(imx8mm_m4_sels));
+	hws[IMX8MM_CLK_VPU_SRC] = imx_clk_hw_mux2("vpu_src", base + 0x8100, 24, 3, imx8mm_vpu_sels, ARRAY_SIZE(imx8mm_vpu_sels));
+	hws[IMX8MM_CLK_GPU3D_SRC] = imx_clk_hw_mux2("gpu3d_src", base + 0x8180, 24, 3,  imx8mm_gpu3d_sels, ARRAY_SIZE(imx8mm_gpu3d_sels));
+	hws[IMX8MM_CLK_GPU2D_SRC] = imx_clk_hw_mux2("gpu2d_src", base + 0x8200, 24, 3, imx8mm_gpu2d_sels,  ARRAY_SIZE(imx8mm_gpu2d_sels));
+	hws[IMX8MM_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
+	hws[IMX8MM_CLK_M4_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
+	hws[IMX8MM_CLK_VPU_CG] = imx_clk_hw_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
+	hws[IMX8MM_CLK_GPU3D_CG] = imx_clk_hw_gate3("gpu3d_cg", "gpu3d_src", base + 0x8180, 28);
+	hws[IMX8MM_CLK_GPU2D_CG] = imx_clk_hw_gate3("gpu2d_cg", "gpu2d_src", base + 0x8200, 28);
+	hws[IMX8MM_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+	hws[IMX8MM_CLK_M4_DIV] = imx_clk_hw_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
+	hws[IMX8MM_CLK_VPU_DIV] = imx_clk_hw_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
+	hws[IMX8MM_CLK_GPU3D_DIV] = imx_clk_hw_divider2("gpu3d_div", "gpu3d_cg", base + 0x8180, 0, 3);
+	hws[IMX8MM_CLK_GPU2D_DIV] = imx_clk_hw_divider2("gpu2d_div", "gpu2d_cg", base + 0x8200, 0, 3);
 
 	/* BUS */
-	clks[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi",  imx8mm_main_axi_sels, base + 0x8800);
-	clks[IMX8MM_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
-	clks[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
-	clks[IMX8MM_CLK_VPU_BUS] = imx8m_clk_composite("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
-	clks[IMX8MM_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
-	clks[IMX8MM_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
-	clks[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_composite("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00);
-	clks[IMX8MM_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
-	clks[IMX8MM_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
-	clks[IMX8MM_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
-	clks[IMX8MM_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00);
-	clks[IMX8MM_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);
+	hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi",  imx8mm_main_axi_sels, base + 0x8800);
+	hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
+	hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
+	hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
+	hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
+	hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
+	hws[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_hw_composite("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00);
+	hws[IMX8MM_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
+	hws[IMX8MM_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
+	hws[IMX8MM_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
+	hws[IMX8MM_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00);
+	hws[IMX8MM_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);
 
 	/* AHB */
-	clks[IMX8MM_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
-	clks[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);
+	hws[IMX8MM_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
+	hws[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);
 
 	/* IPG */
-	clks[IMX8MM_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
-	clks[IMX8MM_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+	hws[IMX8MM_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
+	hws[IMX8MM_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+
+	/*
+	 * DRAM clocks are manipulated from TF-A outside the clock framework. Mark
+	 * with GET_RATE_NOCACHE so the divider value is always read from hardware.
+	 */
+	hws[IMX8MM_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
+	hws[IMX8MM_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mm_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
 
 	/* IP */
-	clks[IMX8MM_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000);
-	clks[IMX8MM_CLK_DRAM_APB] = imx8m_clk_composite_critical("dram_apb", imx8mm_dram_apb_sels, base + 0xa080);
-	clks[IMX8MM_CLK_VPU_G1] = imx8m_clk_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100);
-	clks[IMX8MM_CLK_VPU_G2] = imx8m_clk_composite("vpu_g2", imx8mm_vpu_g2_sels, base + 0xa180);
-	clks[IMX8MM_CLK_DISP_DTRC] = imx8m_clk_composite("disp_dtrc", imx8mm_disp_dtrc_sels, base + 0xa200);
-	clks[IMX8MM_CLK_DISP_DC8000] = imx8m_clk_composite("disp_dc8000", imx8mm_disp_dc8000_sels, base + 0xa280);
-	clks[IMX8MM_CLK_PCIE1_CTRL] = imx8m_clk_composite("pcie1_ctrl", imx8mm_pcie1_ctrl_sels, base + 0xa300);
-	clks[IMX8MM_CLK_PCIE1_PHY] = imx8m_clk_composite("pcie1_phy", imx8mm_pcie1_phy_sels, base + 0xa380);
-	clks[IMX8MM_CLK_PCIE1_AUX] = imx8m_clk_composite("pcie1_aux", imx8mm_pcie1_aux_sels, base + 0xa400);
-	clks[IMX8MM_CLK_DC_PIXEL] = imx8m_clk_composite("dc_pixel", imx8mm_dc_pixel_sels, base + 0xa480);
-	clks[IMX8MM_CLK_LCDIF_PIXEL] = imx8m_clk_composite("lcdif_pixel", imx8mm_lcdif_pixel_sels, base + 0xa500);
-	clks[IMX8MM_CLK_SAI1] = imx8m_clk_composite("sai1", imx8mm_sai1_sels, base + 0xa580);
-	clks[IMX8MM_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mm_sai2_sels, base + 0xa600);
-	clks[IMX8MM_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mm_sai3_sels, base + 0xa680);
-	clks[IMX8MM_CLK_SAI4] = imx8m_clk_composite("sai4", imx8mm_sai4_sels, base + 0xa700);
-	clks[IMX8MM_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mm_sai5_sels, base + 0xa780);
-	clks[IMX8MM_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mm_sai6_sels, base + 0xa800);
-	clks[IMX8MM_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mm_spdif1_sels, base + 0xa880);
-	clks[IMX8MM_CLK_SPDIF2] = imx8m_clk_composite("spdif2", imx8mm_spdif2_sels, base + 0xa900);
-	clks[IMX8MM_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mm_enet_ref_sels, base + 0xa980);
-	clks[IMX8MM_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mm_enet_timer_sels, base + 0xaa00);
-	clks[IMX8MM_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mm_enet_phy_sels, base + 0xaa80);
-	clks[IMX8MM_CLK_NAND] = imx8m_clk_composite("nand", imx8mm_nand_sels, base + 0xab00);
-	clks[IMX8MM_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mm_qspi_sels, base + 0xab80);
-	clks[IMX8MM_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mm_usdhc1_sels, base + 0xac00);
-	clks[IMX8MM_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mm_usdhc2_sels, base + 0xac80);
-	clks[IMX8MM_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mm_i2c1_sels, base + 0xad00);
-	clks[IMX8MM_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mm_i2c2_sels, base + 0xad80);
-	clks[IMX8MM_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mm_i2c3_sels, base + 0xae00);
-	clks[IMX8MM_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mm_i2c4_sels, base + 0xae80);
-	clks[IMX8MM_CLK_UART1] = imx8m_clk_composite("uart1", imx8mm_uart1_sels, base + 0xaf00);
-	clks[IMX8MM_CLK_UART2] = imx8m_clk_composite("uart2", imx8mm_uart2_sels, base + 0xaf80);
-	clks[IMX8MM_CLK_UART3] = imx8m_clk_composite("uart3", imx8mm_uart3_sels, base + 0xb000);
-	clks[IMX8MM_CLK_UART4] = imx8m_clk_composite("uart4", imx8mm_uart4_sels, base + 0xb080);
-	clks[IMX8MM_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mm_usb_core_sels, base + 0xb100);
-	clks[IMX8MM_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mm_usb_phy_sels, base + 0xb180);
-	clks[IMX8MM_CLK_GIC] = imx8m_clk_composite_critical("gic", imx8mm_gic_sels, base + 0xb200);
-	clks[IMX8MM_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mm_ecspi1_sels, base + 0xb280);
-	clks[IMX8MM_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mm_ecspi2_sels, base + 0xb300);
-	clks[IMX8MM_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mm_pwm1_sels, base + 0xb380);
-	clks[IMX8MM_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mm_pwm2_sels, base + 0xb400);
-	clks[IMX8MM_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mm_pwm3_sels, base + 0xb480);
-	clks[IMX8MM_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mm_pwm4_sels, base + 0xb500);
-	clks[IMX8MM_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mm_gpt1_sels, base + 0xb580);
-	clks[IMX8MM_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mm_wdog_sels, base + 0xb900);
-	clks[IMX8MM_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mm_wrclk_sels, base + 0xb980);
-	clks[IMX8MM_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mm_clko1_sels, base + 0xba00);
-	clks[IMX8MM_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mm_dsi_core_sels, base + 0xbb00);
-	clks[IMX8MM_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mm_dsi_phy_sels, base + 0xbb80);
-	clks[IMX8MM_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mm_dsi_dbi_sels, base + 0xbc00);
-	clks[IMX8MM_CLK_USDHC3] = imx8m_clk_composite("usdhc3", imx8mm_usdhc3_sels, base + 0xbc80);
-	clks[IMX8MM_CLK_CSI1_CORE] = imx8m_clk_composite("csi1_core", imx8mm_csi1_core_sels, base + 0xbd00);
-	clks[IMX8MM_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mm_csi1_phy_sels, base + 0xbd80);
-	clks[IMX8MM_CLK_CSI1_ESC] = imx8m_clk_composite("csi1_esc", imx8mm_csi1_esc_sels, base + 0xbe00);
-	clks[IMX8MM_CLK_CSI2_CORE] = imx8m_clk_composite("csi2_core", imx8mm_csi2_core_sels, base + 0xbe80);
-	clks[IMX8MM_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mm_csi2_phy_sels, base + 0xbf00);
-	clks[IMX8MM_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mm_csi2_esc_sels, base + 0xbf80);
-	clks[IMX8MM_CLK_PCIE2_CTRL] = imx8m_clk_composite("pcie2_ctrl", imx8mm_pcie2_ctrl_sels, base + 0xc000);
-	clks[IMX8MM_CLK_PCIE2_PHY] = imx8m_clk_composite("pcie2_phy", imx8mm_pcie2_phy_sels, base + 0xc080);
-	clks[IMX8MM_CLK_PCIE2_AUX] = imx8m_clk_composite("pcie2_aux", imx8mm_pcie2_aux_sels, base + 0xc100);
-	clks[IMX8MM_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mm_ecspi3_sels, base + 0xc180);
-	clks[IMX8MM_CLK_PDM] = imx8m_clk_composite("pdm", imx8mm_pdm_sels, base + 0xc200);
-	clks[IMX8MM_CLK_VPU_H1] = imx8m_clk_composite("vpu_h1", imx8mm_vpu_h1_sels, base + 0xc280);
+	hws[IMX8MM_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100);
+	hws[IMX8MM_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mm_vpu_g2_sels, base + 0xa180);
+	hws[IMX8MM_CLK_DISP_DTRC] = imx8m_clk_hw_composite("disp_dtrc", imx8mm_disp_dtrc_sels, base + 0xa200);
+	hws[IMX8MM_CLK_DISP_DC8000] = imx8m_clk_hw_composite("disp_dc8000", imx8mm_disp_dc8000_sels, base + 0xa280);
+	hws[IMX8MM_CLK_PCIE1_CTRL] = imx8m_clk_hw_composite("pcie1_ctrl", imx8mm_pcie1_ctrl_sels, base + 0xa300);
+	hws[IMX8MM_CLK_PCIE1_PHY] = imx8m_clk_hw_composite("pcie1_phy", imx8mm_pcie1_phy_sels, base + 0xa380);
+	hws[IMX8MM_CLK_PCIE1_AUX] = imx8m_clk_hw_composite("pcie1_aux", imx8mm_pcie1_aux_sels, base + 0xa400);
+	hws[IMX8MM_CLK_DC_PIXEL] = imx8m_clk_hw_composite("dc_pixel", imx8mm_dc_pixel_sels, base + 0xa480);
+	hws[IMX8MM_CLK_LCDIF_PIXEL] = imx8m_clk_hw_composite("lcdif_pixel", imx8mm_lcdif_pixel_sels, base + 0xa500);
+	hws[IMX8MM_CLK_SAI1] = imx8m_clk_hw_composite("sai1", imx8mm_sai1_sels, base + 0xa580);
+	hws[IMX8MM_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mm_sai2_sels, base + 0xa600);
+	hws[IMX8MM_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mm_sai3_sels, base + 0xa680);
+	hws[IMX8MM_CLK_SAI4] = imx8m_clk_hw_composite("sai4", imx8mm_sai4_sels, base + 0xa700);
+	hws[IMX8MM_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mm_sai5_sels, base + 0xa780);
+	hws[IMX8MM_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mm_sai6_sels, base + 0xa800);
+	hws[IMX8MM_CLK_SPDIF1] = imx8m_clk_hw_composite("spdif1", imx8mm_spdif1_sels, base + 0xa880);
+	hws[IMX8MM_CLK_SPDIF2] = imx8m_clk_hw_composite("spdif2", imx8mm_spdif2_sels, base + 0xa900);
+	hws[IMX8MM_CLK_ENET_REF] = imx8m_clk_hw_composite("enet_ref", imx8mm_enet_ref_sels, base + 0xa980);
+	hws[IMX8MM_CLK_ENET_TIMER] = imx8m_clk_hw_composite("enet_timer", imx8mm_enet_timer_sels, base + 0xaa00);
+	hws[IMX8MM_CLK_ENET_PHY_REF] = imx8m_clk_hw_composite("enet_phy", imx8mm_enet_phy_sels, base + 0xaa80);
+	hws[IMX8MM_CLK_NAND] = imx8m_clk_hw_composite("nand", imx8mm_nand_sels, base + 0xab00);
+	hws[IMX8MM_CLK_QSPI] = imx8m_clk_hw_composite("qspi", imx8mm_qspi_sels, base + 0xab80);
+	hws[IMX8MM_CLK_USDHC1] = imx8m_clk_hw_composite("usdhc1", imx8mm_usdhc1_sels, base + 0xac00);
+	hws[IMX8MM_CLK_USDHC2] = imx8m_clk_hw_composite("usdhc2", imx8mm_usdhc2_sels, base + 0xac80);
+	hws[IMX8MM_CLK_I2C1] = imx8m_clk_hw_composite("i2c1", imx8mm_i2c1_sels, base + 0xad00);
+	hws[IMX8MM_CLK_I2C2] = imx8m_clk_hw_composite("i2c2", imx8mm_i2c2_sels, base + 0xad80);
+	hws[IMX8MM_CLK_I2C3] = imx8m_clk_hw_composite("i2c3", imx8mm_i2c3_sels, base + 0xae00);
+	hws[IMX8MM_CLK_I2C4] = imx8m_clk_hw_composite("i2c4", imx8mm_i2c4_sels, base + 0xae80);
+	hws[IMX8MM_CLK_UART1] = imx8m_clk_hw_composite("uart1", imx8mm_uart1_sels, base + 0xaf00);
+	hws[IMX8MM_CLK_UART2] = imx8m_clk_hw_composite("uart2", imx8mm_uart2_sels, base + 0xaf80);
+	hws[IMX8MM_CLK_UART3] = imx8m_clk_hw_composite("uart3", imx8mm_uart3_sels, base + 0xb000);
+	hws[IMX8MM_CLK_UART4] = imx8m_clk_hw_composite("uart4", imx8mm_uart4_sels, base + 0xb080);
+	hws[IMX8MM_CLK_USB_CORE_REF] = imx8m_clk_hw_composite("usb_core_ref", imx8mm_usb_core_sels, base + 0xb100);
+	hws[IMX8MM_CLK_USB_PHY_REF] = imx8m_clk_hw_composite("usb_phy_ref", imx8mm_usb_phy_sels, base + 0xb180);
+	hws[IMX8MM_CLK_GIC] = imx8m_clk_hw_composite_critical("gic", imx8mm_gic_sels, base + 0xb200);
+	hws[IMX8MM_CLK_ECSPI1] = imx8m_clk_hw_composite("ecspi1", imx8mm_ecspi1_sels, base + 0xb280);
+	hws[IMX8MM_CLK_ECSPI2] = imx8m_clk_hw_composite("ecspi2", imx8mm_ecspi2_sels, base + 0xb300);
+	hws[IMX8MM_CLK_PWM1] = imx8m_clk_hw_composite("pwm1", imx8mm_pwm1_sels, base + 0xb380);
+	hws[IMX8MM_CLK_PWM2] = imx8m_clk_hw_composite("pwm2", imx8mm_pwm2_sels, base + 0xb400);
+	hws[IMX8MM_CLK_PWM3] = imx8m_clk_hw_composite("pwm3", imx8mm_pwm3_sels, base + 0xb480);
+	hws[IMX8MM_CLK_PWM4] = imx8m_clk_hw_composite("pwm4", imx8mm_pwm4_sels, base + 0xb500);
+	hws[IMX8MM_CLK_GPT1] = imx8m_clk_hw_composite("gpt1", imx8mm_gpt1_sels, base + 0xb580);
+	hws[IMX8MM_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mm_wdog_sels, base + 0xb900);
+	hws[IMX8MM_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mm_wrclk_sels, base + 0xb980);
+	hws[IMX8MM_CLK_CLKO1] = imx8m_clk_hw_composite("clko1", imx8mm_clko1_sels, base + 0xba00);
+	hws[IMX8MM_CLK_DSI_CORE] = imx8m_clk_hw_composite("dsi_core", imx8mm_dsi_core_sels, base + 0xbb00);
+	hws[IMX8MM_CLK_DSI_PHY_REF] = imx8m_clk_hw_composite("dsi_phy_ref", imx8mm_dsi_phy_sels, base + 0xbb80);
+	hws[IMX8MM_CLK_DSI_DBI] = imx8m_clk_hw_composite("dsi_dbi", imx8mm_dsi_dbi_sels, base + 0xbc00);
+	hws[IMX8MM_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mm_usdhc3_sels, base + 0xbc80);
+	hws[IMX8MM_CLK_CSI1_CORE] = imx8m_clk_hw_composite("csi1_core", imx8mm_csi1_core_sels, base + 0xbd00);
+	hws[IMX8MM_CLK_CSI1_PHY_REF] = imx8m_clk_hw_composite("csi1_phy_ref", imx8mm_csi1_phy_sels, base + 0xbd80);
+	hws[IMX8MM_CLK_CSI1_ESC] = imx8m_clk_hw_composite("csi1_esc", imx8mm_csi1_esc_sels, base + 0xbe00);
+	hws[IMX8MM_CLK_CSI2_CORE] = imx8m_clk_hw_composite("csi2_core", imx8mm_csi2_core_sels, base + 0xbe80);
+	hws[IMX8MM_CLK_CSI2_PHY_REF] = imx8m_clk_hw_composite("csi2_phy_ref", imx8mm_csi2_phy_sels, base + 0xbf00);
+	hws[IMX8MM_CLK_CSI2_ESC] = imx8m_clk_hw_composite("csi2_esc", imx8mm_csi2_esc_sels, base + 0xbf80);
+	hws[IMX8MM_CLK_PCIE2_CTRL] = imx8m_clk_hw_composite("pcie2_ctrl", imx8mm_pcie2_ctrl_sels, base + 0xc000);
+	hws[IMX8MM_CLK_PCIE2_PHY] = imx8m_clk_hw_composite("pcie2_phy", imx8mm_pcie2_phy_sels, base + 0xc080);
+	hws[IMX8MM_CLK_PCIE2_AUX] = imx8m_clk_hw_composite("pcie2_aux", imx8mm_pcie2_aux_sels, base + 0xc100);
+	hws[IMX8MM_CLK_ECSPI3] = imx8m_clk_hw_composite("ecspi3", imx8mm_ecspi3_sels, base + 0xc180);
+	hws[IMX8MM_CLK_PDM] = imx8m_clk_hw_composite("pdm", imx8mm_pdm_sels, base + 0xc200);
+	hws[IMX8MM_CLK_VPU_H1] = imx8m_clk_hw_composite("vpu_h1", imx8mm_vpu_h1_sels, base + 0xc280);
 
 	/* CCGR */
-	clks[IMX8MM_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
-	clks[IMX8MM_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
-	clks[IMX8MM_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
-	clks[IMX8MM_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
-	clks[IMX8MM_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
-	clks[IMX8MM_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
-	clks[IMX8MM_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
-	clks[IMX8MM_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
-	clks[IMX8MM_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
-	clks[IMX8MM_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
-	clks[IMX8MM_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
-	clks[IMX8MM_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
-	clks[IMX8MM_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
-	clks[IMX8MM_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
-	clks[IMX8MM_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
-	clks[IMX8MM_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
-	clks[IMX8MM_CLK_PCIE1_ROOT] = imx_clk_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
-	clks[IMX8MM_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
-	clks[IMX8MM_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
-	clks[IMX8MM_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
-	clks[IMX8MM_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
-	clks[IMX8MM_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
-	clks[IMX8MM_CLK_NAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
-	clks[IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
-	clks[IMX8MM_CLK_SAI1_ROOT] = imx_clk_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
-	clks[IMX8MM_CLK_SAI1_IPG] = imx_clk_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
-	clks[IMX8MM_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
-	clks[IMX8MM_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
-	clks[IMX8MM_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
-	clks[IMX8MM_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
-	clks[IMX8MM_CLK_SAI4_ROOT] = imx_clk_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
-	clks[IMX8MM_CLK_SAI4_IPG] = imx_clk_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
-	clks[IMX8MM_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
-	clks[IMX8MM_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
-	clks[IMX8MM_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
-	clks[IMX8MM_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
-	clks[IMX8MM_CLK_SNVS_ROOT] = imx_clk_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
-	clks[IMX8MM_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
-	clks[IMX8MM_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
-	clks[IMX8MM_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
-	clks[IMX8MM_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
-	clks[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
-	clks[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_gate4("gpu3d_root_clk", "gpu3d_div", base + 0x44f0, 0);
-	clks[IMX8MM_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
-	clks[IMX8MM_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
-	clks[IMX8MM_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
-	clks[IMX8MM_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
-	clks[IMX8MM_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
-	clks[IMX8MM_CLK_VPU_G1_ROOT] = imx_clk_gate4("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0);
-	clks[IMX8MM_CLK_GPU_BUS_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
-	clks[IMX8MM_CLK_VPU_H1_ROOT] = imx_clk_gate4("vpu_h1_root_clk", "vpu_h1", base + 0x4590, 0);
-	clks[IMX8MM_CLK_VPU_G2_ROOT] = imx_clk_gate4("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0);
-	clks[IMX8MM_CLK_PDM_ROOT] = imx_clk_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
-	clks[IMX8MM_CLK_PDM_IPG]  = imx_clk_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
-	clks[IMX8MM_CLK_DISP_ROOT] = imx_clk_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_disp);
-	clks[IMX8MM_CLK_DISP_AXI_ROOT]  = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_disp);
-	clks[IMX8MM_CLK_DISP_APB_ROOT]  = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_disp);
-	clks[IMX8MM_CLK_DISP_RTRM_ROOT] = imx_clk_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_disp);
-	clks[IMX8MM_CLK_USDHC3_ROOT] = imx_clk_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
-	clks[IMX8MM_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
-	clks[IMX8MM_CLK_VPU_DEC_ROOT] = imx_clk_gate4("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0);
-	clks[IMX8MM_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
-	clks[IMX8MM_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
-	clks[IMX8MM_CLK_SDMA3_ROOT] = imx_clk_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
-	clks[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_gate4("gpu2d_root_clk", "gpu2d_div", base + 0x4660, 0);
-	clks[IMX8MM_CLK_CSI1_ROOT] = imx_clk_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
+	hws[IMX8MM_CLK_ECSPI1_ROOT] = imx_clk_hw_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
+	hws[IMX8MM_CLK_ECSPI2_ROOT] = imx_clk_hw_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
+	hws[IMX8MM_CLK_ECSPI3_ROOT] = imx_clk_hw_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
+	hws[IMX8MM_CLK_ENET1_ROOT] = imx_clk_hw_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+	hws[IMX8MM_CLK_GPIO1_ROOT] = imx_clk_hw_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
+	hws[IMX8MM_CLK_GPIO2_ROOT] = imx_clk_hw_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
+	hws[IMX8MM_CLK_GPIO3_ROOT] = imx_clk_hw_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
+	hws[IMX8MM_CLK_GPIO4_ROOT] = imx_clk_hw_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
+	hws[IMX8MM_CLK_GPIO5_ROOT] = imx_clk_hw_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
+	hws[IMX8MM_CLK_GPT1_ROOT] = imx_clk_hw_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
+	hws[IMX8MM_CLK_I2C1_ROOT] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
+	hws[IMX8MM_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
+	hws[IMX8MM_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
+	hws[IMX8MM_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
+	hws[IMX8MM_CLK_MU_ROOT] = imx_clk_hw_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
+	hws[IMX8MM_CLK_OCOTP_ROOT] = imx_clk_hw_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
+	hws[IMX8MM_CLK_PCIE1_ROOT] = imx_clk_hw_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
+	hws[IMX8MM_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
+	hws[IMX8MM_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
+	hws[IMX8MM_CLK_PWM3_ROOT] = imx_clk_hw_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
+	hws[IMX8MM_CLK_PWM4_ROOT] = imx_clk_hw_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
+	hws[IMX8MM_CLK_QSPI_ROOT] = imx_clk_hw_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
+	hws[IMX8MM_CLK_NAND_ROOT] = imx_clk_hw_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
+	hws[IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
+	hws[IMX8MM_CLK_SAI1_ROOT] = imx_clk_hw_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
+	hws[IMX8MM_CLK_SAI1_IPG] = imx_clk_hw_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
+	hws[IMX8MM_CLK_SAI2_ROOT] = imx_clk_hw_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
+	hws[IMX8MM_CLK_SAI2_IPG] = imx_clk_hw_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
+	hws[IMX8MM_CLK_SAI3_ROOT] = imx_clk_hw_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
+	hws[IMX8MM_CLK_SAI3_IPG] = imx_clk_hw_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
+	hws[IMX8MM_CLK_SAI4_ROOT] = imx_clk_hw_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
+	hws[IMX8MM_CLK_SAI4_IPG] = imx_clk_hw_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
+	hws[IMX8MM_CLK_SAI5_ROOT] = imx_clk_hw_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
+	hws[IMX8MM_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
+	hws[IMX8MM_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
+	hws[IMX8MM_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+	hws[IMX8MM_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
+	hws[IMX8MM_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
+	hws[IMX8MM_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
+	hws[IMX8MM_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
+	hws[IMX8MM_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
+	hws[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
+	hws[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_div", base + 0x44f0, 0);
+	hws[IMX8MM_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
+	hws[IMX8MM_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
+	hws[IMX8MM_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
+	hws[IMX8MM_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
+	hws[IMX8MM_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
+	hws[IMX8MM_CLK_VPU_G1_ROOT] = imx_clk_hw_gate4("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0);
+	hws[IMX8MM_CLK_GPU_BUS_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
+	hws[IMX8MM_CLK_VPU_H1_ROOT] = imx_clk_hw_gate4("vpu_h1_root_clk", "vpu_h1", base + 0x4590, 0);
+	hws[IMX8MM_CLK_VPU_G2_ROOT] = imx_clk_hw_gate4("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0);
+	hws[IMX8MM_CLK_PDM_ROOT] = imx_clk_hw_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
+	hws[IMX8MM_CLK_PDM_IPG]  = imx_clk_hw_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
+	hws[IMX8MM_CLK_DISP_ROOT] = imx_clk_hw_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_disp);
+	hws[IMX8MM_CLK_DISP_AXI_ROOT]  = imx_clk_hw_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_disp);
+	hws[IMX8MM_CLK_DISP_APB_ROOT]  = imx_clk_hw_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_disp);
+	hws[IMX8MM_CLK_DISP_RTRM_ROOT] = imx_clk_hw_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_disp);
+	hws[IMX8MM_CLK_USDHC3_ROOT] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
+	hws[IMX8MM_CLK_TMU_ROOT] = imx_clk_hw_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
+	hws[IMX8MM_CLK_VPU_DEC_ROOT] = imx_clk_hw_gate4("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0);
+	hws[IMX8MM_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
+	hws[IMX8MM_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
+	hws[IMX8MM_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
+	hws[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", base + 0x4660, 0);
+	hws[IMX8MM_CLK_CSI1_ROOT] = imx_clk_hw_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
 
-	clks[IMX8MM_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc_24m", 1, 8);
+	hws[IMX8MM_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc_24m", 1, 8);
 
-	clks[IMX8MM_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
-	clks[IMX8MM_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mm_dram_core_sels, ARRAY_SIZE(imx8mm_dram_core_sels), CLK_IS_CRITICAL);
+	hws[IMX8MM_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+	hws[IMX8MM_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mm_dram_core_sels, ARRAY_SIZE(imx8mm_dram_core_sels), CLK_IS_CRITICAL);
 
-	clks[IMX8MM_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
-					   clks[IMX8MM_CLK_A53_DIV],
-					   clks[IMX8MM_CLK_A53_SRC],
-					   clks[IMX8MM_ARM_PLL_OUT],
-					   clks[IMX8MM_SYS_PLL1_800M]);
+	hws[IMX8MM_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
+					   hws[IMX8MM_CLK_A53_DIV]->clk,
+					   hws[IMX8MM_CLK_A53_SRC]->clk,
+					   hws[IMX8MM_ARM_PLL_OUT]->clk,
+					   hws[IMX8MM_SYS_PLL1_800M]->clk);
 
-	imx_check_clocks(clks, ARRAY_SIZE(clks));
+	imx_check_clk_hws(hws, IMX8MM_CLK_END);
 
-	clk_data.clks = clks;
-	clk_data.clk_num = ARRAY_SIZE(clks);
-	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
 	if (ret < 0) {
-		pr_err("failed to register clks for i.MX8MM\n");
-		goto unregister_clks;
+		dev_err(dev, "failed to register clks for i.MX8MM\n");
+		goto unregister_hws;
 	}
 
-	imx_register_uart_clocks(uart_clks);
+	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+		int index = uart_clk_ids[i];
+
+		uart_hws[i] = &hws[index]->clk;
+	}
+
+	imx_register_uart_clocks(uart_hws);
 
 	return 0;
 
-unregister_clks:
-	imx_unregister_clocks(clks, ARRAY_SIZE(clks));
+unregister_hws:
+	imx_unregister_hw_clocks(hws, IMX8MM_CLK_END);
 
 	return ret;
 }
@@ -616,6 +634,11 @@ static struct platform_driver imx8mm_clk_driver = {
 	.probe = imx8mm_clocks_probe,
 	.driver = {
 		.name = "imx8mm-ccm",
+		/*
+		 * Disable bind attributes: clocks are not removed and
+		 * reloading the driver will crash or break devices.
+		 */
+		.suppress_bind_attrs = true,
 		.of_match_table = of_match_ptr(imx8mm_clk_of_match),
 	},
 };
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 9f5a5a5..c5e7316 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -12,6 +12,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include <linux/types.h>
 
 #include "clk.h"
@@ -280,284 +281,302 @@ static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sy
 						 "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out",
 						 "video_pll1_out", "osc_32k", };
 
-static struct clk *clks[IMX8MN_CLK_END];
-static struct clk_onecell_data clk_data;
+static struct clk_hw_onecell_data *clk_hw_data;
+static struct clk_hw **hws;
 
-static struct clk ** const uart_clks[] = {
-	&clks[IMX8MN_CLK_UART1_ROOT],
-	&clks[IMX8MN_CLK_UART2_ROOT],
-	&clks[IMX8MN_CLK_UART3_ROOT],
-	&clks[IMX8MN_CLK_UART4_ROOT],
-	NULL
+static const int uart_clk_ids[] = {
+	IMX8MN_CLK_UART1_ROOT,
+	IMX8MN_CLK_UART2_ROOT,
+	IMX8MN_CLK_UART3_ROOT,
+	IMX8MN_CLK_UART4_ROOT,
 };
+static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
 
 static int imx8mn_clocks_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	void __iomem *base;
-	int ret;
+	int ret, i;
 
-	clks[IMX8MN_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
-	clks[IMX8MN_CLK_24M] = of_clk_get_by_name(np, "osc_24m");
-	clks[IMX8MN_CLK_32K] = of_clk_get_by_name(np, "osc_32k");
-	clks[IMX8MN_CLK_EXT1] = of_clk_get_by_name(np, "clk_ext1");
-	clks[IMX8MN_CLK_EXT2] = of_clk_get_by_name(np, "clk_ext2");
-	clks[IMX8MN_CLK_EXT3] = of_clk_get_by_name(np, "clk_ext3");
-	clks[IMX8MN_CLK_EXT4] = of_clk_get_by_name(np, "clk_ext4");
+	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+					  IMX8MN_CLK_END), GFP_KERNEL);
+	if (WARN_ON(!clk_hw_data))
+		return -ENOMEM;
+
+	clk_hw_data->num = IMX8MN_CLK_END;
+	hws = clk_hw_data->hws;
+
+	hws[IMX8MN_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+	hws[IMX8MN_CLK_24M] = imx_obtain_fixed_clk_hw(np, "osc_24m");
+	hws[IMX8MN_CLK_32K] = imx_obtain_fixed_clk_hw(np, "osc_32k");
+	hws[IMX8MN_CLK_EXT1] = imx_obtain_fixed_clk_hw(np, "clk_ext1");
+	hws[IMX8MN_CLK_EXT2] = imx_obtain_fixed_clk_hw(np, "clk_ext2");
+	hws[IMX8MN_CLK_EXT3] = imx_obtain_fixed_clk_hw(np, "clk_ext3");
+	hws[IMX8MN_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
 
 	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mn-anatop");
 	base = of_iomap(np, 0);
 	if (WARN_ON(!base)) {
 		ret = -ENOMEM;
-		goto unregister_clks;
+		goto unregister_hws;
 	}
 
-	clks[IMX8MN_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MN_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MN_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MN_DRAM_PLL_REF_SEL] = imx_clk_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MN_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MN_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MN_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MN_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
 
-	clks[IMX8MN_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
-	clks[IMX8MN_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
-	clks[IMX8MN_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
-	clks[IMX8MN_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_pll);
-	clks[IMX8MN_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
-	clks[IMX8MN_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
-	clks[IMX8MN_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
-	clks[IMX8MN_SYS_PLL1] = imx_clk_fixed("sys_pll1", 800000000);
-	clks[IMX8MN_SYS_PLL2] = imx_clk_fixed("sys_pll2", 1000000000);
-	clks[IMX8MN_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
+	hws[IMX8MN_AUDIO_PLL1] = imx_clk_hw_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
+	hws[IMX8MN_AUDIO_PLL2] = imx_clk_hw_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
+	hws[IMX8MN_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
+	hws[IMX8MN_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_dram_pll);
+	hws[IMX8MN_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
+	hws[IMX8MN_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
+	hws[IMX8MN_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
+	hws[IMX8MN_SYS_PLL1] = imx_clk_hw_fixed("sys_pll1", 800000000);
+	hws[IMX8MN_SYS_PLL2] = imx_clk_hw_fixed("sys_pll2", 1000000000);
+	hws[IMX8MN_SYS_PLL3] = imx_clk_hw_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
 
 	/* PLL bypass out */
-	clks[IMX8MN_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MN_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MN_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MN_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MN_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MN_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MN_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MN_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MN_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MN_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
 
 	/* PLL out gate */
-	clks[IMX8MN_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
-	clks[IMX8MN_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
-	clks[IMX8MN_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
-	clks[IMX8MN_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
-	clks[IMX8MN_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
-	clks[IMX8MN_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
-	clks[IMX8MN_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
-	clks[IMX8MN_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
+	hws[IMX8MN_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
+	hws[IMX8MN_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
+	hws[IMX8MN_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
+	hws[IMX8MN_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
+	hws[IMX8MN_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
+	hws[IMX8MN_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
+	hws[IMX8MN_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
+	hws[IMX8MN_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
 
 	/* SYS PLL1 fixed output */
-	clks[IMX8MN_SYS_PLL1_40M_CG] = imx_clk_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
-	clks[IMX8MN_SYS_PLL1_80M_CG] = imx_clk_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
-	clks[IMX8MN_SYS_PLL1_100M_CG] = imx_clk_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
-	clks[IMX8MN_SYS_PLL1_133M_CG] = imx_clk_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
-	clks[IMX8MN_SYS_PLL1_160M_CG] = imx_clk_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
-	clks[IMX8MN_SYS_PLL1_200M_CG] = imx_clk_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
-	clks[IMX8MN_SYS_PLL1_266M_CG] = imx_clk_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
-	clks[IMX8MN_SYS_PLL1_400M_CG] = imx_clk_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
-	clks[IMX8MN_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
+	hws[IMX8MN_SYS_PLL1_40M_CG] = imx_clk_hw_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
+	hws[IMX8MN_SYS_PLL1_80M_CG] = imx_clk_hw_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
+	hws[IMX8MN_SYS_PLL1_100M_CG] = imx_clk_hw_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
+	hws[IMX8MN_SYS_PLL1_133M_CG] = imx_clk_hw_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
+	hws[IMX8MN_SYS_PLL1_160M_CG] = imx_clk_hw_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
+	hws[IMX8MN_SYS_PLL1_200M_CG] = imx_clk_hw_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
+	hws[IMX8MN_SYS_PLL1_266M_CG] = imx_clk_hw_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
+	hws[IMX8MN_SYS_PLL1_400M_CG] = imx_clk_hw_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
+	hws[IMX8MN_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
 
-	clks[IMX8MN_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
-	clks[IMX8MN_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
-	clks[IMX8MN_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
-	clks[IMX8MN_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
-	clks[IMX8MN_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
-	clks[IMX8MN_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
-	clks[IMX8MN_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
-	clks[IMX8MN_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
-	clks[IMX8MN_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
+	hws[IMX8MN_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
+	hws[IMX8MN_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
+	hws[IMX8MN_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
+	hws[IMX8MN_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
+	hws[IMX8MN_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
+	hws[IMX8MN_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
+	hws[IMX8MN_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
+	hws[IMX8MN_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
+	hws[IMX8MN_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
 
 	/* SYS PLL2 fixed output */
-	clks[IMX8MN_SYS_PLL2_50M_CG] = imx_clk_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
-	clks[IMX8MN_SYS_PLL2_100M_CG] = imx_clk_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
-	clks[IMX8MN_SYS_PLL2_125M_CG] = imx_clk_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
-	clks[IMX8MN_SYS_PLL2_166M_CG] = imx_clk_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
-	clks[IMX8MN_SYS_PLL2_200M_CG] = imx_clk_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
-	clks[IMX8MN_SYS_PLL2_250M_CG] = imx_clk_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
-	clks[IMX8MN_SYS_PLL2_333M_CG] = imx_clk_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
-	clks[IMX8MN_SYS_PLL2_500M_CG] = imx_clk_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
-	clks[IMX8MN_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
+	hws[IMX8MN_SYS_PLL2_50M_CG] = imx_clk_hw_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
+	hws[IMX8MN_SYS_PLL2_100M_CG] = imx_clk_hw_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
+	hws[IMX8MN_SYS_PLL2_125M_CG] = imx_clk_hw_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
+	hws[IMX8MN_SYS_PLL2_166M_CG] = imx_clk_hw_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
+	hws[IMX8MN_SYS_PLL2_200M_CG] = imx_clk_hw_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
+	hws[IMX8MN_SYS_PLL2_250M_CG] = imx_clk_hw_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
+	hws[IMX8MN_SYS_PLL2_333M_CG] = imx_clk_hw_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
+	hws[IMX8MN_SYS_PLL2_500M_CG] = imx_clk_hw_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
+	hws[IMX8MN_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
 
-	clks[IMX8MN_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
-	clks[IMX8MN_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
-	clks[IMX8MN_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
-	clks[IMX8MN_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
-	clks[IMX8MN_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
-	clks[IMX8MN_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
-	clks[IMX8MN_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
-	clks[IMX8MN_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
-	clks[IMX8MN_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+	hws[IMX8MN_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
+	hws[IMX8MN_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
+	hws[IMX8MN_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
+	hws[IMX8MN_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
+	hws[IMX8MN_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
+	hws[IMX8MN_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
+	hws[IMX8MN_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
+	hws[IMX8MN_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
+	hws[IMX8MN_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
 
 	np = dev->of_node;
 	base = devm_platform_ioremap_resource(pdev, 0);
 	if (WARN_ON(IS_ERR(base))) {
 		ret = PTR_ERR(base);
-		goto unregister_clks;
+		goto unregister_hws;
 	}
 
 	/* CORE */
-	clks[IMX8MN_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mn_a53_sels, ARRAY_SIZE(imx8mn_a53_sels));
-	clks[IMX8MN_CLK_GPU_CORE_SRC] = imx_clk_mux2("gpu_core_src", base + 0x8180, 24, 3,  imx8mn_gpu_core_sels, ARRAY_SIZE(imx8mn_gpu_core_sels));
-	clks[IMX8MN_CLK_GPU_SHADER_SRC] = imx_clk_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mn_gpu_shader_sels,  ARRAY_SIZE(imx8mn_gpu_shader_sels));
-	clks[IMX8MN_CLK_A53_CG] = imx_clk_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
-	clks[IMX8MN_CLK_GPU_CORE_CG] = imx_clk_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
-	clks[IMX8MN_CLK_GPU_SHADER_CG] = imx_clk_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
+	hws[IMX8MN_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mn_a53_sels, ARRAY_SIZE(imx8mn_a53_sels));
+	hws[IMX8MN_CLK_GPU_CORE_SRC] = imx_clk_hw_mux2("gpu_core_src", base + 0x8180, 24, 3,  imx8mn_gpu_core_sels, ARRAY_SIZE(imx8mn_gpu_core_sels));
+	hws[IMX8MN_CLK_GPU_SHADER_SRC] = imx_clk_hw_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mn_gpu_shader_sels,  ARRAY_SIZE(imx8mn_gpu_shader_sels));
+	hws[IMX8MN_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
+	hws[IMX8MN_CLK_GPU_CORE_CG] = imx_clk_hw_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
+	hws[IMX8MN_CLK_GPU_SHADER_CG] = imx_clk_hw_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
 
-	clks[IMX8MN_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
-	clks[IMX8MN_CLK_GPU_CORE_DIV] = imx_clk_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
-	clks[IMX8MN_CLK_GPU_SHADER_DIV] = imx_clk_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
+	hws[IMX8MN_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+	hws[IMX8MN_CLK_GPU_CORE_DIV] = imx_clk_hw_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
+	hws[IMX8MN_CLK_GPU_SHADER_DIV] = imx_clk_hw_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
 
 	/* BUS */
-	clks[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
-	clks[IMX8MN_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mn_enet_axi_sels, base + 0x8880);
-	clks[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_composite("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900);
-	clks[IMX8MN_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00);
-	clks[IMX8MN_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mn_disp_apb_sels, base + 0x8a80);
-	clks[IMX8MN_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80);
-	clks[IMX8MN_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00);
-	clks[IMX8MN_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80);
-	clks[IMX8MN_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mn_noc_sels, base + 0x8d00);
+	hws[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
+	hws[IMX8MN_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mn_enet_axi_sels, base + 0x8880);
+	hws[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900);
+	hws[IMX8MN_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00);
+	hws[IMX8MN_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mn_disp_apb_sels, base + 0x8a80);
+	hws[IMX8MN_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80);
+	hws[IMX8MN_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00);
+	hws[IMX8MN_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80);
+	hws[IMX8MN_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mn_noc_sels, base + 0x8d00);
 
-	clks[IMX8MN_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mn_ahb_sels, base + 0x9000);
-	clks[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100);
-	clks[IMX8MN_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
-	clks[IMX8MN_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
-	clks[IMX8MN_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mn_dram_core_sels, ARRAY_SIZE(imx8mn_dram_core_sels), CLK_IS_CRITICAL);
-	clks[IMX8MN_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000);
-	clks[IMX8MN_CLK_DRAM_APB] = imx8m_clk_composite_critical("dram_apb", imx8mn_dram_apb_sels, base + 0xa080);
-	clks[IMX8MN_CLK_DISP_PIXEL] = imx8m_clk_composite("disp_pixel", imx8mn_disp_pixel_sels, base + 0xa500);
-	clks[IMX8MN_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mn_sai2_sels, base + 0xa600);
-	clks[IMX8MN_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mn_sai3_sels, base + 0xa680);
-	clks[IMX8MN_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mn_sai5_sels, base + 0xa780);
-	clks[IMX8MN_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mn_sai6_sels, base + 0xa800);
-	clks[IMX8MN_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mn_spdif1_sels, base + 0xa880);
-	clks[IMX8MN_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mn_enet_ref_sels, base + 0xa980);
-	clks[IMX8MN_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mn_enet_timer_sels, base + 0xaa00);
-	clks[IMX8MN_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mn_enet_phy_sels, base + 0xaa80);
-	clks[IMX8MN_CLK_NAND] = imx8m_clk_composite("nand", imx8mn_nand_sels, base + 0xab00);
-	clks[IMX8MN_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mn_qspi_sels, base + 0xab80);
-	clks[IMX8MN_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mn_usdhc1_sels, base + 0xac00);
-	clks[IMX8MN_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mn_usdhc2_sels, base + 0xac80);
-	clks[IMX8MN_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mn_i2c1_sels, base + 0xad00);
-	clks[IMX8MN_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mn_i2c2_sels, base + 0xad80);
-	clks[IMX8MN_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mn_i2c3_sels, base + 0xae00);
-	clks[IMX8MN_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mn_i2c4_sels, base + 0xae80);
-	clks[IMX8MN_CLK_UART1] = imx8m_clk_composite("uart1", imx8mn_uart1_sels, base + 0xaf00);
-	clks[IMX8MN_CLK_UART2] = imx8m_clk_composite("uart2", imx8mn_uart2_sels, base + 0xaf80);
-	clks[IMX8MN_CLK_UART3] = imx8m_clk_composite("uart3", imx8mn_uart3_sels, base + 0xb000);
-	clks[IMX8MN_CLK_UART4] = imx8m_clk_composite("uart4", imx8mn_uart4_sels, base + 0xb080);
-	clks[IMX8MN_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mn_usb_core_sels, base + 0xb100);
-	clks[IMX8MN_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mn_usb_phy_sels, base + 0xb180);
-	clks[IMX8MN_CLK_GIC] = imx8m_clk_composite_critical("gic", imx8mn_gic_sels, base + 0xb200);
-	clks[IMX8MN_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mn_ecspi1_sels, base + 0xb280);
-	clks[IMX8MN_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mn_ecspi2_sels, base + 0xb300);
-	clks[IMX8MN_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mn_pwm1_sels, base + 0xb380);
-	clks[IMX8MN_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mn_pwm2_sels, base + 0xb400);
-	clks[IMX8MN_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mn_pwm3_sels, base + 0xb480);
-	clks[IMX8MN_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mn_pwm4_sels, base + 0xb500);
-	clks[IMX8MN_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mn_wdog_sels, base + 0xb900);
-	clks[IMX8MN_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mn_wrclk_sels, base + 0xb980);
-	clks[IMX8MN_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mn_clko1_sels, base + 0xba00);
-	clks[IMX8MN_CLK_CLKO2] = imx8m_clk_composite("clko2", imx8mn_clko2_sels, base + 0xba80);
-	clks[IMX8MN_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mn_dsi_core_sels, base + 0xbb00);
-	clks[IMX8MN_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mn_dsi_phy_sels, base + 0xbb80);
-	clks[IMX8MN_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mn_dsi_dbi_sels, base + 0xbc00);
-	clks[IMX8MN_CLK_USDHC3] = imx8m_clk_composite("usdhc3", imx8mn_usdhc3_sels, base + 0xbc80);
-	clks[IMX8MN_CLK_CAMERA_PIXEL] = imx8m_clk_composite("camera_pixel", imx8mn_camera_pixel_sels, base + 0xbd00);
-	clks[IMX8MN_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mn_csi1_phy_sels, base + 0xbd80);
-	clks[IMX8MN_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mn_csi2_phy_sels, base + 0xbf00);
-	clks[IMX8MN_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mn_csi2_esc_sels, base + 0xbf80);
-	clks[IMX8MN_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mn_ecspi3_sels, base + 0xc180);
-	clks[IMX8MN_CLK_PDM] = imx8m_clk_composite("pdm", imx8mn_pdm_sels, base + 0xc200);
-	clks[IMX8MN_CLK_SAI7] = imx8m_clk_composite("sai7", imx8mn_sai7_sels, base + 0xc300);
+	hws[IMX8MN_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mn_ahb_sels, base + 0x9000);
+	hws[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100);
+	hws[IMX8MN_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
+	hws[IMX8MN_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+	hws[IMX8MN_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mn_dram_core_sels, ARRAY_SIZE(imx8mn_dram_core_sels), CLK_IS_CRITICAL);
 
-	clks[IMX8MN_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
-	clks[IMX8MN_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
-	clks[IMX8MN_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
-	clks[IMX8MN_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
-	clks[IMX8MN_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
-	clks[IMX8MN_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
-	clks[IMX8MN_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
-	clks[IMX8MN_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
-	clks[IMX8MN_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
-	clks[IMX8MN_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
-	clks[IMX8MN_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
-	clks[IMX8MN_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
-	clks[IMX8MN_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
-	clks[IMX8MN_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
-	clks[IMX8MN_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
-	clks[IMX8MN_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
-	clks[IMX8MN_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
-	clks[IMX8MN_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
-	clks[IMX8MN_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
-	clks[IMX8MN_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
-	clks[IMX8MN_CLK_NAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
-	clks[IMX8MN_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
-	clks[IMX8MN_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
-	clks[IMX8MN_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
-	clks[IMX8MN_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
-	clks[IMX8MN_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
-	clks[IMX8MN_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
-	clks[IMX8MN_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
-	clks[IMX8MN_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
-	clks[IMX8MN_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
-	clks[IMX8MN_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
-	clks[IMX8MN_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
-	clks[IMX8MN_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
-	clks[IMX8MN_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
-	clks[IMX8MN_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_core_ref", base + 0x44d0, 0);
-	clks[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_gate4("gpu_core_root_clk", "gpu_core_div", base + 0x44f0, 0);
-	clks[IMX8MN_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
-	clks[IMX8MN_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
-	clks[IMX8MN_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
-	clks[IMX8MN_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
-	clks[IMX8MN_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
-	clks[IMX8MN_CLK_GPU_BUS_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
-	clks[IMX8MN_CLK_ASRC_ROOT] = imx_clk_gate4("asrc_root_clk", "audio_ahb", base + 0x4580, 0);
-	clks[IMX8MN_CLK_PDM_ROOT] = imx_clk_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
-	clks[IMX8MN_CLK_PDM_IPG]  = imx_clk_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
-	clks[IMX8MN_CLK_DISP_AXI_ROOT]  = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_disp);
-	clks[IMX8MN_CLK_DISP_APB_ROOT]  = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_disp);
-	clks[IMX8MN_CLK_CAMERA_PIXEL_ROOT] = imx_clk_gate2_shared2("camera_pixel_clk", "camera_pixel", base + 0x45d0, 0, &share_count_disp);
-	clks[IMX8MN_CLK_DISP_PIXEL_ROOT] = imx_clk_gate2_shared2("disp_pixel_clk", "disp_pixel", base + 0x45d0, 0, &share_count_disp);
-	clks[IMX8MN_CLK_USDHC3_ROOT] = imx_clk_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
-	clks[IMX8MN_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
-	clks[IMX8MN_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
-	clks[IMX8MN_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
-	clks[IMX8MN_CLK_SDMA3_ROOT] = imx_clk_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
-	clks[IMX8MN_CLK_SAI7_ROOT] = imx_clk_gate2_shared2("sai7_root_clk", "sai7", base + 0x4650, 0, &share_count_sai7);
+	/*
+	 * DRAM clocks are manipulated by TF-A outside the clock framework.
+	 * Mark them with CLK_GET_RATE_NOCACHE to always read the divider from hardware.
+	 */
+	hws[IMX8MN_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
+	hws[IMX8MN_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mn_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
 
-	clks[IMX8MN_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+	hws[IMX8MN_CLK_DISP_PIXEL] = imx8m_clk_hw_composite("disp_pixel", imx8mn_disp_pixel_sels, base + 0xa500);
+	hws[IMX8MN_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mn_sai2_sels, base + 0xa600);
+	hws[IMX8MN_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mn_sai3_sels, base + 0xa680);
+	hws[IMX8MN_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mn_sai5_sels, base + 0xa780);
+	hws[IMX8MN_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mn_sai6_sels, base + 0xa800);
+	hws[IMX8MN_CLK_SPDIF1] = imx8m_clk_hw_composite("spdif1", imx8mn_spdif1_sels, base + 0xa880);
+	hws[IMX8MN_CLK_ENET_REF] = imx8m_clk_hw_composite("enet_ref", imx8mn_enet_ref_sels, base + 0xa980);
+	hws[IMX8MN_CLK_ENET_TIMER] = imx8m_clk_hw_composite("enet_timer", imx8mn_enet_timer_sels, base + 0xaa00);
+	hws[IMX8MN_CLK_ENET_PHY_REF] = imx8m_clk_hw_composite("enet_phy", imx8mn_enet_phy_sels, base + 0xaa80);
+	hws[IMX8MN_CLK_NAND] = imx8m_clk_hw_composite("nand", imx8mn_nand_sels, base + 0xab00);
+	hws[IMX8MN_CLK_QSPI] = imx8m_clk_hw_composite("qspi", imx8mn_qspi_sels, base + 0xab80);
+	hws[IMX8MN_CLK_USDHC1] = imx8m_clk_hw_composite("usdhc1", imx8mn_usdhc1_sels, base + 0xac00);
+	hws[IMX8MN_CLK_USDHC2] = imx8m_clk_hw_composite("usdhc2", imx8mn_usdhc2_sels, base + 0xac80);
+	hws[IMX8MN_CLK_I2C1] = imx8m_clk_hw_composite("i2c1", imx8mn_i2c1_sels, base + 0xad00);
+	hws[IMX8MN_CLK_I2C2] = imx8m_clk_hw_composite("i2c2", imx8mn_i2c2_sels, base + 0xad80);
+	hws[IMX8MN_CLK_I2C3] = imx8m_clk_hw_composite("i2c3", imx8mn_i2c3_sels, base + 0xae00);
+	hws[IMX8MN_CLK_I2C4] = imx8m_clk_hw_composite("i2c4", imx8mn_i2c4_sels, base + 0xae80);
+	hws[IMX8MN_CLK_UART1] = imx8m_clk_hw_composite("uart1", imx8mn_uart1_sels, base + 0xaf00);
+	hws[IMX8MN_CLK_UART2] = imx8m_clk_hw_composite("uart2", imx8mn_uart2_sels, base + 0xaf80);
+	hws[IMX8MN_CLK_UART3] = imx8m_clk_hw_composite("uart3", imx8mn_uart3_sels, base + 0xb000);
+	hws[IMX8MN_CLK_UART4] = imx8m_clk_hw_composite("uart4", imx8mn_uart4_sels, base + 0xb080);
+	hws[IMX8MN_CLK_USB_CORE_REF] = imx8m_clk_hw_composite("usb_core_ref", imx8mn_usb_core_sels, base + 0xb100);
+	hws[IMX8MN_CLK_USB_PHY_REF] = imx8m_clk_hw_composite("usb_phy_ref", imx8mn_usb_phy_sels, base + 0xb180);
+	hws[IMX8MN_CLK_GIC] = imx8m_clk_hw_composite_critical("gic", imx8mn_gic_sels, base + 0xb200);
+	hws[IMX8MN_CLK_ECSPI1] = imx8m_clk_hw_composite("ecspi1", imx8mn_ecspi1_sels, base + 0xb280);
+	hws[IMX8MN_CLK_ECSPI2] = imx8m_clk_hw_composite("ecspi2", imx8mn_ecspi2_sels, base + 0xb300);
+	hws[IMX8MN_CLK_PWM1] = imx8m_clk_hw_composite("pwm1", imx8mn_pwm1_sels, base + 0xb380);
+	hws[IMX8MN_CLK_PWM2] = imx8m_clk_hw_composite("pwm2", imx8mn_pwm2_sels, base + 0xb400);
+	hws[IMX8MN_CLK_PWM3] = imx8m_clk_hw_composite("pwm3", imx8mn_pwm3_sels, base + 0xb480);
+	hws[IMX8MN_CLK_PWM4] = imx8m_clk_hw_composite("pwm4", imx8mn_pwm4_sels, base + 0xb500);
+	hws[IMX8MN_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mn_wdog_sels, base + 0xb900);
+	hws[IMX8MN_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mn_wrclk_sels, base + 0xb980);
+	hws[IMX8MN_CLK_CLKO1] = imx8m_clk_hw_composite("clko1", imx8mn_clko1_sels, base + 0xba00);
+	hws[IMX8MN_CLK_CLKO2] = imx8m_clk_hw_composite("clko2", imx8mn_clko2_sels, base + 0xba80);
+	hws[IMX8MN_CLK_DSI_CORE] = imx8m_clk_hw_composite("dsi_core", imx8mn_dsi_core_sels, base + 0xbb00);
+	hws[IMX8MN_CLK_DSI_PHY_REF] = imx8m_clk_hw_composite("dsi_phy_ref", imx8mn_dsi_phy_sels, base + 0xbb80);
+	hws[IMX8MN_CLK_DSI_DBI] = imx8m_clk_hw_composite("dsi_dbi", imx8mn_dsi_dbi_sels, base + 0xbc00);
+	hws[IMX8MN_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mn_usdhc3_sels, base + 0xbc80);
+	hws[IMX8MN_CLK_CAMERA_PIXEL] = imx8m_clk_hw_composite("camera_pixel", imx8mn_camera_pixel_sels, base + 0xbd00);
+	hws[IMX8MN_CLK_CSI1_PHY_REF] = imx8m_clk_hw_composite("csi1_phy_ref", imx8mn_csi1_phy_sels, base + 0xbd80);
+	hws[IMX8MN_CLK_CSI2_PHY_REF] = imx8m_clk_hw_composite("csi2_phy_ref", imx8mn_csi2_phy_sels, base + 0xbf00);
+	hws[IMX8MN_CLK_CSI2_ESC] = imx8m_clk_hw_composite("csi2_esc", imx8mn_csi2_esc_sels, base + 0xbf80);
+	hws[IMX8MN_CLK_ECSPI3] = imx8m_clk_hw_composite("ecspi3", imx8mn_ecspi3_sels, base + 0xc180);
+	hws[IMX8MN_CLK_PDM] = imx8m_clk_hw_composite("pdm", imx8mn_pdm_sels, base + 0xc200);
+	hws[IMX8MN_CLK_SAI7] = imx8m_clk_hw_composite("sai7", imx8mn_sai7_sels, base + 0xc300);
 
-	clks[IMX8MN_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
-					   clks[IMX8MN_CLK_A53_DIV],
-					   clks[IMX8MN_CLK_A53_SRC],
-					   clks[IMX8MN_ARM_PLL_OUT],
-					   clks[IMX8MN_SYS_PLL1_800M]);
+	hws[IMX8MN_CLK_ECSPI1_ROOT] = imx_clk_hw_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
+	hws[IMX8MN_CLK_ECSPI2_ROOT] = imx_clk_hw_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
+	hws[IMX8MN_CLK_ECSPI3_ROOT] = imx_clk_hw_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
+	hws[IMX8MN_CLK_ENET1_ROOT] = imx_clk_hw_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+	hws[IMX8MN_CLK_GPIO1_ROOT] = imx_clk_hw_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
+	hws[IMX8MN_CLK_GPIO2_ROOT] = imx_clk_hw_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
+	hws[IMX8MN_CLK_GPIO3_ROOT] = imx_clk_hw_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
+	hws[IMX8MN_CLK_GPIO4_ROOT] = imx_clk_hw_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
+	hws[IMX8MN_CLK_GPIO5_ROOT] = imx_clk_hw_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
+	hws[IMX8MN_CLK_I2C1_ROOT] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
+	hws[IMX8MN_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
+	hws[IMX8MN_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
+	hws[IMX8MN_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
+	hws[IMX8MN_CLK_MU_ROOT] = imx_clk_hw_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
+	hws[IMX8MN_CLK_OCOTP_ROOT] = imx_clk_hw_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
+	hws[IMX8MN_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
+	hws[IMX8MN_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
+	hws[IMX8MN_CLK_PWM3_ROOT] = imx_clk_hw_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
+	hws[IMX8MN_CLK_PWM4_ROOT] = imx_clk_hw_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
+	hws[IMX8MN_CLK_QSPI_ROOT] = imx_clk_hw_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
+	hws[IMX8MN_CLK_NAND_ROOT] = imx_clk_hw_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
+	hws[IMX8MN_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
+	hws[IMX8MN_CLK_SAI2_ROOT] = imx_clk_hw_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
+	hws[IMX8MN_CLK_SAI2_IPG] = imx_clk_hw_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
+	hws[IMX8MN_CLK_SAI3_ROOT] = imx_clk_hw_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
+	hws[IMX8MN_CLK_SAI3_IPG] = imx_clk_hw_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
+	hws[IMX8MN_CLK_SAI5_ROOT] = imx_clk_hw_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
+	hws[IMX8MN_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
+	hws[IMX8MN_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
+	hws[IMX8MN_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+	hws[IMX8MN_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
+	hws[IMX8MN_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
+	hws[IMX8MN_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
+	hws[IMX8MN_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
+	hws[IMX8MN_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
+	hws[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_hw_gate4("gpu_core_root_clk", "gpu_core_div", base + 0x44f0, 0);
+	hws[IMX8MN_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
+	hws[IMX8MN_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
+	hws[IMX8MN_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
+	hws[IMX8MN_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
+	hws[IMX8MN_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
+	hws[IMX8MN_CLK_GPU_BUS_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
+	hws[IMX8MN_CLK_ASRC_ROOT] = imx_clk_hw_gate4("asrc_root_clk", "audio_ahb", base + 0x4580, 0);
+	hws[IMX8MN_CLK_PDM_ROOT] = imx_clk_hw_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
+	hws[IMX8MN_CLK_PDM_IPG]  = imx_clk_hw_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
+	hws[IMX8MN_CLK_DISP_AXI_ROOT]  = imx_clk_hw_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_disp);
+	hws[IMX8MN_CLK_DISP_APB_ROOT]  = imx_clk_hw_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_disp);
+	hws[IMX8MN_CLK_CAMERA_PIXEL_ROOT] = imx_clk_hw_gate2_shared2("camera_pixel_clk", "camera_pixel", base + 0x45d0, 0, &share_count_disp);
+	hws[IMX8MN_CLK_DISP_PIXEL_ROOT] = imx_clk_hw_gate2_shared2("disp_pixel_clk", "disp_pixel", base + 0x45d0, 0, &share_count_disp);
+	hws[IMX8MN_CLK_USDHC3_ROOT] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
+	hws[IMX8MN_CLK_TMU_ROOT] = imx_clk_hw_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
+	hws[IMX8MN_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
+	hws[IMX8MN_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
+	hws[IMX8MN_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
+	hws[IMX8MN_CLK_SAI7_ROOT] = imx_clk_hw_gate2_shared2("sai7_root_clk", "sai7", base + 0x4650, 0, &share_count_sai7);
 
-	imx_check_clocks(clks, ARRAY_SIZE(clks));
+	hws[IMX8MN_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
 
-	clk_data.clks = clks;
-	clk_data.clk_num = ARRAY_SIZE(clks);
-	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+	hws[IMX8MN_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
+					   hws[IMX8MN_CLK_A53_DIV]->clk,
+					   hws[IMX8MN_CLK_A53_SRC]->clk,
+					   hws[IMX8MN_ARM_PLL_OUT]->clk,
+					   hws[IMX8MN_SYS_PLL1_800M]->clk);
+
+	imx_check_clk_hws(hws, IMX8MN_CLK_END);
+
+	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
 	if (ret < 0) {
-		dev_err(dev, "failed to register clks for i.MX8MN\n");
-		goto unregister_clks;
+		dev_err(dev, "failed to register hws for i.MX8MN\n");
+		goto unregister_hws;
 	}
 
-	imx_register_uart_clocks(uart_clks);
+	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+		int index = uart_clk_ids[i];
+
+		uart_hws[i] = &hws[index]->clk;
+	}
+
+	imx_register_uart_clocks(uart_hws);
 
 	return 0;
 
-unregister_clks:
-	imx_unregister_clocks(clks, ARRAY_SIZE(clks));
+unregister_hws:
+	imx_unregister_hw_clocks(hws, IMX8MN_CLK_END);
 
 	return ret;
 }
@@ -572,6 +591,11 @@ static struct platform_driver imx8mn_clk_driver = {
 	.probe = imx8mn_clocks_probe,
 	.driver = {
 		.name = "imx8mn-ccm",
+		/*
+		 * Disable bind attributes: clocks are not removed and
+		 * reloading the driver will crash or break devices.
+		 */
+		.suppress_bind_attrs = true,
 		.of_match_table = of_match_ptr(imx8mn_clk_of_match),
 	},
 };
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
new file mode 100644
index 0000000..f6c120c
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <dt-bindings/clock/imx8mp-clock.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "clk.h"
+
+static u32 share_count_nand;
+static u32 share_count_media;
+
+static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
+static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
+static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
+static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
+static const char * const dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", };
+static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
+static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
+static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
+static const char * const sys_pll1_bypass_sels[] = {"sys_pll1", "sys_pll1_ref_sel", };
+static const char * const sys_pll2_bypass_sels[] = {"sys_pll2", "sys_pll2_ref_sel", };
+static const char * const sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
+
+static const char * const imx8mp_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m",
+					       "sys_pll2_1000m", "sys_pll1_800m", "sys_pll1_400m",
+					       "audio_pll1_out", "sys_pll3_out", };
+
+static const char * const imx8mp_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m",
+					      "vpu_pll_out", "sys_pll1_800m", "audio_pll1_out",
+					      "video_pll1_out", "sys_pll3_out", };
+
+static const char * const imx8mp_ml_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+					      "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+					      "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_gpu3d_core_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+						      "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+						      "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_gpu3d_shader_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+							"sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+							"video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_gpu2d_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+						 "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+						 "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_audio_axi_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+						     "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+						     "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_hsio_axi_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m",
+						    "sys_pll2_100m", "sys_pll2_200m", "clk_ext2",
+						    "clk_ext4", "audio_pll2_out", };
+
+static const char * const imx8mp_media_isp_sels[] = {"osc_24m", "sys_pll2_1000m", "sys_pll1_800m",
+						     "sys_pll3_out", "sys_pll1_400m", "audio_pll2_out",
+						     "clk_ext1", "sys_pll2_500m", };
+
+static const char * const imx8mp_main_axi_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll1_800m",
+						    "sys_pll2_250m", "sys_pll2_1000m", "audio_pll1_out",
+						    "video_pll1_out", "sys_pll1_100m",};
+
+static const char * const imx8mp_enet_axi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m",
+						    "sys_pll2_250m", "sys_pll2_200m", "audio_pll1_out",
+						    "video_pll1_out", "sys_pll3_out", };
+
+static const char * const imx8mp_nand_usdhc_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m",
+						      "sys_pll2_200m", "sys_pll1_133m", "sys_pll3_out",
+						      "sys_pll2_250m", "audio_pll1_out", };
+
+static const char * const imx8mp_vpu_bus_sels[] = {"osc_24m", "sys_pll1_800m", "vpu_pll_out",
+						   "audio_pll2_out", "sys_pll3_out", "sys_pll2_1000m",
+						   "sys_pll2_200m", "sys_pll1_100m", };
+
+static const char * const imx8mp_media_axi_sels[] = {"osc_24m", "sys_pll2_1000m", "sys_pll1_800m",
+						     "sys_pll3_out", "sys_pll1_40m", "audio_pll2_out",
+						     "clk_ext1", "sys_pll2_500m", };
+
+static const char * const imx8mp_media_apb_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll1_800m",
+						     "sys_pll3_out", "sys_pll1_40m", "audio_pll2_out",
+						     "clk_ext1", "sys_pll1_133m", };
+
+static const char * const imx8mp_gpu_axi_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+						   "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+						   "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_gpu_ahb_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+						   "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+						   "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_noc_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll3_out",
+					       "sys_pll2_1000m", "sys_pll2_500m", "audio_pll1_out",
+					       "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_noc_io_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll3_out",
+						  "sys_pll2_1000m", "sys_pll2_500m", "audio_pll1_out",
+						  "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_ml_axi_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+						  "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+						  "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_ml_ahb_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+						  "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+						  "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_ahb_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_800m",
+					       "sys_pll1_400m", "sys_pll2_125m", "sys_pll3_out",
+					       "audio_pll1_out", "video_pll1_out", };
+
+static const char * const imx8mp_audio_ahb_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m",
+						     "sys_pll2_1000m", "sys_pll2_166m", "sys_pll3_out",
+						     "audio_pll1_out", "video_pll1_out", };
+
+static const char * const imx8mp_mipi_dsi_esc_rx_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m",
+							   "sys_pll1_800m", "sys_pll2_1000m",
+							   "sys_pll3_out", "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_dram_alt_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll1_100m",
+						    "sys_pll2_500m", "sys_pll2_1000m", "sys_pll3_out",
+						    "audio_pll1_out", "sys_pll1_266m", };
+
+static const char * const imx8mp_dram_apb_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+						    "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+						    "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_vpu_g1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m",
+						  "sys_pll2_1000m", "sys_pll1_100m", "sys_pll2_125m",
+						  "sys_pll3_out", "audio_pll1_out", };
+
+static const char * const imx8mp_vpu_g2_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m",
+						  "sys_pll2_1000m", "sys_pll1_100m", "sys_pll2_125m",
+						  "sys_pll3_out", "audio_pll1_out", };
+
+static const char * const imx8mp_can1_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+						"sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+						"sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_can2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+						"sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+						"sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_memrepair_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+						     "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+						     "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_pcie_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m",
+						    "clk_ext1", "clk_ext2", "clk_ext3",
+						    "clk_ext4", "sys_pll1_400m", };
+
+static const char * const imx8mp_pcie_aux_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_50m",
+						    "sys_pll3_out", "sys_pll2_100m", "sys_pll1_80m",
+						    "sys_pll1_160m", "sys_pll1_200m", };
+
+static const char * const imx8mp_i2c5_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+						"sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+						"audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_i2c6_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+						"sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+						"audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_sai1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+						"clk_ext1", "clk_ext2", };
+
+static const char * const imx8mp_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+						"clk_ext2", "clk_ext3", };
+
+static const char * const imx8mp_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+						"clk_ext3", "clk_ext4", };
+
+static const char * const imx8mp_sai4_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+						"clk_ext1", "clk_ext2", };
+
+static const char * const imx8mp_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+						"clk_ext2", "clk_ext3", };
+
+static const char * const imx8mp_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+						"clk_ext3", "clk_ext4", };
+
+static const char * const imx8mp_enet_qos_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m",
+						    "sys_pll2_100m", "sys_pll1_160m", "audio_pll1_out",
+						    "video_pll1_out", "clk_ext4", };
+
+static const char * const imx8mp_enet_qos_timer_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out",
+							  "clk_ext1", "clk_ext2", "clk_ext3",
+							  "clk_ext4", "video_pll1_out", };
+
+static const char * const imx8mp_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m",
+						    "sys_pll2_100m", "sys_pll1_160m", "audio_pll1_out",
+						    "video_pll1_out", "clk_ext4", };
+
+static const char * const imx8mp_enet_timer_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out",
+						      "clk_ext1", "clk_ext2", "clk_ext3",
+						      "clk_ext4", "video_pll1_out", };
+
+static const char * const imx8mp_enet_phy_ref_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll2_125m",
+							"sys_pll2_200m", "sys_pll2_500m", "audio_pll1_out",
+							"video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out",
+						"sys_pll1_400m", "audio_pll2_out", "sys_pll3_out",
+						"sys_pll2_250m", "video_pll1_out", };
+
+static const char * const imx8mp_qspi_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll2_333m",
+						"sys_pll2_500m", "audio_pll2_out", "sys_pll1_266m",
+						"sys_pll3_out", "sys_pll1_100m", };
+
+static const char * const imx8mp_usdhc1_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+						  "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m",
+						  "audio_pll2_out", "sys_pll1_100m", };
+
+static const char * const imx8mp_usdhc2_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+						  "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m",
+						  "audio_pll2_out", "sys_pll1_100m", };
+
+static const char * const imx8mp_i2c1_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+						"sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+						"audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_i2c2_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+						"sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+						"audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_i2c3_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+						"sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+						"audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_i2c4_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+						"sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+						"audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_uart1_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+						 "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+						 "clk_ext4", "audio_pll2_out", };
+
+static const char * const imx8mp_uart2_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+						 "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+						 "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_uart3_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+						 "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+						 "clk_ext4", "audio_pll2_out", };
+
+static const char * const imx8mp_uart4_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+						 "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+						 "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_usb_core_ref_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m",
+							"sys_pll2_100m", "sys_pll2_200m", "clk_ext2",
+							"clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_usb_phy_ref_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m",
+						       "sys_pll2_100m", "sys_pll2_200m", "clk_ext2",
+						       "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_gic_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+					       "sys_pll2_100m", "sys_pll1_800m",
+					       "sys_pll2_500m", "clk_ext4", "audio_pll2_out" };
+
+static const char * const imx8mp_ecspi1_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+						  "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+						  "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_ecspi2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+						  "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+						  "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_pwm1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+						"sys_pll1_40m", "sys_pll3_out", "clk_ext1",
+						"sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mp_pwm2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+						"sys_pll1_40m", "sys_pll3_out", "clk_ext1",
+						"sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mp_pwm3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+						"sys_pll1_40m", "sys_pll3_out", "clk_ext2",
+						"sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mp_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+						"sys_pll1_40m", "sys_pll3_out", "clk_ext2",
+						"sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mp_gpt1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+						"sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+						"audio_pll1_out", "clk_ext1" };
+
+static const char * const imx8mp_gpt2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+						"sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+						"audio_pll1_out", "clk_ext2" };
+
+static const char * const imx8mp_gpt3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+						"sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+						"audio_pll1_out", "clk_ext3" };
+
+static const char * const imx8mp_gpt4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+						"sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+						"audio_pll1_out", "clk_ext1" };
+
+static const char * const imx8mp_gpt5_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+						"sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+						"audio_pll1_out", "clk_ext2" };
+
+static const char * const imx8mp_gpt6_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+						"sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+						"audio_pll1_out", "clk_ext3" };
+
+static const char * const imx8mp_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m",
+						"vpu_pll_out", "sys_pll2_125m", "sys_pll3_out",
+						"sys_pll1_80m", "sys_pll2_166m" };
+
+static const char * const imx8mp_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "vpu_pll_out",
+						 "sys_pll3_out", "sys_pll2_200m", "sys_pll1_266m",
+						 "sys_pll2_500m", "sys_pll1_100m" };
+
+static const char * const imx8mp_ipp_do_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll1_133m",
+							"sys_pll1_200m", "audio_pll2_out", "sys_pll2_500m",
+							"vpu_pll_out", "sys_pll1_80m" };
+
+static const char * const imx8mp_ipp_do_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m",
+							"sys_pll1_166m", "sys_pll3_out", "audio_pll1_out",
+							"video_pll1_out", "osc_32k" };
+
+static const char * const imx8mp_hdmi_fdcc_tst_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+							 "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+							 "audio_pll2_out", "video_pll1_out", };
+
+static const char * const imx8mp_hdmi_27m_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+						    "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+						    "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_hdmi_ref_266m_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll3_out",
+							 "sys_pll2_333m", "sys_pll1_266m", "sys_pll2_200m",
+							 "audio_pll1_out", "video_pll1_out", };
+
+static const char * const imx8mp_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+						  "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m",
+						  "audio_pll2_out", "sys_pll1_100m", };
+
+static const char * const imx8mp_media_cam1_pix_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+							  "sys_pll1_800m", "sys_pll2_1000m",
+							  "sys_pll3_out", "audio_pll2_out",
+							  "video_pll1_out", };
+
+static const char * const imx8mp_media_mipi_phy1_ref_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m",
+							       "sys_pll1_800m", "sys_pll2_1000m",
+							       "clk_ext2", "audio_pll2_out",
+							       "video_pll1_out", };
+
+static const char * const imx8mp_media_disp1_pix_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out",
+							   "audio_pll1_out", "sys_pll1_800m",
+							   "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", };
+
+static const char * const imx8mp_media_cam2_pix_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+							  "sys_pll1_800m", "sys_pll2_1000m",
+							  "sys_pll3_out", "audio_pll2_out",
+							  "video_pll1_out", };
+
+static const char * const imx8mp_media_mipi_phy2_ref_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m",
+							       "sys_pll1_800m", "sys_pll2_1000m",
+							       "clk_ext2", "audio_pll2_out",
+							       "video_pll1_out", };
+
+static const char * const imx8mp_media_mipi_csi2_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m",
+							       "sys_pll1_800m", "sys_pll2_1000m",
+							       "sys_pll3_out", "clk_ext3",
+							       "audio_pll2_out", };
+
+static const char * const imx8mp_pcie2_ctrl_sels[] = {"osc_24m", "sys_pll2_250m", "sys_pll2_200m",
+						      "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_500m",
+						      "sys_pll2_333m", "sys_pll3_out", };
+
+static const char * const imx8mp_pcie2_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m",
+						     "clk_ext1", "clk_ext2", "clk_ext3",
+						     "clk_ext4", "sys_pll1_400m", };
+
+static const char * const imx8mp_media_mipi_test_byte_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_50m",
+								"sys_pll3_out", "sys_pll2_100m",
+								"sys_pll1_80m", "sys_pll1_160m",
+								"sys_pll1_200m", };
+
+static const char * const imx8mp_ecspi3_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+						  "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+						  "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out",
+					       "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+					       "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_vpu_vc8000e_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m",
+						       "sys_pll2_1000m", "audio_pll2_out", "sys_pll2_125m",
+						       "sys_pll3_out", "audio_pll1_out", };
+
+static const char * const imx8mp_sai7_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+						"clk_ext3", "clk_ext4", };
+
+static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
+
+static struct clk_hw **hws;
+static struct clk_hw_onecell_data *clk_hw_data;
+
+static const int uart_clk_ids[] = {
+	IMX8MP_CLK_UART1_ROOT,
+	IMX8MP_CLK_UART2_ROOT,
+	IMX8MP_CLK_UART3_ROOT,
+	IMX8MP_CLK_UART4_ROOT,
+};
+static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1];
+
+static int imx8mp_clocks_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	void __iomem *anatop_base, *ccm_base;
+	int i;
+
+	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
+	anatop_base = of_iomap(np, 0);
+	if (WARN_ON(!anatop_base))
+		return -ENOMEM;
+
+	np = dev->of_node;
+	ccm_base = devm_platform_ioremap_resource(pdev, 0);
+	if (WARN_ON(IS_ERR(ccm_base))) {
+		iounmap(anatop_base);
+		return PTR_ERR(ccm_base);
+	}
+
+	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, IMX8MP_CLK_END), GFP_KERNEL);
+	if (WARN_ON(!clk_hw_data)) {
+		iounmap(anatop_base);
+		return -ENOMEM;
+	}
+
+	clk_hw_data->num = IMX8MP_CLK_END;
+	hws = clk_hw_data->hws;
+
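+	/* Fixed and external source clocks taken from the device tree */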
+	hws[IMX8MP_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+	hws[IMX8MP_CLK_24M] = imx_obtain_fixed_clk_hw(np, "osc_24m");
+	hws[IMX8MP_CLK_32K] = imx_obtain_fixed_clk_hw(np, "osc_32k");
+	hws[IMX8MP_CLK_EXT1] = imx_obtain_fixed_clk_hw(np, "clk_ext1");
+	hws[IMX8MP_CLK_EXT2] = imx_obtain_fixed_clk_hw(np, "clk_ext2");
+	hws[IMX8MP_CLK_EXT3] = imx_obtain_fixed_clk_hw(np, "clk_ext3");
+	hws[IMX8MP_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
+
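+	/* PLL reference clock selects in the anatop block */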
+	hws[IMX8MP_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", anatop_base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MP_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", anatop_base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MP_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", anatop_base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MP_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", anatop_base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MP_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", anatop_base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MP_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", anatop_base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MP_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", anatop_base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MP_SYS_PLL1_REF_SEL] = imx_clk_hw_mux("sys_pll1_ref_sel", anatop_base + 0x94, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MP_SYS_PLL2_REF_SEL] = imx_clk_hw_mux("sys_pll2_ref_sel", anatop_base + 0x104, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MP_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", anatop_base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
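+	/* PLLs */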
+	hws[IMX8MP_AUDIO_PLL1] = imx_clk_hw_pll14xx("audio_pll1", "audio_pll1_ref_sel", anatop_base, &imx_1443x_pll);
+	hws[IMX8MP_AUDIO_PLL2] = imx_clk_hw_pll14xx("audio_pll2", "audio_pll2_ref_sel", anatop_base + 0x14, &imx_1443x_pll);
+	hws[IMX8MP_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", anatop_base + 0x28, &imx_1443x_pll);
+	hws[IMX8MP_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", anatop_base + 0x50, &imx_1443x_dram_pll);
+	hws[IMX8MP_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", anatop_base + 0x64, &imx_1416x_pll);
+	hws[IMX8MP_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", anatop_base + 0x74, &imx_1416x_pll);
+	hws[IMX8MP_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", anatop_base + 0x84, &imx_1416x_pll);
+	hws[IMX8MP_SYS_PLL1] = imx_clk_hw_pll14xx("sys_pll1", "sys_pll1_ref_sel", anatop_base + 0x94, &imx_1416x_pll);
+	hws[IMX8MP_SYS_PLL2] = imx_clk_hw_pll14xx("sys_pll2", "sys_pll2_ref_sel", anatop_base + 0x104, &imx_1416x_pll);
+	hws[IMX8MP_SYS_PLL3] = imx_clk_hw_pll14xx("sys_pll3", "sys_pll3_ref_sel", anatop_base + 0x114, &imx_1416x_pll);
+
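+	/* PLL bypass out */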
+	hws[IMX8MP_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", anatop_base, 4, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MP_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", anatop_base + 0x14, 4, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MP_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", anatop_base + 0x28, 4, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MP_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", anatop_base + 0x50, 4, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MP_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", anatop_base + 0x64, 4, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MP_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", anatop_base + 0x74, 4, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MP_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", anatop_base + 0x84, 4, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MP_SYS_PLL1_BYPASS] = imx_clk_hw_mux_flags("sys_pll1_bypass", anatop_base + 0x94, 4, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MP_SYS_PLL2_BYPASS] = imx_clk_hw_mux_flags("sys_pll2_bypass", anatop_base + 0x104, 4, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MP_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", anatop_base + 0x114, 4, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+
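+	/* PLL OUT GATE */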
+	hws[IMX8MP_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", anatop_base, 13);
+	hws[IMX8MP_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", anatop_base + 0x14, 13);
+	hws[IMX8MP_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", anatop_base + 0x28, 13);
+	hws[IMX8MP_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", anatop_base + 0x50, 13);
+	hws[IMX8MP_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", anatop_base + 0x64, 11);
+	hws[IMX8MP_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", anatop_base + 0x74, 11);
+	hws[IMX8MP_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", anatop_base + 0x84, 11);
+	hws[IMX8MP_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1_bypass", anatop_base + 0x94, 11);
+	hws[IMX8MP_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2_bypass", anatop_base + 0x104, 11);
+	hws[IMX8MP_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", anatop_base + 0x114, 11);
+
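+	/* SYS PLL1 fixed output */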
+	hws[IMX8MP_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
+	hws[IMX8MP_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
+	hws[IMX8MP_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
+	hws[IMX8MP_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
+	hws[IMX8MP_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
+	hws[IMX8MP_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
+	hws[IMX8MP_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
+	hws[IMX8MP_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
+	hws[IMX8MP_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
+
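+	/* SYS PLL2 fixed output */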
+	hws[IMX8MP_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
+	hws[IMX8MP_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
+	hws[IMX8MP_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
+	hws[IMX8MP_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
+	hws[IMX8MP_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
+	hws[IMX8MP_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
+	hws[IMX8MP_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
+	hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
+	hws[IMX8MP_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+
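+	/* CORE */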
+	hws[IMX8MP_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", ccm_base + 0x8000, 24, 3, imx8mp_a53_sels, ARRAY_SIZE(imx8mp_a53_sels));
+	hws[IMX8MP_CLK_M7_SRC] = imx_clk_hw_mux2("arm_m7_src", ccm_base + 0x8080, 24, 3, imx8mp_m7_sels, ARRAY_SIZE(imx8mp_m7_sels));
+	hws[IMX8MP_CLK_ML_SRC] = imx_clk_hw_mux2("ml_src", ccm_base + 0x8100, 24, 3, imx8mp_ml_sels, ARRAY_SIZE(imx8mp_ml_sels));
+	hws[IMX8MP_CLK_GPU3D_CORE_SRC] = imx_clk_hw_mux2("gpu3d_core_src", ccm_base + 0x8180, 24, 3,  imx8mp_gpu3d_core_sels, ARRAY_SIZE(imx8mp_gpu3d_core_sels));
+	hws[IMX8MP_CLK_GPU3D_SHADER_SRC] = imx_clk_hw_mux2("gpu3d_shader_src", ccm_base + 0x8200, 24, 3, imx8mp_gpu3d_shader_sels, ARRAY_SIZE(imx8mp_gpu3d_shader_sels));
+	hws[IMX8MP_CLK_GPU2D_SRC] = imx_clk_hw_mux2("gpu2d_src", ccm_base + 0x8280, 24, 3, imx8mp_gpu2d_sels, ARRAY_SIZE(imx8mp_gpu2d_sels));
+	hws[IMX8MP_CLK_AUDIO_AXI_SRC] = imx_clk_hw_mux2("audio_axi_src", ccm_base + 0x8300, 24, 3, imx8mp_audio_axi_sels, ARRAY_SIZE(imx8mp_audio_axi_sels));
+	hws[IMX8MP_CLK_HSIO_AXI_SRC] = imx_clk_hw_mux2("hsio_axi_src", ccm_base + 0x8380, 24, 3, imx8mp_hsio_axi_sels, ARRAY_SIZE(imx8mp_hsio_axi_sels));
+	hws[IMX8MP_CLK_MEDIA_ISP_SRC] = imx_clk_hw_mux2("media_isp_src", ccm_base + 0x8400, 24, 3, imx8mp_media_isp_sels, ARRAY_SIZE(imx8mp_media_isp_sels));
+	hws[IMX8MP_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", ccm_base + 0x8000, 28);
+	hws[IMX8MP_CLK_M4_CG] = imx_clk_hw_gate3("arm_m7_cg", "arm_m7_src", ccm_base + 0x8080, 28);
+	hws[IMX8MP_CLK_ML_CG] = imx_clk_hw_gate3("ml_cg", "ml_src", ccm_base + 0x8100, 28);
+	hws[IMX8MP_CLK_GPU3D_CORE_CG] = imx_clk_hw_gate3("gpu3d_core_cg", "gpu3d_core_src", ccm_base + 0x8180, 28);
+	hws[IMX8MP_CLK_GPU3D_SHADER_CG] = imx_clk_hw_gate3("gpu3d_shader_cg", "gpu3d_shader_src", ccm_base + 0x8200, 28);
+	hws[IMX8MP_CLK_GPU2D_CG] = imx_clk_hw_gate3("gpu2d_cg", "gpu2d_src", ccm_base + 0x8280, 28);
+	hws[IMX8MP_CLK_AUDIO_AXI_CG] = imx_clk_hw_gate3("audio_axi_cg", "audio_axi_src", ccm_base + 0x8300, 28);
+	hws[IMX8MP_CLK_HSIO_AXI_CG] = imx_clk_hw_gate3("hsio_axi_cg", "hsio_axi_src", ccm_base + 0x8380, 28);
+	hws[IMX8MP_CLK_MEDIA_ISP_CG] = imx_clk_hw_gate3("media_isp_cg", "media_isp_src", ccm_base + 0x8400, 28);
+	hws[IMX8MP_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", ccm_base + 0x8000, 0, 3);
+	hws[IMX8MP_CLK_M7_DIV] = imx_clk_hw_divider2("arm_m7_div", "arm_m7_cg", ccm_base + 0x8080, 0, 3);
+	hws[IMX8MP_CLK_ML_DIV] = imx_clk_hw_divider2("ml_div", "ml_cg", ccm_base + 0x8100, 0, 3);
+	hws[IMX8MP_CLK_GPU3D_CORE_DIV] = imx_clk_hw_divider2("gpu3d_core_div", "gpu3d_core_cg", ccm_base + 0x8180, 0, 3);
+	hws[IMX8MP_CLK_GPU3D_SHADER_DIV] = imx_clk_hw_divider2("gpu3d_shader_div", "gpu3d_shader_cg", ccm_base + 0x8200, 0, 3);
+	hws[IMX8MP_CLK_GPU2D_DIV] = imx_clk_hw_divider2("gpu2d_div", "gpu2d_cg", ccm_base + 0x8280, 0, 3);
+	hws[IMX8MP_CLK_AUDIO_AXI_DIV] = imx_clk_hw_divider2("audio_axi_div", "audio_axi_cg", ccm_base + 0x8300, 0, 3);
+	hws[IMX8MP_CLK_HSIO_AXI_DIV] = imx_clk_hw_divider2("hsio_axi_div", "hsio_axi_cg", ccm_base + 0x8380, 0, 3);
+	hws[IMX8MP_CLK_MEDIA_ISP_DIV] = imx_clk_hw_divider2("media_isp_div", "media_isp_cg", ccm_base + 0x8400, 0, 3);
+
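+	/* BUS */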
+	hws[IMX8MP_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mp_main_axi_sels, ccm_base + 0x8800);
+	hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880);
+	hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900);
+	hws[IMX8MP_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mp_vpu_bus_sels, ccm_base + 0x8980);
+	hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00);
+	hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80);
+	hws[IMX8MP_CLK_HDMI_APB] = imx8m_clk_hw_composite("hdmi_apb", imx8mp_media_apb_sels, ccm_base + 0x8b00);
+	hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite("hdmi_axi", imx8mp_media_apb_sels, ccm_base + 0x8b80);
+	hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00);
+	hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80);
+	hws[IMX8MP_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mp_noc_sels, ccm_base + 0x8d00);
+	hws[IMX8MP_CLK_NOC_IO] = imx8m_clk_hw_composite_critical("noc_io", imx8mp_noc_io_sels, ccm_base + 0x8d80);
+	hws[IMX8MP_CLK_ML_AXI] = imx8m_clk_hw_composite("ml_axi", imx8mp_ml_axi_sels, ccm_base + 0x8e00);
+	hws[IMX8MP_CLK_ML_AHB] = imx8m_clk_hw_composite("ml_ahb", imx8mp_ml_ahb_sels, ccm_base + 0x8e80);
+
+	hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000);
+	hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100);
+	hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200);
+
+	hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
+	hws[IMX8MP_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", ccm_base + 0x9180, 0, 1);
+
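+	/* Peripheral clocks */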
+	hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
+	hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
+	hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100);
+	hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180);
+	hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200);
+	hws[IMX8MP_CLK_CAN2] = imx8m_clk_hw_composite("can2", imx8mp_can2_sels, ccm_base + 0xa280);
+	hws[IMX8MP_CLK_MEMREPAIR] = imx8m_clk_hw_composite("memrepair", imx8mp_memrepair_sels, ccm_base + 0xa300);
+	hws[IMX8MP_CLK_PCIE_PHY] = imx8m_clk_hw_composite("pcie_phy", imx8mp_pcie_phy_sels, ccm_base + 0xa380);
+	hws[IMX8MP_CLK_PCIE_AUX] = imx8m_clk_hw_composite("pcie_aux", imx8mp_pcie_aux_sels, ccm_base + 0xa400);
+	hws[IMX8MP_CLK_I2C5] = imx8m_clk_hw_composite("i2c5", imx8mp_i2c5_sels, ccm_base + 0xa480);
+	hws[IMX8MP_CLK_I2C6] = imx8m_clk_hw_composite("i2c6", imx8mp_i2c6_sels, ccm_base + 0xa500);
+	hws[IMX8MP_CLK_SAI1] = imx8m_clk_hw_composite("sai1", imx8mp_sai1_sels, ccm_base + 0xa580);
+	hws[IMX8MP_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mp_sai2_sels, ccm_base + 0xa600);
+	hws[IMX8MP_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mp_sai3_sels, ccm_base + 0xa680);
+	hws[IMX8MP_CLK_SAI4] = imx8m_clk_hw_composite("sai4", imx8mp_sai4_sels, ccm_base + 0xa700);
+	hws[IMX8MP_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mp_sai5_sels, ccm_base + 0xa780);
+	hws[IMX8MP_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mp_sai6_sels, ccm_base + 0xa800);
+	hws[IMX8MP_CLK_ENET_QOS] = imx8m_clk_hw_composite("enet_qos", imx8mp_enet_qos_sels, ccm_base + 0xa880);
+	hws[IMX8MP_CLK_ENET_QOS_TIMER] = imx8m_clk_hw_composite("enet_qos_timer", imx8mp_enet_qos_timer_sels, ccm_base + 0xa900);
+	hws[IMX8MP_CLK_ENET_REF] = imx8m_clk_hw_composite("enet_ref", imx8mp_enet_ref_sels, ccm_base + 0xa980);
+	hws[IMX8MP_CLK_ENET_TIMER] = imx8m_clk_hw_composite("enet_timer", imx8mp_enet_timer_sels, ccm_base + 0xaa00);
+	hws[IMX8MP_CLK_ENET_PHY_REF] = imx8m_clk_hw_composite("enet_phy_ref", imx8mp_enet_phy_ref_sels, ccm_base + 0xaa80);
+	hws[IMX8MP_CLK_NAND] = imx8m_clk_hw_composite("nand", imx8mp_nand_sels, ccm_base + 0xab00);
+	hws[IMX8MP_CLK_QSPI] = imx8m_clk_hw_composite("qspi", imx8mp_qspi_sels, ccm_base + 0xab80);
+	hws[IMX8MP_CLK_USDHC1] = imx8m_clk_hw_composite("usdhc1", imx8mp_usdhc1_sels, ccm_base + 0xac00);
+	hws[IMX8MP_CLK_USDHC2] = imx8m_clk_hw_composite("usdhc2", imx8mp_usdhc2_sels, ccm_base + 0xac80);
+	hws[IMX8MP_CLK_I2C1] = imx8m_clk_hw_composite("i2c1", imx8mp_i2c1_sels, ccm_base + 0xad00);
+	hws[IMX8MP_CLK_I2C2] = imx8m_clk_hw_composite("i2c2", imx8mp_i2c2_sels, ccm_base + 0xad80);
+	hws[IMX8MP_CLK_I2C3] = imx8m_clk_hw_composite("i2c3", imx8mp_i2c3_sels, ccm_base + 0xae00);
+	hws[IMX8MP_CLK_I2C4] = imx8m_clk_hw_composite("i2c4", imx8mp_i2c4_sels, ccm_base + 0xae80);
+
+	hws[IMX8MP_CLK_UART1] = imx8m_clk_hw_composite("uart1", imx8mp_uart1_sels, ccm_base + 0xaf00);
+	hws[IMX8MP_CLK_UART2] = imx8m_clk_hw_composite("uart2", imx8mp_uart2_sels, ccm_base + 0xaf80);
+	hws[IMX8MP_CLK_UART3] = imx8m_clk_hw_composite("uart3", imx8mp_uart3_sels, ccm_base + 0xb000);
+	hws[IMX8MP_CLK_UART4] = imx8m_clk_hw_composite("uart4", imx8mp_uart4_sels, ccm_base + 0xb080);
+	hws[IMX8MP_CLK_USB_CORE_REF] = imx8m_clk_hw_composite("usb_core_ref", imx8mp_usb_core_ref_sels, ccm_base + 0xb100);
+	hws[IMX8MP_CLK_USB_PHY_REF] = imx8m_clk_hw_composite("usb_phy_ref", imx8mp_usb_phy_ref_sels, ccm_base + 0xb180);
+	hws[IMX8MP_CLK_GIC] = imx8m_clk_hw_composite_critical("gic", imx8mp_gic_sels, ccm_base + 0xb200);
+	hws[IMX8MP_CLK_ECSPI1] = imx8m_clk_hw_composite("ecspi1", imx8mp_ecspi1_sels, ccm_base + 0xb280);
+	hws[IMX8MP_CLK_ECSPI2] = imx8m_clk_hw_composite("ecspi2", imx8mp_ecspi2_sels, ccm_base + 0xb300);
+	hws[IMX8MP_CLK_PWM1] = imx8m_clk_hw_composite("pwm1", imx8mp_pwm1_sels, ccm_base + 0xb380);
+	hws[IMX8MP_CLK_PWM2] = imx8m_clk_hw_composite("pwm2", imx8mp_pwm2_sels, ccm_base + 0xb400);
+	hws[IMX8MP_CLK_PWM3] = imx8m_clk_hw_composite("pwm3", imx8mp_pwm3_sels, ccm_base + 0xb480);
+	hws[IMX8MP_CLK_PWM4] = imx8m_clk_hw_composite("pwm4", imx8mp_pwm4_sels, ccm_base + 0xb500);
+
+	hws[IMX8MP_CLK_GPT1] = imx8m_clk_hw_composite("gpt1", imx8mp_gpt1_sels, ccm_base + 0xb580);
+	hws[IMX8MP_CLK_GPT2] = imx8m_clk_hw_composite("gpt2", imx8mp_gpt2_sels, ccm_base + 0xb600);
+	hws[IMX8MP_CLK_GPT3] = imx8m_clk_hw_composite("gpt3", imx8mp_gpt3_sels, ccm_base + 0xb680);
+	hws[IMX8MP_CLK_GPT4] = imx8m_clk_hw_composite("gpt4", imx8mp_gpt4_sels, ccm_base + 0xb700);
+	hws[IMX8MP_CLK_GPT5] = imx8m_clk_hw_composite("gpt5", imx8mp_gpt5_sels, ccm_base + 0xb780);
+	hws[IMX8MP_CLK_GPT6] = imx8m_clk_hw_composite("gpt6", imx8mp_gpt6_sels, ccm_base + 0xb800);
+	hws[IMX8MP_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mp_wdog_sels, ccm_base + 0xb900);
+	hws[IMX8MP_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mp_wrclk_sels, ccm_base + 0xb980);
+	hws[IMX8MP_CLK_IPP_DO_CLKO1] = imx8m_clk_hw_composite("ipp_do_clko1", imx8mp_ipp_do_clko1_sels, ccm_base + 0xba00);
+	hws[IMX8MP_CLK_IPP_DO_CLKO2] = imx8m_clk_hw_composite("ipp_do_clko2", imx8mp_ipp_do_clko2_sels, ccm_base + 0xba80);
+	hws[IMX8MP_CLK_HDMI_FDCC_TST] = imx8m_clk_hw_composite("hdmi_fdcc_tst", imx8mp_hdmi_fdcc_tst_sels, ccm_base + 0xbb00);
+	hws[IMX8MP_CLK_HDMI_27M] = imx8m_clk_hw_composite("hdmi_27m", imx8mp_hdmi_27m_sels, ccm_base + 0xbb80);
+	hws[IMX8MP_CLK_HDMI_REF_266M] = imx8m_clk_hw_composite("hdmi_ref_266m", imx8mp_hdmi_ref_266m_sels, ccm_base + 0xbc00);
+	hws[IMX8MP_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mp_usdhc3_sels, ccm_base + 0xbc80);
+	hws[IMX8MP_CLK_MEDIA_CAM1_PIX] = imx8m_clk_hw_composite("media_cam1_pix", imx8mp_media_cam1_pix_sels, ccm_base + 0xbd00);
+	hws[IMX8MP_CLK_MEDIA_MIPI_PHY1_REF] = imx8m_clk_hw_composite("media_mipi_phy1_ref", imx8mp_media_mipi_phy1_ref_sels, ccm_base + 0xbd80);
+	hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp1_pix_sels, ccm_base + 0xbe00);
+	hws[IMX8MP_CLK_MEDIA_CAM2_PIX] = imx8m_clk_hw_composite("media_cam2_pix", imx8mp_media_cam2_pix_sels, ccm_base + 0xbe80);
+	hws[IMX8MP_CLK_MEDIA_MIPI_PHY2_REF] = imx8m_clk_hw_composite("media_mipi_phy2_ref", imx8mp_media_mipi_phy2_ref_sels, ccm_base + 0xbf00);
+	hws[IMX8MP_CLK_MEDIA_MIPI_CSI2_ESC] = imx8m_clk_hw_composite("media_mipi_csi2_esc", imx8mp_media_mipi_csi2_esc_sels, ccm_base + 0xbf80);
+	hws[IMX8MP_CLK_PCIE2_CTRL] = imx8m_clk_hw_composite("pcie2_ctrl", imx8mp_pcie2_ctrl_sels, ccm_base + 0xc000);
+	hws[IMX8MP_CLK_PCIE2_PHY] = imx8m_clk_hw_composite("pcie2_phy", imx8mp_pcie2_phy_sels, ccm_base + 0xc080);
+	hws[IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE] = imx8m_clk_hw_composite("media_mipi_test_byte", imx8mp_media_mipi_test_byte_sels, ccm_base + 0xc100);
+	hws[IMX8MP_CLK_ECSPI3] = imx8m_clk_hw_composite("ecspi3", imx8mp_ecspi3_sels, ccm_base + 0xc180);
+	hws[IMX8MP_CLK_PDM] = imx8m_clk_hw_composite("pdm", imx8mp_pdm_sels, ccm_base + 0xc200);
+	hws[IMX8MP_CLK_VPU_VC8000E] = imx8m_clk_hw_composite("vpu_vc8000e", imx8mp_vpu_vc8000e_sels, ccm_base + 0xc280);
+	hws[IMX8MP_CLK_SAI7] = imx8m_clk_hw_composite("sai7", imx8mp_sai7_sels, ccm_base + 0xc300);
+
+	hws[IMX8MP_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+	hws[IMX8MP_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", ccm_base + 0x9800, 24, 1, imx8mp_dram_core_sels, ARRAY_SIZE(imx8mp_dram_core_sels), CLK_IS_CRITICAL);
+
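+	/* Root clock gates */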
+	hws[IMX8MP_CLK_DRAM1_ROOT] = imx_clk_hw_gate4_flags("dram1_root_clk", "dram_core_clk", ccm_base + 0x4050, 0, CLK_IS_CRITICAL);
+	hws[IMX8MP_CLK_ECSPI1_ROOT] = imx_clk_hw_gate4("ecspi1_root_clk", "ecspi1", ccm_base + 0x4070, 0);
+	hws[IMX8MP_CLK_ECSPI2_ROOT] = imx_clk_hw_gate4("ecspi2_root_clk", "ecspi2", ccm_base + 0x4080, 0);
+	hws[IMX8MP_CLK_ECSPI3_ROOT] = imx_clk_hw_gate4("ecspi3_root_clk", "ecspi3", ccm_base + 0x4090, 0);
+	hws[IMX8MP_CLK_ENET1_ROOT] = imx_clk_hw_gate4("enet1_root_clk", "enet_axi", ccm_base + 0x40a0, 0);
+	hws[IMX8MP_CLK_GPIO1_ROOT] = imx_clk_hw_gate4("gpio1_root_clk", "ipg_root", ccm_base + 0x40b0, 0);
+	hws[IMX8MP_CLK_GPIO2_ROOT] = imx_clk_hw_gate4("gpio2_root_clk", "ipg_root", ccm_base + 0x40c0, 0);
+	hws[IMX8MP_CLK_GPIO3_ROOT] = imx_clk_hw_gate4("gpio3_root_clk", "ipg_root", ccm_base + 0x40d0, 0);
+	hws[IMX8MP_CLK_GPIO4_ROOT] = imx_clk_hw_gate4("gpio4_root_clk", "ipg_root", ccm_base + 0x40e0, 0);
+	hws[IMX8MP_CLK_GPIO5_ROOT] = imx_clk_hw_gate4("gpio5_root_clk", "ipg_root", ccm_base + 0x40f0, 0);
+	hws[IMX8MP_CLK_GPT1_ROOT] = imx_clk_hw_gate4("gpt1_root_clk", "gpt1", ccm_base + 0x4100, 0);
+	hws[IMX8MP_CLK_GPT2_ROOT] = imx_clk_hw_gate4("gpt2_root_clk", "gpt2", ccm_base + 0x4110, 0);
+	hws[IMX8MP_CLK_GPT3_ROOT] = imx_clk_hw_gate4("gpt3_root_clk", "gpt3", ccm_base + 0x4120, 0);
+	hws[IMX8MP_CLK_GPT4_ROOT] = imx_clk_hw_gate4("gpt4_root_clk", "gpt4", ccm_base + 0x4130, 0);
+	hws[IMX8MP_CLK_GPT5_ROOT] = imx_clk_hw_gate4("gpt5_root_clk", "gpt5", ccm_base + 0x4140, 0);
+	hws[IMX8MP_CLK_GPT6_ROOT] = imx_clk_hw_gate4("gpt6_root_clk", "gpt6", ccm_base + 0x4150, 0);
+	hws[IMX8MP_CLK_I2C1_ROOT] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1", ccm_base + 0x4170, 0);
+	hws[IMX8MP_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", ccm_base + 0x4180, 0);
+	hws[IMX8MP_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", ccm_base + 0x4190, 0);
+	hws[IMX8MP_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", ccm_base + 0x41a0, 0);
+	hws[IMX8MP_CLK_PCIE_ROOT] = imx_clk_hw_gate4("pcie_root_clk", "pcie_aux", ccm_base + 0x4250, 0);
+	hws[IMX8MP_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", ccm_base + 0x4280, 0);
+	hws[IMX8MP_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", ccm_base + 0x4290, 0);
+	hws[IMX8MP_CLK_PWM3_ROOT] = imx_clk_hw_gate4("pwm3_root_clk", "pwm3", ccm_base + 0x42a0, 0);
+	hws[IMX8MP_CLK_PWM4_ROOT] = imx_clk_hw_gate4("pwm4_root_clk", "pwm4", ccm_base + 0x42b0, 0);
+	hws[IMX8MP_CLK_QOS_ROOT] = imx_clk_hw_gate4("qos_root_clk", "ipg_root", ccm_base + 0x42c0, 0);
+	hws[IMX8MP_CLK_QOS_ENET_ROOT] = imx_clk_hw_gate4("qos_enet_root_clk", "ipg_root", ccm_base + 0x42e0, 0);
+	hws[IMX8MP_CLK_QSPI_ROOT] = imx_clk_hw_gate4("qspi_root_clk", "qspi", ccm_base + 0x42f0, 0);
+	hws[IMX8MP_CLK_NAND_ROOT] = imx_clk_hw_gate2_shared2("nand_root_clk", "nand", ccm_base + 0x4300, 0, &share_count_nand);
+	hws[IMX8MP_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", ccm_base + 0x4300, 0, &share_count_nand);
+	hws[IMX8MP_CLK_I2C5_ROOT] = imx_clk_hw_gate2("i2c5_root_clk", "i2c5", ccm_base + 0x4330, 0);
+	hws[IMX8MP_CLK_I2C6_ROOT] = imx_clk_hw_gate2("i2c6_root_clk", "i2c6", ccm_base + 0x4340, 0);
+	hws[IMX8MP_CLK_CAN1_ROOT] = imx_clk_hw_gate2("can1_root_clk", "can1", ccm_base + 0x4350, 0);
+	hws[IMX8MP_CLK_CAN2_ROOT] = imx_clk_hw_gate2("can2_root_clk", "can2", ccm_base + 0x4360, 0);
+	hws[IMX8MP_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_root_clk", "ipg_root", ccm_base + 0x43a0, 0);
+	hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "enet_axi", ccm_base + 0x43b0, 0);
+	hws[IMX8MP_CLK_SIM_ENET_ROOT] = imx_clk_hw_gate4("sim_enet_root_clk", "enet_axi", ccm_base + 0x4400, 0);
+	hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", ccm_base + 0x4450, 0);
+	hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core_div", ccm_base + 0x4460, 0);
+	hws[IMX8MP_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", ccm_base + 0x4470, 0);
+	hws[IMX8MP_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", ccm_base + 0x4490, 0);
+	hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0);
+	hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0);
+	hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0);
+	hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "osc_32k", ccm_base + 0x44d0, 0);
+	hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0);
+	hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0);
+	hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0);
+	hws[IMX8MP_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", ccm_base + 0x4530, 0);
+	hws[IMX8MP_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", ccm_base + 0x4540, 0);
+	hws[IMX8MP_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", ccm_base + 0x4550, 0);
+	hws[IMX8MP_CLK_VPU_G1_ROOT] = imx_clk_hw_gate4("vpu_g1_root_clk", "vpu_g1", ccm_base + 0x4560, 0);
+	hws[IMX8MP_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_axi", ccm_base + 0x4570, 0);
+	hws[IMX8MP_CLK_VPU_VC8KE_ROOT] = imx_clk_hw_gate4("vpu_vc8ke_root_clk", "vpu_vc8000e", ccm_base + 0x4590, 0);
+	hws[IMX8MP_CLK_VPU_G2_ROOT] = imx_clk_hw_gate4("vpu_g2_root_clk", "vpu_g2", ccm_base + 0x45a0, 0);
+	hws[IMX8MP_CLK_NPU_ROOT] = imx_clk_hw_gate4("npu_root_clk", "ml_div", ccm_base + 0x45b0, 0);
+	hws[IMX8MP_CLK_HSIO_ROOT] = imx_clk_hw_gate4("hsio_root_clk", "ipg_root", ccm_base + 0x45c0, 0);
+	hws[IMX8MP_CLK_MEDIA_APB_ROOT] = imx_clk_hw_gate2_shared2("media_apb_root_clk", "media_apb", ccm_base + 0x45d0, 0, &share_count_media);
+	hws[IMX8MP_CLK_MEDIA_AXI_ROOT] = imx_clk_hw_gate2_shared2("media_axi_root_clk", "media_axi", ccm_base + 0x45d0, 0, &share_count_media);
+	hws[IMX8MP_CLK_MEDIA_CAM1_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_cam1_pix_root_clk", "media_cam1_pix", ccm_base + 0x45d0, 0, &share_count_media);
+	hws[IMX8MP_CLK_MEDIA_CAM2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_cam2_pix_root_clk", "media_cam2_pix", ccm_base + 0x45d0, 0, &share_count_media);
+	hws[IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp1_pix_root_clk", "media_disp1_pix", ccm_base + 0x45d0, 0, &share_count_media);
+	hws[IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp2_pix_root_clk", "media_disp2_pix", ccm_base + 0x45d0, 0, &share_count_media);
+	hws[IMX8MP_CLK_MEDIA_ISP_ROOT] = imx_clk_hw_gate2_shared2("media_isp_root_clk", "media_isp_div", ccm_base + 0x45d0, 0, &share_count_media);
+
+	hws[IMX8MP_CLK_USDHC3_ROOT] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3", ccm_base + 0x45e0, 0);
+	hws[IMX8MP_CLK_HDMI_ROOT] = imx_clk_hw_gate4("hdmi_root_clk", "hdmi_axi", ccm_base + 0x45f0, 0);
+	hws[IMX8MP_CLK_TSENSOR_ROOT] = imx_clk_hw_gate4("tsensor_root_clk", "ipg_root", ccm_base + 0x4620, 0);
+	hws[IMX8MP_CLK_VPU_ROOT] = imx_clk_hw_gate4("vpu_root_clk", "vpu_bus", ccm_base + 0x4630, 0);
+	hws[IMX8MP_CLK_AUDIO_ROOT] = imx_clk_hw_gate4("audio_root_clk", "ipg_root", ccm_base + 0x4650, 0);
+
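+	/* Virtual ARM core clock built from the A53 divider, mux and PLL clocks */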
+	hws[IMX8MP_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
+					     hws[IMX8MP_CLK_A53_DIV]->clk,
+					     hws[IMX8MP_CLK_A53_SRC]->clk,
+					     hws[IMX8MP_ARM_PLL_OUT]->clk,
+					     hws[IMX8MP_SYS_PLL1_800M]->clk);
+
+	imx_check_clk_hws(hws, IMX8MP_CLK_END);
+
+	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
+
+	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+		int index = uart_clk_ids[i];
+
+		uart_clks[i] = &hws[index]->clk;
+	}
+
+	imx_register_uart_clocks(uart_clks);
+
+	return 0;
+}
+
+static const struct of_device_id imx8mp_clk_of_match[] = {
+	{ .compatible = "fsl,imx8mp-ccm" },
+	{ /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx8mp_clk_of_match);
+
+static struct platform_driver imx8mp_clk_driver = {
+	.probe = imx8mp_clocks_probe,
+	.driver = {
+		.name = "imx8mp-ccm",
+		/*
+		 * Disable bind attributes: clocks are not removed and
+		 * reloading the driver will crash or break devices.
+		 */
+		.suppress_bind_attrs = true,
+		.of_match_table = of_match_ptr(imx8mp_clk_of_match),
+	},
+};
+module_platform_driver(imx8mp_clk_driver);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 5f10a60..4c0edca 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/types.h>
+#include <linux/slab.h>
 #include <linux/platform_device.h>
 
 #include "clk.h"
@@ -24,8 +25,6 @@ static u32 share_count_sai6;
 static u32 share_count_dcss;
 static u32 share_count_nand;
 
-static struct clk *clks[IMX8MQ_CLK_END];
-
 static const char * const pll_ref_sels[] = { "osc_25m", "osc_27m", "dummy", "dummy", };
 static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
 static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
@@ -269,124 +268,133 @@ static const char * const imx8mq_clko1_sels[] = {"osc_25m", "sys1_pll_800m", "os
 static const char * const imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m",
 					  "sys3_pll_out", "audio_pll1_out", "video_pll1_out", "ckil", };
 
-static struct clk_onecell_data clk_data;
+static struct clk_hw_onecell_data *clk_hw_data;
+static struct clk_hw **hws;
 
-static struct clk ** const uart_clks[] = {
-	&clks[IMX8MQ_CLK_UART1_ROOT],
-	&clks[IMX8MQ_CLK_UART2_ROOT],
-	&clks[IMX8MQ_CLK_UART3_ROOT],
-	&clks[IMX8MQ_CLK_UART4_ROOT],
-	NULL
+static const int uart_clk_ids[] = {
+	IMX8MQ_CLK_UART1_ROOT,
+	IMX8MQ_CLK_UART2_ROOT,
+	IMX8MQ_CLK_UART3_ROOT,
+	IMX8MQ_CLK_UART4_ROOT,
 };
+static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
 
 static int imx8mq_clocks_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	void __iomem *base;
-	int err;
+	int err, i;
 
-	clks[IMX8MQ_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
-	clks[IMX8MQ_CLK_32K] = of_clk_get_by_name(np, "ckil");
-	clks[IMX8MQ_CLK_25M] = of_clk_get_by_name(np, "osc_25m");
-	clks[IMX8MQ_CLK_27M] = of_clk_get_by_name(np, "osc_27m");
-	clks[IMX8MQ_CLK_EXT1] = of_clk_get_by_name(np, "clk_ext1");
-	clks[IMX8MQ_CLK_EXT2] = of_clk_get_by_name(np, "clk_ext2");
-	clks[IMX8MQ_CLK_EXT3] = of_clk_get_by_name(np, "clk_ext3");
-	clks[IMX8MQ_CLK_EXT4] = of_clk_get_by_name(np, "clk_ext4");
+	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+					  IMX8MQ_CLK_END), GFP_KERNEL);
+	if (WARN_ON(!clk_hw_data))
+		return -ENOMEM;
+
+	clk_hw_data->num = IMX8MQ_CLK_END;
+	hws = clk_hw_data->hws;
+
+	hws[IMX8MQ_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+	hws[IMX8MQ_CLK_32K] = imx_obtain_fixed_clk_hw(np, "ckil");
+	hws[IMX8MQ_CLK_25M] = imx_obtain_fixed_clk_hw(np, "osc_25m");
+	hws[IMX8MQ_CLK_27M] = imx_obtain_fixed_clk_hw(np, "osc_27m");
+	hws[IMX8MQ_CLK_EXT1] = imx_obtain_fixed_clk_hw(np, "clk_ext1");
+	hws[IMX8MQ_CLK_EXT2] = imx_obtain_fixed_clk_hw(np, "clk_ext2");
+	hws[IMX8MQ_CLK_EXT3] = imx_obtain_fixed_clk_hw(np, "clk_ext3");
+	hws[IMX8MQ_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
 
 	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-anatop");
 	base = of_iomap(np, 0);
 	if (WARN_ON(!base))
 		return -ENOMEM;
 
-	clks[IMX8MQ_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x28, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MQ_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x18, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MQ_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x20, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MQ_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MQ_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x8, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MQ_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x10, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MQ_SYS3_PLL1_REF_SEL]	= imx_clk_mux("sys3_pll1_ref_sel", base + 0x48, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MQ_DRAM_PLL1_REF_SEL]	= imx_clk_mux("dram_pll1_ref_sel", base + 0x60, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-	clks[IMX8MQ_VIDEO2_PLL1_REF_SEL] = imx_clk_mux("video2_pll1_ref_sel", base + 0x54, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MQ_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x28, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MQ_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x18, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MQ_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x20, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MQ_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", base + 0x0, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MQ_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", base + 0x8, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MQ_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x10, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MQ_SYS3_PLL1_REF_SEL]	= imx_clk_hw_mux("sys3_pll1_ref_sel", base + 0x48, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MQ_DRAM_PLL1_REF_SEL]	= imx_clk_hw_mux("dram_pll1_ref_sel", base + 0x60, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+	hws[IMX8MQ_VIDEO2_PLL1_REF_SEL] = imx_clk_hw_mux("video2_pll1_ref_sel", base + 0x54, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
 
-	clks[IMX8MQ_ARM_PLL_REF_DIV]	= imx_clk_divider("arm_pll_ref_div", "arm_pll_ref_sel", base + 0x28, 5, 6);
-	clks[IMX8MQ_GPU_PLL_REF_DIV]	= imx_clk_divider("gpu_pll_ref_div", "gpu_pll_ref_sel", base + 0x18, 5, 6);
-	clks[IMX8MQ_VPU_PLL_REF_DIV]	= imx_clk_divider("vpu_pll_ref_div", "vpu_pll_ref_sel", base + 0x20, 5, 6);
-	clks[IMX8MQ_AUDIO_PLL1_REF_DIV] = imx_clk_divider("audio_pll1_ref_div", "audio_pll1_ref_sel", base + 0x0, 5, 6);
-	clks[IMX8MQ_AUDIO_PLL2_REF_DIV] = imx_clk_divider("audio_pll2_ref_div", "audio_pll2_ref_sel", base + 0x8, 5, 6);
-	clks[IMX8MQ_VIDEO_PLL1_REF_DIV] = imx_clk_divider("video_pll1_ref_div", "video_pll1_ref_sel", base + 0x10, 5, 6);
+	hws[IMX8MQ_ARM_PLL_REF_DIV]	= imx_clk_hw_divider("arm_pll_ref_div", "arm_pll_ref_sel", base + 0x28, 5, 6);
+	hws[IMX8MQ_GPU_PLL_REF_DIV]	= imx_clk_hw_divider("gpu_pll_ref_div", "gpu_pll_ref_sel", base + 0x18, 5, 6);
+	hws[IMX8MQ_VPU_PLL_REF_DIV]	= imx_clk_hw_divider("vpu_pll_ref_div", "vpu_pll_ref_sel", base + 0x20, 5, 6);
+	hws[IMX8MQ_AUDIO_PLL1_REF_DIV] = imx_clk_hw_divider("audio_pll1_ref_div", "audio_pll1_ref_sel", base + 0x0, 5, 6);
+	hws[IMX8MQ_AUDIO_PLL2_REF_DIV] = imx_clk_hw_divider("audio_pll2_ref_div", "audio_pll2_ref_sel", base + 0x8, 5, 6);
+	hws[IMX8MQ_VIDEO_PLL1_REF_DIV] = imx_clk_hw_divider("video_pll1_ref_div", "video_pll1_ref_sel", base + 0x10, 5, 6);
 
-	clks[IMX8MQ_ARM_PLL] = imx_clk_frac_pll("arm_pll", "arm_pll_ref_div", base + 0x28);
-	clks[IMX8MQ_GPU_PLL] = imx_clk_frac_pll("gpu_pll", "gpu_pll_ref_div", base + 0x18);
-	clks[IMX8MQ_VPU_PLL] = imx_clk_frac_pll("vpu_pll", "vpu_pll_ref_div", base + 0x20);
-	clks[IMX8MQ_AUDIO_PLL1] = imx_clk_frac_pll("audio_pll1", "audio_pll1_ref_div", base + 0x0);
-	clks[IMX8MQ_AUDIO_PLL2] = imx_clk_frac_pll("audio_pll2", "audio_pll2_ref_div", base + 0x8);
-	clks[IMX8MQ_VIDEO_PLL1] = imx_clk_frac_pll("video_pll1", "video_pll1_ref_div", base + 0x10);
+	hws[IMX8MQ_ARM_PLL] = imx_clk_hw_frac_pll("arm_pll", "arm_pll_ref_div", base + 0x28);
+	hws[IMX8MQ_GPU_PLL] = imx_clk_hw_frac_pll("gpu_pll", "gpu_pll_ref_div", base + 0x18);
+	hws[IMX8MQ_VPU_PLL] = imx_clk_hw_frac_pll("vpu_pll", "vpu_pll_ref_div", base + 0x20);
+	hws[IMX8MQ_AUDIO_PLL1] = imx_clk_hw_frac_pll("audio_pll1", "audio_pll1_ref_div", base + 0x0);
+	hws[IMX8MQ_AUDIO_PLL2] = imx_clk_hw_frac_pll("audio_pll2", "audio_pll2_ref_div", base + 0x8);
+	hws[IMX8MQ_VIDEO_PLL1] = imx_clk_hw_frac_pll("video_pll1", "video_pll1_ref_div", base + 0x10);
 
 	/* PLL bypass out */
-	clks[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
-	clks[IMX8MQ_GPU_PLL_BYPASS] = imx_clk_mux("gpu_pll_bypass", base + 0x18, 14, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels));
-	clks[IMX8MQ_VPU_PLL_BYPASS] = imx_clk_mux("vpu_pll_bypass", base + 0x20, 14, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels));
-	clks[IMX8MQ_AUDIO_PLL1_BYPASS] = imx_clk_mux("audio_pll1_bypass", base + 0x0, 14, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels));
-	clks[IMX8MQ_AUDIO_PLL2_BYPASS] = imx_clk_mux("audio_pll2_bypass", base + 0x8, 14, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels));
-	clks[IMX8MQ_VIDEO_PLL1_BYPASS] = imx_clk_mux("video_pll1_bypass", base + 0x10, 14, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels));
+	hws[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+	hws[IMX8MQ_GPU_PLL_BYPASS] = imx_clk_hw_mux("gpu_pll_bypass", base + 0x18, 14, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels));
+	hws[IMX8MQ_VPU_PLL_BYPASS] = imx_clk_hw_mux("vpu_pll_bypass", base + 0x20, 14, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels));
+	hws[IMX8MQ_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux("audio_pll1_bypass", base + 0x0, 14, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels));
+	hws[IMX8MQ_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux("audio_pll2_bypass", base + 0x8, 14, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels));
+	hws[IMX8MQ_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux("video_pll1_bypass", base + 0x10, 14, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels));
 
 	/* PLL OUT GATE */
-	clks[IMX8MQ_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x28, 21);
-	clks[IMX8MQ_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x18, 21);
-	clks[IMX8MQ_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x20, 21);
-	clks[IMX8MQ_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base + 0x0, 21);
-	clks[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21);
-	clks[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21);
+	hws[IMX8MQ_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x28, 21);
+	hws[IMX8MQ_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x18, 21);
+	hws[IMX8MQ_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x20, 21);
+	hws[IMX8MQ_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", base + 0x0, 21);
+	hws[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21);
+	hws[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21);
 
-	clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_fixed("sys1_pll_out", 800000000);
-	clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_fixed("sys2_pll_out", 1000000000);
-	clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_sccg_pll("sys3_pll_out", sys3_pll_out_sels, ARRAY_SIZE(sys3_pll_out_sels), 0, 0, 0, base + 0x48, CLK_IS_CRITICAL);
-	clks[IMX8MQ_DRAM_PLL_OUT] = imx_clk_sccg_pll("dram_pll_out", dram_pll_out_sels, ARRAY_SIZE(dram_pll_out_sels), 0, 0, 0, base + 0x60, CLK_IS_CRITICAL);
-	clks[IMX8MQ_VIDEO2_PLL_OUT] = imx_clk_sccg_pll("video2_pll_out", video2_pll_out_sels, ARRAY_SIZE(video2_pll_out_sels), 0, 0, 0, base + 0x54, 0);
+	hws[IMX8MQ_SYS1_PLL_OUT] = imx_clk_hw_fixed("sys1_pll_out", 800000000);
+	hws[IMX8MQ_SYS2_PLL_OUT] = imx_clk_hw_fixed("sys2_pll_out", 1000000000);
+	hws[IMX8MQ_SYS3_PLL_OUT] = imx_clk_hw_sscg_pll("sys3_pll_out", sys3_pll_out_sels, ARRAY_SIZE(sys3_pll_out_sels), 0, 0, 0, base + 0x48, CLK_IS_CRITICAL);
+	hws[IMX8MQ_DRAM_PLL_OUT] = imx_clk_hw_sscg_pll("dram_pll_out", dram_pll_out_sels, ARRAY_SIZE(dram_pll_out_sels), 0, 0, 0, base + 0x60, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
+	hws[IMX8MQ_VIDEO2_PLL_OUT] = imx_clk_hw_sscg_pll("video2_pll_out", video2_pll_out_sels, ARRAY_SIZE(video2_pll_out_sels), 0, 0, 0, base + 0x54, 0);
 
 	/* SYS PLL1 fixed output */
-	clks[IMX8MQ_SYS1_PLL_40M_CG] = imx_clk_gate("sys1_pll_40m_cg", "sys1_pll_out", base + 0x30, 9);
-	clks[IMX8MQ_SYS1_PLL_80M_CG] = imx_clk_gate("sys1_pll_80m_cg", "sys1_pll_out", base + 0x30, 11);
-	clks[IMX8MQ_SYS1_PLL_100M_CG] = imx_clk_gate("sys1_pll_100m_cg", "sys1_pll_out", base + 0x30, 13);
-	clks[IMX8MQ_SYS1_PLL_133M_CG] = imx_clk_gate("sys1_pll_133m_cg", "sys1_pll_out", base + 0x30, 15);
-	clks[IMX8MQ_SYS1_PLL_160M_CG] = imx_clk_gate("sys1_pll_160m_cg", "sys1_pll_out", base + 0x30, 17);
-	clks[IMX8MQ_SYS1_PLL_200M_CG] = imx_clk_gate("sys1_pll_200m_cg", "sys1_pll_out", base + 0x30, 19);
-	clks[IMX8MQ_SYS1_PLL_266M_CG] = imx_clk_gate("sys1_pll_266m_cg", "sys1_pll_out", base + 0x30, 21);
-	clks[IMX8MQ_SYS1_PLL_400M_CG] = imx_clk_gate("sys1_pll_400m_cg", "sys1_pll_out", base + 0x30, 23);
-	clks[IMX8MQ_SYS1_PLL_800M_CG] = imx_clk_gate("sys1_pll_800m_cg", "sys1_pll_out", base + 0x30, 25);
+	hws[IMX8MQ_SYS1_PLL_40M_CG] = imx_clk_hw_gate("sys1_pll_40m_cg", "sys1_pll_out", base + 0x30, 9);
+	hws[IMX8MQ_SYS1_PLL_80M_CG] = imx_clk_hw_gate("sys1_pll_80m_cg", "sys1_pll_out", base + 0x30, 11);
+	hws[IMX8MQ_SYS1_PLL_100M_CG] = imx_clk_hw_gate("sys1_pll_100m_cg", "sys1_pll_out", base + 0x30, 13);
+	hws[IMX8MQ_SYS1_PLL_133M_CG] = imx_clk_hw_gate("sys1_pll_133m_cg", "sys1_pll_out", base + 0x30, 15);
+	hws[IMX8MQ_SYS1_PLL_160M_CG] = imx_clk_hw_gate("sys1_pll_160m_cg", "sys1_pll_out", base + 0x30, 17);
+	hws[IMX8MQ_SYS1_PLL_200M_CG] = imx_clk_hw_gate("sys1_pll_200m_cg", "sys1_pll_out", base + 0x30, 19);
+	hws[IMX8MQ_SYS1_PLL_266M_CG] = imx_clk_hw_gate("sys1_pll_266m_cg", "sys1_pll_out", base + 0x30, 21);
+	hws[IMX8MQ_SYS1_PLL_400M_CG] = imx_clk_hw_gate("sys1_pll_400m_cg", "sys1_pll_out", base + 0x30, 23);
+	hws[IMX8MQ_SYS1_PLL_800M_CG] = imx_clk_hw_gate("sys1_pll_800m_cg", "sys1_pll_out", base + 0x30, 25);
 
-	clks[IMX8MQ_SYS1_PLL_40M] = imx_clk_fixed_factor("sys1_pll_40m", "sys1_pll_40m_cg", 1, 20);
-	clks[IMX8MQ_SYS1_PLL_80M] = imx_clk_fixed_factor("sys1_pll_80m", "sys1_pll_80m_cg", 1, 10);
-	clks[IMX8MQ_SYS1_PLL_100M] = imx_clk_fixed_factor("sys1_pll_100m", "sys1_pll_100m_cg", 1, 8);
-	clks[IMX8MQ_SYS1_PLL_133M] = imx_clk_fixed_factor("sys1_pll_133m", "sys1_pll_133m_cg", 1, 6);
-	clks[IMX8MQ_SYS1_PLL_160M] = imx_clk_fixed_factor("sys1_pll_160m", "sys1_pll_160m_cg", 1, 5);
-	clks[IMX8MQ_SYS1_PLL_200M] = imx_clk_fixed_factor("sys1_pll_200m", "sys1_pll_200m_cg", 1, 4);
-	clks[IMX8MQ_SYS1_PLL_266M] = imx_clk_fixed_factor("sys1_pll_266m", "sys1_pll_266m_cg", 1, 3);
-	clks[IMX8MQ_SYS1_PLL_400M] = imx_clk_fixed_factor("sys1_pll_400m", "sys1_pll_400m_cg", 1, 2);
-	clks[IMX8MQ_SYS1_PLL_800M] = imx_clk_fixed_factor("sys1_pll_800m", "sys1_pll_800m_cg", 1, 1);
+	hws[IMX8MQ_SYS1_PLL_40M] = imx_clk_hw_fixed_factor("sys1_pll_40m", "sys1_pll_40m_cg", 1, 20);
+	hws[IMX8MQ_SYS1_PLL_80M] = imx_clk_hw_fixed_factor("sys1_pll_80m", "sys1_pll_80m_cg", 1, 10);
+	hws[IMX8MQ_SYS1_PLL_100M] = imx_clk_hw_fixed_factor("sys1_pll_100m", "sys1_pll_100m_cg", 1, 8);
+	hws[IMX8MQ_SYS1_PLL_133M] = imx_clk_hw_fixed_factor("sys1_pll_133m", "sys1_pll_133m_cg", 1, 6);
+	hws[IMX8MQ_SYS1_PLL_160M] = imx_clk_hw_fixed_factor("sys1_pll_160m", "sys1_pll_160m_cg", 1, 5);
+	hws[IMX8MQ_SYS1_PLL_200M] = imx_clk_hw_fixed_factor("sys1_pll_200m", "sys1_pll_200m_cg", 1, 4);
+	hws[IMX8MQ_SYS1_PLL_266M] = imx_clk_hw_fixed_factor("sys1_pll_266m", "sys1_pll_266m_cg", 1, 3);
+	hws[IMX8MQ_SYS1_PLL_400M] = imx_clk_hw_fixed_factor("sys1_pll_400m", "sys1_pll_400m_cg", 1, 2);
+	hws[IMX8MQ_SYS1_PLL_800M] = imx_clk_hw_fixed_factor("sys1_pll_800m", "sys1_pll_800m_cg", 1, 1);
 
 	/* SYS PLL2 fixed output */
-	clks[IMX8MQ_SYS2_PLL_50M_CG] = imx_clk_gate("sys2_pll_50m_cg", "sys2_pll_out", base + 0x3c, 9);
-	clks[IMX8MQ_SYS2_PLL_100M_CG] = imx_clk_gate("sys2_pll_100m_cg", "sys2_pll_out", base + 0x3c, 11);
-	clks[IMX8MQ_SYS2_PLL_125M_CG] = imx_clk_gate("sys2_pll_125m_cg", "sys2_pll_out", base + 0x3c, 13);
-	clks[IMX8MQ_SYS2_PLL_166M_CG] = imx_clk_gate("sys2_pll_166m_cg", "sys2_pll_out", base + 0x3c, 15);
-	clks[IMX8MQ_SYS2_PLL_200M_CG] = imx_clk_gate("sys2_pll_200m_cg", "sys2_pll_out", base + 0x3c, 17);
-	clks[IMX8MQ_SYS2_PLL_250M_CG] = imx_clk_gate("sys2_pll_250m_cg", "sys2_pll_out", base + 0x3c, 19);
-	clks[IMX8MQ_SYS2_PLL_333M_CG] = imx_clk_gate("sys2_pll_333m_cg", "sys2_pll_out", base + 0x3c, 21);
-	clks[IMX8MQ_SYS2_PLL_500M_CG] = imx_clk_gate("sys2_pll_500m_cg", "sys2_pll_out", base + 0x3c, 23);
-	clks[IMX8MQ_SYS2_PLL_1000M_CG] = imx_clk_gate("sys2_pll_1000m_cg", "sys2_pll_out", base + 0x3c, 25);
+	hws[IMX8MQ_SYS2_PLL_50M_CG] = imx_clk_hw_gate("sys2_pll_50m_cg", "sys2_pll_out", base + 0x3c, 9);
+	hws[IMX8MQ_SYS2_PLL_100M_CG] = imx_clk_hw_gate("sys2_pll_100m_cg", "sys2_pll_out", base + 0x3c, 11);
+	hws[IMX8MQ_SYS2_PLL_125M_CG] = imx_clk_hw_gate("sys2_pll_125m_cg", "sys2_pll_out", base + 0x3c, 13);
+	hws[IMX8MQ_SYS2_PLL_166M_CG] = imx_clk_hw_gate("sys2_pll_166m_cg", "sys2_pll_out", base + 0x3c, 15);
+	hws[IMX8MQ_SYS2_PLL_200M_CG] = imx_clk_hw_gate("sys2_pll_200m_cg", "sys2_pll_out", base + 0x3c, 17);
+	hws[IMX8MQ_SYS2_PLL_250M_CG] = imx_clk_hw_gate("sys2_pll_250m_cg", "sys2_pll_out", base + 0x3c, 19);
+	hws[IMX8MQ_SYS2_PLL_333M_CG] = imx_clk_hw_gate("sys2_pll_333m_cg", "sys2_pll_out", base + 0x3c, 21);
+	hws[IMX8MQ_SYS2_PLL_500M_CG] = imx_clk_hw_gate("sys2_pll_500m_cg", "sys2_pll_out", base + 0x3c, 23);
+	hws[IMX8MQ_SYS2_PLL_1000M_CG] = imx_clk_hw_gate("sys2_pll_1000m_cg", "sys2_pll_out", base + 0x3c, 25);
 
-	clks[IMX8MQ_SYS2_PLL_50M] = imx_clk_fixed_factor("sys2_pll_50m", "sys2_pll_50m_cg", 1, 20);
-	clks[IMX8MQ_SYS2_PLL_100M] = imx_clk_fixed_factor("sys2_pll_100m", "sys2_pll_100m_cg", 1, 10);
-	clks[IMX8MQ_SYS2_PLL_125M] = imx_clk_fixed_factor("sys2_pll_125m", "sys2_pll_125m_cg", 1, 8);
-	clks[IMX8MQ_SYS2_PLL_166M] = imx_clk_fixed_factor("sys2_pll_166m", "sys2_pll_166m_cg", 1, 6);
-	clks[IMX8MQ_SYS2_PLL_200M] = imx_clk_fixed_factor("sys2_pll_200m", "sys2_pll_200m_cg", 1, 5);
-	clks[IMX8MQ_SYS2_PLL_250M] = imx_clk_fixed_factor("sys2_pll_250m", "sys2_pll_250m_cg", 1, 4);
-	clks[IMX8MQ_SYS2_PLL_333M] = imx_clk_fixed_factor("sys2_pll_333m", "sys2_pll_333m_cg", 1, 3);
-	clks[IMX8MQ_SYS2_PLL_500M] = imx_clk_fixed_factor("sys2_pll_500m", "sys2_pll_500m_cg", 1, 2);
-	clks[IMX8MQ_SYS2_PLL_1000M] = imx_clk_fixed_factor("sys2_pll_1000m", "sys2_pll_1000m_cg", 1, 1);
+	hws[IMX8MQ_SYS2_PLL_50M] = imx_clk_hw_fixed_factor("sys2_pll_50m", "sys2_pll_50m_cg", 1, 20);
+	hws[IMX8MQ_SYS2_PLL_100M] = imx_clk_hw_fixed_factor("sys2_pll_100m", "sys2_pll_100m_cg", 1, 10);
+	hws[IMX8MQ_SYS2_PLL_125M] = imx_clk_hw_fixed_factor("sys2_pll_125m", "sys2_pll_125m_cg", 1, 8);
+	hws[IMX8MQ_SYS2_PLL_166M] = imx_clk_hw_fixed_factor("sys2_pll_166m", "sys2_pll_166m_cg", 1, 6);
+	hws[IMX8MQ_SYS2_PLL_200M] = imx_clk_hw_fixed_factor("sys2_pll_200m", "sys2_pll_200m_cg", 1, 5);
+	hws[IMX8MQ_SYS2_PLL_250M] = imx_clk_hw_fixed_factor("sys2_pll_250m", "sys2_pll_250m_cg", 1, 4);
+	hws[IMX8MQ_SYS2_PLL_333M] = imx_clk_hw_fixed_factor("sys2_pll_333m", "sys2_pll_333m_cg", 1, 3);
+	hws[IMX8MQ_SYS2_PLL_500M] = imx_clk_hw_fixed_factor("sys2_pll_500m", "sys2_pll_500m_cg", 1, 2);
+	hws[IMX8MQ_SYS2_PLL_1000M] = imx_clk_hw_fixed_factor("sys2_pll_1000m", "sys2_pll_1000m_cg", 1, 1);
 
 	np = dev->of_node;
 	base = devm_platform_ioremap_resource(pdev, 0);
@@ -394,206 +402,213 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
 		return PTR_ERR(base);
 
 	/* CORE */
-	clks[IMX8MQ_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels));
-	clks[IMX8MQ_CLK_M4_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mq_arm_m4_sels, ARRAY_SIZE(imx8mq_arm_m4_sels));
-	clks[IMX8MQ_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels));
-	clks[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_mux2("gpu_core_src", base + 0x8180, 24, 3,  imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels));
-	clks[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels,  ARRAY_SIZE(imx8mq_gpu_shader_sels));
+	hws[IMX8MQ_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels));
+	hws[IMX8MQ_CLK_M4_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mq_arm_m4_sels, ARRAY_SIZE(imx8mq_arm_m4_sels));
+	hws[IMX8MQ_CLK_VPU_SRC] = imx_clk_hw_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels));
+	hws[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_hw_mux2("gpu_core_src", base + 0x8180, 24, 3,  imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels));
+	hws[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_hw_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels,  ARRAY_SIZE(imx8mq_gpu_shader_sels));
 
-	clks[IMX8MQ_CLK_A53_CG] = imx_clk_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL);
-	clks[IMX8MQ_CLK_M4_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
-	clks[IMX8MQ_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
-	clks[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
-	clks[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
+	hws[IMX8MQ_CLK_A53_CG] = imx_clk_hw_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL);
+	hws[IMX8MQ_CLK_M4_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
+	hws[IMX8MQ_CLK_VPU_CG] = imx_clk_hw_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
+	hws[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_hw_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
+	hws[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_hw_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
 
-	clks[IMX8MQ_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
-	clks[IMX8MQ_CLK_M4_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
-	clks[IMX8MQ_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
-	clks[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
-	clks[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
+	hws[IMX8MQ_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+	hws[IMX8MQ_CLK_M4_DIV] = imx_clk_hw_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
+	hws[IMX8MQ_CLK_VPU_DIV] = imx_clk_hw_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
+	hws[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_hw_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
+	hws[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_hw_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
 
 	/* BUS */
-	clks[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
-	clks[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mq_enet_axi_sels, base + 0x8880);
-	clks[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_composite("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900);
-	clks[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_composite("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980);
-	clks[IMX8MQ_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mq_disp_axi_sels, base + 0x8a00);
-	clks[IMX8MQ_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mq_disp_apb_sels, base + 0x8a80);
-	clks[IMX8MQ_CLK_DISP_RTRM] = imx8m_clk_composite("disp_rtrm", imx8mq_disp_rtrm_sels, base + 0x8b00);
-	clks[IMX8MQ_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mq_usb_bus_sels, base + 0x8b80);
-	clks[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00);
-	clks[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80);
-	clks[IMX8MQ_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mq_noc_sels, base + 0x8d00);
-	clks[IMX8MQ_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
+	hws[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
+	hws[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mq_enet_axi_sels, base + 0x8880);
+	hws[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900);
+	hws[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980);
+	hws[IMX8MQ_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mq_disp_axi_sels, base + 0x8a00);
+	hws[IMX8MQ_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mq_disp_apb_sels, base + 0x8a80);
+	hws[IMX8MQ_CLK_DISP_RTRM] = imx8m_clk_hw_composite("disp_rtrm", imx8mq_disp_rtrm_sels, base + 0x8b00);
+	hws[IMX8MQ_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mq_usb_bus_sels, base + 0x8b80);
+	hws[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00);
+	hws[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80);
+	hws[IMX8MQ_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mq_noc_sels, base + 0x8d00);
+	hws[IMX8MQ_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
 
 	/* AHB */
 	/* AHB clock is used by the AHB bus therefore marked as critical */
-	clks[IMX8MQ_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
-	clks[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
+	hws[IMX8MQ_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
+	hws[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
 
 	/* IPG */
-	clks[IMX8MQ_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
-	clks[IMX8MQ_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+	hws[IMX8MQ_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
+	hws[IMX8MQ_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+
+	/*
+	 * DRAM clocks are manipulated from TF-A outside the clock framework.
+	 * Mark with GET_RATE_NOCACHE to always read the div value from hardware.
+	 */
+	hws[IMX8MQ_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mq_dram_core_sels, ARRAY_SIZE(imx8mq_dram_core_sels), CLK_IS_CRITICAL);
+	hws[IMX8MQ_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
+	hws[IMX8MQ_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mq_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
 
 	/* IP */
-	clks[IMX8MQ_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mq_dram_core_sels, ARRAY_SIZE(imx8mq_dram_core_sels), CLK_IS_CRITICAL);
+	hws[IMX8MQ_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mq_vpu_g1_sels, base + 0xa100);
+	hws[IMX8MQ_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mq_vpu_g2_sels, base + 0xa180);
+	hws[IMX8MQ_CLK_DISP_DTRC] = imx8m_clk_hw_composite("disp_dtrc", imx8mq_disp_dtrc_sels, base + 0xa200);
+	hws[IMX8MQ_CLK_DISP_DC8000] = imx8m_clk_hw_composite("disp_dc8000", imx8mq_disp_dc8000_sels, base + 0xa280);
+	hws[IMX8MQ_CLK_PCIE1_CTRL] = imx8m_clk_hw_composite("pcie1_ctrl", imx8mq_pcie1_ctrl_sels, base + 0xa300);
+	hws[IMX8MQ_CLK_PCIE1_PHY] = imx8m_clk_hw_composite("pcie1_phy", imx8mq_pcie1_phy_sels, base + 0xa380);
+	hws[IMX8MQ_CLK_PCIE1_AUX] = imx8m_clk_hw_composite("pcie1_aux", imx8mq_pcie1_aux_sels, base + 0xa400);
+	hws[IMX8MQ_CLK_DC_PIXEL] = imx8m_clk_hw_composite("dc_pixel", imx8mq_dc_pixel_sels, base + 0xa480);
+	hws[IMX8MQ_CLK_LCDIF_PIXEL] = imx8m_clk_hw_composite("lcdif_pixel", imx8mq_lcdif_pixel_sels, base + 0xa500);
+	hws[IMX8MQ_CLK_SAI1] = imx8m_clk_hw_composite("sai1", imx8mq_sai1_sels, base + 0xa580);
+	hws[IMX8MQ_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mq_sai2_sels, base + 0xa600);
+	hws[IMX8MQ_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mq_sai3_sels, base + 0xa680);
+	hws[IMX8MQ_CLK_SAI4] = imx8m_clk_hw_composite("sai4", imx8mq_sai4_sels, base + 0xa700);
+	hws[IMX8MQ_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mq_sai5_sels, base + 0xa780);
+	hws[IMX8MQ_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mq_sai6_sels, base + 0xa800);
+	hws[IMX8MQ_CLK_SPDIF1] = imx8m_clk_hw_composite("spdif1", imx8mq_spdif1_sels, base + 0xa880);
+	hws[IMX8MQ_CLK_SPDIF2] = imx8m_clk_hw_composite("spdif2", imx8mq_spdif2_sels, base + 0xa900);
+	hws[IMX8MQ_CLK_ENET_REF] = imx8m_clk_hw_composite("enet_ref", imx8mq_enet_ref_sels, base + 0xa980);
+	hws[IMX8MQ_CLK_ENET_TIMER] = imx8m_clk_hw_composite("enet_timer", imx8mq_enet_timer_sels, base + 0xaa00);
+	hws[IMX8MQ_CLK_ENET_PHY_REF] = imx8m_clk_hw_composite("enet_phy", imx8mq_enet_phy_sels, base + 0xaa80);
+	hws[IMX8MQ_CLK_NAND] = imx8m_clk_hw_composite("nand", imx8mq_nand_sels, base + 0xab00);
+	hws[IMX8MQ_CLK_QSPI] = imx8m_clk_hw_composite("qspi", imx8mq_qspi_sels, base + 0xab80);
+	hws[IMX8MQ_CLK_USDHC1] = imx8m_clk_hw_composite("usdhc1", imx8mq_usdhc1_sels, base + 0xac00);
+	hws[IMX8MQ_CLK_USDHC2] = imx8m_clk_hw_composite("usdhc2", imx8mq_usdhc2_sels, base + 0xac80);
+	hws[IMX8MQ_CLK_I2C1] = imx8m_clk_hw_composite("i2c1", imx8mq_i2c1_sels, base + 0xad00);
+	hws[IMX8MQ_CLK_I2C2] = imx8m_clk_hw_composite("i2c2", imx8mq_i2c2_sels, base + 0xad80);
+	hws[IMX8MQ_CLK_I2C3] = imx8m_clk_hw_composite("i2c3", imx8mq_i2c3_sels, base + 0xae00);
+	hws[IMX8MQ_CLK_I2C4] = imx8m_clk_hw_composite("i2c4", imx8mq_i2c4_sels, base + 0xae80);
+	hws[IMX8MQ_CLK_UART1] = imx8m_clk_hw_composite("uart1", imx8mq_uart1_sels, base + 0xaf00);
+	hws[IMX8MQ_CLK_UART2] = imx8m_clk_hw_composite("uart2", imx8mq_uart2_sels, base + 0xaf80);
+	hws[IMX8MQ_CLK_UART3] = imx8m_clk_hw_composite("uart3", imx8mq_uart3_sels, base + 0xb000);
+	hws[IMX8MQ_CLK_UART4] = imx8m_clk_hw_composite("uart4", imx8mq_uart4_sels, base + 0xb080);
+	hws[IMX8MQ_CLK_USB_CORE_REF] = imx8m_clk_hw_composite("usb_core_ref", imx8mq_usb_core_sels, base + 0xb100);
+	hws[IMX8MQ_CLK_USB_PHY_REF] = imx8m_clk_hw_composite("usb_phy_ref", imx8mq_usb_phy_sels, base + 0xb180);
+	hws[IMX8MQ_CLK_GIC] = imx8m_clk_hw_composite_critical("gic", imx8mq_gic_sels, base + 0xb200);
+	hws[IMX8MQ_CLK_ECSPI1] = imx8m_clk_hw_composite("ecspi1", imx8mq_ecspi1_sels, base + 0xb280);
+	hws[IMX8MQ_CLK_ECSPI2] = imx8m_clk_hw_composite("ecspi2", imx8mq_ecspi2_sels, base + 0xb300);
+	hws[IMX8MQ_CLK_PWM1] = imx8m_clk_hw_composite("pwm1", imx8mq_pwm1_sels, base + 0xb380);
+	hws[IMX8MQ_CLK_PWM2] = imx8m_clk_hw_composite("pwm2", imx8mq_pwm2_sels, base + 0xb400);
+	hws[IMX8MQ_CLK_PWM3] = imx8m_clk_hw_composite("pwm3", imx8mq_pwm3_sels, base + 0xb480);
+	hws[IMX8MQ_CLK_PWM4] = imx8m_clk_hw_composite("pwm4", imx8mq_pwm4_sels, base + 0xb500);
+	hws[IMX8MQ_CLK_GPT1] = imx8m_clk_hw_composite("gpt1", imx8mq_gpt1_sels, base + 0xb580);
+	hws[IMX8MQ_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mq_wdog_sels, base + 0xb900);
+	hws[IMX8MQ_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mq_wrclk_sels, base + 0xb980);
+	hws[IMX8MQ_CLK_CLKO1] = imx8m_clk_hw_composite("clko1", imx8mq_clko1_sels, base + 0xba00);
+	hws[IMX8MQ_CLK_CLKO2] = imx8m_clk_hw_composite("clko2", imx8mq_clko2_sels, base + 0xba80);
+	hws[IMX8MQ_CLK_DSI_CORE] = imx8m_clk_hw_composite("dsi_core", imx8mq_dsi_core_sels, base + 0xbb00);
+	hws[IMX8MQ_CLK_DSI_PHY_REF] = imx8m_clk_hw_composite("dsi_phy_ref", imx8mq_dsi_phy_sels, base + 0xbb80);
+	hws[IMX8MQ_CLK_DSI_DBI] = imx8m_clk_hw_composite("dsi_dbi", imx8mq_dsi_dbi_sels, base + 0xbc00);
+	hws[IMX8MQ_CLK_DSI_ESC] = imx8m_clk_hw_composite("dsi_esc", imx8mq_dsi_esc_sels, base + 0xbc80);
+	hws[IMX8MQ_CLK_DSI_AHB] = imx8m_clk_hw_composite("dsi_ahb", imx8mq_dsi_ahb_sels, base + 0x9200);
+	hws[IMX8MQ_CLK_DSI_IPG_DIV] = imx_clk_hw_divider2("dsi_ipg_div", "dsi_ahb", base + 0x9280, 0, 6);
+	hws[IMX8MQ_CLK_CSI1_CORE] = imx8m_clk_hw_composite("csi1_core", imx8mq_csi1_core_sels, base + 0xbd00);
+	hws[IMX8MQ_CLK_CSI1_PHY_REF] = imx8m_clk_hw_composite("csi1_phy_ref", imx8mq_csi1_phy_sels, base + 0xbd80);
+	hws[IMX8MQ_CLK_CSI1_ESC] = imx8m_clk_hw_composite("csi1_esc", imx8mq_csi1_esc_sels, base + 0xbe00);
+	hws[IMX8MQ_CLK_CSI2_CORE] = imx8m_clk_hw_composite("csi2_core", imx8mq_csi2_core_sels, base + 0xbe80);
+	hws[IMX8MQ_CLK_CSI2_PHY_REF] = imx8m_clk_hw_composite("csi2_phy_ref", imx8mq_csi2_phy_sels, base + 0xbf00);
+	hws[IMX8MQ_CLK_CSI2_ESC] = imx8m_clk_hw_composite("csi2_esc", imx8mq_csi2_esc_sels, base + 0xbf80);
+	hws[IMX8MQ_CLK_PCIE2_CTRL] = imx8m_clk_hw_composite("pcie2_ctrl", imx8mq_pcie2_ctrl_sels, base + 0xc000);
+	hws[IMX8MQ_CLK_PCIE2_PHY] = imx8m_clk_hw_composite("pcie2_phy", imx8mq_pcie2_phy_sels, base + 0xc080);
+	hws[IMX8MQ_CLK_PCIE2_AUX] = imx8m_clk_hw_composite("pcie2_aux", imx8mq_pcie2_aux_sels, base + 0xc100);
+	hws[IMX8MQ_CLK_ECSPI3] = imx8m_clk_hw_composite("ecspi3", imx8mq_ecspi3_sels, base + 0xc180);
 
-	clks[IMX8MQ_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000);
-	clks[IMX8MQ_CLK_DRAM_APB] = imx8m_clk_composite_critical("dram_apb", imx8mq_dram_apb_sels, base + 0xa080);
-	clks[IMX8MQ_CLK_VPU_G1] = imx8m_clk_composite("vpu_g1", imx8mq_vpu_g1_sels, base + 0xa100);
-	clks[IMX8MQ_CLK_VPU_G2] = imx8m_clk_composite("vpu_g2", imx8mq_vpu_g2_sels, base + 0xa180);
-	clks[IMX8MQ_CLK_DISP_DTRC] = imx8m_clk_composite("disp_dtrc", imx8mq_disp_dtrc_sels, base + 0xa200);
-	clks[IMX8MQ_CLK_DISP_DC8000] = imx8m_clk_composite("disp_dc8000", imx8mq_disp_dc8000_sels, base + 0xa280);
-	clks[IMX8MQ_CLK_PCIE1_CTRL] = imx8m_clk_composite("pcie1_ctrl", imx8mq_pcie1_ctrl_sels, base + 0xa300);
-	clks[IMX8MQ_CLK_PCIE1_PHY] = imx8m_clk_composite("pcie1_phy", imx8mq_pcie1_phy_sels, base + 0xa380);
-	clks[IMX8MQ_CLK_PCIE1_AUX] = imx8m_clk_composite("pcie1_aux", imx8mq_pcie1_aux_sels, base + 0xa400);
-	clks[IMX8MQ_CLK_DC_PIXEL] = imx8m_clk_composite("dc_pixel", imx8mq_dc_pixel_sels, base + 0xa480);
-	clks[IMX8MQ_CLK_LCDIF_PIXEL] = imx8m_clk_composite("lcdif_pixel", imx8mq_lcdif_pixel_sels, base + 0xa500);
-	clks[IMX8MQ_CLK_SAI1] = imx8m_clk_composite("sai1", imx8mq_sai1_sels, base + 0xa580);
-	clks[IMX8MQ_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mq_sai2_sels, base + 0xa600);
-	clks[IMX8MQ_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mq_sai3_sels, base + 0xa680);
-	clks[IMX8MQ_CLK_SAI4] = imx8m_clk_composite("sai4", imx8mq_sai4_sels, base + 0xa700);
-	clks[IMX8MQ_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mq_sai5_sels, base + 0xa780);
-	clks[IMX8MQ_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mq_sai6_sels, base + 0xa800);
-	clks[IMX8MQ_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mq_spdif1_sels, base + 0xa880);
-	clks[IMX8MQ_CLK_SPDIF2] = imx8m_clk_composite("spdif2", imx8mq_spdif2_sels, base + 0xa900);
-	clks[IMX8MQ_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mq_enet_ref_sels, base + 0xa980);
-	clks[IMX8MQ_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mq_enet_timer_sels, base + 0xaa00);
-	clks[IMX8MQ_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mq_enet_phy_sels, base + 0xaa80);
-	clks[IMX8MQ_CLK_NAND] = imx8m_clk_composite("nand", imx8mq_nand_sels, base + 0xab00);
-	clks[IMX8MQ_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mq_qspi_sels, base + 0xab80);
-	clks[IMX8MQ_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mq_usdhc1_sels, base + 0xac00);
-	clks[IMX8MQ_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mq_usdhc2_sels, base + 0xac80);
-	clks[IMX8MQ_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mq_i2c1_sels, base + 0xad00);
-	clks[IMX8MQ_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mq_i2c2_sels, base + 0xad80);
-	clks[IMX8MQ_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mq_i2c3_sels, base + 0xae00);
-	clks[IMX8MQ_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mq_i2c4_sels, base + 0xae80);
-	clks[IMX8MQ_CLK_UART1] = imx8m_clk_composite("uart1", imx8mq_uart1_sels, base + 0xaf00);
-	clks[IMX8MQ_CLK_UART2] = imx8m_clk_composite("uart2", imx8mq_uart2_sels, base + 0xaf80);
-	clks[IMX8MQ_CLK_UART3] = imx8m_clk_composite("uart3", imx8mq_uart3_sels, base + 0xb000);
-	clks[IMX8MQ_CLK_UART4] = imx8m_clk_composite("uart4", imx8mq_uart4_sels, base + 0xb080);
-	clks[IMX8MQ_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mq_usb_core_sels, base + 0xb100);
-	clks[IMX8MQ_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mq_usb_phy_sels, base + 0xb180);
-	clks[IMX8MQ_CLK_GIC] = imx8m_clk_composite_critical("gic", imx8mq_gic_sels, base + 0xb200);
-	clks[IMX8MQ_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mq_ecspi1_sels, base + 0xb280);
-	clks[IMX8MQ_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mq_ecspi2_sels, base + 0xb300);
-	clks[IMX8MQ_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mq_pwm1_sels, base + 0xb380);
-	clks[IMX8MQ_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mq_pwm2_sels, base + 0xb400);
-	clks[IMX8MQ_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mq_pwm3_sels, base + 0xb480);
-	clks[IMX8MQ_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mq_pwm4_sels, base + 0xb500);
-	clks[IMX8MQ_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mq_gpt1_sels, base + 0xb580);
-	clks[IMX8MQ_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mq_wdog_sels, base + 0xb900);
-	clks[IMX8MQ_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mq_wrclk_sels, base + 0xb980);
-	clks[IMX8MQ_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mq_clko1_sels, base + 0xba00);
-	clks[IMX8MQ_CLK_CLKO2] = imx8m_clk_composite("clko2", imx8mq_clko2_sels, base + 0xba80);
-	clks[IMX8MQ_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mq_dsi_core_sels, base + 0xbb00);
-	clks[IMX8MQ_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mq_dsi_phy_sels, base + 0xbb80);
-	clks[IMX8MQ_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mq_dsi_dbi_sels, base + 0xbc00);
-	clks[IMX8MQ_CLK_DSI_ESC] = imx8m_clk_composite("dsi_esc", imx8mq_dsi_esc_sels, base + 0xbc80);
-	clks[IMX8MQ_CLK_DSI_AHB] = imx8m_clk_composite("dsi_ahb", imx8mq_dsi_ahb_sels, base + 0x9200);
-	clks[IMX8MQ_CLK_DSI_IPG_DIV] = imx_clk_divider2("dsi_ipg_div", "dsi_ahb", base + 0x9280, 0, 6);
-	clks[IMX8MQ_CLK_CSI1_CORE] = imx8m_clk_composite("csi1_core", imx8mq_csi1_core_sels, base + 0xbd00);
-	clks[IMX8MQ_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mq_csi1_phy_sels, base + 0xbd80);
-	clks[IMX8MQ_CLK_CSI1_ESC] = imx8m_clk_composite("csi1_esc", imx8mq_csi1_esc_sels, base + 0xbe00);
-	clks[IMX8MQ_CLK_CSI2_CORE] = imx8m_clk_composite("csi2_core", imx8mq_csi2_core_sels, base + 0xbe80);
-	clks[IMX8MQ_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mq_csi2_phy_sels, base + 0xbf00);
-	clks[IMX8MQ_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mq_csi2_esc_sels, base + 0xbf80);
-	clks[IMX8MQ_CLK_PCIE2_CTRL] = imx8m_clk_composite("pcie2_ctrl", imx8mq_pcie2_ctrl_sels, base + 0xc000);
-	clks[IMX8MQ_CLK_PCIE2_PHY] = imx8m_clk_composite("pcie2_phy", imx8mq_pcie2_phy_sels, base + 0xc080);
-	clks[IMX8MQ_CLK_PCIE2_AUX] = imx8m_clk_composite("pcie2_aux", imx8mq_pcie2_aux_sels, base + 0xc100);
-	clks[IMX8MQ_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mq_ecspi3_sels, base + 0xc180);
+	hws[IMX8MQ_CLK_ECSPI1_ROOT] = imx_clk_hw_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
+	hws[IMX8MQ_CLK_ECSPI2_ROOT] = imx_clk_hw_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
+	hws[IMX8MQ_CLK_ECSPI3_ROOT] = imx_clk_hw_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
+	hws[IMX8MQ_CLK_ENET1_ROOT] = imx_clk_hw_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+	hws[IMX8MQ_CLK_GPIO1_ROOT] = imx_clk_hw_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
+	hws[IMX8MQ_CLK_GPIO2_ROOT] = imx_clk_hw_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
+	hws[IMX8MQ_CLK_GPIO3_ROOT] = imx_clk_hw_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
+	hws[IMX8MQ_CLK_GPIO4_ROOT] = imx_clk_hw_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
+	hws[IMX8MQ_CLK_GPIO5_ROOT] = imx_clk_hw_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
+	hws[IMX8MQ_CLK_GPT1_ROOT] = imx_clk_hw_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
+	hws[IMX8MQ_CLK_I2C1_ROOT] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
+	hws[IMX8MQ_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
+	hws[IMX8MQ_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
+	hws[IMX8MQ_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
+	hws[IMX8MQ_CLK_MU_ROOT] = imx_clk_hw_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
+	hws[IMX8MQ_CLK_OCOTP_ROOT] = imx_clk_hw_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
+	hws[IMX8MQ_CLK_PCIE1_ROOT] = imx_clk_hw_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
+	hws[IMX8MQ_CLK_PCIE2_ROOT] = imx_clk_hw_gate4("pcie2_root_clk", "pcie2_ctrl", base + 0x4640, 0);
+	hws[IMX8MQ_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
+	hws[IMX8MQ_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
+	hws[IMX8MQ_CLK_PWM3_ROOT] = imx_clk_hw_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
+	hws[IMX8MQ_CLK_PWM4_ROOT] = imx_clk_hw_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
+	hws[IMX8MQ_CLK_QSPI_ROOT] = imx_clk_hw_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
+	hws[IMX8MQ_CLK_RAWNAND_ROOT] = imx_clk_hw_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
+	hws[IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
+	hws[IMX8MQ_CLK_SAI1_ROOT] = imx_clk_hw_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
+	hws[IMX8MQ_CLK_SAI1_IPG] = imx_clk_hw_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
+	hws[IMX8MQ_CLK_SAI2_ROOT] = imx_clk_hw_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
+	hws[IMX8MQ_CLK_SAI2_IPG] = imx_clk_hw_gate2_shared2("sai2_ipg_clk", "ipg_root", base + 0x4340, 0, &share_count_sai2);
+	hws[IMX8MQ_CLK_SAI3_ROOT] = imx_clk_hw_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
+	hws[IMX8MQ_CLK_SAI3_IPG] = imx_clk_hw_gate2_shared2("sai3_ipg_clk", "ipg_root", base + 0x4350, 0, &share_count_sai3);
+	hws[IMX8MQ_CLK_SAI4_ROOT] = imx_clk_hw_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
+	hws[IMX8MQ_CLK_SAI4_IPG] = imx_clk_hw_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
+	hws[IMX8MQ_CLK_SAI5_ROOT] = imx_clk_hw_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
+	hws[IMX8MQ_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
+	hws[IMX8MQ_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
+	hws[IMX8MQ_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+	hws[IMX8MQ_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
+	hws[IMX8MQ_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
+	hws[IMX8MQ_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
+	hws[IMX8MQ_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
+	hws[IMX8MQ_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
+	hws[IMX8MQ_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
+	hws[IMX8MQ_CLK_USB2_CTRL_ROOT] = imx_clk_hw_gate4("usb2_ctrl_root_clk", "usb_bus", base + 0x44e0, 0);
+	hws[IMX8MQ_CLK_USB1_PHY_ROOT] = imx_clk_hw_gate4("usb1_phy_root_clk", "usb_phy_ref", base + 0x44f0, 0);
+	hws[IMX8MQ_CLK_USB2_PHY_ROOT] = imx_clk_hw_gate4("usb2_phy_root_clk", "usb_phy_ref", base + 0x4500, 0);
+	hws[IMX8MQ_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
+	hws[IMX8MQ_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
+	hws[IMX8MQ_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
+	hws[IMX8MQ_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
+	hws[IMX8MQ_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
+	hws[IMX8MQ_CLK_VPU_G1_ROOT] = imx_clk_hw_gate2_flags("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+	hws[IMX8MQ_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_core_div", base + 0x4570, 0);
+	hws[IMX8MQ_CLK_VPU_G2_ROOT] = imx_clk_hw_gate2_flags("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+	hws[IMX8MQ_CLK_DISP_ROOT] = imx_clk_hw_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss);
+	hws[IMX8MQ_CLK_DISP_AXI_ROOT]  = imx_clk_hw_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss);
+	hws[IMX8MQ_CLK_DISP_APB_ROOT]  = imx_clk_hw_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_dcss);
+	hws[IMX8MQ_CLK_DISP_RTRM_ROOT] = imx_clk_hw_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_dcss);
+	hws[IMX8MQ_CLK_TMU_ROOT] = imx_clk_hw_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
+	hws[IMX8MQ_CLK_VPU_DEC_ROOT] = imx_clk_hw_gate2_flags("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+	hws[IMX8MQ_CLK_CSI1_ROOT] = imx_clk_hw_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
+	hws[IMX8MQ_CLK_CSI2_ROOT] = imx_clk_hw_gate4("csi2_root_clk", "csi2_core", base + 0x4660, 0);
+	hws[IMX8MQ_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
+	hws[IMX8MQ_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
 
-	clks[IMX8MQ_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
-	clks[IMX8MQ_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
-	clks[IMX8MQ_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
-	clks[IMX8MQ_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
-	clks[IMX8MQ_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
-	clks[IMX8MQ_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
-	clks[IMX8MQ_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
-	clks[IMX8MQ_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
-	clks[IMX8MQ_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
-	clks[IMX8MQ_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
-	clks[IMX8MQ_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
-	clks[IMX8MQ_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
-	clks[IMX8MQ_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
-	clks[IMX8MQ_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
-	clks[IMX8MQ_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
-	clks[IMX8MQ_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
-	clks[IMX8MQ_CLK_PCIE1_ROOT] = imx_clk_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
-	clks[IMX8MQ_CLK_PCIE2_ROOT] = imx_clk_gate4("pcie2_root_clk", "pcie2_ctrl", base + 0x4640, 0);
-	clks[IMX8MQ_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
-	clks[IMX8MQ_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
-	clks[IMX8MQ_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
-	clks[IMX8MQ_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
-	clks[IMX8MQ_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
-	clks[IMX8MQ_CLK_RAWNAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
-	clks[IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
-	clks[IMX8MQ_CLK_SAI1_ROOT] = imx_clk_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
-	clks[IMX8MQ_CLK_SAI1_IPG] = imx_clk_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
-	clks[IMX8MQ_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
-	clks[IMX8MQ_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_root", base + 0x4340, 0, &share_count_sai2);
-	clks[IMX8MQ_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
-	clks[IMX8MQ_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_root", base + 0x4350, 0, &share_count_sai3);
-	clks[IMX8MQ_CLK_SAI4_ROOT] = imx_clk_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
-	clks[IMX8MQ_CLK_SAI4_IPG] = imx_clk_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
-	clks[IMX8MQ_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
-	clks[IMX8MQ_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
-	clks[IMX8MQ_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
-	clks[IMX8MQ_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
-	clks[IMX8MQ_CLK_SNVS_ROOT] = imx_clk_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
-	clks[IMX8MQ_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
-	clks[IMX8MQ_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
-	clks[IMX8MQ_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
-	clks[IMX8MQ_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
-	clks[IMX8MQ_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
-	clks[IMX8MQ_CLK_USB2_CTRL_ROOT] = imx_clk_gate4("usb2_ctrl_root_clk", "usb_bus", base + 0x44e0, 0);
-	clks[IMX8MQ_CLK_USB1_PHY_ROOT] = imx_clk_gate4("usb1_phy_root_clk", "usb_phy_ref", base + 0x44f0, 0);
-	clks[IMX8MQ_CLK_USB2_PHY_ROOT] = imx_clk_gate4("usb2_phy_root_clk", "usb_phy_ref", base + 0x4500, 0);
-	clks[IMX8MQ_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
-	clks[IMX8MQ_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
-	clks[IMX8MQ_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
-	clks[IMX8MQ_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
-	clks[IMX8MQ_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
-	clks[IMX8MQ_CLK_VPU_G1_ROOT] = imx_clk_gate2_flags("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
-	clks[IMX8MQ_CLK_GPU_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_core_div", base + 0x4570, 0);
-	clks[IMX8MQ_CLK_VPU_G2_ROOT] = imx_clk_gate2_flags("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
-	clks[IMX8MQ_CLK_DISP_ROOT] = imx_clk_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss);
-	clks[IMX8MQ_CLK_DISP_AXI_ROOT]  = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss);
-	clks[IMX8MQ_CLK_DISP_APB_ROOT]  = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_dcss);
-	clks[IMX8MQ_CLK_DISP_RTRM_ROOT] = imx_clk_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_dcss);
-	clks[IMX8MQ_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
-	clks[IMX8MQ_CLK_VPU_DEC_ROOT] = imx_clk_gate2_flags("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
-	clks[IMX8MQ_CLK_CSI1_ROOT] = imx_clk_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
-	clks[IMX8MQ_CLK_CSI2_ROOT] = imx_clk_gate4("csi2_root_clk", "csi2_core", base + 0x4660, 0);
-	clks[IMX8MQ_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
-	clks[IMX8MQ_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
+	hws[IMX8MQ_GPT_3M_CLK] = imx_clk_hw_fixed_factor("gpt_3m", "osc_25m", 1, 8);
+	hws[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
 
-	clks[IMX8MQ_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc_25m", 1, 8);
-	clks[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+	hws[IMX8MQ_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
+					   hws[IMX8MQ_CLK_A53_DIV]->clk,
+					   hws[IMX8MQ_CLK_A53_SRC]->clk,
+					   hws[IMX8MQ_ARM_PLL_OUT]->clk,
+					   hws[IMX8MQ_SYS1_PLL_800M]->clk);
 
-	clks[IMX8MQ_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
-					   clks[IMX8MQ_CLK_A53_DIV],
-					   clks[IMX8MQ_CLK_A53_SRC],
-					   clks[IMX8MQ_ARM_PLL_OUT],
-					   clks[IMX8MQ_SYS1_PLL_800M]);
+	imx_check_clk_hws(hws, IMX8MQ_CLK_END);
 
-	imx_check_clocks(clks, ARRAY_SIZE(clks));
-
-	clk_data.clks = clks;
-	clk_data.clk_num = ARRAY_SIZE(clks);
-
-	err = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+	err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
 	if (err < 0) {
-		dev_err(dev, "failed to register clks for i.MX8MQ\n");
-		goto unregister_clks;
+		dev_err(dev, "failed to register hws for i.MX8MQ\n");
+		goto unregister_hws;
 	}
 
-	imx_register_uart_clocks(uart_clks);
+	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+		int index = uart_clk_ids[i];
+
+		uart_hws[i] = &hws[index]->clk;
+	}
+
+	imx_register_uart_clocks(uart_hws);
 
 	return 0;
 
-unregister_clks:
-	imx_unregister_clocks(clks, ARRAY_SIZE(clks));
+unregister_hws:
+	imx_unregister_hw_clocks(hws, IMX8MQ_CLK_END);
 
 	return err;
 }
@@ -609,6 +624,11 @@ static struct platform_driver imx8mq_clk_driver = {
 	.probe = imx8mq_clocks_probe,
 	.driver = {
 		.name = "imx8mq-ccm",
+		/*
+		 * Disable bind attributes: clocks are not removed and
+		 * reloading the driver will crash or break devices.
+		 */
+		.suppress_bind_attrs = true,
 		.of_match_table = of_match_ptr(imx8mq_clk_of_match),
 	},
 };
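
The imx8mq probe above now publishes an array of struct clk_hw pointers through
a clk_hw_onecell_data provider instead of a struct clk array. Below is a minimal
sketch of that registration pattern; my_hw_data, my_probe_tail() and MY_CLK_END
are placeholder names for illustration only and are not identifiers from this
driver.

#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/overflow.h>
#include <linux/slab.h>

#define MY_CLK_END	4	/* placeholder clock count */

static struct clk_hw_onecell_data *my_hw_data;

static int my_probe_tail(struct device_node *np)
{
	struct clk_hw **hws;
	int err;

	/* allocate the header plus MY_CLK_END hw pointers in one go */
	my_hw_data = kzalloc(struct_size(my_hw_data, hws, MY_CLK_END),
			     GFP_KERNEL);
	if (!my_hw_data)
		return -ENOMEM;

	my_hw_data->num = MY_CLK_END;
	hws = my_hw_data->hws;

	/* hws[0..MY_CLK_END-1] would be filled with imx_clk_hw_*() results */

	err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, my_hw_data);
	if (err < 0)
		kfree(my_hw_data);

	return err;
}
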
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index c0aff7c..04c8ee3 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -173,6 +173,17 @@ static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
 	if (!ss_lpcg)
 		return -ENODEV;
 
+	/*
+	 * Please don't replace this with devm_platform_ioremap_resource.
+	 *
+	 * devm_platform_ioremap_resource calls devm_ioremap_resource which
+	 * differs from devm_ioremap by also calling devm_request_mem_region
+	 * and preventing other mappings in the same area.
+	 *
+	 * On imx8 the LPCG nodes map entire subsystems and overlap
+	 * peripherals; this means that using devm_platform_ioremap_resource
+	 * will cause many devices, including serial ports, to fail to probe.
+	 */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res)
 		return -EINVAL;
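
The comment added above turns on the difference between
devm_platform_ioremap_resource(), which requests the memory region before
mapping it, and a plain devm_ioremap(), which only maps. A hedged sketch of the
mapping step that leaves overlapping LPCG windows available to other drivers;
lpcg_map() is an illustrative name, not a function in this file.

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *lpcg_map(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	/*
	 * devm_ioremap() does not call devm_request_mem_region(), so other
	 * drivers whose registers fall inside the same LPCG window can still
	 * map their own sub-ranges.
	 */
	return devm_ioremap(&pdev->dev, res->start, resource_size(res));
}
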
diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
index a03bbed..de93ce7 100644
--- a/drivers/clk/imx/clk-pfdv2.c
+++ b/drivers/clk/imx/clk-pfdv2.c
@@ -166,7 +166,7 @@ static const struct clk_ops clk_pfdv2_ops = {
 	.is_enabled     = clk_pfdv2_is_enabled,
 };
 
-struct clk_hw *imx_clk_pfdv2(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
 			     void __iomem *reg, u8 idx)
 {
 	struct clk_init_data init;
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index 3636c80..5b0519a 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -67,6 +67,13 @@ struct imx_pll14xx_clk imx_1443x_pll = {
 	.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
 };
 
+struct imx_pll14xx_clk imx_1443x_dram_pll = {
+	.type = PLL_1443X,
+	.rate_table = imx_pll1443x_tbl,
+	.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
+	.flags = CLK_GET_RATE_NOCACHE,
+};
+
 struct imx_pll14xx_clk imx_1416x_pll = {
 	.type = PLL_1416X,
 	.rate_table = imx_pll1416x_tbl,
@@ -369,13 +376,14 @@ static const struct clk_ops clk_pll1443x_ops = {
 	.set_rate	= clk_pll1443x_set_rate,
 };
 
-struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
-			    void __iomem *base,
-			    const struct imx_pll14xx_clk *pll_clk)
+struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
+				  void __iomem *base,
+				  const struct imx_pll14xx_clk *pll_clk)
 {
 	struct clk_pll14xx *pll;
-	struct clk *clk;
+	struct clk_hw *hw;
 	struct clk_init_data init;
+	int ret;
 	u32 val;
 
 	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
@@ -412,12 +420,15 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
 	val &= ~BYPASS_MASK;
 	writel_relaxed(val, pll->base + GNRL_CTL);
 
-	clk = clk_register(NULL, &pll->hw);
-	if (IS_ERR(clk)) {
-		pr_err("%s: failed to register pll %s %lu\n",
-			__func__, name, PTR_ERR(clk));
+	hw = &pll->hw;
+
+	ret = clk_hw_register(NULL, hw);
+	if (ret) {
+		pr_err("%s: failed to register pll %s %d\n",
+			__func__, name, ret);
 		kfree(pll);
+		return ERR_PTR(ret);
 	}
 
-	return clk;
+	return hw;
 }
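
The clk_hw_register() error handling introduced for pll14xx above is repeated
almost verbatim in the pllv1 and pllv2 conversions below: on failure, free the
allocation and return the errno wrapped in ERR_PTR(); on success, return the
embedded clk_hw. A stripped-down sketch under assumed names (struct my_pll,
clk_my_pll_ops, my_clk_hw_pll), not the driver's own code:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>

struct my_pll {
	struct clk_hw hw;
	void __iomem *base;
};

static const struct clk_ops clk_my_pll_ops;	/* placeholder ops table */

struct clk_hw *my_clk_hw_pll(const char *name, const char *parent,
			     void __iomem *base)
{
	struct clk_init_data init = { };
	struct my_pll *pll;
	int ret;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_my_pll_ops;
	init.parent_names = &parent;
	init.num_parents = 1;

	pll->base = base;
	pll->hw.init = &init;

	/* register the embedded clk_hw; clean up and propagate errors */
	ret = clk_hw_register(NULL, &pll->hw);
	if (ret) {
		kfree(pll);
		return ERR_PTR(ret);
	}

	return &pll->hw;
}
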
diff --git a/drivers/clk/imx/clk-pllv1.c b/drivers/clk/imx/clk-pllv1.c
index 4ba9973..de4f8a4 100644
--- a/drivers/clk/imx/clk-pllv1.c
+++ b/drivers/clk/imx/clk-pllv1.c
@@ -111,12 +111,13 @@ static const struct clk_ops clk_pllv1_ops = {
 	.recalc_rate = clk_pllv1_recalc_rate,
 };
 
-struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name,
+struct clk_hw *imx_clk_hw_pllv1(enum imx_pllv1_type type, const char *name,
 		const char *parent, void __iomem *base)
 {
 	struct clk_pllv1 *pll;
-	struct clk *clk;
+	struct clk_hw *hw;
 	struct clk_init_data init;
+	int ret;
 
 	pll = kmalloc(sizeof(*pll), GFP_KERNEL);
 	if (!pll)
@@ -132,10 +133,13 @@ struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name,
 	init.num_parents = 1;
 
 	pll->hw.init = &init;
+	hw = &pll->hw;
 
-	clk = clk_register(NULL, &pll->hw);
-	if (IS_ERR(clk))
+	ret = clk_hw_register(NULL, hw);
+	if (ret) {
 		kfree(pll);
+		return ERR_PTR(ret);
+	}
 
-	return clk;
+	return hw;
 }
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index eeba3cb..ff17f06 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -239,12 +239,13 @@ static const struct clk_ops clk_pllv2_ops = {
 	.set_rate = clk_pllv2_set_rate,
 };
 
-struct clk *imx_clk_pllv2(const char *name, const char *parent,
+struct clk_hw *imx_clk_hw_pllv2(const char *name, const char *parent,
 		void __iomem *base)
 {
 	struct clk_pllv2 *pll;
-	struct clk *clk;
+	struct clk_hw *hw;
 	struct clk_init_data init;
+	int ret;
 
 	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
 	if (!pll)
@@ -259,10 +260,13 @@ struct clk *imx_clk_pllv2(const char *name, const char *parent,
 	init.num_parents = 1;
 
 	pll->hw.init = &init;
+	hw = &pll->hw;
 
-	clk = clk_register(NULL, &pll->hw);
-	if (IS_ERR(clk))
+	ret = clk_hw_register(NULL, hw);
+	if (ret) {
 		kfree(pll);
+		return ERR_PTR(ret);
+	}
 
-	return clk;
+	return hw;
 }
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index 8155b12..f51a800 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -206,7 +206,7 @@ static const struct clk_ops clk_pllv4_ops = {
 	.is_enabled	= clk_pllv4_is_enabled,
 };
 
-struct clk_hw *imx_clk_pllv4(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
 			  void __iomem *base)
 {
 	struct clk_pllv4 *pll;
diff --git a/drivers/clk/imx/clk-sccg-pll.c b/drivers/clk/imx/clk-sccg-pll.c
deleted file mode 100644
index 5d65f65..0000000
--- a/drivers/clk/imx/clk-sccg-pll.c
+++ /dev/null
@@ -1,549 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/*
- * Copyright 2018 NXP.
- *
- * This driver supports the SCCG plls found in the imx8m SOCs
- *
- * Documentation for this SCCG pll can be found at:
- *   https://www.nxp.com/docs/en/reference-manual/IMX8MDQLQRM.pdf#page=834
- */
-
-#include <linux/clk-provider.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/slab.h>
-#include <linux/bitfield.h>
-
-#include "clk.h"
-
-/* PLL CFGs */
-#define PLL_CFG0		0x0
-#define PLL_CFG1		0x4
-#define PLL_CFG2		0x8
-
-#define PLL_DIVF1_MASK		GENMASK(18, 13)
-#define PLL_DIVF2_MASK		GENMASK(12, 7)
-#define PLL_DIVR1_MASK		GENMASK(27, 25)
-#define PLL_DIVR2_MASK		GENMASK(24, 19)
-#define PLL_DIVQ_MASK           GENMASK(6, 1)
-#define PLL_REF_MASK		GENMASK(2, 0)
-
-#define PLL_LOCK_MASK		BIT(31)
-#define PLL_PD_MASK		BIT(7)
-
-/* These are the specification limits for the SSCG PLL */
-#define PLL_REF_MIN_FREQ		25000000UL
-#define PLL_REF_MAX_FREQ		235000000UL
-
-#define PLL_STAGE1_MIN_FREQ		1600000000UL
-#define PLL_STAGE1_MAX_FREQ		2400000000UL
-
-#define PLL_STAGE1_REF_MIN_FREQ		25000000UL
-#define PLL_STAGE1_REF_MAX_FREQ		54000000UL
-
-#define PLL_STAGE2_MIN_FREQ		1200000000UL
-#define PLL_STAGE2_MAX_FREQ		2400000000UL
-
-#define PLL_STAGE2_REF_MIN_FREQ		54000000UL
-#define PLL_STAGE2_REF_MAX_FREQ		75000000UL
-
-#define PLL_OUT_MIN_FREQ		20000000UL
-#define PLL_OUT_MAX_FREQ		1200000000UL
-
-#define PLL_DIVR1_MAX			7
-#define PLL_DIVR2_MAX			63
-#define PLL_DIVF1_MAX			63
-#define PLL_DIVF2_MAX			63
-#define PLL_DIVQ_MAX			63
-
-#define PLL_BYPASS_NONE			0x0
-#define PLL_BYPASS1			0x2
-#define PLL_BYPASS2			0x1
-
-#define SSCG_PLL_BYPASS1_MASK           BIT(5)
-#define SSCG_PLL_BYPASS2_MASK           BIT(4)
-#define SSCG_PLL_BYPASS_MASK		GENMASK(5, 4)
-
-#define PLL_SCCG_LOCK_TIMEOUT		70
-
-struct clk_sccg_pll_setup {
-	int divr1, divf1;
-	int divr2, divf2;
-	int divq;
-	int bypass;
-
-	uint64_t vco1;
-	uint64_t vco2;
-	uint64_t fout;
-	uint64_t ref;
-	uint64_t ref_div1;
-	uint64_t ref_div2;
-	uint64_t fout_request;
-	int fout_error;
-};
-
-struct clk_sccg_pll {
-	struct clk_hw	hw;
-	const struct clk_ops  ops;
-
-	void __iomem *base;
-
-	struct clk_sccg_pll_setup setup;
-
-	u8 parent;
-	u8 bypass1;
-	u8 bypass2;
-};
-
-#define to_clk_sccg_pll(_hw) container_of(_hw, struct clk_sccg_pll, hw)
-
-static int clk_sccg_pll_wait_lock(struct clk_sccg_pll *pll)
-{
-	u32 val;
-
-	val = readl_relaxed(pll->base + PLL_CFG0);
-
-	/* don't wait for lock if all plls are bypassed */
-	if (!(val & SSCG_PLL_BYPASS2_MASK))
-		return readl_poll_timeout(pll->base, val, val & PLL_LOCK_MASK,
-						0, PLL_SCCG_LOCK_TIMEOUT);
-
-	return 0;
-}
-
-static int clk_sccg_pll2_check_match(struct clk_sccg_pll_setup *setup,
-					struct clk_sccg_pll_setup *temp_setup)
-{
-	int new_diff = temp_setup->fout - temp_setup->fout_request;
-	int diff = temp_setup->fout_error;
-
-	if (abs(diff) > abs(new_diff)) {
-		temp_setup->fout_error = new_diff;
-		memcpy(setup, temp_setup, sizeof(struct clk_sccg_pll_setup));
-
-		if (temp_setup->fout_request == temp_setup->fout)
-			return 0;
-	}
-	return -1;
-}
-
-static int clk_sccg_divq_lookup(struct clk_sccg_pll_setup *setup,
-				struct clk_sccg_pll_setup *temp_setup)
-{
-	int ret = -EINVAL;
-
-	for (temp_setup->divq = 0; temp_setup->divq <= PLL_DIVQ_MAX;
-	     temp_setup->divq++) {
-		temp_setup->vco2 = temp_setup->vco1;
-		do_div(temp_setup->vco2, temp_setup->divr2 + 1);
-		temp_setup->vco2 *= 2;
-		temp_setup->vco2 *= temp_setup->divf2 + 1;
-		if (temp_setup->vco2 >= PLL_STAGE2_MIN_FREQ &&
-				temp_setup->vco2 <= PLL_STAGE2_MAX_FREQ) {
-			temp_setup->fout = temp_setup->vco2;
-			do_div(temp_setup->fout, 2 * (temp_setup->divq + 1));
-
-			ret = clk_sccg_pll2_check_match(setup, temp_setup);
-			if (!ret) {
-				temp_setup->bypass = PLL_BYPASS1;
-				return ret;
-			}
-		}
-	}
-
-	return ret;
-}
-
-static int clk_sccg_divf2_lookup(struct clk_sccg_pll_setup *setup,
-					struct clk_sccg_pll_setup *temp_setup)
-{
-	int ret = -EINVAL;
-
-	for (temp_setup->divf2 = 0; temp_setup->divf2 <= PLL_DIVF2_MAX;
-	     temp_setup->divf2++) {
-		ret = clk_sccg_divq_lookup(setup, temp_setup);
-		if (!ret)
-			return ret;
-	}
-
-	return ret;
-}
-
-static int clk_sccg_divr2_lookup(struct clk_sccg_pll_setup *setup,
-				struct clk_sccg_pll_setup *temp_setup)
-{
-	int ret = -EINVAL;
-
-	for (temp_setup->divr2 = 0; temp_setup->divr2 <= PLL_DIVR2_MAX;
-	     temp_setup->divr2++) {
-		temp_setup->ref_div2 = temp_setup->vco1;
-		do_div(temp_setup->ref_div2, temp_setup->divr2 + 1);
-		if (temp_setup->ref_div2 >= PLL_STAGE2_REF_MIN_FREQ &&
-		    temp_setup->ref_div2 <= PLL_STAGE2_REF_MAX_FREQ) {
-			ret = clk_sccg_divf2_lookup(setup, temp_setup);
-			if (!ret)
-				return ret;
-		}
-	}
-
-	return ret;
-}
-
-static int clk_sccg_pll2_find_setup(struct clk_sccg_pll_setup *setup,
-					struct clk_sccg_pll_setup *temp_setup,
-					uint64_t ref)
-{
-
-	int ret = -EINVAL;
-
-	if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ)
-		return ret;
-
-	temp_setup->vco1 = ref;
-
-	ret = clk_sccg_divr2_lookup(setup, temp_setup);
-	return ret;
-}
-
-static int clk_sccg_divf1_lookup(struct clk_sccg_pll_setup *setup,
-				struct clk_sccg_pll_setup *temp_setup)
-{
-	int ret = -EINVAL;
-
-	for (temp_setup->divf1 = 0; temp_setup->divf1 <= PLL_DIVF1_MAX;
-	     temp_setup->divf1++) {
-		uint64_t vco1 = temp_setup->ref;
-
-		do_div(vco1, temp_setup->divr1 + 1);
-		vco1 *= 2;
-		vco1 *= temp_setup->divf1 + 1;
-
-		ret = clk_sccg_pll2_find_setup(setup, temp_setup, vco1);
-		if (!ret) {
-			temp_setup->bypass = PLL_BYPASS_NONE;
-			return ret;
-		}
-	}
-
-	return ret;
-}
-
-static int clk_sccg_divr1_lookup(struct clk_sccg_pll_setup *setup,
-				struct clk_sccg_pll_setup *temp_setup)
-{
-	int ret = -EINVAL;
-
-	for (temp_setup->divr1 = 0; temp_setup->divr1 <= PLL_DIVR1_MAX;
-	     temp_setup->divr1++) {
-		temp_setup->ref_div1 = temp_setup->ref;
-		do_div(temp_setup->ref_div1, temp_setup->divr1 + 1);
-		if (temp_setup->ref_div1 >= PLL_STAGE1_REF_MIN_FREQ &&
-		    temp_setup->ref_div1 <= PLL_STAGE1_REF_MAX_FREQ) {
-			ret = clk_sccg_divf1_lookup(setup, temp_setup);
-			if (!ret)
-				return ret;
-		}
-	}
-
-	return ret;
-}
-
-static int clk_sccg_pll1_find_setup(struct clk_sccg_pll_setup *setup,
-					struct clk_sccg_pll_setup *temp_setup,
-					uint64_t ref)
-{
-
-	int ret = -EINVAL;
-
-	if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ)
-		return ret;
-
-	temp_setup->ref = ref;
-
-	ret = clk_sccg_divr1_lookup(setup, temp_setup);
-
-	return ret;
-}
-
-static int clk_sccg_pll_find_setup(struct clk_sccg_pll_setup *setup,
-					uint64_t prate,
-					uint64_t rate, int try_bypass)
-{
-	struct clk_sccg_pll_setup temp_setup;
-	int ret = -EINVAL;
-
-	memset(&temp_setup, 0, sizeof(struct clk_sccg_pll_setup));
-	memset(setup, 0, sizeof(struct clk_sccg_pll_setup));
-
-	temp_setup.fout_error = PLL_OUT_MAX_FREQ;
-	temp_setup.fout_request = rate;
-
-	switch (try_bypass) {
-
-	case PLL_BYPASS2:
-		if (prate == rate) {
-			setup->bypass = PLL_BYPASS2;
-			setup->fout = rate;
-			ret = 0;
-		}
-		break;
-
-	case PLL_BYPASS1:
-		ret = clk_sccg_pll2_find_setup(setup, &temp_setup, prate);
-		break;
-
-	case PLL_BYPASS_NONE:
-		ret = clk_sccg_pll1_find_setup(setup, &temp_setup, prate);
-		break;
-	}
-
-	return ret;
-}
-
-
-static int clk_sccg_pll_is_prepared(struct clk_hw *hw)
-{
-	struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-
-	u32 val = readl_relaxed(pll->base + PLL_CFG0);
-
-	return (val & PLL_PD_MASK) ? 0 : 1;
-}
-
-static int clk_sccg_pll_prepare(struct clk_hw *hw)
-{
-	struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-	u32 val;
-
-	val = readl_relaxed(pll->base + PLL_CFG0);
-	val &= ~PLL_PD_MASK;
-	writel_relaxed(val, pll->base + PLL_CFG0);
-
-	return clk_sccg_pll_wait_lock(pll);
-}
-
-static void clk_sccg_pll_unprepare(struct clk_hw *hw)
-{
-	struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-	u32 val;
-
-	val = readl_relaxed(pll->base + PLL_CFG0);
-	val |= PLL_PD_MASK;
-	writel_relaxed(val, pll->base + PLL_CFG0);
-}
-
-static unsigned long clk_sccg_pll_recalc_rate(struct clk_hw *hw,
-					 unsigned long parent_rate)
-{
-	struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-	u32 val, divr1, divf1, divr2, divf2, divq;
-	u64 temp64;
-
-	val = readl_relaxed(pll->base + PLL_CFG2);
-	divr1 = FIELD_GET(PLL_DIVR1_MASK, val);
-	divr2 = FIELD_GET(PLL_DIVR2_MASK, val);
-	divf1 = FIELD_GET(PLL_DIVF1_MASK, val);
-	divf2 = FIELD_GET(PLL_DIVF2_MASK, val);
-	divq = FIELD_GET(PLL_DIVQ_MASK, val);
-
-	temp64 = parent_rate;
-
-	val = readl(pll->base + PLL_CFG0);
-	if (val & SSCG_PLL_BYPASS2_MASK) {
-		temp64 = parent_rate;
-	} else if (val & SSCG_PLL_BYPASS1_MASK) {
-		temp64 *= divf2;
-		do_div(temp64, (divr2 + 1) * (divq + 1));
-	} else {
-		temp64 *= 2;
-		temp64 *= (divf1 + 1) * (divf2 + 1);
-		do_div(temp64, (divr1 + 1) * (divr2 + 1) * (divq + 1));
-	}
-
-	return temp64;
-}
-
-static int clk_sccg_pll_set_rate(struct clk_hw *hw, unsigned long rate,
-			    unsigned long parent_rate)
-{
-	struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-	struct clk_sccg_pll_setup *setup = &pll->setup;
-	u32 val;
-
-	/* set bypass here too since the parent might be the same */
-	val = readl(pll->base + PLL_CFG0);
-	val &= ~SSCG_PLL_BYPASS_MASK;
-	val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, setup->bypass);
-	writel(val, pll->base + PLL_CFG0);
-
-	val = readl_relaxed(pll->base + PLL_CFG2);
-	val &= ~(PLL_DIVF1_MASK | PLL_DIVF2_MASK);
-	val &= ~(PLL_DIVR1_MASK | PLL_DIVR2_MASK | PLL_DIVQ_MASK);
-	val |= FIELD_PREP(PLL_DIVF1_MASK, setup->divf1);
-	val |= FIELD_PREP(PLL_DIVF2_MASK, setup->divf2);
-	val |= FIELD_PREP(PLL_DIVR1_MASK, setup->divr1);
-	val |= FIELD_PREP(PLL_DIVR2_MASK, setup->divr2);
-	val |= FIELD_PREP(PLL_DIVQ_MASK, setup->divq);
-	writel_relaxed(val, pll->base + PLL_CFG2);
-
-	return clk_sccg_pll_wait_lock(pll);
-}
-
-static u8 clk_sccg_pll_get_parent(struct clk_hw *hw)
-{
-	struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-	u32 val;
-	u8 ret = pll->parent;
-
-	val = readl(pll->base + PLL_CFG0);
-	if (val & SSCG_PLL_BYPASS2_MASK)
-		ret = pll->bypass2;
-	else if (val & SSCG_PLL_BYPASS1_MASK)
-		ret = pll->bypass1;
-	return ret;
-}
-
-static int clk_sccg_pll_set_parent(struct clk_hw *hw, u8 index)
-{
-	struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-	u32 val;
-
-	val = readl(pll->base + PLL_CFG0);
-	val &= ~SSCG_PLL_BYPASS_MASK;
-	val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, pll->setup.bypass);
-	writel(val, pll->base + PLL_CFG0);
-
-	return clk_sccg_pll_wait_lock(pll);
-}
-
-static int __clk_sccg_pll_determine_rate(struct clk_hw *hw,
-					struct clk_rate_request *req,
-					uint64_t min,
-					uint64_t max,
-					uint64_t rate,
-					int bypass)
-{
-	struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-	struct clk_sccg_pll_setup *setup = &pll->setup;
-	struct clk_hw *parent_hw = NULL;
-	int bypass_parent_index;
-	int ret = -EINVAL;
-
-	req->max_rate = max;
-	req->min_rate = min;
-
-	switch (bypass) {
-	case PLL_BYPASS2:
-		bypass_parent_index = pll->bypass2;
-		break;
-	case PLL_BYPASS1:
-		bypass_parent_index = pll->bypass1;
-		break;
-	default:
-		bypass_parent_index = pll->parent;
-		break;
-	}
-
-	parent_hw = clk_hw_get_parent_by_index(hw, bypass_parent_index);
-	ret = __clk_determine_rate(parent_hw, req);
-	if (!ret) {
-		ret = clk_sccg_pll_find_setup(setup, req->rate,
-						rate, bypass);
-	}
-
-	req->best_parent_hw = parent_hw;
-	req->best_parent_rate = req->rate;
-	req->rate = setup->fout;
-
-	return ret;
-}
-
-static int clk_sccg_pll_determine_rate(struct clk_hw *hw,
-				       struct clk_rate_request *req)
-{
-	struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
-	struct clk_sccg_pll_setup *setup = &pll->setup;
-	uint64_t rate = req->rate;
-	uint64_t min = req->min_rate;
-	uint64_t max = req->max_rate;
-	int ret = -EINVAL;
-
-	if (rate < PLL_OUT_MIN_FREQ || rate > PLL_OUT_MAX_FREQ)
-		return ret;
-
-	ret = __clk_sccg_pll_determine_rate(hw, req, req->rate, req->rate,
-						rate, PLL_BYPASS2);
-	if (!ret)
-		return ret;
-
-	ret = __clk_sccg_pll_determine_rate(hw, req, PLL_STAGE1_REF_MIN_FREQ,
-						PLL_STAGE1_REF_MAX_FREQ, rate,
-						PLL_BYPASS1);
-	if (!ret)
-		return ret;
-
-	ret = __clk_sccg_pll_determine_rate(hw, req, PLL_REF_MIN_FREQ,
-						PLL_REF_MAX_FREQ, rate,
-						PLL_BYPASS_NONE);
-	if (!ret)
-		return ret;
-
-	if (setup->fout >= min && setup->fout <= max)
-		ret = 0;
-
-	return ret;
-}
-
-static const struct clk_ops clk_sccg_pll_ops = {
-	.prepare	= clk_sccg_pll_prepare,
-	.unprepare	= clk_sccg_pll_unprepare,
-	.is_prepared	= clk_sccg_pll_is_prepared,
-	.recalc_rate	= clk_sccg_pll_recalc_rate,
-	.set_rate	= clk_sccg_pll_set_rate,
-	.set_parent	= clk_sccg_pll_set_parent,
-	.get_parent	= clk_sccg_pll_get_parent,
-	.determine_rate	= clk_sccg_pll_determine_rate,
-};
-
-struct clk *imx_clk_sccg_pll(const char *name,
-				const char * const *parent_names,
-				u8 num_parents,
-				u8 parent, u8 bypass1, u8 bypass2,
-				void __iomem *base,
-				unsigned long flags)
-{
-	struct clk_sccg_pll *pll;
-	struct clk_init_data init;
-	struct clk_hw *hw;
-	int ret;
-
-	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
-	if (!pll)
-		return ERR_PTR(-ENOMEM);
-
-	pll->parent = parent;
-	pll->bypass1 = bypass1;
-	pll->bypass2 = bypass2;
-
-	pll->base = base;
-	init.name = name;
-	init.ops = &clk_sccg_pll_ops;
-
-	init.flags = flags;
-	init.parent_names = parent_names;
-	init.num_parents = num_parents;
-
-	pll->base = base;
-	pll->hw.init = &init;
-
-	hw = &pll->hw;
-
-	ret = clk_hw_register(NULL, hw);
-	if (ret) {
-		kfree(pll);
-		return ERR_PTR(ret);
-	}
-
-	return hw->clk;
-}
diff --git a/drivers/clk/imx/clk-sscg-pll.c b/drivers/clk/imx/clk-sscg-pll.c
new file mode 100644
index 0000000..acd1b90
--- /dev/null
+++ b/drivers/clk/imx/clk-sscg-pll.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2018 NXP.
+ *
+ * This driver supports the SSCG PLLs found in the i.MX8M SoCs.
+ *
+ * Documentation for this SSCG PLL can be found at:
+ *   https://www.nxp.com/docs/en/reference-manual/IMX8MDQLQRM.pdf#page=834
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+
+#include "clk.h"
+
+/* PLL CFGs */
+#define PLL_CFG0		0x0
+#define PLL_CFG1		0x4
+#define PLL_CFG2		0x8
+
+#define PLL_DIVF1_MASK		GENMASK(18, 13)
+#define PLL_DIVF2_MASK		GENMASK(12, 7)
+#define PLL_DIVR1_MASK		GENMASK(27, 25)
+#define PLL_DIVR2_MASK		GENMASK(24, 19)
+#define PLL_DIVQ_MASK           GENMASK(6, 1)
+#define PLL_REF_MASK		GENMASK(2, 0)
+
+#define PLL_LOCK_MASK		BIT(31)
+#define PLL_PD_MASK		BIT(7)
+
+/* These are the specification limits for the SSCG PLL */
+#define PLL_REF_MIN_FREQ		25000000UL
+#define PLL_REF_MAX_FREQ		235000000UL
+
+#define PLL_STAGE1_MIN_FREQ		1600000000UL
+#define PLL_STAGE1_MAX_FREQ		2400000000UL
+
+#define PLL_STAGE1_REF_MIN_FREQ		25000000UL
+#define PLL_STAGE1_REF_MAX_FREQ		54000000UL
+
+#define PLL_STAGE2_MIN_FREQ		1200000000UL
+#define PLL_STAGE2_MAX_FREQ		2400000000UL
+
+#define PLL_STAGE2_REF_MIN_FREQ		54000000UL
+#define PLL_STAGE2_REF_MAX_FREQ		75000000UL
+
+#define PLL_OUT_MIN_FREQ		20000000UL
+#define PLL_OUT_MAX_FREQ		1200000000UL
+
+#define PLL_DIVR1_MAX			7
+#define PLL_DIVR2_MAX			63
+#define PLL_DIVF1_MAX			63
+#define PLL_DIVF2_MAX			63
+#define PLL_DIVQ_MAX			63
+
+#define PLL_BYPASS_NONE			0x0
+#define PLL_BYPASS1			0x2
+#define PLL_BYPASS2			0x1
+
+#define SSCG_PLL_BYPASS1_MASK           BIT(5)
+#define SSCG_PLL_BYPASS2_MASK           BIT(4)
+#define SSCG_PLL_BYPASS_MASK		GENMASK(5, 4)
+
+#define PLL_SCCG_LOCK_TIMEOUT		70
+
+struct clk_sscg_pll_setup {
+	int divr1, divf1;
+	int divr2, divf2;
+	int divq;
+	int bypass;
+
+	uint64_t vco1;
+	uint64_t vco2;
+	uint64_t fout;
+	uint64_t ref;
+	uint64_t ref_div1;
+	uint64_t ref_div2;
+	uint64_t fout_request;
+	int fout_error;
+};
+
+struct clk_sscg_pll {
+	struct clk_hw	hw;
+	const struct clk_ops  ops;
+
+	void __iomem *base;
+
+	struct clk_sscg_pll_setup setup;
+
+	u8 parent;
+	u8 bypass1;
+	u8 bypass2;
+};
+
+#define to_clk_sscg_pll(_hw) container_of(_hw, struct clk_sscg_pll, hw)
+
+static int clk_sscg_pll_wait_lock(struct clk_sscg_pll *pll)
+{
+	u32 val;
+
+	val = readl_relaxed(pll->base + PLL_CFG0);
+
+	/* don't wait for lock if all plls are bypassed */
+	if (!(val & SSCG_PLL_BYPASS2_MASK))
+		return readl_poll_timeout(pll->base, val, val & PLL_LOCK_MASK,
+						0, PLL_SCCG_LOCK_TIMEOUT);
+
+	return 0;
+}
+
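+/*
+ * Keep the candidate setup if it is closer to the requested rate than the
+ * best match found so far; return 0 only on an exact match.
+ */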
+static int clk_sscg_pll2_check_match(struct clk_sscg_pll_setup *setup,
+					struct clk_sscg_pll_setup *temp_setup)
+{
+	int new_diff = temp_setup->fout - temp_setup->fout_request;
+	int diff = temp_setup->fout_error;
+
+	if (abs(diff) > abs(new_diff)) {
+		temp_setup->fout_error = new_diff;
+		memcpy(setup, temp_setup, sizeof(struct clk_sscg_pll_setup));
+
+		if (temp_setup->fout_request == temp_setup->fout)
+			return 0;
+	}
+	return -1;
+}
+
+static int clk_sscg_divq_lookup(struct clk_sscg_pll_setup *setup,
+				struct clk_sscg_pll_setup *temp_setup)
+{
+	int ret = -EINVAL;
+
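+	/*
+	 * Stage 2: vco2 = vco1 / (divr2 + 1) * 2 * (divf2 + 1) and
+	 * fout = vco2 / (2 * (divq + 1)); pick the divq that gets
+	 * closest to the requested rate.
+	 */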
+	for (temp_setup->divq = 0; temp_setup->divq <= PLL_DIVQ_MAX;
+	     temp_setup->divq++) {
+		temp_setup->vco2 = temp_setup->vco1;
+		do_div(temp_setup->vco2, temp_setup->divr2 + 1);
+		temp_setup->vco2 *= 2;
+		temp_setup->vco2 *= temp_setup->divf2 + 1;
+		if (temp_setup->vco2 >= PLL_STAGE2_MIN_FREQ &&
+				temp_setup->vco2 <= PLL_STAGE2_MAX_FREQ) {
+			temp_setup->fout = temp_setup->vco2;
+			do_div(temp_setup->fout, 2 * (temp_setup->divq + 1));
+
+			ret = clk_sscg_pll2_check_match(setup, temp_setup);
+			if (!ret) {
+				temp_setup->bypass = PLL_BYPASS1;
+				return ret;
+			}
+		}
+	}
+
+	return ret;
+}
+
+static int clk_sscg_divf2_lookup(struct clk_sscg_pll_setup *setup,
+					struct clk_sscg_pll_setup *temp_setup)
+{
+	int ret = -EINVAL;
+
+	for (temp_setup->divf2 = 0; temp_setup->divf2 <= PLL_DIVF2_MAX;
+	     temp_setup->divf2++) {
+		ret = clk_sscg_divq_lookup(setup, temp_setup);
+		if (!ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int clk_sscg_divr2_lookup(struct clk_sscg_pll_setup *setup,
+				struct clk_sscg_pll_setup *temp_setup)
+{
+	int ret = -EINVAL;
+
+	for (temp_setup->divr2 = 0; temp_setup->divr2 <= PLL_DIVR2_MAX;
+	     temp_setup->divr2++) {
+		temp_setup->ref_div2 = temp_setup->vco1;
+		do_div(temp_setup->ref_div2, temp_setup->divr2 + 1);
+		if (temp_setup->ref_div2 >= PLL_STAGE2_REF_MIN_FREQ &&
+		    temp_setup->ref_div2 <= PLL_STAGE2_REF_MAX_FREQ) {
+			ret = clk_sscg_divf2_lookup(setup, temp_setup);
+			if (!ret)
+				return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int clk_sscg_pll2_find_setup(struct clk_sscg_pll_setup *setup,
+					struct clk_sscg_pll_setup *temp_setup,
+					uint64_t ref)
+{
+
+	int ret = -EINVAL;
+
+	if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ)
+		return ret;
+
+	temp_setup->vco1 = ref;
+
+	ret = clk_sscg_divr2_lookup(setup, temp_setup);
+	return ret;
+}
+
+static int clk_sscg_divf1_lookup(struct clk_sscg_pll_setup *setup,
+				struct clk_sscg_pll_setup *temp_setup)
+{
+	int ret = -EINVAL;
+
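+	/* Stage 1: vco1 = ref / (divr1 + 1) * 2 * (divf1 + 1) feeds stage 2 */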
+	for (temp_setup->divf1 = 0; temp_setup->divf1 <= PLL_DIVF1_MAX;
+	     temp_setup->divf1++) {
+		uint64_t vco1 = temp_setup->ref;
+
+		do_div(vco1, temp_setup->divr1 + 1);
+		vco1 *= 2;
+		vco1 *= temp_setup->divf1 + 1;
+
+		ret = clk_sscg_pll2_find_setup(setup, temp_setup, vco1);
+		if (!ret) {
+			temp_setup->bypass = PLL_BYPASS_NONE;
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int clk_sscg_divr1_lookup(struct clk_sscg_pll_setup *setup,
+				struct clk_sscg_pll_setup *temp_setup)
+{
+	int ret = -EINVAL;
+
+	for (temp_setup->divr1 = 0; temp_setup->divr1 <= PLL_DIVR1_MAX;
+	     temp_setup->divr1++) {
+		temp_setup->ref_div1 = temp_setup->ref;
+		do_div(temp_setup->ref_div1, temp_setup->divr1 + 1);
+		if (temp_setup->ref_div1 >= PLL_STAGE1_REF_MIN_FREQ &&
+		    temp_setup->ref_div1 <= PLL_STAGE1_REF_MAX_FREQ) {
+			ret = clk_sscg_divf1_lookup(setup, temp_setup);
+			if (!ret)
+				return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int clk_sscg_pll1_find_setup(struct clk_sscg_pll_setup *setup,
+					struct clk_sscg_pll_setup *temp_setup,
+					uint64_t ref)
+{
+
+	int ret = -EINVAL;
+
+	if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ)
+		return ret;
+
+	temp_setup->ref = ref;
+
+	ret = clk_sscg_divr1_lookup(setup, temp_setup);
+
+	return ret;
+}
+
+static int clk_sscg_pll_find_setup(struct clk_sscg_pll_setup *setup,
+					uint64_t prate,
+					uint64_t rate, int try_bypass)
+{
+	struct clk_sscg_pll_setup temp_setup;
+	int ret = -EINVAL;
+
+	memset(&temp_setup, 0, sizeof(struct clk_sscg_pll_setup));
+	memset(setup, 0, sizeof(struct clk_sscg_pll_setup));
+
+	temp_setup.fout_error = PLL_OUT_MAX_FREQ;
+	temp_setup.fout_request = rate;
+
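+	/*
+	 * PLL_BYPASS2 passes the parent rate straight through, PLL_BYPASS1
+	 * only searches the second stage and PLL_BYPASS_NONE searches both
+	 * PLL stages.
+	 */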
+	switch (try_bypass) {
+
+	case PLL_BYPASS2:
+		if (prate == rate) {
+			setup->bypass = PLL_BYPASS2;
+			setup->fout = rate;
+			ret = 0;
+		}
+		break;
+
+	case PLL_BYPASS1:
+		ret = clk_sscg_pll2_find_setup(setup, &temp_setup, prate);
+		break;
+
+	case PLL_BYPASS_NONE:
+		ret = clk_sscg_pll1_find_setup(setup, &temp_setup, prate);
+		break;
+	}
+
+	return ret;
+}
+
+
+static int clk_sscg_pll_is_prepared(struct clk_hw *hw)
+{
+	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+
+	u32 val = readl_relaxed(pll->base + PLL_CFG0);
+
+	return (val & PLL_PD_MASK) ? 0 : 1;
+}
+
+static int clk_sscg_pll_prepare(struct clk_hw *hw)
+{
+	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+	u32 val;
+
+	val = readl_relaxed(pll->base + PLL_CFG0);
+	val &= ~PLL_PD_MASK;
+	writel_relaxed(val, pll->base + PLL_CFG0);
+
+	return clk_sscg_pll_wait_lock(pll);
+}
+
+static void clk_sscg_pll_unprepare(struct clk_hw *hw)
+{
+	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+	u32 val;
+
+	val = readl_relaxed(pll->base + PLL_CFG0);
+	val |= PLL_PD_MASK;
+	writel_relaxed(val, pll->base + PLL_CFG0);
+}
+
+static unsigned long clk_sscg_pll_recalc_rate(struct clk_hw *hw,
+					 unsigned long parent_rate)
+{
+	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+	u32 val, divr1, divf1, divr2, divf2, divq;
+	u64 temp64;
+
+	val = readl_relaxed(pll->base + PLL_CFG2);
+	divr1 = FIELD_GET(PLL_DIVR1_MASK, val);
+	divr2 = FIELD_GET(PLL_DIVR2_MASK, val);
+	divf1 = FIELD_GET(PLL_DIVF1_MASK, val);
+	divf2 = FIELD_GET(PLL_DIVF2_MASK, val);
+	divq = FIELD_GET(PLL_DIVQ_MASK, val);
+
+	temp64 = parent_rate;
+
+	val = readl(pll->base + PLL_CFG0);
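+	/*
+	 * The output rate depends on the active bypass: parent pass-through,
+	 * second stage only, or both PLL stages.
+	 */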
+	if (val & SSCG_PLL_BYPASS2_MASK) {
+		temp64 = parent_rate;
+	} else if (val & SSCG_PLL_BYPASS1_MASK) {
+		temp64 *= divf2;
+		do_div(temp64, (divr2 + 1) * (divq + 1));
+	} else {
+		temp64 *= 2;
+		temp64 *= (divf1 + 1) * (divf2 + 1);
+		do_div(temp64, (divr1 + 1) * (divr2 + 1) * (divq + 1));
+	}
+
+	return temp64;
+}
+
+static int clk_sscg_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+			    unsigned long parent_rate)
+{
+	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+	struct clk_sscg_pll_setup *setup = &pll->setup;
+	u32 val;
+
+	/* set bypass here too since the parent might be the same */
+	val = readl(pll->base + PLL_CFG0);
+	val &= ~SSCG_PLL_BYPASS_MASK;
+	val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, setup->bypass);
+	writel(val, pll->base + PLL_CFG0);
+
+	val = readl_relaxed(pll->base + PLL_CFG2);
+	val &= ~(PLL_DIVF1_MASK | PLL_DIVF2_MASK);
+	val &= ~(PLL_DIVR1_MASK | PLL_DIVR2_MASK | PLL_DIVQ_MASK);
+	val |= FIELD_PREP(PLL_DIVF1_MASK, setup->divf1);
+	val |= FIELD_PREP(PLL_DIVF2_MASK, setup->divf2);
+	val |= FIELD_PREP(PLL_DIVR1_MASK, setup->divr1);
+	val |= FIELD_PREP(PLL_DIVR2_MASK, setup->divr2);
+	val |= FIELD_PREP(PLL_DIVQ_MASK, setup->divq);
+	writel_relaxed(val, pll->base + PLL_CFG2);
+
+	return clk_sscg_pll_wait_lock(pll);
+}
+
+static u8 clk_sscg_pll_get_parent(struct clk_hw *hw)
+{
+	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+	u32 val;
+	u8 ret = pll->parent;
+
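+	/* report the parent currently selected by the bypass bits */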
+	val = readl(pll->base + PLL_CFG0);
+	if (val & SSCG_PLL_BYPASS2_MASK)
+		ret = pll->bypass2;
+	else if (val & SSCG_PLL_BYPASS1_MASK)
+		ret = pll->bypass1;
+	return ret;
+}
+
+static int clk_sscg_pll_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+	u32 val;
+
+	val = readl(pll->base + PLL_CFG0);
+	val &= ~SSCG_PLL_BYPASS_MASK;
+	val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, pll->setup.bypass);
+	writel(val, pll->base + PLL_CFG0);
+
+	return clk_sscg_pll_wait_lock(pll);
+}
+
+static int __clk_sscg_pll_determine_rate(struct clk_hw *hw,
+					struct clk_rate_request *req,
+					uint64_t min,
+					uint64_t max,
+					uint64_t rate,
+					int bypass)
+{
+	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+	struct clk_sscg_pll_setup *setup = &pll->setup;
+	struct clk_hw *parent_hw = NULL;
+	int bypass_parent_index;
+	int ret = -EINVAL;
+
+	req->max_rate = max;
+	req->min_rate = min;
+
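+	/* pick the parent that drives the PLL in the requested bypass mode */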
+	switch (bypass) {
+	case PLL_BYPASS2:
+		bypass_parent_index = pll->bypass2;
+		break;
+	case PLL_BYPASS1:
+		bypass_parent_index = pll->bypass1;
+		break;
+	default:
+		bypass_parent_index = pll->parent;
+		break;
+	}
+
+	parent_hw = clk_hw_get_parent_by_index(hw, bypass_parent_index);
+	ret = __clk_determine_rate(parent_hw, req);
+	if (!ret) {
+		ret = clk_sscg_pll_find_setup(setup, req->rate,
+						rate, bypass);
+	}
+
+	req->best_parent_hw = parent_hw;
+	req->best_parent_rate = req->rate;
+	req->rate = setup->fout;
+
+	return ret;
+}
+
+static int clk_sscg_pll_determine_rate(struct clk_hw *hw,
+				       struct clk_rate_request *req)
+{
+	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+	struct clk_sscg_pll_setup *setup = &pll->setup;
+	uint64_t rate = req->rate;
+	uint64_t min = req->min_rate;
+	uint64_t max = req->max_rate;
+	int ret = -EINVAL;
+
+	if (rate < PLL_OUT_MIN_FREQ || rate > PLL_OUT_MAX_FREQ)
+		return ret;
+
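+	/* prefer a full bypass, then the second stage only, then both stages */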
+	ret = __clk_sscg_pll_determine_rate(hw, req, req->rate, req->rate,
+						rate, PLL_BYPASS2);
+	if (!ret)
+		return ret;
+
+	ret = __clk_sscg_pll_determine_rate(hw, req, PLL_STAGE1_REF_MIN_FREQ,
+						PLL_STAGE1_REF_MAX_FREQ, rate,
+						PLL_BYPASS1);
+	if (!ret)
+		return ret;
+
+	ret = __clk_sscg_pll_determine_rate(hw, req, PLL_REF_MIN_FREQ,
+						PLL_REF_MAX_FREQ, rate,
+						PLL_BYPASS_NONE);
+	if (!ret)
+		return ret;
+
+	if (setup->fout >= min && setup->fout <= max)
+		ret = 0;
+
+	return ret;
+}
+
+static const struct clk_ops clk_sscg_pll_ops = {
+	.prepare	= clk_sscg_pll_prepare,
+	.unprepare	= clk_sscg_pll_unprepare,
+	.is_prepared	= clk_sscg_pll_is_prepared,
+	.recalc_rate	= clk_sscg_pll_recalc_rate,
+	.set_rate	= clk_sscg_pll_set_rate,
+	.set_parent	= clk_sscg_pll_set_parent,
+	.get_parent	= clk_sscg_pll_get_parent,
+	.determine_rate	= clk_sscg_pll_determine_rate,
+};
+
+struct clk_hw *imx_clk_hw_sscg_pll(const char *name,
+				const char * const *parent_names,
+				u8 num_parents,
+				u8 parent, u8 bypass1, u8 bypass2,
+				void __iomem *base,
+				unsigned long flags)
+{
+	struct clk_sscg_pll *pll;
+	struct clk_init_data init;
+	struct clk_hw *hw;
+	int ret;
+
+	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+	if (!pll)
+		return ERR_PTR(-ENOMEM);
+
+	pll->parent = parent;
+	pll->bypass1 = bypass1;
+	pll->bypass2 = bypass2;
+
+	pll->base = base;
+	init.name = name;
+	init.ops = &clk_sscg_pll_ops;
+
+	init.flags = flags;
+	init.parent_names = parent_names;
+	init.num_parents = num_parents;
+
+	pll->base = base;
+	pll->hw.init = &init;
+
+	hw = &pll->hw;
+
+	ret = clk_hw_register(NULL, hw);
+	if (ret) {
+		kfree(pll);
+		return ERR_PTR(ret);
+	}
+
+	return hw;
+}
diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
index cfc05e4..87ab8db 100644
--- a/drivers/clk/imx/clk.c
+++ b/drivers/clk/imx/clk.c
@@ -22,6 +22,14 @@ void imx_unregister_clocks(struct clk *clks[], unsigned int count)
 		clk_unregister(clks[i]);
 }
 
+void imx_unregister_hw_clocks(struct clk_hw *hws[], unsigned int count)
+{
+	unsigned int i;
+
+	for (i = 0; i < count; i++)
+		clk_hw_unregister(hws[i]);
+}
+
 void __init imx_mmdc_mask_handshake(void __iomem *ccm_base,
 				    unsigned int chn)
 {
@@ -94,8 +102,8 @@ struct clk_hw * __init imx_obtain_fixed_clock_hw(
 	return __clk_get_hw(clk);
 }
 
-struct clk_hw * __init imx_obtain_fixed_clk_hw(struct device_node *np,
-					       const char *name)
+struct clk_hw * imx_obtain_fixed_clk_hw(struct device_node *np,
+					const char *name)
 {
 	struct clk *clk;
 
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index bc5bb6a..b05213b 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -12,6 +12,7 @@ void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
 void imx_register_uart_clocks(struct clk ** const clks[]);
 void imx_mmdc_mask_handshake(void __iomem *ccm_base, unsigned int chn);
 void imx_unregister_clocks(struct clk *clks[], unsigned int count);
+void imx_unregister_hw_clocks(struct clk_hw *hws[], unsigned int count);
 
 extern void imx_cscmr1_fixup(u32 *val);
 
@@ -24,7 +25,7 @@ enum imx_pllv1_type {
 	IMX_PLLV1_IMX35,
 };
 
-enum imx_sccg_pll_type {
+enum imx_sscg_pll_type {
 	SCCG_PLL1,
 	SCCG_PLL2,
 };
@@ -52,64 +53,98 @@ struct imx_pll14xx_clk {
 
 extern struct imx_pll14xx_clk imx_1416x_pll;
 extern struct imx_pll14xx_clk imx_1443x_pll;
+extern struct imx_pll14xx_clk imx_1443x_dram_pll;
 
 #define imx_clk_cpu(name, parent_name, div, mux, pll, step) \
-	imx_clk_hw_cpu(name, parent_name, div, mux, pll, step)->clk
+	to_clk(imx_clk_hw_cpu(name, parent_name, div, mux, pll, step))
 
 #define clk_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
 				cgr_val, clk_gate_flags, lock, share_count) \
-	clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
-				cgr_val, clk_gate_flags, lock, share_count)->clk
+	to_clk(clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
+				cgr_val, clk_gate_flags, lock, share_count))
 
 #define imx_clk_pllv3(type, name, parent_name, base, div_mask) \
-	imx_clk_hw_pllv3(type, name, parent_name, base, div_mask)->clk
+	to_clk(imx_clk_hw_pllv3(type, name, parent_name, base, div_mask))
 
 #define imx_clk_pfd(name, parent_name, reg, idx) \
-	imx_clk_hw_pfd(name, parent_name, reg, idx)->clk
+	to_clk(imx_clk_hw_pfd(name, parent_name, reg, idx))
 
 #define imx_clk_gate_exclusive(name, parent, reg, shift, exclusive_mask) \
-	imx_clk_hw_gate_exclusive(name, parent, reg, shift, exclusive_mask)->clk
+	to_clk(imx_clk_hw_gate_exclusive(name, parent, reg, shift, exclusive_mask))
+
+#define imx_clk_fixed(name, rate) \
+	to_clk(imx_clk_hw_fixed(name, rate))
 
 #define imx_clk_fixed_factor(name, parent, mult, div) \
-	imx_clk_hw_fixed_factor(name, parent, mult, div)->clk
+	to_clk(imx_clk_hw_fixed_factor(name, parent, mult, div))
+
+#define imx_clk_divider(name, parent, reg, shift, width) \
+	to_clk(imx_clk_hw_divider(name, parent, reg, shift, width))
 
 #define imx_clk_divider2(name, parent, reg, shift, width) \
-	imx_clk_hw_divider2(name, parent, reg, shift, width)->clk
+	to_clk(imx_clk_hw_divider2(name, parent, reg, shift, width))
+
+#define imx_clk_divider_flags(name, parent, reg, shift, width, flags) \
+	to_clk(imx_clk_hw_divider_flags(name, parent, reg, shift, width, flags))
+
+#define imx_clk_gate(name, parent, reg, shift) \
+	to_clk(imx_clk_hw_gate(name, parent, reg, shift))
 
 #define imx_clk_gate_dis(name, parent, reg, shift) \
-	imx_clk_hw_gate_dis(name, parent, reg, shift)->clk
+	to_clk(imx_clk_hw_gate_dis(name, parent, reg, shift))
 
 #define imx_clk_gate2(name, parent, reg, shift) \
-	imx_clk_hw_gate2(name, parent, reg, shift)->clk
+	to_clk(imx_clk_hw_gate2(name, parent, reg, shift))
 
 #define imx_clk_gate2_flags(name, parent, reg, shift, flags) \
-	imx_clk_hw_gate2_flags(name, parent, reg, shift, flags)->clk
+	to_clk(imx_clk_hw_gate2_flags(name, parent, reg, shift, flags))
 
 #define imx_clk_gate2_shared2(name, parent, reg, shift, share_count) \
-	imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count)->clk
+	to_clk(imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count))
 
 #define imx_clk_gate3(name, parent, reg, shift) \
-	imx_clk_hw_gate3(name, parent, reg, shift)->clk
+	to_clk(imx_clk_hw_gate3(name, parent, reg, shift))
 
 #define imx_clk_gate4(name, parent, reg, shift) \
-	imx_clk_hw_gate4(name, parent, reg, shift)->clk
+	to_clk(imx_clk_hw_gate4(name, parent, reg, shift))
 
 #define imx_clk_mux(name, reg, shift, width, parents, num_parents) \
-	imx_clk_hw_mux(name, reg, shift, width, parents, num_parents)->clk
+	to_clk(imx_clk_hw_mux(name, reg, shift, width, parents, num_parents))
+
+#define imx_clk_pllv1(type, name, parent, base) \
+	to_clk(imx_clk_hw_pllv1(type, name, parent, base))
+
+#define imx_clk_pllv2(name, parent, base) \
+	to_clk(imx_clk_hw_pllv2(name, parent, base))
+
+#define imx_clk_frac_pll(name, parent_name, base) \
+	to_clk(imx_clk_hw_frac_pll(name, parent_name, base))
+
+#define imx_clk_sscg_pll(name, parent_names, num_parents, parent,\
+				bypass1, bypass2, base, flags) \
+	to_clk(imx_clk_hw_sscg_pll(name, parent_names, num_parents, parent,\
+				bypass1, bypass2, base, flags))
 
 struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
 		 void __iomem *base, const struct imx_pll14xx_clk *pll_clk);
 
-struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name,
+#define imx_clk_pll14xx(name, parent_name, base, pll_clk) \
+	to_clk(imx_clk_hw_pll14xx(name, parent_name, base, pll_clk))
+
+struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
+				  void __iomem *base,
+				  const struct imx_pll14xx_clk *pll_clk);
+
+struct clk_hw *imx_clk_hw_pllv1(enum imx_pllv1_type type, const char *name,
 		const char *parent, void __iomem *base);
 
-struct clk *imx_clk_pllv2(const char *name, const char *parent,
+struct clk_hw *imx_clk_hw_pllv2(const char *name, const char *parent,
 		void __iomem *base);
 
-struct clk *imx_clk_frac_pll(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_frac_pll(const char *name, const char *parent_name,
 			     void __iomem *base);
 
-struct clk *imx_clk_sccg_pll(const char *name,
+struct clk_hw *imx_clk_hw_sscg_pll(const char *name,
 				const char * const *parent_names,
 				u8 num_parents,
 				u8 parent, u8 bypass1, u8 bypass2,
@@ -149,7 +184,7 @@ struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name,
 		.kdiv	=	(_k),			\
 	}
 
-struct clk_hw *imx_clk_pllv4(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
 			     void __iomem *base);
 
 struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
@@ -173,7 +208,7 @@ struct clk_hw *imx_clk_hw_gate_exclusive(const char *name, const char *parent,
 struct clk_hw *imx_clk_hw_pfd(const char *name, const char *parent_name,
 		void __iomem *reg, u8 idx);
 
-struct clk_hw *imx_clk_pfdv2(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
 			     void __iomem *reg, u8 idx);
 
 struct clk_hw *imx_clk_hw_busy_divider(const char *name, const char *parent_name,
@@ -184,7 +219,7 @@ struct clk_hw *imx_clk_hw_busy_mux(const char *name, void __iomem *reg, u8 shift
 			     u8 width, void __iomem *busy_reg, u8 busy_shift,
 			     const char * const *parent_names, int num_parents);
 
-struct clk_hw *imx7ulp_clk_composite(const char *name,
+struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
 				     const char * const *parent_names,
 				     int num_parents, bool mux_present,
 				     bool rate_present, bool gate_present,
@@ -198,9 +233,11 @@ struct clk_hw *imx_clk_hw_fixup_mux(const char *name, void __iomem *reg,
 			      u8 shift, u8 width, const char * const *parents,
 			      int num_parents, void (*fixup)(u32 *val));
 
-static inline struct clk *imx_clk_fixed(const char *name, int rate)
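+/* convert a clk_hw pointer to the legacy struct clk, propagating error pointers */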
+static inline struct clk *to_clk(struct clk_hw *hw)
 {
-	return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
+	if (IS_ERR_OR_NULL(hw))
+		return ERR_CAST(hw);
+	return hw->clk;
 }
 
 static inline struct clk_hw *imx_clk_hw_fixed(const char *name, int rate)
@@ -224,13 +261,6 @@ static inline struct clk_hw *imx_clk_hw_fixed_factor(const char *name,
 			CLK_SET_RATE_PARENT, mult, div);
 }
 
-static inline struct clk *imx_clk_divider(const char *name, const char *parent,
-		void __iomem *reg, u8 shift, u8 width)
-{
-	return clk_register_divider(NULL, name, parent, CLK_SET_RATE_PARENT,
-			reg, shift, width, 0, &imx_ccm_lock);
-}
-
 static inline struct clk_hw *imx_clk_hw_divider(const char *name,
 						const char *parent,
 						void __iomem *reg, u8 shift,
@@ -240,14 +270,6 @@ static inline struct clk_hw *imx_clk_hw_divider(const char *name,
 				       reg, shift, width, 0, &imx_ccm_lock);
 }
 
-static inline struct clk *imx_clk_divider_flags(const char *name,
-		const char *parent, void __iomem *reg, u8 shift, u8 width,
-		unsigned long flags)
-{
-	return clk_register_divider(NULL, name, parent, flags,
-			reg, shift, width, 0, &imx_ccm_lock);
-}
-
 static inline struct clk_hw *imx_clk_hw_divider_flags(const char *name,
 						   const char *parent,
 						   void __iomem *reg, u8 shift,
@@ -274,13 +296,6 @@ static inline struct clk *imx_clk_divider2_flags(const char *name,
 			reg, shift, width, 0, &imx_ccm_lock);
 }
 
-static inline struct clk *imx_clk_gate(const char *name, const char *parent,
-		void __iomem *reg, u8 shift)
-{
-	return clk_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
-			shift, 0, &imx_ccm_lock);
-}
-
 static inline struct clk_hw *imx_clk_hw_gate_flags(const char *name, const char *parent,
 		void __iomem *reg, u8 shift, unsigned long flags)
 {
@@ -355,15 +370,18 @@ static inline struct clk_hw *imx_clk_hw_gate3(const char *name, const char *pare
 			reg, shift, 0, &imx_ccm_lock);
 }
 
-static inline struct clk *imx_clk_gate3_flags(const char *name,
+static inline struct clk_hw *imx_clk_hw_gate3_flags(const char *name,
 		const char *parent, void __iomem *reg, u8 shift,
 		unsigned long flags)
 {
-	return clk_register_gate(NULL, name, parent,
+	return clk_hw_register_gate(NULL, name, parent,
 			flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
 			reg, shift, 0, &imx_ccm_lock);
 }
 
+#define imx_clk_gate3_flags(name, parent, reg, shift, flags) \
+	to_clk(imx_clk_hw_gate3_flags(name, parent, reg, shift, flags))
+
 static inline struct clk_hw *imx_clk_hw_gate4(const char *name, const char *parent,
 		void __iomem *reg, u8 shift)
 {
@@ -372,15 +390,18 @@ static inline struct clk_hw *imx_clk_hw_gate4(const char *name, const char *pare
 			reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
 }
 
-static inline struct clk *imx_clk_gate4_flags(const char *name,
+static inline struct clk_hw *imx_clk_hw_gate4_flags(const char *name,
 		const char *parent, void __iomem *reg, u8 shift,
 		unsigned long flags)
 {
-	return clk_register_gate2(NULL, name, parent,
+	return clk_hw_register_gate2(NULL, name, parent,
 			flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
 			reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
 }
 
+#define imx_clk_gate4_flags(name, parent, reg, shift, flags) \
+	to_clk(imx_clk_hw_gate4_flags(name, parent, reg, shift, flags))
+
 static inline struct clk_hw *imx_clk_hw_mux(const char *name, void __iomem *reg,
 			u8 shift, u8 width, const char * const *parents,
 			int num_parents)
@@ -420,6 +441,16 @@ static inline struct clk *imx_clk_mux_flags(const char *name,
 			&imx_ccm_lock);
 }
 
+static inline struct clk_hw *imx_clk_hw_mux2_flags(const char *name,
+		void __iomem *reg, u8 shift, u8 width,
+		const char * const *parents,
+		int num_parents, unsigned long flags)
+{
+	return clk_hw_register_mux(NULL, name, parents, num_parents,
+			flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
+			reg, shift, width, 0, &imx_ccm_lock);
+}
+
 static inline struct clk *imx_clk_mux2_flags(const char *name,
 		void __iomem *reg, u8 shift, u8 width,
 		const char * const *parents,
@@ -446,23 +477,38 @@ struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
 		struct clk *div, struct clk *mux, struct clk *pll,
 		struct clk *step);
 
-struct clk *imx8m_clk_composite_flags(const char *name,
-					const char * const *parent_names,
-					int num_parents, void __iomem *reg,
-					unsigned long flags);
+struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
+					    const char * const *parent_names,
+					    int num_parents,
+					    void __iomem *reg,
+					    unsigned long flags);
 
-#define __imx8m_clk_composite(name, parent_names, reg, flags) \
-	imx8m_clk_composite_flags(name, parent_names, \
+#define imx8m_clk_composite_flags(name, parent_names, num_parents, reg, \
+				  flags) \
+	to_clk(imx8m_clk_hw_composite_flags(name, parent_names, \
+				num_parents, reg, flags))
+
+#define __imx8m_clk_hw_composite(name, parent_names, reg, flags) \
+	imx8m_clk_hw_composite_flags(name, parent_names, \
 		ARRAY_SIZE(parent_names), reg, \
 		flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
 
+#define __imx8m_clk_composite(name, parent_names, reg, flags) \
+	to_clk(__imx8m_clk_hw_composite(name, parent_names, reg, flags))
+
+#define imx8m_clk_hw_composite(name, parent_names, reg) \
+	__imx8m_clk_hw_composite(name, parent_names, reg, 0)
+
 #define imx8m_clk_composite(name, parent_names, reg) \
 	__imx8m_clk_composite(name, parent_names, reg, 0)
 
+#define imx8m_clk_hw_composite_critical(name, parent_names, reg) \
+	__imx8m_clk_hw_composite(name, parent_names, reg, CLK_IS_CRITICAL)
+
 #define imx8m_clk_composite_critical(name, parent_names, reg) \
 	__imx8m_clk_composite(name, parent_names, reg, CLK_IS_CRITICAL)
 
-struct clk_hw *imx_clk_divider_gate(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
 		unsigned long flags, void __iomem *reg, u8 shift, u8 width,
 		u8 clk_divider_flags, const struct clk_div_table *table,
 		spinlock_t *lock);
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 7efc361..ea3c70d 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -174,36 +174,36 @@
 	  This driver supports Mediatek MT6779 audsys clocks.
 
 config COMMON_CLK_MT6797
-       bool "Clock driver for MediaTek MT6797"
-       depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
-       select COMMON_CLK_MEDIATEK
-       default ARCH_MEDIATEK && ARM64
-       ---help---
-         This driver supports MediaTek MT6797 basic clocks.
+	bool "Clock driver for MediaTek MT6797"
+	depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+	select COMMON_CLK_MEDIATEK
+	default ARCH_MEDIATEK && ARM64
+	---help---
+	  This driver supports MediaTek MT6797 basic clocks.
 
 config COMMON_CLK_MT6797_MMSYS
-       bool "Clock driver for MediaTek MT6797 mmsys"
-       depends on COMMON_CLK_MT6797
-       ---help---
-         This driver supports MediaTek MT6797 mmsys clocks.
+	bool "Clock driver for MediaTek MT6797 mmsys"
+	depends on COMMON_CLK_MT6797
+	---help---
+	  This driver supports MediaTek MT6797 mmsys clocks.
 
 config COMMON_CLK_MT6797_IMGSYS
-       bool "Clock driver for MediaTek MT6797 imgsys"
-       depends on COMMON_CLK_MT6797
-       ---help---
-         This driver supports MediaTek MT6797 imgsys clocks.
+	bool "Clock driver for MediaTek MT6797 imgsys"
+	depends on COMMON_CLK_MT6797
+	---help---
+	  This driver supports MediaTek MT6797 imgsys clocks.
 
 config COMMON_CLK_MT6797_VDECSYS
-       bool "Clock driver for MediaTek MT6797 vdecsys"
-       depends on COMMON_CLK_MT6797
-       ---help---
-         This driver supports MediaTek MT6797 vdecsys clocks.
+	bool "Clock driver for MediaTek MT6797 vdecsys"
+	depends on COMMON_CLK_MT6797
+	---help---
+	  This driver supports MediaTek MT6797 vdecsys clocks.
 
 config COMMON_CLK_MT6797_VENCSYS
-       bool "Clock driver for MediaTek MT6797 vencsys"
-       depends on COMMON_CLK_MT6797
-       ---help---
-         This driver supports MediaTek MT6797 vencsys clocks.
+	bool "Clock driver for MediaTek MT6797 vencsys"
+	depends on COMMON_CLK_MT6797
+	---help---
+	  This driver supports MediaTek MT6797 vencsys clocks.
 
 config COMMON_CLK_MT7622
 	bool "Clock driver for MediaTek MT7622"
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index 3939f21..6eca2a4 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -18,4 +18,4 @@
 obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
 obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o
 obj-$(CONFIG_COMMON_CLK_G12A) += g12a.o g12a-aoclk.o
-obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
+obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o meson8-ddr.o
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index 2d39a8b..fc9df48 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -129,7 +129,7 @@ static int mpll_set_rate(struct clk_hw *hw,
 	return 0;
 }
 
-static void mpll_init(struct clk_hw *hw)
+static int mpll_init(struct clk_hw *hw)
 {
 	struct clk_regmap *clk = to_clk_regmap(hw);
 	struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
@@ -151,6 +151,8 @@ static void mpll_init(struct clk_hw *hw)
 	/* Set the magic misc bit if required */
 	if (MESON_PARM_APPLICABLE(&mpll->misc))
 		meson_parm_write(clk->map, &mpll->misc, 1);
+
+	return 0;
 }
 
 const struct clk_ops meson_clk_mpll_ro_ops = {
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
index 80c3ada1..fe22e17 100644
--- a/drivers/clk/meson/clk-phase.c
+++ b/drivers/clk/meson/clk-phase.c
@@ -78,7 +78,7 @@ meson_clk_triphase_data(struct clk_regmap *clk)
 	return (struct meson_clk_triphase_data *)clk->data;
 }
 
-static void meson_clk_triphase_sync(struct clk_hw *hw)
+static int meson_clk_triphase_sync(struct clk_hw *hw)
 {
 	struct clk_regmap *clk = to_clk_regmap(hw);
 	struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
@@ -88,6 +88,8 @@ static void meson_clk_triphase_sync(struct clk_hw *hw)
 	val = meson_parm_read(clk->map, &tph->ph0);
 	meson_parm_write(clk->map, &tph->ph1, val);
 	meson_parm_write(clk->map, &tph->ph2, val);
+
+	return 0;
 }
 
 static int meson_clk_triphase_get_phase(struct clk_hw *hw)
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index ddb1e56..b17a13e 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -77,6 +77,15 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
 	unsigned int m, n, frac;
 
 	n = meson_parm_read(clk->map, &pll->n);
+
+	/*
+	 * On some HW, N is set to zero on init. This value is invalid as
+	 * it would result in a division by zero. The rate can't be
+	 * calculated in this case
+	 */
+	if (n == 0)
+		return 0;
+
 	m = meson_parm_read(clk->map, &pll->m);
 
 	frac = MESON_PARM_APPLICABLE(&pll->frac) ?
@@ -277,7 +286,7 @@ static int meson_clk_pll_wait_lock(struct clk_hw *hw)
 	return -ETIMEDOUT;
 }
 
-static void meson_clk_pll_init(struct clk_hw *hw)
+static int meson_clk_pll_init(struct clk_hw *hw)
 {
 	struct clk_regmap *clk = to_clk_regmap(hw);
 	struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
@@ -288,6 +297,8 @@ static void meson_clk_pll_init(struct clk_hw *hw)
 				       pll->init_count);
 		meson_parm_write(clk->map, &pll->rst, 0);
 	}
+
+	return 0;
 }
 
 static int meson_clk_pll_is_enabled(struct clk_hw *hw)
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index b3af61c..d2760a0 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -4692,6 +4692,7 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
 	&g12a_bt656,
 	&g12a_usb1_to_ddr,
 	&g12a_mmc_pclk,
+	&g12a_uart2,
 	&g12a_vpu_intr,
 	&g12a_gic,
 	&g12a_sd_emmc_a_clk0,
diff --git a/drivers/clk/meson/meson8-ddr.c b/drivers/clk/meson/meson8-ddr.c
new file mode 100644
index 0000000..4b73ea2
--- /dev/null
+++ b/drivers/clk/meson/meson8-ddr.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Amlogic Meson8 DDR clock controller
+ *
+ * Copyright (C) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#include <dt-bindings/clock/meson8-ddr-clkc.h>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-regmap.h"
+#include "clk-pll.h"
+
+#define AM_DDR_PLL_CNTL			0x00
+#define AM_DDR_PLL_CNTL1		0x04
+#define AM_DDR_PLL_CNTL2		0x08
+#define AM_DDR_PLL_CNTL3		0x0c
+#define AM_DDR_PLL_CNTL4		0x10
+#define AM_DDR_PLL_STS			0x14
+#define DDR_CLK_CNTL			0x18
+#define DDR_CLK_STS			0x1c
+
+static struct clk_regmap meson8_ddr_pll_dco = {
+	.data = &(struct meson_clk_pll_data){
+		.en = {
+			.reg_off = AM_DDR_PLL_CNTL,
+			.shift   = 30,
+			.width   = 1,
+		},
+		.m = {
+			.reg_off = AM_DDR_PLL_CNTL,
+			.shift   = 0,
+			.width   = 9,
+		},
+		.n = {
+			.reg_off = AM_DDR_PLL_CNTL,
+			.shift   = 9,
+			.width   = 5,
+		},
+		.l = {
+			.reg_off = AM_DDR_PLL_CNTL,
+			.shift   = 31,
+			.width   = 1,
+		},
+		.rst = {
+			.reg_off = AM_DDR_PLL_CNTL,
+			.shift   = 29,
+			.width   = 1,
+		},
+	},
+	.hw.init = &(struct clk_init_data){
+		.name = "ddr_pll_dco",
+		.ops = &meson_clk_pll_ro_ops,
+		.parent_data = &(const struct clk_parent_data) {
+			.fw_name = "xtal",
+		},
+		.num_parents = 1,
+	},
+};
+
+static struct clk_regmap meson8_ddr_pll = {
+	.data = &(struct clk_regmap_div_data){
+		.offset = AM_DDR_PLL_CNTL,
+		.shift = 16,
+		.width = 2,
+		.flags = CLK_DIVIDER_POWER_OF_TWO,
+	},
+	.hw.init = &(struct clk_init_data){
+		.name = "ddr_pll",
+		.ops = &clk_regmap_divider_ro_ops,
+		.parent_hws = (const struct clk_hw *[]) {
+			&meson8_ddr_pll_dco.hw
+		},
+		.num_parents = 1,
+	},
+};
+
+static struct clk_hw_onecell_data meson8_ddr_clk_hw_onecell_data = {
+	.hws = {
+		[DDR_CLKID_DDR_PLL_DCO]		= &meson8_ddr_pll_dco.hw,
+		[DDR_CLKID_DDR_PLL]		= &meson8_ddr_pll.hw,
+	},
+	.num = 2,
+};
+
+static struct clk_regmap *const meson8_ddr_clk_regmaps[] = {
+	&meson8_ddr_pll_dco,
+	&meson8_ddr_pll,
+};
+
+static const struct regmap_config meson8_ddr_clkc_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 32,
+	.reg_stride = 4,
+	.max_register = DDR_CLK_STS,
+};
+
+static int meson8_ddr_clkc_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	void __iomem *base;
+	struct clk_hw *hw;
+	int ret, i;
+
+	base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	regmap = devm_regmap_init_mmio(&pdev->dev, base,
+				       &meson8_ddr_clkc_regmap_config);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	/* Populate regmap */
+	for (i = 0; i < ARRAY_SIZE(meson8_ddr_clk_regmaps); i++)
+		meson8_ddr_clk_regmaps[i]->map = regmap;
+
+	/* Register all clks */
+	for (i = 0; i < meson8_ddr_clk_hw_onecell_data.num; i++) {
+		hw = meson8_ddr_clk_hw_onecell_data.hws[i];
+
+		ret = devm_clk_hw_register(&pdev->dev, hw);
+		if (ret) {
+			dev_err(&pdev->dev, "Clock registration failed\n");
+			return ret;
+		}
+	}
+
+	return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
+					   &meson8_ddr_clk_hw_onecell_data);
+}
+
+static const struct of_device_id meson8_ddr_clkc_match_table[] = {
+	{ .compatible = "amlogic,meson8-ddr-clkc" },
+	{ .compatible = "amlogic,meson8b-ddr-clkc" },
+	{ /* sentinel */ }
+};
+
+static struct platform_driver meson8_ddr_clkc_driver = {
+	.probe		= meson8_ddr_clkc_probe,
+	.driver		= {
+		.name	= "meson8-ddr-clkc",
+		.of_match_table = meson8_ddr_clkc_match_table,
+	},
+};
+
+builtin_platform_driver(meson8_ddr_clkc_driver);
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 67e6691..9fd31f2 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -97,8 +97,10 @@ static struct clk_regmap meson8b_fixed_pll_dco = {
 	.hw.init = &(struct clk_init_data){
 		.name = "fixed_pll_dco",
 		.ops = &meson_clk_pll_ro_ops,
-		.parent_hws = (const struct clk_hw *[]) {
-			&meson8b_xtal.hw
+		.parent_data = &(const struct clk_parent_data) {
+			.fw_name = "xtal",
+			.name = "xtal",
+			.index = -1,
 		},
 		.num_parents = 1,
 	},
@@ -162,8 +164,10 @@ static struct clk_regmap meson8b_hdmi_pll_dco = {
 		/* sometimes also called "HPLL" or "HPLL PLL" */
 		.name = "hdmi_pll_dco",
 		.ops = &meson_clk_pll_ro_ops,
-		.parent_hws = (const struct clk_hw *[]) {
-			&meson8b_xtal.hw
+		.parent_data = &(const struct clk_parent_data) {
+			.fw_name = "xtal",
+			.name = "xtal",
+			.index = -1,
 		},
 		.num_parents = 1,
 	},
@@ -237,8 +241,10 @@ static struct clk_regmap meson8b_sys_pll_dco = {
 	.hw.init = &(struct clk_init_data){
 		.name = "sys_pll_dco",
 		.ops = &meson_clk_pll_ops,
-		.parent_hws = (const struct clk_hw *[]) {
-			&meson8b_xtal.hw
+		.parent_data = &(const struct clk_parent_data) {
+			.fw_name = "xtal",
+			.name = "xtal",
+			.index = -1,
 		},
 		.num_parents = 1,
 	},
@@ -631,9 +637,9 @@ static struct clk_regmap meson8b_cpu_in_sel = {
 	.hw.init = &(struct clk_init_data){
 		.name = "cpu_in_sel",
 		.ops = &clk_regmap_mux_ops,
-		.parent_hws = (const struct clk_hw *[]) {
-			&meson8b_xtal.hw,
-			&meson8b_sys_pll.hw,
+		.parent_data = (const struct clk_parent_data[]) {
+			{ .fw_name = "xtal", .name = "xtal", .index = -1, },
+			{ .hw = &meson8b_sys_pll.hw, },
 		},
 		.num_parents = 2,
 		.flags = (CLK_SET_RATE_PARENT |
@@ -736,9 +742,9 @@ static struct clk_regmap meson8b_cpu_clk = {
 	.hw.init = &(struct clk_init_data){
 		.name = "cpu_clk",
 		.ops = &clk_regmap_mux_ops,
-		.parent_hws = (const struct clk_hw *[]) {
-			&meson8b_xtal.hw,
-			&meson8b_cpu_scale_out_sel.hw,
+		.parent_data = (const struct clk_parent_data[]) {
+			{ .fw_name = "xtal", .name = "xtal", .index = -1, },
+			{ .hw = &meson8b_cpu_scale_out_sel.hw, },
 		},
 		.num_parents = 2,
 		.flags = (CLK_SET_RATE_PARENT |
@@ -758,12 +764,12 @@ static struct clk_regmap meson8b_nand_clk_sel = {
 		.name = "nand_clk_sel",
 		.ops = &clk_regmap_mux_ops,
 		/* FIXME all other parents are unknown: */
-		.parent_hws = (const struct clk_hw *[]) {
-			&meson8b_fclk_div4.hw,
-			&meson8b_fclk_div3.hw,
-			&meson8b_fclk_div5.hw,
-			&meson8b_fclk_div7.hw,
-			&meson8b_xtal.hw,
+		.parent_data = (const struct clk_parent_data[]) {
+			{ .hw = &meson8b_fclk_div4.hw, },
+			{ .hw = &meson8b_fclk_div3.hw, },
+			{ .hw = &meson8b_fclk_div5.hw, },
+			{ .hw = &meson8b_fclk_div7.hw, },
+			{ .fw_name = "xtal", .name = "xtal", .index = -1, },
 		},
 		.num_parents = 5,
 		.flags = CLK_SET_RATE_PARENT,
@@ -1721,8 +1727,10 @@ static struct clk_regmap meson8b_hdmi_sys_sel = {
 		.name = "hdmi_sys_sel",
 		.ops = &clk_regmap_mux_ro_ops,
 		/* FIXME: all other parents are unknown */
-		.parent_hws = (const struct clk_hw *[]) {
-			&meson8b_xtal.hw
+		.parent_data = &(const struct clk_parent_data) {
+			.fw_name = "xtal",
+			.name = "xtal",
+			.index = -1,
 		},
 		.num_parents = 1,
 		.flags = CLK_SET_RATE_NO_REPARENT,
@@ -1764,17 +1772,20 @@ static struct clk_regmap meson8b_hdmi_sys = {
 
 /*
  * The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
- * muxed by a glitch-free switch on Meson8b and Meson8m2. Meson8 only
- * has mali_0 and no glitch-free mux.
+ * muxed by a glitch-free switch on Meson8b and Meson8m2. The CCF can
+ * actually manage this glitch-free mux because it updates each clock
+ * tree top-to-bottom and switches to the "inactive" one when
+ * CLK_SET_RATE_GATE is set.
+ * Meson8 only has mali_0 and no glitch-free mux.
  */
-static const struct clk_hw *meson8b_mali_0_1_parent_hws[] = {
-	&meson8b_xtal.hw,
-	&meson8b_mpll2.hw,
-	&meson8b_mpll1.hw,
-	&meson8b_fclk_div7.hw,
-	&meson8b_fclk_div4.hw,
-	&meson8b_fclk_div3.hw,
-	&meson8b_fclk_div5.hw,
+static const struct clk_parent_data meson8b_mali_0_1_parent_data[] = {
+	{ .fw_name = "xtal", .name = "xtal", .index = -1, },
+	{ .hw = &meson8b_mpll2.hw, },
+	{ .hw = &meson8b_mpll1.hw, },
+	{ .hw = &meson8b_fclk_div7.hw, },
+	{ .hw = &meson8b_fclk_div4.hw, },
+	{ .hw = &meson8b_fclk_div3.hw, },
+	{ .hw = &meson8b_fclk_div5.hw, },
 };
 
 static u32 meson8b_mali_0_1_mux_table[] = { 0, 2, 3, 4, 5, 6, 7 };
@@ -1789,8 +1800,8 @@ static struct clk_regmap meson8b_mali_0_sel = {
 	.hw.init = &(struct clk_init_data){
 		.name = "mali_0_sel",
 		.ops = &clk_regmap_mux_ops,
-		.parent_hws = meson8b_mali_0_1_parent_hws,
-		.num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_hws),
+		.parent_data = meson8b_mali_0_1_parent_data,
+		.num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_data),
 		/*
 		 * Don't propagate rate changes up because the only changeable
 		 * parents are mpll1 and mpll2 but we need those for audio and
@@ -1830,7 +1841,7 @@ static struct clk_regmap meson8b_mali_0 = {
 			&meson8b_mali_0_div.hw
 		},
 		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
+		.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
 	},
 };
 
@@ -1844,8 +1855,8 @@ static struct clk_regmap meson8b_mali_1_sel = {
 	.hw.init = &(struct clk_init_data){
 		.name = "mali_1_sel",
 		.ops = &clk_regmap_mux_ops,
-		.parent_hws = meson8b_mali_0_1_parent_hws,
-		.num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_hws),
+		.parent_data = meson8b_mali_0_1_parent_data,
+		.num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_data),
 		/*
 		 * Don't propagate rate changes up because the only changeable
 		 * parents are mpll1 and mpll2 but we need those for audio and
@@ -1885,7 +1896,7 @@ static struct clk_regmap meson8b_mali_1 = {
 			&meson8b_mali_1_div.hw
 		},
 		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
+		.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
 	},
 };
 
@@ -1944,8 +1955,10 @@ static struct clk_regmap meson8m2_gp_pll_dco = {
 	.hw.init = &(struct clk_init_data){
 		.name = "gp_pll_dco",
 		.ops = &meson_clk_pll_ops,
-		.parent_hws = (const struct clk_hw *[]) {
-			&meson8b_xtal.hw
+		.parent_data = &(const struct clk_parent_data) {
+			.fw_name = "xtal",
+			.name = "xtal",
+			.index = -1,
 		},
 		.num_parents = 1,
 	},
@@ -3585,7 +3598,7 @@ static const struct reset_control_ops meson8b_clk_reset_ops = {
 
 struct meson8b_nb_data {
 	struct notifier_block nb;
-	struct clk_hw_onecell_data *onecell_data;
+	struct clk_hw *cpu_clk;
 };
 
 static int meson8b_cpu_clk_notifier_cb(struct notifier_block *nb,
@@ -3593,30 +3606,25 @@ static int meson8b_cpu_clk_notifier_cb(struct notifier_block *nb,
 {
 	struct meson8b_nb_data *nb_data =
 		container_of(nb, struct meson8b_nb_data, nb);
-	struct clk_hw **hws = nb_data->onecell_data->hws;
-	struct clk_hw *cpu_clk_hw, *parent_clk_hw;
-	struct clk *cpu_clk, *parent_clk;
+	struct clk_hw *parent_clk;
 	int ret;
 
 	switch (event) {
 	case PRE_RATE_CHANGE:
-		parent_clk_hw = hws[CLKID_XTAL];
+		/* xtal */
+		parent_clk = clk_hw_get_parent_by_index(nb_data->cpu_clk, 0);
 		break;
 
 	case POST_RATE_CHANGE:
-		parent_clk_hw = hws[CLKID_CPU_SCALE_OUT_SEL];
+		/* cpu_scale_out_sel */
+		parent_clk = clk_hw_get_parent_by_index(nb_data->cpu_clk, 1);
 		break;
 
 	default:
 		return NOTIFY_DONE;
 	}
 
-	cpu_clk_hw = hws[CLKID_CPUCLK];
-	cpu_clk = __clk_lookup(clk_hw_get_name(cpu_clk_hw));
-
-	parent_clk = __clk_lookup(clk_hw_get_name(parent_clk_hw));
-
-	ret = clk_set_parent(cpu_clk, parent_clk);
+	ret = clk_hw_set_parent(nb_data->cpu_clk, parent_clk);
 	if (ret)
 		return notifier_from_errno(ret);
 
@@ -3682,20 +3690,26 @@ static void __init meson8b_clkc_init_common(struct device_node *np,
 		meson8b_clk_regmaps[i]->map = map;
 
 	/*
-	 * register all clks
-	 * CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1
+	 * always skip CLKID_UNUSED and also skip XTAL if the .dtb provides the
+	 * XTAL clock as input.
 	 */
-	for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) {
+	if (!IS_ERR(of_clk_get_by_name(np, "xtal")))
+		i = CLKID_PLL_FIXED;
+	else
+		i = CLKID_XTAL;
+
+	/* register all clks */
+	for (; i < CLK_NR_CLKS; i++) {
 		/* array might be sparse */
 		if (!clk_hw_onecell_data->hws[i])
 			continue;
 
-		ret = clk_hw_register(NULL, clk_hw_onecell_data->hws[i]);
+		ret = of_clk_hw_register(np, clk_hw_onecell_data->hws[i]);
 		if (ret)
 			return;
 	}
 
-	meson8b_cpu_nb_data.onecell_data = clk_hw_onecell_data;
+	meson8b_cpu_nb_data.cpu_clk = clk_hw_onecell_data->hws[CLKID_CPUCLK];
 
 	/*
 	 * FIXME we shouldn't program the muxes in notifier handlers. The
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
index 3acf037..76d31c0 100644
--- a/drivers/clk/meson/sclk-div.c
+++ b/drivers/clk/meson/sclk-div.c
@@ -216,7 +216,7 @@ static int sclk_div_is_enabled(struct clk_hw *hw)
 	return 0;
 }
 
-static void sclk_div_init(struct clk_hw *hw)
+static int sclk_div_init(struct clk_hw *hw)
 {
 	struct clk_regmap *clk = to_clk_regmap(hw);
 	struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
@@ -231,6 +231,8 @@ static void sclk_div_init(struct clk_hw *hw)
 		sclk->cached_div = val + 1;
 
 	sclk_div_get_duty_cycle(hw, &sclk->cached_duty);
+
+	return 0;
 }
 
 const struct clk_ops meson_sclk_div_ops = {
diff --git a/drivers/clk/microchip/clk-core.c b/drivers/clk/microchip/clk-core.c
index 567755d..1b4f023 100644
--- a/drivers/clk/microchip/clk-core.c
+++ b/drivers/clk/microchip/clk-core.c
@@ -266,10 +266,12 @@ static void roclk_disable(struct clk_hw *hw)
 	writel(REFO_ON | REFO_OE, PIC32_CLR(refo->ctrl_reg));
 }
 
-static void roclk_init(struct clk_hw *hw)
+static int roclk_init(struct clk_hw *hw)
 {
 	/* initialize clock in disabled state */
 	roclk_disable(hw);
+
+	return 0;
 }
 
 static u8 roclk_get_parent(struct clk_hw *hw)
@@ -880,7 +882,7 @@ static int sclk_set_parent(struct clk_hw *hw, u8 index)
 	return err;
 }
 
-static void sclk_init(struct clk_hw *hw)
+static int sclk_init(struct clk_hw *hw)
 {
 	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
 	unsigned long flags;
@@ -899,6 +901,8 @@ static void sclk_init(struct clk_hw *hw)
 		writel(v, sclk->slew_reg);
 		spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
 	}
+
+	return 0;
 }
 
 /* sclk with post-divider */
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 90bf181..fabc09ac 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -109,7 +109,7 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
 	return 0;
 }
 
-static void clk_factor_init(struct clk_hw *hw)
+static int clk_factor_init(struct clk_hw *hw)
 {
 	struct mmp_clk_factor *factor = to_clk_factor(hw);
 	struct mmp_clk_factor_masks *masks = factor->masks;
@@ -146,6 +146,8 @@ static void clk_factor_init(struct clk_hw *hw)
 
 	if (factor->lock)
 		spin_unlock_irqrestore(factor->lock, flags);
+
+	return 0;
 }
 
 static const struct clk_ops clk_factor_ops = {
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
index 90814b2..d2cd36c 100644
--- a/drivers/clk/mmp/clk-mix.c
+++ b/drivers/clk/mmp/clk-mix.c
@@ -419,12 +419,14 @@ static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 	}
 }
 
-static void mmp_clk_mix_init(struct clk_hw *hw)
+static int mmp_clk_mix_init(struct clk_hw *hw)
 {
 	struct mmp_clk_mix *mix = to_clk_mix(hw);
 
 	if (mix->table)
 		_filter_clk_table(mix, mix->table, mix->table_size);
+
+	return 0;
 }
 
 const struct clk_ops mmp_clk_mix_ops = {
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index b4a95cb..6e71591 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -53,6 +53,8 @@
 #define APMU_DISP1	0x110
 #define APMU_CCIC0	0x50
 #define APMU_CCIC1	0xf4
+#define APMU_USBHSIC0	0xf8
+#define APMU_USBHSIC1	0xfc
 #define MPMU_UART_PLL	0x14
 
 struct mmp2_clk_unit {
@@ -194,6 +196,8 @@ static struct mmp_clk_mix_config sdh_mix_config = {
 };
 
 static DEFINE_SPINLOCK(usb_lock);
+static DEFINE_SPINLOCK(usbhsic0_lock);
+static DEFINE_SPINLOCK(usbhsic1_lock);
 
 static DEFINE_SPINLOCK(disp0_lock);
 static DEFINE_SPINLOCK(disp1_lock);
@@ -224,6 +228,8 @@ static struct mmp_param_div_clk apmu_div_clks[] = {
 
 static struct mmp_param_gate_clk apmu_gate_clks[] = {
 	{MMP2_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
+	{MMP2_CLK_USBHSIC0, "usbhsic0_clk", "usb_pll", 0, APMU_USBHSIC0, 0x1b, 0x1b, 0x0, 0, &usbhsic0_lock},
+	{MMP2_CLK_USBHSIC1, "usbhsic1_clk", "usb_pll", 0, APMU_USBHSIC1, 0x1b, 0x1b, 0x0, 0, &usbhsic1_lock},
 	/* The gate clocks has mux parent. */
 	{MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
 	{MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
index 415e690..ded07b0 100644
--- a/drivers/clk/mvebu/Kconfig
+++ b/drivers/clk/mvebu/Kconfig
@@ -29,7 +29,7 @@
 	select MVEBU_CLK_COMMON
 
 config ARMADA_37XX_CLK
-       bool
+	bool
 
 config ARMADA_XP_CLK
 	bool
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 3b33ef1..15cdcdc 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config KRAIT_CLOCKS
-       bool
-       select KRAIT_L2_ACCESSORS
+	bool
+	select KRAIT_L2_ACCESSORS
 
 config QCOM_GDSC
 	bool
@@ -14,6 +14,7 @@
 	tristate "Support for Qualcomm's clock controllers"
 	depends on OF
 	depends on ARCH_QCOM || COMPILE_TEST
+	select RATIONAL
 	select REGMAP_MMIO
 	select RESET_CONTROLLER
 
@@ -95,6 +96,14 @@
 	  Say Y if you want to use peripheral devices such as UART, SPI,
 	  i2c, USB, SD/eMMC, etc.
 
+config IPQ_GCC_6018
+	tristate "IPQ6018 Global Clock Controller"
+	help
+	  Support for the global clock controller on ipq6018 devices.
+	  Say Y if you want to use peripheral devices such as UART, SPI,
+	  I2C, USB, SD/eMMC, etc. Select this for the root clock
+	  of ipq6018.
+
 config IPQ_GCC_806X
 	tristate "IPQ806x Global Clock Controller"
 	help
@@ -229,6 +238,15 @@
 	  Say Y if you want to support graphics controller devices and
 	  functionality such as 3D graphics.
 
+config MSM_MMCC_8998
+	tristate "MSM8998 Multimedia Clock Controller"
+	select MSM_GCC_8998
+	select QCOM_GDSC
+	help
+	  Support for the multimedia clock controller on msm8998 devices.
+	  Say Y if you want to support multimedia devices such as display,
+	  graphics, video encode/decode, camera, etc.
+
 config QCS_GCC_404
 	tristate "QCS404 Global Clock Controller"
 	help
@@ -236,6 +254,15 @@
 	  Say Y if you want to use multimedia devices or peripheral
 	  devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
 
+config SC_DISPCC_7180
+	tristate "SC7180 Display Clock Controller"
+	select SC_GCC_7180
+	help
+	  Support for the display clock controller on Qualcomm Technologies, Inc.
+	  SC7180 devices.
+	  Say Y if you want to support display devices and functionality such as
+	  splash screen.
+
 config SC_GCC_7180
 	tristate "SC7180 Global Clock Controller"
 	select QCOM_GDSC
@@ -245,6 +272,22 @@
 	  Say Y if you want to use peripheral devices such as UART, SPI,
 	  I2C, USB, UFS, SDCC, etc.
 
+config SC_GPUCC_7180
+	tristate "SC7180 Graphics Clock Controller"
+	select SC_GCC_7180
+	help
+	  Support for the graphics clock controller on SC7180 devices.
+	  Say Y if you want to support graphics controller devices and
+	  functionality such as 3D graphics.
+
+config SC_VIDEOCC_7180
+	tristate "SC7180 Video Clock Controller"
+	select SC_GCC_7180
+	help
+	  Support for the video clock controller on SC7180 devices.
+	  Say Y if you want to support video devices and functionality such as
+	  video encode and decode.
+
 config SDM_CAMCC_845
 	tristate "SDM845 Camera Clock Controller"
 	select SDM_GCC_845
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index d899661..656a87e 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
 obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
 obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
+obj-$(CONFIG_IPQ_GCC_6018) += gcc-ipq6018.o
 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
 obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
 obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
@@ -37,6 +38,7 @@
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
 obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_MSM_MMCC_8998) += mmcc-msm8998.o
 obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
 obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
 obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
@@ -45,7 +47,10 @@
 obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
 obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
 obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
+obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
 obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
+obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
+obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o
 obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
 obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
 obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o
diff --git a/drivers/clk/qcom/apcs-msm8916.c b/drivers/clk/qcom/apcs-msm8916.c
index a6c89a3..cf69a97 100644
--- a/drivers/clk/qcom/apcs-msm8916.c
+++ b/drivers/clk/qcom/apcs-msm8916.c
@@ -19,9 +19,9 @@
 
 static const u32 gpll0_a53cc_map[] = { 4, 5 };
 
-static const char * const gpll0_a53cc[] = {
-	"gpll0_vote",
-	"a53pll",
+static const struct clk_parent_data pdata[] = {
+	{ .fw_name = "aux", .name = "gpll0_vote", },
+	{ .fw_name = "pll", .name = "a53pll", },
 };
 
 /*
@@ -62,8 +62,8 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	init.name = "a53mux";
-	init.parent_names = gpll0_a53cc;
-	init.num_parents = ARRAY_SIZE(gpll0_a53cc);
+	init.parent_data = pdata;
+	init.num_parents = ARRAY_SIZE(pdata);
 	init.ops = &clk_regmap_mux_div_ops;
 	init.flags = CLK_SET_RATE_PARENT;
 
@@ -79,7 +79,8 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
 	a53cc->pclk = devm_clk_get(parent, NULL);
 	if (IS_ERR(a53cc->pclk)) {
 		ret = PTR_ERR(a53cc->pclk);
-		dev_err(dev, "failed to get clk: %d\n", ret);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to get clk: %d\n", ret);
 		return ret;
 	}
 
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 055318f..7c2936d 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -878,6 +878,14 @@ static long clk_trion_pll_round_rate(struct clk_hw *hw, unsigned long rate,
 	return clamp(rate, min_freq, max_freq);
 }
 
+const struct clk_ops clk_alpha_pll_fixed_ops = {
+	.enable = clk_alpha_pll_enable,
+	.disable = clk_alpha_pll_disable,
+	.is_enabled = clk_alpha_pll_is_enabled,
+	.recalc_rate = clk_alpha_pll_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_ops);
+
 const struct clk_ops clk_alpha_pll_ops = {
 	.enable = clk_alpha_pll_enable,
 	.disable = clk_alpha_pll_disable,
@@ -1024,6 +1032,25 @@ void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 		regmap_write(regmap, PLL_CONFIG_CTL(pll),
 						config->config_ctl_val);
 
+	if (config->config_ctl_hi_val)
+		regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
+						config->config_ctl_hi_val);
+
+	if (config->user_ctl_val)
+		regmap_write(regmap, PLL_USER_CTL(pll), config->user_ctl_val);
+
+	if (config->user_ctl_hi_val)
+		regmap_write(regmap, PLL_USER_CTL_U(pll),
+						config->user_ctl_hi_val);
+
+	if (config->test_ctl_val)
+		regmap_write(regmap, PLL_TEST_CTL(pll),
+						config->test_ctl_val);
+
+	if (config->test_ctl_hi_val)
+		regmap_write(regmap, PLL_TEST_CTL_U(pll),
+						config->test_ctl_hi_val);
+
 	if (config->post_div_mask) {
 		mask = config->post_div_mask;
 		val = config->post_div_val;
@@ -1141,14 +1168,9 @@ static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
 						unsigned long prate)
 {
 	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
-	u32 val, l, alpha_width = pll_alpha_width(pll);
+	u32 l, alpha_width = pll_alpha_width(pll);
 	u64 a;
 	unsigned long rrate;
-	int ret = 0;
-
-	ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
-	if (ret)
-		return ret;
 
 	rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
 
@@ -1167,7 +1189,64 @@ static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
 	return __clk_alpha_pll_update_latch(pll);
 }
 
+static int alpha_pll_fabia_prepare(struct clk_hw *hw)
+{
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	const struct pll_vco *vco;
+	struct clk_hw *parent_hw;
+	unsigned long cal_freq, rrate;
+	u32 cal_l, val, alpha_width = pll_alpha_width(pll);
+	u64 a;
+	int ret;
+
+	/* Check if calibration needs to be done i.e. PLL is in reset */
+	ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+	if (ret)
+		return ret;
+
+	/* Return early if calibration is not needed. */
+	if (val & PLL_RESET_N)
+		return 0;
+
+	vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw));
+	if (!vco) {
+		pr_err("alpha pll: not in a valid vco range\n");
+		return -EINVAL;
+	}
+
+	cal_freq = DIV_ROUND_CLOSEST((pll->vco_table[0].min_freq +
+				pll->vco_table[0].max_freq) * 54, 100);
+
+	parent_hw = clk_hw_get_parent(hw);
+	if (!parent_hw)
+		return -EINVAL;
+
+	rrate = alpha_pll_round_rate(cal_freq, clk_hw_get_rate(parent_hw),
+					&cal_l, &a, alpha_width);
+	/*
+	 * Due to a limited number of bits for fractional rate programming, the
+	 * rounded up rate could be marginally higher than the requested rate.
+	 */
+	if (rrate > (cal_freq + FABIA_PLL_RATE_MARGIN) || rrate < cal_freq)
+		return -EINVAL;
+
+	/* Setup PLL for calibration frequency */
+	regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), cal_l);
+
+	/* Bringup the PLL at calibration frequency */
+	ret = clk_alpha_pll_enable(hw);
+	if (ret) {
+		pr_err("alpha pll calibration failed\n");
+		return ret;
+	}
+
+	clk_alpha_pll_disable(hw);
+
+	return 0;
+}
+
 const struct clk_ops clk_alpha_pll_fabia_ops = {
+	.prepare = alpha_pll_fabia_prepare,
 	.enable = alpha_pll_fabia_enable,
 	.disable = alpha_pll_fabia_disable,
 	.is_enabled = clk_alpha_pll_is_enabled,
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 15f27f4..fbc1f67 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -94,6 +94,10 @@ struct alpha_pll_config {
 	u32 alpha_hi;
 	u32 config_ctl_val;
 	u32 config_ctl_hi_val;
+	u32 user_ctl_val;
+	u32 user_ctl_hi_val;
+	u32 test_ctl_val;
+	u32 test_ctl_hi_val;
 	u32 main_output_mask;
 	u32 aux_output_mask;
 	u32 aux2_output_mask;
@@ -109,6 +113,7 @@ struct alpha_pll_config {
 };
 
 extern const struct clk_ops clk_alpha_pll_ops;
+extern const struct clk_ops clk_alpha_pll_fixed_ops;
 extern const struct clk_ops clk_alpha_pll_hwfsm_ops;
 extern const struct clk_ops clk_alpha_pll_postdiv_ops;
 extern const struct clk_ops clk_alpha_pll_huayra_ops;
diff --git a/drivers/clk/qcom/clk-hfpll.c b/drivers/clk/qcom/clk-hfpll.c
index 3c04805..e847d58 100644
--- a/drivers/clk/qcom/clk-hfpll.c
+++ b/drivers/clk/qcom/clk-hfpll.c
@@ -196,7 +196,7 @@ static unsigned long clk_hfpll_recalc_rate(struct clk_hw *hw,
 	return l_val * parent_rate;
 }
 
-static void clk_hfpll_init(struct clk_hw *hw)
+static int clk_hfpll_init(struct clk_hw *hw)
 {
 	struct clk_hfpll *h = to_clk_hfpll(hw);
 	struct hfpll_data const *hd = h->d;
@@ -206,7 +206,7 @@ static void clk_hfpll_init(struct clk_hw *hw)
 	regmap_read(regmap, hd->mode_reg, &mode);
 	if (mode != (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)) {
 		__clk_hfpll_init_once(hw);
-		return;
+		return 0;
 	}
 
 	if (hd->status_reg) {
@@ -218,6 +218,8 @@ static void clk_hfpll_init(struct clk_hw *hw)
 			__clk_hfpll_init_once(hw);
 		}
 	}
+
+	return 0;
 }
 
 static int hfpll_is_enabled(struct clk_hw *hw)
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 78358b8..86d2b8b 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -161,6 +161,7 @@ extern const struct clk_ops clk_byte2_ops;
 extern const struct clk_ops clk_pixel_ops;
 extern const struct clk_ops clk_gfx3d_ops;
 extern const struct clk_ops clk_rcg2_shared_ops;
+extern const struct clk_ops clk_dp_ops;
 
 struct clk_rcg_dfs_data {
 	struct clk_rcg2 *rcg;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 8f4b9be..357159f 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -10,6 +10,7 @@
 #include <linux/export.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
+#include <linux/rational.h>
 #include <linux/regmap.h>
 #include <linux/math64.h>
 #include <linux/slab.h>
@@ -217,6 +218,9 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
 
 	clk_flags = clk_hw_get_flags(hw);
 	p = clk_hw_get_parent_by_index(hw, index);
+	if (!p)
+		return -EINVAL;
+
 	if (clk_flags & CLK_SET_RATE_PARENT) {
 		rate = f->freq;
 		if (f->pre_div) {
@@ -952,7 +956,7 @@ static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 	struct clk_hw *p;
 	unsigned long prate = 0;
-	u32 val, mask, cfg, mode;
+	u32 val, mask, cfg, mode, src;
 	int i, num_parents;
 
 	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);
@@ -962,12 +966,12 @@ static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
 	if (cfg & mask)
 		f->pre_div = cfg & mask;
 
-	cfg &= CFG_SRC_SEL_MASK;
-	cfg >>= CFG_SRC_SEL_SHIFT;
+	src = cfg & CFG_SRC_SEL_MASK;
+	src >>= CFG_SRC_SEL_SHIFT;
 
 	num_parents = clk_hw_get_num_parents(hw);
 	for (i = 0; i < num_parents; i++) {
-		if (cfg == rcg->parent_map[i].cfg) {
+		if (src == rcg->parent_map[i].cfg) {
 			f->src = rcg->parent_map[i].src;
 			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
 			prate = clk_hw_get_rate(p);
@@ -1124,3 +1128,79 @@ int qcom_cc_register_rcg_dfs(struct regmap *regmap,
 	return 0;
 }
 EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
+
+static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
+			unsigned long parent_rate)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	struct freq_tbl f = { 0 };
+	u32 mask = BIT(rcg->hid_width) - 1;
+	u32 hid_div, cfg;
+	int i, num_parents = clk_hw_get_num_parents(hw);
+	unsigned long num, den;
+
+	rational_best_approximation(parent_rate, rate,
+			GENMASK(rcg->mnd_width - 1, 0),
+			GENMASK(rcg->mnd_width - 1, 0), &den, &num);
+
+	if (!num || !den)
+		return -EINVAL;
+
+	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+	hid_div = cfg;
+	cfg &= CFG_SRC_SEL_MASK;
+	cfg >>= CFG_SRC_SEL_SHIFT;
+
+	for (i = 0; i < num_parents; i++) {
+		if (cfg == rcg->parent_map[i].cfg) {
+			f.src = rcg->parent_map[i].src;
+			break;
+		}
+	}
+
+	f.pre_div = hid_div;
+	f.pre_div >>= CFG_SRC_DIV_SHIFT;
+	f.pre_div &= mask;
+
+	if (num != den) {
+		f.m = num;
+		f.n = den;
+	} else {
+		f.m = 0;
+		f.n = 0;
+	}
+
+	return clk_rcg2_configure(rcg, &f);
+}
+
+static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
+		unsigned long rate, unsigned long parent_rate, u8 index)
+{
+	return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
+}
+
+static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
+				struct clk_rate_request *req)
+{
+	struct clk_rate_request parent_req = *req;
+	int ret;
+
+	ret = __clk_determine_rate(clk_hw_get_parent(hw), &parent_req);
+	if (ret)
+		return ret;
+
+	req->best_parent_rate = parent_req.rate;
+
+	return 0;
+}
+
+const struct clk_ops clk_dp_ops = {
+	.is_enabled = clk_rcg2_is_enabled,
+	.get_parent = clk_rcg2_get_parent,
+	.set_parent = clk_rcg2_set_parent,
+	.recalc_rate = clk_rcg2_recalc_rate,
+	.set_rate = clk_rcg2_dp_set_rate,
+	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
+	.determine_rate = clk_rcg2_dp_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_dp_ops);
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 7ed313a..98a118c 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -396,6 +396,7 @@ static struct clk_hw *sc7180_rpmh_clocks[] = {
 	[RPMH_RF_CLK1_A]	= &sdm845_rf_clk1_ao.hw,
 	[RPMH_RF_CLK2]		= &sdm845_rf_clk2.hw,
 	[RPMH_RF_CLK2_A]	= &sdm845_rf_clk2_ao.hw,
+	[RPMH_IPA_CLK]		= &sdm845_ipa.hw,
 };
 
 static const struct clk_rpmh_desc clk_rpmh_sc7180 = {
@@ -431,11 +432,16 @@ static int clk_rpmh_probe(struct platform_device *pdev)
 	hw_clks = desc->clks;
 
 	for (i = 0; i < desc->num_clks; i++) {
-		const char *name = hw_clks[i]->init->name;
+		const char *name;
 		u32 res_addr;
 		size_t aux_data_len;
 		const struct bcm_db *data;
 
+		if (!hw_clks[i])
+			continue;
+
+		name = hw_clks[i]->init->name;
+
 		rpmh_clk = to_clk_rpmh(hw_clks[i]);
 		res_addr = cmd_db_read_addr(rpmh_clk->res_name);
 		if (!res_addr) {
@@ -481,9 +487,9 @@ static int clk_rpmh_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id clk_rpmh_match_table[] = {
+	{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
 	{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
 	{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
-	{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 930fa4a4..0bbfef9 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -485,6 +485,8 @@ static struct clk_smd_rpm *msm8974_clks[] = {
 	[RPM_SMD_MMSSNOC_AHB_CLK]	= &msm8974_mmssnoc_ahb_clk,
 	[RPM_SMD_MMSSNOC_AHB_A_CLK]	= &msm8974_mmssnoc_ahb_a_clk,
 	[RPM_SMD_BIMC_CLK]		= &msm8974_bimc_clk,
+	[RPM_SMD_GFX3D_CLK_SRC]		= &msm8974_gfx3d_clk_src,
+	[RPM_SMD_GFX3D_A_CLK_SRC]	= &msm8974_gfx3d_a_clk_src,
 	[RPM_SMD_BIMC_A_CLK]		= &msm8974_bimc_a_clk,
 	[RPM_SMD_OCMEMGX_CLK]		= &msm8974_ocmemgx_clk,
 	[RPM_SMD_OCMEMGX_A_CLK]		= &msm8974_ocmemgx_a_clk,
@@ -648,6 +650,7 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
 };
 
 /* msm8998 */
+DEFINE_CLK_SMD_RPM(msm8998, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
 DEFINE_CLK_SMD_RPM(msm8998, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
 DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
 DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
@@ -671,6 +674,8 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5);
 DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
 static struct clk_smd_rpm *msm8998_clks[] = {
+	[RPM_SMD_BIMC_CLK] = &msm8998_bimc_clk,
+	[RPM_SMD_BIMC_A_CLK] = &msm8998_bimc_a_clk,
 	[RPM_SMD_PCNOC_CLK] = &msm8998_pcnoc_clk,
 	[RPM_SMD_PCNOC_A_CLK] = &msm8998_pcnoc_a_clk,
 	[RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk,
diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c
new file mode 100644
index 0000000..dd7af41
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sc7180.c
@@ -0,0 +1,761 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,dispcc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+	P_BI_TCXO,
+	P_CHIP_SLEEP_CLK,
+	P_CORE_BI_PLL_TEST_SE,
+	P_DISP_CC_PLL0_OUT_EVEN,
+	P_DISP_CC_PLL0_OUT_MAIN,
+	P_DP_PHY_PLL_LINK_CLK,
+	P_DP_PHY_PLL_VCO_DIV_CLK,
+	P_DSI0_PHY_PLL_OUT_BYTECLK,
+	P_DSI0_PHY_PLL_OUT_DSICLK,
+	P_GPLL0_OUT_MAIN,
+};
+
+static const struct pll_vco fabia_vco[] = {
+	{ 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_pll0",
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "bi_tcxo",
+			},
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_fabia_ops,
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_disp_cc_pll0_out_even[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv disp_cc_pll0_out_even = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_disp_cc_pll0_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_disp_cc_pll0_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_pll0_out_even",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &disp_cc_pll0.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+	},
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+	{ .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DP_PHY_PLL_LINK_CLK, 1 },
+	{ P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+	{ .fw_name = "bi_tcxo" },
+	{ .fw_name = "dp_phy_pll_link_clk" },
+	{ .fw_name = "dp_phy_pll_vco_div_clk" },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+	{ .fw_name = "bi_tcxo" },
+	{ .fw_name = "dsi0_phy_pll_out_byteclk" },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DISP_CC_PLL0_OUT_MAIN, 1 },
+	{ P_GPLL0_OUT_MAIN, 4 },
+	{ P_DISP_CC_PLL0_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+	{ .fw_name = "bi_tcxo" },
+	{ .hw = &disp_cc_pll0.clkr.hw },
+	{ .fw_name = "gcc_disp_gpll0_clk_src" },
+	{ .hw = &disp_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+	{ .fw_name = "bi_tcxo" },
+	{ .fw_name = "gcc_disp_gpll0_clk_src" },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+	{ .fw_name = "bi_tcxo" },
+	{ .fw_name = "dsi0_phy_pll_out_dsiclk" },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+	F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+	.cmd_rcgr = 0x22bc,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_4,
+	.freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_ahb_clk_src",
+		.parent_data = disp_cc_parent_data_4,
+		.num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_shared_ops,
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+	.cmd_rcgr = 0x2110,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_2,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_byte0_clk_src",
+		.parent_data = disp_cc_parent_data_2,
+		.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_byte2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_aux_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+	.cmd_rcgr = 0x21dc,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_0,
+	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_aux_clk_src",
+		.parent_data = disp_cc_parent_data_0,
+		.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+	.cmd_rcgr = 0x2194,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_crypto_clk_src",
+		.parent_data = disp_cc_parent_data_1,
+		.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_byte2_ops,
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+	.cmd_rcgr = 0x2178,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_link_clk_src",
+		.parent_data = disp_cc_parent_data_1,
+		.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_byte2_ops,
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+	.cmd_rcgr = 0x21ac,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_pixel_clk_src",
+		.parent_data = disp_cc_parent_data_1,
+		.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_dp_ops,
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+	.cmd_rcgr = 0x2148,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_2,
+	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_esc0_clk_src",
+		.parent_data = disp_cc_parent_data_2,
+		.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+	F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+	F(345000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+	F(460000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+	.cmd_rcgr = 0x20c8,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_3,
+	.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_mdp_clk_src",
+		.parent_data = disp_cc_parent_data_3,
+		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+		.ops = &clk_rcg2_shared_ops,
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+	.cmd_rcgr = 0x2098,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_5,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_pclk0_clk_src",
+		.parent_data = disp_cc_parent_data_5,
+		.num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_pixel_ops,
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+	.cmd_rcgr = 0x20e0,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_3,
+	.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_rot_clk_src",
+		.parent_data = disp_cc_parent_data_3,
+		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+		.ops = &clk_rcg2_shared_ops,
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+	.cmd_rcgr = 0x20f8,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_0,
+	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_vsync_clk_src",
+		.parent_data = disp_cc_parent_data_0,
+		.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+		.ops = &clk_rcg2_shared_ops,
+	},
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+	.halt_reg = 0x2080,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2080,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_ahb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+	.halt_reg = 0x2028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte0_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_byte0_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+	.reg = 0x2128,
+	.shift = 0,
+	.width = 2,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "disp_cc_mdss_byte0_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &disp_cc_mdss_byte0_clk_src.clkr.hw
+		},
+		.num_parents = 1,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
+	.reg = 0x2190,
+	.shift = 0,
+	.width = 2,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "disp_cc_mdss_dp_link_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &disp_cc_mdss_dp_link_clk_src.clkr.hw
+		},
+		.num_parents = 1,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+	.halt_reg = 0x202c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x202c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_byte0_intf_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+	.halt_reg = 0x2054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_aux_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_dp_aux_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
+	.halt_reg = 0x2048,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2048,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_crypto_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_dp_crypto_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+	.halt_reg = 0x2040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_link_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+	.halt_reg = 0x2044,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_link_intf_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_dp_link_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+	.halt_reg = 0x204c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x204c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_pixel_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_dp_pixel_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+	.halt_reg = 0x2038,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_esc0_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_esc0_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+	.halt_reg = 0x200c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x200c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_mdp_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+	.halt_reg = 0x201c,
+	.halt_check = BRANCH_VOTED,
+	.clkr = {
+		.enable_reg = 0x201c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_mdp_lut_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+	.halt_reg = 0x4004,
+	.halt_check = BRANCH_VOTED,
+	.clkr = {
+		.enable_reg = 0x4004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_non_gdsc_ahb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+	.halt_reg = 0x2004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_pclk0_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+	.halt_reg = 0x2014,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_rot_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_rot_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+	.halt_reg = 0x400c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x400c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_rscc_ahb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+	.halt_reg = 0x4008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_rscc_vsync_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+	.halt_reg = 0x2024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_vsync_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct gdsc mdss_gdsc = {
+	.gdscr = 0x3000,
+	.pd = {
+		.name = "mdss_gdsc",
+	},
+	.pwrsts = PWRSTS_OFF_ON,
+	.flags = HW_CTRL,
+};
+
+static struct gdsc *disp_cc_sc7180_gdscs[] = {
+	[MDSS_GDSC] = &mdss_gdsc,
+};
+
+static struct clk_regmap *disp_cc_sc7180_clocks[] = {
+	[DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+	[DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+	[DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+	[DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+	[DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+	[DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+	[DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
+	[DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] = &disp_cc_mdss_dp_crypto_clk_src.clkr,
+	[DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+	[DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+	[DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC] =
+				&disp_cc_mdss_dp_link_div_clk_src.clkr,
+	[DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+	[DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+	[DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
+	[DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+	[DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+	[DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+	[DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+	[DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+	[DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+	[DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+	[DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+	[DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+	[DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+	[DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+	[DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+	[DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+	[DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+	[DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+	[DISP_CC_PLL0_OUT_EVEN] = &disp_cc_pll0_out_even.clkr,
+};
+
+static const struct regmap_config disp_cc_sc7180_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x10000,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_sc7180_desc = {
+	.config = &disp_cc_sc7180_regmap_config,
+	.clks = disp_cc_sc7180_clocks,
+	.num_clks = ARRAY_SIZE(disp_cc_sc7180_clocks),
+	.gdscs = disp_cc_sc7180_gdscs,
+	.num_gdscs = ARRAY_SIZE(disp_cc_sc7180_gdscs),
+};
+
+static const struct of_device_id disp_cc_sc7180_match_table[] = {
+	{ .compatible = "qcom,sc7180-dispcc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sc7180_match_table);
+
+static int disp_cc_sc7180_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	struct alpha_pll_config disp_cc_pll_config = {};
+
+	regmap = qcom_cc_map(pdev, &disp_cc_sc7180_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	/* 1380MHz configuration */
+	disp_cc_pll_config.l = 0x47;
+	disp_cc_pll_config.alpha = 0xe000;
+	disp_cc_pll_config.user_ctl_val = 0x00000001;
+	disp_cc_pll_config.user_ctl_hi_val = 0x00004805;
+
+	clk_fabia_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll_config);
+
+	return qcom_cc_really_probe(pdev, &disp_cc_sc7180_desc, regmap);
+}
+
+static struct platform_driver disp_cc_sc7180_driver = {
+	.probe = disp_cc_sc7180_probe,
+	.driver = {
+		.name = "sc7180-dispcc",
+		.of_match_table = disp_cc_sc7180_match_table,
+	},
+};
+
+static int __init disp_cc_sc7180_init(void)
+{
+	return platform_driver_register(&disp_cc_sc7180_driver);
+}
+subsys_initcall(disp_cc_sc7180_init);
+
+static void __exit disp_cc_sc7180_exit(void)
+{
+	platform_driver_unregister(&disp_cc_sc7180_driver);
+}
+module_exit(disp_cc_sc7180_exit);
+
+MODULE_DESCRIPTION("QTI DISP_CC SC7180 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 0cc4909..5c932cd 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/clk-provider.h>
@@ -29,6 +29,8 @@ enum {
 	P_DSI1_PHY_PLL_OUT_DSICLK,
 	P_GPLL0_OUT_MAIN,
 	P_GPLL0_OUT_MAIN_DIV,
+	P_DP_PHY_PLL_LINK_CLK,
+	P_DP_PHY_PLL_VCO_DIV_CLK,
 };
 
 static const struct parent_map disp_cc_parent_map_0[] = {
@@ -45,6 +47,20 @@ static const char * const disp_cc_parent_names_0[] = {
 	"core_bi_pll_test_se",
 };
 
+static const struct parent_map disp_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_DP_PHY_PLL_LINK_CLK, 1 },
+	{ P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"dp_link_clk_divsel_ten",
+	"dp_vco_divided_clk_src_mux",
+	"core_bi_pll_test_se",
+};
+
 static const struct parent_map disp_cc_parent_map_2[] = {
 	{ P_BI_TCXO, 0 },
 	{ P_CORE_BI_PLL_TEST_SE, 7 },
@@ -128,6 +144,81 @@ static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
 	},
 };
 
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_aux_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+	.cmd_rcgr = 0x219c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_2,
+	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_aux_clk_src",
+		.parent_names = disp_cc_parent_names_2,
+		.num_parents = 2,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+	.cmd_rcgr = 0x2154,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_crypto_clk_src",
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
+		.ops = &clk_byte2_ops,
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+	.cmd_rcgr = 0x2138,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_link_clk_src",
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_byte2_ops,
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel1_clk_src = {
+	.cmd_rcgr = 0x2184,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_pixel1_clk_src",
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_dp_ops,
+	},
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+	.cmd_rcgr = 0x216c,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = disp_cc_parent_map_1,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "disp_cc_mdss_dp_pixel_clk_src",
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_dp_ops,
+	},
+};
+
 static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
 	F(19200000, P_BI_TCXO, 1, 0, 0),
 	{ }
@@ -391,6 +482,114 @@ static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
 	},
 };
 
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+	.halt_reg = 0x2054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_aux_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
+	.halt_reg = 0x2048,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2048,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_crypto_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_crypto_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+	.halt_reg = 0x2040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_link_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_link_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* reset state of disp_cc_mdss_dp_link_div_clk_src divider is 0x3 (div 4) */
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+	.halt_reg = 0x2044,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_link_intf_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_link_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel1_clk = {
+	.halt_reg = 0x2050,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_pixel1_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_pixel1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+	.halt_reg = 0x204c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x204c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "disp_cc_mdss_dp_pixel_clk",
+			.parent_names = (const char *[]){
+				"disp_cc_mdss_dp_pixel_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static struct clk_branch disp_cc_mdss_esc0_clk = {
 	.halt_reg = 0x2038,
 	.halt_check = BRANCH_HALT,
@@ -589,6 +788,19 @@ static struct clk_regmap *disp_cc_sdm845_clocks[] = {
 	[DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
 	[DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] =
 					&disp_cc_mdss_byte1_div_clk_src.clkr,
+	[DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+	[DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+	[DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
+	[DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] =
+					&disp_cc_mdss_dp_crypto_clk_src.clkr,
+	[DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+	[DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+	[DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+	[DISP_CC_MDSS_DP_PIXEL1_CLK] = &disp_cc_mdss_dp_pixel1_clk.clkr,
+	[DISP_CC_MDSS_DP_PIXEL1_CLK_SRC] =
+					&disp_cc_mdss_dp_pixel1_clk_src.clkr,
+	[DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+	[DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
 	[DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
 	[DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
 	[DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
new file mode 100644
index 0000000..3f9c2f6
--- /dev/null
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -0,0 +1,4635 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <linux/reset-controller.h>
+#include <dt-bindings/clock/qcom,gcc-ipq6018.h>
+#include <dt-bindings/reset/qcom,gcc-ipq6018.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-alpha-pll.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "reset.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+enum {
+	P_XO,
+	P_BIAS_PLL,
+	P_UNIPHY0_RX,
+	P_UNIPHY0_TX,
+	P_UNIPHY1_RX,
+	P_BIAS_PLL_NSS_NOC,
+	P_UNIPHY1_TX,
+	P_PCIE20_PHY0_PIPE,
+	P_USB3PHY_0_PIPE,
+	P_GPLL0,
+	P_GPLL0_DIV2,
+	P_GPLL2,
+	P_GPLL4,
+	P_GPLL6,
+	P_SLEEP_CLK,
+	P_UBI32_PLL,
+	P_NSS_CRYPTO_PLL,
+	P_PI_SLEEP,
+};
+
+static struct clk_alpha_pll gpll0_main = {
+	.offset = 0x21000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.enable_reg = 0x0b000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll0_main",
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "xo",
+			},
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+		},
+	},
+};
+
+static struct clk_fixed_factor gpll0_out_main_div2 = {
+	.mult = 1,
+	.div = 2,
+	.hw.init = &(struct clk_init_data){
+		.name = "gpll0_out_main_div2",
+		.parent_hws = (const struct clk_hw *[]){
+				&gpll0_main.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_fixed_factor_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct clk_alpha_pll_postdiv gpll0 = {
+	.offset = 0x21000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll0",
+		.parent_hws = (const struct clk_hw *[]){
+				&gpll0_main.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_ro_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll0_out_main_div2[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &gpll0.clkr.hw},
+	{ .hw = &gpll0_out_main_div2.hw},
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll0_out_main_div2_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL0, 1 },
+	{ P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_alpha_pll ubi32_pll_main = {
+	.offset = 0x25000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA],
+	.flags = SUPPORTS_DYNAMIC_UPDATE,
+	.clkr = {
+		.enable_reg = 0x0b000,
+		.enable_mask = BIT(6),
+		.hw.init = &(struct clk_init_data){
+			.name = "ubi32_pll_main",
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "xo",
+			},
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_huayra_ops,
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv ubi32_pll = {
+	.offset = 0x25000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA],
+	.width = 2,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "ubi32_pll",
+		.parent_hws = (const struct clk_hw *[]){
+				&ubi32_pll_main.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_ro_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct clk_alpha_pll gpll6_main = {
+	.offset = 0x37000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO],
+	.clkr = {
+		.enable_reg = 0x0b000,
+		.enable_mask = BIT(7),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll6_main",
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "xo",
+			},
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv gpll6 = {
+	.offset = 0x37000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO],
+	.width = 2,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll6",
+		.parent_hws = (const struct clk_hw *[]){
+				&gpll6_main.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_ro_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct clk_alpha_pll gpll4_main = {
+	.offset = 0x24000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.enable_reg = 0x0b000,
+		.enable_mask = BIT(5),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll4_main",
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "xo",
+			},
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv gpll4 = {
+	.offset = 0x24000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll4",
+		.parent_hws = (const struct clk_hw *[]){
+				&gpll4_main.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_ro_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static const struct freq_tbl ftbl_pcnoc_bfdcd_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(50000000, P_GPLL0, 16, 0, 0),
+	F(100000000, P_GPLL0, 8, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
+	.cmd_rcgr = 0x27000,
+	.freq_tbl = ftbl_pcnoc_bfdcd_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pcnoc_bfdcd_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_alpha_pll gpll2_main = {
+	.offset = 0x4a000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.enable_reg = 0x0b000,
+		.enable_mask = BIT(2),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll2_main",
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "xo",
+			},
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv gpll2 = {
+	.offset = 0x4a000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll2",
+		.parent_hws = (const struct clk_hw *[]){
+				&gpll2_main.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_ro_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct clk_alpha_pll nss_crypto_pll_main = {
+	.offset = 0x22000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.clkr = {
+		.enable_reg = 0x0b000,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_crypto_pll_main",
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "xo",
+			},
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_ops,
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv nss_crypto_pll = {
+	.offset = 0x22000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_crypto_pll",
+		.parent_hws = (const struct clk_hw *[]){
+				&nss_crypto_pll_main.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_ro_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static const struct freq_tbl ftbl_qdss_tsctr_clk_src[] = {
+	F(160000000, P_GPLL0_DIV2, 2.5, 0, 0),
+	F(320000000, P_GPLL0, 2.5, 0, 0),
+	F(600000000, P_GPLL4, 2, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_gpll4_gpll0_gpll6_gpll0_div2[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &gpll4.clkr.hw },
+	{ .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll6.clkr.hw },
+	{ .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL4, 1 },
+	{ P_GPLL0, 2 },
+	{ P_GPLL6, 3 },
+	{ P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 qdss_tsctr_clk_src = {
+	.cmd_rcgr = 0x29064,
+	.freq_tbl = ftbl_qdss_tsctr_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "qdss_tsctr_clk_src",
+		.parent_data = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_fixed_factor qdss_dap_sync_clk_src = {
+	.mult = 1,
+	.div = 4,
+	.hw.init = &(struct clk_init_data){
+		.name = "qdss_dap_sync_clk_src",
+		.parent_hws = (const struct clk_hw *[]){
+				&qdss_tsctr_clk_src.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_qdss_at_clk_src[] = {
+	F(66670000, P_GPLL0_DIV2, 6, 0, 0),
+	F(240000000, P_GPLL4, 5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 qdss_at_clk_src = {
+	.cmd_rcgr = 0x2900c,
+	.freq_tbl = ftbl_qdss_at_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "qdss_at_clk_src",
+		.parent_data = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_fixed_factor qdss_tsctr_div2_clk_src = {
+	.mult = 1,
+	.div = 2,
+	.hw.init = &(struct clk_init_data){
+		.name = "qdss_tsctr_div2_clk_src",
+		.parent_hws = (const struct clk_hw *[]){
+				&qdss_tsctr_clk_src.clkr.hw },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_nss_ppe_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(300000000, P_BIAS_PLL, 1, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_bias_gpll0_gpll4_nss_ubi32[] = {
+	{ .fw_name = "xo" },
+	{ .fw_name = "bias_pll_cc_clk" },
+	{ .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll4.clkr.hw },
+	{ .hw = &nss_crypto_pll.clkr.hw },
+	{ .hw = &ubi32_pll.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_bias_gpll0_gpll4_nss_ubi32_map[] = {
+	{ P_XO, 0 },
+	{ P_BIAS_PLL, 1 },
+	{ P_GPLL0, 2 },
+	{ P_GPLL4, 3 },
+	{ P_NSS_CRYPTO_PLL, 4 },
+	{ P_UBI32_PLL, 5 },
+};
+
+static struct clk_rcg2 nss_ppe_clk_src = {
+	.cmd_rcgr = 0x68080,
+	.freq_tbl = ftbl_nss_ppe_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_bias_gpll0_gpll4_nss_ubi32_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_ppe_clk_src",
+		.parent_data = gcc_xo_bias_gpll0_gpll4_nss_ubi32,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_branch gcc_xo_clk_src = {
+	.halt_reg = 0x30018,
+	.clkr = {
+		.enable_reg = 0x30018,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_xo_clk_src",
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "xo",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static const struct freq_tbl ftbl_nss_ce_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(200000000, P_GPLL0, 4, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL0, 1 },
+};
+
+static struct clk_rcg2 nss_ce_clk_src = {
+	.cmd_rcgr = 0x68098,
+	.freq_tbl = ftbl_nss_ce_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_ce_clk_src",
+		.parent_data = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_branch gcc_sleep_clk_src = {
+	.halt_reg = 0x30000,
+	.clkr = {
+		.enable_reg = 0x30000,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sleep_clk_src",
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "sleep_clk",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static const struct freq_tbl ftbl_snoc_nssnoc_bfdcd_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+	F(100000000, P_GPLL0, 8, 0, 0),
+	F(133333333, P_GPLL0, 6, 0, 0),
+	F(160000000, P_GPLL0, 5, 0, 0),
+	F(200000000, P_GPLL0, 4, 0, 0),
+	F(266666667, P_GPLL0, 3, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data
+			gcc_xo_gpll0_gpll6_gpll0_out_main_div2[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll6.clkr.hw },
+	{ .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll6_gpll0_out_main_div2_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL0, 1 },
+	{ P_GPLL6, 2 },
+	{ P_GPLL0_DIV2, 3 },
+};
+
+static struct clk_rcg2 snoc_nssnoc_bfdcd_clk_src = {
+	.cmd_rcgr = 0x76054,
+	.freq_tbl = ftbl_snoc_nssnoc_bfdcd_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll6_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "snoc_nssnoc_bfdcd_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll6_gpll0_out_main_div2,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_apss_ahb_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(25000000, P_GPLL0_DIV2, 16, 0, 0),
+	F(50000000, P_GPLL0, 16, 0, 0),
+	F(100000000, P_GPLL0, 8, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 apss_ahb_clk_src = {
+	.cmd_rcgr = 0x46000,
+	.freq_tbl = ftbl_apss_ahb_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "apss_ahb_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(25000000, P_UNIPHY1_RX, 12.5, 0, 0),
+	F(25000000, P_UNIPHY0_RX, 5, 0, 0),
+	F(78125000, P_UNIPHY1_RX, 4, 0, 0),
+	F(125000000, P_UNIPHY1_RX, 2.5, 0, 0),
+	F(125000000, P_UNIPHY0_RX, 1, 0, 0),
+	F(156250000, P_UNIPHY1_RX, 2, 0, 0),
+	F(312500000, P_UNIPHY1_RX, 1, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data
+gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias[] = {
+	{ .fw_name = "xo" },
+	{ .fw_name = "uniphy0_gcc_rx_clk" },
+	{ .fw_name = "uniphy0_gcc_tx_clk" },
+	{ .fw_name = "uniphy1_gcc_rx_clk" },
+	{ .fw_name = "uniphy1_gcc_tx_clk" },
+	{ .hw = &ubi32_pll.clkr.hw },
+	{ .fw_name = "bias_pll_cc_clk" },
+};
+
+static const struct parent_map
+gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map[] = {
+	{ P_XO, 0 },
+	{ P_UNIPHY0_RX, 1 },
+	{ P_UNIPHY0_TX, 2 },
+	{ P_UNIPHY1_RX, 3 },
+	{ P_UNIPHY1_TX, 4 },
+	{ P_UBI32_PLL, 5 },
+	{ P_BIAS_PLL, 6 },
+};
+
+static struct clk_rcg2 nss_port5_rx_clk_src = {
+	.cmd_rcgr = 0x68060,
+	.freq_tbl = ftbl_nss_port5_rx_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_port5_rx_clk_src",
+		.parent_data = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias,
+		.num_parents = 7,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(25000000, P_UNIPHY1_TX, 12.5, 0, 0),
+	F(25000000, P_UNIPHY0_TX, 5, 0, 0),
+	F(78125000, P_UNIPHY1_TX, 4, 0, 0),
+	F(125000000, P_UNIPHY1_TX, 2.5, 0, 0),
+	F(125000000, P_UNIPHY0_TX, 1, 0, 0),
+	F(156250000, P_UNIPHY1_TX, 2, 0, 0),
+	F(312500000, P_UNIPHY1_TX, 1, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data
+gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias[] = {
+	{ .fw_name = "xo" },
+	{ .fw_name = "uniphy0_gcc_tx_clk" },
+	{ .fw_name = "uniphy0_gcc_rx_clk" },
+	{ .fw_name = "uniphy1_gcc_tx_clk" },
+	{ .fw_name = "uniphy1_gcc_rx_clk" },
+	{ .hw = &ubi32_pll.clkr.hw },
+	{ .fw_name = "bias_pll_cc_clk" },
+};
+
+static const struct parent_map
+gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map[] = {
+	{ P_XO, 0 },
+	{ P_UNIPHY0_TX, 1 },
+	{ P_UNIPHY0_RX, 2 },
+	{ P_UNIPHY1_TX, 3 },
+	{ P_UNIPHY1_RX, 4 },
+	{ P_UBI32_PLL, 5 },
+	{ P_BIAS_PLL, 6 },
+};
+
+static struct clk_rcg2 nss_port5_tx_clk_src = {
+	.cmd_rcgr = 0x68068,
+	.freq_tbl = ftbl_nss_port5_tx_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_port5_tx_clk_src",
+		.parent_data = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias,
+		.num_parents = 7,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_pcie_axi_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(200000000, P_GPLL0, 4, 0, 0),
+	F(240000000, P_GPLL4, 5, 0, 0),
+	{ }
+};
+
+static const struct freq_tbl ftbl_pcie_rchng_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(100000000, P_GPLL0, 8, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL0, 1 },
+	{ P_GPLL4, 2 },
+};
+
+static struct clk_rcg2 pcie0_axi_clk_src = {
+	.cmd_rcgr = 0x75054,
+	.freq_tbl = ftbl_pcie_axi_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll4_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pcie0_axi_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll4,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_usb0_master_clk_src[] = {
+	F(80000000, P_GPLL0_DIV2, 5, 0, 0),
+	F(100000000, P_GPLL0, 8, 0, 0),
+	F(133330000, P_GPLL0, 6, 0, 0),
+	F(200000000, P_GPLL0, 4, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_out_main_div2_gpll0[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &gpll0_out_main_div2.hw },
+	{ .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_out_main_div2_gpll0_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL0_DIV2, 2 },
+	{ P_GPLL0, 1 },
+};
+
+static struct clk_rcg2 usb0_master_clk_src = {
+	.cmd_rcgr = 0x3e00c,
+	.freq_tbl = ftbl_usb0_master_clk_src,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_out_main_div2_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "usb0_master_clk_src",
+		.parent_data = gcc_xo_gpll0_out_main_div2_gpll0,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
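+/*
+ * Post-dividers are register-backed: clk_regmap_div drives the divider
+ * field at .reg, bits [.shift, .shift + .width), on the parent in
+ * parent_hws.
+ */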
+static struct clk_regmap_div apss_ahb_postdiv_clk_src = {
+	.reg = 0x46018,
+	.shift = 4,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "apss_ahb_postdiv_clk_src",
+			.parent_hws = (const struct clk_hw *[]){
+					&apss_ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_fixed_factor gcc_xo_div4_clk_src = {
+	.mult = 1,
+	.div = 4,
+	.hw.init = &(struct clk_init_data){
+		.name = "gcc_xo_div4_clk_src",
+		.parent_hws = (const struct clk_hw *[]){
+				&gcc_xo_clk_src.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_fixed_factor_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static const struct freq_tbl ftbl_nss_port1_rx_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(25000000, P_UNIPHY0_RX, 5, 0, 0),
+	F(125000000, P_UNIPHY0_RX, 1, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_uniphy0_rx_tx_ubi32_bias[] = {
+	{ .fw_name = "xo" },
+	{ .fw_name = "uniphy0_gcc_rx_clk" },
+	{ .fw_name = "uniphy0_gcc_tx_clk" },
+	{ .hw = &ubi32_pll.clkr.hw },
+	{ .fw_name = "bias_pll_cc_clk" },
+};
+
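+/*
+ * The parent_map pairs each P_* selector with the hardware mux value for
+ * the matching clk_parent_data entry above; the values may be sparse, as
+ * with UBI32_PLL and BIAS_PLL at 5 and 6 here.
+ */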
+static const struct parent_map gcc_xo_uniphy0_rx_tx_ubi32_bias_map[] = {
+	{ P_XO, 0 },
+	{ P_UNIPHY0_RX, 1 },
+	{ P_UNIPHY0_TX, 2 },
+	{ P_UBI32_PLL, 5 },
+	{ P_BIAS_PLL, 6 },
+};
+
+static struct clk_rcg2 nss_port1_rx_clk_src = {
+	.cmd_rcgr = 0x68020,
+	.freq_tbl = ftbl_nss_port1_rx_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_uniphy0_rx_tx_ubi32_bias_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_port1_rx_clk_src",
+		.parent_data = gcc_xo_uniphy0_rx_tx_ubi32_bias,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_nss_port1_tx_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(25000000, P_UNIPHY0_TX, 5, 0, 0),
+	F(125000000, P_UNIPHY0_TX, 1, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_uniphy0_tx_rx_ubi32_bias[] = {
+	{ .fw_name = "xo" },
+	{ .fw_name = "uniphy0_gcc_tx_clk" },
+	{ .fw_name = "uniphy0_gcc_rx_clk" },
+	{ .hw = &ubi32_pll.clkr.hw },
+	{ .fw_name = "bias_pll_cc_clk" },
+};
+
+static const struct parent_map gcc_xo_uniphy0_tx_rx_ubi32_bias_map[] = {
+	{ P_XO, 0 },
+	{ P_UNIPHY0_TX, 1 },
+	{ P_UNIPHY0_RX, 2 },
+	{ P_UBI32_PLL, 5 },
+	{ P_BIAS_PLL, 6 },
+};
+
+static struct clk_rcg2 nss_port1_tx_clk_src = {
+	.cmd_rcgr = 0x68028,
+	.freq_tbl = ftbl_nss_port1_tx_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_uniphy0_tx_rx_ubi32_bias_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_port1_tx_clk_src",
+		.parent_data = gcc_xo_uniphy0_tx_rx_ubi32_bias,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 nss_port2_rx_clk_src = {
+	.cmd_rcgr = 0x68030,
+	.freq_tbl = ftbl_nss_port1_rx_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_uniphy0_rx_tx_ubi32_bias_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_port2_rx_clk_src",
+		.parent_data = gcc_xo_uniphy0_rx_tx_ubi32_bias,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 nss_port2_tx_clk_src = {
+	.cmd_rcgr = 0x68038,
+	.freq_tbl = ftbl_nss_port1_tx_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_uniphy0_tx_rx_ubi32_bias_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_port2_tx_clk_src",
+		.parent_data = gcc_xo_uniphy0_tx_rx_ubi32_bias,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 nss_port3_rx_clk_src = {
+	.cmd_rcgr = 0x68040,
+	.freq_tbl = ftbl_nss_port1_rx_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_uniphy0_rx_tx_ubi32_bias_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_port3_rx_clk_src",
+		.parent_data = gcc_xo_uniphy0_rx_tx_ubi32_bias,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 nss_port3_tx_clk_src = {
+	.cmd_rcgr = 0x68048,
+	.freq_tbl = ftbl_nss_port1_tx_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_uniphy0_tx_rx_ubi32_bias_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_port3_tx_clk_src",
+		.parent_data = gcc_xo_uniphy0_tx_rx_ubi32_bias,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 nss_port4_rx_clk_src = {
+	.cmd_rcgr = 0x68050,
+	.freq_tbl = ftbl_nss_port1_rx_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_uniphy0_rx_tx_ubi32_bias_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_port4_rx_clk_src",
+		.parent_data = gcc_xo_uniphy0_rx_tx_ubi32_bias,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 nss_port4_tx_clk_src = {
+	.cmd_rcgr = 0x68058,
+	.freq_tbl = ftbl_nss_port1_tx_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_uniphy0_tx_rx_ubi32_bias_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_port4_tx_clk_src",
+		.parent_data = gcc_xo_uniphy0_tx_rx_ubi32_bias,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_regmap_div nss_port5_rx_div_clk_src = {
+	.reg = 0x68440,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_port5_rx_div_clk_src",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port5_rx_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_regmap_div_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap_div nss_port5_tx_div_clk_src = {
+	.reg = 0x68444,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_port5_tx_div_clk_src",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port5_tx_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_regmap_div_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static const struct freq_tbl ftbl_apss_axi_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(100000000, P_GPLL0_DIV2, 4, 0, 0),
+	F(200000000, P_GPLL0, 4, 0, 0),
+	F(308570000, P_GPLL6, 3.5, 0, 0),
+	F(400000000, P_GPLL0, 2, 0, 0),
+	F(533000000, P_GPLL0, 1.5, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll6_ubi32_gpll0_div2[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll6.clkr.hw },
+	{ .hw = &ubi32_pll.clkr.hw },
+	{ .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map
+gcc_xo_gpll0_gpll6_ubi32_gpll0_div2_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL0, 1 },
+	{ P_GPLL6, 2 },
+	{ P_UBI32_PLL, 3 },
+	{ P_GPLL0_DIV2, 6 },
+};
+
+static struct clk_rcg2 apss_axi_clk_src = {
+	.cmd_rcgr = 0x38048,
+	.freq_tbl = ftbl_apss_axi_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll6_ubi32_gpll0_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "apss_axi_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll6_ubi32_gpll0_div2,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_nss_crypto_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(300000000, P_NSS_CRYPTO_PLL, 2, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_nss_crypto_pll_gpll0[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &nss_crypto_pll.clkr.hw },
+	{ .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_nss_crypto_pll_gpll0_map[] = {
+	{ P_XO, 0 },
+	{ P_NSS_CRYPTO_PLL, 1 },
+	{ P_GPLL0, 2 },
+};
+
+static struct clk_rcg2 nss_crypto_clk_src = {
+	.cmd_rcgr = 0x68144,
+	.freq_tbl = ftbl_nss_crypto_clk_src,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_nss_crypto_pll_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_crypto_clk_src",
+		.parent_data = gcc_xo_nss_crypto_pll_gpll0,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_regmap_div nss_port1_rx_div_clk_src = {
+	.reg = 0x68400,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_port1_rx_div_clk_src",
+			.parent_hws = (const struct clk_hw *[]){
+				&nss_port1_rx_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_regmap_div_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap_div nss_port1_tx_div_clk_src = {
+	.reg = 0x68404,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_port1_tx_div_clk_src",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port1_tx_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_regmap_div_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap_div nss_port2_rx_div_clk_src = {
+	.reg = 0x68410,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_port2_rx_div_clk_src",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port2_rx_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_regmap_div_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap_div nss_port2_tx_div_clk_src = {
+	.reg = 0x68414,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_port2_tx_div_clk_src",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port2_tx_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_regmap_div_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap_div nss_port3_rx_div_clk_src = {
+	.reg = 0x68420,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_port3_rx_div_clk_src",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port3_rx_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_regmap_div_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap_div nss_port3_tx_div_clk_src = {
+	.reg = 0x68424,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_port3_tx_div_clk_src",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port3_tx_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_regmap_div_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap_div nss_port4_rx_div_clk_src = {
+	.reg = 0x68430,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_port4_rx_div_clk_src",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port4_rx_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_regmap_div_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap_div nss_port4_tx_div_clk_src = {
+	.reg = 0x68434,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_port4_tx_div_clk_src",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port4_tx_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_regmap_div_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static const struct freq_tbl ftbl_nss_ubi_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(149760000, P_UBI32_PLL, 10, 0, 0),
+	F(187200000, P_UBI32_PLL, 8, 0, 0),
+	F(249600000, P_UBI32_PLL, 6, 0, 0),
+	F(374400000, P_UBI32_PLL, 4, 0, 0),
+	F(748800000, P_UBI32_PLL, 2, 0, 0),
+	F(1497600000, P_UBI32_PLL, 1, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data
+			gcc_xo_ubi32_pll_gpll0_gpll2_gpll4_gpll6[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &ubi32_pll.clkr.hw },
+	{ .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll2.clkr.hw },
+	{ .hw = &gpll4.clkr.hw },
+	{ .hw = &gpll6.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_ubi32_gpll0_gpll2_gpll4_gpll6_map[] = {
+	{ P_XO, 0 },
+	{ P_UBI32_PLL, 1 },
+	{ P_GPLL0, 2 },
+	{ P_GPLL2, 3 },
+	{ P_GPLL4, 4 },
+	{ P_GPLL6, 5 },
+};
+
+static struct clk_rcg2 nss_ubi0_clk_src = {
+	.cmd_rcgr = 0x68104,
+	.freq_tbl = ftbl_nss_ubi_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_ubi32_gpll0_gpll2_gpll4_gpll6_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_ubi0_clk_src",
+		.parent_data = gcc_xo_ubi32_pll_gpll0_gpll2_gpll4_gpll6,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static const struct freq_tbl ftbl_adss_pwm_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(100000000, P_GPLL0, 8, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 adss_pwm_clk_src = {
+	.cmd_rcgr = 0x1c008,
+	.freq_tbl = ftbl_adss_pwm_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "adss_pwm_clk_src",
+		.parent_data = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_blsp1_qup_i2c_apps_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(25000000, P_GPLL0_DIV2, 16, 0, 0),
+	F(50000000, P_GPLL0, 16, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x0200c,
+	.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup1_i2c_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_blsp1_qup_spi_apps_clk_src[] = {
+	F(960000, P_XO, 10, 2, 5),
+	F(4800000, P_XO, 5, 0, 0),
+	F(9600000, P_XO, 2, 4, 5),
+	F(12500000, P_GPLL0_DIV2, 16, 1, 2),
+	F(16000000, P_GPLL0, 10, 1, 5),
+	F(24000000, P_XO, 1, 0, 0),
+	F(25000000, P_GPLL0, 16, 1, 2),
+	F(50000000, P_GPLL0, 16, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+	.cmd_rcgr = 0x02024,
+	.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup1_spi_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x03000,
+	.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup2_i2c_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+	.cmd_rcgr = 0x03014,
+	.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup2_spi_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x04000,
+	.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup3_i2c_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+	.cmd_rcgr = 0x04014,
+	.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup3_spi_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x05000,
+	.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup4_i2c_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+	.cmd_rcgr = 0x05014,
+	.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup4_spi_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x06000,
+	.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup5_i2c_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+	.cmd_rcgr = 0x06014,
+	.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup5_spi_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+	.cmd_rcgr = 0x07000,
+	.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup6_i2c_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+	.cmd_rcgr = 0x07014,
+	.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_qup6_spi_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_blsp1_uart_apps_clk_src[] = {
+	F(3686400, P_GPLL0_DIV2, 1, 144, 15625),
+	F(7372800, P_GPLL0_DIV2, 1, 288, 15625),
+	F(14745600, P_GPLL0_DIV2, 1, 576, 15625),
+	F(16000000, P_GPLL0_DIV2, 5, 1, 5),
+	F(24000000, P_XO, 1, 0, 0),
+	F(24000000, P_GPLL0, 1, 3, 100),
+	F(25000000, P_GPLL0, 16, 1, 2),
+	F(32000000, P_GPLL0, 1, 1, 25),
+	F(40000000, P_GPLL0, 1, 1, 20),
+	F(46400000, P_GPLL0, 1, 29, 500),
+	F(48000000, P_GPLL0, 1, 3, 50),
+	F(51200000, P_GPLL0, 1, 8, 125),
+	F(56000000, P_GPLL0, 1, 7, 100),
+	F(58982400, P_GPLL0, 1, 1152, 15625),
+	F(60000000, P_GPLL0, 1, 3, 40),
+	F(64000000, P_GPLL0, 12.5, 1, 1),
+	{ }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+	.cmd_rcgr = 0x02044,
+	.freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_uart1_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+	.cmd_rcgr = 0x03034,
+	.freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_uart2_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+	.cmd_rcgr = 0x04034,
+	.freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_uart3_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
+	.cmd_rcgr = 0x05034,
+	.freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_uart4_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
+	.cmd_rcgr = 0x06034,
+	.freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_uart5_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
+	.cmd_rcgr = 0x07034,
+	.freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "blsp1_uart6_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_crypto_clk_src[] = {
+	F(40000000, P_GPLL0_DIV2, 10, 0, 0),
+	F(80000000, P_GPLL0, 10, 0, 0),
+	F(100000000, P_GPLL0, 8, 0, 0),
+	F(160000000, P_GPLL0, 5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 crypto_clk_src = {
+	.cmd_rcgr = 0x16004,
+	.freq_tbl = ftbl_crypto_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "crypto_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gp_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+	F(100000000, P_GPLL0, 8, 0, 0),
+	F(200000000, P_GPLL0, 4, 0, 0),
+	F(266666666, P_GPLL0, 3, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_sleep_clk[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll6.clkr.hw },
+	{ .hw = &gpll0_out_main_div2.hw },
+	{ .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll6_gpll0_sleep_clk_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL0, 1 },
+	{ P_GPLL6, 2 },
+	{ P_GPLL0_DIV2, 4 },
+	{ P_SLEEP_CLK, 6 },
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+	.cmd_rcgr = 0x08004,
+	.freq_tbl = ftbl_gp_clk_src,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll6_gpll0_sleep_clk_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gp1_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll6_gpll0_sleep_clk,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+	.cmd_rcgr = 0x09004,
+	.freq_tbl = ftbl_gp_clk_src,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll6_gpll0_sleep_clk_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gp2_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll6_gpll0_sleep_clk,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+	.cmd_rcgr = 0x0a004,
+	.freq_tbl = ftbl_gp_clk_src,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll6_gpll0_sleep_clk_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gp3_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll6_gpll0_sleep_clk,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_fixed_factor nss_ppe_cdiv_clk_src = {
+	.mult = 1,
+	.div = 4,
+	.hw.init = &(struct clk_init_data){
+		.name = "nss_ppe_cdiv_clk_src",
+		.parent_hws = (const struct clk_hw *[]){
+				&nss_ppe_clk_src.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_fixed_factor_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
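+/*
+ * nss_ubi0_div uses the read-only divider ops: the kernel reads back the
+ * configured divide value but never reprograms it.
+ */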
+static struct clk_regmap_div nss_ubi0_div_clk_src = {
+	.reg = 0x68118,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_ubi0_div_clk_src",
+			.parent_hws = (const struct clk_hw *[]){
+				&nss_ubi0_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_regmap_div_ro_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_core_pi_sleep_clk[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &gpll0.clkr.hw },
+	{ .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll0_core_pi_sleep_clk_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL0, 2 },
+	{ P_PI_SLEEP, 6 },
+};
+
+static struct clk_rcg2 pcie0_aux_clk_src = {
+	.cmd_rcgr = 0x75024,
+	.freq_tbl = ftbl_pcie_aux_clk_src,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_core_pi_sleep_clk_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pcie0_aux_clk_src",
+		.parent_data = gcc_xo_gpll0_core_pi_sleep_clk,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct clk_parent_data gcc_pcie20_phy0_pipe_clk_xo[] = {
+	{ .fw_name = "pcie20_phy0_pipe_clk" },
+	{ .fw_name = "xo" },
+};
+
+static const struct parent_map gcc_pcie20_phy0_pipe_clk_xo_map[] = {
+	{ P_PCIE20_PHY0_PIPE, 0 },
+	{ P_XO, 2 },
+};
+
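+/*
+ * The PCIe pipe source is a plain register mux (.reg/.shift/.width)
+ * choosing between the PHY pipe clock and XO, using the select values
+ * from the parent_map above.
+ */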
+static struct clk_regmap_mux pcie0_pipe_clk_src = {
+	.reg = 0x7501c,
+	.shift = 8,
+	.width = 2,
+	.parent_map = gcc_pcie20_phy0_pipe_clk_xo_map,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "pcie0_pipe_clk_src",
+			.parent_data = gcc_pcie20_phy0_pipe_clk_xo,
+			.num_parents = 2,
+			.ops = &clk_regmap_mux_closest_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static const struct freq_tbl ftbl_sdcc_apps_clk_src[] = {
+	F(144000, P_XO, 16, 12, 125),
+	F(400000, P_XO, 12, 1, 5),
+	F(24000000, P_GPLL2, 12, 1, 4),
+	F(48000000, P_GPLL2, 12, 1, 2),
+	F(96000000, P_GPLL2, 12, 0, 0),
+	F(177777778, P_GPLL0, 4.5, 0, 0),
+	F(192000000, P_GPLL2, 6, 0, 0),
+	F(384000000, P_GPLL2, 3, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data
+			gcc_xo_gpll0_gpll2_gpll0_out_main_div2[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll2.clkr.hw },
+	{ .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_gpll0_out_main_div2_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL0, 1 },
+	{ P_GPLL2, 2 },
+	{ P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+	.cmd_rcgr = 0x42004,
+	.freq_tbl = ftbl_sdcc_apps_clk_src,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll2_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "sdcc1_apps_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll2_gpll0_out_main_div2,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_usb_aux_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 usb0_aux_clk_src = {
+	.cmd_rcgr = 0x3e05c,
+	.freq_tbl = ftbl_usb_aux_clk_src,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_core_pi_sleep_clk_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "usb0_aux_clk_src",
+		.parent_data = gcc_xo_gpll0_core_pi_sleep_clk,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_usb_mock_utmi_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(60000000, P_GPLL6, 6, 1, 3),
+	{ }
+};
+
+static const struct clk_parent_data
+			gcc_xo_gpll6_gpll0_gpll0_out_main_div2[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &gpll6.clkr.hw },
+	{ .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll6_gpll0_gpll0_out_main_div2_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL6, 1 },
+	{ P_GPLL0, 3 },
+	{ P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 usb0_mock_utmi_clk_src = {
+	.cmd_rcgr = 0x3e020,
+	.freq_tbl = ftbl_usb_mock_utmi_clk_src,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll6_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "usb0_mock_utmi_clk_src",
+		.parent_data = gcc_xo_gpll6_gpll0_gpll0_out_main_div2,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct clk_parent_data gcc_usb3phy_0_cc_pipe_clk_xo[] = {
+	{ .fw_name = "usb3phy_0_cc_pipe_clk" },
+	{ .fw_name = "xo" },
+};
+
+static const struct parent_map gcc_usb3phy_0_cc_pipe_clk_xo_map[] = {
+	{ P_USB3PHY_0_PIPE, 0 },
+	{ P_XO, 2 },
+};
+
+static struct clk_regmap_mux usb0_pipe_clk_src = {
+	.reg = 0x3e048,
+	.shift = 8,
+	.width = 2,
+	.parent_map = gcc_usb3phy_0_cc_pipe_clk_xo_map,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "usb0_pipe_clk_src",
+			.parent_data = gcc_usb3phy_0_cc_pipe_clk_xo,
+			.num_parents = 2,
+			.ops = &clk_regmap_mux_closest_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
+	F(80000000, P_GPLL0_DIV2, 5, 0, 0),
+	F(160000000, P_GPLL0, 5, 0, 0),
+	F(216000000, P_GPLL6, 5, 0, 0),
+	F(308570000, P_GPLL6, 3.5, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = {
+	{ .fw_name = "xo"},
+	{ .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll6.clkr.hw },
+	{ .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll6_gpll0_div2_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL0, 1 },
+	{ P_GPLL6, 2 },
+	{ P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+	.cmd_rcgr = 0x5d000,
+	.freq_tbl = ftbl_sdcc_ice_core_clk_src,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll6_gpll0_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "sdcc1_ice_core_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll6_gpll0_div2,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_qdss_stm_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+	F(100000000, P_GPLL0, 8, 0, 0),
+	F(200000000, P_GPLL0, 4, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 qdss_stm_clk_src = {
+	.cmd_rcgr = 0x2902C,
+	.freq_tbl = ftbl_qdss_stm_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "qdss_stm_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_qdss_traceclkin_clk_src[] = {
+	F(80000000, P_GPLL0_DIV2, 5, 0, 0),
+	F(160000000, P_GPLL0, 5, 0, 0),
+	F(300000000, P_GPLL4, 4, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_gpll4_gpll0_gpll0_div2[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &gpll4.clkr.hw },
+	{ .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll4_gpll0_gpll0_div2_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL4, 1 },
+	{ P_GPLL0, 2 },
+	{ P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 qdss_traceclkin_clk_src = {
+	.cmd_rcgr = 0x29048,
+	.freq_tbl = ftbl_qdss_traceclkin_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll4_gpll0_gpll0_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "qdss_traceclkin_clk_src",
+		.parent_data = gcc_xo_gpll4_gpll0_gpll0_div2,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 usb1_mock_utmi_clk_src = {
+	.cmd_rcgr = 0x3f020,
+	.freq_tbl = ftbl_usb_mock_utmi_clk_src,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll6_gpll0_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "usb1_mock_utmi_clk_src",
+		.parent_data = gcc_xo_gpll6_gpll0_gpll0_out_main_div2,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
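+/*
+ * Branch (gate) clocks: enable_mask at enable_reg gates the clock and
+ * halt_reg is polled to confirm it is running.  Branches marked
+ * BRANCH_HALT_VOTED are enabled via a vote bit in a shared register
+ * (0x0b004 below) instead of a dedicated enable register.
+ */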
+static struct clk_branch gcc_adss_pwm_clk = {
+	.halt_reg = 0x1c020,
+	.clkr = {
+		.enable_reg = 0x1c020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_adss_pwm_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&adss_pwm_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_apss_ahb_clk = {
+	.halt_reg = 0x4601c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x0b004,
+		.enable_mask = BIT(14),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_apss_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&apss_ahb_postdiv_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static const struct freq_tbl ftbl_system_noc_bfdcd_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+	F(100000000, P_GPLL0, 8, 0, 0),
+	F(133333333, P_GPLL0, 6, 0, 0),
+	F(160000000, P_GPLL0, 5, 0, 0),
+	F(200000000, P_GPLL0, 4, 0, 0),
+	F(266666667, P_GPLL0, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 system_noc_bfdcd_clk_src = {
+	.cmd_rcgr = 0x26004,
+	.freq_tbl = ftbl_system_noc_bfdcd_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll6_gpll0_out_main_div2_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "system_noc_bfdcd_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll6_gpll0_out_main_div2,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_ubi32_mem_noc_bfdcd_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(307670000, P_BIAS_PLL_NSS_NOC, 1.5, 0, 0),
+	F(533333333, P_GPLL0, 1.5, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data
+			gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll2.clkr.hw },
+	{ .fw_name = "bias_pll_nss_noc_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL0, 1 },
+	{ P_GPLL2, 3 },
+	{ P_BIAS_PLL_NSS_NOC, 4 },
+};
+
+static struct clk_rcg2 ubi32_mem_noc_bfdcd_clk_src = {
+	.cmd_rcgr = 0x68088,
+	.freq_tbl = ftbl_ubi32_mem_noc_bfdcd_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "ubi32_mem_noc_bfdcd_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_branch gcc_apss_axi_clk = {
+	.halt_reg = 0x46020,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x0b004,
+		.enable_mask = BIT(13),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_apss_axi_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&apss_axi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+	.halt_reg = 0x01008,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x0b004,
+		.enable_mask = BIT(10),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+	.halt_reg = 0x02008,
+	.clkr = {
+		.enable_reg = 0x02008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup1_i2c_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_qup1_i2c_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+	.halt_reg = 0x02004,
+	.clkr = {
+		.enable_reg = 0x02004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup1_spi_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_qup1_spi_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+	.halt_reg = 0x03010,
+	.clkr = {
+		.enable_reg = 0x03010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup2_i2c_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_qup2_i2c_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+	.halt_reg = 0x0300c,
+	.clkr = {
+		.enable_reg = 0x0300c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup2_spi_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_qup2_spi_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+	.halt_reg = 0x04010,
+	.clkr = {
+		.enable_reg = 0x04010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup3_i2c_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_qup3_i2c_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+	.halt_reg = 0x0400c,
+	.clkr = {
+		.enable_reg = 0x0400c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup3_spi_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_qup3_spi_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+	.halt_reg = 0x05010,
+	.clkr = {
+		.enable_reg = 0x05010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup4_i2c_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_qup4_i2c_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+	.halt_reg = 0x0500c,
+	.clkr = {
+		.enable_reg = 0x0500c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup4_spi_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_qup4_spi_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+	.halt_reg = 0x06010,
+	.clkr = {
+		.enable_reg = 0x06010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup5_i2c_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_qup5_i2c_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+	.halt_reg = 0x0600c,
+	.clkr = {
+		.enable_reg = 0x0600c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup5_spi_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_qup5_spi_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+	.halt_reg = 0x0700c,
+	.clkr = {
+		.enable_reg = 0x0700c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup6_spi_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_qup6_spi_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+	.halt_reg = 0x0203c,
+	.clkr = {
+		.enable_reg = 0x0203c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart1_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_uart1_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+	.halt_reg = 0x0302c,
+	.clkr = {
+		.enable_reg = 0x0302c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart2_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_uart2_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+	.halt_reg = 0x0402c,
+	.clkr = {
+		.enable_reg = 0x0402c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart3_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_uart3_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+	.halt_reg = 0x0502c,
+	.clkr = {
+		.enable_reg = 0x0502c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart4_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_uart4_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart5_apps_clk = {
+	.halt_reg = 0x0602c,
+	.clkr = {
+		.enable_reg = 0x0602c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart5_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_uart5_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart6_apps_clk = {
+	.halt_reg = 0x0702c,
+	.clkr = {
+		.enable_reg = 0x0702c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart6_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&blsp1_uart6_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+	.halt_reg = 0x16024,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x0b004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_crypto_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+	.halt_reg = 0x16020,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x0b004,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_crypto_axi_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_crypto_clk = {
+	.halt_reg = 0x1601c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x0b004,
+		.enable_mask = BIT(2),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_crypto_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&crypto_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_fixed_factor gpll6_out_main_div2 = {
+	.mult = 1,
+	.div = 2,
+	.hw.init = &(struct clk_init_data){
+		.name = "gpll6_out_main_div2",
+		.parent_hws = (const struct clk_hw *[]){
+				&gpll6_main.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_fixed_factor_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct clk_branch gcc_xo_clk = {
+	.halt_reg = 0x30030,
+	.clkr = {
+		.enable_reg = 0x30030,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_xo_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&gcc_xo_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp1_clk = {
+	.halt_reg = 0x08000,
+	.clkr = {
+		.enable_reg = 0x08000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp1_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&gp1_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp2_clk = {
+	.halt_reg = 0x09000,
+	.clkr = {
+		.enable_reg = 0x09000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp2_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&gp2_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp3_clk = {
+	.halt_reg = 0x0a000,
+	.clkr = {
+		.enable_reg = 0x0a000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp3_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&gp3_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_mdio_ahb_clk = {
+	.halt_reg = 0x58004,
+	.clkr = {
+		.enable_reg = 0x58004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_mdio_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_crypto_ppe_clk = {
+	.halt_reg = 0x68310,
+	.clkr = {
+		.enable_reg = 0x68310,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_crypto_ppe_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ppe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_ce_apb_clk = {
+	.halt_reg = 0x68174,
+	.clkr = {
+		.enable_reg = 0x68174,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_ce_apb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ce_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_ce_axi_clk = {
+	.halt_reg = 0x68170,
+	.clkr = {
+		.enable_reg = 0x68170,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_ce_axi_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ce_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_cfg_clk = {
+	.halt_reg = 0x68160,
+	.clkr = {
+		.enable_reg = 0x68160,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_cfg_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_crypto_clk = {
+	.halt_reg = 0x68164,
+	.clkr = {
+		.enable_reg = 0x68164,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_crypto_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_crypto_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_csr_clk = {
+	.halt_reg = 0x68318,
+	.clkr = {
+		.enable_reg = 0x68318,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_csr_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ce_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_edma_cfg_clk = {
+	.halt_reg = 0x6819C,
+	.clkr = {
+		.enable_reg = 0x6819C,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_edma_cfg_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ppe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_edma_clk = {
+	.halt_reg = 0x68198,
+	.clkr = {
+		.enable_reg = 0x68198,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_edma_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ppe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_noc_clk = {
+	.halt_reg = 0x68168,
+	.clkr = {
+		.enable_reg = 0x68168,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_noc_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&snoc_nssnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ubi0_utcm_clk = {
+	.halt_reg = 0x2606c,
+	.clkr = {
+		.enable_reg = 0x2606c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ubi0_utcm_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&snoc_nssnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_snoc_nssnoc_clk = {
+	.halt_reg = 0x26070,
+	.clkr = {
+		.enable_reg = 0x26070,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_snoc_nssnoc_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&snoc_nssnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static const struct freq_tbl ftbl_wcss_ahb_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(133333333, P_GPLL0, 6, 0, 0),
+	{ }
+};
+
+static const struct freq_tbl ftbl_q6_axi_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(400000000, P_GPLL0, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 wcss_ahb_clk_src = {
+	.cmd_rcgr = 0x59020,
+	.freq_tbl = ftbl_wcss_ahb_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "wcss_ahb_clk_src",
+		.parent_data = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll2_gpll4_gpll6[] = {
+	{ .fw_name = "xo" },
+	{ .hw = &gpll0.clkr.hw },
+	{ .hw = &gpll2.clkr.hw },
+	{ .hw = &gpll4.clkr.hw },
+	{ .hw = &gpll6.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_gpll4_gpll6_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL0, 1 },
+	{ P_GPLL2, 2 },
+	{ P_GPLL4, 3 },
+	{ P_GPLL6, 4 },
+};
+
+static struct clk_rcg2 q6_axi_clk_src = {
+	.cmd_rcgr = 0x59120,
+	.freq_tbl = ftbl_q6_axi_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_gpll2_gpll4_gpll6_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "q6_axi_clk_src",
+		.parent_data = gcc_xo_gpll0_gpll2_gpll4_gpll6,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_lpass_core_axim_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(100000000, P_GPLL0, 8, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 lpass_core_axim_clk_src = {
+	.cmd_rcgr = 0x1F020,
+	.freq_tbl = ftbl_lpass_core_axim_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "lpass_core_axim_clk_src",
+		.parent_data = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_lpass_snoc_cfg_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(266666667, P_GPLL0, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 lpass_snoc_cfg_clk_src = {
+	.cmd_rcgr = 0x1F040,
+	.freq_tbl = ftbl_lpass_snoc_cfg_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "lpass_snoc_cfg_clk_src",
+		.parent_data = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_lpass_q6_axim_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(400000000, P_GPLL0, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 lpass_q6_axim_clk_src = {
+	.cmd_rcgr = 0x1F008,
+	.freq_tbl = ftbl_lpass_q6_axim_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "lpass_q6_axim_clk_src",
+		.parent_data = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_rbcpr_wcss_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(50000000, P_GPLL0, 16, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 rbcpr_wcss_clk_src = {
+	.cmd_rcgr = 0x3a00c,
+	.freq_tbl = ftbl_rbcpr_wcss_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_out_main_div2_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "rbcpr_wcss_clk_src",
+		.parent_data = gcc_xo_gpll0_out_main_div2_gpll0,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_branch gcc_lpass_core_axim_clk = {
+	.halt_reg = 0x1F028,
+	.clkr = {
+		.enable_reg = 0x1F028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_lpass_core_axim_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&lpass_core_axim_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_lpass_snoc_cfg_clk = {
+	.halt_reg = 0x1F048,
+	.clkr = {
+		.enable_reg = 0x1F048,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_lpass_snoc_cfg_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&lpass_snoc_cfg_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_lpass_q6_axim_clk = {
+	.halt_reg = 0x1F010,
+	.clkr = {
+		.enable_reg = 0x1F010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_lpass_q6_axim_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&lpass_q6_axim_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_lpass_q6_atbm_at_clk = {
+	.halt_reg = 0x1F018,
+	.clkr = {
+		.enable_reg = 0x1F018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_lpass_q6_atbm_at_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&qdss_at_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_lpass_q6_pclkdbg_clk = {
+	.halt_reg = 0x1F01C,
+	.clkr = {
+		.enable_reg = 0x1F01C,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_lpass_q6_pclkdbg_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&qdss_dap_sync_clk_src.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_lpass_q6ss_tsctr_1to2_clk = {
+	.halt_reg = 0x1F014,
+	.clkr = {
+		.enable_reg = 0x1F014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_lpass_q6ss_tsctr_1to2_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&qdss_tsctr_div2_clk_src.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_lpass_q6ss_trig_clk = {
+	.halt_reg = 0x1F038,
+	.clkr = {
+		.enable_reg = 0x1F038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_lpass_q6ss_trig_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&qdss_dap_sync_clk_src.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_lpass_tbu_clk = {
+	.halt_reg = 0x12094,
+	.clkr = {
+		.enable_reg = 0xb00c,
+		.enable_mask = BIT(10),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_lpass_tbu_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&lpass_q6_axim_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcnoc_lpass_clk = {
+	.halt_reg = 0x27020,
+	.clkr = {
+		.enable_reg = 0x27020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcnoc_lpass_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&lpass_core_axim_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_mem_noc_lpass_clk = {
+	.halt_reg = 0x1D044,
+	.clkr = {
+		.enable_reg = 0x1D044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_mem_noc_lpass_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&lpass_q6_axim_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_snoc_lpass_cfg_clk = {
+	.halt_reg = 0x26074,
+	.clkr = {
+		.enable_reg = 0x26074,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_snoc_lpass_cfg_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&lpass_snoc_cfg_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_mem_noc_ubi32_clk = {
+	.halt_reg = 0x1D03C,
+	.clkr = {
+		.enable_reg = 0x1D03C,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_mem_noc_ubi32_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&ubi32_mem_noc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_port1_rx_clk = {
+	.halt_reg = 0x68240,
+	.clkr = {
+		.enable_reg = 0x68240,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_port1_rx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port1_rx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_port1_tx_clk = {
+	.halt_reg = 0x68244,
+	.clkr = {
+		.enable_reg = 0x68244,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_port1_tx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port1_tx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_port2_rx_clk = {
+	.halt_reg = 0x68248,
+	.clkr = {
+		.enable_reg = 0x68248,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_port2_rx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port2_rx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_port2_tx_clk = {
+	.halt_reg = 0x6824c,
+	.clkr = {
+		.enable_reg = 0x6824c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_port2_tx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port2_tx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_port3_rx_clk = {
+	.halt_reg = 0x68250,
+	.clkr = {
+		.enable_reg = 0x68250,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_port3_rx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port3_rx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_port3_tx_clk = {
+	.halt_reg = 0x68254,
+	.clkr = {
+		.enable_reg = 0x68254,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_port3_tx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port3_tx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_port4_rx_clk = {
+	.halt_reg = 0x68258,
+	.clkr = {
+		.enable_reg = 0x68258,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_port4_rx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port4_rx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_port4_tx_clk = {
+	.halt_reg = 0x6825c,
+	.clkr = {
+		.enable_reg = 0x6825c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_port4_tx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port4_tx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_port5_rx_clk = {
+	.halt_reg = 0x68260,
+	.clkr = {
+		.enable_reg = 0x68260,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_port5_rx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port5_rx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_port5_tx_clk = {
+	.halt_reg = 0x68264,
+	.clkr = {
+		.enable_reg = 0x68264,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_port5_tx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port5_tx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_ppe_cfg_clk = {
+	.halt_reg = 0x68194,
+	.clkr = {
+		.enable_reg = 0x68194,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_ppe_cfg_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ppe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_ppe_clk = {
+	.halt_reg = 0x68190,
+	.clkr = {
+		.enable_reg = 0x68190,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_ppe_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ppe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_ppe_ipe_clk = {
+	.halt_reg = 0x68338,
+	.clkr = {
+		.enable_reg = 0x68338,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_ppe_ipe_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ppe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nss_ptp_ref_clk = {
+	.halt_reg = 0x6816C,
+	.clkr = {
+		.enable_reg = 0x6816C,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nss_ptp_ref_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ppe_cdiv_clk_src.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nssnoc_ce_apb_clk = {
+	.halt_reg = 0x6830C,
+	.clkr = {
+		.enable_reg = 0x6830C,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nssnoc_ce_apb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ce_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nssnoc_ce_axi_clk = {
+	.halt_reg = 0x68308,
+	.clkr = {
+		.enable_reg = 0x68308,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nssnoc_ce_axi_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ce_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nssnoc_crypto_clk = {
+	.halt_reg = 0x68314,
+	.clkr = {
+		.enable_reg = 0x68314,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nssnoc_crypto_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_crypto_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nssnoc_ppe_cfg_clk = {
+	.halt_reg = 0x68304,
+	.clkr = {
+		.enable_reg = 0x68304,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nssnoc_ppe_cfg_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ppe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nssnoc_ppe_clk = {
+	.halt_reg = 0x68300,
+	.clkr = {
+		.enable_reg = 0x68300,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nssnoc_ppe_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ppe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nssnoc_qosgen_ref_clk = {
+	.halt_reg = 0x68180,
+	.clkr = {
+		.enable_reg = 0x68180,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nssnoc_qosgen_ref_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&gcc_xo_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nssnoc_snoc_clk = {
+	.halt_reg = 0x68188,
+	.clkr = {
+		.enable_reg = 0x68188,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nssnoc_snoc_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&system_noc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nssnoc_timeout_ref_clk = {
+	.halt_reg = 0x68184,
+	.clkr = {
+		.enable_reg = 0x68184,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nssnoc_timeout_ref_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&gcc_xo_div4_clk_src.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_nssnoc_ubi0_ahb_clk = {
+	.halt_reg = 0x68270,
+	.clkr = {
+		.enable_reg = 0x68270,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_nssnoc_ubi0_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ce_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_port1_mac_clk = {
+	.halt_reg = 0x68320,
+	.clkr = {
+		.enable_reg = 0x68320,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_port1_mac_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ppe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_port2_mac_clk = {
+	.halt_reg = 0x68324,
+	.clkr = {
+		.enable_reg = 0x68324,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_port2_mac_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ppe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_port3_mac_clk = {
+	.halt_reg = 0x68328,
+	.clkr = {
+		.enable_reg = 0x68328,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_port3_mac_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ppe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_port4_mac_clk = {
+	.halt_reg = 0x6832c,
+	.clkr = {
+		.enable_reg = 0x6832c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_port4_mac_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ppe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_port5_mac_clk = {
+	.halt_reg = 0x68330,
+	.clkr = {
+		.enable_reg = 0x68330,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_port5_mac_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ppe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ubi0_ahb_clk = {
+	.halt_reg = 0x6820C,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x6820C,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ubi0_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ce_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ubi0_axi_clk = {
+	.halt_reg = 0x68200,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x68200,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ubi0_axi_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&ubi32_mem_noc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ubi0_nc_axi_clk = {
+	.halt_reg = 0x68204,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x68204,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ubi0_nc_axi_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&snoc_nssnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ubi0_core_clk = {
+	.halt_reg = 0x68210,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x68210,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ubi0_core_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_ubi0_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie0_ahb_clk = {
+	.halt_reg = 0x75010,
+	.clkr = {
+		.enable_reg = 0x75010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie0_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie0_aux_clk = {
+	.halt_reg = 0x75014,
+	.clkr = {
+		.enable_reg = 0x75014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie0_aux_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcie0_aux_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie0_axi_m_clk = {
+	.halt_reg = 0x75008,
+	.clkr = {
+		.enable_reg = 0x75008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie0_axi_m_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcie0_axi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie0_axi_s_clk = {
+	.halt_reg = 0x7500c,
+	.clkr = {
+		.enable_reg = 0x7500c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie0_axi_s_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcie0_axi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sys_noc_pcie0_axi_clk = {
+	.halt_reg = 0x26048,
+	.clkr = {
+		.enable_reg = 0x26048,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sys_noc_pcie0_axi_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcie0_axi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie0_pipe_clk = {
+	.halt_reg = 0x75018,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x75018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie0_pipe_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcie0_pipe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+	.halt_reg = 0x13004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x0b004,
+		.enable_mask = BIT(8),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_prng_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qdss_dap_clk = {
+	.halt_reg = 0x29084,
+	.clkr = {
+		.enable_reg = 0x29084,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qdss_dap_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&qdss_dap_sync_clk_src.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qpic_ahb_clk = {
+	.halt_reg = 0x57024,
+	.clkr = {
+		.enable_reg = 0x57024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qpic_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qpic_clk = {
+	.halt_reg = 0x57020,
+	.clkr = {
+		.enable_reg = 0x57020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qpic_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+	.halt_reg = 0x4201c,
+	.clkr = {
+		.enable_reg = 0x4201c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+	.halt_reg = 0x42018,
+	.clkr = {
+		.enable_reg = 0x42018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_apps_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&sdcc1_apps_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy0_ahb_clk = {
+	.halt_reg = 0x56008,
+	.clkr = {
+		.enable_reg = 0x56008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy0_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy0_port1_rx_clk = {
+	.halt_reg = 0x56010,
+	.clkr = {
+		.enable_reg = 0x56010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy0_port1_rx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port1_rx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy0_port1_tx_clk = {
+	.halt_reg = 0x56014,
+	.clkr = {
+		.enable_reg = 0x56014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy0_port1_tx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port1_tx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy0_port2_rx_clk = {
+	.halt_reg = 0x56018,
+	.clkr = {
+		.enable_reg = 0x56018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy0_port2_rx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port2_rx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy0_port2_tx_clk = {
+	.halt_reg = 0x5601c,
+	.clkr = {
+		.enable_reg = 0x5601c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy0_port2_tx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port2_tx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy0_port3_rx_clk = {
+	.halt_reg = 0x56020,
+	.clkr = {
+		.enable_reg = 0x56020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy0_port3_rx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port3_rx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy0_port3_tx_clk = {
+	.halt_reg = 0x56024,
+	.clkr = {
+		.enable_reg = 0x56024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy0_port3_tx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port3_tx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy0_port4_rx_clk = {
+	.halt_reg = 0x56028,
+	.clkr = {
+		.enable_reg = 0x56028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy0_port4_rx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port4_rx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy0_port4_tx_clk = {
+	.halt_reg = 0x5602c,
+	.clkr = {
+		.enable_reg = 0x5602c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy0_port4_tx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port4_tx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy0_port5_rx_clk = {
+	.halt_reg = 0x56030,
+	.clkr = {
+		.enable_reg = 0x56030,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy0_port5_rx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port5_rx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy0_port5_tx_clk = {
+	.halt_reg = 0x56034,
+	.clkr = {
+		.enable_reg = 0x56034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy0_port5_tx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port5_tx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy0_sys_clk = {
+	.halt_reg = 0x5600C,
+	.clkr = {
+		.enable_reg = 0x5600C,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy0_sys_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&gcc_xo_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy1_ahb_clk = {
+	.halt_reg = 0x56108,
+	.clkr = {
+		.enable_reg = 0x56108,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy1_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy1_port5_rx_clk = {
+	.halt_reg = 0x56110,
+	.clkr = {
+		.enable_reg = 0x56110,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy1_port5_rx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port5_rx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy1_port5_tx_clk = {
+	.halt_reg = 0x56114,
+	.clkr = {
+		.enable_reg = 0x56114,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy1_port5_tx_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&nss_port5_tx_div_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_uniphy1_sys_clk = {
+	.halt_reg = 0x5610C,
+	.clkr = {
+		.enable_reg = 0x5610C,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_uniphy1_sys_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&gcc_xo_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb0_aux_clk = {
+	.halt_reg = 0x3e044,
+	.clkr = {
+		.enable_reg = 0x3e044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb0_aux_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&usb0_aux_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb0_master_clk = {
+	.halt_reg = 0x3e000,
+	.clkr = {
+		.enable_reg = 0x3e000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb0_master_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&usb0_master_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_snoc_bus_timeout2_ahb_clk = {
+	.halt_reg = 0x47014,
+	.clkr = {
+		.enable_reg = 0x47014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_snoc_bus_timeout2_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&usb0_master_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_rcg2 pcie0_rchng_clk_src = {
+	.cmd_rcgr = 0x75070,
+	.freq_tbl = ftbl_pcie_rchng_clk_src,
+	.hid_width = 5,
+	.parent_map = gcc_xo_gpll0_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pcie0_rchng_clk_src",
+		.parent_data = gcc_xo_gpll0,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_branch gcc_pcie0_rchng_clk = {
+	.halt_reg = 0x75070,
+	.clkr = {
+		.enable_reg = 0x75070,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie0_rchng_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcie0_rchng_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie0_axi_s_bridge_clk = {
+	.halt_reg = 0x75048,
+	.clkr = {
+		.enable_reg = 0x75048,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie0_axi_s_bridge_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcie0_axi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sys_noc_usb0_axi_clk = {
+	.halt_reg = 0x26040,
+	.clkr = {
+		.enable_reg = 0x26040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sys_noc_usb0_axi_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&usb0_master_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb0_mock_utmi_clk = {
+	.halt_reg = 0x3e008,
+	.clkr = {
+		.enable_reg = 0x3e008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb0_mock_utmi_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&usb0_mock_utmi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb0_phy_cfg_ahb_clk = {
+	.halt_reg = 0x3e080,
+	.clkr = {
+		.enable_reg = 0x3e080,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb0_phy_cfg_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb0_pipe_clk = {
+	.halt_reg = 0x3e040,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x3e040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb0_pipe_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&usb0_pipe_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb0_sleep_clk = {
+	.halt_reg = 0x3e004,
+	.clkr = {
+		.enable_reg = 0x3e004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb0_sleep_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&gcc_sleep_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb1_master_clk = {
+	.halt_reg = 0x3f000,
+	.clkr = {
+		.enable_reg = 0x3f000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb1_master_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb1_mock_utmi_clk = {
+	.halt_reg = 0x3f008,
+	.clkr = {
+		.enable_reg = 0x3f008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb1_mock_utmi_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&usb1_mock_utmi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb1_phy_cfg_ahb_clk = {
+	.halt_reg = 0x3f080,
+	.clkr = {
+		.enable_reg = 0x3f080,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb1_phy_cfg_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb1_sleep_clk = {
+	.halt_reg = 0x3f004,
+	.clkr = {
+		.enable_reg = 0x3f004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb1_sleep_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&gcc_sleep_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cmn_12gpll_ahb_clk = {
+	.halt_reg = 0x56308,
+	.clkr = {
+		.enable_reg = 0x56308,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cmn_12gpll_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cmn_12gpll_sys_clk = {
+	.halt_reg = 0x5630c,
+	.clkr = {
+		.enable_reg = 0x5630c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cmn_12gpll_sys_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&gcc_xo_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+	.halt_reg = 0x5d014,
+	.clkr = {
+		.enable_reg = 0x5d014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_ice_core_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&sdcc1_ice_core_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_dcc_clk = {
+	.halt_reg = 0x77004,
+	.clkr = {
+		.enable_reg = 0x77004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_dcc_clk",
+			.parent_hws = (const struct clk_hw *[]){
+					&pcnoc_bfdcd_clk_src.clkr.hw },
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
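+/* PLL configuration values that gcc_ipq6018_probe() writes via clk_alpha_pll_configure() */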
+static const struct alpha_pll_config ubi32_pll_config = {
+	.l = 0x3e,
+	.alpha = 0x57,
+	.config_ctl_val = 0x240d6aa8,
+	.config_ctl_hi_val = 0x3c2,
+	.main_output_mask = BIT(0),
+	.aux_output_mask = BIT(1),
+	.pre_div_val = 0x0,
+	.pre_div_mask = BIT(12),
+	.post_div_val = 0x0,
+	.post_div_mask = GENMASK(9, 8),
+};
+
+static const struct alpha_pll_config nss_crypto_pll_config = {
+	.l = 0x32,
+	.alpha = 0x0,
+	.alpha_hi = 0x0,
+	.config_ctl_val = 0x4001055b,
+	.main_output_mask = BIT(0),
+	.pre_div_val = 0x0,
+	.pre_div_mask = GENMASK(14, 12),
+	.post_div_val = 0x1 << 8,
+	.post_div_mask = GENMASK(11, 8),
+	.vco_mask = GENMASK(21, 20),
+	.vco_val = 0x0,
+	.alpha_en_mask = BIT(24),
+};
+
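+/* Clocks exposed directly as clk_hw and registered through the common descriptor below */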
+static struct clk_hw *gcc_ipq6018_hws[] = {
+	&gpll0_out_main_div2.hw,
+	&gcc_xo_div4_clk_src.hw,
+	&nss_ppe_cdiv_clk_src.hw,
+	&gpll6_out_main_div2.hw,
+	&qdss_dap_sync_clk_src.hw,
+	&qdss_tsctr_div2_clk_src.hw,
+};
+
+static struct clk_regmap *gcc_ipq6018_clks[] = {
+	[GPLL0_MAIN] = &gpll0_main.clkr,
+	[GPLL0] = &gpll0.clkr,
+	[UBI32_PLL_MAIN] = &ubi32_pll_main.clkr,
+	[UBI32_PLL] = &ubi32_pll.clkr,
+	[GPLL6_MAIN] = &gpll6_main.clkr,
+	[GPLL6] = &gpll6.clkr,
+	[GPLL4_MAIN] = &gpll4_main.clkr,
+	[GPLL4] = &gpll4.clkr,
+	[PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr,
+	[GPLL2_MAIN] = &gpll2_main.clkr,
+	[GPLL2] = &gpll2.clkr,
+	[NSS_CRYPTO_PLL_MAIN] = &nss_crypto_pll_main.clkr,
+	[NSS_CRYPTO_PLL] = &nss_crypto_pll.clkr,
+	[QDSS_TSCTR_CLK_SRC] = &qdss_tsctr_clk_src.clkr,
+	[QDSS_AT_CLK_SRC] = &qdss_at_clk_src.clkr,
+	[NSS_PPE_CLK_SRC] = &nss_ppe_clk_src.clkr,
+	[GCC_XO_CLK_SRC] = &gcc_xo_clk_src.clkr,
+	[SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr,
+	[SNOC_NSSNOC_BFDCD_CLK_SRC] = &snoc_nssnoc_bfdcd_clk_src.clkr,
+	[NSS_CE_CLK_SRC] = &nss_ce_clk_src.clkr,
+	[GCC_SLEEP_CLK_SRC] = &gcc_sleep_clk_src.clkr,
+	[APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+	[NSS_PORT5_RX_CLK_SRC] = &nss_port5_rx_clk_src.clkr,
+	[NSS_PORT5_TX_CLK_SRC] = &nss_port5_tx_clk_src.clkr,
+	[UBI32_MEM_NOC_BFDCD_CLK_SRC] = &ubi32_mem_noc_bfdcd_clk_src.clkr,
+	[PCIE0_AXI_CLK_SRC] = &pcie0_axi_clk_src.clkr,
+	[USB0_MASTER_CLK_SRC] = &usb0_master_clk_src.clkr,
+	[APSS_AHB_POSTDIV_CLK_SRC] = &apss_ahb_postdiv_clk_src.clkr,
+	[NSS_PORT1_RX_CLK_SRC] = &nss_port1_rx_clk_src.clkr,
+	[NSS_PORT1_TX_CLK_SRC] = &nss_port1_tx_clk_src.clkr,
+	[NSS_PORT2_RX_CLK_SRC] = &nss_port2_rx_clk_src.clkr,
+	[NSS_PORT2_TX_CLK_SRC] = &nss_port2_tx_clk_src.clkr,
+	[NSS_PORT3_RX_CLK_SRC] = &nss_port3_rx_clk_src.clkr,
+	[NSS_PORT3_TX_CLK_SRC] = &nss_port3_tx_clk_src.clkr,
+	[NSS_PORT4_RX_CLK_SRC] = &nss_port4_rx_clk_src.clkr,
+	[NSS_PORT4_TX_CLK_SRC] = &nss_port4_tx_clk_src.clkr,
+	[NSS_PORT5_RX_DIV_CLK_SRC] = &nss_port5_rx_div_clk_src.clkr,
+	[NSS_PORT5_TX_DIV_CLK_SRC] = &nss_port5_tx_div_clk_src.clkr,
+	[APSS_AXI_CLK_SRC] = &apss_axi_clk_src.clkr,
+	[NSS_CRYPTO_CLK_SRC] = &nss_crypto_clk_src.clkr,
+	[NSS_PORT1_RX_DIV_CLK_SRC] = &nss_port1_rx_div_clk_src.clkr,
+	[NSS_PORT1_TX_DIV_CLK_SRC] = &nss_port1_tx_div_clk_src.clkr,
+	[NSS_PORT2_RX_DIV_CLK_SRC] = &nss_port2_rx_div_clk_src.clkr,
+	[NSS_PORT2_TX_DIV_CLK_SRC] = &nss_port2_tx_div_clk_src.clkr,
+	[NSS_PORT3_RX_DIV_CLK_SRC] = &nss_port3_rx_div_clk_src.clkr,
+	[NSS_PORT3_TX_DIV_CLK_SRC] = &nss_port3_tx_div_clk_src.clkr,
+	[NSS_PORT4_RX_DIV_CLK_SRC] = &nss_port4_rx_div_clk_src.clkr,
+	[NSS_PORT4_TX_DIV_CLK_SRC] = &nss_port4_tx_div_clk_src.clkr,
+	[NSS_UBI0_CLK_SRC] = &nss_ubi0_clk_src.clkr,
+	[ADSS_PWM_CLK_SRC] = &adss_pwm_clk_src.clkr,
+	[BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+	[BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+	[BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+	[BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+	[BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+	[BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+	[BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+	[BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+	[BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+	[BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+	[BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+	[BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+	[BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+	[BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+	[BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+	[BLSP1_UART4_APPS_CLK_SRC] = &blsp1_uart4_apps_clk_src.clkr,
+	[BLSP1_UART5_APPS_CLK_SRC] = &blsp1_uart5_apps_clk_src.clkr,
+	[BLSP1_UART6_APPS_CLK_SRC] = &blsp1_uart6_apps_clk_src.clkr,
+	[CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+	[GP1_CLK_SRC] = &gp1_clk_src.clkr,
+	[GP2_CLK_SRC] = &gp2_clk_src.clkr,
+	[GP3_CLK_SRC] = &gp3_clk_src.clkr,
+	[NSS_UBI0_DIV_CLK_SRC] = &nss_ubi0_div_clk_src.clkr,
+	[PCIE0_AUX_CLK_SRC] = &pcie0_aux_clk_src.clkr,
+	[PCIE0_PIPE_CLK_SRC] = &pcie0_pipe_clk_src.clkr,
+	[SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+	[USB0_AUX_CLK_SRC] = &usb0_aux_clk_src.clkr,
+	[USB0_MOCK_UTMI_CLK_SRC] = &usb0_mock_utmi_clk_src.clkr,
+	[USB0_PIPE_CLK_SRC] = &usb0_pipe_clk_src.clkr,
+	[USB1_MOCK_UTMI_CLK_SRC] = &usb1_mock_utmi_clk_src.clkr,
+	[GCC_ADSS_PWM_CLK] = &gcc_adss_pwm_clk.clkr,
+	[GCC_APSS_AHB_CLK] = &gcc_apss_ahb_clk.clkr,
+	[GCC_APSS_AXI_CLK] = &gcc_apss_axi_clk.clkr,
+	[GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+	[GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+	[GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+	[GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+	[GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+	[GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+	[GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+	[GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+	[GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+	[GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+	[GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+	[GCC_BLSP1_UART5_APPS_CLK] = &gcc_blsp1_uart5_apps_clk.clkr,
+	[GCC_BLSP1_UART6_APPS_CLK] = &gcc_blsp1_uart6_apps_clk.clkr,
+	[GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+	[GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+	[GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+	[GCC_XO_CLK] = &gcc_xo_clk.clkr,
+	[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+	[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+	[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+	[GCC_MDIO_AHB_CLK] = &gcc_mdio_ahb_clk.clkr,
+	[GCC_CRYPTO_PPE_CLK] = &gcc_crypto_ppe_clk.clkr,
+	[GCC_NSS_CE_APB_CLK] = &gcc_nss_ce_apb_clk.clkr,
+	[GCC_NSS_CE_AXI_CLK] = &gcc_nss_ce_axi_clk.clkr,
+	[GCC_NSS_CFG_CLK] = &gcc_nss_cfg_clk.clkr,
+	[GCC_NSS_CRYPTO_CLK] = &gcc_nss_crypto_clk.clkr,
+	[GCC_NSS_CSR_CLK] = &gcc_nss_csr_clk.clkr,
+	[GCC_NSS_EDMA_CFG_CLK] = &gcc_nss_edma_cfg_clk.clkr,
+	[GCC_NSS_EDMA_CLK] = &gcc_nss_edma_clk.clkr,
+	[GCC_NSS_NOC_CLK] = &gcc_nss_noc_clk.clkr,
+	[GCC_UBI0_UTCM_CLK] = &gcc_ubi0_utcm_clk.clkr,
+	[GCC_SNOC_NSSNOC_CLK] = &gcc_snoc_nssnoc_clk.clkr,
+	[GCC_NSS_PORT1_RX_CLK] = &gcc_nss_port1_rx_clk.clkr,
+	[GCC_NSS_PORT1_TX_CLK] = &gcc_nss_port1_tx_clk.clkr,
+	[GCC_NSS_PORT2_RX_CLK] = &gcc_nss_port2_rx_clk.clkr,
+	[GCC_NSS_PORT2_TX_CLK] = &gcc_nss_port2_tx_clk.clkr,
+	[GCC_NSS_PORT3_RX_CLK] = &gcc_nss_port3_rx_clk.clkr,
+	[GCC_NSS_PORT3_TX_CLK] = &gcc_nss_port3_tx_clk.clkr,
+	[GCC_NSS_PORT4_RX_CLK] = &gcc_nss_port4_rx_clk.clkr,
+	[GCC_NSS_PORT4_TX_CLK] = &gcc_nss_port4_tx_clk.clkr,
+	[GCC_NSS_PORT5_RX_CLK] = &gcc_nss_port5_rx_clk.clkr,
+	[GCC_NSS_PORT5_TX_CLK] = &gcc_nss_port5_tx_clk.clkr,
+	[GCC_NSS_PPE_CFG_CLK] = &gcc_nss_ppe_cfg_clk.clkr,
+	[GCC_NSS_PPE_CLK] = &gcc_nss_ppe_clk.clkr,
+	[GCC_NSS_PPE_IPE_CLK] = &gcc_nss_ppe_ipe_clk.clkr,
+	[GCC_NSS_PTP_REF_CLK] = &gcc_nss_ptp_ref_clk.clkr,
+	[GCC_NSSNOC_CE_APB_CLK] = &gcc_nssnoc_ce_apb_clk.clkr,
+	[GCC_NSSNOC_CE_AXI_CLK] = &gcc_nssnoc_ce_axi_clk.clkr,
+	[GCC_NSSNOC_CRYPTO_CLK] = &gcc_nssnoc_crypto_clk.clkr,
+	[GCC_NSSNOC_PPE_CFG_CLK] = &gcc_nssnoc_ppe_cfg_clk.clkr,
+	[GCC_NSSNOC_PPE_CLK] = &gcc_nssnoc_ppe_clk.clkr,
+	[GCC_NSSNOC_QOSGEN_REF_CLK] = &gcc_nssnoc_qosgen_ref_clk.clkr,
+	[GCC_NSSNOC_SNOC_CLK] = &gcc_nssnoc_snoc_clk.clkr,
+	[GCC_NSSNOC_TIMEOUT_REF_CLK] = &gcc_nssnoc_timeout_ref_clk.clkr,
+	[GCC_NSSNOC_UBI0_AHB_CLK] = &gcc_nssnoc_ubi0_ahb_clk.clkr,
+	[GCC_PORT1_MAC_CLK] = &gcc_port1_mac_clk.clkr,
+	[GCC_PORT2_MAC_CLK] = &gcc_port2_mac_clk.clkr,
+	[GCC_PORT3_MAC_CLK] = &gcc_port3_mac_clk.clkr,
+	[GCC_PORT4_MAC_CLK] = &gcc_port4_mac_clk.clkr,
+	[GCC_PORT5_MAC_CLK] = &gcc_port5_mac_clk.clkr,
+	[GCC_UBI0_AHB_CLK] = &gcc_ubi0_ahb_clk.clkr,
+	[GCC_UBI0_AXI_CLK] = &gcc_ubi0_axi_clk.clkr,
+	[GCC_UBI0_NC_AXI_CLK] = &gcc_ubi0_nc_axi_clk.clkr,
+	[GCC_UBI0_CORE_CLK] = &gcc_ubi0_core_clk.clkr,
+	[GCC_PCIE0_AHB_CLK] = &gcc_pcie0_ahb_clk.clkr,
+	[GCC_PCIE0_AUX_CLK] = &gcc_pcie0_aux_clk.clkr,
+	[GCC_PCIE0_AXI_M_CLK] = &gcc_pcie0_axi_m_clk.clkr,
+	[GCC_PCIE0_AXI_S_CLK] = &gcc_pcie0_axi_s_clk.clkr,
+	[GCC_SYS_NOC_PCIE0_AXI_CLK] = &gcc_sys_noc_pcie0_axi_clk.clkr,
+	[GCC_PCIE0_PIPE_CLK] = &gcc_pcie0_pipe_clk.clkr,
+	[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+	[GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+	[GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
+	[GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
+	[GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+	[GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+	[GCC_UNIPHY0_AHB_CLK] = &gcc_uniphy0_ahb_clk.clkr,
+	[GCC_UNIPHY0_PORT1_RX_CLK] = &gcc_uniphy0_port1_rx_clk.clkr,
+	[GCC_UNIPHY0_PORT1_TX_CLK] = &gcc_uniphy0_port1_tx_clk.clkr,
+	[GCC_UNIPHY0_PORT2_RX_CLK] = &gcc_uniphy0_port2_rx_clk.clkr,
+	[GCC_UNIPHY0_PORT2_TX_CLK] = &gcc_uniphy0_port2_tx_clk.clkr,
+	[GCC_UNIPHY0_PORT3_RX_CLK] = &gcc_uniphy0_port3_rx_clk.clkr,
+	[GCC_UNIPHY0_PORT3_TX_CLK] = &gcc_uniphy0_port3_tx_clk.clkr,
+	[GCC_UNIPHY0_PORT4_RX_CLK] = &gcc_uniphy0_port4_rx_clk.clkr,
+	[GCC_UNIPHY0_PORT4_TX_CLK] = &gcc_uniphy0_port4_tx_clk.clkr,
+	[GCC_UNIPHY0_PORT5_RX_CLK] = &gcc_uniphy0_port5_rx_clk.clkr,
+	[GCC_UNIPHY0_PORT5_TX_CLK] = &gcc_uniphy0_port5_tx_clk.clkr,
+	[GCC_UNIPHY0_SYS_CLK] = &gcc_uniphy0_sys_clk.clkr,
+	[GCC_UNIPHY1_AHB_CLK] = &gcc_uniphy1_ahb_clk.clkr,
+	[GCC_UNIPHY1_PORT5_RX_CLK] = &gcc_uniphy1_port5_rx_clk.clkr,
+	[GCC_UNIPHY1_PORT5_TX_CLK] = &gcc_uniphy1_port5_tx_clk.clkr,
+	[GCC_UNIPHY1_SYS_CLK] = &gcc_uniphy1_sys_clk.clkr,
+	[GCC_USB0_AUX_CLK] = &gcc_usb0_aux_clk.clkr,
+	[GCC_SYS_NOC_USB0_AXI_CLK] = &gcc_sys_noc_usb0_axi_clk.clkr,
+	[GCC_SNOC_BUS_TIMEOUT2_AHB_CLK] = &gcc_snoc_bus_timeout2_ahb_clk.clkr,
+	[GCC_USB0_MASTER_CLK] = &gcc_usb0_master_clk.clkr,
+	[GCC_USB0_MOCK_UTMI_CLK] = &gcc_usb0_mock_utmi_clk.clkr,
+	[GCC_USB0_PHY_CFG_AHB_CLK] = &gcc_usb0_phy_cfg_ahb_clk.clkr,
+	[GCC_USB0_PIPE_CLK] = &gcc_usb0_pipe_clk.clkr,
+	[GCC_USB0_SLEEP_CLK] = &gcc_usb0_sleep_clk.clkr,
+	[GCC_USB1_MASTER_CLK] = &gcc_usb1_master_clk.clkr,
+	[GCC_USB1_MOCK_UTMI_CLK] = &gcc_usb1_mock_utmi_clk.clkr,
+	[GCC_USB1_PHY_CFG_AHB_CLK] = &gcc_usb1_phy_cfg_ahb_clk.clkr,
+	[GCC_USB1_SLEEP_CLK] = &gcc_usb1_sleep_clk.clkr,
+	[GCC_CMN_12GPLL_AHB_CLK] = &gcc_cmn_12gpll_ahb_clk.clkr,
+	[GCC_CMN_12GPLL_SYS_CLK] = &gcc_cmn_12gpll_sys_clk.clkr,
+	[GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+	[SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+	[GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+	[PCIE0_RCHNG_CLK_SRC] = &pcie0_rchng_clk_src.clkr,
+	[GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr,
+	[PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr,
+	[WCSS_AHB_CLK_SRC] = &wcss_ahb_clk_src.clkr,
+	[Q6_AXI_CLK_SRC] = &q6_axi_clk_src.clkr,
+	[RBCPR_WCSS_CLK_SRC] = &rbcpr_wcss_clk_src.clkr,
+	[GCC_LPASS_CORE_AXIM_CLK] = &gcc_lpass_core_axim_clk.clkr,
+	[LPASS_CORE_AXIM_CLK_SRC] = &lpass_core_axim_clk_src.clkr,
+	[GCC_LPASS_SNOC_CFG_CLK] = &gcc_lpass_snoc_cfg_clk.clkr,
+	[LPASS_SNOC_CFG_CLK_SRC] = &lpass_snoc_cfg_clk_src.clkr,
+	[GCC_LPASS_Q6_AXIM_CLK] = &gcc_lpass_q6_axim_clk.clkr,
+	[LPASS_Q6_AXIM_CLK_SRC] = &lpass_q6_axim_clk_src.clkr,
+	[GCC_LPASS_Q6_ATBM_AT_CLK] = &gcc_lpass_q6_atbm_at_clk.clkr,
+	[GCC_LPASS_Q6_PCLKDBG_CLK] = &gcc_lpass_q6_pclkdbg_clk.clkr,
+	[GCC_LPASS_Q6SS_TSCTR_1TO2_CLK] = &gcc_lpass_q6ss_tsctr_1to2_clk.clkr,
+	[GCC_LPASS_Q6SS_TRIG_CLK] = &gcc_lpass_q6ss_trig_clk.clkr,
+	[GCC_LPASS_TBU_CLK] = &gcc_lpass_tbu_clk.clkr,
+	[GCC_PCNOC_LPASS_CLK] = &gcc_pcnoc_lpass_clk.clkr,
+	[GCC_MEM_NOC_UBI32_CLK] = &gcc_mem_noc_ubi32_clk.clkr,
+	[GCC_MEM_NOC_LPASS_CLK] = &gcc_mem_noc_lpass_clk.clkr,
+	[GCC_SNOC_LPASS_CFG_CLK] = &gcc_snoc_lpass_cfg_clk.clkr,
+	[QDSS_STM_CLK_SRC] = &qdss_stm_clk_src.clkr,
+	[QDSS_TRACECLKIN_CLK_SRC] = &qdss_traceclkin_clk_src.clkr,
+};
+
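+/* Block control resets (BCRs) and asynchronous resets (ARES) exposed by the reset controller */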
+static const struct qcom_reset_map gcc_ipq6018_resets[] = {
+	[GCC_BLSP1_BCR] = { 0x01000, 0 },
+	[GCC_BLSP1_QUP1_BCR] = { 0x02000, 0 },
+	[GCC_BLSP1_UART1_BCR] = { 0x02038, 0 },
+	[GCC_BLSP1_QUP2_BCR] = { 0x03008, 0 },
+	[GCC_BLSP1_UART2_BCR] = { 0x03028, 0 },
+	[GCC_BLSP1_QUP3_BCR] = { 0x04008, 0 },
+	[GCC_BLSP1_UART3_BCR] = { 0x04028, 0 },
+	[GCC_BLSP1_QUP4_BCR] = { 0x05008, 0 },
+	[GCC_BLSP1_UART4_BCR] = { 0x05028, 0 },
+	[GCC_BLSP1_QUP5_BCR] = { 0x06008, 0 },
+	[GCC_BLSP1_UART5_BCR] = { 0x06028, 0 },
+	[GCC_BLSP1_QUP6_BCR] = { 0x07008, 0 },
+	[GCC_BLSP1_UART6_BCR] = { 0x07028, 0 },
+	[GCC_IMEM_BCR] = { 0x0e000, 0 },
+	[GCC_SMMU_BCR] = { 0x12000, 0 },
+	[GCC_APSS_TCU_BCR] = { 0x12050, 0 },
+	[GCC_SMMU_XPU_BCR] = { 0x12054, 0 },
+	[GCC_PCNOC_TBU_BCR] = { 0x12058, 0 },
+	[GCC_SMMU_CFG_BCR] = { 0x1208c, 0 },
+	[GCC_PRNG_BCR] = { 0x13000, 0 },
+	[GCC_BOOT_ROM_BCR] = { 0x13008, 0 },
+	[GCC_CRYPTO_BCR] = { 0x16000, 0 },
+	[GCC_WCSS_BCR] = { 0x18000, 0 },
+	[GCC_WCSS_Q6_BCR] = { 0x18100, 0 },
+	[GCC_NSS_BCR] = { 0x19000, 0 },
+	[GCC_SEC_CTRL_BCR] = { 0x1a000, 0 },
+	[GCC_ADSS_BCR] = { 0x1c000, 0 },
+	[GCC_DDRSS_BCR] = { 0x1e000, 0 },
+	[GCC_SYSTEM_NOC_BCR] = { 0x26000, 0 },
+	[GCC_PCNOC_BCR] = { 0x27018, 0 },
+	[GCC_TCSR_BCR] = { 0x28000, 0 },
+	[GCC_QDSS_BCR] = { 0x29000, 0 },
+	[GCC_DCD_BCR] = { 0x2a000, 0 },
+	[GCC_MSG_RAM_BCR] = { 0x2b000, 0 },
+	[GCC_MPM_BCR] = { 0x2c000, 0 },
+	[GCC_SPDM_BCR] = { 0x2f000, 0 },
+	[GCC_RBCPR_BCR] = { 0x33000, 0 },
+	[GCC_RBCPR_MX_BCR] = { 0x33014, 0 },
+	[GCC_TLMM_BCR] = { 0x34000, 0 },
+	[GCC_RBCPR_WCSS_BCR] = { 0x3a000, 0 },
+	[GCC_USB0_PHY_BCR] = { 0x3e034, 0 },
+	[GCC_USB3PHY_0_PHY_BCR] = { 0x3e03c, 0 },
+	[GCC_USB0_BCR] = { 0x3e070, 0 },
+	[GCC_USB1_BCR] = { 0x3f070, 0 },
+	[GCC_QUSB2_0_PHY_BCR] = { 0x4103c, 0 },
+	[GCC_QUSB2_1_PHY_BCR] = { 0x41040, 0 },
+	[GCC_SDCC1_BCR] = { 0x42000, 0 },
+	[GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x47000, 0 },
+	[GCC_SNOC_BUS_TIMEOUT1_BCR] = { 0x47008, 0 },
+	[GCC_SNOC_BUS_TIMEOUT2_BCR] = { 0x47010, 0 },
+	[GCC_PCNOC_BUS_TIMEOUT0_BCR] = { 0x48000, 0 },
+	[GCC_PCNOC_BUS_TIMEOUT1_BCR] = { 0x48008, 0 },
+	[GCC_PCNOC_BUS_TIMEOUT2_BCR] = { 0x48010, 0 },
+	[GCC_PCNOC_BUS_TIMEOUT3_BCR] = { 0x48018, 0 },
+	[GCC_PCNOC_BUS_TIMEOUT4_BCR] = { 0x48020, 0 },
+	[GCC_PCNOC_BUS_TIMEOUT5_BCR] = { 0x48028, 0 },
+	[GCC_PCNOC_BUS_TIMEOUT6_BCR] = { 0x48030, 0 },
+	[GCC_PCNOC_BUS_TIMEOUT7_BCR] = { 0x48038, 0 },
+	[GCC_PCNOC_BUS_TIMEOUT8_BCR] = { 0x48040, 0 },
+	[GCC_PCNOC_BUS_TIMEOUT9_BCR] = { 0x48048, 0 },
+	[GCC_UNIPHY0_BCR] = { 0x56000, 0 },
+	[GCC_UNIPHY1_BCR] = { 0x56100, 0 },
+	[GCC_CMN_12GPLL_BCR] = { 0x56300, 0 },
+	[GCC_QPIC_BCR] = { 0x57018, 0 },
+	[GCC_MDIO_BCR] = { 0x58000, 0 },
+	[GCC_WCSS_CORE_TBU_BCR] = { 0x66000, 0 },
+	[GCC_WCSS_Q6_TBU_BCR] = { 0x67000, 0 },
+	[GCC_USB0_TBU_BCR] = { 0x6a000, 0 },
+	[GCC_PCIE0_TBU_BCR] = { 0x6b000, 0 },
+	[GCC_NSS_NOC_TBU_BCR] = { 0x6e000, 0 },
+	[GCC_PCIE0_BCR] = { 0x75004, 0 },
+	[GCC_PCIE0_PHY_BCR] = { 0x75038, 0 },
+	[GCC_PCIE0PHY_PHY_BCR] = { 0x7503c, 0 },
+	[GCC_PCIE0_LINK_DOWN_BCR] = { 0x75044, 0 },
+	[GCC_DCC_BCR] = { 0x77000, 0 },
+	[GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR] = { 0x78000, 0 },
+	[GCC_SMMU_CATS_BCR] = { 0x7c000, 0 },
+	[GCC_UBI0_AXI_ARES] = { 0x68010, 0 },
+	[GCC_UBI0_AHB_ARES] = { 0x68010, 1 },
+	[GCC_UBI0_NC_AXI_ARES] = { 0x68010, 2 },
+	[GCC_UBI0_DBG_ARES] = { 0x68010, 3 },
+	[GCC_UBI0_CORE_CLAMP_ENABLE] = { 0x68010, 4 },
+	[GCC_UBI0_CLKRST_CLAMP_ENABLE] = { 0x68010, 5 },
+	[GCC_UBI0_UTCM_ARES] = { 0x68010, 6 },
+	[GCC_UBI0_CORE_ARES] = { 0x68010, 7 },
+	[GCC_NSS_CFG_ARES] = { 0x68010, 16 },
+	[GCC_NSS_NOC_ARES] = { 0x68010, 18 },
+	[GCC_NSS_CRYPTO_ARES] = { 0x68010, 19 },
+	[GCC_NSS_CSR_ARES] = { 0x68010, 20 },
+	[GCC_NSS_CE_APB_ARES] = { 0x68010, 21 },
+	[GCC_NSS_CE_AXI_ARES] = { 0x68010, 22 },
+	[GCC_NSSNOC_CE_APB_ARES] = { 0x68010, 23 },
+	[GCC_NSSNOC_CE_AXI_ARES] = { 0x68010, 24 },
+	[GCC_NSSNOC_UBI0_AHB_ARES] = { 0x68010, 25 },
+	[GCC_NSSNOC_SNOC_ARES] = { 0x68010, 27 },
+	[GCC_NSSNOC_CRYPTO_ARES] = { 0x68010, 28 },
+	[GCC_NSSNOC_ATB_ARES] = { 0x68010, 29 },
+	[GCC_NSSNOC_QOSGEN_REF_ARES] = { 0x68010, 30 },
+	[GCC_NSSNOC_TIMEOUT_REF_ARES] = { 0x68010, 31 },
+	[GCC_PCIE0_PIPE_ARES] = { 0x75040, 0 },
+	[GCC_PCIE0_SLEEP_ARES] = { 0x75040, 1 },
+	[GCC_PCIE0_CORE_STICKY_ARES] = { 0x75040, 2 },
+	[GCC_PCIE0_AXI_MASTER_ARES] = { 0x75040, 3 },
+	[GCC_PCIE0_AXI_SLAVE_ARES] = { 0x75040, 4 },
+	[GCC_PCIE0_AHB_ARES] = { 0x75040, 5 },
+	[GCC_PCIE0_AXI_MASTER_STICKY_ARES] = { 0x75040, 6 },
+	[GCC_PCIE0_AXI_SLAVE_STICKY_ARES] = { 0x75040, 7 },
+	[GCC_PPE_FULL_RESET] = { 0x68014, 0 },
+	[GCC_UNIPHY0_SOFT_RESET] = { 0x56004, 0 },
+	[GCC_UNIPHY0_XPCS_RESET] = { 0x56004, 2 },
+	[GCC_UNIPHY1_SOFT_RESET] = { 0x56104, 0 },
+	[GCC_UNIPHY1_XPCS_RESET] = { 0x56104, 2 },
+	[GCC_EDMA_HW_RESET] = { 0x68014, 0 },
+	[GCC_NSSPORT1_RESET] = { 0x68014, 0 },
+	[GCC_NSSPORT2_RESET] = { 0x68014, 0 },
+	[GCC_NSSPORT3_RESET] = { 0x68014, 0 },
+	[GCC_NSSPORT4_RESET] = { 0x68014, 0 },
+	[GCC_NSSPORT5_RESET] = { 0x68014, 0 },
+	[GCC_UNIPHY0_PORT1_ARES] = { 0x56004, 0 },
+	[GCC_UNIPHY0_PORT2_ARES] = { 0x56004, 0 },
+	[GCC_UNIPHY0_PORT3_ARES] = { 0x56004, 0 },
+	[GCC_UNIPHY0_PORT4_ARES] = { 0x56004, 0 },
+	[GCC_UNIPHY0_PORT5_ARES] = { 0x56004, 0 },
+	[GCC_UNIPHY0_PORT_4_5_RESET] = { 0x56004, 0 },
+	[GCC_UNIPHY0_PORT_4_RESET] = { 0x56004, 0 },
+	[GCC_LPASS_BCR] = { 0x1f000, 0 },
+	[GCC_UBI32_TBU_BCR] = { 0x65000, 0 },
+	[GCC_LPASS_TBU_BCR] = { 0x6c000, 0 },
+	[GCC_WCSSAON_RESET] = { 0x59010, 0 },
+	[GCC_LPASS_Q6_AXIM_ARES] = { 0x1f004, 0 },
+	[GCC_LPASS_Q6SS_TSCTR_1TO2_ARES] = { 0x1f004, 1 },
+	[GCC_LPASS_Q6SS_TRIG_ARES] = { 0x1f004, 2 },
+	[GCC_LPASS_Q6_ATBM_AT_ARES] = { 0x1f004, 3 },
+	[GCC_LPASS_Q6_PCLKDBG_ARES] = { 0x1f004, 4 },
+	[GCC_LPASS_CORE_AXIM_ARES] = { 0x1f004, 5 },
+	[GCC_LPASS_SNOC_CFG_ARES] = { 0x1f004, 6 },
+	[GCC_WCSS_DBG_ARES] = { 0x59008, 0 },
+	[GCC_WCSS_ECAHB_ARES] = { 0x59008, 1 },
+	[GCC_WCSS_ACMT_ARES] = { 0x59008, 2 },
+	[GCC_WCSS_DBG_BDG_ARES] = { 0x59008, 3 },
+	[GCC_WCSS_AHB_S_ARES] = { 0x59008, 4 },
+	[GCC_WCSS_AXI_M_ARES] = { 0x59008, 5 },
+	[GCC_Q6SS_DBG_ARES] = { 0x59110, 0 },
+	[GCC_Q6_AHB_S_ARES] = { 0x59110, 1 },
+	[GCC_Q6_AHB_ARES] = { 0x59110, 2 },
+	[GCC_Q6_AXIM2_ARES] = { 0x59110, 3 },
+	[GCC_Q6_AXIM_ARES] = { 0x59110, 4 },
+};
+
+static const struct of_device_id gcc_ipq6018_match_table[] = {
+	{ .compatible = "qcom,gcc-ipq6018" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gcc_ipq6018_match_table);
+
+static const struct regmap_config gcc_ipq6018_regmap_config = {
+	.reg_bits       = 32,
+	.reg_stride     = 4,
+	.val_bits       = 32,
+	.max_register   = 0x7fffc,
+	.fast_io	= true,
+};
+
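+/* Ties the regmap config, clock tables, clk_hws and resets together for the qcom_cc_* probe helpers */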
+static const struct qcom_cc_desc gcc_ipq6018_desc = {
+	.config = &gcc_ipq6018_regmap_config,
+	.clks = gcc_ipq6018_clks,
+	.num_clks = ARRAY_SIZE(gcc_ipq6018_clks),
+	.resets = gcc_ipq6018_resets,
+	.num_resets = ARRAY_SIZE(gcc_ipq6018_resets),
+	.clk_hws = gcc_ipq6018_hws,
+	.num_clk_hws = ARRAY_SIZE(gcc_ipq6018_hws),
+};
+
+static int gcc_ipq6018_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+
+	regmap = qcom_cc_map(pdev, &gcc_ipq6018_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	/* Disable SW_COLLAPSE for USB0 GDSCR */
+	regmap_update_bits(regmap, 0x3e078, BIT(0), 0x0);
+	/* Enable SW_OVERRIDE for USB0 GDSCR */
+	regmap_update_bits(regmap, 0x3e078, BIT(2), BIT(2));
+	/* Disable SW_COLLAPSE for USB1 GDSCR */
+	regmap_update_bits(regmap, 0x3f078, BIT(0), 0x0);
+	/* Enable SW_OVERRIDE for USB1 GDSCR */
+	regmap_update_bits(regmap, 0x3f078, BIT(2), BIT(2));
+
+	/* SW workaround for the UBI Huayra PLL */
+	regmap_update_bits(regmap, 0x2501c, BIT(26), BIT(26));
+
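+	/* Program the UBI32 and NSS crypto PLLs before registering the clock tree */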
+	clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
+
+	clk_alpha_pll_configure(&nss_crypto_pll_main, regmap,
+				&nss_crypto_pll_config);
+
+	return qcom_cc_really_probe(pdev, &gcc_ipq6018_desc, regmap);
+}
+
+static struct platform_driver gcc_ipq6018_driver = {
+	.probe = gcc_ipq6018_probe,
+	.driver = {
+		.name   = "qcom,gcc-ipq6018",
+		.of_match_table = gcc_ipq6018_match_table,
+	},
+};
+
+static int __init gcc_ipq6018_init(void)
+{
+	return platform_driver_register(&gcc_ipq6018_driver);
+}
+core_initcall(gcc_ipq6018_init);
+
+static void __exit gcc_ipq6018_exit(void)
+{
+	platform_driver_unregister(&gcc_ipq6018_driver);
+}
+module_exit(gcc_ipq6018_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. GCC IPQ6018 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index d004cda..3c3a7ff 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -3046,7 +3046,10 @@ static struct clk_branch gcc_usb3_clkref_clk = {
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_usb3_clkref_clk",
-			.parent_names = (const char *[]){ "xo" },
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "cxo2",
+				.name = "xo",
+			},
 			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
@@ -3060,7 +3063,10 @@ static struct clk_branch gcc_hdmi_clkref_clk = {
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_hdmi_clkref_clk",
-			.parent_names = (const char *[]){ "xo" },
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "cxo2",
+				.name = "xo",
+			},
 			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
@@ -3074,7 +3080,10 @@ static struct clk_branch gcc_edp_clkref_clk = {
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_edp_clkref_clk",
-			.parent_names = (const char *[]){ "xo" },
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "cxo2",
+				.name = "xo",
+			},
 			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
@@ -3088,7 +3097,10 @@ static struct clk_branch gcc_ufs_clkref_clk = {
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_ufs_clkref_clk",
-			.parent_names = (const char *[]){ "xo" },
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "cxo2",
+				.name = "xo",
+			},
 			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
@@ -3102,7 +3114,10 @@ static struct clk_branch gcc_pcie_clkref_clk = {
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_pcie_clkref_clk",
-			.parent_names = (const char *[]){ "xo" },
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "cxo2",
+				.name = "xo",
+			},
 			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
@@ -3116,7 +3131,10 @@ static struct clk_branch gcc_rx2_usb2_clkref_clk = {
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_rx2_usb2_clkref_clk",
-			.parent_names = (const char *[]){ "xo" },
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "cxo2",
+				.name = "xo",
+			},
 			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
@@ -3130,7 +3148,10 @@ static struct clk_branch gcc_rx1_usb2_clkref_clk = {
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_rx1_usb2_clkref_clk",
-			.parent_names = (const char *[]){ "xo" },
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "cxo2",
+				.name = "xo",
+			},
 			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index cf31b5d..df1d705 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -1996,6 +1996,19 @@ static struct clk_branch gcc_gp3_clk = {
 	},
 };
 
+static struct clk_branch gcc_bimc_gfx_clk = {
+	.halt_reg = 0x46040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x46040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_bimc_gfx_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_gpu_bimc_gfx_clk = {
 	.halt_reg = 0x71010,
 	.halt_check = BRANCH_HALT,
@@ -2810,6 +2823,7 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
 	[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
 	[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
 	[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+	[GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
 	[GCC_GPU_BIMC_GFX_CLK] = &gcc_gpu_bimc_gfx_clk.clkr,
 	[GCC_GPU_BIMC_GFX_SRC_CLK] = &gcc_gpu_bimc_gfx_src_clk.clkr,
 	[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index 9b0c4ce..46d314d 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -330,7 +330,7 @@ static struct clk_alpha_pll gpll0_ao_out_main = {
 			.parent_names = (const char *[]){ "cxo" },
 			.num_parents = 1,
 			.flags = CLK_IS_CRITICAL,
-			.ops = &clk_alpha_pll_ops,
+			.ops = &clk_alpha_pll_fixed_ops,
 		},
 	},
 };
diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c
new file mode 100644
index 0000000..a96c0b9
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sc7180.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+
+#define CX_GMU_CBCR_SLEEP_MASK		0xF
+#define CX_GMU_CBCR_SLEEP_SHIFT		4
+#define CX_GMU_CBCR_WAKE_MASK		0xF
+#define CX_GMU_CBCR_WAKE_SHIFT		8
+#define CLK_DIS_WAIT_SHIFT		12
+#define CLK_DIS_WAIT_MASK		(0xf << CLK_DIS_WAIT_SHIFT)
+
+enum {
+	P_BI_TCXO,
+	P_CORE_BI_PLL_TEST_SE,
+	P_GPLL0_OUT_MAIN,
+	P_GPLL0_OUT_MAIN_DIV,
+	P_GPU_CC_PLL1_OUT_EVEN,
+	P_GPU_CC_PLL1_OUT_MAIN,
+	P_GPU_CC_PLL1_OUT_ODD,
+};
+
+static const struct pll_vco fabia_vco[] = {
+	{ 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+	.offset = 0x100,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_pll1",
+			.parent_data =  &(const struct clk_parent_data){
+				.fw_name = "bi_tcxo",
+			},
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_fabia_ops,
+		},
+	},
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPU_CC_PLL1_OUT_MAIN, 3 },
+	{ P_GPLL0_OUT_MAIN, 5 },
+	{ P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+	{ .fw_name = "bi_tcxo" },
+	{ .hw = &gpu_cc_pll1.clkr.hw },
+	{ .fw_name = "gcc_gpu_gpll0_clk_src" },
+	{ .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+	.cmd_rcgr = 0x1120,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gpu_cc_parent_map_0,
+	.freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_gmu_clk_src",
+		.parent_data = gpu_cc_parent_data_0,
+		.num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_shared_ops,
+	},
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+	.halt_reg = 0x107c,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x107c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_crc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+	.halt_reg = 0x1098,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1098,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_gmu_clk",
+			.parent_data =  &(const struct clk_parent_data){
+				.hw = &gpu_cc_gmu_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+	.halt_reg = 0x108c,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x108c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_snoc_dvm_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+	.halt_reg = 0x1004,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x1004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cxo_aon_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+	.halt_reg = 0x109c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x109c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cxo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct gdsc cx_gdsc = {
+	.gdscr = 0x106c,
+	.gds_hw_ctrl = 0x1540,
+	.pd = {
+		.name = "cx_gdsc",
+	},
+	.pwrsts = PWRSTS_OFF_ON,
+	.flags = VOTABLE,
+};
+
+static struct gdsc *gpu_cc_sc7180_gdscs[] = {
+	[CX_GDSC] = &cx_gdsc,
+};
+
+static struct clk_regmap *gpu_cc_sc7180_clocks[] = {
+	[GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+	[GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+	[GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+	[GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+	[GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+	[GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+	[GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+};
+
+static const struct regmap_config gpu_cc_sc7180_regmap_config = {
+	.reg_bits =	32,
+	.reg_stride =	4,
+	.val_bits =	32,
+	.max_register =	0x8008,
+	.fast_io =	true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sc7180_desc = {
+	.config = &gpu_cc_sc7180_regmap_config,
+	.clks = gpu_cc_sc7180_clocks,
+	.num_clks = ARRAY_SIZE(gpu_cc_sc7180_clocks),
+	.gdscs = gpu_cc_sc7180_gdscs,
+	.num_gdscs = ARRAY_SIZE(gpu_cc_sc7180_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sc7180_match_table[] = {
+	{ .compatible = "qcom,sc7180-gpucc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sc7180_match_table);
+
+static int gpu_cc_sc7180_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	struct alpha_pll_config gpu_cc_pll_config = {};
+	unsigned int value, mask;
+
+	regmap = qcom_cc_map(pdev, &gpu_cc_sc7180_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	/* 360MHz Configuration */
+	gpu_cc_pll_config.l = 0x12;
+	gpu_cc_pll_config.alpha = 0xc000;
+	gpu_cc_pll_config.config_ctl_val = 0x20485699;
+	gpu_cc_pll_config.config_ctl_hi_val = 0x00002067;
+	gpu_cc_pll_config.user_ctl_val = 0x00000001;
+	gpu_cc_pll_config.user_ctl_hi_val = 0x00004805;
+	gpu_cc_pll_config.test_ctl_hi_val = 0x40000000;
+
+	clk_fabia_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll_config);
+
+	/* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+	mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT;
+	mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT;
+	value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT;
+	regmap_update_bits(regmap, 0x1098, mask, value);
+
+	/* Configure clk_dis_wait for gpu_cx_gdsc */
+	regmap_update_bits(regmap, 0x106c, CLK_DIS_WAIT_MASK,
+						8 << CLK_DIS_WAIT_SHIFT);
+
+	return qcom_cc_really_probe(pdev, &gpu_cc_sc7180_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sc7180_driver = {
+	.probe = gpu_cc_sc7180_probe,
+	.driver = {
+		.name = "sc7180-gpucc",
+		.of_match_table = gpu_cc_sc7180_match_table,
+	},
+};
+
+static int __init gpu_cc_sc7180_init(void)
+{
+	return platform_driver_register(&gpu_cc_sc7180_driver);
+}
+subsys_initcall(gpu_cc_sc7180_init);
+
+static void __exit gpu_cc_sc7180_exit(void)
+{
+	platform_driver_unregister(&gpu_cc_sc7180_driver);
+}
+module_exit(gpu_cc_sc7180_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC SC7180 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c
index a6de7101..5ff7f5a 100644
--- a/drivers/clk/qcom/hfpll.c
+++ b/drivers/clk/qcom/hfpll.c
@@ -53,10 +53,18 @@ static int qcom_hfpll_probe(struct platform_device *pdev)
 	struct regmap *regmap;
 	struct clk_hfpll *h;
 	struct clk_init_data init = {
-		.parent_names = (const char *[]){ "xo" },
 		.num_parents = 1,
 		.ops = &clk_ops_hfpll,
+		/*
+		 * Rather than marking the clock critical and forcing it to be
+		 * always enabled, make sure that the clock is not disabled: the
+		 * firmware remains responsible for enabling this clock (for
+		 * more info, check the commit log).
+		 */
+		.flags = CLK_IGNORE_UNUSED,
 	};
+	int ret;
+	struct clk_parent_data pdata = { .index = 0 };
 
 	h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
 	if (!h)
@@ -75,11 +83,20 @@ static int qcom_hfpll_probe(struct platform_device *pdev)
 					  0, &init.name))
 		return -ENODEV;
 
+	init.parent_data = &pdata;
+
 	h->d = &hdata;
 	h->clkr.hw.init = &init;
 	spin_lock_init(&h->lock);
 
-	return devm_clk_register_regmap(&pdev->dev, &h->clkr);
+	ret = devm_clk_register_regmap(dev, &h->clkr);
+	if (ret) {
+		dev_err(dev, "failed to register regmap clock: %d\n", ret);
+		return ret;
+	}
+
+	return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+					   &h->clkr.hw);
 }
 
 static struct platform_driver qcom_hfpll_driver = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index bcb0a39..0154262 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -452,18 +452,6 @@ static struct clk_rcg2 mdp_clk_src = {
 	},
 };
 
-static struct clk_rcg2 gfx3d_clk_src = {
-	.cmd_rcgr = 0x4000,
-	.hid_width = 5,
-	.parent_map = mmcc_xo_mmpll0_1_2_gpll0_map,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "gfx3d_clk_src",
-		.parent_names = mmcc_xo_mmpll0_1_2_gpll0,
-		.num_parents = 5,
-		.ops = &clk_rcg2_ops,
-	},
-};
-
 static struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
 	F(75000000, P_GPLL0, 8, 0, 0),
 	F(133330000, P_GPLL0, 4.5, 0, 0),
@@ -2411,7 +2399,6 @@ static struct clk_regmap *mmcc_msm8974_clocks[] = {
 	[VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
 	[VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
 	[MDP_CLK_SRC] = &mdp_clk_src.clkr,
-	[GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
 	[JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
 	[JPEG1_CLK_SRC] = &jpeg1_clk_src.clkr,
 	[JPEG2_CLK_SRC] = &jpeg2_clk_src.clkr,
diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
new file mode 100644
index 0000000..dd68983
--- /dev/null
+++ b/drivers/clk/qcom/mmcc-msm8998.c
@@ -0,0 +1,2913 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,mmcc-msm8998.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-alpha-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+	P_XO,
+	P_GPLL0,
+	P_GPLL0_DIV,
+	P_MMPLL0_OUT_EVEN,
+	P_MMPLL1_OUT_EVEN,
+	P_MMPLL3_OUT_EVEN,
+	P_MMPLL4_OUT_EVEN,
+	P_MMPLL5_OUT_EVEN,
+	P_MMPLL6_OUT_EVEN,
+	P_MMPLL7_OUT_EVEN,
+	P_MMPLL10_OUT_EVEN,
+	P_DSI0PLL,
+	P_DSI1PLL,
+	P_DSI0PLL_BYTE,
+	P_DSI1PLL_BYTE,
+	P_HDMIPLL,
+	P_DPVCO,
+	P_DPLINK,
+	P_CORE_BI_PLL_TEST_SE,
+};
+
+static struct clk_fixed_factor gpll0_div = {
+	.mult = 1,
+	.div = 2,
+	.hw.init = &(struct clk_init_data){
+		.name = "mmss_gpll0_div",
+		.parent_data = &(const struct clk_parent_data){
+			.fw_name = "gpll0",
+			.name = "gpll0"
+		},
+		.num_parents = 1,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static const struct clk_div_table post_div_table_fabia_even[] = {
+	{ 0x0, 1 },
+	{ 0x1, 2 },
+	{ 0x3, 4 },
+	{ 0x7, 8 },
+	{ }
+};
+
+static struct clk_alpha_pll mmpll0 = {
+	.offset = 0xc000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr = {
+		.enable_reg = 0x1e0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmpll0",
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "xo",
+				.name = "xo"
+			},
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_fixed_fabia_ops,
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv mmpll0_out_even = {
+	.offset = 0xc000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll0_out_even",
+		.parent_hws = (const struct clk_hw *[]){ &mmpll0.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll mmpll1 = {
+	.offset = 0xc050,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr = {
+		.enable_reg = 0x1e0,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "mmpll1",
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "xo",
+				.name = "xo"
+			},
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_fixed_fabia_ops,
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv mmpll1_out_even = {
+	.offset = 0xc050,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll1_out_even",
+		.parent_hws = (const struct clk_hw *[]){ &mmpll1.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll mmpll3 = {
+	.offset = 0x0,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll3",
+		.parent_data = &(const struct clk_parent_data){
+			.fw_name = "xo",
+			.name = "xo"
+		},
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_fixed_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll_postdiv mmpll3_out_even = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll3_out_even",
+		.parent_hws = (const struct clk_hw *[]){ &mmpll3.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll mmpll4 = {
+	.offset = 0x50,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll4",
+		.parent_data = &(const struct clk_parent_data){
+			.fw_name = "xo",
+			.name = "xo"
+		},
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_fixed_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll_postdiv mmpll4_out_even = {
+	.offset = 0x50,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll4_out_even",
+		.parent_hws = (const struct clk_hw *[]){ &mmpll4.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll mmpll5 = {
+	.offset = 0xa0,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll5",
+		.parent_data = &(const struct clk_parent_data){
+			.fw_name = "xo",
+			.name = "xo"
+		},
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_fixed_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll_postdiv mmpll5_out_even = {
+	.offset = 0xa0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll5_out_even",
+		.parent_hws = (const struct clk_hw *[]){ &mmpll5.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll mmpll6 = {
+	.offset = 0xf0,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll6",
+		.parent_data = &(const struct clk_parent_data){
+			.fw_name = "xo",
+			.name = "xo"
+		},
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_fixed_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll_postdiv mmpll6_out_even = {
+	.offset = 0xf0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll6_out_even",
+		.parent_hws = (const struct clk_hw *[]){ &mmpll6.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll mmpll7 = {
+	.offset = 0x140,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll7",
+		.parent_data = &(const struct clk_parent_data){
+			.fw_name = "xo",
+			.name = "xo"
+		},
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_fixed_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll_postdiv mmpll7_out_even = {
+	.offset = 0x140,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll7_out_even",
+		.parent_hws = (const struct clk_hw *[]){ &mmpll7.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll mmpll10 = {
+	.offset = 0x190,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll10",
+		.parent_data = &(const struct clk_parent_data){
+			.fw_name = "xo",
+			.name = "xo"
+		},
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_fixed_fabia_ops,
+	},
+};
+
+static struct clk_alpha_pll_postdiv mmpll10_out_even = {
+	.offset = 0x190,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mmpll10_out_even",
+		.parent_hws = (const struct clk_hw *[]){ &mmpll10.clkr.hw },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+	},
+};
+
+static const struct parent_map mmss_xo_hdmi_map[] = {
+	{ P_XO, 0 },
+	{ P_HDMIPLL, 1 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_hdmi[] = {
+	{ .fw_name = "xo", .name = "xo" },
+	{ .fw_name = "hdmipll", .name = "hdmipll" },
+	{ .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
+	{ P_XO, 0 },
+	{ P_DSI0PLL, 1 },
+	{ P_DSI1PLL, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_dsi0pll_dsi1pll[] = {
+	{ .fw_name = "xo", .name = "xo" },
+	{ .fw_name = "dsi0dsi", .name = "dsi0dsi" },
+	{ .fw_name = "dsi1dsi", .name = "dsi1dsi" },
+	{ .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_dsibyte_map[] = {
+	{ P_XO, 0 },
+	{ P_DSI0PLL_BYTE, 1 },
+	{ P_DSI1PLL_BYTE, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_dsibyte[] = {
+	{ .fw_name = "xo", .name = "xo" },
+	{ .fw_name = "dsi0byte", .name = "dsi0byte" },
+	{ .fw_name = "dsi1byte", .name = "dsi1byte" },
+	{ .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_dp_map[] = {
+	{ P_XO, 0 },
+	{ P_DPLINK, 1 },
+	{ P_DPVCO, 2 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_dp[] = {
+	{ .fw_name = "xo", .name = "xo" },
+	{ .fw_name = "dplink", .name = "dplink" },
+	{ .fw_name = "dpvco", .name = "dpvco" },
+	{ .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
+	{ P_XO, 0 },
+	{ P_GPLL0, 5 },
+	{ P_GPLL0_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_gpll0_gpll0_div[] = {
+	{ .fw_name = "xo", .name = "xo" },
+	{ .fw_name = "gpll0", .name = "gpll0" },
+	{ .hw = &gpll0_div.hw },
+	{ .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
+	{ P_XO, 0 },
+	{ P_MMPLL0_OUT_EVEN, 1 },
+	{ P_GPLL0, 5 },
+	{ P_GPLL0_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_gpll0_gpll0_div[] = {
+	{ .fw_name = "xo", .name = "xo" },
+	{ .hw = &mmpll0_out_even.clkr.hw },
+	{ .fw_name = "gpll0", .name = "gpll0" },
+	{ .hw = &gpll0_div.hw },
+	{ .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
+	{ P_XO, 0 },
+	{ P_MMPLL0_OUT_EVEN, 1 },
+	{ P_MMPLL1_OUT_EVEN, 2 },
+	{ P_GPLL0, 5 },
+	{ P_GPLL0_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
+	{ .fw_name = "xo", .name = "xo" },
+	{ .hw = &mmpll0_out_even.clkr.hw },
+	{ .hw = &mmpll1_out_even.clkr.hw },
+	{ .fw_name = "gpll0", .name = "gpll0" },
+	{ .hw = &gpll0_div.hw },
+	{ .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
+	{ P_XO, 0 },
+	{ P_MMPLL0_OUT_EVEN, 1 },
+	{ P_MMPLL5_OUT_EVEN, 2 },
+	{ P_GPLL0, 5 },
+	{ P_GPLL0_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
+	{ .fw_name = "xo", .name = "xo" },
+	{ .hw = &mmpll0_out_even.clkr.hw },
+	{ .hw = &mmpll5_out_even.clkr.hw },
+	{ .fw_name = "gpll0", .name = "gpll0" },
+	{ .hw = &gpll0_div.hw },
+	{ .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map[] = {
+	{ P_XO, 0 },
+	{ P_MMPLL0_OUT_EVEN, 1 },
+	{ P_MMPLL3_OUT_EVEN, 3 },
+	{ P_MMPLL6_OUT_EVEN, 4 },
+	{ P_GPLL0, 5 },
+	{ P_GPLL0_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div[] = {
+	{ .fw_name = "xo", .name = "xo" },
+	{ .hw = &mmpll0_out_even.clkr.hw },
+	{ .hw = &mmpll3_out_even.clkr.hw },
+	{ .hw = &mmpll6_out_even.clkr.hw },
+	{ .fw_name = "gpll0", .name = "gpll0" },
+	{ .hw = &gpll0_div.hw },
+	{ .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
+	{ P_XO, 0 },
+	{ P_MMPLL4_OUT_EVEN, 1 },
+	{ P_MMPLL7_OUT_EVEN, 2 },
+	{ P_MMPLL10_OUT_EVEN, 3 },
+	{ P_GPLL0, 5 },
+	{ P_GPLL0_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div[] = {
+	{ .fw_name = "xo", .name = "xo" },
+	{ .hw = &mmpll4_out_even.clkr.hw },
+	{ .hw = &mmpll7_out_even.clkr.hw },
+	{ .hw = &mmpll10_out_even.clkr.hw },
+	{ .fw_name = "gpll0", .name = "gpll0" },
+	{ .hw = &gpll0_div.hw },
+	{ .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
+	{ P_XO, 0 },
+	{ P_MMPLL0_OUT_EVEN, 1 },
+	{ P_MMPLL7_OUT_EVEN, 2 },
+	{ P_MMPLL10_OUT_EVEN, 3 },
+	{ P_GPLL0, 5 },
+	{ P_GPLL0_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div[] = {
+	{ .fw_name = "xo", .name = "xo" },
+	{ .hw = &mmpll0_out_even.clkr.hw },
+	{ .hw = &mmpll7_out_even.clkr.hw },
+	{ .hw = &mmpll10_out_even.clkr.hw },
+	{ .fw_name = "gpll0", .name = "gpll0" },
+	{ .hw = &gpll0_div.hw },
+	{ .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
+	{ P_XO, 0 },
+	{ P_MMPLL0_OUT_EVEN, 1 },
+	{ P_MMPLL4_OUT_EVEN, 2 },
+	{ P_MMPLL7_OUT_EVEN, 3 },
+	{ P_MMPLL10_OUT_EVEN, 4 },
+	{ P_GPLL0, 5 },
+	{ P_GPLL0_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div[] = {
+	{ .fw_name = "xo", .name = "xo" },
+	{ .hw = &mmpll0_out_even.clkr.hw },
+	{ .hw = &mmpll4_out_even.clkr.hw },
+	{ .hw = &mmpll7_out_even.clkr.hw },
+	{ .hw = &mmpll10_out_even.clkr.hw },
+	{ .fw_name = "gpll0", .name = "gpll0" },
+	{ .hw = &gpll0_div.hw },
+	{ .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+	.cmd_rcgr = 0x2120,
+	.hid_width = 5,
+	.parent_map = mmss_xo_dsibyte_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "byte0_clk_src",
+		.parent_data = mmss_xo_dsibyte,
+		.num_parents = 4,
+		.ops = &clk_byte2_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+	.cmd_rcgr = 0x2140,
+	.hid_width = 5,
+	.parent_map = mmss_xo_dsibyte_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "byte1_clk_src",
+		.parent_data = mmss_xo_dsibyte,
+		.num_parents = 4,
+		.ops = &clk_byte2_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static const struct freq_tbl ftbl_cci_clk_src[] = {
+	F(37500000, P_GPLL0, 16, 0, 0),
+	F(50000000, P_GPLL0, 12, 0, 0),
+	F(100000000, P_GPLL0, 6, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+	.cmd_rcgr = 0x3300,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_cci_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cci_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 7,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_cpp_clk_src[] = {
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(200000000, P_GPLL0, 3, 0, 0),
+	F(384000000, P_MMPLL4_OUT_EVEN, 2, 0, 0),
+	F(404000000, P_MMPLL0_OUT_EVEN, 2, 0, 0),
+	F(480000000, P_MMPLL7_OUT_EVEN, 2, 0, 0),
+	F(576000000, P_MMPLL10_OUT_EVEN, 1, 0, 0),
+	F(600000000, P_GPLL0, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+	.cmd_rcgr = 0x3640,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_cpp_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cpp_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 8,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_csi_clk_src[] = {
+	F(164571429, P_MMPLL10_OUT_EVEN, 3.5, 0, 0),
+	F(256000000, P_MMPLL4_OUT_EVEN, 3, 0, 0),
+	F(274290000, P_MMPLL7_OUT_EVEN, 3.5, 0, 0),
+	F(300000000, P_GPLL0, 2, 0, 0),
+	F(384000000, P_MMPLL4_OUT_EVEN, 2, 0, 0),
+	F(576000000, P_MMPLL10_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+	.cmd_rcgr = 0x3090,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_csi_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csi0_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 8,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+	.cmd_rcgr = 0x3100,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_csi_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csi1_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 8,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+	.cmd_rcgr = 0x3160,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_csi_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csi2_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 8,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 csi3_clk_src = {
+	.cmd_rcgr = 0x31c0,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_csi_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csi3_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 8,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_csiphy_clk_src[] = {
+	F(164571429, P_MMPLL10_OUT_EVEN, 3.5, 0, 0),
+	F(256000000, P_MMPLL4_OUT_EVEN, 3, 0, 0),
+	F(274290000, P_MMPLL7_OUT_EVEN, 3.5, 0, 0),
+	F(300000000, P_GPLL0, 2, 0, 0),
+	F(384000000, P_MMPLL4_OUT_EVEN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 csiphy_clk_src = {
+	.cmd_rcgr = 0x3800,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_csiphy_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csiphy_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 8,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_csiphytimer_clk_src[] = {
+	F(200000000, P_GPLL0, 3, 0, 0),
+	F(269333333, P_MMPLL0_OUT_EVEN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+	.cmd_rcgr = 0x3000,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_csiphytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csi0phytimer_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 8,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+	.cmd_rcgr = 0x3030,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_csiphytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csi1phytimer_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 8,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 csi2phytimer_clk_src = {
+	.cmd_rcgr = 0x3060,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_csiphytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "csi2phytimer_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 8,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_dp_aux_clk_src[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 dp_aux_clk_src = {
+	.cmd_rcgr = 0x2260,
+	.hid_width = 5,
+	.parent_map = mmss_xo_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_dp_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "dp_aux_clk_src",
+		.parent_data = mmss_xo_gpll0_gpll0_div,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_dp_crypto_clk_src[] = {
+	F(101250, P_DPLINK, 1, 5, 16),
+	F(168750, P_DPLINK, 1, 5, 16),
+	F(337500, P_DPLINK, 1, 5, 16),
+	{ }
+};
+
+static struct clk_rcg2 dp_crypto_clk_src = {
+	.cmd_rcgr = 0x2220,
+	.hid_width = 5,
+	.parent_map = mmss_xo_dp_map,
+	.freq_tbl = ftbl_dp_crypto_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "dp_crypto_clk_src",
+		.parent_data = mmss_xo_dp,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_dp_link_clk_src[] = {
+	F(162000, P_DPLINK, 2, 0, 0),
+	F(270000, P_DPLINK, 2, 0, 0),
+	F(540000, P_DPLINK, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 dp_link_clk_src = {
+	.cmd_rcgr = 0x2200,
+	.hid_width = 5,
+	.parent_map = mmss_xo_dp_map,
+	.freq_tbl = ftbl_dp_link_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "dp_link_clk_src",
+		.parent_data = mmss_xo_dp,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_dp_pixel_clk_src[] = {
+	F(154000000, P_DPVCO, 1, 0, 0),
+	F(337500000, P_DPVCO, 2, 0, 0),
+	F(675000000, P_DPVCO, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 dp_pixel_clk_src = {
+	.cmd_rcgr = 0x2240,
+	.hid_width = 5,
+	.parent_map = mmss_xo_dp_map,
+	.freq_tbl = ftbl_dp_pixel_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "dp_pixel_clk_src",
+		.parent_data = mmss_xo_dp,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_esc_clk_src[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+	.cmd_rcgr = 0x2160,
+	.hid_width = 5,
+	.parent_map = mmss_xo_dsibyte_map,
+	.freq_tbl = ftbl_esc_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "esc0_clk_src",
+		.parent_data = mmss_xo_dsibyte,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+	.cmd_rcgr = 0x2180,
+	.hid_width = 5,
+	.parent_map = mmss_xo_dsibyte_map,
+	.freq_tbl = ftbl_esc_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "esc1_clk_src",
+		.parent_data = mmss_xo_dsibyte,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_extpclk_clk_src[] = {
+	{ .src = P_HDMIPLL },
+	{ }
+};
+
+static struct clk_rcg2 extpclk_clk_src = {
+	.cmd_rcgr = 0x2060,
+	.hid_width = 5,
+	.parent_map = mmss_xo_hdmi_map,
+	.freq_tbl = ftbl_extpclk_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "extpclk_clk_src",
+		.parent_data = mmss_xo_hdmi,
+		.num_parents = 3,
+		.ops = &clk_byte_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static const struct freq_tbl ftbl_fd_core_clk_src[] = {
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(200000000, P_GPLL0, 3, 0, 0),
+	F(404000000, P_MMPLL0_OUT_EVEN, 2, 0, 0),
+	F(480000000, P_MMPLL7_OUT_EVEN, 2, 0, 0),
+	F(576000000, P_MMPLL10_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 fd_core_clk_src = {
+	.cmd_rcgr = 0x3b00,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_fd_core_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "fd_core_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 8,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_hdmi_clk_src[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 hdmi_clk_src = {
+	.cmd_rcgr = 0x2100,
+	.hid_width = 5,
+	.parent_map = mmss_xo_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_hdmi_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "hdmi_clk_src",
+		.parent_data = mmss_xo_gpll0_gpll0_div,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_jpeg0_clk_src[] = {
+	F(75000000, P_GPLL0, 8, 0, 0),
+	F(150000000, P_GPLL0, 4, 0, 0),
+	F(320000000, P_MMPLL7_OUT_EVEN, 3, 0, 0),
+	F(480000000, P_MMPLL7_OUT_EVEN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+	.cmd_rcgr = 0x3500,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_jpeg0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "jpeg0_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 8,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_maxi_clk_src[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	F(75000000, P_GPLL0_DIV, 4, 0, 0),
+	F(171428571, P_GPLL0, 3.5, 0, 0),
+	F(323200000, P_MMPLL0_OUT_EVEN, 2.5, 0, 0),
+	F(406000000, P_MMPLL1_OUT_EVEN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 maxi_clk_src = {
+	.cmd_rcgr = 0xf020,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_maxi_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "maxi_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_mclk_clk_src[] = {
+	F(4800000, P_XO, 4, 0, 0),
+	F(6000000, P_GPLL0_DIV, 10, 1, 5),
+	F(8000000, P_GPLL0_DIV, 1, 2, 75),
+	F(9600000, P_XO, 2, 0, 0),
+	F(16666667, P_GPLL0_DIV, 2, 1, 9),
+	F(19200000, P_XO, 1, 0, 0),
+	F(24000000, P_GPLL0_DIV, 1, 2, 25),
+	F(33333333, P_GPLL0_DIV, 1, 2, 9),
+	F(48000000, P_GPLL0, 1, 2, 25),
+	F(66666667, P_GPLL0, 1, 2, 9),
+	{ }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+	.cmd_rcgr = 0x3360,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_mclk_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mclk0_clk_src",
+		.parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 7,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+	.cmd_rcgr = 0x3390,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_mclk_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mclk1_clk_src",
+		.parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 7,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+	.cmd_rcgr = 0x33c0,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_mclk_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mclk2_clk_src",
+		.parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 7,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 mclk3_clk_src = {
+	.cmd_rcgr = 0x33f0,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_mclk_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mclk3_clk_src",
+		.parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 7,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_mdp_clk_src[] = {
+	F(85714286, P_GPLL0, 7, 0, 0),
+	F(100000000, P_GPLL0, 6, 0, 0),
+	F(150000000, P_GPLL0, 4, 0, 0),
+	F(171428571, P_GPLL0, 3.5, 0, 0),
+	F(200000000, P_GPLL0, 3, 0, 0),
+	F(275000000, P_MMPLL5_OUT_EVEN, 3, 0, 0),
+	F(300000000, P_GPLL0, 2, 0, 0),
+	F(330000000, P_MMPLL5_OUT_EVEN, 2.5, 0, 0),
+	F(412500000, P_MMPLL5_OUT_EVEN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+	.cmd_rcgr = 0x2040,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_mdp_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "mdp_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_vsync_clk_src[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+	.cmd_rcgr = 0x2080,
+	.hid_width = 5,
+	.parent_map = mmss_xo_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_vsync_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "vsync_clk_src",
+		.parent_data = mmss_xo_gpll0_gpll0_div,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_ahb_clk_src[] = {
+	F(19200000, P_XO, 1, 0, 0),
+	F(40000000, P_GPLL0, 15, 0, 0),
+	F(80800000, P_MMPLL0_OUT_EVEN, 10, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 ahb_clk_src = {
+	.cmd_rcgr = 0x5000,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "ahb_clk_src",
+		.parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_axi_clk_src[] = {
+	F(75000000, P_GPLL0, 8, 0, 0),
+	F(171428571, P_GPLL0, 3.5, 0, 0),
+	F(240000000, P_GPLL0, 2.5, 0, 0),
+	F(323200000, P_MMPLL0_OUT_EVEN, 2.5, 0, 0),
+	F(406000000, P_MMPLL0_OUT_EVEN, 2, 0, 0),
+	{ }
+};
+
+/* RO (read-only) to Linux */
+static struct clk_rcg2 axi_clk_src = {
+	.cmd_rcgr = 0xd000,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_axi_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "axi_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+	.cmd_rcgr = 0x2000,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = mmss_xo_dsi0pll_dsi1pll_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pclk0_clk_src",
+		.parent_data = mmss_xo_dsi0pll_dsi1pll,
+		.num_parents = 4,
+		.ops = &clk_pixel_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+	.cmd_rcgr = 0x2020,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = mmss_xo_dsi0pll_dsi1pll_map,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pclk1_clk_src",
+		.parent_data = mmss_xo_dsi0pll_dsi1pll,
+		.num_parents = 4,
+		.ops = &clk_pixel_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static const struct freq_tbl ftbl_rot_clk_src[] = {
+	F(171428571, P_GPLL0, 3.5, 0, 0),
+	F(275000000, P_MMPLL5_OUT_EVEN, 3, 0, 0),
+	F(330000000, P_MMPLL5_OUT_EVEN, 2.5, 0, 0),
+	F(412500000, P_MMPLL5_OUT_EVEN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 rot_clk_src = {
+	.cmd_rcgr = 0x21a0,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_rot_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "rot_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
+		.num_parents = 6,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_video_core_clk_src[] = {
+	F(200000000, P_GPLL0, 3, 0, 0),
+	F(269330000, P_MMPLL0_OUT_EVEN, 3, 0, 0),
+	F(355200000, P_MMPLL6_OUT_EVEN, 2.5, 0, 0),
+	F(444000000, P_MMPLL6_OUT_EVEN, 2, 0, 0),
+	F(533000000, P_MMPLL3_OUT_EVEN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 video_core_clk_src = {
+	.cmd_rcgr = 0x1000,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_video_core_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "video_core_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
+		.num_parents = 7,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 video_subcore0_clk_src = {
+	.cmd_rcgr = 0x1060,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_video_core_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "video_subcore0_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
+		.num_parents = 7,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 video_subcore1_clk_src = {
+	.cmd_rcgr = 0x1080,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_video_core_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "video_subcore1_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
+		.num_parents = 7,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_vfe_clk_src[] = {
+	F(200000000, P_GPLL0, 3, 0, 0),
+	F(300000000, P_GPLL0, 2, 0, 0),
+	F(320000000, P_MMPLL7_OUT_EVEN, 3, 0, 0),
+	F(384000000, P_MMPLL4_OUT_EVEN, 2, 0, 0),
+	F(404000000, P_MMPLL0_OUT_EVEN, 2, 0, 0),
+	F(480000000, P_MMPLL7_OUT_EVEN, 2, 0, 0),
+	F(576000000, P_MMPLL10_OUT_EVEN, 1, 0, 0),
+	F(600000000, P_GPLL0, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+	.cmd_rcgr = 0x3600,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_vfe_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "vfe0_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 8,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+	.cmd_rcgr = 0x3620,
+	.hid_width = 5,
+	.parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+	.freq_tbl = ftbl_vfe_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "vfe1_clk_src",
+		.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+		.num_parents = 8,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_branch misc_ahb_clk = {
+	.halt_reg = 0x328,
+	.clkr = {
+		.enable_reg = 0x328,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "misc_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch video_core_clk = {
+	.halt_reg = 0x1028,
+	.clkr = {
+		.enable_reg = 0x1028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_core_clk",
+			.parent_hws = (const struct clk_hw *[]){ &video_core_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch video_ahb_clk = {
+	.halt_reg = 0x1030,
+	.clkr = {
+		.enable_reg = 0x1030,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch video_axi_clk = {
+	.halt_reg = 0x1034,
+	.clkr = {
+		.enable_reg = 0x1034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_axi_clk",
+			.parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch video_maxi_clk = {
+	.halt_reg = 0x1038,
+	.clkr = {
+		.enable_reg = 0x1038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_maxi_clk",
+			.parent_hws = (const struct clk_hw *[]){ &maxi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch video_subcore0_clk = {
+	.halt_reg = 0x1048,
+	.clkr = {
+		.enable_reg = 0x1048,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_subcore0_clk",
+			.parent_hws = (const struct clk_hw *[]){ &video_subcore0_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch video_subcore1_clk = {
+	.halt_reg = 0x104c,
+	.clkr = {
+		.enable_reg = 0x104c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_subcore1_clk",
+			.parent_hws = (const struct clk_hw *[]){ &video_subcore1_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_ahb_clk = {
+	.halt_reg = 0x2308,
+	.clkr = {
+		.enable_reg = 0x2308,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_hdmi_dp_ahb_clk = {
+	.halt_reg = 0x230c,
+	.clkr = {
+		.enable_reg = 0x230c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_hdmi_dp_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_axi_clk = {
+	.halt_reg = 0x2310,
+	.clkr = {
+		.enable_reg = 0x2310,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_axi_clk",
+			.parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mdss_pclk0_clk = {
+	.halt_reg = 0x2314,
+	.clkr = {
+		.enable_reg = 0x2314,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_pclk0_clk",
+			.parent_hws = (const struct clk_hw *[]){ &pclk0_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_pclk1_clk = {
+	.halt_reg = 0x2318,
+	.clkr = {
+		.enable_reg = 0x2318,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_pclk1_clk",
+			.parent_hws = (const struct clk_hw *[]){ &pclk1_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_mdp_clk = {
+	.halt_reg = 0x231c,
+	.clkr = {
+		.enable_reg = 0x231c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_mdp_clk",
+			.parent_hws = (const struct clk_hw *[]){ &mdp_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_mdp_lut_clk = {
+	.halt_reg = 0x2320,
+	.clkr = {
+		.enable_reg = 0x2320,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_mdp_lut_clk",
+			.parent_hws = (const struct clk_hw *[]){ &mdp_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_extpclk_clk = {
+	.halt_reg = 0x2324,
+	.clkr = {
+		.enable_reg = 0x2324,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_extpclk_clk",
+			.parent_hws = (const struct clk_hw *[]){ &extpclk_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_vsync_clk = {
+	.halt_reg = 0x2328,
+	.clkr = {
+		.enable_reg = 0x2328,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_vsync_clk",
+			.parent_hws = (const struct clk_hw *[]){ &vsync_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_hdmi_clk = {
+	.halt_reg = 0x2338,
+	.clkr = {
+		.enable_reg = 0x2338,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_hdmi_clk",
+			.parent_hws = (const struct clk_hw *[]){ &hdmi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_byte0_clk = {
+	.halt_reg = 0x233c,
+	.clkr = {
+		.enable_reg = 0x233c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_byte0_clk",
+			.parent_hws = (const struct clk_hw *[]){ &byte0_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_byte1_clk = {
+	.halt_reg = 0x2340,
+	.clkr = {
+		.enable_reg = 0x2340,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_byte1_clk",
+			.parent_hws = (const struct clk_hw *[]){ &byte1_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_esc0_clk = {
+	.halt_reg = 0x2344,
+	.clkr = {
+		.enable_reg = 0x2344,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_esc0_clk",
+			.parent_hws = (const struct clk_hw *[]){ &esc0_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_esc1_clk = {
+	.halt_reg = 0x2348,
+	.clkr = {
+		.enable_reg = 0x2348,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_esc1_clk",
+			.parent_hws = (const struct clk_hw *[]){ &esc1_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_rot_clk = {
+	.halt_reg = 0x2350,
+	.clkr = {
+		.enable_reg = 0x2350,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_rot_clk",
+			.parent_hws = (const struct clk_hw *[]){ &rot_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_dp_link_clk = {
+	.halt_reg = 0x2354,
+	.clkr = {
+		.enable_reg = 0x2354,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_dp_link_clk",
+			.parent_hws = (const struct clk_hw *[]){ &dp_link_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_dp_link_intf_clk = {
+	.halt_reg = 0x2358,
+	.clkr = {
+		.enable_reg = 0x2358,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_dp_link_intf_clk",
+			.parent_hws = (const struct clk_hw *[]){ &dp_link_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_dp_crypto_clk = {
+	.halt_reg = 0x235c,
+	.clkr = {
+		.enable_reg = 0x235c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_dp_crypto_clk",
+			.parent_hws = (const struct clk_hw *[]){ &dp_crypto_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_dp_pixel_clk = {
+	.halt_reg = 0x2360,
+	.clkr = {
+		.enable_reg = 0x2360,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_dp_pixel_clk",
+			.parent_hws = (const struct clk_hw *[]){ &dp_pixel_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_dp_aux_clk = {
+	.halt_reg = 0x2364,
+	.clkr = {
+		.enable_reg = 0x2364,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_dp_aux_clk",
+			.parent_hws = (const struct clk_hw *[]){ &dp_aux_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_byte0_intf_clk = {
+	.halt_reg = 0x2374,
+	.clkr = {
+		.enable_reg = 0x2374,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_byte0_intf_clk",
+			.parent_hws = (const struct clk_hw *[]){ &byte0_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mdss_byte1_intf_clk = {
+	.halt_reg = 0x2378,
+	.clkr = {
+		.enable_reg = 0x2378,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mdss_byte1_intf_clk",
+			.parent_hws = (const struct clk_hw *[]){ &byte1_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi0phytimer_clk = {
+	.halt_reg = 0x3024,
+	.clkr = {
+		.enable_reg = 0x3024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi0phytimer_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi0phytimer_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi1phytimer_clk = {
+	.halt_reg = 0x3054,
+	.clkr = {
+		.enable_reg = 0x3054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi1phytimer_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi1phytimer_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi2phytimer_clk = {
+	.halt_reg = 0x3084,
+	.clkr = {
+		.enable_reg = 0x3084,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi2phytimer_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi2phytimer_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi0_clk = {
+	.halt_reg = 0x30b4,
+	.clkr = {
+		.enable_reg = 0x30b4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi0_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi0_ahb_clk = {
+	.halt_reg = 0x30bc,
+	.clkr = {
+		.enable_reg = 0x30bc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi0_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi0rdi_clk = {
+	.halt_reg = 0x30d4,
+	.clkr = {
+		.enable_reg = 0x30d4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi0rdi_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi0pix_clk = {
+	.halt_reg = 0x30e4,
+	.clkr = {
+		.enable_reg = 0x30e4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi0pix_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi1_clk = {
+	.halt_reg = 0x3124,
+	.clkr = {
+		.enable_reg = 0x3124,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi1_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi1_ahb_clk = {
+	.halt_reg = 0x3128,
+	.clkr = {
+		.enable_reg = 0x3128,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi1_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi1rdi_clk = {
+	.halt_reg = 0x3144,
+	.clkr = {
+		.enable_reg = 0x3144,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi1rdi_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi1pix_clk = {
+	.halt_reg = 0x3154,
+	.clkr = {
+		.enable_reg = 0x3154,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi1pix_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi2_clk = {
+	.halt_reg = 0x3184,
+	.clkr = {
+		.enable_reg = 0x3184,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi2_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi2_ahb_clk = {
+	.halt_reg = 0x3188,
+	.clkr = {
+		.enable_reg = 0x3188,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi2_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi2rdi_clk = {
+	.halt_reg = 0x31a4,
+	.clkr = {
+		.enable_reg = 0x31a4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi2rdi_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi2pix_clk = {
+	.halt_reg = 0x31b4,
+	.clkr = {
+		.enable_reg = 0x31b4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi2pix_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi3_clk = {
+	.halt_reg = 0x31e4,
+	.clkr = {
+		.enable_reg = 0x31e4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi3_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi3_ahb_clk = {
+	.halt_reg = 0x31e8,
+	.clkr = {
+		.enable_reg = 0x31e8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi3_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi3rdi_clk = {
+	.halt_reg = 0x3204,
+	.clkr = {
+		.enable_reg = 0x3204,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi3rdi_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi3pix_clk = {
+	.halt_reg = 0x3214,
+	.clkr = {
+		.enable_reg = 0x3214,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi3pix_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_ispif_ahb_clk = {
+	.halt_reg = 0x3224,
+	.clkr = {
+		.enable_reg = 0x3224,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_ispif_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_cci_clk = {
+	.halt_reg = 0x3344,
+	.clkr = {
+		.enable_reg = 0x3344,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_cci_clk",
+			.parent_hws = (const struct clk_hw *[]){ &cci_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_cci_ahb_clk = {
+	.halt_reg = 0x3348,
+	.clkr = {
+		.enable_reg = 0x3348,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_cci_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_mclk0_clk = {
+	.halt_reg = 0x3384,
+	.clkr = {
+		.enable_reg = 0x3384,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_mclk0_clk",
+			.parent_hws = (const struct clk_hw *[]){ &mclk0_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_mclk1_clk = {
+	.halt_reg = 0x33b4,
+	.clkr = {
+		.enable_reg = 0x33b4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_mclk1_clk",
+			.parent_hws = (const struct clk_hw *[]){ &mclk1_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_mclk2_clk = {
+	.halt_reg = 0x33e4,
+	.clkr = {
+		.enable_reg = 0x33e4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_mclk2_clk",
+			.parent_hws = (const struct clk_hw *[]){ &mclk2_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_mclk3_clk = {
+	.halt_reg = 0x3414,
+	.clkr = {
+		.enable_reg = 0x3414,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_mclk3_clk",
+			.parent_hws = (const struct clk_hw *[]){ &mclk3_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_top_ahb_clk = {
+	.halt_reg = 0x3484,
+	.clkr = {
+		.enable_reg = 0x3484,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_top_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_ahb_clk = {
+	.halt_reg = 0x348c,
+	.clkr = {
+		.enable_reg = 0x348c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_micro_ahb_clk = {
+	.halt_reg = 0x3494,
+	.clkr = {
+		.enable_reg = 0x3494,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_micro_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_jpeg0_clk = {
+	.halt_reg = 0x35a8,
+	.clkr = {
+		.enable_reg = 0x35a8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_jpeg0_clk",
+			.parent_hws = (const struct clk_hw *[]){ &jpeg0_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_jpeg_ahb_clk = {
+	.halt_reg = 0x35b4,
+	.clkr = {
+		.enable_reg = 0x35b4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_jpeg_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_jpeg_axi_clk = {
+	.halt_reg = 0x35b8,
+	.clkr = {
+		.enable_reg = 0x35b8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_jpeg_axi_clk",
+			.parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_vfe0_ahb_clk = {
+	.halt_reg = 0x3668,
+	.clkr = {
+		.enable_reg = 0x3668,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_vfe0_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_vfe1_ahb_clk = {
+	.halt_reg = 0x3678,
+	.clkr = {
+		.enable_reg = 0x3678,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_vfe1_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_vfe0_clk = {
+	.halt_reg = 0x36a8,
+	.clkr = {
+		.enable_reg = 0x36a8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_vfe0_clk",
+			.parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_vfe1_clk = {
+	.halt_reg = 0x36ac,
+	.clkr = {
+		.enable_reg = 0x36ac,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_vfe1_clk",
+			.parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_cpp_clk = {
+	.halt_reg = 0x36b0,
+	.clkr = {
+		.enable_reg = 0x36b0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_cpp_clk",
+			.parent_hws = (const struct clk_hw *[]){ &cpp_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_cpp_ahb_clk = {
+	.halt_reg = 0x36b4,
+	.clkr = {
+		.enable_reg = 0x36b4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_cpp_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_vfe_vbif_ahb_clk = {
+	.halt_reg = 0x36b8,
+	.clkr = {
+		.enable_reg = 0x36b8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_vfe_vbif_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_vfe_vbif_axi_clk = {
+	.halt_reg = 0x36bc,
+	.clkr = {
+		.enable_reg = 0x36bc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_vfe_vbif_axi_clk",
+			.parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_cpp_axi_clk = {
+	.halt_reg = 0x36c4,
+	.clkr = {
+		.enable_reg = 0x36c4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_cpp_axi_clk",
+			.parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch camss_cpp_vbif_ahb_clk = {
+	.halt_reg = 0x36c8,
+	.clkr = {
+		.enable_reg = 0x36c8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_cpp_vbif_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi_vfe0_clk = {
+	.halt_reg = 0x3704,
+	.clkr = {
+		.enable_reg = 0x3704,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi_vfe0_clk",
+			.parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csi_vfe1_clk = {
+	.halt_reg = 0x3714,
+	.clkr = {
+		.enable_reg = 0x3714,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csi_vfe1_clk",
+			.parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_vfe0_stream_clk = {
+	.halt_reg = 0x3720,
+	.clkr = {
+		.enable_reg = 0x3720,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_vfe0_stream_clk",
+			.parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_vfe1_stream_clk = {
+	.halt_reg = 0x3724,
+	.clkr = {
+		.enable_reg = 0x3724,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_vfe1_stream_clk",
+			.parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_cphy_csid0_clk = {
+	.halt_reg = 0x3730,
+	.clkr = {
+		.enable_reg = 0x3730,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_cphy_csid0_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_cphy_csid1_clk = {
+	.halt_reg = 0x3734,
+	.clkr = {
+		.enable_reg = 0x3734,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_cphy_csid1_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_cphy_csid2_clk = {
+	.halt_reg = 0x3738,
+	.clkr = {
+		.enable_reg = 0x3738,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_cphy_csid2_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_cphy_csid3_clk = {
+	.halt_reg = 0x373c,
+	.clkr = {
+		.enable_reg = 0x373c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_cphy_csid3_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csiphy0_clk = {
+	.halt_reg = 0x3740,
+	.clkr = {
+		.enable_reg = 0x3740,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csiphy0_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csiphy1_clk = {
+	.halt_reg = 0x3744,
+	.clkr = {
+		.enable_reg = 0x3744,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csiphy1_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch camss_csiphy2_clk = {
+	.halt_reg = 0x3748,
+	.clkr = {
+		.enable_reg = 0x3748,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "camss_csiphy2_clk",
+			.parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch fd_core_clk = {
+	.halt_reg = 0x3b68,
+	.clkr = {
+		.enable_reg = 0x3b68,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "fd_core_clk",
+			.parent_hws = (const struct clk_hw *[]){ &fd_core_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch fd_core_uar_clk = {
+	.halt_reg = 0x3b6c,
+	.clkr = {
+		.enable_reg = 0x3b6c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "fd_core_uar_clk",
+			.parent_hws = (const struct clk_hw *[]){ &fd_core_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch fd_ahb_clk = {
+	.halt_reg = 0x3b74,
+	.clkr = {
+		.enable_reg = 0x3b74,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "fd_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch mnoc_ahb_clk = {
+	.halt_reg = 0x5024,
+	.clkr = {
+		.enable_reg = 0x5024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mnoc_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch bimc_smmu_ahb_clk = {
+	.halt_reg = 0xe004,
+	.clkr = {
+		.enable_reg = 0xe004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "bimc_smmu_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch bimc_smmu_axi_clk = {
+	.halt_reg = 0xe008,
+	.clkr = {
+		.enable_reg = 0xe008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "bimc_smmu_axi_clk",
+			.parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch mnoc_maxi_clk = {
+	.halt_reg = 0xf004,
+	.clkr = {
+		.enable_reg = 0xf004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "mnoc_maxi_clk",
+			.parent_hws = (const struct clk_hw *[]){ &maxi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch vmem_maxi_clk = {
+	.halt_reg = 0xf064,
+	.clkr = {
+		.enable_reg = 0xf064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "vmem_maxi_clk",
+			.parent_hws = (const struct clk_hw *[]){ &maxi_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch vmem_ahb_clk = {
+	.halt_reg = 0xf068,
+	.clkr = {
+		.enable_reg = 0xf068,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "vmem_ahb_clk",
+			.parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_hw *mmcc_msm8998_hws[] = {
+	&gpll0_div.hw,
+};
+
+static struct gdsc video_top_gdsc = {
+	.gdscr = 0x1024,
+	.pd = {
+		.name = "video_top",
+	},
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc video_subcore0_gdsc = {
+	.gdscr = 0x1040,
+	.pd = {
+		.name = "video_subcore0",
+	},
+	.parent = &video_top_gdsc.pd,
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc video_subcore1_gdsc = {
+	.gdscr = 0x1044,
+	.pd = {
+		.name = "video_subcore1",
+	},
+	.parent = &video_top_gdsc.pd,
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+	.gdscr = 0x2304,
+	.cxcs = (unsigned int []){ 0x2310, 0x2350, 0x231c, 0x2320 },
+	.cxc_count = 4,
+	.pd = {
+		.name = "mdss",
+	},
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_top_gdsc = {
+	.gdscr = 0x34a0,
+	.cxcs = (unsigned int []){ 0x35b8, 0x36c4, 0x3704, 0x3714, 0x3494,
+				   0x35a8, 0x3868 },
+	.cxc_count = 7,
+	.pd = {
+		.name = "camss_top",
+	},
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_vfe0_gdsc = {
+	.gdscr = 0x3664,
+	.pd = {
+		.name = "camss_vfe0",
+	},
+	.parent = &camss_top_gdsc.pd,
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_vfe1_gdsc = {
+	.gdscr = 0x3674,
+	.pd = {
+		.name = "camss_vfe1_gdsc",
+	},
+	.parent = &camss_top_gdsc.pd,
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_cpp_gdsc = {
+	.gdscr = 0x36d4,
+	.pd = {
+		.name = "camss_cpp",
+	},
+	.parent = &camss_top_gdsc.pd,
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc bimc_smmu_gdsc = {
+	.gdscr = 0xe020,
+	.gds_hw_ctrl = 0xe024,
+	.pd = {
+		.name = "bimc_smmu",
+	},
+	.pwrsts = PWRSTS_OFF_ON,
+	.flags = HW_CTRL,
+};
+
+static struct clk_regmap *mmcc_msm8998_clocks[] = {
+	[MMPLL0] = &mmpll0.clkr,
+	[MMPLL0_OUT_EVEN] = &mmpll0_out_even.clkr,
+	[MMPLL1] = &mmpll1.clkr,
+	[MMPLL1_OUT_EVEN] = &mmpll1_out_even.clkr,
+	[MMPLL3] = &mmpll3.clkr,
+	[MMPLL3_OUT_EVEN] = &mmpll3_out_even.clkr,
+	[MMPLL4] = &mmpll4.clkr,
+	[MMPLL4_OUT_EVEN] = &mmpll4_out_even.clkr,
+	[MMPLL5] = &mmpll5.clkr,
+	[MMPLL5_OUT_EVEN] = &mmpll5_out_even.clkr,
+	[MMPLL6] = &mmpll6.clkr,
+	[MMPLL6_OUT_EVEN] = &mmpll6_out_even.clkr,
+	[MMPLL7] = &mmpll7.clkr,
+	[MMPLL7_OUT_EVEN] = &mmpll7_out_even.clkr,
+	[MMPLL10] = &mmpll10.clkr,
+	[MMPLL10_OUT_EVEN] = &mmpll10_out_even.clkr,
+	[BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+	[BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+	[CCI_CLK_SRC] = &cci_clk_src.clkr,
+	[CPP_CLK_SRC] = &cpp_clk_src.clkr,
+	[CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+	[CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+	[CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+	[CSI3_CLK_SRC] = &csi3_clk_src.clkr,
+	[CSIPHY_CLK_SRC] = &csiphy_clk_src.clkr,
+	[CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+	[CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+	[CSI2PHYTIMER_CLK_SRC] = &csi2phytimer_clk_src.clkr,
+	[DP_AUX_CLK_SRC] = &dp_aux_clk_src.clkr,
+	[DP_CRYPTO_CLK_SRC] = &dp_crypto_clk_src.clkr,
+	[DP_LINK_CLK_SRC] = &dp_link_clk_src.clkr,
+	[DP_PIXEL_CLK_SRC] = &dp_pixel_clk_src.clkr,
+	[ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+	[ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+	[EXTPCLK_CLK_SRC] = &extpclk_clk_src.clkr,
+	[FD_CORE_CLK_SRC] = &fd_core_clk_src.clkr,
+	[HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
+	[JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+	[MAXI_CLK_SRC] = &maxi_clk_src.clkr,
+	[MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+	[MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+	[MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+	[MCLK3_CLK_SRC] = &mclk3_clk_src.clkr,
+	[MDP_CLK_SRC] = &mdp_clk_src.clkr,
+	[VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+	[AHB_CLK_SRC] = &ahb_clk_src.clkr,
+	[AXI_CLK_SRC] = &axi_clk_src.clkr,
+	[PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+	[PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+	[ROT_CLK_SRC] = &rot_clk_src.clkr,
+	[VIDEO_CORE_CLK_SRC] = &video_core_clk_src.clkr,
+	[VIDEO_SUBCORE0_CLK_SRC] = &video_subcore0_clk_src.clkr,
+	[VIDEO_SUBCORE1_CLK_SRC] = &video_subcore1_clk_src.clkr,
+	[VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+	[VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+	[MISC_AHB_CLK] = &misc_ahb_clk.clkr,
+	[VIDEO_CORE_CLK] = &video_core_clk.clkr,
+	[VIDEO_AHB_CLK] = &video_ahb_clk.clkr,
+	[VIDEO_AXI_CLK] = &video_axi_clk.clkr,
+	[VIDEO_MAXI_CLK] = &video_maxi_clk.clkr,
+	[VIDEO_SUBCORE0_CLK] = &video_subcore0_clk.clkr,
+	[VIDEO_SUBCORE1_CLK] = &video_subcore1_clk.clkr,
+	[MDSS_AHB_CLK] = &mdss_ahb_clk.clkr,
+	[MDSS_HDMI_DP_AHB_CLK] = &mdss_hdmi_dp_ahb_clk.clkr,
+	[MDSS_AXI_CLK] = &mdss_axi_clk.clkr,
+	[MDSS_PCLK0_CLK] = &mdss_pclk0_clk.clkr,
+	[MDSS_PCLK1_CLK] = &mdss_pclk1_clk.clkr,
+	[MDSS_MDP_CLK] = &mdss_mdp_clk.clkr,
+	[MDSS_MDP_LUT_CLK] = &mdss_mdp_lut_clk.clkr,
+	[MDSS_EXTPCLK_CLK] = &mdss_extpclk_clk.clkr,
+	[MDSS_VSYNC_CLK] = &mdss_vsync_clk.clkr,
+	[MDSS_HDMI_CLK] = &mdss_hdmi_clk.clkr,
+	[MDSS_BYTE0_CLK] = &mdss_byte0_clk.clkr,
+	[MDSS_BYTE1_CLK] = &mdss_byte1_clk.clkr,
+	[MDSS_ESC0_CLK] = &mdss_esc0_clk.clkr,
+	[MDSS_ESC1_CLK] = &mdss_esc1_clk.clkr,
+	[MDSS_ROT_CLK] = &mdss_rot_clk.clkr,
+	[MDSS_DP_LINK_CLK] = &mdss_dp_link_clk.clkr,
+	[MDSS_DP_LINK_INTF_CLK] = &mdss_dp_link_intf_clk.clkr,
+	[MDSS_DP_CRYPTO_CLK] = &mdss_dp_crypto_clk.clkr,
+	[MDSS_DP_PIXEL_CLK] = &mdss_dp_pixel_clk.clkr,
+	[MDSS_DP_AUX_CLK] = &mdss_dp_aux_clk.clkr,
+	[MDSS_BYTE0_INTF_CLK] = &mdss_byte0_intf_clk.clkr,
+	[MDSS_BYTE1_INTF_CLK] = &mdss_byte1_intf_clk.clkr,
+	[CAMSS_CSI0PHYTIMER_CLK] = &camss_csi0phytimer_clk.clkr,
+	[CAMSS_CSI1PHYTIMER_CLK] = &camss_csi1phytimer_clk.clkr,
+	[CAMSS_CSI2PHYTIMER_CLK] = &camss_csi2phytimer_clk.clkr,
+	[CAMSS_CSI0_CLK] = &camss_csi0_clk.clkr,
+	[CAMSS_CSI0_AHB_CLK] = &camss_csi0_ahb_clk.clkr,
+	[CAMSS_CSI0RDI_CLK] = &camss_csi0rdi_clk.clkr,
+	[CAMSS_CSI0PIX_CLK] = &camss_csi0pix_clk.clkr,
+	[CAMSS_CSI1_CLK] = &camss_csi1_clk.clkr,
+	[CAMSS_CSI1_AHB_CLK] = &camss_csi1_ahb_clk.clkr,
+	[CAMSS_CSI1RDI_CLK] = &camss_csi1rdi_clk.clkr,
+	[CAMSS_CSI1PIX_CLK] = &camss_csi1pix_clk.clkr,
+	[CAMSS_CSI2_CLK] = &camss_csi2_clk.clkr,
+	[CAMSS_CSI2_AHB_CLK] = &camss_csi2_ahb_clk.clkr,
+	[CAMSS_CSI2RDI_CLK] = &camss_csi2rdi_clk.clkr,
+	[CAMSS_CSI2PIX_CLK] = &camss_csi2pix_clk.clkr,
+	[CAMSS_CSI3_CLK] = &camss_csi3_clk.clkr,
+	[CAMSS_CSI3_AHB_CLK] = &camss_csi3_ahb_clk.clkr,
+	[CAMSS_CSI3RDI_CLK] = &camss_csi3rdi_clk.clkr,
+	[CAMSS_CSI3PIX_CLK] = &camss_csi3pix_clk.clkr,
+	[CAMSS_ISPIF_AHB_CLK] = &camss_ispif_ahb_clk.clkr,
+	[CAMSS_CCI_CLK] = &camss_cci_clk.clkr,
+	[CAMSS_CCI_AHB_CLK] = &camss_cci_ahb_clk.clkr,
+	[CAMSS_MCLK0_CLK] = &camss_mclk0_clk.clkr,
+	[CAMSS_MCLK1_CLK] = &camss_mclk1_clk.clkr,
+	[CAMSS_MCLK2_CLK] = &camss_mclk2_clk.clkr,
+	[CAMSS_MCLK3_CLK] = &camss_mclk3_clk.clkr,
+	[CAMSS_TOP_AHB_CLK] = &camss_top_ahb_clk.clkr,
+	[CAMSS_AHB_CLK] = &camss_ahb_clk.clkr,
+	[CAMSS_MICRO_AHB_CLK] = &camss_micro_ahb_clk.clkr,
+	[CAMSS_JPEG0_CLK] = &camss_jpeg0_clk.clkr,
+	[CAMSS_JPEG_AHB_CLK] = &camss_jpeg_ahb_clk.clkr,
+	[CAMSS_JPEG_AXI_CLK] = &camss_jpeg_axi_clk.clkr,
+	[CAMSS_VFE0_AHB_CLK] = &camss_vfe0_ahb_clk.clkr,
+	[CAMSS_VFE1_AHB_CLK] = &camss_vfe1_ahb_clk.clkr,
+	[CAMSS_VFE0_CLK] = &camss_vfe0_clk.clkr,
+	[CAMSS_VFE1_CLK] = &camss_vfe1_clk.clkr,
+	[CAMSS_CPP_CLK] = &camss_cpp_clk.clkr,
+	[CAMSS_CPP_AHB_CLK] = &camss_cpp_ahb_clk.clkr,
+	[CAMSS_VFE_VBIF_AHB_CLK] = &camss_vfe_vbif_ahb_clk.clkr,
+	[CAMSS_VFE_VBIF_AXI_CLK] = &camss_vfe_vbif_axi_clk.clkr,
+	[CAMSS_CPP_AXI_CLK] = &camss_cpp_axi_clk.clkr,
+	[CAMSS_CPP_VBIF_AHB_CLK] = &camss_cpp_vbif_ahb_clk.clkr,
+	[CAMSS_CSI_VFE0_CLK] = &camss_csi_vfe0_clk.clkr,
+	[CAMSS_CSI_VFE1_CLK] = &camss_csi_vfe1_clk.clkr,
+	[CAMSS_VFE0_STREAM_CLK] = &camss_vfe0_stream_clk.clkr,
+	[CAMSS_VFE1_STREAM_CLK] = &camss_vfe1_stream_clk.clkr,
+	[CAMSS_CPHY_CSID0_CLK] = &camss_cphy_csid0_clk.clkr,
+	[CAMSS_CPHY_CSID1_CLK] = &camss_cphy_csid1_clk.clkr,
+	[CAMSS_CPHY_CSID2_CLK] = &camss_cphy_csid2_clk.clkr,
+	[CAMSS_CPHY_CSID3_CLK] = &camss_cphy_csid3_clk.clkr,
+	[CAMSS_CSIPHY0_CLK] = &camss_csiphy0_clk.clkr,
+	[CAMSS_CSIPHY1_CLK] = &camss_csiphy1_clk.clkr,
+	[CAMSS_CSIPHY2_CLK] = &camss_csiphy2_clk.clkr,
+	[FD_CORE_CLK] = &fd_core_clk.clkr,
+	[FD_CORE_UAR_CLK] = &fd_core_uar_clk.clkr,
+	[FD_AHB_CLK] = &fd_ahb_clk.clkr,
+	[MNOC_AHB_CLK] = &mnoc_ahb_clk.clkr,
+	[BIMC_SMMU_AHB_CLK] = &bimc_smmu_ahb_clk.clkr,
+	[BIMC_SMMU_AXI_CLK] = &bimc_smmu_axi_clk.clkr,
+	[MNOC_MAXI_CLK] = &mnoc_maxi_clk.clkr,
+	[VMEM_MAXI_CLK] = &vmem_maxi_clk.clkr,
+	[VMEM_AHB_CLK] = &vmem_ahb_clk.clkr,
+};
+
+static struct gdsc *mmcc_msm8998_gdscs[] = {
+	[VIDEO_TOP_GDSC] = &video_top_gdsc,
+	[VIDEO_SUBCORE0_GDSC] = &video_subcore0_gdsc,
+	[VIDEO_SUBCORE1_GDSC] = &video_subcore1_gdsc,
+	[MDSS_GDSC] = &mdss_gdsc,
+	[CAMSS_TOP_GDSC] = &camss_top_gdsc,
+	[CAMSS_VFE0_GDSC] = &camss_vfe0_gdsc,
+	[CAMSS_VFE1_GDSC] = &camss_vfe1_gdsc,
+	[CAMSS_CPP_GDSC] = &camss_cpp_gdsc,
+	[BIMC_SMMU_GDSC] = &bimc_smmu_gdsc,
+};
+
+static const struct qcom_reset_map mmcc_msm8998_resets[] = {
+	[SPDM_BCR] = { 0x200 },
+	[SPDM_RM_BCR] = { 0x300 },
+	[MISC_BCR] = { 0x320 },
+	[VIDEO_TOP_BCR] = { 0x1020 },
+	[THROTTLE_VIDEO_BCR] = { 0x1180 },
+	[MDSS_BCR] = { 0x2300 },
+	[THROTTLE_MDSS_BCR] = { 0x2460 },
+	[CAMSS_PHY0_BCR] = { 0x3020 },
+	[CAMSS_PHY1_BCR] = { 0x3050 },
+	[CAMSS_PHY2_BCR] = { 0x3080 },
+	[CAMSS_CSI0_BCR] = { 0x30b0 },
+	[CAMSS_CSI0RDI_BCR] = { 0x30d0 },
+	[CAMSS_CSI0PIX_BCR] = { 0x30e0 },
+	[CAMSS_CSI1_BCR] = { 0x3120 },
+	[CAMSS_CSI1RDI_BCR] = { 0x3140 },
+	[CAMSS_CSI1PIX_BCR] = { 0x3150 },
+	[CAMSS_CSI2_BCR] = { 0x3180 },
+	[CAMSS_CSI2RDI_BCR] = { 0x31a0 },
+	[CAMSS_CSI2PIX_BCR] = { 0x31b0 },
+	[CAMSS_CSI3_BCR] = { 0x31e0 },
+	[CAMSS_CSI3RDI_BCR] = { 0x3200 },
+	[CAMSS_CSI3PIX_BCR] = { 0x3210 },
+	[CAMSS_ISPIF_BCR] = { 0x3220 },
+	[CAMSS_CCI_BCR] = { 0x3340 },
+	[CAMSS_TOP_BCR] = { 0x3480 },
+	[CAMSS_AHB_BCR] = { 0x3488 },
+	[CAMSS_MICRO_BCR] = { 0x3490 },
+	[CAMSS_JPEG_BCR] = { 0x35a0 },
+	[CAMSS_VFE0_BCR] = { 0x3660 },
+	[CAMSS_VFE1_BCR] = { 0x3670 },
+	[CAMSS_VFE_VBIF_BCR] = { 0x36a0 },
+	[CAMSS_CPP_TOP_BCR] = { 0x36c0 },
+	[CAMSS_CPP_BCR] = { 0x36d0 },
+	[CAMSS_CSI_VFE0_BCR] = { 0x3700 },
+	[CAMSS_CSI_VFE1_BCR] = { 0x3710 },
+	[CAMSS_FD_BCR] = { 0x3b60 },
+	[THROTTLE_CAMSS_BCR] = { 0x3c30 },
+	[MNOCAHB_BCR] = { 0x5020 },
+	[MNOCAXI_BCR] = { 0xd020 },
+	[BMIC_SMMU_BCR] = { 0xe000 },
+	[MNOC_MAXI_BCR] = { 0xf000 },
+	[VMEM_BCR] = { 0xf060 },
+	[BTO_BCR] = { 0x10004 },
+};
+
+static const struct regmap_config mmcc_msm8998_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0x10004,
+	.fast_io	= true,
+};
+
+static const struct qcom_cc_desc mmcc_msm8998_desc = {
+	.config = &mmcc_msm8998_regmap_config,
+	.clks = mmcc_msm8998_clocks,
+	.num_clks = ARRAY_SIZE(mmcc_msm8998_clocks),
+	.resets = mmcc_msm8998_resets,
+	.num_resets = ARRAY_SIZE(mmcc_msm8998_resets),
+	.gdscs = mmcc_msm8998_gdscs,
+	.num_gdscs = ARRAY_SIZE(mmcc_msm8998_gdscs),
+	.clk_hws = mmcc_msm8998_hws,
+	.num_clk_hws = ARRAY_SIZE(mmcc_msm8998_hws),
+};
+
+static const struct of_device_id mmcc_msm8998_match_table[] = {
+	{ .compatible = "qcom,mmcc-msm8998" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, mmcc_msm8998_match_table);
+
+static int mmcc_msm8998_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+
+	regmap = qcom_cc_map(pdev, &mmcc_msm8998_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	return qcom_cc_really_probe(pdev, &mmcc_msm8998_desc, regmap);
+}
+
+static struct platform_driver mmcc_msm8998_driver = {
+	.probe		= mmcc_msm8998_probe,
+	.driver		= {
+		.name	= "mmcc-msm8998",
+		.of_match_table = mmcc_msm8998_match_table,
+	},
+};
+module_platform_driver(mmcc_msm8998_driver);
+
+MODULE_DESCRIPTION("QCOM MMCC MSM8998 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/videocc-sc7180.c b/drivers/clk/qcom/videocc-sc7180.c
new file mode 100644
index 0000000..c363c3c
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sc7180.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,videocc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+	P_BI_TCXO,
+	P_CHIP_SLEEP_CLK,
+	P_CORE_BI_PLL_TEST_SE,
+	P_VIDEO_PLL0_OUT_EVEN,
+	P_VIDEO_PLL0_OUT_MAIN,
+	P_VIDEO_PLL0_OUT_ODD,
+};
+
+static const struct pll_vco fabia_vco[] = {
+	{ 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll video_pll0 = {
+	.offset = 0x42c,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "video_pll0",
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "bi_tcxo",
+			},
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_fabia_ops,
+		},
+	},
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_VIDEO_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+	{ .fw_name = "bi_tcxo" },
+	{ .hw = &video_pll0.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_video_cc_venus_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(150000000, P_VIDEO_PLL0_OUT_MAIN, 4, 0, 0),
+	F(270000000, P_VIDEO_PLL0_OUT_MAIN, 2.5, 0, 0),
+	F(340000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+	F(434000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+	F(500000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 video_cc_venus_clk_src = {
+	.cmd_rcgr = 0x7f0,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = video_cc_parent_map_1,
+	.freq_tbl = ftbl_video_cc_venus_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "video_cc_venus_clk_src",
+		.parent_data = video_cc_parent_data_1,
+		.num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_shared_ops,
+	},
+};
+
+static struct clk_branch video_cc_vcodec0_axi_clk = {
+	.halt_reg = 0x9ec,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9ec,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_cc_vcodec0_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch video_cc_vcodec0_core_clk = {
+	.halt_reg = 0x890,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x890,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_cc_vcodec0_core_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &video_cc_venus_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch video_cc_venus_ahb_clk = {
+	.halt_reg = 0xa4c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa4c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_cc_venus_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch video_cc_venus_ctl_axi_clk = {
+	.halt_reg = 0x9cc,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9cc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_cc_venus_ctl_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch video_cc_venus_ctl_core_clk = {
+	.halt_reg = 0x850,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x850,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "video_cc_venus_ctl_core_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &video_cc_venus_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct gdsc venus_gdsc = {
+	.gdscr = 0x814,
+	.pd = {
+		.name = "venus_gdsc",
+	},
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vcodec0_gdsc = {
+	.gdscr = 0x874,
+	.pd = {
+		.name = "vcodec0_gdsc",
+	},
+	.flags = HW_CTRL,
+	.pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_regmap *video_cc_sc7180_clocks[] = {
+	[VIDEO_CC_VCODEC0_AXI_CLK] = &video_cc_vcodec0_axi_clk.clkr,
+	[VIDEO_CC_VCODEC0_CORE_CLK] = &video_cc_vcodec0_core_clk.clkr,
+	[VIDEO_CC_VENUS_AHB_CLK] = &video_cc_venus_ahb_clk.clkr,
+	[VIDEO_CC_VENUS_CLK_SRC] = &video_cc_venus_clk_src.clkr,
+	[VIDEO_CC_VENUS_CTL_AXI_CLK] = &video_cc_venus_ctl_axi_clk.clkr,
+	[VIDEO_CC_VENUS_CTL_CORE_CLK] = &video_cc_venus_ctl_core_clk.clkr,
+	[VIDEO_PLL0] = &video_pll0.clkr,
+};
+
+static struct gdsc *video_cc_sc7180_gdscs[] = {
+	[VENUS_GDSC] = &venus_gdsc,
+	[VCODEC0_GDSC] = &vcodec0_gdsc,
+};
+
+static const struct regmap_config video_cc_sc7180_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0xb94,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc video_cc_sc7180_desc = {
+	.config = &video_cc_sc7180_regmap_config,
+	.clks = video_cc_sc7180_clocks,
+	.num_clks = ARRAY_SIZE(video_cc_sc7180_clocks),
+	.gdscs = video_cc_sc7180_gdscs,
+	.num_gdscs = ARRAY_SIZE(video_cc_sc7180_gdscs),
+};
+
+static const struct of_device_id video_cc_sc7180_match_table[] = {
+	{ .compatible = "qcom,sc7180-videocc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sc7180_match_table);
+
+static int video_cc_sc7180_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	struct alpha_pll_config video_pll0_config = {};
+
+	regmap = qcom_cc_map(pdev, &video_cc_sc7180_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	video_pll0_config.l = 0x1f;
+	video_pll0_config.alpha = 0x4000;
+	video_pll0_config.user_ctl_val = 0x00000001;
+	video_pll0_config.user_ctl_hi_val = 0x00004805;
+
+	clk_fabia_pll_configure(&video_pll0, regmap, &video_pll0_config);
+
+	/* Keep VIDEO_CC_XO_CLK ALWAYS-ON */
+	regmap_update_bits(regmap, 0x984, 0x1, 0x1);
+
+	return qcom_cc_really_probe(pdev, &video_cc_sc7180_desc, regmap);
+}
+
+static struct platform_driver video_cc_sc7180_driver = {
+	.probe = video_cc_sc7180_probe,
+	.driver = {
+		.name = "sc7180-videocc",
+		.of_match_table = video_cc_sc7180_match_table,
+	},
+};
+
+static int __init video_cc_sc7180_init(void)
+{
+	return platform_driver_register(&video_cc_sc7180_driver);
+}
+subsys_initcall(video_cc_sc7180_init);
+
+static void __exit video_cc_sc7180_exit(void)
+{
+	platform_driver_unregister(&video_cc_sc7180_driver);
+}
+module_exit(video_cc_sc7180_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI VIDEOCC SC7180 Driver");
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 4cd846b..250d816 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -20,8 +20,8 @@
 	select CLK_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793
 	select CLK_R8A7792 if ARCH_R8A7792
 	select CLK_R8A7794 if ARCH_R8A7794
-	select CLK_R8A7795 if ARCH_R8A7795
-	select CLK_R8A77960 if ARCH_R8A77960 || ARCH_R8A7796
+	select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951 || ARCH_R8A7795
+	select CLK_R8A77960 if ARCH_R8A77960
 	select CLK_R8A77961 if ARCH_R8A77961
 	select CLK_R8A77965 if ARCH_R8A77965
 	select CLK_R8A77970 if ARCH_R8A77970
diff --git a/drivers/clk/renesas/r7s9210-cpg-mssr.c b/drivers/clk/renesas/r7s9210-cpg-mssr.c
index cf65d4e..443bff0 100644
--- a/drivers/clk/renesas/r7s9210-cpg-mssr.c
+++ b/drivers/clk/renesas/r7s9210-cpg-mssr.c
@@ -93,6 +93,7 @@ static const struct mssr_mod_clk r7s9210_mod_clks[] __initconst = {
 	DEF_MOD_STB("ether1",	 64,	R7S9210_CLK_B),
 	DEF_MOD_STB("ether0",	 65,	R7S9210_CLK_B),
 
+	DEF_MOD_STB("spibsc",	 83,	R7S9210_CLK_P1),
 	DEF_MOD_STB("i2c3",	 84,	R7S9210_CLK_P1),
 	DEF_MOD_STB("i2c2",	 85,	R7S9210_CLK_P1),
 	DEF_MOD_STB("i2c1",	 86,	R7S9210_CLK_P1),
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.h b/drivers/clk/renesas/rcar-gen2-cpg.h
index db2f57e..bdcd4a3 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.h
+++ b/drivers/clk/renesas/rcar-gen2-cpg.h
@@ -24,10 +24,10 @@ enum rcar_gen2_clk_types {
 };
 
 struct rcar_gen2_cpg_pll_config {
-	unsigned int extal_div;
-	unsigned int pll1_mult;
-	unsigned int pll3_mult;
-	unsigned int pll0_mult;		/* leave as zero if PLL0CR exists */
+	u8 extal_div;
+	u8 pll1_mult;
+	u8 pll3_mult;
+	u8 pll0_mult;		/* leave as zero if PLL0CR exists */
 };
 
 struct clk *rcar_gen2_cpg_clk_register(struct device *dev,
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index c97b647..488f8b3 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -470,7 +470,8 @@ static struct clk * __init cpg_rpc_clk_register(const char *name,
 
 	clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
 				     &rpc->div.hw,  &clk_divider_ops,
-				     &rpc->gate.hw, &clk_gate_ops, 0);
+				     &rpc->gate.hw, &clk_gate_ops,
+				     CLK_SET_RATE_PARENT);
 	if (IS_ERR(clk)) {
 		kfree(rpc);
 		return clk;
@@ -506,7 +507,8 @@ static struct clk * __init cpg_rpcd2_clk_register(const char *name,
 
 	clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
 				     &rpcd2->fixed.hw, &clk_fixed_factor_ops,
-				     &rpcd2->gate.hw, &clk_gate_ops, 0);
+				     &rpcd2->gate.hw, &clk_gate_ops,
+				     CLK_SET_RATE_PARENT);
 	if (IS_ERR(clk))
 		kfree(rpcd2);
 
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 198417d..10560d9 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -282,7 +282,7 @@ static int rockchip_rk3036_pll_is_enabled(struct clk_hw *hw)
 	return !(pllcon & RK3036_PLLCON1_PWRDOWN);
 }
 
-static void rockchip_rk3036_pll_init(struct clk_hw *hw)
+static int rockchip_rk3036_pll_init(struct clk_hw *hw)
 {
 	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
 	const struct rockchip_pll_rate_table *rate;
@@ -290,14 +290,14 @@ static void rockchip_rk3036_pll_init(struct clk_hw *hw)
 	unsigned long drate;
 
 	if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
-		return;
+		return 0;
 
 	drate = clk_hw_get_rate(hw);
 	rate = rockchip_get_pll_settings(pll, drate);
 
 	/* when no rate setting for the current rate, rely on clk_set_rate */
 	if (!rate)
-		return;
+		return 0;
 
 	rockchip_rk3036_pll_get_params(pll, &cur);
 
@@ -319,13 +319,15 @@ static void rockchip_rk3036_pll_init(struct clk_hw *hw)
 		if (!parent) {
 			pr_warn("%s: parent of %s not available\n",
 				__func__, __clk_get_name(hw->clk));
-			return;
+			return 0;
 		}
 
 		pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
 			 __func__, __clk_get_name(hw->clk));
 		rockchip_rk3036_pll_set_params(pll, rate);
 	}
+
+	return 0;
 }
 
 static const struct clk_ops rockchip_rk3036_pll_clk_norate_ops = {
@@ -515,7 +517,7 @@ static int rockchip_rk3066_pll_is_enabled(struct clk_hw *hw)
 	return !(pllcon & RK3066_PLLCON3_PWRDOWN);
 }
 
-static void rockchip_rk3066_pll_init(struct clk_hw *hw)
+static int rockchip_rk3066_pll_init(struct clk_hw *hw)
 {
 	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
 	const struct rockchip_pll_rate_table *rate;
@@ -523,14 +525,14 @@ static void rockchip_rk3066_pll_init(struct clk_hw *hw)
 	unsigned long drate;
 
 	if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
-		return;
+		return 0;
 
 	drate = clk_hw_get_rate(hw);
 	rate = rockchip_get_pll_settings(pll, drate);
 
 	/* when no rate setting for the current rate, rely on clk_set_rate */
 	if (!rate)
-		return;
+		return 0;
 
 	rockchip_rk3066_pll_get_params(pll, &cur);
 
@@ -543,6 +545,8 @@ static void rockchip_rk3066_pll_init(struct clk_hw *hw)
 			 __func__, clk_hw_get_name(hw));
 		rockchip_rk3066_pll_set_params(pll, rate);
 	}
+
+	return 0;
 }
 
 static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
@@ -761,7 +765,7 @@ static int rockchip_rk3399_pll_is_enabled(struct clk_hw *hw)
 	return !(pllcon & RK3399_PLLCON3_PWRDOWN);
 }
 
-static void rockchip_rk3399_pll_init(struct clk_hw *hw)
+static int rockchip_rk3399_pll_init(struct clk_hw *hw)
 {
 	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
 	const struct rockchip_pll_rate_table *rate;
@@ -769,14 +773,14 @@ static void rockchip_rk3399_pll_init(struct clk_hw *hw)
 	unsigned long drate;
 
 	if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
-		return;
+		return 0;
 
 	drate = clk_hw_get_rate(hw);
 	rate = rockchip_get_pll_settings(pll, drate);
 
 	/* when no rate setting for the current rate, rely on clk_set_rate */
 	if (!rate)
-		return;
+		return 0;
 
 	rockchip_rk3399_pll_get_params(pll, &cur);
 
@@ -798,13 +802,15 @@ static void rockchip_rk3399_pll_init(struct clk_hw *hw)
 		if (!parent) {
 			pr_warn("%s: parent of %s not available\n",
 				__func__, __clk_get_name(hw->clk));
-			return;
+			return 0;
 		}
 
 		pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
 			 __func__, __clk_get_name(hw->clk));
 		rockchip_rk3399_pll_set_params(pll, rate);
 	}
+
+	return 0;
 }
 
 static const struct clk_ops rockchip_rk3399_pll_clk_norate_ops = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index 49bd7a4..5f66bf8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -921,11 +921,26 @@ static const struct sunxi_ccu_desc sun50i_a64_ccu_desc = {
 	.num_resets	= ARRAY_SIZE(sun50i_a64_ccu_resets),
 };
 
+static struct ccu_pll_nb sun50i_a64_pll_cpu_nb = {
+	.common	= &pll_cpux_clk.common,
+	/* copy from pll_cpux_clk */
+	.enable	= BIT(31),
+	.lock	= BIT(28),
+};
+
+static struct ccu_mux_nb sun50i_a64_cpu_nb = {
+	.common		= &cpux_clk.common,
+	.cm		= &cpux_clk.mux,
+	.delay_us	= 1, /* > 8 clock cycles at 24 MHz */
+	.bypass_index	= 1, /* index of 24 MHz oscillator */
+};
+
 static int sun50i_a64_ccu_probe(struct platform_device *pdev)
 {
 	struct resource *res;
 	void __iomem *reg;
 	u32 val;
+	int ret;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	reg = devm_ioremap_resource(&pdev->dev, res);
@@ -939,7 +954,18 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
 
 	writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
 
-	return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
+	ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
+	if (ret)
+		return ret;
+
+	/* Gate then ungate PLL CPU after any rate changes */
+	ccu_pll_notifier_register(&sun50i_a64_pll_cpu_nb);
+
+	/* Reparent CPU during PLL CPU rate changes */
+	ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+				  &sun50i_a64_cpu_nb);
+
+	return 0;
 }
 
 static const struct of_device_id sun50i_a64_ccu_ids[] = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
index 9799292..116e6f8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -36,7 +36,6 @@
 #define CLK_PLL_HSIC			18
 #define CLK_PLL_DE			19
 #define CLK_PLL_DDR1			20
-#define CLK_CPUX			21
 #define CLK_AXI				22
 #define CLK_APB				23
 #define CLK_AHB1			24
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.h b/drivers/clk/sunxi-ng/ccu-sun6i-a31.h
index a361388..3ed2a59 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.h
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.h
@@ -32,7 +32,9 @@
 /* The PLL_VIDEO1_2X clock is exported */
 
 #define CLK_PLL_GPU		14
-#define CLK_PLL_MIPI		15
+
+/* The PLL_MIPI clock is exported */
+
 #define CLK_PLL9		16
 #define CLK_PLL10		17
 
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23-a33.h b/drivers/clk/sunxi-ng/ccu-sun8i-a23-a33.h
index 72df692..5bf5c4d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23-a33.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23-a33.h
@@ -24,7 +24,9 @@
 #define CLK_PLL_PERIPH		10
 #define CLK_PLL_PERIPH_2X	11
 #define CLK_PLL_GPU		12
-#define CLK_PLL_MIPI		13
+
+/* The PLL MIPI clock is exported */
+
 #define CLK_PLL_HSIC		14
 #define CLK_PLL_DE		15
 #define CLK_PLL_DDR1		16
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.h b/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
index a69637b..6f7071d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
@@ -55,10 +55,6 @@
 
 /* Some more module clocks are exported */
 
-#define CLK_MBUS		155
-
-/* Another bunch of module clocks are exported */
-
 #define CLK_NUMBER		(CLK_OUTB + 1)
 
 #endif /* _CCU_SUN8I_R40_H_ */
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
index a165e71..4c75b07 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
@@ -37,7 +37,6 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct clk_onecell_data *clk_data;
-	const struct of_device_id *device;
 	const struct gates_data *data;
 	const char *clk_parent;
 	const char *clk_name;
@@ -50,10 +49,9 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
 	if (!np)
 		return -ENODEV;
 
-	device = of_match_device(sun6i_a31_apb0_gates_clk_dt_ids, &pdev->dev);
-	if (!device)
+	data = of_device_get_match_data(&pdev->dev);
+	if (!data)
 		return -ENODEV;
-	data = device->data;
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	reg = devm_ioremap_resource(&pdev->dev, r);
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index c051d92..cfbaa90 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1487,7 +1487,6 @@ static int dfll_init(struct tegra_dfll *td)
 	td->last_unrounded_rate = 0;
 
 	pm_runtime_enable(td->dev);
-	pm_runtime_irq_safe(td->dev);
 	pm_runtime_get_sync(td->dev);
 
 	dfll_set_mode(td, DFLL_DISABLED);
@@ -1516,7 +1515,7 @@ static int dfll_init(struct tegra_dfll *td)
 
 /**
  * tegra_dfll_suspend - check DFLL is disabled
- * @dev: DFLL device *
+ * @dev: DFLL instance
  *
  * DFLL clock should be disabled by the CPUFreq driver. So, make
  * sure it is disabled and disable all clocks needed by the DFLL.
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index ca0de5f..38daf48 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -40,8 +40,13 @@ static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
 	int div, mul;
 	u64 rate = parent_rate;
 
-	reg = readl_relaxed(divider->reg) >> divider->shift;
-	div = reg & div_mask(divider);
+	reg = readl_relaxed(divider->reg);
+
+	if ((divider->flags & TEGRA_DIVIDER_UART) &&
+	    !(reg & PERIPH_CLK_UART_DIV_ENB))
+		return rate;
+
+	div = (reg >> divider->shift) & div_mask(divider);
 
 	mul = get_mul(divider);
 	div += mul;
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 0d07c0b..2b2a3b8 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -777,7 +777,11 @@ static struct tegra_periph_init_data gate_clks[] = {
 	GATE("ahbdma", "hclk", 33, 0, tegra_clk_ahbdma, 0),
 	GATE("apbdma", "pclk", 34, 0, tegra_clk_apbdma, 0),
 	GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0),
-	GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0),
+	/*
+	 * Critical for RAM re-repair operation, which must occur on resume
+	 * from LP1 system suspend and as part of CCPLEX cluster switching.
+	 */
+	GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, CLK_IS_CRITICAL),
 	GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0),
 	GATE("kfuse", "clk_m", 40, TEGRA_PERIPH_ON_APB, tegra_clk_kfuse, 0),
 	GATE("apbif", "clk_m", 107, TEGRA_PERIPH_ON_APB, tegra_clk_apbif, 0),
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 4d8222f..fff5cba 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -1046,11 +1046,9 @@ static struct tegra_clk_init_table init_table[] __initdata = {
 	{ TEGRA20_CLK_SBC3, TEGRA20_CLK_PLL_P, 100000000, 0 },
 	{ TEGRA20_CLK_SBC4, TEGRA20_CLK_PLL_P, 100000000, 0 },
 	{ TEGRA20_CLK_HOST1X, TEGRA20_CLK_PLL_C, 150000000, 0 },
-	{ TEGRA20_CLK_DISP1, TEGRA20_CLK_PLL_P, 600000000, 0 },
-	{ TEGRA20_CLK_DISP2, TEGRA20_CLK_PLL_P, 600000000, 0 },
 	{ TEGRA20_CLK_GR2D, TEGRA20_CLK_PLL_C, 300000000, 0 },
 	{ TEGRA20_CLK_GR3D, TEGRA20_CLK_PLL_C, 300000000, 0 },
-	{ TEGRA20_CLK_VDE, TEGRA20_CLK_CLK_MAX, 300000000, 0 },
+	{ TEGRA20_CLK_VDE, TEGRA20_CLK_PLL_C, 300000000, 0 },
 	/* must be the last entry */
 	{ TEGRA20_CLK_CLK_MAX, TEGRA20_CLK_CLK_MAX, 0, 0 },
 };
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index c8bc18e..b208914 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1251,14 +1251,12 @@ static struct tegra_clk_init_table init_table[] __initdata = {
 	{ TEGRA30_CLK_SBC6, TEGRA30_CLK_PLL_P, 100000000, 0 },
 	{ TEGRA30_CLK_PLL_C, TEGRA30_CLK_CLK_MAX, 600000000, 0 },
 	{ TEGRA30_CLK_HOST1X, TEGRA30_CLK_PLL_C, 150000000, 0 },
-	{ TEGRA30_CLK_DISP1, TEGRA30_CLK_PLL_P, 600000000, 0 },
-	{ TEGRA30_CLK_DISP2, TEGRA30_CLK_PLL_P, 600000000, 0 },
 	{ TEGRA30_CLK_TWD, TEGRA30_CLK_CLK_MAX, 0, 1 },
 	{ TEGRA30_CLK_GR2D, TEGRA30_CLK_PLL_C, 300000000, 0 },
 	{ TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0 },
 	{ TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 },
 	{ TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
-	{ TEGRA30_CLK_VDE, TEGRA30_CLK_CLK_MAX, 600000000, 0 },
+	{ TEGRA30_CLK_VDE, TEGRA30_CLK_PLL_C, 600000000, 0 },
 	{ TEGRA30_CLK_SPDIF_IN_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
 	{ TEGRA30_CLK_I2S0_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
 	{ TEGRA30_CLK_I2S1_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index 2b4dab63..312a20f 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -604,6 +604,18 @@ static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initcons
 	{ 0 },
 };
 
+static const struct
+omap_clkctrl_reg_data omap4_l4_secure_clkctrl_regs[] __initconst = {
+	{ OMAP4_AES1_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+	{ OMAP4_AES2_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+	{ OMAP4_DES3DES_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+	{ OMAP4_PKA_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+	{ OMAP4_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
+	{ OMAP4_SHA2MD5_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+	{ OMAP4_CRYPTODMA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
+	{ 0 },
+};
+
 static const struct omap_clkctrl_bit_data omap4_gpio1_bit_data[] __initconst = {
 	{ 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL },
 	{ 0 },
@@ -691,6 +703,7 @@ const struct omap_clkctrl_data omap4_clkctrl_data[] __initconst = {
 	{ 0x4a009220, omap4_l3_gfx_clkctrl_regs },
 	{ 0x4a009320, omap4_l3_init_clkctrl_regs },
 	{ 0x4a009420, omap4_l4_per_clkctrl_regs },
+	{ 0x4a0095a0, omap4_l4_secure_clkctrl_regs },
 	{ 0x4a307820, omap4_l4_wkup_clkctrl_regs },
 	{ 0x4a307a20, omap4_emu_sys_clkctrl_regs },
 	{ 0 },
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index c9608e5..92bf2dd 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -35,6 +35,20 @@ static const struct omap_clkctrl_reg_data omap5_dsp_clkctrl_regs[] __initconst =
 	{ 0 },
 };
 
+static const char * const omap5_aess_fclk_parents[] __initconst = {
+	"abe_clk",
+	NULL,
+};
+
+static const struct omap_clkctrl_div_data omap5_aess_fclk_data __initconst = {
+	.max_div = 2,
+};
+
+static const struct omap_clkctrl_bit_data omap5_aess_bit_data[] __initconst = {
+	{ 24, TI_CLK_DIVIDER, omap5_aess_fclk_parents, &omap5_aess_fclk_data },
+	{ 0 },
+};
+
 static const char * const omap5_dmic_gfclk_parents[] __initconst = {
 	"abe_cm:clk:0018:26",
 	"pad_clks_ck",
@@ -122,6 +136,7 @@ static const struct omap_clkctrl_bit_data omap5_timer8_bit_data[] __initconst =
 
 static const struct omap_clkctrl_reg_data omap5_abe_clkctrl_regs[] __initconst = {
 	{ OMAP5_L4_ABE_CLKCTRL, NULL, 0, "abe_iclk" },
+	{ OMAP5_AESS_CLKCTRL, omap5_aess_bit_data, CLKF_SW_SUP, "abe_cm:clk:0008:24" },
 	{ OMAP5_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" },
 	{ OMAP5_DMIC_CLKCTRL, omap5_dmic_bit_data, CLKF_SW_SUP, "abe_cm:clk:0018:24" },
 	{ OMAP5_MCBSP1_CLKCTRL, omap5_mcbsp1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0028:24" },
@@ -286,6 +301,18 @@ static const struct omap_clkctrl_reg_data omap5_l4per_clkctrl_regs[] __initconst
 	{ 0 },
 };
 
+static const struct
+omap_clkctrl_reg_data omap5_l4_secure_clkctrl_regs[] __initconst = {
+	{ OMAP5_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "" },
+	{ OMAP5_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "" },
+	{ OMAP5_DES3DES_CLKCTRL, NULL, CLKF_HW_SUP, "" },
+	{ OMAP5_FPKA_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+	{ OMAP5_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
+	{ OMAP5_SHA2MD5_CLKCTRL, NULL, CLKF_HW_SUP, "" },
+	{ OMAP5_DMA_CRYPTO_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
+	{ 0 },
+};
+
 static const struct omap_clkctrl_reg_data omap5_iva_clkctrl_regs[] __initconst = {
 	{ OMAP5_IVA_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" },
 	{ OMAP5_SL2IF_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" },
@@ -508,6 +535,7 @@ const struct omap_clkctrl_data omap5_clkctrl_data[] __initconst = {
 	{ 0x4a008d20, omap5_l4cfg_clkctrl_regs },
 	{ 0x4a008e20, omap5_l3instr_clkctrl_regs },
 	{ 0x4a009020, omap5_l4per_clkctrl_regs },
+	{ 0x4a0091a0, omap5_l4_secure_clkctrl_regs },
 	{ 0x4a009220, omap5_iva_clkctrl_regs },
 	{ 0x4a009420, omap5_dss_clkctrl_regs },
 	{ 0x4a009520, omap5_gpu_clkctrl_regs },
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 5f46782..14b6450 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -146,6 +146,29 @@ static const struct omap_clkctrl_reg_data dra7_rtc_clkctrl_regs[] __initconst =
 	{ 0 },
 };
 
+static const char * const dra7_cam_gfclk_mux_parents[] __initconst = {
+	"l3_iclk_div",
+	"core_iss_main_clk",
+	NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_cam_bit_data[] __initconst = {
+	{ 24, TI_CLK_MUX, dra7_cam_gfclk_mux_parents, NULL },
+	{ 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_cam_clkctrl_regs[] __initconst = {
+	{ DRA7_CAM_VIP1_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+	{ DRA7_CAM_VIP2_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+	{ DRA7_CAM_VIP3_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+	{ 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_vpe_clkctrl_regs[] __initconst = {
+	{ DRA7_VPE_VPE_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h23x2_ck" },
+	{ 0 },
+};
+
 static const struct omap_clkctrl_reg_data dra7_coreaon_clkctrl_regs[] __initconst = {
 	{ DRA7_COREAON_SMARTREFLEX_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
 	{ DRA7_COREAON_SMARTREFLEX_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
@@ -275,6 +298,40 @@ static const struct omap_clkctrl_reg_data dra7_dss_clkctrl_regs[] __initconst =
 	{ 0 },
 };
 
+static const char * const dra7_gpu_core_mux_parents[] __initconst = {
+	"dpll_core_h14x2_ck",
+	"dpll_per_h14x2_ck",
+	"dpll_gpu_m2_ck",
+	NULL,
+};
+
+static const char * const dra7_gpu_hyd_mux_parents[] __initconst = {
+	"dpll_core_h14x2_ck",
+	"dpll_per_h14x2_ck",
+	"dpll_gpu_m2_ck",
+	NULL,
+};
+
+static const char * const dra7_gpu_sys_clk_parents[] __initconst = {
+	"sys_clkin",
+	NULL,
+};
+
+static const struct omap_clkctrl_div_data dra7_gpu_sys_clk_data __initconst = {
+	.max_div = 2,
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpu_core_bit_data[] __initconst = {
+	{ 24, TI_CLK_MUX, dra7_gpu_core_mux_parents, NULL, },
+	{ 26, TI_CLK_MUX, dra7_gpu_hyd_mux_parents, NULL, },
+	{ 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_gpu_clkctrl_regs[] __initconst = {
+	{ DRA7_GPU_CLKCTRL, dra7_gpu_core_bit_data, CLKF_SW_SUP, "gpu_cm:clk:0000:24", },
+	{ 0 },
+};
+
 static const char * const dra7_mmc1_fclk_mux_parents[] __initconst = {
 	"func_128m_clk",
 	"dpll_per_m2x2_ck",
@@ -405,7 +462,7 @@ static const struct omap_clkctrl_bit_data dra7_gmac_bit_data[] __initconst = {
 };
 
 static const struct omap_clkctrl_reg_data dra7_gmac_clkctrl_regs[] __initconst = {
-	{ DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "dpll_gmac_ck" },
+	{ DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "gmac_main_clk" },
 	{ 0 },
 };
 
@@ -769,6 +826,7 @@ const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = {
 	{ 0x4a005550, dra7_ipu_clkctrl_regs },
 	{ 0x4a005620, dra7_dsp2_clkctrl_regs },
 	{ 0x4a005720, dra7_rtc_clkctrl_regs },
+	{ 0x4a005760, dra7_vpe_clkctrl_regs },
 	{ 0x4a008620, dra7_coreaon_clkctrl_regs },
 	{ 0x4a008720, dra7_l3main1_clkctrl_regs },
 	{ 0x4a008920, dra7_ipu2_clkctrl_regs },
@@ -777,7 +835,9 @@ const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = {
 	{ 0x4a008c00, dra7_atl_clkctrl_regs },
 	{ 0x4a008d20, dra7_l4cfg_clkctrl_regs },
 	{ 0x4a008e20, dra7_l3instr_clkctrl_regs },
+	{ 0x4a009020, dra7_cam_clkctrl_regs },
 	{ 0x4a009120, dra7_dss_clkctrl_regs },
+	{ 0x4a009220, dra7_gpu_clkctrl_regs },
 	{ 0x4a009320, dra7_l3init_clkctrl_regs },
 	{ 0x4a0093b0, dra7_pcie_clkctrl_regs },
 	{ 0x4a0093d0, dra7_gmac_clkctrl_regs },
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index e0b8ed3..3da33c7 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -171,7 +171,9 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
 		node = of_find_node_by_name(NULL, buf);
 		if (num_args && compat_mode) {
 			parent = node;
-			node = of_get_child_by_name(parent, "clk");
+			node = of_get_child_by_name(parent, "clock");
+			if (!node)
+				node = of_get_child_by_name(parent, "clk");
 			of_node_put(parent);
 		}
 
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 17b9a76..0622660 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -440,6 +440,63 @@ static void __init _clkctrl_add_provider(void *data,
 	of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
 }
 
+/* Get clock name based on compatible string for clkctrl */
+static char * __init clkctrl_get_name(struct device_node *np)
+{
+	struct property *prop;
+	const int prefix_len = 11;
+	const char *compat;
+	char *name;
+
+	of_property_for_each_string(np, "compatible", prop, compat) {
+		if (!strncmp("ti,clkctrl-", compat, prefix_len)) {
+			/* Two letter minimum name length for l3, l4 etc */
+			if (strnlen(compat + prefix_len, 16) < 2)
+				continue;
+			name = kasprintf(GFP_KERNEL, "%s", compat + prefix_len);
+			if (!name)
+				continue;
+			strreplace(name, '-', '_');
+
+			return name;
+		}
+	}
+	of_node_put(np);
+
+	return NULL;
+}
+
+/* Get clkctrl clock base name based on clkctrl_name or dts node */
+static const char * __init clkctrl_get_clock_name(struct device_node *np,
+						  const char *clkctrl_name,
+						  int offset, int index,
+						  bool legacy_naming)
+{
+	char *clock_name;
+
+	/* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
+	if (clkctrl_name && !legacy_naming) {
+		clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
+				       clkctrl_name, offset, index);
+		strreplace(clock_name, '_', '-');
+
+		return clock_name;
+	}
+
+	/* l4per_cm:clk:1234:0 old style naming based on clkctrl_name */
+	if (clkctrl_name)
+		return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
+				 clkctrl_name, offset, index);
+
+	/* l4per_cm:clk:1234:0 old style naming based on parent node name */
+	if (legacy_naming)
+		return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
+				 np->parent, offset, index);
+
+	/* l4per-clkctrl:1234:0 style naming based on node name */
+	return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
+}
+
 static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
 {
 	struct omap_clkctrl_provider *provider;
@@ -448,8 +505,10 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
 	struct clk_init_data init = { NULL };
 	struct clk_hw_omap *hw;
 	struct clk *clk;
-	struct omap_clkctrl_clk *clkctrl_clk;
+	struct omap_clkctrl_clk *clkctrl_clk = NULL;
 	const __be32 *addrp;
+	bool legacy_naming;
+	char *clkctrl_name;
 	u32 addr;
 	int ret;
 	char *c;
@@ -537,7 +596,19 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
 
 	provider->base = of_iomap(node, 0);
 
-	if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) {
+	legacy_naming = ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT;
+	clkctrl_name = clkctrl_get_name(node);
+	if (clkctrl_name) {
+		provider->clkdm_name = kasprintf(GFP_KERNEL,
+						 "%s_clkdm", clkctrl_name);
+		goto clkdm_found;
+	}
+
+	/*
+	 * The code below can be removed when all clkctrl nodes use domain
+	 * specific compatible property and standard clock node naming
+	 */
+	if (legacy_naming) {
 		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
 		if (!provider->clkdm_name) {
 			kfree(provider);
@@ -573,7 +644,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
 			*c = '_';
 		c++;
 	}
-
+clkdm_found:
 	INIT_LIST_HEAD(&provider->clocks);
 
 	/* Generate clocks */
@@ -612,15 +683,15 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
 		init.flags = 0;
 		if (reg_data->flags & CLKF_SET_RATE_PARENT)
 			init.flags |= CLK_SET_RATE_PARENT;
-		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
-			init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
-					      node->parent, node,
-					      reg_data->offset, 0);
-		else
-			init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d",
-					      node, reg_data->offset, 0);
+
+		init.name = clkctrl_get_clock_name(node, clkctrl_name,
+						   reg_data->offset, 0,
+						   legacy_naming);
+		if (!init.name)
+			goto cleanup;
+
 		clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
-		if (!init.name || !clkctrl_clk)
+		if (!clkctrl_clk)
 			goto cleanup;
 
 		init.ops = &omap4_clkctrl_clk_ops;
@@ -642,11 +713,14 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
 	if (ret == -EPROBE_DEFER)
 		ti_clk_retry_init(node, provider, _clkctrl_add_provider);
 
+	kfree(clkctrl_name);
+
 	return;
 
 cleanup:
 	kfree(hw);
 	kfree(init.name);
+	kfree(clkctrl_name);
 	kfree(clkctrl_clk);
 }
 CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
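
    As a reference for the naming helpers added above, the following
    userspace sketch shows the clock-name formats clkctrl_get_clock_name()
    produces; the clkctrl_name, offset and index are hypothetical examples,
    and snprintf() plus a replace loop stand in for the kernel's
    kasprintf()/strreplace().

    #include <stdio.h>

    int main(void)
    {
            char name[64];
            const char *clkctrl_name = "l4_per";    /* hypothetical */
            int offset = 0x28, index = 0;

            /* New style: "<name>-clkctrl:<offset>:<index>", '_' -> '-'. */
            snprintf(name, sizeof(name), "%s-clkctrl:%04x:%d",
                     clkctrl_name, offset, index);
            for (char *c = name; *c; c++)
                    if (*c == '_')
                            *c = '-';
            printf("%s\n", name);                   /* l4-per-clkctrl:0028:0 */

            /* Old style keyed on clkctrl_name: "<name>_cm:clk:<offset>:<index>". */
            printf("%s_cm:clk:%04x:%d\n", clkctrl_name, offset, index);
            return 0;
    }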
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index e6995c0..f1dd62d 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -253,7 +253,7 @@ extern const struct clk_ops omap_gate_clk_ops;
 
 extern struct ti_clk_features ti_clk_features;
 
-void omap2_init_clk_clkdm(struct clk_hw *hw);
+int omap2_init_clk_clkdm(struct clk_hw *hw);
 int omap2_clkops_enable_clkdm(struct clk_hw *hw);
 void omap2_clkops_disable_clkdm(struct clk_hw *hw);
 
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index 423a99b..ee56306 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -101,16 +101,16 @@ void omap2_clkops_disable_clkdm(struct clk_hw *hw)
  *
  * Convert a clockdomain name stored in a struct clk 'clk' into a
  * clockdomain pointer, and save it into the struct clk.  Intended to be
- * called during clk_register().  No return value.
+ * called during clk_register(). Returns 0 on success, or a negative error code.
  */
-void omap2_init_clk_clkdm(struct clk_hw *hw)
+int omap2_init_clk_clkdm(struct clk_hw *hw)
 {
 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
 	struct clockdomain *clkdm;
 	const char *clk_name;
 
 	if (!clk->clkdm_name)
-		return;
+		return 0;
 
 	clk_name = __clk_get_name(hw->clk);
 
@@ -123,6 +123,8 @@ void omap2_init_clk_clkdm(struct clk_hw *hw)
 		pr_debug("clock: could not associate clk %s to clkdm %s\n",
 			 clk_name, clk->clkdm_name);
 	}
+
+	return 0;
 }
 
 static void __init of_ti_clockdomain_setup(struct device_node *node)
diff --git a/drivers/clk/uniphier/clk-uniphier-peri.c b/drivers/clk/uniphier/clk-uniphier-peri.c
index 9caa529..3e32db9 100644
--- a/drivers/clk/uniphier/clk-uniphier-peri.c
+++ b/drivers/clk/uniphier/clk-uniphier-peri.c
@@ -18,8 +18,8 @@
 #define UNIPHIER_PERI_CLK_FI2C(idx, ch)					\
 	UNIPHIER_CLK_GATE("i2c" #ch, (idx), "i2c", 0x24, 24 + (ch))
 
-#define UNIPHIER_PERI_CLK_SCSSI(idx)					\
-	UNIPHIER_CLK_GATE("scssi", (idx), "spi", 0x20, 17)
+#define UNIPHIER_PERI_CLK_SCSSI(idx, ch)				\
+	UNIPHIER_CLK_GATE("scssi" #ch, (idx), "spi", 0x20, 17 + (ch))
 
 #define UNIPHIER_PERI_CLK_MCSSI(idx)					\
 	UNIPHIER_CLK_GATE("mcssi", (idx), "spi", 0x24, 14)
@@ -35,7 +35,7 @@ const struct uniphier_clk_data uniphier_ld4_peri_clk_data[] = {
 	UNIPHIER_PERI_CLK_I2C(6, 2),
 	UNIPHIER_PERI_CLK_I2C(7, 3),
 	UNIPHIER_PERI_CLK_I2C(8, 4),
-	UNIPHIER_PERI_CLK_SCSSI(11),
+	UNIPHIER_PERI_CLK_SCSSI(11, 0),
 	{ /* sentinel */ }
 };
 
@@ -51,7 +51,10 @@ const struct uniphier_clk_data uniphier_pro4_peri_clk_data[] = {
 	UNIPHIER_PERI_CLK_FI2C(8, 4),
 	UNIPHIER_PERI_CLK_FI2C(9, 5),
 	UNIPHIER_PERI_CLK_FI2C(10, 6),
-	UNIPHIER_PERI_CLK_SCSSI(11),
-	UNIPHIER_PERI_CLK_MCSSI(12),
+	UNIPHIER_PERI_CLK_SCSSI(11, 0),
+	UNIPHIER_PERI_CLK_SCSSI(12, 1),
+	UNIPHIER_PERI_CLK_SCSSI(13, 2),
+	UNIPHIER_PERI_CLK_SCSSI(14, 3),
+	UNIPHIER_PERI_CLK_MCSSI(15),
 	{ /* sentinel */ }
 };
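
    A small, self-contained sketch of what the parameterised
    UNIPHIER_PERI_CLK_SCSSI() macro now expands to per channel; the struct
    layout and CLK_GATE() stand-in below are simplified placeholders, not
    the real uniphier_clk_data definitions.

    #include <stdio.h>

    /* Simplified stand-in for UNIPHIER_CLK_GATE(name, idx, parent, reg, bit). */
    #define CLK_GATE(name, idx, parent, reg, bit) \
            { name, idx, parent, reg, bit }

    #define PERI_CLK_SCSSI(idx, ch) \
            CLK_GATE("scssi" #ch, (idx), "spi", 0x20, 17 + (ch))

    struct gate { const char *name; int idx; const char *parent; int reg; int bit; };

    static const struct gate pro4_scssi[] = {
            PERI_CLK_SCSSI(11, 0),  /* "scssi0", bit 17 */
            PERI_CLK_SCSSI(12, 1),  /* "scssi1", bit 18 */
            PERI_CLK_SCSSI(13, 2),  /* "scssi2", bit 19 */
            PERI_CLK_SCSSI(14, 3),  /* "scssi3", bit 20 */
    };

    int main(void)
    {
            for (unsigned i = 0; i < sizeof(pro4_scssi) / sizeof(pro4_scssi[0]); i++)
                    printf("%s: idx=%d reg=0x%x bit=%d\n", pro4_scssi[i].name,
                           pro4_scssi[i].idx, pro4_scssi[i].reg, pro4_scssi[i].bit);
            return 0;
    }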
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index 72ed97c..0aedd42 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -99,8 +99,10 @@ static void u8500_clk_init(struct device_node *np)
 	if (fw_version != NULL) {
 		switch (fw_version->project) {
 		case PRCMU_FW_PROJECT_U8500_C2:
+		case PRCMU_FW_PROJECT_U8500_MBL:
 		case PRCMU_FW_PROJECT_U8520:
 		case PRCMU_FW_PROJECT_U8420:
+		case PRCMU_FW_PROJECT_U8420_SYSCLK:
 			sgaclk_parent = "soc0_pll";
 			break;
 		default:
diff --git a/drivers/clk/versatile/Kconfig b/drivers/clk/versatile/Kconfig
index ac76685..c2618f1 100644
--- a/drivers/clk/versatile/Kconfig
+++ b/drivers/clk/versatile/Kconfig
@@ -9,7 +9,7 @@
 		COMPILE_TEST
 	select REGMAP_MMIO
 	---help---
-          Supports clocking on ARM Reference designs:
+	  Supports clocking on ARM Reference designs:
 	  - Integrator/AP and Integrator/CP
 	  - RealView PB1176, EB, PB11MP and PBX
 	  - Versatile Express
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index a11f93e..10e89f2 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -2,7 +2,7 @@
 /*
  * Zynq UltraScale+ MPSoC clock controller
  *
- *  Copyright (C) 2016-2018 Xilinx
+ *  Copyright (C) 2016-2019 Xilinx
  *
  * Based on drivers/clk/zynq/clkc.c
  */
@@ -749,6 +749,7 @@ static int zynqmp_clock_probe(struct platform_device *pdev)
 
 static const struct of_device_id zynqmp_clock_of_match[] = {
 	{.compatible = "xlnx,zynqmp-clk"},
+	{.compatible = "xlnx,versal-clk"},
 	{},
 };
 MODULE_DEVICE_TABLE(of, zynqmp_clock_of_match);
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index d8f5b70d..4be2cc7 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -2,7 +2,7 @@
 /*
  * Zynq UltraScale+ MPSoC Divider support
  *
- *  Copyright (C) 2016-2018 Xilinx
+ *  Copyright (C) 2016-2019 Xilinx
  *
  * Adjustable divider clock implementation
  */
@@ -41,12 +41,30 @@ struct zynqmp_clk_divider {
 	bool is_frac;
 	u32 clk_id;
 	u32 div_type;
+	u16 max_div;
 };
 
 static inline int zynqmp_divider_get_val(unsigned long parent_rate,
-					 unsigned long rate)
+					 unsigned long rate, u16 flags)
 {
-	return DIV_ROUND_CLOSEST(parent_rate, rate);
+	int up, down;
+	unsigned long up_rate, down_rate;
+
+	if (flags & CLK_DIVIDER_POWER_OF_TWO) {
+		up = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+		down = DIV_ROUND_DOWN_ULL((u64)parent_rate, rate);
+
+		up = __roundup_pow_of_two(up);
+		down = __rounddown_pow_of_two(down);
+
+		up_rate = DIV_ROUND_UP_ULL((u64)parent_rate, up);
+		down_rate = DIV_ROUND_UP_ULL((u64)parent_rate, down);
+
+		return (rate - up_rate) <= (down_rate - rate) ? up : down;
+
+	} else {
+		return DIV_ROUND_CLOSEST(parent_rate, rate);
+	}
 }
 
 /**
@@ -78,6 +96,9 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
 	else
 		value = div >> 16;
 
+	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+		value = 1 << value;
+
 	if (!value) {
 		WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
 		     "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
@@ -88,6 +109,42 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
 	return DIV_ROUND_UP_ULL(parent_rate, value);
 }
 
+static void zynqmp_get_divider2_val(struct clk_hw *hw,
+				    unsigned long rate,
+				    unsigned long parent_rate,
+				    struct zynqmp_clk_divider *divider,
+				    int *bestdiv)
+{
+	int div1;
+	int div2;
+	long error = LONG_MAX;
+	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+	struct zynqmp_clk_divider *pdivider = to_zynqmp_clk_divider(parent_hw);
+
+	if (!pdivider)
+		return;
+
+	*bestdiv = 1;
+	for (div1 = 1; div1 <= pdivider->max_div;) {
+		for (div2 = 1; div2 <= divider->max_div;) {
+			long new_error = ((parent_rate / div1) / div2) - rate;
+
+			if (abs(new_error) < abs(error)) {
+				*bestdiv = div2;
+				error = new_error;
+			}
+			if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+				div2 = div2 << 1;
+			else
+				div2++;
+		}
+		if (pdivider->flags & CLK_DIVIDER_POWER_OF_TWO)
+			div1 = div1 << 1;
+		else
+			div1++;
+	}
+}
+
 /**
  * zynqmp_clk_divider_round_rate() - Round rate of divider clock
  * @hw:			handle between common and hardware-specific interfaces
@@ -120,10 +177,23 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
 		else
 			bestdiv  = bestdiv >> 16;
 
+		if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+			bestdiv = 1 << bestdiv;
+
 		return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
 	}
 
-	bestdiv = zynqmp_divider_get_val(*prate, rate);
+	bestdiv = zynqmp_divider_get_val(*prate, rate, divider->flags);
+
+	/*
+	 * In case of two divisors, compute the best divider values and
+	 * return the divider2 value based on the computed value. div1 is
+	 * set automatically to the optimum for the required total divider.
+	 */
+	if (div_type == TYPE_DIV2 &&
+	    (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+		zynqmp_get_divider2_val(hw, rate, *prate, divider, &bestdiv);
+	}
 
 	if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
 		bestdiv = rate % *prate ? 1 : bestdiv;
@@ -151,7 +221,7 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
 	int ret;
 	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
 
-	value = zynqmp_divider_get_val(parent_rate, rate);
+	value = zynqmp_divider_get_val(parent_rate, rate, divider->flags);
 	if (div_type == TYPE_DIV1) {
 		div = value & 0xFFFF;
 		div |= 0xffff << 16;
@@ -160,6 +230,9 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
 		div |= value << 16;
 	}
 
+	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+		div = __ffs(div);
+
 	ret = eemi_ops->clock_setdivider(clk_id, div);
 
 	if (ret)
@@ -176,6 +249,35 @@ static const struct clk_ops zynqmp_clk_divider_ops = {
 };
 
 /**
+ * zynqmp_clk_get_max_divisor() - Get maximum supported divisor from firmware.
+ * @clk_id:		Id of clock
+ * @type:		Divider type
+ *
+ * Return: Maximum divisor of a clock if query data is successful
+ *	   U16_MAX if the query is not successful
+ */
+u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
+{
+	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+	struct zynqmp_pm_query_data qdata = {0};
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	qdata.qid = PM_QID_CLOCK_GET_MAX_DIVISOR;
+	qdata.arg1 = clk_id;
+	qdata.arg2 = type;
+	ret = eemi_ops->query_data(qdata, ret_payload);
+	/*
+	 * To maintain backward compatibility return maximum possible value
+	 * (0xFFFF) if query for max divisor is not successful.
+	 */
+	if (ret)
+		return U16_MAX;
+
+	return ret_payload[1];
+}
+
+/**
  * zynqmp_clk_register_divider() - Register a divider clock
  * @name:		Name of this clock
  * @clk_id:		Id of clock
@@ -215,6 +317,12 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
 	div->clk_id = clk_id;
 	div->div_type = nodes->type;
 
+	/*
+	 * To achieve the best possible rate, the maximum divider limit is
+	 * required during the computation.
+	 */
+	div->max_div = zynqmp_clk_get_max_divisor(clk_id, nodes->type);
+
 	hw = &div->hw;
 	ret = clk_hw_register(NULL, hw);
 	if (ret) {
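
    The CLK_DIVIDER_POWER_OF_TWO handling added to zynqmp_divider_get_val()
    can be illustrated with this standalone sketch: round the ideal divisor
    up and down to the nearest powers of two, then keep whichever candidate
    yields a rate closer to the request (DIV_ROUND_UP semantics are
    approximated with plain integer math).

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t roundup_pow2(uint32_t x)
    {
            uint32_t p = 1;
            while (p < x)
                    p <<= 1;
            return p;
    }

    static uint32_t rounddown_pow2(uint32_t x)
    {
            uint32_t p = 1;
            while ((p << 1) <= x)
                    p <<= 1;
            return p;
    }

    static uint32_t pow2_div(uint64_t parent_rate, uint64_t rate)
    {
            uint32_t up = roundup_pow2((parent_rate + rate - 1) / rate);
            uint32_t down = rounddown_pow2(parent_rate / rate);
            uint64_t up_rate = (parent_rate + up - 1) / up;
            uint64_t down_rate = (parent_rate + down - 1) / down;

            /* Pick the divisor whose resulting rate is closer to the request. */
            return (rate - up_rate) <= (down_rate - rate) ? up : down;
    }

    int main(void)
    {
            /* 1 GHz parent, 90 MHz request: candidates are /16 and /8. */
            printf("div = %u\n", pow2_div(1000000000ULL, 90000000ULL));
            return 0;
    }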
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index a541397..89b5995 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -188,10 +188,12 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
 		frac = (parent_rate * f) / FRAC_DIV;
 
 		ret = eemi_ops->clock_setdivider(clk_id, m);
-		if (ret)
+		if (ret == -EUSERS)
+			WARN(1, "More than allowed devices are using the %s, which is forbidden\n",
+			     clk_name);
+		else if (ret)
 			pr_warn_once("%s() set divider failed for %s, ret = %d\n",
 				     __func__, clk_name, ret);
-
 		eemi_ops->ioctl(0, IOCTL_SET_PLL_FRAC_DATA, clk_id, f, NULL);
 
 		return rate + frac;
diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
index 62745c9..e421946 100644
--- a/drivers/clocksource/timer-davinci.c
+++ b/drivers/clocksource/timer-davinci.c
@@ -302,10 +302,6 @@ int __init davinci_timer_register(struct clk *clk,
 		return rv;
 	}
 
-	clockevents_config_and_register(&clockevent->dev, tick_rate,
-					DAVINCI_TIMER_MIN_DELTA,
-					DAVINCI_TIMER_MAX_DELTA);
-
 	davinci_clocksource.dev.rating = 300;
 	davinci_clocksource.dev.read = davinci_clocksource_read;
 	davinci_clocksource.dev.mask =
@@ -323,6 +319,10 @@ int __init davinci_timer_register(struct clk *clk,
 		davinci_clocksource_init_tim34(base);
 	}
 
+	clockevents_config_and_register(&clockevent->dev, tick_rate,
+					DAVINCI_TIMER_MIN_DELTA,
+					DAVINCI_TIMER_MAX_DELTA);
+
 	rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
 	if (rv) {
 		pr_err("Unable to register clocksource");
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index a06777c..bda0b24 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -221,7 +221,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
 	return ret;
 }
 
-static int cppc_verify_policy(struct cpufreq_policy *policy)
+static int cppc_verify_policy(struct cpufreq_policy_data *policy)
 {
 	cpufreq_verify_within_cpu_limits(policy);
 	return 0;
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index cd53272..f7a7bcf 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -291,7 +291,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
  * nforce2_verify - verifies a new CPUFreq policy
  * @policy: new policy
  */
-static int nforce2_verify(struct cpufreq_policy *policy)
+static int nforce2_verify(struct cpufreq_policy_data *policy)
 {
 	unsigned int fsb_pol_max;
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 77114a3..4adac3a 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -74,6 +74,9 @@ static void cpufreq_exit_governor(struct cpufreq_policy *policy);
 static int cpufreq_start_governor(struct cpufreq_policy *policy);
 static void cpufreq_stop_governor(struct cpufreq_policy *policy);
 static void cpufreq_governor_limits(struct cpufreq_policy *policy);
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+			      struct cpufreq_governor *new_gov,
+			      unsigned int new_pol);
 
 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -616,25 +619,22 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
 	return NULL;
 }
 
-static int cpufreq_parse_policy(char *str_governor,
-				struct cpufreq_policy *policy)
+static unsigned int cpufreq_parse_policy(char *str_governor)
 {
-	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
-		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
-		return 0;
-	}
-	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
-		policy->policy = CPUFREQ_POLICY_POWERSAVE;
-		return 0;
-	}
-	return -EINVAL;
+	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
+		return CPUFREQ_POLICY_PERFORMANCE;
+
+	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
+		return CPUFREQ_POLICY_POWERSAVE;
+
+	return CPUFREQ_POLICY_UNKNOWN;
 }
 
 /**
  * cpufreq_parse_governor - parse a governor string only for has_target()
+ * @str_governor: Governor name.
  */
-static int cpufreq_parse_governor(char *str_governor,
-				  struct cpufreq_policy *policy)
+static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
 {
 	struct cpufreq_governor *t;
 
@@ -648,7 +648,7 @@ static int cpufreq_parse_governor(char *str_governor,
 
 		ret = request_module("cpufreq_%s", str_governor);
 		if (ret)
-			return -EINVAL;
+			return NULL;
 
 		mutex_lock(&cpufreq_governor_mutex);
 
@@ -659,12 +659,7 @@ static int cpufreq_parse_governor(char *str_governor,
 
 	mutex_unlock(&cpufreq_governor_mutex);
 
-	if (t) {
-		policy->governor = t;
-		return 0;
-	}
-
-	return -EINVAL;
+	return t;
 }
 
 /**
@@ -765,29 +760,34 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
 					const char *buf, size_t count)
 {
+	char str_governor[16];
 	int ret;
-	char	str_governor[16];
-	struct cpufreq_policy new_policy;
-
-	memcpy(&new_policy, policy, sizeof(*policy));
 
 	ret = sscanf(buf, "%15s", str_governor);
 	if (ret != 1)
 		return -EINVAL;
 
 	if (cpufreq_driver->setpolicy) {
-		if (cpufreq_parse_policy(str_governor, &new_policy))
+		unsigned int new_pol;
+
+		new_pol = cpufreq_parse_policy(str_governor);
+		if (!new_pol)
 			return -EINVAL;
+
+		ret = cpufreq_set_policy(policy, NULL, new_pol);
 	} else {
-		if (cpufreq_parse_governor(str_governor, &new_policy))
+		struct cpufreq_governor *new_gov;
+
+		new_gov = cpufreq_parse_governor(str_governor);
+		if (!new_gov)
 			return -EINVAL;
+
+		ret = cpufreq_set_policy(policy, new_gov,
+					 CPUFREQ_POLICY_UNKNOWN);
+
+		module_put(new_gov->owner);
 	}
 
-	ret = cpufreq_set_policy(policy, &new_policy);
-
-	if (new_policy.governor)
-		module_put(new_policy.governor->owner);
-
 	return ret ? ret : count;
 }
 
@@ -1053,40 +1053,33 @@ __weak struct cpufreq_governor *cpufreq_default_governor(void)
 
 static int cpufreq_init_policy(struct cpufreq_policy *policy)
 {
-	struct cpufreq_governor *gov = NULL, *def_gov = NULL;
-	struct cpufreq_policy new_policy;
-
-	memcpy(&new_policy, policy, sizeof(*policy));
-
-	def_gov = cpufreq_default_governor();
+	struct cpufreq_governor *def_gov = cpufreq_default_governor();
+	struct cpufreq_governor *gov = NULL;
+	unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
 
 	if (has_target()) {
-		/*
-		 * Update governor of new_policy to the governor used before
-		 * hotplug
-		 */
+		/* Update policy governor to the one used before hotplug. */
 		gov = find_governor(policy->last_governor);
 		if (gov) {
 			pr_debug("Restoring governor %s for cpu %d\n",
-				policy->governor->name, policy->cpu);
-		} else {
-			if (!def_gov)
-				return -ENODATA;
+				 policy->governor->name, policy->cpu);
+		} else if (def_gov) {
 			gov = def_gov;
+		} else {
+			return -ENODATA;
 		}
-		new_policy.governor = gov;
 	} else {
 		/* Use the default policy if there is no last_policy. */
 		if (policy->last_policy) {
-			new_policy.policy = policy->last_policy;
+			pol = policy->last_policy;
+		} else if (def_gov) {
+			pol = cpufreq_parse_policy(def_gov->name);
 		} else {
-			if (!def_gov)
-				return -ENODATA;
-			cpufreq_parse_policy(def_gov->name, &new_policy);
+			return -ENODATA;
 		}
 	}
 
-	return cpufreq_set_policy(policy, &new_policy);
+	return cpufreq_set_policy(policy, gov, pol);
 }
 
 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
@@ -1114,13 +1107,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
 
 void refresh_frequency_limits(struct cpufreq_policy *policy)
 {
-	struct cpufreq_policy new_policy;
-
 	if (!policy_is_inactive(policy)) {
-		new_policy = *policy;
 		pr_debug("updating policy for CPU %u\n", policy->cpu);
 
-		cpufreq_set_policy(policy, &new_policy);
+		cpufreq_set_policy(policy, policy->governor, policy->policy);
 	}
 }
 EXPORT_SYMBOL(refresh_frequency_limits);
@@ -2364,46 +2354,49 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 /**
  * cpufreq_set_policy - Modify cpufreq policy parameters.
  * @policy: Policy object to modify.
- * @new_policy: New policy data.
+ * @new_gov: Policy governor pointer.
+ * @new_pol: Policy value (for drivers with built-in governors).
  *
- * Pass @new_policy to the cpufreq driver's ->verify() callback. Next, copy the
- * min and max parameters of @new_policy to @policy and either invoke the
- * driver's ->setpolicy() callback (if present) or carry out a governor update
- * for @policy.  That is, run the current governor's ->limits() callback (if the
- * governor field in @new_policy points to the same object as the one in
- * @policy) or replace the governor for @policy with the new one stored in
- * @new_policy.
+ * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
+ * limits to be set for the policy, update @policy with the verified limit
+ * values and either invoke the driver's ->setpolicy() callback (if present) or
+ * carry out a governor update for @policy.  That is, run the current governor's
+ * ->limits() callback (if @new_gov points to the same object as the one in
+ * @policy) or replace the governor for @policy with @new_gov.
  *
  * The cpuinfo part of @policy is not updated by this function.
  */
-int cpufreq_set_policy(struct cpufreq_policy *policy,
-		       struct cpufreq_policy *new_policy)
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+			      struct cpufreq_governor *new_gov,
+			      unsigned int new_pol)
 {
+	struct cpufreq_policy_data new_data;
 	struct cpufreq_governor *old_gov;
 	int ret;
 
-	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
-		 new_policy->cpu, new_policy->min, new_policy->max);
-
-	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
-
+	memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
+	new_data.freq_table = policy->freq_table;
+	new_data.cpu = policy->cpu;
 	/*
 	 * PM QoS framework collects all the requests from users and provide us
 	 * the final aggregated value here.
 	 */
-	new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
-	new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
+	new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
+	new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
+
+	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+		 new_data.cpu, new_data.min, new_data.max);
 
 	/*
 	 * Verify that the CPU speed can be set within these limits and make sure
 	 * that min <= max.
 	 */
-	ret = cpufreq_driver->verify(new_policy);
+	ret = cpufreq_driver->verify(&new_data);
 	if (ret)
 		return ret;
 
-	policy->min = new_policy->min;
-	policy->max = new_policy->max;
+	policy->min = new_data.min;
+	policy->max = new_data.max;
 	trace_cpu_frequency_limits(policy);
 
 	policy->cached_target_freq = UINT_MAX;
@@ -2412,12 +2405,12 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
 		 policy->min, policy->max);
 
 	if (cpufreq_driver->setpolicy) {
-		policy->policy = new_policy->policy;
+		policy->policy = new_pol;
 		pr_debug("setting range\n");
 		return cpufreq_driver->setpolicy(policy);
 	}
 
-	if (new_policy->governor == policy->governor) {
+	if (new_gov == policy->governor) {
 		pr_debug("governor limits update\n");
 		cpufreq_governor_limits(policy);
 		return 0;
@@ -2434,7 +2427,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
 	}
 
 	/* start new governor */
-	policy->governor = new_policy->governor;
+	policy->governor = new_gov;
 	ret = cpufreq_init_governor(policy);
 	if (!ret) {
 		ret = cpufreq_start_governor(policy);
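
    To summarise the refactor above, a userspace sketch of the new parsing
    split: cpufreq_parse_policy() maps a string to a policy constant for
    ->setpolicy() drivers, while cpufreq_parse_governor() resolves a
    governor object for has_target() drivers, and both feed
    cpufreq_set_policy(). The governor table and constants below are
    stand-ins, not the kernel's.

    #include <stdio.h>
    #include <strings.h>

    enum { POLICY_UNKNOWN = 0, POLICY_POWERSAVE, POLICY_PERFORMANCE };

    struct governor { const char *name; };

    static struct governor governors[] = {
            { "ondemand" }, { "schedutil" }, { "userspace" },
    };

    static unsigned int parse_policy(const char *s)
    {
            if (!strcasecmp(s, "performance"))
                    return POLICY_PERFORMANCE;
            if (!strcasecmp(s, "powersave"))
                    return POLICY_POWERSAVE;
            return POLICY_UNKNOWN;  /* store_scaling_governor() maps this to -EINVAL */
    }

    static struct governor *parse_governor(const char *s)
    {
            for (unsigned i = 0; i < sizeof(governors) / sizeof(governors[0]); i++)
                    if (!strcasecmp(s, governors[i].name))
                            return &governors[i];
            return NULL;            /* caller treats NULL as -EINVAL */
    }

    int main(void)
    {
            printf("policy(\"powersave\") = %u\n", parse_policy("powersave"));
            printf("governor(\"schedutil\") = %s\n",
                   parse_governor("schedutil") ? "found" : "not found");
            return 0;
    }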
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index ded427e..e117b00 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -60,7 +60,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
 		return 0;
 }
 
-int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
 				   struct cpufreq_frequency_table *table)
 {
 	struct cpufreq_frequency_table *pos;
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
  * Generic routine to verify policy & frequency table, requires driver to set
  * policy->freq_table prior to it.
  */
-int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
 {
 	if (!policy->freq_table)
 		return -ENODEV;
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index e97b573..75b3ef7 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -328,7 +328,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
  *      for the hardware supported by the driver.
  */
 
-static int cpufreq_gx_verify(struct cpufreq_policy *policy)
+static int cpufreq_gx_verify(struct cpufreq_policy_data *policy)
 {
 	unsigned int tmp_freq = 0;
 	u8 tmp1, tmp2;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index ad6a17c..c81e1ff 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2036,8 +2036,9 @@ static int intel_pstate_get_max_freq(struct cpudata *cpu)
 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
 }
 
-static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
-					    struct cpudata *cpu)
+static void intel_pstate_update_perf_limits(struct cpudata *cpu,
+					    unsigned int policy_min,
+					    unsigned int policy_max)
 {
 	int max_freq = intel_pstate_get_max_freq(cpu);
 	int32_t max_policy_perf, min_policy_perf;
@@ -2056,18 +2057,17 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 		turbo_max = cpu->pstate.turbo_pstate;
 	}
 
-	max_policy_perf = max_state * policy->max / max_freq;
-	if (policy->max == policy->min) {
+	max_policy_perf = max_state * policy_max / max_freq;
+	if (policy_max == policy_min) {
 		min_policy_perf = max_policy_perf;
 	} else {
-		min_policy_perf = max_state * policy->min / max_freq;
+		min_policy_perf = max_state * policy_min / max_freq;
 		min_policy_perf = clamp_t(int32_t, min_policy_perf,
 					  0, max_policy_perf);
 	}
 
 	pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
-		 policy->cpu, max_state,
-		 min_policy_perf, max_policy_perf);
+		 cpu->cpu, max_state, min_policy_perf, max_policy_perf);
 
 	/* Normalize user input to [min_perf, max_perf] */
 	if (per_cpu_limits) {
@@ -2081,7 +2081,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 		global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
 		global_min = clamp_t(int32_t, global_min, 0, global_max);
 
-		pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
+		pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
 			 global_min, global_max);
 
 		cpu->min_perf_ratio = max(min_policy_perf, global_min);
@@ -2094,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 					  cpu->max_perf_ratio);
 
 	}
-	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
+	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
 		 cpu->max_perf_ratio,
 		 cpu->min_perf_ratio);
 }
@@ -2114,7 +2114,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
 	mutex_lock(&intel_pstate_limits_lock);
 
-	intel_pstate_update_perf_limits(policy, cpu);
+	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
 
 	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
 		/*
@@ -2143,8 +2143,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
-					 struct cpudata *cpu)
+static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
+					   struct cpufreq_policy_data *policy)
 {
 	if (!hwp_active &&
 	    cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
@@ -2155,7 +2155,7 @@ static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
 	}
 }
 
-static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
+static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
 {
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
 
@@ -2163,11 +2163,7 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
 				     intel_pstate_get_max_freq(cpu));
 
-	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
-	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
-		return -EINVAL;
-
-	intel_pstate_adjust_policy_max(policy, cpu);
+	intel_pstate_adjust_policy_max(cpu, policy);
 
 	return 0;
 }
@@ -2268,7 +2264,7 @@ static struct cpufreq_driver intel_pstate = {
 	.name		= "intel_pstate",
 };
 
-static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
+static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
 {
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
 
@@ -2276,9 +2272,9 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
 	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
 				     intel_pstate_get_max_freq(cpu));
 
-	intel_pstate_adjust_policy_max(policy, cpu);
+	intel_pstate_adjust_policy_max(cpu, policy);
 
-	intel_pstate_update_perf_limits(policy, cpu);
+	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
 
 	return 0;
 }
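
    The scaling performed by intel_pstate_update_perf_limits() after this
    refactor can be sketched with plain arithmetic: the policy min/max
    frequencies are converted into performance-state units against
    max_state/max_freq and the minimum is clamped to the maximum. All
    numbers below are hypothetical, not real hardware values.

    #include <stdio.h>

    static int clamp_int(int v, int lo, int hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
            int max_state = 32;             /* e.g. highest turbo pstate */
            int max_freq = 3200000;         /* kHz corresponding to max_state */
            int policy_min = 800000;        /* kHz, from the cpufreq policy */
            int policy_max = 2400000;       /* kHz */

            int max_policy_perf = (long long)max_state * policy_max / max_freq;
            int min_policy_perf;

            if (policy_max == policy_min)
                    min_policy_perf = max_policy_perf;
            else
                    min_policy_perf = clamp_int(
                            (long long)max_state * policy_min / max_freq,
                            0, max_policy_perf);

            printf("min_perf=%d max_perf=%d\n", min_policy_perf, max_policy_perf);
            return 0;
    }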
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
index 64b8689f..0b08be8 100644
--- a/drivers/cpufreq/longrun.c
+++ b/drivers/cpufreq/longrun.c
@@ -122,7 +122,7 @@ static int longrun_set_policy(struct cpufreq_policy *policy)
  * Validates a new CPUFreq policy. This function has to be called with
  * cpufreq_driver locked.
  */
-static int longrun_verify_policy(struct cpufreq_policy *policy)
+static int longrun_verify_policy(struct cpufreq_policy_data *policy)
 {
 	if (!policy)
 		return -EINVAL;
@@ -130,10 +130,6 @@ static int longrun_verify_policy(struct cpufreq_policy *policy)
 	policy->cpu = 0;
 	cpufreq_verify_within_cpu_limits(policy);
 
-	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
-	    (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
-		return -EINVAL;
-
 	return 0;
 }
 
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 89d4fa8..5789fe7 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -109,7 +109,7 @@ struct pcc_cpu {
 
 static struct pcc_cpu __percpu *pcc_cpu_info;
 
-static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
+static int pcc_cpufreq_verify(struct cpufreq_policy_data *policy)
 {
 	cpufreq_verify_within_cpu_limits(policy);
 	return 0;
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index e6182c8..61623e2 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -261,7 +261,7 @@ static void scmi_cpufreq_remove(struct scmi_device *sdev)
 }
 
 static const struct scmi_device_id scmi_id_table[] = {
-	{ SCMI_PROTOCOL_PERF },
+	{ SCMI_PROTOCOL_PERF, "cpufreq" },
 	{ },
 };
 MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 5096c0a..0ac265d 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -87,7 +87,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
 	return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
 }
 
-static int sh_cpufreq_verify(struct cpufreq_policy *policy)
+static int sh_cpufreq_verify(struct cpufreq_policy_data *policy)
 {
 	struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
 	struct cpufreq_frequency_table *freq_table;
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 707dbc1..98d3921 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -22,7 +22,7 @@ static struct cpufreq_driver ucv2_driver;
 /* make sure that only the "userspace" governor is run
  * -- anything else wouldn't make sense on this platform, anyway.
  */
-static int ucv2_verify_speed(struct cpufreq_policy *policy)
+static int ucv2_verify_speed(struct cpufreq_policy_data *policy)
 {
 	if (policy->cpu)
 		return -EINVAL;
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index ee70d5c..cc8c769 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -21,7 +21,9 @@
 obj-$(CONFIG_ARM_AT91_CPUIDLE)          += cpuidle-at91.o
 obj-$(CONFIG_ARM_EXYNOS_CPUIDLE)        += cpuidle-exynos.o
 obj-$(CONFIG_ARM_CPUIDLE)		+= cpuidle-arm.o
-obj-$(CONFIG_ARM_PSCI_CPUIDLE)		+= cpuidle-psci.o
+obj-$(CONFIG_ARM_PSCI_CPUIDLE)		+= cpuidle_psci.o
+cpuidle_psci-y				:= cpuidle-psci.o
+cpuidle_psci-$(CONFIG_PM_GENERIC_DOMAINS_OF) += cpuidle-psci-domain.o
 
 ###############################################################################
 # MIPS drivers
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
new file mode 100644
index 0000000..423f03b
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PM domains for CPUs via genpd - managed by cpuidle-psci.
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ */
+
+#define pr_fmt(fmt) "CPUidle PSCI: " fmt
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/psci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "cpuidle-psci.h"
+
+struct psci_pd_provider {
+	struct list_head link;
+	struct device_node *node;
+};
+
+static LIST_HEAD(psci_pd_providers);
+static bool osi_mode_enabled __initdata;
+
+static int psci_pd_power_off(struct generic_pm_domain *pd)
+{
+	struct genpd_power_state *state = &pd->states[pd->state_idx];
+	u32 *pd_state;
+
+	if (!state->data)
+		return 0;
+
+	/* OSI mode is enabled, set the corresponding domain state. */
+	pd_state = state->data;
+	psci_set_domain_state(*pd_state);
+
+	return 0;
+}
+
+static int __init psci_pd_parse_state_nodes(struct genpd_power_state *states,
+					int state_count)
+{
+	int i, ret;
+	u32 psci_state, *psci_state_buf;
+
+	for (i = 0; i < state_count; i++) {
+		ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode),
+					&psci_state);
+		if (ret)
+			goto free_state;
+
+		psci_state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
+		if (!psci_state_buf) {
+			ret = -ENOMEM;
+			goto free_state;
+		}
+		*psci_state_buf = psci_state;
+		states[i].data = psci_state_buf;
+	}
+
+	return 0;
+
+free_state:
+	i--;
+	for (; i >= 0; i--)
+		kfree(states[i].data);
+	return ret;
+}
+
+static int __init psci_pd_parse_states(struct device_node *np,
+			struct genpd_power_state **states, int *state_count)
+{
+	int ret;
+
+	/* Parse the domain idle states. */
+	ret = of_genpd_parse_idle_states(np, states, state_count);
+	if (ret)
+		return ret;
+
+	/* Fill out the PSCI specifics for each found state. */
+	ret = psci_pd_parse_state_nodes(*states, *state_count);
+	if (ret)
+		kfree(*states);
+
+	return ret;
+}
+
+static void psci_pd_free_states(struct genpd_power_state *states,
+				unsigned int state_count)
+{
+	int i;
+
+	for (i = 0; i < state_count; i++)
+		kfree(states[i].data);
+	kfree(states);
+}
+
+static int __init psci_pd_init(struct device_node *np)
+{
+	struct generic_pm_domain *pd;
+	struct psci_pd_provider *pd_provider;
+	struct dev_power_governor *pd_gov;
+	struct genpd_power_state *states = NULL;
+	int ret = -ENOMEM, state_count = 0;
+
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		goto out;
+
+	pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
+	if (!pd_provider)
+		goto free_pd;
+
+	pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
+	if (!pd->name)
+		goto free_pd_prov;
+
+	/*
+	 * Parse the domain idle states and let genpd manage the state selection
+	 * for those compatible with "domain-idle-state".
+	 */
+	ret = psci_pd_parse_states(np, &states, &state_count);
+	if (ret)
+		goto free_name;
+
+	pd->free_states = psci_pd_free_states;
+	pd->name = kbasename(pd->name);
+	pd->power_off = psci_pd_power_off;
+	pd->states = states;
+	pd->state_count = state_count;
+	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
+
+	/* Use governor for CPU PM domains if it has some states to manage. */
+	pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;
+
+	ret = pm_genpd_init(pd, pd_gov, false);
+	if (ret) {
+		psci_pd_free_states(states, state_count);
+		goto free_name;
+	}
+
+	ret = of_genpd_add_provider_simple(np, pd);
+	if (ret)
+		goto remove_pd;
+
+	pd_provider->node = of_node_get(np);
+	list_add(&pd_provider->link, &psci_pd_providers);
+
+	pr_debug("init PM domain %s\n", pd->name);
+	return 0;
+
+remove_pd:
+	pm_genpd_remove(pd);
+free_name:
+	kfree(pd->name);
+free_pd_prov:
+	kfree(pd_provider);
+free_pd:
+	kfree(pd);
+out:
+	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
+	return ret;
+}
+
+static void __init psci_pd_remove(void)
+{
+	struct psci_pd_provider *pd_provider, *it;
+	struct generic_pm_domain *genpd;
+
+	list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) {
+		of_genpd_del_provider(pd_provider->node);
+
+		genpd = of_genpd_remove_last(pd_provider->node);
+		if (!IS_ERR(genpd))
+			kfree(genpd);
+
+		of_node_put(pd_provider->node);
+		list_del(&pd_provider->link);
+		kfree(pd_provider);
+	}
+}
+
+static int __init psci_pd_init_topology(struct device_node *np, bool add)
+{
+	struct device_node *node;
+	struct of_phandle_args child, parent;
+	int ret;
+
+	for_each_child_of_node(np, node) {
+		if (of_parse_phandle_with_args(node, "power-domains",
+					"#power-domain-cells", 0, &parent))
+			continue;
+
+		child.np = node;
+		child.args_count = 0;
+
+		ret = add ? of_genpd_add_subdomain(&parent, &child) :
+			of_genpd_remove_subdomain(&parent, &child);
+		of_node_put(parent.np);
+		if (ret) {
+			of_node_put(node);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int __init psci_pd_add_topology(struct device_node *np)
+{
+	return psci_pd_init_topology(np, true);
+}
+
+static void __init psci_pd_remove_topology(struct device_node *np)
+{
+	psci_pd_init_topology(np, false);
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+	{ .compatible = "arm,psci-1.0" },
+	{}
+};
+
+static int __init psci_idle_init_domains(void)
+{
+	struct device_node *np = of_find_matching_node(NULL, psci_of_match);
+	struct device_node *node;
+	int ret = 0, pd_count = 0;
+
+	if (!np)
+		return -ENODEV;
+
+	/* Currently limit the hierarchical topology to be used in OSI mode. */
+	if (!psci_has_osi_support())
+		goto out;
+
+	/*
+	 * Parse child nodes for the "#power-domain-cells" property and
+	 * initialize a genpd/genpd-of-provider pair when it's found.
+	 */
+	for_each_child_of_node(np, node) {
+		if (!of_find_property(node, "#power-domain-cells", NULL))
+			continue;
+
+		ret = psci_pd_init(node);
+		if (ret)
+			goto put_node;
+
+		pd_count++;
+	}
+
+	/* Bail out if not using the hierarchical CPU topology. */
+	if (!pd_count)
+		goto out;
+
+	/* Link genpd masters/subdomains to model the CPU topology. */
+	ret = psci_pd_add_topology(np);
+	if (ret)
+		goto remove_pd;
+
+	/* Try to enable OSI mode. */
+	ret = psci_set_osi_mode();
+	if (ret) {
+		pr_warn("failed to enable OSI mode: %d\n", ret);
+		psci_pd_remove_topology(np);
+		goto remove_pd;
+	}
+
+	osi_mode_enabled = true;
+	of_node_put(np);
+	pr_info("Initialized CPU PM domain topology\n");
+	return pd_count;
+
+put_node:
+	of_node_put(node);
+remove_pd:
+	if (pd_count)
+		psci_pd_remove();
+	pr_err("failed to create CPU PM domains ret=%d\n", ret);
+out:
+	of_node_put(np);
+	return ret;
+}
+subsys_initcall(psci_idle_init_domains);
+
+struct device __init *psci_dt_attach_cpu(int cpu)
+{
+	struct device *dev;
+
+	if (!osi_mode_enabled)
+		return NULL;
+
+	dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci");
+	if (IS_ERR_OR_NULL(dev))
+		return dev;
+
+	pm_runtime_irq_safe(dev);
+	if (cpu_online(cpu))
+		pm_runtime_get_sync(dev);
+
+	return dev;
+}
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index f3c1a23..edd7a54 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -8,6 +8,7 @@
 
 #define pr_fmt(fmt) "CPUidle PSCI: " fmt
 
+#include <linux/cpuhotplug.h>
 #include <linux/cpuidle.h>
 #include <linux/cpumask.h>
 #include <linux/cpu_pm.h>
@@ -16,21 +17,107 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/psci.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
 #include <asm/cpuidle.h>
 
+#include "cpuidle-psci.h"
 #include "dt_idle_states.h"
 
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+struct psci_cpuidle_data {
+	u32 *psci_states;
+	struct device *dev;
+};
+
+static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
+static DEFINE_PER_CPU(u32, domain_state);
+static bool psci_cpuidle_use_cpuhp __initdata;
+
+void psci_set_domain_state(u32 state)
+{
+	__this_cpu_write(domain_state, state);
+}
+
+static inline u32 psci_get_domain_state(void)
+{
+	return __this_cpu_read(domain_state);
+}
+
+static inline int psci_enter_state(int idx, u32 state)
+{
+	return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, idx, state);
+}
+
+static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
+					struct cpuidle_driver *drv, int idx)
+{
+	struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
+	u32 *states = data->psci_states;
+	struct device *pd_dev = data->dev;
+	u32 state;
+	int ret;
+
+	/* Do runtime PM to manage a hierarchical CPU topology. */
+	pm_runtime_put_sync_suspend(pd_dev);
+
+	state = psci_get_domain_state();
+	if (!state)
+		state = states[idx];
+
+	ret = psci_enter_state(idx, state);
+
+	pm_runtime_get_sync(pd_dev);
+
+	/* Clear the domain state to start fresh when back from idle. */
+	psci_set_domain_state(0);
+	return ret;
+}
+
+static int psci_idle_cpuhp_up(unsigned int cpu)
+{
+	struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
+
+	if (pd_dev)
+		pm_runtime_get_sync(pd_dev);
+
+	return 0;
+}
+
+static int psci_idle_cpuhp_down(unsigned int cpu)
+{
+	struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
+
+	if (pd_dev) {
+		pm_runtime_put_sync(pd_dev);
+		/* Clear domain state to start fresh at next online. */
+		psci_set_domain_state(0);
+	}
+
+	return 0;
+}
+
+static void __init psci_idle_init_cpuhp(void)
+{
+	int err;
+
+	if (!psci_cpuidle_use_cpuhp)
+		return;
+
+	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
+					"cpuidle/psci:online",
+					psci_idle_cpuhp_up,
+					psci_idle_cpuhp_down);
+	if (err)
+		pr_warn("Failed to set up cpuhp state: %d\n", err);
+}
 
 static int psci_enter_idle_state(struct cpuidle_device *dev,
 				struct cpuidle_driver *drv, int idx)
 {
-	u32 *state = __this_cpu_read(psci_power_state);
+	u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);
 
-	return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
-					   idx, state[idx - 1]);
+	return psci_enter_state(idx, state[idx]);
 }
 
 static struct cpuidle_driver psci_idle_driver __initdata = {
@@ -56,7 +143,7 @@ static const struct of_device_id psci_idle_state_match[] __initconst = {
 	{ },
 };
 
-static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
+int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
 {
 	int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
 
@@ -73,28 +160,25 @@ static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
 	return 0;
 }
 
-static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
+					struct device_node *cpu_node,
+					unsigned int state_count, int cpu)
 {
-	int i, ret = 0, count = 0;
+	int i, ret = 0;
 	u32 *psci_states;
 	struct device_node *state_node;
+	struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
 
-	/* Count idle states */
-	while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
-					      count))) {
-		count++;
-		of_node_put(state_node);
-	}
-
-	if (!count)
-		return -ENODEV;
-
-	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+	state_count++; /* Add WFI state too */
+	psci_states = kcalloc(state_count, sizeof(*psci_states), GFP_KERNEL);
 	if (!psci_states)
 		return -ENOMEM;
 
-	for (i = 0; i < count; i++) {
-		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+	for (i = 1; i < state_count; i++) {
+		state_node = of_get_cpu_state_node(cpu_node, i - 1);
+		if (!state_node)
+			break;
+
 		ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
 		of_node_put(state_node);
 
@@ -104,8 +188,33 @@ static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
 		pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
 	}
 
-	/* Idle states parsed correctly, initialize per-cpu pointer */
-	per_cpu(psci_power_state, cpu) = psci_states;
+	if (i != state_count) {
+		ret = -ENODEV;
+		goto free_mem;
+	}
+
+	/* Currently limit the hierarchical topology to be used in OSI mode. */
+	if (psci_has_osi_support()) {
+		data->dev = psci_dt_attach_cpu(cpu);
+		if (IS_ERR(data->dev)) {
+			ret = PTR_ERR(data->dev);
+			goto free_mem;
+		}
+
+		/*
+		 * Use the deepest CPU state to trigger a potential selection
+		 * of a shared state for the domain; this assumes the domain
+		 * states are all deeper than the CPU states.
+		 */
+		if (data->dev) {
+			drv->states[state_count - 1].enter =
+				psci_enter_domain_idle_state;
+			psci_cpuidle_use_cpuhp = true;
+		}
+	}
+
+	/* Idle states parsed correctly, store them in the per-cpu struct. */
+	data->psci_states = psci_states;
 	return 0;
 
 free_mem:
@@ -113,7 +222,8 @@ static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
 	return ret;
 }
 
-static __init int psci_cpu_init_idle(unsigned int cpu)
+static __init int psci_cpu_init_idle(struct cpuidle_driver *drv,
+				     unsigned int cpu, unsigned int state_count)
 {
 	struct device_node *cpu_node;
 	int ret;
@@ -129,7 +239,7 @@ static __init int psci_cpu_init_idle(unsigned int cpu)
 	if (!cpu_node)
 		return -ENODEV;
 
-	ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+	ret = psci_dt_cpu_init_idle(drv, cpu_node, state_count, cpu);
 
 	of_node_put(cpu_node);
 
@@ -185,7 +295,7 @@ static int __init psci_idle_init_cpu(int cpu)
 	/*
 	 * Initialize PSCI idle states.
 	 */
-	ret = psci_cpu_init_idle(cpu);
+	ret = psci_cpu_init_idle(drv, cpu, ret);
 	if (ret) {
 		pr_err("CPU %d failed to PSCI idle\n", cpu);
 		goto out_kfree_drv;
@@ -221,6 +331,7 @@ static int __init psci_idle_init(void)
 			goto out_fail;
 	}
 
+	psci_idle_init_cpuhp();
 	return 0;
 
 out_fail:
diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h
new file mode 100644
index 0000000..7299a04
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-psci.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __CPUIDLE_PSCI_H
+#define __CPUIDLE_PSCI_H
+
+struct device_node;
+
+void psci_set_domain_state(u32 state);
+int __init psci_dt_parse_state_node(struct device_node *np, u32 *state);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+struct device __init *psci_dt_attach_cpu(int cpu);
+#else
+static inline struct device __init *psci_dt_attach_cpu(int cpu) { return NULL; }
+#endif
+
+#endif /* __CPUIDLE_PSCI_H */
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index d06d21a..252f2a9 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -111,8 +111,7 @@ static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
 	for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
 	     cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
 		cpu_node = of_cpu_device_node_get(cpu);
-		curr_state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
-						   idx);
+		curr_state_node = of_get_cpu_state_node(cpu_node, idx);
 		if (state_node != curr_state_node)
 			valid = false;
 
@@ -170,7 +169,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
 	cpu_node = of_cpu_device_node_get(cpumask_first(cpumask));
 
 	for (i = 0; ; i++) {
-		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+		state_node = of_get_cpu_state_node(cpu_node, i);
 		if (!state_node)
 			break;
 
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index a23b675..0613bb7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -44,4 +44,15 @@
 	default n
 	depends on DMA_SHARED_BUFFER
 
+menuconfig DMABUF_HEAPS
+	bool "DMA-BUF Userland Memory Heaps"
+	select DMA_SHARED_BUFFER
+	help
+	  Choose this option to enable the DMA-BUF userland memory heaps.
+	  This option creates per-heap chardevs in /dev/dma_heap/ which
+	  allow userspace to allocate dma-bufs that can be shared
+	  between drivers.
+
+source "drivers/dma-buf/heaps/Kconfig"
+
 endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 03479da..9c19002 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
 	 dma-resv.o seqno-fence.o
+obj-$(CONFIG_DMABUF_HEAPS)	+= dma-heap.o
+obj-$(CONFIG_DMABUF_HEAPS)	+= heaps/
 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
 obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
 obj-$(CONFIG_UDMABUF)		+= udmabuf.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index ce41cd9..d409785 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -878,29 +878,9 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
  *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
  *   access.
  *
- *   To support dma_buf objects residing in highmem cpu access is page-based
- *   using an api similar to kmap. Accessing a dma_buf is done in aligned chunks
- *   of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
- *   returns a pointer in kernel virtual address space. Afterwards the chunk
- *   needs to be unmapped again. There is no limit on how often a given chunk
- *   can be mapped and unmapped, i.e. the importer does not need to call
- *   begin_cpu_access again before mapping the same chunk again.
- *
- *   Interfaces::
- *      void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
- *      void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
- *
- *   Implementing the functions is optional for exporters and for importers all
- *   the restrictions of using kmap apply.
- *
- *   dma_buf kmap calls outside of the range specified in begin_cpu_access are
- *   undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
- *   the partial chunks at the beginning and end but may return stale or bogus
- *   data outside of the range (in these partial chunks).
- *
- *   For some cases the overhead of kmap can be too high, a vmap interface
- *   is introduced. This interface should be used very carefully, as vmalloc
- *   space is a limited resources on many architectures.
+ *   Since most kernel-internal dma-buf accesses need the entire buffer, a
+ *   vmap interface is introduced. Note that on very old 32-bit architectures
+ *   vmalloc space might be limited and result in vmap calls failing.
  *
  *   Interfaces::
  *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
@@ -1050,43 +1030,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 
-/**
- * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
- * same restrictions as for kmap and friends apply.
- * @dmabuf:	[in]	buffer to map page from.
- * @page_num:	[in]	page in PAGE_SIZE units to map.
- *
- * This call must always succeed, any necessary preparations that might fail
- * need to be done in begin_cpu_access.
- */
-void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
-{
-	WARN_ON(!dmabuf);
-
-	if (!dmabuf->ops->map)
-		return NULL;
-	return dmabuf->ops->map(dmabuf, page_num);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kmap);
-
-/**
- * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
- * @dmabuf:	[in]	buffer to unmap page from.
- * @page_num:	[in]	page in PAGE_SIZE units to unmap.
- * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
- *
- * This call must always succeed.
- */
-void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
-		    void *vaddr)
-{
-	WARN_ON(!dmabuf);
-
-	if (dmabuf->ops->unmap)
-		dmabuf->ops->unmap(dmabuf, page_num, vaddr);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kunmap);
-
 
 /**
  * dma_buf_mmap - Setup up a userspace mmap with the given vma
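A minimal sketch (not part of the patch above) of how an importer could use the vmap path the updated documentation describes, instead of the removed dma_buf_kmap()/dma_buf_kunmap() page-based interface. The function name example_cpu_fill() is purely illustrative; dma_buf_vunmap() is assumed as the counterpart of dma_buf_vmap(), the exporter is assumed to implement .vmap, and the direction argument and error handling are kept deliberately simple:

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/string.h>
#include <linux/types.h>

static int example_cpu_fill(struct dma_buf *dmabuf, size_t len, u8 pattern)
{
	void *vaddr;
	int ret;

	/* Bracket the CPU access so the exporter can flush/invalidate. */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	/* Map the whole buffer at once instead of kmap'ing page by page. */
	vaddr = dma_buf_vmap(dmabuf);
	if (!vaddr) {
		dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	memset(vaddr, pattern, len);

	dma_buf_vunmap(dmabuf, vaddr);
	return dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
}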
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
new file mode 100644
index 0000000..afd22c9
--- /dev/null
+++ b/drivers/dma-buf/dma-heap.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Framework for userspace DMA-BUF allocations
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/xarray.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/dma-heap.h>
+#include <uapi/linux/dma-heap.h>
+
+#define DEVNAME "dma_heap"
+
+#define NUM_HEAP_MINORS 128
+
+/**
+ * struct dma_heap - represents a dmabuf heap in the system
+ * @name:		used for debugging/device-node name
+ * @ops:		ops struct for this heap
+ * @heap_devt		heap device node
+ * @list		list head connecting to list of heaps
+ * @heap_cdev		heap char device
+ *
+ * Represents a heap of memory from which buffers can be made.
+ */
+struct dma_heap {
+	const char *name;
+	const struct dma_heap_ops *ops;
+	void *priv;
+	dev_t heap_devt;
+	struct list_head list;
+	struct cdev heap_cdev;
+};
+
+static LIST_HEAD(heap_list);
+static DEFINE_MUTEX(heap_list_lock);
+static dev_t dma_heap_devt;
+static struct class *dma_heap_class;
+static DEFINE_XARRAY_ALLOC(dma_heap_minors);
+
+static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
+				 unsigned int fd_flags,
+				 unsigned int heap_flags)
+{
+	/*
+	 * Allocations from all heaps have to begin
+	 * and end on page boundaries.
+	 */
+	len = PAGE_ALIGN(len);
+	if (!len)
+		return -EINVAL;
+
+	return heap->ops->allocate(heap, len, fd_flags, heap_flags);
+}
+
+static int dma_heap_open(struct inode *inode, struct file *file)
+{
+	struct dma_heap *heap;
+
+	heap = xa_load(&dma_heap_minors, iminor(inode));
+	if (!heap) {
+		pr_err("dma_heap: minor %d unknown.\n", iminor(inode));
+		return -ENODEV;
+	}
+
+	/* instance data as context */
+	file->private_data = heap;
+	nonseekable_open(inode, file);
+
+	return 0;
+}
+
+static long dma_heap_ioctl_allocate(struct file *file, void *data)
+{
+	struct dma_heap_allocation_data *heap_allocation = data;
+	struct dma_heap *heap = file->private_data;
+	int fd;
+
+	if (heap_allocation->fd)
+		return -EINVAL;
+
+	if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
+		return -EINVAL;
+
+	if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
+		return -EINVAL;
+
+	fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
+				   heap_allocation->fd_flags,
+				   heap_allocation->heap_flags);
+	if (fd < 0)
+		return fd;
+
+	heap_allocation->fd = fd;
+
+	return 0;
+}
+
+static unsigned int dma_heap_ioctl_cmds[] = {
+	DMA_HEAP_IOCTL_ALLOC,
+};
+
+static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
+			   unsigned long arg)
+{
+	char stack_kdata[128];
+	char *kdata = stack_kdata;
+	unsigned int kcmd;
+	unsigned int in_size, out_size, drv_size, ksize;
+	int nr = _IOC_NR(ucmd);
+	int ret = 0;
+
+	if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
+		return -EINVAL;
+
+	/* Get the kernel ioctl cmd that matches */
+	kcmd = dma_heap_ioctl_cmds[nr];
+
+	/* Figure out the delta between user cmd size and kernel cmd size */
+	drv_size = _IOC_SIZE(kcmd);
+	out_size = _IOC_SIZE(ucmd);
+	in_size = out_size;
+	if ((ucmd & kcmd & IOC_IN) == 0)
+		in_size = 0;
+	if ((ucmd & kcmd & IOC_OUT) == 0)
+		out_size = 0;
+	ksize = max(max(in_size, out_size), drv_size);
+
+	/* If necessary, allocate buffer for ioctl argument */
+	if (ksize > sizeof(stack_kdata)) {
+		kdata = kmalloc(ksize, GFP_KERNEL);
+		if (!kdata)
+			return -ENOMEM;
+	}
+
+	if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
+		ret = -EFAULT;
+		goto err;
+	}
+
+	/* zero out any difference between the kernel/user structure size */
+	if (ksize > in_size)
+		memset(kdata + in_size, 0, ksize - in_size);
+
+	switch (kcmd) {
+	case DMA_HEAP_IOCTL_ALLOC:
+		ret = dma_heap_ioctl_allocate(file, kdata);
+		break;
+	default:
+		ret = -ENOTTY;
+		goto err;
+	}
+
+	if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
+		ret = -EFAULT;
+err:
+	if (kdata != stack_kdata)
+		kfree(kdata);
+	return ret;
+}
+
+static const struct file_operations dma_heap_fops = {
+	.owner          = THIS_MODULE,
+	.open		= dma_heap_open,
+	.unlocked_ioctl = dma_heap_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= dma_heap_ioctl,
+#endif
+};
+
+/**
+ * dma_heap_get_drvdata() - get per-subdriver data for the heap
+ * @heap: DMA-Heap to retrieve private data for
+ *
+ * Returns:
+ * The per-subdriver data for the heap.
+ */
+void *dma_heap_get_drvdata(struct dma_heap *heap)
+{
+	return heap->priv;
+}
+
+struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
+{
+	struct dma_heap *heap, *h, *err_ret;
+	struct device *dev_ret;
+	unsigned int minor;
+	int ret;
+
+	if (!exp_info->name || !strcmp(exp_info->name, "")) {
+		pr_err("dma_heap: Cannot add heap without a name\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!exp_info->ops || !exp_info->ops->allocate) {
+		pr_err("dma_heap: Cannot add heap with invalid ops struct\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* check the name is unique */
+	mutex_lock(&heap_list_lock);
+	list_for_each_entry(h, &heap_list, list) {
+		if (!strcmp(h->name, exp_info->name)) {
+			mutex_unlock(&heap_list_lock);
+			pr_err("dma_heap: Already registered heap named %s\n",
+			       exp_info->name);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+	mutex_unlock(&heap_list_lock);
+
+	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+
+	heap->name = exp_info->name;
+	heap->ops = exp_info->ops;
+	heap->priv = exp_info->priv;
+
+	/* Find unused minor number */
+	ret = xa_alloc(&dma_heap_minors, &minor, heap,
+		       XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL);
+	if (ret < 0) {
+		pr_err("dma_heap: Unable to get minor number for heap\n");
+		err_ret = ERR_PTR(ret);
+		goto err0;
+	}
+
+	/* Create device */
+	heap->heap_devt = MKDEV(MAJOR(dma_heap_devt), minor);
+
+	cdev_init(&heap->heap_cdev, &dma_heap_fops);
+	ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1);
+	if (ret < 0) {
+		pr_err("dma_heap: Unable to add char device\n");
+		err_ret = ERR_PTR(ret);
+		goto err1;
+	}
+
+	dev_ret = device_create(dma_heap_class,
+				NULL,
+				heap->heap_devt,
+				NULL,
+				heap->name);
+	if (IS_ERR(dev_ret)) {
+		pr_err("dma_heap: Unable to create device\n");
+		err_ret = ERR_CAST(dev_ret);
+		goto err2;
+	}
+	/* Add heap to the list */
+	mutex_lock(&heap_list_lock);
+	list_add(&heap->list, &heap_list);
+	mutex_unlock(&heap_list_lock);
+
+	return heap;
+
+err2:
+	cdev_del(&heap->heap_cdev);
+err1:
+	xa_erase(&dma_heap_minors, minor);
+err0:
+	kfree(heap);
+	return err_ret;
+}
+
+static char *dma_heap_devnode(struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
+}
+
+static int dma_heap_init(void)
+{
+	int ret;
+
+	ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
+	if (ret)
+		return ret;
+
+	dma_heap_class = class_create(THIS_MODULE, DEVNAME);
+	if (IS_ERR(dma_heap_class)) {
+		unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
+		return PTR_ERR(dma_heap_class);
+	}
+	dma_heap_class->devnode = dma_heap_devnode;
+
+	return 0;
+}
+subsys_initcall(dma_heap_init);
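For orientation, a minimal userspace sketch (not part of the patch) of allocating a dma-buf from one of the new per-heap chardevs via DMA_HEAP_IOCTL_ALLOC. It assumes the UAPI from the <uapi/linux/dma-heap.h> header included above is installed as <linux/dma-heap.h>, and that a heap named "system" (registered by the system heap added below) exists; the helper name alloc_dmabuf() is purely illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

/* Illustrative only: open /dev/dma_heap/<name> and request a buffer. */
static int alloc_dmabuf(const char *heap_name, size_t len)
{
	struct dma_heap_allocation_data data = {
		.len = len,			/* kernel rounds this up to PAGE_SIZE */
		.fd_flags = O_RDWR | O_CLOEXEC,	/* must stay within DMA_HEAP_VALID_FD_FLAGS */
		.heap_flags = 0,		/* checked against DMA_HEAP_VALID_HEAP_FLAGS */
	};
	char path[64];
	int heap_fd;

	snprintf(path, sizeof(path), "/dev/dma_heap/%s", heap_name);
	heap_fd = open(path, O_RDONLY);
	if (heap_fd < 0)
		return -1;

	if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
		close(heap_fd);
		return -1;
	}
	close(heap_fd);

	return data.fd;		/* the dma-buf fd returned by the heap */
}

On success the returned fd can be handed to any dma-buf importer or mmap()ed directly, since the heap helper ops added below implement .mmap.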
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 7090025..4264e64 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -34,6 +34,7 @@
 
 #include <linux/dma-resv.h>
 #include <linux/export.h>
+#include <linux/sched/mm.h>
 
 /**
  * DOC: Reservation Object Overview
@@ -95,6 +96,37 @@ static void dma_resv_list_free(struct dma_resv_list *list)
 	kfree_rcu(list, rcu);
 }
 
+#if IS_ENABLED(CONFIG_LOCKDEP)
+static int __init dma_resv_lockdep(void)
+{
+	struct mm_struct *mm = mm_alloc();
+	struct ww_acquire_ctx ctx;
+	struct dma_resv obj;
+	int ret;
+
+	if (!mm)
+		return -ENOMEM;
+
+	dma_resv_init(&obj);
+
+	down_read(&mm->mmap_sem);
+	ww_acquire_init(&ctx, &reservation_ww_class);
+	ret = dma_resv_lock(&obj, &ctx);
+	if (ret == -EDEADLK)
+		dma_resv_lock_slow(&obj, &ctx);
+	fs_reclaim_acquire(GFP_KERNEL);
+	fs_reclaim_release(GFP_KERNEL);
+	ww_mutex_unlock(&obj.lock);
+	ww_acquire_fini(&ctx);
+	up_read(&mm->mmap_sem);
+	
+	mmput(mm);
+
+	return 0;
+}
+subsys_initcall(dma_resv_lockdep);
+#endif
+
 /**
  * dma_resv_init - initialize a reservation object
  * @obj: the reservation object
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
new file mode 100644
index 0000000..a5eef06
--- /dev/null
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -0,0 +1,14 @@
+config DMABUF_HEAPS_SYSTEM
+	bool "DMA-BUF System Heap"
+	depends on DMABUF_HEAPS
+	help
+	  Choose this option to enable the system dmabuf heap. The system heap
+	  is backed by pages from the buddy allocator. If in doubt, say Y.
+
+config DMABUF_HEAPS_CMA
+	bool "DMA-BUF CMA Heap"
+	depends on DMABUF_HEAPS && DMA_CMA
+	help
+	  Choose this option to enable the dma-buf CMA heap. This heap is backed
+	  by the Contiguous Memory Allocator (CMA). If your system has these
+	  regions, you should say Y here.
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
new file mode 100644
index 0000000..6e54cde
--- /dev/null
+++ b/drivers/dma-buf/heaps/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y					+= heap-helpers.o
+obj-$(CONFIG_DMABUF_HEAPS_SYSTEM)	+= system_heap.o
+obj-$(CONFIG_DMABUF_HEAPS_CMA)		+= cma_heap.o
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
new file mode 100644
index 0000000..626cf7f
--- /dev/null
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF CMA heap exporter
+ *
+ * Copyright (C) 2012, 2019 Linaro Ltd.
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ */
+
+#include <linux/cma.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/dma-contiguous.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/sched/signal.h>
+
+#include "heap-helpers.h"
+
+struct cma_heap {
+	struct dma_heap *heap;
+	struct cma *cma;
+};
+
+static void cma_heap_free(struct heap_helper_buffer *buffer)
+{
+	struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
+	unsigned long nr_pages = buffer->pagecount;
+	struct page *cma_pages = buffer->priv_virt;
+
+	/* free page list */
+	kfree(buffer->pages);
+	/* release memory */
+	cma_release(cma_heap->cma, cma_pages, nr_pages);
+	kfree(buffer);
+}
+
+/* dmabuf heap CMA operations functions */
+static int cma_heap_allocate(struct dma_heap *heap,
+			     unsigned long len,
+			     unsigned long fd_flags,
+			     unsigned long heap_flags)
+{
+	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
+	struct heap_helper_buffer *helper_buffer;
+	struct page *cma_pages;
+	size_t size = PAGE_ALIGN(len);
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	unsigned long align = get_order(size);
+	struct dma_buf *dmabuf;
+	int ret = -ENOMEM;
+	pgoff_t pg;
+
+	if (align > CONFIG_CMA_ALIGNMENT)
+		align = CONFIG_CMA_ALIGNMENT;
+
+	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
+	if (!helper_buffer)
+		return -ENOMEM;
+
+	init_heap_helper_buffer(helper_buffer, cma_heap_free);
+	helper_buffer->heap = heap;
+	helper_buffer->size = len;
+
+	cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
+	if (!cma_pages)
+		goto free_buf;
+
+	if (PageHighMem(cma_pages)) {
+		unsigned long nr_clear_pages = nr_pages;
+		struct page *page = cma_pages;
+
+		while (nr_clear_pages > 0) {
+			void *vaddr = kmap_atomic(page);
+
+			memset(vaddr, 0, PAGE_SIZE);
+			kunmap_atomic(vaddr);
+			/*
+			 * Avoid wasting time zeroing memory if the process
+			 * has been killed by SIGKILL
+			 */
+			if (fatal_signal_pending(current))
+				goto free_cma;
+
+			page++;
+			nr_clear_pages--;
+		}
+	} else {
+		memset(page_address(cma_pages), 0, size);
+	}
+
+	helper_buffer->pagecount = nr_pages;
+	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
+					     sizeof(*helper_buffer->pages),
+					     GFP_KERNEL);
+	if (!helper_buffer->pages) {
+		ret = -ENOMEM;
+		goto free_cma;
+	}
+
+	for (pg = 0; pg < helper_buffer->pagecount; pg++)
+		helper_buffer->pages[pg] = &cma_pages[pg];
+
+	/* create the dmabuf */
+	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+	if (IS_ERR(dmabuf)) {
+		ret = PTR_ERR(dmabuf);
+		goto free_pages;
+	}
+
+	helper_buffer->dmabuf = dmabuf;
+	helper_buffer->priv_virt = cma_pages;
+
+	ret = dma_buf_fd(dmabuf, fd_flags);
+	if (ret < 0) {
+		dma_buf_put(dmabuf);
+		/* just return, as put will call release and that will free */
+		return ret;
+	}
+
+	return ret;
+
+free_pages:
+	kfree(helper_buffer->pages);
+free_cma:
+	cma_release(cma_heap->cma, cma_pages, nr_pages);
+free_buf:
+	kfree(helper_buffer);
+	return ret;
+}
+
+static const struct dma_heap_ops cma_heap_ops = {
+	.allocate = cma_heap_allocate,
+};
+
+static int __add_cma_heap(struct cma *cma, void *data)
+{
+	struct cma_heap *cma_heap;
+	struct dma_heap_export_info exp_info;
+
+	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
+	if (!cma_heap)
+		return -ENOMEM;
+	cma_heap->cma = cma;
+
+	exp_info.name = cma_get_name(cma);
+	exp_info.ops = &cma_heap_ops;
+	exp_info.priv = cma_heap;
+
+	cma_heap->heap = dma_heap_add(&exp_info);
+	if (IS_ERR(cma_heap->heap)) {
+		int ret = PTR_ERR(cma_heap->heap);
+
+		kfree(cma_heap);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int add_default_cma_heap(void)
+{
+	struct cma *default_cma = dev_get_cma_area(NULL);
+	int ret = 0;
+
+	if (default_cma)
+		ret = __add_cma_heap(default_cma, NULL);
+
+	return ret;
+}
+module_init(add_default_cma_heap);
+MODULE_DESCRIPTION("DMA-BUF CMA Heap");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c
new file mode 100644
index 0000000..9f964ca
--- /dev/null
+++ b/drivers/dma-buf/heaps/heap-helpers.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <uapi/linux/dma-heap.h>
+
+#include "heap-helpers.h"
+
+void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
+			     void (*free)(struct heap_helper_buffer *))
+{
+	buffer->priv_virt = NULL;
+	mutex_init(&buffer->lock);
+	buffer->vmap_cnt = 0;
+	buffer->vaddr = NULL;
+	buffer->pagecount = 0;
+	buffer->pages = NULL;
+	INIT_LIST_HEAD(&buffer->attachments);
+	buffer->free = free;
+}
+
+struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
+					  int fd_flags)
+{
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+	exp_info.ops = &heap_helper_ops;
+	exp_info.size = buffer->size;
+	exp_info.flags = fd_flags;
+	exp_info.priv = buffer;
+
+	return dma_buf_export(&exp_info);
+}
+
+static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
+{
+	void *vaddr;
+
+	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
+	if (!vaddr)
+		return ERR_PTR(-ENOMEM);
+
+	return vaddr;
+}
+
+static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer)
+{
+	if (buffer->vmap_cnt > 0) {
+		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
+		vunmap(buffer->vaddr);
+	}
+
+	buffer->free(buffer);
+}
+
+static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer)
+{
+	void *vaddr;
+
+	if (buffer->vmap_cnt) {
+		buffer->vmap_cnt++;
+		return buffer->vaddr;
+	}
+	vaddr = dma_heap_map_kernel(buffer);
+	if (IS_ERR(vaddr))
+		return vaddr;
+	buffer->vaddr = vaddr;
+	buffer->vmap_cnt++;
+	return vaddr;
+}
+
+static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer)
+{
+	if (!--buffer->vmap_cnt) {
+		vunmap(buffer->vaddr);
+		buffer->vaddr = NULL;
+	}
+}
+
+struct dma_heaps_attachment {
+	struct device *dev;
+	struct sg_table table;
+	struct list_head list;
+};
+
+static int dma_heap_attach(struct dma_buf *dmabuf,
+			   struct dma_buf_attachment *attachment)
+{
+	struct dma_heaps_attachment *a;
+	struct heap_helper_buffer *buffer = dmabuf->priv;
+	int ret;
+
+	a = kzalloc(sizeof(*a), GFP_KERNEL);
+	if (!a)
+		return -ENOMEM;
+
+	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
+					buffer->pagecount, 0,
+					buffer->pagecount << PAGE_SHIFT,
+					GFP_KERNEL);
+	if (ret) {
+		kfree(a);
+		return ret;
+	}
+
+	a->dev = attachment->dev;
+	INIT_LIST_HEAD(&a->list);
+
+	attachment->priv = a;
+
+	mutex_lock(&buffer->lock);
+	list_add(&a->list, &buffer->attachments);
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static void dma_heap_detach(struct dma_buf *dmabuf,
+			    struct dma_buf_attachment *attachment)
+{
+	struct dma_heaps_attachment *a = attachment->priv;
+	struct heap_helper_buffer *buffer = dmabuf->priv;
+
+	mutex_lock(&buffer->lock);
+	list_del(&a->list);
+	mutex_unlock(&buffer->lock);
+
+	sg_free_table(&a->table);
+	kfree(a);
+}
+
+static
+struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+				      enum dma_data_direction direction)
+{
+	struct dma_heaps_attachment *a = attachment->priv;
+	struct sg_table *table;
+
+	table = &a->table;
+
+	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
+			direction))
+		table = ERR_PTR(-ENOMEM);
+	return table;
+}
+
+static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				   struct sg_table *table,
+				   enum dma_data_direction direction)
+{
+	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
+}
+
+static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct heap_helper_buffer *buffer = vma->vm_private_data;
+
+	if (vmf->pgoff >= buffer->pagecount)
+		return VM_FAULT_SIGBUS;
+
+	vmf->page = buffer->pages[vmf->pgoff];
+	get_page(vmf->page);
+
+	return 0;
+}
+
+static const struct vm_operations_struct dma_heap_vm_ops = {
+	.fault = dma_heap_vm_fault,
+};
+
+static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct heap_helper_buffer *buffer = dmabuf->priv;
+
+	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+		return -EINVAL;
+
+	vma->vm_ops = &dma_heap_vm_ops;
+	vma->vm_private_data = buffer;
+
+	return 0;
+}
+
+static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+	struct heap_helper_buffer *buffer = dmabuf->priv;
+
+	dma_heap_buffer_destroy(buffer);
+}
+
+static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+					     enum dma_data_direction direction)
+{
+	struct heap_helper_buffer *buffer = dmabuf->priv;
+	struct dma_heaps_attachment *a;
+	int ret = 0;
+
+	mutex_lock(&buffer->lock);
+
+	if (buffer->vmap_cnt)
+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);
+
+	list_for_each_entry(a, &buffer->attachments, list) {
+		dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents,
+				    direction);
+	}
+	mutex_unlock(&buffer->lock);
+
+	return ret;
+}
+
+static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+					   enum dma_data_direction direction)
+{
+	struct heap_helper_buffer *buffer = dmabuf->priv;
+	struct dma_heaps_attachment *a;
+
+	mutex_lock(&buffer->lock);
+
+	if (buffer->vmap_cnt)
+		flush_kernel_vmap_range(buffer->vaddr, buffer->size);
+
+	list_for_each_entry(a, &buffer->attachments, list) {
+		dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents,
+				       direction);
+	}
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
+{
+	struct heap_helper_buffer *buffer = dmabuf->priv;
+	void *vaddr;
+
+	mutex_lock(&buffer->lock);
+	vaddr = dma_heap_buffer_vmap_get(buffer);
+	mutex_unlock(&buffer->lock);
+
+	return vaddr;
+}
+
+static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+	struct heap_helper_buffer *buffer = dmabuf->priv;
+
+	mutex_lock(&buffer->lock);
+	dma_heap_buffer_vmap_put(buffer);
+	mutex_unlock(&buffer->lock);
+}
+
+const struct dma_buf_ops heap_helper_ops = {
+	.map_dma_buf = dma_heap_map_dma_buf,
+	.unmap_dma_buf = dma_heap_unmap_dma_buf,
+	.mmap = dma_heap_mmap,
+	.release = dma_heap_dma_buf_release,
+	.attach = dma_heap_attach,
+	.detach = dma_heap_detach,
+	.begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
+	.end_cpu_access = dma_heap_dma_buf_end_cpu_access,
+	.vmap = dma_heap_dma_buf_vmap,
+	.vunmap = dma_heap_dma_buf_vunmap,
+};
diff --git a/drivers/dma-buf/heaps/heap-helpers.h b/drivers/dma-buf/heaps/heap-helpers.h
new file mode 100644
index 0000000..805d2df
--- /dev/null
+++ b/drivers/dma-buf/heaps/heap-helpers.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DMABUF Heaps helper code
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#ifndef _HEAP_HELPERS_H
+#define _HEAP_HELPERS_H
+
+#include <linux/dma-heap.h>
+#include <linux/list.h>
+
+/**
+ * struct heap_helper_buffer - helper buffer metadata
+ * @heap:		back pointer to the heap the buffer came from
+ * @dmabuf:		backing dma-buf for this buffer
+ * @size:		size of the buffer
+ * @priv_virt		pointer to heap specific private value
+ * @lock		mutex to protect the data in this structure
+ * @vmap_cnt		count of vmap references on the buffer
+ * @vaddr		vmap'ed virtual address
+ * @pagecount		number of pages in the buffer
+ * @pages		list of page pointers
+ * @attachments		list of device attachments
+ *
+ * @free		heap callback to free the buffer
+ */
+struct heap_helper_buffer {
+	struct dma_heap *heap;
+	struct dma_buf *dmabuf;
+	size_t size;
+
+	void *priv_virt;
+	struct mutex lock;
+	int vmap_cnt;
+	void *vaddr;
+	pgoff_t pagecount;
+	struct page **pages;
+	struct list_head attachments;
+
+	void (*free)(struct heap_helper_buffer *buffer);
+};
+
+void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
+			     void (*free)(struct heap_helper_buffer *));
+
+struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
+					  int fd_flags);
+
+extern const struct dma_buf_ops heap_helper_ops;
+#endif /* _HEAP_HELPERS_H */
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
new file mode 100644
index 0000000..0bf688e
--- /dev/null
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF System heap exporter
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-heap.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <asm/page.h>
+
+#include "heap-helpers.h"
+
+struct dma_heap *sys_heap;
+
+static void system_heap_free(struct heap_helper_buffer *buffer)
+{
+	pgoff_t pg;
+
+	for (pg = 0; pg < buffer->pagecount; pg++)
+		__free_page(buffer->pages[pg]);
+	kfree(buffer->pages);
+	kfree(buffer);
+}
+
+static int system_heap_allocate(struct dma_heap *heap,
+				unsigned long len,
+				unsigned long fd_flags,
+				unsigned long heap_flags)
+{
+	struct heap_helper_buffer *helper_buffer;
+	struct dma_buf *dmabuf;
+	int ret = -ENOMEM;
+	pgoff_t pg;
+
+	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
+	if (!helper_buffer)
+		return -ENOMEM;
+
+	init_heap_helper_buffer(helper_buffer, system_heap_free);
+	helper_buffer->heap = heap;
+	helper_buffer->size = len;
+
+	helper_buffer->pagecount = len / PAGE_SIZE;
+	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
+					     sizeof(*helper_buffer->pages),
+					     GFP_KERNEL);
+	if (!helper_buffer->pages) {
+		ret = -ENOMEM;
+		goto err0;
+	}
+
+	for (pg = 0; pg < helper_buffer->pagecount; pg++) {
+		/*
+		 * Avoid trying to allocate memory if the process
+		 * has been killed by SIGKILL
+		 */
+		if (fatal_signal_pending(current))
+			goto err1;
+
+		helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!helper_buffer->pages[pg])
+			goto err1;
+	}
+
+	/* create the dmabuf */
+	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+	if (IS_ERR(dmabuf)) {
+		ret = PTR_ERR(dmabuf);
+		goto err1;
+	}
+
+	helper_buffer->dmabuf = dmabuf;
+
+	ret = dma_buf_fd(dmabuf, fd_flags);
+	if (ret < 0) {
+		dma_buf_put(dmabuf);
+		/* just return, as put will call release and that will free */
+		return ret;
+	}
+
+	return ret;
+
+err1:
+	while (pg > 0)
+		__free_page(helper_buffer->pages[--pg]);
+	kfree(helper_buffer->pages);
+err0:
+	kfree(helper_buffer);
+
+	return ret;
+}
+
+static const struct dma_heap_ops system_heap_ops = {
+	.allocate = system_heap_allocate,
+};
+
+static int system_heap_create(void)
+{
+	struct dma_heap_export_info exp_info;
+	int ret = 0;
+
+	exp_info.name = "system";
+	exp_info.ops = &system_heap_ops;
+	exp_info.priv = NULL;
+
+	sys_heap = dma_heap_add(&exp_info);
+	if (IS_ERR(sys_heap))
+		ret = PTR_ERR(sys_heap);
+
+	return ret;
+}
+module_init(system_heap_create);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 9635897..acb26c6 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -18,6 +18,8 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */
 struct udmabuf {
 	pgoff_t pagecount;
 	struct page **pages;
+	struct sg_table *sg;
+	struct miscdevice *device;
 };
 
 static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
@@ -46,10 +48,10 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
 	return 0;
 }
 
-static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
-				    enum dma_data_direction direction)
+static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
+				     enum dma_data_direction direction)
 {
-	struct udmabuf *ubuf = at->dmabuf->priv;
+	struct udmabuf *ubuf = buf->priv;
 	struct sg_table *sg;
 	int ret;
 
@@ -61,7 +63,7 @@ static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
 					GFP_KERNEL);
 	if (ret < 0)
 		goto err;
-	if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction)) {
+	if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) {
 		ret = -EINVAL;
 		goto err;
 	}
@@ -73,54 +75,89 @@ static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
 	return ERR_PTR(ret);
 }
 
+static void put_sg_table(struct device *dev, struct sg_table *sg,
+			 enum dma_data_direction direction)
+{
+	dma_unmap_sg(dev, sg->sgl, sg->nents, direction);
+	sg_free_table(sg);
+	kfree(sg);
+}
+
+static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
+				    enum dma_data_direction direction)
+{
+	return get_sg_table(at->dev, at->dmabuf, direction);
+}
+
 static void unmap_udmabuf(struct dma_buf_attachment *at,
 			  struct sg_table *sg,
 			  enum dma_data_direction direction)
 {
-	dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
-	sg_free_table(sg);
-	kfree(sg);
+	return put_sg_table(at->dev, sg, direction);
 }
 
 static void release_udmabuf(struct dma_buf *buf)
 {
 	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
 	pgoff_t pg;
 
+	if (ubuf->sg)
+		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
+
 	for (pg = 0; pg < ubuf->pagecount; pg++)
 		put_page(ubuf->pages[pg]);
 	kfree(ubuf->pages);
 	kfree(ubuf);
 }
 
-static void *kmap_udmabuf(struct dma_buf *buf, unsigned long page_num)
+static int begin_cpu_udmabuf(struct dma_buf *buf,
+			     enum dma_data_direction direction)
 {
 	struct udmabuf *ubuf = buf->priv;
-	struct page *page = ubuf->pages[page_num];
+	struct device *dev = ubuf->device->this_device;
 
-	return kmap(page);
+	if (!ubuf->sg) {
+		ubuf->sg = get_sg_table(dev, buf, direction);
+		if (IS_ERR(ubuf->sg))
+			return PTR_ERR(ubuf->sg);
+	} else {
+		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
+				    direction);
+	}
+
+	return 0;
 }
 
-static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num,
-			   void *vaddr)
+static int end_cpu_udmabuf(struct dma_buf *buf,
+			   enum dma_data_direction direction)
 {
-	kunmap(vaddr);
+	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
+
+	if (!ubuf->sg)
+		return -EINVAL;
+
+	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+	return 0;
 }
 
 static const struct dma_buf_ops udmabuf_ops = {
-	.map_dma_buf	  = map_udmabuf,
-	.unmap_dma_buf	  = unmap_udmabuf,
-	.release	  = release_udmabuf,
-	.map		  = kmap_udmabuf,
-	.unmap		  = kunmap_udmabuf,
-	.mmap		  = mmap_udmabuf,
+	.cache_sgt_mapping = true,
+	.map_dma_buf	   = map_udmabuf,
+	.unmap_dma_buf	   = unmap_udmabuf,
+	.release	   = release_udmabuf,
+	.mmap		   = mmap_udmabuf,
+	.begin_cpu_access  = begin_cpu_udmabuf,
+	.end_cpu_access    = end_cpu_udmabuf,
 };
 
 #define SEALS_WANTED (F_SEAL_SHRINK)
 #define SEALS_DENIED (F_SEAL_WRITE)
 
-static long udmabuf_create(const struct udmabuf_create_list *head,
-			   const struct udmabuf_create_item *list)
+static long udmabuf_create(struct miscdevice *device,
+			   struct udmabuf_create_list *head,
+			   struct udmabuf_create_item *list)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	struct file *memfd = NULL;
@@ -187,6 +224,7 @@ static long udmabuf_create(const struct udmabuf_create_list *head,
 	exp_info.priv = ubuf;
 	exp_info.flags = O_RDWR;
 
+	ubuf->device = device;
 	buf = dma_buf_export(&exp_info);
 	if (IS_ERR(buf)) {
 		ret = PTR_ERR(buf);
@@ -224,7 +262,7 @@ static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
 	list.offset = create.offset;
 	list.size   = create.size;
 
-	return udmabuf_create(&head, &list);
+	return udmabuf_create(filp->private_data, &head, &list);
 }
 
 static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
@@ -243,7 +281,7 @@ static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
 	if (IS_ERR(list))
 		return PTR_ERR(list);
 
-	ret = udmabuf_create(&head, list);
+	ret = udmabuf_create(filp->private_data, &head, list);
 	kfree(list);
 	return ret;
 }
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index f3ef4ed..c3b1283 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -756,22 +756,21 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 	}
 	mutex_unlock(&dma_list_mutex);
 
-	if (!IS_ERR_OR_NULL(chan))
-		goto found;
-
-	return ERR_PTR(-EPROBE_DEFER);
+	if (IS_ERR_OR_NULL(chan))
+		return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 
 found:
-	chan->slave = dev;
 	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
 	if (!chan->name)
-		return ERR_PTR(-ENOMEM);
+		return chan;
+	chan->slave = dev;
 
 	if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
 			      DMA_SLAVE_NAME))
-		dev_err(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
+		dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
 	if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
-		dev_err(dev, "Cannot create DMA %s symlink\n", chan->name);
+		dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);
+
 	return chan;
 }
 EXPORT_SYMBOL_GPL(dma_request_chan);
@@ -830,13 +829,14 @@ void dma_release_channel(struct dma_chan *chan)
 	/* drop PRIVATE cap enabled by __dma_request_channel() */
 	if (--chan->device->privatecnt == 0)
 		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
+
 	if (chan->slave) {
+		sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
 		sysfs_remove_link(&chan->slave->kobj, chan->name);
 		kfree(chan->name);
 		chan->name = NULL;
 		chan->slave = NULL;
 	}
-	sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
 	mutex_unlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -962,6 +962,9 @@ static int __dma_async_device_channel_register(struct dma_device *device,
 
 	tchan = list_first_entry_or_null(&device->channels,
 					 struct dma_chan, device_node);
+	if (!tchan)
+		return -ENODEV;
+
 	if (tchan->dev) {
 		idr_ref = tchan->dev->idr_ref;
 	} else {
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 849c50a..6d907fe 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -66,7 +66,7 @@ static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
 
 static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
 {
-	return wq->type == IDXD_WQT_USER ? true : false;
+	return wq->type == IDXD_WQT_USER;
 }
 
 static int idxd_config_bus_match(struct device *dev,
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index e3850f0..157c959 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -750,7 +750,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 	}
 
 	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
+	if (PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
 		ret = EPROBE_DEFER;
 		goto disable_reg_clk;
 	}
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 6b6ba23..a014ab9 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -2,6 +2,7 @@
 /*
  * OMAP DMAengine support
  */
+#include <linux/cpu_pm.h>
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
@@ -23,12 +24,33 @@
 #define OMAP_SDMA_REQUESTS	127
 #define OMAP_SDMA_CHANNELS	32
 
+struct omap_dma_config {
+	int lch_end;
+	unsigned int rw_priority:1;
+	unsigned int needs_busy_check:1;
+	unsigned int may_lose_context:1;
+	unsigned int needs_lch_clear:1;
+};
+
+struct omap_dma_context {
+	u32 irqenable_l0;
+	u32 irqenable_l1;
+	u32 ocp_sysconfig;
+	u32 gcr;
+};
+
 struct omap_dmadev {
 	struct dma_device ddev;
 	spinlock_t lock;
 	void __iomem *base;
 	const struct omap_dma_reg *reg_map;
 	struct omap_system_dma_plat_info *plat;
+	const struct omap_dma_config *cfg;
+	struct notifier_block nb;
+	struct omap_dma_context context;
+	int lch_count;
+	DECLARE_BITMAP(lch_bitmap, OMAP_SDMA_CHANNELS);
+	struct mutex lch_lock;		/* for assigning logical channels */
 	bool legacy;
 	bool ll123_supported;
 	struct dma_pool *desc_pool;
@@ -376,6 +398,19 @@ static unsigned omap_dma_get_csr(struct omap_chan *c)
 	return val;
 }
 
+static void omap_dma_clear_lch(struct omap_dmadev *od, int lch)
+{
+	struct omap_chan *c;
+	int i;
+
+	c = od->lch_map[lch];
+	if (!c)
+		return;
+
+	for (i = CSDP; i <= od->cfg->lch_end; i++)
+		omap_dma_chan_write(c, i, 0);
+}
+
 static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
 	unsigned lch)
 {
@@ -633,6 +668,37 @@ static irqreturn_t omap_dma_irq(int irq, void *devid)
 	return IRQ_HANDLED;
 }
 
+static int omap_dma_get_lch(struct omap_dmadev *od, int *lch)
+{
+	int channel;
+
+	mutex_lock(&od->lch_lock);
+	channel = find_first_zero_bit(od->lch_bitmap, od->lch_count);
+	if (channel >= od->lch_count)
+		goto out_busy;
+	set_bit(channel, od->lch_bitmap);
+	mutex_unlock(&od->lch_lock);
+
+	omap_dma_clear_lch(od, channel);
+	*lch = channel;
+
+	return 0;
+
+out_busy:
+	mutex_unlock(&od->lch_lock);
+	*lch = -EINVAL;
+
+	return -EBUSY;
+}
+
+static void omap_dma_put_lch(struct omap_dmadev *od, int lch)
+{
+	omap_dma_clear_lch(od, lch);
+	mutex_lock(&od->lch_lock);
+	clear_bit(lch, od->lch_bitmap);
+	mutex_unlock(&od->lch_lock);
+}
+
 static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
@@ -644,8 +710,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
 		ret = omap_request_dma(c->dma_sig, "DMA engine",
 				       omap_dma_callback, c, &c->dma_ch);
 	} else {
-		ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
-				       &c->dma_ch);
+		ret = omap_dma_get_lch(od, &c->dma_ch);
 	}
 
 	dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);
@@ -702,7 +767,11 @@ static void omap_dma_free_chan_resources(struct dma_chan *chan)
 	c->channel_base = NULL;
 	od->lch_map[c->dma_ch] = NULL;
 	vchan_free_chan_resources(&c->vc);
-	omap_free_dma(c->dma_ch);
+
+	if (od->legacy)
+		omap_free_dma(c->dma_ch);
+	else
+		omap_dma_put_lch(od, c->dma_ch);
 
 	dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
 		c->dma_sig);
@@ -1453,16 +1522,128 @@ static void omap_dma_free(struct omap_dmadev *od)
 	}
 }
 
+/* Currently only used for omap2. For omap1, also a check for lcd_dma is needed */
+static int omap_dma_busy_notifier(struct notifier_block *nb,
+				  unsigned long cmd, void *v)
+{
+	struct omap_dmadev *od;
+	struct omap_chan *c;
+	int lch = -1;
+
+	od = container_of(nb, struct omap_dmadev, nb);
+
+	switch (cmd) {
+	case CPU_CLUSTER_PM_ENTER:
+		while (1) {
+			lch = find_next_bit(od->lch_bitmap, od->lch_count,
+					    lch + 1);
+			if (lch >= od->lch_count)
+				break;
+			c = od->lch_map[lch];
+			if (!c)
+				continue;
+			if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
+				return NOTIFY_BAD;
+		}
+		break;
+	case CPU_CLUSTER_PM_ENTER_FAILED:
+	case CPU_CLUSTER_PM_EXIT:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+/*
+ * We are using IRQENABLE_L1, and legacy DMA code was using IRQENABLE_L0.
+ * As the DSP may be using IRQENABLE_L2 and L3, let's not touch those for
+ * now. Context save seems to be only currently needed on omap3.
+ */
+static void omap_dma_context_save(struct omap_dmadev *od)
+{
+	od->context.irqenable_l0 = omap_dma_glbl_read(od, IRQENABLE_L0);
+	od->context.irqenable_l1 = omap_dma_glbl_read(od, IRQENABLE_L1);
+	od->context.ocp_sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
+	od->context.gcr = omap_dma_glbl_read(od, GCR);
+}
+
+static void omap_dma_context_restore(struct omap_dmadev *od)
+{
+	int i;
+
+	omap_dma_glbl_write(od, GCR, od->context.gcr);
+	omap_dma_glbl_write(od, OCP_SYSCONFIG, od->context.ocp_sysconfig);
+	omap_dma_glbl_write(od, IRQENABLE_L0, od->context.irqenable_l0);
+	omap_dma_glbl_write(od, IRQENABLE_L1, od->context.irqenable_l1);
+
+	/* Clear IRQSTATUS_L0 as legacy DMA code is no longer doing it */
+	if (od->plat->errata & DMA_ROMCODE_BUG)
+		omap_dma_glbl_write(od, IRQSTATUS_L0, 0);
+
+	/* Clear dma channels */
+	for (i = 0; i < od->lch_count; i++)
+		omap_dma_clear_lch(od, i);
+}
+
+/* Currently only used for omap3 */
+static int omap_dma_context_notifier(struct notifier_block *nb,
+				     unsigned long cmd, void *v)
+{
+	struct omap_dmadev *od;
+
+	od = container_of(nb, struct omap_dmadev, nb);
+
+	switch (cmd) {
+	case CPU_CLUSTER_PM_ENTER:
+		omap_dma_context_save(od);
+		break;
+	case CPU_CLUSTER_PM_ENTER_FAILED:
+	case CPU_CLUSTER_PM_EXIT:
+		omap_dma_context_restore(od);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static void omap_dma_init_gcr(struct omap_dmadev *od, int arb_rate,
+			      int max_fifo_depth, int tparams)
+{
+	u32 val;
+
+	/* Set only for omap2430 and later */
+	if (!od->cfg->rw_priority)
+		return;
+
+	if (max_fifo_depth == 0)
+		max_fifo_depth = 1;
+	if (arb_rate == 0)
+		arb_rate = 1;
+
+	val = 0xff & max_fifo_depth;
+	val |= (0x3 & tparams) << 12;
+	val |= (arb_rate & 0xff) << 16;
+
+	omap_dma_glbl_write(od, GCR, val);
+}
+
 #define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
 				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
 				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
+/*
+ * No flags currently set for default configuration as omap1 is still
+ * using platform data.
+ */
+static const struct omap_dma_config default_cfg;
+
 static int omap_dma_probe(struct platform_device *pdev)
 {
+	const struct omap_dma_config *conf;
 	struct omap_dmadev *od;
 	struct resource *res;
 	int rc, i, irq;
-	u32 lch_count;
+	u32 val;
 
 	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
 	if (!od)
@@ -1473,9 +1654,21 @@ static int omap_dma_probe(struct platform_device *pdev)
 	if (IS_ERR(od->base))
 		return PTR_ERR(od->base);
 
-	od->plat = omap_get_plat_info();
-	if (!od->plat)
-		return -EPROBE_DEFER;
+	conf = of_device_get_match_data(&pdev->dev);
+	if (conf) {
+		od->cfg = conf;
+		od->plat = dev_get_platdata(&pdev->dev);
+		if (!od->plat) {
+			dev_err(&pdev->dev, "omap_system_dma_plat_info is missing");
+			return -ENODEV;
+		}
+	} else {
+		od->cfg = &default_cfg;
+
+		od->plat = omap_get_plat_info();
+		if (!od->plat)
+			return -EPROBE_DEFER;
+	}
 
 	od->reg_map = od->plat->reg_map;
 
@@ -1507,6 +1700,7 @@ static int omap_dma_probe(struct platform_device *pdev)
 	od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
+	mutex_init(&od->lch_lock);
 	spin_lock_init(&od->lock);
 	spin_lock_init(&od->irq_lock);
 
@@ -1522,18 +1716,30 @@ static int omap_dma_probe(struct platform_device *pdev)
 
 	/* Number of available logical channels */
 	if (!pdev->dev.of_node) {
-		lch_count = od->plat->dma_attr->lch_count;
-		if (unlikely(!lch_count))
-			lch_count = OMAP_SDMA_CHANNELS;
+		od->lch_count = od->plat->dma_attr->lch_count;
+		if (unlikely(!od->lch_count))
+			od->lch_count = OMAP_SDMA_CHANNELS;
 	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
-					&lch_count)) {
+					&od->lch_count)) {
 		dev_info(&pdev->dev,
 			 "Missing dma-channels property, using %u.\n",
 			 OMAP_SDMA_CHANNELS);
-		lch_count = OMAP_SDMA_CHANNELS;
+		od->lch_count = OMAP_SDMA_CHANNELS;
 	}
 
-	od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
+	/* Mask of allowed logical channels */
+	if (pdev->dev.of_node && !of_property_read_u32(pdev->dev.of_node,
+						       "dma-channel-mask",
+						       &val)) {
+		/* Tag channels not in mask as reserved */
+		val = ~val;
+		bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count);
+	}
+	if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED)
+		bitmap_set(od->lch_bitmap, 0, 2);
+
+	od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count,
+				   sizeof(*od->lch_map),
 				   GFP_KERNEL);
 	if (!od->lch_map)
 		return -ENOMEM;
@@ -1605,6 +1811,16 @@ static int omap_dma_probe(struct platform_device *pdev)
 		}
 	}
 
+	omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0);
+
+	if (od->cfg->needs_busy_check) {
+		od->nb.notifier_call = omap_dma_busy_notifier;
+		cpu_pm_register_notifier(&od->nb);
+	} else if (od->cfg->may_lose_context) {
+		od->nb.notifier_call = omap_dma_context_notifier;
+		cpu_pm_register_notifier(&od->nb);
+	}
+
 	dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
 		 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");
 
@@ -1616,6 +1832,9 @@ static int omap_dma_remove(struct platform_device *pdev)
 	struct omap_dmadev *od = platform_get_drvdata(pdev);
 	int irq;
 
+	if (od->cfg->may_lose_context)
+		cpu_pm_unregister_notifier(&od->nb);
+
 	if (pdev->dev.of_node)
 		of_dma_controller_free(pdev->dev.of_node);
 
@@ -1637,12 +1856,45 @@ static int omap_dma_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct omap_dma_config omap2420_data = {
+	.lch_end = CCFN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+	.needs_busy_check = true,
+};
+
+static const struct omap_dma_config omap2430_data = {
+	.lch_end = CCFN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+};
+
+static const struct omap_dma_config omap3430_data = {
+	.lch_end = CCFN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+	.may_lose_context = true,
+};
+
+static const struct omap_dma_config omap3630_data = {
+	.lch_end = CCDN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+	.may_lose_context = true,
+};
+
+static const struct omap_dma_config omap4_data = {
+	.lch_end = CCDN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+};
+
 static const struct of_device_id omap_dma_match[] = {
-	{ .compatible = "ti,omap2420-sdma", },
-	{ .compatible = "ti,omap2430-sdma", },
-	{ .compatible = "ti,omap3430-sdma", },
-	{ .compatible = "ti,omap3630-sdma", },
-	{ .compatible = "ti,omap4430-sdma", },
+	{ .compatible = "ti,omap2420-sdma", .data = &omap2420_data, },
+	{ .compatible = "ti,omap2430-sdma", .data = &omap2430_data, },
+	{ .compatible = "ti,omap3430-sdma", .data = &omap3430_data, },
+	{ .compatible = "ti,omap3630-sdma", .data = &omap3630_data, },
+	{ .compatible = "ti,omap4430-sdma", .data = &omap4_data, },
 	{},
 };
 MODULE_DEVICE_TABLE(of, omap_dma_match);
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index e970134..7401733d 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -77,8 +77,6 @@ struct arizona_extcon_info {
 	const struct arizona_micd_range *micd_ranges;
 	int num_micd_ranges;
 
-	int micd_timeout;
-
 	bool micd_reva;
 	bool micd_clamp;
 
@@ -310,9 +308,13 @@ static void arizona_start_mic(struct arizona_extcon_info *info)
 	}
 
 	if (info->micd_reva) {
-		regmap_write(arizona->regmap, 0x80, 0x3);
-		regmap_write(arizona->regmap, 0x294, 0);
-		regmap_write(arizona->regmap, 0x80, 0x0);
+		const struct reg_sequence reva[] = {
+			{ 0x80,  0x3 },
+			{ 0x294, 0x0 },
+			{ 0x80,  0x0 },
+		};
+
+		regmap_multi_reg_write(arizona->regmap, reva, ARRAY_SIZE(reva));
 	}
 
 	if (info->detecting && arizona->pdata.micd_software_compare)
@@ -361,9 +363,13 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
 	snd_soc_dapm_sync(dapm);
 
 	if (info->micd_reva) {
-		regmap_write(arizona->regmap, 0x80, 0x3);
-		regmap_write(arizona->regmap, 0x294, 2);
-		regmap_write(arizona->regmap, 0x80, 0x0);
+		const struct reg_sequence reva[] = {
+			{ 0x80,  0x3 },
+			{ 0x294, 0x2 },
+			{ 0x80,  0x0 },
+		};
+
+		regmap_multi_reg_write(arizona->regmap, reva, ARRAY_SIZE(reva));
 	}
 
 	ret = regulator_allow_bypass(info->micvdd, true);
@@ -527,67 +533,65 @@ static int arizona_hpdet_do_id(struct arizona_extcon_info *info, int *reading,
 	struct arizona *arizona = info->arizona;
 	int id_gpio = arizona->pdata.hpdet_id_gpio;
 
+	if (!arizona->pdata.hpdet_acc_id)
+		return 0;
+
 	/*
 	 * If we're using HPDET for accessory identification we need
 	 * to take multiple measurements, step through them in sequence.
 	 */
-	if (arizona->pdata.hpdet_acc_id) {
-		info->hpdet_res[info->num_hpdet_res++] = *reading;
+	info->hpdet_res[info->num_hpdet_res++] = *reading;
 
-		/* Only check the mic directly if we didn't already ID it */
-		if (id_gpio && info->num_hpdet_res == 1) {
-			dev_dbg(arizona->dev, "Measuring mic\n");
+	/* Only check the mic directly if we didn't already ID it */
+	if (id_gpio && info->num_hpdet_res == 1) {
+		dev_dbg(arizona->dev, "Measuring mic\n");
 
-			regmap_update_bits(arizona->regmap,
-					   ARIZONA_ACCESSORY_DETECT_MODE_1,
-					   ARIZONA_ACCDET_MODE_MASK |
-					   ARIZONA_ACCDET_SRC,
-					   ARIZONA_ACCDET_MODE_HPR |
-					   info->micd_modes[0].src);
-
-			gpio_set_value_cansleep(id_gpio, 1);
-
-			regmap_update_bits(arizona->regmap,
-					   ARIZONA_HEADPHONE_DETECT_1,
-					   ARIZONA_HP_POLL, ARIZONA_HP_POLL);
-			return -EAGAIN;
-		}
-
-		/* OK, got both.  Now, compare... */
-		dev_dbg(arizona->dev, "HPDET measured %d %d\n",
-			info->hpdet_res[0], info->hpdet_res[1]);
-
-		/* Take the headphone impedance for the main report */
-		*reading = info->hpdet_res[0];
-
-		/* Sometimes we get false readings due to slow insert */
-		if (*reading >= ARIZONA_HPDET_MAX && !info->hpdet_retried) {
-			dev_dbg(arizona->dev, "Retrying high impedance\n");
-			info->num_hpdet_res = 0;
-			info->hpdet_retried = true;
-			arizona_start_hpdet_acc_id(info);
-			pm_runtime_put(info->dev);
-			return -EAGAIN;
-		}
-
-		/*
-		 * If we measure the mic as high impedance
-		 */
-		if (!id_gpio || info->hpdet_res[1] > 50) {
-			dev_dbg(arizona->dev, "Detected mic\n");
-			*mic = true;
-			info->detecting = true;
-		} else {
-			dev_dbg(arizona->dev, "Detected headphone\n");
-		}
-
-		/* Make sure everything is reset back to the real polarity */
 		regmap_update_bits(arizona->regmap,
 				   ARIZONA_ACCESSORY_DETECT_MODE_1,
+				   ARIZONA_ACCDET_MODE_MASK |
 				   ARIZONA_ACCDET_SRC,
+				   ARIZONA_ACCDET_MODE_HPR |
 				   info->micd_modes[0].src);
+
+		gpio_set_value_cansleep(id_gpio, 1);
+
+		regmap_update_bits(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+				   ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+		return -EAGAIN;
 	}
 
+	/* OK, got both.  Now, compare... */
+	dev_dbg(arizona->dev, "HPDET measured %d %d\n",
+		info->hpdet_res[0], info->hpdet_res[1]);
+
+	/* Take the headphone impedance for the main report */
+	*reading = info->hpdet_res[0];
+
+	/* Sometimes we get false readings due to slow insert */
+	if (*reading >= ARIZONA_HPDET_MAX && !info->hpdet_retried) {
+		dev_dbg(arizona->dev, "Retrying high impedance\n");
+		info->num_hpdet_res = 0;
+		info->hpdet_retried = true;
+		arizona_start_hpdet_acc_id(info);
+		pm_runtime_put(info->dev);
+		return -EAGAIN;
+	}
+
+	/*
+	 * If we measure the mic as high impedance
+	 */
+	if (!id_gpio || info->hpdet_res[1] > 50) {
+		dev_dbg(arizona->dev, "Detected mic\n");
+		*mic = true;
+		info->detecting = true;
+	} else {
+		dev_dbg(arizona->dev, "Detected headphone\n");
+	}
+
+	/* Make sure everything is reset back to the real polarity */
+	regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
+			   ARIZONA_ACCDET_SRC, info->micd_modes[0].src);
+
 	return 0;
 }
 
@@ -662,11 +666,6 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
 	if (id_gpio)
 		gpio_set_value_cansleep(id_gpio, 0);
 
-	/* Revert back to MICDET mode */
-	regmap_update_bits(arizona->regmap,
-			   ARIZONA_ACCESSORY_DETECT_MODE_1,
-			   ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
-
 	/* If we have a mic then reenable MICDET */
 	if (mic || info->mic)
 		arizona_start_mic(info);
@@ -699,8 +698,7 @@ static void arizona_identify_headphone(struct arizona_extcon_info *info)
 
 	info->hpdet_active = true;
 
-	if (info->mic)
-		arizona_stop_mic(info);
+	arizona_stop_mic(info);
 
 	arizona_extcon_hp_clamp(info, true);
 
@@ -724,8 +722,8 @@ static void arizona_identify_headphone(struct arizona_extcon_info *info)
 	return;
 
 err:
-	regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
-			   ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+	arizona_extcon_hp_clamp(info, false);
+	pm_runtime_put_autosuspend(info->dev);
 
 	/* Just report headphone */
 	ret = extcon_set_state_sync(info->edev, EXTCON_JACK_HEADPHONE, true);
@@ -781,9 +779,6 @@ static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info)
 	return;
 
 err:
-	regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
-			   ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
-
 	/* Just report headphone */
 	ret = extcon_set_state_sync(info->edev, EXTCON_JACK_HEADPHONE, true);
 	if (ret != 0)
@@ -806,75 +801,58 @@ static void arizona_micd_timeout_work(struct work_struct *work)
 
 	arizona_identify_headphone(info);
 
-	arizona_stop_mic(info);
-
 	mutex_unlock(&info->lock);
 }
 
-static void arizona_micd_detect(struct work_struct *work)
+static int arizona_micd_adc_read(struct arizona_extcon_info *info)
 {
-	struct arizona_extcon_info *info = container_of(work,
-						struct arizona_extcon_info,
-						micd_detect_work.work);
 	struct arizona *arizona = info->arizona;
-	unsigned int val = 0, lvl;
-	int ret, i, key;
+	unsigned int val;
+	int ret;
 
-	cancel_delayed_work_sync(&info->micd_timeout_work);
+	/* Must disable MICD before we read the ADCVAL */
+	regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+			   ARIZONA_MICD_ENA, 0);
 
-	mutex_lock(&info->lock);
-
-	/* If the cable was removed while measuring ignore the result */
-	ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
-	if (ret < 0) {
-		dev_err(arizona->dev, "Failed to check cable state: %d\n",
-				ret);
-		mutex_unlock(&info->lock);
-		return;
-	} else if (!ret) {
-		dev_dbg(arizona->dev, "Ignoring MICDET for removed cable\n");
-		mutex_unlock(&info->lock);
-		return;
+	ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_4, &val);
+	if (ret != 0) {
+		dev_err(arizona->dev,
+			"Failed to read MICDET_ADCVAL: %d\n", ret);
+		return ret;
 	}
 
-	if (info->detecting && arizona->pdata.micd_software_compare) {
-		/* Must disable MICD before we read the ADCVAL */
-		regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
-				   ARIZONA_MICD_ENA, 0);
-		ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_4, &val);
-		if (ret != 0) {
-			dev_err(arizona->dev,
-				"Failed to read MICDET_ADCVAL: %d\n",
-				ret);
-			mutex_unlock(&info->lock);
-			return;
-		}
+	dev_dbg(arizona->dev, "MICDET_ADCVAL: %x\n", val);
 
-		dev_dbg(arizona->dev, "MICDET_ADCVAL: %x\n", val);
+	val &= ARIZONA_MICDET_ADCVAL_MASK;
+	if (val < ARRAY_SIZE(arizona_micd_levels))
+		val = arizona_micd_levels[val];
+	else
+		val = INT_MAX;
 
-		val &= ARIZONA_MICDET_ADCVAL_MASK;
-		if (val < ARRAY_SIZE(arizona_micd_levels))
-			val = arizona_micd_levels[val];
-		else
-			val = INT_MAX;
+	if (val <= QUICK_HEADPHONE_MAX_OHM)
+		val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_0;
+	else if (val <= MICROPHONE_MIN_OHM)
+		val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_1;
+	else if (val <= MICROPHONE_MAX_OHM)
+		val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_8;
+	else
+		val = ARIZONA_MICD_LVL_8;
 
-		if (val <= QUICK_HEADPHONE_MAX_OHM)
-			val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_0;
-		else if (val <= MICROPHONE_MIN_OHM)
-			val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_1;
-		else if (val <= MICROPHONE_MAX_OHM)
-			val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_8;
-		else
-			val = ARIZONA_MICD_LVL_8;
-	}
+	return val;
+}
+
+static int arizona_micd_read(struct arizona_extcon_info *info)
+{
+	struct arizona *arizona = info->arizona;
+	unsigned int val = 0;
+	int ret, i;
 
 	for (i = 0; i < 10 && !(val & MICD_LVL_0_TO_8); i++) {
 		ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
 		if (ret != 0) {
 			dev_err(arizona->dev,
 				"Failed to read MICDET: %d\n", ret);
-			mutex_unlock(&info->lock);
-			return;
+			return ret;
 		}
 
 		dev_dbg(arizona->dev, "MICDET: %x\n", val);
@@ -882,29 +860,44 @@ static void arizona_micd_detect(struct work_struct *work)
 		if (!(val & ARIZONA_MICD_VALID)) {
 			dev_warn(arizona->dev,
 				 "Microphone detection state invalid\n");
-			mutex_unlock(&info->lock);
-			return;
+			return -EINVAL;
 		}
 	}
 
 	if (i == 10 && !(val & MICD_LVL_0_TO_8)) {
 		dev_err(arizona->dev, "Failed to get valid MICDET value\n");
-		mutex_unlock(&info->lock);
-		return;
+		return -EINVAL;
 	}
 
+	return val;
+}
+
+static int arizona_micdet_reading(void *priv)
+{
+	struct arizona_extcon_info *info = priv;
+	struct arizona *arizona = info->arizona;
+	int ret, val;
+
+	if (info->detecting && arizona->pdata.micd_software_compare)
+		ret = arizona_micd_adc_read(info);
+	else
+		ret = arizona_micd_read(info);
+	if (ret < 0)
+		return ret;
+
+	val = ret;
+
 	/* Due to jack detect this should never happen */
 	if (!(val & ARIZONA_MICD_STS)) {
 		dev_warn(arizona->dev, "Detected open circuit\n");
 		info->mic = false;
-		arizona_stop_mic(info);
 		info->detecting = false;
 		arizona_identify_headphone(info);
-		goto handled;
+		return 0;
 	}
 
 	/* If we got a high impedence we should have a headset, report it. */
-	if (info->detecting && (val & ARIZONA_MICD_LVL_8)) {
+	if (val & ARIZONA_MICD_LVL_8) {
 		info->mic = true;
 		info->detecting = false;
 
@@ -923,7 +916,7 @@ static void arizona_micd_detect(struct work_struct *work)
 				ret);
 		}
 
-		goto handled;
+		return 0;
 	}
 
 	/* If we detected a lower impedence during initial startup
@@ -932,15 +925,13 @@ static void arizona_micd_detect(struct work_struct *work)
 	 * plain headphones.  If both polarities report a low
 	 * impedence then give up and report headphones.
 	 */
-	if (info->detecting && (val & MICD_LVL_1_TO_7)) {
+	if (val & MICD_LVL_1_TO_7) {
 		if (info->jack_flips >= info->micd_num_modes * 10) {
 			dev_dbg(arizona->dev, "Detected HP/line\n");
 
 			info->detecting = false;
 
 			arizona_identify_headphone(info);
-
-			arizona_stop_mic(info);
 		} else {
 			info->micd_mode++;
 			if (info->micd_mode == info->micd_num_modes)
@@ -948,13 +939,45 @@ static void arizona_micd_detect(struct work_struct *work)
 			arizona_extcon_set_mode(info, info->micd_mode);
 
 			info->jack_flips++;
+
+			if (arizona->pdata.micd_software_compare)
+				regmap_update_bits(arizona->regmap,
+						   ARIZONA_MIC_DETECT_1,
+						   ARIZONA_MICD_ENA,
+						   ARIZONA_MICD_ENA);
+
+			queue_delayed_work(system_power_efficient_wq,
+					   &info->micd_timeout_work,
+					   msecs_to_jiffies(arizona->pdata.micd_timeout));
 		}
 
-		goto handled;
+		return 0;
 	}
 
 	/*
 	 * If we're still detecting and we detect a short then we've
+	 * got a headphone.
+	 */
+	dev_dbg(arizona->dev, "Headphone detected\n");
+	info->detecting = false;
+
+	arizona_identify_headphone(info);
+
+	return 0;
+}
+
+static int arizona_button_reading(void *priv)
+{
+	struct arizona_extcon_info *info = priv;
+	struct arizona *arizona = info->arizona;
+	int val, key, lvl, i;
+
+	val = arizona_micd_read(info);
+	if (val < 0)
+		return val;
+
+	/*
+	 * If we're still detecting and we detect a short then we've
 	 * got a headphone.  Otherwise it's a button press.
 	 */
 	if (val & MICD_LVL_0_TO_7) {
@@ -968,20 +991,13 @@ static void arizona_micd_detect(struct work_struct *work)
 				input_report_key(info->input,
 						 info->micd_ranges[i].key, 0);
 
-			WARN_ON(!lvl);
-			WARN_ON(ffs(lvl) - 1 >= info->num_micd_ranges);
 			if (lvl && ffs(lvl) - 1 < info->num_micd_ranges) {
 				key = info->micd_ranges[ffs(lvl) - 1].key;
 				input_report_key(info->input, key, 1);
 				input_sync(info->input);
+			} else {
+				dev_err(arizona->dev, "Button out of range\n");
 			}
-
-		} else if (info->detecting) {
-			dev_dbg(arizona->dev, "Headphone detected\n");
-			info->detecting = false;
-			arizona_stop_mic(info);
-
-			arizona_identify_headphone(info);
 		} else {
 			dev_warn(arizona->dev, "Button with no mic: %x\n",
 				 val);
@@ -995,19 +1011,39 @@ static void arizona_micd_detect(struct work_struct *work)
 		arizona_extcon_pulse_micbias(info);
 	}
 
-handled:
-	if (info->detecting) {
-		if (arizona->pdata.micd_software_compare)
-			regmap_update_bits(arizona->regmap,
-					   ARIZONA_MIC_DETECT_1,
-					   ARIZONA_MICD_ENA,
-					   ARIZONA_MICD_ENA);
+	return 0;
+}
 
-		queue_delayed_work(system_power_efficient_wq,
-				   &info->micd_timeout_work,
-				   msecs_to_jiffies(info->micd_timeout));
+static void arizona_micd_detect(struct work_struct *work)
+{
+	struct arizona_extcon_info *info = container_of(work,
+						struct arizona_extcon_info,
+						micd_detect_work.work);
+	struct arizona *arizona = info->arizona;
+	int ret;
+
+	cancel_delayed_work_sync(&info->micd_timeout_work);
+
+	mutex_lock(&info->lock);
+
+	/* If the cable was removed while measuring ignore the result */
+	ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
+	if (ret < 0) {
+		dev_err(arizona->dev, "Failed to check cable state: %d\n",
+				ret);
+		mutex_unlock(&info->lock);
+		return;
+	} else if (!ret) {
+		dev_dbg(arizona->dev, "Ignoring MICDET for removed cable\n");
+		mutex_unlock(&info->lock);
+		return;
 	}
 
+	if (info->detecting)
+		arizona_micdet_reading(info);
+	else
+		arizona_button_reading(info);
+
 	pm_runtime_mark_last_busy(info->dev);
 	mutex_unlock(&info->lock);
 }
@@ -1125,7 +1161,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
 					   msecs_to_jiffies(HPDET_DEBOUNCE));
 
 		if (cancelled_mic) {
-			int micd_timeout = info->micd_timeout;
+			int micd_timeout = arizona->pdata.micd_timeout;
 
 			queue_delayed_work(system_power_efficient_wq,
 					   &info->micd_timeout_work,
@@ -1145,11 +1181,11 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
 			dev_err(arizona->dev, "Mechanical report failed: %d\n",
 				ret);
 
-		if (!arizona->pdata.hpdet_acc_id) {
-			info->detecting = true;
-			info->mic = false;
-			info->jack_flips = 0;
+		info->detecting = true;
+		info->mic = false;
+		info->jack_flips = 0;
 
+		if (!arizona->pdata.hpdet_acc_id) {
 			arizona_start_mic(info);
 		} else {
 			queue_delayed_work(system_power_efficient_wq,
@@ -1202,11 +1238,6 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
 				   ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB);
 	}
 
-	if (arizona->pdata.micd_timeout)
-		info->micd_timeout = arizona->pdata.micd_timeout;
-	else
-		info->micd_timeout = DEFAULT_MICD_TIMEOUT;
-
 out:
 	/* Clear trig_sts to make sure DCVDD is not forced up */
 	regmap_write(arizona->regmap, ARIZONA_AOD_WKUP_AND_TRIG,
@@ -1435,6 +1466,9 @@ static int arizona_extcon_probe(struct platform_device *pdev)
 	info->input->name = "Headset";
 	info->input->phys = "arizona/extcon";
 
+	if (!pdata->micd_timeout)
+		pdata->micd_timeout = DEFAULT_MICD_TIMEOUT;
+
 	if (pdata->num_micd_configs) {
 		info->micd_modes = pdata->micd_configs;
 		info->micd_num_modes = pdata->num_micd_configs;
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index bcf65aa..106d4da 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -249,7 +249,7 @@ static int sm5502_muic_set_path(struct sm5502_muic_info *info,
 		dev_err(info->dev, "Unknown DM_CON/DP_CON switch type (%d)\n",
 				con_sw);
 		return -EINVAL;
-	};
+	}
 
 	switch (vbus_sw) {
 	case VBUSIN_SWITCH_OPEN:
@@ -268,7 +268,7 @@ static int sm5502_muic_set_path(struct sm5502_muic_info *info,
 	default:
 		dev_err(info->dev, "Unknown VBUS switch type (%d)\n", vbus_sw);
 		return -EINVAL;
-	};
+	}
 
 	return 0;
 }
@@ -357,13 +357,13 @@ static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
 				"cannot identify the cable type: adc(0x%x)\n",
 				adc);
 			return -EINVAL;
-		};
+		}
 		break;
 	default:
 		dev_err(info->dev,
 			"failed to identify the cable type: adc(0x%x)\n", adc);
 		return -EINVAL;
-	};
+	}
 
 	return cable_type;
 }
@@ -405,7 +405,7 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
 		dev_dbg(info->dev,
 			"cannot handle this cable_type (0x%x)\n", cable_type);
 		return 0;
-	};
+	}
 
 	/* Change internal hardware path(DM_CON/DP_CON, VBUSIN) */
 	ret = sm5502_muic_set_path(info, con_sw, vbus_sw, attached);
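
The drivers/extcon/extcon-sm5502.c hunks above only drop stray semicolons after the closing braces of switch statements. A minimal standalone sketch (hypothetical function names) of why they are redundant: the switch is already a complete statement, so the extra ';' forms an empty statement that static checkers may flag.

	#include <stdio.h>

	static const char *switch_demo(int vbus_sw)
	{
		switch (vbus_sw) {
		case 0:
			return "open";
		default:
			return "unknown";
		}	/* no trailing ';' needed: the switch is a full statement */
	}

	int main(void)
	{
		printf("%s\n", switch_demo(0));
		return 0;
	}
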
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index e40a77b..ea869ad 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -239,14 +239,6 @@
 	depends on ARM || ARM64
 	select RESET_CONTROLLER
 
-config QCOM_SCM_32
-	def_bool y
-	depends on QCOM_SCM && ARM
-
-config QCOM_SCM_64
-	def_bool y
-	depends on QCOM_SCM && ARM64
-
 config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
 	bool "Qualcomm download mode enabled by default"
 	depends on QCOM_SCM
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 3fcb919..e9fb838 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -17,10 +17,7 @@
 obj-$(CONFIG_FIRMWARE_MEMMAP)	+= memmap.o
 obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
 obj-$(CONFIG_FW_CFG_SYSFS)	+= qemu_fw_cfg.o
-obj-$(CONFIG_QCOM_SCM)		+= qcom_scm.o
-obj-$(CONFIG_QCOM_SCM_64)	+= qcom_scm-64.o
-obj-$(CONFIG_QCOM_SCM_32)	+= qcom_scm-32.o
-CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
+obj-$(CONFIG_QCOM_SCM)		+= qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
 obj-$(CONFIG_TI_SCI_PROTOCOL)	+= ti_sci.o
 obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
 obj-$(CONFIG_TURRIS_MOX_RWTM)	+= turris-mox-rwtm.o
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 7a30952..db55c43 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -28,8 +28,12 @@ scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
 		return NULL;
 
 	for (; id->protocol_id; id++)
-		if (id->protocol_id == scmi_dev->protocol_id)
-			return id;
+		if (id->protocol_id == scmi_dev->protocol_id) {
+			if (!id->name)
+				return id;
+			else if (!strcmp(id->name, scmi_dev->name))
+				return id;
+		}
 
 	return NULL;
 }
@@ -56,6 +60,11 @@ static int scmi_protocol_init(int protocol_id, struct scmi_handle *handle)
 	return fn(handle);
 }
 
+static int scmi_protocol_dummy_init(struct scmi_handle *handle)
+{
+	return 0;
+}
+
 static int scmi_dev_probe(struct device *dev)
 {
 	struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
@@ -74,6 +83,10 @@ static int scmi_dev_probe(struct device *dev)
 	if (ret)
 		return ret;
 
+	/* Skip protocol initialisation for additional devices */
+	idr_replace(&scmi_protocols, &scmi_protocol_dummy_init,
+		    scmi_dev->protocol_id);
+
 	return scmi_drv->probe(scmi_dev);
 }
 
@@ -125,7 +138,8 @@ static void scmi_device_release(struct device *dev)
 }
 
 struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol)
+scmi_device_create(struct device_node *np, struct device *parent, int protocol,
+		   const char *name)
 {
 	int id, retval;
 	struct scmi_device *scmi_dev;
@@ -134,8 +148,15 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
 	if (!scmi_dev)
 		return NULL;
 
+	scmi_dev->name = kstrdup_const(name ?: "unknown", GFP_KERNEL);
+	if (!scmi_dev->name) {
+		kfree(scmi_dev);
+		return NULL;
+	}
+
 	id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL);
 	if (id < 0) {
+		kfree_const(scmi_dev->name);
 		kfree(scmi_dev);
 		return NULL;
 	}
@@ -154,6 +175,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
 
 	return scmi_dev;
 put_dev:
+	kfree_const(scmi_dev->name);
 	put_device(&scmi_dev->dev);
 	ida_simple_remove(&scmi_bus_id, id);
 	return NULL;
@@ -161,6 +183,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
 
 void scmi_device_destroy(struct scmi_device *scmi_dev)
 {
+	kfree_const(scmi_dev->name);
 	scmi_handle_put(scmi_dev->handle);
 	ida_simple_remove(&scmi_bus_id, scmi_dev->id);
 	device_unregister(&scmi_dev->dev);
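
With the name-aware scmi_dev_match_id() above, a single protocol can back several scmi_devices and each driver binds to one of them by name. Below is a sketch of an id table for a hypothetical driver that should bind only the "cpufreq" device of the performance protocol; it assumes the scmi_device_id layout from <linux/scmi_protocol.h> and mirrors the scmi_pm_domain.c change later in this series, with the driver body omitted.

	#include <linux/module.h>
	#include <linux/scmi_protocol.h>

	/* Bind only the "cpufreq" device created for SCMI_PROTOCOL_PERF;
	 * the name must match an entry of devnames[] in driver.c. */
	static const struct scmi_device_id scmi_id_table[] = {
		{ SCMI_PROTOCOL_PERF, "cpufreq" },
		{ },
	};
	MODULE_DEVICE_TABLE(scmi, scmi_id_table);
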
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 32526a7..4c22276 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -65,6 +65,7 @@ struct scmi_clock_set_rate {
 };
 
 struct clock_info {
+	u32 version;
 	int num_clocks;
 	int max_async_req;
 	atomic_t cur_async_req;
@@ -340,6 +341,7 @@ static int scmi_clock_protocol_init(struct scmi_handle *handle)
 			scmi_clock_describe_rates_get(handle, clkid, clk);
 	}
 
+	cinfo->version = version;
 	handle->clk_ops = &clk_ops;
 	handle->clk_priv = cinfo;
 
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 5237c2ff..df35358 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -81,6 +81,7 @@ struct scmi_msg {
 /**
  * struct scmi_xfer - Structure representing a message flow
  *
+ * @transfer_id: Unique ID for debug & profiling purpose
  * @hdr: Transmit message header
  * @tx: Transmit message
  * @rx: Receive message, the buffer should be pre-allocated to store
@@ -90,6 +91,7 @@ struct scmi_msg {
  * @async: pointer to delayed response message received event completion
  */
 struct scmi_xfer {
+	int transfer_id;
 	struct scmi_msg_hdr hdr;
 	struct scmi_msg tx;
 	struct scmi_msg rx;
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 3eb0382..2c96f6b 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -29,6 +29,9 @@
 
 #include "common.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/scmi.h>
+
 #define MSG_ID_MASK		GENMASK(7, 0)
 #define MSG_XTRACT_ID(hdr)	FIELD_GET(MSG_ID_MASK, (hdr))
 #define MSG_TYPE_MASK		GENMASK(9, 8)
@@ -61,6 +64,8 @@ enum scmi_error_codes {
 static LIST_HEAD(scmi_list);
 /* Protection for the entire list */
 static DEFINE_MUTEX(scmi_list_mutex);
+/* Track the unique id for the transfers for debug & profiling purpose */
+static atomic_t transfer_last_id;
 
 /**
  * struct scmi_xfers_info - Structure to manage transfer information
@@ -304,6 +309,7 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
 	xfer = &minfo->xfer_block[xfer_id];
 	xfer->hdr.seq = xfer_id;
 	reinit_completion(&xfer->done);
+	xfer->transfer_id = atomic_inc_return(&transfer_last_id);
 
 	return xfer;
 }
@@ -374,6 +380,10 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m)
 
 	scmi_fetch_response(xfer, mem);
 
+	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
+			   xfer->hdr.protocol_id, xfer->hdr.seq,
+			   msg_type);
+
 	if (msg_type == MSG_TYPE_DELAYED_RESP)
 		complete(xfer->async_done);
 	else
@@ -439,6 +449,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
 	if (unlikely(!cinfo))
 		return -EINVAL;
 
+	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
+			      xfer->hdr.protocol_id, xfer->hdr.seq,
+			      xfer->hdr.poll_completion);
+
 	ret = mbox_send_message(cinfo->chan, xfer);
 	if (ret < 0) {
 		dev_dbg(dev, "mbox send fail %d\n", ret);
@@ -478,6 +492,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
 	 */
 	mbox_client_txdone(cinfo->chan, ret);
 
+	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
+			    xfer->hdr.protocol_id, xfer->hdr.seq,
+			    xfer->hdr.status);
+
 	return ret;
 }
 
@@ -735,6 +753,11 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
 	idx = tx ? 0 : 1;
 	idr = tx ? &info->tx_idr : &info->rx_idr;
 
+	/* check if already allocated, used for multiple device per protocol */
+	cinfo = idr_find(idr, prot_id);
+	if (cinfo)
+		return 0;
+
 	if (scmi_mailbox_check(np, idx)) {
 		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
 		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
@@ -803,11 +826,11 @@ scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
 
 static inline void
 scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
-			    int prot_id)
+			    int prot_id, const char *name)
 {
 	struct scmi_device *sdev;
 
-	sdev = scmi_device_create(np, info->dev, prot_id);
+	sdev = scmi_device_create(np, info->dev, prot_id, name);
 	if (!sdev) {
 		dev_err(info->dev, "failed to create %d protocol device\n",
 			prot_id);
@@ -824,6 +847,40 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
 	scmi_set_handle(sdev);
 }
 
+#define MAX_SCMI_DEV_PER_PROTOCOL	2
+struct scmi_prot_devnames {
+	int protocol_id;
+	char *names[MAX_SCMI_DEV_PER_PROTOCOL];
+};
+
+static struct scmi_prot_devnames devnames[] = {
+	{ SCMI_PROTOCOL_POWER,  { "genpd" },},
+	{ SCMI_PROTOCOL_PERF,   { "cpufreq" },},
+	{ SCMI_PROTOCOL_CLOCK,  { "clocks" },},
+	{ SCMI_PROTOCOL_SENSOR, { "hwmon" },},
+	{ SCMI_PROTOCOL_RESET,  { "reset" },},
+};
+
+static inline void
+scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
+			     int prot_id)
+{
+	int loop, cnt;
+
+	for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
+		if (devnames[loop].protocol_id != prot_id)
+			continue;
+
+		for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
+			const char *name = devnames[loop].names[cnt];
+
+			if (name)
+				scmi_create_protocol_device(np, info, prot_id,
+							    name);
+		}
+	}
+}
+
 static int scmi_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -892,7 +949,7 @@ static int scmi_probe(struct platform_device *pdev)
 			continue;
 		}
 
-		scmi_create_protocol_device(child, info, prot_id);
+		scmi_create_protocol_devices(child, info, prot_id);
 	}
 
 	return 0;
@@ -940,6 +997,52 @@ static int scmi_remove(struct platform_device *pdev)
 	return ret;
 }
 
+static ssize_t protocol_version_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct scmi_info *info = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u.%u\n", info->version.major_ver,
+		       info->version.minor_ver);
+}
+static DEVICE_ATTR_RO(protocol_version);
+
+static ssize_t firmware_version_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct scmi_info *info = dev_get_drvdata(dev);
+
+	return sprintf(buf, "0x%x\n", info->version.impl_ver);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t vendor_id_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct scmi_info *info = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%s\n", info->version.vendor_id);
+}
+static DEVICE_ATTR_RO(vendor_id);
+
+static ssize_t sub_vendor_id_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct scmi_info *info = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
+}
+static DEVICE_ATTR_RO(sub_vendor_id);
+
+static struct attribute *versions_attrs[] = {
+	&dev_attr_firmware_version.attr,
+	&dev_attr_protocol_version.attr,
+	&dev_attr_vendor_id.attr,
+	&dev_attr_sub_vendor_id.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(versions);
+
 static const struct scmi_desc scmi_generic_desc = {
 	.max_rx_timeout_ms = 30,	/* We may increase this if required */
 	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
@@ -958,6 +1061,7 @@ static struct platform_driver scmi_driver = {
 	.driver = {
 		   .name = "arm-scmi",
 		   .of_match_table = scmi_of_match,
+		   .dev_groups = versions_groups,
 		   },
 	.probe = scmi_probe,
 	.remove = scmi_remove,
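
The versions_groups attributes registered above expose the SCMI base protocol discovery results in sysfs. A small userspace sketch that dumps them follows; the directory /sys/devices/platform/firmware:scmi is an assumption (the platform device name comes from the device-tree node), so adjust the path for the target system.

	#include <stdio.h>

	int main(void)
	{
		static const char * const attrs[] = {
			"protocol_version", "firmware_version",
			"vendor_id", "sub_vendor_id",
		};
		char path[128], line[64];
		unsigned int i;

		for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
			FILE *f;

			/* Assumed sysfs location, see note above */
			snprintf(path, sizeof(path),
				 "/sys/devices/platform/firmware:scmi/%s",
				 attrs[i]);
			f = fopen(path, "r");
			if (!f) {
				perror(path);
				continue;
			}
			if (fgets(line, sizeof(line), f))
				printf("%s: %s", attrs[i], line);
			fclose(f);
		}
		return 0;
	}

Built with any C compiler, this prints the protocol version, implementation version and vendor strings reported by the firmware, matching what the show() callbacks above format.
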
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 601af4e..ec81e6f 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -145,6 +145,7 @@ struct perf_dom_info {
 };
 
 struct scmi_perf_info {
+	u32 version;
 	int num_domains;
 	bool power_scale_mw;
 	u64 stats_addr;
@@ -736,6 +737,7 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle)
 			scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
 	}
 
+	pinfo->version = version;
 	handle->perf_ops = &perf_ops;
 	handle->perf_priv = pinfo;
 
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 5abef70..214886c 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -50,6 +50,7 @@ struct power_dom_info {
 };
 
 struct scmi_power_info {
+	u32 version;
 	int num_domains;
 	u64 stats_addr;
 	u32 stats_size;
@@ -207,6 +208,7 @@ static int scmi_power_protocol_init(struct scmi_handle *handle)
 		scmi_power_domain_attributes_get(handle, domain, dom);
 	}
 
+	pinfo->version = version;
 	handle->power_ops = &power_ops;
 	handle->power_priv = pinfo;
 
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index ab42c21..de73054 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -48,6 +48,7 @@ struct reset_dom_info {
 };
 
 struct scmi_reset_info {
+	u32 version;
 	int num_domains;
 	struct reset_dom_info *dom_info;
 };
@@ -217,6 +218,7 @@ static int scmi_reset_protocol_init(struct scmi_handle *handle)
 		scmi_reset_domain_attributes_get(handle, domain, dom);
 	}
 
+	pinfo->version = version;
 	handle->reset_ops = &reset_ops;
 	handle->reset_priv = pinfo;
 
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 87f737e..bafbfe3 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -112,7 +112,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
 }
 
 static const struct scmi_device_id scmi_id_table[] = {
-	{ SCMI_PROTOCOL_POWER },
+	{ SCMI_PROTOCOL_POWER, "genpd" },
 	{ },
 };
 MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index a400ea8..eba61b9 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -68,6 +68,7 @@ struct scmi_msg_sensor_reading_get {
 };
 
 struct sensors_info {
+	u32 version;
 	int num_sensors;
 	int max_requests;
 	u64 reg_addr;
@@ -294,6 +295,7 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle)
 
 	scmi_sensor_description_get(handle, sinfo);
 
+	sinfo->version = version;
 	handle->sensor_ops = &sensor_ops;
 	handle->sensor_priv = sinfo;
 
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 899b803..9dda260 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -27,7 +27,7 @@
 
 extern u64 efi_system_table;
 
-#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
+#if defined(CONFIG_PTDUMP_DEBUGFS) && defined(CONFIG_ARM64)
 #include <asm/ptdump.h>
 
 static struct ptdump_info efi_ptdump_info = {
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 8d132e4..0205987 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -163,8 +163,15 @@ static int coreboot_table_probe(struct platform_device *pdev)
 	return ret;
 }
 
+static int __cb_dev_unregister(struct device *dev, void *dummy)
+{
+	device_unregister(dev);
+	return 0;
+}
+
 static int coreboot_table_remove(struct platform_device *pdev)
 {
+	bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister);
 	bus_unregister(&coreboot_bus_type);
 	return 0;
 }
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index edaa4e5..5b2011e 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -76,6 +76,7 @@
 #define GSMI_CMD_LOG_S0IX_RESUME	0x0b
 #define GSMI_CMD_CLEAR_CONFIG		0x20
 #define GSMI_CMD_HANDSHAKE_TYPE		0xC1
+#define GSMI_CMD_RESERVED		0xff
 
 /* Magic entry type for kernel events */
 #define GSMI_LOG_ENTRY_TYPE_KERNEL     0xDEAD
@@ -746,6 +747,7 @@ MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table);
 static __init int gsmi_system_valid(void)
 {
 	u32 hash;
+	u16 cmd, result;
 
 	if (!dmi_check_system(gsmi_dmi_table))
 		return -ENODEV;
@@ -780,6 +782,23 @@ static __init int gsmi_system_valid(void)
 		return -ENODEV;
 	}
 
+	/* Test the smihandler with a bogus command. If it leaves the
+	 * calling argument in %ax untouched, there is no handler for
+	 * GSMI commands.
+	 */
+	cmd = GSMI_CALLBACK | GSMI_CMD_RESERVED << 8;
+	asm volatile (
+		"outb %%al, %%dx\n\t"
+		: "=a" (result)
+		: "0" (cmd),
+		  "d" (acpi_gbl_FADT.smi_command)
+		: "memory", "cc"
+		);
+	if (cmd == result) {
+		pr_info("gsmi: no gsmi handler in firmware\n");
+		return -ENODEV;
+	}
+
 	/* Found */
 	return 0;
 }
@@ -1016,6 +1035,9 @@ static __init int gsmi_init(void)
 	dma_pool_destroy(gsmi_dev.dma_pool);
 	platform_device_unregister(gsmi_dev.pdev);
 	pr_info("gsmi: failed to load: %d\n", ret);
+#ifdef CONFIG_PM
+	platform_driver_unregister(&gsmi_driver_info);
+#endif
 	return ret;
 }
 
@@ -1037,6 +1059,9 @@ static void __exit gsmi_exit(void)
 	gsmi_buf_free(gsmi_dev.name_buf);
 	dma_pool_destroy(gsmi_dev.dma_pool);
 	platform_device_unregister(gsmi_dev.pdev);
+#ifdef CONFIG_PM
+	platform_driver_unregister(&gsmi_driver_info);
+#endif
 }
 
 module_init(gsmi_init);
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 0dbee32..1d2e5b8 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config IMX_DSP
-	bool "IMX DSP Protocol driver"
+	tristate "IMX DSP Protocol driver"
 	depends on IMX_MBOX
 	help
 	  This enables DSP IPC protocol between host AP (Linux)
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 7e12cbd..96758b7 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -104,6 +104,7 @@ struct ibft_control {
 	u16 tgt0_off;
 	u16 nic1_off;
 	u16 tgt1_off;
+	u16 expansion[0];
 } __attribute__((__packed__));
 
 struct ibft_initiator {
@@ -235,7 +236,7 @@ static int ibft_verify_hdr(char *t, struct ibft_hdr *hdr, int id, int length)
 				"found %d instead!\n", t, id, hdr->id);
 		return -ENODEV;
 	}
-	if (hdr->length != length) {
+	if (length && hdr->length != length) {
 		printk(KERN_ERR "iBFT error: We expected the %s " \
 				"field header.length to have %d but " \
 				"found %d instead!\n", t, length, hdr->length);
@@ -749,16 +750,16 @@ static int __init ibft_register_kobjects(struct acpi_table_ibft *header)
 	control = (void *)header + sizeof(*header);
 	end = (void *)control + control->hdr.length;
 	eot_offset = (void *)header + header->header.length - (void *)control;
-	rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control,
-			     sizeof(*control));
+	rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control, 0);
 
 	/* iBFT table safety checking */
 	rc |= ((control->hdr.index) ? -ENODEV : 0);
+	rc |= ((control->hdr.length < sizeof(*control)) ? -ENODEV : 0);
 	if (rc) {
 		printk(KERN_ERR "iBFT error: Control header is invalid!\n");
 		return rc;
 	}
-	for (ptr = &control->initiator_off; ptr < end; ptr += sizeof(u16)) {
+	for (ptr = &control->initiator_off; ptr + sizeof(u16) <= end; ptr += sizeof(u16)) {
 		offset = *(u16 *)ptr;
 		if (offset && offset < header->header.length &&
 						offset < eot_offset) {
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index b3b6c15..2937d44 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -97,7 +97,7 @@ static inline bool psci_has_ext_power_state(void)
 				PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
 }
 
-static inline bool psci_has_osi_support(void)
+bool psci_has_osi_support(void)
 {
 	return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED;
 }
@@ -162,6 +162,15 @@ static u32 psci_get_version(void)
 	return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
 }
 
+int psci_set_osi_mode(void)
+{
+	int err;
+
+	err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE,
+			     PSCI_1_0_SUSPEND_MODE_OSI, 0, 0);
+	return psci_to_linux_errno(err);
+}
+
 static int psci_cpu_suspend(u32 state, unsigned long entry_point)
 {
 	int err;
@@ -544,9 +553,14 @@ static int __init psci_1_0_init(struct device_node *np)
 	if (err)
 		return err;
 
-	if (psci_has_osi_support())
+	if (psci_has_osi_support()) {
 		pr_info("OSI mode supported.\n");
 
+		/* Default to PC mode. */
+		invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE,
+			       PSCI_1_0_SUSPEND_MODE_PC, 0, 0);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
deleted file mode 100644
index 48e2ef7..0000000
--- a/drivers/firmware/qcom_scm-32.c
+++ /dev/null
@@ -1,671 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015 Linaro Ltd.
- */
-
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/qcom_scm.h>
-#include <linux/dma-mapping.h>
-
-#include "qcom_scm.h"
-
-#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
-#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
-#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
-#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20
-
-#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
-#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
-#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
-#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40
-
-struct qcom_scm_entry {
-	int flag;
-	void *entry;
-};
-
-static struct qcom_scm_entry qcom_scm_wb[] = {
-	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
-	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
-	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
-	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
-};
-
-static DEFINE_MUTEX(qcom_scm_lock);
-
-/**
- * struct qcom_scm_command - one SCM command buffer
- * @len: total available memory for command and response
- * @buf_offset: start of command buffer
- * @resp_hdr_offset: start of response buffer
- * @id: command to be executed
- * @buf: buffer returned from qcom_scm_get_command_buffer()
- *
- * An SCM command is laid out in memory as follows:
- *
- *	------------------- <--- struct qcom_scm_command
- *	| command header  |
- *	------------------- <--- qcom_scm_get_command_buffer()
- *	| command buffer  |
- *	------------------- <--- struct qcom_scm_response and
- *	| response header |      qcom_scm_command_to_response()
- *	------------------- <--- qcom_scm_get_response_buffer()
- *	| response buffer |
- *	-------------------
- *
- * There can be arbitrary padding between the headers and buffers so
- * you should always use the appropriate qcom_scm_get_*_buffer() routines
- * to access the buffers in a safe manner.
- */
-struct qcom_scm_command {
-	__le32 len;
-	__le32 buf_offset;
-	__le32 resp_hdr_offset;
-	__le32 id;
-	__le32 buf[0];
-};
-
-/**
- * struct qcom_scm_response - one SCM response buffer
- * @len: total available memory for response
- * @buf_offset: start of response data relative to start of qcom_scm_response
- * @is_complete: indicates if the command has finished processing
- */
-struct qcom_scm_response {
-	__le32 len;
-	__le32 buf_offset;
-	__le32 is_complete;
-};
-
-/**
- * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
- * @cmd: command
- *
- * Returns a pointer to a response for a command.
- */
-static inline struct qcom_scm_response *qcom_scm_command_to_response(
-		const struct qcom_scm_command *cmd)
-{
-	return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
-}
-
-/**
- * qcom_scm_get_command_buffer() - Get a pointer to a command buffer
- * @cmd: command
- *
- * Returns a pointer to the command buffer of a command.
- */
-static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
-{
-	return (void *)cmd->buf;
-}
-
-/**
- * qcom_scm_get_response_buffer() - Get a pointer to a response buffer
- * @rsp: response
- *
- * Returns a pointer to a response buffer of a response.
- */
-static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
-{
-	return (void *)rsp + le32_to_cpu(rsp->buf_offset);
-}
-
-static u32 smc(u32 cmd_addr)
-{
-	int context_id;
-	register u32 r0 asm("r0") = 1;
-	register u32 r1 asm("r1") = (u32)&context_id;
-	register u32 r2 asm("r2") = cmd_addr;
-	do {
-		asm volatile(
-			__asmeq("%0", "r0")
-			__asmeq("%1", "r0")
-			__asmeq("%2", "r1")
-			__asmeq("%3", "r2")
-#ifdef REQUIRES_SEC
-			".arch_extension sec\n"
-#endif
-			"smc	#0	@ switch to secure world\n"
-			: "=r" (r0)
-			: "r" (r0), "r" (r1), "r" (r2)
-			: "r3", "r12");
-	} while (r0 == QCOM_SCM_INTERRUPTED);
-
-	return r0;
-}
-
-/**
- * qcom_scm_call() - Send an SCM command
- * @dev: struct device
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @cmd_buf: command buffer
- * @cmd_len: length of the command buffer
- * @resp_buf: response buffer
- * @resp_len: length of the response buffer
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- *
- * A note on cache maintenance:
- * Note that any buffers that are expected to be accessed by the secure world
- * must be flushed before invoking qcom_scm_call and invalidated in the cache
- * immediately after qcom_scm_call returns. Cache maintenance on the command
- * and response buffers is taken care of by qcom_scm_call; however, callers are
- * responsible for any other cached buffers passed over to the secure world.
- */
-static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
-			 const void *cmd_buf, size_t cmd_len, void *resp_buf,
-			 size_t resp_len)
-{
-	int ret;
-	struct qcom_scm_command *cmd;
-	struct qcom_scm_response *rsp;
-	size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
-	dma_addr_t cmd_phys;
-
-	cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	cmd->len = cpu_to_le32(alloc_len);
-	cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
-	cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
-
-	cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
-	if (cmd_buf)
-		memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);
-
-	rsp = qcom_scm_command_to_response(cmd);
-
-	cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, cmd_phys)) {
-		kfree(cmd);
-		return -ENOMEM;
-	}
-
-	mutex_lock(&qcom_scm_lock);
-	ret = smc(cmd_phys);
-	if (ret < 0)
-		ret = qcom_scm_remap_error(ret);
-	mutex_unlock(&qcom_scm_lock);
-	if (ret)
-		goto out;
-
-	do {
-		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
-					sizeof(*rsp), DMA_FROM_DEVICE);
-	} while (!rsp->is_complete);
-
-	if (resp_buf) {
-		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
-					le32_to_cpu(rsp->buf_offset),
-					resp_len, DMA_FROM_DEVICE);
-		memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
-		       resp_len);
-	}
-out:
-	dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
-	kfree(cmd);
-	return ret;
-}
-
-#define SCM_CLASS_REGISTER	(0x2 << 8)
-#define SCM_MASK_IRQS		BIT(5)
-#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
-				SCM_CLASS_REGISTER | \
-				SCM_MASK_IRQS | \
-				(n & 0xf))
-
-/**
- * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @arg1: first argument
- *
- * This shall only be used with commands that are guaranteed to be
- * uninterruptable, atomic and SMP safe.
- */
-static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
-{
-	int context_id;
-
-	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
-	register u32 r1 asm("r1") = (u32)&context_id;
-	register u32 r2 asm("r2") = arg1;
-
-	asm volatile(
-			__asmeq("%0", "r0")
-			__asmeq("%1", "r0")
-			__asmeq("%2", "r1")
-			__asmeq("%3", "r2")
-#ifdef REQUIRES_SEC
-			".arch_extension sec\n"
-#endif
-			"smc    #0      @ switch to secure world\n"
-			: "=r" (r0)
-			: "r" (r0), "r" (r1), "r" (r2)
-			: "r3", "r12");
-	return r0;
-}
-
-/**
- * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
- * @svc_id:	service identifier
- * @cmd_id:	command identifier
- * @arg1:	first argument
- * @arg2:	second argument
- *
- * This shall only be used with commands that are guaranteed to be
- * uninterruptable, atomic and SMP safe.
- */
-static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
-{
-	int context_id;
-
-	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
-	register u32 r1 asm("r1") = (u32)&context_id;
-	register u32 r2 asm("r2") = arg1;
-	register u32 r3 asm("r3") = arg2;
-
-	asm volatile(
-			__asmeq("%0", "r0")
-			__asmeq("%1", "r0")
-			__asmeq("%2", "r1")
-			__asmeq("%3", "r2")
-			__asmeq("%4", "r3")
-#ifdef REQUIRES_SEC
-			".arch_extension sec\n"
-#endif
-			"smc    #0      @ switch to secure world\n"
-			: "=r" (r0)
-			: "r" (r0), "r" (r1), "r" (r2), "r" (r3)
-			: "r12");
-	return r0;
-}
-
-u32 qcom_scm_get_version(void)
-{
-	int context_id;
-	static u32 version = -1;
-	register u32 r0 asm("r0");
-	register u32 r1 asm("r1");
-
-	if (version != -1)
-		return version;
-
-	mutex_lock(&qcom_scm_lock);
-
-	r0 = 0x1 << 8;
-	r1 = (u32)&context_id;
-	do {
-		asm volatile(
-			__asmeq("%0", "r0")
-			__asmeq("%1", "r1")
-			__asmeq("%2", "r0")
-			__asmeq("%3", "r1")
-#ifdef REQUIRES_SEC
-			".arch_extension sec\n"
-#endif
-			"smc	#0	@ switch to secure world\n"
-			: "=r" (r0), "=r" (r1)
-			: "r" (r0), "r" (r1)
-			: "r2", "r3", "r12");
-	} while (r0 == QCOM_SCM_INTERRUPTED);
-
-	version = r1;
-	mutex_unlock(&qcom_scm_lock);
-
-	return version;
-}
-EXPORT_SYMBOL(qcom_scm_get_version);
-
-/**
- * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the cold boot address of the cpus. Any cpu outside the supported
- * range would be removed from the cpu present mask.
- */
-int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
-{
-	int flags = 0;
-	int cpu;
-	int scm_cb_flags[] = {
-		QCOM_SCM_FLAG_COLDBOOT_CPU0,
-		QCOM_SCM_FLAG_COLDBOOT_CPU1,
-		QCOM_SCM_FLAG_COLDBOOT_CPU2,
-		QCOM_SCM_FLAG_COLDBOOT_CPU3,
-	};
-
-	if (!cpus || (cpus && cpumask_empty(cpus)))
-		return -EINVAL;
-
-	for_each_cpu(cpu, cpus) {
-		if (cpu < ARRAY_SIZE(scm_cb_flags))
-			flags |= scm_cb_flags[cpu];
-		else
-			set_cpu_present(cpu, false);
-	}
-
-	return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
-				    flags, virt_to_phys(entry));
-}
-
-/**
- * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the Linux entry point for the SCM to transfer control to when coming
- * out of a power down. CPU power down may be executed on cpuidle or hotplug.
- */
-int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
-				  const cpumask_t *cpus)
-{
-	int ret;
-	int flags = 0;
-	int cpu;
-	struct {
-		__le32 flags;
-		__le32 addr;
-	} cmd;
-
-	/*
-	 * Reassign only if we are switching from hotplug entry point
-	 * to cpuidle entry point or vice versa.
-	 */
-	for_each_cpu(cpu, cpus) {
-		if (entry == qcom_scm_wb[cpu].entry)
-			continue;
-		flags |= qcom_scm_wb[cpu].flag;
-	}
-
-	/* No change in entry function */
-	if (!flags)
-		return 0;
-
-	cmd.addr = cpu_to_le32(virt_to_phys(entry));
-	cmd.flags = cpu_to_le32(flags);
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
-			    &cmd, sizeof(cmd), NULL, 0);
-	if (!ret) {
-		for_each_cpu(cpu, cpus)
-			qcom_scm_wb[cpu].entry = entry;
-	}
-
-	return ret;
-}
-
-/**
- * qcom_scm_cpu_power_down() - Power down the cpu
- * @flags - Flags to flush cache
- *
- * This is an end point to power down cpu. If there was a pending interrupt,
- * the control would return from this function, otherwise, the cpu jumps to the
- * warm boot entry point set for this cpu upon reset.
- */
-void __qcom_scm_cpu_power_down(u32 flags)
-{
-	qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
-			flags & QCOM_SCM_FLUSH_FLAG_MASK);
-}
-
-int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
-{
-	int ret;
-	__le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
-	__le32 ret_val = 0;
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
-			    &svc_cmd, sizeof(svc_cmd), &ret_val,
-			    sizeof(ret_val));
-	if (ret)
-		return ret;
-
-	return le32_to_cpu(ret_val);
-}
-
-int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
-			u32 req_cnt, u32 *resp)
-{
-	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
-		return -ERANGE;
-
-	return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
-		req, req_cnt * sizeof(*req), resp, sizeof(*resp));
-}
-
-int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset, u32 size,
-			  u32 mode)
-{
-	struct ocmem_tz_lock {
-		__le32 id;
-		__le32 offset;
-		__le32 size;
-		__le32 mode;
-	} request;
-
-	request.id = cpu_to_le32(id);
-	request.offset = cpu_to_le32(offset);
-	request.size = cpu_to_le32(size);
-	request.mode = cpu_to_le32(mode);
-
-	return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_LOCK_CMD,
-			     &request, sizeof(request), NULL, 0);
-}
-
-int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset, u32 size)
-{
-	struct ocmem_tz_unlock {
-		__le32 id;
-		__le32 offset;
-		__le32 size;
-	} request;
-
-	request.id = cpu_to_le32(id);
-	request.offset = cpu_to_le32(offset);
-	request.size = cpu_to_le32(size);
-
-	return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_UNLOCK_CMD,
-			     &request, sizeof(request), NULL, 0);
-}
-
-void __qcom_scm_init(void)
-{
-}
-
-bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
-{
-	__le32 out;
-	__le32 in;
-	int ret;
-
-	in = cpu_to_le32(peripheral);
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
-			    QCOM_SCM_PAS_IS_SUPPORTED_CMD,
-			    &in, sizeof(in),
-			    &out, sizeof(out));
-
-	return ret ? false : !!out;
-}
-
-int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
-			      dma_addr_t metadata_phys)
-{
-	__le32 scm_ret;
-	int ret;
-	struct {
-		__le32 proc;
-		__le32 image_addr;
-	} request;
-
-	request.proc = cpu_to_le32(peripheral);
-	request.image_addr = cpu_to_le32(metadata_phys);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
-			    QCOM_SCM_PAS_INIT_IMAGE_CMD,
-			    &request, sizeof(request),
-			    &scm_ret, sizeof(scm_ret));
-
-	return ret ? : le32_to_cpu(scm_ret);
-}
-
-int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
-			     phys_addr_t addr, phys_addr_t size)
-{
-	__le32 scm_ret;
-	int ret;
-	struct {
-		__le32 proc;
-		__le32 addr;
-		__le32 len;
-	} request;
-
-	request.proc = cpu_to_le32(peripheral);
-	request.addr = cpu_to_le32(addr);
-	request.len = cpu_to_le32(size);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
-			    QCOM_SCM_PAS_MEM_SETUP_CMD,
-			    &request, sizeof(request),
-			    &scm_ret, sizeof(scm_ret));
-
-	return ret ? : le32_to_cpu(scm_ret);
-}
-
-int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
-{
-	__le32 out;
-	__le32 in;
-	int ret;
-
-	in = cpu_to_le32(peripheral);
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
-			    QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
-			    &in, sizeof(in),
-			    &out, sizeof(out));
-
-	return ret ? : le32_to_cpu(out);
-}
-
-int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
-{
-	__le32 out;
-	__le32 in;
-	int ret;
-
-	in = cpu_to_le32(peripheral);
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
-			    QCOM_SCM_PAS_SHUTDOWN_CMD,
-			    &in, sizeof(in),
-			    &out, sizeof(out));
-
-	return ret ? : le32_to_cpu(out);
-}
-
-int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
-{
-	__le32 out;
-	__le32 in = cpu_to_le32(reset);
-	int ret;
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
-			&in, sizeof(in),
-			&out, sizeof(out));
-
-	return ret ? : le32_to_cpu(out);
-}
-
-int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
-{
-	return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
-				     enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0);
-}
-
-int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
-{
-	struct {
-		__le32 state;
-		__le32 id;
-	} req;
-	__le32 scm_ret = 0;
-	int ret;
-
-	req.state = cpu_to_le32(state);
-	req.id = cpu_to_le32(id);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
-			    &req, sizeof(req), &scm_ret, sizeof(scm_ret));
-
-	return ret ? : le32_to_cpu(scm_ret);
-}
-
-int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
-			  size_t mem_sz, phys_addr_t src, size_t src_sz,
-			  phys_addr_t dest, size_t dest_sz)
-{
-	return -ENODEV;
-}
-
-int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
-			       u32 spare)
-{
-	struct msm_scm_sec_cfg {
-		__le32 id;
-		__le32 ctx_bank_num;
-	} cfg;
-	int ret, scm_ret = 0;
-
-	cfg.id = cpu_to_le32(device_id);
-	cfg.ctx_bank_num = cpu_to_le32(spare);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG,
-			    &cfg, sizeof(cfg), &scm_ret, sizeof(scm_ret));
-
-	if (ret || scm_ret)
-		return ret ? ret : -EINVAL;
-
-	return 0;
-}
-
-int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
-				      size_t *size)
-{
-	return -ENODEV;
-}
-
-int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
-				      u32 spare)
-{
-	return -ENODEV;
-}
-
-int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
-			unsigned int *val)
-{
-	int ret;
-
-	ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr);
-	if (ret >= 0)
-		*val = ret;
-
-	return ret < 0 ? ret : 0;
-}
-
-int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
-{
-	return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
-				     addr, val);
-}
-
-int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable)
-{
-	return -ENODEV;
-}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
deleted file mode 100644
index 3c58503..0000000
--- a/drivers/firmware/qcom_scm-64.c
+++ /dev/null
@@ -1,579 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/io.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/qcom_scm.h>
-#include <linux/arm-smccc.h>
-#include <linux/dma-mapping.h>
-
-#include "qcom_scm.h"
-
-#define QCOM_SCM_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
-
-#define MAX_QCOM_SCM_ARGS 10
-#define MAX_QCOM_SCM_RETS 3
-
-enum qcom_scm_arg_types {
-	QCOM_SCM_VAL,
-	QCOM_SCM_RO,
-	QCOM_SCM_RW,
-	QCOM_SCM_BUFVAL,
-};
-
-#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
-			   (((a) & 0x3) << 4) | \
-			   (((b) & 0x3) << 6) | \
-			   (((c) & 0x3) << 8) | \
-			   (((d) & 0x3) << 10) | \
-			   (((e) & 0x3) << 12) | \
-			   (((f) & 0x3) << 14) | \
-			   (((g) & 0x3) << 16) | \
-			   (((h) & 0x3) << 18) | \
-			   (((i) & 0x3) << 20) | \
-			   (((j) & 0x3) << 22) | \
-			   ((num) & 0xf))
-
-#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
-
-/**
- * struct qcom_scm_desc
- * @arginfo:	Metadata describing the arguments in args[]
- * @args:	The array of arguments for the secure syscall
- * @res:	The values returned by the secure syscall
- */
-struct qcom_scm_desc {
-	u32 arginfo;
-	u64 args[MAX_QCOM_SCM_ARGS];
-};
-
-static u64 qcom_smccc_convention = -1;
-static DEFINE_MUTEX(qcom_scm_lock);
-
-#define QCOM_SCM_EBUSY_WAIT_MS 30
-#define QCOM_SCM_EBUSY_MAX_RETRY 20
-
-#define N_EXT_QCOM_SCM_ARGS 7
-#define FIRST_EXT_ARG_IDX 3
-#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
-
-static void __qcom_scm_call_do(const struct qcom_scm_desc *desc,
-			       struct arm_smccc_res *res, u32 fn_id,
-			       u64 x5, u32 type)
-{
-	u64 cmd;
-	struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
-
-	cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention,
-				 ARM_SMCCC_OWNER_SIP, fn_id);
-
-	quirk.state.a6 = 0;
-
-	do {
-		arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
-				    desc->args[1], desc->args[2], x5,
-				    quirk.state.a6, 0, res, &quirk);
-
-		if (res->a0 == QCOM_SCM_INTERRUPTED)
-			cmd = res->a0;
-
-	} while (res->a0 == QCOM_SCM_INTERRUPTED);
-}
-
-static void qcom_scm_call_do(const struct qcom_scm_desc *desc,
-			     struct arm_smccc_res *res, u32 fn_id,
-			     u64 x5, bool atomic)
-{
-	int retry_count = 0;
-
-	if (atomic) {
-		__qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL);
-		return;
-	}
-
-	do {
-		mutex_lock(&qcom_scm_lock);
-
-		__qcom_scm_call_do(desc, res, fn_id, x5,
-				   ARM_SMCCC_STD_CALL);
-
-		mutex_unlock(&qcom_scm_lock);
-
-		if (res->a0 == QCOM_SCM_V2_EBUSY) {
-			if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
-				break;
-			msleep(QCOM_SCM_EBUSY_WAIT_MS);
-		}
-	}  while (res->a0 == QCOM_SCM_V2_EBUSY);
-}
-
-static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
-			    const struct qcom_scm_desc *desc,
-			    struct arm_smccc_res *res, bool atomic)
-{
-	int arglen = desc->arginfo & 0xf;
-	int i;
-	u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
-	u64 x5 = desc->args[FIRST_EXT_ARG_IDX];
-	dma_addr_t args_phys = 0;
-	void *args_virt = NULL;
-	size_t alloc_len;
-	gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
-
-	if (unlikely(arglen > N_REGISTER_ARGS)) {
-		alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
-		args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
-
-		if (!args_virt)
-			return -ENOMEM;
-
-		if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
-			__le32 *args = args_virt;
-
-			for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
-				args[i] = cpu_to_le32(desc->args[i +
-						      FIRST_EXT_ARG_IDX]);
-		} else {
-			__le64 *args = args_virt;
-
-			for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
-				args[i] = cpu_to_le64(desc->args[i +
-						      FIRST_EXT_ARG_IDX]);
-		}
-
-		args_phys = dma_map_single(dev, args_virt, alloc_len,
-					   DMA_TO_DEVICE);
-
-		if (dma_mapping_error(dev, args_phys)) {
-			kfree(args_virt);
-			return -ENOMEM;
-		}
-
-		x5 = args_phys;
-	}
-
-	qcom_scm_call_do(desc, res, fn_id, x5, atomic);
-
-	if (args_virt) {
-		dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
-		kfree(args_virt);
-	}
-
-	if ((long)res->a0 < 0)
-		return qcom_scm_remap_error(res->a0);
-
-	return 0;
-}
-
-/**
- * qcom_scm_call() - Invoke a syscall in the secure world
- * @dev:	device
- * @svc_id:	service identifier
- * @cmd_id:	command identifier
- * @desc:	Descriptor structure containing arguments and return values
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- * This should *only* be called in pre-emptible context.
- */
-static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
-			 const struct qcom_scm_desc *desc,
-			 struct arm_smccc_res *res)
-{
-	might_sleep();
-	return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false);
-}
-
-/**
- * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
- * @dev:	device
- * @svc_id:	service identifier
- * @cmd_id:	command identifier
- * @desc:	Descriptor structure containing arguments and return values
- * @res:	Structure containing results from SMC/HVC call
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- * This can be called in atomic context.
- */
-static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id,
-				const struct qcom_scm_desc *desc,
-				struct arm_smccc_res *res)
-{
-	return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true);
-}
-
-/**
- * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the cold boot address of the cpus. Any cpu outside the supported
- * range would be removed from the cpu present mask.
- */
-int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
-{
-	return -ENOTSUPP;
-}
-
-/**
- * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
- * @dev: Device pointer
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the Linux entry point for the SCM to transfer control to when coming
- * out of a power down. CPU power down may be executed on cpuidle or hotplug.
- */
-int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
-				  const cpumask_t *cpus)
-{
-	return -ENOTSUPP;
-}
-
-/**
- * qcom_scm_cpu_power_down() - Power down the cpu
- * @flags - Flags to flush cache
- *
- * This is an end point to power down cpu. If there was a pending interrupt,
- * the control would return from this function, otherwise, the cpu jumps to the
- * warm boot entry point set for this cpu upon reset.
- */
-void __qcom_scm_cpu_power_down(u32 flags)
-{
-}
-
-int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
-{
-	int ret;
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-
-	desc.arginfo = QCOM_SCM_ARGS(1);
-	desc.args[0] = QCOM_SCM_FNID(svc_id, cmd_id) |
-			(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
-			    &desc, &res);
-
-	return ret ? : res.a1;
-}
-
-int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
-			u32 req_cnt, u32 *resp)
-{
-	int ret;
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-
-	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
-		return -ERANGE;
-
-	desc.args[0] = req[0].addr;
-	desc.args[1] = req[0].val;
-	desc.args[2] = req[1].addr;
-	desc.args[3] = req[1].val;
-	desc.args[4] = req[2].addr;
-	desc.args[5] = req[2].val;
-	desc.args[6] = req[3].addr;
-	desc.args[7] = req[3].val;
-	desc.args[8] = req[4].addr;
-	desc.args[9] = req[4].val;
-	desc.arginfo = QCOM_SCM_ARGS(10);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc,
-			    &res);
-	*resp = res.a1;
-
-	return ret;
-}
-
-int __qcom_scm_ocmem_lock(struct device *dev, uint32_t id, uint32_t offset,
-			  uint32_t size, uint32_t mode)
-{
-	return -ENOTSUPP;
-}
-
-int __qcom_scm_ocmem_unlock(struct device *dev, uint32_t id, uint32_t offset,
-			    uint32_t size)
-{
-	return -ENOTSUPP;
-}
-
-void __qcom_scm_init(void)
-{
-	u64 cmd;
-	struct arm_smccc_res res;
-	u32 function = QCOM_SCM_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD);
-
-	/* First try a SMC64 call */
-	cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
-				 ARM_SMCCC_OWNER_SIP, function);
-
-	arm_smccc_smc(cmd, QCOM_SCM_ARGS(1), cmd & (~BIT(ARM_SMCCC_TYPE_SHIFT)),
-		      0, 0, 0, 0, 0, &res);
-
-	if (!res.a0 && res.a1)
-		qcom_smccc_convention = ARM_SMCCC_SMC_64;
-	else
-		qcom_smccc_convention = ARM_SMCCC_SMC_32;
-}
-
-bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
-{
-	int ret;
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-
-	desc.args[0] = peripheral;
-	desc.arginfo = QCOM_SCM_ARGS(1);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
-				QCOM_SCM_PAS_IS_SUPPORTED_CMD,
-				&desc, &res);
-
-	return ret ? false : !!res.a1;
-}
-
-int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
-			      dma_addr_t metadata_phys)
-{
-	int ret;
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-
-	desc.args[0] = peripheral;
-	desc.args[1] = metadata_phys;
-	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
-				&desc, &res);
-
-	return ret ? : res.a1;
-}
-
-int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
-			      phys_addr_t addr, phys_addr_t size)
-{
-	int ret;
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-
-	desc.args[0] = peripheral;
-	desc.args[1] = addr;
-	desc.args[2] = size;
-	desc.arginfo = QCOM_SCM_ARGS(3);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
-				&desc, &res);
-
-	return ret ? : res.a1;
-}
-
-int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
-{
-	int ret;
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-
-	desc.args[0] = peripheral;
-	desc.arginfo = QCOM_SCM_ARGS(1);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
-				QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
-				&desc, &res);
-
-	return ret ? : res.a1;
-}
-
-int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
-{
-	int ret;
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-
-	desc.args[0] = peripheral;
-	desc.arginfo = QCOM_SCM_ARGS(1);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
-			&desc, &res);
-
-	return ret ? : res.a1;
-}
-
-int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
-{
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-	int ret;
-
-	desc.args[0] = reset;
-	desc.args[1] = 0;
-	desc.arginfo = QCOM_SCM_ARGS(2);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET, &desc,
-			    &res);
-
-	return ret ? : res.a1;
-}
-
-int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
-{
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-	int ret;
-
-	desc.args[0] = state;
-	desc.args[1] = id;
-	desc.arginfo = QCOM_SCM_ARGS(2);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
-			    &desc, &res);
-
-	return ret ? : res.a1;
-}
-
-int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
-			  size_t mem_sz, phys_addr_t src, size_t src_sz,
-			  phys_addr_t dest, size_t dest_sz)
-{
-	int ret;
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-
-	desc.args[0] = mem_region;
-	desc.args[1] = mem_sz;
-	desc.args[2] = src;
-	desc.args[3] = src_sz;
-	desc.args[4] = dest;
-	desc.args[5] = dest_sz;
-	desc.args[6] = 0;
-
-	desc.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
-				     QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
-				     QCOM_SCM_VAL, QCOM_SCM_VAL);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
-			    QCOM_MEM_PROT_ASSIGN_ID,
-			    &desc, &res);
-
-	return ret ? : res.a1;
-}
-
-int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare)
-{
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-	int ret;
-
-	desc.args[0] = device_id;
-	desc.args[1] = spare;
-	desc.arginfo = QCOM_SCM_ARGS(2);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG,
-			    &desc, &res);
-
-	return ret ? : res.a1;
-}
-
-int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
-				      size_t *size)
-{
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-	int ret;
-
-	desc.args[0] = spare;
-	desc.arginfo = QCOM_SCM_ARGS(1);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
-			    QCOM_SCM_IOMMU_SECURE_PTBL_SIZE, &desc, &res);
-
-	if (size)
-		*size = res.a1;
-
-	return ret ? : res.a2;
-}
-
-int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
-				      u32 spare)
-{
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-	int ret;
-
-	desc.args[0] = addr;
-	desc.args[1] = size;
-	desc.args[2] = spare;
-	desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
-				     QCOM_SCM_VAL);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
-			    QCOM_SCM_IOMMU_SECURE_PTBL_INIT, &desc, &res);
-
-	/* the pg table has been initialized already, ignore the error */
-	if (ret == -EPERM)
-		ret = 0;
-
-	return ret;
-}
-
-int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
-{
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-
-	desc.args[0] = QCOM_SCM_SET_DLOAD_MODE;
-	desc.args[1] = enable ? QCOM_SCM_SET_DLOAD_MODE : 0;
-	desc.arginfo = QCOM_SCM_ARGS(2);
-
-	return qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
-			     &desc, &res);
-}
-
-int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
-			unsigned int *val)
-{
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-	int ret;
-
-	desc.args[0] = addr;
-	desc.arginfo = QCOM_SCM_ARGS(1);
-
-	ret = qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ,
-			    &desc, &res);
-	if (ret >= 0)
-		*val = res.a1;
-
-	return ret < 0 ? ret : 0;
-}
-
-int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
-{
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-
-	desc.args[0] = addr;
-	desc.args[1] = val;
-	desc.arginfo = QCOM_SCM_ARGS(2);
-
-	return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
-			     &desc, &res);
-}
-
-int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en)
-{
-	struct qcom_scm_desc desc = {0};
-	struct arm_smccc_res res;
-
-	desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL;
-	desc.args[1] = en;
-	desc.arginfo = QCOM_SCM_ARGS(2);
-
-	return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM,
-				    QCOM_SCM_CONFIG_ERRATA1, &desc, &res);
-}
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
new file mode 100644
index 0000000..8532e7c
--- /dev/null
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/qcom_scm.h>
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+
+/**
+ * struct arm_smccc_args
+ * @args:	The array of values used in registers in smc instruction
+ */
+struct arm_smccc_args {
+	unsigned long args[8];
+};
+
+
+/**
+ * struct scm_legacy_command - one SCM command buffer
+ * @len: total available memory for command and response
+ * @buf_offset: start of command buffer
+ * @resp_hdr_offset: start of response buffer
+ * @id: command to be executed
+ * @buf: buffer returned from scm_legacy_get_command_buffer()
+ *
+ * An SCM command is laid out in memory as follows:
+ *
+ *	------------------- <--- struct scm_legacy_command
+ *	| command header  |
+ *	------------------- <--- scm_legacy_get_command_buffer()
+ *	| command buffer  |
+ *	------------------- <--- struct scm_legacy_response and
+ *	| response header |      scm_legacy_command_to_response()
+ *	------------------- <--- scm_legacy_get_response_buffer()
+ *	| response buffer |
+ *	-------------------
+ *
+ * There can be arbitrary padding between the headers and buffers so
+ * you should always use the appropriate scm_legacy_get_*_buffer() routines
+ * to access the buffers in a safe manner.
+ */
+struct scm_legacy_command {
+	__le32 len;
+	__le32 buf_offset;
+	__le32 resp_hdr_offset;
+	__le32 id;
+	__le32 buf[0];
+};
+
+/**
+ * struct scm_legacy_response - one SCM response buffer
+ * @len: total available memory for response
+ * @buf_offset: start of response data relative to start of scm_legacy_response
+ * @is_complete: indicates if the command has finished processing
+ */
+struct scm_legacy_response {
+	__le32 len;
+	__le32 buf_offset;
+	__le32 is_complete;
+};
+
+/**
+ * scm_legacy_command_to_response() - Get a pointer to a scm_legacy_response
+ * @cmd: command
+ *
+ * Returns a pointer to a response for a command.
+ */
+static inline struct scm_legacy_response *scm_legacy_command_to_response(
+		const struct scm_legacy_command *cmd)
+{
+	return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
+}
+
+/**
+ * scm_legacy_get_command_buffer() - Get a pointer to a command buffer
+ * @cmd: command
+ *
+ * Returns a pointer to the command buffer of a command.
+ */
+static inline void *scm_legacy_get_command_buffer(
+		const struct scm_legacy_command *cmd)
+{
+	return (void *)cmd->buf;
+}
+
+/**
+ * scm_legacy_get_response_buffer() - Get a pointer to a response buffer
+ * @rsp: response
+ *
+ * Returns a pointer to a response buffer of a response.
+ */
+static inline void *scm_legacy_get_response_buffer(
+		const struct scm_legacy_response *rsp)
+{
+	return (void *)rsp + le32_to_cpu(rsp->buf_offset);
+}
+
+static void __scm_legacy_do(const struct arm_smccc_args *smc,
+			    struct arm_smccc_res *res)
+{
+	do {
+		arm_smccc_smc(smc->args[0], smc->args[1], smc->args[2],
+			      smc->args[3], smc->args[4], smc->args[5],
+			      smc->args[6], smc->args[7], res);
+	} while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+/**
+ * scm_legacy_call() - Sends a command to the SCM and waits for the command to
+ * finish processing.
+ *
+ * A note on cache maintenance:
+ * Note that any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking scm_legacy_call() and invalidated in the
+ * cache immediately after scm_legacy_call() returns. Cache maintenance on the
+ * command and response buffers is taken care of by scm_legacy_call(); however,
+ * callers are responsible for any other cached buffers passed over to the
+ * secure world.
+int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
+		    struct qcom_scm_res *res)
+{
+	u8 arglen = desc->arginfo & 0xf;
+	int ret = 0, context_id;
+	unsigned int i;
+	struct scm_legacy_command *cmd;
+	struct scm_legacy_response *rsp;
+	struct arm_smccc_args smc = {0};
+	struct arm_smccc_res smc_res;
+	const size_t cmd_len = arglen * sizeof(__le32);
+	const size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(__le32);
+	size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
+	dma_addr_t cmd_phys;
+	__le32 *arg_buf;
+	const __le32 *res_buf;
+
+	cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->len = cpu_to_le32(alloc_len);
+	cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
+	cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
+	cmd->id = cpu_to_le32(SCM_LEGACY_FNID(desc->svc, desc->cmd));
+
+	arg_buf = scm_legacy_get_command_buffer(cmd);
+	for (i = 0; i < arglen; i++)
+		arg_buf[i] = cpu_to_le32(desc->args[i]);
+
+	rsp = scm_legacy_command_to_response(cmd);
+
+	cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, cmd_phys)) {
+		kfree(cmd);
+		return -ENOMEM;
+	}
+
+	smc.args[0] = 1;
+	smc.args[1] = (unsigned long)&context_id;
+	smc.args[2] = cmd_phys;
+
+	mutex_lock(&qcom_scm_lock);
+	__scm_legacy_do(&smc, &smc_res);
+	if (smc_res.a0)
+		ret = qcom_scm_remap_error(smc_res.a0);
+	mutex_unlock(&qcom_scm_lock);
+	if (ret)
+		goto out;
+
+	do {
+		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
+					sizeof(*rsp), DMA_FROM_DEVICE);
+	} while (!rsp->is_complete);
+
+	dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
+				le32_to_cpu(rsp->buf_offset),
+				resp_len, DMA_FROM_DEVICE);
+
+	if (res) {
+		res_buf = scm_legacy_get_response_buffer(rsp);
+		for (i = 0; i < MAX_QCOM_SCM_RETS; i++)
+			res->result[i] = le32_to_cpu(res_buf[i]);
+	}
+out:
+	dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
+	kfree(cmd);
+	return ret;
+}
+
+#define SCM_LEGACY_ATOMIC_N_REG_ARGS	5
+#define SCM_LEGACY_ATOMIC_FIRST_REG_IDX	2
+#define SCM_LEGACY_CLASS_REGISTER		(0x2 << 8)
+#define SCM_LEGACY_MASK_IRQS		BIT(5)
+#define SCM_LEGACY_ATOMIC_ID(svc, cmd, n) \
+				((SCM_LEGACY_FNID(svc, cmd) << 12) | \
+				SCM_LEGACY_CLASS_REGISTER | \
+				SCM_LEGACY_MASK_IRQS | \
+				(n & 0xf))
+
+/**
+ * scm_legacy_call_atomic() - Send an atomic SCM command with up to 5
+ * arguments and 3 return values
+ * @desc: SCM call descriptor containing arguments
+ * @res:  SCM call return values
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+int scm_legacy_call_atomic(struct device *unused,
+			   const struct qcom_scm_desc *desc,
+			   struct qcom_scm_res *res)
+{
+	int context_id;
+	struct arm_smccc_res smc_res;
+	size_t arglen = desc->arginfo & 0xf;
+
+	BUG_ON(arglen > SCM_LEGACY_ATOMIC_N_REG_ARGS);
+
+	arm_smccc_smc(SCM_LEGACY_ATOMIC_ID(desc->svc, desc->cmd, arglen),
+		      (unsigned long)&context_id,
+		      desc->args[0], desc->args[1], desc->args[2],
+		      desc->args[3], desc->args[4], 0, &smc_res);
+
+	if (res) {
+		res->result[0] = smc_res.a1;
+		res->result[1] = smc_res.a2;
+		res->result[2] = smc_res.a3;
+	}
+
+	return smc_res.a0;
+}
diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
new file mode 100644
index 0000000..497c13b
--- /dev/null
+++ b/drivers/firmware/qcom_scm-smc.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/qcom_scm.h>
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+/**
+ * struct arm_smccc_args
+ * @args:	The array of values used in registers in smc instruction
+ */
+struct arm_smccc_args {
+	unsigned long args[8];
+};
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+#define QCOM_SCM_EBUSY_WAIT_MS 30
+#define QCOM_SCM_EBUSY_MAX_RETRY 20
+
+#define SCM_SMC_N_REG_ARGS	4
+#define SCM_SMC_FIRST_EXT_IDX	(SCM_SMC_N_REG_ARGS - 1)
+#define SCM_SMC_N_EXT_ARGS	(MAX_QCOM_SCM_ARGS - SCM_SMC_N_REG_ARGS + 1)
+#define SCM_SMC_FIRST_REG_IDX	2
+#define SCM_SMC_LAST_REG_IDX	(SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1)
+
+static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
+			       struct arm_smccc_res *res)
+{
+	unsigned long a0 = smc->args[0];
+	struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
+
+	quirk.state.a6 = 0;
+
+	do {
+		arm_smccc_smc_quirk(a0, smc->args[1], smc->args[2],
+				    smc->args[3], smc->args[4], smc->args[5],
+				    quirk.state.a6, smc->args[7], res, &quirk);
+
+		if (res->a0 == QCOM_SCM_INTERRUPTED)
+			a0 = res->a0;
+
+	} while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+static void __scm_smc_do(const struct arm_smccc_args *smc,
+			 struct arm_smccc_res *res, bool atomic)
+{
+	int retry_count = 0;
+
+	if (atomic) {
+		__scm_smc_do_quirk(smc, res);
+		return;
+	}
+
+	do {
+		mutex_lock(&qcom_scm_lock);
+
+		__scm_smc_do_quirk(smc, res);
+
+		mutex_unlock(&qcom_scm_lock);
+
+		if (res->a0 == QCOM_SCM_V2_EBUSY) {
+			if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+				break;
+			msleep(QCOM_SCM_EBUSY_WAIT_MS);
+		}
+	}  while (res->a0 == QCOM_SCM_V2_EBUSY);
+}
+
+int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+		 struct qcom_scm_res *res, bool atomic)
+{
+	int arglen = desc->arginfo & 0xf;
+	int i;
+	dma_addr_t args_phys = 0;
+	void *args_virt = NULL;
+	size_t alloc_len;
+	gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
+	u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
+	u32 qcom_smccc_convention =
+			(qcom_scm_convention == SMC_CONVENTION_ARM_32) ?
+			ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
+	struct arm_smccc_res smc_res;
+	struct arm_smccc_args smc = {0};
+
+	smc.args[0] = ARM_SMCCC_CALL_VAL(
+		smccc_call_type,
+		qcom_smccc_convention,
+		desc->owner,
+		SCM_SMC_FNID(desc->svc, desc->cmd));
+	smc.args[1] = desc->arginfo;
+	for (i = 0; i < SCM_SMC_N_REG_ARGS; i++)
+		smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
+
+	if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
+		alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
+		args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
+
+		if (!args_virt)
+			return -ENOMEM;
+
+		if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
+			__le32 *args = args_virt;
+
+			for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
+				args[i] = cpu_to_le32(desc->args[i +
+						      SCM_SMC_FIRST_EXT_IDX]);
+		} else {
+			__le64 *args = args_virt;
+
+			for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
+				args[i] = cpu_to_le64(desc->args[i +
+						      SCM_SMC_FIRST_EXT_IDX]);
+		}
+
+		args_phys = dma_map_single(dev, args_virt, alloc_len,
+					   DMA_TO_DEVICE);
+
+		if (dma_mapping_error(dev, args_phys)) {
+			kfree(args_virt);
+			return -ENOMEM;
+		}
+
+		smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
+	}
+
+	__scm_smc_do(&smc, &smc_res, atomic);
+
+	if (args_virt) {
+		dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
+		kfree(args_virt);
+	}
+
+	if (res) {
+		res->result[0] = smc_res.a1;
+		res->result[1] = smc_res.a2;
+		res->result[2] = smc_res.a3;
+	}
+
+	return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
+}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 1ba0df4..059bb0f 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -1,8 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- * Qualcomm SCM driver
- *
- * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
  * Copyright (C) 2015 Linaro Ltd.
  */
 #include <linux/platform_device.h>
@@ -19,6 +16,7 @@
 #include <linux/of_platform.h>
 #include <linux/clk.h>
 #include <linux/reset-controller.h>
+#include <linux/arm-smccc.h>
 
 #include "qcom_scm.h"
 
@@ -52,6 +50,35 @@ struct qcom_scm_mem_map_info {
 	__le64 mem_size;
 };
 
+#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
+#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
+#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
+#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20
+
+#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
+#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
+#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
+#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40
+
+struct qcom_scm_wb_entry {
+	int flag;
+	void *entry;
+};
+
+static struct qcom_scm_wb_entry qcom_scm_wb[] = {
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
+};
+
+static const char *qcom_scm_convention_names[] = {
+	[SMC_CONVENTION_UNKNOWN] = "unknown",
+	[SMC_CONVENTION_ARM_32] = "smc arm 32",
+	[SMC_CONVENTION_ARM_64] = "smc arm 64",
+	[SMC_CONVENTION_LEGACY] = "smc legacy",
+};
+
 static struct qcom_scm *__scm;
 
 static int qcom_scm_clk_enable(void)
@@ -87,19 +114,142 @@ static void qcom_scm_clk_disable(void)
 	clk_disable_unprepare(__scm->bus_clk);
 }
 
-/**
- * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the cold boot address of the cpus. Any cpu outside the supported
- * range would be removed from the cpu present mask.
- */
-int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+					u32 cmd_id);
+
+enum qcom_scm_convention qcom_scm_convention;
+static bool has_queried __read_mostly;
+static DEFINE_SPINLOCK(query_lock);
+
+static void __query_convention(void)
 {
-	return __qcom_scm_set_cold_boot_addr(entry, cpus);
+	unsigned long flags;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_INFO,
+		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
+					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
+			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
+		.arginfo = QCOM_SCM_ARGS(1),
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+	int ret;
+
+	spin_lock_irqsave(&query_lock, flags);
+	if (has_queried)
+		goto out;
+
+	qcom_scm_convention = SMC_CONVENTION_ARM_64;
+	// Device isn't required as there is only one argument - no device
+	// needed to dma_map_single to secure world
+	ret = scm_smc_call(NULL, &desc, &res, true);
+	if (!ret && res.result[0] == 1)
+		goto out;
+
+	qcom_scm_convention = SMC_CONVENTION_ARM_32;
+	ret = scm_smc_call(NULL, &desc, &res, true);
+	if (!ret && res.result[0] == 1)
+		goto out;
+
+	qcom_scm_convention = SMC_CONVENTION_LEGACY;
+out:
+	has_queried = true;
+	spin_unlock_irqrestore(&query_lock, flags);
+	pr_info("qcom_scm: convention: %s\n",
+		qcom_scm_convention_names[qcom_scm_convention]);
 }
-EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
+
+static inline enum qcom_scm_convention __get_convention(void)
+{
+	if (unlikely(!has_queried))
+		__query_convention();
+	return qcom_scm_convention;
+}
+
+/**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev:	device
+ * @desc:	Descriptor structure containing arguments and return values
+ * @res:	Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ */
+static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
+			 struct qcom_scm_res *res)
+{
+	might_sleep();
+	switch (__get_convention()) {
+	case SMC_CONVENTION_ARM_32:
+	case SMC_CONVENTION_ARM_64:
+		return scm_smc_call(dev, desc, res, false);
+	case SMC_CONVENTION_LEGACY:
+		return scm_legacy_call(dev, desc, res);
+	default:
+		pr_err("Unknown current SCM calling convention.\n");
+		return -EINVAL;
+	}
+}
+
+/**
+ * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
+ * @dev:	device
+ * @desc:	Descriptor structure containing arguments and return values
+ * @res:	Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This can be called in atomic context.
+ */
+static int qcom_scm_call_atomic(struct device *dev,
+				const struct qcom_scm_desc *desc,
+				struct qcom_scm_res *res)
+{
+	switch (__get_convention()) {
+	case SMC_CONVENTION_ARM_32:
+	case SMC_CONVENTION_ARM_64:
+		return scm_smc_call(dev, desc, res, true);
+	case SMC_CONVENTION_LEGACY:
+		return scm_legacy_call_atomic(dev, desc, res);
+	default:
+		pr_err("Unknown current SCM calling convention.\n");
+		return -EINVAL;
+	}
+}
+
+static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+					u32 cmd_id)
+{
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_INFO,
+		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+
+	desc.arginfo = QCOM_SCM_ARGS(1);
+	switch (__get_convention()) {
+	case SMC_CONVENTION_ARM_32:
+	case SMC_CONVENTION_ARM_64:
+		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
+				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
+		break;
+	case SMC_CONVENTION_LEGACY:
+		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
+		break;
+	default:
+		pr_err("Unknown SMC convention being used\n");
+		return -EINVAL;
+	}
+
+	ret = qcom_scm_call(dev, &desc, &res);
+
+	return ret ? : res.result[0];
+}
 
 /**
  * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
@@ -111,11 +261,85 @@ EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
  */
 int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
 {
-	return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
+	int ret;
+	int flags = 0;
+	int cpu;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_BOOT,
+		.cmd = QCOM_SCM_BOOT_SET_ADDR,
+		.arginfo = QCOM_SCM_ARGS(2),
+	};
+
+	/*
+	 * Reassign only if we are switching from hotplug entry point
+	 * to cpuidle entry point or vice versa.
+	 */
+	for_each_cpu(cpu, cpus) {
+		if (entry == qcom_scm_wb[cpu].entry)
+			continue;
+		flags |= qcom_scm_wb[cpu].flag;
+	}
+
+	/* No change in entry function */
+	if (!flags)
+		return 0;
+
+	desc.args[0] = flags;
+	desc.args[1] = virt_to_phys(entry);
+
+	ret = qcom_scm_call(__scm->dev, &desc, NULL);
+	if (!ret) {
+		for_each_cpu(cpu, cpus)
+			qcom_scm_wb[cpu].entry = entry;
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
 
 /**
+ * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the cold boot address of the cpus. Any cpu outside the supported
+ * range would be removed from the cpu present mask.
+ */
+int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+	int flags = 0;
+	int cpu;
+	int scm_cb_flags[] = {
+		QCOM_SCM_FLAG_COLDBOOT_CPU0,
+		QCOM_SCM_FLAG_COLDBOOT_CPU1,
+		QCOM_SCM_FLAG_COLDBOOT_CPU2,
+		QCOM_SCM_FLAG_COLDBOOT_CPU3,
+	};
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_BOOT,
+		.cmd = QCOM_SCM_BOOT_SET_ADDR,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+
+	if (!cpus || (cpus && cpumask_empty(cpus)))
+		return -EINVAL;
+
+	for_each_cpu(cpu, cpus) {
+		if (cpu < ARRAY_SIZE(scm_cb_flags))
+			flags |= scm_cb_flags[cpu];
+		else
+			set_cpu_present(cpu, false);
+	}
+
+	desc.args[0] = flags;
+	desc.args[1] = virt_to_phys(entry);
+
+	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
+
+/**
  * qcom_scm_cpu_power_down() - Power down the cpu
  * @flags - Flags to flush cache
  *
@@ -125,111 +349,73 @@ EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
  */
 void qcom_scm_cpu_power_down(u32 flags)
 {
-	__qcom_scm_cpu_power_down(flags);
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_BOOT,
+		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
+		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+
+	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
 }
 EXPORT_SYMBOL(qcom_scm_cpu_power_down);
 
-/**
- * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
- *
- * Return true if HDCP is supported, false if not.
- */
-bool qcom_scm_hdcp_available(void)
+int qcom_scm_set_remote_state(u32 state, u32 id)
 {
-	int ret = qcom_scm_clk_enable();
-
-	if (ret)
-		return ret;
-
-	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
-						QCOM_SCM_CMD_HDCP);
-
-	qcom_scm_clk_disable();
-
-	return ret > 0 ? true : false;
-}
-EXPORT_SYMBOL(qcom_scm_hdcp_available);
-
-/**
- * qcom_scm_hdcp_req() - Send HDCP request.
- * @req: HDCP request array
- * @req_cnt: HDCP request array count
- * @resp: response buffer passed to SCM
- *
- * Write HDCP register(s) through SCM.
- */
-int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
-{
-	int ret = qcom_scm_clk_enable();
-
-	if (ret)
-		return ret;
-
-	ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
-	qcom_scm_clk_disable();
-	return ret;
-}
-EXPORT_SYMBOL(qcom_scm_hdcp_req);
-
-/**
- * qcom_scm_pas_supported() - Check if the peripheral authentication service is
- *			      available for the given peripherial
- * @peripheral:	peripheral id
- *
- * Returns true if PAS is supported for this peripheral, otherwise false.
- */
-bool qcom_scm_pas_supported(u32 peripheral)
-{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_BOOT,
+		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = state,
+		.args[1] = id,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
 	int ret;
 
-	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
-					   QCOM_SCM_PAS_IS_SUPPORTED_CMD);
-	if (ret <= 0)
-		return false;
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
 
-	return __qcom_scm_pas_supported(__scm->dev, peripheral);
+	return ret ? : res.result[0];
 }
-EXPORT_SYMBOL(qcom_scm_pas_supported);
+EXPORT_SYMBOL(qcom_scm_set_remote_state);
 
-/**
- * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
- */
-bool qcom_scm_ocmem_lock_available(void)
+static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
 {
-	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_OCMEM_SVC,
-					    QCOM_SCM_OCMEM_LOCK_CMD);
-}
-EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_BOOT,
+		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
 
-/**
- * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
- * region to the specified initiator
- *
- * @id:     tz initiator id
- * @offset: OCMEM offset
- * @size:   OCMEM size
- * @mode:   access mode (WIDE/NARROW)
- */
-int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
-			u32 mode)
-{
-	return __qcom_scm_ocmem_lock(__scm->dev, id, offset, size, mode);
-}
-EXPORT_SYMBOL(qcom_scm_ocmem_lock);
+	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
 
-/**
- * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
- * region from the specified initiator
- *
- * @id:     tz initiator id
- * @offset: OCMEM offset
- * @size:   OCMEM size
- */
-int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
-{
-	return __qcom_scm_ocmem_unlock(__scm->dev, id, offset, size);
+	return qcom_scm_call(__scm->dev, &desc, NULL);
 }
-EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
+
+static void qcom_scm_set_download_mode(bool enable)
+{
+	bool avail;
+	int ret = 0;
+
+	avail = __qcom_scm_is_call_available(__scm->dev,
+					     QCOM_SCM_SVC_BOOT,
+					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
+	if (avail) {
+		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
+	} else if (__scm->dload_mode_addr) {
+		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
+				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
+	} else {
+		dev_err(__scm->dev,
+			"No available mechanism for setting download mode\n");
+	}
+
+	if (ret)
+		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
+}
 
 /**
  * qcom_scm_pas_init_image() - Initialize peripheral authentication service
@@ -248,6 +434,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
 	dma_addr_t mdata_phys;
 	void *mdata_buf;
 	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
+		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
+		.args[0] = peripheral,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
 
 	/*
 	 * During the scm call memory protection will be enabled for the meta
@@ -266,14 +460,16 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
 	if (ret)
 		goto free_metadata;
 
-	ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);
+	desc.args[1] = mdata_phys;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
 
 	qcom_scm_clk_disable();
 
 free_metadata:
 	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
 
-	return ret;
+	return ret ? : res.result[0];
 }
 EXPORT_SYMBOL(qcom_scm_pas_init_image);
 
@@ -289,15 +485,25 @@ EXPORT_SYMBOL(qcom_scm_pas_init_image);
 int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
 {
 	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
+		.arginfo = QCOM_SCM_ARGS(3),
+		.args[0] = peripheral,
+		.args[1] = addr,
+		.args[2] = size,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
 
 	ret = qcom_scm_clk_enable();
 	if (ret)
 		return ret;
 
-	ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
 	qcom_scm_clk_disable();
 
-	return ret;
+	return ret ? : res.result[0];
 }
 EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
 
@@ -311,15 +517,23 @@ EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
 int qcom_scm_pas_auth_and_reset(u32 peripheral)
 {
 	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = peripheral,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
 
 	ret = qcom_scm_clk_enable();
 	if (ret)
 		return ret;
 
-	ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
 	qcom_scm_clk_disable();
 
-	return ret;
+	return ret ? : res.result[0];
 }
 EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
 
@@ -332,18 +546,75 @@ EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
 int qcom_scm_pas_shutdown(u32 peripheral)
 {
 	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = peripheral,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
 
 	ret = qcom_scm_clk_enable();
 	if (ret)
 		return ret;
 
-	ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
 	qcom_scm_clk_disable();
 
-	return ret;
+	return ret ? : res.result[0];
 }
 EXPORT_SYMBOL(qcom_scm_pas_shutdown);
 
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ *			      available for the given peripheral
+ * @peripheral:	peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = peripheral,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+
+	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+					   QCOM_SCM_PIL_PAS_IS_SUPPORTED);
+	if (ret <= 0)
+		return false;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+	return ret ? false : !!res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = reset,
+		.args[1] = 0,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+	int ret;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+	return ret ? : res.result[0];
+}
+
 static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
 				     unsigned long idx)
 {
@@ -367,6 +638,43 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = {
 	.deassert = qcom_scm_pas_reset_deassert,
 };
 
+int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_IO,
+		.cmd = QCOM_SCM_IO_READ,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = addr,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+	int ret;
+
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+	if (ret >= 0)
+		*val = res.result[0];
+
+	return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL(qcom_scm_io_readl);
+
+int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_IO,
+		.cmd = QCOM_SCM_IO_WRITE,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = addr,
+		.args[1] = val,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+
+
+	return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_io_writel);
+
 /**
  * qcom_scm_restore_sec_cfg_available() - Check if secure environment
  * supports restore security config interface.
@@ -376,110 +684,108 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = {
 bool qcom_scm_restore_sec_cfg_available(void)
 {
 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
-					    QCOM_SCM_RESTORE_SEC_CFG);
+					    QCOM_SCM_MP_RESTORE_SEC_CFG);
 }
 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
 
 int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
 {
-	return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_MP,
+		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = device_id,
+		.args[1] = spare,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+	int ret;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+	return ret ? : res.result[0];
 }
 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
 
 int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
 {
-	return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size);
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_MP,
+		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = spare,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+	int ret;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+	if (size)
+		*size = res.result[0];
+
+	return ret ? : res.result[1];
 }
 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
 
 int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
 {
-	return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare);
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_MP,
+		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
+		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
+					 QCOM_SCM_VAL),
+		.args[0] = addr,
+		.args[1] = size,
+		.args[2] = spare,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	int ret;
+
+
+	ret = qcom_scm_call(__scm->dev, &desc, NULL);
+
+	/* the pg table has been initialized already, ignore the error */
+	if (ret == -EPERM)
+		ret = 0;
+
+	return ret;
 }
 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
 
-int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
+				 size_t mem_sz, phys_addr_t src, size_t src_sz,
+				 phys_addr_t dest, size_t dest_sz)
 {
-	return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en);
-}
-EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
-
-int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
-{
-	return __qcom_scm_io_readl(__scm->dev, addr, val);
-}
-EXPORT_SYMBOL(qcom_scm_io_readl);
-
-int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
-{
-	return __qcom_scm_io_writel(__scm->dev, addr, val);
-}
-EXPORT_SYMBOL(qcom_scm_io_writel);
-
-static void qcom_scm_set_download_mode(bool enable)
-{
-	bool avail;
-	int ret = 0;
-
-	avail = __qcom_scm_is_call_available(__scm->dev,
-					     QCOM_SCM_SVC_BOOT,
-					     QCOM_SCM_SET_DLOAD_MODE);
-	if (avail) {
-		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
-	} else if (__scm->dload_mode_addr) {
-		ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
-					   enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
-	} else {
-		dev_err(__scm->dev,
-			"No available mechanism for setting download mode\n");
-	}
-
-	if (ret)
-		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
-}
-
-static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
-{
-	struct device_node *tcsr;
-	struct device_node *np = dev->of_node;
-	struct resource res;
-	u32 offset;
 	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_MP,
+		.cmd = QCOM_SCM_MP_ASSIGN,
+		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
+					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
+					 QCOM_SCM_VAL, QCOM_SCM_VAL),
+		.args[0] = mem_region,
+		.args[1] = mem_sz,
+		.args[2] = src,
+		.args[3] = src_sz,
+		.args[4] = dest,
+		.args[5] = dest_sz,
+		.args[6] = 0,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
 
-	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
-	if (!tcsr)
-		return 0;
+	ret = qcom_scm_call(dev, &desc, &res);
 
-	ret = of_address_to_resource(tcsr, 0, &res);
-	of_node_put(tcsr);
-	if (ret)
-		return ret;
-
-	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
-	if (ret < 0)
-		return ret;
-
-	*addr = res.start + offset;
-
-	return 0;
+	return ret ? : res.result[0];
 }
 
 /**
- * qcom_scm_is_available() - Checks if SCM is available
- */
-bool qcom_scm_is_available(void)
-{
-	return !!__scm;
-}
-EXPORT_SYMBOL(qcom_scm_is_available);
-
-int qcom_scm_set_remote_state(u32 state, u32 id)
-{
-	return __qcom_scm_set_remote_state(__scm->dev, state, id);
-}
-EXPORT_SYMBOL(qcom_scm_set_remote_state);
-
-/**
  * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
  * @mem_addr: mem region whose ownership need to be reassigned
  * @mem_sz:   size of the region.
@@ -561,6 +867,184 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
 }
 EXPORT_SYMBOL(qcom_scm_assign_mem);
 
+/**
+ * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
+ */
+bool qcom_scm_ocmem_lock_available(void)
+{
+	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
+					    QCOM_SCM_OCMEM_LOCK_CMD);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
+
+/**
+ * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
+ * region to the specified initiator
+ *
+ * @id:     tz initiator id
+ * @offset: OCMEM offset
+ * @size:   OCMEM size
+ * @mode:   access mode (WIDE/NARROW)
+ */
+int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
+			u32 mode)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_OCMEM,
+		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
+		.args[0] = id,
+		.args[1] = offset,
+		.args[2] = size,
+		.args[3] = mode,
+		.arginfo = QCOM_SCM_ARGS(4),
+	};
+
+	return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_lock);
+
+/**
+ * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
+ * region from the specified initiator
+ *
+ * @id:     tz initiator id
+ * @offset: OCMEM offset
+ * @size:   OCMEM size
+ */
+int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_OCMEM,
+		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
+		.args[0] = id,
+		.args[1] = offset,
+		.args[2] = size,
+		.arginfo = QCOM_SCM_ARGS(3),
+	};
+
+	return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
+
+/**
+ * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
+ *
+ * Return true if HDCP is supported, false if not.
+ */
+bool qcom_scm_hdcp_available(void)
+{
+	int ret = qcom_scm_clk_enable();
+
+	if (ret)
+		return ret;
+
+	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+						QCOM_SCM_HDCP_INVOKE);
+
+	qcom_scm_clk_disable();
+
+	return ret > 0 ? true : false;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_available);
+
+/**
+ * qcom_scm_hdcp_req() - Send HDCP request.
+ * @req: HDCP request array
+ * @req_cnt: HDCP request array count
+ * @resp: response buffer passed to SCM
+ *
+ * Write HDCP register(s) through SCM.
+ */
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+{
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_HDCP,
+		.cmd = QCOM_SCM_HDCP_INVOKE,
+		.arginfo = QCOM_SCM_ARGS(10),
+		.args = {
+			req[0].addr,
+			req[0].val,
+			req[1].addr,
+			req[1].val,
+			req[2].addr,
+			req[2].val,
+			req[3].addr,
+			req[3].val,
+			req[4].addr,
+			req[4].val
+		},
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+
+	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+		return -ERANGE;
+
+	ret = qcom_scm_clk_enable();
+	if (ret)
+		return ret;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+	*resp = res.result[0];
+
+	qcom_scm_clk_disable();
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
+		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
+		.args[1] = en,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+
+
+	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+
+static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
+{
+	struct device_node *tcsr;
+	struct device_node *np = dev->of_node;
+	struct resource res;
+	u32 offset;
+	int ret;
+
+	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
+	if (!tcsr)
+		return 0;
+
+	ret = of_address_to_resource(tcsr, 0, &res);
+	of_node_put(tcsr);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
+	if (ret < 0)
+		return ret;
+
+	*addr = res.start + offset;
+
+	return 0;
+}
+
+/**
+ * qcom_scm_is_available() - Checks if SCM is available
+ */
+bool qcom_scm_is_available(void)
+{
+	return !!__scm;
+}
+EXPORT_SYMBOL(qcom_scm_is_available);
+
 static int qcom_scm_probe(struct platform_device *pdev)
 {
 	struct qcom_scm *scm;
@@ -631,7 +1115,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
 	__scm = scm;
 	__scm->dev = &pdev->dev;
 
-	__qcom_scm_init();
+	__query_convention();
 
 	/*
 	 * If requested enable "download mode", from this point on warmboot
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 81dcf5f..d9ed670 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -1,72 +1,117 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2015,2019 The Linux Foundation. All rights reserved.
  */
 #ifndef __QCOM_SCM_INT_H
 #define __QCOM_SCM_INT_H
 
-#define QCOM_SCM_SVC_BOOT		0x1
-#define QCOM_SCM_BOOT_ADDR		0x1
-#define QCOM_SCM_SET_DLOAD_MODE		0x10
-#define QCOM_SCM_BOOT_ADDR_MC		0x11
-#define QCOM_SCM_SET_REMOTE_STATE	0xa
-extern int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id);
-extern int __qcom_scm_set_dload_mode(struct device *dev, bool enable);
+enum qcom_scm_convention {
+	SMC_CONVENTION_UNKNOWN,
+	SMC_CONVENTION_LEGACY,
+	SMC_CONVENTION_ARM_32,
+	SMC_CONVENTION_ARM_64,
+};
 
-#define QCOM_SCM_FLAG_HLOS		0x01
-#define QCOM_SCM_FLAG_COLDBOOT_MC	0x02
-#define QCOM_SCM_FLAG_WARMBOOT_MC	0x04
-extern int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
-		const cpumask_t *cpus);
-extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
+extern enum qcom_scm_convention qcom_scm_convention;
 
-#define QCOM_SCM_CMD_TERMINATE_PC	0x2
+#define MAX_QCOM_SCM_ARGS 10
+#define MAX_QCOM_SCM_RETS 3
+
+enum qcom_scm_arg_types {
+	QCOM_SCM_VAL,
+	QCOM_SCM_RO,
+	QCOM_SCM_RW,
+	QCOM_SCM_BUFVAL,
+};
+
+#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
+			   (((a) & 0x3) << 4) | \
+			   (((b) & 0x3) << 6) | \
+			   (((c) & 0x3) << 8) | \
+			   (((d) & 0x3) << 10) | \
+			   (((e) & 0x3) << 12) | \
+			   (((f) & 0x3) << 14) | \
+			   (((g) & 0x3) << 16) | \
+			   (((h) & 0x3) << 18) | \
+			   (((i) & 0x3) << 20) | \
+			   (((j) & 0x3) << 22) | \
+			   ((num) & 0xf))
+
+#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+
+/**
+ * struct qcom_scm_desc
+ * @arginfo:	Metadata describing the arguments in args[]
+ * @args:	The array of arguments for the secure syscall
+ */
+struct qcom_scm_desc {
+	u32 svc;
+	u32 cmd;
+	u32 arginfo;
+	u64 args[MAX_QCOM_SCM_ARGS];
+	u32 owner;
+};
+
+/**
+ * struct qcom_scm_res
+ * @result:	The values returned by the secure syscall
+ */
+struct qcom_scm_res {
+	u64 result[MAX_QCOM_SCM_RETS];
+};
+
+#define SCM_SMC_FNID(s, c)	((((s) & 0xFF) << 8) | ((c) & 0xFF))
+extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+			struct qcom_scm_res *res, bool atomic);
+
+#define SCM_LEGACY_FNID(s, c)	(((s) << 10) | ((c) & 0x3ff))
+extern int scm_legacy_call_atomic(struct device *dev,
+				  const struct qcom_scm_desc *desc,
+				  struct qcom_scm_res *res);
+extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
+			   struct qcom_scm_res *res);
+
+#define QCOM_SCM_SVC_BOOT		0x01
+#define QCOM_SCM_BOOT_SET_ADDR		0x01
+#define QCOM_SCM_BOOT_TERMINATE_PC	0x02
+#define QCOM_SCM_BOOT_SET_DLOAD_MODE	0x10
+#define QCOM_SCM_BOOT_SET_REMOTE_STATE	0x0a
 #define QCOM_SCM_FLUSH_FLAG_MASK	0x3
-#define QCOM_SCM_CMD_CORE_HOTPLUGGED	0x10
-extern void __qcom_scm_cpu_power_down(u32 flags);
 
-#define QCOM_SCM_SVC_IO			0x5
-#define QCOM_SCM_IO_READ		0x1
-#define QCOM_SCM_IO_WRITE		0x2
-extern int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, unsigned int *val);
-extern int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val);
+#define QCOM_SCM_SVC_PIL		0x02
+#define QCOM_SCM_PIL_PAS_INIT_IMAGE	0x01
+#define QCOM_SCM_PIL_PAS_MEM_SETUP	0x02
+#define QCOM_SCM_PIL_PAS_AUTH_AND_RESET	0x05
+#define QCOM_SCM_PIL_PAS_SHUTDOWN	0x06
+#define QCOM_SCM_PIL_PAS_IS_SUPPORTED	0x07
+#define QCOM_SCM_PIL_PAS_MSS_RESET	0x0a
 
-#define QCOM_SCM_SVC_INFO		0x6
-#define QCOM_IS_CALL_AVAIL_CMD		0x1
-extern int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
-		u32 cmd_id);
+#define QCOM_SCM_SVC_IO			0x05
+#define QCOM_SCM_IO_READ		0x01
+#define QCOM_SCM_IO_WRITE		0x02
+
+#define QCOM_SCM_SVC_INFO		0x06
+#define QCOM_SCM_INFO_IS_CALL_AVAIL	0x01
+
+#define QCOM_SCM_SVC_MP				0x0c
+#define QCOM_SCM_MP_RESTORE_SEC_CFG		0x02
+#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE	0x03
+#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT	0x04
+#define QCOM_SCM_MP_ASSIGN			0x16
+
+#define QCOM_SCM_SVC_OCMEM		0x0f
+#define QCOM_SCM_OCMEM_LOCK_CMD		0x01
+#define QCOM_SCM_OCMEM_UNLOCK_CMD	0x02
 
 #define QCOM_SCM_SVC_HDCP		0x11
-#define QCOM_SCM_CMD_HDCP		0x01
-extern int __qcom_scm_hdcp_req(struct device *dev,
-		struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
+#define QCOM_SCM_HDCP_INVOKE		0x01
+
+#define QCOM_SCM_SVC_SMMU_PROGRAM		0x15
+#define QCOM_SCM_SMMU_CONFIG_ERRATA1		0x03
+#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL	0x02
 
 extern void __qcom_scm_init(void);
 
-#define QCOM_SCM_OCMEM_SVC			0xf
-#define QCOM_SCM_OCMEM_LOCK_CMD		0x1
-#define QCOM_SCM_OCMEM_UNLOCK_CMD		0x2
-
-extern int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset,
-				 u32 size, u32 mode);
-extern int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset,
-				   u32 size);
-
-#define QCOM_SCM_SVC_PIL		0x2
-#define QCOM_SCM_PAS_INIT_IMAGE_CMD	0x1
-#define QCOM_SCM_PAS_MEM_SETUP_CMD	0x2
-#define QCOM_SCM_PAS_AUTH_AND_RESET_CMD	0x5
-#define QCOM_SCM_PAS_SHUTDOWN_CMD	0x6
-#define QCOM_SCM_PAS_IS_SUPPORTED_CMD	0x7
-#define QCOM_SCM_PAS_MSS_RESET		0xa
-extern bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral);
-extern int  __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
-		dma_addr_t metadata_phys);
-extern int  __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
-		phys_addr_t addr, phys_addr_t size);
-extern int  __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral);
-extern int  __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral);
-extern int  __qcom_scm_pas_mss_reset(struct device *dev, bool reset);
-
 /* common error codes */
 #define QCOM_SCM_V2_EBUSY	-12
 #define QCOM_SCM_ENOMEM		-5
@@ -94,25 +139,4 @@ static inline int qcom_scm_remap_error(int err)
 	return -EINVAL;
 }
 
-#define QCOM_SCM_SVC_MP			0xc
-#define QCOM_SCM_RESTORE_SEC_CFG	2
-extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
-				      u32 spare);
-#define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE	3
-#define QCOM_SCM_IOMMU_SECURE_PTBL_INIT	4
-#define QCOM_SCM_SVC_SMMU_PROGRAM	0x15
-#define QCOM_SCM_CONFIG_ERRATA1		0x3
-#define QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL	0x2
-extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
-					     size_t *size);
-extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
-					     u32 size, u32 spare);
-extern int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev,
-						bool enable);
-#define QCOM_MEM_PROT_ASSIGN_ID	0x16
-extern int  __qcom_scm_assign_mem(struct device *dev,
-				  phys_addr_t mem_region, size_t mem_sz,
-				  phys_addr_t src, size_t src_sz,
-				  phys_addr_t dest, size_t dest_sz);
-
 #endif
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index c6c3140..7ffb42b 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -268,7 +268,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
 		 */
 		msleep(1000);
 		count_in_sec--;
-	};
+	}
 
 	if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec)
 		cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
@@ -512,7 +512,7 @@ static int svc_normal_to_secure_thread(void *data)
 			break;
 
 		}
-	};
+	}
 
 	kfree(cbdata);
 	kfree(pdata);
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 72be589..e27f684 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -197,7 +197,7 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
 		rwtm->serial_number = reply->status[1];
 		rwtm->serial_number <<= 32;
 		rwtm->serial_number |= reply->status[0];
-			rwtm->board_version = reply->status[2];
+		rwtm->board_version = reply->status[2];
 		rwtm->ram_size = reply->status[3];
 		reply_to_mac_addr(rwtm->mac_address1, reply->status[4],
 				  reply->status[5]);
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 75bdfaa..ecc339d 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -26,6 +26,9 @@
 
 static const struct zynqmp_eemi_ops *eemi_ops_tbl;
 
+static bool feature_check_enabled;
+static u32 zynqmp_pm_features[PM_API_MAX];
+
 static const struct mfd_cell firmware_devs[] = {
 	{
 		.name = "zynqmp_power_controller",
@@ -44,10 +47,14 @@ static int zynqmp_pm_ret_code(u32 ret_status)
 	case XST_PM_SUCCESS:
 	case XST_PM_DOUBLE_REQ:
 		return 0;
+	case XST_PM_NO_FEATURE:
+		return -ENOTSUPP;
 	case XST_PM_NO_ACCESS:
 		return -EACCES;
 	case XST_PM_ABORT_SUSPEND:
 		return -ECANCELED;
+	case XST_PM_MULT_USER:
+		return -EUSERS;
 	case XST_PM_INTERNAL:
 	case XST_PM_CONFLICT:
 	case XST_PM_INVALID_NODE:
@@ -127,6 +134,39 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
 }
 
 /**
+ * zynqmp_pm_feature() - Check whether given feature is supported or not
+ * @api_id:		API ID to check
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_feature(u32 api_id)
+{
+	int ret;
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	u64 smc_arg[2];
+
+	if (!feature_check_enabled)
+		return 0;
+
+	/* Return value if feature is already checked */
+	if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED)
+		return zynqmp_pm_features[api_id];
+
+	smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
+	smc_arg[1] = api_id;
+
+	ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+	if (ret) {
+		zynqmp_pm_features[api_id] = PM_FEATURE_INVALID;
+		return PM_FEATURE_INVALID;
+	}
+
+	zynqmp_pm_features[api_id] = ret_payload[1];
+
+	return zynqmp_pm_features[api_id];
+}
+
+/**
  * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
  *			   caller function depending on the configuration
  * @pm_api_id:		Requested PM-API call
@@ -160,6 +200,9 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
 	 */
 	u64 smc_arg[4];
 
+	if (zynqmp_pm_feature(pm_api_id) == PM_FEATURE_INVALID)
+		return -ENOTSUPP;
+
 	smc_arg[0] = PM_SIP_SVC | pm_api_id;
 	smc_arg[1] = ((u64)arg1 << 32) | arg0;
 	smc_arg[2] = ((u64)arg3 << 32) | arg2;
@@ -715,6 +758,8 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
 		np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
 		if (!np)
 			return 0;
+
+		feature_check_enabled = true;
 	}
 	of_node_put(np);
 
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index e4a34dc..65437b6 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -813,10 +813,8 @@ static int afu_dev_init(struct platform_device *pdev)
 static int afu_dev_destroy(struct platform_device *pdev)
 {
 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
-	struct dfl_afu *afu;
 
 	mutex_lock(&pdata->lock);
-	afu = dfl_fpga_pdata_get_private(pdata);
 	afu_mmio_region_destroy(pdata);
 	afu_dma_region_destroy(pdata);
 	dfl_fpga_pdata_set_private(pdata, NULL);
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 7c930e6..1d4690c 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -675,10 +675,8 @@ static int fme_dev_init(struct platform_device *pdev)
 static void fme_dev_destroy(struct platform_device *pdev)
 {
 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
-	struct dfl_fme *fme;
 
 	mutex_lock(&pdata->lock);
-	fme = dfl_fpga_pdata_get_private(pdata);
 	dfl_fpga_pdata_set_private(pdata, NULL);
 	mutex_unlock(&pdata->lock);
 }
diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c
index 9a17fe9..2888ff0 100644
--- a/drivers/fpga/ts73xx-fpga.c
+++ b/drivers/fpga/ts73xx-fpga.c
@@ -119,10 +119,8 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	priv->io_base = devm_ioremap_resource(kdev, res);
-	if (IS_ERR(priv->io_base)) {
-		dev_err(kdev, "unable to remap registers\n");
+	if (IS_ERR(priv->io_base))
 		return PTR_ERR(priv->io_base);
-	}
 
 	mgr = devm_fpga_mgr_create(kdev, "TS-73xx FPGA Manager",
 				   &ts73xx_fpga_ops, priv);
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index af9b387..7d69af2 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -101,7 +101,8 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
 
 	priv->clk = devm_clk_get(&pdev->dev, "aclk");
 	if (IS_ERR(priv->clk)) {
-		dev_err(&pdev->dev, "input clock not found\n");
+		if (PTR_ERR(priv->clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "input clock not found\n");
 		return PTR_ERR(priv->clk);
 	}
 
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index f57d95a..b8013cf 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -312,6 +312,12 @@
 	  IXP4xx series of chips.
 
 	  If unsure, say N.
+config GPIO_LOGICVC
+	tristate "Xylon LogiCVC GPIO support"
+	depends on MFD_SYSCON && OF
+	help
+	  Say yes here to support GPIO functionality of the Xylon LogiCVC
+	  programmable logic block.
 
 config GPIO_LOONGSON
 	bool "Loongson-2/3 GPIO support"
@@ -335,14 +341,6 @@
 	  Select this option to enable GPIO driver for
 	  NXP LPC32XX devices.
 
-config GPIO_LYNXPOINT
-	tristate "Intel Lynxpoint GPIO support"
-	depends on ACPI && X86
-	select GPIOLIB_IRQCHIP
-	help
-	  driver for GPIO functionality on Intel Lynxpoint PCH chipset
-	  Requires ACPI device enumeration code to set up a platform device.
-
 config GPIO_MB86S7X
 	tristate "GPIO support for Fujitsu MB86S7x Platforms"
 	help
@@ -582,6 +580,7 @@
 	tristate "Cavium ThunderX/OCTEON-TX GPIO"
 	depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
 	depends on PCI_MSI
+	select GPIOLIB_IRQCHIP
 	select IRQ_DOMAIN_HIERARCHY
 	select IRQ_FASTEOI_HIERARCHY_HANDLERS
 	help
@@ -621,6 +620,13 @@
 	  additional drivers must be enabled in order to use the
 	  functionality of the device.
 
+config GPIO_WCD934X
+	tristate "Qualcomm Technologies Inc WCD9340/WCD9341 gpio controller driver"
+	depends on MFD_WCD934X && OF_GPIO
+	help
+	  This driver supports the GPIO block found on the Qualcomm
+	  Technologies Inc WCD9340/WCD9341 Audio Codec.
+
 config GPIO_XGENE
 	bool "APM X-Gene GPIO controller support"
 	depends on ARM64 && OF_GPIO
@@ -1029,6 +1035,18 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called gpio-bd70528.
 
+config GPIO_BD71828
+	tristate "ROHM BD71828 GPIO support"
+	depends on MFD_ROHM_BD71828
+	help
+	  Support for GPIOs on ROHM BD71828 PMIC. There are three GPIOs
+	  available on the ROHM PMIC in total. The GPIOs are limited to
+	  outputs only and pins must be configured to GPIO outputs by
+	  OTP. Enable this only if you want to use these pins as outputs.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called gpio-bd71828.
+
 config GPIO_BD9571MWV
 	tristate "ROHM BD9571 GPIO support"
 	depends on MFD_BD9571MWV
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 11eeeeb..0b57126 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -37,6 +37,7 @@
 obj-$(CONFIG_GPIO_BCM_KONA)		+= gpio-bcm-kona.o
 obj-$(CONFIG_GPIO_BCM_XGS_IPROC)	+= gpio-xgs-iproc.o
 obj-$(CONFIG_GPIO_BD70528)		+= gpio-bd70528.o
+obj-$(CONFIG_GPIO_BD71828)		+= gpio-bd71828.o
 obj-$(CONFIG_GPIO_BD9571MWV)		+= gpio-bd9571mwv.o
 obj-$(CONFIG_GPIO_BRCMSTB)		+= gpio-brcmstb.o
 obj-$(CONFIG_GPIO_BT8XX)		+= gpio-bt8xx.o
@@ -69,6 +70,7 @@
 obj-$(CONFIG_GPIO_IXP4XX)		+= gpio-ixp4xx.o
 obj-$(CONFIG_GPIO_JANZ_TTL)		+= gpio-janz-ttl.o
 obj-$(CONFIG_GPIO_KEMPLD)		+= gpio-kempld.o
+obj-$(CONFIG_GPIO_LOGICVC)		+= gpio-logicvc.o
 obj-$(CONFIG_GPIO_LOONGSON1)		+= gpio-loongson1.o
 obj-$(CONFIG_GPIO_LOONGSON)		+= gpio-loongson.o
 obj-$(CONFIG_GPIO_LP3943)		+= gpio-lp3943.o
@@ -76,7 +78,6 @@
 obj-$(CONFIG_GPIO_LP87565)		+= gpio-lp87565.o
 obj-$(CONFIG_GPIO_LPC18XX)		+= gpio-lpc18xx.o
 obj-$(CONFIG_GPIO_LPC32XX)		+= gpio-lpc32xx.o
-obj-$(CONFIG_GPIO_LYNXPOINT)		+= gpio-lynxpoint.o
 obj-$(CONFIG_GPIO_MADERA)		+= gpio-madera.o
 obj-$(CONFIG_GPIO_MAX3191X)		+= gpio-max3191x.o
 obj-$(CONFIG_GPIO_MAX7300)		+= gpio-max7300.o
@@ -158,6 +159,7 @@
 obj-$(CONFIG_GPIO_VIPERBOARD)		+= gpio-viperboard.o
 obj-$(CONFIG_GPIO_VR41XX)		+= gpio-vr41xx.o
 obj-$(CONFIG_GPIO_VX855)		+= gpio-vx855.o
+obj-$(CONFIG_GPIO_WCD934X)		+= gpio-wcd934x.o
 obj-$(CONFIG_GPIO_WHISKEY_COVE)		+= gpio-wcove.o
 obj-$(CONFIG_GPIO_WINBOND)		+= gpio-winbond.o
 obj-$(CONFIG_GPIO_WM831X)		+= gpio-wm831x.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 76f8c7f..3a44e6a 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -10,6 +10,28 @@
 ideally have no use or idea of the global GPIO numberspace that has/was
 used in the inception of the GPIO subsystem.
 
+The numberspace issue is the same reason the irq subsystem is moving away
+from irq numbers to IRQ descriptors.
+
+The underlying motivation for this is that the GPIO numberspace has become
+unmanageable: machine board files tend to become full of macros trying to
+establish the numberspace at compile-time, making it hard to add any numbers
+in the middle (such as if you missed a pin on a chip) without the numberspace
+breaking.
+
+Machine descriptions such as device tree or ACPI do not have a concept of the
+Linux GPIO number, as those descriptions are external to the Linux kernel
+and treat GPIO lines as abstract entities.
+
+The runtime-assigned GPIO numberspace (what you get if you assign the GPIO
+base as -1 in struct gpio_chip) has also become unpredictable due to factors
+such as probe ordering and the introduction of -EPROBE_DEFER, which make the
+probe ordering of independent GPIO chips essentially random, as their base
+numbers are assigned on a first-come, first-served basis.
+
+The best way to get out of the problem is to make the global GPIO numbers
+unimportant by simply not using them. GPIO descriptors deal with this.
+
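+For illustration, here is a minimal consumer-side sketch of the
+descriptor-based API (the foo_probe() function and the "reset" line name
+are only examples): the line is looked up by function name from the
+firmware description, so no global GPIO number is ever involved.
+
+  #include <linux/err.h>
+  #include <linux/gpio/consumer.h>
+  #include <linux/platform_device.h>
+
+  static int foo_probe(struct platform_device *pdev)
+  {
+          struct gpio_desc *reset;
+
+          /* Request the "reset" line as an output, driven inactive. */
+          reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
+          if (IS_ERR(reset))
+                  return PTR_ERR(reset);
+
+          gpiod_set_value(reset, 1);      /* assert the reset line */
+          gpiod_set_value(reset, 0);      /* deassert the reset line */
+
+          return 0;
+  }
+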
 Work items:
 
 - Convert all GPIO device drivers to only #include <linux/gpio/driver.h>
@@ -33,7 +55,7 @@
 driver infrastructure for doing simpler MMIO GPIO devices and there was
 no core support for parsing device tree GPIOs from the core library with
 the [devm_]gpiod_get() calls we have today that will implicitly go into
-the device tree back-end.
+the device tree back-end. It is legacy and should not be used in new code.
 
 Work items:
 
@@ -59,6 +81,15 @@
   uses <linux/gpio/consumer.h> or <linux/gpio/driver.h> instead.
 
 
+Get rid of <linux/gpio.h>
+
+This legacy header is a one-stop shop for anything GPIO and is closely tied
+to the global GPIO numberspace. The endgame of the above refactorings will
+be the removal of <linux/gpio.h>; from that point on, only the specialized
+headers under <linux/gpio/*.h> will be used. This requires all the above to
+be completed and is expected to take a long time.
+
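+As a rough sketch of what this migration looks like in an individual file
+(the file itself is hypothetical), the legacy include is replaced by only
+what is actually needed:
+
+  /* Before: the legacy one-stop-shop header. */
+  #include <linux/gpio.h>
+
+  /* After: a consumer only needs the descriptor-based consumer API ... */
+  #include <linux/gpio/consumer.h>
+
+  /* ... while a GPIO controller driver only needs the driver-side API. */
+  #include <linux/gpio/driver.h>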
+
 Collect drivers
 
 Collect GPIO drivers from arch/* and other places that should be placed
@@ -109,7 +140,7 @@
 
   int irq; /* from platform etc */
   struct my_gpio *g;
-  struct gpio_irq_chip *girq
+  struct gpio_irq_chip *girq;
 
   /* Set up the irqchip dynamically */
   g->irq.name = "my_gpio_irq";
@@ -137,9 +168,14 @@
 - Look over and identify any remaining easily converted drivers and
   dry-code conversions to gpiolib irqchip for maintainers to test
 
-- Support generic hierarchical GPIO interrupts: these are for the
-  non-cascading case where there is one IRQ per GPIO line, there is
-  currently no common infrastructure for this.
+- Drop gpiochip_set_chained_irqchip() when all the chained irqchips
+  have been converted to the above infrastructure.
+
+- Add more infrastructure to make it possible to also pass a threaded
+  irqchip in struct gpio_irq_chip.
+
+- Drop gpiochip_irqchip_add_nested() when all the nested irqchips
+  have been converted to the above infrastructure.
 
 
 Increase integration with pin control
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 9f2e6b0..cc4ba71 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -266,7 +266,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
 	altera_gc->mmchip.gc.owner		= THIS_MODULE;
 	altera_gc->mmchip.gc.parent		= &pdev->dev;
 
-	altera_gc->mapped_irq = platform_get_irq(pdev, 0);
+	altera_gc->mapped_irq = platform_get_irq_optional(pdev, 0);
 
 	if (altera_gc->mapped_irq < 0)
 		goto skip_irq;
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 8319812..d16645c 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -391,7 +391,7 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
 
 	gpio->irq = rc;
 
-	/* Disable IRQ and clear Interrupt status registers for all SPGIO Pins. */
+	/* Disable IRQ and clear Interrupt status registers for all SGPIO Pins. */
 	for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
 		bank =  &aspeed_sgpio_banks[i];
 		/* disable irq enable bits */
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index f1037b6..879db23 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -978,7 +978,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
 }
 
 /**
- * aspeed_gpio_copro_set_ops - Sets the callbacks used for handhsaking with
+ * aspeed_gpio_copro_set_ops - Sets the callbacks used for handshaking with
  *                             the coprocessor for shared GPIO banks
  * @ops: The callbacks
  * @data: Pointer passed back to the callbacks
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 4122683..baee8c3 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -19,7 +19,6 @@
 #include <linux/io.h>
 #include <linux/gpio/driver.h>
 #include <linux/of_device.h>
-#include <linux/of_irq.h>
 #include <linux/init.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip/chained_irq.h>
@@ -586,11 +585,18 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
 
 	kona_gpio->gpio_chip = template_chip;
 	chip = &kona_gpio->gpio_chip;
-	kona_gpio->num_bank = of_irq_count(dev->of_node);
-	if (kona_gpio->num_bank == 0) {
+	ret = platform_irq_count(pdev);
+	if (!ret) {
 		dev_err(dev, "Couldn't determine # GPIO banks\n");
 		return -ENOENT;
+	} else if (ret < 0) {
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Couldn't determine GPIO banks: (%pe)\n",
+				ERR_PTR(ret));
+		return ret;
 	}
+	kona_gpio->num_bank = ret;
+
 	if (kona_gpio->num_bank > GPIO_MAX_BANK_NUM) {
 		dev_err(dev, "Too many GPIO banks configured (max=%d)\n",
 			GPIO_MAX_BANK_NUM);
diff --git a/drivers/gpio/gpio-bd71828.c b/drivers/gpio/gpio-bd71828.c
new file mode 100644
index 0000000..04aade9
--- /dev/null
+++ b/drivers/gpio/gpio-bd71828.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2018 ROHM Semiconductors
+
+#include <linux/gpio/driver.h>
+#include <linux/mfd/rohm-bd71828.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define GPIO_OUT_REG(off) (BD71828_REG_GPIO_CTRL1 + (off))
+#define HALL_GPIO_OFFSET 3
+
+/*
+ * These defines can be removed when
+ * "gpio: Add definition for GPIO direction"
+ * (9208b1e77d6e8e9776f34f46ef4079ecac9c3c25 in GPIO tree) gets merged,
+ */
+#ifndef GPIO_LINE_DIRECTION_IN
+	#define GPIO_LINE_DIRECTION_IN 1
+	#define GPIO_LINE_DIRECTION_OUT 0
+#endif
+
+struct bd71828_gpio {
+	struct rohm_regmap_dev chip;
+	struct gpio_chip gpio;
+};
+
+static void bd71828_gpio_set(struct gpio_chip *chip, unsigned int offset,
+			     int value)
+{
+	int ret;
+	struct bd71828_gpio *bdgpio = gpiochip_get_data(chip);
+	u8 val = (value) ? BD71828_GPIO_OUT_HI : BD71828_GPIO_OUT_LO;
+
+	/*
+	 * The HALL input pin can only be used as input. If this is the pin
+	 * we are dealing with, then we are done.
+	 */
+	if (offset == HALL_GPIO_OFFSET)
+		return;
+
+	ret = regmap_update_bits(bdgpio->chip.regmap, GPIO_OUT_REG(offset),
+				 BD71828_GPIO_OUT_MASK, val);
+	if (ret)
+		dev_err(bdgpio->chip.dev, "Could not set gpio to %d\n", value);
+}
+
+static int bd71828_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+	int ret;
+	unsigned int val;
+	struct bd71828_gpio *bdgpio = gpiochip_get_data(chip);
+
+	if (offset == HALL_GPIO_OFFSET)
+		ret = regmap_read(bdgpio->chip.regmap, BD71828_REG_IO_STAT,
+				  &val);
+	else
+		ret = regmap_read(bdgpio->chip.regmap, GPIO_OUT_REG(offset),
+				  &val);
+	if (!ret)
+		ret = (val & BD71828_GPIO_OUT_MASK);
+
+	return ret;
+}
+
+static int bd71828_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+				   unsigned long config)
+{
+	struct bd71828_gpio *bdgpio = gpiochip_get_data(chip);
+
+	if (offset == HALL_GPIO_OFFSET)
+		return -ENOTSUPP;
+
+	switch (pinconf_to_config_param(config)) {
+	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+		return regmap_update_bits(bdgpio->chip.regmap,
+					  GPIO_OUT_REG(offset),
+					  BD71828_GPIO_DRIVE_MASK,
+					  BD71828_GPIO_OPEN_DRAIN);
+	case PIN_CONFIG_DRIVE_PUSH_PULL:
+		return regmap_update_bits(bdgpio->chip.regmap,
+					  GPIO_OUT_REG(offset),
+					  BD71828_GPIO_DRIVE_MASK,
+					  BD71828_GPIO_PUSH_PULL);
+	default:
+		break;
+	}
+	return -ENOTSUPP;
+}
+
+static int bd71828_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+	/*
+	 * Pin usage is selected by OTP data. We can't read it at runtime.
+	 * Hence we trust that if the pin is not excluded by
+	 * "gpio-reserved-ranges", the OTP configuration is set to OUT.
+	 * (Pins other than the HALL input pin on BD71828 can't really be
+	 * used for general purpose input - the input states are used for
+	 * specific cases like regulator control or PMIC_ON_REQ.)
+	 */
+	if (offset == HALL_GPIO_OFFSET)
+		return GPIO_LINE_DIRECTION_IN;
+
+	return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int bd71828_probe(struct platform_device *pdev)
+{
+	struct bd71828_gpio *bdgpio;
+	struct rohm_regmap_dev *bd71828;
+
+	bd71828 = dev_get_drvdata(pdev->dev.parent);
+	if (!bd71828) {
+		dev_err(&pdev->dev, "No MFD driver data\n");
+		return -EINVAL;
+	}
+
+	bdgpio = devm_kzalloc(&pdev->dev, sizeof(*bdgpio),
+			      GFP_KERNEL);
+	if (!bdgpio)
+		return -ENOMEM;
+
+	bdgpio->chip.dev = &pdev->dev;
+	bdgpio->gpio.parent = pdev->dev.parent;
+	bdgpio->gpio.label = "bd71828-gpio";
+	bdgpio->gpio.owner = THIS_MODULE;
+	bdgpio->gpio.get_direction = bd71828_get_direction;
+	bdgpio->gpio.set_config = bd71828_gpio_set_config;
+	bdgpio->gpio.can_sleep = true;
+	bdgpio->gpio.get = bd71828_gpio_get;
+	bdgpio->gpio.set = bd71828_gpio_set;
+	bdgpio->gpio.base = -1;
+
+	/*
+	 * See if we need some implementation to mark some pins as
+	 * not controllable based on DT info, or if the core can handle
+	 * "gpio-reserved-ranges" and exclude them from control.
+	 */
+	bdgpio->gpio.ngpio = 4;
+	bdgpio->gpio.of_node = pdev->dev.parent->of_node;
+	bdgpio->chip.regmap = bd71828->regmap;
+
+	return devm_gpiochip_add_data(&pdev->dev, &bdgpio->gpio,
+				     bdgpio);
+}
+
+static struct platform_driver bd71828_gpio = {
+	.driver = {
+		.name = "bd71828-gpio"
+	},
+	.probe = bd71828_probe,
+};
+
+module_platform_driver(bd71828_gpio);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD71828 voltage regulator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd71828-gpio");
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
index ff19a8a..1d0827e 100644
--- a/drivers/gpio/gpio-creg-snps.c
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -64,11 +64,11 @@ static int creg_gpio_validate_pg(struct device *dev, struct creg_gpio *hcg,
 	if (layout->bit_per_gpio[i] < 1 || layout->bit_per_gpio[i] > 8)
 		return -EINVAL;
 
-	/* Check that on valiue fits it's placeholder */
+	/* Check that on value fits its placeholder */
 	if (GENMASK(31, layout->bit_per_gpio[i]) & layout->on[i])
 		return -EINVAL;
 
-	/* Check that off valiue fits it's placeholder */
+	/* Check that off value fits its placeholder */
 	if (GENMASK(31, layout->bit_per_gpio[i]) & layout->off[i])
 		return -EINVAL;
 
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 08234e6..f954359 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -253,17 +253,16 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
 	lirq->irq = irq;
 	uirq = &priv->uirqs[lirq->index];
 	if (uirq->refcnt == 0) {
+		spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
 		ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
 				  dev_name(priv->dev), priv);
 		if (ret) {
 			dev_err(priv->dev,
 				"Could not request underlying irq %d\n",
 				uirq->uirq);
-
-			spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
 			return ret;
 		}
+		spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
 	}
 	uirq->refcnt++;
 
@@ -309,8 +308,11 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
 	if (index >= 0) {
 		uirq = &priv->uirqs[lirq->index];
 		uirq->refcnt--;
-		if (uirq->refcnt == 0)
+		if (uirq->refcnt == 0) {
+			spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
 			free_irq(uirq->uirq, priv);
+			return;
+		}
 	}
 
 	spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
@@ -433,12 +435,9 @@ static int grgpio_probe(struct platform_device *ofdev)
 static int grgpio_remove(struct platform_device *ofdev)
 {
 	struct grgpio_priv *priv = platform_get_drvdata(ofdev);
-	unsigned long flags;
 	int i;
 	int ret = 0;
 
-	spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
-
 	if (priv->domain) {
 		for (i = 0; i < GRGPIO_MAX_NGPIO; i++) {
 			if (priv->uirqs[i].refcnt != 0) {
@@ -454,8 +453,6 @@ static int grgpio_remove(struct platform_device *ofdev)
 		irq_domain_remove(priv->domain);
 
 out:
-	spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
 	return ret;
 }
 
diff --git a/drivers/gpio/gpio-logicvc.c b/drivers/gpio/gpio-logicvc.c
new file mode 100644
index 0000000..015632c
--- /dev/null
+++ b/drivers/gpio/gpio-logicvc.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define LOGICVC_CTRL_REG		0x40
+#define LOGICVC_CTRL_GPIO_SHIFT		11
+#define LOGICVC_CTRL_GPIO_BITS		5
+
+#define LOGICVC_POWER_CTRL_REG		0x78
+#define LOGICVC_POWER_CTRL_GPIO_SHIFT	0
+#define LOGICVC_POWER_CTRL_GPIO_BITS	4
+
+struct logicvc_gpio {
+	struct gpio_chip chip;
+	struct regmap *regmap;
+};
+
+static void logicvc_gpio_offset(struct logicvc_gpio *logicvc, unsigned offset,
+				unsigned int *reg, unsigned int *bit)
+{
+	if (offset >= LOGICVC_CTRL_GPIO_BITS) {
+		*reg = LOGICVC_POWER_CTRL_REG;
+
+		/* To the (virtual) power ctrl offset. */
+		offset -= LOGICVC_CTRL_GPIO_BITS;
+		/* To the actual bit offset in reg. */
+		offset += LOGICVC_POWER_CTRL_GPIO_SHIFT;
+	} else {
+		*reg = LOGICVC_CTRL_REG;
+
+		/* To the actual bit offset in reg. */
+		offset += LOGICVC_CTRL_GPIO_SHIFT;
+	}
+
+	*bit = BIT(offset);
+}
+
+static int logicvc_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct logicvc_gpio *logicvc = gpiochip_get_data(chip);
+	unsigned int reg, bit, value;
+	int ret;
+
+	logicvc_gpio_offset(logicvc, offset, &reg, &bit);
+
+	ret = regmap_read(logicvc->regmap, reg, &value);
+	if (ret)
+		return ret;
+
+	return !!(value & bit);
+}
+
+static void logicvc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct logicvc_gpio *logicvc = gpiochip_get_data(chip);
+	unsigned int reg, bit;
+
+	logicvc_gpio_offset(logicvc, offset, &reg, &bit);
+
+	regmap_update_bits(logicvc->regmap, reg, bit, value ? bit : 0);
+}
+
+static int logicvc_gpio_direction_output(struct gpio_chip *chip,
+					 unsigned offset, int value)
+{
+	/* Pins are always configured as output, so just set the value. */
+	logicvc_gpio_set(chip, offset, value);
+
+	return 0;
+}
+
+static struct regmap_config logicvc_gpio_regmap_config = {
+	.reg_bits	= 32,
+	.val_bits	= 32,
+	.reg_stride	= 4,
+	.name		= "logicvc-gpio",
+};
+
+static int logicvc_gpio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *of_node = dev->of_node;
+	struct logicvc_gpio *logicvc;
+	int ret;
+
+	logicvc = devm_kzalloc(dev, sizeof(*logicvc), GFP_KERNEL);
+	if (!logicvc)
+		return -ENOMEM;
+
+	/* Try to get regmap from parent first. */
+	logicvc->regmap = syscon_node_to_regmap(of_node->parent);
+
+	/* Grab our own regmap if that fails. */
+	if (IS_ERR(logicvc->regmap)) {
+		struct resource res;
+		void __iomem *base;
+
+		ret = of_address_to_resource(of_node, 0, &res);
+		if (ret) {
+			dev_err(dev, "Failed to get resource from address\n");
+			return ret;
+		}
+
+		base = devm_ioremap_resource(dev, &res);
+		if (IS_ERR(base)) {
+			dev_err(dev, "Failed to map I/O base\n");
+			return PTR_ERR(base);
+		}
+
+		logicvc_gpio_regmap_config.max_register = resource_size(&res) -
+			logicvc_gpio_regmap_config.reg_stride;
+
+		logicvc->regmap =
+			devm_regmap_init_mmio(dev, base,
+					      &logicvc_gpio_regmap_config);
+		if (IS_ERR(logicvc->regmap)) {
+			dev_err(dev, "Failed to create regmap for I/O\n");
+			return PTR_ERR(logicvc->regmap);
+		}
+	}
+
+	logicvc->chip.parent = dev;
+	logicvc->chip.owner = THIS_MODULE;
+	logicvc->chip.label = dev_name(dev);
+	logicvc->chip.base = -1;
+	logicvc->chip.ngpio = LOGICVC_CTRL_GPIO_BITS +
+			      LOGICVC_POWER_CTRL_GPIO_BITS;
+	logicvc->chip.get = logicvc_gpio_get;
+	logicvc->chip.set = logicvc_gpio_set;
+	logicvc->chip.direction_output = logicvc_gpio_direction_output;
+
+	platform_set_drvdata(pdev, logicvc);
+
+	return devm_gpiochip_add_data(dev, &logicvc->chip, logicvc);
+}
+
+static const struct of_device_id logicvc_gpio_of_table[] = {
+	{
+		.compatible	= "xylon,logicvc-3.02.a-gpio",
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, logicvc_gpio_of_table);
+
+static struct platform_driver logicvc_gpio_driver = {
+	.driver	= {
+		.name		= "gpio-logicvc",
+		.of_match_table	= logicvc_gpio_of_table,
+	},
+	.probe	= logicvc_gpio_probe,
+};
+
+module_platform_driver(logicvc_gpio_driver);
+
+MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
+MODULE_DESCRIPTION("Xylon LogiCVC GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
deleted file mode 100644
index 490ce7b..0000000
--- a/drivers/gpio/gpio-lynxpoint.c
+++ /dev/null
@@ -1,471 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPIO controller driver for Intel Lynxpoint PCH chipset>
- * Copyright (c) 2012, Intel Corporation.
- *
- * Author: Mathias Nyman <mathias.nyman@linux.intel.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/bitops.h>
-#include <linux/gpio/driver.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-/* LynxPoint chipset has support for 94 gpio pins */
-
-#define LP_NUM_GPIO	94
-
-/* Bitmapped register offsets */
-#define LP_ACPI_OWNED	0x00 /* Bitmap, set by bios, 0: pin reserved for ACPI */
-#define LP_GC		0x7C /* set APIC IRQ to IRQ14 or IRQ15 for all pins */
-#define LP_INT_STAT	0x80
-#define LP_INT_ENABLE	0x90
-
-/* Each pin has two 32 bit config registers, starting at 0x100 */
-#define LP_CONFIG1	0x100
-#define LP_CONFIG2	0x104
-
-/* LP_CONFIG1 reg bits */
-#define OUT_LVL_BIT	BIT(31)
-#define IN_LVL_BIT	BIT(30)
-#define TRIG_SEL_BIT	BIT(4) /* 0: Edge, 1: Level */
-#define INT_INV_BIT	BIT(3) /* Invert interrupt triggering */
-#define DIR_BIT		BIT(2) /* 0: Output, 1: Input */
-#define USE_SEL_BIT	BIT(0) /* 0: Native, 1: GPIO */
-
-/* LP_CONFIG2 reg bits */
-#define GPINDIS_BIT	BIT(2) /* disable input sensing */
-#define GPIWP_BIT	(BIT(0) | BIT(1)) /* weak pull options */
-
-struct lp_gpio {
-	struct gpio_chip	chip;
-	struct platform_device	*pdev;
-	spinlock_t		lock;
-	unsigned long		reg_base;
-};
-
-/*
- * Lynxpoint gpios are controlled through both bitmapped registers and
- * per gpio specific registers. The bitmapped registers are in chunks of
- * 3 x 32bit registers to cover all 94 gpios
- *
- * per gpio specific registers consist of two 32bit registers per gpio
- * (LP_CONFIG1 and LP_CONFIG2), with 94 gpios there's a total of
- * 188 config registers.
- *
- * A simplified view of the register layout look like this:
- *
- * LP_ACPI_OWNED[31:0] gpio ownerships for gpios 0-31  (bitmapped registers)
- * LP_ACPI_OWNED[63:32] gpio ownerships for gpios 32-63
- * LP_ACPI_OWNED[94:64] gpio ownerships for gpios 63-94
- * ...
- * LP_INT_ENABLE[31:0] ...
- * LP_INT_ENABLE[63:31] ...
- * LP_INT_ENABLE[94:64] ...
- * LP0_CONFIG1 (gpio 0) config1 reg for gpio 0 (per gpio registers)
- * LP0_CONFIG2 (gpio 0) config2 reg for gpio 0
- * LP1_CONFIG1 (gpio 1) config1 reg for gpio 1
- * LP1_CONFIG2 (gpio 1) config2 reg for gpio 1
- * LP2_CONFIG1 (gpio 2) ...
- * LP2_CONFIG2 (gpio 2) ...
- * ...
- * LP94_CONFIG1 (gpio 94) ...
- * LP94_CONFIG2 (gpio 94) ...
- */
-
-static unsigned long lp_gpio_reg(struct gpio_chip *chip, unsigned offset,
-				 int reg)
-{
-	struct lp_gpio *lg = gpiochip_get_data(chip);
-	int reg_offset;
-
-	if (reg == LP_CONFIG1 || reg == LP_CONFIG2)
-		/* per gpio specific config registers */
-		reg_offset = offset * 8;
-	else
-		/* bitmapped registers */
-		reg_offset = (offset / 32) * 4;
-
-	return lg->reg_base + reg + reg_offset;
-}
-
-static int lp_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-	struct lp_gpio *lg = gpiochip_get_data(chip);
-	unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
-	unsigned long conf2 = lp_gpio_reg(chip, offset, LP_CONFIG2);
-	unsigned long acpi_use = lp_gpio_reg(chip, offset, LP_ACPI_OWNED);
-
-	pm_runtime_get(&lg->pdev->dev); /* should we put if failed */
-
-	/* Fail if BIOS reserved pin for ACPI use */
-	if (!(inl(acpi_use) & BIT(offset % 32))) {
-		dev_err(&lg->pdev->dev, "gpio %d reserved for ACPI\n", offset);
-		return -EBUSY;
-	}
-	/* Fail if pin is in alternate function mode (not GPIO mode) */
-	if (!(inl(reg) & USE_SEL_BIT))
-		return -ENODEV;
-
-	/* enable input sensing */
-	outl(inl(conf2) & ~GPINDIS_BIT, conf2);
-
-
-	return 0;
-}
-
-static void lp_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-	struct lp_gpio *lg = gpiochip_get_data(chip);
-	unsigned long conf2 = lp_gpio_reg(chip, offset, LP_CONFIG2);
-
-	/* disable input sensing */
-	outl(inl(conf2) | GPINDIS_BIT, conf2);
-
-	pm_runtime_put(&lg->pdev->dev);
-}
-
-static int lp_irq_type(struct irq_data *d, unsigned type)
-{
-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct lp_gpio *lg = gpiochip_get_data(gc);
-	u32 hwirq = irqd_to_hwirq(d);
-	unsigned long flags;
-	u32 value;
-	unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_CONFIG1);
-
-	if (hwirq >= lg->chip.ngpio)
-		return -EINVAL;
-
-	spin_lock_irqsave(&lg->lock, flags);
-	value = inl(reg);
-
-	/* set both TRIG_SEL and INV bits to 0 for rising edge */
-	if (type & IRQ_TYPE_EDGE_RISING)
-		value &= ~(TRIG_SEL_BIT | INT_INV_BIT);
-
-	/* TRIG_SEL bit 0, INV bit 1 for falling edge */
-	if (type & IRQ_TYPE_EDGE_FALLING)
-		value = (value | INT_INV_BIT) & ~TRIG_SEL_BIT;
-
-	/* TRIG_SEL bit 1, INV bit 0 for level low */
-	if (type & IRQ_TYPE_LEVEL_LOW)
-		value = (value | TRIG_SEL_BIT) & ~INT_INV_BIT;
-
-	/* TRIG_SEL bit 1, INV bit 1 for level high */
-	if (type & IRQ_TYPE_LEVEL_HIGH)
-		value |= TRIG_SEL_BIT | INT_INV_BIT;
-
-	outl(value, reg);
-
-	if (type & IRQ_TYPE_EDGE_BOTH)
-		irq_set_handler_locked(d, handle_edge_irq);
-	else if (type & IRQ_TYPE_LEVEL_MASK)
-		irq_set_handler_locked(d, handle_level_irq);
-
-	spin_unlock_irqrestore(&lg->lock, flags);
-
-	return 0;
-}
-
-static int lp_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
-	unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
-	return !!(inl(reg) & IN_LVL_BIT);
-}
-
-static void lp_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
-	struct lp_gpio *lg = gpiochip_get_data(chip);
-	unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
-	unsigned long flags;
-
-	spin_lock_irqsave(&lg->lock, flags);
-
-	if (value)
-		outl(inl(reg) | OUT_LVL_BIT, reg);
-	else
-		outl(inl(reg) & ~OUT_LVL_BIT, reg);
-
-	spin_unlock_irqrestore(&lg->lock, flags);
-}
-
-static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-	struct lp_gpio *lg = gpiochip_get_data(chip);
-	unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
-	unsigned long flags;
-
-	spin_lock_irqsave(&lg->lock, flags);
-	outl(inl(reg) | DIR_BIT, reg);
-	spin_unlock_irqrestore(&lg->lock, flags);
-
-	return 0;
-}
-
-static int lp_gpio_direction_output(struct gpio_chip *chip,
-				      unsigned offset, int value)
-{
-	struct lp_gpio *lg = gpiochip_get_data(chip);
-	unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
-	unsigned long flags;
-
-	lp_gpio_set(chip, offset, value);
-
-	spin_lock_irqsave(&lg->lock, flags);
-	outl(inl(reg) & ~DIR_BIT, reg);
-	spin_unlock_irqrestore(&lg->lock, flags);
-
-	return 0;
-}
-
-static void lp_gpio_irq_handler(struct irq_desc *desc)
-{
-	struct irq_data *data = irq_desc_get_irq_data(desc);
-	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
-	struct lp_gpio *lg = gpiochip_get_data(gc);
-	struct irq_chip *chip = irq_data_get_irq_chip(data);
-	unsigned long reg, ena, pending;
-	u32 base, pin;
-
-	/* check from GPIO controller which pin triggered the interrupt */
-	for (base = 0; base < lg->chip.ngpio; base += 32) {
-		reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
-		ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
-
-		/* Only interrupts that are enabled */
-		pending = inl(reg) & inl(ena);
-
-		for_each_set_bit(pin, &pending, 32) {
-			unsigned irq;
-
-			/* Clear before handling so we don't lose an edge */
-			outl(BIT(pin), reg);
-
-			irq = irq_find_mapping(lg->chip.irq.domain, base + pin);
-			generic_handle_irq(irq);
-		}
-	}
-	chip->irq_eoi(data);
-}
-
-static void lp_irq_unmask(struct irq_data *d)
-{
-}
-
-static void lp_irq_mask(struct irq_data *d)
-{
-}
-
-static void lp_irq_enable(struct irq_data *d)
-{
-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct lp_gpio *lg = gpiochip_get_data(gc);
-	u32 hwirq = irqd_to_hwirq(d);
-	unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
-	unsigned long flags;
-
-	spin_lock_irqsave(&lg->lock, flags);
-	outl(inl(reg) | BIT(hwirq % 32), reg);
-	spin_unlock_irqrestore(&lg->lock, flags);
-}
-
-static void lp_irq_disable(struct irq_data *d)
-{
-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct lp_gpio *lg = gpiochip_get_data(gc);
-	u32 hwirq = irqd_to_hwirq(d);
-	unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
-	unsigned long flags;
-
-	spin_lock_irqsave(&lg->lock, flags);
-	outl(inl(reg) & ~BIT(hwirq % 32), reg);
-	spin_unlock_irqrestore(&lg->lock, flags);
-}
-
-static struct irq_chip lp_irqchip = {
-	.name = "LP-GPIO",
-	.irq_mask = lp_irq_mask,
-	.irq_unmask = lp_irq_unmask,
-	.irq_enable = lp_irq_enable,
-	.irq_disable = lp_irq_disable,
-	.irq_set_type = lp_irq_type,
-	.flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
-{
-	struct lp_gpio *lg = gpiochip_get_data(chip);
-	unsigned long reg;
-	unsigned base;
-
-	for (base = 0; base < lg->chip.ngpio; base += 32) {
-		/* disable gpio pin interrupts */
-		reg = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
-		outl(0, reg);
-		/* Clear interrupt status register */
-		reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
-		outl(0xffffffff, reg);
-	}
-
-	return 0;
-}
-
-static int lp_gpio_probe(struct platform_device *pdev)
-{
-	struct lp_gpio *lg;
-	struct gpio_chip *gc;
-	struct resource *io_rc, *irq_rc;
-	struct device *dev = &pdev->dev;
-	unsigned long reg_len;
-	int ret = -ENODEV;
-
-	lg = devm_kzalloc(dev, sizeof(struct lp_gpio), GFP_KERNEL);
-	if (!lg)
-		return -ENOMEM;
-
-	lg->pdev = pdev;
-	platform_set_drvdata(pdev, lg);
-
-	io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0);
-	irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-
-	if (!io_rc) {
-		dev_err(dev, "missing IO resources\n");
-		return -EINVAL;
-	}
-
-	lg->reg_base = io_rc->start;
-	reg_len = resource_size(io_rc);
-
-	if (!devm_request_region(dev, lg->reg_base, reg_len, "lp-gpio")) {
-		dev_err(dev, "failed requesting IO region 0x%x\n",
-			(unsigned int)lg->reg_base);
-		return -EBUSY;
-	}
-
-	spin_lock_init(&lg->lock);
-
-	gc = &lg->chip;
-	gc->label = dev_name(dev);
-	gc->owner = THIS_MODULE;
-	gc->request = lp_gpio_request;
-	gc->free = lp_gpio_free;
-	gc->direction_input = lp_gpio_direction_input;
-	gc->direction_output = lp_gpio_direction_output;
-	gc->get = lp_gpio_get;
-	gc->set = lp_gpio_set;
-	gc->base = -1;
-	gc->ngpio = LP_NUM_GPIO;
-	gc->can_sleep = false;
-	gc->parent = dev;
-
-	/* set up interrupts  */
-	if (irq_rc && irq_rc->start) {
-		struct gpio_irq_chip *girq;
-
-		girq = &gc->irq;
-		girq->chip = &lp_irqchip;
-		girq->init_hw = lp_gpio_irq_init_hw;
-		girq->parent_handler = lp_gpio_irq_handler;
-		girq->num_parents = 1;
-		girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
-					     sizeof(*girq->parents),
-					     GFP_KERNEL);
-		if (!girq->parents)
-			return -ENOMEM;
-		girq->parents[0] = (unsigned)irq_rc->start;
-		girq->default_type = IRQ_TYPE_NONE;
-		girq->handler = handle_bad_irq;
-	}
-
-	ret = devm_gpiochip_add_data(dev, gc, lg);
-	if (ret) {
-		dev_err(dev, "failed adding lp-gpio chip\n");
-		return ret;
-	}
-
-	pm_runtime_enable(dev);
-
-	return 0;
-}
-
-static int lp_gpio_runtime_suspend(struct device *dev)
-{
-	return 0;
-}
-
-static int lp_gpio_runtime_resume(struct device *dev)
-{
-	return 0;
-}
-
-static int lp_gpio_resume(struct device *dev)
-{
-	struct lp_gpio *lg = dev_get_drvdata(dev);
-	unsigned long reg;
-	int i;
-
-	/* on some hardware suspend clears input sensing, re-enable it here */
-	for (i = 0; i < lg->chip.ngpio; i++) {
-		if (gpiochip_is_requested(&lg->chip, i) != NULL) {
-			reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2);
-			outl(inl(reg) & ~GPINDIS_BIT, reg);
-		}
-	}
-	return 0;
-}
-
-static const struct dev_pm_ops lp_gpio_pm_ops = {
-	.runtime_suspend = lp_gpio_runtime_suspend,
-	.runtime_resume = lp_gpio_runtime_resume,
-	.resume = lp_gpio_resume,
-};
-
-static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
-	{ "INT33C7", 0 },
-	{ "INT3437", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
-
-static int lp_gpio_remove(struct platform_device *pdev)
-{
-	pm_runtime_disable(&pdev->dev);
-	return 0;
-}
-
-static struct platform_driver lp_gpio_driver = {
-	.probe          = lp_gpio_probe,
-	.remove         = lp_gpio_remove,
-	.driver         = {
-		.name   = "lp_gpio",
-		.pm	= &lp_gpio_pm_ops,
-		.acpi_match_table = ACPI_PTR(lynxpoint_gpio_acpi_match),
-	},
-};
-
-static int __init lp_gpio_init(void)
-{
-	return platform_driver_register(&lp_gpio_driver);
-}
-
-static void __exit lp_gpio_exit(void)
-{
-	platform_driver_unregister(&lp_gpio_driver);
-}
-
-subsys_initcall(lp_gpio_init);
-module_exit(lp_gpio_exit);
-
-MODULE_AUTHOR("Mathias Nyman (Intel)");
-MODULE_DESCRIPTION("GPIO interface for Intel Lynxpoint");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:lp_gpio");
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 94b8d3ae..7d343be 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * GPIO Testing Device Driver
  *
@@ -7,18 +7,18 @@
  * Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
  */
 
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/gpio/driver.h>
+#include <linux/debugfs.h>
 #include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irq_sim.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
 #include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
 
 #include "gpiolib.h"
 
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 5ae30de..604dfec 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -296,6 +296,7 @@ static const struct mpc8xxx_gpio_devtype mpc512x_gpio_devtype = {
 
 static const struct mpc8xxx_gpio_devtype ls1028a_gpio_devtype = {
 	.gpio_dir_in_init = ls1028a_gpio_dir_in_init,
+	.irq_set_type = mpc8xxx_irq_set_type,
 };
 
 static const struct mpc8xxx_gpio_devtype mpc5125_gpio_devtype = {
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index d1d785f..b992321 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -253,8 +253,7 @@ mediatek_gpio_bank_probe(struct device *dev,
 
 		/*
 		 * Directly request the irq here instead of passing
-		 * a flow-handler to gpiochip_set_chained_irqchip,
-		 * because the irq is shared.
+		 * a flow-handler because the irq is shared.
 		 */
 		ret = devm_request_irq(dev, mtk->gpio_irq,
 				       mediatek_gpio_irq_handler, IRQF_SHARED,
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 993bbeb..d2b999c 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -46,7 +46,6 @@
 #include <linux/irqdomain.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of_device.h>
-#include <linux/of_irq.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
@@ -432,6 +431,7 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
 	u32 mask = d->mask;
 
 	irq_gc_lock(gc);
+	mvebu_gpio_write_edge_cause(mvchip, ~mask);
 	ct->mask_cache_priv |= mask;
 	mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
 	irq_gc_unlock(gc);
@@ -1102,7 +1102,11 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
 		soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
 
 	/* Some gpio controllers do not provide irq support */
-	have_irqs = of_irq_count(np) != 0;
+	err = platform_irq_count(pdev);
+	if (err < 0)
+		return err;
+
+	have_irqs = err != 0;
 
 	mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
 			      GFP_KERNEL);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 9853547..5638b4e5 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -764,8 +764,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
 
 	ret = devm_request_threaded_irq(&client->dev, client->irq,
 					NULL, pca953x_irq_handler,
-					IRQF_TRIGGER_LOW | IRQF_ONESHOT |
-					IRQF_SHARED,
+					IRQF_ONESHOT | IRQF_SHARED,
 					dev_name(&client->dev), chip);
 	if (ret) {
 		dev_err(&client->dev, "failed to request irq %d\n",
@@ -855,8 +854,6 @@ static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
 	return ret;
 }
 
-static const struct of_device_id pca953x_dt_ids[];
-
 static int pca953x_probe(struct i2c_client *client,
 			 const struct i2c_device_id *i2c_id)
 {
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index b04c561..4d47b2c 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -244,7 +244,6 @@ static struct platform_driver sama5d2_piobu_driver = {
 
 module_platform_driver(sama5d2_piobu_driver);
 
-MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("SAMA5D2 PIOBU controller driver");
 MODULE_AUTHOR("Andrei Stefanescu <andrei.stefanescu@microchip.com>");
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 5e37518..866201c 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -243,4 +243,3 @@ static struct platform_driver tb10x_gpio_driver = {
 module_platform_driver(tb10x_gpio_driver);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("tb10x gpio.");
-MODULE_VERSION("0.0.1");
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 6fdfe4c..acb99ef 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -96,12 +96,12 @@ struct tegra_gpio_info {
 static inline void tegra_gpio_writel(struct tegra_gpio_info *tgi,
 				     u32 val, u32 reg)
 {
-	__raw_writel(val, tgi->regs + reg);
+	writel_relaxed(val, tgi->regs + reg);
 }
 
 static inline u32 tegra_gpio_readl(struct tegra_gpio_info *tgi, u32 reg)
 {
-	return __raw_readl(tgi->regs + reg);
+	return readl_relaxed(tgi->regs + reg);
 }
 
 static unsigned int tegra_gpio_compose(unsigned int bank, unsigned int port,
@@ -416,11 +416,8 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
 static int tegra_gpio_resume(struct device *dev)
 {
 	struct tegra_gpio_info *tgi = dev_get_drvdata(dev);
-	unsigned long flags;
 	unsigned int b, p;
 
-	local_irq_save(flags);
-
 	for (b = 0; b < tgi->bank_count; b++) {
 		struct tegra_gpio_bank *bank = &tgi->bank_info[b];
 
@@ -448,17 +445,14 @@ static int tegra_gpio_resume(struct device *dev)
 		}
 	}
 
-	local_irq_restore(flags);
 	return 0;
 }
 
 static int tegra_gpio_suspend(struct device *dev)
 {
 	struct tegra_gpio_info *tgi = dev_get_drvdata(dev);
-	unsigned long flags;
 	unsigned int b, p;
 
-	local_irq_save(flags);
 	for (b = 0; b < tgi->bank_count; b++) {
 		struct tegra_gpio_bank *bank = &tgi->bank_info[b];
 
@@ -488,7 +482,7 @@ static int tegra_gpio_suspend(struct device *dev)
 					  GPIO_INT_ENB(tgi, gpio));
 		}
 	}
-	local_irq_restore(flags);
+
 	return 0;
 }
 
@@ -497,6 +491,11 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
 	struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
 	unsigned int gpio = d->hwirq;
 	u32 port, bit, mask;
+	int err;
+
+	err = irq_set_irq_wake(bank->irq, enable);
+	if (err)
+		return err;
 
 	port = GPIO_PORT(gpio);
 	bit = GPIO_BIT(gpio);
@@ -507,7 +506,7 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
 	else
 		bank->wake_enb[port] &= ~mask;
 
-	return irq_set_irq_wake(bank->irq, enable);
+	return 0;
 }
 #endif
 
@@ -557,7 +556,7 @@ static inline void tegra_gpio_debuginit(struct tegra_gpio_info *tgi)
 #endif
 
 static const struct dev_pm_ops tegra_gpio_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
 };
 
 static int tegra_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 55b43b7..de24126 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -448,17 +448,24 @@ static int tegra186_gpio_irq_domain_translate(struct irq_domain *domain,
 	return 0;
 }
 
-static void tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip,
-						 struct irq_fwspec *fwspec,
+static void *tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip,
 						 unsigned int parent_hwirq,
 						 unsigned int parent_type)
 {
 	struct tegra_gpio *gpio = gpiochip_get_data(chip);
+	struct irq_fwspec *fwspec;
 
+	fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+	if (!fwspec)
+		return NULL;
+
+	fwspec->fwnode = chip->irq.parent_domain->fwnode;
 	fwspec->param_count = 3;
 	fwspec->param[0] = gpio->soc->instance;
 	fwspec->param[1] = parent_hwirq;
 	fwspec->param[2] = parent_type;
+
+	return fwspec;
 }
 
 static int tegra186_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
@@ -621,7 +628,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
 	irq->chip = &gpio->intc;
 	irq->fwnode = of_node_to_fwnode(pdev->dev.of_node);
 	irq->child_to_parent_hwirq = tegra186_gpio_child_to_parent_hwirq;
-	irq->populate_parent_fwspec = tegra186_gpio_populate_parent_fwspec;
+	irq->populate_parent_alloc_arg = tegra186_gpio_populate_parent_fwspec;
 	irq->child_offset_to_irq = tegra186_gpio_child_offset_to_irq;
 	irq->child_irq_domain_ops.translate = tegra186_gpio_irq_domain_translate;
 	irq->handler = handle_simple_irq;
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index 4627704..9f66dea 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/spinlock.h>
+#include <asm-generic/msi.h>
 
 
 #define GPIO_RX_DAT	0x0
@@ -53,7 +54,6 @@ struct thunderx_line {
 struct thunderx_gpio {
 	struct gpio_chip	chip;
 	u8 __iomem		*register_base;
-	struct irq_domain	*irqd;
 	struct msix_entry	*msix_entries;	/* per line MSI-X */
 	struct thunderx_line	*line_entries;	/* per line irq info */
 	raw_spinlock_t		lock;
@@ -286,54 +286,60 @@ static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
 	}
 }
 
-static void thunderx_gpio_irq_ack(struct irq_data *data)
+static void thunderx_gpio_irq_ack(struct irq_data *d)
 {
-	struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
 
 	writeq(GPIO_INTR_INTR,
-	       txline->txgpio->register_base + intr_reg(txline->line));
+	       txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
 }
 
-static void thunderx_gpio_irq_mask(struct irq_data *data)
+static void thunderx_gpio_irq_mask(struct irq_data *d)
 {
-	struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
 
 	writeq(GPIO_INTR_ENA_W1C,
-	       txline->txgpio->register_base + intr_reg(txline->line));
+	       txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
 }
 
-static void thunderx_gpio_irq_mask_ack(struct irq_data *data)
+static void thunderx_gpio_irq_mask_ack(struct irq_data *d)
 {
-	struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
 
 	writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR,
-	       txline->txgpio->register_base + intr_reg(txline->line));
+	       txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
 }
 
-static void thunderx_gpio_irq_unmask(struct irq_data *data)
+static void thunderx_gpio_irq_unmask(struct irq_data *d)
 {
-	struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
 
 	writeq(GPIO_INTR_ENA_W1S,
-	       txline->txgpio->register_base + intr_reg(txline->line));
+	       txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
 }
 
-static int thunderx_gpio_irq_set_type(struct irq_data *data,
+static int thunderx_gpio_irq_set_type(struct irq_data *d,
 				      unsigned int flow_type)
 {
-	struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
-	struct thunderx_gpio *txgpio = txline->txgpio;
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+	struct thunderx_line *txline =
+		&txgpio->line_entries[irqd_to_hwirq(d)];
 	u64 bit_cfg;
 
-	irqd_set_trigger_type(data, flow_type);
+	irqd_set_trigger_type(d, flow_type);
 
 	bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN;
 
 	if (flow_type & IRQ_TYPE_EDGE_BOTH) {
-		irq_set_handler_locked(data, handle_fasteoi_ack_irq);
+		irq_set_handler_locked(d, handle_fasteoi_ack_irq);
 		bit_cfg |= GPIO_BIT_CFG_INT_TYPE;
 	} else {
-		irq_set_handler_locked(data, handle_fasteoi_mask_irq);
+		irq_set_handler_locked(d, handle_fasteoi_mask_irq);
 	}
 
 	raw_spin_lock(&txgpio->lock);
@@ -362,33 +368,6 @@ static void thunderx_gpio_irq_disable(struct irq_data *data)
 	irq_chip_disable_parent(data);
 }
 
-static int thunderx_gpio_irq_request_resources(struct irq_data *data)
-{
-	struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
-	struct thunderx_gpio *txgpio = txline->txgpio;
-	int r;
-
-	r = gpiochip_lock_as_irq(&txgpio->chip, txline->line);
-	if (r)
-		return r;
-
-	r = irq_chip_request_resources_parent(data);
-	if (r)
-		gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
-
-	return r;
-}
-
-static void thunderx_gpio_irq_release_resources(struct irq_data *data)
-{
-	struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
-	struct thunderx_gpio *txgpio = txline->txgpio;
-
-	irq_chip_release_resources_parent(data);
-
-	gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
-}
-
 /*
  * Interrupts are chained from underlying MSI-X vectors.  We have
  * these irq_chip functions to be able to handle level triggering
@@ -405,48 +384,42 @@ static struct irq_chip thunderx_gpio_irq_chip = {
 	.irq_unmask		= thunderx_gpio_irq_unmask,
 	.irq_eoi		= irq_chip_eoi_parent,
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
-	.irq_request_resources	= thunderx_gpio_irq_request_resources,
-	.irq_release_resources	= thunderx_gpio_irq_release_resources,
 	.irq_set_type		= thunderx_gpio_irq_set_type,
 
 	.flags			= IRQCHIP_SET_TYPE_MASKED
 };
 
-static int thunderx_gpio_irq_translate(struct irq_domain *d,
-				       struct irq_fwspec *fwspec,
-				       irq_hw_number_t *hwirq,
-				       unsigned int *type)
+static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+					       unsigned int child,
+					       unsigned int child_type,
+					       unsigned int *parent,
+					       unsigned int *parent_type)
 {
-	struct thunderx_gpio *txgpio = d->host_data;
+	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+	struct irq_data *irqd;
+	unsigned int irq;
 
-	if (WARN_ON(fwspec->param_count < 2))
+	irq = txgpio->msix_entries[child].vector;
+	irqd = irq_domain_get_irq_data(gc->irq.parent_domain, irq);
+	if (!irqd)
 		return -EINVAL;
-	if (fwspec->param[0] >= txgpio->chip.ngpio)
-		return -EINVAL;
-	*hwirq = fwspec->param[0];
-	*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+	*parent = irqd_to_hwirq(irqd);
+	*parent_type = IRQ_TYPE_LEVEL_HIGH;
 	return 0;
 }
 
-static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq,
-				   unsigned int nr_irqs, void *arg)
+static void *thunderx_gpio_populate_parent_alloc_info(struct gpio_chip *chip,
+						      unsigned int parent_hwirq,
+						      unsigned int parent_type)
 {
-	struct thunderx_line *txline = arg;
+	msi_alloc_info_t *info;
 
-	return irq_domain_set_hwirq_and_chip(d, virq, txline->line,
-					     &thunderx_gpio_irq_chip, txline);
-}
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return NULL;
 
-static const struct irq_domain_ops thunderx_gpio_irqd_ops = {
-	.alloc		= thunderx_gpio_irq_alloc,
-	.translate	= thunderx_gpio_irq_translate
-};
-
-static int thunderx_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
-{
-	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
-
-	return irq_find_mapping(txgpio->irqd, offset);
+	info->hwirq = parent_hwirq;
+	return info;
 }
 
 static int thunderx_gpio_probe(struct pci_dev *pdev,
@@ -456,6 +429,7 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
 	struct device *dev = &pdev->dev;
 	struct thunderx_gpio *txgpio;
 	struct gpio_chip *chip;
+	struct gpio_irq_chip *girq;
 	int ngpio, i;
 	int err = 0;
 
@@ -500,8 +474,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
 	}
 
 	txgpio->msix_entries = devm_kcalloc(dev,
-					  ngpio, sizeof(struct msix_entry),
-					  GFP_KERNEL);
+					    ngpio, sizeof(struct msix_entry),
+					    GFP_KERNEL);
 	if (!txgpio->msix_entries) {
 		err = -ENOMEM;
 		goto out;
@@ -542,27 +516,6 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
 	if (err < 0)
 		goto out;
 
-	/*
-	 * Push GPIO specific irqdomain on hierarchy created as a side
-	 * effect of the pci_enable_msix()
-	 */
-	txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
-						   0, 0, of_node_to_fwnode(dev->of_node),
-						   &thunderx_gpio_irqd_ops, txgpio);
-	if (!txgpio->irqd) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	/* Push on irq_data and the domain for each line. */
-	for (i = 0; i < ngpio; i++) {
-		err = irq_domain_push_irq(txgpio->irqd,
-					  txgpio->msix_entries[i].vector,
-					  &txgpio->line_entries[i]);
-		if (err < 0)
-			dev_err(dev, "irq_domain_push_irq: %d\n", err);
-	}
-
 	chip->label = KBUILD_MODNAME;
 	chip->parent = dev;
 	chip->owner = THIS_MODULE;
@@ -577,11 +530,35 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
 	chip->set = thunderx_gpio_set;
 	chip->set_multiple = thunderx_gpio_set_multiple;
 	chip->set_config = thunderx_gpio_set_config;
-	chip->to_irq = thunderx_gpio_to_irq;
+	girq = &chip->irq;
+	girq->chip = &thunderx_gpio_irq_chip;
+	girq->fwnode = of_node_to_fwnode(dev->of_node);
+	girq->parent_domain =
+		irq_get_irq_data(txgpio->msix_entries[0].vector)->domain;
+	girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq;
+	girq->populate_parent_alloc_arg = thunderx_gpio_populate_parent_alloc_info;
+	girq->handler = handle_bad_irq;
+	girq->default_type = IRQ_TYPE_NONE;
+
 	err = devm_gpiochip_add_data(dev, chip, txgpio);
 	if (err)
 		goto out;
 
+	/* Push on irq_data and the domain for each line. */
+	for (i = 0; i < ngpio; i++) {
+		struct irq_fwspec fwspec;
+
+		fwspec.fwnode = of_node_to_fwnode(dev->of_node);
+		fwspec.param_count = 2;
+		fwspec.param[0] = i;
+		fwspec.param[1] = IRQ_TYPE_NONE;
+		err = irq_domain_push_irq(girq->domain,
+					  txgpio->msix_entries[i].vector,
+					  &fwspec);
+		if (err < 0)
+			dev_err(dev, "irq_domain_push_irq: %d\n", err);
+	}
+
 	dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n",
 		 ngpio, chip->base);
 	return 0;
@@ -596,10 +573,10 @@ static void thunderx_gpio_remove(struct pci_dev *pdev)
 	struct thunderx_gpio *txgpio = pci_get_drvdata(pdev);
 
 	for (i = 0; i < txgpio->chip.ngpio; i++)
-		irq_domain_pop_irq(txgpio->irqd,
+		irq_domain_pop_irq(txgpio->chip.irq.domain,
 				   txgpio->msix_entries[i].vector);
 
-	irq_domain_remove(txgpio->irqd);
+	irq_domain_remove(txgpio->chip.irq.domain);
 
 	pci_set_drvdata(pdev, NULL);
 }
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 4ff146c..3bf397b 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -71,7 +71,7 @@ static inline u_int32_t gpio_o_bit(int i)
 		return 1 << (i + 13);
 }
 
-/* Mapping betwee numeric GPIO ID and the actual GPIO hardware numbering:
+/* Mapping between numeric GPIO ID and the actual GPIO hardware numbering:
  * 0..13	GPI 0..13
  * 14..26	GPO 0..12
  * 27..41	GPIO 0..14
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
new file mode 100644
index 0000000..74913f2
--- /dev/null
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019, Linaro Limited
+
+#include <linux/module.h>
+#include <linux/gpio/driver.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+
+#define WCD_PIN_MASK(p) BIT(p - 1)
+#define WCD_REG_DIR_CTL_OFFSET 0x42
+#define WCD_REG_VAL_CTL_OFFSET 0x43
+#define WCD934X_NPINS		5
+
+struct wcd_gpio_data {
+	struct regmap *map;
+	struct gpio_chip chip;
+};
+
+static int wcd_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
+{
+	struct wcd_gpio_data *data = gpiochip_get_data(chip);
+	unsigned int value;
+	int ret;
+
+	ret = regmap_read(data->map, WCD_REG_DIR_CTL_OFFSET, &value);
+	if (ret < 0)
+		return ret;
+
+	if (value & WCD_PIN_MASK(pin))
+		return GPIO_LINE_DIRECTION_OUT;
+
+	return GPIO_LINE_DIRECTION_IN;
+}
+
+static int wcd_gpio_direction_input(struct gpio_chip *chip, unsigned int pin)
+{
+	struct wcd_gpio_data *data = gpiochip_get_data(chip);
+
+	return regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
+				  WCD_PIN_MASK(pin), 0);
+}
+
+static int wcd_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
+				     int val)
+{
+	struct wcd_gpio_data *data = gpiochip_get_data(chip);
+
+	regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
+			   WCD_PIN_MASK(pin), WCD_PIN_MASK(pin));
+
+	return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
+				  WCD_PIN_MASK(pin),
+				  val ? WCD_PIN_MASK(pin) : 0);
+}
+
+static int wcd_gpio_get(struct gpio_chip *chip, unsigned int pin)
+{
+	struct wcd_gpio_data *data = gpiochip_get_data(chip);
+	int value;
+
+	regmap_read(data->map, WCD_REG_VAL_CTL_OFFSET, &value);
+
+	return !!(value & WCD_PIN_MASK(pin));
+}
+
+static void wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
+{
+	wcd_gpio_direction_output(chip, pin, val);
+}
+
+static int wcd_gpio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct wcd_gpio_data *data;
+	struct gpio_chip *chip;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->map = dev_get_regmap(dev->parent, NULL);
+	if (!data->map) {
+		dev_err(dev, "%s: failed to get regmap\n", __func__);
+		return  -EINVAL;
+	}
+
+	chip = &data->chip;
+	chip->direction_input  = wcd_gpio_direction_input;
+	chip->direction_output = wcd_gpio_direction_output;
+	chip->get_direction = wcd_gpio_get_direction;
+	chip->get = wcd_gpio_get;
+	chip->set = wcd_gpio_set;
+	chip->parent = dev;
+	chip->base = -1;
+	chip->ngpio = WCD934X_NPINS;
+	chip->label = dev_name(dev);
+	chip->of_gpio_n_cells = 2;
+	chip->can_sleep = false;
+
+	return devm_gpiochip_add_data(dev, chip, data);
+}
+
+static const struct of_device_id wcd_gpio_of_match[] = {
+	{ .compatible = "qcom,wcd9340-gpio" },
+	{ .compatible = "qcom,wcd9341-gpio" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, wcd_gpio_of_match);
+
+static struct platform_driver wcd_gpio_driver = {
+	.driver = {
+		   .name = "wcd934x-gpio",
+		   .of_match_table = wcd_gpio_of_match,
+	},
+	.probe = wcd_gpio_probe,
+};
+
+module_platform_driver(wcd_gpio_driver);
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc WCD GPIO control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index b21c2e4..ad5489a 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -251,8 +251,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
 
 		/*
 		 * Directly request the irq here instead of passing
-		 * a flow-handler to gpiochip_set_chained_irqchip,
-		 * because the irq is shared.
+		 * a flow-handler because the irq is shared.
 		 */
 		ret = devm_request_irq(dev, irq, iproc_gpio_irq_handler,
 				       IRQF_SHARED, chip->gc.label, &chip->gc);
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 4421be0..72b6001 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -308,7 +308,7 @@ devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
 	struct gpio_descs *descs;
 
 	descs = devm_gpiod_get_array(dev, con_id, flags);
-	if (IS_ERR(descs) && (PTR_ERR(descs) == -ENOENT))
+	if (PTR_ERR(descs) == -ENOENT)
 		return NULL;
 
 	return descs;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 1b3f217..c6d30f7 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -484,24 +484,24 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
 			break;
 	}
 
-	if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+	if (PTR_ERR(desc) == -ENOENT) {
 		/* Special handling for SPI GPIOs if used */
 		desc = of_find_spi_gpio(dev, con_id, &of_flags);
 	}
 
-	if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+	if (PTR_ERR(desc) == -ENOENT) {
 		/* This quirk looks up flags and all */
 		desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
 		if (!IS_ERR(desc))
 			return desc;
 	}
 
-	if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+	if (PTR_ERR(desc) == -ENOENT) {
 		/* Special handling for regulator GPIOs if used */
 		desc = of_find_regulator_gpio(dev, con_id, &of_flags);
 	}
 
-	if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
+	if (PTR_ERR(desc) == -ENOENT)
 		desc = of_find_arizona_gpio(dev, con_id, &of_flags);
 
 	if (IS_ERR(desc))
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index fbf6b1a..23e3d33 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -762,10 +762,9 @@ int gpiochip_sysfs_register(struct gpio_device *gdev)
 		parent = &gdev->dev;
 
 	/* use chip->base for the ID; it's already known to be unique */
-	dev = device_create_with_groups(&gpio_class, parent,
-					MKDEV(0, 0),
-					chip, gpiochip_groups,
-					"gpiochip%d", chip->base);
+	dev = device_create_with_groups(&gpio_class, parent, MKDEV(0, 0), chip,
+					gpiochip_groups, GPIOCHIP_NAME "%d",
+					chip->base);
 	if (IS_ERR(dev))
 		return PTR_ERR(dev);
 
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index bcfbfde..7532834 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -140,7 +140,7 @@ EXPORT_SYMBOL_GPL(gpio_to_desc);
  * in the given chip for the specified hardware number.
  */
 struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
-				    u16 hwnum)
+				    unsigned int hwnum)
 {
 	struct gpio_device *gdev = chip->gpiodev;
 
@@ -232,15 +232,15 @@ int gpiod_get_direction(struct gpio_desc *desc)
 		return -ENOTSUPP;
 
 	ret = chip->get_direction(chip, offset);
-	if (ret > 0) {
-		/* GPIOF_DIR_IN, or other positive */
+	if (ret < 0)
+		return ret;
+
+	/* GPIOF_DIR_IN or other positive, otherwise GPIOF_DIR_OUT */
+	if (ret > 0)
 		ret = 1;
-		clear_bit(FLAG_IS_OUT, &desc->flags);
-	}
-	if (ret == 0) {
-		/* GPIOF_DIR_OUT */
-		set_bit(FLAG_IS_OUT, &desc->flags);
-	}
+
+	assign_bit(FLAG_IS_OUT, &desc->flags, !ret);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(gpiod_get_direction);
@@ -492,15 +492,6 @@ static int linehandle_validate_flags(u32 flags)
 	return 0;
 }
 
-static void linehandle_configure_flag(unsigned long *flagsp,
-				      u32 bit, bool active)
-{
-	if (active)
-		set_bit(bit, flagsp);
-	else
-		clear_bit(bit, flagsp);
-}
-
 static long linehandle_set_config(struct linehandle_state *lh,
 				  void __user *ip)
 {
@@ -522,22 +513,22 @@ static long linehandle_set_config(struct linehandle_state *lh,
 		desc = lh->descs[i];
 		flagsp = &desc->flags;
 
-		linehandle_configure_flag(flagsp, FLAG_ACTIVE_LOW,
+		assign_bit(FLAG_ACTIVE_LOW, flagsp,
 			lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
 
-		linehandle_configure_flag(flagsp, FLAG_OPEN_DRAIN,
+		assign_bit(FLAG_OPEN_DRAIN, flagsp,
 			lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
 
-		linehandle_configure_flag(flagsp, FLAG_OPEN_SOURCE,
+		assign_bit(FLAG_OPEN_SOURCE, flagsp,
 			lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
 
-		linehandle_configure_flag(flagsp, FLAG_PULL_UP,
+		assign_bit(FLAG_PULL_UP, flagsp,
 			lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
 
-		linehandle_configure_flag(flagsp, FLAG_PULL_DOWN,
+		assign_bit(FLAG_PULL_DOWN, flagsp,
 			lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
 
-		linehandle_configure_flag(flagsp, FLAG_BIAS_DISABLE,
+		assign_bit(FLAG_BIAS_DISABLE, flagsp,
 			lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
 
 		/*
@@ -686,14 +677,13 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
 	/* Request each GPIO */
 	for (i = 0; i < handlereq.lines; i++) {
 		u32 offset = handlereq.lineoffsets[i];
-		struct gpio_desc *desc;
+		struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
 
-		if (offset >= gdev->ngpio) {
-			ret = -EINVAL;
+		if (IS_ERR(desc)) {
+			ret = PTR_ERR(desc);
 			goto out_free_descs;
 		}
 
-		desc = &gdev->descs[offset];
 		ret = gpiod_request(desc, lh->label);
 		if (ret)
 			goto out_free_descs;
@@ -1018,8 +1008,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 	lflags = eventreq.handleflags;
 	eflags = eventreq.eventflags;
 
-	if (offset >= gdev->ngpio)
-		return -EINVAL;
+	desc = gpiochip_get_desc(gdev->chip, offset);
+	if (IS_ERR(desc))
+		return PTR_ERR(desc);
 
 	/* Return an error if a unknown flag is set */
 	if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
@@ -1057,7 +1048,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 		}
 	}
 
-	desc = &gdev->descs[offset];
 	ret = gpiod_request(desc, le->label);
 	if (ret)
 		goto out_free_label;
@@ -1184,10 +1174,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 		if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
 			return -EFAULT;
-		if (lineinfo.line_offset >= gdev->ngpio)
-			return -EINVAL;
 
-		desc = &gdev->descs[lineinfo.line_offset];
+		desc = gpiochip_get_desc(chip, lineinfo.line_offset);
+		if (IS_ERR(desc))
+			return PTR_ERR(desc);
+
 		if (desc->name) {
 			strncpy(lineinfo.name, desc->name,
 				sizeof(lineinfo.name));
@@ -1427,7 +1418,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 		ret = gdev->id;
 		goto err_free_gdev;
 	}
-	dev_set_name(&gdev->dev, "gpiochip%d", gdev->id);
+	dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
 	device_initialize(&gdev->dev);
 	dev_set_drvdata(&gdev->dev, gdev);
 	if (chip->parent && chip->parent->driver)
@@ -1452,7 +1443,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
 	if (chip->ngpio > FASTPATH_NGPIO)
 		chip_warn(chip, "line cnt %u is greater than fast path cnt %u\n",
-		chip->ngpio, FASTPATH_NGPIO);
+			  chip->ngpio, FASTPATH_NGPIO);
 
 	gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL);
 	if (!gdev->label) {
@@ -1495,11 +1486,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 		goto err_free_label;
 	}
 
-	spin_unlock_irqrestore(&gpio_lock, flags);
-
 	for (i = 0; i < chip->ngpio; i++)
 		gdev->descs[i].gdev = gdev;
 
+	spin_unlock_irqrestore(&gpio_lock, flags);
+
 #ifdef CONFIG_PINCTRL
 	INIT_LIST_HEAD(&gdev->pin_ranges);
 #endif
@@ -1524,15 +1515,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 		struct gpio_desc *desc = &gdev->descs[i];
 
 		if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
-			if (!chip->get_direction(chip, i))
-				set_bit(FLAG_IS_OUT, &desc->flags);
-			else
-				clear_bit(FLAG_IS_OUT, &desc->flags);
+			assign_bit(FLAG_IS_OUT,
+				   &desc->flags, !chip->get_direction(chip, i));
 		} else {
-			if (!chip->direction_input)
-				set_bit(FLAG_IS_OUT, &desc->flags);
-			else
-				clear_bit(FLAG_IS_OUT, &desc->flags);
+			assign_bit(FLAG_IS_OUT,
+				   &desc->flags, !chip->direction_input);
 		}
 	}
 
@@ -1813,7 +1800,7 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
  * gpiochip_set_cascaded_irqchip() - connects a cascaded irqchip to a gpiochip
  * @gc: the gpiochip to set the irqchip chain to
  * @parent_irq: the irq number corresponding to the parent IRQ for this
- * chained irqchip
+ * cascaded irqchip
  * @parent_handler: the parent interrupt handler for the accumulated IRQ
  * coming out of the gpiochip. If the interrupt is nested rather than
  * cascaded, pass NULL in this handler argument
@@ -1856,29 +1843,6 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gc,
 }
 
 /**
- * gpiochip_set_chained_irqchip() - connects a chained irqchip to a gpiochip
- * @gpiochip: the gpiochip to set the irqchip chain to
- * @irqchip: the irqchip to chain to the gpiochip
- * @parent_irq: the irq number corresponding to the parent IRQ for this
- * chained irqchip
- * @parent_handler: the parent interrupt handler for the accumulated IRQ
- * coming out of the gpiochip.
- */
-void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
-				  struct irq_chip *irqchip,
-				  unsigned int parent_irq,
-				  irq_flow_handler_t parent_handler)
-{
-	if (gpiochip->irq.threaded) {
-		chip_err(gpiochip, "tried to chain a threaded gpiochip\n");
-		return;
-	}
-
-	gpiochip_set_cascaded_irqchip(gpiochip, parent_irq, parent_handler);
-}
-EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
-
-/**
  * gpiochip_set_nested_irqchip() - connects a nested irqchip to a gpiochip
  * @gpiochip: the gpiochip to set the irqchip nested handler to
  * @irqchip: the irqchip to nest to the gpiochip
@@ -2003,7 +1967,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
 	irq_hw_number_t hwirq;
 	unsigned int type = IRQ_TYPE_NONE;
 	struct irq_fwspec *fwspec = data;
-	struct irq_fwspec parent_fwspec;
+	void *parent_arg;
 	unsigned int parent_hwirq;
 	unsigned int parent_type;
 	struct gpio_irq_chip *girq = &gc->irq;
@@ -2019,7 +1983,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
 	if (ret)
 		return ret;
 
-	chip_info(gc, "allocate IRQ %d, hwirq %lu\n", irq,  hwirq);
+	chip_dbg(gc, "allocate IRQ %d, hwirq %lu\n", irq,  hwirq);
 
 	ret = girq->child_to_parent_hwirq(gc, hwirq, type,
 					  &parent_hwirq, &parent_type);
@@ -2027,7 +1991,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
 		chip_err(gc, "can't look up hwirq %lu\n", hwirq);
 		return ret;
 	}
-	chip_info(gc, "found parent hwirq %u\n", parent_hwirq);
+	chip_dbg(gc, "found parent hwirq %u\n", parent_hwirq);
 
 	/*
 	 * We set handle_bad_irq because the .set_type() should
@@ -2042,23 +2006,27 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
 			    NULL, NULL);
 	irq_set_probe(irq);
 
-	/*
-	 * Create a IRQ fwspec to send up to the parent irqdomain:
-	 * specify the hwirq we address on the parent and tie it
-	 * all together up the chain.
-	 */
-	parent_fwspec.fwnode = d->parent->fwnode;
 	/* This parent only handles asserted level IRQs */
-	girq->populate_parent_fwspec(gc, &parent_fwspec, parent_hwirq,
-				     parent_type);
-	chip_info(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
+	parent_arg = girq->populate_parent_alloc_arg(gc, parent_hwirq, parent_type);
+	if (!parent_arg)
+		return -ENOMEM;
+
+	chip_dbg(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
 		  irq, parent_hwirq);
-	ret = irq_domain_alloc_irqs_parent(d, irq, 1, &parent_fwspec);
+	irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key);
+	ret = irq_domain_alloc_irqs_parent(d, irq, 1, parent_arg);
+	/*
+	 * If the parent irqdomain is msi, the interrupts have already
+	 * been allocated, so the EEXIST is good.
+	 */
+	if (irq_domain_is_msi(d->parent) && (ret == -EEXIST))
+		ret = 0;
 	if (ret)
 		chip_err(gc,
 			 "failed to allocate parent hwirq %d for hwirq %lu\n",
 			 parent_hwirq, hwirq);
 
+	kfree(parent_arg);
 	return ret;
 }
 
@@ -2095,8 +2063,8 @@ static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc)
 	if (!gc->irq.child_offset_to_irq)
 		gc->irq.child_offset_to_irq = gpiochip_child_offset_to_irq_noop;
 
-	if (!gc->irq.populate_parent_fwspec)
-		gc->irq.populate_parent_fwspec =
+	if (!gc->irq.populate_parent_alloc_arg)
+		gc->irq.populate_parent_alloc_arg =
 			gpiochip_populate_parent_fwspec_twocell;
 
 	gpiochip_hierarchy_setup_domain_ops(&gc->irq.child_irq_domain_ops);
@@ -2122,27 +2090,43 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
 	return !!gc->irq.parent_domain;
 }
 
-void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
-					     struct irq_fwspec *fwspec,
+void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
 					     unsigned int parent_hwirq,
 					     unsigned int parent_type)
 {
+	struct irq_fwspec *fwspec;
+
+	fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+	if (!fwspec)
+		return NULL;
+
+	fwspec->fwnode = chip->irq.parent_domain->fwnode;
 	fwspec->param_count = 2;
 	fwspec->param[0] = parent_hwirq;
 	fwspec->param[1] = parent_type;
+
+	return fwspec;
 }
 EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_twocell);
 
-void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
-					      struct irq_fwspec *fwspec,
+void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
 					      unsigned int parent_hwirq,
 					      unsigned int parent_type)
 {
+	struct irq_fwspec *fwspec;
+
+	fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+	if (!fwspec)
+		return NULL;
+
+	fwspec->fwnode = chip->irq.parent_domain->fwnode;
 	fwspec->param_count = 4;
 	fwspec->param[0] = 0;
 	fwspec->param[1] = parent_hwirq;
 	fwspec->param[2] = 0;
 	fwspec->param[3] = parent_type;
+
+	return fwspec;
 }
 EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_fourcell);
 
@@ -2998,7 +2982,8 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
  * A pointer to the GPIO descriptor, or an ERR_PTR()-encoded negative error
  * code on failure.
  */
-struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip,
+					    unsigned int hwnum,
 					    const char *label,
 					    enum gpio_lookup_flags lflags,
 					    enum gpiod_flags dflags)
@@ -3050,25 +3035,13 @@ EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
  * rely on gpio_request() having been called beforehand.
  */
 
-static int gpio_set_config(struct gpio_chip *gc, unsigned offset,
+static int gpio_set_config(struct gpio_chip *gc, unsigned int offset,
 			   enum pin_config_param mode)
 {
-	unsigned long config;
-	unsigned arg;
+	if (!gc->set_config)
+		return -ENOTSUPP;
 
-	switch (mode) {
-	case PIN_CONFIG_BIAS_DISABLE:
-	case PIN_CONFIG_BIAS_PULL_DOWN:
-	case PIN_CONFIG_BIAS_PULL_UP:
-		arg = 1;
-		break;
-
-	default:
-		arg = 0;
-	}
-
-	config = PIN_CONF_PACKED(mode, arg);
-	return gc->set_config ? gc->set_config(gc, offset, config) : -ENOTSUPP;
+	return gc->set_config(gc, offset, mode);
 }
 
 static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc)
@@ -3302,15 +3275,9 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
 
 	VALIDATE_DESC(desc);
 	chip = desc->gdev->chip;
-	if (!chip->set || !chip->set_config) {
-		gpiod_dbg(desc,
-			  "%s: missing set() or set_config() operations\n",
-			  __func__);
-		return -ENOTSUPP;
-	}
 
 	config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
-	return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
+	return gpio_set_config(chip, gpio_chip_hwgpio(desc), config);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_debounce);
 
@@ -3334,10 +3301,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
 	 * Handle FLAG_TRANSITORY first, enabling queries to gpiolib for
 	 * persistence state.
 	 */
-	if (transitory)
-		set_bit(FLAG_TRANSITORY, &desc->flags);
-	else
-		clear_bit(FLAG_TRANSITORY, &desc->flags);
+	assign_bit(FLAG_TRANSITORY, &desc->flags, transitory);
 
 	/* If the driver supports it, set the persistence state now */
 	chip = desc->gdev->chip;
@@ -3347,7 +3311,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
 	packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
 					  !transitory);
 	gpio = gpio_chip_hwgpio(desc);
-	rc = chip->set_config(chip, gpio, packed);
+	rc = gpio_set_config(chip, gpio, packed);
 	if (rc == -ENOTSUPP) {
 		dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
 				gpio);
@@ -3804,10 +3768,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
 				gpio_set_open_source_value_commit(desc, value);
 			} else {
 				__set_bit(hwgpio, mask);
-				if (value)
-					__set_bit(hwgpio, bits);
-				else
-					__clear_bit(hwgpio, bits);
+				__assign_bit(hwgpio, bits, value);
 				count++;
 			}
 			i++;
@@ -5078,7 +5039,7 @@ struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
 	struct gpio_descs *descs;
 
 	descs = gpiod_get_array(dev, con_id, flags);
-	if (IS_ERR(descs) && (PTR_ERR(descs) == -ENOENT))
+	if (PTR_ERR(descs) == -ENOENT)
 		return NULL;
 
 	return descs;
@@ -5124,7 +5085,7 @@ static int __init gpiolib_dev_init(void)
 		return ret;
 	}
 
-	ret = alloc_chrdev_region(&gpio_devt, 0, GPIO_DEV_MAX, "gpiochip");
+	ret = alloc_chrdev_region(&gpio_devt, 0, GPIO_DEV_MAX, GPIOCHIP_NAME);
 	if (ret < 0) {
 		pr_err("gpiolib: failed to allocate char dev region\n");
 		bus_unregister(&gpio_bus_type);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index ca9bc1e..3e0aab2 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -16,6 +16,8 @@
 #include <linux/module.h>
 #include <linux/cdev.h>
 
+#define GPIOCHIP_NAME	"gpiochip"
+
 /**
  * struct gpio_device - internal state container for GPIO devices
  * @id: numerical ID number for the GPIO chip
@@ -78,7 +80,8 @@ struct gpio_array {
 	unsigned long		invert_mask[];
 };
 
-struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, u16 hwnum);
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
+				    unsigned int hwnum);
 int gpiod_get_array_value_complex(bool raw, bool can_sleep,
 				  unsigned int array_size,
 				  struct gpio_desc **desc_array,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index bfdadc3..d0aa6cf 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -54,6 +54,9 @@
 
 	  If in doubt, say "N".
 
+config DRM_EXPORT_FOR_TESTS
+	bool
+
 config DRM_DEBUG_SELFTEST
 	tristate "kselftests for DRM"
 	depends on DRM
@@ -61,6 +64,7 @@
 	select PRIME_NUMBERS
 	select DRM_LIB_RANDOM
 	select DRM_KMS_HELPER
+	select DRM_EXPORT_FOR_TESTS if m
 	default n
 	help
 	  This option provides kernel modules that can be used to run
@@ -164,6 +168,7 @@
 
 config DRM_DP_CEC
 	bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
+	depends on DRM
 	select CEC_CORE
 	help
 	  Choose this option if you want to enable HDMI CEC support for
@@ -294,9 +299,6 @@
 
 	  If M is selected the module will be called vkms.
 
-config DRM_ATI_PCIGART
-        bool
-
 source "drivers/gpu/drm/exynos/Kconfig"
 
 source "drivers/gpu/drm/rockchip/Kconfig"
@@ -393,7 +395,6 @@
 	bool "Enable legacy drivers (DANGEROUS)"
 	depends on DRM && MMU
 	select DRM_VM
-	select DRM_ATI_PCIGART if PCI
 	help
 	  Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
 	  APIs to user-space, which can be used to circumvent access
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 9f1c7c4..6493088 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -5,7 +5,7 @@
 
 drm-y       :=	drm_auth.o drm_cache.o \
 		drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
-		drm_memory.o drm_drv.o drm_pci.o \
+		drm_memory.o drm_drv.o \
 		drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
 		drm_encoder_slave.o \
@@ -25,10 +25,10 @@
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
 drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
-drm-$(CONFIG_DRM_ATI_PCIGART) += ati_pcigart.o
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 drm-$(CONFIG_OF) += drm_of.o
 drm-$(CONFIG_AGP) += drm_agpsupport.o
+drm-$(CONFIG_PCI) += drm_pci.o
 drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
 drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index 0d12ebf..13340f3 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -2,11 +2,11 @@
 menu "ACP (Audio CoProcessor) Configuration"
 
 config DRM_AMD_ACP
-       bool "Enable AMD Audio CoProcessor IP support"
-       depends on DRM_AMDGPU
-       select MFD_CORE
-       select PM_GENERIC_DOMAINS if PM
-       help
+	bool "Enable AMD Audio CoProcessor IP support"
+	depends on DRM_AMDGPU
+	select MFD_CORE
+	select PM_GENERIC_DOMAINS if PM
+	help
 	Choose this option to enable ACP IP support for AMD SOCs.
 	This adds the ACP (Audio CoProcessor) IP driver and wires
 	it up into the amdgpu driver.  The ACP block provides the DMA
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index ca0e435..c2bbcdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -120,6 +120,7 @@
 	amdgpu_rlc.o \
 	gfx_v8_0.o \
 	gfx_v9_0.o \
+	gfx_v9_4.o \
 	gfx_v10_0.o
 
 # add async DMA block
@@ -147,12 +148,16 @@
 	vce_v3_0.o \
 	vce_v4_0.o
 
-# add VCN block
+# add VCN and JPEG block
 amdgpu-y += \
 	amdgpu_vcn.o \
 	vcn_v1_0.o \
 	vcn_v2_0.o \
-	vcn_v2_5.o
+	vcn_v2_5.o \
+	amdgpu_jpeg.o \
+	jpeg_v1_0.o \
+	jpeg_v2_0.o \
+	jpeg_v2_5.o
 
 # add ATHUB block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 0c229a9..da3bcff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -69,6 +69,7 @@
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
 #include "amdgpu_vcn.h"
+#include "amdgpu_jpeg.h"
 #include "amdgpu_mn.h"
 #include "amdgpu_gmc.h"
 #include "amdgpu_gfx.h"
@@ -89,6 +90,7 @@
 #include "amdgpu_mes.h"
 #include "amdgpu_umc.h"
 #include "amdgpu_mmhub.h"
+#include "amdgpu_df.h"
 
 #define MAX_GPU_INSTANCE		16
 
@@ -588,6 +590,8 @@ struct amdgpu_asic_funcs {
 	bool (*need_reset_on_init)(struct amdgpu_device *adev);
 	/* PCIe replay counter */
 	uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
+	/* device supports BACO */
+	bool (*supports_baco)(struct amdgpu_device *adev);
 };
 
 /*
@@ -633,9 +637,8 @@ struct amdgpu_fw_vram_usage {
 	struct amdgpu_bo *reserved_bo;
 	void *va;
 
-	/* Offset on the top of VRAM, used as c2p write buffer.
+	/* GDDR6 training support flag.
 	*/
-	u64 mem_train_fb_loc;
 	bool mem_train_support;
 };
 
@@ -662,29 +665,6 @@ struct amdgpu_mmio_remap {
 	resource_size_t bus_addr;
 };
 
-struct amdgpu_df_funcs {
-	void (*sw_init)(struct amdgpu_device *adev);
-	void (*sw_fini)(struct amdgpu_device *adev);
-	void (*enable_broadcast_mode)(struct amdgpu_device *adev,
-				      bool enable);
-	u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
-	u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
-	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
-						 bool enable);
-	void (*get_clockgating_state)(struct amdgpu_device *adev,
-				      u32 *flags);
-	void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
-					    bool enable);
-	int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
-					 int is_enable);
-	int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
-					 int is_disable);
-	void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
-					 uint64_t *count);
-	uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
-	void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
-			 uint32_t ficadl_val, uint32_t ficadh_val);
-};
 /* Define the HW IP blocks will be used in driver , add more if necessary */
 enum amd_hw_ip_block_type {
 	GC_HWIP = 1,
@@ -704,6 +684,7 @@ enum amd_hw_ip_block_type {
 	MP1_HWIP,
 	UVD_HWIP,
 	VCN_HWIP = UVD_HWIP,
+	JPEG_HWIP = VCN_HWIP,
 	VCE_HWIP,
 	DF_HWIP,
 	DCE_HWIP,
@@ -899,6 +880,9 @@ struct amdgpu_device {
 	/* vcn */
 	struct amdgpu_vcn		vcn;
 
+	/* jpeg */
+	struct amdgpu_jpeg		jpeg;
+
 	/* firmwares */
 	struct amdgpu_firmware		firmware;
 
@@ -924,6 +908,9 @@ struct amdgpu_device {
 	bool                            enable_mes;
 	struct amdgpu_mes               mes;
 
+	/* df */
+	struct amdgpu_df                df;
+
 	struct amdgpu_ip_block          ip_blocks[AMDGPU_MAX_IP_NUM];
 	int				num_ip_blocks;
 	struct mutex	mn_lock;
@@ -937,8 +924,6 @@ struct amdgpu_device {
 	/* soc15 register offset based on ip, instance and  segment */
 	uint32_t 		*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
 
-	const struct amdgpu_df_funcs	*df_funcs;
-
 	/* delayed work_func for deferring clockgating during resume */
 	struct delayed_work     delayed_init_work;
 
@@ -982,6 +967,11 @@ struct amdgpu_device {
 
 	/* device pstate */
 	int				pstate;
+	/* enable runtime pm on the device */
+	bool                            runpm;
+
+	bool                            pm_sysfs_en;
+	bool                            ucode_sysfs_en;
 };
 
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1019,10 +1009,14 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 
 #define AMDGPU_REGS_IDX       (1<<0)
 #define AMDGPU_REGS_NO_KIQ    (1<<1)
+#define AMDGPU_REGS_KIQ       (1<<2)
 
 #define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
 #define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
 
+#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ)
+#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ)
+
 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
 
@@ -1117,6 +1111,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
 #define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
 #define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
+#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
+
 #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
 
 /* Common functions */
@@ -1133,9 +1129,12 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
 					     const u32 *registers,
 					     const u32 array_size);
 
-bool amdgpu_device_is_px(struct drm_device *dev);
+bool amdgpu_device_supports_boco(struct drm_device *dev);
+bool amdgpu_device_supports_baco(struct drm_device *dev);
 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
 				      struct amdgpu_device *peer_adev);
+int amdgpu_device_baco_enter(struct drm_device *dev);
+int amdgpu_device_baco_exit(struct drm_device *dev);
 
 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)
@@ -1173,8 +1172,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
 void amdgpu_driver_postclose_kms(struct drm_device *dev,
 				 struct drm_file *file_priv);
 int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
-int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
-int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
+int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
+int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
 int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
 void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 82155ac..12247a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -527,7 +527,7 @@ static int acp_set_powergating_state(void *handle,
 				     enum amd_powergating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = state == AMD_PG_STATE_GATE ? true : false;
+	bool enable = (state == AMD_PG_STATE_GATE);
 
 	if (adev->powerplay.pp_funcs &&
 		adev->powerplay.pp_funcs->set_powergating_by_smu)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d3da9dd..8609287 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -613,15 +613,9 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
-	if (is_support_sw_smu(adev))
-		smu_switch_power_profile(&adev->smu,
-					 PP_SMC_POWER_PROFILE_COMPUTE,
-					 !idle);
-	else if (adev->powerplay.pp_funcs &&
-		 adev->powerplay.pp_funcs->switch_power_profile)
-		amdgpu_dpm_switch_power_profile(adev,
-						PP_SMC_POWER_PROFILE_COMPUTE,
-						!idle);
+	amdgpu_dpm_switch_power_profile(adev,
+					PP_SMC_POWER_PROFILE_COMPUTE,
+					!idle);
 }
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
@@ -634,6 +628,38 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
 	return false;
 }
 
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+	if (adev->family == AMDGPU_FAMILY_AI) {
+		int i;
+
+		for (i = 0; i < adev->num_vmhubs; i++)
+			amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
+	} else {
+		amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
+	}
+
+	return 0;
+}
+
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+	uint32_t flush_type = 0;
+	bool all_hub = false;
+
+	if (adev->gmc.xgmi.num_physical_nodes &&
+		adev->asic_type == CHIP_VEGA20)
+		flush_type = 2;
+
+	if (adev->family == AMDGPU_FAMILY_AI)
+		all_hub = true;
+
+	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
+}
+
 bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 069d5d2..47b0f29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -136,6 +136,8 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 				uint32_t *ib_cmd, uint32_t ib_len);
 void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
 bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid);
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index b6713e0..4bcc175 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -46,6 +46,8 @@
 #include "soc15.h"
 #include "soc15d.h"
 #include "amdgpu_amdkfd_gfx_v9.h"
+#include "gfxhub_v1_0.h"
+#include "mmhub_v9_4.h"
 
 #define HQD_N_REGS 56
 #define DUMP_REG(addr) do {				\
@@ -69,32 +71,56 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
 				unsigned int engine_id,
 				unsigned int queue_id)
 {
-	uint32_t sdma_engine_reg_base[8] = {
-		SOC15_REG_OFFSET(SDMA0, 0,
-				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
-		SOC15_REG_OFFSET(SDMA1, 0,
-				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL,
-		SOC15_REG_OFFSET(SDMA2, 0,
-				 mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL,
-		SOC15_REG_OFFSET(SDMA3, 0,
-				 mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL,
-		SOC15_REG_OFFSET(SDMA4, 0,
-				 mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL,
-		SOC15_REG_OFFSET(SDMA5, 0,
-				 mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL,
-		SOC15_REG_OFFSET(SDMA6, 0,
-				 mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL,
-		SOC15_REG_OFFSET(SDMA7, 0,
-				 mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL
-	};
+	uint32_t sdma_engine_reg_base = 0;
+	uint32_t sdma_rlc_reg_offset;
 
-	uint32_t retval = sdma_engine_reg_base[engine_id]
+	switch (engine_id) {
+	default:
+		dev_warn(adev->dev,
+			 "Invalid sdma engine id (%d), using engine id 0\n",
+			 engine_id);
+		/* fall through */
+	case 0:
+		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+		break;
+	case 1:
+		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+				mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL;
+		break;
+	case 2:
+		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
+				mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+		break;
+	case 3:
+		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
+				mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL;
+		break;
+	case 4:
+		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA4, 0,
+				mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL;
+		break;
+	case 5:
+		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA5, 0,
+				mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL;
+		break;
+	case 6:
+		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA6, 0,
+				mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL;
+		break;
+	case 7:
+		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA7, 0,
+				mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL;
+		break;
+	}
+
+	sdma_rlc_reg_offset = sdma_engine_reg_base
 		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
 
 	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
-			queue_id, retval);
+			queue_id, sdma_rlc_reg_offset);
 
-	return retval;
+	return sdma_rlc_reg_offset;
 }
 
 static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
@@ -258,11 +284,28 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
 	return 0;
 }
 
+static void kgd_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+		uint64_t page_table_base)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+		pr_err("trying to set page table base for wrong VMID %u\n",
+		       vmid);
+		return;
+	}
+
+	mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
+
+	gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+}
+
 const struct kfd2kgd_calls arcturus_kfd2kgd = {
 	.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
 	.init_interrupts = kgd_gfx_v9_init_interrupts,
 	.hqd_load = kgd_gfx_v9_hqd_load,
+	.hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
 	.hqd_sdma_load = kgd_hqd_sdma_load,
 	.hqd_dump = kgd_gfx_v9_hqd_dump,
 	.hqd_sdma_dump = kgd_hqd_sdma_dump,
@@ -277,8 +320,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
 	.get_atc_vmid_pasid_mapping_info =
 			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
 	.get_tile_config = kgd_gfx_v9_get_tile_config,
-	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
-	.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
-	.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
+	.set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 61cd707..a7b17c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -107,13 +107,13 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
 	lock_srbm(kgd, mec, pipe, queue_id, 0);
 }
 
-static uint32_t get_queue_mask(struct amdgpu_device *adev,
+static uint64_t get_queue_mask(struct amdgpu_device *adev,
 			       uint32_t pipe_id, uint32_t queue_id)
 {
-	unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
-			    queue_id) & 31;
+	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
+			queue_id;
 
-	return ((uint32_t)1) << bit;
+	return 1ull << bit;
 }
 
 static void release_queue(struct kgd_dev *kgd)
@@ -268,21 +268,6 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
 	acquire_queue(kgd, pipe_id, queue_id);
 
-	/* HIQ is set during driver init period with vmid set to 0*/
-	if (m->cp_hqd_vmid == 0) {
-		uint32_t value, mec, pipe;
-
-		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
-		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
-
-		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
-			mec, pipe, queue_id);
-		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
-		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
-			((mec << 5) | (pipe << 3) | queue_id | 0x80));
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
-	}
-
 	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
 	mqd_hqd = &m->cp_mqd_base_addr_lo;
 	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
@@ -332,9 +317,10 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 		       lower_32_bits((uint64_t)wptr));
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
 		       upper_32_bits((uint64_t)wptr));
-		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__, get_queue_mask(adev, pipe_id, queue_id));
+		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
+			 (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
-		       get_queue_mask(adev, pipe_id, queue_id));
+		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
 	}
 
 	/* Start the EOP fetcher */
@@ -350,6 +336,59 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	return 0;
 }
 
+static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+			    uint32_t pipe_id, uint32_t queue_id,
+			    uint32_t doorbell_off)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+	struct v10_compute_mqd *m;
+	uint32_t mec, pipe;
+	int r;
+
+	m = get_mqd(mqd);
+
+	acquire_queue(kgd, pipe_id, queue_id);
+
+	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+		 mec, pipe, queue_id);
+
+	spin_lock(&adev->gfx.kiq.ring_lock);
+	r = amdgpu_ring_alloc(kiq_ring, 7);
+	if (r) {
+		pr_err("Failed to alloc KIQ (%d).\n", r);
+		goto out_unlock;
+	}
+
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+	amdgpu_ring_write(kiq_ring,
+			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
+			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
+			  PACKET3_MAP_QUEUES_PIPE(pipe) |
+			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
+			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
+			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
+			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
+			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
+	amdgpu_ring_write(kiq_ring,
+			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
+	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
+	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
+	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
+	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
+	amdgpu_ring_commit(kiq_ring);
+
+out_unlock:
+	spin_unlock(&adev->gfx.kiq.ring_lock);
+	release_queue(kgd);
+
+	return r;
+}
+
 static int kgd_hqd_dump(struct kgd_dev *kgd,
 			uint32_t pipe_id, uint32_t queue_id,
 			uint32_t (**dump)[2], uint32_t *n_regs)
@@ -686,71 +725,6 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
 	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
 }
 
-static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
-{
-	signed long r;
-	uint32_t seq;
-	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
-	spin_lock(&adev->gfx.kiq.ring_lock);
-	amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/
-	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
-	amdgpu_ring_write(ring,
-			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
-			PACKET3_INVALIDATE_TLBS_PASID(pasid));
-	amdgpu_fence_emit_polling(ring, &seq);
-	amdgpu_ring_commit(ring);
-	spin_unlock(&adev->gfx.kiq.ring_lock);
-
-	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
-	if (r < 1) {
-		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
-		return -ETIME;
-	}
-
-	return 0;
-}
-
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	int vmid;
-	uint16_t queried_pasid;
-	bool ret;
-	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
-	if (amdgpu_emu_mode == 0 && ring->sched.ready)
-		return invalidate_tlbs_with_kiq(adev, pasid);
-
-	for (vmid = 0; vmid < 16; vmid++) {
-		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
-			continue;
-
-		ret = get_atc_vmid_pasid_mapping_info(kgd, vmid,
-				&queried_pasid);
-		if (ret	&& queried_pasid == pasid) {
-			amdgpu_gmc_flush_gpu_tlb(adev, vmid,
-					AMDGPU_GFXHUB_0, 0);
-			break;
-		}
-	}
-
-	return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
-	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
-		pr_err("non kfd vmid %d\n", vmid);
-		return 0;
-	}
-
-	amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
-	return 0;
-}
-
 static int kgd_address_watch_disable(struct kgd_dev *kgd)
 {
 	return 0;
@@ -817,6 +791,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
 	.init_interrupts = kgd_init_interrupts,
 	.hqd_load = kgd_hqd_load,
+	.hiq_mqd_load = kgd_hiq_mqd_load,
 	.hqd_sdma_load = kgd_hqd_sdma_load,
 	.hqd_dump = kgd_hqd_dump,
 	.hqd_sdma_dump = kgd_hqd_sdma_dump,
@@ -832,7 +807,5 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
 			get_atc_vmid_pasid_mapping_info,
 	.get_tile_config = amdgpu_amdkfd_get_tile_config,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.invalidate_tlbs = invalidate_tlbs,
-	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 6e6f0a9..8f052e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -696,45 +696,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 		lower_32_bits(page_table_base));
 }
 
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	int vmid;
-	unsigned int tmp;
-
-	if (adev->in_gpu_reset)
-		return -EIO;
-
-	for (vmid = 0; vmid < 16; vmid++) {
-		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
-			continue;
-
-		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
-		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
-			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
-			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
-			RREG32(mmVM_INVALIDATE_RESPONSE);
-			break;
-		}
-	}
-
-	return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
-	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
-		pr_err("non kfd vmid\n");
-		return 0;
-	}
-
-	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
-	RREG32(mmVM_INVALIDATE_RESPONSE);
-	return 0;
-}
-
  /**
   * read_vmid_from_vmfault_reg - read vmid from register
   *
@@ -771,7 +732,5 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.get_tile_config = get_tile_config,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.invalidate_tlbs = invalidate_tlbs,
-	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
 	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index bfbdded..19a10db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -657,45 +657,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 			lower_32_bits(page_table_base));
 }
 
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	int vmid;
-	unsigned int tmp;
-
-	if (adev->in_gpu_reset)
-		return -EIO;
-
-	for (vmid = 0; vmid < 16; vmid++) {
-		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
-			continue;
-
-		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
-		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
-			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
-			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
-			RREG32(mmVM_INVALIDATE_RESPONSE);
-			break;
-		}
-	}
-
-	return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
-	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
-		pr_err("non kfd vmid %d\n", vmid);
-		return -EINVAL;
-	}
-
-	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
-	RREG32(mmVM_INVALIDATE_RESPONSE);
-	return 0;
-}
-
 const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -717,6 +678,4 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.get_tile_config = get_tile_config,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.invalidate_tlbs = invalidate_tlbs,
-	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 47c853e..8562afe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -40,7 +40,6 @@
 #include "soc15d.h"
 #include "mmhub_v1_0.h"
 #include "gfxhub_v1_0.h"
-#include "gmc_v9_0.h"
 
 
 enum hqd_dequeue_request_type {
@@ -104,13 +103,13 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
 	lock_srbm(kgd, mec, pipe, queue_id, 0);
 }
 
-static uint32_t get_queue_mask(struct amdgpu_device *adev,
+static uint64_t get_queue_mask(struct amdgpu_device *adev,
 			       uint32_t pipe_id, uint32_t queue_id)
 {
-	unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
-			    queue_id) & 31;
+	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
+			queue_id;
 
-	return ((uint32_t)1) << bit;
+	return 1ull << bit;
 }
 
 static void release_queue(struct kgd_dev *kgd)
@@ -259,21 +258,6 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 
 	acquire_queue(kgd, pipe_id, queue_id);
 
-	/* HIQ is set during driver init period with vmid set to 0*/
-	if (m->cp_hqd_vmid == 0) {
-		uint32_t value, mec, pipe;
-
-		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
-		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
-
-		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
-			mec, pipe, queue_id);
-		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
-		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
-			((mec << 5) | (pipe << 3) | queue_id | 0x80));
-		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
-	}
-
 	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
 	mqd_hqd = &m->cp_mqd_base_addr_lo;
 	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
@@ -324,7 +308,7 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
 		       upper_32_bits((uintptr_t)wptr));
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
-		       get_queue_mask(adev, pipe_id, queue_id));
+		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
 	}
 
 	/* Start the EOP fetcher */
@@ -340,6 +324,59 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	return 0;
 }
 
+int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+			    uint32_t pipe_id, uint32_t queue_id,
+			    uint32_t doorbell_off)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+	struct v9_mqd *m;
+	uint32_t mec, pipe;
+	int r;
+
+	m = get_mqd(mqd);
+
+	acquire_queue(kgd, pipe_id, queue_id);
+
+	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+		 mec, pipe, queue_id);
+
+	spin_lock(&adev->gfx.kiq.ring_lock);
+	r = amdgpu_ring_alloc(kiq_ring, 7);
+	if (r) {
+		pr_err("Failed to alloc KIQ (%d).\n", r);
+		goto out_unlock;
+	}
+
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+	amdgpu_ring_write(kiq_ring,
+			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
+			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
+			  PACKET3_MAP_QUEUES_PIPE(pipe) |
+			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
+			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
+			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
+			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
+			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
+	amdgpu_ring_write(kiq_ring,
+			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
+	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
+	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
+	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
+	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
+	amdgpu_ring_commit(kiq_ring);
+
+out_unlock:
+	spin_unlock(&adev->gfx.kiq.ring_lock);
+	release_queue(kgd);
+
+	return r;
+}
+
 int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
 			uint32_t pipe_id, uint32_t queue_id,
 			uint32_t (**dump)[2], uint32_t *n_regs)
@@ -618,100 +655,6 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
 	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
 }
 
-static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
-			uint32_t flush_type)
-{
-	signed long r;
-	uint32_t seq;
-	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
-	spin_lock(&adev->gfx.kiq.ring_lock);
-	amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/
-	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
-	amdgpu_ring_write(ring,
-			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
-			PACKET3_INVALIDATE_TLBS_ALL_HUB(1) |
-			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
-			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
-	amdgpu_fence_emit_polling(ring, &seq);
-	amdgpu_ring_commit(ring);
-	spin_unlock(&adev->gfx.kiq.ring_lock);
-
-	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
-	if (r < 1) {
-		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
-		return -ETIME;
-	}
-
-	return 0;
-}
-
-int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	int vmid, i;
-	uint16_t queried_pasid;
-	bool ret;
-	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-	uint32_t flush_type = 0;
-
-	if (adev->in_gpu_reset)
-		return -EIO;
-	if (adev->gmc.xgmi.num_physical_nodes &&
-		adev->asic_type == CHIP_VEGA20)
-		flush_type = 2;
-
-	if (ring->sched.ready)
-		return invalidate_tlbs_with_kiq(adev, pasid, flush_type);
-
-	for (vmid = 0; vmid < 16; vmid++) {
-		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
-			continue;
-
-		ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid,
-				&queried_pasid);
-		if (ret && queried_pasid == pasid) {
-			for (i = 0; i < adev->num_vmhubs; i++)
-				amdgpu_gmc_flush_gpu_tlb(adev, vmid,
-							i, flush_type);
-			break;
-		}
-	}
-
-	return 0;
-}
-
-int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	int i;
-
-	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
-		pr_err("non kfd vmid %d\n", vmid);
-		return 0;
-	}
-
-	/* Use legacy mode tlb invalidation.
-	 *
-	 * Currently on Raven the code below is broken for anything but
-	 * legacy mode due to a MMHUB power gating problem. A workaround
-	 * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ
-	 * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack
-	 * bit.
-	 *
-	 * TODO 1: agree on the right set of invalidation registers for
-	 * KFD use. Use the last one for now. Invalidate both GC and
-	 * MMHUB.
-	 *
-	 * TODO 2: support range-based invalidation, requires kfg2kgd
-	 * interface change
-	 */
-	for (i = 0; i < adev->num_vmhubs; i++)
-		amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
-
-	return 0;
-}
-
 int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd)
 {
 	return 0;
@@ -758,8 +701,8 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
 	return 0;
 }
 
-void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-		uint64_t page_table_base)
+static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+			uint32_t vmid, uint64_t page_table_base)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 
@@ -769,16 +712,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi
 		return;
 	}
 
-	/* TODO: take advantage of per-process address space size. For
-	 * now, all processes share the same address space size, like
-	 * on GFX8 and older.
-	 */
-	if (adev->asic_type == CHIP_ARCTURUS) {
-		/* Two MMHUBs */
-		mmhub_v9_4_setup_vm_pt_regs(adev, 0, vmid, page_table_base);
-		mmhub_v9_4_setup_vm_pt_regs(adev, 1, vmid, page_table_base);
-	} else
-		mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+	mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 
 	gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 }
@@ -788,6 +722,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
 	.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
 	.init_interrupts = kgd_gfx_v9_init_interrupts,
 	.hqd_load = kgd_gfx_v9_hqd_load,
+	.hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
 	.hqd_sdma_load = kgd_hqd_sdma_load,
 	.hqd_dump = kgd_gfx_v9_hqd_dump,
 	.hqd_sdma_dump = kgd_hqd_sdma_dump,
@@ -803,7 +738,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
 			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
 	.get_tile_config = kgd_gfx_v9_get_tile_config,
 	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
-	.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
-	.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index d9e9ad2..63d3e66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -33,6 +33,9 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 			uint32_t queue_id, uint32_t __user *wptr,
 			uint32_t wptr_shift, uint32_t wptr_mask,
 			struct mm_struct *mm);
+int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+			    uint32_t pipe_id, uint32_t queue_id,
+			    uint32_t doorbell_off);
 int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
 			uint32_t pipe_id, uint32_t queue_id,
 			uint32_t (**dump)[2], uint32_t *n_regs);
@@ -57,9 +60,5 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
 
 bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
 					uint8_t vmid, uint16_t *p_pasid);
-void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-		uint64_t page_table_base);
-int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
 int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
 		struct tile_config *config);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 888209e..fa8ac9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -85,7 +85,7 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
 }
 
 /* Set memory usage limits. Currently, limits are
- *  System (TTM + userptr) memory - 3/4th System RAM
+ *  System (TTM + userptr) memory - 15/16th System RAM
  *  TTM memory - 3/8th System RAM
  */
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
@@ -98,7 +98,7 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
 	mem *= si.mem_unit;
 
 	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
-	kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
+	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
 	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
 	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
 		(kfd_mem_limit.max_system_mem_limit >> 20),
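
The raised limit keeps the shift-only arithmetic: mem - (mem >> 4) is 15/16 of system RAM, while (mem >> 1) - (mem >> 3) stays at 3/8 for TTM. A minimal standalone sketch of that arithmetic, assuming a hypothetical 16 GiB machine (illustration only, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t mem = 16ULL << 30;                     /* assumed 16 GiB of system RAM */
		uint64_t sys_limit = mem - (mem >> 4);          /* 15/16 of RAM -> 15360M */
		uint64_t ttm_limit = (mem >> 1) - (mem >> 3);   /* 3/8 of RAM   ->  6144M */

		printf("Kernel memory limit %lluM, TTM limit %lluM\n",
		       (unsigned long long)(sys_limit >> 20),
		       (unsigned long long)(ttm_limit >> 20));
		return 0;
	}
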
@@ -358,7 +358,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 	if (ret)
 		return ret;
 
-	return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
+	return amdgpu_sync_fence(sync, vm->last_update, false);
 }
 
 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
@@ -750,7 +750,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
 
 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
 
-	amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
+	amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
 
 	return 0;
 }
@@ -769,7 +769,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
 		return ret;
 	}
 
-	return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
+	return amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
 }
 
 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
@@ -1674,10 +1674,10 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
 				struct mm_struct *mm)
 {
 	struct amdkfd_process_info *process_info = mem->process_info;
-	int invalid, evicted_bos;
+	int evicted_bos;
 	int r = 0;
 
-	invalid = atomic_inc_return(&mem->invalid);
+	atomic_inc(&mem->invalid);
 	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
 	if (evicted_bos == 1) {
 		/* First eviction, stop the queues */
@@ -2048,7 +2048,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
 			goto validate_map_fail;
 		}
-		ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
+		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving, false);
 		if (ret) {
 			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
 			goto validate_map_fail;
@@ -2129,6 +2129,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
 		return -ENOMEM;
 
 	mutex_init(&(*mem)->lock);
+	INIT_LIST_HEAD(&(*mem)->bo_va_list);
 	(*mem)->bo = amdgpu_bo_ref(gws_bo);
 	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
 	(*mem)->process_info = process_info;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 72232fc..fdd52d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -338,17 +338,9 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
 		path_size += le16_to_cpu(path->usSize);
 
 		if (device_support & le16_to_cpu(path->usDeviceTag)) {
-			uint8_t con_obj_id, con_obj_num, con_obj_type;
-
-			con_obj_id =
+			uint8_t con_obj_id =
 			    (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
 			    >> OBJECT_ID_SHIFT;
-			con_obj_num =
-			    (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
-			    >> ENUM_ID_SHIFT;
-			con_obj_type =
-			    (le16_to_cpu(path->usConnObjectId) &
-			     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
 			/* Skip TV/CV support */
 			if ((le16_to_cpu(path->usDeviceTag) ==
@@ -373,15 +365,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
 			router.ddc_valid = false;
 			router.cd_valid = false;
 			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
-				uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
-
-				grph_obj_id =
-				    (le16_to_cpu(path->usGraphicObjIds[j]) &
-				     OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
-				grph_obj_num =
-				    (le16_to_cpu(path->usGraphicObjIds[j]) &
-				     ENUM_ID_MASK) >> ENUM_ID_SHIFT;
-				grph_obj_type =
+				uint8_t grph_obj_type =
 				    (le16_to_cpu(path->usGraphicObjIds[j]) &
 				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
@@ -2038,7 +2022,7 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
 	if (adev->is_atom_fw) {
 		amdgpu_atomfirmware_scratch_regs_init(adev);
 		amdgpu_atomfirmware_allocate_fb_scratch(adev);
-		ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev);
+		ret = amdgpu_atomfirmware_get_mem_train_info(adev);
 		if (ret) {
 			DRM_ERROR("Failed to get mem train fb location.\n");
 			return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index ff4eb96..58f9d8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -525,16 +525,12 @@ static int gddr6_mem_train_support(struct amdgpu_device *adev)
 	return ret;
 }
 
-int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
+int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev)
 {
 	struct atom_context *ctx = adev->mode_info.atom_context;
-	unsigned char *bios = ctx->bios;
-	struct vram_reserve_block *reserved_block;
-	int index, block_number;
+	int index;
 	uint8_t frev, crev;
 	uint16_t data_offset, size;
-	uint32_t start_address_in_kb;
-	uint64_t offset;
 	int ret;
 
 	adev->fw_vram_usage.mem_train_support = false;
@@ -569,32 +565,6 @@ int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
 		return -EINVAL;
 	}
 
-	reserved_block = (struct vram_reserve_block *)
-		(bios + data_offset + sizeof(struct atom_common_table_header));
-	block_number = ((unsigned int)size - sizeof(struct atom_common_table_header))
-		/ sizeof(struct vram_reserve_block);
-	reserved_block += (block_number > 0) ? block_number-1 : 0;
-	DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n",
-		  block_number,
-		  le32_to_cpu(reserved_block->start_address_in_kb),
-		  le16_to_cpu(reserved_block->used_by_firmware_in_kb),
-		  le16_to_cpu(reserved_block->used_by_driver_in_kb));
-	if (reserved_block->used_by_firmware_in_kb > 0) {
-		start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb);
-		offset = (uint64_t)start_address_in_kb * ONE_KiB;
-		if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1) ) {
-			offset -= ONE_MiB;
-		}
-
-		offset &= ~(ONE_MiB - 1);
-		adev->fw_vram_usage.mem_train_fb_loc = offset;
-		adev->fw_vram_usage.mem_train_support = true;
-		DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset);
-		ret = 0;
-	} else {
-		DRM_ERROR("used_by_firmware_in_kb is 0!\n");
-		ret = -EINVAL;
-	}
-
-	return ret;
+	adev->fw_vram_usage.mem_train_support = true;
+	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index f871af5..434fe2fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -31,7 +31,7 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
 	int *vram_width, int *vram_type, int *vram_vendor);
-int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5ca905b..a52a084 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -795,29 +795,23 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	r = amdgpu_sync_fence(adev, &p->job->sync,
-			      fpriv->prt_va->last_pt_update, false);
+	r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
 	if (r)
 		return r;
 
 	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
-		struct dma_fence *f;
-
 		bo_va = fpriv->csa_va;
 		BUG_ON(!bo_va);
 		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			return r;
 
-		f = bo_va->last_pt_update;
-		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+		r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
 
 	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-		struct dma_fence *f;
-
 		/* ignore duplicates */
 		bo = ttm_to_amdgpu_bo(e->tv.bo);
 		if (!bo)
@@ -831,8 +825,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 		if (r)
 			return r;
 
-		f = bo_va->last_pt_update;
-		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+		r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
@@ -845,7 +838,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
+	r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update);
 	if (r)
 		return r;
 
@@ -916,6 +909,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 		if (parser->entity && parser->entity != entity)
 			return -EINVAL;
 
+		/* Return if there is no run queue associated with this entity.
+		 * Possibly because of disabled HW IP. */
+		if (entity->rq == NULL)
+			return -EINVAL;
+
 		parser->entity = entity;
 
 		ring = to_amdgpu_ring(entity->rq->sched);
@@ -987,7 +985,7 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
 			dma_fence_put(old);
 		}
 
-		r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
+		r = amdgpu_sync_fence(&p->job->sync, fence, true);
 		dma_fence_put(fence);
 		if (r)
 			return r;
@@ -1009,7 +1007,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
 		return r;
 	}
 
-	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
+	r = amdgpu_sync_fence(&p->job->sync, fence, true);
 	dma_fence_put(fence);
 
 	return r;
@@ -1236,7 +1234,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 		goto error_abort;
 	}
 
-	job->owner = p->filp;
 	p->fence = dma_fence_get(&job->base.s_fence->finished);
 
 	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6614d8a..94a6c42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -42,19 +42,12 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
 	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
 };
 
-static int amdgpu_ctx_total_num_entities(void)
-{
-	unsigned i, num_entities = 0;
-
-	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
-		num_entities += amdgpu_ctx_num_entities[i];
-
-	return num_entities;
-}
-
 static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 				      enum drm_sched_priority priority)
 {
+	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
+		return -EINVAL;
+
 	/* NORMAL and below are accessible by everyone */
 	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
 		return 0;
@@ -68,48 +61,95 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 	return -EACCES;
 }
 
+static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
+{
+	struct amdgpu_device *adev = ctx->adev;
+	struct amdgpu_ctx_entity *entity;
+	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
+	unsigned num_scheds = 0;
+	enum drm_sched_priority priority;
+	int r;
+
+	entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
+			 GFP_KERNEL);
+	if (!entity)
+		return  -ENOMEM;
+
+	entity->sequence = 1;
+	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
+				ctx->init_priority : ctx->override_priority;
+	switch (hw_ip) {
+		case AMDGPU_HW_IP_GFX:
+			sched = &adev->gfx.gfx_ring[0].sched;
+			scheds = &sched;
+			num_scheds = 1;
+			break;
+		case AMDGPU_HW_IP_COMPUTE:
+			scheds = adev->gfx.compute_sched;
+			num_scheds = adev->gfx.num_compute_sched;
+			break;
+		case AMDGPU_HW_IP_DMA:
+			scheds = adev->sdma.sdma_sched;
+			num_scheds = adev->sdma.num_sdma_sched;
+			break;
+		case AMDGPU_HW_IP_UVD:
+			sched = &adev->uvd.inst[0].ring.sched;
+			scheds = &sched;
+			num_scheds = 1;
+			break;
+		case AMDGPU_HW_IP_VCE:
+			sched = &adev->vce.ring[0].sched;
+			scheds = &sched;
+			num_scheds = 1;
+			break;
+		case AMDGPU_HW_IP_UVD_ENC:
+			sched = &adev->uvd.inst[0].ring_enc[0].sched;
+			scheds = &sched;
+			num_scheds = 1;
+			break;
+		case AMDGPU_HW_IP_VCN_DEC:
+			scheds = adev->vcn.vcn_dec_sched;
+			num_scheds =  adev->vcn.num_vcn_dec_sched;
+			break;
+		case AMDGPU_HW_IP_VCN_ENC:
+			scheds = adev->vcn.vcn_enc_sched;
+			num_scheds =  adev->vcn.num_vcn_enc_sched;
+			break;
+		case AMDGPU_HW_IP_VCN_JPEG:
+			scheds = adev->jpeg.jpeg_sched;
+			num_scheds =  adev->jpeg.num_jpeg_sched;
+			break;
+	}
+
+	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
+				  &ctx->guilty);
+	if (r)
+		goto error_free_entity;
+
+	ctx->entities[hw_ip][ring] = entity;
+	return 0;
+
+error_free_entity:
+	kfree(entity);
+
+	return r;
+}
+
 static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			   enum drm_sched_priority priority,
 			   struct drm_file *filp,
 			   struct amdgpu_ctx *ctx)
 {
-	unsigned num_entities = amdgpu_ctx_total_num_entities();
-	unsigned i, j, k;
 	int r;
 
-	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
-		return -EINVAL;
-
 	r = amdgpu_ctx_priority_permit(filp, priority);
 	if (r)
 		return r;
 
 	memset(ctx, 0, sizeof(*ctx));
+
 	ctx->adev = adev;
 
-	ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
-			      sizeof(struct dma_fence*), GFP_KERNEL);
-	if (!ctx->fences)
-		return -ENOMEM;
-
-	ctx->entities[0] = kcalloc(num_entities,
-				   sizeof(struct amdgpu_ctx_entity),
-				   GFP_KERNEL);
-	if (!ctx->entities[0]) {
-		r = -ENOMEM;
-		goto error_free_fences;
-	}
-
-	for (i = 0; i < num_entities; ++i) {
-		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
-
-		entity->sequence = 1;
-		entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
-	}
-	for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
-		ctx->entities[i] = ctx->entities[i - 1] +
-			amdgpu_ctx_num_entities[i - 1];
-
 	kref_init(&ctx->refcount);
 	spin_lock_init(&ctx->ring_lock);
 	mutex_init(&ctx->lock);
@@ -120,114 +160,49 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	ctx->init_priority = priority;
 	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
 
-	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
-		struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
-		struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
-		unsigned num_rings = 0;
-		unsigned num_rqs = 0;
-
-		switch (i) {
-		case AMDGPU_HW_IP_GFX:
-			rings[0] = &adev->gfx.gfx_ring[0];
-			num_rings = 1;
-			break;
-		case AMDGPU_HW_IP_COMPUTE:
-			for (j = 0; j < adev->gfx.num_compute_rings; ++j)
-				rings[j] = &adev->gfx.compute_ring[j];
-			num_rings = adev->gfx.num_compute_rings;
-			break;
-		case AMDGPU_HW_IP_DMA:
-			for (j = 0; j < adev->sdma.num_instances; ++j)
-				rings[j] = &adev->sdma.instance[j].ring;
-			num_rings = adev->sdma.num_instances;
-			break;
-		case AMDGPU_HW_IP_UVD:
-			rings[0] = &adev->uvd.inst[0].ring;
-			num_rings = 1;
-			break;
-		case AMDGPU_HW_IP_VCE:
-			rings[0] = &adev->vce.ring[0];
-			num_rings = 1;
-			break;
-		case AMDGPU_HW_IP_UVD_ENC:
-			rings[0] = &adev->uvd.inst[0].ring_enc[0];
-			num_rings = 1;
-			break;
-		case AMDGPU_HW_IP_VCN_DEC:
-			for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
-				if (adev->vcn.harvest_config & (1 << j))
-					continue;
-				rings[num_rings++] = &adev->vcn.inst[j].ring_dec;
-			}
-			break;
-		case AMDGPU_HW_IP_VCN_ENC:
-			for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
-				if (adev->vcn.harvest_config & (1 << j))
-					continue;
-				for (k = 0; k < adev->vcn.num_enc_rings; ++k)
-					rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k];
-			}
-			break;
-		case AMDGPU_HW_IP_VCN_JPEG:
-			for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
-				if (adev->vcn.harvest_config & (1 << j))
-					continue;
-				rings[num_rings++] = &adev->vcn.inst[j].ring_jpeg;
-			}
-			break;
-		}
-
-		for (j = 0; j < num_rings; ++j) {
-			if (!rings[j]->adev)
-				continue;
-
-			rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
-		}
-
-		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
-			r = drm_sched_entity_init(&ctx->entities[i][j].entity,
-						  rqs, num_rqs, &ctx->guilty);
-		if (r)
-			goto error_cleanup_entities;
-	}
-
 	return 0;
 
-error_cleanup_entities:
-	for (i = 0; i < num_entities; ++i)
-		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
-	kfree(ctx->entities[0]);
+}
 
-error_free_fences:
-	kfree(ctx->fences);
-	ctx->fences = NULL;
-	return r;
+static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
+{
+
+	int i;
+
+	if (!entity)
+		return;
+
+	for (i = 0; i < amdgpu_sched_jobs; ++i)
+		dma_fence_put(entity->fences[i]);
+
+	kfree(entity);
 }
 
 static void amdgpu_ctx_fini(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
-	unsigned num_entities = amdgpu_ctx_total_num_entities();
 	struct amdgpu_device *adev = ctx->adev;
 	unsigned i, j;
 
 	if (!adev)
 		return;
 
-	for (i = 0; i < num_entities; ++i)
-		for (j = 0; j < amdgpu_sched_jobs; ++j)
-			dma_fence_put(ctx->entities[0][i].fences[j]);
-	kfree(ctx->fences);
-	kfree(ctx->entities[0]);
+	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
+			amdgpu_ctx_fini_entity(ctx->entities[i][j]);
+			ctx->entities[i][j] = NULL;
+		}
+	}
 
 	mutex_destroy(&ctx->lock);
-
 	kfree(ctx);
 }
 
 int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
 			  u32 ring, struct drm_sched_entity **entity)
 {
+	int r;
+
 	if (hw_ip >= AMDGPU_HW_IP_NUM) {
 		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
 		return -EINVAL;
@@ -244,7 +219,13 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
 		return -EINVAL;
 	}
 
-	*entity = &ctx->entities[hw_ip][ring].entity;
+	if (ctx->entities[hw_ip][ring] == NULL) {
+		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
+		if (r)
+			return r;
+	}
+
+	*entity = &ctx->entities[hw_ip][ring]->entity;
 	return 0;
 }
 
@@ -284,14 +265,17 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 static void amdgpu_ctx_do_release(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx;
-	unsigned num_entities;
-	u32 i;
+	u32 i, j;
 
 	ctx = container_of(ref, struct amdgpu_ctx, refcount);
+	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+			if (!ctx->entities[i][j])
+				continue;
 
-	num_entities = amdgpu_ctx_total_num_entities();
-	for (i = 0; i < num_entities; i++)
-		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
+			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
+		}
+	}
 
 	amdgpu_ctx_fini(ref);
 }
@@ -521,19 +505,23 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 				  enum drm_sched_priority priority)
 {
-	unsigned num_entities = amdgpu_ctx_total_num_entities();
 	enum drm_sched_priority ctx_prio;
-	unsigned i;
+	unsigned i, j;
 
 	ctx->override_priority = priority;
 
 	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
 			ctx->init_priority : ctx->override_priority;
+	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+			struct drm_sched_entity *entity;
 
-	for (i = 0; i < num_entities; i++) {
-		struct drm_sched_entity *entity = &ctx->entities[0][i].entity;
+			if (!ctx->entities[i][j])
+				continue;
 
-		drm_sched_entity_set_priority(entity, ctx_prio);
+			entity = &ctx->entities[i][j]->entity;
+			drm_sched_entity_set_priority(entity, ctx_prio);
+		}
 	}
 }
 
@@ -569,20 +557,24 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
 {
-	unsigned num_entities = amdgpu_ctx_total_num_entities();
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
-	uint32_t id, i;
+	uint32_t id, i, j;
 
 	idp = &mgr->ctx_handles;
 
 	mutex_lock(&mgr->lock);
 	idr_for_each_entry(idp, ctx, id) {
-		for (i = 0; i < num_entities; i++) {
-			struct drm_sched_entity *entity;
+		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+				struct drm_sched_entity *entity;
 
-			entity = &ctx->entities[0][i].entity;
-			timeout = drm_sched_entity_flush(entity, timeout);
+				if (!ctx->entities[i][j])
+					continue;
+
+				entity = &ctx->entities[i][j]->entity;
+				timeout = drm_sched_entity_flush(entity, timeout);
+			}
 		}
 	}
 	mutex_unlock(&mgr->lock);
@@ -591,10 +583,9 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
 
 void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 {
-	unsigned num_entities = amdgpu_ctx_total_num_entities();
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
-	uint32_t id, i;
+	uint32_t id, i, j;
 
 	idp = &mgr->ctx_handles;
 
@@ -604,8 +595,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 			continue;
 		}
 
-		for (i = 0; i < num_entities; i++)
-			drm_sched_entity_fini(&ctx->entities[0][i].entity);
+		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+				struct drm_sched_entity *entity;
+
+				if (!ctx->entities[i][j])
+					continue;
+
+				entity = &ctx->entities[i][j]->entity;
+				drm_sched_entity_fini(entity);
+			}
+		}
 	}
 }
 
@@ -627,3 +627,45 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 	idr_destroy(&mgr->ctx_handles);
 	mutex_destroy(&mgr->lock);
 }
+
+void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
+{
+	int i, j;
+
+	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
+		adev->gfx.num_gfx_sched++;
+	}
+
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
+		adev->gfx.num_compute_sched++;
+	}
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
+		adev->sdma.num_sdma_sched++;
+	}
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.harvest_config & (1 << i))
+			continue;
+		adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
+			&adev->vcn.inst[i].ring_dec.sched;
+	}
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.harvest_config & (1 << i))
+			continue;
+		for (j = 0; j < adev->vcn.num_enc_rings; ++j)
+			adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
+				&adev->vcn.inst[i].ring_enc[j].sched;
+	}
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+		adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
+			&adev->jpeg.inst[i].ring_dec.sched;
+	}
+}
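
amdgpu_ctx_init_sched() is called once from amdgpu_device_init() (see the amdgpu_device.c hunk further down) and only aggregates ring schedulers into per-IP arrays; those arrays are consumed by the lazy entity creation added earlier in this file. A rough call-flow sketch as a reading aid, not new code in the patch:

	/*
	 * amdgpu_device_init()
	 *   -> amdgpu_ctx_init_sched(adev)             collect per-IP scheduler arrays
	 *
	 * amdgpu_ctx_get_entity(ctx, hw_ip, ring)
	 *   -> amdgpu_ctx_init_entity(ctx, hw_ip, ring) on first use only
	 *      -> drm_sched_entity_init(&entity->entity, priority,
	 *                               scheds, num_scheds, &ctx->guilty)
	 */
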
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index da80863..de490f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -29,10 +29,12 @@ struct drm_device;
 struct drm_file;
 struct amdgpu_fpriv;
 
+#define AMDGPU_MAX_ENTITY_NUM 4
+
 struct amdgpu_ctx_entity {
 	uint64_t		sequence;
-	struct dma_fence	**fences;
 	struct drm_sched_entity	entity;
+	struct dma_fence	*fences[];
 };
 
 struct amdgpu_ctx {
@@ -42,8 +44,7 @@ struct amdgpu_ctx {
 	unsigned			reset_counter_query;
 	uint32_t			vram_lost_counter;
 	spinlock_t			ring_lock;
-	struct dma_fence		**fences;
-	struct amdgpu_ctx_entity	*entities[AMDGPU_HW_IP_NUM];
+	struct amdgpu_ctx_entity	*entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
 	bool				preamble_presented;
 	enum drm_sched_priority		init_priority;
 	enum drm_sched_priority		override_priority;
@@ -87,4 +88,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
+void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
+
+
 #endif
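
struct amdgpu_ctx_entity now ends in a C99 flexible array member, so an entity and its per-job fence slots come from one allocation sized with offsetof(), as amdgpu_ctx_init_entity() does above. A small userspace sketch of the same layout trick; the demo_* names and the 16-slot figure are placeholders, not driver API:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdlib.h>

	struct fence;                           /* stand-in for struct dma_fence */

	struct demo_entity {
		uint64_t	sequence;
		struct fence	*fences[];      /* flexible array member */
	};

	static struct demo_entity *demo_entity_alloc(size_t num_fences)
	{
		/* One zeroed allocation covers the header plus num_fences pointers,
		 * mirroring kcalloc(1, offsetof(typeof(*entity),
		 * fences[amdgpu_sched_jobs]), GFP_KERNEL) in the patch. */
		return calloc(1, offsetof(struct demo_entity, fences[num_fences]));
	}

	int main(void)
	{
		struct demo_entity *e = demo_entity_alloc(16);
		int ok = (e != NULL);

		free(e);
		return ok ? 0 : 1;
	}
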
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 8e6726e..f24ed9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -26,6 +26,7 @@
 #include <linux/kthread.h>
 #include <linux/pci.h>
 #include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drm_debugfs.h>
 
@@ -129,7 +130,7 @@ static int  amdgpu_debugfs_process_reg_op(bool read, struct file *f,
 			sh_bank = 0xFFFFFFFF;
 		if (instance_bank == 0x3FF)
 			instance_bank = 0xFFFFFFFF;
-		use_bank = 1;
+		use_bank = true;
 	} else if (*pos & (1ULL << 61)) {
 
 		me = (*pos & GENMASK_ULL(33, 24)) >> 24;
@@ -137,17 +138,24 @@ static int  amdgpu_debugfs_process_reg_op(bool read, struct file *f,
 		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
 		vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;
 
-		use_ring = 1;
+		use_ring = true;
 	} else {
-		use_bank = use_ring = 0;
+		use_bank = use_ring = false;
 	}
 
 	*pos &= (1UL << 22) - 1;
 
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
 	if (use_bank) {
 		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
-		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
 			return -EINVAL;
+		}
 		mutex_lock(&adev->grbm_idx_mutex);
 		amdgpu_gfx_select_se_sh(adev, se_bank,
 					sh_bank, instance_bank);
@@ -193,6 +201,9 @@ static int  amdgpu_debugfs_process_reg_op(bool read, struct file *f,
 	if (pm_pg_lock)
 		mutex_unlock(&adev->pm.mutex);
 
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	return result;
 }
 
@@ -237,13 +248,20 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
 	while (size) {
 		uint32_t value;
 
 		value = RREG32_PCIE(*pos >> 2);
 		r = put_user(value, (uint32_t *)buf);
-		if (r)
+		if (r) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
 			return r;
+		}
 
 		result += 4;
 		buf += 4;
@@ -251,6 +269,9 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 		size -= 4;
 	}
 
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	return result;
 }
 
@@ -276,12 +297,19 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
 	while (size) {
 		uint32_t value;
 
 		r = get_user(value, (uint32_t *)buf);
-		if (r)
+		if (r) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
 			return r;
+		}
 
 		WREG32_PCIE(*pos >> 2, value);
 
@@ -291,6 +319,9 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
 		size -= 4;
 	}
 
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	return result;
 }
 
@@ -316,13 +347,20 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
 	while (size) {
 		uint32_t value;
 
 		value = RREG32_DIDT(*pos >> 2);
 		r = put_user(value, (uint32_t *)buf);
-		if (r)
+		if (r) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
 			return r;
+		}
 
 		result += 4;
 		buf += 4;
@@ -330,6 +368,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 		size -= 4;
 	}
 
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	return result;
 }
 
@@ -355,12 +396,19 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
 	while (size) {
 		uint32_t value;
 
 		r = get_user(value, (uint32_t *)buf);
-		if (r)
+		if (r) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
 			return r;
+		}
 
 		WREG32_DIDT(*pos >> 2, value);
 
@@ -370,6 +418,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
 		size -= 4;
 	}
 
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	return result;
 }
 
@@ -395,13 +446,20 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
 	while (size) {
 		uint32_t value;
 
 		value = RREG32_SMC(*pos);
 		r = put_user(value, (uint32_t *)buf);
-		if (r)
+		if (r) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
 			return r;
+		}
 
 		result += 4;
 		buf += 4;
@@ -409,6 +467,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 		size -= 4;
 	}
 
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	return result;
 }
 
@@ -434,12 +495,19 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
 	while (size) {
 		uint32_t value;
 
 		r = get_user(value, (uint32_t *)buf);
-		if (r)
+		if (r) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
 			return r;
+		}
 
 		WREG32_SMC(*pos, value);
 
@@ -449,6 +517,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 		size -= 4;
 	}
 
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	return result;
 }
 
@@ -572,7 +643,16 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 	idx = *pos >> 2;
 
 	valuesize = sizeof(values);
+
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
 	r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	if (r)
 		return r;
 
@@ -633,6 +713,10 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
 	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
 	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
 
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
 	/* switch to the specific se/sh/cu */
 	mutex_lock(&adev->grbm_idx_mutex);
 	amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -644,6 +728,9 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
 	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	if (!x)
 		return -EINVAL;
 
@@ -711,6 +798,10 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	if (!data)
 		return -ENOMEM;
 
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
 	/* switch to the specific se/sh/cu */
 	mutex_lock(&adev->grbm_idx_mutex);
 	amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -726,6 +817,9 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	while (size) {
 		uint32_t value;
 
@@ -859,6 +953,10 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
 	struct amdgpu_device *adev = dev->dev_private;
 	int r = 0, i;
 
+	r = pm_runtime_get_sync(dev->dev);
+	if (r < 0)
+		return r;
+
 	/* Avoid accidentally unparking the sched thread during GPU reset */
 	mutex_lock(&adev->lock_reset);
 
@@ -889,6 +987,9 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
 
 	mutex_unlock(&adev->lock_reset);
 
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+
 	return 0;
 }
 
@@ -907,8 +1008,17 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct amdgpu_device *adev = dev->dev_private;
+	int r;
+
+	r = pm_runtime_get_sync(dev->dev);
+	if (r < 0)
+		return r;
 
 	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
+
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+
 	return 0;
 }
 
@@ -917,8 +1027,17 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct amdgpu_device *adev = dev->dev_private;
+	int r;
+
+	r = pm_runtime_get_sync(dev->dev);
+	if (r < 0)
+		return r;
 
 	seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
+
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+
 	return 0;
 }
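
Every debugfs handler above now brackets its register traffic with a runtime-PM reference, so the GPU is powered up for the access and allowed to autosuspend afterwards. The shape of that pattern, reduced to a hypothetical helper (demo_read_reg is not a function in this patch):

	#include <linux/device.h>
	#include <linux/pm_runtime.h>
	#include <linux/types.h>

	static int demo_read_reg(struct device *dev, u32 *out)
	{
		int r;

		r = pm_runtime_get_sync(dev);           /* wake the device if suspended */
		if (r < 0)
			return r;

		*out = 0;                               /* the RREG32()/WREG32() access goes here */

		pm_runtime_mark_last_busy(dev);         /* restart the autosuspend timer */
		pm_runtime_put_autosuspend(dev);        /* drop the reference */
		return 0;
	}
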
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c17505fb..39cd545 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -66,6 +66,7 @@
 #include "amdgpu_pmu.h"
 
 #include <linux/suspend.h>
+#include <drm/task_barrier.h>
 
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -137,14 +138,14 @@ static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 
 /**
- * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control
+ * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
  *
  * @dev: drm_device pointer
  *
  * Returns true if the device is a dGPU with HG/PX power control,
  * otherwise return false.
  */
-bool amdgpu_device_is_px(struct drm_device *dev)
+bool amdgpu_device_supports_boco(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = dev->dev_private;
 
@@ -154,6 +155,21 @@ bool amdgpu_device_is_px(struct drm_device *dev)
 }
 
 /**
+ * amdgpu_device_supports_baco - Does the device support BACO
+ *
+ * @dev: drm_device pointer
+ *
+ * Returns true if the device supports BACO,
+ * otherwise return false.
+ */
+bool amdgpu_device_supports_baco(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+
+	return amdgpu_asic_supports_baco(adev);
+}
+
+/**
  * VRAM access helper functions.
  *
  * amdgpu_device_vram_access - read/write a buffer in vram
@@ -200,8 +216,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 {
 	uint32_t ret;
 
-	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
-		return amdgpu_virt_kiq_rreg(adev, reg);
+	if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+		return amdgpu_kiq_rreg(adev, reg);
 
 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
 		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -278,8 +294,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		adev->last_mm_index = v;
 	}
 
-	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
-		return amdgpu_virt_kiq_wreg(adev, reg, v);
+	if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+		return amdgpu_kiq_wreg(adev, reg, v);
 
 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@@ -969,7 +985,7 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
 {
 	struct sysinfo si;
-	bool is_os_64 = (sizeof(void *) == 8) ? true : false;
+	bool is_os_64 = (sizeof(void *) == 8);
 	uint64_t total_memory;
 	uint64_t dram_size_seven_GB = 0x1B8000000;
 	uint64_t dram_size_three_GB = 0xB8000000;
@@ -1016,8 +1032,6 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
  */
 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 {
-	int ret = 0;
-
 	if (amdgpu_sched_jobs < 4) {
 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
 			 amdgpu_sched_jobs);
@@ -1057,7 +1071,7 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 
 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -1072,8 +1086,9 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
+	int r;
 
-	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
+	if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
 		return;
 
 	if (state == VGA_SWITCHEROO_ON) {
@@ -1081,7 +1096,12 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
 		/* don't suspend or resume card normally */
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-		amdgpu_device_resume(dev, true, true);
+		pci_set_power_state(dev->pdev, PCI_D0);
+		pci_restore_state(dev->pdev);
+		r = pci_enable_device(dev->pdev);
+		if (r)
+			DRM_WARN("pci_enable_device failed (%d)\n", r);
+		amdgpu_device_resume(dev, true);
 
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 		drm_kms_helper_poll_enable(dev);
@@ -1089,7 +1109,11 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
 		pr_info("amdgpu: switched off\n");
 		drm_kms_helper_poll_disable(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		amdgpu_device_suspend(dev, true, true);
+		amdgpu_device_suspend(dev, true);
+		pci_save_state(dev->pdev);
+		/* Shut down the device */
+		pci_disable_device(dev->pdev);
+		pci_set_power_state(dev->pdev, PCI_D3cold);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
@@ -1527,7 +1551,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 		}
 
 parse_soc_bounding_box:
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 		/*
 		 * soc bounding box info is not integrated in the discovery table,
 		 * we always need to parse it from gpu info firmware.
@@ -1538,7 +1561,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
 		}
-#endif
 		break;
 	}
 	default:
@@ -1787,7 +1809,8 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
 		}
 	}
 
-	r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
+	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
+		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
 
 	return r;
 }
@@ -1854,6 +1877,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 		}
 	}
 
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_init_data_exchange(adev);
+
 	r = amdgpu_ib_pool_init(adev);
 	if (r) {
 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
@@ -1895,11 +1921,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	amdgpu_amdkfd_device_init(adev);
 
 init_failed:
-	if (amdgpu_sriov_vf(adev)) {
-		if (!r)
-			amdgpu_virt_init_data_exchange(adev);
+	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_release_full_gpu(adev, true);
-	}
 
 	return r;
 }
@@ -1938,6 +1961,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
  *
  * @adev: amdgpu_device pointer
+ * @state: clockgating state (gate or ungate)
  *
  * The list of all the hardware IPs that make up the asic is walked and the
  * set_clockgating_state callbacks are run.
@@ -1962,6 +1986,7 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
 			/* enable clockgating to save power */
 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@@ -1992,6 +2017,7 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
 			/* enable powergating to save power */
 			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
@@ -2319,14 +2345,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 		adev->ip_blocks[i].status.hw = false;
 		/* handle putting the SMC in the appropriate state */
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
-			if (is_support_sw_smu(adev)) {
-				r = smu_set_mp1_state(&adev->smu, adev->mp1_state);
-			} else if (adev->powerplay.pp_funcs &&
-					   adev->powerplay.pp_funcs->set_mp1_state) {
-				r = adev->powerplay.pp_funcs->set_mp1_state(
-					adev->powerplay.pp_handle,
-					adev->mp1_state);
-			}
+			r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
 			if (r) {
 				DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
 					  adev->mp1_state, r);
@@ -2413,7 +2432,8 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
 		AMD_IP_BLOCK_TYPE_GFX,
 		AMD_IP_BLOCK_TYPE_SDMA,
 		AMD_IP_BLOCK_TYPE_UVD,
-		AMD_IP_BLOCK_TYPE_VCE
+		AMD_IP_BLOCK_TYPE_VCE,
+		AMD_IP_BLOCK_TYPE_VCN
 	};
 
 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
@@ -2428,7 +2448,11 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
 				block->status.hw)
 				continue;
 
-			r = block->version->funcs->hw_init(adev);
+			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
+				r = block->version->funcs->resume(adev);
+			else
+				r = block->version->funcs->hw_init(adev);
+
 			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
 			if (r)
 				return r;
@@ -2600,20 +2624,19 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
 	case CHIP_VEGA20:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	case CHIP_RAVEN:
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	case CHIP_RENOIR:
 #endif
 		return amdgpu_dc != 0;
 #endif
 	default:
+		if (amdgpu_dc > 0)
+			DRM_INFO("Display Core has been requested via kernel parameter "
+					 "but isn't supported by ASIC, ignoring\n");
 		return false;
 	}
 }
@@ -2638,8 +2661,38 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
 {
 	struct amdgpu_device *adev =
 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
+	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
 
-	adev->asic_reset_res =  amdgpu_asic_reset(adev);
+	/* It's a bug to not have a hive within this function */
+	if (WARN_ON(!hive))
+		return;
+
+	/*
+	 * Use task barrier to synchronize all xgmi reset works across the
+	 * hive. task_barrier_enter and task_barrier_exit will block
+	 * until all the threads running the xgmi reset works reach
+	 * those points. task_barrier_full will do both blocks.
+	 */
+	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+
+		task_barrier_enter(&hive->tb);
+		adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
+
+		if (adev->asic_reset_res)
+			goto fail;
+
+		task_barrier_exit(&hive->tb);
+		adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
+
+		if (adev->asic_reset_res)
+			goto fail;
+	} else {
+
+		task_barrier_full(&hive->tb);
+		adev->asic_reset_res =  amdgpu_asic_reset(adev);
+	}
+
+fail:
 	if (adev->asic_reset_res)
 		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
 			 adev->asic_reset_res, adev->ddev->unique);
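
The comment above states the contract for an XGMI hive reset: the nodes synchronize once before entering BACO and once more before leaving it, so no node exits BACO until every node has entered. A userspace sketch of the same two-checkpoint pattern with a plain pthread barrier; this is an analogy, not the kernel task_barrier API, and NODES plus the worker are made up:

	#include <pthread.h>
	#include <stdio.h>

	#define NODES 2                         /* assumed hive size, illustration only */

	static pthread_barrier_t tb;

	static void *xgmi_reset_worker(void *arg)
	{
		int node = *(int *)arg;

		pthread_barrier_wait(&tb);      /* "enter": all nodes lined up */
		printf("node %d: BACO enter\n", node);
		pthread_barrier_wait(&tb);      /* "exit": every node finished BACO enter */
		printf("node %d: BACO exit\n", node);
		return NULL;
	}

	int main(void)
	{
		pthread_t threads[NODES];
		int ids[NODES];
		int i;

		pthread_barrier_init(&tb, NULL, NODES);
		for (i = 0; i < NODES; i++) {
			ids[i] = i;
			pthread_create(&threads[i], NULL, xgmi_reset_worker, &ids[i]);
		}
		for (i = 0; i < NODES; i++)
			pthread_join(threads[i], NULL);
		pthread_barrier_destroy(&tb);
		return 0;
	}
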
@@ -2731,7 +2784,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		       uint32_t flags)
 {
 	int r, i;
-	bool runtime = false;
+	bool boco = false;
 	u32 max_MBps;
 
 	adev->shutdown = false;
@@ -2754,7 +2807,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->mman.buffer_funcs = NULL;
 	adev->mman.buffer_funcs_ring = NULL;
 	adev->vm_manager.vm_pte_funcs = NULL;
-	adev->vm_manager.vm_pte_num_rqs = 0;
+	adev->vm_manager.vm_pte_num_scheds = 0;
 	adev->gmc.gmc_funcs = NULL;
 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
@@ -2794,9 +2847,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	mutex_init(&adev->virt.vf_errors.lock);
 	hash_init(adev->mn_hash);
 	mutex_init(&adev->lock_reset);
-	mutex_init(&adev->notifier_lock);
-	mutex_init(&adev->virt.dpm_mutex);
 	mutex_init(&adev->psp.mutex);
+	mutex_init(&adev->notifier_lock);
 
 	r = amdgpu_device_check_arguments(adev);
 	if (r)
@@ -2902,12 +2954,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	 * ignore it */
 	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
 
-	if (amdgpu_device_is_px(ddev))
-		runtime = true;
-	if (!pci_is_thunderbolt_attached(adev->pdev))
+	if (amdgpu_device_supports_boco(ddev))
+		boco = true;
+	if (amdgpu_has_atpx() &&
+	    (amdgpu_is_atpx_hybrid() ||
+	     amdgpu_has_atpx_dgpu_power_cntl()) &&
+	    !pci_is_thunderbolt_attached(adev->pdev))
 		vga_switcheroo_register_client(adev->pdev,
-					       &amdgpu_switcheroo_ops, runtime);
-	if (runtime)
+					       &amdgpu_switcheroo_ops, boco);
+	if (boco)
 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
 
 	if (amdgpu_emu_mode == 1) {
@@ -2994,11 +3049,17 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		}
 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
-		if (amdgpu_virt_request_full_gpu(adev, false))
-			amdgpu_virt_release_full_gpu(adev, false);
 		goto failed;
 	}
 
+	DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
+			adev->gfx.config.max_shader_engines,
+			adev->gfx.config.max_sh_per_se,
+			adev->gfx.config.max_cu_per_sh,
+			adev->gfx.cu_info.number);
+
+	amdgpu_ctx_init_sched(adev);
+
 	adev->accel_working = true;
 
 	amdgpu_vm_check_compute_bug(adev);
@@ -3013,16 +3074,19 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 	amdgpu_fbdev_init(adev);
 
-	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
-		amdgpu_pm_virt_sysfs_init(adev);
-
 	r = amdgpu_pm_sysfs_init(adev);
-	if (r)
+	if (r) {
+		adev->pm_sysfs_en = false;
 		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
+	} else
+		adev->pm_sysfs_en = true;
 
 	r = amdgpu_ucode_sysfs_init(adev);
-	if (r)
+	if (r) {
+		adev->ucode_sysfs_en = false;
 		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
+	} else
+		adev->ucode_sysfs_en = true;
 
 	r = amdgpu_debugfs_gem_init(adev);
 	if (r)
@@ -3091,7 +3155,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 failed:
 	amdgpu_vf_error_trans_all(adev);
-	if (runtime)
+	if (boco)
 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
 
 	return r;
@@ -3122,7 +3186,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 			drm_atomic_helper_shutdown(adev->ddev);
 	}
 	amdgpu_fence_driver_fini(adev);
-	amdgpu_pm_sysfs_fini(adev);
+	if (adev->pm_sysfs_en)
+		amdgpu_pm_sysfs_fini(adev);
 	amdgpu_fbdev_fini(adev);
 	r = amdgpu_device_ip_fini(adev);
 	if (adev->firmware.gpu_info_fw) {
@@ -3139,9 +3204,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 
 	kfree(adev->bios);
 	adev->bios = NULL;
-	if (!pci_is_thunderbolt_attached(adev->pdev))
+	if (amdgpu_has_atpx() &&
+	    (amdgpu_is_atpx_hybrid() ||
+	     amdgpu_has_atpx_dgpu_power_cntl()) &&
+	    !pci_is_thunderbolt_attached(adev->pdev))
 		vga_switcheroo_unregister_client(adev->pdev);
-	if (adev->flags & AMD_IS_PX)
+	if (amdgpu_device_supports_boco(adev->ddev))
 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
 	vga_client_register(adev->pdev, NULL, NULL, NULL);
 	if (adev->rio_mem)
@@ -3150,12 +3218,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	iounmap(adev->rmmio);
 	adev->rmmio = NULL;
 	amdgpu_device_doorbell_fini(adev);
-	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
-		amdgpu_pm_virt_sysfs_fini(adev);
 
 	amdgpu_debugfs_regs_cleanup(adev);
 	device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
-	amdgpu_ucode_sysfs_fini(adev);
+	if (adev->ucode_sysfs_en)
+		amdgpu_ucode_sysfs_fini(adev);
 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
 		amdgpu_pmu_fini(adev);
 	amdgpu_debugfs_preempt_cleanup(adev);
@@ -3178,7 +3245,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
  * Returns 0 for success or an error on failure.
  * Called at driver suspend.
  */
-int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
+int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 {
 	struct amdgpu_device *adev;
 	struct drm_crtc *crtc;
@@ -3261,13 +3328,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	 */
 	amdgpu_bo_evict_vram(adev);
 
-	if (suspend) {
-		pci_save_state(dev->pdev);
-		/* Shut down the device */
-		pci_disable_device(dev->pdev);
-		pci_set_power_state(dev->pdev, PCI_D3hot);
-	}
-
 	return 0;
 }
 
@@ -3282,7 +3342,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
  * Returns 0 for success or an error on failure.
  * Called at driver resume.
  */
-int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 {
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
@@ -3293,14 +3353,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	if (resume) {
-		pci_set_power_state(dev->pdev, PCI_D0);
-		pci_restore_state(dev->pdev);
-		r = pci_enable_device(dev->pdev);
-		if (r)
-			return r;
-	}
-
 	/* post card */
 	if (amdgpu_device_need_post(adev)) {
 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -3639,13 +3691,12 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	amdgpu_amdkfd_pre_reset(adev);
-
 	/* Resume IP prior to SMC */
 	r = amdgpu_device_ip_reinit_early_sriov(adev);
 	if (r)
 		goto error;
 
+	amdgpu_virt_init_data_exchange(adev);
 	/* we need recover gart prior to run SMC/CP/SDMA resume */
 	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
 
@@ -3663,7 +3714,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 	amdgpu_amdkfd_post_reset(adev);
 
 error:
-	amdgpu_virt_init_data_exchange(adev);
 	amdgpu_virt_release_full_gpu(adev, true);
 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
 		amdgpu_inc_vram_lost(adev);
@@ -3709,6 +3759,11 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 		case CHIP_VEGA10:
 		case CHIP_VEGA12:
 		case CHIP_RAVEN:
+		case CHIP_ARCTURUS:
+		case CHIP_RENOIR:
+		case CHIP_NAVI10:
+		case CHIP_NAVI14:
+		case CHIP_NAVI12:
 			break;
 		default:
 			goto disabled;
@@ -3785,7 +3840,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
 			/* For XGMI run all resets in parallel to speed up the process */
 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
-				if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
+				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
 					r = -EALREADY;
 			} else
 				r = amdgpu_asic_reset(tmp_adev);
@@ -3797,7 +3852,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 			}
 		}
 
-		/* For XGMI wait for all PSP resets to complete before proceed */
+	/* For XGMI wait for all resets to complete before proceeding */
 		if (!r) {
 			list_for_each_entry(tmp_adev, device_list_handle,
 					    gmc.xgmi.head) {
@@ -3811,6 +3866,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 		}
 	}
 
+	if (!r && amdgpu_ras_intr_triggered())
+		amdgpu_ras_intr_cleared();
 
 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
 		if (need_full_reset) {
@@ -3899,7 +3956,7 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
 		mutex_lock(&adev->lock_reset);
 
 	atomic_inc(&adev->gpu_reset_counter);
-	adev->in_gpu_reset = 1;
+	adev->in_gpu_reset = true;
 	switch (amdgpu_asic_reset_method(adev)) {
 	case AMD_RESET_METHOD_MODE1:
 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
@@ -3919,7 +3976,7 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
 {
 	amdgpu_vf_error_trans_all(adev);
 	adev->mp1_state = PP_MP1_STATE_NONE;
-	adev->in_gpu_reset = 0;
+	adev->in_gpu_reset = false;
 	mutex_unlock(&adev->lock_reset);
 }
 
@@ -3943,12 +4000,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	struct amdgpu_device *tmp_adev = NULL;
 	int i, r = 0;
 	bool in_ras_intr = amdgpu_ras_intr_triggered();
+	bool use_baco =
+		(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO);
 
 	/*
 	 * Flush RAM to disk so that after reboot
 	 * the user can read log and see why the system rebooted.
 	 */
-	if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) {
+	if (in_ras_intr && !use_baco && amdgpu_ras_get_context(adev)->reboot) {
 
 		DRM_WARN("Emergency reboot.");
 
@@ -3959,7 +4019,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	need_full_reset = job_signaled = false;
 	INIT_LIST_HEAD(&device_list);
 
-	dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? "jobs stop":"reset");
+	dev_info(adev->dev, "GPU %s begin!\n",
+		(in_ras_intr && !use_baco) ? "jobs stop":"reset");
 
 	cancel_delayed_work_sync(&adev->delayed_init_work);
 
@@ -4026,7 +4087,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		amdgpu_unregister_gpu_instance(tmp_adev);
 
 		/* disable ras on ALL IPs */
-		if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev))
+		if (!(in_ras_intr && !use_baco) &&
+		      amdgpu_device_ip_need_full_reset(tmp_adev))
 			amdgpu_ras_suspend(tmp_adev);
 
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -4037,13 +4099,13 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
 
-			if (in_ras_intr)
+			if (in_ras_intr && !use_baco)
 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
 		}
 	}
 
 
-	if (in_ras_intr)
+	if (in_ras_intr && !use_baco)
 		goto skip_sched_resume;
 
 	/*
@@ -4136,7 +4198,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 skip_sched_resume:
 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
 		/*unlock kfd: SRIOV would do it separately */
-		if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev))
+		if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
 	                amdgpu_amdkfd_post_reset(tmp_adev);
 		amdgpu_device_unlock_adev(tmp_adev);
 	}
@@ -4285,3 +4347,35 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 	}
 }
 
+int amdgpu_device_baco_enter(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+	if (!amdgpu_device_supports_baco(adev->ddev))
+		return -ENOTSUPP;
+
+	if (ras && ras->supported)
+		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
+
+	return amdgpu_dpm_baco_enter(adev);
+}
+
+int amdgpu_device_baco_exit(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+	int ret = 0;
+
+	if (!amdgpu_device_supports_baco(adev->ddev))
+		return -ENOTSUPP;
+
+	ret = amdgpu_dpm_baco_exit(adev);
+	if (ret)
+		return ret;
+
+	if (ras && ras->supported)
+		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
+
+	return 0;
+}
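
For reference, a minimal caller sketch of the two BACO helpers added above (not part of the patch; the helper name is hypothetical and assumes it sits alongside them in amdgpu_device.c):

static int example_baco_cycle(struct drm_device *dev)
{
	int r;

	r = amdgpu_device_baco_enter(dev);
	if (r == -ENOTSUPP)
		return 0;	/* ASIC has no BACO support, nothing to do */
	if (r)
		return r;

	/* device is now in BACO (bus active, chip off) */

	return amdgpu_device_baco_exit(dev);
}
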
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
new file mode 100644
index 0000000..057f6ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DF_H__
+#define __AMDGPU_DF_H__
+
+struct amdgpu_df_hash_status {
+	bool hash_64k;
+	bool hash_2m;
+	bool hash_1g;
+};
+
+struct amdgpu_df_funcs {
+	void (*sw_init)(struct amdgpu_device *adev);
+	void (*sw_fini)(struct amdgpu_device *adev);
+	void (*enable_broadcast_mode)(struct amdgpu_device *adev,
+				      bool enable);
+	u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
+	u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
+	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+						 bool enable);
+	void (*get_clockgating_state)(struct amdgpu_device *adev,
+				      u32 *flags);
+	void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
+					    bool enable);
+	int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
+					 int is_enable);
+	int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
+					 int is_disable);
+	void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
+					 uint64_t *count);
+	uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
+	void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
+			 uint32_t ficadl_val, uint32_t ficadh_val);
+	uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev,
+				       uint32_t df_inst);
+	uint32_t (*get_df_inst_id)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_df {
+	struct amdgpu_df_hash_status	hash_status;
+	const struct amdgpu_df_funcs	*funcs;
+};
+
+#endif /* __AMDGPU_DF_H__ */
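
The struct above is a per-ASIC function table; callers are expected to test each hook before dispatching. A minimal sketch, assuming the device structure embeds it as adev->df (field name is an assumption, not shown in this hunk):

static u32 example_df_fb_channels(struct amdgpu_device *adev)
{
	/* guard against ASICs that do not implement the hook */
	if (adev->df.funcs && adev->df.funcs->get_fb_channel_number)
		return adev->df.funcs->get_fb_channel_number(adev);

	return 0;
}
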
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 3cadb0b..6d520a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -513,13 +513,23 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
 	 * will not allow USWC mappings.
 	 * Also, don't allow GTT domain if the BO doesn't have the USWC flag set.
 	 */
-	if (adev->asic_type >= CHIP_CARRIZO &&
-	    adev->asic_type < CHIP_RAVEN &&
-	    (adev->flags & AMD_IS_APU) &&
-	    (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
+	if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
 	    amdgpu_bo_support_uswc(bo_flags) &&
-	    amdgpu_device_asic_has_dc_support(adev->asic_type))
-		domain |= AMDGPU_GEM_DOMAIN_GTT;
+	    amdgpu_device_asic_has_dc_support(adev->asic_type)) {
+		switch (adev->asic_type) {
+		case CHIP_CARRIZO:
+		case CHIP_STONEY:
+			domain |= AMDGPU_GEM_DOMAIN_GTT;
+			break;
+		case CHIP_RAVEN:
+			/* enable S/G on PCO and RV2 */
+			if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+				domain |= AMDGPU_GEM_DOMAIN_GTT;
+			break;
+		default:
+			break;
+		}
+	}
 #endif
 
 	return domain;
@@ -690,7 +700,6 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct amdgpu_encoder *amdgpu_encoder;
 	struct drm_connector *connector;
-	struct amdgpu_connector *amdgpu_connector;
 	u32 src_v = 1, dst_v = 1;
 	u32 src_h = 1, dst_h = 1;
 
@@ -702,7 +711,6 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 			continue;
 		amdgpu_encoder = to_amdgpu_encoder(encoder);
 		connector = amdgpu_get_connector_for_encoder(encoder);
-		amdgpu_connector = to_amdgpu_connector(connector);
 
 		/* set scaling */
 		if (amdgpu_encoder->rmx_type == RMX_OFF)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e2eec7b..a59cd47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -360,10 +360,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
 		return ERR_PTR(-EPERM);
 
 	buf = drm_gem_prime_export(gobj, flags);
-	if (!IS_ERR(buf)) {
-		buf->file->f_mapping = gobj->dev->anon_inode->i_mapping;
+	if (!IS_ERR(buf))
 		buf->ops = &amdgpu_dmabuf_ops;
-	}
 
 	return buf;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 9cc270e..a2e8c3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -946,20 +946,63 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 	bool swsmu = is_support_sw_smu(adev);
 
 	switch (block_type) {
-	case AMD_IP_BLOCK_TYPE_GFX:
 	case AMD_IP_BLOCK_TYPE_UVD:
-	case AMD_IP_BLOCK_TYPE_VCN:
 	case AMD_IP_BLOCK_TYPE_VCE:
+		if (swsmu) {
+			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
+		} else if (adev->powerplay.pp_funcs &&
+			   adev->powerplay.pp_funcs->set_powergating_by_smu) {
+			/*
+			 * TODO: need a better lock mechanism
+			 *
+			 * Here adev->pm.mutex lock protection is enforced only
+			 * for the UVD and VCE cases, since the other cases may
+			 * already be covered by locking in amdgpu_pm.c.
+			 * This is a quick fix for the deadlock issue below.
+			 *     NFO: task ocltst:2028 blocked for more than 120 seconds.
+			 *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
+			 *     echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+			 *     cltst          D    0  2028   2026 0x00000000
+			 *     all Trace:
+			 *     __schedule+0x2c0/0x870
+			 *     schedule+0x2c/0x70
+			 *     schedule_preempt_disabled+0xe/0x10
+			 *     __mutex_lock.isra.9+0x26d/0x4e0
+			 *     __mutex_lock_slowpath+0x13/0x20
+			 *     ? __mutex_lock_slowpath+0x13/0x20
+			 *     mutex_lock+0x2f/0x40
+			 *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
+			 *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
+			 *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
+			 *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
+			 *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
+			 *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
+			 */
+			mutex_lock(&adev->pm.mutex);
+			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+				(adev)->powerplay.pp_handle, block_type, gate));
+			mutex_unlock(&adev->pm.mutex);
+		}
+		break;
+	case AMD_IP_BLOCK_TYPE_GFX:
+	case AMD_IP_BLOCK_TYPE_VCN:
 	case AMD_IP_BLOCK_TYPE_SDMA:
 		if (swsmu)
 			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
-		else
+		else if (adev->powerplay.pp_funcs &&
+			 adev->powerplay.pp_funcs->set_powergating_by_smu)
 			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
 				(adev)->powerplay.pp_handle, block_type, gate));
 		break;
+	case AMD_IP_BLOCK_TYPE_JPEG:
+		if (swsmu)
+			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
+		break;
 	case AMD_IP_BLOCK_TYPE_GMC:
 	case AMD_IP_BLOCK_TYPE_ACP:
-		ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+		if (adev->powerplay.pp_funcs &&
+		    adev->powerplay.pp_funcs->set_powergating_by_smu)
+			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
 				(adev)->powerplay.pp_handle, block_type, gate));
 		break;
 	default:
@@ -968,3 +1011,163 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 
 	return ret;
 }
+
+int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+	int ret = 0;
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_baco_enter(smu);
+	} else {
+		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+			return -ENOENT;
+
+		/* enter BACO state */
+		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+	}
+
+	return ret;
+}
+
+int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+	int ret = 0;
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_baco_exit(smu);
+	} else {
+		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+			return -ENOENT;
+
+		/* exit BACO state */
+		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+	}
+
+	return ret;
+}
+
+int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
+			     enum pp_mp1_state mp1_state)
+{
+	int ret = 0;
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_set_mp1_state(&adev->smu, mp1_state);
+	} else if (adev->powerplay.pp_funcs &&
+		   adev->powerplay.pp_funcs->set_mp1_state) {
+		ret = adev->powerplay.pp_funcs->set_mp1_state(
+				adev->powerplay.pp_handle,
+				mp1_state);
+	}
+
+	return ret;
+}
+
+bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+	bool baco_cap;
+
+	if (is_support_sw_smu(adev)) {
+		return smu_baco_is_support(smu);
+	} else {
+		if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
+			return false;
+
+		if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
+			return false;
+
+		return baco_cap;
+	}
+}
+
+int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+
+	if (is_support_sw_smu(adev)) {
+		return smu_mode2_reset(smu);
+	} else {
+		if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
+			return -ENOENT;
+
+		return pp_funcs->asic_reset_mode_2(pp_handle);
+	}
+}
+
+int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+	int ret = 0;
+
+	dev_info(adev->dev, "GPU BACO reset\n");
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_baco_enter(smu);
+		if (ret)
+			return ret;
+
+		ret = smu_baco_exit(smu);
+		if (ret)
+			return ret;
+	} else {
+		if (!pp_funcs
+		    || !pp_funcs->set_asic_baco_state)
+			return -ENOENT;
+
+		/* enter BACO state */
+		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+		if (ret)
+			return ret;
+
+		/* exit BACO state */
+		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
+				    enum PP_SMC_POWER_PROFILE type,
+				    bool en)
+{
+	int ret = 0;
+
+	if (is_support_sw_smu(adev))
+		ret = smu_switch_power_profile(&adev->smu, type, en);
+	else if (adev->powerplay.pp_funcs &&
+		 adev->powerplay.pp_funcs->switch_power_profile)
+		ret = adev->powerplay.pp_funcs->switch_power_profile(
+			adev->powerplay.pp_handle, type, en);
+
+	return ret;
+}
+
+int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
+			       uint32_t pstate)
+{
+	int ret = 0;
+
+	if (is_support_sw_smu_xgmi(adev))
+		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
+	else if (adev->powerplay.pp_funcs &&
+		 adev->powerplay.pp_funcs->set_xgmi_pstate)
+		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
+								pstate);
+
+	return ret;
+}
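
The wrappers above hide the swSMU-versus-powerplay split from callers. A hedged sketch of how a reset path might pick between them (hypothetical helper, not from the patch):

static int example_asic_soft_reset(struct amdgpu_device *adev)
{
	/* prefer BACO when the ASIC advertises support for it */
	if (amdgpu_dpm_is_baco_supported(adev))
		return amdgpu_dpm_baco_reset(adev);

	/* otherwise fall back to a mode-2 reset */
	return amdgpu_dpm_mode2_reset(adev);
}
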
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 2cfb677..902ca6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -341,10 +341,6 @@ enum amdgpu_pcie_gen {
 		((adev)->powerplay.pp_funcs->reset_power_profile_state(\
 			(adev)->powerplay.pp_handle, request))
 
-#define amdgpu_dpm_switch_power_profile(adev, type, en) \
-		((adev)->powerplay.pp_funcs->switch_power_profile(\
-			(adev)->powerplay.pp_handle, type, en))
-
 #define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
 		((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
 			(adev)->powerplay.pp_handle, msg_id))
@@ -517,4 +513,24 @@ extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
 
 extern int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low);
 
+int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
+			       uint32_t pstate);
+
+int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
+				    enum PP_SMC_POWER_PROFILE type,
+				    bool en);
+
+int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
+
+int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
+
+bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
+
+int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
+			     enum pp_mp1_state mp1_state);
+
+int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
+
+int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 30a1e3a..94e2fd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1147,7 +1147,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 
-	return amdgpu_device_suspend(drm_dev, true, true);
+	return amdgpu_device_suspend(drm_dev, true);
 }
 
 static int amdgpu_pmops_resume(struct device *dev)
@@ -1155,13 +1155,14 @@ static int amdgpu_pmops_resume(struct device *dev)
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 
 	/* GPU comes up enabled by the bios on resume */
-	if (amdgpu_device_is_px(drm_dev)) {
+	if (amdgpu_device_supports_boco(drm_dev) ||
+	    amdgpu_device_supports_baco(drm_dev)) {
 		pm_runtime_disable(dev);
 		pm_runtime_set_active(dev);
 		pm_runtime_enable(dev);
 	}
 
-	return amdgpu_device_resume(drm_dev, true, true);
+	return amdgpu_device_resume(drm_dev, true);
 }
 
 static int amdgpu_pmops_freeze(struct device *dev)
@@ -1170,7 +1171,7 @@ static int amdgpu_pmops_freeze(struct device *dev)
 	struct amdgpu_device *adev = drm_dev->dev_private;
 	int r;
 
-	r = amdgpu_device_suspend(drm_dev, false, true);
+	r = amdgpu_device_suspend(drm_dev, true);
 	if (r)
 		return r;
 	return amdgpu_asic_reset(adev);
@@ -1180,46 +1181,66 @@ static int amdgpu_pmops_thaw(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 
-	return amdgpu_device_resume(drm_dev, false, true);
+	return amdgpu_device_resume(drm_dev, true);
 }
 
 static int amdgpu_pmops_poweroff(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 
-	return amdgpu_device_suspend(drm_dev, true, true);
+	return amdgpu_device_suspend(drm_dev, true);
 }
 
 static int amdgpu_pmops_restore(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 
-	return amdgpu_device_resume(drm_dev, false, true);
+	return amdgpu_device_resume(drm_dev, true);
 }
 
 static int amdgpu_pmops_runtime_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	int ret;
+	struct amdgpu_device *adev = drm_dev->dev_private;
+	int ret, i;
 
-	if (!amdgpu_device_is_px(drm_dev)) {
+	if (!adev->runpm) {
 		pm_runtime_forbid(dev);
 		return -EBUSY;
 	}
 
-	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+	/* wait for all rings to drain before suspending */
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (ring && ring->sched.ready) {
+			ret = amdgpu_fence_wait_empty(ring);
+			if (ret)
+				return -EBUSY;
+		}
+	}
+
+	if (amdgpu_device_supports_boco(drm_dev))
+		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 	drm_kms_helper_poll_disable(drm_dev);
 
-	ret = amdgpu_device_suspend(drm_dev, false, false);
-	pci_save_state(pdev);
-	pci_disable_device(pdev);
-	pci_ignore_hotplug(pdev);
-	if (amdgpu_is_atpx_hybrid())
-		pci_set_power_state(pdev, PCI_D3cold);
-	else if (!amdgpu_has_atpx_dgpu_power_cntl())
-		pci_set_power_state(pdev, PCI_D3hot);
-	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+	ret = amdgpu_device_suspend(drm_dev, false);
+	if (amdgpu_device_supports_boco(drm_dev)) {
+		/* Only need to handle PCI state in the driver for ATPX;
+		 * the PCI core handles it for _PR3.
+		 */
+		if (amdgpu_is_atpx_hybrid()) {
+			pci_ignore_hotplug(pdev);
+		} else {
+			pci_save_state(pdev);
+			pci_disable_device(pdev);
+			pci_ignore_hotplug(pdev);
+			pci_set_power_state(pdev, PCI_D3cold);
+		}
+		drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+	} else if (amdgpu_device_supports_baco(drm_dev)) {
+		amdgpu_device_baco_enter(drm_dev);
+	}
 
 	return 0;
 }
@@ -1228,34 +1249,45 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct amdgpu_device *adev = drm_dev->dev_private;
 	int ret;
 
-	if (!amdgpu_device_is_px(drm_dev))
+	if (!adev->runpm)
 		return -EINVAL;
 
-	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+	if (amdgpu_device_supports_boco(drm_dev)) {
+		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-	if (amdgpu_is_atpx_hybrid() ||
-	    !amdgpu_has_atpx_dgpu_power_cntl())
-		pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	ret = pci_enable_device(pdev);
-	if (ret)
-		return ret;
-	pci_set_master(pdev);
-
-	ret = amdgpu_device_resume(drm_dev, false, false);
+		/* Only need to handle PCI state in the driver for ATPX;
+		 * the PCI core handles it for _PR3.
+		 */
+		if (amdgpu_is_atpx_hybrid()) {
+			pci_set_master(pdev);
+		} else {
+			pci_set_power_state(pdev, PCI_D0);
+			pci_restore_state(pdev);
+			ret = pci_enable_device(pdev);
+			if (ret)
+				return ret;
+			pci_set_master(pdev);
+		}
+	} else if (amdgpu_device_supports_baco(drm_dev)) {
+		amdgpu_device_baco_exit(drm_dev);
+	}
+	ret = amdgpu_device_resume(drm_dev, false);
 	drm_kms_helper_poll_enable(drm_dev);
-	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+	if (amdgpu_device_supports_boco(drm_dev))
+		drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	return 0;
 }
 
 static int amdgpu_pmops_runtime_idle(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_dev->dev_private;
 	struct drm_crtc *crtc;
 
-	if (!amdgpu_device_is_px(drm_dev)) {
+	if (!adev->runpm) {
 		pm_runtime_forbid(dev);
 		return -EBUSY;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 143753d..2672dc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -69,7 +69,7 @@ amdgpufb_release(struct fb_info *info, int user)
 	return 0;
 }
 
-static struct fb_ops amdgpufb_ops = {
+static const struct fb_ops amdgpufb_ops = {
 	.owner = THIS_MODULE,
 	DRM_FB_HELPER_DEFAULT_OPS,
 	.fb_open = amdgpufb_open,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 377fe20..3c01252 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -34,6 +34,7 @@
 #include <linux/kref.h>
 #include <linux/slab.h>
 #include <linux/firmware.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drm_debugfs.h>
 
@@ -154,7 +155,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
 		       seq);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       seq, flags | AMDGPU_FENCE_FLAG_INT);
-
+	pm_runtime_get_noresume(adev->ddev->dev);
 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
 		struct dma_fence *old;
@@ -234,6 +235,7 @@ static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
 bool amdgpu_fence_process(struct amdgpu_ring *ring)
 {
 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
+	struct amdgpu_device *adev = ring->adev;
 	uint32_t seq, last_seq;
 	int r;
 
@@ -274,6 +276,8 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
 			BUG();
 
 		dma_fence_put(fence);
+		pm_runtime_mark_last_busy(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 	} while (last_seq != seq);
 
 	return true;
@@ -737,10 +741,18 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct amdgpu_device *adev = dev->dev_private;
+	int r;
+
+	r = pm_runtime_get_sync(dev->dev);
+	if (r < 0)
+		return 0;
 
 	seq_printf(m, "gpu recover\n");
 	amdgpu_device_gpu_recover(adev, NULL);
 
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+
 	return 0;
 }
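
The fence changes above keep the GPU runtime-active while fences are outstanding: a noresume reference is taken at emit time and an autosuspend reference is dropped as each fence signals. The standalone pairing below is illustrative only (hypothetical helper names):

static void example_work_submit(struct device *dev)
{
	/* work is now in flight: block runtime suspend without waking */
	pm_runtime_get_noresume(dev);
}

static void example_work_complete(struct device *dev)
{
	/* work done: re-arm the autosuspend timer and drop the reference */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
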
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 19705e3..e01e681 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -302,6 +302,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
  * @pages: number of pages to bind
  * @pagelist: pages to bind
  * @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
  *
  * Binds the requested pages to the gart page table
  * (all asics).
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index e00b461..0f960b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -296,7 +296,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 
 	spin_lock_init(&kiq->ring_lock);
 
-	r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs);
+	r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs);
 	if (r)
 		return r;
 
@@ -321,7 +321,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 
 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
 {
-	amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+	amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs);
 	amdgpu_ring_fini(ring);
 }
 
@@ -543,12 +543,6 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
 		return;
 
-	if (!is_support_sw_smu(adev) &&
-	    (!adev->powerplay.pp_funcs ||
-	     !adev->powerplay.pp_funcs->set_powergating_by_smu))
-		return;
-
-
 	mutex_lock(&adev->gfx.gfx_off_mutex);
 
 	if (!enable)
@@ -641,7 +635,7 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
 		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
 		if (adev->gfx.funcs->query_ras_error_count)
 			adev->gfx.funcs->query_ras_error_count(adev, err_data);
-		amdgpu_ras_reset_gpu(adev, 0);
+		amdgpu_ras_reset_gpu(adev);
 	}
 	return AMDGPU_RAS_SUCCESS;
 }
@@ -664,3 +658,95 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
 	return 0;
 }
+
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
+{
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+
+	BUG_ON(!ring->funcs->emit_rreg);
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_rreg(ring, reg);
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* Don't wait any longer in the GPU-reset case because doing so may
+	 * block the gpu_recover() routine forever; e.g. this KIQ register
+	 * read is triggered from TTM, and ttm_bo_lock_delayed_workqueue()
+	 * would never return if we kept waiting here, which causes
+	 * gpu_recover() to hang.
+	 *
+	 * Also don't wait when called from IRQ context.
+	 */
+	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+		goto failed_kiq_read;
+
+	might_sleep();
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq_read;
+
+	return adev->wb.wb[kiq->reg_val_offs];
+
+failed_kiq_read:
+	pr_err("failed to read reg:%x\n", reg);
+	return ~0;
+}
+
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+{
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+
+	BUG_ON(!ring->funcs->emit_wreg);
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_wreg(ring, reg, v);
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* Don't wait any longer in the GPU-reset case because doing so may
+	 * block the gpu_recover() routine forever; e.g. this KIQ register
+	 * write is triggered from TTM, and ttm_bo_lock_delayed_workqueue()
+	 * would never return if we kept waiting here, which causes
+	 * gpu_recover() to hang.
+	 *
+	 * Also don't wait when called from IRQ context.
+	 */
+	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+		goto failed_kiq_write;
+
+	might_sleep();
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq_write;
+
+	return;
+
+failed_kiq_write:
+	pr_err("failed to write reg:%x\n", reg);
+}
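
A usage sketch for the two new KIQ accessors (hypothetical helper; the register offset and bit mask are placeholders). They allow register access through the KIQ ring when direct MMIO is not an option:

static void example_kiq_rmw(struct amdgpu_device *adev, uint32_t reg,
			    uint32_t set_bits)
{
	uint32_t v;

	v = amdgpu_kiq_rreg(adev, reg);			/* read via KIQ packet  */
	amdgpu_kiq_wreg(adev, reg, v | set_bits);	/* write via KIQ packet */
}
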
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 0ae0a27..ca17ffb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -76,11 +76,15 @@ struct kiq_pm4_funcs {
 					struct amdgpu_ring *ring,
 					u64 addr,
 					u64 seq);
+	void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring,
+				uint16_t pasid, uint32_t flush_type,
+				bool all_hub);
 	/* Packet sizes */
 	int set_resources_size;
 	int map_queues_size;
 	int unmap_queues_size;
 	int query_status_size;
+	int invalidate_tlbs_size;
 };
 
 struct amdgpu_kiq {
@@ -90,6 +94,7 @@ struct amdgpu_kiq {
 	struct amdgpu_ring	ring;
 	struct amdgpu_irq_src	irq;
 	const struct kiq_pm4_funcs *pmf;
+	uint32_t			reg_val_offs;
 };
 
 /*
@@ -269,8 +274,12 @@ struct amdgpu_gfx {
 	bool				me_fw_write_wait;
 	bool				cp_fw_write_wait;
 	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
+	struct drm_gpu_scheduler	*gfx_sched[AMDGPU_MAX_GFX_RINGS];
+	uint32_t			num_gfx_sched;
 	unsigned			num_gfx_rings;
 	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+	struct drm_gpu_scheduler	*compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
+	uint32_t			num_compute_sched;
 	unsigned			num_compute_rings;
 	struct amdgpu_irq_src		eop_irq;
 	struct amdgpu_irq_src		priv_reg_irq;
@@ -367,4 +376,6 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
 				  struct amdgpu_irq_src *source,
 				  struct amdgpu_iv_entry *entry);
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index a12f33c..5884ab5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -223,7 +223,7 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 	u64 size_af, size_bf;
 
 	if (amdgpu_sriov_vf(adev)) {
-		mc->agp_start = 0xffffffff;
+		mc->agp_start = 0xffffffffffff;
 		mc->agp_end = 0x0;
 		mc->agp_size = 0;
 
@@ -333,3 +333,43 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
 	amdgpu_mmhub_ras_fini(adev);
 	amdgpu_xgmi_ras_fini(adev);
 }
+
+	/*
+	 * The latest engine allocation on gfx9/10 is:
+	 * Engine 2, 3: firmware
+	 * Engine 0, 1, 4~16: amdgpu ring,
+	 *                    subject to change when ring number changes
+	 * Engine 17: Gart flushes
+	 */
+#define GFXHUB_FREE_VM_INV_ENGS_BITMAP		0x1FFF3
+#define MMHUB_FREE_VM_INV_ENGS_BITMAP		0x1FFF3
+
+int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
+		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
+		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
+	unsigned i;
+	unsigned vmhub, inv_eng;
+
+	for (i = 0; i < adev->num_rings; ++i) {
+		ring = adev->rings[i];
+		vmhub = ring->funcs->vmhub;
+
+		inv_eng = ffs(vm_inv_engs[vmhub]);
+		if (!inv_eng) {
+			dev_err(adev->dev, "no VM inv eng for ring %s\n",
+				ring->name);
+			return -EINVAL;
+		}
+
+		ring->vm_inv_eng = inv_eng - 1;
+		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
+
+		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
+			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
+	}
+
+	return 0;
+}
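
A quick worked example of the free-engine bitmaps above: 0x1FFF3 sets bits 0-1 and 4-16, so engines 2-3 (firmware) and 17 (GART flushes) can never be handed out. The fragment below (standalone, not part of the patch) walks the same ffs()-based allocation for the first three rings:

	unsigned int free_mask = 0x1FFF3;	/* engines 0-1 and 4-16 free */
	unsigned int eng;

	eng = ffs(free_mask) - 1;	/* -> 0: first ring gets engine 0  */
	free_mask &= ~(1u << eng);	/* mask is now 0x1FFF2             */
	eng = ffs(free_mask) - 1;	/* -> 1: second ring gets engine 1 */
	free_mask &= ~(1u << eng);	/* mask is now 0x1FFF0             */
	eng = ffs(free_mask) - 1;	/* -> 4: third ring skips 2 and 3  */
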
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index b499a3d..d3c27a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -92,6 +92,9 @@ struct amdgpu_gmc_funcs {
 	/* flush the vm tlb via mmio */
 	void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid,
 				uint32_t vmhub, uint32_t flush_type);
+	/* flush the vm tlb via pasid */
+	int (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid,
+					uint32_t flush_type, bool all_hub);
 	/* flush the vm tlb via ring */
 	uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
 				       uint64_t pd_addr);
@@ -216,6 +219,9 @@ struct amdgpu_gmc {
 };
 
 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
+#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub) \
+	((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \
+	((adev), (pasid), (type), (allhub)))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
 #define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
@@ -267,5 +273,6 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
 			      uint16_t pasid, uint64_t timestamp);
 int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
+int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
 
 #endif
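
A hedged usage sketch for the new per-PASID flush hook (hypothetical helper; flush type 0 and all_hub=true are placeholder arguments):

static int example_flush_pasid(struct amdgpu_device *adev, uint16_t pasid)
{
	/* not every ASIC implements the PASID variant */
	if (!adev->gmc.gmc_funcs->flush_gpu_tlb_pasid)
		return -ENOENT;

	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, 0, true);
}
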
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 6f92897..3a67f6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -206,7 +206,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 	int r;
 
 	if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
-		return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);
+		return amdgpu_sync_fence(sync, ring->vmid_wait, false);
 
 	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
 	if (!fences)
@@ -241,7 +241,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 			return -ENOMEM;
 		}
 
-		r = amdgpu_sync_fence(adev, sync, &array->base, false);
+		r = amdgpu_sync_fence(sync, &array->base, false);
 		dma_fence_put(ring->vmid_wait);
 		ring->vmid_wait = &array->base;
 		return r;
@@ -294,7 +294,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
 		if (tmp) {
 			*id = NULL;
-			r = amdgpu_sync_fence(adev, sync, tmp, false);
+			r = amdgpu_sync_fence(sync, tmp, false);
 			return r;
 		}
 		needs_flush = true;
@@ -303,7 +303,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	/* Good we can use this VMID. Remember this submission as
 	* user of the VMID.
 	*/
-	r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
+	r = amdgpu_sync_fence(&(*id)->active, fence, false);
 	if (r)
 		return r;
 
@@ -375,7 +375,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 		/* Good, we can use this VMID. Remember this submission as
 		 * user of the VMID.
 		 */
-		r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
+		r = amdgpu_sync_fence(&(*id)->active, fence, false);
 		if (r)
 			return r;
 
@@ -435,8 +435,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 			id = idle;
 
 			/* Remember this submission as user of the VMID */
-			r = amdgpu_sync_fence(ring->adev, &id->active,
-					      fence, false);
+			r = amdgpu_sync_fence(&id->active, fence, false);
 			if (r)
 				goto error;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 6d8f055..111a301 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -66,7 +66,6 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
 		if (ih->ring == NULL)
 			return -ENOMEM;
 
-		memset((void *)ih->ring, 0, ih->ring_size + 8);
 		ih->gpu_addr = dma_addr;
 		ih->wptr_addr = dma_addr + ih->ring_size;
 		ih->wptr_cpu = &ih->ring[ih->ring_size / 4];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 30d540d..5ed4227 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -55,6 +55,7 @@
 #include "amdgpu_connectors.h"
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
+#include "amdgpu_ras.h"
 
 #include <linux/pm_runtime.h>
 
@@ -162,13 +163,15 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
 	 * register to check whether the interrupt is triggered or not, and properly
 	 * ack the interrupt if it is there
 	 */
-	if (adev->nbio.funcs &&
-	    adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
-		adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);
+	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
+		if (adev->nbio.funcs &&
+		    adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
+			adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);
 
-	if (adev->nbio.funcs &&
-	    adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
-		adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+		if (adev->nbio.funcs &&
+		    adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
+			adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+	}
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4fb20e8..d42be88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -153,7 +153,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 	if (r)
 		return r;
 
-	job->owner = owner;
 	*f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
 	priority = job->base.s_priority;
@@ -193,8 +192,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 	fence = amdgpu_sync_get_fence(&job->sync, &explicit);
 	if (fence && explicit) {
 		if (drm_sched_dependency_optimized(fence, s_entity)) {
-			r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
-					      fence, false);
+			r = amdgpu_sync_fence(&job->sched_sync, fence, false);
 			if (r)
 				DRM_ERROR("Error adding fence (%d)\n", r);
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index dc7ee93..3f7b843 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -49,7 +49,6 @@ struct amdgpu_job {
 	uint32_t		preamble_status;
 	uint32_t                preemption_status;
 	uint32_t		num_ibs;
-	void			*owner;
 	bool                    vm_needs_flush;
 	uint64_t		vm_pd_addr;
 	unsigned		vmid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
new file mode 100644
index 0000000..5727f00a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "amdgpu_pm.h"
+#include "soc15d.h"
+#include "soc15_common.h"
+
+#define JPEG_IDLE_TIMEOUT	msecs_to_jiffies(1000)
+
+static void amdgpu_jpeg_idle_work_handler(struct work_struct *work);
+
+int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
+{
+	INIT_DELAYED_WORK(&adev->jpeg.idle_work, amdgpu_jpeg_idle_work_handler);
+
+	return 0;
+}
+
+int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
+{
+	int i;
+
+	cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec);
+	}
+
+	return 0;
+}
+
+int amdgpu_jpeg_suspend(struct amdgpu_device *adev)
+{
+	cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+	return 0;
+}
+
+int amdgpu_jpeg_resume(struct amdgpu_device *adev)
+{
+	return 0;
+}
+
+static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
+{
+	struct amdgpu_device *adev =
+		container_of(work, struct amdgpu_device, jpeg.idle_work.work);
+	unsigned int fences = 0;
+	unsigned int i;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec);
+	}
+
+	if (fences == 0)
+		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
+						       AMD_PG_STATE_GATE);
+	else
+		schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
+}
+
+void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	bool set_clocks = !cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+	if (set_clocks)
+		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
+						       AMD_PG_STATE_UNGATE);
+}
+
+void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring)
+{
+	schedule_delayed_work(&ring->adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
+}
+
+int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD);
+	r = amdgpu_ring_alloc(ring, 3);
+	if (r)
+		return r;
+
+	amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch, 0));
+	amdgpu_ring_write(ring, 0xDEADBEEF);
+	amdgpu_ring_commit(ring);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch);
+		if (tmp == 0xDEADBEEF)
+			break;
+		udelay(1);
+	}
+
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
+	return r;
+}
+
+static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
+		struct dma_fence **fence)
+{
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_job *job;
+	struct amdgpu_ib *ib;
+	struct dma_fence *f = NULL;
+	const unsigned ib_size_dw = 16;
+	int i, r;
+
+	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+	if (r)
+		return r;
+
+	ib = &job->ibs[0];
+
+	ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
+	ib->ptr[1] = 0xDEADBEEF;
+	for (i = 2; i < 16; i += 2) {
+		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+		ib->ptr[i+1] = 0;
+	}
+	ib->length_dw = 16;
+
+	r = amdgpu_job_submit_direct(job, ring, &f);
+	if (r)
+		goto err;
+
+	if (fence)
+		*fence = dma_fence_get(f);
+	dma_fence_put(f);
+
+	return 0;
+
+err:
+	amdgpu_job_free(job);
+	return r;
+}
+
+int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t tmp = 0;
+	unsigned i;
+	struct dma_fence *fence = NULL;
+	long r = 0;
+
+	r = amdgpu_jpeg_dec_set_reg(ring, 1, &fence);
+	if (r)
+		goto error;
+
+	r = dma_fence_wait_timeout(fence, false, timeout);
+	if (r == 0) {
+		r = -ETIMEDOUT;
+		goto error;
+	} else if (r < 0) {
+		goto error;
+	} else {
+		r = 0;
+	}
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch);
+		if (tmp == 0xDEADBEEF)
+			break;
+		udelay(1);
+	}
+
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
+	dma_fence_put(fence);
+error:
+	return r;
+}
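
The begin_use/end_use pair above brackets work on a JPEG ring so the idle worker can power-gate the block after a quiet second. A minimal submission-path sketch (placeholder body, not from the patch):

static void example_jpeg_submit(struct amdgpu_ring *ring)
{
	amdgpu_jpeg_ring_begin_use(ring);	/* ungate, cancel idle work */

	/* ... build, emit and commit the decode packets here ... */

	amdgpu_jpeg_ring_end_use(ring);		/* re-arm JPEG_IDLE_TIMEOUT */
}
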
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
new file mode 100644
index 0000000..bd9ef9c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_JPEG_H__
+#define __AMDGPU_JPEG_H__
+
+#define AMDGPU_MAX_JPEG_INSTANCES	2
+
+#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
+#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
+
+struct amdgpu_jpeg_reg {
+	unsigned jpeg_pitch;
+};
+
+struct amdgpu_jpeg_inst {
+	struct amdgpu_ring ring_dec;
+	struct amdgpu_irq_src irq;
+	struct amdgpu_jpeg_reg external;
+};
+
+struct amdgpu_jpeg {
+	uint8_t	num_jpeg_inst;
+	struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
+	struct amdgpu_jpeg_reg internal;
+	struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES];
+	uint32_t num_jpeg_sched;
+	unsigned harvest_config;
+	struct delayed_work idle_work;
+	enum amd_powergating_state cur_state;
+};
+
+int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
+int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev);
+int amdgpu_jpeg_suspend(struct amdgpu_device *adev);
+int amdgpu_jpeg_resume(struct amdgpu_device *adev);
+
+void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring);
+
+int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring);
+int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+
+#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b6db28a..60591db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -91,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_request_full_gpu(adev, false);
 
-	if (amdgpu_device_is_px(dev)) {
+	if (adev->runpm) {
 		pm_runtime_get_sync(dev->dev);
 		pm_runtime_forbid(dev->dev);
 	}
@@ -150,8 +150,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	}
 	dev->dev_private = (void *)adev;
 
-	if ((amdgpu_runtime_pm != 0) &&
-	    amdgpu_has_atpx() &&
+	if (amdgpu_has_atpx() &&
 	    (amdgpu_is_atpx_hybrid() ||
 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
 	    ((flags & AMD_IS_APU) == 0) &&
@@ -170,6 +169,13 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 		goto out;
 	}
 
+	if (amdgpu_device_supports_boco(dev) &&
+	    (amdgpu_runtime_pm != 0)) /* enable runpm by default */
+		adev->runpm = true;
+	else if (amdgpu_device_supports_baco(dev) &&
+		 (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 */
+		adev->runpm = true;
+
 	/* Call ACPI methods: require modeset init
 	 * but failure is not fatal
 	 */
@@ -180,7 +186,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 				"Error during ACPI methods call\n");
 	}
 
-	if (amdgpu_device_is_px(dev)) {
+	if (adev->runpm) {
 		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
 		pm_runtime_use_autosuspend(dev->dev);
 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -193,7 +199,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 out:
 	if (r) {
 		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
-		if (adev->rmmio && amdgpu_device_is_px(dev))
+		if (adev->rmmio && adev->runpm)
 			pm_runtime_put_noidle(dev->dev);
 		amdgpu_driver_unload_kms(dev);
 	}
@@ -293,6 +299,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 		fw_info->ver = adev->dm.dmcu_fw_version;
 		fw_info->feature = 0;
 		break;
+	case AMDGPU_INFO_FW_DMCUB:
+		fw_info->ver = adev->dm.dmcub_fw_version;
+		fw_info->feature = 0;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -396,12 +406,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 		ib_size_alignment = 1;
 		break;
 	case AMDGPU_HW_IP_VCN_JPEG:
-		type = AMD_IP_BLOCK_TYPE_VCN;
-		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-			if (adev->uvd.harvest_config & (1 << i))
+		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
+			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
+
+		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+			if (adev->jpeg.harvest_config & (1 << i))
 				continue;
 
-			if (adev->vcn.inst[i].ring_jpeg.sched.ready)
+			if (adev->jpeg.inst[i].ring_dec.sched.ready)
 				++num_rings;
 		}
 		ib_start_alignment = 16;
@@ -517,9 +529,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			break;
 		case AMDGPU_HW_IP_VCN_DEC:
 		case AMDGPU_HW_IP_VCN_ENC:
-		case AMDGPU_HW_IP_VCN_JPEG:
 			type = AMD_IP_BLOCK_TYPE_VCN;
 			break;
+		case AMDGPU_HW_IP_VCN_JPEG:
+			type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
+				AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
+			break;
 		default:
 			return -EINVAL;
 		}
@@ -688,10 +703,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		if (adev->pm.dpm_enabled) {
 			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
 			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
-		} else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
-			   adev->virt.ops->get_pp_clk) {
-			dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
-			dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
 		} else {
 			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
 			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
@@ -1394,6 +1405,14 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
 	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
 		   fw_info.feature, fw_info.ver);
 
+	/* DMCUB */
+	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
 
 	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
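
The runpm checks added to amdgpu_driver_load_kms() above boil down to a simple rule; the hypothetical helper below restates it, assuming the usual amdgpu.runpm semantics (-1 auto, 0 disabled, 1 forced on):

static bool example_runpm_enabled(struct drm_device *dev, int runpm)
{
	if (amdgpu_device_supports_boco(dev))
		return runpm != 0;	/* on by default for PX/HG parts */

	if (amdgpu_device_supports_baco(dev))
		return runpm > 0;	/* strictly opt-in for BACO parts */

	return false;
}
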
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index f205f56..b03b1eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -37,6 +37,7 @@
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/nospec.h>
+#include <linux/pm_runtime.h>
 #include "hwmgr.h"
 #define WIDTH_4K 3840
 
@@ -158,6 +159,14 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_pm_state_type pm;
+	int ret;
+
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev)) {
 		if (adev->smu.ppt_funcs->get_current_power_state)
@@ -170,6 +179,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
 		pm = adev->pm.dpm.user_state;
 	}
 
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
 			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -183,6 +195,10 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_pm_state_type  state;
+	int ret;
+
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return -EINVAL;
 
 	if (strncmp("battery", buf, strlen("battery")) == 0)
 		state = POWER_STATE_TYPE_BATTERY;
@@ -190,10 +206,12 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
 		state = POWER_STATE_TYPE_BALANCED;
 	else if (strncmp("performance", buf, strlen("performance")) == 0)
 		state = POWER_STATE_TYPE_PERFORMANCE;
-	else {
-		count = -EINVAL;
-		goto fail;
-	}
+	else
+		return -EINVAL;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev)) {
 		mutex_lock(&adev->pm.mutex);
@@ -206,12 +224,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
 		adev->pm.dpm.user_state = state;
 		mutex_unlock(&adev->pm.mutex);
 
-		/* Can't set dpm state when the card is off */
-		if (!(adev->flags & AMD_IS_PX) ||
-		    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
-			amdgpu_pm_compute_clocks(adev);
+		amdgpu_pm_compute_clocks(adev);
 	}
-fail:
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	return count;
 }
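
Every pm sysfs handler in this file now follows the same shape: wake the device with pm_runtime_get_sync(), do the work, then mark-busy and put on every exit path. A stripped-down illustration (hypothetical attribute, not from the patch):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	ssize_t ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	ret = snprintf(buf, PAGE_SIZE, "example\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return ret;
}
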
 
@@ -282,13 +299,14 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_dpm_forced_level level = 0xff;
+	int ret;
 
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
 		return 0;
 
-	if ((adev->flags & AMD_IS_PX) &&
-	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return snprintf(buf, PAGE_SIZE, "off\n");
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev))
 		level = smu_get_performance_level(&adev->smu);
@@ -297,6 +315,9 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
 	else
 		level = adev->pm.dpm.forced_level;
 
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
 			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
@@ -320,9 +341,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	enum amd_dpm_forced_level current_level = 0xff;
 	int ret = 0;
 
-	/* Can't force performance level when the card is off */
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
 		return -EINVAL;
 
 	if (strncmp("low", buf, strlen("low")) == 0) {
@@ -344,30 +363,23 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
 	}  else {
-		count = -EINVAL;
-		goto fail;
+		return -EINVAL;
 	}
 
-	/* handle sriov case here */
-	if (amdgpu_sriov_vf(adev)) {
-		if (amdgim_is_hwperf(adev) &&
-		    adev->virt.ops->force_dpm_level) {
-			mutex_lock(&adev->pm.mutex);
-			adev->virt.ops->force_dpm_level(adev, level);
-			mutex_unlock(&adev->pm.mutex);
-			return count;
-		} else {
-			return -EINVAL;
-		}
-	}
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev))
 		current_level = smu_get_performance_level(&adev->smu);
 	else if (adev->powerplay.pp_funcs->get_performance_level)
 		current_level = amdgpu_dpm_get_performance_level(adev);
 
-	if (current_level == level)
+	if (current_level == level) {
+		pm_runtime_mark_last_busy(ddev->dev);
+		pm_runtime_put_autosuspend(ddev->dev);
 		return count;
+	}
 
 	/* profile_exit setting is valid only when current mode is in profile mode */
 	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
@@ -376,29 +388,40 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	    AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
 	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
 		pr_err("Currently not in any profile mode!\n");
+		pm_runtime_mark_last_busy(ddev->dev);
+		pm_runtime_put_autosuspend(ddev->dev);
 		return -EINVAL;
 	}
 
 	if (is_support_sw_smu(adev)) {
 		ret = smu_force_performance_level(&adev->smu, level);
-		if (ret)
-			count = -EINVAL;
+		if (ret) {
+			pm_runtime_mark_last_busy(ddev->dev);
+			pm_runtime_put_autosuspend(ddev->dev);
+			return -EINVAL;
+		}
 	} else if (adev->powerplay.pp_funcs->force_performance_level) {
 		mutex_lock(&adev->pm.mutex);
 		if (adev->pm.dpm.thermal_active) {
-			count = -EINVAL;
 			mutex_unlock(&adev->pm.mutex);
-			goto fail;
+			pm_runtime_mark_last_busy(ddev->dev);
+			pm_runtime_put_autosuspend(ddev->dev);
+			return -EINVAL;
 		}
 		ret = amdgpu_dpm_force_performance_level(adev, level);
-		if (ret)
-			count = -EINVAL;
-		else
+		if (ret) {
+			mutex_unlock(&adev->pm.mutex);
+			pm_runtime_mark_last_busy(ddev->dev);
+			pm_runtime_put_autosuspend(ddev->dev);
+			return -EINVAL;
+		} else {
 			adev->pm.dpm.forced_level = level;
+		}
 		mutex_unlock(&adev->pm.mutex);
 	}
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
 
-fail:
 	return count;
 }
 
@@ -411,6 +434,10 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 	struct pp_states_info data;
 	int i, buf_len, ret;
 
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (is_support_sw_smu(adev)) {
 		ret = smu_get_power_num_states(&adev->smu, &data);
 		if (ret)
@@ -418,6 +445,9 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 	} else if (adev->powerplay.pp_funcs->get_pp_num_states)
 		amdgpu_dpm_get_pp_num_states(adev, &data);
 
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
 	for (i = 0; i < data.nums; i++)
 		buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
@@ -440,6 +470,13 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 	enum amd_pm_state_type pm = 0;
 	int i = 0, ret = 0;
 
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (is_support_sw_smu(adev)) {
 		pm = smu_get_current_power_state(smu);
 		ret = smu_get_power_num_states(smu, &data);
@@ -451,6 +488,9 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 		amdgpu_dpm_get_pp_num_states(adev, &data);
 	}
 
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	for (i = 0; i < data.nums; i++) {
 		if (pm == data.states[i])
 			break;
@@ -469,6 +509,9 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
 	if (adev->pp_force_state_enabled)
 		return amdgpu_get_pp_cur_state(dev, attr, buf);
 	else
@@ -486,6 +529,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 	unsigned long idx;
 	int ret;
 
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return -EINVAL;
+
 	if (strlen(buf) == 1)
 		adev->pp_force_state_enabled = false;
 	else if (is_support_sw_smu(adev))
@@ -495,14 +541,18 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 		struct pp_states_info data;
 
 		ret = kstrtoul(buf, 0, &idx);
-		if (ret || idx >= ARRAY_SIZE(data.states)) {
-			count = -EINVAL;
-			goto fail;
-		}
+		if (ret || idx >= ARRAY_SIZE(data.states))
+			return -EINVAL;
+
 		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
 
 		amdgpu_dpm_get_pp_num_states(adev, &data);
 		state = data.states[idx];
+
+		ret = pm_runtime_get_sync(ddev->dev);
+		if (ret < 0)
+			return ret;
+
 		/* only set user selected power states */
 		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
 		    state != POWER_STATE_TYPE_DEFAULT) {
@@ -510,8 +560,10 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 					AMD_PP_TASK_ENABLE_USER_STATE, &state);
 			adev->pp_force_state_enabled = true;
 		}
+		pm_runtime_mark_last_busy(ddev->dev);
+		pm_runtime_put_autosuspend(ddev->dev);
 	}
-fail:
+
 	return count;
 }
 
@@ -533,17 +585,32 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	char *table = NULL;
-	int size;
+	int size, ret;
+
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev)) {
 		size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
+		pm_runtime_mark_last_busy(ddev->dev);
+		pm_runtime_put_autosuspend(ddev->dev);
 		if (size < 0)
 			return size;
-	}
-	else if (adev->powerplay.pp_funcs->get_pp_table)
+	} else if (adev->powerplay.pp_funcs->get_pp_table) {
 		size = amdgpu_dpm_get_pp_table(adev, &table);
-	else
+		pm_runtime_mark_last_busy(ddev->dev);
+		pm_runtime_put_autosuspend(ddev->dev);
+		if (size < 0)
+			return size;
+	} else {
+		pm_runtime_mark_last_busy(ddev->dev);
+		pm_runtime_put_autosuspend(ddev->dev);
 		return 0;
+	}
 
 	if (size >= PAGE_SIZE)
 		size = PAGE_SIZE - 1;
@@ -562,13 +629,26 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	int ret = 0;
 
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return -EINVAL;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (is_support_sw_smu(adev)) {
 		ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
-		if (ret)
+		if (ret) {
+			pm_runtime_mark_last_busy(ddev->dev);
+			pm_runtime_put_autosuspend(ddev->dev);
 			return ret;
+		}
 	} else if (adev->powerplay.pp_funcs->set_pp_table)
 		amdgpu_dpm_set_pp_table(adev, buf, count);
 
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	return count;
 }
 
@@ -654,6 +734,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 	const char delimiter[3] = {' ', '\n', '\0'};
 	uint32_t type;
 
+	if (amdgpu_sriov_vf(adev))
+		return -EINVAL;
+
 	if (count > 127)
 		return -EINVAL;
 
@@ -689,18 +772,28 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 			tmp_str++;
 	}
 
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (is_support_sw_smu(adev)) {
 		ret = smu_od_edit_dpm_table(&adev->smu, type,
 					    parameter, parameter_size);
 
-		if (ret)
+		if (ret) {
+			pm_runtime_mark_last_busy(ddev->dev);
+			pm_runtime_put_autosuspend(ddev->dev);
 			return -EINVAL;
+		}
 	} else {
 		if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
 			ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
 						parameter, parameter_size);
-			if (ret)
+			if (ret) {
+				pm_runtime_mark_last_busy(ddev->dev);
+				pm_runtime_put_autosuspend(ddev->dev);
 				return -EINVAL;
+			}
 		}
 
 		if (type == PP_OD_COMMIT_DPM_TABLE) {
@@ -708,12 +801,18 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 				amdgpu_dpm_dispatch_task(adev,
 						AMD_PP_TASK_READJUST_POWER_STATE,
 						NULL);
+				pm_runtime_mark_last_busy(ddev->dev);
+				pm_runtime_put_autosuspend(ddev->dev);
 				return count;
 			} else {
+				pm_runtime_mark_last_busy(ddev->dev);
+				pm_runtime_put_autosuspend(ddev->dev);
 				return -EINVAL;
 			}
 		}
 	}
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
 
 	return count;
 }
@@ -724,24 +823,33 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	uint32_t size = 0;
+	ssize_t size;
+	int ret;
+
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev)) {
 		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
 		size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
 		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
 		size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
-		return size;
 	} else if (adev->powerplay.pp_funcs->print_clock_levels) {
 		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
 		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
 		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
 		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
-		return size;
 	} else {
-		return snprintf(buf, PAGE_SIZE, "\n");
+		size = snprintf(buf, PAGE_SIZE, "\n");
 	}
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
 
+	return size;
 }
 
 /**
@@ -770,21 +878,36 @@ static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
 	uint64_t featuremask;
 	int ret;
 
+	if (amdgpu_sriov_vf(adev))
+		return -EINVAL;
+
 	ret = kstrtou64(buf, 0, &featuremask);
 	if (ret)
 		return -EINVAL;
 
 	pr_debug("featuremask = 0x%llx\n", featuremask);
 
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (is_support_sw_smu(adev)) {
 		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
-		if (ret)
+		if (ret) {
+			pm_runtime_mark_last_busy(ddev->dev);
+			pm_runtime_put_autosuspend(ddev->dev);
 			return -EINVAL;
+		}
 	} else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
 		ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
-		if (ret)
+		if (ret) {
+			pm_runtime_mark_last_busy(ddev->dev);
+			pm_runtime_put_autosuspend(ddev->dev);
 			return -EINVAL;
+		}
 	}
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
 
 	return count;
 }
@@ -795,13 +918,27 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
+	ssize_t size;
+	int ret;
 
-	if (is_support_sw_smu(adev)) {
-		return smu_sys_get_pp_feature_mask(&adev->smu, buf);
-	} else if (adev->powerplay.pp_funcs->get_ppfeature_status)
-		return amdgpu_dpm_get_ppfeature_status(adev, buf);
+	if (amdgpu_sriov_vf(adev))
+		return 0;
 
-	return snprintf(buf, PAGE_SIZE, "\n");
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
+
+	if (is_support_sw_smu(adev))
+		size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
+	else if (adev->powerplay.pp_funcs->get_ppfeature_status)
+		size = amdgpu_dpm_get_ppfeature_status(adev, buf);
+	else
+		size = snprintf(buf, PAGE_SIZE, "\n");
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
+	return size;
 }
 
 /**
@@ -840,17 +977,27 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
+	ssize_t size;
+	int ret;
 
-	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
-	    adev->virt.ops->get_pp_clk)
-		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev))
-		return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
+		size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
-		return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
 	else
-		return snprintf(buf, PAGE_SIZE, "\n");
+		size = snprintf(buf, PAGE_SIZE, "\n");
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
+	return size;
 }
 
 /*
@@ -899,18 +1046,25 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return -EINVAL;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (is_support_sw_smu(adev))
 		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
 
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	if (ret)
 		return -EINVAL;
 
@@ -923,17 +1077,27 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
+	ssize_t size;
+	int ret;
 
-	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
-	    adev->virt.ops->get_pp_clk)
-		return adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev))
-		return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
+		size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
-		return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
 	else
-		return snprintf(buf, PAGE_SIZE, "\n");
+		size = snprintf(buf, PAGE_SIZE, "\n");
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
+	return size;
 }
 
 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
@@ -943,21 +1107,28 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	int ret;
 	uint32_t mask = 0;
+	int ret;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+			return -EINVAL;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (is_support_sw_smu(adev))
 		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
 
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	if (ret)
 		return -EINVAL;
 
@@ -970,13 +1141,27 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
+	ssize_t size;
+	int ret;
+
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev))
-		return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
+		size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
-		return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
+		size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
 	else
-		return snprintf(buf, PAGE_SIZE, "\n");
+		size = snprintf(buf, PAGE_SIZE, "\n");
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
+	return size;
 }
 
 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
@@ -989,14 +1174,26 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return -EINVAL;
+
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (is_support_sw_smu(adev))
 		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
+	else
+		ret = 0;
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
 
 	if (ret)
 		return -EINVAL;
@@ -1010,13 +1207,27 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
+	ssize_t size;
+	int ret;
+
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev))
-		return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
+		size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
-		return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
+		size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
 	else
-		return snprintf(buf, PAGE_SIZE, "\n");
+		size = snprintf(buf, PAGE_SIZE, "\n");
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
+	return size;
 }
 
 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
@@ -1029,14 +1240,26 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return -EINVAL;
+
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (is_support_sw_smu(adev))
 		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
+	else
+		ret = 0;
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
 
 	if (ret)
 		return -EINVAL;
@@ -1050,13 +1273,27 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
+	ssize_t size;
+	int ret;
+
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev))
-		return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
+		size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
-		return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
+		size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
 	else
-		return snprintf(buf, PAGE_SIZE, "\n");
+		size = snprintf(buf, PAGE_SIZE, "\n");
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
+	return size;
 }
 
 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
@@ -1069,14 +1306,26 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
+	if (amdgpu_sriov_vf(adev))
+		return -EINVAL;
+
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (is_support_sw_smu(adev))
 		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
+	else
+		ret = 0;
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
 
 	if (ret)
 		return -EINVAL;
@@ -1090,13 +1339,27 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
+	ssize_t size;
+	int ret;
+
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev))
-		return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
+		size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
-		return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
 	else
-		return snprintf(buf, PAGE_SIZE, "\n");
+		size = snprintf(buf, PAGE_SIZE, "\n");
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
+	return size;
 }
 
 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
@@ -1109,14 +1372,26 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return -EINVAL;
+
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (is_support_sw_smu(adev))
 		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+	else
+		ret = 0;
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
 
 	if (ret)
 		return -EINVAL;
@@ -1131,12 +1406,23 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	uint32_t value = 0;
+	int ret;
+
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev))
 		value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
 	else if (adev->powerplay.pp_funcs->get_sclk_od)
 		value = amdgpu_dpm_get_sclk_od(adev);
 
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	return snprintf(buf, PAGE_SIZE, "%d\n", value);
 }
 
@@ -1150,12 +1436,17 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 	int ret;
 	long int value;
 
+	if (amdgpu_sriov_vf(adev))
+		return -EINVAL;
+
 	ret = kstrtol(buf, 0, &value);
 
-	if (ret) {
-		count = -EINVAL;
-		goto fail;
-	}
+	if (ret)
+		return -EINVAL;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev)) {
 		value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
@@ -1171,7 +1462,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 		}
 	}
 
-fail:
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	return count;
 }
 
@@ -1182,12 +1475,23 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	uint32_t value = 0;
+	int ret;
+
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev))
 		value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
 	else if (adev->powerplay.pp_funcs->get_mclk_od)
 		value = amdgpu_dpm_get_mclk_od(adev);
 
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	return snprintf(buf, PAGE_SIZE, "%d\n", value);
 }
 
@@ -1201,12 +1505,17 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 	int ret;
 	long int value;
 
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
 	ret = kstrtol(buf, 0, &value);
 
-	if (ret) {
-		count = -EINVAL;
-		goto fail;
-	}
+	if (ret)
+		return -EINVAL;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev)) {
 		value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
@@ -1222,7 +1531,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 		}
 	}
 
-fail:
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	return count;
 }
 
@@ -1252,13 +1563,27 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
+	ssize_t size;
+	int ret;
+
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev))
-		return smu_get_power_profile_mode(&adev->smu, buf);
+		size = smu_get_power_profile_mode(&adev->smu, buf);
 	else if (adev->powerplay.pp_funcs->get_power_profile_mode)
-		return amdgpu_dpm_get_power_profile_mode(adev, buf);
+		size = amdgpu_dpm_get_power_profile_mode(adev, buf);
+	else
+		size = snprintf(buf, PAGE_SIZE, "\n");
 
-	return snprintf(buf, PAGE_SIZE, "\n");
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
+	return size;
 }
 
 
@@ -1283,7 +1608,10 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 	tmp[1] = '\0';
 	ret = kstrtol(tmp, 0, &profile_mode);
 	if (ret)
-		goto fail;
+		return -EINVAL;
+
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return -EINVAL;
 
 	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
 		if (count < 2 || count > 127)
@@ -1295,23 +1623,30 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 		while (tmp_str[0]) {
 			sub_str = strsep(&tmp_str, delimiter);
 			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
-			if (ret) {
-				count = -EINVAL;
-				goto fail;
-			}
+			if (ret)
+				return -EINVAL;
 			parameter_size++;
 			while (isspace(*tmp_str))
 				tmp_str++;
 		}
 	}
 	parameter[parameter_size] = profile_mode;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (is_support_sw_smu(adev))
 		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
 	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
 		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	if (!ret)
 		return count;
-fail:
+
 	return -EINVAL;
 }
 
@@ -1331,10 +1666,20 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	int r, value, size = sizeof(value);
 
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	r = pm_runtime_get_sync(ddev->dev);
+	if (r < 0)
+		return r;
+
 	/* read the IP busy sensor */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
 				   (void *)&value, &size);
 
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	if (r)
 		return r;
 
@@ -1357,10 +1702,20 @@ static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	int r, value, size = sizeof(value);
 
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	r = pm_runtime_get_sync(ddev->dev);
+	if (r < 0)
+		return r;
+
 	/* read the IP busy sensor */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
 				   (void *)&value, &size);
 
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	if (r)
 		return r;
 
@@ -1386,8 +1741,20 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	uint64_t count0, count1;
+	int ret;
+
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
+
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
 	return snprintf(buf, PAGE_SIZE,	"%llu %llu %i\n",
 			count0, count1, pcie_get_mps(adev->pdev));
 }
@@ -1409,6 +1776,9 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
 	if (adev->unique_id)
 		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
 
@@ -1472,42 +1842,43 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 				      char *buf)
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
-	struct drm_device *ddev = adev->ddev;
 	int channel = to_sensor_dev_attr(attr)->index;
 	int r, temp = 0, size = sizeof(temp);
 
-	/* Can't get temperature when the card is off */
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return -EINVAL;
-
 	if (channel >= PP_TEMP_MAX)
 		return -EINVAL;
 
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
 	switch (channel) {
 	case PP_TEMP_JUNCTION:
 		/* get current junction temperature */
 		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
 					   (void *)&temp, &size);
-		if (r)
-			return r;
 		break;
 	case PP_TEMP_EDGE:
 		/* get current edge temperature */
 		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
 					   (void *)&temp, &size);
-		if (r)
-			return r;
 		break;
 	case PP_TEMP_MEM:
 		/* get current memory temperature */
 		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
 					   (void *)&temp, &size);
-		if (r)
-			return r;
+		break;
+	default:
+		r = -EINVAL;
 		break;
 	}
 
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
+	if (r)
+		return r;
+
 	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
 }
 
@@ -1603,15 +1974,27 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	u32 pwm_mode = 0;
+	int ret;
+
+	ret = pm_runtime_get_sync(adev->ddev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (is_support_sw_smu(adev)) {
 		pwm_mode = smu_get_fan_control_mode(&adev->smu);
 	} else {
-		if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+		if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
 			return -EINVAL;
+		}
 
 		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
 	}
 
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	return sprintf(buf, "%i\n", pwm_mode);
 }
 
@@ -1621,27 +2004,32 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 					    size_t count)
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
-	int err;
+	int err, ret;
 	int value;
 
-	/* Can't adjust fan when the card is off */
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return -EINVAL;
-
 	err = kstrtoint(buf, 10, &value);
 	if (err)
 		return err;
 
+	ret = pm_runtime_get_sync(adev->ddev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (is_support_sw_smu(adev)) {
 		smu_set_fan_control_mode(&adev->smu, value);
 	} else {
-		if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+		if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
 			return -EINVAL;
+		}
 
 		amdgpu_dpm_set_fan_control_mode(adev, value);
 	}
 
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	return count;
 }
 
@@ -1668,34 +2056,43 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 	u32 value;
 	u32 pwm_mode;
 
-	/* Can't adjust fan when the card is off */
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return -EINVAL;
+	err = pm_runtime_get_sync(adev->ddev->dev);
+	if (err < 0)
+		return err;
+
 	if (is_support_sw_smu(adev))
 		pwm_mode = smu_get_fan_control_mode(&adev->smu);
 	else
 		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
 	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
 		pr_info("manual fan speed control should be enabled first\n");
+		pm_runtime_mark_last_busy(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return -EINVAL;
 	}
 
 	err = kstrtou32(buf, 10, &value);
-	if (err)
+	if (err) {
+		pm_runtime_mark_last_busy(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	value = (value * 100) / 255;
 
-	if (is_support_sw_smu(adev)) {
+	if (is_support_sw_smu(adev))
 		err = smu_set_fan_speed_percent(&adev->smu, value);
-		if (err)
-			return err;
-	} else if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
+	else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
 		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
-		if (err)
-			return err;
-	}
+	else
+		err = -EINVAL;
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
+	if (err)
+		return err;
 
 	return count;
 }
@@ -1708,20 +2105,22 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 	int err;
 	u32 speed = 0;
 
-	/* Can't adjust fan when the card is off */
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return -EINVAL;
+	err = pm_runtime_get_sync(adev->ddev->dev);
+	if (err < 0)
+		return err;
 
-	if (is_support_sw_smu(adev)) {
+	if (is_support_sw_smu(adev))
 		err = smu_get_fan_speed_percent(&adev->smu, &speed);
-		if (err)
-			return err;
-	} else if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
+	else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
 		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
-		if (err)
-			return err;
-	}
+	else
+		err = -EINVAL;
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
+	if (err)
+		return err;
 
 	speed = (speed * 255) / 100;
 
@@ -1736,20 +2135,22 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 	int err;
 	u32 speed = 0;
 
-	/* Can't adjust fan when the card is off */
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return -EINVAL;
+	err = pm_runtime_get_sync(adev->ddev->dev);
+	if (err < 0)
+		return err;
 
-	if (is_support_sw_smu(adev)) {
+	if (is_support_sw_smu(adev))
 		err = smu_get_fan_speed_rpm(&adev->smu, &speed);
-		if (err)
-			return err;
-	} else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
 		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
-		if (err)
-			return err;
-	}
+	else
+		err = -EINVAL;
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
+	if (err)
+		return err;
 
 	return sprintf(buf, "%i\n", speed);
 }
@@ -1763,8 +2164,16 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
 	u32 size = sizeof(min_rpm);
 	int r;
 
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
 				   (void *)&min_rpm, &size);
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	if (r)
 		return r;
 
@@ -1780,8 +2189,16 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
 	u32 size = sizeof(max_rpm);
 	int r;
 
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
 				   (void *)&max_rpm, &size);
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	if (r)
 		return r;
 
@@ -1796,20 +2213,22 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 	int err;
 	u32 rpm = 0;
 
-	/* Can't adjust fan when the card is off */
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return -EINVAL;
+	err = pm_runtime_get_sync(adev->ddev->dev);
+	if (err < 0)
+		return err;
 
-	if (is_support_sw_smu(adev)) {
+	if (is_support_sw_smu(adev))
 		err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
-		if (err)
-			return err;
-	} else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
 		err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
-		if (err)
-			return err;
-	}
+	else
+		err = -EINVAL;
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
+	if (err)
+		return err;
 
 	return sprintf(buf, "%i\n", rpm);
 }
@@ -1823,33 +2242,41 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
 	u32 value;
 	u32 pwm_mode;
 
+	err = pm_runtime_get_sync(adev->ddev->dev);
+	if (err < 0)
+		return err;
+
 	if (is_support_sw_smu(adev))
 		pwm_mode = smu_get_fan_control_mode(&adev->smu);
 	else
 		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
 
-	if (pwm_mode != AMD_FAN_CTRL_MANUAL)
+	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
+		pm_runtime_mark_last_busy(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return -ENODATA;
-
-	/* Can't adjust fan when the card is off */
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return -EINVAL;
+	}
 
 	err = kstrtou32(buf, 10, &value);
+	if (err) {
+		pm_runtime_mark_last_busy(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev->ddev->dev);
+		return err;
+	}
+
+	if (is_support_sw_smu(adev))
+		err = smu_set_fan_speed_rpm(&adev->smu, value);
+	else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
+		err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
+	else
+		err = -EINVAL;
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	if (err)
 		return err;
 
-	if (is_support_sw_smu(adev)) {
-		err = smu_set_fan_speed_rpm(&adev->smu, value);
-		if (err)
-			return err;
-	} else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
-		err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
-		if (err)
-			return err;
-	}
-
 	return count;
 }
 
@@ -1859,15 +2286,27 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	u32 pwm_mode = 0;
+	int ret;
+
+	ret = pm_runtime_get_sync(adev->ddev->dev);
+	if (ret < 0)
+		return ret;
 
 	if (is_support_sw_smu(adev)) {
 		pwm_mode = smu_get_fan_control_mode(&adev->smu);
 	} else {
-		if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+		if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
 			return -EINVAL;
+		}
 
 		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
 	}
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
 }
 
@@ -1881,12 +2320,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 	int value;
 	u32 pwm_mode;
 
-	/* Can't adjust fan when the card is off */
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return -EINVAL;
-
-
 	err = kstrtoint(buf, 10, &value);
 	if (err)
 		return err;
@@ -1898,14 +2331,24 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 	else
 		return -EINVAL;
 
+	err = pm_runtime_get_sync(adev->ddev->dev);
+	if (err < 0)
+		return err;
+
 	if (is_support_sw_smu(adev)) {
 		smu_set_fan_control_mode(&adev->smu, pwm_mode);
 	} else {
-		if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+		if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
 			return -EINVAL;
+		}
 		amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
 	}
 
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	return count;
 }
 
@@ -1914,18 +2357,20 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
 					char *buf)
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
-	struct drm_device *ddev = adev->ddev;
 	u32 vddgfx;
 	int r, size = sizeof(vddgfx);
 
-	/* Can't get voltage when the card is off */
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return -EINVAL;
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
 
 	/* get the voltage */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
 				   (void *)&vddgfx, &size);
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	if (r)
 		return r;
 
@@ -1944,7 +2389,6 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
 				       char *buf)
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
-	struct drm_device *ddev = adev->ddev;
 	u32 vddnb;
 	int r, size = sizeof(vddnb);
 
@@ -1952,14 +2396,17 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
 	if  (!(adev->flags & AMD_IS_APU))
 		return -EINVAL;
 
-	/* Can't get voltage when the card is off */
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return -EINVAL;
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
 
 	/* get the voltage */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
 				   (void *)&vddnb, &size);
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	if (r)
 		return r;
 
@@ -1978,19 +2425,21 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
 					   char *buf)
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
-	struct drm_device *ddev = adev->ddev;
 	u32 query = 0;
 	int r, size = sizeof(u32);
 	unsigned uw;
 
-	/* Can't get power when the card is off */
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return -EINVAL;
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
 
 	/* get the voltage */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
 				   (void *)&query, &size);
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	if (r)
 		return r;
 
@@ -2013,16 +2462,27 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	uint32_t limit = 0;
+	ssize_t size;
+	int r;
+
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
 
 	if (is_support_sw_smu(adev)) {
 		smu_get_power_limit(&adev->smu, &limit, true, true);
-		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
-		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else {
-		return snprintf(buf, PAGE_SIZE, "\n");
+		size = snprintf(buf, PAGE_SIZE, "\n");
 	}
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
+	return size;
 }
 
 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
@@ -2031,16 +2491,27 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	uint32_t limit = 0;
+	ssize_t size;
+	int r;
+
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
 
 	if (is_support_sw_smu(adev)) {
 		smu_get_power_limit(&adev->smu, &limit, false,  true);
-		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
-		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else {
-		return snprintf(buf, PAGE_SIZE, "\n");
+		size = snprintf(buf, PAGE_SIZE, "\n");
 	}
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
+	return size;
 }
 
 
@@ -2053,19 +2524,29 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 	int err;
 	u32 value;
 
+	if (amdgpu_sriov_vf(adev))
+		return -EINVAL;
+
 	err = kstrtou32(buf, 10, &value);
 	if (err)
 		return err;
 
 	value = value / 1000000; /* convert to Watt */
 
-	if (is_support_sw_smu(adev)) {
+
+	err = pm_runtime_get_sync(adev->ddev->dev);
+	if (err < 0)
+		return err;
+
+	if (is_support_sw_smu(adev))
 		err = smu_set_power_limit(&adev->smu, value);
-	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
+	else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
 		err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
-	} else {
+	else
 		err = -EINVAL;
-	}
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
 
 	if (err)
 		return err;
@@ -2078,18 +2559,20 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
 				      char *buf)
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
-	struct drm_device *ddev = adev->ddev;
 	uint32_t sclk;
 	int r, size = sizeof(sclk);
 
-	/* Can't get voltage when the card is off */
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return -EINVAL;
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
 
 	/* get the sclk */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
 				   (void *)&sclk, &size);
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	if (r)
 		return r;
 
@@ -2108,18 +2591,20 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
 				      char *buf)
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
-	struct drm_device *ddev = adev->ddev;
 	uint32_t mclk;
 	int r, size = sizeof(mclk);
 
-	/* Can't get voltage when the card is off */
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-		return -EINVAL;
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
 
 	/* get the sclk */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
 				   (void *)&mclk, &size);
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
 	if (r)
 		return r;
 
@@ -2299,6 +2784,23 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	umode_t effective_mode = attr->mode;
 
+	/* under multi-vf mode, the hwmon attributes are all not supported */
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	/* there is no fan under pp one vf mode */
+	if (amdgpu_sriov_is_pp_one_vf(adev) &&
+	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
+		return 0;
+
 	/* Skip fan attributes if fan is not present */
 	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
 	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
@@ -2666,17 +3168,12 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
 	int ret = 0;
-	if (is_support_sw_smu(adev)) {
-	    ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable);
-	    if (ret)
-		DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n",
-			  enable ? "true" : "false", ret);
-	} else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
-		/* enable/disable UVD */
-		mutex_lock(&adev->pm.mutex);
-		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
-		mutex_unlock(&adev->pm.mutex);
-	}
+
+	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+	if (ret)
+		DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+			  enable ? "enable" : "disable", ret);
+
 	/* enable/disable Low Memory PState for UVD (4k videos) */
 	if (adev->asic_type == CHIP_STONEY &&
 		adev->uvd.decode_image_width >= WIDTH_4K) {
@@ -2693,17 +3190,11 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
 	int ret = 0;
-	if (is_support_sw_smu(adev)) {
-	    ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable);
-	    if (ret)
-		DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n",
-			  enable ? "true" : "false", ret);
-	} else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
-		/* enable/disable VCE */
-		mutex_lock(&adev->pm.mutex);
-		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
-		mutex_unlock(&adev->pm.mutex);
-	}
+
+	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+	if (ret)
+		DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
+			  enable ? "enable" : "disable", ret);
 }
 
 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
@@ -2718,42 +3209,14 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
 
 }
 
-int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev)
+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
 {
 	int ret = 0;
 
-	if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)))
-		return ret;
-
-	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_dpm_sclk\n");
-		return ret;
-	}
-
-	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_dpm_mclk\n");
-		return ret;
-	}
-
-	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
-	if (ret) {
-		DRM_ERROR("failed to create device file for dpm state\n");
-		return ret;
-	}
-
-	return ret;
-}
-
-void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev)
-{
-	if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)))
-		return;
-
-	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
-	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
-	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
+	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+	if (ret)
+		DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
+			  enable ? "enable" : "disable", ret);
 }
 
 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
@@ -3163,8 +3626,12 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct amdgpu_device *adev = dev->dev_private;
-	struct drm_device *ddev = adev->ddev;
 	u32 flags = 0;
+	int r;
+
+	r = pm_runtime_get_sync(dev->dev);
+	if (r < 0)
+		return r;
 
 	amdgpu_device_ip_get_clockgating_state(adev, &flags);
 	seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
@@ -3173,23 +3640,28 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 
 	if (!adev->pm.dpm_enabled) {
 		seq_printf(m, "dpm not enabled\n");
+		pm_runtime_mark_last_busy(dev->dev);
+		pm_runtime_put_autosuspend(dev->dev);
 		return 0;
 	}
-	if  ((adev->flags & AMD_IS_PX) &&
-	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
-		seq_printf(m, "PX asic powered off\n");
-	} else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
+
+	if (!is_support_sw_smu(adev) &&
+	    adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
 		mutex_lock(&adev->pm.mutex);
 		if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
 			adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
 		else
 			seq_printf(m, "Debugfs support not implemented for this asic\n");
 		mutex_unlock(&adev->pm.mutex);
+		r = 0;
 	} else {
-		return amdgpu_debugfs_pm_info_pp(m, adev);
+		r = amdgpu_debugfs_pm_info_pp(m, adev);
 	}
 
-	return 0;
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+
+	return r;
 }
 
 static const struct drm_info_list amdgpu_pm_info_list[] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index ef31448..3da1da2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -41,5 +41,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
 void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 0e6dba9..07914e34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -74,9 +74,9 @@ static void amdgpu_perf_start(struct perf_event *event, int flags)
 	switch (pe->pmu_perf_type) {
 	case PERF_TYPE_AMDGPU_DF:
 		if (!(flags & PERF_EF_RELOAD))
-			pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
+			pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1);
 
-		pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 0);
+		pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 0);
 		break;
 	default:
 		break;
@@ -101,13 +101,13 @@ static void amdgpu_perf_read(struct perf_event *event)
 
 		switch (pe->pmu_perf_type) {
 		case PERF_TYPE_AMDGPU_DF:
-			pe->adev->df_funcs->pmc_get_count(pe->adev, hwc->conf,
+			pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->conf,
 							  &count);
 			break;
 		default:
 			count = 0;
 			break;
-		};
+		}
 	} while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev);
 
 	local64_add(count - prev, &event->count);
@@ -126,11 +126,11 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
 
 	switch (pe->pmu_perf_type) {
 	case PERF_TYPE_AMDGPU_DF:
-		pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 0);
+		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 0);
 		break;
 	default:
 		break;
-	};
+	}
 
 	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
 	hwc->state |= PERF_HES_STOPPED;
@@ -156,11 +156,11 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
 
 	switch (pe->pmu_perf_type) {
 	case PERF_TYPE_AMDGPU_DF:
-		retval = pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
+		retval = pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1);
 		break;
 	default:
 		return 0;
-	};
+	}
 
 	if (retval)
 		return retval;
@@ -184,11 +184,11 @@ static void amdgpu_perf_del(struct perf_event *event, int flags)
 
 	switch (pe->pmu_perf_type) {
 	case PERF_TYPE_AMDGPU_DF:
-		pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 1);
+		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 1);
 		break;
 	default:
 		break;
-	};
+	}
 
 	perf_event_update_userpage(event);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index e1b8d8d..3a1570d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -158,7 +158,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
 
 	index = atomic_inc_return(&psp->fence_value);
-	ret = psp_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
+	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
 	if (ret) {
 		atomic_dec(&psp->fence_value);
 		mutex_unlock(&psp->mutex);
@@ -191,9 +191,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
 		if (ucode)
 			DRM_WARN("failed to load ucode id (%d) ",
 				  ucode->ucode_id);
-		DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n",
+		DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
 			 psp->cmd_buf_mem->cmd_id,
-			 psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK);
+			 psp->cmd_buf_mem->resp.status);
 		if (!timeout) {
 			mutex_unlock(&psp->mutex);
 			return -EINVAL;
@@ -318,35 +318,17 @@ static int psp_tmr_load(struct psp_context *psp)
 	return ret;
 }
 
-static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-				 uint64_t asd_mc, uint64_t asd_mc_shared,
-				 uint32_t size, uint32_t shared_size)
+static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+				uint64_t asd_mc, uint32_t size)
 {
 	cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
 	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
 	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
 	cmd->cmd.cmd_load_ta.app_len = size;
 
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(asd_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(asd_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
-static int psp_asd_init(struct psp_context *psp)
-{
-	int ret;
-
-	/*
-	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
-	 * physical) for shared ASD <-> Driver
-	 */
-	ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE,
-				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-				      &psp->asd_shared_bo,
-				      &psp->asd_shared_mc_addr,
-				      &psp->asd_shared_buf);
-
-	return ret;
+	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
+	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
+	cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
 }
 
 static int psp_asd_load(struct psp_context *psp)
@@ -368,11 +350,49 @@ static int psp_asd_load(struct psp_context *psp)
 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
 	memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);
 
-	psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->asd_shared_mc_addr,
-			     psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);
+	psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+				  psp->asd_ucode_size);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 				 psp->fence_buf_mc_addr);
+	if (!ret) {
+		psp->asd_context.asd_initialized = true;
+		psp->asd_context.session_id = cmd->resp.session_id;
+	}
+
+	kfree(cmd);
+
+	return ret;
+}
+
+static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+				       uint32_t session_id)
+{
+	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+	cmd->cmd.cmd_unload_ta.session_id = session_id;
+}
+
+static int psp_asd_unload(struct psp_context *psp)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	if (!psp->asd_context.asd_initialized)
+		return 0;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd,
+				 psp->fence_buf_mc_addr);
+	if (!ret)
+		psp->asd_context.asd_initialized = false;
 
 	kfree(cmd);
 
@@ -407,18 +427,20 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
 	return ret;
 }
 
-static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					  uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared,
-					  uint32_t xgmi_ta_size, uint32_t shared_size)
+static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+				     uint64_t ta_bin_mc,
+				     uint32_t ta_bin_size,
+				     uint64_t ta_shared_mc,
+				     uint32_t ta_shared_size)
 {
-        cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
-        cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc);
-        cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc);
-        cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size;
+	cmd->cmd_id 				= GFX_CMD_ID_LOAD_TA;
+	cmd->cmd.cmd_load_ta.app_phy_addr_lo 	= lower_32_bits(ta_bin_mc);
+	cmd->cmd.cmd_load_ta.app_phy_addr_hi 	= upper_32_bits(ta_bin_mc);
+	cmd->cmd.cmd_load_ta.app_len 		= ta_bin_size;
 
-        cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared);
-        cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared);
-        cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
+	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
+	cmd->cmd.cmd_load_ta.cmd_buf_len 	 = ta_shared_size;
 }
 
 static int psp_xgmi_init_shared_buf(struct psp_context *psp)
@@ -438,6 +460,36 @@ static int psp_xgmi_init_shared_buf(struct psp_context *psp)
 	return ret;
 }
 
+static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+				       uint32_t ta_cmd_id,
+				       uint32_t session_id)
+{
+	cmd->cmd_id 				= GFX_CMD_ID_INVOKE_CMD;
+	cmd->cmd.cmd_invoke_cmd.session_id 	= session_id;
+	cmd->cmd.cmd_invoke_cmd.ta_cmd_id 	= ta_cmd_id;
+}
+
+int psp_ta_invoke(struct psp_context *psp,
+		  uint32_t ta_cmd_id,
+		  uint32_t session_id)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd,
+				 psp->fence_buf_mc_addr);
+
+	kfree(cmd);
+
+	return ret;
+}
+
 static int psp_xgmi_load(struct psp_context *psp)
 {
 	int ret;
@@ -446,8 +498,6 @@ static int psp_xgmi_load(struct psp_context *psp)
 	/*
 	 * TODO: bypass the loading in sriov for now
 	 */
-	if (amdgpu_sriov_vf(psp->adev))
-		return 0;
 
 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 	if (!cmd)
@@ -456,9 +506,11 @@ static int psp_xgmi_load(struct psp_context *psp)
 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
 	memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
 
-	psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
-				      psp->xgmi_context.xgmi_shared_mc_addr,
-				      psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE);
+	psp_prep_ta_load_cmd_buf(cmd,
+				 psp->fw_pri_mc_addr,
+				 psp->ta_xgmi_ucode_size,
+				 psp->xgmi_context.xgmi_shared_mc_addr,
+				 PSP_XGMI_SHARED_MEM_SIZE);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 				 psp->fence_buf_mc_addr);
@@ -473,29 +525,25 @@ static int psp_xgmi_load(struct psp_context *psp)
 	return ret;
 }
 
-static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					    uint32_t xgmi_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
-	cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id;
-}
-
 static int psp_xgmi_unload(struct psp_context *psp)
 {
 	int ret;
 	struct psp_gfx_cmd_resp *cmd;
+	struct amdgpu_device *adev = psp->adev;
+
+	/* XGMI TA unload currently is not supported on Arcturus */
+	if (adev->asic_type == CHIP_ARCTURUS)
+		return 0;
 
 	/*
 	 * TODO: bypass the unloading in sriov for now
 	 */
-	if (amdgpu_sriov_vf(psp->adev))
-		return 0;
 
 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 	if (!cmd)
 		return -ENOMEM;
 
-	psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
+	psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 				 psp->fence_buf_mc_addr);
@@ -505,40 +553,9 @@ static int psp_xgmi_unload(struct psp_context *psp)
 	return ret;
 }
 
-static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					    uint32_t ta_cmd_id,
-					    uint32_t xgmi_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
-	cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id;
-	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
-	/* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
-	/*
-	 * TODO: bypass the loading in sriov for now
-	*/
-	if (amdgpu_sriov_vf(psp->adev))
-		return 0;
-
-	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id,
-					psp->xgmi_context.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd,
-				 psp->fence_buf_mc_addr);
-
-	kfree(cmd);
-
-        return ret;
+	return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
 }
 
 static int psp_xgmi_terminate(struct psp_context *psp)
@@ -594,20 +611,6 @@ static int psp_xgmi_initialize(struct psp_context *psp)
 }
 
 // ras begin
-static void psp_prep_ras_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-		uint64_t ras_ta_mc, uint64_t ras_mc_shared,
-		uint32_t ras_ta_size, uint32_t shared_size)
-{
-	cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
-	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ras_ta_mc);
-	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ras_ta_mc);
-	cmd->cmd.cmd_load_ta.app_len = ras_ta_size;
-
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ras_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ras_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
 static int psp_ras_init_shared_buf(struct psp_context *psp)
 {
 	int ret;
@@ -643,15 +646,17 @@ static int psp_ras_load(struct psp_context *psp)
 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
 	memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
 
-	psp_prep_ras_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
-			psp->ras.ras_shared_mc_addr,
-			psp->ta_ras_ucode_size, PSP_RAS_SHARED_MEM_SIZE);
+	psp_prep_ta_load_cmd_buf(cmd,
+				 psp->fw_pri_mc_addr,
+				 psp->ta_ras_ucode_size,
+				 psp->ras.ras_shared_mc_addr,
+				 PSP_RAS_SHARED_MEM_SIZE);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 			psp->fence_buf_mc_addr);
 
 	if (!ret) {
-		psp->ras.ras_initialized = 1;
+		psp->ras.ras_initialized = true;
 		psp->ras.session_id = cmd->resp.session_id;
 	}
 
@@ -660,13 +665,6 @@ static int psp_ras_load(struct psp_context *psp)
 	return ret;
 }
 
-static void psp_prep_ras_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-						uint32_t ras_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
-	cmd->cmd.cmd_unload_ta.session_id = ras_session_id;
-}
-
 static int psp_ras_unload(struct psp_context *psp)
 {
 	int ret;
@@ -682,7 +680,7 @@ static int psp_ras_unload(struct psp_context *psp)
 	if (!cmd)
 		return -ENOMEM;
 
-	psp_prep_ras_ta_unload_cmd_buf(cmd, psp->ras.session_id);
+	psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 			psp->fence_buf_mc_addr);
@@ -692,40 +690,15 @@ static int psp_ras_unload(struct psp_context *psp)
 	return ret;
 }
 
-static void psp_prep_ras_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-		uint32_t ta_cmd_id,
-		uint32_t ras_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
-	cmd->cmd.cmd_invoke_cmd.session_id = ras_session_id;
-	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
-	/* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
 	/*
 	 * TODO: bypass the loading in sriov for now
 	 */
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
-	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	psp_prep_ras_ta_invoke_cmd_buf(cmd, ta_cmd_id,
-			psp->ras.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd,
-			psp->fence_buf_mc_addr);
-
-	kfree(cmd);
-
-	return ret;
+	return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
 }
 
 int psp_ras_enable_features(struct psp_context *psp,
@@ -771,7 +744,7 @@ static int psp_ras_terminate(struct psp_context *psp)
 	if (ret)
 		return ret;
 
-	psp->ras.ras_initialized = 0;
+	psp->ras.ras_initialized = false;
 
 	/* free ras shared memory */
 	amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
@@ -812,24 +785,6 @@ static int psp_ras_initialize(struct psp_context *psp)
 // ras end
 
 // HDCP start
-static void psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					  uint64_t hdcp_ta_mc,
-					  uint64_t hdcp_mc_shared,
-					  uint32_t hdcp_ta_size,
-					  uint32_t shared_size)
-{
-	cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
-	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc);
-	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc);
-	cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size;
-
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
-		lower_32_bits(hdcp_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
-		upper_32_bits(hdcp_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
 static int psp_hdcp_init_shared_buf(struct psp_context *psp)
 {
 	int ret;
@@ -866,15 +821,16 @@ static int psp_hdcp_load(struct psp_context *psp)
 	memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
 	       psp->ta_hdcp_ucode_size);
 
-	psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
-				      psp->hdcp_context.hdcp_shared_mc_addr,
-				      psp->ta_hdcp_ucode_size,
-				      PSP_HDCP_SHARED_MEM_SIZE);
+	psp_prep_ta_load_cmd_buf(cmd,
+				 psp->fw_pri_mc_addr,
+				 psp->ta_hdcp_ucode_size,
+				 psp->hdcp_context.hdcp_shared_mc_addr,
+				 PSP_HDCP_SHARED_MEM_SIZE);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
 	if (!ret) {
-		psp->hdcp_context.hdcp_initialized = 1;
+		psp->hdcp_context.hdcp_initialized = true;
 		psp->hdcp_context.session_id = cmd->resp.session_id;
 	}
 
@@ -910,12 +866,6 @@ static int psp_hdcp_initialize(struct psp_context *psp)
 
 	return 0;
 }
-static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					    uint32_t hdcp_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
-	cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id;
-}
 
 static int psp_hdcp_unload(struct psp_context *psp)
 {
@@ -932,7 +882,7 @@ static int psp_hdcp_unload(struct psp_context *psp)
 	if (!cmd)
 		return -ENOMEM;
 
-	psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
+	psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
@@ -941,39 +891,15 @@ static int psp_hdcp_unload(struct psp_context *psp)
 	return ret;
 }
 
-static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					    uint32_t ta_cmd_id,
-					    uint32_t hdcp_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
-	cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id;
-	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
-	/* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
 	/*
 	 * TODO: bypass the loading in sriov for now
 	 */
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
-	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id,
-					psp->hdcp_context.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
-	kfree(cmd);
-
-	return ret;
+	return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
 }
 
 static int psp_hdcp_terminate(struct psp_context *psp)
@@ -993,7 +919,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
 	if (ret)
 		return ret;
 
-	psp->hdcp_context.hdcp_initialized = 0;
+	psp->hdcp_context.hdcp_initialized = false;
 
 	/* free hdcp shared memory */
 	amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
@@ -1005,22 +931,6 @@ static int psp_hdcp_terminate(struct psp_context *psp)
 // HDCP end
 
 // DTM start
-static void psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					 uint64_t dtm_ta_mc,
-					 uint64_t dtm_mc_shared,
-					 uint32_t dtm_ta_size,
-					 uint32_t shared_size)
-{
-	cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
-	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc);
-	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(dtm_ta_mc);
-	cmd->cmd.cmd_load_ta.app_len = dtm_ta_size;
-
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
 static int psp_dtm_init_shared_buf(struct psp_context *psp)
 {
 	int ret;
@@ -1056,15 +966,16 @@ static int psp_dtm_load(struct psp_context *psp)
 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
 	memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
 
-	psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
-				     psp->dtm_context.dtm_shared_mc_addr,
-				     psp->ta_dtm_ucode_size,
-				     PSP_DTM_SHARED_MEM_SIZE);
+	psp_prep_ta_load_cmd_buf(cmd,
+				 psp->fw_pri_mc_addr,
+				 psp->ta_dtm_ucode_size,
+				 psp->dtm_context.dtm_shared_mc_addr,
+				 PSP_DTM_SHARED_MEM_SIZE);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
 	if (!ret) {
-		psp->dtm_context.dtm_initialized = 1;
+		psp->dtm_context.dtm_initialized = true;
 		psp->dtm_context.session_id = cmd->resp.session_id;
 	}
 
@@ -1102,39 +1013,15 @@ static int psp_dtm_initialize(struct psp_context *psp)
 	return 0;
 }
 
-static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					   uint32_t ta_cmd_id,
-					   uint32_t dtm_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
-	cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id;
-	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
-	/* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
 	/*
 	 * TODO: bypass the loading in sriov for now
 	 */
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
-	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id,
-				       psp->dtm_context.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
-	kfree(cmd);
-
-	return ret;
+	return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
 }
 
 static int psp_dtm_terminate(struct psp_context *psp)
@@ -1154,7 +1041,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
 	if (ret)
 		return ret;
 
-	psp->dtm_context.dtm_initialized = 0;
+	psp->dtm_context.dtm_initialized = false;
 
 	/* free hdcp shared memory */
 	amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
@@ -1211,45 +1098,6 @@ static int psp_hw_start(struct psp_context *psp)
 		return ret;
 	}
 
-	ret = psp_asd_init(psp);
-	if (ret) {
-		DRM_ERROR("PSP asd init failed!\n");
-		return ret;
-	}
-
-	ret = psp_asd_load(psp);
-	if (ret) {
-		DRM_ERROR("PSP load asd failed!\n");
-		return ret;
-	}
-
-	if (adev->gmc.xgmi.num_physical_nodes > 1) {
-		ret = psp_xgmi_initialize(psp);
-		/* Warning the XGMI seesion initialize failure
-		 * Instead of stop driver initialization
-		 */
-		if (ret)
-			dev_err(psp->adev->dev,
-				"XGMI: Failed to initialize XGMI session\n");
-	}
-
-	if (psp->adev->psp.ta_fw) {
-		ret = psp_ras_initialize(psp);
-		if (ret)
-			dev_err(psp->adev->dev,
-					"RAS: Failed to initialize RAS\n");
-
-		ret = psp_hdcp_initialize(psp);
-		if (ret)
-			dev_err(psp->adev->dev,
-				"HDCP: Failed to initialize HDCP\n");
-
-		ret = psp_dtm_initialize(psp);
-		if (ret)
-			dev_err(psp->adev->dev,
-				"DTM: Failed to initialize DTM\n");
-	}
-
 	return 0;
 }
 
@@ -1329,6 +1177,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
 	case AMDGPU_UCODE_ID_VCN:
 		*type = GFX_FW_TYPE_VCN;
 		break;
+	case AMDGPU_UCODE_ID_VCN1:
+		*type = GFX_FW_TYPE_VCN1;
+		break;
 	case AMDGPU_UCODE_ID_DMCU_ERAM:
 		*type = GFX_FW_TYPE_DMCU_ERAM;
 		break;
@@ -1341,6 +1192,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
 	case AMDGPU_UCODE_ID_VCN1_RAM:
 		*type = GFX_FW_TYPE_VCN1_RAM;
 		break;
+	case AMDGPU_UCODE_ID_DMCUB:
+		*type = GFX_FW_TYPE_DMUB;
+		break;
 	case AMDGPU_UCODE_ID_MAXIMUM:
 	default:
 		return -EINVAL;
@@ -1470,7 +1324,8 @@ static int psp_np_fw_load(struct psp_context *psp)
                     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
 	            || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
 	            || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
-	            || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM))
+	            || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+	            || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
 			/*skip ucode loading in SRIOV VF */
 			continue;
 
@@ -1519,16 +1374,13 @@ static int psp_load_fw(struct amdgpu_device *adev)
 	if (!psp->cmd)
 		return -ENOMEM;
 
-	/* this fw pri bo is not used under SRIOV */
-	if (!amdgpu_sriov_vf(psp->adev)) {
-		ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
-					      AMDGPU_GEM_DOMAIN_GTT,
-					      &psp->fw_pri_bo,
-					      &psp->fw_pri_mc_addr,
-					      &psp->fw_pri_buf);
-		if (ret)
-			goto failed;
-	}
+	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
+					AMDGPU_GEM_DOMAIN_GTT,
+					&psp->fw_pri_bo,
+					&psp->fw_pri_mc_addr,
+					&psp->fw_pri_buf);
+	if (ret)
+		goto failed;
 
 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
 					AMDGPU_GEM_DOMAIN_VRAM,
@@ -1562,6 +1414,39 @@ static int psp_load_fw(struct amdgpu_device *adev)
 	if (ret)
 		goto failed;
 
+	ret = psp_asd_load(psp);
+	if (ret) {
+		DRM_ERROR("PSP load asd failed!\n");
+		return ret;
+	}
+
+	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+		ret = psp_xgmi_initialize(psp);
+		/* Warn on XGMI session initialization failure
+		 * instead of stopping driver initialization
+		 */
+		if (ret)
+			dev_err(psp->adev->dev,
+				"XGMI: Failed to initialize XGMI session\n");
+	}
+
+	if (psp->adev->psp.ta_fw) {
+		ret = psp_ras_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+					"RAS: Failed to initialize RAS\n");
+
+		ret = psp_hdcp_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"HDCP: Failed to initialize HDCP\n");
+
+		ret = psp_dtm_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"DTM: Failed to initialize DTM\n");
+	}
+
 	return 0;
 
 failed:
@@ -1619,6 +1504,8 @@ static int psp_hw_fini(void *handle)
 		psp_hdcp_terminate(psp);
 	}
 
+	psp_asd_unload(psp);
+
 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
@@ -1627,8 +1514,6 @@ static int psp_hw_fini(void *handle)
 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
-	amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr,
-			      &psp->asd_shared_buf);
 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
 			      (void **)&psp->cmd_buf_mem);
 
@@ -1704,6 +1589,39 @@ static int psp_resume(void *handle)
 	if (ret)
 		goto failed;
 
+	ret = psp_asd_load(psp);
+	if (ret) {
+		DRM_ERROR("PSP load asd failed!\n");
+		goto failed;
+	}
+
+	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+		ret = psp_xgmi_initialize(psp);
+		/* Warn on XGMI session initialization failure
+		 * instead of stopping driver initialization
+		 */
+		if (ret)
+			dev_err(psp->adev->dev,
+				"XGMI: Failed to initialize XGMI session\n");
+	}
+
+	if (psp->adev->psp.ta_fw) {
+		ret = psp_ras_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+					"RAS: Failed to initialize RAS\n");
+
+		ret = psp_hdcp_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"HDCP: Failed to initialize HDCP\n");
+
+		ret = psp_dtm_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"DTM: Failed to initialize DTM\n");
+	}
+
 	mutex_unlock(&adev->firmware.mutex);
 
 	return 0;
@@ -1758,6 +1676,56 @@ int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
 	return psp_execute_np_fw_load(&adev->psp, &ucode);
 }
 
+int psp_ring_cmd_submit(struct psp_context *psp,
+			uint64_t cmd_buf_mc_addr,
+			uint64_t fence_mc_addr,
+			int index)
+{
+	unsigned int psp_write_ptr_reg = 0;
+	struct psp_gfx_rb_frame *write_frame;
+	struct psp_ring *ring = &psp->km_ring;
+	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
+	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
+		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
+	struct amdgpu_device *adev = psp->adev;
+	uint32_t ring_size_dw = ring->ring_size / 4;
+	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
+
+	/* KM (GPCOM) prepare write pointer */
+	psp_write_ptr_reg = psp_ring_get_wptr(psp);
+
+	/* Update KM RB frame pointer to new frame */
+	/* write_frame ptr increments by size of rb_frame in bytes */
+	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
+	if ((psp_write_ptr_reg % ring_size_dw) == 0)
+		write_frame = ring_buffer_start;
+	else
+		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
+	/* Check invalid write_frame ptr address */
+	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
+		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+			  ring_buffer_start, ring_buffer_end, write_frame);
+		DRM_ERROR("write_frame is pointing to address out of bounds\n");
+		return -EINVAL;
+	}
+
+	/* Initialize KM RB frame */
+	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
+
+	/* Update KM RB frame */
+	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
+	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
+	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
+	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
+	write_frame->fence_value = index;
+	amdgpu_asic_flush_hdp(adev, NULL);
+
+	/* Update the write Pointer in DWORDs */
+	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
+	psp_ring_set_wptr(psp, psp_write_ptr_reg);
+	return 0;
+}
+
 static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
 					enum AMDGPU_UCODE_ID ucode_type)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 09c5474..6110215 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -32,7 +32,6 @@
 
 #define PSP_FENCE_BUFFER_SIZE	0x1000
 #define PSP_CMD_BUFFER_SIZE	0x1000
-#define PSP_ASD_SHARED_MEM_SIZE 0x4000
 #define PSP_XGMI_SHARED_MEM_SIZE 0x4000
 #define PSP_RAS_SHARED_MEM_SIZE 0x4000
 #define PSP_1_MEG		0x100000
@@ -94,9 +93,6 @@ struct psp_funcs
 			    enum psp_ring_type ring_type);
 	int (*ring_destroy)(struct psp_context *psp,
 			    enum psp_ring_type ring_type);
-	int (*cmd_submit)(struct psp_context *psp,
-			  uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
-			  int index);
 	bool (*compare_sram_data)(struct psp_context *psp,
 				  struct amdgpu_firmware_info *ucode,
 				  enum AMDGPU_UCODE_ID ucode_type);
@@ -116,6 +112,8 @@ struct psp_funcs
 	int (*mem_training_init)(struct psp_context *psp);
 	void (*mem_training_fini)(struct psp_context *psp);
 	int (*mem_training)(struct psp_context *psp, uint32_t ops);
+	uint32_t (*ring_get_wptr)(struct psp_context *psp);
+	void (*ring_set_wptr)(struct psp_context *psp, uint32_t value);
 };
 
 #define AMDGPU_XGMI_MAX_CONNECTED_NODES		64
@@ -131,6 +129,11 @@ struct psp_xgmi_topology_info {
 	struct psp_xgmi_node_info	nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
 };
 
+struct psp_asd_context {
+	bool			asd_initialized;
+	uint32_t		session_id;
+};
+
 struct psp_xgmi_context {
 	uint8_t				initialized;
 	uint32_t			session_id;
@@ -169,6 +172,8 @@ struct psp_dtm_context {
 #define MEM_TRAIN_SYSTEM_SIGNATURE		0x54534942
 #define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES	0x1000
 #define GDDR6_MEM_TRAINING_OFFSET		0x8000
+/* Define the VRAM size that will be encroached by BIST training. */
+#define GDDR6_MEM_TRAINING_ENCROACHED_SIZE	0x2000000
 
 enum psp_memory_training_init_flag {
 	PSP_MEM_TRAIN_NOT_SUPPORT	= 0x0,
@@ -199,7 +204,6 @@ struct psp_memory_training_context {
 
 	/*vram offset of the p2c training data*/
 	u64 p2c_train_data_offset;
-	struct amdgpu_bo *p2c_bo;
 
 	/*vram offset of the c2p training data*/
 	u64 c2p_train_data_offset;
@@ -239,15 +243,12 @@ struct psp_context
 	struct amdgpu_bo		*tmr_bo;
 	uint64_t			tmr_mc_addr;
 
-	/* asd firmware and buffer */
+	/* asd firmware */
 	const struct firmware		*asd_fw;
 	uint32_t			asd_fw_version;
 	uint32_t			asd_feature_version;
 	uint32_t			asd_ucode_size;
 	uint8_t				*asd_start_addr;
-	struct amdgpu_bo		*asd_shared_bo;
-	uint64_t			asd_shared_mc_addr;
-	void				*asd_shared_buf;
 
 	/* fence buffer */
 	struct amdgpu_bo		*fence_buf_bo;
@@ -282,6 +283,7 @@ struct psp_context
 	uint32_t			ta_dtm_ucode_size;
 	uint8_t				*ta_dtm_start_addr;
 
+	struct psp_asd_context		asd_context;
 	struct psp_xgmi_context		xgmi_context;
 	struct psp_ras_context		ras;
 	struct psp_hdcp_context 	hdcp_context;
@@ -300,8 +302,6 @@ struct amdgpu_psp_funcs {
 #define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
 #define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
 #define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
-#define psp_cmd_submit(psp, cmd_mc, fence_mc, index) \
-		(psp)->funcs->cmd_submit((psp), (cmd_mc), (fence_mc), (index))
 #define psp_compare_sram_data(psp, ucode, type) \
 		(psp)->funcs->compare_sram_data((psp), (ucode), (type))
 #define psp_init_microcode(psp) \
@@ -346,6 +346,9 @@ struct amdgpu_psp_funcs {
 	((psp)->funcs->ras_cure_posion ? \
 	(psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL)
 
+#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
+#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
+
 extern const struct amd_ip_funcs psp_ip_funcs;
 
 extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -372,4 +375,8 @@ int psp_rlc_autoload_start(struct psp_context *psp);
 extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
 		uint32_t value);
+int psp_ring_cmd_submit(struct psp_context *psp,
+			uint64_t cmd_buf_mc_addr,
+			uint64_t fence_mc_addr,
+			int index);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 4044834..cef94e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -198,9 +198,6 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
 	return 0;
 }
 
-static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
-		struct ras_common_if *head);
-
 /**
  * DOC: AMDGPU RAS debugfs control interface
  *
@@ -318,7 +315,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
 	default:
 		ret = -EINVAL;
 		break;
-	};
+	}
 
 	if (ret)
 		return -EINVAL;
@@ -445,7 +442,7 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
 }
 
 /* return an obj equal to head, or the first when head is NULL */
-static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
 		struct ras_common_if *head)
 {
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -689,6 +686,7 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
 {
 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
 	struct ras_err_data err_data = {0, 0, 0, NULL};
+	int i;
 
 	if (!obj)
 		return -EINVAL;
@@ -703,6 +701,13 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
 		if (adev->umc.funcs->query_ras_error_address)
 			adev->umc.funcs->query_ras_error_address(adev, &err_data);
 		break;
+	case AMDGPU_RAS_BLOCK__SDMA:
+		if (adev->sdma.funcs->query_ras_error_count) {
+			for (i = 0; i < adev->sdma.num_instances; i++)
+				adev->sdma.funcs->query_ras_error_count(adev, i,
+									&err_data);
+		}
+		break;
 	case AMDGPU_RAS_BLOCK__GFX:
 		if (adev->gfx.funcs->query_ras_error_count)
 			adev->gfx.funcs->query_ras_error_count(adev, &err_data);
@@ -737,6 +742,20 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
 	return 0;
 }
 
+uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr)
+{
+	uint32_t df_inst_id;
+
+	if ((!adev->df.funcs)                 ||
+	    (!adev->df.funcs->get_df_inst_id) ||
+	    (!adev->df.funcs->get_dram_base_addr))
+		return addr;
+
+	df_inst_id = adev->df.funcs->get_df_inst_id(adev);
+
+	return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id);
+}
+
 /* wrapper of psp_ras_trigger_error */
 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 		struct ras_inject_if *info)
@@ -754,6 +773,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 	if (!obj)
 		return -EINVAL;
 
+	/* Calculate XGMI relative offset */
+	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+		block_info.address = get_xgmi_relative_phy_addr(adev,
+								block_info.address);
+	}
+
 	switch (info->head.block) {
 	case AMDGPU_RAS_BLOCK__GFX:
 		if (adev->gfx.funcs->ras_error_inject)
@@ -1314,6 +1339,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
 	data = con->eh_data;
 	if (!data || data->count == 0) {
 		*bps = NULL;
+		ret = -EINVAL;
 		goto out;
 	}
 
@@ -1347,7 +1373,8 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
 	struct amdgpu_ras *ras =
 		container_of(work, struct amdgpu_ras, recovery_work);
 
-	amdgpu_device_gpu_recover(ras->adev, 0);
+	if (amdgpu_device_should_recover_gpu(ras->adev))
+		amdgpu_device_gpu_recover(ras->adev, 0);
 	atomic_set(&ras->in_recovery, 0);
 }
 
@@ -1687,7 +1714,8 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
 	*supported = 0;
 
 	if (amdgpu_sriov_vf(adev) ||
-			adev->asic_type != CHIP_VEGA20)
+	    (adev->asic_type != CHIP_VEGA20 &&
+	     adev->asic_type != CHIP_ARCTURUS))
 		return;
 
 	if (adev->is_atom_fw &&
@@ -1872,7 +1900,7 @@ void amdgpu_ras_resume(struct amdgpu_device *adev)
 		 * See feature_enable_on_boot
 		 */
 		amdgpu_ras_disable_all_features(adev, 1);
-		amdgpu_ras_reset_gpu(adev, 0);
+		amdgpu_ras_reset_gpu(adev);
 	}
 }
 
@@ -1935,6 +1963,6 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
 	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
 		DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
 
-		amdgpu_ras_reset_gpu(adev, false);
+		amdgpu_ras_reset_gpu(adev);
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index f80fd34..a5fe29a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -494,8 +494,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
 
 int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
 
-static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
-		bool is_baco)
+static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
 {
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
@@ -611,6 +610,9 @@ int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
 		struct ras_dispatch_if *info);
 
+struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+		struct ras_common_if *head);
+
 extern atomic_t amdgpu_ras_in_intr;
 
 static inline bool amdgpu_ras_intr_triggered(void)
@@ -618,6 +620,11 @@ static inline bool amdgpu_ras_intr_triggered(void)
 	return !!atomic_read(&amdgpu_ras_in_intr);
 }
 
+static inline void amdgpu_ras_intr_cleared(void)
+{
+	atomic_set(&amdgpu_ras_in_intr, 0);
+}
+
 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 6010999..a2ee30b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -160,7 +160,7 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
 		struct amdgpu_iv_entry *entry)
 {
 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-	amdgpu_ras_reset_gpu(adev, 0);
+	amdgpu_ras_reset_gpu(adev);
 
 	return AMDGPU_RAS_SUCCESS;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 761ff8b..4853352 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -50,8 +50,18 @@ struct amdgpu_sdma_instance {
 	bool			burst_nop;
 };
 
+struct amdgpu_sdma_ras_funcs {
+	int (*ras_late_init)(struct amdgpu_device *adev,
+			void *ras_ih_info);
+	void (*ras_fini)(struct amdgpu_device *adev);
+	int (*query_ras_error_count)(struct amdgpu_device *adev,
+			uint32_t instance, void *ras_error_status);
+};
+
 struct amdgpu_sdma {
 	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+	struct drm_gpu_scheduler    *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES];
+	uint32_t		    num_sdma_sched;
 	struct amdgpu_irq_src	trap_irq;
 	struct amdgpu_irq_src	illegal_inst_irq;
 	struct amdgpu_irq_src	ecc_irq;
@@ -59,6 +69,7 @@ struct amdgpu_sdma {
 	uint32_t                    srbm_soft_reset;
 	bool			has_page_queue;
 	struct ras_common_if	*ras_if;
+	const struct amdgpu_sdma_ras_funcs	*funcs;
 };
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 95e5e93..a09b6b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -129,7 +129,8 @@ static void amdgpu_sync_keep_later(struct dma_fence **keep,
  * Tries to add the fence to an existing hash entry. Returns true when an entry
  * was found, false otherwise.
  */
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
+				  bool explicit)
 {
 	struct amdgpu_sync_entry *e;
 
@@ -151,19 +152,18 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
  * amdgpu_sync_fence - remember to sync to this fence
  *
  * @sync: sync object to add fence to
- * @fence: fence to sync to
+ * @f: fence to sync to
+ * @explicit: if this is an explicit dependency
  *
+ * Add the fence to the sync object.
  */
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct dma_fence *f, bool explicit)
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+		      bool explicit)
 {
 	struct amdgpu_sync_entry *e;
 
 	if (!f)
 		return 0;
-	if (amdgpu_sync_same_dev(adev, f) &&
-	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
-		amdgpu_sync_keep_later(&sync->last_vm_update, f);
 
 	if (amdgpu_sync_add_later(sync, f, explicit))
 		return 0;
@@ -180,6 +180,24 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 }
 
 /**
+ * amdgpu_sync_vm_fence - remember to sync to this VM fence
+ *
+ * @adev: amdgpu device
+ * @sync: sync object to add fence to
+ * @fence: the VM fence to add
+ *
+ * Add the fence to the sync object and remember it as VM update.
+ */
+int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
+{
+	if (!fence)
+		return 0;
+
+	amdgpu_sync_keep_later(&sync->last_vm_update, fence);
+	return amdgpu_sync_fence(sync, fence, false);
+}
+
+/**
  * amdgpu_sync_resv - sync to a reservation object
  *
  * @sync: sync object to add fences from reservation object to
@@ -204,7 +222,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 
 	/* always sync to the exclusive fence */
 	f = dma_resv_get_excl(resv);
-	r = amdgpu_sync_fence(adev, sync, f, false);
+	r = amdgpu_sync_fence(sync, f, false);
 
 	flist = dma_resv_get_list(resv);
 	if (!flist || r)
@@ -222,13 +240,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 			continue;
 
 		if (amdgpu_sync_same_dev(adev, f)) {
-			/* VM updates are only interesting
-			 * for other VM updates and moves.
+			/* VM updates only sync with moves but not with user
+			 * command submissions or KFD eviction fences
 			 */
-			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
-			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
-			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
-			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
+			if (owner == AMDGPU_FENCE_OWNER_VM &&
+			    fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
 				continue;
 
 			/* Ignore fence from the same owner and explicit one as
@@ -239,7 +255,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 				continue;
 		}
 
-		r = amdgpu_sync_fence(adev, sync, f, false);
+		r = amdgpu_sync_fence(sync, f, false);
 		if (r)
 			break;
 	}
@@ -340,7 +356,7 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
 	hash_for_each_safe(source->fences, i, tmp, e, node) {
 		f = e->fence;
 		if (!dma_fence_is_signaled(f)) {
-			r = amdgpu_sync_fence(NULL, clone, f, e->explicit);
+			r = amdgpu_sync_fence(clone, f, e->explicit);
 			if (r)
 				return r;
 		} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index b5f1778..d62c2b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -40,8 +40,9 @@ struct amdgpu_sync {
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct dma_fence *f, bool explicit);
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+		      bool explicit);
+int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
 		     struct dma_resv *resv,
@@ -49,7 +50,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     bool explicit_sync);
 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 				     struct amdgpu_ring *ring);
-struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync,
+					bool *explicit);
 int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 2616e2e..dee4462 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -41,6 +41,7 @@
 #include <linux/swap.h>
 #include <linux/swiotlb.h>
 #include <linux/dma-buf.h>
+#include <linux/sizes.h>
 
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
@@ -1522,11 +1523,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	struct dma_fence *f;
 	int i;
 
-	/* Don't evict VM page tables while they are busy, otherwise we can't
-	 * cleanly handle page faults.
-	 */
 	if (bo->type == ttm_bo_type_kernel &&
-	    !dma_resv_test_signaled_rcu(bo->base.resv, true))
+	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
 		return false;
 
 	/* If bo is a KFD BO, check if the bo belongs to the current process.
@@ -1717,12 +1715,17 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
 	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
 	ctx->c2p_bo = NULL;
 
-	amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL);
-	ctx->p2c_bo = NULL;
-
 	return 0;
 }
 
+static u64 amdgpu_ttm_training_get_c2p_offset(u64 vram_size)
+{
+	if ((vram_size & (SZ_1M - 1)) < (SZ_4K + 1))
+		vram_size -= SZ_1M;
+
+	return ALIGN(vram_size, SZ_1M);
+}
+
 /**
  * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training
  *
@@ -1741,7 +1744,7 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
 		return 0;
 	}
 
-	ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc;
+	ctx->c2p_train_data_offset = amdgpu_ttm_training_get_c2p_offset(adev->gmc.mc_vram_size);
 	ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
 	ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
 
@@ -1751,17 +1754,6 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
 		  ctx->c2p_train_data_offset);
 
 	ret = amdgpu_bo_create_kernel_at(adev,
-					 ctx->p2c_train_data_offset,
-					 ctx->train_data_size,
-					 AMDGPU_GEM_DOMAIN_VRAM,
-					 &ctx->p2c_bo,
-					 NULL);
-	if (ret) {
-		DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret);
-		goto Err_out;
-	}
-
-	ret = amdgpu_bo_create_kernel_at(adev,
 					 ctx->c2p_train_data_offset,
 					 ctx->train_data_size,
 					 AMDGPU_GEM_DOMAIN_VRAM,
@@ -1769,15 +1761,12 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
 					 NULL);
 	if (ret) {
 		DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
-		goto Err_out;
+		amdgpu_ttm_training_reserve_vram_fini(adev);
+		return ret;
 	}
 
 	ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
 	return 0;
-
-Err_out:
-	amdgpu_ttm_training_reserve_vram_fini(adev);
-	return ret;
 }
 
 /**
@@ -1990,11 +1979,13 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 
 	if (enable) {
 		struct amdgpu_ring *ring;
-		struct drm_sched_rq *rq;
+		struct drm_gpu_scheduler *sched;
 
 		ring = adev->mman.buffer_funcs_ring;
-		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-		r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+		sched = &ring->sched;
+		r = drm_sched_entity_init(&adev->mman.entity,
+				          DRM_SCHED_PRIORITY_KERNEL, &sched,
+					  1, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
 				  r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 833fc4b..9ef3124 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -447,6 +447,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 	const struct common_firmware_header *header = NULL;
 	const struct gfx_firmware_header_v1_0 *cp_hdr = NULL;
 	const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL;
+	const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL;
 
 	if (NULL == ucode->fw)
 		return 0;
@@ -460,6 +461,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 	header = (const struct common_firmware_header *)ucode->fw->data;
 	cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
 	dmcu_hdr = (const struct dmcu_firmware_header_v1_0 *)ucode->fw->data;
+	dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data;
 
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP ||
 	    (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
@@ -470,7 +472,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
 	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM &&
 		 ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM &&
-		 ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV)) {
+		 ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV &&
+		 ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) {
 		ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
 
 		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
@@ -506,6 +509,12 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 					      le32_to_cpu(header->ucode_array_offset_bytes) +
 					      le32_to_cpu(dmcu_hdr->intv_offset_bytes)),
 		       ucode->ucode_size);
+	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCUB) {
+		ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes);
+		memcpy(ucode->kaddr,
+		       (void *)((uint8_t *)ucode->fw->data +
+				le32_to_cpu(header->ucode_array_offset_bytes)),
+		       ucode->ucode_size);
 	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) {
 		ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
 		memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 914acec..b0e6564 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -251,6 +251,13 @@ struct dmcu_firmware_header_v1_0 {
 	uint32_t intv_size_bytes;  /* size of interrupt vectors, in bytes */
 };
 
+/* version_major=1, version_minor=0 */
+struct dmcub_firmware_header_v1_0 {
+	struct common_firmware_header header;
+	uint32_t inst_const_bytes; /* size of instruction region, in bytes */
+	uint32_t bss_data_bytes; /* size of bss/data region, in bytes */
+};
+
 /* header is fixed size */
 union amdgpu_firmware_header {
 	struct common_firmware_header common;
@@ -268,6 +275,7 @@ union amdgpu_firmware_header {
 	struct sdma_firmware_header_v1_1 sdma_v1_1;
 	struct gpu_info_firmware_header_v1_0 gpu_info;
 	struct dmcu_firmware_header_v1_0 dmcu;
+	struct dmcub_firmware_header_v1_0 dmcub;
 	uint8_t raw[0x100];
 };
 
@@ -307,6 +315,7 @@ enum AMDGPU_UCODE_ID {
 	AMDGPU_UCODE_ID_DMCU_INTV,
 	AMDGPU_UCODE_ID_VCN0_RAM,
 	AMDGPU_UCODE_ID_VCN1_RAM,
+	AMDGPU_UCODE_ID_DMCUB,
 	AMDGPU_UCODE_ID_MAXIMUM,
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index d4fb9cf..f4d4085 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -95,13 +95,6 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
 {
 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
 
-	/* When “Full RAS” is enabled, the per-IP interrupt sources should
-	 * be disabled and the driver should only look for the aggregated
-	 * interrupt via sync flood
-	 */
-	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
-		return AMDGPU_RAS_SUCCESS;
-
 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
 	if (adev->umc.funcs &&
 	    adev->umc.funcs->query_ras_error_count)
@@ -113,6 +106,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
 		err_data->err_addr =
 			kcalloc(adev->umc.max_ras_err_cnt_per_query,
 				sizeof(struct eeprom_table_record), GFP_KERNEL);
+
 		/* still call query_ras_error_address to clear error status
 		 * even NOMEM error is encountered
 		 */
@@ -132,7 +126,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
 						err_data->err_addr_cnt))
 			DRM_WARN("Failed to add ras bad page!\n");
 
-		amdgpu_ras_reset_gpu(adev, 0);
+		amdgpu_ras_reset_gpu(adev);
 	}
 
 	kfree(err_data->err_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 3283032..a615a1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -21,38 +21,6 @@
 #ifndef __AMDGPU_UMC_H__
 #define __AMDGPU_UMC_H__
 
-/* implement 64 bits REG operations via 32 bits interface */
-#define RREG64_UMC(reg)	(RREG32(reg) | \
-				((uint64_t)RREG32((reg) + 1) << 32))
-#define WREG64_UMC(reg, v)	\
-	do {	\
-		WREG32((reg), lower_32_bits(v));	\
-		WREG32((reg) + 1, upper_32_bits(v));	\
-	} while (0)
-
-/*
- * void (*func)(struct amdgpu_device *adev, struct ras_err_data *err_data,
- *				uint32_t umc_reg_offset, uint32_t channel_index)
- */
-#define amdgpu_umc_for_each_channel(func)	\
-	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;	\
-	uint32_t umc_inst, channel_inst, umc_reg_offset, channel_index;	\
-	for (umc_inst = 0; umc_inst < adev->umc.umc_inst_num; umc_inst++) {	\
-		/* enable the index mode to query eror count per channel */	\
-		adev->umc.funcs->enable_umc_index_mode(adev, umc_inst);	\
-		for (channel_inst = 0;	\
-			channel_inst < adev->umc.channel_inst_num;	\
-			channel_inst++) {	\
-			/* calc the register offset according to channel instance */	\
-			umc_reg_offset = adev->umc.channel_offs * channel_inst;	\
-			/* get channel index of interleaved memory */	\
-			channel_index = adev->umc.channel_idx_tbl[	\
-				umc_inst * adev->umc.channel_inst_num + channel_inst];	\
-			(func)(adev, err_data, umc_reg_offset, channel_index);	\
-		}	\
-	}	\
-	adev->umc.funcs->disable_umc_index_mode(adev);
-
 struct amdgpu_umc_funcs {
 	void (*err_cnt_init)(struct amdgpu_device *adev);
 	int (*ras_late_init)(struct amdgpu_device *adev);
@@ -60,9 +28,6 @@ struct amdgpu_umc_funcs {
 					void *ras_error_status);
 	void (*query_ras_error_address)(struct amdgpu_device *adev,
 					void *ras_error_status);
-	void (*enable_umc_index_mode)(struct amdgpu_device *adev,
-					uint32_t umc_instance);
-	void (*disable_umc_index_mode)(struct amdgpu_device *adev);
 	void (*init_registers)(struct amdgpu_device *adev);
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e324bfe..a92f3b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -330,12 +330,13 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
+	struct drm_gpu_scheduler *sched;
 	int r;
 
 	ring = &adev->uvd.inst[0].ring;
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+	sched = &ring->sched;
+	r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
+				  &sched, 1, NULL);
 	if (r) {
 		DRM_ERROR("Failed setting up UVD kernel entity.\n");
 		return r;
@@ -349,6 +350,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	unsigned size;
 	void *ptr;
 	int i, j;
+	bool in_ras_intr = amdgpu_ras_intr_triggered();
 
 	cancel_delayed_work_sync(&adev->uvd.idle_work);
 
@@ -376,13 +378,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 			return -ENOMEM;
 
 		/* re-write 0 since err_event_athub will corrupt VCPU buffer */
-		if (amdgpu_ras_intr_triggered()) {
-			DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
+		if (in_ras_intr)
 			memset(adev->uvd.inst[j].saved_bo, 0, size);
-		} else {
+		else
 			memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
-		}
 	}
+
+	if (in_ras_intr)
+		DRM_WARN("UVD VCPU state may be lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 46b590a..59ddba1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -240,12 +240,13 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 int amdgpu_vce_entity_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
+	struct drm_gpu_scheduler *sched;
 	int r;
 
 	ring = &adev->vce.ring[0];
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+	sched = &ring->sched;
+	r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
+				  &sched, 1, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up VCE run queue.\n");
 		return r;
@@ -651,7 +652,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 
 	if ((addr + (uint64_t)size) >
 	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
-		DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
+		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
 			  addr, lo, hi);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 9d87044..f96464e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -28,19 +28,10 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 
-#include <drm/drm.h>
-
 #include "amdgpu.h"
 #include "amdgpu_pm.h"
 #include "amdgpu_vcn.h"
 #include "soc15d.h"
-#include "soc15_common.h"
-
-#include "vcn/vcn_1_0_offset.h"
-#include "vcn/vcn_1_0_sh_mask.h"
-
-/* 1 second timeout */
-#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)
 
 /* Firmware Names */
 #define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
@@ -84,6 +75,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		break;
 	case CHIP_ARCTURUS:
 		fw_name = FIRMWARE_ARCTURUS;
+		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+			adev->vcn.indirect_sram = true;
 		break;
 	case CHIP_RENOIR:
 		fw_name = FIRMWARE_RENOIR;
@@ -174,15 +168,15 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
 			return r;
 		}
-	}
 
-	if (adev->vcn.indirect_sram) {
-		r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
-			    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo,
-			    &adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr);
-		if (r) {
-			dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r);
-			return r;
+		if (adev->vcn.indirect_sram) {
+			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
+					AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
+					&adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
+			if (r) {
+				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
+				return r;
+			}
 		}
 	}
 
@@ -195,15 +189,14 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 
 	cancel_delayed_work_sync(&adev->vcn.idle_work);
 
-	if (adev->vcn.indirect_sram) {
-		amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
-				      &adev->vcn.dpg_sram_gpu_addr,
-				      (void **)&adev->vcn.dpg_sram_cpu_addr);
-	}
-
 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 		if (adev->vcn.harvest_config & (1 << j))
 			continue;
+		if (adev->vcn.indirect_sram) {
+			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
+						  &adev->vcn.inst[j].dpg_sram_gpu_addr,
+						  (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
+		}
 		kvfree(adev->vcn.inst[j].saved_bo);
 
 		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
@@ -214,8 +207,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 
 		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
-
-		amdgpu_ring_fini(&adev->vcn.inst[j].ring_jpeg);
 	}
 
 	release_firmware(adev->vcn.fw);
@@ -296,6 +287,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 		if (adev->vcn.harvest_config & (1 << j))
 			continue;
+
 		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
 		}
@@ -308,26 +300,17 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 			else
 				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-			if (amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg))
-				new_state.jpeg = VCN_DPG_STATE__PAUSE;
-			else
-				new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
-
-			adev->vcn.pause_dpg_mode(adev, &new_state);
+			adev->vcn.pause_dpg_mode(adev, j, &new_state);
 		}
 
-		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg);
 		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
 		fences += fence[j];
 	}
 
 	if (fences == 0) {
 		amdgpu_gfx_off_ctrl(adev, true);
-		if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled)
-			amdgpu_dpm_enable_uvd(adev, false);
-		else
-			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
-							       AMD_PG_STATE_GATE);
+		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+		       AMD_PG_STATE_GATE);
 	} else {
 		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 	}
@@ -340,11 +323,8 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 
 	if (set_clocks) {
 		amdgpu_gfx_off_ctrl(adev, false);
-		if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled)
-			amdgpu_dpm_enable_uvd(adev, true);
-		else
-			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
-							       AMD_PG_STATE_UNGATE);
+		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+		       AMD_PG_STATE_UNGATE);
 	}
 
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)	{
@@ -360,17 +340,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 		else
 			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-		if (amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_jpeg))
-			new_state.jpeg = VCN_DPG_STATE__PAUSE;
-		else
-			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
-
 		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
 			new_state.fw_based = VCN_DPG_STATE__PAUSE;
-		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
-			new_state.jpeg = VCN_DPG_STATE__PAUSE;
 
-		adev->vcn.pause_dpg_mode(adev, &new_state);
+		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
 	}
 }
 
@@ -520,9 +493,14 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 
 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
+	struct amdgpu_device *adev = ring->adev;
 	struct dma_fence *fence;
 	long r;
 
+	/* temporarily disable ib test for sriov */
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
 	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
 	if (r)
 		goto error;
@@ -678,10 +656,15 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
+	struct amdgpu_device *adev = ring->adev;
 	struct dma_fence *fence = NULL;
 	struct amdgpu_bo *bo = NULL;
 	long r;
 
+	/* temporarily disable ib test for sriov */
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
 	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
 				      AMDGPU_GEM_DOMAIN_VRAM,
 				      &bo, NULL, NULL);
@@ -708,108 +691,3 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	amdgpu_bo_unref(&bo);
 	return r;
 }
-
-int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-	uint32_t tmp = 0;
-	unsigned i;
-	int r;
-
-	WREG32(adev->vcn.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD);
-	r = amdgpu_ring_alloc(ring, 3);
-	if (r)
-		return r;
-
-	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0));
-	amdgpu_ring_write(ring, 0xDEADBEEF);
-	amdgpu_ring_commit(ring);
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch);
-		if (tmp == 0xDEADBEEF)
-			break;
-		udelay(1);
-	}
-
-	if (i >= adev->usec_timeout)
-		r = -ETIMEDOUT;
-
-	return r;
-}
-
-static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
-		struct dma_fence **fence)
-{
-	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_job *job;
-	struct amdgpu_ib *ib;
-	struct dma_fence *f = NULL;
-	const unsigned ib_size_dw = 16;
-	int i, r;
-
-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
-	if (r)
-		return r;
-
-	ib = &job->ibs[0];
-
-	ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
-	ib->ptr[1] = 0xDEADBEEF;
-	for (i = 2; i < 16; i += 2) {
-		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
-		ib->ptr[i+1] = 0;
-	}
-	ib->length_dw = 16;
-
-	r = amdgpu_job_submit_direct(job, ring, &f);
-	if (r)
-		goto err;
-
-	if (fence)
-		*fence = dma_fence_get(f);
-	dma_fence_put(f);
-
-	return 0;
-
-err:
-	amdgpu_job_free(job);
-	return r;
-}
-
-int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
-{
-	struct amdgpu_device *adev = ring->adev;
-	uint32_t tmp = 0;
-	unsigned i;
-	struct dma_fence *fence = NULL;
-	long r = 0;
-
-	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
-	if (r)
-		goto error;
-
-	r = dma_fence_wait_timeout(fence, false, timeout);
-	if (r == 0) {
-		r = -ETIMEDOUT;
-		goto error;
-	} else if (r < 0) {
-		goto error;
-	} else {
-		r = 0;
-	}
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch);
-		if (tmp == 0xDEADBEEF)
-			break;
-		udelay(1);
-	}
-
-	if (i >= adev->usec_timeout)
-		r = -ETIMEDOUT;
-
-	dma_fence_put(fence);
-error:
-	return r;
-}
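
With DPG state now tracked per VCN instance, the pause_dpg_mode() callback takes an instance index and the DPG SRAM buffer lives in each amdgpu_vcn_inst. A hedged sketch of how a caller can drive every active instance independently (the loop shape follows the idle-work handler above; the callback body itself is hardware specific):

	struct dpg_pause_state new_state = { .fw_based = VCN_DPG_STATE__UNPAUSE };
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;	/* skip harvested instances */
		adev->vcn.pause_dpg_mode(adev, i, &new_state);
	}
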
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index dface27..d6deb0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -31,6 +31,7 @@
 #define AMDGPU_VCN_MAX_ENC_RINGS	3
 
 #define AMDGPU_MAX_VCN_INSTANCES	2
+#define AMDGPU_MAX_VCN_ENC_RINGS  (AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES)
 
 #define AMDGPU_VCN_HARVEST_VCN0 (1 << 0)
 #define AMDGPU_VCN_HARVEST_VCN1 (1 << 1)
@@ -56,33 +57,41 @@
 #define VCN_VID_IP_ADDRESS_2_0		0x0
 #define VCN_AON_IP_ADDRESS_2_0		0x30000
 
-#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) 				\
-	({	WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); 			\
-		WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, 				\
+#define mmUVD_RBC_XX_IB_REG_CHECK 					0x026b
+#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 				1
+#define mmUVD_REG_XX_MASK 						0x026c
+#define mmUVD_REG_XX_MASK_BASE_IDX 					1
+
+/* 1 second timeout */
+#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)
+
+#define RREG32_SOC15_DPG_MODE(ip, inst_idx, reg, mask, sram_sel) 			\
+	({	WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); 			\
+		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, 				\
 			UVD_DPG_LMA_CTL__MASK_EN_MASK | 				\
-			((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) 	\
+			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) 	\
 			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | 			\
 			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); 		\
-		RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); 				\
+		RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); 			\
 	})
 
-#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) 			\
+#define WREG32_SOC15_DPG_MODE(ip, inst_idx, reg, value, mask, sram_sel) 		\
 	do { 										\
-		WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); 			\
-		WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); 			\
-		WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, 				\
+		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value); 			\
+		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); 			\
+		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, 				\
 			UVD_DPG_LMA_CTL__READ_WRITE_MASK | 				\
-			((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) 	\
+			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) 	\
 			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | 			\
 			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); 		\
 	} while (0)
 
-#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst, reg) 						\
+#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst_idx, reg) 						\
 	({											\
 		uint32_t internal_reg_offset, addr;						\
 		bool video_range, aon_range;							\
 												\
-		addr = (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg);		\
+		addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg);		\
 		addr <<= 2; 									\
 		video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) && 		\
 				((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600)))));	\
@@ -100,27 +109,27 @@
 		internal_reg_offset >>= 2;							\
 	})
 
-#define RREG32_SOC15_DPG_MODE_2_0(offset, mask_en) 						\
-	({ 											\
-		WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, 					\
-			(0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | 				\
-			mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | 				\
-			offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); 			\
-		RREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA); 					\
+#define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en) 					\
+	({											\
+		WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, 					\
+			(0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |				\
+			mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |				\
+			offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));			\
+		RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA);				\
 	})
 
-#define WREG32_SOC15_DPG_MODE_2_0(offset, value, mask_en, indirect)				\
-	do { 											\
-		if (!indirect) { 								\
-			WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA, value); 			\
-			WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, 				\
-				(0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | 			\
-				 mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | 			\
-				 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); 		\
-		} else { 									\
-			*adev->vcn.dpg_sram_curr_addr++ = offset; 				\
-			*adev->vcn.dpg_sram_curr_addr++ = value; 				\
-		} 										\
+#define WREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, value, mask_en, indirect)			\
+	do {											\
+		if (!indirect) {								\
+			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA, value);			\
+			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, 				\
+				(0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |			\
+				 mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |			\
+				 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));		\
+		} else {									\
+			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset;		\
+			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value;			\
+		}										\
 	} while (0)
 
 enum engine_status_constants {
@@ -158,7 +167,6 @@ struct amdgpu_vcn_reg{
 	unsigned	ib_size;
 	unsigned	gp_scratch8;
 	unsigned	scratch9;
-	unsigned	jpeg_pitch;
 };
 
 struct amdgpu_vcn_inst {
@@ -168,9 +176,12 @@ struct amdgpu_vcn_inst {
 	void			*saved_bo;
 	struct amdgpu_ring	ring_dec;
 	struct amdgpu_ring	ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
-	struct amdgpu_ring	ring_jpeg;
 	struct amdgpu_irq_src	irq;
 	struct amdgpu_vcn_reg	external;
+	struct amdgpu_bo	*dpg_sram_bo;
+	void			*dpg_sram_cpu_addr;
+	uint64_t		dpg_sram_gpu_addr;
+	uint32_t		*dpg_sram_curr_addr;
 };
 
 struct amdgpu_vcn {
@@ -182,18 +193,18 @@ struct amdgpu_vcn {
 	struct dpg_pause_state pause_state;
 
 	bool			indirect_sram;
-	struct amdgpu_bo	*dpg_sram_bo;
-	void			*dpg_sram_cpu_addr;
-	uint64_t		dpg_sram_gpu_addr;
-	uint32_t		*dpg_sram_curr_addr;
 
 	uint8_t	num_vcn_inst;
-	struct amdgpu_vcn_inst	inst[AMDGPU_MAX_VCN_INSTANCES];
-	struct amdgpu_vcn_reg	internal;
+	struct amdgpu_vcn_inst	 inst[AMDGPU_MAX_VCN_INSTANCES];
+	struct amdgpu_vcn_reg	 internal;
+	struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
+	struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
+	uint32_t		 num_vcn_enc_sched;
+	uint32_t		 num_vcn_dec_sched;
 
 	unsigned	harvest_config;
 	int (*pause_dpg_mode)(struct amdgpu_device *adev,
-		struct dpg_pause_state *new_state);
+		int inst_idx, struct dpg_pause_state *new_state);
 };
 
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
@@ -209,7 +220,4 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 
-int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring);
-int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout);
-
 #endif
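
For orientation, a sketch of how the reworked write macro is used now that it carries the instance index (the register below is only an example): when indirect is non-zero the offset/value pair is appended to that instance's DPG SRAM image through dpg_sram_curr_addr instead of being written through the LMA window.

	/* illustrative: program a register for instance inst_idx, directly or indirectly */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx,
		SOC15_DPG_MODE_OFFSET_2_0(UVD, inst_idx, mmUVD_VCPU_CNTL),
		tmp, 0, indirect);
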
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index e32ae90..adc813c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -45,98 +45,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 	adev->pg_flags = 0;
 }
 
-uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
-{
-	signed long r, cnt = 0;
-	unsigned long flags;
-	uint32_t seq;
-	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-	struct amdgpu_ring *ring = &kiq->ring;
-
-	BUG_ON(!ring->funcs->emit_rreg);
-
-	spin_lock_irqsave(&kiq->ring_lock, flags);
-	amdgpu_ring_alloc(ring, 32);
-	amdgpu_ring_emit_rreg(ring, reg);
-	amdgpu_fence_emit_polling(ring, &seq);
-	amdgpu_ring_commit(ring);
-	spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
-	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
-	/* don't wait anymore for gpu reset case because this way may
-	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
-	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
-	 * never return if we keep waiting in virt_kiq_rreg, which cause
-	 * gpu_recover() hang there.
-	 *
-	 * also don't wait anymore for IRQ context
-	 * */
-	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
-		goto failed_kiq_read;
-
-	might_sleep();
-	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
-		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-	}
-
-	if (cnt > MAX_KIQ_REG_TRY)
-		goto failed_kiq_read;
-
-	return adev->wb.wb[adev->virt.reg_val_offs];
-
-failed_kiq_read:
-	pr_err("failed to read reg:%x\n", reg);
-	return ~0;
-}
-
-void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
-{
-	signed long r, cnt = 0;
-	unsigned long flags;
-	uint32_t seq;
-	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-	struct amdgpu_ring *ring = &kiq->ring;
-
-	BUG_ON(!ring->funcs->emit_wreg);
-
-	spin_lock_irqsave(&kiq->ring_lock, flags);
-	amdgpu_ring_alloc(ring, 32);
-	amdgpu_ring_emit_wreg(ring, reg, v);
-	amdgpu_fence_emit_polling(ring, &seq);
-	amdgpu_ring_commit(ring);
-	spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
-	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
-	/* don't wait anymore for gpu reset case because this way may
-	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
-	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
-	 * never return if we keep waiting in virt_kiq_rreg, which cause
-	 * gpu_recover() hang there.
-	 *
-	 * also don't wait anymore for IRQ context
-	 * */
-	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
-		goto failed_kiq_write;
-
-	might_sleep();
-	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-
-		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
-		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-	}
-
-	if (cnt > MAX_KIQ_REG_TRY)
-		goto failed_kiq_write;
-
-	return;
-
-failed_kiq_write:
-	pr_err("failed to write reg:%x\n", reg);
-}
-
 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
 					uint32_t reg0, uint32_t reg1,
 					uint32_t ref, uint32_t mask)
@@ -379,54 +287,3 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
 		}
 	}
 }
-
-static uint32_t parse_clk(char *buf, bool min)
-{
-        char *ptr = buf;
-        uint32_t clk = 0;
-
-        do {
-                ptr = strchr(ptr, ':');
-                if (!ptr)
-                        break;
-                ptr+=2;
-		if (kstrtou32(ptr, 10, &clk))
-			return 0;
-        } while (!min);
-
-        return clk * 100;
-}
-
-uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
-{
-	char *buf = NULL;
-	uint32_t clk = 0;
-
-	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
-	clk = parse_clk(buf, lowest);
-
-	kfree(buf);
-
-	return clk;
-}
-
-uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
-{
-	char *buf = NULL;
-	uint32_t clk = 0;
-
-	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
-	clk = parse_clk(buf, lowest);
-
-	kfree(buf);
-
-	return clk;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index b0b2bdc..daaf909 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -57,8 +57,6 @@ struct amdgpu_virt_ops {
 	int (*reset_gpu)(struct amdgpu_device *adev);
 	int (*wait_reset)(struct amdgpu_device *adev);
 	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
-	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
-	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
 };
 
 /*
@@ -85,8 +83,8 @@ enum AMDGIM_FEATURE_FLAG {
 	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
 	/* VRAM LOST by GIM */
 	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
-	/* HW PERF SIM in GIM */
-	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
+	/* PP ONE VF MODE in GIM */
+	AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
 };
 
 struct amd_sriov_msg_pf2vf_info_header {
@@ -257,8 +255,6 @@ struct amdgpu_virt {
 	struct amdgpu_vf_error_buffer   vf_errors;
 	struct amdgpu_virt_fw_reserve	fw_reserve;
 	uint32_t gim_feature;
-	/* protect DPM events to GIM */
-	struct mutex                    dpm_mutex;
 	uint32_t reg_access_mode;
 };
 
@@ -286,13 +282,11 @@ static inline bool is_virtual_machine(void)
 #endif
 }
 
-#define amdgim_is_hwperf(adev) \
-	((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
+#define amdgpu_sriov_is_pp_one_vf(adev) \
+	((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
 
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
-uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
-void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
 					uint32_t reg0, uint32_t rreg1,
 					uint32_t ref, uint32_t mask);
@@ -306,6 +300,4 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
 					unsigned int key,
 					unsigned int chksum);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
-uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
-uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 598c245..d16231d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -83,6 +83,32 @@ struct amdgpu_prt_cb {
 };
 
 /**
+ * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
+ * happens while holding this lock anywhere to prevent deadlocks when
+ * an MMU notifier runs in reclaim-FS context.
+ */
+static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
+{
+	mutex_lock(&vm->eviction_lock);
+	vm->saved_flags = memalloc_nofs_save();
+}
+
+static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
+{
+	if (mutex_trylock(&vm->eviction_lock)) {
+		vm->saved_flags = memalloc_nofs_save();
+		return 1;
+	}
+	return 0;
+}
+
+static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
+{
+	memalloc_nofs_restore(vm->saved_flags);
+	mutex_unlock(&vm->eviction_lock);
+}
+
+/**
  * amdgpu_vm_level_shift - return the addr shift for each level
  *
  * @adev: amdgpu_device pointer
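
The helpers above pair the eviction mutex with a memalloc_nofs_save()/memalloc_nofs_restore() scope, so any allocation made while the lock is held is implicitly GFP_NOFS and cannot recurse into FS reclaim, where MMU notifiers (and therefore this lock) may be taken again. A generic sketch of the pattern with illustrative names, independent of this driver:

	unsigned int flags;

	mutex_lock(&obj->lock);
	flags = memalloc_nofs_save();	/* allocations are now implicitly NOFS */

	/* ... work that may allocate memory ... */

	memalloc_nofs_restore(flags);
	mutex_unlock(&obj->lock);
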
@@ -562,8 +588,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 {
 	entry->priority = 0;
 	entry->tv.bo = &vm->root.base.bo->tbo;
-	/* One for the VM updates, one for TTM and one for the CS job */
-	entry->tv.num_shared = 3;
+	/* One for TTM and one for the CS job */
+	entry->tv.num_shared = 2;
 	entry->user_pages = NULL;
 	list_add(&entry->tv.head, validated);
 }
@@ -656,7 +682,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			      void *param)
 {
 	struct amdgpu_vm_bo_base *bo_base, *tmp;
-	int r = 0;
+	int r;
 
 	vm->bulk_moveable &= list_empty(&vm->evicted);
 
@@ -665,7 +691,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 		r = validate(param, bo);
 		if (r)
-			break;
+			return r;
 
 		if (bo->tbo.type != ttm_bo_type_kernel) {
 			amdgpu_vm_bo_moved(bo_base);
@@ -678,7 +704,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		}
 	}
 
-	return r;
+	amdgpu_vm_eviction_lock(vm);
+	vm->evicting = false;
+	amdgpu_vm_eviction_unlock(vm);
+
+	return 0;
 }
 
 /**
@@ -1555,15 +1585,25 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (!(flags & AMDGPU_PTE_VALID))
 		owner = AMDGPU_FENCE_OWNER_KFD;
 
+	amdgpu_vm_eviction_lock(vm);
+	if (vm->evicting) {
+		r = -EBUSY;
+		goto error_unlock;
+	}
+
 	r = vm->update_funcs->prepare(&params, owner, exclusive);
 	if (r)
-		return r;
+		goto error_unlock;
 
 	r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
 	if (r)
-		return r;
+		goto error_unlock;
 
-	return vm->update_funcs->commit(&params, fence);
+	r = vm->update_funcs->commit(&params, fence);
+
+error_unlock:
+	amdgpu_vm_eviction_unlock(vm);
+	return r;
 }
 
 /**
@@ -2500,6 +2540,41 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 }
 
 /**
+ * amdgpu_vm_evictable - check if we can evict a VM
+ *
+ * @bo: A page table of the VM.
+ *
+ * Check if it is possible to evict a VM.
+ */
+bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
+{
+	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
+
+	/* Page tables of a destroyed VM can go away immediately */
+	if (!bo_base || !bo_base->vm)
+		return true;
+
+	/* Don't evict VM page tables while they are busy */
+	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+		return false;
+
+	/* Try to block ongoing updates */
+	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
+		return false;
+
+	/* Don't evict VM page tables while they are updated */
+	if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
+	    !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
+		amdgpu_vm_eviction_unlock(bo_base->vm);
+		return false;
+	}
+
+	bo_base->vm->evicting = true;
+	amdgpu_vm_eviction_unlock(bo_base->vm);
+	return true;
+}
+
+/**
  * amdgpu_vm_bo_invalidate - mark the bo as invalid
  *
  * @adev: amdgpu_device pointer
@@ -2661,8 +2736,16 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
-						   true, true, timeout);
+	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
+					    true, true, timeout);
+	if (timeout <= 0)
+		return timeout;
+
+	timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
+	if (timeout <= 0)
+		return timeout;
+
+	return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
 }
 
 /**
@@ -2696,18 +2779,22 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	spin_lock_init(&vm->invalidated_lock);
 	INIT_LIST_HEAD(&vm->freed);
 
+
 	/* create scheduler entities for page table updates */
-	r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
-				  adev->vm_manager.vm_pte_num_rqs, NULL);
+	r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+				  adev->vm_manager.vm_pte_scheds,
+				  adev->vm_manager.vm_pte_num_scheds, NULL);
 	if (r)
 		return r;
 
-	r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
-				  adev->vm_manager.vm_pte_num_rqs, NULL);
+	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
+				  adev->vm_manager.vm_pte_scheds,
+				  adev->vm_manager.vm_pte_num_scheds, NULL);
 	if (r)
 		goto error_free_direct;
 
 	vm->pte_support_ats = false;
+	vm->is_compute_context = false;
 
 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2730,6 +2817,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	else
 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 	vm->last_update = NULL;
+	vm->last_direct = dma_fence_get_stub();
+	vm->last_delayed = dma_fence_get_stub();
+
+	mutex_init(&vm->eviction_lock);
+	vm->evicting = false;
 
 	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
@@ -2780,6 +2872,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	vm->root.base.bo = NULL;
 
 error_free_delayed:
+	dma_fence_put(vm->last_direct);
+	dma_fence_put(vm->last_delayed);
 	drm_sched_entity_destroy(&vm->delayed);
 
 error_free_direct:
@@ -2893,6 +2987,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 	dma_fence_put(vm->last_update);
 	vm->last_update = NULL;
+	vm->is_compute_context = true;
 
 	if (vm->pasid) {
 		unsigned long flags;
@@ -2947,6 +3042,7 @@ void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}
 	vm->pasid = 0;
+	vm->is_compute_context = false;
 }
 
 /**
@@ -2978,6 +3074,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		vm->pasid = 0;
 	}
 
+	dma_fence_wait(vm->last_direct, false);
+	dma_fence_put(vm->last_direct);
+	dma_fence_wait(vm->last_delayed, false);
+	dma_fence_put(vm->last_delayed);
+
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
 			amdgpu_vm_prt_fini(adev, vm);
@@ -3194,11 +3295,20 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
 		AMDGPU_PTE_SYSTEM;
 
-	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
+	if (vm->is_compute_context) {
+		/* Intentionally setting invalid PTE flag
+		 * combination to force a no-retry-fault
+		 */
+		flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
+			AMDGPU_PTE_TF;
+		value = 0;
+
+	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
 		/* Redirect the access to the dummy page */
 		value = adev->dummy_page_addr;
 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
 			AMDGPU_PTE_WRITEABLE;
+
 	} else {
 		/* Let the hw retry silently on the PTE */
 		value = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 4dbbe1b..b4640ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -30,6 +30,7 @@
 #include <drm/gpu_scheduler.h>
 #include <drm/drm_file.h>
 #include <drm/ttm/ttm_bo_driver.h>
+#include <linux/sched/mm.h>
 
 #include "amdgpu_sync.h"
 #include "amdgpu_ring.h"
@@ -239,6 +240,13 @@ struct amdgpu_vm {
 	/* tree of virtual addresses mapped */
 	struct rb_root_cached	va;
 
+	/* Lock to prevent eviction while we are updating page tables;
+	 * use amdgpu_vm_eviction_lock/unlock(vm)
+	 */
+	struct mutex		eviction_lock;
+	bool			evicting;
+	unsigned int		saved_flags;
+
 	/* BOs who needs a validation */
 	struct list_head	evicted;
 
@@ -266,6 +274,10 @@ struct amdgpu_vm {
 	struct drm_sched_entity	direct;
 	struct drm_sched_entity	delayed;
 
+	/* Last submission to the scheduler entities */
+	struct dma_fence	*last_direct;
+	struct dma_fence	*last_delayed;
+
 	unsigned int		pasid;
 	/* dedicated to vm */
 	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];
@@ -298,6 +310,8 @@ struct amdgpu_vm {
 	struct ttm_lru_bulk_move lru_bulk_move;
 	/* mark whether can do the bulk move */
 	bool			bulk_moveable;
+	/* Flag to indicate if VM is used for compute */
+	bool			is_compute_context;
 };
 
 struct amdgpu_vm_manager {
@@ -317,8 +331,8 @@ struct amdgpu_vm_manager {
 	u64					vram_base_offset;
 	/* vm pte handling */
 	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
-	struct drm_sched_rq			*vm_pte_rqs[AMDGPU_MAX_RINGS];
-	unsigned				vm_pte_num_rqs;
+	struct drm_gpu_scheduler		*vm_pte_scheds[AMDGPU_MAX_RINGS];
+	unsigned				vm_pte_num_scheds;
 	struct amdgpu_ring			*page_fault;
 
 	/* partial resident texture handling */
@@ -376,6 +390,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
 			bool clear);
+bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			     struct amdgpu_bo *bo, bool evicted);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 832db59..19b7f80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -71,7 +71,7 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 	p->num_dw_left = ndw;
 
 	/* Wait for moves to be completed */
-	r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
+	r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
 	if (r)
 		return r;
 
@@ -95,11 +95,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 				 struct dma_fence **fence)
 {
-	struct amdgpu_bo *root = p->vm->root.base.bo;
 	struct amdgpu_ib *ib = p->job->ibs;
 	struct drm_sched_entity *entity;
+	struct dma_fence *f, *tmp;
 	struct amdgpu_ring *ring;
-	struct dma_fence *f;
 	int r;
 
 	entity = p->direct ? &p->vm->direct : &p->vm->delayed;
@@ -112,7 +111,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 	if (r)
 		goto error;
 
-	amdgpu_bo_fence(root, f, true);
+	tmp = dma_fence_get(f);
+	if (p->direct)
+		swap(p->vm->last_direct, tmp);
+	else
+		swap(p->vm->last_delayed, tmp);
+	dma_fence_put(tmp);
+
 	if (fence && !p->direct)
 		swap(*fence, f);
 	dma_fence_put(f);
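
Rather than attaching every page-table update fence to the root BO's reservation, the commit path now caches the most recent fence per entity, which is what lets amdgpu_vm_wait_idle() and amdgpu_vm_evictable() above check only the VM's own outstanding updates. The reference-counting idiom, annotated (a restatement of the hunk above, not additional driver code):

	struct dma_fence *tmp = dma_fence_get(f);	/* extra reference for the cache */

	if (p->direct)
		swap(p->vm->last_direct, tmp);		/* cache the new fence, old one ends up in tmp */
	else
		swap(p->vm->last_delayed, tmp);
	dma_fence_put(tmp);				/* drop the previously cached fence */
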
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 61d13d8..a97af42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -146,16 +146,16 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
 	ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
 	ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);
 
-	fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_ctl_in);
+	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
 	if (fica_out != 0x1f)
 		pr_err("xGMI error counters not enabled!\n");
 
-	fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_status_in);
+	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);
 
 	if ((fica_out & 0xffff) == 2)
 		error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);
 
-	adev->df_funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
+	adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", error_count);
 }
@@ -261,6 +261,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
 	INIT_LIST_HEAD(&tmp->device_list);
 	mutex_init(&tmp->hive_lock);
 	mutex_init(&tmp->reset_lock);
+	task_barrier_init(&tmp->tb);
 
 	if (lock)
 		mutex_lock(&tmp->hive_lock);
@@ -290,13 +291,7 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
 
 	dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
 
-	if (is_support_sw_smu_xgmi(adev))
-		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
-	else if (adev->powerplay.pp_funcs &&
-		 adev->powerplay.pp_funcs->set_xgmi_pstate)
-		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
-								pstate);
-
+	ret = amdgpu_dpm_set_xgmi_pstate(adev, pstate);
 	if (ret) {
 		dev_err(adev->dev,
 			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
@@ -408,6 +403,8 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 	top_info->num_nodes = count;
 	hive->number_devices = count;
 
+	task_barrier_add_task(&hive->tb);
+
 	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
 			/* update node list for other device in the hive */
@@ -470,6 +467,7 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
 		mutex_destroy(&hive->hive_lock);
 		mutex_destroy(&hive->reset_lock);
 	} else {
+		task_barrier_rem_task(&hive->tb);
 		amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
 		mutex_unlock(&hive->hive_lock);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index bbf504f..74011fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -22,6 +22,7 @@
 #ifndef __AMDGPU_XGMI_H__
 #define __AMDGPU_XGMI_H__
 
+#include <drm/task_barrier.h>
 #include "amdgpu_psp.h"
 
 struct amdgpu_hive_info {
@@ -33,6 +34,7 @@ struct amdgpu_hive_info {
 	struct device_attribute dev_attr;
 	struct amdgpu_device *adev;
 	int pstate; /*0 -- low , 1 -- high , -1 unknown*/
+	struct task_barrier tb;
 };
 
 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
index d9cc746..847ca9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
@@ -74,9 +74,9 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		athub_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		athub_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index ceb9aa4..921a69a 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -77,9 +77,9 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
 		athub_v2_0_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		athub_v2_0_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 6858cde..ea702a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -361,7 +361,6 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
 			       struct drm_connector *connector)
 {
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-	struct amdgpu_connector_atom_dig *dig_connector;
 	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
 	u16 dp_bridge = amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector);
 	u8 tmp;
@@ -369,8 +368,6 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
 	if (!amdgpu_connector->con_priv)
 		return panel_mode;
 
-	dig_connector = amdgpu_connector->con_priv;
-
 	if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
 		/* DP bridge chips */
 		if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux,
@@ -713,7 +710,6 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
 	struct drm_device *dev = encoder->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-	struct amdgpu_encoder_atom_dig *dig;
 	struct amdgpu_connector *amdgpu_connector;
 	struct amdgpu_connector_atom_dig *dig_connector;
 	struct amdgpu_atombios_dp_link_train_info dp_info;
@@ -721,7 +717,6 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
 
 	if (!amdgpu_encoder->enc_priv)
 		return;
-	dig = amdgpu_encoder->enc_priv;
 
 	amdgpu_connector = to_amdgpu_connector(connector);
 	if (!amdgpu_connector->con_priv)
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index 980c363..b4cc7c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -76,11 +76,6 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
 		}
 		args.lpI2CDataOut = cpu_to_le16(out);
 	} else {
-		if (num > ATOM_MAX_HW_I2C_READ) {
-			DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
-			r = -EINVAL;
-			goto done;
-		}
 		args.ucRegIndex = 0;
 		args.lpI2CDataOut = 0;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 1befdee..006f21e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1310,6 +1310,17 @@ static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
 	return r;
 }
 
+static bool cik_asic_supports_baco(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+		return amdgpu_dpm_is_baco_supported(adev);
+	default:
+		return false;
+	}
+}
+
 static enum amd_reset_method
 cik_asic_reset_method(struct amdgpu_device *adev)
 {
@@ -1349,7 +1360,7 @@ static int cik_asic_reset(struct amdgpu_device *adev)
 	if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
 		if (!adev->in_suspend)
 			amdgpu_inc_vram_lost(adev);
-		r = smu7_asic_baco_reset(adev);
+		r = amdgpu_dpm_baco_reset(adev);
 	} else {
 		r = cik_asic_pci_config_reset(adev);
 	}
@@ -1927,6 +1938,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
 	.get_pcie_usage = &cik_get_pcie_usage,
 	.need_reset_on_init = &cik_need_reset_on_init,
 	.get_pcie_replay_count = &cik_get_pcie_replay_count,
+	.supports_baco = &cik_asic_supports_baco,
 };
 
 static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 9870bf2..f91ab4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -31,7 +31,5 @@ void cik_srbm_select(struct amdgpu_device *adev,
 int cik_set_ip_blocks(struct amdgpu_device *adev);
 
 void legacy_doorbell_index_init(struct amdgpu_device *adev);
-int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
-int smu7_asic_baco_reset(struct amdgpu_device *adev);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index c45304f..580d3f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -228,7 +228,7 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
 	u32 extra_bits = vmid & 0xf;
 
 	/* IB packet must end on a 8 DW boundary */
-	cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
+	cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);
 
 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
 	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
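
Both padding changes in this file (here and in cik_sdma_ring_pad_ib() below) rely on the same identity for unsigned x: (8 - (x & 7)) % 8 == (-x) & 7, and likewise (12 - (x & 7)) % 8 == (4 - x) & 7; the masked form computes how many NOP dwords are needed to reach the next 8-dword boundary (or a boundary plus 4) without the modulo. For example, in the first pair x = 13 gives 3 with either expression. A standalone sanity check, outside the kernel and purely illustrative:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		for (uint32_t x = 0; x < 64; x++) {
			assert(((8 - (x & 7)) % 8) == ((-x) & 7));
			assert(((12 - (x & 7)) % 8) == ((4 - x) & 7));
		}
		return 0;
	}
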
@@ -811,7 +811,7 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 	u32 pad_count;
 	int i;
 
-	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	pad_count = (-ib->length_dw) & 7;
 	for (i = 0; i < pad_count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
 			ib->ptr[ib->length_dw++] =
@@ -1372,16 +1372,14 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
 
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-	struct drm_gpu_scheduler *sched;
 	unsigned i;
 
 	adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_rqs[i] =
-			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		adev->vm_manager.vm_pte_scheds[i] =
+			&adev->sdma.instance[i].ring.sched;
 	}
-	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version cik_sdma_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
index d622129..d6aca1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -31,6 +31,9 @@ static u32 df_v1_7_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
 
 static void df_v1_7_sw_init(struct amdgpu_device *adev)
 {
+	adev->df.hash_status.hash_64k = false;
+	adev->df.hash_status.hash_2m = false;
+	adev->df.hash_status.hash_1g = false;
 }
 
 static void df_v1_7_sw_fini(struct amdgpu_device *adev)
@@ -66,7 +69,7 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
 {
 	int fb_channel_number;
 
-	fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+	fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
 
 	return df_v1_7_channel_number[fb_channel_number];
 }
@@ -77,7 +80,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 	u32 tmp;
 
 	/* Put DF on broadcast mode */
-	adev->df_funcs->enable_broadcast_mode(adev, true);
+	adev->df.funcs->enable_broadcast_mode(adev, true);
 
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
 		tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
@@ -92,7 +95,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 	}
 
 	/* Exit boradcast mode */
-	adev->df_funcs->enable_broadcast_mode(adev, false);
+	adev->df.funcs->enable_broadcast_mode(adev, false);
 }
 
 static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 4043ebc..5a1bd8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -27,6 +27,9 @@
 #include "df/df_3_6_offset.h"
 #include "df/df_3_6_sh_mask.h"
 
+#define DF_3_6_SMN_REG_INST_DIST        0x8
+#define DF_3_6_INST_CNT                 8
+
 static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
 				       16, 32, 0, 0, 0, 2, 4, 8};
 
@@ -183,6 +186,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 }
 
+/* same as perfmon_wreg but returns a status based on reading back the written values */
+static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
+					  uint32_t lo_addr, uint32_t lo_val,
+					  uint32_t hi_addr, uint32_t  hi_val)
+{
+	unsigned long flags, address, data;
+	uint32_t lo_val_rb, hi_val_rb;
+
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(address, lo_addr);
+	WREG32(data, lo_val);
+	WREG32(address, hi_addr);
+	WREG32(data, hi_val);
+
+	WREG32(address, lo_addr);
+	lo_val_rb = RREG32(data);
+	WREG32(address, hi_addr);
+	hi_val_rb = RREG32(data);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+	if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
+		return -EBUSY;
+
+	return 0;
+}
+
+
+/*
+ * Retry arming the counters every 100 usecs within a 1 millisecond timeout;
+ * if arming still fails after the timeout, return an error.
+ */
+#define ARM_RETRY_USEC_TIMEOUT	1000
+#define ARM_RETRY_USEC_INTERVAL	100
+static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
+					  uint32_t lo_addr, uint32_t lo_val,
+					  uint32_t hi_addr, uint32_t  hi_val)
+{
+	int countdown = ARM_RETRY_USEC_TIMEOUT;
+
+	while (countdown) {
+
+		if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
+						     hi_addr, hi_val))
+			break;
+
+		countdown -= ARM_RETRY_USEC_INTERVAL;
+		udelay(ARM_RETRY_USEC_INTERVAL);
+	}
+
+	return countdown > 0 ? 0 : -ETIME;
+}
+
 /* get the number of df counters available */
 static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
 		struct device_attribute *attr,
@@ -207,6 +265,32 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
 /* device attr for available perfmon counters */
 static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL);
 
+static void df_v3_6_query_hashes(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	adev->df.hash_status.hash_64k = false;
+	adev->df.hash_status.hash_2m = false;
+	adev->df.hash_status.hash_1g = false;
+
+	if (adev->asic_type != CHIP_ARCTURUS)
+		return;
+
+	/* encoding for hash-enabled on Arcturus */
+	if (adev->df.funcs->get_fb_channel_number(adev) == 0xe) {
+		tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DfGlobalCtrl);
+		adev->df.hash_status.hash_64k = REG_GET_FIELD(tmp,
+						DF_CS_UMC_AON0_DfGlobalCtrl,
+						GlbHashIntlvCtl64K);
+		adev->df.hash_status.hash_2m = REG_GET_FIELD(tmp,
+						DF_CS_UMC_AON0_DfGlobalCtrl,
+						GlbHashIntlvCtl2M);
+		adev->df.hash_status.hash_1g = REG_GET_FIELD(tmp,
+						DF_CS_UMC_AON0_DfGlobalCtrl,
+						GlbHashIntlvCtl1G);
+	}
+}
+
 /* init perfmons */
 static void df_v3_6_sw_init(struct amdgpu_device *adev)
 {
@@ -218,6 +302,8 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev)
 
 	for (i = 0; i < AMDGPU_MAX_DF_PERFMONS; i++)
 		adev->df_perfmon_config_assign_mask[i] = 0;
+
+	df_v3_6_query_hashes(adev);
 }
 
 static void df_v3_6_sw_fini(struct amdgpu_device *adev)
@@ -256,7 +342,7 @@ static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
 {
 	int fb_channel_number;
 
-	fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+	fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
 	if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
 		fb_channel_number = 0;
 
@@ -270,7 +356,7 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 
 	if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
 		/* Put DF on broadcast mode */
-		adev->df_funcs->enable_broadcast_mode(adev, true);
+		adev->df.funcs->enable_broadcast_mode(adev, true);
 
 		if (enable) {
 			tmp = RREG32_SOC15(DF, 0,
@@ -289,7 +375,7 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 		}
 
 		/* Exit broadcast mode */
-		adev->df_funcs->enable_broadcast_mode(adev, false);
+		adev->df.funcs->enable_broadcast_mode(adev, false);
 	}
 }
 
@@ -334,20 +420,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
 	switch (target_cntr) {
 
 	case 0:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
 		break;
 	case 1:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
 		break;
 	case 2:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
 		break;
 	case 3:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
 		break;
 
 	}
@@ -422,6 +508,44 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
 	return -ENOSPC;
 }
 
+#define DEFERRED_ARM_MASK	(1 << 31)
+static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
+				    uint64_t config, bool is_deferred)
+{
+	int target_cntr;
+
+	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+	if (target_cntr < 0)
+		return -EINVAL;
+
+	if (is_deferred)
+		adev->df_perfmon_config_assign_mask[target_cntr] |=
+							DEFERRED_ARM_MASK;
+	else
+		adev->df_perfmon_config_assign_mask[target_cntr] &=
+							~DEFERRED_ARM_MASK;
+
+	return 0;
+}
+
+static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
+				    uint64_t config)
+{
+	int target_cntr;
+
+	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+	/*
+	 * We never get target_cntr < 0 since this function is only called
+	 * from pmc_get_count for now, but check anyway.
+	 */
+	return (target_cntr >= 0 &&
+			(adev->df_perfmon_config_assign_mask[target_cntr]
+			& DEFERRED_ARM_MASK));
+
+}
+
 /* release performance counter */
 static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
 				     uint64_t config)
@@ -451,29 +575,33 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
 			     int is_enable)
 {
 	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
-	int ret = 0;
+	int err = 0, ret = 0;
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
+		if (is_enable)
+			return df_v3_6_pmc_add_cntr(adev, config);
 
 		df_v3_6_reset_perfmon_cntr(adev, config);
 
-		if (is_enable) {
-			ret = df_v3_6_pmc_add_cntr(adev, config);
-		} else {
-			ret = df_v3_6_pmc_get_ctrl_settings(adev,
+		ret = df_v3_6_pmc_get_ctrl_settings(adev,
 					config,
 					&lo_base_addr,
 					&hi_base_addr,
 					&lo_val,
 					&hi_val);
 
-			if (ret)
-				return ret;
+		if (ret)
+			return ret;
 
-			df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
-					hi_base_addr, hi_val);
-		}
+		err = df_v3_6_perfmon_arm_with_retry(adev,
+						     lo_base_addr,
+						     lo_val,
+						     hi_base_addr,
+						     hi_val);
+
+		if (err)
+			ret = df_v3_6_pmc_set_deferred(adev, config, true);
 
 		break;
 	default:
@@ -501,7 +629,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
 		if (ret)
 			return ret;
 
-		df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
+		df_v3_6_reset_perfmon_cntr(adev, config);
 
 		if (is_disable)
 			df_v3_6_pmc_release_cntr(adev, config);
@@ -518,18 +646,29 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 				  uint64_t config,
 				  uint64_t *count)
 {
-	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+	uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
 	*count = 0;
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
-
 		df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
 				      &hi_base_addr);
 
 		if ((lo_base_addr == 0) || (hi_base_addr == 0))
 			return;
 
+		/* rearm the counter or throw away count value on failure */
+		if (df_v3_6_pmc_is_deferred(adev, config)) {
+			int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
+							lo_base_addr, lo_val,
+							hi_base_addr, hi_val);
+
+			if (rearm_err)
+				return;
+
+			df_v3_6_pmc_set_deferred(adev, config, false);
+		}
+
 		df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
 				hi_base_addr, &hi_val);
 
@@ -542,12 +681,63 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 			 config, lo_base_addr, hi_base_addr, lo_val, hi_val);
 
 		break;
-
 	default:
 		break;
 	}
 }
 
+static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev,
+					   uint32_t df_inst)
+{
+	uint32_t base_addr_reg_val 	= 0;
+	uint64_t base_addr	 	= 0;
+
+	base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 +
+					df_inst * DF_3_6_SMN_REG_INST_DIST);
+
+	if (REG_GET_FIELD(base_addr_reg_val,
+			  DF_CS_UMC_AON0_DramBaseAddress0,
+			  AddrRngVal) == 0) {
+		DRM_WARN("address range not valid");
+		return 0;
+	}
+
+	base_addr = REG_GET_FIELD(base_addr_reg_val,
+				  DF_CS_UMC_AON0_DramBaseAddress0,
+				  DramBaseAddr);
+
+	return base_addr << 28;
+}
+
+static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev)
+{
+	uint32_t xgmi_node_id	= 0;
+	uint32_t df_inst_id 	= 0;
+
+	/* Walk through DF dst nodes to find current XGMI node */
+	for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) {
+
+		xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 +
+					   df_inst_id * DF_3_6_SMN_REG_INST_DIST);
+		xgmi_node_id = REG_GET_FIELD(xgmi_node_id,
+					     DF_CS_UMC_AON0_DramLimitAddress0,
+					     DstFabricID);
+
+		/* TODO: establish reason dest fabric id is offset by 7 */
+		xgmi_node_id = xgmi_node_id >> 7;
+
+		if (adev->gmc.xgmi.physical_node_id == xgmi_node_id)
+			break;
+	}
+
+	if (df_inst_id == DF_3_6_INST_CNT) {
+		DRM_WARN("can't match DF dst id with GPU node");
+		return 0;
+	}
+
+	return df_inst_id;
+}
+
 const struct amdgpu_df_funcs df_v3_6_funcs = {
 	.sw_init = df_v3_6_sw_init,
 	.sw_fini = df_v3_6_sw_fini,
@@ -561,5 +751,7 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
 	.pmc_stop = df_v3_6_pmc_stop,
 	.pmc_get_count = df_v3_6_pmc_get_count,
 	.get_fica = df_v3_6_get_fica,
-	.set_fica = df_v3_6_set_fica
+	.set_fica = df_v3_6_set_fica,
+	.get_dram_base_addr = df_v3_6_get_dram_base_addr,
+	.get_df_inst_id = df_v3_6_get_df_inst_id
 };
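
To recap the deferred-arm handling added in this file (a summary sketch, not extra driver code): pmc_start() retries arming the counter for up to 1 ms; if the data fabric is still busy, the counter is flagged as deferred, and pmc_get_count() re-arms it with zeroed values before reporting anything, discarding the read if arming still fails:

	/* illustrative: start path */
	if (df_v3_6_perfmon_arm_with_retry(adev, lo_base_addr, lo_val,
					   hi_base_addr, hi_val))
		df_v3_6_pmc_set_deferred(adev, config, true);	/* try again on read */

	/* illustrative: read path */
	if (df_v3_6_pmc_is_deferred(adev, config)) {
		if (df_v3_6_perfmon_arm_with_status(adev, lo_base_addr, 0,
						    hi_base_addr, 0))
			return;		/* still busy, no valid count yet */
		df_v3_6_pmc_set_deferred(adev, config, false);
	}
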
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index ba9e53a..1785fda 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -40,6 +40,7 @@
 #include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
 
 #include "soc15.h"
+#include "soc15d.h"
 #include "soc15_common.h"
 #include "clearstate_gfx10.h"
 #include "v10_structs.h"
@@ -50,9 +51,6 @@
  * Navi10 has two graphic rings to share each graphic pipe.
  * 1. Primary ring
  * 2. Async ring
- *
- * In bring-up phase, it just used primary ring so set gfx ring number as 1 at
- * first.
  */
 #define GFX10_NUM_GFX_RINGS	2
 #define GFX10_MEC_HPD_SIZE	2048
@@ -123,7 +121,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070104),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
@@ -171,7 +169,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070105),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070105),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
@@ -348,15 +346,29 @@ static void gfx10_kiq_query_status(struct amdgpu_ring *kiq_ring,
 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
 }
 
+static void gfx10_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
+				uint16_t pasid, uint32_t flush_type,
+				bool all_hub)
+{
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+	amdgpu_ring_write(kiq_ring,
+			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
+			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
+			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
+			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
+}
+
 static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
 	.kiq_set_resources = gfx10_kiq_set_resources,
 	.kiq_map_queues = gfx10_kiq_map_queues,
 	.kiq_unmap_queues = gfx10_kiq_unmap_queues,
 	.kiq_query_status = gfx10_kiq_query_status,
+	.kiq_invalidate_tlbs = gfx10_kiq_invalidate_tlbs,
 	.set_resources_size = 8,
 	.map_queues_size = 7,
 	.unmap_queues_size = 6,
 	.query_status_size = 7,
+	.invalidate_tlbs_size = 2,
 };
 
 static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
@@ -474,18 +486,10 @@ static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
 		else
 			udelay(1);
 	}
-	if (i < adev->usec_timeout) {
-		if (amdgpu_emu_mode == 1)
-			DRM_INFO("ring test on %d succeeded in %d msecs\n",
-				 ring->idx, i);
-		else
-			DRM_INFO("ring test on %d succeeded in %d usecs\n",
-				 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-			  ring->idx, scratch, tmp);
-		r = -EINVAL;
-	}
+
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	amdgpu_gfx_scratch_free(adev, scratch);
 
 	return r;
@@ -535,14 +539,10 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 
 	tmp = RREG32(scratch);
-	if (tmp == 0xDEADBEEF) {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
+	else
 		r = -EINVAL;
-	}
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
 	dma_fence_put(f);
@@ -591,8 +591,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
 	}
 
 	if (adev->gfx.cp_fw_write_wait == false)
-		DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
-			      GRBM requires 1-cycle delay in cp firmware\n");
+		DRM_WARN_ONCE("CP firmware version too old, please update!");
 }
 
 
@@ -617,11 +616,29 @@ static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
 			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
 }
 
+static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
+{
+	bool ret = false;
+
+	switch (adev->pdev->revision) {
+	case 0xc2:
+	case 0xc3:
+		ret = true;
+		break;
+	default:
+		ret = false;
+		break;
+	}
+
+	return ret;
+}
+
 static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
 	case CHIP_NAVI10:
-		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+		if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
+			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 		break;
 	default:
 		break;
@@ -805,10 +822,11 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
 		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
 		info->fw = adev->gfx.rlc_fw;
-		header = (const struct common_firmware_header *)info->fw->data;
-		adev->firmware.fw_size +=
-			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
+		if (info->fw) {
+			header = (const struct common_firmware_header *)info->fw->data;
+			adev->firmware.fw_size +=
+				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+		}
 		if (adev->gfx.rlc.is_rlc_v2_1 &&
 		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
 		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
@@ -1948,7 +1966,7 @@ static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
 		rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4;
 
 		rlc_toc++;
-	};
+	}
 
 	return 0;
 }
@@ -3319,8 +3337,11 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
 	mqd->cp_hqd_ib_control = tmp;
 
-	/* activate the queue */
-	mqd->cp_hqd_active = 1;
+	/* The map_queues packet doesn't need to activate the queue,
+	 * so only the KIQ needs to set this field.
+	 */
+	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+		mqd->cp_hqd_active = 1;
 
 	return 0;
 }
@@ -3591,23 +3612,16 @@ static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
 
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 		ring = &adev->gfx.gfx_ring[i];
-		DRM_INFO("gfx %d ring me %d pipe %d q %d\n",
-			 i, ring->me, ring->pipe, ring->queue);
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->sched.ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 	}
 
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
-		ring->sched.ready = true;
-		DRM_INFO("compute ring %d mec %d pipe %d q %d\n",
-			 i, ring->me, ring->pipe, ring->queue);
-		r = amdgpu_ring_test_ring(ring);
+		r = amdgpu_ring_test_helper(ring);
 		if (r)
-			ring->sched.ready = false;
+			return r;
 	}
 
 	return 0;
@@ -4215,7 +4229,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
 					  enum amd_powergating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_PG_STATE_GATE);
 	switch (adev->asic_type) {
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
@@ -4241,7 +4255,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
 		gfx_v10_0_update_gfx_clock_gating(adev,
-						 state == AMD_CG_STATE_GATE ? true : false);
+						 state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
@@ -4723,6 +4737,7 @@ static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
 static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 {
 	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
 	amdgpu_ring_write(ring, 0 |	/* src: register*/
@@ -4731,9 +4746,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 	amdgpu_ring_write(ring, reg);
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 }
 
 static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 7f0a636..31f44d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1576,7 +1576,7 @@ static void gfx_v6_0_config_init(struct amdgpu_device *adev)
 static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
 {
 	u32 gb_addr_config = 0;
-	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 mc_arb_ramcfg;
 	u32 sx_debug_1;
 	u32 hdp_host_path_cntl;
 	u32 tmp;
@@ -1678,7 +1678,6 @@ static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
 
 	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
 
-	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
 	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
 	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index d92e92e..8f20a5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4258,7 +4258,7 @@ static int gfx_v7_0_late_init(void *handle)
 static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
 {
 	u32 gb_addr_config;
-	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 mc_arb_ramcfg;
 	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
 	u32 tmp;
 
@@ -4335,7 +4335,6 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
 		break;
 	}
 
-	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
 	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
 	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 52a647d..fa24597 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1677,7 +1677,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
 {
 	u32 gb_addr_config;
-	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 mc_arb_ramcfg;
 	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
 	u32 tmp;
 	int ret;
@@ -1817,7 +1817,6 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
 		break;
 	}
 
-	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
 	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
 	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
 
@@ -4559,8 +4558,11 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
 	mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
 	mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
 
-	/* activate the queue */
-	mqd->cp_hqd_active = 1;
+	/* The map_queues packet doesn't need to activate the queue,
+	 * so only the KIQ needs to set this field.
+	 */
+	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+		mqd->cp_hqd_active = 1;
 
 	return 0;
 }
@@ -6447,6 +6449,7 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
 static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 {
 	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
 	amdgpu_ring_write(ring, 0 |	/* src: register*/
@@ -6455,9 +6458,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 	amdgpu_ring_write(ring, reg);
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 }
 
 static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 97105a5..90f64b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -48,6 +48,8 @@
 
 #include "amdgpu_ras.h"
 
+#include "gfx_v9_4.h"
+
 #define GFX9_NUM_GFX_RINGS     1
 #define GFX9_MEC_HPD_SIZE 4096
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -131,18 +133,6 @@ MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
 #define mmTCP_CHAN_STEER_5_ARCT								0x0b0c
 #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX							0
 
-struct ras_gfx_subblock_reg {
-	const char *name;
-	uint32_t hwip;
-	uint32_t inst;
-	uint32_t seg;
-	uint32_t reg_offset;
-	uint32_t sec_count_mask;
-	uint32_t sec_count_shift;
-	uint32_t ded_count_mask;
-	uint32_t ded_count_shift;
-};
-
 enum ta_ras_gfx_subblock {
 	/*CPC*/
 	TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -748,9 +738,138 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 					  void *ras_error_status);
+static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev);
 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
 				     void *inject_if);
 
+static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
+				uint64_t queue_mask)
+{
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
+	amdgpu_ring_write(kiq_ring,
+		PACKET3_SET_RESOURCES_VMID_MASK(0) |
+		/* vmid_mask:0 queue_type:0 (KIQ) */
+		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
+	amdgpu_ring_write(kiq_ring,
+			lower_32_bits(queue_mask));	/* queue mask lo */
+	amdgpu_ring_write(kiq_ring,
+			upper_32_bits(queue_mask));	/* queue mask hi */
+	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
+	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
+	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
+	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
+}
+
+static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
+				 struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = kiq_ring->adev;
+	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
+	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
+			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
+			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
+			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
+			 /*queue_type: normal compute queue */
+			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
+			 /* alloc format: all_on_one_pipe */
+			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
+			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
+			 /* num_queues: must be 1 */
+			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
+	amdgpu_ring_write(kiq_ring,
+			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
+	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
+	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
+	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
+	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
+}
+
+static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
+				   struct amdgpu_ring *ring,
+				   enum amdgpu_unmap_queues_action action,
+				   u64 gpu_addr, u64 seq)
+{
+	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+			  PACKET3_UNMAP_QUEUES_ACTION(action) |
+			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
+			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+	amdgpu_ring_write(kiq_ring,
+			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+
+	if (action == PREEMPT_QUEUES_NO_UNMAP) {
+		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
+		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
+		amdgpu_ring_write(kiq_ring, seq);
+	} else {
+		amdgpu_ring_write(kiq_ring, 0);
+		amdgpu_ring_write(kiq_ring, 0);
+		amdgpu_ring_write(kiq_ring, 0);
+	}
+}
+
+static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
+				   struct amdgpu_ring *ring,
+				   u64 addr,
+				   u64 seq)
+{
+	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
+	amdgpu_ring_write(kiq_ring,
+			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
+			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
+			  PACKET3_QUERY_STATUS_COMMAND(2));
+	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+	amdgpu_ring_write(kiq_ring,
+			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
+			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
+	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
+	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
+	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
+	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
+}
+
+static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
+				uint16_t pasid, uint32_t flush_type,
+				bool all_hub)
+{
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+	amdgpu_ring_write(kiq_ring,
+			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
+			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
+			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
+			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
+}
+
+static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
+	.kiq_set_resources = gfx_v9_0_kiq_set_resources,
+	.kiq_map_queues = gfx_v9_0_kiq_map_queues,
+	.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
+	.kiq_query_status = gfx_v9_0_kiq_query_status,
+	.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
+	.set_resources_size = 8,
+	.map_queues_size = 7,
+	.unmap_queues_size = 6,
+	.query_status_size = 7,
+	.invalidate_tlbs_size = 2,
+};
+
+static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
+{
+	adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
+}
+
 static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
@@ -991,8 +1110,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
 	    (adev->gfx.mec_feature_version < 46) ||
 	    (adev->gfx.pfp_fw_version < 0x000000b7) ||
 	    (adev->gfx.pfp_feature_version < 46))
-		DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
-			      GRBM requires 1-cycle delay in cp firmware\n");
+		DRM_WARN_ONCE("CP firmware version too old, please update!");
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
@@ -1044,18 +1162,54 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
 	}
 }
 
+struct amdgpu_gfxoff_quirk {
+	u16 chip_vendor;
+	u16 chip_device;
+	u16 subsys_vendor;
+	u16 subsys_device;
+	u8 revision;
+};
+
+static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
+	/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
+	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+	{ 0, 0, 0, 0, 0 },
+};
+
+static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
+{
+	const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
+
+	while (p && p->chip_device != 0) {
+		if (pdev->vendor == p->chip_vendor &&
+		    pdev->device == p->chip_device &&
+		    pdev->subsystem_vendor == p->subsys_vendor &&
+		    pdev->subsystem_device == p->subsys_device &&
+		    pdev->revision == p->revision) {
+			return true;
+		}
+		++p;
+	}
+	return false;
+}
+
 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 {
+	if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
+		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 		break;
 	case CHIP_RAVEN:
-		if (!(adev->rev_id >= 0x8 ||
-		      adev->pdev->device == 0x15d8) &&
-		    (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */
-		     !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */
+		if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
+		    ((adev->gfx.rlc_fw_version != 106 &&
+		      adev->gfx.rlc_fw_version < 531) ||
+		     (adev->gfx.rlc_fw_version == 53815) ||
+		     (adev->gfx.rlc_feature_version < 1) ||
+		     !adev->gfx.rlc.is_rlc_v2_1))
 			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 
 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
@@ -1834,6 +1988,17 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
 	.query_ras_error_count = &gfx_v9_0_query_ras_error_count
 };
 
+static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
+	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
+	.select_se_sh = &gfx_v9_0_select_se_sh,
+	.read_wave_data = &gfx_v9_0_read_wave_data,
+	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+	.ras_error_inject = &gfx_v9_4_ras_error_inject,
+	.query_ras_error_count = &gfx_v9_4_query_ras_error_count
+};
+
 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 {
 	u32 gb_addr_config;
@@ -1885,6 +2050,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 			gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_ARCTURUS:
+		adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
 		adev->gfx.config.max_hw_contexts = 8;
 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2275,6 +2441,22 @@ static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
 	}
 }
 
+static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
+{
+	uint32_t tmp;
+
+	switch (adev->asic_type) {
+	case CHIP_ARCTURUS:
+		tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
+		tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
+					DISABLE_BARRIER_WAITCNT, 1);
+		WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
 {
 	u32 tmp;
@@ -2320,6 +2502,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
 
 	gfx_v9_0_init_compute_vmid(adev);
 	gfx_v9_0_init_gds_vmid(adev);
+	gfx_v9_0_init_sq_config(adev);
 }
 
 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
@@ -3119,74 +3302,6 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
 	WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
 }
 
-static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
-{
-	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
-	uint64_t queue_mask = 0;
-	int r, i;
-
-	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
-		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
-			continue;
-
-		/* This situation may be hit in the future if a new HW
-		 * generation exposes more than 64 queues. If so, the
-		 * definition of queue_mask needs updating */
-		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
-			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
-			break;
-		}
-
-		queue_mask |= (1ull << i);
-	}
-
-	r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
-	if (r) {
-		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
-		return r;
-	}
-
-	/* set resources */
-	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
-	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
-			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
-	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
-	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
-	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
-	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
-	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
-	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
-		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
-		uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
-
-		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
-		/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
-		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
-				  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
-				  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
-				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
-				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
-				  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
-				  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
-				  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
-				  PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
-				  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
-		amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
-		amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
-		amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
-		amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
-		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
-	}
-
-	r = amdgpu_ring_test_helper(kiq_ring);
-	if (r)
-		DRM_ERROR("KCQ enable failed\n");
-
-	return r;
-}
-
 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
@@ -3323,8 +3438,11 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
 	mqd->cp_hqd_ib_control = tmp;
 
-	/* activate the queue */
-	mqd->cp_hqd_active = 1;
+	/* The map_queues packet doesn't need to activate the queue,
+	 * so only the KIQ needs to set this field.
+	 */
+	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+		mqd->cp_hqd_active = 1;
 
 	return 0;
 }
@@ -3593,7 +3711,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
 			goto done;
 	}
 
-	r = gfx_v9_0_kiq_kcq_enable(adev);
+	r = amdgpu_gfx_enable_kcq(adev);
 done:
 	return r;
 }
@@ -3650,6 +3768,23 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
 	return 0;
 }
 
+static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	if (adev->asic_type != CHIP_ARCTURUS)
+		return;
+
+	tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
+	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
+				adev->df.hash_status.hash_64k);
+	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
+				adev->df.hash_status.hash_2m);
+	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
+				adev->df.hash_status.hash_1g);
+	WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
+}
+
 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
 {
 	if (adev->asic_type != CHIP_ARCTURUS)
@@ -3667,6 +3802,8 @@ static int gfx_v9_0_hw_init(void *handle)
 
 	gfx_v9_0_constants_init(adev);
 
+	gfx_v9_0_init_tcp_config(adev);
+
 	r = adev->gfx.rlc.funcs->resume(adev);
 	if (r)
 		return r;
@@ -3678,36 +3815,6 @@ static int gfx_v9_0_hw_init(void *handle)
 	return r;
 }
 
-static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
-{
-	int r, i;
-	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
-
-	r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
-	if (r)
-		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
-
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
-
-		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
-		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
-						PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
-						PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
-						PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
-						PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
-		amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
-		amdgpu_ring_write(kiq_ring, 0);
-		amdgpu_ring_write(kiq_ring, 0);
-		amdgpu_ring_write(kiq_ring, 0);
-	}
-	r = amdgpu_ring_test_helper(kiq_ring);
-	if (r)
-		DRM_ERROR("KCQ disable failed\n");
-
-	return r;
-}
-
 static int gfx_v9_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3719,7 +3826,7 @@ static int gfx_v9_0_hw_fini(void *handle)
 	/* DF freeze and kcq disable will fail */
 	if (!amdgpu_ras_intr_triggered())
 		/* disable KCQ to avoid CPC touch memory not valid anymore */
-		gfx_v9_0_kcq_disable(adev);
+		amdgpu_gfx_disable_kcq(adev);
 
 	if (amdgpu_sriov_vf(adev)) {
 		gfx_v9_0_cp_gfx_enable(adev, false);
@@ -3936,33 +4043,61 @@ static const u32 sgpr_init_compute_shader[] =
 	0xbe800080, 0xbf810000,
 };
 
+/* When the register arrays below are changed, please update gpr_reg_size
+ * and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds
+ * to cover all gfx9 ASICs. */
 static const struct soc15_reg_entry vgpr_init_regs[] = {
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x100007f }, /* VGPRS=15 (256 logical VGPRs, SGPRS=1 (16 SGPRs, BULKY=1 */
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
-};
-
-static const struct soc15_reg_entry sgpr_init_regs[] = {
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x340 }, /* SGPRS=13 (112 GPRS) */
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
 };
 
-static const struct soc15_reg_entry sec_ded_counter_registers[] = {
+static const struct soc15_reg_entry sgpr1_init_regs[] = {
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
+};
+
+static const struct soc15_reg_entry sgpr2_init_regs[] = {
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
+};
+
+static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
@@ -3996,6 +4131,7 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = {
    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
+   { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1},
 };
 
 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
@@ -4050,10 +4186,16 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
 	struct amdgpu_ib ib;
 	struct dma_fence *f = NULL;
-	int r, i, j, k;
+	int r, i;
 	unsigned total_size, vgpr_offset, sgpr_offset;
 	u64 gpu_addr;
 
+	int compute_dim_x = adev->gfx.config.max_shader_engines *
+						adev->gfx.config.max_cu_per_sh *
+						adev->gfx.config.max_sh_per_se;
+	int sgpr_work_group_size = 5;
+	int gpr_reg_size = compute_dim_x / 16 + 6;
+
 	/* only support when RAS is enabled */
 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 		return 0;
@@ -4063,9 +4205,11 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 		return 0;
 
 	total_size =
-		((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
+		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
 	total_size +=
-		((ARRAY_SIZE(sgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
+		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
+	total_size +=
+		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
 	total_size = ALIGN(total_size, 256);
 	vgpr_offset = total_size;
 	total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
@@ -4092,7 +4236,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
 	/* VGPR */
 	/* write the register state for the compute dispatch */
-	for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i++) {
+	for (i = 0; i < gpr_reg_size; i++) {
 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
 								- PACKET3_SET_SH_REG_START;
@@ -4108,7 +4252,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
 	/* write dispatch packet */
 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
-	ib.ptr[ib.length_dw++] = 128; /* x */
+	ib.ptr[ib.length_dw++] = compute_dim_x; /* x */
 	ib.ptr[ib.length_dw++] = 1; /* y */
 	ib.ptr[ib.length_dw++] = 1; /* z */
 	ib.ptr[ib.length_dw++] =
@@ -4118,13 +4262,13 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
 
-	/* SGPR */
+	/* SGPR1 */
 	/* write the register state for the compute dispatch */
-	for (i = 0; i < ARRAY_SIZE(sgpr_init_regs); i++) {
+	for (i = 0; i < gpr_reg_size; i++) {
 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
-		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr_init_regs[i])
+		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
 								- PACKET3_SET_SH_REG_START;
-		ib.ptr[ib.length_dw++] = sgpr_init_regs[i].reg_value;
+		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
 	}
 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
 	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
@@ -4136,7 +4280,35 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
 	/* write dispatch packet */
 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
-	ib.ptr[ib.length_dw++] = 128; /* x */
+	ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
+	ib.ptr[ib.length_dw++] = 1; /* y */
+	ib.ptr[ib.length_dw++] = 1; /* z */
+	ib.ptr[ib.length_dw++] =
+		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
+
+	/* write CS partial flush packet */
+	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
+	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
+
+	/* SGPR2 */
+	/* write the register state for the compute dispatch */
+	for (i = 0; i < gpr_reg_size; i++) {
+		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
+		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
+								- PACKET3_SET_SH_REG_START;
+		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
+	}
+	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
+	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
+	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
+	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
+							- PACKET3_SET_SH_REG_START;
+	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
+	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
+
+	/* write dispatch packet */
+	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
+	ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
 	ib.ptr[ib.length_dw++] = 1; /* y */
 	ib.ptr[ib.length_dw++] = 1; /* z */
 	ib.ptr[ib.length_dw++] =
@@ -4160,18 +4332,17 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 		goto fail;
 	}
 
-	/* read back registers to clear the counters */
-	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
-		for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
-			for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
-				gfx_v9_0_select_se_sh(adev, j, 0x0, k);
-				RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
-			}
-		}
+	switch (adev->asic_type)
+	{
+	case CHIP_VEGA20:
+		gfx_v9_0_clear_ras_edc_counter(adev);
+		break;
+	case CHIP_ARCTURUS:
+		gfx_v9_4_clear_ras_edc_counter(adev);
+		break;
+	default:
+		break;
 	}
-	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
-	mutex_unlock(&adev->grbm_idx_mutex);
 
 fail:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -4189,6 +4360,7 @@ static int gfx_v9_0_early_init(void *handle)
 	else
 		adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
 	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+	gfx_v9_0_set_kiq_pm4_funcs(adev);
 	gfx_v9_0_set_ring_funcs(adev);
 	gfx_v9_0_set_irq_funcs(adev);
 	gfx_v9_0_set_gds_init(adev);
@@ -4202,10 +4374,6 @@ static int gfx_v9_0_ecc_late_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int r;
 
-	r = amdgpu_gfx_ras_late_init(adev);
-	if (r)
-		return r;
-
 	r = gfx_v9_0_do_edc_gds_workarounds(adev);
 	if (r)
 		return r;
@@ -4215,6 +4383,10 @@ static int gfx_v9_0_ecc_late_init(void *handle)
 	if (r)
 		return r;
 
+	r = amdgpu_gfx_ras_late_init(adev);
+	if (r)
+		return r;
+
 	return 0;
 }
 
@@ -4533,7 +4705,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
 					  enum amd_powergating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_PG_STATE_GATE);
 
 	switch (adev->asic_type) {
 	case CHIP_RAVEN:
@@ -4595,7 +4767,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
 	case CHIP_ARCTURUS:
 	case CHIP_RENOIR:
 		gfx_v9_0_update_gfx_clock_gating(adev,
-						 state == AMD_CG_STATE_GATE ? true : false);
+						 state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
@@ -4612,12 +4784,12 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
 		*flags = 0;
 
 	/* AMD_CG_SUPPORT_GFX_MGCG */
-	data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
 
 	/* AMD_CG_SUPPORT_GFX_CGCG */
-	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
+	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
 
@@ -4626,18 +4798,18 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
 
 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
-	data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
+	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
 
 	/* AMD_CG_SUPPORT_GFX_CP_LS */
-	data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
+	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
 
 	if (adev->asic_type != CHIP_ARCTURUS) {
 		/* AMD_CG_SUPPORT_GFX_3D_CGCG */
-		data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
+		data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
 		if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
 			*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
 
@@ -5108,6 +5280,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 {
 	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
 	amdgpu_ring_write(ring, 0 |	/* src: register*/
@@ -5116,9 +5289,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 	amdgpu_ring_write(ring, reg);
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 }
 
 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -5440,7 +5613,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
 }
 
 
-static const struct ras_gfx_subblock_reg ras_subblock_regs[] = {
+static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
 	{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
 	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
 	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
@@ -5888,7 +6061,7 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
 	int ret;
 	struct ta_ras_trigger_error_input block_info = { 0 };
 
-	if (adev->asic_type != CHIP_VEGA20)
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 		return -EINVAL;
 
 	if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
@@ -6013,7 +6186,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
 
-	for (i = 0; i < 16; i++) {
+	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
 		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
 		data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
 
@@ -6032,7 +6205,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 		}
 	}
 
-	for (i = 0; i < 7; i++) {
+	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
 		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
 		data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
 
@@ -6053,7 +6226,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 		}
 	}
 
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
 		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
 
@@ -6065,7 +6238,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 		}
 	}
 
-	for (i = 0; i < 32; i++) {
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
 		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
 
@@ -6092,36 +6265,36 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int __get_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
 	uint32_t se_id, uint32_t inst_id, uint32_t value,
 	uint32_t *sec_count, uint32_t *ded_count)
 {
 	uint32_t i;
 	uint32_t sec_cnt, ded_cnt;
 
-	for (i = 0; i < ARRAY_SIZE(ras_subblock_regs); i++) {
-		if(ras_subblock_regs[i].reg_offset != reg->reg_offset ||
-			ras_subblock_regs[i].seg != reg->seg ||
-			ras_subblock_regs[i].inst != reg->inst)
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
+		if(gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
+			gfx_v9_0_ras_fields[i].seg != reg->seg ||
+			gfx_v9_0_ras_fields[i].inst != reg->inst)
 			continue;
 
 		sec_cnt = (value &
-				ras_subblock_regs[i].sec_count_mask) >>
-				ras_subblock_regs[i].sec_count_shift;
+				gfx_v9_0_ras_fields[i].sec_count_mask) >>
+				gfx_v9_0_ras_fields[i].sec_count_shift;
 		if (sec_cnt) {
 			DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
-				ras_subblock_regs[i].name,
+				gfx_v9_0_ras_fields[i].name,
 				se_id, inst_id,
 				sec_cnt);
 			*sec_count += sec_cnt;
 		}
 
 		ded_cnt = (value &
-				ras_subblock_regs[i].ded_count_mask) >>
-				ras_subblock_regs[i].ded_count_shift;
+				gfx_v9_0_ras_fields[i].ded_count_mask) >>
+				gfx_v9_0_ras_fields[i].ded_count_shift;
 		if (ded_cnt) {
 			DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
-				ras_subblock_regs[i].name,
+				gfx_v9_0_ras_fields[i].name,
 				se_id, inst_id,
 				ded_cnt);
 			*ded_count += ded_cnt;
@@ -6131,6 +6304,58 @@ static int __get_ras_error_count(const struct soc15_reg_entry *reg,
 	return 0;
 }
 
+static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev)
+{
+	int i, j, k;
+
+	/* read back registers to clear the counters */
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
+		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
+			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
+				gfx_v9_0_select_se_sh(adev, j, 0x0, k);
+				RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
+			}
+		}
+	}
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
+	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
+
+	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
+	}
+
+	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+}
+
 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 					  void *ras_error_status)
 {
@@ -6139,7 +6364,7 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 	uint32_t i, j, k;
 	uint32_t reg_value;
 
-	if (adev->asic_type != CHIP_VEGA20)
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 		return -EINVAL;
 
 	err_data->ue_count = 0;
@@ -6147,14 +6372,14 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 
 	mutex_lock(&adev->grbm_idx_mutex);
 
-	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
-		for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
-			for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
+		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
+			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
 				gfx_v9_0_select_se_sh(adev, j, 0, k);
 				reg_value =
-					RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
+					RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
 				if (reg_value)
-					__get_ras_error_count(&sec_ded_counter_registers[i],
+					gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i],
 							j, k, reg_value,
 							&sec_count, &ded_count);
 			}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
new file mode 100644
index 0000000..f099f13
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -0,0 +1,978 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_pm.h"
+
+#include "gc/gc_9_4_1_offset.h"
+#include "gc/gc_9_4_1_sh_mask.h"
+#include "soc15_common.h"
+
+#include "gfx_v9_4.h"
+#include "amdgpu_ras.h"
+
+static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = {
+	/* CPC */
+	{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1 },
+	/* DC */
+	{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1 },
+	/* CPF */
+	{ SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1 },
+	/* GDS */
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1 },
+	/* SPI */
+	{ SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 },
+	/* SQ */
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16 },
+	/* SQC */
+	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 0, 4, 6 },
+	/* TA */
+	{ SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16 },
+	/* TCA */
+	{ SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2 },
+	/* TCC */
+	{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16 },
+	/* TCI */
+	{ SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72 },
+	/* TCP */
+	{ SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16 },
+	/* TD */
+	{ SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16 },
+	/* GCEA */
+	{ SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 1, 32 },
+	/* RLC */
+	{ SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 0, 1, 1 },
+};
+
+static void gfx_v9_4_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+				  u32 sh_num, u32 instance)
+{
+	u32 data;
+
+	if (instance == 0xffffffff)
+		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
+				     INSTANCE_BROADCAST_WRITES, 1);
+	else
+		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
+				     instance);
+
+	if (se_num == 0xffffffff)
+		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
+				     1);
+	else
+		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
+
+	if (sh_num == 0xffffffff)
+		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES,
+				     1);
+	else
+		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+
+	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
+}
+
+static const struct soc15_ras_field_entry gfx_v9_4_ras_fields[] = {
+	/* CPC */
+	{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
+	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
+	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
+	{ "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
+	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
+	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) },
+	{ "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
+	  SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1),
+	  SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) },
+	{ "CPC_DC_CSINVOC_RAM_ME1",
+	  SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1),
+	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) },
+	{ "CPC_DC_RESTORE_RAM_ME1",
+	  SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1),
+	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) },
+	{ "CPC_DC_CSINVOC_RAM1_ME1",
+	  SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1),
+	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) },
+	{ "CPC_DC_RESTORE_RAM1_ME1",
+	  SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1),
+	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) },
+
+	/* CPF */
+	{ "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2),
+	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) },
+	{ "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1),
+	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) },
+	{ "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
+	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
+	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) },
+
+	/* GDS */
+	{ "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC),
+	  SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) },
+	{ "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) },
+	{ "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
+	{ "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) },
+	{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
+	{ "GDS_ME1_PIPE0_PIPE_MEM",
+	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
+	{ "GDS_ME1_PIPE1_PIPE_MEM",
+	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
+	{ "GDS_ME1_PIPE2_PIPE_MEM",
+	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
+	{ "GDS_ME1_PIPE3_PIPE_MEM",
+	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
+	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
+
+	/* SPI */
+	{ "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) },
+	{ "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) },
+	{ "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) },
+	{ "SPI_WB_GRANT_61", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_SEC_COUNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_DED_COUNT) },
+	{ "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT),
+	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) },
+
+	/* SQ */
+	{ "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) },
+	{ "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) },
+	{ "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) },
+	{ "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) },
+	{ "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) },
+	{ "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) },
+	{ "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
+	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) },
+
+	/* SQC */
+	{ "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
+	{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
+	{ "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
+	{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
+	{ "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
+	{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
+	{ "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
+	{ "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
+	{ "SQC_INST_BANKA_UTCL1_MISS_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) },
+	{ "SQC_INST_BANKA_MISS_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  INST_BANKA_MISS_FIFO_DED_COUNT) },
+	{ "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
+	{ "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
+	{ "SQC_DATA_BANKA_HIT_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) },
+	{ "SQC_DATA_BANKA_MISS_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  DATA_BANKA_MISS_FIFO_DED_COUNT) },
+	{ "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
+	{ "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
+	{ "SQC_INST_BANKB_UTCL1_MISS_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) },
+	{ "SQC_INST_BANKB_MISS_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  INST_BANKB_MISS_FIFO_DED_COUNT) },
+	{ "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
+	{ "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
+	{ "SQC_DATA_BANKB_HIT_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) },
+	{ "SQC_DATA_BANKB_MISS_FIFO",
+	  SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+			  DATA_BANKB_MISS_FIFO_DED_COUNT) },
+	{ "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
+
+	/* TA */
+	{ "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
+	{ "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_DED_COUNT) },
+	{ "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) },
+	{ "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) },
+	{ "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) },
+
+	/* TCA */
+	{ "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+	  SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) },
+	{ "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+	  SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) },
+
+	/* TCC */
+	{ "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
+	{ "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
+	{ "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
+	{ "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
+	{ "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) },
+	{ "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) },
+	{ "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) },
+	{ "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) },
+	{ "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) },
+	{ "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) },
+	{ "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) },
+	{ "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
+	{ "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) },
+	{ "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) },
+	{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) },
+
+	/* TCI */
+	{ "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
+	  SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) },
+
+	/* TCP */
+	{ "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
+	{ "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
+	{ "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) },
+	{ "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) },
+	{ "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0, 0 },
+	{ "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
+	{ "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
+	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
+
+	/* TD */
+	{ "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
+	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
+	{ "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
+	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
+	{ "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+	  SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT),
+	  SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) },
+
+	/* EA */
+	{ "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
+	{ "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
+	{ "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
+	{ "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
+	{ "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
+	{ "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
+	{ "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
+	{ "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
+	{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 },
+	{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) },
+	{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 },
+	{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) },
+	{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 },
+	{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) },
+	{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 },
+	{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) },
+	{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0, 0 },
+	{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_DATAMEM_DED_COUNT) },
+	{ "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 },
+	{ "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) },
+	{ "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 },
+	{ "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) },
+	{ "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) },
+	{ "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) },
+	{ "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) },
+	{ "EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) },
+	{ "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) },
+	{ "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) },
+	{ "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) },
+	{ "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) },
+	{ "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+	  SOC15_REG_FIELD(GCEA_EDC_CNT, MAM_AFMEM_SEC_COUNT), 0, 0 },
+	{ "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) },
+
+	/* RLC */
+	{ "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) },
+	{ "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) },
+	{ "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) },
+	{ "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) },
+	{ "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) },
+	{ "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) },
+	{ "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT),
+	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) },
+};
+
+static const char * const vml2_mems[] = {
+	"UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
+	"UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
+	"UTC_VML2_BANK_CACHE_0_4K_MEM0",
+	"UTC_VML2_BANK_CACHE_0_4K_MEM1",
+	"UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
+	"UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
+	"UTC_VML2_BANK_CACHE_1_4K_MEM0",
+	"UTC_VML2_BANK_CACHE_1_4K_MEM1",
+	"UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
+	"UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
+	"UTC_VML2_BANK_CACHE_2_4K_MEM0",
+	"UTC_VML2_BANK_CACHE_2_4K_MEM1",
+	"UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
+	"UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
+	"UTC_VML2_BANK_CACHE_3_4K_MEM0",
+	"UTC_VML2_BANK_CACHE_3_4K_MEM1",
+	"UTC_VML2_IFIFO_GROUP0",
+	"UTC_VML2_IFIFO_GROUP1",
+	"UTC_VML2_IFIFO_GROUP2",
+	"UTC_VML2_IFIFO_GROUP3",
+	"UTC_VML2_IFIFO_GROUP4",
+	"UTC_VML2_IFIFO_GROUP5",
+	"UTC_VML2_IFIFO_GROUP6",
+	"UTC_VML2_IFIFO_GROUP7",
+	"UTC_VML2_IFIFO_GROUP8",
+	"UTC_VML2_IFIFO_GROUP9",
+	"UTC_VML2_IFIFO_GROUP10",
+	"UTC_VML2_IFIFO_GROUP11",
+	"UTC_VML2_IFIFO_GROUP12",
+	"UTC_VML2_IFIFO_GROUP13",
+	"UTC_VML2_IFIFO_GROUP14",
+	"UTC_VML2_IFIFO_GROUP15",
+	"UTC_VML2_IFIFO_GROUP16",
+	"UTC_VML2_IFIFO_GROUP17",
+	"UTC_VML2_IFIFO_GROUP18",
+	"UTC_VML2_IFIFO_GROUP19",
+	"UTC_VML2_IFIFO_GROUP20",
+	"UTC_VML2_IFIFO_GROUP21",
+	"UTC_VML2_IFIFO_GROUP22",
+	"UTC_VML2_IFIFO_GROUP23",
+	"UTC_VML2_IFIFO_GROUP24",
+};
+
+static const char * const vml2_walker_mems[] = {
+	"UTC_VML2_CACHE_PDE0_MEM0",
+	"UTC_VML2_CACHE_PDE0_MEM1",
+	"UTC_VML2_CACHE_PDE1_MEM0",
+	"UTC_VML2_CACHE_PDE1_MEM1",
+	"UTC_VML2_CACHE_PDE2_MEM0",
+	"UTC_VML2_CACHE_PDE2_MEM1",
+	"UTC_VML2_RDIF_ARADDRS",
+	"UTC_VML2_RDIF_LOG_FIFO",
+	"UTC_VML2_QUEUE_REQ",
+	"UTC_VML2_QUEUE_RET",
+};
+
+static const char * const utcl2_router_mems[] = {
+	"UTCL2_ROUTER_GROUP0_VML2_REQ_FIFO0",
+	"UTCL2_ROUTER_GROUP1_VML2_REQ_FIFO1",
+	"UTCL2_ROUTER_GROUP2_VML2_REQ_FIFO2",
+	"UTCL2_ROUTER_GROUP3_VML2_REQ_FIFO3",
+	"UTCL2_ROUTER_GROUP4_VML2_REQ_FIFO4",
+	"UTCL2_ROUTER_GROUP5_VML2_REQ_FIFO5",
+	"UTCL2_ROUTER_GROUP6_VML2_REQ_FIFO6",
+	"UTCL2_ROUTER_GROUP7_VML2_REQ_FIFO7",
+	"UTCL2_ROUTER_GROUP8_VML2_REQ_FIFO8",
+	"UTCL2_ROUTER_GROUP9_VML2_REQ_FIFO9",
+	"UTCL2_ROUTER_GROUP10_VML2_REQ_FIFO10",
+	"UTCL2_ROUTER_GROUP11_VML2_REQ_FIFO11",
+	"UTCL2_ROUTER_GROUP12_VML2_REQ_FIFO12",
+	"UTCL2_ROUTER_GROUP13_VML2_REQ_FIFO13",
+	"UTCL2_ROUTER_GROUP14_VML2_REQ_FIFO14",
+	"UTCL2_ROUTER_GROUP15_VML2_REQ_FIFO15",
+	"UTCL2_ROUTER_GROUP16_VML2_REQ_FIFO16",
+	"UTCL2_ROUTER_GROUP17_VML2_REQ_FIFO17",
+	"UTCL2_ROUTER_GROUP18_VML2_REQ_FIFO18",
+	"UTCL2_ROUTER_GROUP19_VML2_REQ_FIFO19",
+	"UTCL2_ROUTER_GROUP20_VML2_REQ_FIFO20",
+	"UTCL2_ROUTER_GROUP21_VML2_REQ_FIFO21",
+	"UTCL2_ROUTER_GROUP22_VML2_REQ_FIFO22",
+	"UTCL2_ROUTER_GROUP23_VML2_REQ_FIFO23",
+	"UTCL2_ROUTER_GROUP24_VML2_REQ_FIFO24",
+};
+
+static const char * const atc_l2_cache_2m_mems[] = {
+	"UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
+	"UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
+	"UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
+	"UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
+};
+
+static const char * const atc_l2_cache_4k_mems[] = {
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
+	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
+	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
+};
+
+static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
+					 struct ras_err_data *err_data)
+{
+	uint32_t i, data;
+	uint32_t sec_count, ded_count;
+
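+	/* Initialize the ECC/DSM index and control registers before
+	 * reading back the per-instance counters below.
+	 */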
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0);
+
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0);
+
+	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i);
+		data = RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL);
+
+		sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT);
+		if (sec_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+				 vml2_mems[i], sec_count);
+			err_data->ce_count += sec_count;
+		}
+
+		ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT);
+		if (ded_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+				 vml2_mems[i], ded_count);
+			err_data->ue_count += ded_count;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i);
+		data = RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL);
+
+		sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
+					  SEC_COUNT);
+		if (sec_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+				 vml2_walker_mems[i], sec_count);
+			err_data->ce_count += sec_count;
+		}
+
+		ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
+					  DED_COUNT);
+		if (ded_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+				 vml2_walker_mems[i], ded_count);
+			err_data->ue_count += ded_count;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) {
+		WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i);
+		data = RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL);
+
+		sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT);
+		if (sec_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+				 utcl2_router_mems[i], sec_count);
+			err_data->ce_count += sec_count;
+		}
+
+		ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT);
+		if (ded_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+				 utcl2_router_mems[i], ded_count);
+			err_data->ue_count += ded_count;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i);
+		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL);
+
+		sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
+					  SEC_COUNT);
+		if (sec_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+				 atc_l2_cache_2m_mems[i], sec_count);
+			err_data->ce_count += sec_count;
+		}
+
+		ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
+					  DED_COUNT);
+		if (ded_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+				 atc_l2_cache_2m_mems[i], ded_count);
+			err_data->ue_count += ded_count;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i);
+		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL);
+
+		sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
+					  SEC_COUNT);
+		if (sec_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+				 atc_l2_cache_4k_mems[i], sec_count);
+			err_data->ce_count += sec_count;
+		}
+
+		ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
+					  DED_COUNT);
+		if (ded_count) {
+			DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+				 atc_l2_cache_4k_mems[i], ded_count);
+			err_data->ue_count += ded_count;
+		}
+	}
+
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+
+	return 0;
+}
+
+static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
+				    uint32_t se_id, uint32_t inst_id,
+				    uint32_t value, uint32_t *sec_count,
+				    uint32_t *ded_count)
+{
+	uint32_t i;
+	uint32_t sec_cnt, ded_cnt;
+
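+	/* Match this register against the RAS field table and accumulate
+	 * any SEC/DED counts extracted via the field masks and shifts.
+	 */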
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_ras_fields); i++) {
+		if (gfx_v9_4_ras_fields[i].reg_offset != reg->reg_offset ||
+		    gfx_v9_4_ras_fields[i].seg != reg->seg ||
+		    gfx_v9_4_ras_fields[i].inst != reg->inst)
+			continue;
+
+		sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >>
+			  gfx_v9_4_ras_fields[i].sec_count_shift;
+		if (sec_cnt) {
+			DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+				 gfx_v9_4_ras_fields[i].name, se_id, inst_id,
+				 sec_cnt);
+			*sec_count += sec_cnt;
+		}
+
+		ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >>
+			  gfx_v9_4_ras_fields[i].ded_count_shift;
+		if (ded_cnt) {
+			DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+				 gfx_v9_4_ras_fields[i].name, se_id, inst_id,
+				 ded_cnt);
+			*ded_count += ded_cnt;
+		}
+	}
+
+	return 0;
+}
+
+int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+				   void *ras_error_status)
+{
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+	uint32_t sec_count = 0, ded_count = 0;
+	uint32_t i, j, k;
+	uint32_t reg_value;
+
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+		return -EINVAL;
+
+	err_data->ue_count = 0;
+	err_data->ce_count = 0;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+
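+	/* Read every EDC counter register for each SE/instance while
+	 * holding grbm_idx_mutex and accumulate the SEC/DED counts.
+	 */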
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
+		for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
+			for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance;
+			     k++) {
+				gfx_v9_4_select_se_sh(adev, j, 0, k);
+				reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
+					gfx_v9_4_edc_counter_regs[i]));
+				if (reg_value)
+					gfx_v9_4_ras_error_count(
+						&gfx_v9_4_edc_counter_regs[i],
+						j, k, reg_value, &sec_count,
+						&ded_count);
+			}
+		}
+	}
+
+	err_data->ce_count += sec_count;
+	err_data->ue_count += ded_count;
+
+	gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	gfx_v9_4_query_utc_edc_status(adev, err_data);
+
+	return 0;
+}
+
+void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev)
+{
+	int i, j, k;
+
+	mutex_lock(&adev->grbm_idx_mutex);
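+	/* The EDC counters are cleared on read, so reading each register
+	 * once per SE/instance resets it.
+	 */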
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
+		for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
+			for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance;
+			     k++) {
+				gfx_v9_4_select_se_sh(adev, j, 0x0, k);
+				RREG32(SOC15_REG_ENTRY_OFFSET(
+					gfx_v9_4_edc_counter_regs[i]));
+			}
+		}
+	}
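+	/* Restore GRBM_GFX_INDEX to broadcast mode (all SEs/SHs/instances) */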
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0);
+
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0);
+
+	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) {
+		WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i);
+		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i);
+		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL);
+	}
+
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+}
+
+int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
+{
+	struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
+	int ret;
+	struct ta_ras_trigger_error_input block_info = { 0 };
+
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+		return -EINVAL;
+
+	block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
+	block_info.sub_block_index = info->head.sub_block_index;
+	block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
+	block_info.address = info->address;
+	block_info.value = info->value;
+
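+	/* Hand the request off to the PSP RAS TA to perform the injection */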
+	mutex_lock(&adev->grbm_idx_mutex);
+	ret = psp_ras_trigger_error(&adev->psp, &block_info);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
new file mode 100644
index 0000000..2e3f6f7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V9_4_H__
+#define __GFX_V9_4_H__
+
+void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev);
+
+int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+				   void *ras_error_status);
+
+int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
+			      void *inject_if);
+
+#endif /* __GFX_V9_4_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index e91bd79..1a2f18b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -75,40 +75,45 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
 	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
 
-	/* Program the system aperture low logical page number. */
-	WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+	if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
+		/* Program the system aperture low logical page number. */
+		WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
-	if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
-		/*
-		 * Raven2 has a HW issue that it is unable to use the vram which
-		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
-		 * workaround that increase system aperture high address (add 1)
-		 * to get rid of the VM fault and hardware hang.
-		 */
-		WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-			     max((adev->gmc.fb_end >> 18) + 0x1,
-				 adev->gmc.agp_end >> 18));
-	else
-		WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+		if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+			/*
+			 * Raven2 has a HW issue that makes it unable to use
+			 * the vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
+			 * So here is the workaround that increases the system
+			 * aperture high address (add 1) to get rid of the VM
+			 * fault and hardware hang.
+			 */
+			WREG32_SOC15_RLC(GC, 0,
+					 mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+					 max((adev->gmc.fb_end >> 18) + 0x1,
+					     adev->gmc.agp_end >> 18));
+		else
+			WREG32_SOC15_RLC(
+				GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
 
-	/* Set default page address. */
-	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
-		+ adev->vm_manager.vram_base_offset;
-	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
-		     (u32)(value >> 12));
-	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
-		     (u32)(value >> 44));
+		/* Set default page address. */
+		value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+			adev->vm_manager.vram_base_offset;
+		WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+			     (u32)(value >> 12));
+		WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+			     (u32)(value >> 44));
 
-	/* Program "protection fault". */
-	WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
-		     (u32)(adev->dummy_page_addr >> 12));
-	WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
-		     (u32)((u64)adev->dummy_page_addr >> 44));
+		/* Program "protection fault". */
+		WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+			     (u32)(adev->dummy_page_addr >> 12));
+		WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+			     (u32)((u64)adev->dummy_page_addr >> 44));
 
-	WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
-		       ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+		WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
+			       ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+	}
 }
 
 static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
@@ -264,7 +269,7 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
 
 int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
 {
-	if (amdgpu_sriov_vf(adev)) {
+	if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) {
 		/*
 		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
 		 * VF copy registers so vbios post doesn't program them, for
@@ -280,10 +285,12 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
 	gfxhub_v1_0_init_gart_aperture_regs(adev);
 	gfxhub_v1_0_init_system_aperture_regs(adev);
 	gfxhub_v1_0_init_tlb_regs(adev);
-	gfxhub_v1_0_init_cache_regs(adev);
+	if (!amdgpu_sriov_vf(adev))
+		gfxhub_v1_0_init_cache_regs(adev);
 
 	gfxhub_v1_0_enable_system_domain(adev);
-	gfxhub_v1_0_disable_identity_aperture(adev);
+	if (!amdgpu_sriov_vf(adev))
+		gfxhub_v1_0_disable_identity_aperture(adev);
 	gfxhub_v1_0_setup_vmid_config(adev);
 	gfxhub_v1_0_program_invalidation(adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index f572533..9775eca 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -30,6 +30,8 @@
 #include "hdp/hdp_5_0_0_sh_mask.h"
 #include "gc/gc_10_1_0_sh_mask.h"
 #include "mmhub/mmhub_2_0_0_sh_mask.h"
+#include "athub/athub_2_0_0_sh_mask.h"
+#include "athub/athub_2_0_0_offset.h"
 #include "dcn/dcn_2_0_0_offset.h"
 #include "dcn/dcn_2_0_0_sh_mask.h"
 #include "oss/osssys_5_0_0_offset.h"
@@ -37,6 +39,7 @@
 #include "navi10_enum.h"
 
 #include "soc15.h"
+#include "soc15d.h"
 #include "soc15_common.h"
 
 #include "nbio_v2_3.h"
@@ -234,6 +237,19 @@ static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
 		(!amdgpu_sriov_vf(adev)));
 }
 
+static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
+					struct amdgpu_device *adev,
+					uint8_t vmid, uint16_t *p_pasid)
+{
+	uint32_t value;
+
+	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+		     + vmid);
+	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+
+	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+}
+
 /*
  * GART
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -246,7 +262,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 {
 	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
 	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
-	u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+	u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+	u32 tmp;
 	/* Use register 17 for GART */
 	const unsigned eng = 17;
 	unsigned int i;
@@ -273,7 +290,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
 	}
 
-	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
 
 	/*
 	 * Issue a dummy read to wait for the ACK register to be cleared
@@ -380,6 +397,64 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
 }
 
+/**
+ * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flush
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+					uint16_t pasid, uint32_t flush_type,
+					bool all_hub)
+{
+	int vmid, i;
+	signed long r;
+	uint32_t seq;
+	uint16_t queried_pasid;
+	bool ret;
+	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
+		spin_lock(&adev->gfx.kiq.ring_lock);
+		/* 2 dwords flush + 8 dwords fence */
+		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
+		kiq->pmf->kiq_invalidate_tlbs(ring,
+					pasid, flush_type, all_hub);
+		amdgpu_fence_emit_polling(ring, &seq);
+		amdgpu_ring_commit(ring);
+		spin_unlock(&adev->gfx.kiq.ring_lock);
+		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+		if (r < 1) {
+			DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+			return -ETIME;
+		}
+
+		return 0;
+	}
+
+	for (vmid = 1; vmid < 16; vmid++) {
+
+		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
+				&queried_pasid);
+		if (ret && queried_pasid == pasid) {
+			if (all_hub) {
+				for (i = 0; i < adev->num_vmhubs; i++)
+					gmc_v10_0_flush_gpu_tlb(adev, vmid,
+							i, flush_type);
+			} else {
+				gmc_v10_0_flush_gpu_tlb(adev, vmid,
+						AMDGPU_GFXHUB_0, flush_type);
+			}
+			break;
+		}
+	}
+
+	return 0;
+}
+
 static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 					     unsigned vmid, uint64_t pd_addr)
 {
@@ -531,6 +606,7 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
 
 static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
 	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
+	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
 	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
 	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
 	.map_mtype = gmc_v10_0_map_mtype,
@@ -564,22 +640,13 @@ static int gmc_v10_0_early_init(void *handle)
 static int gmc_v10_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
-	unsigned i;
+	int r;
 
-	for(i = 0; i < adev->num_rings; ++i) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		unsigned vmhub = ring->funcs->vmhub;
+	amdgpu_bo_late_init(adev);
 
-		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
-		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
-			 ring->idx, ring->name, ring->vm_inv_eng,
-			 ring->funcs->vmhub);
-	}
-
-	/* Engine 17 is used for GART flushes */
-	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
-		BUG_ON(vm_inv_eng[i] > 17);
+	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
+	if (r)
+		return r;
 
 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 }
@@ -731,6 +798,10 @@ static int gmc_v10_0_sw_init(void *handle)
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
 			      VMC_1_0__SRCID__VM_FAULT,
 			      &adev->gmc.vm_fault);
+
+	if (r)
+		return r;
+
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
 			      UTCL2_1_0__SRCID__FAULT,
 			      &adev->gmc.vm_fault);
@@ -743,15 +814,6 @@ static int gmc_v10_0_sw_init(void *handle)
 	 */
 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
 
-	/*
-	 * Reserve 8M stolen memory for navi10 like vega10
-	 * TODO: will check if it's really needed on asic.
-	 */
-	if (amdgpu_emu_mode == 1)
-		adev->gmc.stolen_size = 0;
-	else
-		adev->gmc.stolen_size = 9 * 1024 *1024;
-
 	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
 	if (r) {
 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index f08e533..9da9596 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -381,7 +381,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
 #ifdef CONFIG_X86_64
-	if (adev->flags & AMD_IS_APU) {
+	if (adev->flags & AMD_IS_APU &&
+	    adev->gmc.real_vram_size > adev->gmc.aper_size) {
 		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
 		adev->gmc.aper_size = adev->gmc.real_vram_size;
 	}
@@ -418,6 +419,38 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+/**
+ * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flushed
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+					uint16_t pasid, uint32_t flush_type,
+					bool all_hub)
+{
+	int vmid;
+	unsigned int tmp;
+
+	if (adev->in_gpu_reset)
+		return -EIO;
+
+	for (vmid = 1; vmid < 16; vmid++) {
+
+		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
+			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
+			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+			RREG32(mmVM_INVALIDATE_RESPONSE);
+			break;
+		}
+	}
+
+	return 0;
+}
+
 /*
  * GART
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -1333,6 +1366,7 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
 
 static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
 	.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
+	.flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid,
 	.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
 	.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
 	.set_prt = gmc_v7_0_set_prt,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 6d96d40..27d83204 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -620,6 +620,39 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+/**
+ * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flushed
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+					uint16_t pasid, uint32_t flush_type,
+					bool all_hub)
+{
+	int vmid;
+	unsigned int tmp;
+
+	if (adev->in_gpu_reset)
+		return -EIO;
+
+	for (vmid = 1; vmid < 16; vmid++) {
+
+		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
+			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
+			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+			RREG32(mmVM_INVALIDATE_RESPONSE);
+			break;
+		}
+	}
+
+	return 0;
+
+}
+
 /*
  * GART
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -1700,6 +1733,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
 
 static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
 	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
+	.flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
 	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
 	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
 	.set_prt = gmc_v8_0_set_prt,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index a5b68b5..90216ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -38,10 +38,12 @@
 #include "dce/dce_12_0_sh_mask.h"
 #include "vega10_enum.h"
 #include "mmhub/mmhub_1_0_offset.h"
+#include "athub/athub_1_0_sh_mask.h"
 #include "athub/athub_1_0_offset.h"
 #include "oss/osssys_4_0_offset.h"
 
 #include "soc15.h"
+#include "soc15d.h"
 #include "soc15_common.h"
 #include "umc/umc_6_0_sh_mask.h"
 
@@ -207,6 +209,11 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
 {
 	u32 bits, i, tmp, reg;
 
+	/* Devices newer than VEGA10/12 shall have these programming
+	 * sequences performed by PSP BL */
+	if (adev->asic_type >= CHIP_VEGA20)
+		return 0;
+
 	bits = 0x7f;
 
 	switch (state) {
@@ -393,8 +400,10 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->gmc.vm_fault.num_types = 1;
 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 
-	adev->gmc.ecc_irq.num_types = 1;
-	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
+	if (!amdgpu_sriov_vf(adev)) {
+		adev->gmc.ecc_irq.num_types = 1;
+		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
+	}
 }
 
 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
@@ -434,6 +443,18 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
 		   adev->pdev->device == 0x15d8)));
 }
 
+static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
+					uint8_t vmid, uint16_t *p_pasid)
+{
+	uint32_t value;
+
+	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+		     + vmid);
+	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+
+	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+}
+
 /*
  * GART
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -455,13 +476,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 {
 	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
 	const unsigned eng = 17;
-	u32 j, tmp;
+	u32 j, inv_req, tmp;
 	struct amdgpu_vmhub *hub;
 
 	BUG_ON(vmhub >= adev->num_vmhubs);
 
 	hub = &adev->vmhub[vmhub];
-	tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+	inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
 
 	/* This is necessary for a HW workaround under SRIOV as well
 	 * as GFXOFF under bare metal
@@ -472,7 +493,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 		uint32_t req = hub->vm_inv_eng0_req + eng;
 		uint32_t ack = hub->vm_inv_eng0_ack + eng;
 
-		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
+		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
 				1 << vmid);
 		return;
 	}
@@ -500,7 +521,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
 	}
 
-	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
 
 	/*
 	 * Issue a dummy read to wait for the ACK register to be cleared
@@ -532,6 +553,68 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
 }
 
+/**
+ * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flushed
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+					uint16_t pasid, uint32_t flush_type,
+					bool all_hub)
+{
+	int vmid, i;
+	signed long r;
+	uint32_t seq;
+	uint16_t queried_pasid;
+	bool ret;
+	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+	if (adev->in_gpu_reset)
+		return -EIO;
+
+	if (ring->sched.ready) {
+		spin_lock(&adev->gfx.kiq.ring_lock);
+		/* 2 dwords flush + 8 dwords fence */
+		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
+		kiq->pmf->kiq_invalidate_tlbs(ring,
+					pasid, flush_type, all_hub);
+		amdgpu_fence_emit_polling(ring, &seq);
+		amdgpu_ring_commit(ring);
+		spin_unlock(&adev->gfx.kiq.ring_lock);
+		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+		if (r < 1) {
+			DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+			return -ETIME;
+		}
+
+		return 0;
+	}
+
+	for (vmid = 1; vmid < 16; vmid++) {
+
+		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
+				&queried_pasid);
+		if (ret && queried_pasid == pasid) {
+			if (all_hub) {
+				for (i = 0; i < adev->num_vmhubs; i++)
+					gmc_v9_0_flush_gpu_tlb(adev, vmid,
+							i, flush_type);
+			} else {
+				gmc_v9_0_flush_gpu_tlb(adev, vmid,
+						AMDGPU_GFXHUB_0, flush_type);
+			}
+			break;
+		}
+	}
+
+	return 0;
+
+}
+
 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 					    unsigned vmid, uint64_t pd_addr)
 {
@@ -693,6 +776,7 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
 
 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
 	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
+	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
 	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
 	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
 	.map_mtype = gmc_v9_0_map_mtype,
@@ -715,7 +799,15 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
 		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
 		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
-		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET;
+		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
+		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
+		adev->umc.funcs = &umc_v6_1_funcs;
+		break;
+	case CHIP_ARCTURUS:
+		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
+		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
+		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
+		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
 		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
 		adev->umc.funcs = &umc_v6_1_funcs;
 		break;
@@ -730,6 +822,9 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
 	case CHIP_VEGA20:
 		adev->mmhub.funcs = &mmhub_v1_0_funcs;
 		break;
+	case CHIP_ARCTURUS:
+		adev->mmhub.funcs = &mmhub_v9_4_funcs;
+		break;
 	default:
 		break;
 	}
@@ -779,36 +874,6 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
 	}
 }
 
-static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
-{
-	struct amdgpu_ring *ring;
-	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
-		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
-		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
-	unsigned i;
-	unsigned vmhub, inv_eng;
-
-	for (i = 0; i < adev->num_rings; ++i) {
-		ring = adev->rings[i];
-		vmhub = ring->funcs->vmhub;
-
-		inv_eng = ffs(vm_inv_engs[vmhub]);
-		if (!inv_eng) {
-			dev_err(adev->dev, "no VM inv eng for ring %s\n",
-				ring->name);
-			return -EINVAL;
-		}
-
-		ring->vm_inv_eng = inv_eng - 1;
-		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
-
-		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
-			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
-	}
-
-	return 0;
-}
-
 static int gmc_v9_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -817,7 +882,7 @@ static int gmc_v9_0_late_init(void *handle)
 	if (!gmc_v9_0_keep_stolen_memory(adev))
 		amdgpu_bo_late_init(adev);
 
-	r = gmc_v9_0_allocate_vm_inv_eng(adev);
+	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
 	if (r)
 		return r;
 	/* Check if ecc is available */
@@ -825,11 +890,12 @@ static int gmc_v9_0_late_init(void *handle)
 		switch (adev->asic_type) {
 		case CHIP_VEGA10:
 		case CHIP_VEGA20:
+		case CHIP_ARCTURUS:
 			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
 			if (!r) {
 				DRM_INFO("ECC is not present.\n");
-				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
-					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
+				if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+					adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
 			} else {
 				DRM_INFO("ECC is active.\n");
 			}
@@ -1034,7 +1100,7 @@ static int gmc_v9_0_sw_init(void *handle)
 		else
 			chansize = 128;
 
-		numchan = adev->df_funcs->get_hbm_channel_number(adev);
+		numchan = adev->df.funcs->get_hbm_channel_number(adev);
 		adev->gmc.vram_width = numchan * chansize;
 	}
 
@@ -1100,11 +1166,13 @@ static int gmc_v9_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	/* interrupt sent to DF. */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
-			&adev->gmc.ecc_irq);
-	if (r)
-		return r;
+	if (!amdgpu_sriov_vf(adev)) {
+		/* interrupt sent to DF. */
+		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
+				      &adev->gmc.ecc_irq);
+		if (r)
+			return r;
+	}
 
 	/* Set the internal MC address mask
 	 * This is the max address of the GPU's
@@ -1290,12 +1358,13 @@ static int gmc_v9_0_hw_init(void *handle)
 	else
 		value = true;
 
-	gfxhub_v1_0_set_fault_enable_default(adev, value);
-	if (adev->asic_type == CHIP_ARCTURUS)
-		mmhub_v9_4_set_fault_enable_default(adev, value);
-	else
-		mmhub_v1_0_set_fault_enable_default(adev, value);
-
+	if (!amdgpu_sriov_vf(adev)) {
+		gfxhub_v1_0_set_fault_enable_default(adev, value);
+		if (adev->asic_type == CHIP_ARCTURUS)
+			mmhub_v9_4_set_fault_enable_default(adev, value);
+		else
+			mmhub_v1_0_set_fault_enable_default(adev, value);
+	}
 	for (i = 0; i < adev->num_vmhubs; ++i)
 		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
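
As an aside, not part of the patch: the new gmc_v9_0_flush_gpu_tlb_pasid() callback added above is reached through the gmc_v9_0_gmc_funcs table rather than called directly. A minimal sketch of a caller, assuming the usual adev->gmc.gmc_funcs indirection (example_flush_pasid() is a hypothetical helper, not from this series):

/* Illustrative sketch only; example_flush_pasid() is hypothetical. */
static int example_flush_pasid(struct amdgpu_device *adev, uint16_t pasid)
{
	/* flush_type 0, invalidate both the GFX and MM hubs */
	if (adev->gmc.gmc_funcs->flush_gpu_tlb_pasid)
		return adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
								0, true);
	return 0;
}

When the KIQ ring is ready, the flush becomes a single KIQ packet whose fence is polled; otherwise gmc_v9_0_flush_gpu_tlb_pasid() falls back to finding the VMID currently mapped to that pasid and flushing it on each hub.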
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
index 971c084..e0585e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
@@ -24,24 +24,6 @@
 #ifndef __GMC_V9_0_H__
 #define __GMC_V9_0_H__
 
-	/*
-	 * The latest engine allocation on gfx9 is:
-	 * Engine 2, 3: firmware
-	 * Engine 0, 1, 4~16: amdgpu ring,
-	 *                    subject to change when ring number changes
-	 * Engine 17: Gart flushes
-	 */
-#define GFXHUB_FREE_VM_INV_ENGS_BITMAP		0x1FFF3
-#define MMHUB_FREE_VM_INV_ENGS_BITMAP		0x1FFF3
-
 extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
 extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;
-
-/* amdgpu_amdkfd*.c */
-void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-				uint64_t value);
-void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-				uint64_t value);
-void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, int hubid,
-				uint32_t vmid, uint64_t value);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
new file mode 100644
index 0000000..0debfd9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -0,0 +1,586 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "vcn_v1_0.h"
+
+#include "vcn/vcn_1_0_offset.h"
+#include "vcn/vcn_1_0_sh_mask.h"
+
+static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+
+static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
+{
+	struct amdgpu_device *adev = ring->adev;
+	ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+		ring->ring[(*ptr)++] = 0;
+		ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
+	} else {
+		ring->ring[(*ptr)++] = reg_offset;
+		ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
+	}
+	ring->ring[(*ptr)++] = val;
+}
+
+static void jpeg_v1_0_decode_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	uint32_t reg, reg_offset, val, mask, i;
+
+	// 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
+	reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
+	reg_offset = (reg << 2);
+	val = lower_32_bits(ring->gpu_addr);
+	jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+	// 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
+	reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
+	reg_offset = (reg << 2);
+	val = upper_32_bits(ring->gpu_addr);
+	jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+	// 3rd to 5th: issue MEM_READ commands
+	for (i = 0; i <= 2; i++) {
+		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
+		ring->ring[ptr++] = 0;
+	}
+
+	// 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
+	reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL);
+	reg_offset = (reg << 2);
+	val = 0x13;
+	jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+	// 7th: program mmUVD_JRBC_RB_REF_DATA
+	reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA);
+	reg_offset = (reg << 2);
+	val = 0x1;
+	jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+	// 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
+	reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL);
+	reg_offset = (reg << 2);
+	val = 0x1;
+	mask = 0x1;
+
+	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
+	ring->ring[ptr++] = 0x01400200;
+	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
+	ring->ring[ptr++] = val;
+	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+		ring->ring[ptr++] = 0;
+		ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
+	} else {
+		ring->ring[ptr++] = reg_offset;
+		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
+	}
+	ring->ring[ptr++] = mask;
+
+	//9th to 21st: insert no-op
+	for (i = 0; i <= 12; i++) {
+		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+		ring->ring[ptr++] = 0;
+	}
+
+	//22nd: reset mmUVD_JRBC_RB_RPTR
+	reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_RPTR);
+	reg_offset = (reg << 2);
+	val = 0;
+	jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+	//23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
+	reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL);
+	reg_offset = (reg << 2);
+	val = 0x12;
+	jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v1_0_decode_ring_get_rptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v1_0_decode_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v1_0_decode_ring_set_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+}
+
+/**
+ * jpeg_v1_0_decode_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+static void jpeg_v1_0_decode_ring_insert_start(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x68e04);
+
+	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x80010000);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+static void jpeg_v1_0_decode_ring_insert_end(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x68e04);
+
+	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x00010000);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @fence: fence to emit
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void jpeg_v1_0_decode_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+				     unsigned flags)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, seq);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, seq);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, lower_32_bits(addr));
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, upper_32_bits(addr));
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x8);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
+	amdgpu_ring_write(ring, 0);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x01400200);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, seq);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, lower_32_bits(addr));
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, upper_32_bits(addr));
+
+	amdgpu_ring_write(ring,
+		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
+	amdgpu_ring_write(ring, 0xffffffff);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x3fbc);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x1);
+
+	/* emit trap */
+	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
+	amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer.
+ */
+static void jpeg_v1_0_decode_ring_emit_ib(struct amdgpu_ring *ring,
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					uint32_t flags)
+{
+	struct amdgpu_device *adev = ring->adev;
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, ib->length_dw);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
+
+	amdgpu_ring_write(ring,
+		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
+	amdgpu_ring_write(ring, 0);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x01400200);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x2);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
+	amdgpu_ring_write(ring, 0x2);
+}
+
+static void jpeg_v1_0_decode_ring_emit_reg_wait(struct amdgpu_ring *ring,
+					    uint32_t reg, uint32_t val,
+					    uint32_t mask)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t reg_offset = (reg << 2);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x01400200);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, val);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring,
+			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
+	} else {
+		amdgpu_ring_write(ring, reg_offset);
+		amdgpu_ring_write(ring,
+			PACKETJ(0, 0, 0, PACKETJ_TYPE3));
+	}
+	amdgpu_ring_write(ring, mask);
+}
+
+static void jpeg_v1_0_decode_ring_emit_vm_flush(struct amdgpu_ring *ring,
+		unsigned vmid, uint64_t pd_addr)
+{
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	uint32_t data0, data1, mask;
+
+	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+
+	/* wait for register write */
+	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
+	data1 = lower_32_bits(pd_addr);
+	mask = 0xffffffff;
+	jpeg_v1_0_decode_ring_emit_reg_wait(ring, data0, data1, mask);
+}
+
+static void jpeg_v1_0_decode_ring_emit_wreg(struct amdgpu_ring *ring,
+					uint32_t reg, uint32_t val)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t reg_offset = (reg << 2);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+			((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring,
+			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
+	} else {
+		amdgpu_ring_write(ring, reg_offset);
+		amdgpu_ring_write(ring,
+			PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+	}
+	amdgpu_ring_write(ring, val);
+}
+
+static void jpeg_v1_0_decode_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	int i;
+
+	WARN_ON(ring->wptr % 2 || count % 2);
+
+	for (i = 0; i < count / 2; i++) {
+		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+		amdgpu_ring_write(ring, 0);
+	}
+}
+
+static int jpeg_v1_0_set_interrupt_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
+static int jpeg_v1_0_process_interrupt(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	DRM_DEBUG("IH: JPEG decode TRAP\n");
+
+	switch (entry->src_id) {
+	case 126:
+		amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
+		break;
+	default:
+		DRM_ERROR("Unhandled interrupt: %d %d\n",
+			  entry->src_id, entry->src_data[0]);
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * jpeg_v1_0_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+int jpeg_v1_0_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->jpeg.num_jpeg_inst = 1;
+
+	jpeg_v1_0_set_dec_ring_funcs(adev);
+	jpeg_v1_0_set_irq_funcs(adev);
+
+	return 0;
+}
+
+/**
+ * jpeg_v1_0_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ */
+int jpeg_v1_0_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_ring *ring;
+	int r;
+
+	/* JPEG TRAP */
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->jpeg.inst->irq);
+	if (r)
+		return r;
+
+	ring = &adev->jpeg.inst->ring_dec;
+	sprintf(ring->name, "jpeg_dec");
+	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+	if (r)
+		return r;
+
+	adev->jpeg.internal.jpeg_pitch = adev->jpeg.inst->external.jpeg_pitch =
+		SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
+
+	return 0;
+}
+
+/**
+ * jpeg_v1_0_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * JPEG free up sw allocation
+ */
+void jpeg_v1_0_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_ring_fini(&adev->jpeg.inst[0].ring_dec);
+}
+
+/**
+ * jpeg_v1_0_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+void jpeg_v1_0_start(struct amdgpu_device *adev, int mode)
+{
+	struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+
+	if (mode == 0) {
+		WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
+				UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+		WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
+		WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
+		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
+		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
+		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+	}
+
+	/* initialize wptr */
+	ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
+
+	/* copy patch commands to the jpeg ring */
+	jpeg_v1_0_decode_ring_set_patch_ring(ring,
+		(ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+}
+
+static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
+	.type = AMDGPU_RING_TYPE_VCN_JPEG,
+	.align_mask = 0xf,
+	.nop = PACKET0(0x81ff, 0),
+	.support_64bit_ptrs = false,
+	.no_user_fence = true,
+	.vmhub = AMDGPU_MMHUB_0,
+	.extra_dw = 64,
+	.get_rptr = jpeg_v1_0_decode_ring_get_rptr,
+	.get_wptr = jpeg_v1_0_decode_ring_get_wptr,
+	.set_wptr = jpeg_v1_0_decode_ring_set_wptr,
+	.emit_frame_size =
+		6 + 6 + /* hdp invalidate / flush */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+		8 + /* jpeg_v1_0_decode_ring_emit_vm_flush */
+		26 + 26 + /* jpeg_v1_0_decode_ring_emit_fence x2 vm fence */
+		6,
+	.emit_ib_size = 22, /* jpeg_v1_0_decode_ring_emit_ib */
+	.emit_ib = jpeg_v1_0_decode_ring_emit_ib,
+	.emit_fence = jpeg_v1_0_decode_ring_emit_fence,
+	.emit_vm_flush = jpeg_v1_0_decode_ring_emit_vm_flush,
+	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
+	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
+	.insert_nop = jpeg_v1_0_decode_ring_nop,
+	.insert_start = jpeg_v1_0_decode_ring_insert_start,
+	.insert_end = jpeg_v1_0_decode_ring_insert_end,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.begin_use = vcn_v1_0_ring_begin_use,
+	.end_use = amdgpu_vcn_ring_end_use,
+	.emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
+	.emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+	adev->jpeg.inst->ring_dec.funcs = &jpeg_v1_0_decode_ring_vm_funcs;
+	DRM_INFO("JPEG decode is enabled in VM mode\n");
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v1_0_irq_funcs = {
+	.set = jpeg_v1_0_set_interrupt_state,
+	.process = jpeg_v1_0_process_interrupt,
+};
+
+static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->jpeg.inst->irq.funcs = &jpeg_v1_0_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
new file mode 100644
index 0000000..bbf33a6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __JPEG_V1_0_H__
+#define __JPEG_V1_0_H__
+
+int jpeg_v1_0_early_init(void *handle);
+int jpeg_v1_0_sw_init(void *handle);
+void jpeg_v1_0_sw_fini(void *handle);
+void jpeg_v1_0_start(struct amdgpu_device *adev, int mode);
+
+#endif /*__JPEG_V1_0_H__*/
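
As an aside, not part of the patch: unlike the JPEG v2.x code below, jpeg_v1_0 is not registered as a standalone IP block; the entry points declared above are meant to be driven from the VCN 1.0 code. A minimal sketch of that wiring, where the example_vcn1_* wrapper names are purely hypothetical:

/* Illustrative sketch only; the example_vcn1_* wrappers are hypothetical. */
static int example_vcn1_sw_init(void *handle)
{
	int r;

	/* ... VCN-specific ring/firmware setup elided ... */

	/* delegate JPEG decode ring creation to the split-out helper */
	r = jpeg_v1_0_sw_init(handle);
	if (r)
		return r;

	return 0;
}

static void example_vcn1_start(struct amdgpu_device *adev)
{
	/* ... bring up the VCN block first ... */

	/* mode 0 programs the JPEG ring registers, see jpeg_v1_0_start() */
	jpeg_v1_0_start(adev, 0);
}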
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
new file mode 100644
index 0000000..ff2e6e1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -0,0 +1,827 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+
+#include "vcn/vcn_2_0_0_offset.h"
+#include "vcn/vcn_2_0_0_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
+
+#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 			0x1bfff
+#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET				0x4029
+#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET				0x402a
+#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET				0x402b
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET		0x40ea
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 	0x40eb
+#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET				0x40cf
+#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET				0x40d1
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 		0x40e8
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x40e9
+#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET				0x4082
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET		0x40ec
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 	0x40ed
+#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET			0x4085
+#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET				0x4084
+#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET				0x4089
+#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET				0x401f
+
+#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR				0x18000
+
+static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev);
+static int jpeg_v2_0_set_powergating_state(void *handle,
+				enum amd_powergating_state state);
+
+/**
+ * jpeg_v2_0_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+static int jpeg_v2_0_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->jpeg.num_jpeg_inst = 1;
+
+	jpeg_v2_0_set_dec_ring_funcs(adev);
+	jpeg_v2_0_set_irq_funcs(adev);
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_0_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Load firmware and sw initialization
+ */
+static int jpeg_v2_0_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_ring *ring;
+	int r;
+
+	/* JPEG TRAP */
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+		VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
+	if (r)
+		return r;
+
+	r = amdgpu_jpeg_sw_init(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_jpeg_resume(adev);
+	if (r)
+		return r;
+
+	ring = &adev->jpeg.inst->ring_dec;
+	ring->use_doorbell = true;
+	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
+	sprintf(ring->name, "jpeg_dec");
+	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+	if (r)
+		return r;
+
+	adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
+	adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_0_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * JPEG suspend and free up sw allocation
+ */
+static int jpeg_v2_0_sw_fini(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_jpeg_suspend(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_jpeg_sw_fini(adev);
+
+	return r;
+}
+
+/**
+ * jpeg_v2_0_hw_init - start and test JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ */
+static int jpeg_v2_0_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+	int r;
+
+	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+		(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
+
+	r = amdgpu_ring_test_helper(ring);
+	if (!r)
+		DRM_INFO("JPEG decode initialized successfully.\n");
+
+	return r;
+}
+
+/**
+ * jpeg_v2_0_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the JPEG block, mark ring as not ready any more
+ */
+static int jpeg_v2_0_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+
+	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+	      RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
+		jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+	ring->sched.ready = false;
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_0_suspend - suspend JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend JPEG block
+ */
+static int jpeg_v2_0_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = jpeg_v2_0_hw_fini(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_jpeg_suspend(adev);
+
+	return r;
+}
+
+/**
+ * jpeg_v2_0_resume - resume JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init JPEG block
+ */
+static int jpeg_v2_0_resume(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_jpeg_resume(adev);
+	if (r)
+		return r;
+
+	r = jpeg_v2_0_hw_init(adev);
+
+	return r;
+}
+
+static int jpeg_v2_0_disable_power_gating(struct amdgpu_device *adev)
+{
+	uint32_t data;
+	int r = 0;
+
+	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
+		data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
+		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
+
+		SOC15_WAIT_ON_RREG(JPEG, 0,
+			mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
+			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
+
+		if (r) {
+			DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
+			return r;
+		}
+	}
+
+	/* Removing the anti hang mechanism to indicate the UVDJ tile is ON */
+	data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1;
+	WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data);
+
+	return 0;
+}
+
+static int jpeg_v2_0_enable_power_gating(struct amdgpu_device* adev)
+{
+	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
+		uint32_t data;
+		int r = 0;
+
+		data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS));
+		data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK;
+		data |=  0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF;
+		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data);
+
+		data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
+		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
+
+		SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
+			(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
+			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
+
+		if (r) {
+			DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
+			return r;
+		}
+	}
+
+	return 0;
+}
+
+static void jpeg_v2_0_disable_clock_gating(struct amdgpu_device* adev)
+{
+	uint32_t data;
+
+	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
+	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
+		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+	else
+		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+
+	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
+
+	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
+	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
+		| JPEG_CGC_GATE__JPEG2_DEC_MASK
+		| JPEG_CGC_GATE__JPEG_ENC_MASK
+		| JPEG_CGC_GATE__JMCIF_MASK
+		| JPEG_CGC_GATE__JRBBM_MASK);
+	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
+}
+
+static void jpeg_v2_0_enable_clock_gating(struct amdgpu_device* adev)
+{
+	uint32_t data;
+
+	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
+	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
+		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+	else
+		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+
+	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
+
+	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
+	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
+		|JPEG_CGC_GATE__JPEG2_DEC_MASK
+		|JPEG_CGC_GATE__JPEG_ENC_MASK
+		|JPEG_CGC_GATE__JMCIF_MASK
+		|JPEG_CGC_GATE__JRBBM_MASK);
+	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
+}
+
+/**
+ * jpeg_v2_0_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v2_0_start(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+	int r;
+
+	if (adev->pm.dpm_enabled)
+		amdgpu_dpm_enable_jpeg(adev, true);
+
+	/* disable power gating */
+	r = jpeg_v2_0_disable_power_gating(adev);
+	if (r)
+		return r;
+
+	/* JPEG disable CGC */
+	jpeg_v2_0_disable_clock_gating(adev);
+
+	WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+
+	/* enable JMI channel */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0,
+		~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+	/* enable System Interrupt for JRBC */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN),
+		JPEG_SYS_INT_EN__DJRBC_MASK,
+		~JPEG_SYS_INT_EN__DJRBC_MASK);
+
+	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+		lower_32_bits(ring->gpu_addr));
+	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+		upper_32_bits(ring->gpu_addr));
+	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
+	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
+	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+	ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_0_stop - stop JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * stop the JPEG block
+ */
+static int jpeg_v2_0_stop(struct amdgpu_device *adev)
+{
+	int r;
+
+	/* reset JMI */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL),
+		UVD_JMI_CNTL__SOFT_RESET_MASK,
+		~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+	/* enable JPEG CGC */
+	jpeg_v2_0_enable_clock_gating(adev);
+
+	/* enable power gating */
+	r = jpeg_v2_0_enable_power_gating(adev);
+	if (r)
+		return r;
+
+	if (adev->pm.dpm_enabled)
+		amdgpu_dpm_enable_jpeg(adev, false);
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_0_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring->use_doorbell)
+		return adev->wb.wb[ring->wptr_offs];
+	else
+		return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring->use_doorbell) {
+		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+	} else {
+		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+	}
+}
+
+/**
+ * jpeg_v2_0_dec_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x68e04);
+
+	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x80010000);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x68e04);
+
+	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x00010000);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @fence: fence to emit
+ *
+ * Write a fence and a trap command to the ring.
+ */
+void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+				unsigned flags)
+{
+	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, seq);
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, seq);
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, lower_32_bits(addr));
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, upper_32_bits(addr));
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x8);
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
+		0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
+	amdgpu_ring_write(ring, 0);
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x3fbc);
+
+	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x1);
+
+	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
+	amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer.
+ */
+void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
+				struct amdgpu_job *job,
+				struct amdgpu_ib *ib,
+				uint32_t flags)
+{
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, ib->length_dw);
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
+
+	amdgpu_ring_write(ring,	PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
+	amdgpu_ring_write(ring, 0);
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x01400200);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x2);
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET,
+		0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
+	amdgpu_ring_write(ring, 0x2);
+}
+
+void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+				uint32_t val, uint32_t mask)
+{
+	uint32_t reg_offset = (reg << 2);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x01400200);
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, val);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring,
+			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
+	} else {
+		amdgpu_ring_write(ring, reg_offset);
+		amdgpu_ring_write(ring,	PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+			0, 0, PACKETJ_TYPE3));
+	}
+	amdgpu_ring_write(ring, mask);
+}
+
+void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+				unsigned vmid, uint64_t pd_addr)
+{
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	uint32_t data0, data1, mask;
+
+	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+
+	/* wait for register write */
+	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
+	data1 = lower_32_bits(pd_addr);
+	mask = 0xffffffff;
+	jpeg_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
+}
+
+void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
+{
+	uint32_t reg_offset = (reg << 2);
+
+	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring,
+			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
+	} else {
+		amdgpu_ring_write(ring, reg_offset);
+		amdgpu_ring_write(ring,	PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+			0, 0, PACKETJ_TYPE0));
+	}
+	amdgpu_ring_write(ring, val);
+}
+
+void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	int i;
+
+	WARN_ON(ring->wptr % 2 || count % 2);
+
+	for (i = 0; i < count / 2; i++) {
+		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+		amdgpu_ring_write(ring, 0);
+	}
+}
+
+static bool jpeg_v2_0_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return ((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
+		UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+}
+
+static int jpeg_v2_0_wait_for_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret = 0;
+
+	SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+		UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret);
+
+	return ret;
+}
+
+static int jpeg_v2_0_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool enable = (state == AMD_CG_STATE_GATE);
+
+	if (enable) {
+		if (jpeg_v2_0_is_idle(handle))
+			return -EBUSY;
+		jpeg_v2_0_enable_clock_gating(adev);
+	} else {
+		jpeg_v2_0_disable_clock_gating(adev);
+	}
+
+	return 0;
+}
+
+static int jpeg_v2_0_set_powergating_state(void *handle,
+					enum amd_powergating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret;
+
+	if (state == adev->jpeg.cur_state)
+		return 0;
+
+	if (state == AMD_PG_STATE_GATE)
+		ret = jpeg_v2_0_stop(adev);
+	else
+		ret = jpeg_v2_0_start(adev);
+
+	if (!ret)
+		adev->jpeg.cur_state = state;
+
+	return ret;
+}
+
+static int jpeg_v2_0_set_interrupt_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
+static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	DRM_DEBUG("IH: JPEG TRAP\n");
+
+	switch (entry->src_id) {
+	case VCN_2_0__SRCID__JPEG_DECODE:
+		amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
+		break;
+	default:
+		DRM_ERROR("Unhandled interrupt: %d %d\n",
+			  entry->src_id, entry->src_data[0]);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
+	.name = "jpeg_v2_0",
+	.early_init = jpeg_v2_0_early_init,
+	.late_init = NULL,
+	.sw_init = jpeg_v2_0_sw_init,
+	.sw_fini = jpeg_v2_0_sw_fini,
+	.hw_init = jpeg_v2_0_hw_init,
+	.hw_fini = jpeg_v2_0_hw_fini,
+	.suspend = jpeg_v2_0_suspend,
+	.resume = jpeg_v2_0_resume,
+	.is_idle = jpeg_v2_0_is_idle,
+	.wait_for_idle = jpeg_v2_0_wait_for_idle,
+	.check_soft_reset = NULL,
+	.pre_soft_reset = NULL,
+	.soft_reset = NULL,
+	.post_soft_reset = NULL,
+	.set_clockgating_state = jpeg_v2_0_set_clockgating_state,
+	.set_powergating_state = jpeg_v2_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
+	.type = AMDGPU_RING_TYPE_VCN_JPEG,
+	.align_mask = 0xf,
+	.vmhub = AMDGPU_MMHUB_0,
+	.get_rptr = jpeg_v2_0_dec_ring_get_rptr,
+	.get_wptr = jpeg_v2_0_dec_ring_get_wptr,
+	.set_wptr = jpeg_v2_0_dec_ring_set_wptr,
+	.emit_frame_size =
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+		8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */
+		18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */
+		8 + 16,
+	.emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */
+	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
+	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
+	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
+	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
+	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
+	.insert_nop = jpeg_v2_0_dec_ring_nop,
+	.insert_start = jpeg_v2_0_dec_ring_insert_start,
+	.insert_end = jpeg_v2_0_dec_ring_insert_end,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.begin_use = amdgpu_jpeg_ring_begin_use,
+	.end_use = amdgpu_jpeg_ring_end_use,
+	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
+	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+	adev->jpeg.inst->ring_dec.funcs = &jpeg_v2_0_dec_ring_vm_funcs;
+	DRM_INFO("JPEG decode is enabled in VM mode\n");
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v2_0_irq_funcs = {
+	.set = jpeg_v2_0_set_interrupt_state,
+	.process = jpeg_v2_0_process_interrupt,
+};
+
+static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->jpeg.inst->irq.num_types = 1;
+	adev->jpeg.inst->irq.funcs = &jpeg_v2_0_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version jpeg_v2_0_ip_block =
+{
+		.type = AMD_IP_BLOCK_TYPE_JPEG,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &jpeg_v2_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
new file mode 100644
index 0000000..15a344e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __JPEG_V2_0_H__
+#define __JPEG_V2_0_H__
+
+void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
+void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
+void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+				unsigned flags);
+void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+				struct amdgpu_ib *ib, uint32_t flags);
+void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+				uint32_t val, uint32_t mask);
+void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+				unsigned vmid, uint64_t pd_addr);
+void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
+void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
+
+extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block;
+
+#endif /* __JPEG_V2_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
new file mode 100644
index 0000000..c6d046d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -0,0 +1,641 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "jpeg_v2_0.h"
+
+#include "vcn/vcn_2_5_offset.h"
+#include "vcn/vcn_2_5_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
+
+#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET			0x401f
+
+#define JPEG25_MAX_HW_INSTANCES_ARCTURUS			2
+
+static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
+static int jpeg_v2_5_set_powergating_state(void *handle,
+				enum amd_powergating_state state);
+
+static int amdgpu_ih_clientid_jpeg[] = {
+	SOC15_IH_CLIENTID_VCN,
+	SOC15_IH_CLIENTID_VCN1
+};
+
+/**
+ * jpeg_v2_5_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+static int jpeg_v2_5_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	if (adev->asic_type == CHIP_ARCTURUS) {
+		u32 harvest;
+		int i;
+
+		adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
+		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+			harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
+			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+				adev->jpeg.harvest_config |= 1 << i;
+		}
+
+		if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
+						 AMDGPU_JPEG_HARVEST_JPEG1))
+			return -ENOENT;
+	} else
+		adev->jpeg.num_jpeg_inst = 1;
+
+	jpeg_v2_5_set_dec_ring_funcs(adev);
+	jpeg_v2_5_set_irq_funcs(adev);
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_5_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Load firmware and sw initialization
+ */
+static int jpeg_v2_5_sw_init(void *handle)
+{
+	struct amdgpu_ring *ring;
+	int i, r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		/* JPEG TRAP */
+		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
+				VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq);
+		if (r)
+			return r;
+	}
+
+	r = amdgpu_jpeg_sw_init(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_jpeg_resume(adev);
+	if (r)
+		return r;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		ring = &adev->jpeg.inst[i].ring_dec;
+		ring->use_doorbell = true;
+		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
+		sprintf(ring->name, "jpeg_dec_%d", i);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 0);
+		if (r)
+			return r;
+
+		adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
+		adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
+	}
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_5_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * JPEG suspend and free up sw allocation
+ */
+static int jpeg_v2_5_sw_fini(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_jpeg_suspend(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_jpeg_sw_fini(adev);
+
+	return r;
+}
+
+/**
+ * jpeg_v2_5_hw_init - start and test JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Initialize the hardware and test the JPEG decode rings
+ */
+static int jpeg_v2_5_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_ring *ring;
+	int i, r;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		ring = &adev->jpeg.inst[i].ring_dec;
+		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);
+
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
+			return r;
+	}
+
+	DRM_INFO("JPEG decode initialized successfully.\n");
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_5_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the JPEG block, mark ring as not ready any more
+ */
+static int jpeg_v2_5_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_ring *ring;
+	int i;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		ring = &adev->jpeg.inst[i].ring_dec;
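+		/* gate power if the block is still ungated and the JRBC engine reports activity */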
+		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+		      RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
+			jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+		ring->sched.ready = false;
+	}
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_5_suspend - suspend JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend JPEG block
+ */
+static int jpeg_v2_5_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = jpeg_v2_5_hw_fini(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_jpeg_suspend(adev);
+
+	return r;
+}
+
+/**
+ * jpeg_v2_5_resume - resume JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init JPEG block
+ */
+static int jpeg_v2_5_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = amdgpu_jpeg_resume(adev);
+	if (r)
+		return r;
+
+	r = jpeg_v2_5_hw_init(adev);
+
+	return r;
+}
+
+static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
+{
+	uint32_t data;
+
+	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
+	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
+		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+	else
+		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+
+	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
+
+	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
+	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
+		| JPEG_CGC_GATE__JPEG2_DEC_MASK
+		| JPEG_CGC_GATE__JPEG_ENC_MASK
+		| JPEG_CGC_GATE__JMCIF_MASK
+		| JPEG_CGC_GATE__JRBBM_MASK);
+	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
+
+	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
+	data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
+		| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
+		| JPEG_CGC_CTRL__JMCIF_MODE_MASK
+		| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
+	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
+}
+
+static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
+{
+	uint32_t data;
+
+	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
+	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
+		|JPEG_CGC_GATE__JPEG2_DEC_MASK
+		|JPEG_CGC_GATE__JPEG_ENC_MASK
+		|JPEG_CGC_GATE__JMCIF_MASK
+		|JPEG_CGC_GATE__JRBBM_MASK);
+	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
+}
+
+/**
+ * jpeg_v2_5_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v2_5_start(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	int i;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		ring = &adev->jpeg.inst[i].ring_dec;
+		/* disable anti hang mechanism */
+		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
+			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+		/* JPEG disable CGC */
+		jpeg_v2_5_disable_clock_gating(adev, i);
+
+		/* MJPEG global tiling registers */
+		WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
+			adev->gfx.config.gb_addr_config);
+		WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
+			adev->gfx.config.gb_addr_config);
+
+		/* enable JMI channel */
+		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0,
+			~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+		/* enable System Interrupt for JRBC */
+		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN),
+			JPEG_SYS_INT_EN__DJRBC_MASK,
+			~JPEG_SYS_INT_EN__DJRBC_MASK);
+
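+		/* program the JRBC ring buffer base address, pointers and size */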
+		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0);
+		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+			lower_32_bits(ring->gpu_addr));
+		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+			upper_32_bits(ring->gpu_addr));
+		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0);
+		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0);
+		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+		ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR);
+	}
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_5_stop - stop JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * stop the JPEG block
+ */
+static int jpeg_v2_5_stop(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		/* reset JMI */
+		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL),
+			UVD_JMI_CNTL__SOFT_RESET_MASK,
+			~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+		jpeg_v2_5_enable_clock_gating(adev, i);
+
+		/* enable anti hang mechanism */
+		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS),
+			UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+	}
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_5_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * jpeg_v2_5_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring->use_doorbell)
+		return adev->wb.wb[ring->wptr_offs];
+	else
+		return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * jpeg_v2_5_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring->use_doorbell) {
+		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+	} else {
+		WREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+	}
+}
+
+static bool jpeg_v2_5_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i, ret = 1;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		ret &= (((RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS) &
+			UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+			UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
+	}
+
+	return ret;
+}
+
+static int jpeg_v2_5_wait_for_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i, ret = 0;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		SOC15_WAIT_ON_RREG(JPEG, i, mmUVD_JRBC_STATUS,
+			UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+			UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int jpeg_v2_5_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool enable = (state == AMD_CG_STATE_GATE);
+	int i;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		if (enable) {
+			if (jpeg_v2_5_is_idle(handle))
+				return -EBUSY;
+			jpeg_v2_5_enable_clock_gating(adev, i);
+		} else {
+			jpeg_v2_5_disable_clock_gating(adev, i);
+		}
+	}
+
+	return 0;
+}
+
+static int jpeg_v2_5_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret;
+
+	if (state == adev->jpeg.cur_state)
+		return 0;
+
+	if (state == AMD_PG_STATE_GATE)
+		ret = jpeg_v2_5_stop(adev);
+	else
+		ret = jpeg_v2_5_start(adev);
+
+	if (!ret)
+		adev->jpeg.cur_state = state;
+
+	return ret;
+}
+
+static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
+static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	uint32_t ip_instance;
+
+	switch (entry->client_id) {
+	case SOC15_IH_CLIENTID_VCN:
+		ip_instance = 0;
+		break;
+	case SOC15_IH_CLIENTID_VCN1:
+		ip_instance = 1;
+		break;
+	default:
+		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
+		return 0;
+	}
+
+	DRM_DEBUG("IH: JPEG TRAP\n");
+
+	switch (entry->src_id) {
+	case VCN_2_0__SRCID__JPEG_DECODE:
+		amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
+		break;
+	default:
+		DRM_ERROR("Unhandled interrupt: %d %d\n",
+			  entry->src_id, entry->src_data[0]);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
+	.name = "jpeg_v2_5",
+	.early_init = jpeg_v2_5_early_init,
+	.late_init = NULL,
+	.sw_init = jpeg_v2_5_sw_init,
+	.sw_fini = jpeg_v2_5_sw_fini,
+	.hw_init = jpeg_v2_5_hw_init,
+	.hw_fini = jpeg_v2_5_hw_fini,
+	.suspend = jpeg_v2_5_suspend,
+	.resume = jpeg_v2_5_resume,
+	.is_idle = jpeg_v2_5_is_idle,
+	.wait_for_idle = jpeg_v2_5_wait_for_idle,
+	.check_soft_reset = NULL,
+	.pre_soft_reset = NULL,
+	.soft_reset = NULL,
+	.post_soft_reset = NULL,
+	.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
+	.set_powergating_state = jpeg_v2_5_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
+	.type = AMDGPU_RING_TYPE_VCN_JPEG,
+	.align_mask = 0xf,
+	.vmhub = AMDGPU_MMHUB_1,
+	.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
+	.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
+	.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+	.emit_frame_size =
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+		8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */
+		18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */
+		8 + 16,
+	.emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */
+	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
+	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
+	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
+	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
+	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
+	.insert_nop = jpeg_v2_0_dec_ring_nop,
+	.insert_start = jpeg_v2_0_dec_ring_insert_start,
+	.insert_end = jpeg_v2_0_dec_ring_insert_end,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.begin_use = amdgpu_jpeg_ring_begin_use,
+	.end_use = amdgpu_jpeg_ring_end_use,
+	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
+	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs;
+		adev->jpeg.inst[i].ring_dec.me = i;
+		DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i);
+	}
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
+	.set = jpeg_v2_5_set_interrupt_state,
+	.process = jpeg_v2_5_process_interrupt,
+};
+
+static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		adev->jpeg.inst[i].irq.num_types = 1;
+		adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;
+	}
+}
+
+const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
+{
+		.type = AMD_IP_BLOCK_TYPE_JPEG,
+		.major = 2,
+		.minor = 5,
+		.rev = 0,
+		.funcs = &jpeg_v2_5_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
new file mode 100644
index 0000000..2b4087c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __JPEG_V2_5_H__
+#define __JPEG_V2_5_H__
+
+extern const struct amdgpu_ip_block_version jpeg_v2_5_ip_block;
+
+#endif /* __JPEG_V2_5_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 28105e4..49a3a56 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -27,17 +27,13 @@
 #include "mmhub/mmhub_1_0_offset.h"
 #include "mmhub/mmhub_1_0_sh_mask.h"
 #include "mmhub/mmhub_1_0_default.h"
-#include "mmhub/mmhub_9_4_0_offset.h"
 #include "vega10_enum.h"
-
+#include "soc15.h"
 #include "soc15_common.h"
 
 #define mmDAGB0_CNTL_MISC2_RV 0x008f
 #define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0
 
-#define EA_EDC_CNT_MASK 0x3
-#define EA_EDC_CNT_SHIFT 0x2
-
 u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
 {
 	u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
@@ -527,9 +523,9 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
 	case CHIP_RAVEN:
 	case CHIP_RENOIR:
 		mmhub_v1_0_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		mmhub_v1_0_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
@@ -564,59 +560,191 @@ void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 		*flags |= AMD_CG_SUPPORT_MC_LS;
 }
 
+static const struct soc15_ras_field_entry mmhub_v1_0_ras_fields[] = {
+	{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	}
+};
+
+static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), 0, 0, 0},
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), 0, 0, 0},
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), 0, 0, 0},
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
+};
+
+static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
+	uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
+{
+	uint32_t i;
+	uint32_t sec_cnt, ded_cnt;
+
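+	/* accumulate SEC (correctable) and DED (uncorrectable) counts for every sub-block of this register */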
+	for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_ras_fields); i++) {
+		if (mmhub_v1_0_ras_fields[i].reg_offset != reg->reg_offset)
+			continue;
+
+		sec_cnt = (value &
+				mmhub_v1_0_ras_fields[i].sec_count_mask) >>
+				mmhub_v1_0_ras_fields[i].sec_count_shift;
+		if (sec_cnt) {
+			DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+				mmhub_v1_0_ras_fields[i].name,
+				sec_cnt);
+			*sec_count += sec_cnt;
+		}
+
+		ded_cnt = (value &
+				mmhub_v1_0_ras_fields[i].ded_count_mask) >>
+				mmhub_v1_0_ras_fields[i].ded_count_shift;
+		if (ded_cnt) {
+			DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+				mmhub_v1_0_ras_fields[i].name,
+				ded_cnt);
+			*ded_count += ded_cnt;
+		}
+	}
+
+	return 0;
+}
+
 static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
 					   void *ras_error_status)
 {
-	int i;
-	uint32_t ea0_edc_cnt, ea0_edc_cnt2;
-	uint32_t ea1_edc_cnt, ea1_edc_cnt2;
 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+	uint32_t sec_count = 0, ded_count = 0;
+	uint32_t i;
+	uint32_t reg_value;
 
-	/* EDC CNT will be cleared automatically after read */
-	ea0_edc_cnt = RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT_VG20);
-	ea0_edc_cnt2 = RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20);
-	ea1_edc_cnt = RREG32_SOC15(MMHUB, 0, mmMMEA1_EDC_CNT_VG20);
-	ea1_edc_cnt2 = RREG32_SOC15(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20);
+	err_data->ue_count = 0;
+	err_data->ce_count = 0;
 
-	/* error count of each error type is recorded by 2 bits,
-	 * ce and ue count in EDC_CNT
-	 */
-	for (i = 0; i < 5; i++) {
-		err_data->ce_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
-		err_data->ce_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
-		ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
-		ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
-		err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
-		err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
-		ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
-		ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
-	}
-	/* successive ue count in EDC_CNT */
-	for (i = 0; i < 5; i++) {
-		err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
-		err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
-		ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
-		ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
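+	/* EDC counters are cleared automatically after being read */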
+	for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) {
+		reg_value =
+			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
+		if (reg_value)
+			mmhub_v1_0_get_ras_error_count(&mmhub_v1_0_edc_cnt_regs[i],
+				reg_value, &sec_count, &ded_count);
 	}
 
-	/* ce and ue count in EDC_CNT2 */
-	for (i = 0; i < 3; i++) {
-		err_data->ce_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
-		err_data->ce_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
-		ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
-		ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
-		err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
-		err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
-		ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
-		ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
-	}
-	/* successive ue count in EDC_CNT2 */
-	for (i = 0; i < 6; i++) {
-		err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
-		err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
-		ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
-		ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
-	}
+	err_data->ce_count += sec_count;
+	err_data->ue_count += ded_count;
 }
 
 const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index a7cb185..bde1896 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -427,9 +427,9 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
 		mmhub_v2_0_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		mmhub_v2_0_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 66efe2f..a5281df 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -21,6 +21,7 @@
  *
  */
 #include "amdgpu.h"
+#include "amdgpu_ras.h"
 #include "mmhub_v9_4.h"
 
 #include "mmhub/mmhub_9_4_1_offset.h"
@@ -29,7 +30,7 @@
 #include "athub/athub_1_0_offset.h"
 #include "athub/athub_1_0_sh_mask.h"
 #include "vega10_enum.h"
-
+#include "soc15.h"
 #include "soc15_common.h"
 
 #define MMHUB_NUM_INSTANCES			2
@@ -53,7 +54,7 @@ u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
 	return base;
 }
 
-void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, int hubid,
+static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid,
 				uint32_t vmid, uint64_t value)
 {
 	/* two registers distance between mmVML2VC0_VM_CONTEXT0_* to
@@ -79,7 +80,7 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev,
 {
 	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
 
-	mmhub_v9_4_setup_vm_pt_regs(adev, hubid, 0, pt_base);
+	mmhub_v9_4_setup_hubid_vm_pt_regs(adev, hubid, 0, pt_base);
 
 	WREG32_SOC15_OFFSET(MMHUB, 0,
 			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
@@ -100,6 +101,16 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev,
 			    (u32)(adev->gmc.gart_end >> 44));
 }
 
+void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+				uint64_t page_table_base)
+{
+	int i;
+
+	for (i = 0; i < MMHUB_NUM_INSTANCES; i++)
+		mmhub_v9_4_setup_hubid_vm_pt_regs(adev, i, vmid,
+				page_table_base);
+}
+
 static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
 					         int hubid)
 {
@@ -117,45 +128,53 @@ static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
 			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
 			    adev->gmc.agp_start >> 24);
 
-	/* Program the system aperture low logical page number. */
-	WREG32_SOC15_OFFSET(MMHUB, 0,
-			    mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
-			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
-			    min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
-	WREG32_SOC15_OFFSET(MMHUB, 0,
-			    mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
-			    max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+	if (!amdgpu_sriov_vf(adev)) {
+		/* Program the system aperture low logical page number. */
+		WREG32_SOC15_OFFSET(
+			MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+		WREG32_SOC15_OFFSET(
+			MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+			max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
 
-	/* Set default page address. */
-	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
-		adev->vm_manager.vram_base_offset;
-	WREG32_SOC15_OFFSET(MMHUB, 0,
+		/* Set default page address. */
+		value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+			adev->vm_manager.vram_base_offset;
+		WREG32_SOC15_OFFSET(
+			MMHUB, 0,
 			mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
 			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
 			(u32)(value >> 12));
-	WREG32_SOC15_OFFSET(MMHUB, 0,
+		WREG32_SOC15_OFFSET(
+			MMHUB, 0,
 			mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
 			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
 			(u32)(value >> 44));
 
-	/* Program "protection fault". */
-	WREG32_SOC15_OFFSET(MMHUB, 0,
-			    mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
-			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
-			    (u32)(adev->dummy_page_addr >> 12));
-	WREG32_SOC15_OFFSET(MMHUB, 0,
-			    mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
-			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
-			    (u32)((u64)adev->dummy_page_addr >> 44));
+		/* Program "protection fault". */
+		WREG32_SOC15_OFFSET(
+			MMHUB, 0,
+			mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+			(u32)(adev->dummy_page_addr >> 12));
+		WREG32_SOC15_OFFSET(
+			MMHUB, 0,
+			mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+			(u32)((u64)adev->dummy_page_addr >> 44));
 
-	tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
-				  mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
-				  hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
-	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
-			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
-	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
-			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
+		tmp = RREG32_SOC15_OFFSET(
+			MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
+			hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
+		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
+				    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+		WREG32_SOC15_OFFSET(MMHUB, 0,
+				    mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
+				    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+				    tmp);
+	}
 }
 
 static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
@@ -313,7 +332,8 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
 				    adev->vm_manager.block_size - 9);
 		/* Send no-retry XNACK on fault to suppress VM fault storm. */
 		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
-				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+				    !amdgpu_noretry);
 		WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
 				    hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i,
 				    tmp);
@@ -356,30 +376,16 @@ int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
-		if (amdgpu_sriov_vf(adev)) {
-			/*
-			 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase
-			 * they are VF copy registers so vbios post doesn't
-			 * program them, for SRIOV driver need to program them
-			 */
-			WREG32_SOC15_OFFSET(MMHUB, 0,
-				     mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE,
-				     i * MMHUB_INSTANCE_REGISTER_OFFSET,
-				     adev->gmc.vram_start >> 24);
-			WREG32_SOC15_OFFSET(MMHUB, 0,
-				     mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP,
-				     i * MMHUB_INSTANCE_REGISTER_OFFSET,
-				     adev->gmc.vram_end >> 24);
-		}
-
 		/* GART Enable. */
 		mmhub_v9_4_init_gart_aperture_regs(adev, i);
 		mmhub_v9_4_init_system_aperture_regs(adev, i);
 		mmhub_v9_4_init_tlb_regs(adev, i);
-		mmhub_v9_4_init_cache_regs(adev, i);
+		if (!amdgpu_sriov_vf(adev))
+			mmhub_v9_4_init_cache_regs(adev, i);
 
 		mmhub_v9_4_enable_system_domain(adev, i);
-		mmhub_v9_4_disable_identity_aperture(adev, i);
+		if (!amdgpu_sriov_vf(adev))
+			mmhub_v9_4_disable_identity_aperture(adev, i);
 		mmhub_v9_4_setup_vmid_config(adev, i);
 		mmhub_v9_4_program_invalidation(adev, i);
 	}
@@ -619,9 +625,9 @@ int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
 	switch (adev->asic_type) {
 	case CHIP_ARCTURUS:
 		mmhub_v9_4_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		mmhub_v9_4_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
@@ -655,3 +661,942 @@ void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 	if (data & ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
 		*flags |= AMD_CG_SUPPORT_MC_LS;
 }
+
+static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
+	/* MMHUB Range 0 */
+	{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA0_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA0_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA0_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA0_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA0_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	},
+
+	/* MMHUB Range 1 */
+	{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA1_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA1_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA1_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA1_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA1_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	},
+
+	/* MMHUB Range 2 */
+	{ "MMEA2_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA2_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA2_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA2_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA2_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA2_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA2_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA2_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA2_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA2_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA2_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA2_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA2_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	},
+
+	/* MMHUB Range 3 */
+	{ "MMEA3_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA3_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA3_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA3_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA3_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA3_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA3_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA3_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA3_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA3_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA3_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA3_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA3_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	},
+
+	/* MMHUB Range 4 */
+	{ "MMEA4_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA4_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA4_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA4_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA4_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA4_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA4_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA4_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA4_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA4_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA4_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA4_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA4_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	},
+
+	/* MMHUB Range 5 */
+	{ "MMEA5_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA5_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA5_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA5_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA5_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA5_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA5_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA5_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA5_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA5_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA5_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA5_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA5_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	},
+
+	/* MMHUB Range 6 */
+	{ "MMEA6_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA6_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA6_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA6_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA6_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA6_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA6_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA6_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA6_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA6_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA6_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA6_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA6_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	},
+
+	/* MMHUB Range 7 */
+	{ "MMEA7_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA7_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA7_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA7_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA7_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA7_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA7_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA7_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA7_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+	0, 0,
+	SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+	},
+	{ "MMEA7_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+	},
+	{ "MMEA7_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+	},
+	{ "MMEA7_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+	},
+	{ "MMEA7_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+	SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+	}
+};
+
+static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 0, 0, 0 },
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 },
+};
+
+static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
+	uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
+{
+	uint32_t i;
+	uint32_t sec_cnt, ded_cnt;
+
+	for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_ras_fields); i++) {
+		if (mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset)
+			continue;
+
+		sec_cnt = (value &
+				mmhub_v9_4_ras_fields[i].sec_count_mask) >>
+				mmhub_v9_4_ras_fields[i].sec_count_shift;
+		if (sec_cnt) {
+			DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+				mmhub_v9_4_ras_fields[i].name,
+				sec_cnt);
+			*sec_count += sec_cnt;
+		}
+
+		ded_cnt = (value &
+				mmhub_v9_4_ras_fields[i].ded_count_mask) >>
+				mmhub_v9_4_ras_fields[i].ded_count_shift;
+		if (ded_cnt) {
+			DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+				mmhub_v9_4_ras_fields[i].name,
+				ded_cnt);
+			*ded_count += ded_cnt;
+		}
+	}
+
+	return 0;
+}
+
+static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+					   void *ras_error_status)
+{
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+	uint32_t sec_count = 0, ded_count = 0;
+	uint32_t i;
+	uint32_t reg_value;
+
+	err_data->ue_count = 0;
+	err_data->ce_count = 0;
+
+	for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++) {
+		reg_value =
+			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
+		if (reg_value)
+			mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i],
+				reg_value, &sec_count, &ded_count);
+	}
+
+	err_data->ce_count += sec_count;
+	err_data->ue_count += ded_count;
+}
+
+const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
+	.ras_late_init = amdgpu_mmhub_ras_late_init,
+	.query_ras_error_count = mmhub_v9_4_query_ras_error_count,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
index d435cfc..1b97977 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
@@ -23,6 +23,8 @@
 #ifndef __MMHUB_V9_4_H__
 #define __MMHUB_V9_4_H__
 
+extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs;
+
 u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev);
 int mmhub_v9_4_gart_enable(struct amdgpu_device *adev);
 void mmhub_v9_4_gart_disable(struct amdgpu_device *adev);
@@ -32,5 +34,7 @@ void mmhub_v9_4_init(struct amdgpu_device *adev);
 int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
 			       enum amd_clockgating_state state);
 void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+				uint64_t page_table_base);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
index 8af0bdd..2095863 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
@@ -47,6 +47,18 @@ struct mmsch_v1_0_init_header {
 	uint32_t uvd_table_size;
 };
 
+struct mmsch_vf_eng_init_header {
+	uint32_t init_status;
+	uint32_t table_offset;
+	uint32_t table_size;
+};
+
+struct mmsch_v1_1_init_header {
+	uint32_t version;
+	uint32_t total_size;
+	struct mmsch_vf_eng_init_header eng[2];
+};
+
 struct mmsch_v1_0_cmd_direct_reg_header {
 	uint32_t reg_offset   : 28;
 	uint32_t command_type : 4;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index cc5bf59..5fd67e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -158,82 +158,6 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
 	xgpu_ai_mailbox_set_valid(adev, false);
 }
 
-static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
-{
-        int r = 0;
-        u32 req, val, size;
-
-        if (!amdgim_is_hwperf(adev) || buf == NULL)
-                return -EBADRQC;
-
-        switch(type) {
-        case PP_SCLK:
-                req = IDH_IRQ_GET_PP_SCLK;
-                break;
-        case PP_MCLK:
-                req = IDH_IRQ_GET_PP_MCLK;
-                break;
-        default:
-                return -EBADRQC;
-        }
-
-        mutex_lock(&adev->virt.dpm_mutex);
-
-        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
-
-        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
-        if (!r && adev->fw_vram_usage.va != NULL) {
-                val = RREG32_NO_KIQ(
-                        SOC15_REG_OFFSET(NBIO, 0,
-                                         mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
-                size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
-                                val), PAGE_SIZE);
-
-                if (size < PAGE_SIZE)
-                        strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
-                else
-                        size = 0;
-
-                r = size;
-                goto out;
-        }
-
-        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
-        if(r)
-                pr_info("%s DPM request failed",
-                        (type == PP_SCLK)? "SCLK" : "MCLK");
-
-out:
-        mutex_unlock(&adev->virt.dpm_mutex);
-        return r;
-}
-
-static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
-{
-        int r = 0;
-        u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
-
-        if (!amdgim_is_hwperf(adev))
-                return -EBADRQC;
-
-        mutex_lock(&adev->virt.dpm_mutex);
-        xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
-
-        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
-        if (!r)
-                goto out;
-
-        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
-        if (!r)
-                pr_info("DPM request failed");
-        else
-                pr_info("Mailbox is broken");
-
-out:
-        mutex_unlock(&adev->virt.dpm_mutex);
-        return r;
-}
-
 static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
 					enum idh_request req)
 {
@@ -326,7 +250,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 	 */
 	locked = mutex_trylock(&adev->lock_reset);
 	if (locked)
-		adev->in_gpu_reset = 1;
+		adev->in_gpu_reset = true;
 
 	do {
 		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
@@ -338,7 +262,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 
 flr_done:
 	if (locked) {
-		adev->in_gpu_reset = 0;
+		adev->in_gpu_reset = false;
 		mutex_unlock(&adev->lock_reset);
 	}
 
@@ -455,6 +379,4 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
 	.reset_gpu = xgpu_ai_request_reset,
 	.wait_reset = NULL,
 	.trans_msg = xgpu_ai_mailbox_trans_msg,
-	.get_pp_clk = xgpu_ai_get_pp_clk,
-	.force_dpm_level = xgpu_ai_force_dpm_level,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 077e91a..37dbe0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -35,10 +35,6 @@ enum idh_request {
 	IDH_REL_GPU_FINI_ACCESS,
 	IDH_REQ_GPU_RESET_ACCESS,
 
-	IDH_IRQ_FORCE_DPM_LEVEL = 10,
-	IDH_IRQ_GET_PP_SCLK,
-	IDH_IRQ_GET_PP_MCLK,
-
 	IDH_LOG_VF_ERROR       = 200,
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 0d8767e..237fa5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -252,7 +252,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
 	 */
 	locked = mutex_trylock(&adev->lock_reset);
 	if (locked)
-		adev->in_gpu_reset = 1;
+		adev->in_gpu_reset = true;
 
 	do {
 		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
@@ -264,12 +264,16 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
 
 flr_done:
 	if (locked) {
-		adev->in_gpu_reset = 0;
+		adev->in_gpu_reset = false;
 		mutex_unlock(&adev->lock_reset);
 	}
 
 	/* Trigger recovery for world switch failure if no TDR */
-	if (amdgpu_device_should_recover_gpu(adev))
+	if (amdgpu_device_should_recover_gpu(adev)
+		&& (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
+		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
+		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
+		adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
 		amdgpu_device_gpu_recover(adev, NULL);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 9af7356..cf557a42 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -110,7 +110,6 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
 static int navi10_ih_irq_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_ih_ring *ih = &adev->irq.ih;
-	int ret = 0;
 	u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken;
 	u32 tmp;
 
@@ -179,7 +178,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
 	/* enable interrupts */
 	navi10_ih_enable_interrupts(adev);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -427,7 +426,7 @@ static int navi10_ih_set_clockgating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	navi10_ih_update_clockgating_state(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 0db458f..65eb378 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -52,6 +52,9 @@
 #define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK          0x00000FFCL
 #define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK            0x001F0000L
 
+static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
+					void *ras_error_status);
+
 static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
 {
 	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
@@ -314,6 +317,7 @@ static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
 static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
 {
 	uint32_t bif_doorbell_intr_cntl;
+	struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
 
 	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
 	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
@@ -324,7 +328,18 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
 						RAS_CNTLR_INTERRUPT_CLEAR, 1);
 		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
 
-		amdgpu_ras_global_ras_isr(adev);
+		/*
+		 * Clear the error status after ras_controller_intr (per the
+		 * HW team) and count the UE number for query.
+		 */
+		nbio_v7_4_query_ras_error_count(adev, &obj->err_data);
+
+		DRM_WARN("RAS controller interrupt triggered by NBIF error\n");
+
+		/* ras_controller_int is dedicated to NBIF RAS errors,
+		 * not the global interrupt for sync flood.
+		 */
+		amdgpu_ras_reset_gpu(adev);
 	}
 }
 
@@ -441,10 +456,8 @@ static int nbio_v7_4_init_ras_controller_interrupt (struct amdgpu_device *adev)
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
 			      NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
 			      &adev->nbio.ras_controller_irq);
-	if (r)
-		return r;
 
-	return 0;
+	return r;
 }
 
 static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *adev)
@@ -461,16 +474,16 @@ static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *a
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
 			      NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
 			      &adev->nbio.ras_err_event_athub_irq);
-	if (r)
-		return r;
 
-	return 0;
+	return r;
 }
 
+#define smnPARITY_ERROR_STATUS_UNCORR_GRP2	0x13a20030
+
 static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
 					void *ras_error_status)
 {
-	uint32_t global_sts, central_sts, int_eoi;
+	uint32_t global_sts, central_sts, int_eoi, parity_sts;
 	uint32_t corr, fatal, non_fatal;
 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
 
@@ -479,6 +492,7 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
 	fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
 	non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
 				ParityErrNonFatal);
+	parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);
 
 	if (corr)
 		err_data->ce_count++;
@@ -490,6 +504,11 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
 		/* clear error status register */
 		WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);
 
+		if (fatal)
+			/* clear parity fatal error indication field */
+			WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2,
+				    parity_sts);
+
 		if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
 				BIFL_RasContller_Intr_Recv)) {
 			/* clear interrupt status register */
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 0ba66be..2d1bebd 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -53,6 +53,7 @@
 #include "gfx_v10_0.h"
 #include "sdma_v5_0.h"
 #include "vcn_v2_0.h"
+#include "jpeg_v2_0.h"
 #include "dce_virtual.h"
 #include "mes_v10_1.h"
 #include "mxgpu_nv.h"
@@ -314,6 +315,16 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
 	return ret;
 }
 
+static bool nv_asic_supports_baco(struct amdgpu_device *adev)
+{
+	struct smu_context *smu = &adev->smu;
+
+	if (smu_baco_is_support(smu))
+		return true;
+	else
+		return false;
+}
+
 static enum amd_reset_method
 nv_asic_reset_method(struct amdgpu_device *adev)
 {
@@ -342,7 +353,12 @@ static int nv_asic_reset(struct amdgpu_device *adev)
 	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
 		if (!adev->in_suspend)
 			amdgpu_inc_vram_lost(adev);
-		ret = smu_baco_reset(smu);
+		ret = smu_baco_enter(smu);
+		if (ret)
+			return ret;
+		ret = smu_baco_exit(smu);
+		if (ret)
+			return ret;
 	} else {
 		if (!adev->in_suspend)
 			amdgpu_inc_vram_lost(adev);
@@ -462,7 +478,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
-		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+		    !amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -473,9 +489,10 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
-		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+		    !amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
 		if (adev->enable_mes)
 			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
 		break;
@@ -485,7 +502,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
-		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+		    !amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -496,9 +513,10 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
-		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+		    !amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
 		break;
 	default:
 		return -EINVAL;
@@ -617,6 +635,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
 	.get_pcie_usage = &nv_get_pcie_usage,
 	.need_reset_on_init = &nv_need_reset_on_init,
 	.get_pcie_replay_count = &nv_get_pcie_replay_count,
+	.supports_baco = &nv_asic_supports_baco,
 };
 
 static int nv_common_early_init(void *handle)
@@ -656,10 +675,12 @@ static int nv_common_early_init(void *handle)
 			AMD_CG_SUPPORT_ATHUB_MGCG |
 			AMD_CG_SUPPORT_ATHUB_LS |
 			AMD_CG_SUPPORT_VCN_MGCG |
+			AMD_CG_SUPPORT_JPEG_MGCG |
 			AMD_CG_SUPPORT_BIF_MGCG |
 			AMD_CG_SUPPORT_BIF_LS;
 		adev->pg_flags = AMD_PG_SUPPORT_VCN |
 			AMD_PG_SUPPORT_VCN_DPG |
+			AMD_PG_SUPPORT_JPEG |
 			AMD_PG_SUPPORT_ATHUB;
 		adev->external_rev_id = adev->rev_id + 0x1;
 		break;
@@ -676,9 +697,11 @@ static int nv_common_early_init(void *handle)
 			AMD_CG_SUPPORT_ATHUB_MGCG |
 			AMD_CG_SUPPORT_ATHUB_LS |
 			AMD_CG_SUPPORT_VCN_MGCG |
+			AMD_CG_SUPPORT_JPEG_MGCG |
 			AMD_CG_SUPPORT_BIF_MGCG |
 			AMD_CG_SUPPORT_BIF_LS;
 		adev->pg_flags = AMD_PG_SUPPORT_VCN |
+			AMD_PG_SUPPORT_JPEG |
 			AMD_PG_SUPPORT_VCN_DPG;
 		adev->external_rev_id = adev->rev_id + 20;
 		break;
@@ -697,10 +720,18 @@ static int nv_common_early_init(void *handle)
 			AMD_CG_SUPPORT_MC_LS |
 			AMD_CG_SUPPORT_ATHUB_MGCG |
 			AMD_CG_SUPPORT_ATHUB_LS |
-			AMD_CG_SUPPORT_VCN_MGCG;
+			AMD_CG_SUPPORT_VCN_MGCG |
+			AMD_CG_SUPPORT_JPEG_MGCG;
 		adev->pg_flags = AMD_PG_SUPPORT_VCN |
 			AMD_PG_SUPPORT_VCN_DPG |
+			AMD_PG_SUPPORT_JPEG |
 			AMD_PG_SUPPORT_ATHUB;
+		/* The guest VM gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0;
+		 * as a consequence, rev_id and external_rev_id are wrong.
+		 * Work around this by hardcoding rev_id to 0 (the default value).
+		 */
+		if (amdgpu_sriov_vf(adev))
+			adev->rev_id = 0;
 		adev->external_rev_id = adev->rev_id + 0xa;
 		break;
 	default:
@@ -919,13 +950,13 @@ static int nv_common_set_clockgating_state(void *handle,
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		nv_update_hdp_mem_power_gating(adev,
-				   state == AMD_CG_STATE_GATE ? true : false);
+				   state == AMD_CG_STATE_GATE);
 		nv_update_hdp_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 74a9fe8..36b6579 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -242,6 +242,7 @@ enum psp_gfx_fw_type {
 	GFX_FW_TYPE_SDMA5                           = 55,   /* SDMA5                    MI      */
 	GFX_FW_TYPE_SDMA6                           = 56,   /* SDMA6                    MI      */
 	GFX_FW_TYPE_SDMA7                           = 57,   /* SDMA7                    MI      */
+	GFX_FW_TYPE_VCN1                            = 58,   /* VCN1                     MI      */
 	GFX_FW_TYPE_MAX
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index b345e69..7539104 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -230,54 +230,6 @@ static int psp_v10_0_ring_destroy(struct psp_context *psp,
 	return ret;
 }
 
-static int psp_v10_0_cmd_submit(struct psp_context *psp,
-				uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
-				int index)
-{
-	unsigned int psp_write_ptr_reg = 0;
-	struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
-	struct psp_ring *ring = &psp->km_ring;
-	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
-	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
-		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
-	struct amdgpu_device *adev = psp->adev;
-	uint32_t ring_size_dw = ring->ring_size / 4;
-	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
-
-	/* KM (GPCOM) prepare write pointer */
-	psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
-
-	/* Update KM RB frame pointer to new frame */
-	if ((psp_write_ptr_reg % ring_size_dw) == 0)
-		write_frame = ring_buffer_start;
-	else
-		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
-	/* Check invalid write_frame ptr address */
-	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
-		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
-			  ring_buffer_start, ring_buffer_end, write_frame);
-		DRM_ERROR("write_frame is pointing to address out of bounds\n");
-		return -EINVAL;
-	}
-
-	/* Initialize KM RB frame */
-	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
-
-	/* Update KM RB frame */
-	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
-	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
-	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
-	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
-	write_frame->fence_value = index;
-	amdgpu_asic_flush_hdp(adev, NULL);
-
-	/* Update the write Pointer in DWORDs */
-	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
-	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
-
-	return 0;
-}
-
 static int
 psp_v10_0_sram_map(struct amdgpu_device *adev,
 		   unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
@@ -407,15 +359,30 @@ static int psp_v10_0_mode1_reset(struct psp_context *psp)
 	return -EINVAL;
 }
 
+static uint32_t psp_v10_0_ring_get_wptr(struct psp_context *psp)
+{
+	struct amdgpu_device *adev = psp->adev;
+
+	return RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+}
+
+static void psp_v10_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+	struct amdgpu_device *adev = psp->adev;
+
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
+}
+
 static const struct psp_funcs psp_v10_0_funcs = {
 	.init_microcode = psp_v10_0_init_microcode,
 	.ring_init = psp_v10_0_ring_init,
 	.ring_create = psp_v10_0_ring_create,
 	.ring_stop = psp_v10_0_ring_stop,
 	.ring_destroy = psp_v10_0_ring_destroy,
-	.cmd_submit = psp_v10_0_cmd_submit,
 	.compare_sram_data = psp_v10_0_compare_sram_data,
 	.mode1_reset = psp_v10_0_mode1_reset,
+	.ring_get_wptr = psp_v10_0_ring_get_wptr,
+	.ring_set_wptr = psp_v10_0_ring_set_wptr,
 };
 
 void psp_v10_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index ffeaa2f..0829188 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -22,6 +22,7 @@
 
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/vmalloc.h>
 
 #include "amdgpu.h"
 #include "amdgpu_psp.h"
@@ -43,10 +44,13 @@ MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
 MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
 MODULE_FIRMWARE("amdgpu/navi10_sos.bin");
 MODULE_FIRMWARE("amdgpu/navi10_asd.bin");
+MODULE_FIRMWARE("amdgpu/navi10_ta.bin");
 MODULE_FIRMWARE("amdgpu/navi14_sos.bin");
 MODULE_FIRMWARE("amdgpu/navi14_asd.bin");
+MODULE_FIRMWARE("amdgpu/navi14_ta.bin");
 MODULE_FIRMWARE("amdgpu/navi12_sos.bin");
 MODULE_FIRMWARE("amdgpu/navi12_asd.bin");
+MODULE_FIRMWARE("amdgpu/navi12_ta.bin");
 MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
 MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
 MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
@@ -186,6 +190,31 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+		err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+		if (err) {
+			release_firmware(adev->psp.ta_fw);
+			adev->psp.ta_fw = NULL;
+			dev_info(adev->dev,
+				 "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
+		} else {
+			err = amdgpu_ucode_validate(adev->psp.ta_fw);
+			if (err)
+				goto out2;
+
+			ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
+			adev->psp.ta_hdcp_ucode_version = le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
+			adev->psp.ta_hdcp_ucode_size = le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
+			adev->psp.ta_hdcp_start_addr = (uint8_t *)ta_hdr +
+				le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+			adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
+
+			adev->psp.ta_dtm_ucode_version = le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
+			adev->psp.ta_dtm_ucode_size = le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
+			adev->psp.ta_dtm_start_addr = (uint8_t *)adev->psp.ta_hdcp_start_addr +
+				le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+		}
 		break;
 	default:
 		BUG();
@@ -208,6 +237,29 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 	return err;
 }
 
+int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+{
+	struct amdgpu_device *adev = psp->adev;
+
+	int ret;
+	int retry_loop;
+
+	for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+		/* Wait for the bootloader to signify that it is
+		 * ready by setting bit 31 of C2PMSG_35 to 1 */
+		ret = psp_wait_for(psp,
+				   SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+				   0x80000000,
+				   0x80000000,
+				   false);
+
+		if (ret == 0)
+			return 0;
+	}
+
+	return ret;
+}
+
 static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
@@ -233,9 +285,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
 		return 0;
 	}
 
-	/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
-	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
-			   0x80000000, 0x80000000, false);
+	ret = psp_v11_0_wait_for_bootloader(psp);
 	if (ret)
 		return ret;
 
@@ -251,9 +301,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
 	       psp_gfxdrv_command_reg);
 
-	/* Wait for bootloader to signify that is ready having  bit 31 of C2PMSG_35 set to 1*/
-	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
-			   0x80000000, 0x80000000, false);
+	ret = psp_v11_0_wait_for_bootloader(psp);
 
 	return ret;
 }
@@ -273,9 +321,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
 		return 0;
 	}
 
-	/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
-	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
-			   0x80000000, 0x80000000, false);
+	ret = psp_v11_0_wait_for_bootloader(psp);
 	if (ret)
 		return ret;
 
@@ -294,8 +340,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
 	/* there might be handshake issue with hardware which needs delay */
 	mdelay(20);
 
-	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
-			   0x80000000, 0x80000000, false);
+	ret = psp_v11_0_wait_for_bootloader(psp);
 
 	return ret;
 }
@@ -312,9 +357,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
 	if (psp_v11_0_is_sos_alive(psp))
 		return 0;
 
-	/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
-	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
-			   0x80000000, 0x80000000, false);
+	ret = psp_v11_0_wait_for_bootloader(psp);
 	if (ret)
 		return ret;
 
@@ -519,63 +562,6 @@ static int psp_v11_0_ring_destroy(struct psp_context *psp,
 	return ret;
 }
 
-static int psp_v11_0_cmd_submit(struct psp_context *psp,
-			       uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
-			       int index)
-{
-	unsigned int psp_write_ptr_reg = 0;
-	struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem;
-	struct psp_ring *ring = &psp->km_ring;
-	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
-	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
-		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
-	struct amdgpu_device *adev = psp->adev;
-	uint32_t ring_size_dw = ring->ring_size / 4;
-	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
-
-	/* KM (GPCOM) prepare write pointer */
-	if (psp_v11_0_support_vmr_ring(psp))
-		psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
-	else
-		psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
-
-	/* Update KM RB frame pointer to new frame */
-	/* write_frame ptr increments by size of rb_frame in bytes */
-	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
-	if ((psp_write_ptr_reg % ring_size_dw) == 0)
-		write_frame = ring_buffer_start;
-	else
-		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
-	/* Check invalid write_frame ptr address */
-	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
-		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
-			  ring_buffer_start, ring_buffer_end, write_frame);
-		DRM_ERROR("write_frame is pointing to address out of bounds\n");
-		return -EINVAL;
-	}
-
-	/* Initialize KM RB frame */
-	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
-
-	/* Update KM RB frame */
-	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
-	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
-	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
-	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
-	write_frame->fence_value = index;
-	amdgpu_asic_flush_hdp(adev, NULL);
-
-	/* Update the write Pointer in DWORDs */
-	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
-	if (psp_v11_0_support_vmr_ring(psp)) {
-		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
-		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
-	} else
-		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
-
-	return 0;
-}
-
 static int
 psp_v11_0_sram_map(struct amdgpu_device *adev,
 		  unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
@@ -986,10 +972,13 @@ static int psp_v11_0_memory_training_init(struct psp_context *psp)
  */
 static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
 {
-	int ret;
-	uint32_t p2c_header[4];
 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
 	uint32_t *pcache = (uint32_t*)ctx->sys_cache;
+	struct amdgpu_device *adev = psp->adev;
+	uint32_t p2c_header[4];
+	uint32_t sz;
+	void *buf;
+	int ret;
 
 	if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
 		DRM_DEBUG("Memory training is not supported.\n");
@@ -1004,7 +993,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
 		return 0;
 	}
 
-	amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
+	amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
 	DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
 		  pcache[0], pcache[1], pcache[2], pcache[3],
 		  p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);
@@ -1041,11 +1030,38 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
 	DRM_DEBUG("Memory training ops:%x.\n", ops);
 
 	if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+		/*
+		 * Long training will encroach on a certain amount of bottom
+		 * VRAM; save the content of this bottom VRAM to system memory
+		 * before training and restore it after training to avoid
+		 * VRAM corruption.
+		 */
+		sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
+
+		if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
+			DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",
+				  adev->gmc.visible_vram_size,
+				  adev->mman.aper_base_kaddr);
+			return -EINVAL;
+		}
+
+		buf = vmalloc(sz);
+		if (!buf) {
+			DRM_ERROR("failed to allocate system memory.\n");
+			return -ENOMEM;
+		}
+
+		memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
 		ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
 		if (ret) {
 			DRM_ERROR("Send long training msg failed.\n");
+			vfree(buf);
 			return ret;
 		}
+
+		memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
+		adev->nbio.funcs->hdp_flush(adev, NULL);
+		vfree(buf);
 	}
 
 	if (ops & PSP_MEM_TRAIN_SAVE) {
@@ -1068,6 +1084,30 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
 	return 0;
 }
 
+static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
+{
+	uint32_t data;
+	struct amdgpu_device *adev = psp->adev;
+
+	if (psp_v11_0_support_vmr_ring(psp))
+		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+	else
+		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+
+	return data;
+}
+
+static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+	struct amdgpu_device *adev = psp->adev;
+
+	if (psp_v11_0_support_vmr_ring(psp)) {
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+	} else
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
+}
+
 static const struct psp_funcs psp_v11_0_funcs = {
 	.init_microcode = psp_v11_0_init_microcode,
 	.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
@@ -1077,7 +1117,6 @@ static const struct psp_funcs psp_v11_0_funcs = {
 	.ring_create = psp_v11_0_ring_create,
 	.ring_stop = psp_v11_0_ring_stop,
 	.ring_destroy = psp_v11_0_ring_destroy,
-	.cmd_submit = psp_v11_0_cmd_submit,
 	.compare_sram_data = psp_v11_0_compare_sram_data,
 	.mode1_reset = psp_v11_0_mode1_reset,
 	.xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
@@ -1091,6 +1130,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
 	.mem_training_init = psp_v11_0_memory_training_init,
 	.mem_training_fini = psp_v11_0_memory_training_fini,
 	.mem_training = psp_v11_0_memory_training,
+	.ring_get_wptr = psp_v11_0_ring_get_wptr,
+	.ring_set_wptr = psp_v11_0_ring_set_wptr,
 };
 
 void psp_v11_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 8f553f6..58d8b6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -334,63 +334,6 @@ static int psp_v12_0_ring_destroy(struct psp_context *psp,
 	return ret;
 }
 
-static int psp_v12_0_cmd_submit(struct psp_context *psp,
-			       uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
-			       int index)
-{
-	unsigned int psp_write_ptr_reg = 0;
-	struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem;
-	struct psp_ring *ring = &psp->km_ring;
-	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
-	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
-		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
-	struct amdgpu_device *adev = psp->adev;
-	uint32_t ring_size_dw = ring->ring_size / 4;
-	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
-
-	/* KM (GPCOM) prepare write pointer */
-	if (psp_v12_0_support_vmr_ring(psp))
-		psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
-	else
-		psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
-
-	/* Update KM RB frame pointer to new frame */
-	/* write_frame ptr increments by size of rb_frame in bytes */
-	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
-	if ((psp_write_ptr_reg % ring_size_dw) == 0)
-		write_frame = ring_buffer_start;
-	else
-		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
-	/* Check invalid write_frame ptr address */
-	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
-		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
-			  ring_buffer_start, ring_buffer_end, write_frame);
-		DRM_ERROR("write_frame is pointing to address out of bounds\n");
-		return -EINVAL;
-	}
-
-	/* Initialize KM RB frame */
-	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
-
-	/* Update KM RB frame */
-	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
-	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
-	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
-	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
-	write_frame->fence_value = index;
-	amdgpu_asic_flush_hdp(adev, NULL);
-
-	/* Update the write Pointer in DWORDs */
-	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
-	if (psp_v12_0_support_vmr_ring(psp)) {
-		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
-		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
-	} else
-		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
-
-	return 0;
-}
-
 static int
 psp_v12_0_sram_map(struct amdgpu_device *adev,
 		  unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
@@ -547,6 +490,30 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp)
 	return 0;
 }
 
+static uint32_t psp_v12_0_ring_get_wptr(struct psp_context *psp)
+{
+	uint32_t data;
+	struct amdgpu_device *adev = psp->adev;
+
+	if (psp_v12_0_support_vmr_ring(psp))
+		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+	else
+		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+
+	return data;
+}
+
+static void psp_v12_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+	struct amdgpu_device *adev = psp->adev;
+
+	if (psp_v12_0_support_vmr_ring(psp)) {
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+	} else
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
+}
+
 static const struct psp_funcs psp_v12_0_funcs = {
 	.init_microcode = psp_v12_0_init_microcode,
 	.bootloader_load_sysdrv = psp_v12_0_bootloader_load_sysdrv,
@@ -555,9 +522,10 @@ static const struct psp_funcs psp_v12_0_funcs = {
 	.ring_create = psp_v12_0_ring_create,
 	.ring_stop = psp_v12_0_ring_stop,
 	.ring_destroy = psp_v12_0_ring_destroy,
-	.cmd_submit = psp_v12_0_cmd_submit,
 	.compare_sram_data = psp_v12_0_compare_sram_data,
 	.mode1_reset = psp_v12_0_mode1_reset,
+	.ring_get_wptr = psp_v12_0_ring_get_wptr,
+	.ring_set_wptr = psp_v12_0_ring_set_wptr,
 };
 
 void psp_v12_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index fdc0093..735c43c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -179,7 +179,7 @@ static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver)
 	 * Double check if the latest four legacy versions.
 	 * If yes, it is still the right version.
 	 */
-	for (i = 0; i < sizeof(sos_old_versions) / sizeof(uint32_t); i++) {
+	for (i = 0; i < ARRAY_SIZE(sos_old_versions); i++) {
 		if (sos_old_versions[i] == adev->psp.sos_fw_version)
 			return true;
 	}
@@ -410,65 +410,6 @@ static int psp_v3_1_ring_destroy(struct psp_context *psp,
 	return ret;
 }
 
-static int psp_v3_1_cmd_submit(struct psp_context *psp,
-			       uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
-			       int index)
-{
-	unsigned int psp_write_ptr_reg = 0;
-	struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
-	struct psp_ring *ring = &psp->km_ring;
-	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
-	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
-		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
-	struct amdgpu_device *adev = psp->adev;
-	uint32_t ring_size_dw = ring->ring_size / 4;
-	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
-
-	/* KM (GPCOM) prepare write pointer */
-	if (psp_v3_1_support_vmr_ring(psp))
-		psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
-	else
-		psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
-
-	/* Update KM RB frame pointer to new frame */
-	/* write_frame ptr increments by size of rb_frame in bytes */
-	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
-	if ((psp_write_ptr_reg % ring_size_dw) == 0)
-		write_frame = ring_buffer_start;
-	else
-		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
-	/* Check invalid write_frame ptr address */
-	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
-		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
-			  ring_buffer_start, ring_buffer_end, write_frame);
-		DRM_ERROR("write_frame is pointing to address out of bounds\n");
-		return -EINVAL;
-	}
-
-	/* Initialize KM RB frame */
-	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
-
-	/* Update KM RB frame */
-	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
-	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
-	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
-	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
-	write_frame->fence_value = index;
-	amdgpu_asic_flush_hdp(adev, NULL);
-
-	/* Update the write Pointer in DWORDs */
-	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
-	if (psp_v3_1_support_vmr_ring(psp)) {
-		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
-		/* send interrupt to PSP for SRIOV ring write pointer update */
-		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
-					GFX_CTRL_CMD_ID_CONSUME_CMD);
-	} else
-		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
-
-	return 0;
-}
-
 static int
 psp_v3_1_sram_map(struct amdgpu_device *adev,
 		  unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
@@ -642,6 +583,31 @@ static bool psp_v3_1_support_vmr_ring(struct psp_context *psp)
 	return false;
 }
 
+static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
+{
+	uint32_t data;
+	struct amdgpu_device *adev = psp->adev;
+
+	if (psp_v3_1_support_vmr_ring(psp))
+		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+	else
+		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+	return data;
+}
+
+static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+	struct amdgpu_device *adev = psp->adev;
+
+	if (psp_v3_1_support_vmr_ring(psp)) {
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
+		/* send interrupt to PSP for SRIOV ring write pointer update */
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+			GFX_CTRL_CMD_ID_CONSUME_CMD);
+	} else
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
+}
+
 static const struct psp_funcs psp_v3_1_funcs = {
 	.init_microcode = psp_v3_1_init_microcode,
 	.bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv,
@@ -650,11 +616,12 @@ static const struct psp_funcs psp_v3_1_funcs = {
 	.ring_create = psp_v3_1_ring_create,
 	.ring_stop = psp_v3_1_ring_stop,
 	.ring_destroy = psp_v3_1_ring_destroy,
-	.cmd_submit = psp_v3_1_cmd_submit,
 	.compare_sram_data = psp_v3_1_compare_sram_data,
 	.smu_reload_quirk = psp_v3_1_smu_reload_quirk,
 	.mode1_reset = psp_v3_1_mode1_reset,
 	.support_vmr_ring = psp_v3_1_support_vmr_ring,
+	.ring_get_wptr = psp_v3_1_ring_get_wptr,
+	.ring_set_wptr = psp_v3_1_ring_set_wptr,
 };
 
 void psp_v3_1_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index a101758..7d509a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -255,7 +255,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
 	/* IB packet must end on a 8 DW boundary */
-	sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+	sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -750,7 +750,7 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
 	u32 pad_count;
 	int i;
 
-	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	pad_count = (-ib->length_dw) & 7;
 	for (i = 0; i < pad_count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
 			ib->ptr[ib->length_dw++] =
@@ -1260,16 +1260,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
 
 static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-	struct drm_gpu_scheduler *sched;
 	unsigned i;
 
 	adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_rqs[i] =
-			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		adev->vm_manager.vm_pte_scheds[i] =
+			&adev->sdma.instance[i].ring.sched;
 	}
-	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 5f4e2c6..b6109a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -429,7 +429,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
 	/* IB packet must end on a 8 DW boundary */
-	sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+	sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -1021,7 +1021,7 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
 	u32 pad_count;
 	int i;
 
-	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	pad_count = (-ib->length_dw) & 7;
 	for (i = 0; i < pad_count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
 			ib->ptr[ib->length_dw++] =
@@ -1698,16 +1698,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
 
 static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-	struct drm_gpu_scheduler *sched;
 	unsigned i;
 
 	adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_rqs[i] =
-			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		adev->vm_manager.vm_pte_scheds[i] =
+			 &adev->sdma.instance[i].ring.sched;
 	}
-	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 2f52b7f..e55884d20 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -82,6 +82,7 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
+static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev);
 
 static const struct soc15_reg_golden golden_settings_sdma_4[] = {
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
@@ -257,6 +258,105 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
 };
 
+static const struct soc15_ras_field_entry sdma_v4_0_ras_fields[] = {
+	{ "SDMA_UCODE_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UCODE_BUF_SED),
+	0, 0,
+	},
+	{ "SDMA_RB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_RB_CMD_BUF_SED),
+	0, 0,
+	},
+	{ "SDMA_IB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_IB_CMD_BUF_SED),
+	0, 0,
+	},
+	{ "SDMA_UTCL1_RD_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RD_FIFO_SED),
+	0, 0,
+	},
+	{ "SDMA_UTCL1_RDBST_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RDBST_FIFO_SED),
+	0, 0,
+	},
+	{ "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_DATA_LUT_FIFO_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF0_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF0_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF1_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF1_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF2_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF2_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF3_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF3_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF4_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF4_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF5_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF5_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF6_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF6_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF7_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF7_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF8_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF8_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF9_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF9_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF10_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF10_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF11_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF11_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF12_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF12_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF13_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF13_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF14_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF14_SED),
+	0, 0,
+	},
+	{ "SDMA_MBANK_DATA_BUF15_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF15_SED),
+	0, 0,
+	},
+	{ "SDMA_SPLIT_DAT_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_SPLIT_DAT_BUF_SED),
+	0, 0,
+	},
+	{ "SDMA_MC_WR_ADDR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+	SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MC_WR_ADDR_FIFO_SED),
+	0, 0,
+	},
+};
+
 static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
 		u32 instance, u32 offset)
 {
@@ -698,7 +798,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
 	/* IB packet must end on a 8 DW boundary */
-	sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+	sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -1579,7 +1679,7 @@ static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
 	u32 pad_count;
 	int i;
 
-	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	pad_count = (-ib->length_dw) & 7;
 	for (i = 0; i < pad_count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
 			ib->ptr[ib->length_dw++] =
@@ -1686,6 +1786,7 @@ static int sdma_v4_0_early_init(void *handle)
 	sdma_v4_0_set_buffer_funcs(adev);
 	sdma_v4_0_set_vm_pte_funcs(adev);
 	sdma_v4_0_set_irq_funcs(adev);
+	sdma_v4_0_set_ras_funcs(adev);
 
 	return 0;
 }
@@ -1700,8 +1801,18 @@ static int sdma_v4_0_late_init(void *handle)
 	struct ras_ih_if ih_info = {
 		.cb = sdma_v4_0_process_ras_data_cb,
 	};
+	int i;
 
-	return amdgpu_sdma_ras_late_init(adev, &ih_info);
+	/* read back edc counter registers to clear the counters */
+	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+		for (i = 0; i < adev->sdma.num_instances; i++)
+			RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
+	}
+
+	if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
+		return adev->sdma.funcs->ras_late_init(adev, &ih_info);
+	else
+		return 0;
 }
 
 static int sdma_v4_0_sw_init(void *handle)
@@ -1773,7 +1884,8 @@ static int sdma_v4_0_sw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int i;
 
-	amdgpu_sdma_ras_fini(adev);
+	if (adev->sdma.funcs && adev->sdma.funcs->ras_fini)
+		adev->sdma.funcs->ras_fini(adev);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
@@ -2064,9 +2176,9 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
 	case CHIP_ARCTURUS:
 	case CHIP_RENOIR:
 		sdma_v4_0_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		sdma_v4_0_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
@@ -2409,10 +2521,73 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 			sched = &adev->sdma.instance[i].page.sched;
 		else
 			sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_rqs[i] =
-			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		adev->vm_manager.vm_pte_scheds[i] = sched;
 	}
-	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
+}
+
+static void sdma_v4_0_get_ras_error_count(uint32_t value,
+					uint32_t instance,
+					uint32_t *sec_count)
+{
+	uint32_t i;
+	uint32_t sec_cnt;
+
+	/* double bit (multiple bit) error detection is not supported */
+	for (i = 0; i < ARRAY_SIZE(sdma_v4_0_ras_fields); i++) {
+		/*
+		 * The SDMA_EDC_COUNTER register in each SDMA instance
+		 * shares the same SED shift and mask.
+		 */
+		sec_cnt = (value &
+			sdma_v4_0_ras_fields[i].sec_count_mask) >>
+			sdma_v4_0_ras_fields[i].sec_count_shift;
+		if (sec_cnt) {
+			DRM_INFO("Detected %s in SDMA%d, SED %d\n",
+				sdma_v4_0_ras_fields[i].name,
+				instance, sec_cnt);
+			*sec_count += sec_cnt;
+		}
+	}
+}
+
+static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
+			uint32_t instance, void *ras_error_status)
+{
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+	uint32_t sec_count = 0;
+	uint32_t reg_value = 0;
+
+	reg_value = RREG32_SDMA(instance, mmSDMA0_EDC_COUNTER);
+	/* double bit error is not supported */
+	if (reg_value)
+		sdma_v4_0_get_ras_error_count(reg_value,
+				instance, &sec_count);
+	/* err_data->ce_count should be initialized to 0
+	 * before calling into this function */
+	err_data->ce_count += sec_count;
+	/* double bit error is not supported,
+	 * set ue count to 0 */
+	err_data->ue_count = 0;
+
+	return 0;
+}
+
+static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
+	.ras_late_init = amdgpu_sdma_ras_late_init,
+	.ras_fini = amdgpu_sdma_ras_fini,
+	.query_ras_error_count = sdma_v4_0_query_ras_error_count,
+};
+
+static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_VEGA20:
+	case CHIP_ARCTURUS:
+		adev->sdma.funcs = &sdma_v4_0_ras_funcs;
+		break;
+	default:
+		break;
+	}
 }
 
 const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
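
The new sdma_v4_0_ras_fields[] table drives sdma_v4_0_get_ras_error_count(): each entry names an EDC counter field plus the mask/shift needed to pull its single-error-detected (SED) count out of mmSDMA0_EDC_COUNTER. A standalone sketch of that extraction (ordinary C; the mask/shift values are hypothetical, standing in for SOC15_REG_FIELD()) is:

#include <stdint.h>
#include <stdio.h>

struct ras_field {
	const char *name;
	uint32_t sec_count_mask;
	uint32_t sec_count_shift;
};

/* mask/shift values here are made up; the kernel table gets them
 * from SOC15_REG_FIELD(SDMA0_EDC_COUNTER, <field>) */
static const struct ras_field fields[] = {
	{ "SDMA_UCODE_BUF_SED",  0x00000003, 0 },
	{ "SDMA_RB_CMD_BUF_SED", 0x0000000c, 2 },
};

int main(void)
{
	uint32_t edc_counter = 0x6;	/* pretend mmSDMA0_EDC_COUNTER read */
	uint32_t sec_count = 0;
	unsigned int i;

	for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
		uint32_t cnt = (edc_counter & fields[i].sec_count_mask) >>
			       fields[i].sec_count_shift;

		if (cnt)
			printf("%s: %u single-bit error(s)\n",
			       fields[i].name, cnt);
		sec_count += cnt;
	}
	printf("total SEC count: %u\n", sec_count);
	return 0;
}
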
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index f4ad299..67b9830b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -382,8 +382,15 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
 
-	/* IB packet must end on a 8 DW boundary */
-	sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+	/* An IB packet must end on an 8 DW boundary, i.e. the next
+	 * dword must land on an 8-dword boundary. Our IB packet below
+	 * is 6 dwords long, so add x NOPs such that, in modular
+	 * arithmetic,
+	 * wptr + 6 + x = 8k, k >= 0, which in C is
+	 * (wptr + 6 + x) % 8 == 0.
+	 * The expression below solves for x.
+	 */
+	sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
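
The same (2 - wptr) & 7 rewrite appears in the sdma_v2_4, sdma_v3_0 and sdma_v4_0 hunks above, and the matching pad_ib hunks replace (8 - (len & 7)) % 8 with (-len) & 7. A standalone check of both identities against the comment above (plain user-space C, not kernel code) is:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t n;

	for (n = 0; n < 64; n++) {
		/* NOP count inserted before the 6-dword INDIRECT packet */
		uint32_t x_new = (2 - n) & 7;
		uint32_t x_old = (10 - (n & 7)) % 8;

		assert(x_new == x_old);
		assert((n + 6 + x_new) % 8 == 0);

		/* IB padding up to the next multiple of 8 dwords */
		assert(((-n) & 7) == ((8 - (n & 7)) % 8));
		assert((n + ((-n) & 7)) % 8 == 0);
	}
	printf("both padding identities hold\n");
	return 0;
}

The bitwise forms avoid the division implied by %, but are otherwise behaviour-preserving.
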
@@ -907,16 +914,9 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
 			udelay(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		if (amdgpu_emu_mode == 1)
-			DRM_INFO("ring test on %d succeeded in %d msecs\n", ring->idx, i);
-		else
-			DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	amdgpu_device_wb_free(adev, index);
 
 	return r;
@@ -981,13 +981,10 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		goto err1;
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
-	if (tmp == 0xDEADBEEF) {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+	else
 		r = -EINVAL;
-	}
 
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -1086,10 +1083,10 @@ static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
 }
 
 /**
- * sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw
- *
+ * sdma_v5_0_ring_pad_ib - pad the IB
  * @ib: indirect buffer to fill with padding
  *
+ * Pad the IB with NOPs so its length is a multiple of 8 dwords.
  */
 static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
@@ -1097,7 +1094,7 @@ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
 	u32 pad_count;
 	int i;
 
-	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	pad_count = (-ib->length_dw) & 0x7;
 	for (i = 0; i < pad_count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
 			ib->ptr[ib->length_dw++] =
@@ -1528,9 +1525,9 @@ static int sdma_v5_0_set_clockgating_state(void *handle,
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
 		sdma_v5_0_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		sdma_v5_0_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
@@ -1721,17 +1718,15 @@ static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
 
 static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-	struct drm_gpu_scheduler *sched;
 	unsigned i;
 
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
 		for (i = 0; i < adev->sdma.num_instances; i++) {
-			sched = &adev->sdma.instance[i].ring.sched;
-			adev->vm_manager.vm_pte_rqs[i] =
-				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+			adev->vm_manager.vm_pte_scheds[i] =
+				&adev->sdma.instance[i].ring.sched;
 		}
-		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index f2d70a4..4d415bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1197,6 +1197,11 @@ static int si_asic_reset(struct amdgpu_device *adev)
 	return 0;
 }
 
+static bool si_asic_supports_baco(struct amdgpu_device *adev)
+{
+	return false;
+}
+
 static enum amd_reset_method
 si_asic_reset_method(struct amdgpu_device *adev)
 {
@@ -1425,6 +1430,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
 	.get_pcie_usage = &si_get_pcie_usage,
 	.need_reset_on_init = &si_need_reset_on_init,
 	.get_pcie_replay_count = &si_get_pcie_replay_count,
+	.supports_baco = &si_asic_supports_baco,
 };
 
 static uint32_t si_get_rev_id(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index bdda8b4..42d5601 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -648,7 +648,7 @@ static int si_dma_set_clockgating_state(void *handle,
 	bool enable;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	enable = (state == AMD_CG_STATE_GATE);
 
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
 		for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -834,16 +834,14 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
 
 static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-	struct drm_gpu_scheduler *sched;
 	unsigned i;
 
 	adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_rqs[i] =
-			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		adev->vm_manager.vm_pte_scheds[i] =
+			&adev->sdma.instance[i].ring.sched;
 	}
-	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version si_dma_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index c44723c..c902f26 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -234,7 +234,7 @@ static uint32_t smu_v11_0_i2c_transmit(struct i2c_adapter *control,
 	DRM_DEBUG_DRIVER("I2C_Transmit(), address = %x, bytes = %d , data: ",
 		 (uint16_t)address, numbytes);
 
-	if (drm_debug & DRM_UT_DRIVER) {
+	if (drm_debug_enabled(DRM_UT_DRIVER)) {
 		print_hex_dump(KERN_INFO, "data: ", DUMP_PREFIX_NONE,
 			       16, 1, data, numbytes, false);
 	}
@@ -388,7 +388,7 @@ static uint32_t smu_v11_0_i2c_receive(struct i2c_adapter *control,
 	DRM_DEBUG_DRIVER("I2C_Receive(), address = %x, bytes = %d, data :",
 		  (uint16_t)address, bytes_received);
 
-	if (drm_debug & DRM_UT_DRIVER) {
+	if (drm_debug_enabled(DRM_UT_DRIVER)) {
 		print_hex_dump(KERN_INFO, "data: ", DUMP_PREFIX_NONE,
 			       16, 1, data, bytes_received, false);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8e1640b..15f3424 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -67,7 +67,9 @@
 #include "vce_v4_0.h"
 #include "vcn_v1_0.h"
 #include "vcn_v2_0.h"
+#include "jpeg_v2_0.h"
 #include "vcn_v2_5.h"
+#include "jpeg_v2_5.h"
 #include "dce_virtual.h"
 #include "mxgpu_ai.h"
 #include "amdgpu_smu.h"
@@ -477,56 +479,18 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
 	return ret;
 }
 
-static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
-{
-	if (is_support_sw_smu(adev)) {
-		struct smu_context *smu = &adev->smu;
-
-		*cap = smu_baco_is_support(smu);
-		return 0;
-	} else {
-		void *pp_handle = adev->powerplay.pp_handle;
-		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-		if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
-			*cap = false;
-			return -ENOENT;
-		}
-
-		return pp_funcs->get_asic_baco_capability(pp_handle, cap);
-	}
-}
-
 static int soc15_asic_baco_reset(struct amdgpu_device *adev)
 {
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+	int ret = 0;
 
 	/* avoid NBIF got stuck when do RAS recovery in BACO reset */
 	if (ras && ras->supported)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
 
-	dev_info(adev->dev, "GPU BACO reset\n");
-
-	if (is_support_sw_smu(adev)) {
-		struct smu_context *smu = &adev->smu;
-
-		if (smu_baco_reset(smu))
-			return -EIO;
-	} else {
-		void *pp_handle = adev->powerplay.pp_handle;
-		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-		if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
-			return -ENOENT;
-
-		/* enter BACO state */
-		if (pp_funcs->set_asic_baco_state(pp_handle, 1))
-			return -EIO;
-
-		/* exit BACO state */
-		if (pp_funcs->set_asic_baco_state(pp_handle, 0))
-			return -EIO;
-	}
+	ret = amdgpu_dpm_baco_reset(adev);
+	if (ret)
+		return ret;
 
 	/* re-enable doorbell interrupt after BACO exit */
 	if (ras && ras->supported)
@@ -535,21 +499,11 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
 	return 0;
 }
 
-static int soc15_mode2_reset(struct amdgpu_device *adev)
-{
-	if (is_support_sw_smu(adev))
-		return smu_mode2_reset(&adev->smu);
-	if (!adev->powerplay.pp_funcs ||
-	    !adev->powerplay.pp_funcs->asic_reset_mode_2)
-		return -ENOENT;
-
-	return adev->powerplay.pp_funcs->asic_reset_mode_2(adev->powerplay.pp_handle);
-}
-
 static enum amd_reset_method
 soc15_asic_reset_method(struct amdgpu_device *adev)
 {
-	bool baco_reset;
+	bool baco_reset = false;
+	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
 	switch (adev->asic_type) {
 	case CHIP_RAVEN:
@@ -557,23 +511,21 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
 		return AMD_RESET_METHOD_MODE2;
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
-		soc15_asic_get_baco_capability(adev, &baco_reset);
+	case CHIP_ARCTURUS:
+		baco_reset = amdgpu_dpm_is_baco_supported(adev);
 		break;
 	case CHIP_VEGA20:
 		if (adev->psp.sos_fw_version >= 0x80067)
-			soc15_asic_get_baco_capability(adev, &baco_reset);
-		else
-			baco_reset = false;
-		if (baco_reset) {
-			struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
-			struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+			baco_reset = amdgpu_dpm_is_baco_supported(adev);
 
-			if (hive || (ras && ras->supported))
-				baco_reset = false;
-		}
+		/*
+		 * 1. PMFW version > 0x284300: all cases use baco
+		 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
+		 */
+		if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
+			baco_reset = false;
 		break;
 	default:
-		baco_reset = false;
 		break;
 	}
 
@@ -585,13 +537,17 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
 
 static int soc15_asic_reset(struct amdgpu_device *adev)
 {
+	/* original raven doesn't have full asic reset */
+	if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8)
+		return 0;
+
 	switch (soc15_asic_reset_method(adev)) {
 		case AMD_RESET_METHOD_BACO:
 			if (!adev->in_suspend)
 				amdgpu_inc_vram_lost(adev);
 			return soc15_asic_baco_reset(adev);
 		case AMD_RESET_METHOD_MODE2:
-			return soc15_mode2_reset(adev);
+			return amdgpu_dpm_mode2_reset(adev);
 		default:
 			if (!adev->in_suspend)
 				amdgpu_inc_vram_lost(adev);
@@ -599,6 +555,22 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
 	}
 }
 
+static bool soc15_supports_baco(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_VEGA10:
+	case CHIP_VEGA12:
+	case CHIP_ARCTURUS:
+		return amdgpu_dpm_is_baco_supported(adev);
+	case CHIP_VEGA20:
+		if (adev->psp.sos_fw_version >= 0x80067)
+			return amdgpu_dpm_is_baco_supported(adev);
+		return false;
+	default:
+		return false;
+	}
+}
+
 /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
 			u32 cntl_reg, u32 status_reg)
 {
@@ -709,9 +681,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 	}
 
 	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
-		adev->df_funcs = &df_v3_6_funcs;
+		adev->df.funcs = &df_v3_6_funcs;
 	else
-		adev->df_funcs = &df_v1_7_funcs;
+		adev->df.funcs = &df_v1_7_funcs;
 
 	adev->rev_id = soc15_get_rev_id(adev);
 	adev->nbio.funcs->detect_hw_virt(adev);
@@ -746,11 +718,11 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 		}
 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
-		if (!amdgpu_sriov_vf(adev)) {
-			if (is_support_sw_smu(adev))
+		if (is_support_sw_smu(adev)) {
+			if (!amdgpu_sriov_vf(adev))
 				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
-			else
-				amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+		} else {
+			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		}
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -798,11 +770,16 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
-		if (!amdgpu_sriov_vf(adev))
-			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 
-		if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT))
+		if (amdgpu_sriov_vf(adev)) {
+			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+				amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+		} else {
 			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+		}
+		if (!amdgpu_sriov_vf(adev))
+			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
 		break;
 	case CHIP_RENOIR:
 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
@@ -810,8 +787,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
 			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
-		if (is_support_sw_smu(adev))
-			amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
@@ -821,6 +797,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
                         amdgpu_device_ip_block_add(adev, &dm_ip_block);
 #endif
 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
 		break;
 	default:
 		return -EINVAL;
@@ -999,6 +976,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
 	.get_pcie_usage = &soc15_get_pcie_usage,
 	.need_reset_on_init = &soc15_need_reset_on_init,
 	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
+	.supports_baco = &soc15_supports_baco,
 };
 
 static const struct amdgpu_asic_funcs vega20_asic_funcs =
@@ -1007,6 +985,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
 	.read_bios_from_rom = &soc15_read_bios_from_rom,
 	.read_register = &soc15_read_register,
 	.reset = &soc15_asic_reset,
+	.reset_method = &soc15_asic_reset_method,
 	.set_vga_state = &soc15_vga_set_state,
 	.get_xclk = &soc15_get_xclk,
 	.set_uvd_clocks = &soc15_set_uvd_clocks,
@@ -1019,7 +998,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
 	.get_pcie_usage = &vega20_get_pcie_usage,
 	.need_reset_on_init = &soc15_need_reset_on_init,
 	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
-	.reset_method = &soc15_asic_reset_method
+	.supports_baco = &soc15_supports_baco,
 };
 
 static int soc15_common_early_init(void *handle)
@@ -1145,9 +1124,7 @@ static int soc15_common_early_init(void *handle)
 				AMD_CG_SUPPORT_SDMA_LS |
 				AMD_CG_SUPPORT_VCN_MGCG;
 
-			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
-				AMD_PG_SUPPORT_VCN |
-				AMD_PG_SUPPORT_VCN_DPG;
+			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
 		} else if (adev->pdev->device == 0x15d8) {
 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 				AMD_CG_SUPPORT_GFX_MGLS |
@@ -1190,9 +1167,7 @@ static int soc15_common_early_init(void *handle)
 				AMD_CG_SUPPORT_SDMA_LS |
 				AMD_CG_SUPPORT_VCN_MGCG;
 
-			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
-				AMD_PG_SUPPORT_VCN |
-				AMD_PG_SUPPORT_VCN_DPG;
+			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
 		}
 		break;
 	case CHIP_ARCTURUS:
@@ -1208,7 +1183,9 @@ static int soc15_common_early_init(void *handle)
 			AMD_CG_SUPPORT_SDMA_LS |
 			AMD_CG_SUPPORT_MC_MGCG |
 			AMD_CG_SUPPORT_MC_LS |
-			AMD_CG_SUPPORT_IH_CG;
+			AMD_CG_SUPPORT_IH_CG |
+			AMD_CG_SUPPORT_VCN_MGCG |
+			AMD_CG_SUPPORT_JPEG_MGCG;
 		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x32;
 		break;
@@ -1229,12 +1206,14 @@ static int soc15_common_early_init(void *handle)
 				 AMD_CG_SUPPORT_HDP_LS |
 				 AMD_CG_SUPPORT_ROM_MGCG |
 				 AMD_CG_SUPPORT_VCN_MGCG |
+				 AMD_CG_SUPPORT_JPEG_MGCG |
 				 AMD_CG_SUPPORT_IH_CG |
 				 AMD_CG_SUPPORT_ATHUB_LS |
 				 AMD_CG_SUPPORT_ATHUB_MGCG |
 				 AMD_CG_SUPPORT_DF_MGCG;
 		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
 				 AMD_PG_SUPPORT_VCN |
+				 AMD_PG_SUPPORT_JPEG |
 				 AMD_PG_SUPPORT_VCN_DPG;
 		adev->external_rev_id = adev->rev_id + 0x91;
 		break;
@@ -1272,7 +1251,7 @@ static int soc15_common_sw_init(void *handle)
 	if (amdgpu_sriov_vf(adev))
 		xgpu_ai_mailbox_add_irq_id(adev);
 
-	adev->df_funcs->sw_init(adev);
+	adev->df.funcs->sw_init(adev);
 
 	return 0;
 }
@@ -1282,7 +1261,7 @@ static int soc15_common_sw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	amdgpu_nbio_ras_fini(adev);
-	adev->df_funcs->sw_fini(adev);
+	adev->df.funcs->sw_fini(adev);
 	return 0;
 }
 
@@ -1492,38 +1471,38 @@ static int soc15_common_set_clockgating_state(void *handle,
 	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_hdp_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_drm_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_drm_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_rom_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
-		adev->df_funcs->update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
+		adev->df.funcs->update_medium_grain_clock_gating(adev,
+				state == AMD_CG_STATE_GATE);
 		break;
 	case CHIP_RAVEN:
 	case CHIP_RENOIR:
 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_hdp_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_drm_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_drm_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		soc15_update_rom_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	case CHIP_ARCTURUS:
 		soc15_update_hdp_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
@@ -1561,7 +1540,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
 	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
 		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
 
-	adev->df_funcs->get_clockgating_state(adev, flags);
+	adev->df.funcs->get_clockgating_state(adev, flags);
 }
 
 static int soc15_common_set_powergating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 57af489..d0fb7a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -60,6 +60,18 @@ struct soc15_allowed_register_entry {
 	bool grbm_indexed;
 };
 
+struct soc15_ras_field_entry {
+	const char *name;
+	uint32_t hwip;
+	uint32_t inst;
+	uint32_t seg;
+	uint32_t reg_offset;
+	uint32_t sec_count_mask;
+	uint32_t sec_count_shift;
+	uint32_t ded_count_mask;
+	uint32_t ded_count_shift;
+};
+
 #define SOC15_REG_ENTRY(ip, inst, reg)	ip##_HWIP, inst, reg##_BASE_IDX, reg
 
 #define SOC15_REG_ENTRY_OFFSET(entry)	(adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 839f186..19e870c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -52,6 +52,7 @@
 		uint32_t old_ = 0;	\
 		uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
 		uint32_t loop = adev->usec_timeout;		\
+		ret = 0;					\
 		while ((tmp_ & (mask)) != (expected_value)) {	\
 			if (old_ != tmp_) {			\
 				loop = adev->usec_timeout;	\
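
The one-line addition above initializes ret before the poll loop; if the register already holds the expected value the loop body never runs, so the caller would otherwise see whatever stale value was left in ret. A standalone model of the problem and the fix (ordinary C, simulated register read, error code chosen arbitrarily) is:

#include <stdint.h>
#include <stdio.h>

static uint32_t read_reg(void)
{
	return 0x1;	/* pretend the condition is already satisfied */
}

/* The kernel macro stores its status in a caller-provided variable,
 * so it must reset that variable itself before polling. */
static void wait_for_reg(uint32_t mask, uint32_t expected,
			 int timeout_us, int *ret)
{
	uint32_t tmp = read_reg();

	*ret = 0;			/* the added "ret = 0;" */
	while ((tmp & mask) != expected) {
		if (--timeout_us <= 0) {
			*ret = -1;	/* timed out */
			break;
		}
		tmp = read_reg();
	}
}

int main(void)
{
	int ret = -12345;		/* stale value from the caller */

	wait_for_reg(0x1, 0x1, 10, &ret);
	printf("status after wait: %d\n", ret);	/* prints 0 */
	return 0;
}
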
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 47c4b96..793bf70 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -28,19 +28,24 @@
 #include "rsmu/rsmu_0_0_2_sh_mask.h"
 #include "umc/umc_6_1_1_offset.h"
 #include "umc/umc_6_1_1_sh_mask.h"
+#include "umc/umc_6_1_2_offset.h"
 
-#define smnMCA_UMC0_MCUMC_ADDRT0	0x50f10
+#define UMC_6_INST_DIST			0x40000
 
 /*
  * (addr / 256) * 8192, the higher 26 bits in ErrorAddr
  * is the index of 8KB block
  */
-#define ADDR_OF_8KB_BLOCK(addr)		(((addr) & ~0xffULL) << 5)
+#define ADDR_OF_8KB_BLOCK(addr)			(((addr) & ~0xffULL) << 5)
 /* channel index is the index of 256B block */
 #define ADDR_OF_256B_BLOCK(channel_index)	((channel_index) << 8)
 /* offset in 256B block */
 #define OFFSET_IN_256B_BLOCK(addr)		((addr) & 0xffULL)
 
+#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++)
+#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
+#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
+
 const uint32_t
 	umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = {
 		{2, 18, 11, 27},	{4, 20, 13, 29},
@@ -49,24 +54,10 @@ const uint32_t
 		{9, 25, 0, 16},		{15, 31, 6, 22}
 };
 
-static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev,
-					   uint32_t umc_instance)
+static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev)
 {
-	uint32_t rsmu_umc_index;
-
-	rsmu_umc_index = RREG32_SOC15(RSMU, 0,
-			mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
-	rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
-			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+	WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
 			RSMU_UMC_INDEX_MODE_EN, 1);
-	rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
-			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
-			RSMU_UMC_INDEX_INSTANCE, umc_instance);
-	rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
-			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
-			RSMU_UMC_INDEX_WREN, 1 << umc_instance);
-	WREG32_SOC15(RSMU, 0, mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
-				rsmu_umc_index);
 }
 
 static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
@@ -75,15 +66,23 @@ static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
 			RSMU_UMC_INDEX_MODE_EN, 0);
 }
 
-static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev)
+static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev)
 {
 	uint32_t rsmu_umc_index;
 
 	rsmu_umc_index = RREG32_SOC15(RSMU, 0,
-				mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+			mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+
 	return REG_GET_FIELD(rsmu_umc_index,
-				RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
-				RSMU_UMC_INDEX_INSTANCE);
+			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+			RSMU_UMC_INDEX_MODE_EN);
+}
+
+static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
+					    uint32_t umc_inst,
+					    uint32_t ch_inst)
+{
+	return adev->umc.channel_offs*ch_inst + UMC_6_INST_DIST*umc_inst;
 }
 
 static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
@@ -95,39 +94,50 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
 	uint64_t mc_umc_status;
 	uint32_t mc_umc_status_addr;
 
-	ecc_err_cnt_sel_addr =
-		SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
-	ecc_err_cnt_addr =
-		SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
-	mc_umc_status_addr =
-		SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+	if (adev->asic_type == CHIP_ARCTURUS) {
+		/* UMC 6_1_2 registers */
+		ecc_err_cnt_sel_addr =
+			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
+		ecc_err_cnt_addr =
+			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
+		mc_umc_status_addr =
+			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
+	} else {
+		/* UMC 6_1_1 registers */
+		ecc_err_cnt_sel_addr =
+			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
+		ecc_err_cnt_addr =
+			&adev->sdma.instance[i].ring.sched;
+		mc_umc_status_addr =
+			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+	}
 
 	/* select the lower chip and check the error count */
-	ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset);
+	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
 	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
 					EccErrCntCsSel, 0);
-	WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
-	ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset);
+	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+	ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
 	*error_count +=
 		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
 		 UMC_V6_1_CE_CNT_INIT);
 	/* clear the lower chip err count */
-	WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
 
 	/* select the higher chip and check the err counter */
 	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
 					EccErrCntCsSel, 1);
-	WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
-	ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset);
+	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+	ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
 	*error_count +=
 		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
 		 UMC_V6_1_CE_CNT_INIT);
 	/* clear the higher chip err count */
-	WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
 
 	/* check for SRAM correctable error
 	  MCUMC_STATUS is a 64 bit register */
-	mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
+	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
 	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 &&
 	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
 	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
@@ -141,11 +151,18 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev
 	uint64_t mc_umc_status;
 	uint32_t mc_umc_status_addr;
 
-	mc_umc_status_addr =
-                SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+	if (adev->asic_type == CHIP_ARCTURUS) {
+		/* UMC 6_1_2 registers */
+		mc_umc_status_addr =
+			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
+	} else {
+		/* UMC 6_1_1 registers */
+		mc_umc_status_addr =
+			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+	}
 
 	/* check the MCUMC_STATUS */
-	mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
+	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
 	if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
 	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
 	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
@@ -155,49 +172,78 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev
 		*error_count += 1;
 }
 
-static void umc_v6_1_query_error_count(struct amdgpu_device *adev,
-					   struct ras_err_data *err_data, uint32_t umc_reg_offset,
-					   uint32_t channel_index)
-{
-	umc_v6_1_query_correctable_error_count(adev, umc_reg_offset,
-						   &(err_data->ce_count));
-	umc_v6_1_querry_uncorrectable_error_count(adev, umc_reg_offset,
-						  &(err_data->ue_count));
-}
-
 static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
 					   void *ras_error_status)
 {
-	amdgpu_umc_for_each_channel(umc_v6_1_query_error_count);
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+	uint32_t umc_inst        = 0;
+	uint32_t ch_inst         = 0;
+	uint32_t umc_reg_offset  = 0;
+
+	uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+	if (rsmu_umc_index_state)
+		umc_v6_1_disable_umc_index_mode(adev);
+
+	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+		umc_reg_offset = get_umc_6_reg_offset(adev,
+						      umc_inst,
+						      ch_inst);
+
+		umc_v6_1_query_correctable_error_count(adev,
+						       umc_reg_offset,
+						       &(err_data->ce_count));
+		umc_v6_1_querry_uncorrectable_error_count(adev,
+							  umc_reg_offset,
+							  &(err_data->ue_count));
+	}
+
+	if (rsmu_umc_index_state)
+		umc_v6_1_enable_umc_index_mode(adev);
 }
 
 static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
 					 struct ras_err_data *err_data,
-					 uint32_t umc_reg_offset, uint32_t channel_index)
+					 uint32_t umc_reg_offset,
+					 uint32_t ch_inst,
+					 uint32_t umc_inst)
 {
 	uint32_t lsb, mc_umc_status_addr;
-	uint64_t mc_umc_status, err_addr, retired_page;
+	uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
 	struct eeprom_table_record *err_rec;
+	uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
 
-	mc_umc_status_addr =
-		SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+	if (adev->asic_type == CHIP_ARCTURUS) {
+		/* UMC 6_1_2 registers */
+		mc_umc_status_addr =
+			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
+		mc_umc_addrt0 =
+			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT);
+	} else {
+		/* UMC 6_1_1 registers */
+		mc_umc_status_addr =
+			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+		mc_umc_addrt0 =
+			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
+	}
 
 	/* skip error address process if -ENOMEM */
 	if (!err_data->err_addr) {
 		/* clear umc status */
-		WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL);
+		WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
 		return;
 	}
 
 	err_rec = &err_data->err_addr[err_data->err_addr_cnt];
-	mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
+	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
 
 	/* calculate error address if ue/ce error is detected */
 	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
 	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
 	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
-		err_addr = RREG64_PCIE(smnMCA_UMC0_MCUMC_ADDRT0 + umc_reg_offset * 4);
 
+		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
 		/* the lowest lsb bits should be ignored */
 		lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB);
 		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
@@ -218,57 +264,105 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
 			err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
 			err_rec->cu = 0;
 			err_rec->mem_channel = channel_index;
-			err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev);
+			err_rec->mcumc_id = umc_inst;
 
 			err_data->err_addr_cnt++;
 		}
 	}
 
 	/* clear umc status */
-	WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL);
+	WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
 }
 
 static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
 					     void *ras_error_status)
 {
-	amdgpu_umc_for_each_channel(umc_v6_1_query_error_address);
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+	uint32_t umc_inst        = 0;
+	uint32_t ch_inst         = 0;
+	uint32_t umc_reg_offset  = 0;
+
+	uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+	if (rsmu_umc_index_state)
+		umc_v6_1_disable_umc_index_mode(adev);
+
+	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+		umc_reg_offset = get_umc_6_reg_offset(adev,
+						      umc_inst,
+						      ch_inst);
+
+		umc_v6_1_query_error_address(adev,
+					     err_data,
+					     umc_reg_offset,
+					     ch_inst,
+					     umc_inst);
+	}
+
+	if (rsmu_umc_index_state)
+		umc_v6_1_enable_umc_index_mode(adev);
 }
 
 static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
-					 struct ras_err_data *err_data,
-					 uint32_t umc_reg_offset, uint32_t channel_index)
+					      uint32_t umc_reg_offset)
 {
 	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
 	uint32_t ecc_err_cnt_addr;
 
-	ecc_err_cnt_sel_addr =
-		SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
-	ecc_err_cnt_addr =
-		SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
+	if (adev->asic_type == CHIP_ARCTURUS) {
+		/* UMC 6_1_2 registers */
+		ecc_err_cnt_sel_addr =
+			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
+		ecc_err_cnt_addr =
+			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
+	} else {
+		/* UMC 6_1_1 registers */
+		ecc_err_cnt_sel_addr =
+			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
+		ecc_err_cnt_addr =
+			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
+	}
 
 	/* select the lower chip and check the error count */
-	ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset);
+	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
 	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
 					EccErrCntCsSel, 0);
 	/* set ce error interrupt type to APIC based interrupt */
 	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
 					EccErrInt, 0x1);
-	WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
+	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
 	/* set error count to initial value */
-	WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
 
 	/* select the higher chip and check the err counter */
 	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
 					EccErrCntCsSel, 1);
-	WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
-	WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
 }
 
 static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
 {
-	void *ras_error_status = NULL;
+	uint32_t umc_inst        = 0;
+	uint32_t ch_inst         = 0;
+	uint32_t umc_reg_offset  = 0;
 
-	amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel);
+	uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+	if (rsmu_umc_index_state)
+		umc_v6_1_disable_umc_index_mode(adev);
+
+	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+		umc_reg_offset = get_umc_6_reg_offset(adev,
+						      umc_inst,
+						      ch_inst);
+
+		umc_v6_1_err_cnt_init_per_channel(adev, umc_reg_offset);
+	}
+
+	if (rsmu_umc_index_state)
+		umc_v6_1_enable_umc_index_mode(adev);
 }
 
 const struct amdgpu_umc_funcs umc_v6_1_funcs = {
@@ -276,6 +370,4 @@ const struct amdgpu_umc_funcs umc_v6_1_funcs = {
 	.ras_late_init = amdgpu_umc_ras_late_init,
 	.query_ras_error_count = umc_v6_1_query_ras_error_count,
 	.query_ras_error_address = umc_v6_1_query_ras_error_address,
-	.enable_umc_index_mode = umc_v6_1_enable_umc_index_mode,
-	.disable_umc_index_mode = umc_v6_1_disable_umc_index_mode,
 };
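
The reworked umc_v6_1_query_error_address() keeps the address-composition macros quoted above: the upper 26 bits of ErrorAddr index an 8KB block, the channel index picks the 256B block, and the low 8 bits are the offset inside it. A standalone illustration, assuming the three macros are simply OR'ed together as their comments suggest (the err_addr and channel values below are made up), is:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR_OF_8KB_BLOCK(addr)		(((addr) & ~0xffULL) << 5)
#define ADDR_OF_256B_BLOCK(channel)	((uint64_t)(channel) << 8)
#define OFFSET_IN_256B_BLOCK(addr)	((addr) & 0xffULL)

int main(void)
{
	uint64_t err_addr = 0x12345678ULL;	/* made-up ErrorAddr value */
	uint32_t channel_index = 11;		/* made-up channel index */
	uint64_t retired_page;

	/* 8KB-block index from the upper bits, 256B block from the
	 * channel, byte offset from the low 8 bits */
	retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
		       ADDR_OF_256B_BLOCK(channel_index) |
		       OFFSET_IN_256B_BLOCK(err_addr);

	printf("retired page address: 0x%" PRIx64 "\n", retired_page);
	return 0;
}
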
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
index dab9cbd..0ce1d32 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
@@ -35,7 +35,8 @@
 /* total channel instances in one umc block */
 #define UMC_V6_1_TOTAL_CHANNEL_NUM	(UMC_V6_1_CHANNEL_INSTANCE_NUM * UMC_V6_1_UMC_INSTANCE_NUM)
 /* UMC regiser per channel offset */
-#define UMC_V6_1_PER_CHANNEL_OFFSET		0x800
+#define UMC_V6_1_PER_CHANNEL_OFFSET_VG20	0x800
+#define UMC_V6_1_PER_CHANNEL_OFFSET_ARCT	0x400
 
 /* EccErrCnt max value */
 #define UMC_V6_1_CE_CNT_MAX		0xffff
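
With the per-instance RSMU index mode switched off, registers are addressed directly: get_umc_6_reg_offset() in umc_v6_1.c spaces UMC instances 0x40000 registers apart and channels by the per-ASIC stride defined here (0x800 on Vega20, 0x400 on Arcturus). A standalone sketch of that addressing (plain C, Vega20 stride, loop bounds chosen for illustration) is:

#include <stdint.h>
#include <stdio.h>

#define UMC_6_INST_DIST				0x40000
#define UMC_V6_1_PER_CHANNEL_OFFSET_VG20	0x800

static uint32_t get_umc_6_reg_offset(uint32_t channel_offs,
				     uint32_t umc_inst, uint32_t ch_inst)
{
	return channel_offs * ch_inst + UMC_6_INST_DIST * umc_inst;
}

int main(void)
{
	uint32_t umc_inst, ch_inst;

	/* loop bounds are illustrative; the driver iterates over
	 * adev->umc.umc_inst_num and adev->umc.channel_inst_num */
	for (umc_inst = 0; umc_inst < 2; umc_inst++)
		for (ch_inst = 0; ch_inst < 2; ch_inst++)
			printf("umc %u ch %u -> register offset 0x%x\n",
			       umc_inst, ch_inst,
			       get_umc_6_reg_offset(UMC_V6_1_PER_CHANNEL_OFFSET_VG20,
						    umc_inst, ch_inst));
	return 0;
}
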
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 01e62fb..0fa8aae 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -763,7 +763,7 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 
 	if (enable) {
 		/* wait for STATUS to clear */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 217084d..e0aadca 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -1421,7 +1421,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 
 	if (enable) {
 		/* wait for STATUS to clear */
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 475ae68..217db18 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -739,7 +739,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 	int i;
 
 	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 683701c..3fd102e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -887,7 +887,7 @@ static int vce_v4_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 	int i;
 
 	if ((adev->asic_type == CHIP_POLARIS10) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index b4f84a8..1a24fad 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -25,6 +25,7 @@
 
 #include "amdgpu.h"
 #include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
 #include "soc15.h"
 #include "soc15d.h"
 #include "soc15_common.h"
@@ -36,21 +37,22 @@
 #include "mmhub/mmhub_9_1_sh_mask.h"
 
 #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
+#include "jpeg_v1_0.h"
 
-#define mmUVD_RBC_XX_IB_REG_CHECK				0x05ab
-#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX	1
-#define mmUVD_REG_XX_MASK							0x05ac
-#define mmUVD_REG_XX_MASK_BASE_IDX				1
+#define mmUVD_RBC_XX_IB_REG_CHECK_1_0		0x05ab
+#define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX	1
+#define mmUVD_REG_XX_MASK_1_0			0x05ac
+#define mmUVD_REG_XX_MASK_1_0_BASE_IDX		1
 
 static int vcn_v1_0_stop(struct amdgpu_device *adev);
 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
-static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
-static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
 static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
 static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
-				struct dpg_pause_state *new_state);
+				int inst_idx, struct dpg_pause_state *new_state);
+
+static void vcn_v1_0_idle_work_handler(struct work_struct *work);
 
 /**
  * vcn_v1_0_early_init - set function pointers
@@ -68,9 +70,10 @@ static int vcn_v1_0_early_init(void *handle)
 
 	vcn_v1_0_set_dec_ring_funcs(adev);
 	vcn_v1_0_set_enc_ring_funcs(adev);
-	vcn_v1_0_set_jpeg_ring_funcs(adev);
 	vcn_v1_0_set_irq_funcs(adev);
 
+	jpeg_v1_0_early_init(handle);
+
 	return 0;
 }
 
@@ -101,15 +104,13 @@ static int vcn_v1_0_sw_init(void *handle)
 			return r;
 	}
 
-	/* VCN JPEG TRAP */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.inst->irq);
-	if (r)
-		return r;
-
 	r = amdgpu_vcn_sw_init(adev);
 	if (r)
 		return r;
 
+	/* Override the work func */
+	adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;
+
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 		const struct common_firmware_header *hdr;
 		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
@@ -149,17 +150,11 @@ static int vcn_v1_0_sw_init(void *handle)
 			return r;
 	}
 
-	ring = &adev->vcn.inst->ring_jpeg;
-	sprintf(ring->name, "vcn_jpeg");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
-	if (r)
-		return r;
-
 	adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
-	adev->vcn.internal.jpeg_pitch = adev->vcn.inst->external.jpeg_pitch =
-		SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
 
-	return 0;
+	r = jpeg_v1_0_sw_init(handle);
+
+	return r;
 }
 
 /**
@@ -178,6 +173,8 @@ static int vcn_v1_0_sw_fini(void *handle)
 	if (r)
 		return r;
 
+	jpeg_v1_0_sw_fini(handle);
+
 	r = amdgpu_vcn_sw_fini(adev);
 
 	return r;
@@ -207,7 +204,7 @@ static int vcn_v1_0_hw_init(void *handle)
 			goto done;
 	}
 
-	ring = &adev->vcn.inst->ring_jpeg;
+	ring = &adev->jpeg.inst->ring_dec;
 	r = amdgpu_ring_test_helper(ring);
 	if (r)
 		goto done;
@@ -838,9 +835,9 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
 
 	vcn_v1_0_mc_resume_spg_mode(adev);
 
-	WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10);
-	WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK,
-		RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3);
+	WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
+	WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
+		RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3);
 
 	/* enable VCPU clock */
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
@@ -947,22 +944,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
 
-	ring = &adev->vcn.inst->ring_jpeg;
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
-	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
-			UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
-	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
-	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
-	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
-
-	/* initialize wptr */
-	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-
-	/* copy patch commands to the jpeg ring */
-	vcn_v1_0_jpeg_ring_set_patch_ring(ring,
-		(ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+	jpeg_v1_0_start(adev, 0);
 
 	return 0;
 }
@@ -1106,13 +1088,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
 			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
 
-	/* initialize JPEG wptr */
-	ring = &adev->vcn.inst->ring_jpeg;
-	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-
-	/* copy patch commands to the jpeg ring */
-	vcn_v1_0_jpeg_ring_set_patch_ring(ring,
-		(ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+	jpeg_v1_0_start(adev, 1);
 
 	return 0;
 }
@@ -1223,7 +1199,7 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
 }
 
 static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
-				struct dpg_pause_state *new_state)
+				int inst_idx, struct dpg_pause_state *new_state)
 {
 	int ret_code;
 	uint32_t reg_data = 0;
@@ -1316,7 +1292,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 							UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
 
 				/* Restore */
-				ring = &adev->vcn.inst->ring_jpeg;
+				ring = &adev->jpeg.inst->ring_dec;
 				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
 				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
 							UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
@@ -1370,7 +1346,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 
 	if (enable) {
 		/* wait for STATUS to clear */
@@ -1716,389 +1692,6 @@ static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, val);
 }
 
-
-/**
- * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware read pointer
- */
-static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware write pointer
- */
-static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Commits the write pointer to the hardware
- */
-static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
-}
-
-/**
- * vcn_v1_0_jpeg_ring_insert_start - insert a start command
- *
- * @ring: amdgpu_ring pointer
- *
- * Write a start command to the ring.
- */
-static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x68e04);
-
-	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x80010000);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_insert_end - insert a end command
- *
- * @ring: amdgpu_ring pointer
- *
- * Write a end command to the ring.
- */
-static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x68e04);
-
-	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x00010000);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_emit_fence - emit an fence & trap command
- *
- * @ring: amdgpu_ring pointer
- * @fence: fence to emit
- *
- * Write a fence and a trap command to the ring.
- */
-static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-				     unsigned flags)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, seq);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, seq);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, lower_32_bits(addr));
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, upper_32_bits(addr));
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x8);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
-	amdgpu_ring_write(ring, 0);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x01400200);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, seq);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, lower_32_bits(addr));
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, upper_32_bits(addr));
-
-	amdgpu_ring_write(ring,
-		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
-	amdgpu_ring_write(ring, 0xffffffff);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x3fbc);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(0, 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x1);
-
-	/* emit trap */
-	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
-	amdgpu_ring_write(ring, 0);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
- *
- * @ring: amdgpu_ring pointer
- * @ib: indirect buffer to execute
- *
- * Write ring commands to execute the indirect buffer.
- */
-static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
-					struct amdgpu_job *job,
-					struct amdgpu_ib *ib,
-					uint32_t flags)
-{
-	struct amdgpu_device *adev = ring->adev;
-	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, ib->length_dw);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
-
-	amdgpu_ring_write(ring,
-		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
-	amdgpu_ring_write(ring, 0);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x01400200);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x2);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
-	amdgpu_ring_write(ring, 0x2);
-}
-
-static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
-					    uint32_t reg, uint32_t val,
-					    uint32_t mask)
-{
-	struct amdgpu_device *adev = ring->adev;
-	uint32_t reg_offset = (reg << 2);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x01400200);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, val);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
-	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
-		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
-		amdgpu_ring_write(ring, 0);
-		amdgpu_ring_write(ring,
-			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
-	} else {
-		amdgpu_ring_write(ring, reg_offset);
-		amdgpu_ring_write(ring,
-			PACKETJ(0, 0, 0, PACKETJ_TYPE3));
-	}
-	amdgpu_ring_write(ring, mask);
-}
-
-static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
-		unsigned vmid, uint64_t pd_addr)
-{
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t data0, data1, mask;
-
-	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
-
-	/* wait for register write */
-	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
-	data1 = lower_32_bits(pd_addr);
-	mask = 0xffffffff;
-	vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
-}
-
-static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
-					uint32_t reg, uint32_t val)
-{
-	struct amdgpu_device *adev = ring->adev;
-	uint32_t reg_offset = (reg << 2);
-
-	amdgpu_ring_write(ring,
-		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
-	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
-			((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
-		amdgpu_ring_write(ring, 0);
-		amdgpu_ring_write(ring,
-			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
-	} else {
-		amdgpu_ring_write(ring, reg_offset);
-		amdgpu_ring_write(ring,
-			PACKETJ(0, 0, 0, PACKETJ_TYPE0));
-	}
-	amdgpu_ring_write(ring, val);
-}
-
-static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
-{
-	int i;
-
-	WARN_ON(ring->wptr % 2 || count % 2);
-
-	for (i = 0; i < count / 2; i++) {
-		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
-		amdgpu_ring_write(ring, 0);
-	}
-}
-
-static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
-{
-	struct amdgpu_device *adev = ring->adev;
-	ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
-	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
-		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
-		ring->ring[(*ptr)++] = 0;
-		ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
-	} else {
-		ring->ring[(*ptr)++] = reg_offset;
-		ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
-	}
-	ring->ring[(*ptr)++] = val;
-}
-
-static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	uint32_t reg, reg_offset, val, mask, i;
-
-	// 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
-	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
-	reg_offset = (reg << 2);
-	val = lower_32_bits(ring->gpu_addr);
-	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
-	// 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
-	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
-	reg_offset = (reg << 2);
-	val = upper_32_bits(ring->gpu_addr);
-	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
-	// 3rd to 5th: issue MEM_READ commands
-	for (i = 0; i <= 2; i++) {
-		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
-		ring->ring[ptr++] = 0;
-	}
-
-	// 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
-	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
-	reg_offset = (reg << 2);
-	val = 0x13;
-	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
-	// 7th: program mmUVD_JRBC_RB_REF_DATA
-	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
-	reg_offset = (reg << 2);
-	val = 0x1;
-	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
-	// 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
-	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
-	reg_offset = (reg << 2);
-	val = 0x1;
-	mask = 0x1;
-
-	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
-	ring->ring[ptr++] = 0x01400200;
-	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
-	ring->ring[ptr++] = val;
-	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
-	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
-		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
-		ring->ring[ptr++] = 0;
-		ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
-	} else {
-		ring->ring[ptr++] = reg_offset;
-		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
-	}
-	ring->ring[ptr++] = mask;
-
-	//9th to 21st: insert no-op
-	for (i = 0; i <= 12; i++) {
-		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
-		ring->ring[ptr++] = 0;
-	}
-
-	//22nd: reset mmUVD_JRBC_RB_RPTR
-	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
-	reg_offset = (reg << 2);
-	val = 0;
-	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
-	//23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
-	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
-	reg_offset = (reg << 2);
-	val = 0x12;
-	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-}
-
 static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
 					struct amdgpu_irq_src *source,
 					unsigned type,
@@ -2123,9 +1716,6 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
 	case 120:
 		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
 		break;
-	case 126:
-		amdgpu_fence_process(&adev->vcn.inst->ring_jpeg);
-		break;
 	default:
 		DRM_ERROR("Unhandled interrupt: %d %d\n",
 			  entry->src_id, entry->src_data[0]);
@@ -2174,6 +1764,86 @@ static int vcn_v1_0_set_powergating_state(void *handle,
 	return ret;
 }
 
+static void vcn_v1_0_idle_work_handler(struct work_struct *work)
+{
+	struct amdgpu_device *adev =
+		container_of(work, struct amdgpu_device, vcn.idle_work.work);
+	unsigned int fences = 0, i;
+
+	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+		fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
+
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+		struct dpg_pause_state new_state;
+
+		if (fences)
+			new_state.fw_based = VCN_DPG_STATE__PAUSE;
+		else
+			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+		if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
+			new_state.jpeg = VCN_DPG_STATE__PAUSE;
+		else
+			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+		adev->vcn.pause_dpg_mode(adev, 0, &new_state);
+	}
+
+	fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
+	fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);
+
+	if (fences == 0) {
+		amdgpu_gfx_off_ctrl(adev, true);
+		if (adev->pm.dpm_enabled)
+			amdgpu_dpm_enable_uvd(adev, false);
+		else
+			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+			       AMD_PG_STATE_GATE);
+	} else {
+		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+	}
+}
+
+void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+	if (set_clocks) {
+		amdgpu_gfx_off_ctrl(adev, false);
+		if (adev->pm.dpm_enabled)
+			amdgpu_dpm_enable_uvd(adev, true);
+		else
+			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+			       AMD_PG_STATE_UNGATE);
+	}
+
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+		struct dpg_pause_state new_state;
+		unsigned int fences = 0, i;
+
+		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+			fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
+
+		if (fences)
+			new_state.fw_based = VCN_DPG_STATE__PAUSE;
+		else
+			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+		if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
+			new_state.jpeg = VCN_DPG_STATE__PAUSE;
+		else
+			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+			new_state.fw_based = VCN_DPG_STATE__PAUSE;
+		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+			new_state.jpeg = VCN_DPG_STATE__PAUSE;
+
+		adev->vcn.pause_dpg_mode(adev, 0, &new_state);
+	}
+}
+
 static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
 	.name = "vcn_v1_0",
 	.early_init = vcn_v1_0_early_init,
@@ -2220,7 +1890,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
 	.insert_start = vcn_v1_0_dec_ring_insert_start,
 	.insert_end = vcn_v1_0_dec_ring_insert_end,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.begin_use = amdgpu_vcn_ring_begin_use,
+	.begin_use = vcn_v1_0_ring_begin_use,
 	.end_use = amdgpu_vcn_ring_end_use,
 	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
 	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
@@ -2252,48 +1922,13 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
 	.insert_nop = amdgpu_ring_insert_nop,
 	.insert_end = vcn_v1_0_enc_ring_insert_end,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.begin_use = amdgpu_vcn_ring_begin_use,
+	.begin_use = vcn_v1_0_ring_begin_use,
 	.end_use = amdgpu_vcn_ring_end_use,
 	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
 	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
-static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
-	.type = AMDGPU_RING_TYPE_VCN_JPEG,
-	.align_mask = 0xf,
-	.nop = PACKET0(0x81ff, 0),
-	.support_64bit_ptrs = false,
-	.no_user_fence = true,
-	.vmhub = AMDGPU_MMHUB_0,
-	.extra_dw = 64,
-	.get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
-	.get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
-	.set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
-	.emit_frame_size =
-		6 + 6 + /* hdp invalidate / flush */
-		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
-		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
-		8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
-		26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
-		6,
-	.emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
-	.emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
-	.emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
-	.emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
-	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
-	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
-	.insert_nop = vcn_v1_0_jpeg_ring_nop,
-	.insert_start = vcn_v1_0_jpeg_ring_insert_start,
-	.insert_end = vcn_v1_0_jpeg_ring_insert_end,
-	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.begin_use = amdgpu_vcn_ring_begin_use,
-	.end_use = amdgpu_vcn_ring_end_use,
-	.emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
-	.emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
-	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
 {
 	adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
@@ -2310,12 +1945,6 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
 	DRM_INFO("VCN encode is enabled in VM mode\n");
 }
 
-static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
-{
-	adev->vcn.inst->ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
-	DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
-}
-
 static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
 	.set = vcn_v1_0_set_interrupt_state,
 	.process = vcn_v1_0_process_interrupt,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
index 2a497a7..f67d739 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
@@ -24,6 +24,8 @@
 #ifndef __VCN_V1_0_H__
 #define __VCN_V1_0_H__
 
+void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
+
 extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block;
 
 #endif
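
The vcn_v1_0 hunks above replace the shared amdgpu_vcn idle handling with VCN 1.0-specific versions: sw_init overrides the delayed-work function that the common code installed (adev->vcn.idle_work.work.func), and the ring begin_use callbacks now point at vcn_v1_0_ring_begin_use, which also consults the JPEG ring that has moved under adev->jpeg. The fragment below is a minimal, self-contained sketch of that "install generic handler, then override it" pattern; the names (mock_device, generic_sw_init, v1_idle_handler) are hypothetical stand-ins, not the amdgpu structures or API.

#include <stdio.h>

/* Hypothetical stand-ins: not the amdgpu structures. */
struct mock_work {
	void (*func)(struct mock_work *work);
};

struct mock_device {
	struct mock_work idle_work;
};

/* What a shared "core" init might install by default. */
static void generic_idle_handler(struct mock_work *work)
{
	printf("generic idle handling\n");
}

static void generic_sw_init(struct mock_device *dev)
{
	dev->idle_work.func = generic_idle_handler;
}

/* Version-specific handler that also checks extra state
 * (in the patch: the JPEG decode ring's emitted fences). */
static void v1_idle_handler(struct mock_work *work)
{
	printf("v1 idle handling (also checks the JPEG ring)\n");
}

int main(void)
{
	struct mock_device dev;

	generic_sw_init(&dev);
	/* Override the work func after the common init, mirroring what
	 * vcn_v1_0_sw_init does with adev->vcn.idle_work.work.func. */
	dev.idle_work.func = v1_idle_handler;

	dev.idle_work.func(&dev.idle_work);
	return 0;
}

Built with a plain C compiler, this prints the overridden handler's message, which is the behavioral point of the patch hunk: the common framework keeps scheduling the same work item, but the version-specific code decides what runs when it fires.
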
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 38f787a..4f72167 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -47,39 +47,13 @@
 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x5a7
 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x1e2
 
-#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 			0x1bfff
-#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET				0x4029
-#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET				0x402a
-#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET				0x402b
-#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET		0x40ea
-#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 	0x40eb
-#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET				0x40cf
-#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET				0x40d1
-#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 		0x40e8
-#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x40e9
-#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET				0x4082
-#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET		0x40ec
-#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 	0x40ed
-#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET			0x4085
-#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET				0x4084
-#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET				0x4089
-#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET				0x401f
-
-#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR				0x18000
-
-#define mmUVD_RBC_XX_IB_REG_CHECK 					0x026b
-#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 				1
-#define mmUVD_REG_XX_MASK 						0x026c
-#define mmUVD_REG_XX_MASK_BASE_IDX 					1
-
 static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
-static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
 static int vcn_v2_0_set_powergating_state(void *handle,
 				enum amd_powergating_state state);
 static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
-				struct dpg_pause_state *new_state);
+				int inst_idx, struct dpg_pause_state *new_state);
 
 /**
  * vcn_v2_0_early_init - set function pointers
@@ -97,7 +71,6 @@ static int vcn_v2_0_early_init(void *handle)
 
 	vcn_v2_0_set_dec_ring_funcs(adev);
 	vcn_v2_0_set_enc_ring_funcs(adev);
-	vcn_v2_0_set_jpeg_ring_funcs(adev);
 	vcn_v2_0_set_irq_funcs(adev);
 
 	return 0;
@@ -132,12 +105,6 @@ static int vcn_v2_0_sw_init(void *handle)
 			return r;
 	}
 
-	/* VCN JPEG TRAP */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
-			      VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst->irq);
-	if (r)
-		return r;
-
 	r = amdgpu_vcn_sw_init(adev);
 	if (r)
 		return r;
@@ -194,19 +161,8 @@ static int vcn_v2_0_sw_init(void *handle)
 			return r;
 	}
 
-	ring = &adev->vcn.inst->ring_jpeg;
-	ring->use_doorbell = true;
-	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
-	sprintf(ring->name, "vcn_jpeg");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
-	if (r)
-		return r;
-
 	adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
 
-	adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
-	adev->vcn.inst->external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
-
 	return 0;
 }
 
@@ -258,11 +214,6 @@ static int vcn_v2_0_hw_init(void *handle)
 			goto done;
 	}
 
-	ring = &adev->vcn.inst->ring_jpeg;
-	r = amdgpu_ring_test_helper(ring);
-	if (r)
-		goto done;
-
 done:
 	if (!r)
 		DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
@@ -296,9 +247,6 @@ static int vcn_v2_0_hw_fini(void *handle)
 		ring->sched.ready = false;
 	}
 
-	ring = &adev->vcn.inst->ring_jpeg;
-	ring->sched.ready = false;
-
 	return 0;
 }
 
@@ -393,7 +341,6 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
 
 	WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
-	WREG32_SOC15(UVD, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 }
 
 static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
@@ -404,88 +351,88 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
 	/* cache window 0: fw */
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 		if (!indirect) {
-			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
-			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
-			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
 		} else {
-			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
-			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
-			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
 		}
 		offset = 0;
 	} else {
-		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 			lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
-		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 			upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
 		offset = size;
-		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
 			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
 	}
 
 	if (!indirect)
-		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
 	else
-		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
 
 	/* cache window 1: stack */
 	if (!indirect) {
-		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
 			lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
-		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
 			upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
-		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
 	} else {
-		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
-		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
-		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
 	}
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
 
 	/* cache window 2: context */
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
 		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
 		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
 
 	/* non-cache window */
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
 
 	/* VCN global tiling registers */
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
 }
 
@@ -631,146 +578,23 @@ static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
 		 UVD_CGC_CTRL__WCB_MODE_MASK |
 		 UVD_CGC_CTRL__VCPU_MODE_MASK |
 		 UVD_CGC_CTRL__SCPU_MODE_MASK);
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
 
 	/* turn off clock gating */
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
 
 	/* turn on SUVD clock gating */
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
 
 	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
 }
 
 /**
- * jpeg_v2_0_start - start JPEG block
- *
- * @adev: amdgpu_device pointer
- *
- * Setup and start the JPEG block
- */
-static int jpeg_v2_0_start(struct amdgpu_device *adev)
-{
-	struct amdgpu_ring *ring = &adev->vcn.inst->ring_jpeg;
-	uint32_t tmp;
-	int r = 0;
-
-	/* disable power gating */
-	tmp = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp);
-
-	SOC15_WAIT_ON_RREG(VCN, 0,
-		mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
-		UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
-
-	if (r) {
-		DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
-		return r;
-	}
-
-	/* Removing the anti hang mechanism to indicate the UVDJ tile is ON */
-	tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1;
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp);
-
-	/* JPEG disable CGC */
-	tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
-	tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
-	tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
-	tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
-	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp);
-
-	tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
-	tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
-		| JPEG_CGC_GATE__JPEG2_DEC_MASK
-		| JPEG_CGC_GATE__JPEG_ENC_MASK
-		| JPEG_CGC_GATE__JMCIF_MASK
-		| JPEG_CGC_GATE__JRBBM_MASK);
-	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp);
-
-	/* enable JMI channel */
-	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), 0,
-		~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
-	/* enable System Interrupt for JRBC */
-	WREG32_P(SOC15_REG_OFFSET(VCN, 0, mmJPEG_SYS_INT_EN),
-		JPEG_SYS_INT_EN__DJRBC_MASK,
-		~JPEG_SYS_INT_EN__DJRBC_MASK);
-
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
-	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
-		lower_32_bits(ring->gpu_addr));
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
-		upper_32_bits(ring->gpu_addr));
-	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
-	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
-	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
-	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
-	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-
-	return 0;
-}
-
-/**
- * jpeg_v2_0_stop - stop JPEG block
- *
- * @adev: amdgpu_device pointer
- *
- * stop the JPEG block
- */
-static int jpeg_v2_0_stop(struct amdgpu_device *adev)
-{
-	uint32_t tmp;
-	int r = 0;
-
-	/* reset JMI */
-	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL),
-		UVD_JMI_CNTL__SOFT_RESET_MASK,
-		~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
-	/* enable JPEG CGC */
-	tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
-	tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
-	tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
-	tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
-	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp);
-
-
-	tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
-	tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK
-		|JPEG_CGC_GATE__JPEG2_DEC_MASK
-		|JPEG_CGC_GATE__JPEG_ENC_MASK
-		|JPEG_CGC_GATE__JMCIF_MASK
-		|JPEG_CGC_GATE__JRBBM_MASK);
-	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp);
-
-	/* enable power gating */
-	tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS));
-	tmp &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK;
-	tmp |=  0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF;
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp);
-
-	tmp = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp);
-
-	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
-		(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
-		UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
-
-	if (r) {
-		DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
-		return r;
-	}
-
-	return r;
-}
-
-/**
  * vcn_v2_0_enable_clock_gating - enable VCN clock gating
  *
  * @adev: amdgpu_device pointer
@@ -930,7 +754,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
 	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
 
 	if (indirect)
-		adev->vcn.dpg_sram_curr_addr = (uint32_t*)adev->vcn.dpg_sram_cpu_addr;
+		adev->vcn.inst->dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst->dpg_sram_cpu_addr;
 
 	/* enable clock gating */
 	vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);
@@ -939,11 +763,11 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
 	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
 
 	/* disable master interupt */
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
 
 	/* setup mmUVD_LMI_CTRL */
@@ -955,28 +779,28 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
 		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 		0x00100000L);
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
 
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_MPC_CNTL),
 		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
 
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_MPC_SET_MUXA0),
 		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
 		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
 		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
 
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_MPC_SET_MUXB0),
 		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
 		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
 		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
 
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_MPC_SET_MUX),
 		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
@@ -984,29 +808,29 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
 
 	vcn_v2_0_mc_resume_dpg_mode(adev, indirect);
 
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
 
 	/* release VCPU reset to boot */
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);
 
 	/* enable LMI MC and UMC channels */
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_LMI_CTRL2),
 		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);
 
 	/* enable master interrupt */
-	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_MASTINT_EN),
 		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
 
 	if (indirect)
-		psp_update_vcn_sram(adev, 0, adev->vcn.dpg_sram_gpu_addr,
-				    (uint32_t)((uintptr_t)adev->vcn.dpg_sram_curr_addr -
-					       (uintptr_t)adev->vcn.dpg_sram_cpu_addr));
+		psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr,
+				    (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr -
+					       (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr));
 
 	/* force RBC into idle state */
 	rb_bufsz = order_base_2(ring->ring_size);
@@ -1052,12 +876,8 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
 	if (adev->pm.dpm_enabled)
 		amdgpu_dpm_enable_uvd(adev, true);
 
-	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
-		r = vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
-		if (r)
-			return r;
-		goto jpeg;
-	}
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+		return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
 
 	vcn_v2_0_disable_static_power_gating(adev);
 
@@ -1209,10 +1029,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
 
-jpeg:
-	r = jpeg_v2_0_start(adev);
-
-	return r;
+	return 0;
 }
 
 static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
@@ -1231,9 +1048,6 @@ static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
 	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
 	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
 
-	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
-
 	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
 	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
 
@@ -1252,10 +1066,6 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
 	uint32_t tmp;
 	int r;
 
-	r = jpeg_v2_0_stop(adev);
-	if (r)
-		return r;
-
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
 		r = vcn_v2_0_stop_dpg_mode(adev);
 		if (r)
@@ -1320,7 +1130,7 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
 }
 
 static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
-				struct dpg_pause_state *new_state)
+				int inst_idx, struct dpg_pause_state *new_state)
 {
 	struct amdgpu_ring *ring;
 	uint32_t reg_data = 0;
@@ -1403,7 +1213,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 
 	if (enable) {
 		/* wait for STATUS to clear */
@@ -1781,272 +1591,6 @@ void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_
 	amdgpu_ring_write(ring, val);
 }
 
-/**
- * vcn_v2_0_jpeg_ring_get_rptr - get read pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware read pointer
- */
-static uint64_t vcn_v2_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_get_wptr - get write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware write pointer
- */
-static uint64_t vcn_v2_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	if (ring->use_doorbell)
-		return adev->wb.wb[ring->wptr_offs];
-	else
-		return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_set_wptr - set write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Commits the write pointer to the hardware
- */
-static void vcn_v2_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	if (ring->use_doorbell) {
-		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
-		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
-	} else {
-		WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
-	}
-}
-
-/**
- * vcn_v2_0_jpeg_ring_insert_start - insert a start command
- *
- * @ring: amdgpu_ring pointer
- *
- * Write a start command to the ring.
- */
-void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
-{
-	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x68e04);
-
-	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x80010000);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_insert_end - insert a end command
- *
- * @ring: amdgpu_ring pointer
- *
- * Write a end command to the ring.
- */
-void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
-{
-	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x68e04);
-
-	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x00010000);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_emit_fence - emit an fence & trap command
- *
- * @ring: amdgpu_ring pointer
- * @fence: fence to emit
- *
- * Write a fence and a trap command to the ring.
- */
-void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-				unsigned flags)
-{
-	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
-
-	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, seq);
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, seq);
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, lower_32_bits(addr));
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, upper_32_bits(addr));
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x8);
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
-		0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
-	amdgpu_ring_write(ring, 0);
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x3fbc);
-
-	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x1);
-
-	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
-	amdgpu_ring_write(ring, 0);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_emit_ib - execute indirect buffer
- *
- * @ring: amdgpu_ring pointer
- * @ib: indirect buffer to execute
- *
- * Write ring commands to execute the indirect buffer.
- */
-void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
-				struct amdgpu_job *job,
-				struct amdgpu_ib *ib,
-				uint32_t flags)
-{
-	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
-
-	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
-
-	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, ib->length_dw);
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
-
-	amdgpu_ring_write(ring,	PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
-	amdgpu_ring_write(ring, 0);
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x01400200);
-
-	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x2);
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET,
-		0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
-	amdgpu_ring_write(ring, 0x2);
-}
-
-void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
-				uint32_t val, uint32_t mask)
-{
-	uint32_t reg_offset = (reg << 2);
-
-	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, 0x01400200);
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, val);
-
-	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
-		amdgpu_ring_write(ring, 0);
-		amdgpu_ring_write(ring,
-			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
-	} else {
-		amdgpu_ring_write(ring, reg_offset);
-		amdgpu_ring_write(ring,	PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
-			0, 0, PACKETJ_TYPE3));
-	}
-	amdgpu_ring_write(ring, mask);
-}
-
-void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
-				unsigned vmid, uint64_t pd_addr)
-{
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t data0, data1, mask;
-
-	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
-
-	/* wait for register write */
-	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
-	data1 = lower_32_bits(pd_addr);
-	mask = 0xffffffff;
-	vcn_v2_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
-}
-
-void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
-{
-	uint32_t reg_offset = (reg << 2);
-
-	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
-		0, 0, PACKETJ_TYPE0));
-	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
-		amdgpu_ring_write(ring, 0);
-		amdgpu_ring_write(ring,
-			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
-	} else {
-		amdgpu_ring_write(ring, reg_offset);
-		amdgpu_ring_write(ring,	PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
-			0, 0, PACKETJ_TYPE0));
-	}
-	amdgpu_ring_write(ring, val);
-}
-
-void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
-{
-	int i;
-
-	WARN_ON(ring->wptr % 2 || count % 2);
-
-	for (i = 0; i < count / 2; i++) {
-		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
-		amdgpu_ring_write(ring, 0);
-	}
-}
-
 static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
 					struct amdgpu_irq_src *source,
 					unsigned type,
@@ -2071,9 +1615,6 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
 	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
 		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
 		break;
-	case VCN_2_0__SRCID__JPEG_DECODE:
-		amdgpu_fence_process(&adev->vcn.inst->ring_jpeg);
-		break;
 	default:
 		DRM_ERROR("Unhandled interrupt: %d %d\n",
 			  entry->src_id, entry->src_data[0]);
@@ -2083,7 +1624,7 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
+int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	uint32_t tmp = 0;
@@ -2219,36 +1760,6 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
-static const struct amdgpu_ring_funcs vcn_v2_0_jpeg_ring_vm_funcs = {
-	.type = AMDGPU_RING_TYPE_VCN_JPEG,
-	.align_mask = 0xf,
-	.vmhub = AMDGPU_MMHUB_0,
-	.get_rptr = vcn_v2_0_jpeg_ring_get_rptr,
-	.get_wptr = vcn_v2_0_jpeg_ring_get_wptr,
-	.set_wptr = vcn_v2_0_jpeg_ring_set_wptr,
-	.emit_frame_size =
-		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
-		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
-		8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */
-		18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */
-		8 + 16,
-	.emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */
-	.emit_ib = vcn_v2_0_jpeg_ring_emit_ib,
-	.emit_fence = vcn_v2_0_jpeg_ring_emit_fence,
-	.emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush,
-	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
-	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
-	.insert_nop = vcn_v2_0_jpeg_ring_nop,
-	.insert_start = vcn_v2_0_jpeg_ring_insert_start,
-	.insert_end = vcn_v2_0_jpeg_ring_insert_end,
-	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.begin_use = amdgpu_vcn_ring_begin_use,
-	.end_use = amdgpu_vcn_ring_end_use,
-	.emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg,
-	.emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait,
-	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
 static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
 {
 	adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
@@ -2265,12 +1776,6 @@ static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
 	DRM_INFO("VCN encode is enabled in VM mode\n");
 }
 
-static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
-{
-	adev->vcn.inst->ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs;
-	DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
-}
-
 static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
 	.set = vcn_v2_0_set_interrupt_state,
 	.process = vcn_v2_0_process_interrupt,
@@ -2278,7 +1783,7 @@ static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
 
 static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
+	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
 	adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
index 8467292..6c9de18 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
@@ -37,6 +37,7 @@ extern void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
 				unsigned vmid, uint64_t pd_addr);
 extern void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
 				uint32_t reg, uint32_t val);
+extern int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring);
 
 extern void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring);
 extern void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
@@ -49,19 +50,6 @@ extern void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 				unsigned int vmid, uint64_t pd_addr);
 extern void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
 
-extern void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring);
-extern void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring);
-extern void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-				unsigned flags);
-extern void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
-				struct amdgpu_ib *ib, uint32_t flags);
-extern void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
-				uint32_t val, uint32_t mask);
-extern void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
-				unsigned vmid, uint64_t pd_addr);
-extern void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
-extern void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count);
-
 extern const struct amdgpu_ip_block_version vcn_v2_0_ip_block;
 
 #endif /* __VCN_V2_0_H__ */
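
Across these files, vcn_v1_0_pause_dpg_mode and vcn_v2_0_pause_dpg_mode gain an inst_idx parameter, so single-instance callers (the idle work handler, vcn_v1_0_ring_begin_use) pass 0 while multi-instance code such as vcn_v2_5 can reuse the same callback shape per instance. The sketch below illustrates only that callback shape with made-up names (mock_adev, dpg_state, mock_pause); it is not the amdgpu interface itself.

#include <stdio.h>

/* Hypothetical types standing in for the amdgpu ones. */
struct dpg_state { int fw_based; int jpeg; };

struct mock_adev {
	int num_inst;
	/* Instance-aware pause callback, matching the new prototype shape:
	 * (device, instance index, requested pause state). */
	int (*pause_dpg_mode)(struct mock_adev *adev, int inst_idx,
			      struct dpg_state *new_state);
};

static int mock_pause(struct mock_adev *adev, int inst_idx,
		      struct dpg_state *new_state)
{
	printf("pause inst %d: fw_based=%d jpeg=%d\n",
	       inst_idx, new_state->fw_based, new_state->jpeg);
	return 0;
}

int main(void)
{
	struct mock_adev adev = { .num_inst = 2, .pause_dpg_mode = mock_pause };
	struct dpg_state state = { .fw_based = 1, .jpeg = 0 };
	int i;

	/* A multi-instance caller routes the same callback per instance;
	 * a single-instance caller would simply pass 0. */
	for (i = 0; i < adev.num_inst; i++)
		adev.pause_dpg_mode(&adev, i, &state);

	return 0;
}

The design choice visible in the diff is the usual one for per-instance hardware blocks: rather than duplicating the callback per instance, the instance index travels as an argument so one implementation serves both the single-instance VCN 1.0/2.0 parts and the dual-instance Arcturus VCN 2.5.
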
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 93edf91..70fae79 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -29,6 +29,7 @@
 #include "soc15.h"
 #include "soc15d.h"
 #include "vcn_v2_0.h"
+#include "mmsch_v1_0.h"
 
 #include "vcn/vcn_2_5_offset.h"
 #include "vcn/vcn_2_5_sh_mask.h"
@@ -47,16 +48,16 @@
 #define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 	0x3b5
 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c
 
-#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET			0x401f
-
-#define VCN25_MAX_HW_INSTANCES_ARCTURUS				2
+#define VCN25_MAX_HW_INSTANCES_ARCTURUS			2
 
 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
-static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
 static int vcn_v2_5_set_powergating_state(void *handle,
 				enum amd_powergating_state state);
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
+				int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
 
 static int amdgpu_ih_clientid_vcns[] = {
 	SOC15_IH_CLIENTID_VCN,
@@ -91,11 +92,16 @@ static int vcn_v2_5_early_init(void *handle)
 	} else
 		adev->vcn.num_vcn_inst = 1;
 
-	adev->vcn.num_enc_rings = 2;
+	if (amdgpu_sriov_vf(adev)) {
+		adev->vcn.num_vcn_inst = 2;
+		adev->vcn.harvest_config = 0;
+		adev->vcn.num_enc_rings = 1;
+	} else {
+		adev->vcn.num_enc_rings = 2;
+	}
 
 	vcn_v2_5_set_dec_ring_funcs(adev);
 	vcn_v2_5_set_enc_ring_funcs(adev);
-	vcn_v2_5_set_jpeg_ring_funcs(adev);
 	vcn_v2_5_set_irq_funcs(adev);
 
 	return 0;
@@ -130,12 +136,6 @@ static int vcn_v2_5_sw_init(void *handle)
 			if (r)
 				return r;
 		}
-
-		/* VCN JPEG TRAP */
-		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
-				VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[j].irq);
-		if (r)
-			return r;
 	}
 
 	r = amdgpu_vcn_sw_init(adev);
@@ -184,12 +184,11 @@ static int vcn_v2_5_sw_init(void *handle)
 		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
 		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);
 
-		adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
-		adev->vcn.inst[j].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, j, mmUVD_JPEG_PITCH);
-
 		ring = &adev->vcn.inst[j].ring_dec;
 		ring->use_doorbell = true;
-		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j;
+
+		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
 		sprintf(ring->name, "vcn_dec_%d", j);
 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
 		if (r)
@@ -198,22 +197,26 @@ static int vcn_v2_5_sw_init(void *handle)
 		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 			ring = &adev->vcn.inst[j].ring_enc[i];
 			ring->use_doorbell = true;
-			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j;
+
+			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
+
 			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
 			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
 			if (r)
 				return r;
 		}
+	}
 
-		ring = &adev->vcn.inst[j].ring_jpeg;
-		ring->use_doorbell = true;
-		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8*j;
-		sprintf(ring->name, "vcn_jpeg_%d", j);
-		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+	if (amdgpu_sriov_vf(adev)) {
+		r = amdgpu_virt_alloc_mm_table(adev);
 		if (r)
 			return r;
 	}
 
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+
 	return 0;
 }
 
@@ -229,6 +232,9 @@ static int vcn_v2_5_sw_fini(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_free_mm_table(adev);
+
 	r = amdgpu_vcn_suspend(adev);
 	if (r)
 		return r;
@@ -249,35 +255,44 @@ static int vcn_v2_5_hw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring;
-	int i, j, r;
+	int i, j, r = 0;
+
+	if (amdgpu_sriov_vf(adev))
+		r = vcn_v2_5_sriov_start(adev);
 
 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 		if (adev->vcn.harvest_config & (1 << j))
 			continue;
-		ring = &adev->vcn.inst[j].ring_dec;
 
-		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+		if (amdgpu_sriov_vf(adev)) {
+			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
+			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
+			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
+			adev->vcn.inst[j].ring_dec.sched.ready = true;
+		} else {
+
+			ring = &adev->vcn.inst[j].ring_dec;
+
+			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
 						     ring->doorbell_index, j);
 
-		r = amdgpu_ring_test_helper(ring);
-		if (r)
-			goto done;
-
-		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-			ring = &adev->vcn.inst[j].ring_enc[i];
 			r = amdgpu_ring_test_helper(ring);
 			if (r)
 				goto done;
-		}
 
-		ring = &adev->vcn.inst[j].ring_jpeg;
-		r = amdgpu_ring_test_helper(ring);
-		if (r)
-			goto done;
+			for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+				ring = &adev->vcn.inst[j].ring_enc[i];
+				r = amdgpu_ring_test_helper(ring);
+				if (r)
+					goto done;
+			}
+		}
 	}
+
 done:
 	if (!r)
-		DRM_INFO("VCN decode and encode initialized successfully.\n");
+		DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
+			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
 
 	return r;
 }
@@ -300,7 +315,9 @@ static int vcn_v2_5_hw_fini(void *handle)
 			continue;
 		ring = &adev->vcn.inst[i].ring_dec;
 
-		if (RREG32_SOC15(VCN, i, mmUVD_STATUS))
+		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
 			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
 		ring->sched.ready = false;
@@ -309,9 +326,6 @@ static int vcn_v2_5_hw_fini(void *handle)
 			ring = &adev->vcn.inst[i].ring_enc[j];
 			ring->sched.ready = false;
 		}
-
-		ring = &adev->vcn.inst[i].ring_jpeg;
-		ring->sched.ready = false;
 	}
 
 	return 0;
@@ -378,9 +392,9 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
 		/* cache window 0: fw */
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
+				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
 			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
+				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
 			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
 			offset = 0;
 		} else {
@@ -412,6 +426,99 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
 	}
 }
 
+static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t offset;
+
+	/* cache window 0: fw */
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+		if (!indirect) {
+			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
+			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
+			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+		} else {
+			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+		}
+		offset = 0;
+	} else {
+		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+		offset = size;
+		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
+	}
+
+	if (!indirect)
+		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+	else
+		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+
+	/* cache window 1: stack */
+	if (!indirect) {
+		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+	} else {
+		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+	}
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+
+	/* cache window 2: context */
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+
+	/* non-cache window */
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+
+	/* VCN global tiling registers */
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+}
+
 /**
  * vcn_v2_5_disable_clock_gating - disable VCN clock gating
  *
@@ -530,6 +637,54 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
 	}
 }
 
+static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
+		uint8_t sram_sel, int inst_idx, uint8_t indirect)
+{
+	uint32_t reg_data = 0;
+
+	/* enable sw clock gating control */
+	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+	else
+		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
+		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
+		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
+		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
+		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
+		 UVD_CGC_CTRL__SYS_MODE_MASK |
+		 UVD_CGC_CTRL__UDEC_MODE_MASK |
+		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
+		 UVD_CGC_CTRL__REGS_MODE_MASK |
+		 UVD_CGC_CTRL__RBC_MODE_MASK |
+		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
+		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
+		 UVD_CGC_CTRL__IDCT_MODE_MASK |
+		 UVD_CGC_CTRL__MPRD_MODE_MASK |
+		 UVD_CGC_CTRL__MPC_MODE_MASK |
+		 UVD_CGC_CTRL__LBSI_MODE_MASK |
+		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
+		 UVD_CGC_CTRL__WCB_MODE_MASK |
+		 UVD_CGC_CTRL__VCPU_MODE_MASK |
+		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
+
+	/* turn off clock gating */
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
+
+	/* turn on SUVD clock gating */
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
+
+	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
+}
+
 /**
  * vcn_v2_5_enable_clock_gating - enable VCN clock gating
  *
@@ -592,111 +747,134 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
 	}
 }
 
-/**
- * jpeg_v2_5_start - start JPEG block
- *
- * @adev: amdgpu_device pointer
- *
- * Setup and start the JPEG block
- */
-static int jpeg_v2_5_start(struct amdgpu_device *adev)
+static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 {
 	struct amdgpu_ring *ring;
-	uint32_t tmp;
-	int i;
+	uint32_t rb_bufsz, tmp;
 
-	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-		if (adev->vcn.harvest_config & (1 << i))
-			continue;
-		ring = &adev->vcn.inst[i].ring_jpeg;
-		/* disable anti hang mechanism */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), 0,
-			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+	/* disable register anti-hang mechanism */
+	WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1,
+		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+	/* enable dynamic power gating mode */
+	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS);
+	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
+	WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp);
 
-		/* JPEG disable CGC */
-		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
-		tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
-		tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
-		tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
-		WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);
+	if (indirect)
+		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
 
-		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
-		tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
-			| JPEG_CGC_GATE__JPEG2_DEC_MASK
-			| JPEG_CGC_GATE__JMCIF_MASK
-			| JPEG_CGC_GATE__JRBBM_MASK);
-		WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);
+	/* enable clock gating */
+	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
 
-		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
-		tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
-			| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
-			| JPEG_CGC_CTRL__JMCIF_MODE_MASK
-			| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
-		WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);
+	/* enable VCPU clock */
+	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
 
-		/* MJPEG global tiling registers */
-		WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
-			adev->gfx.config.gb_addr_config);
-		WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
-			adev->gfx.config.gb_addr_config);
+	/* disable master interrupt */
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
 
-		/* enable JMI channel */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), 0,
-			~UVD_JMI_CNTL__SOFT_RESET_MASK);
+	/* setup mmUVD_LMI_CTRL */
+	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+		UVD_LMI_CTRL__REQ_MODE_MASK |
+		UVD_LMI_CTRL__CRC_RESET_MASK |
+		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+		0x00100000L);
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
 
-		/* enable System Interrupt for JRBC */
-		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmJPEG_SYS_INT_EN),
-			JPEG_SYS_INT_EN__DJRBC_MASK,
-			~JPEG_SYS_INT_EN__DJRBC_MASK);
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_MPC_CNTL),
+		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
 
-		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_VMID, 0);
-		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
-		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
-			lower_32_bits(ring->gpu_addr));
-		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
-			upper_32_bits(ring->gpu_addr));
-		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_RPTR, 0);
-		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR, 0);
-		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
-		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
-		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR);
-	}
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_MPC_SET_MUXA0),
+		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
 
-	return 0;
-}
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_MPC_SET_MUXB0),
+		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
 
-/**
- * jpeg_v2_5_stop - stop JPEG block
- *
- * @adev: amdgpu_device pointer
- *
- * stop the JPEG block
- */
-static int jpeg_v2_5_stop(struct amdgpu_device *adev)
-{
-	uint32_t tmp;
-	int i;
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_MPC_SET_MUX),
+		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
 
-	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-		if (adev->vcn.harvest_config & (1 << i))
-			continue;
-		/* reset JMI */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL),
-			UVD_JMI_CNTL__SOFT_RESET_MASK,
-			~UVD_JMI_CNTL__SOFT_RESET_MASK);
+	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
 
-		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
-		tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK
-			|JPEG_CGC_GATE__JPEG2_DEC_MASK
-			|JPEG_CGC_GATE__JMCIF_MASK
-			|JPEG_CGC_GATE__JRBBM_MASK);
-		WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
 
-		/* enable anti hang mechanism */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS),
-			UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
-			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
-	}
+	/* enable LMI MC and UMC channels */
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
+
+	/* unblock VCPU register access */
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
+
+	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+
+	/* enable master interrupt */
+	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+		UVD, 0, mmUVD_MASTINT_EN),
+		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
+
+	if (indirect)
+		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
+				    (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
+					       (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
+
+	ring = &adev->vcn.inst[inst_idx].ring_dec;
+	/* force RBC into idle state */
+	rb_bufsz = order_base_2(ring->ring_size);
+	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+
+	/* set the write pointer delay */
+	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
+
+	/* set the wb address */
+	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
+		(upper_32_bits(ring->gpu_addr) >> 2));
+
+	/* program the RB_BASE for the ring buffer */
+	WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+		lower_32_bits(ring->gpu_addr));
+	WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+		upper_32_bits(ring->gpu_addr));
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0);
+
+	WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0);
+
+	ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR);
+	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+		lower_32_bits(ring->wptr));
 
 	return 0;
 }
@@ -713,6 +891,11 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
+		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+			continue;
+		}
+
 		/* disable register anti-hang mechanism */
 		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
 			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -722,6 +905,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
 		WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
 	}
 
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+		return 0;
+
 	/*SW clock gating */
 	vcn_v2_5_disable_clock_gating(adev);
 
@@ -874,23 +1060,250 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
 		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
 		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
 	}
-	r = jpeg_v2_5_start(adev);
 
-	return r;
+	return 0;
+}
+
+static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
+				struct amdgpu_mm_table *table)
+{
+	uint32_t data = 0, loop = 0, size = 0;
+	uint64_t addr = table->gpu_addr;
+	struct mmsch_v1_1_init_header *header = NULL;
+
+	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
+	size = header->total_size;
+
+	/*
+	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
+	 *  memory descriptor location
+	 */
+	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+
+	/* 2, update vmid of descriptor */
+	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+	/* use domain0 for MM scheduler */
+	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+	WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+
+	/* 3, notify mmsch about the size of this descriptor */
+	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+
+	/* 4, set resp to zero */
+	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+
+	/*
+	 * 5, kick off the initialization and wait until
+	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
+	 */
+	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+
+	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+	loop = 10;
+	while ((data & 0x10000002) != 0x10000002) {
+		udelay(100);
+		data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+		loop--;
+		if (!loop)
+			break;
+	}
+
+	if (!loop) {
+		dev_err(adev->dev,
+			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
+			data);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	uint32_t offset, size, tmp, i, rb_bufsz;
+	uint32_t table_size = 0;
+	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
+	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
+	struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
+	struct mmsch_v1_0_cmd_end end = { { 0 } };
+	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
+	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
+
+	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
+	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
+	end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+	header->version = MMSCH_VERSION;
+	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
+	init_table += header->total_size;
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		header->eng[i].table_offset = header->total_size;
+		header->eng[i].init_status = 0;
+		header->eng[i].table_size = 0;
+
+		table_size = 0;
+
+		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
+
+		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+		/* mc resume */
+		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+			MMSCH_V1_0_INSERT_DIRECT_WT(
+				SOC15_REG_OFFSET(UVD, i,
+					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
+			MMSCH_V1_0_INSERT_DIRECT_WT(
+				SOC15_REG_OFFSET(UVD, i,
+					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
+			offset = 0;
+			MMSCH_V1_0_INSERT_DIRECT_WT(
+				SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
+		} else {
+			MMSCH_V1_0_INSERT_DIRECT_WT(
+				SOC15_REG_OFFSET(UVD, i,
+					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+				lower_32_bits(adev->vcn.inst[i].gpu_addr));
+			MMSCH_V1_0_INSERT_DIRECT_WT(
+				SOC15_REG_OFFSET(UVD, i,
+					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+				upper_32_bits(adev->vcn.inst[i].gpu_addr));
+			offset = size;
+			MMSCH_V1_0_INSERT_DIRECT_WT(
+				SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+		}
+
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+			size);
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+			0);
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+			AMDGPU_VCN_STACK_SIZE);
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
+				AMDGPU_VCN_STACK_SIZE));
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
+				AMDGPU_VCN_STACK_SIZE));
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+			0);
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+			AMDGPU_VCN_CONTEXT_SIZE);
+
+		ring = &adev->vcn.inst[i].ring_enc[0];
+		ring->wptr = 0;
+
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+			lower_32_bits(ring->gpu_addr));
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+			upper_32_bits(ring->gpu_addr));
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+			ring->ring_size / 4);
+
+		ring = &adev->vcn.inst[i].ring_dec;
+		ring->wptr = 0;
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
+			lower_32_bits(ring->gpu_addr));
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
+			upper_32_bits(ring->gpu_addr));
+
+		/* force RBC into idle state */
+		rb_bufsz = order_base_2(ring->ring_size);
+		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+		MMSCH_V1_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+
+		/* add end packet */
+		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
+		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
+		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;
+
+		/* refine header */
+		header->eng[i].table_size = table_size;
+		header->total_size += table_size;
+	}
+
+	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
+}
+
+static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+{
+	int ret_code = 0;
+	uint32_t tmp;
+
+	/* Wait for power status to be 1 */
+	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+	/* wait for read ptr to be equal to write ptr */
+	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR);
+	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2);
+	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+
+	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+	/* disable dynamic power gating mode */
+	WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0,
+			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+	return 0;
 }
 
 static int vcn_v2_5_stop(struct amdgpu_device *adev)
 {
 	uint32_t tmp;
-	int i, r;
-
-	r = jpeg_v2_5_stop(adev);
-	if (r)
-		return r;
+	int i, r = 0;
 
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
+		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+			r = vcn_v2_5_stop_dpg_mode(adev, i);
+			continue;
+		}
+
 		/* wait for vcn idle */
 		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
 		if (r)
@@ -946,6 +1359,67 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
+				int inst_idx, struct dpg_pause_state *new_state)
+{
+	struct amdgpu_ring *ring;
+	uint32_t reg_data = 0;
+	int ret_code;
+
+	/* pause/unpause if state is changed */
+	if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+		DRM_DEBUG("dpg pause state changed %d -> %d",
+			adev->vcn.pause_state.fw_based, new_state->fw_based);
+		reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
+			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
+		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
+			ret_code = 0;
+			SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
+				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+			if (!ret_code) {
+				/* pause DPG */
+				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+				WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+
+				/* wait for ACK */
+				SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE,
+					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+
+				/* Restore */
+				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
+				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
+				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+
+				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
+				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
+				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+
+				WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+					   RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+
+				SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
+					   0x0, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+			}
+		} else {
+			/* unpause dpg, no need to wait */
+			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+			WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+		}
+		adev->vcn.pause_state.fw_based = new_state->fw_based;
+	}
+
+	return 0;
+}
+
 /**
  * vcn_v2_5_dec_ring_get_rptr - get read pointer
  *
@@ -988,6 +1462,10 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+		WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2,
+			lower_32_bits(ring->wptr) | 0x80000000);
+
 	if (ring->use_doorbell) {
 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
@@ -1013,7 +1491,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
 	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
 	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
 	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
-	.test_ring = amdgpu_vcn_dec_ring_test_ring,
+	.test_ring = vcn_v2_0_dec_ring_test_ring,
 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
 	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
 	.insert_start = vcn_v2_0_dec_ring_insert_start,
@@ -1125,86 +1603,6 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
-/**
- * vcn_v2_5_jpeg_ring_get_rptr - get read pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware read pointer
- */
-static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_RPTR);
-}
-
-/**
- * vcn_v2_5_jpeg_ring_get_wptr - get write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware write pointer
- */
-static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	if (ring->use_doorbell)
-		return adev->wb.wb[ring->wptr_offs];
-	else
-		return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR);
-}
-
-/**
- * vcn_v2_5_jpeg_ring_set_wptr - set write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Commits the write pointer to the hardware
- */
-static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	if (ring->use_doorbell) {
-		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
-		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
-	} else {
-		WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
-	}
-}
-
-static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = {
-	.type = AMDGPU_RING_TYPE_VCN_JPEG,
-	.align_mask = 0xf,
-	.vmhub = AMDGPU_MMHUB_1,
-	.get_rptr = vcn_v2_5_jpeg_ring_get_rptr,
-	.get_wptr = vcn_v2_5_jpeg_ring_get_wptr,
-	.set_wptr = vcn_v2_5_jpeg_ring_set_wptr,
-	.emit_frame_size =
-		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
-		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
-		8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */
-		18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */
-		8 + 16,
-	.emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */
-	.emit_ib = vcn_v2_0_jpeg_ring_emit_ib,
-	.emit_fence = vcn_v2_0_jpeg_ring_emit_fence,
-	.emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush,
-	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
-	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
-	.insert_nop = vcn_v2_0_jpeg_ring_nop,
-	.insert_start = vcn_v2_0_jpeg_ring_insert_start,
-	.insert_end = vcn_v2_0_jpeg_ring_insert_end,
-	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.begin_use = amdgpu_vcn_ring_begin_use,
-	.end_use = amdgpu_vcn_ring_end_use,
-	.emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg,
-	.emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait,
-	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
 {
 	int i;
@@ -1233,19 +1631,6 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
 	}
 }
 
-static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev)
-{
-	int i;
-
-	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-		if (adev->vcn.harvest_config & (1 << i))
-			continue;
-		adev->vcn.inst[i].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs;
-		adev->vcn.inst[i].ring_jpeg.me = i;
-		DRM_INFO("VCN(%d) jpeg decode is enabled in VM mode\n", i);
-	}
-}
-
 static bool vcn_v2_5_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1281,7 +1666,10 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
+
+	if (amdgpu_sriov_vf(adev))
+		return 0;
 
 	if (enable) {
 		if (vcn_v2_5_is_idle(handle))
@@ -1300,6 +1688,9 @@ static int vcn_v2_5_set_powergating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
 	if(state == adev->vcn.cur_state)
 		return 0;
 
@@ -1352,9 +1743,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
 	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
 		break;
-	case VCN_2_0__SRCID__JPEG_DECODE:
-		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_jpeg);
-		break;
 	default:
 		DRM_ERROR("Unhandled interrupt: %d %d\n",
 			  entry->src_id, entry->src_data[0]);
@@ -1376,7 +1764,7 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
-		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 2;
+		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
 		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 5cb7e23..407c609 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -234,16 +234,9 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
 
 	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-	ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
 	ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
-	if (adev->irq.ih.use_bus_addr) {
-		ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
-	} else {
-		ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_FBPA_ENABLE, 1);
-	}
 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
 				   !!adev->irq.msi_enabled);
-
 	if (amdgpu_sriov_vf(adev)) {
 		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
 			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
@@ -253,10 +246,19 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
 	}
 
-	if ((adev->asic_type == CHIP_ARCTURUS
-		&& adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)
-		|| adev->asic_type == CHIP_RENOIR)
+	if ((adev->asic_type == CHIP_ARCTURUS &&
+	     adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
+	    adev->asic_type == CHIP_RENOIR) {
+		ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
+		if (adev->irq.ih.use_bus_addr) {
+			ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+						   MC_SPACE_GPA_ENABLE, 1);
+		} else {
+			ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+						   MC_SPACE_FBPA_ENABLE, 1);
+		}
 		WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
+	}
 
 	/* set the writeback address whether it's enabled or not */
 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
@@ -715,7 +717,7 @@ static int vega10_ih_set_clockgating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	vega10_ih_update_clockgating_state(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 	return 0;
 
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index f1b171e..78b3590 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -689,40 +689,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
 	return -EINVAL;
 }
 
-int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
-{
-	void *pp_handle = adev->powerplay.pp_handle;
-	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-	if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
-		*cap = false;
-		return -ENOENT;
-	}
-
-	return pp_funcs->get_asic_baco_capability(pp_handle, cap);
-}
-
-int smu7_asic_baco_reset(struct amdgpu_device *adev)
-{
-	void *pp_handle = adev->powerplay.pp_handle;
-	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-	if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
-		return -ENOENT;
-
-	/* enter BACO state */
-	if (pp_funcs->set_asic_baco_state(pp_handle, 1))
-		return -EIO;
-
-	/* exit BACO state */
-	if (pp_funcs->set_asic_baco_state(pp_handle, 0))
-		return -EIO;
-
-	dev_info(adev->dev, "GPU BACO reset\n");
-
-	return 0;
-}
-
 /**
  * vi_asic_pci_config_reset - soft reset GPU
  *
@@ -745,6 +711,21 @@ static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
 	return r;
 }
 
+static bool vi_asic_supports_baco(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_FIJI:
+	case CHIP_TONGA:
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
+	case CHIP_TOPAZ:
+		return amdgpu_dpm_is_baco_supported(adev);
+	default:
+		return false;
+	}
+}
+
 static enum amd_reset_method
 vi_asic_reset_method(struct amdgpu_device *adev)
 {
@@ -757,7 +738,7 @@ vi_asic_reset_method(struct amdgpu_device *adev)
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
 	case CHIP_TOPAZ:
-		smu7_asic_get_baco_capability(adev, &baco_reset);
+		baco_reset = amdgpu_dpm_is_baco_supported(adev);
 		break;
 	default:
 		baco_reset = false;
@@ -786,7 +767,7 @@ static int vi_asic_reset(struct amdgpu_device *adev)
 	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
 		if (!adev->in_suspend)
 			amdgpu_inc_vram_lost(adev);
-		r = smu7_asic_baco_reset(adev);
+		r = amdgpu_dpm_baco_reset(adev);
 	} else {
 		r = vi_asic_pci_config_reset(adev);
 	}
@@ -1119,6 +1100,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
 	.get_pcie_usage = &vi_get_pcie_usage,
 	.need_reset_on_init = &vi_need_reset_on_init,
 	.get_pcie_replay_count = &vi_get_pcie_replay_count,
+	.supports_baco = &vi_asic_supports_baco,
 };
 
 #define CZ_REV_BRISTOL(rev)	 \
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 40d4174..defb4aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -31,7 +31,5 @@ void vi_srbm_select(struct amdgpu_device *adev,
 int vi_set_ip_blocks(struct amdgpu_device *adev);
 
 void legacy_doorbell_index_init(struct amdgpu_device *adev);
-int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
-int smu7_asic_baco_reset(struct amdgpu_device *adev);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 4815506..6147462 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -38,11 +38,9 @@
 		$(AMDKFD_PATH)/kfd_mqd_manager_v9.o \
 		$(AMDKFD_PATH)/kfd_mqd_manager_v10.o \
 		$(AMDKFD_PATH)/kfd_kernel_queue.o \
-		$(AMDKFD_PATH)/kfd_kernel_queue_cik.o \
-		$(AMDKFD_PATH)/kfd_kernel_queue_vi.o \
-		$(AMDKFD_PATH)/kfd_kernel_queue_v9.o \
-		$(AMDKFD_PATH)/kfd_kernel_queue_v10.o \
 		$(AMDKFD_PATH)/kfd_packet_manager.o \
+		$(AMDKFD_PATH)/kfd_packet_manager_vi.o \
+		$(AMDKFD_PATH)/kfd_packet_manager_v9.o \
 		$(AMDKFD_PATH)/kfd_process_queue_manager.o \
 		$(AMDKFD_PATH)/kfd_device_queue_manager.o \
 		$(AMDKFD_PATH)/kfd_device_queue_manager_cik.o \
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 1544007..3f0300e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -42,6 +42,7 @@
 
 static long kfd_ioctl(struct file *, unsigned int, unsigned long);
 static int kfd_open(struct inode *, struct file *);
+static int kfd_release(struct inode *, struct file *);
 static int kfd_mmap(struct file *, struct vm_area_struct *);
 
 static const char kfd_dev_name[] = "kfd";
@@ -51,6 +52,7 @@ static const struct file_operations kfd_fops = {
 	.unlocked_ioctl = kfd_ioctl,
 	.compat_ioctl = compat_ptr_ioctl,
 	.open = kfd_open,
+	.release = kfd_release,
 	.mmap = kfd_mmap,
 };
 
@@ -124,8 +126,13 @@ static int kfd_open(struct inode *inode, struct file *filep)
 	if (IS_ERR(process))
 		return PTR_ERR(process);
 
-	if (kfd_is_locked())
+	if (kfd_is_locked()) {
+		kfd_unref_process(process);
 		return -EAGAIN;
+	}
+
+	/* filep now owns the reference returned by kfd_create_process */
+	filep->private_data = process;
 
 	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
 		process->pasid, process->is_32bit_user_mode);
@@ -133,6 +140,16 @@ static int kfd_open(struct inode *inode, struct file *filep)
 	return 0;
 }
 
+static int kfd_release(struct inode *inode, struct file *filep)
+{
+	struct kfd_process *process = filep->private_data;
+
+	if (process)
+		kfd_unref_process(process);
+
+	return 0;
+}
+
 static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
 					void *data)
 {
@@ -258,6 +275,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 	unsigned int queue_id;
 	struct kfd_process_device *pdd;
 	struct queue_properties q_properties;
+	uint32_t doorbell_offset_in_process = 0;
 
 	memset(&q_properties, 0, sizeof(struct queue_properties));
 
@@ -286,7 +304,8 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 			p->pasid,
 			dev->id);
 
-	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);
+	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id,
+			&doorbell_offset_in_process);
 	if (err != 0)
 		goto err_create_queue;
 
@@ -296,14 +315,11 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 	/* Return gpu_id as doorbell offset for mmap usage */
 	args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
 	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
-	args->doorbell_offset <<= PAGE_SHIFT;
 	if (KFD_IS_SOC15(dev->device_info->asic_family))
-		/* On SOC15 ASICs, doorbell allocation must be
-		 * per-device, and independent from the per-process
-		 * queue_id. Return the doorbell offset within the
-		 * doorbell aperture to user mode.
+		/* On SOC15 ASICs, include the doorbell offset within the
+		 * process doorbell frame, which is 2 pages.
 		 */
-		args->doorbell_offset |= q_properties.doorbell_off;
+		args->doorbell_offset |= doorbell_offset_in_process;
 
 	mutex_unlock(&p->mutex);
 
@@ -1312,10 +1328,9 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
 	/* MMIO is mapped through kfd device
 	 * Generate a kfd mmap offset
 	 */
-	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
-		args->mmap_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(args->gpu_id);
-		args->mmap_offset <<= PAGE_SHIFT;
-	}
+	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
+		args->mmap_offset = KFD_MMAP_TYPE_MMIO
+					| KFD_MMAP_GPU_ID(args->gpu_id);
 
 	return 0;
 
@@ -1803,9 +1818,14 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 
 	dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
 
-	process = kfd_get_process(current);
-	if (IS_ERR(process)) {
-		dev_dbg(kfd_device, "no process\n");
+	/* Get the process struct from the filep. Only the process
+	 * that opened /dev/kfd can use the file descriptor. Child
+	 * processes need to create their own KFD device context.
+	 */
+	process = filep->private_data;
+	if (process->lead_thread != current->group_leader) {
+		dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
+		retcode = -EBADF;
 		goto err_i1;
 	}
 
@@ -1899,20 +1919,19 @@ static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct kfd_process *process;
 	struct kfd_dev *dev = NULL;
-	unsigned long vm_pgoff;
+	unsigned long mmap_offset;
 	unsigned int gpu_id;
 
 	process = kfd_get_process(current);
 	if (IS_ERR(process))
 		return PTR_ERR(process);
 
-	vm_pgoff = vma->vm_pgoff;
-	vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff);
-	gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff);
+	mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
+	gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
 	if (gpu_id)
 		dev = kfd_device_by_id(gpu_id);
 
-	switch (vm_pgoff & KFD_MMAP_TYPE_MASK) {
+	switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
 	case KFD_MMAP_TYPE_DOORBELL:
 		if (!dev)
 			return -ENODEV;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index d59f2cd0..27bcc5b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -72,11 +72,11 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 	 * The receive packet buff will be sitting on the Indirect Buffer
 	 * and in the PQ we put the IB packet + sync packet(s).
 	 */
-	status = kq->ops.acquire_packet_buffer(kq,
+	status = kq_acquire_packet_buffer(kq,
 				pq_packets_size_in_bytes / sizeof(uint32_t),
 				&ib_packet_buff);
 	if (status) {
-		pr_err("acquire_packet_buffer failed\n");
+		pr_err("kq_acquire_packet_buffer failed\n");
 		return status;
 	}
 
@@ -115,7 +115,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 
 	if (status) {
 		pr_err("Failed to allocate GART memory\n");
-		kq->ops.rollback_packet(kq);
+		kq_rollback_packet(kq);
 		return status;
 	}
 
@@ -151,7 +151,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 
 	rm_packet->data_lo = QUEUESTATE__ACTIVE;
 
-	kq->ops.submit_packet(kq);
+	kq_submit_packet(kq);
 
 	/* Wait till CP writes sync code: */
 	status = amdkfd_fence_wait_timeout(
@@ -185,7 +185,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
 	properties.type = KFD_QUEUE_TYPE_DIQ;
 
 	status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,
-				&properties, &qid);
+				&properties, &qid, NULL);
 
 	if (status) {
 		pr_err("Failed to create DIQ\n");
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 15c5230..511712c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -93,7 +93,7 @@ void kfd_debugfs_init(void)
 			    kfd_debugfs_hqds_by_device, &kfd_debugfs_fops);
 	debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
 			    kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
-	debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
+	debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
 			    NULL, &kfd_debugfs_hang_hws_fops);
 }
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 4fa8834..2a9e401 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -728,6 +728,9 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
 {
 	if (!kfd->init_complete)
 		return 0;
+
+	kfd->dqm->ops.pre_reset(kfd->dqm);
+
 	kgd2kfd_suspend(kfd);
 
 	kfd_signal_reset_event(kfd);
@@ -742,7 +745,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
 
 int kgd2kfd_post_reset(struct kfd_dev *kfd)
 {
-	int ret, count;
+	int ret;
 
 	if (!kfd->init_complete)
 		return 0;
@@ -750,7 +753,7 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
 	ret = kfd_resume(kfd);
 	if (ret)
 		return ret;
-	count = atomic_dec_return(&kfd_locked);
+	atomic_dec(&kfd_locked);
 
 	atomic_set(&kfd->sram_ecc_flag, 0);
 
@@ -822,6 +825,21 @@ static int kfd_resume(struct kfd_dev *kfd)
 	return err;
 }
 
+static inline void kfd_queue_work(struct workqueue_struct *wq,
+				  struct work_struct *work)
+{
+	int cpu, new_cpu;
+
+	cpu = new_cpu = smp_processor_id();
+	do {
+		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
+		if (cpu_to_node(new_cpu) == numa_node_id())
+			break;
+	} while (cpu != new_cpu);
+
+	queue_work_on(new_cpu, wq, work);
+}
+
 /* This is called directly from KGD at ISR. */
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 {
@@ -844,7 +862,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 				   patched_ihre, &is_patched)
 	    && enqueue_ih_ring_entry(kfd,
 				     is_patched ? patched_ihre : ih_ring_entry))
-		queue_work(kfd->ih_wq, &kfd->interrupt_work);
+		kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);
 
 	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 984c2f2..80d22bf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -170,7 +170,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
 	}
 
 	q->properties.doorbell_off =
-		kfd_doorbell_id_to_offset(dev, q->process,
+		kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
 					  q->doorbell_id);
 
 	return 0;
@@ -930,11 +930,11 @@ static void uninitialize(struct device_queue_manager *dqm)
 	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
 		kfree(dqm->mqd_mgrs[i]);
 	mutex_destroy(&dqm->lock_hidden);
-	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
 }
 
 static int start_nocpsch(struct device_queue_manager *dqm)
 {
+	pr_info("SW scheduler is used");
 	init_interrupts(dqm);
 	
 	if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
@@ -947,12 +947,19 @@ static int start_nocpsch(struct device_queue_manager *dqm)
 static int stop_nocpsch(struct device_queue_manager *dqm)
 {
 	if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
-		pm_uninit(&dqm->packets);
+		pm_uninit(&dqm->packets, false);
 	dqm->sched_running = false;
 
 	return 0;
 }
 
+static void pre_reset(struct device_queue_manager *dqm)
+{
+	dqm_lock(dqm);
+	dqm->is_resetting = true;
+	dqm_unlock(dqm);
+}
+
 static int allocate_sdma_queue(struct device_queue_manager *dqm,
 				struct queue *q)
 {
@@ -1100,6 +1107,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	dqm_lock(dqm);
 	/* clear hang status when driver try to start the hw scheduler */
 	dqm->is_hws_hang = false;
+	dqm->is_resetting = false;
 	dqm->sched_running = true;
 	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	dqm_unlock(dqm);
@@ -1107,20 +1115,24 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	return 0;
 fail_allocate_vidmem:
 fail_set_sched_resources:
-	pm_uninit(&dqm->packets);
+	pm_uninit(&dqm->packets, false);
 fail_packet_manager_init:
 	return retval;
 }
 
 static int stop_cpsch(struct device_queue_manager *dqm)
 {
+	bool hanging;
+
 	dqm_lock(dqm);
-	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+	if (!dqm->is_hws_hang)
+		unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+	hanging = dqm->is_hws_hang || dqm->is_resetting;
 	dqm->sched_running = false;
 	dqm_unlock(dqm);
 
 	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
-	pm_uninit(&dqm->packets);
+	pm_uninit(&dqm->packets, hanging);
 
 	return 0;
 }
@@ -1225,16 +1237,18 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 
 	list_add(&q->list, &qpd->queues_list);
 	qpd->queue_count++;
+
+	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+		dqm->sdma_queue_count++;
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
+		dqm->xgmi_sdma_queue_count++;
+
 	if (q->properties.is_active) {
 		dqm->queue_count++;
 		retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	}
 
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
-		dqm->sdma_queue_count++;
-	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
-		dqm->xgmi_sdma_queue_count++;
 	/*
 	 * Unconditionally increment this counter, regardless of the queue's
 	 * type or whether the queue is active.
@@ -1352,8 +1366,17 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 	/* should be timed out */
 	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
 				queue_preemption_timeout_ms);
-	if (retval)
+	if (retval) {
+		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
+		dqm->is_hws_hang = true;
+		/* It's possible we're detecting a HWS hang in the
+		 * middle of a GPU reset. No need to schedule another
+		 * reset in this case.
+		 */
+		if (!dqm->is_resetting)
+			schedule_work(&dqm->hw_exception_work);
 		return retval;
+	}
 
 	pm_release_ib(&dqm->packets);
 	dqm->active_runlist = false;
@@ -1371,12 +1394,8 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
 	if (dqm->is_hws_hang)
 		return -EIO;
 	retval = unmap_queues_cpsch(dqm, filter, filter_param);
-	if (retval) {
-		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
-		dqm->is_hws_hang = true;
-		schedule_work(&dqm->hw_exception_work);
+	if (retval)
 		return retval;
-	}
 
 	return map_queues_cpsch(dqm);
 }
@@ -1595,7 +1614,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
 		goto dqm_unlock;
 	}
 
-	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_COMPUTE];
+	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
 
 	if (!mqd_mgr->get_wave_state) {
 		r = -EINVAL;
@@ -1770,6 +1789,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
 		dqm->ops.initialize = initialize_cpsch;
 		dqm->ops.start = start_cpsch;
 		dqm->ops.stop = stop_cpsch;
+		dqm->ops.pre_reset = pre_reset;
 		dqm->ops.destroy_queue = destroy_queue_cpsch;
 		dqm->ops.update_queue = update_queue;
 		dqm->ops.register_process = register_process;
@@ -1788,6 +1808,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
 		/* initialize dqm for no cp scheduling */
 		dqm->ops.start = start_nocpsch;
 		dqm->ops.stop = stop_nocpsch;
+		dqm->ops.pre_reset = pre_reset;
 		dqm->ops.create_queue = create_queue_nocpsch;
 		dqm->ops.destroy_queue = destroy_queue_nocpsch;
 		dqm->ops.update_queue = update_queue;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index a8c37e6..871d3b6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -104,6 +104,7 @@ struct device_queue_manager_ops {
 	int	(*initialize)(struct device_queue_manager *dqm);
 	int	(*start)(struct device_queue_manager *dqm);
 	int	(*stop)(struct device_queue_manager *dqm);
+	void	(*pre_reset)(struct device_queue_manager *dqm);
 	void	(*uninitialize)(struct device_queue_manager *dqm);
 	int	(*create_kernel_queue)(struct device_queue_manager *dqm,
 					struct kernel_queue *kq,
@@ -190,7 +191,6 @@ struct device_queue_manager {
 	/* the pasid mapping for each kfd vmid */
 	uint16_t		vmid_pasid[VMID_NUM];
 	uint64_t		pipelines_addr;
-	struct kfd_mem_obj	*pipeline_mem;
 	uint64_t		fence_gpu_addr;
 	unsigned int		*fence_addr;
 	struct kfd_mem_obj	*fence_mem;
@@ -199,6 +199,7 @@ struct device_queue_manager {
 
 	/* hw exception  */
 	bool			is_hws_hang;
+	bool			is_resetting;
 	struct work_struct	hw_exception_work;
 	struct kfd_mem_obj	hiq_sdma_mqd;
 	bool			sched_running;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index ebe79bf..8e0c00b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -91,7 +91,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
 	kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
 				doorbell_start_offset;
 
-	kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);
+	kfd->doorbell_base_dw_offset = doorbell_start_offset / sizeof(u32);
 
 	kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
 					   kfd_doorbell_process_slice(kfd));
@@ -103,8 +103,8 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
 	pr_debug("doorbell base           == 0x%08lX\n",
 			(uintptr_t)kfd->doorbell_base);
 
-	pr_debug("doorbell_id_offset      == 0x%08lX\n",
-			kfd->doorbell_id_offset);
+	pr_debug("doorbell_base_dw_offset      == 0x%08lX\n",
+			kfd->doorbell_base_dw_offset);
 
 	pr_debug("doorbell_process_limit  == 0x%08lX\n",
 			doorbell_process_limit);
@@ -185,7 +185,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 	 * Calculating the kernel doorbell offset using the first
 	 * doorbell page.
 	 */
-	*doorbell_off = kfd->doorbell_id_offset + inx;
+	*doorbell_off = kfd->doorbell_base_dw_offset + inx;
 
 	pr_debug("Get kernel queue doorbell\n"
 			"     doorbell offset   == 0x%08X\n"
@@ -225,17 +225,17 @@ void write_kernel_doorbell64(void __iomem *db, u64 value)
 	}
 }
 
-unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
+unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
 					struct kfd_process *process,
 					unsigned int doorbell_id)
 {
 	/*
-	 * doorbell_id_offset accounts for doorbells taken by KGD.
+	 * doorbell_base_dw_offset accounts for doorbells taken by KGD.
 	 * index * kfd_doorbell_process_slice/sizeof(u32) adjusts to
 	 * the process's doorbells. The offset returned is in dword
 	 * units regardless of the ASIC-dependent doorbell size.
 	 */
-	return kfd->doorbell_id_offset +
+	return kfd->doorbell_base_dw_offset +
 		process->doorbell_index
 		* kfd_doorbell_process_slice(kfd) / sizeof(u32) +
 		doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
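
The returned offset is in dword units, so each byte quantity is divided by sizeof(u32). A worked example of the formula above, using hypothetical values for the base offset, per-process slice and doorbell size:

/* Hypothetical inputs: base dword offset 0x400, 4096-byte process
 * slice, 8-byte doorbells, process index 2, doorbell id 3.
 */
static unsigned int example_doorbell_dw_offset(void)
{
	unsigned int base_dw_offset = 0x400;	/* 1024 dwords */
	unsigned int slice_bytes    = 4096;
	unsigned int doorbell_bytes = 8;
	unsigned int process_index  = 2;
	unsigned int doorbell_id    = 3;

	return base_dw_offset +
	       process_index * slice_bytes / 4 +	/* 2048 dwords */
	       doorbell_id * doorbell_bytes / 4;	/*    6 dwords */
	/* total: 1024 + 2048 + 6 = 3078 dwords into the doorbell BAR */
}
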
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 908081c..1f83655 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -346,7 +346,6 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
 		ret = create_signal_event(devkfd, p, ev);
 		if (!ret) {
 			*event_page_offset = KFD_MMAP_TYPE_EVENTS;
-			*event_page_offset <<= PAGE_SHIFT;
 			*event_slot_index = ev->event_id;
 		}
 		break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 193e283..8d87151 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -62,9 +62,6 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
 	struct amd_iommu_device_info iommu_info;
 	unsigned int pasid_limit;
 	int err;
-	struct kfd_topology_device *top_dev;
-
-	top_dev = kfd_topology_device_by_id(kfd->id);
 
 	if (!kfd->device_info->needs_iommu_device)
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 11d2448..bae7064 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -34,7 +34,10 @@
 
 #define PM4_COUNT_ZERO (((1 << 15) - 1) << 16)
 
-static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+/* Initialize a kernel queue, including allocation of the GART memory
+ * needed for the queue.
+ */
+static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 		enum kfd_queue_type type, unsigned int queue_size)
 {
 	struct queue_properties prop;
@@ -87,9 +90,17 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 	kq->pq_kernel_addr = kq->pq->cpu_ptr;
 	kq->pq_gpu_addr = kq->pq->gpu_addr;
 
-	retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size);
-	if (!retval)
-		goto err_eop_allocate_vidmem;
+	/* For CIK-family ASICs, kq->eop_mem is not needed */
+	if (dev->device_info->asic_family > CHIP_MULLINS) {
+		retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
+		if (retval != 0)
+			goto err_eop_allocate_vidmem;
+
+		kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
+		kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
+
+		memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
+	}
 
 	retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
 					&kq->rptr_mem);
@@ -183,9 +194,10 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 
 }
 
-static void uninitialize(struct kernel_queue *kq)
+/* Uninitialize a kernel queue and free all of its memory allocations. */
+static void kq_uninitialize(struct kernel_queue *kq, bool hanging)
 {
-	if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
+	if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ && !hanging)
 		kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
 					kq->queue->mqd,
 					KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
@@ -200,14 +212,19 @@ static void uninitialize(struct kernel_queue *kq)
 
 	kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
 	kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
-	kq->ops_asic_specific.uninitialize(kq);
+
+	/* For CIK-family ASICs, kq->eop_mem is NULL; kfd_gtt_sa_free()
+	 * handles NULL properly.
+	 */
+	kfd_gtt_sa_free(kq->dev, kq->eop_mem);
+
 	kfd_gtt_sa_free(kq->dev, kq->pq);
 	kfd_release_kernel_doorbell(kq->dev,
 					kq->queue->properties.doorbell_ptr);
 	uninit_queue(kq->queue);
 }
 
-static int acquire_packet_buffer(struct kernel_queue *kq,
+int kq_acquire_packet_buffer(struct kernel_queue *kq,
 		size_t packet_size_in_dwords, unsigned int **buffer_ptr)
 {
 	size_t available_size;
@@ -268,7 +285,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
 	return -ENOMEM;
 }
 
-static void submit_packet(struct kernel_queue *kq)
+void kq_submit_packet(struct kernel_queue *kq)
 {
 #ifdef DEBUG
 	int i;
@@ -280,11 +297,18 @@ static void submit_packet(struct kernel_queue *kq)
 	}
 	pr_debug("\n");
 #endif
-
-	kq->ops_asic_specific.submit_packet(kq);
+	if (kq->dev->device_info->doorbell_size == 8) {
+		*kq->wptr64_kernel = kq->pending_wptr64;
+		write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
+					kq->pending_wptr64);
+	} else {
+		*kq->wptr_kernel = kq->pending_wptr;
+		write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
+					kq->pending_wptr);
+	}
 }
 
-static void rollback_packet(struct kernel_queue *kq)
+void kq_rollback_packet(struct kernel_queue *kq)
 {
 	if (kq->dev->device_info->doorbell_size == 8) {
 		kq->pending_wptr64 = *kq->wptr64_kernel;
@@ -304,60 +328,18 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 	if (!kq)
 		return NULL;
 
-	kq->ops.initialize = initialize;
-	kq->ops.uninitialize = uninitialize;
-	kq->ops.acquire_packet_buffer = acquire_packet_buffer;
-	kq->ops.submit_packet = submit_packet;
-	kq->ops.rollback_packet = rollback_packet;
-
-	switch (dev->device_info->asic_family) {
-	case CHIP_CARRIZO:
-	case CHIP_TONGA:
-	case CHIP_FIJI:
-	case CHIP_POLARIS10:
-	case CHIP_POLARIS11:
-	case CHIP_POLARIS12:
-	case CHIP_VEGAM:
-		kernel_queue_init_vi(&kq->ops_asic_specific);
-		break;
-
-	case CHIP_KAVERI:
-	case CHIP_HAWAII:
-		kernel_queue_init_cik(&kq->ops_asic_specific);
-		break;
-
-	case CHIP_VEGA10:
-	case CHIP_VEGA12:
-	case CHIP_VEGA20:
-	case CHIP_RAVEN:
-	case CHIP_RENOIR:
-	case CHIP_ARCTURUS:
-		kernel_queue_init_v9(&kq->ops_asic_specific);
-		break;
-	case CHIP_NAVI10:
-	case CHIP_NAVI12:
-	case CHIP_NAVI14:
-		kernel_queue_init_v10(&kq->ops_asic_specific);
-		break;
-	default:
-		WARN(1, "Unexpected ASIC family %u",
-		     dev->device_info->asic_family);
-		goto out_free;
-	}
-
-	if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
+	if (kq_initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
 		return kq;
 
 	pr_err("Failed to init kernel queue\n");
 
-out_free:
 	kfree(kq);
 	return NULL;
 }
 
-void kernel_queue_uninit(struct kernel_queue *kq)
+void kernel_queue_uninit(struct kernel_queue *kq, bool hanging)
 {
-	kq->ops.uninitialize(kq);
+	kq_uninitialize(kq, hanging);
 	kfree(kq);
 }
 
@@ -377,7 +359,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
 		return;
 	}
 
-	retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
+	retval = kq_acquire_packet_buffer(kq, 5, &buffer);
 	if (unlikely(retval != 0)) {
 		pr_err("  Failed to acquire packet buffer\n");
 		pr_err("Kernel queue test failed\n");
@@ -385,7 +367,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
 	}
 	for (i = 0; i < 5; i++)
 		buffer[i] = kq->nop_packet;
-	kq->ops.submit_packet(kq);
+	kq_submit_packet(kq);
 
 	pr_err("Ending kernel queue test\n");
 }
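
With the ops table gone, callers use the kq_* functions directly, as test_kq() above does. A minimal usage sketch, including the rollback path that test_kq() does not exercise (example_send_nops is hypothetical; error handling abridged):

static int example_send_nops(struct kernel_queue *kq, unsigned int ndw)
{
	unsigned int *buf;
	unsigned int i;
	int ret;

	ret = kq_acquire_packet_buffer(kq, ndw, &buf);
	if (ret)
		return ret;

	for (i = 0; i < ndw; i++)
		buf[i] = kq->nop_packet;

	/* If packet construction had failed part-way, the caller would
	 * call kq_rollback_packet(kq) instead of submitting.
	 */
	kq_submit_packet(kq);
	return 0;
}
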
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index 365fc67..f4cfe9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -29,45 +29,28 @@
 #include "kfd_priv.h"
 
 /**
- * struct kernel_queue_ops
- *
- * @initialize: Initialize a kernel queue, including allocations of GART memory
- * needed for the queue.
- *
- * @uninitialize: Uninitialize a kernel queue and free all its memory usages.
- *
- * @acquire_packet_buffer: Returns a pointer to the location in the kernel
+ * kq_acquire_packet_buffer: Returns a pointer to the location in the kernel
  * queue ring buffer where the calling function can write its packet. It is
  * Guaranteed that there is enough space for that packet. It also updates the
  * pending write pointer to that location so subsequent calls to
  * acquire_packet_buffer will get a correct write pointer
  *
- * @submit_packet: Update the write pointer and doorbell of a kernel queue.
+ * kq_submit_packet: Update the write pointer and doorbell of a kernel queue.
  *
- * @sync_with_hw: Wait until the write pointer and the read pointer of a kernel
- * queue are equal, which means the CP has read all the submitted packets.
- *
- * @rollback_packet: This routine is called if we failed to build an acquired
+ * kq_rollback_packet: This routine is called if we failed to build an acquired
  * packet for some reason. It just overwrites the pending wptr with the current
  * one
  *
  */
-struct kernel_queue_ops {
-	bool	(*initialize)(struct kernel_queue *kq, struct kfd_dev *dev,
-			enum kfd_queue_type type, unsigned int queue_size);
-	void	(*uninitialize)(struct kernel_queue *kq);
-	int	(*acquire_packet_buffer)(struct kernel_queue *kq,
-					size_t packet_size_in_dwords,
-					unsigned int **buffer_ptr);
 
-	void	(*submit_packet)(struct kernel_queue *kq);
-	void	(*rollback_packet)(struct kernel_queue *kq);
-};
+int kq_acquire_packet_buffer(struct kernel_queue *kq,
+				size_t packet_size_in_dwords,
+				unsigned int **buffer_ptr);
+void kq_submit_packet(struct kernel_queue *kq);
+void kq_rollback_packet(struct kernel_queue *kq);
+
 
 struct kernel_queue {
-	struct kernel_queue_ops ops;
-	struct kernel_queue_ops ops_asic_specific;
-
 	/* data */
 	struct kfd_dev		*dev;
 	struct mqd_manager	*mqd_mgr;
@@ -99,9 +82,4 @@ struct kernel_queue {
 	struct list_head	list;
 };
 
-void kernel_queue_init_cik(struct kernel_queue_ops *ops);
-void kernel_queue_init_vi(struct kernel_queue_ops *ops);
-void kernel_queue_init_v9(struct kernel_queue_ops *ops);
-void kernel_queue_init_v10(struct kernel_queue_ops *ops);
-
 #endif /* KFD_KERNEL_QUEUE_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
deleted file mode 100644
index 19e54ac..0000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "kfd_kernel_queue.h"
-
-static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
-			enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_cik(struct kernel_queue *kq);
-static void submit_packet_cik(struct kernel_queue *kq);
-
-void kernel_queue_init_cik(struct kernel_queue_ops *ops)
-{
-	ops->initialize = initialize_cik;
-	ops->uninitialize = uninitialize_cik;
-	ops->submit_packet = submit_packet_cik;
-}
-
-static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
-			enum kfd_queue_type type, unsigned int queue_size)
-{
-	return true;
-}
-
-static void uninitialize_cik(struct kernel_queue *kq)
-{
-}
-
-static void submit_packet_cik(struct kernel_queue *kq)
-{
-	*kq->wptr_kernel = kq->pending_wptr;
-	write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
-				kq->pending_wptr);
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c
deleted file mode 100644
index aed32ab..0000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * Copyright 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "kfd_kernel_queue.h"
-#include "kfd_device_queue_manager.h"
-#include "kfd_pm4_headers_ai.h"
-#include "kfd_pm4_opcodes.h"
-#include "gc/gc_10_1_0_sh_mask.h"
-
-static bool initialize_v10(struct kernel_queue *kq, struct kfd_dev *dev,
-			enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_v10(struct kernel_queue *kq);
-static void submit_packet_v10(struct kernel_queue *kq);
-
-void kernel_queue_init_v10(struct kernel_queue_ops *ops)
-{
-	ops->initialize = initialize_v10;
-	ops->uninitialize = uninitialize_v10;
-	ops->submit_packet = submit_packet_v10;
-}
-
-static bool initialize_v10(struct kernel_queue *kq, struct kfd_dev *dev,
-			enum kfd_queue_type type, unsigned int queue_size)
-{
-	int retval;
-
-	retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
-	if (retval != 0)
-		return false;
-
-	kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
-	kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
-
-	memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
-
-	return true;
-}
-
-static void uninitialize_v10(struct kernel_queue *kq)
-{
-	kfd_gtt_sa_free(kq->dev, kq->eop_mem);
-}
-
-static void submit_packet_v10(struct kernel_queue *kq)
-{
-	*kq->wptr64_kernel = kq->pending_wptr64;
-	write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
-				kq->pending_wptr64);
-}
-
-static int pm_map_process_v10(struct packet_manager *pm,
-		uint32_t *buffer, struct qcm_process_device *qpd)
-{
-	struct pm4_mes_map_process *packet;
-	uint64_t vm_page_table_base_addr = qpd->page_table_base;
-
-	packet = (struct pm4_mes_map_process *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_map_process));
-
-	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
-					sizeof(struct pm4_mes_map_process));
-	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
-	packet->bitfields2.process_quantum = 1;
-	packet->bitfields2.pasid = qpd->pqm->process->pasid;
-	packet->bitfields14.gds_size = qpd->gds_size;
-	packet->bitfields14.num_gws = qpd->num_gws;
-	packet->bitfields14.num_oac = qpd->num_oac;
-	packet->bitfields14.sdma_enable = 1;
-
-	packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
-
-	packet->sh_mem_config = qpd->sh_mem_config;
-	packet->sh_mem_bases = qpd->sh_mem_bases;
-	if (qpd->tba_addr) {
-		packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
-		packet->sq_shader_tba_hi = (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT) |
-			upper_32_bits(qpd->tba_addr >> 8);
-		packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
-		packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
-	}
-
-	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
-	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
-
-	packet->vm_context_page_table_base_addr_lo32 =
-			lower_32_bits(vm_page_table_base_addr);
-	packet->vm_context_page_table_base_addr_hi32 =
-			upper_32_bits(vm_page_table_base_addr);
-
-	return 0;
-}
-
-static int pm_runlist_v10(struct packet_manager *pm, uint32_t *buffer,
-			uint64_t ib, size_t ib_size_in_dwords, bool chain)
-{
-	struct pm4_mes_runlist *packet;
-
-	int concurrent_proc_cnt = 0;
-	struct kfd_dev *kfd = pm->dqm->dev;
-
-	/* Determine the number of processes to map together to HW:
-	 * it can not exceed the number of VMIDs available to the
-	 * scheduler, and it is determined by the smaller of the number
-	 * of processes in the runlist and kfd module parameter
-	 * hws_max_conc_proc.
-	 * Note: the arbitration between the number of VMIDs and
-	 * hws_max_conc_proc has been done in
-	 * kgd2kfd_device_init().
-	 */
-	concurrent_proc_cnt = min(pm->dqm->processes_count,
-			kfd->max_proc_per_quantum);
-
-
-	packet = (struct pm4_mes_runlist *)buffer;
-
-	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
-	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
-						sizeof(struct pm4_mes_runlist));
-
-	packet->bitfields4.ib_size = ib_size_in_dwords;
-	packet->bitfields4.chain = chain ? 1 : 0;
-	packet->bitfields4.offload_polling = 0;
-	packet->bitfields4.valid = 1;
-	packet->bitfields4.process_cnt = concurrent_proc_cnt;
-	packet->ordinal2 = lower_32_bits(ib);
-	packet->ib_base_hi = upper_32_bits(ib);
-
-	return 0;
-}
-
-static int pm_map_queues_v10(struct packet_manager *pm, uint32_t *buffer,
-		struct queue *q, bool is_static)
-{
-	struct pm4_mes_map_queues *packet;
-	bool use_static = is_static;
-
-	packet = (struct pm4_mes_map_queues *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
-
-	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
-					sizeof(struct pm4_mes_map_queues));
-	packet->bitfields2.num_queues = 1;
-	packet->bitfields2.queue_sel =
-		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
-
-	packet->bitfields2.engine_sel =
-		engine_sel__mes_map_queues__compute_vi;
-	packet->bitfields2.queue_type =
-		queue_type__mes_map_queues__normal_compute_vi;
-
-	switch (q->properties.type) {
-	case KFD_QUEUE_TYPE_COMPUTE:
-		if (use_static)
-			packet->bitfields2.queue_type =
-		queue_type__mes_map_queues__normal_latency_static_queue_vi;
-		break;
-	case KFD_QUEUE_TYPE_DIQ:
-		packet->bitfields2.queue_type =
-			queue_type__mes_map_queues__debug_interface_queue_vi;
-		break;
-	case KFD_QUEUE_TYPE_SDMA:
-	case KFD_QUEUE_TYPE_SDMA_XGMI:
-		packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
-				engine_sel__mes_map_queues__sdma0_vi;
-		use_static = false; /* no static queues under SDMA */
-		break;
-	default:
-		WARN(1, "queue type %d\n", q->properties.type);
-		return -EINVAL;
-	}
-	packet->bitfields3.doorbell_offset =
-			q->properties.doorbell_off;
-
-	packet->mqd_addr_lo =
-			lower_32_bits(q->gart_mqd_addr);
-
-	packet->mqd_addr_hi =
-			upper_32_bits(q->gart_mqd_addr);
-
-	packet->wptr_addr_lo =
-			lower_32_bits((uint64_t)q->properties.write_ptr);
-
-	packet->wptr_addr_hi =
-			upper_32_bits((uint64_t)q->properties.write_ptr);
-
-	return 0;
-}
-
-static int pm_unmap_queues_v10(struct packet_manager *pm, uint32_t *buffer,
-			enum kfd_queue_type type,
-			enum kfd_unmap_queues_filter filter,
-			uint32_t filter_param, bool reset,
-			unsigned int sdma_engine)
-{
-	struct pm4_mes_unmap_queues *packet;
-
-	packet = (struct pm4_mes_unmap_queues *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
-
-	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
-					sizeof(struct pm4_mes_unmap_queues));
-	switch (type) {
-	case KFD_QUEUE_TYPE_COMPUTE:
-	case KFD_QUEUE_TYPE_DIQ:
-		packet->bitfields2.engine_sel =
-			engine_sel__mes_unmap_queues__compute;
-		break;
-	case KFD_QUEUE_TYPE_SDMA:
-	case KFD_QUEUE_TYPE_SDMA_XGMI:
-		packet->bitfields2.engine_sel =
-			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
-		break;
-	default:
-		WARN(1, "queue type %d\n", type);
-		break;
-	}
-
-	if (reset)
-		packet->bitfields2.action =
-			action__mes_unmap_queues__reset_queues;
-	else
-		packet->bitfields2.action =
-			action__mes_unmap_queues__preempt_queues;
-
-	switch (filter) {
-	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
-		packet->bitfields2.queue_sel =
-			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
-		packet->bitfields2.num_queues = 1;
-		packet->bitfields3b.doorbell_offset0 = filter_param;
-		break;
-	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
-		packet->bitfields2.queue_sel =
-			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
-		packet->bitfields3a.pasid = filter_param;
-		break;
-	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
-		packet->bitfields2.queue_sel =
-			queue_sel__mes_unmap_queues__unmap_all_queues;
-		break;
-	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
-		/* in this case, we do not preempt static queues */
-		packet->bitfields2.queue_sel =
-			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
-		break;
-	default:
-		WARN(1, "filter %d\n", filter);
-		break;
-	}
-
-	return 0;
-
-}
-
-static int pm_query_status_v10(struct packet_manager *pm, uint32_t *buffer,
-			uint64_t fence_address,	uint32_t fence_value)
-{
-	struct pm4_mes_query_status *packet;
-
-	packet = (struct pm4_mes_query_status *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_query_status));
-
-
-	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
-					sizeof(struct pm4_mes_query_status));
-
-	packet->bitfields2.context_id = 0;
-	packet->bitfields2.interrupt_sel =
-			interrupt_sel__mes_query_status__completion_status;
-	packet->bitfields2.command =
-			command__mes_query_status__fence_only_after_write_ack;
-
-	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
-	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
-	packet->data_hi = upper_32_bits((uint64_t)fence_value);
-	packet->data_lo = lower_32_bits((uint64_t)fence_value);
-
-	return 0;
-}
-
-
-static int pm_release_mem_v10(uint64_t gpu_addr, uint32_t *buffer)
-{
-	struct pm4_mec_release_mem *packet;
-
-	WARN_ON(!buffer);
-
-	packet = (struct pm4_mec_release_mem *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mec_release_mem));
-
-	packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
-					sizeof(struct pm4_mec_release_mem));
-
-	packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
-	packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe;
-	packet->bitfields2.tcl1_action_ena = 1;
-	packet->bitfields2.tc_action_ena = 1;
-	packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru;
-
-	packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low;
-	packet->bitfields3.int_sel =
-		int_sel__mec_release_mem__send_interrupt_after_write_confirm;
-
-	packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
-	packet->address_hi = upper_32_bits(gpu_addr);
-
-	packet->data_lo = 0;
-
-	return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
-}
-
-const struct packet_manager_funcs kfd_v10_pm_funcs = {
-	.map_process			= pm_map_process_v10,
-	.runlist			= pm_runlist_v10,
-	.set_resources			= pm_set_resources_vi,
-	.map_queues			= pm_map_queues_v10,
-	.unmap_queues			= pm_unmap_queues_v10,
-	.query_status			= pm_query_status_v10,
-	.release_mem			= pm_release_mem_v10,
-	.map_process_size		= sizeof(struct pm4_mes_map_process),
-	.runlist_size			= sizeof(struct pm4_mes_runlist),
-	.set_resources_size		= sizeof(struct pm4_mes_set_resources),
-	.map_queues_size		= sizeof(struct pm4_mes_map_queues),
-	.unmap_queues_size		= sizeof(struct pm4_mes_unmap_queues),
-	.query_status_size		= sizeof(struct pm4_mes_query_status),
-	.release_mem_size		= sizeof(struct pm4_mec_release_mem)
-};
-
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
deleted file mode 100644
index 9a4bafb..0000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Copyright 2016-2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "kfd_kernel_queue.h"
-#include "kfd_device_queue_manager.h"
-#include "kfd_pm4_headers_ai.h"
-#include "kfd_pm4_opcodes.h"
-
-static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
-			enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_v9(struct kernel_queue *kq);
-static void submit_packet_v9(struct kernel_queue *kq);
-
-void kernel_queue_init_v9(struct kernel_queue_ops *ops)
-{
-	ops->initialize = initialize_v9;
-	ops->uninitialize = uninitialize_v9;
-	ops->submit_packet = submit_packet_v9;
-}
-
-static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
-			enum kfd_queue_type type, unsigned int queue_size)
-{
-	int retval;
-
-	retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
-	if (retval)
-		return false;
-
-	kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
-	kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
-
-	memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
-
-	return true;
-}
-
-static void uninitialize_v9(struct kernel_queue *kq)
-{
-	kfd_gtt_sa_free(kq->dev, kq->eop_mem);
-}
-
-static void submit_packet_v9(struct kernel_queue *kq)
-{
-	*kq->wptr64_kernel = kq->pending_wptr64;
-	write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
-				kq->pending_wptr64);
-}
-
-static int pm_map_process_v9(struct packet_manager *pm,
-		uint32_t *buffer, struct qcm_process_device *qpd)
-{
-	struct pm4_mes_map_process *packet;
-	uint64_t vm_page_table_base_addr = qpd->page_table_base;
-
-	packet = (struct pm4_mes_map_process *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_map_process));
-
-	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
-					sizeof(struct pm4_mes_map_process));
-	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
-	packet->bitfields2.process_quantum = 1;
-	packet->bitfields2.pasid = qpd->pqm->process->pasid;
-	packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
-	packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
-	packet->bitfields14.num_gws = qpd->num_gws;
-	packet->bitfields14.num_oac = qpd->num_oac;
-	packet->bitfields14.sdma_enable = 1;
-	packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
-
-	packet->sh_mem_config = qpd->sh_mem_config;
-	packet->sh_mem_bases = qpd->sh_mem_bases;
-	packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
-	packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8);
-	packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
-	packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
-
-	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
-	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
-
-	packet->vm_context_page_table_base_addr_lo32 =
-			lower_32_bits(vm_page_table_base_addr);
-	packet->vm_context_page_table_base_addr_hi32 =
-			upper_32_bits(vm_page_table_base_addr);
-
-	return 0;
-}
-
-static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
-			uint64_t ib, size_t ib_size_in_dwords, bool chain)
-{
-	struct pm4_mes_runlist *packet;
-
-	int concurrent_proc_cnt = 0;
-	struct kfd_dev *kfd = pm->dqm->dev;
-
-	/* Determine the number of processes to map together to HW:
-	 * it can not exceed the number of VMIDs available to the
-	 * scheduler, and it is determined by the smaller of the number
-	 * of processes in the runlist and kfd module parameter
-	 * hws_max_conc_proc.
-	 * Note: the arbitration between the number of VMIDs and
-	 * hws_max_conc_proc has been done in
-	 * kgd2kfd_device_init().
-	 */
-	concurrent_proc_cnt = min(pm->dqm->processes_count,
-			kfd->max_proc_per_quantum);
-
-	packet = (struct pm4_mes_runlist *)buffer;
-
-	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
-	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
-						sizeof(struct pm4_mes_runlist));
-
-	packet->bitfields4.ib_size = ib_size_in_dwords;
-	packet->bitfields4.chain = chain ? 1 : 0;
-	packet->bitfields4.offload_polling = 0;
-	packet->bitfields4.chained_runlist_idle_disable = chain ? 1 : 0;
-	packet->bitfields4.valid = 1;
-	packet->bitfields4.process_cnt = concurrent_proc_cnt;
-	packet->ordinal2 = lower_32_bits(ib);
-	packet->ib_base_hi = upper_32_bits(ib);
-
-	return 0;
-}
-
-static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
-				struct scheduling_resources *res)
-{
-	struct pm4_mes_set_resources *packet;
-
-	packet = (struct pm4_mes_set_resources *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
-
-	packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
-					sizeof(struct pm4_mes_set_resources));
-
-	packet->bitfields2.queue_type =
-			queue_type__mes_set_resources__hsa_interface_queue_hiq;
-	packet->bitfields2.vmid_mask = res->vmid_mask;
-	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
-	packet->bitfields7.oac_mask = res->oac_mask;
-	packet->bitfields8.gds_heap_base = res->gds_heap_base;
-	packet->bitfields8.gds_heap_size = res->gds_heap_size;
-
-	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
-	packet->gws_mask_hi = upper_32_bits(res->gws_mask);
-
-	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
-	packet->queue_mask_hi = upper_32_bits(res->queue_mask);
-
-	return 0;
-}
-
-static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
-		struct queue *q, bool is_static)
-{
-	struct pm4_mes_map_queues *packet;
-	bool use_static = is_static;
-
-	packet = (struct pm4_mes_map_queues *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
-
-	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
-					sizeof(struct pm4_mes_map_queues));
-	packet->bitfields2.num_queues = 1;
-	packet->bitfields2.queue_sel =
-		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
-
-	packet->bitfields2.engine_sel =
-		engine_sel__mes_map_queues__compute_vi;
-	packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
-	packet->bitfields2.extended_engine_sel =
-		extended_engine_sel__mes_map_queues__legacy_engine_sel;
-	packet->bitfields2.queue_type =
-		queue_type__mes_map_queues__normal_compute_vi;
-
-	switch (q->properties.type) {
-	case KFD_QUEUE_TYPE_COMPUTE:
-		if (use_static)
-			packet->bitfields2.queue_type =
-		queue_type__mes_map_queues__normal_latency_static_queue_vi;
-		break;
-	case KFD_QUEUE_TYPE_DIQ:
-		packet->bitfields2.queue_type =
-			queue_type__mes_map_queues__debug_interface_queue_vi;
-		break;
-	case KFD_QUEUE_TYPE_SDMA:
-	case KFD_QUEUE_TYPE_SDMA_XGMI:
-		use_static = false; /* no static queues under SDMA */
-		if (q->properties.sdma_engine_id < 2)
-			packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
-				engine_sel__mes_map_queues__sdma0_vi;
-		else {
-			packet->bitfields2.extended_engine_sel =
-				extended_engine_sel__mes_map_queues__sdma0_to_7_sel;
-			packet->bitfields2.engine_sel = q->properties.sdma_engine_id;
-		}
-		break;
-	default:
-		WARN(1, "queue type %d", q->properties.type);
-		return -EINVAL;
-	}
-	packet->bitfields3.doorbell_offset =
-			q->properties.doorbell_off;
-
-	packet->mqd_addr_lo =
-			lower_32_bits(q->gart_mqd_addr);
-
-	packet->mqd_addr_hi =
-			upper_32_bits(q->gart_mqd_addr);
-
-	packet->wptr_addr_lo =
-			lower_32_bits((uint64_t)q->properties.write_ptr);
-
-	packet->wptr_addr_hi =
-			upper_32_bits((uint64_t)q->properties.write_ptr);
-
-	return 0;
-}
-
-static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
-			enum kfd_queue_type type,
-			enum kfd_unmap_queues_filter filter,
-			uint32_t filter_param, bool reset,
-			unsigned int sdma_engine)
-{
-	struct pm4_mes_unmap_queues *packet;
-
-	packet = (struct pm4_mes_unmap_queues *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
-
-	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
-					sizeof(struct pm4_mes_unmap_queues));
-	switch (type) {
-	case KFD_QUEUE_TYPE_COMPUTE:
-	case KFD_QUEUE_TYPE_DIQ:
-		packet->bitfields2.extended_engine_sel =
-			extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
-		packet->bitfields2.engine_sel =
-			engine_sel__mes_unmap_queues__compute;
-		break;
-	case KFD_QUEUE_TYPE_SDMA:
-	case KFD_QUEUE_TYPE_SDMA_XGMI:
-		if (sdma_engine < 2) {
-			packet->bitfields2.extended_engine_sel =
-				extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
-			packet->bitfields2.engine_sel =
-				engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
-		} else {
-			packet->bitfields2.extended_engine_sel =
-				extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel;
-			packet->bitfields2.engine_sel = sdma_engine;
-		}
-		break;
-	default:
-		WARN(1, "queue type %d", type);
-		return -EINVAL;
-	}
-
-	if (reset)
-		packet->bitfields2.action =
-			action__mes_unmap_queues__reset_queues;
-	else
-		packet->bitfields2.action =
-			action__mes_unmap_queues__preempt_queues;
-
-	switch (filter) {
-	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
-		packet->bitfields2.queue_sel =
-			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
-		packet->bitfields2.num_queues = 1;
-		packet->bitfields3b.doorbell_offset0 = filter_param;
-		break;
-	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
-		packet->bitfields2.queue_sel =
-			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
-		packet->bitfields3a.pasid = filter_param;
-		break;
-	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
-		packet->bitfields2.queue_sel =
-			queue_sel__mes_unmap_queues__unmap_all_queues;
-		break;
-	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
-		/* in this case, we do not preempt static queues */
-		packet->bitfields2.queue_sel =
-			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
-		break;
-	default:
-		WARN(1, "filter %d", filter);
-		return -EINVAL;
-	}
-
-	return 0;
-
-}
-
-static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
-			uint64_t fence_address,	uint32_t fence_value)
-{
-	struct pm4_mes_query_status *packet;
-
-	packet = (struct pm4_mes_query_status *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_query_status));
-
-
-	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
-					sizeof(struct pm4_mes_query_status));
-
-	packet->bitfields2.context_id = 0;
-	packet->bitfields2.interrupt_sel =
-			interrupt_sel__mes_query_status__completion_status;
-	packet->bitfields2.command =
-			command__mes_query_status__fence_only_after_write_ack;
-
-	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
-	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
-	packet->data_hi = upper_32_bits((uint64_t)fence_value);
-	packet->data_lo = lower_32_bits((uint64_t)fence_value);
-
-	return 0;
-}
-
-
-static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
-{
-	struct pm4_mec_release_mem *packet;
-
-	packet = (struct pm4_mec_release_mem *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mec_release_mem));
-
-	packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
-					sizeof(struct pm4_mec_release_mem));
-
-	packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
-	packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe;
-	packet->bitfields2.tcl1_action_ena = 1;
-	packet->bitfields2.tc_action_ena = 1;
-	packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru;
-
-	packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low;
-	packet->bitfields3.int_sel =
-		int_sel__mec_release_mem__send_interrupt_after_write_confirm;
-
-	packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
-	packet->address_hi = upper_32_bits(gpu_addr);
-
-	packet->data_lo = 0;
-
-	return 0;
-}
-
-const struct packet_manager_funcs kfd_v9_pm_funcs = {
-	.map_process		= pm_map_process_v9,
-	.runlist		= pm_runlist_v9,
-	.set_resources		= pm_set_resources_v9,
-	.map_queues		= pm_map_queues_v9,
-	.unmap_queues		= pm_unmap_queues_v9,
-	.query_status		= pm_query_status_v9,
-	.release_mem		= pm_release_mem_v9,
-	.map_process_size	= sizeof(struct pm4_mes_map_process),
-	.runlist_size		= sizeof(struct pm4_mes_runlist),
-	.set_resources_size	= sizeof(struct pm4_mes_set_resources),
-	.map_queues_size	= sizeof(struct pm4_mes_map_queues),
-	.unmap_queues_size	= sizeof(struct pm4_mes_unmap_queues),
-	.query_status_size	= sizeof(struct pm4_mes_query_status),
-	.release_mem_size	= sizeof(struct pm4_mec_release_mem)
-};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
deleted file mode 100644
index 2adaf40..0000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "kfd_kernel_queue.h"
-#include "kfd_device_queue_manager.h"
-#include "kfd_pm4_headers_vi.h"
-#include "kfd_pm4_opcodes.h"
-
-static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
-			enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_vi(struct kernel_queue *kq);
-static void submit_packet_vi(struct kernel_queue *kq);
-
-void kernel_queue_init_vi(struct kernel_queue_ops *ops)
-{
-	ops->initialize = initialize_vi;
-	ops->uninitialize = uninitialize_vi;
-	ops->submit_packet = submit_packet_vi;
-}
-
-static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
-			enum kfd_queue_type type, unsigned int queue_size)
-{
-	int retval;
-
-	retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
-	if (retval != 0)
-		return false;
-
-	kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
-	kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
-
-	memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
-
-	return true;
-}
-
-static void uninitialize_vi(struct kernel_queue *kq)
-{
-	kfd_gtt_sa_free(kq->dev, kq->eop_mem);
-}
-
-static void submit_packet_vi(struct kernel_queue *kq)
-{
-	*kq->wptr_kernel = kq->pending_wptr;
-	write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
-				kq->pending_wptr);
-}
-
-unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
-{
-	union PM4_MES_TYPE_3_HEADER header;
-
-	header.u32All = 0;
-	header.opcode = opcode;
-	header.count = packet_size / 4 - 2;
-	header.type = PM4_TYPE_3;
-
-	return header.u32All;
-}
-
-static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
-				struct qcm_process_device *qpd)
-{
-	struct pm4_mes_map_process *packet;
-
-	packet = (struct pm4_mes_map_process *)buffer;
-
-	memset(buffer, 0, sizeof(struct pm4_mes_map_process));
-
-	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
-					sizeof(struct pm4_mes_map_process));
-	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
-	packet->bitfields2.process_quantum = 1;
-	packet->bitfields2.pasid = qpd->pqm->process->pasid;
-	packet->bitfields3.page_table_base = qpd->page_table_base;
-	packet->bitfields10.gds_size = qpd->gds_size;
-	packet->bitfields10.num_gws = qpd->num_gws;
-	packet->bitfields10.num_oac = qpd->num_oac;
-	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
-
-	packet->sh_mem_config = qpd->sh_mem_config;
-	packet->sh_mem_bases = qpd->sh_mem_bases;
-	packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
-	packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
-
-	packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;
-
-	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
-	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
-
-	return 0;
-}
-
-static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
-			uint64_t ib, size_t ib_size_in_dwords, bool chain)
-{
-	struct pm4_mes_runlist *packet;
-	int concurrent_proc_cnt = 0;
-	struct kfd_dev *kfd = pm->dqm->dev;
-
-	if (WARN_ON(!ib))
-		return -EFAULT;
-
-	/* Determine the number of processes to map together to HW:
-	 * it can not exceed the number of VMIDs available to the
-	 * scheduler, and it is determined by the smaller of the number
-	 * of processes in the runlist and kfd module parameter
-	 * hws_max_conc_proc.
-	 * Note: the arbitration between the number of VMIDs and
-	 * hws_max_conc_proc has been done in
-	 * kgd2kfd_device_init().
-	 */
-	concurrent_proc_cnt = min(pm->dqm->processes_count,
-			kfd->max_proc_per_quantum);
-
-	packet = (struct pm4_mes_runlist *)buffer;
-
-	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
-	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
-						sizeof(struct pm4_mes_runlist));
-
-	packet->bitfields4.ib_size = ib_size_in_dwords;
-	packet->bitfields4.chain = chain ? 1 : 0;
-	packet->bitfields4.offload_polling = 0;
-	packet->bitfields4.valid = 1;
-	packet->bitfields4.process_cnt = concurrent_proc_cnt;
-	packet->ordinal2 = lower_32_bits(ib);
-	packet->bitfields3.ib_base_hi = upper_32_bits(ib);
-
-	return 0;
-}
-
-int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
-				struct scheduling_resources *res)
-{
-	struct pm4_mes_set_resources *packet;
-
-	packet = (struct pm4_mes_set_resources *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
-
-	packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
-					sizeof(struct pm4_mes_set_resources));
-
-	packet->bitfields2.queue_type =
-			queue_type__mes_set_resources__hsa_interface_queue_hiq;
-	packet->bitfields2.vmid_mask = res->vmid_mask;
-	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
-	packet->bitfields7.oac_mask = res->oac_mask;
-	packet->bitfields8.gds_heap_base = res->gds_heap_base;
-	packet->bitfields8.gds_heap_size = res->gds_heap_size;
-
-	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
-	packet->gws_mask_hi = upper_32_bits(res->gws_mask);
-
-	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
-	packet->queue_mask_hi = upper_32_bits(res->queue_mask);
-
-	return 0;
-}
-
-static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
-		struct queue *q, bool is_static)
-{
-	struct pm4_mes_map_queues *packet;
-	bool use_static = is_static;
-
-	packet = (struct pm4_mes_map_queues *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
-
-	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
-					sizeof(struct pm4_mes_map_queues));
-	packet->bitfields2.num_queues = 1;
-	packet->bitfields2.queue_sel =
-		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
-
-	packet->bitfields2.engine_sel =
-		engine_sel__mes_map_queues__compute_vi;
-	packet->bitfields2.queue_type =
-		queue_type__mes_map_queues__normal_compute_vi;
-
-	switch (q->properties.type) {
-	case KFD_QUEUE_TYPE_COMPUTE:
-		if (use_static)
-			packet->bitfields2.queue_type =
-		queue_type__mes_map_queues__normal_latency_static_queue_vi;
-		break;
-	case KFD_QUEUE_TYPE_DIQ:
-		packet->bitfields2.queue_type =
-			queue_type__mes_map_queues__debug_interface_queue_vi;
-		break;
-	case KFD_QUEUE_TYPE_SDMA:
-	case KFD_QUEUE_TYPE_SDMA_XGMI:
-		packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
-				engine_sel__mes_map_queues__sdma0_vi;
-		use_static = false; /* no static queues under SDMA */
-		break;
-	default:
-		WARN(1, "queue type %d", q->properties.type);
-		return -EINVAL;
-	}
-	packet->bitfields3.doorbell_offset =
-			q->properties.doorbell_off;
-
-	packet->mqd_addr_lo =
-			lower_32_bits(q->gart_mqd_addr);
-
-	packet->mqd_addr_hi =
-			upper_32_bits(q->gart_mqd_addr);
-
-	packet->wptr_addr_lo =
-			lower_32_bits((uint64_t)q->properties.write_ptr);
-
-	packet->wptr_addr_hi =
-			upper_32_bits((uint64_t)q->properties.write_ptr);
-
-	return 0;
-}
-
-static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
-			enum kfd_queue_type type,
-			enum kfd_unmap_queues_filter filter,
-			uint32_t filter_param, bool reset,
-			unsigned int sdma_engine)
-{
-	struct pm4_mes_unmap_queues *packet;
-
-	packet = (struct pm4_mes_unmap_queues *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
-
-	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
-					sizeof(struct pm4_mes_unmap_queues));
-	switch (type) {
-	case KFD_QUEUE_TYPE_COMPUTE:
-	case KFD_QUEUE_TYPE_DIQ:
-		packet->bitfields2.engine_sel =
-			engine_sel__mes_unmap_queues__compute;
-		break;
-	case KFD_QUEUE_TYPE_SDMA:
-	case KFD_QUEUE_TYPE_SDMA_XGMI:
-		packet->bitfields2.engine_sel =
-			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
-		break;
-	default:
-		WARN(1, "queue type %d", type);
-		return -EINVAL;
-	}
-
-	if (reset)
-		packet->bitfields2.action =
-			action__mes_unmap_queues__reset_queues;
-	else
-		packet->bitfields2.action =
-			action__mes_unmap_queues__preempt_queues;
-
-	switch (filter) {
-	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
-		packet->bitfields2.queue_sel =
-			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
-		packet->bitfields2.num_queues = 1;
-		packet->bitfields3b.doorbell_offset0 = filter_param;
-		break;
-	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
-		packet->bitfields2.queue_sel =
-			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
-		packet->bitfields3a.pasid = filter_param;
-		break;
-	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
-		packet->bitfields2.queue_sel =
-			queue_sel__mes_unmap_queues__unmap_all_queues;
-		break;
-	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
-		/* in this case, we do not preempt static queues */
-		packet->bitfields2.queue_sel =
-			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
-		break;
-	default:
-		WARN(1, "filter %d", filter);
-		return -EINVAL;
-	}
-
-	return 0;
-
-}
-
-static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
-			uint64_t fence_address,	uint32_t fence_value)
-{
-	struct pm4_mes_query_status *packet;
-
-	packet = (struct pm4_mes_query_status *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_query_status));
-
-	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
-					sizeof(struct pm4_mes_query_status));
-
-	packet->bitfields2.context_id = 0;
-	packet->bitfields2.interrupt_sel =
-			interrupt_sel__mes_query_status__completion_status;
-	packet->bitfields2.command =
-			command__mes_query_status__fence_only_after_write_ack;
-
-	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
-	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
-	packet->data_hi = upper_32_bits((uint64_t)fence_value);
-	packet->data_lo = lower_32_bits((uint64_t)fence_value);
-
-	return 0;
-}
-
-static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
-{
-	struct pm4_mec_release_mem *packet;
-
-	packet = (struct pm4_mec_release_mem *)buffer;
-	memset(buffer, 0, sizeof(*packet));
-
-	packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
-						 sizeof(*packet));
-
-	packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
-	packet->bitfields2.event_index = event_index___release_mem__end_of_pipe;
-	packet->bitfields2.tcl1_action_ena = 1;
-	packet->bitfields2.tc_action_ena = 1;
-	packet->bitfields2.cache_policy = cache_policy___release_mem__lru;
-	packet->bitfields2.atc = 0;
-
-	packet->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low;
-	packet->bitfields3.int_sel =
-		int_sel___release_mem__send_interrupt_after_write_confirm;
-
-	packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
-	packet->address_hi = upper_32_bits(gpu_addr);
-
-	packet->data_lo = 0;
-
-	return 0;
-}
-
-const struct packet_manager_funcs kfd_vi_pm_funcs = {
-	.map_process		= pm_map_process_vi,
-	.runlist		= pm_runlist_vi,
-	.set_resources		= pm_set_resources_vi,
-	.map_queues		= pm_map_queues_vi,
-	.unmap_queues		= pm_unmap_queues_vi,
-	.query_status		= pm_query_status_vi,
-	.release_mem		= pm_release_mem_vi,
-	.map_process_size	= sizeof(struct pm4_mes_map_process),
-	.runlist_size		= sizeof(struct pm4_mes_runlist),
-	.set_resources_size	= sizeof(struct pm4_mes_set_resources),
-	.map_queues_size	= sizeof(struct pm4_mes_map_queues),
-	.unmap_queues_size	= sizeof(struct pm4_mes_unmap_queues),
-	.query_status_size	= sizeof(struct pm4_mes_query_status),
-	.release_mem_size	= sizeof(struct pm4_mec_release_mem)
-};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 28876ac..19f0fe5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -374,7 +374,6 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
 
 	switch (type) {
 	case KFD_MQD_TYPE_CP:
-	case KFD_MQD_TYPE_COMPUTE:
 		mqd->allocate_mqd = allocate_mqd;
 		mqd->init_mqd = init_mqd;
 		mqd->free_mqd = free_mqd;
@@ -401,7 +400,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
 #endif
 		break;
 	case KFD_MQD_TYPE_DIQ:
-		mqd->allocate_mqd = allocate_hiq_mqd;
+		mqd->allocate_mqd = allocate_mqd;
 		mqd->init_mqd = init_mqd_hiq;
 		mqd->free_mqd = free_mqd;
 		mqd->load_mqd = load_mqd;
@@ -442,7 +441,7 @@ struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
 	mqd = mqd_manager_init_cik(type, dev);
 	if (!mqd)
 		return NULL;
-	if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE))
+	if (type == KFD_MQD_TYPE_CP)
 		mqd->update_mqd = update_mqd_hawaii;
 	return mqd;
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 4a236b2..d1d68a5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -66,6 +66,12 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
 		m->compute_static_thread_mgmt_se3);
 }
 
+static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q)
+{
+	m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
+	m->cp_hqd_queue_priority = q->priority;
+}
+
 static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
 		struct queue_properties *q)
 {
@@ -109,9 +115,6 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
 			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
 			10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
 
-	m->cp_hqd_pipe_priority = 1;
-	m->cp_hqd_queue_priority = 15;
-
 	if (q->format == KFD_QUEUE_FORMAT_AQL) {
 		m->cp_hqd_aql_control =
 			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
@@ -150,6 +153,14 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
 	return r;
 }
 
+static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
+			    uint32_t pipe_id, uint32_t queue_id,
+			    struct queue_properties *p, struct mm_struct *mms)
+{
+	return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
+					      queue_id, p->doorbell_off);
+}
+
 static void update_mqd(struct mqd_manager *mm, void *mqd,
 		      struct queue_properties *q)
 {
@@ -208,11 +219,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
 		m->cp_hqd_ctx_save_control = 0;
 
 	update_cu_mask(mm, mqd, q);
+	set_priority(m, q);
 
-	q->is_active = (q->queue_size > 0 &&
-			q->queue_address != 0 &&
-			q->queue_percent > 0 &&
-			!q->is_evicted);
+	q->is_active = QUEUE_IS_ACTIVE(*q);
 }
 
 static int destroy_mqd(struct mqd_manager *mm, void *mqd,
@@ -247,18 +256,22 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
 {
 	struct v10_compute_mqd *m;
 
-	/* Control stack is located one page after MQD. */
-	void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
-
 	m = get_mqd(mqd);
 
+	/* Control stack is written backwards, while workgroup context data
+	 * is written forwards. Both start from m->cp_hqd_cntl_stack_size.
+	 * Current position is at m->cp_hqd_cntl_stack_offset and
+	 * m->cp_hqd_wg_state_offset, respectively.
+	 */
 	*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
 		m->cp_hqd_cntl_stack_offset;
 	*save_area_used_size = m->cp_hqd_wg_state_offset -
 		m->cp_hqd_cntl_stack_size;
 
-	if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
-		return -EFAULT;
+	/* Control stack is not copied to user mode for GFXv10 because
+	 * it's part of the context save area that is already
+	 * accessible to user mode
+	 */
 
 	return 0;
 }
@@ -277,18 +290,6 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
 			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
 }
 
-static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
-			struct queue_properties *q)
-{
-	struct v10_compute_mqd *m;
-
-	update_mqd(mm, mqd, q);
-
-	/* TODO: what's the point? update_mqd already does this. */
-	m = get_mqd(mqd);
-	m->cp_hqd_vmid = q->vmid;
-}
-
 static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
 		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
 		struct queue_properties *q)
@@ -340,11 +341,7 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
 	m->sdma_queue_id = q->sdma_queue_id;
 	m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
 
-
-	q->is_active = (q->queue_size > 0 &&
-			q->queue_address != 0 &&
-			q->queue_percent > 0 &&
-			!q->is_evicted);
+	q->is_active = QUEUE_IS_ACTIVE(*q);
 }
 
 /*
@@ -392,7 +389,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
 	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
 		return NULL;
 
-	mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
 	if (!mqd)
 		return NULL;
 
@@ -400,7 +397,6 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
 
 	switch (type) {
 	case KFD_MQD_TYPE_CP:
-	case KFD_MQD_TYPE_COMPUTE:
 		pr_debug("%s@%i\n", __func__, __LINE__);
 		mqd->allocate_mqd = allocate_mqd;
 		mqd->init_mqd = init_mqd;
@@ -421,8 +417,8 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
 		mqd->allocate_mqd = allocate_hiq_mqd;
 		mqd->init_mqd = init_mqd_hiq;
 		mqd->free_mqd = free_mqd_hiq_sdma;
-		mqd->load_mqd = load_mqd;
-		mqd->update_mqd = update_mqd_hiq;
+		mqd->load_mqd = hiq_load_mqd_kiq;
+		mqd->update_mqd = update_mqd;
 		mqd->destroy_mqd = destroy_mqd;
 		mqd->is_occupied = is_occupied;
 		mqd->mqd_size = sizeof(struct v10_compute_mqd);
@@ -432,11 +428,11 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
 		pr_debug("%s@%i\n", __func__, __LINE__);
 		break;
 	case KFD_MQD_TYPE_DIQ:
-		mqd->allocate_mqd = allocate_hiq_mqd;
+		mqd->allocate_mqd = allocate_mqd;
 		mqd->init_mqd = init_mqd_hiq;
 		mqd->free_mqd = free_mqd;
 		mqd->load_mqd = load_mqd;
-		mqd->update_mqd = update_mqd_hiq;
+		mqd->update_mqd = update_mqd;
 		mqd->destroy_mqd = destroy_mqd;
 		mqd->is_occupied = is_occupied;
 		mqd->mqd_size = sizeof(struct v10_compute_mqd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index d3380c5..436b7f5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -92,7 +92,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
 	 * instead of sub-allocation function.
 	 */
 	if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
-		mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
+		mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
 		if (!mqd_mem_obj)
 			return NULL;
 		retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
@@ -191,6 +191,14 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
 					  wptr_shift, 0, mms);
 }
 
+static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
+			    uint32_t pipe_id, uint32_t queue_id,
+			    struct queue_properties *p, struct mm_struct *mms)
+{
+	return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
+					      queue_id, p->doorbell_off);
+}
+
 static void update_mqd(struct mqd_manager *mm, void *mqd,
 		      struct queue_properties *q)
 {
@@ -302,7 +310,8 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
 
 	*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
 		m->cp_hqd_cntl_stack_offset;
-	*save_area_used_size = m->cp_hqd_wg_state_offset;
+	*save_area_used_size = m->cp_hqd_wg_state_offset -
+		m->cp_hqd_cntl_stack_size;
 
 	if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
 		return -EFAULT;
@@ -324,18 +333,6 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
 			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
 }
 
-static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
-			struct queue_properties *q)
-{
-	struct v9_mqd *m;
-
-	update_mqd(mm, mqd, q);
-
-	/* TODO: what's the point? update_mqd already does this. */
-	m = get_mqd(mqd);
-	m->cp_hqd_vmid = q->vmid;
-}
-
 static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
 		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
 		struct queue_properties *q)
@@ -443,7 +440,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
 
 	switch (type) {
 	case KFD_MQD_TYPE_CP:
-	case KFD_MQD_TYPE_COMPUTE:
 		mqd->allocate_mqd = allocate_mqd;
 		mqd->init_mqd = init_mqd;
 		mqd->free_mqd = free_mqd;
@@ -461,8 +457,8 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
 		mqd->allocate_mqd = allocate_hiq_mqd;
 		mqd->init_mqd = init_mqd_hiq;
 		mqd->free_mqd = free_mqd_hiq_sdma;
-		mqd->load_mqd = load_mqd;
-		mqd->update_mqd = update_mqd_hiq;
+		mqd->load_mqd = hiq_load_mqd_kiq;
+		mqd->update_mqd = update_mqd;
 		mqd->destroy_mqd = destroy_mqd;
 		mqd->is_occupied = is_occupied;
 		mqd->mqd_size = sizeof(struct v9_mqd);
@@ -471,11 +467,11 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
 #endif
 		break;
 	case KFD_MQD_TYPE_DIQ:
-		mqd->allocate_mqd = allocate_hiq_mqd;
+		mqd->allocate_mqd = allocate_mqd;
 		mqd->init_mqd = init_mqd_hiq;
 		mqd->free_mqd = free_mqd;
 		mqd->load_mqd = load_mqd;
-		mqd->update_mqd = update_mqd_hiq;
+		mqd->update_mqd = update_mqd;
 		mqd->destroy_mqd = destroy_mqd;
 		mqd->is_occupied = is_occupied;
 		mqd->mqd_size = sizeof(struct v9_mqd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 7d144f5..a5e8ff1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -312,11 +312,7 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
 static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
 			struct queue_properties *q)
 {
-	struct vi_mqd *m;
 	__update_mqd(mm, mqd, q, MTYPE_UC, 0);
-
-	m = get_mqd(mqd);
-	m->cp_hqd_vmid = q->vmid;
 }
 
 static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
@@ -425,7 +421,6 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
 
 	switch (type) {
 	case KFD_MQD_TYPE_CP:
-	case KFD_MQD_TYPE_COMPUTE:
 		mqd->allocate_mqd = allocate_mqd;
 		mqd->init_mqd = init_mqd;
 		mqd->free_mqd = free_mqd;
@@ -453,7 +448,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
 #endif
 		break;
 	case KFD_MQD_TYPE_DIQ:
-		mqd->allocate_mqd = allocate_hiq_mqd;
+		mqd->allocate_mqd = allocate_mqd;
 		mqd->init_mqd = init_mqd_hiq;
 		mqd->free_mqd = free_mqd;
 		mqd->load_mqd = load_mqd;
@@ -494,7 +489,7 @@ struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
 	mqd = mqd_manager_init_vi(type, dev);
 	if (!mqd)
 		return NULL;
-	if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE))
+	if (type == KFD_MQD_TYPE_CP)
 		mqd->update_mqd = update_mqd_tonga;
 	return mqd;
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 83ef4b3..dc406e6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -241,12 +241,10 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
 	case CHIP_RAVEN:
 	case CHIP_RENOIR:
 	case CHIP_ARCTURUS:
-		pm->pmf = &kfd_v9_pm_funcs;
-		break;
 	case CHIP_NAVI10:
 	case CHIP_NAVI12:
 	case CHIP_NAVI14:
-		pm->pmf = &kfd_v10_pm_funcs;
+		pm->pmf = &kfd_v9_pm_funcs;
 		break;
 	default:
 		WARN(1, "Unexpected ASIC family %u",
@@ -266,10 +264,10 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
 	return 0;
 }
 
-void pm_uninit(struct packet_manager *pm)
+void pm_uninit(struct packet_manager *pm, bool hanging)
 {
 	mutex_destroy(&pm->lock);
-	kernel_queue_uninit(pm->priv_queue);
+	kernel_queue_uninit(pm->priv_queue, hanging);
 }
 
 int pm_send_set_resources(struct packet_manager *pm,
@@ -280,7 +278,7 @@ int pm_send_set_resources(struct packet_manager *pm,
 
 	size = pm->pmf->set_resources_size;
 	mutex_lock(&pm->lock);
-	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+	kq_acquire_packet_buffer(pm->priv_queue,
 					size / sizeof(uint32_t),
 					(unsigned int **)&buffer);
 	if (!buffer) {
@@ -291,9 +289,9 @@ int pm_send_set_resources(struct packet_manager *pm,
 
 	retval = pm->pmf->set_resources(pm, buffer, res);
 	if (!retval)
-		pm->priv_queue->ops.submit_packet(pm->priv_queue);
+		kq_submit_packet(pm->priv_queue);
 	else
-		pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+		kq_rollback_packet(pm->priv_queue);
 
 out:
 	mutex_unlock(&pm->lock);
@@ -318,7 +316,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
 	mutex_lock(&pm->lock);
 
-	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+	retval = kq_acquire_packet_buffer(pm->priv_queue,
 					packet_size_dwords, &rl_buffer);
 	if (retval)
 		goto fail_acquire_packet_buffer;
@@ -328,14 +326,14 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 	if (retval)
 		goto fail_create_runlist;
 
-	pm->priv_queue->ops.submit_packet(pm->priv_queue);
+	kq_submit_packet(pm->priv_queue);
 
 	mutex_unlock(&pm->lock);
 
 	return retval;
 
 fail_create_runlist:
-	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+	kq_rollback_packet(pm->priv_queue);
 fail_acquire_packet_buffer:
 	mutex_unlock(&pm->lock);
 fail_create_runlist_ib:
@@ -354,7 +352,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 
 	size = pm->pmf->query_status_size;
 	mutex_lock(&pm->lock);
-	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+	kq_acquire_packet_buffer(pm->priv_queue,
 			size / sizeof(uint32_t), (unsigned int **)&buffer);
 	if (!buffer) {
 		pr_err("Failed to allocate buffer on kernel queue\n");
@@ -364,9 +362,9 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 
 	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
 	if (!retval)
-		pm->priv_queue->ops.submit_packet(pm->priv_queue);
+		kq_submit_packet(pm->priv_queue);
 	else
-		pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+		kq_rollback_packet(pm->priv_queue);
 
 out:
 	mutex_unlock(&pm->lock);
@@ -383,7 +381,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 
 	size = pm->pmf->unmap_queues_size;
 	mutex_lock(&pm->lock);
-	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+	kq_acquire_packet_buffer(pm->priv_queue,
 			size / sizeof(uint32_t), (unsigned int **)&buffer);
 	if (!buffer) {
 		pr_err("Failed to allocate buffer on kernel queue\n");
@@ -394,9 +392,9 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 	retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
 				       reset, sdma_engine);
 	if (!retval)
-		pm->priv_queue->ops.submit_packet(pm->priv_queue);
+		kq_submit_packet(pm->priv_queue);
 	else
-		pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+		kq_rollback_packet(pm->priv_queue);
 
 out:
 	mutex_unlock(&pm->lock);
@@ -441,7 +439,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm)
 
 	size = pm->pmf->query_status_size;
 	mutex_lock(&pm->lock);
-	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+	kq_acquire_packet_buffer(pm->priv_queue,
 			size / sizeof(uint32_t), (unsigned int **)&buffer);
 	if (!buffer) {
 		pr_err("Failed to allocate buffer on kernel queue\n");
@@ -449,7 +447,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm)
 		goto out;
 	}
 	memset(buffer, 0x55, size);
-	pm->priv_queue->ops.submit_packet(pm->priv_queue);
+	kq_submit_packet(pm->priv_queue);
 
 	pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
 		buffer[0], buffer[1], buffer[2], buffer[3],
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
new file mode 100644
index 0000000..2de0100
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "kfd_kernel_queue.h"
+#include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers_ai.h"
+#include "kfd_pm4_opcodes.h"
+#include "gc/gc_10_1_0_sh_mask.h"
+
+static int pm_map_process_v9(struct packet_manager *pm,
+		uint32_t *buffer, struct qcm_process_device *qpd)
+{
+	struct pm4_mes_map_process *packet;
+	uint64_t vm_page_table_base_addr = qpd->page_table_base;
+
+	packet = (struct pm4_mes_map_process *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_map_process));
+
+	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
+					sizeof(struct pm4_mes_map_process));
+	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
+	packet->bitfields2.process_quantum = 1;
+	packet->bitfields2.pasid = qpd->pqm->process->pasid;
+	packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
+	packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
+	packet->bitfields14.num_gws = qpd->num_gws;
+	packet->bitfields14.num_oac = qpd->num_oac;
+	packet->bitfields14.sdma_enable = 1;
+	packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+
+	packet->sh_mem_config = qpd->sh_mem_config;
+	packet->sh_mem_bases = qpd->sh_mem_bases;
+	if (qpd->tba_addr) {
+		packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
+		/* On GFX9, unlike GFX10, bit TRAP_EN of SQ_SHADER_TBA_HI is
+		 * not defined, so setting it won't do any harm.
+		 */
+		packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8)
+				| 1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT;
+
+		packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
+		packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
+	}
+
+	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
+	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
+
+	packet->vm_context_page_table_base_addr_lo32 =
+			lower_32_bits(vm_page_table_base_addr);
+	packet->vm_context_page_table_base_addr_hi32 =
+			upper_32_bits(vm_page_table_base_addr);
+
+	return 0;
+}
+
+static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
+			uint64_t ib, size_t ib_size_in_dwords, bool chain)
+{
+	struct pm4_mes_runlist *packet;
+
+	int concurrent_proc_cnt = 0;
+	struct kfd_dev *kfd = pm->dqm->dev;
+
+	/* Determine the number of processes to map together to HW:
+	 * it cannot exceed the number of VMIDs available to the
+	 * scheduler, and it is determined by the smaller of the number
+	 * of processes in the runlist and kfd module parameter
+	 * hws_max_conc_proc.
+	 * Note: the arbitration between the number of VMIDs and
+	 * hws_max_conc_proc has been done in
+	 * kgd2kfd_device_init().
+	 */
+	concurrent_proc_cnt = min(pm->dqm->processes_count,
+			kfd->max_proc_per_quantum);
+
+	packet = (struct pm4_mes_runlist *)buffer;
+
+	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
+	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
+						sizeof(struct pm4_mes_runlist));
+
+	packet->bitfields4.ib_size = ib_size_in_dwords;
+	packet->bitfields4.chain = chain ? 1 : 0;
+	packet->bitfields4.offload_polling = 0;
+	packet->bitfields4.chained_runlist_idle_disable = chain ? 1 : 0;
+	packet->bitfields4.valid = 1;
+	packet->bitfields4.process_cnt = concurrent_proc_cnt;
+	packet->ordinal2 = lower_32_bits(ib);
+	packet->ib_base_hi = upper_32_bits(ib);
+
+	return 0;
+}
+
+static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
+				struct scheduling_resources *res)
+{
+	struct pm4_mes_set_resources *packet;
+
+	packet = (struct pm4_mes_set_resources *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
+
+	packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
+					sizeof(struct pm4_mes_set_resources));
+
+	packet->bitfields2.queue_type =
+			queue_type__mes_set_resources__hsa_interface_queue_hiq;
+	packet->bitfields2.vmid_mask = res->vmid_mask;
+	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
+	packet->bitfields7.oac_mask = res->oac_mask;
+	packet->bitfields8.gds_heap_base = res->gds_heap_base;
+	packet->bitfields8.gds_heap_size = res->gds_heap_size;
+
+	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
+	packet->gws_mask_hi = upper_32_bits(res->gws_mask);
+
+	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
+	packet->queue_mask_hi = upper_32_bits(res->queue_mask);
+
+	return 0;
+}
+
+static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
+		struct queue *q, bool is_static)
+{
+	struct pm4_mes_map_queues *packet;
+	bool use_static = is_static;
+
+	packet = (struct pm4_mes_map_queues *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
+
+	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
+					sizeof(struct pm4_mes_map_queues));
+	packet->bitfields2.num_queues = 1;
+	packet->bitfields2.queue_sel =
+		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
+
+	packet->bitfields2.engine_sel =
+		engine_sel__mes_map_queues__compute_vi;
+	packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
+	packet->bitfields2.extended_engine_sel =
+		extended_engine_sel__mes_map_queues__legacy_engine_sel;
+	packet->bitfields2.queue_type =
+		queue_type__mes_map_queues__normal_compute_vi;
+
+	switch (q->properties.type) {
+	case KFD_QUEUE_TYPE_COMPUTE:
+		if (use_static)
+			packet->bitfields2.queue_type =
+		queue_type__mes_map_queues__normal_latency_static_queue_vi;
+		break;
+	case KFD_QUEUE_TYPE_DIQ:
+		packet->bitfields2.queue_type =
+			queue_type__mes_map_queues__debug_interface_queue_vi;
+		break;
+	case KFD_QUEUE_TYPE_SDMA:
+	case KFD_QUEUE_TYPE_SDMA_XGMI:
+		use_static = false; /* no static queues under SDMA */
+		if (q->properties.sdma_engine_id < 2)
+			packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
+				engine_sel__mes_map_queues__sdma0_vi;
+		else {
+			packet->bitfields2.extended_engine_sel =
+				extended_engine_sel__mes_map_queues__sdma0_to_7_sel;
+			packet->bitfields2.engine_sel = q->properties.sdma_engine_id;
+		}
+		break;
+	default:
+		WARN(1, "queue type %d", q->properties.type);
+		return -EINVAL;
+	}
+	packet->bitfields3.doorbell_offset =
+			q->properties.doorbell_off;
+
+	packet->mqd_addr_lo =
+			lower_32_bits(q->gart_mqd_addr);
+
+	packet->mqd_addr_hi =
+			upper_32_bits(q->gart_mqd_addr);
+
+	packet->wptr_addr_lo =
+			lower_32_bits((uint64_t)q->properties.write_ptr);
+
+	packet->wptr_addr_hi =
+			upper_32_bits((uint64_t)q->properties.write_ptr);
+
+	return 0;
+}
+
+static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
+			enum kfd_queue_type type,
+			enum kfd_unmap_queues_filter filter,
+			uint32_t filter_param, bool reset,
+			unsigned int sdma_engine)
+{
+	struct pm4_mes_unmap_queues *packet;
+
+	packet = (struct pm4_mes_unmap_queues *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
+
+	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
+					sizeof(struct pm4_mes_unmap_queues));
+	switch (type) {
+	case KFD_QUEUE_TYPE_COMPUTE:
+	case KFD_QUEUE_TYPE_DIQ:
+		packet->bitfields2.extended_engine_sel =
+			extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
+		packet->bitfields2.engine_sel =
+			engine_sel__mes_unmap_queues__compute;
+		break;
+	case KFD_QUEUE_TYPE_SDMA:
+	case KFD_QUEUE_TYPE_SDMA_XGMI:
+		if (sdma_engine < 2) {
+			packet->bitfields2.extended_engine_sel =
+				extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
+			packet->bitfields2.engine_sel =
+				engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
+		} else {
+			packet->bitfields2.extended_engine_sel =
+				extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel;
+			packet->bitfields2.engine_sel = sdma_engine;
+		}
+		break;
+	default:
+		WARN(1, "queue type %d", type);
+		return -EINVAL;
+	}
+
+	if (reset)
+		packet->bitfields2.action =
+			action__mes_unmap_queues__reset_queues;
+	else
+		packet->bitfields2.action =
+			action__mes_unmap_queues__preempt_queues;
+
+	switch (filter) {
+	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
+		packet->bitfields2.num_queues = 1;
+		packet->bitfields3b.doorbell_offset0 = filter_param;
+		break;
+	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
+		packet->bitfields3a.pasid = filter_param;
+		break;
+	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__unmap_all_queues;
+		break;
+	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
+		/* in this case, we do not preempt static queues */
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
+		break;
+	default:
+		WARN(1, "filter %d", filter);
+		return -EINVAL;
+	}
+
+	return 0;
+
+}
+
+static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
+			uint64_t fence_address,	uint32_t fence_value)
+{
+	struct pm4_mes_query_status *packet;
+
+	packet = (struct pm4_mes_query_status *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_query_status));
+
+
+	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
+					sizeof(struct pm4_mes_query_status));
+
+	packet->bitfields2.context_id = 0;
+	packet->bitfields2.interrupt_sel =
+			interrupt_sel__mes_query_status__completion_status;
+	packet->bitfields2.command =
+			command__mes_query_status__fence_only_after_write_ack;
+
+	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
+	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
+	packet->data_hi = upper_32_bits((uint64_t)fence_value);
+	packet->data_lo = lower_32_bits((uint64_t)fence_value);
+
+	return 0;
+}
+
+const struct packet_manager_funcs kfd_v9_pm_funcs = {
+	.map_process		= pm_map_process_v9,
+	.runlist		= pm_runlist_v9,
+	.set_resources		= pm_set_resources_v9,
+	.map_queues		= pm_map_queues_v9,
+	.unmap_queues		= pm_unmap_queues_v9,
+	.query_status		= pm_query_status_v9,
+	.release_mem		= NULL,
+	.map_process_size	= sizeof(struct pm4_mes_map_process),
+	.runlist_size		= sizeof(struct pm4_mes_runlist),
+	.set_resources_size	= sizeof(struct pm4_mes_set_resources),
+	.map_queues_size	= sizeof(struct pm4_mes_map_queues),
+	.unmap_queues_size	= sizeof(struct pm4_mes_unmap_queues),
+	.query_status_size	= sizeof(struct pm4_mes_query_status),
+	.release_mem_size	= 0,
+};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
new file mode 100644
index 0000000..bed4d0c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "kfd_kernel_queue.h"
+#include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers_vi.h"
+#include "kfd_pm4_opcodes.h"
+
+unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
+{
+	union PM4_MES_TYPE_3_HEADER header;
+
+	header.u32All = 0;
+	header.opcode = opcode;
+	header.count = packet_size / 4 - 2;
+	header.type = PM4_TYPE_3;
+
+	return header.u32All;
+}
+
+static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
+				struct qcm_process_device *qpd)
+{
+	struct pm4_mes_map_process *packet;
+
+	packet = (struct pm4_mes_map_process *)buffer;
+
+	memset(buffer, 0, sizeof(struct pm4_mes_map_process));
+
+	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
+					sizeof(struct pm4_mes_map_process));
+	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
+	packet->bitfields2.process_quantum = 1;
+	packet->bitfields2.pasid = qpd->pqm->process->pasid;
+	packet->bitfields3.page_table_base = qpd->page_table_base;
+	packet->bitfields10.gds_size = qpd->gds_size;
+	packet->bitfields10.num_gws = qpd->num_gws;
+	packet->bitfields10.num_oac = qpd->num_oac;
+	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+
+	packet->sh_mem_config = qpd->sh_mem_config;
+	packet->sh_mem_bases = qpd->sh_mem_bases;
+	packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
+	packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
+
+	packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;
+
+	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
+	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
+
+	return 0;
+}
+
+static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+			uint64_t ib, size_t ib_size_in_dwords, bool chain)
+{
+	struct pm4_mes_runlist *packet;
+	int concurrent_proc_cnt = 0;
+	struct kfd_dev *kfd = pm->dqm->dev;
+
+	if (WARN_ON(!ib))
+		return -EFAULT;
+
+	/* Determine the number of processes to map together to HW:
+	 * it cannot exceed the number of VMIDs available to the
+	 * scheduler, and it is determined by the smaller of the number
+	 * of processes in the runlist and kfd module parameter
+	 * hws_max_conc_proc.
+	 * Note: the arbitration between the number of VMIDs and
+	 * hws_max_conc_proc has been done in
+	 * kgd2kfd_device_init().
+	 */
+	concurrent_proc_cnt = min(pm->dqm->processes_count,
+			kfd->max_proc_per_quantum);
+
+	packet = (struct pm4_mes_runlist *)buffer;
+
+	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
+	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
+						sizeof(struct pm4_mes_runlist));
+
+	packet->bitfields4.ib_size = ib_size_in_dwords;
+	packet->bitfields4.chain = chain ? 1 : 0;
+	packet->bitfields4.offload_polling = 0;
+	packet->bitfields4.valid = 1;
+	packet->bitfields4.process_cnt = concurrent_proc_cnt;
+	packet->ordinal2 = lower_32_bits(ib);
+	packet->bitfields3.ib_base_hi = upper_32_bits(ib);
+
+	return 0;
+}
+
+int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+				struct scheduling_resources *res)
+{
+	struct pm4_mes_set_resources *packet;
+
+	packet = (struct pm4_mes_set_resources *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
+
+	packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
+					sizeof(struct pm4_mes_set_resources));
+
+	packet->bitfields2.queue_type =
+			queue_type__mes_set_resources__hsa_interface_queue_hiq;
+	packet->bitfields2.vmid_mask = res->vmid_mask;
+	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
+	packet->bitfields7.oac_mask = res->oac_mask;
+	packet->bitfields8.gds_heap_base = res->gds_heap_base;
+	packet->bitfields8.gds_heap_size = res->gds_heap_size;
+
+	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
+	packet->gws_mask_hi = upper_32_bits(res->gws_mask);
+
+	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
+	packet->queue_mask_hi = upper_32_bits(res->queue_mask);
+
+	return 0;
+}
+
+static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+		struct queue *q, bool is_static)
+{
+	struct pm4_mes_map_queues *packet;
+	bool use_static = is_static;
+
+	packet = (struct pm4_mes_map_queues *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
+
+	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
+					sizeof(struct pm4_mes_map_queues));
+	packet->bitfields2.num_queues = 1;
+	packet->bitfields2.queue_sel =
+		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
+
+	packet->bitfields2.engine_sel =
+		engine_sel__mes_map_queues__compute_vi;
+	packet->bitfields2.queue_type =
+		queue_type__mes_map_queues__normal_compute_vi;
+
+	switch (q->properties.type) {
+	case KFD_QUEUE_TYPE_COMPUTE:
+		if (use_static)
+			packet->bitfields2.queue_type =
+		queue_type__mes_map_queues__normal_latency_static_queue_vi;
+		break;
+	case KFD_QUEUE_TYPE_DIQ:
+		packet->bitfields2.queue_type =
+			queue_type__mes_map_queues__debug_interface_queue_vi;
+		break;
+	case KFD_QUEUE_TYPE_SDMA:
+	case KFD_QUEUE_TYPE_SDMA_XGMI:
+		packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
+				engine_sel__mes_map_queues__sdma0_vi;
+		use_static = false; /* no static queues under SDMA */
+		break;
+	default:
+		WARN(1, "queue type %d", q->properties.type);
+		return -EINVAL;
+	}
+	packet->bitfields3.doorbell_offset =
+			q->properties.doorbell_off;
+
+	packet->mqd_addr_lo =
+			lower_32_bits(q->gart_mqd_addr);
+
+	packet->mqd_addr_hi =
+			upper_32_bits(q->gart_mqd_addr);
+
+	packet->wptr_addr_lo =
+			lower_32_bits((uint64_t)q->properties.write_ptr);
+
+	packet->wptr_addr_hi =
+			upper_32_bits((uint64_t)q->properties.write_ptr);
+
+	return 0;
+}
+
+static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+			enum kfd_queue_type type,
+			enum kfd_unmap_queues_filter filter,
+			uint32_t filter_param, bool reset,
+			unsigned int sdma_engine)
+{
+	struct pm4_mes_unmap_queues *packet;
+
+	packet = (struct pm4_mes_unmap_queues *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
+
+	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
+					sizeof(struct pm4_mes_unmap_queues));
+	switch (type) {
+	case KFD_QUEUE_TYPE_COMPUTE:
+	case KFD_QUEUE_TYPE_DIQ:
+		packet->bitfields2.engine_sel =
+			engine_sel__mes_unmap_queues__compute;
+		break;
+	case KFD_QUEUE_TYPE_SDMA:
+	case KFD_QUEUE_TYPE_SDMA_XGMI:
+		packet->bitfields2.engine_sel =
+			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
+		break;
+	default:
+		WARN(1, "queue type %d", type);
+		return -EINVAL;
+	}
+
+	if (reset)
+		packet->bitfields2.action =
+			action__mes_unmap_queues__reset_queues;
+	else
+		packet->bitfields2.action =
+			action__mes_unmap_queues__preempt_queues;
+
+	switch (filter) {
+	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
+		packet->bitfields2.num_queues = 1;
+		packet->bitfields3b.doorbell_offset0 = filter_param;
+		break;
+	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
+		packet->bitfields3a.pasid = filter_param;
+		break;
+	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__unmap_all_queues;
+		break;
+	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
+		/* in this case, we do not preempt static queues */
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
+		break;
+	default:
+		WARN(1, "filter %d", filter);
+		return -EINVAL;
+	}
+
+	return 0;
+
+}
+
+static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+			uint64_t fence_address,	uint32_t fence_value)
+{
+	struct pm4_mes_query_status *packet;
+
+	packet = (struct pm4_mes_query_status *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_query_status));
+
+	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
+					sizeof(struct pm4_mes_query_status));
+
+	packet->bitfields2.context_id = 0;
+	packet->bitfields2.interrupt_sel =
+			interrupt_sel__mes_query_status__completion_status;
+	packet->bitfields2.command =
+			command__mes_query_status__fence_only_after_write_ack;
+
+	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
+	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
+	packet->data_hi = upper_32_bits((uint64_t)fence_value);
+	packet->data_lo = lower_32_bits((uint64_t)fence_value);
+
+	return 0;
+}
+
+static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+{
+	struct pm4_mec_release_mem *packet;
+
+	packet = (struct pm4_mec_release_mem *)buffer;
+	memset(buffer, 0, sizeof(*packet));
+
+	packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
+						 sizeof(*packet));
+
+	packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
+	packet->bitfields2.event_index = event_index___release_mem__end_of_pipe;
+	packet->bitfields2.tcl1_action_ena = 1;
+	packet->bitfields2.tc_action_ena = 1;
+	packet->bitfields2.cache_policy = cache_policy___release_mem__lru;
+	packet->bitfields2.atc = 0;
+
+	packet->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low;
+	packet->bitfields3.int_sel =
+		int_sel___release_mem__send_interrupt_after_write_confirm;
+
+	packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
+	packet->address_hi = upper_32_bits(gpu_addr);
+
+	packet->data_lo = 0;
+
+	return 0;
+}
+
+const struct packet_manager_funcs kfd_vi_pm_funcs = {
+	.map_process		= pm_map_process_vi,
+	.runlist		= pm_runlist_vi,
+	.set_resources		= pm_set_resources_vi,
+	.map_queues		= pm_map_queues_vi,
+	.unmap_queues		= pm_unmap_queues_vi,
+	.query_status		= pm_query_status_vi,
+	.release_mem		= pm_release_mem_vi,
+	.map_process_size	= sizeof(struct pm4_mes_map_process),
+	.runlist_size		= sizeof(struct pm4_mes_runlist),
+	.set_resources_size	= sizeof(struct pm4_mes_set_resources),
+	.map_queues_size	= sizeof(struct pm4_mes_map_queues),
+	.unmap_queues_size	= sizeof(struct pm4_mes_unmap_queues),
+	.query_status_size	= sizeof(struct pm4_mes_query_status),
+	.release_mem_size	= sizeof(struct pm4_mec_release_mem)
+};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 060a9e8..6af1b58 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -59,24 +59,21 @@
  * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
  *  defines are w.r.t to PAGE_SIZE
  */
-#define KFD_MMAP_TYPE_SHIFT	(62 - PAGE_SHIFT)
+#define KFD_MMAP_TYPE_SHIFT	62
 #define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
 #define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
 #define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
 #define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)
 #define KFD_MMAP_TYPE_MMIO	(0x0ULL << KFD_MMAP_TYPE_SHIFT)
 
-#define KFD_MMAP_GPU_ID_SHIFT (46 - PAGE_SHIFT)
+#define KFD_MMAP_GPU_ID_SHIFT 46
 #define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
 				<< KFD_MMAP_GPU_ID_SHIFT)
 #define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
 				& KFD_MMAP_GPU_ID_MASK)
-#define KFD_MMAP_GPU_ID_GET(offset)    ((offset & KFD_MMAP_GPU_ID_MASK) \
+#define KFD_MMAP_GET_GPU_ID(offset)    ((offset & KFD_MMAP_GPU_ID_MASK) \
 				>> KFD_MMAP_GPU_ID_SHIFT)
 
-#define KFD_MMAP_OFFSET_VALUE_MASK	(0x3FFFFFFFFFFFULL >> PAGE_SHIFT)
-#define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK)
-
 /*
  * When working with cp scheduler we should assign the HIQ manually or via
  * the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot
@@ -238,9 +235,10 @@ struct kfd_dev {
 					 * KFD. It is aligned for mapping
 					 * into user mode
 					 */
-	size_t doorbell_id_offset;	/* Doorbell offset (from KFD doorbell
-					 * to HW doorbell, GFX reserved some
-					 * at the start)
+	size_t doorbell_base_dw_offset;	/* Offset from the start of the PCI
+					 * doorbell BAR to the first KFD
+					 * doorbell in dwords. GFX reserves
+					 * the segment before this offset.
 					 */
 	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
 					   * page used by kernel queue
@@ -510,8 +508,7 @@ struct queue {
  * Please read the kfd_mqd_manager.h description.
  */
 enum KFD_MQD_TYPE {
-	KFD_MQD_TYPE_COMPUTE = 0,	/* for no cp scheduling */
-	KFD_MQD_TYPE_HIQ,		/* for hiq */
+	KFD_MQD_TYPE_HIQ = 0,		/* for hiq */
 	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
 	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
 	KFD_MQD_TYPE_DIQ,		/* for diq */
@@ -818,7 +815,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
 u32 read_kernel_doorbell(u32 __iomem *db);
 void write_kernel_doorbell(void __iomem *db, u32 value);
 void write_kernel_doorbell64(void __iomem *db, u64 value);
-unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
+unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
 					struct kfd_process *process,
 					unsigned int doorbell_id);
 phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
@@ -886,7 +883,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
 void device_queue_manager_uninit(struct device_queue_manager *dqm);
 struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 					enum kfd_queue_type type);
-void kernel_queue_uninit(struct kernel_queue *kq);
+void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
 int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
 
 /* Process Queue Manager */
@@ -904,7 +901,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 			    struct kfd_dev *dev,
 			    struct file *f,
 			    struct queue_properties *properties,
-			    unsigned int *qid);
+			    unsigned int *qid,
+			    uint32_t *p_doorbell_offset_in_process);
 int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
 int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
 			struct queue_properties *p);
@@ -972,10 +970,9 @@ struct packet_manager_funcs {
 
 extern const struct packet_manager_funcs kfd_vi_pm_funcs;
 extern const struct packet_manager_funcs kfd_v9_pm_funcs;
-extern const struct packet_manager_funcs kfd_v10_pm_funcs;
 
 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
-void pm_uninit(struct packet_manager *pm);
+void pm_uninit(struct packet_manager *pm, bool hanging);
 int pm_send_set_resources(struct packet_manager *pm,
 				struct scheduling_resources *res);
 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
@@ -991,9 +988,6 @@ void pm_release_ib(struct packet_manager *pm);
 
 /* Following PM funcs can be shared among VI and AI */
 unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
-int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
-			struct scheduling_resources *res);
-
 
 uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 10f9af5..25b90f7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -32,6 +32,7 @@
 #include <linux/mman.h>
 #include <linux/file.h>
 #include "amdgpu_amdkfd.h"
+#include "amdgpu.h"
 
 struct mm_struct;
 
@@ -324,6 +325,8 @@ struct kfd_process *kfd_create_process(struct file *filep)
 					(int)process->lead_thread->pid);
 	}
 out:
+	if (!IS_ERR(process))
+		kref_get(&process->ref);
 	mutex_unlock(&kfd_processes_mutex);
 
 	return process;
@@ -560,8 +563,7 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
 		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
 			continue;
 
-		offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
-			<< PAGE_SHIFT;
+		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
 		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
 			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
 			MAP_SHARED, offset);
@@ -1151,16 +1153,17 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
 void kfd_flush_tlb(struct kfd_process_device *pdd)
 {
 	struct kfd_dev *dev = pdd->dev;
-	const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
 
 	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
 		/* Nothing to flush until a VMID is assigned, which
 		 * only happens when the first queue is created.
 		 */
 		if (pdd->qpd.vmid)
-			f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
+			amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd,
+							pdd->qpd.vmid);
 	} else {
-		f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
+		amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
+						pdd->process->pasid);
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 2659d22..31fcd1b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -162,7 +162,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
 	pqm->queue_slot_bitmap = NULL;
 }
 
-static int create_cp_queue(struct process_queue_manager *pqm,
+static int init_user_queue(struct process_queue_manager *pqm,
 				struct kfd_dev *dev, struct queue **q,
 				struct queue_properties *q_properties,
 				struct file *f, unsigned int qid)
@@ -192,7 +192,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 			    struct kfd_dev *dev,
 			    struct file *f,
 			    struct queue_properties *properties,
-			    unsigned int *qid)
+			    unsigned int *qid,
+			    uint32_t *p_doorbell_offset_in_process)
 {
 	int retval;
 	struct kfd_process_device *pdd;
@@ -250,7 +251,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 			goto err_create_queue;
 		}
 
-		retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+		retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
 		if (retval != 0)
 			goto err_create_queue;
 		pqn->q = q;
@@ -271,7 +272,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 			goto err_create_queue;
 		}
 
-		retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+		retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
 		if (retval != 0)
 			goto err_create_queue;
 		pqn->q = q;
@@ -303,12 +304,15 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 		goto err_create_queue;
 	}
 
-	if (q)
+	if (q && p_doorbell_offset_in_process)
 		/* Return the doorbell offset within the doorbell page
 		 * to the caller so it can be passed up to user mode
 		 * (in bytes).
+		 * There are always 1024 doorbells per process, so in case
+		 * of 8-byte doorbells, there are two doorbell pages per
+		 * process.
 		 */
-		properties->doorbell_off =
+		*p_doorbell_offset_in_process =
 			(q->properties.doorbell_off * sizeof(uint32_t)) &
 			(kfd_doorbell_process_slice(dev) - 1);
 
@@ -370,7 +374,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
 		/* destroy kernel queue (DIQ) */
 		dqm = pqn->kq->dev->dqm;
 		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
-		kernel_queue_uninit(pqn->kq);
+		kernel_queue_uninit(pqn->kq, false);
 	}
 
 	if (pqn->q) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 69bd062..203c823 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -486,6 +486,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
 			dev->node_props.num_sdma_engines);
 	sysfs_show_32bit_prop(buffer, "num_sdma_xgmi_engines",
 			dev->node_props.num_sdma_xgmi_engines);
+	sysfs_show_32bit_prop(buffer, "num_sdma_queues_per_engine",
+			dev->node_props.num_sdma_queues_per_engine);
+	sysfs_show_32bit_prop(buffer, "num_cp_queues",
+			dev->node_props.num_cp_queues);
 
 	if (dev->gpu) {
 		log_max_watch_addr =
@@ -1309,9 +1313,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines;
 	dev->node_props.num_sdma_xgmi_engines =
 				gpu->device_info->num_xgmi_sdma_engines;
+	dev->node_props.num_sdma_queues_per_engine =
+				gpu->device_info->num_sdma_queues_per_engine;
 	dev->node_props.num_gws = (hws_gws_support &&
 		dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
 		amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
+	dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);
 
 	kfd_fill_mem_clk_max_info(dev);
 	kfd_fill_iolink_non_crat_info(dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 15843e0..74e9b16 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -81,6 +81,8 @@ struct kfd_node_properties {
 	int32_t  drm_render_minor;
 	uint32_t num_sdma_engines;
 	uint32_t num_sdma_xgmi_engines;
+	uint32_t num_sdma_queues_per_engine;
+	uint32_t num_cp_queues;
 	char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
 };
 
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index ae161fe..87858bc 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -6,43 +6,16 @@
 	bool "AMD DC - Enable new display engine"
 	default y
 	select SND_HDA_COMPONENT if SND_HDA_CORE
-	select DRM_AMD_DC_DCN1_0 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+	select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
 	help
 	  Choose this option if you want to use the new display engine
 	  support for AMDGPU. This adds required support for Vega and
 	  Raven ASICs.
 
-config DRM_AMD_DC_DCN1_0
+config DRM_AMD_DC_DCN
 	def_bool n
 	help
-	  RV family support for display engine
-
-config DRM_AMD_DC_DCN2_0
-	bool "DCN 2.0 family"
-	default y
-	depends on DRM_AMD_DC && X86
-	depends on DRM_AMD_DC_DCN1_0
-	help
-	  Choose this option if you want to have
-	  Navi support for display engine
-
-config DRM_AMD_DC_DCN2_1
-	bool "DCN 2.1 family"
-	depends on DRM_AMD_DC && X86
-	depends on DRM_AMD_DC_DCN2_0
-	help
-	  Choose this option if you want to have
-	  Renoir support for display engine
-
-config DRM_AMD_DC_DSC_SUPPORT
-	bool "DSC support"
-	default y
-	depends on DRM_AMD_DC && X86
-	depends on DRM_AMD_DC_DCN1_0
-	depends on DRM_AMD_DC_DCN2_0
-	help
-	  Choose this option if you want to have
-	  Dynamic Stream Compression support
+	  Raven, Navi and Renoir family support for display engine
 
 config DRM_AMD_DC_HDCP
 	bool "Enable HDCP support in DC"
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 36b3d6a..2633de7 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -34,6 +34,8 @@
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dmub/inc
+
 ifdef CONFIG_DRM_AMD_DC_HDCP
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp
 endif
@@ -41,7 +43,7 @@
 #TODO: remove when Timing Sync feature is complete
 subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
 
-DAL_LIBS = amdgpu_dm dc	modules/freesync modules/color modules/info_packet modules/power
+DAL_LIBS = amdgpu_dm dc	modules/freesync modules/color modules/info_packet modules/power dmub/src
 
 ifdef CONFIG_DRM_AMD_DC_HDCP
 DAL_LIBS += modules/hdcp
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 803e59d..2795415 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -30,6 +30,10 @@
 #include "dc.h"
 #include "dc/inc/core_types.h"
 #include "dal_asic_id.h"
+#include "dmub/inc/dmub_srv.h"
+#include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
+#include "dc/dc_dmub_srv.h"
 
 #include "vid.h"
 #include "amdgpu.h"
@@ -39,6 +43,7 @@
 #include "amdgpu_dm.h"
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 #include "amdgpu_dm_hdcp.h"
+#include <drm/drm_hdcp.h>
 #endif
 #include "amdgpu_pm.h"
 
@@ -72,7 +77,7 @@
 #include <drm/drm_audio_component.h>
 #include <drm/drm_hdcp.h>
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
 
 #include "dcn/dcn_1_0_offset.h"
@@ -87,9 +92,18 @@
 #include "modules/power/power_helpers.h"
 #include "modules/inc/mod_info_packet.h"
 
+#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
+
 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
 
+/* Number of bytes in PSP header for firmware. */
+#define PSP_HEADER_BYTES 0x100
+
+/* Number of bytes in PSP footer for firmware. */
+#define PSP_FOOTER_BYTES 0x100
+
 /**
  * DOC: overview
  *
@@ -478,6 +492,70 @@ static void dm_crtc_high_irq(void *interrupt_params)
 	}
 }
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+/**
+ * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
+ * @interrupt_params: interrupt parameters
+ *
+ * Notify DRM's vblank event handler at VSTARTUP
+ *
+ * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
+ * * We are close enough to VUPDATE - the point of no return for hw
+ * * We are in the fixed portion of variable front porch when vrr is enabled
+ * * We are before VUPDATE, where double-buffered vrr registers are swapped
+ *
+ * It is therefore the correct place to signal vblank, send user flip events,
+ * and update VRR.
+ */
+static void dm_dcn_crtc_high_irq(void *interrupt_params)
+{
+	struct common_irq_params *irq_params = interrupt_params;
+	struct amdgpu_device *adev = irq_params->adev;
+	struct amdgpu_crtc *acrtc;
+	struct dm_crtc_state *acrtc_state;
+	unsigned long flags;
+
+	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
+
+	if (!acrtc)
+		return;
+
+	acrtc_state = to_dm_crtc_state(acrtc->base.state);
+
+	DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+				amdgpu_dm_vrr_active(acrtc_state));
+
+	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+	drm_crtc_handle_vblank(&acrtc->base);
+
+	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+
+	if (acrtc_state->vrr_params.supported &&
+	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+		mod_freesync_handle_v_update(
+			adev->dm.freesync_module,
+			acrtc_state->stream,
+			&acrtc_state->vrr_params);
+
+		dc_stream_adjust_vmin_vmax(
+			adev->dm.dc,
+			acrtc_state->stream,
+			&acrtc_state->vrr_params.adjust);
+	}
+
+	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
+		if (acrtc->event) {
+			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
+			acrtc->event = NULL;
+			drm_crtc_vblank_put(&acrtc->base);
+		}
+		acrtc->pflip_status = AMDGPU_FLIP_NONE;
+	}
+
+	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+}
+#endif
+
 static int dm_set_clockgating_state(void *handle,
 		  enum amd_clockgating_state state)
 {
@@ -667,12 +745,126 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
 	}
 }
 
+static int dm_dmub_hw_init(struct amdgpu_device *adev)
+{
+	const struct dmcub_firmware_header_v1_0 *hdr;
+	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+	const struct firmware *dmub_fw = adev->dm.dmub_fw;
+	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+	struct abm *abm = adev->dm.dc->res_pool->abm;
+	struct dmub_srv_hw_params hw_params;
+	enum dmub_status status;
+	const unsigned char *fw_inst_const, *fw_bss_data;
+	uint32_t i, fw_inst_const_size, fw_bss_data_size;
+	bool has_hw_support;
+
+	if (!dmub_srv)
+		/* DMUB isn't supported on the ASIC. */
+		return 0;
+
+	if (!fb_info) {
+		DRM_ERROR("No framebuffer info for DMUB service.\n");
+		return -EINVAL;
+	}
+
+	if (!dmub_fw) {
+		/* Firmware required for DMUB support. */
+		DRM_ERROR("No firmware provided for DMUB.\n");
+		return -EINVAL;
+	}
+
+	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
+	if (status != DMUB_STATUS_OK) {
+		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+		return -EINVAL;
+	}
+
+	if (!has_hw_support) {
+		DRM_INFO("DMUB unsupported on ASIC\n");
+		return 0;
+	}
+
+	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
+
+	fw_inst_const = dmub_fw->data +
+			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+			PSP_HEADER_BYTES;
+
+	fw_bss_data = dmub_fw->data +
+		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+		      le32_to_cpu(hdr->inst_const_bytes);
+
+	/* Copy firmware and bios info into FB memory. */
+	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+
+	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+
+	memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+	       fw_inst_const_size);
+	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
+	       fw_bss_data_size);
+	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
+	       adev->bios_size);
+
+	/* Reset regions that need to be reset. */
+	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
+	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
+
+	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
+	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
+
+	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
+	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
+
+	/* Initialize hardware. */
+	memset(&hw_params, 0, sizeof(hw_params));
+	hw_params.fb_base = adev->gmc.fb_start;
+	hw_params.fb_offset = adev->gmc.aper_base;
+
+	if (dmcu)
+		hw_params.psp_version = dmcu->psp_version;
+
+	for (i = 0; i < fb_info->num_fb; ++i)
+		hw_params.fb[i] = &fb_info->fb[i];
+
+	status = dmub_srv_hw_init(dmub_srv, &hw_params);
+	if (status != DMUB_STATUS_OK) {
+		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+		return -EINVAL;
+	}
+
+	/* Wait for firmware load to finish. */
+	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+	if (status != DMUB_STATUS_OK)
+		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+
+	/* Init DMCU and ABM if available. */
+	if (dmcu && abm) {
+		dmcu->funcs->dmcu_init(dmcu);
+		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
+	}
+
+	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+	if (!adev->dm.dc->ctx->dmub_srv) {
+		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+		return -ENOMEM;
+	}
+
+	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+		 adev->dm.dmcub_fw_version);
+
+	return 0;
+}
+
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
 	struct dc_init_data init_data;
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 	struct dc_callback_init init_params;
 #endif
+	int r;
 
 	adev->dm.ddev = adev->ddev;
 	adev->dm.adev = adev;
@@ -714,13 +906,16 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
 
-	/*
-	 * TODO debug why this doesn't work on Raven
-	 */
-	if (adev->flags & AMD_IS_APU &&
-	    adev->asic_type >= CHIP_CARRIZO &&
-	    adev->asic_type < CHIP_RAVEN)
+	switch (adev->asic_type) {
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+	case CHIP_RAVEN:
+	case CHIP_RENOIR:
 		init_data.flags.gpu_vm_support = true;
+		break;
+	default:
+		break;
+	}
 
 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
 		init_data.flags.fbc_support = true;
@@ -733,9 +928,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 	init_data.flags.power_down_display_on_boot = true;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
-#endif
 
 	/* Display Core create. */
 	adev->dm.dc = dc_create(&init_data);
@@ -747,6 +940,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		goto error;
 	}
 
+	r = dm_dmub_hw_init(adev);
+	if (r) {
+		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+		goto error;
+	}
+
 	dc_hardware_init(adev->dm.dc);
 
 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
@@ -821,6 +1020,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 	if (adev->dm.dc)
 		dc_deinit_callbacks(adev->dm.dc);
 #endif
+	if (adev->dm.dc->ctx->dmub_srv) {
+		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+		adev->dm.dc->ctx->dmub_srv = NULL;
+	}
+
+	if (adev->dm.dmub_bo)
+		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
+				      &adev->dm.dmub_bo_gpu_addr,
+				      &adev->dm.dmub_bo_cpu_addr);
 
 	/* DC Destroy TODO: Replace destroy DAL */
 	if (adev->dm.dc)
@@ -932,9 +1140,160 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
 	return 0;
 }
 
+static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
+{
+	struct amdgpu_device *adev = ctx;
+
+	return dm_read_reg(adev->dm.dc->ctx, address);
+}
+
+static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
+				     uint32_t value)
+{
+	struct amdgpu_device *adev = ctx;
+
+	return dm_write_reg(adev->dm.dc->ctx, address, value);
+}
+
+static int dm_dmub_sw_init(struct amdgpu_device *adev)
+{
+	struct dmub_srv_create_params create_params;
+	struct dmub_srv_region_params region_params;
+	struct dmub_srv_region_info region_info;
+	struct dmub_srv_fb_params fb_params;
+	struct dmub_srv_fb_info *fb_info;
+	struct dmub_srv *dmub_srv;
+	const struct dmcub_firmware_header_v1_0 *hdr;
+	const char *fw_name_dmub;
+	enum dmub_asic dmub_asic;
+	enum dmub_status status;
+	int r;
+
+	switch (adev->asic_type) {
+	case CHIP_RENOIR:
+		dmub_asic = DMUB_ASIC_DCN21;
+		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+		break;
+
+	default:
+		/* ASIC doesn't support DMUB. */
+		return 0;
+	}
+
+	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
+	if (r) {
+		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
+		return 0;
+	}
+
+	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
+	if (r) {
+		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
+		return 0;
+	}
+
+	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+		DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
+		return 0;
+	}
+
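+	/* Register the DMCUB ucode with the PSP loader and account for the
+	 * instruction/constant region in the total firmware size.
+	 */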
+	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
+		AMDGPU_UCODE_ID_DMCUB;
+	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
+	adev->firmware.fw_size +=
+		ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
+
+	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+	DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+		 adev->dm.dmcub_fw_version);
+
+	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
+	dmub_srv = adev->dm.dmub_srv;
+
+	if (!dmub_srv) {
+		DRM_ERROR("Failed to allocate DMUB service!\n");
+		return -ENOMEM;
+	}
+
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.user_ctx = adev;
+	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
+	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
+	create_params.asic = dmub_asic;
+
+	/* Create the DMUB service. */
+	status = dmub_srv_create(dmub_srv, &create_params);
+	if (status != DMUB_STATUS_OK) {
+		DRM_ERROR("Error creating DMUB service: %d\n", status);
+		return -EINVAL;
+	}
+
+	/* Calculate the size of all the regions for the DMUB service. */
+	memset(&region_params, 0, sizeof(region_params));
+
+	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+	region_params.vbios_size = adev->bios_size;
+	region_params.fw_bss_data =
+		adev->dm.dmub_fw->data +
+		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+		le32_to_cpu(hdr->inst_const_bytes);
+
+	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+					   &region_info);
+
+	if (status != DMUB_STATUS_OK) {
+		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+		return -EINVAL;
+	}
+
+	/*
+	 * Allocate a framebuffer based on the total size of all the regions.
+	 * TODO: Move this into GART.
+	 */
+	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
+				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
+				    &adev->dm.dmub_bo_gpu_addr,
+				    &adev->dm.dmub_bo_cpu_addr);
+	if (r)
+		return r;
+
+	/* Rebase the regions on the framebuffer address. */
+	memset(&fb_params, 0, sizeof(fb_params));
+	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
+	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
+	fb_params.region_info = &region_info;
+
+	adev->dm.dmub_fb_info =
+		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+	fb_info = adev->dm.dmub_fb_info;
+
+	if (!fb_info) {
+		DRM_ERROR(
+			"Failed to allocate framebuffer info for DMUB service!\n");
+		return -ENOMEM;
+	}
+
+	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
+	if (status != DMUB_STATUS_OK) {
+		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int dm_sw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = dm_dmub_sw_init(adev);
+	if (r)
+		return r;
 
 	return load_dmcu_fw(adev);
 }
@@ -943,6 +1302,19 @@ static int dm_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	kfree(adev->dm.dmub_fb_info);
+	adev->dm.dmub_fb_info = NULL;
+
+	if (adev->dm.dmub_srv) {
+		dmub_srv_destroy(adev->dm.dmub_srv);
+		adev->dm.dmub_srv = NULL;
+	}
+
+	if (adev->dm.dmub_fw) {
+		release_firmware(adev->dm.dmub_fw);
+		adev->dm.dmub_fw = NULL;
+	}
+
 	if(adev->dm.fw_dmcu) {
 		release_firmware(adev->dm.fw_dmcu);
 		adev->dm.fw_dmcu = NULL;
@@ -1235,7 +1607,7 @@ static int dm_resume(void *handle)
 	struct dm_plane_state *dm_new_plane_state;
 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
 	enum dc_connection_type new_connection_type = dc_connection_none;
-	int i;
+	int i, r;
 
 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
 	dc_release_state(dm_state->context);
@@ -1243,6 +1615,11 @@ static int dm_resume(void *handle)
 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
 	dc_resource_state_construct(dm->dc, dm_state->context);
 
+	/* Before powering on DC we need to re-initialize DMUB. */
+	r = dm_dmub_hw_init(adev);
+	if (r)
+		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
 	/* power on hardware */
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
 
@@ -1868,7 +2245,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
 	return 0;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 /* Register IRQ sources and initialize IRQ callbacks */
 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 {
@@ -1914,35 +2291,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 		c_irq_params->irq_src = int_params.irq_source;
 
 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
-				dm_crtc_high_irq, c_irq_params);
-	}
-
-	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
-	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
-	 * to trigger at end of each vblank, regardless of state of the lock,
-	 * matching DCE behaviour.
-	 */
-	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
-	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
-	     i++) {
-		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
-
-		if (r) {
-			DRM_ERROR("Failed to add vupdate irq id!\n");
-			return r;
-		}
-
-		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
-		int_params.irq_source =
-			dc_interrupt_to_irq_source(dc, i, 0);
-
-		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
-
-		c_irq_params->adev = adev;
-		c_irq_params->irq_src = int_params.irq_source;
-
-		amdgpu_dm_irq_register_interrupt(adev, &int_params,
-				dm_vupdate_high_irq, c_irq_params);
+				dm_dcn_crtc_high_irq, c_irq_params);
 	}
 
 	/* Use GRPH_PFLIP interrupt */
@@ -2457,16 +2806,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 			goto fail;
 		}
 		break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	case CHIP_RAVEN:
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	case CHIP_NAVI12:
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	case CHIP_RENOIR:
-#endif
 		if (dcn10_register_irq_handlers(dm->adev)) {
 			DRM_ERROR("DM: Failed to initialize IRQ\n");
 			goto fail;
@@ -2612,14 +2957,13 @@ static int dm_early_init(void *handle)
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 6;
 		break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	case CHIP_RAVEN:
 		adev->mode_info.num_crtc = 4;
 		adev->mode_info.num_hpd = 4;
 		adev->mode_info.num_dig = 4;
 		break;
 #endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	case CHIP_NAVI10:
 	case CHIP_NAVI12:
 		adev->mode_info.num_crtc = 6;
@@ -2631,14 +2975,11 @@ static int dm_early_init(void *handle)
 		adev->mode_info.num_hpd = 5;
 		adev->mode_info.num_dig = 5;
 		break;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	case CHIP_RENOIR:
 		adev->mode_info.num_crtc = 4;
 		adev->mode_info.num_hpd = 4;
 		adev->mode_info.num_dig = 4;
 		break;
-#endif
 	default:
 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
 		return -EINVAL;
@@ -2931,14 +3272,10 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
 	if (adev->asic_type == CHIP_VEGA10 ||
 	    adev->asic_type == CHIP_VEGA12 ||
 	    adev->asic_type == CHIP_VEGA20 ||
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	    adev->asic_type == CHIP_NAVI10 ||
 	    adev->asic_type == CHIP_NAVI14 ||
 	    adev->asic_type == CHIP_NAVI12 ||
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	    adev->asic_type == CHIP_RENOIR ||
-#endif
 	    adev->asic_type == CHIP_RAVEN) {
 		/* Fill GFX9 params */
 		tiling_info->gfx9.num_pipes =
@@ -3256,12 +3593,26 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
 
 static enum dc_color_depth
 convert_color_depth_from_display_info(const struct drm_connector *connector,
-				      const struct drm_connector_state *state)
+				      const struct drm_connector_state *state,
+				      bool is_y420)
 {
-	uint8_t bpc = (uint8_t)connector->display_info.bpc;
+	uint8_t bpc;
 
-	/* Assume 8 bpc by default if no bpc is specified. */
-	bpc = bpc ? bpc : 8;
+	if (is_y420) {
+		bpc = 8;
+
+		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
+		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
+			bpc = 16;
+		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
+			bpc = 12;
+		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
+			bpc = 10;
+	} else {
+		bpc = (uint8_t)connector->display_info.bpc;
+		/* Assume 8 bpc by default if no bpc is specified. */
+		bpc = bpc ? bpc : 8;
+	}
 
 	if (!state)
 		state = connector->state;
@@ -3427,7 +3778,8 @@ static void fill_stream_properties_from_drm_display_mode(
 
 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
 	timing_out->display_color_depth = convert_color_depth_from_display_info(
-		connector, connector_state);
+		connector, connector_state,
+		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
 	timing_out->scan_type = SCANNING_TYPE_NODATA;
 	timing_out->hdmi_vic = 0;
 
@@ -3645,10 +3997,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
 	int mode_refresh;
 	int preferred_refresh = 0;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	struct dsc_dec_dpcd_caps dsc_caps;
-	uint32_t link_bandwidth_kbps;
 #endif
+	uint32_t link_bandwidth_kbps;
 
 	struct dc_sink *sink = NULL;
 	if (aconnector == NULL) {
@@ -3723,16 +4075,19 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		fill_stream_properties_from_drm_display_mode(stream,
 			&mode, &aconnector->base, con_state, old_stream);
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	stream->timing.flags.DSC = 0;
 
 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
-		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
 				      &dsc_caps);
+#endif
 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
 							     dc_link_get_link_cap(aconnector->dc_link));
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 		if (dsc_caps.is_dsc_supported)
 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
 						  &dsc_caps,
@@ -3741,8 +4096,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 						  &stream->timing,
 						  &stream->timing.dsc_cfg))
 				stream->timing.flags.DSC = 1;
-	}
 #endif
+	}
 
 	update_stream_scaling_settings(&mode, dm_state, stream);
 
@@ -3762,7 +4117,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 			struct dmcu *dmcu = core_dc->res_pool->dmcu;
 
 			stream->psr_version = dmcu->dmcu_version.psr_version;
-			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
+			mod_build_vsc_infopacket(stream,
+					&stream->vsc_infopacket,
+					&stream->use_vsc_sdp_for_colorimetry);
 		}
 	}
 finish:
@@ -3853,6 +4210,10 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
 	struct amdgpu_device *adev = crtc->dev->dev_private;
 	int rc;
 
+	/* Do not set vupdate for DCN hardware */
+	if (adev->family > AMDGPU_FAMILY_AI)
+		return 0;
+
 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
 
 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -4096,7 +4457,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
 		state->underscan_hborder = 0;
 		state->underscan_vborder = 0;
 		state->base.max_requested_bpc = 8;
-
+		state->vcpi_slots = 0;
+		state->pbn = 0;
 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
 			state->abm_level = amdgpu_dm_abm_level;
 
@@ -4124,7 +4486,8 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
 	new_state->underscan_enable = state->underscan_enable;
 	new_state->underscan_hborder = state->underscan_hborder;
 	new_state->underscan_vborder = state->underscan_vborder;
-
+	new_state->vcpi_slots = state->vcpi_slots;
+	new_state->pbn = state->pbn;
 	return &new_state->base;
 }
 
@@ -4521,10 +4884,69 @@ static void dm_encoder_helper_disable(struct drm_encoder *encoder)
 
 }
 
+static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
+{
+	switch (display_color_depth) {
+	case COLOR_DEPTH_666:
+		return 6;
+	case COLOR_DEPTH_888:
+		return 8;
+	case COLOR_DEPTH_101010:
+		return 10;
+	case COLOR_DEPTH_121212:
+		return 12;
+	case COLOR_DEPTH_141414:
+		return 14;
+	case COLOR_DEPTH_161616:
+		return 16;
+	default:
+		break;
+	}
+	return 0;
+}
+
 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 					  struct drm_crtc_state *crtc_state,
 					  struct drm_connector_state *conn_state)
 {
+	struct drm_atomic_state *state = crtc_state->state;
+	struct drm_connector *connector = conn_state->connector;
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
+	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+	struct drm_dp_mst_topology_mgr *mst_mgr;
+	struct drm_dp_mst_port *mst_port;
+	enum dc_color_depth color_depth;
+	int clock, bpp = 0;
+	bool is_y420 = false;
+
+	if (!aconnector->port || !aconnector->dc_sink)
+		return 0;
+
+	mst_port = aconnector->port;
+	mst_mgr = &aconnector->mst_port->mst_mgr;
+
+	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
+		return 0;
+
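+	/* For duplicated states (e.g. the suspend/resume helpers) keep the
+	 * previously computed pbn rather than recalculating it here.
+	 */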
+	if (!state->duplicated) {
+		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
+				aconnector->force_yuv420_output;
+		color_depth = convert_color_depth_from_display_info(connector, conn_state,
+								    is_y420);
+		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
+		clock = adjusted_mode->clock;
+		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
+	}
+	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
+									   mst_mgr,
+									   mst_port,
+									   dm_new_connector_state->pbn,
+									   0);
+	if (dm_new_connector_state->vcpi_slots < 0) {
+		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
+		return dm_new_connector_state->vcpi_slots;
+	}
 	return 0;
 }
 
@@ -4533,6 +4955,71 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
 	.atomic_check = dm_encoder_helper_atomic_check
 };
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+					    struct dc_state *dc_state)
+{
+	struct dc_stream_state *stream = NULL;
+	struct drm_connector *connector;
+	struct drm_connector_state *new_con_state, *old_con_state;
+	struct amdgpu_dm_connector *aconnector;
+	struct dm_connector_state *dm_conn_state;
+	int i, j, clock, bpp;
+	int vcpi, pbn_div, pbn = 0;
+
+	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+
+		aconnector = to_amdgpu_dm_connector(connector);
+
+		if (!aconnector->port)
+			continue;
+
+		if (!new_con_state || !new_con_state->crtc)
+			continue;
+
+		dm_conn_state = to_dm_connector_state(new_con_state);
+
+		for (j = 0; j < dc_state->stream_count; j++) {
+			stream = dc_state->streams[j];
+			if (!stream)
+				continue;
+
+			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
+				break;
+
+			stream = NULL;
+		}
+
+		if (!stream)
+			continue;
+
+		if (stream->timing.flags.DSC != 1) {
+			drm_dp_mst_atomic_enable_dsc(state,
+						     aconnector->port,
+						     dm_conn_state->pbn,
+						     0,
+						     false);
+			continue;
+		}
+
+		pbn_div = dm_mst_get_pbn_divider(stream->link);
+		bpp = stream->timing.dsc_cfg.bits_per_pixel;
+		clock = stream->timing.pix_clk_100hz / 10;
+		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
+		vcpi = drm_dp_mst_atomic_enable_dsc(state,
+						    aconnector->port,
+						    pbn, pbn_div,
+						    true);
+		if (vcpi < 0)
+			return vcpi;
+
+		dm_conn_state->pbn = pbn;
+		dm_conn_state->vcpi_slots = vcpi;
+	}
+	return 0;
+}
+#endif
+
 static void dm_drm_plane_reset(struct drm_plane *plane)
 {
 	struct dm_plane_state *amdgpu_state = NULL;
@@ -5195,9 +5682,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 
 	drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
 
-	/* This defaults to the max in the range, but we want 8bpc. */
-	aconnector->base.state->max_bpc = 8;
-	aconnector->base.state->max_requested_bpc = 8;
+	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
+	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
+	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
 
 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
 	    dc_is_dmcu_initialized(adev->dm.dc)) {
@@ -5216,7 +5703,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 			&aconnector->base);
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 		if (adev->asic_type >= CHIP_RAVEN)
-			drm_connector_attach_content_protection_property(&aconnector->base, false);
+			drm_connector_attach_content_protection_property(&aconnector->base, true);
 #endif
 	}
 }
@@ -5325,11 +5812,12 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 
 	connector_type = to_drm_connector_type(link->connector_signal);
 
-	res = drm_connector_init(
+	res = drm_connector_init_with_ddc(
 			dm->ddev,
 			&aconnector->base,
 			&amdgpu_dm_connector_funcs,
-			connector_type);
+			connector_type,
+			&i2c->base);
 
 	if (res) {
 		DRM_ERROR("connector_init failed\n");
@@ -5467,6 +5955,12 @@ static bool is_content_protection_different(struct drm_connector_state *state,
 {
 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
+	if (old_state->hdcp_content_type != state->hdcp_content_type &&
+	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		return true;
+	}
+
 	/* CP is being re enabled, ignore this */
 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
@@ -5495,17 +5989,6 @@ static bool is_content_protection_different(struct drm_connector_state *state,
 	return false;
 }
 
-static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector,
-				      struct hdcp_workqueue *hdcp_w)
-{
-	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
-	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
-		hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector);
-	else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
-		hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index);
-
-}
 #endif
 static void remove_stream(struct amdgpu_device *adev,
 			  struct amdgpu_crtc *acrtc,
@@ -6475,7 +6958,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		}
 
 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
-			update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue);
+			hdcp_update_display(
+				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
+				new_con_state->hdcp_content_type,
+				new_con_state->content_protection ==
+					DRM_MODE_CONTENT_PROTECTION_DESIRED);
 	}
 #endif
 
@@ -7265,31 +7752,34 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
 	int i, j, num_plane, ret = 0;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
-	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+	struct drm_crtc *new_plane_crtc;
 	struct drm_plane *plane;
 
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
 	struct dc_stream_status *status = NULL;
-
-	struct dc_surface_update *updates;
 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
+	struct surface_info_bundle {
+		struct dc_surface_update surface_updates[MAX_SURFACES];
+		struct dc_plane_info plane_infos[MAX_SURFACES];
+		struct dc_scaling_info scaling_infos[MAX_SURFACES];
+		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+		struct dc_stream_update stream_update;
+	} *bundle;
 
-	updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
+	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
 
-	if (!updates) {
-		DRM_ERROR("Failed to allocate plane updates\n");
+	if (!bundle) {
+		DRM_ERROR("Failed to allocate update bundle\n");
 		/* Set type to FULL to avoid crashing in DC */
 		update_type = UPDATE_TYPE_FULL;
 		goto cleanup;
 	}
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-		struct dc_scaling_info scaling_info;
-		struct dc_stream_update stream_update;
 
-		memset(&stream_update, 0, sizeof(stream_update));
+		memset(bundle, 0, sizeof(struct surface_info_bundle));
 
 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -7306,12 +7796,12 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
 			const struct amdgpu_framebuffer *amdgpu_fb =
 				to_amdgpu_framebuffer(new_plane_state->fb);
-			struct dc_plane_info plane_info;
-			struct dc_flip_addrs flip_addr;
+			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
+			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
+			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
 			uint64_t tiling_flags;
 
 			new_plane_crtc = new_plane_state->crtc;
-			old_plane_crtc = old_plane_state->crtc;
 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
 
@@ -7326,49 +7816,48 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
 			if (crtc != new_plane_crtc)
 				continue;
 
-			updates[num_plane].surface = new_dm_plane_state->dc_state;
+			bundle->surface_updates[num_plane].surface =
+					new_dm_plane_state->dc_state;
 
 			if (new_crtc_state->mode_changed) {
-				stream_update.dst = new_dm_crtc_state->stream->dst;
-				stream_update.src = new_dm_crtc_state->stream->src;
+				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
+				bundle->stream_update.src = new_dm_crtc_state->stream->src;
 			}
 
 			if (new_crtc_state->color_mgmt_changed) {
-				updates[num_plane].gamma =
+				bundle->surface_updates[num_plane].gamma =
 						new_dm_plane_state->dc_state->gamma_correction;
-				updates[num_plane].in_transfer_func =
+				bundle->surface_updates[num_plane].in_transfer_func =
 						new_dm_plane_state->dc_state->in_transfer_func;
-				stream_update.gamut_remap =
+				bundle->stream_update.gamut_remap =
 						&new_dm_crtc_state->stream->gamut_remap_matrix;
-				stream_update.output_csc_transform =
+				bundle->stream_update.output_csc_transform =
 						&new_dm_crtc_state->stream->csc_color_matrix;
-				stream_update.out_transfer_func =
+				bundle->stream_update.out_transfer_func =
 						new_dm_crtc_state->stream->out_transfer_func;
 			}
 
 			ret = fill_dc_scaling_info(new_plane_state,
-						   &scaling_info);
+						   scaling_info);
 			if (ret)
 				goto cleanup;
 
-			updates[num_plane].scaling_info = &scaling_info;
+			bundle->surface_updates[num_plane].scaling_info = scaling_info;
 
 			if (amdgpu_fb) {
 				ret = get_fb_info(amdgpu_fb, &tiling_flags);
 				if (ret)
 					goto cleanup;
 
-				memset(&flip_addr, 0, sizeof(flip_addr));
-
 				ret = fill_dc_plane_info_and_addr(
 					dm->adev, new_plane_state, tiling_flags,
-					&plane_info,
-					&flip_addr.address);
+					plane_info,
+					&flip_addr->address);
 				if (ret)
 					goto cleanup;
 
-				updates[num_plane].plane_info = &plane_info;
-				updates[num_plane].flip_addr = &flip_addr;
+				bundle->surface_updates[num_plane].plane_info = plane_info;
+				bundle->surface_updates[num_plane].flip_addr = flip_addr;
 			}
 
 			num_plane++;
@@ -7389,14 +7878,15 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
 
 		status = dc_stream_get_status_from_state(old_dm_state->context,
 							 new_dm_crtc_state->stream);
-		stream_update.stream = new_dm_crtc_state->stream;
+		bundle->stream_update.stream = new_dm_crtc_state->stream;
 		/*
 		 * TODO: DC modifies the surface during this call so we need
 		 * to lock here - find a way to do this without locking.
 		 */
 		mutex_lock(&dm->dc_lock);
-		update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
-								  &stream_update, status);
+		update_type = dc_check_update_surfaces_for_stream(
+				dc,	bundle->surface_updates, num_plane,
+				&bundle->stream_update, status);
 		mutex_unlock(&dm->dc_lock);
 
 		if (update_type > UPDATE_TYPE_MED) {
@@ -7406,12 +7896,35 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
 	}
 
 cleanup:
-	kfree(updates);
+	kfree(bundle);
 
 	*out_type = update_type;
 	return ret;
 }
 
+static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+	struct drm_connector *connector;
+	struct drm_connector_state *conn_state;
+	struct amdgpu_dm_connector *aconnector = NULL;
+	int i;
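+
+	/* Find the MST connector driven by this CRTC; if one exists, pull the
+	 * other CRTCs on its MST topology into the atomic state as well.
+	 */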
+	for_each_new_connector_in_state(state, connector, conn_state, i) {
+		if (conn_state->crtc != crtc)
+			continue;
+
+		aconnector = to_amdgpu_dm_connector(connector);
+		if (!aconnector->port || !aconnector->mst_port)
+			aconnector = NULL;
+		else
+			break;
+	}
+
+	if (!aconnector)
+		return 0;
+
+	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
+}
+
 /**
  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
  * @dev: The DRM device
@@ -7464,6 +7977,16 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	if (ret)
 		goto fail;
 
+	if (adev->asic_type >= CHIP_NAVI10) {
+		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+				ret = add_affected_mst_dsc_crtcs(state, crtc);
+				if (ret)
+					goto fail;
+			}
+		}
+	}
+
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
 		    !new_crtc_state->color_mgmt_changed &&
@@ -7635,6 +8158,25 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		if (ret)
 			goto fail;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
+			goto fail;
+
+		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
+		if (ret)
+			goto fail;
+#endif
+
+		/*
+		 * Perform validation of MST topology in the state:
+		 * We need to perform MST atomic check before calling
+		 * dc_validate_global_state(), or there is a chance
+		 * to get stuck in an infinite loop and hang eventually.
+		 */
+		ret = drm_dp_mst_atomic_check(state);
+		if (ret)
+			goto fail;
+
 		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
 			ret = -EINVAL;
 			goto fail;
@@ -7861,17 +8403,37 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
 {
 	struct dc_link *link = stream->link;
-	struct dc_static_screen_events triggers = {0};
+	unsigned int vsync_rate_hz = 0;
+	struct dc_static_screen_params params = {0};
+	unsigned int frame_time_microsec;
+	/* Number of static frames before generating an interrupt to enter
+	 * PSR; fail-safe default of 2 frames.
+	 */
+	unsigned int num_frames_static = 2;
 
 	DRM_DEBUG_DRIVER("Enabling psr...\n");
 
-	triggers.cursor_update = true;
-	triggers.overlay_update = true;
-	triggers.surface_update = true;
+	vsync_rate_hz = div64_u64(div64_u64((
+			stream->timing.pix_clk_100hz * 100),
+			stream->timing.v_total),
+			stream->timing.h_total);
 
-	dc_stream_set_static_screen_events(link->ctx->dc,
+	/* Round up: calculate the number of frames such that at least 30 ms
+	 * of time has passed.
+	 */
+	if (vsync_rate_hz != 0) {
+		frame_time_microsec = 1000000 / vsync_rate_hz;
+		num_frames_static = (30000 / frame_time_microsec) + 1;
+	}
+
+	params.triggers.cursor_update = true;
+	params.triggers.overlay_update = true;
+	params.triggers.surface_update = true;
+	params.num_frames = num_frames_static;
+
+	dc_stream_set_static_screen_params(link->ctx->dc,
 					   &stream, 1,
-					   &triggers);
+					   &params);
 
 	return dc_link_set_psr_allow_active(link, true, false);
 }
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 77c5166..7ea9acb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -57,6 +57,8 @@ struct amdgpu_device;
 struct drm_device;
 struct amdgpu_dm_irq_handler_data;
 struct dc;
+struct amdgpu_bo;
+struct dmub_srv;
 
 struct common_irq_params {
 	struct amdgpu_device *adev;
@@ -122,6 +124,57 @@ struct amdgpu_display_manager {
 	struct dc *dc;
 
 	/**
+	 * @dmub_srv:
+	 *
+	 * DMUB service, used for controlling the DMUB on hardware
+	 * that supports it. The pointer to the dmub_srv will be
+	 * NULL on hardware that does not support it.
+	 */
+	struct dmub_srv *dmub_srv;
+
+	/**
+	 * @dmub_fb_info:
+	 *
+	 * Framebuffer regions for the DMUB.
+	 */
+	struct dmub_srv_fb_info *dmub_fb_info;
+
+	/**
+	 * @dmub_fw:
+	 *
+	 * DMUB firmware, required on hardware that has DMUB support.
+	 */
+	const struct firmware *dmub_fw;
+
+	/**
+	 * @dmub_bo:
+	 *
+	 * Buffer object for the DMUB.
+	 */
+	struct amdgpu_bo *dmub_bo;
+
+	/**
+	 * @dmub_bo_gpu_addr:
+	 *
+	 * GPU virtual address for the DMUB buffer object.
+	 */
+	u64 dmub_bo_gpu_addr;
+
+	/**
+	 * @dmub_bo_cpu_addr:
+	 *
+	 * CPU address for the DMUB buffer object.
+	 */
+	void *dmub_bo_cpu_addr;
+
+	/**
+	 * @dmcub_fw_version:
+	 *
+	 * DMCUB firmware version.
+	 */
+	uint32_t dmcub_fw_version;
+
+	/**
 	 * @cgs_device:
 	 *
 	 * The Common Graphics Services device. It provides an interface for
@@ -241,7 +294,6 @@ struct amdgpu_display_manager {
 
 	const struct firmware *fw_dmcu;
 	uint32_t dmcu_fw_version;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	/**
 	 * @soc_bounding_box:
 	 *
@@ -249,7 +301,6 @@ struct amdgpu_display_manager {
 	 * available in FW
 	 */
 	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#endif
 };
 
 struct amdgpu_dm_connector {
@@ -279,6 +330,7 @@ struct amdgpu_dm_connector {
 	struct drm_dp_mst_port *port;
 	struct amdgpu_dm_connector *mst_port;
 	struct amdgpu_encoder *mst_encoder;
+	struct drm_dp_aux *dsc_aux;
 
 	/* TODO see if we can merge with ddc_bus or make a dm_connector */
 	struct amdgpu_i2c_adapter *i2c;
@@ -359,6 +411,8 @@ struct dm_connector_state {
 	bool underscan_enable;
 	bool freesync_capable;
 	uint8_t abm_level;
+	int vcpi_slots;
+	uint64_t pbn;
 };
 
 #define to_dm_connector_state(x)\
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index bdb37e6..f81d343 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -657,6 +657,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
 	dc_link_set_test_pattern(
 		link,
 		test_pattern,
+		DP_TEST_PATTERN_COLOR_SPACE_RGB,
 		&link_training_settings,
 		custom_pattern,
 		10);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 77181dd..0acd340 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -85,44 +85,70 @@ static void process_output(struct hdcp_workqueue *hdcp_work)
 		schedule_delayed_work(&hdcp_work->watchdog_timer_dwork,
 				      msecs_to_jiffies(output.watchdog_timer_delay));
 
+	schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0));
 }
 
-void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector)
+void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
+			 unsigned int link_index,
+			 struct amdgpu_dm_connector *aconnector,
+			 uint8_t content_type,
+			 bool enable_encryption)
 {
 	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
 	struct mod_hdcp_display *display = &hdcp_work[link_index].display;
 	struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+	struct mod_hdcp_display_query query;
 
 	mutex_lock(&hdcp_w->mutex);
 	hdcp_w->aconnector = aconnector;
 
+	query.display = NULL;
+	mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query);
+
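+	/* If the display is already tracked by the HDCP module, remove it and
+	 * re-add it below with the updated content type and encryption state.
+	 */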
+	if (query.display != NULL) {
+		memcpy(display, query.display, sizeof(struct mod_hdcp_display));
+		mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
+
+		hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
+
+		if (enable_encryption) {
+			display->adjust.disable = 0;
+			if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0)
+				hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
+			else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1)
+				hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
+
+			schedule_delayed_work(&hdcp_w->property_validate_dwork,
+					      msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
+		} else {
+			display->adjust.disable = 1;
+			hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+			cancel_delayed_work(&hdcp_w->property_validate_dwork);
+		}
+
+		display->state = MOD_HDCP_DISPLAY_ACTIVE;
+	}
+
 	mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
 
-	schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
-
 	process_output(hdcp_w);
-
 	mutex_unlock(&hdcp_w->mutex);
-
 }
 
-void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index,  unsigned int display_index)
+static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
+			 unsigned int link_index,
+			 struct amdgpu_dm_connector *aconnector)
 {
 	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
 
 	mutex_lock(&hdcp_w->mutex);
+	hdcp_w->aconnector = aconnector;
 
-	mod_hdcp_remove_display(&hdcp_w->hdcp, display_index, &hdcp_w->output);
-
-	cancel_delayed_work(&hdcp_w->property_validate_dwork);
-	hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+	mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
 
 	process_output(hdcp_w);
-
 	mutex_unlock(&hdcp_w->mutex);
-
 }
-
 void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
 {
 	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -190,10 +216,16 @@ static void event_property_update(struct work_struct *work)
 		}
 	}
 
-	if (hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON)
-		drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
-	else
+	if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
+		if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 &&
+		    hdcp_work->encryption_status <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON)
+			drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
+		else if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 &&
+			 hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON)
+			drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
+	} else {
 		drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED);
+	}
 
 
 	mutex_unlock(&hdcp_work->mutex);
@@ -207,6 +239,9 @@ static void event_property_validate(struct work_struct *work)
 	struct mod_hdcp_display_query query;
 	struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
 
+	if (!aconnector)
+		return;
+
 	mutex_lock(&hdcp_work->mutex);
 
 	query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
@@ -217,8 +252,6 @@ static void event_property_validate(struct work_struct *work)
 		schedule_work(&hdcp_work->property_update_work);
 	}
 
-	schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
-
 	mutex_unlock(&hdcp_work->mutex);
 }
 
@@ -284,6 +317,11 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
 	memset(link, 0, sizeof(*link));
 
 	display->index = aconnector->base.index;
+
+	if (config->dpms_off) {
+		hdcp_remove_display(hdcp_work, link_index, aconnector);
+		return;
+	}
 	display->state = MOD_HDCP_DISPLAY_ACTIVE;
 
 	if (aconnector->dc_sink != NULL)
@@ -294,8 +332,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
 	link->dig_be = config->link_enc_inst;
 	link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
 	link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
-	link->adjust.hdcp2.disable = 1;
+	display->adjust.disable = 1;
+	link->adjust.auth_delay = 2;
 
+	hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
 }
 
 struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index d3ba505..6abde86 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -54,9 +54,12 @@ struct hdcp_workqueue {
 	uint8_t max_link;
 };
 
-void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index,
-		      struct amdgpu_dm_connector *aconnector);
-void hdcp_remove_display(struct hdcp_workqueue *work, unsigned int link_index, unsigned int display_index);
+void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
+			 unsigned int link_index,
+			 struct amdgpu_dm_connector *aconnector,
+			 uint8_t content_type,
+			 bool enable_encryption);
+
 void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
 void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
 void hdcp_destroy(struct hdcp_workqueue *work);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 11e5784..318b474 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -37,6 +37,7 @@
 #include "dc.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_dm_irq.h"
+#include "amdgpu_dm_mst_types.h"
 
 #include "dm_helpers.h"
 
@@ -97,8 +98,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
 			(struct edid *) edid->raw_edid);
 
 	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
-	if (sad_count < 0)
-		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
 	if (sad_count <= 0)
 		return result;
 
@@ -182,19 +181,22 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 		bool enable)
 {
 	struct amdgpu_dm_connector *aconnector;
+	struct dm_connector_state *dm_conn_state;
 	struct drm_dp_mst_topology_mgr *mst_mgr;
 	struct drm_dp_mst_port *mst_port;
-	int slots = 0;
 	bool ret;
-	int clock;
-	int bpp = 0;
-	int pbn = 0;
 
 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+	/* Accessing the connector state is required for vcpi_slots allocation.
+	 * It relies on the atomic check blocking before commit, which
+	 * guarantees that the state is not swapped out while still in use in
+	 * commit tail. */
 
 	if (!aconnector || !aconnector->mst_port)
 		return false;
 
+	dm_conn_state = to_dm_connector_state(aconnector->base.state);
+
 	mst_mgr = &aconnector->mst_port->mst_mgr;
 
 	if (!mst_mgr->mst_state)
@@ -203,42 +205,10 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 	mst_port = aconnector->port;
 
 	if (enable) {
-		clock = stream->timing.pix_clk_100hz / 10;
 
-		switch (stream->timing.display_color_depth) {
-
-		case COLOR_DEPTH_666:
-			bpp = 6;
-			break;
-		case COLOR_DEPTH_888:
-			bpp = 8;
-			break;
-		case COLOR_DEPTH_101010:
-			bpp = 10;
-			break;
-		case COLOR_DEPTH_121212:
-			bpp = 12;
-			break;
-		case COLOR_DEPTH_141414:
-			bpp = 14;
-			break;
-		case COLOR_DEPTH_161616:
-			bpp = 16;
-			break;
-		default:
-			ASSERT(bpp != 0);
-			break;
-		}
-
-		bpp = bpp * 3;
-
-		/* TODO need to know link rate */
-
-		pbn = drm_dp_calc_pbn_mode(clock, bpp);
-
-		slots = drm_dp_find_vcpi_slots(mst_mgr, pbn);
-		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots);
-
+		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
+					       dm_conn_state->pbn,
+					       dm_conn_state->vcpi_slots);
 		if (!ret)
 			return false;
 
@@ -246,7 +216,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
 	}
 
-	ret = drm_dp_update_payload_part1(mst_mgr);
+	/* It's OK for this to fail */
+	drm_dp_update_payload_part1(mst_mgr);
 
 	/* mst_mgr->payloads are VC payload notify MST branch using DPCD or
 	 * AUX message. The sequence is slot 1-63 allocated sequence for each
@@ -255,9 +226,6 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 
 	get_payload_table(aconnector, proposed_table);
 
-	if (ret)
-		return false;
-
 	return true;
 }
 
@@ -315,7 +283,6 @@ bool dm_helpers_dp_mst_send_payload_allocation(
 	struct amdgpu_dm_connector *aconnector;
 	struct drm_dp_mst_topology_mgr *mst_mgr;
 	struct drm_dp_mst_port *mst_port;
-	int ret;
 
 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
 
@@ -329,10 +296,8 @@ bool dm_helpers_dp_mst_send_payload_allocation(
 	if (!mst_mgr->mst_state)
 		return false;
 
-	ret = drm_dp_update_payload_part2(mst_mgr);
-
-	if (ret)
-		return false;
+	/* It's OK for this to fail */
+	drm_dp_update_payload_part2(mst_mgr);
 
 	if (!enable)
 		drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
@@ -540,7 +505,6 @@ bool dm_helpers_submit_i2c(
 
 	return result;
 }
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 bool dm_helpers_dp_write_dsc_enable(
 		struct dc_context *ctx,
 		const struct dc_stream_state *stream,
@@ -548,10 +512,25 @@ bool dm_helpers_dp_write_dsc_enable(
 )
 {
 	uint8_t enable_dsc = enable ? 1 : 0;
+	struct amdgpu_dm_connector *aconnector;
 
-	return dm_helpers_dp_write_dpcd(ctx, stream->sink->link, DP_DSC_ENABLE, &enable_dsc, 1);
+	if (!stream)
+		return false;
+
+	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+		if (!aconnector->dsc_aux)
+			return false;
+
+		return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0);
+	}
+
+	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
+		return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
+
+	return false;
 }
-#endif
 
 bool dm_helpers_is_dp_sink_present(struct dc_link *link)
 {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 64445c4..cbcf504 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -111,17 +111,12 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
  */
 static void dm_irq_work_func(struct work_struct *work)
 {
-	struct list_head *entry;
 	struct irq_list_head *irq_list_head =
 		container_of(work, struct irq_list_head, work);
 	struct list_head *handler_list = &irq_list_head->head;
 	struct amdgpu_dm_irq_handler_data *handler_data;
 
-	list_for_each(entry, handler_list) {
-		handler_data = list_entry(entry,
-					  struct amdgpu_dm_irq_handler_data,
-					  list);
-
+	list_for_each_entry(handler_data, handler_list, list) {
 		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
 				handler_data->irq_source);
 
@@ -528,19 +523,13 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
 					 enum dc_irq_source irq_source)
 {
 	struct amdgpu_dm_irq_handler_data *handler_data;
-	struct list_head *entry;
 	unsigned long irq_table_flags;
 
 	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
 
-	list_for_each(
-		entry,
-		&adev->dm.irq_handler_list_high_tab[irq_source]) {
-
-		handler_data = list_entry(entry,
-					  struct amdgpu_dm_irq_handler_data,
-					  list);
-
+	list_for_each_entry(handler_data,
+			    &adev->dm.irq_handler_list_high_tab[irq_source],
+			    list) {
 		/* Call a subcomponent which registered for immediate
 		 * interrupt notification */
 		handler_data->handler(handler_data->handler_arg);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 2bf8534..5672f77 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -25,6 +25,7 @@
 
 #include <linux/version.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_mst_helper.h>
 #include "dm_services.h"
 #include "amdgpu.h"
 #include "amdgpu_dm.h"
@@ -39,6 +40,12 @@
 #if defined(CONFIG_DEBUG_FS)
 #include "amdgpu_dm_debugfs.h"
 #endif
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#include "dc/dcn20/dcn20_resource.h"
+#endif
+
 /* #define TRACE_DPCD */
 
 #ifdef TRACE_DPCD
@@ -180,6 +187,30 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
 };
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
+{
+	struct dc_sink *dc_sink = aconnector->dc_sink;
+	struct drm_dp_mst_port *port = aconnector->port;
+	u8 dsc_caps[16] = { 0 };
+
+	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
+
+	if (!aconnector->dsc_aux)
+		return false;
+
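+	/* Read the 16 DSC capability bytes starting at DP_DSC_SUPPORT and let
+	 * DC parse them into the sink's DSC decoder caps.
+	 */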
+	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
+		return false;
+
+	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+				   dsc_caps, NULL,
+				   &dc_sink->sink_dsc_caps.dsc_dec_caps))
+		return false;
+
+	return true;
+}
+#endif
+
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
 {
 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -222,10 +253,16 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 		/* dc_link_add_remote_sink returns a new reference */
 		aconnector->dc_sink = dc_sink;
 
-		if (aconnector->dc_sink)
+		if (aconnector->dc_sink) {
 			amdgpu_dm_update_freesync_caps(
 					connector, aconnector->edid);
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+			if (!validate_dsc_caps_on_connector(aconnector))
+				memset(&aconnector->dc_sink->sink_dsc_caps,
+				       0, sizeof(aconnector->dc_sink->sink_dsc_caps));
+#endif
+		}
 	}
 
 	drm_connector_update_edid_property(
@@ -254,11 +291,43 @@ dm_dp_mst_detect(struct drm_connector *connector,
 				      aconnector->port);
 }
 
+static int dm_dp_mst_atomic_check(struct drm_connector *connector,
+				struct drm_atomic_state *state)
+{
+	struct drm_connector_state *new_conn_state =
+			drm_atomic_get_new_connector_state(state, connector);
+	struct drm_connector_state *old_conn_state =
+			drm_atomic_get_old_connector_state(state, connector);
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	struct drm_crtc_state *new_crtc_state;
+	struct drm_dp_mst_topology_mgr *mst_mgr;
+	struct drm_dp_mst_port *mst_port;
+
+	mst_port = aconnector->port;
+	mst_mgr = &aconnector->mst_port->mst_mgr;
+
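+	/* Only release this connector's VCPI allocation when it had a CRTC
+	 * before and is either losing it or that CRTC is being disabled
+	 * through a modeset.
+	 */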
+	if (!old_conn_state->crtc)
+		return 0;
+
+	if (new_conn_state->crtc) {
+		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+		if (!new_crtc_state ||
+		    !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
+		    new_crtc_state->enable)
+			return 0;
+	}
+
+	return drm_dp_atomic_release_vcpi_slots(state,
+						mst_mgr,
+						mst_port);
+}
+
 static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
 	.get_modes = dm_dp_mst_get_modes,
 	.mode_valid = amdgpu_dm_connector_mode_valid,
 	.atomic_best_encoder = dm_mst_atomic_best_encoder,
 	.detect_ctx = dm_dp_mst_detect,
+	.atomic_check = dm_dp_mst_atomic_check,
 };
 
 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
@@ -434,3 +503,384 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 		aconnector->connector_id);
 }
 
+int dm_mst_get_pbn_divider(struct dc_link *link)
+{
+	if (!link)
+		return 0;
+
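+	/* PBN available per MTP timeslot: the link bandwidth expressed in PBN
+	 * units (54/64 MB/s each) divided across the 64 timeslots.
+	 */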
+	return dc_link_bandwidth_kbps(link,
+			dc_link_get_link_cap(link)) / (8 * 1000 * 54);
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+
+struct dsc_mst_fairness_params {
+	struct dc_crtc_timing *timing;
+	struct dc_sink *sink;
+	struct dc_dsc_bw_range bw_range;
+	bool compression_possible;
+	struct drm_dp_mst_port *port;
+};
+
+struct dsc_mst_fairness_vars {
+	int pbn;
+	bool dsc_enabled;
+	int bpp_x16;
+};
+
+static int kbps_to_peak_pbn(int kbps)
+{
+	u64 peak_kbps = kbps;
+
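+	/* Add a 0.6% (1006/1000) margin, then convert kbps to PBN units of
+	 * 54/64 MB/s, similar to drm_dp_calc_pbn_mode().
+	 */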
+	peak_kbps *= 1006;
+	peak_kbps = div_u64(peak_kbps, 1000);
+	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+}
+
+static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
+		struct dsc_mst_fairness_vars *vars,
+		int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
+		if (vars[i].dsc_enabled && dc_dsc_compute_config(
+					params[i].sink->ctx->dc->res_pool->dscs[0],
+					&params[i].sink->sink_dsc_caps.dsc_dec_caps,
+					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
+					0,
+					params[i].timing,
+					&params[i].timing->dsc_cfg)) {
+			params[i].timing->flags.DSC = 1;
+			params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
+		} else {
+			params[i].timing->flags.DSC = 0;
+		}
+	}
+}
+
+static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
+{
+	struct dc_dsc_config dsc_config;
+	u64 kbps;
+
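+	/* Roughly the inverse of kbps_to_peak_pbn(): strip the 0.6% margin
+	 * (hence 994 rather than 1000) and convert PBN back to kbps.
+	 */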
+	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+	dc_dsc_compute_config(
+			param.sink->ctx->dc->res_pool->dscs[0],
+			&param.sink->sink_dsc_caps.dsc_dec_caps,
+			param.sink->ctx->dc->debug.dsc_min_slice_height_override,
+			(int) kbps, param.timing, &dsc_config);
+
+	return dsc_config.bits_per_pixel;
+}
+
+static void increase_dsc_bpp(struct drm_atomic_state *state,
+			     struct dc_link *dc_link,
+			     struct dsc_mst_fairness_params *params,
+			     struct dsc_mst_fairness_vars *vars,
+			     int count)
+{
+	int i;
+	bool bpp_increased[MAX_PIPES];
+	int initial_slack[MAX_PIPES];
+	int min_initial_slack;
+	int next_index;
+	int remaining_to_increase = 0;
+	int pbn_per_timeslot;
+	int link_timeslots_used;
+	int fair_pbn_alloc;
+
+	for (i = 0; i < count; i++) {
+		if (vars[i].dsc_enabled) {
+			initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
+			bpp_increased[i] = false;
+			remaining_to_increase += 1;
+		} else {
+			initial_slack[i] = 0;
+			bpp_increased[i] = true;
+		}
+	}
+
+	pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
+			dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);
+
+	while (remaining_to_increase) {
+		next_index = -1;
+		min_initial_slack = -1;
+		for (i = 0; i < count; i++) {
+			if (!bpp_increased[i]) {
+				if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
+					min_initial_slack = initial_slack[i];
+					next_index = i;
+				}
+			}
+		}
+
+		if (next_index == -1)
+			break;
+
+		link_timeslots_used = 0;
+
+		for (i = 0; i < count; i++)
+			link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);
+
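+		/* Split the free timeslots (63 usable on the link) evenly among
+		 * the streams still waiting for a bpp increase.
+		 */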
+		fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;
+
+		if (initial_slack[next_index] > fair_pbn_alloc) {
+			vars[next_index].pbn += fair_pbn_alloc;
+			if (drm_dp_atomic_find_vcpi_slots(state,
+							  params[next_index].port->mgr,
+							  params[next_index].port,
+							  vars[next_index].pbn,
+							  dm_mst_get_pbn_divider(dc_link)) < 0)
+				return;
+			if (!drm_dp_mst_atomic_check(state)) {
+				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
+			} else {
+				vars[next_index].pbn -= fair_pbn_alloc;
+				if (drm_dp_atomic_find_vcpi_slots(state,
+								  params[next_index].port->mgr,
+								  params[next_index].port,
+								  vars[next_index].pbn,
+								  dm_mst_get_pbn_divider(dc_link)) < 0)
+					return;
+			}
+		} else {
+			vars[next_index].pbn += initial_slack[next_index];
+			if (drm_dp_atomic_find_vcpi_slots(state,
+							  params[next_index].port->mgr,
+							  params[next_index].port,
+							  vars[next_index].pbn,
+							  dm_mst_get_pbn_divider(dc_link)) < 0)
+				return;
+			if (!drm_dp_mst_atomic_check(state)) {
+				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
+			} else {
+				vars[next_index].pbn -= initial_slack[next_index];
+				if (drm_dp_atomic_find_vcpi_slots(state,
+								  params[next_index].port->mgr,
+								  params[next_index].port,
+								  vars[next_index].pbn,
+								  dm_mst_get_pbn_divider(dc_link)) < 0)
+					return;
+			}
+		}
+
+		bpp_increased[next_index] = true;
+		remaining_to_increase--;
+	}
+}
+
+static void try_disable_dsc(struct drm_atomic_state *state,
+			    struct dc_link *dc_link,
+			    struct dsc_mst_fairness_params *params,
+			    struct dsc_mst_fairness_vars *vars,
+			    int count)
+{
+	int i;
+	bool tried[MAX_PIPES];
+	int kbps_increase[MAX_PIPES];
+	int max_kbps_increase;
+	int next_index;
+	int remaining_to_try = 0;
+
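+	/* kbps_increase is the extra bandwidth a stream would need to drop DSC and run uncompressed */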
+	for (i = 0; i < count; i++) {
+		if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) {
+			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
+			tried[i] = false;
+			remaining_to_try += 1;
+		} else {
+			kbps_increase[i] = 0;
+			tried[i] = true;
+		}
+	}
+
+	while (remaining_to_try) {
+		next_index = -1;
+		max_kbps_increase = -1;
+		for (i = 0; i < count; i++) {
+			if (!tried[i]) {
+				if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
+					max_kbps_increase = kbps_increase[i];
+					next_index = i;
+				}
+			}
+		}
+
+		if (next_index == -1)
+			break;
+
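+		/* Tentatively claim the full uncompressed rate and check whether the MST topology still fits */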
+		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
+		if (drm_dp_atomic_find_vcpi_slots(state,
+						  params[next_index].port->mgr,
+						  params[next_index].port,
+						  vars[next_index].pbn,
+						  0) < 0)
+			return;
+
+		if (!drm_dp_mst_atomic_check(state)) {
+			vars[next_index].dsc_enabled = false;
+			vars[next_index].bpp_x16 = 0;
+		} else {
+			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
+			if (drm_dp_atomic_find_vcpi_slots(state,
+							  params[next_index].port->mgr,
+							  params[next_index].port,
+							  vars[next_index].pbn,
+							  dm_mst_get_pbn_divider(dc_link)) < 0)
+				return;
+		}
+
+		tried[next_index] = true;
+		remaining_to_try--;
+	}
+}
+
+static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+					     struct dc_state *dc_state,
+					     struct dc_link *dc_link)
+{
+	int i;
+	struct dc_stream_state *stream;
+	struct dsc_mst_fairness_params params[MAX_PIPES];
+	struct dsc_mst_fairness_vars vars[MAX_PIPES];
+	struct amdgpu_dm_connector *aconnector;
+	int count = 0;
+
+	memset(params, 0, sizeof(params));
+
+	/* Set up params */
+	for (i = 0; i < dc_state->stream_count; i++) {
+		struct dc_dsc_policy dsc_policy = {0};
+
+		stream = dc_state->streams[i];
+
+		if (stream->link != dc_link)
+			continue;
+
+		stream->timing.flags.DSC = 0;
+
+		params[count].timing = &stream->timing;
+		params[count].sink = stream->sink;
+		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+		params[count].port = aconnector->port;
+		params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
+		dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
+		if (!dc_dsc_compute_bandwidth_range(
+				stream->sink->ctx->dc->res_pool->dscs[0],
+				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+				dsc_policy.min_target_bpp,
+				dsc_policy.max_target_bpp,
+				&stream->sink->sink_dsc_caps.dsc_dec_caps,
+				&stream->timing, &params[count].bw_range))
+			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+
+		count++;
+	}
+	/* Try no compression */
+	for (i = 0; i < count; i++) {
+		vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+		vars[i].dsc_enabled = false;
+		vars[i].bpp_x16 = 0;
+		if (drm_dp_atomic_find_vcpi_slots(state,
+						 params[i].port->mgr,
+						 params[i].port,
+						 vars[i].pbn,
+						 0) < 0)
+			return false;
+	}
+	if (!drm_dp_mst_atomic_check(state)) {
+		set_dsc_configs_from_fairness_vars(params, vars, count);
+		return true;
+	}
+
+	/* Try max compression */
+	for (i = 0; i < count; i++) {
+		if (params[i].compression_possible) {
+			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
+			vars[i].dsc_enabled = true;
+			vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
+			if (drm_dp_atomic_find_vcpi_slots(state,
+							  params[i].port->mgr,
+							  params[i].port,
+							  vars[i].pbn,
+							  dm_mst_get_pbn_divider(dc_link)) < 0)
+				return false;
+		} else {
+			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+			vars[i].dsc_enabled = false;
+			vars[i].bpp_x16 = 0;
+			if (drm_dp_atomic_find_vcpi_slots(state,
+							  params[i].port->mgr,
+							  params[i].port,
+							  vars[i].pbn,
+							  0) < 0)
+				return false;
+		}
+	}
+	if (drm_dp_mst_atomic_check(state))
+		return false;
+
+	/* Optimize degree of compression */
+	increase_dsc_bpp(state, dc_link, params, vars, count);
+
+	try_disable_dsc(state, dc_link, params, vars, count);
+
+	set_dsc_configs_from_fairness_vars(params, vars, count);
+
+	return true;
+}
+
+bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+				       struct dc_state *dc_state)
+{
+	int i, j;
+	struct dc_stream_state *stream;
+	bool computed_streams[MAX_PIPES];
+	struct amdgpu_dm_connector *aconnector;
+
+	for (i = 0; i < dc_state->stream_count; i++)
+		computed_streams[i] = false;
+
+	for (i = 0; i < dc_state->stream_count; i++) {
+		stream = dc_state->streams[i];
+
+		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
+			continue;
+
+		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+		if (!aconnector || !aconnector->dc_sink)
+			continue;
+
+		if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
+			continue;
+
+		if (computed_streams[i])
+			continue;
+
+		mutex_lock(&aconnector->mst_mgr.lock);
+		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
+			mutex_unlock(&aconnector->mst_mgr.lock);
+			return false;
+		}
+		mutex_unlock(&aconnector->mst_mgr.lock);
+
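+		/* compute_mst_dsc_configs_for_link() covered every stream that shares this link */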
+		for (j = 0; j < dc_state->stream_count; j++) {
+			if (dc_state->streams[j]->link == stream->link)
+				computed_streams[j] = true;
+		}
+	}
+
+	for (i = 0; i < dc_state->stream_count; i++) {
+		stream = dc_state->streams[i];
+
+		if (stream->timing.flags.DSC == 1)
+			dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
+	}
+
+	return true;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 2da851b..d6813ce 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -29,7 +29,14 @@
 struct amdgpu_display_manager;
 struct amdgpu_dm_connector;
 
+int dm_mst_get_pbn_divider(struct dc_link *link);
+
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 				       struct amdgpu_dm_connector *aconnector);
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+				       struct dc_state *dc_state);
+#endif
+
 #endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 778f186..a2e1a73 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -892,7 +892,6 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
 	return PP_SMU_RESULT_FAIL;
 }
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 enum pp_smu_status pp_rn_get_dpm_clock_table(
 		struct pp_smu *pp, struct dpm_clocks *clock_table)
 {
@@ -974,7 +973,6 @@ enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
 
 	return PP_SMU_RESULT_OK;
 }
-#endif
 
 void dm_pp_get_funcs(
 		struct dc_context *ctx,
@@ -996,7 +994,6 @@ void dm_pp_get_funcs(
 		funcs->rv_funcs.set_hard_min_fclk_by_freq =
 				pp_rv_set_hard_min_fclk_by_freq;
 		break;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	case DCN_VERSION_2_0:
 		funcs->ctx.ver = PP_SMU_VER_NV;
 		funcs->nv_funcs.pp_smu.dm = ctx;
@@ -1019,16 +1016,13 @@ void dm_pp_get_funcs(
 		funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
 		funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
 		break;
-#endif
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 	case DCN_VERSION_2_1:
 		funcs->ctx.ver = PP_SMU_VER_RN;
 		funcs->rn_funcs.pp_smu.dm = ctx;
 		funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
 		funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
 		break;
-#endif
 	default:
 		DRM_ERROR("smu version is not supported !\n");
 		break;
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index a160512..6e3dddc 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -25,19 +25,10 @@
 
 DC_LIBS = basics bios calcs clk_mgr dce gpio irq virtual
 
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ifdef CONFIG_DRM_AMD_DC_DCN
 DC_LIBS += dcn20
-endif
-
-
-ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 DC_LIBS += dsc
-endif
-
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
 DC_LIBS += dcn10 dml
-endif
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
 DC_LIBS += dcn21
 endif
 
@@ -59,7 +50,7 @@
 DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
 dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o
 
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ifdef CONFIG_DRM_AMD_DC_DCN
 DISPLAY_CORE += dc_vm_helper.o
 endif
 
@@ -70,5 +61,6 @@
 AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
 AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
 
-
-
+DC_DMUB += dc_dmub_srv.o
+AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
+AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index a50a764..7ad0cad 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -25,7 +25,7 @@
 # subcomponents.
 
 BASICS = conversion.o fixpt31_32.o \
-	log_helpers.o vector.o
+	log_helpers.o vector.o dc_common.o
 
 AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
 
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
new file mode 100644
index 0000000..b2fc4f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "dc_common.h"
+#include "basics/conversion.h"
+
+bool is_rgb_cspace(enum dc_color_space output_color_space)
+{
+	switch (output_color_space) {
+	case COLOR_SPACE_SRGB:
+	case COLOR_SPACE_SRGB_LIMITED:
+	case COLOR_SPACE_2020_RGB_FULLRANGE:
+	case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+	case COLOR_SPACE_ADOBERGB:
+		return true;
+	case COLOR_SPACE_YCBCR601:
+	case COLOR_SPACE_YCBCR709:
+	case COLOR_SPACE_YCBCR601_LIMITED:
+	case COLOR_SPACE_YCBCR709_LIMITED:
+	case COLOR_SPACE_2020_YCBCR:
+		return false;
+	default:
+		/* Add a case to switch */
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+}
+
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
+		return true;
+	if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+		return true;
+	return false;
+}
+
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
+		return true;
+	if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+		return true;
+	return false;
+}
+
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
+		return true;
+	if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+		return true;
+	if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+		return true;
+	return false;
+}
+
+void build_prescale_params(struct  dc_bias_and_scale *bias_and_scale,
+		const struct dc_plane_state *plane_state)
+{
+	if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
+			&& plane_state->format != SURFACE_PIXEL_FORMAT_INVALID
+			&& plane_state->input_csc_color_matrix.enable_adjustment
+			&& plane_state->coeff_reduction_factor.value != 0) {
+		bias_and_scale->scale_blue = fixed_point_to_int_frac(
+			dc_fixpt_mul(plane_state->coeff_reduction_factor,
+					dc_fixpt_from_fraction(256, 255)),
+				2,
+				13);
+		bias_and_scale->scale_red = bias_and_scale->scale_blue;
+		bias_and_scale->scale_green = bias_and_scale->scale_blue;
+	} else {
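+		/* Default to unity scale: 0x2000 is 1.0 in the 2.13 fixed-point format used above */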
+		bias_and_scale->scale_blue = 0x2000;
+		bias_and_scale->scale_red = 0x2000;
+		bias_and_scale->scale_green = 0x2000;
+	}
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
new file mode 100644
index 0000000..7c0cbf4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DC_COMMON_H__
+#define __DAL_DC_COMMON_H__
+
+#include "core_types.h"
+
+bool is_rgb_cspace(enum dc_color_space output_color_space);
+
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+void build_prescale_params(struct  dc_bias_and_scale *bias_and_scale,
+		const struct dc_plane_state *plane_state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 823843c..008d4d1 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -111,7 +111,7 @@ struct dc_bios *bios_parser_create(
 	return NULL;
 }
 
-static void destruct(struct bios_parser *bp)
+static void bios_parser_destruct(struct bios_parser *bp)
 {
 	kfree(bp->base.bios_local_image);
 	kfree(bp->base.integrated_info);
@@ -126,7 +126,7 @@ static void bios_parser_destroy(struct dc_bios **dcb)
 		return;
 	}
 
-	destruct(bp);
+	bios_parser_destruct(bp);
 
 	kfree(bp);
 	*dcb = NULL;
@@ -2189,7 +2189,7 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id)
 		break;
 	default:
 		break;
-	};
+	}
 
 	/* Unidentified device ID, return empty support mask. */
 	return 0;
@@ -2739,7 +2739,6 @@ static enum bp_result bios_get_board_layout_info(
 	struct board_layout_info *board_layout_info)
 {
 	unsigned int i;
-	struct bios_parser *bp;
 	enum bp_result record_result;
 
 	const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
@@ -2748,7 +2747,6 @@ static enum bp_result bios_get_board_layout_info(
 		0, 0
 	};
 
-	bp = BP_FROM_DCB(dcb);
 	if (board_layout_info == NULL) {
 		DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
 		return BP_RESULT_BADINPUT;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 5c3fcaa..2f1c958 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -111,7 +111,7 @@ static struct atom_encoder_caps_record *get_encoder_cap_record(
 
 #define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
 
-static void destruct(struct bios_parser *bp)
+static void bios_parser2_destruct(struct bios_parser *bp)
 {
 	kfree(bp->base.bios_local_image);
 	kfree(bp->base.integrated_info);
@@ -126,7 +126,7 @@ static void firmware_parser_destroy(struct dc_bios **dcb)
 		return;
 	}
 
-	destruct(bp);
+	bios_parser2_destruct(bp);
 
 	kfree(bp);
 	*dcb = NULL;
@@ -294,11 +294,21 @@ static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
 	struct atom_display_object_path_v2 *object;
 	struct atom_common_record_header *header;
 	struct atom_i2c_record *record;
+	struct atom_i2c_record dummy_record = {0};
 	struct bios_parser *bp = BP_FROM_DCB(dcb);
 
 	if (!info)
 		return BP_RESULT_BADINPUT;
 
+	if (id.type == OBJECT_TYPE_GENERIC) {
+		dummy_record.i2c_id = id.id;
+
+		if (get_gpio_i2c_info(bp, &dummy_record, info) == BP_RESULT_OK)
+			return BP_RESULT_OK;
+		else
+			return BP_RESULT_NORECORD;
+	}
+
 	object = get_bios_object(bp, id);
 
 	if (!object)
@@ -341,6 +351,7 @@ static enum bp_result get_gpio_i2c_info(
 	struct atom_gpio_pin_lut_v2_1 *header;
 	uint32_t count = 0;
 	unsigned int table_index = 0;
+	bool find_valid = false;
 
 	if (!info)
 		return BP_RESULT_BADINPUT;
@@ -368,33 +379,28 @@ static enum bp_result get_gpio_i2c_info(
 			- sizeof(struct atom_common_table_header))
 				/ sizeof(struct atom_gpio_pin_assignment);
 
-	table_index = record->i2c_id  & I2C_HW_LANE_MUX;
-
-	if (count < table_index) {
-		bool find_valid = false;
-
-		for (table_index = 0; table_index < count; table_index++) {
-			if (((record->i2c_id & I2C_HW_CAP) == (
-			header->gpio_pin[table_index].gpio_id &
-							I2C_HW_CAP)) &&
-			((record->i2c_id & I2C_HW_ENGINE_ID_MASK)  ==
-			(header->gpio_pin[table_index].gpio_id &
-						I2C_HW_ENGINE_ID_MASK)) &&
-			((record->i2c_id & I2C_HW_LANE_MUX) ==
-			(header->gpio_pin[table_index].gpio_id &
-							I2C_HW_LANE_MUX))) {
-				/* still valid */
-				find_valid = true;
-				break;
-			}
+	for (table_index = 0; table_index < count; table_index++) {
+		if (((record->i2c_id & I2C_HW_CAP) == (
+		header->gpio_pin[table_index].gpio_id &
+						I2C_HW_CAP)) &&
+		((record->i2c_id & I2C_HW_ENGINE_ID_MASK)  ==
+		(header->gpio_pin[table_index].gpio_id &
+					I2C_HW_ENGINE_ID_MASK)) &&
+		((record->i2c_id & I2C_HW_LANE_MUX) ==
+		(header->gpio_pin[table_index].gpio_id &
+						I2C_HW_LANE_MUX))) {
+			/* still valid */
+			find_valid = true;
+			break;
 		}
-		/* If we don't find the entry that we are looking for then
-		 *  we will return BP_Result_BadBiosTable.
-		 */
-		if (find_valid == false)
-			return BP_RESULT_BADBIOSTABLE;
 	}
 
+	/* If we don't find the entry that we are looking for then
+	 *  we will return BP_Result_BadBiosTable.
+	 */
+	if (find_valid == false)
+		return BP_RESULT_BADBIOSTABLE;
+
 	/* get the GPIO_I2C_INFO */
 	info->i2c_hw_assist = (record->i2c_id & I2C_HW_CAP) ? true : false;
 	info->i2c_line = record->i2c_id & I2C_HW_LANE_MUX;
@@ -828,6 +834,7 @@ static enum bp_result bios_parser_get_spread_spectrum_info(
 		case 1:
 			return get_ss_info_v4_1(bp, signal, index, ss_info);
 		case 2:
+		case 3:
 			return get_ss_info_v4_2(bp, signal, index, ss_info);
 		default:
 			break;
@@ -986,7 +993,7 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id)
 		break;
 	default:
 		break;
-	};
+	}
 
 	/* Unidentified device ID, return empty support mask. */
 	return 0;
@@ -1205,6 +1212,8 @@ static enum bp_result get_firmware_info_v3_1(
 				bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
 	}
 
+	info->oem_i2c_present = false;
+
 	return BP_RESULT_OK;
 }
 
@@ -1283,6 +1292,13 @@ static enum bp_result get_firmware_info_v3_2(
 					bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10;
 	}
 
+	if (firmware_info->board_i2c_feature_id == 0x2) {
+		info->oem_i2c_present = true;
+		info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id;
+	} else {
+		info->oem_i2c_present = false;
+	}
+
 	return BP_RESULT_OK;
 }
 
@@ -1402,10 +1418,8 @@ static enum bp_result get_integrated_info_v11(
 	info->ma_channel_number = info_v11->umachannelnumber;
 	info->lvds_ss_percentage =
 	le16_to_cpu(info_v11->lvds_ss_percentage);
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	info->dp_ss_control =
 	le16_to_cpu(info_v11->reserved1);
-#endif
 	info->lvds_sspread_rate_in_10hz =
 	le16_to_cpu(info_v11->lvds_ss_rate_10hz);
 	info->hdmi_ss_percentage =
@@ -1826,7 +1840,6 @@ static enum bp_result bios_get_board_layout_info(
 	struct board_layout_info *board_layout_info)
 {
 	unsigned int i;
-	struct bios_parser *bp;
 	enum bp_result record_result;
 
 	const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
@@ -1835,7 +1848,6 @@ static enum bp_result bios_get_board_layout_info(
 		0, 0
 	};
 
-	bp = BP_FROM_DCB(dcb);
 	if (board_layout_info == NULL) {
 		DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
 		return BP_RESULT_BADINPUT;
@@ -1915,7 +1927,7 @@ static const struct dc_vbios_funcs vbios_funcs = {
 	.get_board_layout_info = bios_get_board_layout_info,
 };
 
-static bool bios_parser_construct(
+static bool bios_parser2_construct(
 	struct bios_parser *bp,
 	struct bp_init_data *init,
 	enum dce_version dce_version)
@@ -2008,7 +2020,7 @@ struct dc_bios *firmware_parser_create(
 	if (!bp)
 		return NULL;
 
-	if (bios_parser_construct(bp, init, dce_version))
+	if (bios_parser2_construct(bp, init, dce_version))
 		return &bp->base;
 
 	kfree(bp);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index bb2e810..629a07a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -37,6 +37,8 @@
 #include "bios_parser_types_internal2.h"
 #include "amdgpu.h"
 
+#include "dc_dmub_srv.h"
+#include "dc.h"
 
 #define DC_LOGGER \
 	bp->base.ctx->logger
@@ -87,6 +89,10 @@ static enum bp_result encoder_control_digx_v1_5(
 	struct bios_parser *bp,
 	struct bp_encoder_control *cntl);
 
+static enum bp_result encoder_control_fallback(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+
 static void init_dig_encoder_control(struct bios_parser *bp)
 {
 	uint32_t version =
@@ -98,11 +104,26 @@ static void init_dig_encoder_control(struct bios_parser *bp)
 		break;
 	default:
 		dm_output_to_console("Don't have dig_encoder_control for v%d\n", version);
-		bp->cmd_tbl.dig_encoder_control = NULL;
+		bp->cmd_tbl.dig_encoder_control = encoder_control_fallback;
 		break;
 	}
 }
 
+static void encoder_control_dmcub(
+		struct dc_dmub_srv *dmcub,
+		struct dig_encoder_stream_setup_parameters_v1_5 *dig)
+{
+	struct dmub_rb_cmd_digx_encoder_control encoder_control = { 0 };
+
+	encoder_control.header.type = DMUB_CMD__VBIOS;
+	encoder_control.header.sub_type = DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
+	encoder_control.encoder_control.dig.stream_param = *dig;
+
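+	/* Hand the encoder control command to the DMCUB firmware instead of the legacy VBIOS command table */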
+	dc_dmub_srv_cmd_queue(dmcub, &encoder_control.header);
+	dc_dmub_srv_cmd_execute(dmcub);
+	dc_dmub_srv_wait_idle(dmcub);
+}
+
 static enum bp_result encoder_control_digx_v1_5(
 	struct bios_parser *bp,
 	struct bp_encoder_control *cntl)
@@ -155,12 +176,30 @@ static enum bp_result encoder_control_digx_v1_5(
 			break;
 		}
 
+	if (bp->base.ctx->dc->ctx->dmub_srv &&
+	    bp->base.ctx->dc->debug.dmub_command_table) {
+		encoder_control_dmcub(bp->base.ctx->dmub_srv, &params);
+		return BP_RESULT_OK;
+	}
+
 	if (EXEC_BIOS_CMD_TABLE(digxencodercontrol, params))
 		result = BP_RESULT_OK;
 
 	return result;
 }
 
+static enum bp_result encoder_control_fallback(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	if (bp->base.ctx->dc->ctx->dmub_srv &&
+	    bp->base.ctx->dc->debug.dmub_command_table) {
+		return encoder_control_digx_v1_5(bp, cntl);
+	}
+
+	return BP_RESULT_FAILURE;
+}
+
 /*****************************************************************************
  ******************************************************************************
  **
@@ -173,6 +212,10 @@ static enum bp_result transmitter_control_v1_6(
 	struct bios_parser *bp,
 	struct bp_transmitter_control *cntl);
 
+static enum bp_result transmitter_control_fallback(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl);
+
 static void init_transmitter_control(struct bios_parser *bp)
 {
 	uint8_t frev;
@@ -186,11 +229,27 @@ static void init_transmitter_control(struct bios_parser *bp)
 		break;
 	default:
 		dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
-		bp->cmd_tbl.transmitter_control = NULL;
+		bp->cmd_tbl.transmitter_control = transmitter_control_fallback;
 		break;
 	}
 }
 
+static void transmitter_control_dmcub(
+		struct dc_dmub_srv *dmcub,
+		struct dig_transmitter_control_parameters_v1_6 *dig)
+{
+	struct dmub_rb_cmd_dig1_transmitter_control transmitter_control = { 0 };
+
+	transmitter_control.header.type = DMUB_CMD__VBIOS;
+	transmitter_control.header.sub_type =
+		DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL;
+	transmitter_control.transmitter_control.dig = *dig;
+
+	dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header);
+	dc_dmub_srv_cmd_execute(dmcub);
+	dc_dmub_srv_wait_idle(dmcub);
+}
+
 static enum bp_result transmitter_control_v1_6(
 	struct bios_parser *bp,
 	struct bp_transmitter_control *cntl)
@@ -222,6 +281,11 @@ static enum bp_result transmitter_control_v1_6(
 		__func__, ps.param.symclk_10khz);
 	}
 
+	if (bp->base.ctx->dc->ctx->dmub_srv &&
+	    bp->base.ctx->dc->debug.dmub_command_table) {
+		transmitter_control_dmcub(bp->base.ctx->dmub_srv, &ps.param);
+		return BP_RESULT_OK;
+	}
 
 /*color_depth not used any more, driver has deep color factor in the Phyclk*/
 	if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps))
@@ -229,6 +293,18 @@ static enum bp_result transmitter_control_v1_6(
 	return result;
 }
 
+static enum bp_result transmitter_control_fallback(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl)
+{
+	if (bp->base.ctx->dc->ctx->dmub_srv &&
+	    bp->base.ctx->dc->debug.dmub_command_table) {
+		return transmitter_control_v1_6(bp, cntl);
+	}
+
+	return BP_RESULT_FAILURE;
+}
+
 /******************************************************************************
  ******************************************************************************
  **
@@ -241,6 +317,10 @@ static enum bp_result set_pixel_clock_v7(
 	struct bios_parser *bp,
 	struct bp_pixel_clock_parameters *bp_params);
 
+static enum bp_result set_pixel_clock_fallback(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+
 static void init_set_pixel_clock(struct bios_parser *bp)
 {
 	switch (BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)) {
@@ -250,12 +330,25 @@ static void init_set_pixel_clock(struct bios_parser *bp)
 	default:
 		dm_output_to_console("Don't have set_pixel_clock for v%d\n",
 			 BIOS_CMD_TABLE_PARA_REVISION(setpixelclock));
-		bp->cmd_tbl.set_pixel_clock = NULL;
+		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback;
 		break;
 	}
 }
 
+static void set_pixel_clock_dmcub(
+		struct dc_dmub_srv *dmcub,
+		struct set_pixel_clock_parameter_v1_7 *clk)
+{
+	struct dmub_rb_cmd_set_pixel_clock pixel_clock = { 0 };
 
+	pixel_clock.header.type = DMUB_CMD__VBIOS;
+	pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK;
+	pixel_clock.pixel_clock.clk = *clk;
+
+	dc_dmub_srv_cmd_queue(dmcub, &pixel_clock.header);
+	dc_dmub_srv_cmd_execute(dmcub);
+	dc_dmub_srv_wait_idle(dmcub);
+}
 
 static enum bp_result set_pixel_clock_v7(
 	struct bios_parser *bp,
@@ -331,12 +424,30 @@ static enum bp_result set_pixel_clock_v7(
 		if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
 			clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
 
+		if (bp->base.ctx->dc->ctx->dmub_srv &&
+		    bp->base.ctx->dc->debug.dmub_command_table) {
+			set_pixel_clock_dmcub(bp->base.ctx->dmub_srv, &clk);
+			return BP_RESULT_OK;
+		}
+
 		if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk))
 			result = BP_RESULT_OK;
 	}
 	return result;
 }
 
+static enum bp_result set_pixel_clock_fallback(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	if (bp->base.ctx->dc->ctx->dmub_srv &&
+	    bp->base.ctx->dc->debug.dmub_command_table) {
+		return set_pixel_clock_v7(bp, bp_params);
+	}
+
+	return BP_RESULT_FAILURE;
+}
+
 /******************************************************************************
  ******************************************************************************
  **
@@ -569,6 +680,11 @@ static enum bp_result enable_disp_power_gating_v2_1(
 	enum controller_id crtc_id,
 	enum bp_pipe_control_action action);
 
+static enum bp_result enable_disp_power_gating_fallback(
+	struct bios_parser *bp,
+	enum controller_id crtc_id,
+	enum bp_pipe_control_action action);
+
 static void init_enable_disp_power_gating(
 	struct bios_parser *bp)
 {
@@ -580,11 +696,30 @@ static void init_enable_disp_power_gating(
 	default:
 		dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n",
 			 BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating));
-		bp->cmd_tbl.enable_disp_power_gating = NULL;
+		bp->cmd_tbl.enable_disp_power_gating = enable_disp_power_gating_fallback;
 		break;
 	}
 }
 
+static void enable_disp_power_gating_dmcub(
+	struct dc_dmub_srv *dmcub,
+	struct enable_disp_power_gating_parameters_v2_1 *pwr)
+{
+	struct dmub_rb_cmd_enable_disp_power_gating power_gating = { 0 };
+
+	power_gating.header.type = DMUB_CMD__VBIOS;
+	power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
+	power_gating.power_gating.pwr = *pwr;
+
+	/* ATOM_ENABLE is old API in DMUB */
+	if (power_gating.power_gating.pwr.enable == ATOM_ENABLE)
+		power_gating.power_gating.pwr.enable = ATOM_INIT;
+
+	dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
+	dc_dmub_srv_cmd_execute(dmcub);
+	dc_dmub_srv_wait_idle(dmcub);
+}
+
 static enum bp_result enable_disp_power_gating_v2_1(
 	struct bios_parser *bp,
 	enum controller_id crtc_id,
@@ -604,12 +739,32 @@ static enum bp_result enable_disp_power_gating_v2_1(
 	ps.param.enable =
 		bp->cmd_helper->disp_power_gating_action_to_atom(action);
 
+	if (bp->base.ctx->dc->ctx->dmub_srv &&
+	    bp->base.ctx->dc->debug.dmub_command_table) {
+		enable_disp_power_gating_dmcub(bp->base.ctx->dmub_srv,
+					       &ps.param);
+		return BP_RESULT_OK;
+	}
+
 	if (EXEC_BIOS_CMD_TABLE(enabledisppowergating, ps.param))
 		result = BP_RESULT_OK;
 
 	return result;
 }
 
+static enum bp_result enable_disp_power_gating_fallback(
+	struct bios_parser *bp,
+	enum controller_id crtc_id,
+	enum bp_pipe_control_action action)
+{
+	if (bp->base.ctx->dc->ctx->dmub_srv &&
+	    bp->base.ctx->dc->debug.dmub_command_table) {
+		return enable_disp_power_gating_v2_1(bp, crtc_id, action);
+	}
+
+	return BP_RESULT_FAILURE;
+}
+
 /******************************************************************************
 *******************************************************************************
  **
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index db153dd..7388c98 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -55,23 +55,19 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
 	case DCE_VERSION_11_22:
 		*h = dal_cmd_tbl_helper_dce112_get_table2();
 		return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	case DCN_VERSION_1_0:
 	case DCN_VERSION_1_01:
 		*h = dal_cmd_tbl_helper_dce112_get_table2();
 		return true;
 #endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	case DCN_VERSION_2_0:
 		*h = dal_cmd_tbl_helper_dce112_get_table2();
 		return true;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	case DCN_VERSION_2_1:
 		*h = dal_cmd_tbl_helper_dce112_get_table2();
 		return true;
-#endif
 	case DCE_VERSION_12_0:
 	case DCE_VERSION_12_1:
 		*h = dal_cmd_tbl_helper_dce112_get_table2();
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 26c6d73..4674aca 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -1,5 +1,6 @@
 #
 # Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2019 Raptor Engineering, LLC
 #
 # Permission is hereby granted, free of charge, to any person obtaining a
 # copy of this software and associated documentation files (the "Software"),
@@ -24,7 +25,13 @@
 # It calculates Bandwidth and Watermarks values for HW programming
 #
 
+ifdef CONFIG_X86
 calcs_ccflags := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+calcs_ccflags := -mhard-float -maltivec
+endif
 
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -32,6 +39,7 @@
 endif
 endif
 
+ifdef CONFIG_X86
 ifdef IS_OLD_GCC
 # Stack alignment mismatch, proceed with caution.
 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -40,6 +48,7 @@
 else
 calcs_ccflags += -msse2
 endif
+endif
 
 CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_ccflags)
@@ -47,7 +56,7 @@
 
 BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
 
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_DRM_AMD_DC_DCN
 BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
 endif
 
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index a1d4925..5d081c4 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -154,14 +154,14 @@ static void calculate_bandwidth(
 
 
 
-	if (data->d0_underlay_mode == bw_def_none) { d0_underlay_enable = 0; }
-	else {
-		d0_underlay_enable = 1;
-	}
-	if (data->d1_underlay_mode == bw_def_none) { d1_underlay_enable = 0; }
-	else {
-		d1_underlay_enable = 1;
-	}
+	if (data->d0_underlay_mode == bw_def_none)
+		d0_underlay_enable = false;
+	else
+		d0_underlay_enable = true;
+	if (data->d1_underlay_mode == bw_def_none)
+		d1_underlay_enable = false;
+	else
+		d1_underlay_enable = true;
 	data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable;
 	switch (data->underlay_surface_type) {
 	case bw_def_420:
@@ -286,8 +286,8 @@ static void calculate_bandwidth(
 	data->cursor_width_pixels[2] = bw_int_to_fixed(0);
 	data->cursor_width_pixels[3] = bw_int_to_fixed(0);
 	/* graphics surface parameters from spreadsheet*/
-	fbc_enabled = 0;
-	lpt_enabled = 0;
+	fbc_enabled = false;
+	lpt_enabled = false;
 	for (i = 4; i <= maximum_number_of_surfaces - 3; i++) {
 		if (i < data->number_of_displays + 4) {
 			if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) {
@@ -338,9 +338,9 @@ static void calculate_bandwidth(
 			data->access_one_channel_only[i] = 0;
 		}
 		if (data->fbc_en[i] == 1) {
-			fbc_enabled = 1;
+			fbc_enabled = true;
 			if (data->lpt_en[i] == 1) {
-				lpt_enabled = 1;
+				lpt_enabled = true;
 			}
 		}
 		data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 9b2cb57..1a37550 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1,5 +1,6 @@
 /*
  * Copyright 2017 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -53,13 +54,9 @@
  * remain as-is as it provides us with a guarantee from HW that it is correct.
  */
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 /* Defaults from spreadsheet rev#247.
  * RV2 delta: dram_clock_change_latency, max_num_dpp
  */
-#else
-/* Defaults from spreadsheet rev#247 */
-#endif
 const struct dcn_soc_bounding_box dcn10_soc_defaults = {
 		/* latencies */
 		.sr_exit_time = 17, /*us*/
@@ -626,7 +623,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
 {
 	bool updated = false;
 
-	kernel_fpu_begin();
+	DC_FP_START();
 	if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns
 			&& dc->debug.sr_exit_time_ns) {
 		updated = true;
@@ -662,7 +659,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
 		dc->dcn_soc->dram_clock_change_latency =
 				dc->debug.dram_clock_change_latency_ns / 1000.0;
 	}
-	kernel_fpu_end();
+	DC_FP_END();
 
 	return updated;
 }
@@ -708,8 +705,8 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
 
 unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev)
 {
-	/* for dali, the highest voltage level we want is 0 */
-	if (ASICREV_IS_DALI(hw_internal_rev))
+	/* for dali & pollock, the highest voltage level we want is 0 */
+	if (ASICREV_IS_POLLOCK(hw_internal_rev) || ASICREV_IS_DALI(hw_internal_rev))
 		return 0;
 
 	/* we are ok with all levels */
@@ -742,7 +739,7 @@ bool dcn_validate_bandwidth(
 		dcn_bw_sync_calcs_and_dml(dc);
 
 	memset(v, 0, sizeof(*v));
-	kernel_fpu_begin();
+	DC_FP_START();
 
 	v->sr_exit_time = dc->dcn_soc->sr_exit_time;
 	v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time;
@@ -1275,7 +1272,7 @@ bool dcn_validate_bandwidth(
 	bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9;
 	bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit;
 
-	kernel_fpu_end();
+	DC_FP_END();
 
 	PERFORMANCE_TRACE_END();
 	BW_VAL_TRACE_FINISH();
@@ -1438,37 +1435,49 @@ void dcn_bw_update_from_pplib(struct dc *dc)
 	struct dc_context *ctx = dc->ctx;
 	struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
 	bool res;
+	unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
 
 	/* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
 	res = dm_pp_get_clock_levels_by_type_with_voltage(
 			ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
 
-	kernel_fpu_begin();
+	DC_FP_START();
 
 	if (res)
 		res = verify_clock_values(&fclks);
 
 	if (res) {
-		ASSERT(fclks.num_levels >= 3);
-		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (fclks.data[0].clocks_in_khz / 1000.0) / 1000.0;
-		dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
-				(fclks.data[fclks.num_levels - (fclks.num_levels > 2 ? 3 : 2)].clocks_in_khz / 1000.0)
-				* ddr4_dram_factor_single_Channel / 1000.0;
-		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels *
-				(fclks.data[fclks.num_levels - 2].clocks_in_khz / 1000.0)
-				* ddr4_dram_factor_single_Channel / 1000.0;
-		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels *
-				(fclks.data[fclks.num_levels - 1].clocks_in_khz / 1000.0)
-				* ddr4_dram_factor_single_Channel / 1000.0;
+		ASSERT(fclks.num_levels);
+
+		vmin0p65_idx = 0;
+		vmid0p72_idx = fclks.num_levels -
+			(fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1));
+		vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1);
+		vmax0p9_idx = fclks.num_levels - 1;
+
+		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
+			32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
+		dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
+			dc->dcn_soc->number_of_channels *
+			(fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0)
+			* ddr4_dram_factor_single_Channel / 1000.0;
+		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
+			dc->dcn_soc->number_of_channels *
+			(fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0)
+			* ddr4_dram_factor_single_Channel / 1000.0;
+		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
+			dc->dcn_soc->number_of_channels *
+			(fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0)
+			* ddr4_dram_factor_single_Channel / 1000.0;
 	} else
 		BREAK_TO_DEBUGGER();
 
-	kernel_fpu_end();
+	DC_FP_END();
 
 	res = dm_pp_get_clock_levels_by_type_with_voltage(
 			ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
 
-	kernel_fpu_begin();
+	DC_FP_START();
 
 	if (res)
 		res = verify_clock_values(&dcfclks);
@@ -1481,7 +1490,7 @@ void dcn_bw_update_from_pplib(struct dc *dc)
 	} else
 		BREAK_TO_DEBUGGER();
 
-	kernel_fpu_end();
+	DC_FP_END();
 }
 
 void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
@@ -1496,11 +1505,11 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
 	if (!pp || !pp->set_wm_ranges)
 		return;
 
-	kernel_fpu_begin();
+	DC_FP_START();
 	min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
 	min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
 	socclk_khz = dc->dcn_soc->socclk * 1000;
-	kernel_fpu_end();
+	DC_FP_END();
 
 	/* Now notify PPLib/SMU about which Watermarks sets they should select
 	 * depending on DPM state they are in. And update BW MGR GFX Engine and
@@ -1551,7 +1560,7 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
 
 void dcn_bw_sync_calcs_and_dml(struct dc *dc)
 {
-	kernel_fpu_begin();
+	DC_FP_START();
 	DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n"
 			"sr_enter_plus_exit_time: %f ns\n"
 			"urgent_latency: %f ns\n"
@@ -1740,5 +1749,5 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
 	dc->dml.ip.bug_forcing_LC_req_same_size_fixed =
 		dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes;
 	dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency;
-	kernel_fpu_end();
+	DC_FP_END();
 }
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index b864869c..3cd28319 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -63,7 +63,7 @@
 AMD_DAL_CLK_MGR_DCE120 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce120/,$(CLK_MGR_DCE120))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE120)
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_DRM_AMD_DC_DCN
 ###############################################################################
 # DCN10
 ###############################################################################
@@ -72,9 +72,7 @@
 AMD_DAL_CLK_MGR_DCN10 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn10/,$(CLK_MGR_DCN10))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN10)
-endif
 
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
 ###############################################################################
 # DCN20
 ###############################################################################
@@ -83,9 +81,7 @@
 AMD_DAL_CLK_MGR_DCN20 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn20/,$(CLK_MGR_DCN20))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20)
-endif
 
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
 ###############################################################################
 # DCN21
 ###############################################################################
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 8828dd9..a78e5c7 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -37,9 +37,7 @@
 #include "dcn10/rv1_clk_mgr.h"
 #include "dcn10/rv2_clk_mgr.h"
 #include "dcn20/dcn20_clk_mgr.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #include "dcn21/rn_clk_mgr.h"
-#endif
 
 
 int clk_mgr_helper_get_active_display_cnt(
@@ -134,14 +132,19 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
 			dce120_clk_mgr_construct(ctx, clk_mgr);
 		break;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	case FAMILY_RV:
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+		if (ASICREV_IS_DALI(asic_id.hw_internal_rev) ||
+				ASICREV_IS_POLLOCK(asic_id.hw_internal_rev)) {
+			/* TEMP: this check has to come before ASICREV_IS_RENOIR */
+				/* which also incorrectly returns true for Dali/Pollock */
+			rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
+			break;
+		}
 		if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
 			rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
 			break;
 		}
-#endif	/* DCN2_1 */
 		if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
 			rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
 			break;
@@ -152,13 +155,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
 			break;
 		}
 		break;
-#endif	/* Family RV */
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	case FAMILY_NV:
 		dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
 		break;
-#endif /* Family NV */
+#endif	/* Family RV and NV*/
 
 	default:
 		ASSERT(0); /* Unknown Asic */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
index a6c46e9..d031bd3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -72,8 +72,8 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
 	struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
 	struct bp_set_dce_clock_parameters dce_clk_params;
 	struct dc_bios *bp = clk_mgr_base->ctx->dc_bios;
-	struct dc *core_dc = clk_mgr_base->ctx->dc;
-	struct dmcu *dmcu = core_dc->res_pool->dmcu;
+	struct dc *dc = clk_mgr_base->ctx->dc;
+	struct dmcu *dmcu = dc->res_pool->dmcu;
 	int actual_clock = requested_clk_khz;
 	/* Prepare to program display clock*/
 	memset(&dce_clk_params, 0, sizeof(dce_clk_params));
@@ -110,7 +110,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
 
 	bp->funcs->set_dce_clock(bp, &dce_clk_params);
 
-	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
 			if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
 				dmcu->funcs->set_psr_wait_loop(dmcu,
@@ -126,8 +126,8 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)
 {
 	struct bp_set_dce_clock_parameters dce_clk_params;
 	struct dc_bios *bp = clk_mgr->base.ctx->dc_bios;
-	struct dc *core_dc = clk_mgr->base.ctx->dc;
-	struct dmcu *dmcu = core_dc->res_pool->dmcu;
+	struct dc *dc = clk_mgr->base.ctx->dc;
+	struct dmcu *dmcu = dc->res_pool->dmcu;
 	int actual_clock = requested_clk_khz;
 	/* Prepare to program display clock*/
 	memset(&dce_clk_params, 0, sizeof(dce_clk_params));
@@ -152,7 +152,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)
 		clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
 
 
-	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
 			if (clk_mgr->dfs_bypass_disp_clk != actual_clock)
 				dmcu->funcs->set_psr_wait_loop(dmcu,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 1897e91..97b7f32 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -88,8 +88,8 @@ int rv1_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned
 int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
 {
 	int actual_dispclk_set_mhz = -1;
-	struct dc *core_dc = clk_mgr->base.ctx->dc;
-	struct dmcu *dmcu = core_dc->res_pool->dmcu;
+	struct dc *dc = clk_mgr->base.ctx->dc;
+	struct dmcu *dmcu = dc->res_pool->dmcu;
 
 	/*  Unit of SMU msg parameter is Mhz */
 	actual_dispclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
@@ -100,7 +100,7 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
 	/* Actual dispclk set is returned in the parameter register */
 	actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83) * 1000;
 
-	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
 			if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
 				dmcu->funcs->set_psr_wait_loop(dmcu,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 25d7b7c..495f01e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -27,6 +27,7 @@
 #include "clk_mgr_internal.h"
 
 #include "dce100/dce_clk_mgr.h"
+#include "dcn20_clk_mgr.h"
 #include "reg_helper.h"
 #include "core_types.h"
 #include "dm_helpers.h"
@@ -100,13 +101,13 @@ uint32_t dentist_get_did_from_divider(int divider)
 }
 
 void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
-		struct dc_state *context)
+		struct dc_state *context, bool safe_to_lower)
 {
 	int i;
 
 	clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
 	for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
-		int dpp_inst, dppclk_khz;
+		int dpp_inst, dppclk_khz, prev_dppclk_khz;
 
 		/* Loop index will match dpp->inst if resource exists,
 		 * and we want to avoid dependency on dpp object
@@ -114,8 +115,12 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
 		dpp_inst = i;
 		dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
 
-		clk_mgr->dccg->funcs->update_dpp_dto(
-				clk_mgr->dccg, dpp_inst, dppclk_khz);
+		prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
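+		/* Reprogram the DTO when the DPP clock is rising, or when lowering it has been declared safe */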
+		if (safe_to_lower || prev_dppclk_khz < dppclk_khz) {
+			clk_mgr->dccg->funcs->update_dpp_dto(
+							clk_mgr->dccg, dpp_inst, dppclk_khz);
+		}
 	}
 }
 
@@ -161,6 +166,9 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 		dc->debug.force_clock_mode & 0x1) {
 		//this is from resume or boot up, if forced_clock cfg option used, we bypass program dispclk and DPPCLK, but need set them for S3.
 		force_reset = true;
+
+		dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
+
 		//force_clock_mode 0x1:  force reset the clock even it is the same clock as long as it is in Passive level.
 	}
 	display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
@@ -240,7 +248,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 	if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
 		if (dpp_clock_lowered) {
 			// if clock is being lowered, increase DTO before lowering refclk
-			dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
 			dcn20_update_clocks_update_dentist(clk_mgr);
 		} else {
 			// if clock is being raised, increase refclk before lowering DTO
@@ -248,7 +256,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 				dcn20_update_clocks_update_dentist(clk_mgr);
 			// always update dtos unless clock is lowered and not safe to lower
 			if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
-				dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+				dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
 		}
 	}
 
@@ -339,6 +347,32 @@ void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
 	}
 }
 
+
+void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base)
+{
+	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+	uint32_t dispclk_wdivider;
+	uint32_t dppclk_wdivider;
+	int disp_divider;
+	int dpp_divider;
+
+	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider);
+	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, &dppclk_wdivider);
+
+	disp_divider = dentist_get_divider_from_did(dispclk_wdivider);
+	dpp_divider = dentist_get_divider_from_did(dppclk_wdivider);
+
+	if (disp_divider && dpp_divider) {
+		/* Calculate the current DFS clock, in kHz.*/
+		clk_mgr_base->clks.dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+			* clk_mgr->base.dentist_vco_freq_khz) / disp_divider;
+
+		clk_mgr_base->clks.dppclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+				* clk_mgr->base.dentist_vco_freq_khz) / dpp_divider;
+	}
+
+}
+
 void dcn2_get_clock(struct clk_mgr *clk_mgr,
 		struct dc_state *context,
 			enum dc_clock_type clock_type,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
index c9fd824..0b9c045 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
@@ -34,7 +34,7 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
 			struct dc_state *context,
 			bool safe_to_lower);
 void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
-		struct dc_state *context);
+		struct dc_state *context, bool safe_to_lower);
 
 void dcn2_init_clocks(struct clk_mgr *clk_mgr);
 
@@ -51,4 +51,8 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
 			struct dc_clock_config *clock_cfg);
 
 void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr);
+
+void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base);
+
+
 #endif //__DCN20_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 35c55e5..7ae4c06 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -59,14 +59,16 @@ int rn_get_active_display_cnt_wa(
 		struct dc_state *context)
 {
 	int i, display_count;
-	bool hdmi_present = false;
+	bool tmds_present = false;
 
 	display_count = 0;
 	for (i = 0; i < context->stream_count; i++) {
 		const struct dc_stream_state *stream = context->streams[i];
 
-		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
-			hdmi_present = true;
+		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+			tmds_present = true;
 	}
 
 	for (i = 0; i < dc->link_count; i++) {
@@ -85,7 +87,7 @@ int rn_get_active_display_cnt_wa(
 	}
 
 	/* WA for hang on HDMI after display off back back on*/
-	if (display_count == 0 && hdmi_present)
+	if (display_count == 0 && tmds_present)
 		display_count = 1;
 
 	return display_count;
@@ -164,16 +166,16 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
 	}
 
 	if (dpp_clock_lowered) {
-		// if clock is being lowered, increase DTO before lowering refclk
-		dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+		// increase per DPP DTO before lowering global dppclk
+		dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
 		rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
 	} else {
-		// if clock is being raised, increase refclk before lowering DTO
+		// increase global DPPCLK before lowering per DPP DTO
 		if (update_dppclk || update_dispclk)
 			rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
 		// always update dtos unless clock is lowered and not safe to lower
 		if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
-			dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
 	}
 
 	if (update_dispclk &&
@@ -409,7 +411,7 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
 			continue;
 
 		ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
-		ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;;
+		ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;
 		/* We will not select WM based on dcfclk, so leave it as unconstrained */
 		ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
 		ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
@@ -578,33 +580,33 @@ struct wm_table lpddr4_wm_table = {
 		{
 			.wm_inst = WM_A,
 			.wm_type = WM_TYPE_PSTATE_CHG,
-			.pstate_latency_us = 23.84,
-			.sr_exit_time_us = 12.5,
-			.sr_enter_plus_exit_time_us = 17.0,
+			.pstate_latency_us = 11.65333,
+			.sr_exit_time_us = 5.32,
+			.sr_enter_plus_exit_time_us = 6.38,
 			.valid = true,
 		},
 		{
 			.wm_inst = WM_B,
 			.wm_type = WM_TYPE_PSTATE_CHG,
-			.pstate_latency_us = 23.84,
-			.sr_exit_time_us = 12.5,
-			.sr_enter_plus_exit_time_us = 17.0,
+			.pstate_latency_us = 11.65333,
+			.sr_exit_time_us = 9.82,
+			.sr_enter_plus_exit_time_us = 11.196,
 			.valid = true,
 		},
 		{
 			.wm_inst = WM_C,
 			.wm_type = WM_TYPE_PSTATE_CHG,
-			.pstate_latency_us = 23.84,
-			.sr_exit_time_us = 12.5,
-			.sr_enter_plus_exit_time_us = 17.0,
+			.pstate_latency_us = 11.65333,
+			.sr_exit_time_us = 9.89,
+			.sr_enter_plus_exit_time_us = 11.24,
 			.valid = true,
 		},
 		{
 			.wm_inst = WM_D,
 			.wm_type = WM_TYPE_PSTATE_CHG,
-			.pstate_latency_us = 23.84,
-			.sr_exit_time_us = 12.5,
-			.sr_enter_plus_exit_time_us = 17.0,
+			.pstate_latency_us = 11.65333,
+			.sr_exit_time_us = 9.748,
+			.sr_enter_plus_exit_time_us = 11.102,
 			.valid = true,
 		},
 	}
@@ -691,7 +693,6 @@ void rn_clk_mgr_construct(
 {
 	struct dc_debug_options *debug = &ctx->dc->debug;
 	struct dpm_clocks clock_table = { 0 };
-	struct clk_state_registers_and_bypass s = { 0 };
 
 	clk_mgr->base.ctx = ctx;
 	clk_mgr->base.funcs = &dcn21_funcs;
@@ -711,7 +712,6 @@ void rn_clk_mgr_construct(
 	if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
 		dcn21_funcs.update_clocks = dcn2_update_clocks_fpga;
 		clk_mgr->base.dentist_vco_freq_khz = 3600000;
-		clk_mgr->base.dprefclk_khz = 600000;
 	} else {
 		struct clk_log_info log_info = {0};
 
@@ -722,24 +722,16 @@ void rn_clk_mgr_construct(
 		if (clk_mgr->base.dentist_vco_freq_khz == 0)
 			clk_mgr->base.dentist_vco_freq_khz = 3600000;
 
-		rn_dump_clk_registers(&s, &clk_mgr->base, &log_info);
-		/* Convert dprefclk units from MHz to KHz */
-		/* Value already divided by 10, some resolution lost */
-		clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;
-
-		/* in case we don't get a value from the register, use default */
-		if (clk_mgr->base.dprefclk_khz == 0) {
-			ASSERT(clk_mgr->base.dprefclk_khz == 600000);
-			clk_mgr->base.dprefclk_khz = 600000;
-		}
-
 		if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) {
 			rn_bw_params.wm_table = lpddr4_wm_table;
 		} else {
 			rn_bw_params.wm_table = ddr4_wm_table;
 		}
+		/* Save clocks configured at boot for debug purposes */
+		rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
 	}
 
+	clk_mgr->base.dprefclk_khz = 600000;
 	dce_clock_read_ss_info(clk_mgr);
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index cb7c0e8..6878aed 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -82,8 +82,8 @@ int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
 int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
 {
 	int actual_dispclk_set_mhz = -1;
-	struct dc *core_dc = clk_mgr->base.ctx->dc;
-	struct dmcu *dmcu = core_dc->res_pool->dmcu;
+	struct dc *dc = clk_mgr->base.ctx->dc;
+	struct dmcu *dmcu = dc->res_pool->dmcu;
 
 	/*  Unit of SMU msg parameter is Mhz */
 	actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param(
@@ -91,7 +91,7 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
 			VBIOSSMC_MSG_SetDispclkFreq,
 			requested_dispclk_khz / 1000);
 
-	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
 			if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
 				dmcu->funcs->set_psr_wait_loop(dmcu,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 32f31bf..04441db 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -58,21 +58,21 @@
 #include "hubp.h"
 
 #include "dc_link_dp.h"
+#include "dc_dmub_srv.h"
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 #include "dsc.h"
-#endif
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 #include "vm_helper.h"
-#endif
 
 #include "dce/dce_i2c.h"
 
+#define CTX \
+	dc->ctx
+
 #define DC_LOGGER \
 	dc->ctx->logger
 
-const static char DC_BUILD_ID[] = "production-build";
+static const char DC_BUILD_ID[] = "production-build";
 
 /**
  * DOC: Overview
@@ -287,7 +287,6 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
 		if (pipe->stream == stream && pipe->stream_res.tg) {
-			pipe->stream->adjust = *adjust;
 			dc->hwss.set_drr(&pipe,
 					1,
 					adjust->v_total_min,
@@ -511,10 +510,10 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
 	return ret;
 }
 
-void dc_stream_set_static_screen_events(struct dc *dc,
+void dc_stream_set_static_screen_params(struct dc *dc,
 		struct dc_stream_state **streams,
 		int num_streams,
-		const struct dc_static_screen_events *events)
+		const struct dc_static_screen_params *params)
 {
 	int i = 0;
 	int j = 0;
@@ -533,10 +532,10 @@ void dc_stream_set_static_screen_events(struct dc *dc,
 		}
 	}
 
-	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
+	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
 }
 
-static void destruct(struct dc *dc)
+static void dc_destruct(struct dc *dc)
 {
 	if (dc->current_state) {
 		dc_release_state(dc->current_state);
@@ -569,7 +568,7 @@ static void destruct(struct dc *dc)
 	kfree(dc->bw_dceip);
 	dc->bw_dceip = NULL;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
 	kfree(dc->dcn_soc);
 	dc->dcn_soc = NULL;
 
@@ -577,28 +576,58 @@ static void destruct(struct dc *dc)
 	dc->dcn_ip = NULL;
 
 #endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	kfree(dc->vm_helper);
 	dc->vm_helper = NULL;
 
-#endif
 }
 
-static bool construct(struct dc *dc,
+static bool dc_construct_ctx(struct dc *dc,
+		const struct dc_init_data *init_params)
+{
+	struct dc_context *dc_ctx;
+	enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+
+	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
+	if (!dc_ctx)
+		return false;
+
+	dc_ctx->cgs_device = init_params->cgs_device;
+	dc_ctx->driver_context = init_params->driver;
+	dc_ctx->dc = dc;
+	dc_ctx->asic_id = init_params->asic_id;
+	dc_ctx->dc_sink_id_count = 0;
+	dc_ctx->dc_stream_id_count = 0;
+	dc_ctx->dce_environment = init_params->dce_environment;
+
+	/* Create logger */
+
+	dc_version = resource_parse_asic_id(init_params->asic_id);
+	dc_ctx->dce_version = dc_version;
+
+	dc_ctx->perf_trace = dc_perf_trace_create();
+	if (!dc_ctx->perf_trace) {
+		ASSERT_CRITICAL(false);
+		return false;
+	}
+
+	dc->ctx = dc_ctx;
+
+	return true;
+}
+
+static bool dc_construct(struct dc *dc,
 		const struct dc_init_data *init_params)
 {
 	struct dc_context *dc_ctx;
 	struct bw_calcs_dceip *dc_dceip;
 	struct bw_calcs_vbios *dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
 	struct dcn_soc_bounding_box *dcn_soc;
 	struct dcn_ip_params *dcn_ip;
 #endif
 
-	enum dce_version dc_version = DCE_VERSION_UNKNOWN;
 	dc->config = init_params->flags;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	// Allocate memory for the vm_helper
 	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
 	if (!dc->vm_helper) {
@@ -606,7 +635,6 @@ static bool construct(struct dc *dc,
 		goto fail;
 	}
 
-#endif
 	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
 
 	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
@@ -624,7 +652,7 @@ static bool construct(struct dc *dc,
 	}
 
 	dc->bw_vbios = dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
 	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
 	if (!dcn_soc) {
 		dm_error("%s: failed to create dcn_soc\n", __func__);
@@ -640,31 +668,15 @@ static bool construct(struct dc *dc,
 	}
 
 	dc->dcn_ip = dcn_ip;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	dc->soc_bounding_box = init_params->soc_bounding_box;
 #endif
-#endif
 
-	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
-	if (!dc_ctx) {
+	if (!dc_construct_ctx(dc, init_params)) {
 		dm_error("%s: failed to create ctx\n", __func__);
 		goto fail;
 	}
 
-	dc_ctx->cgs_device = init_params->cgs_device;
-	dc_ctx->driver_context = init_params->driver;
-	dc_ctx->dc = dc;
-	dc_ctx->asic_id = init_params->asic_id;
-	dc_ctx->dc_sink_id_count = 0;
-	dc_ctx->dc_stream_id_count = 0;
-	dc->ctx = dc_ctx;
-
-	/* Create logger */
-
-	dc_ctx->dce_environment = init_params->dce_environment;
-
-	dc_version = resource_parse_asic_id(init_params->asic_id);
-	dc_ctx->dce_version = dc_version;
+	dc_ctx = dc->ctx;
 
 	/* Resource should construct all asic specific resources.
 	 * This should be the only place where we need to parse the asic id
@@ -679,7 +691,7 @@ static bool construct(struct dc *dc,
 		bp_init_data.bios = init_params->asic_id.atombios_base_address;
 
 		dc_ctx->dc_bios = dal_bios_parser_create(
-				&bp_init_data, dc_version);
+				&bp_init_data, dc_ctx->dce_version);
 
 		if (!dc_ctx->dc_bios) {
 			ASSERT_CRITICAL(false);
@@ -687,17 +699,13 @@ static bool construct(struct dc *dc,
 		}
 
 		dc_ctx->created_bios = true;
-		}
-
-	dc_ctx->perf_trace = dc_perf_trace_create();
-	if (!dc_ctx->perf_trace) {
-		ASSERT_CRITICAL(false);
-		goto fail;
 	}
 
+
+
 	/* Create GPIO service */
 	dc_ctx->gpio_service = dal_gpio_service_create(
-			dc_version,
+			dc_ctx->dce_version,
 			dc_ctx->dce_environment,
 			dc_ctx);
 
@@ -706,7 +714,7 @@ static bool construct(struct dc *dc,
 		goto fail;
 	}
 
-	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
+	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
 	if (!dc->res_pool)
 		goto fail;
 
@@ -714,10 +722,8 @@ static bool construct(struct dc *dc,
 	if (!dc->clk_mgr)
 		goto fail;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 	if (dc->res_pool->funcs->update_bw_bounding_box)
 		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
-#endif
 
 	/* Creation of current_state must occur after dc->dml
 	 * is initialized in dc_create_resource_pool because
@@ -739,12 +745,9 @@ static bool construct(struct dc *dc,
 	return true;
 
 fail:
-
-	destruct(dc);
 	return false;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 static bool disable_all_writeback_pipes_for_stream(
 		const struct dc *dc,
 		struct dc_stream_state *stream,
@@ -757,7 +760,6 @@ static bool disable_all_writeback_pipes_for_stream(
 
 	return true;
 }
-#endif
 
 static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 {
@@ -783,16 +785,12 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 		}
 		if (should_disable && old_stream) {
 			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
-#endif
 			if (dc->hwss.apply_ctx_for_surface)
 				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
 		}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 		if (dc->hwss.program_front_end_for_ctx)
 			dc->hwss.program_front_end_for_ctx(dc, dangling_context);
-#endif
 	}
 
 	current_ctx = dc->current_state;
@@ -800,6 +798,33 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 	dc_release_state(current_ctx);
 }
 
+static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
+{
+	int i;
+	int count = 0;
+	struct pipe_ctx *pipe;
+	PERF_TRACE();
+	for (i = 0; i < MAX_PIPES; i++) {
+		pipe = &context->res_ctx.pipe_ctx[i];
+
+		if (!pipe->plane_state)
+			continue;
+
+		/* Timeout 100 ms */
+		while (count < 100000) {
+			/* Must set to false to start with, due to OR in update function */
+			pipe->plane_state->status.is_flip_pending = false;
+			dc->hwss.update_pending_status(pipe);
+			if (!pipe->plane_state->status.is_flip_pending)
+				break;
+			udelay(1);
+			count++;
+		}
+		ASSERT(!pipe->plane_state->status.is_flip_pending);
+	}
+	PERF_TRACE();
+}
+
 /*******************************************************************************
  * Public functions
  ******************************************************************************/
@@ -812,26 +837,38 @@ struct dc *dc_create(const struct dc_init_data *init_params)
 	if (NULL == dc)
 		goto alloc_fail;
 
-	if (false == construct(dc, init_params))
-		goto construct_fail;
+	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
+		if (false == dc_construct_ctx(dc, init_params)) {
+			dc_destruct(dc);
+			goto construct_fail;
+		}
+	} else {
+		if (false == dc_construct(dc, init_params)) {
+			dc_destruct(dc);
+			goto construct_fail;
+		}
 
-	full_pipe_count = dc->res_pool->pipe_count;
-	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
-		full_pipe_count--;
-	dc->caps.max_streams = min(
-			full_pipe_count,
-			dc->res_pool->stream_enc_count);
+		full_pipe_count = dc->res_pool->pipe_count;
+		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
+			full_pipe_count--;
+		dc->caps.max_streams = min(
+				full_pipe_count,
+				dc->res_pool->stream_enc_count);
 
-	dc->caps.max_links = dc->link_count;
-	dc->caps.max_audios = dc->res_pool->audio_count;
-	dc->caps.linear_pitch_alignment = 64;
+		dc->optimize_seamless_boot_streams = 0;
+		dc->caps.max_links = dc->link_count;
+		dc->caps.max_audios = dc->res_pool->audio_count;
+		dc->caps.linear_pitch_alignment = 64;
+
+		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
+
+		if (dc->res_pool->dmcu != NULL)
+			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
+	}
 
 	/* Populate versioning information */
 	dc->versions.dc_ver = DC_VER;
 
-	if (dc->res_pool->dmcu != NULL)
-		dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
-
 	dc->build_id = DC_BUILD_ID;
 
 	DC_LOG_DC("Display Core initialized\n");
@@ -849,7 +886,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
 
 void dc_hardware_init(struct dc *dc)
 {
-	dc->hwss.init_hw(dc);
+	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
+		dc->hwss.init_hw(dc);
 }
 
 void dc_init_callbacks(struct dc *dc,
@@ -869,7 +907,7 @@ void dc_deinit_callbacks(struct dc *dc)
 
 void dc_destroy(struct dc **dc)
 {
-	destruct(*dc);
+	dc_destruct(*dc);
 	kfree(*dc);
 	*dc = NULL;
 }
@@ -1163,10 +1201,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 
 	for (i = 0; i < context->stream_count; i++) {
 		if (context->streams[i]->apply_seamless_boot_optimization)
-			dc->optimize_seamless_boot = true;
+			dc->optimize_seamless_boot_streams++;
 	}
 
-	if (!dc->optimize_seamless_boot)
+	if (dc->optimize_seamless_boot_streams == 0)
 		dc->hwss.prepare_bandwidth(dc, context);
 
 	/* re-program planes for existing stream, in case we need to
@@ -1182,10 +1220,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 				context->stream_status[i].plane_count,
 				context); /* use new pipe config in new context */
 		}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
-	if (dc->hwss.program_front_end_for_ctx)
-		dc->hwss.program_front_end_for_ctx(dc, context);
-#endif
 
 	/* Program hardware */
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1204,10 +1238,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	}
 
 	/* Program all planes within new context*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	if (dc->hwss.program_front_end_for_ctx)
 		dc->hwss.program_front_end_for_ctx(dc, context);
-#endif
 	for (i = 0; i < context->stream_count; i++) {
 		const struct dc_link *link = context->streams[i]->link;
 
@@ -1245,6 +1277,13 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 
 	dc_enable_stereo(dc, context, dc_streams, context->stream_count);
 
+	if (dc->optimize_seamless_boot_streams == 0) {
+		/* Must wait for no flips to be pending before doing optimize bw */
+		wait_for_no_pipes_pending(dc, context);
+		/* pplib is notified if disp_num changed */
+		dc->hwss.optimize_bandwidth(dc, context);
+	}
+
 	for (i = 0; i < context->stream_count; i++)
 		context->streams[i]->mode_changed = false;
 
@@ -1279,12 +1318,18 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
 	return (result == DC_OK);
 }
 
+bool dc_is_hw_initialized(struct dc *dc)
+{
+	struct dc_bios *dcb = dc->ctx->dc_bios;
+	return dcb->funcs->is_accelerated_mode(dcb);
+}
+
 bool dc_post_update_surfaces_to_stream(struct dc *dc)
 {
 	int i;
 	struct dc_state *context = dc->current_state;
 
-	if (!dc->optimized_required || dc->optimize_seamless_boot)
+	if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0)
 		return true;
 
 	post_surface_trace(dc);
@@ -1313,7 +1358,7 @@ struct dc_state *dc_create_state(struct dc *dc)
 	 * initialize and obtain IP and SOC the base DML instance from DC is
 	 * initially copied into every context
 	 */
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
 	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
 #endif
 
@@ -1486,11 +1531,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
 		elevate_update_type(&update_type, UPDATE_TYPE_MED);
 	}
 
-	if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) {
-		update_flags->bits.sdr_white_level = 1;
-		elevate_update_type(&update_type, UPDATE_TYPE_MED);
-	}
-
 	if (u->plane_info->dcc.enable != u->surface->dcc.enable
 			|| u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
 			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
@@ -1508,7 +1548,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
 	}
 
 	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
-			|| u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
 			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
 		update_flags->bits.plane_size_change = 1;
 		elevate_update_type(&update_type, UPDATE_TYPE_MED);
@@ -1547,7 +1586,10 @@ static enum surface_update_type get_scaling_info_update_type(
 	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
 			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
 			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
-			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
+			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
+			|| u->scaling_info->scaling_quality.integer_scaling !=
+				u->surface->scaling_quality.integer_scaling
+			) {
 		update_flags->bits.scaling_change = 1;
 
 		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
@@ -1563,7 +1605,7 @@ static enum surface_update_type get_scaling_info_update_type(
 
 		update_flags->bits.scaling_change = 1;
 		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
-				&& u->scaling_info->src_rect.height > u->surface->src_rect.height)
+				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
 			/* Making src rect bigger requires a bandwidth change */
 			update_flags->bits.clock_change = 1;
 	}
@@ -1577,11 +1619,11 @@ static enum surface_update_type get_scaling_info_update_type(
 		update_flags->bits.position_change = 1;
 
 	if (update_flags->bits.clock_change
-			|| update_flags->bits.bandwidth_change)
+			|| update_flags->bits.bandwidth_change
+			|| update_flags->bits.scaling_change)
 		return UPDATE_TYPE_FULL;
 
-	if (update_flags->bits.scaling_change
-			|| update_flags->bits.position_change)
+	if (update_flags->bits.position_change)
 		return UPDATE_TYPE_MED;
 
 	return UPDATE_TYPE_FAST;
@@ -1635,6 +1677,12 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
 			update_flags->bits.gamma_change = 1;
 	}
 
+	if (u->hdr_mult.value)
+		if (u->hdr_mult.value != u->surface->hdr_mult.value) {
+			update_flags->bits.hdr_mult = 1;
+			elevate_update_type(&overall_type, UPDATE_TYPE_MED);
+		}
+
 	if (update_flags->bits.in_transfer_func_change) {
 		type = UPDATE_TYPE_MED;
 		elevate_update_type(&overall_type, type);
@@ -1668,7 +1716,8 @@ static enum surface_update_type check_update_surfaces_for_stream(
 		union stream_update_flags *su_flags = &stream_update->stream->update_flags;
 
 		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
-				(stream_update->dst.height != 0 && stream_update->dst.width != 0))
+			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
+			stream_update->integer_scaling_update)
 			su_flags->bits.scaling = 1;
 
 		if (stream_update->out_transfer_func)
@@ -1683,15 +1732,16 @@ static enum surface_update_type check_update_surfaces_for_stream(
 		if (stream_update->gamut_remap)
 			su_flags->bits.gamut_remap = 1;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 		if (stream_update->wb_update)
 			su_flags->bits.wb_update = 1;
-#endif
 		if (su_flags->raw != 0)
 			overall_type = UPDATE_TYPE_FULL;
 
 		if (stream_update->output_csc_transform || stream_update->output_color_space)
 			su_flags->bits.out_csc = 1;
+
+		if (stream_update->dsc_config)
+			overall_type = UPDATE_TYPE_FULL;
 	}
 
 	for (i = 0 ; i < surface_count; i++) {
@@ -1817,8 +1867,6 @@ static void copy_surface_update_to_plane(
 				srf_update->plane_info->global_alpha_value;
 		surface->dcc =
 				srf_update->plane_info->dcc;
-		surface->sdr_white_level =
-				srf_update->plane_info->sdr_white_level;
 		surface->layer_index =
 				srf_update->plane_info->layer_index;
 	}
@@ -1851,7 +1899,6 @@ static void copy_surface_update_to_plane(
 			sizeof(struct dc_transfer_func_distributed_points));
 	}
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	if (srf_update->func_shaper &&
 			(surface->in_shaper_func !=
 			srf_update->func_shaper))
@@ -1864,13 +1911,16 @@ static void copy_surface_update_to_plane(
 		memcpy(surface->lut3d_func, srf_update->lut3d_func,
 		sizeof(*surface->lut3d_func));
 
+	if (srf_update->hdr_mult.value)
+		surface->hdr_mult =
+				srf_update->hdr_mult;
+
 	if (srf_update->blend_tf &&
 			(surface->blend_tf !=
 			srf_update->blend_tf))
 		memcpy(surface->blend_tf, srf_update->blend_tf,
 		sizeof(*surface->blend_tf));
 
-#endif
 	if (srf_update->input_csc_color_matrix)
 		surface->input_csc_color_matrix =
 			*srf_update->input_csc_color_matrix;
@@ -1883,8 +1933,10 @@ static void copy_surface_update_to_plane(
 static void copy_stream_update_to_stream(struct dc *dc,
 					 struct dc_state *context,
 					 struct dc_stream_state *stream,
-					 const struct dc_stream_update *update)
+					 struct dc_stream_update *update)
 {
+	struct dc_context *dc_ctx = dc->ctx;
+
 	if (update == NULL || stream == NULL)
 		return;
 
@@ -1945,7 +1997,6 @@ static void copy_stream_update_to_stream(struct dc *dc,
 
 	if (update->dither_option)
 		stream->dither_option = *update->dither_option;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	/* update current stream with writeback info */
 	if (update->wb_update) {
 		int i;
@@ -1956,23 +2007,32 @@ static void copy_stream_update_to_stream(struct dc *dc,
 			stream->writeback_info[i] =
 				update->wb_update->writeback_info[i];
 	}
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
 	if (update->dsc_config) {
 		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
 		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
 		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
 				       update->dsc_config->num_slices_v != 0);
 
-		stream->timing.dsc_cfg = *update->dsc_config;
-		stream->timing.flags.DSC = enable_dsc;
-		if (!dc->res_pool->funcs->validate_bandwidth(dc, context,
-							     true)) {
-			stream->timing.dsc_cfg = old_dsc_cfg;
-			stream->timing.flags.DSC = old_dsc_enabled;
+		/* Use temporary context for validating new DSC config */
+		struct dc_state *dsc_validate_context = dc_create_state(dc);
+
+		if (dsc_validate_context) {
+			dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
+
+			stream->timing.dsc_cfg = *update->dsc_config;
+			stream->timing.flags.DSC = enable_dsc;
+			if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
+				stream->timing.dsc_cfg = old_dsc_cfg;
+				stream->timing.flags.DSC = old_dsc_enabled;
+				update->dsc_config = NULL;
+			}
+
+			dc_release_state(dsc_validate_context);
+		} else {
+			DC_ERROR("Failed to allocate new validate context for DSC change\n");
+			update->dsc_config = NULL;
 		}
 	}
-#endif
 }
 
 static void commit_planes_do_stream_update(struct dc *dc,
@@ -1992,11 +2052,11 @@ static void commit_planes_do_stream_update(struct dc *dc,
 
 			if (stream_update->periodic_interrupt0 &&
 					dc->hwss.setup_periodic_interrupt)
-				dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);
+				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
 
 			if (stream_update->periodic_interrupt1 &&
 					dc->hwss.setup_periodic_interrupt)
-				dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1);
+				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
 
 			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
 					stream_update->vrr_infopacket ||
@@ -2006,6 +2066,12 @@ static void commit_planes_do_stream_update(struct dc *dc,
 				dc->hwss.update_info_frame(pipe_ctx);
 			}
 
+			if (stream_update->hdr_static_metadata &&
+					stream->use_dynamic_meta &&
+					dc->hwss.set_dmdata_attributes &&
+					pipe_ctx->stream->dmdata_address.quad_part != 0)
+				dc->hwss.set_dmdata_attributes(pipe_ctx);
+
 			if (stream_update->gamut_remap)
 				dc_stream_set_gamut_remap(dc, stream);
 
@@ -2013,31 +2079,25 @@ static void commit_planes_do_stream_update(struct dc *dc,
 				dc_stream_program_csc_matrix(dc, stream);
 
 			if (stream_update->dither_option) {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
-#endif
 				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
 									&pipe_ctx->stream->bit_depth_params);
 				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
 						&stream->bit_depth_params,
 						&stream->clamping);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 				while (odm_pipe) {
 					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
 							&stream->bit_depth_params,
 							&stream->clamping);
 					odm_pipe = odm_pipe->next_odm_pipe;
 				}
-#endif
 			}
 
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
 			if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
 				dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
 				dp_update_dsc_config(pipe_ctx);
 				dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
 			}
-#endif
 			/* Full fe update*/
 			if (update_type == UPDATE_TYPE_FAST)
 				continue;
@@ -2053,7 +2113,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
 
 					dc->hwss.optimize_bandwidth(dc, dc->current_state);
 				} else {
-					if (!dc->optimize_seamless_boot)
+					if (dc->optimize_seamless_boot_streams == 0)
 						dc->hwss.prepare_bandwidth(dc, dc->current_state);
 
 					core_link_enable_stream(dc->current_state, pipe_ctx);
@@ -2094,7 +2154,7 @@ static void commit_planes_for_stream(struct dc *dc,
 	int i, j;
 	struct pipe_ctx *top_pipe_to_program = NULL;
 
-	if (dc->optimize_seamless_boot && surface_count > 0) {
+	if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) {
 		/* Optimize seamless boot flag keeps clocks and watermarks high until
 		 * first flip. After first flip, optimization is required to lower
 		 * bandwidth. Important to note that it is expected UEFI will
@@ -2103,12 +2163,14 @@ static void commit_planes_for_stream(struct dc *dc,
 		 */
 		if (stream->apply_seamless_boot_optimization) {
 			stream->apply_seamless_boot_optimization = false;
-			dc->optimize_seamless_boot = false;
-			dc->optimized_required = true;
+			dc->optimize_seamless_boot_streams--;
+
+			if (dc->optimize_seamless_boot_streams == 0)
+				dc->optimized_required = true;
 		}
 	}
 
-	if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
+	if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) {
 		dc->hwss.prepare_bandwidth(dc, context);
 		context_clock_trace(dc, context);
 	}
@@ -2124,15 +2186,12 @@ static void commit_planes_for_stream(struct dc *dc,
 		 */
 		if (dc->hwss.apply_ctx_for_surface)
 			dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 		if (dc->hwss.program_front_end_for_ctx)
 			dc->hwss.program_front_end_for_ctx(dc, context);
-#endif
 
 		return;
 	}
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
 		for (i = 0; i < surface_count; i++) {
 			struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -2154,7 +2213,6 @@ static void commit_planes_for_stream(struct dc *dc,
 			}
 		}
 	}
-#endif
 
 	// Update Type FULL, Surface updates
 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
@@ -2175,7 +2233,6 @@ static void commit_planes_for_stream(struct dc *dc,
 			if (update_type == UPDATE_TYPE_FAST)
 				continue;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
 
 			if (dc->hwss.program_triplebuffer != NULL &&
@@ -2184,7 +2241,6 @@ static void commit_planes_for_stream(struct dc *dc,
 				dc->hwss.program_triplebuffer(
 					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
 			}
-#endif
 			stream_status =
 				stream_get_status(context, pipe_ctx->stream);
 
@@ -2193,10 +2249,24 @@ static void commit_planes_for_stream(struct dc *dc,
 					dc, pipe_ctx->stream, stream_status->plane_count, context);
 		}
 	}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
-	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST)
+	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
 		dc->hwss.program_front_end_for_ctx(dc, context);
+#ifdef CONFIG_DRM_AMD_DC_DCN
+		if (dc->debug.validate_dml_output) {
+			for (i = 0; i < dc->res_pool->pipe_count; i++) {
+				struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];
+				if (cur_pipe.stream == NULL)
+					continue;
+
+				cur_pipe.plane_res.hubp->funcs->validate_dml_output(
+						cur_pipe.plane_res.hubp, dc->ctx,
+						&context->res_ctx.pipe_ctx[i].rq_regs,
+						&context->res_ctx.pipe_ctx[i].dlg_regs,
+						&context->res_ctx.pipe_ctx[i].ttu_regs);
+			}
+		}
 #endif
+	}
 
 	// Update Type FAST, Surface updates
 	if (update_type == UPDATE_TYPE_FAST) {
@@ -2206,7 +2276,6 @@ static void commit_planes_for_stream(struct dc *dc,
 		 */
 		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 		if (dc->hwss.set_flip_control_gsl)
 			for (i = 0; i < surface_count; i++) {
 				struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -2225,7 +2294,6 @@ static void commit_planes_for_stream(struct dc *dc,
 							plane_state->flip_immediate);
 				}
 			}
-#endif
 		/* Perform requested Updates */
 		for (i = 0; i < surface_count; i++) {
 			struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -2238,7 +2306,6 @@ static void commit_planes_for_stream(struct dc *dc,
 
 				if (pipe_ctx->plane_state != plane_state)
 					continue;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 				/*program triple buffer after lock based on flip type*/
 				if (dc->hwss.program_triplebuffer != NULL &&
 					!dc->debug.disable_tri_buf) {
@@ -2246,7 +2313,6 @@ static void commit_planes_for_stream(struct dc *dc,
 					dc->hwss.program_triplebuffer(
 						dc, pipe_ctx, plane_state->triplebuffer_flips);
 				}
-#endif
 				if (srf_updates[i].flip_addr)
 					dc->hwss.update_plane_addr(dc, pipe_ctx);
 			}
@@ -2396,25 +2462,21 @@ void dc_set_power_state(
 	enum dc_acpi_cm_power_state power_state)
 {
 	struct kref refcount;
-	struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib),
-						GFP_KERNEL);
-
-	ASSERT(dml);
-	if (!dml)
-		return;
+	struct display_mode_lib *dml;
 
 	switch (power_state) {
 	case DC_ACPI_CM_POWER_STATE_D0:
 		dc_resource_state_construct(dc, dc->current_state);
 
+		if (dc->ctx->dmub_srv)
+			dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
+
 		dc->hwss.init_hw(dc);
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 		if (dc->hwss.init_sys_ctx != NULL &&
 			dc->vm_pa_config.valid) {
 			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
 		}
-#endif
 
 		break;
 	default:
@@ -2423,6 +2485,12 @@ void dc_set_power_state(
 		 * clean state, and dc hw programming optimizations will not
 		 * cause any trouble.
 		 */
+		dml = kzalloc(sizeof(struct display_mode_lib),
+				GFP_KERNEL);
+
+		ASSERT(dml);
+		if (!dml)
+			return;
 
 		/* Preserve refcount */
 		refcount = dc->current_state->refcount;
@@ -2436,10 +2504,10 @@ void dc_set_power_state(
 		dc->current_state->refcount = refcount;
 		dc->current_state->bw_ctx.dml = *dml;
 
+		kfree(dml);
+
 		break;
 	}
-
-	kfree(dml);
 }
 
 void dc_resume(struct dc *dc)
@@ -2494,6 +2562,17 @@ bool dc_submit_i2c(
 		cmd);
 }
 
+bool dc_submit_i2c_oem(
+		struct dc *dc,
+		struct i2c_command *cmd)
+{
+	struct ddc_service *ddc = dc->res_pool->oem_device;
+	return dce_i2c_submit_command(
+		dc->res_pool,
+		ddc->ddc_pin,
+		cmd);
+}
+
 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
 {
 	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index b9227d5..502ed3c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -33,7 +33,6 @@
 
 #include "core_status.h"
 #include "core_types.h"
-#include "hw_sequencer.h"
 
 #include "resource.h"
 
@@ -310,14 +309,13 @@ void context_timing_trace(
 		struct resource_context *res_ctx)
 {
 	int i;
-	struct dc  *core_dc = dc;
 	int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0};
 	struct crtc_position position;
-	unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
+	unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
 	DC_LOGGER_INIT(dc->ctx->logger);
 
 
-	for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
 		/* get_position() returns CRTC vertical/horizontal counter
 		 * hence not applicable for underlay pipe
@@ -329,7 +327,7 @@ void context_timing_trace(
 		h_pos[i] = position.horizontal_count;
 		v_pos[i] = position.vertical_count;
 	}
-	for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
 
 		if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
@@ -347,7 +345,7 @@ void context_clock_trace(
 		struct dc *dc,
 		struct dc_state *context)
 {
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	DC_LOGGER_INIT(dc->ctx->logger);
 	CLOCK_TRACE("Current: dispclk_khz:%d  max_dppclk_khz:%d  dcfclk_khz:%d\n"
 			"dcfclk_deep_sleep_khz:%d  fclk_khz:%d  socclk_khz:%d\n",
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 4619f94..a09119c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -45,6 +45,7 @@
 #include "dpcd_defs.h"
 #include "dmcu.h"
 #include "hw/clk_mgr.h"
+#include "../dce/dmub_psr.h"
 
 #define DC_LOGGER_INIT(logger)
 
@@ -74,7 +75,7 @@ enum {
 /*******************************************************************************
  * Private functions
  ******************************************************************************/
-static void destruct(struct dc_link *link)
+static void dc_link_destruct(struct dc_link *link)
 {
 	int i;
 
@@ -850,18 +851,12 @@ static bool dc_link_detect_helper(struct dc_link *link,
 				if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
 					same_dpcd = false;
 			}
-			/* Active dongle plug in without display or downstream unplug*/
+			/* Active dongle downstream unplug*/
 			if (link->type == dc_connection_active_dongle &&
 				link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
-				if (prev_sink != NULL) {
+				if (prev_sink != NULL)
 					/* Downstream unplug */
 					dc_sink_release(prev_sink);
-				} else {
-					/* Empty dongle plug in */
-					dp_verify_link_cap_with_retries(link,
-							&link->reported_link_cap,
-							LINK_TRAINING_MAX_VERIFY_RETRY);
-				}
 				return true;
 			}
 
@@ -968,8 +963,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
 			same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
 
 		if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
-			sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX &&
-			reason != DETECT_REASON_HPDRX) {
+			sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
 			/*
 			 * TODO debug why Dell 2413 doesn't like
 			 *  two link trainings
@@ -1244,7 +1238,7 @@ static enum transmitter translate_encoder_to_transmitter(
 	}
 }
 
-static bool construct(
+static bool dc_link_construct(
 	struct dc_link *link,
 	const struct link_init_data *init_params)
 {
@@ -1446,7 +1440,7 @@ struct dc_link *link_create(const struct link_init_data *init_params)
 	if (NULL == link)
 		goto alloc_fail;
 
-	if (false == construct(link, init_params))
+	if (false == dc_link_construct(link, init_params))
 		goto construct_fail;
 
 	return link;
@@ -1460,7 +1454,7 @@ struct dc_link *link_create(const struct link_init_data *init_params)
 
 void link_destroy(struct dc_link **link)
 {
-	destruct(*link);
+	dc_link_destruct(*link);
 	kfree(*link);
 	*link = NULL;
 }
@@ -1495,10 +1489,7 @@ static enum dc_status enable_link_dp(
 	bool skip_video_pattern;
 	struct dc_link *link = stream->link;
 	struct dc_link_settings link_settings = {0};
-	enum dp_panel_mode panel_mode;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	bool fec_enable;
-#endif
 	int i;
 	bool apply_seamless_boot_optimization = false;
 
@@ -1514,15 +1505,6 @@ static enum dc_status enable_link_dp(
 	decide_link_settings(stream, &link_settings);
 
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
-		/* If link settings are different than current and link already enabled
-		 * then need to disable before programming to new rate.
-		 */
-		if (link->link_status.link_active &&
-			(link->cur_link_settings.lane_count != link_settings.lane_count ||
-			 link->cur_link_settings.link_rate != link_settings.link_rate)) {
-			dp_disable_link_phy(link, pipe_ctx->stream->signal);
-		}
-
 		/*in case it is not on*/
 		link->dc->hwss.edp_power_control(link, true);
 		link->dc->hwss.edp_wait_for_hpd_ready(link, true);
@@ -1533,50 +1515,29 @@ static enum dc_status enable_link_dp(
 	if (state->clk_mgr && !apply_seamless_boot_optimization)
 		state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
 
-	dp_enable_link_phy(
-		link,
-		pipe_ctx->stream->signal,
-		pipe_ctx->clock_source->id,
-		&link_settings);
-
-	if (stream->sink_patches.dppowerup_delay > 0) {
-		int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
-
-		msleep(delay_dp_power_up_in_ms);
-	}
-
-	panel_mode = dp_get_panel_mode(link);
-	dp_set_panel_mode(link, panel_mode);
-
 	skip_video_pattern = true;
 
 	if (link_settings.link_rate == LINK_RATE_LOW)
 			skip_video_pattern = false;
 
-	if (link->aux_access_disabled) {
-		dc_link_dp_perform_link_training_skip_aux(link, &link_settings);
-
-		link->cur_link_settings = link_settings;
-		status = DC_OK;
-	} else if (perform_link_training_with_retries(
-			link,
+	if (perform_link_training_with_retries(
 			&link_settings,
 			skip_video_pattern,
-			LINK_TRAINING_ATTEMPTS)) {
+			LINK_TRAINING_ATTEMPTS,
+			pipe_ctx,
+			pipe_ctx->stream->signal)) {
 		link->cur_link_settings = link_settings;
 		status = DC_OK;
 	}
 	else
 		status = DC_FAIL_DP_LINK_TRAINING;
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	if (link->preferred_training_settings.fec_enable != NULL)
 		fec_enable = *link->preferred_training_settings.fec_enable;
 	else
 		fec_enable = true;
 
 	dp_set_fec_enable(link, fec_enable);
-#endif
 	return status;
 }
 
@@ -2063,6 +2024,45 @@ static void write_i2c_redriver_setting(
 		ASSERT(i2c_success);
 }
 
+static void disable_link(struct dc_link *link, enum signal_type signal)
+{
+	/*
+	 * TODO: implement call for dp_set_hw_test_pattern
+	 * it is needed for compliance testing
+	 */
+
+	/* Here we need to specify that encoder output settings
+	 * need to be calculated as for the set mode;
+	 * this will lead to querying dynamic link capabilities,
+	 * which should be done before enabling output.
+	 */
+
+	if (dc_is_dp_signal(signal)) {
+		/* SST DP, eDP */
+		if (dc_is_dp_sst_signal(signal))
+			dp_disable_link_phy(link, signal);
+		else
+			dp_disable_link_phy_mst(link, signal);
+
+		if (dc_is_dp_sst_signal(signal) ||
+				link->mst_stream_alloc_table.stream_count == 0) {
+			dp_set_fec_enable(link, false);
+			dp_set_fec_ready(link, false);
+		}
+	} else {
+		if (signal != SIGNAL_TYPE_VIRTUAL)
+			link->link_enc->funcs->disable_output(link->link_enc, signal);
+	}
+
+	if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+		/* MST disable link only when no stream use the link */
+		if (link->mst_stream_alloc_table.stream_count <= 0)
+			link->link_status.link_active = false;
+	} else {
+		link->link_status.link_active = false;
+	}
+}
+
 static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_stream_state *stream = pipe_ctx->stream;
@@ -2147,6 +2147,19 @@ static enum dc_status enable_link(
 		struct pipe_ctx *pipe_ctx)
 {
 	enum dc_status status = DC_ERROR_UNEXPECTED;
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->link;
+
+	/* There are some scenarios where the driver is unloaded with a display
+	 * still enabled. When the driver is reloaded, the display may fail
+	 * to light up if there is a mismatch between the old and new
+	 * link settings. Need to call disable first before enabling at
+	 * the new link settings.
+	 */
+	 */
+	if (link->link_status.link_active) {
+		disable_link(link, pipe_ctx->stream->signal);
+	}
+
 	switch (pipe_ctx->stream->signal) {
 	case SIGNAL_TYPE_DISPLAY_PORT:
 		status = enable_link_dp(state, pipe_ctx);
@@ -2181,46 +2194,6 @@ static enum dc_status enable_link(
 	return status;
 }
 
-static void disable_link(struct dc_link *link, enum signal_type signal)
-{
-	/*
-	 * TODO: implement call for dp_set_hw_test_pattern
-	 * it is needed for compliance testing
-	 */
-
-	/* here we need to specify that encoder output settings
-	 * need to be calculated as for the set mode,
-	 * it will lead to querying dynamic link capabilities
-	 * which should be done before enable output */
-
-	if (dc_is_dp_signal(signal)) {
-		/* SST DP, eDP */
-		if (dc_is_dp_sst_signal(signal))
-			dp_disable_link_phy(link, signal);
-		else
-			dp_disable_link_phy_mst(link, signal);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-
-		if (dc_is_dp_sst_signal(signal) ||
-				link->mst_stream_alloc_table.stream_count == 0) {
-			dp_set_fec_enable(link, false);
-			dp_set_fec_ready(link, false);
-		}
-#endif
-	} else {
-		if (signal != SIGNAL_TYPE_VIRTUAL)
-			link->link_enc->funcs->disable_output(link->link_enc, signal);
-	}
-
-	if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
-		/* MST disable link only when no stream use the link */
-		if (link->mst_stream_alloc_table.stream_count <= 0)
-			link->link_status.link_active = false;
-	} else {
-		link->link_status.link_active = false;
-	}
-}
-
 static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing)
 {
 
@@ -2357,9 +2330,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
 		uint32_t backlight_pwm_u16_16,
 		uint32_t frame_ramp)
 {
-	struct dc  *core_dc = link->ctx->dc;
-	struct abm *abm = core_dc->res_pool->abm;
-	struct dmcu *dmcu = core_dc->res_pool->dmcu;
+	struct dc  *dc = link->ctx->dc;
+	struct abm *abm = dc->res_pool->abm;
+	struct dmcu *dmcu = dc->res_pool->dmcu;
 	unsigned int controller_id = 0;
 	bool use_smooth_brightness = true;
 	int i;
@@ -2377,22 +2350,22 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
 
 	if (dc_is_embedded_signal(link->connector_signal)) {
 		for (i = 0; i < MAX_PIPES; i++) {
-			if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
-				if (core_dc->current_state->res_ctx.
+			if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
+				if (dc->current_state->res_ctx.
 						pipe_ctx[i].stream->link
 						== link) {
 					/* DMCU -1 for all controller id values,
 					 * therefore +1 here
 					 */
 					controller_id =
-						core_dc->current_state->
+						dc->current_state->
 						res_ctx.pipe_ctx[i].stream_res.tg->inst +
 						1;
 
 					/* Disable brightness ramping when the display is blanked
 					 * as it can hang the DMCU
 					 */
-					if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
+					if (dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
 						frame_ramp = 0;
 				}
 			}
@@ -2410,8 +2383,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
 
 bool dc_link_set_abm_disable(const struct dc_link *link)
 {
-	struct dc  *core_dc = link->ctx->dc;
-	struct abm *abm = core_dc->res_pool->abm;
+	struct dc  *dc = link->ctx->dc;
+	struct abm *abm = dc->res_pool->abm;
 
 	if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL))
 		return false;
@@ -2423,12 +2396,13 @@ bool dc_link_set_abm_disable(const struct dc_link *link)
 
 bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait)
 {
-	struct dc  *core_dc = link->ctx->dc;
-	struct dmcu *dmcu = core_dc->res_pool->dmcu;
+	struct dc  *dc = link->ctx->dc;
+	struct dmcu *dmcu = dc->res_pool->dmcu;
+	struct dmub_psr *psr = dc->res_pool->psr;
 
-
-
-	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
+	if ((psr != NULL) && link->psr_feature_enabled)
+		psr->funcs->set_psr_enable(psr, allow_active);
+	else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
 		dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
 
 	link->psr_allow_active = allow_active;
@@ -2438,10 +2412,13 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
 
 bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
 {
-	struct dc  *core_dc = link->ctx->dc;
-	struct dmcu *dmcu = core_dc->res_pool->dmcu;
+	struct dc  *dc = link->ctx->dc;
+	struct dmcu *dmcu = dc->res_pool->dmcu;
+	struct dmub_psr *psr = dc->res_pool->psr;
 
-	if (dmcu != NULL && link->psr_feature_enabled)
+	if (psr != NULL && link->psr_feature_enabled)
+		psr->funcs->get_psr_state(psr_state);
+	else if (dmcu != NULL && link->psr_feature_enabled)
 		dmcu->funcs->get_psr_state(dmcu, psr_state);
 
 	return true;
@@ -2486,8 +2463,9 @@ bool dc_link_setup_psr(struct dc_link *link,
 		const struct dc_stream_state *stream, struct psr_config *psr_config,
 		struct psr_context *psr_context)
 {
-	struct dc *core_dc;
+	struct dc *dc;
 	struct dmcu *dmcu;
+	struct dmub_psr *psr;
 	int i;
 	/* updateSinkPsrDpcdConfig*/
 	union dpcd_psr_configuration psr_configuration;
@@ -2497,10 +2475,11 @@ bool dc_link_setup_psr(struct dc_link *link,
 	if (!link)
 		return false;
 
-	core_dc = link->ctx->dc;
-	dmcu = core_dc->res_pool->dmcu;
+	dc = link->ctx->dc;
+	dmcu = dc->res_pool->dmcu;
+	psr = dc->res_pool->psr;
 
-	if (!dmcu)
+	if (!dmcu && !psr)
 		return false;
 
 
@@ -2537,13 +2516,13 @@ bool dc_link_setup_psr(struct dc_link *link,
 	psr_context->engineId = link->link_enc->preferred_engine;
 
 	for (i = 0; i < MAX_PIPES; i++) {
-		if (core_dc->current_state->res_ctx.pipe_ctx[i].stream
+		if (dc->current_state->res_ctx.pipe_ctx[i].stream
 				== stream) {
 			/* dmcu -1 for all controller id values,
 			 * therefore +1 here
 			 */
 			psr_context->controllerId =
-				core_dc->current_state->res_ctx.
+				dc->current_state->res_ctx.
 				pipe_ctx[i].stream_res.tg->inst + 1;
 			break;
 		}
@@ -2556,7 +2535,7 @@ bool dc_link_setup_psr(struct dc_link *link,
 		transmitter_to_phy_id(link->link_enc->transmitter);
 
 	psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
-	psr_context->vsyncRateHz = div64_u64(div64_u64((stream->
+	psr_context->vsync_rate_hz = div64_u64(div64_u64((stream->
 					timing.pix_clk_100hz * 100),
 					stream->timing.v_total),
 					stream->timing.h_total);
@@ -2586,7 +2565,7 @@ bool dc_link_setup_psr(struct dc_link *link,
 
 	psr_context->psr_level.u32all = 0;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	/*skip power down the single pipe since it blocks the cstate*/
 	if (ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
 		psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
@@ -2609,7 +2588,10 @@ bool dc_link_setup_psr(struct dc_link *link,
 	 */
 	psr_context->frame_delay = 0;
 
-	link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+	if (psr)
+		link->psr_feature_enabled = psr->funcs->setup_psr(psr, link, psr_context);
+	else
+		link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
 
 	/* psr_enabled == 0 indicates setup_psr did not succeed, but this
 	 * should not happen since firmware should be running at this point
@@ -2644,28 +2626,13 @@ static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
 	return dc_fixpt_div_int(mbytes_per_sec, 54);
 }
 
-static int get_color_depth(enum dc_color_depth color_depth)
-{
-	switch (color_depth) {
-	case COLOR_DEPTH_666: return 6;
-	case COLOR_DEPTH_888: return 8;
-	case COLOR_DEPTH_101010: return 10;
-	case COLOR_DEPTH_121212: return 12;
-	case COLOR_DEPTH_141414: return 14;
-	case COLOR_DEPTH_161616: return 16;
-	default: return 0;
-	}
-}
-
 static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
 {
-	uint32_t bpc;
 	uint64_t kbps;
 	struct fixed31_32 peak_kbps;
 	uint32_t numerator;
 	uint32_t denominator;
 
-	bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
 	kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
 
 	/*
@@ -2899,6 +2866,52 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
 
 	return DC_OK;
 }
+
+enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link)
+{
+	int i;
+	struct pipe_ctx *pipe_ctx;
+
+	// Clear all of MST payload then reallocate
+	for (i = 0; i < MAX_PIPES; i++) {
+		pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+
+		/* The driver enables split pipes for external monitors,
+		 * so we have to check whether pipe_ctx is a split pipe or not.
+		 * If it is a split pipe, the driver uses the top pipe to
+		 * reallocate.
+		 */
+		if (!pipe_ctx || pipe_ctx->top_pipe)
+			continue;
+
+		if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
+				pipe_ctx->stream->dpms_off == false &&
+				pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+			deallocate_mst_payload(pipe_ctx);
+		}
+	}
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+
+		if (!pipe_ctx || pipe_ctx->top_pipe)
+			continue;
+
+		if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
+				pipe_ctx->stream->dpms_off == false &&
+				pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+			/* enable/disable PHY will clear connection between BE and FE
+			 * need to restore it.
+			 */
+			link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+									pipe_ctx->stream_res.stream_enc->id, true);
+			dc_link_allocate_mst_payload(pipe_ctx);
+		}
+	}
+
+	return DC_OK;
+}
+
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
 static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
 {
@@ -2922,12 +2935,12 @@ void core_link_enable_stream(
 		struct dc_state *state,
 		struct pipe_ctx *pipe_ctx)
 {
-	struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+	struct dc *dc = pipe_ctx->stream->ctx->dc;
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	enum dc_status status;
 	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
 
-	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) &&
+	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
 			dc_is_virtual_signal(pipe_ctx->stream->signal))
 		return;
 
@@ -2946,6 +2959,7 @@ void core_link_enable_stream(
 			pipe_ctx->stream_res.stream_enc,
 			&stream->timing,
 			stream->output_color_space,
+			stream->use_vsc_sdp_for_colorimetry,
 			stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
 
 	if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
@@ -2969,14 +2983,14 @@ void core_link_enable_stream(
 			pipe_ctx->stream_res.stream_enc,
 			&stream->timing);
 
-	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 		bool apply_edp_fast_boot_optimization =
 			pipe_ctx->stream->apply_edp_fast_boot_optimization;
 
 		pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
 
 		resource_build_info_frame(pipe_ctx);
-		core_dc->hwss.update_info_frame(pipe_ctx);
+		dc->hwss.update_info_frame(pipe_ctx);
 
 		/* Do not touch link on seamless boot optimization. */
 		if (pipe_ctx->stream->apply_seamless_boot_optimization) {
@@ -3019,7 +3033,7 @@ void core_link_enable_stream(
 			}
 		}
 
-		core_dc->hwss.enable_audio_stream(pipe_ctx);
+		dc->hwss.enable_audio_stream(pipe_ctx);
 
 		/* turn off otg test pattern if enable */
 		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
@@ -3027,28 +3041,24 @@ void core_link_enable_stream(
 					CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
 					COLOR_DEPTH_UNDEFINED);
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 		if (pipe_ctx->stream->timing.flags.DSC) {
 			if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
 					dc_is_virtual_signal(pipe_ctx->stream->signal))
 				dp_set_dsc_enable(pipe_ctx, true);
 		}
-#endif
-		core_dc->hwss.enable_stream(pipe_ctx);
+		dc->hwss.enable_stream(pipe_ctx);
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 		/* Set DPS PPS SDP (AKA "info frames") */
 		if (pipe_ctx->stream->timing.flags.DSC) {
 			if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
 					dc_is_virtual_signal(pipe_ctx->stream->signal))
 				dp_set_dsc_pps_sdp(pipe_ctx, true);
 		}
-#endif
 
 		if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 			dc_link_allocate_mst_payload(pipe_ctx);
 
-		core_dc->hwss.unblank_stream(pipe_ctx,
+		dc->hwss.unblank_stream(pipe_ctx,
 			&pipe_ctx->stream->link->cur_link_settings);
 
 		if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -3056,24 +3066,21 @@ void core_link_enable_stream(
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
 		update_psp_stream_config(pipe_ctx, false);
 #endif
-	}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-	else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
+	} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
 		if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
 				dc_is_virtual_signal(pipe_ctx->stream->signal))
 			dp_set_dsc_enable(pipe_ctx, true);
 
 	}
-#endif
 }
 
 void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
 {
-	struct dc  *core_dc = pipe_ctx->stream->ctx->dc;
+	struct dc  *dc = pipe_ctx->stream->ctx->dc;
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	struct dc_link *link = stream->sink->link;
 
-	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) &&
+	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
 			dc_is_virtual_signal(pipe_ctx->stream->signal))
 		return;
 
@@ -3081,7 +3088,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
 	update_psp_stream_config(pipe_ctx, true);
 #endif
 
-	core_dc->hwss.blank_stream(pipe_ctx);
+	dc->hwss.blank_stream(pipe_ctx);
 
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 		deallocate_mst_payload(pipe_ctx);
@@ -3110,25 +3117,23 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
 			write_i2c_redriver_setting(pipe_ctx, false);
 		}
 	}
-	core_dc->hwss.disable_stream(pipe_ctx);
+	dc->hwss.disable_stream(pipe_ctx);
 
 	disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	if (pipe_ctx->stream->timing.flags.DSC) {
 		if (dc_is_dp_signal(pipe_ctx->stream->signal))
 			dp_set_dsc_enable(pipe_ctx, false);
 	}
-#endif
 }
 
 void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
 {
-	struct dc  *core_dc = pipe_ctx->stream->ctx->dc;
+	struct dc  *dc = pipe_ctx->stream->ctx->dc;
 
 	if (!dc_is_hdmi_signal(pipe_ctx->stream->signal))
 		return;
 
-	core_dc->hwss.set_avmute(pipe_ctx, enable);
+	dc->hwss.set_avmute(pipe_ctx, enable);
 }
 
 /**
@@ -3186,13 +3191,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
 	uint32_t bits_per_channel = 0;
 	uint32_t kbps;
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	if (timing->flags.DSC) {
 		kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
 		kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
 		return kbps;
 	}
-#endif
 
 	switch (timing->display_color_depth) {
 	case COLOR_DEPTH_666:
@@ -3345,6 +3348,7 @@ void dc_link_disable_hpd(const struct dc_link *link)
 
 void dc_link_set_test_pattern(struct dc_link *link,
 			      enum dp_test_pattern test_pattern,
+			      enum dp_test_pattern_color_space test_pattern_color_space,
 			      const struct link_training_settings *p_link_settings,
 			      const unsigned char *p_custom_pattern,
 			      unsigned int cust_pattern_size)
@@ -3353,6 +3357,7 @@ void dc_link_set_test_pattern(struct dc_link *link,
 		dc_link_dp_set_test_pattern(
 			link,
 			test_pattern,
+			test_pattern_color_space,
 			p_link_settings,
 			p_custom_pattern,
 			cust_pattern_size);
@@ -3368,7 +3373,6 @@ uint32_t dc_link_bandwidth_kbps(
 	link_bw_kbps *= 8;   /* 8 bits per byte*/
 	link_bw_kbps *= link_setting->lane_count;
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
 		/* Account for FEC overhead.
 		 * We have to do it based on caps,
@@ -3393,7 +3397,6 @@ uint32_t dc_link_bandwidth_kbps(
 		link_bw_kbps = mul_u64_u32_shr(BIT_ULL(32) * 970LL / 1000,
 					       link_bw_kbps, 32);
 	}
-#endif
 
 	return link_bw_kbps;
 
@@ -3407,3 +3410,10 @@ const struct dc_link_settings *dc_link_get_link_cap(
 		return &link->preferred_link_setting;
 	return &link->verified_link_cap;
 }
+
+void dc_link_overwrite_extended_receiver_cap(
+		struct dc_link *link)
+{
+	dp_overwrite_extended_receiver_cap(link);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 8178919..a49c10d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -187,7 +187,7 @@ void dal_ddc_i2c_payloads_add(
 
 }
 
-static void construct(
+static void ddc_service_construct(
 	struct ddc_service *ddc_service,
 	struct ddc_service_init_data *init_data)
 {
@@ -206,7 +206,10 @@ static void construct(
 		ddc_service->ddc_pin = NULL;
 	} else {
 		hw_info.ddc_channel = i2c_info.i2c_line;
-		hw_info.hw_supported = i2c_info.i2c_hw_assist;
+		if (ddc_service->link != NULL)
+			hw_info.hw_supported = i2c_info.i2c_hw_assist;
+		else
+			hw_info.hw_supported = false;
 
 		ddc_service->ddc_pin = dal_gpio_create_ddc(
 			gpio_service,
@@ -236,11 +239,11 @@ struct ddc_service *dal_ddc_service_create(
 	if (!ddc_service)
 		return NULL;
 
-	construct(ddc_service, init_data);
+	ddc_service_construct(ddc_service, init_data);
 	return ddc_service;
 }
 
-static void destruct(struct ddc_service *ddc)
+static void ddc_service_destruct(struct ddc_service *ddc)
 {
 	if (ddc->ddc_pin)
 		dal_gpio_destroy_ddc(&ddc->ddc_pin);
@@ -252,7 +255,7 @@ void dal_ddc_service_destroy(struct ddc_service **ddc)
 		BREAK_TO_DEBUGGER();
 		return;
 	}
-	destruct(*ddc);
+	ddc_service_destruct(*ddc);
 	kfree(*ddc);
 	*ddc = NULL;
 }
@@ -587,7 +590,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
 		struct aux_payload *payload)
 {
 	uint32_t retrieved = 0;
-	bool ret = 0;
+	bool ret = false;
 
 	if (!ddc)
 		return false;
@@ -647,17 +650,16 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
 }
 
 
-enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc,
+uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc,
 		uint32_t timeout)
 {
-	enum dc_status status = DC_OK;
+	uint32_t prev_timeout = 0;
 	struct ddc *ddc_pin = ddc->ddc_pin;
 
-	if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout == NULL)
-		return DC_ERROR_UNEXPECTED;
-	if (!ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout))
-		status = DC_ERROR_UNEXPECTED;
-	return status;
+	if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout)
+		prev_timeout =
+				ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout);
+	return prev_timeout;
 }
 
 /*test only function*/
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 504055f..cb731c1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -4,12 +4,8 @@
 #include "dc_link_dp.h"
 #include "dm_helpers.h"
 #include "opp.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 #include "dsc.h"
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #include "resource.h"
-#endif
 
 #include "inc/core_types.h"
 #include "link_hwss.h"
@@ -21,6 +17,9 @@
 #define DC_LOGGER \
 	link->ctx->logger
 
+
+#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE   0x50
+
 /* maximum pre emphasis level allowed for each voltage swing level*/
 static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {
 		PRE_EMPHASIS_LEVEL3,
@@ -221,19 +220,31 @@ static enum dpcd_training_patterns
 	return dpcd_tr_pattern;
 }
 
+static inline bool is_repeater(struct dc_link *link, uint32_t offset)
+{
+	return (!link->is_lttpr_mode_transparent && offset != 0);
+}
+
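(Illustrative sketch, not part of the patch.) Every per-repeater DPCD access that follows uses the same addressing rule: each LTTPR owns a DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE (0x50) byte register block, so the register for a 1-based repeater offset sits at the corresponding PHY_REPEATER1 address plus 0x50 * (offset - 1). The helper name below is hypothetical and only spells out that open-coded pattern:

static inline uint32_t lttpr_dpcd_addr(uint32_t phy_repeater1_addr,
					uint32_t offset)
{
	/* offset 1 maps to the PHY_REPEATER1 register itself */
	return phy_repeater1_addr +
		DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE * (offset - 1);
}

/* e.g. lttpr_dpcd_addr(DP_TRAINING_PATTERN_SET_PHY_REPEATER1, 2) yields the
 * TRAINING_PATTERN_SET register of the second repeater.
 */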
 static void dpcd_set_lt_pattern_and_lane_settings(
 	struct dc_link *link,
 	const struct link_training_settings *lt_settings,
-	enum dc_dp_training_pattern pattern)
+	enum dc_dp_training_pattern pattern,
+	uint32_t offset)
 {
 	union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } };
-	const uint32_t dpcd_base_lt_offset =
-	DP_TRAINING_PATTERN_SET;
+
+	uint32_t dpcd_base_lt_offset;
+
 	uint8_t dpcd_lt_buffer[5] = {0};
 	union dpcd_training_pattern dpcd_pattern = { {0} };
 	uint32_t lane;
 	uint32_t size_in_bytes;
 	bool edp_workaround = false; /* TODO link_prop.INTERNAL */
+	dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
+
+	if (is_repeater(link, offset))
+		dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+			((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
 
 	/*****************************************************************
 	* DpcdAddress_TrainingPatternSet
@@ -241,14 +252,21 @@ static void dpcd_set_lt_pattern_and_lane_settings(
 	dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
 		dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);
 
-	dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset]
+	dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
 		= dpcd_pattern.raw;
 
-	DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n",
-		__func__,
-		DP_TRAINING_PATTERN_SET,
-		dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
-
+	if (is_repeater(link, offset)) {
+		DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
+			__func__,
+			offset,
+			dpcd_base_lt_offset,
+			dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+	} else {
+		DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n",
+			__func__,
+			dpcd_base_lt_offset,
+			dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+	}
 	/*****************************************************************
 	* DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set
 	*****************************************************************/
@@ -268,24 +286,35 @@ static void dpcd_set_lt_pattern_and_lane_settings(
 		PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
 	}
 
-	/* concatinate everything into one buffer*/
+	/* concatenate everything into one buffer*/
 
 	size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]);
 
 	 // 0x00103 - 0x00102
 	memmove(
-		&dpcd_lt_buffer[DP_TRAINING_LANE0_SET - dpcd_base_lt_offset],
+		&dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET],
 		dpcd_lane,
 		size_in_bytes);
 
-	DC_LOG_HW_LINK_TRAINING("%s:\n %x VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n",
-		__func__,
-		DP_TRAINING_LANE0_SET,
-		dpcd_lane[0].bits.VOLTAGE_SWING_SET,
-		dpcd_lane[0].bits.PRE_EMPHASIS_SET,
-		dpcd_lane[0].bits.MAX_SWING_REACHED,
-		dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
-
+	if (is_repeater(link, offset)) {
+		DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+				" 0x%X VS set = %x PE set = %x max VS Reached = %x  max PE Reached = %x\n",
+			__func__,
+			offset,
+			dpcd_base_lt_offset,
+			dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+			dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+			dpcd_lane[0].bits.MAX_SWING_REACHED,
+			dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+	} else {
+		DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n",
+			__func__,
+			dpcd_base_lt_offset,
+			dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+			dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+			dpcd_lane[0].bits.MAX_SWING_REACHED,
+			dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+	}
 	if (edp_workaround) {
 		/* for eDP write in 2 parts because the 5-byte burst is
 		* causing issues on some eDP panels (EPR#366724)
@@ -495,8 +524,12 @@ static void get_lane_status_and_drive_settings(
 	const struct link_training_settings *link_training_setting,
 	union lane_status *ln_status,
 	union lane_align_status_updated *ln_status_updated,
-	struct link_training_settings *req_settings)
+	struct link_training_settings *req_settings,
+	uint32_t offset)
 {
+	unsigned int lane01_status_address = DP_LANE0_1_STATUS;
+	uint8_t lane_adjust_offset = 4;
+	unsigned int lane01_adjust_address;
 	uint8_t dpcd_buf[6] = {0};
 	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
 	struct link_training_settings request_settings = { {0} };
@@ -504,9 +537,16 @@ static void get_lane_status_and_drive_settings(
 
 	memset(req_settings, '\0', sizeof(struct link_training_settings));
 
+	if (is_repeater(link, offset)) {
+		lane01_status_address =
+				DP_LANE0_1_STATUS_PHY_REPEATER1 +
+				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+		lane_adjust_offset = 3;
+	}
+
 	core_link_read_dpcd(
 		link,
-		DP_LANE0_1_STATUS,
+		lane01_status_address,
 		(uint8_t *)(dpcd_buf),
 		sizeof(dpcd_buf));
 
@@ -517,22 +557,47 @@ static void get_lane_status_and_drive_settings(
 		ln_status[lane].raw =
 			get_nibble_at_index(&dpcd_buf[0], lane);
 		dpcd_lane_adjust[lane].raw =
-			get_nibble_at_index(&dpcd_buf[4], lane);
+			get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane);
 	}
 
 	ln_status_updated->raw = dpcd_buf[2];
 
-	DC_LOG_HW_LINK_TRAINING("%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
-		__func__,
-		DP_LANE0_1_STATUS, dpcd_buf[0],
-		DP_LANE2_3_STATUS, dpcd_buf[1]);
+	if (is_repeater(link, offset)) {
+		DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+				" 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
+			__func__,
+			offset,
+			lane01_status_address, dpcd_buf[0],
+			lane01_status_address + 1, dpcd_buf[1]);
+	} else {
+		DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
+			__func__,
+			lane01_status_address, dpcd_buf[0],
+			lane01_status_address + 1, dpcd_buf[1]);
+	}
+	lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1;
 
-	DC_LOG_HW_LINK_TRAINING("%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
-		__func__,
-		DP_ADJUST_REQUEST_LANE0_1,
-		dpcd_buf[4],
-		DP_ADJUST_REQUEST_LANE2_3,
-		dpcd_buf[5]);
+	if (is_repeater(link, offset))
+		lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 +
+				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+	if (is_repeater(link, offset)) {
+		DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+				" 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
+					__func__,
+					offset,
+					lane01_adjust_address,
+					dpcd_buf[lane_adjust_offset],
+					lane01_adjust_address + 1,
+					dpcd_buf[lane_adjust_offset + 1]);
+	} else {
+		DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
+			__func__,
+			lane01_adjust_address,
+			dpcd_buf[lane_adjust_offset],
+			lane01_adjust_address + 1,
+			dpcd_buf[lane_adjust_offset + 1]);
+	}
 
 	/*copy to req_settings*/
 	request_settings.link_settings.lane_count =
@@ -571,10 +636,18 @@ static void get_lane_status_and_drive_settings(
 
 static void dpcd_set_lane_settings(
 	struct dc_link *link,
-	const struct link_training_settings *link_training_setting)
+	const struct link_training_settings *link_training_setting,
+	uint32_t offset)
 {
 	union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
 	uint32_t lane;
+	unsigned int lane0_set_address;
+
+	lane0_set_address = DP_TRAINING_LANE0_SET;
+
+	if (is_repeater(link, offset))
+		lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
+		((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
 
 	for (lane = 0; lane <
 		(uint32_t)(link_training_setting->
@@ -597,7 +670,7 @@ static void dpcd_set_lane_settings(
 	}
 
 	core_link_write_dpcd(link,
-		DP_TRAINING_LANE0_SET,
+		lane0_set_address,
 		(uint8_t *)(dpcd_lane),
 		link_training_setting->link_settings.lane_count);
 
@@ -620,14 +693,26 @@ static void dpcd_set_lane_settings(
 	}
 	*/
 
-	DC_LOG_HW_LINK_TRAINING("%s\n %x VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n",
-		__func__,
-		DP_TRAINING_LANE0_SET,
-		dpcd_lane[0].bits.VOLTAGE_SWING_SET,
-		dpcd_lane[0].bits.PRE_EMPHASIS_SET,
-		dpcd_lane[0].bits.MAX_SWING_REACHED,
-		dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+	if (is_repeater(link, offset)) {
+		DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
+				" 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n",
+			__func__,
+			offset,
+			lane0_set_address,
+			dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+			dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+			dpcd_lane[0].bits.MAX_SWING_REACHED,
+			dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
 
+	} else {
+		DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n",
+			__func__,
+			lane0_set_address,
+			dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+			dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+			dpcd_lane[0].bits.MAX_SWING_REACHED,
+			dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+	}
 	link->cur_lane_setting = link_training_setting->lane_settings[0];
 
 }
@@ -647,17 +732,6 @@ static bool is_max_vs_reached(
 
 }
 
-void dc_link_dp_set_drive_settings(
-	struct dc_link *link,
-	struct link_training_settings *lt_settings)
-{
-	/* program ASIC PHY settings*/
-	dp_set_hw_lane_settings(link, lt_settings);
-
-	/* Notify DP sink the PHY settings from source */
-	dpcd_set_lane_settings(link, lt_settings);
-}
-
 static bool perform_post_lt_adj_req_sequence(
 	struct dc_link *link,
 	struct link_training_settings *lt_settings)
@@ -690,7 +764,8 @@ static bool perform_post_lt_adj_req_sequence(
 			lt_settings,
 			dpcd_lane_status,
 			&dpcd_lane_status_updated,
-			&req_settings);
+			&req_settings,
+			DPRX);
 
 			if (dpcd_lane_status_updated.bits.
 					POST_LT_ADJ_REQ_IN_PROGRESS == 0)
@@ -747,6 +822,31 @@ static bool perform_post_lt_adj_req_sequence(
 
 }
 
+/* Only used for channel equalization */
+static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval)
+{
+	unsigned int aux_rd_interval_us = 400;
+
+	switch (dpcd_aux_read_interval) {
+	case 0x01:
+		aux_rd_interval_us = 400;
+		break;
+	case 0x02:
+		aux_rd_interval_us = 4000;
+		break;
+	case 0x03:
+		aux_rd_interval_us = 8000;
+		break;
+	case 0x04:
+		aux_rd_interval_us = 16000;
+		break;
+	default:
+		break;
+	}
+
+	return aux_rd_interval_us;
+}
+
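(Illustration only.) translate_training_aux_read_interval() maps the DPCD-encoded aux read interval codes to the microsecond delays used during channel equalization: 0x01 -> 400 us, 0x02 -> 4000 us, 0x03 -> 8000 us, 0x04 -> 16000 us, with anything else falling back to 400 us. A hypothetical call:

	/* A repeater advertising interval code 0x02 makes EQ wait 4 ms. */
	uint32_t wait_time_microsec = translate_training_aux_read_interval(0x02);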
 static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,
 					union lane_status *dpcd_lane_status)
 {
@@ -765,37 +865,55 @@ static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,
 
 static enum link_training_result perform_channel_equalization_sequence(
 	struct dc_link *link,
-	struct link_training_settings *lt_settings)
+	struct link_training_settings *lt_settings,
+	uint32_t offset)
 {
 	struct link_training_settings req_settings;
 	enum dc_dp_training_pattern tr_pattern;
 	uint32_t retries_ch_eq;
+	uint32_t wait_time_microsec;
 	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
 	union lane_align_status_updated dpcd_lane_status_updated = { {0} };
 	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
 
+	/* Note: also check that TPS4 is a supported feature*/
+
 	tr_pattern = lt_settings->pattern_for_eq;
 
-	dp_set_hw_training_pattern(link, tr_pattern);
+	if (is_repeater(link, offset))
+		tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+
+	dp_set_hw_training_pattern(link, tr_pattern, offset);
 
 	for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
 		retries_ch_eq++) {
 
-		dp_set_hw_lane_settings(link, lt_settings);
+		dp_set_hw_lane_settings(link, lt_settings, offset);
 
 		/* 2. update DPCD*/
 		if (!retries_ch_eq)
 			/* EPR #361076 - write as a 5-byte burst,
-			 * but only for the 1-st iteration*/
+			 * but only for the 1-st iteration
+			 */
+
 			dpcd_set_lt_pattern_and_lane_settings(
 				link,
 				lt_settings,
-				tr_pattern);
+				tr_pattern, offset);
 		else
-			dpcd_set_lane_settings(link, lt_settings);
+			dpcd_set_lane_settings(link, lt_settings, offset);
 
 		/* 3. wait for receiver to lock-on*/
-		wait_for_training_aux_rd_interval(link, lt_settings->eq_pattern_time);
+		wait_time_microsec = lt_settings->eq_pattern_time;
+
+		if (is_repeater(link, offset))
+			wait_time_microsec =
+					translate_training_aux_read_interval(
+						link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
+
+		wait_for_training_aux_rd_interval(
+				link,
+				wait_time_microsec);
 
 		/* 4. Read lane status and requested
 		 * drive settings as set by the sink*/
@@ -805,7 +923,8 @@ static enum link_training_result perform_channel_equalization_sequence(
 			lt_settings,
 			dpcd_lane_status,
 			&dpcd_lane_status_updated,
-			&req_settings);
+			&req_settings,
+			offset);
 
 		/* 5. check CR done*/
 		if (!is_cr_done(lane_count, dpcd_lane_status))
@@ -824,13 +943,16 @@ static enum link_training_result perform_channel_equalization_sequence(
 	return LINK_TRAINING_EQ_FAIL_EQ;
 
 }
+#define TRAINING_AUX_RD_INTERVAL 100 //us
 
 static enum link_training_result perform_clock_recovery_sequence(
 	struct dc_link *link,
-	struct link_training_settings *lt_settings)
+	struct link_training_settings *lt_settings,
+	uint32_t offset)
 {
 	uint32_t retries_cr;
 	uint32_t retry_count;
+	uint32_t wait_time_microsec;
 	struct link_training_settings req_settings;
 	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
 	enum dc_dp_training_pattern tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_1;
@@ -840,7 +962,7 @@ static enum link_training_result perform_clock_recovery_sequence(
 	retries_cr = 0;
 	retry_count = 0;
 
-	dp_set_hw_training_pattern(link, tr_pattern);
+	dp_set_hw_training_pattern(link, tr_pattern, offset);
 
 	/* najeeb - The synaptics MST hub can put the LT in
 	* infinite loop by switching the VS
@@ -857,25 +979,33 @@ static enum link_training_result perform_clock_recovery_sequence(
 		/* 1. call HWSS to set lane settings*/
 		dp_set_hw_lane_settings(
 				link,
-				lt_settings);
+				lt_settings,
+				offset);
 
 		/* 2. update DPCD of the receiver*/
-		if (!retries_cr)
+		if (!retry_count)
 			/* EPR #361076 - write as a 5-byte burst,
 			 * but only for the 1-st iteration.*/
 			dpcd_set_lt_pattern_and_lane_settings(
 					link,
 					lt_settings,
-					tr_pattern);
+					tr_pattern,
+					offset);
 		else
 			dpcd_set_lane_settings(
 					link,
-					lt_settings);
+					lt_settings,
+					offset);
 
 		/* 3. wait receiver to lock-on*/
+		wait_time_microsec = lt_settings->cr_pattern_time;
+
+		if (!link->is_lttpr_mode_transparent)
+			wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
+
 		wait_for_training_aux_rd_interval(
 				link,
-				lt_settings->cr_pattern_time);
+				wait_time_microsec);
 
 		/* 4. Read lane status and requested drive
 		* settings as set by the sink
@@ -885,7 +1015,8 @@ static enum link_training_result perform_clock_recovery_sequence(
 				lt_settings,
 				dpcd_lane_status,
 				&dpcd_lane_status_updated,
-				&req_settings);
+				&req_settings,
+				offset);
 
 		/* 5. check CR done*/
 		if (is_cr_done(lane_count, dpcd_lane_status))
@@ -1054,6 +1185,102 @@ static void initialize_training_settings(
 		lt_settings->enhanced_framing = 1;
 }
 
+static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
+{
+	switch (lttpr_repeater_count) {
+	case 0x80: // 1 lttpr repeater
+		return 1;
+	case 0x40: // 2 lttpr repeaters
+		return 2;
+	case 0x20: // 3 lttpr repeaters
+		return 3;
+	case 0x10: // 4 lttpr repeaters
+		return 4;
+	case 0x08: // 5 lttpr repeaters
+		return 5;
+	case 0x04: // 6 lttpr repeaters
+		return 6;
+	case 0x02: // 7 lttpr repeaters
+		return 7;
+	case 0x01: // 8 lttpr repeaters
+		return 8;
+	default:
+		break;
+	}
+	return 0; // invalid value
+}
+
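(Alternative sketch, not what the patch uses.) convert_to_count() decodes the one-hot PHY_REPEATER_CNT field, where 0x80 means one repeater and 0x01 means eight. Under that same one-hot assumption the mapping can also be written as a bit scan:

static uint8_t convert_to_count_bitscan(uint8_t lttpr_repeater_count)
{
	/* Reject zero or multi-bit values: the field must be one-hot. */
	if (!lttpr_repeater_count ||
	    (lttpr_repeater_count & (lttpr_repeater_count - 1)))
		return 0;

	/* 0x80 -> 1 repeater ... 0x01 -> 8 repeaters */
	return 8 - (uint8_t)__builtin_ctz(lttpr_repeater_count);
}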
+static void configure_lttpr_mode(struct dc_link *link)
+{
+	/* aux timeout is already set to extended */
+	/* RESET/SET lttpr mode to enable non transparent mode */
+	uint8_t repeater_cnt;
+	uint32_t aux_interval_address;
+	uint8_t repeater_id;
+	enum dc_status result = DC_ERROR_UNEXPECTED;
+	uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
+
+	DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
+	result = core_link_write_dpcd(link,
+			DP_PHY_REPEATER_MODE,
+			(uint8_t *)&repeater_mode,
+			sizeof(repeater_mode));
+
+	if (result == DC_OK) {
+		link->dpcd_caps.lttpr_caps.mode = repeater_mode;
+	}
+
+	if (!link->is_lttpr_mode_transparent) {
+
+		DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
+
+		repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
+		result = core_link_write_dpcd(link,
+				DP_PHY_REPEATER_MODE,
+				(uint8_t *)&repeater_mode,
+				sizeof(repeater_mode));
+
+		if (result == DC_OK) {
+			link->dpcd_caps.lttpr_caps.mode = repeater_mode;
+		}
+
+		repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+		for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
+			aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
+						((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
+			core_link_read_dpcd(
+				link,
+				aux_interval_address,
+				(uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1],
+				sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1]));
+			link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F;
+		}
+	}
+}
+
+static void repeater_training_done(struct dc_link *link, uint32_t offset)
+{
+	union dpcd_training_pattern dpcd_pattern = { {0} };
+
+	const uint32_t dpcd_base_lt_offset =
+			DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+	/* Set training not in progress*/
+	dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+
+	core_link_write_dpcd(
+		link,
+		dpcd_base_lt_offset,
+		&dpcd_pattern.raw,
+		1);
+
+	DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n",
+		__func__,
+		offset,
+		dpcd_base_lt_offset,
+		dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+}
+
 static void print_status_message(
 	struct dc_link *link,
 	const struct link_training_settings *lt_settings,
@@ -1133,6 +1360,17 @@ static void print_status_message(
 				lt_spread);
 }
 
+void dc_link_dp_set_drive_settings(
+	struct dc_link *link,
+	struct link_training_settings *lt_settings)
+{
+	/* program ASIC PHY settings*/
+	dp_set_hw_lane_settings(link, lt_settings, DPRX);
+
+	/* Notify DP sink the PHY settings from source */
+	dpcd_set_lane_settings(link, lt_settings, DPRX);
+}
+
 bool dc_link_dp_perform_link_training_skip_aux(
 	struct dc_link *link,
 	const struct dc_link_settings *link_setting)
@@ -1149,10 +1387,10 @@ bool dc_link_dp_perform_link_training_skip_aux(
 	/* 1. Perform_clock_recovery_sequence. */
 
 	/* transmit training pattern for clock recovery */
-	dp_set_hw_training_pattern(link, pattern_for_cr);
+	dp_set_hw_training_pattern(link, pattern_for_cr, DPRX);
 
 	/* call HWSS to set lane settings*/
-	dp_set_hw_lane_settings(link, &lt_settings);
+	dp_set_hw_lane_settings(link, &lt_settings, DPRX);
 
 	/* wait receiver to lock-on*/
 	wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
@@ -1160,10 +1398,10 @@ bool dc_link_dp_perform_link_training_skip_aux(
 	/* 2. Perform_channel_equalization_sequence. */
 
 	/* transmit training pattern for channel equalization. */
-	dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq);
+	dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq, DPRX);
 
 	/* call HWSS to set lane settings*/
-	dp_set_hw_lane_settings(link, &lt_settings);
+	dp_set_hw_lane_settings(link, &lt_settings, DPRX);
 
 	/* wait receiver to lock-on. */
 	wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
@@ -1185,9 +1423,10 @@ enum link_training_result dc_link_dp_perform_link_training(
 {
 	enum link_training_result status = LINK_TRAINING_SUCCESS;
 	struct link_training_settings lt_settings;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+
 	bool fec_enable;
-#endif
+	uint8_t repeater_cnt;
+	uint8_t repeater_id;
 
 	initialize_training_settings(
 			link,
@@ -1198,23 +1437,47 @@ enum link_training_result dc_link_dp_perform_link_training(
 	/* 1. set link rate, lane count and spread. */
 	dpcd_set_link_settings(link, &lt_settings);
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	if (link->preferred_training_settings.fec_enable != NULL)
 		fec_enable = *link->preferred_training_settings.fec_enable;
 	else
 		fec_enable = true;
 
 	dp_set_fec_ready(link, fec_enable);
-#endif
 
+	if (!link->is_lttpr_mode_transparent) {
+		/* Configure lttpr mode */
+		configure_lttpr_mode(link);
 
-	/* 2. perform link training (set link training done
-	 *  to false is done as well)
-	 */
-	status = perform_clock_recovery_sequence(link, &lt_settings);
+		/* 2. perform link training (set link training done
+		 *  to false is done as well)
+		 */
+		repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+		for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
+				repeater_id--) {
+			status = perform_clock_recovery_sequence(link, &lt_settings, repeater_id);
+
+			if (status != LINK_TRAINING_SUCCESS)
+				break;
+
+			status = perform_channel_equalization_sequence(link,
+					&lt_settings,
+					repeater_id);
+
+			if (status != LINK_TRAINING_SUCCESS)
+				break;
+
+			repeater_training_done(link, repeater_id);
+		}
+	}
+
+	if (status == LINK_TRAINING_SUCCESS) {
+		status = perform_clock_recovery_sequence(link, &lt_settings, DPRX);
 	if (status == LINK_TRAINING_SUCCESS) {
 		status = perform_channel_equalization_sequence(link,
-				&lt_settings);
+					&lt_settings,
+					DPRX);
+		}
 	}
 
 	if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
@@ -1233,23 +1496,58 @@ enum link_training_result dc_link_dp_perform_link_training(
 }
 
 bool perform_link_training_with_retries(
-	struct dc_link *link,
 	const struct dc_link_settings *link_setting,
 	bool skip_video_pattern,
-	int attempts)
+	int attempts,
+	struct pipe_ctx *pipe_ctx,
+	enum signal_type signal)
 {
 	uint8_t j;
 	uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->link;
+	enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
 
 	for (j = 0; j < attempts; ++j) {
 
-		if (dc_link_dp_perform_link_training(
+		dp_enable_link_phy(
+			link,
+			signal,
+			pipe_ctx->clock_source->id,
+			link_setting);
+
+		if (stream->sink_patches.dppowerup_delay > 0) {
+			int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
+
+			msleep(delay_dp_power_up_in_ms);
+		}
+
+		dp_set_panel_mode(link, panel_mode);
+
+		/* We need to do this before the link training to ensure the idle pattern in SST
+		 * mode will be sent right after the link training
+		 */
+		link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+								pipe_ctx->stream_res.stream_enc->id, true);
+
+		if (link->aux_access_disabled) {
+			dc_link_dp_perform_link_training_skip_aux(link, link_setting);
+			return true;
+		} else if (dc_link_dp_perform_link_training(
 				link,
 				link_setting,
 				skip_video_pattern) == LINK_TRAINING_SUCCESS)
 			return true;
 
+		/* If the latest link training attempt still fails, skip the delay and keep the PHY on
+		 */
+		if (j == (attempts - 1))
+			break;
+
+		dp_disable_link_phy(link, signal);
+
 		msleep(delay_between_attempts);
+
 		delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
 	}
 
@@ -1321,9 +1619,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
 	enum link_training_result lt_status = LINK_TRAINING_SUCCESS;
 	enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT;
 	enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	bool fec_enable = false;
-#endif
 
 	initialize_training_settings(
 		link,
@@ -1343,11 +1639,9 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
 	dp_enable_link_phy(link, link->connector_signal,
 		dp_cs_id, link_settings);
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	/* Set FEC enable */
 	fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
 	dp_set_fec_ready(link, fec_enable);
-#endif
 
 	if (lt_overrides->alternate_scrambler_reset) {
 		if (*lt_overrides->alternate_scrambler_reset)
@@ -1367,10 +1661,11 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
 	/* 2. perform link training (set link training done
 	 *  to false is done as well)
 	 */
-	lt_status = perform_clock_recovery_sequence(link, &lt_settings);
+	lt_status = perform_clock_recovery_sequence(link, &lt_settings, DPRX);
 	if (lt_status == LINK_TRAINING_SUCCESS) {
 		lt_status = perform_channel_equalization_sequence(link,
-						&lt_settings);
+						&lt_settings,
+						DPRX);
 	}
 
 	/* 3. Sync LT must skip TRAINING_PATTERN_SET:0 (video pattern)*/
@@ -1387,9 +1682,7 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
 	 */
 	if (link_down == true) {
 		dp_disable_link_phy(link, link->connector_signal);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 		dp_set_fec_ready(link, false);
-#endif
 	}
 
 	link->sync_lt_in_progress = false;
@@ -1423,6 +1716,22 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
 			max_link_cap.link_spread)
 		max_link_cap.link_spread =
 				link->reported_link_cap.link_spread;
+	/*
+	 * account for lttpr repeaters cap
+	 * notes: repeaters do not snoop the DPRX Capabilities addresses (3.6.3).
+	 */
+	if (!link->is_lttpr_mode_transparent) {
+		if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
+			max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+
+		if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate)
+			max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+
+		DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR,  max_lane count %d max_link rate %d \n",
+						__func__,
+						max_link_cap.lane_count,
+						max_link_cap.link_rate);
+	}
 	return max_link_cap;
 }
 
@@ -1568,6 +1877,13 @@ bool dp_verify_link_cap(
 
 	max_link_cap = get_max_link_cap(link);
 
+	/* Grant extended timeout request */
+	if (!link->is_lttpr_mode_transparent && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
+		uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
+
+		core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
+	}
+
 	/* TODO implement override and monitor patch later */
 
 	/* try to train the link from high to low to
@@ -1576,6 +1892,16 @@ bool dp_verify_link_cap(
 	/* disable PHY done possible by BIOS, will be done by driver itself */
 	dp_disable_link_phy(link, link->connector_signal);
 
+	/* Temporary Renoir-specific workaround for SWDEV-215184;
+	 * PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle,
+	 * so add extra cycle of enabling and disabling the PHY before first link training.
+	 */
+	if (link->link_enc->features.flags.bits.DP_IS_USB_C &&
+			link->dc->debug.usbc_combo_phy_reset_wa) {
+		dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur);
+		dp_disable_link_phy(link, link->connector_signal);
+	}
+
 	dp_cs_id = get_clock_source_id(link);
 
 	/* link training starts with the maximum common settings
@@ -2280,6 +2606,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
 	dc_link_dp_set_test_pattern(
 		link,
 		test_pattern,
+		DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
 		&link_training_settings,
 		test_80_bit_pattern,
 		(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
@@ -2291,6 +2618,8 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
 	union link_test_pattern dpcd_test_pattern;
 	union test_misc dpcd_test_params;
 	enum dp_test_pattern test_pattern;
+	enum dp_test_pattern_color_space test_pattern_color_space =
+			DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
 
 	memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
 	memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
@@ -2325,14 +2654,105 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
 	break;
 	}
 
+	test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
+			DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
+			DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
+
 	dc_link_dp_set_test_pattern(
 			link,
 			test_pattern,
+			test_pattern_color_space,
 			NULL,
 			NULL,
 			0);
 }
 
+static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video)
+{
+	union audio_test_mode            dpcd_test_mode = {0};
+	struct audio_test_pattern_type   dpcd_pattern_type = {0};
+	union audio_test_pattern_period  dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0};
+	enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
+
+	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+	struct pipe_ctx *pipe_ctx = &pipes[0];
+	unsigned int channel_count;
+	unsigned int channel = 0;
+	unsigned int modes = 0;
+	unsigned int sampling_rate_in_hz = 0;
+
+	// get audio test mode and test pattern parameters
+	core_link_read_dpcd(
+		link,
+		DP_TEST_AUDIO_MODE,
+		&dpcd_test_mode.raw,
+		sizeof(dpcd_test_mode));
+
+	core_link_read_dpcd(
+		link,
+		DP_TEST_AUDIO_PATTERN_TYPE,
+		&dpcd_pattern_type.value,
+		sizeof(dpcd_pattern_type));
+
+	channel_count = dpcd_test_mode.bits.channel_count + 1;
+
+	// read pattern periods for requested channels when sawTooth pattern is requested
+	if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
+			dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) {
+
+		test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ?
+				DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
+		// read period for each channel
+		for (channel = 0; channel < channel_count; channel++) {
+			core_link_read_dpcd(
+							link,
+							DP_TEST_AUDIO_PERIOD_CH1 + channel,
+							&dpcd_pattern_period[channel].raw,
+							sizeof(dpcd_pattern_period[channel]));
+		}
+	}
+
+	// translate sampling rate
+	switch (dpcd_test_mode.bits.sampling_rate) {
+	case AUDIO_SAMPLING_RATE_32KHZ:
+		sampling_rate_in_hz = 32000;
+		break;
+	case AUDIO_SAMPLING_RATE_44_1KHZ:
+		sampling_rate_in_hz = 44100;
+		break;
+	case AUDIO_SAMPLING_RATE_48KHZ:
+		sampling_rate_in_hz = 48000;
+		break;
+	case AUDIO_SAMPLING_RATE_88_2KHZ:
+		sampling_rate_in_hz = 88200;
+		break;
+	case AUDIO_SAMPLING_RATE_96KHZ:
+		sampling_rate_in_hz = 96000;
+		break;
+	case AUDIO_SAMPLING_RATE_176_4KHZ:
+		sampling_rate_in_hz = 176400;
+		break;
+	case AUDIO_SAMPLING_RATE_192KHZ:
+		sampling_rate_in_hz = 192000;
+		break;
+	default:
+		sampling_rate_in_hz = 0;
+		break;
+	}
+
+	link->audio_test_data.flags.test_requested = 1;
+	link->audio_test_data.flags.disable_video = disable_video;
+	link->audio_test_data.sampling_rate = sampling_rate_in_hz;
+	link->audio_test_data.channel_count = channel_count;
+	link->audio_test_data.pattern_type = test_pattern;
+
+	if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) {
+		for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) {
+			link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period;
+		}
+	}
+}
+
 static void handle_automated_test(struct dc_link *link)
 {
 	union test_request test_request;
@@ -2362,6 +2782,12 @@ static void handle_automated_test(struct dc_link *link)
 		dp_test_send_link_test_pattern(link);
 		test_response.bits.ACK = 1;
 	}
+
+	if (test_request.bits.AUDIO_TEST_PATTERN) {
+		dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO);
+		test_response.bits.ACK = 1;
+	}
+
 	if (test_request.bits.PHY_TEST_PATTERN) {
 		dp_test_send_phy_test_pattern(link);
 		test_response.bits.ACK = 1;
@@ -2381,9 +2807,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
 	union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
 	union device_service_irq device_service_clear = { { 0 } };
 	enum dc_status result;
-
 	bool status = false;
 	struct pipe_ctx *pipe_ctx;
+	struct dc_link_settings previous_link_settings;
 	int i;
 
 	if (out_link_loss)
@@ -2447,29 +2873,37 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
 	/* For now we only handle 'Downstream port status' case.
 	 * If we got sink count changed it means
 	 * Downstream port status changed,
-	 * then DM should call DC to do the detection. */
-	if (hpd_rx_irq_check_link_loss_status(
-		link,
-		&hpd_irq_dpcd_data)) {
+	 * then DM should call DC to do the detection.
+	 * NOTE: Do not handle link loss on eDP since it is an internal link */
+	if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
+		hpd_rx_irq_check_link_loss_status(
+			link,
+			&hpd_irq_dpcd_data)) {
 		/* Connectivity log: link loss */
 		CONN_DATA_LINK_LOSS(link,
 					hpd_irq_dpcd_data.raw,
 					sizeof(hpd_irq_dpcd_data),
 					"Status: ");
 
-		perform_link_training_with_retries(link,
-			&link->cur_link_settings,
-			true, LINK_TRAINING_ATTEMPTS);
-
 		for (i = 0; i < MAX_PIPES; i++) {
 			pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
-			if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link &&
-					pipe_ctx->stream->dpms_off == false &&
-					pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
-				dc_link_allocate_mst_payload(pipe_ctx);
-			}
+			if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+				break;
 		}
 
+		if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
+			return false;
+
+		previous_link_settings = link->cur_link_settings;
+
+		perform_link_training_with_retries(&previous_link_settings,
+			true, LINK_TRAINING_ATTEMPTS,
+			pipe_ctx,
+			pipe_ctx->stream->signal);
+
+		if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+			dc_link_reallocate_mst_payload(link);
+
 		status = false;
 		if (out_link_loss)
 			*out_link_loss = true;
@@ -2697,7 +3131,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
 		int length)
 {
 	int retry = 0;
-	union dp_downstream_port_present ds_port = { 0 };
 
 	if (!link->dpcd_caps.dpcd_rev.raw) {
 		do {
@@ -2710,9 +3143,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
 		} while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
 	}
 
-	ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
-				 DP_DPCD_REV];
-
 	if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
 		switch (link->dpcd_caps.branch_dev_id) {
 		/* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down
@@ -2737,7 +3167,11 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
 
 static bool retrieve_link_cap(struct dc_link *link)
 {
-	uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
+	/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
+	 * which means size 16 will be good for both of those DPCD register block reads
+	 */
+	uint8_t dpcd_data[16];
+	uint8_t lttpr_dpcd_data[6];
 
 	/*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
 	 */
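(Illustrative only, not part of the patch.) The fixed dpcd_data[16] buffer relies on the arithmetic stated in the comment above; a compile-time check of that assumption, placed at the top of retrieve_link_cap(), could read:

	/* Both DPCD spans read into dpcd_data[] must fit in its 16 bytes. */
	BUILD_BUG_ON(DP_ADAPTER_CAP - DP_DPCD_REV + 1 > 16);
	BUILD_BUG_ON(DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 > 16);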
@@ -2753,7 +3187,19 @@ static bool retrieve_link_cap(struct dc_link *link)
 	int i;
 	struct dp_sink_hw_fw_revision dp_hw_fw_revision;
 
+	/* Set default timeout to 3.2ms and read LTTPR capabilities */
+	bool ext_timeout_support = link->dc->caps.extended_aux_timeout_support &&
+			!link->dc->config.disable_extended_timeout_support;
+
+	link->is_lttpr_mode_transparent = true;
+
+	if (ext_timeout_support) {
+		dc_link_aux_configure_timeout(link->ddc,
+					LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD);
+	}
+
 	memset(dpcd_data, '\0', sizeof(dpcd_data));
+	memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
 	memset(&down_strm_port_count,
 		'\0', sizeof(union down_stream_port_count));
 	memset(&edp_config_cap, '\0',
@@ -2785,6 +3231,52 @@ static bool retrieve_link_cap(struct dc_link *link)
 		return false;
 	}
 
+	if (ext_timeout_support) {
+
+		status = core_link_read_dpcd(
+				link,
+				DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+				lttpr_dpcd_data,
+				sizeof(lttpr_dpcd_data));
+
+		link->dpcd_caps.lttpr_caps.revision.raw =
+				lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
+								DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+		link->dpcd_caps.lttpr_caps.max_link_rate =
+				lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
+								DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+		link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
+				lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
+								DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+		link->dpcd_caps.lttpr_caps.max_lane_count =
+				lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
+								DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+		link->dpcd_caps.lttpr_caps.mode =
+				lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
+								DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+		link->dpcd_caps.lttpr_caps.max_ext_timeout =
+				lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
+								DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+		if (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 &&
+				link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
+				link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
+				link->dpcd_caps.lttpr_caps.revision.raw >= 0x14) {
+			link->is_lttpr_mode_transparent = false;
+		} else {
+			/* No valid LTTPR: reset the AUX timeout to its default value */
+			link->is_lttpr_mode_transparent = true;
+			dc_link_aux_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+		}
+
+		CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+	}
+
 	{
 		union training_aux_rd_interval aux_rd_interval;
 
@@ -2792,7 +3284,7 @@ static bool retrieve_link_cap(struct dc_link *link)
 			dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
 
 		link->dpcd_caps.ext_receiver_cap_field_present =
-				aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1 ? true:false;
+				aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1;
 
 		if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) {
 			uint8_t ext_cap_data[16];
@@ -2923,7 +3415,6 @@ static bool retrieve_link_cap(struct dc_link *link)
 		dp_hw_fw_revision.ieee_fw_rev,
 		sizeof(dp_hw_fw_revision.ieee_fw_rev));
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	memset(&link->dpcd_caps.dsc_caps, '\0',
 			sizeof(link->dpcd_caps.dsc_caps));
 	memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
@@ -2945,7 +3436,6 @@ static bool retrieve_link_cap(struct dc_link *link)
 				link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
 				sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));
 	}
-#endif
 
 	/* Connectivity log: detection */
 	CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -2953,6 +3443,68 @@ static bool retrieve_link_cap(struct dc_link *link)
 	return true;
 }
 
+bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
+{
+	uint8_t dpcd_data[16];
+	uint32_t read_dpcd_retry_cnt = 3;
+	enum dc_status status = DC_ERROR_UNEXPECTED;
+	union dp_downstream_port_present ds_port = { 0 };
+	union down_stream_port_count down_strm_port_count;
+	union edp_configuration_cap edp_config_cap;
+
+	int i;
+
+	for (i = 0; i < read_dpcd_retry_cnt; i++) {
+		status = core_link_read_dpcd(
+				link,
+				DP_DPCD_REV,
+				dpcd_data,
+				sizeof(dpcd_data));
+		if (status == DC_OK)
+			break;
+	}
+
+	link->dpcd_caps.dpcd_rev.raw =
+		dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+	if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
+		return false;
+
+	ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+			DP_DPCD_REV];
+
+	get_active_converter_info(ds_port.byte, link);
+
+	down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT -
+			DP_DPCD_REV];
+
+	link->dpcd_caps.allow_invalid_MSA_timing_param =
+		down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
+
+	link->dpcd_caps.max_ln_count.raw = dpcd_data[
+		DP_MAX_LANE_COUNT - DP_DPCD_REV];
+
+	link->dpcd_caps.max_down_spread.raw = dpcd_data[
+		DP_MAX_DOWNSPREAD - DP_DPCD_REV];
+
+	link->reported_link_cap.lane_count =
+		link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
+	link->reported_link_cap.link_rate = dpcd_data[
+		DP_MAX_LINK_RATE - DP_DPCD_REV];
+	link->reported_link_cap.link_spread =
+		link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
+		LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+
+	edp_config_cap.raw = dpcd_data[
+		DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
+	link->dpcd_caps.panel_mode_edp =
+		edp_config_cap.bits.ALT_SCRAMBLER_RESET;
+	link->dpcd_caps.dpcd_display_control_capable =
+		edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
+
+	return true;
+}
+
 bool detect_dp_sink_caps(struct dc_link *link)
 {
 	return retrieve_link_cap(link);
@@ -3067,21 +3619,20 @@ static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
 
 static void set_crtc_test_pattern(struct dc_link *link,
 				struct pipe_ctx *pipe_ctx,
-				enum dp_test_pattern test_pattern)
+				enum dp_test_pattern test_pattern,
+				enum dp_test_pattern_color_space test_pattern_color_space)
 {
 	enum controller_dp_test_pattern controller_test_pattern;
 	enum dc_color_depth color_depth = pipe_ctx->
 		stream->timing.display_color_depth;
 	struct bit_depth_reduction_params params;
 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	int width = pipe_ctx->stream->timing.h_addressable +
 		pipe_ctx->stream->timing.h_border_left +
 		pipe_ctx->stream->timing.h_border_right;
 	int height = pipe_ctx->stream->timing.v_addressable +
 		pipe_ctx->stream->timing.v_border_bottom +
 		pipe_ctx->stream->timing.v_border_top;
-#endif
 
 	memset(&params, 0, sizeof(params));
 
@@ -3125,10 +3676,29 @@ static void set_crtc_test_pattern(struct dc_link *link,
 		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
 			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
 				controller_test_pattern, color_depth);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 		else if (opp->funcs->opp_set_disp_pattern_generator) {
 			struct pipe_ctx *odm_pipe;
+			enum controller_dp_color_space controller_color_space;
 			int opp_cnt = 1;
+			int count;
+
+			switch (test_pattern_color_space) {
+			case DP_TEST_PATTERN_COLOR_SPACE_RGB:
+				controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+				break;
+			case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
+				controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601;
+				break;
+			case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
+				controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709;
+				break;
+			case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED:
+			default:
+				controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+				DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__);
+				ASSERT(0);
+				break;
+			}
 
 			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
 				opp_cnt++;
@@ -3141,6 +3711,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
 				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
 				odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
 					controller_test_pattern,
+					controller_color_space,
 					color_depth,
 					NULL,
 					width,
@@ -3148,12 +3719,18 @@ static void set_crtc_test_pattern(struct dc_link *link,
 			}
 			opp->funcs->opp_set_disp_pattern_generator(opp,
 				controller_test_pattern,
+				controller_color_space,
 				color_depth,
 				NULL,
 				width,
 				height);
+			/* wait for dpg to blank pixel data with test pattern */
+			for (count = 0; count < 1000; count++) {
+				if (opp->funcs->dpg_is_blanked(opp))
+					break;
+				udelay(100);
+			}
 		}
-#endif
 	}
 	break;
 	case DP_TEST_PATTERN_VIDEO_MODE:
@@ -3166,7 +3743,6 @@ static void set_crtc_test_pattern(struct dc_link *link,
 			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
 				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
 				color_depth);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 		else if (opp->funcs->opp_set_disp_pattern_generator) {
 			struct pipe_ctx *odm_pipe;
 			int opp_cnt = 1;
@@ -3181,6 +3757,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
 				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
 				odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
 					CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+					CONTROLLER_DP_COLOR_SPACE_UDEFINED,
 					color_depth,
 					NULL,
 					width,
@@ -3188,12 +3765,12 @@ static void set_crtc_test_pattern(struct dc_link *link,
 			}
 			opp->funcs->opp_set_disp_pattern_generator(opp,
 				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+				CONTROLLER_DP_COLOR_SPACE_UDEFINED,
 				color_depth,
 				NULL,
 				width,
 				height);
 		}
-#endif
 	}
 	break;
 
@@ -3205,6 +3782,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
 bool dc_link_dp_set_test_pattern(
 	struct dc_link *link,
 	enum dp_test_pattern test_pattern,
+	enum dp_test_pattern_color_space test_pattern_color_space,
 	const struct link_training_settings *p_link_settings,
 	const unsigned char *p_custom_pattern,
 	unsigned int cust_pattern_size)
@@ -3233,7 +3811,7 @@ bool dc_link_dp_set_test_pattern(
 	if (link->test_pattern_enabled && test_pattern ==
 			DP_TEST_PATTERN_VIDEO_MODE) {
 		/* Set CRTC Test Pattern */
-		set_crtc_test_pattern(link, pipe_ctx, test_pattern);
+		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
 		dp_set_hw_test_pattern(link, test_pattern,
 				(uint8_t *)p_custom_pattern,
 				(uint32_t)cust_pattern_size);
@@ -3256,8 +3834,8 @@ bool dc_link_dp_set_test_pattern(
 	if (is_dp_phy_pattern(test_pattern)) {
 		/* Set DPCD Lane Settings before running test pattern */
 		if (p_link_settings != NULL) {
-			dp_set_hw_lane_settings(link, p_link_settings);
-			dpcd_set_lane_settings(link, p_link_settings);
+			dp_set_hw_lane_settings(link, p_link_settings, DPRX);
+			dpcd_set_lane_settings(link, p_link_settings, DPRX);
 		}
 
 		/* Blank stream if running test pattern */
@@ -3347,8 +3925,38 @@ bool dc_link_dp_set_test_pattern(
 					     sizeof(training_pattern));
 		}
 	} else {
-	/* CRTC Patterns */
-		set_crtc_test_pattern(link, pipe_ctx, test_pattern);
+		enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
+
+		switch (test_pattern_color_space) {
+		case DP_TEST_PATTERN_COLOR_SPACE_RGB:
+			color_space = COLOR_SPACE_SRGB;
+			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+				color_space = COLOR_SPACE_SRGB_LIMITED;
+			break;
+
+		case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
+			color_space = COLOR_SPACE_YCBCR601;
+			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+				color_space = COLOR_SPACE_YCBCR601_LIMITED;
+			break;
+		case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
+			color_space = COLOR_SPACE_YCBCR709;
+			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+				color_space = COLOR_SPACE_YCBCR709_LIMITED;
+			break;
+		default:
+			break;
+		}
+		/* update MSA to requested color space */
+		pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
+				&pipe_ctx->stream->timing,
+				color_space,
+				pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
+				link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+
+		/* CRTC Patterns */
+		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
+
 		/* Set Test Pattern state */
 		link->test_pattern_enabled = true;
 	}
@@ -3468,7 +4076,6 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
 	return DP_PANEL_MODE_DEFAULT;
 }
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 void dp_set_fec_ready(struct dc_link *link, bool ready)
 {
 	/* FEC has to be "set ready" before the link training.
@@ -3538,5 +4145,4 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 		}
 	}
 }
-#endif
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index a519dbc..ddb8550 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -12,12 +12,38 @@
 #include "dc_link_ddc.h"
 #include "dm_helpers.h"
 #include "dpcd_defs.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 #include "dsc.h"
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #include "resource.h"
-#endif
+
+static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
+{
+	switch (lttpr_repeater_count) {
+	case 0x80: // 1 lttpr repeater
+		return 1;
+	case 0x40: // 2 lttpr repeaters
+		return 2;
+	case 0x20: // 3 lttpr repeaters
+		return 3;
+	case 0x10: // 4 lttpr repeaters
+		return 4;
+	case 0x08: // 5 lttpr repeaters
+		return 5;
+	case 0x04: // 6 lttpr repeaters
+		return 6;
+	case 0x02: // 7 lttpr repeaters
+		return 7;
+	case 0x01: // 8 lttpr repeaters
+		return 8;
+	default:
+		break;
+	}
+	return 0; // invalid value
+}
+
+static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset)
+{
+	return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset);
+}
 
 enum dc_status core_link_read_dpcd(
 	struct dc_link *link,
@@ -69,8 +95,8 @@ void dp_enable_link_phy(
 	const struct dc_link_settings *link_settings)
 {
 	struct link_encoder *link_enc = link->link_enc;
-	struct dc  *core_dc = link->ctx->dc;
-	struct dmcu *dmcu = core_dc->res_pool->dmcu;
+	struct dc  *dc = link->ctx->dc;
+	struct dmcu *dmcu = dc->res_pool->dmcu;
 
 	struct pipe_ctx *pipes =
 			link->dc->current_state->res_ctx.pipe_ctx;
@@ -147,15 +173,20 @@ bool edp_receiver_ready_T9(struct dc_link *link)
 }
 bool edp_receiver_ready_T7(struct dc_link *link)
 {
-	unsigned int tries = 0;
 	unsigned char sinkstatus = 0;
 	unsigned char edpRev = 0;
 	enum dc_status result = DC_OK;
 
+	/* use absolute time stamp to constrain max T7*/
+	unsigned long long enter_timestamp = 0;
+	unsigned long long finish_timestamp = 0;
+	unsigned long long time_taken_in_ns = 0;
+
 	result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
 	if (result == DC_OK && edpRev < DP_EDP_12)
 		return true;
 	/* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
+	enter_timestamp = dm_get_timestamp(link->ctx);
 	do {
 		sinkstatus = 0;
 		result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
@@ -163,8 +194,10 @@ bool edp_receiver_ready_T7(struct dc_link *link)
 			break;
 		if (result != DC_OK)
 			break;
-		udelay(25); //MAx T7 is 50ms
-	} while (++tries < 300);
+		udelay(25);
+		finish_timestamp = dm_get_timestamp(link->ctx);
+		time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
+	} while (time_taken_in_ns < 50 * 1000000); //Max T7 is 50ms
 
 	if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
 		udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
@@ -174,8 +207,8 @@ bool edp_receiver_ready_T7(struct dc_link *link)
 
 void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
 {
-	struct dc  *core_dc = link->ctx->dc;
-	struct dmcu *dmcu = core_dc->res_pool->dmcu;
+	struct dc  *dc = link->ctx->dc;
+	struct dmcu *dmcu = dc->res_pool->dmcu;
 
 	if (!link->wa_flags.dp_keep_receiver_powered)
 		dp_receiver_power_ctrl(link, false);
@@ -212,7 +245,8 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)
 
 bool dp_set_hw_training_pattern(
 	struct dc_link *link,
-	enum dc_dp_training_pattern pattern)
+	enum dc_dp_training_pattern pattern,
+	uint32_t offset)
 {
 	enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
 
@@ -240,10 +274,14 @@ bool dp_set_hw_training_pattern(
 
 void dp_set_hw_lane_settings(
 	struct dc_link *link,
-	const struct link_training_settings *link_settings)
+	const struct link_training_settings *link_settings,
+	uint32_t offset)
 {
 	struct link_encoder *encoder = link->link_enc;
 
+	if (!link->is_lttpr_mode_transparent && !is_immediate_downstream(link, offset))
+		return;
+
 	/* call Encoder to set lane settings */
 	encoder->funcs->dp_set_lane_settings(encoder, link_settings);
 }
@@ -302,20 +340,12 @@ void dp_retrain_link_dp_test(struct dc_link *link,
 			memset(&link->cur_link_settings, 0,
 				sizeof(link->cur_link_settings));
 
-			link->link_enc->funcs->enable_dp_output(
-						link->link_enc,
-						link_setting,
-						pipes[i].clock_source->id);
-			link->cur_link_settings = *link_setting;
-
-			dp_receiver_power_ctrl(link, true);
-
 			perform_link_training_with_retries(
-					link,
 					link_setting,
 					skip_video_pattern,
-					LINK_TRAINING_ATTEMPTS);
-
+					LINK_TRAINING_ATTEMPTS,
+					&pipes[i],
+					SIGNAL_TYPE_DISPLAY_PORT);
 
 			link->dc->hwss.enable_stream(&pipes[i]);
 
@@ -339,7 +369,6 @@ void dp_retrain_link_dp_test(struct dc_link *link,
 	}
 }
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 #define DC_LOGGER \
 	dsc->ctx->logger
 static void dsc_optc_config_log(struct display_stream_compressor *dsc,
@@ -365,14 +394,14 @@ static void dsc_optc_config_log(struct display_stream_compressor *dsc,
 
 static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
 {
-	struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+	struct dc *dc = pipe_ctx->stream->ctx->dc;
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	bool result = false;
 
-	if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
+	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
 		result = true;
 	else
-		result = dm_helpers_dp_write_dsc_enable(core_dc->ctx, stream, enable);
+		result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
 	return result;
 }
 
@@ -382,7 +411,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
 void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
 {
 	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
-	struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+	struct dc *dc = pipe_ctx->stream->ctx->dc;
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	struct pipe_ctx *odm_pipe;
 	int opp_cnt = 1;
@@ -418,7 +447,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
 		optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
 
 		/* Enable DSC in encoder */
-		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 			DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
 			dsc_optc_config_log(dsc, &dsc_optc_cfg);
 			pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
@@ -443,7 +472,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
 				OPTC_DSC_DISABLED, 0, 0);
 
 		/* disable DSC in stream encoder */
-		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 			pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
 					pipe_ctx->stream_res.stream_enc,
 					OPTC_DSC_DISABLED, 0, 0);
@@ -486,7 +515,7 @@ bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable)
 bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
 {
 	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
-	struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+	struct dc *dc = pipe_ctx->stream->ctx->dc;
 	struct dc_stream_state *stream = pipe_ctx->stream;
 
 	if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
@@ -496,6 +525,9 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
 		struct dsc_config dsc_cfg;
 		uint8_t dsc_packed_pps[128];
 
+		memset(&dsc_cfg, 0, sizeof(dsc_cfg));
+		memset(dsc_packed_pps, 0, 128);
+
 		/* Enable DSC hw block */
 		dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
 		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
@@ -505,7 +537,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
 
 		DC_LOG_DSC(" ");
 		dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
-		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 			DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
 			pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
 									pipe_ctx->stream_res.stream_enc,
@@ -514,7 +546,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
 		}
 	} else {
 		/* disable DSC PPS in stream encoder */
-		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+		if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 			pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
 						pipe_ctx->stream_res.stream_enc, false, NULL);
 		}
@@ -537,5 +569,4 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
 	dp_set_dsc_pps_sdp(pipe_ctx, true);
 	return true;
 }
-#endif
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 3769830..a0eb9e5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -46,15 +46,11 @@
 #include "dce100/dce100_resource.h"
 #include "dce110/dce110_resource.h"
 #include "dce112/dce112_resource.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "dcn10/dcn10_resource.h"
 #endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #include "dcn20/dcn20_resource.h"
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #include "dcn21/dcn21_resource.h"
-#endif
 #include "dce120/dce120_resource.h"
 
 #define DC_LOGGER_INIT(logger)
@@ -99,23 +95,19 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
 		else
 			dc_version = DCE_VERSION_12_0;
 		break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	case FAMILY_RV:
 		dc_version = DCN_VERSION_1_0;
 		if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev))
 			dc_version = DCN_VERSION_1_01;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 		if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev))
 			dc_version = DCN_VERSION_2_1;
-#endif
 		break;
 #endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	case FAMILY_NV:
 		dc_version = DCN_VERSION_2_0;
 		break;
-#endif
 	default:
 		dc_version = DCE_VERSION_UNKNOWN;
 		break;
@@ -162,20 +154,16 @@ struct resource_pool *dc_create_resource_pool(struct dc  *dc,
 				init_data->num_virtual_links, dc);
 		break;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	case DCN_VERSION_1_0:
 	case DCN_VERSION_1_01:
 		res_pool = dcn10_create_resource_pool(init_data, dc);
 		break;
-#endif
 
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	case DCN_VERSION_2_0:
 		res_pool = dcn20_create_resource_pool(init_data, dc);
 		break;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	case DCN_VERSION_2_1:
 		res_pool = dcn21_create_resource_pool(init_data, dc);
 		break;
@@ -951,44 +939,44 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
 	data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
 
 }
-static bool are_rects_integer_multiples(struct rect src, struct rect dest)
-{
-	if (dest.width  >= src.width  && dest.width  % src.width  == 0 &&
-		dest.height >= src.height && dest.height % src.height == 0)
-		return true;
 
-	return false;
+/*
+ * When handling 270 rotation in mixed SLS mode, we have
+ * stream->timing.h_border_left that is non zero.  If we are doing
+ * pipe-splitting, this h_border_left value gets added to recout.x and when it
+ * calls calculate_inits_and_adj_vp() and
+ * adjust_vp_and_init_for_seamless_clip(), it can cause viewport.height for a
+ * pipe to be incorrect.
+ *
+ * To fix this, instead of using stream->timing.h_border_left, we can use
+ * stream->dst.x to represent the border instead.  So we will set h_border_left
+ * to 0 and shift the appropriate amount in stream->dst.x.  We will then
+ * perform all calculations in resource_build_scaling_params() based on this
+ * and then restore the h_border_left and stream->dst.x to their original
+ * values.
+ *
+ * shift_border_left_to_dst() will shift the amount of h_border_left to
+ * stream->dst.x and set h_border_left to 0.  restore_border_left_from_dst()
+ * will restore h_border_left and stream->dst.x back to their original values
+ * We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the
+ * original h_border_left value in its calculation.
+ */
+int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
+{
+	int store_h_border_left = pipe_ctx->stream->timing.h_border_left;
+
+	if (store_h_border_left) {
+		pipe_ctx->stream->timing.h_border_left = 0;
+		pipe_ctx->stream->dst.x += store_h_border_left;
+	}
+	return store_h_border_left;
 }
 
-static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx)
+void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
+                                  int store_h_border_left)
 {
-	if (!pipe_ctx->plane_state->scaling_quality.integer_scaling)
-		return;
-
-	//for Centered Mode
-	if (pipe_ctx->stream->dst.width  == pipe_ctx->stream->src.width &&
-		pipe_ctx->stream->dst.height == pipe_ctx->stream->src.height) {
-		// calculate maximum # of replication of src onto addressable
-		unsigned int integer_multiple = min(
-				pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width,
-				pipe_ctx->stream->timing.v_addressable  / pipe_ctx->stream->src.height);
-
-		//scale dst
-		pipe_ctx->stream->dst.width  = integer_multiple * pipe_ctx->stream->src.width;
-		pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height;
-
-		//center dst onto addressable
-		pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2;
-		pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2;
-	}
-
-	//disable taps if src & dst are integer ratio
-	if (are_rects_integer_multiples(pipe_ctx->stream->src, pipe_ctx->stream->dst)) {
-		pipe_ctx->plane_state->scaling_quality.v_taps = 1;
-		pipe_ctx->plane_state->scaling_quality.h_taps = 1;
-		pipe_ctx->plane_state->scaling_quality.v_taps_c = 1;
-		pipe_ctx->plane_state->scaling_quality.h_taps_c = 1;
-	}
+	pipe_ctx->stream->dst.x -= store_h_border_left;
+	pipe_ctx->stream->timing.h_border_left = store_h_border_left;
 }
 
 bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
@@ -996,6 +984,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
 	bool res = false;
+	int store_h_border_left = shift_border_left_to_dst(pipe_ctx);
 	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
 	/* Important: scaling ratio calculation requires pixel format,
 	 * lb depth calculation requires recout and taps require scaling ratios.
@@ -1004,14 +993,18 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 	pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
 			pipe_ctx->plane_state->format);
 
-	calculate_integer_scaling(pipe_ctx);
-
 	calculate_scaling_ratios(pipe_ctx);
 
 	calculate_viewport(pipe_ctx);
 
-	if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
+	if (pipe_ctx->plane_res.scl_data.viewport.height < 16 ||
+		pipe_ctx->plane_res.scl_data.viewport.width < 16) {
+		if (store_h_border_left) {
+			restore_border_left_from_dst(pipe_ctx,
+				store_h_border_left);
+		}
 		return false;
+	}
 
 	calculate_recout(pipe_ctx);
 
@@ -1024,8 +1017,10 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 	pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
 	pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
 
-	pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
-	pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+	pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable +
+		store_h_border_left + timing->h_border_right;
+	pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
+		timing->v_border_top + timing->v_border_bottom;
 
 	/* Taps calculations */
 	if (pipe_ctx->plane_res.xfm != NULL)
@@ -1072,6 +1067,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 				plane_state->dst_rect.x,
 				plane_state->dst_rect.y);
 
+	if (store_h_border_left)
+		restore_border_left_from_dst(pipe_ctx, store_h_border_left);
+
 	return res;
 }
 
@@ -1217,7 +1215,7 @@ static struct pipe_ctx *acquire_free_pipe_for_head(
 	return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream);
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 static int acquire_first_split_pipe(
 		struct resource_context *res_ctx,
 		const struct resource_pool *pool,
@@ -1298,7 +1296,7 @@ bool dc_add_plane_to_context(
 
 		free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
 
-	#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+	#if defined(CONFIG_DRM_AMD_DC_DCN)
 		if (!free_pipe) {
 			int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
 			if (pipe_idx >= 0)
@@ -1891,7 +1889,7 @@ static int acquire_resource_from_hw_enabled_state(
 	inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
 
 	if (inst == ENGINE_ID_UNKNOWN)
-		return false;
+		return -1;
 
 	for (i = 0; i < pool->stream_enc_count; i++) {
 		if (pool->stream_enc[i]->id == inst) {
@@ -1903,10 +1901,10 @@ static int acquire_resource_from_hw_enabled_state(
 
 	// tg_inst not found
 	if (i == pool->stream_enc_count)
-		return false;
+		return -1;
 
 	if (tg_inst >= pool->timing_generator_count)
-		return false;
+		return -1;
 
 	if (!res_ctx->pipe_ctx[tg_inst].stream) {
 		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[tg_inst];
@@ -1919,8 +1917,26 @@ static int acquire_resource_from_hw_enabled_state(
 		pipe_ctx->plane_res.dpp = pool->dpps[tg_inst];
 		pipe_ctx->stream_res.opp = pool->opps[tg_inst];
 
-		if (pool->dpps[tg_inst])
+		if (pool->dpps[tg_inst]) {
 			pipe_ctx->plane_res.mpcc_inst = pool->dpps[tg_inst]->inst;
+
+			// Read DPP->MPCC->OPP Pipe from HW State
+			if (pool->mpc->funcs->read_mpcc_state) {
+				struct mpcc_state s = {0};
+
+				pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s);
+
+				if (s.dpp_id < MAX_MPCC)
+					pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id = s.dpp_id;
+
+				if (s.bot_mpcc_id < MAX_MPCC)
+					pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot =
+							&pool->mpc->mpcc_array[s.bot_mpcc_id];
+
+				if (s.opp_id < MAX_OPP)
+					pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id;
+			}
+		}
 		pipe_ctx->pipe_idx = tg_inst;
 
 		pipe_ctx->stream = stream;
@@ -1972,7 +1988,7 @@ enum dc_status resource_map_pool_resources(
 		/* acquire new resources */
 		pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
 
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
 	if (pipe_idx < 0)
 		pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
 #endif
@@ -2050,6 +2066,13 @@ void dc_resource_state_construct(
 	dst_ctx->clk_mgr = dc->clk_mgr;
 }
 
+
+bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)
+{
+	return dc->res_pool->res_cap->num_dsc > 0;
+}
+
+
 /**
  * dc_validate_global_state() - Determine if HW can support a given state
  * Checks HW resource availability and bandwidth requirement.
@@ -2306,7 +2329,7 @@ static void set_avi_info_frame(
 		if (color_space == COLOR_SPACE_SRGB ||
 			color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
 			hdmi_info.bits.Q0_Q1   = RGB_QUANTIZATION_FULL_RANGE;
-			hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
+			hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
 		} else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
 					color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
 			hdmi_info.bits.Q0_Q1   = RGB_QUANTIZATION_LIMITED_RANGE;
@@ -2772,9 +2795,8 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
 
 enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
 {
-	struct dc  *core_dc = dc;
 	struct dc_link *link = stream->link;
-	struct timing_generator *tg = core_dc->res_pool->timing_generators[0];
+	struct timing_generator *tg = dc->res_pool->timing_generators[0];
 	enum dc_status res = DC_OK;
 
 	calculate_phy_pix_clks(stream);
@@ -2837,3 +2859,48 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
 		return -1;
 	}
 }
+static unsigned int get_max_audio_sample_rate(struct audio_mode *modes)
+{
+	if (modes) {
+		if (modes->sample_rates.rate.RATE_192)
+			return 192000;
+		if (modes->sample_rates.rate.RATE_176_4)
+			return 176400;
+		if (modes->sample_rates.rate.RATE_96)
+			return 96000;
+		if (modes->sample_rates.rate.RATE_88_2)
+			return 88200;
+		if (modes->sample_rates.rate.RATE_48)
+			return 48000;
+		if (modes->sample_rates.rate.RATE_44_1)
+			return 44100;
+		if (modes->sample_rates.rate.RATE_32)
+			return 32000;
+	}
+	/*original logic when no audio info*/
+	return 441000;
+}
+
+void get_audio_check(struct audio_info *aud_modes,
+	struct audio_check *audio_chk)
+{
+	unsigned int i;
+	unsigned int max_sample_rate = 0;
+
+	if (aud_modes) {
+		audio_chk->audio_packet_type = 0x2;/*audio sample packet AP = .25 for layout0, 1 for layout1*/
+
+		audio_chk->max_audiosample_rate = 0;
+		for (i = 0; i < aud_modes->mode_count; i++) {
+			max_sample_rate = get_max_audio_sample_rate(&aud_modes->modes[i]);
+			if (audio_chk->max_audiosample_rate < max_sample_rate)
+				audio_chk->max_audiosample_rate = max_sample_rate;
+			/*dts takes the same as type 2: AP = 0.25*/
+		}
+		/*check which one take more bandwidth*/
+		if (audio_chk->max_audiosample_rate > 192000)
+			audio_chk->audio_packet_type = 0x9;/*AP =1*/
+		audio_chk->acat = 0;/*not support*/
+	}
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
index 5cbfdf1..a249a0e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -33,7 +33,7 @@
  * Private functions
  ******************************************************************************/
 
-static void destruct(struct dc_sink *sink)
+static void dc_sink_destruct(struct dc_sink *sink)
 {
 	if (sink->dc_container_id) {
 		kfree(sink->dc_container_id);
@@ -41,7 +41,7 @@ static void destruct(struct dc_sink *sink)
 	}
 }
 
-static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)
+static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)
 {
 
 	struct dc_link *link = init_params->link;
@@ -75,7 +75,7 @@ void dc_sink_retain(struct dc_sink *sink)
 static void dc_sink_free(struct kref *kref)
 {
 	struct dc_sink *sink = container_of(kref, struct dc_sink, refcount);
-	destruct(sink);
+	dc_sink_destruct(sink);
 	kfree(sink);
 }
 
@@ -91,7 +91,7 @@ struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params)
 	if (NULL == sink)
 		goto alloc_fail;
 
-	if (false == construct(sink, init_params))
+	if (false == dc_sink_construct(sink, init_params))
 		goto construct_fail;
 
 	kref_init(&sink->refcount);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index bb09243..6ddbb00 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -27,14 +27,12 @@
 #include <linux/slab.h>
 
 #include "dm_services.h"
+#include "basics/dc_common.h"
 #include "dc.h"
 #include "core_types.h"
 #include "resource.h"
 #include "ipp.h"
 #include "timing_generator.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-#include "dcn10/dcn10_hw_sequencer.h"
-#endif
 
 #define DC_LOGGER dc->ctx->logger
 
@@ -58,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink)
 	}
 }
 
-static void construct(struct dc_stream_state *stream,
+static void dc_stream_construct(struct dc_stream_state *stream,
 	struct dc_sink *dc_sink_data)
 {
 	uint32_t i = 0;
@@ -108,7 +106,6 @@ static void construct(struct dc_stream_state *stream,
 	/* EDID CAP translation for HDMI 2.0 */
 	stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	memset(&stream->timing.dsc_cfg, 0, sizeof(stream->timing.dsc_cfg));
 	stream->timing.dsc_cfg.num_slices_h = 0;
 	stream->timing.dsc_cfg.num_slices_v = 0;
@@ -117,7 +114,6 @@ static void construct(struct dc_stream_state *stream,
 	stream->timing.dsc_cfg.linebuf_depth = 9;
 	stream->timing.dsc_cfg.version_minor = 2;
 	stream->timing.dsc_cfg.ycbcr422_simple = 0;
-#endif
 
 	update_stream_signal(stream, dc_sink_data);
 
@@ -129,7 +125,7 @@ static void construct(struct dc_stream_state *stream,
 	stream->ctx->dc_stream_id_count++;
 }
 
-static void destruct(struct dc_stream_state *stream)
+static void dc_stream_destruct(struct dc_stream_state *stream)
 {
 	dc_sink_release(stream->sink);
 	if (stream->out_transfer_func != NULL) {
@@ -147,7 +143,7 @@ static void dc_stream_free(struct kref *kref)
 {
 	struct dc_stream_state *stream = container_of(kref, struct dc_stream_state, refcount);
 
-	destruct(stream);
+	dc_stream_destruct(stream);
 	kfree(stream);
 }
 
@@ -170,7 +166,7 @@ struct dc_stream_state *dc_create_stream_for_sink(
 	if (stream == NULL)
 		return NULL;
 
-	construct(stream, sink);
+	dc_stream_construct(stream, sink);
 
 	kref_init(&stream->refcount);
 
@@ -237,7 +233,7 @@ struct dc_stream_status *dc_stream_get_status(
 
 static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
 {
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	unsigned int vupdate_line;
 	unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
 	struct dc_stream_state *stream = pipe_ctx->stream;
@@ -246,7 +242,7 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
 	if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
 			ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
 
-		vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx);
+		vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
 		if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
 			return;
 
@@ -272,7 +268,7 @@ bool dc_stream_set_cursor_attributes(
 	const struct dc_cursor_attributes *attributes)
 {
 	int i;
-	struct dc  *core_dc;
+	struct dc  *dc;
 	struct resource_context *res_ctx;
 	struct pipe_ctx *pipe_to_program = NULL;
 
@@ -290,8 +286,8 @@ bool dc_stream_set_cursor_attributes(
 		return false;
 	}
 
-	core_dc = stream->ctx->dc;
-	res_ctx = &core_dc->current_state->res_ctx;
+	dc = stream->ctx->dc;
+	res_ctx = &dc->current_state->res_ctx;
 	stream->cursor_attributes = *attributes;
 
 	for (i = 0; i < MAX_PIPES; i++) {
@@ -303,17 +299,17 @@ bool dc_stream_set_cursor_attributes(
 		if (!pipe_to_program) {
 			pipe_to_program = pipe_ctx;
 
-			delay_cursor_until_vupdate(pipe_ctx, core_dc);
-			core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
+			delay_cursor_until_vupdate(pipe_ctx, dc);
+			dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
 		}
 
-		core_dc->hwss.set_cursor_attribute(pipe_ctx);
-		if (core_dc->hwss.set_cursor_sdr_white_level)
-			core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+		dc->hwss.set_cursor_attribute(pipe_ctx);
+		if (dc->hwss.set_cursor_sdr_white_level)
+			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
 	}
 
 	if (pipe_to_program)
-		core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
+		dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
 
 	return true;
 }
@@ -323,7 +319,7 @@ bool dc_stream_set_cursor_position(
 	const struct dc_cursor_position *position)
 {
 	int i;
-	struct dc  *core_dc;
+	struct dc  *dc;
 	struct resource_context *res_ctx;
 	struct pipe_ctx *pipe_to_program = NULL;
 
@@ -337,8 +333,8 @@ bool dc_stream_set_cursor_position(
 		return false;
 	}
 
-	core_dc = stream->ctx->dc;
-	res_ctx = &core_dc->current_state->res_ctx;
+	dc = stream->ctx->dc;
+	res_ctx = &dc->current_state->res_ctx;
 	stream->cursor_position = *position;
 
 	for (i = 0; i < MAX_PIPES; i++) {
@@ -354,20 +350,19 @@ bool dc_stream_set_cursor_position(
 		if (!pipe_to_program) {
 			pipe_to_program = pipe_ctx;
 
-			delay_cursor_until_vupdate(pipe_ctx, core_dc);
-			core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
+			delay_cursor_until_vupdate(pipe_ctx, dc);
+			dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
 		}
 
-		core_dc->hwss.set_cursor_position(pipe_ctx);
+		dc->hwss.set_cursor_position(pipe_ctx);
 	}
 
 	if (pipe_to_program)
-		core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
+		dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
 
 	return true;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 bool dc_stream_add_writeback(struct dc *dc,
 		struct dc_stream_state *stream,
 		struct dc_writeback_info *wb_info)
@@ -411,25 +406,30 @@ bool dc_stream_add_writeback(struct dc *dc,
 		stream->writeback_info[stream->num_wb_info++] = *wb_info;
 	}
 
-	if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
-		dm_error("DC: update_bandwidth failed!\n");
-		return false;
-	}
-
-	/* enable writeback */
 	if (dc->hwss.enable_writeback) {
 		struct dc_stream_status *stream_status = dc_stream_get_status(stream);
 		struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+		dwb->otg_inst = stream_status->primary_otg_inst;
+	}
+	if (IS_DIAG_DC(dc->ctx->dce_environment)) {
+		if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+			dm_error("DC: update_bandwidth failed!\n");
+			return false;
+		}
 
-		if (dwb->funcs->is_enabled(dwb)) {
-			/* writeback pipe already enabled, only need to update */
-			dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state);
-		} else {
-			/* Enable writeback pipe from scratch*/
-			dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state);
+		/* enable writeback */
+		if (dc->hwss.enable_writeback) {
+			struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+
+			if (dwb->funcs->is_enabled(dwb)) {
+				/* writeback pipe already enabled, only need to update */
+				dc->hwss.update_writeback(dc, wb_info, dc->current_state);
+			} else {
+				/* Enable writeback pipe from scratch*/
+				dc->hwss.enable_writeback(dc, wb_info, dc->current_state);
+			}
 		}
 	}
-
 	return true;
 }
 
@@ -468,26 +468,35 @@ bool dc_stream_remove_writeback(struct dc *dc,
 	}
 	stream->num_wb_info = j;
 
-	/* recalculate and apply DML parameters */
-	if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
-		dm_error("DC: update_bandwidth failed!\n");
-		return false;
+	if (IS_DIAG_DC(dc->ctx->dce_environment)) {
+		/* recalculate and apply DML parameters */
+		if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+			dm_error("DC: update_bandwidth failed!\n");
+			return false;
+		}
+
+		/* disable writeback */
+		if (dc->hwss.disable_writeback)
+			dc->hwss.disable_writeback(dc, dwb_pipe_inst);
 	}
-
-	/* disable writeback */
-	if (dc->hwss.disable_writeback)
-		dc->hwss.disable_writeback(dc, dwb_pipe_inst);
-
 	return true;
 }
-#endif
 
+bool dc_stream_warmup_writeback(struct dc *dc,
+		int num_dwb,
+		struct dc_writeback_info *wb_info)
+{
+	if (dc->hwss.mmhubbub_warmup)
+		return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);
+	else
+		return false;
+}
 uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
 {
 	uint8_t i;
-	struct dc  *core_dc = stream->ctx->dc;
+	struct dc  *dc = stream->ctx->dc;
 	struct resource_context *res_ctx =
-		&core_dc->current_state->res_ctx;
+		&dc->current_state->res_ctx;
 
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -544,9 +553,9 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
 {
 	uint8_t i;
 	bool ret = false;
-	struct dc  *core_dc = stream->ctx->dc;
+	struct dc  *dc = stream->ctx->dc;
 	struct resource_context *res_ctx =
-		&core_dc->current_state->res_ctx;
+		&dc->current_state->res_ctx;
 
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -567,10 +576,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
 	return ret;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)
 {
-	bool status = true;
 	struct pipe_ctx *pipe = NULL;
 	int i;
 
@@ -586,8 +593,7 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)
 	if (i == MAX_PIPES)
 		return true;
 
-	status = dc->hwss.dmdata_status_done(pipe);
-	return status;
+	return dc->hwss.dmdata_status_done(pipe);
 }
 
 bool dc_stream_set_dynamic_metadata(struct dc *dc,
@@ -630,7 +636,6 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
 
 	return true;
 }
-#endif
 
 void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index b9d6a5b..ea1229a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -37,7 +37,7 @@
 /*******************************************************************************
  * Private functions
  ******************************************************************************/
-static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
+static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
 {
 	plane_state->ctx = ctx;
 
@@ -50,7 +50,6 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
 		plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
 		plane_state->in_transfer_func->ctx = ctx;
 	}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	plane_state->in_shaper_func = dc_create_transfer_func();
 	if (plane_state->in_shaper_func != NULL) {
 		plane_state->in_shaper_func->type = TF_TYPE_BYPASS;
@@ -67,10 +66,9 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
 		plane_state->blend_tf->ctx = ctx;
 	}
 
-#endif
 }
 
-static void destruct(struct dc_plane_state *plane_state)
+static void dc_plane_destruct(struct dc_plane_state *plane_state)
 {
 	if (plane_state->gamma_correction != NULL) {
 		dc_gamma_release(&plane_state->gamma_correction);
@@ -80,7 +78,6 @@ static void destruct(struct dc_plane_state *plane_state)
 				plane_state->in_transfer_func);
 		plane_state->in_transfer_func = NULL;
 	}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	if (plane_state->in_shaper_func != NULL) {
 		dc_transfer_func_release(
 				plane_state->in_shaper_func);
@@ -97,7 +94,6 @@ static void destruct(struct dc_plane_state *plane_state)
 		plane_state->blend_tf = NULL;
 	}
 
-#endif
 }
 
 /*******************************************************************************
@@ -112,16 +108,14 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
 
 struct dc_plane_state *dc_create_plane_state(struct dc *dc)
 {
-	struct dc *core_dc = dc;
-
 	struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
-						      GFP_KERNEL);
+							GFP_KERNEL);
 
 	if (NULL == plane_state)
 		return NULL;
 
 	kref_init(&plane_state->refcount);
-	construct(core_dc->ctx, plane_state);
+	dc_plane_construct(dc->ctx, plane_state);
 
 	return plane_state;
 }
@@ -141,7 +135,7 @@ const struct dc_plane_status *dc_plane_get_status(
 		const struct dc_plane_state *plane_state)
 {
 	const struct dc_plane_status *plane_status;
-	struct dc  *core_dc;
+	struct dc  *dc;
 	int i;
 
 	if (!plane_state ||
@@ -152,15 +146,15 @@ const struct dc_plane_status *dc_plane_get_status(
 	}
 
 	plane_status = &plane_state->status;
-	core_dc = plane_state->ctx->dc;
+	dc = plane_state->ctx->dc;
 
-	if (core_dc->current_state == NULL)
+	if (dc->current_state == NULL)
 		return NULL;
 
 	/* Find the current plane state and set its pending bit to false */
-	for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe_ctx =
-				&core_dc->current_state->res_ctx.pipe_ctx[i];
+				&dc->current_state->res_ctx.pipe_ctx[i];
 
 		if (pipe_ctx->plane_state != plane_state)
 			continue;
@@ -170,14 +164,14 @@ const struct dc_plane_status *dc_plane_get_status(
 		break;
 	}
 
-	for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe_ctx =
-				&core_dc->current_state->res_ctx.pipe_ctx[i];
+				&dc->current_state->res_ctx.pipe_ctx[i];
 
 		if (pipe_ctx->plane_state != plane_state)
 			continue;
 
-		core_dc->hwss.update_pending_status(pipe_ctx);
+		dc->hwss.update_pending_status(pipe_ctx);
 	}
 
 	return plane_status;
@@ -191,7 +185,7 @@ void dc_plane_state_retain(struct dc_plane_state *plane_state)
 static void dc_plane_state_free(struct kref *kref)
 {
 	struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount);
-	destruct(plane_state);
+	dc_plane_destruct(plane_state);
 	kvfree(plane_state);
 }
 
@@ -262,7 +256,6 @@ struct dc_transfer_func *dc_create_transfer_func(void)
 	return NULL;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 static void dc_3dlut_func_free(struct kref *kref)
 {
 	struct dc_3dlut *lut = container_of(kref, struct dc_3dlut, refcount);
@@ -296,6 +289,5 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut)
 {
 	kref_get(&lut->refcount);
 }
-#endif
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 0416a17..8ff25b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.56"
+#define DC_VER "3.2.69"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -54,6 +54,10 @@ struct dc_versions {
 	struct dmcu_version dmcu_version;
 };
 
+enum dp_protocol_version {
+	DP_VERSION_1_4,
+};
+
 enum dc_plane_type {
 	DC_PLANE_TYPE_INVALID,
 	DC_PLANE_TYPE_DCE_RGB,
@@ -112,17 +116,15 @@ struct dc_caps {
 	bool disable_dp_clk_share;
 	bool psp_setup_panel_mode;
 	bool extended_aux_timeout_support;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+	bool dmcub_support;
 	bool hw_3d_lut;
-#endif
+	enum dp_protocol_version max_dp_protocol_version;
 	struct dc_plane_cap planes[MAX_PLANES];
 };
 
 struct dc_bug_wa {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	bool no_connect_phy_config;
 	bool dedcn20_305_wa;
-#endif
 	bool skip_clock_update;
 };
 
@@ -155,11 +157,14 @@ struct dc_surface_dcc_cap {
 	bool const_color_support;
 };
 
-struct dc_static_screen_events {
-	bool force_trigger;
-	bool cursor_update;
-	bool surface_update;
-	bool overlay_update;
+struct dc_static_screen_params {
+	struct {
+		bool force_trigger;
+		bool cursor_update;
+		bool surface_update;
+		bool overlay_update;
+	} triggers;
+	unsigned int num_frames;
 };
 
 
@@ -363,10 +368,10 @@ struct dc_debug_options {
 	bool disable_dfs_bypass;
 	bool disable_dpp_power_gate;
 	bool disable_hubp_power_gate;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	bool disable_dsc_power_gate;
 	int dsc_min_slice_height_override;
-#endif
+	int dsc_bpp_increment_div;
+	bool native422_support;
 	bool disable_pplib_wm_range;
 	enum wm_report_mode pplib_wm_report_mode;
 	unsigned int min_disp_clk_khz;
@@ -401,22 +406,26 @@ struct dc_debug_options {
 	unsigned int force_odm_combine; //bit vector based on otg inst
 	unsigned int force_fclk_khz;
 	bool disable_tri_buf;
+	bool dmub_offload_enabled;
+	bool dmcub_emulation;
+	bool dmub_command_table; /* for testing only */
 	struct dc_bw_validation_profile bw_val_profile;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	bool disable_fec;
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 	bool disable_48mhz_pwrdwn;
-#endif
 	/* This forces a hard min on the DCFCLK requested to SMU/PP
 	 * watermarks are not affected.
 	 */
 	unsigned int force_min_dcfclk_mhz;
 	bool disable_timing_sync;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	bool cm_in_bypass;
-#endif
 	int force_clock_mode;/*every mode change.*/
+
+	bool nv12_iflip_vm_wa;
+	bool disable_dram_clock_change_vactive_support;
+	bool validate_dml_output;
+	bool enable_dmcub_surface_flip;
+	bool usbc_combo_phy_reset_wa;
+	bool disable_dsc;
 };
 
 struct dc_debug_data {
@@ -425,7 +434,6 @@ struct dc_debug_data {
 	uint32_t auxErrorCount;
 };
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 struct dc_phy_addr_space_config {
 	struct {
 		uint64_t start_addr;
@@ -455,7 +463,6 @@ struct dc_virtual_addr_space_config {
 	uint32_t	page_table_block_size_in_bytes;
 	uint8_t		page_table_depth; // 1 = 1 level, 2 = 2 level, etc.  0 = invalid
 };
-#endif
 
 struct dc_bounding_box_overrides {
 	int sr_exit_time_ns;
@@ -483,9 +490,7 @@ struct dc {
 	struct dc_bounding_box_overrides bb_overrides;
 	struct dc_bug_wa work_arounds;
 	struct dc_context *ctx;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	struct dc_phy_addr_space_config vm_pa_config;
-#endif
 
 	uint8_t link_count;
 	struct dc_link *links[MAX_PIPES * 2];
@@ -501,7 +506,7 @@ struct dc {
 	/* Inputs into BW and WM calculations. */
 	struct bw_calcs_dceip *bw_dceip;
 	struct bw_calcs_vbios *bw_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
 	struct dcn_soc_bounding_box *dcn_soc;
 	struct dcn_ip_params *dcn_ip;
 	struct display_mode_lib dml;
@@ -515,7 +520,7 @@ struct dc {
 	bool optimized_required;
 
 	/* Require to maintain clocks and bandwidth for UEFI enabled HW */
-	bool optimize_seamless_boot;
+	int optimize_seamless_boot_streams;
 
 	/* FBC compressor */
 	struct compressor *fbc_compressor;
@@ -523,10 +528,8 @@ struct dc {
 	struct dc_debug_data debug_data;
 
 	const char *build_id;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	struct vm_helper *vm_helper;
 	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#endif
 };
 
 enum frame_buffer_mode {
@@ -558,15 +561,16 @@ struct dc_init_data {
 	struct dc_bios *vbios_override;
 	enum dce_environment dce_environment;
 
+	struct dmub_offload_funcs *dmub_if;
+	struct dc_reg_helper_state *dmub_offload;
+
 	struct dc_config flags;
 	uint32_t log_mask;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	/**
 	 * gpu_info FW provided soc bounding box struct or 0 if not
 	 * available in FW
 	 */
 	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#endif
 };
 
 struct dc_callback_init {
@@ -581,11 +585,9 @@ struct dc *dc_create(const struct dc_init_data *init_params);
 void dc_hardware_init(struct dc *dc);
 
 int dc_get_vmid_use_vector(struct dc *dc);
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid);
 /* Returns the number of vmids supported */
 int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_config);
-#endif
 void dc_init_callbacks(struct dc *dc,
 		const struct dc_callback_init *init_params);
 void dc_deinit_callbacks(struct dc *dc);
@@ -661,7 +663,6 @@ struct dc_transfer_func {
 	};
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 
 union dc_3dlut_state {
 	struct {
@@ -680,12 +681,11 @@ union dc_3dlut_state {
 struct dc_3dlut {
 	struct kref refcount;
 	struct tetrahedral_params lut_3d;
-	uint32_t hdr_multiplier;
+	struct fixed31_32 hdr_multiplier;
 	bool initialized; /*remove after diag fix*/
 	union dc_3dlut_state state;
 	struct dc_context *ctx;
 };
-#endif
 /*
  * This structure is filled in by dc_surface_get_status and contains
  * the last requested address and the currently active address so the called
@@ -708,7 +708,7 @@ union surface_update_flags {
 		uint32_t horizontal_mirror_change:1;
 		uint32_t per_pixel_alpha_change:1;
 		uint32_t global_alpha_change:1;
-		uint32_t sdr_white_level:1;
+		uint32_t hdr_mult:1;
 		uint32_t rotation_change:1;
 		uint32_t swizzle_change:1;
 		uint32_t scaling_change:1;
@@ -736,9 +736,7 @@ union surface_update_flags {
 struct dc_plane_state {
 	struct dc_plane_address address;
 	struct dc_plane_flip_time time;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	bool triplebuffer_flips;
-#endif
 	struct scaling_taps scaling_quality;
 	struct rect src_rect;
 	struct rect dst_rect;
@@ -754,18 +752,16 @@ struct dc_plane_state {
 	struct dc_bias_and_scale *bias_and_scale;
 	struct dc_csc_transform input_csc_color_matrix;
 	struct fixed31_32 coeff_reduction_factor;
-	uint32_t sdr_white_level;
+	struct fixed31_32 hdr_mult;
 
 	// TODO: No longer used, remove
 	struct dc_hdr_static_metadata hdr_static_ctx;
 
 	enum dc_color_space color_space;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	struct dc_3dlut *lut3d_func;
 	struct dc_transfer_func *in_shaper_func;
 	struct dc_transfer_func *blend_tf;
-#endif
 
 	enum surface_pixel_format format;
 	enum dc_rotation_angle rotation;
@@ -801,7 +797,6 @@ struct dc_plane_info {
 	enum dc_rotation_angle rotation;
 	enum plane_stereo_format stereo_format;
 	enum dc_color_space color_space;
-	unsigned int sdr_white_level;
 	bool horizontal_mirror;
 	bool visible;
 	bool per_pixel_alpha;
@@ -825,7 +820,7 @@ struct dc_surface_update {
 	const struct dc_flip_addrs *flip_addr;
 	const struct dc_plane_info *plane_info;
 	const struct dc_scaling_info *scaling_info;
-
+	struct fixed31_32 hdr_mult;
 	/* following updates require alloc/sleep/spin that is not isr safe,
 	 * null means no updates
 	 */
@@ -834,11 +829,9 @@ struct dc_surface_update {
 
 	const struct dc_csc_transform *input_csc_color_matrix;
 	const struct fixed31_32 *coeff_reduction_factor;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	const struct dc_transfer_func *func_shaper;
 	const struct dc_3dlut *lut3d_func;
 	const struct dc_transfer_func *blend_tf;
-#endif
 };
 
 /*
@@ -859,11 +852,9 @@ void dc_transfer_func_retain(struct dc_transfer_func *dc_tf);
 void dc_transfer_func_release(struct dc_transfer_func *dc_tf);
 struct dc_transfer_func *dc_create_transfer_func(void);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 struct dc_3dlut *dc_create_3dlut_func(void);
 void dc_3dlut_func_release(struct dc_3dlut *lut);
 void dc_3dlut_func_retain(struct dc_3dlut *lut);
-#endif
 /*
  * This structure holds a surface address.  There could be multiple addresses
  * in cases such as Stereo 3D, Planar YUV, etc.  Other per-flip attributes such
@@ -925,6 +916,8 @@ void dc_resource_state_copy_construct_current(
 
 void dc_resource_state_destruct(struct dc_state *context);
 
+bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
+
 /*
  * TODO update to make it about validation sets
  * Set up streams and links associated to drive sinks
@@ -980,10 +973,10 @@ struct dpcd_caps {
 	bool panel_mode_edp;
 	bool dpcd_display_control_capable;
 	bool ext_receiver_cap_field_present;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	union dpcd_fec_capability fec_cap;
 	struct dpcd_dsc_capabilities dsc_caps;
-#endif
+	struct dc_lttpr_caps lttpr_caps;
+
 };
 
 #include "dc_link.h"
@@ -1004,14 +997,12 @@ struct dc_container_id {
 };
 
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 struct dc_sink_dsc_caps {
 	// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),
 	// 'false' if they are sink's DSC caps
 	bool is_virtual_dpcd_dsc;
 	struct dsc_dec_dpcd_caps dsc_dec_caps;
 };
-#endif
 
 /*
  * The sink structure contains EDID and other display device properties
@@ -1026,9 +1017,7 @@ struct dc_sink {
 	struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
 	bool converter_disable_audio;
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	struct dc_sink_dsc_caps sink_dsc_caps;
-#endif
 
 	/* private to DC core */
 	struct dc_link *link;
@@ -1086,13 +1075,12 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc);
 unsigned int dc_get_target_backlight_pwm(struct dc *dc);
 
 bool dc_is_dmcu_initialized(struct dc *dc);
+bool dc_is_hw_initialized(struct dc *dc);
 
 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
 /*******************************************************************************
  * DSC Interfaces
  ******************************************************************************/
 #include "dc_dsc.h"
-#endif
 #endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
new file mode 100644
index 0000000..59c298a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "../dmub/inc/dmub_srv.h"
+
+static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
+				  struct dmub_srv *dmub)
+{
+	dc_srv->dmub = dmub;
+	dc_srv->ctx = dc->ctx;
+}
+
+struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub)
+{
+	struct dc_dmub_srv *dc_srv =
+		kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL);
+
+	if (dc_srv == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	dc_dmub_srv_construct(dc_srv, dc, dmub);
+
+	return dc_srv;
+}
+
+void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
+{
+	if (*dmub_srv) {
+		kfree(*dmub_srv);
+		*dmub_srv = NULL;
+	}
+}
+
+void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
+			   struct dmub_cmd_header *cmd)
+{
+	struct dmub_srv *dmub = dc_dmub_srv->dmub;
+	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+	enum dmub_status status;
+
+	status = dmub_srv_cmd_queue(dmub, cmd);
+	if (status == DMUB_STATUS_OK)
+		return;
+
+	if (status != DMUB_STATUS_QUEUE_FULL)
+		goto error;
+
+	/* Execute and wait for queue to become empty again. */
+	dc_dmub_srv_cmd_execute(dc_dmub_srv);
+	dc_dmub_srv_wait_idle(dc_dmub_srv);
+
+	/* Requeue the command. */
+	status = dmub_srv_cmd_queue(dmub, cmd);
+	if (status == DMUB_STATUS_OK)
+		return;
+
+error:
+	DC_ERROR("Error queuing DMUB command: status=%d\n", status);
+}
+
+void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv)
+{
+	struct dmub_srv *dmub = dc_dmub_srv->dmub;
+	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+	enum dmub_status status;
+
+	status = dmub_srv_cmd_execute(dmub);
+	if (status != DMUB_STATUS_OK)
+		DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+}
+
+void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
+{
+	struct dmub_srv *dmub = dc_dmub_srv->dmub;
+	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+	enum dmub_status status;
+
+	status = dmub_srv_wait_for_idle(dmub, 100000);
+	if (status != DMUB_STATUS_OK)
+		DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
+}
+
+void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv)
+{
+	struct dmub_srv *dmub = dc_dmub_srv->dmub;
+	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+	enum dmub_status status;
+
+	for (;;) {
+		/* Wait up to a second for PHY init. */
+		status = dmub_srv_wait_for_phy_init(dmub, 1000000);
+		if (status == DMUB_STATUS_OK)
+			/* Initialization OK */
+			break;
+
+		DC_ERROR("DMCUB PHY init failed: status=%d\n", status);
+		ASSERT(0);
+
+		if (status != DMUB_STATUS_TIMEOUT)
+			/*
+			 * Server likely initialized or we don't have
+			 * DMCUB HW support - this won't end.
+			 */
+			break;
+
+		/* Continue spinning so we don't hang the ASIC. */
+	}
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
new file mode 100644
index 0000000..754b607
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_DC_SRV_H_
+#define _DMUB_DC_SRV_H_
+
+#include "os_types.h"
+#include "../dmub/inc/dmub_cmd.h"
+
+struct dmub_srv;
+struct dmub_cmd_header;
+
+struct dc_reg_helper_state {
+	bool gather_in_progress;
+	uint32_t same_addr_count;
+	bool should_burst_write;
+	union dmub_rb_cmd cmd_data;
+	unsigned int reg_seq_count;
+};
+
+struct dc_dmub_srv {
+	struct dmub_srv *dmub;
+	struct dc_reg_helper_state reg_helper_offload;
+
+	struct dc_context *ctx;
+	void *dm;
+};
+
+void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
+			   struct dmub_cmd_header *cmd);
+
+void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv);
+
+void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
+
+void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv);
+
+#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index ef79a68..dfe4472 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -129,9 +129,7 @@ struct dc_link_training_overrides {
 	bool *alternate_scrambler_reset;
 	bool *enhanced_framing;
 	bool *mst_enable;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	bool *fec_enable;
-#endif
 };
 
 union dpcd_rev {
@@ -471,13 +469,13 @@ union training_aux_rd_interval {
 /* Automated test structures */
 union test_request {
 	struct {
-	uint8_t LINK_TRAINING         :1;
-	uint8_t LINK_TEST_PATTRN      :1;
-	uint8_t EDID_READ             :1;
-	uint8_t PHY_TEST_PATTERN      :1;
-	uint8_t AUDIO_TEST_PATTERN    :1;
-	uint8_t RESERVED              :1;
-	uint8_t TEST_STEREO_3D        :1;
+	uint8_t LINK_TRAINING                :1;
+	uint8_t LINK_TEST_PATTRN             :1;
+	uint8_t EDID_READ                    :1;
+	uint8_t PHY_TEST_PATTERN             :1;
+	uint8_t RESERVED                     :1;
+	uint8_t AUDIO_TEST_PATTERN           :1;
+	uint8_t TEST_AUDIO_DISABLED_VIDEO    :1;
 	} bits;
 	uint8_t raw;
 };
@@ -524,19 +522,52 @@ union link_test_pattern {
 
 union test_misc {
 	struct dpcd_test_misc_bits {
-		unsigned char SYNC_CLOCK :1;
+		unsigned char SYNC_CLOCK  :1;
 		/* dpcd_test_color_format */
-		unsigned char CLR_FORMAT :2;
+		unsigned char CLR_FORMAT  :2;
 		/* dpcd_test_dyn_range */
-		unsigned char DYN_RANGE  :1;
-		unsigned char YCBCR      :1;
+		unsigned char DYN_RANGE   :1;
+		unsigned char YCBCR_COEFS :1;
 		/* dpcd_test_bit_depth */
-		unsigned char BPC        :3;
+		unsigned char BPC         :3;
 	} bits;
 	unsigned char raw;
 };
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+union audio_test_mode {
+	struct {
+		unsigned char sampling_rate   :4;
+		unsigned char channel_count   :4;
+	} bits;
+	unsigned char raw;
+};
+
+union audio_test_pattern_period {
+	struct {
+		unsigned char pattern_period   :4;
+		unsigned char reserved         :4;
+	} bits;
+	unsigned char raw;
+};
+
+struct audio_test_pattern_type {
+	unsigned char value;
+};
+
+struct dp_audio_test_data_flags {
+	uint8_t test_requested  :1;
+	uint8_t disable_video   :1;
+};
+
+struct dp_audio_test_data {
+
+	struct dp_audio_test_data_flags flags;
+	uint8_t sampling_rate;
+	uint8_t channel_count;
+	uint8_t pattern_type;
+	uint8_t pattern_period[8];
+};
+
 /* FEC capability DPCD register field bits-*/
 union dpcd_fec_capability {
 	struct {
@@ -661,6 +692,5 @@ struct dpcd_dsc_capabilities {
 	union dpcd_dsc_ext_capabilities dsc_ext_caps;
 };
 
-#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */
 
 #endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 0ed2962..3800340 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -1,4 +1,3 @@
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 #ifndef DC_DSC_H_
 #define DC_DSC_H_
 /*
@@ -42,21 +41,28 @@ struct dc_dsc_bw_range {
 
 struct display_stream_compressor {
 	const struct dsc_funcs *funcs;
-#ifndef AMD_EDID_UTILITY
 	struct dc_context *ctx;
 	int inst;
-#endif
 };
 
-bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data,
+struct dc_dsc_policy {
+	bool use_min_slices_h;
+	int max_slices_h; // Maximum available if 0
+	int min_slice_height; // Must not be less than 8
+	uint32_t max_target_bpp;
+	uint32_t min_target_bpp;
+};
+
+bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
+		const uint8_t *dpcd_dsc_basic_data,
 		const uint8_t *dpcd_dsc_ext_data,
 		struct dsc_dec_dpcd_caps *dsc_sink_caps);
 
 bool dc_dsc_compute_bandwidth_range(
 		const struct display_stream_compressor *dsc,
 		const uint32_t dsc_min_slice_height_override,
-		const uint32_t min_kbps,
-		const uint32_t max_kbps,
+		const uint32_t min_bpp,
+		const uint32_t max_bpp,
 		const struct dsc_dec_dpcd_caps *dsc_sink_caps,
 		const struct dc_crtc_timing *timing,
 		struct dc_dsc_bw_range *range);
@@ -68,5 +74,10 @@ bool dc_dsc_compute_config(
 		uint32_t target_bandwidth_kbps,
 		const struct dc_crtc_timing *timing,
 		struct dc_dsc_config *dsc_cfg);
-#endif
+
+void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
+		struct dc_dsc_policy *policy);
+
+void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit);
+
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 30b2f9e..737048d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -32,6 +32,74 @@
 #include "dm_services.h"
 #include <stdarg.h>
 
+#include "dc.h"
+#include "dc_dmub_srv.h"
+
+static inline void submit_dmub_read_modify_write(
+	struct dc_reg_helper_state *offload,
+	const struct dc_context *ctx)
+{
+	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
+	bool gather = false;
+
+	offload->should_burst_write =
+			(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
+	cmd_buf->header.payload_bytes =
+			sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;
+
+	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
+	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
+
+	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+
+	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
+
+	memset(cmd_buf, 0, sizeof(*cmd_buf));
+
+	offload->reg_seq_count = 0;
+	offload->same_addr_count = 0;
+}
+
+static inline void submit_dmub_burst_write(
+	struct dc_reg_helper_state *offload,
+	const struct dc_context *ctx)
+{
+	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
+	bool gather = false;
+
+	cmd_buf->header.payload_bytes =
+			sizeof(uint32_t) * offload->reg_seq_count;
+
+	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
+	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
+
+	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+
+	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
+
+	memset(cmd_buf, 0, sizeof(*cmd_buf));
+
+	offload->reg_seq_count = 0;
+}
+
+static inline void submit_dmub_reg_wait(
+		struct dc_reg_helper_state *offload,
+		const struct dc_context *ctx)
+{
+	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
+	bool gather = false;
+
+	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
+	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
+
+	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+
+	memset(cmd_buf, 0, sizeof(*cmd_buf));
+	offload->reg_seq_count = 0;
+
+	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
+}
+
 struct dc_reg_value_masks {
 	uint32_t value;
 	uint32_t mask;
@@ -77,6 +145,100 @@ static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
 	}
 }
 
+static void dmub_flush_buffer_execute(
+		struct dc_reg_helper_state *offload,
+		const struct dc_context *ctx)
+{
+	submit_dmub_read_modify_write(offload, ctx);
+	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
+}
+
+static void dmub_flush_burst_write_buffer_execute(
+		struct dc_reg_helper_state *offload,
+		const struct dc_context *ctx)
+{
+	submit_dmub_burst_write(offload, ctx);
+	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
+}
+
+static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
+		uint32_t reg_val)
+{
+	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
+	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
+
+	/* flush command if buffer is full */
+	if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
+		dmub_flush_burst_write_buffer_execute(offload, ctx);
+
+	if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
+			addr != cmd_buf->addr) {
+		dmub_flush_burst_write_buffer_execute(offload, ctx);
+		return false;
+	}
+
+	cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
+	cmd_buf->header.sub_type = 0;
+	cmd_buf->addr = addr;
+	cmd_buf->write_values[offload->reg_seq_count] = reg_val;
+	offload->reg_seq_count++;
+
+	return true;
+}
+
+static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
+		struct dc_reg_value_masks *field_value_mask)
+{
+	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
+	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
+	struct dmub_cmd_read_modify_write_sequence *seq;
+
+	/* flush command if buffer is full */
+	if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
+			offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
+		dmub_flush_buffer_execute(offload, ctx);
+
+	if (offload->should_burst_write) {
+		if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
+			return field_value_mask->value;
+		else
+			offload->should_burst_write = false;
+	}
+
+	/* pack commands */
+	cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
+	cmd_buf->header.sub_type = 0;
+	seq = &cmd_buf->seq[offload->reg_seq_count];
+
+	if (offload->reg_seq_count) {
+		if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
+			offload->same_addr_count++;
+		else
+			offload->same_addr_count = 0;
+	}
+
+	seq->addr = addr;
+	seq->modify_mask = field_value_mask->mask;
+	seq->modify_value = field_value_mask->value;
+	offload->reg_seq_count++;
+
+	return field_value_mask->value;
+}
+
+static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
+		uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
+{
+	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
+	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
+
+	cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
+	cmd_buf->header.sub_type = 0;
+	cmd_buf->reg_wait.addr = addr;
+	cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
+	cmd_buf->reg_wait.mask = mask;
+	cmd_buf->reg_wait.time_out_us = time_out_us;
+}
+
 uint32_t generic_reg_update_ex(const struct dc_context *ctx,
 		uint32_t addr, int n,
 		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
@@ -93,6 +255,11 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
 
 	va_end(ap);
 
+	if (ctx->dmub_srv &&
+	    ctx->dmub_srv->reg_helper_offload.gather_in_progress)
+		return dmub_reg_value_pack(ctx, addr, &field_value_mask);
+		/* todo: return void so we can decouple code running in driver from register states */
+
 	/* mmio write directly */
 	reg_val = dm_read_reg(ctx, addr);
 	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
@@ -118,6 +285,13 @@ uint32_t generic_reg_set_ex(const struct dc_context *ctx,
 
 	/* mmio write directly */
 	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
+
+	if (ctx->dmub_srv &&
+	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
+		return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
+		/* todo: return void so we can decouple code running in driver from register states */
+	}
+
 	dm_write_reg(ctx, addr, reg_val);
 	return reg_val;
 }
@@ -134,6 +308,14 @@ uint32_t dm_read_reg_func(
 		return 0;
 	}
 #endif
+
+	if (ctx->dmub_srv &&
+	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
+	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
+		ASSERT(false);
+		return 0;
+	}
+
 	value = cgs_read_register(ctx->cgs_device, address);
 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
 
@@ -299,7 +481,19 @@ void generic_reg_wait(const struct dc_context *ctx,
 	uint32_t reg_val;
 	int i;
 
-	/* something is terribly wrong if time out is > 200ms. (5Hz) */
+	if (ctx->dmub_srv &&
+	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
+		dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
+				delay_between_poll_us * time_out_num_tries);
+		return;
+	}
+
+	/*
+	 * Something is terribly wrong if the timeout is > 3000 ms.
+	 * 3000 ms is the maximum time needed for the SMU to pass values back.
+	 * This value was determined experimentally.
+	 *
+	 */
 	ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);
 
 	for (i = 0; i <= time_out_num_tries; i++) {
@@ -346,12 +540,48 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
 {
 	uint32_t value = 0;
 
+	// When reading a register, there must not be any offload (gather) in progress.
+	if (ctx->dmub_srv &&
+	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
+		ASSERT(false);
+	}
+
 	dm_write_reg(ctx, addr_index, index);
 	value = dm_read_reg(ctx, addr_data);
 
 	return value;
 }
 
+uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
+		uint32_t addr_index, uint32_t addr_data,
+		uint32_t index, int n,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		...)
+{
+	uint32_t shift, mask, *field_value;
+	uint32_t value = 0;
+	int i = 1;
+
+	va_list ap;
+
+	va_start(ap, field_value1);
+
+	value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
+	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);
+
+	while (i < n) {
+		shift = va_arg(ap, uint32_t);
+		mask = va_arg(ap, uint32_t);
+		field_value = va_arg(ap, uint32_t *);
+
+		*field_value = get_reg_field_value_ex(value, mask, shift);
+		i++;
+	}
+
+	va_end(ap);
+
+	return value;
+}
 
 uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
 		uint32_t addr_index, uint32_t addr_data,
@@ -382,3 +612,68 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
 
 	return reg_val;
 }
+
+void reg_sequence_start_gather(const struct dc_context *ctx)
+{
+	/* If register sequence offload is supported and enabled, set the flag
+	 * so that the REG_SET/REG_UPDATE macros build a register sequence
+	 * command buffer instead of writing to MMIO directly.
+	 */
+
+	if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) {
+		struct dc_reg_helper_state *offload =
+			&ctx->dmub_srv->reg_helper_offload;
+
+		/* Caller sequence mismatch; offload will not work. Debug the caller. */
+		ASSERT(!offload->gather_in_progress);
+
+		offload->gather_in_progress = true;
+	}
+}
+
+void reg_sequence_start_execute(const struct dc_context *ctx)
+{
+	struct dc_reg_helper_state *offload;
+
+	if (!ctx->dmub_srv)
+		return;
+
+	offload = &ctx->dmub_srv->reg_helper_offload;
+
+	if (offload && offload->gather_in_progress) {
+		offload->gather_in_progress = false;
+		offload->should_burst_write = false;
+		switch (offload->cmd_data.cmd_common.header.type) {
+		case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE:
+			submit_dmub_read_modify_write(offload, ctx);
+			break;
+		case DMUB_CMD__REG_REG_WAIT:
+			submit_dmub_reg_wait(offload, ctx);
+			break;
+		case DMUB_CMD__REG_SEQ_BURST_WRITE:
+			submit_dmub_burst_write(offload, ctx);
+			break;
+		default:
+			return;
+		}
+
+		dc_dmub_srv_cmd_execute(ctx->dmub_srv);
+	}
+}
+
+void reg_sequence_wait_done(const struct dc_context *ctx)
+{
+	/* Callback to DM to poll until the last submission is done */
+	struct dc_reg_helper_state *offload;
+
+	if (!ctx->dmub_srv)
+		return;
+
+	offload = &ctx->dmub_srv->reg_helper_offload;
+
+	if (offload &&
+	    ctx->dc->debug.dmub_offload_enabled &&
+	    !ctx->dc->debug.dmcub_emulation) {
+		dc_dmub_srv_wait_idle(ctx->dmub_srv);
+	}
+}
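Taken together, the helpers above give three entry points for offloading register programming to DMUB: reg_sequence_start_gather() sets gather_in_progress so that subsequent REG_SET/REG_UPDATE/REG_WAIT calls are packed into a command buffer rather than written over MMIO, reg_sequence_start_execute() submits whatever was packed and kicks the DMUB service, and reg_sequence_wait_done() asks DM to wait for completion. A rough caller sketch (the function and register names here are made up for illustration; real call sites use their own register lists):

	static void program_block_via_dmub(struct dc_context *ctx)
	{
		reg_sequence_start_gather(ctx);      /* REG_* macros now pack commands   */

		REG_UPDATE(BLK_CONTROL, BLK_ENABLE, 1);     /* packed as read-modify-write */
		REG_SET(BLK_LIMIT, 0, BLK_THRESHOLD, 0x20); /* repeated addresses may be
							     * turned into a burst write  */

		reg_sequence_start_execute(ctx);     /* queue + execute on the DMUB ring */
		reg_sequence_wait_done(ctx);         /* optional: poll until idle        */
	}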
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index e0856bb..25c50bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -26,8 +26,6 @@
 #ifndef DC_HW_TYPES_H
 #define DC_HW_TYPES_H
 
-#ifndef AMD_EDID_UTILITY
-
 #include "os_types.h"
 #include "fixed31_32.h"
 #include "signal_types.h"
@@ -167,12 +165,10 @@ enum surface_pixel_format {
 	/*swaped & float*/
 	SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
 	/*grow graphics here if necessary */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX,
 	SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX,
 	SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT,
 	SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT,
-#endif
 	SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
 	SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
 		SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
@@ -180,10 +176,8 @@ enum surface_pixel_format {
 	SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
 	SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
 		SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010,
 	SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102,
-#endif
 	SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
 	SURFACE_PIXEL_FORMAT_INVALID
 
@@ -222,12 +216,10 @@ enum tile_split_values {
 	DC_ROTATED_MICRO_TILING = 0x3,
 };
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 enum tripleBuffer_enable {
 	DC_TRIPLEBUFFER_DISABLE = 0x0,
 	DC_TRIPLEBUFFER_ENABLE = 0x1,
 };
-#endif
 
 /* TODO: These values come from hardware spec. We need to readdress this
  * if they ever change.
@@ -427,13 +419,11 @@ struct dc_csc_transform {
 	bool enable_adjustment;
 };
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 struct dc_rgb_fixed {
 	struct fixed31_32 red;
 	struct fixed31_32 green;
 	struct fixed31_32 blue;
 };
-#endif
 
 struct dc_gamma {
 	struct kref refcount;
@@ -468,10 +458,8 @@ enum dc_cursor_color_format {
 	CURSOR_MODE_COLOR_1BIT_AND,
 	CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA,
 	CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED,
 	CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED
-#endif
 };
 
 /*
@@ -594,8 +582,6 @@ struct scaling_taps {
 	bool integer_scaling;
 };
 
-#endif /* AMD_EDID_UTILITY */
-
 enum dc_timing_standard {
 	DC_TIMING_STANDARD_UNDEFINED,
 	DC_TIMING_STANDARD_DMT,
@@ -626,10 +612,8 @@ enum dc_color_depth {
 	COLOR_DEPTH_121212,
 	COLOR_DEPTH_141414,
 	COLOR_DEPTH_161616,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	COLOR_DEPTH_999,
 	COLOR_DEPTH_111111,
-#endif
 	COLOR_DEPTH_COUNT
 };
 
@@ -690,9 +674,7 @@ struct dc_crtc_timing_flags {
 	 * rates less than or equal to 340Mcsc */
 	uint32_t LTE_340MCSC_SCRAMBLE:1;
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	uint32_t DSC : 1; /* Use DSC with this timing */
-#endif
 };
 
 enum dc_timing_3d_format {
@@ -717,7 +699,6 @@ enum dc_timing_3d_format {
 	TIMING_3D_FORMAT_MAX,
 };
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 struct dc_dsc_config {
 	uint32_t num_slices_h; /* Number of DSC slices - horizontal */
 	uint32_t num_slices_v; /* Number of DSC slices - vertical */
@@ -728,7 +709,6 @@ struct dc_dsc_config {
 	bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */
 	int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */
 };
-#endif
 struct dc_crtc_timing {
 	uint32_t h_total;
 	uint32_t h_border_left;
@@ -755,13 +735,9 @@ struct dc_crtc_timing {
 	enum scanning_type scan_type;
 
 	struct dc_crtc_timing_flags flags;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	struct dc_dsc_config dsc_cfg;
-#endif
 };
 
-#ifndef AMD_EDID_UTILITY
-
 enum trigger_delay {
 	TRIGGER_DELAY_NEXT_PIXEL = 0,
 	TRIGGER_DELAY_NEXT_LINE,
@@ -796,7 +772,6 @@ enum vram_type {
 	VIDEO_MEMORY_TYPE_GDDR6  = 6,
 };
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 enum dwb_cnv_out_bpc {
 	DWB_CNV_OUT_BPC_8BPC  = 0,
 	DWB_CNV_OUT_BPC_10BPC = 1,
@@ -847,7 +822,6 @@ struct mcif_buf_params {
 	unsigned int		swlock;
 };
 
-#endif
 
 #define MAX_TG_COLOR_VALUE 0x3FF
 struct tg_color {
@@ -857,7 +831,5 @@ struct tg_color {
 	uint16_t color_b_cb;
 };
 
-#endif /* AMD_EDID_UTILITY */
-
 #endif /* DC_HW_TYPES_H */
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index f24fd19e..d256031 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -29,13 +29,11 @@
 #include "dc_types.h"
 #include "grph_object_defs.h"
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 enum dc_link_fec_state {
 	dc_link_fec_not_ready,
 	dc_link_fec_ready,
 	dc_link_fec_enabled
 };
-#endif
 struct dc_link_status {
 	bool link_active;
 	struct dpcd_caps *dpcd_caps;
@@ -85,6 +83,7 @@ struct dc_link {
 	bool link_state_valid;
 	bool aux_access_disabled;
 	bool sync_lt_in_progress;
+	bool is_lttpr_mode_transparent;
 
 	/* caps is the same as reported_link_cap. link_training uses
 	 * reported_link_cap. Will clean up.  TODO
@@ -95,6 +94,7 @@ struct dc_link {
 	struct dc_lane_settings cur_lane_setting;
 	struct dc_link_settings preferred_link_setting;
 	struct dc_link_training_overrides preferred_training_settings;
+	struct dp_audio_test_data audio_test_data;
 
 	uint8_t ddc_hw_inst;
 
@@ -133,6 +133,7 @@ struct dc_link {
 	struct link_flags {
 		bool dp_keep_receiver_powered;
 		bool dp_skip_DID2;
+		bool dp_skip_reset_segment;
 	} wa_flags;
 	struct link_mst_stream_allocation_table mst_stream_alloc_table;
 
@@ -140,9 +141,7 @@ struct dc_link {
 
 	struct link_trace link_trace;
 	struct gpio *hpd_gpio;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	enum dc_link_fec_state fec_state;
-#endif
 };
 
 const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
@@ -206,6 +205,7 @@ enum dc_detect_reason {
 bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
 bool dc_link_get_hpd_state(struct dc_link *dc_link);
 enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
+enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link);
 
 /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
  * Return:
@@ -259,6 +259,7 @@ void dc_link_dp_disable_hpd(const struct dc_link *link);
 bool dc_link_dp_set_test_pattern(
 	struct dc_link *link,
 	enum dp_test_pattern test_pattern,
+	enum dp_test_pattern_color_space test_pattern_color_space,
 	const struct link_training_settings *p_link_settings,
 	const unsigned char *p_custom_pattern,
 	unsigned int cust_pattern_size);
@@ -290,6 +291,7 @@ void dc_link_enable_hpd(const struct dc_link *link);
 void dc_link_disable_hpd(const struct dc_link *link);
 void dc_link_set_test_pattern(struct dc_link *link,
 			enum dp_test_pattern test_pattern,
+			enum dp_test_pattern_color_space test_pattern_color_space,
 			const struct link_training_settings *p_link_settings,
 			const unsigned char *p_custom_pattern,
 			unsigned int cust_pattern_size);
@@ -300,11 +302,18 @@ uint32_t dc_link_bandwidth_kbps(
 const struct dc_link_settings *dc_link_get_link_cap(
 		const struct dc_link *link);
 
+void dc_link_overwrite_extended_receiver_cap(
+		struct dc_link *link);
+
 bool dc_submit_i2c(
 		struct dc *dc,
 		uint32_t link_index,
 		struct i2c_command *cmd);
 
+bool dc_submit_i2c_oem(
+		struct dc *dc,
+		struct i2c_command *cmd);
+
 uint32_t dc_bandwidth_in_kbps_from_timing(
 	const struct dc_crtc_timing *timing);
 #endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index fdb6adc..92096de 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -52,7 +52,6 @@ struct freesync_context {
 	bool dummy;
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 enum hubp_dmdata_mode {
 	DMDATA_SW_MODE,
 	DMDATA_HW_MODE
@@ -82,9 +81,7 @@ struct dc_dmdata_attributes {
 	/* An unbounded array of uint32s, represents software dmdata to be loaded */
 	uint32_t *dmdata_sw_data;
 };
-#endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 struct dc_writeback_info {
 	bool wb_enabled;
 	int dwb_pipe_inst;
@@ -96,7 +93,6 @@ struct dc_writeback_update {
 	unsigned int num_wb_info;
 	struct dc_writeback_info writeback_info[MAX_DWB_PIPES];
 };
-#endif
 
 enum vertical_interrupt_ref_point {
 	START_V_UPDATE = 0,
@@ -121,9 +117,7 @@ union stream_update_flags {
 		uint32_t abm_level:1;
 		uint32_t dpms_off:1;
 		uint32_t gamut_remap:1;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 		uint32_t wb_update:1;
-#endif
 	} bits;
 
 	uint32_t raw;
@@ -164,6 +158,7 @@ struct dc_stream_state {
 
 	enum view_3d_format view_format;
 
+	bool use_vsc_sdp_for_colorimetry;
 	bool ignore_msa_timing_param;
 	bool converter_disable_audio;
 	uint8_t qs_bit;
@@ -203,11 +198,9 @@ struct dc_stream_state {
 
 	struct crtc_trigger_info triggered_crtc_reset;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	/* writeback */
 	unsigned int num_wb_info;
 	struct dc_writeback_info writeback_info[MAX_DWB_PIPES];
-#endif
 	/* Computed state bits */
 	bool mode_changed : 1;
 
@@ -226,9 +219,7 @@ struct dc_stream_state {
 	bool apply_seamless_boot_optimization;
 
 	uint32_t stream_id;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	bool is_dsc_enabled;
-#endif
 	union stream_update_flags update_flags;
 };
 
@@ -251,6 +242,7 @@ struct dc_stream_update {
 	struct dc_info_packet *vsp_infopacket;
 
 	bool *dpms_off;
+	bool integer_scaling_update;
 
 	struct colorspace_transform *gamut_remap;
 	enum dc_color_space *output_color_space;
@@ -258,12 +250,8 @@ struct dc_stream_update {
 
 	struct dc_csc_transform *output_csc_transform;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	struct dc_writeback_update *wb_update;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
 	struct dc_dsc_config *dsc_config;
-#endif
 };
 
 bool dc_is_stream_unchanged(
@@ -353,18 +341,23 @@ bool dc_add_all_planes_for_stream(
 		int plane_count,
 		struct dc_state *context);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 bool dc_stream_add_writeback(struct dc *dc,
 		struct dc_stream_state *stream,
 		struct dc_writeback_info *wb_info);
+
 bool dc_stream_remove_writeback(struct dc *dc,
 		struct dc_stream_state *stream,
 		uint32_t dwb_pipe_inst);
+
+bool dc_stream_warmup_writeback(struct dc *dc,
+		int num_dwb,
+		struct dc_writeback_info *wb_info);
+
 bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream);
+
 bool dc_stream_set_dynamic_metadata(struct dc *dc,
 		struct dc_stream_state *stream,
 		struct dc_dmdata_attributes *dmdata_attr);
-#endif
 
 enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
 
@@ -446,10 +439,10 @@ bool dc_stream_get_crc(struct dc *dc,
 		       uint32_t *g_y,
 		       uint32_t *b_cb);
 
-void dc_stream_set_static_screen_events(struct dc *dc,
+void dc_stream_set_static_screen_params(struct dc *dc,
 					struct dc_stream_state **stream,
 					int num_streams,
-					const struct dc_static_screen_events *events);
+					const struct dc_static_screen_params *params);
 
 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
 		enum dc_dynamic_expansion option);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index d9be8fc..e59532d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -25,7 +25,6 @@
 #ifndef DC_TYPES_H_
 #define DC_TYPES_H_
 
-#ifndef AMD_EDID_UTILITY
 /* AMD EdidUtility only needs a portion
  * of this file, including the rest only
  * causes additional issues.
@@ -48,6 +47,7 @@ struct dc_stream_state;
 struct dc_link;
 struct dc_sink;
 struct dal;
+struct dc_dmub_srv;
 
 /********************************
  * Environment definitions
@@ -60,7 +60,12 @@ enum dce_environment {
 	DCE_ENV_FPGA_MAXIMUS,
 	/* Emulation on real HW or on FPGA. Used by Diagnostics, enforces
 	 * requirements of Diagnostics team. */
-	DCE_ENV_DIAG
+	DCE_ENV_DIAG,
+	/*
+	 * Guest VM system, DC HW may exist but is not virtualized and
+	 * should not be used.  SW support for VDI only.
+	 */
+	DCE_ENV_VIRTUAL_HW
 };
 
 /* Note: use these macro definitions instead of direct comparison! */
@@ -109,6 +114,8 @@ struct dc_context {
 	uint32_t dc_sink_id_count;
 	uint32_t dc_stream_id_count;
 	uint64_t fbc_gpu_addr;
+	struct dc_dmub_srv *dmub_srv;
+
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 	struct cp_psp cp_psp;
 #endif
@@ -119,6 +126,7 @@ struct dc_context {
 #define DC_EDID_BLOCK_SIZE 128
 #define MAX_SURFACE_NUM 4
 #define NUM_PIXEL_FORMATS 10
+#define MAX_REPEATER_CNT 8
 
 #include "dc_ddc_types.h"
 
@@ -221,6 +229,7 @@ struct dc_panel_patch {
 	unsigned int extra_t12_ms;
 	unsigned int extra_delay_backlight_off;
 	unsigned int extra_t7_ms;
+	unsigned int manage_secondary_link;
 };
 
 struct dc_edid_caps {
@@ -402,6 +411,30 @@ enum dpcd_downstream_port_max_bpc {
 	DOWN_STREAM_MAX_12BPC,
 	DOWN_STREAM_MAX_16BPC
 };
+
+
+enum link_training_offset {
+	DPRX                = 0,
+	LTTPR_PHY_REPEATER1 = 1,
+	LTTPR_PHY_REPEATER2 = 2,
+	LTTPR_PHY_REPEATER3 = 3,
+	LTTPR_PHY_REPEATER4 = 4,
+	LTTPR_PHY_REPEATER5 = 5,
+	LTTPR_PHY_REPEATER6 = 6,
+	LTTPR_PHY_REPEATER7 = 7,
+	LTTPR_PHY_REPEATER8 = 8
+};
+
+struct dc_lttpr_caps {
+	union dpcd_rev revision;
+	uint8_t mode;
+	uint8_t max_lane_count;
+	uint8_t max_link_rate;
+	uint8_t phy_repeater_cnt;
+	uint8_t max_ext_timeout;
+	uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
+};
+
 struct dc_dongle_caps {
 	/* dongle type (DP converter, CV smart dongle) */
 	enum display_dongle_type dongle_type;
@@ -440,7 +473,6 @@ enum display_content_type {
 	DISPLAY_CONTENT_TYPE_GAME = 8
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 /* writeback */
 struct dwb_stereo_params {
 	bool				stereo_enabled;		/* false: normal mode, true: 3D stereo */
@@ -471,7 +503,6 @@ struct dc_dwb_params {
 	enum dwb_subsample_position	subsample_position;
 	struct dc_transfer_func *out_transfer_func;
 };
-#endif
 
 /* audio*/
 
@@ -573,15 +604,17 @@ struct audio_info {
 	/* this field must be last in this struct */
 	struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
 };
-
+struct audio_check {
+	unsigned int audio_packet_type;
+	unsigned int max_audiosample_rate;
+	unsigned int acat;
+};
 enum dc_infoframe_type {
 	DC_HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
 	DC_HDMI_INFOFRAME_TYPE_AVI = 0x82,
 	DC_HDMI_INFOFRAME_TYPE_SPD = 0x83,
 	DC_HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	DC_DP_INFOFRAME_TYPE_PPS = 0x10,
-#endif
 };
 
 struct dc_info_packet {
@@ -696,7 +729,7 @@ struct psr_context {
 	/* The VSync rate in Hz used to calculate the
 	 * step size for smooth brightness feature
 	 */
-	unsigned int vsyncRateHz;
+	unsigned int vsync_rate_hz;
 	unsigned int skipPsrWaitForPllLock;
 	unsigned int numberOfControllers;
 	/* Unused, for future use. To indicate that first changed frame from
@@ -757,10 +790,6 @@ struct dc_clock_config {
 	uint32_t current_clock_khz;/*current clock in use*/
 };
 
-#endif /*AMD_EDID_UTILITY*/
-//AMD EDID UTILITY does not need any of the above structures
-
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 /* DSC DPCD capabilities */
 union dsc_slice_caps1 {
 	struct {
@@ -830,6 +859,5 @@ struct dsc_dec_dpcd_caps {
 	uint32_t branch_overall_throughput_1_mps; /* In MPs */
 	uint32_t branch_max_line_width;
 };
-#endif
 
 #endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index 7ba7e6f..ba0caaf 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -67,7 +67,6 @@
 	SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
 	NBIO_SR(BIOS_SCRATCH_2)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define ABM_DCN20_REG_LIST() \
 	ABM_COMMON_REG_LIST_DCE_BASE(), \
 	SR(DC_ABM1_HG_SAMPLE_RATE), \
@@ -81,7 +80,6 @@
 	SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \
 	SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \
 	NBIO_SR(BIOS_SCRATCH_2)
-#endif
 
 #define ABM_SF(reg_name, field_name, post_fix)\
 	.field_name = reg_name ## __ ## field_name ## post_fix
@@ -163,9 +161,7 @@
 	ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
 			ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define ABM_MASK_SH_LIST_DCN20(mask_sh) ABM_MASK_SH_LIST_DCE110(mask_sh)
-#endif
 
 #define ABM_REG_FIELD_LIST(type) \
 	type ABM1_HG_NUM_OF_BINS_SEL; \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 793c0ce..f1a5d2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -60,12 +60,14 @@ enum {
 	AUX_DEFER_RETRY_COUNTER = 6
 };
 
-#define TIME_OUT_INCREMENT      1016
-#define TIME_OUT_MULTIPLIER_8 	8
-#define TIME_OUT_MULTIPLIER_16  16
-#define TIME_OUT_MULTIPLIER_32  32
-#define TIME_OUT_MULTIPLIER_64  64
-#define MAX_TIMEOUT_LENGTH      127
+#define TIME_OUT_INCREMENT        1016
+#define TIME_OUT_MULTIPLIER_8     8
+#define TIME_OUT_MULTIPLIER_16    16
+#define TIME_OUT_MULTIPLIER_32    32
+#define TIME_OUT_MULTIPLIER_64    64
+#define MAX_TIMEOUT_LENGTH        127
+#define DEFAULT_AUX_ENGINE_MULT   0
+#define DEFAULT_AUX_ENGINE_LENGTH 69
 
 static void release_engine(
 	struct dce_aux *engine)
@@ -427,11 +429,14 @@ void dce110_engine_destroy(struct dce_aux **engine)
 
 }
 
-static bool dce_aux_configure_timeout(struct ddc_service *ddc,
+static uint32_t dce_aux_configure_timeout(struct ddc_service *ddc,
 		uint32_t timeout_in_us)
 {
 	uint32_t multiplier = 0;
 	uint32_t length = 0;
+	uint32_t prev_length = 0;
+	uint32_t prev_mult = 0;
+	uint32_t prev_timeout_val = 0;
 	struct ddc *ddc_pin = ddc->ddc_pin;
 	struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
 	struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine);
@@ -440,7 +445,10 @@ static bool dce_aux_configure_timeout(struct ddc_service *ddc,
 	aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER;
 
 	/* 2-Update aux timeout period length and multiplier */
-	if (timeout_in_us <= TIME_OUT_INCREMENT) {
+	if (timeout_in_us == 0) {
+		multiplier = DEFAULT_AUX_ENGINE_MULT;
+		length = DEFAULT_AUX_ENGINE_LENGTH;
+	} else if (timeout_in_us <= TIME_OUT_INCREMENT) {
 		multiplier = 0;
 		length = timeout_in_us/TIME_OUT_MULTIPLIER_8;
 		if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0)
@@ -464,9 +472,29 @@ static bool dce_aux_configure_timeout(struct ddc_service *ddc,
 
 	length = (length < MAX_TIMEOUT_LENGTH) ? length : MAX_TIMEOUT_LENGTH;
 
+	REG_GET_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, &prev_length, AUX_RX_TIMEOUT_LEN_MUL, &prev_mult);
+
+	switch (prev_mult) {
+	case 0:
+		prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_8;
+		break;
+	case 1:
+		prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_16;
+		break;
+	case 2:
+		prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_32;
+		break;
+	case 3:
+		prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_64;
+		break;
+	default:
+		prev_timeout_val = DEFAULT_AUX_ENGINE_LENGTH * TIME_OUT_MULTIPLIER_8;
+		break;
+	}
+
 	REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier);
 
-	return true;
+	return prev_timeout_val;
 }
 
 static struct dce_aux_funcs aux_functions = {
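The reworked dce_aux_configure_timeout() above now returns the timeout that was previously programmed, decoded from AUX_RX_TIMEOUT_LEN / AUX_RX_TIMEOUT_LEN_MUL: each length unit is worth 8, 16, 32 or 64 us depending on the multiplier, and the default of length 69 at multiplier 0 corresponds to 69 * 8 = 552 us. A standalone sketch of that decode (the helper name is illustrative):

	static uint32_t decode_aux_timeout_us(uint32_t len, uint32_t mult)
	{
		static const uint32_t us_per_len[] = { 8, 16, 32, 64 };

		if (mult > 3)
			return 69 * 8;              /* default engine setting: 552 us */

		return len * us_per_len[mult];      /* e.g. len 69, mult 0 -> 552 us  */
	}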
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index b4b2c79..3824658 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -30,7 +30,6 @@
 #include "inc/hw/aux_engine.h"
 
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 #define AUX_COMMON_REG_LIST0(id)\
 	SRI(AUX_CONTROL, DP_AUX, id), \
 	SRI(AUX_ARB_CONTROL, DP_AUX, id), \
@@ -39,7 +38,6 @@
 	SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
 	SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \
 	SRI(AUX_SW_STATUS, DP_AUX, id)
-#endif
 
 #define AUX_COMMON_REG_LIST(id)\
 	SRI(AUX_CONTROL, DP_AUX, id), \
@@ -311,7 +309,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
 		struct aux_payload *cmd);
 
 struct dce_aux_funcs {
-	bool (*configure_timeout)
+	uint32_t (*configure_timeout)
 		(struct ddc_service *ddc,
 		 uint32_t timeout);
 	void (*destroy)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index f787a6b..2e992fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -905,7 +905,7 @@ static bool dce112_program_pix_clk(
 	struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
 	struct bp_pixel_clock_parameters bp_pc_params = {0};
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
 		unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
 		unsigned dp_dto_ref_100hz = 7000000;
@@ -1004,7 +1004,6 @@ static bool get_pixel_clk_frequency_100hz(
 	return false;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 
 /* this table is used to find *1.001 and /1.001 pixel rates from a non-precise pixel rate */
 struct pixel_rate_range_table_entry {
@@ -1064,7 +1063,6 @@ static const struct clock_source_funcs dcn20_clk_src_funcs = {
 	.get_pix_clk_dividers = dce112_get_pix_clk_dividers,
 	.get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
 };
-#endif
 
 /*****************************************/
 /* Constructor                           */
@@ -1435,7 +1433,6 @@ bool dce112_clk_src_construct(
 	return true;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 bool dcn20_clk_src_construct(
 	struct dce110_clk_src *clk_src,
 	struct dc_context *ctx,
@@ -1451,4 +1448,3 @@ bool dcn20_clk_src_construct(
 
 	return ret;
 }
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index 43c1bf6..51bd250 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -55,7 +55,6 @@
 	CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
 	CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define CS_COMMON_REG_LIST_DCN2_0(index, pllid) \
 		SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
 		SRII(PHASE, DP_DTO, 0),\
@@ -76,9 +75,7 @@
 		SRII(PIXEL_RATE_CNTL, OTG, 3),\
 		SRII(PIXEL_RATE_CNTL, OTG, 4),\
 		SRII(PIXEL_RATE_CNTL, OTG, 5)
-#endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #define CS_COMMON_REG_LIST_DCN2_1(index, pllid) \
 		SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
 		SRII(PHASE, DP_DTO, 0),\
@@ -93,17 +90,14 @@
 		SRII(PIXEL_RATE_CNTL, OTG, 1),\
 		SRII(PIXEL_RATE_CNTL, OTG, 2),\
 		SRII(PIXEL_RATE_CNTL, OTG, 3)
-#endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\
 	CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\
 	CS_SF(DP_DTO0_MODULO, DP_DTO0_MODULO, mask_sh),\
 	CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
 	CS_SF(OTG0_PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh)
-#endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 
 #define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \
 		SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
@@ -201,7 +195,6 @@ bool dce112_clk_src_construct(
 	const struct dce110_clk_src_shift *cs_shift,
 	const struct dce110_clk_src_mask *cs_mask);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 bool dcn20_clk_src_construct(
 	struct dce110_clk_src *clk_src,
 	struct dc_context *ctx,
@@ -210,6 +203,5 @@ bool dcn20_clk_src_construct(
 	const struct dce110_clk_src_regs *regs,
 	const struct dce110_clk_src_shift *cs_shift,
 	const struct dce110_clk_src_mask *cs_mask);
-#endif
 
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index ba995d3..30d953a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -59,6 +59,12 @@
 #define MCP_BL_SET_PWM_FRAC 0x6A  /* Enable or disable Fractional PWM */
 #define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK   0x00000001L
 
+// PSP FW version
+#define mmMP0_SMN_C2PMSG_58				0x1607A
+
+//Register access policy version
+#define mmMP0_SMN_C2PMSG_91				0x1609B
+
 static bool dce_dmcu_init(struct dmcu *dmcu)
 {
 	// Do nothing
@@ -318,7 +324,7 @@ static void dce_get_psr_wait_loop(
 	return;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 static void dcn10_get_dmcu_version(struct dmcu *dmcu)
 {
 	struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
@@ -373,6 +379,7 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
 	const struct dc_config *config = &dmcu->ctx->dc->config;
 	bool status = false;
 
+	PERF_TRACE();
 	/*  Definition of DC_DMCU_SCRATCH
 	 *  0 : firmware not loaded
 	 *  1 : PSP load DMCU FW but not initialized
@@ -429,9 +436,21 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
 		break;
 	}
 
+	PERF_TRACE();
 	return status;
 }
 
+static bool dcn21_dmcu_init(struct dmcu *dmcu)
+{
+	struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+	uint32_t dmcub_psp_version = REG_READ(DMCUB_SCRATCH15);
+
+	if (dmcu->auto_load_dmcu && dmcub_psp_version == 0) {
+		return false;
+	}
+
+	return dcn10_dmcu_init(dmcu);
+}
 
 static bool dcn10_dmcu_load_iram(struct dmcu *dmcu,
 		unsigned int start_offset,
@@ -518,9 +537,6 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
 	if (dmcu->dmcu_state != DMCU_RUNNING)
 		return;
 
-	dcn10_get_dmcu_psr_state(dmcu, &psr_state);
-	if (psr_state == 0 && !enable)
-		return;
 	/* waitDMCUReadyForCmd */
 	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
 				dmcu_wait_reg_ready_interval,
@@ -727,9 +743,7 @@ static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu)
 	return true;
 }
 
-#endif //(CONFIG_DRM_AMD_DC_DCN1_0)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 
 static bool dcn20_lock_phy(struct dmcu *dmcu)
 {
@@ -777,7 +791,7 @@ static bool dcn20_unlock_phy(struct dmcu *dmcu)
 	return true;
 }
 
-#endif //(CONFIG_DRM_AMD_DC_DCN2_0)
+#endif //(CONFIG_DRM_AMD_DC_DCN)
 
 static const struct dmcu_funcs dce_funcs = {
 	.dmcu_init = dce_dmcu_init,
@@ -790,7 +804,7 @@ static const struct dmcu_funcs dce_funcs = {
 	.is_dmcu_initialized = dce_is_dmcu_initialized
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 static const struct dmcu_funcs dcn10_funcs = {
 	.dmcu_init = dcn10_dmcu_init,
 	.load_iram = dcn10_dmcu_load_iram,
@@ -801,9 +815,7 @@ static const struct dmcu_funcs dcn10_funcs = {
 	.get_psr_wait_loop = dcn10_get_psr_wait_loop,
 	.is_dmcu_initialized = dcn10_is_dmcu_initialized
 };
-#endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 static const struct dmcu_funcs dcn20_funcs = {
 	.dmcu_init = dcn10_dmcu_init,
 	.load_iram = dcn10_dmcu_load_iram,
@@ -816,6 +828,19 @@ static const struct dmcu_funcs dcn20_funcs = {
 	.lock_phy = dcn20_lock_phy,
 	.unlock_phy = dcn20_unlock_phy
 };
+
+static const struct dmcu_funcs dcn21_funcs = {
+	.dmcu_init = dcn21_dmcu_init,
+	.load_iram = dcn10_dmcu_load_iram,
+	.set_psr_enable = dcn10_dmcu_set_psr_enable,
+	.setup_psr = dcn10_dmcu_setup_psr,
+	.get_psr_state = dcn10_get_dmcu_psr_state,
+	.set_psr_wait_loop = dcn10_psr_wait_loop,
+	.get_psr_wait_loop = dcn10_get_psr_wait_loop,
+	.is_dmcu_initialized = dcn10_is_dmcu_initialized,
+	.lock_phy = dcn20_lock_phy,
+	.unlock_phy = dcn20_unlock_phy
+};
 #endif
 
 static void dce_dmcu_construct(
@@ -836,6 +861,26 @@ static void dce_dmcu_construct(
 	dmcu_dce->dmcu_mask = dmcu_mask;
 }
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void dcn21_dmcu_construct(
+		struct dce_dmcu *dmcu_dce,
+		struct dc_context *ctx,
+		const struct dce_dmcu_registers *regs,
+		const struct dce_dmcu_shift *dmcu_shift,
+		const struct dce_dmcu_mask *dmcu_mask)
+{
+	uint32_t psp_version = 0;
+
+	dce_dmcu_construct(dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
+
+	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+		psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58);
+		dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029);
+		dmcu_dce->base.psp_version = psp_version;
+	}
+}
+#endif
+
 struct dmcu *dce_dmcu_create(
 	struct dc_context *ctx,
 	const struct dce_dmcu_registers *regs,
@@ -857,7 +902,7 @@ struct dmcu *dce_dmcu_create(
 	return &dmcu_dce->base;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 struct dmcu *dcn10_dmcu_create(
 	struct dc_context *ctx,
 	const struct dce_dmcu_registers *regs,
@@ -878,9 +923,7 @@ struct dmcu *dcn10_dmcu_create(
 
 	return &dmcu_dce->base;
 }
-#endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 struct dmcu *dcn20_dmcu_create(
 	struct dc_context *ctx,
 	const struct dce_dmcu_registers *regs,
@@ -901,6 +944,27 @@ struct dmcu *dcn20_dmcu_create(
 
 	return &dmcu_dce->base;
 }
+
+struct dmcu *dcn21_dmcu_create(
+	struct dc_context *ctx,
+	const struct dce_dmcu_registers *regs,
+	const struct dce_dmcu_shift *dmcu_shift,
+	const struct dce_dmcu_mask *dmcu_mask)
+{
+	struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+
+	if (dmcu_dce == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	dcn21_dmcu_construct(
+		dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
+
+	dmcu_dce->base.funcs = &dcn21_funcs;
+
+	return &dmcu_dce->base;
+}
 #endif
 
 void dce_dmcu_destroy(struct dmcu **dmcu)
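dcn21_dmcu_construct() above decides whether PSP is expected to auto-load the DMCUB firmware by reading the PSP version scratch register (mmMP0_SMN_C2PMSG_58) and comparing the masked build fields against 0x00110029; dcn21_dmcu_init() then bails out if auto-load was expected but DMCUB_SCRATCH15 still reads zero. The version check in isolation (the cut-off value is taken directly from the code above; its interpretation as "first auto-loading PSP build" is an assumption):

	static bool psp_autoloads_dmcub(uint32_t psp_version)
	{
		/* keep only the fields identifying the PSP build and compare
		 * against the first build assumed to auto-load DMCUB firmware */
		return (psp_version & 0x00FF00FF) > 0x00110029;
	}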
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index cc85876..5e044c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -71,6 +71,10 @@
 	DMCU_COMMON_REG_LIST_DCE_BASE(), \
 	SR(DMU_MEM_PWR_CNTL)
 
+#define DMCU_DCN20_REG_LIST()\
+	DMCU_DCN10_REG_LIST(), \
+	SR(DMCUB_SCRATCH15)
+
 #define DMCU_SF(reg_name, field_name, post_fix)\
 	.field_name = reg_name ## __ ## field_name ## post_fix
 
@@ -175,6 +179,7 @@ struct dce_dmcu_registers {
 	uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
 	uint32_t SMU_INTERRUPT_CONTROL;
 	uint32_t DC_DMCU_SCRATCH;
+	uint32_t DMCUB_SCRATCH15;
 };
 
 struct dce_dmcu {
@@ -261,13 +266,17 @@ struct dmcu *dcn10_dmcu_create(
 	const struct dce_dmcu_shift *dmcu_shift,
 	const struct dce_dmcu_mask *dmcu_mask);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 struct dmcu *dcn20_dmcu_create(
 	struct dc_context *ctx,
 	const struct dce_dmcu_registers *regs,
 	const struct dce_dmcu_shift *dmcu_shift,
 	const struct dce_dmcu_mask *dmcu_mask);
-#endif
+
+struct dmcu *dcn21_dmcu_create(
+	struct dc_context *ctx,
+	const struct dce_dmcu_registers *regs,
+	const struct dce_dmcu_shift *dmcu_shift,
+	const struct dce_dmcu_mask *dmcu_mask);
 
 void dce_dmcu_destroy(struct dmcu **dmcu);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
index 0275d6d..e1c5839 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -25,7 +25,7 @@
 
 #include "dce_hwseq.h"
 #include "reg_helper.h"
-#include "hw_sequencer.h"
+#include "hw_sequencer_private.h"
 #include "core_types.h"
 
 #define CTX \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 32d145a..c5aa1f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -25,7 +25,7 @@
 #ifndef __DCE_HWSEQ_H__
 #define __DCE_HWSEQ_H__
 
-#include "hw_sequencer.h"
+#include "dc_types.h"
 
 #define BL_REG_LIST()\
 	SR(LVTMA_PWRSEQ_CNTL), \
@@ -210,7 +210,6 @@
 	SR(DC_IP_REQUEST_CNTL), \
 	BL_REG_LIST()
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define HWSEQ_DCN2_REG_LIST()\
 	HWSEQ_DCN_REG_LIST(), \
 	HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \
@@ -276,9 +275,7 @@
 	SR(D6VGA_CONTROL), \
 	SR(DC_IP_REQUEST_CNTL), \
 	BL_REG_LIST()
-#endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #define HWSEQ_DCN21_REG_LIST()\
 	HWSEQ_DCN_REG_LIST(), \
 	HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \
@@ -329,7 +326,6 @@
 	SR(D6VGA_CONTROL), \
 	SR(DC_IP_REQUEST_CNTL), \
 	BL_REG_LIST()
-#endif
 
 struct dce_hwseq_registers {
 
@@ -577,7 +573,6 @@ struct dce_hwseq_registers {
 	HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
 	HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\
 	HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
 	HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
@@ -637,9 +632,7 @@ struct dce_hwseq_registers {
 	HWS_SF(, DOMAIN21_PG_STATUS, DOMAIN21_PGFSM_PWR_STATUS, mask_sh), \
 	HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
 	HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
-#endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\
 	HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
 	HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
@@ -682,7 +675,6 @@ struct dce_hwseq_registers {
 	HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
 	HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
 	HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
-#endif
 
 #define HWSEQ_REG_FIELD_LIST(type) \
 	type DCFE_CLOCK_ENABLE; \
@@ -800,8 +792,7 @@ struct dce_hwseq_registers {
 	type D2VGA_MODE_ENABLE; \
 	type D3VGA_MODE_ENABLE; \
 	type D4VGA_MODE_ENABLE; \
-	type AZALIA_AUDIO_DTO_MODULE;\
-	type HPO_HDMISTREAMCLK_GATE_DIS;
+	type AZALIA_AUDIO_DTO_MODULE;
 
 struct dce_hwseq_shift {
 	HWSEQ_REG_FIELD_LIST(uint8_t)
@@ -820,6 +811,10 @@ enum blnd_mode {
 	BLND_MODE_BLENDING,/* Alpha blending - blend 'current' and 'other' */
 };
 
+struct dce_hwseq;
+struct pipe_ctx;
+struct clock_source;
+
 void dce_enable_fe_clock(struct dce_hwseq *hwss,
 		unsigned int inst, bool enable);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
index 35a7539..dd41736 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
@@ -31,7 +31,7 @@ bool dce_i2c_submit_command(
 	struct i2c_command *cmd)
 {
 	struct dce_i2c_hw *dce_i2c_hw;
-	struct dce_i2c_sw *dce_i2c_sw;
+	struct dce_i2c_sw dce_i2c_sw = {0};
 
 	if (!ddc) {
 		BREAK_TO_DEBUGGER();
@@ -43,18 +43,15 @@ bool dce_i2c_submit_command(
 		return false;
 	}
 
-	/* The software engine is only available on dce8 */
-	dce_i2c_sw = dce_i2c_acquire_i2c_sw_engine(pool, ddc);
+	dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc);
 
-	if (!dce_i2c_sw) {
-		dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc);
-
-		if (!dce_i2c_hw)
-			return false;
-
+	if (dce_i2c_hw)
 		return dce_i2c_submit_command_hw(pool, ddc, cmd, dce_i2c_hw);
+
+	dce_i2c_sw.ctx = ddc->ctx;
+	if (dce_i2c_engine_acquire_sw(&dce_i2c_sw, ddc)) {
+		return dce_i2c_submit_command_sw(pool, ddc, cmd, &dce_i2c_sw);
 	}
 
-	return dce_i2c_submit_command_sw(pool, ddc, cmd, dce_i2c_sw);
-
+	return false;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index aad7b52..066188b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -100,20 +100,6 @@ static uint32_t get_hw_buffer_available_size(
 			dce_i2c_hw->buffer_used_bytes;
 }
 
-static uint32_t get_speed(
-	const struct dce_i2c_hw *dce_i2c_hw)
-{
-	uint32_t pre_scale = 0;
-
-	REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale);
-
-	/* [anaumov] it seems following is unnecessary */
-	/*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
-	return pre_scale ?
-		dce_i2c_hw->reference_frequency / pre_scale :
-		dce_i2c_hw->default_speed;
-}
-
 static void process_channel_reply(
 	struct dce_i2c_hw *dce_i2c_hw,
 	struct i2c_payload *reply)
@@ -278,16 +264,25 @@ static void set_speed(
 	struct dce_i2c_hw *dce_i2c_hw,
 	uint32_t speed)
 {
+	uint32_t xtal_ref_div = 0;
+	uint32_t prescale = 0;
+
+	REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
+
+	if (xtal_ref_div == 0)
+		xtal_ref_div = 2;
+
+	prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed;
 
 	if (speed) {
 		if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
 			REG_UPDATE_N(SPEED, 3,
-				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
+				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
 				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
 				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
 		else
 			REG_UPDATE_N(SPEED, 2,
-				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
+				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
 				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
 	}
 }
@@ -296,9 +291,7 @@ static bool setup_engine(
 	struct dce_i2c_hw *dce_i2c_hw)
 {
 	uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	uint32_t  reset_length = 0;
-#endif
 	/* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
 	REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
 
@@ -322,14 +315,12 @@ static bool setup_engine(
 		REG_UPDATE_N(SETUP, 2,
 			     FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
 			     FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	} else {
 		reset_length = dce_i2c_hw->send_reset_length;
 		REG_UPDATE_N(SETUP, 3,
 			     FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
 			     FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH), reset_length,
 			     FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
-#endif
 	}
 	/* Program HW priority
 	 * set to High - interrupt software I2C at any time
@@ -348,9 +339,7 @@ static void release_engine(
 	bool safe_to_reset;
 
 	/* Restore original HW engine speed */
-
-	set_speed(dce_i2c_hw, dce_i2c_hw->original_speed);
-
+	set_speed(dce_i2c_hw, dce_i2c_hw->default_speed);
 
 	/* Reset HW engine */
 	{
@@ -382,7 +371,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
 {
 	uint32_t counter = 0;
 	enum gpio_result result;
-	uint32_t current_speed;
 	struct dce_i2c_hw *dce_i2c_hw = NULL;
 
 	if (!ddc)
@@ -420,11 +408,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
 
 	dce_i2c_hw->ddc = ddc;
 
-	current_speed = get_speed(dce_i2c_hw);
-
-	if (current_speed)
-		dce_i2c_hw->original_speed = current_speed;
-
 	if (!setup_engine(dce_i2c_hw)) {
 		release_engine(dce_i2c_hw);
 		return NULL;
@@ -482,13 +465,9 @@ static void submit_channel_request_hw(
 
 static uint32_t get_transaction_timeout_hw(
 	const struct dce_i2c_hw *dce_i2c_hw,
-	uint32_t length)
+	uint32_t length,
+	uint32_t speed)
 {
-
-	uint32_t speed = get_speed(dce_i2c_hw);
-
-
-
 	uint32_t period_timeout;
 	uint32_t num_of_clock_stretches;
 
@@ -508,7 +487,8 @@ static uint32_t get_transaction_timeout_hw(
 bool dce_i2c_hw_engine_submit_payload(
 	struct dce_i2c_hw *dce_i2c_hw,
 	struct i2c_payload *payload,
-	bool middle_of_transaction)
+	bool middle_of_transaction,
+	uint32_t speed)
 {
 
 	struct i2c_request_transaction_data request;
@@ -546,7 +526,7 @@ bool dce_i2c_hw_engine_submit_payload(
 	/* obtain timeout value before submitting request */
 
 	transaction_timeout = get_transaction_timeout_hw(
-		dce_i2c_hw, payload->length + 1);
+		dce_i2c_hw, payload->length + 1, speed);
 
 	submit_channel_request_hw(
 		dce_i2c_hw, &request);
@@ -592,13 +572,11 @@ bool dce_i2c_submit_command_hw(
 		struct i2c_payload *payload = cmd->payloads + index_of_payload;
 
 		if (!dce_i2c_hw_engine_submit_payload(
-				dce_i2c_hw, payload, mot)) {
+				dce_i2c_hw, payload, mot, cmd->speed)) {
 			result = false;
 			break;
 		}
 
-
-
 		++index_of_payload;
 	}
 
@@ -629,7 +607,6 @@ void dce_i2c_hw_construct(
 	dce_i2c_hw->buffer_used_bytes = 0;
 	dce_i2c_hw->transaction_count = 0;
 	dce_i2c_hw->engine_keep_power_up_count = 1;
-	dce_i2c_hw->original_speed = DEFAULT_I2C_HW_SPEED;
 	dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED;
 	dce_i2c_hw->send_reset_length = 0;
 	dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
@@ -644,9 +621,6 @@ void dce100_i2c_hw_construct(
 	const struct dce_i2c_shift *shifts,
 	const struct dce_i2c_mask *masks)
 {
-
-	uint32_t xtal_ref_div = 0;
-
 	dce_i2c_hw_construct(dce_i2c_hw,
 			ctx,
 			engine_id,
@@ -654,21 +628,6 @@ void dce100_i2c_hw_construct(
 			shifts,
 			masks);
 	dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE100;
-
-	REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
-
-	if (xtal_ref_div == 0)
-		xtal_ref_div = 2;
-
-	/*Calculating Reference Clock by divding original frequency by
-	 * XTAL_REF_DIV.
-	 * At upper level, uint32_t reference_frequency =
-	 *  dal_dce_i2c_get_reference_clock(as) >> 1
-	 *  which already divided by 2. So we need x2 to get original
-	 *  reference clock from ppll_info
-	 */
-	dce_i2c_hw->reference_frequency =
-		(dce_i2c_hw->reference_frequency * 2) / xtal_ref_div;
 }
 
 void dce112_i2c_hw_construct(
@@ -705,7 +664,6 @@ void dcn1_i2c_hw_construct(
 	dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCN;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 void dcn2_i2c_hw_construct(
 	struct dce_i2c_hw *dce_i2c_hw,
 	struct dc_context *ctx,
@@ -724,4 +682,3 @@ void dcn2_i2c_hw_construct(
 	if (ctx->dc->debug.scl_reset_length10)
 		dce_i2c_hw->send_reset_length = I2C_SEND_RESET_LENGTH_10;
 }
-#endif
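With the per-construct reference-clock scaling removed, set_speed() above recomputes the prescale on every call: the stored reference frequency is first multiplied back by 2 (it was saved pre-divided), divided by XTAL_REF_DIV (treated as 2 when the register reads 0), and then divided by the requested bus speed. A minimal sketch of that arithmetic, assuming both values are in kHz:

	static uint32_t i2c_prescale(uint32_t ref_khz_halved, uint32_t xtal_ref_div,
				     uint32_t speed_khz)
	{
		if (xtal_ref_div == 0)
			xtal_ref_div = 2;       /* hardware default when the field is 0 */

		/* e.g. ref 50000 (stored half of 100 MHz), div 2, 100 kHz -> 500 */
		return ((ref_khz_halved * 2) / xtal_ref_div) / speed_khz;
	}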
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
index cb0234e..fb055e68 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
@@ -177,9 +177,7 @@ struct dce_i2c_shift {
 	uint8_t DC_I2C_INDEX;
 	uint8_t DC_I2C_INDEX_WRITE;
 	uint8_t XTAL_REF_DIV;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	uint8_t DC_I2C_DDC1_SEND_RESET_LENGTH;
-#endif
 	uint8_t DC_I2C_REG_RW_CNTL_STATUS;
 };
 
@@ -220,17 +218,13 @@ struct dce_i2c_mask {
 	uint32_t DC_I2C_INDEX;
 	uint32_t DC_I2C_INDEX_WRITE;
 	uint32_t XTAL_REF_DIV;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	uint32_t DC_I2C_DDC1_SEND_RESET_LENGTH;
-#endif
 	uint32_t DC_I2C_REG_RW_CNTL_STATUS;
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define I2C_COMMON_MASK_SH_LIST_DCN2(mask_sh)\
 	I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh),\
 	I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH, mask_sh)
-#endif
 
 struct dce_i2c_registers {
 	uint32_t SETUP;
@@ -262,7 +256,6 @@ struct i2c_request_transaction_data {
 
 struct dce_i2c_hw {
 	struct ddc *ddc;
-	uint32_t original_speed;
 	uint32_t engine_keep_power_up_count;
 	uint32_t transaction_count;
 	uint32_t buffer_used_bytes;
@@ -312,7 +305,6 @@ void dcn1_i2c_hw_construct(
 	const struct dce_i2c_shift *shifts,
 	const struct dce_i2c_mask *masks);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 void dcn2_i2c_hw_construct(
 	struct dce_i2c_hw *dce_i2c_hw,
 	struct dc_context *ctx,
@@ -320,7 +312,6 @@ void dcn2_i2c_hw_construct(
 	const struct dce_i2c_registers *regs,
 	const struct dce_i2c_shift *shifts,
 	const struct dce_i2c_mask *masks);
-#endif
 
 bool dce_i2c_submit_command_hw(
 	struct resource_pool *pool,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index a5a11c2..87d8428 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -73,31 +73,6 @@ static void release_engine_dce_sw(
 	dce_i2c_sw->ddc = NULL;
 }
 
-static bool get_hw_supported_ddc_line(
-	struct ddc *ddc,
-	enum gpio_ddc_line *line)
-{
-	enum gpio_ddc_line line_found;
-
-	*line = GPIO_DDC_LINE_UNKNOWN;
-
-	if (!ddc) {
-		BREAK_TO_DEBUGGER();
-		return false;
-	}
-
-	if (!ddc->hw_info.hw_supported)
-		return false;
-
-	line_found = dal_ddc_get_line(ddc);
-
-	if (line_found >= GPIO_DDC_LINE_COUNT)
-		return false;
-
-	*line = line_found;
-
-	return true;
-}
 static bool wait_for_scl_high_sw(
 	struct dc_context *ctx,
 	struct ddc *ddc,
@@ -524,21 +499,3 @@ bool dce_i2c_submit_command_sw(
 
 	return result;
 }
-struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine(
-	struct resource_pool *pool,
-	struct ddc *ddc)
-{
-	enum gpio_ddc_line line;
-	struct dce_i2c_sw *engine = NULL;
-
-	if (get_hw_supported_ddc_line(ddc, &line))
-		engine = pool->sw_i2cs[line];
-
-	if (!engine)
-		return NULL;
-
-	if (!dce_i2c_engine_acquire_sw(engine, ddc))
-		return NULL;
-
-	return engine;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h
index 5bbcdd4..019fc47 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h
@@ -49,9 +49,9 @@ bool dce_i2c_submit_command_sw(
 	struct i2c_command *cmd,
 	struct dce_i2c_sw *dce_i2c_sw);
 
-struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine(
-	struct resource_pool *pool,
-	struct ddc *ddc);
+bool dce_i2c_engine_acquire_sw(
+	struct dce_i2c_sw *dce_i2c_sw,
+	struct ddc *ddc_handle);
 
 #endif
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 6ed922a..4515749 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -137,7 +137,7 @@ static void dce110_update_generic_info_packet(
 			AFMT_GENERIC0_UPDATE, (packet_index == 0),
 			AFMT_GENERIC2_UPDATE, (packet_index == 2));
 	}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	if (REG(AFMT_VBI_PACKET_CONTROL1)) {
 		switch (packet_index) {
 		case 0:
@@ -231,7 +231,7 @@ static void dce110_update_hdmi_info_packet(
 				HDMI_GENERIC1_SEND, send,
 				HDMI_GENERIC1_LINE, line);
 		break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	case 4:
 		if (REG(HDMI_GENERIC_PACKET_CONTROL2))
 			REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
@@ -275,9 +275,10 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
 	struct stream_encoder *enc,
 	struct dc_crtc_timing *crtc_timing,
 	enum dc_color_space output_color_space,
+	bool use_vsc_sdp_for_colorimetry,
 	uint32_t enable_sdp_splitting)
 {
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	uint32_t h_active_start;
 	uint32_t v_active_start;
 	uint32_t misc0 = 0;
@@ -329,7 +330,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
 		if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
 			REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 		if (enc110->se_mask->DP_VID_N_MUL)
 			REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
 #endif
@@ -340,7 +341,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
 		break;
 	}
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	if (REG(DP_MSA_MISC))
 		misc1 = REG_READ(DP_MSA_MISC);
 #endif
@@ -374,7 +375,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
 	/* set dynamic range and YCbCr range */
 
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	switch (hw_crtc_timing.display_color_depth) {
 	case COLOR_DEPTH_666:
 		colorimetry_bpc = 0;
@@ -454,7 +455,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
 				DP_DYN_RANGE, dynamic_range_rgb,
 				DP_YCBCR_RANGE, dynamic_range_ycbcr);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 		if (REG(DP_MSA_COLORIMETRY))
 			REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
 
@@ -489,7 +490,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
 				hw_crtc_timing.v_front_porch;
 
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 		/* start at beginning of left border */
 		if (REG(DP_MSA_TIMING_PARAM2))
 			REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
@@ -786,7 +787,7 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
 		dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);
 	}
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	if (enc110->se_mask->HDMI_DB_DISABLE) {
 		/* for bring up, disable dp double  TODO */
 		if (REG(HDMI_DB_CONTROL))
@@ -824,7 +825,7 @@ static void dce110_stream_encoder_stop_hdmi_info_packets(
 		HDMI_GENERIC1_LINE, 0,
 		HDMI_GENERIC1_SEND, 0);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	/* stop generic packets 2 & 3 on HDMI */
 	if (REG(HDMI_GENERIC_PACKET_CONTROL2))
 		REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
new file mode 100644
index 0000000..225955e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub_psr.h"
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "../../dmub/inc/dmub_srv.h"
+#include "dmub_fw_state.h"
+#include "core_types.h"
+#include "ipp.h"
+
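+// Maximum number of pipes scanned when looking up the pipe behind a PSR-capable eDP link.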
+#define MAX_PIPES 6
+
+/**
+ * Get PSR state from firmware.
+ */
+static void dmub_get_psr_state(uint32_t *psr_state)
+{
+	// Not yet implemented
+	// Trigger GPINT interrupt from firmware
+}
+
+/**
+ * Enable/Disable PSR.
+ */
+static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
+{
+	union dmub_rb_cmd cmd;
+	struct dc_context *dc = dmub->ctx;
+
+	cmd.psr_enable.header.type = DMUB_CMD__PSR;
+
+	if (enable)
+		cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_ENABLE;
+	else
+		cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_DISABLE;
+
+	cmd.psr_enable.header.payload_bytes = 0; // Send header only
+
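+	// Queue the command in the DMUB ring buffer, kick it off, and wait for the firmware to go idle.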
+	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+	dc_dmub_srv_cmd_execute(dc->dmub_srv);
+	dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
+/**
+ * Set PSR level.
+ */
+static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
+{
+	union dmub_rb_cmd cmd;
+	uint32_t psr_state = 0;
+	struct dc_context *dc = dmub->ctx;
+
+	dmub_get_psr_state(&psr_state);
+
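+	// Nothing to update while PSR has not been enabled in firmware.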
+	if (psr_state == 0)
+		return;
+
+	cmd.psr_set_level.header.type = DMUB_CMD__PSR;
+	cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL;
+	cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
+	cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
+
+	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_set_level.header);
+	dc_dmub_srv_cmd_execute(dc->dmub_srv);
+	dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
+/**
+ * Set up PSR by programming PHY registers and sending the PSR HW context to the firmware.
+ */
+static bool dmub_setup_psr(struct dmub_psr *dmub,
+		struct dc_link *link,
+		struct psr_context *psr_context)
+{
+	union dmub_rb_cmd cmd;
+	struct dc_context *dc = dmub->ctx;
+	struct dmub_cmd_psr_copy_settings_data *copy_settings_data
+		= &cmd.psr_copy_settings.psr_copy_settings_data;
+	struct pipe_ctx *pipe_ctx = NULL;
+	struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+	int i;
+
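+	// Find the pipe that drives this eDP link; its HW instance IDs are copied to the firmware below.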
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (res_ctx &&
+				res_ctx->pipe_ctx[i].stream &&
+				res_ctx->pipe_ctx[i].stream->link &&
+				res_ctx->pipe_ctx[i].stream->link == link &&
+				res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+			pipe_ctx = &res_ctx->pipe_ctx[i];
+			break;
+		}
+	}
+
+	// &pipe_ctx->plane_res and &pipe_ctx->stream_res are never NULL, so only the pipe lookup result needs checking.
+	if (!pipe_ctx)
+		return false;
+
+	// Program DP DPHY fast training registers
+	link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
+			psr_context->psrExitLinkTrainingRequired);
+
+	// Program DP_SEC_CNTL1 register to set transmission GPS0 line num and priority to high
+	link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
+			psr_context->sdpTransmitLineNumDeadline);
+
+	cmd.psr_copy_settings.header.type = DMUB_CMD__PSR;
+	cmd.psr_copy_settings.header.sub_type = DMUB_CMD__PSR_COPY_SETTINGS;
+	cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data);
+
+	// Hw insts
+	copy_settings_data->dpphy_inst				= psr_context->phyType;
+	copy_settings_data->aux_inst				= psr_context->channel;
+	copy_settings_data->digfe_inst				= psr_context->engineId;
+	copy_settings_data->digbe_inst				= psr_context->transmitterId;
+
+	copy_settings_data->mpcc_inst				= pipe_ctx->plane_res.mpcc_inst;
+
+	if (pipe_ctx->plane_res.hubp)
+		copy_settings_data->hubp_inst			= pipe_ctx->plane_res.hubp->inst;
+	else
+		copy_settings_data->hubp_inst			= 0;
+	if (pipe_ctx->plane_res.dpp)
+		copy_settings_data->dpp_inst			= pipe_ctx->plane_res.dpp->inst;
+	else
+		copy_settings_data->dpp_inst			= 0;
+	if (pipe_ctx->stream_res.opp)
+		copy_settings_data->opp_inst			= pipe_ctx->stream_res.opp->inst;
+	else
+		copy_settings_data->opp_inst			= 0;
+	if (pipe_ctx->stream_res.tg)
+		copy_settings_data->otg_inst			= pipe_ctx->stream_res.tg->inst;
+	else
+		copy_settings_data->otg_inst			= 0;
+
+	// Misc
+	copy_settings_data->psr_level				= psr_context->psr_level.u32all;
+	copy_settings_data->hyst_frames				= psr_context->timehyst_frames;
+	copy_settings_data->hyst_lines				= psr_context->hyst_lines;
+	copy_settings_data->phy_type				= psr_context->phyType;
+	copy_settings_data->aux_repeat				= psr_context->aux_repeats;
+	copy_settings_data->smu_optimizations_en	= psr_context->allow_smu_optimizations;
+	copy_settings_data->skip_wait_for_pll_lock	= psr_context->skipPsrWaitForPllLock;
+	copy_settings_data->frame_delay				= psr_context->frame_delay;
+	copy_settings_data->smu_phy_id				= psr_context->smuPhyId;
+	copy_settings_data->num_of_controllers		= psr_context->numberOfControllers;
+	copy_settings_data->frame_cap_ind			= psr_context->psrFrameCaptureIndicationReq;
+	copy_settings_data->phy_num					= psr_context->frame_delay & 0x7;
+	copy_settings_data->link_rate				= psr_context->frame_delay & 0xF;
+
+	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
+	dc_dmub_srv_cmd_execute(dc->dmub_srv);
+	dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+	return true;
+}
+
+static const struct dmub_psr_funcs psr_funcs = {
+	.set_psr_enable			= dmub_set_psr_enable,
+	.setup_psr				= dmub_setup_psr,
+	.get_psr_state			= dmub_get_psr_state,
+	.set_psr_level			= dmub_set_psr_level,
+};
+
+/**
+ * Construct PSR object.
+ */
+static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx)
+{
+	psr->ctx = ctx;
+	psr->funcs = &psr_funcs;
+}
+
+/**
+ * Allocate and initialize PSR object.
+ */
+struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
+{
+	struct dmub_psr *psr = kzalloc(sizeof(struct dmub_psr), GFP_KERNEL);
+
+	if (psr == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	dmub_psr_construct(psr, ctx);
+
+	return psr;
+}
+
+/**
+ * Deallocate PSR object.
+ */
+void dmub_psr_destroy(struct dmub_psr **dmub)
+{
+	// Free the PSR object itself, not the caller's pointer-to-pointer.
+	kfree(*dmub);
+	*dmub = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
new file mode 100644
index 0000000..229958d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_PSR_H_
+#define _DMUB_PSR_H_
+
+#include "os_types.h"
+
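+// PSR state owned by the DMUB firmware service; create with dmub_psr_create() and drive it through funcs.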
+struct dmub_psr {
+	struct dc_context *ctx;
+	const struct dmub_psr_funcs *funcs;
+};
+
+struct dmub_psr_funcs {
+	void (*set_psr_enable)(struct dmub_psr *dmub, bool enable);
+	bool (*setup_psr)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
+	void (*get_psr_state)(uint32_t *psr_state);
+	void (*set_psr_level)(struct dmub_psr *dmub, uint16_t psr_level);
+};
+
+struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
+void dmub_psr_destroy(struct dmub_psr **dmub);
+
+
+#endif /* _DMUB_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index 799d362..753cb8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -26,7 +26,6 @@
 #include "dc.h"
 #include "core_types.h"
 #include "clk_mgr.h"
-#include "hw_sequencer.h"
 #include "dce100_hw_sequencer.h"
 #include "resource.h"
 
@@ -136,7 +135,7 @@ void dce100_hw_sequencer_construct(struct dc *dc)
 {
 	dce110_hw_sequencer_construct(dc);
 
-	dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
+	dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating;
 	dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
 	dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index a6b80fd..34518da 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -27,6 +27,7 @@
 #define __DC_HWSS_DCE100_H__
 
 #include "core_types.h"
+#include "hw_sequencer_private.h"
 
 struct dc;
 struct dc_state;
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index a5e122c..8f78bf9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -725,7 +725,7 @@ void dce100_clock_source_destroy(struct clock_source **clk_src)
 	*clk_src = NULL;
 }
 
-static void destruct(struct dce110_resource_pool *pool)
+static void dce100_resource_destruct(struct dce110_resource_pool *pool)
 {
 	unsigned int i;
 
@@ -885,7 +885,7 @@ static void dce100_destroy_resource_pool(struct resource_pool **pool)
 {
 	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
 
-	destruct(dce110_pool);
+	dce100_resource_destruct(dce110_pool);
 	kfree(dce110_pool);
 	*pool = NULL;
 }
@@ -950,7 +950,7 @@ static const struct resource_funcs dce100_res_pool_funcs = {
 	.find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
 };
 
-static bool construct(
+static bool dce100_resource_construct(
 	uint8_t num_virtual_links,
 	struct dc  *dc,
 	struct dce110_resource_pool *pool)
@@ -1122,7 +1122,7 @@ static bool construct(
 	return true;
 
 res_create_fail:
-	destruct(pool);
+	dce100_resource_destruct(pool);
 
 	return false;
 }
@@ -1137,7 +1137,7 @@ struct resource_pool *dce100_create_resource_pool(
 	if (!pool)
 		return NULL;
 
-	if (construct(num_virtual_links, dc, pool))
+	if (dce100_resource_construct(num_virtual_links, dc, pool))
 		return &pool->base;
 
 	kfree(pool);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index f0e837d1..5b68927 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -61,6 +61,8 @@
 
 #include "atomfirmware.h"
 
+#define GAMMA_HW_POINTS_NUM 256
+
 /*
  * All values are in milliseconds;
  * For eDP, after power-up/power/down,
@@ -268,7 +270,7 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params,
 }
 
 static bool
-dce110_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
+dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
 			       const struct dc_plane_state *plane_state)
 {
 	struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
@@ -596,7 +598,7 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
 }
 
 static bool
-dce110_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
+dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
 				const struct dc_stream_state *stream)
 {
 	struct transform *xfm = pipe_ctx->plane_res.xfm;
@@ -651,10 +653,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
 {
 	enum dc_lane_count lane_count =
 		pipe_ctx->stream->link->cur_link_settings.lane_count;
-
 	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
 	struct dc_link *link = pipe_ctx->stream->link;
-
+	const struct dc *dc = link->dc;
 
 	uint32_t active_total_with_borders;
 	uint32_t early_control = 0;
@@ -667,7 +668,7 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
 	link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
 						    pipe_ctx->stream_res.stream_enc->id, true);
 
-	link->dc->hwss.update_info_frame(pipe_ctx);
+	dc->hwss.update_info_frame(pipe_ctx);
 
 	/* enable early control to avoid corruption on DP monitor*/
 	active_total_with_borders =
@@ -943,15 +944,15 @@ void dce110_edp_backlight_control(
 void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
 {
 	/* notify audio driver for audio modes of monitor */
-	struct dc *core_dc;
+	struct dc *dc;
 	struct clk_mgr *clk_mgr;
 	unsigned int i, num_audio = 1;
 
 	if (!pipe_ctx->stream)
 		return;
 
-	core_dc = pipe_ctx->stream->ctx->dc;
-	clk_mgr = core_dc->clk_mgr;
+	dc = pipe_ctx->stream->ctx->dc;
+	clk_mgr = dc->clk_mgr;
 
 	if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
 		return;
@@ -959,7 +960,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
 	if (pipe_ctx->stream_res.audio) {
 		for (i = 0; i < MAX_PIPES; i++) {
 			/*current_state not updated yet*/
-			if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
+			if (dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
 				num_audio++;
 		}
 
@@ -1047,6 +1048,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
 	struct encoder_unblank_param params = { { 0 } };
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	struct dc_link *link = stream->link;
+	struct dce_hwseq *hws = link->dc->hwseq;
 
 	/* only 3 items below are used by unblank */
 	params.timing = pipe_ctx->stream->timing;
@@ -1056,7 +1058,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
 
 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
-		link->dc->hwss.edp_backlight_control(link, true);
+		hws->funcs.edp_backlight_control(link, true);
 	}
 }
 
@@ -1064,9 +1066,10 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	struct dc_link *link = stream->link;
+	struct dce_hwseq *hws = link->dc->hwseq;
 
 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
-		link->dc->hwss.edp_backlight_control(link, false);
+		hws->funcs.edp_backlight_control(link, false);
 		dc_link_set_abm_disable(link);
 	}
 
@@ -1223,7 +1226,7 @@ static void program_scaler(const struct dc *dc,
 {
 	struct tg_color color = {0};
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	/* TOFPGA */
 	if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
 		return;
@@ -1322,12 +1325,11 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	struct drr_params params = {0};
 	unsigned int event_triggers = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
-#endif
+	struct dce_hwseq *hws = dc->hwseq;
 
-	if (dc->hwss.disable_stream_gating) {
-		dc->hwss.disable_stream_gating(dc, pipe_ctx);
+	if (hws->funcs.disable_stream_gating) {
+		hws->funcs.disable_stream_gating(dc, pipe_ctx);
 	}
 
 	if (pipe_ctx->stream_res.audio != NULL) {
@@ -1357,10 +1359,10 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 	/*  */
 	/* Do not touch stream timing on seamless boot optimization. */
 	if (!pipe_ctx->stream->apply_seamless_boot_optimization)
-		dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
+		hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
 
-	if (dc->hwss.setup_vupdate_interrupt)
-		dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+	if (hws->funcs.setup_vupdate_interrupt)
+		hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
 
 	params.vertical_total_min = stream->adjust.v_total_min;
 	params.vertical_total_max = stream->adjust.v_total_max;
@@ -1371,9 +1373,13 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 	// DRR should set trigger event to monitor surface update event
 	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
 		event_triggers = 0x80;
+	/* Event triggers and num frames initialized for DRR, but can be
+	 * later updated for PSR use. Note DRR trigger events are generated
+	 * regardless of whether the frame count is met.
+	 */
 	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
 		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
-				pipe_ctx->stream_res.tg, event_triggers);
+				pipe_ctx->stream_res.tg, event_triggers, 2);
 
 	if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
 		pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
@@ -1390,7 +1396,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 		pipe_ctx->stream_res.opp,
 		&stream->bit_depth_params,
 		&stream->clamping);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	while (odm_pipe) {
 		odm_pipe->stream_res.opp->funcs->opp_set_dyn_expansion(
 				odm_pipe->stream_res.opp,
@@ -1404,7 +1409,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 				&stream->clamping);
 		odm_pipe = odm_pipe->next_odm_pipe;
 	}
-#endif
 
 	if (!stream->dpms_off)
 		core_link_enable_stream(context, pipe_ctx);
@@ -1438,6 +1442,9 @@ static void power_down_encoders(struct dc *dc)
 			if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
 				dp_receiver_power_ctrl(dc->links[i], false);
 
+		if (signal != SIGNAL_TYPE_EDP)
+			signal = SIGNAL_TYPE_NONE;
+
 		dc->links[i]->link_enc->funcs->disable_output(
 				dc->links[i]->link_enc, signal);
 	}
@@ -1552,9 +1559,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 	bool can_apply_edp_fast_boot = false;
 	bool can_apply_seamless_boot = false;
 	bool keep_edp_vdd_on = false;
+	struct dce_hwseq *hws = dc->hwseq;
 
-	if (dc->hwss.init_pipes)
-		dc->hwss.init_pipes(dc, context);
+	if (hws->funcs.init_pipes)
+		hws->funcs.init_pipes(dc, context);
 
 	edp_stream = get_edp_stream(context);
 
@@ -1591,7 +1599,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 	if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) {
 		if (edp_link_with_sink && !keep_edp_vdd_on) {
 			/*turn off backlight before DP_blank and encoder powered down*/
-			dc->hwss.edp_backlight_control(edp_link_with_sink, false);
+			hws->funcs.edp_backlight_control(edp_link_with_sink, false);
 		}
 		/*resume from S3, no vbios posting, no need to power down again*/
 		power_down_all_hw_blocks(dc);
@@ -1702,6 +1710,8 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
 	struct drr_params params = {0};
 	// DRR should set trigger event to monitor surface update event
 	unsigned int event_triggers = 0x80;
+	// Note: DRR trigger events are generated regardless of whether the frame count is met.
+	unsigned int num_frames = 2;
 
 	params.vertical_total_max = vmax;
 	params.vertical_total_min = vmin;
@@ -1717,7 +1727,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
 		if (vmax != 0 && vmin != 0)
 			pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
 					pipe_ctx[i]->stream_res.tg,
-					event_triggers);
+					event_triggers, num_frames);
 	}
 }
 
@@ -1734,30 +1744,31 @@ static void get_position(struct pipe_ctx **pipe_ctx,
 }
 
 static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
-		int num_pipes, const struct dc_static_screen_events *events)
+		int num_pipes, const struct dc_static_screen_params *params)
 {
 	unsigned int i;
-	unsigned int value = 0;
+	unsigned int triggers = 0;
 
-	if (events->overlay_update)
-		value |= 0x100;
-	if (events->surface_update)
-		value |= 0x80;
-	if (events->cursor_update)
-		value |= 0x2;
-	if (events->force_trigger)
-		value |= 0x1;
+	if (params->triggers.overlay_update)
+		triggers |= 0x100;
+	if (params->triggers.surface_update)
+		triggers |= 0x80;
+	if (params->triggers.cursor_update)
+		triggers |= 0x2;
+	if (params->triggers.force_trigger)
+		triggers |= 0x1;
 
 	if (num_pipes) {
 		struct dc *dc = pipe_ctx[0]->stream->ctx->dc;
 
 		if (dc->fbc_compressor)
-			value |= 0x84;
+			triggers |= 0x84;
 	}
 
 	for (i = 0; i < num_pipes; i++)
 		pipe_ctx[i]->stream_res.tg->funcs->
-			set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+					triggers, params->num_frames);
 }
 
 /*
@@ -2006,13 +2017,14 @@ enum dc_status dce110_apply_ctx_to_hw(
 		struct dc *dc,
 		struct dc_state *context)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	struct dc_bios *dcb = dc->ctx->dc_bios;
 	enum dc_status status;
 	int i;
 
 	/* Reset old context */
 	/* look up the targets that have been removed since last commit */
-	dc->hwss.reset_hw_ctx_wrap(dc, context);
+	hws->funcs.reset_hw_ctx_wrap(dc, context);
 
 	/* Skip applying if no targets */
 	if (context->stream_count <= 0)
@@ -2037,7 +2049,7 @@ enum dc_status dce110_apply_ctx_to_hw(
 			continue;
 		}
 
-		dc->hwss.enable_display_power_gating(
+		hws->funcs.enable_display_power_gating(
 				dc, i, dc->ctx->dc_bios,
 				PIPE_GATING_CONTROL_DISABLE);
 	}
@@ -2346,19 +2358,20 @@ static void init_hw(struct dc *dc)
 	struct transform *xfm;
 	struct abm *abm;
 	struct dmcu *dmcu;
+	struct dce_hwseq *hws = dc->hwseq;
 
 	bp = dc->ctx->dc_bios;
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		xfm = dc->res_pool->transforms[i];
 		xfm->funcs->transform_reset(xfm);
 
-		dc->hwss.enable_display_power_gating(
+		hws->funcs.enable_display_power_gating(
 				dc, i, bp,
 				PIPE_GATING_CONTROL_INIT);
-		dc->hwss.enable_display_power_gating(
+		hws->funcs.enable_display_power_gating(
 				dc, i, bp,
 				PIPE_GATING_CONTROL_DISABLE);
-		dc->hwss.enable_display_pipe_clock_gating(
+		hws->funcs.enable_display_pipe_clock_gating(
 			dc->ctx,
 			true);
 	}
@@ -2444,6 +2457,8 @@ static void dce110_program_front_end_for_pipe(
 	struct xfm_grph_csc_adjustment adjust;
 	struct out_csc_color_matrix tbl_entry;
 	unsigned int i;
+	struct dce_hwseq *hws = dc->hwseq;
+
 	DC_LOGGER_INIT();
 	memset(&tbl_entry, 0, sizeof(tbl_entry));
 
@@ -2502,10 +2517,10 @@ static void dce110_program_front_end_for_pipe(
 	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
 			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
 			pipe_ctx->plane_state->update_flags.bits.gamma_change)
-		dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
 
 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
-		dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
 
 	DC_LOG_SURFACE(
 			"Pipe:%d %p: addr hi:0x%x, "
@@ -2608,6 +2623,7 @@ static void dce110_apply_ctx_for_surface(
 
 static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	int fe_idx = pipe_ctx->plane_res.mi ?
 		pipe_ctx->plane_res.mi->inst : pipe_ctx->pipe_idx;
 
@@ -2615,7 +2631,7 @@ static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream)
 		return;
 
-	dc->hwss.enable_display_power_gating(
+	hws->funcs.enable_display_power_gating(
 		dc, fe_idx, dc->ctx->dc_bios, PIPE_GATING_CONTROL_ENABLE);
 
 	dc->res_pool->transforms[fe_idx]->funcs->transform_reset(
@@ -2704,14 +2720,10 @@ static const struct hw_sequencer_funcs dce110_funcs = {
 	.program_gamut_remap = program_gamut_remap,
 	.program_output_csc = program_output_csc,
 	.init_hw = init_hw,
-	.init_pipes = init_pipes,
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
 	.update_plane_addr = update_plane_addr,
 	.update_pending_status = dce110_update_pending_status,
-	.set_input_transfer_func = dce110_set_input_transfer_func,
-	.set_output_transfer_func = dce110_set_output_transfer_func,
-	.power_down = dce110_power_down,
 	.enable_accelerated_mode = dce110_enable_accelerated_mode,
 	.enable_timing_synchronization = dce110_enable_timing_synchronization,
 	.enable_per_frame_crtc_position_reset = dce110_enable_per_frame_crtc_position_reset,
@@ -2722,8 +2734,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {
 	.blank_stream = dce110_blank_stream,
 	.enable_audio_stream = dce110_enable_audio_stream,
 	.disable_audio_stream = dce110_disable_audio_stream,
-	.enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
-	.enable_display_power_gating = dce110_enable_display_power_gating,
 	.disable_plane = dce110_power_down_fe,
 	.pipe_control_lock = dce_pipe_control_lock,
 	.prepare_bandwidth = dce110_prepare_bandwidth,
@@ -2731,22 +2741,33 @@ static const struct hw_sequencer_funcs dce110_funcs = {
 	.set_drr = set_drr,
 	.get_position = get_position,
 	.set_static_screen_control = set_static_screen_control,
-	.reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
-	.enable_stream_timing = dce110_enable_stream_timing,
-	.disable_stream_gating = NULL,
-	.enable_stream_gating = NULL,
 	.setup_stereo = NULL,
 	.set_avmute = dce110_set_avmute,
 	.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
-	.edp_backlight_control = dce110_edp_backlight_control,
 	.edp_power_control = dce110_edp_power_control,
 	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
 	.set_cursor_position = dce110_set_cursor_position,
 	.set_cursor_attribute = dce110_set_cursor_attribute
 };
 
+static const struct hwseq_private_funcs dce110_private_funcs = {
+	.init_pipes = init_pipes,
+	.update_plane_addr = update_plane_addr,
+	.set_input_transfer_func = dce110_set_input_transfer_func,
+	.set_output_transfer_func = dce110_set_output_transfer_func,
+	.power_down = dce110_power_down,
+	.enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
+	.enable_display_power_gating = dce110_enable_display_power_gating,
+	.reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
+	.enable_stream_timing = dce110_enable_stream_timing,
+	.disable_stream_gating = NULL,
+	.enable_stream_gating = NULL,
+	.edp_backlight_control = dce110_edp_backlight_control,
+};
+
 void dce110_hw_sequencer_construct(struct dc *dc)
 {
 	dc->hwss = dce110_funcs;
+	dc->hwseq->funcs = dce110_private_funcs;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 2f9b7db..26a9c14 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -27,8 +27,8 @@
 #define __DC_HWSS_DCE110_H__
 
 #include "core_types.h"
+#include "hw_sequencer_private.h"
 
-#define GAMMA_HW_POINTS_NUM 256
 struct dc;
 struct dc_state;
 struct dm_pp_display_configuration;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 83a4dbf..bf14e9a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -782,7 +782,7 @@ void dce110_clock_source_destroy(struct clock_source **clk_src)
 	*clk_src = NULL;
 }
 
-static void destruct(struct dce110_resource_pool *pool)
+static void dce110_resource_destruct(struct dce110_resource_pool *pool)
 {
 	unsigned int i;
 
@@ -1097,6 +1097,7 @@ static struct pipe_ctx *dce110_acquire_underlay(
 		struct dc_stream_state *stream)
 {
 	struct dc *dc = stream->ctx->dc;
+	struct dce_hwseq *hws = dc->hwseq;
 	struct resource_context *res_ctx = &context->res_ctx;
 	unsigned int underlay_idx = pool->underlay_pipe_index;
 	struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx];
@@ -1117,7 +1118,7 @@ static struct pipe_ctx *dce110_acquire_underlay(
 		struct tg_color black_color = {0};
 		struct dc_bios *dcb = dc->ctx->dc_bios;
 
-		dc->hwss.enable_display_power_gating(
+		hws->funcs.enable_display_power_gating(
 				dc,
 				pipe_ctx->stream_res.tg->inst,
 				dcb, PIPE_GATING_CONTROL_DISABLE);
@@ -1161,7 +1162,7 @@ static void dce110_destroy_resource_pool(struct resource_pool **pool)
 {
 	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
 
-	destruct(dce110_pool);
+	dce110_resource_destruct(dce110_pool);
 	kfree(dce110_pool);
 	*pool = NULL;
 }
@@ -1313,7 +1314,7 @@ const struct resource_caps *dce110_resource_cap(
 		return &carrizo_resource_cap;
 }
 
-static bool construct(
+static bool dce110_resource_construct(
 	uint8_t num_virtual_links,
 	struct dc *dc,
 	struct dce110_resource_pool *pool,
@@ -1492,7 +1493,7 @@ static bool construct(
 	return true;
 
 res_create_fail:
-	destruct(pool);
+	dce110_resource_destruct(pool);
 	return false;
 }
 
@@ -1507,7 +1508,7 @@ struct resource_pool *dce110_create_resource_pool(
 	if (!pool)
 		return NULL;
 
-	if (construct(num_virtual_links, dc, pool, asic_id))
+	if (dce110_resource_construct(num_virtual_links, dc, pool, asic_id))
 		return &pool->base;
 
 	kfree(pool);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 5f7c2c5..1ea7db8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -469,22 +469,27 @@ void dce110_timing_generator_set_drr(
 
 void dce110_timing_generator_set_static_screen_control(
 	struct timing_generator *tg,
-	uint32_t value)
+	uint32_t event_triggers,
+	uint32_t num_frames)
 {
 	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
 	uint32_t static_screen_cntl = 0;
 	uint32_t addr = 0;
 
+	// Per the register spec, this field only takes an 8-bit value
+	if (num_frames > 0xFF)
+		num_frames = 0xFF;
+
 	addr = CRTC_REG(mmCRTC_STATIC_SCREEN_CONTROL);
 	static_screen_cntl = dm_read_reg(tg->ctx, addr);
 
 	set_reg_field_value(static_screen_cntl,
-				value,
+				event_triggers,
 				CRTC_STATIC_SCREEN_CONTROL,
 				CRTC_STATIC_SCREEN_EVENT_MASK);
 
 	set_reg_field_value(static_screen_cntl,
-				2,
+				num_frames,
 				CRTC_STATIC_SCREEN_CONTROL,
 				CRTC_STATIC_SCREEN_FRAME_COUNT);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
index 768ccf2..d8a5ed7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -231,7 +231,8 @@ void dce110_timing_generator_set_drr(
 
 void dce110_timing_generator_set_static_screen_control(
 	struct timing_generator *tg,
-	uint32_t value);
+	uint32_t event_triggers,
+	uint32_t num_frames);
 
 void dce110_timing_generator_get_crtc_scanoutpos(
 	struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
index 1e4a7c1..19873ee1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
@@ -158,6 +158,6 @@ void dce112_hw_sequencer_construct(struct dc *dc)
 	 * structure
 	 */
 	dce110_hw_sequencer_construct(dc);
-	dc->hwss.enable_display_power_gating = dce112_enable_display_power_gating;
+	dc->hwseq->funcs.enable_display_power_gating = dce112_enable_display_power_gating;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
index e646f4a..943f1b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
@@ -27,6 +27,7 @@
 #define __DC_HWSS_DCE112_H__
 
 #include "core_types.h"
+#include "hw_sequencer_private.h"
 
 struct dc;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 97dcc5d..700ad8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -744,7 +744,7 @@ void dce112_clock_source_destroy(struct clock_source **clk_src)
 	*clk_src = NULL;
 }
 
-static void destruct(struct dce110_resource_pool *pool)
+static void dce112_resource_destruct(struct dce110_resource_pool *pool)
 {
 	unsigned int i;
 
@@ -1013,7 +1013,7 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool)
 {
 	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
 
-	destruct(dce110_pool);
+	dce112_resource_destruct(dce110_pool);
 	kfree(dce110_pool);
 	*pool = NULL;
 }
@@ -1186,7 +1186,7 @@ const struct resource_caps *dce112_resource_cap(
 		return &polaris_10_resource_cap;
 }
 
-static bool construct(
+static bool dce112_resource_construct(
 	uint8_t num_virtual_links,
 	struct dc *dc,
 	struct dce110_resource_pool *pool)
@@ -1372,7 +1372,7 @@ static bool construct(
 	return true;
 
 res_create_fail:
-	destruct(pool);
+	dce112_resource_destruct(pool);
 	return false;
 }
 
@@ -1386,7 +1386,7 @@ struct resource_pool *dce112_create_resource_pool(
 	if (!pool)
 		return NULL;
 
-	if (construct(num_virtual_links, dc, pool))
+	if (dce112_resource_construct(num_virtual_links, dc, pool))
 		return &pool->base;
 
 	kfree(pool);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 1ca3092..66a13aa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -265,7 +265,7 @@ void dce120_hw_sequencer_construct(struct dc *dc)
 	 * structure
 	 */
 	dce110_hw_sequencer_construct(dc);
-	dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
+	dc->hwseq->funcs.enable_display_power_gating = dce120_enable_display_power_gating;
 	dc->hwss.update_dchub = dce120_update_dchub;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
index c51afbd..bc02453 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
@@ -27,6 +27,7 @@
 #define __DC_HWSS_DCE120_H__
 
 #include "core_types.h"
+#include "hw_sequencer_private.h"
 
 struct dc;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 63543f6..53ab88e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -63,8 +63,8 @@
 #include "soc15_hw_ip.h"
 #include "vega10_ip_offset.h"
 #include "nbio/nbio_6_1_offset.h"
-#include "mmhub/mmhub_9_4_0_offset.h"
-#include "mmhub/mmhub_9_4_0_sh_mask.h"
+#include "mmhub/mmhub_1_0_offset.h"
+#include "mmhub/mmhub_1_0_sh_mask.h"
 #include "reg_helper.h"
 
 #include "dce100/dce100_resource.h"
@@ -587,7 +587,7 @@ static void dce120_transform_destroy(struct transform **xfm)
 	*xfm = NULL;
 }
 
-static void destruct(struct dce110_resource_pool *pool)
+static void dce120_resource_destruct(struct dce110_resource_pool *pool)
 {
 	unsigned int i;
 
@@ -872,7 +872,7 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool)
 {
 	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
 
-	destruct(dce110_pool);
+	dce120_resource_destruct(dce110_pool);
 	kfree(dce110_pool);
 	*pool = NULL;
 }
@@ -1024,7 +1024,7 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
 	return value;
 }
 
-static bool construct(
+static bool dce120_resource_construct(
 	uint8_t num_virtual_links,
 	struct dc *dc,
 	struct dce110_resource_pool *pool)
@@ -1237,7 +1237,7 @@ static bool construct(
 clk_src_create_fail:
 res_create_fail:
 
-	destruct(pool);
+	dce120_resource_destruct(pool);
 
 	return false;
 }
@@ -1252,7 +1252,7 @@ struct resource_pool *dce120_create_resource_pool(
 	if (!pool)
 		return NULL;
 
-	if (construct(num_virtual_links, dc, pool))
+	if (dce120_resource_construct(num_virtual_links, dc, pool))
 		return &pool->base;
 
 	kfree(pool);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 098e569..82bc4e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -819,13 +819,18 @@ void dce120_tg_set_colors(struct timing_generator *tg,
 
 static void dce120_timing_generator_set_static_screen_control(
 	struct timing_generator *tg,
-	uint32_t value)
+	uint32_t event_triggers,
+	uint32_t num_frames)
 {
 	struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
 
+	// Per the register spec, this field only takes an 8-bit value
+	if (num_frames > 0xFF)
+		num_frames = 0xFF;
+
 	CRTC_REG_UPDATE_2(CRTC0_CRTC_STATIC_SCREEN_CONTROL,
-			CRTC_STATIC_SCREEN_EVENT_MASK, value,
-			CRTC_STATIC_SCREEN_FRAME_COUNT, 2);
+			CRTC_STATIC_SCREEN_EVENT_MASK, event_triggers,
+			CRTC_STATIC_SCREEN_FRAME_COUNT, num_frames);
 }
 
 void dce120_timing_generator_set_test_pattern(
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index c454317..893261c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -74,7 +74,7 @@ void dce80_hw_sequencer_construct(struct dc *dc)
 {
 	dce110_hw_sequencer_construct(dc);
 
-	dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
+	dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating;
 	dc->hwss.pipe_control_lock = dce_pipe_control_lock;
 	dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
 	dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
index 7a1b31d..e43af83 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
@@ -27,6 +27,7 @@
 #define __DC_HWSS_DCE80_H__
 
 #include "core_types.h"
+#include "hw_sequencer_private.h"
 
 struct dc;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 3e8d4b4..2ad5c28 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -773,7 +773,7 @@ static struct input_pixel_processor *dce80_ipp_create(
 	return &ipp->base;
 }
 
-static void destruct(struct dce110_resource_pool *pool)
+static void dce80_resource_destruct(struct dce110_resource_pool *pool)
 {
 	unsigned int i;
 
@@ -901,7 +901,7 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool)
 {
 	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
 
-	destruct(dce110_pool);
+	dce80_resource_destruct(dce110_pool);
 	kfree(dce110_pool);
 	*pool = NULL;
 }
@@ -1093,7 +1093,7 @@ static bool dce80_construct(
 	return true;
 
 res_create_fail:
-	destruct(pool);
+	dce80_resource_destruct(pool);
 	return false;
 }
 
@@ -1290,7 +1290,7 @@ static bool dce81_construct(
 	return true;
 
 res_create_fail:
-	destruct(pool);
+	dce80_resource_destruct(pool);
 	return false;
 }
 
@@ -1483,7 +1483,7 @@ static bool dce83_construct(
 	return true;
 
 res_create_fail:
-	destruct(pool);
+	dce80_resource_destruct(pool);
 	return false;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 032f872..62ad1a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -22,7 +22,8 @@
 #
 # Makefile for DCN.
 
-DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
+DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
+		dcn10_hw_sequencer_debug.o \
 		dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
 		dcn10_hubp.o dcn10_mpc.o \
 		dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 997e958..0e682b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -290,12 +290,8 @@ void dpp1_cnv_setup (
 		enum surface_pixel_format format,
 		enum expansion_mode mode,
 		struct dc_csc_transform input_csc_color_matrix,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 		enum dc_color_space input_color_space,
 		struct cnv_alpha_2bit_lut *alpha_2bit_lut)
-#else
-		enum dc_color_space input_color_space)
-#endif
 {
 	uint32_t pixel_format;
 	uint32_t alpha_en;
@@ -542,11 +538,9 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
 		.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
 		.dpp_dppclk_control = dpp1_dppclk_control,
 		.dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 		.dpp_program_blnd_lut = NULL,
 		.dpp_program_shaper_lut = NULL,
 		.dpp_program_3dlut = NULL
-#endif
 };
 
 static struct dpp_caps dcn10_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 1d4a7d6..2edf566 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1486,12 +1486,8 @@ void dpp1_cnv_setup (
 		enum surface_pixel_format format,
 		enum expansion_mode mode,
 		struct dc_csc_transform input_csc_color_matrix,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 		enum dc_color_space input_color_space,
 		struct cnv_alpha_2bit_lut *alpha_2bit_lut);
-#else
-		enum dc_color_space input_color_space);
-#endif
 
 void dpp1_full_bypass(struct dpp *dpp_base);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index aa0c7a7..4d3f7d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -88,26 +88,6 @@ enum dscl_mode_sel {
 	DSCL_MODE_DSCL_BYPASS = 6
 };
 
-static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = {
-	{COLOR_SPACE_SRGB,
-		{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
-	{COLOR_SPACE_SRGB_LIMITED,
-		{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
-	{COLOR_SPACE_YCBCR601,
-		{0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
-						0, 0x2000, 0x38b4, 0xe3a6} },
-	{COLOR_SPACE_YCBCR601_LIMITED,
-		{0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
-						0, 0x2568, 0x40de, 0xdd3a} },
-	{COLOR_SPACE_YCBCR709,
-		{0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
-						0x2000, 0x3b61, 0xe24f} },
-
-	{COLOR_SPACE_YCBCR709_LIMITED,
-		{0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
-						0x2568, 0x43ee, 0xdbb2} }
-};
-
 static void program_gamut_remap(
 		struct dcn10_dpp *dpp,
 		const uint16_t *regval,
@@ -352,6 +332,8 @@ void dpp1_cm_program_regamma_lut(struct dpp *dpp_base,
 	uint32_t i;
 	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
 
+	REG_SEQ_START();
+
 	for (i = 0 ; i < num; i++) {
 		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);
 		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg);
@@ -626,10 +608,16 @@ void dpp1_set_degamma(
 	case IPP_DEGAMMA_MODE_HW_xvYCC:
 		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
 			break;
+	case IPP_DEGAMMA_MODE_USER_PWL:
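+		// LUT mode 3 selects the user-programmed PWL degamma curve.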
+		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+		break;
 	default:
 		BREAK_TO_DEBUGGER();
 		break;
 	}
+
+	REG_SEQ_SUBMIT();
+	REG_SEQ_WAIT_DONE();
 }
 
 void dpp1_degamma_ram_select(
@@ -731,10 +719,8 @@ void dpp1_full_bypass(struct dpp *dpp_base)
 	/* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
 	if (dpp->tf_mask->CM_BYPASS_EN)
 		REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	else
 		REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);
-#endif
 
 	/* Setting degamma bypass for now */
 	REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index d67e0ab..fce37c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -218,14 +218,12 @@ static void dpp1_dscl_set_lb(
 			INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
 			LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
 	}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	else {
 		/* DSCL caps: pixel data processed in float format */
 		REG_SET_2(LB_DATA_FORMAT, 0,
 			INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
 			LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
 	}
-#endif
 
 	REG_SET_2(LB_MEMORY_CTRL, 0,
 		MEMORY_CONFIG, mem_size_config,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
index 374cc9a..b6391a5e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
@@ -23,7 +23,7 @@
  *
  */
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 
 #include "reg_helper.h"
 #include "resource.h"
@@ -109,9 +109,7 @@ const struct dwbc_funcs dcn10_dwbc_funcs = {
 	.update				= NULL,
 	.set_stereo			= NULL,
 	.set_new_content		= NULL,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	.set_warmup			= NULL,
-#endif
 	.dwb_set_scaler			= NULL,
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
index c175edd..d56ea7c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
@@ -24,7 +24,7 @@
 #ifndef __DC_DWBC_DCN10_H__
 #define __DC_DWBC_DCN10_H__
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 
 /* DCN */
 #define BASE_INNER(seg) \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index a02c10e..f36a0d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -930,6 +930,9 @@ static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
 		output->grph.rgb.max_compressed_blk_size = 64;
 		output->grph.rgb.independent_64b_blks = true;
 		break;
+	default:
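+		// Unhandled case; flag it in debug builds.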
+		ASSERT(false);
+		break;
 	}
 
 	output->capable = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 69d903d..af577512 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -121,7 +121,6 @@ struct dcn_hubbub_registers {
 	uint32_t DCN_VM_AGP_BASE;
 	uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB;
 	uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_A;
 	uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_B;
 	uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_C;
@@ -140,7 +139,6 @@ struct dcn_hubbub_registers {
 	uint32_t DCHVM_CLK_CTRL;
 	uint32_t DCHVM_RIOMMU_CTRL0;
 	uint32_t DCHVM_RIOMMU_STAT0;
-#endif
 };
 
 /* set field name */
@@ -232,7 +230,6 @@ struct dcn_hubbub_registers {
 		type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\
 		type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #define HUBBUB_HVM_REG_FIELD_LIST(type) \
 		type DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD;\
 		type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A;\
@@ -278,22 +275,17 @@ struct dcn_hubbub_registers {
 		type HOSTVM_POWERSTATUS; \
 		type RIOMMU_ACTIVE; \
 		type HOSTVM_PREFETCH_DONE
-#endif
 
 struct dcn_hubbub_shift {
 	DCN_HUBBUB_REG_FIELD_LIST(uint8_t);
 	HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	HUBBUB_HVM_REG_FIELD_LIST(uint8_t);
-#endif
 };
 
 struct dcn_hubbub_mask {
 	DCN_HUBBUB_REG_FIELD_LIST(uint32_t);
 	HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	HUBBUB_HVM_REG_FIELD_LIST(uint32_t);
-#endif
 };
 
 struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 14d1be6..31b6473 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -306,7 +306,6 @@ void hubp1_program_pixel_format(
 		REG_UPDATE(DCSURF_SURFACE_CONFIG,
 				SURFACE_PIXEL_FORMAT, 12);
 		break;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
 		REG_UPDATE(DCSURF_SURFACE_CONFIG,
 				SURFACE_PIXEL_FORMAT, 112);
@@ -327,7 +326,6 @@ void hubp1_program_pixel_format(
 		REG_UPDATE(DCSURF_SURFACE_CONFIG,
 				SURFACE_PIXEL_FORMAT, 119);
 		break;
-#endif
 	default:
 		BREAK_TO_DEBUGGER();
 		break;
@@ -1014,6 +1012,9 @@ void hubp1_read_state_common(struct hubp *hubp)
 			HUBP_TTU_DISABLE, &s->ttu_disable,
 			HUBP_UNDERFLOW_STATUS, &s->underflow_status);
 
+	REG_GET(HUBP_CLK_CNTL,
+			HUBP_CLOCK_ENABLE, &s->clock_en);
+
 	REG_GET(DCN_GLOBAL_TTU_CNTL,
 			MIN_TTU_VBLANK, &s->min_ttu_vblank);
 
@@ -1248,10 +1249,8 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
 	.hubp_get_underflow_status = hubp1_get_underflow_status,
 	.hubp_init = hubp1_init,
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	.dmdata_set_attributes = NULL,
 	.dmdata_load = NULL,
-#endif
 };
 
 /*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index ae70d9c..780af5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -670,6 +670,7 @@ struct dcn_hubp_state {
 	uint32_t sw_mode;
 	uint32_t dcc_en;
 	uint32_t blank_en;
+	uint32_t clock_en;
 	uint32_t underflow_status;
 	uint32_t ttu_disable;
 	uint32_t min_ttu_vblank;
@@ -728,13 +729,11 @@ void hubp1_dcc_control(struct hubp *hubp,
 		bool enable,
 		enum hubp_ind_block_size independent_64b_blks);
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 bool hubp1_program_surface_flip_and_addr(
 	struct hubp *hubp,
 	const struct dc_plane_address *address,
 	bool flip_immediate);
 
-#endif
 bool hubp1_is_flip_pending(struct hubp *hubp);
 
 void hubp1_cursor_set_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index eb91432..1008ac8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -25,17 +25,18 @@
 
 #include <linux/delay.h>
 #include "dm_services.h"
+#include "basics/dc_common.h"
 #include "core_types.h"
 #include "resource.h"
 #include "custom_float.h"
 #include "dcn10_hw_sequencer.h"
-#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10_hw_sequencer_debug.h"
 #include "dce/dce_hwseq.h"
 #include "abm.h"
 #include "dmcu.h"
 #include "dcn10_optc.h"
-#include "dcn10/dcn10_dpp.h"
-#include "dcn10/dcn10_mpc.h"
+#include "dcn10_dpp.h"
+#include "dcn10_mpc.h"
 #include "timing_generator.h"
 #include "opp.h"
 #include "ipp.h"
@@ -49,9 +50,7 @@
 #include "clk_mgr.h"
 
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 #include "dsc.h"
-#endif
 
 #define DC_LOGGER_INIT(logger)
 
@@ -68,6 +67,8 @@
 #define DTN_INFO_MICRO_SEC(ref_cycle) \
 	print_microsec(dc_ctx, log_ctx, ref_cycle)
 
+#define GAMMA_HW_POINTS_NUM 256
+
 void print_microsec(struct dc_context *dc_ctx,
 	struct dc_log_buffer_ctx *log_ctx,
 	uint32_t ref_cycle)
@@ -81,6 +82,33 @@ void print_microsec(struct dc_context *dc_ctx,
 			us_x10 % frac);
 }
 
+static void dcn10_lock_all_pipes(struct dc *dc,
+	struct dc_state *context,
+	bool lock)
+{
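+	// Lock or unlock the timing generator of every enabled top pipe in the given context.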
+	struct pipe_ctx *pipe_ctx;
+	struct timing_generator *tg;
+	int i;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		pipe_ctx = &context->res_ctx.pipe_ctx[i];
+		tg = pipe_ctx->stream_res.tg;
+		/*
+		 * Only lock the top pipe's tg to prevent redundant
+		 * (un)locking. Also skip if pipe is disabled.
+		 */
+		if (pipe_ctx->top_pipe ||
+		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
+		    !tg->funcs->is_tg_enabled(tg))
+			continue;
+
+		if (lock)
+			tg->funcs->lock(tg);
+		else
+			tg->funcs->unlock(tg);
+	}
+}
+
 static void log_mpc_crc(struct dc *dc,
 	struct dc_log_buffer_ctx *log_ctx)
 {
@@ -129,9 +157,8 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
 	struct resource_pool *pool = dc->res_pool;
 	int i;
 
-	DTN_INFO("HUBP:  format  addr_hi  width  height"
-			"  rot  mir  sw_mode  dcc_en  blank_en  ttu_dis  underflow"
-			"   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
+	DTN_INFO(
+		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
 	for (i = 0; i < pool->pipe_count; i++) {
 		struct hubp *hubp = pool->hubps[i];
 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
@@ -139,8 +166,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
 		hubp->funcs->hubp_read_state(hubp);
 
 		if (!s->blank_en) {
-			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh"
-					"  %6d  %8d  %7d  %8xh",
+			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
 					hubp->inst,
 					s->pixel_format,
 					s->inuse_addr_hi,
@@ -151,6 +177,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
 					s->sw_mode,
 					s->dcc_en,
 					s->blank_en,
+					s->clock_en,
 					s->ttu_disable,
 					s->underflow_status);
 			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
@@ -308,21 +335,31 @@ void dcn10_log_hw_state(struct dc *dc,
 	}
 	DTN_INFO("\n");
 
-	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel"
-			"  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow\n");
+	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");
 
 	for (i = 0; i < pool->timing_generator_count; i++) {
 		struct timing_generator *tg = pool->timing_generators[i];
 		struct dcn_otg_state s = {0};
-
+		/* Read shared OTG state registers for all DCNx */
 		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
 
+		/*
+		 * For DCN2 and greater, a register on the OPP is used to
+		 * determine if the CRTC is blanked instead of the OTG. So use
+		 * dpg_is_blanked() if it exists, otherwise fall back on the OTG.
+		 *
+		 * TODO: Implement DCN-specific read_otg_state hooks.
+		 */
+		if (pool->opps[i]->funcs->dpg_is_blanked)
+			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
+		else
+			s.blank_enabled = tg->funcs->is_blanked(tg);
+
 		//only print if OTG master is enabled
 		if ((s.otg_enabled & 1) == 0)
 			continue;
 
-		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d"
-				" %5d %5d %5d %5d  %9d\n",
+		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
 				tg->inst,
 				s.v_blank_start,
 				s.v_blank_end,
@@ -340,7 +377,8 @@ void dcn10_log_hw_state(struct dc *dc,
 				s.h_sync_a_pol,
 				s.h_total,
 				s.v_total,
-				s.underflow_occurred_status);
+				s.underflow_occurred_status,
+				s.blank_enabled);
 
 		// Clear underflow for debug purposes
 		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
@@ -350,7 +388,6 @@ void dcn10_log_hw_state(struct dc *dc,
 	}
 	DTN_INFO("\n");
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
 	for (i = 0; i < pool->res_cap->num_dsc; i++) {
 		struct display_stream_compressor *dsc = pool->dscs[i];
@@ -387,7 +424,7 @@ void dcn10_log_hw_state(struct dc *dc,
 	}
 	DTN_INFO("\n");
 
-	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS\n");
+	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
 	for (i = 0; i < dc->link_count; i++) {
 		struct link_encoder *lenc = dc->links[i]->link_enc;
 
@@ -395,16 +432,16 @@ void dcn10_log_hw_state(struct dc *dc,
 
 		if (lenc->funcs->read_state) {
 			lenc->funcs->read_state(lenc, &s);
-			DTN_INFO("[%-3d]: %-12d %-22d %-22d\n",
+			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
 				i,
 				s.dphy_fec_en,
 				s.dphy_fec_ready_shadow,
-				s.dphy_fec_active_status);
+				s.dphy_fec_active_status,
+				s.dp_link_training_complete);
 			DTN_INFO("\n");
 		}
 	}
 	DTN_INFO("\n");
-#endif
 
 	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
 		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
@@ -438,14 +475,14 @@ bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	return false;
 }
 
-static void dcn10_enable_power_gating_plane(
+void dcn10_enable_power_gating_plane(
 	struct dce_hwseq *hws,
 	bool enable)
 {
-	bool force_on = 1; /* disable power gating */
+	bool force_on = true; /* disable power gating */
 
 	if (enable)
-		force_on = 0;
+		force_on = false;
 
 	/* DCHUBP0/1/2/3 */
 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
@@ -460,7 +497,7 @@ static void dcn10_enable_power_gating_plane(
 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
 }
 
-static void dcn10_disable_vga(
+void dcn10_disable_vga(
 	struct dce_hwseq *hws)
 {
 	unsigned int in_vga1_mode = 0;
@@ -493,7 +530,7 @@ static void dcn10_disable_vga(
 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
 }
 
-static void dcn10_dpp_pg_control(
+void dcn10_dpp_pg_control(
 		struct dce_hwseq *hws,
 		unsigned int dpp_inst,
 		bool power_on)
@@ -545,7 +582,7 @@ static void dcn10_dpp_pg_control(
 	}
 }
 
-static void dcn10_hubp_pg_control(
+void dcn10_hubp_pg_control(
 		struct dce_hwseq *hws,
 		unsigned int hubp_inst,
 		bool power_on)
@@ -605,8 +642,8 @@ static void power_on_plane(
 	if (REG(DC_IP_REQUEST_CNTL)) {
 		REG_SET(DC_IP_REQUEST_CNTL, 0,
 				IP_REQUEST_EN, 1);
-		hws->ctx->dc->hwss.dpp_pg_control(hws, plane_id, true);
-		hws->ctx->dc->hwss.hubp_pg_control(hws, plane_id, true);
+		hws->funcs.dpp_pg_control(hws, plane_id, true);
+		hws->funcs.hubp_pg_control(hws, plane_id, true);
 		REG_SET(DC_IP_REQUEST_CNTL, 0,
 				IP_REQUEST_EN, 0);
 		DC_LOG_DEBUG(
@@ -627,7 +664,7 @@ static void undo_DEGVIDCN10_253_wa(struct dc *dc)
 	REG_SET(DC_IP_REQUEST_CNTL, 0,
 			IP_REQUEST_EN, 1);
 
-	dc->hwss.hubp_pg_control(hws, 0, false);
+	hws->funcs.hubp_pg_control(hws, 0, false);
 	REG_SET(DC_IP_REQUEST_CNTL, 0,
 			IP_REQUEST_EN, 0);
 
@@ -656,7 +693,7 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc)
 	REG_SET(DC_IP_REQUEST_CNTL, 0,
 			IP_REQUEST_EN, 1);
 
-	dc->hwss.hubp_pg_control(hws, 0, true);
+	hws->funcs.hubp_pg_control(hws, 0, true);
 	REG_SET(DC_IP_REQUEST_CNTL, 0,
 			IP_REQUEST_EN, 0);
 
@@ -664,16 +701,16 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc)
 	hws->wa_state.DEGVIDCN10_253_applied = true;
 }
 
-static void dcn10_bios_golden_init(struct dc *dc)
+void dcn10_bios_golden_init(struct dc *dc)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	struct dc_bios *bp = dc->ctx->dc_bios;
 	int i;
 	bool allow_self_fresh_force_enable = true;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
-	if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc))
+	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
 		return;
-#endif
+
 	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
 		allow_self_fresh_force_enable =
 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
@@ -732,7 +769,7 @@ static void false_optc_underflow_wa(
 		tg->funcs->clear_optc_underflow(tg);
 }
 
-static enum dc_status dcn10_enable_stream_timing(
+enum dc_status dcn10_enable_stream_timing(
 		struct pipe_ctx *pipe_ctx,
 		struct dc_state *context,
 		struct dc *dc)
@@ -823,6 +860,7 @@ static void dcn10_reset_back_end_for_pipe(
 		struct dc_state *context)
 {
 	int i;
+	struct dc_link *link;
 	DC_LOGGER_INIT(dc->ctx->logger);
 	if (pipe_ctx->stream_res.stream_enc == NULL) {
 		pipe_ctx->stream = NULL;
@@ -830,8 +868,14 @@ static void dcn10_reset_back_end_for_pipe(
 	}
 
 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-		/* DPMS may already disable */
-		if (!pipe_ctx->stream->dpms_off)
+		link = pipe_ctx->stream->link;
+		/* DPMS may already be disabled, or the
+		 * dpms_off status may be incorrect due to the fastboot
+		 * feature. When the system resumes from S4 with only the
+		 * second screen, dpms_off would be true but
+		 * VBIOS lit up eDP, so check the link status too.
+		 */
+		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
 			core_link_disable_stream(pipe_ctx);
 		else if (pipe_ctx->stream_res.audio)
 			dc->hwss.disable_audio_stream(pipe_ctx);
@@ -978,8 +1022,9 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc)
 }
 
 /* trigger HW to start disconnect plane from stream on the next vsync */
-void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
 	struct mpc *mpc = dc->res_pool->mpc;
@@ -1004,10 +1049,10 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
 		hubp->funcs->hubp_disconnect(hubp);
 
 	if (dc->debug.sanity_checks)
-		dcn10_verify_allow_pstate_change_high(dc);
+		hws->funcs.verify_allow_pstate_change_high(dc);
 }
 
-static void dcn10_plane_atomic_power_down(struct dc *dc,
+void dcn10_plane_atomic_power_down(struct dc *dc,
 		struct dpp *dpp,
 		struct hubp *hubp)
 {
@@ -1017,8 +1062,8 @@ static void dcn10_plane_atomic_power_down(struct dc *dc,
 	if (REG(DC_IP_REQUEST_CNTL)) {
 		REG_SET(DC_IP_REQUEST_CNTL, 0,
 				IP_REQUEST_EN, 1);
-		dc->hwss.dpp_pg_control(hws, dpp->inst, false);
-		dc->hwss.hubp_pg_control(hws, hubp->inst, false);
+		hws->funcs.dpp_pg_control(hws, dpp->inst, false);
+		hws->funcs.hubp_pg_control(hws, hubp->inst, false);
 		dpp->funcs->dpp_reset(dpp);
 		REG_SET(DC_IP_REQUEST_CNTL, 0,
 				IP_REQUEST_EN, 0);
@@ -1030,8 +1075,9 @@ static void dcn10_plane_atomic_power_down(struct dc *dc,
 /* disable HW used by plane.
  * note:  cannot disable until disconnect is complete
  */
-static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
 	int opp_id = hubp->opp_id;
@@ -1050,7 +1096,7 @@ static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	hubp->power_gated = true;
 	dc->optimized_required = false; /* We're powering off, no need to optimize */
 
-	dc->hwss.plane_atomic_power_down(dc,
+	hws->funcs.plane_atomic_power_down(dc,
 			pipe_ctx->plane_res.dpp,
 			pipe_ctx->plane_res.hubp);
 
@@ -1062,14 +1108,15 @@ static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	pipe_ctx->plane_state = NULL;
 }
 
-static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	DC_LOGGER_INIT(dc->ctx->logger);
 
 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
 		return;
 
-	dc->hwss.plane_atomic_disable(dc, pipe_ctx);
+	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
 
 	apply_DEGVIDCN10_253_wa(dc);
 
@@ -1077,9 +1124,10 @@ static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
 					pipe_ctx->pipe_idx);
 }
 
-static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
+void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
 {
 	int i;
+	struct dce_hwseq *hws = dc->hwseq;
 	bool can_apply_seamless_boot = false;
 
 	for (i = 0; i < context->stream_count; i++) {
@@ -1104,8 +1152,8 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
 		 * command table.
 		 */
 		if (tg->funcs->is_tg_enabled(tg)) {
-			if (dc->hwss.init_blank != NULL) {
-				dc->hwss.init_blank(dc, tg);
+			if (hws->funcs.init_blank != NULL) {
+				hws->funcs.init_blank(dc, tg);
 				tg->funcs->lock(tg);
 			} else {
 				tg->funcs->lock(tg);
@@ -1115,7 +1163,8 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
 		}
 	}
 
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+	/* num_opp will be equal to the number of MPCCs */
+	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
 		/* Cannot reset the MPC mux if seamless boot */
@@ -1139,8 +1188,14 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
 		if (can_apply_seamless_boot &&
 			pipe_ctx->stream != NULL &&
 			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
-				pipe_ctx->stream_res.tg))
+				pipe_ctx->stream_res.tg)) {
+			// Enable double buffering for OTG_BLANK, whether or not
+			// seamless boot is enabled, to suppress global sync
+			// signals when the OTG is blanked. This prevents the pipe
+			// from requesting data while in PSR.
+			tg->funcs->tg_init(tg);
 			continue;
+		}
 
 		/* Disable on the current state so the new one isn't cleared. */
 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -1162,7 +1217,7 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
 
-		dc->hwss.plane_atomic_disconnect(dc, pipe_ctx);
+		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
 
 		if (tg->funcs->is_tg_enabled(tg))
 			tg->funcs->unlock(tg);
@@ -1176,7 +1231,7 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
 	}
 }
 
-static void dcn10_init_hw(struct dc *dc)
+void dcn10_init_hw(struct dc *dc)
 {
 	int i;
 	struct abm *abm = dc->res_pool->abm;
@@ -1208,15 +1263,15 @@ static void dcn10_init_hw(struct dc *dc)
 		}
 
 		//Enable ability to power gate / don't force power on permanently
-		dc->hwss.enable_power_gating_plane(hws, true);
+		hws->funcs.enable_power_gating_plane(hws, true);
 
 		return;
 	}
 
 	if (!dcb->funcs->is_accelerated_mode(dcb))
-		dc->hwss.disable_vga(dc->hwseq);
+		hws->funcs.disable_vga(dc->hwseq);
 
-	dc->hwss.bios_golden_init(dc);
+	hws->funcs.bios_golden_init(dc);
 	if (dc->ctx->dc_bios->fw_info_valid) {
 		res_pool->ref_clocks.xtalin_clock_inKhz =
 				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
@@ -1258,11 +1313,9 @@ static void dcn10_init_hw(struct dc *dc)
 	}
 
 	/* Power gate DSCs */
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	for (i = 0; i < res_pool->res_cap->num_dsc; i++)
-		if (dc->hwss.dsc_pg_control != NULL)
-			dc->hwss.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
-#endif
+		if (hws->funcs.dsc_pg_control != NULL)
+			hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
 
 	/* If taking control over from VBIOS, we may want to optimize our first
 	 * mode set, so we need to skip powering down pipes until we know which
@@ -1271,7 +1324,7 @@ static void dcn10_init_hw(struct dc *dc)
 	 * everything down.
 	 */
 	if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
-		dc->hwss.init_pipes(dc, dc->current_state);
+		hws->funcs.init_pipes(dc, dc->current_state);
 	}
 
 	for (i = 0; i < res_pool->audio_count; i++) {
@@ -1285,7 +1338,7 @@ static void dcn10_init_hw(struct dc *dc)
 		abm->funcs->abm_init(abm);
 	}
 
-	if (dmcu != NULL)
+	if (dmcu != NULL && !dmcu->auto_load_dmcu)
 		dmcu->funcs->dmcu_init(dmcu);
 
 	if (abm != NULL && dmcu != NULL)
@@ -1303,18 +1356,19 @@ static void dcn10_init_hw(struct dc *dc)
 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
 	}
 
-	dc->hwss.enable_power_gating_plane(dc->hwseq, true);
+	hws->funcs.enable_power_gating_plane(dc->hwseq, true);
 
 	if (dc->clk_mgr->funcs->notify_wm_ranges)
 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
 
 }
 
-static void dcn10_reset_hw_ctx_wrap(
+void dcn10_reset_hw_ctx_wrap(
 		struct dc *dc,
 		struct dc_state *context)
 {
 	int i;
+	struct dce_hwseq *hws = dc->hwseq;
 
 	/* Reset Back End*/
 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
@@ -1333,8 +1387,8 @@ static void dcn10_reset_hw_ctx_wrap(
 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
 
 			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
-			if (dc->hwss.enable_stream_gating)
-				dc->hwss.enable_stream_gating(dc, pipe_ctx);
+			if (hws->funcs.enable_stream_gating)
+				hws->funcs.enable_stream_gating(dc, pipe_ctx);
 			if (old_clk)
 				old_clk->funcs->cs_power_down(old_clk);
 		}
@@ -1367,9 +1421,7 @@ static bool patch_address_for_sbs_tb_stereo(
 	return false;
 }
 
-
-
-static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
 	bool addr_patched = false;
 	PHYSICAL_ADDRESS_LOC addr;
@@ -1394,8 +1446,8 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c
 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
 }
 
-static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
-					  const struct dc_plane_state *plane_state)
+bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+			const struct dc_plane_state *plane_state)
 {
 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
 	const struct dc_transfer_func *tf = NULL;
@@ -1427,6 +1479,11 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
 			break;
 		case TRANSFER_FUNCTION_PQ:
+			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
+			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
+			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
+			result = true;
+			break;
 		default:
 			result = false;
 			break;
@@ -1472,9 +1529,8 @@ static void log_tf(struct dc_context *ctx,
 	}
 }
 
-static bool
-dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
-			       const struct dc_stream_state *stream)
+bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+				const struct dc_stream_state *stream)
 {
 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
 
@@ -1510,11 +1566,13 @@ dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
 	return true;
 }
 
-static void dcn10_pipe_control_lock(
+void dcn10_pipe_control_lock(
 	struct dc *dc,
 	struct pipe_ctx *pipe,
 	bool lock)
 {
+	struct dce_hwseq *hws = dc->hwseq;
+
 	/* use TG master update lock to lock everything on the TG
 	 * therefore only top pipe need to lock
 	 */
@@ -1522,7 +1580,7 @@ static void dcn10_pipe_control_lock(
 		return;
 
 	if (dc->debug.sanity_checks)
-		dcn10_verify_allow_pstate_change_high(dc);
+		hws->funcs.verify_allow_pstate_change_high(dc);
 
 	if (lock)
 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
@@ -1530,7 +1588,7 @@ static void dcn10_pipe_control_lock(
 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
 
 	if (dc->debug.sanity_checks)
-		dcn10_verify_allow_pstate_change_high(dc);
+		hws->funcs.verify_allow_pstate_change_high(dc);
 }
 
 static bool wait_for_reset_trigger_to_occur(
@@ -1570,7 +1628,7 @@ static bool wait_for_reset_trigger_to_occur(
 	return rc;
 }
 
-static void dcn10_enable_timing_synchronization(
+void dcn10_enable_timing_synchronization(
 	struct dc *dc,
 	int group_index,
 	int group_size,
@@ -1600,7 +1658,7 @@ static void dcn10_enable_timing_synchronization(
 	DC_SYNC_INFO("Sync complete\n");
 }
 
-static void dcn10_enable_per_frame_crtc_position_reset(
+void dcn10_enable_per_frame_crtc_position_reset(
 	struct dc *dc,
 	int group_size,
 	struct pipe_ctx *grouped_pipes[])
@@ -1625,10 +1683,10 @@ static void dcn10_enable_per_frame_crtc_position_reset(
 }
 
 /*static void print_rq_dlg_ttu(
-		struct dc *core_dc,
+		struct dc *dc,
 		struct pipe_ctx *pipe_ctx)
 {
-	DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
+	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
 			"\n============== DML TTU Output parameters [%d] ==============\n"
 			"qos_level_low_wm: %d, \n"
 			"qos_level_high_wm: %d, \n"
@@ -1658,7 +1716,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
 			);
 
-	DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
+	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
 			"\n============== DML DLG Output parameters [%d] ==============\n"
 			"refcyc_h_blank_end: %d, \n"
 			"dlg_vblank_end: %d, \n"
@@ -1693,7 +1751,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
 			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
 			);
 
-	DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
+	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
 			"\ndst_y_per_meta_row_nom_l: %d, \n"
 			"refcyc_per_meta_chunk_nom_l: %d, \n"
 			"refcyc_per_line_delivery_pre_l: %d, \n"
@@ -1723,7 +1781,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
 			);
 
-	DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
+	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
 			"\n============== DML RQ Output parameters [%d] ==============\n"
 			"chunk_size: %d \n"
 			"min_chunk_size: %d \n"
@@ -1838,7 +1896,7 @@ static void dcn10_enable_plane(
 	struct dce_hwseq *hws = dc->hwseq;
 
 	if (dc->debug.sanity_checks) {
-		dcn10_verify_allow_pstate_change_high(dc);
+		hws->funcs.verify_allow_pstate_change_high(dc);
 	}
 
 	undo_DEGVIDCN10_253_wa(dc);
@@ -1895,11 +1953,11 @@ static void dcn10_enable_plane(
 		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
 
 	if (dc->debug.sanity_checks) {
-		dcn10_verify_allow_pstate_change_high(dc);
+		hws->funcs.verify_allow_pstate_change_high(dc);
 	}
 }
 
-static void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
+void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
 {
 	int i = 0;
 	struct dpp_grph_csc_adjustment adjust;
@@ -1947,7 +2005,7 @@ static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint
 	matrix[11] = rgb_bias;
 }
 
-static void dcn10_program_output_csc(struct dc *dc,
+void dcn10_program_output_csc(struct dc *dc,
 		struct pipe_ctx *pipe_ctx,
 		enum dc_color_space colorspace,
 		uint16_t *matrix,
@@ -1979,57 +2037,6 @@ static void dcn10_program_output_csc(struct dc *dc,
 	}
 }
 
-bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
-{
-	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
-		return true;
-	if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
-		return true;
-	return false;
-}
-
-bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
-{
-	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
-		return true;
-	if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
-		return true;
-	return false;
-}
-
-bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
-{
-	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
-		return true;
-	if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
-		return true;
-	if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
-		return true;
-	return false;
-}
-
-bool is_rgb_cspace(enum dc_color_space output_color_space)
-{
-	switch (output_color_space) {
-	case COLOR_SPACE_SRGB:
-	case COLOR_SPACE_SRGB_LIMITED:
-	case COLOR_SPACE_2020_RGB_FULLRANGE:
-	case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
-	case COLOR_SPACE_ADOBERGB:
-		return true;
-	case COLOR_SPACE_YCBCR601:
-	case COLOR_SPACE_YCBCR709:
-	case COLOR_SPACE_YCBCR601_LIMITED:
-	case COLOR_SPACE_YCBCR709_LIMITED:
-	case COLOR_SPACE_2020_YCBCR:
-		return false;
-	default:
-		/* Add a case to switch */
-		BREAK_TO_DEBUGGER();
-		return false;
-	}
-}
-
 void dcn10_get_surface_visual_confirm_color(
 		const struct pipe_ctx *pipe_ctx,
 		struct tg_color *color)
@@ -2103,70 +2110,7 @@ void dcn10_get_hdr_visual_confirm_color(
 	}
 }
 
-static uint16_t fixed_point_to_int_frac(
-	struct fixed31_32 arg,
-	uint8_t integer_bits,
-	uint8_t fractional_bits)
-{
-	int32_t numerator;
-	int32_t divisor = 1 << fractional_bits;
-
-	uint16_t result;
-
-	uint16_t d = (uint16_t)dc_fixpt_floor(
-		dc_fixpt_abs(
-			arg));
-
-	if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
-		numerator = (uint16_t)dc_fixpt_floor(
-			dc_fixpt_mul_int(
-				arg,
-				divisor));
-	else {
-		numerator = dc_fixpt_floor(
-			dc_fixpt_sub(
-				dc_fixpt_from_int(
-					1LL << integer_bits),
-				dc_fixpt_recip(
-					dc_fixpt_from_int(
-						divisor))));
-	}
-
-	if (numerator >= 0)
-		result = (uint16_t)numerator;
-	else
-		result = (uint16_t)(
-		(1 << (integer_bits + fractional_bits + 1)) + numerator);
-
-	if ((result != 0) && dc_fixpt_lt(
-		arg, dc_fixpt_zero))
-		result |= 1 << (integer_bits + fractional_bits);
-
-	return result;
-}
-
-void dcn10_build_prescale_params(struct  dc_bias_and_scale *bias_and_scale,
-		const struct dc_plane_state *plane_state)
-{
-	if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
-			&& plane_state->format != SURFACE_PIXEL_FORMAT_INVALID
-			&& plane_state->input_csc_color_matrix.enable_adjustment
-			&& plane_state->coeff_reduction_factor.value != 0) {
-		bias_and_scale->scale_blue = fixed_point_to_int_frac(
-			dc_fixpt_mul(plane_state->coeff_reduction_factor,
-					dc_fixpt_from_fraction(256, 255)),
-				2,
-				13);
-		bias_and_scale->scale_red = bias_and_scale->scale_blue;
-		bias_and_scale->scale_green = bias_and_scale->scale_blue;
-	} else {
-		bias_and_scale->scale_blue = 0x2000;
-		bias_and_scale->scale_red = 0x2000;
-		bias_and_scale->scale_green = 0x2000;
-	}
-}
-
-static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
+static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
 {
 	struct dc_bias_and_scale bns_params = {0};
 
@@ -2175,21 +2119,18 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
 			plane_state->format,
 			EXPANSION_MODE_ZERO,
 			plane_state->input_csc_color_matrix,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 			plane_state->color_space,
 			NULL);
-#else
-			plane_state->color_space);
-#endif
 
 	//set scale and bias registers
-	dcn10_build_prescale_params(&bns_params, plane_state);
+	build_prescale_params(&bns_params, plane_state);
 	if (dpp->funcs->dpp_program_bias_and_scale)
 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
 }
 
-static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
 	struct mpcc_blnd_cfg blnd_cfg = {{0}};
 	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
@@ -2199,10 +2140,10 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
 
 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
-		dcn10_get_hdr_visual_confirm_color(
+		hws->funcs.get_hdr_visual_confirm_color(
 				pipe_ctx, &blnd_cfg.black_color);
 	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
-		dcn10_get_surface_visual_confirm_color(
+		hws->funcs.get_surface_visual_confirm_color(
 				pipe_ctx, &blnd_cfg.black_color);
 	} else {
 		color_space_to_black_color(
@@ -2284,11 +2225,12 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
 }
 
-void update_dchubp_dpp(
+static void dcn10_update_dchubp_dpp(
 	struct dc *dc,
 	struct pipe_ctx *pipe_ctx,
 	struct dc_state *context)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -2342,12 +2284,12 @@ void update_dchubp_dpp(
 
 	if (plane_state->update_flags.bits.full_update ||
 		plane_state->update_flags.bits.bpp_change)
-		update_dpp(dpp, plane_state);
+		dcn10_update_dpp(dpp, plane_state);
 
 	if (plane_state->update_flags.bits.full_update ||
 		plane_state->update_flags.bits.per_pixel_alpha_change ||
 		plane_state->update_flags.bits.global_alpha_change)
-		dc->hwss.update_mpcc(dc, pipe_ctx);
+		hws->funcs.update_mpcc(dc, pipe_ctx);
 
 	if (plane_state->update_flags.bits.full_update ||
 		plane_state->update_flags.bits.per_pixel_alpha_change ||
@@ -2407,13 +2349,13 @@ void update_dchubp_dpp(
 
 	hubp->power_gated = false;
 
-	dc->hwss.update_plane_addr(dc, pipe_ctx);
+	hws->funcs.update_plane_addr(dc, pipe_ctx);
 
 	if (is_pipe_tree_visible(pipe_ctx))
 		hubp->funcs->set_blank(hubp, false);
 }
 
-static void dcn10_blank_pixel_data(
+void dcn10_blank_pixel_data(
 		struct dc *dc,
 		struct pipe_ctx *pipe_ctx,
 		bool blank)
@@ -2456,10 +2398,9 @@ static void dcn10_blank_pixel_data(
 	}
 }
 
-void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
+void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
 {
-	struct fixed31_32 multiplier = dc_fixpt_from_fraction(
-			pipe_ctx->plane_state->sdr_white_level, 80);
+	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
 	struct custom_float_format fmt;
 
@@ -2467,7 +2408,8 @@ void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
 	fmt.mantissa_bits = 12;
 	fmt.sign = true;
 
-	if (pipe_ctx->plane_state->sdr_white_level > 80)
+
+	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
 
 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
@@ -2479,17 +2421,19 @@ void dcn10_program_pipe(
 		struct pipe_ctx *pipe_ctx,
 		struct dc_state *context)
 {
+	struct dce_hwseq *hws = dc->hwseq;
+
 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
 		dcn10_enable_plane(dc, pipe_ctx, context);
 
-	update_dchubp_dpp(dc, pipe_ctx, context);
+	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
 
-	set_hdr_multiplier(pipe_ctx);
+	hws->funcs.set_hdr_multiplier(pipe_ctx);
 
 	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
 			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
 			pipe_ctx->plane_state->update_flags.bits.gamma_change)
-		dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
 
 	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
 	 * only do gamma programming for full update.
@@ -2498,14 +2442,16 @@ void dcn10_program_pipe(
 	 * doing heavy calculation and programming
 	 */
 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
-		dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
 }
 
-static void program_all_pipe_in_tree(
+static void dcn10_program_all_pipe_in_tree(
 		struct dc *dc,
 		struct pipe_ctx *pipe_ctx,
 		struct dc_state *context)
 {
+	struct dce_hwseq *hws = dc->hwseq;
+
 	if (pipe_ctx->top_pipe == NULL) {
 		bool blank = !is_pipe_tree_visible(pipe_ctx);
 
@@ -2519,20 +2465,20 @@ static void program_all_pipe_in_tree(
 		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
 
-		if (dc->hwss.setup_vupdate_interrupt)
-			dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+		if (hws->funcs.setup_vupdate_interrupt)
+			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
 
-		dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
 	}
 
 	if (pipe_ctx->plane_state != NULL)
-		dcn10_program_pipe(dc, pipe_ctx, context);
+		hws->funcs.program_pipe(dc, pipe_ctx, context);
 
 	if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
-		program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
+		dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
 }
 
-struct pipe_ctx *find_top_pipe_for_stream(
+static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
 		struct dc *dc,
 		struct dc_state *context,
 		const struct dc_stream_state *stream)
@@ -2556,19 +2502,20 @@ struct pipe_ctx *find_top_pipe_for_stream(
 	return NULL;
 }
 
-static void dcn10_apply_ctx_for_surface(
+void dcn10_apply_ctx_for_surface(
 		struct dc *dc,
 		const struct dc_stream_state *stream,
 		int num_planes,
 		struct dc_state *context)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	int i;
 	struct timing_generator *tg;
 	uint32_t underflow_check_delay_us;
 	bool removed_pipe[4] = { false };
 	bool interdependent_update = false;
 	struct pipe_ctx *top_pipe_to_program =
-			find_top_pipe_for_stream(dc, context, stream);
+			dcn10_find_top_pipe_for_stream(dc, context, stream);
 	DC_LOGGER_INIT(dc->ctx->logger);
 
 	if (!top_pipe_to_program)
@@ -2581,23 +2528,23 @@ static void dcn10_apply_ctx_for_surface(
 
 	underflow_check_delay_us = dc->debug.underflow_assert_delay_us;
 
-	if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur)
-		ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program));
+	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
+		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
 
 	if (interdependent_update)
-		lock_all_pipes(dc, context, true);
+		dcn10_lock_all_pipes(dc, context, true);
 	else
 		dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
 
 	if (underflow_check_delay_us != 0xFFFFFFFF)
 		udelay(underflow_check_delay_us);
 
-	if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur)
-		ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program));
+	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
+		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
 
 	if (num_planes == 0) {
 		/* OTG blank before remove all front end */
-		dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
+		hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);
 	}
 
 	/* Disconnect unused mpcc */
@@ -2623,7 +2570,7 @@ static void dcn10_apply_ctx_for_surface(
 		    old_pipe_ctx->plane_state &&
 		    old_pipe_ctx->stream_res.tg == tg) {
 
-			dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
+			hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
 			removed_pipe[i] = true;
 
 			DC_LOG_DC("Reset mpcc for pipe %d\n",
@@ -2632,13 +2579,11 @@ static void dcn10_apply_ctx_for_surface(
 	}
 
 	if (num_planes > 0)
-		program_all_pipe_in_tree(dc, top_pipe_to_program, context);
+		dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	/* Program secondary blending tree and writeback pipes */
-	if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
-		dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
-#endif
+	if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree))
+		hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context);
 	if (interdependent_update)
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -2654,7 +2599,7 @@ static void dcn10_apply_ctx_for_surface(
 		}
 
 	if (interdependent_update)
-		lock_all_pipes(dc, context, false);
+		dcn10_lock_all_pipes(dc, context, false);
 	else
 		dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
 
@@ -2691,14 +2636,15 @@ static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *contex
 	}
 }
 
-static void dcn10_prepare_bandwidth(
+void dcn10_prepare_bandwidth(
 		struct dc *dc,
 		struct dc_state *context)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	struct hubbub *hubbub = dc->res_pool->hubbub;
 
 	if (dc->debug.sanity_checks)
-		dcn10_verify_allow_pstate_change_high(dc);
+		hws->funcs.verify_allow_pstate_change_high(dc);
 
 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 		if (context->stream_count == 0)
@@ -2720,17 +2666,18 @@ static void dcn10_prepare_bandwidth(
 		dcn_bw_notify_pplib_of_wm_ranges(dc);
 
 	if (dc->debug.sanity_checks)
-		dcn10_verify_allow_pstate_change_high(dc);
+		hws->funcs.verify_allow_pstate_change_high(dc);
 }
 
-static void dcn10_optimize_bandwidth(
+void dcn10_optimize_bandwidth(
 		struct dc *dc,
 		struct dc_state *context)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	struct hubbub *hubbub = dc->res_pool->hubbub;
 
 	if (dc->debug.sanity_checks)
-		dcn10_verify_allow_pstate_change_high(dc);
+		hws->funcs.verify_allow_pstate_change_high(dc);
 
 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 		if (context->stream_count == 0)
@@ -2752,10 +2699,10 @@ static void dcn10_optimize_bandwidth(
 		dcn_bw_notify_pplib_of_wm_ranges(dc);
 
 	if (dc->debug.sanity_checks)
-		dcn10_verify_allow_pstate_change_high(dc);
+		hws->funcs.verify_allow_pstate_change_high(dc);
 }
 
-static void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
+void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
 		int num_pipes, unsigned int vmin, unsigned int vmax,
 		unsigned int vmid, unsigned int vmid_frame_number)
 {
@@ -2763,6 +2710,8 @@ static void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
 	struct drr_params params = {0};
 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
 	unsigned int event_triggers = 0x800;
+	// Note: DRR trigger events are generated regardless of whether num_frames is met.
+	unsigned int num_frames = 2;
 
 	params.vertical_total_max = vmax;
 	params.vertical_total_min = vmin;
@@ -2779,11 +2728,11 @@ static void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
 		if (vmax != 0 && vmin != 0)
 			pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
 					pipe_ctx[i]->stream_res.tg,
-					event_triggers);
+					event_triggers, num_frames);
 	}
 }
 
-static void dcn10_get_position(struct pipe_ctx **pipe_ctx,
+void dcn10_get_position(struct pipe_ctx **pipe_ctx,
 		int num_pipes,
 		struct crtc_position *position)
 {
@@ -2795,22 +2744,23 @@ static void dcn10_get_position(struct pipe_ctx **pipe_ctx,
 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
 }
 
-static void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
-		int num_pipes, const struct dc_static_screen_events *events)
+void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+		int num_pipes, const struct dc_static_screen_params *params)
 {
 	unsigned int i;
-	unsigned int value = 0;
+	unsigned int triggers = 0;
 
-	if (events->surface_update)
-		value |= 0x80;
-	if (events->cursor_update)
-		value |= 0x2;
-	if (events->force_trigger)
-		value |= 0x1;
+	if (params->triggers.surface_update)
+		triggers |= 0x80;
+	if (params->triggers.cursor_update)
+		triggers |= 0x2;
+	if (params->triggers.force_trigger)
+		triggers |= 0x1;
 
 	for (i = 0; i < num_pipes; i++)
 		pipe_ctx[i]->stream_res.tg->funcs->
-			set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+					triggers, params->num_frames);
 }
 
 static void dcn10_config_stereo_parameters(
@@ -2850,7 +2800,7 @@ static void dcn10_config_stereo_parameters(
 	return;
 }
 
-static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
+void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
 {
 	struct crtc_stereo_flags flags = { 0 };
 	struct dc_stream_state *stream = pipe_ctx->stream;
@@ -2889,15 +2839,16 @@ static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_in
 	return NULL;
 }
 
-static void dcn10_wait_for_mpcc_disconnect(
+void dcn10_wait_for_mpcc_disconnect(
 		struct dc *dc,
 		struct resource_pool *res_pool,
 		struct pipe_ctx *pipe_ctx)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	int mpcc_inst;
 
 	if (dc->debug.sanity_checks) {
-		dcn10_verify_allow_pstate_change_high(dc);
+		hws->funcs.verify_allow_pstate_change_high(dc);
 	}
 
 	if (!pipe_ctx->stream_res.opp)
@@ -2914,12 +2865,12 @@ static void dcn10_wait_for_mpcc_disconnect(
 	}
 
 	if (dc->debug.sanity_checks) {
-		dcn10_verify_allow_pstate_change_high(dc);
+		hws->funcs.verify_allow_pstate_change_high(dc);
 	}
 
 }
 
-static bool dcn10_dummy_display_power_gating(
+bool dcn10_dummy_display_power_gating(
 	struct dc *dc,
 	uint8_t controller_id,
 	struct dc_bios *dcb,
@@ -2928,7 +2879,7 @@ static bool dcn10_dummy_display_power_gating(
 	return true;
 }
 
-static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
+void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
@@ -2952,7 +2903,7 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
 	}
 }
 
-static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
+void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
 {
 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
 
@@ -2960,7 +2911,34 @@ static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh
 	hubbub->funcs->update_dchub(hubbub, dh_data);
 }
 
-static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
+{
+	struct pipe_ctx *test_pipe;
+	const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
+	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
+
+	/**
+	 * Disable the cursor if there's another pipe above this one with a
+	 * plane that contains this pipe's viewport, to prevent a double cursor
+	 * and incorrect scaling artifacts.
+	 */
+	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
+	     test_pipe = test_pipe->top_pipe) {
+		if (!test_pipe->plane_state->visible)
+			continue;
+
+		r2 = &test_pipe->plane_res.scl_data.recout;
+		r2_r = r2->x + r2->width;
+		r2_b = r2->y + r2->height;
+
+		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
+			return true;
+	}
+
+	return false;
+}
+
+void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -2974,23 +2952,44 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 		.rotation = pipe_ctx->plane_state->rotation,
 		.mirror = pipe_ctx->plane_state->horizontal_mirror
 	};
-	uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x;
-	uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y;
-	uint32_t x_offset = min(x_plane, pos_cpy.x);
-	uint32_t y_offset = min(y_plane, pos_cpy.y);
+	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
+		(pipe_ctx->bottom_pipe != NULL);
 
-	pos_cpy.x -= x_offset;
-	pos_cpy.y -= y_offset;
-	pos_cpy.x_hotspot += (x_plane - x_offset);
-	pos_cpy.y_hotspot += (y_plane - y_offset);
+	int x_plane = pipe_ctx->plane_state->dst_rect.x;
+	int y_plane = pipe_ctx->plane_state->dst_rect.y;
+	int x_pos = pos_cpy.x;
+	int y_pos = pos_cpy.y;
+
+	// translate cursor from stream space to plane space
+	x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
+			pipe_ctx->plane_state->dst_rect.width;
+	y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
+			pipe_ctx->plane_state->dst_rect.height;
+
+	if (x_pos < 0) {
+		pos_cpy.x_hotspot -= x_pos;
+		x_pos = 0;
+	}
+
+	if (y_pos < 0) {
+		pos_cpy.y_hotspot -= y_pos;
+		y_pos = 0;
+	}
+
+	pos_cpy.x = (uint32_t)x_pos;
+	pos_cpy.y = (uint32_t)y_pos;
 
 	if (pipe_ctx->plane_state->address.type
 			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
 		pos_cpy.enable = false;
 
+	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
+		pos_cpy.enable = false;
+
 	// Swap axis and mirror horizontally
 	if (param.rotation == ROTATION_ANGLE_90) {
 		uint32_t temp_x = pos_cpy.x;
+
 		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
 				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
 		pos_cpy.y = temp_x;
@@ -2998,26 +2997,44 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 	// Swap axis and mirror vertically
 	else if (param.rotation == ROTATION_ANGLE_270) {
 		uint32_t temp_y = pos_cpy.y;
-		if (pos_cpy.x >  pipe_ctx->plane_res.scl_data.viewport.height) {
-			pos_cpy.x = pos_cpy.x - pipe_ctx->plane_res.scl_data.viewport.height;
-			pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x;
-		} else {
-			pos_cpy.y = 2 * pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x;
-		}
+		int viewport_height =
+			pipe_ctx->plane_res.scl_data.viewport.height;
+
+		if (pipe_split_on) {
+			if (pos_cpy.x > viewport_height) {
+				pos_cpy.x = pos_cpy.x - viewport_height;
+				pos_cpy.y = viewport_height - pos_cpy.x;
+			} else {
+				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
+			}
+		} else
+			pos_cpy.y = viewport_height - pos_cpy.x;
 		pos_cpy.x = temp_y;
 	}
 	// Mirror horizontally and vertically
 	else if (param.rotation == ROTATION_ANGLE_180) {
-		if (pos_cpy.x >= pipe_ctx->plane_res.scl_data.viewport.width + pipe_ctx->plane_res.scl_data.viewport.x) {
-			pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.width
-					- pos_cpy.x + 2 * pipe_ctx->plane_res.scl_data.viewport.x;
-		} else {
-			uint32_t temp_x = pos_cpy.x;
-			pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.x - pos_cpy.x;
-			if (temp_x >= pipe_ctx->plane_res.scl_data.viewport.x + (int)hubp->curs_attr.width
-					|| pos_cpy.x <= (int)hubp->curs_attr.width + pipe_ctx->plane_state->src_rect.x) {
-				pos_cpy.x = temp_x + pipe_ctx->plane_res.scl_data.viewport.width;
+		int viewport_width =
+			pipe_ctx->plane_res.scl_data.viewport.width;
+		int viewport_x =
+			pipe_ctx->plane_res.scl_data.viewport.x;
+
+		if (pipe_split_on) {
+			if (pos_cpy.x >= viewport_width + viewport_x) {
+				pos_cpy.x = 2 * viewport_width
+						- pos_cpy.x + 2 * viewport_x;
+			} else {
+				uint32_t temp_x = pos_cpy.x;
+
+				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
+				if (temp_x >= viewport_x +
+					(int)hubp->curs_attr.width || pos_cpy.x
+					<= (int)hubp->curs_attr.width +
+					pipe_ctx->plane_state->src_rect.x) {
+					pos_cpy.x = temp_x + viewport_width;
+				}
 			}
+		} else {
+			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
 		}
 		pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
 	}
@@ -3026,7 +3043,7 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
 }
 
-static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
 
@@ -3036,7 +3053,7 @@ static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
 		pipe_ctx->plane_res.dpp, attributes);
 }
 
-static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
+void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
 {
 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
 	struct fixed31_32 multiplier;
@@ -3063,12 +3080,12 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
 			pipe_ctx->plane_res.dpp, &opt_attr);
 }
 
-/**
-* apply_front_porch_workaround  TODO FPGA still need?
-*
-* This is a workaround for a bug that has existed since R5xx and has not been
-* fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
-*/
+/*
+ * apply_front_porch_workaround  TODO FPGA still need?
+ *
+ * This is a workaround for a bug that has existed since R5xx and has not been
+ * fixed: keep the front porch at a minimum of 2 for interlaced mode or 1 for progressive.
+ */
 static void apply_front_porch_workaround(
 	struct dc_crtc_timing *timing)
 {
@@ -3081,7 +3098,7 @@ static void apply_front_porch_workaround(
 	}
 }
 
-int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
+int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
 {
 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
 	struct dc_crtc_timing patched_crtc_timing;
@@ -3110,34 +3127,8 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
 	return vertical_line_start;
 }
 
-void lock_all_pipes(struct dc *dc,
-	struct dc_state *context,
-	bool lock)
-{
-	struct pipe_ctx *pipe_ctx;
-	struct timing_generator *tg;
-	int i;
-
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		pipe_ctx = &context->res_ctx.pipe_ctx[i];
-		tg = pipe_ctx->stream_res.tg;
-		/*
-		 * Only lock the top pipe's tg to prevent redundant
-		 * (un)locking. Also skip if pipe is disabled.
-		 */
-		if (pipe_ctx->top_pipe ||
-		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
-		    !tg->funcs->is_tg_enabled(tg))
-			continue;
-
-		if (lock)
-			tg->funcs->lock(tg);
-		else
-			tg->funcs->unlock(tg);
-	}
-}
-
-static void calc_vupdate_position(
+static void dcn10_calc_vupdate_position(
+		struct dc *dc,
 		struct pipe_ctx *pipe_ctx,
 		uint32_t *start_line,
 		uint32_t *end_line)
@@ -3145,7 +3136,7 @@ static void calc_vupdate_position(
 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
 	int vline_int_offset_from_vupdate =
 			pipe_ctx->stream->periodic_interrupt0.lines_offset;
-	int vupdate_offset_from_vsync = get_vupdate_offset_from_vsync(pipe_ctx);
+	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
 	int start_position;
 
 	if (vline_int_offset_from_vupdate > 0)
@@ -3166,7 +3157,8 @@ static void calc_vupdate_position(
 		*end_line = 2;
 }
 
-static void cal_vline_position(
+static void dcn10_cal_vline_position(
+		struct dc *dc,
 		struct pipe_ctx *pipe_ctx,
 		enum vline_select vline,
 		uint32_t *start_line,
@@ -3181,7 +3173,8 @@ static void cal_vline_position(
 
 	switch (ref_point) {
 	case START_V_UPDATE:
-		calc_vupdate_position(
+		dcn10_calc_vupdate_position(
+				dc,
 				pipe_ctx,
 				start_line,
 				end_line);
@@ -3195,7 +3188,8 @@ static void cal_vline_position(
 	}
 }
 
-static void dcn10_setup_periodic_interrupt(
+void dcn10_setup_periodic_interrupt(
+		struct dc *dc,
 		struct pipe_ctx *pipe_ctx,
 		enum vline_select vline)
 {
@@ -3205,7 +3199,7 @@ static void dcn10_setup_periodic_interrupt(
 		uint32_t start_line = 0;
 		uint32_t end_line = 0;
 
-		cal_vline_position(pipe_ctx, vline, &start_line, &end_line);
+		dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
 
 		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
 
@@ -3216,10 +3210,10 @@ static void dcn10_setup_periodic_interrupt(
 	}
 }
 
-static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
+void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
-	int start_line = get_vupdate_offset_from_vsync(pipe_ctx);
+	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
 
 	if (start_line < 0) {
 		ASSERT(0);
@@ -3230,12 +3224,13 @@ static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
 }
 
-static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
+void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
 		struct dc_link_settings *link_settings)
 {
 	struct encoder_unblank_param params = { { 0 } };
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	struct dc_link *link = stream->link;
+	struct dce_hwseq *hws = link->dc->hwseq;
 
 	/* only 3 items below are used by unblank */
 	params.timing = pipe_ctx->stream->timing;
@@ -3249,11 +3244,11 @@ static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
 	}
 
 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
-		link->dc->hwss.edp_backlight_control(link, true);
+		hws->funcs.edp_backlight_control(link, true);
 	}
 }
 
-static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
+void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
 				const uint8_t *custom_sdp_message,
 				unsigned int sdp_message_size)
 {
@@ -3264,7 +3259,7 @@ static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
 				sdp_message_size);
 	}
 }
-static enum dc_status dcn10_set_clock(struct dc *dc,
+enum dc_status dcn10_set_clock(struct dc *dc,
 			enum dc_clock_type clock_type,
 			uint32_t clk_khz,
 			uint32_t stepping)
@@ -3304,7 +3299,7 @@ static enum dc_status dcn10_set_clock(struct dc *dc,
 
 }
 
-static void dcn10_get_clock(struct dc *dc,
+void dcn10_get_clock(struct dc *dc,
 			enum dc_clock_type clock_type,
 			struct dc_clock_config *clock_cfg)
 {
@@ -3314,77 +3309,3 @@ static void dcn10_get_clock(struct dc *dc,
 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
 
 }
-
-static const struct hw_sequencer_funcs dcn10_funcs = {
-	.program_gamut_remap = dcn10_program_gamut_remap,
-	.init_hw = dcn10_init_hw,
-	.init_pipes = dcn10_init_pipes,
-	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
-	.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
-	.update_plane_addr = dcn10_update_plane_addr,
-	.plane_atomic_disconnect = hwss1_plane_atomic_disconnect,
-	.update_dchub = dcn10_update_dchub,
-	.update_mpcc = dcn10_update_mpcc,
-	.update_pending_status = dcn10_update_pending_status,
-	.set_input_transfer_func = dcn10_set_input_transfer_func,
-	.set_output_transfer_func = dcn10_set_output_transfer_func,
-	.program_output_csc = dcn10_program_output_csc,
-	.power_down = dce110_power_down,
-	.enable_accelerated_mode = dce110_enable_accelerated_mode,
-	.enable_timing_synchronization = dcn10_enable_timing_synchronization,
-	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
-	.update_info_frame = dce110_update_info_frame,
-	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
-	.enable_stream = dce110_enable_stream,
-	.disable_stream = dce110_disable_stream,
-	.unblank_stream = dcn10_unblank_stream,
-	.blank_stream = dce110_blank_stream,
-	.enable_audio_stream = dce110_enable_audio_stream,
-	.disable_audio_stream = dce110_disable_audio_stream,
-	.enable_display_power_gating = dcn10_dummy_display_power_gating,
-	.disable_plane = dcn10_disable_plane,
-	.blank_pixel_data = dcn10_blank_pixel_data,
-	.pipe_control_lock = dcn10_pipe_control_lock,
-	.prepare_bandwidth = dcn10_prepare_bandwidth,
-	.optimize_bandwidth = dcn10_optimize_bandwidth,
-	.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
-	.enable_stream_timing = dcn10_enable_stream_timing,
-	.set_drr = dcn10_set_drr,
-	.get_position = dcn10_get_position,
-	.set_static_screen_control = dcn10_set_static_screen_control,
-	.setup_stereo = dcn10_setup_stereo,
-	.set_avmute = dce110_set_avmute,
-	.log_hw_state = dcn10_log_hw_state,
-	.get_hw_state = dcn10_get_hw_state,
-	.clear_status_bits = dcn10_clear_status_bits,
-	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
-	.edp_backlight_control = dce110_edp_backlight_control,
-	.edp_power_control = dce110_edp_power_control,
-	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
-	.set_cursor_position = dcn10_set_cursor_position,
-	.set_cursor_attribute = dcn10_set_cursor_attribute,
-	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
-	.disable_stream_gating = NULL,
-	.enable_stream_gating = NULL,
-	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
-	.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
-	.set_clock = dcn10_set_clock,
-	.get_clock = dcn10_get_clock,
-	.did_underflow_occur = dcn10_did_underflow_occur,
-	.init_blank = NULL,
-	.disable_vga = dcn10_disable_vga,
-	.bios_golden_init = dcn10_bios_golden_init,
-	.plane_atomic_disable = dcn10_plane_atomic_disable,
-	.plane_atomic_power_down = dcn10_plane_atomic_power_down,
-	.enable_power_gating_plane = dcn10_enable_power_gating_plane,
-	.dpp_pg_control = dcn10_dpp_pg_control,
-	.hubp_pg_control = dcn10_hubp_pg_control,
-	.dsc_pg_control = NULL,
-};
-
-
-void dcn10_hw_sequencer_construct(struct dc *dc)
-{
-	dc->hwss = dcn10_funcs;
-}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index d3616b1..4d20f65 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -27,68 +27,160 @@
 #define __DC_HWSS_DCN10_H__
 
 #include "core_types.h"
+#include "hw_sequencer_private.h"
 
 struct dc;
 
 void dcn10_hw_sequencer_construct(struct dc *dc);
-extern void fill_display_configs(
-	const struct dc_state *context,
-	struct dm_pp_display_configuration *pp_display_cfg);
 
-bool is_rgb_cspace(enum dc_color_space output_color_space);
-
-void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
-void dcn10_verify_allow_pstate_change_high(struct dc *dc);
-
+int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+enum dc_status dcn10_enable_stream_timing(
+		struct pipe_ctx *pipe_ctx,
+		struct dc_state *context,
+		struct dc *dc);
+void dcn10_optimize_bandwidth(
+		struct dc *dc,
+		struct dc_state *context);
+void dcn10_prepare_bandwidth(
+		struct dc *dc,
+		struct dc_state *context);
+void dcn10_pipe_control_lock(
+	struct dc *dc,
+	struct pipe_ctx *pipe,
+	bool lock);
+void dcn10_blank_pixel_data(
+		struct dc *dc,
+		struct pipe_ctx *pipe_ctx,
+		bool blank);
+void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
+		struct dc_link_settings *link_settings);
+void dcn10_program_output_csc(struct dc *dc,
+		struct pipe_ctx *pipe_ctx,
+		enum dc_color_space colorspace,
+		uint16_t *matrix,
+		int opp_id);
+bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+				const struct dc_stream_state *stream);
+bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+			const struct dc_plane_state *plane_state);
+void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_reset_hw_ctx_wrap(
+		struct dc *dc,
+		struct dc_state *context);
+void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_apply_ctx_for_surface(
+		struct dc *dc,
+		const struct dc_stream_state *stream,
+		int num_planes,
+		struct dc_state *context);
+void dcn10_hubp_pg_control(
+		struct dce_hwseq *hws,
+		unsigned int hubp_inst,
+		bool power_on);
+void dcn10_dpp_pg_control(
+		struct dce_hwseq *hws,
+		unsigned int dpp_inst,
+		bool power_on);
+void dcn10_enable_power_gating_plane(
+	struct dce_hwseq *hws,
+	bool enable);
+void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_disable_vga(
+	struct dce_hwseq *hws);
 void dcn10_program_pipe(
 		struct dc *dc,
 		struct pipe_ctx *pipe_ctx,
 		struct dc_state *context);
-
-void dcn10_get_hw_state(
+void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx);
+void dcn10_init_hw(struct dc *dc);
+void dcn10_init_pipes(struct dc *dc, struct dc_state *context);
+enum dc_status dce110_apply_ctx_to_hw(
 		struct dc *dc,
-		char *pBuf, unsigned int bufSize,
+		struct dc_state *context);
+void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data);
+void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx);
+void dce110_power_down(struct dc *dc);
+void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
+void dcn10_enable_timing_synchronization(
+		struct dc *dc,
+		int group_index,
+		int group_size,
+		struct pipe_ctx *grouped_pipes[]);
+void dcn10_enable_per_frame_crtc_position_reset(
+		struct dc *dc,
+		int group_size,
+		struct pipe_ctx *grouped_pipes[]);
+void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
+void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
+		const uint8_t *custom_sdp_message,
+		unsigned int sdp_message_size);
+void dce110_blank_stream(struct pipe_ctx *pipe_ctx);
+void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx);
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx);
+bool dcn10_dummy_display_power_gating(
+		struct dc *dc,
+		uint8_t controller_id,
+		struct dc_bios *dcb,
+		enum pipe_gating_control power_gating);
+void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
+		int num_pipes, unsigned int vmin, unsigned int vmax,
+		unsigned int vmid, unsigned int vmid_frame_number);
+void dcn10_get_position(struct pipe_ctx **pipe_ctx,
+		int num_pipes,
+		struct crtc_position *position);
+void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+		int num_pipes, const struct dc_static_screen_params *params);
+void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc);
+void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+void dcn10_log_hw_state(struct dc *dc,
+		struct dc_log_buffer_ctx *log_ctx);
+void dcn10_get_hw_state(struct dc *dc,
+		char *pBuf,
+		unsigned int bufSize,
 		unsigned int mask);
-
 void dcn10_clear_status_bits(struct dc *dc, unsigned int mask);
-
-bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
-
-bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
-
-bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
-
-void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp);
-
-void set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
-
+void dcn10_wait_for_mpcc_disconnect(
+		struct dc *dc,
+		struct resource_pool *res_pool,
+		struct pipe_ctx *pipe_ctx);
+void dce110_edp_backlight_control(
+		struct dc_link *link,
+		bool enable);
+void dce110_edp_power_control(
+		struct dc_link *link,
+		bool power_up);
+void dce110_edp_wait_for_hpd_ready(
+		struct dc_link *link,
+		bool power_up);
+void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx);
+void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx);
+void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx);
+void dcn10_setup_periodic_interrupt(
+		struct dc *dc,
+		struct pipe_ctx *pipe_ctx,
+		enum vline_select vline);
+enum dc_status dcn10_set_clock(struct dc *dc,
+		enum dc_clock_type clock_type,
+		uint32_t clk_khz,
+		uint32_t stepping);
+void dcn10_get_clock(struct dc *dc,
+		enum dc_clock_type clock_type,
+		struct dc_clock_config *clock_cfg);
+bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_bios_golden_init(struct dc *dc);
+void dcn10_plane_atomic_power_down(struct dc *dc,
+		struct dpp *dpp,
+		struct hubp *hubp);
 void dcn10_get_surface_visual_confirm_color(
 		const struct pipe_ctx *pipe_ctx,
 		struct tg_color *color);
-
 void dcn10_get_hdr_visual_confirm_color(
 		struct pipe_ctx *pipe_ctx,
 		struct tg_color *color);
-
-bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
-void update_dchubp_dpp(
-	struct dc *dc,
-	struct pipe_ctx *pipe_ctx,
-	struct dc_state *context);
-
-struct pipe_ctx *find_top_pipe_for_stream(
-		struct dc *dc,
-		struct dc_state *context,
-		const struct dc_stream_state *stream);
-
-int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
-
-void dcn10_build_prescale_params(struct  dc_bias_and_scale *bias_and_scale,
-		const struct dc_plane_state *plane_state);
-void lock_all_pipes(struct dc *dc,
-	struct dc_state *context,
-	bool lock);
+void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
+void dcn10_verify_allow_pstate_change_high(struct dc *dc);
 
 #endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h
new file mode 100644
index 0000000..596f95c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN10_DEBUG_H__
+#define __DC_HWSS_DCN10_DEBUG_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dcn10_clear_status_bits(struct dc *dc, unsigned int mask);
+
+void dcn10_log_hw_state(struct dc *dc,
+		struct dc_log_buffer_ctx *log_ctx);
+
+void dcn10_get_hw_state(struct dc *dc,
+		char *pBuf,
+		unsigned int bufSize,
+		unsigned int mask);
+
+#endif /* __DC_HWSS_DCN10_DEBUG_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
new file mode 100644
index 0000000..e7e5352
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hw_sequencer_private.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10_hw_sequencer.h"
+
+static const struct hw_sequencer_funcs dcn10_funcs = {
+	.program_gamut_remap = dcn10_program_gamut_remap,
+	.init_hw = dcn10_init_hw,
+	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+	.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
+	.update_plane_addr = dcn10_update_plane_addr,
+	.update_dchub = dcn10_update_dchub,
+	.update_pending_status = dcn10_update_pending_status,
+	.program_output_csc = dcn10_program_output_csc,
+	.enable_accelerated_mode = dce110_enable_accelerated_mode,
+	.enable_timing_synchronization = dcn10_enable_timing_synchronization,
+	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+	.update_info_frame = dce110_update_info_frame,
+	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+	.enable_stream = dce110_enable_stream,
+	.disable_stream = dce110_disable_stream,
+	.unblank_stream = dcn10_unblank_stream,
+	.blank_stream = dce110_blank_stream,
+	.enable_audio_stream = dce110_enable_audio_stream,
+	.disable_audio_stream = dce110_disable_audio_stream,
+	.disable_plane = dcn10_disable_plane,
+	.pipe_control_lock = dcn10_pipe_control_lock,
+	.prepare_bandwidth = dcn10_prepare_bandwidth,
+	.optimize_bandwidth = dcn10_optimize_bandwidth,
+	.set_drr = dcn10_set_drr,
+	.get_position = dcn10_get_position,
+	.set_static_screen_control = dcn10_set_static_screen_control,
+	.setup_stereo = dcn10_setup_stereo,
+	.set_avmute = dce110_set_avmute,
+	.log_hw_state = dcn10_log_hw_state,
+	.get_hw_state = dcn10_get_hw_state,
+	.clear_status_bits = dcn10_clear_status_bits,
+	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+	.edp_power_control = dce110_edp_power_control,
+	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+	.set_cursor_position = dcn10_set_cursor_position,
+	.set_cursor_attribute = dcn10_set_cursor_attribute,
+	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+	.set_clock = dcn10_set_clock,
+	.get_clock = dcn10_get_clock,
+	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+};
+
+static const struct hwseq_private_funcs dcn10_private_funcs = {
+	.init_pipes = dcn10_init_pipes,
+	.update_plane_addr = dcn10_update_plane_addr,
+	.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+	.program_pipe = dcn10_program_pipe,
+	.update_mpcc = dcn10_update_mpcc,
+	.set_input_transfer_func = dcn10_set_input_transfer_func,
+	.set_output_transfer_func = dcn10_set_output_transfer_func,
+	.power_down = dce110_power_down,
+	.enable_display_power_gating = dcn10_dummy_display_power_gating,
+	.blank_pixel_data = dcn10_blank_pixel_data,
+	.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
+	.enable_stream_timing = dcn10_enable_stream_timing,
+	.edp_backlight_control = dce110_edp_backlight_control,
+	.disable_stream_gating = NULL,
+	.enable_stream_gating = NULL,
+	.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
+	.did_underflow_occur = dcn10_did_underflow_occur,
+	.init_blank = NULL,
+	.disable_vga = dcn10_disable_vga,
+	.bios_golden_init = dcn10_bios_golden_init,
+	.plane_atomic_disable = dcn10_plane_atomic_disable,
+	.plane_atomic_power_down = dcn10_plane_atomic_power_down,
+	.enable_power_gating_plane = dcn10_enable_power_gating_plane,
+	.dpp_pg_control = dcn10_dpp_pg_control,
+	.hubp_pg_control = dcn10_hubp_pg_control,
+	.dsc_pg_control = NULL,
+	.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
+	.get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color,
+	.set_hdr_multiplier = dcn10_set_hdr_multiplier,
+	.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+};
+
+void dcn10_hw_sequencer_construct(struct dc *dc)
+{
+	dc->hwss = dcn10_funcs;
+	dc->hwseq->funcs = dcn10_private_funcs;
+}
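
The new dcn10_init.c splits the sequencer interface in two: the public hw_sequencer_funcs table and the private hwseq_private_funcs table, both wired up in dcn10_hw_sequencer_construct(). A minimal sketch of that pattern, using hypothetical stand-in types rather than the real dc structures, showing how a later generation can reuse a base table and override individual hooks:

  /* Sketch only (not the real dc structs): a public ops table plus a private
   * one, both assigned in a single construct call, with a later generation
   * reusing the base tables and overriding selected entries.
   */
  #include <stdio.h>

  struct hwss_ops {                 /* analogous to hw_sequencer_funcs */
      void (*init_hw)(void);
      void (*set_avmute)(int enable);
  };

  struct hwss_private_ops {         /* analogous to hwseq_private_funcs */
      void (*disable_vga)(void);
  };

  struct dev {
      struct hwss_ops ops;
      struct hwss_private_ops priv;
  };

  static void base_init_hw(void)     { puts("base init_hw"); }
  static void base_set_avmute(int e) { printf("base avmute=%d\n", e); }
  static void base_disable_vga(void) { puts("base disable_vga"); }
  static void next_init_hw(void)     { puts("next-gen init_hw"); }

  static const struct hwss_ops base_ops = {
      .init_hw    = base_init_hw,
      .set_avmute = base_set_avmute,
  };

  static const struct hwss_private_ops base_priv = {
      .disable_vga = base_disable_vga,
  };

  /* analogous to dcn10_hw_sequencer_construct() */
  static void base_construct(struct dev *d)
  {
      d->ops  = base_ops;
      d->priv = base_priv;
  }

  /* a later generation reuses the base tables and overrides one hook */
  static void next_construct(struct dev *d)
  {
      base_construct(d);
      d->ops.init_hw = next_init_hw;
  }

  int main(void)
  {
      struct dev d;

      next_construct(&d);
      d.ops.init_hw();       /* next-gen init_hw */
      d.ops.set_avmute(1);   /* base avmute=1    */
      d.priv.disable_vga();  /* base disable_vga */
      return 0;
  }
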
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h
new file mode 100644
index 0000000..8c6fd7b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN10_INIT_H__
+#define __DC_DCN10_INIT_H__
+
+struct dc;
+
+void dcn10_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN10_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
index 0fb9e44..f05371c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
@@ -53,11 +53,9 @@ static const struct ipp_funcs dcn10_ipp_funcs = {
 	.ipp_destroy			= dcn10_ipp_destroy
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 static const struct ipp_funcs dcn20_ipp_funcs = {
 	.ipp_destroy			= dcn10_ipp_destroy
 };
-#endif
 
 void dcn10_ipp_construct(
 	struct dcn10_ipp *ippn10,
@@ -76,7 +74,6 @@ void dcn10_ipp_construct(
 	ippn10->ipp_mask = ipp_mask;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 void dcn20_ipp_construct(
 	struct dcn10_ipp *ippn10,
 	struct dc_context *ctx,
@@ -93,4 +90,3 @@ void dcn20_ipp_construct(
 	ippn10->ipp_shift = ipp_shift;
 	ippn10->ipp_mask = ipp_mask;
 }
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
index cfa24459..f0e0d07 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
@@ -49,7 +49,6 @@
 	SRI(CURSOR_HOT_SPOT, CURSOR, id), \
 	SRI(CURSOR_DST_OFFSET, CURSOR, id)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define IPP_REG_LIST_DCN20(id) \
 	IPP_REG_LIST_DCN(id), \
 	SRI(CURSOR_SETTINGS, HUBPREQ, id), \
@@ -60,7 +59,6 @@
 	SRI(CURSOR_POSITION, CURSOR0_, id), \
 	SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \
 	SRI(CURSOR_DST_OFFSET, CURSOR0_, id)
-#endif
 
 #define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT	0x4
 #define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK		0x00000010L
@@ -105,7 +103,6 @@
 	IPP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
 	IPP_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define IPP_MASK_SH_LIST_DCN20(mask_sh) \
 	IPP_MASK_SH_LIST_DCN(mask_sh), \
 	IPP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \
@@ -124,7 +121,6 @@
 	IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
 	IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
 	IPP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
-#endif
 
 #define IPP_DCN10_REG_FIELD_LIST(type) \
 	type CNVC_SURFACE_PIXEL_FORMAT; \
@@ -196,13 +192,11 @@ void dcn10_ipp_construct(struct dcn10_ipp *ippn10,
 	const struct dcn10_ipp_shift *ipp_shift,
 	const struct dcn10_ipp_mask *ipp_mask);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 void dcn20_ipp_construct(struct dcn10_ipp *ippn10,
 	struct dc_context *ctx,
 	int inst,
 	const struct dcn10_ipp_registers *regs,
 	const struct dcn10_ipp_shift *ipp_shift,
 	const struct dcn10_ipp_mask *ipp_mask);
-#endif
 
 #endif /* _DCN10_IPP_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 88fcc39..eb13589 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -72,9 +72,7 @@
 struct dcn10_link_enc_aux_registers {
 	uint32_t AUX_CONTROL;
 	uint32_t AUX_DPHY_RX_CONTROL0;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	uint32_t AUX_DPHY_TX_CONTROL;
-#endif
 };
 
 struct dcn10_link_enc_hpd_registers {
@@ -106,7 +104,6 @@ struct dcn10_link_enc_registers {
 	uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
 	uint32_t DP_SEC_CNTL1;
 	uint32_t TMDS_CTL_BITS;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	/* DCCG  */
 	uint32_t CLOCK_ENABLE;
 	/* DIG */
@@ -127,6 +124,26 @@ struct dcn10_link_enc_registers {
 	uint32_t RDPCSTX_PHY_CNTL13;
 	uint32_t RDPCSTX_PHY_CNTL14;
 	uint32_t RDPCSTX_PHY_CNTL15;
+	uint32_t RDPCSTX_CNTL;
+	uint32_t RDPCSTX_CLOCK_CNTL;
+	uint32_t RDPCSTX_PHY_CNTL0;
+	uint32_t RDPCSTX_PHY_CNTL2;
+	uint32_t RDPCSTX_PLL_UPDATE_DATA;
+	uint32_t RDPCS_TX_CR_ADDR;
+	uint32_t RDPCS_TX_CR_DATA;
+	uint32_t DPCSTX_TX_CLOCK_CNTL;
+	uint32_t DPCSTX_TX_CNTL;
+	uint32_t RDPCSTX_INTERRUPT_CONTROL;
+	uint32_t RDPCSTX_PHY_FUSE0;
+	uint32_t RDPCSTX_PHY_FUSE1;
+	uint32_t RDPCSTX_PHY_FUSE2;
+	uint32_t RDPCSTX_PHY_FUSE3;
+	uint32_t RDPCSTX_PHY_RX_LD_VAL;
+	uint32_t DPCSTX_DEBUG_CONFIG;
+	uint32_t RDPCSTX_DEBUG_CONFIG;
+	uint32_t RDPCSTX0_RDPCSTX_SCRATCH;
+	uint32_t RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG;
+	uint32_t DCIO_SOFT_RESET;
 	/* indirect registers */
 	uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2;
 	uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3;
@@ -136,7 +153,6 @@ struct dcn10_link_enc_registers {
 	uint32_t RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3;
 	uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2;
 	uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3;
-#endif
 };
 
 #define LE_SF(reg_name, field_name, post_fix)\
@@ -242,7 +258,6 @@ struct dcn10_link_enc_registers {
 	type AUX_LS_READ_EN;\
 	type AUX_RX_RECEIVE_WINDOW
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 
 #define DCN20_LINK_ENCODER_DPCS_REG_FIELD_LIST(type) \
 		type RDPCS_PHY_DP_TX0_DATA_EN;\
@@ -423,20 +438,15 @@ struct dcn10_link_enc_registers {
 	type AUX_TX_PRECHARGE_SYMBOLS; \
 	type AUX_MODE_DET_CHECK_DELAY;\
 	type DPCS_DBG_CBUS_DIS
-#endif
 
 struct dcn10_link_enc_shift {
 	DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
-#endif
 };
 
 struct dcn10_link_enc_mask {
 	DCN_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
-#endif
 };
 
 struct dcn10_link_encoder {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 8b2f29f..04f8634 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -42,20 +42,27 @@ void mpc1_set_bg_color(struct mpc *mpc,
 		int mpcc_id)
 {
 	struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+	struct mpcc *bottommost_mpcc = mpc1_get_mpcc(mpc, mpcc_id);
+	uint32_t bg_r_cr, bg_g_y, bg_b_cb;
+
+	/* find bottommost mpcc. */
+	while (bottommost_mpcc->mpcc_bot) {
+		bottommost_mpcc = bottommost_mpcc->mpcc_bot;
+	}
 
 	/* mpc color is 12 bit.  tg_color is 10 bit */
 	/* todo: might want to use 16 bit to represent color and have each
 	 * hw block translate to correct color depth.
 	 */
-	uint32_t bg_r_cr = bg_color->color_r_cr << 2;
-	uint32_t bg_g_y = bg_color->color_g_y << 2;
-	uint32_t bg_b_cb = bg_color->color_b_cb << 2;
+	bg_r_cr = bg_color->color_r_cr << 2;
+	bg_g_y = bg_color->color_g_y << 2;
+	bg_b_cb = bg_color->color_b_cb << 2;
 
-	REG_SET(MPCC_BG_R_CR[mpcc_id], 0,
+	REG_SET(MPCC_BG_R_CR[bottommost_mpcc->mpcc_id], 0,
 			MPCC_BG_R_CR, bg_r_cr);
-	REG_SET(MPCC_BG_G_Y[mpcc_id], 0,
+	REG_SET(MPCC_BG_G_Y[bottommost_mpcc->mpcc_id], 0,
 			MPCC_BG_G_Y, bg_g_y);
-	REG_SET(MPCC_BG_B_CB[mpcc_id], 0,
+	REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0,
 			MPCC_BG_B_CB, bg_b_cb);
 }
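
The hunk above no longer programs the background color on the MPCC that was passed in; it walks the mpcc_bot links down to the bottommost MPCC of the blending tree and programs that instance, widening the 10-bit tg color to the 12-bit register field. A minimal stand-alone sketch of the same walk, with simplified types standing in for the real mpc structures:

  #include <stdio.h>

  struct mpcc {
      int mpcc_id;
      struct mpcc *mpcc_bot;   /* next layer below, NULL at the bottom */
  };

  struct tg_color {            /* 10-bit components, as in the comment above */
      unsigned int color_r_cr, color_g_y, color_b_cb;
  };

  static void set_bg_color(struct mpcc *top, const struct tg_color *c)
  {
      struct mpcc *bottom = top;

      while (bottom->mpcc_bot)     /* find bottommost mpcc */
          bottom = bottom->mpcc_bot;

      /* 10-bit color -> 12-bit register value */
      printf("MPCC_BG[%d] = (%u, %u, %u)\n", bottom->mpcc_id,
             c->color_r_cr << 2, c->color_g_y << 2, c->color_b_cb << 2);
  }

  int main(void)
  {
      struct mpcc bot = { .mpcc_id = 2, .mpcc_bot = NULL };
      struct mpcc top = { .mpcc_id = 0, .mpcc_bot = &bot };
      struct tg_color black = { 0, 0, 0 };

      set_bg_color(&top, &black);  /* programs MPCC 2, not MPCC 0 */
      return 0;
  }
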
 
@@ -457,12 +464,10 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
 	.assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
 	.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
 	.update_blending = mpc1_update_blending,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	.set_denorm = NULL,
 	.set_denorm_clamp = NULL,
 	.set_output_csc = NULL,
 	.set_output_gamma = NULL,
-#endif
 };
 
 void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 0a9ad69..d79718f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -373,11 +373,9 @@ void opp1_program_oppbuf(
 	 */
 	REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	/* Controls the number of padded pixels at the end of a segment */
 	if (REG(OPPBUF_CONTROL1))
 		REG_UPDATE(OPPBUF_CONTROL1, OPPBUF_NUM_SEGMENT_PADDED_PIXELS, oppbuf->num_segment_padded_pixels);
-#endif
 }
 
 void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)
@@ -404,9 +402,8 @@ static const struct opp_funcs dcn10_opp_funcs = {
 		.opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
 		.opp_program_stereo = opp1_program_stereo,
 		.opp_pipe_clock_control = opp1_pipe_clock_control,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 		.opp_set_disp_pattern_generator = NULL,
-#endif
+		.dpg_is_blanked = NULL,
 		.opp_destroy = opp1_destroy
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index dabccbd..a9a43b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -457,11 +457,16 @@ static bool optc1_enable_crtc(struct timing_generator *optc)
 	REG_UPDATE(CONTROL,
 			VTG0_ENABLE, 1);
 
+	REG_SEQ_START();
+
 	/* Enable CRTC */
 	REG_UPDATE_2(OTG_CONTROL,
 			OTG_DISABLE_POINT_CNTL, 3,
 			OTG_MASTER_EN, 1);
 
+	REG_SEQ_SUBMIT();
+	REG_SEQ_WAIT_DONE();
+
 	return true;
 }
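
The OTG enable above is now wrapped in REG_SEQ_START()/REG_SEQ_SUBMIT()/REG_SEQ_WAIT_DONE(). As a rough illustration of the general pattern only, and not DC's actual REG_SEQ implementation, the idea is to collect the register writes, flush them as one burst, and then wait for completion:

  #include <stdio.h>
  #include <stdint.h>

  #define MAX_BATCH 8

  struct reg_write { uint32_t addr, value; };

  static struct reg_write batch[MAX_BATCH];
  static int batch_len;

  static void seq_start(void) { batch_len = 0; }

  static void seq_write(uint32_t addr, uint32_t v)
  {
      if (batch_len < MAX_BATCH)
          batch[batch_len++] = (struct reg_write){ addr, v };
  }

  static void seq_submit(void)
  {
      for (int i = 0; i < batch_len; i++)   /* flush as one burst */
          printf("write 0x%04x <- 0x%08x\n",
                 (unsigned)batch[i].addr, (unsigned)batch[i].value);
  }

  static void seq_wait_done(void) { puts("sequence complete"); }

  int main(void)
  {
      seq_start();
      seq_write(0x0100, 0x3);   /* e.g. disable point control (hypothetical addr) */
      seq_write(0x0104, 0x1);   /* e.g. master enable (hypothetical addr) */
      seq_submit();
      seq_wait_done();
      return 0;
  }
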
 
@@ -784,21 +789,26 @@ void optc1_set_early_control(
 
 void optc1_set_static_screen_control(
 	struct timing_generator *optc,
-	uint32_t value)
+	uint32_t event_triggers,
+	uint32_t num_frames)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
+	// By register spec, it only takes 8 bit value
+	if (num_frames > 0xFF)
+		num_frames = 0xFF;
+
 	/* Bit 8 is no longer applicable in RV for PSR case,
 	 * set bit 8 to 0 if given
 	 */
-	if ((value & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN)
+	if ((event_triggers & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN)
 			!= 0)
-		value = value &
+		event_triggers = event_triggers &
 		~STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN;
 
 	REG_SET_2(OTG_STATIC_SCREEN_CONTROL, 0,
-			OTG_STATIC_SCREEN_EVENT_MASK, value,
-			OTG_STATIC_SCREEN_FRAME_COUNT, 2);
+			OTG_STATIC_SCREEN_EVENT_MASK, event_triggers,
+			OTG_STATIC_SCREEN_FRAME_COUNT, num_frames);
 }
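
optc1_set_static_screen_control() now takes the event triggers and the frame count as separate arguments, clamps the count to the 8-bit register field, and still strips the range-timing double-buffer-update trigger (bit 8, no longer applicable here per the comment above). A small sketch of that argument handling; the exact trigger constant is assumed from the "bit 8" comment:

  #include <stdio.h>

  #define RANGETIMING_DOUBLE_BUFFER_UPDATE_EN  (1u << 8)  /* assumed bit position */

  static void set_static_screen(unsigned int event_triggers, unsigned int num_frames)
  {
      if (num_frames > 0xFF)        /* register field is 8 bits */
          num_frames = 0xFF;

      event_triggers &= ~RANGETIMING_DOUBLE_BUFFER_UPDATE_EN;

      printf("EVENT_MASK=0x%03x FRAME_COUNT=%u\n", event_triggers, num_frames);
  }

  int main(void)
  {
      set_static_screen(0x182, 300);  /* -> EVENT_MASK=0x082 FRAME_COUNT=255 */
      return 0;
  }
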
 
 void optc1_setup_manual_trigger(struct timing_generator *optc)
@@ -1497,7 +1507,6 @@ void dcn10_timing_generator_init(struct optc *optc1)
 	optc1->min_v_sync_width = 1;
 }
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 /* "Containter" vs. "pixel" is a concept within HW blocks, mostly those closer to the back-end. It works like this:
  *
  * - In most of the formats (RGB or YCbCr 4:4:4, 4:2:2 uncompressed and DSC 4:2:2 Simple) pixel rate is the same as
@@ -1510,15 +1519,12 @@ void dcn10_timing_generator_init(struct optc *optc1)
  *   to it) and has to be treated the same as 4:2:0, i.e. target containter rate has to be halved in this case as well.
  *
  */
-#endif
 bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
 {
 	bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
 			&& !timing->dsc_cfg.ycbcr422_simple);
-#endif
 	return two_pix;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index c8d795b..f277656 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -165,13 +165,11 @@ struct dcn_optc_registers {
 	uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
 	uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
 	uint32_t GSL_SOURCE_SELECT;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	uint32_t DWB_SOURCE_SELECT;
 	uint32_t OTG_DSC_START_POSITION;
 	uint32_t OPTC_DATA_FORMAT_CONTROL;
 	uint32_t OPTC_BYTES_PER_PIXEL;
 	uint32_t OPTC_WIDTH_CONTROL;
-#endif
 };
 
 #define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -456,7 +454,6 @@ struct dcn_optc_registers {
 	type MANUAL_FLOW_CONTROL;\
 	type MANUAL_FLOW_CONTROL_SEL;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 
 #define TG_REG_FIELD_LIST(type) \
 	TG_REG_FIELD_LIST_DCN1_0(type)\
@@ -479,12 +476,6 @@ struct dcn_optc_registers {
 	type OPTC_DWB0_SOURCE_SELECT;\
 	type OPTC_DWB1_SOURCE_SELECT;
 
-#else
-
-#define TG_REG_FIELD_LIST(type) \
-	TG_REG_FIELD_LIST_DCN1_0(type)
-
-#endif
 
 
 struct dcn_optc_shift {
@@ -542,6 +533,7 @@ struct dcn_otg_state {
 	uint32_t h_total;
 	uint32_t underflow_occurred_status;
 	uint32_t otg_enabled;
+	uint32_t blank_enabled;
 };
 
 void optc1_read_otg_state(struct optc *optc1,
@@ -633,7 +625,8 @@ void optc1_set_drr(
 
 void optc1_set_static_screen_control(
 	struct timing_generator *optc,
-	uint32_t value);
+	uint32_t event_triggers,
+	uint32_t num_frames);
 
 void optc1_program_stereo(struct timing_generator *optc,
 	const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 15640ae..3b71898 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -28,6 +28,8 @@
 #include "dm_services.h"
 #include "dc.h"
 
+#include "dcn10_init.h"
+
 #include "resource.h"
 #include "include/irq_service_interface.h"
 #include "dcn10_resource.h"
@@ -919,7 +921,7 @@ static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx)
 	return pp_smu;
 }
 
-static void destruct(struct dcn10_resource_pool *pool)
+static void dcn10_resource_destruct(struct dcn10_resource_pool *pool)
 {
 	unsigned int i;
 
@@ -1166,7 +1168,7 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool)
 {
 	struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
 
-	destruct(dcn10_pool);
+	dcn10_resource_destruct(dcn10_pool);
 	kfree(dcn10_pool);
 	*pool = NULL;
 }
@@ -1305,7 +1307,7 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
 	return value;
 }
 
-static bool construct(
+static bool dcn10_resource_construct(
 	uint8_t num_virtual_links,
 	struct dc *dc,
 	struct dcn10_resource_pool *pool)
@@ -1592,7 +1594,7 @@ static bool construct(
 
 fail:
 
-	destruct(pool);
+	dcn10_resource_destruct(pool);
 
 	return false;
 }
@@ -1607,7 +1609,7 @@ struct resource_pool *dcn10_create_resource_pool(
 	if (!pool)
 		return NULL;
 
-	if (construct(init_data->num_virtual_links, dc, pool))
+	if (dcn10_resource_construct(init_data->num_virtual_links, dc, pool))
 		return &pool->base;
 
 	kfree(pool);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 06e5bbb..376c426 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -247,6 +247,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
 	struct stream_encoder *enc,
 	struct dc_crtc_timing *crtc_timing,
 	enum dc_color_space output_color_space,
+	bool use_vsc_sdp_for_colorimetry,
 	uint32_t enable_sdp_splitting)
 {
 	uint32_t h_active_start;
@@ -312,10 +313,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
 	 * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
 	 * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
 	 */
-	if ((hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
-			(output_color_space == COLOR_SPACE_2020_YCBCR) ||
-			(output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) ||
-			(output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE))
+	if (use_vsc_sdp_for_colorimetry)
 		misc1 = misc1 | 0x40;
 	else
 		misc1 = misc1 & ~0x40;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index c9cbc21..f9b9e22 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -163,14 +163,12 @@ struct dcn10_stream_enc_registers {
 	uint32_t DP_MSA_TIMING_PARAM3;
 	uint32_t DP_MSA_TIMING_PARAM4;
 	uint32_t HDMI_DB_CONTROL;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	uint32_t DP_DSC_CNTL;
 	uint32_t DP_DSC_BYTES_PER_PIXEL;
 	uint32_t DME_CONTROL;
 	uint32_t DP_SEC_METADATA_TRANSMISSION;
 	uint32_t HDMI_METADATA_PACKET_CONTROL;
 	uint32_t DP_SEC_FRAMING4;
-#endif
 	uint32_t DIG_CLOCK_PATTERN;
 };
 
@@ -466,7 +464,6 @@ struct dcn10_stream_enc_registers {
 	type DIG_SOURCE_SELECT;\
 	type DIG_CLOCK_PATTERN
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define SE_REG_FIELD_LIST_DCN2_0(type) \
 	type DP_DSC_MODE;\
 	type DP_DSC_SLICE_WIDTH;\
@@ -485,20 +482,15 @@ struct dcn10_stream_enc_registers {
 	type DOLBY_VISION_EN;\
 	type DP_PIXEL_COMBINE;\
 	type DP_SST_SDP_SPLITTING
-#endif
 
 struct dcn10_stream_encoder_shift {
 	SE_REG_FIELD_LIST_DCN1_0(uint8_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	SE_REG_FIELD_LIST_DCN2_0(uint8_t);
-#endif
 };
 
 struct dcn10_stream_encoder_mask {
 	SE_REG_FIELD_LIST_DCN1_0(uint32_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	SE_REG_FIELD_LIST_DCN2_0(uint32_t);
-#endif
 };
 
 struct dcn10_stream_encoder {
@@ -526,6 +518,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
 	struct stream_encoder *enc,
 	struct dc_crtc_timing *crtc_timing,
 	enum dc_color_space output_color_space,
+	bool use_vsc_sdp_for_colorimetry,
 	uint32_t enable_sdp_splitting);
 
 void enc1_stream_encoder_hdmi_set_stream_attribute(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 10b4798..5fcaf78 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -2,16 +2,20 @@
 #
 # Makefile for DCN.
 
-DCN20 = dcn20_resource.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
+DCN20 = dcn20_resource.o dcn20_init.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
 		dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \
 		dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
 		dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
 
-ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 DCN20 += dcn20_dsc.o
+
+ifdef CONFIG_X86
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse
 endif
 
-CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
+endif
 
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -19,6 +23,7 @@
 endif
 endif
 
+ifdef CONFIG_X86
 ifdef IS_OLD_GCC
 # Stack alignment mismatch, proceed with caution.
 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -27,6 +32,7 @@
 else
 CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2
 endif
+endif
 
 AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 1e11513..50bffbf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -50,20 +50,20 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
 
 	if (dccg->ref_dppclk && req_dppclk) {
 		int ref_dppclk = dccg->ref_dppclk;
+		int modulo, phase;
 
-		ASSERT(req_dppclk <= ref_dppclk);
-		/* need to clamp to 8 bits */
-		if (ref_dppclk > 0xff) {
-			int divider = (ref_dppclk + 0xfe) / 0xff;
+		// phase / modulo = dpp pipe clk / dpp global clk
+		modulo = 0xff;   // use FF at the end
+		phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;
 
-			ref_dppclk /= divider;
-			req_dppclk = (req_dppclk + divider - 1) / divider;
-			if (req_dppclk > ref_dppclk)
-				req_dppclk = ref_dppclk;
+		if (phase > 0xff) {
+			ASSERT(false);
+			phase = 0xff;
 		}
+
 		REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
-				DPPCLK0_DTO_PHASE, req_dppclk,
-				DPPCLK0_DTO_MODULO, ref_dppclk);
+				DPPCLK0_DTO_PHASE, phase,
+				DPPCLK0_DTO_MODULO, modulo);
 		REG_UPDATE(DPPCLK_DTO_CTRL,
 				DPPCLK_DTO_ENABLE[dpp_inst], 1);
 	} else {
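
The rewritten DTO programming fixes the modulo at 0xFF and derives the phase as the rounded-up ratio of the requested per-pipe DPP clock to the reference clock, clamped to 8 bits, so phase/modulo approximates req_dppclk/ref_dppclk from above. A small worked sketch of that arithmetic:

  #include <stdio.h>
  #include <assert.h>

  static int dto_phase(int ref_dppclk, int req_dppclk)
  {
      const int modulo = 0xff;
      /* ceil(modulo * req / ref) */
      int phase = (modulo * req_dppclk + ref_dppclk - 1) / ref_dppclk;

      if (phase > 0xff) {   /* req above ref should not happen; clamp anyway */
          assert(0);
          phase = 0xff;
      }
      return phase;
  }

  int main(void)
  {
      /* e.g. a 300 MHz pipe clock against a 600 MHz reference -> phase 128 */
      printf("phase = %d / 255\n", dto_phase(600000, 300000));
      return 0;
  }
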
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 4d7e458..13e057d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -104,7 +104,7 @@ static void dpp2_cnv_setup (
 	uint32_t pixel_format = 0;
 	uint32_t alpha_en = 1;
 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
-	enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS;
+	enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS;
 	bool force_disable_cursor = false;
 	struct out_csc_color_matrix tbl_entry;
 	uint32_t is_2bit = 0;
@@ -145,25 +145,25 @@ static void dpp2_cnv_setup (
 		force_disable_cursor = false;
 		pixel_format = 65;
 		color_space = COLOR_SPACE_YCBCR709;
-		select = INPUT_CSC_SELECT_ICSC;
+		select = DCN2_ICSC_SELECT_ICSC_A;
 		break;
 	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
 		force_disable_cursor = true;
 		pixel_format = 64;
 		color_space = COLOR_SPACE_YCBCR709;
-		select = INPUT_CSC_SELECT_ICSC;
+		select = DCN2_ICSC_SELECT_ICSC_A;
 		break;
 	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
 		force_disable_cursor = true;
 		pixel_format = 67;
 		color_space = COLOR_SPACE_YCBCR709;
-		select = INPUT_CSC_SELECT_ICSC;
+		select = DCN2_ICSC_SELECT_ICSC_A;
 		break;
 	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
 		force_disable_cursor = true;
 		pixel_format = 66;
 		color_space = COLOR_SPACE_YCBCR709;
-		select = INPUT_CSC_SELECT_ICSC;
+		select = DCN2_ICSC_SELECT_ICSC_A;
 		break;
 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
 		pixel_format = 22;
@@ -177,7 +177,7 @@ static void dpp2_cnv_setup (
 	case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
 		pixel_format = 12;
 		color_space = COLOR_SPACE_YCBCR709;
-		select = INPUT_CSC_SELECT_ICSC;
+		select = DCN2_ICSC_SELECT_ICSC_A;
 		break;
 	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
 		pixel_format = 112;
@@ -188,13 +188,13 @@ static void dpp2_cnv_setup (
 	case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
 		pixel_format = 114;
 		color_space = COLOR_SPACE_YCBCR709;
-		select = INPUT_CSC_SELECT_ICSC;
+		select = DCN2_ICSC_SELECT_ICSC_A;
 		is_2bit = 1;
 		break;
 	case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
 		pixel_format = 115;
 		color_space = COLOR_SPACE_YCBCR709;
-		select = INPUT_CSC_SELECT_ICSC;
+		select = DCN2_ICSC_SELECT_ICSC_A;
 		is_2bit = 1;
 		break;
 	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
@@ -227,13 +227,13 @@ static void dpp2_cnv_setup (
 		tbl_entry.color_space = input_color_space;
 
 		if (color_space >= COLOR_SPACE_YCBCR601)
-			select = INPUT_CSC_SELECT_ICSC;
+			select = DCN2_ICSC_SELECT_ICSC_A;
 		else
-			select = INPUT_CSC_SELECT_BYPASS;
+			select = DCN2_ICSC_SELECT_BYPASS;
 
-		dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry);
+		dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry);
 	} else
-	dpp1_program_input_csc(dpp_base, color_space, select, NULL);
+	dpp2_program_input_csc(dpp_base, color_space, select, NULL);
 
 	if (force_disable_cursor) {
 		REG_UPDATE(CURSOR_CONTROL,
@@ -458,7 +458,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {
 	.dpp_reset = dpp_reset,
 	.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
 	.dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
-	.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
+	.dpp_set_gamut_remap = dpp2_cm_set_gamut_remap,
 	.dpp_set_csc_adjustment = NULL,
 	.dpp_set_csc_default = NULL,
 	.dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
index 5b03b73..2761025 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
@@ -150,6 +150,16 @@
 	SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \
 	SRI(CM_SHAPER_LUT_INDEX, CM, id)
 
+#define TF_REG_LIST_DCN20_COMMON_APPEND(id) \
+	SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\
+	SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\
+	SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\
+	SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\
+	SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\
+	SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\
+	SRI(CM_ICSC_B_C11_C12, CM, id), \
+	SRI(CM_ICSC_B_C33_C34, CM, id)
+
 #define TF_REG_LIST_DCN20(id) \
 	TF_REG_LIST_DCN(id), \
 	TF_REG_LIST_DCN20_COMMON(id), \
@@ -572,10 +582,29 @@
 	TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\
 	TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh)
 
+/* DPP CM debug status register:
+ *
+ *		Status index including current ICSC, Gamut Remap Mode is 9
+ *			ICSC Mode: [4..3]
+ *			Gamut Remap Mode: [10..9]
+ */
+#define CM_TEST_DEBUG_DATA_STATUS_IDX 9
+
+#define TF_DEBUG_REG_LIST_SH_DCN20 \
+	TF_DEBUG_REG_LIST_SH_DCN10, \
+	.CM_TEST_DEBUG_DATA_ICSC_MODE = 3, \
+	.CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 9
+
+#define TF_DEBUG_REG_LIST_MASK_DCN20 \
+	TF_DEBUG_REG_LIST_MASK_DCN10, \
+	.CM_TEST_DEBUG_DATA_ICSC_MODE = 0x18, \
+	.CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 0x600
 
 #define TF_REG_FIELD_LIST_DCN2_0(type) \
 	TF_REG_FIELD_LIST(type) \
 	type CM_BLNDGAM_LUT_DATA; \
+	type CM_TEST_DEBUG_DATA_ICSC_MODE; \
+	type CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE; \
 	type FORMAT_CNV16; \
 	type CNVC_BYPASS_MSB_ALIGN; \
 	type CLAMP_POSITIVE; \
@@ -630,11 +659,22 @@ struct dcn2_dpp_mask {
 	uint32_t COLOR_KEYER_RED; \
 	uint32_t COLOR_KEYER_GREEN; \
 	uint32_t COLOR_KEYER_BLUE; \
-	uint32_t OBUF_MEM_PWR_CTRL;\
+	uint32_t OBUF_MEM_PWR_CTRL; \
 	uint32_t DSCL_MEM_PWR_CTRL
 
+#define DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND \
+	uint32_t CM_GAMUT_REMAP_B_C11_C12; \
+	uint32_t CM_GAMUT_REMAP_B_C13_C14; \
+	uint32_t CM_GAMUT_REMAP_B_C21_C22; \
+	uint32_t CM_GAMUT_REMAP_B_C23_C24; \
+	uint32_t CM_GAMUT_REMAP_B_C31_C32; \
+	uint32_t CM_GAMUT_REMAP_B_C33_C34; \
+	uint32_t CM_ICSC_B_C11_C12; \
+	uint32_t CM_ICSC_B_C33_C34
+
 struct dcn2_dpp_registers {
 	DPP_DCN2_REG_VARIABLE_LIST;
+	DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND;
 };
 
 struct dcn20_dpp {
@@ -656,6 +696,18 @@ struct dcn20_dpp {
 	struct pwl_params pwl_data;
 };
 
+enum dcn20_input_csc_select {
+	DCN2_ICSC_SELECT_BYPASS = 0,
+	DCN2_ICSC_SELECT_ICSC_A = 1,
+	DCN2_ICSC_SELECT_ICSC_B = 2
+};
+
+enum dcn20_gamut_remap_select {
+	DCN2_GAMUT_REMAP_BYPASS = 0,
+	DCN2_GAMUT_REMAP_COEF_A = 1,
+	DCN2_GAMUT_REMAP_COEF_B = 2
+};
+
 void dpp20_read_state(struct dpp *dpp_base,
 		struct dcn_dpp_state *s);
 
@@ -667,6 +719,16 @@ void dpp2_set_degamma(
 		struct dpp *dpp_base,
 		enum ipp_degamma_mode mode);
 
+void dpp2_cm_set_gamut_remap(
+	struct dpp *dpp_base,
+	const struct dpp_grph_csc_adjustment *adjust);
+
+void dpp2_program_input_csc(
+		struct dpp *dpp_base,
+		enum dc_color_space color_space,
+		enum dcn20_input_csc_select input_select,
+		const struct out_csc_color_matrix *tbl_entry);
+
 bool dpp20_program_blnd_lut(
 	struct dpp *dpp_base, const struct pwl_params *params);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
index 2d112c3..8dc3d1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
@@ -36,6 +36,9 @@
 #define REG(reg)\
 	dpp->tf_regs->reg
 
+#define IND_REG(index) \
+	(index)
+
 #define CTX \
 	dpp->base.ctx
 
@@ -44,9 +47,6 @@
 	dpp->tf_shift->field_name, dpp->tf_mask->field_name
 
 
-
-
-
 static void dpp2_enable_cm_block(
 		struct dpp *dpp_base)
 {
@@ -149,12 +149,164 @@ void dpp2_set_degamma(
 	case IPP_DEGAMMA_MODE_HW_xvYCC:
 		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
 			break;
+	case IPP_DEGAMMA_MODE_USER_PWL:
+		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+		break;
 	default:
 		BREAK_TO_DEBUGGER();
 		break;
 	}
 }
 
+static void program_gamut_remap(
+		struct dcn20_dpp *dpp,
+		const uint16_t *regval,
+		enum dcn20_gamut_remap_select select)
+{
+	uint32_t cur_select = 0;
+	struct color_matrices_reg gam_regs;
+
+	if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) {
+		REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+				CM_GAMUT_REMAP_MODE, 0);
+		return;
+	}
+
+	/* determine which gamut_remap coefficients (A or B) we are using
+	 * currently. select the alternate set to double buffer
+	 * the update so gamut_remap is updated on frame boundary
+	 */
+	IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+					CM_TEST_DEBUG_DATA_STATUS_IDX,
+					CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select);
+
+	/* value stored in dbg reg will be 1 greater than mode we want */
+	if (cur_select != DCN2_GAMUT_REMAP_COEF_A)
+		select = DCN2_GAMUT_REMAP_COEF_A;
+	else
+		select = DCN2_GAMUT_REMAP_COEF_B;
+
+	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+	if (select == DCN2_GAMUT_REMAP_COEF_A) {
+		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+	} else {
+		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+	}
+
+	cm_helper_program_color_matrices(
+				dpp->base.ctx,
+				regval,
+				&gam_regs);
+
+	REG_SET(
+			CM_GAMUT_REMAP_CONTROL, 0,
+			CM_GAMUT_REMAP_MODE, select);
+
+}
+
+void dpp2_cm_set_gamut_remap(
+	struct dpp *dpp_base,
+	const struct dpp_grph_csc_adjustment *adjust)
+{
+	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+	int i = 0;
+
+	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+		/* Bypass if type is bypass or hw */
+		program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS);
+	else {
+		struct fixed31_32 arr_matrix[12];
+		uint16_t arr_reg_val[12];
+
+		for (i = 0; i < 12; i++)
+			arr_matrix[i] = adjust->temperature_matrix[i];
+
+		convert_float_matrix(
+			arr_reg_val, arr_matrix, 12);
+
+		program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A);
+	}
+}
+
+void dpp2_program_input_csc(
+		struct dpp *dpp_base,
+		enum dc_color_space color_space,
+		enum dcn20_input_csc_select input_select,
+		const struct out_csc_color_matrix *tbl_entry)
+{
+	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+	int i;
+	int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix);
+	const uint16_t *regval = NULL;
+	uint32_t cur_select = 0;
+	enum dcn20_input_csc_select select;
+	struct color_matrices_reg icsc_regs;
+
+	if (input_select == DCN2_ICSC_SELECT_BYPASS) {
+		REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
+		return;
+	}
+
+	if (tbl_entry == NULL) {
+		for (i = 0; i < arr_size; i++)
+			if (dpp_input_csc_matrix[i].color_space == color_space) {
+				regval = dpp_input_csc_matrix[i].regval;
+				break;
+			}
+
+		if (regval == NULL) {
+			BREAK_TO_DEBUGGER();
+			return;
+		}
+	} else {
+		regval = tbl_entry->regval;
+	}
+
+	/* determine which CSC coefficients (A or B) we are using
+	 * currently.  select the alternate set to double buffer
+	 * the CSC update so CSC is updated on frame boundary
+	 */
+	IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+					CM_TEST_DEBUG_DATA_STATUS_IDX,
+					CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select);
+
+	if (cur_select != DCN2_ICSC_SELECT_ICSC_A)
+		select = DCN2_ICSC_SELECT_ICSC_A;
+	else
+		select = DCN2_ICSC_SELECT_ICSC_B;
+
+	icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
+	icsc_regs.masks.csc_c11  = dpp->tf_mask->CM_ICSC_C11;
+	icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
+	icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
+
+	if (select == DCN2_ICSC_SELECT_ICSC_A) {
+
+		icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
+		icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
+
+	} else {
+
+		icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12);
+		icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34);
+
+	}
+
+	cm_helper_program_color_matrices(
+			dpp->base.ctx,
+			regval,
+			&icsc_regs);
+
+	REG_SET(CM_ICSC_CONTROL, 0,
+				CM_ICSC_MODE, select);
+}
+
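
Both dpp2_cm_set_gamut_remap() and dpp2_program_input_csc() above double buffer their coefficient updates: they read which set (A or B) is currently active from the CM debug status word (ICSC mode in bits [4:3], gamut-remap mode in bits [10:9], per the comment added in dcn20_dpp.h) and then program the alternate set, so the switch latches on a frame boundary. A simplified sketch of that selection logic, leaving out the register plumbing:

  #include <stdio.h>
  #include <stdint.h>

  enum bank { BYPASS = 0, COEF_A = 1, COEF_B = 2 };

  static unsigned int field(uint32_t status, int shift, uint32_t mask)
  {
      return (status & mask) >> shift;
  }

  static enum bank next_bank(unsigned int cur_mode)
  {
      /* if A is not the active set, write A; otherwise write B */
      return (cur_mode != COEF_A) ? COEF_A : COEF_B;
  }

  int main(void)
  {
      uint32_t debug_status = 0x208;  /* ICSC mode = 1 (A), remap mode = 1 (A) */

      unsigned int icsc_cur  = field(debug_status, 3, 0x18);   /* bits [4:3]  */
      unsigned int remap_cur = field(debug_status, 9, 0x600);  /* bits [10:9] */

      printf("ICSC: program bank %d\n",  next_bank(icsc_cur));   /* B */
      printf("remap: program bank %d\n", next_bank(remap_cur));  /* B */
      return 0;
  }
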
 static void dpp20_power_on_blnd_lut(
 	struct dpp *dpp_base,
 	bool power_on)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 63eb377..6bdfee2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -23,7 +23,6 @@
  *
  */
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 #include "reg_helper.h"
 #include "dcn20_dsc.h"
 #include "dsc/dscc_types.h"
@@ -207,6 +206,9 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str
 	struct dsc_reg_values dsc_reg_vals;
 	struct dsc_optc_config dsc_optc_cfg;
 
+	memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals));
+	memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg));
+
 	DC_LOG_DSC("Getting packed DSC PPS for DSC Config:");
 	dsc_config_log(dsc, dsc_cfg);
 	DC_LOG_DSC("DSC Picture Parameter Set (PPS):");
@@ -222,9 +224,18 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str
 static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
 {
 	struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+	int dsc_clock_en;
+	int dsc_fw_config;
+	int enabled_opp_pipe;
 
-	/* TODO Check if DSC alreay in use? */
-	DC_LOG_DSC("enable DSC at opp pipe %d", opp_pipe);
+	DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe);
+
+	REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
+	REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
+	if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) {
+		DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe);
+		ASSERT(0);
+	}
 
 	REG_UPDATE(DSC_TOP_CONTROL,
 		DSC_CLOCK_EN, 1);
@@ -238,8 +249,18 @@ static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
 static void dsc2_disable(struct display_stream_compressor *dsc)
 {
 	struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+	int dsc_clock_en;
+	int dsc_fw_config;
+	int enabled_opp_pipe;
 
-	DC_LOG_DSC("disable DSC");
+	DC_LOG_DSC("disable DSC %d", dsc->inst);
+
+	REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
+	REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
+	if (!dsc_clock_en || !dsc_fw_config) {
+		DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already disabled!", dsc->inst, enabled_opp_pipe);
+		ASSERT(0);
+	}
 
 	REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG,
 		DSCRM_DSC_FORWARD_EN, 0);
@@ -715,4 +736,3 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
 	}
 }
 
-#endif
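
dsc2_enable()/dsc2_disable() above now read back the clock-enable and forwarding state and assert when the DSC instance is already enabled on a different OPP pipe, or already disabled, replacing the old TODO. A minimal sketch of such a read-back guard, with a plain struct standing in for the real registers:

  #include <stdio.h>
  #include <stdbool.h>

  struct dsc_state {
      bool clock_en;
      bool forward_en;
      int  opp_pipe;   /* pipe the DSC output is currently forwarded to */
  };

  static void dsc_enable(struct dsc_state *s, int inst, int opp_pipe)
  {
      if ((s->clock_en || s->forward_en) && s->opp_pipe != opp_pipe)
          printf("ERROR: DSC %d already enabled at opp pipe %d!\n",
                 inst, s->opp_pipe);

      s->clock_en = s->forward_en = true;
      s->opp_pipe = opp_pipe;
  }

  static void dsc_disable(struct dsc_state *s, int inst)
  {
      if (!s->clock_en || !s->forward_en)
          printf("ERROR: DSC %d already disabled!\n", inst);

      s->clock_en = s->forward_en = false;
  }

  int main(void)
  {
      struct dsc_state s = { 0 };

      dsc_enable(&s, 0, 1);   /* ok */
      dsc_enable(&s, 0, 2);   /* warns: already forwarded to pipe 1 */
      dsc_disable(&s, 0);
      dsc_disable(&s, 0);     /* warns: already disabled */
      return 0;
  }
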
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index 4e2fb38..9855a7e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -21,7 +21,6 @@
  * Authors: AMD
  *
  */
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 #ifndef __DCN20_DSC_H__
 #define __DCN20_DSC_H__
 
@@ -572,4 +571,3 @@ void dsc2_construct(struct dcn20_dsc *dsc,
 
 #endif
 
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 8b84385..9235f7d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -293,6 +293,9 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
 		output->grph.rgb.max_compressed_blk_size = 64;
 		output->grph.rgb.independent_64b_blks = true;
 		break;
+	default:
+		ASSERT(false);
+		break;
 	}
 	output->capable = true;
 	output->const_color_support = true;
@@ -601,7 +604,8 @@ static const struct hubbub_funcs hubbub2_funcs = {
 	.wm_read_state = hubbub2_wm_read_state,
 	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
 	.program_watermarks = hubbub2_program_watermarks,
-	.allow_self_refresh_control = hubbub1_allow_self_refresh_control
+	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
 };
 
 void hubbub2_construct(struct dcn20_hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 69e2aae..84d7ac5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -30,6 +30,8 @@
 #include "reg_helper.h"
 #include "basics/conversion.h"
 
+#define DC_LOGGER_INIT(logger)
+
 #define REG(reg)\
 	hubp2->hubp_regs->reg
 
@@ -483,7 +485,6 @@ void hubp2_program_pixel_format(
 		REG_UPDATE(DCSURF_SURFACE_CONFIG,
 				SURFACE_PIXEL_FORMAT, 12);
 		break;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
 		REG_UPDATE(DCSURF_SURFACE_CONFIG,
 				SURFACE_PIXEL_FORMAT, 112);
@@ -504,7 +505,6 @@ void hubp2_program_pixel_format(
 		REG_UPDATE(DCSURF_SURFACE_CONFIG,
 				SURFACE_PIXEL_FORMAT, 119);
 		break;
-#endif
 	default:
 		BREAK_TO_DEBUGGER();
 		break;
@@ -1204,6 +1204,9 @@ void hubp2_read_state_common(struct hubp *hubp)
 			HUBP_TTU_DISABLE, &s->ttu_disable,
 			HUBP_UNDERFLOW_STATUS, &s->underflow_status);
 
+	REG_GET(HUBP_CLK_CNTL,
+			HUBP_CLOCK_ENABLE, &s->clock_en);
+
 	REG_GET(DCN_GLOBAL_TTU_CNTL,
 			MIN_TTU_VBLANK, &s->min_ttu_vblank);
 
@@ -1243,6 +1246,314 @@ void hubp2_read_state(struct hubp *hubp)
 
 }
 
+void hubp2_validate_dml_output(struct hubp *hubp,
+		struct dc_context *ctx,
+		struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
+		struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
+		struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr)
+{
+	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+	struct _vcs_dpi_display_rq_regs_st rq_regs = {0};
+	struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0};
+	struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0};
+	DC_LOGGER_INIT(ctx->logger);
+	DC_LOG_DEBUG("DML Validation | Running Validation");
+
+	/* Requestor Regs */
+	REG_GET(HUBPRET_CONTROL,
+		DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address);
+	REG_GET_4(DCN_EXPANSION_MODE,
+		DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode,
+		PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode,
+		MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode,
+		CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode);
+	REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+		CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size,
+		MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size,
+		META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size,
+		MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size,
+		DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size,
+		MPTE_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size,
+		SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height,
+		PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear);
+	REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
+		CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size,
+		MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size,
+		META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size,
+		MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size,
+		DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size,
+		MPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.mpte_group_size,
+		SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height,
+		PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear);
+
+	if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address)
+		DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u  Actual: %u\n",
+				dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address);
+	if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode)
+		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode);
+	if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode)
+		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode);
+	if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode)
+		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u  Actual: %u\n",
+				dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode);
+	if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode)
+		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode);
+
+	if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size);
+	if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size);
+	if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size);
+	if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size);
+	if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size);
+	if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MPTE_GROUP_SIZE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size);
+	if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height);
+	if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear);
+
+	if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size);
+	if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size);
+	if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size);
+	if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size);
+	if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size);
+	if (rq_regs.rq_regs_c.mpte_group_size != dml_rq_regs->rq_regs_c.mpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MPTE_GROUP_SIZE_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.mpte_group_size, rq_regs.rq_regs_c.mpte_group_size);
+	if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height);
+	if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear);
+
+	/* DLG - Per hubp */
+	REG_GET_2(BLANK_OFFSET_0,
+		REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end,
+		DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end);
+	REG_GET(BLANK_OFFSET_1,
+		MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start);
+	REG_GET(DST_DIMENSIONS,
+		REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal);
+	REG_GET_2(DST_AFTER_SCALER,
+		REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler,
+		DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler);
+	REG_GET(REF_FREQ_TO_PIX_FREQ,
+		REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq);
+
+	if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end)
+		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end);
+	if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end)
+		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end);
+	if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start)
+		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start);
+	if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal)
+		DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal);
+	if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler)
+		DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler);
+	if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler)
+		DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler);
+	if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq)
+		DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq);
+
+	/* DLG - Per luma/chroma */
+	REG_GET(VBLANK_PARAMETERS_1,
+		REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l);
+	if (REG(NOM_PARAMETERS_0))
+		REG_GET(NOM_PARAMETERS_0,
+			DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l);
+	if (REG(NOM_PARAMETERS_1))
+		REG_GET(NOM_PARAMETERS_1,
+			REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l);
+	REG_GET(NOM_PARAMETERS_4,
+		DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l);
+	REG_GET(NOM_PARAMETERS_5,
+		REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l);
+	REG_GET_2(PER_LINE_DELIVERY,
+		REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l,
+		REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c);
+	REG_GET_2(PER_LINE_DELIVERY_PRE,
+		REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l,
+		REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c);
+	REG_GET(VBLANK_PARAMETERS_2,
+		REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c);
+	if (REG(NOM_PARAMETERS_2))
+		REG_GET(NOM_PARAMETERS_2,
+			DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c);
+	if (REG(NOM_PARAMETERS_3))
+		REG_GET(NOM_PARAMETERS_3,
+			REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c);
+	REG_GET(NOM_PARAMETERS_6,
+		DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c);
+	REG_GET(NOM_PARAMETERS_7,
+		REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c);
+	REG_GET(VBLANK_PARAMETERS_3,
+			REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l);
+	REG_GET(VBLANK_PARAMETERS_4,
+			REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+	if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l)
+		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l);
+	if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l);
+	if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l);
+	if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l);
+	if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l);
+	if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l)
+		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l);
+	if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c)
+		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c);
+	if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c)
+		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c);
+	if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c);
+	if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c);
+	if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c);
+	if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c);
+	if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l)
+		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l);
+	if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c)
+		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c);
+	if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l)
+		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l);
+	if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c)
+		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+	/* TTU - per hubp */
+	REG_GET_2(DCN_TTU_QOS_WM,
+		QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm,
+		QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm);
+
+	if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm)
+		DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm);
+	if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm)
+		DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm);
+
+	/* TTU - per luma/chroma */
+	/* Assumed surf0 is luma and 1 is chroma */
+	REG_GET_3(DCN_SURF0_TTU_CNTL0,
+		REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l,
+		QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l,
+		QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l);
+	REG_GET_3(DCN_SURF1_TTU_CNTL0,
+		REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c,
+		QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c,
+		QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c);
+	REG_GET_3(DCN_CUR0_TTU_CNTL0,
+		REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0,
+		QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0,
+		QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0);
+	REG_GET(FLIP_PARAMETERS_1,
+		REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l);
+	REG_GET(DCN_CUR0_TTU_CNTL1,
+			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0);
+	REG_GET(DCN_CUR1_TTU_CNTL1,
+			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1);
+	REG_GET(DCN_SURF0_TTU_CNTL1,
+			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l);
+	REG_GET(DCN_SURF1_TTU_CNTL1,
+			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c);
+
+	if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l);
+	if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l);
+	if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l);
+	if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c);
+	if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c);
+	if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c);
+	if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0)
+		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0);
+	if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0)
+		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0);
+	if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0)
+		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0);
+	if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l)
+		DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l);
+	if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0)
+		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0);
+	if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1)
+		DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1);
+	if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l);
+	if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c);
+}
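The validate_dml_output hook above follows one pattern throughout: read a programmed HUBP rq/dlg/ttu register field back, compare it with the value DML computed, and emit a DC_LOG_DEBUG line on mismatch. Purely as a sketch of that compare-and-log pattern, not code from this patch (DML_VALIDATE_FIELD is a hypothetical name), the repeated checks could be factored like this:

/* Hypothetical helper, for illustration only: compare one field of the
 * read-back register struct against the DML-computed struct and log a
 * mismatch in the same format as the checks above.
 */
#define DML_VALIDATE_FIELD(reg, fld, actual, expected)				\
	do {									\
		if ((actual).fld != (expected)->fld)				\
			DC_LOG_DEBUG("DML Validation | " #reg ":" #fld		\
					" - Expected: %u  Actual: %u\n",	\
					(expected)->fld, (actual).fld);		\
	} while (0)

/* e.g. DML_VALIDATE_FIELD(DCN_EXPANSION_MODE, drq_expansion_mode,
 *			   rq_regs, dml_rq_regs);
 */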
+
 static struct hubp_funcs dcn20_hubp_funcs = {
 	.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
 	.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -1266,6 +1577,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
 	.hubp_clear_underflow = hubp2_clear_underflow,
 	.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
 	.hubp_init = hubp1_init,
+	.validate_dml_output = hubp2_validate_dml_output,
 };
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
index d5c8615..8c04a36 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
@@ -148,7 +148,6 @@
 	uint32_t VMID_SETTINGS_0
 
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
 	DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \
 	uint32_t FLIP_PARAMETERS_3;\
@@ -157,7 +156,6 @@
 	uint32_t FLIP_PARAMETERS_6;\
 	uint32_t VBLANK_PARAMETERS_5;\
 	uint32_t VBLANK_PARAMETERS_6
-#endif
 
 #define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
 	DCN_HUBP_REG_FIELD_BASE_LIST(type); \
@@ -184,7 +182,6 @@
 	type SURFACE_TRIPLE_BUFFER_ENABLE;\
 	type VMID
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 #define DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type) \
 	DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type);\
 	type REFCYC_PER_VM_GROUP_FLIP;\
@@ -194,31 +191,18 @@
 	type REFCYC_PER_PTE_GROUP_FLIP_C; \
 	type REFCYC_PER_META_CHUNK_FLIP_C; \
 	type VM_GROUP_SIZE
-#endif
 
 
 struct dcn_hubp2_registers {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	DCN21_HUBP_REG_COMMON_VARIABLE_LIST;
-#else
-	DCN2_HUBP_REG_COMMON_VARIABLE_LIST;
-#endif
 };
 
 struct dcn_hubp2_shift {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
-#else
-	DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
-#endif
 };
 
 struct dcn_hubp2_mask {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
-#else
-	DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
-#endif
 };
 
 struct dcn20_hubp {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index ac8c18f..cfbbaff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -25,17 +25,15 @@
 #include <linux/delay.h>
 
 #include "dm_services.h"
+#include "basics/dc_common.h"
 #include "dm_helpers.h"
 #include "core_types.h"
 #include "resource.h"
-#include "dcn20/dcn20_resource.h"
-#include "dce110/dce110_hw_sequencer.h"
-#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20_resource.h"
 #include "dcn20_hwseq.h"
 #include "dce/dce_hwseq.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-#include "dcn20/dcn20_dsc.h"
-#endif
+#include "dcn20_dsc.h"
+#include "dcn20_optc.h"
 #include "abm.h"
 #include "clk_mgr.h"
 #include "dmcu.h"
@@ -45,10 +43,9 @@
 #include "ipp.h"
 #include "mpc.h"
 #include "mcif_wb.h"
+#include "dchubbub.h"
 #include "reg_helper.h"
 #include "dcn10/dcn10_cm_common.h"
-#include "dcn10/dcn10_hubbub.h"
-#include "dcn10/dcn10_optc.h"
 #include "dc_link_dp.h"
 #include "vm_helper.h"
 #include "dccg.h"
@@ -64,14 +61,132 @@
 #define FN(reg_name, field_name) \
 	hws->shifts->field_name, hws->masks->field_name
 
-static void dcn20_enable_power_gating_plane(
+static int find_free_gsl_group(const struct dc *dc)
+{
+	if (dc->res_pool->gsl_groups.gsl_0 == 0)
+		return 1;
+	if (dc->res_pool->gsl_groups.gsl_1 == 0)
+		return 2;
+	if (dc->res_pool->gsl_groups.gsl_2 == 0)
+		return 3;
+
+	return 0;
+}
+
+/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
+ * This is only used to lock pipes in pipe splitting case with immediate flip
+ * Ordinary MPC/OTG locks suppress VUPDATE, which doesn't help with immediate
+ * flips, so we get tearing with freesync since we cannot flip multiple pipes
+ * atomically.
+ * We use GSL for this:
+ * - immediate flip: find the first available GSL group if not already assigned,
+ *                   program gsl with that group, set the current OTG as master
+ *                   and always use 0x4 = AND of flip_ready from all pipes
+ * - vsync flip: disable GSL if used
+ *
+ * Groups in stream_res are stored as +1 from HW registers, i.e.
+ * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
+ * Using a magic value like -1 would require tracking all inits/resets
+ */
+static void dcn20_setup_gsl_group_as_lock(
+		const struct dc *dc,
+		struct pipe_ctx *pipe_ctx,
+		bool enable)
+{
+	struct gsl_params gsl;
+	int group_idx;
+
+	memset(&gsl, 0, sizeof(struct gsl_params));
+
+	if (enable) {
+		/* return if a group is already assigned; had GSL been set up
+		 * for a vsync flip we would have unassigned it, so it can't be
+		 * "left over"
+		 */
+		if (pipe_ctx->stream_res.gsl_group > 0)
+			return;
+
+		group_idx = find_free_gsl_group(dc);
+		ASSERT(group_idx != 0);
+		pipe_ctx->stream_res.gsl_group = group_idx;
+
+		/* set gsl group reg field and mark resource used */
+		switch (group_idx) {
+		case 1:
+			gsl.gsl0_en = 1;
+			dc->res_pool->gsl_groups.gsl_0 = 1;
+			break;
+		case 2:
+			gsl.gsl1_en = 1;
+			dc->res_pool->gsl_groups.gsl_1 = 1;
+			break;
+		case 3:
+			gsl.gsl2_en = 1;
+			dc->res_pool->gsl_groups.gsl_2 = 1;
+			break;
+		default:
+			BREAK_TO_DEBUGGER();
+			return; // invalid case
+		}
+		gsl.gsl_master_en = 1;
+	} else {
+		group_idx = pipe_ctx->stream_res.gsl_group;
+		if (group_idx == 0)
+			return; // if not in use, just return
+
+		pipe_ctx->stream_res.gsl_group = 0;
+
+		/* unset gsl group reg field and mark resource free */
+		switch (group_idx) {
+		case 1:
+			gsl.gsl0_en = 0;
+			dc->res_pool->gsl_groups.gsl_0 = 0;
+			break;
+		case 2:
+			gsl.gsl1_en = 0;
+			dc->res_pool->gsl_groups.gsl_1 = 0;
+			break;
+		case 3:
+			gsl.gsl2_en = 0;
+			dc->res_pool->gsl_groups.gsl_2 = 0;
+			break;
+		default:
+			BREAK_TO_DEBUGGER();
+			return;
+		}
+		gsl.gsl_master_en = 0;
+	}
+
+	/* at this point we want to program whether it's to enable or disable */
+	if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
+		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
+		pipe_ctx->stream_res.tg->funcs->set_gsl(
+			pipe_ctx->stream_res.tg,
+			&gsl);
+
+		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
+			pipe_ctx->stream_res.tg, group_idx,	enable ? 4 : 0);
+	} else
+		BREAK_TO_DEBUGGER();
+}
+
+void dcn20_set_flip_control_gsl(
+		struct pipe_ctx *pipe_ctx,
+		bool flip_immediate)
+{
+	if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
+		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
+				pipe_ctx->plane_res.hubp, flip_immediate);
+
+}
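The two helpers above are the whole locking story for split-pipe immediate flips: dcn20_setup_gsl_group_as_lock() claims or releases a GSL group for the pipe, and dcn20_set_flip_control_gsl() routes the surface flip_ready signal through that group. A minimal usage sketch, assuming a hypothetical caller that walks the split pipes of one stream (example_lock_split_pipes_for_flip is not a function from this patch):

static void example_lock_split_pipes_for_flip(struct dc *dc,
		struct pipe_ctx *pipes[], int pipe_count, bool flip_immediate)
{
	int i;

	for (i = 0; i < pipe_count; i++) {
		/* claim a GSL group for immediate flips, release it otherwise */
		dcn20_setup_gsl_group_as_lock(dc, pipes[i], flip_immediate);
		/* tell the HUBP whether flip_ready is gated by GSL */
		dcn20_set_flip_control_gsl(pipes[i], flip_immediate);
	}
}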
+
+void dcn20_enable_power_gating_plane(
 	struct dce_hwseq *hws,
 	bool enable)
 {
-	bool force_on = 1; /* disable power gating */
+	bool force_on = true; /* disable power gating */
 
 	if (enable)
-		force_on = 0;
+		force_on = false;
 
 	/* DCHUBP0/1/2/3/4/5 */
 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
@@ -128,44 +243,6 @@ void dcn20_dccg_init(struct dce_hwseq *hws)
 	/* This value is dependent on the hardware pipeline delay so set once per SOC */
 	REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c);
 }
-void dcn20_display_init(struct dc *dc)
-{
-	struct dce_hwseq *hws = dc->hwseq;
-
-	/* RBBMIF
-	 * disable RBBMIF timeout detection for all clients
-	 * Ensure RBBMIF does not drop register accesses due to the per-client timeout
-	 */
-	REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
-	REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
-
-	/* DCCG */
-	dcn20_dccg_init(hws);
-
-	REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 0);
-
-	/* DCHUB/MMHUBBUB
-	 * set global timer refclk divider
-	 * 100Mhz refclk -> 2
-	 * 27Mhz refclk ->  1
-	 * 48Mhz refclk ->  1
-	 */
-	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
-	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
-	REG_WRITE(REFCLK_CNTL, 0);
-
-	/* OPTC
-	 * OTG_CONTROL.OTG_DISABLE_POINT_CNTL = 0x3; will be set during optc2_enable_crtc
-	 */
-
-	/* AZ
-	 * default value is 0x64 for 100Mhz ref clock, if the ref clock is 100Mhz, no need to program this regiser,
-	 * if not, it should be programmed according to the ref clock
-	 */
-	REG_UPDATE(AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, 0x64);
-	/* Enable controller clock gating */
-	REG_WRITE(AZALIA_CONTROLLER_CLOCK_GATING, 0x1);
-}
 
 void dcn20_disable_vga(
 	struct dce_hwseq *hws)
@@ -178,15 +255,15 @@ void dcn20_disable_vga(
 	REG_WRITE(D6VGA_CONTROL, 0);
 }
 
-void dcn20_program_tripleBuffer(
+void dcn20_program_triple_buffer(
 	const struct dc *dc,
 	struct pipe_ctx *pipe_ctx,
-	bool enableTripleBuffer)
+	bool enable_triple_buffer)
 {
 	if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) {
 		pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer(
 			pipe_ctx->plane_res.hubp,
-			enableTripleBuffer);
+			enable_triple_buffer);
 	}
 }
 
@@ -195,6 +272,7 @@ void dcn20_init_blank(
 		struct dc *dc,
 		struct timing_generator *tg)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	enum dc_color_space color_space;
 	struct tg_color black_color = {0};
 	struct output_pixel_processor *opp = NULL;
@@ -225,6 +303,7 @@ void dcn20_init_blank(
 	opp->funcs->opp_set_disp_pattern_generator(
 			opp,
 			CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+			CONTROLLER_DP_COLOR_SPACE_UDEFINED,
 			COLOR_DEPTH_UNDEFINED,
 			&black_color,
 			otg_active_width,
@@ -234,17 +313,17 @@ void dcn20_init_blank(
 		bottom_opp->funcs->opp_set_disp_pattern_generator(
 				bottom_opp,
 				CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+				CONTROLLER_DP_COLOR_SPACE_UDEFINED,
 				COLOR_DEPTH_UNDEFINED,
 				&black_color,
 				otg_active_width,
 				otg_active_height);
 	}
 
-	dcn20_hwss_wait_for_blank_complete(opp);
+	hws->funcs.wait_for_blank_complete(opp);
 }
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static void dcn20_dsc_pg_control(
+void dcn20_dsc_pg_control(
 		struct dce_hwseq *hws,
 		unsigned int dsc_inst,
 		bool power_on)
@@ -320,9 +399,8 @@ static void dcn20_dsc_pg_control(
 	if (org_ip_request_cntl == 0)
 		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
 }
-#endif
 
-static void dcn20_dpp_pg_control(
+void dcn20_dpp_pg_control(
 		struct dce_hwseq *hws,
 		unsigned int dpp_inst,
 		bool power_on)
@@ -396,7 +474,7 @@ static void dcn20_dpp_pg_control(
 }
 
 
-static void dcn20_hubp_pg_control(
+void dcn20_hubp_pg_control(
 		struct dce_hwseq *hws,
 		unsigned int hubp_inst,
 		bool power_on)
@@ -473,8 +551,9 @@ static void dcn20_hubp_pg_control(
 /* disable HW used by plane.
  * note:  cannot disable until disconnect is complete
  */
-static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
 
@@ -495,7 +574,7 @@ static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	hubp->power_gated = true;
 	dc->optimized_required = false; /* We're powering off, no need to optimize */
 
-	dc->hwss.plane_atomic_power_down(dc,
+	hws->funcs.plane_atomic_power_down(dc,
 			pipe_ctx->plane_res.dpp,
 			pipe_ctx->plane_res.hubp);
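From this hunk on, the patch consistently replaces dc->hwss.* calls with hws->funcs.*: helpers that are implementation details of a DCN generation move behind the private sequencer interface pulled in via hw_sequencer_private.h. The sketch below only illustrates the shape of such a split using signatures visible in this diff; the struct and field names are assumptions, not the actual contents of hw_sequencer_private.h.

/* Illustration only: hypothetical private-function table mirroring the
 * hws->funcs call sites in this patch. Real definitions live in
 * hw_sequencer_private.h and may differ.
 */
struct dc;
struct dpp;
struct hubp;
struct pipe_ctx;
struct output_pixel_processor;

struct example_hwseq_private_funcs {
	void (*plane_atomic_power_down)(struct dc *dc,
			struct dpp *dpp_to_power_down,
			struct hubp *hubp_to_power_down);
	void (*blank_pixel_data)(struct dc *dc,
			struct pipe_ctx *pipe_ctx, bool blank);
	bool (*wait_for_blank_complete)(struct output_pixel_processor *opp);
};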
 
@@ -526,6 +605,7 @@ enum dc_status dcn20_enable_stream_timing(
 		struct dc_state *context,
 		struct dc *dc)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	struct drr_params params = {0};
 	unsigned int event_triggers = 0;
@@ -585,7 +665,7 @@ enum dc_status dcn20_enable_stream_timing(
 			pipe_ctx->stream_res.opp,
 			true);
 
-	dc->hwss.blank_pixel_data(dc, pipe_ctx, true);
+	hws->funcs.blank_pixel_data(dc, pipe_ctx, true);
 
 	/* VTG is  within DCHUB command block. DCFCLK is always on */
 	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
@@ -593,7 +673,7 @@ enum dc_status dcn20_enable_stream_timing(
 		return DC_ERROR_UNEXPECTED;
 	}
 
-	dcn20_hwss_wait_for_blank_complete(pipe_ctx->stream_res.opp);
+	hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
 
 	params.vertical_total_min = stream->adjust.v_total_min;
 	params.vertical_total_max = stream->adjust.v_total_max;
@@ -606,9 +686,13 @@ enum dc_status dcn20_enable_stream_timing(
 	// DRR should set trigger event to monitor surface update event
 	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
 		event_triggers = 0x80;
+	/* Event triggers and num frames initialized for DRR, but can be
+	 * later updated for PSR use. Note that DRR trigger events are generated
+	 * regardless of whether the num frames requirement is met.
+	 */
 	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
 		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
-				pipe_ctx->stream_res.tg, event_triggers);
+				pipe_ctx->stream_res.tg, event_triggers, 2);
 
 	/* TODO program crtc source select for non-virtual signal*/
 	/* TODO program FMT */
@@ -649,7 +733,7 @@ void dcn20_program_output_csc(struct dc *dc,
 	}
 }
 
-bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
+bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
 				const struct dc_stream_state *stream)
 {
 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
@@ -736,20 +820,14 @@ bool dcn20_set_shaper_3dlut(
 	else
 		result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);
 
-	if (plane_state->lut3d_func &&
-		plane_state->lut3d_func->state.bits.initialized == 1 &&
-		plane_state->lut3d_func->hdr_multiplier != 0)
-		dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base,
-				plane_state->lut3d_func->hdr_multiplier);
-	else
-		dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, 0x1f000);
-
 	return result;
 }
 
-bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
-					  const struct dc_plane_state *plane_state)
+bool dcn20_set_input_transfer_func(struct dc *dc,
+				struct pipe_ctx *pipe_ctx,
+				const struct dc_plane_state *plane_state)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
 	const struct dc_transfer_func *tf = NULL;
 	bool result = true;
@@ -758,8 +836,8 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
 	if (dpp_base == NULL || plane_state == NULL)
 		return false;
 
-	dcn20_set_shaper_3dlut(pipe_ctx, plane_state);
-	dcn20_set_blend_lut(pipe_ctx, plane_state);
+	hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
+	hws->funcs.set_blend_lut(pipe_ctx, plane_state);
 
 	if (plane_state->in_transfer_func)
 		tf = plane_state->in_transfer_func;
@@ -804,6 +882,11 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
 					IPP_DEGAMMA_MODE_BYPASS);
 			break;
 		case TRANSFER_FUNCTION_PQ:
+			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
+			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
+			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
+			result = true;
+			break;
 		default:
 			result = false;
 			break;
@@ -824,7 +907,7 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
 	return result;
 }
 
-static void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
 {
 	struct pipe_ctx *odm_pipe;
 	int opp_cnt = 1;
@@ -855,12 +938,16 @@ void dcn20_blank_pixel_data(
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	enum dc_color_space color_space = stream->output_color_space;
 	enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
+	enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
 	struct pipe_ctx *odm_pipe;
 	int odm_cnt = 1;
 
 	int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
 	int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
 
+	if (stream->link->test_pattern_enabled)
+		return;
+
 	/* get opp dpg blank color */
 	color_space_to_black_color(dc, color_space, &black_color);
 
@@ -873,8 +960,10 @@ void dcn20_blank_pixel_data(
 		if (stream_res->abm)
 			stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
 
-		if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
+		if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
 			test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+			test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+		}
 	} else {
 		test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
 	}
@@ -882,6 +971,7 @@ void dcn20_blank_pixel_data(
 	stream_res->opp->funcs->opp_set_disp_pattern_generator(
 			stream_res->opp,
 			test_pattern,
+			test_pattern_color_space,
 			stream->timing.display_color_depth,
 			&black_color,
 			width,
@@ -892,6 +982,7 @@ void dcn20_blank_pixel_data(
 				odm_pipe->stream_res.opp,
 				dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ?
 						CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern,
+				test_pattern_color_space,
 				stream->timing.display_color_depth,
 				&black_color,
 				width,
@@ -1217,9 +1308,11 @@ static void dcn20_update_dchubp_dpp(
 	struct pipe_ctx *pipe_ctx,
 	struct dc_state *context)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+	bool viewport_changed = false;
 
 	if (pipe_ctx->update_flags.bits.dppclk)
 		dpp->funcs->dpp_dppclk_control(dpp, false, true);
@@ -1261,7 +1354,7 @@ static void dcn20_update_dchubp_dpp(
 
 		if (dpp->funcs->dpp_program_bias_and_scale) {
 			//TODO :for CNVC set scale and bias registers if necessary
-			dcn10_build_prescale_params(&bns_params, plane_state);
+			build_prescale_params(&bns_params, plane_state);
 			dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
 		}
 	}
@@ -1269,19 +1362,19 @@ static void dcn20_update_dchubp_dpp(
 	if (pipe_ctx->update_flags.bits.mpcc
 			|| plane_state->update_flags.bits.global_alpha_change
 			|| plane_state->update_flags.bits.per_pixel_alpha_change) {
-		/* Need mpcc to be idle if changing opp */
-		if (pipe_ctx->update_flags.bits.opp_changed) {
-			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
-			int mpcc_inst;
+		// MPCC inst is equal to pipe index in practice
+		int mpcc_inst = hubp->inst;
+		int opp_inst;
+		int opp_count = dc->res_pool->pipe_count;
 
-			for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
-				if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst])
-					continue;
+		for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+			if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
 				dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
-				old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+				dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+				break;
 			}
 		}
-		dc->hwss.update_mpcc(dc, pipe_ctx);
+		hws->funcs.update_mpcc(dc, pipe_ctx);
 	}
 
 	if (pipe_ctx->update_flags.bits.scaler ||
@@ -1298,14 +1391,18 @@ static void dcn20_update_dchubp_dpp(
 
 	if (pipe_ctx->update_flags.bits.viewport ||
 			(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
-			(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling))
+			(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
+
 		hubp->funcs->mem_program_viewport(
 			hubp,
 			&pipe_ctx->plane_res.scl_data.viewport,
 			&pipe_ctx->plane_res.scl_data.viewport_c);
+		viewport_changed = true;
+	}
 
 	/* Any updates are handled in dc interface, just need to apply existing for plane enable */
-	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed)
+	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+			pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)
 			&& pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
 		dc->hwss.set_cursor_position(pipe_ctx);
 		dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -1355,8 +1452,13 @@ static void dcn20_update_dchubp_dpp(
 		hubp->power_gated = false;
 	}
 
+	if (hubp->funcs->apply_PLAT_54186_wa && viewport_changed)
+		hubp->funcs->apply_PLAT_54186_wa(hubp, &plane_state->address);
+
 	if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
-		dc->hwss.update_plane_addr(dc, pipe_ctx);
+		hws->funcs.update_plane_addr(dc, pipe_ctx);
 
 	if (pipe_ctx->update_flags.bits.enable)
 		hubp->funcs->set_blank(hubp, false);
@@ -1368,10 +1470,11 @@ static void dcn20_program_pipe(
 		struct pipe_ctx *pipe_ctx,
 		struct dc_state *context)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	/* Only need to unblank on top pipe */
 	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
 			&& !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
-		dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
+		hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
 
 	if (pipe_ctx->update_flags.bits.global_sync) {
 		pipe_ctx->stream_res.tg->funcs->program_global_sync(
@@ -1384,12 +1487,12 @@ static void dcn20_program_pipe(
 		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
 
-		if (dc->hwss.setup_vupdate_interrupt)
-			dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+		if (hws->funcs.setup_vupdate_interrupt)
+			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
 	}
 
 	if (pipe_ctx->update_flags.bits.odm)
-		dc->hwss.update_odm(dc, context, pipe_ctx);
+		hws->funcs.update_odm(dc, context, pipe_ctx);
 
 	if (pipe_ctx->update_flags.bits.enable)
 		dcn20_enable_plane(dc, pipe_ctx, context);
@@ -1398,20 +1501,20 @@ static void dcn20_program_pipe(
 		dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
 
 	if (pipe_ctx->update_flags.bits.enable
-			|| pipe_ctx->plane_state->update_flags.bits.sdr_white_level)
-		set_hdr_multiplier(pipe_ctx);
+			|| pipe_ctx->plane_state->update_flags.bits.hdr_mult)
+		hws->funcs.set_hdr_multiplier(pipe_ctx);
 
 	if (pipe_ctx->update_flags.bits.enable ||
 			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
 			pipe_ctx->plane_state->update_flags.bits.gamma_change)
-		dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
 
 	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
 	 * only do gamma programming for powering on, internal memcmp to avoid
 	 * updating on slave planes
 	 */
 	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
-		dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
 
 	/* If the pipe has been enabled or has a different opp, we
 	 * should reprogram the fmt. This deals with cases where
@@ -1445,12 +1548,13 @@ static bool does_pipe_need_lock(struct pipe_ctx *pipe)
 	return false;
 }
 
-static void dcn20_program_front_end_for_ctx(
+void dcn20_program_front_end_for_ctx(
 		struct dc *dc,
 		struct dc_state *context)
 {
 	const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
 	int i;
+	struct dce_hwseq *hws = dc->hwseq;
 	bool pipe_locked[MAX_PIPES] = {false};
 	DC_LOGGER_INIT(dc->ctx->logger);
 
@@ -1482,13 +1586,13 @@ static void dcn20_program_front_end_for_ctx(
 				&& !context->res_ctx.pipe_ctx[i].top_pipe
 				&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
 				&& context->res_ctx.pipe_ctx[i].stream)
-			dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
+			hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
 
 	/* Disconnect mpcc */
 	for (i = 0; i < dc->res_pool->pipe_count; i++)
 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
 				|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
-			dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+			hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
 			DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
 		}
 
@@ -1508,8 +1612,8 @@ static void dcn20_program_front_end_for_ctx(
 			pipe = &context->res_ctx.pipe_ctx[i];
 			if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
 					&& (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
-					&& dc->hwss.program_all_writeback_pipes_in_tree)
-				dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
+					&& hws->funcs.program_all_writeback_pipes_in_tree)
+				hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
 		}
 	}
 
@@ -1541,9 +1645,9 @@ static void dcn20_program_front_end_for_ctx(
 			struct hubp *hubp = pipe->plane_res.hubp;
 			int j = 0;
 
-			for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS
+			for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000
 					&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
-				msleep(1);
+				mdelay(1);
 		}
 	}
 
@@ -1594,6 +1698,7 @@ bool dcn20_update_bandwidth(
 		struct dc_state *context)
 {
 	int i;
+	struct dce_hwseq *hws = dc->hwseq;
 
 	/* recalculate DML parameters */
 	if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
@@ -1623,10 +1728,10 @@ bool dcn20_update_bandwidth(
 					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
 
 			if (pipe_ctx->prev_odm_pipe == NULL)
-				dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+				hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
 
-			if (dc->hwss.setup_vupdate_interrupt)
-				dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+			if (hws->funcs.setup_vupdate_interrupt)
+				hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
 		}
 
 		pipe_ctx->plane_res.hubp->funcs->hubp_setup(
@@ -1640,9 +1745,8 @@ bool dcn20_update_bandwidth(
 	return true;
 }
 
-static void dcn20_enable_writeback(
+void dcn20_enable_writeback(
 		struct dc *dc,
-		const struct dc_stream_status *stream_status,
 		struct dc_writeback_info *wb_info,
 		struct dc_state *context)
 {
@@ -1656,8 +1760,7 @@ static void dcn20_enable_writeback(
 	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
 
 	/* set the OPTC source mux */
-	ASSERT(stream_status->primary_otg_inst < MAX_PIPES);
-	optc = dc->res_pool->timing_generators[stream_status->primary_otg_inst];
+	optc = dc->res_pool->timing_generators[dwb->otg_inst];
 	optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
 	/* set MCIF_WB buffer and arbitration configuration */
 	mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
@@ -1684,7 +1787,7 @@ void dcn20_disable_writeback(
 	mcif_wb->funcs->disable_mcif(mcif_wb);
 }
 
-bool dcn20_hwss_wait_for_blank_complete(
+bool dcn20_wait_for_blank_complete(
 		struct output_pixel_processor *opp)
 {
 	int counter;
@@ -1713,9 +1816,8 @@ bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
 	return hubp->funcs->dmdata_status_done(hubp);
 }
 
-static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	struct dce_hwseq *hws = dc->hwseq;
 
 	if (pipe_ctx->stream_res.dsc) {
@@ -1727,12 +1829,10 @@ static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx
 			odm_pipe = odm_pipe->next_odm_pipe;
 		}
 	}
-#endif
 }
 
-static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	struct dce_hwseq *hws = dc->hwseq;
 
 	if (pipe_ctx->stream_res.dsc) {
@@ -1744,7 +1844,6 @@ static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
 			odm_pipe = odm_pipe->next_odm_pipe;
 		}
 	}
-#endif
 }
 
 void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
@@ -1767,12 +1866,7 @@ void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
 	hubp->funcs->dmdata_set_attributes(hubp, &attr);
 }
 
-void dcn20_disable_stream(struct pipe_ctx *pipe_ctx)
-{
-	dce110_disable_stream(pipe_ctx);
-}
-
-static void dcn20_init_vm_ctx(
+void dcn20_init_vm_ctx(
 		struct dce_hwseq *hws,
 		struct dc *dc,
 		struct dc_virtual_addr_space_config *va_config,
@@ -1794,7 +1888,7 @@ static void dcn20_init_vm_ctx(
 	dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid);
 }
 
-static int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
+int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
 {
 	struct dcn_hubbub_phys_addr_config config;
 
@@ -1838,8 +1932,7 @@ static bool patch_address_for_sbs_tb_stereo(
 	return false;
 }
 
-
-static void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
 	bool addr_patched = false;
 	PHYSICAL_ADDRESS_LOC addr;
@@ -1873,6 +1966,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
 	struct encoder_unblank_param params = { { 0 } };
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	struct dc_link *link = stream->link;
+	struct dce_hwseq *hws = link->dc->hwseq;
 	struct pipe_ctx *odm_pipe;
 
 	params.opp_cnt = 1;
@@ -1885,7 +1979,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
 	params.link_settings.link_rate = link_settings->link_rate;
 
 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
-		if (optc1_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
+		if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
 			params.timing.pix_clk_100hz /= 2;
 		pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
 				pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
@@ -1893,14 +1987,14 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
 	}
 
 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
-		link->dc->hwss.edp_backlight_control(link, true);
+		hws->funcs.edp_backlight_control(link, true);
 	}
 }
 
-void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
+void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
-	int start_line = get_vupdate_offset_from_vsync(pipe_ctx);
+	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
 
 	if (start_line < 0)
 		start_line = 0;
@@ -1915,6 +2009,7 @@ static void dcn20_reset_back_end_for_pipe(
 		struct dc_state *context)
 {
 	int i;
+	struct dc_link *link;
 	DC_LOGGER_INIT(dc->ctx->logger);
 	if (pipe_ctx->stream_res.stream_enc == NULL) {
 		pipe_ctx->stream = NULL;
@@ -1922,8 +2017,14 @@ static void dcn20_reset_back_end_for_pipe(
 	}
 
 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-		/* DPMS may already disable */
-		if (!pipe_ctx->stream->dpms_off)
+		link = pipe_ctx->stream->link;
+		/* DPMS may already have disabled the stream, or the
+		 * dpms_off status may be incorrect due to the fastboot
+		 * feature: when the system resumes from S4 with a second
+		 * screen only, dpms_off is true even though the VBIOS lit
+		 * up eDP, so check the link status too.
+		 */
+		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
 			core_link_disable_stream(pipe_ctx);
 		else if (pipe_ctx->stream_res.audio)
 			dc->hwss.disable_audio_stream(pipe_ctx);
@@ -1943,11 +2044,9 @@ static void dcn20_reset_back_end_for_pipe(
 			}
 		}
 	}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	else if (pipe_ctx->stream_res.dsc) {
 		dp_set_dsc_enable(pipe_ctx, false);
 	}
-#endif
 
 	/* by upper caller loop, parent pipe: pipe0, will be reset last.
 	 * back end share by all pipes and will be disable only when disable
@@ -1978,11 +2077,12 @@ static void dcn20_reset_back_end_for_pipe(
 					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
 }
 
-static void dcn20_reset_hw_ctx_wrap(
+void dcn20_reset_hw_ctx_wrap(
 		struct dc *dc,
 		struct dc_state *context)
 {
 	int i;
+	struct dce_hwseq *hws = dc->hwseq;
 
 	/* Reset Back End*/
 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
@@ -2001,8 +2101,8 @@ static void dcn20_reset_hw_ctx_wrap(
 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
 
 			dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
-			if (dc->hwss.enable_stream_gating)
-				dc->hwss.enable_stream_gating(dc, pipe_ctx);
+			if (hws->funcs.enable_stream_gating)
+				hws->funcs.enable_stream_gating(dc, pipe_ctx);
 			if (old_clk)
 				old_clk->funcs->cs_power_down(old_clk);
 		}
@@ -2031,8 +2131,9 @@ void dcn20_get_mpctree_visual_confirm_color(
 	*color = pipe_colors[top_pipe->pipe_idx];
 }
 
-static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
+	struct dce_hwseq *hws = dc->hwseq;
 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
 	struct mpcc_blnd_cfg blnd_cfg = { {0} };
 	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
@@ -2043,10 +2144,10 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 
 	// input to MPCC is always RGB, by default leave black_color at 0
 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
-		dcn10_get_hdr_visual_confirm_color(
+		hws->funcs.get_hdr_visual_confirm_color(
 				pipe_ctx, &blnd_cfg.black_color);
 	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
-		dcn10_get_surface_visual_confirm_color(
+		hws->funcs.get_surface_visual_confirm_color(
 				pipe_ctx, &blnd_cfg.black_color);
 	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
 		dcn20_get_mpctree_visual_confirm_color(
@@ -2083,12 +2184,6 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	 */
 	mpcc_id = hubp->inst;
 
-	/* If there is no full update, don't need to touch MPC tree*/
-	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
-		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
-		return;
-	}
-
 	/* check if this MPCC is already being used */
 	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
 	/* remove MPCC if being used */
@@ -2113,125 +2208,7 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	hubp->mpcc_id = mpcc_id;
 }
 
-static int find_free_gsl_group(const struct dc *dc)
-{
-	if (dc->res_pool->gsl_groups.gsl_0 == 0)
-		return 1;
-	if (dc->res_pool->gsl_groups.gsl_1 == 0)
-		return 2;
-	if (dc->res_pool->gsl_groups.gsl_2 == 0)
-		return 3;
-
-	return 0;
-}
-
-/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
- * This is only used to lock pipes in pipe splitting case with immediate flip
- * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate,
- * so we get tearing with freesync since we cannot flip multiple pipes
- * atomically.
- * We use GSL for this:
- * - immediate flip: find first available GSL group if not already assigned
- *                   program gsl with that group, set current OTG as master
- *                   and always us 0x4 = AND of flip_ready from all pipes
- * - vsync flip: disable GSL if used
- *
- * Groups in stream_res are stored as +1 from HW registers, i.e.
- * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
- * Using a magic value like -1 would require tracking all inits/resets
- */
-void dcn20_setup_gsl_group_as_lock(
-		const struct dc *dc,
-		struct pipe_ctx *pipe_ctx,
-		bool enable)
-{
-	struct gsl_params gsl;
-	int group_idx;
-
-	memset(&gsl, 0, sizeof(struct gsl_params));
-
-	if (enable) {
-		/* return if group already assigned since GSL was set up
-		 * for vsync flip, we would unassign so it can't be "left over"
-		 */
-		if (pipe_ctx->stream_res.gsl_group > 0)
-			return;
-
-		group_idx = find_free_gsl_group(dc);
-		ASSERT(group_idx != 0);
-		pipe_ctx->stream_res.gsl_group = group_idx;
-
-		/* set gsl group reg field and mark resource used */
-		switch (group_idx) {
-		case 1:
-			gsl.gsl0_en = 1;
-			dc->res_pool->gsl_groups.gsl_0 = 1;
-			break;
-		case 2:
-			gsl.gsl1_en = 1;
-			dc->res_pool->gsl_groups.gsl_1 = 1;
-			break;
-		case 3:
-			gsl.gsl2_en = 1;
-			dc->res_pool->gsl_groups.gsl_2 = 1;
-			break;
-		default:
-			BREAK_TO_DEBUGGER();
-			return; // invalid case
-		}
-		gsl.gsl_master_en = 1;
-	} else {
-		group_idx = pipe_ctx->stream_res.gsl_group;
-		if (group_idx == 0)
-			return; // if not in use, just return
-
-		pipe_ctx->stream_res.gsl_group = 0;
-
-		/* unset gsl group reg field and mark resource free */
-		switch (group_idx) {
-		case 1:
-			gsl.gsl0_en = 0;
-			dc->res_pool->gsl_groups.gsl_0 = 0;
-			break;
-		case 2:
-			gsl.gsl1_en = 0;
-			dc->res_pool->gsl_groups.gsl_1 = 0;
-			break;
-		case 3:
-			gsl.gsl2_en = 0;
-			dc->res_pool->gsl_groups.gsl_2 = 0;
-			break;
-		default:
-			BREAK_TO_DEBUGGER();
-			return;
-		}
-		gsl.gsl_master_en = 0;
-	}
-
-	/* at this point we want to program whether it's to enable or disable */
-	if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
-		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
-		pipe_ctx->stream_res.tg->funcs->set_gsl(
-			pipe_ctx->stream_res.tg,
-			&gsl);
-
-		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
-			pipe_ctx->stream_res.tg, group_idx,	enable ? 4 : 0);
-	} else
-		BREAK_TO_DEBUGGER();
-}
-
-static void dcn20_set_flip_control_gsl(
-		struct pipe_ctx *pipe_ctx,
-		bool flip_immediate)
-{
-	if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
-		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
-				pipe_ctx->plane_res.hubp, flip_immediate);
-
-}
-
-static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
+void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
 {
 	enum dc_lane_count lane_count =
 		pipe_ctx->stream->link->cur_link_settings.lane_count;
@@ -2279,7 +2256,7 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
 	}
 }
 
-static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
+void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_stream_state    *stream     = pipe_ctx->stream;
 	struct hubp               *hubp       = pipe_ctx->plane_res.hubp;
@@ -2305,7 +2282,7 @@ static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
 						hubp->inst, mode);
 }
 
-static void dcn20_fpga_init_hw(struct dc *dc)
+void dcn20_fpga_init_hw(struct dc *dc)
 {
 	int i, j;
 	struct dce_hwseq *hws = dc->hwseq;
@@ -2320,13 +2297,13 @@ static void dcn20_fpga_init_hw(struct dc *dc)
 		res_pool->dccg->funcs->dccg_init(res_pool->dccg);
 
 	//Enable ability to power gate / don't force power on permanently
-	dc->hwss.enable_power_gating_plane(hws, true);
+	hws->funcs.enable_power_gating_plane(hws, true);
 
 	// Specific to FPGA dccg and registers
 	REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
 	REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
 
-	dcn20_dccg_init(hws);
+	hws->funcs.dccg_init(hws);
 
 	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
 	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
@@ -2390,7 +2367,7 @@ static void dcn20_fpga_init_hw(struct dc *dc)
 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
 		/*to do*/
-		hwss1_plane_atomic_disconnect(dc, pipe_ctx);
+		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
 	}
 
 	/* initialize DWB pointer to MCIF_WB */
@@ -2419,57 +2396,3 @@ static void dcn20_fpga_init_hw(struct dc *dc)
 		tg->funcs->tg_init(tg);
 	}
 }
-
-void dcn20_hw_sequencer_construct(struct dc *dc)
-{
-	dcn10_hw_sequencer_construct(dc);
-	dc->hwss.unblank_stream = dcn20_unblank_stream;
-	dc->hwss.update_plane_addr = dcn20_update_plane_addr;
-	dc->hwss.enable_stream_timing = dcn20_enable_stream_timing;
-	dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer;
-	dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func;
-	dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func;
-	dc->hwss.apply_ctx_for_surface = NULL;
-	dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx;
-	dc->hwss.pipe_control_lock = dcn20_pipe_control_lock;
-	dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global;
-	dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth;
-	dc->hwss.prepare_bandwidth = dcn20_prepare_bandwidth;
-	dc->hwss.update_bandwidth = dcn20_update_bandwidth;
-	dc->hwss.enable_writeback = dcn20_enable_writeback;
-	dc->hwss.disable_writeback = dcn20_disable_writeback;
-	dc->hwss.program_output_csc = dcn20_program_output_csc;
-	dc->hwss.update_odm = dcn20_update_odm;
-	dc->hwss.blank_pixel_data = dcn20_blank_pixel_data;
-	dc->hwss.dmdata_status_done = dcn20_dmdata_status_done;
-	dc->hwss.program_dmdata_engine = dcn20_program_dmdata_engine;
-	dc->hwss.enable_stream = dcn20_enable_stream;
-	dc->hwss.disable_stream = dcn20_disable_stream;
-	dc->hwss.init_sys_ctx = dcn20_init_sys_ctx;
-	dc->hwss.init_vm_ctx = dcn20_init_vm_ctx;
-	dc->hwss.disable_stream_gating = dcn20_disable_stream_gating;
-	dc->hwss.enable_stream_gating = dcn20_enable_stream_gating;
-	dc->hwss.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt;
-	dc->hwss.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap;
-	dc->hwss.update_mpcc = dcn20_update_mpcc;
-	dc->hwss.set_flip_control_gsl = dcn20_set_flip_control_gsl;
-	dc->hwss.init_blank = dcn20_init_blank;
-	dc->hwss.disable_plane = dcn20_disable_plane;
-	dc->hwss.plane_atomic_disable = dcn20_plane_atomic_disable;
-	dc->hwss.enable_power_gating_plane = dcn20_enable_power_gating_plane;
-	dc->hwss.dpp_pg_control = dcn20_dpp_pg_control;
-	dc->hwss.hubp_pg_control = dcn20_hubp_pg_control;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-	dc->hwss.dsc_pg_control = dcn20_dsc_pg_control;
-#else
-	dc->hwss.dsc_pg_control = NULL;
-#endif
-	dc->hwss.disable_vga = dcn20_disable_vga;
-
-	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-		dc->hwss.init_hw = dcn20_fpga_init_hw;
-		dc->hwss.init_pipes = NULL;
-	}
-
-
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 3098f10..02c9be5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -26,90 +26,112 @@
 #ifndef __DC_HWSS_DCN20_H__
 #define __DC_HWSS_DCN20_H__
 
-struct dc;
+#include "hw_sequencer_private.h"
 
-void dcn20_hw_sequencer_construct(struct dc *dc);
-
-enum dc_status dcn20_enable_stream_timing(
-		struct pipe_ctx *pipe_ctx,
-		struct dc_state *context,
-		struct dc *dc);
-
-void dcn20_blank_pixel_data(
+bool dcn20_set_blend_lut(
+	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+bool dcn20_set_shaper_3dlut(
+	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+void dcn20_program_front_end_for_ctx(
 		struct dc *dc,
-		struct pipe_ctx *pipe_ctx,
-		bool blank);
-
+		struct dc_state *context);
+void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
+bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+			const struct dc_plane_state *plane_state);
+bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+			const struct dc_stream_state *stream);
 void dcn20_program_output_csc(struct dc *dc,
 		struct pipe_ctx *pipe_ctx,
 		enum dc_color_space colorspace,
 		uint16_t *matrix,
 		int opp_id);
-
-void dcn20_prepare_bandwidth(
+void dcn20_enable_stream(struct pipe_ctx *pipe_ctx);
+void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
+		struct dc_link_settings *link_settings);
+void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_blank_pixel_data(
 		struct dc *dc,
-		struct dc_state *context);
-
-void dcn20_optimize_bandwidth(
-		struct dc *dc,
-		struct dc_state *context);
-
-bool dcn20_update_bandwidth(
-		struct dc *dc,
-		struct dc_state *context);
-
-void dcn20_disable_writeback(
-		struct dc *dc,
-		unsigned int dwb_pipe_inst);
-
-bool dcn20_hwss_wait_for_blank_complete(
-		struct output_pixel_processor *opp);
-
-bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
-			const struct dc_stream_state *stream);
-
-bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
-			const struct dc_plane_state *plane_state);
-
-bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx);
-
-void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx);
-
-void dcn20_disable_stream(struct pipe_ctx *pipe_ctx);
-
-void dcn20_program_tripleBuffer(
-		const struct dc *dc,
 		struct pipe_ctx *pipe_ctx,
-		bool enableTripleBuffer);
-
-void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx);
-
-void dcn20_pipe_control_lock_global(
-		struct dc *dc,
-		struct pipe_ctx *pipe,
-		bool lock);
-void dcn20_setup_gsl_group_as_lock(const struct dc *dc,
-				struct pipe_ctx *pipe_ctx,
-				bool enable);
-void dcn20_dccg_init(struct dce_hwseq *hws);
-void dcn20_init_blank(
-	   struct dc *dc,
-	   struct timing_generator *tg);
-void dcn20_display_init(struct dc *dc);
+		bool blank);
 void dcn20_pipe_control_lock(
 	struct dc *dc,
 	struct pipe_ctx *pipe,
 	bool lock);
-void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
-void dcn20_enable_plane(
-	struct dc *dc,
-	struct pipe_ctx *pipe_ctx,
-	struct dc_state *context);
-bool dcn20_set_blend_lut(
-	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
-bool dcn20_set_shaper_3dlut(
-	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
-void dcn20_get_mpctree_visual_confirm_color(
+void dcn20_pipe_control_lock_global(
+		struct dc *dc,
+		struct pipe_ctx *pipe,
+		bool lock);
+void dcn20_prepare_bandwidth(
+		struct dc *dc,
+		struct dc_state *context);
+void dcn20_optimize_bandwidth(
+		struct dc *dc,
+		struct dc_state *context);
+bool dcn20_update_bandwidth(
+		struct dc *dc,
+		struct dc_state *context);
+void dcn20_reset_hw_ctx_wrap(
+		struct dc *dc,
+		struct dc_state *context);
+enum dc_status dcn20_enable_stream_timing(
 		struct pipe_ctx *pipe_ctx,
-		struct tg_color *color);
+		struct dc_state *context,
+		struct dc *dc);
+void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_init_blank(
+		struct dc *dc,
+		struct timing_generator *tg);
+void dcn20_disable_vga(
+	struct dce_hwseq *hws);
+void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_power_gating_plane(
+	struct dce_hwseq *hws,
+	bool enable);
+void dcn20_dpp_pg_control(
+		struct dce_hwseq *hws,
+		unsigned int dpp_inst,
+		bool power_on);
+void dcn20_hubp_pg_control(
+		struct dce_hwseq *hws,
+		unsigned int hubp_inst,
+		bool power_on);
+void dcn20_program_triple_buffer(
+	const struct dc *dc,
+	struct pipe_ctx *pipe_ctx,
+	bool enable_triple_buffer);
+void dcn20_enable_writeback(
+		struct dc *dc,
+		struct dc_writeback_info *wb_info,
+		struct dc_state *context);
+void dcn20_disable_writeback(
+		struct dc *dc,
+		unsigned int dwb_pipe_inst);
+void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
+bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx);
+void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
+void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx);
+void dcn20_init_vm_ctx(
+		struct dce_hwseq *hws,
+		struct dc *dc,
+		struct dc_virtual_addr_space_config *va_config,
+		int vmid);
+void dcn20_set_flip_control_gsl(
+		struct pipe_ctx *pipe_ctx,
+		bool flip_immediate);
+void dcn20_dsc_pg_control(
+		struct dce_hwseq *hws,
+		unsigned int dsc_inst,
+		bool power_on);
+void dcn20_fpga_init_hw(struct dc *dc);
+bool dcn20_wait_for_blank_complete(
+		struct output_pixel_processor *opp);
+void dcn20_dccg_init(struct dce_hwseq *hws);
+int dcn20_init_sys_ctx(struct dce_hwseq *hws,
+		struct dc *dc,
+		struct dc_phy_addr_space_config *pa_config);
+
 #endif /* __DC_HWSS_DCN20_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
new file mode 100644
index 0000000..d51e02f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20_hwseq.h"
+
+static const struct hw_sequencer_funcs dcn20_funcs = {
+	.program_gamut_remap = dcn10_program_gamut_remap,
+	.init_hw = dcn10_init_hw,
+	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+	.apply_ctx_for_surface = NULL,
+	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+	.update_plane_addr = dcn20_update_plane_addr,
+	.update_dchub = dcn10_update_dchub,
+	.update_pending_status = dcn10_update_pending_status,
+	.program_output_csc = dcn20_program_output_csc,
+	.enable_accelerated_mode = dce110_enable_accelerated_mode,
+	.enable_timing_synchronization = dcn10_enable_timing_synchronization,
+	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+	.update_info_frame = dce110_update_info_frame,
+	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+	.enable_stream = dcn20_enable_stream,
+	.disable_stream = dce110_disable_stream,
+	.unblank_stream = dcn20_unblank_stream,
+	.blank_stream = dce110_blank_stream,
+	.enable_audio_stream = dce110_enable_audio_stream,
+	.disable_audio_stream = dce110_disable_audio_stream,
+	.disable_plane = dcn20_disable_plane,
+	.pipe_control_lock = dcn20_pipe_control_lock,
+	.pipe_control_lock_global = dcn20_pipe_control_lock_global,
+	.prepare_bandwidth = dcn20_prepare_bandwidth,
+	.optimize_bandwidth = dcn20_optimize_bandwidth,
+	.update_bandwidth = dcn20_update_bandwidth,
+	.set_drr = dcn10_set_drr,
+	.get_position = dcn10_get_position,
+	.set_static_screen_control = dcn10_set_static_screen_control,
+	.setup_stereo = dcn10_setup_stereo,
+	.set_avmute = dce110_set_avmute,
+	.log_hw_state = dcn10_log_hw_state,
+	.get_hw_state = dcn10_get_hw_state,
+	.clear_status_bits = dcn10_clear_status_bits,
+	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+	.edp_power_control = dce110_edp_power_control,
+	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+	.set_cursor_position = dcn10_set_cursor_position,
+	.set_cursor_attribute = dcn10_set_cursor_attribute,
+	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+	.set_clock = dcn10_set_clock,
+	.get_clock = dcn10_get_clock,
+	.program_triplebuffer = dcn20_program_triple_buffer,
+	.enable_writeback = dcn20_enable_writeback,
+	.disable_writeback = dcn20_disable_writeback,
+	.dmdata_status_done = dcn20_dmdata_status_done,
+	.program_dmdata_engine = dcn20_program_dmdata_engine,
+	.set_dmdata_attributes = dcn20_set_dmdata_attributes,
+	.init_sys_ctx = dcn20_init_sys_ctx,
+	.init_vm_ctx = dcn20_init_vm_ctx,
+	.set_flip_control_gsl = dcn20_set_flip_control_gsl,
+	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+};
+
+static const struct hwseq_private_funcs dcn20_private_funcs = {
+	.init_pipes = dcn10_init_pipes,
+	.update_plane_addr = dcn20_update_plane_addr,
+	.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+	.update_mpcc = dcn20_update_mpcc,
+	.set_input_transfer_func = dcn20_set_input_transfer_func,
+	.set_output_transfer_func = dcn20_set_output_transfer_func,
+	.power_down = dce110_power_down,
+	.enable_display_power_gating = dcn10_dummy_display_power_gating,
+	.blank_pixel_data = dcn20_blank_pixel_data,
+	.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
+	.enable_stream_timing = dcn20_enable_stream_timing,
+	.edp_backlight_control = dce110_edp_backlight_control,
+	.disable_stream_gating = dcn20_disable_stream_gating,
+	.enable_stream_gating = dcn20_enable_stream_gating,
+	.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+	.did_underflow_occur = dcn10_did_underflow_occur,
+	.init_blank = dcn20_init_blank,
+	.disable_vga = dcn20_disable_vga,
+	.bios_golden_init = dcn10_bios_golden_init,
+	.plane_atomic_disable = dcn20_plane_atomic_disable,
+	.plane_atomic_power_down = dcn10_plane_atomic_power_down,
+	.enable_power_gating_plane = dcn20_enable_power_gating_plane,
+	.dpp_pg_control = dcn20_dpp_pg_control,
+	.hubp_pg_control = dcn20_hubp_pg_control,
+	.update_odm = dcn20_update_odm,
+	.dsc_pg_control = dcn20_dsc_pg_control,
+	.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
+	.get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color,
+	.set_hdr_multiplier = dcn10_set_hdr_multiplier,
+	.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+	.wait_for_blank_complete = dcn20_wait_for_blank_complete,
+	.dccg_init = dcn20_dccg_init,
+	.set_blend_lut = dcn20_set_blend_lut,
+	.set_shaper_3dlut = dcn20_set_shaper_3dlut,
+};
+
+void dcn20_hw_sequencer_construct(struct dc *dc)
+{
+	dc->hwss = dcn20_funcs;
+	dc->hwseq->funcs = dcn20_private_funcs;
+
+	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+		dc->hwss.init_hw = dcn20_fpga_init_hw;
+		dc->hwseq->funcs.init_pipes = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
new file mode 100644
index 0000000..12277797
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN20_INIT_H__
+#define __DC_DCN20_INIT_H__
+
+struct dc;
+
+void dcn20_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN20_INIT_H__ */
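
The refactor above replaces the long list of per-field hook assignments with two
const tables plus a short constructor. One practical benefit is that a later
ASIC can start from the DCN20 tables and override only the hooks that differ. A
minimal sketch of that pattern, using hypothetical names and assuming the DCN20
tables are visible to the derived file (this is not code from the series):

static void example_init_hw(struct dc *dc)
{
	/* ASIC-specific hardware init would go here */
}

static void example_hw_sequencer_construct(struct dc *dc)
{
	dc->hwss = dcn20_funcs;			/* inherit the DCN20 public hooks */
	dc->hwseq->funcs = dcn20_private_funcs;	/* and the private ones */

	/* override only what this hypothetical ASIC needs */
	dc->hwss.init_hw = example_init_hw;
}
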
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index e476f27..e4ac730 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -168,10 +168,8 @@ static struct mpll_cfg dcn2_mpll_cfg[] = {
 void enc2_fec_set_enable(struct link_encoder *enc, bool enable)
 {
 	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	DC_LOG_DSC("%s FEC at link encoder inst %d",
 			enable ? "Enabling" : "Disabling", enc->id.enum_id);
-#endif
 	REG_UPDATE(DP_DPHY_CNTL, DPHY_FEC_EN, enable);
 }
 
@@ -192,7 +190,6 @@ bool enc2_fec_is_active(struct link_encoder *enc)
 	return (active != 0);
 }
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 /* this function reads dsc related register fields to be logged later in dcn10_log_hw_state
  * into a dcn_dsc_state struct.
  */
@@ -203,8 +200,8 @@ void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s)
 	REG_GET(DP_DPHY_CNTL, DPHY_FEC_EN, &s->dphy_fec_en);
 	REG_GET(DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, &s->dphy_fec_ready_shadow);
 	REG_GET(DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, &s->dphy_fec_active_status);
+	REG_GET(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, &s->dp_link_training_complete);
 }
-#endif
 
 static bool update_cfg_data(
 		struct dcn10_link_encoder *enc10,
@@ -315,9 +312,7 @@ void enc2_hw_init(struct link_encoder *enc)
 }
 
 static const struct link_encoder_funcs dcn20_link_enc_funcs = {
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	.read_state = link_enc2_read_state,
-#endif
 	.validate_output_with_stream =
 		dcn10_link_encoder_validate_output_with_stream,
 	.hw_init = enc2_hw_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 0c98a0bb..8cab810 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -33,7 +33,142 @@
 	SRI(AUX_DPHY_TX_CONTROL, DP_AUX, id)
 
 #define UNIPHY_MASK_SH_LIST(mask_sh)\
-	LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh)
+	LE_SF(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, mask_sh),\
+	LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh),\
+	LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
+	LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\
+	LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\
+	LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh)
+
+#define DPCS_MASK_SH_LIST(mask_sh)\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_CLK_RDY, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DATA_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_CLK_RDY, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DATA_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_CLK_RDY, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DATA_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_CLK_RDY, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DATA_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX0_TERM_CTRL, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX1_TERM_CTRL, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX2_TERM_CTRL, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX3_TERM_CTRL, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_MPLLB_MULTIPLIER, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_WIDTH, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_RATE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_WIDTH, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_RATE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_PSTATE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_PSTATE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL10, RDPCS_PHY_DP_MPLLB_FRACN_REM, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_MPLLB_DIV, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_HDMI_MPLLB_HDMI_DIV, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_SSC_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_TX_CLK_DIV, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_STATE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_CLK_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_FRACN_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_PMIX_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE0_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE1_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE2_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE3_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_EXT_REFCLK_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_BYPASS, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_CLOCK_ON, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_CLOCK_ON, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_GATE_DIS, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DISABLE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DISABLE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DISABLE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DISABLE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_REQ, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_REQ, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_REQ, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_REQ, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_ACK, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_ACK, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_ACK, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_ACK, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_RESET, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_RESET, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_RESET, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_RESET, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_RESET, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_CR_MUX_SEL, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_REF_RANGE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_BYPASS, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_EXT_LD_DONE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_HDMIMODE_ENABLE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_INIT_DONE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DP4_POR, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA, RDPCS_PLL_UPDATE_DATA, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_REG_FIFO_ERROR_MASK, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_TX_FIFO_ERROR_MASK, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_DISABLE_TOGGLE_MASK, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_4LANE_TOGGLE_MASK, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCS_TX_CR_ADDR, RDPCS_TX_CR_ADDR, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCS_TX_CR_DATA, RDPCS_TX_CR_DATA, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_V2I, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_FREQ_VCO, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_INT, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_PROP, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\
+	LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_CLOCK_ON, mask_sh),\
+	LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_GATE_DIS, mask_sh),\
+	LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_EN, mask_sh),\
+	LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\
+	LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\
+	LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\
+	LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+	LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh)
+
+#define DPCS_DCN2_MASK_SH_LIST(mask_sh)\
+	DPCS_MASK_SH_LIST(mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_REF_LD_VAL, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_VCO_LD_VAL, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_PSTATE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_PSTATE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_MPLL_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_MPLL_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_WIDTH, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_RATE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_WIDTH, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_RATE, mask_sh),\
+	LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\
+	LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
+	LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
+	LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
+	LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh)
 
 #define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\
 	LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\
@@ -63,6 +198,49 @@
 	SRI(CLOCK_ENABLE, SYMCLK, id), \
 	SRI(CHANNEL_XBAR_CNTL, UNIPHY, id)
 
+#define DPCS_DCN2_CMN_REG_LIST(id) \
+	SRI(DIG_LANE_ENABLE, DIG, id), \
+	SRI(TMDS_CTL_BITS, DIG, id), \
+	SRI(RDPCSTX_PHY_CNTL3, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_CNTL10, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_CNTL11, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_CNTL12, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_CNTL13, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_CNTL14, RDPCSTX, id), \
+	SRI(RDPCSTX_CNTL, RDPCSTX, id), \
+	SRI(RDPCSTX_CLOCK_CNTL, RDPCSTX, id), \
+	SRI(RDPCSTX_INTERRUPT_CONTROL, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_CNTL0, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_CNTL2, RDPCSTX, id), \
+	SRI(RDPCSTX_PLL_UPDATE_DATA, RDPCSTX, id), \
+	SRI(RDPCS_TX_CR_ADDR, RDPCSTX, id), \
+	SRI(RDPCS_TX_CR_DATA, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_FUSE0, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
+	SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+	SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \
+	SRI(DPCSTX_TX_CNTL, DPCSTX, id), \
+	SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \
+	SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
+	SR(RDPCSTX0_RDPCSTX_SCRATCH)
+
+
+#define DPCS_DCN2_REG_LIST(id) \
+	DPCS_DCN2_CMN_REG_LIST(id), \
+	SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
+	SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
+
+#define LE_DCN2_REG_LIST(id) \
+		LE_DCN10_REG_LIST(id), \
+		SR(DCIO_SOFT_RESET)
+
 struct mpll_cfg {
 	uint32_t mpllb_ana_v2i;
 	uint32_t mpllb_ana_freq_vco;
@@ -158,9 +336,7 @@ void enc2_fec_set_ready(struct link_encoder *enc, bool ready);
 bool enc2_fec_is_active(struct link_encoder *enc);
 void enc2_hw_init(struct link_encoder *enc);
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s);
-#endif
 
 void dcn20_link_encoder_enable_dp_output(
 	struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 5a188b2..de9c857 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -33,6 +33,9 @@
 #define REG(reg)\
 	mpc20->mpc_regs->reg
 
+#define IND_REG(index) \
+	(index)
+
 #define CTX \
 	mpc20->base.ctx
 
@@ -132,19 +135,33 @@ void mpc2_set_output_csc(
 		const uint16_t *regval,
 		enum mpc_output_csc_mode ocsc_mode)
 {
+	uint32_t cur_mode;
 	struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
 	struct color_matrices_reg ocsc_regs;
 
-	REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
-
-	if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
+	if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
+		REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
 		return;
+	}
 
 	if (regval == NULL) {
 		BREAK_TO_DEBUGGER();
 		return;
 	}
 
+	/* Determine which CSC coefficients (A or B) are currently in use,
+	 * then select the alternate set so the CSC update is double buffered
+	 * and takes effect on a frame boundary.
+	 */
+	IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
+						MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
+						MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
+
+	if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
+		ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+	else
+		ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
+
 	ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
 	ocsc_regs.masks.csc_c11  = mpc20->mpc_mask->MPC_OCSC_C11_A;
 	ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
@@ -157,10 +174,13 @@ void mpc2_set_output_csc(
 		ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);
 		ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);
 	}
+
 	cm_helper_program_color_matrices(
 			mpc20->base.ctx,
 			regval,
 			&ocsc_regs);
+
+	REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
 }
 
 void mpc2_set_ocsc_default(
@@ -169,14 +189,16 @@ void mpc2_set_ocsc_default(
 		enum dc_color_space color_space,
 		enum mpc_output_csc_mode ocsc_mode)
 {
+	uint32_t cur_mode;
 	struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
 	uint32_t arr_size;
 	struct color_matrices_reg ocsc_regs;
 	const uint16_t *regval = NULL;
 
-	REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
-	if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
+	if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
+		REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
 		return;
+	}
 
 	regval = find_color_matrix(color_space, &arr_size);
 
@@ -185,6 +207,19 @@ void mpc2_set_ocsc_default(
 		return;
 	}
 
+	/* Determine which CSC coefficients (A or B) are currently in use,
+	 * then select the alternate set so the CSC update is double buffered
+	 * and takes effect on a frame boundary.
+	 */
+	IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
+						MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
+						MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
+
+	if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
+		ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+	else
+		ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
+
 	ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
 	ocsc_regs.masks.csc_c11  = mpc20->mpc_mask->MPC_OCSC_C11_A;
 	ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
@@ -203,6 +238,8 @@ void mpc2_set_ocsc_default(
 			mpc20->base.ctx,
 			regval,
 			&ocsc_regs);
+
+	REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
 }
 
 static void mpc2_ogam_get_reg_field(
@@ -345,6 +382,9 @@ static void mpc20_program_ogam_pwl(
 	uint32_t i;
 	struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
 
+	PERF_TRACE();
+	REG_SEQ_START();
+
 	for (i = 0 ; i < num; i++) {
 		REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg);
 		REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg);
@@ -463,6 +503,11 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
 		ASSERT(!mpc_disabled);
 		ASSERT(!mpc_idle);
 	}
+
+	REG_SEQ_SUBMIT();
+	PERF_TRACE();
+	REG_SEQ_WAIT_DONE();
+	PERF_TRACE();
 }
 
 static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
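
The reordering in mpc2_set_output_csc()/mpc2_set_ocsc_default() above makes the
output CSC programming double buffered: the currently active coefficient set
(A or B) is read back through the OCSC debug index, the inactive set is
programmed, and MPC_OCSC_MODE is written last so the switch lands on a frame
boundary. A stripped-down sketch of just the set selection (illustration only,
not driver code):

enum csc_coef_set { CSC_COEF_A, CSC_COEF_B };

/* Program whichever coefficient set is not currently live,
 * then switch MPC_OCSC_MODE to it as the final step.
 */
static enum csc_coef_set csc_pick_inactive(enum csc_coef_set cur)
{
	return (cur != CSC_COEF_A) ? CSC_COEF_A : CSC_COEF_B;
}
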
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
index 9f53192..c78fd51 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
@@ -80,6 +80,10 @@
 	SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\
 	SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst)
 
+#define MPC_DBG_REG_LIST_DCN2_0() \
+	SR(MPC_OCSC_TEST_DEBUG_DATA),\
+	SR(MPC_OCSC_TEST_DEBUG_INDEX)
+
 #define MPC_REG_VARIABLE_LIST_DCN2_0 \
 	MPC_COMMON_REG_VARIABLE_LIST \
 	uint32_t MPCC_TOP_GAIN[MAX_MPCC]; \
@@ -118,6 +122,8 @@
 	uint32_t MPCC_OGAM_LUT_RAM_CONTROL[MAX_MPCC];\
 	uint32_t MPCC_OGAM_LUT_DATA[MAX_MPCC];\
 	uint32_t MPCC_OGAM_MODE[MAX_MPCC];\
+	uint32_t MPC_OCSC_TEST_DEBUG_DATA;\
+	uint32_t MPC_OCSC_TEST_DEBUG_INDEX;\
 	uint32_t CSC_MODE[MAX_OPP]; \
 	uint32_t CSC_C11_C12_A[MAX_OPP]; \
 	uint32_t CSC_C33_C34_A[MAX_OPP]; \
@@ -134,6 +140,7 @@
 	SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\
 	SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\
 	SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\
+	SF(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_INDEX, mask_sh),\
 	SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\
 	SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\
 	SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\
@@ -174,6 +181,19 @@
 	SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
 	SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
 
+/*
+ *	DCN2 MPC_OCSC debug status register:
+ *
+ *		The status index that reports the current OCSC mode is 1;
+ *		the OCSC mode field occupies bits [1:0].
+ */
+#define MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX 1
+
+#define MPC_DEBUG_REG_LIST_SH_DCN20 \
+	.MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0
+
+#define MPC_DEBUG_REG_LIST_MASK_DCN20 \
+	.MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0x3
 
 #define MPC_REG_FIELD_LIST_DCN2_0(type) \
 	MPC_REG_FIELD_LIST(type)\
@@ -182,6 +202,8 @@
 	type MPCC_TOP_GAIN;\
 	type MPCC_BOT_GAIN_INSIDE;\
 	type MPCC_BOT_GAIN_OUTSIDE;\
+	type MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE;\
+	type MPC_OCSC_TEST_DEBUG_INDEX;\
 	type MPC_OCSC_MODE;\
 	type MPC_OCSC_C11_A;\
 	type MPC_OCSC_C12_A;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 40164ed..023cc71 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -41,6 +41,7 @@
 void opp2_set_disp_pattern_generator(
 		struct output_pixel_processor *opp,
 		enum controller_dp_test_pattern test_pattern,
+		enum controller_dp_color_space color_space,
 		enum dc_color_depth color_depth,
 		const struct tg_color *solid_color,
 		int width,
@@ -100,9 +101,22 @@ void opp2_set_disp_pattern_generator(
 				TEST_PATTERN_DYN_RANGE_CEA :
 				TEST_PATTERN_DYN_RANGE_VESA);
 
+		switch (color_space) {
+		case CONTROLLER_DP_COLOR_SPACE_YCBCR601:
+			mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR601;
+		break;
+		case CONTROLLER_DP_COLOR_SPACE_YCBCR709:
+			mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR709;
+		break;
+		case CONTROLLER_DP_COLOR_SPACE_RGB:
+		default:
+			mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+		break;
+		}
+
 		REG_UPDATE_6(DPG_CONTROL,
 			DPG_EN, 1,
-			DPG_MODE, TEST_PATTERN_MODE_COLORSQUARES_RGB,
+			DPG_MODE, mode,
 			DPG_DYNAMIC_RANGE, dyn_range,
 			DPG_BIT_DEPTH, bit_depth,
 			DPG_VRES, 6,
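
With the new color_space argument, opp2_set_disp_pattern_generator() can emit
YCbCr 601/709 color squares instead of always assuming RGB. Restated as a
standalone helper for clarity (sketch only, not part of the patch):

static int dpg_colorsquares_mode(enum controller_dp_color_space cs)
{
	switch (cs) {
	case CONTROLLER_DP_COLOR_SPACE_YCBCR601:
		return TEST_PATTERN_MODE_COLORSQUARES_YCBCR601;
	case CONTROLLER_DP_COLOR_SPACE_YCBCR709:
		return TEST_PATTERN_MODE_COLORSQUARES_YCBCR709;
	case CONTROLLER_DP_COLOR_SPACE_RGB:
	default:
		return TEST_PATTERN_MODE_COLORSQUARES_RGB;
	}
}
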
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index abd8de9..4093bec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -140,6 +140,7 @@ void dcn20_opp_construct(struct dcn20_opp *oppn20,
 void opp2_set_disp_pattern_generator(
 	struct output_pixel_processor *opp,
 	enum controller_dp_test_pattern test_pattern,
+	enum controller_dp_color_space color_space,
 	enum dc_color_depth color_depth,
 	const struct tg_color *solid_color,
 	int width,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 3b613fb..d875b0c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -59,11 +59,16 @@ bool optc2_enable_crtc(struct timing_generator *optc)
 	REG_UPDATE(CONTROL,
 			VTG0_ENABLE, 1);
 
+	REG_SEQ_START();
+
 	/* Enable CRTC */
 	REG_UPDATE_2(OTG_CONTROL,
 			OTG_DISABLE_POINT_CNTL, 3,
 			OTG_MASTER_EN, 1);
 
+	REG_SEQ_SUBMIT();
+	REG_SEQ_WAIT_DONE();
+
 	return true;
 }
 
@@ -167,7 +172,6 @@ void optc2_set_gsl_source_select(
 	}
 }
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 /* DSC encoder frame start controls: x = h position, line_num = # of lines from vstartup */
 void optc2_set_dsc_encoder_frame_start(struct timing_generator *optc,
 					int x_position,
@@ -201,13 +205,12 @@ void optc2_set_dsc_config(struct timing_generator *optc,
 	REG_UPDATE(OPTC_WIDTH_CONTROL,
 		OPTC_DSC_SLICE_WIDTH, dsc_slice_width);
 }
-#endif
 
-/**
- * PTI i think is already done somewhere else for 2ka
- * (opp?, please double check.
- * OPTC side only has 1 register to set for PTI_ENABLE)
- */
+/* TEMP: need to figure out the inheritance model here. */
+bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
+{
+	return optc1_is_two_pixels_per_containter(timing);
+}
 
 void optc2_set_odm_bypass(struct timing_generator *optc,
 		const struct dc_crtc_timing *dc_crtc_timing)
@@ -221,7 +224,7 @@ void optc2_set_odm_bypass(struct timing_generator *optc,
 			OPTC_SEG1_SRC_SEL, 0xf);
 	REG_WRITE(OTG_H_TIMING_CNTL, 0);
 
-	h_div_2 = optc1_is_two_pixels_per_containter(dc_crtc_timing);
+	h_div_2 = optc2_is_two_pixels_per_containter(dc_crtc_timing);
 	REG_UPDATE(OTG_H_TIMING_CNTL,
 			OTG_H_TIMING_DIV_BY2, h_div_2);
 	REG_SET(OPTC_MEMORY_CONFIG, 0,
@@ -233,12 +236,13 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
 		struct dc_crtc_timing *timing)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
-	/* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192 */
 	int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
 			/ opp_cnt;
-	int memory_mask = mpcc_hactive <= 2560 ? 0x3 : 0xf;
+	uint32_t memory_mask;
 	uint32_t data_fmt = 0;
 
+	ASSERT(opp_cnt == 2);
+
 	/* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic
 	 * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);
 	 * Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start
@@ -246,9 +250,17 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
 	 *		MASTER_UPDATE_LOCK_DB_X, 160,
 	 *		MASTER_UPDATE_LOCK_DB_Y, 240);
 	 */
+
+	/* 2 pieces of memory are required for displays up to 5120 pixels wide
+	 * and 4 for up to 8192; for ODM combine we simplify by always using 4.
+	 * To make sure there is no overlap, each OPP instance "reserves" 2
+	 * memories, and they are combined uniquely here.
+	 */
+	memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
+
 	if (REG(OPTC_MEMORY_CONFIG))
 		REG_SET(OPTC_MEMORY_CONFIG, 0,
-			OPTC_MEM_SEL, memory_mask << (optc->inst * 4));
+			OPTC_MEM_SEL, memory_mask);
 
 	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
 		data_fmt = 1;
@@ -257,7 +269,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
 
 	REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
 
-	ASSERT(opp_cnt == 2);
 	REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
 			OPTC_NUM_OF_INPUT_SEGMENT, 1,
 			OPTC_SEG0_SRC_SEL, opp_id[0],
@@ -379,14 +390,8 @@ void optc2_setup_manual_trigger(struct timing_generator *optc)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
-	REG_SET(OTG_MANUAL_FLOW_CONTROL, 0,
-			MANUAL_FLOW_CONTROL, 1);
-
-	REG_SET(OTG_GLOBAL_CONTROL2, 0,
-			MANUAL_FLOW_CONTROL_SEL, optc->inst);
-
 	REG_SET_8(OTG_TRIGA_CNTL, 0,
-			OTG_TRIGA_SOURCE_SELECT, 22,
+			OTG_TRIGA_SOURCE_SELECT, 21,
 			OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
 			OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
 			OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
@@ -448,9 +453,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
 		.setup_global_swap_lock = NULL,
 		.get_crc = optc1_get_crc,
 		.configure_crc = optc1_configure_crc,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 		.set_dsc_config = optc2_set_dsc_config,
-#endif
 		.set_dwb_source = optc2_set_dwb_source,
 		.set_odm_bypass = optc2_set_odm_bypass,
 		.set_odm_combine = optc2_set_odm_combine,
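
A quick check of the OPTC memory-mask arithmetic introduced in
optc2_set_odm_combine() above: each OPP instance claims two of the line
memories, so the masks of the two combined instances can never overlap. A
self-contained sketch of the calculation (illustration only, not driver code):

#include <stdio.h>

int main(void)
{
	int opp_id[2] = { 1, 2 };
	unsigned int memory_mask =
		0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);

	/* (0x3 << 2) | (0x3 << 4) = 0x0c | 0x30 = 0x3c: memories 2-5 */
	printf("OPTC_MEM_SEL mask: 0x%x\n", memory_mask);
	return 0;
}
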
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
index 32a5843..239cc40 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
@@ -86,12 +86,10 @@ void optc2_set_gsl_source_select(struct timing_generator *optc,
 		int group_idx,
 		uint32_t gsl_ready_signal);
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 void optc2_set_dsc_config(struct timing_generator *optc,
 					enum optc_dsc_mode dsc_mode,
 					uint32_t dsc_bytes_per_pixel,
 					uint32_t dsc_slice_width);
-#endif
 
 void optc2_set_odm_bypass(struct timing_generator *optc,
 		const struct dc_crtc_timing *dc_crtc_timing);
@@ -108,6 +106,7 @@ void optc2_triplebuffer_lock(struct timing_generator *optc);
 void optc2_triplebuffer_unlock(struct timing_generator *optc);
 void optc2_lock_doublebuffer_disable(struct timing_generator *optc);
 void optc2_lock_doublebuffer_enable(struct timing_generator *optc);
+void optc2_setup_manual_trigger(struct timing_generator *optc);
 void optc2_program_manual_trigger(struct timing_generator *optc);
-
+bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
 #endif /* __DC_OPTC_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 23ff2f1c..85f90f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1,5 +1,6 @@
 /*
 * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -28,6 +29,8 @@
 #include "dm_services.h"
 #include "dc.h"
 
+#include "dcn20_init.h"
+
 #include "resource.h"
 #include "include/irq_service_interface.h"
 #include "dcn20/dcn20_resource.h"
@@ -45,9 +48,7 @@
 #include "dcn10/dcn10_resource.h"
 #include "dcn20_opp.h"
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 #include "dcn20_dsc.h"
-#endif
 
 #include "dcn20_link_encoder.h"
 #include "dcn20_stream_encoder.h"
@@ -59,11 +60,14 @@
 #include "dml/display_mode_vba.h"
 #include "dcn20_dccg.h"
 #include "dcn20_vmid.h"
+#include "dc_link_ddc.h"
 
 #include "navi10_ip_offset.h"
 
 #include "dcn/dcn_2_0_0_offset.h"
 #include "dcn/dcn_2_0_0_sh_mask.h"
+#include "dpcs/dpcs_2_0_0_offset.h"
+#include "dpcs/dpcs_2_0_0_sh_mask.h"
 
 #include "nbio/nbio_2_3_offset.h"
 
@@ -82,8 +86,6 @@
 
 #include "amdgpu_socbb.h"
 
-/* NV12 SOC BB is currently in FW, mark SW bounding box invalid. */
-#define SOC_BOUNDING_BOX_VALID false
 #define DC_LOGGER_INIT(logger)
 
 struct _vcs_dpi_ip_params_st dcn2_0_ip = {
@@ -94,11 +96,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
 	.hostvm_max_page_table_levels = 4,
 	.hostvm_cached_page_table_levels = 0,
 	.pte_group_size_bytes = 2048,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	.num_dsc = 6,
-#else
-	.num_dsc = 0,
-#endif
 	.rob_buffer_size_kbytes = 168,
 	.det_buffer_size_kbytes = 164,
 	.dpte_buffer_size_in_pte_reqs_luma = 84,
@@ -553,6 +551,7 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
 [id] = {\
 	LE_DCN10_REG_LIST(id), \
 	UNIPHY_DCN2_REG_LIST(phyid), \
+	DPCS_DCN2_REG_LIST(id), \
 	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
 }
 
@@ -566,11 +565,13 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = {
 };
 
 static const struct dcn10_link_enc_shift le_shift = {
-	LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
+	LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\
+	DPCS_DCN2_MASK_SH_LIST(__SHIFT)
 };
 
 static const struct dcn10_link_enc_mask le_mask = {
-	LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
+	LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\
+	DPCS_DCN2_MASK_SH_LIST(_MASK)
 };
 
 #define ipp_regs(id)\
@@ -637,6 +638,7 @@ static const struct dce110_aux_registers aux_engine_regs[] = {
 #define tf_regs(id)\
 [id] = {\
 	TF_REG_LIST_DCN20(id),\
+	TF_REG_LIST_DCN20_COMMON_APPEND(id),\
 }
 
 static const struct dcn2_dpp_registers tf_regs[] = {
@@ -650,12 +652,12 @@ static const struct dcn2_dpp_registers tf_regs[] = {
 
 static const struct dcn2_dpp_shift tf_shift = {
 		TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
-		TF_DEBUG_REG_LIST_SH_DCN10
+		TF_DEBUG_REG_LIST_SH_DCN20
 };
 
 static const struct dcn2_dpp_mask tf_mask = {
 		TF_REG_LIST_SH_MASK_DCN20(_MASK),
-		TF_DEBUG_REG_LIST_MASK_DCN10
+		TF_DEBUG_REG_LIST_MASK_DCN20
 };
 
 #define dwbc_regs_dcn2(id)\
@@ -705,14 +707,17 @@ static const struct dcn20_mpc_registers mpc_regs = {
 		MPC_OUT_MUX_REG_LIST_DCN2_0(3),
 		MPC_OUT_MUX_REG_LIST_DCN2_0(4),
 		MPC_OUT_MUX_REG_LIST_DCN2_0(5),
+		MPC_DBG_REG_LIST_DCN2_0()
 };
 
 static const struct dcn20_mpc_shift mpc_shift = {
-	MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+	MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
+	MPC_DEBUG_REG_LIST_SH_DCN20
 };
 
 static const struct dcn20_mpc_mask mpc_mask = {
-	MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+	MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
+	MPC_DEBUG_REG_LIST_MASK_DCN20
 };
 
 #define tg_regs(id)\
@@ -838,7 +843,6 @@ static int map_transmitter_id_to_phy_instance(
 	}
 }
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 #define dsc_regsDCN20(id)\
 [id] = {\
 	DSC_REG_LIST_DCN20(id)\
@@ -860,7 +864,6 @@ static const struct dcn20_dsc_shift dsc_shift = {
 static const struct dcn20_dsc_mask dsc_mask = {
 	DSC_REG_LIST_SH_MASK_DCN20(_MASK)
 };
-#endif
 
 static const struct dccg_registers dccg_regs = {
 		DCCG_REG_LIST_DCN2()
@@ -884,9 +887,7 @@ static const struct resource_caps res_cap_nv10 = {
 		.num_dwb = 1,
 		.num_ddc = 6,
 		.num_vmid = 16,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 		.num_dsc = 6,
-#endif
 };
 
 static const struct dc_plane_cap plane_cap = {
@@ -923,9 +924,7 @@ static const struct resource_caps res_cap_nv14 = {
 		.num_dwb = 1,
 		.num_ddc = 5,
 		.num_vmid = 16,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 		.num_dsc = 5,
-#endif
 };
 
 static const struct dc_debug_options debug_defaults_drv = {
@@ -1284,7 +1283,6 @@ void dcn20_clock_source_destroy(struct clock_source **clk_src)
 	*clk_src = NULL;
 }
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 
 struct display_stream_compressor *dcn20_dsc_create(
 	struct dc_context *ctx, uint32_t inst)
@@ -1307,9 +1305,8 @@ void dcn20_dsc_destroy(struct display_stream_compressor **dsc)
 	*dsc = NULL;
 }
 
-#endif
 
-static void destruct(struct dcn20_resource_pool *pool)
+static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)
 {
 	unsigned int i;
 
@@ -1320,12 +1317,10 @@ static void destruct(struct dcn20_resource_pool *pool)
 		}
 	}
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
 		if (pool->base.dscs[i] != NULL)
 			dcn20_dsc_destroy(&pool->base.dscs[i]);
 	}
-#endif
 
 	if (pool->base.mpc != NULL) {
 		kfree(TO_DCN20_MPC(pool->base.mpc));
@@ -1418,6 +1413,8 @@ static void destruct(struct dcn20_resource_pool *pool)
 	if (pool->base.pp_smu != NULL)
 		dcn20_pp_smu_destroy(&pool->base.pp_smu);
 
+	if (pool->base.oem_device != NULL)
+		dal_ddc_service_destroy(&pool->base.oem_device);
 }
 
 struct hubp *dcn20_hubp_create(
@@ -1468,7 +1465,7 @@ static void get_pixel_clock_parameters(
 
 	if (opp_cnt == 4)
 		pixel_clk_params->requested_pix_clk_100hz /= 4;
-	else if (optc1_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)
+	else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)
 		pixel_clk_params->requested_pix_clk_100hz /= 2;
 
 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
@@ -1534,7 +1531,6 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
 	return status;
 }
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 
 static void acquire_dsc(struct resource_context *res_ctx,
 			const struct resource_pool *pool,
@@ -1575,11 +1571,9 @@ static void release_dsc(struct resource_context *res_ctx,
 		}
 }
 
-#endif
 
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
+enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
 		struct dc_state *dc_ctx,
 		struct dc_stream_state *dc_stream)
 {
@@ -1594,11 +1588,13 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
 		if (pipe_ctx->stream != dc_stream)
 			continue;
 
+		if (pipe_ctx->stream_res.dsc)
+			continue;
+
 		acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
 
 		/* The number of DSCs can be less than the number of pipes */
 		if (!pipe_ctx->stream_res.dsc) {
-			dm_output_to_console("No DSCs available\n");
 			result = DC_NO_DSC_RESOURCE;
 		}
 
@@ -1630,7 +1626,6 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
 	else
 		return DC_OK;
 }
-#endif
 
 
 enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
@@ -1642,11 +1637,9 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx,
 	if (result == DC_OK)
 		result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	/* Get a DSC if required and available */
 	if (result == DC_OK && dc_stream->timing.flags.DSC)
-		result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
-#endif
+		result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
 
 	if (result == DC_OK)
 		result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
@@ -1659,9 +1652,7 @@ enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_
 {
 	enum dc_status result = DC_OK;
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream);
-#endif
 
 	return result;
 }
@@ -1744,9 +1735,7 @@ bool dcn20_split_stream_for_odm(
 	next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx];
 	next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx];
 	next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	next_odm_pipe->stream_res.dsc = NULL;
-#endif
 	if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {
 		next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
 		next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
@@ -1792,14 +1781,12 @@ bool dcn20_split_stream_for_odm(
 		sd->recout.x = 0;
 	}
 	next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	if (next_odm_pipe->stream->timing.flags.DSC == 1) {
 		acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
 		ASSERT(next_odm_pipe->stream_res.dsc);
 		if (next_odm_pipe->stream_res.dsc == NULL)
 			return false;
 	}
-#endif
 
 	return true;
 }
@@ -1823,9 +1810,7 @@ void dcn20_split_stream_for_mpc(
 	secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
 	secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
 	secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	secondary_pipe->stream_res.dsc = NULL;
-#endif
 	if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {
 		ASSERT(!secondary_pipe->bottom_pipe);
 		secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
@@ -1876,11 +1861,28 @@ void dcn20_populate_dml_writeback_from_context(
 
 }
 
+static int get_num_odm_heads(struct pipe_ctx *pipe)
+{
+	int odm_head_count = 0;
+	struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
+	while (next_pipe) {
+		odm_head_count++;
+		next_pipe = next_pipe->next_odm_pipe;
+	}
+	pipe = pipe->prev_odm_pipe;
+	while (pipe) {
+		odm_head_count++;
+		pipe = pipe->prev_odm_pipe;
+	}
+	return odm_head_count ? odm_head_count + 1 : 0;
+}
+
 int dcn20_populate_dml_pipes_from_context(
-		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+		struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)
 {
 	int pipe_cnt, i;
 	bool synchronized_vblank = true;
+	struct resource_context *res_ctx = &context->res_ctx;
 
 	for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
 		if (!res_ctx->pipe_ctx[i].stream)
@@ -1900,25 +1902,30 @@ int dcn20_populate_dml_pipes_from_context(
 
 	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
 		struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
+		unsigned int v_total;
+		unsigned int front_porch;
 		int output_bpc;
 
 		if (!res_ctx->pipe_ctx[i].stream)
 			continue;
+
+		v_total = timing->v_total;
+		front_porch = timing->v_front_porch;
 		/* todo:
 		pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
 		pipes[pipe_cnt].pipe.src.dcc = 0;
 		pipes[pipe_cnt].pipe.src.vm = 0;*/
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+		pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
+
 		pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
 		/* todo: rotation?*/
 		pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
-#endif
 		if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
 			pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
 			/* 1/2 vblank */
 			pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
-				(timing->v_total - timing->v_addressable
+				(v_total - timing->v_addressable
 					- timing->v_border_top - timing->v_border_bottom) / 2;
 			/* 36 bytes dp, 32 hdmi */
 			pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
@@ -1932,13 +1939,13 @@ int dcn20_populate_dml_pipes_from_context(
 				- timing->h_addressable
 				- timing->h_border_left
 				- timing->h_border_right;
-		pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch;
+		pipes[pipe_cnt].pipe.dest.vblank_start = v_total - front_porch;
 		pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start
 				- timing->v_addressable
 				- timing->v_border_top
 				- timing->v_border_bottom;
 		pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
-		pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+		pipes[pipe_cnt].pipe.dest.vtotal = v_total;
 		pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
 		pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
 		pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
@@ -1949,8 +1956,13 @@ int dcn20_populate_dml_pipes_from_context(
 		pipes[pipe_cnt].dout.dp_lanes = 4;
 		pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
 		pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
-		pipes[pipe_cnt].pipe.dest.odm_combine = res_ctx->pipe_ctx[i].prev_odm_pipe
-							|| res_ctx->pipe_ctx[i].next_odm_pipe;
+		switch (get_num_odm_heads(&res_ctx->pipe_ctx[i])) {
+		case 2:
+			pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
+			break;
+		default:
+			pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_disabled;
+		}
 		pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
 		if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
 				== res_ctx->pipe_ctx[i].plane_state)
@@ -2001,14 +2013,12 @@ int dcn20_populate_dml_pipes_from_context(
 		case COLOR_DEPTH_161616:
 			output_bpc = 16;
 			break;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 		case COLOR_DEPTH_999:
 			output_bpc = 9;
 			break;
 		case COLOR_DEPTH_111111:
 			output_bpc = 11;
 			break;
-#endif
 		default:
 			output_bpc = 8;
 			break;
@@ -2036,10 +2046,8 @@ int dcn20_populate_dml_pipes_from_context(
 			pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
 		}
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 		if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
 			pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
-#endif
 
 		/* todo: default max for now, until there is logic reflecting this in dc*/
 		pipes[pipe_cnt].dout.output_bpc = 12;
@@ -2063,6 +2071,10 @@ int dcn20_populate_dml_pipes_from_context(
 			pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable;
 			if (pipes[pipe_cnt].pipe.src.viewport_height > 1080)
 				pipes[pipe_cnt].pipe.src.viewport_height = 1080;
+			pipes[pipe_cnt].pipe.src.surface_height_y = pipes[pipe_cnt].pipe.src.viewport_height;
+			pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width;
+			pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height;
+			pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width;
 			pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
 			pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
 			pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
@@ -2077,8 +2089,8 @@ int dcn20_populate_dml_pipes_from_context(
 			pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
 			pipes[pipe_cnt].pipe.src.is_hsplit = 0;
 			pipes[pipe_cnt].pipe.dest.odm_combine = 0;
-			pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total;
-			pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total;
+			pipes[pipe_cnt].pipe.dest.vtotal_min = v_total;
+			pipes[pipe_cnt].pipe.dest.vtotal_max = v_total;
 		} else {
 			struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
 			struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
@@ -2096,6 +2108,10 @@ int dcn20_populate_dml_pipes_from_context(
 			pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
 			pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
 			pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
+			pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width;
+			pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
+			pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
+			pipes[pipe_cnt].pipe.src.surface_height_c = pln->plane_size.chroma_size.height;
 			if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
 				pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;
 				pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
@@ -2261,7 +2277,6 @@ void dcn20_set_mcif_arb_params(
 	}
 }
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
 {
 	int i;
@@ -2295,7 +2310,6 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
 	}
 	return true;
 }
-#endif
 
 struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
 		struct resource_context *res_ctx,
@@ -2398,10 +2412,8 @@ void dcn20_merge_pipes_for_validate(
 			odm_pipe->bottom_pipe = NULL;
 			odm_pipe->prev_odm_pipe = NULL;
 			odm_pipe->next_odm_pipe = NULL;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 			if (odm_pipe->stream_res.dsc)
 				release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
-#endif
 			/* Clear plane_res and stream_res */
 			memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
 			memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
@@ -2513,7 +2525,7 @@ int dcn20_validate_apply_pipe_split_flags(
 			split[i] = true;
 		if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
 			split[i] = true;
-			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
+			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1;
 		}
 		context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
 			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
@@ -2544,7 +2556,7 @@ bool dcn20_fast_validate_bw(
 
 	dcn20_merge_pipes_for_validate(dc, context);
 
-	pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes);
+	pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes);
 
 	*pipe_cnt_out = pipe_cnt;
 
@@ -2621,14 +2633,12 @@ bool dcn20_fast_validate_bw(
 			ASSERT(0);
 		}
 	}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	/* Actual dsc count per stream dsc validation*/
 	if (!dcn20_validate_dsc(dc, context)) {
 		context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
 				DML_FAIL_DSC_VALIDATION_FAILURE;
 		goto validate_fail;
 	}
-#endif
 
 	*vlevel_out = vlevel;
 
@@ -2692,10 +2702,10 @@ static void dcn20_calculate_wm(
 	if (pipe_cnt != pipe_idx) {
 		if (dc->res_pool->funcs->populate_dml_pipes)
 			pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
-				&context->res_ctx, pipes);
+				context, pipes);
 		else
 			pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
-				&context->res_ctx, pipes);
+				context, pipes);
 	}
 
 	*out_pipe_cnt = pipe_cnt;
@@ -2715,11 +2725,9 @@ static void dcn20_calculate_wm(
 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
 
 	if (vlevel < 2) {
 		pipes[0].clks_cfg.voltage = 2;
@@ -2731,10 +2739,8 @@ static void dcn20_calculate_wm(
 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
 
 	if (vlevel < 3) {
 		pipes[0].clks_cfg.voltage = 3;
@@ -2746,10 +2752,8 @@ static void dcn20_calculate_wm(
 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
 
 	pipes[0].clks_cfg.voltage = vlevel;
 	pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
@@ -2759,10 +2763,8 @@ static void dcn20_calculate_wm(
 	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
 }
 
 void dcn20_calculate_dlg_params(
@@ -2928,11 +2930,19 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
 	bool voltage_supported = false;
 	bool full_pstate_supported = false;
 	bool dummy_pstate_supported = false;
-	double p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
+	double p_state_latency_us;
 
-	if (fast_validate)
-		return dcn20_validate_bandwidth_internal(dc, context, true);
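+	/* Bandwidth validation below reads DML's double-precision soc parameters,
+	 * so keep it inside an FP-safe region.
+	 */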
+	DC_FP_START();
+	p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
+	context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
+		dc->debug.disable_dram_clock_change_vactive_support;
 
+	if (fast_validate) {
+		voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true);
+
+		DC_FP_END();
+		return voltage_supported;
+	}
 
 	// Best case, we support full UCLK switch latency
 	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
@@ -2940,7 +2950,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
 
 	if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
 		(voltage_supported && full_pstate_supported)) {
-		context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+		context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported;
 		goto restore_dml_state;
 	}
 
@@ -2961,6 +2971,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
 restore_dml_state:
 	context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
 
+	DC_FP_END();
 	return voltage_supported;
 }
 
@@ -3005,7 +3016,7 @@ static void dcn20_destroy_resource_pool(struct resource_pool **pool)
 {
 	struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);
 
-	destruct(dcn20_pool);
+	dcn20_resource_destruct(dcn20_pool);
 	kfree(dcn20_pool);
 	*pool = NULL;
 }
@@ -3252,7 +3263,6 @@ void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
 
 void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
 {
-	kernel_fpu_begin();
 	if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
 			&& dc->bb_overrides.sr_exit_time_ns) {
 		bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
@@ -3276,7 +3286,6 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
 		bb->dram_clock_change_latency_us =
 				dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
 	}
-	kernel_fpu_end();
 }
 
 static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
@@ -3318,12 +3327,13 @@ static bool init_soc_bounding_box(struct dc *dc,
 
 	DC_LOGGER_INIT(dc->ctx->logger);
 
-	if (!bb && !SOC_BOUNDING_BOX_VALID) {
+	/* TODO: upstream NV12 bounding box when it's launched */
+	if (!bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
 		DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
 		return false;
 	}
 
-	if (bb && !SOC_BOUNDING_BOX_VALID) {
+	if (bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
 		int i;
 
 		dcn2_0_nv12_soc.sr_exit_time_us =
@@ -3465,7 +3475,7 @@ static bool init_soc_bounding_box(struct dc *dc,
 	return true;
 }
 
-static bool construct(
+static bool dcn20_resource_construct(
 	uint8_t num_virtual_links,
 	struct dc *dc,
 	struct dcn20_resource_pool *pool)
@@ -3473,6 +3483,7 @@ static bool construct(
 	int i;
 	struct dc_context *ctx = dc->ctx;
 	struct irq_service_init_data init_data;
+	struct ddc_service_init_data ddc_init_data;
 	struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
 			get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev);
 	struct _vcs_dpi_ip_params_st *loaded_ip =
@@ -3480,6 +3491,8 @@ static bool construct(
 	enum dml_project dml_project_version =
 			get_dml_project_version(ctx->asic_id.hw_internal_rev);
 
+	DC_FP_START();
+
 	ctx->dc_bios->regs = &bios_regs;
 	pool->base.funcs = &dcn20_res_pool_funcs;
 
@@ -3732,7 +3745,6 @@ static bool construct(
 		goto create_fail;
 	}
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
 		pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
 		if (pool->base.dscs[i] == NULL) {
@@ -3741,7 +3753,6 @@ static bool construct(
 			goto create_fail;
 		}
 	}
-#endif
 
 	if (!dcn20_dwbc_create(ctx, &pool->base)) {
 		BREAK_TO_DEBUGGER();
@@ -3768,11 +3779,24 @@ static bool construct(
 
 	dc->cap_funcs = cap_funcs;
 
+	if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
+		ddc_init_data.ctx = dc->ctx;
+		ddc_init_data.link = NULL;
+		ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
+		ddc_init_data.id.enum_id = 0;
+		ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
+		pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+	} else {
+		pool->base.oem_device = NULL;
+	}
+
+	DC_FP_END();
 	return true;
 
 create_fail:
 
-	destruct(pool);
+	DC_FP_END();
+	dcn20_resource_destruct(pool);
 
 	return false;
 }
@@ -3787,7 +3811,7 @@ struct resource_pool *dcn20_create_resource_pool(
 	if (!pool)
 		return NULL;
 
-	if (construct(init_data->num_virtual_links, dc, pool))
+	if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool))
 		return &pool->base;
 
 	BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index fef473d..f589384 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -50,7 +50,7 @@ unsigned int dcn20_calc_max_scaled_time(
 		enum mmhubbub_wbif_mode mode,
 		unsigned int urgent_watermark);
 int dcn20_populate_dml_pipes_from_context(
-		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+		struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
 struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
 		struct dc_state *state,
 		const struct resource_pool *pool,
@@ -127,9 +127,7 @@ int dcn20_validate_apply_pipe_split_flags(
 		struct dc_state *context,
 		int vlevel,
 		bool *split);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
-#endif
 void dcn20_split_stream_for_mpc(
 		struct resource_context *res_ctx,
 		const struct resource_pool *pool,
@@ -159,6 +157,7 @@ void dcn20_calculate_dlg_params(
 
 enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
 enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
+enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
 enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
 enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index fcb3877..9b70a1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -205,7 +205,6 @@ static void enc2_stream_encoder_stop_hdmi_info_packets(
 		HDMI_GENERIC7_LINE, 0);
 }
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 
 /* Update GSP7 SDP 128 byte long */
 static void enc2_update_gsp7_128_info_packet(
@@ -360,7 +359,6 @@ static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s)
 		REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);
 	}
 }
-#endif
 
 /* Set Dynamic Metadata configuration.
  *   enable_dme:         TRUE: enables Dynamic Metadata Engine, FALSE: disables DME
@@ -440,10 +438,8 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
 {
 	bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
 			&& !timing->dsc_cfg.ycbcr422_simple);
-#endif
 	return two_pix;
 }
 
@@ -541,11 +537,16 @@ void enc2_stream_encoder_dp_set_stream_attribute(
 	struct stream_encoder *enc,
 	struct dc_crtc_timing *crtc_timing,
 	enum dc_color_space output_color_space,
+	bool use_vsc_sdp_for_colorimetry,
 	uint32_t enable_sdp_splitting)
 {
 	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 
-	enc1_stream_encoder_dp_set_stream_attribute(enc, crtc_timing, output_color_space, enable_sdp_splitting);
+	enc1_stream_encoder_dp_set_stream_attribute(enc,
+			crtc_timing,
+			output_color_space,
+			use_vsc_sdp_for_colorimetry,
+			enable_sdp_splitting);
 
 	REG_UPDATE(DP_SEC_FRAMING4,
 		DP_SST_SDP_SPLITTING, enable_sdp_splitting);
@@ -568,6 +569,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
 		enc2_stream_encoder_stop_hdmi_info_packets,
 	.update_dp_info_packets =
 		enc2_stream_encoder_update_dp_info_packets,
+	.send_immediate_sdp_message =
+		enc1_stream_encoder_send_immediate_sdp_message,
 	.stop_dp_info_packets =
 		enc1_stream_encoder_stop_dp_info_packets,
 	.dp_blank =
@@ -590,11 +593,9 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
 	.dp_get_pixel_format =
 		enc1_stream_encoder_dp_get_pixel_format,
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	.enc_read_state = enc2_read_state,
 	.dp_set_dsc_config = enc2_dp_set_dsc_config,
 	.dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet,
-#endif
 	.set_dynamic_metadata = enc2_set_dynamic_metadata,
 	.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
 };
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
index 3f94a9f..d2a805b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
@@ -98,6 +98,7 @@ void enc2_stream_encoder_dp_set_stream_attribute(
 	struct stream_encoder *enc,
 	struct dc_crtc_timing *crtc_timing,
 	enum dc_color_space output_color_space,
+	bool use_vsc_sdp_for_colorimetry,
 	uint32_t enable_sdp_splitting);
 
 void enc2_stream_encoder_dp_unblank(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index 5b8c175..07684d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -2,9 +2,16 @@
 #
 # Makefile for DCN21.
 
-DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o dcn21_hwseq.o dcn21_link_encoder.o
+DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \
+	 dcn21_hwseq.o dcn21_link_encoder.o
 
+ifdef CONFIG_X86
 CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
+endif
 
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -12,6 +19,7 @@
 endif
 endif
 
+ifdef CONFIG_X86
 ifdef IS_OLD_GCC
 # Stack alignment mismatch, proceed with caution.
 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -20,6 +28,7 @@
 else
 CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
 endif
+endif
 
 AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index 2f5a586..cf09b93 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -29,6 +29,10 @@
 #include "dm_services.h"
 #include "reg_helper.h"
 
+#include "dc_dmub_srv.h"
+
+#define DC_LOGGER_INIT(logger)
+
 #define REG(reg)\
 	hubp21->hubp_regs->reg
 
@@ -164,6 +168,158 @@ static void hubp21_setup(
 
 }
 
+void hubp21_set_viewport(
+	struct hubp *hubp,
+	const struct rect *viewport,
+	const struct rect *viewport_c)
+{
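+	/* Program the primary and secondary (stereo) luma viewports and the
+	 * corresponding chroma viewports from the same source rectangles.
+	 */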
+	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+
+	REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0,
+		  PRI_VIEWPORT_WIDTH, viewport->width,
+		  PRI_VIEWPORT_HEIGHT, viewport->height);
+
+	REG_SET_2(DCSURF_PRI_VIEWPORT_START, 0,
+		  PRI_VIEWPORT_X_START, viewport->x,
+		  PRI_VIEWPORT_Y_START, viewport->y);
+
+	/*for stereo*/
+	REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION, 0,
+		  SEC_VIEWPORT_WIDTH, viewport->width,
+		  SEC_VIEWPORT_HEIGHT, viewport->height);
+
+	REG_SET_2(DCSURF_SEC_VIEWPORT_START, 0,
+		  SEC_VIEWPORT_X_START, viewport->x,
+		  SEC_VIEWPORT_Y_START, viewport->y);
+
+	/* DC supports NV12 only at the moment */
+	REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0,
+		  PRI_VIEWPORT_WIDTH_C, viewport_c->width,
+		  PRI_VIEWPORT_HEIGHT_C, viewport_c->height);
+
+	REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
+		  PRI_VIEWPORT_X_START_C, viewport_c->x,
+		  PRI_VIEWPORT_Y_START_C, viewport_c->y);
+
+	REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0,
+		  SEC_VIEWPORT_WIDTH_C, viewport_c->width,
+		  SEC_VIEWPORT_HEIGHT_C, viewport_c->height);
+
+	REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0,
+		  SEC_VIEWPORT_X_START_C, viewport_c->x,
+		  SEC_VIEWPORT_Y_START_C, viewport_c->y);
+}
+
+static void hubp21_apply_PLAT_54186_wa(
+		struct hubp *hubp,
+		const struct dc_plane_address *address)
+{
+	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+	struct dc_debug_options *debug = &hubp->ctx->dc->debug;
+	unsigned int chroma_bpe = 2;
+	unsigned int luma_addr_high_part = 0;
+	unsigned int row_height = 0;
+	unsigned int chroma_pitch = 0;
+	unsigned int viewport_c_height = 0;
+	unsigned int viewport_c_width = 0;
+	unsigned int patched_viewport_height = 0;
+	unsigned int patched_viewport_width = 0;
+	unsigned int rotation_angle = 0;
+	unsigned int pix_format = 0;
+	unsigned int h_mirror_en = 0;
+	unsigned int tile_blk_size = 64 * 1024; /* 64KB for 64KB SW, 4KB for 4KB SW */
+
+	if (!debug->nv12_iflip_vm_wa)
+		return;
+
+	REG_GET(DCHUBP_REQ_SIZE_CONFIG_C,
+		PTE_ROW_HEIGHT_LINEAR_C, &row_height);
+
+	REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C,
+			PRI_VIEWPORT_WIDTH_C, &viewport_c_width,
+			PRI_VIEWPORT_HEIGHT_C, &viewport_c_height);
+
+	REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C,
+			PRIMARY_SURFACE_ADDRESS_HIGH_C, &luma_addr_high_part);
+
+	REG_GET(DCSURF_SURFACE_PITCH_C,
+			PITCH_C, &chroma_pitch);
+
+	chroma_pitch += 1;
+
+	REG_GET_3(DCSURF_SURFACE_CONFIG,
+			SURFACE_PIXEL_FORMAT, &pix_format,
+			ROTATION_ANGLE, &rotation_angle,
+			H_MIRROR_EN, &h_mirror_en);
+
+	/* reset persistent cached data */
+	hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+	/* Apply the workaround only for NV12 surfaces with scatter gather enabled
+	 * and a viewport larger than 512 along the vertical direction. */
+	if (address->type != PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
+			address->video_progressive.luma_addr.high_part == 0xf4)
+		return;
+
+	if ((rotation_angle == ROTATION_ANGLE_0 || rotation_angle == ROTATION_ANGLE_180)
+			&& viewport_c_height <= 512)
+		return;
+
+	if ((rotation_angle == ROTATION_ANGLE_90 || rotation_angle == ROTATION_ANGLE_270)
+				&& viewport_c_width <= 512)
+		return;
+
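+	/* Extend the chroma viewport by one PTE row (128 lines at 0/180 rotation,
+	 * 256 at 90/270) and cache a chroma address offset that the next surface
+	 * flip applies so the programmed address matches the padded viewport.
+	 */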
+	switch (rotation_angle) {
+	case ROTATION_ANGLE_0: /* 0 degree rotation */
+		row_height = 128;
+		patched_viewport_height = (viewport_c_height / row_height + 1) * row_height + 1;
+		patched_viewport_width = viewport_c_width;
+		hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+		break;
+	case ROTATION_ANGLE_180: /* 180 degree rotation */
+		row_height = 128;
+		patched_viewport_height = viewport_c_height + row_height;
+		patched_viewport_width = viewport_c_width;
+		hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - chroma_pitch * row_height * chroma_bpe;
+		break;
+	case ROTATION_ANGLE_90: /* 90 degree rotation */
+		row_height = 256;
+		if (h_mirror_en) {
+			patched_viewport_height = viewport_c_height;
+			patched_viewport_width = viewport_c_width + row_height;
+			hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+		} else {
+			patched_viewport_height = viewport_c_height;
+			patched_viewport_width = viewport_c_width + row_height;
+			hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size;
+		}
+		break;
+	case ROTATION_ANGLE_270: /* 270 degree rotation */
+		row_height = 256;
+		if (h_mirror_en) {
+			patched_viewport_height = viewport_c_height;
+			patched_viewport_width = viewport_c_width + row_height;
+			hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size;
+		} else {
+			patched_viewport_height = viewport_c_height;
+			patched_viewport_width = viewport_c_width + row_height;
+			hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+		}
+		break;
+	default:
+		ASSERT(0);
+		break;
+	}
+
+	/* catch cases where the viewport keeps growing */
+	ASSERT(patched_viewport_height && patched_viewport_height < 5000);
+	ASSERT(patched_viewport_width && patched_viewport_width < 5000);
+
+	REG_UPDATE_2(DCSURF_PRI_VIEWPORT_DIMENSION_C,
+			PRI_VIEWPORT_WIDTH_C, patched_viewport_width,
+			PRI_VIEWPORT_HEIGHT_C, patched_viewport_height);
+}
+
 void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
 		struct vm_system_aperture_param *apt)
 {
@@ -191,6 +347,562 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
 			SYSTEM_ACCESS_MODE, 0x3);
 }
 
+void hubp21_validate_dml_output(struct hubp *hubp,
+		struct dc_context *ctx,
+		struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
+		struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
+		struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr)
+{
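+	/* Read back the programmed RQ, DLG and TTU registers and compare each
+	 * field against the value DML computed; any mismatch is reported through
+	 * the debug log.
+	 */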
+	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+	struct _vcs_dpi_display_rq_regs_st rq_regs = {0};
+	struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0};
+	struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0};
+	DC_LOGGER_INIT(ctx->logger);
+	DC_LOG_DEBUG("DML Validation | Running Validation");
+
+	/* Requester - Per hubp */
+	REG_GET(HUBPRET_CONTROL,
+		DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address);
+	REG_GET_4(DCN_EXPANSION_MODE,
+		DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode,
+		PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode,
+		MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode,
+		CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode);
+	REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+		CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size,
+		MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size,
+		META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size,
+		MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size,
+		DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size,
+		VM_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size,
+		SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height,
+		PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear);
+	REG_GET_7(DCHUBP_REQ_SIZE_CONFIG_C,
+		CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size,
+		MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size,
+		META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size,
+		MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size,
+		DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size,
+		SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height,
+		PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear);
+
+	if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address)
+		DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u  Actual: %u\n",
+				dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address);
+	if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode)
+		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode);
+	if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode)
+		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:PRQ_EXPANSION_MODE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode);
+	if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode)
+		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode);
+	if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode)
+		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode);
+
+	if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size);
+	if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size);
+	if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size);
+	if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size);
+	if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size);
+	if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:VM_GROUP_SIZE - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size);
+	if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height);
+	if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear);
+
+	if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size);
+	if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size);
+	if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size);
+	if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size);
+	if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size);
+	if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height);
+	if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u  Actual: %u\n",
+				dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear);
+
+
+	/* DLG - Per hubp */
+	REG_GET_2(BLANK_OFFSET_0,
+		REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end,
+		DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end);
+	REG_GET(BLANK_OFFSET_1,
+		MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start);
+	REG_GET(DST_DIMENSIONS,
+		REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal);
+	REG_GET_2(DST_AFTER_SCALER,
+		REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler,
+		DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler);
+	REG_GET(REF_FREQ_TO_PIX_FREQ,
+		REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq);
+
+	if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end)
+		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end);
+	if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end)
+		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end);
+	if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start)
+		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start);
+	if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal)
+		DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal);
+	if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler)
+		DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler);
+	if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler)
+		DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler);
+	if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq)
+		DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq);
+
+	/* DLG - Per luma/chroma */
+	REG_GET(VBLANK_PARAMETERS_1,
+		REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l);
+	if (REG(NOM_PARAMETERS_0))
+		REG_GET(NOM_PARAMETERS_0,
+			DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l);
+	if (REG(NOM_PARAMETERS_1))
+		REG_GET(NOM_PARAMETERS_1,
+			REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l);
+	REG_GET(NOM_PARAMETERS_4,
+		DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l);
+	REG_GET(NOM_PARAMETERS_5,
+		REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l);
+	REG_GET_2(PER_LINE_DELIVERY,
+		REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l,
+		REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c);
+	REG_GET_2(PER_LINE_DELIVERY_PRE,
+		REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l,
+		REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c);
+	REG_GET(VBLANK_PARAMETERS_2,
+		REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c);
+	if (REG(NOM_PARAMETERS_2))
+		REG_GET(NOM_PARAMETERS_2,
+			DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c);
+	if (REG(NOM_PARAMETERS_3))
+		REG_GET(NOM_PARAMETERS_3,
+			REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c);
+	REG_GET(NOM_PARAMETERS_6,
+		DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c);
+	REG_GET(NOM_PARAMETERS_7,
+		REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c);
+	REG_GET(VBLANK_PARAMETERS_3,
+			REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l);
+	REG_GET(VBLANK_PARAMETERS_4,
+			REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+	if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l)
+		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l);
+	if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l);
+	if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l);
+	if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l);
+	if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l);
+	if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l)
+		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l);
+	if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c)
+		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c);
+	if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c)
+		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c);
+	if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c);
+	if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c);
+	if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c);
+	if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c)
+		DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c);
+	if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l)
+		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l);
+	if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c)
+		DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c);
+	if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l)
+		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l);
+	if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c)
+		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+	/* TTU - per hubp */
+	REG_GET_2(DCN_TTU_QOS_WM,
+		QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm,
+		QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm);
+
+	if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm)
+		DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm);
+	if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm)
+		DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm);
+
+	/* TTU - per luma/chroma */
+	/* Assumed surf0 is luma and 1 is chroma */
+	REG_GET_3(DCN_SURF0_TTU_CNTL0,
+		REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l,
+		QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l,
+		QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l);
+	REG_GET_3(DCN_SURF1_TTU_CNTL0,
+		REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c,
+		QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c,
+		QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c);
+	REG_GET_3(DCN_CUR0_TTU_CNTL0,
+		REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0,
+		QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0,
+		QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0);
+	REG_GET(FLIP_PARAMETERS_1,
+		REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l);
+	REG_GET(DCN_CUR0_TTU_CNTL1,
+			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0);
+	REG_GET(DCN_CUR1_TTU_CNTL1,
+			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1);
+	REG_GET(DCN_SURF0_TTU_CNTL1,
+			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l);
+	REG_GET(DCN_SURF1_TTU_CNTL1,
+			REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c);
+
+	if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l);
+	if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l);
+	if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l);
+	if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c);
+	if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c);
+	if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c);
+	if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0)
+		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0);
+	if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0)
+		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0);
+	if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0)
+		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0);
+	if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l)
+		DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l);
+	if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0)
+		DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0);
+	if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1)
+		DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1);
+	if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l);
+	if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c)
+		DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u  Actual: %u\n",
+				dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c);
+
+	/* Host VM deadline regs */
+	REG_GET(VBLANK_PARAMETERS_5,
+		REFCYC_PER_VM_GROUP_VBLANK, &dlg_attr.refcyc_per_vm_group_vblank);
+	REG_GET(VBLANK_PARAMETERS_6,
+		REFCYC_PER_VM_REQ_VBLANK, &dlg_attr.refcyc_per_vm_req_vblank);
+	REG_GET(FLIP_PARAMETERS_3,
+		REFCYC_PER_VM_GROUP_FLIP, &dlg_attr.refcyc_per_vm_group_flip);
+	REG_GET(FLIP_PARAMETERS_4,
+		REFCYC_PER_VM_REQ_FLIP, &dlg_attr.refcyc_per_vm_req_flip);
+	REG_GET(FLIP_PARAMETERS_5,
+		REFCYC_PER_PTE_GROUP_FLIP_C, &dlg_attr.refcyc_per_pte_group_flip_c);
+	REG_GET(FLIP_PARAMETERS_6,
+		REFCYC_PER_META_CHUNK_FLIP_C, &dlg_attr.refcyc_per_meta_chunk_flip_c);
+	REG_GET(FLIP_PARAMETERS_2,
+		REFCYC_PER_META_CHUNK_FLIP_L, &dlg_attr.refcyc_per_meta_chunk_flip_l);
+
+	if (dlg_attr.refcyc_per_vm_group_vblank != dml_dlg_attr->refcyc_per_vm_group_vblank)
+		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_5:REFCYC_PER_VM_GROUP_VBLANK - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_vm_group_vblank, dlg_attr.refcyc_per_vm_group_vblank);
+	if (dlg_attr.refcyc_per_vm_req_vblank != dml_dlg_attr->refcyc_per_vm_req_vblank)
+		DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_6:REFCYC_PER_VM_REQ_VBLANK - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_vm_req_vblank, dlg_attr.refcyc_per_vm_req_vblank);
+	if (dlg_attr.refcyc_per_vm_group_flip != dml_dlg_attr->refcyc_per_vm_group_flip)
+		DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_3:REFCYC_PER_VM_GROUP_FLIP - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_vm_group_flip, dlg_attr.refcyc_per_vm_group_flip);
+	if (dlg_attr.refcyc_per_vm_req_flip != dml_dlg_attr->refcyc_per_vm_req_flip)
+		DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_4:REFCYC_PER_VM_REQ_FLIP - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_vm_req_flip, dlg_attr.refcyc_per_vm_req_flip);
+	if (dlg_attr.refcyc_per_pte_group_flip_c != dml_dlg_attr->refcyc_per_pte_group_flip_c)
+		DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_5:REFCYC_PER_PTE_GROUP_FLIP_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_pte_group_flip_c, dlg_attr.refcyc_per_pte_group_flip_c);
+	if (dlg_attr.refcyc_per_meta_chunk_flip_c != dml_dlg_attr->refcyc_per_meta_chunk_flip_c)
+		DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_6:REFCYC_PER_META_CHUNK_FLIP_C - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_meta_chunk_flip_c, dlg_attr.refcyc_per_meta_chunk_flip_c);
+	if (dlg_attr.refcyc_per_meta_chunk_flip_l != dml_dlg_attr->refcyc_per_meta_chunk_flip_l)
+		DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_2:REFCYC_PER_META_CHUNK_FLIP_L - Expected: %u  Actual: %u\n",
+				dml_dlg_attr->refcyc_per_meta_chunk_flip_l, dlg_attr.refcyc_per_meta_chunk_flip_l);
+}
+
+static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip_registers *flip_regs)
+{
+	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+
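+	/* Write the flip type, stereo mode, VMID and TMZ bits, then all primary,
+	 * secondary and meta surface addresses captured in flip_regs.
+	 */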
+	REG_UPDATE_3(DCSURF_FLIP_CONTROL,
+					SURFACE_FLIP_TYPE, flip_regs->immediate,
+					SURFACE_FLIP_MODE_FOR_STEREOSYNC, flip_regs->grph_stereo,
+					SURFACE_FLIP_IN_STEREOSYNC, flip_regs->grph_stereo);
+
+	REG_UPDATE(VMID_SETTINGS_0,
+				VMID, flip_regs->vmid);
+
+	REG_UPDATE_8(DCSURF_SURFACE_CONTROL,
+			PRIMARY_SURFACE_TMZ, flip_regs->tmz_surface,
+			PRIMARY_SURFACE_TMZ_C, flip_regs->tmz_surface,
+			PRIMARY_META_SURFACE_TMZ, flip_regs->tmz_surface,
+			PRIMARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface,
+			SECONDARY_SURFACE_TMZ, flip_regs->tmz_surface,
+			SECONDARY_SURFACE_TMZ_C, flip_regs->tmz_surface,
+			SECONDARY_META_SURFACE_TMZ, flip_regs->tmz_surface,
+			SECONDARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface);
+
+	REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+			PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
+			flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C);
+
+	REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
+			PRIMARY_META_SURFACE_ADDRESS_C,
+			flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_C);
+
+	REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+			PRIMARY_META_SURFACE_ADDRESS_HIGH,
+			flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH);
+
+	REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+			PRIMARY_META_SURFACE_ADDRESS,
+			flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS);
+
+	REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0,
+			SECONDARY_META_SURFACE_ADDRESS_HIGH,
+			flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH);
+
+	REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0,
+			SECONDARY_META_SURFACE_ADDRESS,
+			flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS);
+
+	REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
+			SECONDARY_SURFACE_ADDRESS_HIGH,
+			flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH);
+
+	REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0,
+			SECONDARY_SURFACE_ADDRESS,
+			flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS);
+
+	REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
+			PRIMARY_SURFACE_ADDRESS_HIGH_C,
+			flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C);
+
+	REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
+			PRIMARY_SURFACE_ADDRESS_C,
+			flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C);
+
+	REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+			PRIMARY_SURFACE_ADDRESS_HIGH,
+			flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH);
+
+	REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+			PRIMARY_SURFACE_ADDRESS,
+			flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS);
+}
+
+void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs)
+{
+	struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv;
+	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+	struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa = { 0 };
+
+	PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
+	PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
+	PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+	PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+	PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+	PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
+	PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
+	PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
+	PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
+	PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
+
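+	/* Queue the packed flip registers as a DMCUB command and wait for the
+	 * firmware to consume it before returning.
+	 */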
+	PERF_TRACE();  // TODO: remove after performance is stable.
+	dc_dmub_srv_cmd_queue(dmcub, &PLAT_54186_wa.header);
+	PERF_TRACE();  // TODO: remove after performance is stable.
+	dc_dmub_srv_cmd_execute(dmcub);
+	PERF_TRACE();  // TODO: remove after performance is stable.
+	dc_dmub_srv_wait_idle(dmcub);
+	PERF_TRACE();  // TODO: remove after performance is stable.
+}
+
+bool hubp21_program_surface_flip_and_addr(
+		struct hubp *hubp,
+		const struct dc_plane_address *address,
+		bool flip_immediate)
+{
+	struct dc_debug_options *debug = &hubp->ctx->dc->debug;
+	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+	struct surface_flip_registers flip_regs = { 0 };
+
+	flip_regs.vmid = address->vmid;
+
+	switch (address->type) {
+	case PLN_ADDR_TYPE_GRAPHICS:
+		if (address->grph.addr.quad_part == 0) {
+			BREAK_TO_DEBUGGER();
+			break;
+		}
+
+		if (address->grph.meta_addr.quad_part != 0) {
+			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
+					address->grph.meta_addr.low_part;
+			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
+					address->grph.meta_addr.high_part;
+		}
+
+		flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
+				address->grph.addr.low_part;
+		flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+				address->grph.addr.high_part;
+		break;
+	case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+		if (address->video_progressive.luma_addr.quad_part == 0
+				|| address->video_progressive.chroma_addr.quad_part == 0)
+			break;
+
+		if (address->video_progressive.luma_meta_addr.quad_part != 0) {
+			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
+					address->video_progressive.luma_meta_addr.low_part;
+			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
+					address->video_progressive.luma_meta_addr.high_part;
+
+			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_C =
+					address->video_progressive.chroma_meta_addr.low_part;
+			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C =
+					address->video_progressive.chroma_meta_addr.high_part;
+		}
+
+		flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
+				address->video_progressive.luma_addr.low_part;
+		flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+				address->video_progressive.luma_addr.high_part;
+
+		if (debug->nv12_iflip_vm_wa) {
+			flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+					address->video_progressive.chroma_addr.low_part + hubp21->PLAT_54186_wa_chroma_addr_offset;
+		} else {
+			flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+					address->video_progressive.chroma_addr.low_part;
+		}
+
+		flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C =
+				address->video_progressive.chroma_addr.high_part;
+
+		break;
+	case PLN_ADDR_TYPE_GRPH_STEREO:
+		if (address->grph_stereo.left_addr.quad_part == 0)
+			break;
+		if (address->grph_stereo.right_addr.quad_part == 0)
+			break;
+
+		flip_regs.grph_stereo = true;
+
+		if (address->grph_stereo.right_meta_addr.quad_part != 0) {
+			flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS =
+					address->grph_stereo.right_meta_addr.low_part;
+			flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH =
+					address->grph_stereo.right_meta_addr.high_part;
+		}
+
+		if (address->grph_stereo.left_meta_addr.quad_part != 0) {
+			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
+					address->grph_stereo.left_meta_addr.low_part;
+			flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
+					address->grph_stereo.left_meta_addr.high_part;
+		}
+
+		flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
+				address->grph_stereo.left_addr.low_part;
+		flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+				address->grph_stereo.left_addr.high_part;
+
+		flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS =
+				address->grph_stereo.right_addr.low_part;
+		flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH =
+				address->grph_stereo.right_addr.high_part;
+
+		break;
+	default:
+		BREAK_TO_DEBUGGER();
+		break;
+	}
+
+	flip_regs.tmz_surface = address->tmz_surface;
+	flip_regs.immediate = flip_immediate;
+
+	if (hubp->ctx->dc->debug.enable_dmcub_surface_flip && address->type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+		dmcub_PLAT_54186_wa(hubp, &flip_regs);
+	else
+		program_surface_flip_and_addr(hubp, &flip_regs);
+
+	hubp->request_address = *address;
+
+	return true;
+}
+
 void hubp21_init(struct hubp *hubp)
 {
 	// DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta
@@ -203,7 +915,7 @@ void hubp21_init(struct hubp *hubp)
 static struct hubp_funcs dcn21_hubp_funcs = {
 	.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
 	.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
-	.hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr,
+	.hubp_program_surface_flip_and_addr = hubp21_program_surface_flip_and_addr,
 	.hubp_program_surface_config = hubp1_program_surface_config,
 	.hubp_is_flip_pending = hubp1_is_flip_pending,
 	.hubp_setup = hubp21_setup,
@@ -211,7 +923,8 @@ static struct hubp_funcs dcn21_hubp_funcs = {
 	.hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings,
 	.set_blank = hubp1_set_blank,
 	.dcc_control = hubp1_dcc_control,
-	.mem_program_viewport = min_set_viewport,
+	.mem_program_viewport = hubp21_set_viewport,
+	.apply_PLAT_54186_wa = hubp21_apply_PLAT_54186_wa,
 	.set_cursor_attributes	= hubp2_cursor_set_attributes,
 	.set_cursor_position	= hubp1_cursor_set_position,
 	.hubp_clk_cntl = hubp1_clk_cntl,
@@ -223,6 +936,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
 	.hubp_clear_underflow = hubp1_clear_underflow,
 	.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
 	.hubp_init = hubp21_init,
+	.validate_dml_output = hubp21_validate_dml_output,
 };
 
 bool hubp21_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h
index aeda719..9873b6cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h
@@ -108,6 +108,7 @@ struct dcn21_hubp {
 	const struct dcn_hubp2_registers *hubp_regs;
 	const struct dcn_hubp2_shift *hubp_shift;
 	const struct dcn_hubp2_mask *hubp_mask;
+	int PLAT_54186_wa_chroma_addr_offset;
 };
 
 bool hubp21_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index b25215c..081ad8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -28,7 +28,7 @@
 #include "core_types.h"
 #include "resource.h"
 #include "dce/dce_hwseq.h"
-#include "dcn20/dcn20_hwseq.h"
+#include "dcn21_hwseq.h"
 #include "vmid.h"
 #include "reg_helper.h"
 #include "hw/clk_mgr.h"
@@ -61,7 +61,7 @@ static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *c
 
 }
 
-static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
+int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
 {
 	struct dcn_hubbub_phys_addr_config config;
 
@@ -82,7 +82,7 @@ static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_ph
 
 // work around for Renoir s0i3, if register is programmed, bypass golden init.
 
-static bool dcn21_s0i3_golden_init_wa(struct dc *dc)
+bool dcn21_s0i3_golden_init_wa(struct dc *dc)
 {
 	struct dce_hwseq *hws = dc->hwseq;
 	uint32_t value = 0;
@@ -112,11 +112,3 @@ void dcn21_optimize_pwr_state(
 			true);
 }
 
-void dcn21_hw_sequencer_construct(struct dc *dc)
-{
-	dcn20_hw_sequencer_construct(dc);
-	dc->hwss.init_sys_ctx = dcn21_init_sys_ctx;
-	dc->hwss.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa;
-	dc->hwss.optimize_pwr_state = dcn21_optimize_pwr_state;
-	dc->hwss.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
index be67b62..1827360 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
@@ -26,8 +26,22 @@
 #ifndef __DC_HWSS_DCN21_H__
 #define __DC_HWSS_DCN21_H__
 
+#include "hw_sequencer_private.h"
+
 struct dc;
 
-void dcn21_hw_sequencer_construct(struct dc *dc);
+int dcn21_init_sys_ctx(struct dce_hwseq *hws,
+		struct dc *dc,
+		struct dc_phy_addr_space_config *pa_config);
+
+bool dcn21_s0i3_golden_init_wa(struct dc *dc);
+
+void dcn21_exit_optimized_pwr_state(
+		const struct dc *dc,
+		struct dc_state *context);
+
+void dcn21_optimize_pwr_state(
+		const struct dc *dc,
+		struct dc_state *context);
 
 #endif /* __DC_HWSS_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
new file mode 100644
index 0000000..4861aa5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn21_hwseq.h"
+
+static const struct hw_sequencer_funcs dcn21_funcs = {
+	.program_gamut_remap = dcn10_program_gamut_remap,
+	.init_hw = dcn10_init_hw,
+	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+	.apply_ctx_for_surface = NULL,
+	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+	.update_plane_addr = dcn20_update_plane_addr,
+	.update_dchub = dcn10_update_dchub,
+	.update_pending_status = dcn10_update_pending_status,
+	.program_output_csc = dcn20_program_output_csc,
+	.enable_accelerated_mode = dce110_enable_accelerated_mode,
+	.enable_timing_synchronization = dcn10_enable_timing_synchronization,
+	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+	.update_info_frame = dce110_update_info_frame,
+	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+	.enable_stream = dcn20_enable_stream,
+	.disable_stream = dce110_disable_stream,
+	.unblank_stream = dcn20_unblank_stream,
+	.blank_stream = dce110_blank_stream,
+	.enable_audio_stream = dce110_enable_audio_stream,
+	.disable_audio_stream = dce110_disable_audio_stream,
+	.disable_plane = dcn20_disable_plane,
+	.pipe_control_lock = dcn20_pipe_control_lock,
+	.pipe_control_lock_global = dcn20_pipe_control_lock_global,
+	.prepare_bandwidth = dcn20_prepare_bandwidth,
+	.optimize_bandwidth = dcn20_optimize_bandwidth,
+	.update_bandwidth = dcn20_update_bandwidth,
+	.set_drr = dcn10_set_drr,
+	.get_position = dcn10_get_position,
+	.set_static_screen_control = dcn10_set_static_screen_control,
+	.setup_stereo = dcn10_setup_stereo,
+	.set_avmute = dce110_set_avmute,
+	.log_hw_state = dcn10_log_hw_state,
+	.get_hw_state = dcn10_get_hw_state,
+	.clear_status_bits = dcn10_clear_status_bits,
+	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+	.edp_power_control = dce110_edp_power_control,
+	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+	.set_cursor_position = dcn10_set_cursor_position,
+	.set_cursor_attribute = dcn10_set_cursor_attribute,
+	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+	.set_clock = dcn10_set_clock,
+	.get_clock = dcn10_get_clock,
+	.program_triplebuffer = dcn20_program_triple_buffer,
+	.enable_writeback = dcn20_enable_writeback,
+	.disable_writeback = dcn20_disable_writeback,
+	.dmdata_status_done = dcn20_dmdata_status_done,
+	.program_dmdata_engine = dcn20_program_dmdata_engine,
+	.set_dmdata_attributes = dcn20_set_dmdata_attributes,
+	.init_sys_ctx = dcn21_init_sys_ctx,
+	.init_vm_ctx = dcn20_init_vm_ctx,
+	.set_flip_control_gsl = dcn20_set_flip_control_gsl,
+	.optimize_pwr_state = dcn21_optimize_pwr_state,
+	.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+};
+
+static const struct hwseq_private_funcs dcn21_private_funcs = {
+	.init_pipes = dcn10_init_pipes,
+	.update_plane_addr = dcn20_update_plane_addr,
+	.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+	.update_mpcc = dcn20_update_mpcc,
+	.set_input_transfer_func = dcn20_set_input_transfer_func,
+	.set_output_transfer_func = dcn20_set_output_transfer_func,
+	.power_down = dce110_power_down,
+	.enable_display_power_gating = dcn10_dummy_display_power_gating,
+	.blank_pixel_data = dcn20_blank_pixel_data,
+	.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
+	.enable_stream_timing = dcn20_enable_stream_timing,
+	.edp_backlight_control = dce110_edp_backlight_control,
+	.disable_stream_gating = dcn20_disable_stream_gating,
+	.enable_stream_gating = dcn20_enable_stream_gating,
+	.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+	.did_underflow_occur = dcn10_did_underflow_occur,
+	.init_blank = dcn20_init_blank,
+	.disable_vga = dcn20_disable_vga,
+	.bios_golden_init = dcn10_bios_golden_init,
+	.plane_atomic_disable = dcn20_plane_atomic_disable,
+	.plane_atomic_power_down = dcn10_plane_atomic_power_down,
+	.enable_power_gating_plane = dcn20_enable_power_gating_plane,
+	.dpp_pg_control = dcn20_dpp_pg_control,
+	.hubp_pg_control = dcn20_hubp_pg_control,
+	.update_odm = dcn20_update_odm,
+	.dsc_pg_control = dcn20_dsc_pg_control,
+	.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
+	.get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color,
+	.set_hdr_multiplier = dcn10_set_hdr_multiplier,
+	.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+	.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa,
+	.wait_for_blank_complete = dcn20_wait_for_blank_complete,
+	.dccg_init = dcn20_dccg_init,
+	.set_blend_lut = dcn20_set_blend_lut,
+	.set_shaper_3dlut = dcn20_set_shaper_3dlut,
+};
+
+void dcn21_hw_sequencer_construct(struct dc *dc)
+{
+	dc->hwss = dcn21_funcs;
+	dc->hwseq->funcs = dcn21_private_funcs;
+
+	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+		dc->hwss.init_hw = dcn20_fpga_init_hw;
+		dc->hwseq->funcs.init_pipes = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h
new file mode 100644
index 0000000..3ed2429
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN21_INIT_H__
+#define __DC_DCN21_INIT_H__
+
+struct dc;
+
+void dcn21_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN21_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
index e8a504c..e45683a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
@@ -323,9 +323,7 @@ void dcn21_link_encoder_disable_output(
 
 
 static const struct link_encoder_funcs dcn21_link_enc_funcs = {
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	.read_state = link_enc2_read_state,
-#endif
 	.validate_output_with_stream =
 		dcn10_link_encoder_validate_output_with_stream,
 	.hw_init = enc2_hw_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
index 1d7a1a5..033d5d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
@@ -33,6 +33,45 @@ struct dcn21_link_encoder {
 	struct dpcssys_phy_seq_cfg phy_seq_cfg;
 };
 
+#define DPCS_DCN21_MASK_SH_LIST(mask_sh)\
+	DPCS_DCN2_MASK_SH_LIST(mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_MPLLB_CP_PROP_GS, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_RX_VREF_CTRL, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_CP_INT_GS, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCS_DMCU_DPALT_DIS_BLOCK_REG, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_SUP_PRE_HP, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX0_VREGDRV_BYP, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX1_VREGDRV_BYP, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX2_VREGDRV_BYP, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX3_VREGDRV_BYP, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\
+	LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\
+	LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\
+	LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
+	LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
+	LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
+	LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh)
+
+#define DPCS_DCN21_REG_LIST(id) \
+	DPCS_DCN2_REG_LIST(id),\
+	SRI(RDPCSTX_PHY_CNTL15, RDPCSTX, id),\
+	SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
+
 #define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\
 	LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\
 	LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index b29b2c9..0d506d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -1,5 +1,6 @@
 /*
 * Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -28,6 +29,8 @@
 #include "dm_services.h"
 #include "dc.h"
 
+#include "dcn21_init.h"
+
 #include "resource.h"
 #include "include/irq_service_interface.h"
 #include "dcn20/dcn20_resource.h"
@@ -60,6 +63,8 @@
 
 #include "dcn20/dcn20_dwb.h"
 #include "dcn20/dcn20_mmhubbub.h"
+#include "dpcs/dpcs_2_1_0_offset.h"
+#include "dpcs/dpcs_2_1_0_sh_mask.h"
 
 #include "renoir_ip_offset.h"
 #include "dcn/dcn_2_1_0_offset.h"
@@ -78,6 +83,7 @@
 #include "dcn21_resource.h"
 #include "vm_helper.h"
 #include "dcn20/dcn20_vmid.h"
+#include "../dce/dmub_psr.h"
 
 #define SOC_BOUNDING_BOX_VALID false
 #define DC_LOGGER_INIT(logger)
@@ -90,11 +96,7 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
 	.gpuvm_max_page_table_levels = 1,
 	.hostvm_max_page_table_levels = 4,
 	.hostvm_cached_page_table_levels = 2,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	.num_dsc = 3,
-#else
-	.num_dsc = 0,
-#endif
 	.rob_buffer_size_kbytes = 168,
 	.det_buffer_size_kbytes = 164,
 	.dpte_buffer_size_in_pte_reqs_luma = 44,
@@ -352,7 +354,7 @@ static const struct bios_registers bios_regs = {
 };
 
 static const struct dce_dmcu_registers dmcu_regs = {
-		DMCU_DCN10_REG_LIST()
+		DMCU_DCN20_REG_LIST()
 };
 
 static const struct dce_dmcu_shift dmcu_shift = {
@@ -375,20 +377,6 @@ static const struct dce_abm_mask abm_mask = {
 		ABM_MASK_SH_LIST_DCN20(_MASK)
 };
 
-#ifdef CONFIG_DRM_AMD_DC_DMUB
-static const struct dcn21_dmcub_registers dmcub_regs = {
-		DMCUB_REG_LIST_DCN()
-};
-
-static const struct dcn21_dmcub_shift dmcub_shift = {
-		DMCUB_COMMON_MASK_SH_LIST_BASE(__SHIFT)
-};
-
-static const struct dcn21_dmcub_mask dmcub_mask = {
-		DMCUB_COMMON_MASK_SH_LIST_BASE(_MASK)
-};
-#endif
-
 #define audio_regs(id)\
 [id] = {\
 		AUD_COMMON_REG_LIST(id)\
@@ -478,15 +466,18 @@ static const struct dcn20_mpc_registers mpc_regs = {
 		MPC_OUT_MUX_REG_LIST_DCN2_0(0),
 		MPC_OUT_MUX_REG_LIST_DCN2_0(1),
 		MPC_OUT_MUX_REG_LIST_DCN2_0(2),
-		MPC_OUT_MUX_REG_LIST_DCN2_0(3)
+		MPC_OUT_MUX_REG_LIST_DCN2_0(3),
+		MPC_DBG_REG_LIST_DCN2_0()
 };
 
 static const struct dcn20_mpc_shift mpc_shift = {
-	MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+	MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
+	MPC_DEBUG_REG_LIST_SH_DCN20
 };
 
 static const struct dcn20_mpc_mask mpc_mask = {
-	MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+	MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
+	MPC_DEBUG_REG_LIST_MASK_DCN20
 };
 
 #define hubp_regs(id)\
@@ -554,7 +545,6 @@ static const struct dcn20_vmid_mask vmid_masks = {
 		DCN20_VMID_MASK_SH_LIST(_MASK)
 };
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 #define dsc_regsDCN20(id)\
 [id] = {\
 	DSC_REG_LIST_DCN20(id)\
@@ -576,7 +566,6 @@ static const struct dcn20_dsc_shift dsc_shift = {
 static const struct dcn20_dsc_mask dsc_mask = {
 	DSC_REG_LIST_SH_MASK_DCN20(_MASK)
 };
-#endif
 
 #define ipp_regs(id)\
 [id] = {\
@@ -623,6 +612,7 @@ static const struct dce110_aux_registers aux_engine_regs[] = {
 #define tf_regs(id)\
 [id] = {\
 	TF_REG_LIST_DCN20(id),\
+	TF_REG_LIST_DCN20_COMMON_APPEND(id),\
 }
 
 static const struct dcn2_dpp_registers tf_regs[] = {
@@ -633,11 +623,13 @@ static const struct dcn2_dpp_registers tf_regs[] = {
 };
 
 static const struct dcn2_dpp_shift tf_shift = {
-		TF_REG_LIST_SH_MASK_DCN20(__SHIFT)
+		TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
+		TF_DEBUG_REG_LIST_SH_DCN20
 };
 
 static const struct dcn2_dpp_mask tf_mask = {
-		TF_REG_LIST_SH_MASK_DCN20(_MASK)
+		TF_REG_LIST_SH_MASK_DCN20(_MASK),
+		TF_DEBUG_REG_LIST_MASK_DCN20
 };
 
 #define stream_enc_regs(id)\
@@ -672,7 +664,7 @@ static const struct dcn10_stream_encoder_mask se_mask = {
 static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
 
 static int dcn21_populate_dml_pipes_from_context(
-		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+		struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
 
 static struct input_pixel_processor *dcn21_ipp_create(
 	struct dc_context *ctx, uint32_t inst)
@@ -773,9 +765,7 @@ static const struct resource_caps res_cap_rn = {
 		.num_dwb = 1,
 		.num_ddc = 5,
 		.num_vmid = 1,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 		.num_dsc = 3,
-#endif
 };
 
 #ifdef DIAGS_BUILD
@@ -800,9 +790,7 @@ static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = {
 		.num_pll = 4,
 		.num_dwb = 1,
 		.num_ddc = 4,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 		.num_dsc = 2,
-#endif
 };
 #endif
 
@@ -842,11 +830,13 @@ static const struct dc_debug_options debug_defaults_drv = {
 		.disable_dcc = DCC_ENABLE,
 		.vsr_support = true,
 		.performance_trace = false,
-		.max_downscale_src_width = 3840,
+		.max_downscale_src_width = 4096,
 		.disable_pplib_wm_range = false,
 		.scl_reset_length10 = true,
 		.sanity_checks = true,
 		.disable_48mhz_pwrdwn = false,
+		.nv12_iflip_vm_wa = true,
+		.usbc_combo_phy_reset_wa = true
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
@@ -869,7 +859,7 @@ enum dcn20_clk_src_array_id {
 	DCN20_CLK_SRC_TOTAL_DCN21
 };
 
-static void destruct(struct dcn21_resource_pool *pool)
+static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
 {
 	unsigned int i;
 
@@ -880,12 +870,10 @@ static void destruct(struct dcn21_resource_pool *pool)
 		}
 	}
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
 		if (pool->base.dscs[i] != NULL)
 			dcn20_dsc_destroy(&pool->base.dscs[i]);
 	}
-#endif
 
 	if (pool->base.mpc != NULL) {
 		kfree(TO_DCN20_MPC(pool->base.mpc));
@@ -972,11 +960,6 @@ static void destruct(struct dcn21_resource_pool *pool)
 	if (pool->base.dmcu != NULL)
 		dce_dmcu_destroy(&pool->base.dmcu);
 
-#ifdef CONFIG_DRM_AMD_DC_DMUB
-	if (pool->base.dmcub != NULL)
-		dcn21_dmcub_destroy(&pool->base.dmcub);
-#endif
-
 	if (pool->base.dccg != NULL)
 		dcn_dccg_destroy(&pool->base.dccg);
 
@@ -1010,11 +993,9 @@ static void calculate_wm_set_for_vlevel(
 	wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
 	wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
 	wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
 	wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
 	wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
-#endif
 	dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
 
 }
@@ -1023,7 +1004,8 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
 {
 	int i;
 
-	kernel_fpu_begin();
+	DC_FP_START();
+
 	if (dc->bb_overrides.sr_exit_time_ns) {
 		for (i = 0; i < WM_SET_COUNT; i++) {
 			  dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
@@ -1049,7 +1031,7 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
 		}
 	}
 
-	kernel_fpu_end();
+	DC_FP_END();
 }
 
 void dcn21_calculate_wm(
@@ -1099,10 +1081,10 @@ void dcn21_calculate_wm(
 	if (pipe_cnt != pipe_idx) {
 		if (dc->res_pool->funcs->populate_dml_pipes)
 			pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
-				&context->res_ctx, pipes);
+				context, pipes);
 		else
 			pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
-				&context->res_ctx, pipes);
+				context, pipes);
 	}
 
 	*out_pipe_cnt = pipe_cnt;
@@ -1192,7 +1174,7 @@ static void dcn21_destroy_resource_pool(struct resource_pool **pool)
 {
 	struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool);
 
-	destruct(dcn21_pool);
+	dcn21_resource_destruct(dcn21_pool);
 	kfree(dcn21_pool);
 	*pool = NULL;
 }
@@ -1331,7 +1313,6 @@ static void read_dce_straps(
 
 }
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 
 struct display_stream_compressor *dcn21_dsc_create(
 	struct dc_context *ctx, uint32_t inst)
@@ -1347,16 +1328,9 @@ struct display_stream_compressor *dcn21_dsc_create(
 	dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
 	return &dsc->base;
 }
-#endif
 
 static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
 {
-	/*
-	TODO: Fix this function to calcualte correct values.
-	There are known issues with this function currently
-	that will need to be investigated. Use hardcoded known good values for now.
-
-
 	struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
 	struct clk_limit_table *clk_table = &bw_params->clk_table;
 	int i;
@@ -1371,11 +1345,14 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
 		dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
 		dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
 		dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
-		dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000;
+		dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
 	}
-	dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - i];
+	dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
 	dcn2_1_soc.num_states = i;
-	*/
+
+	// diags does not retrieve proper values from SMU, do not update DML instance for diags
+	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment))
+		dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
 }
 
 /* Temporary Place holder until we can get them from fuse */
@@ -1529,8 +1506,9 @@ static const struct encoder_feature_support link_enc_feature = {
 
 #define link_regs(id, phyid)\
 [id] = {\
-	LE_DCN10_REG_LIST(id), \
+	LE_DCN2_REG_LIST(id), \
 	UNIPHY_DCN2_REG_LIST(phyid), \
+	DPCS_DCN21_REG_LIST(id), \
 	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
 }
 
@@ -1569,11 +1547,13 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
 };
 
 static const struct dcn10_link_enc_shift le_shift = {
-	LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
+	LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),
+	DPCS_DCN21_MASK_SH_LIST(__SHIFT)
 };
 
 static const struct dcn10_link_enc_mask le_mask = {
-	LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
+	LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),
+	DPCS_DCN21_MASK_SH_LIST(_MASK)
 };
 
 static int map_transmitter_id_to_phy_instance(
@@ -1639,10 +1619,11 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
 }
 
 static int dcn21_populate_dml_pipes_from_context(
-		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+		struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)
 {
-	uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, res_ctx, pipes);
+	uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes);
 	int i;
+	struct resource_context *res_ctx = &context->res_ctx;
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 
@@ -1671,7 +1652,7 @@ static struct resource_funcs dcn21_res_pool_funcs = {
 	.update_bw_bounding_box = update_bw_bounding_box
 };
 
-static bool construct(
+static bool dcn21_resource_construct(
 	uint8_t num_virtual_links,
 	struct dc *dc,
 	struct dcn21_resource_pool *pool)
@@ -1711,6 +1692,7 @@ static bool construct(
 	dc->caps.post_blend_color_processing = true;
 	dc->caps.force_dp_tps4_for_cp2520 = true;
 	dc->caps.extended_aux_timeout_support = true;
+	dc->caps.dmcub_support = true;
 
 	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
 		dc->debug = debug_defaults_drv;
@@ -1760,7 +1742,7 @@ static bool construct(
 		goto create_fail;
 	}
 
-	pool->base.dmcu = dcn20_dmcu_create(ctx,
+	pool->base.dmcu = dcn21_dmcu_create(ctx,
 			&dmcu_regs,
 			&dmcu_shift,
 			&dmcu_mask);
@@ -1770,6 +1752,10 @@ static bool construct(
 		goto create_fail;
 	}
 
+	// Leave as NULL to not affect current dmcu psr programming sequence
+	// Will be uncommented when functionality is confirmed to be working
+	pool->base.psr = NULL;
+
 	pool->base.abm = dce_abm_create(ctx,
 			&abm_regs,
 			&abm_shift,
@@ -1780,18 +1766,6 @@ static bool construct(
 		goto create_fail;
 	}
 
-#ifdef CONFIG_DRM_AMD_DC_DMUB
-	pool->base.dmcub = dcn21_dmcub_create(ctx,
-			&dmcub_regs,
-			&dmcub_shift,
-			&dmcub_mask);
-	if (pool->base.dmcub == NULL) {
-		dm_error("DC: failed to create dmcub!\n");
-		BREAK_TO_DEBUGGER();
-		goto create_fail;
-	}
-#endif
-
 	pool->base.pp_smu = dcn21_pp_smu_create(ctx);
 
 	num_pipes = dcn2_1_ip.max_num_dpp;
@@ -1818,41 +1792,41 @@ static bool construct(
 		if ((pipe_fuses & (1 << i)) != 0)
 			continue;
 
-		pool->base.hubps[i] = dcn21_hubp_create(ctx, i);
-		if (pool->base.hubps[i] == NULL) {
+		pool->base.hubps[j] = dcn21_hubp_create(ctx, i);
+		if (pool->base.hubps[j] == NULL) {
 			BREAK_TO_DEBUGGER();
 			dm_error(
 				"DC: failed to create memory input!\n");
 			goto create_fail;
 		}
 
-		pool->base.ipps[i] = dcn21_ipp_create(ctx, i);
-		if (pool->base.ipps[i] == NULL) {
+		pool->base.ipps[j] = dcn21_ipp_create(ctx, i);
+		if (pool->base.ipps[j] == NULL) {
 			BREAK_TO_DEBUGGER();
 			dm_error(
 				"DC: failed to create input pixel processor!\n");
 			goto create_fail;
 		}
 
-		pool->base.dpps[i] = dcn21_dpp_create(ctx, i);
-		if (pool->base.dpps[i] == NULL) {
+		pool->base.dpps[j] = dcn21_dpp_create(ctx, i);
+		if (pool->base.dpps[j] == NULL) {
 			BREAK_TO_DEBUGGER();
 			dm_error(
 				"DC: failed to create dpps!\n");
 			goto create_fail;
 		}
 
-		pool->base.opps[i] = dcn21_opp_create(ctx, i);
-		if (pool->base.opps[i] == NULL) {
+		pool->base.opps[j] = dcn21_opp_create(ctx, i);
+		if (pool->base.opps[j] == NULL) {
 			BREAK_TO_DEBUGGER();
 			dm_error(
 				"DC: failed to create output pixel processor!\n");
 			goto create_fail;
 		}
 
-		pool->base.timing_generators[i] = dcn21_timing_generator_create(
+		pool->base.timing_generators[j] = dcn21_timing_generator_create(
 				ctx, i);
-		if (pool->base.timing_generators[i] == NULL) {
+		if (pool->base.timing_generators[j] == NULL) {
 			BREAK_TO_DEBUGGER();
 			dm_error("DC: failed to create tg!\n");
 			goto create_fail;
@@ -1896,7 +1870,6 @@ static bool construct(
 		goto create_fail;
 	}
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
 		pool->base.dscs[i] = dcn21_dsc_create(ctx, i);
 		if (pool->base.dscs[i] == NULL) {
@@ -1905,7 +1878,6 @@ static bool construct(
 			goto create_fail;
 		}
 	}
-#endif
 
 	if (!dcn20_dwbc_create(ctx, &pool->base)) {
 		BREAK_TO_DEBUGGER();
@@ -1936,7 +1908,7 @@ static bool construct(
 
 create_fail:
 
-	destruct(pool);
+	dcn21_resource_destruct(pool);
 
 	return false;
 }
@@ -1951,7 +1923,7 @@ struct resource_pool *dcn21_create_resource_pool(
 	if (!pool)
 		return NULL;
 
-	if (construct(init_data->num_virtual_links, dc, pool))
+	if (dcn21_resource_construct(init_data->num_virtual_links, dc, pool))
 		return &pool->base;
 
 	BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 94b75e9..8bde1d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -118,13 +118,11 @@ bool dm_helpers_submit_i2c(
 		const struct dc_link *link,
 		struct i2c_command *cmd);
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 bool dm_helpers_dp_write_dsc_enable(
 		struct dc_context *ctx,
 		const struct dc_stream_state *stream,
 		bool enable
 );
-#endif
 bool dm_helpers_is_dp_sink_present(
 		struct dc_link *link);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index ef7df9e..ae608c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -41,12 +41,8 @@ enum pp_smu_ver {
 	 */
 	PP_SMU_UNSUPPORTED,
 	PP_SMU_VER_RV,
-#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0
 	PP_SMU_VER_NV,
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	PP_SMU_VER_RN,
-#endif
 
 	PP_SMU_VER_MAX
 };
@@ -143,7 +139,6 @@ struct pp_smu_funcs_rv {
 	void (*set_pme_wa_enable)(struct pp_smu *pp);
 };
 
-#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0
 /* Used by pp_smu_funcs_nv.set_voltage_by_freq
  *
  */
@@ -247,7 +242,6 @@ struct pp_smu_funcs_nv {
 	enum pp_smu_status (*set_pstate_handshake_support)(struct pp_smu *pp,
 			BOOLEAN pstate_handshake_supported);
 };
-#endif
 
 #define PP_SMU_NUM_SOCCLK_DPM_LEVELS  8
 #define PP_SMU_NUM_DCFCLK_DPM_LEVELS  8
@@ -291,12 +285,8 @@ struct pp_smu_funcs {
 	struct pp_smu ctx;
 	union {
 		struct pp_smu_funcs_rv rv_funcs;
-#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0
 		struct pp_smu_funcs_nv nv_funcs;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 		struct pp_smu_funcs_rn rn_funcs;
-#endif
 
 	};
 };
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 1a04297..968ff1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -40,6 +40,9 @@
 
 #undef DEPRECATED
 
+struct dmub_srv;
+struct dc_dmub_srv;
+
 irq_handler_idx dm_register_interrupt(
 	struct dc_context *ctx,
 	struct dc_interrupt_params *int_params,
@@ -139,6 +142,13 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
 		uint32_t addr, int n,
 		uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
 
+struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub);
+void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv);
+
+void reg_sequence_start_gather(const struct dc_context *ctx);
+void reg_sequence_start_execute(const struct dc_context *ctx);
+void reg_sequence_wait_done(const struct dc_context *ctx);
+
 #define FD(reg_field)	reg_field ## __SHIFT, \
 						reg_field ## _MASK
 
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index a3d1be2..b52ba6f 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -220,6 +220,7 @@ struct dm_bl_data_point {
 };
 
 /* Total size of the structure should not exceed 256 bytes */
+#define BL_DATA_POINTS 99
 struct dm_acpi_atif_backlight_caps {
 	uint16_t size; /* Bytes 0-1 (2 bytes) */
 	uint16_t flags; /* Byted 2-3 (2 bytes) */
@@ -229,7 +230,7 @@ struct dm_acpi_atif_backlight_caps {
 	uint8_t  min_input_signal; /* Byte 7 */
 	uint8_t  max_input_signal; /* Byte 8 */
 	uint8_t  num_data_points; /* Byte 9 */
-	struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/
+	struct dm_bl_data_point data_points[BL_DATA_POINTS]; /* Bytes 10-207 (198 bytes)*/
 };
 
 enum dm_acpi_display_type {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 8df2516..7ee8b84 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -1,5 +1,6 @@
 #
 # Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2019 Raptor Engineering, LLC
 #
 # Permission is hereby granted, free of charge, to any person obtaining a
 # copy of this software and associated documentation files (the "Software"),
@@ -24,7 +25,13 @@
 # It provides the general basic services required by other DAL
 # subcomponents.
 
+ifdef CONFIG_X86
 dml_ccflags := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+dml_ccflags := -mhard-float -maltivec
+endif
 
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -32,6 +39,7 @@
 endif
 endif
 
+ifdef CONFIG_X86
 ifdef IS_OLD_GCC
 # Stack alignment mismatch, proceed with caution.
 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -40,17 +48,16 @@
 else
 dml_ccflags += -msse2
 endif
+endif
 
 CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
 
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ifdef CONFIG_DRM_AMD_DC_DCN
 CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
-endif
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
 endif
@@ -61,11 +68,9 @@
 DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
 	dml_common_defs.o
 
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ifdef CONFIG_DRM_AMD_DC_DCN
 DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
 DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
-endif
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
 DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
 endif
 
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 6c6c486..45f0289 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -38,6 +38,7 @@
 
 #define BPP_INVALID 0
 #define BPP_BLENDED_PIPE 0xffffffff
+#define DCN20_MAX_420_IMAGE_WIDTH 4096
 
 static double adjust_ReturnBW(
 		struct display_mode_lib *mode_lib,
@@ -937,7 +938,7 @@ static unsigned int CalculateVMAndRowBytes(
 		*MetaRowByte = 0;
 	}
 
-	if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) {
+	if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
 		MacroTileSizeBytes = 256;
 		MacroTileHeight = BlockHeight256Bytes;
 	} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
@@ -1335,11 +1336,11 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
 		else
 			mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
 
-		if (mode_lib->vba.ODMCombineEnabled[k] == true)
+		if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
 			MainPlaneDoesODMCombine = true;
 		for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
 			if (mode_lib->vba.BlendingAndTiming[k] == j
-					&& mode_lib->vba.ODMCombineEnabled[j] == true)
+					&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
 				MainPlaneDoesODMCombine = true;
 
 		if (MainPlaneDoesODMCombine == true)
@@ -2577,7 +2578,8 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
 			mode_lib->vba.MinActiveDRAMClockChangeMargin
 					+ mode_lib->vba.DRAMClockChangeLatency;
 
-	if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+	if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
+			mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
 		mode_lib->vba.DRAMClockChangeWatermark += 25;
 		mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
 	} else {
@@ -2847,12 +2849,12 @@ static void dml20_DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
 			SwathWidth = mode_lib->vba.ViewportHeight[k];
 		}
 
-		if (mode_lib->vba.ODMCombineEnabled[k] == true) {
+		if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
 			MainPlaneDoesODMCombine = true;
 		}
 		for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
 			if (mode_lib->vba.BlendingAndTiming[k] == j
-					&& mode_lib->vba.ODMCombineEnabled[j] == true) {
+					&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
 				MainPlaneDoesODMCombine = true;
 			}
 		}
@@ -3347,7 +3349,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 										== dm_420_10))
 				|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
 						|| mode_lib->vba.SurfaceTiling[k]
-								== dm_sw_gfx7_2d_thin_lvp)
+								== dm_sw_gfx7_2d_thin_l_vp)
 						&& !((mode_lib->vba.SourcePixelFormat[k]
 								== dm_444_64
 								|| mode_lib->vba.SourcePixelFormat[k]
@@ -3445,10 +3447,10 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				locals->FabricAndDRAMBandwidthPerState[i] * 1000)
 				* locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
 
-		locals->ReturnBWPerState[i] = locals->ReturnBWToDCNPerState;
+		locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState;
 
 		if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
-			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
 					locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
 					((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
 					/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
@@ -3459,7 +3461,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
 
 		if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
-			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
 				4 * locals->ReturnBWToDCNPerState *
 				(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
 				* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
@@ -3471,7 +3473,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000);
 
 		if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
-			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
 					locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
 					((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
 					/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
@@ -3482,7 +3484,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
 
 		if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
-			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
 				4 * locals->ReturnBWToDCNPerState *
 				(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
 				* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
@@ -3520,12 +3522,12 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
 		locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
 				(mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i]
-				+ locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i];
-		if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i]
+				+ locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
+		if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
 				> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
-			locals->ROBSupport[i] = true;
+			locals->ROBSupport[i][0] = true;
 		} else {
-			locals->ROBSupport[i] = false;
+			locals->ROBSupport[i][0] = false;
 		}
 	}
 	/*Writeback Mode Support Check*/
@@ -3893,16 +3895,22 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 						&& i == mode_lib->vba.soc.num_states)
 					mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
 							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
-				if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
-					locals->ODMCombineEnablePerState[i][k] = false;
-					mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
-				} else {
-					locals->ODMCombineEnablePerState[i][k] = true;
-					mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+
+				locals->ODMCombineEnablePerState[i][k] = false;
+				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+				if (mode_lib->vba.ODMCapability) {
+					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					}
 				}
+
 				if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
 						&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
-						&& locals->ODMCombineEnablePerState[i][k] == false) {
+						&& locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
 					locals->NoOfDPP[i][j][k] = 1;
 					locals->RequiredDPPCLK[i][j][k] =
 						locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
@@ -3991,16 +3999,16 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	/*Viewport Size Check*/
 
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
-		locals->ViewportSizeSupport[i] = true;
+		locals->ViewportSizeSupport[i][0] = true;
 		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-			if (locals->ODMCombineEnablePerState[i][k] == true) {
+			if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
 				if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
 						> locals->MaximumSwathWidth[k]) {
-					locals->ViewportSizeSupport[i] = false;
+					locals->ViewportSizeSupport[i][0] = false;
 				}
 			} else {
 				if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
-					locals->ViewportSizeSupport[i] = false;
+					locals->ViewportSizeSupport[i][0] = false;
 				}
 			}
 		}
@@ -4182,8 +4190,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 						mode_lib->vba.DSCFormatFactor = 1;
 					}
 					if (locals->RequiresDSC[i][k] == true) {
-						if (locals->ODMCombineEnablePerState[i][k]
-								== true) {
+						if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
 							if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
 									> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
 								locals->DSCCLKRequiredMoreThanSupported[i] =
@@ -4206,7 +4213,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 		mode_lib->vba.TotalDSCUnitsRequired = 0.0;
 		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 			if (locals->RequiresDSC[i][k] == true) {
-				if (locals->ODMCombineEnablePerState[i][k] == true) {
+				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
 					mode_lib->vba.TotalDSCUnitsRequired =
 							mode_lib->vba.TotalDSCUnitsRequired + 2.0;
 				} else {
@@ -4248,7 +4255,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
 			}
 			if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
-				if (locals->ODMCombineEnablePerState[i][k] == false) {
+				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
 					locals->DSCDelayPerState[i][k] =
 							dscceComputeDelay(
 									mode_lib->vba.DSCInputBitPerComponent[k],
@@ -4291,7 +4298,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
 		for (j = 0; j < 2; j++) {
 			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-				if (locals->ODMCombineEnablePerState[i][k] == true)
+				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1)
 					locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k]));
 				else
 					locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k];
@@ -4344,28 +4351,28 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 
 				locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma +  dml_min(
 						locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] *
-						locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i],
+						locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0],
 						locals->EffectiveLBLatencyHidingSourceLinesLuma),
 						locals->SwathHeightYPerState[i][j][k]);
 
 				locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
 						locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
-						locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i],
+						locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
 						locals->EffectiveLBLatencyHidingSourceLinesChroma),
 						locals->SwathHeightCPerState[i][j][k]);
 
 				if (locals->BytePerPixelInDETC[k] == 0) {
 					locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
 							/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
-								dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]);
+								dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);
 				} else {
 					locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(
 						locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
 						/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
-						dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]),
+						dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]),
 							locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) -
 							locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 *
-							dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]));
+							dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]));
 				}
 			}
 		}
@@ -4405,14 +4412,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k];
 				locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k];
 				locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k];
-				mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max(
-						mode_lib->vba.ProjectedDCFCLKDeepSleep,
+				mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max(
+						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 						mode_lib->vba.PixelClock[k] / 16.0);
 				if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
 					if (mode_lib->vba.VRatio[k] <= 1.0) {
-						mode_lib->vba.ProjectedDCFCLKDeepSleep =
+						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
 								dml_max(
-										mode_lib->vba.ProjectedDCFCLKDeepSleep,
+										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 										1.1
 												* dml_ceil(
 														mode_lib->vba.BytePerPixelInDETY[k],
@@ -4422,9 +4429,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 												* mode_lib->vba.PixelClock[k]
 												/ mode_lib->vba.NoOfDPP[i][j][k]);
 					} else {
-						mode_lib->vba.ProjectedDCFCLKDeepSleep =
+						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
 								dml_max(
-										mode_lib->vba.ProjectedDCFCLKDeepSleep,
+										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 										1.1
 												* dml_ceil(
 														mode_lib->vba.BytePerPixelInDETY[k],
@@ -4435,9 +4442,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 					}
 				} else {
 					if (mode_lib->vba.VRatio[k] <= 1.0) {
-						mode_lib->vba.ProjectedDCFCLKDeepSleep =
+						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
 								dml_max(
-										mode_lib->vba.ProjectedDCFCLKDeepSleep,
+										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 										1.1
 												* dml_ceil(
 														mode_lib->vba.BytePerPixelInDETY[k],
@@ -4447,9 +4454,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 												* mode_lib->vba.PixelClock[k]
 												/ mode_lib->vba.NoOfDPP[i][j][k]);
 					} else {
-						mode_lib->vba.ProjectedDCFCLKDeepSleep =
+						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
 								dml_max(
-										mode_lib->vba.ProjectedDCFCLKDeepSleep,
+										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 										1.1
 												* dml_ceil(
 														mode_lib->vba.BytePerPixelInDETY[k],
@@ -4459,9 +4466,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 												* mode_lib->vba.RequiredDPPCLK[i][j][k]);
 					}
 					if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
-						mode_lib->vba.ProjectedDCFCLKDeepSleep =
+						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
 								dml_max(
-										mode_lib->vba.ProjectedDCFCLKDeepSleep,
+										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 										1.1
 												* dml_ceil(
 														mode_lib->vba.BytePerPixelInDETC[k],
@@ -4472,9 +4479,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 												* mode_lib->vba.PixelClock[k]
 												/ mode_lib->vba.NoOfDPP[i][j][k]);
 					} else {
-						mode_lib->vba.ProjectedDCFCLKDeepSleep =
+						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
 								dml_max(
-										mode_lib->vba.ProjectedDCFCLKDeepSleep,
+										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 										1.1
 												* dml_ceil(
 														mode_lib->vba.BytePerPixelInDETC[k],
@@ -4510,7 +4517,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 						&mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k],
 						&mode_lib->vba.dpte_row_height[k],
 						&mode_lib->vba.meta_row_height[k]);
-				mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines(
+				mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
 						mode_lib,
 						mode_lib->vba.VRatio[k],
 						mode_lib->vba.vtaps[k],
@@ -4549,7 +4556,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 							&mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k],
 							&mode_lib->vba.dpte_row_height_chroma[k],
 							&mode_lib->vba.meta_row_height_chroma[k]);
-					mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines(
+					mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
 							mode_lib,
 							mode_lib->vba.VRatio[k] / 2.0,
 							mode_lib->vba.VTAPsChroma[k],
@@ -4563,14 +4570,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 					mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
 					mode_lib->vba.MetaRowBytesC = 0.0;
 					mode_lib->vba.DPTEBytesPerRowC = 0.0;
-					locals->PrefetchLinesC[k] = 0.0;
+					locals->PrefetchLinesC[0][0][k] = 0.0;
 					locals->PTEBufferSizeNotExceededC[i][j][k] = true;
 					locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
 				}
-				locals->PDEAndMetaPTEBytesPerFrame[k] =
+				locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
 						mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
-				locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
-				locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
+				locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
+				locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
 
 				CalculateActiveRowBandwidth(
 						mode_lib->vba.GPUVMEnable,
@@ -4597,14 +4604,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 									+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j]
 											* mode_lib->vba.MetaChunkSize)
 									* 1024.0
-									/ mode_lib->vba.ReturnBWPerState[i];
+									/ mode_lib->vba.ReturnBWPerState[i][0];
 			if (mode_lib->vba.GPUVMEnable == true) {
 				mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
 						+ mode_lib->vba.TotalNumberOfActiveDPP[i][j]
 								* mode_lib->vba.PTEGroupSize
-								/ mode_lib->vba.ReturnBWPerState[i];
+								/ mode_lib->vba.ReturnBWPerState[i][0];
 			}
-			mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
+			mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
 
 			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 				if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -4654,7 +4661,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 			}
 
 			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-				locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+				locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
 					- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
 			}
 
@@ -4699,7 +4706,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 									mode_lib->vba.RequiredDPPCLK[i][j][k],
 									mode_lib->vba.RequiredDISPCLK[i][j],
 									mode_lib->vba.PixelClock[k],
-									mode_lib->vba.ProjectedDCFCLKDeepSleep,
+									mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 									mode_lib->vba.DSCDelayPerState[i][k],
 									mode_lib->vba.NoOfDPP[i][j][k],
 									mode_lib->vba.ScalerEnabled[k],
@@ -4717,7 +4724,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 											- mode_lib->vba.VActive[k],
 									mode_lib->vba.HTotal[k],
 									mode_lib->vba.MaxInterDCNTileRepeaters,
-									mode_lib->vba.MaximumVStartup[k],
+									mode_lib->vba.MaximumVStartup[0][0][k],
 									mode_lib->vba.GPUVMMaxPageTableLevels,
 									mode_lib->vba.GPUVMEnable,
 									mode_lib->vba.DynamicMetadataEnable[k],
@@ -4727,15 +4734,15 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 									mode_lib->vba.UrgentLatencyPixelDataOnly,
 									mode_lib->vba.ExtraLatency,
 									mode_lib->vba.TimeCalc,
-									mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
-									mode_lib->vba.MetaRowBytes[k],
-									mode_lib->vba.DPTEBytesPerRow[k],
-									mode_lib->vba.PrefetchLinesY[k],
+									mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+									mode_lib->vba.MetaRowBytes[0][0][k],
+									mode_lib->vba.DPTEBytesPerRow[0][0][k],
+									mode_lib->vba.PrefetchLinesY[0][0][k],
 									mode_lib->vba.SwathWidthYPerState[i][j][k],
 									mode_lib->vba.BytePerPixelInDETY[k],
 									mode_lib->vba.PrefillY[k],
 									mode_lib->vba.MaxNumSwY[k],
-									mode_lib->vba.PrefetchLinesC[k],
+									mode_lib->vba.PrefetchLinesC[0][0][k],
 									mode_lib->vba.BytePerPixelInDETC[k],
 									mode_lib->vba.PrefillC[k],
 									mode_lib->vba.MaxNumSwC[k],
@@ -4766,19 +4773,19 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				locals->prefetch_vm_bw_valid = true;
 				locals->prefetch_row_bw_valid = true;
 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-					if (locals->PDEAndMetaPTEBytesPerFrame[k] == 0)
+					if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0)
 						locals->prefetch_vm_bw[k] = 0;
 					else if (locals->LinesForMetaPTE[k] > 0)
-						locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[k]
+						locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k]
 							/ (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]);
 					else {
 						locals->prefetch_vm_bw[k] = 0;
 						locals->prefetch_vm_bw_valid = false;
 					}
-					if (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k] == 0)
+					if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0)
 						locals->prefetch_row_bw[k] = 0;
 					else if (locals->LinesForMetaAndDPTERow[k] > 0)
-						locals->prefetch_row_bw[k] = (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k])
+						locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k])
 							/ (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]);
 					else {
 						locals->prefetch_row_bw[k] = 0;
@@ -4797,13 +4804,13 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 											mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k])
 											+ mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]);
 				}
-				locals->BandwidthWithoutPrefetchSupported[i] = true;
-				if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]) {
-					locals->BandwidthWithoutPrefetchSupported[i] = false;
+				locals->BandwidthWithoutPrefetchSupported[i][0] = true;
+				if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) {
+					locals->BandwidthWithoutPrefetchSupported[i][0] = false;
 				}
 
 				locals->PrefetchSupported[i][j] = true;
-				if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]) {
+				if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) {
 					locals->PrefetchSupported[i][j] = false;
 				}
 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4828,7 +4835,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 			if (mode_lib->vba.PrefetchSupported[i][j] == true
 					&& mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) {
 				mode_lib->vba.BandwidthAvailableForImmediateFlip =
-						mode_lib->vba.ReturnBWPerState[i];
+						mode_lib->vba.ReturnBWPerState[i][0];
 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 					mode_lib->vba.BandwidthAvailableForImmediateFlip =
 							mode_lib->vba.BandwidthAvailableForImmediateFlip
@@ -4842,9 +4849,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 					if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
 							&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
 						mode_lib->vba.ImmediateFlipBytes[k] =
-								mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
-										+ mode_lib->vba.MetaRowBytes[k]
-										+ mode_lib->vba.DPTEBytesPerRow[k];
+								mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k]
+										+ mode_lib->vba.MetaRowBytes[0][0][k]
+										+ mode_lib->vba.DPTEBytesPerRow[0][0][k];
 					}
 				}
 				mode_lib->vba.TotImmediateFlipBytes = 0.0;
@@ -4872,9 +4879,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 									/ mode_lib->vba.PixelClock[k],
 							mode_lib->vba.VRatio[k],
 							mode_lib->vba.Tno_bw[k],
-							mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
-							mode_lib->vba.MetaRowBytes[k],
-							mode_lib->vba.DPTEBytesPerRow[k],
+							mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+							mode_lib->vba.MetaRowBytes[0][0][k],
+							mode_lib->vba.DPTEBytesPerRow[0][0][k],
 							mode_lib->vba.DCCEnable[k],
 							mode_lib->vba.dpte_row_height[k],
 							mode_lib->vba.meta_row_height[k],
@@ -4899,7 +4906,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				}
 				mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true;
 				if (mode_lib->vba.total_dcn_read_bw_with_flip
-						> mode_lib->vba.ReturnBWPerState[i]) {
+						> mode_lib->vba.ReturnBWPerState[i][0]) {
 					mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
 				}
 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4918,13 +4925,13 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++)
 		mode_lib->vba.MaxTotalVActiveRDBandwidth = mode_lib->vba.MaxTotalVActiveRDBandwidth + mode_lib->vba.ReadBandwidth[k];
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
-		mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(mode_lib->vba.ReturnBusWidth *
+		mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth *
 				mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) *
 				mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100;
-		if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i])
-			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = true;
+		if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0])
+			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true;
 		else
-			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = false;
+			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false;
 	}
 
 	/*PTE Buffer Size Check*/
@@ -5012,7 +5019,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				status = DML_FAIL_SCALE_RATIO_TAP;
 			} else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
 				status = DML_FAIL_SOURCE_PIXEL_FORMAT;
-			} else if (locals->ViewportSizeSupport[i] != true) {
+			} else if (locals->ViewportSizeSupport[i][0] != true) {
 				status = DML_FAIL_VIEWPORT_SIZE;
 			} else if (locals->DIOSupport[i] != true) {
 				status = DML_FAIL_DIO_SUPPORT;
@@ -5022,7 +5029,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				status = DML_FAIL_DSC_CLK_REQUIRED;
 			} else if (locals->UrgentLatencySupport[i][j] != true) {
 				status = DML_FAIL_URGENT_LATENCY;
-			} else if (locals->ROBSupport[i] != true) {
+			} else if (locals->ROBSupport[i][0] != true) {
 				status = DML_FAIL_REORDERING_BUFFER;
 			} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
 				status = DML_FAIL_DISPCLK_DPPCLK;
@@ -5042,7 +5049,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				status = DML_FAIL_PITCH_SUPPORT;
 			} else if (locals->PrefetchSupported[i][j] != true) {
 				status = DML_FAIL_PREFETCH_SUPPORT;
-			} else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) {
+			} else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
 				status = DML_FAIL_TOTAL_V_ACTIVE_BW;
 			} else if (locals->VRatioInPrefetchSupported[i][j] != true) {
 				status = DML_FAIL_V_RATIO_PREFETCH;
@@ -5088,7 +5095,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
 	mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
 	mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
-	mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel];
+	mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
 	mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
 	for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 		if (mode_lib->vba.BlendingAndTiming[k] == k) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 3c70dd5..485a9c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -39,6 +39,7 @@
 #define BPP_INVALID 0
 #define BPP_BLENDED_PIPE 0xffffffff
 #define DCN20_MAX_DSC_IMAGE_WIDTH 5184
+#define DCN20_MAX_420_IMAGE_WIDTH 4096
 
 static double adjust_ReturnBW(
 		struct display_mode_lib *mode_lib,
@@ -997,7 +998,7 @@ static unsigned int CalculateVMAndRowBytes(
 		*MetaRowByte = 0;
 	}
 
-	if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) {
+	if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
 		MacroTileSizeBytes = 256;
 		MacroTileHeight = BlockHeight256Bytes;
 	} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
@@ -1395,11 +1396,11 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
 		else
 			mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
 
-		if (mode_lib->vba.ODMCombineEnabled[k] == true)
+		if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
 			MainPlaneDoesODMCombine = true;
 		for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
 			if (mode_lib->vba.BlendingAndTiming[k] == j
-					&& mode_lib->vba.ODMCombineEnabled[j] == true)
+					&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
 				MainPlaneDoesODMCombine = true;
 
 		if (MainPlaneDoesODMCombine == true)
@@ -2611,9 +2612,13 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
 			mode_lib->vba.MinActiveDRAMClockChangeMargin
 					+ mode_lib->vba.DRAMClockChangeLatency;
 
-	if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+	if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
+			mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
 		mode_lib->vba.DRAMClockChangeWatermark += 25;
 		mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
+	} else if (mode_lib->vba.DummyPStateCheck &&
+			mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+		mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
 	} else {
 		if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
 			mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank;
@@ -2881,12 +2886,12 @@ static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
 			SwathWidth = mode_lib->vba.ViewportHeight[k];
 		}
 
-		if (mode_lib->vba.ODMCombineEnabled[k] == true) {
+		if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
 			MainPlaneDoesODMCombine = true;
 		}
 		for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
 			if (mode_lib->vba.BlendingAndTiming[k] == j
-					&& mode_lib->vba.ODMCombineEnabled[j] == true) {
+					&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
 				MainPlaneDoesODMCombine = true;
 			}
 		}
@@ -3381,7 +3386,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 										== dm_420_10))
 				|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
 						|| mode_lib->vba.SurfaceTiling[k]
-								== dm_sw_gfx7_2d_thin_lvp)
+								== dm_sw_gfx7_2d_thin_l_vp)
 						&& !((mode_lib->vba.SourcePixelFormat[k]
 								== dm_444_64
 								|| mode_lib->vba.SourcePixelFormat[k]
@@ -3479,10 +3484,10 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 				locals->FabricAndDRAMBandwidthPerState[i] * 1000)
 				* locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
 
-		locals->ReturnBWPerState[i] = locals->ReturnBWToDCNPerState;
+		locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState;
 
 		if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
-			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
 					locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
 					((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
 					/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
@@ -3493,7 +3498,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 				+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
 
 		if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
-			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
 				4 * locals->ReturnBWToDCNPerState *
 				(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
 				* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
@@ -3505,7 +3510,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 				locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000);
 
 		if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
-			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
 					locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
 					((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
 					/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
@@ -3516,7 +3521,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 				+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
 
 		if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
-			locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+			locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
 				4 * locals->ReturnBWToDCNPerState *
 				(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
 				* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
@@ -3554,12 +3559,12 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
 		locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
 				(mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i]
-				+ locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i];
-		if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i]
+				+ locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
+		if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
 				> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
-			locals->ROBSupport[i] = true;
+			locals->ROBSupport[i][0] = true;
 		} else {
-			locals->ROBSupport[i] = false;
+			locals->ROBSupport[i][0] = false;
 		}
 	}
 	/*Writeback Mode Support Check*/
@@ -3931,18 +3936,25 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 						&& i == mode_lib->vba.soc.num_states)
 					mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
 							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
-				if (mode_lib->vba.ODMCapability == false ||
-						(locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
-							&& (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) {
-					locals->ODMCombineEnablePerState[i][k] = false;
-					mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
-				} else {
-					locals->ODMCombineEnablePerState[i][k] = true;
-					mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+
+				locals->ODMCombineEnablePerState[i][k] = false;
+				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+				if (mode_lib->vba.ODMCapability) {
+					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					}
 				}
+
 				if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
 						&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
-						&& locals->ODMCombineEnablePerState[i][k] == false) {
+						&& locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
 					locals->NoOfDPP[i][j][k] = 1;
 					locals->RequiredDPPCLK[i][j][k] =
 						locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
@@ -4031,16 +4043,16 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 	/*Viewport Size Check*/
 
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
-		locals->ViewportSizeSupport[i] = true;
+		locals->ViewportSizeSupport[i][0] = true;
 		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-			if (locals->ODMCombineEnablePerState[i][k] == true) {
+			if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
 				if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
 						> locals->MaximumSwathWidth[k]) {
-					locals->ViewportSizeSupport[i] = false;
+					locals->ViewportSizeSupport[i][0] = false;
 				}
 			} else {
 				if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
-					locals->ViewportSizeSupport[i] = false;
+					locals->ViewportSizeSupport[i][0] = false;
 				}
 			}
 		}
@@ -4222,8 +4234,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 						mode_lib->vba.DSCFormatFactor = 1;
 					}
 					if (locals->RequiresDSC[i][k] == true) {
-						if (locals->ODMCombineEnablePerState[i][k]
-								== true) {
+						if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
 							if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
 									> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
 								locals->DSCCLKRequiredMoreThanSupported[i] =
@@ -4246,7 +4257,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 		mode_lib->vba.TotalDSCUnitsRequired = 0.0;
 		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 			if (locals->RequiresDSC[i][k] == true) {
-				if (locals->ODMCombineEnablePerState[i][k] == true) {
+				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
 					mode_lib->vba.TotalDSCUnitsRequired =
 							mode_lib->vba.TotalDSCUnitsRequired + 2.0;
 				} else {
@@ -4288,7 +4299,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 				mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
 			}
 			if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
-				if (locals->ODMCombineEnablePerState[i][k] == false) {
+				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
 					locals->DSCDelayPerState[i][k] =
 							dscceComputeDelay(
 									mode_lib->vba.DSCInputBitPerComponent[k],
@@ -4331,7 +4342,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
 		for (j = 0; j < 2; j++) {
 			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-				if (locals->ODMCombineEnablePerState[i][k] == true)
+				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1)
 					locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k]));
 				else
 					locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k];
@@ -4384,28 +4395,28 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 
 				locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma +  dml_min(
 						locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] *
-						locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i],
+						locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0],
 						locals->EffectiveLBLatencyHidingSourceLinesLuma),
 						locals->SwathHeightYPerState[i][j][k]);
 
 				locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
 						locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
-						locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i],
+						locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
 						locals->EffectiveLBLatencyHidingSourceLinesChroma),
 						locals->SwathHeightCPerState[i][j][k]);
 
 				if (locals->BytePerPixelInDETC[k] == 0) {
 					locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
 							/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
-								dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]);
+								dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);
 				} else {
 					locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(
 						locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
 						/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
-						dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]),
+						dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]),
 							locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) -
 							locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 *
-							dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]));
+							dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]));
 				}
 			}
 		}
@@ -4450,14 +4461,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 				locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k];
 				locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k];
 				locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k];
-				mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max(
-						mode_lib->vba.ProjectedDCFCLKDeepSleep,
+				mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max(
+						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 						mode_lib->vba.PixelClock[k] / 16.0);
 				if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
 					if (mode_lib->vba.VRatio[k] <= 1.0) {
-						mode_lib->vba.ProjectedDCFCLKDeepSleep =
+						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
 								dml_max(
-										mode_lib->vba.ProjectedDCFCLKDeepSleep,
+										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 										1.1
 												* dml_ceil(
 														mode_lib->vba.BytePerPixelInDETY[k],
@@ -4467,9 +4478,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 												* mode_lib->vba.PixelClock[k]
 												/ mode_lib->vba.NoOfDPP[i][j][k]);
 					} else {
-						mode_lib->vba.ProjectedDCFCLKDeepSleep =
+						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
 								dml_max(
-										mode_lib->vba.ProjectedDCFCLKDeepSleep,
+										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 										1.1
 												* dml_ceil(
 														mode_lib->vba.BytePerPixelInDETY[k],
@@ -4480,9 +4491,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 					}
 				} else {
 					if (mode_lib->vba.VRatio[k] <= 1.0) {
-						mode_lib->vba.ProjectedDCFCLKDeepSleep =
+						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
 								dml_max(
-										mode_lib->vba.ProjectedDCFCLKDeepSleep,
+										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 										1.1
 												* dml_ceil(
 														mode_lib->vba.BytePerPixelInDETY[k],
@@ -4492,9 +4503,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 												* mode_lib->vba.PixelClock[k]
 												/ mode_lib->vba.NoOfDPP[i][j][k]);
 					} else {
-						mode_lib->vba.ProjectedDCFCLKDeepSleep =
+						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
 								dml_max(
-										mode_lib->vba.ProjectedDCFCLKDeepSleep,
+										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 										1.1
 												* dml_ceil(
 														mode_lib->vba.BytePerPixelInDETY[k],
@@ -4504,9 +4515,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 												* mode_lib->vba.RequiredDPPCLK[i][j][k]);
 					}
 					if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
-						mode_lib->vba.ProjectedDCFCLKDeepSleep =
+						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
 								dml_max(
-										mode_lib->vba.ProjectedDCFCLKDeepSleep,
+										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 										1.1
 												* dml_ceil(
 														mode_lib->vba.BytePerPixelInDETC[k],
@@ -4517,9 +4528,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 												* mode_lib->vba.PixelClock[k]
 												/ mode_lib->vba.NoOfDPP[i][j][k]);
 					} else {
-						mode_lib->vba.ProjectedDCFCLKDeepSleep =
+						mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
 								dml_max(
-										mode_lib->vba.ProjectedDCFCLKDeepSleep,
+										mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 										1.1
 												* dml_ceil(
 														mode_lib->vba.BytePerPixelInDETC[k],
@@ -4555,7 +4566,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 						&mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k],
 						&mode_lib->vba.dpte_row_height[k],
 						&mode_lib->vba.meta_row_height[k]);
-				mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines(
+				mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
 						mode_lib,
 						mode_lib->vba.VRatio[k],
 						mode_lib->vba.vtaps[k],
@@ -4594,7 +4605,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 							&mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k],
 							&mode_lib->vba.dpte_row_height_chroma[k],
 							&mode_lib->vba.meta_row_height_chroma[k]);
-					mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines(
+					mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
 							mode_lib,
 							mode_lib->vba.VRatio[k] / 2.0,
 							mode_lib->vba.VTAPsChroma[k],
@@ -4608,14 +4619,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 					mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
 					mode_lib->vba.MetaRowBytesC = 0.0;
 					mode_lib->vba.DPTEBytesPerRowC = 0.0;
-					locals->PrefetchLinesC[k] = 0.0;
+					locals->PrefetchLinesC[0][0][k] = 0.0;
 					locals->PTEBufferSizeNotExceededC[i][j][k] = true;
 					locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
 				}
-				locals->PDEAndMetaPTEBytesPerFrame[k] =
+				locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
 						mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
-				locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
-				locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
+				locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
+				locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
 
 				CalculateActiveRowBandwidth(
 						mode_lib->vba.GPUVMEnable,
@@ -4642,14 +4653,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 									+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j]
 											* mode_lib->vba.MetaChunkSize)
 									* 1024.0
-									/ mode_lib->vba.ReturnBWPerState[i];
+									/ mode_lib->vba.ReturnBWPerState[i][0];
 			if (mode_lib->vba.GPUVMEnable == true) {
 				mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
 						+ mode_lib->vba.TotalNumberOfActiveDPP[i][j]
 								* mode_lib->vba.PTEGroupSize
-								/ mode_lib->vba.ReturnBWPerState[i];
+								/ mode_lib->vba.ReturnBWPerState[i][0];
 			}
-			mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
+			mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
 
 			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 				if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -4699,7 +4710,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 			}
 
 			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-				locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+				locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
 					- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
 			}
 
@@ -4739,7 +4750,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 						mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
 					}
 
-					CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth,
+					CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i][0], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth,
 						mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k],
 						mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], mode_lib->vba.DSCDelayPerState[i][k], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.ScalerEnabled[k], mode_lib->vba.NumberOfCursors[k],
 						mode_lib->vba.DPPCLKDelaySubtotal, mode_lib->vba.DPPCLKDelaySCL, mode_lib->vba.DPPCLKDelaySCLLBOnly, mode_lib->vba.DPPCLKDelayCNVCFormater, mode_lib->vba.DPPCLKDelayCNVCCursor, mode_lib->vba.DISPCLKDelaySubtotal,
@@ -4753,14 +4764,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 									mode_lib->vba.RequiredDPPCLK[i][j][k],
 									mode_lib->vba.RequiredDISPCLK[i][j],
 									mode_lib->vba.PixelClock[k],
-									mode_lib->vba.ProjectedDCFCLKDeepSleep,
+									mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 									mode_lib->vba.NoOfDPP[i][j][k],
 									mode_lib->vba.NumberOfCursors[k],
 									mode_lib->vba.VTotal[k]
 											- mode_lib->vba.VActive[k],
 									mode_lib->vba.HTotal[k],
 									mode_lib->vba.MaxInterDCNTileRepeaters,
-									mode_lib->vba.MaximumVStartup[k],
+									mode_lib->vba.MaximumVStartup[0][0][k],
 									mode_lib->vba.GPUVMMaxPageTableLevels,
 									mode_lib->vba.GPUVMEnable,
 									mode_lib->vba.DynamicMetadataEnable[k],
@@ -4770,15 +4781,15 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 									mode_lib->vba.UrgentLatencyPixelDataOnly,
 									mode_lib->vba.ExtraLatency,
 									mode_lib->vba.TimeCalc,
-									mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
-									mode_lib->vba.MetaRowBytes[k],
-									mode_lib->vba.DPTEBytesPerRow[k],
-									mode_lib->vba.PrefetchLinesY[k],
+									mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+									mode_lib->vba.MetaRowBytes[0][0][k],
+									mode_lib->vba.DPTEBytesPerRow[0][0][k],
+									mode_lib->vba.PrefetchLinesY[0][0][k],
 									mode_lib->vba.SwathWidthYPerState[i][j][k],
 									mode_lib->vba.BytePerPixelInDETY[k],
 									mode_lib->vba.PrefillY[k],
 									mode_lib->vba.MaxNumSwY[k],
-									mode_lib->vba.PrefetchLinesC[k],
+									mode_lib->vba.PrefetchLinesC[0][0][k],
 									mode_lib->vba.BytePerPixelInDETC[k],
 									mode_lib->vba.PrefillC[k],
 									mode_lib->vba.MaxNumSwC[k],
@@ -4808,19 +4819,19 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 				locals->prefetch_vm_bw_valid = true;
 				locals->prefetch_row_bw_valid = true;
 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-					if (locals->PDEAndMetaPTEBytesPerFrame[k] == 0)
+					if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0)
 						locals->prefetch_vm_bw[k] = 0;
 					else if (locals->LinesForMetaPTE[k] > 0)
-						locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[k]
+						locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k]
 							/ (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]);
 					else {
 						locals->prefetch_vm_bw[k] = 0;
 						locals->prefetch_vm_bw_valid = false;
 					}
-					if (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k] == 0)
+					if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0)
 						locals->prefetch_row_bw[k] = 0;
 					else if (locals->LinesForMetaAndDPTERow[k] > 0)
-						locals->prefetch_row_bw[k] = (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k])
+						locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k])
 							/ (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]);
 					else {
 						locals->prefetch_row_bw[k] = 0;
@@ -4839,13 +4850,13 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 											mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k])
 											+ mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]);
 				}
-				locals->BandwidthWithoutPrefetchSupported[i] = true;
-				if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]) {
-					locals->BandwidthWithoutPrefetchSupported[i] = false;
+				locals->BandwidthWithoutPrefetchSupported[i][0] = true;
+				if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) {
+					locals->BandwidthWithoutPrefetchSupported[i][0] = false;
 				}
 
 				locals->PrefetchSupported[i][j] = true;
-				if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]) {
+				if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) {
 					locals->PrefetchSupported[i][j] = false;
 				}
 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4870,7 +4881,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 			if (mode_lib->vba.PrefetchSupported[i][j] == true
 					&& mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) {
 				mode_lib->vba.BandwidthAvailableForImmediateFlip =
-						mode_lib->vba.ReturnBWPerState[i];
+						mode_lib->vba.ReturnBWPerState[i][0];
 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 					mode_lib->vba.BandwidthAvailableForImmediateFlip =
 							mode_lib->vba.BandwidthAvailableForImmediateFlip
@@ -4884,9 +4895,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 					if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
 							&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
 						mode_lib->vba.ImmediateFlipBytes[k] =
-								mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
-										+ mode_lib->vba.MetaRowBytes[k]
-										+ mode_lib->vba.DPTEBytesPerRow[k];
+								mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k]
+										+ mode_lib->vba.MetaRowBytes[0][0][k]
+										+ mode_lib->vba.DPTEBytesPerRow[0][0][k];
 					}
 				}
 				mode_lib->vba.TotImmediateFlipBytes = 0.0;
@@ -4914,9 +4925,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 									/ mode_lib->vba.PixelClock[k],
 							mode_lib->vba.VRatio[k],
 							mode_lib->vba.Tno_bw[k],
-							mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
-							mode_lib->vba.MetaRowBytes[k],
-							mode_lib->vba.DPTEBytesPerRow[k],
+							mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+							mode_lib->vba.MetaRowBytes[0][0][k],
+							mode_lib->vba.DPTEBytesPerRow[0][0][k],
 							mode_lib->vba.DCCEnable[k],
 							mode_lib->vba.dpte_row_height[k],
 							mode_lib->vba.meta_row_height[k],
@@ -4941,7 +4952,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 				}
 				mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true;
 				if (mode_lib->vba.total_dcn_read_bw_with_flip
-						> mode_lib->vba.ReturnBWPerState[i]) {
+						> mode_lib->vba.ReturnBWPerState[i][0]) {
 					mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
 				}
 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4957,13 +4968,13 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 
 	/*Vertical Active BW support*/
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
-		mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(mode_lib->vba.ReturnBusWidth *
+		mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth *
 				mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) *
 				mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100;
-		if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i])
-			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = true;
+		if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0])
+			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true;
 		else
-			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = false;
+			mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false;
 	}
 
 	/*PTE Buffer Size Check*/
@@ -5051,7 +5062,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 				status = DML_FAIL_SCALE_RATIO_TAP;
 			} else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
 				status = DML_FAIL_SOURCE_PIXEL_FORMAT;
-			} else if (locals->ViewportSizeSupport[i] != true) {
+			} else if (locals->ViewportSizeSupport[i][0] != true) {
 				status = DML_FAIL_VIEWPORT_SIZE;
 			} else if (locals->DIOSupport[i] != true) {
 				status = DML_FAIL_DIO_SUPPORT;
@@ -5061,7 +5072,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 				status = DML_FAIL_DSC_CLK_REQUIRED;
 			} else if (locals->UrgentLatencySupport[i][j] != true) {
 				status = DML_FAIL_URGENT_LATENCY;
-			} else if (locals->ROBSupport[i] != true) {
+			} else if (locals->ROBSupport[i][0] != true) {
 				status = DML_FAIL_REORDERING_BUFFER;
 			} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
 				status = DML_FAIL_DISPCLK_DPPCLK;
@@ -5081,7 +5092,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 				status = DML_FAIL_PITCH_SUPPORT;
 			} else if (locals->PrefetchSupported[i][j] != true) {
 				status = DML_FAIL_PREFETCH_SUPPORT;
-			} else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) {
+			} else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
 				status = DML_FAIL_TOTAL_V_ACTIVE_BW;
 			} else if (locals->VRatioInPrefetchSupported[i][j] != true) {
 				status = DML_FAIL_V_RATIO_PREFETCH;
@@ -5127,7 +5138,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
 	mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
 	mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
 	mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
-	mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel];
+	mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
 	mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
 	for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 		if (mode_lib->vba.BlendingAndTiming[k] == k) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 2c7455e..ca80784 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -107,10 +107,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format
 
 static bool is_dual_plane(enum source_format_class source_format)
 {
-	bool ret_val = 0;
+	bool ret_val = false;
 
 	if ((source_format == dm_420_8) || (source_format == dm_420_10))
-		ret_val = 1;
+		ret_val = true;
 
 	return ret_val;
 }
@@ -240,8 +240,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
 	unsigned int swath_bytes_c = 0;
 	unsigned int full_swath_bytes_packed_l = 0;
 	unsigned int full_swath_bytes_packed_c = 0;
-	bool req128_l = 0;
-	bool req128_c = 0;
+	bool req128_l = false;
+	bool req128_c = false;
 	bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
 	bool surf_vert = (pipe_src_param.source_scan == dm_vert);
 	unsigned int log2_swath_height_l = 0;
@@ -264,13 +264,13 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
 		total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
 
 		if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
-			req128_l = 0;
-			req128_c = 0;
+			req128_l = false;
+			req128_c = false;
 			swath_bytes_l = full_swath_bytes_packed_l;
 			swath_bytes_c = full_swath_bytes_packed_c;
 		} else { //128b request (for luma only for yuv420 8bpc)
-			req128_l = 1;
-			req128_c = 0;
+			req128_l = true;
+			req128_c = false;
 			swath_bytes_l = full_swath_bytes_packed_l / 2;
 			swath_bytes_c = full_swath_bytes_packed_c;
 		}
@@ -280,9 +280,9 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
 		total_swath_bytes = 2 * full_swath_bytes_packed_l;
 
 		if (total_swath_bytes <= detile_buf_size_in_bytes)
-			req128_l = 0;
+			req128_l = false;
 		else
-			req128_l = 1;
+			req128_l = true;
 
 		swath_bytes_l = total_swath_bytes;
 		swath_bytes_c = 0;
@@ -670,7 +670,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
 		const display_pipe_source_params_st pipe_src_param,
 		bool is_chroma)
 {
-	bool mode_422 = 0;
+	bool mode_422 = false;
 	unsigned int vp_width = 0;
 	unsigned int vp_height = 0;
 	unsigned int data_pitch = 0;
@@ -929,8 +929,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
 	min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
 	dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
 
-	disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start
-			+ min_dst_y_ttu_vblank) * dml_pow(2, 2));
+	disp_dlg_regs->min_dst_y_next_start = (unsigned int) ((double) dlg_vblank_start * dml_pow(2, 2));
 	ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
 
 	dml_print("DML_DLG: %s: min_dcfclk_mhz                         = %3.2f\n",
@@ -959,7 +958,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
 	// Source
 //             dcc_en              = src.dcc;
 	dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
-	mode_422 = 0; // TODO
+	mode_422 = false; // TODO
 	access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
 //      bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
 //      bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 1e6aeb1..287b7a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -107,10 +107,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format
 
 static bool is_dual_plane(enum source_format_class source_format)
 {
-	bool ret_val = 0;
+	bool ret_val = false;
 
 	if ((source_format == dm_420_8) || (source_format == dm_420_10))
-		ret_val = 1;
+		ret_val = true;
 
 	return ret_val;
 }
@@ -240,8 +240,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
 	unsigned int swath_bytes_c = 0;
 	unsigned int full_swath_bytes_packed_l = 0;
 	unsigned int full_swath_bytes_packed_c = 0;
-	bool req128_l = 0;
-	bool req128_c = 0;
+	bool req128_l = false;
+	bool req128_c = false;
 	bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
 	bool surf_vert = (pipe_src_param.source_scan == dm_vert);
 	unsigned int log2_swath_height_l = 0;
@@ -264,13 +264,13 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
 		total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
 
 		if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
-			req128_l = 0;
-			req128_c = 0;
+			req128_l = false;
+			req128_c = false;
 			swath_bytes_l = full_swath_bytes_packed_l;
 			swath_bytes_c = full_swath_bytes_packed_c;
 		} else { //128b request (for luma only for yuv420 8bpc)
-			req128_l = 1;
-			req128_c = 0;
+			req128_l = true;
+			req128_c = false;
 			swath_bytes_l = full_swath_bytes_packed_l / 2;
 			swath_bytes_c = full_swath_bytes_packed_c;
 		}
@@ -280,9 +280,9 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
 		total_swath_bytes = 2 * full_swath_bytes_packed_l;
 
 		if (total_swath_bytes <= detile_buf_size_in_bytes)
-			req128_l = 0;
+			req128_l = false;
 		else
-			req128_l = 1;
+			req128_l = true;
 
 		swath_bytes_l = total_swath_bytes;
 		swath_bytes_c = 0;
@@ -670,7 +670,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
 		const display_pipe_source_params_st pipe_src_param,
 		bool is_chroma)
 {
-	bool mode_422 = 0;
+	bool mode_422 = false;
 	unsigned int vp_width = 0;
 	unsigned int vp_height = 0;
 	unsigned int data_pitch = 0;
@@ -959,7 +959,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
 	// Source
 //             dcc_en              = src.dcc;
 	dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
-	mode_422 = 0; // TODO
+	mode_422 = false; // TODO
 	access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
 //      bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
 //      bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index ba77957..e6617c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -23,7 +23,6 @@
  *
  */
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 
 #include "../display_mode_lib.h"
 #include "../dml_inline_defs.h"
@@ -66,6 +65,7 @@ typedef struct {
 #define BPP_INVALID 0
 #define BPP_BLENDED_PIPE 0xffffffff
 #define DCN21_MAX_DSC_IMAGE_WIDTH 5184
+#define DCN21_MAX_420_IMAGE_WIDTH 4096
 
 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
 static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
@@ -198,7 +198,7 @@ static unsigned int CalculateVMAndRowBytes(
 		unsigned int *meta_row_width,
 		unsigned int *meta_row_height,
 		unsigned int *vm_group_bytes,
-		long         *dpte_group_bytes,
+		unsigned int *dpte_group_bytes,
 		unsigned int *PixelPTEReqWidth,
 		unsigned int *PixelPTEReqHeight,
 		unsigned int *PTERequestSize,
@@ -296,7 +296,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
 		double UrgentOutOfOrderReturn,
 		double ReturnBW,
 		bool GPUVMEnable,
-		long dpte_group_bytes[],
+		int dpte_group_bytes[],
 		unsigned int MetaChunkSize,
 		double UrgentLatency,
 		double ExtraLatency,
@@ -310,13 +310,13 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
 		int DPPPerPlane[],
 		bool DCCEnable[],
 		double DPPCLK[],
-		unsigned int SwathWidthSingleDPPY[],
+		double SwathWidthSingleDPPY[],
 		unsigned int SwathHeightY[],
 		double ReadBandwidthPlaneLuma[],
 		unsigned int SwathHeightC[],
 		double ReadBandwidthPlaneChroma[],
 		unsigned int LBBitPerPixel[],
-		unsigned int SwathWidthY[],
+		double SwathWidthY[],
 		double HRatio[],
 		unsigned int vtaps[],
 		unsigned int VTAPsChroma[],
@@ -345,7 +345,7 @@ static void CalculateDCFCLKDeepSleep(
 		double BytePerPixelDETY[],
 		double BytePerPixelDETC[],
 		double VRatio[],
-		unsigned int SwathWidthY[],
+		double SwathWidthY[],
 		int DPPPerPlane[],
 		double HRatio[],
 		double PixelClock[],
@@ -436,7 +436,7 @@ static void CalculateMetaAndPTETimes(
 		unsigned int           meta_row_height[],
 		unsigned int           meta_req_width[],
 		unsigned int           meta_req_height[],
-		long                   dpte_group_bytes[],
+		int                   dpte_group_bytes[],
 		unsigned int           PTERequestSizeY[],
 		unsigned int           PTERequestSizeC[],
 		unsigned int           PixelPTEReqWidthY[],
@@ -478,7 +478,7 @@ static double CalculateExtraLatency(
 		bool HostVMEnable,
 		int NumberOfActivePlanes,
 		int NumberOfDPP[],
-		long dpte_group_bytes[],
+		int dpte_group_bytes[],
 		double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
 		double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
 		int HostVMMaxPageTableLevels,
@@ -1281,7 +1281,7 @@ static unsigned int CalculateVMAndRowBytes(
 		unsigned int *meta_row_width,
 		unsigned int *meta_row_height,
 		unsigned int *vm_group_bytes,
-		long         *dpte_group_bytes,
+		unsigned int *dpte_group_bytes,
 		unsigned int *PixelPTEReqWidth,
 		unsigned int *PixelPTEReqHeight,
 		unsigned int *PTERequestSize,
@@ -1339,7 +1339,7 @@ static unsigned int CalculateVMAndRowBytes(
 		*MetaRowByte = 0;
 	}
 
-	if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) {
+	if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
 		MacroTileSizeBytes = 256;
 		MacroTileHeight = BlockHeight256Bytes;
 	} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
@@ -1684,11 +1684,11 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
 		else
 			locals->SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
 
-		if (mode_lib->vba.ODMCombineEnabled[k] == true)
+		if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
 			MainPlaneDoesODMCombine = true;
 		for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
 			if (mode_lib->vba.BlendingAndTiming[k] == j
-					&& mode_lib->vba.ODMCombineEnabled[j] == true)
+					&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
 				MainPlaneDoesODMCombine = true;
 
 		if (MainPlaneDoesODMCombine == true)
@@ -2941,12 +2941,12 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
 			SwathWidth = mode_lib->vba.ViewportHeight[k];
 		}
 
-		if (mode_lib->vba.ODMCombineEnabled[k] == true) {
+		if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
 			MainPlaneDoesODMCombine = true;
 		}
 		for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
 			if (mode_lib->vba.BlendingAndTiming[k] == j
-					&& mode_lib->vba.ODMCombineEnabled[j] == true) {
+					&& mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
 				MainPlaneDoesODMCombine = true;
 			}
 		}
@@ -3454,7 +3454,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 										== dm_420_10))
 				|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
 						|| mode_lib->vba.SurfaceTiling[k]
-								== dm_sw_gfx7_2d_thin_lvp)
+								== dm_sw_gfx7_2d_thin_l_vp)
 						&& !((mode_lib->vba.SourcePixelFormat[k]
 								== dm_444_64
 								|| mode_lib->vba.SourcePixelFormat[k]
@@ -3543,17 +3543,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 		}
 	}
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
-		locals->IdealSDPPortBandwidthPerState[i] = dml_min3(
+		locals->IdealSDPPortBandwidthPerState[i][0] = dml_min3(
 				mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
 				mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
 						* mode_lib->vba.DRAMChannelWidth,
 				mode_lib->vba.FabricClockPerState[i]
 						* mode_lib->vba.FabricDatapathToDCNDataReturn);
 		if (mode_lib->vba.HostVMEnable == false) {
-			locals->ReturnBWPerState[i] = locals->IdealSDPPortBandwidthPerState[i]
+			locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0]
 					* mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100.0;
 		} else {
-			locals->ReturnBWPerState[i] = locals->IdealSDPPortBandwidthPerState[i]
+			locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0]
 					* mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100.0;
 		}
 	}
@@ -3590,12 +3590,12 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				+ dml_max3(mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly,
 						mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
 						mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly)
-					* mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i];
-		if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i]
+					* mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
+		if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
 				> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
-			locals->ROBSupport[i] = true;
+			locals->ROBSupport[i][0] = true;
 		} else {
-			locals->ROBSupport[i] = false;
+			locals->ROBSupport[i][0] = false;
 		}
 	}
 	/*Writeback Mode Support Check*/
@@ -3972,18 +3972,25 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 						&& i == mode_lib->vba.soc.num_states)
 					mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
 							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
-				if (mode_lib->vba.ODMCapability == false ||
-						(locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
-							&& (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) {
-					locals->ODMCombineEnablePerState[i][k] = false;
-					mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
-				} else {
-					locals->ODMCombineEnablePerState[i][k] = true;
-					mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+
+				locals->ODMCombineEnablePerState[i][k] = false;
+				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+				if (mode_lib->vba.ODMCapability) {
+					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					} else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+						locals->ODMCombineEnablePerState[i][k] = true;
+						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+					}
 				}
+
 				if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
 						&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
-						&& locals->ODMCombineEnablePerState[i][k] == false) {
+						&& locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
 					locals->NoOfDPP[i][j][k] = 1;
 					locals->RequiredDPPCLK[i][j][k] =
 						locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
@@ -4072,16 +4079,16 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	/*Viewport Size Check*/
 
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
-		locals->ViewportSizeSupport[i] = true;
+		locals->ViewportSizeSupport[i][0] = true;
 		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-			if (locals->ODMCombineEnablePerState[i][k] == true) {
+			if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
 				if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
 						> locals->MaximumSwathWidth[k]) {
-					locals->ViewportSizeSupport[i] = false;
+					locals->ViewportSizeSupport[i][0] = false;
 				}
 			} else {
 				if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
-					locals->ViewportSizeSupport[i] = false;
+					locals->ViewportSizeSupport[i][0] = false;
 				}
 			}
 		}
@@ -4122,11 +4129,11 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	}
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
 		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-			locals->RequiresDSC[i][k] = 0;
+			locals->RequiresDSC[i][k] = false;
 			locals->RequiresFEC[i][k] = 0;
 			if (mode_lib->vba.BlendingAndTiming[k] == k) {
 				if (mode_lib->vba.Output[k] == dm_hdmi) {
-					locals->RequiresDSC[i][k] = 0;
+					locals->RequiresDSC[i][k] = false;
 					locals->RequiresFEC[i][k] = 0;
 					locals->OutputBppPerState[i][k] = TruncToValidBPP(
 							dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24,
@@ -4270,8 +4277,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 						mode_lib->vba.DSCFormatFactor = 1;
 					}
 					if (locals->RequiresDSC[i][k] == true) {
-						if (locals->ODMCombineEnablePerState[i][k]
-								== true) {
+						if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
 							if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
 									> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
 								locals->DSCCLKRequiredMoreThanSupported[i] =
@@ -4294,7 +4300,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 		mode_lib->vba.TotalDSCUnitsRequired = 0.0;
 		for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 			if (locals->RequiresDSC[i][k] == true) {
-				if (locals->ODMCombineEnablePerState[i][k] == true) {
+				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
 					mode_lib->vba.TotalDSCUnitsRequired =
 							mode_lib->vba.TotalDSCUnitsRequired + 2.0;
 				} else {
@@ -4336,7 +4342,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
 			}
 			if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
-				if (locals->ODMCombineEnablePerState[i][k] == false) {
+				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
 					locals->DSCDelayPerState[i][k] =
 							dscceComputeDelay(
 									mode_lib->vba.DSCInputBitPerComponent[k],
@@ -4400,7 +4406,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 				locals->RequiredDPPCLKThisState[k] = locals->RequiredDPPCLK[i][j][k];
 				locals->NoOfDPPThisState[k]        = locals->NoOfDPP[i][j][k];
-				if (locals->ODMCombineEnablePerState[i][k] == true) {
+				if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
 					locals->SwathWidthYThisState[k] =
 						dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]));
 				} else {
@@ -4452,7 +4458,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 					locals->PSCL_FACTOR,
 					locals->PSCL_FACTOR_CHROMA,
 					locals->RequiredDPPCLKThisState,
-					&mode_lib->vba.ProjectedDCFCLKDeepSleep);
+					&mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]);
 
 			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 				if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
@@ -4497,7 +4503,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 							locals->PTERequestSizeC,
 							locals->dpde0_bytes_per_frame_ub_c,
 							locals->meta_pte_bytes_per_frame_ub_c);
-					locals->PrefetchLinesC[k] = CalculatePrefetchSourceLines(
+					locals->PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
 							mode_lib,
 							mode_lib->vba.VRatio[k]/2,
 							mode_lib->vba.VTAPsChroma[k],
@@ -4512,7 +4518,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 					mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
 					mode_lib->vba.MetaRowBytesC = 0.0;
 					mode_lib->vba.DPTEBytesPerRowC = 0.0;
-					locals->PrefetchLinesC[k] = 0.0;
+					locals->PrefetchLinesC[0][0][k] = 0.0;
 					locals->PTEBufferSizeNotExceededC[i][j][k] = true;
 					locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
 				}
@@ -4553,7 +4559,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 						locals->PTERequestSizeY,
 						locals->dpde0_bytes_per_frame_ub_l,
 						locals->meta_pte_bytes_per_frame_ub_l);
-				locals->PrefetchLinesY[k] = CalculatePrefetchSourceLines(
+				locals->PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
 						mode_lib,
 						mode_lib->vba.VRatio[k],
 						mode_lib->vba.vtaps[k],
@@ -4563,10 +4569,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 						mode_lib->vba.ViewportYStartY[k],
 						&locals->PrefillY[k],
 						&locals->MaxNumSwY[k]);
-				locals->PDEAndMetaPTEBytesPerFrame[k] =
+				locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
 						mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
-				locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
-				locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
+				locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
+				locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
 
 				CalculateActiveRowBandwidth(
 						mode_lib->vba.GPUVMEnable,
@@ -4592,7 +4598,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 					mode_lib->vba.PixelChunkSizeInKByte,
 					locals->TotalNumberOfDCCActiveDPP[i][j],
 					mode_lib->vba.MetaChunkSize,
-					locals->ReturnBWPerState[i],
+					locals->ReturnBWPerState[i][0],
 					mode_lib->vba.GPUVMEnable,
 					mode_lib->vba.HostVMEnable,
 					mode_lib->vba.NumberOfActivePlanes,
@@ -4603,7 +4609,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 					mode_lib->vba.HostVMMaxPageTableLevels,
 					mode_lib->vba.HostVMCachedPageTableLevels);
 
-			mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
+			mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
 			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 				if (mode_lib->vba.BlendingAndTiming[k] == k) {
 					if (mode_lib->vba.WritebackEnable[k] == true) {
@@ -4645,15 +4651,15 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 					}
 				}
 			}
-			mode_lib->vba.MaxMaxVStartup = 0;
+			mode_lib->vba.MaxMaxVStartup[0][0] = 0;
 			for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-				locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+				locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
 					- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
-				mode_lib->vba.MaxMaxVStartup = dml_max(mode_lib->vba.MaxMaxVStartup, locals->MaximumVStartup[k]);
+				mode_lib->vba.MaxMaxVStartup[0][0] = dml_max(mode_lib->vba.MaxMaxVStartup[0][0], locals->MaximumVStartup[0][0][k]);
 			}
 
 			mode_lib->vba.NextPrefetchMode = mode_lib->vba.MinPrefetchMode;
-			mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup;
+			mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0];
 			do {
 				mode_lib->vba.PrefetchMode[i][j] = mode_lib->vba.NextPrefetchMode;
 				mode_lib->vba.MaxVStartup = mode_lib->vba.NextMaxVStartup;
@@ -4694,7 +4700,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 					myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k];
 					myPipe.DISPCLK = locals->RequiredDISPCLK[i][j];
 					myPipe.PixelClock = mode_lib->vba.PixelClock[k];
-					myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep;
+					myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
 					myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k];
 					myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k];
 					myPipe.SourceScan = mode_lib->vba.SourceScan[k];
@@ -4728,8 +4734,8 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 							locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k],
 							mode_lib->vba.OutputFormat[k],
 							mode_lib->vba.MaxInterDCNTileRepeaters,
-							dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[k]),
-							locals->MaximumVStartup[k],
+							dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]),
+							locals->MaximumVStartup[0][0][k],
 							mode_lib->vba.GPUVMMaxPageTableLevels,
 							mode_lib->vba.GPUVMEnable,
 							&myHostVM,
@@ -4740,15 +4746,15 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 							mode_lib->vba.UrgentLatency,
 							mode_lib->vba.ExtraLatency,
 							mode_lib->vba.TimeCalc,
-							locals->PDEAndMetaPTEBytesPerFrame[k],
-							locals->MetaRowBytes[k],
-							locals->DPTEBytesPerRow[k],
-							locals->PrefetchLinesY[k],
+							locals->PDEAndMetaPTEBytesPerFrame[0][0][k],
+							locals->MetaRowBytes[0][0][k],
+							locals->DPTEBytesPerRow[0][0][k],
+							locals->PrefetchLinesY[0][0][k],
 							locals->SwathWidthYThisState[k],
 							locals->BytePerPixelInDETY[k],
 							locals->PrefillY[k],
 							locals->MaxNumSwY[k],
-							locals->PrefetchLinesC[k],
+							locals->PrefetchLinesC[0][0][k],
 							locals->BytePerPixelInDETC[k],
 							locals->PrefillC[k],
 							locals->MaxNumSwC[k],
@@ -4837,14 +4843,14 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 						+ locals->RequiredPrefetchPixelDataBWChroma[i][j][k] * locals->UrgentBurstFactorChromaPre[k]
 						+ locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]);
 				}
-				locals->BandwidthWithoutPrefetchSupported[i] = true;
-				if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]
+				locals->BandwidthWithoutPrefetchSupported[i][0] = true;
+				if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]
 						|| locals->NotEnoughUrgentLatencyHiding == 1) {
-					locals->BandwidthWithoutPrefetchSupported[i] = false;
+					locals->BandwidthWithoutPrefetchSupported[i][0] = false;
 				}
 
 				locals->PrefetchSupported[i][j] = true;
-				if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]
+				if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]
 						|| locals->NotEnoughUrgentLatencyHiding == 1
 						|| locals->NotEnoughUrgentLatencyHidingPre == 1) {
 					locals->PrefetchSupported[i][j] = false;
@@ -4873,17 +4879,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				}
 
 				if (mode_lib->vba.MaxVStartup <= 13 || mode_lib->vba.AnyLinesForVMOrRowTooLarge == false) {
-					mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup;
+					mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0];
 					mode_lib->vba.NextPrefetchMode = mode_lib->vba.NextPrefetchMode + 1;
 				} else {
 					mode_lib->vba.NextMaxVStartup = mode_lib->vba.NextMaxVStartup - 1;
 				}
 			} while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true)
-					&& (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup
+					&& (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0]
 						|| mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode));
 
 			if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) {
-				mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i];
+				mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0];
 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 					mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.BandwidthAvailableForImmediateFlip
 						- dml_max(locals->ReadBandwidthLuma[k] * locals->UrgentBurstFactorLuma[k]
@@ -4896,7 +4902,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				mode_lib->vba.TotImmediateFlipBytes = 0.0;
 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 					mode_lib->vba.TotImmediateFlipBytes = mode_lib->vba.TotImmediateFlipBytes
-						+ locals->PDEAndMetaPTEBytesPerFrame[k] + locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k];
+						+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k] + locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k];
 				}
 
 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4911,9 +4917,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 							mode_lib->vba.HostVMMaxPageTableLevels,
 							mode_lib->vba.HostVMCachedPageTableLevels,
 							mode_lib->vba.GPUVMEnable,
-							locals->PDEAndMetaPTEBytesPerFrame[k],
-							locals->MetaRowBytes[k],
-							locals->DPTEBytesPerRow[k],
+							locals->PDEAndMetaPTEBytesPerFrame[0][0][k],
+							locals->MetaRowBytes[0][0][k],
+							locals->DPTEBytesPerRow[0][0][k],
 							mode_lib->vba.BandwidthAvailableForImmediateFlip,
 							mode_lib->vba.TotImmediateFlipBytes,
 							mode_lib->vba.SourcePixelFormat[k],
@@ -4944,7 +4950,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				}
 				locals->ImmediateFlipSupportedForState[i][j] = true;
 				if (mode_lib->vba.total_dcn_read_bw_with_flip
-						> locals->ReturnBWPerState[i]) {
+						> locals->ReturnBWPerState[i][0]) {
 					locals->ImmediateFlipSupportedForState[i][j] = false;
 				}
 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4971,7 +4977,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 					mode_lib->vba.WritebackInterfaceChromaBufferSize,
 					mode_lib->vba.DCFCLKPerState[i],
 					mode_lib->vba.UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels,
-					locals->ReturnBWPerState[i],
+					locals->ReturnBWPerState[i][0],
 					mode_lib->vba.GPUVMEnable,
 					locals->dpte_group_bytes,
 					mode_lib->vba.MetaChunkSize,
@@ -4983,7 +4989,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 					mode_lib->vba.DRAMClockChangeLatency,
 					mode_lib->vba.SRExitTime,
 					mode_lib->vba.SREnterPlusExitTime,
-					mode_lib->vba.ProjectedDCFCLKDeepSleep,
+					mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
 					locals->NoOfDPPThisState,
 					mode_lib->vba.DCCEnable,
 					locals->RequiredDPPCLKThisState,
@@ -5026,8 +5032,8 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + locals->ReadBandwidth[k];
 		}
 		for (i = 0; i <= mode_lib->vba.soc.num_states; ++i) {
-			locals->MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(
-				locals->IdealSDPPortBandwidthPerState[i] *
+			locals->MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(
+				locals->IdealSDPPortBandwidthPerState[i][0] *
 				mode_lib->vba.MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation
 				/ 100.0, mode_lib->vba.DRAMSpeedPerState[i] *
 				mode_lib->vba.NumberOfChannels *
@@ -5035,10 +5041,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation
 				/ 100.0);
 
-			if (MaxTotalVActiveRDBandwidth <= locals->MaxTotalVerticalActiveAvailableBandwidth[i]) {
-				locals->TotalVerticalActiveBandwidthSupport[i] = true;
+			if (MaxTotalVActiveRDBandwidth <= locals->MaxTotalVerticalActiveAvailableBandwidth[i][0]) {
+				locals->TotalVerticalActiveBandwidthSupport[i][0] = true;
 			} else {
-				locals->TotalVerticalActiveBandwidthSupport[i] = false;
+				locals->TotalVerticalActiveBandwidthSupport[i][0] = false;
 			}
 		}
 	}
@@ -5117,7 +5123,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				status = DML_FAIL_SCALE_RATIO_TAP;
 			} else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
 				status = DML_FAIL_SOURCE_PIXEL_FORMAT;
-			} else if (locals->ViewportSizeSupport[i] != true) {
+			} else if (locals->ViewportSizeSupport[i][0] != true) {
 				status = DML_FAIL_VIEWPORT_SIZE;
 			} else if (locals->DIOSupport[i] != true) {
 				status = DML_FAIL_DIO_SUPPORT;
@@ -5125,7 +5131,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				status = DML_FAIL_NOT_ENOUGH_DSC;
 			} else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) {
 				status = DML_FAIL_DSC_CLK_REQUIRED;
-			} else if (locals->ROBSupport[i] != true) {
+			} else if (locals->ROBSupport[i][0] != true) {
 				status = DML_FAIL_REORDERING_BUFFER;
 			} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
 				status = DML_FAIL_DISPCLK_DPPCLK;
@@ -5143,7 +5149,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				status = DML_FAIL_CURSOR_SUPPORT;
 			} else if (mode_lib->vba.PitchSupport != true) {
 				status = DML_FAIL_PITCH_SUPPORT;
-			} else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) {
+			} else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
 				status = DML_FAIL_TOTAL_V_ACTIVE_BW;
 			} else if (locals->PTEBufferSizeNotExceeded[i][j] != true) {
 				status = DML_FAIL_PTE_BUFFER_SIZE;
@@ -5199,13 +5205,13 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
 	mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
 	mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
-	mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel];
+	mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
 	for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
 		if (mode_lib->vba.BlendingAndTiming[k] == k) {
 			mode_lib->vba.ODMCombineEnabled[k] =
 					locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
 		} else {
-			mode_lib->vba.ODMCombineEnabled[k] = 0;
+			mode_lib->vba.ODMCombineEnabled[k] = false;
 		}
 		mode_lib->vba.DSCEnabled[k] =
 				locals->RequiresDSC[mode_lib->vba.VoltageLevel][k];
@@ -5228,7 +5234,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
 		double UrgentOutOfOrderReturn,
 		double ReturnBW,
 		bool GPUVMEnable,
-		long dpte_group_bytes[],
+		int dpte_group_bytes[],
 		unsigned int MetaChunkSize,
 		double UrgentLatency,
 		double ExtraLatency,
@@ -5242,13 +5248,13 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
 		int DPPPerPlane[],
 		bool DCCEnable[],
 		double DPPCLK[],
-		unsigned int SwathWidthSingleDPPY[],
+		double SwathWidthSingleDPPY[],
 		unsigned int SwathHeightY[],
 		double ReadBandwidthPlaneLuma[],
 		unsigned int SwathHeightC[],
 		double ReadBandwidthPlaneChroma[],
 		unsigned int LBBitPerPixel[],
-		unsigned int SwathWidthY[],
+		double SwathWidthY[],
 		double HRatio[],
 		unsigned int vtaps[],
 		unsigned int VTAPsChroma[],
@@ -5504,7 +5510,7 @@ static void CalculateDCFCLKDeepSleep(
 		double BytePerPixelDETY[],
 		double BytePerPixelDETC[],
 		double VRatio[],
-		unsigned int SwathWidthY[],
+		double SwathWidthY[],
 		int DPPPerPlane[],
 		double HRatio[],
 		double PixelClock[],
@@ -5832,7 +5838,7 @@ static void CalculateMetaAndPTETimes(
 		unsigned int meta_row_height[],
 		unsigned int meta_req_width[],
 		unsigned int meta_req_height[],
-		long dpte_group_bytes[],
+		int dpte_group_bytes[],
 		unsigned int PTERequestSizeY[],
 		unsigned int PTERequestSizeC[],
 		unsigned int PixelPTEReqWidthY[],
@@ -6088,7 +6094,7 @@ static double CalculateExtraLatency(
 		bool HostVMEnable,
 		int NumberOfActivePlanes,
 		int NumberOfDPP[],
-		long dpte_group_bytes[],
+		int dpte_group_bytes[],
 		double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
 		double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
 		int HostVMMaxPageTableLevels,
@@ -6126,4 +6132,3 @@ static double CalculateExtraLatency(
 	return CalculateExtraLatency;
 }
 
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index a1f207c..a38baa7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -23,7 +23,6 @@
  *
  */
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 
 #include "../display_mode_lib.h"
 #include "../display_mode_vba.h"
@@ -83,10 +82,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format
 
 static bool is_dual_plane(enum source_format_class source_format)
 {
-	bool ret_val = 0;
+	bool ret_val = false;
 
 	if ((source_format == dm_420_8) || (source_format == dm_420_10))
-		ret_val = 1;
+		ret_val = true;
 
 	return ret_val;
 }
@@ -223,8 +222,8 @@ static void handle_det_buf_split(
 	unsigned int swath_bytes_c = 0;
 	unsigned int full_swath_bytes_packed_l = 0;
 	unsigned int full_swath_bytes_packed_c = 0;
-	bool req128_l = 0;
-	bool req128_c = 0;
+	bool req128_l = false;
+	bool req128_c = false;
 	bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
 	bool surf_vert = (pipe_src_param.source_scan == dm_vert);
 	unsigned int log2_swath_height_l = 0;
@@ -249,13 +248,13 @@ static void handle_det_buf_split(
 		total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
 
 		if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
-			req128_l = 0;
-			req128_c = 0;
+			req128_l = false;
+			req128_c = false;
 			swath_bytes_l = full_swath_bytes_packed_l;
 			swath_bytes_c = full_swath_bytes_packed_c;
 		} else { //128b request (for luma only for yuv420 8bpc)
-			req128_l = 1;
-			req128_c = 0;
+			req128_l = true;
+			req128_c = false;
 			swath_bytes_l = full_swath_bytes_packed_l / 2;
 			swath_bytes_c = full_swath_bytes_packed_c;
 		}
@@ -265,9 +264,9 @@ static void handle_det_buf_split(
 		total_swath_bytes = 2 * full_swath_bytes_packed_l;
 
 		if (total_swath_bytes <= detile_buf_size_in_bytes)
-			req128_l = 0;
+			req128_l = false;
 		else
-			req128_l = 1;
+			req128_l = true;
 
 		swath_bytes_l = total_swath_bytes;
 		swath_bytes_c = 0;
@@ -680,7 +679,7 @@ static void get_surf_rq_param(
 		const display_pipe_params_st pipe_param,
 		bool is_chroma)
 {
-	bool mode_422 = 0;
+	bool mode_422 = false;
 	unsigned int vp_width = 0;
 	unsigned int vp_height = 0;
 	unsigned int data_pitch = 0;
@@ -1011,7 +1010,7 @@ static void dml_rq_dlg_get_dlg_params(
 	// Source
 	//             dcc_en              = src.dcc;
 	dual_plane = is_dual_plane((enum source_format_class) (src->source_format));
-	mode_422 = 0; // FIXME
+	mode_422 = false; // FIXME
 	access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
 						    //      bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
 						    //      bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
@@ -1523,8 +1522,8 @@ static void dml_rq_dlg_get_dlg_params(
 
 	disp_dlg_regs->refcyc_per_vm_group_vblank   = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
 	disp_dlg_regs->refcyc_per_vm_group_flip     = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
-	disp_dlg_regs->refcyc_per_vm_req_vblank     = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;;
-	disp_dlg_regs->refcyc_per_vm_req_flip       = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;;
+	disp_dlg_regs->refcyc_per_vm_req_vblank     = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
+	disp_dlg_regs->refcyc_per_vm_req_flip       = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
 
 	// Clamp to max for now
 	if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23))
@@ -1820,4 +1819,3 @@ static void calculate_ttu_cursor(
 	}
 }
 
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
index 1c97083..bfc2f39b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
@@ -85,7 +85,7 @@ enum dm_swizzle_mode {
 	dm_sw_var_s_x = 29,
 	dm_sw_var_d_x = 30,
 	dm_sw_64kb_r_x,
-	dm_sw_gfx7_2d_thin_lvp,
+	dm_sw_gfx7_2d_thin_l_vp,
 	dm_sw_gfx7_2d_thin_gl,
 };
 enum lb_depth {
@@ -119,6 +119,10 @@ enum mpc_combine_affinity {
 	dm_mpc_never
 };
 
+enum RequestType {
+	REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA
+};
+
 enum self_refresh_affinity {
 	dm_try_to_allow_self_refresh_and_mclk_switch,
 	dm_allow_self_refresh_and_mclk_switch,
@@ -135,9 +139,7 @@ enum dm_validation_status {
 	DML_FAIL_DIO_SUPPORT,
 	DML_FAIL_NOT_ENOUGH_DSC,
 	DML_FAIL_DSC_CLK_REQUIRED,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	DML_FAIL_DSC_VALIDATION_FAILURE,
-#endif
 	DML_FAIL_URGENT_LATENCY,
 	DML_FAIL_REORDERING_BUFFER,
 	DML_FAIL_DISPCLK_DPPCLK,
@@ -167,4 +169,16 @@ enum odm_combine_mode {
 	dm_odm_combine_mode_4to1,
 };
 
+enum odm_combine_policy {
+	dm_odm_combine_policy_dal,
+	dm_odm_combine_policy_none,
+	dm_odm_combine_policy_2to1,
+	dm_odm_combine_policy_4to1,
+};
+
+enum immediate_flip_requirement {
+	dm_immediate_flip_not_required,
+	dm_immediate_flip_required,
+};
+
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 704efef..2689401 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -25,18 +25,13 @@
 
 #include "display_mode_lib.h"
 #include "dc_features.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #include "dcn20/display_mode_vba_20.h"
 #include "dcn20/display_rq_dlg_calc_20.h"
 #include "dcn20/display_mode_vba_20v2.h"
 #include "dcn20/display_rq_dlg_calc_20v2.h"
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 #include "dcn21/display_mode_vba_21.h"
 #include "dcn21/display_rq_dlg_calc_21.h"
-#endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 const struct dml_funcs dml20_funcs = {
 	.validate = dml20_ModeSupportAndSystemConfigurationFull,
 	.recalculate = dml20_recalculate,
@@ -50,16 +45,13 @@ const struct dml_funcs dml20v2_funcs = {
 	.rq_dlg_get_dlg_reg = dml20v2_rq_dlg_get_dlg_reg,
 	.rq_dlg_get_rq_reg = dml20v2_rq_dlg_get_rq_reg
 };
-#endif
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 const struct dml_funcs dml21_funcs = {
         .validate = dml21_ModeSupportAndSystemConfigurationFull,
         .recalculate = dml21_recalculate,
         .rq_dlg_get_dlg_reg = dml21_rq_dlg_get_dlg_reg,
         .rq_dlg_get_rq_reg = dml21_rq_dlg_get_rq_reg
 };
-#endif
 
 void dml_init_instance(struct display_mode_lib *lib,
 		const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
@@ -70,19 +62,15 @@ void dml_init_instance(struct display_mode_lib *lib,
 	lib->ip = *ip_params;
 	lib->project = project;
 	switch (project) {
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	case DML_PROJECT_NAVI10:
 		lib->funcs = dml20_funcs;
 		break;
 	case DML_PROJECT_NAVI10v2:
 		lib->funcs = dml20v2_funcs;
 		break;
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
         case DML_PROJECT_DCN21:
                 lib->funcs = dml21_funcs;
                 break;
-#endif
 
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index d8c59aa..cf2758c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -27,20 +27,14 @@
 
 
 #include "dml_common_defs.h"
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 #include "display_mode_vba.h"
-#endif
 
 enum dml_project {
 	DML_PROJECT_UNDEFINED,
 	DML_PROJECT_RAVEN1,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	DML_PROJECT_NAVI10,
 	DML_PROJECT_NAVI10v2,
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 	DML_PROJECT_DCN21,
-#endif
 };
 
 struct display_mode_lib;
@@ -70,9 +64,7 @@ struct display_mode_lib {
 	struct _vcs_dpi_ip_params_st ip;
 	struct _vcs_dpi_soc_bounding_box_st soc;
 	enum dml_project project;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	struct vba_vars_st vba;
-#endif
 	struct dal_logger *logger;
 	struct dml_funcs funcs;
 };
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index cfacd60..658f81e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -63,6 +63,7 @@ struct _vcs_dpi_voltage_scaling_st {
 	double dispclk_mhz;
 	double phyclk_mhz;
 	double dppclk_mhz;
+	double dtbclk_mhz;
 };
 
 struct _vcs_dpi_soc_bounding_box_st {
@@ -99,6 +100,7 @@ struct _vcs_dpi_soc_bounding_box_st {
 	unsigned int num_chans;
 	unsigned int vmm_page_size_bytes;
 	unsigned int hostvm_min_page_size_bytes;
+	unsigned int gpuvm_min_page_size_bytes;
 	double dram_clock_change_latency_us;
 	double dummy_pstate_latency_us;
 	double writeback_dram_clock_change_latency_us;
@@ -112,6 +114,7 @@ struct _vcs_dpi_soc_bounding_box_st {
 	bool do_urgent_latency_adjustment;
 	double urgent_latency_adjustment_fabric_clock_component_us;
 	double urgent_latency_adjustment_fabric_clock_reference_mhz;
+	bool disable_dram_clock_change_vactive_support;
 };
 
 struct _vcs_dpi_ip_params_st {
@@ -145,7 +148,6 @@ struct _vcs_dpi_ip_params_st {
 	unsigned int writeback_interface_buffer_size_kbytes;
 	unsigned int writeback_line_buffer_buffer_size;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	unsigned int writeback_10bpc420_supported;
 	double writeback_max_hscl_ratio;
 	double writeback_max_vscl_ratio;
@@ -155,7 +157,6 @@ struct _vcs_dpi_ip_params_st {
 	unsigned int writeback_max_vscl_taps;
 	unsigned int writeback_line_buffer_luma_buffer_size;
 	unsigned int writeback_line_buffer_chroma_buffer_size;
-#endif
 
 	unsigned int max_page_table_levels;
 	unsigned int max_num_dpp;
@@ -214,6 +215,7 @@ struct _vcs_dpi_display_pipe_source_params_st {
 	int source_format;
 	unsigned char dcc;
 	unsigned int dcc_rate;
+	unsigned int dcc_rate_chroma;
 	unsigned char dcc_use_global;
 	unsigned char vm;
 	bool gpuvm;    // gpuvm enabled
@@ -225,6 +227,10 @@ struct _vcs_dpi_display_pipe_source_params_st {
 	int source_scan;
 	int sw_mode;
 	int macro_tile_size;
+	unsigned int surface_width_y;
+	unsigned int surface_height_y;
+	unsigned int surface_width_c;
+	unsigned int surface_height_c;
 	unsigned int viewport_width;
 	unsigned int viewport_height;
 	unsigned int viewport_y_y;
@@ -277,6 +283,7 @@ struct _vcs_dpi_display_output_params_st {
 	int output_type;
 	int output_format;
 	int dsc_slices;
+	int max_audio_sample_rate;
 	struct writeback_st wb;
 };
 
@@ -322,7 +329,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
 	double pixel_rate_mhz;
 	unsigned char synchronized_vblank_all_planes;
 	unsigned char otg_inst;
-	unsigned char odm_combine;
+	unsigned int odm_combine;
 	unsigned char use_maximum_vstartup;
 	unsigned int vtotal_max;
 	unsigned int vtotal_min;
@@ -401,6 +408,7 @@ struct _vcs_dpi_display_rq_misc_params_st {
 struct _vcs_dpi_display_rq_params_st {
 	unsigned char yuv420;
 	unsigned char yuv420_10bpc;
+	unsigned char rgbe_alpha;
 	display_rq_misc_params_st misc;
 	display_rq_sizing_params_st sizing;
 	display_rq_dlg_params_st dlg;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 7f9a562..b3c96d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -23,7 +23,6 @@
  *
  */
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 
 #include "display_mode_lib.h"
 #include "display_mode_vba.h"
@@ -222,13 +221,17 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
 	mode_lib->vba.SRExitTime = soc->sr_exit_time_us;
 	mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us;
 	mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us;
+	mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us;
+	mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support ||
+			mode_lib->vba.DummyPStateCheck;
+
 	mode_lib->vba.Downspreading = soc->downspread_percent;
 	mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes;   // new!
 	mode_lib->vba.FabricDatapathToDCNDataReturn = soc->fabric_datapath_to_dcn_data_return_bytes; // new!
 	mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading = soc->dcn_downspread_percent;   // new
 	mode_lib->vba.DISPCLKDPPCLKVCOSpeed = soc->dispclk_dppclk_vco_speed_mhz;   // new
 	mode_lib->vba.VMMPageSize = soc->vmm_page_size_bytes;
-	mode_lib->vba.GPUVMMinPageSize = soc->vmm_page_size_bytes / 1024;
+	mode_lib->vba.GPUVMMinPageSize = soc->gpuvm_min_page_size_bytes / 1024;
 	mode_lib->vba.HostVMMinPageSize = soc->hostvm_min_page_size_bytes / 1024;
 	// Set the voltage scaling clocks as the defaults. Most of these will
 	// be set to different values by the test
@@ -261,7 +264,10 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
 		mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mts;
 		//mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mhz;
 		mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
+		mode_lib->vba.DTBCLKPerState[i] = soc->clock_limits[i].dtbclk_mhz;
 	}
+	mode_lib->vba.MinVoltageLevel = 0;
+	mode_lib->vba.MaxVoltageLevel = mode_lib->vba.soc.num_states;
 
 	mode_lib->vba.DoUrgentLatencyAdjustment =
 		soc->do_urgent_latency_adjustment;
@@ -303,8 +309,6 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib)
 
 	mode_lib->vba.WritebackInterfaceBufferSize = ip->writeback_interface_buffer_size_kbytes;
 	mode_lib->vba.WritebackLineBufferSize = ip->writeback_line_buffer_buffer_size;
-	mode_lib->vba.MinVoltageLevel = 0;
-	mode_lib->vba.MaxVoltageLevel = 5;
 
 	mode_lib->vba.WritebackChromaLineBufferWidth =
 			ip->writeback_chroma_line_buffer_width_pixels;
@@ -420,8 +424,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
 						ip->dcc_supported : src->dcc && ip->dcc_supported;
 		mode_lib->vba.DCCRate[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
 		/* TODO: Needs to be set based on src->dcc_rate_luma/chroma */
-		mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = 0;
-		mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = 0;
+		mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
+		mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate_chroma;
 
 		mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] =
 				(enum source_format_class) (src->source_format);
@@ -433,8 +437,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
 				dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode?
 		mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =
 				dst->odm_combine;
-		mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] =
-				dst->odm_combine;
 		mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
 				(enum output_format_class) (dout->output_format);
 		mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] =
@@ -451,7 +453,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
 				dout->dp_lanes;
 		/* TODO: Needs to be set based on dout->audio.audio_sample_rate_khz/sample_layout */
 		mode_lib->vba.AudioSampleRate[mode_lib->vba.NumberOfActivePlanes] =
-			44.1 * 1000;
+			dout->max_audio_sample_rate;
 		mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] =
 			1;
 		mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0;
@@ -587,6 +589,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
 			for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) {
 				display_pipe_source_params_st *src_k = &pipes[k].pipe.src;
 				display_pipe_dest_params_st *dst_k = &pipes[k].pipe.dest;
+				display_output_params_st *dout_k = &pipes[j].dout;
 
 				if (src_k->is_hsplit && !visited[k]
 						&& src->hsplit_grp == src_k->hsplit_grp) {
@@ -597,12 +600,18 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
 							== dm_horz) {
 						mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
 								src_k->viewport_width;
+						mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] +=
+								src_k->viewport_width;
 						mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] +=
 								dst_k->recout_width;
 					} else {
 						mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] +=
 								src_k->viewport_height;
+						mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] +=
+								src_k->viewport_height;
 					}
+					mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] +=
+							dout_k->dsc_slices;
 
 					visited[k] = true;
 				}
@@ -808,7 +817,9 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
 	unsigned int total_pipes = 0;
 
 	mode_lib->vba.VoltageLevel = mode_lib->vba.cache_pipes[0].clks_cfg.voltage;
-	mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel];
+	mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb];
+	if (mode_lib->vba.ReturnBW == 0)
+		mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
 	mode_lib->vba.FabricAndDRAMBandwidth = mode_lib->vba.FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
 
 	fetch_socbb_params(mode_lib);
@@ -858,4 +869,3 @@ double CalculateWriteBackDISPCLK(
 	return CalculateWriteBackDISPCLK;
 }
 
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 1540ffb..2875efd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -23,7 +23,6 @@
  *
  */
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 
 #ifndef __DML2_DISPLAY_MODE_VBA_H__
 #define __DML2_DISPLAY_MODE_VBA_H__
@@ -155,7 +154,10 @@ struct vba_vars_st {
 	double UrgentLatencySupportUsChroma;
 	unsigned int DSCFormatFactor;
 
+	bool DummyPStateCheck;
+	bool DRAMClockChangeSupportsVActive;
 	bool PrefetchModeSupported;
+	bool PrefetchAndImmediateFlipSupported;
 	enum self_refresh_affinity AllowDRAMSelfRefreshOrDRAMClockChangeInVblank; // Mode Support only
 	double XFCRemoteSurfaceFlipDelay;
 	double TInitXFill;
@@ -317,8 +319,7 @@ struct vba_vars_st {
 	unsigned int DynamicMetadataTransmittedBytes[DC__NUM_DPP__MAX];
 	double DCCRate[DC__NUM_DPP__MAX];
 	double AverageDCCCompressionRate;
-	bool ODMCombineEnabled[DC__NUM_DPP__MAX];
-	enum odm_combine_mode ODMCombineTypeEnabled[DC__NUM_DPP__MAX];
+	enum odm_combine_mode ODMCombineEnabled[DC__NUM_DPP__MAX];
 	double OutputBpp[DC__NUM_DPP__MAX];
 	bool DSCEnabled[DC__NUM_DPP__MAX];
 	unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX];
@@ -346,6 +347,7 @@ struct vba_vars_st {
 	unsigned int EffectiveLBLatencyHidingSourceLinesChroma;
 	double BandwidthAvailableForImmediateFlip;
 	unsigned int PrefetchMode[DC__VOLTAGE_STATES + 1][2];
+	unsigned int PrefetchModePerState[DC__VOLTAGE_STATES + 1][2];
 	unsigned int MinPrefetchMode;
 	unsigned int MaxPrefetchMode;
 	bool AnyLinesForVMOrRowTooLarge;
@@ -395,6 +397,7 @@ struct vba_vars_st {
 	bool WritebackLumaAndChromaScalingSupported;
 	bool Cursor64BppSupport;
 	double DCFCLKPerState[DC__VOLTAGE_STATES + 1];
+	double DCFCLKState[DC__VOLTAGE_STATES + 1][2];
 	double FabricClockPerState[DC__VOLTAGE_STATES + 1];
 	double SOCCLKPerState[DC__VOLTAGE_STATES + 1];
 	double PHYCLKPerState[DC__VOLTAGE_STATES + 1];
@@ -443,7 +446,7 @@ struct vba_vars_st {
 	double OutputLinkDPLanes[DC__NUM_DPP__MAX];
 	double ForcedOutputLinkBPP[DC__NUM_DPP__MAX]; // Mode Support only
 	double ImmediateFlipBW[DC__NUM_DPP__MAX];
-	double MaxMaxVStartup;
+	double MaxMaxVStartup[DC__VOLTAGE_STATES + 1][2];
 
 	double WritebackLumaVExtra;
 	double WritebackChromaVExtra;
@@ -470,7 +473,7 @@ struct vba_vars_st {
 	double RoundedUpMaxSwathSizeBytesC;
 	double EffectiveDETLBLinesLuma;
 	double EffectiveDETLBLinesChroma;
-	double ProjectedDCFCLKDeepSleep;
+	double ProjectedDCFCLKDeepSleep[DC__VOLTAGE_STATES + 1][2];
 	double PDEAndMetaPTEBytesPerFrameY;
 	double PDEAndMetaPTEBytesPerFrameC;
 	unsigned int MetaRowBytesY;
@@ -488,12 +491,11 @@ struct vba_vars_st {
 	double FractionOfUrgentBandwidthImmediateFlip; // Mode Support debugging output
 
 	/* ms locals */
-	double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1];
+	double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1][2];
 	unsigned int NoOfDPP[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
 	int NoOfDPPThisState[DC__NUM_DPP__MAX];
-	bool ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
-	enum odm_combine_mode ODMCombineTypeEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
-	unsigned int SwathWidthYThisState[DC__NUM_DPP__MAX];
+	enum odm_combine_mode ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+	double SwathWidthYThisState[DC__NUM_DPP__MAX];
 	unsigned int SwathHeightCPerState[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
 	unsigned int SwathHeightYThisState[DC__NUM_DPP__MAX];
 	unsigned int SwathHeightCThisState[DC__NUM_DPP__MAX];
@@ -505,7 +507,7 @@ struct vba_vars_st {
 	double RequiredDPPCLKThisState[DC__NUM_DPP__MAX];
 	bool PTEBufferSizeNotExceededY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
 	bool PTEBufferSizeNotExceededC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
-	bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1];
+	bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1][2];
 	bool PrefetchSupported[DC__VOLTAGE_STATES + 1][2];
 	bool VRatioInPrefetchSupported[DC__VOLTAGE_STATES + 1][2];
 	double RequiredDISPCLK[DC__VOLTAGE_STATES + 1][2];
@@ -514,22 +516,22 @@ struct vba_vars_st {
 	unsigned int TotalNumberOfActiveDPP[DC__VOLTAGE_STATES + 1][2];
 	unsigned int TotalNumberOfDCCActiveDPP[DC__VOLTAGE_STATES + 1][2];
 	bool ModeSupport[DC__VOLTAGE_STATES + 1][2];
-	double ReturnBWPerState[DC__VOLTAGE_STATES + 1];
+	double ReturnBWPerState[DC__VOLTAGE_STATES + 1][2];
 	bool DIOSupport[DC__VOLTAGE_STATES + 1];
 	bool NotEnoughDSCUnits[DC__VOLTAGE_STATES + 1];
 	bool DSCCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
 	bool DTBCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
 	double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES + 1];
-	bool ROBSupport[DC__VOLTAGE_STATES + 1];
+	bool ROBSupport[DC__VOLTAGE_STATES + 1][2];
 	bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES + 1][2];
-	bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1];
-	double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1];
+	bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1][2];
+	double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1][2];
 	double PrefetchBW[DC__NUM_DPP__MAX];
-	double PDEAndMetaPTEBytesPerFrame[DC__NUM_DPP__MAX];
-	double MetaRowBytes[DC__NUM_DPP__MAX];
-	double DPTEBytesPerRow[DC__NUM_DPP__MAX];
-	double PrefetchLinesY[DC__NUM_DPP__MAX];
-	double PrefetchLinesC[DC__NUM_DPP__MAX];
+	double PDEAndMetaPTEBytesPerFrame[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	double MetaRowBytes[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	double DPTEBytesPerRow[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	double PrefetchLinesY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	double PrefetchLinesC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
 	unsigned int MaxNumSwY[DC__NUM_DPP__MAX];
 	unsigned int MaxNumSwC[DC__NUM_DPP__MAX];
 	double PrefillY[DC__NUM_DPP__MAX];
@@ -538,7 +540,7 @@ struct vba_vars_st {
 	double LinesForMetaPTE[DC__NUM_DPP__MAX];
 	double LinesForMetaAndDPTERow[DC__NUM_DPP__MAX];
 	double MinDPPCLKUsingSingleDPP[DC__NUM_DPP__MAX];
-	unsigned int SwathWidthYSingleDPP[DC__NUM_DPP__MAX];
+	double SwathWidthYSingleDPP[DC__NUM_DPP__MAX];
 	double BytePerPixelInDETY[DC__NUM_DPP__MAX];
 	double BytePerPixelInDETC[DC__NUM_DPP__MAX];
 	bool RequiresDSC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
@@ -546,7 +548,7 @@ struct vba_vars_st {
 	double RequiresFEC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
 	double OutputBppPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
 	double DSCDelayPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
-	bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1];
+	bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1][2];
 	unsigned int Read256BlockHeightY[DC__NUM_DPP__MAX];
 	unsigned int Read256BlockWidthY[DC__NUM_DPP__MAX];
 	unsigned int Read256BlockHeightC[DC__NUM_DPP__MAX];
@@ -561,7 +563,7 @@ struct vba_vars_st {
 	double WriteBandwidth[DC__NUM_DPP__MAX];
 	double PSCL_FACTOR[DC__NUM_DPP__MAX];
 	double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
-	double MaximumVStartup[DC__NUM_DPP__MAX];
+	double MaximumVStartup[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
 	unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
 	unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
 	double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
@@ -578,7 +580,7 @@ struct vba_vars_st {
 	bool ImmediateFlipSupportedForState[DC__VOLTAGE_STATES + 1][2];
 	double WritebackDelay[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
 	unsigned int vm_group_bytes[DC__NUM_DPP__MAX];
-	long dpte_group_bytes[DC__NUM_DPP__MAX];
+	unsigned int dpte_group_bytes[DC__NUM_DPP__MAX];
 	unsigned int dpte_row_height[DC__NUM_DPP__MAX];
 	unsigned int meta_req_height[DC__NUM_DPP__MAX];
 	unsigned int meta_req_width[DC__NUM_DPP__MAX];
@@ -604,14 +606,14 @@ struct vba_vars_st {
 	double UrgentBurstFactorChroma[DC__NUM_DPP__MAX];
 	double UrgentBurstFactorChromaPre[DC__NUM_DPP__MAX];
 
+
 	bool           MPCCombine[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
 	double         SwathWidthCSingleDPP[DC__NUM_DPP__MAX];
 	double         MaximumSwathWidthInLineBufferLuma;
 	double         MaximumSwathWidthInLineBufferChroma;
 	double         MaximumSwathWidthLuma[DC__NUM_DPP__MAX];
 	double         MaximumSwathWidthChroma[DC__NUM_DPP__MAX];
-	bool odm_combine_dummy[DC__NUM_DPP__MAX];
-	enum odm_combine_mode odm_combine_mode_dummy[DC__NUM_DPP__MAX];
+	enum odm_combine_mode odm_combine_dummy[DC__NUM_DPP__MAX];
 	double         dummy1[DC__NUM_DPP__MAX];
 	double         dummy2[DC__NUM_DPP__MAX];
 	double         dummy3[DC__NUM_DPP__MAX];
@@ -621,9 +623,9 @@ struct vba_vars_st {
 	double         dummy7[DC__NUM_DPP__MAX];
 	double         dummy8[DC__NUM_DPP__MAX];
 	unsigned int        dummyinteger1ms[DC__NUM_DPP__MAX];
-	unsigned int        dummyinteger2ms[DC__NUM_DPP__MAX];
+	double        dummyinteger2ms[DC__NUM_DPP__MAX];
 	unsigned int        dummyinteger3[DC__NUM_DPP__MAX];
-	unsigned int        dummyinteger4;
+	unsigned int        dummyinteger4[DC__NUM_DPP__MAX];
 	unsigned int        dummyinteger5;
 	unsigned int        dummyinteger6;
 	unsigned int        dummyinteger7;
@@ -636,7 +638,6 @@ struct vba_vars_st {
 	unsigned int        dummyintegerarr2[DC__NUM_DPP__MAX];
 	unsigned int        dummyintegerarr3[DC__NUM_DPP__MAX];
 	unsigned int        dummyintegerarr4[DC__NUM_DPP__MAX];
-	long                dummylongarr1[DC__NUM_DPP__MAX];
 	bool           dummysinglestring;
 	bool           SingleDPPViewportSizeSupportPerPlane[DC__NUM_DPP__MAX];
 	double         PlaneRequiredDISPCLKWithODMCombine2To1;
@@ -644,20 +645,19 @@ struct vba_vars_st {
 	unsigned int   TotalNumberOfSingleDPPPlanes[DC__VOLTAGE_STATES + 1][2];
 	bool           LinkDSCEnable;
 	bool           ODMCombine4To1SupportCheckOK[DC__VOLTAGE_STATES + 1];
-	bool ODMCombineEnableThisState[DC__NUM_DPP__MAX];
-	enum odm_combine_mode ODMCombineEnableTypeThisState[DC__NUM_DPP__MAX];
-	unsigned int   SwathWidthCThisState[DC__NUM_DPP__MAX];
+	enum odm_combine_mode ODMCombineEnableThisState[DC__NUM_DPP__MAX];
+	double   SwathWidthCThisState[DC__NUM_DPP__MAX];
 	bool           ViewportSizeSupportPerPlane[DC__NUM_DPP__MAX];
 	double         AlignedDCCMetaPitchY[DC__NUM_DPP__MAX];
 	double         AlignedDCCMetaPitchC[DC__NUM_DPP__MAX];
 
 	unsigned int NotEnoughUrgentLatencyHiding;
 	unsigned int NotEnoughUrgentLatencyHidingPre;
-	long PTEBufferSizeInRequestsForLuma;
-	long PTEBufferSizeInRequestsForChroma;
+	int PTEBufferSizeInRequestsForLuma;
+	int PTEBufferSizeInRequestsForChroma;
 
 	// Missing from VBA
-	long dpte_group_bytes_chroma;
+	int dpte_group_bytes_chroma;
 	unsigned int vm_group_bytes_chroma;
 	double dst_x_after_scaler;
 	double dst_y_after_scaler;
@@ -682,8 +682,8 @@ struct vba_vars_st {
 	double MinTTUVBlank[DC__NUM_DPP__MAX];
 	double BytePerPixelDETY[DC__NUM_DPP__MAX];
 	double BytePerPixelDETC[DC__NUM_DPP__MAX];
-	unsigned int SwathWidthY[DC__NUM_DPP__MAX];
-	unsigned int SwathWidthSingleDPPY[DC__NUM_DPP__MAX];
+	double SwathWidthY[DC__NUM_DPP__MAX];
+	double SwathWidthSingleDPPY[DC__NUM_DPP__MAX];
 	double CursorRequestDeliveryTime[DC__NUM_DPP__MAX];
 	double CursorRequestDeliveryTimePrefetch[DC__NUM_DPP__MAX];
 	double ReadBandwidthPlaneLuma[DC__NUM_DPP__MAX];
@@ -759,12 +759,12 @@ struct vba_vars_st {
 	double LinesInDETY[DC__NUM_DPP__MAX];
 	double LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
 
-	unsigned int SwathWidthSingleDPPC[DC__NUM_DPP__MAX];
-	unsigned int SwathWidthC[DC__NUM_DPP__MAX];
+	double SwathWidthSingleDPPC[DC__NUM_DPP__MAX];
+	double SwathWidthC[DC__NUM_DPP__MAX];
 	unsigned int BytePerPixelY[DC__NUM_DPP__MAX];
 	unsigned int BytePerPixelC[DC__NUM_DPP__MAX];
-	long dummyinteger1;
-	long dummyinteger2;
+	unsigned int dummyinteger1;
+	unsigned int dummyinteger2;
 	double FinalDRAMClockChangeLatency;
 	double Tdmdl_vm[DC__NUM_DPP__MAX];
 	double Tdmdl[DC__NUM_DPP__MAX];
@@ -778,6 +778,7 @@ struct vba_vars_st {
 	unsigned int DCCCMaxCompressedBlock[DC__NUM_DPP__MAX];
 	unsigned int DCCCIndependent64ByteBlock[DC__NUM_DPP__MAX];
 	double VStartupMargin;
+	bool NotEnoughTimeForDynamicMetadata;
 
 	/* Missing from VBA */
 	unsigned int MaximumMaxVStartupLines;
@@ -813,7 +814,7 @@ struct vba_vars_st {
 	unsigned int ViewportHeightChroma[DC__NUM_DPP__MAX];
 	double HRatioChroma[DC__NUM_DPP__MAX];
 	double VRatioChroma[DC__NUM_DPP__MAX];
-	long WritebackSourceWidth[DC__NUM_DPP__MAX];
+	int WritebackSourceWidth[DC__NUM_DPP__MAX];
 
 	bool ModeIsSupported;
 	bool ODMCombine4To1Supported;
@@ -849,6 +850,58 @@ struct vba_vars_st {
 	unsigned int MaxNumHDMIFRLOutputs;
 	int    AudioSampleRate[DC__NUM_DPP__MAX];
 	int    AudioSampleLayout[DC__NUM_DPP__MAX];
+
+	int PercentMarginOverMinimumRequiredDCFCLK;
+	bool DynamicMetadataSupported[DC__VOLTAGE_STATES + 1][2];
+	enum immediate_flip_requirement ImmediateFlipRequirement;
+	double DETBufferSizeYThisState[DC__NUM_DPP__MAX];
+	double DETBufferSizeCThisState[DC__NUM_DPP__MAX];
+	bool NoUrgentLatencyHiding[DC__NUM_DPP__MAX];
+	bool NoUrgentLatencyHidingPre[DC__NUM_DPP__MAX];
+	int swath_width_luma_ub_this_state[DC__NUM_DPP__MAX];
+	int swath_width_chroma_ub_this_state[DC__NUM_DPP__MAX];
+	double UrgLatency[DC__VOLTAGE_STATES + 1];
+	double VActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	double VActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	bool NoTimeForPrefetch[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	bool NoTimeForDynamicMetadata[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	double dpte_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	double meta_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	double DETBufferSizeYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	double DETBufferSizeCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	int swath_width_luma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	int swath_width_chroma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	bool NotUrgentLatencyHiding[DC__VOLTAGE_STATES + 1][2];
+	unsigned int SwathHeightYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	unsigned int SwathHeightCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	unsigned int SwathWidthYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	unsigned int SwathWidthCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+	double TotalDPTERowBandwidth[DC__VOLTAGE_STATES + 1][2];
+	double TotalMetaRowBandwidth[DC__VOLTAGE_STATES + 1][2];
+	double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2];
+	double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2];
+	bool UseMinimumRequiredDCFCLK;
+	double WritebackDelayTime[DC__NUM_DPP__MAX];
+	unsigned int DCCYIndependentBlock[DC__NUM_DPP__MAX];
+	unsigned int DCCCIndependentBlock[DC__NUM_DPP__MAX];
+	unsigned int dummyinteger15;
+	unsigned int dummyinteger16;
+	unsigned int dummyinteger17;
+	unsigned int dummyinteger18;
+	unsigned int dummyinteger19;
+	unsigned int dummyinteger20;
+	unsigned int dummyinteger21;
+	unsigned int dummyinteger22;
+	unsigned int dummyinteger23;
+	unsigned int dummyinteger24;
+	unsigned int dummyinteger25;
+	unsigned int dummyinteger26;
+	unsigned int dummyinteger27;
+	unsigned int dummyinteger28;
+	unsigned int dummyinteger29;
+	bool dummystring[DC__NUM_DPP__MAX];
+	double BPP;
+	enum odm_combine_policy ODMCombinePolicy;
 };
 
 bool CalculateMinAndMaxPrefetchMode(
@@ -870,4 +923,3 @@ double CalculateWriteBackDISPCLK(
 		unsigned int WritebackChromaLineBufferWidth);
 
 #endif /* _DML2_DISPLAY_MODE_VBA_H_ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
index b953b02..723af0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
@@ -24,7 +24,7 @@
  */
 
 #include "dml_common_defs.h"
-#include "../calcs/dcn_calc_math.h"
+#include "dcn_calc_math.h"
 
 #include "dml_inline_defs.h"
 
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index eca140d..ded71ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -27,7 +27,7 @@
 #define __DML_INLINE_DEFS_H__
 
 #include "dml_common_defs.h"
-#include "../calcs/dcn_calc_math.h"
+#include "dcn_calc_math.h"
 #include "dml_logger.h"
 
 static inline double dml_min(double a, double b)
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index 641ffb7..3f66868 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -2,7 +2,13 @@
 #
 # Makefile for the 'dsc' sub-component of DAL.
 
+ifdef CONFIG_X86
 dsc_ccflags := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+dsc_ccflags := -mhard-float -maltivec
+endif
 
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -10,6 +16,7 @@
 endif
 endif
 
+ifdef CONFIG_X86
 ifdef IS_OLD_GCC
 # Stack alignment mismatch, proceed with caution.
 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -18,6 +25,7 @@
 else
 dsc_ccflags += -msse2
 endif
+endif
 
 CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc_dpi.o := $(dsc_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index e60f760..87d682d 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -22,30 +22,16 @@
  * Author: AMD
  */
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 #include "dc_hw_types.h"
 #include "dsc.h"
 #include <drm/drm_dp_helper.h>
-
-struct dc_dsc_policy {
-	bool use_min_slices_h;
-	int max_slices_h; // Maximum available if 0
-	int min_sice_height; // Must not be less than 8
-	int max_target_bpp;
-	int min_target_bpp; // Minimum target bits per pixel
-};
-
-const struct dc_dsc_policy dsc_policy = {
-	.use_min_slices_h = true, // DSC Policy: Use minimum number of slices that fits the pixel clock
-	.max_slices_h = 0, // DSC Policy: Use max available slices (in our case 4 for or 8, depending on the mode)
-	.min_sice_height = 108, // DSC Policy: Use slice height recommended by VESA DSC Spreadsheet user guide
-	.max_target_bpp = 16,
-	.min_target_bpp = 8,
-};
-
+#include "dc.h"
 
 /* This module's internal functions */
 
+/* default DSC policy target bitrate limit is 16bpp */
+static uint32_t dsc_policy_max_target_bpp_limit = 16;
+
 static uint32_t dc_dsc_bandwidth_in_kbps_from_timing(
 	const struct dc_crtc_timing *timing)
 {
@@ -237,8 +223,12 @@ static void get_dsc_enc_caps(
 	// This is a static HW query, so we can use any DSC
 
 	memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
-	if (dsc)
-		dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
+	if (dsc) {
+		if (!dsc->ctx->dc->debug.disable_dsc)
+			dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
+		if (dsc->ctx->dc->debug.native422_support)
+			dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
+	}
 }
 
 /* Returns 'false' if no intersection was found for at least one capability.
@@ -578,9 +568,11 @@ static bool setup_dsc_config(
 	bool is_dsc_possible = false;
 	int pic_height;
 	int slice_height;
+	struct dc_dsc_policy policy;
 
 	memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
 
+	dc_dsc_get_policy_for_timing(timing, &policy);
 	pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right;
 	pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
 
@@ -596,7 +588,12 @@ static bool setup_dsc_config(
 		goto done;
 
 	if (target_bandwidth_kbps > 0) {
-		is_dsc_possible = decide_dsc_target_bpp_x16(&dsc_policy, &dsc_common_caps, target_bandwidth_kbps, timing, &target_bpp);
+		is_dsc_possible = decide_dsc_target_bpp_x16(
+				&policy,
+				&dsc_common_caps,
+				target_bandwidth_kbps,
+				timing,
+				&target_bpp);
 		dsc_cfg->bits_per_pixel = target_bpp;
 	}
 	if (!is_dsc_possible)
@@ -698,20 +695,20 @@ static bool setup_dsc_config(
 	if (!is_dsc_possible)
 		goto done;
 
-	if (dsc_policy.use_min_slices_h) {
+	if (policy.use_min_slices_h) {
 		if (min_slices_h > 0)
 			num_slices_h = min_slices_h;
 		else if (max_slices_h > 0) { // Fall back to max slices if min slices is not working out
-			if (dsc_policy.max_slices_h)
-				num_slices_h = min(dsc_policy.max_slices_h, max_slices_h);
+			if (policy.max_slices_h)
+				num_slices_h = min(policy.max_slices_h, max_slices_h);
 			else
 				num_slices_h = max_slices_h;
 		} else
 			is_dsc_possible = false;
 	} else {
 		if (max_slices_h > 0) {
-			if (dsc_policy.max_slices_h)
-				num_slices_h = min(dsc_policy.max_slices_h, max_slices_h);
+			if (policy.max_slices_h)
+				num_slices_h = min(policy.max_slices_h, max_slices_h);
 			else
 				num_slices_h = max_slices_h;
 		} else if (min_slices_h > 0) // Fall back to min slices if max slices is not possible
@@ -733,7 +730,7 @@ static bool setup_dsc_config(
 	// Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by.
 	// For 4:2:0 make sure the slice height is divisible by 2 as well.
 	if (min_slice_height_override == 0)
-		slice_height = min(dsc_policy.min_sice_height, pic_height);
+		slice_height = min(policy.min_slice_height, pic_height);
 	else
 		slice_height = min(min_slice_height_override, pic_height);
 
@@ -764,7 +761,7 @@ static bool setup_dsc_config(
 	return is_dsc_possible;
 }
 
-bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps)
+bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps)
 {
 	if (!dpcd_dsc_basic_data)
 		return false;
@@ -817,6 +814,23 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp
 	if (!dsc_bpp_increment_div_from_dpcd(dpcd_dsc_basic_data[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT], &dsc_sink_caps->bpp_increment_div))
 		return false;
 
+	if (dc->debug.dsc_bpp_increment_div) {
+		/* dsc_bpp_increment_div should only be 1, 2, 4, 8 or 16, but rather than rejecting invalid values,
+		 * we'll accept all and get it into range. This also makes the above check against 0 redundant,
+		 * but it stresses that the override will only be used if it's not 0.
+		 */
+		if (dc->debug.dsc_bpp_increment_div >= 1)
+			dsc_sink_caps->bpp_increment_div = 1;
+		if (dc->debug.dsc_bpp_increment_div >= 2)
+			dsc_sink_caps->bpp_increment_div = 2;
+		if (dc->debug.dsc_bpp_increment_div >= 4)
+			dsc_sink_caps->bpp_increment_div = 4;
+		if (dc->debug.dsc_bpp_increment_div >= 8)
+			dsc_sink_caps->bpp_increment_div = 8;
+		if (dc->debug.dsc_bpp_increment_div >= 16)
+			dsc_sink_caps->bpp_increment_div = 16;
+	}
+
 	/* Extended caps */
 	if (dpcd_dsc_ext_data == NULL) { // Extended DPCD DSC data can be null, e.g. because it doesn't apply to SST
 		dsc_sink_caps->branch_overall_throughput_0_mps = 0;
@@ -903,4 +917,67 @@ bool dc_dsc_compute_config(
 			timing, dsc_min_slice_height_override, dsc_cfg);
 	return is_dsc_possible;
 }
-#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */
+
+void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, struct dc_dsc_policy *policy)
+{
+	uint32_t bpc = 0;
+
+	policy->min_target_bpp = 0;
+	policy->max_target_bpp = 0;
+
+	/* DSC Policy: Use minimum number of slices that fits the pixel clock */
+	policy->use_min_slices_h = true;
+
+	/* DSC Policy: Use max available slices
+	 * (in our case 4 or 8, depending on the mode)
+	 */
+	policy->max_slices_h = 0;
+
+	/* DSC Policy: Use slice height recommended
+	 * by VESA DSC Spreadsheet user guide
+	 */
+	policy->min_slice_height = 108;
+
+	/* DSC Policy: follow DP specs with an internal upper limit to 16 bpp
+	 * for better interoperability
+	 */
+	switch (timing->display_color_depth) {
+	case COLOR_DEPTH_888:
+		bpc = 8;
+		break;
+	case COLOR_DEPTH_101010:
+		bpc = 10;
+		break;
+	case COLOR_DEPTH_121212:
+		bpc = 12;
+		break;
+	default:
+		return;
+	}
+	switch (timing->pixel_encoding) {
+	case PIXEL_ENCODING_RGB:
+	case PIXEL_ENCODING_YCBCR444:
+	case PIXEL_ENCODING_YCBCR422: /* assume no YCbCr422 native support */
+		/* DP spec limits to 8 */
+		policy->min_target_bpp = 8;
+		/* DP spec limits to 3 x bpc */
+		policy->max_target_bpp = 3 * bpc;
+		break;
+	case PIXEL_ENCODING_YCBCR420:
+		/* DP spec limits to 6 */
+		policy->min_target_bpp = 6;
+		/* DP spec limits to 1.5 x bpc; assume bpc is an even number */
+		policy->max_target_bpp = bpc * 3 / 2;
+		break;
+	default:
+		return;
+	}
+	/* internal upper limit, default 16 bpp */
+	if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit)
+		policy->max_target_bpp = dsc_policy_max_target_bpp_limit;
+}
+
+void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit)
+{
+	dsc_policy_max_target_bpp_limit = limit;
+}
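
For reference, the new dc_dsc_get_policy_for_timing() above picks the DSC target-bpp range from the DP-spec limits for the stream's pixel encoding and bit depth, then applies the module-level cap installed via dc_dsc_policy_set_max_target_bpp_limit(). The standalone sketch below mirrors only that arithmetic; the enum, struct and function names are illustrative stand-ins, not the DC API, and unsupported encodings/depths are simply not modeled.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the driver's timing/policy types. */
enum example_pixel_encoding { EX_RGB, EX_YCBCR444, EX_YCBCR422, EX_YCBCR420 };

struct example_dsc_policy {
	uint32_t min_target_bpp;
	uint32_t max_target_bpp;
};

/* RGB/4:4:4/4:2:2 -> [8, 3*bpc]; 4:2:0 -> [6, 1.5*bpc];
 * then clamp max_target_bpp to the internal limit (default 16 bpp).
 */
static void example_get_policy(enum example_pixel_encoding enc, uint32_t bpc,
			       uint32_t max_bpp_limit,
			       struct example_dsc_policy *policy)
{
	switch (enc) {
	case EX_RGB:
	case EX_YCBCR444:
	case EX_YCBCR422:
		policy->min_target_bpp = 8;
		policy->max_target_bpp = 3 * bpc;
		break;
	case EX_YCBCR420:
		policy->min_target_bpp = 6;
		policy->max_target_bpp = bpc * 3 / 2;
		break;
	}
	if (policy->max_target_bpp > max_bpp_limit)
		policy->max_target_bpp = max_bpp_limit;
}

int main(void)
{
	struct example_dsc_policy p;

	/* 10 bpc RGB: spec allows up to 30 bpp, internal cap brings it to 16 */
	example_get_policy(EX_RGB, 10, 16, &p);
	printf("RGB 10bpc: %u..%u bpp\n",
	       (unsigned)p.min_target_bpp, (unsigned)p.max_target_bpp);

	/* 8 bpc 4:2:0: 1.5 * 8 = 12 bpp, already below the cap */
	example_get_policy(EX_YCBCR420, 8, 16, &p);
	printf("4:2:0 8bpc: %u..%u bpp\n",
	       (unsigned)p.min_target_bpp, (unsigned)p.max_target_bpp);
	return 0;
}
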
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
index 020ad8f..9f70e87 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 
 /*
  * Copyright 2017 Advanced Micro Devices, Inc.
@@ -51,4 +50,3 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par
 
 #endif
 
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h
index f66d006..e5fac9f 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 
 /*
  * Copyright 2017 Advanced Micro Devices, Inc.
@@ -703,4 +702,3 @@ const qp_table   qp_table_422_8bpc_max = {
 	{  16, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} }
 };
 
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index 76c4b12..03ae159 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
 
 /*
  * Copyright 2017 Advanced Micro Devices, Inc.
@@ -252,4 +251,3 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
 	rc->rc_buf_thresh[13] = 8064;
 }
 
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
index f1d6e79..b6b1f09 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 
 /*
  * Copyright 2017 Advanced Micro Devices, Inc.
@@ -82,4 +81,3 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
 
 #endif
 
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index 73172fd..1f6e63b 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
 /*
  * Copyright 2012-17 Advanced Micro Devices, Inc.
  *
@@ -144,4 +143,3 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par
 	return ret;
 }
 
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
index b306227..202baa2 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -61,26 +61,25 @@
 ###############################################################################
 # DCN 1x
 ###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_DRM_AMD_DC_DCN
 GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o
 
 AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN10)
-endif
 
 ###############################################################################
 # DCN 2
 ###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
 GPIO_DCN20 = hw_translate_dcn20.o hw_factory_dcn20.o
 
 AMD_DAL_GPIO_DCN20 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn20/,$(GPIO_DCN20))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN20)
-endif
 
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
+###############################################################################
+# DCN 21
+###############################################################################
 GPIO_DCN21 = hw_translate_dcn21.o hw_factory_dcn21.o
 
 AMD_DAL_GPIO_DCN21 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn21/,$(GPIO_DCN21))
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
index 43a44038..83f798c 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
@@ -22,7 +22,6 @@
  * Authors: AMD
  *
  */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #include "dm_services.h"
 #include "include/gpio_types.h"
 #include "../hw_factory.h"
@@ -110,6 +109,12 @@ static const struct ddc_registers ddc_data_regs_dcn[] = {
 	ddc_data_regs_dcn2(4),
 	ddc_data_regs_dcn2(5),
 	ddc_data_regs_dcn2(6),
+	{
+			DDC_GPIO_VGA_REG_LIST(DATA),
+			.ddc_setup = 0,
+			.phy_aux_cntl = 0,
+			.dc_gpio_aux_ctrl_5 = 0
+	}
 };
 
 static const struct ddc_registers ddc_clk_regs_dcn[] = {
@@ -119,6 +124,12 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = {
 	ddc_clk_regs_dcn2(4),
 	ddc_clk_regs_dcn2(5),
 	ddc_clk_regs_dcn2(6),
+	{
+			DDC_GPIO_VGA_REG_LIST(CLK),
+			.ddc_setup = 0,
+			.phy_aux_cntl = 0,
+			.dc_gpio_aux_ctrl_5 = 0
+	}
 };
 
 static const struct ddc_sh_mask ddc_shift[] = {
@@ -246,4 +257,3 @@ void dal_hw_factory_dcn20_init(struct hw_factory *factory)
 	factory->funcs = &funcs;
 }
 
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h
index 43a4ce7..0fd9b31 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h
@@ -22,7 +22,6 @@
  * Authors: AMD
  *
  */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #ifndef __DAL_HW_FACTORY_DCN20_H__
 #define __DAL_HW_FACTORY_DCN20_H__
 
@@ -30,4 +29,3 @@
 void dal_hw_factory_dcn20_init(struct hw_factory *factory);
 
 #endif /* __DAL_HW_FACTORY_DCN20_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c
index 915e896e..52ba62b 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c
@@ -26,7 +26,6 @@
 /*
  * Pre-requisites: headers required by header of this unit
  */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #include "hw_translate_dcn20.h"
 
 #include "dm_services.h"
@@ -379,4 +378,3 @@ void dal_hw_translate_dcn20_init(struct hw_translate *tr)
 	tr->funcs = &funcs;
 }
 
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h
index 01f52c7..5f7a355 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h
@@ -22,7 +22,6 @@
  * Authors: AMD
  *
  */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #ifndef __DAL_HW_TRANSLATE_DCN20_H__
 #define __DAL_HW_TRANSLATE_DCN20_H__
 
@@ -32,4 +31,3 @@ struct hw_translate;
 void dal_hw_translate_dcn20_init(struct hw_translate *tr);
 
 #endif /* __DAL_HW_TRANSLATE_DCN20_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
index 8572678..907c591 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
@@ -22,7 +22,6 @@
  * Authors: AMD
  *
  */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #include "dm_services.h"
 #include "include/gpio_types.h"
 #include "../hw_factory.h"
@@ -239,4 +238,3 @@ void dal_hw_factory_dcn21_init(struct hw_factory *factory)
 	factory->funcs = &funcs;
 }
 
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h
index 2443f9e..4949e0c 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h
@@ -22,7 +22,6 @@
  * Authors: AMD
  *
  */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #ifndef __DAL_HW_FACTORY_DCN21_H__
 #define __DAL_HW_FACTORY_DCN21_H__
 
@@ -30,4 +29,3 @@
 void dal_hw_factory_dcn21_init(struct hw_factory *factory);
 
 #endif /* __DAL_HW_FACTORY_DCN20_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
index fbb58fb..291966e 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
@@ -26,7 +26,6 @@
 /*
  * Pre-requisites: headers required by header of this unit
  */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #include "hw_translate_dcn21.h"
 
 #include "dm_services.h"
@@ -382,4 +381,3 @@ void dal_hw_translate_dcn21_init(struct hw_translate *tr)
 	tr->funcs = &funcs;
 }
 
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h
index 2bfaac2..9462b0a 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h
@@ -22,7 +22,6 @@
  * Authors: AMD
  *
  */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #ifndef __DAL_HW_TRANSLATE_DCN21_H__
 #define __DAL_HW_TRANSLATE_DCN21_H__
 
@@ -32,4 +31,3 @@ struct hw_translate;
 void dal_hw_translate_dcn21_init(struct hw_translate *tr);
 
 #endif /* __DAL_HW_TRANSLATE_DCN21_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
index f91e85b..308a543 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -48,13 +48,11 @@
 	DDC_GPIO_REG_LIST(cd,id),\
 	.ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	#define DDC_REG_LIST_DCN2(cd, id) \
 	DDC_GPIO_REG_LIST(cd, id),\
 	.ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP),\
 	.phy_aux_cntl = REG(PHY_AUX_CNTL), \
 	.dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5)
-#endif
 
 #define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\
 	.type ## _reg =   REG(DC_GPIO_DDCVGA_ ## type),\
@@ -90,13 +88,11 @@
 	DDC_GPIO_I2C_REG_LIST(cd),\
 	.ddc_setup = 0
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define DDC_I2C_REG_LIST_DCN2(cd) \
 	DDC_GPIO_I2C_REG_LIST(cd),\
 	.ddc_setup = 0,\
 	.phy_aux_cntl = REG(PHY_AUX_CNTL), \
 	.dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5)
-#endif
 #define DDC_MASK_SH_LIST_COMMON(mask_sh) \
 		SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
 		SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\
@@ -110,22 +106,18 @@
 		SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\
 		SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define DDC_MASK_SH_LIST_DCN2(mask_sh, cd) \
 	{DDC_MASK_SH_LIST_COMMON(mask_sh),\
 	0,\
 	0,\
 	(PHY_AUX_CNTL__AUX## cd ##_PAD_RXSEL## mask_sh),\
 	(DC_GPIO_AUX_CTRL_5__DDC_PAD## cd ##_I2CMODE## mask_sh)}
-#endif
 
 struct ddc_registers {
 	struct gpio_registers gpio;
 	uint32_t ddc_setup;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	uint32_t phy_aux_cntl;
 	uint32_t dc_gpio_aux_ctrl_5;
-#endif
 };
 
 struct ddc_sh_mask {
@@ -140,11 +132,9 @@ struct ddc_sh_mask {
 	/* i2cpad_mask */
 	uint32_t DC_GPIO_SDA_PD_DIS;
 	uint32_t DC_GPIO_SCL_PD_DIS;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	//phy_aux_cntl
 	uint32_t AUX_PAD_RXSEL;
 	uint32_t DDC_PAD_I2CMODE;
-#endif
 };
 
 
@@ -180,7 +170,6 @@ struct ddc_sh_mask {
 {\
 	DDC_I2C_REG_LIST(SCL)\
 }
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define ddc_data_regs_dcn2(id) \
 {\
 	DDC_REG_LIST_DCN2(DATA, id)\
@@ -200,7 +189,6 @@ struct ddc_sh_mask {
 {\
 	DDC_REG_LIST_DCN2(SCL)\
 }
-#endif
 
 
 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
index 1c12961..1ae153e 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -48,18 +48,18 @@
 
 struct gpio;
 
-static void destruct(
+static void dal_hw_ddc_destruct(
 	struct hw_ddc *pin)
 {
 	dal_hw_gpio_destruct(&pin->base);
 }
 
-static void destroy(
+static void dal_hw_ddc_destroy(
 	struct hw_gpio_pin **ptr)
 {
 	struct hw_ddc *pin = HW_DDC_FROM_BASE(*ptr);
 
-	destruct(pin);
+	dal_hw_ddc_destruct(pin);
 
 	kfree(pin);
 
@@ -150,7 +150,6 @@ static enum gpio_result set_config(
 					AUX_PAD1_MODE, 0);
 		}
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 		if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) {
 				REG_UPDATE(dc_gpio_aux_ctrl_5, DDC_PAD_I2CMODE, 1);
 		}
@@ -158,7 +157,6 @@ static enum gpio_result set_config(
 		if (ddc->regs->phy_aux_cntl != 0) {
 				REG_UPDATE(phy_aux_cntl, AUX_PAD_RXSEL, 1);
 		}
-#endif
 		return GPIO_RESULT_OK;
 	case GPIO_DDC_CONFIG_TYPE_MODE_AUX:
 		/* set the AUX pad mode */
@@ -166,12 +164,10 @@ static enum gpio_result set_config(
 			REG_SET(gpio.MASK_reg, regval,
 					AUX_PAD1_MODE, 1);
 		}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 		if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) {
 			REG_UPDATE(dc_gpio_aux_ctrl_5,
 					DDC_PAD_I2CMODE, 0);
 		}
-#endif
 
 		return GPIO_RESULT_OK;
 	case GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT:
@@ -211,7 +207,7 @@ static enum gpio_result set_config(
 }
 
 static const struct hw_gpio_pin_funcs funcs = {
-	.destroy = destroy,
+	.destroy = dal_hw_ddc_destroy,
 	.open = dal_hw_gpio_open,
 	.get_value = dal_hw_gpio_get_value,
 	.set_value = dal_hw_gpio_set_value,
@@ -220,7 +216,7 @@ static const struct hw_gpio_pin_funcs funcs = {
 	.close = dal_hw_gpio_close,
 };
 
-static void construct(
+static void dal_hw_ddc_construct(
 	struct hw_ddc *ddc,
 	enum gpio_id id,
 	uint32_t en,
@@ -247,7 +243,7 @@ void dal_hw_ddc_init(
 		return;
 	}
 
-	construct(*hw_ddc, id, en, ctx);
+	dal_hw_ddc_construct(*hw_ddc, id, en, ctx);
 }
 
 struct hw_gpio_pin *dal_hw_ddc_get_pin(struct gpio *gpio)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index fa9f1d0..d2d36d4 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -45,15 +45,11 @@
 #include "dce80/hw_factory_dce80.h"
 #include "dce110/hw_factory_dce110.h"
 #include "dce120/hw_factory_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "dcn10/hw_factory_dcn10.h"
 #endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #include "dcn20/hw_factory_dcn20.h"
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #include "dcn21/hw_factory_dcn21.h"
-#endif
 
 #include "diagnostics/hw_factory_diag.h"
 
@@ -90,19 +86,15 @@ bool dal_hw_factory_init(
 	case DCE_VERSION_12_1:
 		dal_hw_factory_dce120_init(factory);
 		return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	case DCN_VERSION_1_0:
 	case DCN_VERSION_1_01:
 		dal_hw_factory_dcn10_init(factory);
 		return true;
-#endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	case DCN_VERSION_2_0:
 		dal_hw_factory_dcn20_init(factory);
 		return true;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	case DCN_VERSION_2_1:
 		dal_hw_factory_dcn21_init(factory);
 		return true;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c
index 69b8997..f9e847e 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c
@@ -46,22 +46,13 @@
 
 struct gpio;
 
-static void dal_hw_generic_construct(
-	struct hw_generic *pin,
-	enum gpio_id id,
-	uint32_t en,
-	struct dc_context *ctx)
-{
-	dal_hw_gpio_construct(&pin->base, id, en, ctx);
-}
-
 static void dal_hw_generic_destruct(
 	struct hw_generic *pin)
 {
 	dal_hw_gpio_destruct(&pin->base);
 }
 
-static void destroy(
+static void dal_hw_generic_destroy(
 	struct hw_gpio_pin **ptr)
 {
 	struct hw_generic *generic = HW_GENERIC_FROM_BASE(*ptr);
@@ -90,7 +81,7 @@ static enum gpio_result set_config(
 }
 
 static const struct hw_gpio_pin_funcs funcs = {
-	.destroy = destroy,
+	.destroy = dal_hw_generic_destroy,
 	.open = dal_hw_gpio_open,
 	.get_value = dal_hw_gpio_get_value,
 	.set_value = dal_hw_gpio_set_value,
@@ -99,14 +90,14 @@ static const struct hw_gpio_pin_funcs funcs = {
 	.close = dal_hw_gpio_close,
 };
 
-static void construct(
-	struct hw_generic *generic,
+static void dal_hw_generic_construct(
+	struct hw_generic *pin,
 	enum gpio_id id,
 	uint32_t en,
 	struct dc_context *ctx)
 {
-	dal_hw_generic_construct(generic, id, en, ctx);
-	generic->base.base.funcs = &funcs;
+	dal_hw_gpio_construct(&pin->base, id, en, ctx);
+	pin->base.base.funcs = &funcs;
 }
 
 void dal_hw_generic_init(
@@ -126,7 +117,7 @@ void dal_hw_generic_init(
 		return;
 	}
 
-	construct(*hw_generic, id, en, ctx);
+	dal_hw_generic_construct(*hw_generic, id, en, ctx);
 }
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
index 00c9bcf..692f29d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
@@ -46,34 +46,18 @@
 
 struct gpio;
 
-static void dal_hw_hpd_construct(
-	struct hw_hpd *pin,
-	enum gpio_id id,
-	uint32_t en,
-	struct dc_context *ctx)
-{
-	dal_hw_gpio_construct(&pin->base, id, en, ctx);
-}
-
 static void dal_hw_hpd_destruct(
 	struct hw_hpd *pin)
 {
 	dal_hw_gpio_destruct(&pin->base);
 }
 
-
-static void destruct(
-	struct hw_hpd *hpd)
-{
-	dal_hw_hpd_destruct(hpd);
-}
-
-static void destroy(
+static void dal_hw_hpd_destroy(
 	struct hw_gpio_pin **ptr)
 {
 	struct hw_hpd *hpd = HW_HPD_FROM_BASE(*ptr);
 
-	destruct(hpd);
+	dal_hw_hpd_destruct(hpd);
 
 	kfree(hpd);
 
@@ -120,7 +104,7 @@ static enum gpio_result set_config(
 }
 
 static const struct hw_gpio_pin_funcs funcs = {
-	.destroy = destroy,
+	.destroy = dal_hw_hpd_destroy,
 	.open = dal_hw_gpio_open,
 	.get_value = get_value,
 	.set_value = dal_hw_gpio_set_value,
@@ -129,14 +113,14 @@ static const struct hw_gpio_pin_funcs funcs = {
 	.close = dal_hw_gpio_close,
 };
 
-static void construct(
-	struct hw_hpd *hpd,
+static void dal_hw_hpd_construct(
+	struct hw_hpd *pin,
 	enum gpio_id id,
 	uint32_t en,
 	struct dc_context *ctx)
 {
-	dal_hw_hpd_construct(hpd, id, en, ctx);
-	hpd->base.base.funcs = &funcs;
+	dal_hw_gpio_construct(&pin->base, id, en, ctx);
+	pin->base.base.funcs = &funcs;
 }
 
 void dal_hw_hpd_init(
@@ -156,7 +140,7 @@ void dal_hw_hpd_init(
 		return;
 	}
 
-	construct(*hw_hpd, id, en, ctx);
+	dal_hw_hpd_construct(*hw_hpd, id, en, ctx);
 }
 
 struct hw_gpio_pin *dal_hw_hpd_get_pin(struct gpio *gpio)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index f2046f5..5d39665 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -43,15 +43,11 @@
 #include "dce80/hw_translate_dce80.h"
 #include "dce110/hw_translate_dce110.h"
 #include "dce120/hw_translate_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "dcn10/hw_translate_dcn10.h"
 #endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #include "dcn20/hw_translate_dcn20.h"
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #include "dcn21/hw_translate_dcn21.h"
-#endif
 
 #include "diagnostics/hw_translate_diag.h"
 
@@ -85,19 +81,15 @@ bool dal_hw_translate_init(
 	case DCE_VERSION_12_1:
 		dal_hw_translate_dce120_init(translate);
 		return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	case DCN_VERSION_1_0:
 	case DCN_VERSION_1_01:
 		dal_hw_translate_dcn10_init(translate);
 		return true;
-#endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	case DCN_VERSION_2_0:
 		dal_hw_translate_dcn20_init(translate);
 		return true;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	case DCN_VERSION_2_1:
 		dal_hw_translate_dcn21_init(translate);
 		return true;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index fd39e2a..4ead89d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -43,10 +43,8 @@ enum dc_status {
 	DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
 	DC_FAIL_SCALING = 14,
 	DC_FAIL_DP_LINK_TRAINING = 15,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	DC_FAIL_DSC_VALIDATE = 16,
 	DC_NO_DSC_RESOURCE = 17,
-#endif
 	DC_FAIL_UNSUPPORTED_1 = 18,
 	DC_FAIL_CLK_EXCEED_MAX = 21,
 	DC_FAIL_CLK_BELOW_MIN = 22, /*THIS IS MIN PER IP*/
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index a831079..f285b76 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -33,13 +33,11 @@
 #include "dc_bios_types.h"
 #include "mem_input.h"
 #include "hubp.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "mpc.h"
 #endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #include "dwb.h"
 #include "mcif_wb.h"
-#endif
 
 #define MAX_CLOCK_SOURCES 7
 
@@ -89,9 +87,7 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
 struct resource_pool;
 struct dc_state;
 struct resource_context;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 struct clk_bw_params;
-#endif
 
 struct resource_funcs {
 	void (*destroy)(struct resource_pool **pool);
@@ -105,7 +101,7 @@ struct resource_funcs {
 
 	int (*populate_dml_pipes)(
 		struct dc *dc,
-		struct resource_context *res_ctx,
+		struct dc_state *context,
 		display_e2e_pipe_params_st *pipes);
 
 	enum dc_status (*validate_global)(
@@ -135,7 +131,6 @@ struct resource_funcs {
 			struct resource_context *res_ctx,
 			const struct resource_pool *pool,
 			struct dc_stream_state *stream);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	void (*populate_dml_writeback_from_context)(
 			struct dc *dc,
 			struct resource_context *res_ctx,
@@ -146,12 +141,9 @@ struct resource_funcs {
 			struct dc_state *context,
 			display_e2e_pipe_params_st *pipes,
 			int pipe_cnt);
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	void (*update_bw_bounding_box)(
 			struct dc *dc,
 			struct clk_bw_params *bw_params);
-#endif
 
 };
 
@@ -180,7 +172,6 @@ struct resource_pool {
 	struct dce_i2c_sw *sw_i2cs[MAX_PIPES];
 	bool i2c_hw_buffer_in_use;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	struct dwbc *dwbc[MAX_DWB_PIPES];
 	struct mcif_wb *mcif_wb[MAX_DWB_PIPES];
 	struct {
@@ -188,11 +179,8 @@ struct resource_pool {
 		unsigned int gsl_1:1;
 		unsigned int gsl_2:1;
 	} gsl_groups;
-#endif
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	struct display_stream_compressor *dscs[MAX_PIPES];
-#endif
 
 	unsigned int pipe_count;
 	unsigned int underlay_pipe_index;
@@ -206,9 +194,7 @@ struct resource_pool {
 	unsigned int timing_generator_count;
 	unsigned int mpcc_count;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	unsigned int writeback_pipe_count;
-#endif
 	/*
 	 * reserved clock source for DP
 	 */
@@ -226,9 +212,12 @@ struct resource_pool {
 
 	struct abm *abm;
 	struct dmcu *dmcu;
+	struct dmub_psr *psr;
 
 	const struct resource_funcs *funcs;
 	const struct resource_caps *res_cap;
+
+	struct ddc_service *oem_device;
 };
 
 struct dcn_fe_bandwidth {
@@ -238,9 +227,7 @@ struct dcn_fe_bandwidth {
 
 struct stream_resource {
 	struct output_pixel_processor *opp;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	struct display_stream_compressor *dsc;
-#endif
 	struct timing_generator *tg;
 	struct stream_encoder *stream_enc;
 	struct audio *audio;
@@ -249,12 +236,10 @@ struct stream_resource {
 	struct encoder_info_frame encoder_info_frame;
 
 	struct abm *abm;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	/* There are only (num_pipes+1)/2 groups. 0 means unassigned,
 	 * otherwise it's using group number 'gsl_group-1'
 	 */
 	uint8_t gsl_group;
-#endif
 };
 
 struct plane_resource {
@@ -306,17 +291,15 @@ struct pipe_ctx {
 	struct pipe_ctx *next_odm_pipe;
 	struct pipe_ctx *prev_odm_pipe;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
 	struct _vcs_dpi_display_dlg_regs_st dlg_regs;
 	struct _vcs_dpi_display_ttu_regs_st ttu_regs;
 	struct _vcs_dpi_display_rq_regs_st rq_regs;
 	struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
 #endif
 	union pipe_update_flags update_flags;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	struct dwbc *dwbc;
 	struct mcif_wb *mcif_wb;
-#endif
 };
 
 struct resource_context {
@@ -325,9 +308,7 @@ struct resource_context {
 	bool is_audio_acquired[MAX_PIPES];
 	uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES];
 	uint8_t dp_clock_source_ref_count;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	bool is_dsc_acquired[MAX_PIPES];
-#endif
 };
 
 struct dce_bw_output {
@@ -347,18 +328,14 @@ struct dce_bw_output {
 	int blackout_recovery_time_us;
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 struct dcn_bw_writeback {
 	struct mcif_arb_params mcif_wb_arb[MAX_DWB_PIPES];
 };
-#endif
 
 struct dcn_bw_output {
 	struct dc_clocks clk;
 	struct dcn_watermark_set watermarks;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	struct dcn_bw_writeback bw_writeback;
-#endif
 };
 
 union bw_output {
@@ -392,7 +369,7 @@ struct dc_state {
 
 	/* Note: these are big structures, do *not* put on stack! */
 	struct dm_pp_display_configuration pp_display_cfg;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
 	struct dcn_bw_internal_vars dcn_bw_vars;
 #endif
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 14716ba..de2d160 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -105,7 +105,7 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,
 bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
 		struct aux_payload *payload);
 
-enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc,
+uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc,
 		uint32_t timeout);
 
 void dal_ddc_service_write_scdc_data(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 045138d..8b1f0ce 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -28,8 +28,8 @@
 
 #define LINK_TRAINING_ATTEMPTS 4
 #define LINK_TRAINING_RETRY_DELAY 50 /* ms */
-#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 32000 /*us*/
-#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 400 /*us*/
+#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 3200 /*us*/
+#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
 
 struct dc_link;
 struct dc_stream_state;
@@ -57,10 +57,11 @@ void decide_link_settings(
 	struct dc_link_settings *link_setting);
 
 bool perform_link_training_with_retries(
-	struct dc_link *link,
 	const struct dc_link_settings *link_setting,
 	bool skip_video_pattern,
-	int attempts);
+	int attempts,
+	struct pipe_ctx *pipe_ctx,
+	enum signal_type signal);
 
 bool is_mst_supported(struct dc_link *link);
 
@@ -75,13 +76,13 @@ void dp_enable_mst_on_sink(struct dc_link *link, bool enable);
 enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
 void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+
 void dp_set_fec_ready(struct dc_link *link, bool ready);
 void dp_set_fec_enable(struct dc_link *link, bool enable);
 bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
 bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable);
 void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
 bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);
-#endif
 
 #endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h
similarity index 100%
rename from drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
rename to drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 026e6a2..ac530c0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -31,7 +31,6 @@
 #define DCN_MINIMUM_DISPCLK_Khz 100000
 #define DCN_MINIMUM_DPPCLK_Khz 100000
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 /* Constants */
 #define DDR4_DRAM_WIDTH   64
 #define WM_A 0
@@ -39,12 +38,10 @@
 #define WM_C 2
 #define WM_D 3
 #define WM_SET_COUNT 4
-#endif
 
 #define DCN_MINIMUM_DISPCLK_Khz 100000
 #define DCN_MINIMUM_DPPCLK_Khz 100000
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 /* Will these bw structures be ASIC specific? */
 
 #define MAX_NUM_DPM_LVL		8
@@ -154,7 +151,6 @@ struct clk_bw_params {
 	struct clk_limit_table clk_table;
 	struct wm_table wm_table;
 };
-#endif
 /* Public interfaces */
 
 struct clk_states {
@@ -195,9 +191,8 @@ struct clk_mgr {
 	bool psr_allow_active_cache;
 	int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figure out where this goes
 	int dentist_vco_freq_khz;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
+	struct clk_state_registers_and_bypass boot_snapshot;
 	struct clk_bw_params *bw_params;
-#endif
 };
 
 /* forward declarations */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index a17a771..862952c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -96,12 +96,10 @@ enum dentist_divider_range {
 	.MP1_SMN_C2PMSG_83 = mmMP1_SMN_C2PMSG_83, \
 	.MP1_SMN_C2PMSG_67 = mmMP1_SMN_C2PMSG_67
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 #define CLK_REG_LIST_NV10() \
 	SR(DENTIST_DISPCLK_CNTL), \
 	CLK_SRI(CLK3_CLK_PLL_REQ, CLK3, 0), \
 	CLK_SRI(CLK3_CLK2_DFS_CNTL, CLK3, 0)
-#endif
 
 #define CLK_SF(reg_name, field_name, post_fix)\
 	.field_name = reg_name ## __ ## field_name ## post_fix
@@ -120,7 +118,6 @@ enum dentist_divider_range {
 	CLK_SF(MP1_SMN_C2PMSG_83, CONTENT, mask_sh),\
 	CLK_SF(MP1_SMN_C2PMSG_91, CONTENT, mask_sh),
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 #define CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh) \
 	CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\
 	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\
@@ -130,7 +127,6 @@ enum dentist_divider_range {
 	CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh),\
 	CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_int, mask_sh),\
 	CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_frac, mask_sh)
-#endif
 
 #define CLK_REG_FIELD_LIST(type) \
 	type DPREFCLK_SRC_SEL; \
@@ -143,30 +139,24 @@ enum dentist_divider_range {
  ****************** Clock Manager Private Structures ***********************************
  ***************************************************************************************
  */
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 #define CLK20_REG_FIELD_LIST(type) \
 	type DENTIST_DPPCLK_WDIVIDER; \
 	type DENTIST_DPPCLK_CHG_DONE; \
 	type FbMult_int; \
 	type FbMult_frac;
-#endif
 
 #define VBIOS_SMU_REG_FIELD_LIST(type) \
 	type CONTENT;
 
 struct clk_mgr_shift {
 	CLK_REG_FIELD_LIST(uint8_t)
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	CLK20_REG_FIELD_LIST(uint8_t)
-#endif
 	VBIOS_SMU_REG_FIELD_LIST(uint32_t)
 };
 
 struct clk_mgr_mask {
 	CLK_REG_FIELD_LIST(uint32_t)
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	CLK20_REG_FIELD_LIST(uint32_t)
-#endif
 	VBIOS_SMU_REG_FIELD_LIST(uint32_t)
 };
 
@@ -174,10 +164,8 @@ struct clk_mgr_registers {
 	uint32_t DPREFCLK_CNTL;
 	uint32_t DENTIST_DISPCLK_CNTL;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	uint32_t CLK3_CLK2_DFS_CNTL;
 	uint32_t CLK3_CLK_PLL_REQ;
-#endif
 
 	uint32_t MP1_SMN_C2PMSG_67;
 	uint32_t MP1_SMN_C2PMSG_83;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index c81a17a..c0dc1d0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -52,7 +52,6 @@ struct dcn_hubbub_wm {
 	struct dcn_hubbub_wm_set sets[4];
 };
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 enum dcn_hubbub_page_table_depth {
 	DCN_PAGE_TABLE_DEPTH_1_LEVEL,
 	DCN_PAGE_TABLE_DEPTH_2_LEVEL,
@@ -101,13 +100,11 @@ struct hubbub_addr_config {
 	} default_addrs;
 };
 
-#endif
 struct hubbub_funcs {
 	void (*update_dchub)(
 			struct hubbub *hubbub,
 			struct dchub_init_data *dh_data);
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	int (*init_dchub_sys_ctx)(
 			struct hubbub *hubbub,
 			struct dcn_hubbub_phys_addr_config *pa_config);
@@ -116,7 +113,6 @@ struct hubbub_funcs {
 			struct dcn_hubbub_virt_addr_config *va_config,
 			int vmid);
 
-#endif
 	bool (*get_dcc_compression_cap)(struct hubbub *hubbub,
 			const struct dc_dcc_surface_param *input,
 			struct dc_surface_dcc_cap *output);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index c68f0ce..5315f1f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -52,6 +52,8 @@ struct dmcu {
 	enum dmcu_state dmcu_state;
 	struct dmcu_version dmcu_version;
 	unsigned int cached_wait_loop_number;
+	uint32_t psp_version;
+	bool auto_load_dmcu;
 };
 
 struct dmcu_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 474c719..45ef390 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -36,14 +36,10 @@ struct dpp {
 	struct dpp_caps *caps;
 	struct pwl_params regamma_params;
 	struct pwl_params degamma_params;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	struct dpp_cursor_attributes cur_attr;
-#endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	struct pwl_params shaper_params;
 	bool cm_bypass_mode;
-#endif
 };
 
 struct dpp_input_csc_matrix {
@@ -51,12 +47,31 @@ struct dpp_input_csc_matrix {
 	uint16_t regval[12];
 };
 
+static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = {
+	{COLOR_SPACE_SRGB,
+		{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+	{COLOR_SPACE_SRGB_LIMITED,
+		{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+	{COLOR_SPACE_YCBCR601,
+		{0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
+						0, 0x2000, 0x38b4, 0xe3a6} },
+	{COLOR_SPACE_YCBCR601_LIMITED,
+		{0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
+						0, 0x2568, 0x40de, 0xdd3a} },
+	{COLOR_SPACE_YCBCR709,
+		{0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
+						0x2000, 0x3b61, 0xe24f} },
+
+	{COLOR_SPACE_YCBCR709_LIMITED,
+		{0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
+						0x2568, 0x43ee, 0xdbb2} }
+};
+
 struct dpp_grph_csc_adjustment {
 	struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];
 	enum graphics_gamut_adjust_type gamut_adjust_type;
 };
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 struct cnv_color_keyer_params {
 	int color_keyer_en;
 	int color_keyer_mode;
@@ -82,7 +97,6 @@ struct cnv_alpha_2bit_lut {
 	int lut2;
 	int lut3;
 };
-#endif
 
 struct dcn_dpp_state {
 	uint32_t is_enabled;
@@ -190,12 +204,8 @@ struct dpp_funcs {
 			enum surface_pixel_format format,
 			enum expansion_mode mode,
 			struct dc_csc_transform input_csc_color_matrix,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 			enum dc_color_space input_color_space,
 			struct cnv_alpha_2bit_lut *alpha_2bit_lut);
-#else
-			enum dc_color_space input_color_space);
-#endif
 
 	void (*dpp_full_bypass)(struct dpp *dpp_base);
 
@@ -224,7 +234,6 @@ struct dpp_funcs {
 			bool dppclk_div,
 			bool enable);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	bool (*dpp_program_blnd_lut)(
 			struct dpp *dpp,
 			const struct pwl_params *params);
@@ -237,7 +246,6 @@ struct dpp_funcs {
 	void (*dpp_cnv_set_alpha_keyer)(
 			struct dpp *dpp_base,
 			struct cnv_color_keyer_params *color_keyer);
-#endif
 };
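
The dpp_input_csc_matrix table added earlier in this dpp.h diff stores, per input color space, twelve fixed-point register values; judging from the sRGB identity entries (0x2000 on the diagonal), they appear to form a 3x4 matrix in row-major order, i.e. a 3x3 conversion plus a per-row offset. A small lookup sketch under that assumption follows; the types and names here are illustrative only, not the driver's table or API.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in; the driver keys its table by enum dc_color_space. */
enum example_color_space { EX_SRGB, EX_YCBCR601, EX_YCBCR709 };

struct example_input_csc {
	enum example_color_space color_space;
	uint16_t regval[12];	/* assumed 3x4 layout: 3 coefficients + offset per row */
};

static const struct example_input_csc example_table[] = {
	{ EX_SRGB,     {0x2000, 0, 0, 0,  0, 0x2000, 0, 0,  0, 0, 0x2000, 0} },
	{ EX_YCBCR709, {0x3265, 0x2000, 0, 0xe6ce,  0xf105, 0x2000, 0xfa01, 0xa7d,
			0, 0x2000, 0x3b61, 0xe24f} },
};

/* Return the regval block for a color space, or NULL if the table has no entry. */
static const uint16_t *example_find_csc(enum example_color_space cs)
{
	size_t i;

	for (i = 0; i < sizeof(example_table) / sizeof(example_table[0]); i++)
		if (example_table[i].color_space == cs)
			return example_table[i].regval;
	return NULL;
}

int main(void)
{
	const uint16_t *m = example_find_csc(EX_YCBCR709);

	if (m)
		printf("row 0: %#x %#x %#x offset %#x\n", m[0], m[1], m[2], m[3]);
	return 0;
}
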
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index c6ff3d7..c597400 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -22,7 +22,6 @@
  * Authors: AMD
  *
  */
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 #ifndef __DAL_DSC_H__
 #define __DAL_DSC_H__
 
@@ -98,4 +97,3 @@ struct dsc_funcs {
 };
 
 #endif
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index ff1a07b..459f95f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -51,20 +51,15 @@ enum dwb_source {
 	dwb_src_otg3,		/* for DCN1.x/DCN2.x */
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 /* DCN1.x, DCN2.x support 2 pipes */
-#else
-/* DCN1.x supports 2 pipes */
-#endif
 enum dwb_pipe {
 	dwb_pipe0 = 0,
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	dwb_pipe1,
 #endif
 	dwb_pipe_max_num,
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 enum dwb_frame_capture_enable {
 	DWB_FRAME_CAPTURE_DISABLE = 0,
 	DWB_FRAME_CAPTURE_ENABLE = 1,
@@ -77,9 +72,7 @@ enum wbscl_coef_filter_type_sel {
 	WBSCL_COEF_CHROMA_HORZ_FILTER = 3
 };
 
-#endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 struct dwb_warmup_params {
 	bool	warmup_en;	/* false: normal mode, true: enable pattern generator */
 	bool	warmup_mode;	/* false: 420, true: 444 */
@@ -88,7 +81,6 @@ struct dwb_warmup_params {
 	int	warmup_width;	/* Pattern width (pixels) */
 	int	warmup_height;	/* Pattern height (lines) */
 };
-#endif
 
 struct dwb_caps {
 	enum dce_version hw_version;	/* DCN engine version. */
@@ -121,7 +113,8 @@ struct dwbc {
 	int wb_src_plane_inst;/*hubp, mpcc, inst*/
 	bool update_privacymask;
 	uint32_t mask_id;
-
+	int otg_inst;
+	bool mvc_cfg;
 };
 
 struct dwbc_funcs {
@@ -150,13 +143,11 @@ struct dwbc_funcs {
 		struct dwbc *dwbc,
 		bool is_new_content);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 
 	void (*set_warmup)(
 		struct dwbc *dwbc,
 		struct dwb_warmup_params *warmup_params);
 
-#endif
 
 	bool (*get_dwb_status)(
 		struct dwbc *dwbc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 809b62b..2cb8466 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -38,9 +38,7 @@ enum cursor_pitch {
 };
 
 enum cursor_lines_per_chunk {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	CURSOR_LINE_PER_CHUNK_1 = 0, /* new for DCN2 */
-#endif
 	CURSOR_LINE_PER_CHUNK_2 = 1,
 	CURSOR_LINE_PER_CHUNK_4,
 	CURSOR_LINE_PER_CHUNK_8,
@@ -65,6 +63,26 @@ struct hubp {
 	bool power_gated;
 };
 
+struct surface_flip_registers {
+	uint32_t DCSURF_SURFACE_CONTROL;
+	uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH;
+	uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS;
+	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
+	uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C;
+	uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C;
+	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+	uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH;
+	uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS;
+	uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH;
+	uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS;
+	bool tmz_surface;
+	bool immediate;
+	uint8_t vmid;
+	bool grph_stereo;
+};
+
 struct hubp_funcs {
 	void (*hubp_setup)(
 			struct hubp *hubp,
@@ -86,6 +104,9 @@ struct hubp_funcs {
 			const struct rect *viewport,
 			const struct rect *viewport_c);
 
+	void (*apply_PLAT_54186_wa)(struct hubp *hubp,
+			const struct dc_plane_address *address);
+
 	bool (*hubp_program_surface_flip_and_addr)(
 		struct hubp *hubp,
 		const struct dc_plane_address *address,
@@ -139,7 +160,6 @@ struct hubp_funcs {
 	unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
 	void (*hubp_init)(struct hubp *hubp);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	void (*dmdata_set_attributes)(
 			struct hubp *hubp,
 			const struct dc_dmdata_attributes *attr);
@@ -159,7 +179,13 @@ struct hubp_funcs {
 	void (*hubp_set_flip_control_surface_gsl)(
 		struct hubp *hubp,
 		bool enable);
-#endif
+
+	void (*validate_dml_output)(
+			struct hubp *hubp,
+			struct dc_context *ctx,
+			struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
+			struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
+			struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr);
 
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index f82365e..75d4190 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -36,9 +36,7 @@
 
 #define MAX_AUDIOS 7
 #define MAX_PIPES 6
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define MAX_DWB_PIPES	1
-#endif
 
 struct gamma_curve {
 	uint32_t offset;
@@ -81,7 +79,6 @@ struct pwl_result_data {
 	uint32_t delta_blue_reg;
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 struct dc_rgb {
 	uint32_t red;
 	uint32_t green;
@@ -110,7 +107,6 @@ struct tetrahedral_params {
 	bool use_12bits;
 
 };
-#endif
 
 /* arr_curve_points - regamma regions/segments specification
  * arr_points - beginning and end point specified separately (only one on DCE)
@@ -195,13 +191,11 @@ enum opp_regamma {
 	OPP_REGAMMA_USER
 };
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 enum optc_dsc_mode {
 	OPTC_DSC_DISABLED = 0,
 	OPTC_DSC_ENABLED_444 = 1, /* 'RGB 444' or 'Simple YCbCr 4:2:2' (4:2:2 upsampled to 4:4:4) */
 	OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED = 2 /* Native 4:2:2 or 4:2:0 */
 };
-#endif
 
 struct dc_bias_and_scale {
 	uint16_t scale_red;
@@ -224,12 +218,8 @@ enum test_pattern_mode {
 	TEST_PATTERN_MODE_VERTICALBARS,
 	TEST_PATTERN_MODE_HORIZONTALBARS,
 	TEST_PATTERN_MODE_SINGLERAMP_RGB,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	TEST_PATTERN_MODE_DUALRAMP_RGB,
 	TEST_PATTERN_MODE_XR_BIAS_RGB
-#else
-	TEST_PATTERN_MODE_DUALRAMP_RGB
-#endif
 };
 
 enum test_pattern_color_format {
@@ -255,6 +245,13 @@ enum controller_dp_test_pattern {
 	CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR
 };
 
+enum controller_dp_color_space {
+	CONTROLLER_DP_COLOR_SPACE_RGB,
+	CONTROLLER_DP_COLOR_SPACE_YCBCR601,
+	CONTROLLER_DP_COLOR_SPACE_YCBCR709,
+	CONTROLLER_DP_COLOR_SPACE_UDEFINED
+};
+
 enum dc_lut_mode {
 	LUT_BYPASS,
 	LUT_RAM_A,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index b219092..fb748f0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -113,26 +113,21 @@ struct link_encoder {
 	struct encoder_feature_support features;
 	enum transmitter transmitter;
 	enum hpd_source_id hpd_source;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	bool usbc_combo_phy;
-#endif
 };
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 struct link_enc_state {
 
 		uint32_t dphy_fec_en;
 		uint32_t dphy_fec_ready_shadow;
 		uint32_t dphy_fec_active_status;
+		uint32_t dp_link_training_complete;
 
 };
-#endif
 
 struct link_encoder_funcs {
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	void (*read_state)(
 			struct link_encoder *enc, struct link_enc_state *s);
-#endif
 	bool (*validate_output_with_stream)(
 		struct link_encoder *enc, const struct dc_stream_state *stream);
 	void (*hw_init)(struct link_encoder *enc);
@@ -174,7 +169,6 @@ struct link_encoder_funcs {
 	unsigned int (*get_dig_frontend)(struct link_encoder *enc);
 	void (*destroy)(struct link_encoder **enc);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	void (*fec_set_enable)(struct link_encoder *enc,
 		bool enable);
 
@@ -182,7 +176,6 @@ struct link_encoder_funcs {
 		bool ready);
 
 	bool (*fec_is_active)(struct link_encoder *enc);
-#endif
 	bool (*is_in_alt_mode) (struct link_encoder *enc);
 
 	void (*get_max_link_cap)(struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 67b610d..2e2310f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -40,11 +40,9 @@ struct cstate_pstate_watermarks_st {
 struct dcn_watermarks {
 	uint32_t pte_meta_urgent_ns;
 	uint32_t urgent_ns;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	uint32_t frac_urg_bw_nom;
 	uint32_t frac_urg_bw_flip;
 	int32_t urgent_latency_ns;
-#endif
 	struct cstate_pstate_watermarks_st cstate_pstate;
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 58826be..094afc4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -31,9 +31,7 @@
 #define MAX_MPCC 6
 #define MAX_OPP 6
 
-#if   defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define MAX_DWB		1
-#endif
 
 enum mpc_output_csc_mode {
 	MPC_OUTPUT_CSC_DISABLE = 0,
@@ -66,14 +64,12 @@ struct mpcc_blnd_cfg {
 	int global_alpha;
 	bool overlap_only;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	/* MPCC top/bottom gain settings */
 	int bottom_gain_mode;
 	int background_color_bpc;
 	int top_gain;
 	int bottom_inside_gain;
 	int bottom_outside_gain;
-#endif
 };
 
 struct mpcc_sm_cfg {
@@ -90,7 +86,6 @@ struct mpcc_sm_cfg {
 	int force_next_field_polarity;
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 struct mpc_denorm_clamp {
 	int clamp_max_r_cr;
 	int clamp_min_r_cr;
@@ -99,7 +94,6 @@ struct mpc_denorm_clamp {
 	int clamp_max_b_cb;
 	int clamp_min_b_cb;
 };
-#endif
 
 /*
  * MPCC connection and blending configuration for a single MPCC instance.
@@ -126,10 +120,8 @@ struct mpc {
 	struct dc_context *ctx;
 
 	struct mpcc mpcc_array[MAX_MPCC];
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	struct pwl_params blender_params;
 	bool cm_bypass_mode;
-#endif
 };
 
 struct mpcc_state {
@@ -230,7 +222,6 @@ struct mpc_funcs {
 		struct mpc *mpc,
 		struct mpc_tree *tree);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	void (*set_denorm)(struct mpc *mpc,
 			int opp_id,
 			enum dc_color_depth output_depth);
@@ -258,7 +249,6 @@ struct mpc_funcs {
 			struct mpc *mpc,
 			int mpcc_id,
 			bool power_on);
-#endif
 
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 18def2b..7575564 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -263,9 +263,7 @@ struct oppbuf_params {
 	enum oppbuf_display_segmentation mso_segmentation;
 	uint32_t mso_overlap_pixel_num;
 	uint32_t pixel_repetition;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	uint32_t num_segment_padded_pixels;
-#endif
 };
 
 struct opp_funcs {
@@ -305,10 +303,10 @@ struct opp_funcs {
 			struct output_pixel_processor *opp,
 			bool enable);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	void (*opp_set_disp_pattern_generator)(
 			struct output_pixel_processor *opp,
 			enum controller_dp_test_pattern test_pattern,
+			enum controller_dp_color_space color_space,
 			enum dc_color_depth color_depth,
 			const struct tg_color *solid_color,
 			int width,
@@ -324,7 +322,6 @@ struct opp_funcs {
 	void (*opp_program_left_edge_extra_pixel)(
 			struct output_pixel_processor *opp,
 			bool count);
-#endif
 
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 6305e38..351b387 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -65,13 +65,11 @@ struct audio_clock_info {
 	uint32_t cts_48khz;
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 enum dynamic_metadata_mode {
 	dmdata_dp,
 	dmdata_hdmi,
 	dmdata_dolby_vision
 };
-#endif
 
 struct encoder_info_frame {
 	/* auxiliary video information */
@@ -90,9 +88,7 @@ struct encoder_info_frame {
 struct encoder_unblank_param {
 	struct dc_link_settings link_settings;
 	struct dc_crtc_timing timing;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	int opp_cnt;
-#endif
 };
 
 struct encoder_set_dp_phy_pattern_param {
@@ -109,7 +105,6 @@ struct stream_encoder {
 	enum engine_id id;
 };
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 struct enc_state {
 	uint32_t dsc_mode;  // DISABLED  0; 1 or 2 indicate enabled state.
 	uint32_t dsc_slice_width;
@@ -119,13 +114,13 @@ struct enc_state {
 	uint32_t sec_gsp_pps_enable;
 	uint32_t sec_stream_enable;
 };
-#endif
 
 struct stream_encoder_funcs {
 	void (*dp_set_stream_attribute)(
 		struct stream_encoder *enc,
 		struct dc_crtc_timing *crtc_timing,
 		enum dc_color_space output_color_space,
+		bool use_vsc_sdp_for_colorimetry,
 		uint32_t enable_sdp_splitting);
 
 	void (*hdmi_set_stream_attribute)(
@@ -219,8 +214,6 @@ struct stream_encoder_funcs {
 		enum dc_pixel_encoding *encoding,
 		enum dc_color_depth *depth);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s);
 
 	void (*dp_set_dsc_config)(
@@ -232,7 +225,6 @@ struct stream_encoder_funcs {
 	void (*dp_set_dsc_pps_info_packet)(struct stream_encoder *enc,
 				bool enable,
 				uint8_t *dsc_packed_pps);
-#endif
 
 	void (*set_dynamic_metadata)(struct stream_encoder *enc,
 			bool enable,
@@ -242,7 +234,6 @@ struct stream_encoder_funcs {
 	void (*dp_set_odm_combine)(
 		struct stream_encoder *enc,
 		bool odm_combine);
-#endif
 };
 
 #endif /* STREAM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 27c73ca..e5e7d94 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -195,10 +195,8 @@ struct timing_generator_funcs {
 	void (*lock)(struct timing_generator *tg);
 	void (*lock_doublebuffer_disable)(struct timing_generator *tg);
 	void (*lock_doublebuffer_enable)(struct timing_generator *tg);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	void(*triplebuffer_unlock)(struct timing_generator *tg);
 	void(*triplebuffer_lock)(struct timing_generator *tg);
-#endif
 	void (*enable_reset_trigger)(struct timing_generator *tg,
 				     int source_tg_inst);
 	void (*enable_crtc_reset)(struct timing_generator *tg,
@@ -210,7 +208,8 @@ struct timing_generator_funcs {
 					bool enable, const struct dc_crtc_timing *timing);
 	void (*set_drr)(struct timing_generator *tg, const struct drr_params *params);
 	void (*set_static_screen_control)(struct timing_generator *tg,
-							uint32_t value);
+						uint32_t event_triggers,
+						uint32_t num_frames);
 	void (*set_test_pattern)(
 		struct timing_generator *tg,
 		enum controller_dp_test_pattern test_pattern,
@@ -235,7 +234,6 @@ struct timing_generator_funcs {
 	bool (*is_optc_underflow_occurred)(struct timing_generator *tg);
 	void (*clear_optc_underflow)(struct timing_generator *tg);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	void (*set_dwb_source)(struct timing_generator *optc,
 		uint32_t dwb_pipe_inst);
 
@@ -243,7 +241,6 @@ struct timing_generator_funcs {
 			uint32_t *num_of_input_segments,
 			uint32_t *seg0_src_sel,
 			uint32_t *seg1_src_sel);
-#endif
 
 	/**
 	 * Configure CRCs for the given timing generator. Return false if TG is
@@ -267,13 +264,10 @@ struct timing_generator_funcs {
 	void (*set_vtg_params)(struct timing_generator *optc,
 			const struct dc_crtc_timing *dc_crtc_timing);
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	void (*set_dsc_config)(struct timing_generator *optc,
 			       enum optc_dsc_mode dsc_mode,
 			       uint32_t dsc_bytes_per_pixel,
 			       uint32_t dsc_slice_width);
-#endif
 	void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing);
 	void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt,
 			struct dc_crtc_timing *timing);
@@ -281,7 +275,6 @@ struct timing_generator_funcs {
 	void (*set_gsl_source_select)(struct timing_generator *optc,
 			int group_idx,
 			uint32_t gsl_ready_signal);
-#endif
 };
 
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index d39c1e1..209118f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,326 +32,160 @@
 #include "inc/hw/link_encoder.h"
 #include "core_status.h"
 
-enum pipe_gating_control {
-	PIPE_GATING_CONTROL_DISABLE = 0,
-	PIPE_GATING_CONTROL_ENABLE,
-	PIPE_GATING_CONTROL_INIT
-};
-
 enum vline_select {
 	VLINE0,
 	VLINE1
 };
 
-struct dce_hwseq_wa {
-	bool blnd_crtc_trigger;
-	bool DEGVIDCN10_253;
-	bool false_optc_underflow;
-	bool DEGVIDCN10_254;
-	bool DEGVIDCN21;
-};
-
-struct hwseq_wa_state {
-	bool DEGVIDCN10_253_applied;
-};
-
-struct dce_hwseq {
-	struct dc_context *ctx;
-	const struct dce_hwseq_registers *regs;
-	const struct dce_hwseq_shift *shifts;
-	const struct dce_hwseq_mask *masks;
-	struct dce_hwseq_wa wa;
-	struct hwseq_wa_state wa_state;
-};
-
 struct pipe_ctx;
 struct dc_state;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 struct dc_stream_status;
 struct dc_writeback_info;
-#endif
 struct dchub_init_data;
-struct dc_static_screen_events;
+struct dc_static_screen_params;
 struct resource_pool;
-struct resource_context;
-struct stream_resource;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 struct dc_phy_addr_space_config;
 struct dc_virtual_addr_space_config;
-#endif
-struct hubp;
 struct dpp;
+struct dce_hwseq;
 
 struct hw_sequencer_funcs {
+	/* Embedded Display Related */
+	void (*edp_power_control)(struct dc_link *link, bool enable);
+	void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
 
-	void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
-	void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
+	/* Pipe Programming Related */
 	void (*init_hw)(struct dc *dc);
-
-	void (*init_pipes)(struct dc *dc, struct dc_state *context);
-
-	enum dc_status (*apply_ctx_to_hw)(
-			struct dc *dc, struct dc_state *context);
-
-	void (*reset_hw_ctx_wrap)(
-			struct dc *dc, struct dc_state *context);
-
-	void (*apply_ctx_for_surface)(
-			struct dc *dc,
-			const struct dc_stream_state *stream,
-			int num_planes,
+	void (*enable_accelerated_mode)(struct dc *dc,
 			struct dc_state *context);
-
-	void (*program_gamut_remap)(
-			struct pipe_ctx *pipe_ctx);
-
-	void (*program_output_csc)(struct dc *dc,
-			struct pipe_ctx *pipe_ctx,
-			enum dc_color_space colorspace,
-			uint16_t *matrix,
-			int opp_id);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
-	void (*program_front_end_for_ctx)(
-			struct dc *dc,
+	enum dc_status (*apply_ctx_to_hw)(struct dc *dc,
 			struct dc_state *context);
-	void (*program_triplebuffer)(
-		const struct dc *dc,
-		struct pipe_ctx *pipe_ctx,
-		bool enableTripleBuffer);
-	void (*set_flip_control_gsl)(
-		struct pipe_ctx *pipe_ctx,
-		bool flip_immediate);
-#endif
-
-	void (*update_plane_addr)(
-		const struct dc *dc,
-		struct pipe_ctx *pipe_ctx);
-
-	void (*plane_atomic_disconnect)(
-		struct dc *dc,
-		struct pipe_ctx *pipe_ctx);
-
-	void (*update_dchub)(
-		struct dce_hwseq *hws,
-		struct dchub_init_data *dh_data);
-
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
-	int (*init_sys_ctx)(
-			struct dce_hwseq *hws,
-			struct dc *dc,
-			struct dc_phy_addr_space_config *pa_config);
-	void (*init_vm_ctx)(
-			struct dce_hwseq *hws,
-			struct dc *dc,
-			struct dc_virtual_addr_space_config *va_config,
-			int vmid);
-#endif
-	void (*update_mpcc)(
-		struct dc *dc,
-		struct pipe_ctx *pipe_ctx);
-
-	void (*update_pending_status)(
-			struct pipe_ctx *pipe_ctx);
-
-	bool (*set_input_transfer_func)(
-				struct pipe_ctx *pipe_ctx,
-				const struct dc_plane_state *plane_state);
-
-	bool (*set_output_transfer_func)(
-				struct pipe_ctx *pipe_ctx,
-				const struct dc_stream_state *stream);
-
-	void (*power_down)(struct dc *dc);
-
-	void (*enable_accelerated_mode)(struct dc *dc, struct dc_state *context);
-
-	void (*enable_timing_synchronization)(
-			struct dc *dc,
-			int group_index,
-			int group_size,
-			struct pipe_ctx *grouped_pipes[]);
-
-	void (*enable_per_frame_crtc_position_reset)(
-			struct dc *dc,
-			int group_size,
-			struct pipe_ctx *grouped_pipes[]);
-
-	void (*enable_display_pipe_clock_gating)(
-					struct dc_context *ctx,
-					bool clock_gating);
-
-	bool (*enable_display_power_gating)(
-					struct dc *dc,
-					uint8_t controller_id,
-					struct dc_bios *dcb,
-					enum pipe_gating_control power_gating);
-
 	void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
-	void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
-
-	void (*send_immediate_sdp_message)(
-				struct pipe_ctx *pipe_ctx,
-				const uint8_t *custom_sdp_message,
-				unsigned int sdp_message_size);
-
-	void (*enable_stream)(struct pipe_ctx *pipe_ctx);
-
-	void (*disable_stream)(struct pipe_ctx *pipe_ctx);
-
-	void (*unblank_stream)(struct pipe_ctx *pipe_ctx,
-			struct dc_link_settings *link_settings);
-
-	void (*blank_stream)(struct pipe_ctx *pipe_ctx);
-
-	void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx);
-
-	void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx);
-
-	void (*pipe_control_lock)(
-				struct dc *dc,
-				struct pipe_ctx *pipe,
-				bool lock);
-
-	void (*pipe_control_lock_global)(
-				struct dc *dc,
-				struct pipe_ctx *pipe,
-				bool lock);
-	void (*blank_pixel_data)(
-			struct dc *dc,
-			struct pipe_ctx *pipe_ctx,
-			bool blank);
-
-	void (*prepare_bandwidth)(
-			struct dc *dc,
+	void (*apply_ctx_for_surface)(struct dc *dc,
+			const struct dc_stream_state *stream,
+			int num_planes, struct dc_state *context);
+	void (*program_front_end_for_ctx)(struct dc *dc,
 			struct dc_state *context);
-	void (*optimize_bandwidth)(
-			struct dc *dc,
-			struct dc_state *context);
-
-	void (*exit_optimized_pwr_state)(
-			const struct dc *dc,
-			struct dc_state *context);
-	void (*optimize_pwr_state)(
-			const struct dc *dc,
-			struct dc_state *context);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
-	bool (*update_bandwidth)(
-			struct dc *dc,
-			struct dc_state *context);
-	void (*program_dmdata_engine)(struct pipe_ctx *pipe_ctx);
-	bool (*dmdata_status_done)(struct pipe_ctx *pipe_ctx);
-#endif
-
-	void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
-			unsigned int vmin, unsigned int vmax,
-			unsigned int vmid, unsigned int vmid_frame_number);
-
-	void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
-			struct crtc_position *position);
-
-	void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
-			int num_pipes, const struct dc_static_screen_events *events);
-
-	enum dc_status (*enable_stream_timing)(
-			struct pipe_ctx *pipe_ctx,
-			struct dc_state *context,
-			struct dc *dc);
-
-	void (*setup_stereo)(
-			struct pipe_ctx *pipe_ctx,
-			struct dc *dc);
-
-	void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable);
-
-	void (*log_hw_state)(struct dc *dc,
-		struct dc_log_buffer_ctx *log_ctx);
-	void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask);
-	void (*clear_status_bits)(struct dc *dc, unsigned int mask);
-
+	void (*update_plane_addr)(const struct dc *dc,
+			struct pipe_ctx *pipe_ctx);
+	void (*update_dchub)(struct dce_hwseq *hws,
+			struct dchub_init_data *dh_data);
 	void (*wait_for_mpcc_disconnect)(struct dc *dc,
 			struct resource_pool *res_pool,
 			struct pipe_ctx *pipe_ctx);
+	void (*program_triplebuffer)(const struct dc *dc,
+		struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
+	void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
 
-	void (*edp_power_control)(
-			struct dc_link *link,
-			bool enable);
-	void (*edp_backlight_control)(
-			struct dc_link *link,
-			bool enable);
-	void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
+	/* Pipe Lock Related */
+	void (*pipe_control_lock_global)(struct dc *dc,
+			struct pipe_ctx *pipe, bool lock);
+	void (*pipe_control_lock)(struct dc *dc,
+			struct pipe_ctx *pipe, bool lock);
+	void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
+			bool flip_immediate);
 
+	/* Timing Related */
+	void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
+			struct crtc_position *position);
+	int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx);
+	void (*enable_per_frame_crtc_position_reset)(struct dc *dc,
+			int group_size, struct pipe_ctx *grouped_pipes[]);
+	void (*enable_timing_synchronization)(struct dc *dc,
+			int group_index, int group_size,
+			struct pipe_ctx *grouped_pipes[]);
+	void (*setup_periodic_interrupt)(struct dc *dc,
+			struct pipe_ctx *pipe_ctx,
+			enum vline_select vline);
+	void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
+			unsigned int vmin, unsigned int vmax,
+			unsigned int vmid, unsigned int vmid_frame_number);
+	void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
+			int num_pipes,
+			const struct dc_static_screen_params *events);
+
+	/* Stream Related */
+	void (*enable_stream)(struct pipe_ctx *pipe_ctx);
+	void (*disable_stream)(struct pipe_ctx *pipe_ctx);
+	void (*blank_stream)(struct pipe_ctx *pipe_ctx);
+	void (*unblank_stream)(struct pipe_ctx *pipe_ctx,
+			struct dc_link_settings *link_settings);
+
+	/* Bandwidth Related */
+	void (*prepare_bandwidth)(struct dc *dc, struct dc_state *context);
+	bool (*update_bandwidth)(struct dc *dc, struct dc_state *context);
+	void (*optimize_bandwidth)(struct dc *dc, struct dc_state *context);
+
+	/* Infopacket Related */
+	void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable);
+	void (*send_immediate_sdp_message)(
+			struct pipe_ctx *pipe_ctx,
+			const uint8_t *custom_sdp_message,
+			unsigned int sdp_message_size);
+	void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
+	void (*set_dmdata_attributes)(struct pipe_ctx *pipe);
+	void (*program_dmdata_engine)(struct pipe_ctx *pipe_ctx);
+	bool (*dmdata_status_done)(struct pipe_ctx *pipe_ctx);
+
+	/* Cursor Related */
 	void (*set_cursor_position)(struct pipe_ctx *pipe);
 	void (*set_cursor_attribute)(struct pipe_ctx *pipe);
 	void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
 
-	void (*setup_periodic_interrupt)(struct pipe_ctx *pipe_ctx, enum vline_select vline);
-	void (*setup_vupdate_interrupt)(struct pipe_ctx *pipe_ctx);
-	bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+	/* Colour Related */
+	void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx);
+	void (*program_output_csc)(struct dc *dc, struct pipe_ctx *pipe_ctx,
+			enum dc_color_space colorspace,
+			uint16_t *matrix, int opp_id);
 
-	void (*init_blank)(struct dc *dc, struct timing_generator *tg);
-	void (*disable_vga)(struct dce_hwseq *hws);
-	void (*bios_golden_init)(struct dc *dc);
-	void (*plane_atomic_power_down)(struct dc *dc,
-			struct dpp *dpp,
-			struct hubp *hubp);
-
-	void (*plane_atomic_disable)(
-			struct dc *dc, struct pipe_ctx *pipe_ctx);
-
-	void (*enable_power_gating_plane)(
-		struct dce_hwseq *hws,
-		bool enable);
-
-	void (*dpp_pg_control)(
-			struct dce_hwseq *hws,
-			unsigned int dpp_inst,
-			bool power_on);
-
-	void (*hubp_pg_control)(
-			struct dce_hwseq *hws,
-			unsigned int hubp_inst,
-			bool power_on);
-
-	void (*dsc_pg_control)(
-			struct dce_hwseq *hws,
-			unsigned int dsc_inst,
-			bool power_on);
-
-
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
-	void (*update_odm)(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
-	void (*program_all_writeback_pipes_in_tree)(
+	/* VM Related */
+	int (*init_sys_ctx)(struct dce_hwseq *hws,
 			struct dc *dc,
-			const struct dc_stream_state *stream,
-			struct dc_state *context);
+			struct dc_phy_addr_space_config *pa_config);
+	void (*init_vm_ctx)(struct dce_hwseq *hws,
+			struct dc *dc,
+			struct dc_virtual_addr_space_config *va_config,
+			int vmid);
+
+	/* Writeback Related */
 	void (*update_writeback)(struct dc *dc,
-			const struct dc_stream_status *stream_status,
 			struct dc_writeback_info *wb_info,
 			struct dc_state *context);
 	void (*enable_writeback)(struct dc *dc,
-			const struct dc_stream_status *stream_status,
 			struct dc_writeback_info *wb_info,
 			struct dc_state *context);
 	void (*disable_writeback)(struct dc *dc,
 			unsigned int dwb_pipe_inst);
-#endif
+
+	bool (*mmhubbub_warmup)(struct dc *dc,
+			unsigned int num_dwb,
+			struct dc_writeback_info *wb_info);
+
+	/* Clock Related */
 	enum dc_status (*set_clock)(struct dc *dc,
 			enum dc_clock_type clock_type,
-			uint32_t clk_khz,
-			uint32_t stepping);
-
-	void (*get_clock)(struct dc *dc,
-			enum dc_clock_type clock_type,
+			uint32_t clk_khz, uint32_t stepping);
+	void (*get_clock)(struct dc *dc, enum dc_clock_type clock_type,
 			struct dc_clock_config *clock_cfg);
+	void (*optimize_pwr_state)(const struct dc *dc,
+			struct dc_state *context);
+	void (*exit_optimized_pwr_state)(const struct dc *dc,
+			struct dc_state *context);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
-	bool (*s0i3_golden_init_wa)(struct dc *dc);
-#endif
+	/* Audio Related */
+	void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx);
+	void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx);
+
+	/* Stereo 3D Related */
+	void (*setup_stereo)(struct pipe_ctx *pipe_ctx, struct dc *dc);
+
+	/* HW State Logging Related */
+	void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx);
+	void (*get_hw_state)(struct dc *dc, char *pBuf,
+			unsigned int bufSize, unsigned int mask);
+	void (*clear_status_bits)(struct dc *dc, unsigned int mask);
+
+
 };
 
 void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
new file mode 100644
index 0000000..ecf5663
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HW_SEQUENCER_PRIVATE_H__
+#define __DC_HW_SEQUENCER_PRIVATE_H__
+
+#include "dc_types.h"
+
+enum pipe_gating_control {
+	PIPE_GATING_CONTROL_DISABLE = 0,
+	PIPE_GATING_CONTROL_ENABLE,
+	PIPE_GATING_CONTROL_INIT
+};
+
+struct dce_hwseq_wa {
+	bool blnd_crtc_trigger;
+	bool DEGVIDCN10_253;
+	bool false_optc_underflow;
+	bool DEGVIDCN10_254;
+	bool DEGVIDCN21;
+};
+
+struct hwseq_wa_state {
+	bool DEGVIDCN10_253_applied;
+};
+
+struct pipe_ctx;
+struct dc_state;
+struct dc_stream_status;
+struct dc_writeback_info;
+struct dchub_init_data;
+struct dc_static_screen_params;
+struct resource_pool;
+struct resource_context;
+struct stream_resource;
+struct dc_phy_addr_space_config;
+struct dc_virtual_addr_space_config;
+struct hubp;
+struct dpp;
+struct dce_hwseq;
+struct timing_generator;
+struct tg_color;
+struct output_pixel_processor;
+
+struct hwseq_private_funcs {
+
+	void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+	void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+	void (*init_pipes)(struct dc *dc, struct dc_state *context);
+	void (*reset_hw_ctx_wrap)(struct dc *dc, struct dc_state *context);
+	void (*update_plane_addr)(const struct dc *dc,
+			struct pipe_ctx *pipe_ctx);
+	void (*plane_atomic_disconnect)(struct dc *dc,
+			struct pipe_ctx *pipe_ctx);
+	void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+	bool (*set_input_transfer_func)(struct dc *dc,
+				struct pipe_ctx *pipe_ctx,
+				const struct dc_plane_state *plane_state);
+	bool (*set_output_transfer_func)(struct dc *dc,
+				struct pipe_ctx *pipe_ctx,
+				const struct dc_stream_state *stream);
+	void (*power_down)(struct dc *dc);
+	void (*enable_display_pipe_clock_gating)(struct dc_context *ctx,
+					bool clock_gating);
+	bool (*enable_display_power_gating)(struct dc *dc,
+					uint8_t controller_id,
+					struct dc_bios *dcb,
+					enum pipe_gating_control power_gating);
+	void (*blank_pixel_data)(struct dc *dc,
+			struct pipe_ctx *pipe_ctx,
+			bool blank);
+	enum dc_status (*enable_stream_timing)(
+			struct pipe_ctx *pipe_ctx,
+			struct dc_state *context,
+			struct dc *dc);
+	void (*edp_backlight_control)(struct dc_link *link,
+			bool enable);
+	void (*setup_vupdate_interrupt)(struct dc *dc,
+			struct pipe_ctx *pipe_ctx);
+	bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+	void (*init_blank)(struct dc *dc, struct timing_generator *tg);
+	void (*disable_vga)(struct dce_hwseq *hws);
+	void (*bios_golden_init)(struct dc *dc);
+	void (*plane_atomic_power_down)(struct dc *dc,
+			struct dpp *dpp,
+			struct hubp *hubp);
+	void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+	void (*enable_power_gating_plane)(struct dce_hwseq *hws,
+		bool enable);
+	void (*dpp_pg_control)(struct dce_hwseq *hws,
+			unsigned int dpp_inst,
+			bool power_on);
+	void (*hubp_pg_control)(struct dce_hwseq *hws,
+			unsigned int hubp_inst,
+			bool power_on);
+	void (*dsc_pg_control)(struct dce_hwseq *hws,
+			unsigned int dsc_inst,
+			bool power_on);
+	void (*update_odm)(struct dc *dc, struct dc_state *context,
+			struct pipe_ctx *pipe_ctx);
+	void (*program_all_writeback_pipes_in_tree)(struct dc *dc,
+			const struct dc_stream_state *stream,
+			struct dc_state *context);
+	bool (*s0i3_golden_init_wa)(struct dc *dc);
+	void (*get_surface_visual_confirm_color)(
+			const struct pipe_ctx *pipe_ctx,
+			struct tg_color *color);
+	void (*get_hdr_visual_confirm_color)(struct pipe_ctx *pipe_ctx,
+			struct tg_color *color);
+	void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx);
+	void (*verify_allow_pstate_change_high)(struct dc *dc);
+	void (*program_pipe)(struct dc *dc,
+			struct pipe_ctx *pipe_ctx,
+			struct dc_state *context);
+	bool (*wait_for_blank_complete)(struct output_pixel_processor *opp);
+	void (*dccg_init)(struct dce_hwseq *hws);
+	bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx,
+			const struct dc_plane_state *plane_state);
+	bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx,
+			const struct dc_plane_state *plane_state);
+};
+
+struct dce_hwseq {
+	struct dc_context *ctx;
+	const struct dce_hwseq_registers *regs;
+	const struct dce_hwseq_shift *shifts;
+	const struct dce_hwseq_mask *masks;
+	struct dce_hwseq_wa wa;
+	struct hwseq_wa_state wa_state;
+	struct hwseq_private_funcs funcs;
+
+};
+
+#endif /* __DC_HW_SEQUENCER_PRIVATE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index 4eff5d3..9af7ee5b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -60,11 +60,13 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal);
 
 bool dp_set_hw_training_pattern(
 	struct dc_link *link,
-	enum dc_dp_training_pattern pattern);
+	enum dc_dp_training_pattern pattern,
+	uint32_t offset);
 
 void dp_set_hw_lane_settings(
 	struct dc_link *link,
-	const struct link_training_settings *link_settings);
+	const struct link_training_settings *link_settings,
+	uint32_t offset);
 
 void dp_set_hw_test_pattern(
 	struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index 8503d9c..2470405 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -458,7 +458,14 @@ uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
 #define IX_REG_READ(index_reg_name, data_reg_name, index) \
 		generic_read_indirect_reg(CTX, REG(index_reg_name), REG(data_reg_name), IND_REG(index))
 
+#define IX_REG_GET_N(index_reg_name, data_reg_name, index, n, ...) \
+		generic_indirect_reg_get(CTX, REG(index_reg_name), REG(data_reg_name), \
+				IND_REG(index), \
+				n, __VA_ARGS__)
 
+#define IX_REG_GET(index_reg_name, data_reg_name, index, field, val) \
+		IX_REG_GET_N(index_reg_name, data_reg_name, index, 1, \
+				FN(data_reg_name, field), val)
 
 #define IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, n, ...)	\
 		generic_indirect_reg_update_ex(CTX, \
@@ -479,10 +486,35 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
 		uint32_t addr_index, uint32_t addr_data,
 		uint32_t index);
 
+uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
+		uint32_t addr_index, uint32_t addr_data,
+		uint32_t index, int n,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		...);
+
 uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
 		uint32_t addr_index, uint32_t addr_data,
 		uint32_t index, uint32_t reg_val, int n,
 		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
 		...);
 
+/* register offload macros
+ *
+ * Instead of writing registers directly via MMIO, in some cases we want
+ * to gather a register sequence and execute it from another thread, to
+ * reduce the time required for lengthy operations.
+ */
+
+/* start gathering register sequence */
+#define REG_SEQ_START() \
+	reg_sequence_start_gather(CTX)
+
+/* start execution of register sequence gathered since REG_SEQ_START */
+#define REG_SEQ_SUBMIT() \
+	reg_sequence_start_execute(CTX)
+
+/* wait for the last REG_SEQ_SUBMIT to finish */
+#define REG_SEQ_WAIT_DONE() \
+	reg_sequence_wait_done(CTX)
+
 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
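For illustration, a minimal sketch of how the new gather/submit/wait macros might bracket a lengthy programming sequence; the function name and the writes inside the bracket are hypothetical, and it assumes the including file defines CTX as the other REG_* helpers in this header already require:

/* Hypothetical sketch: offload a lengthy register sequence instead of
 * issuing each MMIO write inline.
 */
static void program_lengthy_sequence_example(void)
{
	REG_SEQ_START();      /* begin gathering register writes */

	/* ... queue register writes here, e.g. via REG_SET / REG_UPDATE ... */

	REG_SEQ_SUBMIT();     /* execute the gathered sequence from the offload thread */
	REG_SEQ_WAIT_DONE();  /* wait for the last submitted sequence to finish */
}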
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index bef224b..5ae8ada 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -46,12 +46,8 @@ struct resource_caps {
 	int num_pll;
 	int num_dwb;
 	int num_ddc;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	int num_vmid;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	int num_dsc;
-#endif
-#endif
 };
 
 struct resource_straps {
@@ -181,4 +177,6 @@ void update_audio_usage(
 
 unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
 
+void get_audio_check(struct audio_info *aud_modes,
+	struct audio_check *aud_chk);
 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index ea75420..0f682ac 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -60,27 +60,23 @@
 ###############################################################################
 # DCN 1x
 ###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_DRM_AMD_DC_DCN
 IRQ_DCN1 = irq_service_dcn10.o
 
 AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN1)
-endif
 ###############################################################################
 # DCN 20
 ###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
 IRQ_DCN2 = irq_service_dcn20.o
 
 AMD_DAL_IRQ_DCN2 = $(addprefix $(AMDDALPATH)/dc/irq/dcn20/,$(IRQ_DCN2))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN2)
-endif
 ###############################################################################
 # DCN 21
 ###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
 IRQ_DCN21 = irq_service_dcn21.o
 
 AMD_DAL_IRQ_DCN21= $(addprefix $(AMDDALPATH)/dc/irq/dcn21/,$(IRQ_DCN21))
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index 1a581c4..378cc11a 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -204,7 +204,7 @@ bool dce110_vblank_set(struct irq_service *irq_service,
 		       bool enable)
 {
 	struct dc_context *dc_ctx = irq_service->ctx;
-	struct dc *core_dc = irq_service->ctx->dc;
+	struct dc *dc = irq_service->ctx->dc;
 	enum dc_irq_source dal_irq_src =
 			dc_interrupt_to_irq_source(irq_service->ctx->dc,
 						   info->src_id,
@@ -212,7 +212,7 @@ bool dce110_vblank_set(struct irq_service *irq_service,
 	uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
 
 	struct timing_generator *tg =
-			core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+			dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
 
 	if (enable) {
 		if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
@@ -403,7 +403,7 @@ static const struct irq_service_funcs irq_service_funcs_dce110 = {
 		.to_dal_irq_source = to_dal_irq_source_dce110
 };
 
-static void construct(struct irq_service *irq_service,
+static void dce110_irq_construct(struct irq_service *irq_service,
 		      struct irq_service_init_data *init_data)
 {
 	dal_irq_service_construct(irq_service, init_data);
@@ -421,6 +421,6 @@ dal_irq_service_dce110_create(struct irq_service_init_data *init_data)
 	if (!irq_service)
 		return NULL;
 
-	construct(irq_service, init_data);
+	dce110_irq_construct(irq_service, init_data);
 	return irq_service;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 1538033..2fe4703 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -273,7 +273,7 @@ static const struct irq_service_funcs irq_service_funcs_dce120 = {
 		.to_dal_irq_source = to_dal_irq_source_dce110
 };
 
-static void construct(
+static void dce120_irq_construct(
 	struct irq_service *irq_service,
 	struct irq_service_init_data *init_data)
 {
@@ -292,6 +292,6 @@ struct irq_service *dal_irq_service_dce120_create(
 	if (!irq_service)
 		return NULL;
 
-	construct(irq_service, init_data);
+	dce120_irq_construct(irq_service, init_data);
 	return irq_service;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
index 281fee8..17e426b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -283,7 +283,7 @@ static const struct irq_service_funcs irq_service_funcs_dce80 = {
 		.to_dal_irq_source = to_dal_irq_source_dce110
 };
 
-static void construct(
+static void dce80_irq_construct(
 	struct irq_service *irq_service,
 	struct irq_service_init_data *init_data)
 {
@@ -302,7 +302,7 @@ struct irq_service *dal_irq_service_dce80_create(
 	if (!irq_service)
 		return NULL;
 
-	construct(irq_service, init_data);
+	dce80_irq_construct(irq_service, init_data);
 	return irq_service;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index cc8e7de..f956b3b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -355,7 +355,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn10 = {
 		.to_dal_irq_source = to_dal_irq_source_dcn10
 };
 
-static void construct(
+static void dcn10_irq_construct(
 	struct irq_service *irq_service,
 	struct irq_service_init_data *init_data)
 {
@@ -374,6 +374,6 @@ struct irq_service *dal_irq_service_dcn10_create(
 	if (!irq_service)
 		return NULL;
 
-	construct(irq_service, init_data);
+	dcn10_irq_construct(irq_service, init_data);
 	return irq_service;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index 5db29bf..2a1fea5 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -359,7 +359,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn20 = {
 		.to_dal_irq_source = to_dal_irq_source_dcn20
 };
 
-static void construct(
+static void dcn20_irq_construct(
 	struct irq_service *irq_service,
 	struct irq_service_init_data *init_data)
 {
@@ -378,6 +378,6 @@ struct irq_service *dal_irq_service_dcn20_create(
 	if (!irq_service)
 		return NULL;
 
-	construct(irq_service, init_data);
+	dcn20_irq_construct(irq_service, init_data);
 	return irq_service;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index cbe7818..1b97126 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -350,7 +350,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn21 = {
 		.to_dal_irq_source = to_dal_irq_source_dcn21
 };
 
-static void construct(
+static void dcn21_irq_construct(
 	struct irq_service *irq_service,
 	struct irq_service_init_data *init_data)
 {
@@ -369,6 +369,6 @@ struct irq_service *dal_irq_service_dcn21_create(
 	if (!irq_service)
 		return NULL;
 
-	construct(irq_service, init_data);
+	dcn21_irq_construct(irq_service, init_data);
 	return irq_service;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index 0878550..33053b9 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -38,7 +38,7 @@
 #include "dce120/irq_service_dce120.h"
 
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "dcn10/irq_service_dcn10.h"
 #endif
 
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 30ec80a..c34eba1 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -1,5 +1,6 @@
 /*
  * Copyright 2012-16 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -29,6 +30,7 @@
 #include <linux/kgdb.h>
 #include <linux/kref.h>
 #include <linux/types.h>
+#include <linux/slab.h>
 
 #include <asm/byteorder.h>
 
@@ -48,8 +50,39 @@
 
 #define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_X86)
 #include <asm/fpu/api.h>
+#define DC_FP_START() kernel_fpu_begin()
+#define DC_FP_END() kernel_fpu_end()
+#elif defined(CONFIG_PPC64)
+#include <asm/switch_to.h>
+#include <asm/cputable.h>
+#define DC_FP_START() { \
+	if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \
+		preempt_disable(); \
+		enable_kernel_vsx(); \
+	} else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \
+		preempt_disable(); \
+		enable_kernel_altivec(); \
+	} else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \
+		preempt_disable(); \
+		enable_kernel_fp(); \
+	} \
+}
+#define DC_FP_END() { \
+	if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \
+		disable_kernel_vsx(); \
+		preempt_enable(); \
+	} else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \
+		disable_kernel_altivec(); \
+		preempt_enable(); \
+	} else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \
+		disable_kernel_fp(); \
+		preempt_enable(); \
+	} \
+}
+#endif
 #endif
 
 /*
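As a hedged usage sketch (the function and the arithmetic below are hypothetical, not part of this change), DCN code that performs floating-point math is expected to bracket it with the DC_FP_START/DC_FP_END macros defined above so the kernel FPU or vector state is properly enabled and restored:

/* Hypothetical caller: bracket floating-point math with DC_FP_START/END. */
static int calc_scaling_ratio_x1000_example(int src_width, int dst_width)
{
	int ratio_x1000;

	DC_FP_START();          /* enable FPU / VSX / AltiVec use */
	ratio_x1000 = (int)((float)dst_width * 1000.0f / (float)src_width);
	DC_FP_END();            /* restore normal kernel context */

	return ratio_x1000;
}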
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index ff664bd..b8040da 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -32,6 +32,7 @@ static void virtual_stream_encoder_dp_set_stream_attribute(
 	struct stream_encoder *enc,
 	struct dc_crtc_timing *crtc_timing,
 	enum dc_color_space output_color_space,
+	bool use_vsc_sdp_for_colorimetry,
 	uint32_t enable_sdp_splitting) {}
 
 static void virtual_stream_encoder_hdmi_set_stream_attribute(
@@ -81,22 +82,14 @@ static void virtual_stream_encoder_reset_hdmi_stream_attribute(
 		struct stream_encoder *enc)
 {}
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 static void virtual_enc_dp_set_odm_combine(
 	struct stream_encoder *enc,
 	bool odm_combine)
 {}
-#endif
-#endif
 
 static const struct stream_encoder_funcs virtual_str_enc_funcs = {
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	.dp_set_odm_combine =
 		virtual_enc_dp_set_odm_combine,
-#endif
-#endif
 	.dp_set_stream_attribute =
 		virtual_stream_encoder_dp_set_stream_attribute,
 	.hdmi_set_stream_attribute =
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
new file mode 100644
index 0000000..cd9532b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_CMD_H_
+#define _DMUB_CMD_H_
+
+#include "dmub_types.h"
+#include "dmub_cmd_dal.h"
+#include "dmub_cmd_vbios.h"
+#include "atomfirmware.h"
+
+#define DMUB_RB_CMD_SIZE 64
+#define DMUB_RB_MAX_ENTRY 128
+#define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY)
+#define REG_SET_MASK 0xFFFF
+
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_cmd_type {
+	DMUB_CMD__NULL = 0,
+	DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE = 1,
+	DMUB_CMD__REG_SEQ_FIELD_UPDATE_SEQ = 2,
+	DMUB_CMD__REG_SEQ_BURST_WRITE = 3,
+	DMUB_CMD__REG_REG_WAIT = 4,
+	DMUB_CMD__PLAT_54186_WA = 5,
+	DMUB_CMD__PSR = 64,
+	DMUB_CMD__VBIOS = 128,
+};
+
+#pragma pack(push, 1)
+
+struct dmub_cmd_header {
+	unsigned int type : 8;
+	unsigned int sub_type : 8;
+	unsigned int reserved0 : 8;
+	unsigned int payload_bytes : 6;  /* up to 60 bytes */
+	unsigned int reserved1 : 2;
+};
+
+/*
+ * Read modify write
+ *
+ * 60 payload bytes can hold up to 5 read-modify-write sets,
+ * each taking 3 dwords.
+ *
+ * number of sequences = header.payload_bytes / sizeof(struct dmub_cmd_read_modify_write_sequence)
+ *
+ * modify_mask = 0xffff'ffff means all fields are going to be updated. In this
+ * case the command parser skips the read, so modify_mask = 0xffff'ffff acts as a plain register write.
+ */
+struct dmub_cmd_read_modify_write_sequence {
+	uint32_t addr;
+	uint32_t modify_mask;
+	uint32_t modify_value;
+};
+
+#define DMUB_READ_MODIFY_WRITE_SEQ__MAX		5
+struct dmub_rb_cmd_read_modify_write {
+	struct dmub_cmd_header header;  // type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE
+	struct dmub_cmd_read_modify_write_sequence seq[DMUB_READ_MODIFY_WRITE_SEQ__MAX];
+};
+
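To make the payload arithmetic above concrete, a hedged sketch of filling a single-entry read-modify-write command follows; the register address, mask and value are placeholders, and the helper function is hypothetical:

/* Hypothetical fill of one read-modify-write entry (3 dwords of payload). */
static void build_rmw_example(struct dmub_rb_cmd_read_modify_write *cmd)
{
	cmd->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
	cmd->header.payload_bytes =
			1 * sizeof(struct dmub_cmd_read_modify_write_sequence);

	cmd->seq[0].addr = 0x1234;          /* placeholder register address */
	cmd->seq[0].modify_mask = 0xffff;   /* update only the low 16 bits  */
	cmd->seq[0].modify_value = 0x1;
}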
+/*
+ * Update a register with a sequence of specified masks and values
+ *
+ * 60 payload bytes can hold the address plus up to 7 mask/value pairs, each taking 2 dwords.
+ *
+ * number of field update sequences = (header.payload_bytes - sizeof(addr)) / sizeof(struct dmub_cmd_reg_field_update_sequence)
+ *
+ * USE CASES:
+ *   1. auto-incrementing registers, where an additional read would advance the pointer and produce a wrong result
+ *   2. toggling a bit without a read in the middle
+ */
+
+struct dmub_cmd_reg_field_update_sequence {
+	uint32_t modify_mask;  // 0xffff'ffff to skip initial read
+	uint32_t modify_value;
+};
+
+#define DMUB_REG_FIELD_UPDATE_SEQ__MAX		7
+
+struct dmub_rb_cmd_reg_field_update_sequence {
+	struct dmub_cmd_header header;
+	uint32_t addr;
+	struct dmub_cmd_reg_field_update_sequence seq[DMUB_REG_FIELD_UPDATE_SEQ__MAX];
+};
+
+
+/*
+ * Burst write
+ *
+ * Supports use cases such as writing out LUTs.
+ *
+ * 60 payload bytes can hold the address plus up to 14 values to write to that address.
+ *
+ * number of values = (header.payload_bytes - sizeof(addr)) / sizeof(uint32_t)
+ */
+#define DMUB_BURST_WRITE_VALUES__MAX  14
+struct dmub_rb_cmd_burst_write {
+	struct dmub_cmd_header header;  // type = DMUB_CMD__REG_SEQ_BURST_WRITE
+	uint32_t addr;
+	uint32_t write_values[DMUB_BURST_WRITE_VALUES__MAX];
+};
+
+
+struct dmub_rb_cmd_common {
+	struct dmub_cmd_header header;
+	uint8_t cmd_buffer[DMUB_RB_CMD_SIZE - sizeof(struct dmub_cmd_header)];
+};
+
+struct dmub_cmd_reg_wait_data {
+	uint32_t addr;
+	uint32_t mask;
+	uint32_t condition_field_value;
+	uint32_t time_out_us;
+};
+
+struct dmub_rb_cmd_reg_wait {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_reg_wait_data reg_wait;
+};
+
+#ifndef PHYSICAL_ADDRESS_LOC
+#define PHYSICAL_ADDRESS_LOC union large_integer
+#endif
+
+struct dmub_cmd_PLAT_54186_wa {
+	uint32_t DCSURF_SURFACE_CONTROL;
+	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
+	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+	uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+	struct {
+		uint8_t hubp_inst : 4;
+		uint8_t tmz_surface : 1;
+		uint8_t immediate :1;
+		uint8_t vmid : 4;
+		uint8_t grph_stereo : 1;
+		uint32_t reserved : 21;
+	} flip_params;
+	uint32_t reserved[9];
+};
+
+struct dmub_rb_cmd_PLAT_54186_wa {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_PLAT_54186_wa flip;
+};
+
+struct dmub_cmd_digx_encoder_control_data {
+	union dig_encoder_control_parameters_v1_5 dig;
+};
+
+struct dmub_rb_cmd_digx_encoder_control {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_digx_encoder_control_data encoder_control;
+};
+
+struct dmub_cmd_set_pixel_clock_data {
+	struct set_pixel_clock_parameter_v1_7 clk;
+};
+
+struct dmub_rb_cmd_set_pixel_clock {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_set_pixel_clock_data pixel_clock;
+};
+
+struct dmub_cmd_enable_disp_power_gating_data {
+	struct enable_disp_power_gating_parameters_v2_1 pwr;
+};
+
+struct dmub_rb_cmd_enable_disp_power_gating {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_enable_disp_power_gating_data power_gating;
+};
+
+struct dmub_cmd_dig1_transmitter_control_data {
+	struct dig_transmitter_control_parameters_v1_6 dig;
+};
+
+struct dmub_rb_cmd_dig1_transmitter_control {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_dig1_transmitter_control_data transmitter_control;
+};
+
+struct dmub_rb_cmd_dpphy_init {
+	struct dmub_cmd_header header;
+	uint8_t reserved[60];
+};
+
+struct dmub_cmd_psr_copy_settings_data {
+	uint16_t psr_level;
+	uint8_t hubp_inst;
+	uint8_t dpp_inst;
+	uint8_t mpcc_inst;
+	uint8_t opp_inst;
+	uint8_t otg_inst;
+	uint8_t digfe_inst;
+	uint8_t digbe_inst;
+	uint8_t dpphy_inst;
+	uint8_t aux_inst;
+	uint8_t hyst_frames;
+	uint8_t hyst_lines;
+	uint8_t phy_num;
+	uint8_t phy_type;
+	uint8_t aux_repeat;
+	uint8_t smu_optimizations_en;
+	uint8_t skip_wait_for_pll_lock;
+	uint8_t frame_delay;
+	uint8_t smu_phy_id;
+	uint8_t num_of_controllers;
+	uint8_t link_rate;
+	uint8_t frame_cap_ind;
+};
+
+struct dmub_rb_cmd_psr_copy_settings {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_psr_copy_settings_data psr_copy_settings_data;
+};
+
+struct dmub_cmd_psr_set_level_data {
+	uint16_t psr_level;
+};
+
+struct dmub_rb_cmd_psr_set_level {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_psr_set_level_data psr_set_level_data;
+};
+
+struct dmub_rb_cmd_psr_enable {
+	struct dmub_cmd_header header;
+};
+
+struct dmub_cmd_psr_setup_data {
+	enum psr_version version; // PSR version 1 or 2
+};
+
+struct dmub_rb_cmd_psr_setup {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_psr_setup_data psr_setup_data;
+};
+
+union dmub_rb_cmd {
+	struct dmub_rb_cmd_read_modify_write read_modify_write;
+	struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq;
+	struct dmub_rb_cmd_burst_write burst_write;
+	struct dmub_rb_cmd_reg_wait reg_wait;
+	struct dmub_rb_cmd_common cmd_common;
+	struct dmub_rb_cmd_digx_encoder_control digx_encoder_control;
+	struct dmub_rb_cmd_set_pixel_clock set_pixel_clock;
+	struct dmub_rb_cmd_enable_disp_power_gating enable_disp_power_gating;
+	struct dmub_rb_cmd_dpphy_init dpphy_init;
+	struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
+	struct dmub_rb_cmd_psr_enable psr_enable;
+	struct dmub_rb_cmd_psr_copy_settings psr_copy_settings;
+	struct dmub_rb_cmd_psr_set_level psr_set_level;
+	struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa;
+	struct dmub_rb_cmd_psr_setup psr_setup;
+};
+
+#pragma pack(pop)
+
+#endif /* _DMUB_CMD_H_ */
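Since the slot size is fixed, a hedged compile-time sanity check (illustrative only, not part of the patch) shows how the 4-byte packed header, the 60-byte payload limit and DMUB_RB_CMD_SIZE are meant to line up:

/* Hypothetical sanity checks on the packed command-slot layout. */
_Static_assert(sizeof(struct dmub_cmd_header) == 4,
	       "packed header is expected to stay 4 bytes");
_Static_assert(sizeof(struct dmub_rb_cmd_common) == DMUB_RB_CMD_SIZE,
	       "header plus 60-byte payload fills exactly one ring-buffer slot");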
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
new file mode 100644
index 0000000..7b69eb3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_CMD_DAL_H_
+#define _DMUB_CMD_DAL_H_
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_cmd_psr_type {
+	DMUB_CMD__PSR_SETUP		= 0,
+	DMUB_CMD__PSR_COPY_SETTINGS	= 1,
+	DMUB_CMD__PSR_ENABLE		= 2,
+	DMUB_CMD__PSR_DISABLE		= 3,
+	DMUB_CMD__PSR_SET_LEVEL		= 4,
+};
+
+enum psr_version {
+	PSR_VERSION_1			= 0x10, // PSR Version 1
+	PSR_VERSION_2			= 0x20, // PSR Version 2, includes selective update
+	PSR_VERSION_2_Y_COORD		= 0x21, // PSR Version 2, includes Y-coordinate support for SU
+};
+
+#endif /* _DMUB_CMD_DAL_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h
new file mode 100644
index 0000000..b6deb8e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_CMD_VBIOS_H_
+#define _DMUB_CMD_VBIOS_H_
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_cmd_vbios_type {
+	DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL = 0,
+	DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL = 1,
+	DMUB_CMD__VBIOS_SET_PIXEL_CLOCK = 2,
+	DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3,
+};
+
+#endif /* _DMUB_CMD_VBIOS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h
new file mode 100644
index 0000000..242ec25
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef _DMUB_META_H_
+#define _DMUB_META_H_
+
+#include "dmub_types.h"
+
+#pragma pack(push, 1)
+
+/* Magic value for identifying dmub_fw_meta_info */
+#define DMUB_FW_META_MAGIC 0x444D5542
+
+/* Offset from the end of the file to the dmub_fw_meta_info */
+#define DMUB_FW_META_OFFSET 0x24
+
+/**
+ * struct dmub_fw_meta_info - metadata associated with fw binary
+ *
+ * NOTE: This should be considered a stable API. Fields should
+ *       not be repurposed or reordered. New fields should be
+ *       added instead to extend the structure.
+ *
+ * @magic_value: magic value identifying DMUB firmware meta info
+ * @fw_region_size: size of the firmware state region
+ * @trace_buffer_size: size of the tracebuffer region
+ */
+struct dmub_fw_meta_info {
+	uint32_t magic_value;
+	uint32_t fw_region_size;
+	uint32_t trace_buffer_size;
+};
+
+/* Ensure that the structure remains 64 bytes. */
+union dmub_fw_meta {
+	struct dmub_fw_meta_info info;
+	uint8_t reserved[64];
+};
+
+#pragma pack(pop)
+
+#endif /* _DMUB_META_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
new file mode 100644
index 0000000..df875fd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_RB_H_
+#define _DMUB_RB_H_
+
+#include "dmub_types.h"
+#include "dmub_cmd.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+struct dmub_cmd_header;
+
+struct dmub_rb_init_params {
+	void *ctx;
+	void *base_address;
+	uint32_t capacity;
+};
+
+struct dmub_rb {
+	void *base_address;
+	uint32_t data_count;
+	uint32_t rptr;
+	uint32_t wrpt;
+	uint32_t capacity;
+
+	void *ctx;
+	void *dmub;
+};
+
+static inline bool dmub_rb_empty(struct dmub_rb *rb)
+{
+	return (rb->wrpt == rb->rptr);
+}
+
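+/*
+ * The ring buffer is considered full when a single DMUB_RB_CMD_SIZE slot
+ * remains free, so the write pointer never catches up to the read pointer.
+ */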
+static inline bool dmub_rb_full(struct dmub_rb *rb)
+{
+	uint32_t data_count;
+
+	if (rb->wrpt >= rb->rptr)
+		data_count = rb->wrpt - rb->rptr;
+	else
+		data_count = rb->capacity - (rb->rptr - rb->wrpt);
+
+	return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE));
+}
+
+static inline bool dmub_rb_push_front(struct dmub_rb *rb,
+				      const struct dmub_cmd_header *cmd)
+{
+	uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
+	const uint64_t *src = (const uint64_t *)cmd;
+	int i;
+
+	if (dmub_rb_full(rb))
+		return false;
+
+	// copying data
+	for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+		*dst++ = *src++;
+
+	rb->wrpt += DMUB_RB_CMD_SIZE;
+
+	if (rb->wrpt >= rb->capacity)
+		rb->wrpt %= rb->capacity;
+
+	return true;
+}
+
+static inline bool dmub_rb_front(struct dmub_rb *rb,
+				 struct dmub_cmd_header *cmd)
+{
+	uint8_t *rd_ptr = (uint8_t *)rb->base_address + rb->rptr;
+
+	if (dmub_rb_empty(rb))
+		return false;
+
+	dmub_memcpy(cmd, rd_ptr, DMUB_RB_CMD_SIZE);
+
+	return true;
+}
+
+static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
+{
+	if (dmub_rb_empty(rb))
+		return false;
+
+	rb->rptr += DMUB_RB_CMD_SIZE;
+
+	if (rb->rptr >= rb->capacity)
+		rb->rptr %= rb->capacity;
+
+	return true;
+}
+
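+/*
+ * Reads back each pending entry and discards the result; the dummy reads
+ * only serve to flush outstanding writes to the ring buffer memory.
+ */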
+static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
+{
+	uint32_t rptr = rb->rptr;
+	uint32_t wptr = rb->wrpt;
+
+	while (rptr != wptr) {
+		uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
+		uint64_t temp;
+		int i;
+
+		for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+			temp = *data++;
+
+		rptr += DMUB_RB_CMD_SIZE;
+		if (rptr >= rb->capacity)
+			rptr %= rb->capacity;
+	}
+}
+
+static inline void dmub_rb_init(struct dmub_rb *rb,
+				struct dmub_rb_init_params *init_params)
+{
+	rb->base_address = init_params->base_address;
+	rb->capacity = init_params->capacity;
+	rb->rptr = 0;
+	rb->wrpt = 0;
+}
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _DMUB_RB_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
new file mode 100644
index 0000000..f891759
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -0,0 +1,523 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_SRV_H_
+#define _DMUB_SRV_H_
+
+/**
+ * DOC: DMUB interface and operation
+ *
+ * DMUB is the interface to the display DMCUB microcontroller on DCN hardware.
+ * It delegates hardware initialization and command submission to the
+ * microcontroller. DMUB is the short name for DMCUB.
+ *
+ * This interface is not thread-safe. Ensure that all access to the interface
+ * is properly synchronized by the caller.
+ *
+ * Initialization and usage of the DMUB service should be done in the
+ * steps given below:
+ *
+ * 1. dmub_srv_create()
+ * 2. dmub_srv_has_hw_support()
+ * 3. dmub_srv_calc_region_info()
+ * 4. dmub_srv_hw_init()
+ *
+ * The call to dmub_srv_create() is required to use the server.
+ *
+ * The calls to dmub_srv_has_hw_support() and dmub_srv_calc_region_info()
+ * are helpers to query cache window size and allocate framebuffer(s)
+ * for the cache windows.
+ *
+ * The call to dmub_srv_hw_init() programs the DMCUB registers to prepare
+ * for command submission. Commands can be queued via dmub_srv_cmd_queue()
+ * and executed via dmub_srv_cmd_execute().
+ *
+ * If the queue is full, the dmub_srv_wait_for_idle() call can be used to
+ * wait until the queue has been cleared.
+ *
+ * Destroying the DMUB service can be done by calling dmub_srv_destroy().
+ * This does not clear DMUB hardware state, only software state.
+ *
+ * The interface is intended to be standalone and should not depend on any
+ * other component within DAL.
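+ *
+ * A minimal sketch of the sequence above (illustrative only; parameter
+ * setup, framebuffer allocation and error handling are driver specific
+ * and omitted here):
+ *
+ *	struct dmub_srv dmub;
+ *	struct dmub_srv_create_params create_params = { 0 };
+ *	struct dmub_srv_region_params region_params = { 0 };
+ *	struct dmub_srv_region_info region_info = { 0 };
+ *	struct dmub_srv_hw_params hw_params = { 0 };
+ *	bool is_supported = false;
+ *
+ *	dmub_srv_create(&dmub, &create_params);
+ *	dmub_srv_has_hw_support(&dmub, &is_supported);
+ *	if (is_supported) {
+ *		dmub_srv_calc_region_info(&dmub, &region_params, &region_info);
+ *		// allocate >= region_info.fb_size bytes of framebuffer memory,
+ *		// fill hw_params with the resulting fb info, then:
+ *		dmub_srv_hw_init(&dmub, &hw_params);
+ *	}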
+ */
+
+#include "dmub_types.h"
+#include "dmub_cmd.h"
+#include "dmub_rb.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Forward declarations */
+struct dmub_srv;
+struct dmub_cmd_header;
+struct dmub_srv_common_regs;
+
+/* enum dmub_status - return code for dmcub functions */
+enum dmub_status {
+	DMUB_STATUS_OK = 0,
+	DMUB_STATUS_NO_CTX,
+	DMUB_STATUS_QUEUE_FULL,
+	DMUB_STATUS_TIMEOUT,
+	DMUB_STATUS_INVALID,
+};
+
+/* enum dmub_asic - dmub asic identifier */
+enum dmub_asic {
+	DMUB_ASIC_NONE = 0,
+	DMUB_ASIC_DCN20,
+	DMUB_ASIC_DCN21,
+	DMUB_ASIC_MAX,
+};
+
+/* enum dmub_window_id - dmub window identifier */
+enum dmub_window_id {
+	DMUB_WINDOW_0_INST_CONST = 0,
+	DMUB_WINDOW_1_STACK,
+	DMUB_WINDOW_2_BSS_DATA,
+	DMUB_WINDOW_3_VBIOS,
+	DMUB_WINDOW_4_MAILBOX,
+	DMUB_WINDOW_5_TRACEBUFF,
+	DMUB_WINDOW_6_FW_STATE,
+	DMUB_WINDOW_7_RESERVED,
+	DMUB_WINDOW_TOTAL,
+};
+
+/**
+ * struct dmub_region - dmub hw memory region
+ * @base: base address for region, must be 256 byte aligned
+ * @top: top address for region
+ */
+struct dmub_region {
+	uint32_t base;
+	uint32_t top;
+};
+
+/**
+ * struct dmub_window - dmub hw cache window
+ * @off: offset to the fb memory in gpu address space
+ * @r: region in uc address space for cache window
+ */
+struct dmub_window {
+	union dmub_addr offset;
+	struct dmub_region region;
+};
+
+/**
+ * struct dmub_fb - defines a dmub framebuffer memory region
+ * @cpu_addr: cpu virtual address for the region, NULL if invalid
+ * @gpu_addr: gpu virtual address for the region, zero if invalid
+ * @size: size of the region in bytes, zero if invalid
+ */
+struct dmub_fb {
+	void *cpu_addr;
+	uint64_t gpu_addr;
+	uint32_t size;
+};
+
+/**
+ * struct dmub_srv_region_params - params used for calculating dmub regions
+ * @inst_const_size: size of the fw inst const section
+ * @bss_data_size: size of the fw bss data section
+ * @vbios_size: size of the vbios data
+ * @fw_bss_data: raw firmware bss data section
+ */
+struct dmub_srv_region_params {
+	uint32_t inst_const_size;
+	uint32_t bss_data_size;
+	uint32_t vbios_size;
+	const uint8_t *fw_bss_data;
+};
+
+/**
+ * struct dmub_srv_region_info - output region info from the dmub service
+ * @fb_size: required minimum fb size for all regions, aligned to 4096 bytes
+ * @num_regions: number of regions used by the dmub service
+ * @regions: region info
+ *
+ * The regions are aligned such that they can be all placed within the
+ * same framebuffer but they can also be placed into different framebuffers.
+ *
+ * The size of each region can be calculated by the caller:
+ * size = reg.top - reg.base
+ *
+ * Care must be taken when performing custom allocations to ensure that each
+ * region base address is 256 byte aligned.
+ */
+struct dmub_srv_region_info {
+	uint32_t fb_size;
+	uint8_t num_regions;
+	struct dmub_region regions[DMUB_WINDOW_TOTAL];
+};
+
+/**
+ * struct dmub_srv_fb_params - parameters used for driver fb setup
+ * @region_info: region info calculated by dmub service
+ * @cpu_addr: base cpu address for the framebuffer
+ * @gpu_addr: base gpu virtual address for the framebuffer
+ */
+struct dmub_srv_fb_params {
+	const struct dmub_srv_region_info *region_info;
+	void *cpu_addr;
+	uint64_t gpu_addr;
+};
+
+/**
+ * struct dmub_srv_fb_info - output fb info from the dmub service
+ * @num_fb: number of required dmub framebuffers
+ * @fb: fb data for each region
+ *
+ * Output from the dmub service helper that can be used by the
+ * driver to prepare the dmub_fb entries that are passed into the dmub
+ * hw init service.
+ *
+ * Assumes that all regions are within the same framebuffer
+ * and have been set up according to the region_info generated
+ * by the dmub service.
+ */
+struct dmub_srv_fb_info {
+	uint8_t num_fb;
+	struct dmub_fb fb[DMUB_WINDOW_TOTAL];
+};
+
+/**
+ * struct dmub_srv_base_funcs - Driver specific base callbacks
+ */
+struct dmub_srv_base_funcs {
+	/**
+	 * @reg_read:
+	 *
+	 * Hook for reading a register.
+	 *
+	 * Return: The 32-bit register value from the given address.
+	 */
+	uint32_t (*reg_read)(void *ctx, uint32_t address);
+
+	/**
+	 * @reg_write:
+	 *
+	 * Hook for writing a value to the register specified by address.
+	 */
+	void (*reg_write)(void *ctx, uint32_t address, uint32_t value);
+};
+
+/**
+ * struct dmub_srv_hw_funcs - hardware sequencer funcs for dmub
+ */
+struct dmub_srv_hw_funcs {
+	/* private: internal use only */
+
+	void (*init)(struct dmub_srv *dmub);
+
+	void (*reset)(struct dmub_srv *dmub);
+
+	void (*reset_release)(struct dmub_srv *dmub);
+
+	void (*backdoor_load)(struct dmub_srv *dmub,
+			      const struct dmub_window *cw0,
+			      const struct dmub_window *cw1);
+
+	void (*setup_windows)(struct dmub_srv *dmub,
+			      const struct dmub_window *cw2,
+			      const struct dmub_window *cw3,
+			      const struct dmub_window *cw4,
+			      const struct dmub_window *cw5,
+			      const struct dmub_window *cw6);
+
+	void (*setup_mailbox)(struct dmub_srv *dmub,
+			      const struct dmub_region *inbox1);
+
+	uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub);
+
+	void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
+
+	bool (*is_supported)(struct dmub_srv *dmub);
+
+	bool (*is_hw_init)(struct dmub_srv *dmub);
+
+	bool (*is_phy_init)(struct dmub_srv *dmub);
+
+	bool (*is_auto_load_done)(struct dmub_srv *dmub);
+};
+
+/**
+ * struct dmub_srv_create_params - params for dmub service creation
+ * @base_funcs: driver supplied base routines
+ * @hw_funcs: optional overrides for hw funcs
+ * @user_ctx: context data for callback funcs
+ * @asic: driver supplied asic
+ * @is_virtual: false for hw support only
+ */
+struct dmub_srv_create_params {
+	struct dmub_srv_base_funcs funcs;
+	struct dmub_srv_hw_funcs *hw_funcs;
+	void *user_ctx;
+	enum dmub_asic asic;
+	bool is_virtual;
+};
+
+/**
+ * struct dmub_srv_hw_params - params for dmub hardware initialization
+ * @fb: framebuffer info for each region
+ * @fb_base: base of the framebuffer aperture
+ * @fb_offset: offset of the framebuffer aperture
+ * @psp_version: psp version to pass for DMCU init
+ * @load_inst_const: true if DMUB should load inst const fw
+ */
+struct dmub_srv_hw_params {
+	struct dmub_fb *fb[DMUB_WINDOW_TOTAL];
+	uint64_t fb_base;
+	uint64_t fb_offset;
+	uint32_t psp_version;
+	bool load_inst_const;
+};
+
+/**
+ * struct dmub_srv - software state for dmcub
+ * @asic: dmub asic identifier
+ * @user_ctx: user provided context for the dmub_srv
+ * @is_virtual: false if hardware support only
+ * @fw_state: dmub firmware state pointer
+ */
+struct dmub_srv {
+	enum dmub_asic asic;
+	void *user_ctx;
+	bool is_virtual;
+	volatile const struct dmub_fw_state *fw_state;
+
+	/* private: internal use only */
+	const struct dmub_srv_common_regs *regs;
+
+	struct dmub_srv_base_funcs funcs;
+	struct dmub_srv_hw_funcs hw_funcs;
+	struct dmub_rb inbox1_rb;
+
+	bool sw_init;
+	bool hw_init;
+
+	uint64_t fb_base;
+	uint64_t fb_offset;
+	uint32_t psp_version;
+};
+
+/**
+ * dmub_srv_create() - creates the DMUB service.
+ * @dmub: the dmub service
+ * @params: creation parameters for the service
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
+				 const struct dmub_srv_create_params *params);
+
+/**
+ * dmub_srv_destroy() - destroys the DMUB service.
+ * @dmub: the dmub service
+ */
+void dmub_srv_destroy(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_calc_region_info() - retrieves region info from the dmub service
+ * @dmub: the dmub service
+ * @params: parameters used to calculate region locations
+ * @out: the output region info from dmub
+ *
+ * Calculates the base and top address for all relevant dmub regions
+ * using the parameters given (if any).
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status
+dmub_srv_calc_region_info(struct dmub_srv *dmub,
+			  const struct dmub_srv_region_params *params,
+			  struct dmub_srv_region_info *out);
+
+/**
+ * dmub_srv_calc_fb_info() - retrieves fb info from the dmub service
+ * @dmub: the dmub service
+ * @params: parameters used to calculate fb locations
+ * @out: the output fb info from dmub
+ *
+ * Calculates the cpu and gpu address for each framebuffer region
+ * using the region info and the base addresses given.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+				       const struct dmub_srv_fb_params *params,
+				       struct dmub_srv_fb_info *out);
+
+/**
+ * dmub_srv_has_hw_support() - returns hw support state for dmcub
+ * @dmub: the dmub service
+ * @is_supported: hw support state
+ *
+ * Queries the hardware for DMCUB support and returns the result.
+ *
+ * Can be called before dmub_srv_hw_init().
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
+					 bool *is_supported);
+
+/**
+ * dmub_srv_is_hw_init() - returns hardware init state
+ * @dmub: the dmub service
+ * @is_hw_init: hardware init state output
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init);
+
+/**
+ * dmub_srv_hw_init() - initializes the underlying DMUB hardware
+ * @dmub: the dmub service
+ * @params: params for hardware initialization
+ *
+ * Resets the DMUB hardware and performs backdoor loading of the
+ * required cache regions based on the input framebuffer regions.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_NO_CTX - dmcub context not initialized
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+				  const struct dmub_srv_hw_params *params);
+
+/**
+ * dmub_srv_hw_reset() - puts the DMUB hardware in reset state if initialized
+ * @dmub: the dmub service
+ *
+ * Before destroying the DMUB service or releasing the backing framebuffer
+ * memory, the DMCUB must be placed into reset first.
+ *
+ * A subsequent call to dmub_srv_hw_init() will re-enable the DMCUB.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_cmd_queue() - queues a command to the DMUB
+ * @dmub: the dmub service
+ * @cmd: the command to queue
+ *
+ * Queues a command to the DMUB service but does not begin execution
+ * immediately.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_QUEUE_FULL - no remaining room in queue
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+				    const struct dmub_cmd_header *cmd);
+
+/**
+ * dmub_srv_cmd_execute() - Executes the queued command sequence on the dmub
+ * @dmub: the dmub service
+ *
+ * Begins execution of queued commands on the dmub.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_wait_for_auto_load() - Waits for firmware auto load to complete
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until firmware has been autoloaded by the DMCUB. The maximum
+ * wait time is given in microseconds to prevent spinning forever.
+ *
+ * On ASICs without firmware autoload support this function will return
+ * immediately.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_TIMEOUT - wait for auto load timed out
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
+					     uint32_t timeout_us);
+
+/**
+ * dmub_srv_wait_for_phy_init() - Waits for DMUB PHY init to complete
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the PHY has been initialized by the DMUB. The maximum
+ * wait time is given in microseconds to prevent spinning forever.
+ *
+ * On ASICs without PHY init support this function will return
+ * immediately.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_TIMEOUT - wait for phy init timed out
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
+					    uint32_t timeout_us);
+
+/**
+ * dmub_srv_wait_for_idle() - Waits for the DMUB to be idle
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the DMUB buffer is empty and all commands have
+ * finished processing. The maximum wait time is given in
+ * microseconds to prevent spinning forever.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
+					uint32_t timeout_us);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _DMUB_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h
new file mode 100644
index 0000000..6b3ee42
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef _DMUB_TRACE_BUFFER_H_
+#define _DMUB_TRACE_BUFFER_H_
+
+#include "dmub_types.h"
+
+#define LOAD_DMCU_FW	1
+#define LOAD_PHY_FW	2
+
+enum dmucb_trace_code {
+	DMCUB__UNKNOWN,
+	DMCUB__MAIN_BEGIN,
+	DMCUB__PHY_INIT_BEGIN,
+	DMCUB__PHY_FW_SRAM_LOAD_BEGIN,
+	DMCUB__PHY_FW_SRAM_LOAD_END,
+	DMCUB__PHY_INIT_POLL_DONE,
+	DMCUB__PHY_INIT_END,
+	DMCUB__DMCU_ERAM_LOAD_BEGIN,
+	DMCUB__DMCU_ERAM_LOAD_END,
+	DMCUB__DMCU_ISR_LOAD_BEGIN,
+	DMCUB__DMCU_ISR_LOAD_END,
+	DMCUB__MAIN_IDLE,
+	DMCUB__PERF_TRACE,
+	DMCUB__PG_DONE,
+};
+
+struct dmcub_trace_buf_entry {
+	enum dmucb_trace_code trace_code;
+	uint32_t tick_count;
+	uint32_t param0;
+	uint32_t param1;
+};
+
+#define TRACE_BUF_SIZE (1024) //1 kB
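+/* 8 bytes are reserved for the entry_count and clk_freq header fields. */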
+#define PERF_TRACE_MAX_ENTRY ((TRACE_BUF_SIZE - 8)/sizeof(struct dmcub_trace_buf_entry))
+
+struct dmcub_trace_buf {
+	uint32_t entry_count;
+	uint32_t clk_freq;
+	struct dmcub_trace_buf_entry entries[PERF_TRACE_MAX_ENTRY];
+};
+
+#endif /* _DMUB_TRACE_BUFFER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
new file mode 100644
index 0000000..41d524b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_TYPES_H_
+#define _DMUB_TYPES_H_
+
+/* Basic type definitions. */
+#include <asm/byteorder.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <stdarg.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef dmub_memcpy
+#define dmub_memcpy(dest, source, bytes) memcpy((dest), (source), (bytes))
+#endif
+
+#ifndef dmub_memset
+#define dmub_memset(dest, val, bytes) memset((dest), (val), (bytes))
+#endif
+
+#ifndef dmub_udelay
+#define dmub_udelay(microseconds) udelay(microseconds)
+#endif
+
+union dmub_addr {
+	struct {
+		uint32_t low_part;
+		uint32_t high_part;
+	} u;
+	uint64_t quad_part;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _DMUB_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile
new file mode 100644
index 0000000..e08dfee
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile
@@ -0,0 +1,27 @@
+#
+# Copyright 2019 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+
+DMUB = dmub_srv.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o
+
+AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DMUB)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
new file mode 100644
index 0000000..b2ca8e0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "../inc/dmub_srv.h"
+#include "dmub_reg.h"
+#include "dmub_dcn20.h"
+
+#include "dcn/dcn_2_0_0_offset.h"
+#include "dcn/dcn_2_0_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
+
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
+#define CTX dmub
+#define REGS dmub->regs
+
+/* Registers. */
+
+const struct dmub_srv_common_regs dmub_srv_dcn20_regs = {
+#define DMUB_SR(reg) REG_OFFSET(reg),
+	{ DMUB_COMMON_REGS() },
+#undef DMUB_SR
+
+#define DMUB_SF(reg, field) FD_MASK(reg, field),
+	{ DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+
+#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
+	{ DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+};
+
+/* Shared functions. */
+
+static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub,
+					  uint64_t *fb_base,
+					  uint64_t *fb_offset)
+{
+	uint32_t tmp;
+
+	REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp);
+	*fb_base = (uint64_t)tmp << 24;
+
+	REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp);
+	*fb_offset = (uint64_t)tmp << 24;
+}
+
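+/*
+ * Rebases an address from the driver framebuffer aperture (fb_base) onto
+ * the framebuffer offset (fb_offset) expected by the DMCUB cache windows.
+ */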
+static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in,
+					     uint64_t fb_base,
+					     uint64_t fb_offset,
+					     union dmub_addr *addr_out)
+{
+	addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset;
+}
+
+void dmub_dcn20_reset(struct dmub_srv *dmub)
+{
+	REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1);
+	REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+	REG_WRITE(DMCUB_INBOX1_RPTR, 0);
+	REG_WRITE(DMCUB_INBOX1_WPTR, 0);
+}
+
+void dmub_dcn20_reset_release(struct dmub_srv *dmub)
+{
+	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
+	REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);
+	REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
+	REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 0);
+}
+
+void dmub_dcn20_backdoor_load(struct dmub_srv *dmub,
+			      const struct dmub_window *cw0,
+			      const struct dmub_window *cw1)
+{
+	union dmub_addr offset;
+	uint64_t fb_base, fb_offset;
+
+	dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+
+	REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+	REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3,
+		     DMCUB_MEM_WRITE_SPACE, 0x3);
+
+	dmub_dcn20_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
+
+	REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part);
+	REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part);
+	REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base);
+	REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0,
+		  DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top,
+		  DMCUB_REGION3_CW0_ENABLE, 1);
+
+	dmub_dcn20_translate_addr(&cw1->offset, fb_base, fb_offset, &offset);
+
+	REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part);
+	REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part);
+	REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base);
+	REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0,
+		  DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
+		  DMCUB_REGION3_CW1_ENABLE, 1);
+
+	REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
+		     0x20);
+}
+
+void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
+			      const struct dmub_window *cw2,
+			      const struct dmub_window *cw3,
+			      const struct dmub_window *cw4,
+			      const struct dmub_window *cw5,
+			      const struct dmub_window *cw6)
+{
+	union dmub_addr offset;
+	uint64_t fb_base, fb_offset;
+
+	dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+
+	dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset);
+
+	REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
+	REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
+	REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
+	REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
+		  DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
+		  DMCUB_REGION3_CW2_ENABLE, 1);
+
+	dmub_dcn20_translate_addr(&cw3->offset, fb_base, fb_offset, &offset);
+
+	REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part);
+	REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part);
+	REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base);
+	REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0,
+		  DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top,
+		  DMCUB_REGION3_CW3_ENABLE, 1);
+
+	/* TODO: Move this to CW4. */
+	dmub_dcn20_translate_addr(&cw4->offset, fb_base, fb_offset, &offset);
+
+	REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part);
+	REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part);
+	REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS,
+		  cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE,
+		  1);
+
+	dmub_dcn20_translate_addr(&cw5->offset, fb_base, fb_offset, &offset);
+
+	REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part);
+	REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part);
+	REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base);
+	REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0,
+		  DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top,
+		  DMCUB_REGION3_CW5_ENABLE, 1);
+
+	dmub_dcn20_translate_addr(&cw6->offset, fb_base, fb_offset, &offset);
+
+	REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part);
+	REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part);
+	REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base);
+	REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
+		  DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
+		  DMCUB_REGION3_CW6_ENABLE, 1);
+}
+
+void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
+			      const struct dmub_region *inbox1)
+{
+	/* TODO: Use CW4 instead of region 4. */
+
+	REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000);
+	REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
+}
+
+uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub)
+{
+	return REG_READ(DMCUB_INBOX1_RPTR);
+}
+
+void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset)
+{
+	REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset);
+}
+
+bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub)
+{
+	return REG_READ(DMCUB_REGION3_CW2_BASE_ADDRESS) != 0;
+}
+
+bool dmub_dcn20_is_supported(struct dmub_srv *dmub)
+{
+	uint32_t supported = 0;
+
+	REG_GET(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE, &supported);
+
+	return supported;
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
new file mode 100644
index 0000000..04b0fa1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_DCN20_H_
+#define _DMUB_DCN20_H_
+
+#include "../inc/dmub_types.h"
+
+struct dmub_srv;
+
+/* DCN20 register definitions. */
+
+#define DMUB_COMMON_REGS() \
+	DMUB_SR(DMCUB_CNTL) \
+	DMUB_SR(DMCUB_MEM_CNTL) \
+	DMUB_SR(DMCUB_SEC_CNTL) \
+	DMUB_SR(DMCUB_INBOX1_BASE_ADDRESS) \
+	DMUB_SR(DMCUB_INBOX1_SIZE) \
+	DMUB_SR(DMCUB_INBOX1_RPTR) \
+	DMUB_SR(DMCUB_INBOX1_WPTR) \
+	DMUB_SR(DMCUB_REGION3_CW0_OFFSET) \
+	DMUB_SR(DMCUB_REGION3_CW1_OFFSET) \
+	DMUB_SR(DMCUB_REGION3_CW2_OFFSET) \
+	DMUB_SR(DMCUB_REGION3_CW3_OFFSET) \
+	DMUB_SR(DMCUB_REGION3_CW4_OFFSET) \
+	DMUB_SR(DMCUB_REGION3_CW5_OFFSET) \
+	DMUB_SR(DMCUB_REGION3_CW6_OFFSET) \
+	DMUB_SR(DMCUB_REGION3_CW7_OFFSET) \
+	DMUB_SR(DMCUB_REGION3_CW0_OFFSET_HIGH) \
+	DMUB_SR(DMCUB_REGION3_CW1_OFFSET_HIGH) \
+	DMUB_SR(DMCUB_REGION3_CW2_OFFSET_HIGH) \
+	DMUB_SR(DMCUB_REGION3_CW3_OFFSET_HIGH) \
+	DMUB_SR(DMCUB_REGION3_CW4_OFFSET_HIGH) \
+	DMUB_SR(DMCUB_REGION3_CW5_OFFSET_HIGH) \
+	DMUB_SR(DMCUB_REGION3_CW6_OFFSET_HIGH) \
+	DMUB_SR(DMCUB_REGION3_CW7_OFFSET_HIGH) \
+	DMUB_SR(DMCUB_REGION3_CW0_BASE_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW1_BASE_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW2_BASE_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW3_BASE_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW4_BASE_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW5_BASE_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW6_BASE_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW7_BASE_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW0_TOP_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW1_TOP_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW2_TOP_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW3_TOP_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW4_TOP_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW5_TOP_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW6_TOP_ADDRESS) \
+	DMUB_SR(DMCUB_REGION3_CW7_TOP_ADDRESS) \
+	DMUB_SR(DMCUB_REGION4_OFFSET) \
+	DMUB_SR(DMCUB_REGION4_OFFSET_HIGH) \
+	DMUB_SR(DMCUB_REGION4_TOP_ADDRESS) \
+	DMUB_SR(DMCUB_SCRATCH0) \
+	DMUB_SR(DMCUB_SCRATCH1) \
+	DMUB_SR(DMCUB_SCRATCH2) \
+	DMUB_SR(DMCUB_SCRATCH3) \
+	DMUB_SR(DMCUB_SCRATCH4) \
+	DMUB_SR(DMCUB_SCRATCH5) \
+	DMUB_SR(DMCUB_SCRATCH6) \
+	DMUB_SR(DMCUB_SCRATCH7) \
+	DMUB_SR(DMCUB_SCRATCH8) \
+	DMUB_SR(DMCUB_SCRATCH9) \
+	DMUB_SR(DMCUB_SCRATCH10) \
+	DMUB_SR(DMCUB_SCRATCH11) \
+	DMUB_SR(DMCUB_SCRATCH12) \
+	DMUB_SR(DMCUB_SCRATCH13) \
+	DMUB_SR(DMCUB_SCRATCH14) \
+	DMUB_SR(DMCUB_SCRATCH15) \
+	DMUB_SR(CC_DC_PIPE_DIS) \
+	DMUB_SR(MMHUBBUB_SOFT_RESET) \
+	DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
+	DMUB_SR(DCN_VM_FB_OFFSET)
+
+#define DMUB_COMMON_FIELDS() \
+	DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \
+	DMUB_SF(DMCUB_CNTL, DMCUB_SOFT_RESET) \
+	DMUB_SF(DMCUB_CNTL, DMCUB_TRACEPORT_EN) \
+	DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE) \
+	DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_WRITE_SPACE) \
+	DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET) \
+	DMUB_SF(DMCUB_SEC_CNTL, DMCUB_MEM_UNIT_ID) \
+	DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_TOP_ADDRESS) \
+	DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE) \
+	DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_TOP_ADDRESS) \
+	DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_ENABLE) \
+	DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_TOP_ADDRESS) \
+	DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE) \
+	DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_TOP_ADDRESS) \
+	DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE) \
+	DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_TOP_ADDRESS) \
+	DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE) \
+	DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_TOP_ADDRESS) \
+	DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE) \
+	DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_TOP_ADDRESS) \
+	DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE) \
+	DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_TOP_ADDRESS) \
+	DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE) \
+	DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \
+	DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
+	DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
+	DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
+	DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
+	DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET)
+
+struct dmub_srv_common_reg_offset {
+#define DMUB_SR(reg) uint32_t reg;
+	DMUB_COMMON_REGS()
+#undef DMUB_SR
+};
+
+struct dmub_srv_common_reg_shift {
+#define DMUB_SF(reg, field) uint8_t reg##__##field;
+	DMUB_COMMON_FIELDS()
+#undef DMUB_SF
+};
+
+struct dmub_srv_common_reg_mask {
+#define DMUB_SF(reg, field) uint32_t reg##__##field;
+	DMUB_COMMON_FIELDS()
+#undef DMUB_SF
+};
+
+struct dmub_srv_common_regs {
+	const struct dmub_srv_common_reg_offset offset;
+	const struct dmub_srv_common_reg_mask mask;
+	const struct dmub_srv_common_reg_shift shift;
+};
+
+extern const struct dmub_srv_common_regs dmub_srv_dcn20_regs;
+
+/* Hardware functions. */
+
+void dmub_dcn20_init(struct dmub_srv *dmub);
+
+void dmub_dcn20_reset(struct dmub_srv *dmub);
+
+void dmub_dcn20_reset_release(struct dmub_srv *dmub);
+
+void dmub_dcn20_backdoor_load(struct dmub_srv *dmub,
+			      const struct dmub_window *cw0,
+			      const struct dmub_window *cw1);
+
+void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
+			      const struct dmub_window *cw2,
+			      const struct dmub_window *cw3,
+			      const struct dmub_window *cw4,
+			      const struct dmub_window *cw5,
+			      const struct dmub_window *cw6);
+
+void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
+			      const struct dmub_region *inbox1);
+
+uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub);
+
+void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
+
+bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub);
+
+bool dmub_dcn20_is_supported(struct dmub_srv *dmub);
+
+#endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
new file mode 100644
index 0000000..5bed9fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "../inc/dmub_srv.h"
+#include "dmub_reg.h"
+#include "dmub_dcn21.h"
+
+#include "dcn/dcn_2_1_0_offset.h"
+#include "dcn/dcn_2_1_0_sh_mask.h"
+#include "renoir_ip_offset.h"
+
+#define BASE_INNER(seg) DMU_BASE__INST0_SEG##seg
+#define CTX dmub
+#define REGS dmub->regs
+
+/* Registers. */
+
+const struct dmub_srv_common_regs dmub_srv_dcn21_regs = {
+#define DMUB_SR(reg) REG_OFFSET(reg),
+	{ DMUB_COMMON_REGS() },
+#undef DMUB_SR
+
+#define DMUB_SF(reg, field) FD_MASK(reg, field),
+	{ DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+
+#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
+	{ DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+};
+
+/* Shared functions. */
+
+bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub)
+{
+	return (REG_READ(DMCUB_SCRATCH0) == 3);
+}
+
+bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub)
+{
+	return REG_READ(DMCUB_SCRATCH10) == 0;
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
new file mode 100644
index 0000000..2bbea23
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_DCN21_H_
+#define _DMUB_DCN21_H_
+
+#include "dmub_dcn20.h"
+
+/* Registers. */
+
+extern const struct dmub_srv_common_regs dmub_srv_dcn21_regs;
+
+/* Hardware functions. */
+
+bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub);
+
+bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub);
+
+#endif /* _DMUB_DCN21_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
new file mode 100644
index 0000000..4094eca
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub_reg.h"
+#include "../inc/dmub_srv.h"
+
+struct dmub_reg_value_masks {
+	uint32_t value;
+	uint32_t mask;
+};
+
+static inline void
+set_reg_field_value_masks(struct dmub_reg_value_masks *field_value_mask,
+			  uint32_t value, uint32_t mask, uint8_t shift)
+{
+	field_value_mask->value =
+		(field_value_mask->value & ~mask) | (mask & (value << shift));
+	field_value_mask->mask = field_value_mask->mask | mask;
+}
+
+static void set_reg_field_values(struct dmub_reg_value_masks *field_value_mask,
+				 uint32_t addr, int n, uint8_t shift1,
+				 uint32_t mask1, uint32_t field_value1,
+				 va_list ap)
+{
+	uint32_t shift, mask, field_value;
+	int i = 1;
+
+	/* gather all bits value/mask getting updated in this register */
+	set_reg_field_value_masks(field_value_mask, field_value1, mask1,
+				  shift1);
+
+	while (i < n) {
+		shift = va_arg(ap, uint32_t);
+		mask = va_arg(ap, uint32_t);
+		field_value = va_arg(ap, uint32_t);
+
+		set_reg_field_value_masks(field_value_mask, field_value, mask,
+					  shift);
+		i++;
+	}
+}
+
+static inline uint32_t get_reg_field_value_ex(uint32_t reg_value, uint32_t mask,
+					      uint8_t shift)
+{
+	return (mask & reg_value) >> shift;
+}
+
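+/*
+ * The variadic arguments to the update/set helpers are consumed as
+ * (shift, mask, value) triples, one per field; n is the field count.
+ */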
+void dmub_reg_update(struct dmub_srv *srv, uint32_t addr, int n, uint8_t shift1,
+		     uint32_t mask1, uint32_t field_value1, ...)
+{
+	struct dmub_reg_value_masks field_value_mask = { 0 };
+	uint32_t reg_val;
+	va_list ap;
+
+	va_start(ap, field_value1);
+	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
+			     field_value1, ap);
+	va_end(ap);
+
+	reg_val = srv->funcs.reg_read(srv->user_ctx, addr);
+	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
+	srv->funcs.reg_write(srv->user_ctx, addr, reg_val);
+}
+
+void dmub_reg_set(struct dmub_srv *srv, uint32_t addr, uint32_t reg_val, int n,
+		  uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...)
+{
+	struct dmub_reg_value_masks field_value_mask = { 0 };
+	va_list ap;
+
+	va_start(ap, field_value1);
+	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
+			     field_value1, ap);
+	va_end(ap);
+
+	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
+	srv->funcs.reg_write(srv->user_ctx, addr, reg_val);
+}
+
+void dmub_reg_get(struct dmub_srv *srv, uint32_t addr, uint8_t shift,
+		  uint32_t mask, uint32_t *field_value)
+{
+	uint32_t reg_val = srv->funcs.reg_read(srv->user_ctx, addr);
+	*field_value = get_reg_field_value_ex(reg_val, mask, shift);
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h
new file mode 100644
index 0000000..c1f4030
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_REG_H_
+#define _DMUB_REG_H_
+
+#include "../inc/dmub_types.h"
+
+struct dmub_srv;
+
+/* Register offset and field lookup. */
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define REG_OFFSET(reg_name) (BASE(mm##reg_name##_BASE_IDX) + mm##reg_name)
+
+#define FD_SHIFT(reg_name, field) reg_name##__##field##__SHIFT
+
+#define FD_MASK(reg_name, field) reg_name##__##field##_MASK
+
+#define REG(reg) (REGS)->offset.reg
+
+#define FD(reg_field) (REGS)->shift.reg_field, (REGS)->mask.reg_field
+
+#define FN(reg_name, field) FD(reg_name##__##field)
+
+/* Register reads and writes. */
+
+#define REG_READ(reg) ((CTX)->funcs.reg_read((CTX)->user_ctx, REG(reg)))
+
+#define REG_WRITE(reg, val) \
+	((CTX)->funcs.reg_write((CTX)->user_ctx, REG(reg), (val)))
+
+/* Register field setting. */
+
+#define REG_SET_N(reg_name, n, initial_val, ...) \
+	dmub_reg_set(CTX, REG(reg_name), initial_val, n, __VA_ARGS__)
+
+#define REG_SET(reg_name, initial_val, field, val) \
+		REG_SET_N(reg_name, 1, initial_val, \
+				FN(reg_name, field), val)
+
+#define REG_SET_2(reg, init_value, f1, v1, f2, v2) \
+		REG_SET_N(reg, 2, init_value, \
+				FN(reg, f1), v1, \
+				FN(reg, f2), v2)
+
+#define REG_SET_3(reg, init_value, f1, v1, f2, v2, f3, v3) \
+		REG_SET_N(reg, 3, init_value, \
+				FN(reg, f1), v1, \
+				FN(reg, f2), v2, \
+				FN(reg, f3), v3)
+
+#define REG_SET_4(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4) \
+		REG_SET_N(reg, 4, init_value, \
+				FN(reg, f1), v1, \
+				FN(reg, f2), v2, \
+				FN(reg, f3), v3, \
+				FN(reg, f4), v4)
+
+/* Register field updating. */
+
+#define REG_UPDATE_N(reg_name, n, ...)\
+		dmub_reg_update(CTX, REG(reg_name), n, __VA_ARGS__)
+
+#define REG_UPDATE(reg_name, field, val)	\
+		REG_UPDATE_N(reg_name, 1, \
+				FN(reg_name, field), val)
+
+#define REG_UPDATE_2(reg, f1, v1, f2, v2)	\
+		REG_UPDATE_N(reg, 2,\
+				FN(reg, f1), v1,\
+				FN(reg, f2), v2)
+
+#define REG_UPDATE_3(reg, f1, v1, f2, v2, f3, v3) \
+		REG_UPDATE_N(reg, 3, \
+				FN(reg, f1), v1, \
+				FN(reg, f2), v2, \
+				FN(reg, f3), v3)
+
+#define REG_UPDATE_4(reg, f1, v1, f2, v2, f3, v3, f4, v4) \
+		REG_UPDATE_N(reg, 4, \
+				FN(reg, f1), v1, \
+				FN(reg, f2), v2, \
+				FN(reg, f3), v3, \
+				FN(reg, f4), v4)
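+
+/*
+ * Example: REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1)
+ * performs a single read-modify-write of DMCUB_CNTL that sets both fields.
+ */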
+
+/* Register field getting. */
+
+#define REG_GET(reg_name, field, val) \
+	dmub_reg_get(CTX, REG(reg_name), FN(reg_name, field), val)
+
+void dmub_reg_set(struct dmub_srv *srv, uint32_t addr, uint32_t reg_val, int n,
+		  uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+
+void dmub_reg_update(struct dmub_srv *srv, uint32_t addr, int n, uint8_t shift1,
+		     uint32_t mask1, uint32_t field_value1, ...);
+
+void dmub_reg_get(struct dmub_srv *srv, uint32_t addr, uint8_t shift,
+		  uint32_t mask, uint32_t *field_value);
+
+#endif /* _DMUB_REG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
new file mode 100644
index 0000000..85a518b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -0,0 +1,524 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "../inc/dmub_srv.h"
+#include "dmub_dcn20.h"
+#include "dmub_dcn21.h"
+#include "dmub_fw_meta.h"
+#include "os_types.h"
+/*
+ * Note: the DMUB service is standalone. No additional headers should be
+ * added below or above this line unless they reside within the DMUB
+ * folder.
+ */
+
+/* Alignment for framebuffer memory. */
+#define DMUB_FB_ALIGNMENT (1024 * 1024)
+
+/* Stack size. */
+#define DMUB_STACK_SIZE (128 * 1024)
+
+/* Context size. */
+#define DMUB_CONTEXT_SIZE (512 * 1024)
+
+/* Mailbox size */
+#define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE)
+
+/* Default state size if meta is absent. */
+#define DMUB_FW_STATE_SIZE (1024)
+
+/* Default tracebuffer size if meta is absent. */
+#define DMUB_TRACE_BUFFER_SIZE (1024)
+
+/* Number of windows in use. */
+#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1)
+
+/* Base addresses. */
+#define DMUB_CW0_BASE (0x60000000)
+#define DMUB_CW1_BASE (0x61000000)
+#define DMUB_CW3_BASE (0x63000000)
+#define DMUB_CW5_BASE (0x65000000)
+#define DMUB_CW6_BASE (0x66000000)
+
+static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
+{
+	return (val + factor - 1) / factor * factor;
+}
+
+static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
+{
+	const uint8_t *base = (const uint8_t *)fb->cpu_addr;
+	uint8_t buf[64];
+	uint32_t pos, end;
+
+	/**
+	 * Read 64-byte chunks since we don't want to store a
+	 * large temporary buffer for this purpose.
+	 */
+	end = fb->size / sizeof(buf) * sizeof(buf);
+
+	for (pos = 0; pos < end; pos += sizeof(buf))
+		dmub_memcpy(buf, base + pos, sizeof(buf));
+
+	/* Read anything leftover into the buffer. */
+	if (end < fb->size)
+		dmub_memcpy(buf, base + pos, fb->size - end);
+}
+
+static const struct dmub_fw_meta_info *
+dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size)
+{
+	const union dmub_fw_meta *meta;
+
+	if (fw_bss_data == NULL)
+		return NULL;
+
+	if (fw_bss_data_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET)
+		return NULL;
+
+	meta = (const union dmub_fw_meta *)(fw_bss_data + fw_bss_data_size -
+					    DMUB_FW_META_OFFSET -
+					    sizeof(union dmub_fw_meta));
+
+	if (meta->info.magic_value != DMUB_FW_META_MAGIC)
+		return NULL;
+
+	return &meta->info;
+}
+
+static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
+{
+	struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
+
+	switch (asic) {
+	case DMUB_ASIC_DCN20:
+	case DMUB_ASIC_DCN21:
+		dmub->regs = &dmub_srv_dcn20_regs;
+
+		funcs->reset = dmub_dcn20_reset;
+		funcs->reset_release = dmub_dcn20_reset_release;
+		funcs->backdoor_load = dmub_dcn20_backdoor_load;
+		funcs->setup_windows = dmub_dcn20_setup_windows;
+		funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
+		funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
+		funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
+		funcs->is_supported = dmub_dcn20_is_supported;
+		funcs->is_hw_init = dmub_dcn20_is_hw_init;
+
+		if (asic == DMUB_ASIC_DCN21) {
+			dmub->regs = &dmub_srv_dcn21_regs;
+
+			funcs->is_auto_load_done = dmub_dcn21_is_auto_load_done;
+			funcs->is_phy_init = dmub_dcn21_is_phy_init;
+		}
+		break;
+
+	default:
+		return false;
+	}
+
+	return true;
+}
+
+enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
+				 const struct dmub_srv_create_params *params)
+{
+	enum dmub_status status = DMUB_STATUS_OK;
+
+	dmub_memset(dmub, 0, sizeof(*dmub));
+
+	dmub->funcs = params->funcs;
+	dmub->user_ctx = params->user_ctx;
+	dmub->asic = params->asic;
+	dmub->is_virtual = params->is_virtual;
+
+	/* Setup asic dependent hardware funcs. */
+	if (!dmub_srv_hw_setup(dmub, params->asic)) {
+		status = DMUB_STATUS_INVALID;
+		goto cleanup;
+	}
+
+	/* Override (some) hardware funcs based on user params. */
+	if (params->hw_funcs) {
+		if (params->hw_funcs->get_inbox1_rptr)
+			dmub->hw_funcs.get_inbox1_rptr =
+				params->hw_funcs->get_inbox1_rptr;
+
+		if (params->hw_funcs->set_inbox1_wptr)
+			dmub->hw_funcs.set_inbox1_wptr =
+				params->hw_funcs->set_inbox1_wptr;
+
+		if (params->hw_funcs->is_supported)
+			dmub->hw_funcs.is_supported =
+				params->hw_funcs->is_supported;
+	}
+
+	/* Sanity checks for required hw func pointers. */
+	if (!dmub->hw_funcs.get_inbox1_rptr ||
+	    !dmub->hw_funcs.set_inbox1_wptr) {
+		status = DMUB_STATUS_INVALID;
+		goto cleanup;
+	}
+
+cleanup:
+	if (status == DMUB_STATUS_OK)
+		dmub->sw_init = true;
+	else
+		dmub_srv_destroy(dmub);
+
+	return status;
+}
+
+void dmub_srv_destroy(struct dmub_srv *dmub)
+{
+	dmub_memset(dmub, 0, sizeof(*dmub));
+}
+
+enum dmub_status
+dmub_srv_calc_region_info(struct dmub_srv *dmub,
+			  const struct dmub_srv_region_params *params,
+			  struct dmub_srv_region_info *out)
+{
+	struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST];
+	struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK];
+	struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA];
+	struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS];
+	struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
+	struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
+	struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
+	const struct dmub_fw_meta_info *fw_info;
+	uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
+	uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+
+	if (!dmub->sw_init)
+		return DMUB_STATUS_INVALID;
+
+	memset(out, 0, sizeof(*out));
+
+	out->num_regions = DMUB_NUM_WINDOWS;
+
+	inst->base = 0x0;
+	inst->top = inst->base + params->inst_const_size;
+
+	data->base = dmub_align(inst->top, 256);
+	data->top = data->base + params->bss_data_size;
+
+	/*
+	 * All cache windows below should be aligned to the size
+	 * of the DMCUB cache line, 64 bytes.
+	 */
+
+	stack->base = dmub_align(data->top, 256);
+	stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
+
+	bios->base = dmub_align(stack->top, 256);
+	bios->top = bios->base + params->vbios_size;
+
+	mail->base = dmub_align(bios->top, 256);
+	mail->top = mail->base + DMUB_MAILBOX_SIZE;
+
+	fw_info = dmub_get_fw_meta_info(params->fw_bss_data,
+					params->bss_data_size);
+
+	if (fw_info) {
+		fw_state_size = fw_info->fw_region_size;
+		trace_buffer_size = fw_info->trace_buffer_size;
+	}
+
+	trace_buff->base = dmub_align(mail->top, 256);
+	trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
+
+	fw_state->base = dmub_align(trace_buff->top, 256);
+	fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);
+
+	out->fb_size = dmub_align(fw_state->top, 4096);
+
+	return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+				       const struct dmub_srv_fb_params *params,
+				       struct dmub_srv_fb_info *out)
+{
+	uint8_t *cpu_base;
+	uint64_t gpu_base;
+	uint32_t i;
+
+	if (!dmub->sw_init)
+		return DMUB_STATUS_INVALID;
+
+	memset(out, 0, sizeof(*out));
+
+	if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
+		return DMUB_STATUS_INVALID;
+
+	cpu_base = (uint8_t *)params->cpu_addr;
+	gpu_base = params->gpu_addr;
+
+	for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
+		const struct dmub_region *reg =
+			&params->region_info->regions[i];
+
+		out->fb[i].cpu_addr = cpu_base + reg->base;
+		out->fb[i].gpu_addr = gpu_base + reg->base;
+		out->fb[i].size = reg->top - reg->base;
+	}
+
+	out->num_fb = DMUB_NUM_WINDOWS;
+
+	return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
+					 bool *is_supported)
+{
+	*is_supported = false;
+
+	if (!dmub->sw_init)
+		return DMUB_STATUS_INVALID;
+
+	if (dmub->hw_funcs.is_supported)
+		*is_supported = dmub->hw_funcs.is_supported(dmub);
+
+	return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init)
+{
+	*is_hw_init = false;
+
+	if (!dmub->sw_init)
+		return DMUB_STATUS_INVALID;
+
+	if (!dmub->hw_init)
+		return DMUB_STATUS_OK;
+
+	if (dmub->hw_funcs.is_hw_init)
+		*is_hw_init = dmub->hw_funcs.is_hw_init(dmub);
+
+	return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+				  const struct dmub_srv_hw_params *params)
+{
+	struct dmub_fb *inst_fb = params->fb[DMUB_WINDOW_0_INST_CONST];
+	struct dmub_fb *stack_fb = params->fb[DMUB_WINDOW_1_STACK];
+	struct dmub_fb *data_fb = params->fb[DMUB_WINDOW_2_BSS_DATA];
+	struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS];
+	struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
+	struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
+	struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
+
+	struct dmub_rb_init_params rb_params;
+	struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
+	struct dmub_region inbox1;
+
+	if (!dmub->sw_init)
+		return DMUB_STATUS_INVALID;
+
+	dmub->fb_base = params->fb_base;
+	dmub->fb_offset = params->fb_offset;
+	dmub->psp_version = params->psp_version;
+
+	if (inst_fb && data_fb) {
+		cw0.offset.quad_part = inst_fb->gpu_addr;
+		cw0.region.base = DMUB_CW0_BASE;
+		cw0.region.top = cw0.region.base + inst_fb->size - 1;
+
+		cw1.offset.quad_part = stack_fb->gpu_addr;
+		cw1.region.base = DMUB_CW1_BASE;
+		cw1.region.top = cw1.region.base + stack_fb->size - 1;
+
+		/**
+		 * Read back all the instruction memory so we don't hang the
+		 * DMCUB when backdoor loading if the write from x86 hasn't been
+		 * flushed yet. This only occurs in backdoor loading.
+		 */
+		dmub_flush_buffer_mem(inst_fb);
+
+		if (params->load_inst_const && dmub->hw_funcs.backdoor_load)
+			dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
+	}
+
+	if (dmub->hw_funcs.reset)
+		dmub->hw_funcs.reset(dmub);
+
+	if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
+	    fw_state_fb) {
+		cw2.offset.quad_part = data_fb->gpu_addr;
+		cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
+		cw2.region.top = cw2.region.base + data_fb->size;
+
+		cw3.offset.quad_part = bios_fb->gpu_addr;
+		cw3.region.base = DMUB_CW3_BASE;
+		cw3.region.top = cw3.region.base + bios_fb->size;
+
+		cw4.offset.quad_part = mail_fb->gpu_addr;
+		cw4.region.base = cw3.region.top + 1;
+		cw4.region.top = cw4.region.base + mail_fb->size;
+
+		inbox1.base = cw4.region.base;
+		inbox1.top = cw4.region.top;
+
+		cw5.offset.quad_part = tracebuff_fb->gpu_addr;
+		cw5.region.base = DMUB_CW5_BASE;
+		cw5.region.top = cw5.region.base + tracebuff_fb->size;
+
+		cw6.offset.quad_part = fw_state_fb->gpu_addr;
+		cw6.region.base = DMUB_CW6_BASE;
+		cw6.region.top = cw6.region.base + fw_state_fb->size;
+
+		dmub->fw_state = fw_state_fb->cpu_addr;
+
+		if (dmub->hw_funcs.setup_windows)
+			dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4,
+						     &cw5, &cw6);
+
+		if (dmub->hw_funcs.setup_mailbox)
+			dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
+	}
+
+	if (mail_fb) {
+		dmub_memset(&rb_params, 0, sizeof(rb_params));
+		rb_params.ctx = dmub;
+		rb_params.base_address = mail_fb->cpu_addr;
+		rb_params.capacity = DMUB_RB_SIZE;
+
+		dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+	}
+
+	if (dmub->hw_funcs.reset_release)
+		dmub->hw_funcs.reset_release(dmub);
+
+	dmub->hw_init = true;
+
+	return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
+{
+	if (!dmub->sw_init)
+		return DMUB_STATUS_INVALID;
+
+	if (dmub->hw_init == false)
+		return DMUB_STATUS_OK;
+
+	if (dmub->hw_funcs.reset)
+		dmub->hw_funcs.reset(dmub);
+
+	dmub->hw_init = false;
+
+	return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+				    const struct dmub_cmd_header *cmd)
+{
+	if (!dmub->hw_init)
+		return DMUB_STATUS_INVALID;
+
+	if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+		return DMUB_STATUS_OK;
+
+	return DMUB_STATUS_QUEUE_FULL;
+}
+
+enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
+{
+	if (!dmub->hw_init)
+		return DMUB_STATUS_INVALID;
+
+	/**
+	 * Read back all the queued commands to ensure that they've
+	 * been flushed to framebuffer memory. Otherwise DMCUB might
+	 * read back stale, fully invalid or partially invalid data.
+	 */
+	dmub_rb_flush_pending(&dmub->inbox1_rb);
+
+	dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+	return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
+					     uint32_t timeout_us)
+{
+	uint32_t i;
+
+	if (!dmub->hw_init)
+		return DMUB_STATUS_INVALID;
+
+	if (!dmub->hw_funcs.is_auto_load_done)
+		return DMUB_STATUS_OK;
+
+	for (i = 0; i <= timeout_us; i += 100) {
+		if (dmub->hw_funcs.is_auto_load_done(dmub))
+			return DMUB_STATUS_OK;
+
+		udelay(100);
+	}
+
+	return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
+					    uint32_t timeout_us)
+{
+	uint32_t i = 0;
+
+	if (!dmub->hw_init)
+		return DMUB_STATUS_INVALID;
+
+	if (!dmub->hw_funcs.is_phy_init)
+		return DMUB_STATUS_OK;
+
+	for (i = 0; i <= timeout_us; i += 10) {
+		if (dmub->hw_funcs.is_phy_init(dmub))
+			return DMUB_STATUS_OK;
+
+		udelay(10);
+	}
+
+	return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
+					uint32_t timeout_us)
+{
+	uint32_t i;
+
+	if (!dmub->hw_init)
+		return DMUB_STATUS_INVALID;
+
+	for (i = 0; i <= timeout_us; ++i) {
+		dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+		if (dmub_rb_empty(&dmub->inbox1_rb))
+			return DMUB_STATUS_OK;
+
+		udelay(1);
+	}
+
+	return DMUB_STATUS_TIMEOUT;
+}
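dmub_srv_calc_region_info() above lays the seven cache windows out back to back: each region's base is the previous region's top rounded up with dmub_align(), and the total framebuffer size is the final top rounded up to 4 KiB. A stripped-down sketch of that round-up-and-stack pattern, illustrative only and not the driver code:

#include <stdint.h>

/* Round val up to the next multiple of factor, as dmub_align() does. */
static uint32_t align_up(uint32_t val, uint32_t factor)
{
	return (val + factor - 1) / factor * factor;
}

struct example_region {
	uint32_t base;
	uint32_t top;
};

/* Stack two regions and return the total size, mirroring the layout logic. */
static uint32_t layout_two_regions(struct example_region *a,
				   struct example_region *b,
				   uint32_t size_a, uint32_t size_b)
{
	a->base = 0;
	a->top = a->base + size_a;

	b->base = align_up(a->top, 256);	/* 256-byte aligned window base */
	b->top = b->base + size_b;

	return align_up(b->top, 4096);		/* e.g. align_up(5000, 4096) == 8192 */
}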
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 1be6c44..a290398 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -134,23 +134,34 @@
 #define PICASSO_A0 0x41
 /* DCN1_01 */
 #define RAVEN2_A0 0x81
+#define RAVEN2_15D8_REV_94 0x94
+#define RAVEN2_15D8_REV_95 0x95
+#define RAVEN2_15D8_REV_E3 0xE3
+#define RAVEN2_15D8_REV_E4 0xE4
+#define RAVEN2_15D8_REV_E9 0xE9
+#define RAVEN2_15D8_REV_EA 0xEA
+#define RAVEN2_15D8_REV_EB 0xEB
 #define RAVEN1_F0 0xF0
 #define RAVEN_UNKNOWN 0xFF
-
-#define PICASSO_15D8_REV_E3 0xE3
-#define PICASSO_15D8_REV_E4 0xE4
-
+#ifndef ASICREV_IS_RAVEN
 #define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
+#endif
+
 #define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
-#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < PICASSO_15D8_REV_E3))
-#define ASICREV_IS_DALI(eChipRev) ((eChipRev >= PICASSO_15D8_REV_E3) && (eChipRev < RAVEN1_F0))
-
+#ifndef ASICREV_IS_RAVEN2
+#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0))
+#endif
 #define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
-
+#define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \
+		|| (eChipRev == RAVEN2_15D8_REV_E4))
+#define ASICREV_IS_POLLOCK(eChipRev) (eChipRev == RAVEN2_15D8_REV_94 \
+		|| eChipRev == RAVEN2_15D8_REV_95 \
+		|| eChipRev == RAVEN2_15D8_REV_E9 \
+		|| eChipRev == RAVEN2_15D8_REV_EA \
+		|| eChipRev == RAVEN2_15D8_REV_EB)
 
 #define FAMILY_RV 142 /* DCN 1*/
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 
 #define FAMILY_NV 143 /* DCN 2*/
 
@@ -164,12 +175,9 @@ enum {
 #define ASICREV_IS_NAVI10_P(eChipRev)        (eChipRev < NV_NAVI12_P_A0)
 #define ASICREV_IS_NAVI12_P(eChipRev)        ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0))
 #define ASICREV_IS_NAVI14_M(eChipRev)        ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #define RENOIR_A0 0x91
 #define DEVICE_ID_RENOIR_1636 0x1636   // Renoir
 #define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF))
-#endif
 
 /*
  * ASIC chip ID
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index fcc4237..0b68591 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -46,12 +46,8 @@ enum dce_version {
 	DCE_VERSION_MAX,
 	DCN_VERSION_1_0,
 	DCN_VERSION_1_01,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	DCN_VERSION_2_0,
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	DCN_VERSION_2_1,
-#endif
 	DCN_VERSION_MAX
 };
 
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index f312834..d51de94 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -178,7 +178,8 @@ struct dc_firmware_info {
 	uint32_t default_engine_clk; /* in KHz */
 	uint32_t dp_phy_ref_clk; /* in KHz - DCE12 only */
 	uint32_t i2c_engine_ref_clk; /* in KHz - DCE12 only */
-
+	bool oem_i2c_present;
+	uint8_t oem_i2c_obj_id;
 
 };
 
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 876b0b3..4869d45 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -123,6 +123,13 @@ enum dp_test_pattern {
 	DP_TEST_PATTERN_UNSUPPORTED
 };
 
+enum dp_test_pattern_color_space {
+	DP_TEST_PATTERN_COLOR_SPACE_RGB,
+	DP_TEST_PATTERN_COLOR_SPACE_YCBCR601,
+	DP_TEST_PATTERN_COLOR_SPACE_YCBCR709,
+	DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED
+};
+
 enum dp_panel_mode {
 	/* not required */
 	DP_PANEL_MODE_DEFAULT,
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 2b219cd..89a7092 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -66,12 +66,8 @@
 #define DC_LOG_GAMMA(...) pr_debug("[GAMMA]:"__VA_ARGS__)
 #define DC_LOG_ALL_GAMMA(...) pr_debug("[GAMMA]:"__VA_ARGS__)
 #define DC_LOG_ALL_TF_CHANNELS(...) pr_debug("[GAMMA]:"__VA_ARGS__)
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 #define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN3_0) || defined(CONFIG_DRM_AMD_DC_DCN2_0)
 #define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#endif
 
 struct dal_logger;
 
@@ -116,9 +112,7 @@ enum dc_log_type {
 	LOG_PERF_TRACE,
 	LOG_DISPLAYSTATS,
 	LOG_HDMI_RETIMER_REDRIVER,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	LOG_DSC,
-#endif
 	LOG_DWB,
 	LOG_GAMMA_DEBUG,
 	LOG_MAX_HW_POINTS,
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 1de4805..cac09d5 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -154,6 +154,7 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
 
 	struct fixed31_32 l_pow_m1;
 	struct fixed31_32 base, div;
+	struct fixed31_32 base2;
 
 
 	if (dc_fixpt_lt(in_x, dc_fixpt_zero))
@@ -163,13 +164,15 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
 			dc_fixpt_div(dc_fixpt_one, m2));
 	base = dc_fixpt_sub(l_pow_m1, c1);
 
-	if (dc_fixpt_lt(base, dc_fixpt_zero))
-		base = dc_fixpt_zero;
-
 	div = dc_fixpt_sub(c2, dc_fixpt_mul(c3, l_pow_m1));
 
-	*out_y = dc_fixpt_pow(dc_fixpt_div(base, div),
-			dc_fixpt_div(dc_fixpt_one, m1));
+	base2 = dc_fixpt_div(base, div);
+	//avoid complex numbers
+	if (dc_fixpt_lt(base2, dc_fixpt_zero))
+		base2 = dc_fixpt_sub(dc_fixpt_zero, base2);
+
+
+	*out_y = dc_fixpt_pow(base2, dc_fixpt_div(dc_fixpt_one, m1));
 
 }
 
@@ -361,8 +364,10 @@ static struct fixed31_32 translate_from_linear_space(
 			scratch_2 = dc_fixpt_mul(gamma_of_2,
 					pow_buffer[pow_buffer_ptr%16]);
 
-		pow_buffer[pow_buffer_ptr%16] = scratch_2;
-		pow_buffer_ptr++;
+		if (pow_buffer_ptr != -1) {
+			pow_buffer[pow_buffer_ptr%16] = scratch_2;
+			pow_buffer_ptr++;
+		}
 
 		scratch_1 = dc_fixpt_mul(scratch_1, scratch_2);
 		scratch_1 = dc_fixpt_sub(scratch_1, args->a2);
@@ -937,7 +942,6 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
 	struct fixed31_32 max_display;
 	struct fixed31_32 min_display;
 	struct fixed31_32 max_content;
-	struct fixed31_32 min_content;
 	struct fixed31_32 clip = dc_fixpt_one;
 	struct fixed31_32 output;
 	bool use_eetf = false;
@@ -951,7 +955,6 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
 	max_display = dc_fixpt_from_int(fs_params->max_display);
 	min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);
 	max_content = dc_fixpt_from_int(fs_params->max_content);
-	min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000);
 	sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
 
 	if (fs_params->min_display > 1000) // cap at 0.1 at the bottom
@@ -1670,129 +1673,6 @@ static bool map_regamma_hw_to_x_user(
 
 #define _EXTRA_POINTS 3
 
-bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
-		const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
-		const struct freesync_hdr_tf_params *fs_params)
-{
-	struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
-	struct dividers dividers;
-
-	struct pwl_float_data *rgb_user = NULL;
-	struct pwl_float_data_ex *rgb_regamma = NULL;
-	struct gamma_pixel *axis_x = NULL;
-	struct pixel_gamma_point *coeff = NULL;
-	enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
-	bool ret = false;
-
-	if (output_tf->type == TF_TYPE_BYPASS)
-		return false;
-
-	/* we can use hardcoded curve for plain SRGB TF */
-	if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
-			output_tf->tf == TRANSFER_FUNCTION_SRGB) {
-		if (ramp == NULL)
-			return true;
-		if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) ||
-				(!mapUserRamp && ramp->type == GAMMA_RGB_256))
-			return true;
-	}
-
-	output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
-
-	if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
-			(mapUserRamp || ramp->type != GAMMA_RGB_256)) {
-		rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
-			    sizeof(*rgb_user),
-			    GFP_KERNEL);
-		if (!rgb_user)
-			goto rgb_user_alloc_fail;
-
-		axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x),
-				GFP_KERNEL);
-		if (!axis_x)
-			goto axis_x_alloc_fail;
-
-		dividers.divider1 = dc_fixpt_from_fraction(3, 2);
-		dividers.divider2 = dc_fixpt_from_int(2);
-		dividers.divider3 = dc_fixpt_from_fraction(5, 2);
-
-		build_evenly_distributed_points(
-				axis_x,
-				ramp->num_entries,
-				dividers);
-
-		if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
-			scale_gamma(rgb_user, ramp, dividers);
-		else if (ramp->type == GAMMA_RGB_FLOAT_1024)
-			scale_gamma_dx(rgb_user, ramp, dividers);
-	}
-
-	rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
-			       sizeof(*rgb_regamma),
-			       GFP_KERNEL);
-	if (!rgb_regamma)
-		goto rgb_regamma_alloc_fail;
-
-	coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
-			 GFP_KERNEL);
-	if (!coeff)
-		goto coeff_alloc_fail;
-
-	tf = output_tf->tf;
-	if (tf == TRANSFER_FUNCTION_PQ) {
-		tf_pts->end_exponent = 7;
-		tf_pts->x_point_at_y1_red = 125;
-		tf_pts->x_point_at_y1_green = 125;
-		tf_pts->x_point_at_y1_blue = 125;
-
-		build_pq(rgb_regamma,
-				MAX_HW_POINTS,
-				coordinates_x,
-				output_tf->sdr_ref_white_level);
-	} else if (tf == TRANSFER_FUNCTION_GAMMA22 &&
-			fs_params != NULL && fs_params->skip_tm == 0) {
-		build_freesync_hdr(rgb_regamma,
-				MAX_HW_POINTS,
-				coordinates_x,
-				fs_params);
-	} else if (tf == TRANSFER_FUNCTION_HLG) {
-		build_freesync_hdr(rgb_regamma,
-				MAX_HW_POINTS,
-				coordinates_x,
-				fs_params);
-
-	} else {
-		tf_pts->end_exponent = 0;
-		tf_pts->x_point_at_y1_red = 1;
-		tf_pts->x_point_at_y1_green = 1;
-		tf_pts->x_point_at_y1_blue = 1;
-
-		build_regamma(rgb_regamma,
-				MAX_HW_POINTS,
-				coordinates_x, tf);
-	}
-	map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
-			coordinates_x, axis_x, rgb_regamma,
-			MAX_HW_POINTS, tf_pts,
-			(mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) &&
-			(ramp && ramp->type != GAMMA_CS_TFM_1D));
-
-	if (ramp && ramp->type == GAMMA_CS_TFM_1D)
-		apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
-
-	ret = true;
-
-	kvfree(coeff);
-coeff_alloc_fail:
-	kvfree(rgb_regamma);
-rgb_regamma_alloc_fail:
-	kvfree(axis_x);
-axis_x_alloc_fail:
-	kvfree(rgb_user);
-rgb_user_alloc_fail:
-	return ret;
-}
-
 bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
 		const struct regamma_lut *regamma)
 {
@@ -2000,10 +1880,28 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 	tf_pts->x_point_at_y1_green = 1;
 	tf_pts->x_point_at_y1_blue = 1;
 
-	map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
-			coordinates_x, axis_x, curve,
-			MAX_HW_POINTS, tf_pts,
-			mapUserRamp && ramp && ramp->type == GAMMA_RGB_256);
+	if (input_tf->tf == TRANSFER_FUNCTION_PQ) {
+		/* just copy the current rgb_regamma into tf_pts */
+		struct pwl_float_data_ex *curvePt = curve;
+		int i = 0;
+
+		while (i <= MAX_HW_POINTS) {
+			tf_pts->red[i]   = curvePt->r;
+			tf_pts->green[i] = curvePt->g;
+			tf_pts->blue[i]  = curvePt->b;
+			++curvePt;
+			++i;
+		}
+	} else {
+		//clamps to 0-1
+		map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
+				coordinates_x, axis_x, curve,
+				MAX_HW_POINTS, tf_pts,
+				mapUserRamp && ramp && ramp->type == GAMMA_RGB_256);
+	}
+
+
+
 	if (ramp->type == GAMMA_CUSTOM)
 		apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
 
@@ -2022,14 +1920,14 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 	return ret;
 }
 
-
-bool  mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+static bool calculate_curve(enum dc_transfer_func_predefined trans,
 				struct dc_transfer_func_distributed_points *points,
+				struct pwl_float_data_ex *rgb_regamma,
+				const struct freesync_hdr_tf_params *fs_params,
 				uint32_t sdr_ref_white_level)
 {
 	uint32_t i;
 	bool ret = false;
-	struct pwl_float_data_ex *rgb_regamma = NULL;
 
 	if (trans == TRANSFER_FUNCTION_UNITY ||
 		trans == TRANSFER_FUNCTION_LINEAR) {
@@ -2039,68 +1937,33 @@ bool  mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
 		points->x_point_at_y1_blue = 1;
 
 		for (i = 0; i <= MAX_HW_POINTS ; i++) {
-			points->red[i]    = coordinates_x[i].x;
-			points->green[i]  = coordinates_x[i].x;
-			points->blue[i]   = coordinates_x[i].x;
+			rgb_regamma[i].r = coordinates_x[i].x;
+			rgb_regamma[i].g = coordinates_x[i].x;
+			rgb_regamma[i].b = coordinates_x[i].x;
 		}
+
 		ret = true;
 	} else if (trans == TRANSFER_FUNCTION_PQ) {
-		rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
-				       sizeof(*rgb_regamma),
-				       GFP_KERNEL);
-		if (!rgb_regamma)
-			goto rgb_regamma_alloc_fail;
 		points->end_exponent = 7;
 		points->x_point_at_y1_red = 125;
 		points->x_point_at_y1_green = 125;
 		points->x_point_at_y1_blue = 125;
 
-
 		build_pq(rgb_regamma,
 				MAX_HW_POINTS,
 				coordinates_x,
 				sdr_ref_white_level);
-		for (i = 0; i <= MAX_HW_POINTS ; i++) {
-			points->red[i]    = rgb_regamma[i].r;
-			points->green[i]  = rgb_regamma[i].g;
-			points->blue[i]   = rgb_regamma[i].b;
-		}
+
 		ret = true;
-
-		kvfree(rgb_regamma);
-	} else if (trans == TRANSFER_FUNCTION_SRGB ||
-		trans == TRANSFER_FUNCTION_BT709 ||
-		trans == TRANSFER_FUNCTION_GAMMA22 ||
-		trans == TRANSFER_FUNCTION_GAMMA24 ||
-		trans == TRANSFER_FUNCTION_GAMMA26) {
-		rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
-				       sizeof(*rgb_regamma),
-				       GFP_KERNEL);
-		if (!rgb_regamma)
-			goto rgb_regamma_alloc_fail;
-		points->end_exponent = 0;
-		points->x_point_at_y1_red = 1;
-		points->x_point_at_y1_green = 1;
-		points->x_point_at_y1_blue = 1;
-
-		build_regamma(rgb_regamma,
+	} else if (trans == TRANSFER_FUNCTION_GAMMA22 &&
+			fs_params != NULL && fs_params->skip_tm == 0) {
+		build_freesync_hdr(rgb_regamma,
 				MAX_HW_POINTS,
 				coordinates_x,
-				trans);
-		for (i = 0; i <= MAX_HW_POINTS ; i++) {
-			points->red[i]    = rgb_regamma[i].r;
-			points->green[i]  = rgb_regamma[i].g;
-			points->blue[i]   = rgb_regamma[i].b;
-		}
-		ret = true;
+				fs_params);
 
-		kvfree(rgb_regamma);
+		ret = true;
 	} else if (trans == TRANSFER_FUNCTION_HLG) {
-		rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
-				       sizeof(*rgb_regamma),
-				       GFP_KERNEL);
-		if (!rgb_regamma)
-			goto rgb_regamma_alloc_fail;
 		points->end_exponent = 4;
 		points->x_point_at_y1_red = 12;
 		points->x_point_at_y1_green = 12;
@@ -2110,18 +1973,127 @@ bool  mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
 				MAX_HW_POINTS,
 				coordinates_x,
 				80, 1000);
-		for (i = 0; i <= MAX_HW_POINTS ; i++) {
-			points->red[i]    = rgb_regamma[i].r;
-			points->green[i]  = rgb_regamma[i].g;
-			points->blue[i]   = rgb_regamma[i].b;
-		}
+
 		ret = true;
-		kvfree(rgb_regamma);
+	} else {
+		// trans == TRANSFER_FUNCTION_SRGB
+		// trans == TRANSFER_FUNCTION_BT709
+		// trans == TRANSFER_FUNCTION_GAMMA22
+		// trans == TRANSFER_FUNCTION_GAMMA24
+		// trans == TRANSFER_FUNCTION_GAMMA26
+		points->end_exponent = 0;
+		points->x_point_at_y1_red = 1;
+		points->x_point_at_y1_green = 1;
+		points->x_point_at_y1_blue = 1;
+
+		build_regamma(rgb_regamma,
+				MAX_HW_POINTS,
+				coordinates_x,
+				trans);
+
+		ret = true;
 	}
-rgb_regamma_alloc_fail:
+
 	return ret;
 }
 
+bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+		const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
+		const struct freesync_hdr_tf_params *fs_params)
+{
+	struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
+	struct dividers dividers;
+
+	struct pwl_float_data *rgb_user = NULL;
+	struct pwl_float_data_ex *rgb_regamma = NULL;
+	struct gamma_pixel *axis_x = NULL;
+	struct pixel_gamma_point *coeff = NULL;
+	enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
+	bool ret = false;
+
+	if (output_tf->type == TF_TYPE_BYPASS)
+		return false;
+
+	/* we can use hardcoded curve for plain SRGB TF */
+	if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
+			output_tf->tf == TRANSFER_FUNCTION_SRGB) {
+		if (ramp == NULL)
+			return true;
+		if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) ||
+				(!mapUserRamp && ramp->type == GAMMA_RGB_256))
+			return true;
+	}
+
+	output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+
+	if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
+			(mapUserRamp || ramp->type != GAMMA_RGB_256)) {
+		rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
+			    sizeof(*rgb_user),
+			    GFP_KERNEL);
+		if (!rgb_user)
+			goto rgb_user_alloc_fail;
+
+		axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x),
+				GFP_KERNEL);
+		if (!axis_x)
+			goto axis_x_alloc_fail;
+
+		dividers.divider1 = dc_fixpt_from_fraction(3, 2);
+		dividers.divider2 = dc_fixpt_from_int(2);
+		dividers.divider3 = dc_fixpt_from_fraction(5, 2);
+
+		build_evenly_distributed_points(
+				axis_x,
+				ramp->num_entries,
+				dividers);
+
+		if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
+			scale_gamma(rgb_user, ramp, dividers);
+		else if (ramp->type == GAMMA_RGB_FLOAT_1024)
+			scale_gamma_dx(rgb_user, ramp, dividers);
+	}
+
+	rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+			       sizeof(*rgb_regamma),
+			       GFP_KERNEL);
+	if (!rgb_regamma)
+		goto rgb_regamma_alloc_fail;
+
+	coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
+			 GFP_KERNEL);
+	if (!coeff)
+		goto coeff_alloc_fail;
+
+	tf = output_tf->tf;
+
+	ret = calculate_curve(tf,
+			tf_pts,
+			rgb_regamma,
+			fs_params,
+			output_tf->sdr_ref_white_level);
+
+	if (ret) {
+		map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
+				coordinates_x, axis_x, rgb_regamma,
+				MAX_HW_POINTS, tf_pts,
+				(mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) &&
+				(ramp && ramp->type != GAMMA_CS_TFM_1D));
+
+		if (ramp && ramp->type == GAMMA_CS_TFM_1D)
+			apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
+	}
+
+	kvfree(coeff);
+coeff_alloc_fail:
+	kvfree(rgb_regamma);
+rgb_regamma_alloc_fail:
+	kvfree(axis_x);
+axis_x_alloc_fail:
+	kvfree(rgb_user);
+rgb_user_alloc_fail:
+	return ret;
+}
 
 bool  mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
 				struct dc_transfer_func_distributed_points *points)
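The compute_de_pq() change earlier in this file reorders the clamp in the inverse-PQ calculation: instead of clamping the numerator to zero before the division, it divides first and mirrors a negative quotient back to positive so the fractional power stays real. A floating-point sketch of the same step, with the ST 2084 constants written out for illustration; the driver uses the dc_fixpt fixed-point helpers instead:

#include <math.h>

/* SMPTE ST 2084 (PQ) constants in their usual normalized form; illustrative only. */
static const double m1 = 2610.0 / 16384.0;
static const double m2 = 2523.0 / 4096.0 * 128.0;
static const double c1 = 3424.0 / 4096.0;
static const double c2 = 2413.0 / 4096.0 * 32.0;
static const double c3 = 2392.0 / 4096.0 * 32.0;

/* Inverse PQ: map a non-linear PQ code value back to normalized linear light. */
static double de_pq(double x)
{
	double l_pow_m1 = pow(x, 1.0 / m2);
	double base = (l_pow_m1 - c1) / (c2 - c3 * l_pow_m1);

	if (base < 0.0)		/* mirror instead of clamping, avoiding pow() of a negative base */
		base = -base;

	return pow(base, 1.0 / m1);
}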
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 44ddea5..9994817 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -103,10 +103,6 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
 bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
 		const struct dc_gamma *ramp, bool mapUserRamp);
 
-bool mod_color_calculate_curve(enum dc_transfer_func_predefined  trans,
-		struct dc_transfer_func_distributed_points *points,
-		uint32_t sdr_ref_white_level);
-
 bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
 				struct dc_transfer_func_distributed_points *points);
 
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 5437b50..b9992eb 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,8 +37,8 @@
 #define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
 /* Number of elements in the render times cache array */
 #define RENDER_TIMES_MAX_COUNT 10
-/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
-#define BTR_EXIT_MARGIN 2000
+/* Threshold to enter/exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_MAX_MARGIN 2500
 /* Threshold to change BTR multiplier (to avoid frequent changes) */
 #define BTR_DRIFT_MARGIN 2000
 /*Threshold to exit fixed refresh rate*/
@@ -122,7 +122,7 @@ static unsigned int calc_v_total_from_refresh(
 		const struct dc_stream_state *stream,
 		unsigned int refresh_in_uhz)
 {
-	unsigned int v_total = stream->timing.v_total;
+	unsigned int v_total;
 	unsigned int frame_duration_in_ns;
 
 	frame_duration_in_ns =
@@ -254,24 +254,22 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 	unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
 	unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
 	unsigned int frames_to_insert = 0;
-	unsigned int min_frame_duration_in_ns = 0;
-	unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
 	unsigned int delta_from_mid_point_delta_in_us;
-
-	min_frame_duration_in_ns = ((unsigned int) (div64_u64(
-		(1000000000ULL * 1000000),
-		in_out_vrr->max_refresh_in_uhz)));
+	unsigned int max_render_time_in_us =
+			in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us;
 
 	/* Program BTR */
-	if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
+	if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) {
 		/* Exit Below the Range */
 		if (in_out_vrr->btr.btr_active) {
 			in_out_vrr->btr.frame_counter = 0;
 			in_out_vrr->btr.btr_active = false;
 		}
-	} else if (last_render_time_in_us > max_render_time_in_us) {
+	} else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
 		/* Enter Below the Range */
-		in_out_vrr->btr.btr_active = true;
+		if (!in_out_vrr->btr.btr_active) {
+			in_out_vrr->btr.btr_active = true;
+		}
 	}
 
 	/* BTR set to "not active" so disengage */
@@ -327,7 +325,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 		/* Choose number of frames to insert based on how close it
 		 * can get to the mid point of the variable range.
 		 */
-		if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+		if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
+				(delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
+						mid_point_frames_floor < 2)) {
 			frames_to_insert = mid_point_frames_ceil;
 			delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
 					delta_from_mid_point_in_us_1;
@@ -343,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 		if (in_out_vrr->btr.frames_to_insert != 0 &&
 				delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
 			if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
-					in_out_vrr->max_duration_in_us) &&
+					max_render_time_in_us) &&
 				((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
 					in_out_vrr->min_duration_in_us))
 				frames_to_insert = in_out_vrr->btr.frames_to_insert;
@@ -381,7 +381,7 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
 	bool update = false;
 	unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
 
-	//Compute the exit refresh rate and exit frame duration
+	/* Compute the exit refresh rate and exit frame duration */
 	unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us)
 			+ (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ));
 	unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz;
@@ -796,6 +796,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
 		refresh_range = in_out_vrr->max_refresh_in_uhz -
 				in_out_vrr->min_refresh_in_uhz;
 
+		in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
+				2 * in_out_vrr->min_duration_in_us;
+		if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
+			in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
+
 		in_out_vrr->supported = true;
 	}
 
@@ -811,6 +816,9 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
 	in_out_vrr->btr.inserted_duration_in_us = 0;
 	in_out_vrr->btr.frames_to_insert = 0;
 	in_out_vrr->btr.frame_counter = 0;
+	in_out_vrr->fixed.fixed_active = false;
+	in_out_vrr->fixed.target_refresh_in_uhz = 0;
+
 	in_out_vrr->btr.mid_point_in_us =
 				(in_out_vrr->min_duration_in_us +
 				 in_out_vrr->max_duration_in_us) / 2;
@@ -826,6 +834,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
 		in_out_vrr->adjust.v_total_max = stream->timing.v_total;
 	} else if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE &&
 			refresh_range >= MIN_REFRESH_RANGE_IN_US) {
+
 		in_out_vrr->adjust.v_total_min =
 			calc_v_total_from_refresh(stream,
 				in_out_vrr->max_refresh_in_uhz);
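The BTR (Below the Range) rework in this file replaces the fixed BTR_EXIT_MARGIN with a per-stream btr.margin_in_us, subtracts that margin from the maximum render time, and then uses half of it as hysteresis on both sides of the threshold. A small sketch of just that enter/exit decision, assuming the margin has already been derived as in mod_freesync_build_vrr_params():

#include <stdbool.h>
#include <stdint.h>

/*
 * Decide whether BTR should be active after this frame. max_duration_us and
 * margin_us correspond to in_out_vrr->max_duration_in_us and
 * in_out_vrr->btr.margin_in_us in the driver.
 */
static bool btr_active_after_frame(bool btr_active, uint32_t last_render_us,
				   uint32_t max_duration_us, uint32_t margin_us)
{
	uint32_t max_render_us = max_duration_us - margin_us;

	if (last_render_us + margin_us / 2 < max_render_us)
		return false;			/* clearly fast enough: exit BTR */

	if (last_render_us > max_render_us + margin_us / 2)
		return true;			/* clearly too slow: enter BTR */

	return btr_active;			/* inside the hysteresis band: keep state */
}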
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
index 1c3c6d4..904424d 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
@@ -24,7 +24,8 @@
 #
 
 HDCP = hdcp_ddc.o hdcp_log.o hdcp_psp.o hdcp.o \
-		hdcp1_execution.o hdcp1_transition.o
+		hdcp1_execution.o hdcp1_transition.o \
+		hdcp2_execution.o hdcp2_transition.o
 
 AMD_DAL_HDCP = $(addprefix $(AMDDALPATH)/modules/hdcp/,$(HDCP))
 #$(info ************  DAL-HDCP_MAKEFILE ************)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index d7ac445..8aa528e 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -37,24 +37,52 @@ static void push_error_status(struct mod_hdcp *hdcp,
 		HDCP_ERROR_TRACE(hdcp, status);
 	}
 
-	hdcp->connection.hdcp1_retry_count++;
+	if (is_hdcp1(hdcp)) {
+		hdcp->connection.hdcp1_retry_count++;
+	} else if (is_hdcp2(hdcp)) {
+		hdcp->connection.hdcp2_retry_count++;
+	}
 }
 
 static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
 {
-	int i, display_enabled = 0;
+	int i, is_auth_needed = 0;
 
-	/* if all displays on the link are disabled, hdcp is not desired */
+	/* if all displays on the link don't need authentication,
+	 * hdcp is not desired
+	 */
 	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
 		if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
 				!hdcp->connection.displays[i].adjust.disable) {
-			display_enabled = 1;
+			is_auth_needed = 1;
 			break;
 		}
 	}
 
 	return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) &&
-			display_enabled && !hdcp->connection.link.adjust.hdcp1.disable;
+			is_auth_needed &&
+			!hdcp->connection.link.adjust.hdcp1.disable;
+}
+
+static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
+{
+	int i, is_auth_needed = 0;
+
+	/* if all displays on the link don't need authentication,
+	 * hdcp is not desired
+	 */
+	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+		if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+				!hdcp->connection.displays[i].adjust.disable) {
+			is_auth_needed = 1;
+			break;
+		}
+	}
+
+	return (hdcp->connection.hdcp2_retry_count < MAX_NUM_OF_ATTEMPTS) &&
+			is_auth_needed &&
+			!hdcp->connection.link.adjust.hdcp2.disable &&
+			!hdcp->connection.is_hdcp2_revoked;
 }
 
 static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
@@ -82,6 +110,11 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
 	} else if (is_in_hdcp1_dp_states(hdcp)) {
 		status = mod_hdcp_hdcp1_dp_execution(hdcp,
 				event_ctx, &input->hdcp1);
+	} else if (is_in_hdcp2_states(hdcp)) {
+		status = mod_hdcp_hdcp2_execution(hdcp, event_ctx, &input->hdcp2);
+	} else if (is_in_hdcp2_dp_states(hdcp)) {
+		status = mod_hdcp_hdcp2_dp_execution(hdcp,
+				event_ctx, &input->hdcp2);
 	}
 out:
 	return status;
@@ -99,7 +132,10 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
 
 	if (is_in_initialized_state(hdcp)) {
 		if (is_dp_hdcp(hdcp))
-			if (is_cp_desired_hdcp1(hdcp)) {
+			if (is_cp_desired_hdcp2(hdcp)) {
+				callback_in_ms(0, output);
+				set_state_id(hdcp, output, D2_A0_DETERMINE_RX_HDCP_CAPABLE);
+			} else if (is_cp_desired_hdcp1(hdcp)) {
 				callback_in_ms(0, output);
 				set_state_id(hdcp, output, D1_A0_DETERMINE_RX_HDCP_CAPABLE);
 			} else {
@@ -107,7 +143,10 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
 				set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
 			}
 		else if (is_hdmi_dvi_sl_hdcp(hdcp))
-			if (is_cp_desired_hdcp1(hdcp)) {
+			if (is_cp_desired_hdcp2(hdcp)) {
+				callback_in_ms(0, output);
+				set_state_id(hdcp, output, H2_A0_KNOWN_HDCP2_CAPABLE_RX);
+			} else if (is_cp_desired_hdcp1(hdcp)) {
 				callback_in_ms(0, output);
 				set_state_id(hdcp, output, H1_A0_WAIT_FOR_ACTIVE_RX);
 			} else {
@@ -126,6 +165,12 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
 	} else if (is_in_hdcp1_dp_states(hdcp)) {
 		status = mod_hdcp_hdcp1_dp_transition(hdcp,
 				event_ctx, &input->hdcp1, output);
+	} else if (is_in_hdcp2_states(hdcp)) {
+		status = mod_hdcp_hdcp2_transition(hdcp,
+				event_ctx, &input->hdcp2, output);
+	} else if (is_in_hdcp2_dp_states(hdcp)) {
+		status = mod_hdcp_hdcp2_dp_transition(hdcp,
+				event_ctx, &input->hdcp2, output);
 	} else {
 		status = MOD_HDCP_STATUS_INVALID_STATE;
 	}
@@ -139,9 +184,13 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
 	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
 	if (is_hdcp1(hdcp)) {
-		if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN)
+		if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN) {
+			/* TODO - update psp to unify create session failure
+			 * recovery between hdcp1 and 2.
+			 */
 			mod_hdcp_hdcp1_destroy_session(hdcp);
 
+		}
 		if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) {
 			status = mod_hdcp_remove_display_topology(hdcp);
 			if (status != MOD_HDCP_STATUS_SUCCESS) {
@@ -154,6 +203,27 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
 		memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
 		memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
 		set_state_id(hdcp, output, HDCP_INITIALIZED);
+	} else if (is_hdcp2(hdcp)) {
+		if (hdcp->auth.trans_input.hdcp2.create_session == PASS) {
+			status = mod_hdcp_hdcp2_destroy_session(hdcp);
+			if (status != MOD_HDCP_STATUS_SUCCESS) {
+				output->callback_needed = 0;
+				output->watchdog_timer_needed = 0;
+				goto out;
+			}
+		}
+		if (hdcp->auth.trans_input.hdcp2.add_topology == PASS) {
+			status = mod_hdcp_remove_display_topology(hdcp);
+			if (status != MOD_HDCP_STATUS_SUCCESS) {
+				output->callback_needed = 0;
+				output->watchdog_timer_needed = 0;
+				goto out;
+			}
+		}
+		HDCP_TOP_RESET_AUTH_TRACE(hdcp);
+		memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
+		memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
+		set_state_id(hdcp, output, HDCP_INITIALIZED);
 	} else if (is_in_cp_not_desired_state(hdcp)) {
 		status = mod_hdcp_remove_display_topology(hdcp);
 		if (status != MOD_HDCP_STATUS_SUCCESS) {
@@ -347,7 +417,20 @@ enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
 	query->trace = &hdcp->connection.trace;
 	query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
 
-	mod_hdcp_hdcp1_get_link_encryption_status(hdcp, &query->encryption_status);
+	if (is_display_encryption_enabled(display)) {
+		if (is_hdcp1(hdcp)) {
+			query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON;
+		} else if (is_hdcp2(hdcp)) {
+			if (query->link->adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
+				query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON;
+			else if (query->link->adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_1)
+				query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON;
+			else
+				query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON;
+		}
+	} else {
+		query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+	}
 
 out:
 	return status;
@@ -420,7 +503,7 @@ enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
 		break;
 	default:
 		break;
-	};
+	}
 
 	return mode;
 }
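With the HDCP 2.x state machines wired in, the transition out of HDCP_INITIALIZED now prefers HDCP 2.x and falls back to HDCP 1.x, each gated by its own retry counter, disable adjustment and (for 2.x) the revocation flag. A condensed sketch of that selection order, using simplified stand-ins for the is_cp_desired_hdcp1()/is_cp_desired_hdcp2() checks:

#include <stdbool.h>

enum start_state { CP_NOT_DESIRED, START_HDCP1, START_HDCP2 };

struct link_policy {
	unsigned int hdcp1_retries, hdcp2_retries, max_retries;
	bool hdcp1_disabled, hdcp2_disabled, hdcp2_revoked, display_needs_auth;
};

/* Try HDCP 2.x first, then HDCP 1.x, otherwise leave content protection off. */
static enum start_state pick_start_state(const struct link_policy *p)
{
	if (p->display_needs_auth && !p->hdcp2_disabled && !p->hdcp2_revoked &&
	    p->hdcp2_retries < p->max_retries)
		return START_HDCP2;

	if (p->display_needs_auth && !p->hdcp1_disabled &&
	    p->hdcp1_retries < p->max_retries)
		return START_HDCP1;

	return CP_NOT_DESIRED;
}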
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 5664bc0..af78e4f 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -29,32 +29,8 @@
 #include "mod_hdcp.h"
 #include "hdcp_log.h"
 
-#define BCAPS_READY_MASK				0x20
-#define BCAPS_REPEATER_MASK				0x40
-#define BSTATUS_DEVICE_COUNT_MASK			0X007F
-#define BSTATUS_MAX_DEVS_EXCEEDED_MASK			0x0080
-#define BSTATUS_MAX_CASCADE_EXCEEDED_MASK		0x0800
-#define BCAPS_HDCP_CAPABLE_MASK_DP			0x01
-#define BCAPS_REPEATER_MASK_DP				0x02
-#define BSTATUS_READY_MASK_DP				0x01
-#define BSTATUS_R0_P_AVAILABLE_MASK_DP			0x02
-#define BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP		0x04
-#define BSTATUS_REAUTH_REQUEST_MASK_DP			0x08
-#define BINFO_DEVICE_COUNT_MASK_DP			0X007F
-#define BINFO_MAX_DEVS_EXCEEDED_MASK_DP			0x0080
-#define BINFO_MAX_CASCADE_EXCEEDED_MASK_DP		0x0800
-
-#define RXSTATUS_MSG_SIZE_MASK				0x03FF
-#define RXSTATUS_READY_MASK				0x0400
-#define RXSTATUS_REAUTH_REQUEST_MASK			0x0800
-#define RXIDLIST_DEVICE_COUNT_LOWER_MASK		0xf0
-#define RXIDLIST_DEVICE_COUNT_UPPER_MASK		0x01
-#define RXCAPS_BYTE0_HDCP_CAPABLE_MASK_DP		0x02
-#define RXSTATUS_READY_MASK_DP				0x0001
-#define RXSTATUS_H_P_AVAILABLE_MASK_DP			0x0002
-#define RXSTATUS_PAIRING_AVAILABLE_MASK_DP		0x0004
-#define RXSTATUS_REAUTH_REQUEST_MASK_DP			0x0008
-#define RXSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP		0x0010
+#include <drm/drm_hdcp.h>
+#include <drm/drm_dp_helper.h>
 
 enum mod_hdcp_trans_input_result {
 	UNKNOWN = 0,
@@ -87,13 +63,57 @@ struct mod_hdcp_transition_input_hdcp1 {
 	uint8_t hdcp_capable_dp;
 	uint8_t binfo_read_dp;
 	uint8_t r0p_available_dp;
-	uint8_t link_integiry_check;
+	uint8_t link_integrity_check;
 	uint8_t reauth_request_check;
 	uint8_t stream_encryption_dp;
 };
 
+struct mod_hdcp_transition_input_hdcp2 {
+	uint8_t hdcp2version_read;
+	uint8_t hdcp2_capable_check;
+	uint8_t add_topology;
+	uint8_t create_session;
+	uint8_t ake_init_prepare;
+	uint8_t ake_init_write;
+	uint8_t rxstatus_read;
+	uint8_t ake_cert_available;
+	uint8_t ake_cert_read;
+	uint8_t ake_cert_validation;
+	uint8_t stored_km_write;
+	uint8_t no_stored_km_write;
+	uint8_t h_prime_available;
+	uint8_t h_prime_read;
+	uint8_t pairing_available;
+	uint8_t pairing_info_read;
+	uint8_t h_prime_validation;
+	uint8_t lc_init_prepare;
+	uint8_t lc_init_write;
+	uint8_t l_prime_available_poll;
+	uint8_t l_prime_read;
+	uint8_t l_prime_validation;
+	uint8_t eks_prepare;
+	uint8_t eks_write;
+	uint8_t enable_encryption;
+	uint8_t reauth_request_check;
+	uint8_t rx_id_list_read;
+	uint8_t device_count_check;
+	uint8_t rx_id_list_validation;
+	uint8_t repeater_auth_ack_write;
+	uint8_t prepare_stream_manage;
+	uint8_t stream_manage_write;
+	uint8_t stream_ready_available;
+	uint8_t stream_ready_read;
+	uint8_t stream_ready_validation;
+
+	uint8_t rx_caps_read_dp;
+	uint8_t content_stream_type_write;
+	uint8_t link_integrity_check_dp;
+	uint8_t stream_encryption_dp;
+};
+
 union mod_hdcp_transition_input {
 	struct mod_hdcp_transition_input_hdcp1 hdcp1;
+	struct mod_hdcp_transition_input_hdcp2 hdcp2;
 };
 
 struct mod_hdcp_message_hdcp1 {
@@ -111,8 +131,33 @@ struct mod_hdcp_message_hdcp1 {
 	uint16_t	binfo_dp;
 };
 
+struct mod_hdcp_message_hdcp2 {
+	uint8_t		hdcp2version_hdmi;
+	uint8_t		rxcaps_dp[3];
+	uint8_t		rxstatus[2];
+
+	uint8_t		ake_init[12];
+	uint8_t		ake_cert[534];
+	uint8_t		ake_no_stored_km[129];
+	uint8_t		ake_stored_km[33];
+	uint8_t		ake_h_prime[33];
+	uint8_t		ake_pairing_info[17];
+	uint8_t		lc_init[9];
+	uint8_t		lc_l_prime[33];
+	uint8_t		ske_eks[25];
+	uint8_t		rx_id_list[177]; // 22 + 5 * 31
+	uint16_t	rx_id_list_size;
+	uint8_t		repeater_auth_ack[17];
+	uint8_t		repeater_auth_stream_manage[68]; // 6 + 2 * 31
+	uint16_t	stream_manage_size;
+	uint8_t		repeater_auth_stream_ready[33];
+	uint8_t		rxstatus_dp;
+	uint8_t		content_stream_type_dp[2];
+};
+
 union mod_hdcp_message {
 	struct mod_hdcp_message_hdcp1 hdcp1;
+	struct mod_hdcp_message_hdcp2 hdcp2;
 };
 
 struct mod_hdcp_auth_counters {
@@ -125,8 +170,10 @@ struct mod_hdcp_connection {
 	struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
 	uint8_t is_repeater;
 	uint8_t is_km_stored;
+	uint8_t is_hdcp2_revoked;
 	struct mod_hdcp_trace trace;
 	uint8_t hdcp1_retry_count;
+	uint8_t hdcp2_retry_count;
 };
 
 /* contains values per authentication cycle */
@@ -194,6 +241,50 @@ enum mod_hdcp_hdcp1_dp_state_id {
 	HDCP1_DP_STATE_END = D1_A7_READ_KSV_LIST,
 };
 
+enum mod_hdcp_hdcp2_state_id {
+	HDCP2_STATE_START = HDCP1_DP_STATE_END,
+	H2_A0_KNOWN_HDCP2_CAPABLE_RX,
+	H2_A1_SEND_AKE_INIT,
+	H2_A1_VALIDATE_AKE_CERT,
+	H2_A1_SEND_NO_STORED_KM,
+	H2_A1_READ_H_PRIME,
+	H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME,
+	H2_A1_SEND_STORED_KM,
+	H2_A1_VALIDATE_H_PRIME,
+	H2_A2_LOCALITY_CHECK,
+	H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER,
+	H2_ENABLE_ENCRYPTION,
+	H2_A5_AUTHENTICATED,
+	H2_A6_WAIT_FOR_RX_ID_LIST,
+	H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK,
+	H2_A9_SEND_STREAM_MANAGEMENT,
+	H2_A9_VALIDATE_STREAM_READY,
+	HDCP2_STATE_END = H2_A9_VALIDATE_STREAM_READY,
+};
+
+enum mod_hdcp_hdcp2_dp_state_id {
+	HDCP2_DP_STATE_START = HDCP2_STATE_END,
+	D2_A0_DETERMINE_RX_HDCP_CAPABLE,
+	D2_A1_SEND_AKE_INIT,
+	D2_A1_VALIDATE_AKE_CERT,
+	D2_A1_SEND_NO_STORED_KM,
+	D2_A1_READ_H_PRIME,
+	D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME,
+	D2_A1_SEND_STORED_KM,
+	D2_A1_VALIDATE_H_PRIME,
+	D2_A2_LOCALITY_CHECK,
+	D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER,
+	D2_SEND_CONTENT_STREAM_TYPE,
+	D2_ENABLE_ENCRYPTION,
+	D2_A5_AUTHENTICATED,
+	D2_A6_WAIT_FOR_RX_ID_LIST,
+	D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK,
+	D2_A9_SEND_STREAM_MANAGEMENT,
+	D2_A9_VALIDATE_STREAM_READY,
+	HDCP2_DP_STATE_END = D2_A9_VALIDATE_STREAM_READY,
+	HDCP_STATE_END = HDCP2_DP_STATE_END,
+};
+
 /* hdcp1 executions and transitions */
 typedef enum mod_hdcp_status (*mod_hdcp_action)(struct mod_hdcp *hdcp);
 uint8_t mod_hdcp_execute_and_set(
@@ -214,6 +305,22 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
 	struct mod_hdcp_transition_input_hdcp1 *input,
 	struct mod_hdcp_output *output);
 
+/* hdcp2 executions and transitions */
+enum mod_hdcp_status mod_hdcp_hdcp2_execution(struct mod_hdcp *hdcp,
+	struct mod_hdcp_event_context *event_ctx,
+	struct mod_hdcp_transition_input_hdcp2 *input);
+enum mod_hdcp_status mod_hdcp_hdcp2_dp_execution(struct mod_hdcp *hdcp,
+	struct mod_hdcp_event_context *event_ctx,
+	struct mod_hdcp_transition_input_hdcp2 *input);
+enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
+	struct mod_hdcp_event_context *event_ctx,
+	struct mod_hdcp_transition_input_hdcp2 *input,
+	struct mod_hdcp_output *output);
+enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
+	struct mod_hdcp_event_context *event_ctx,
+	struct mod_hdcp_transition_input_hdcp2 *input,
+	struct mod_hdcp_output *output);
+
 /* log functions */
 void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
 		uint8_t *buf, uint32_t buf_size);
@@ -234,6 +341,25 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(
 enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
 							       enum mod_hdcp_encryption_status *encryption_status);
+enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(
+		struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(
+		struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(
+		struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
+							       enum mod_hdcp_encryption_status *encryption_status);
+
 /* ddc functions */
 enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp);
@@ -245,6 +371,7 @@ enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_hdcp2version(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp);
@@ -308,11 +435,28 @@ static inline uint8_t is_in_hdcp1_dp_states(struct mod_hdcp *hdcp)
 			current_state(hdcp) <= HDCP1_DP_STATE_END);
 }
 
+static inline uint8_t is_in_hdcp2_states(struct mod_hdcp *hdcp)
+{
+	return (current_state(hdcp) > HDCP2_STATE_START &&
+			current_state(hdcp) <= HDCP2_STATE_END);
+}
+
+static inline uint8_t is_in_hdcp2_dp_states(struct mod_hdcp *hdcp)
+{
+	return (current_state(hdcp) > HDCP2_DP_STATE_START &&
+			current_state(hdcp) <= HDCP2_DP_STATE_END);
+}
+
 static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp)
 {
 	return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp));
 }
 
+static inline uint8_t is_hdcp2(struct mod_hdcp *hdcp)
+{
+	return (is_in_hdcp2_states(hdcp) || is_in_hdcp2_dp_states(hdcp));
+}
+
 static inline uint8_t is_in_cp_not_desired_state(struct mod_hdcp *hdcp)
 {
 	return current_state(hdcp) == HDCP_CP_NOT_DESIRED;
@@ -437,6 +581,7 @@ static inline struct mod_hdcp_display *get_empty_display_container(
 static inline void reset_retry_counts(struct mod_hdcp *hdcp)
 {
 	hdcp->connection.hdcp1_retry_count = 0;
+	hdcp->connection.hdcp2_retry_count = 0;
 }
 
 #endif /* HDCP_H_ */
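The new HDCP 2.x state IDs are appended as contiguous enum ranges (HDCP2_STATE_START picks up where HDCP1_DP_STATE_END left off, and HDCP2_DP_STATE_START where HDCP2_STATE_END does), which is what lets is_in_hdcp2_states()/is_in_hdcp2_dp_states() classify the current state with two comparisons. A toy illustration of the same pattern with hypothetical values:

enum example_state_id {
	RANGE_A_START = 0,
	A_FIRST,
	A_SECOND,
	RANGE_A_END = A_SECOND,

	RANGE_B_START = RANGE_A_END,	/* the next range begins where the last ended */
	B_FIRST,
	B_SECOND,
	RANGE_B_END = B_SECOND,
};

static int is_in_range_b(enum example_state_id s)
{
	return s > RANGE_B_START && s <= RANGE_B_END;
}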
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 3db4a7d..37670db 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -27,9 +27,11 @@
 
 static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
 {
-	uint64_t n = *(uint64_t *)hdcp->auth.msg.hdcp1.bksv;
+	uint64_t n = 0;
 	uint8_t count = 0;
 
+	memcpy(&n, hdcp->auth.msg.hdcp1.bksv, sizeof(uint64_t));
+
 	while (n) {
 		count++;
 		n &= (n - 1);
@@ -41,17 +43,17 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
 static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp)
 {
 	if (is_dp_hdcp(hdcp))
-		return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_READY_MASK_DP) ?
+		return (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_READY) ?
 				MOD_HDCP_STATUS_SUCCESS :
 				MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
-	return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_READY_MASK) ?
+	return (hdcp->auth.msg.hdcp1.bcaps & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY) ?
 			MOD_HDCP_STATUS_SUCCESS :
 			MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
 }
 
 static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp)
 {
-	return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_HDCP_CAPABLE_MASK_DP) ?
+	return (hdcp->auth.msg.hdcp1.bcaps & DP_BCAPS_HDCP_CAPABLE) ?
 			MOD_HDCP_STATUS_SUCCESS :
 			MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE;
 }
@@ -61,7 +63,7 @@ static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp)
 	enum mod_hdcp_status status;
 	if (is_dp_hdcp(hdcp)) {
 		status = (hdcp->auth.msg.hdcp1.bstatus &
-				BSTATUS_R0_P_AVAILABLE_MASK_DP) ?
+				DP_BSTATUS_R0_PRIME_READY) ?
 			MOD_HDCP_STATUS_SUCCESS :
 			MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING;
 	} else {
@@ -74,7 +76,7 @@ static inline enum mod_hdcp_status check_link_integrity_dp(
 		struct mod_hdcp *hdcp)
 {
 	return (hdcp->auth.msg.hdcp1.bstatus &
-			BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP) ?
+			DP_BSTATUS_LINK_FAILURE) ?
 			MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE :
 			MOD_HDCP_STATUS_SUCCESS;
 }
@@ -82,7 +84,7 @@ static inline enum mod_hdcp_status check_link_integrity_dp(
 static inline enum mod_hdcp_status check_no_reauthentication_request_dp(
 		struct mod_hdcp *hdcp)
 {
-	return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_REAUTH_REQUEST_MASK_DP) ?
+	return (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_REAUTH_REQ) ?
 			MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED :
 			MOD_HDCP_STATUS_SUCCESS;
 }
@@ -92,15 +94,13 @@ static inline enum mod_hdcp_status check_no_max_cascade(struct mod_hdcp *hdcp)
 	enum mod_hdcp_status status;
 
 	if (is_dp_hdcp(hdcp))
-		status = (hdcp->auth.msg.hdcp1.binfo_dp &
-				BINFO_MAX_CASCADE_EXCEEDED_MASK_DP) ?
-			MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE :
-			MOD_HDCP_STATUS_SUCCESS;
+		status = DRM_HDCP_MAX_CASCADE_EXCEEDED(hdcp->auth.msg.hdcp1.binfo_dp >> 8)
+				 ? MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE
+				 : MOD_HDCP_STATUS_SUCCESS;
 	else
-		status = (hdcp->auth.msg.hdcp1.bstatus &
-				BSTATUS_MAX_CASCADE_EXCEEDED_MASK) ?
-				MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE :
-				MOD_HDCP_STATUS_SUCCESS;
+		status = DRM_HDCP_MAX_CASCADE_EXCEEDED(hdcp->auth.msg.hdcp1.bstatus >> 8)
+				 ? MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE
+				 : MOD_HDCP_STATUS_SUCCESS;
 	return status;
 }
 
@@ -109,13 +109,11 @@ static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp)
 	enum mod_hdcp_status status;
 
 	if (is_dp_hdcp(hdcp))
-		status = (hdcp->auth.msg.hdcp1.binfo_dp &
-				BINFO_MAX_DEVS_EXCEEDED_MASK_DP) ?
+		status = DRM_HDCP_MAX_DEVICE_EXCEEDED(hdcp->auth.msg.hdcp1.binfo_dp) ?
 				MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
 				MOD_HDCP_STATUS_SUCCESS;
 	else
-		status = (hdcp->auth.msg.hdcp1.bstatus &
-				BSTATUS_MAX_DEVS_EXCEEDED_MASK) ?
+		status = DRM_HDCP_MAX_DEVICE_EXCEEDED(hdcp->auth.msg.hdcp1.bstatus) ?
 				MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
 				MOD_HDCP_STATUS_SUCCESS;
 	return status;
@@ -124,8 +122,8 @@ static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp)
 static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
 {
 	return is_dp_hdcp(hdcp) ?
-			(hdcp->auth.msg.hdcp1.binfo_dp & BINFO_DEVICE_COUNT_MASK_DP) :
-			(hdcp->auth.msg.hdcp1.bstatus & BSTATUS_DEVICE_COUNT_MASK);
+			DRM_HDCP_NUM_DOWNSTREAM(hdcp->auth.msg.hdcp1.binfo_dp) :
+			DRM_HDCP_NUM_DOWNSTREAM(hdcp->auth.msg.hdcp1.bstatus);
 }
 
 static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
@@ -285,8 +283,8 @@ static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp,
 				hdcp, "bstatus_read"))
 			goto out;
 		if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
-				&input->link_integiry_check, &status,
-				hdcp, "link_integiry_check"))
+				&input->link_integrity_check, &status,
+				hdcp, "link_integrity_check"))
 			goto out;
 		if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
 				&input->reauth_request_check, &status,
@@ -433,8 +431,8 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
 			hdcp, "bstatus_read"))
 		goto out;
 	if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
-			&input->link_integiry_check, &status,
-			hdcp, "link_integiry_check"))
+			&input->link_integrity_check, &status,
+			hdcp, "link_integrity_check"))
 		goto out;
 	if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
 			&input->reauth_request_check, &status,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
index 136b801..76edcbe 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -67,11 +67,19 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
 		break;
 	case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
 		if (input->bcaps_read != PASS ||
-				input->r0p_read != PASS ||
-				input->rx_validation != PASS ||
-				(!conn->is_repeater && input->encryption != PASS)) {
+				input->r0p_read != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (input->rx_validation != PASS) {
 			/* 1A-06: consider invalid r0' a failure */
 			/* 1A-08: consider bksv listed in SRM a failure */
+			/*
+			 * Some slow RX will fail rx validation when it is
+			 * not ready. Give it more time to react before retrying.
+			 */
+			fail_and_restart_in_ms(1000, &status, output);
+			break;
+		} else if (!conn->is_repeater && input->encryption != PASS) {
 			fail_and_restart_in_ms(0, &status, output);
 			break;
 		}
@@ -212,7 +220,11 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
 				 * after 3 attempts.
 				 * 1A-08: consider bksv listed in SRM a failure
 				 */
-				fail_and_restart_in_ms(0, &status, output);
+				/*
+				 * Some slow RX will fail rx validation when it is
+				 * not ready. Give it more time to react before retrying.
+				 */
+				fail_and_restart_in_ms(1000, &status, output);
 			}
 			break;
 		} else if ((!conn->is_repeater && input->encryption != PASS) ||
@@ -229,7 +241,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
 		}
 		break;
 	case D1_A4_AUTHENTICATED:
-		if (input->link_integiry_check != PASS ||
+		if (input->link_integrity_check != PASS ||
 				input->reauth_request_check != PASS) {
 			/* 1A-07: restart hdcp on a link integrity failure */
 			fail_and_restart_in_ms(0, &status, output);
@@ -237,7 +249,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
 		}
 		break;
 	case D1_A6_WAIT_FOR_READY:
-		if (input->link_integiry_check == FAIL ||
+		if (input->link_integrity_check == FAIL ||
 				input->reauth_request_check == FAIL) {
 			fail_and_restart_in_ms(0, &status, output);
 			break;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
new file mode 100644
index 0000000..f730b94
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -0,0 +1,886 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/delay.h>
+
+#include "hdcp.h"
+
+static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp *hdcp)
+{
+	uint8_t is_ready = 0;
+
+	if (is_dp_hdcp(hdcp))
+		is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0;
+	else
+		is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[0]) &&
+				(HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+						hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0;
+	return is_ready ? MOD_HDCP_STATUS_SUCCESS :
+			MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY;
+}
+
+static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp))
+		status = (hdcp->auth.msg.hdcp2.rxcaps_dp[2] & HDCP_2_2_RX_CAPS_VERSION_VAL) &&
+				HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[0]) ?
+				MOD_HDCP_STATUS_SUCCESS :
+				MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
+	else
+		status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & HDCP_2_2_HDMI_SUPPORT_MASK) ?
+				MOD_HDCP_STATUS_SUCCESS :
+				MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
+	return status;
+}
+
+static inline enum mod_hdcp_status check_reauthentication_request(
+		struct mod_hdcp *hdcp)
+{
+	uint8_t ret = 0;
+
+	if (is_dp_hdcp(hdcp))
+		ret = HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
+				MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
+				MOD_HDCP_STATUS_SUCCESS;
+	else
+		ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[0]) ?
+				MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
+				MOD_HDCP_STATUS_SUCCESS;
+	return ret;
+}
+
+static inline enum mod_hdcp_status check_link_integrity_failure_dp(
+		struct mod_hdcp *hdcp)
+{
+	return HDCP_2_2_DP_RXSTATUS_LINK_FAILED(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
+			MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE :
+			MOD_HDCP_STATUS_SUCCESS;
+}
+
+static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+	uint16_t size;
+
+	if (is_dp_hdcp(hdcp)) {
+		status = MOD_HDCP_STATUS_SUCCESS;
+	} else {
+		status = mod_hdcp_read_rxstatus(hdcp);
+		if (status == MOD_HDCP_STATUS_SUCCESS) {
+			size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+			       hdcp->auth.msg.hdcp2.rxstatus[0];
+			status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_cert)) ?
+					MOD_HDCP_STATUS_SUCCESS :
+					MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING;
+		}
+	}
+	return status;
+}
+
+static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+	uint8_t size;
+
+	status = mod_hdcp_read_rxstatus(hdcp);
+	if (status != MOD_HDCP_STATUS_SUCCESS)
+		goto out;
+
+	if (is_dp_hdcp(hdcp)) {
+		status = HDCP_2_2_DP_RXSTATUS_H_PRIME(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
+				MOD_HDCP_STATUS_SUCCESS :
+				MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING;
+	} else {
+		size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+		       hdcp->auth.msg.hdcp2.rxstatus[0];
+		status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)) ?
+				MOD_HDCP_STATUS_SUCCESS :
+				MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING;
+	}
+out:
+	return status;
+}
+
+static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+	uint8_t size;
+
+	status = mod_hdcp_read_rxstatus(hdcp);
+	if (status != MOD_HDCP_STATUS_SUCCESS)
+		goto out;
+
+	if (is_dp_hdcp(hdcp)) {
+		status = HDCP_2_2_DP_RXSTATUS_PAIRING(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
+				MOD_HDCP_STATUS_SUCCESS :
+				MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING;
+	} else {
+		size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+		       hdcp->auth.msg.hdcp2.rxstatus[0];
+		status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)) ?
+				MOD_HDCP_STATUS_SUCCESS :
+				MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING;
+	}
+out:
+	return status;
+}
+
+static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+	uint8_t size;
+	uint16_t max_wait = 20; // units of ms
+	uint16_t num_polls = 5;
+	uint16_t wait_time = max_wait / num_polls;
+
+	if (is_dp_hdcp(hdcp))
+		status = MOD_HDCP_STATUS_INVALID_OPERATION;
+	else
+		for (; num_polls; num_polls--) {
+			msleep(wait_time);
+
+			status = mod_hdcp_read_rxstatus(hdcp);
+			if (status != MOD_HDCP_STATUS_SUCCESS)
+				break;
+
+			size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+			       hdcp->auth.msg.hdcp2.rxstatus[0];
+			status = (size == sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)) ?
+					MOD_HDCP_STATUS_SUCCESS :
+					MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING;
+			if (status == MOD_HDCP_STATUS_SUCCESS)
+				break;
+		}
+	return status;
+}
+
+static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+	uint8_t size;
+
+	if (is_dp_hdcp(hdcp)) {
+		status = MOD_HDCP_STATUS_INVALID_OPERATION;
+	} else {
+		status = mod_hdcp_read_rxstatus(hdcp);
+		if (status != MOD_HDCP_STATUS_SUCCESS)
+			goto out;
+		size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+		       hdcp->auth.msg.hdcp2.rxstatus[0];
+		status = (size == sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)) ?
+				MOD_HDCP_STATUS_SUCCESS :
+				MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING;
+	}
+out:
+	return status;
+}
+
+static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
+{
+	return HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) +
+			(HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4);
+}
+
+static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
+{
+	/* device count must be greater than or equal to tracked hdcp displays */
+	return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+			MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
+			MOD_HDCP_STATUS_SUCCESS;
+}
+
+static uint8_t process_rxstatus(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input,
+		enum mod_hdcp_status *status)
+{
+	if (!mod_hdcp_execute_and_set(mod_hdcp_read_rxstatus,
+			&input->rxstatus_read, status,
+			hdcp, "rxstatus_read"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(check_reauthentication_request,
+			&input->reauth_request_check, status,
+			hdcp, "reauth_request_check"))
+		goto out;
+	if (is_dp_hdcp(hdcp)) {
+		if (!mod_hdcp_execute_and_set(check_link_integrity_failure_dp,
+				&input->link_integrity_check_dp, status,
+				hdcp, "link_integrity_check_dp"))
+			goto out;
+	}
+	if (hdcp->connection.is_repeater)
+		if (check_receiver_id_list_ready(hdcp) ==
+				MOD_HDCP_STATUS_SUCCESS) {
+			HDCP_INPUT_PASS_TRACE(hdcp, "rx_id_list_ready");
+			event_ctx->rx_id_list_ready = 1;
+			if (is_dp_hdcp(hdcp))
+				hdcp->auth.msg.hdcp2.rx_id_list_size =
+						sizeof(hdcp->auth.msg.hdcp2.rx_id_list);
+			else
+				hdcp->auth.msg.hdcp2.rx_id_list_size =
+					HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+					hdcp->auth.msg.hdcp2.rxstatus[0];
+		}
+out:
+	return (*status == MOD_HDCP_STATUS_SUCCESS);
+}
+
+static enum mod_hdcp_status known_hdcp2_capable_rx(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+	if (!mod_hdcp_execute_and_set(mod_hdcp_read_hdcp2version,
+			&input->hdcp2version_read, &status,
+			hdcp, "hdcp2version_read"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(check_hdcp2_capable,
+			&input->hdcp2_capable_check, &status,
+			hdcp, "hdcp2_capable"))
+		goto out;
+out:
+	return status;
+}
+
+static enum mod_hdcp_status send_ake_init(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+	if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
+			&input->add_topology, &status,
+			hdcp, "add_topology"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_create_session,
+			&input->create_session, &status,
+			hdcp, "create_session"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_ake_init,
+			&input->ake_init_prepare, &status,
+			hdcp, "ake_init_prepare"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_write_ake_init,
+			&input->ake_init_write, &status,
+			hdcp, "ake_init_write"))
+		goto out;
+out:
+	return status;
+}
+
+static enum mod_hdcp_status validate_ake_cert(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+			event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+
+	if (is_hdmi_dvi_sl_hdcp(hdcp))
+		if (!mod_hdcp_execute_and_set(check_ake_cert_available,
+				&input->ake_cert_available, &status,
+				hdcp, "ake_cert_available"))
+			goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_read_ake_cert,
+			&input->ake_cert_read, &status,
+			hdcp, "ake_cert_read"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_ake_cert,
+			&input->ake_cert_validation, &status,
+			hdcp, "ake_cert_validation"))
+		goto out;
+out:
+	return status;
+}
+
+static enum mod_hdcp_status send_no_stored_km(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+
+	if (!mod_hdcp_execute_and_set(mod_hdcp_write_no_stored_km,
+			&input->no_stored_km_write, &status,
+			hdcp, "no_stored_km_write"))
+		goto out;
+out:
+	return status;
+}
+
+static enum mod_hdcp_status read_h_prime(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+			event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+			event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+
+	if (!mod_hdcp_execute_and_set(check_h_prime_available,
+			&input->h_prime_available, &status,
+			hdcp, "h_prime_available"))
+		goto out;
+
+	if (!mod_hdcp_execute_and_set(mod_hdcp_read_h_prime,
+			&input->h_prime_read, &status,
+			hdcp, "h_prime_read"))
+		goto out;
+out:
+	return status;
+}
+
+static enum mod_hdcp_status read_pairing_info_and_validate_h_prime(
+		struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+			event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+			event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+
+	if (!mod_hdcp_execute_and_set(check_pairing_info_available,
+			&input->pairing_available, &status,
+			hdcp, "pairing_available"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_read_pairing_info,
+			&input->pairing_info_read, &status,
+			hdcp, "pairing_info_read"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_h_prime,
+			&input->h_prime_validation, &status,
+			hdcp, "h_prime_validation"))
+		goto out;
+out:
+	return status;
+}
+
+static enum mod_hdcp_status send_stored_km(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+
+	if (!mod_hdcp_execute_and_set(mod_hdcp_write_stored_km,
+			&input->stored_km_write, &status,
+			hdcp, "stored_km_write"))
+		goto out;
+out:
+	return status;
+}
+
+static enum mod_hdcp_status validate_h_prime(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+			event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+			event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+
+	if (!mod_hdcp_execute_and_set(check_h_prime_available,
+			&input->h_prime_available, &status,
+			hdcp, "h_prime_available"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_read_h_prime,
+			&input->h_prime_read, &status,
+			hdcp, "h_prime_read"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_h_prime,
+			&input->h_prime_validation, &status,
+			hdcp, "h_prime_validation"))
+		goto out;
+out:
+	return status;
+}
+
+static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+
+	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init,
+			&input->lc_init_prepare, &status,
+			hdcp, "lc_init_prepare"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init,
+			&input->lc_init_write, &status,
+			 hdcp, "lc_init_write"))
+		goto out;
+	if (is_dp_hdcp(hdcp))
+		msleep(16);
+	else
+		if (!mod_hdcp_execute_and_set(poll_l_prime_available,
+				&input->l_prime_available_poll, &status,
+				hdcp, "l_prime_available_poll"))
+			goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime,
+			&input->l_prime_read, &status,
+			hdcp, "l_prime_read"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime,
+			&input->l_prime_validation, &status,
+			hdcp, "l_prime_validation"))
+		goto out;
+out:
+	return status;
+}
+
+static enum mod_hdcp_status exchange_ks_and_test_for_repeater(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+
+	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_eks,
+			&input->eks_prepare, &status,
+			hdcp, "eks_prepare"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_write_eks,
+			&input->eks_write, &status,
+			hdcp, "eks_write"))
+		goto out;
+out:
+	return status;
+}
+
+static enum mod_hdcp_status enable_encryption(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+			event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+	if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
+		process_rxstatus(hdcp, event_ctx, input, &status);
+		goto out;
+	}
+
+	if (is_hdmi_dvi_sl_hdcp(hdcp)) {
+		if (!process_rxstatus(hdcp, event_ctx, input, &status))
+			goto out;
+		if (event_ctx->rx_id_list_ready)
+			goto out;
+	}
+	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_enable_encryption,
+			&input->enable_encryption, &status,
+			hdcp, "enable_encryption"))
+		goto out;
+	if (is_dp_mst_hdcp(hdcp)) {
+		if (!mod_hdcp_execute_and_set(
+				mod_hdcp_hdcp2_enable_dp_stream_encryption,
+				&input->stream_encryption_dp, &status,
+				hdcp, "stream_encryption_dp"))
+			goto out;
+	}
+out:
+	return status;
+}
+
+static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+			event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+
+	if (!process_rxstatus(hdcp, event_ctx, input, &status))
+		goto out;
+	if (event_ctx->rx_id_list_ready)
+		goto out;
+out:
+	return status;
+}
+
+static enum mod_hdcp_status wait_for_rx_id_list(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+			event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+			event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+
+	if (!process_rxstatus(hdcp, event_ctx, input, &status))
+		goto out;
+	if (!event_ctx->rx_id_list_ready) {
+		status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY;
+		goto out;
+	}
+out:
+	return status;
+}
+
+static enum mod_hdcp_status verify_rx_id_list_and_send_ack(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+			event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+	if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
+		process_rxstatus(hdcp, event_ctx, input, &status);
+		goto out;
+	}
+
+	if (!mod_hdcp_execute_and_set(mod_hdcp_read_rx_id_list,
+			&input->rx_id_list_read,
+			&status, hdcp, "receiver_id_list_read"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(check_device_count,
+			&input->device_count_check,
+			&status, hdcp, "device_count_check"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_rx_id_list,
+			&input->rx_id_list_validation,
+			&status, hdcp, "rx_id_list_validation"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_write_repeater_auth_ack,
+			&input->repeater_auth_ack_write,
+			&status, hdcp, "repeater_auth_ack_write"))
+		goto out;
+out:
+	return status;
+}
+
+static enum mod_hdcp_status send_stream_management(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+			event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+	if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
+		process_rxstatus(hdcp, event_ctx, input, &status);
+		goto out;
+	}
+
+	if (is_hdmi_dvi_sl_hdcp(hdcp)) {
+		if (!process_rxstatus(hdcp, event_ctx, input, &status))
+			goto out;
+		if (event_ctx->rx_id_list_ready)
+			goto out;
+	}
+	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_stream_management,
+			&input->prepare_stream_manage,
+			&status, hdcp, "prepare_stream_manage"))
+		goto out;
+
+	if (!mod_hdcp_execute_and_set(mod_hdcp_write_stream_manage,
+			&input->stream_manage_write,
+			&status, hdcp, "stream_manage_write"))
+		goto out;
+out:
+	return status;
+}
+
+static enum mod_hdcp_status validate_stream_ready(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+			event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+			event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+	if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
+		process_rxstatus(hdcp, event_ctx, input, &status);
+		goto out;
+	}
+
+	if (is_hdmi_dvi_sl_hdcp(hdcp)) {
+		if (!process_rxstatus(hdcp, event_ctx, input, &status))
+			goto out;
+		if (event_ctx->rx_id_list_ready) {
+			goto out;
+		}
+	}
+	if (is_hdmi_dvi_sl_hdcp(hdcp))
+		if (!mod_hdcp_execute_and_set(check_stream_ready_available,
+				&input->stream_ready_available,
+				&status, hdcp, "stream_ready_available"))
+			goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_read_stream_ready,
+			&input->stream_ready_read,
+			&status, hdcp, "stream_ready_read"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_stream_ready,
+			&input->stream_ready_validation,
+			&status, hdcp, "stream_ready_validation"))
+		goto out;
+
+out:
+	return status;
+}
+
+static enum mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+
+	if (!mod_hdcp_execute_and_set(mod_hdcp_read_rxcaps,
+			&input->rx_caps_read_dp,
+			&status, hdcp, "rx_caps_read_dp"))
+		goto out;
+	if (!mod_hdcp_execute_and_set(check_hdcp2_capable,
+			&input->hdcp2_capable_check, &status,
+			hdcp, "hdcp2_capable_check"))
+		goto out;
+out:
+	return status;
+}
+
+static enum mod_hdcp_status send_content_stream_type_dp(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+			event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+		event_ctx->unexpected_event = 1;
+		goto out;
+	}
+
+	if (!process_rxstatus(hdcp, event_ctx, input, &status))
+		goto out;
+	if (!mod_hdcp_execute_and_set(mod_hdcp_write_content_type,
+			&input->content_stream_type_write, &status,
+			hdcp, "content_stream_type_write"))
+		goto out;
+out:
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_execution(struct mod_hdcp *hdcp,
+	struct mod_hdcp_event_context *event_ctx,
+	struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	switch (current_state(hdcp)) {
+	case H2_A0_KNOWN_HDCP2_CAPABLE_RX:
+		status = known_hdcp2_capable_rx(hdcp, event_ctx, input);
+		break;
+	case H2_A1_SEND_AKE_INIT:
+		status = send_ake_init(hdcp, event_ctx, input);
+		break;
+	case H2_A1_VALIDATE_AKE_CERT:
+		status = validate_ake_cert(hdcp, event_ctx, input);
+		break;
+	case H2_A1_SEND_NO_STORED_KM:
+		status = send_no_stored_km(hdcp, event_ctx, input);
+		break;
+	case H2_A1_READ_H_PRIME:
+		status = read_h_prime(hdcp, event_ctx, input);
+		break;
+	case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+		status = read_pairing_info_and_validate_h_prime(hdcp,
+				event_ctx, input);
+		break;
+	case H2_A1_SEND_STORED_KM:
+		status = send_stored_km(hdcp, event_ctx, input);
+		break;
+	case H2_A1_VALIDATE_H_PRIME:
+		status = validate_h_prime(hdcp, event_ctx, input);
+		break;
+	case H2_A2_LOCALITY_CHECK:
+		status = locality_check(hdcp, event_ctx, input);
+		break;
+	case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+		status = exchange_ks_and_test_for_repeater(hdcp, event_ctx, input);
+		break;
+	case H2_ENABLE_ENCRYPTION:
+		status = enable_encryption(hdcp, event_ctx, input);
+		break;
+	case H2_A5_AUTHENTICATED:
+		status = authenticated(hdcp, event_ctx, input);
+		break;
+	case H2_A6_WAIT_FOR_RX_ID_LIST:
+		status = wait_for_rx_id_list(hdcp, event_ctx, input);
+		break;
+	case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+		status = verify_rx_id_list_and_send_ack(hdcp, event_ctx, input);
+		break;
+	case H2_A9_SEND_STREAM_MANAGEMENT:
+		status = send_stream_management(hdcp, event_ctx, input);
+		break;
+	case H2_A9_VALIDATE_STREAM_READY:
+		status = validate_stream_ready(hdcp, event_ctx, input);
+		break;
+	default:
+		status = MOD_HDCP_STATUS_INVALID_STATE;
+		break;
+	}
+
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_dp_execution(struct mod_hdcp *hdcp,
+	struct mod_hdcp_event_context *event_ctx,
+	struct mod_hdcp_transition_input_hdcp2 *input)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+	switch (current_state(hdcp)) {
+	case D2_A0_DETERMINE_RX_HDCP_CAPABLE:
+		status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input);
+		break;
+	case D2_A1_SEND_AKE_INIT:
+		status = send_ake_init(hdcp, event_ctx, input);
+		break;
+	case D2_A1_VALIDATE_AKE_CERT:
+		status = validate_ake_cert(hdcp, event_ctx, input);
+		break;
+	case D2_A1_SEND_NO_STORED_KM:
+		status = send_no_stored_km(hdcp, event_ctx, input);
+		break;
+	case D2_A1_READ_H_PRIME:
+		status = read_h_prime(hdcp, event_ctx, input);
+		break;
+	case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+		status = read_pairing_info_and_validate_h_prime(hdcp,
+				event_ctx, input);
+		break;
+	case D2_A1_SEND_STORED_KM:
+		status = send_stored_km(hdcp, event_ctx, input);
+		break;
+	case D2_A1_VALIDATE_H_PRIME:
+		status = validate_h_prime(hdcp, event_ctx, input);
+		break;
+	case D2_A2_LOCALITY_CHECK:
+		status = locality_check(hdcp, event_ctx, input);
+		break;
+	case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+		status = exchange_ks_and_test_for_repeater(hdcp,
+				event_ctx, input);
+		break;
+	case D2_SEND_CONTENT_STREAM_TYPE:
+		status = send_content_stream_type_dp(hdcp, event_ctx, input);
+		break;
+	case D2_ENABLE_ENCRYPTION:
+		status = enable_encryption(hdcp, event_ctx, input);
+		break;
+	case D2_A5_AUTHENTICATED:
+		status = authenticated(hdcp, event_ctx, input);
+		break;
+	case D2_A6_WAIT_FOR_RX_ID_LIST:
+		status = wait_for_rx_id_list(hdcp, event_ctx, input);
+		break;
+	case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+		status = verify_rx_id_list_and_send_ack(hdcp, event_ctx, input);
+		break;
+	case D2_A9_SEND_STREAM_MANAGEMENT:
+		status = send_stream_management(hdcp, event_ctx, input);
+		break;
+	case D2_A9_VALIDATE_STREAM_READY:
+		status = validate_stream_ready(hdcp, event_ctx, input);
+		break;
+	default:
+		status = MOD_HDCP_STATUS_INVALID_STATE;
+		break;
+	}
+
+	return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
new file mode 100644
index 0000000..8cae3e3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input,
+		struct mod_hdcp_output *output)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+	struct mod_hdcp_connection *conn = &hdcp->connection;
+	struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+	switch (current_state(hdcp)) {
+	case H2_A0_KNOWN_HDCP2_CAPABLE_RX:
+		if (input->hdcp2version_read != PASS ||
+				input->hdcp2_capable_check != PASS) {
+			adjust->hdcp2.disable = 1;
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, HDCP_INITIALIZED);
+		} else {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, H2_A1_SEND_AKE_INIT);
+		}
+		break;
+	case H2_A1_SEND_AKE_INIT:
+		if (input->add_topology != PASS ||
+				input->create_session != PASS ||
+				input->ake_init_prepare != PASS) {
+			/* out of sync with psp state */
+			adjust->hdcp2.disable = 1;
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (input->ake_init_write != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		set_watchdog_in_ms(hdcp, 100, output);
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, H2_A1_VALIDATE_AKE_CERT);
+		break;
+	case H2_A1_VALIDATE_AKE_CERT:
+		if (input->ake_cert_available != PASS) {
+			if (event_ctx->event ==
+					MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+				/* 1A-08: consider ake timeout a failure */
+				/* some hdmi receivers are not ready for HDCP
+				 * immediately after video becomes active,
+				 * delay 1s before retry on first HDCP message
+				 * timeout.
+				 */
+				fail_and_restart_in_ms(1000, &status, output);
+			} else {
+				/* continue ake cert polling */
+				callback_in_ms(10, output);
+				increment_stay_counter(hdcp);
+			}
+			break;
+		} else if (input->ake_cert_read != PASS ||
+				input->ake_cert_validation != PASS) {
+			/*
+			 * 1A-09: consider invalid ake cert a failure
+			 * 1A-10: consider receiver id listed in SRM a failure
+			 */
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		if (conn->is_km_stored &&
+				!adjust->hdcp2.force_no_stored_km) {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, H2_A1_SEND_STORED_KM);
+		} else {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, H2_A1_SEND_NO_STORED_KM);
+		}
+		break;
+	case H2_A1_SEND_NO_STORED_KM:
+		if (input->no_stored_km_write != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		if (adjust->hdcp2.increase_h_prime_timeout)
+			set_watchdog_in_ms(hdcp, 2000, output);
+		else
+			set_watchdog_in_ms(hdcp, 1000, output);
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, H2_A1_READ_H_PRIME);
+		break;
+	case H2_A1_READ_H_PRIME:
+		if (input->h_prime_available != PASS) {
+			if (event_ctx->event ==
+					MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+				/* 1A-11-3: consider h' timeout a failure */
+				fail_and_restart_in_ms(1000, &status, output);
+			} else {
+				/* continue h' polling */
+				callback_in_ms(100, output);
+				increment_stay_counter(hdcp);
+			}
+			break;
+		} else if (input->h_prime_read != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		set_watchdog_in_ms(hdcp, 200, output);
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME);
+		break;
+	case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+		if (input->pairing_available != PASS) {
+			if (event_ctx->event ==
+					MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+				/* 1A-12: consider pairing info timeout
+				 * a failure
+				 */
+				fail_and_restart_in_ms(0, &status, output);
+			} else {
+				/* continue pairing info polling */
+				callback_in_ms(20, output);
+				increment_stay_counter(hdcp);
+			}
+			break;
+		} else if (input->pairing_info_read != PASS ||
+				input->h_prime_validation != PASS) {
+			/* 1A-11-1: consider invalid h' a failure */
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
+		break;
+	case H2_A1_SEND_STORED_KM:
+		if (input->stored_km_write != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		set_watchdog_in_ms(hdcp, 200, output);
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, H2_A1_VALIDATE_H_PRIME);
+		break;
+	case H2_A1_VALIDATE_H_PRIME:
+		if (input->h_prime_available != PASS) {
+			if (event_ctx->event ==
+					MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+				/* 1A-11-2: consider h' timeout a failure */
+				fail_and_restart_in_ms(1000, &status, output);
+			} else {
+				/* continue h' polling */
+				callback_in_ms(20, output);
+				increment_stay_counter(hdcp);
+			}
+			break;
+		} else if (input->h_prime_read != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (input->h_prime_validation != PASS) {
+			/* 1A-11-1: consider invalid h' a failure */
+			adjust->hdcp2.force_no_stored_km = 1;
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
+		break;
+	case H2_A2_LOCALITY_CHECK:
+		if (hdcp->state.stay_count > 10 ||
+				input->lc_init_prepare != PASS ||
+				input->lc_init_write != PASS ||
+				input->l_prime_available_poll != PASS ||
+				input->l_prime_read != PASS) {
+			/*
+			 * 1A-05: consider disconnection after LC init a failure
+			 * 1A-13-1: consider invalid l' a failure
+			 * 1A-13-2: consider l' timeout a failure
+			 */
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (input->l_prime_validation != PASS) {
+			callback_in_ms(0, output);
+			increment_stay_counter(hdcp);
+			break;
+		}
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
+		break;
+	case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+		if (input->eks_prepare != PASS ||
+				input->eks_write != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		if (conn->is_repeater) {
+			set_watchdog_in_ms(hdcp, 3000, output);
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, H2_A6_WAIT_FOR_RX_ID_LIST);
+		} else {
+			/* some CTS equipment requires a delay GREATER than
+			 * 200 ms, so delay 210 ms instead of 200 ms
+			 */
+			callback_in_ms(210, output);
+			set_state_id(hdcp, output, H2_ENABLE_ENCRYPTION);
+		}
+		break;
+	case H2_ENABLE_ENCRYPTION:
+		if (input->rxstatus_read != PASS ||
+				input->reauth_request_check != PASS) {
+			/*
+			 * 1A-07: restart hdcp on REAUTH_REQ
+			 * 1B-08: restart hdcp on REAUTH_REQ
+			 */
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+			break;
+		} else if (input->enable_encryption != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, H2_A5_AUTHENTICATED);
+		HDCP_FULL_DDC_TRACE(hdcp);
+		break;
+	case H2_A5_AUTHENTICATED:
+		if (input->rxstatus_read != PASS ||
+				input->reauth_request_check != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+			break;
+		}
+		callback_in_ms(500, output);
+		increment_stay_counter(hdcp);
+		break;
+	case H2_A6_WAIT_FOR_RX_ID_LIST:
+		if (input->rxstatus_read != PASS ||
+				input->reauth_request_check != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (!event_ctx->rx_id_list_ready) {
+			if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+				/* 1B-02: consider rx id list timeout a failure */
+				/* some CTS equipment's actual timeout
+				 * measurement is slightly greater than 3000 ms.
+				 * Delay 100 ms to ensure it is fully timeout
+				 * Delay 100 ms to ensure it has fully timed out
+				 */
+				fail_and_restart_in_ms(100, &status, output);
+			} else {
+				callback_in_ms(300, output);
+				increment_stay_counter(hdcp);
+			}
+			break;
+		}
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+		break;
+	case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+		if (input->rxstatus_read != PASS ||
+				input->reauth_request_check != PASS ||
+				input->rx_id_list_read != PASS ||
+				input->device_count_check != PASS ||
+				input->rx_id_list_validation != PASS ||
+				input->repeater_auth_ack_write != PASS) {
+			/* 1B-03: consider invalid v' a failure
+			 * 1B-04: consider MAX_DEVS_EXCEEDED a failure
+			 * 1B-05: consider MAX_CASCADE_EXCEEDED a failure
+			 * 1B-06: consider invalid seq_num_V a failure
+			 * 1B-09: consider seq_num_V rollover a failure
+			 */
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT);
+		break;
+	case H2_A9_SEND_STREAM_MANAGEMENT:
+		if (input->rxstatus_read != PASS ||
+				input->reauth_request_check != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+			break;
+		} else if (input->prepare_stream_manage != PASS ||
+				input->stream_manage_write != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		set_watchdog_in_ms(hdcp, 100, output);
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, H2_A9_VALIDATE_STREAM_READY);
+		break;
+	case H2_A9_VALIDATE_STREAM_READY:
+		if (input->rxstatus_read != PASS ||
+				input->reauth_request_check != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+			break;
+		} else if (input->stream_ready_available != PASS) {
+			if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+				/* 1B-10-2: restart content stream management on
+				 * stream ready timeout
+				 */
+				hdcp->auth.count.stream_management_retry_count++;
+				callback_in_ms(0, output);
+				set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT);
+			} else {
+				callback_in_ms(10, output);
+				increment_stay_counter(hdcp);
+			}
+			break;
+		} else if (input->stream_ready_read != PASS ||
+				input->stream_ready_validation != PASS) {
+			/*
+			 * 1B-10-1: restart content stream management
+			 * on invalid M'
+			 */
+			if (hdcp->auth.count.stream_management_retry_count > 10) {
+				fail_and_restart_in_ms(0, &status, output);
+			} else {
+				hdcp->auth.count.stream_management_retry_count++;
+				callback_in_ms(0, output);
+				set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT);
+			}
+			break;
+		}
+		callback_in_ms(200, output);
+		set_state_id(hdcp, output, H2_ENABLE_ENCRYPTION);
+		break;
+	default:
+		status = MOD_HDCP_STATUS_INVALID_STATE;
+		fail_and_restart_in_ms(0, &status, output);
+		break;
+	}
+
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
+		struct mod_hdcp_event_context *event_ctx,
+		struct mod_hdcp_transition_input_hdcp2 *input,
+		struct mod_hdcp_output *output)
+{
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+	struct mod_hdcp_connection *conn = &hdcp->connection;
+	struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+	switch (current_state(hdcp)) {
+	case D2_A0_DETERMINE_RX_HDCP_CAPABLE:
+		if (input->rx_caps_read_dp != PASS ||
+				input->hdcp2_capable_check != PASS) {
+			adjust->hdcp2.disable = 1;
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, HDCP_INITIALIZED);
+		} else {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, D2_A1_SEND_AKE_INIT);
+		}
+		break;
+	case D2_A1_SEND_AKE_INIT:
+		if (input->add_topology != PASS ||
+				input->create_session != PASS ||
+				input->ake_init_prepare != PASS) {
+			/* out of sync with psp state */
+			adjust->hdcp2.disable = 1;
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (input->ake_init_write != PASS) {
+			/* possibly display not ready */
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		callback_in_ms(100, output);
+		set_state_id(hdcp, output, D2_A1_VALIDATE_AKE_CERT);
+		break;
+	case D2_A1_VALIDATE_AKE_CERT:
+		if (input->ake_cert_read != PASS ||
+				input->ake_cert_validation != PASS) {
+			/*
+			 * 1A-08: consider invalid ake cert a failure
+			 * 1A-09: consider receiver id listed in SRM a failure
+			 */
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		if (conn->is_km_stored &&
+				!adjust->hdcp2.force_no_stored_km) {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, D2_A1_SEND_STORED_KM);
+		} else {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, D2_A1_SEND_NO_STORED_KM);
+		}
+		break;
+	case D2_A1_SEND_NO_STORED_KM:
+		if (input->no_stored_km_write != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		if (adjust->hdcp2.increase_h_prime_timeout)
+			set_watchdog_in_ms(hdcp, 2000, output);
+		else
+			set_watchdog_in_ms(hdcp, 1000, output);
+		set_state_id(hdcp, output, D2_A1_READ_H_PRIME);
+		break;
+	case D2_A1_READ_H_PRIME:
+		if (input->h_prime_available != PASS) {
+			if (event_ctx->event ==
+					MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+				/* 1A-10-3: consider h' timeout a failure */
+				fail_and_restart_in_ms(1000, &status, output);
+			else
+				increment_stay_counter(hdcp);
+			break;
+		} else if (input->h_prime_read != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		set_watchdog_in_ms(hdcp, 200, output);
+		set_state_id(hdcp, output, D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME);
+		break;
+	case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+		if (input->pairing_available != PASS) {
+			if (event_ctx->event ==
+					MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+				/*
+				 * 1A-11: consider pairing info timeout
+				 * a failure
+				 */
+				fail_and_restart_in_ms(0, &status, output);
+			else
+				increment_stay_counter(hdcp);
+			break;
+		} else if (input->pairing_info_read != PASS ||
+				input->h_prime_validation != PASS) {
+			/* 1A-10-1: consider invalid h' a failure */
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK);
+		break;
+	case D2_A1_SEND_STORED_KM:
+		if (input->stored_km_write != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		set_watchdog_in_ms(hdcp, 200, output);
+		set_state_id(hdcp, output, D2_A1_VALIDATE_H_PRIME);
+		break;
+	case D2_A1_VALIDATE_H_PRIME:
+		if (input->h_prime_available != PASS) {
+			if (event_ctx->event ==
+					MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+				/* 1A-10-2: consider h' timeout a failure */
+				fail_and_restart_in_ms(1000, &status, output);
+			else
+				increment_stay_counter(hdcp);
+			break;
+		} else if (input->h_prime_read != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (input->h_prime_validation != PASS) {
+			/* 1A-10-1: consider invalid h' a failure */
+			adjust->hdcp2.force_no_stored_km = 1;
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK);
+		break;
+	case D2_A2_LOCALITY_CHECK:
+		if (hdcp->state.stay_count > 10 ||
+				input->lc_init_prepare != PASS ||
+				input->lc_init_write != PASS ||
+				input->l_prime_read != PASS) {
+			/* 1A-12: consider invalid l' a failure */
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (input->l_prime_validation != PASS) {
+			callback_in_ms(0, output);
+			increment_stay_counter(hdcp);
+			break;
+		}
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
+		break;
+	case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+		if (input->eks_prepare != PASS ||
+				input->eks_write != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		if (conn->is_repeater) {
+			set_watchdog_in_ms(hdcp, 3000, output);
+			set_state_id(hdcp, output, D2_A6_WAIT_FOR_RX_ID_LIST);
+		} else {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, D2_SEND_CONTENT_STREAM_TYPE);
+		}
+		break;
+	case D2_SEND_CONTENT_STREAM_TYPE:
+		if (input->rxstatus_read != PASS ||
+				input->reauth_request_check != PASS ||
+				input->link_integrity_check_dp != PASS ||
+				input->content_stream_type_write != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		callback_in_ms(210, output);
+		set_state_id(hdcp, output, D2_ENABLE_ENCRYPTION);
+		break;
+	case D2_ENABLE_ENCRYPTION:
+		if (input->rxstatus_read != PASS ||
+				input->reauth_request_check != PASS ||
+				input->link_integrity_check_dp != PASS) {
+			/*
+			 * 1A-07: restart hdcp on REAUTH_REQ
+			 * 1B-08: restart hdcp on REAUTH_REQ
+			 */
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+			break;
+		} else if (input->enable_encryption != PASS ||
+				(is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		set_state_id(hdcp, output, D2_A5_AUTHENTICATED);
+		HDCP_FULL_DDC_TRACE(hdcp);
+		break;
+	case D2_A5_AUTHENTICATED:
+		if (input->rxstatus_read != PASS ||
+				input->reauth_request_check != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (input->link_integrity_check_dp != PASS) {
+			if (hdcp->connection.hdcp2_retry_count >= 1)
+				adjust->hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+			break;
+		}
+		increment_stay_counter(hdcp);
+		break;
+	case D2_A6_WAIT_FOR_RX_ID_LIST:
+		if (input->rxstatus_read != PASS ||
+				input->reauth_request_check != PASS ||
+				input->link_integrity_check_dp != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (!event_ctx->rx_id_list_ready) {
+			if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+				/* 1B-02: consider rx id list timeout a failure */
+				fail_and_restart_in_ms(0, &status, output);
+			else
+				increment_stay_counter(hdcp);
+			break;
+		}
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+		break;
+	case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+		if (input->rxstatus_read != PASS ||
+				input->reauth_request_check != PASS ||
+				input->link_integrity_check_dp != PASS ||
+				input->rx_id_list_read != PASS ||
+				input->device_count_check != PASS ||
+				input->rx_id_list_validation != PASS ||
+				input->repeater_auth_ack_write != PASS) {
+			/*
+			 * 1B-03: consider invalid v' a failure
+			 * 1B-04: consider MAX_DEVS_EXCEEDED a failure
+			 * 1B-05: consider MAX_CASCADE_EXCEEDED a failure
+			 * 1B-06: consider invalid seq_num_V a failure
+			 * 1B-09: consider seq_num_V rollover a failure
+			 */
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		}
+		callback_in_ms(0, output);
+		set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT);
+		break;
+	case D2_A9_SEND_STREAM_MANAGEMENT:
+		if (input->rxstatus_read != PASS ||
+				input->reauth_request_check != PASS ||
+				input->link_integrity_check_dp != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (event_ctx->rx_id_list_ready) {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+			break;
+		} else if (input->prepare_stream_manage != PASS ||
+				input->stream_manage_write != PASS) {
+			if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK)
+				fail_and_restart_in_ms(0, &status, output);
+			else
+				increment_stay_counter(hdcp);
+			break;
+		}
+		callback_in_ms(100, output);
+		set_state_id(hdcp, output, D2_A9_VALIDATE_STREAM_READY);
+		break;
+	case D2_A9_VALIDATE_STREAM_READY:
+		if (input->rxstatus_read != PASS ||
+				input->reauth_request_check != PASS ||
+				input->link_integrity_check_dp != PASS) {
+			fail_and_restart_in_ms(0, &status, output);
+			break;
+		} else if (event_ctx->rx_id_list_ready) {
+			callback_in_ms(0, output);
+			set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+			break;
+		} else if (input->stream_ready_read != PASS ||
+				input->stream_ready_validation != PASS) {
+			/*
+			 * 1B-10-1: restart content stream management
+			 * on invalid M'
+			 * 1B-10-2: consider stream ready timeout a failure
+			 */
+			if (hdcp->auth.count.stream_management_retry_count > 10) {
+				fail_and_restart_in_ms(0, &status, output);
+			} else if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK) {
+				hdcp->auth.count.stream_management_retry_count++;
+				callback_in_ms(0, output);
+				set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT);
+			} else {
+				increment_stay_counter(hdcp);
+			}
+			break;
+		}
+		callback_in_ms(200, output);
+		set_state_id(hdcp, output, D2_ENABLE_ENCRYPTION);
+		break;
+	default:
+		status = MOD_HDCP_STATUS_INVALID_STATE;
+		fail_and_restart_in_ms(0, &status, output);
+		break;
+	}
+	return status;
+}
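
The transition handler above is table-driven: each state validates the inputs collected for it and then either fails and restarts authentication, stays in place behind a retry counter, or schedules the next state through a delayed callback. The following minimal sketch mirrors that fail/stay/advance control flow; every type and name in it is hypothetical and is not part of the mod_hdcp API.

/*
 * Hypothetical sketch of the fail/stay/advance transition pattern used
 * above.  None of these names come from mod_hdcp; they only illustrate
 * the control flow.
 */
#include <stdio.h>
#include <stdbool.h>

enum state { S_SEND_MGMT, S_VALIDATE_READY, S_ENCRYPTED, S_FAILED };

struct input  { bool rxstatus_ok, stream_ready_ok; };
struct output { unsigned int callback_delay_ms; enum state next; };

static void advance(struct output *out, enum state next, unsigned int delay_ms)
{
	out->next = next;
	out->callback_delay_ms = delay_ms;
}

static void fail_and_restart(struct output *out)
{
	out->next = S_FAILED;
	out->callback_delay_ms = 0;
}

/* One transition step: validate inputs, then fail, retry, or advance. */
static void transition(enum state cur, const struct input *in,
		       struct output *out, unsigned int *retry_count)
{
	switch (cur) {
	case S_SEND_MGMT:
		if (!in->rxstatus_ok)
			fail_and_restart(out);
		else
			advance(out, S_VALIDATE_READY, 100);
		break;
	case S_VALIDATE_READY:
		if (!in->stream_ready_ok) {
			/* bounded retry, mirroring stream_management_retry_count */
			if (++(*retry_count) > 10)
				fail_and_restart(out);
			else
				advance(out, S_SEND_MGMT, 0);
		} else {
			advance(out, S_ENCRYPTED, 200);
		}
		break;
	default:
		fail_and_restart(out);
		break;
	}
}

int main(void)
{
	struct input in = { .rxstatus_ok = true, .stream_ready_ok = true };
	struct output out = { 0 };
	unsigned int retries = 0;
	enum state cur = S_SEND_MGMT;

	while (cur != S_ENCRYPTED && cur != S_FAILED) {
		transition(cur, &in, &out, &retries);
		cur = out.next;
		printf("next state %d after %u ms\n", cur, out.callback_delay_ms);
	}
	return 0;
}
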
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index e7baae0..ff9d548 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -51,6 +51,26 @@ enum mod_hdcp_ddc_message_id {
 	MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
 	MOD_HDCP_MESSAGE_ID_READ_BINFO,
 
+	/* HDCP 2.2 */
+
+	MOD_HDCP_MESSAGE_ID_HDCP2VERSION,
+	MOD_HDCP_MESSAGE_ID_RX_CAPS,
+	MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT,
+	MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
+	MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
+	MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
+	MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
+	MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
+	MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT,
+	MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
+	MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
+	MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+	MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
+	MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
+	MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
+	MOD_HDCP_MESSAGE_ID_READ_RXSTATUS,
+	MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE,
+
 	MOD_HDCP_MESSAGE_ID_MAX
 };
 
@@ -70,6 +90,22 @@ static const uint8_t hdcp_i2c_offsets[] = {
 	[MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,
 	[MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,
 	[MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0xFF,
+	[MOD_HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50,
+	[MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60,
+	[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80,
+	[MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60,
+	[MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60,
+	[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80,
+	[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80,
+	[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60,
+	[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
+	[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
+	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
+	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
+	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
+	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
+	[MOD_HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70,
+	[MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0
 };
 
 static const uint32_t hdcp_dpcd_addrs[] = {
@@ -88,6 +124,22 @@ static const uint32_t hdcp_dpcd_addrs[] = {
 	[MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,
 	[MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,
 	[MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a,
+	[MOD_HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d,
+	[MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000,
+	[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b,
+	[MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220,
+	[MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0,
+	[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0,
+	[MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0,
+	[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0,
+	[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
+	[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
+	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
+	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
+	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
+	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
+	[MOD_HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493,
+	[MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494
 };
 
 static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
@@ -303,3 +355,277 @@ enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp)
 			hdcp->auth.msg.hdcp1.an,
 			sizeof(hdcp->auth.msg.hdcp1.an));
 }
+
+enum mod_hdcp_status mod_hdcp_read_hdcp2version(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp))
+		status = MOD_HDCP_STATUS_INVALID_OPERATION;
+	else
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_HDCP2VERSION,
+				&hdcp->auth.msg.hdcp2.hdcp2version_hdmi,
+				sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi));
+
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (!is_dp_hdcp(hdcp))
+		status = MOD_HDCP_STATUS_INVALID_OPERATION;
+	else
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_RX_CAPS,
+				hdcp->auth.msg.hdcp2.rxcaps_dp,
+				sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp));
+
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp)) {
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS,
+				&hdcp->auth.msg.hdcp2.rxstatus_dp,
+				1);
+	} else {
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS,
+					(uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus,
+					sizeof(hdcp->auth.msg.hdcp2.rxstatus));
+	}
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp)) {
+		hdcp->auth.msg.hdcp2.ake_cert[0] = 3;
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
+				hdcp->auth.msg.hdcp2.ake_cert+1,
+				sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1);
+
+	} else {
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
+					hdcp->auth.msg.hdcp2.ake_cert,
+					sizeof(hdcp->auth.msg.hdcp2.ake_cert));
+	}
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp)) {
+		hdcp->auth.msg.hdcp2.ake_h_prime[0] = 7;
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
+				hdcp->auth.msg.hdcp2.ake_h_prime+1,
+				sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1);
+
+	} else {
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
+				hdcp->auth.msg.hdcp2.ake_h_prime,
+				sizeof(hdcp->auth.msg.hdcp2.ake_h_prime));
+	}
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp)) {
+		hdcp->auth.msg.hdcp2.ake_pairing_info[0] = 8;
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
+				hdcp->auth.msg.hdcp2.ake_pairing_info+1,
+				sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1);
+
+	} else {
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
+				hdcp->auth.msg.hdcp2.ake_pairing_info,
+				sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info));
+	}
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp)) {
+		hdcp->auth.msg.hdcp2.lc_l_prime[0] = 10;
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
+				hdcp->auth.msg.hdcp2.lc_l_prime+1,
+				sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1);
+
+	} else {
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
+				hdcp->auth.msg.hdcp2.lc_l_prime,
+				sizeof(hdcp->auth.msg.hdcp2.lc_l_prime));
+	}
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp)) {
+		hdcp->auth.msg.hdcp2.rx_id_list[0] = 12;
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+				hdcp->auth.msg.hdcp2.rx_id_list+1,
+				sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1);
+
+	} else {
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+				hdcp->auth.msg.hdcp2.rx_id_list,
+				hdcp->auth.msg.hdcp2.rx_id_list_size);
+	}
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp)) {
+		hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = 17;
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
+				hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1,
+				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1);
+
+	} else {
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
+				hdcp->auth.msg.hdcp2.repeater_auth_stream_ready,
+				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready));
+	}
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp))
+		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT,
+				hdcp->auth.msg.hdcp2.ake_init+1,
+				sizeof(hdcp->auth.msg.hdcp2.ake_init)-1);
+	else
+		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT,
+					hdcp->auth.msg.hdcp2.ake_init,
+					sizeof(hdcp->auth.msg.hdcp2.ake_init));
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp))
+		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
+				hdcp->auth.msg.hdcp2.ake_no_stored_km+1,
+				sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)-1);
+	else
+		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
+			hdcp->auth.msg.hdcp2.ake_no_stored_km,
+			sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp))
+		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
+				hdcp->auth.msg.hdcp2.ake_stored_km+1,
+				sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)-1);
+	else
+		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
+				hdcp->auth.msg.hdcp2.ake_stored_km,
+				sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp))
+		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT,
+				hdcp->auth.msg.hdcp2.lc_init+1,
+				sizeof(hdcp->auth.msg.hdcp2.lc_init)-1);
+	else
+		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT,
+				hdcp->auth.msg.hdcp2.lc_init,
+				sizeof(hdcp->auth.msg.hdcp2.lc_init));
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp))
+		status = write(hdcp,
+				MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
+				hdcp->auth.msg.hdcp2.ske_eks+1,
+				sizeof(hdcp->auth.msg.hdcp2.ske_eks)-1);
+	else
+		status = write(hdcp,
+			MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
+			hdcp->auth.msg.hdcp2.ske_eks,
+			sizeof(hdcp->auth.msg.hdcp2.ske_eks));
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp))
+		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
+				hdcp->auth.msg.hdcp2.repeater_auth_ack+1,
+				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)-1);
+	else
+		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
+				hdcp->auth.msg.hdcp2.repeater_auth_ack,
+				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp))
+		status = write(hdcp,
+				MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
+				hdcp->auth.msg.hdcp2.repeater_auth_stream_manage+1,
+				hdcp->auth.msg.hdcp2.stream_manage_size-1);
+	else
+		status = write(hdcp,
+				MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
+				hdcp->auth.msg.hdcp2.repeater_auth_stream_manage,
+				hdcp->auth.msg.hdcp2.stream_manage_size);
+	return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp)
+{
+	enum mod_hdcp_status status;
+
+	if (is_dp_hdcp(hdcp))
+		status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE,
+				hdcp->auth.msg.hdcp2.content_stream_type_dp+1,
+				sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)-1);
+	else
+		status = MOD_HDCP_STATUS_INVALID_OPERATION;
+	return status;
+}
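
The read/write helpers above absorb a transport difference in HDCP 2.2: DPCD transactions carry only the message body, so for DisplayPort the code stores the msg_id in byte 0 of its local buffer and transfers buffer+1 with size-1, while HDMI I2C transfers the whole buffer including the id byte. A minimal sketch of that offset-by-one handling follows; the transport callback is a stand-in, and the 534-byte AKE_Send_Cert size and msg_id 3 are taken from the values used above.

/*
 * Hypothetical illustration of the DP vs. HDMI buffer handling used by
 * the mod_hdcp_read_*()/mod_hdcp_write_*() helpers above.  The transport
 * callback is an assumption for the example only.
 */
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <stdio.h>

#define AKE_CERT_MSG_ID   3	/* HDCP 2.2 AKE_Send_Cert msg_id */
#define AKE_CERT_SIZE     534	/* msg_id byte + 533-byte body */

/* Stand-in for a DDC/DPCD read; fills buf with dummy data. */
static int transport_read(uint8_t *buf, size_t size)
{
	memset(buf, 0xab, size);
	return 0;
}

static int read_ake_cert(bool is_dp, uint8_t cert[AKE_CERT_SIZE])
{
	if (is_dp) {
		/* DPCD carries only the body: keep msg_id locally. */
		cert[0] = AKE_CERT_MSG_ID;
		return transport_read(cert + 1, AKE_CERT_SIZE - 1);
	}
	/* HDMI/I2C carries msg_id + body in one transfer. */
	return transport_read(cert, AKE_CERT_SIZE);
}

int main(void)
{
	uint8_t cert[AKE_CERT_SIZE];

	read_ake_cert(true, cert);
	printf("DP: msg_id=%u first body byte=0x%02x\n", cert[0], cert[1]);
	read_ake_cert(false, cert);
	printf("HDMI: first byte=0x%02x\n", cert[0]);
	return 0;
}
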
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 3982ced..724ebce 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -116,6 +116,58 @@ char *mod_hdcp_status_to_str(int32_t status)
 		return "MOD_HDCP_STATUS_DDC_FAILURE";
 	case MOD_HDCP_STATUS_INVALID_OPERATION:
 		return "MOD_HDCP_STATUS_INVALID_OPERATION";
+	case MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE:
+		return "MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE";
+	case MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE";
+	case MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE";
+	case MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE";
+	case MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING:
+		return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING";
+	case MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING:
+		return "MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING";
+	case MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING:
+		return "MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING";
+	case MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE";
+	case MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED:
+		return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED";
+	case MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE";
+	case MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE";
+	case MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE";
+	case MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING:
+		return "MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING";
+	case MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE";
+	case MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE";
+	case MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE";
+	case MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE";
+	case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED:
+		return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED";
+	case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY:
+		return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY";
+	case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION:
+		return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION";
+	case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING:
+		return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING";
+	case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE";
+	case MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE";
+	case MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST:
+		return "MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST";
+	case MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE";
+	case MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE:
+		return "MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE";
 	default:
 		return "MOD_HDCP_STATUS_UNKNOWN";
 	}
@@ -156,6 +208,72 @@ char *mod_hdcp_state_id_to_str(int32_t id)
 		return "D1_A6_WAIT_FOR_READY";
 	case D1_A7_READ_KSV_LIST:
 		return "D1_A7_READ_KSV_LIST";
+	case H2_A0_KNOWN_HDCP2_CAPABLE_RX:
+		return "H2_A0_KNOWN_HDCP2_CAPABLE_RX";
+	case H2_A1_SEND_AKE_INIT:
+		return "H2_A1_SEND_AKE_INIT";
+	case H2_A1_VALIDATE_AKE_CERT:
+		return "H2_A1_VALIDATE_AKE_CERT";
+	case H2_A1_SEND_NO_STORED_KM:
+		return "H2_A1_SEND_NO_STORED_KM";
+	case H2_A1_READ_H_PRIME:
+		return "H2_A1_READ_H_PRIME";
+	case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+		return "H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME";
+	case H2_A1_SEND_STORED_KM:
+		return "H2_A1_SEND_STORED_KM";
+	case H2_A1_VALIDATE_H_PRIME:
+		return "H2_A1_VALIDATE_H_PRIME";
+	case H2_A2_LOCALITY_CHECK:
+		return "H2_A2_LOCALITY_CHECK";
+	case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+		return "H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER";
+	case H2_ENABLE_ENCRYPTION:
+		return "H2_ENABLE_ENCRYPTION";
+	case H2_A5_AUTHENTICATED:
+		return "H2_A5_AUTHENTICATED";
+	case H2_A6_WAIT_FOR_RX_ID_LIST:
+		return "H2_A6_WAIT_FOR_RX_ID_LIST";
+	case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+		return "H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK";
+	case H2_A9_SEND_STREAM_MANAGEMENT:
+		return "H2_A9_SEND_STREAM_MANAGEMENT";
+	case H2_A9_VALIDATE_STREAM_READY:
+		return "H2_A9_VALIDATE_STREAM_READY";
+	case D2_A0_DETERMINE_RX_HDCP_CAPABLE:
+		return "D2_A0_DETERMINE_RX_HDCP_CAPABLE";
+	case D2_A1_SEND_AKE_INIT:
+		return "D2_A1_SEND_AKE_INIT";
+	case D2_A1_VALIDATE_AKE_CERT:
+		return "D2_A1_VALIDATE_AKE_CERT";
+	case D2_A1_SEND_NO_STORED_KM:
+		return "D2_A1_SEND_NO_STORED_KM";
+	case D2_A1_READ_H_PRIME:
+		return "D2_A1_READ_H_PRIME";
+	case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+		return "D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME";
+	case D2_A1_SEND_STORED_KM:
+		return "D2_A1_SEND_STORED_KM";
+	case D2_A1_VALIDATE_H_PRIME:
+		return "D2_A1_VALIDATE_H_PRIME";
+	case D2_A2_LOCALITY_CHECK:
+		return "D2_A2_LOCALITY_CHECK";
+	case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+		return "D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER";
+	case D2_SEND_CONTENT_STREAM_TYPE:
+		return "D2_SEND_CONTENT_STREAM_TYPE";
+	case D2_ENABLE_ENCRYPTION:
+		return "D2_ENABLE_ENCRYPTION";
+	case D2_A5_AUTHENTICATED:
+		return "D2_A5_AUTHENTICATED";
+	case D2_A6_WAIT_FOR_RX_ID_LIST:
+		return "D2_A6_WAIT_FOR_RX_ID_LIST";
+	case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+		return "D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK";
+	case D2_A9_SEND_STREAM_MANAGEMENT:
+		return "D2_A9_SEND_STREAM_MANAGEMENT";
+	case D2_A9_VALIDATE_STREAM_READY:
+		return "D2_A9_VALIDATE_STREAM_READY";
 	default:
 		return "UNKNOWN_STATE_ID";
 	};
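
The string tables above extend two long switch statements by hand. A common alternative, sketched below, is an X-macro that generates the enum and its name table from one list, so a new HDCP 2.2 state cannot be added to one without the other; this only illustrates the trade-off and is not how mod_hdcp is structured.

/*
 * Illustrative X-macro sketch; the state names are a small subset of
 * the ones added above.
 */
#include <stdio.h>

#define HDCP_STATES(X)				\
	X(D2_A1_SEND_AKE_INIT)			\
	X(D2_A2_LOCALITY_CHECK)			\
	X(D2_A5_AUTHENTICATED)

enum hdcp_state {
#define AS_ENUM(name) name,
	HDCP_STATES(AS_ENUM)
#undef AS_ENUM
};

static const char *state_to_str(enum hdcp_state id)
{
	switch (id) {
#define AS_CASE(name) case name: return #name;
	HDCP_STATES(AS_CASE)
#undef AS_CASE
	}
	return "UNKNOWN_STATE_ID";
}

int main(void)
{
	printf("%s\n", state_to_str(D2_A2_LOCALITY_CHECK));
	return 0;
}
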
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index 2fd0e0a..ff91373 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -27,7 +27,7 @@
 #define MOD_HDCP_LOG_H_
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
-#define HDCP_LOG_ERR(hdcp, ...) DRM_ERROR(__VA_ARGS__)
+#define HDCP_LOG_ERR(hdcp, ...) DRM_WARN(__VA_ARGS__)
 #define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
@@ -37,7 +37,7 @@
 /* default logs */
 #define HDCP_ERROR_TRACE(hdcp, status) \
 		HDCP_LOG_ERR(hdcp, \
-			"[Link %d] ERROR %s IN STATE %s", \
+			"[Link %d] WARNING %s IN STATE %s", \
 			hdcp->config.index, \
 			mod_hdcp_status_to_str(status), \
 			mod_hdcp_state_id_to_str(hdcp->state.id))
@@ -45,6 +45,10 @@
 		HDCP_LOG_VER(hdcp, \
 			"[Link %d] HDCP 1.4 enabled on display %d", \
 			hdcp->config.index, displayIndex)
+#define HDCP_HDCP2_ENABLED_TRACE(hdcp, displayIndex) \
+		HDCP_LOG_VER(hdcp, \
+			"[Link %d] HDCP 2.2 enabled on display %d", \
+			hdcp->config.index, displayIndex)
 /* state machine logs */
 #define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \
 		HDCP_LOG_FSM(hdcp, \
@@ -93,26 +97,73 @@
 				hdcp->buf); \
 } while (0)
 #define HDCP_FULL_DDC_TRACE(hdcp) do { \
-	HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \
-			sizeof(hdcp->auth.msg.hdcp1.bksv)); \
-	HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
-			sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
-	HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
-			sizeof(hdcp->auth.msg.hdcp1.an)); \
-	HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
-			sizeof(hdcp->auth.msg.hdcp1.aksv)); \
-	HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \
-			sizeof(hdcp->auth.msg.hdcp1.ainfo)); \
-	HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \
-			(uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \
-			sizeof(hdcp->auth.msg.hdcp1.r0p)); \
-	HDCP_DDC_READ_TRACE(hdcp, "BINFO", \
-			(uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \
-			sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \
-	HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \
-			hdcp->auth.msg.hdcp1.ksvlist_size); \
-	HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \
-			sizeof(hdcp->auth.msg.hdcp1.vp)); \
+	if (is_hdcp1(hdcp)) { \
+		HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \
+				sizeof(hdcp->auth.msg.hdcp1.bksv)); \
+		HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
+				sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
+		HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
+				sizeof(hdcp->auth.msg.hdcp1.an)); \
+		HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
+				sizeof(hdcp->auth.msg.hdcp1.aksv)); \
+		HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \
+				sizeof(hdcp->auth.msg.hdcp1.ainfo)); \
+		HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \
+				(uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \
+				sizeof(hdcp->auth.msg.hdcp1.r0p)); \
+		HDCP_DDC_READ_TRACE(hdcp, "BINFO", \
+				(uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \
+				sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \
+		HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \
+				hdcp->auth.msg.hdcp1.ksvlist_size); \
+		HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \
+				sizeof(hdcp->auth.msg.hdcp1.vp)); \
+	} else { \
+		HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version", \
+				&hdcp->auth.msg.hdcp2.hdcp2version_hdmi, \
+				sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi)); \
+		HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp, \
+				sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp)); \
+		HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init, \
+				sizeof(hdcp->auth.msg.hdcp2.ake_init)); \
+		HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert, \
+				sizeof(hdcp->auth.msg.hdcp2.ake_cert)); \
+		HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM", \
+				hdcp->auth.msg.hdcp2.ake_stored_km, \
+				sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); \
+		HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM", \
+				hdcp->auth.msg.hdcp2.ake_no_stored_km, \
+				sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); \
+		HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime, \
+				sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); \
+		HDCP_DDC_READ_TRACE(hdcp, "Pairing Info", \
+				hdcp->auth.msg.hdcp2.ake_pairing_info, \
+				sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); \
+		HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init, \
+				sizeof(hdcp->auth.msg.hdcp2.lc_init)); \
+		HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime, \
+				sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); \
+		HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks, \
+				sizeof(hdcp->auth.msg.hdcp2.ske_eks)); \
+		HDCP_DDC_READ_TRACE(hdcp, "Rx Status", \
+				(uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, \
+				sizeof(hdcp->auth.msg.hdcp2.rxstatus)); \
+		HDCP_DDC_READ_TRACE(hdcp, "Rx Id List", \
+				hdcp->auth.msg.hdcp2.rx_id_list, \
+				hdcp->auth.msg.hdcp2.rx_id_list_size); \
+		HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack", \
+				hdcp->auth.msg.hdcp2.repeater_auth_ack, \
+				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); \
+		HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management", \
+				hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, \
+				hdcp->auth.msg.hdcp2.stream_manage_size); \
+		HDCP_DDC_READ_TRACE(hdcp, "Stream Ready", \
+				hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, \
+				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); \
+		HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type", \
+				hdcp->auth.msg.hdcp2.content_stream_type_dp, \
+				sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); \
+	} \
 } while (0)
 #define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \
 		HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \
@@ -123,6 +174,9 @@
 #define HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp) \
 		HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp1 session", \
 				hdcp->config.index)
+#define HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp) \
+		HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp2 session", \
+				hdcp->config.index)
 #define HDCP_TOP_RESET_AUTH_TRACE(hdcp) \
 		HDCP_LOG_TOP(hdcp, "[Link %d]\treset authentication", hdcp->config.index)
 #define HDCP_TOP_RESET_CONN_TRACE(hdcp) \
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 646d909..7911dc1 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -31,6 +31,19 @@
 #include "amdgpu.h"
 #include "hdcp_psp.h"
 
+static void hdcp2_message_init(struct mod_hdcp *hdcp,
+			       struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *in)
+{
+	in->session_handle = hdcp->auth.id;
+	in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+	in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+	in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+	in->process.msg1_desc.msg_size = 0;
+	in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+	in->process.msg2_desc.msg_size = 0;
+	in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+	in->process.msg3_desc.msg_size = 0;
+}
 enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)
 {
 
@@ -42,7 +55,7 @@ enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)
 	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
 
 	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
-		if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED) {
+		if (is_display_added(&(hdcp->connection.displays[i]))) {
 
 			memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
 
@@ -96,7 +109,7 @@ enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp)
 			dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
 			dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
 			dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
-				TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x;
+				TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;
 			dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
 
 			psp_dtm_invoke(psp, dtm_cmd->cmd_id);
@@ -132,10 +145,11 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
 
 	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
+	hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
+
 	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
 		return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
 
-	hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
 	hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
 	memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
 		sizeof(hdcp->auth.msg.hdcp1.aksv));
@@ -326,3 +340,493 @@ enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *
 	return MOD_HDCP_STATUS_SUCCESS;
 }
 
+enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct mod_hdcp_display *display = get_first_added_display(hdcp);
+
+	if (!psp->hdcp_context.hdcp_initialized) {
+		DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
+		return MOD_HDCP_STATUS_FAILURE;
+	}
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	if (!display)
+		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
+	hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
+
+	if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
+		hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type =
+			TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE0;
+	else if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_1)
+		hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type =
+			TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE1;
+	else if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_MAX)
+		hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type =
+			TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__MAX_SUPPORTED;
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2;
+
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE;
+
+	hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle;
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	hdcp_cmd->in_msg.hdcp2_destroy_session.session_handle = hdcp->auth.id;
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_DESTROY_SESSION;
+
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
+
+	HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__AKE_INIT;
+
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE;
+
+	memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0],
+	       sizeof(hdcp->auth.msg.hdcp2.ake_init));
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_CERT;
+	msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT;
+
+	memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_cert,
+	       sizeof(hdcp->auth.msg.hdcp2.ake_cert));
+
+	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__AKE_NO_STORED_KM;
+	msg_in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__AKE_STORED_KM;
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
+
+	memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km, &msg_out->prepare.transmitter_message[0],
+	       sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
+
+	memcpy(hdcp->auth.msg.hdcp2.ake_stored_km,
+	       &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)],
+	       sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
+
+	if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+		hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
+		hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
+		return MOD_HDCP_STATUS_SUCCESS;
+	}
+
+	return MOD_HDCP_STATUS_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_H_PRIME;
+	msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_H_PRIME;
+
+	memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_h_prime,
+	       sizeof(hdcp->auth.msg.hdcp2.ake_h_prime));
+
+	if (!hdcp->connection.is_km_stored) {
+		msg_in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_PAIRING_INFO;
+		msg_in->process.msg2_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_PAIRING_INFO;
+		memcpy(&msg_in->process.receiver_message[sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)],
+		       hdcp->auth.msg.hdcp2.ake_pairing_info, sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info));
+	}
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+
+	if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+	else if (!hdcp->connection.is_km_stored &&
+		 msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
+
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__LC_INIT;
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
+
+	memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
+	       sizeof(hdcp->auth.msg.hdcp2.lc_init));
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__LC_SEND_L_PRIME;
+	msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_SEND_L_PRIME;
+
+	memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.lc_l_prime,
+	       sizeof(hdcp->auth.msg.hdcp2.lc_l_prime));
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
+
+	if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__SKE_SEND_EKS;
+
+	if (is_dp_hdcp(hdcp))
+		msg_in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__SIGNAL_CONTENT_STREAM_TYPE_DP;
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
+
+	memcpy(hdcp->auth.msg.hdcp2.ske_eks, &msg_out->prepare.transmitter_message[0],
+	       sizeof(hdcp->auth.msg.hdcp2.ske_eks));
+	msg_out->prepare.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.ske_eks);
+
+	if (is_dp_hdcp(hdcp)) {
+		memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
+		       &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
+		       sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+	}
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct mod_hdcp_display *display = get_first_added_display(hdcp);
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	if (!display)
+		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
+	hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
+
+	if (!is_dp_mst_hdcp(hdcp)) {
+		display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+		HDCP_HDCP2_ENABLED_TRACE(hdcp, display->index);
+	}
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_RECEIVERID_LIST;
+	msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.rx_id_list);
+	memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.rx_id_list,
+	       sizeof(hdcp->auth.msg.hdcp2.rx_id_list));
+
+	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_ACK;
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+
+	memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack, &msg_out->prepare.transmitter_message[0],
+	       sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
+
+	if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+		hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
+		hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
+		return MOD_HDCP_STATUS_SUCCESS;
+	}
+
+
+	return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	uint8_t i;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+
+	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+		if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+		    hdcp->connection.displays[i].adjust.disable)
+			continue;
+		hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+		hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
+
+		hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION;
+		psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+		if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+			break;
+
+		hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+		HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+	}
+
+	return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS
+								  : MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *hdcp)
+{
+
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_MANAGE;
+
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
+
+	hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
+
+	memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, &msg_out->prepare.transmitter_message[0],
+	       sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_READY;
+
+	msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready);
+
+	memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.repeater_auth_stream_ready,
+	       sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready));
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) &&
+			       (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+		       ? MOD_HDCP_STATUS_SUCCESS
+		       : MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
+							       enum mod_hdcp_encryption_status *encryption_status)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	hdcp_cmd->in_msg.hdcp2_get_encryption_status.session_handle = hdcp->auth.id;
+	hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level = 0;
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS;
+	*encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_FAILURE;
+
+	if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level == 1) {
+		if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.hdcp2_type == TA_HDCP2_CONTENT_TYPE__TYPE1)
+			*encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON;
+		else
+			*encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON;
+	}
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
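
Each HDCP 2.2 PSP helper above follows the same shape: zero the TA shared buffer, fill the command-specific in_msg, set cmd_id, invoke the firmware, and only after checking hdcp_status (and any per-message status) copy results out of out_msg. The sketch below condenses that invoke pattern; its types, command id and invoke function are stand-ins, not the real amdgpu PSP interface.

/*
 * Hypothetical sketch of the PSP/TA invoke pattern used by the
 * mod_hdcp_hdcp2_*() helpers above.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

enum ta_status { TA_OK = 0, TA_FAIL = 1 };

struct ta_shared_mem {
	uint32_t cmd_id;
	enum ta_status status;
	struct { uint32_t display_handle; } in_msg;
	struct { uint32_t session_handle; } out_msg;
};

/* Stand-in for psp_hdcp_invoke(): pretend the firmware handled it. */
static void fake_psp_invoke(struct ta_shared_mem *cmd)
{
	cmd->status = TA_OK;
	cmd->out_msg.session_handle = 42;
}

static int hdcp2_create_session(struct ta_shared_mem *shared,
				uint32_t display_handle, uint32_t *session)
{
	/* 1. start from a clean shared buffer */
	memset(shared, 0, sizeof(*shared));
	/* 2. fill the command-specific input and command id */
	shared->in_msg.display_handle = display_handle;
	shared->cmd_id = 0x10;	/* assumed "create session" id */
	/* 3. invoke the trusted application */
	fake_psp_invoke(shared);
	/* 4. check status before trusting any output */
	if (shared->status != TA_OK)
		return -1;
	*session = shared->out_msg.session_handle;
	return 0;
}

int main(void)
{
	struct ta_shared_mem shared;
	uint32_t session;

	if (!hdcp2_create_session(&shared, 0, &session))
		printf("session handle %u\n", session);
	return 0;
}
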
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
index 986fc07..82a5e99 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -36,6 +36,11 @@ enum bgd_security_hdcp_encryption_level {
 	HDCP_ENCRYPTION_LEVEL__ON
 };
 
+enum bgd_security_hdcp2_content_type {
+	HDCP2_CONTENT_TYPE__INVALID = 0,
+	HDCP2_CONTENT_TYPE__TYPE0,
+	HDCP2_CONTENT_TYPE__TYPE1
+};
 enum ta_dtm_command {
 	TA_DTM_COMMAND__UNUSED_1 = 1,
 	TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2,
@@ -121,8 +126,64 @@ enum ta_hdcp_command {
 	TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION,
 	TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION,
 	TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS,
+	TA_HDCP_COMMAND__UNUSED_1,
+	TA_HDCP_COMMAND__HDCP2_DESTROY_SESSION,
+	TA_HDCP_COMMAND__UNUSED_2,
+	TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION,
+	TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS,
+	TA_HDCP_COMMAND__UNUSED_3,
+	TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2,
+	TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2,
+	TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION
 };
 
+enum ta_hdcp2_msg_id {
+	TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE = 1,
+	TA_HDCP_HDCP2_MSG_ID__AKE_INIT = 2,
+	TA_HDCP_HDCP2_MSG_ID__AKE_SEND_CERT = 3,
+	TA_HDCP_HDCP2_MSG_ID__AKE_NO_STORED_KM = 4,
+	TA_HDCP_HDCP2_MSG_ID__AKE_STORED_KM = 5,
+	TA_HDCP_HDCP2_MSG_ID__AKE_SEND_RRX = 6,
+	TA_HDCP_HDCP2_MSG_ID__AKE_SEND_H_PRIME = 7,
+	TA_HDCP_HDCP2_MSG_ID__AKE_SEND_PAIRING_INFO = 8,
+	TA_HDCP_HDCP2_MSG_ID__LC_INIT = 9,
+	TA_HDCP_HDCP2_MSG_ID__LC_SEND_L_PRIME = 10,
+	TA_HDCP_HDCP2_MSG_ID__SKE_SEND_EKS = 11,
+	TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_RECEIVERID_LIST = 12,
+	TA_HDCP_HDCP2_MSG_ID__RTT_READY = 13,
+	TA_HDCP_HDCP2_MSG_ID__RTT_CHALLENGE = 14,
+	TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_ACK = 15,
+	TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_MANAGE = 16,
+	TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_READY = 17,
+	TA_HDCP_HDCP2_MSG_ID__RECEIVER_AUTH_STATUS = 18,
+	TA_HDCP_HDCP2_MSG_ID__AKE_TRANSMITTER_INFO = 19,
+	TA_HDCP_HDCP2_MSG_ID__AKE_RECEIVER_INFO = 20,
+	TA_HDCP_HDCP2_MSG_ID__SIGNAL_CONTENT_STREAM_TYPE_DP = 129
+};
+
+enum ta_hdcp2_hdcp2_msg_id_max_size {
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__NULL_MESSAGE = 0,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_INIT = 12,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT = 534,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM = 129,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM = 33,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_RRX = 9,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_H_PRIME = 33,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_PAIRING_INFO = 17,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_INIT = 9,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_SEND_L_PRIME = 33,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__SKE_SEND_EKS = 25,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_SEND_RECEIVERID_LIST = 181,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RTT_READY = 1,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RTT_CHALLENGE = 17,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_SEND_RACK = 17,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_STREAM_MANAGE = 13,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_STREAM_READY = 33,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RECEIVER_AUTH_STATUS = 4,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_TRANSMITTER_INFO = 6,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO = 6,
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__SIGNAL_CONTENT_STREAM_TYPE_DP = 1
+};
 
 /* HDCP related enumerations */
 /**********************************************************/
@@ -131,6 +192,12 @@ enum ta_hdcp_command {
 #define TA_HDCP__HDCP1_KSV_SIZE 5
 #define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127
 #define TA_HDCP__HDCP1_V_PRIME_SIZE 20
+#define TA_HDCP__HDCP2_TX_BUF_MAX_SIZE                                                                                 \
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6
+
+// 64 bits boundaries
+#define TA_HDCP__HDCP2_RX_BUF_MAX_SIZE                                                                                 \
+	TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4
 
 enum ta_hdcp_status {
 	TA_HDCP_STATUS__SUCCESS = 0x00,
@@ -165,9 +232,47 @@ enum ta_hdcp_authentication_status {
 	TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE = 0x02,
 	TA_HDCP_AUTHENTICATION_STATUS__HDCP1_SECOND_PART_FAILED = 0x03,
 	TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED = 0x04,
+	TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_PENDING = 0x06,
+	TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_FAILED = 0x07,
+	TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATED = 0x08,
 	TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09
 };
 
+enum ta_hdcp2_msg_authentication_status {
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS = 0,
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__KM_NOT_AVAILABLE,
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__UNUSED,
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID = 100, // everything above does not fail the request
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__NOT_ENOUGH_MEMORY,
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__NOT_EXPECTED_MSG,
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__SIGNATURE_CERTIFICAT_ERROR,
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INCORRECT_HDCP_VERSION,
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__UNKNOWN_MESSAGE,
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_HMAC,
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_TOPOLOGY,
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SEQ_NUM,
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SIZE,
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_LENGTH,
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST
+};
+
+enum ta_hdcp_content_type {
+	TA_HDCP2_CONTENT_TYPE__TYPE0 = 1,
+	TA_HDCP2_CONTENT_TYPE__TYPE1,
+};
+
+enum ta_hdcp_content_type_negotiation_type {
+	TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE0 = 1,
+	TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE1,
+	TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__MAX_SUPPORTED
+};
+
+enum ta_hdcp2_version {
+	TA_HDCP2_VERSION_UNKNOWN = 0,
+	TA_HDCP2_VERSION_2_0 = 20,
+	TA_HDCP2_VERSION_2_1 = 21,
+	TA_HDCP2_VERSION_2_2 = 22
+};
 
 /* input/output structures for HDCP commands */
 /**********************************************************/
@@ -232,6 +337,84 @@ struct ta_hdcp_cmd_hdcp1_get_encryption_status_output {
 	uint32_t protection_level;
 };
 
+struct ta_hdcp_cmd_hdcp2_create_session_input_v2 {
+	uint32_t display_handle;
+	enum ta_hdcp_content_type_negotiation_type negotiate_content_type;
+};
+
+struct ta_hdcp_cmd_hdcp2_create_session_output_v2 {
+	uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp2_destroy_session_input {
+	uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp2_authentication_message_v2 {
+	enum ta_hdcp2_msg_id msg_id;
+	uint32_t msg_size;
+};
+
+struct ta_hdcp_cmd_hdcp2_process_authentication_message_input_v2 {
+	struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg1_desc;
+	struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg2_desc;
+	struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg3_desc;
+	uint8_t receiver_message[TA_HDCP__HDCP2_RX_BUF_MAX_SIZE];
+};
+
+struct ta_hdcp_cmd_hdcp2_process_authentication_message_output_v2 {
+	uint32_t hdcp_version;
+	uint32_t is_km_stored;
+	uint32_t is_locality_precompute_support;
+	uint32_t is_repeater;
+	enum ta_hdcp2_msg_authentication_status msg1_status;
+	enum ta_hdcp2_msg_authentication_status msg2_status;
+	enum ta_hdcp2_msg_authentication_status msg3_status;
+};
+
+struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_input_v2 {
+	enum ta_hdcp2_msg_id msg1_id;
+	enum ta_hdcp2_msg_id msg2_id;
+};
+
+struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_output_v2 {
+	enum ta_hdcp2_msg_authentication_status msg1_status;
+	enum ta_hdcp2_msg_authentication_status msg2_status;
+	struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg1_desc;
+	struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg2_desc;
+	uint8_t transmitter_message[TA_HDCP__HDCP2_TX_BUF_MAX_SIZE];
+};
+
+struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 {
+	uint32_t session_handle;
+	struct ta_hdcp_cmd_hdcp2_process_authentication_message_input_v2 process;
+	struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_input_v2 prepare;
+};
+
+struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 {
+	uint32_t authentication_status;
+	struct ta_hdcp_cmd_hdcp2_process_authentication_message_output_v2 process;
+	struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_output_v2 prepare;
+};
+
+struct ta_hdcp_cmd_hdcp2_set_encryption_input {
+	uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp2_get_encryption_status_input {
+	uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp2_get_encryption_status_output {
+	enum ta_hdcp_content_type hdcp2_type;
+	uint32_t protection_level;
+};
+
+struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input {
+	uint32_t session_handle;
+	uint32_t display_handle;
+};
+
 /**********************************************************/
 /* Common input structure for HDCP callbacks */
 union ta_hdcp_cmd_input {
@@ -242,6 +425,13 @@ union ta_hdcp_cmd_input {
 	struct ta_hdcp_cmd_hdcp1_enable_encryption_input hdcp1_enable_encryption;
 	struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input hdcp1_enable_dp_stream_encryption;
 	struct ta_hdcp_cmd_hdcp1_get_encryption_status_input hdcp1_get_encryption_status;
+	struct ta_hdcp_cmd_hdcp2_destroy_session_input hdcp2_destroy_session;
+	struct ta_hdcp_cmd_hdcp2_set_encryption_input hdcp2_set_encryption;
+	struct ta_hdcp_cmd_hdcp2_get_encryption_status_input hdcp2_get_encryption_status;
+	struct ta_hdcp_cmd_hdcp2_create_session_input_v2 hdcp2_create_session_v2;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2
+		hdcp2_prepare_process_authentication_message_v2;
+	struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input hdcp2_enable_dp_stream_encryption;
 };
 
 /* Common output structure for HDCP callbacks */
@@ -250,6 +440,10 @@ union ta_hdcp_cmd_output {
 	struct ta_hdcp_cmd_hdcp1_first_part_authentication_output hdcp1_first_part_authentication;
 	struct ta_hdcp_cmd_hdcp1_second_part_authentication_output hdcp1_second_part_authentication;
 	struct ta_hdcp_cmd_hdcp1_get_encryption_status_output hdcp1_get_encryption_status;
+	struct ta_hdcp_cmd_hdcp2_get_encryption_status_output hdcp2_get_encryption_status;
+	struct ta_hdcp_cmd_hdcp2_create_session_output_v2 hdcp2_create_session_v2;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2
+		hdcp2_prepare_process_authentication_message_v2;
 };
 /**********************************************************/
 
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index dc18784..dbe7835 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -92,6 +92,7 @@ struct mod_vrr_params_btr {
 	uint32_t inserted_duration_in_us;
 	uint32_t frames_to_insert;
 	uint32_t frame_counter;
+	uint32_t margin_in_us;
 };
 
 struct mod_vrr_params_fixed_refresh {
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index dea2170..f2a0e1a 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -77,6 +77,7 @@ enum mod_hdcp_status {
 	MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING,
 	MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING,
 	MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE,
+	MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED,
 	MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE,
 	MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE,
 	MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE,
@@ -86,6 +87,7 @@ enum mod_hdcp_status {
 	MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE,
 	MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
 	MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
+	MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED,
 	MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION,
 	MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
 	MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
@@ -156,12 +158,18 @@ struct mod_hdcp_link_adjustment_hdcp1 {
 	uint8_t reserved		: 6;
 };
 
+enum mod_hdcp_force_hdcp_type {
+	MOD_HDCP_FORCE_TYPE_MAX = 0,
+	MOD_HDCP_FORCE_TYPE_0,
+	MOD_HDCP_FORCE_TYPE_1
+};
+
 struct mod_hdcp_link_adjustment_hdcp2 {
 	uint8_t disable			: 1;
-	uint8_t disable_type1		: 1;
+	uint8_t force_type		: 2;
 	uint8_t force_no_stored_km	: 1;
 	uint8_t increase_h_prime_timeout: 1;
-	uint8_t reserved		: 4;
+	uint8_t reserved		: 3;
 };
 
 struct mod_hdcp_link_adjustment {
@@ -184,7 +192,8 @@ enum mod_hdcp_encryption_status {
 	MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF = 0,
 	MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON,
 	MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON,
-	MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON
+	MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON,
+	MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON
 };
 
 /* per link events dm has to notify to hdcp module */
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index ca8ce3c..42cbeff 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -26,6 +26,7 @@
 #ifndef MOD_INFO_PACKET_H_
 #define MOD_INFO_PACKET_H_
 
+#include "dm_services.h"
 #include "mod_shared.h"
 //Forward Declarations
 struct dc_stream_state;
@@ -33,7 +34,8 @@ struct dc_info_packet;
 struct mod_vrr_params;
 
 void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
-		struct dc_info_packet *info_packet);
+		struct dc_info_packet *info_packet,
+		bool *use_vsc_sdp_for_colorimetry);
 
 void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
 		struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
index b45f7d6..fe21179 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
@@ -45,7 +45,6 @@ enum vrr_packet_type {
 	PACKET_TYPE_VTEM
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 union lut3d_control_flags {
 	unsigned int raw;
 	struct {
@@ -104,6 +103,5 @@ struct lut3d_settings {
 	enum lut3d_control_gamut_map map2;
 	enum lut3d_control_rotation_mode rotation2;
 };
-#endif
 
 #endif /* MOD_SHARED_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index db6b08f..6a8a056 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -30,6 +30,20 @@
 #include "mod_freesync.h"
 #include "dc.h"
 
+enum vsc_packet_revision {
+	vsc_packet_undefined = 0,
+	//01h = VSC SDP supports only 3D stereo.
+	vsc_packet_rev1 = 1,
+	//02h = 3D stereo + PSR.
+	vsc_packet_rev2 = 2,
+	//03h = 3D stereo + PSR2.
+	vsc_packet_rev3 = 3,
+	//04h = 3D stereo + PSR/PSR2 + Y-coordinate.
+	vsc_packet_rev4 = 4,
+	//05h = 3D stereo + PSR/PSR2 + Y-coordinate + Pixel Encoding/Colorimetry Format
+	vsc_packet_rev5 = 5,
+};
+
 #define HDMI_INFOFRAME_TYPE_VENDOR 0x81
 #define HF_VSIF_VERSION 1
 
@@ -116,35 +130,41 @@ enum ColorimetryYCCDP {
 };
 
 void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
-		struct dc_info_packet *info_packet)
+		struct dc_info_packet *info_packet,
+		bool *use_vsc_sdp_for_colorimetry)
 {
-	unsigned int vscPacketRevision = 0;
+	unsigned int vsc_packet_revision = vsc_packet_undefined;
 	unsigned int i;
 	unsigned int pixelEncoding = 0;
 	unsigned int colorimetryFormat = 0;
 	bool stereo3dSupport = false;
 
+	/* Initialize first, later if infopacket is valid determine if VSC SDP
+	 * should be used to signal colorimetry format and pixel encoding.
+	 */
+	*use_vsc_sdp_for_colorimetry = false;
+
 	if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) {
-		vscPacketRevision = 1;
+		vsc_packet_revision = vsc_packet_rev1;
 		stereo3dSupport = true;
 	}
 
 	/*VSC packet set to 2 when DP revision >= 1.2*/
 	if (stream->psr_version != 0)
-		vscPacketRevision = 2;
+		vsc_packet_revision = vsc_packet_rev2;
 
 	/* Update to revision 5 for extended colorimetry support for DPCD 1.4+ */
 	if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
 			stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
-		vscPacketRevision = 5;
+		vsc_packet_revision = vsc_packet_rev5;
 
 	/* VSC packet not needed based on the features
 	 * supported by this DP display
 	 */
-	if (vscPacketRevision == 0)
+	if (vsc_packet_revision == vsc_packet_undefined)
 		return;
 
-	if (vscPacketRevision == 0x2) {
+	if (vsc_packet_revision == vsc_packet_rev2) {
 		/* Secondary-data Packet ID = 0*/
 		info_packet->hb0 = 0x00;
 		/* 07h - Packet Type Value indicating Video
@@ -166,7 +186,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 		info_packet->valid = true;
 	}
 
-	if (vscPacketRevision == 0x1) {
+	if (vsc_packet_revision == vsc_packet_rev1) {
 
 		info_packet->hb0 = 0x00;	// Secondary-data Packet ID = 0
 		info_packet->hb1 = 0x07;	// 07h = Packet Type Value indicating Video Stream Configuration packet
@@ -237,7 +257,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 	 *   the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and
 	 *   MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become "don't care").)
 	 */
-	if (vscPacketRevision == 0x5) {
+	if (vsc_packet_revision == vsc_packet_rev5) {
 		/* Secondary-data Packet ID = 0 */
 		info_packet->hb0 = 0x00;
 		/* 07h - Packet Type Value indicating Video Stream Configuration packet */
@@ -249,6 +269,13 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 
 		info_packet->valid = true;
 
+		/* If we are using VSC SDP revision 05h, use this to signal for
+		 * colorimetry format and pixel encoding. HW should later be
+		 * programmed to set MSA MISC1 bit 6 to indicate ignore
+		 * colorimetry format and pixel encoding in the MSA.
+		 */
+		*use_vsc_sdp_for_colorimetry = true;
+
 		/* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs
 		 * Data Bytes DB 18~16
 		 * Bits 3:0 (Colorimetry Format)        |  Bits 7:4 (Pixel Encoding)
@@ -393,7 +420,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 		 */
 		info_packet->sb[18] = 0;
 	}
-
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 4e2f615..e75a4bb 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -662,7 +662,11 @@ bool dmcu_load_iram(struct dmcu *dmcu,
 
 	memset(&ram_table, 0, sizeof(ram_table));
 
-	if (dmcu->dmcu_version.abm_version == 0x23) {
+	if (dmcu->dmcu_version.abm_version == 0x24) {
+		fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
+		result = dmcu->funcs->load_iram(
+				dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+	} else if (dmcu->dmcu_version.abm_version == 0x23) {
 		fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
 
 		result = dmcu->funcs->load_iram(
@@ -687,3 +691,4 @@ bool dmcu_load_iram(struct dmcu *dmcu,
 
 	return result;
 }
+
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index dc7eb28..d655a76 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -53,7 +53,8 @@ enum amd_ip_block_type {
 	AMD_IP_BLOCK_TYPE_VCE,
 	AMD_IP_BLOCK_TYPE_ACP,
 	AMD_IP_BLOCK_TYPE_VCN,
-	AMD_IP_BLOCK_TYPE_MES
+	AMD_IP_BLOCK_TYPE_MES,
+	AMD_IP_BLOCK_TYPE_JPEG
 };
 
 enum amd_clockgating_state {
@@ -99,6 +100,7 @@ enum amd_powergating_state {
 #define AMD_CG_SUPPORT_IH_CG			(1 << 27)
 #define AMD_CG_SUPPORT_ATHUB_LS			(1 << 28)
 #define AMD_CG_SUPPORT_ATHUB_MGCG		(1 << 29)
+#define AMD_CG_SUPPORT_JPEG_MGCG		(1 << 30)
 /* PG flags */
 #define AMD_PG_SUPPORT_GFX_PG			(1 << 0)
 #define AMD_PG_SUPPORT_GFX_SMG			(1 << 1)
@@ -117,6 +119,7 @@ enum amd_powergating_state {
 #define AMD_PG_SUPPORT_VCN			(1 << 14)
 #define AMD_PG_SUPPORT_VCN_DPG			(1 << 15)
 #define AMD_PG_SUPPORT_ATHUB			(1 << 16)
+#define AMD_PG_SUPPORT_JPEG			(1 << 17)
 
 enum PP_FEATURE_MASK {
 	PP_SCLK_DPM_MASK = 0x1,
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h
index cff8f91..e9b2bd8 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h
@@ -8134,6 +8134,10 @@
 #define mmMPC_OUT5_CSC_C33_C34_B                                                                       0x1604
 #define mmMPC_OUT5_CSC_C33_C34_B_BASE_IDX                                                              2
 
+#define mmMPC_OCSC_TEST_DEBUG_INDEX                                                                    0x163b
+#define mmMPC_OCSC_TEST_DEBUG_INDEX_BASE_IDX                                                           2
+#define mmMPC_OCSC_TEST_DEBUG_DATA_BASE_IDX                                                            2
+#define mmMPC_OCSC_TEST_DEBUG_DATA                                                                     0x163c
 
 // addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
 // base address: 0x5964
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
index 10c83fe..dc8ce7a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
@@ -28263,7 +28263,14 @@
 #define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT                                                         0x10
 #define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK                                                           0x0000FFFFL
 #define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK                                                           0xFFFF0000L
-
+//MPC_OCSC_TEST_DEBUG_INDEX
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX__SHIFT                                           0x0
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN__SHIFT                                        0x8
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX_MASK                                             0x000000FFL
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN_MASK                                          0x00000100L
+//MPC_OCSC_TEST_DEBUG_DATA
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA__SHIFT                                             0x0
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA_MASK                                               0xFFFFFFFFL
 
 // addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
 //DC_PERFMON17_PERFCOUNTER_CNTL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
index eddf83e..7cd0ee6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
@@ -7103,7 +7103,10 @@
 #define mmMPC_OUT3_CSC_C31_C32_B_BASE_IDX                                                              2
 #define mmMPC_OUT3_CSC_C33_C34_B                                                                       0x15ea
 #define mmMPC_OUT3_CSC_C33_C34_B_BASE_IDX                                                              2
-
+#define mmMPC_OCSC_TEST_DEBUG_INDEX                                                                    0x163b
+#define mmMPC_OCSC_TEST_DEBUG_INDEX_BASE_IDX                                                           2
+#define mmMPC_OCSC_TEST_DEBUG_DATA_BASE_IDX                                                            2
+#define mmMPC_OCSC_TEST_DEBUG_DATA                                                                     0x163c
 
 // addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
 // base address: 0x5964
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
index faa0e76..2f780ae 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
@@ -56634,5 +56634,13 @@
 #define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK                 0x00FF0000L
 #define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK                  0x80000000L
 
+//MPC_OCSC_TEST_DEBUG_INDEX
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX__SHIFT                                           0x0
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN__SHIFT                                        0x8
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX_MASK                                             0x000000FFL
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN_MASK                                          0x00000100L
+//MPC_OCSC_TEST_DEBUG_DATA
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA__SHIFT                                             0x0
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA_MASK                                               0xFFFFFFFFL
 
 #endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
index c2bd255..bb2c9c7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
@@ -27,6 +27,9 @@
 #define mmDF_PIE_AON0_DfGlobalClkGater									0x00fc
 #define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX								0
 
+#define mmDF_CS_UMC_AON0_DfGlobalCtrl									0x00fe
+#define mmDF_CS_UMC_AON0_DfGlobalCtrl_BASE_IDX								0
+
 #define mmDF_CS_UMC_AON0_DramBaseAddress0								0x0044
 #define mmDF_CS_UMC_AON0_DramBaseAddress0_BASE_IDX							0
 
@@ -38,6 +41,14 @@
 #define smnPerfMonCtlHi2					0x01d464UL
 #define smnPerfMonCtlLo3					0x01d470UL
 #define smnPerfMonCtlHi3					0x01d474UL
+#define smnPerfMonCtlLo4					0x01d880UL
+#define smnPerfMonCtlHi4					0x01d884UL
+#define smnPerfMonCtlLo5					0x01d888UL
+#define smnPerfMonCtlHi5					0x01d88cUL
+#define smnPerfMonCtlLo6					0x01d890UL
+#define smnPerfMonCtlHi6					0x01d894UL
+#define smnPerfMonCtlLo7					0x01d898UL
+#define smnPerfMonCtlHi7					0x01d89cUL
 
 #define smnPerfMonCtrLo0					0x01d448UL
 #define smnPerfMonCtrHi0					0x01d44cUL
@@ -47,9 +58,20 @@
 #define smnPerfMonCtrHi2					0x01d46cUL
 #define smnPerfMonCtrLo3					0x01d478UL
 #define smnPerfMonCtrHi3					0x01d47cUL
+#define smnPerfMonCtrLo4					0x01d790UL
+#define smnPerfMonCtrHi4					0x01d794UL
+#define smnPerfMonCtrLo5					0x01d798UL
+#define smnPerfMonCtrHi5					0x01d79cUL
+#define smnPerfMonCtrLo6					0x01d7a0UL
+#define smnPerfMonCtrHi6					0x01d7a4UL
+#define smnPerfMonCtrLo7					0x01d7a8UL
+#define smnPerfMonCtrHi7					0x01d7acUL
 
 #define smnDF_PIE_AON_FabricIndirectConfigAccessAddress3	0x1d05cUL
 #define smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3		0x1d098UL
 #define smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3		0x1d09cUL
 
+#define smnDF_CS_UMC_AON0_DramBaseAddress0 	0x1c110UL
+#define smnDF_CS_UMC_AON0_DramLimitAddress0 	0x1c114UL
+
 #endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
index 06fac50..7afa87c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
@@ -33,6 +33,14 @@
 #define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT							0x0
 #define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK							0x0000000FL
 
+/* DF_CS_UMC_AON0_DfGlobalCtrl */
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl64K__SHIFT						0x14
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl2M__SHIFT						0x15
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl1G__SHIFT						0x16
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl64K_MASK						0x00100000L
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl2M_MASK						0x00200000L
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl1G_MASK						0x00400000L
+
 /* DF_CS_AON0_DramBaseAddress0 */
 #define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal__SHIFT						0x0
 #define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT						0x1
@@ -45,4 +53,12 @@
 #define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK						0x00000E00L
 #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK						0xFFFFF000L
 
+//DF_CS_UMC_AON0_DramLimitAddress0
+#define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID__SHIFT                                                  0x0
+#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO__SHIFT                                                   0xa
+#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr__SHIFT                                                0xc
+#define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID_MASK                                                    0x000003FFL
+#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO_MASK                                                     0x00000400L
+#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr_MASK                                                  0xFFFFF000L
+
 #endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h
new file mode 100644
index 0000000..36ae5b7
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h
@@ -0,0 +1,647 @@
+/*
+ * Copyright (C) 2019  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _dpcs_2_0_0_OFFSET_HEADER
+#define _dpcs_2_0_0_OFFSET_HEADER
+
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx0_dispdec
+// base address: 0x0
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL                                                                 0x2928
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL_BASE_IDX                                                        2
+#define mmDPCSTX0_DPCSTX_TX_CNTL                                                                       0x2929
+#define mmDPCSTX0_DPCSTX_TX_CNTL_BASE_IDX                                                              2
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL                                                                     0x292a
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL_BASE_IDX                                                            2
+#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL                                                                0x292b
+#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL_BASE_IDX                                                       2
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR                                                               0x292c
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX                                                      2
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA                                                               0x292d
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA_BASE_IDX                                                      2
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG                                                                  0x292e
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG_BASE_IDX                                                         2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
+// base address: 0x0
+#define mmRDPCSTX0_RDPCSTX_CNTL                                                                        0x2930
+#define mmRDPCSTX0_RDPCSTX_CNTL_BASE_IDX                                                               2
+#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL                                                                  0x2931
+#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL_BASE_IDX                                                         2
+#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL                                                           0x2932
+#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX                                                  2
+#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA                                                             0x2933
+#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX                                                    2
+#define mmRDPCSTX0_RDPCS_TX_CR_ADDR                                                                    0x2934
+#define mmRDPCSTX0_RDPCS_TX_CR_ADDR_BASE_IDX                                                           2
+#define mmRDPCSTX0_RDPCS_TX_CR_DATA                                                                    0x2935
+#define mmRDPCSTX0_RDPCS_TX_CR_DATA_BASE_IDX                                                           2
+#define mmRDPCSTX0_RDPCS_TX_SRAM_CNTL                                                                  0x2936
+#define mmRDPCSTX0_RDPCS_TX_SRAM_CNTL_BASE_IDX                                                         2
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL                                                              0x2937
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL_BASE_IDX                                                     2
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL2                                                             0x2938
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX                                                    2
+#define mmRDPCSTX0_RDPCSTX_SCRATCH                                                                     0x2939
+#define mmRDPCSTX0_RDPCSTX_SCRATCH_BASE_IDX                                                            2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG                                                    0x293c
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX                                           2
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG                                                                0x293d
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX                                                       2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0                                                                   0x2940
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX                                                          2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1                                                                   0x2941
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1_BASE_IDX                                                          2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2                                                                   0x2942
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2_BASE_IDX                                                          2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3                                                                   0x2943
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3_BASE_IDX                                                          2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4                                                                   0x2944
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4_BASE_IDX                                                          2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5                                                                   0x2945
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5_BASE_IDX                                                          2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6                                                                   0x2946
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6_BASE_IDX                                                          2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7                                                                   0x2947
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7_BASE_IDX                                                          2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8                                                                   0x2948
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8_BASE_IDX                                                          2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9                                                                   0x2949
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9_BASE_IDX                                                          2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10                                                                  0x294a
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10_BASE_IDX                                                         2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11                                                                  0x294b
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11_BASE_IDX                                                         2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12                                                                  0x294c
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12_BASE_IDX                                                         2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13                                                                  0x294d
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13_BASE_IDX                                                         2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14                                                                  0x294e
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14_BASE_IDX                                                         2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE0                                                                   0x294f
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE0_BASE_IDX                                                          2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE1                                                                   0x2950
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE1_BASE_IDX                                                          2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE2                                                                   0x2951
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE2_BASE_IDX                                                          2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE3                                                                   0x2952
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE3_BASE_IDX                                                          2
+#define mmRDPCSTX0_RDPCSTX_PHY_RX_LD_VAL                                                               0x2953
+#define mmRDPCSTX0_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX                                                      2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3                                                        0x2954
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX                                               2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6                                                        0x2955
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX                                               2
+#define mmRDPCSTX0_RDPCSTX_DPALT_CONTROL_REG                                                           0x2956
+#define mmRDPCSTX0_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX                                                  2
+
+
+// addressBlock: dpcssys_dpcssys_cr0_dispdec
+// base address: 0x0
+#define mmDPCSSYS_CR0_DPCSSYS_CR_ADDR                                                                  0x2934
+#define mmDPCSSYS_CR0_DPCSSYS_CR_ADDR_BASE_IDX                                                         2
+#define mmDPCSSYS_CR0_DPCSSYS_CR_DATA                                                                  0x2935
+#define mmDPCSSYS_CR0_DPCSSYS_CR_DATA_BASE_IDX                                                         2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx1_dispdec
+// base address: 0x360
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL                                                                 0x2a00
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL_BASE_IDX                                                        2
+#define mmDPCSTX1_DPCSTX_TX_CNTL                                                                       0x2a01
+#define mmDPCSTX1_DPCSTX_TX_CNTL_BASE_IDX                                                              2
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL                                                                     0x2a02
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL_BASE_IDX                                                            2
+#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL                                                                0x2a03
+#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL_BASE_IDX                                                       2
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR                                                               0x2a04
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX                                                      2
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA                                                               0x2a05
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA_BASE_IDX                                                      2
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG                                                                  0x2a06
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG_BASE_IDX                                                         2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec
+// base address: 0x360
+#define mmRDPCSTX1_RDPCSTX_CNTL                                                                        0x2a08
+#define mmRDPCSTX1_RDPCSTX_CNTL_BASE_IDX                                                               2
+#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL                                                                  0x2a09
+#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL_BASE_IDX                                                         2
+#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL                                                           0x2a0a
+#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX                                                  2
+#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA                                                             0x2a0b
+#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX                                                    2
+#define mmRDPCSTX1_RDPCS_TX_CR_ADDR                                                                    0x2a0c
+#define mmRDPCSTX1_RDPCS_TX_CR_ADDR_BASE_IDX                                                           2
+#define mmRDPCSTX1_RDPCS_TX_CR_DATA                                                                    0x2a0d
+#define mmRDPCSTX1_RDPCS_TX_CR_DATA_BASE_IDX                                                           2
+#define mmRDPCSTX1_RDPCS_TX_SRAM_CNTL                                                                  0x2a0e
+#define mmRDPCSTX1_RDPCS_TX_SRAM_CNTL_BASE_IDX                                                         2
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL                                                              0x2a0f
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL_BASE_IDX                                                     2
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL2                                                             0x2a10
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX                                                    2
+#define mmRDPCSTX1_RDPCSTX_SCRATCH                                                                     0x2a11
+#define mmRDPCSTX1_RDPCSTX_SCRATCH_BASE_IDX                                                            2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG                                                    0x2a14
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX                                           2
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG                                                                0x2a15
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG_BASE_IDX                                                       2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0                                                                   0x2a18
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX                                                          2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1                                                                   0x2a19
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1_BASE_IDX                                                          2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2                                                                   0x2a1a
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2_BASE_IDX                                                          2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3                                                                   0x2a1b
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3_BASE_IDX                                                          2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4                                                                   0x2a1c
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4_BASE_IDX                                                          2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5                                                                   0x2a1d
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5_BASE_IDX                                                          2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6                                                                   0x2a1e
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6_BASE_IDX                                                          2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7                                                                   0x2a1f
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7_BASE_IDX                                                          2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8                                                                   0x2a20
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8_BASE_IDX                                                          2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9                                                                   0x2a21
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9_BASE_IDX                                                          2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10                                                                  0x2a22
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10_BASE_IDX                                                         2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11                                                                  0x2a23
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11_BASE_IDX                                                         2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12                                                                  0x2a24
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12_BASE_IDX                                                         2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13                                                                  0x2a25
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13_BASE_IDX                                                         2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14                                                                  0x2a26
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14_BASE_IDX                                                         2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE0                                                                   0x2a27
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE0_BASE_IDX                                                          2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE1                                                                   0x2a28
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE1_BASE_IDX                                                          2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE2                                                                   0x2a29
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE2_BASE_IDX                                                          2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE3                                                                   0x2a2a
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE3_BASE_IDX                                                          2
+#define mmRDPCSTX1_RDPCSTX_PHY_RX_LD_VAL                                                               0x2a2b
+#define mmRDPCSTX1_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX                                                      2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3                                                        0x2a2c
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX                                               2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6                                                        0x2a2d
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX                                               2
+#define mmRDPCSTX1_RDPCSTX_DPALT_CONTROL_REG                                                           0x2a2e
+#define mmRDPCSTX1_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX                                                  2
+
+
+// addressBlock: dpcssys_dpcssys_cr1_dispdec
+// base address: 0x360
+#define mmDPCSSYS_CR1_DPCSSYS_CR_ADDR                                                                  0x2a0c
+#define mmDPCSSYS_CR1_DPCSSYS_CR_ADDR_BASE_IDX                                                         2
+#define mmDPCSSYS_CR1_DPCSSYS_CR_DATA                                                                  0x2a0d
+#define mmDPCSSYS_CR1_DPCSSYS_CR_DATA_BASE_IDX                                                         2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx2_dispdec
+// base address: 0x6c0
+#define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL                                                                 0x2ad8
+#define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL_BASE_IDX                                                        2
+#define mmDPCSTX2_DPCSTX_TX_CNTL                                                                       0x2ad9
+#define mmDPCSTX2_DPCSTX_TX_CNTL_BASE_IDX                                                              2
+#define mmDPCSTX2_DPCSTX_CBUS_CNTL                                                                     0x2ada
+#define mmDPCSTX2_DPCSTX_CBUS_CNTL_BASE_IDX                                                            2
+#define mmDPCSTX2_DPCSTX_INTERRUPT_CNTL                                                                0x2adb
+#define mmDPCSTX2_DPCSTX_INTERRUPT_CNTL_BASE_IDX                                                       2
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR                                                               0x2adc
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX                                                      2
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA                                                               0x2add
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA_BASE_IDX                                                      2
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG                                                                  0x2ade
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG_BASE_IDX                                                         2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec
+// base address: 0x6c0
+#define mmRDPCSTX2_RDPCSTX_CNTL                                                                        0x2ae0
+#define mmRDPCSTX2_RDPCSTX_CNTL_BASE_IDX                                                               2
+#define mmRDPCSTX2_RDPCSTX_CLOCK_CNTL                                                                  0x2ae1
+#define mmRDPCSTX2_RDPCSTX_CLOCK_CNTL_BASE_IDX                                                         2
+#define mmRDPCSTX2_RDPCSTX_INTERRUPT_CONTROL                                                           0x2ae2
+#define mmRDPCSTX2_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX                                                  2
+#define mmRDPCSTX2_RDPCSTX_PLL_UPDATE_DATA                                                             0x2ae3
+#define mmRDPCSTX2_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX                                                    2
+#define mmRDPCSTX2_RDPCS_TX_CR_ADDR                                                                    0x2ae4
+#define mmRDPCSTX2_RDPCS_TX_CR_ADDR_BASE_IDX                                                           2
+#define mmRDPCSTX2_RDPCS_TX_CR_DATA                                                                    0x2ae5
+#define mmRDPCSTX2_RDPCS_TX_CR_DATA_BASE_IDX                                                           2
+#define mmRDPCSTX2_RDPCS_TX_SRAM_CNTL                                                                  0x2ae6
+#define mmRDPCSTX2_RDPCS_TX_SRAM_CNTL_BASE_IDX                                                         2
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL                                                              0x2ae7
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL_BASE_IDX                                                     2
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL2                                                             0x2ae8
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX                                                    2
+#define mmRDPCSTX2_RDPCSTX_SCRATCH                                                                     0x2ae9
+#define mmRDPCSTX2_RDPCSTX_SCRATCH_BASE_IDX                                                            2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG                                                    0x2aec
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX                                           2
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG                                                                0x2aed
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG_BASE_IDX                                                       2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0                                                                   0x2af0
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0_BASE_IDX                                                          2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL1                                                                   0x2af1
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL1_BASE_IDX                                                          2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL2                                                                   0x2af2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL2_BASE_IDX                                                          2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL3                                                                   0x2af3
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL3_BASE_IDX                                                          2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL4                                                                   0x2af4
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL4_BASE_IDX                                                          2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL5                                                                   0x2af5
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL5_BASE_IDX                                                          2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL6                                                                   0x2af6
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL6_BASE_IDX                                                          2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL7                                                                   0x2af7
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL7_BASE_IDX                                                          2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL8                                                                   0x2af8
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL8_BASE_IDX                                                          2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL9                                                                   0x2af9
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL9_BASE_IDX                                                          2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL10                                                                  0x2afa
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL10_BASE_IDX                                                         2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL11                                                                  0x2afb
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL11_BASE_IDX                                                         2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL12                                                                  0x2afc
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL12_BASE_IDX                                                         2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL13                                                                  0x2afd
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL13_BASE_IDX                                                         2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL14                                                                  0x2afe
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL14_BASE_IDX                                                         2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE0                                                                   0x2aff
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE0_BASE_IDX                                                          2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE1                                                                   0x2b00
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE1_BASE_IDX                                                          2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE2                                                                   0x2b01
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE2_BASE_IDX                                                          2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE3                                                                   0x2b02
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE3_BASE_IDX                                                          2
+#define mmRDPCSTX2_RDPCSTX_PHY_RX_LD_VAL                                                               0x2b03
+#define mmRDPCSTX2_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX                                                      2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3                                                        0x2b04
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX                                               2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6                                                        0x2b05
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX                                               2
+#define mmRDPCSTX2_RDPCSTX_DPALT_CONTROL_REG                                                           0x2b06
+#define mmRDPCSTX2_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX                                                  2
+
+
+// addressBlock: dpcssys_dpcssys_cr2_dispdec
+// base address: 0x6c0
+#define mmDPCSSYS_CR2_DPCSSYS_CR_ADDR                                                                  0x2ae4
+#define mmDPCSSYS_CR2_DPCSSYS_CR_ADDR_BASE_IDX                                                         2
+#define mmDPCSSYS_CR2_DPCSSYS_CR_DATA                                                                  0x2ae5
+#define mmDPCSSYS_CR2_DPCSSYS_CR_DATA_BASE_IDX                                                         2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx3_dispdec
+// base address: 0xa20
+#define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL                                                                 0x2bb0
+#define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL_BASE_IDX                                                        2
+#define mmDPCSTX3_DPCSTX_TX_CNTL                                                                       0x2bb1
+#define mmDPCSTX3_DPCSTX_TX_CNTL_BASE_IDX                                                              2
+#define mmDPCSTX3_DPCSTX_CBUS_CNTL                                                                     0x2bb2
+#define mmDPCSTX3_DPCSTX_CBUS_CNTL_BASE_IDX                                                            2
+#define mmDPCSTX3_DPCSTX_INTERRUPT_CNTL                                                                0x2bb3
+#define mmDPCSTX3_DPCSTX_INTERRUPT_CNTL_BASE_IDX                                                       2
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR                                                               0x2bb4
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX                                                      2
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA                                                               0x2bb5
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA_BASE_IDX                                                      2
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG                                                                  0x2bb6
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG_BASE_IDX                                                         2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec
+// base address: 0xa20
+#define mmRDPCSTX3_RDPCSTX_CNTL                                                                        0x2bb8
+#define mmRDPCSTX3_RDPCSTX_CNTL_BASE_IDX                                                               2
+#define mmRDPCSTX3_RDPCSTX_CLOCK_CNTL                                                                  0x2bb9
+#define mmRDPCSTX3_RDPCSTX_CLOCK_CNTL_BASE_IDX                                                         2
+#define mmRDPCSTX3_RDPCSTX_INTERRUPT_CONTROL                                                           0x2bba
+#define mmRDPCSTX3_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX                                                  2
+#define mmRDPCSTX3_RDPCSTX_PLL_UPDATE_DATA                                                             0x2bbb
+#define mmRDPCSTX3_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX                                                    2
+#define mmRDPCSTX3_RDPCS_TX_CR_ADDR                                                                    0x2bbc
+#define mmRDPCSTX3_RDPCS_TX_CR_ADDR_BASE_IDX                                                           2
+#define mmRDPCSTX3_RDPCS_TX_CR_DATA                                                                    0x2bbd
+#define mmRDPCSTX3_RDPCS_TX_CR_DATA_BASE_IDX                                                           2
+#define mmRDPCSTX3_RDPCS_TX_SRAM_CNTL                                                                  0x2bbe
+#define mmRDPCSTX3_RDPCS_TX_SRAM_CNTL_BASE_IDX                                                         2
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL                                                              0x2bbf
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL_BASE_IDX                                                     2
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL2                                                             0x2bc0
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX                                                    2
+#define mmRDPCSTX3_RDPCSTX_SCRATCH                                                                     0x2bc1
+#define mmRDPCSTX3_RDPCSTX_SCRATCH_BASE_IDX                                                            2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG                                                    0x2bc4
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX                                           2
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG                                                                0x2bc5
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG_BASE_IDX                                                       2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0                                                                   0x2bc8
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0_BASE_IDX                                                          2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL1                                                                   0x2bc9
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL1_BASE_IDX                                                          2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL2                                                                   0x2bca
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL2_BASE_IDX                                                          2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL3                                                                   0x2bcb
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL3_BASE_IDX                                                          2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL4                                                                   0x2bcc
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL4_BASE_IDX                                                          2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL5                                                                   0x2bcd
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL5_BASE_IDX                                                          2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL6                                                                   0x2bce
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL6_BASE_IDX                                                          2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL7                                                                   0x2bcf
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL7_BASE_IDX                                                          2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL8                                                                   0x2bd0
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL8_BASE_IDX                                                          2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL9                                                                   0x2bd1
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL9_BASE_IDX                                                          2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL10                                                                  0x2bd2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL10_BASE_IDX                                                         2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL11                                                                  0x2bd3
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL11_BASE_IDX                                                         2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL12                                                                  0x2bd4
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL12_BASE_IDX                                                         2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL13                                                                  0x2bd5
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL13_BASE_IDX                                                         2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL14                                                                  0x2bd6
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL14_BASE_IDX                                                         2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE0                                                                   0x2bd7
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE0_BASE_IDX                                                          2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE1                                                                   0x2bd8
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE1_BASE_IDX                                                          2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE2                                                                   0x2bd9
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE2_BASE_IDX                                                          2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE3                                                                   0x2bda
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE3_BASE_IDX                                                          2
+#define mmRDPCSTX3_RDPCSTX_PHY_RX_LD_VAL                                                               0x2bdb
+#define mmRDPCSTX3_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX                                                      2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3                                                        0x2bdc
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX                                               2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6                                                        0x2bdd
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX                                               2
+#define mmRDPCSTX3_RDPCSTX_DPALT_CONTROL_REG                                                           0x2bde
+#define mmRDPCSTX3_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX                                                  2
+
+
+// addressBlock: dpcssys_dpcssys_cr3_dispdec
+// base address: 0xa20
+#define mmDPCSSYS_CR3_DPCSSYS_CR_ADDR                                                                  0x2bbc
+#define mmDPCSSYS_CR3_DPCSSYS_CR_ADDR_BASE_IDX                                                         2
+#define mmDPCSSYS_CR3_DPCSSYS_CR_DATA                                                                  0x2bbd
+#define mmDPCSSYS_CR3_DPCSSYS_CR_DATA_BASE_IDX                                                         2
+
+
+// addressBlock: dpcssys_dpcs0_dpcsrx_dispdec
+// base address: 0x0
+#define mmDPCSRX_PHY_CNTL                                                                              0x2c76
+#define mmDPCSRX_PHY_CNTL_BASE_IDX                                                                     2
+#define mmDPCSRX_RX_CLOCK_CNTL                                                                         0x2c78
+#define mmDPCSRX_RX_CLOCK_CNTL_BASE_IDX                                                                2
+#define mmDPCSRX_RX_CNTL                                                                               0x2c7a
+#define mmDPCSRX_RX_CNTL_BASE_IDX                                                                      2
+#define mmDPCSRX_CBUS_CNTL                                                                             0x2c7b
+#define mmDPCSRX_CBUS_CNTL_BASE_IDX                                                                    2
+#define mmDPCSRX_REG_ERROR_STATUS                                                                      0x2c7c
+#define mmDPCSRX_REG_ERROR_STATUS_BASE_IDX                                                             2
+#define mmDPCSRX_RX_ERROR_STATUS                                                                       0x2c7d
+#define mmDPCSRX_RX_ERROR_STATUS_BASE_IDX                                                              2
+#define mmDPCSRX_INDEX_MODE_ADDR                                                                       0x2c80
+#define mmDPCSRX_INDEX_MODE_ADDR_BASE_IDX                                                              2
+#define mmDPCSRX_INDEX_MODE_DATA                                                                       0x2c81
+#define mmDPCSRX_INDEX_MODE_DATA_BASE_IDX                                                              2
+#define mmDPCSRX_DEBUG_CONFIG                                                                          0x2c82
+#define mmDPCSRX_DEBUG_CONFIG_BASE_IDX                                                                 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx4_dispdec
+// base address: 0xd80
+#define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL                                                                 0x2c88
+#define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL_BASE_IDX                                                        2
+#define mmDPCSTX4_DPCSTX_TX_CNTL                                                                       0x2c89
+#define mmDPCSTX4_DPCSTX_TX_CNTL_BASE_IDX                                                              2
+#define mmDPCSTX4_DPCSTX_CBUS_CNTL                                                                     0x2c8a
+#define mmDPCSTX4_DPCSTX_CBUS_CNTL_BASE_IDX                                                            2
+#define mmDPCSTX4_DPCSTX_INTERRUPT_CNTL                                                                0x2c8b
+#define mmDPCSTX4_DPCSTX_INTERRUPT_CNTL_BASE_IDX                                                       2
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR                                                               0x2c8c
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX                                                      2
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA                                                               0x2c8d
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA_BASE_IDX                                                      2
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG                                                                  0x2c8e
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG_BASE_IDX                                                         2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec
+// base address: 0xd80
+#define mmRDPCSTX4_RDPCSTX_CNTL                                                                        0x2c90
+#define mmRDPCSTX4_RDPCSTX_CNTL_BASE_IDX                                                               2
+#define mmRDPCSTX4_RDPCSTX_CLOCK_CNTL                                                                  0x2c91
+#define mmRDPCSTX4_RDPCSTX_CLOCK_CNTL_BASE_IDX                                                         2
+#define mmRDPCSTX4_RDPCSTX_INTERRUPT_CONTROL                                                           0x2c92
+#define mmRDPCSTX4_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX                                                  2
+#define mmRDPCSTX4_RDPCSTX_PLL_UPDATE_DATA                                                             0x2c93
+#define mmRDPCSTX4_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX                                                    2
+#define mmRDPCSTX4_RDPCS_TX_CR_ADDR                                                                    0x2c94
+#define mmRDPCSTX4_RDPCS_TX_CR_ADDR_BASE_IDX                                                           2
+#define mmRDPCSTX4_RDPCS_TX_CR_DATA                                                                    0x2c95
+#define mmRDPCSTX4_RDPCS_TX_CR_DATA_BASE_IDX                                                           2
+#define mmRDPCSTX4_RDPCS_TX_SRAM_CNTL                                                                  0x2c96
+#define mmRDPCSTX4_RDPCS_TX_SRAM_CNTL_BASE_IDX                                                         2
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL                                                              0x2c97
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL_BASE_IDX                                                     2
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL2                                                             0x2c98
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX                                                    2
+#define mmRDPCSTX4_RDPCSTX_SCRATCH                                                                     0x2c99
+#define mmRDPCSTX4_RDPCSTX_SCRATCH_BASE_IDX                                                            2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG                                                    0x2c9c
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX                                           2
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG                                                                0x2c9d
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG_BASE_IDX                                                       2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0                                                                   0x2ca0
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0_BASE_IDX                                                          2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL1                                                                   0x2ca1
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL1_BASE_IDX                                                          2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL2                                                                   0x2ca2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL2_BASE_IDX                                                          2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL3                                                                   0x2ca3
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL3_BASE_IDX                                                          2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL4                                                                   0x2ca4
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL4_BASE_IDX                                                          2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL5                                                                   0x2ca5
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL5_BASE_IDX                                                          2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL6                                                                   0x2ca6
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL6_BASE_IDX                                                          2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL7                                                                   0x2ca7
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL7_BASE_IDX                                                          2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL8                                                                   0x2ca8
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL8_BASE_IDX                                                          2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL9                                                                   0x2ca9
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL9_BASE_IDX                                                          2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL10                                                                  0x2caa
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL10_BASE_IDX                                                         2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL11                                                                  0x2cab
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL11_BASE_IDX                                                         2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL12                                                                  0x2cac
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL12_BASE_IDX                                                         2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL13                                                                  0x2cad
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL13_BASE_IDX                                                         2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL14                                                                  0x2cae
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL14_BASE_IDX                                                         2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE0                                                                   0x2caf
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE0_BASE_IDX                                                          2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE1                                                                   0x2cb0
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE1_BASE_IDX                                                          2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE2                                                                   0x2cb1
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE2_BASE_IDX                                                          2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE3                                                                   0x2cb2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE3_BASE_IDX                                                          2
+#define mmRDPCSTX4_RDPCSTX_PHY_RX_LD_VAL                                                               0x2cb3
+#define mmRDPCSTX4_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX                                                      2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3                                                        0x2cb4
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX                                               2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6                                                        0x2cb5
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX                                               2
+#define mmRDPCSTX4_RDPCSTX_DPALT_CONTROL_REG                                                           0x2cb6
+#define mmRDPCSTX4_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX                                                  2
+
+
+// addressBlock: dpcssys_dpcssys_cr4_dispdec
+// base address: 0xd80
+#define mmDPCSSYS_CR4_DPCSSYS_CR_ADDR                                                                  0x2c94
+#define mmDPCSSYS_CR4_DPCSSYS_CR_ADDR_BASE_IDX                                                         2
+#define mmDPCSSYS_CR4_DPCSSYS_CR_DATA                                                                  0x2c95
+#define mmDPCSSYS_CR4_DPCSSYS_CR_DATA_BASE_IDX                                                         2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx5_dispdec
+// base address: 0x10e0
+#define mmDPCSTX5_DPCSTX_TX_CLOCK_CNTL                                                                 0x2d60
+#define mmDPCSTX5_DPCSTX_TX_CLOCK_CNTL_BASE_IDX                                                        2
+#define mmDPCSTX5_DPCSTX_TX_CNTL                                                                       0x2d61
+#define mmDPCSTX5_DPCSTX_TX_CNTL_BASE_IDX                                                              2
+#define mmDPCSTX5_DPCSTX_CBUS_CNTL                                                                     0x2d62
+#define mmDPCSTX5_DPCSTX_CBUS_CNTL_BASE_IDX                                                            2
+#define mmDPCSTX5_DPCSTX_INTERRUPT_CNTL                                                                0x2d63
+#define mmDPCSTX5_DPCSTX_INTERRUPT_CNTL_BASE_IDX                                                       2
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR                                                               0x2d64
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX                                                      2
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA                                                               0x2d65
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA_BASE_IDX                                                      2
+#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG                                                                  0x2d66
+#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG_BASE_IDX                                                         2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec
+// base address: 0x10e0
+#define mmRDPCSTX5_RDPCSTX_CNTL                                                                        0x2d68
+#define mmRDPCSTX5_RDPCSTX_CNTL_BASE_IDX                                                               2
+#define mmRDPCSTX5_RDPCSTX_CLOCK_CNTL                                                                  0x2d69
+#define mmRDPCSTX5_RDPCSTX_CLOCK_CNTL_BASE_IDX                                                         2
+#define mmRDPCSTX5_RDPCSTX_INTERRUPT_CONTROL                                                           0x2d6a
+#define mmRDPCSTX5_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX                                                  2
+#define mmRDPCSTX5_RDPCSTX_PLL_UPDATE_DATA                                                             0x2d6b
+#define mmRDPCSTX5_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX                                                    2
+#define mmRDPCSTX5_RDPCS_TX_CR_ADDR                                                                    0x2d6c
+#define mmRDPCSTX5_RDPCS_TX_CR_ADDR_BASE_IDX                                                           2
+#define mmRDPCSTX5_RDPCS_TX_CR_DATA                                                                    0x2d6d
+#define mmRDPCSTX5_RDPCS_TX_CR_DATA_BASE_IDX                                                           2
+#define mmRDPCSTX5_RDPCS_TX_SRAM_CNTL                                                                  0x2d6e
+#define mmRDPCSTX5_RDPCS_TX_SRAM_CNTL_BASE_IDX                                                         2
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL                                                              0x2d6f
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL_BASE_IDX                                                     2
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL2                                                             0x2d70
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX                                                    2
+#define mmRDPCSTX5_RDPCSTX_SCRATCH                                                                     0x2d71
+#define mmRDPCSTX5_RDPCSTX_SCRATCH_BASE_IDX                                                            2
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG                                                    0x2d74
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX                                           2
+#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG                                                                0x2d75
+#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG_BASE_IDX                                                       2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL0                                                                   0x2d78
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL0_BASE_IDX                                                          2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL1                                                                   0x2d79
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL1_BASE_IDX                                                          2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL2                                                                   0x2d7a
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL2_BASE_IDX                                                          2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL3                                                                   0x2d7b
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL3_BASE_IDX                                                          2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL4                                                                   0x2d7c
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL4_BASE_IDX                                                          2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL5                                                                   0x2d7d
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL5_BASE_IDX                                                          2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL6                                                                   0x2d7e
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL6_BASE_IDX                                                          2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL7                                                                   0x2d7f
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL7_BASE_IDX                                                          2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL8                                                                   0x2d80
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL8_BASE_IDX                                                          2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL9                                                                   0x2d81
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL9_BASE_IDX                                                          2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL10                                                                  0x2d82
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL10_BASE_IDX                                                         2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL11                                                                  0x2d83
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL11_BASE_IDX                                                         2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL12                                                                  0x2d84
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL12_BASE_IDX                                                         2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL13                                                                  0x2d85
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL13_BASE_IDX                                                         2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL14                                                                  0x2d86
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL14_BASE_IDX                                                         2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE0                                                                   0x2d87
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE0_BASE_IDX                                                          2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE1                                                                   0x2d88
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE1_BASE_IDX                                                          2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE2                                                                   0x2d89
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE2_BASE_IDX                                                          2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE3                                                                   0x2d8a
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE3_BASE_IDX                                                          2
+#define mmRDPCSTX5_RDPCSTX_PHY_RX_LD_VAL                                                               0x2d8b
+#define mmRDPCSTX5_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX                                                      2
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3                                                        0x2d8c
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX                                               2
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6                                                        0x2d8d
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX                                               2
+#define mmRDPCSTX5_RDPCSTX_DPALT_CONTROL_REG                                                           0x2d8e
+#define mmRDPCSTX5_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX                                                  2
+
+
+// addressBlock: dpcssys_dpcssys_cr5_dispdec
+// base address: 0x10e0
+#define mmDPCSSYS_CR5_DPCSSYS_CR_ADDR                                                                  0x2d6c
+#define mmDPCSSYS_CR5_DPCSSYS_CR_ADDR_BASE_IDX                                                         2
+#define mmDPCSSYS_CR5_DPCSSYS_CR_DATA                                                                  0x2d6d
+#define mmDPCSSYS_CR5_DPCSSYS_CR_DATA_BASE_IDX                                                         2
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h
new file mode 100644
index 0000000..25e0569
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h
@@ -0,0 +1,3912 @@
+/*
+ * Copyright (C) 2019  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _dpcs_2_0_0_SH_MASK_HEADER
+#define _dpcs_2_0_0_SH_MASK_HEADER
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx0_dispdec
+//DPCSTX0_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT                                             0x0
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT                                                   0x1
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT                                             0x2
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT                                        0x3
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK                                               0x00000001L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK                                                     0x00000002L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK                                               0x00000004L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK                                          0x00000008L
+//DPCSTX0_DPCSTX_TX_CNTL
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT                                                 0xc
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT                                             0xd
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT                                                      0xe
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT                                              0xf
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT                                                        0x10
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT                                                     0x11
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT                                            0x14
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT                                                     0x1f
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK                                                   0x00001000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK                                               0x00002000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK                                                        0x00004000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK                                                0x00008000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK                                                          0x00010000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK                                                       0x00020000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK                                              0x00F00000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK                                                       0x80000000L
+//DPCSTX0_DPCSTX_CBUS_CNTL
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT                                               0x0
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT                                                 0x1f
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK                                                 0x000000FFL
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK                                                   0x80000000L
+//DPCSTX0_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT                                          0x0
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT                                              0x1
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT                                        0x4
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT                                             0x8
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT                                             0x9
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT                                             0xa
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT                                             0xb
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT                                               0xc
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT                                         0x10
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT                                             0x14
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK                                            0x00000001L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK                                                0x00000002L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK                                          0x00000010L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK                                               0x00000100L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK                                               0x00000200L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK                                               0x00000400L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK                                               0x00000800L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK                                                 0x00001000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK                                           0x00010000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK                                               0x00100000L
+//DPCSTX0_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT                                           0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK                                             0x0003FFFFL
+//DPCSTX0_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT                                           0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK                                             0xFFFFFFFFL
+//DPCSTX0_DPCSTX_DEBUG_CONFIG
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT                                                       0x0
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT                                               0x1
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT                                            0x4
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT                                       0x8
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT                                                 0xe
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT                                          0x10
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK                                                         0x00000001L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK                                                 0x0000000EL
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK                                              0x00000070L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK                                         0x00000700L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK                                                   0x00004000L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK                                            0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
+//RDPCSTX0_RDPCSTX_CNTL
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT                                                   0x0
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT                                                   0x4
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT                                                  0xc
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT                                                  0xd
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT                                                  0xe
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT                                                  0xf
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT                                                        0x10
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT                                                     0x11
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT                                            0x14
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT                                              0x18
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT                                       0x19
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT                                                0x1a
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT                                                     0x1f
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK                                                     0x00000001L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK                                                     0x00000010L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK                                                    0x00001000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK                                                    0x00002000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK                                                    0x00004000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK                                                    0x00008000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK                                                          0x00010000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK                                                       0x00020000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK                                              0x00F00000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK                                                0x01000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK                                         0x02000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK                                                  0x04000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK                                                       0x80000000L
+//RDPCSTX0_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT                                               0x0
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT                                          0x4
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT                                          0x5
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT                                          0x6
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT                                          0x7
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT                                        0x8
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT                                              0x9
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT                                        0xa
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT                                            0xc
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT                                                  0xd
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT                                            0xe
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT                                              0x10
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK                                                 0x00000001L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK                                            0x00000010L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK                                            0x00000020L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK                                            0x00000040L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK                                            0x00000080L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK                                          0x00000100L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK                                                0x00000200L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK                                          0x00000400L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK                                              0x00001000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK                                                    0x00002000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK                                              0x00004000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK                                                0x00010000L
+//RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT                                    0x0
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT                                 0x1
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT                                   0x2
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT                                       0x4
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT                                       0x5
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT                                       0x6
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT                                       0x7
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT                                        0x8
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT                             0x9
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT                               0xa
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT                                         0xc
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT                                  0x10
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT                            0x11
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT                              0x12
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT                                   0x14
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK                                      0x00000001L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK                                   0x00000002L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK                                     0x00000004L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK                                         0x00000010L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK                                         0x00000020L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK                                         0x00000040L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK                                         0x00000080L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK                                          0x00000100L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK                               0x00000200L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK                                 0x00000400L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK                                           0x00001000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK                                    0x00010000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK                              0x00020000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK                                0x00040000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK                                     0x00100000L
+//RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT                                        0x0
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK                                          0x00000001L
+//RDPCSTX0_RDPCS_TX_CR_ADDR
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT                                                    0x0
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK                                                      0x0000FFFFL
+//RDPCSTX0_RDPCS_TX_CR_DATA
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT                                                    0x0
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK                                                      0x0000FFFFL
+//RDPCSTX0_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT                                                 0x14
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT                                               0x18
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT                                           0x1c
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK                                                   0x00100000L
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK                                                 0x03000000L
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK                                             0x30000000L
+//RDPCSTX0_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT                                           0x0
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT                                    0xc
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT                                  0x1a
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT                                  0x1b
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT                                   0x1c
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT                                   0x1d
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT                                         0x1e
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK                                             0x00000FFFL
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK                                      0x03FFF000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK                                    0x04000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK                                    0x08000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK                                     0x10000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK                                     0x20000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK                                           0x40000000L
+//RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT                                    0x0
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT                                    0x2
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK                                      0x00000003L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK                                      0x00000004L
+//RDPCSTX0_RDPCSTX_SCRATCH
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT                                                      0x0
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK                                                        0xFFFFFFFFL
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT                      0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT              0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT                      0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK                        0x00000001L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK                0x00000010L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK                        0x0000FF00L
+//RDPCSTX0_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT                                                    0x0
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT                                        0x4
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT                                        0x7
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT                                          0x8
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT                                       0xf
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT                                          0x10
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT                                              0x18
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK                                                      0x00000001L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK                                          0x00000070L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK                                          0x00000080L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK                                            0x00001F00L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK                                         0x00008000L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK                                            0x00FF0000L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK                                                0xFF000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT                                                    0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT                                            0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT                                          0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT                                           0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT                                                  0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT                                          0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT                                                0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT                                            0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT                                                0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT                                                0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT                                              0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT                                               0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT                                            0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT                                        0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT                                               0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT                                             0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT                                                  0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK                                                      0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK                                              0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK                                            0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK                                             0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK                                                    0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK                                            0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK                                                  0x00003E00L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK                                              0x0001C000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK                                                  0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK                                                  0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK                                                0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK                                                 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK                                              0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK                                          0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK                                                 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK                                               0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK                                                    0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT                                               0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT                                               0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT                                           0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT                                               0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT                                           0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT                                              0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT                                               0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT                                           0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK                                                 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK                                                 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK                                             0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK                                                 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK                                             0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK                                                0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK                                                 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK                                             0x00000080L
+//RDPCSTX0_RDPCSTX_PHY_CNTL2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT                                                  0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT                                 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT                                 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT                                 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT                                 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT                                 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT                                 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT                                 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT                                 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK                                                    0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK                                   0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK                                   0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK                                   0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK                                   0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK                                   0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK                                   0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK                                   0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK                                   0x00000800L
+//RDPCSTX0_RDPCSTX_PHY_CNTL3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT                                             0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT                                           0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT                                           0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT                                           0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT                                               0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT                                               0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT                                             0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT                                           0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT                                           0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT                                           0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT                                               0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT                                               0xd
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT                                             0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT                                           0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT                                           0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT                                           0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT                                               0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT                                               0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT                                             0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT                                           0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT                                           0x1a
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT                                           0x1b
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT                                               0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT                                               0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK                                               0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK                                             0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK                                             0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK                                             0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK                                                 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK                                                 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK                                               0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK                                             0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK                                             0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK                                             0x00000800L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK                                                 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK                                                 0x00002000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK                                               0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK                                             0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK                                             0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK                                             0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK                                                 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK                                                 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK                                               0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK                                             0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK                                             0x04000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK                                             0x08000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK                                                 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK                                                 0x20000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT                                         0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT                                            0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT                                    0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT                                        0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT                                         0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT                                            0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT                                    0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT                                        0xf
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT                                         0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT                                            0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT                                    0x16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT                                        0x17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT                                         0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT                                            0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT                                    0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT                                        0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK                                           0x00000007L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK                                              0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK                                      0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK                                          0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK                                           0x00000700L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK                                              0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK                                      0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK                                          0x00008000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK                                           0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK                                              0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK                                      0x00400000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK                                          0x00800000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK                                           0x07000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK                                              0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK                                      0x40000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK                                          0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT                                               0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT                                              0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT                                             0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT                                         0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT                                      0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT                                               0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT                                              0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT                                             0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT                                         0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT                                      0xf
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT                                               0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT                                              0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT                                             0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT                                         0x16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT                                      0x17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT                                               0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT                                              0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT                                             0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT                                         0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT                                      0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK                                                 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK                                                0x0000000EL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK                                               0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK                                           0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK                                        0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK                                                 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK                                                0x00000E00L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK                                               0x00003000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK                                           0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK                                        0x00008000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK                                                 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK                                                0x000E0000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK                                               0x00300000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK                                           0x00400000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK                                        0x00800000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK                                                 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK                                                0x0E000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK                                               0x30000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK                                           0x40000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK                                        0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT                                            0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT                                           0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT                                            0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT                                           0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT                                            0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT                                           0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT                                            0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT                                           0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT                                                0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT                                            0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT                                        0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT                                            0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT                                           0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK                                              0x00000003L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK                                             0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK                                              0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK                                             0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK                                              0x00000300L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK                                             0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK                                              0x00003000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK                                             0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK                                                  0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK                                              0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK                                          0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK                                              0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK                                             0x00100000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT                                       0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT                                      0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK                                         0x0000FFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK                                        0xFFFF0000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT                                        0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK                                          0x000FFFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT                                    0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT                                   0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK                                      0x001FFFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK                                     0x01000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT                                      0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK                                        0x0000FFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT                                     0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT                                     0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT                                    0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT                           0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK                                       0x0000FFF0L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK                                       0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK                                      0x00700000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK                             0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT                                    0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT                                   0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT                                     0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT                                          0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT                                         0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK                                      0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK                                     0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK                                       0x00000070L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK                                            0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK                                           0x00000100L
+//RDPCSTX0_RDPCSTX_PHY_CNTL13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT                                 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT                                     0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT                                       0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT                               0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK                                   0x0FF00000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK                                       0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK                                         0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK                                 0x40000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT                                      0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT                                       0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT                                        0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK                                        0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK                                         0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK                                          0x10000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT                                             0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT                                        0x14
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK                                               0x000C0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK                                          0x00300000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE1
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT                                          0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT                                         0x19
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK                                            0x01FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK                                           0xFE000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE2
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK                                             0x0003F000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE3
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT                                             0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT                                                0x18
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK                                               0x00FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK                                                  0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT                                        0x0
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT                                        0x8
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK                                          0x0000007FL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK                                          0x001FFF00L
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT                         0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT                       0x1
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT                       0x2
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT                       0x3
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT                           0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT                           0x5
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT                         0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT                       0x9
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT                       0xa
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT                       0xb
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT                           0xc
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT                           0xd
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT                         0x10
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT                       0x11
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT                       0x12
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT                       0x13
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT                           0x14
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT                           0x15
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT                         0x18
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT                       0x19
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT                       0x1a
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT                       0x1b
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT                           0x1c
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT                           0x1d
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK                           0x00000001L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK                         0x00000002L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK                         0x00000004L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK                         0x00000008L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK                             0x00000010L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK                             0x00000020L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK                           0x00000100L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK                         0x00000200L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK                         0x00000400L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK                         0x00000800L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK                             0x00001000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK                             0x00002000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK                           0x00010000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK                         0x00020000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK                         0x00040000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK                         0x00080000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK                             0x00100000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK                             0x00200000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK                           0x01000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK                         0x02000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK                         0x04000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK                         0x08000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK                             0x10000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK                             0x20000000L
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT                        0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT                       0x2
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT                        0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT                       0x6
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT                        0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT                       0xa
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT                        0xc
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT                       0xe
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT                            0x10
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT                        0x11
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT                    0x12
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT                        0x13
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT                       0x14
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK                          0x00000003L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK                         0x00000004L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK                          0x00000030L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK                         0x00000040L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK                          0x00000300L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK                         0x00000400L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK                          0x00003000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK                         0x00004000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK                              0x00010000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK                          0x00020000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK                      0x00040000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK                          0x00080000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK                         0x00100000L
+//RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT                                  0x0
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT                                0x4
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT                                  0x8
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK                                    0x00000001L
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK                                  0x00000010L
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK                                    0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr0_dispdec
+//DPCSSYS_CR0_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT                                                  0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK                                                    0x0000FFFFL
+//DPCSSYS_CR0_DPCSSYS_CR_DATA
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT                                                  0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK                                                    0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx1_dispdec
+//DPCSTX1_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT                                             0x0
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT                                                   0x1
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT                                             0x2
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT                                        0x3
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK                                               0x00000001L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK                                                     0x00000002L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK                                               0x00000004L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK                                          0x00000008L
+//DPCSTX1_DPCSTX_TX_CNTL
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT                                                 0xc
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT                                             0xd
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT                                                      0xe
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT                                              0xf
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT                                                        0x10
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT                                                     0x11
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT                                            0x14
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT                                                     0x1f
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK                                                   0x00001000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK                                               0x00002000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK                                                        0x00004000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK                                                0x00008000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK                                                          0x00010000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK                                                       0x00020000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK                                              0x00F00000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK                                                       0x80000000L
+//DPCSTX1_DPCSTX_CBUS_CNTL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT                                               0x0
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT                                                 0x1f
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK                                                 0x000000FFL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK                                                   0x80000000L
+//DPCSTX1_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT                                          0x0
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT                                              0x1
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT                                        0x4
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT                                             0x8
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT                                             0x9
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT                                             0xa
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT                                             0xb
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT                                               0xc
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT                                         0x10
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT                                             0x14
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK                                            0x00000001L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK                                                0x00000002L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK                                          0x00000010L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK                                               0x00000100L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK                                               0x00000200L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK                                               0x00000400L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK                                               0x00000800L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK                                                 0x00001000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK                                           0x00010000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK                                               0x00100000L
+//DPCSTX1_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT                                           0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK                                             0x0003FFFFL
+//DPCSTX1_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT                                           0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK                                             0xFFFFFFFFL
+//DPCSTX1_DPCSTX_DEBUG_CONFIG
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT                                                       0x0
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT                                               0x1
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT                                            0x4
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT                                       0x8
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT                                                 0xe
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT                                          0x10
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK                                                         0x00000001L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK                                                 0x0000000EL
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK                                              0x00000070L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK                                         0x00000700L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK                                                   0x00004000L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK                                            0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec
+//RDPCSTX1_RDPCSTX_CNTL
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT                                                   0x0
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT                                                   0x4
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT                                                  0xc
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT                                                  0xd
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT                                                  0xe
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT                                                  0xf
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT                                                        0x10
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT                                                     0x11
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT                                            0x14
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT                                              0x18
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT                                       0x19
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT                                                0x1a
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT                                                     0x1f
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK                                                     0x00000001L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK                                                     0x00000010L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK                                                    0x00001000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK                                                    0x00002000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK                                                    0x00004000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK                                                    0x00008000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK                                                          0x00010000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK                                                       0x00020000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK                                              0x00F00000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK                                                0x01000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK                                         0x02000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK                                                  0x04000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK                                                       0x80000000L
+//RDPCSTX1_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT                                               0x0
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT                                          0x4
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT                                          0x5
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT                                          0x6
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT                                          0x7
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT                                        0x8
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT                                              0x9
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT                                        0xa
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT                                            0xc
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT                                                  0xd
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT                                            0xe
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT                                              0x10
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK                                                 0x00000001L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK                                            0x00000010L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK                                            0x00000020L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK                                            0x00000040L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK                                            0x00000080L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK                                          0x00000100L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK                                                0x00000200L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK                                          0x00000400L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK                                              0x00001000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK                                                    0x00002000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK                                              0x00004000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK                                                0x00010000L
+//RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT                                    0x0
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT                                 0x1
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT                                   0x2
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT                                       0x4
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT                                       0x5
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT                                       0x6
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT                                       0x7
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT                                        0x8
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT                             0x9
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT                               0xa
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT                                         0xc
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT                                  0x10
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT                            0x11
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT                              0x12
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT                                   0x14
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK                                      0x00000001L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK                                   0x00000002L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK                                     0x00000004L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK                                         0x00000010L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK                                         0x00000020L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK                                         0x00000040L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK                                         0x00000080L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK                                          0x00000100L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK                               0x00000200L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK                                 0x00000400L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK                                           0x00001000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK                                    0x00010000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK                              0x00020000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK                                0x00040000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK                                     0x00100000L
+//RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT                                        0x0
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK                                          0x00000001L
+//RDPCSTX1_RDPCS_TX_CR_ADDR
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT                                                    0x0
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK                                                      0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_CR_DATA
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT                                                    0x0
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK                                                      0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT                                                 0x14
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT                                               0x18
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT                                           0x1c
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK                                                   0x00100000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK                                                 0x03000000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK                                             0x30000000L
+//RDPCSTX1_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT                                           0x0
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT                                    0xc
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT                                  0x1a
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT                                  0x1b
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT                                   0x1c
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT                                   0x1d
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT                                         0x1e
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK                                             0x00000FFFL
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK                                      0x03FFF000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK                                    0x04000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK                                    0x08000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK                                     0x10000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK                                     0x20000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK                                           0x40000000L
+//RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT                                    0x0
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT                                    0x2
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK                                      0x00000003L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK                                      0x00000004L
+//RDPCSTX1_RDPCSTX_SCRATCH
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT                                                      0x0
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK                                                        0xFFFFFFFFL
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT                      0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT              0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT                      0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK                        0x00000001L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK                0x00000010L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK                        0x0000FF00L
+//RDPCSTX1_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT                                                    0x0
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT                                        0x4
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT                                        0x7
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT                                          0x8
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT                                       0xf
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT                                          0x10
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT                                              0x18
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK                                                      0x00000001L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK                                          0x00000070L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK                                          0x00000080L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK                                            0x00001F00L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK                                         0x00008000L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK                                            0x00FF0000L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK                                                0xFF000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT                                                    0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT                                            0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT                                          0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT                                           0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT                                                  0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT                                          0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT                                                0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT                                            0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT                                                0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT                                                0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT                                              0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT                                               0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT                                            0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT                                        0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT                                               0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT                                             0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT                                                  0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK                                                      0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK                                              0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK                                            0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK                                             0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK                                                    0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK                                            0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK                                                  0x00003E00L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK                                              0x0001C000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK                                                  0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK                                                  0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK                                                0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK                                                 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK                                              0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK                                          0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK                                                 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK                                               0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK                                                    0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT                                               0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT                                               0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT                                           0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT                                               0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT                                           0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT                                              0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT                                               0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT                                           0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK                                                 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK                                                 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK                                             0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK                                                 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK                                             0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK                                                0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK                                                 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK                                             0x00000080L
+//RDPCSTX1_RDPCSTX_PHY_CNTL2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT                                                  0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT                                 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT                                 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT                                 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT                                 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT                                 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT                                 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT                                 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT                                 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK                                                    0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK                                   0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK                                   0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK                                   0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK                                   0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK                                   0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK                                   0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK                                   0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK                                   0x00000800L
+//RDPCSTX1_RDPCSTX_PHY_CNTL3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT                                             0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT                                           0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT                                           0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT                                           0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT                                               0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT                                               0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT                                             0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT                                           0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT                                           0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT                                           0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT                                               0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT                                               0xd
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT                                             0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT                                           0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT                                           0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT                                           0x13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT                                               0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT                                               0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT                                             0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT                                           0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT                                           0x1a
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT                                           0x1b
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT                                               0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT                                               0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK                                               0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK                                             0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK                                             0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK                                             0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK                                                 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK                                                 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK                                               0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK                                             0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK                                             0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK                                             0x00000800L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK                                                 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK                                                 0x00002000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK                                               0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK                                             0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK                                             0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK                                             0x00080000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK                                                 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK                                                 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK                                               0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK                                             0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK                                             0x04000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK                                             0x08000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK                                                 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK                                                 0x20000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT                                         0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT                                            0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT                                    0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT                                        0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT                                         0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT                                            0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT                                    0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT                                        0xf
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT                                         0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT                                            0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT                                    0x16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT                                        0x17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT                                         0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT                                            0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT                                    0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT                                        0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK                                           0x00000007L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK                                              0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK                                      0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK                                          0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK                                           0x00000700L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK                                              0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK                                      0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK                                          0x00008000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK                                           0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK                                              0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK                                      0x00400000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK                                          0x00800000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK                                           0x07000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK                                              0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK                                      0x40000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK                                          0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT                                               0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT                                              0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT                                             0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT                                         0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT                                      0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT                                               0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT                                              0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT                                             0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT                                         0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT                                      0xf
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT                                               0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT                                              0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT                                             0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT                                         0x16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT                                      0x17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT                                               0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT                                              0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT                                             0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT                                         0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT                                      0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK                                                 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK                                                0x0000000EL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK                                               0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK                                           0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK                                        0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK                                                 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK                                                0x00000E00L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK                                               0x00003000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK                                           0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK                                        0x00008000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK                                                 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK                                                0x000E0000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK                                               0x00300000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK                                           0x00400000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK                                        0x00800000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK                                                 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK                                                0x0E000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK                                               0x30000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK                                           0x40000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK                                        0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT                                            0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT                                           0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT                                            0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT                                           0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT                                            0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT                                           0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT                                            0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT                                           0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT                                                0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT                                            0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT                                        0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT                                            0x13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT                                           0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK                                              0x00000003L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK                                             0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK                                              0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK                                             0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK                                              0x00000300L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK                                             0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK                                              0x00003000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK                                             0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK                                                  0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK                                              0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK                                          0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK                                              0x00080000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK                                             0x00100000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT                                       0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT                                      0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK                                         0x0000FFFFL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK                                        0xFFFF0000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT                                        0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK                                          0x000FFFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT                                    0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT                                   0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK                                      0x001FFFFFL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK                                     0x01000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT                                      0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK                                        0x0000FFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT                                     0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT                                     0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT                                    0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT                           0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK                                       0x0000FFF0L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK                                       0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK                                      0x00700000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK                             0x03000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT                                    0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT                                   0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT                                     0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT                                          0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT                                         0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK                                      0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK                                     0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK                                       0x00000070L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK                                            0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK                                           0x00000100L
+//RDPCSTX1_RDPCSTX_PHY_CNTL13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT                                 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT                                     0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT                                       0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT                               0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK                                   0x0FF00000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK                                       0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK                                         0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK                                 0x40000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT                                      0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT                                       0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT                                        0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK                                        0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK                                         0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK                                          0x10000000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT                                             0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT                                        0x14
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK                                               0x000C0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK                                          0x00300000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE1
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT                                          0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT                                         0x19
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK                                            0x01FC0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK                                           0xFE000000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE2
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK                                             0x0003F000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE3
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT                                             0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT                                                0x18
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK                                               0x00FC0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK                                                  0x03000000L
+//RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT                                        0x0
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT                                        0x8
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK                                          0x0000007FL
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK                                          0x001FFF00L
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT                         0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT                       0x1
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT                       0x2
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT                       0x3
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT                           0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT                           0x5
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT                         0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT                       0x9
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT                       0xa
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT                       0xb
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT                           0xc
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT                           0xd
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT                         0x10
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT                       0x11
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT                       0x12
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT                       0x13
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT                           0x14
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT                           0x15
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT                         0x18
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT                       0x19
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT                       0x1a
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT                       0x1b
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT                           0x1c
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT                           0x1d
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK                           0x00000001L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK                         0x00000002L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK                         0x00000004L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK                         0x00000008L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK                             0x00000010L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK                             0x00000020L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK                           0x00000100L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK                         0x00000200L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK                         0x00000400L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK                         0x00000800L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK                             0x00001000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK                             0x00002000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK                           0x00010000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK                         0x00020000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK                         0x00040000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK                         0x00080000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK                             0x00100000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK                             0x00200000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK                           0x01000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK                         0x02000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK                         0x04000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK                         0x08000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK                             0x10000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK                             0x20000000L
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT                        0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT                       0x2
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT                        0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT                       0x6
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT                        0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT                       0xa
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT                        0xc
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT                       0xe
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT                            0x10
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT                        0x11
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT                    0x12
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT                        0x13
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT                       0x14
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK                          0x00000003L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK                         0x00000004L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK                          0x00000030L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK                         0x00000040L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK                          0x00000300L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK                         0x00000400L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK                          0x00003000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK                         0x00004000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK                              0x00010000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK                          0x00020000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK                      0x00040000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK                          0x00080000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK                         0x00100000L
+//RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT                                  0x0
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT                                0x4
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT                                  0x8
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK                                    0x00000001L
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK                                  0x00000010L
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK                                    0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr1_dispdec
+//DPCSSYS_CR1_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT                                                  0x0
+#define DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK                                                    0x0000FFFFL
+//DPCSSYS_CR1_DPCSSYS_CR_DATA
+#define DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT                                                  0x0
+#define DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK                                                    0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx2_dispdec
+//DPCSTX2_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT                                             0x0
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT                                                   0x1
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT                                             0x2
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT                                        0x3
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK                                               0x00000001L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK                                                     0x00000002L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK                                               0x00000004L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK                                          0x00000008L
+//DPCSTX2_DPCSTX_TX_CNTL
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT                                                 0xc
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT                                             0xd
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT                                                      0xe
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT                                              0xf
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT                                                        0x10
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT                                                     0x11
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT                                            0x14
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT                                                     0x1f
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK                                                   0x00001000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK                                               0x00002000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK                                                        0x00004000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK                                                0x00008000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK                                                          0x00010000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK                                                       0x00020000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK                                              0x00F00000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK                                                       0x80000000L
+//DPCSTX2_DPCSTX_CBUS_CNTL
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT                                               0x0
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT                                                 0x1f
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK                                                 0x000000FFL
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK                                                   0x80000000L
+//DPCSTX2_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT                                          0x0
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT                                              0x1
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT                                        0x4
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT                                             0x8
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT                                             0x9
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT                                             0xa
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT                                             0xb
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT                                               0xc
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT                                         0x10
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT                                             0x14
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK                                            0x00000001L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK                                                0x00000002L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK                                          0x00000010L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK                                               0x00000100L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK                                               0x00000200L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK                                               0x00000400L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK                                               0x00000800L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK                                                 0x00001000L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK                                           0x00010000L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK                                               0x00100000L
+//DPCSTX2_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX2_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT                                           0x0
+#define DPCSTX2_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK                                             0x0003FFFFL
+//DPCSTX2_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX2_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT                                           0x0
+#define DPCSTX2_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK                                             0xFFFFFFFFL
+//DPCSTX2_DPCSTX_DEBUG_CONFIG
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT                                                       0x0
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT                                               0x1
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT                                            0x4
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT                                       0x8
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT                                                 0xe
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT                                          0x10
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK                                                         0x00000001L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK                                                 0x0000000EL
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK                                              0x00000070L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK                                         0x00000700L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK                                                   0x00004000L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK                                            0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec
+//RDPCSTX2_RDPCSTX_CNTL
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT                                                   0x0
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT                                                   0x4
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT                                                  0xc
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT                                                  0xd
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT                                                  0xe
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT                                                  0xf
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT                                                        0x10
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT                                                     0x11
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT                                            0x14
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT                                              0x18
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT                                       0x19
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT                                                0x1a
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT                                                     0x1f
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK                                                     0x00000001L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK                                                     0x00000010L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK                                                    0x00001000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK                                                    0x00002000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK                                                    0x00004000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK                                                    0x00008000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK                                                          0x00010000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK                                                       0x00020000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK                                              0x00F00000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK                                                0x01000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK                                         0x02000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK                                                  0x04000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK                                                       0x80000000L
+//RDPCSTX2_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT                                               0x0
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT                                          0x4
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT                                          0x5
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT                                          0x6
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT                                          0x7
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT                                        0x8
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT                                              0x9
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT                                        0xa
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT                                            0xc
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT                                                  0xd
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT                                            0xe
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT                                              0x10
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK                                                 0x00000001L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK                                            0x00000010L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK                                            0x00000020L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK                                            0x00000040L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK                                            0x00000080L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK                                          0x00000100L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK                                                0x00000200L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK                                          0x00000400L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK                                              0x00001000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK                                                    0x00002000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK                                              0x00004000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK                                                0x00010000L
+//RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT                                    0x0
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT                                 0x1
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT                                   0x2
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT                                       0x4
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT                                       0x5
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT                                       0x6
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT                                       0x7
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT                                        0x8
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT                             0x9
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT                               0xa
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT                                         0xc
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT                                  0x10
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT                            0x11
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT                              0x12
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT                                   0x14
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK                                      0x00000001L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK                                   0x00000002L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK                                     0x00000004L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK                                         0x00000010L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK                                         0x00000020L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK                                         0x00000040L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK                                         0x00000080L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK                                          0x00000100L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK                               0x00000200L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK                                 0x00000400L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK                                           0x00001000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK                                    0x00010000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK                              0x00020000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK                                0x00040000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK                                     0x00100000L
+//RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT                                        0x0
+#define RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK                                          0x00000001L
+//RDPCSTX2_RDPCS_TX_CR_ADDR
+#define RDPCSTX2_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT                                                    0x0
+#define RDPCSTX2_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK                                                      0x0000FFFFL
+//RDPCSTX2_RDPCS_TX_CR_DATA
+#define RDPCSTX2_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT                                                    0x0
+#define RDPCSTX2_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK                                                      0x0000FFFFL
+//RDPCSTX2_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT                                                 0x14
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT                                               0x18
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT                                           0x1c
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK                                                   0x00100000L
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK                                                 0x03000000L
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK                                             0x30000000L
+//RDPCSTX2_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT                                           0x0
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT                                    0xc
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT                                  0x1a
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT                                  0x1b
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT                                   0x1c
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT                                   0x1d
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT                                         0x1e
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK                                             0x00000FFFL
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK                                      0x03FFF000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK                                    0x04000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK                                    0x08000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK                                     0x10000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK                                     0x20000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK                                           0x40000000L
+//RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT                                    0x0
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT                                    0x2
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK                                      0x00000003L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK                                      0x00000004L
+//RDPCSTX2_RDPCSTX_SCRATCH
+#define RDPCSTX2_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT                                                      0x0
+#define RDPCSTX2_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK                                                        0xFFFFFFFFL
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT                      0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT              0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT                      0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK                        0x00000001L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK                0x00000010L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK                        0x0000FF00L
+//RDPCSTX2_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT                                                    0x0
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT                                        0x4
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT                                        0x7
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT                                          0x8
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT                                       0xf
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT                                          0x10
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT                                              0x18
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK                                                      0x00000001L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK                                          0x00000070L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK                                          0x00000080L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK                                            0x00001F00L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK                                         0x00008000L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK                                            0x00FF0000L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK                                                0xFF000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT                                                    0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT                                            0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT                                          0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT                                           0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT                                                  0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT                                          0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT                                                0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT                                            0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT                                                0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT                                                0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT                                              0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT                                               0x15
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT                                            0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT                                        0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT                                               0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT                                             0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT                                                  0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK                                                      0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK                                              0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK                                            0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK                                             0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK                                                    0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK                                            0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK                                                  0x00003E00L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK                                              0x0001C000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK                                                  0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK                                                  0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK                                                0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK                                                 0x00200000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK                                              0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK                                          0x02000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK                                                 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK                                               0x20000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK                                                    0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT                                               0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT                                               0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT                                           0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT                                               0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT                                           0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT                                              0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT                                               0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT                                           0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK                                                 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK                                                 0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK                                             0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK                                                 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK                                             0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK                                                0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK                                                 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK                                             0x00000080L
+//RDPCSTX2_RDPCSTX_PHY_CNTL2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT                                                  0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT                                 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT                                 0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT                                 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT                                 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT                                 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT                                 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT                                 0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT                                 0xb
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK                                                    0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK                                   0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK                                   0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK                                   0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK                                   0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK                                   0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK                                   0x00000200L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK                                   0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK                                   0x00000800L
+//RDPCSTX2_RDPCSTX_PHY_CNTL3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT                                             0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT                                           0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT                                           0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT                                           0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT                                               0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT                                               0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT                                             0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT                                           0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT                                           0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT                                           0xb
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT                                               0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT                                               0xd
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT                                             0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT                                           0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT                                           0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT                                           0x13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT                                               0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT                                               0x15
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT                                             0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT                                           0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT                                           0x1a
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT                                           0x1b
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT                                               0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT                                               0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK                                               0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK                                             0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK                                             0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK                                             0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK                                                 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK                                                 0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK                                               0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK                                             0x00000200L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK                                             0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK                                             0x00000800L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK                                                 0x00001000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK                                                 0x00002000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK                                               0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK                                             0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK                                             0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK                                             0x00080000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK                                                 0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK                                                 0x00200000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK                                               0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK                                             0x02000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK                                             0x04000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK                                             0x08000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK                                                 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK                                                 0x20000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT                                         0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT                                            0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT                                    0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT                                        0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT                                         0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT                                            0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT                                    0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT                                        0xf
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT                                         0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT                                            0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT                                    0x16
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT                                        0x17
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT                                         0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT                                            0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT                                    0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT                                        0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK                                           0x00000007L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK                                              0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK                                      0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK                                          0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK                                           0x00000700L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK                                              0x00001000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK                                      0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK                                          0x00008000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK                                           0x00070000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK                                              0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK                                      0x00400000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK                                          0x00800000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK                                           0x07000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK                                              0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK                                      0x40000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK                                          0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT                                               0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT                                              0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT                                             0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT                                         0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT                                      0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT                                               0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT                                              0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT                                             0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT                                         0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT                                      0xf
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT                                               0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT                                              0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT                                             0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT                                         0x16
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT                                      0x17
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT                                               0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT                                              0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT                                             0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT                                         0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT                                      0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK                                                 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK                                                0x0000000EL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK                                               0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK                                           0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK                                        0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK                                                 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK                                                0x00000E00L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK                                               0x00003000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK                                           0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK                                        0x00008000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK                                                 0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK                                                0x000E0000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK                                               0x00300000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK                                           0x00400000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK                                        0x00800000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK                                                 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK                                                0x0E000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK                                               0x30000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK                                           0x40000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK                                        0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT                                            0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT                                           0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT                                            0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT                                           0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT                                            0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT                                           0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT                                            0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT                                           0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT                                                0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT                                            0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT                                        0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT                                            0x13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT                                           0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK                                              0x00000003L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK                                             0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK                                              0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK                                             0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK                                              0x00000300L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK                                             0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK                                              0x00003000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK                                             0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK                                                  0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK                                              0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK                                          0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK                                              0x00080000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK                                             0x00100000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT                                       0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT                                      0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK                                         0x0000FFFFL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK                                        0xFFFF0000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT                                        0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK                                          0x000FFFFFL
+//RDPCSTX2_RDPCSTX_PHY_CNTL9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT                                    0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT                                   0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK                                      0x001FFFFFL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK                                     0x01000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT                                      0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK                                        0x0000FFFFL
+//RDPCSTX2_RDPCSTX_PHY_CNTL11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT                                     0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT                                     0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT                                    0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT                           0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK                                       0x0000FFF0L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK                                       0x00070000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK                                      0x00700000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK                             0x03000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT                                    0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT                                   0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT                                     0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT                                          0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT                                         0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK                                      0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK                                     0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK                                       0x00000070L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK                                            0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK                                           0x00000100L
+//RDPCSTX2_RDPCSTX_PHY_CNTL13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT                                 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT                                     0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT                                       0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT                               0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK                                   0x0FF00000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK                                       0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK                                         0x20000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK                                 0x40000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT                                      0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT                                       0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT                                        0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK                                        0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK                                         0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK                                          0x10000000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT                                             0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT                                        0x14
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK                                               0x000C0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK                                          0x00300000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE1
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT                                          0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT                                         0x19
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK                                            0x01FC0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK                                           0xFE000000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE2
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK                                             0x0003F000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE3
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT                                             0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT                                                0x18
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK                                               0x00FC0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK                                                  0x03000000L
+//RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT                                        0x0
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT                                        0x8
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK                                          0x0000007FL
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK                                          0x001FFF00L
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT                         0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT                       0x1
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT                       0x2
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT                       0x3
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT                           0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT                           0x5
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT                         0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT                       0x9
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT                       0xa
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT                       0xb
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT                           0xc
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT                           0xd
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT                         0x10
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT                       0x11
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT                       0x12
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT                       0x13
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT                           0x14
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT                           0x15
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT                         0x18
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT                       0x19
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT                       0x1a
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT                       0x1b
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT                           0x1c
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT                           0x1d
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK                           0x00000001L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK                         0x00000002L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK                         0x00000004L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK                         0x00000008L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK                             0x00000010L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK                             0x00000020L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK                           0x00000100L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK                         0x00000200L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK                         0x00000400L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK                         0x00000800L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK                             0x00001000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK                             0x00002000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK                           0x00010000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK                         0x00020000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK                         0x00040000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK                         0x00080000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK                             0x00100000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK                             0x00200000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK                           0x01000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK                         0x02000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK                         0x04000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK                         0x08000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK                             0x10000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK                             0x20000000L
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT                        0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT                       0x2
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT                        0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT                       0x6
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT                        0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT                       0xa
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT                        0xc
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT                       0xe
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT                            0x10
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT                        0x11
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT                    0x12
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT                        0x13
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT                       0x14
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK                          0x00000003L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK                         0x00000004L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK                          0x00000030L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK                         0x00000040L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK                          0x00000300L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK                         0x00000400L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK                          0x00003000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK                         0x00004000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK                              0x00010000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK                          0x00020000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK                      0x00040000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK                          0x00080000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK                         0x00100000L
+//RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT                                  0x0
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT                                0x4
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT                                  0x8
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK                                    0x00000001L
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK                                  0x00000010L
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK                                    0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr2_dispdec
+//DPCSSYS_CR2_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT                                                  0x0
+#define DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK                                                    0x0000FFFFL
+//DPCSSYS_CR2_DPCSSYS_CR_DATA
+#define DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT                                                  0x0
+#define DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK                                                    0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx3_dispdec
+//DPCSTX3_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT                                             0x0
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT                                                   0x1
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT                                             0x2
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT                                        0x3
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK                                               0x00000001L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK                                                     0x00000002L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK                                               0x00000004L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK                                          0x00000008L
+//DPCSTX3_DPCSTX_TX_CNTL
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT                                                 0xc
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT                                             0xd
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT                                                      0xe
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT                                              0xf
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT                                                        0x10
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT                                                     0x11
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT                                            0x14
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT                                                     0x1f
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK                                                   0x00001000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK                                               0x00002000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK                                                        0x00004000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK                                                0x00008000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK                                                          0x00010000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK                                                       0x00020000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK                                              0x00F00000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK                                                       0x80000000L
+//DPCSTX3_DPCSTX_CBUS_CNTL
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT                                               0x0
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT                                                 0x1f
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK                                                 0x000000FFL
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK                                                   0x80000000L
+//DPCSTX3_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT                                          0x0
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT                                              0x1
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT                                        0x4
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT                                             0x8
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT                                             0x9
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT                                             0xa
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT                                             0xb
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT                                               0xc
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT                                         0x10
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT                                             0x14
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK                                            0x00000001L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK                                                0x00000002L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK                                          0x00000010L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK                                               0x00000100L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK                                               0x00000200L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK                                               0x00000400L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK                                               0x00000800L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK                                                 0x00001000L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK                                           0x00010000L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK                                               0x00100000L
+//DPCSTX3_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX3_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT                                           0x0
+#define DPCSTX3_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK                                             0x0003FFFFL
+//DPCSTX3_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX3_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT                                           0x0
+#define DPCSTX3_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK                                             0xFFFFFFFFL
+//DPCSTX3_DPCSTX_DEBUG_CONFIG
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT                                                       0x0
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT                                               0x1
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT                                            0x4
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT                                       0x8
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT                                                 0xe
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT                                          0x10
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK                                                         0x00000001L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK                                                 0x0000000EL
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK                                              0x00000070L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK                                         0x00000700L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK                                                   0x00004000L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK                                            0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec
+//RDPCSTX3_RDPCSTX_CNTL
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT                                                   0x0
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT                                                   0x4
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT                                                  0xc
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT                                                  0xd
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT                                                  0xe
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT                                                  0xf
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT                                                        0x10
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT                                                     0x11
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT                                            0x14
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT                                              0x18
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT                                       0x19
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT                                                0x1a
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT                                                     0x1f
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK                                                     0x00000001L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK                                                     0x00000010L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK                                                    0x00001000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK                                                    0x00002000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK                                                    0x00004000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK                                                    0x00008000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK                                                          0x00010000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK                                                       0x00020000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK                                              0x00F00000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK                                                0x01000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK                                         0x02000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK                                                  0x04000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK                                                       0x80000000L
+//RDPCSTX3_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT                                               0x0
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT                                          0x4
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT                                          0x5
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT                                          0x6
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT                                          0x7
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT                                        0x8
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT                                              0x9
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT                                        0xa
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT                                            0xc
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT                                                  0xd
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT                                            0xe
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT                                              0x10
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK                                                 0x00000001L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK                                            0x00000010L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK                                            0x00000020L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK                                            0x00000040L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK                                            0x00000080L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK                                          0x00000100L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK                                                0x00000200L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK                                          0x00000400L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK                                              0x00001000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK                                                    0x00002000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK                                              0x00004000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK                                                0x00010000L
+//RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT                                    0x0
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT                                 0x1
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT                                   0x2
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT                                       0x4
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT                                       0x5
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT                                       0x6
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT                                       0x7
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT                                        0x8
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT                             0x9
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT                               0xa
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT                                         0xc
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT                                  0x10
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT                            0x11
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT                              0x12
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT                                   0x14
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK                                      0x00000001L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK                                   0x00000002L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK                                     0x00000004L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK                                         0x00000010L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK                                         0x00000020L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK                                         0x00000040L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK                                         0x00000080L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK                                          0x00000100L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK                               0x00000200L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK                                 0x00000400L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK                                           0x00001000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK                                    0x00010000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK                              0x00020000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK                                0x00040000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK                                     0x00100000L
+//RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT                                        0x0
+#define RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK                                          0x00000001L
+//RDPCSTX3_RDPCS_TX_CR_ADDR
+#define RDPCSTX3_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT                                                    0x0
+#define RDPCSTX3_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK                                                      0x0000FFFFL
+//RDPCSTX3_RDPCS_TX_CR_DATA
+#define RDPCSTX3_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT                                                    0x0
+#define RDPCSTX3_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK                                                      0x0000FFFFL
+//RDPCSTX3_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT                                                 0x14
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT                                               0x18
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT                                           0x1c
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK                                                   0x00100000L
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK                                                 0x03000000L
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK                                             0x30000000L
+//RDPCSTX3_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT                                           0x0
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT                                    0xc
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT                                  0x1a
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT                                  0x1b
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT                                   0x1c
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT                                   0x1d
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT                                         0x1e
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK                                             0x00000FFFL
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK                                      0x03FFF000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK                                    0x04000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK                                    0x08000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK                                     0x10000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK                                     0x20000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK                                           0x40000000L
+//RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT                                    0x0
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT                                    0x2
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK                                      0x00000003L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK                                      0x00000004L
+//RDPCSTX3_RDPCSTX_SCRATCH
+#define RDPCSTX3_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT                                                      0x0
+#define RDPCSTX3_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK                                                        0xFFFFFFFFL
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT                      0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT              0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT                      0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK                        0x00000001L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK                0x00000010L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK                        0x0000FF00L
+//RDPCSTX3_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT                                                    0x0
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT                                        0x4
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT                                        0x7
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT                                          0x8
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT                                       0xf
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT                                          0x10
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT                                              0x18
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK                                                      0x00000001L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK                                          0x00000070L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK                                          0x00000080L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK                                            0x00001F00L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK                                         0x00008000L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK                                            0x00FF0000L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK                                                0xFF000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT                                                    0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT                                            0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT                                          0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT                                           0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT                                                  0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT                                          0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT                                                0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT                                            0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT                                                0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT                                                0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT                                              0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT                                               0x15
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT                                            0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT                                        0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT                                               0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT                                             0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT                                                  0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK                                                      0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK                                              0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK                                            0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK                                             0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK                                                    0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK                                            0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK                                                  0x00003E00L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK                                              0x0001C000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK                                                  0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK                                                  0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK                                                0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK                                                 0x00200000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK                                              0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK                                          0x02000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK                                                 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK                                               0x20000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK                                                    0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT                                               0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT                                               0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT                                           0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT                                               0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT                                           0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT                                              0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT                                               0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT                                           0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK                                                 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK                                                 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK                                             0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK                                                 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK                                             0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK                                                0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK                                                 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK                                             0x00000080L
+//RDPCSTX3_RDPCSTX_PHY_CNTL2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT                                                  0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT                                 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT                                 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT                                 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT                                 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT                                 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT                                 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT                                 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT                                 0xb
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK                                                    0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK                                   0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK                                   0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK                                   0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK                                   0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK                                   0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK                                   0x00000200L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK                                   0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK                                   0x00000800L
+//RDPCSTX3_RDPCSTX_PHY_CNTL3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT                                             0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT                                           0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT                                           0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT                                           0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT                                               0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT                                               0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT                                             0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT                                           0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT                                           0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT                                           0xb
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT                                               0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT                                               0xd
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT                                             0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT                                           0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT                                           0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT                                           0x13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT                                               0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT                                               0x15
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT                                             0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT                                           0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT                                           0x1a
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT                                           0x1b
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT                                               0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT                                               0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK                                               0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK                                             0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK                                             0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK                                             0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK                                                 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK                                                 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK                                               0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK                                             0x00000200L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK                                             0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK                                             0x00000800L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK                                                 0x00001000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK                                                 0x00002000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK                                               0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK                                             0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK                                             0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK                                             0x00080000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK                                                 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK                                                 0x00200000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK                                               0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK                                             0x02000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK                                             0x04000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK                                             0x08000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK                                                 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK                                                 0x20000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT                                         0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT                                            0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT                                    0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT                                        0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT                                         0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT                                            0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT                                    0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT                                        0xf
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT                                         0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT                                            0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT                                    0x16
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT                                        0x17
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT                                         0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT                                            0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT                                    0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT                                        0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK                                           0x00000007L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK                                              0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK                                      0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK                                          0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK                                           0x00000700L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK                                              0x00001000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK                                      0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK                                          0x00008000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK                                           0x00070000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK                                              0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK                                      0x00400000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK                                          0x00800000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK                                           0x07000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK                                              0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK                                      0x40000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK                                          0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT                                               0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT                                              0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT                                             0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT                                         0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT                                      0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT                                               0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT                                              0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT                                             0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT                                         0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT                                      0xf
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT                                               0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT                                              0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT                                             0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT                                         0x16
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT                                      0x17
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT                                               0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT                                              0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT                                             0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT                                         0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT                                      0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK                                                 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK                                                0x0000000EL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK                                               0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK                                           0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK                                        0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK                                                 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK                                                0x00000E00L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK                                               0x00003000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK                                           0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK                                        0x00008000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK                                                 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK                                                0x000E0000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK                                               0x00300000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK                                           0x00400000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK                                        0x00800000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK                                                 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK                                                0x0E000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK                                               0x30000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK                                           0x40000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK                                        0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT                                            0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT                                           0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT                                            0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT                                           0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT                                            0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT                                           0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT                                            0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT                                           0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT                                                0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT                                            0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT                                        0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT                                            0x13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT                                           0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK                                              0x00000003L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK                                             0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK                                              0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK                                             0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK                                              0x00000300L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK                                             0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK                                              0x00003000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK                                             0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK                                                  0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK                                              0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK                                          0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK                                              0x00080000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK                                             0x00100000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT                                       0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT                                      0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK                                         0x0000FFFFL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK                                        0xFFFF0000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT                                        0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK                                          0x000FFFFFL
+//RDPCSTX3_RDPCSTX_PHY_CNTL9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT                                    0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT                                   0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK                                      0x001FFFFFL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK                                     0x01000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT                                      0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK                                        0x0000FFFFL
+//RDPCSTX3_RDPCSTX_PHY_CNTL11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT                                     0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT                                     0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT                                    0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT                           0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK                                       0x0000FFF0L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK                                       0x00070000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK                                      0x00700000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK                             0x03000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT                                    0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT                                   0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT                                     0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT                                          0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT                                         0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK                                      0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK                                     0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK                                       0x00000070L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK                                            0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK                                           0x00000100L
+//RDPCSTX3_RDPCSTX_PHY_CNTL13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT                                 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT                                     0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT                                       0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT                               0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK                                   0x0FF00000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK                                       0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK                                         0x20000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK                                 0x40000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT                                      0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT                                       0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT                                        0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK                                        0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK                                         0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK                                          0x10000000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT                                             0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT                                        0x14
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK                                               0x000C0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK                                          0x00300000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE1
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT                                          0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT                                         0x19
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK                                            0x01FC0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK                                           0xFE000000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE2
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK                                             0x0003F000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE3
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT                                             0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT                                                0x18
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK                                               0x00FC0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK                                                  0x03000000L
+//RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT                                        0x0
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT                                        0x8
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK                                          0x0000007FL
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK                                          0x001FFF00L
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT                         0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT                       0x1
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT                       0x2
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT                       0x3
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT                           0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT                           0x5
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT                         0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT                       0x9
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT                       0xa
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT                       0xb
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT                           0xc
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT                           0xd
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT                         0x10
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT                       0x11
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT                       0x12
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT                       0x13
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT                           0x14
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT                           0x15
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT                         0x18
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT                       0x19
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT                       0x1a
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT                       0x1b
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT                           0x1c
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT                           0x1d
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK                           0x00000001L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK                         0x00000002L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK                         0x00000004L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK                         0x00000008L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK                             0x00000010L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK                             0x00000020L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK                           0x00000100L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK                         0x00000200L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK                         0x00000400L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK                         0x00000800L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK                             0x00001000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK                             0x00002000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK                           0x00010000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK                         0x00020000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK                         0x00040000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK                         0x00080000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK                             0x00100000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK                             0x00200000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK                           0x01000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK                         0x02000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK                         0x04000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK                         0x08000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK                             0x10000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK                             0x20000000L
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT                        0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT                       0x2
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT                        0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT                       0x6
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT                        0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT                       0xa
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT                        0xc
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT                       0xe
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT                            0x10
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT                        0x11
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT                    0x12
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT                        0x13
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT                       0x14
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK                          0x00000003L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK                         0x00000004L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK                          0x00000030L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK                         0x00000040L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK                          0x00000300L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK                         0x00000400L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK                          0x00003000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK                         0x00004000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK                              0x00010000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK                          0x00020000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK                      0x00040000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK                          0x00080000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK                         0x00100000L
+//RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT                                  0x0
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT                                0x4
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT                                  0x8
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK                                    0x00000001L
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK                                  0x00000010L
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK                                    0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr3_dispdec
+//DPCSSYS_CR3_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT                                                  0x0
+#define DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK                                                    0x0000FFFFL
+//DPCSSYS_CR3_DPCSSYS_CR_DATA
+#define DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT                                                  0x0
+#define DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK                                                    0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcsrx_dispdec
+//DPCSRX_PHY_CNTL
+#define DPCSRX_PHY_CNTL__DPCS_PHY_RESET__SHIFT                                                                0x0
+#define DPCSRX_PHY_CNTL__DPCS_PHY_RESET_MASK                                                                  0x00000001L
+//DPCSRX_RX_CLOCK_CNTL
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_GATE_DIS__SHIFT                                                  0x0
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_EN__SHIFT                                                        0x1
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_SEL__SHIFT                                                       0x2
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_CLOCK_ON__SHIFT                                                  0x4
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_GATE_DIS__SHIFT                                                 0x10
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_EN__SHIFT                                                       0x11
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_CLOCK_ON__SHIFT                                                 0x12
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_GATE_DIS__SHIFT                                                 0x14
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_EN__SHIFT                                                       0x15
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_CLOCK_ON__SHIFT                                                 0x16
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_GATE_DIS__SHIFT                                                 0x18
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_EN__SHIFT                                                       0x19
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_CLOCK_ON__SHIFT                                                 0x1a
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_GATE_DIS__SHIFT                                                 0x1c
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_EN__SHIFT                                                       0x1d
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_CLOCK_ON__SHIFT                                                 0x1e
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_GATE_DIS_MASK                                                    0x00000001L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_EN_MASK                                                          0x00000002L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_SEL_MASK                                                         0x0000000CL
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_CLOCK_ON_MASK                                                    0x00000010L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_GATE_DIS_MASK                                                   0x00010000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_EN_MASK                                                         0x00020000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_CLOCK_ON_MASK                                                   0x00040000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_GATE_DIS_MASK                                                   0x00100000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_EN_MASK                                                         0x00200000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_CLOCK_ON_MASK                                                   0x00400000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_GATE_DIS_MASK                                                   0x01000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_EN_MASK                                                         0x02000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_CLOCK_ON_MASK                                                   0x04000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_GATE_DIS_MASK                                                   0x10000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_EN_MASK                                                         0x20000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_CLOCK_ON_MASK                                                   0x40000000L
+//DPCSRX_RX_CNTL
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE0_EN__SHIFT                                                               0x0
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE1_EN__SHIFT                                                               0x1
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE2_EN__SHIFT                                                               0x2
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE3_EN__SHIFT                                                               0x3
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_EN__SHIFT                                                                0x4
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_START__SHIFT                                                             0x5
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_RD_START_DELAY__SHIFT                                                    0x8
+#define DPCSRX_RX_CNTL__DPCS_RX_SOFT_RESET__SHIFT                                                             0x1f
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE0_EN_MASK                                                                 0x00000001L
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE1_EN_MASK                                                                 0x00000002L
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE2_EN_MASK                                                                 0x00000004L
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE3_EN_MASK                                                                 0x00000008L
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_EN_MASK                                                                  0x00000010L
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_START_MASK                                                               0x00000020L
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_RD_START_DELAY_MASK                                                      0x00000F00L
+#define DPCSRX_RX_CNTL__DPCS_RX_SOFT_RESET_MASK                                                               0x80000000L
+//DPCSRX_CBUS_CNTL
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT                                                       0x0
+#define DPCSRX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY__SHIFT                                                    0x8
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT                                                         0x1f
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK                                                         0x0000000FL
+#define DPCSRX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY_MASK                                                      0x0000FF00L
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK                                                           0x80000000L
+//DPCSRX_REG_ERROR_STATUS
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW__SHIFT                                                0x0
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR__SHIFT                                                    0x1
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK__SHIFT                                              0x4
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW_MASK                                                  0x00000001L
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR_MASK                                                      0x00000002L
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK_MASK                                                0x00000010L
+//DPCSRX_RX_ERROR_STATUS
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX0_FIFO_ERROR__SHIFT                                                    0x0
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX1_FIFO_ERROR__SHIFT                                                    0x1
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX2_FIFO_ERROR__SHIFT                                                    0x2
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX3_FIFO_ERROR__SHIFT                                                    0x3
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_ERROR_CLR__SHIFT                                                      0x8
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_FIFO_ERROR_MASK__SHIFT                                                0xc
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX0_FIFO_ERROR_MASK                                                      0x00000001L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX1_FIFO_ERROR_MASK                                                      0x00000002L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX2_FIFO_ERROR_MASK                                                      0x00000004L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX3_FIFO_ERROR_MASK                                                      0x00000008L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_ERROR_CLR_MASK                                                        0x00000100L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_FIFO_ERROR_MASK_MASK                                                  0x00001000L
+//DPCSRX_INDEX_MODE_ADDR
+#define DPCSRX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR__SHIFT                                                   0x0
+#define DPCSRX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR_MASK                                                     0x0003FFFFL
+//DPCSRX_INDEX_MODE_DATA
+#define DPCSRX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA__SHIFT                                                   0x0
+#define DPCSRX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA_MASK                                                     0xFFFFFFFFL
+//DPCSRX_DEBUG_CONFIG
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT                                                               0x0
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT                                                       0x1
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_RX_SYMCLK_SEL__SHIFT                                                    0x6
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL__SHIFT                                                        0xb
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT                                                         0xe
+#define DPCSRX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT                                                  0x10
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_EN_MASK                                                                 0x00000001L
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK                                                         0x0000000EL
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_RX_SYMCLK_SEL_MASK                                                      0x000000C0L
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL_MASK                                                          0x00003800L
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK                                                           0x00004000L
+#define DPCSRX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK                                                    0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx4_dispdec
+//DPCSTX4_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT                                             0x0
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT                                                   0x1
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT                                             0x2
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT                                        0x3
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK                                               0x00000001L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK                                                     0x00000002L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK                                               0x00000004L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK                                          0x00000008L
+//DPCSTX4_DPCSTX_TX_CNTL
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT                                                 0xc
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT                                             0xd
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT                                                      0xe
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT                                              0xf
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT                                                        0x10
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT                                                     0x11
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT                                            0x14
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT                                                     0x1f
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK                                                   0x00001000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK                                               0x00002000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK                                                        0x00004000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK                                                0x00008000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK                                                          0x00010000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK                                                       0x00020000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK                                              0x00F00000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK                                                       0x80000000L
+//DPCSTX4_DPCSTX_CBUS_CNTL
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT                                               0x0
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT                                                 0x1f
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK                                                 0x000000FFL
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK                                                   0x80000000L
+//DPCSTX4_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT                                          0x0
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT                                              0x1
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT                                        0x4
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT                                             0x8
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT                                             0x9
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT                                             0xa
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT                                             0xb
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT                                               0xc
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT                                         0x10
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT                                             0x14
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK                                            0x00000001L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK                                                0x00000002L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK                                          0x00000010L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK                                               0x00000100L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK                                               0x00000200L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK                                               0x00000400L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK                                               0x00000800L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK                                                 0x00001000L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK                                           0x00010000L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK                                               0x00100000L
+//DPCSTX4_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT                                           0x0
+#define DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK                                             0x0003FFFFL
+//DPCSTX4_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX4_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT                                           0x0
+#define DPCSTX4_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK                                             0xFFFFFFFFL
+//DPCSTX4_DPCSTX_DEBUG_CONFIG
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT                                                       0x0
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT                                               0x1
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT                                            0x4
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT                                       0x8
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT                                                 0xe
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT                                          0x10
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK                                                         0x00000001L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK                                                 0x0000000EL
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK                                              0x00000070L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK                                         0x00000700L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK                                                   0x00004000L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK                                            0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec
+//RDPCSTX4_RDPCSTX_CNTL
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT                                                   0x0
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT                                                   0x4
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT                                                  0xc
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT                                                  0xd
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT                                                  0xe
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT                                                  0xf
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT                                                        0x10
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT                                                     0x11
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT                                            0x14
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT                                              0x18
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT                                       0x19
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT                                                0x1a
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT                                                     0x1f
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK                                                     0x00000001L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK                                                     0x00000010L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK                                                    0x00001000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK                                                    0x00002000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK                                                    0x00004000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK                                                    0x00008000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK                                                          0x00010000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK                                                       0x00020000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK                                              0x00F00000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK                                                0x01000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK                                         0x02000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK                                                  0x04000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK                                                       0x80000000L
+//RDPCSTX4_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT                                               0x0
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT                                          0x4
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT                                          0x5
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT                                          0x6
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT                                          0x7
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT                                        0x8
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT                                              0x9
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT                                        0xa
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT                                            0xc
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT                                                  0xd
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT                                            0xe
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT                                              0x10
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK                                                 0x00000001L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK                                            0x00000010L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK                                            0x00000020L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK                                            0x00000040L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK                                            0x00000080L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK                                          0x00000100L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK                                                0x00000200L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK                                          0x00000400L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK                                              0x00001000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK                                                    0x00002000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK                                              0x00004000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK                                                0x00010000L
+//RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT                                    0x0
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT                                 0x1
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT                                   0x2
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT                                       0x4
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT                                       0x5
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT                                       0x6
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT                                       0x7
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT                                        0x8
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT                             0x9
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT                               0xa
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT                                         0xc
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT                                  0x10
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT                            0x11
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT                              0x12
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT                                   0x14
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK                                      0x00000001L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK                                   0x00000002L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK                                     0x00000004L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK                                         0x00000010L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK                                         0x00000020L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK                                         0x00000040L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK                                         0x00000080L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK                                          0x00000100L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK                               0x00000200L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK                                 0x00000400L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK                                           0x00001000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK                                    0x00010000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK                              0x00020000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK                                0x00040000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK                                     0x00100000L
+//RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT                                        0x0
+#define RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK                                          0x00000001L
+//RDPCSTX4_RDPCS_TX_CR_ADDR
+#define RDPCSTX4_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT                                                    0x0
+#define RDPCSTX4_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK                                                      0x0000FFFFL
+//RDPCSTX4_RDPCS_TX_CR_DATA
+#define RDPCSTX4_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT                                                    0x0
+#define RDPCSTX4_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK                                                      0x0000FFFFL
+//RDPCSTX4_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT                                                 0x14
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT                                               0x18
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT                                           0x1c
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK                                                   0x00100000L
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK                                                 0x03000000L
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK                                             0x30000000L
+//RDPCSTX4_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT                                           0x0
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT                                    0xc
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT                                  0x1a
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT                                  0x1b
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT                                   0x1c
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT                                   0x1d
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT                                         0x1e
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK                                             0x00000FFFL
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK                                      0x03FFF000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK                                    0x04000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK                                    0x08000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK                                     0x10000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK                                     0x20000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK                                           0x40000000L
+//RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT                                    0x0
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT                                    0x2
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK                                      0x00000003L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK                                      0x00000004L
+//RDPCSTX4_RDPCSTX_SCRATCH
+#define RDPCSTX4_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT                                                      0x0
+#define RDPCSTX4_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK                                                        0xFFFFFFFFL
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT                      0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT              0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT                      0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK                        0x00000001L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK                0x00000010L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK                        0x0000FF00L
+//RDPCSTX4_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT                                                    0x0
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT                                        0x4
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT                                        0x7
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT                                          0x8
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT                                       0xf
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT                                          0x10
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT                                              0x18
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK                                                      0x00000001L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK                                          0x00000070L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK                                          0x00000080L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK                                            0x00001F00L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK                                         0x00008000L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK                                            0x00FF0000L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK                                                0xFF000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT                                                    0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT                                            0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT                                          0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT                                           0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT                                                  0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT                                          0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT                                                0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT                                            0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT                                                0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT                                                0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT                                              0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT                                               0x15
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT                                            0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT                                        0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT                                               0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT                                             0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT                                                  0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK                                                      0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK                                              0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK                                            0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK                                             0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK                                                    0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK                                            0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK                                                  0x00003E00L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK                                              0x0001C000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK                                                  0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK                                                  0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK                                                0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK                                                 0x00200000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK                                              0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK                                          0x02000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK                                                 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK                                               0x20000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK                                                    0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT                                               0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT                                               0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT                                           0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT                                               0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT                                           0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT                                              0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT                                               0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT                                           0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK                                                 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK                                                 0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK                                             0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK                                                 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK                                             0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK                                                0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK                                                 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK                                             0x00000080L
+//RDPCSTX4_RDPCSTX_PHY_CNTL2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT                                                  0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT                                 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT                                 0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT                                 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT                                 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT                                 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT                                 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT                                 0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT                                 0xb
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK                                                    0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK                                   0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK                                   0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK                                   0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK                                   0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK                                   0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK                                   0x00000200L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK                                   0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK                                   0x00000800L
+//RDPCSTX4_RDPCSTX_PHY_CNTL3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT                                             0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT                                           0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT                                           0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT                                           0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT                                               0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT                                               0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT                                             0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT                                           0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT                                           0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT                                           0xb
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT                                               0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT                                               0xd
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT                                             0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT                                           0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT                                           0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT                                           0x13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT                                               0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT                                               0x15
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT                                             0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT                                           0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT                                           0x1a
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT                                           0x1b
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT                                               0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT                                               0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK                                               0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK                                             0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK                                             0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK                                             0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK                                                 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK                                                 0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK                                               0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK                                             0x00000200L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK                                             0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK                                             0x00000800L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK                                                 0x00001000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK                                                 0x00002000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK                                               0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK                                             0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK                                             0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK                                             0x00080000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK                                                 0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK                                                 0x00200000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK                                               0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK                                             0x02000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK                                             0x04000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK                                             0x08000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK                                                 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK                                                 0x20000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT                                         0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT                                            0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT                                    0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT                                        0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT                                         0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT                                            0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT                                    0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT                                        0xf
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT                                         0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT                                            0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT                                    0x16
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT                                        0x17
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT                                         0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT                                            0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT                                    0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT                                        0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK                                           0x00000007L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK                                              0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK                                      0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK                                          0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK                                           0x00000700L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK                                              0x00001000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK                                      0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK                                          0x00008000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK                                           0x00070000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK                                              0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK                                      0x00400000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK                                          0x00800000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK                                           0x07000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK                                              0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK                                      0x40000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK                                          0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT                                               0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT                                              0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT                                             0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT                                         0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT                                      0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT                                               0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT                                              0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT                                             0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT                                         0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT                                      0xf
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT                                               0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT                                              0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT                                             0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT                                         0x16
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT                                      0x17
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT                                               0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT                                              0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT                                             0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT                                         0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT                                      0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK                                                 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK                                                0x0000000EL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK                                               0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK                                           0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK                                        0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK                                                 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK                                                0x00000E00L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK                                               0x00003000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK                                           0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK                                        0x00008000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK                                                 0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK                                                0x000E0000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK                                               0x00300000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK                                           0x00400000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK                                        0x00800000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK                                                 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK                                                0x0E000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK                                               0x30000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK                                           0x40000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK                                        0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT                                            0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT                                           0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT                                            0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT                                           0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT                                            0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT                                           0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT                                            0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT                                           0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT                                                0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT                                            0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT                                        0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT                                            0x13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT                                           0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK                                              0x00000003L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK                                             0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK                                              0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK                                             0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK                                              0x00000300L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK                                             0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK                                              0x00003000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK                                             0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK                                                  0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK                                              0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK                                          0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK                                              0x00080000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK                                             0x00100000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT                                       0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT                                      0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK                                         0x0000FFFFL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK                                        0xFFFF0000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT                                        0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK                                          0x000FFFFFL
+//RDPCSTX4_RDPCSTX_PHY_CNTL9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT                                    0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT                                   0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK                                      0x001FFFFFL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK                                     0x01000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT                                      0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK                                        0x0000FFFFL
+//RDPCSTX4_RDPCSTX_PHY_CNTL11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT                                     0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT                                     0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT                                    0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT                           0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK                                       0x0000FFF0L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK                                       0x00070000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK                                      0x00700000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK                             0x03000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT                                    0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT                                   0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT                                     0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT                                          0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT                                         0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK                                      0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK                                     0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK                                       0x00000070L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK                                            0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK                                           0x00000100L
+//RDPCSTX4_RDPCSTX_PHY_CNTL13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT                                 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT                                     0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT                                       0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT                               0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK                                   0x0FF00000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK                                       0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK                                         0x20000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK                                 0x40000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT                                      0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT                                       0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT                                        0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK                                        0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK                                         0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK                                          0x10000000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT                                             0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT                                        0x14
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK                                               0x000C0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK                                          0x00300000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE1
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT                                          0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT                                         0x19
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK                                            0x01FC0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK                                           0xFE000000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE2
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK                                             0x0003F000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE3
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT                                             0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT                                                0x18
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK                                               0x00FC0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK                                                  0x03000000L
+//RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT                                        0x0
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT                                        0x8
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK                                          0x0000007FL
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK                                          0x001FFF00L
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT                         0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT                       0x1
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT                       0x2
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT                       0x3
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT                           0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT                           0x5
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT                         0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT                       0x9
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT                       0xa
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT                       0xb
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT                           0xc
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT                           0xd
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT                         0x10
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT                       0x11
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT                       0x12
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT                       0x13
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT                           0x14
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT                           0x15
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT                         0x18
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT                       0x19
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT                       0x1a
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT                       0x1b
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT                           0x1c
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT                           0x1d
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK                           0x00000001L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK                         0x00000002L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK                         0x00000004L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK                         0x00000008L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK                             0x00000010L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK                             0x00000020L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK                           0x00000100L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK                         0x00000200L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK                         0x00000400L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK                         0x00000800L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK                             0x00001000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK                             0x00002000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK                           0x00010000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK                         0x00020000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK                         0x00040000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK                         0x00080000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK                             0x00100000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK                             0x00200000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK                           0x01000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK                         0x02000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK                         0x04000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK                         0x08000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK                             0x10000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK                             0x20000000L
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT                        0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT                       0x2
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT                        0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT                       0x6
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT                        0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT                       0xa
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT                        0xc
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT                       0xe
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT                            0x10
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT                        0x11
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT                    0x12
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT                        0x13
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT                       0x14
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK                          0x00000003L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK                         0x00000004L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK                          0x00000030L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK                         0x00000040L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK                          0x00000300L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK                         0x00000400L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK                          0x00003000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK                         0x00004000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK                              0x00010000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK                          0x00020000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK                      0x00040000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK                          0x00080000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK                         0x00100000L
+//RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT                                  0x0
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT                                0x4
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT                                  0x8
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK                                    0x00000001L
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK                                  0x00000010L
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK                                    0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr4_dispdec
+//DPCSSYS_CR4_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT                                                  0x0
+#define DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK                                                    0x0000FFFFL
+//DPCSSYS_CR4_DPCSSYS_CR_DATA
+#define DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT                                                  0x0
+#define DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK                                                    0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx5_dispdec
+//DPCSTX5_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT                                             0x0
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT                                                   0x1
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT                                             0x2
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT                                        0x3
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK                                               0x00000001L
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK                                                     0x00000002L
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK                                               0x00000004L
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK                                          0x00000008L
+//DPCSTX5_DPCSTX_TX_CNTL
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT                                                 0xc
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT                                             0xd
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT                                                      0xe
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT                                              0xf
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT                                                        0x10
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT                                                     0x11
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT                                            0x14
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT                                                     0x1f
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK                                                   0x00001000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK                                               0x00002000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK                                                        0x00004000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK                                                0x00008000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK                                                          0x00010000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK                                                       0x00020000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK                                              0x00F00000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK                                                       0x80000000L
+//DPCSTX5_DPCSTX_CBUS_CNTL
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT                                               0x0
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT                                                 0x1f
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK                                                 0x000000FFL
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK                                                   0x80000000L
+//DPCSTX5_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT                                          0x0
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT                                              0x1
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT                                        0x4
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT                                             0x8
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT                                             0x9
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT                                             0xa
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT                                             0xb
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT                                               0xc
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT                                         0x10
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT                                             0x14
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK                                            0x00000001L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK                                                0x00000002L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK                                          0x00000010L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK                                               0x00000100L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK                                               0x00000200L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK                                               0x00000400L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK                                               0x00000800L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK                                                 0x00001000L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK                                           0x00010000L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK                                               0x00100000L
+//DPCSTX5_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX5_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT                                           0x0
+#define DPCSTX5_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK                                             0x0003FFFFL
+//DPCSTX5_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX5_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT                                           0x0
+#define DPCSTX5_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK                                             0xFFFFFFFFL
+//DPCSTX5_DPCSTX_DEBUG_CONFIG
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT                                                       0x0
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT                                               0x1
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT                                            0x4
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT                                       0x8
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT                                                 0xe
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT                                          0x10
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK                                                         0x00000001L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK                                                 0x0000000EL
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK                                              0x00000070L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK                                         0x00000700L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK                                                   0x00004000L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK                                            0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec
+//RDPCSTX5_RDPCSTX_CNTL
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT                                                   0x0
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT                                                   0x4
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT                                                  0xc
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT                                                  0xd
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT                                                  0xe
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT                                                  0xf
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT                                                        0x10
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT                                                     0x11
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT                                            0x14
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT                                              0x18
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT                                       0x19
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT                                                0x1a
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT                                                     0x1f
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK                                                     0x00000001L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK                                                     0x00000010L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK                                                    0x00001000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK                                                    0x00002000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK                                                    0x00004000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK                                                    0x00008000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK                                                          0x00010000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK                                                       0x00020000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK                                              0x00F00000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK                                                0x01000000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK                                         0x02000000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK                                                  0x04000000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK                                                       0x80000000L
+//RDPCSTX5_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT                                               0x0
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT                                          0x4
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT                                          0x5
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT                                          0x6
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT                                          0x7
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT                                        0x8
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT                                              0x9
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT                                        0xa
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT                                            0xc
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT                                                  0xd
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT                                            0xe
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT                                              0x10
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK                                                 0x00000001L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK                                            0x00000010L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK                                            0x00000020L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK                                            0x00000040L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK                                            0x00000080L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK                                          0x00000100L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK                                                0x00000200L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK                                          0x00000400L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK                                              0x00001000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK                                                    0x00002000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK                                              0x00004000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK                                                0x00010000L
+//RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT                                    0x0
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT                                 0x1
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT                                   0x2
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT                                       0x4
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT                                       0x5
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT                                       0x6
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT                                       0x7
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT                                        0x8
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT                             0x9
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT                               0xa
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT                                         0xc
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT                                  0x10
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT                            0x11
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT                              0x12
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT                                   0x14
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK                                      0x00000001L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK                                   0x00000002L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK                                     0x00000004L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK                                         0x00000010L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK                                         0x00000020L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK                                         0x00000040L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK                                         0x00000080L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK                                          0x00000100L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK                               0x00000200L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK                                 0x00000400L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK                                           0x00001000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK                                    0x00010000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK                              0x00020000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK                                0x00040000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK                                     0x00100000L
+//RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT                                        0x0
+#define RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK                                          0x00000001L
+//RDPCSTX5_RDPCS_TX_CR_ADDR
+#define RDPCSTX5_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT                                                    0x0
+#define RDPCSTX5_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK                                                      0x0000FFFFL
+//RDPCSTX5_RDPCS_TX_CR_DATA
+#define RDPCSTX5_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT                                                    0x0
+#define RDPCSTX5_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK                                                      0x0000FFFFL
+//RDPCSTX5_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT                                                 0x14
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT                                               0x18
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT                                           0x1c
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK                                                   0x00100000L
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK                                                 0x03000000L
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK                                             0x30000000L
+//RDPCSTX5_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT                                           0x0
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT                                    0xc
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT                                  0x1a
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT                                  0x1b
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT                                   0x1c
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT                                   0x1d
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT                                         0x1e
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK                                             0x00000FFFL
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK                                      0x03FFF000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK                                    0x04000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK                                    0x08000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK                                     0x10000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK                                     0x20000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK                                           0x40000000L
+//RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT                                    0x0
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT                                    0x2
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK                                      0x00000003L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK                                      0x00000004L
+//RDPCSTX5_RDPCSTX_SCRATCH
+#define RDPCSTX5_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT                                                      0x0
+#define RDPCSTX5_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK                                                        0xFFFFFFFFL
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT                      0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT              0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT                      0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK                        0x00000001L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK                0x00000010L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK                        0x0000FF00L
+//RDPCSTX5_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT                                                    0x0
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT                                        0x4
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT                                        0x7
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT                                          0x8
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT                                       0xf
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT                                          0x10
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT                                              0x18
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK                                                      0x00000001L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK                                          0x00000070L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK                                          0x00000080L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK                                            0x00001F00L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK                                         0x00008000L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK                                            0x00FF0000L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK                                                0xFF000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT                                                    0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT                                            0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT                                          0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT                                           0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT                                                  0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT                                          0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT                                                0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT                                            0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT                                                0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT                                                0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT                                              0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT                                               0x15
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT                                            0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT                                        0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT                                               0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT                                             0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT                                                  0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK                                                      0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK                                              0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK                                            0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK                                             0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK                                                    0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK                                            0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK                                                  0x00003E00L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK                                              0x0001C000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK                                                  0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK                                                  0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK                                                0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK                                                 0x00200000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK                                              0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK                                          0x02000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK                                                 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK                                               0x20000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK                                                    0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT                                               0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT                                               0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT                                           0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT                                               0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT                                           0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT                                              0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT                                               0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT                                           0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK                                                 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK                                                 0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK                                             0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK                                                 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK                                             0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK                                                0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK                                                 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK                                             0x00000080L
+//RDPCSTX5_RDPCSTX_PHY_CNTL2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT                                                  0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT                                 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT                                 0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT                                 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT                                 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT                                 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT                                 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT                                 0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT                                 0xb
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK                                                    0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK                                   0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK                                   0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK                                   0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK                                   0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK                                   0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK                                   0x00000200L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK                                   0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK                                   0x00000800L
+//RDPCSTX5_RDPCSTX_PHY_CNTL3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT                                             0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT                                           0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT                                           0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT                                           0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT                                               0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT                                               0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT                                             0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT                                           0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT                                           0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT                                           0xb
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT                                               0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT                                               0xd
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT                                             0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT                                           0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT                                           0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT                                           0x13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT                                               0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT                                               0x15
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT                                             0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT                                           0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT                                           0x1a
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT                                           0x1b
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT                                               0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT                                               0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK                                               0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK                                             0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK                                             0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK                                             0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK                                                 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK                                                 0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK                                               0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK                                             0x00000200L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK                                             0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK                                             0x00000800L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK                                                 0x00001000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK                                                 0x00002000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK                                               0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK                                             0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK                                             0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK                                             0x00080000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK                                                 0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK                                                 0x00200000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK                                               0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK                                             0x02000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK                                             0x04000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK                                             0x08000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK                                                 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK                                                 0x20000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT                                         0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT                                            0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT                                    0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT                                        0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT                                         0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT                                            0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT                                    0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT                                        0xf
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT                                         0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT                                            0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT                                    0x16
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT                                        0x17
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT                                         0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT                                            0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT                                    0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT                                        0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK                                           0x00000007L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK                                              0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK                                      0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK                                          0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK                                           0x00000700L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK                                              0x00001000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK                                      0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK                                          0x00008000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK                                           0x00070000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK                                              0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK                                      0x00400000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK                                          0x00800000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK                                           0x07000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK                                              0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK                                      0x40000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK                                          0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT                                               0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT                                              0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT                                             0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT                                         0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT                                      0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT                                               0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT                                              0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT                                             0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT                                         0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT                                      0xf
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT                                               0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT                                              0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT                                             0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT                                         0x16
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT                                      0x17
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT                                               0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT                                              0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT                                             0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT                                         0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT                                      0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK                                                 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK                                                0x0000000EL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK                                               0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK                                           0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK                                        0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK                                                 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK                                                0x00000E00L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK                                               0x00003000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK                                           0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK                                        0x00008000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK                                                 0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK                                                0x000E0000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK                                               0x00300000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK                                           0x00400000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK                                        0x00800000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK                                                 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK                                                0x0E000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK                                               0x30000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK                                           0x40000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK                                        0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT                                            0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT                                           0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT                                            0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT                                           0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT                                            0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT                                           0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT                                            0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT                                           0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT                                                0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT                                            0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT                                        0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT                                            0x13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT                                           0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK                                              0x00000003L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK                                             0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK                                              0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK                                             0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK                                              0x00000300L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK                                             0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK                                              0x00003000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK                                             0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK                                                  0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK                                              0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK                                          0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK                                              0x00080000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK                                             0x00100000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT                                       0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT                                      0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK                                         0x0000FFFFL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK                                        0xFFFF0000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT                                        0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK                                          0x000FFFFFL
+//RDPCSTX5_RDPCSTX_PHY_CNTL9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT                                    0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT                                   0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK                                      0x001FFFFFL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK                                     0x01000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT                                      0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK                                        0x0000FFFFL
+//RDPCSTX5_RDPCSTX_PHY_CNTL11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT                                     0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT                                     0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT                                    0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT                           0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK                                       0x0000FFF0L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK                                       0x00070000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK                                      0x00700000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK                             0x03000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT                                    0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT                                   0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT                                     0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT                                          0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT                                         0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK                                      0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK                                     0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK                                       0x00000070L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK                                            0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK                                           0x00000100L
+//RDPCSTX5_RDPCSTX_PHY_CNTL13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT                                 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT                                     0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT                                       0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT                               0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK                                   0x0FF00000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK                                       0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK                                         0x20000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK                                 0x40000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT                                      0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT                                       0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT                                        0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK                                        0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK                                         0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK                                          0x10000000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT                                             0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT                                        0x14
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK                                               0x000C0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK                                          0x00300000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE1
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT                                          0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT                                         0x19
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK                                            0x01FC0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK                                           0xFE000000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE2
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK                                             0x0003F000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE3
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT                                           0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT                                            0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT                                           0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT                                             0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT                                                0x18
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK                                             0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK                                              0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK                                             0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK                                               0x00FC0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK                                                  0x03000000L
+//RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT                                        0x0
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT                                        0x8
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK                                          0x0000007FL
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK                                          0x001FFF00L
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT                         0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT                       0x1
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT                       0x2
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT                       0x3
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT                           0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT                           0x5
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT                         0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT                       0x9
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT                       0xa
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT                       0xb
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT                           0xc
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT                           0xd
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT                         0x10
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT                       0x11
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT                       0x12
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT                       0x13
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT                           0x14
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT                           0x15
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT                         0x18
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT                       0x19
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT                       0x1a
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT                       0x1b
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT                           0x1c
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT                           0x1d
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK                           0x00000001L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK                         0x00000002L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK                         0x00000004L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK                         0x00000008L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK                             0x00000010L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK                             0x00000020L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK                           0x00000100L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK                         0x00000200L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK                         0x00000400L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK                         0x00000800L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK                             0x00001000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK                             0x00002000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK                           0x00010000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK                         0x00020000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK                         0x00040000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK                         0x00080000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK                             0x00100000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK                             0x00200000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK                           0x01000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK                         0x02000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK                         0x04000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK                         0x08000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK                             0x10000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK                             0x20000000L
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT                        0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT                       0x2
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT                        0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT                       0x6
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT                        0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT                       0xa
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT                        0xc
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT                       0xe
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT                            0x10
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT                        0x11
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT                    0x12
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT                        0x13
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT                       0x14
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK                          0x00000003L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK                         0x00000004L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK                          0x00000030L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK                         0x00000040L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK                          0x00000300L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK                         0x00000400L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK                          0x00003000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK                         0x00004000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK                              0x00010000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK                          0x00020000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK                      0x00040000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK                          0x00080000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK                         0x00100000L
+//RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT                                  0x0
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT                                0x4
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT                                  0x8
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK                                    0x00000001L
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK                                  0x00000010L
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK                                    0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr5_dispdec
+//DPCSSYS_CR5_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR5_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT                                                  0x0
+#define DPCSSYS_CR5_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK                                                    0x0000FFFFL
+//DPCSSYS_CR5_DPCSSYS_CR_DATA
+#define DPCSSYS_CR5_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT                                                  0x0
+#define DPCSSYS_CR5_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK                                                    0x0000FFFFL
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h
similarity index 100%
rename from drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_offset.h
rename to drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h
similarity index 100%
rename from drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_sh_mask.h
rename to drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
index 2bfaaa8..d984c91 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
@@ -2224,6 +2224,14 @@
 #define mmCOMPUTE_STATIC_THREAD_MGMT_SE2_BASE_IDX                                                      0
 #define mmCOMPUTE_STATIC_THREAD_MGMT_SE3                                                               0x0e1a
 #define mmCOMPUTE_STATIC_THREAD_MGMT_SE3_BASE_IDX                                                      0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE4                                                               0x0e25
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE4_BASE_IDX                                                      0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE5                                                               0x0e26
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE5_BASE_IDX                                                      0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE6                                                               0x0e27
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE6_BASE_IDX                                                      0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE7                                                               0x0e28
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE7_BASE_IDX                                                      0
 #define mmCOMPUTE_RESTART_X                                                                            0x0e1b
 #define mmCOMPUTE_RESTART_X_BASE_IDX                                                                   0
 #define mmCOMPUTE_RESTART_Y                                                                            0x0e1c
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index d4c613a..ea316d8 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -2060,7 +2060,8 @@
 
 // addressBlock: gc_sqdec
 //SQ_CONFIG
-#define SQ_CONFIG__UNUSED__SHIFT                                                                              0x0
+#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT__SHIFT                                                             0x0
+#define SQ_CONFIG__UNUSED__SHIFT                                                                              0x1
 #define SQ_CONFIG__OVERRIDE_ALU_BUSY__SHIFT                                                                   0x7
 #define SQ_CONFIG__DEBUG_EN__SHIFT                                                                            0x8
 #define SQ_CONFIG__DEBUG_SINGLE_MEMOP__SHIFT                                                                  0x9
@@ -2079,7 +2080,8 @@
 #define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING__SHIFT                                                  0x1d
 #define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE__SHIFT                                                            0x1e
 #define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE__SHIFT                                                            0x1f
-#define SQ_CONFIG__UNUSED_MASK                                                                                0x0000007FL
+#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT_MASK                                                               0x00000001L
+#define SQ_CONFIG__UNUSED_MASK                                                                                0x0000007EL
 #define SQ_CONFIG__OVERRIDE_ALU_BUSY_MASK                                                                     0x00000080L
 #define SQ_CONFIG__DEBUG_EN_MASK                                                                              0x00000100L
 #define SQ_CONFIG__DEBUG_SINGLE_MEMOP_MASK                                                                    0x00000200L
@@ -8739,10 +8741,16 @@
 #define TCP_ADDR_CONFIG__NUM_BANKS__SHIFT                                                                     0x4
 #define TCP_ADDR_CONFIG__COLHI_WIDTH__SHIFT                                                                   0x6
 #define TCP_ADDR_CONFIG__RB_SPLIT_COLHI__SHIFT                                                                0x9
+#define TCP_ADDR_CONFIG__ENABLE64KHASH__SHIFT                                                                 0xb
+#define TCP_ADDR_CONFIG__ENABLE2MHASH__SHIFT                                                                  0xc
+#define TCP_ADDR_CONFIG__ENABLE1GHASH__SHIFT                                                                  0xd
 #define TCP_ADDR_CONFIG__NUM_TCC_BANKS_MASK                                                                   0x0000000FL
 #define TCP_ADDR_CONFIG__NUM_BANKS_MASK                                                                       0x00000030L
 #define TCP_ADDR_CONFIG__COLHI_WIDTH_MASK                                                                     0x000001C0L
 #define TCP_ADDR_CONFIG__RB_SPLIT_COLHI_MASK                                                                  0x00000200L
+#define TCP_ADDR_CONFIG__ENABLE64KHASH_MASK                                                                   0x00000800L
+#define TCP_ADDR_CONFIG__ENABLE2MHASH_MASK                                                                    0x00001000L
+#define TCP_ADDR_CONFIG__ENABLE1GHASH_MASK                                                                    0x00002000L
 //TCP_CREDIT
 #define TCP_CREDIT__LFIFO_CREDIT__SHIFT                                                                       0x0
 #define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT                                                                    0x10
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
new file mode 100644
index 0000000..f41556a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _gc_9_4_1_OFFSET_HEADER
+#define _gc_9_4_1_OFFSET_HEADER
+
+// addressBlock: gc_grbmdec
+// base address: 0x8000
+#define mmGRBM_CNTL                                                                                    0x0000
+#define mmGRBM_CNTL_BASE_IDX                                                                           0
+#define mmGRBM_SKEW_CNTL                                                                               0x0001
+#define mmGRBM_SKEW_CNTL_BASE_IDX                                                                      0
+#define mmGRBM_STATUS2                                                                                 0x0002
+#define mmGRBM_STATUS2_BASE_IDX                                                                        0
+#define mmGRBM_PWR_CNTL                                                                                0x0003
+#define mmGRBM_PWR_CNTL_BASE_IDX                                                                       0
+#define mmGRBM_STATUS                                                                                  0x0004
+#define mmGRBM_STATUS_BASE_IDX                                                                         0
+#define mmGRBM_STATUS_SE0                                                                              0x0005
+#define mmGRBM_STATUS_SE0_BASE_IDX                                                                     0
+#define mmGRBM_STATUS_SE1                                                                              0x0006
+#define mmGRBM_STATUS_SE1_BASE_IDX                                                                     0
+#define mmGRBM_SOFT_RESET                                                                              0x0008
+#define mmGRBM_SOFT_RESET_BASE_IDX                                                                     0
+#define mmGRBM_GFX_CLKEN_CNTL                                                                          0x000c
+#define mmGRBM_GFX_CLKEN_CNTL_BASE_IDX                                                                 0
+#define mmGRBM_WAIT_IDLE_CLOCKS                                                                        0x000d
+#define mmGRBM_WAIT_IDLE_CLOCKS_BASE_IDX                                                               0
+#define mmGRBM_STATUS_SE2                                                                              0x000e
+#define mmGRBM_STATUS_SE2_BASE_IDX                                                                     0
+#define mmGRBM_STATUS_SE3                                                                              0x000f
+#define mmGRBM_STATUS_SE3_BASE_IDX                                                                     0
+#define mmGRBM_READ_ERROR                                                                              0x0016
+#define mmGRBM_READ_ERROR_BASE_IDX                                                                     0
+#define mmGRBM_READ_ERROR2                                                                             0x0017
+#define mmGRBM_READ_ERROR2_BASE_IDX                                                                    0
+#define mmGRBM_INT_CNTL                                                                                0x0018
+#define mmGRBM_INT_CNTL_BASE_IDX                                                                       0
+#define mmGRBM_TRAP_OP                                                                                 0x0019
+#define mmGRBM_TRAP_OP_BASE_IDX                                                                        0
+#define mmGRBM_TRAP_ADDR                                                                               0x001a
+#define mmGRBM_TRAP_ADDR_BASE_IDX                                                                      0
+#define mmGRBM_TRAP_ADDR_MSK                                                                           0x001b
+#define mmGRBM_TRAP_ADDR_MSK_BASE_IDX                                                                  0
+#define mmGRBM_TRAP_WD                                                                                 0x001c
+#define mmGRBM_TRAP_WD_BASE_IDX                                                                        0
+#define mmGRBM_TRAP_WD_MSK                                                                             0x001d
+#define mmGRBM_TRAP_WD_MSK_BASE_IDX                                                                    0
+#define mmGRBM_DSM_BYPASS                                                                              0x001e
+#define mmGRBM_DSM_BYPASS_BASE_IDX                                                                     0
+#define mmGRBM_WRITE_ERROR                                                                             0x001f
+#define mmGRBM_WRITE_ERROR_BASE_IDX                                                                    0
+#define mmGRBM_IOV_ERROR                                                                               0x0020
+#define mmGRBM_IOV_ERROR_BASE_IDX                                                                      0
+#define mmGRBM_CHIP_REVISION                                                                           0x0021
+#define mmGRBM_CHIP_REVISION_BASE_IDX                                                                  0
+#define mmGRBM_GFX_CNTL                                                                                0x0022
+#define mmGRBM_GFX_CNTL_BASE_IDX                                                                       0
+#define mmGRBM_RSMU_CFG                                                                                0x0023
+#define mmGRBM_RSMU_CFG_BASE_IDX                                                                       0
+#define mmGRBM_IH_CREDIT                                                                               0x0024
+#define mmGRBM_IH_CREDIT_BASE_IDX                                                                      0
+#define mmGRBM_PWR_CNTL2                                                                               0x0025
+#define mmGRBM_PWR_CNTL2_BASE_IDX                                                                      0
+#define mmGRBM_UTCL2_INVAL_RANGE_START                                                                 0x0026
+#define mmGRBM_UTCL2_INVAL_RANGE_START_BASE_IDX                                                        0
+#define mmGRBM_UTCL2_INVAL_RANGE_END                                                                   0x0027
+#define mmGRBM_UTCL2_INVAL_RANGE_END_BASE_IDX                                                          0
+#define mmGRBM_RSMU_READ_ERROR                                                                         0x0028
+#define mmGRBM_RSMU_READ_ERROR_BASE_IDX                                                                0
+#define mmGRBM_CHICKEN_BITS                                                                            0x0029
+#define mmGRBM_CHICKEN_BITS_BASE_IDX                                                                   0
+#define mmGRBM_FENCE_RANGE0                                                                            0x002a
+#define mmGRBM_FENCE_RANGE0_BASE_IDX                                                                   0
+#define mmGRBM_FENCE_RANGE1                                                                            0x002b
+#define mmGRBM_FENCE_RANGE1_BASE_IDX                                                                   0
+#define mmGRBM_NOWHERE                                                                                 0x003f
+#define mmGRBM_NOWHERE_BASE_IDX                                                                        0
+#define mmGRBM_SCRATCH_REG0                                                                            0x0040
+#define mmGRBM_SCRATCH_REG0_BASE_IDX                                                                   0
+#define mmGRBM_SCRATCH_REG1                                                                            0x0041
+#define mmGRBM_SCRATCH_REG1_BASE_IDX                                                                   0
+#define mmGRBM_SCRATCH_REG2                                                                            0x0042
+#define mmGRBM_SCRATCH_REG2_BASE_IDX                                                                   0
+#define mmGRBM_SCRATCH_REG3                                                                            0x0043
+#define mmGRBM_SCRATCH_REG3_BASE_IDX                                                                   0
+#define mmGRBM_SCRATCH_REG4                                                                            0x0044
+#define mmGRBM_SCRATCH_REG4_BASE_IDX                                                                   0
+#define mmGRBM_SCRATCH_REG5                                                                            0x0045
+#define mmGRBM_SCRATCH_REG5_BASE_IDX                                                                   0
+#define mmGRBM_SCRATCH_REG6                                                                            0x0046
+#define mmGRBM_SCRATCH_REG6_BASE_IDX                                                                   0
+#define mmGRBM_SCRATCH_REG7                                                                            0x0047
+#define mmGRBM_SCRATCH_REG7_BASE_IDX                                                                   0
+
+// addressBlock: gc_cppdec2
+// base address: 0xc600
+#define mmCPF_EDC_TAG_CNT                                                                              0x1189
+#define mmCPF_EDC_TAG_CNT_BASE_IDX                                                                     0
+#define mmCPF_EDC_ROQ_CNT                                                                              0x118a
+#define mmCPF_EDC_ROQ_CNT_BASE_IDX                                                                     0
+#define mmCPG_EDC_TAG_CNT                                                                              0x118b
+#define mmCPG_EDC_TAG_CNT_BASE_IDX                                                                     0
+#define mmCPG_EDC_DMA_CNT                                                                              0x118d
+#define mmCPG_EDC_DMA_CNT_BASE_IDX                                                                     0
+#define mmCPC_EDC_SCRATCH_CNT                                                                          0x118e
+#define mmCPC_EDC_SCRATCH_CNT_BASE_IDX                                                                 0
+#define mmCPC_EDC_UCODE_CNT                                                                            0x118f
+#define mmCPC_EDC_UCODE_CNT_BASE_IDX                                                                   0
+#define mmDC_EDC_STATE_CNT                                                                             0x1191
+#define mmDC_EDC_STATE_CNT_BASE_IDX                                                                    0
+#define mmDC_EDC_CSINVOC_CNT                                                                           0x1192
+#define mmDC_EDC_CSINVOC_CNT_BASE_IDX                                                                  0
+#define mmDC_EDC_RESTORE_CNT                                                                           0x1193
+#define mmDC_EDC_RESTORE_CNT_BASE_IDX                                                                  0
+
+// addressBlock: gc_gdsdec
+// base address: 0x9700
+#define mmGDS_EDC_CNT                                                                                  0x05c5
+#define mmGDS_EDC_CNT_BASE_IDX                                                                         0
+#define mmGDS_EDC_GRBM_CNT                                                                             0x05c6
+#define mmGDS_EDC_GRBM_CNT_BASE_IDX                                                                    0
+#define mmGDS_EDC_OA_DED                                                                               0x05c7
+#define mmGDS_EDC_OA_DED_BASE_IDX                                                                      0
+#define mmGDS_EDC_OA_PHY_CNT                                                                           0x05cb
+#define mmGDS_EDC_OA_PHY_CNT_BASE_IDX                                                                  0
+#define mmGDS_EDC_OA_PIPE_CNT                                                                          0x05cc
+#define mmGDS_EDC_OA_PIPE_CNT_BASE_IDX                                                                 0
+
+// addressBlock: gc_shsdec
+// base address: 0x9000
+#define mmSPI_EDC_CNT                                                                                  0x0445
+#define mmSPI_EDC_CNT_BASE_IDX                                                                         0
+
+// addressBlock: gc_sqdec
+// base address: 0x8c00
+#define mmSQC_EDC_CNT2                                                                                 0x032c
+#define mmSQC_EDC_CNT2_BASE_IDX                                                                        0
+#define mmSQC_EDC_CNT3                                                                                 0x032d
+#define mmSQC_EDC_CNT3_BASE_IDX                                                                        0
+#define mmSQC_EDC_PARITY_CNT3                                                                          0x032e
+#define mmSQC_EDC_PARITY_CNT3_BASE_IDX                                                                 0
+#define mmSQC_EDC_CNT                                                                                  0x03a2
+#define mmSQC_EDC_CNT_BASE_IDX                                                                         0
+#define mmSQ_EDC_SEC_CNT                                                                               0x03a3
+#define mmSQ_EDC_SEC_CNT_BASE_IDX                                                                      0
+#define mmSQ_EDC_DED_CNT                                                                               0x03a4
+#define mmSQ_EDC_DED_CNT_BASE_IDX                                                                      0
+#define mmSQ_EDC_INFO                                                                                  0x03a5
+#define mmSQ_EDC_INFO_BASE_IDX                                                                         0
+#define mmSQ_EDC_CNT                                                                                   0x03a6
+#define mmSQ_EDC_CNT_BASE_IDX                                                                          0
+
+// addressBlock: gc_tpdec
+// base address: 0x9400
+#define mmTA_EDC_CNT                                                                                   0x0586
+#define mmTA_EDC_CNT_BASE_IDX                                                                          0
+
+// addressBlock: gc_tcdec
+// base address: 0xac00
+#define mmTCP_EDC_CNT                                                                                  0x0b17
+#define mmTCP_EDC_CNT_BASE_IDX                                                                         0
+#define mmTCP_EDC_CNT_NEW                                                                              0x0b18
+#define mmTCP_EDC_CNT_NEW_BASE_IDX                                                                     0
+#define mmTCP_ATC_EDC_GATCL1_CNT                                                                       0x12b1
+#define mmTCP_ATC_EDC_GATCL1_CNT_BASE_IDX                                                              0
+#define mmTCI_EDC_CNT                                                                                  0x0b60
+#define mmTCI_EDC_CNT_BASE_IDX                                                                         0
+#define mmTCC_EDC_CNT                                                                                  0x0b82
+#define mmTCC_EDC_CNT_BASE_IDX                                                                         0
+#define mmTCC_EDC_CNT2                                                                                 0x0b83
+#define mmTCC_EDC_CNT2_BASE_IDX                                                                        0
+#define mmTCA_EDC_CNT                                                                                  0x0bc5
+#define mmTCA_EDC_CNT_BASE_IDX                                                                         0
+
+// addressBlock: gc_tpdec
+// base address: 0x9400
+#define mmTD_EDC_CNT                                                                                   0x052e
+#define mmTD_EDC_CNT_BASE_IDX                                                                          0
+#define mmTA_EDC_CNT                                                                                   0x0586
+#define mmTA_EDC_CNT_BASE_IDX                                                                          0
+
+// addressBlock: gc_ea_gceadec2
+// base address: 0x9c00
+#define mmGCEA_EDC_CNT                                                                                 0x0706
+#define mmGCEA_EDC_CNT_BASE_IDX                                                                        0
+#define mmGCEA_EDC_CNT2                                                                                0x0707
+#define mmGCEA_EDC_CNT2_BASE_IDX                                                                       0
+#define mmGCEA_EDC_CNT3                                                                                0x071b
+#define mmGCEA_EDC_CNT3_BASE_IDX                                                                       0
+
+// addressBlock: gc_gfxudec
+// base address: 0x30000
+#define mmSCRATCH_REG0                                                                                 0x2040
+#define mmSCRATCH_REG0_BASE_IDX                                                                        1
+#define mmSCRATCH_REG1                                                                                 0x2041
+#define mmSCRATCH_REG1_BASE_IDX                                                                        1
+#define mmSCRATCH_REG2                                                                                 0x2042
+#define mmSCRATCH_REG2_BASE_IDX                                                                        1
+#define mmSCRATCH_REG3                                                                                 0x2043
+#define mmSCRATCH_REG3_BASE_IDX                                                                        1
+#define mmSCRATCH_REG4                                                                                 0x2044
+#define mmSCRATCH_REG4_BASE_IDX                                                                        1
+#define mmSCRATCH_REG5                                                                                 0x2045
+#define mmSCRATCH_REG5_BASE_IDX                                                                        1
+#define mmSCRATCH_REG6                                                                                 0x2046
+#define mmSCRATCH_REG6_BASE_IDX                                                                        1
+#define mmSCRATCH_REG7                                                                                 0x2047
+#define mmSCRATCH_REG7_BASE_IDX                                                                        1
+#define mmGRBM_GFX_INDEX                                                                               0x2200
+#define mmGRBM_GFX_INDEX_BASE_IDX                                                                      1
+
+// addressBlock: gc_utcl2_atcl2dec
+// base address: 0xa000
+#define mmATC_L2_CACHE_4K_DSM_INDEX                                                                    0x080e
+#define mmATC_L2_CACHE_4K_DSM_INDEX_BASE_IDX                                                           0
+#define mmATC_L2_CACHE_2M_DSM_INDEX                                                                    0x080f
+#define mmATC_L2_CACHE_2M_DSM_INDEX_BASE_IDX                                                           0
+#define mmATC_L2_CACHE_4K_DSM_CNTL                                                                     0x0810
+#define mmATC_L2_CACHE_4K_DSM_CNTL_BASE_IDX                                                            0
+#define mmATC_L2_CACHE_2M_DSM_CNTL                                                                     0x0811
+#define mmATC_L2_CACHE_2M_DSM_CNTL_BASE_IDX                                                            0
+
+// addressBlock: gc_utcl2_vml2pfdec
+// base address: 0xa100
+#define mmVML2_MEM_ECC_INDEX                                                                           0x0860
+#define mmVML2_MEM_ECC_INDEX_BASE_IDX                                                                  0
+#define mmVML2_WALKER_MEM_ECC_INDEX                                                                    0x0861
+#define mmVML2_WALKER_MEM_ECC_INDEX_BASE_IDX                                                           0
+#define mmUTCL2_MEM_ECC_INDEX                                                                          0x0862
+#define mmUTCL2_MEM_ECC_INDEX_BASE_IDX                                                                 0
+
+#define mmVML2_MEM_ECC_CNTL                                                                            0x0863
+#define mmVML2_MEM_ECC_CNTL_BASE_IDX                                                                   0
+#define mmVML2_WALKER_MEM_ECC_CNTL                                                                     0x0864
+#define mmVML2_WALKER_MEM_ECC_CNTL_BASE_IDX                                                            0
+#define mmUTCL2_MEM_ECC_CNTL                                                                           0x0865
+#define mmUTCL2_MEM_ECC_CNTL_BASE_IDX                                                                  0
+
+// addressBlock: gc_rlcpdec
+// base address: 0x3b000
+#define mmRLC_EDC_CNT                                                                                  0x4d40
+#define mmRLC_EDC_CNT_BASE_IDX                                                                         1
+#define mmRLC_EDC_CNT2                                                                                 0x4d41
+#define mmRLC_EDC_CNT2_BASE_IDX                                                                        1
+
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
new file mode 100644
index 0000000..f26246a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
@@ -0,0 +1,748 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _gc_9_4_1_SH_MASK_HEADER
+#define _gc_9_4_1_SH_MASK_HEADER
+
+// addressBlock: gc_cppdec2
+//CPF_EDC_TAG_CNT
+#define CPF_EDC_TAG_CNT__DED_COUNT__SHIFT                                                                     0x0
+#define CPF_EDC_TAG_CNT__SEC_COUNT__SHIFT                                                                     0x2
+#define CPF_EDC_TAG_CNT__DED_COUNT_MASK                                                                       0x00000003L
+#define CPF_EDC_TAG_CNT__SEC_COUNT_MASK                                                                       0x0000000CL
+//CPF_EDC_ROQ_CNT
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME1__SHIFT                                                                 0x0
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1__SHIFT                                                                 0x2
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME2__SHIFT                                                                 0x4
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2__SHIFT                                                                 0x6
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME1_MASK                                                                   0x00000003L
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1_MASK                                                                   0x0000000CL
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME2_MASK                                                                   0x00000030L
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2_MASK                                                                   0x000000C0L
+//CPG_EDC_TAG_CNT
+#define CPG_EDC_TAG_CNT__DED_COUNT__SHIFT                                                                     0x0
+#define CPG_EDC_TAG_CNT__SEC_COUNT__SHIFT                                                                     0x2
+#define CPG_EDC_TAG_CNT__DED_COUNT_MASK                                                                       0x00000003L
+#define CPG_EDC_TAG_CNT__SEC_COUNT_MASK                                                                       0x0000000CL
+//CPG_EDC_DMA_CNT
+#define CPG_EDC_DMA_CNT__ROQ_DED_COUNT__SHIFT                                                                 0x0
+#define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT__SHIFT                                                                 0x2
+#define CPG_EDC_DMA_CNT__TAG_DED_COUNT__SHIFT                                                                 0x4
+#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT__SHIFT                                                                 0x6
+#define CPG_EDC_DMA_CNT__ROQ_DED_COUNT_MASK                                                                   0x00000003L
+#define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT_MASK                                                                   0x0000000CL
+#define CPG_EDC_DMA_CNT__TAG_DED_COUNT_MASK                                                                   0x00000030L
+#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT_MASK                                                                   0x000000C0L
+//CPC_EDC_SCRATCH_CNT
+#define CPC_EDC_SCRATCH_CNT__DED_COUNT__SHIFT                                                                 0x0
+#define CPC_EDC_SCRATCH_CNT__SEC_COUNT__SHIFT                                                                 0x2
+#define CPC_EDC_SCRATCH_CNT__DED_COUNT_MASK                                                                   0x00000003L
+#define CPC_EDC_SCRATCH_CNT__SEC_COUNT_MASK                                                                   0x0000000CL
+//CPC_EDC_UCODE_CNT
+#define CPC_EDC_UCODE_CNT__DED_COUNT__SHIFT                                                                   0x0
+#define CPC_EDC_UCODE_CNT__SEC_COUNT__SHIFT                                                                   0x2
+#define CPC_EDC_UCODE_CNT__DED_COUNT_MASK                                                                     0x00000003L
+#define CPC_EDC_UCODE_CNT__SEC_COUNT_MASK                                                                     0x0000000CL
+//DC_EDC_STATE_CNT
+#define DC_EDC_STATE_CNT__DED_COUNT_ME1__SHIFT                                                                0x0
+#define DC_EDC_STATE_CNT__SEC_COUNT_ME1__SHIFT                                                                0x2
+#define DC_EDC_STATE_CNT__DED_COUNT_ME1_MASK                                                                  0x00000003L
+#define DC_EDC_STATE_CNT__SEC_COUNT_ME1_MASK                                                                  0x0000000CL
+//DC_EDC_CSINVOC_CNT
+#define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1__SHIFT                                                              0x0
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1__SHIFT                                                              0x2
+#define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1__SHIFT                                                             0x4
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1__SHIFT                                                             0x6
+#define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1_MASK                                                                0x00000003L
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1_MASK                                                                0x0000000CL
+#define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1_MASK                                                               0x00000030L
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1_MASK                                                               0x000000C0L
+//DC_EDC_RESTORE_CNT
+#define DC_EDC_RESTORE_CNT__DED_COUNT_ME1__SHIFT                                                              0x0
+#define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1__SHIFT                                                              0x2
+#define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1__SHIFT                                                             0x4
+#define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1__SHIFT                                                             0x6
+#define DC_EDC_RESTORE_CNT__DED_COUNT_ME1_MASK                                                                0x00000003L
+#define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1_MASK                                                                0x0000000CL
+#define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1_MASK                                                               0x00000030L
+#define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1_MASK                                                               0x000000C0L
+
+// addressBlock: gc_gdsdec
+//GDS_EDC_CNT
+#define GDS_EDC_CNT__GDS_MEM_DED__SHIFT                                                                       0x0
+#define GDS_EDC_CNT__GDS_MEM_SEC__SHIFT                                                                       0x4
+#define GDS_EDC_CNT__UNUSED__SHIFT                                                                            0x6
+#define GDS_EDC_CNT__GDS_MEM_DED_MASK                                                                         0x00000003L
+#define GDS_EDC_CNT__GDS_MEM_SEC_MASK                                                                         0x00000030L
+#define GDS_EDC_CNT__UNUSED_MASK                                                                              0xFFFFFFC0L
+//GDS_EDC_GRBM_CNT
+#define GDS_EDC_GRBM_CNT__DED__SHIFT                                                                          0x0
+#define GDS_EDC_GRBM_CNT__SEC__SHIFT                                                                          0x2
+#define GDS_EDC_GRBM_CNT__UNUSED__SHIFT                                                                       0x4
+#define GDS_EDC_GRBM_CNT__DED_MASK                                                                            0x00000003L
+#define GDS_EDC_GRBM_CNT__SEC_MASK                                                                            0x0000000CL
+#define GDS_EDC_GRBM_CNT__UNUSED_MASK                                                                         0xFFFFFFF0L
+//GDS_EDC_OA_DED
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT                                                            0x0
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT                                                            0x1
+#define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT                                                                     0x2
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED__SHIFT                                                             0x3
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT                                                                  0x4
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT                                                                  0x5
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT                                                                  0x6
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT                                                                  0x7
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT                                                                  0x8
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT                                                                  0x9
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT                                                                  0xa
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT                                                                  0xb
+#define GDS_EDC_OA_DED__UNUSED1__SHIFT                                                                        0xc
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK                                                              0x00000001L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK                                                              0x00000002L
+#define GDS_EDC_OA_DED__ME0_CS_DED_MASK                                                                       0x00000004L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED_MASK                                                               0x00000008L
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK                                                                    0x00000010L
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK                                                                    0x00000020L
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK                                                                    0x00000040L
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK                                                                    0x00000080L
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK                                                                    0x00000100L
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK                                                                    0x00000200L
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK                                                                    0x00000400L
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK                                                                    0x00000800L
+#define GDS_EDC_OA_DED__UNUSED1_MASK                                                                          0xFFFFF000L
+//GDS_EDC_OA_PHY_CNT
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC__SHIFT                                                        0x0
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED__SHIFT                                                        0x2
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC__SHIFT                                                        0x4
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED__SHIFT                                                        0x6
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC__SHIFT                                                       0x8
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED__SHIFT                                                       0xa
+#define GDS_EDC_OA_PHY_CNT__UNUSED1__SHIFT                                                                    0xc
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC_MASK                                                          0x00000003L
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED_MASK                                                          0x0000000CL
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC_MASK                                                          0x00000030L
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED_MASK                                                          0x000000C0L
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC_MASK                                                         0x00000300L
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED_MASK                                                         0x00000C00L
+#define GDS_EDC_OA_PHY_CNT__UNUSED1_MASK                                                                      0xFFFFF000L
+//GDS_EDC_OA_PIPE_CNT
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC__SHIFT                                                    0x0
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED__SHIFT                                                    0x2
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC__SHIFT                                                    0x4
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED__SHIFT                                                    0x6
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC__SHIFT                                                    0x8
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED__SHIFT                                                    0xa
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC__SHIFT                                                    0xc
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED__SHIFT                                                    0xe
+#define GDS_EDC_OA_PIPE_CNT__UNUSED__SHIFT                                                                    0x10
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC_MASK                                                      0x00000003L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED_MASK                                                      0x0000000CL
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC_MASK                                                      0x00000030L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED_MASK                                                      0x000000C0L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC_MASK                                                      0x00000300L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED_MASK                                                      0x00000C00L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC_MASK                                                      0x00003000L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED_MASK                                                      0x0000C000L
+#define GDS_EDC_OA_PIPE_CNT__UNUSED_MASK                                                                      0xFFFF0000L
+
+// addressBlock: gc_shsdec
+//SPI_EDC_CNT
+#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT__SHIFT                                                              0x0
+#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT__SHIFT                                                              0x2
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT__SHIFT                                                          0x4
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT__SHIFT                                                          0x6
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT__SHIFT                                                         0x8
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT__SHIFT                                                         0xa
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT__SHIFT                                                         0xc
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT__SHIFT                                                         0xe
+#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT__SHIFT                                                            0x10
+#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT__SHIFT                                                            0x12
+#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT_MASK                                                                0x00000003L
+#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT_MASK                                                                0x0000000CL
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT_MASK                                                            0x00000030L
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT_MASK                                                            0x000000C0L
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT_MASK                                                           0x00000300L
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT_MASK                                                           0x00000C00L
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT_MASK                                                           0x00003000L
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT_MASK                                                           0x0000C000L
+#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT_MASK                                                              0x00030000L
+#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT_MASK                                                              0x000C0000L
+
+// addressBlock: gc_sqdec
+//SQC_EDC_CNT2
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT__SHIFT                                                     0x0
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT__SHIFT                                                     0x2
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT__SHIFT                                                    0x4
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT__SHIFT                                                    0x6
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT__SHIFT                                                     0x8
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT__SHIFT                                                     0xa
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT__SHIFT                                                    0xc
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT__SHIFT                                                    0xe
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT__SHIFT                                                       0x10
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT__SHIFT                                                       0x12
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT_MASK                                                       0x00000003L
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT_MASK                                                       0x0000000CL
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT_MASK                                                      0x00000030L
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT_MASK                                                      0x000000C0L
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT_MASK                                                       0x00000300L
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT_MASK                                                       0x00000C00L
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT_MASK                                                      0x00003000L
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT_MASK                                                      0x0000C000L
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT_MASK                                                         0x00030000L
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT_MASK                                                         0x000C0000L
+//SQC_EDC_CNT3
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT__SHIFT                                                     0x0
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT__SHIFT                                                     0x2
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT__SHIFT                                                    0x4
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT__SHIFT                                                    0x6
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT__SHIFT                                                     0x8
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT__SHIFT                                                     0xa
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT__SHIFT                                                    0xc
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT__SHIFT                                                    0xe
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT_MASK                                                       0x00000003L
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT_MASK                                                       0x0000000CL
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT_MASK                                                      0x00000030L
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT_MASK                                                      0x000000C0L
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT_MASK                                                       0x00000300L
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT_MASK                                                       0x00000C00L
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT_MASK                                                      0x00003000L
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT_MASK                                                      0x0000C000L
+//SQC_EDC_PARITY_CNT3
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT                                      0x0
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT__SHIFT                                      0x2
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT__SHIFT                                            0x4
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT__SHIFT                                            0x6
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT__SHIFT                                             0x8
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT__SHIFT                                             0xa
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT__SHIFT                                            0xc
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT__SHIFT                                            0xe
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT                                      0x10
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT__SHIFT                                      0x12
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT__SHIFT                                            0x14
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT__SHIFT                                            0x16
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT__SHIFT                                             0x18
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT__SHIFT                                             0x1a
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT__SHIFT                                            0x1c
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT__SHIFT                                            0x1e
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT_MASK                                        0x00000003L
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT_MASK                                        0x0000000CL
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT_MASK                                              0x00000030L
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT_MASK                                              0x000000C0L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT_MASK                                               0x00000300L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT_MASK                                               0x00000C00L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT_MASK                                              0x00003000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT_MASK                                              0x0000C000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT_MASK                                        0x00030000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT_MASK                                        0x000C0000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT_MASK                                              0x00300000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT_MASK                                              0x00C00000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT_MASK                                               0x03000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT_MASK                                               0x0C000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT_MASK                                              0x30000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT_MASK                                              0xC0000000L
+//SQC_EDC_CNT
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT__SHIFT                                                 0x0
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT__SHIFT                                                 0x2
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT__SHIFT                                                    0x4
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT__SHIFT                                                    0x6
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT__SHIFT                                                 0x8
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT__SHIFT                                                 0xa
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT__SHIFT                                                    0xc
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT__SHIFT                                                    0xe
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT__SHIFT                                                 0x10
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT__SHIFT                                                 0x12
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT__SHIFT                                                    0x14
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT__SHIFT                                                    0x16
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT__SHIFT                                                 0x18
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT__SHIFT                                                 0x1a
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT__SHIFT                                                    0x1c
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT__SHIFT                                                    0x1e
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT_MASK                                                   0x00000003L
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT_MASK                                                   0x0000000CL
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT_MASK                                                      0x00000030L
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT_MASK                                                      0x000000C0L
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT_MASK                                                   0x00000300L
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT_MASK                                                   0x00000C00L
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT_MASK                                                      0x00003000L
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT_MASK                                                      0x0000C000L
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT_MASK                                                   0x00030000L
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT_MASK                                                   0x000C0000L
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT_MASK                                                      0x00300000L
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT_MASK                                                      0x00C00000L
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT_MASK                                                   0x03000000L
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT_MASK                                                   0x0C000000L
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT_MASK                                                      0x30000000L
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT_MASK                                                      0xC0000000L
+//SQ_EDC_SEC_CNT
+#define SQ_EDC_SEC_CNT__LDS_SEC__SHIFT                                                                        0x0
+#define SQ_EDC_SEC_CNT__SGPR_SEC__SHIFT                                                                       0x8
+#define SQ_EDC_SEC_CNT__VGPR_SEC__SHIFT                                                                       0x10
+#define SQ_EDC_SEC_CNT__LDS_SEC_MASK                                                                          0x000000FFL
+#define SQ_EDC_SEC_CNT__SGPR_SEC_MASK                                                                         0x0000FF00L
+#define SQ_EDC_SEC_CNT__VGPR_SEC_MASK                                                                         0x00FF0000L
+//SQ_EDC_DED_CNT
+#define SQ_EDC_DED_CNT__LDS_DED__SHIFT                                                                        0x0
+#define SQ_EDC_DED_CNT__SGPR_DED__SHIFT                                                                       0x8
+#define SQ_EDC_DED_CNT__VGPR_DED__SHIFT                                                                       0x10
+#define SQ_EDC_DED_CNT__LDS_DED_MASK                                                                          0x000000FFL
+#define SQ_EDC_DED_CNT__SGPR_DED_MASK                                                                         0x0000FF00L
+#define SQ_EDC_DED_CNT__VGPR_DED_MASK                                                                         0x00FF0000L
+//SQ_EDC_INFO
+#define SQ_EDC_INFO__WAVE_ID__SHIFT                                                                           0x0
+#define SQ_EDC_INFO__SIMD_ID__SHIFT                                                                           0x4
+#define SQ_EDC_INFO__SOURCE__SHIFT                                                                            0x6
+#define SQ_EDC_INFO__VM_ID__SHIFT                                                                             0x9
+#define SQ_EDC_INFO__WAVE_ID_MASK                                                                             0x0000000FL
+#define SQ_EDC_INFO__SIMD_ID_MASK                                                                             0x00000030L
+#define SQ_EDC_INFO__SOURCE_MASK                                                                              0x000001C0L
+#define SQ_EDC_INFO__VM_ID_MASK                                                                               0x00001E00L
+//SQ_EDC_CNT
+#define SQ_EDC_CNT__LDS_D_SEC_COUNT__SHIFT                                                                    0x0
+#define SQ_EDC_CNT__LDS_D_DED_COUNT__SHIFT                                                                    0x2
+#define SQ_EDC_CNT__LDS_I_SEC_COUNT__SHIFT                                                                    0x4
+#define SQ_EDC_CNT__LDS_I_DED_COUNT__SHIFT                                                                    0x6
+#define SQ_EDC_CNT__SGPR_SEC_COUNT__SHIFT                                                                     0x8
+#define SQ_EDC_CNT__SGPR_DED_COUNT__SHIFT                                                                     0xa
+#define SQ_EDC_CNT__VGPR0_SEC_COUNT__SHIFT                                                                    0xc
+#define SQ_EDC_CNT__VGPR0_DED_COUNT__SHIFT                                                                    0xe
+#define SQ_EDC_CNT__VGPR1_SEC_COUNT__SHIFT                                                                    0x10
+#define SQ_EDC_CNT__VGPR1_DED_COUNT__SHIFT                                                                    0x12
+#define SQ_EDC_CNT__VGPR2_SEC_COUNT__SHIFT                                                                    0x14
+#define SQ_EDC_CNT__VGPR2_DED_COUNT__SHIFT                                                                    0x16
+#define SQ_EDC_CNT__VGPR3_SEC_COUNT__SHIFT                                                                    0x18
+#define SQ_EDC_CNT__VGPR3_DED_COUNT__SHIFT                                                                    0x1a
+#define SQ_EDC_CNT__LDS_D_SEC_COUNT_MASK                                                                      0x00000003L
+#define SQ_EDC_CNT__LDS_D_DED_COUNT_MASK                                                                      0x0000000CL
+#define SQ_EDC_CNT__LDS_I_SEC_COUNT_MASK                                                                      0x00000030L
+#define SQ_EDC_CNT__LDS_I_DED_COUNT_MASK                                                                      0x000000C0L
+#define SQ_EDC_CNT__SGPR_SEC_COUNT_MASK                                                                       0x00000300L
+#define SQ_EDC_CNT__SGPR_DED_COUNT_MASK                                                                       0x00000C00L
+#define SQ_EDC_CNT__VGPR0_SEC_COUNT_MASK                                                                      0x00003000L
+#define SQ_EDC_CNT__VGPR0_DED_COUNT_MASK                                                                      0x0000C000L
+#define SQ_EDC_CNT__VGPR1_SEC_COUNT_MASK                                                                      0x00030000L
+#define SQ_EDC_CNT__VGPR1_DED_COUNT_MASK                                                                      0x000C0000L
+#define SQ_EDC_CNT__VGPR2_SEC_COUNT_MASK                                                                      0x00300000L
+#define SQ_EDC_CNT__VGPR2_DED_COUNT_MASK                                                                      0x00C00000L
+#define SQ_EDC_CNT__VGPR3_SEC_COUNT_MASK                                                                      0x03000000L
+#define SQ_EDC_CNT__VGPR3_DED_COUNT_MASK                                                                      0x0C000000L
+
+// addressBlock: gc_tpdec
+//TA_EDC_CNT
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT                                                              0x0
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT                                                              0x2
+#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT                                                              0x4
+#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT                                                              0x6
+#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT                                                              0x8
+#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT                                                              0xa
+#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT                                                              0xc
+#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT                                                              0xe
+#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT                                                              0x10
+#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT                                                              0x12
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK                                                                0x00000003L
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK                                                                0x0000000CL
+#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK                                                                0x00000030L
+#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK                                                                0x000000C0L
+#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK                                                                0x00000300L
+#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK                                                                0x00000C00L
+#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK                                                                0x00003000L
+#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK                                                                0x0000C000L
+#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK                                                                0x00030000L
+#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK                                                                0x000C0000L
+
+// addressBlock: gc_tcdec
+//TCP_EDC_CNT
+#define TCP_EDC_CNT__SEC_COUNT__SHIFT                                                                         0x0
+#define TCP_EDC_CNT__LFIFO_SED_COUNT__SHIFT                                                                   0x8
+#define TCP_EDC_CNT__DED_COUNT__SHIFT                                                                         0x10
+#define TCP_EDC_CNT__SEC_COUNT_MASK                                                                           0x000000FFL
+#define TCP_EDC_CNT__LFIFO_SED_COUNT_MASK                                                                     0x0000FF00L
+#define TCP_EDC_CNT__DED_COUNT_MASK                                                                           0x00FF0000L
+//TCP_EDC_CNT_NEW
+#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT__SHIFT                                                           0x0
+#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT__SHIFT                                                           0x2
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT__SHIFT                                                           0x4
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT__SHIFT                                                           0x6
+#define TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT__SHIFT                                                            0x8
+#define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT__SHIFT                                                            0xa
+#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT__SHIFT                                                             0xc
+#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT__SHIFT                                                             0xe
+#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT__SHIFT                                                              0x10
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT__SHIFT                                                        0x12
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT__SHIFT                                                        0x14
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT__SHIFT                                                        0x16
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT__SHIFT                                                        0x18
+#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT_MASK                                                             0x00000003L
+#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT_MASK                                                             0x0000000CL
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT_MASK                                                             0x00000030L
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT_MASK                                                             0x000000C0L
+#define TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT_MASK                                                              0x00000300L
+#define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT_MASK                                                              0x00000C00L
+#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT_MASK                                                               0x00003000L
+#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT_MASK                                                               0x0000C000L
+#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT_MASK                                                                0x00030000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT_MASK                                                          0x000C0000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT_MASK                                                          0x00300000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT_MASK                                                          0x00C00000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT_MASK                                                          0x03000000L
+//TCP_ATC_EDC_GATCL1_CNT
+#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC__SHIFT                                                               0x0
+#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC_MASK                                                                 0x000000FFL
+//TCI_EDC_CNT
+#define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT__SHIFT                                                               0x0
+#define TCI_EDC_CNT__WRITE_RAM_DED_COUNT__SHIFT                                                               0x2
+#define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT_MASK                                                                 0x00000003L
+#define TCI_EDC_CNT__WRITE_RAM_DED_COUNT_MASK                                                                 0x0000000CL
+//TCA_EDC_CNT
+#define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT__SHIFT                                                               0x0
+#define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT__SHIFT                                                               0x2
+#define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT__SHIFT                                                                0x4
+#define TCA_EDC_CNT__REQ_FIFO_DED_COUNT__SHIFT                                                                0x6
+#define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT_MASK                                                                 0x00000003L
+#define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT_MASK                                                                 0x0000000CL
+#define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT_MASK                                                                  0x00000030L
+#define TCA_EDC_CNT__REQ_FIFO_DED_COUNT_MASK                                                                  0x000000C0L
+//TCC_EDC_CNT
+#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT__SHIFT                                                              0x0
+#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT__SHIFT                                                              0x2
+#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT__SHIFT                                                             0x4
+#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT__SHIFT                                                             0x6
+#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT__SHIFT                                                           0x8
+#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT__SHIFT                                                           0xa
+#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT__SHIFT                                                            0xc
+#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT__SHIFT                                                            0xe
+#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT__SHIFT                                                                0x10
+#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT__SHIFT                                                                0x12
+#define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT__SHIFT                                                            0x14
+#define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT__SHIFT                                                            0x16
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT__SHIFT                                                   0x18
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT__SHIFT                                                   0x1a
+#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT_MASK                                                                0x00000003L
+#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT_MASK                                                                0x0000000CL
+#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT_MASK                                                               0x00000030L
+#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT_MASK                                                               0x000000C0L
+#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT_MASK                                                             0x00000300L
+#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT_MASK                                                             0x00000C00L
+#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT_MASK                                                              0x00003000L
+#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT_MASK                                                              0x0000C000L
+#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT_MASK                                                                  0x00030000L
+#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT_MASK                                                                  0x000C0000L
+#define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT_MASK                                                              0x00300000L
+#define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT_MASK                                                              0x00C00000L
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT_MASK                                                     0x03000000L
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT_MASK                                                     0x0C000000L
+//TCC_EDC_CNT2
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT__SHIFT                                                   0x0
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT__SHIFT                                                   0x2
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT__SHIFT                                                         0x4
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT__SHIFT                                                         0x6
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT__SHIFT                                                       0x8
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT__SHIFT                                                       0xa
+#define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT__SHIFT                                                         0xc
+#define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT__SHIFT                                                         0xe
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT__SHIFT                                                        0x10
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT__SHIFT                                                        0x12
+#define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT__SHIFT                                                             0x14
+#define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT__SHIFT                                                             0x16
+#define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT__SHIFT                                                           0x18
+#define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT__SHIFT                                                           0x1a
+#define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT__SHIFT                                                            0x1c
+#define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT__SHIFT                                                            0x1e
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT_MASK                                                     0x00000003L
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT_MASK                                                     0x0000000CL
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT_MASK                                                           0x00000030L
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT_MASK                                                           0x000000C0L
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT_MASK                                                         0x00000300L
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT_MASK                                                         0x00000C00L
+#define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT_MASK                                                           0x00003000L
+#define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT_MASK                                                           0x0000C000L
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT_MASK                                                          0x00030000L
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT_MASK                                                          0x000C0000L
+#define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT_MASK                                                               0x00300000L
+#define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT_MASK                                                               0x00C00000L
+#define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT_MASK                                                             0x03000000L
+#define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT_MASK                                                             0x0C000000L
+#define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT_MASK                                                              0x30000000L
+#define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT_MASK                                                              0xC0000000L
+
+// addressBlock: gc_tpdec
+//TD_EDC_CNT
+#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT__SHIFT                                                               0x0
+#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT__SHIFT                                                               0x2
+#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT__SHIFT                                                               0x4
+#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT__SHIFT                                                               0x6
+#define TD_EDC_CNT__CS_FIFO_SEC_COUNT__SHIFT                                                                  0x8
+#define TD_EDC_CNT__CS_FIFO_DED_COUNT__SHIFT                                                                  0xa
+#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT_MASK                                                                 0x00000003L
+#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT_MASK                                                                 0x0000000CL
+#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT_MASK                                                                 0x00000030L
+#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT_MASK                                                                 0x000000C0L
+#define TD_EDC_CNT__CS_FIFO_SEC_COUNT_MASK                                                                    0x00000300L
+#define TD_EDC_CNT__CS_FIFO_DED_COUNT_MASK                                                                    0x00000C00L
+//TA_EDC_CNT
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT                                                              0x0
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT                                                              0x2
+#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT                                                              0x4
+#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT                                                              0x6
+#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT                                                              0x8
+#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT                                                              0xa
+#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT                                                              0xc
+#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT                                                              0xe
+#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT                                                              0x10
+#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT                                                              0x12
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK                                                                0x00000003L
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK                                                                0x0000000CL
+#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK                                                                0x00000030L
+#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK                                                                0x000000C0L
+#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK                                                                0x00000300L
+#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK                                                                0x00000C00L
+#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK                                                                0x00003000L
+#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK                                                                0x0000C000L
+#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK                                                                0x00030000L
+#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK                                                                0x000C0000L
+
+// addressBlock: gc_ea_gceadec2
+//GCEA_EDC_CNT
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT                                                          0x0
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT                                                          0x2
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT                                                          0x4
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT                                                          0x6
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT                                                         0x8
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT                                                         0xa
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT                                                            0xc
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT                                                            0xe
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT                                                            0x10
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT                                                            0x12
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT                                                         0x14
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT                                                         0x16
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT                                                            0x18
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT                                                            0x1a
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT                                                           0x1c
+#define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT__SHIFT                                                              0x1e
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK                                                            0x00000003L
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK                                                            0x0000000CL
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK                                                            0x00000030L
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK                                                            0x000000C0L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK                                                           0x00000300L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK                                                           0x00000C00L
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK                                                              0x00003000L
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK                                                              0x0000C000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK                                                              0x00030000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK                                                              0x000C0000L
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK                                                           0x00300000L
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK                                                           0x00C00000L
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK                                                              0x03000000L
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK                                                              0x0C000000L
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK                                                             0x30000000L
+#define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT_MASK                                                                0xC0000000L
+//GCEA_EDC_CNT2
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT                                                          0x0
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT                                                          0x2
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT                                                          0x4
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT                                                          0x6
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT                                                         0x8
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                         0xa
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                         0xc
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                         0xe
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                             0x10
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                             0x12
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                             0x14
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                             0x16
+#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                             0x18
+#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                             0x1a
+#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                             0x1c
+#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                             0x1e
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                            0x00000003L
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                            0x0000000CL
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                            0x00000030L
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK                                                            0x000000C0L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK                                                           0x00000300L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                           0x00000C00L
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                           0x00003000L
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                           0x0000C000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                               0x00030000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                               0x000C0000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                               0x00300000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                               0x00C00000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                               0x03000000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                               0x0C000000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                               0x30000000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                               0xC0000000L
+//GCEA_EDC_CNT3
+#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT                                                        0x0
+#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT                                                        0x2
+#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT                                                           0x4
+#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT                                                           0x6
+#define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT                                                          0x8
+#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT                                                         0xa
+#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT                                                         0xc
+#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT__SHIFT                                                             0xe
+#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT__SHIFT                                                             0x10
+#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT__SHIFT                                                             0x12
+#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT__SHIFT                                                             0x14
+#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT__SHIFT                                                             0x16
+#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT__SHIFT                                                             0x18
+#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT__SHIFT                                                             0x1a
+#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT__SHIFT                                                             0x1c
+#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT__SHIFT                                                             0x1e
+#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK                                                          0x00000003L
+#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK                                                          0x0000000CL
+#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK                                                             0x00000030L
+#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK                                                             0x000000C0L
+#define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK                                                            0x00000300L
+#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK                                                           0x00000C00L
+#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK                                                           0x00003000L
+#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT_MASK                                                               0x0000C000L
+#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT_MASK                                                               0x00030000L
+#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT_MASK                                                               0x000C0000L
+#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT_MASK                                                               0x00300000L
+#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT_MASK                                                               0x00C00000L
+#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT_MASK                                                               0x03000000L
+#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT_MASK                                                               0x0C000000L
+#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK                                                               0x30000000L
+#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK                                                               0xC0000000L
+
+// addressBlock: gc_gfxudec
+//GRBM_GFX_INDEX
+#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT                                                                 0x0
+#define GRBM_GFX_INDEX__SH_INDEX__SHIFT                                                                       0x8
+#define GRBM_GFX_INDEX__SE_INDEX__SHIFT                                                                       0x10
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT                                                            0x1d
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT                                                      0x1e
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT                                                            0x1f
+#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK                                                                   0x000000FFL
+#define GRBM_GFX_INDEX__SH_INDEX_MASK                                                                         0x0000FF00L
+#define GRBM_GFX_INDEX__SE_INDEX_MASK                                                                         0x00FF0000L
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK                                                              0x20000000L
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK                                                        0x40000000L
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK                                                              0x80000000L
+
+// addressBlock: gc_utcl2_atcl2dec
+//ATC_L2_CNTL
+//ATC_L2_CACHE_4K_DSM_INDEX
+#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX__SHIFT                                                               0x0
+#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX_MASK                                                                 0x000000FFL
+//ATC_L2_CACHE_2M_DSM_INDEX
+#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX__SHIFT                                                               0x0
+#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX_MASK                                                                 0x000000FFL
+//ATC_L2_CACHE_4K_DSM_CNTL
+#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT__SHIFT                                                            0xd
+#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT__SHIFT                                                            0xf
+#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT_MASK                                                              0x00006000L
+#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT_MASK                                                              0x00018000L
+//ATC_L2_CACHE_2M_DSM_CNTL
+#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT__SHIFT                                                            0xd
+#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT__SHIFT                                                            0xf
+#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT_MASK                                                              0x00006000L
+#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT_MASK                                                              0x00018000L
+
+// addressBlock: gc_utcl2_vml2pfdec
+//VML2_MEM_ECC_INDEX
+#define VML2_MEM_ECC_INDEX__INDEX__SHIFT                                                                      0x0
+#define VML2_MEM_ECC_INDEX__INDEX_MASK                                                                        0x000000FFL
+//VML2_WALKER_MEM_ECC_INDEX
+#define VML2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT                                                               0x0
+#define VML2_WALKER_MEM_ECC_INDEX__INDEX_MASK                                                                 0x000000FFL
+//UTCL2_MEM_ECC_INDEX
+#define UTCL2_MEM_ECC_INDEX__INDEX__SHIFT                                                                     0x0
+#define UTCL2_MEM_ECC_INDEX__INDEX_MASK                                                                       0x000000FFL
+//VML2_MEM_ECC_CNTL
+#define VML2_MEM_ECC_CNTL__SEC_COUNT__SHIFT                                                                   0xc
+#define VML2_MEM_ECC_CNTL__DED_COUNT__SHIFT                                                                   0xe
+#define VML2_MEM_ECC_CNTL__SEC_COUNT_MASK                                                                     0x00003000L
+#define VML2_MEM_ECC_CNTL__DED_COUNT_MASK                                                                     0x0000C000L
+//VML2_WALKER_MEM_ECC_CNTL
+#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT__SHIFT                                                            0xc
+#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT__SHIFT                                                            0xe
+#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT_MASK                                                              0x00003000L
+#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT_MASK                                                              0x0000C000L
+//UTCL2_MEM_ECC_CNTL
+#define UTCL2_MEM_ECC_CNTL__SEC_COUNT__SHIFT                                                                  0xc
+#define UTCL2_MEM_ECC_CNTL__DED_COUNT__SHIFT                                                                  0xe
+#define UTCL2_MEM_ECC_CNTL__SEC_COUNT_MASK                                                                    0x00003000L
+#define UTCL2_MEM_ECC_CNTL__DED_COUNT_MASK                                                                    0x0000C000L
+
+// addressBlock: gc_rlcpdec
+//RLC_EDC_CNT
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT__SHIFT                                                          0x0
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT__SHIFT                                                          0x2
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT__SHIFT                                                        0x4
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT__SHIFT                                                        0x6
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT__SHIFT                                                          0x8
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT__SHIFT                                                          0xa
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT__SHIFT                                                        0xc
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT__SHIFT                                                        0xe
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT__SHIFT                                                           0x10
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT__SHIFT                                                           0x12
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT__SHIFT                                                     0x14
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT__SHIFT                                                      0x16
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT__SHIFT                                                        0x18
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT__SHIFT                                                        0x1a
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT__SHIFT                                                        0x1c
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT__SHIFT                                                        0x1e
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT_MASK                                                            0x00000003L
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT_MASK                                                            0x0000000CL
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT_MASK                                                          0x00000030L
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT_MASK                                                          0x000000C0L
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT_MASK                                                            0x00000300L
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT_MASK                                                            0x00000C00L
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT_MASK                                                          0x00003000L
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT_MASK                                                          0x0000C000L
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT_MASK                                                             0x00030000L
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT_MASK                                                             0x000C0000L
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT_MASK                                                       0x00300000L
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT_MASK                                                        0x00C00000L
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT_MASK                                                          0x03000000L
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT_MASK                                                          0x0C000000L
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT_MASK                                                          0x30000000L
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT_MASK                                                          0xC0000000L
+//RLC_EDC_CNT2
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0x0
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT__SHIFT                                                0x2
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0x4
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT__SHIFT                                                0x6
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0x8
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT__SHIFT                                                0xa
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0xc
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT__SHIFT                                                0xe
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0x10
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT__SHIFT                                                0x12
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0x14
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT__SHIFT                                                0x16
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0x18
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT__SHIFT                                                0x1a
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT__SHIFT                                                0x1c
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT__SHIFT                                                0x1e
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x00000003L
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT_MASK                                                  0x0000000CL
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x00000030L
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT_MASK                                                  0x000000C0L
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x00000300L
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT_MASK                                                  0x00000C00L
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x00003000L
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT_MASK                                                  0x0000C000L
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x00030000L
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT_MASK                                                  0x000C0000L
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x00300000L
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT_MASK                                                  0x00C00000L
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x03000000L
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT_MASK                                                  0x0C000000L
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT_MASK                                                  0x30000000L
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT_MASK                                                  0xC0000000L
+
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
index 352ffae7..2c3ce24 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
@@ -1964,4 +1964,20 @@
 #define mmATC_L2_PERFCOUNTER_RSLT_CNTL                                                                 0x084a
 #define mmATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX                                                        0
 
+/* MMEA */
+#define mmMMEA0_EDC_CNT_VG20                                                                           0x0206
+#define mmMMEA0_EDC_CNT_VG20_BASE_IDX                                                                  0
+#define mmMMEA0_EDC_CNT2_VG20                                                                          0x0207
+#define mmMMEA0_EDC_CNT2_VG20_BASE_IDX                                                                 0
+#define mmMMEA1_EDC_CNT_VG20                                                                           0x0346
+#define mmMMEA1_EDC_CNT_VG20_BASE_IDX                                                                  0
+#define mmMMEA1_EDC_CNT2_VG20                                                                          0x0347
+#define mmMMEA1_EDC_CNT2_VG20_BASE_IDX                                                                 0
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+// base address: 0x6a040
+#define mmMC_VM_XGMI_LFB_CNTL                                                                          0x0823
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX                                                                 0
+#define mmMC_VM_XGMI_LFB_SIZE                                                                          0x0824
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX                                                                 0
 #endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h
index 34278ef..198f5f9 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h
@@ -10124,4 +10124,126 @@
 #define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK                                                          0x02000000L
 #define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK                                               0x04000000L
 
+//MMEA0_EDC_CNT
+#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT__SHIFT                                                         0x0
+#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT__SHIFT                                                         0x2
+#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT__SHIFT                                                         0x4
+#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT__SHIFT                                                         0x6
+#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT__SHIFT                                                        0x8
+#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
+#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT__SHIFT                                                           0xc
+#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT__SHIFT                                                           0xe
+#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT__SHIFT                                                           0x10
+#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT__SHIFT                                                           0x12
+#define MMEA0_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT__SHIFT                                                        0x14
+#define MMEA0_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT__SHIFT                                                        0x16
+#define MMEA0_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT__SHIFT                                                           0x18
+#define MMEA0_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT__SHIFT                                                           0x1a
+#define MMEA0_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT__SHIFT                                                          0x1c
+#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
+#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
+#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
+#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT_MASK                                                           0x000000C0L
+#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT_MASK                                                          0x00000300L
+#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
+#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT_MASK                                                             0x00003000L
+#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT_MASK                                                             0x0000C000L
+#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT_MASK                                                             0x00030000L
+#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT_MASK                                                             0x000C0000L
+#define MMEA0_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT_MASK                                                          0x00300000L
+#define MMEA0_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT_MASK                                                          0x00C00000L
+#define MMEA0_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT_MASK                                                             0x03000000L
+#define MMEA0_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT_MASK                                                             0x0C000000L
+#define MMEA0_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT_MASK                                                            0x30000000L
+//MMEA0_EDC_CNT2
+#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT__SHIFT                                                         0x0
+#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT__SHIFT                                                         0x2
+#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT__SHIFT                                                         0x4
+#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT__SHIFT                                                         0x6
+#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT__SHIFT                                                        0x8
+#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
+#define MMEA0_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
+#define MMEA0_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA0_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA0_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA0_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA0_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
+#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
+#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
+#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT_MASK                                                           0x000000C0L
+#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT_MASK                                                          0x00000300L
+#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
+#define MMEA0_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
+#define MMEA0_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA0_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA0_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA0_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA0_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+//MMEA1_EDC_CNT
+#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT__SHIFT                                                         0x0
+#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT__SHIFT                                                         0x2
+#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT__SHIFT                                                         0x4
+#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT__SHIFT                                                         0x6
+#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT__SHIFT                                                        0x8
+#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
+#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT__SHIFT                                                           0xc
+#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT__SHIFT                                                           0xe
+#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT__SHIFT                                                           0x10
+#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT__SHIFT                                                           0x12
+#define MMEA1_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT__SHIFT                                                        0x14
+#define MMEA1_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT__SHIFT                                                        0x16
+#define MMEA1_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT__SHIFT                                                           0x18
+#define MMEA1_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT__SHIFT                                                           0x1a
+#define MMEA1_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT__SHIFT                                                          0x1c
+#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
+#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
+#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
+#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT_MASK                                                           0x000000C0L
+#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT_MASK                                                          0x00000300L
+#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
+#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT_MASK                                                             0x00003000L
+#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT_MASK                                                             0x0000C000L
+#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT_MASK                                                             0x00030000L
+#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT_MASK                                                             0x000C0000L
+#define MMEA1_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT_MASK                                                          0x00300000L
+#define MMEA1_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT_MASK                                                          0x00C00000L
+#define MMEA1_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT_MASK                                                             0x03000000L
+#define MMEA1_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT_MASK                                                             0x0C000000L
+#define MMEA1_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT_MASK                                                            0x30000000L
+//MMEA1_EDC_CNT2
+#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT__SHIFT                                                         0x0
+#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT__SHIFT                                                         0x2
+#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT__SHIFT                                                         0x4
+#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT__SHIFT                                                         0x6
+#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT__SHIFT                                                        0x8
+#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
+#define MMEA1_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
+#define MMEA1_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA1_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA1_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA1_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA1_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
+#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
+#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
+#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT_MASK                                                           0x000000C0L
+#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT_MASK                                                          0x00000300L
+#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
+#define MMEA1_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
+#define MMEA1_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA1_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA1_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA1_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA1_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT                                                             0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT                                                             0x4
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK                                                               0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK                                                               0x00000070L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT                                                               0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK                                                                 0x0000FFFFL
 #endif
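
(Editorial note, not part of the merged diff: the SEC/DED fields added above are 2-bit counters packed into the 32-bit EDC_CNT registers, with each field's _MASK/__SHIFT pair describing its position. As a minimal, hypothetical sketch only, assuming the header above is included and that reg_val has been read from the corresponding MMEA register by driver code, one field could be extracted with the usual shift/mask pattern:)

	/* Illustrative sketch only: decode one 2-bit SEC counter from a raw
	 * MMEA0_EDC_CNT_VG20 register value using the macros defined above.
	 * The helper name and the source of reg_val are assumptions, not
	 * taken from this merge.
	 */
	static inline unsigned int mmea0_dramrd_cmdmem_sec(unsigned int reg_val)
	{
		return (reg_val & MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT_MASK) >>
		       MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT__SHIFT;
	}
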
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
deleted file mode 100644
index f2ae3a5..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2018  Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _mmhub_9_4_0_OFFSET_HEADER
-#define _mmhub_9_4_0_OFFSET_HEADER
-
-/* MMEA */
-#define mmMMEA0_SDP_ARB_FINAL_VG20                                                                     0x01ee
-#define mmMMEA0_SDP_ARB_FINAL_VG20_BASE_IDX                                                            0
-#define mmMMEA0_EDC_CNT_VG20                                                                           0x0206
-#define mmMMEA0_EDC_CNT_VG20_BASE_IDX                                                                  0
-#define mmMMEA0_EDC_CNT2_VG20                                                                          0x0207
-#define mmMMEA0_EDC_CNT2_VG20_BASE_IDX                                                                 0
-#define mmMMEA0_EDC_MODE_VG20                                                                          0x0210
-#define mmMMEA0_EDC_MODE_VG20_BASE_IDX                                                                 0
-#define mmMMEA0_ERR_STATUS_VG20                                                                        0x0211
-#define mmMMEA0_ERR_STATUS_VG20_BASE_IDX                                                               0
-#define mmMMEA1_SDP_ARB_FINAL_VG20                                                                     0x032e
-#define mmMMEA1_SDP_ARB_FINAL_VG20_BASE_IDX                                                            0
-#define mmMMEA1_EDC_CNT_VG20                                                                           0x0346
-#define mmMMEA1_EDC_CNT_VG20_BASE_IDX                                                                  0
-#define mmMMEA1_EDC_CNT2_VG20                                                                          0x0347
-#define mmMMEA1_EDC_CNT2_VG20_BASE_IDX                                                                 0
-#define mmMMEA1_EDC_MODE_VG20                                                                          0x0350
-#define mmMMEA1_EDC_MODE_VG20_BASE_IDX                                                                 0
-#define mmMMEA1_ERR_STATUS_VG20                                                                        0x0351
-#define mmMMEA1_ERR_STATUS_VG20_BASE_IDX                                                               0
-
-// addressBlock: mmhub_utcl2_vmsharedpfdec
-// base address: 0x6a040
-#define mmMC_VM_XGMI_LFB_CNTL                                                                          0x0823
-#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX                                                                 0
-#define mmMC_VM_XGMI_LFB_SIZE                                                                          0x0824
-#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX                                                                 0
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
deleted file mode 100644
index c24259e..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Copyright (C) 2018  Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _mmhub_9_4_0_SH_MASK_HEADER
-#define _mmhub_9_4_0_SH_MASK_HEADER
-
-//MMEA0_SDP_ARB_FINAL
-#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT                                                          0x0
-#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT                                                           0x5
-#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT                                                            0xa
-#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT                                                    0xf
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0__SHIFT                                                                0x11
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1__SHIFT                                                                0x12
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2__SHIFT                                                                0x13
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3__SHIFT                                                                0x14
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4__SHIFT                                                                0x15
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5__SHIFT                                                                0x16
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6__SHIFT                                                                0x17
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7__SHIFT                                                                0x18
-#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT                                                         0x19
-#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT                                                          0x1a
-#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK                                                            0x0000001FL
-#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK                                                             0x000003E0L
-#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK                                                              0x00007C00L
-#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK                                                      0x00018000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0_MASK                                                                  0x00020000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1_MASK                                                                  0x00040000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2_MASK                                                                  0x00080000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3_MASK                                                                  0x00100000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4_MASK                                                                  0x00200000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5_MASK                                                                  0x00400000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6_MASK                                                                  0x00800000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7_MASK                                                                  0x01000000L
-#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK                                                           0x02000000L
-#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK                                                            0x04000000L
-//MMEA0_EDC_CNT
-#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT                                                         0x0
-#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT                                                         0x2
-#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT                                                         0x4
-#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT                                                         0x6
-#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT                                                        0x8
-#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
-#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT                                                           0xc
-#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT                                                           0xe
-#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT                                                           0x10
-#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT                                                           0x12
-#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT                                                        0x14
-#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT                                                        0x16
-#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT                                                           0x18
-#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT                                                           0x1a
-#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT                                                          0x1c
-#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
-#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
-#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
-#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK                                                           0x000000C0L
-#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK                                                          0x00000300L
-#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
-#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK                                                             0x00003000L
-#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK                                                             0x0000C000L
-#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK                                                             0x00030000L
-#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK                                                             0x000C0000L
-#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK                                                          0x00300000L
-#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK                                                          0x00C00000L
-#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK                                                             0x03000000L
-#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK                                                             0x0C000000L
-#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK                                                            0x30000000L
-//MMEA0_EDC_CNT2
-#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT                                                         0x0
-#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT                                                         0x2
-#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT                                                         0x4
-#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT                                                         0x6
-#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT                                                        0x8
-#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
-#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
-#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
-#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
-#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
-#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
-#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
-#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
-#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
-#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
-#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK                                                           0x000000C0L
-#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK                                                          0x00000300L
-#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
-#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
-#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
-#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
-#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
-#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
-#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
-//MMEA0_EDC_MODE
-#define MMEA0_EDC_MODE__COUNT_FED_OUT__SHIFT                                                                  0x10
-#define MMEA0_EDC_MODE__GATE_FUE__SHIFT                                                                       0x11
-#define MMEA0_EDC_MODE__DED_MODE__SHIFT                                                                       0x14
-#define MMEA0_EDC_MODE__PROP_FED__SHIFT                                                                       0x1d
-#define MMEA0_EDC_MODE__BYPASS__SHIFT                                                                         0x1f
-#define MMEA0_EDC_MODE__COUNT_FED_OUT_MASK                                                                    0x00010000L
-#define MMEA0_EDC_MODE__GATE_FUE_MASK                                                                         0x00020000L
-#define MMEA0_EDC_MODE__DED_MODE_MASK                                                                         0x00300000L
-#define MMEA0_EDC_MODE__PROP_FED_MASK                                                                         0x20000000L
-#define MMEA0_EDC_MODE__BYPASS_MASK                                                                           0x80000000L
-//MMEA0_ERR_STATUS
-#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT                                                             0x0
-#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT                                                             0x4
-#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT                                                         0x8
-#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT                                                   0xa
-#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT                                                           0xb
-#define MMEA0_ERR_STATUS__BUSY_ON_ERROR__SHIFT                                                                0xc
-#define MMEA0_ERR_STATUS__FUE_FLAG__SHIFT                                                                     0xd
-#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS_MASK                                                               0x0000000FL
-#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS_MASK                                                               0x000000F0L
-#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK                                                           0x00000300L
-#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK                                                     0x00000400L
-#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS_MASK                                                             0x00000800L
-#define MMEA0_ERR_STATUS__BUSY_ON_ERROR_MASK                                                                  0x00001000L
-#define MMEA0_ERR_STATUS__FUE_FLAG_MASK                                                                       0x00002000L
-//MMEA1_SDP_ARB_FINAL
-#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT                                                          0x0
-#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT                                                           0x5
-#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT                                                            0xa
-#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT                                                    0xf
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0__SHIFT                                                                0x11
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1__SHIFT                                                                0x12
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2__SHIFT                                                                0x13
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3__SHIFT                                                                0x14
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4__SHIFT                                                                0x15
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5__SHIFT                                                                0x16
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6__SHIFT                                                                0x17
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7__SHIFT                                                                0x18
-#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT                                                         0x19
-#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT                                                          0x1a
-#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK                                                            0x0000001FL
-#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK                                                             0x000003E0L
-#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK                                                              0x00007C00L
-#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK                                                      0x00018000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0_MASK                                                                  0x00020000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1_MASK                                                                  0x00040000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2_MASK                                                                  0x00080000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3_MASK                                                                  0x00100000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4_MASK                                                                  0x00200000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5_MASK                                                                  0x00400000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6_MASK                                                                  0x00800000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7_MASK                                                                  0x01000000L
-#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK                                                           0x02000000L
-#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK                                                            0x04000000L
-//MMEA1_EDC_CNT
-#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT                                                         0x0
-#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT                                                         0x2
-#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT                                                         0x4
-#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT                                                         0x6
-#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT                                                        0x8
-#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
-#define MMEA1_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT                                                           0xc
-#define MMEA1_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT                                                           0xe
-#define MMEA1_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT                                                           0x10
-#define MMEA1_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT                                                           0x12
-#define MMEA1_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT                                                        0x14
-#define MMEA1_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT                                                        0x16
-#define MMEA1_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT                                                           0x18
-#define MMEA1_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT                                                           0x1a
-#define MMEA1_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT                                                          0x1c
-#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
-#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
-#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
-#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK                                                           0x000000C0L
-#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK                                                          0x00000300L
-#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
-#define MMEA1_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK                                                             0x00003000L
-#define MMEA1_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK                                                             0x0000C000L
-#define MMEA1_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK                                                             0x00030000L
-#define MMEA1_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK                                                             0x000C0000L
-#define MMEA1_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK                                                          0x00300000L
-#define MMEA1_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK                                                          0x00C00000L
-#define MMEA1_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK                                                             0x03000000L
-#define MMEA1_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK                                                             0x0C000000L
-#define MMEA1_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK                                                            0x30000000L
-//MMEA1_EDC_CNT2
-#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT                                                         0x0
-#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT                                                         0x2
-#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT                                                         0x4
-#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT                                                         0x6
-#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT                                                        0x8
-#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
-#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
-#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
-#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
-#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
-#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
-#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
-#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
-#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
-#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
-#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK                                                           0x000000C0L
-#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK                                                          0x00000300L
-#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
-#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
-#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
-#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
-#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
-#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
-#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
-//MMEA1_EDC_MODE
-#define MMEA1_EDC_MODE__COUNT_FED_OUT__SHIFT                                                                  0x10
-#define MMEA1_EDC_MODE__GATE_FUE__SHIFT                                                                       0x11
-#define MMEA1_EDC_MODE__DED_MODE__SHIFT                                                                       0x14
-#define MMEA1_EDC_MODE__PROP_FED__SHIFT                                                                       0x1d
-#define MMEA1_EDC_MODE__BYPASS__SHIFT                                                                         0x1f
-#define MMEA1_EDC_MODE__COUNT_FED_OUT_MASK                                                                    0x00010000L
-#define MMEA1_EDC_MODE__GATE_FUE_MASK                                                                         0x00020000L
-#define MMEA1_EDC_MODE__DED_MODE_MASK                                                                         0x00300000L
-#define MMEA1_EDC_MODE__PROP_FED_MASK                                                                         0x20000000L
-#define MMEA1_EDC_MODE__BYPASS_MASK                                                                           0x80000000L
-//MMEA1_ERR_STATUS
-#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT                                                             0x0
-#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT                                                             0x4
-#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT                                                         0x8
-#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT                                                   0xa
-#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT                                                           0xb
-#define MMEA1_ERR_STATUS__BUSY_ON_ERROR__SHIFT                                                                0xc
-#define MMEA1_ERR_STATUS__FUE_FLAG__SHIFT                                                                     0xd
-#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS_MASK                                                               0x0000000FL
-#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS_MASK                                                               0x000000F0L
-#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK                                                           0x00000300L
-#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK                                                     0x00000400L
-#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS_MASK                                                             0x00000800L
-#define MMEA1_ERR_STATUS__BUSY_ON_ERROR_MASK                                                                  0x00001000L
-#define MMEA1_ERR_STATUS__FUE_FLAG_MASK                                                                       0x00002000L
-
-// addressBlock: mmhub_utcl2_vmsharedpfdec
-//MC_VM_XGMI_LFB_CNTL
-#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT                                                             0x0
-#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT                                                             0x4
-#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK                                                               0x00000007L
-#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK                                                               0x00000070L
-//MC_VM_XGMI_LFB_SIZE
-#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT                                                               0x0
-#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK                                                                 0x0000FFFFL
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
index 40dfbf1..111a71b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
@@ -11185,6 +11185,14 @@
 #define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -11193,6 +11201,14 @@
 #define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA0_DSM_CNTL
 #define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
@@ -14197,6 +14213,14 @@
 #define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -14205,6 +14229,14 @@
 #define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA1_DSM_CNTL
 #define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
@@ -17209,6 +17241,14 @@
 #define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA2_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA2_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA2_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -17217,6 +17257,14 @@
 #define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA2_DSM_CNTL
 #define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
@@ -20221,6 +20269,14 @@
 #define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA3_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA3_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA3_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -20229,6 +20285,14 @@
 #define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA3_DSM_CNTL
 #define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
@@ -23233,6 +23297,14 @@
 #define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA4_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA4_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA4_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -23241,6 +23313,14 @@
 #define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA4_DSM_CNTL
 #define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
@@ -34952,6 +35032,14 @@
 #define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA5_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA5_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA5_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -34960,6 +35048,14 @@
 #define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA5_DSM_CNTL
 #define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
@@ -37964,6 +38060,14 @@
 #define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA6_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA6_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA6_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -37972,6 +38076,14 @@
 #define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA6_DSM_CNTL
 #define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
@@ -40976,6 +41088,14 @@
 #define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT                                                        0xa
 #define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT                                                        0xc
 #define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT                                                        0xe
+#define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT                                                            0x10
+#define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT                                                            0x12
+#define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT                                                            0x14
+#define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT                                                            0x16
+#define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT                                                            0x18
+#define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT                                                            0x1a
+#define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT                                                            0x1c
+#define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT                                                            0x1e
 #define MMEA7_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK                                                           0x00000003L
 #define MMEA7_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK                                                           0x0000000CL
 #define MMEA7_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK                                                           0x00000030L
@@ -40984,6 +41104,14 @@
 #define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK                                                          0x00000C00L
 #define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK                                                          0x00003000L
 #define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK                                                          0x0000C000L
+#define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK                                                              0x00030000L
+#define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK                                                              0x000C0000L
+#define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK                                                              0x00300000L
+#define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK                                                              0x00C00000L
+#define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK                                                              0x03000000L
+#define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK                                                              0x0C000000L
+#define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK                                                              0x30000000L
+#define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK                                                              0xC0000000L
 //MMEA7_DSM_CNTL
 #define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT                                               0x0
 #define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT                                              0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h
index 043aa69..0d6b594 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h
@@ -27,5 +27,7 @@
 #define mmUMCCH0_0_EccErrCnt_BASE_IDX                                                                  0
 #define mmMCA_UMC_UMC0_MCUMC_STATUST0                                                                  0x03c2
 #define mmMCA_UMC_UMC0_MCUMC_STATUST0_BASE_IDX                                                         0
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0                                                                    0x03c4
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_BASE_IDX                                                           0
 
 #endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h
new file mode 100644
index 0000000..ce005c6
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2019  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_6_1_2_OFFSET_HEADER
+#define _umc_6_1_2_OFFSET_HEADER
+
+#define mmUMCCH0_0_EccErrCntSel_ARCT                                     0x0360
+#define mmUMCCH0_0_EccErrCntSel_ARCT_BASE_IDX                            1
+#define mmUMCCH0_0_EccErrCnt_ARCT                                        0x0361
+#define mmUMCCH0_0_EccErrCnt_ARCT_BASE_IDX                               1
+#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT                               0x03c2
+#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT_BASE_IDX                      1
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT                                 0x03c4
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT_BASE_IDX                        1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h
new file mode 100644
index 0000000..a5a8c99
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2020  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_6_1_2_SH_MASK_HEADER
+#define _umc_6_1_2_SH_MASK_HEADER
+
+//UMCCH0_0_EccErrCntSel_ARCT
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntCsSel__SHIFT                                                          0x0
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrInt__SHIFT                                                               0xc
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntEn__SHIFT                                                             0xf
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntCsSel_MASK                                                            0x0000000FL
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrInt_MASK                                                                 0x00003000L
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntEn_MASK                                                               0x00008000L
+//UMCCH0_0_EccErrCnt_ARCT
+#define UMCCH0_0_EccErrCnt_ARCT__EccErrCnt__SHIFT                                                                  0x0
+#define UMCCH0_0_EccErrCnt_ARCT__EccErrCnt_MASK                                                                    0x0000FFFFL
+//MCA_UMC_UMC0_MCUMC_STATUST0_ARCT
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCode__SHIFT                                                         0x0
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCodeExt__SHIFT                                                      0x10
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV0__SHIFT                                                           0x16
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreId__SHIFT                                                         0x20
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV1__SHIFT                                                           0x26
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Scrub__SHIFT                                                             0x28
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV2__SHIFT                                                           0x29
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Poison__SHIFT                                                            0x2b
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Deferred__SHIFT                                                          0x2c
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UECC__SHIFT                                                              0x2d
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__CECC__SHIFT                                                              0x2e
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV3__SHIFT                                                           0x2f
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Transparent__SHIFT                                                       0x34
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__SyndV__SHIFT                                                             0x35
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV4__SHIFT                                                           0x36
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__TCC__SHIFT                                                               0x37
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreIdVal__SHIFT                                                      0x38
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__PCC__SHIFT                                                               0x39
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__AddrV__SHIFT                                                             0x3a
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__MiscV__SHIFT                                                             0x3b
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__En__SHIFT                                                                0x3c
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UC__SHIFT                                                                0x3d
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Overflow__SHIFT                                                          0x3e
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Val__SHIFT                                                               0x3f
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCode_MASK                                                           0x000000000000FFFFL
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCodeExt_MASK                                                        0x00000000003F0000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV0_MASK                                                             0x00000000FFC00000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreId_MASK                                                           0x0000003F00000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV1_MASK                                                             0x000000C000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Scrub_MASK                                                               0x0000010000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV2_MASK                                                             0x0000060000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Poison_MASK                                                              0x0000080000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Deferred_MASK                                                            0x0000100000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UECC_MASK                                                                0x0000200000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__CECC_MASK                                                                0x0000400000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV3_MASK                                                             0x000F800000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Transparent_MASK                                                         0x0010000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__SyndV_MASK                                                               0x0020000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV4_MASK                                                             0x0040000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__TCC_MASK                                                                 0x0080000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreIdVal_MASK                                                        0x0100000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__PCC_MASK                                                                 0x0200000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__AddrV_MASK                                                               0x0400000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__MiscV_MASK                                                               0x0800000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__En_MASK                                                                  0x1000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UC_MASK                                                                  0x2000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Overflow_MASK                                                            0x4000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Val_MASK                                                                 0x8000000000000000L
+//MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__ErrorAddr__SHIFT                                                           0x0
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__LSB__SHIFT                                                                 0x38
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__Reserved__SHIFT                                                            0x3e
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__ErrorAddr_MASK                                                             0x00FFFFFFFFFFFFFFL
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__LSB_MASK                                                                   0x3F00000000000000L
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__Reserved_MASK                                                              0xC000000000000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index dd7cbc0..7014651 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -672,20 +672,6 @@ struct vram_usagebyfirmware_v2_1
   uint16_t  used_by_driver_in_kb; 
 };
 
-/* This is part of vram_usagebyfirmware_v2_1 */
-struct vram_reserve_block
-{
-	uint32_t start_address_in_kb;
-	uint16_t used_by_firmware_in_kb;
-	uint16_t used_by_driver_in_kb;
-};
-
-/* Definitions for constance */
-enum atomfirmware_internal_constants
-{
-	ONE_KiB	= 0x400,
-	ONE_MiB	= 0x100000,
-};
 
 /* 
   ***************************************************************************
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 2cd217e..a607b10 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -256,6 +256,10 @@ struct kfd2kgd_calls {
 			uint32_t wptr_shift, uint32_t wptr_mask,
 			struct mm_struct *mm);
 
+	int (*hiq_mqd_load)(struct kgd_dev *kgd, void *mqd,
+			    uint32_t pipe_id, uint32_t queue_id,
+			    uint32_t doorbell_off);
+
 	int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd,
 			     uint32_t __user *wptr, struct mm_struct *mm);
 
@@ -307,8 +311,6 @@ struct kfd2kgd_calls {
 
 	void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
 			uint32_t vmid, uint64_t page_table_base);
-	int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
-	int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);
 	uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
 	uint64_t (*get_hive_id)(struct kgd_dev *kgd);
 
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 7932eb1..c195575 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -48,7 +48,6 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
 
 	hwmgr->adev = adev;
 	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
-	hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
 	hwmgr->device = amdgpu_cgs_create_device(adev);
 	mutex_init(&hwmgr->smu_lock);
 	hwmgr->chip_family = adev->family;
@@ -928,9 +927,12 @@ static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr)
 		return -EINVAL;
 
+	if (!hwmgr->pm_en)
+		return 0;
+
 	if (hwmgr->hwmgr_func->set_mp1_state)
 		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
 
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index a23729d..99ad4dd 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -21,6 +21,7 @@
  */
 
 #include <linux/firmware.h>
+#include <linux/pci.h>
 
 #include "pp_debug.h"
 #include "amdgpu.h"
@@ -356,6 +357,35 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
 	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
 }
 
+int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
+			    uint32_t *min_value, uint32_t *max_value)
+{
+	int ret = 0;
+	uint32_t level_count = 0;
+
+	if (!min_value && !max_value)
+		return -EINVAL;
+
+	if (min_value) {
+		/* use level 0 clock value as the min value by default */
+		ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
+		if (ret)
+			return ret;
+	}
+
+	if (max_value) {
+		ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
+		if (ret)
+			return ret;
+
+		ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
 {
 	enum smu_feature_mask feature_id = 0;
@@ -404,10 +434,10 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
 
 	switch (block_type) {
 	case AMD_IP_BLOCK_TYPE_UVD:
-		ret = smu_dpm_set_uvd_enable(smu, gate);
+		ret = smu_dpm_set_uvd_enable(smu, !gate);
 		break;
 	case AMD_IP_BLOCK_TYPE_VCE:
-		ret = smu_dpm_set_vce_enable(smu, gate);
+		ret = smu_dpm_set_vce_enable(smu, !gate);
 		break;
 	case AMD_IP_BLOCK_TYPE_GFX:
 		ret = smu_gfx_off_control(smu, gate);
@@ -415,6 +445,9 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
 	case AMD_IP_BLOCK_TYPE_SDMA:
 		ret = smu_powergate_sdma(smu, gate);
 		break;
+	case AMD_IP_BLOCK_TYPE_JPEG:
+		ret = smu_dpm_set_jpeg_enable(smu, !gate);
+		break;
 	default:
 		break;
 	}
@@ -487,26 +520,25 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct amdgpu_device *adev = smu->adev;
-	struct smu_table *table = NULL;
-	int ret = 0;
+	struct smu_table *table = &smu_table->driver_table;
 	int table_id = smu_table_get_index(smu, table_index);
+	uint32_t table_size;
+	int ret = 0;
 
 	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
 		return -EINVAL;
 
-	table = &smu_table->tables[table_index];
+	table_size = smu_table->tables[table_index].size;
 
-	if (drv2smu)
-		memcpy(table->cpu_addr, table_data, table->size);
+	if (drv2smu) {
+		memcpy(table->cpu_addr, table_data, table_size);
+		/*
+		 * Flush hdp cache: to ensure the content seen by
+		 * the GPU is consistent with the CPU.
+		 */
+		amdgpu_asic_flush_hdp(adev, NULL);
+	}
 
-	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
-					  upper_32_bits(table->mc_address));
-	if (ret)
-		return ret;
-	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
-					  lower_32_bits(table->mc_address));
-	if (ret)
-		return ret;
 	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
 					  SMU_MSG_TransferTableDram2Smu :
 					  SMU_MSG_TransferTableSmu2Dram,
@@ -514,11 +546,10 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
 	if (ret)
 		return ret;
 
-	/* flush hdp cache */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
-
-	if (!drv2smu)
-		memcpy(table_data, table->cpu_addr, table->size);
+	if (!drv2smu) {
+		amdgpu_asic_flush_hdp(adev, NULL);
+		memcpy(table_data, table->cpu_addr, table_size);
+	}
 
 	return ret;
 }
@@ -527,9 +558,12 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
 {
 	if (adev->asic_type == CHIP_VEGA20)
 		return (amdgpu_dpm == 2) ? true : false;
-	else if (adev->asic_type >= CHIP_ARCTURUS)
-		return true;
-	else
+	else if (adev->asic_type >= CHIP_ARCTURUS) {
+		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+			return false;
+		else
+			return true;
+	} else
 		return false;
 }
 
@@ -637,12 +671,11 @@ int smu_feature_init_dpm(struct smu_context *smu)
 
 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
 {
-	struct amdgpu_device *adev = smu->adev;
 	struct smu_feature *feature = &smu->smu_feature;
 	int feature_id;
 	int ret = 0;
 
-	if (adev->flags & AMD_IS_APU)
+	if (smu->is_apu)
 		return 1;
 
 	feature_id = smu_feature_get_index(smu, mask);
@@ -942,32 +975,56 @@ static int smu_init_fb_allocations(struct smu_context *smu)
 	struct amdgpu_device *adev = smu->adev;
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct smu_table *tables = smu_table->tables;
+	struct smu_table *driver_table = &(smu_table->driver_table);
+	uint32_t max_table_size = 0;
 	int ret, i;
 
+	/* VRAM allocation for tool table */
+	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
+		ret = amdgpu_bo_create_kernel(adev,
+					      tables[SMU_TABLE_PMSTATUSLOG].size,
+					      tables[SMU_TABLE_PMSTATUSLOG].align,
+					      tables[SMU_TABLE_PMSTATUSLOG].domain,
+					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
+					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+		if (ret) {
+			pr_err("VRAM allocation for tool table failed!\n");
+			return ret;
+		}
+	}
+
+	/* VRAM allocation for driver table */
 	for (i = 0; i < SMU_TABLE_COUNT; i++) {
 		if (tables[i].size == 0)
 			continue;
-		ret = amdgpu_bo_create_kernel(adev,
-					      tables[i].size,
-					      tables[i].align,
-					      tables[i].domain,
-					      &tables[i].bo,
-					      &tables[i].mc_address,
-					      &tables[i].cpu_addr);
-		if (ret)
-			goto failed;
-	}
 
-	return 0;
-failed:
-	while (--i >= 0) {
-		if (tables[i].size == 0)
+		if (i == SMU_TABLE_PMSTATUSLOG)
 			continue;
-		amdgpu_bo_free_kernel(&tables[i].bo,
-				      &tables[i].mc_address,
-				      &tables[i].cpu_addr);
 
+		if (max_table_size < tables[i].size)
+			max_table_size = tables[i].size;
 	}
+
+	driver_table->size = max_table_size;
+	driver_table->align = PAGE_SIZE;
+	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+	ret = amdgpu_bo_create_kernel(adev,
+				      driver_table->size,
+				      driver_table->align,
+				      driver_table->domain,
+				      &driver_table->bo,
+				      &driver_table->mc_address,
+				      &driver_table->cpu_addr);
+	if (ret) {
+		pr_err("VRAM allocation for driver table failed!\n");
+		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+	}
+
 	return ret;
 }
 
@@ -975,18 +1032,19 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct smu_table *tables = smu_table->tables;
-	uint32_t i = 0;
+	struct smu_table *driver_table = &(smu_table->driver_table);
 
 	if (!tables)
 		return 0;
 
-	for (i = 0; i < SMU_TABLE_COUNT; i++) {
-		if (tables[i].size == 0)
-			continue;
-		amdgpu_bo_free_kernel(&tables[i].bo,
-				      &tables[i].mc_address,
-				      &tables[i].cpu_addr);
-	}
+	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+
+	amdgpu_bo_free_kernel(&driver_table->bo,
+			      &driver_table->mc_address,
+			      &driver_table->cpu_addr);
 
 	return 0;
 }
@@ -1056,28 +1114,48 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 	}
 
 	/* smu_dump_pptable(smu); */
+	if (!amdgpu_sriov_vf(adev)) {
+		ret = smu_set_driver_table_location(smu);
+		if (ret)
+			return ret;
 
-	/*
-	 * Copy pptable bo in the vram to smc with SMU MSGs such as
-	 * SetDriverDramAddr and TransferTableDram2Smu.
-	 */
-	ret = smu_write_pptable(smu);
-	if (ret)
-		return ret;
+		/*
+		 * Copy pptable bo in the vram to smc with SMU MSGs such as
+		 * SetDriverDramAddr and TransferTableDram2Smu.
+		 */
+		ret = smu_write_pptable(smu);
+		if (ret)
+			return ret;
 
-	/* issue Run*Btc msg */
-	ret = smu_run_btc(smu);
-	if (ret)
-		return ret;
+		/* issue Run*Btc msg */
+		ret = smu_run_btc(smu);
+		if (ret)
+			return ret;
+		ret = smu_feature_set_allowed_mask(smu);
+		if (ret)
+			return ret;
 
-	ret = smu_feature_set_allowed_mask(smu);
-	if (ret)
-		return ret;
+		ret = smu_system_features_control(smu, true);
+		if (ret)
+			return ret;
 
-	ret = smu_system_features_control(smu, true);
-	if (ret)
-		return ret;
-
+		if (adev->asic_type == CHIP_NAVI10) {
+			if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 ||
+							      adev->pdev->revision == 0xc3 ||
+							      adev->pdev->revision == 0xca ||
+							      adev->pdev->revision == 0xcb)) ||
+			    (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 ||
+							      adev->pdev->revision == 0xf4 ||
+							      adev->pdev->revision == 0xf5 ||
+							      adev->pdev->revision == 0xf6))) {
+				ret = smu_disable_umc_cdr_12gbps_workaround(smu);
+				if (ret) {
+					pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
+					return ret;
+				}
+			}
+		}
+	}
 	if (adev->asic_type != CHIP_ARCTURUS) {
 		ret = smu_notify_display_change(smu);
 		if (ret)
@@ -1130,8 +1208,9 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 	/*
 	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
 	 */
-	ret = smu_set_tool_table_location(smu);
-
+	if (!amdgpu_sriov_vf(adev)) {
+		ret = smu_set_tool_table_location(smu);
+	}
 	if (!smu_is_dpm_running(smu))
 		pr_info("dpm has been disabled\n");
 
@@ -1187,10 +1266,9 @@ static int smu_free_memory_pool(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct smu_table *memory_pool = &smu_table->memory_pool;
-	int ret = 0;
 
 	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
-		return ret;
+		return 0;
 
 	amdgpu_bo_free_kernel(&memory_pool->bo,
 			      &memory_pool->mc_address,
@@ -1198,7 +1276,7 @@ static int smu_free_memory_pool(struct smu_context *smu)
 
 	memset(memory_pool, 0, sizeof(struct smu_table));
 
-	return ret;
+	return 0;
 }
 
 static int smu_start_smc_engine(struct smu_context *smu)
@@ -1237,12 +1315,16 @@ static int smu_hw_init(void *handle)
 		return ret;
 	}
 
-	if (adev->flags & AMD_IS_APU) {
+	if (smu->is_apu) {
 		smu_powergate_sdma(&adev->smu, false);
 		smu_powergate_vcn(&adev->smu, false);
+		smu_powergate_jpeg(&adev->smu, false);
 		smu_set_gfx_cgpg(&adev->smu, true);
 	}
 
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
 	if (!smu->pm_enabled)
 		return 0;
 
@@ -1285,7 +1367,7 @@ static int smu_hw_init(void *handle)
 
 static int smu_stop_dpms(struct smu_context *smu)
 {
-	return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
+	return smu_system_features_control(smu, false);
 }
 
 static int smu_hw_fini(void *handle)
@@ -1295,36 +1377,45 @@ static int smu_hw_fini(void *handle)
 	struct smu_table_context *table_context = &smu->smu_table;
 	int ret = 0;
 
-	if (adev->flags & AMD_IS_APU) {
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	if (smu->is_apu) {
 		smu_powergate_sdma(&adev->smu, true);
 		smu_powergate_vcn(&adev->smu, true);
+		smu_powergate_jpeg(&adev->smu, true);
 	}
 
-	ret = smu_stop_thermal_control(smu);
-	if (ret) {
-		pr_warn("Fail to stop thermal control!\n");
-		return ret;
-	}
+	if (!smu->pm_enabled)
+		return 0;
 
-	/*
-	 * For custom pptable uploading, skip the DPM features
-	 * disable process on Navi1x ASICs.
-	 *   - As the gfx related features are under control of
-	 *     RLC on those ASICs. RLC reinitialization will be
-	 *     needed to reenable them. That will cost much more
-	 *     efforts.
-	 *
-	 *   - SMU firmware can handle the DPM reenablement
-	 *     properly.
-	 */
-	if (!smu->uploading_custom_pp_table ||
-	    !((adev->asic_type >= CHIP_NAVI10) &&
-	      (adev->asic_type <= CHIP_NAVI12))) {
-		ret = smu_stop_dpms(smu);
+	if (!amdgpu_sriov_vf(adev)) {
+		ret = smu_stop_thermal_control(smu);
 		if (ret) {
-			pr_warn("Fail to stop Dpms!\n");
+			pr_warn("Fail to stop thermal control!\n");
 			return ret;
 		}
+
+		/*
+		 * For custom pptable uploading, skip the DPM features
+		 * disable process on Navi1x ASICs.
+		 *   - As the gfx related features are under control of
+		 *     RLC on those ASICs. RLC reinitialization will be
+		 *     needed to reenable them. That will cost much more
+		 *     efforts.
+		 *
+		 *   - SMU firmware can handle the DPM reenablement
+		 *     properly.
+		 */
+		if (!smu->uploading_custom_pp_table ||
+				!((adev->asic_type >= CHIP_NAVI10) &&
+					(adev->asic_type <= CHIP_NAVI12))) {
+			ret = smu_stop_dpms(smu);
+			if (ret) {
+				pr_warn("Fail to stop Dpms!\n");
+				return ret;
+			}
+		}
 	}
 
 	kfree(table_context->driver_pptable);
@@ -1370,14 +1461,17 @@ static int smu_suspend(void *handle)
 	struct smu_context *smu = &adev->smu;
 	bool baco_feature_is_enabled = false;
 
-	if(!(adev->flags & AMD_IS_APU))
+	if (!smu->pm_enabled)
+		return 0;
+
+	if(!smu->is_apu)
 		baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
 
 	ret = smu_system_features_control(smu, false);
 	if (ret)
 		return ret;
 
-	if (adev->in_gpu_reset && baco_feature_is_enabled) {
+	if (baco_feature_is_enabled) {
 		ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
 		if (ret) {
 			pr_warn("set BACO feature enabled failed, return %d\n", ret);
@@ -1402,6 +1496,12 @@ static int smu_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct smu_context *smu = &adev->smu;
 
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
+	if (!smu->pm_enabled)
+		return 0;
+
 	pr_info("SMU is resuming...\n");
 
 	ret = smu_start_smc_engine(smu);
@@ -1600,43 +1700,6 @@ static int smu_enable_umd_pstate(void *handle,
 	return 0;
 }
 
-static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
-{
-	int ret = 0;
-	uint32_t sclk_mask, mclk_mask, soc_mask;
-
-	switch (level) {
-	case AMD_DPM_FORCED_LEVEL_HIGH:
-		ret = smu_force_dpm_limit_value(smu, true);
-		break;
-	case AMD_DPM_FORCED_LEVEL_LOW:
-		ret = smu_force_dpm_limit_value(smu, false);
-		break;
-	case AMD_DPM_FORCED_LEVEL_AUTO:
-	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
-		ret = smu_unforce_dpm_levels(smu);
-		break;
-	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
-	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
-	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-		ret = smu_get_profiling_clk_mask(smu, level,
-						 &sclk_mask,
-						 &mclk_mask,
-						 &soc_mask);
-		if (ret)
-			return ret;
-		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
-		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
-		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
-		break;
-	case AMD_DPM_FORCED_LEVEL_MANUAL:
-	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
-	default:
-		break;
-	}
-	return ret;
-}
-
 int smu_adjust_power_state_dynamic(struct smu_context *smu,
 				   enum amd_dpm_forced_level level,
 				   bool skip_display_settings)
@@ -1664,7 +1727,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
 	}
 
 	if (!skip_display_settings) {
-		ret = smu_notify_smc_dispaly_config(smu);
+		ret = smu_notify_smc_display_config(smu);
 		if (ret) {
 			pr_err("Failed to notify smc display config!");
 			return ret;
@@ -1674,11 +1737,8 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
 	if (smu_dpm_ctx->dpm_level != level) {
 		ret = smu_asic_set_performance_level(smu, level);
 		if (ret) {
-			ret = smu_default_set_performance_level(smu, level);
-			if (ret) {
-				pr_err("Failed to set performance level!");
-				return ret;
-			}
+			pr_err("Failed to set performance level!");
+			return ret;
 		}
 
 		/* update the saved copy */
@@ -1920,27 +1980,25 @@ int smu_set_df_cstate(struct smu_context *smu,
 
 int smu_write_watermarks_table(struct smu_context *smu)
 {
-	int ret = 0;
-	struct smu_table_context *smu_table = &smu->smu_table;
-	struct smu_table *table = NULL;
+	void *watermarks_table = smu->smu_table.watermarks_table;
 
-	table = &smu_table->tables[SMU_TABLE_WATERMARKS];
-
-	if (!table->cpu_addr)
+	if (!watermarks_table)
 		return -EINVAL;
 
-	ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
+	return smu_update_table(smu,
+				SMU_TABLE_WATERMARKS,
+				0,
+				watermarks_table,
 				true);
-
-	return ret;
 }
 
 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
 		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
 {
-	int ret = 0;
-	struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
-	void *table = watermarks->cpu_addr;
+	void *table = smu->smu_table.watermarks_table;
+
+	if (!table)
+		return -EINVAL;
 
 	mutex_lock(&smu->mutex);
 
@@ -1954,7 +2012,7 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
 
 	mutex_unlock(&smu->mutex);
 
-	return ret;
+	return 0;
 }
 
 const struct amd_ip_funcs smu_ip_funcs = {
@@ -2279,13 +2337,9 @@ int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
 {
 	int ret = 0;
 
-	mutex_lock(&smu->mutex);
-
 	if (smu->ppt_funcs->set_active_display_count)
 		ret = smu->ppt_funcs->set_active_display_count(smu, count);
 
-	mutex_unlock(&smu->mutex);
-
 	return ret;
 }
 
@@ -2432,7 +2486,7 @@ bool smu_baco_is_support(struct smu_context *smu)
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->baco_is_support)
+	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
 		ret = smu->ppt_funcs->baco_is_support(smu);
 
 	mutex_unlock(&smu->mutex);
@@ -2452,14 +2506,28 @@ int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
 	return 0;
 }
 
-int smu_baco_reset(struct smu_context *smu)
+int smu_baco_enter(struct smu_context *smu)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->baco_reset)
-		ret = smu->ppt_funcs->baco_reset(smu);
+	if (smu->ppt_funcs->baco_enter)
+		ret = smu->ppt_funcs->baco_enter(smu);
+
+	mutex_unlock(&smu->mutex);
+
+	return ret;
+}
+
+int smu_baco_exit(struct smu_context *smu)
+{
+	int ret = 0;
+
+	mutex_lock(&smu->mutex);
+
+	if (smu->ppt_funcs->baco_exit)
+		ret = smu->ppt_funcs->baco_exit(smu);
 
 	mutex_unlock(&smu->mutex);
 
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 472e9fe..14ba6aa 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -179,6 +179,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
 	TAB_MAP(DRIVER_SMU_CONFIG),
 	TAB_MAP(OVERDRIVE),
 	TAB_MAP(I2C_COMMANDS),
+	TAB_MAP(ACTIVITY_MONITOR_COEFF),
 };
 
 static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -280,10 +281,8 @@ static int arcturus_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER
 		return -EINVAL;
 
 	mapping = arcturus_workload_map[profile];
-	if (!(mapping.valid_mapping)) {
-		pr_warn("Unsupported SMU power source: %d\n", profile);
+	if (!(mapping.valid_mapping))
 		return -EINVAL;
-	}
 
 	return mapping.map_to;
 }
@@ -304,6 +303,10 @@ static int arcturus_tables_init(struct smu_context *smu, struct smu_table *table
 	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
 			       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 
+	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+		       sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
+		       AMDGPU_GEM_DOMAIN_VRAM);
+
 	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
 	if (!smu_table->metrics_table)
 		return -ENOMEM;
@@ -495,6 +498,7 @@ static int arcturus_store_powerplay_table(struct smu_context *smu)
 {
 	struct smu_11_0_powerplay_table *powerplay_table = NULL;
 	struct smu_table_context *table_context = &smu->smu_table;
+	struct smu_baco_context *smu_baco = &smu->smu_baco;
 	int ret = 0;
 
 	if (!table_context->power_play_table)
@@ -507,6 +511,12 @@ static int arcturus_store_powerplay_table(struct smu_context *smu)
 
 	table_context->thermal_controller_type = powerplay_table->thermal_controller_type;
 
+	mutex_lock(&smu_baco->mutex);
+	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
+	    powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
+		smu_baco->platform_support = true;
+	mutex_unlock(&smu_baco->mutex);
+
 	return ret;
 }
 
@@ -1308,6 +1318,8 @@ static int arcturus_get_power_limit(struct smu_context *smu,
 static int arcturus_get_power_profile_mode(struct smu_context *smu,
 					   char *buf)
 {
+	struct amdgpu_device *adev = smu->adev;
+	DpmActivityMonitorCoeffInt_t activity_monitor;
 	static const char *profile_name[] = {
 					"BOOTUP_DEFAULT",
 					"3D_FULL_SCREEN",
@@ -1317,14 +1329,35 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
 					"COMPUTE",
 					"CUSTOM"};
 	static const char *title[] = {
-			"PROFILE_INDEX(NAME)"};
+			"PROFILE_INDEX(NAME)",
+			"CLOCK_TYPE(NAME)",
+			"FPS",
+			"UseRlcBusy",
+			"MinActiveFreqType",
+			"MinActiveFreq",
+			"BoosterFreqType",
+			"BoosterFreq",
+			"PD_Data_limit_c",
+			"PD_Data_error_coeff",
+			"PD_Data_error_rate_coeff"};
 	uint32_t i, size = 0;
 	int16_t workload_type = 0;
+	int result = 0;
+	uint32_t smu_version;
 
-	if (!smu->pm_enabled || !buf)
+	if (!buf)
 		return -EINVAL;
 
-	size += sprintf(buf + size, "%16s\n",
+	result = smu_get_smc_version(smu, NULL, &smu_version);
+	if (result)
+		return result;
+
+	if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev))
+		size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+			title[0], title[1], title[2], title[3], title[4], title[5],
+			title[6], title[7], title[8], title[9], title[10]);
+	else
+		size += sprintf(buf + size, "%16s\n",
 			title[0]);
 
 	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
@@ -1336,8 +1369,50 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
 		if (workload_type < 0)
 			continue;
 
+		if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev)) {
+			result = smu_update_table(smu,
+						  SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+						  workload_type,
+						  (void *)(&activity_monitor),
+						  false);
+			if (result) {
+				pr_err("[%s] Failed to get activity monitor!", __func__);
+				return result;
+			}
+		}
+
 		size += sprintf(buf + size, "%2d %14s%s\n",
 			i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+
+		if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev)) {
+			size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+				" ",
+				0,
+				"GFXCLK",
+				activity_monitor.Gfx_FPS,
+				activity_monitor.Gfx_UseRlcBusy,
+				activity_monitor.Gfx_MinActiveFreqType,
+				activity_monitor.Gfx_MinActiveFreq,
+				activity_monitor.Gfx_BoosterFreqType,
+				activity_monitor.Gfx_BoosterFreq,
+				activity_monitor.Gfx_PD_Data_limit_c,
+				activity_monitor.Gfx_PD_Data_error_coeff,
+				activity_monitor.Gfx_PD_Data_error_rate_coeff);
+
+			size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+				" ",
+				1,
+				"UCLK",
+				activity_monitor.Mem_FPS,
+				activity_monitor.Mem_UseRlcBusy,
+				activity_monitor.Mem_MinActiveFreqType,
+				activity_monitor.Mem_MinActiveFreq,
+				activity_monitor.Mem_BoosterFreqType,
+				activity_monitor.Mem_BoosterFreq,
+				activity_monitor.Mem_PD_Data_limit_c,
+				activity_monitor.Mem_PD_Data_error_coeff,
+				activity_monitor.Mem_PD_Data_error_rate_coeff);
+		}
 	}
 
 	return size;
@@ -1347,18 +1422,69 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
 					   long *input,
 					   uint32_t size)
 {
+	DpmActivityMonitorCoeffInt_t activity_monitor;
 	int workload_type = 0;
 	uint32_t profile_mode = input[size];
 	int ret = 0;
-
-	if (!smu->pm_enabled)
-		return -EINVAL;
+	uint32_t smu_version;
 
 	if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
 		pr_err("Invalid power profile mode %d\n", profile_mode);
 		return -EINVAL;
 	}
 
+	ret = smu_get_smc_version(smu, NULL, &smu_version);
+	if (ret)
+		return ret;
+
+	if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
+	     (smu_version >=0x360d00)) {
+		ret = smu_update_table(smu,
+				       SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+				       WORKLOAD_PPLIB_CUSTOM_BIT,
+				       (void *)(&activity_monitor),
+				       false);
+		if (ret) {
+			pr_err("[%s] Failed to get activity monitor!", __func__);
+			return ret;
+		}
+
+		switch (input[0]) {
+		case 0: /* Gfxclk */
+			activity_monitor.Gfx_FPS = input[1];
+			activity_monitor.Gfx_UseRlcBusy = input[2];
+			activity_monitor.Gfx_MinActiveFreqType = input[3];
+			activity_monitor.Gfx_MinActiveFreq = input[4];
+			activity_monitor.Gfx_BoosterFreqType = input[5];
+			activity_monitor.Gfx_BoosterFreq = input[6];
+			activity_monitor.Gfx_PD_Data_limit_c = input[7];
+			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
+			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
+			break;
+		case 1: /* Uclk */
+			activity_monitor.Mem_FPS = input[1];
+			activity_monitor.Mem_UseRlcBusy = input[2];
+			activity_monitor.Mem_MinActiveFreqType = input[3];
+			activity_monitor.Mem_MinActiveFreq = input[4];
+			activity_monitor.Mem_BoosterFreqType = input[5];
+			activity_monitor.Mem_BoosterFreq = input[6];
+			activity_monitor.Mem_PD_Data_limit_c = input[7];
+			activity_monitor.Mem_PD_Data_error_coeff = input[8];
+			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
+			break;
+		}
+
+		ret = smu_update_table(smu,
+				       SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+				       WORKLOAD_PPLIB_CUSTOM_BIT,
+				       (void *)(&activity_monitor),
+				       true);
+		if (ret) {
+			pr_err("[%s] Failed to set activity monitor!", __func__);
+			return ret;
+		}
+	}
+
 	/*
 	 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
 	 * Not all profile modes are supported on arcturus.
@@ -1897,7 +2023,7 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
 	SwI2cRequest_t req;
 	struct amdgpu_device *adev = to_amdgpu_device(control);
 	struct smu_table_context *smu_table = &adev->smu.smu_table;
-	struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS];
+	struct smu_table *table = &smu_table->driver_table;
 
 	memset(&req, 0, sizeof(req));
 	arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
@@ -2051,8 +2177,12 @@ static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
 static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
 {
 	struct amdgpu_device *adev = to_amdgpu_device(control);
+	struct smu_context *smu = &adev->smu;
 	int res;
 
+	if (!smu->pm_enabled)
+		return -EOPNOTSUPP;
+
 	control->owner = THIS_MODULE;
 	control->class = I2C_CLASS_SPD;
 	control->dev.parent = &adev->pdev->dev;
@@ -2068,6 +2198,12 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
 
 static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
 {
+	struct amdgpu_device *adev = to_amdgpu_device(control);
+	struct smu_context *smu = &adev->smu;
+
+	if (!smu->pm_enabled)
+		return;
+
 	i2c_del_adapter(control);
 }
 
@@ -2112,6 +2248,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.get_profiling_clk_mask = arcturus_get_profiling_clk_mask,
 	.get_power_profile_mode = arcturus_get_power_profile_mode,
 	.set_power_profile_mode = arcturus_set_power_profile_mode,
+	.set_performance_level = smu_v11_0_set_performance_level,
 	/* debug (internal used) */
 	.dump_pptable = arcturus_dump_pptable,
 	.get_power_limit = arcturus_get_power_limit,
@@ -2135,6 +2272,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.check_fw_version = smu_v11_0_check_fw_version,
 	.write_pptable = smu_v11_0_write_pptable,
 	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+	.set_driver_table_location = smu_v11_0_set_driver_table_location,
 	.set_tool_table_location = smu_v11_0_set_tool_table_location,
 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 	.system_features_control = smu_v11_0_system_features_control,
@@ -2163,7 +2301,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.baco_is_support= smu_v11_0_baco_is_support,
 	.baco_get_state = smu_v11_0_baco_get_state,
 	.baco_set_state = smu_v11_0_baco_set_state,
-	.baco_reset = smu_v11_0_baco_reset,
+	.baco_enter = smu_v11_0_baco_enter,
+	.baco_exit = smu_v11_0_baco_exit,
 	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
 	.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
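
With the ACTIVITY_MONITOR_COEFF table wired up above, arcturus_set_power_profile_mode() accepts a CUSTOM profile whose input[] array selects a clock domain (input[0]: 0 for GFXCLK, 1 for UCLK) and supplies nine activity-monitor coefficients (input[1..9]) that are written back through SMU_TABLE_ACTIVITY_MONITOR_COEFF. The standalone sketch below shows only that index-to-field mapping; the field names are simplified stand-ins for the real DpmActivityMonitorCoeffInt_t layout.

/* Illustrative sketch of the CUSTOM power-profile input mapping. */
#include <stdint.h>
#include <stdio.h>

struct activity_monitor {
	uint16_t fps, use_rlc_busy, min_active_freq_type, min_active_freq;
	uint16_t booster_freq_type, booster_freq;
	uint16_t pd_limit_c, pd_error_coeff, pd_error_rate_coeff;
};

/* input[0]: 0 = GFXCLK block, 1 = UCLK block; input[1..9]: coefficients */
static int apply_custom_profile(struct activity_monitor *gfx,
				struct activity_monitor *mem,
				const long input[10])
{
	struct activity_monitor *m;

	switch (input[0]) {
	case 0: m = gfx; break;
	case 1: m = mem; break;
	default: return -1;
	}

	m->fps                  = (uint16_t)input[1];
	m->use_rlc_busy         = (uint16_t)input[2];
	m->min_active_freq_type = (uint16_t)input[3];
	m->min_active_freq      = (uint16_t)input[4];
	m->booster_freq_type    = (uint16_t)input[5];
	m->booster_freq         = (uint16_t)input[6];
	m->pd_limit_c           = (uint16_t)input[7];
	m->pd_error_coeff       = (uint16_t)input[8];
	m->pd_error_rate_coeff  = (uint16_t)input[9];
	return 0;
}

int main(void)
{
	struct activity_monitor gfx = {0}, mem = {0};
	const long input[10] = { 0, 60, 1, 0, 300, 0, 1500, 200, 50, 80 };

	if (!apply_custom_profile(&gfx, &mem, input))
		printf("GFXCLK booster freq set to %u\n", (unsigned)gfx.booster_freq);
	return 0;
}
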
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index cc57fb9..9454ab5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -81,8 +81,8 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 	adev = hwmgr->adev;
 
 	/* Skip for suspend/resume case */
-	if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)
-		&& adev->in_suspend) {
+	if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr)
+	    && !amdgpu_passthrough(adev) && adev->in_suspend) {
 		pr_info("dpm has been enabled\n");
 		return 0;
 	}
@@ -99,6 +99,9 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 
 	PHM_FUNC_CHECK(hwmgr);
 
+	if (!hwmgr->not_vf)
+		return 0;
+
 	if (!smum_is_dpm_running(hwmgr)) {
 		pr_info("dpm has been disabled\n");
 		return 0;
@@ -200,6 +203,9 @@ int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr)
 {
 	PHM_FUNC_CHECK(hwmgr);
 
+	if (!hwmgr->not_vf)
+		return 0;
+
 	if (hwmgr->hwmgr_func->stop_thermal_controller == NULL)
 		return -EINVAL;
 
@@ -237,6 +243,9 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
 		TEMP_RANGE_MAX};
 	struct amdgpu_device *adev = hwmgr->adev;
 
+	if (!hwmgr->not_vf)
+		return 0;
+
 	if (hwmgr->hwmgr_func->get_thermal_temperature_range)
 		hwmgr->hwmgr_func->get_thermal_temperature_range(
 				hwmgr, &range);
@@ -263,6 +272,8 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
 bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
 {
 	PHM_FUNC_CHECK(hwmgr);
+	if (hwmgr->pp_one_vf)
+		return false;
 
 	if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
 		return false;
@@ -482,6 +493,9 @@ int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
 {
 	PHM_FUNC_CHECK(hwmgr);
 
+	if (!hwmgr->not_vf)
+		return 0;
+
 	if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL)
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index d2909c9..f48fdc7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -221,6 +221,9 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
 {
 	int ret = 0;
 
+	hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf((struct amdgpu_device *)hwmgr->adev);
+	hwmgr->pm_en = (amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf))
+			? true : false;
 	if (!hwmgr->pm_en)
 		return 0;
 
@@ -279,7 +282,7 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
 
 int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
 {
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
 		return 0;
 
 	phm_stop_thermal_controller(hwmgr);
@@ -299,7 +302,7 @@ int hwmgr_suspend(struct pp_hwmgr *hwmgr)
 {
 	int ret = 0;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
 		return 0;
 
 	phm_disable_smc_firmware_ctf(hwmgr);
@@ -321,7 +324,7 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!hwmgr->pm_en)
+	if (!hwmgr->not_vf || !hwmgr->pm_en)
 		return 0;
 
 	ret = phm_setup_asic(hwmgr);
@@ -365,6 +368,8 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
 
 	switch (task_id) {
 	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+		if (!hwmgr->not_vf)
+			return ret;
 		ret = phm_pre_display_configuration_changed(hwmgr);
 		if (ret)
 			return ret;
@@ -381,6 +386,8 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
 		enum PP_StateUILabel requested_ui_label;
 		struct pp_power_state *requested_ps = NULL;
 
+		if (!hwmgr->not_vf)
+			return ret;
 		if (user_state == NULL) {
 			ret = -EINVAL;
 			break;
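
The hwmgr_hw_init() change above derives pp_one_vf from the SR-IOV configuration and recomputes pm_en so that only bare-metal or "one VF" setups keep full power management, while the fini/suspend/resume and display-task paths return early on plain VFs. A trivial standalone sketch of the pm_en condition follows; the booleans stand in for amdgpu_dpm, hwmgr->not_vf and hwmgr->pp_one_vf.

/* Illustrative sketch of how pm_en is derived. */
#include <stdbool.h>
#include <stdio.h>

static bool pm_enabled(bool dpm_module_param, bool not_vf, bool pp_one_vf)
{
	/* Power management stays on for bare metal (not_vf) and for the
	 * one-VF SR-IOV mode; plain VFs leave it to the host. */
	return dpm_module_param && (not_vf || pp_one_vf);
}

int main(void)
{
	printf("bare metal: %d\n", pm_enabled(true, true,  false)); /* 1 */
	printf("one-VF:     %d\n", pm_enabled(true, false, true));  /* 1 */
	printf("plain VF:   %d\n", pm_enabled(true, false, false)); /* 0 */
	return 0;
}
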
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 6bf4893..31a32a7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -262,20 +262,22 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
 	uint32_t index;
 	long workload;
 
-	if (!skip_display_settings)
-		phm_display_configuration_changed(hwmgr);
+	if (hwmgr->not_vf) {
+		if (!skip_display_settings)
+			phm_display_configuration_changed(hwmgr);
 
-	if (hwmgr->ps)
-		power_state_management(hwmgr, new_ps);
-	else
-		/*
-		 * for vega12/vega20 which does not support power state manager
-		 * DAL clock limits should also be honoured
-		 */
-		phm_apply_clock_adjust_rules(hwmgr);
+		if (hwmgr->ps)
+			power_state_management(hwmgr, new_ps);
+		else
+			/*
+			 * for vega12/vega20 which does not support power state manager
+			 * DAL clock limits should also be honoured
+			 */
+			phm_apply_clock_adjust_rules(hwmgr);
 
-	if (!skip_display_settings)
-		phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
+		if (!skip_display_settings)
+			phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
+	}
 
 	if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
 		hwmgr->dpm_level = hwmgr->request_dpm_level;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 1115761..689072a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1026,12 +1026,15 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
 
 	clocks->num_levels = 0;
 	for (i = 0; i < pclk_vol_table->count; i++) {
-		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
-		clocks->data[i].latency_in_us = latency_required ?
-						smu10_get_mem_latency(hwmgr,
-						pclk_vol_table->entries[i].clk) :
-						0;
-		clocks->num_levels++;
+		if (pclk_vol_table->entries[i].clk) {
+			clocks->data[clocks->num_levels].clocks_in_khz =
+				pclk_vol_table->entries[i].clk * 10;
+			clocks->data[clocks->num_levels].latency_in_us = latency_required ?
+				smu10_get_mem_latency(hwmgr,
+						      pclk_vol_table->entries[i].clk) :
+				0;
+			clocks->num_levels++;
+		}
 	}
 
 	return 0;
@@ -1077,9 +1080,11 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 
 	clocks->num_levels = 0;
 	for (i = 0; i < pclk_vol_table->count; i++) {
-		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk  * 10;
-		clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
-		clocks->num_levels++;
+		if (pclk_vol_table->entries[i].clk) {
+			clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk  * 10;
+			clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol;
+			clocks->num_levels++;
+		}
 	}
 
 	return 0;
@@ -1151,12 +1156,11 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 	struct smu10_hwmgr *data = hwmgr->backend;
 	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
 	Watermarks_t *table = &(data->water_marks_table);
-	int result = 0;
 
 	smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges);
 	smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
 	data->water_marks_exist = true;
-	return result;
+	return 0;
 }
 
 static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
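
The smu10 fix above writes levels at clocks->num_levels rather than at the source index, so clock/voltage table entries with a zero clock no longer leave holes in the reported array. A small standalone sketch of that compacting copy, using simplified stand-in types rather than the real pp clock structures:

/* Illustrative sketch: skip empty table entries while compacting output. */
#include <stdint.h>
#include <stdio.h>

struct clock_level { uint32_t clocks_in_khz; };

static unsigned int copy_valid_levels(const uint32_t *clk_10khz, unsigned int count,
				      struct clock_level *out)
{
	unsigned int num_levels = 0;
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (clk_10khz[i])	/* skip zero-clock entries */
			out[num_levels++].clocks_in_khz = clk_10khz[i] * 10;
	}
	return num_levels;
}

int main(void)
{
	const uint32_t table[] = { 40000, 0, 80000, 120000 };
	struct clock_level levels[4];
	unsigned int n = copy_valid_levels(table, 4, levels);

	printf("%u valid levels, first %u kHz\n", n, (unsigned)levels[0].clocks_in_khz);
	return 0;
}
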
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index f73dff6..bf04cfe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -720,7 +720,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
 		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
 		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
 		/* param1 is for corresponding std voltage */
-		data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
+		data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
 	}
 
 	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
@@ -730,7 +730,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
 		/* Initialize Vddci DPM table based on allow Mclk values */
 		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
 			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
-			data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
+			data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
 		}
 		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
 	}
@@ -744,7 +744,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
 		 */
 		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
 			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
-			data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
+			data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
 		}
 		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
 	}
@@ -4238,7 +4238,6 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-	uint32_t vbios_version;
 	uint32_t tmp;
 
 	/* Read MC indirect register offset 0x9F bits [3:0] to see
@@ -4247,7 +4246,6 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
 	 */
 
 	smu7_get_mc_microcode_version(hwmgr);
-	vbios_version = hwmgr->microcode_version_info.MC & 0xf;
 
 	data->need_long_memory_training = false;
 
@@ -4945,7 +4943,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 			title[0], title[1], title[2], title[3],
 			title[4], title[5], title[6], title[7]);
 
-	len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting);
+	len = ARRAY_SIZE(smu7_profiling);
 
 	for (i = 0; i < len; i++) {
 		if (i == hwmgr->power_profile_mode) {
@@ -5077,13 +5075,11 @@ static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw
 				PHM_PerformanceLevel *level)
 {
 	const struct smu7_power_state *ps;
-	struct smu7_hwmgr *data;
 	uint32_t i;
 
 	if (level == NULL || hwmgr == NULL || state == NULL)
 		return -EINVAL;
 
-	data = hwmgr->backend;
 	ps = cast_const_phw_smu7_power_state(state);
 
 	i = index > ps->performance_level_count - 1 ?
@@ -5189,13 +5185,11 @@ uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
 
 int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
 {
-	int ret = 0;
-
 	hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
 	if (hwmgr->pp_table_version == PP_TABLE_V0)
 		hwmgr->pptable_func = &pptable_funcs;
 	else if (hwmgr->pp_table_version == PP_TABLE_V1)
 		hwmgr->pptable_func = &pptable_v1_0_funcs;
 
-	return ret;
+	return 0;
 }
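
Among the smu7 cleanups above, the open-coded sizeof(smu7_profiling) / sizeof(struct profile_mode_setting) is replaced by ARRAY_SIZE(), which computes the same element count without naming the element type. A minimal sketch of the idea; the two-field struct here is a made-up stand-in, and this simplified macro lacks the kernel version's pointer check.

/* Illustrative sketch of ARRAY_SIZE() vs. open-coded sizeof division. */
#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct profile_mode_setting { int bupdate_sclk, sclk_up_hyst; };

static const struct profile_mode_setting smu7_profiling[] = {
	{ 1, 0 }, { 1, 10 }, { 0, 30 },
};

int main(void)
{
	printf("profiles: %zu\n", ARRAY_SIZE(smu7_profiling));
	return 0;
}
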
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index d71a492..92a65e3d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -912,6 +912,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 	hwmgr->platform_descriptor.clockStep.memoryClock = 500;
 
 	data->total_active_cus = adev->gfx.cu_info.number;
+	if (!hwmgr->not_vf)
+		return result;
+
 	/* Setup default Overdrive Fan control settings */
 	data->odn_fan_table.target_fan_speed =
 			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
@@ -979,6 +982,9 @@ static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
 
 static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
 {
+	if (!hwmgr->not_vf)
+		return 0;
+
 	PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
 			"Failed to init sclk threshold!",
 			return -EINVAL);
@@ -2503,6 +2509,9 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
 			"Failed to setup default DPM tables!",
 			return result);
 
+	if (!hwmgr->not_vf)
+		return 0;
+
 	/* initialize ODN table */
 	if (hwmgr->od_enabled) {
 		if (odn_table->max_vddc) {
@@ -2826,6 +2835,8 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
 	struct vega10_hwmgr *data = hwmgr->backend;
 	uint32_t i, feature_mask = 0;
 
+	if (!hwmgr->not_vf)
+		return 0;
 
 	if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
@@ -2932,61 +2943,73 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	struct vega10_hwmgr *data = hwmgr->backend;
 	int tmp_result, result = 0;
 
-	vega10_enable_disable_PCC_limit_feature(hwmgr, true);
+	if (hwmgr->not_vf) {
+		vega10_enable_disable_PCC_limit_feature(hwmgr, true);
 
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-		PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
 
-	tmp_result = vega10_construct_voltage_tables(hwmgr);
-	PP_ASSERT_WITH_CODE(!tmp_result,
-			"Failed to construct voltage tables!",
-			result = tmp_result);
-
-	tmp_result = vega10_init_smc_table(hwmgr);
-	PP_ASSERT_WITH_CODE(!tmp_result,
-			"Failed to initialize SMC table!",
-			result = tmp_result);
-
-	if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
-		tmp_result = vega10_enable_thermal_protection(hwmgr);
+		tmp_result = vega10_construct_voltage_tables(hwmgr);
 		PP_ASSERT_WITH_CODE(!tmp_result,
-				"Failed to enable thermal protection!",
-				result = tmp_result);
+				    "Failed to construct voltage tables!",
+				    result = tmp_result);
 	}
 
-	tmp_result = vega10_enable_vrhot_feature(hwmgr);
-	PP_ASSERT_WITH_CODE(!tmp_result,
-			"Failed to enable VR hot feature!",
-			result = tmp_result);
+	if (hwmgr->not_vf || hwmgr->pp_one_vf) {
+		tmp_result = vega10_init_smc_table(hwmgr);
+		PP_ASSERT_WITH_CODE(!tmp_result,
+				    "Failed to initialize SMC table!",
+				    result = tmp_result);
+	}
 
-	tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
-	PP_ASSERT_WITH_CODE(!tmp_result,
-			"Failed to enable deep sleep master switch!",
-			result = tmp_result);
+	if (hwmgr->not_vf) {
+		if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
+			tmp_result = vega10_enable_thermal_protection(hwmgr);
+			PP_ASSERT_WITH_CODE(!tmp_result,
+					    "Failed to enable thermal protection!",
+					    result = tmp_result);
+		}
 
-	tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
-	PP_ASSERT_WITH_CODE(!tmp_result,
-			"Failed to start DPM!", result = tmp_result);
+		tmp_result = vega10_enable_vrhot_feature(hwmgr);
+		PP_ASSERT_WITH_CODE(!tmp_result,
+				    "Failed to enable VR hot feature!",
+				    result = tmp_result);
 
-	/* enable didt, do not abort if failed didt */
-	tmp_result = vega10_enable_didt_config(hwmgr);
-	PP_ASSERT(!tmp_result,
-			"Failed to enable didt config!");
+		tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
+		PP_ASSERT_WITH_CODE(!tmp_result,
+				    "Failed to enable deep sleep master switch!",
+				    result = tmp_result);
+	}
+
+	if (hwmgr->not_vf) {
+		tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
+		PP_ASSERT_WITH_CODE(!tmp_result,
+				    "Failed to start DPM!", result = tmp_result);
+	}
+
+	if (hwmgr->not_vf) {
+		/* enable didt, do not abort if failed didt */
+		tmp_result = vega10_enable_didt_config(hwmgr);
+		PP_ASSERT(!tmp_result,
+			  "Failed to enable didt config!");
+	}
 
 	tmp_result = vega10_enable_power_containment(hwmgr);
 	PP_ASSERT_WITH_CODE(!tmp_result,
-			"Failed to enable power containment!",
-			result = tmp_result);
+			    "Failed to enable power containment!",
+			    result = tmp_result);
 
-	tmp_result = vega10_power_control_set_level(hwmgr);
-	PP_ASSERT_WITH_CODE(!tmp_result,
-			"Failed to power control set level!",
-			result = tmp_result);
+	if (hwmgr->not_vf) {
+		tmp_result = vega10_power_control_set_level(hwmgr);
+		PP_ASSERT_WITH_CODE(!tmp_result,
+				    "Failed to power control set level!",
+				    result = tmp_result);
 
-	tmp_result = vega10_enable_ulv(hwmgr);
-	PP_ASSERT_WITH_CODE(!tmp_result,
-			"Failed to enable ULV!",
-			result = tmp_result);
+		tmp_result = vega10_enable_ulv(hwmgr);
+		PP_ASSERT_WITH_CODE(!tmp_result,
+				    "Failed to enable ULV!",
+				    result = tmp_result);
+	}
 
 	return result;
 }
@@ -3080,11 +3103,22 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
 	performance_level->soc_clock = socclk_dep_table->entries
 				[state_entry->ucSocClockIndexHigh].ulClk;
 	if (gfxclk_dep_table->ucRevId == 0) {
-		performance_level->gfx_clock = gfxclk_dep_table->entries
-			[state_entry->ucGfxClockIndexHigh].ulClk;
+		/* Under Vega10 pp one VF mode, the gfx clk DPM needs to be
+		 * capped at level 4 due to the limited 110 W power budget.
+		 */

+		if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0))
+			performance_level->gfx_clock =
+				gfxclk_dep_table->entries[4].ulClk;
+		else
+			performance_level->gfx_clock = gfxclk_dep_table->entries
+				[state_entry->ucGfxClockIndexHigh].ulClk;
 	} else if (gfxclk_dep_table->ucRevId == 1) {
 		patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
-		performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
+		if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0))
+			performance_level->gfx_clock = patom_record_V2[4].ulClk;
+		else
+			performance_level->gfx_clock =
+				patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
 	}
 
 	performance_level->mem_clock = mclk_dep_table->entries
@@ -3495,6 +3529,7 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
 			smum_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_SetSoftMinGfxclkByIndex,
 				data->smc_state_table.gfx_boot_level);
+
 			data->dpm_table.gfx_table.dpm_state.soft_min_level =
 					data->smc_state_table.gfx_boot_level;
 		}
@@ -3503,7 +3538,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
 	if (!data->registry_data.mclk_dpm_key_disabled) {
 		if (data->smc_state_table.mem_boot_level !=
 				data->dpm_table.mem_table.dpm_state.soft_min_level) {
-			if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
+			if ((data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1)
+			    && hwmgr->not_vf) {
 				socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
 				smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetSoftMinSocclkByIndex,
@@ -3518,6 +3554,9 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
 		}
 	}
 
+	if (!hwmgr->not_vf)
+		return 0;
+
 	if (!data->registry_data.socclk_dpm_key_disabled) {
 		if (data->smc_state_table.soc_boot_level !=
 				data->dpm_table.soc_table.dpm_state.soft_min_level) {
@@ -3560,6 +3599,9 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 		}
 	}
 
+	if (!hwmgr->not_vf)
+		return 0;
+
 	if (!data->registry_data.socclk_dpm_key_disabled) {
 		if (data->smc_state_table.soc_max_level !=
 			data->dpm_table.soc_table.dpm_state.soft_max_level) {
@@ -4054,15 +4096,25 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
 		*mclk_mask = 0;
 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
-		*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
+		/* Under Vega10 pp one VF mode, the gfx clk DPM needs to be
+		 * capped at level 4 due to the limited power budget.
+		 */
+		if (hwmgr->pp_one_vf)
+			*sclk_mask = 4;
+		else
+			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
 		*soc_mask = table_info->vdd_dep_on_socclk->count - 1;
 		*mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
 	}
+
 	return 0;
 }
 
 static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
 {
+	if (!hwmgr->not_vf)
+		return;
+
 	switch (mode) {
 	case AMD_FAN_CTRL_NONE:
 		vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
@@ -4176,6 +4228,9 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 		break;
 	}
 
+	if (!hwmgr->not_vf)
+		return ret;
+
 	if (!ret) {
 		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
 			vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
@@ -4360,14 +4415,13 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 	struct vega10_hwmgr *data = hwmgr->backend;
 	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
 	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
-	int result = 0;
 
 	if (!data->registry_data.disable_water_mark) {
 		smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
 		data->water_marks_bitmap = WaterMarksExist;
 	}
 
-	return result;
+	return 0;
 }
 
 static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
@@ -4480,7 +4534,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
 	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
 
-	int i, now, size = 0;
+	int i, now, size = 0, count = 0;
 
 	switch (type) {
 	case PP_SCLK:
@@ -4490,7 +4544,12 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
 		now = smum_get_argument(hwmgr);
 
-		for (i = 0; i < sclk_table->count; i++)
+		if (hwmgr->pp_one_vf &&
+		    (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
+			count = 5;
+		else
+			count = sclk_table->count;
+		for (i = 0; i < count; i++)
 			size += sprintf(buf + size, "%d: %uMhz %s\n",
 					i, sclk_table->dpm_levels[i].value / 100,
 					(i == now) ? "*" : "");
@@ -4701,6 +4760,9 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
 	int tmp_result, result = 0;
 
+	if (!hwmgr->not_vf)
+		return 0;
+
 	if (PP_CAP(PHM_PlatformCaps_ThermalController))
 		vega10_disable_thermal_protection(hwmgr);
 
@@ -5252,13 +5314,11 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
 				PHM_PerformanceLevel *level)
 {
 	const struct vega10_power_state *ps;
-	struct vega10_hwmgr *data;
 	uint32_t i;
 
 	if (level == NULL || hwmgr == NULL || state == NULL)
 		return -EINVAL;
 
-	data = hwmgr->backend;
 	ps = cast_const_phw_vega10_power_state(state);
 
 	i = index > ps->performance_level_count - 1 ?
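
Two of the Vega10 hunks above implement the one-VF power cap: whenever pp_one_vf is set and the state asks for a gfx index above 0, the gfx clock is taken from DPM level 4 instead of the requested level. A standalone sketch of that clamp, with invented clock values and a plain array standing in for the dependency table:

/* Illustrative sketch of the one-VF gfx clock clamp. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pick_gfx_clock(const uint32_t *dpm_clk, unsigned int requested_idx,
			       int pp_one_vf)
{
	if (pp_one_vf && requested_idx > 0)
		return dpm_clk[4];	/* cap at level 4 under one-VF mode */
	return dpm_clk[requested_idx];
}

int main(void)
{
	const uint32_t dpm_clk[] = { 300, 600, 900, 1100, 1250, 1400, 1500, 1600 };

	printf("bare metal, level 7: %u MHz\n", (unsigned)pick_gfx_clock(dpm_clk, 7, 0));
	printf("one-VF,     level 7: %u MHz\n", (unsigned)pick_gfx_clock(dpm_clk, 7, 1));
	return 0;
}
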
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 6f26cb2..0a677d4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -1343,6 +1343,9 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr)
 	hwmgr->default_power_limit = hwmgr->power_limit =
 			(uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
 
+	if (!hwmgr->not_vf)
+		return 0;
+
 	if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
 		if (data->smu_features[GNLD_PPT].supported)
 			PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 7af9ad4..aca61d1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -499,8 +499,6 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
 static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
 		PPCLK_e clkID, uint32_t index, uint32_t *clock)
 {
-	int result = 0;
-
 	/*
 	 *SMU expects the Clock ID to be in the top 16 bits.
 	 *Lower 16 bits specify the level
@@ -512,7 +510,7 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
 
 	*clock = smum_get_argument(hwmgr);
 
-	return result;
+	return 0;
 }
 
 static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 5bcf0d6..3b3ec56 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -872,7 +872,7 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
 		"[OverridePcieParameters] Attempt to override pcie params failed!",
 		return ret);
 
-	data->pcie_parameters_override = 1;
+	data->pcie_parameters_override = true;
 	data->pcie_gen_level1 = pcie_gen;
 	data->pcie_width_level1 = pcie_width;
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 41fce75..97b6714 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -254,15 +254,26 @@ struct smu_table_context
 	unsigned long			metrics_time;
 	void				*metrics_table;
 	void				*clocks_table;
+	void				*watermarks_table;
 
 	void				*max_sustainable_clocks;
 	struct smu_bios_boot_up_values	boot_values;
 	void                            *driver_pptable;
 	struct smu_table		*tables;
+	/*
+	 * The driver table is just a staging buffer for
+	 * uploading/downloading content to/from the SMU.
+	 *
+	 * The table_id passed with SMU_MSG_TransferTableSmu2Dram/
+	 * SMU_MSG_TransferTableDram2Smu tells the SMU which
+	 * content the driver is interested in.
+	 */
+	struct smu_table		driver_table;
 	struct smu_table		memory_pool;
 	uint8_t                         thermal_controller_type;
 
 	void				*overdrive_table;
+	void                            *boot_overdrive_table;
 };
 
 struct smu_dpm_context {
@@ -282,6 +293,7 @@ struct smu_power_gate {
 	bool uvd_gated;
 	bool vce_gated;
 	bool vcn_gated;
+	bool jpeg_gated;
 };
 
 struct smu_power_context {
@@ -437,12 +449,13 @@ struct pptable_funcs {
 	int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
 	int (*dpm_set_uvd_enable)(struct smu_context *smu, bool enable);
 	int (*dpm_set_vce_enable)(struct smu_context *smu, bool enable);
+	int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable);
 	int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
 			   void *data, uint32_t *size);
 	int (*pre_display_config_changed)(struct smu_context *smu);
 	int (*display_config_changed)(struct smu_context *smu);
 	int (*apply_clocks_adjust_rules)(struct smu_context *smu);
-	int (*notify_smc_dispaly_config)(struct smu_context *smu);
+	int (*notify_smc_display_config)(struct smu_context *smu);
 	int (*force_dpm_limit_value)(struct smu_context *smu, bool highest);
 	int (*unforce_dpm_levels)(struct smu_context *smu);
 	int (*get_profiling_clk_mask)(struct smu_context *smu,
@@ -491,9 +504,11 @@ struct pptable_funcs {
 	int (*check_fw_version)(struct smu_context *smu);
 	int (*powergate_sdma)(struct smu_context *smu, bool gate);
 	int (*powergate_vcn)(struct smu_context *smu, bool gate);
+	int (*powergate_jpeg)(struct smu_context *smu, bool gate);
 	int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
 	int (*write_pptable)(struct smu_context *smu);
 	int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
+	int (*set_driver_table_location)(struct smu_context *smu);
 	int (*set_tool_table_location)(struct smu_context *smu);
 	int (*notify_memory_pool_location)(struct smu_context *smu);
 	int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
@@ -544,12 +559,14 @@ struct pptable_funcs {
 	bool (*baco_is_support)(struct smu_context *smu);
 	enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
 	int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
-	int (*baco_reset)(struct smu_context *smu);
+	int (*baco_enter)(struct smu_context *smu);
+	int (*baco_exit)(struct smu_context *smu);
 	int (*mode2_reset)(struct smu_context *smu);
 	int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
 	int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
 	int (*override_pcie_parameters)(struct smu_context *smu);
 	uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
+	int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
 };
 
 int smu_load_microcode(struct smu_context *smu);
@@ -626,7 +643,8 @@ bool smu_baco_is_support(struct smu_context *smu);
 
 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state);
 
-int smu_baco_reset(struct smu_context *smu);
+int smu_baco_enter(struct smu_context *smu);
+int smu_baco_exit(struct smu_context *smu);
 
 int smu_mode2_reset(struct smu_context *smu);
 
@@ -692,6 +710,8 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			    uint32_t min, uint32_t max);
 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			    uint32_t min, uint32_t max);
+int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
+			    uint32_t *min_value, uint32_t *max_value);
 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
 int smu_set_display_count(struct smu_context *smu, uint32_t count);
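
The new driver_table member above acts as one shared staging buffer: instead of each logical table owning its own VRAM allocation, the driver stages bytes in a single buffer and the table_id sent with the transfer message tells the firmware which table they belong to. The sketch below models only that staging idea in userspace; the enum values, buffer size and function are invented stand-ins, not the real smu_update_table() path.

/* Illustrative sketch of a shared staging buffer keyed by table_id. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

enum table_id { TABLE_WATERMARKS, TABLE_OVERDRIVE, TABLE_COUNT };

struct staging {
	uint8_t buf[256];	/* stand-in for the driver table in VRAM */
};

/* drv_to_smu = 1: driver -> firmware; 0: firmware -> driver (no-op here). */
static int update_table(struct staging *st, enum table_id id,
			const void *data, size_t len, int drv_to_smu)
{
	if (len > sizeof(st->buf))
		return -1;
	if (drv_to_smu)
		memcpy(st->buf, data, len);
	printf("transfer table %d (%zu bytes) %s firmware\n",
	       (int)id, len, drv_to_smu ? "to" : "from");
	return 0;
}

int main(void)
{
	struct staging st;
	uint32_t od[4] = { 1500, 1750, 1100, 0 };

	return update_table(&st, TABLE_OVERDRIVE, od, sizeof(od), 1);
}
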
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index af97767..2ffb666 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -741,6 +741,7 @@ struct pp_hwmgr {
 	uint32_t smu_version;
 	bool not_vf;
 	bool pm_en;
+	bool pp_one_vf;
 	struct mutex smu_lock;
 
 	uint32_t pp_table_version;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
index a886f06..ce5b501 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -622,8 +622,14 @@ typedef struct {
   uint16_t     PccThresholdHigh;
   uint32_t     PaddingAPCC[6];  //FIXME pending SPEC
 
+  // OOB Settings
+  uint16_t BasePerformanceCardPower;
+  uint16_t MaxPerformanceCardPower;
+  uint16_t BasePerformanceFrequencyCap;   //In Mhz
+  uint16_t MaxPerformanceFrequencyCap;    //In Mhz
+
   // SECTION: Reserved
-  uint32_t     Reserved[11];
+  uint32_t     Reserved[9];
 
   // SECTION: BOARD PARAMETERS
 
@@ -823,7 +829,6 @@ typedef struct {
   uint32_t MmHubPadding[8]; // SMU internal use
 } AvfsFuseOverride_t;
 
-/* NOT CURRENTLY USED
 typedef struct {
   uint8_t   Gfx_ActiveHystLimit;
   uint8_t   Gfx_IdleHystLimit;
@@ -866,7 +871,6 @@ typedef struct {
 
   uint32_t  MmHubPadding[8]; // SMU internal use
 } DpmActivityMonitorCoeffInt_t;
-*/
 
 // These defines are used with the following messages:
 // SMC_MSG_TransferTableDram2Smu
@@ -878,11 +882,11 @@ typedef struct {
 #define TABLE_PMSTATUSLOG             4
 #define TABLE_SMU_METRICS             5
 #define TABLE_DRIVER_SMU_CONFIG       6
-//#define TABLE_ACTIVITY_MONITOR_COEFF  7
 #define TABLE_OVERDRIVE               7
 #define TABLE_WAFL_XGMI_TOPOLOGY      8
 #define TABLE_I2C_COMMANDS            9
-#define TABLE_COUNT                   10
+#define TABLE_ACTIVITY_MONITOR_COEFF  10
+#define TABLE_COUNT                   11
 
 // These defines are used with the SMC_MSG_SetUclkFastSwitch message.
 typedef enum {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
index c27c828..2f85a34 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
@@ -27,7 +27,7 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if 
 // any structure is changed in this file
-#define SMU12_DRIVER_IF_VERSION 10
+#define SMU12_DRIVER_IF_VERSION 11
 
 typedef struct {
   int32_t value;
@@ -192,6 +192,11 @@ typedef struct {
   uint16_t SocTemperature;              //[centi-Celsius]
   uint16_t ThrottlerStatus;
   uint16_t spare;
+
+  uint16_t StapmOriginalLimit;          //[mW]
+  uint16_t StapmCurrentLimit;           //[mW]
+  uint16_t ApuPower;              //[mW]
+  uint16_t dGpuPower;               //[mW]
 } SmuMetrics_t;
 
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
index d8c9b7f..a5b4df1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
@@ -170,6 +170,8 @@
 	__SMU_DUMMY_MAP(SetSoftMinJpeg),              \
 	__SMU_DUMMY_MAP(SetHardMinFclkByFreq),        \
 	__SMU_DUMMY_MAP(DFCstateControl), \
+	__SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \
+	__SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)	SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 7198442..d5314d1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -27,7 +27,7 @@
 
 #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
 #define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x10
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x12
 #define SMU11_DRIVER_IF_VERSION_NV10 0x33
 #define SMU11_DRIVER_IF_VERSION_NV14 0x34
 
@@ -170,6 +170,8 @@ int smu_v11_0_write_pptable(struct smu_context *smu);
 
 int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu);
 
+int smu_v11_0_set_driver_table_location(struct smu_context *smu);
+
 int smu_v11_0_set_tool_table_location(struct smu_context *smu);
 
 int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
@@ -247,7 +249,8 @@ enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu);
 
 int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
 
-int smu_v11_0_baco_reset(struct smu_context *smu);
+int smu_v11_0_baco_enter(struct smu_context *smu);
+int smu_v11_0_baco_exit(struct smu_context *smu);
 
 int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
 						 uint32_t *min, uint32_t *max);
@@ -261,4 +264,7 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize,
 
 uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
 
+int smu_v11_0_set_performance_level(struct smu_context *smu,
+				    enum amd_dpm_forced_level level);
+
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
index 373861d..406bfd1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
@@ -120,7 +120,10 @@
 #define PPSMC_MSG_GetVoltageByDpmOverdrive       0x45
 #define PPSMC_MSG_BacoAudioD3PME                 0x48
 
-#define PPSMC_Message_Count                      0x49
+#define PPSMC_MSG_DALDisableDummyPstateChange    0x49
+#define PPSMC_MSG_DALEnableDummyPstateChange     0x4A
+
+#define PPSMC_Message_Count                      0x4B
 
 typedef uint32_t PPSMC_Result;
 typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index 9d81d78..d79e54b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -57,8 +57,14 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate);
 
 int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate);
 
+int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate);
+
 int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable);
 
+int smu_v12_0_read_sensor(struct smu_context *smu,
+			  enum amd_pp_sensors sensor,
+			  void *data, uint32_t *size);
+
 uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu);
 
 int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable);
@@ -69,6 +75,13 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu);
 
 int smu_v12_0_populate_smc_tables(struct smu_context *smu);
 
+int smu_v12_0_get_enabled_mask(struct smu_context *smu,
+				      uint32_t *feature_mask, uint32_t num);
+
+int smu_v12_0_get_current_clk_freq(struct smu_context *smu,
+					  enum smu_clk_type clk_id,
+					  uint32_t *value);
+
 int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
 						 uint32_t *min, uint32_t *max);
 
@@ -77,4 +90,6 @@ int smu_v12_0_mode2_reset(struct smu_context *smu);
 int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			    uint32_t min, uint32_t max);
 
+int smu_v12_0_set_driver_table_location(struct smu_context *smu);
+
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index ca62e92..19a9846 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -119,6 +119,10 @@ static struct smu_11_0_cmn2aisc_mapping navi10_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(PowerDownJpeg,		PPSMC_MSG_PowerDownJpeg),
 	MSG_MAP(BacoAudioD3PME,		PPSMC_MSG_BacoAudioD3PME),
 	MSG_MAP(ArmD3,			PPSMC_MSG_ArmD3),
+	MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange),
+	MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE,	PPSMC_MSG_DALEnableDummyPstateChange),
+	MSG_MAP(GetVoltageByDpm,		     PPSMC_MSG_GetVoltageByDpm),
+	MSG_MAP(GetVoltageByDpmOverdrive,	     PPSMC_MSG_GetVoltageByDpmOverdrive),
 };
 
 static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
@@ -384,8 +388,10 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
 
 	if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
-		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT)
-				| FEATURE_MASK(FEATURE_JPEG_PG_BIT);
+		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);
+
+	if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
+		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);
 
 	/* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
 	if (is_asic_secure(smu)) {
@@ -553,6 +559,10 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
 		return -ENOMEM;
 	smu_table->metrics_time = 0;
 
+	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+	if (!smu_table->watermarks_table)
+		return -ENOMEM;
+
 	return 0;
 }
 
@@ -668,6 +678,31 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
 	return ret;
 }
 
+static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
+{
+	struct smu_power_context *smu_power = &smu->smu_power;
+	struct smu_power_gate *power_gate = &smu_power->power_gate;
+	int ret = 0;
+
+	if (enable) {
+		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+			ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg);
+			if (ret)
+				return ret;
+		}
+		power_gate->jpeg_gated = false;
+	} else {
+		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg);
+			if (ret)
+				return ret;
+		}
+		power_gate->jpeg_gated = true;
+	}
+
+	return ret;
+}
+
 static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
 				       enum smu_clk_type clk_type,
 				       uint32_t *value)
@@ -706,6 +741,15 @@ static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_tabl
 	return od_table->cap[feature];
 }
 
+static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
+					enum SMU_11_0_ODSETTING_ID setting,
+					uint32_t *min, uint32_t *max)
+{
+	if (min)
+		*min = od_table->min[setting];
+	if (max)
+		*max = od_table->max[setting];
+}
 
 static int navi10_print_clk_levels(struct smu_context *smu,
 			enum smu_clk_type clk_type, char *buf)
@@ -724,6 +768,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 	OverDriveTable_t *od_table =
 		(OverDriveTable_t *)table_context->overdrive_table;
 	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
+	uint32_t min_value, max_value;
 
 	switch (clk_type) {
 	case SMU_GFXCLK:
@@ -812,7 +857,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
 			break;
 		size += sprintf(buf + size, "OD_MCLK:\n");
-		size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax);
+		size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
 		break;
 	case SMU_OD_VDDC_CURVE:
 		if (!smu->od_enabled || !od_table || !od_settings)
@@ -837,6 +882,55 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 			size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
 		}
 		break;
+	case SMU_OD_RANGE:
+		if (!smu->od_enabled || !od_table || !od_settings)
+			break;
+		size = sprintf(buf, "%s:\n", "OD_RANGE");
+
+		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
+						    &min_value, NULL);
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
+						    NULL, &max_value);
+			size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+					min_value, max_value);
+		}
+
+		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
+						    &min_value, &max_value);
+			size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+					min_value, max_value);
+		}
+
+		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
+						    &min_value, &max_value);
+			size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+					min_value, max_value);
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
+						    &min_value, &max_value);
+			size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+					min_value, max_value);
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
+						    &min_value, &max_value);
+			size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+					min_value, max_value);
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
+						    &min_value, &max_value);
+			size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+					min_value, max_value);
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
+						    &min_value, &max_value);
+			size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+					min_value, max_value);
+			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
+						    &min_value, &max_value);
+			size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+					min_value, max_value);
+		}
+
+		break;
 	default:
 		break;
 	}
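
The SMU_OD_RANGE output added above reads each overdrive setting's board-defined [min, max] through a small helper that only fills the pointers the caller passes. A standalone sketch of that helper and of how the SCLK line combines the minimum of GFXCLKFMIN with the maximum of GFXCLKFMAX; the setting IDs and limits here are invented for the example.

/* Illustrative sketch of per-setting overdrive range lookup. */
#include <stdint.h>
#include <stdio.h>

enum od_setting { OD_GFXCLK_FMIN, OD_GFXCLK_FMAX, OD_UCLK_FMAX, OD_SETTING_COUNT };

struct od_limits {
	uint32_t min[OD_SETTING_COUNT];
	uint32_t max[OD_SETTING_COUNT];
};

static void od_get_range(const struct od_limits *od, enum od_setting s,
			 uint32_t *min, uint32_t *max)
{
	if (min)
		*min = od->min[s];
	if (max)
		*max = od->max[s];
}

int main(void)
{
	const struct od_limits od = {
		.min = { 300,  800,  625 },
		.max = { 1200, 2100, 1000 },
	};
	uint32_t lo, hi;

	/* SCLK range: GFXCLKFMIN's minimum paired with GFXCLKFMAX's maximum. */
	od_get_range(&od, OD_GFXCLK_FMIN, &lo, NULL);
	od_get_range(&od, OD_GFXCLK_FMAX, NULL, &hi);
	printf("SCLK: %7uMhz %10uMhz\n", (unsigned)lo, (unsigned)hi);

	od_get_range(&od, OD_UCLK_FMAX, &lo, &hi);
	printf("MCLK: %7uMhz %10uMhz\n", (unsigned)lo, (unsigned)hi);
	return 0;
}
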
@@ -918,6 +1012,8 @@ static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
 	case SMU_GFXCLK:
 	case SMU_DCEFCLK:
 	case SMU_SOCCLK:
+	case SMU_MCLK:
+	case SMU_UCLK:
 		ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
 		if (ret)
 			return ret;
@@ -1350,7 +1446,7 @@ static int navi10_get_profiling_clk_mask(struct smu_context *smu,
 	return ret;
 }
 
-static int navi10_notify_smc_dispaly_config(struct smu_context *smu)
+static int navi10_notify_smc_display_config(struct smu_context *smu)
 {
 	struct smu_clocks min_clocks = {0};
 	struct pp_display_clock_request clock_req;
@@ -1555,12 +1651,44 @@ static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_
 	return 0;
 }
 
-static int navi10_set_peak_clock_by_device(struct smu_context *smu)
+static int navi10_set_performance_level(struct smu_context *smu,
+					enum amd_dpm_forced_level level);
+
+static int navi10_set_standard_performance_level(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;
 	uint32_t sclk_freq = 0, uclk_freq = 0;
-	uint32_t uclk_level = 0;
+
+	switch (adev->asic_type) {
+	case CHIP_NAVI10:
+		sclk_freq = NAVI10_UMD_PSTATE_PROFILING_GFXCLK;
+		uclk_freq = NAVI10_UMD_PSTATE_PROFILING_MEMCLK;
+		break;
+	case CHIP_NAVI14:
+		sclk_freq = NAVI14_UMD_PSTATE_PROFILING_GFXCLK;
+		uclk_freq = NAVI14_UMD_PSTATE_PROFILING_MEMCLK;
+		break;
+	default:
+		/* by default, this is the same as the auto performance level */
+		return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
+	}
+
+	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+	if (ret)
+		return ret;
+	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static int navi10_set_peak_performance_level(struct smu_context *smu)
+{
+	struct amdgpu_device *adev = smu->adev;
+	int ret = 0;
+	uint32_t sclk_freq = 0, uclk_freq = 0;
 
 	switch (adev->asic_type) {
 	case CHIP_NAVI10:
@@ -1601,14 +1729,16 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
 			break;
 		}
 		break;
+	case CHIP_NAVI12:
+		sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK;
+		break;
 	default:
-		return -EINVAL;
+		ret = smu_get_dpm_level_range(smu, SMU_SCLK, NULL, &sclk_freq);
+		if (ret)
+			return ret;
 	}
 
-	ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
-	if (ret)
-		return ret;
-	ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq);
+	ret = smu_get_dpm_level_range(smu, SMU_UCLK, NULL, &uclk_freq);
 	if (ret)
 		return ret;
 
@@ -1622,19 +1752,45 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
 	return ret;
 }
 
-static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+static int navi10_set_performance_level(struct smu_context *smu,
+					enum amd_dpm_forced_level level)
 {
 	int ret = 0;
+	uint32_t sclk_mask, mclk_mask, soc_mask;
 
 	switch (level) {
-	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-		ret = navi10_set_peak_clock_by_device(smu);
+	case AMD_DPM_FORCED_LEVEL_HIGH:
+		ret = smu_force_dpm_limit_value(smu, true);
 		break;
+	case AMD_DPM_FORCED_LEVEL_LOW:
+		ret = smu_force_dpm_limit_value(smu, false);
+		break;
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+		ret = smu_unforce_dpm_levels(smu);
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+		ret = navi10_set_standard_performance_level(smu);
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+		ret = smu_get_profiling_clk_mask(smu, level,
+						 &sclk_mask,
+						 &mclk_mask,
+						 &soc_mask);
+		if (ret)
+			return ret;
+		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+		ret = navi10_set_peak_performance_level(smu);
+		break;
+	case AMD_DPM_FORCED_LEVEL_MANUAL:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
 	default:
-		ret = -EINVAL;
 		break;
 	}
-
 	return ret;
 }
 
@@ -1780,6 +1936,28 @@ static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_tab
 	return 0;
 }
 
+static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
+						     uint16_t *voltage,
+						     uint32_t freq)
+{
+	uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16);
+	uint32_t value = 0;
+	int ret;
+
+	ret = smu_send_smc_msg_with_param(smu,
+					  SMU_MSG_GetVoltageByDpm,
+					  param);
+	if (ret) {
+		pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
+		return ret;
+	}
+
+	smu_read_smc_arg(smu, &value);
+	*voltage = (uint16_t)value;
+
+	return 0;
+}
+
 static int navi10_setup_od_limits(struct smu_context *smu) {
 	struct smu_11_0_overdrive_table *overdrive_table = NULL;
 	struct smu_11_0_powerplay_table *powerplay_table = NULL;
@@ -1799,23 +1977,54 @@ static int navi10_setup_od_limits(struct smu_context *smu) {
 }
 
 static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) {
-	OverDriveTable_t *od_table;
+	OverDriveTable_t *od_table, *boot_od_table;
 	int ret = 0;
 
 	ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
 	if (ret)
 		return ret;
 
+	od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
+	boot_od_table = (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
 	if (initialize) {
 		ret = navi10_setup_od_limits(smu);
 		if (ret) {
 			pr_err("Failed to retrieve board OD limits\n");
 			return ret;
 		}
+		if (od_table) {
+			if (!od_table->GfxclkVolt1) {
+				ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
+										&od_table->GfxclkVolt1,
+										od_table->GfxclkFreq1);
+				if (ret)
+					od_table->GfxclkVolt1 = 0;
+				if (boot_od_table)
+					boot_od_table->GfxclkVolt1 = od_table->GfxclkVolt1;
+			}
 
+			if (!od_table->GfxclkVolt2) {
+				ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
+										&od_table->GfxclkVolt2,
+										od_table->GfxclkFreq2);
+				if (ret)
+					od_table->GfxclkVolt2 = 0;
+				if (boot_od_table)
+					boot_od_table->GfxclkVolt2 = od_table->GfxclkVolt2;
+			}
+
+			if (!od_table->GfxclkVolt3) {
+				ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
+										&od_table->GfxclkVolt3,
+										od_table->GfxclkFreq3);
+				if (ret)
+					od_table->GfxclkVolt3 = 0;
+				if (boot_od_table)
+					boot_od_table->GfxclkVolt3 = od_table->GfxclkVolt3;
+			}
+		}
 	}
 
-	od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
 	if (od_table) {
 		navi10_dump_od_table(od_table);
 	}
@@ -1911,6 +2120,13 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
 			return ret;
 		od_table->UclkFmax = input[1];
 		break;
+	case PP_OD_RESTORE_DEFAULT_TABLE:
+		if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
+			pr_err("Overdrive table was not initialized!\n");
+			return -EINVAL;
+		}
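+		/* Restore the user-visible overdrive settings from the boot-time copy */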
+		memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
+		break;
 	case PP_OD_COMMIT_DPM_TABLE:
 		navi10_dump_od_table(od_table);
 		ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
@@ -2000,6 +2216,61 @@ static int navi10_run_btc(struct smu_context *smu)
 	return ret;
 }
 
+static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
+{
+	int result = 0;
+
+	if (!enable)
+		result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE);
+	else
+		result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE);
+
+	return result;
+}
+
+static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
+{
+	uint32_t uclk_count, uclk_min, uclk_max;
+	uint32_t smu_version;
+	int ret = 0;
+
+	ret = smu_get_smc_version(smu, NULL, &smu_version);
+	if (ret)
+		return ret;
+
+	/* This workaround is only available for SMC firmware 42.50 or later */
+	if (smu_version < 0x2A3200)
+		return 0;
+
+	ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
+	if (ret)
+		return ret;
+
+	ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
+	if (ret)
+		return ret;
+
+	ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
+	if (ret)
+		return ret;
+
+	/* Force UCLK out of the highest DPM */
+	ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_min);
+	if (ret)
+		return ret;
+
+	/* Revert the UCLK Hardmax */
+	ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_max);
+	if (ret)
+		return ret;
+
+	/*
+	 * In this case, the SMU already disabled the dummy pstate during
+	 * enablement of UCLK DPM, so we have to re-enable it here.
+	 */
+	return navi10_dummy_pstate_control(smu, true);
+}
+
 static const struct pptable_funcs navi10_ppt_funcs = {
 	.tables_init = navi10_tables_init,
 	.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -2015,6 +2286,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
 	.set_default_dpm_table = navi10_set_default_dpm_table,
 	.dpm_set_uvd_enable = navi10_dpm_set_uvd_enable,
+	.dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
 	.get_current_clk_freq_by_table = navi10_get_current_clk_freq_by_table,
 	.print_clk_levels = navi10_print_clk_levels,
 	.force_clk_levels = navi10_force_clk_levels,
@@ -2022,7 +2294,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
 	.pre_display_config_changed = navi10_pre_display_config_changed,
 	.display_config_changed = navi10_display_config_changed,
-	.notify_smc_dispaly_config = navi10_notify_smc_dispaly_config,
+	.notify_smc_display_config = navi10_notify_smc_display_config,
 	.force_dpm_limit_value = navi10_force_dpm_limit_value,
 	.unforce_dpm_levels = navi10_unforce_dpm_levels,
 	.is_dpm_running = navi10_is_dpm_running,
@@ -2055,6 +2327,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.check_fw_version = smu_v11_0_check_fw_version,
 	.write_pptable = smu_v11_0_write_pptable,
 	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+	.set_driver_table_location = smu_v11_0_set_driver_table_location,
 	.set_tool_table_location = smu_v11_0_set_tool_table_location,
 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 	.system_features_control = smu_v11_0_system_features_control,
@@ -2083,7 +2356,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.baco_is_support= smu_v11_0_baco_is_support,
 	.baco_get_state = smu_v11_0_baco_get_state,
 	.baco_set_state = smu_v11_0_baco_set_state,
-	.baco_reset = smu_v11_0_baco_reset,
+	.baco_enter = smu_v11_0_baco_enter,
+	.baco_exit = smu_v11_0_baco_exit,
 	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
 	.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
@@ -2091,6 +2365,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.od_edit_dpm_table = navi10_od_edit_dpm_table,
 	.get_pptable_power_limit = navi10_get_pptable_power_limit,
 	.run_btc = navi10_run_btc,
+	.disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index ec03c79..2abb4ba 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -27,12 +27,26 @@
 #define NAVI10_PEAK_SCLK_XT  		(1755)
 #define NAVI10_PEAK_SCLK_XL  		(1625)
 
+#define NAVI10_UMD_PSTATE_PROFILING_GFXCLK    (1300)
+#define NAVI10_UMD_PSTATE_PROFILING_SOCCLK    (980)
+#define NAVI10_UMD_PSTATE_PROFILING_MEMCLK    (625)
+#define NAVI10_UMD_PSTATE_PROFILING_VCLK      (980)
+#define NAVI10_UMD_PSTATE_PROFILING_DCLK      (850)
+
 #define NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK      (1670)
 #define NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK     (1448)
 #define NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK     (1181)
 #define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK     (1717)
 #define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK      (1448)
 
+#define NAVI14_UMD_PSTATE_PROFILING_GFXCLK    (1200)
+#define NAVI14_UMD_PSTATE_PROFILING_SOCCLK    (900)
+#define NAVI14_UMD_PSTATE_PROFILING_MEMCLK    (600)
+#define NAVI14_UMD_PSTATE_PROFILING_VCLK      (900)
+#define NAVI14_UMD_PSTATE_PROFILING_DCLK      (800)
+
+#define NAVI12_UMD_PSTATE_PEAK_GFXCLK     (1100)
+
 #define NAVI10_VOLTAGE_SCALE (4)
 
 #define smnPCIE_LC_SPEED_CNTL			0x11140290
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 977bdd9..861e641 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -31,6 +31,9 @@
 #include "renoir_ppt.h"
 
 
+#define CLK_MAP(clk, index) \
+	[SMU_##clk] = {1, (index)}
+
 #define MSG_MAP(msg, index) \
 	[SMU_MSG_##msg] = {1, (index)}
 
@@ -104,6 +107,14 @@ static struct smu_12_0_cmn2aisc_mapping renoir_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(SetHardMinFclkByFreq,           PPSMC_MSG_SetHardMinFclkByFreq),
 };
 
+static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
+	CLK_MAP(GFXCLK, CLOCK_GFXCLK),
+	CLK_MAP(SCLK, CLOCK_GFXCLK),
+	CLK_MAP(SOCCLK, CLOCK_SOCCLK),
+	CLK_MAP(UCLK, CLOCK_UMCCLK),
+	CLK_MAP(MCLK, CLOCK_UMCCLK),
+};
+
 static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
 	TAB_MAP_VALID(WATERMARKS),
 	TAB_MAP_INVALID(CUSTOM_DPM),
@@ -125,6 +136,21 @@ static int renoir_get_smu_msg_index(struct smu_context *smc, uint32_t index)
 	return mapping.map_to;
 }
 
+static int renoir_get_smu_clk_index(struct smu_context *smc, uint32_t index)
+{
+	struct smu_12_0_cmn2aisc_mapping mapping;
+
+	if (index >= SMU_CLK_COUNT)
+		return -EINVAL;
+
+	mapping = renoir_clk_map[index];
+	if (!(mapping.valid_mapping)) {
+		return -EINVAL;
+	}
+
+	return mapping.map_to;
+}
+
 static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index)
 {
 	struct smu_12_0_cmn2aisc_mapping mapping;
@@ -139,6 +165,30 @@ static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index)
 	return mapping.map_to;
 }
 
+static int renoir_get_metrics_table(struct smu_context *smu,
+				    SmuMetrics_t *metrics_table)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	int ret = 0;
+
+	mutex_lock(&smu->metrics_lock);
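+	/* Refresh the cached copy only if it is missing or older than 100 ms */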
+	if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
+		ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
+				(void *)smu_table->metrics_table, false);
+		if (ret) {
+			pr_info("Failed to export SMU metrics table!\n");
+			mutex_unlock(&smu->metrics_lock);
+			return ret;
+		}
+		smu_table->metrics_time = jiffies;
+	}
+
+	memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+	mutex_unlock(&smu->metrics_lock);
+
+	return ret;
+}
+
 static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
@@ -154,6 +204,15 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
 	if (!smu_table->clocks_table)
 		return -ENOMEM;
 
+	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+	if (!smu_table->metrics_table)
+		return -ENOMEM;
+	smu_table->metrics_time = 0;
+
+	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+	if (!smu_table->watermarks_table)
+		return -ENOMEM;
+
 	return 0;
 }
 
@@ -187,8 +246,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 
 	memset(&metrics, 0, sizeof(metrics));
 
-	ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
-			       (void *)&metrics, false);
+	ret = renoir_get_metrics_table(smu, &metrics);
 	if (ret)
 		return ret;
 
@@ -301,6 +359,51 @@ static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
 	return ret;
 }
 
+static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
+{
+	struct smu_power_context *smu_power = &smu->smu_power;
+	struct smu_power_gate *power_gate = &smu_power->power_gate;
+	int ret = 0;
+
+	if (enable) {
+		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+			if (ret)
+				return ret;
+		}
+		power_gate->jpeg_gated = false;
+	} else {
+		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+			if (ret)
+				return ret;
+		}
+		power_gate->jpeg_gated = true;
+	}
+
+	return ret;
+}
+
+static int renoir_get_current_clk_freq_by_table(struct smu_context *smu,
+				       enum smu_clk_type clk_type,
+				       uint32_t *value)
+{
+	int ret = 0, clk_id = 0;
+	SmuMetrics_t metrics;
+
+	ret = renoir_get_metrics_table(smu, &metrics);
+	if (ret)
+		return ret;
+
+	clk_id = smu_clk_get_index(smu, clk_type);
+	if (clk_id < 0)
+		return clk_id;
+
+	*value = metrics.ClockFrequency[clk_id];
+
+	return ret;
+}
+
 static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
 {
 	int ret = 0, i = 0;
@@ -361,6 +464,50 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
 	return ret;
 }
 
+static int renoir_get_gpu_temperature(struct smu_context *smu, uint32_t *value)
+{
+	int ret = 0;
+	SmuMetrics_t metrics;
+
+	if (!value)
+		return -EINVAL;
+
+	ret = renoir_get_metrics_table(smu, &metrics);
+	if (ret)
+		return ret;
+
+	*value = (metrics.GfxTemperature / 100) *
+		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+	return 0;
+}
+
+static int renoir_get_current_activity_percent(struct smu_context *smu,
+					       enum amd_pp_sensors sensor,
+					       uint32_t *value)
+{
+	int ret = 0;
+	SmuMetrics_t metrics;
+
+	if (!value)
+		return -EINVAL;
+
+	ret = renoir_get_metrics_table(smu, &metrics);
+	if (ret)
+		return ret;
+
+	switch (sensor) {
+	case AMDGPU_PP_SENSOR_GPU_LOAD:
+		*value = metrics.AverageGfxActivity / 100;
+		break;
+	default:
+		pr_err("Invalid sensor for retrieving clock activity\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile)
 {
 
@@ -565,19 +712,43 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
 	return ret;
 }
 
-static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+static int renoir_set_performance_level(struct smu_context *smu,
+					enum amd_dpm_forced_level level)
 {
 	int ret = 0;
+	uint32_t sclk_mask, mclk_mask, soc_mask;
 
 	switch (level) {
+	case AMD_DPM_FORCED_LEVEL_HIGH:
+		ret = smu_force_dpm_limit_value(smu, true);
+		break;
+	case AMD_DPM_FORCED_LEVEL_LOW:
+		ret = smu_force_dpm_limit_value(smu, false);
+		break;
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+		ret = smu_unforce_dpm_levels(smu);
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+		ret = smu_get_profiling_clk_mask(smu, level,
+						 &sclk_mask,
+						 &mclk_mask,
+						 &soc_mask);
+		if (ret)
+			return ret;
+		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+		break;
 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
 		ret = renoir_set_peak_clock_by_device(smu);
 		break;
+	case AMD_DPM_FORCED_LEVEL_MANUAL:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
 	default:
-		ret = -EINVAL;
 		break;
 	}
-
 	return ret;
 }
 
@@ -636,9 +807,17 @@ static int renoir_set_watermarks_table(
 	}
 
 	/* pass data to smu controller */
-	ret = smu_write_watermarks_table(smu);
+	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+			!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+		ret = smu_write_watermarks_table(smu);
+		if (ret) {
+			pr_err("Failed to update WMTABLE!\n");
+			return ret;
+		}
+		smu->watermarks_bitmap |= WATERMARKS_LOADED;
+	}
 
-	return ret;
+	return 0;
 }
 
 static int renoir_get_power_profile_mode(struct smu_context *smu,
@@ -674,8 +853,36 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
 	return size;
 }
 
+static int renoir_read_sensor(struct smu_context *smu,
+				 enum amd_pp_sensors sensor,
+				 void *data, uint32_t *size)
+{
+	int ret = 0;
+
+	if (!data || !size)
+		return -EINVAL;
+
+	mutex_lock(&smu->sensor_lock);
+	switch (sensor) {
+	case AMDGPU_PP_SENSOR_GPU_LOAD:
+		ret = renoir_get_current_activity_percent(smu, sensor, (uint32_t *)data);
+		*size = 4;
+		break;
+	case AMDGPU_PP_SENSOR_GPU_TEMP:
+		ret = renoir_get_gpu_temperature(smu, (uint32_t *)data);
+		*size = 4;
+		break;
+	default:
+		ret = smu_v12_0_read_sensor(smu, sensor, data, size);
+	}
+	mutex_unlock(&smu->sensor_lock);
+
+	return ret;
+}
+
 static const struct pptable_funcs renoir_ppt_funcs = {
 	.get_smu_msg_index = renoir_get_smu_msg_index,
+	.get_smu_clk_index = renoir_get_smu_clk_index,
 	.get_smu_table_index = renoir_get_smu_table_index,
 	.tables_init = renoir_tables_init,
 	.set_power_state = NULL,
@@ -683,6 +890,8 @@ static const struct pptable_funcs renoir_ppt_funcs = {
 	.print_clk_levels = renoir_print_clk_levels,
 	.get_current_power_state = renoir_get_current_power_state,
 	.dpm_set_uvd_enable = renoir_dpm_set_uvd_enable,
+	.dpm_set_jpeg_enable = renoir_dpm_set_jpeg_enable,
+	.get_current_clk_freq_by_table = renoir_get_current_clk_freq_by_table,
 	.force_dpm_limit_value = renoir_force_dpm_limit_value,
 	.unforce_dpm_levels = renoir_unforce_dpm_levels,
 	.get_workload_type = renoir_get_workload_type,
@@ -693,10 +902,12 @@ static const struct pptable_funcs renoir_ppt_funcs = {
 	.get_dpm_clock_table = renoir_get_dpm_clock_table,
 	.set_watermarks_table = renoir_set_watermarks_table,
 	.get_power_profile_mode = renoir_get_power_profile_mode,
+	.read_sensor = renoir_read_sensor,
 	.check_fw_status = smu_v12_0_check_fw_status,
 	.check_fw_version = smu_v12_0_check_fw_version,
 	.powergate_sdma = smu_v12_0_powergate_sdma,
 	.powergate_vcn = smu_v12_0_powergate_vcn,
+	.powergate_jpeg = smu_v12_0_powergate_jpeg,
 	.send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
 	.read_smc_arg = smu_v12_0_read_arg,
 	.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
@@ -704,9 +915,12 @@ static const struct pptable_funcs renoir_ppt_funcs = {
 	.init_smc_tables = smu_v12_0_init_smc_tables,
 	.fini_smc_tables = smu_v12_0_fini_smc_tables,
 	.populate_smc_tables = smu_v12_0_populate_smc_tables,
+	.get_enabled_mask = smu_v12_0_get_enabled_mask,
+	.get_current_clk_freq = smu_v12_0_get_current_clk_freq,
 	.get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
 	.mode2_reset = smu_v12_0_mode2_reset,
 	.set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
+	.set_driver_table_location = smu_v12_0_set_driver_table_location,
 };
 
 void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 8872f8b..7bd200f 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -42,6 +42,8 @@
 	((smu)->ppt_funcs->powergate_sdma ? (smu)->ppt_funcs->powergate_sdma((smu), (gate)) : 0)
 #define smu_powergate_vcn(smu, gate) \
 	((smu)->ppt_funcs->powergate_vcn ? (smu)->ppt_funcs->powergate_vcn((smu), (gate)) : 0)
+#define smu_powergate_jpeg(smu, gate) \
+	((smu)->ppt_funcs->powergate_jpeg ? (smu)->ppt_funcs->powergate_jpeg((smu), (gate)) : 0)
 
 #define smu_get_vbios_bootup_values(smu) \
 	((smu)->ppt_funcs->get_vbios_bootup_values ? (smu)->ppt_funcs->get_vbios_bootup_values((smu)) : 0)
@@ -59,6 +61,8 @@
 	((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0)
 #define smu_set_min_dcef_deep_sleep(smu) \
 	((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_driver_table_location(smu) \
+	((smu)->ppt_funcs->set_driver_table_location ? (smu)->ppt_funcs->set_driver_table_location((smu)) : 0)
 #define smu_set_tool_table_location(smu) \
 	((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0)
 #define smu_notify_memory_pool_location(smu) \
@@ -127,8 +131,8 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
 	((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
 #define smu_apply_clocks_adjust_rules(smu) \
 	((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
-#define smu_notify_smc_dispaly_config(smu) \
-	((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
+#define smu_notify_smc_display_config(smu) \
+	((smu)->ppt_funcs->notify_smc_display_config ? (smu)->ppt_funcs->notify_smc_display_config((smu)) : 0)
 #define smu_force_dpm_limit_value(smu, highest) \
 	((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
 #define smu_unforce_dpm_levels(smu) \
@@ -170,6 +174,8 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
 	((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
 #define smu_dpm_set_vce_enable(smu, enable) \
 	((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
+#define smu_dpm_set_jpeg_enable(smu, enable) \
+	((smu)->ppt_funcs->dpm_set_jpeg_enable ? (smu)->ppt_funcs->dpm_set_jpeg_enable((smu), (enable)) : 0)
 
 #define smu_set_watermarks_table(smu, tab, clock_ranges) \
 	((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
@@ -201,4 +207,7 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
 #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \
 		((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0)
 
+#define smu_disable_umc_cdr_12gbps_workaround(smu) \
+	((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0)
+
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index e4268a6..0dc4947 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -37,6 +37,7 @@
 #include "soc15_common.h"
 #include "atom.h"
 #include "amd_pcie.h"
+#include "amdgpu_ras.h"
 
 #include "asic_reg/thm/thm_11_0_2_offset.h"
 #include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -79,15 +80,13 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
 	for (i = 0; i < timeout; i++) {
 		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
 		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
-			break;
+			return cur_value == 0x1 ? 0 : -EIO;
+
 		udelay(1);
 	}
 
 	/* timeout means wrong logic */
-	if (i == timeout)
-		return -ETIME;
-
-	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+	return -ETIME;
 }
 
 int
@@ -103,9 +102,11 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
 		return index;
 
 	ret = smu_v11_0_wait_for_response(smu);
-	if (ret)
-		pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
-		       smu_get_message_name(smu, msg), index, param, ret);
+	if (ret) {
+		pr_err("Msg issuing pre-check failed and "
+		       "SMU may not be in the right state!\n");
+		return ret;
+	}
 
 	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
@@ -449,8 +450,10 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
 
 	kfree(smu_table->tables);
 	kfree(smu_table->metrics_table);
+	kfree(smu_table->watermarks_table);
 	smu_table->tables = NULL;
 	smu_table->metrics_table = NULL;
+	smu_table->watermarks_table = NULL;
 	smu_table->metrics_time = 0;
 
 	ret = smu_v11_0_fini_dpm_context(smu);
@@ -773,6 +776,24 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
 	return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
 }
 
+int smu_v11_0_set_driver_table_location(struct smu_context *smu)
+{
+	struct smu_table *driver_table = &smu->smu_table.driver_table;
+	int ret = 0;
+
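+	/* Report the driver table's DRAM address to the SMU as two 32-bit halves */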
+	if (driver_table->mc_address) {
+		ret = smu_send_smc_msg_with_param(smu,
+				SMU_MSG_SetDriverDramAddrHigh,
+				upper_32_bits(driver_table->mc_address));
+		if (!ret)
+			ret = smu_send_smc_msg_with_param(smu,
+				SMU_MSG_SetDriverDramAddrLow,
+				lower_32_bits(driver_table->mc_address));
+	}
+
+	return ret;
+}
+
 int smu_v11_0_set_tool_table_location(struct smu_context *smu)
 {
 	int ret = 0;
@@ -834,27 +855,33 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
 				      uint32_t *feature_mask, uint32_t num)
 {
 	uint32_t feature_mask_high = 0, feature_mask_low = 0;
+	struct smu_feature *feature = &smu->smu_feature;
 	int ret = 0;
 
 	if (!feature_mask || num < 2)
 		return -EINVAL;
 
-	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
-	if (ret)
-		return ret;
-	ret = smu_read_smc_arg(smu, &feature_mask_high);
-	if (ret)
-		return ret;
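+	/* Query the SMU only while no enabled-feature bitmap has been cached yet */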
+	if (bitmap_empty(feature->enabled, feature->feature_num)) {
+		ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
+		if (ret)
+			return ret;
+		ret = smu_read_smc_arg(smu, &feature_mask_high);
+		if (ret)
+			return ret;
 
-	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
-	if (ret)
-		return ret;
-	ret = smu_read_smc_arg(smu, &feature_mask_low);
-	if (ret)
-		return ret;
+		ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
+		if (ret)
+			return ret;
+		ret = smu_read_smc_arg(smu, &feature_mask_low);
+		if (ret)
+			return ret;
 
-	feature_mask[0] = feature_mask_low;
-	feature_mask[1] = feature_mask_high;
+		feature_mask[0] = feature_mask_low;
+		feature_mask[1] = feature_mask_high;
+	} else {
+		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
+			     feature->feature_num);
+	}
 
 	return ret;
 }
@@ -866,21 +893,24 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
 	uint32_t feature_mask[2];
 	int ret = 0;
 
-	if (smu->pm_enabled) {
-		ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
-					     SMU_MSG_DisableAllSmuFeatures));
-		if (ret)
-			return ret;
-	}
-
-	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+	ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+				     SMU_MSG_DisableAllSmuFeatures));
 	if (ret)
 		return ret;
 
-	bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
-		    feature->feature_num);
-	bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
-		    feature->feature_num);
+	if (en) {
+		ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+		if (ret)
+			return ret;
+
+		bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
+			    feature->feature_num);
+		bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
+			    feature->feature_num);
+	} else {
+		bitmap_zero(feature->enabled, feature->feature_num);
+		bitmap_zero(feature->supported, feature->feature_num);
+	}
 
 	return ret;
 }
@@ -1124,11 +1154,12 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
 	int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
 	int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
 	uint32_t val;
+	struct smu_table_context *table_context = &smu->smu_table;
+	struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
 
 	low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
 			range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
-	high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
-			range.max / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
+	high = min((uint16_t)SMU_THERMAL_MAXIMUM_ALERT_TEMP, powerplay_table->software_shutdown_temp);
 
 	if (low > high)
 		return -EINVAL;
@@ -1617,7 +1648,9 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
 	if (!baco_support)
 		return false;
 
-	if (!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+	/* Arcturus does not support this bit mask */
+	if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+	   !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
 		return false;
 
 	val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
@@ -1643,6 +1676,10 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
 {
 
 	struct smu_baco_context *smu_baco = &smu->smu_baco;
+	struct amdgpu_device *adev = smu->adev;
+	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+	uint32_t bif_doorbell_intr_cntl;
+	uint32_t data;
 	int ret = 0;
 
 	if (smu_v11_0_baco_get_state(smu) == state)
@@ -1650,10 +1687,37 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
 
 	mutex_lock(&smu_baco->mutex);
 
-	if (state == SMU_BACO_STATE_ENTER)
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, BACO_SEQ_BACO);
-	else
+	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+
+	if (state == SMU_BACO_STATE_ENTER) {
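+		/* Mask doorbell interrupts while the ASIC is in BACO */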
+		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+						BIF_DOORBELL_INT_CNTL,
+						DOORBELL_INTERRUPT_DISABLE, 1);
+		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+		if (!ras || !ras->supported) {
+			data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
+			data |= 0x80000000;
+			WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
+
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0);
+		} else {
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1);
+		}
+	} else {
 		ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
+		if (ret)
+			goto out;
+
+		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+						BIF_DOORBELL_INT_CNTL,
+						DOORBELL_INTERRUPT_DISABLE, 0);
+		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+		/* clear vbios scratch 6 and 7 for coming asic reinit */
+		WREG32(adev->bios_scratch_reg_offset + 6, 0);
+		WREG32(adev->bios_scratch_reg_offset + 7, 0);
+	}
 	if (ret)
 		goto out;
 
@@ -1663,13 +1727,17 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
 	return ret;
 }
 
-int smu_v11_0_baco_reset(struct smu_context *smu)
+int smu_v11_0_baco_enter(struct smu_context *smu)
 {
+	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;
 
-	ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
-	if (ret)
-		return ret;
+	/* Arcturus does not need this audio workaround */
+	if (adev->asic_type != CHIP_ARCTURUS) {
+		ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
+		if (ret)
+			return ret;
+	}
 
 	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
 	if (ret)
@@ -1677,6 +1745,13 @@ int smu_v11_0_baco_reset(struct smu_context *smu)
 
 	msleep(10);
 
+	return ret;
+}
+
+int smu_v11_0_baco_exit(struct smu_context *smu)
+{
+	int ret = 0;
+
 	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
 	if (ret)
 		return ret;
@@ -1807,6 +1882,12 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize,
 			pr_err("Failed to export overdrive table!\n");
 			return ret;
 		}
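+		/* Keep a pristine copy of the boot overdrive settings for later restore */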
+		if (!table_context->boot_overdrive_table) {
+			table_context->boot_overdrive_table = kmemdup(table_context->overdrive_table, overdrive_table_size, GFP_KERNEL);
+			if (!table_context->boot_overdrive_table) {
+				return -ENOMEM;
+			}
+		}
 	}
 	ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
 	if (ret) {
@@ -1815,3 +1896,42 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize,
 	}
 	return ret;
 }
+
+int smu_v11_0_set_performance_level(struct smu_context *smu,
+				    enum amd_dpm_forced_level level)
+{
+	int ret = 0;
+	uint32_t sclk_mask, mclk_mask, soc_mask;
+
+	switch (level) {
+	case AMD_DPM_FORCED_LEVEL_HIGH:
+		ret = smu_force_dpm_limit_value(smu, true);
+		break;
+	case AMD_DPM_FORCED_LEVEL_LOW:
+		ret = smu_force_dpm_limit_value(smu, false);
+		break;
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+		ret = smu_unforce_dpm_levels(smu);
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+		ret = smu_get_profiling_clk_mask(smu, level,
+						 &sclk_mask,
+						 &mclk_mask,
+						 &soc_mask);
+		if (ret)
+			return ret;
+		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+		break;
+	case AMD_DPM_FORCED_LEVEL_MANUAL:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+	default:
+		break;
+	}
+	return ret;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 094cfc4..870e6db 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -66,15 +66,13 @@ int smu_v12_0_wait_for_response(struct smu_context *smu)
 	for (i = 0; i < adev->usec_timeout; i++) {
 		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
 		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
-			break;
+			return cur_value == 0x1 ? 0 : -EIO;
+
 		udelay(1);
 	}
 
 	/* timeout means wrong logic */
-	if (i == adev->usec_timeout)
-		return -ETIME;
-
-	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+	return -ETIME;
 }
 
 int
@@ -90,9 +88,11 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu,
 		return index;
 
 	ret = smu_v12_0_wait_for_response(smu);
-	if (ret)
-		pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
-		       index, ret, param);
+	if (ret) {
+		pr_err("Msg issuing pre-check failed and "
+		       "SMU may not be in the right state!\n");
+		return ret;
+	}
 
 	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
@@ -159,7 +159,7 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
 
 int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
 {
-	if (!(smu->adev->flags & AMD_IS_APU))
+	if (!smu->is_apu)
 		return 0;
 
 	if (gate)
@@ -170,7 +170,7 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
 
 int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
 {
-	if (!(smu->adev->flags & AMD_IS_APU))
+	if (!smu->is_apu)
 		return 0;
 
 	if (gate)
@@ -179,6 +179,17 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
 		return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
 }
 
+int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
+{
+	if (!smu->is_apu)
+		return 0;
+
+	if (gate)
+		return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+	else
+		return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+}
+
 int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
 {
 	if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
@@ -188,6 +199,39 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
 		SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
 }
 
+int smu_v12_0_read_sensor(struct smu_context *smu,
+				 enum amd_pp_sensors sensor,
+				 void *data, uint32_t *size)
+{
+	int ret = 0;
+
+	if (!data || !size)
+		return -EINVAL;
+
+	switch (sensor) {
+	case AMDGPU_PP_SENSOR_GFX_MCLK:
+		ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
+		*size = 4;
+		break;
+	case AMDGPU_PP_SENSOR_GFX_SCLK:
+		ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
+		*size = 4;
+		break;
+	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
+		*(uint32_t *)data = 0;
+		*size = 4;
+		break;
+	default:
+		ret = smu_common_read_sensor(smu, sensor, data, size);
+		break;
+	}
+
+	if (ret)
+		*size = 0;
+
+	return ret;
+}
+
 /**
  * smu_v12_0_get_gfxoff_status - get gfxoff status
  *
@@ -274,18 +318,59 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu)
 int smu_v12_0_populate_smc_tables(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct smu_table *table = NULL;
-
-	table = &smu_table->tables[SMU_TABLE_DPMCLOCKS];
-	if (!table)
-		return -EINVAL;
-
-	if (!table->cpu_addr)
-		return -EINVAL;
 
 	return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
 }
 
+int smu_v12_0_get_enabled_mask(struct smu_context *smu,
+				      uint32_t *feature_mask, uint32_t num)
+{
+	uint32_t feature_mask_high = 0, feature_mask_low = 0;
+	int ret = 0;
+
+	if (!feature_mask || num < 2)
+		return -EINVAL;
+
+	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
+	if (ret)
+		return ret;
+	ret = smu_read_smc_arg(smu, &feature_mask_high);
+	if (ret)
+		return ret;
+
+	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
+	if (ret)
+		return ret;
+	ret = smu_read_smc_arg(smu, &feature_mask_low);
+	if (ret)
+		return ret;
+
+	feature_mask[0] = feature_mask_low;
+	feature_mask[1] = feature_mask_high;
+
+	return ret;
+}
+
+int smu_v12_0_get_current_clk_freq(struct smu_context *smu,
+					  enum smu_clk_type clk_id,
+					  uint32_t *value)
+{
+	int ret = 0;
+	uint32_t freq = 0;
+
+	if (clk_id >= SMU_CLK_COUNT || !value)
+		return -EINVAL;
+
+	ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
+	if (ret)
+		return ret;
+
+	freq *= 100;
+	*value = freq;
+
+	return ret;
+}
+
 int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
 						 uint32_t *min, uint32_t *max)
 {
@@ -421,3 +506,21 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 
 	return ret;
 }
+
+int smu_v12_0_set_driver_table_location(struct smu_context *smu)
+{
+	struct smu_table *driver_table = &smu->smu_table.driver_table;
+	int ret = 0;
+
+	if (driver_table->mc_address) {
+		ret = smu_send_smc_msg_with_param(smu,
+				SMU_MSG_SetDriverDramAddrHigh,
+				upper_32_bits(driver_table->mc_address));
+		if (!ret)
+			ret = smu_send_smc_msg_with_param(smu,
+				SMU_MSG_SetDriverDramAddrLow,
+				lower_32_bits(driver_table->mc_address));
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 15590fd..868e2d5 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -653,8 +653,8 @@ static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
 static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
 {
 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-	uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
-	uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+	uint16_t HiSidd;
+	uint16_t LoSidd;
 	struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
 
 	HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index da025b1..32ebb38 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -940,7 +940,7 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
 {
 	int result;
 	/* PP_Clocks minClocks; */
-	uint32_t threshold, mvdd;
+	uint32_t mvdd;
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -973,8 +973,6 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
 	level->VoltageDownHyst = 0;
 	level->PowerThrottle = 0;
 
-	threshold = clock * data->fast_watermark_threshold / 100;
-
 	data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
@@ -1501,7 +1499,7 @@ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
 	uint32_t dram_timing;
 	uint32_t dram_timing2;
 	uint32_t burstTime;
-	ULONG state, trrds, trrdl;
+	ULONG trrds, trrdl;
 	int result;
 
 	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
@@ -1513,7 +1511,6 @@ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
 	dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
 	burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
 
-	state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
 	trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
 	trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index aa0ee2b..2319400 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
 			priv->smu_tables.entry[table_id].table_id);
 
 	/* flush hdp cache */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	amdgpu_asic_flush_hdp(adev, NULL);
 
 	memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
 			priv->smu_tables.entry[table_id].size);
@@ -150,6 +150,7 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 {
 	struct smu10_smumgr *priv =
 			(struct smu10_smumgr *)(hwmgr->smu_backend);
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
 			"Invalid SMU Table ID!", return -EINVAL;);
@@ -161,6 +162,8 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 	memcpy(priv->smu_tables.entry[table_id].table, table,
 			priv->smu_tables.entry[table_id].size);
 
+	amdgpu_asic_flush_hdp(adev, NULL);
+
 	smu10_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
 			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
index 742b3dc..adfbcbe 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
@@ -61,15 +61,29 @@ static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr)
 	uint32_t reg;
 	uint32_t ret;
 
-	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+	/* Due to the L1 policy problem under SRIOV, we have to use
+	 * mmMP1_SMN_C2PMSG_103 as the driver response register
+	 */
+	if (hwmgr->pp_one_vf) {
+		reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_103);
 
-	ret = phm_wait_for_register_unequal(hwmgr, reg,
-			0, MP1_C2PMSG_90__CONTENT_MASK);
+		ret = phm_wait_for_register_unequal(hwmgr, reg,
+				0, MP1_C2PMSG_103__CONTENT_MASK);
 
-	if (ret)
-		pr_err("No response from smu\n");
+		if (ret)
+			pr_err("No response from smu\n");
 
-	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+		return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103);
+	} else {
+		reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+
+		ret = phm_wait_for_register_unequal(hwmgr, reg,
+				0, MP1_C2PMSG_90__CONTENT_MASK);
+
+		if (ret)
+			pr_err("No response from smu\n");
+		return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+	}
 }
 
 /*
@@ -83,7 +97,11 @@ static int smu9_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
 {
 	struct amdgpu_device *adev = hwmgr->adev;
 
-	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+	if (hwmgr->pp_one_vf) {
+		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_101, msg);
+	} else {
+		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+	}
 
 	return 0;
 }
@@ -101,7 +119,10 @@ int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 
 	smu9_wait_for_response(hwmgr);
 
-	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+	if (hwmgr->pp_one_vf)
+		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0);
+	else
+		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
 	smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
 
@@ -127,9 +148,17 @@ int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 
 	smu9_wait_for_response(hwmgr);
 
-	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
-	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
+	/* Due to the L1 policy problem under SRIOV, we have to use
+	 * mmMP1_SMN_C2PMSG_101 as the driver message register and
+	 * mmMP1_SMN_C2PMSG_102 as the driver parameter register.
+	 */
+	if (hwmgr->pp_one_vf) {
+		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0);
+		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102, parameter);
+	} else {
+		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
+	}
 
 	smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
 
@@ -144,5 +173,8 @@ uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr)
 {
 	struct amdgpu_device *adev = hwmgr->adev;
 
-	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+	if (hwmgr->pp_one_vf)
+		return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102);
+	else
+		return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
 }
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 0f3836f..7155640 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
 			priv->smu_tables.entry[table_id].table_id);
 
 	/* flush hdp cache */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	amdgpu_asic_flush_hdp(adev, NULL);
 
 	memcpy(table, priv->smu_tables.entry[table_id].table,
 			priv->smu_tables.entry[table_id].size);
@@ -70,6 +70,13 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 		uint8_t *table, int16_t table_id)
 {
 	struct vega10_smumgr *priv = hwmgr->smu_backend;
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	/* Under SRIOV, the vbios or hypervisor driver has already
+	 * copied the table to the SMC, so just skip it here.
+	 */
+	if (!hwmgr->not_vf)
+		return 0;
 
 	PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
 			"Invalid SMU Table ID!", return -EINVAL);
@@ -81,6 +88,8 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 	memcpy(priv->smu_tables.entry[table_id].table, table,
 			priv->smu_tables.entry[table_id].size);
 
+	amdgpu_asic_flush_hdp(adev, NULL);
+
 	smu9_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
 			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
@@ -100,6 +109,14 @@ int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
 	int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
 			PPSMC_MSG_DisableSmuFeatures;
 
+	/* A VF has no permission to change SMU features due to security
+	 * concerns; even under pp one vf mode it still cannot do so.
+	 * For vega10, the SMU in the vbios will enable the appropriate
+	 * features.
+	 */
+	if (!hwmgr->not_vf)
+		return 0;
+
 	return smum_send_msg_to_smc_with_parameter(hwmgr,
 			msg, feature_mask);
 }
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 90c782c..275dbf6 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
 			return -EINVAL);
 
 	/* flush hdp cache */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	amdgpu_asic_flush_hdp(adev, NULL);
 
 	memcpy(table, priv->smu_tables.entry[table_id].table,
 			priv->smu_tables.entry[table_id].size);
@@ -84,6 +84,7 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 {
 	struct vega12_smumgr *priv =
 			(struct vega12_smumgr *)(hwmgr->smu_backend);
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
 			"Invalid SMU Table ID!", return -EINVAL);
@@ -95,6 +96,8 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 	memcpy(priv->smu_tables.entry[table_id].table, table,
 			priv->smu_tables.entry[table_id].size);
 
+	amdgpu_asic_flush_hdp(adev, NULL);
+
 	PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
 			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
@@ -125,20 +128,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
 	if (enable) {
 		PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
-				"[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
+				"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
 				return -EINVAL);
 		PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
-				"[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
+				"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
 				return -EINVAL);
 	} else {
 		PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
-				"[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
+				"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
 				return -EINVAL);
 		PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
-				"[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
+				"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
 				return -EINVAL);
 	}
 
@@ -155,13 +158,13 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
 
 	PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
 			PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
-			"[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
+			"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
 			return -EINVAL);
 	smc_features_low = smu9_get_argument(hwmgr);
 
 	PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
 			PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
-			"[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
+			"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
 			return -EINVAL);
 	smc_features_high = smu9_get_argument(hwmgr);
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index f604612..49e5ef3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
 			return ret);
 
 	/* flush hdp cache */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	amdgpu_asic_flush_hdp(adev, NULL);
 
 	memcpy(table, priv->smu_tables.entry[table_id].table,
 			priv->smu_tables.entry[table_id].size);
@@ -207,6 +207,7 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 {
 	struct vega20_smumgr *priv =
 			(struct vega20_smumgr *)(hwmgr->smu_backend);
+	struct amdgpu_device *adev = hwmgr->adev;
 	int ret = 0;
 
 	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
@@ -219,6 +220,8 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 	memcpy(priv->smu_tables.entry[table_id].table, table,
 			priv->smu_tables.entry[table_id].size);
 
+	amdgpu_asic_flush_hdp(adev, NULL);
+
 	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
 			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
@@ -242,11 +245,14 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
 {
 	struct vega20_smumgr *priv =
 			(struct vega20_smumgr *)(hwmgr->smu_backend);
+	struct amdgpu_device *adev = hwmgr->adev;
 	int ret = 0;
 
 	memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
 			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
 
+	amdgpu_asic_flush_hdp(adev, NULL);
+
 	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
 			upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
@@ -290,7 +296,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
 			return ret);
 
 	/* flush hdp cache */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	amdgpu_asic_flush_hdp(adev, NULL);
 
 	memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
 			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
@@ -310,20 +316,20 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
 	if (enable) {
 		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0,
-				"[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
+				"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
 				return ret);
 		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0,
-				"[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
+				"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
 				return ret);
 	} else {
 		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0,
-				"[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
+				"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
 				return ret);
 		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0,
-				"[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
+				"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
 				return ret);
 	}
 
@@ -341,12 +347,12 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
 
 	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
 			PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
-			"[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
+			"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
 			return ret);
 	smc_features_low = vega20_get_argument(hwmgr);
 	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
 			PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
-			"[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
+			"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
 			return ret);
 	smc_features_high = vega20_get_argument(hwmgr);
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index ae18fbc..b0e0d67 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -1114,7 +1114,6 @@ static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 	SMIO_Pattern vol_level;
 	uint32_t mvdd;
-	uint16_t us_mvdd;
 
 	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
 
@@ -1168,17 +1167,6 @@ static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
 			"in Clock Dependency Table",
 			);
 
-	us_mvdd = 0;
-	if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
-			(data->mclk_dpm_key_disabled))
-		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
-	else {
-		if (!vegam_populate_mvdd_value(hwmgr,
-				data->dpm_table.mclk_table.dpm_levels[0].value,
-				&vol_level))
-			us_mvdd = vol_level.Voltage;
-	}
-
 	if (!vegam_populate_mvdd_value(hwmgr, 0, &vol_level))
 		table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
 	else
@@ -1383,11 +1371,16 @@ static int vegam_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
 	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
 			data->vbios_boot_state.sclk_bootup_value,
 			(uint32_t *)&(table->GraphicsBootLevel));
+	if (result)
+		return result;
 
 	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
 			data->vbios_boot_state.mclk_bootup_value,
 			(uint32_t *)&(table->MemoryBootLevel));
 
+	if (result)
+		return result;
+
 	table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
 			VOLTAGE_SCALE;
 	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
@@ -1493,7 +1486,7 @@ static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 	struct vegam_smumgr *smu_data =
 			(struct vegam_smumgr *)(hwmgr->smu_backend);
 
-	uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+	uint8_t i, stretch_amount, volt_offset = 0;
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1532,11 +1525,9 @@ static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 			(table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ?
 			table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 5;
 	/* Populate CKS Lookup Table */
-	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
-		stretch_amount2 = 0;
-	else if (stretch_amount == 3 || stretch_amount == 4)
-		stretch_amount2 = 1;
-	else {
+	if (!(stretch_amount == 1 || stretch_amount == 2 ||
+	      stretch_amount == 5 || stretch_amount == 3 ||
+	      stretch_amount == 4)) {
 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
 				PHM_PlatformCaps_ClockStretcher);
 		PP_ASSERT_WITH_CODE(false,
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 0d3a3b0a..4ad8d6c 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -338,6 +338,10 @@ static int vega20_tables_init(struct smu_context *smu, struct smu_table *tables)
 		return -ENOMEM;
 	smu_table->metrics_time = 0;
 
+	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+	if (!smu_table->watermarks_table)
+		return -ENOMEM;
+
 	return 0;
 }
 
@@ -1702,22 +1706,11 @@ static int vega20_set_default_od_settings(struct smu_context *smu,
 	struct smu_table_context *table_context = &smu->smu_table;
 	int ret;
 
+	ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
+	if (ret)
+		return ret;
+
 	if (initialize) {
-		if (table_context->overdrive_table)
-			return -EINVAL;
-
-		table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL);
-
-		if (!table_context->overdrive_table)
-			return -ENOMEM;
-
-		ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
-				       table_context->overdrive_table, false);
-		if (ret) {
-			pr_err("Failed to export over drive table!\n");
-			return ret;
-		}
-
 		ret = vega20_set_default_od8_setttings(smu);
 		if (ret)
 			return ret;
@@ -2235,7 +2228,7 @@ static int vega20_apply_clocks_adjust_rules(struct smu_context *smu)
 }
 
 static int
-vega20_notify_smc_dispaly_config(struct smu_context *smu)
+vega20_notify_smc_display_config(struct smu_context *smu)
 {
 	struct vega20_dpm_table *dpm_table = smu->smu_dpm.dpm_context;
 	struct vega20_single_dpm_table *memtable = &dpm_table->mem_table;
@@ -2774,12 +2767,11 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
 		break;
 
 	case PP_OD_RESTORE_DEFAULT_TABLE:
-		ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
-		if (ret) {
-			pr_err("Failed to export over drive table!\n");
-			return ret;
+		if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
+			pr_err("Overdrive table was not initialized!\n");
+			return -EINVAL;
 		}
-
+		memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
 		break;
 
 	case PP_OD_COMMIT_DPM_TABLE:
@@ -3194,6 +3186,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
 	.get_od_percentage = vega20_get_od_percentage,
 	.get_power_profile_mode = vega20_get_power_profile_mode,
 	.set_power_profile_mode = vega20_set_power_profile_mode,
+	.set_performance_level = smu_v11_0_set_performance_level,
 	.set_od_percentage = vega20_set_od_percentage,
 	.set_default_od_settings = vega20_set_default_od_settings,
 	.od_edit_dpm_table = vega20_odn_edit_dpm_table,
@@ -3203,7 +3196,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
 	.pre_display_config_changed = vega20_pre_display_config_changed,
 	.display_config_changed = vega20_display_config_changed,
 	.apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
-	.notify_smc_dispaly_config = vega20_notify_smc_dispaly_config,
+	.notify_smc_display_config = vega20_notify_smc_display_config,
 	.force_dpm_limit_value = vega20_force_dpm_limit_value,
 	.unforce_dpm_levels = vega20_unforce_dpm_levels,
 	.get_profiling_clk_mask = vega20_get_profiling_clk_mask,
@@ -3231,6 +3224,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
 	.check_fw_version = smu_v11_0_check_fw_version,
 	.write_pptable = smu_v11_0_write_pptable,
 	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+	.set_driver_table_location = smu_v11_0_set_driver_table_location,
 	.set_tool_table_location = smu_v11_0_set_tool_table_location,
 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 	.system_features_control = smu_v11_0_system_features_control,
@@ -3259,7 +3253,8 @@ static const struct pptable_funcs vega20_ppt_funcs = {
 	.baco_is_support= smu_v11_0_baco_is_support,
 	.baco_get_state = smu_v11_0_baco_get_state,
 	.baco_set_state = smu_v11_0_baco_set_state,
-	.baco_reset = smu_v11_0_baco_reset,
+	.baco_enter = smu_v11_0_baco_enter,
+	.baco_exit = smu_v11_0_baco_exit,
 	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
 	.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index dfaddbb..8ae1e1f 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -20,9 +20,10 @@
 
 #define ENCODE_PGU_XY(x, y)	((((x) - 1) << 16) | ((y) - 1))
 
-static struct simplefb_format supported_formats[] = {
-	{ "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 },
-	{ "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 },
+static const u32 arc_pgu_supported_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
 };
 
 static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc)
@@ -30,22 +31,24 @@ static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc)
 	struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
 	const struct drm_framebuffer *fb = crtc->primary->state->fb;
 	uint32_t pixel_format = fb->format->format;
-	struct simplefb_format *format = NULL;
+	u32 format = DRM_FORMAT_INVALID;
 	int i;
+	u32 reg_ctrl;
 
-	for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
-		if (supported_formats[i].fourcc == pixel_format)
-			format = &supported_formats[i];
+	for (i = 0; i < ARRAY_SIZE(arc_pgu_supported_formats); i++) {
+		if (arc_pgu_supported_formats[i] == pixel_format)
+			format = arc_pgu_supported_formats[i];
 	}
 
-	if (WARN_ON(!format))
+	if (WARN_ON(format == DRM_FORMAT_INVALID))
 		return;
 
-	if (format->fourcc == DRM_FORMAT_RGB888)
-		arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
-			      arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) |
-					   ARCPGU_MODE_RGB888_MASK);
-
+	reg_ctrl = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL);
+	if (format == DRM_FORMAT_RGB565)
+		reg_ctrl &= ~ARCPGU_MODE_XRGB8888;
+	else
+		reg_ctrl |= ARCPGU_MODE_XRGB8888;
+	arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, reg_ctrl);
 }
 
 static const struct drm_crtc_funcs arc_pgu_crtc_funcs = {
@@ -193,18 +196,15 @@ static struct drm_plane *arc_pgu_plane_init(struct drm_device *drm)
 {
 	struct arcpgu_drm_private *arcpgu = drm->dev_private;
 	struct drm_plane *plane = NULL;
-	u32 formats[ARRAY_SIZE(supported_formats)], i;
 	int ret;
 
 	plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
 	if (!plane)
 		return ERR_PTR(-ENOMEM);
 
-	for (i = 0; i < ARRAY_SIZE(supported_formats); i++)
-		formats[i] = supported_formats[i].fourcc;
-
 	ret = drm_universal_plane_init(drm, plane, 0xff, &arc_pgu_plane_funcs,
-				       formats, ARRAY_SIZE(formats),
+				       arc_pgu_supported_formats,
+				       ARRAY_SIZE(arc_pgu_supported_formats),
 				       NULL,
 				       DRM_PLANE_TYPE_PRIMARY, NULL);
 	if (ret)
diff --git a/drivers/gpu/drm/arc/arcpgu_regs.h b/drivers/gpu/drm/arc/arcpgu_regs.h
index dab2c38..b689a38 100644
--- a/drivers/gpu/drm/arc/arcpgu_regs.h
+++ b/drivers/gpu/drm/arc/arcpgu_regs.h
@@ -25,7 +25,7 @@
 #define ARCPGU_CTRL_VS_POL_OFST	0x3
 #define ARCPGU_CTRL_HS_POL_MASK	0x1
 #define ARCPGU_CTRL_HS_POL_OFST	0x4
-#define ARCPGU_MODE_RGB888_MASK	0x04
+#define ARCPGU_MODE_XRGB8888	BIT(2)
 #define ARCPGU_STAT_BUSY_MASK	0x02
 
 #endif
diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig
index e87ff86..cec0639 100644
--- a/drivers/gpu/drm/arm/display/Kconfig
+++ b/drivers/gpu/drm/arm/display/Kconfig
@@ -12,9 +12,3 @@
 	  Processor driver. It supports the D71 variants of the hardware.
 
 	  If compiled as a module it will be called komeda.
-
-config DRM_KOMEDA_ERROR_PRINT
-	bool "Enable komeda error print"
-	depends on DRM_KOMEDA
-	help
-	  Choose this option to enable error printing.
diff --git a/drivers/gpu/drm/arm/display/include/malidp_product.h b/drivers/gpu/drm/arm/display/include/malidp_product.h
index 1053b11..16a8a2c 100644
--- a/drivers/gpu/drm/arm/display/include/malidp_product.h
+++ b/drivers/gpu/drm/arm/display/include/malidp_product.h
@@ -18,7 +18,8 @@
 #define MALIDP_CORE_ID_STATUS(__core_id)     (((__u32)(__core_id)) & 0xFF)
 
 /* Mali-display product IDs */
-#define MALIDP_D71_PRODUCT_ID   0x0071
+#define MALIDP_D71_PRODUCT_ID	0x0071
+#define MALIDP_D32_PRODUCT_ID	0x0032
 
 union komeda_config_id {
 	struct {
diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile
index f095a1c..1931a7f 100644
--- a/drivers/gpu/drm/arm/display/komeda/Makefile
+++ b/drivers/gpu/drm/arm/display/komeda/Makefile
@@ -16,12 +16,11 @@
 	komeda_crtc.o \
 	komeda_plane.o \
 	komeda_wb_connector.o \
-	komeda_private_obj.o
+	komeda_private_obj.o \
+	komeda_event.o
 
 komeda-y += \
 	d71/d71_dev.o \
 	d71/d71_component.o
 
-komeda-$(CONFIG_DRM_KOMEDA_ERROR_PRINT) += komeda_event.o
-
 obj-$(CONFIG_DRM_KOMEDA) += komeda.o
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index f0ba26e..8a02ade 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -1044,7 +1044,9 @@ static int d71_merger_init(struct d71_dev *d71,
 static void d71_improc_update(struct komeda_component *c,
 			      struct komeda_component_state *state)
 {
+	struct drm_crtc_state *crtc_st = state->crtc->state;
 	struct komeda_improc_state *st = to_improc_st(state);
+	struct d71_pipeline *pipe = to_d71_pipeline(c->pipeline);
 	u32 __iomem *reg = c->reg;
 	u32 index, mask = 0, ctrl = 0;
 
@@ -1055,6 +1057,24 @@ static void d71_improc_update(struct komeda_component *c,
 	malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
 	malidp_write32(reg, IPS_DEPTH, st->color_depth);
 
+	if (crtc_st->color_mgmt_changed) {
+		mask |= IPS_CTRL_FT | IPS_CTRL_RGB;
+
+		if (crtc_st->gamma_lut) {
+			malidp_write_group(pipe->dou_ft_coeff_addr, FT_COEFF0,
+					   KOMEDA_N_GAMMA_COEFFS,
+					   st->fgamma_coeffs);
+			ctrl |= IPS_CTRL_FT; /* enable gamma */
+		}
+
+		if (crtc_st->ctm) {
+			malidp_write_group(reg, IPS_RGB_RGB_COEFF0,
+					   KOMEDA_N_CTM_COEFFS,
+					   st->ctm_coeffs);
+			ctrl |= IPS_CTRL_RGB; /* enable gamut */
+		}
+	}
+
 	mask |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
 
 	/* config color format */
@@ -1250,7 +1270,7 @@ static int d71_timing_ctrlr_init(struct d71_dev *d71,
 
 	ctrlr = to_ctrlr(c);
 
-	ctrlr->supports_dual_link = true;
+	ctrlr->supports_dual_link = d71->supports_dual_link;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index 822b23a..00fa56c 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -20,8 +20,10 @@ static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
 		evts |= KOMEDA_EVENT_IBSY;
 	if (raw_status & LPU_IRQ_EOW)
 		evts |= KOMEDA_EVENT_EOW;
+	if (raw_status & LPU_IRQ_OVR)
+		evts |= KOMEDA_EVENT_OVR;
 
-	if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY)) {
+	if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY | LPU_IRQ_OVR)) {
 		u32 restore = 0, tbu_status;
 		/* Check error of LPU status */
 		status = malidp_read32(reg, BLK_STATUS);
@@ -45,6 +47,15 @@ static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
 			restore |= LPU_STATUS_ACE3;
 			evts |= KOMEDA_ERR_ACE3;
 		}
+		if (status & LPU_STATUS_FEMPTY) {
+			restore |= LPU_STATUS_FEMPTY;
+			evts |= KOMEDA_EVENT_EMPTY;
+		}
+		if (status & LPU_STATUS_FFULL) {
+			restore |= LPU_STATUS_FFULL;
+			evts |= KOMEDA_EVENT_FULL;
+		}
+
 		if (restore != 0)
 			malidp_write32_mask(reg, BLK_STATUS, restore, 0);
 
@@ -371,23 +382,33 @@ static int d71_enum_resources(struct komeda_dev *mdev)
 		goto err_cleanup;
 	}
 
-	/* probe PERIPH */
+	/* Only the legacy HW has the periph block; the newer HW merges the
+	 * periph into the GCU.
+	 */
 	value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO);
-	if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH) {
-		DRM_ERROR("access blk periph but got blk: %d.\n",
-			  BLOCK_INFO_BLK_TYPE(value));
-		err = -EINVAL;
-		goto err_cleanup;
+	if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH)
+		d71->periph_addr = NULL;
+
+	if (d71->periph_addr) {
+		/* probe PERIPHERAL in legacy HW */
+		value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);
+
+		d71->max_line_size	= value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
+		d71->max_vsize		= 4096;
+		d71->num_rich_layers	= value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
+		d71->supports_dual_link	= !!(value & PERIPH_SPLIT_EN);
+		d71->integrates_tbu	= !!(value & PERIPH_TBU_EN);
+	} else {
+		value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID0);
+		d71->max_line_size	= GCU_MAX_LINE_SIZE(value);
+		d71->max_vsize		= GCU_MAX_NUM_LINES(value);
+
+		value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID1);
+		d71->num_rich_layers	= GCU_NUM_RICH_LAYERS(value);
+		d71->supports_dual_link	= GCU_DISPLAY_SPLIT_EN(value);
+		d71->integrates_tbu	= GCU_DISPLAY_TBU_EN(value);
 	}
 
-	value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);
-
-	d71->max_line_size	= value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
-	d71->max_vsize		= 4096;
-	d71->num_rich_layers	= value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
-	d71->supports_dual_link	= value & PERIPH_SPLIT_EN ? true : false;
-	d71->integrates_tbu	= value & PERIPH_TBU_EN ? true : false;
-
 	for (i = 0; i < d71->num_pipelines; i++) {
 		pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline),
 					   &d71_pipeline_funcs);
@@ -414,8 +435,11 @@ static int d71_enum_resources(struct komeda_dev *mdev)
 		d71->pipes[i] = to_d71_pipeline(pipe);
 	}
 
-	/* loop the register blks and probe */
-	i = 2; /* exclude GCU and PERIPH */
+	/* Loop over the register blocks and probe them.
+	 * NOTE: d71->num_blocks includes reserved blocks.
+	 * d71->num_blocks = GCU + valid blocks + reserved blocks
+	 */
+	i = 1; /* exclude GCU */
 	offset = D71_BLOCK_SIZE; /* skip GCU */
 	while (i < d71->num_blocks) {
 		blk_base = mdev->reg_base + (offset >> 2);
@@ -425,9 +449,9 @@ static int d71_enum_resources(struct komeda_dev *mdev)
 			err = d71_probe_block(d71, &blk, blk_base);
 			if (err)
 				goto err_cleanup;
-			i++;
 		}
 
+		i++;
 		offset += D71_BLOCK_SIZE;
 	}
 
@@ -594,10 +618,26 @@ static const struct komeda_dev_funcs d71_chip_funcs = {
 const struct komeda_dev_funcs *
 d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
 {
+	const struct komeda_dev_funcs *funcs;
+	u32 product_id;
+
+	chip->core_id = malidp_read32(reg_base, GLB_CORE_ID);
+
+	product_id = MALIDP_CORE_ID_PRODUCT_ID(chip->core_id);
+
+	switch (product_id) {
+	case MALIDP_D71_PRODUCT_ID:
+	case MALIDP_D32_PRODUCT_ID:
+		funcs = &d71_chip_funcs;
+		break;
+	default:
+		DRM_ERROR("Unsupported product: 0x%x\n", product_id);
+		return NULL;
+	}
+
 	chip->arch_id	= malidp_read32(reg_base, GLB_ARCH_ID);
-	chip->core_id	= malidp_read32(reg_base, GLB_CORE_ID);
 	chip->core_info	= malidp_read32(reg_base, GLB_CORE_INFO);
 	chip->bus_width	= D71_BUS_WIDTH_16_BYTES;
 
-	return &d71_chip_funcs;
+	return funcs;
 }
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
index 1727dc9..e80172a 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
@@ -72,6 +72,19 @@
 #define GCU_CONTROL_MODE(x)	((x) & 0x7)
 #define GCU_CONTROL_SRST	BIT(16)
 
+/* GCU_CONFIGURATION registers */
+#define GCU_CONFIGURATION_ID0	0x100
+#define GCU_CONFIGURATION_ID1	0x104
+
+/* GCU configuration */
+#define GCU_MAX_LINE_SIZE(x)	((x) & 0xFFFF)
+#define GCU_MAX_NUM_LINES(x)	((x) >> 16)
+#define GCU_NUM_RICH_LAYERS(x)	((x) & 0x7)
+#define GCU_NUM_PIPELINES(x)	(((x) >> 3) & 0x7)
+#define GCU_NUM_SCALERS(x)	(((x) >> 6) & 0x7)
+#define GCU_DISPLAY_SPLIT_EN(x)	(((x) >> 16) & 0x1)
+#define GCU_DISPLAY_TBU_EN(x)	(((x) >> 17) & 0x1)
+
 /* GCU opmode */
 #define INACTIVE_MODE		0
 #define TBU_CONNECT_MODE	1
@@ -162,6 +175,7 @@
 #define TBU_DOUTSTDCAPB_MASK	0x3F
 
 /* LPU_IRQ_BITS */
+#define LPU_IRQ_OVR		BIT(9)
 #define LPU_IRQ_IBSY		BIT(10)
 #define LPU_IRQ_ERR		BIT(11)
 #define LPU_IRQ_EOW		BIT(12)
@@ -172,6 +186,8 @@
 #define LPU_STATUS_AXIE		BIT(4)
 #define LPU_STATUS_AXIRP	BIT(5)
 #define LPU_STATUS_AXIWP	BIT(6)
+#define LPU_STATUS_FEMPTY	BIT(11)
+#define LPU_STATUS_FFULL	BIT(14)
 #define LPU_STATUS_ACE0		BIT(16)
 #define LPU_STATUS_ACE1		BIT(17)
 #define LPU_STATUS_ACE2		BIT(18)
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
index 9d14a92..d8e449e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
@@ -65,3 +65,69 @@ const s32 *komeda_select_yuv2rgb_coeffs(u32 color_encoding, u32 color_range)
 
 	return coeffs;
 }
+
+struct gamma_curve_sector {
+	u32 boundary_start;
+	u32 num_of_segments;
+	u32 segment_width;
+};
+
+struct gamma_curve_segment {
+	u32 start;
+	u32 end;
+};
+
+static struct gamma_curve_sector sector_tbl[] = {
+	{ 0,    4,  4   },
+	{ 16,   4,  4   },
+	{ 32,   4,  8   },
+	{ 64,   4,  16  },
+	{ 128,  4,  32  },
+	{ 256,  4,  64  },
+	{ 512,  16, 32  },
+	{ 1024, 24, 128 },
+};
+
+static void
+drm_lut_to_coeffs(struct drm_property_blob *lut_blob, u32 *coeffs,
+		  struct gamma_curve_sector *sector_tbl, u32 num_sectors)
+{
+	struct drm_color_lut *lut;
+	u32 i, j, in, num = 0;
+
+	if (!lut_blob)
+		return;
+
+	lut = lut_blob->data;
+
+	for (i = 0; i < num_sectors; i++) {
+		for (j = 0; j < sector_tbl[i].num_of_segments; j++) {
+			in = sector_tbl[i].boundary_start +
+			     j * sector_tbl[i].segment_width;
+
+			coeffs[num++] = drm_color_lut_extract(lut[in].red,
+						KOMEDA_COLOR_PRECISION);
+		}
+	}
+
+	coeffs[num] = BIT(KOMEDA_COLOR_PRECISION);
+}
+
+void drm_lut_to_fgamma_coeffs(struct drm_property_blob *lut_blob, u32 *coeffs)
+{
+	drm_lut_to_coeffs(lut_blob, coeffs, sector_tbl, ARRAY_SIZE(sector_tbl));
+}
+
+void drm_ctm_to_coeffs(struct drm_property_blob *ctm_blob, u32 *coeffs)
+{
+	struct drm_color_ctm *ctm;
+	u32 i;
+
+	if (!ctm_blob)
+		return;
+
+	ctm = ctm_blob->data;
+
+	for (i = 0; i < KOMEDA_N_CTM_COEFFS; i++)
+		coeffs[i] = drm_color_ctm_s31_32_to_qm_n(ctm->matrix[i], 3, 12);
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h
index a2df218..2f46684 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h
@@ -11,7 +11,15 @@
 #include <drm/drm_color_mgmt.h>
 
 #define KOMEDA_N_YUV2RGB_COEFFS		12
+#define KOMEDA_N_RGB2YUV_COEFFS		12
+#define KOMEDA_COLOR_PRECISION		12
+#define KOMEDA_N_GAMMA_COEFFS		65
+#define KOMEDA_COLOR_LUT_SIZE		BIT(KOMEDA_COLOR_PRECISION)
+#define KOMEDA_N_CTM_COEFFS		9
+
+void drm_lut_to_fgamma_coeffs(struct drm_property_blob *lut_blob, u32 *coeffs);
+void drm_ctm_to_coeffs(struct drm_property_blob *ctm_blob, u32 *coeffs);
 
 const s32 *komeda_select_yuv2rgb_coeffs(u32 color_encoding, u32 color_range);
 
-#endif
+#endif /*_KOMEDA_COLOR_MGMT_H_*/
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 2520152..56bd938 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -5,6 +5,7 @@
  *
  */
 #include <linux/clk.h>
+#include <linux/pm_runtime.h>
 #include <linux/spinlock.h>
 
 #include <drm/drm_atomic.h>
@@ -274,6 +275,7 @@ static void
 komeda_crtc_atomic_enable(struct drm_crtc *crtc,
 			  struct drm_crtc_state *old)
 {
+	pm_runtime_get_sync(crtc->dev->dev);
 	komeda_crtc_prepare(to_kcrtc(crtc));
 	drm_crtc_vblank_on(crtc);
 	WARN_ON(drm_crtc_vblank_get(crtc));
@@ -372,6 +374,7 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
 	drm_crtc_vblank_put(crtc);
 	drm_crtc_vblank_off(crtc);
 	komeda_crtc_unprepare(kcrtc);
+	pm_runtime_put(crtc->dev->dev);
 }
 
 static void
@@ -617,6 +620,8 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms,
 
 	crtc->port = kcrtc->master->of_output_port;
 
+	drm_crtc_enable_color_mgmt(crtc, 0, true, KOMEDA_COLOR_LUT_SIZE);
+
 	return err;
 }
 
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 937a6d4..1d76747 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -10,6 +10,7 @@
 #include <linux/of_graph.h>
 #include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/dma-mapping.h>
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
@@ -27,12 +28,16 @@ static int komeda_register_show(struct seq_file *sf, void *x)
 
 	seq_puts(sf, "\n====== Komeda register dump =========\n");
 
+	pm_runtime_get_sync(mdev->dev);
+
 	if (mdev->funcs->dump_register)
 		mdev->funcs->dump_register(mdev, sf);
 
 	for (i = 0; i < mdev->n_pipelines; i++)
 		komeda_pipeline_dump_register(mdev->pipelines[i], sf);
 
+	pm_runtime_put(mdev->dev);
+
 	return 0;
 }
 
@@ -58,6 +63,8 @@ static void komeda_debugfs_init(struct komeda_dev *mdev)
 	mdev->debugfs_root = debugfs_create_dir("komeda", NULL);
 	debugfs_create_file("register", 0444, mdev->debugfs_root,
 			    mdev, &komeda_register_fops);
+	debugfs_create_x16("err_verbosity", 0664, mdev->debugfs_root,
+			   &mdev->err_verbosity);
 }
 #endif
 
@@ -113,22 +120,14 @@ static struct attribute_group komeda_sysfs_attr_group = {
 	.attrs = komeda_sysfs_entries,
 };
 
-static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
+static int komeda_parse_pipe_dt(struct komeda_pipeline *pipe)
 {
-	struct komeda_pipeline *pipe;
+	struct device_node *np = pipe->of_node;
 	struct clk *clk;
-	u32 pipe_id;
-	int ret = 0;
-
-	ret = of_property_read_u32(np, "reg", &pipe_id);
-	if (ret != 0 || pipe_id >= mdev->n_pipelines)
-		return -EINVAL;
-
-	pipe = mdev->pipelines[pipe_id];
 
 	clk = of_clk_get_by_name(np, "pxclk");
 	if (IS_ERR(clk)) {
-		DRM_ERROR("get pxclk for pipeline %d failed!\n", pipe_id);
+		DRM_ERROR("get pxclk for pipeline %d failed!\n", pipe->id);
 		return PTR_ERR(clk);
 	}
 	pipe->pxlclk = clk;
@@ -142,7 +141,6 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
 		of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT);
 
 	pipe->dual_link = pipe->of_output_links[0] && pipe->of_output_links[1];
-	pipe->of_node = of_node_get(np);
 
 	return 0;
 }
@@ -151,7 +149,9 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct device_node *child, *np = dev->of_node;
-	int ret;
+	struct komeda_pipeline *pipe;
+	u32 pipe_id = U32_MAX;
+	int ret = -1;
 
 	mdev->irq  = platform_get_irq(pdev, 0);
 	if (mdev->irq < 0) {
@@ -166,37 +166,44 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
 	ret = 0;
 
 	for_each_available_child_of_node(np, child) {
-		if (of_node_cmp(child->name, "pipeline") == 0) {
-			ret = komeda_parse_pipe_dt(mdev, child);
-			if (ret) {
-				DRM_ERROR("parse pipeline dt error!\n");
-				of_node_put(child);
-				break;
+		if (of_node_name_eq(child, "pipeline")) {
+			of_property_read_u32(child, "reg", &pipe_id);
+			if (pipe_id >= mdev->n_pipelines) {
+				DRM_WARN("Skip the redundant DT node: pipeline-%u.\n",
+					 pipe_id);
+				continue;
 			}
+			mdev->pipelines[pipe_id]->of_node = of_node_get(child);
 		}
 	}
 
-	return ret;
+	for (pipe_id = 0; pipe_id < mdev->n_pipelines; pipe_id++) {
+		pipe = mdev->pipelines[pipe_id];
+
+		if (!pipe->of_node) {
+			DRM_ERROR("Pipeline-%d doesn't have a DT node.\n",
+				  pipe->id);
+			return -EINVAL;
+		}
+		ret = komeda_parse_pipe_dt(pipe);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 struct komeda_dev *komeda_dev_create(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
-	const struct komeda_product_data *product;
+	komeda_identify_func komeda_identify;
 	struct komeda_dev *mdev;
-	struct resource *io_res;
 	int err = 0;
 
-	product = of_device_get_match_data(dev);
-	if (!product)
+	komeda_identify = of_device_get_match_data(dev);
+	if (!komeda_identify)
 		return ERR_PTR(-ENODEV);
 
-	io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!io_res) {
-		DRM_ERROR("No registers defined.\n");
-		return ERR_PTR(-ENODEV);
-	}
-
 	mdev = devm_kzalloc(dev, sizeof(*mdev), GFP_KERNEL);
 	if (!mdev)
 		return ERR_PTR(-ENOMEM);
@@ -204,7 +211,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
 	mutex_init(&mdev->lock);
 
 	mdev->dev = dev;
-	mdev->reg_base = devm_ioremap_resource(dev, io_res);
+	mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(mdev->reg_base)) {
 		DRM_ERROR("Map register space failed.\n");
 		err = PTR_ERR(mdev->reg_base);
@@ -222,11 +229,9 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
 
 	clk_prepare_enable(mdev->aclk);
 
-	mdev->funcs = product->identify(mdev->reg_base, &mdev->chip);
-	if (!komeda_product_match(mdev, product->product_id)) {
-		DRM_ERROR("DT configured %x mismatch with real HW %x.\n",
-			  product->product_id,
-			  MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id));
+	mdev->funcs = komeda_identify(mdev->reg_base, &mdev->chip);
+	if (!mdev->funcs) {
+		DRM_ERROR("Failed to identify the HW.\n");
 		err = -ENODEV;
 		goto disable_clk;
 	}
@@ -263,15 +268,6 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
 	if (!mdev->iommu)
 		DRM_INFO("continue without IOMMU support!\n");
 
-	if (mdev->iommu && mdev->funcs->connect_iommu) {
-		err = mdev->funcs->connect_iommu(mdev);
-		if (err) {
-			DRM_ERROR("connect iommu failed.\n");
-			mdev->iommu = NULL;
-			goto disable_clk;
-		}
-	}
-
 	clk_disable_unprepare(mdev->aclk);
 
 	err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
@@ -280,6 +276,8 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
 		goto err_cleanup;
 	}
 
+	mdev->err_verbosity = KOMEDA_DEV_PRINT_ERR_EVENTS;
+
 #ifdef CONFIG_DEBUG_FS
 	komeda_debugfs_init(mdev);
 #endif
@@ -308,11 +306,6 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
 	if (mdev->aclk)
 		clk_prepare_enable(mdev->aclk);
 
-	if (mdev->iommu && mdev->funcs->disconnect_iommu)
-		if (mdev->funcs->disconnect_iommu(mdev))
-			DRM_ERROR("disconnect iommu failed.\n");
-	mdev->iommu = NULL;
-
 	for (i = 0; i < mdev->n_pipelines; i++) {
 		komeda_pipeline_destroy(mdev, mdev->pipelines[i]);
 		mdev->pipelines[i] = NULL;
@@ -341,44 +334,26 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
 
 int komeda_dev_resume(struct komeda_dev *mdev)
 {
-	int ret = 0;
-
 	clk_prepare_enable(mdev->aclk);
 
-	if (mdev->iommu && mdev->funcs->connect_iommu) {
-		ret = mdev->funcs->connect_iommu(mdev);
-		if (ret < 0) {
+	mdev->funcs->enable_irq(mdev);
+
+	if (mdev->iommu && mdev->funcs->connect_iommu)
+		if (mdev->funcs->connect_iommu(mdev))
 			DRM_ERROR("connect iommu failed.\n");
-			goto disable_clk;
-		}
-	}
 
-	ret = mdev->funcs->enable_irq(mdev);
-
-disable_clk:
-	clk_disable_unprepare(mdev->aclk);
-
-	return ret;
+	return 0;
 }
 
 int komeda_dev_suspend(struct komeda_dev *mdev)
 {
-	int ret = 0;
-
-	clk_prepare_enable(mdev->aclk);
-
-	if (mdev->iommu && mdev->funcs->disconnect_iommu) {
-		ret = mdev->funcs->disconnect_iommu(mdev);
-		if (ret < 0) {
+	if (mdev->iommu && mdev->funcs->disconnect_iommu)
+		if (mdev->funcs->disconnect_iommu(mdev))
 			DRM_ERROR("disconnect iommu failed.\n");
-			goto disable_clk;
-		}
-	}
 
-	ret = mdev->funcs->disable_irq(mdev);
+	mdev->funcs->disable_irq(mdev);
 
-disable_clk:
 	clk_disable_unprepare(mdev->aclk);
 
-	return ret;
+	return 0;
 }
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
index 4142002..ce27f2f 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
@@ -20,6 +20,8 @@
 #define KOMEDA_EVENT_OVR		BIT_ULL(4)
 #define KOMEDA_EVENT_EOW		BIT_ULL(5)
 #define KOMEDA_EVENT_MODE		BIT_ULL(6)
+#define KOMEDA_EVENT_FULL		BIT_ULL(7)
+#define KOMEDA_EVENT_EMPTY		BIT_ULL(8)
 
 #define KOMEDA_ERR_TETO			BIT_ULL(14)
 #define KOMEDA_ERR_TEMR			BIT_ULL(15)
@@ -49,12 +51,15 @@
 	KOMEDA_ERR_ZME		| KOMEDA_ERR_MERR	| KOMEDA_ERR_TCF |\
 	KOMEDA_ERR_TTNG		| KOMEDA_ERR_TTF)
 
-#define KOMEDA_WARN_EVENTS	KOMEDA_ERR_CSCE
+#define KOMEDA_WARN_EVENTS	\
+	(KOMEDA_ERR_CSCE | KOMEDA_EVENT_FULL | KOMEDA_EVENT_EMPTY)
 
-/* malidp device id */
-enum {
-	MALI_D71 = 0,
-};
+#define KOMEDA_INFO_EVENTS (0 \
+			    | KOMEDA_EVENT_VSYNC \
+			    | KOMEDA_EVENT_FLIP \
+			    | KOMEDA_EVENT_EOW \
+			    | KOMEDA_EVENT_MODE \
+			    )
 
 /* pipeline DT ports */
 enum {
@@ -69,12 +74,6 @@ struct komeda_chip_info {
 	u32 bus_width;
 };
 
-struct komeda_product_data {
-	u32 product_id;
-	const struct komeda_dev_funcs *(*identify)(u32 __iomem *reg,
-					     struct komeda_chip_info *info);
-};
-
 struct komeda_dev;
 
 struct komeda_events {
@@ -202,6 +201,23 @@ struct komeda_dev {
 
 	/** @debugfs_root: root directory of komeda debugfs */
 	struct dentry *debugfs_root;
+	/**
+	 * @err_verbosity: bitmask for how much extra info to print on error
+	 *
+	 * See KOMEDA_DEV_* macros for details. Low byte contains the debug
+	 * level categories, the high byte contains extra debug options.
+	 */
+	u16 err_verbosity;
+	/* Print a single line per error per frame with error events. */
+#define KOMEDA_DEV_PRINT_ERR_EVENTS BIT(0)
+	/* Print a single line per warning per frame with error events. */
+#define KOMEDA_DEV_PRINT_WARN_EVENTS BIT(1)
+	/* Print a single line per info event per frame with error events. */
+#define KOMEDA_DEV_PRINT_INFO_EVENTS BIT(2)
+	/* Dump DRM state on an error or warning event. */
+#define KOMEDA_DEV_PRINT_DUMP_STATE_ON_EVENT BIT(8)
+	/* Disable rate limiting of event prints (normally one per commit) */
+#define KOMEDA_DEV_PRINT_DISABLE_RATELIMIT BIT(12)
 };
 
 static inline bool
@@ -210,6 +226,9 @@ komeda_product_match(struct komeda_dev *mdev, u32 target)
 	return MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id) == target;
 }
 
+typedef const struct komeda_dev_funcs *
+(*komeda_identify_func)(u32 __iomem *reg, struct komeda_chip_info *chip);
+
 const struct komeda_dev_funcs *
 d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip);
 
@@ -218,11 +237,7 @@ void komeda_dev_destroy(struct komeda_dev *mdev);
 
 struct komeda_dev *dev_to_mdev(struct device *dev);
 
-#ifdef CONFIG_DRM_KOMEDA_ERROR_PRINT
-void komeda_print_events(struct komeda_events *evts);
-#else
-static inline void komeda_print_events(struct komeda_events *evts) {}
-#endif
+void komeda_print_events(struct komeda_events *evts, struct drm_device *dev);
 
 int komeda_dev_resume(struct komeda_dev *mdev);
 int komeda_dev_suspend(struct komeda_dev *mdev);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index d6c2222..ea5cd1e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -33,6 +33,12 @@ static void komeda_unbind(struct device *dev)
 		return;
 
 	komeda_kms_detach(mdrv->kms);
+
+	if (pm_runtime_enabled(dev))
+		pm_runtime_disable(dev);
+	else
+		komeda_dev_suspend(mdrv->mdev);
+
 	komeda_dev_destroy(mdrv->mdev);
 
 	dev_set_drvdata(dev, NULL);
@@ -54,6 +60,10 @@ static int komeda_bind(struct device *dev)
 		goto free_mdrv;
 	}
 
+	pm_runtime_enable(dev);
+	if (!pm_runtime_enabled(dev))
+		komeda_dev_resume(mdrv->mdev);
+
 	mdrv->kms = komeda_kms_attach(mdrv->mdev);
 	if (IS_ERR(mdrv->kms)) {
 		err = PTR_ERR(mdrv->kms);
@@ -65,6 +75,11 @@ static int komeda_bind(struct device *dev)
 	return 0;
 
 destroy_mdev:
+	if (pm_runtime_enabled(dev))
+		pm_runtime_disable(dev);
+	else
+		komeda_dev_suspend(mdrv->mdev);
+
 	komeda_dev_destroy(mdrv->mdev);
 
 free_mdrv:
@@ -123,29 +138,37 @@ static int komeda_platform_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static const struct komeda_product_data komeda_products[] = {
-	[MALI_D71] = {
-		.product_id = MALIDP_D71_PRODUCT_ID,
-		.identify = d71_identify,
-	},
-};
-
 static const struct of_device_id komeda_of_match[] = {
-	{ .compatible = "arm,mali-d71", .data = &komeda_products[MALI_D71], },
+	{ .compatible = "arm,mali-d71", .data = d71_identify, },
+	{ .compatible = "arm,mali-d32", .data = d71_identify, },
 	{},
 };
 
 MODULE_DEVICE_TABLE(of, komeda_of_match);
 
+static int komeda_rt_pm_suspend(struct device *dev)
+{
+	struct komeda_drv *mdrv = dev_get_drvdata(dev);
+
+	return komeda_dev_suspend(mdrv->mdev);
+}
+
+static int komeda_rt_pm_resume(struct device *dev)
+{
+	struct komeda_drv *mdrv = dev_get_drvdata(dev);
+
+	return komeda_dev_resume(mdrv->mdev);
+}
+
 static int __maybe_unused komeda_pm_suspend(struct device *dev)
 {
 	struct komeda_drv *mdrv = dev_get_drvdata(dev);
-	struct drm_device *drm = &mdrv->kms->base;
 	int res;
 
-	res = drm_mode_config_helper_suspend(drm);
+	res = drm_mode_config_helper_suspend(&mdrv->kms->base);
 
-	komeda_dev_suspend(mdrv->mdev);
+	if (!pm_runtime_status_suspended(dev))
+		komeda_dev_suspend(mdrv->mdev);
 
 	return res;
 }
@@ -153,15 +176,16 @@ static int __maybe_unused komeda_pm_suspend(struct device *dev)
 static int __maybe_unused komeda_pm_resume(struct device *dev)
 {
 	struct komeda_drv *mdrv = dev_get_drvdata(dev);
-	struct drm_device *drm = &mdrv->kms->base;
 
-	komeda_dev_resume(mdrv->mdev);
+	if (!pm_runtime_status_suspended(dev))
+		komeda_dev_resume(mdrv->mdev);
 
-	return drm_mode_config_helper_resume(drm);
+	return drm_mode_config_helper_resume(&mdrv->kms->base);
 }
 
 static const struct dev_pm_ops komeda_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(komeda_pm_suspend, komeda_pm_resume)
+	SET_RUNTIME_PM_OPS(komeda_rt_pm_suspend, komeda_rt_pm_resume, NULL)
 };
 
 static struct platform_driver komeda_platform_driver = {
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_event.c b/drivers/gpu/drm/arm/display/komeda/komeda_event.c
index a36fb86..53f944e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_event.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_event.c
@@ -4,6 +4,7 @@
  * Author: James.Qian.Wang <james.qian.wang@arm.com>
  *
  */
+#include <drm/drm_atomic.h>
 #include <drm/drm_print.h>
 
 #include "komeda_dev.h"
@@ -16,6 +17,7 @@ struct komeda_str {
 
 /* return 0 on success,  < 0 on no space.
  */
+__printf(2, 3)
 static int komeda_sprintf(struct komeda_str *str, const char *fmt, ...)
 {
 	va_list args;
@@ -76,6 +78,8 @@ static void evt_str(struct komeda_str *str, u64 events)
 
 	/* LPU errors or events */
 	evt_sprintf(str, events & KOMEDA_EVENT_IBSY, "IBSY|");
+	evt_sprintf(str, events & KOMEDA_EVENT_EMPTY, "EMPTY|");
+	evt_sprintf(str, events & KOMEDA_EVENT_FULL, "FULL|");
 	evt_sprintf(str, events & KOMEDA_ERR_AXIE, "AXIE|");
 	evt_sprintf(str, events & KOMEDA_ERR_ACE0, "ACE0|");
 	evt_sprintf(str, events & KOMEDA_ERR_ACE1, "ACE1|");
@@ -107,20 +111,31 @@ static bool is_new_frame(struct komeda_events *a)
 	       (KOMEDA_EVENT_FLIP | KOMEDA_EVENT_EOW);
 }
 
-void komeda_print_events(struct komeda_events *evts)
+void komeda_print_events(struct komeda_events *evts, struct drm_device *dev)
 {
-	u64 print_evts = KOMEDA_ERR_EVENTS;
+	u64 print_evts = 0;
 	static bool en_print = true;
+	struct komeda_dev *mdev = dev->dev_private;
+	u16 const err_verbosity = mdev->err_verbosity;
+	u64 evts_mask = evts->global | evts->pipes[0] | evts->pipes[1];
 
 	/* reduce the same msg print, only print the first evt for one frame */
 	if (evts->global || is_new_frame(evts))
 		en_print = true;
-	if (!en_print)
+	if (!(err_verbosity & KOMEDA_DEV_PRINT_DISABLE_RATELIMIT) && !en_print)
 		return;
 
-	if ((evts->global | evts->pipes[0] | evts->pipes[1]) & print_evts) {
+	if (err_verbosity & KOMEDA_DEV_PRINT_ERR_EVENTS)
+		print_evts |= KOMEDA_ERR_EVENTS;
+	if (err_verbosity & KOMEDA_DEV_PRINT_WARN_EVENTS)
+		print_evts |= KOMEDA_WARN_EVENTS;
+	if (err_verbosity & KOMEDA_DEV_PRINT_INFO_EVENTS)
+		print_evts |= KOMEDA_INFO_EVENTS;
+
+	if (evts_mask & print_evts) {
 		char msg[256];
 		struct komeda_str str;
+		struct drm_printer p = drm_info_printer(dev->dev);
 
 		str.str = msg;
 		str.sz  = sizeof(msg);
@@ -134,6 +149,9 @@ void komeda_print_events(struct komeda_events *evts)
 		evt_str(&str, evts->pipes[1]);
 
 		DRM_ERROR("err detect: %s\n", msg);
+		if ((err_verbosity & KOMEDA_DEV_PRINT_DUMP_STATE_ON_EVENT) &&
+		    (evts_mask & (KOMEDA_ERR_EVENTS | KOMEDA_WARN_EVENTS)))
+			drm_state_dump(dev, &p);
 
 		en_print = false;
 	}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 52648b4..442d465 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -48,7 +48,7 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
 	memset(&evts, 0, sizeof(evts));
 	status = mdev->funcs->irq_handler(mdev, &evts);
 
-	komeda_print_events(&evts);
+	komeda_print_events(&evts, drm);
 
 	/* Notify the crtc to handle the events */
 	for (i = 0; i < kms->n_crtcs; i++)
@@ -308,10 +308,6 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
 	if (err)
 		goto free_component_binding;
 
-	err = mdev->funcs->enable_irq(mdev);
-	if (err)
-		goto free_component_binding;
-
 	drm->irq_enabled = true;
 
 	drm_kms_helper_poll_init(drm);
@@ -325,7 +321,6 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
 free_interrupts:
 	drm_kms_helper_poll_fini(drm);
 	drm->irq_enabled = false;
-	mdev->funcs->disable_irq(mdev);
 free_component_binding:
 	component_unbind_all(mdev->dev, drm);
 cleanup_mode_config:
@@ -347,7 +342,6 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
 	drm_kms_helper_poll_fini(drm);
 	drm_atomic_helper_shutdown(drm);
 	drm->irq_enabled = false;
-	mdev->funcs->disable_irq(mdev);
 	component_unbind_all(mdev->dev, drm);
 	drm_mode_config_cleanup(drm);
 	komeda_kms_cleanup_private_objs(kms);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index bd6ca7c..ac8725e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -11,6 +11,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include "malidp_utils.h"
+#include "komeda_color_mgmt.h"
 
 #define KOMEDA_MAX_PIPELINES		2
 #define KOMEDA_PIPELINE_MAX_LAYERS	4
@@ -327,6 +328,8 @@ struct komeda_improc_state {
 	struct komeda_component_state base;
 	u8 color_format, color_depth;
 	u16 hsize, vsize;
+	u32 fgamma_coeffs[KOMEDA_N_GAMMA_COEFFS];
+	u32 ctm_coeffs[KOMEDA_N_CTM_COEFFS];
 };
 
 /* display timing controller */
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index 5275011..8f32ae7 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -802,6 +802,12 @@ komeda_improc_validate(struct komeda_improc *improc,
 		st->color_format = BIT(__ffs(avail_formats));
 	}
 
+	if (kcrtc_st->base.color_mgmt_changed) {
+		drm_lut_to_fgamma_coeffs(kcrtc_st->base.gamma_lut,
+					 st->fgamma_coeffs);
+		drm_ctm_to_coeffs(kcrtc_st->base.ctm, st->ctm_coeffs);
+	}
+
 	komeda_component_add_input(&st->base, &dflow->input, 0);
 	komeda_component_set_output(&dflow->input, &improc->base, 0);
 
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 3c70a53..37715cc 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -512,7 +512,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
 	int i, ret;
 	unsigned int block_w, block_h;
 
-	if (!state->crtc || !state->fb)
+	if (!state->crtc || WARN_ON(!state->fb))
 		return 0;
 
 	fb = state->fb;
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 090cc0d..ac8a78b 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -16,7 +16,7 @@
 #include "armada_fb.h"
 #include "armada_gem.h"
 
-static /*const*/ struct fb_ops armada_fb_ops = {
+static const struct fb_ops armada_fb_ops = {
 	.owner		= THIS_MODULE,
 	DRM_FB_HELPER_DEFAULT_OPS,
 	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 93cf8b8..976685f 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -461,16 +461,6 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
 	kfree(sgt);
 }
 
-static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
-{
-	return NULL;
-}
-
-static void
-armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
-{
-}
-
 static int
 armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
 {
@@ -481,8 +471,6 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
 	.map_dma_buf	= armada_gem_prime_map_dma_buf,
 	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
 	.release	= drm_gem_dmabuf_release,
-	.map		= armada_gem_dmabuf_no_kmap,
-	.unmap		= armada_gem_dmabuf_no_kunmap,
 	.mmap		= armada_gem_dmabuf_mmap,
 };
 
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 1f17794..30aa73a 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -33,7 +33,6 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_probe_helper.h>
 
 #include "ast_drv.h"
@@ -86,9 +85,42 @@ static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
 
 static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	struct drm_device *dev;
+	int ret;
+
 	ast_kick_out_firmware_fb(pdev);
 
-	return drm_get_pci_dev(pdev, ent, &driver);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	dev = drm_dev_alloc(&driver, &pdev->dev);
+	if (IS_ERR(dev)) {
+		ret = PTR_ERR(dev);
+		goto err_pci_disable_device;
+	}
+
+	dev->pdev = pdev;
+	pci_set_drvdata(pdev, dev);
+
+	ret = ast_driver_load(dev, ent->driver_data);
+	if (ret)
+		goto err_drm_dev_put;
+
+	ret = drm_dev_register(dev, ent->driver_data);
+	if (ret)
+		goto err_ast_driver_unload;
+
+	return 0;
+
+err_ast_driver_unload:
+	ast_driver_unload(dev);
+err_drm_dev_put:
+	drm_dev_put(dev);
+err_pci_disable_device:
+	pci_disable_device(pdev);
+	return ret;
+
 }
 
 static void
@@ -96,17 +128,19 @@ ast_pci_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
-	drm_put_dev(dev);
+	drm_dev_unregister(dev);
+	ast_driver_unload(dev);
+	drm_dev_put(dev);
 }
 
-
-
 static int ast_drm_freeze(struct drm_device *dev)
 {
-	drm_kms_helper_poll_disable(dev);
-	pci_save_state(dev->pdev);
-	drm_fb_helper_set_suspend_unlocked(dev->fb_helper, true);
+	int error;
 
+	error = drm_mode_config_helper_suspend(dev);
+	if (error)
+		return error;
+	pci_save_state(dev->pdev);
 	return 0;
 }
 
@@ -114,11 +148,7 @@ static int ast_drm_thaw(struct drm_device *dev)
 {
 	ast_post_gpu(dev);
 
-	drm_mode_config_reset(dev);
-	drm_helper_resume_force_mode(dev);
-	drm_fb_helper_set_suspend_unlocked(dev->fb_helper, false);
-
-	return 0;
+	return drm_mode_config_helper_resume(dev);
 }
 
 static int ast_drm_resume(struct drm_device *dev)
@@ -131,8 +161,6 @@ static int ast_drm_resume(struct drm_device *dev)
 	ret = ast_drm_thaw(dev);
 	if (ret)
 		return ret;
-
-	drm_kms_helper_poll_enable(dev);
 	return 0;
 }
 
@@ -150,6 +178,7 @@ static int ast_pm_suspend(struct device *dev)
 	pci_set_power_state(pdev, PCI_D3hot);
 	return 0;
 }
+
 static int ast_pm_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -165,7 +194,6 @@ static int ast_pm_freeze(struct device *dev)
 	if (!ddev || !ddev->dev_private)
 		return -ENODEV;
 	return ast_drm_freeze(ddev);
-
 }
 
 static int ast_pm_thaw(struct device *dev)
@@ -203,10 +231,9 @@ static struct pci_driver ast_pci_driver = {
 DEFINE_DRM_GEM_FOPS(ast_fops);
 
 static struct drm_driver driver = {
-	.driver_features = DRIVER_MODESET | DRIVER_GEM,
-
-	.load = ast_driver_load,
-	.unload = ast_driver_unload,
+	.driver_features = DRIVER_ATOMIC |
+			   DRIVER_GEM |
+			   DRIVER_MODESET,
 
 	.fops = &ast_fops,
 	.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index ff161bd..f5d8780 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -121,6 +121,9 @@ struct ast_private {
 		unsigned int next_index;
 	} cursor;
 
+	struct drm_plane primary_plane;
+	struct drm_plane cursor_plane;
+
 	bool support_wide_screen;
 	enum {
 		ast_use_p2a,
@@ -137,8 +140,6 @@ struct ast_private {
 int ast_driver_load(struct drm_device *dev, unsigned long flags);
 void ast_driver_unload(struct drm_device *dev);
 
-struct ast_gem_object;
-
 #define AST_IO_AR_PORT_WRITE		(0x40)
 #define AST_IO_MISC_PORT_WRITE		(0x42)
 #define AST_IO_VGA_ENABLE_PORT		(0x43)
@@ -280,6 +281,17 @@ struct ast_vbios_mode_info {
 	const struct ast_vbios_enhtable *enh_table;
 };
 
+struct ast_crtc_state {
+	struct drm_crtc_state base;
+
+	/* Last known format of primary plane */
+	const struct drm_format_info *format;
+
+	struct ast_vbios_mode_info vbios_mode_info;
+};
+
+#define to_ast_crtc_state(state) container_of(state, struct ast_crtc_state, base)
+
 extern int ast_mode_init(struct drm_device *dev);
 extern void ast_mode_fini(struct drm_device *dev);
 
@@ -289,10 +301,6 @@ extern void ast_mode_fini(struct drm_device *dev);
 int ast_mm_init(struct ast_private *ast);
 void ast_mm_fini(struct ast_private *ast);
 
-int ast_gem_create(struct drm_device *dev,
-		   u32 size, bool iskernel,
-		   struct drm_gem_object **obj);
-
 /* ast post */
 void ast_enable_vga(struct drm_device *dev);
 void ast_enable_mmio(struct drm_device *dev);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 21715d6..b79f484 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -28,6 +28,7 @@
 
 #include <linux/pci.h>
 
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
@@ -387,8 +388,33 @@ static int ast_get_dram_info(struct drm_device *dev)
 	return 0;
 }
 
+enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
+						const struct drm_display_mode *mode)
+{
+	static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
+
+	struct ast_private *ast = dev->dev_private;
+	unsigned long fbsize, fbpages, max_fbpages;
+
+	/* To support double buffering, a framebuffer may not
+	 * consume more than half of the available VRAM.
+	 */
+	max_fbpages = (ast->vram_size / 2) >> PAGE_SHIFT;
+
+	fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
+	fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
+
+	if (fbpages > max_fbpages)
+		return MODE_MEM;
+
+	return MODE_OK;
+}
+
 static const struct drm_mode_config_funcs ast_mode_funcs = {
-	.fb_create = drm_gem_fb_create
+	.fb_create = drm_gem_fb_create,
+	.mode_valid = ast_mode_config_mode_valid,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
 };
 
 static u32 ast_get_vram_info(struct drm_device *dev)
@@ -506,6 +532,8 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto out_free;
 
+	drm_mode_config_reset(dev);
+
 	ret = drm_fbdev_generic_setup(dev, 32);
 	if (ret)
 		goto out_free;
@@ -535,27 +563,3 @@ void ast_driver_unload(struct drm_device *dev)
 	pci_iounmap(dev->pdev, ast->regs);
 	kfree(ast);
 }
-
-int ast_gem_create(struct drm_device *dev,
-		   u32 size, bool iskernel,
-		   struct drm_gem_object **obj)
-{
-	struct drm_gem_vram_object *gbo;
-	int ret;
-
-	*obj = NULL;
-
-	size = roundup(size, PAGE_SIZE);
-	if (size == 0)
-		return -EINVAL;
-
-	gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
-	if (IS_ERR(gbo)) {
-		ret = PTR_ERR(gbo);
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("failed to allocate GEM object\n");
-		return ret;
-	}
-	*obj = &gbo->bo.base;
-	return 0;
-}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index b13eaa2..34608f0 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -31,6 +31,9 @@
 #include <linux/export.h>
 #include <linux/pci.h>
 
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fourcc.h>
@@ -43,11 +46,14 @@
 
 static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
 static void ast_i2c_destroy(struct ast_i2c_chan *i2c);
-static int ast_cursor_set(struct drm_crtc *crtc,
-			  struct drm_file *file_priv,
-			  uint32_t handle,
-			  uint32_t width,
-			  uint32_t height);
+static int ast_cursor_move(struct drm_crtc *crtc,
+			   int x, int y);
+
+
+static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height);
+static int ast_cursor_update(void *dst, void *src, unsigned int width,
+			     unsigned int height);
+static void ast_cursor_set_base(struct ast_private *ast, u64 address);
 static int ast_cursor_move(struct drm_crtc *crtc,
 			   int x, int y);
 
@@ -65,9 +71,8 @@ static inline void ast_load_palette_index(struct ast_private *ast,
 	ast_io_read8(ast, AST_IO_SEQ_PORT);
 }
 
-static void ast_crtc_load_lut(struct drm_crtc *crtc)
+static void ast_crtc_load_lut(struct ast_private *ast, struct drm_crtc *crtc)
 {
-	struct ast_private *ast = crtc->dev->dev_private;
 	u16 *r, *g, *b;
 	int i;
 
@@ -82,36 +87,32 @@ static void ast_crtc_load_lut(struct drm_crtc *crtc)
 		ast_load_palette_index(ast, i, *r++ >> 8, *g++ >> 8, *b++ >> 8);
 }
 
-static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mode *mode,
+static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
+				    const struct drm_display_mode *mode,
 				    struct drm_display_mode *adjusted_mode,
 				    struct ast_vbios_mode_info *vbios_mode)
 {
-	struct ast_private *ast = crtc->dev->dev_private;
-	const struct drm_framebuffer *fb = crtc->primary->fb;
-	u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
+	u32 refresh_rate_index = 0, refresh_rate;
 	const struct ast_vbios_enhtable *best = NULL;
 	u32 hborder, vborder;
 	bool check_sync;
 
-	switch (fb->format->cpp[0] * 8) {
+	switch (format->cpp[0] * 8) {
 	case 8:
 		vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
-		color_index = VGAModeIndex - 1;
 		break;
 	case 16:
 		vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
-		color_index = HiCModeIndex;
 		break;
 	case 24:
 	case 32:
 		vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
-		color_index = TrueCModeIndex;
 		break;
 	default:
 		return false;
 	}
 
-	switch (crtc->mode.crtc_hdisplay) {
+	switch (mode->crtc_hdisplay) {
 	case 640:
 		vbios_mode->enh_table = &res_640x480[refresh_rate_index];
 		break;
@@ -122,7 +123,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
 		vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
 		break;
 	case 1280:
-		if (crtc->mode.crtc_vdisplay == 800)
+		if (mode->crtc_vdisplay == 800)
 			vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
 		else
 			vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
@@ -134,7 +135,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
 		vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
 		break;
 	case 1600:
-		if (crtc->mode.crtc_vdisplay == 900)
+		if (mode->crtc_vdisplay == 900)
 			vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
 		else
 			vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
@@ -143,7 +144,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
 		vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
 		break;
 	case 1920:
-		if (crtc->mode.crtc_vdisplay == 1080)
+		if (mode->crtc_vdisplay == 1080)
 			vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
 		else
 			vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
@@ -154,7 +155,8 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
 
 	refresh_rate = drm_mode_vrefresh(mode);
 	check_sync = vbios_mode->enh_table->flags & WideScreenMode;
-	do {
+
+	while (1) {
 		const struct ast_vbios_enhtable *loop = vbios_mode->enh_table;
 
 		while (loop->refresh_rate != 0xff) {
@@ -178,7 +180,8 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
 		if (best || !check_sync)
 			break;
 		check_sync = 0;
-	} while (1);
+	}
+
 	if (best)
 		vbios_mode->enh_table = best;
 
@@ -203,38 +206,67 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
 					 vbios_mode->enh_table->vfp +
 					 vbios_mode->enh_table->vsync);
 
+	return true;
+}
+
+static void ast_set_vbios_color_reg(struct ast_private *ast,
+				    const struct drm_format_info *format,
+				    const struct ast_vbios_mode_info *vbios_mode)
+{
+	u32 color_index;
+
+	switch (format->cpp[0]) {
+	case 1:
+		color_index = VGAModeIndex - 1;
+		break;
+	case 2:
+		color_index = HiCModeIndex;
+		break;
+	case 3:
+	case 4:
+		color_index = TrueCModeIndex;
+		break;
+	default:
+		return;
+	}
+
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0x0f) << 4));
+
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
+
+	if (vbios_mode->enh_table->flags & NewModeInfo) {
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, format->cpp[0] * 8);
+	}
+}
+
+static void ast_set_vbios_mode_reg(struct ast_private *ast,
+				   const struct drm_display_mode *adjusted_mode,
+				   const struct ast_vbios_mode_info *vbios_mode)
+{
+	u32 refresh_rate_index, mode_id;
+
 	refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
 	mode_id = vbios_mode->enh_table->mode_id;
 
-	if (ast->chip == AST1180) {
-		/* TODO 1180 */
-	} else {
-		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0xf) << 4));
-		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
-		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
 
-		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
-		if (vbios_mode->enh_table->flags & NewModeInfo) {
-			ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
-			ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92,
-					  fb->format->cpp[0] * 8);
-			ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
-			ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
-			ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
 
-			ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
-			ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
-		}
+	if (vbios_mode->enh_table->flags & NewModeInfo) {
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
 	}
-
-	return true;
-
-
 }
-static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+
+static void ast_set_std_reg(struct ast_private *ast,
+			    struct drm_display_mode *mode,
 			    struct ast_vbios_mode_info *vbios_mode)
 {
-	struct ast_private *ast = crtc->dev->dev_private;
 	const struct ast_vbios_stdtable *stdtable;
 	u32 i;
 	u8 jreg;
@@ -244,18 +276,21 @@ static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
 	jreg = stdtable->misc;
 	ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
 
-	/* Set SEQ */
+	/* Set SEQ; except Screen Disable field */
 	ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03);
-	for (i = 0; i < 4; i++) {
+	ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, stdtable->seq[0]);
+	for (i = 1; i < 4; i++) {
 		jreg = stdtable->seq[i];
-		if (!i)
-			jreg |= 0x20;
 		ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1) , jreg);
 	}
 
-	/* Set CRTC */
+	/* Set CRTC; except base address and offset */
 	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
-	for (i = 0; i < 25; i++)
+	for (i = 0; i < 12; i++)
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
+	for (i = 14; i < 19; i++)
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
+	for (i = 20; i < 25; i++)
 		ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
 
 	/* set AR */
@@ -276,10 +311,10 @@ static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
 		ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
 }
 
-static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+static void ast_set_crtc_reg(struct ast_private *ast,
+			     struct drm_display_mode *mode,
 			     struct ast_vbios_mode_info *vbios_mode)
 {
-	struct ast_private *ast = crtc->dev->dev_private;
 	u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
 	u16 temp, precache = 0;
 
@@ -385,11 +420,9 @@ static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mod
 	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
 }
 
-static void ast_set_offset_reg(struct drm_crtc *crtc)
+static void ast_set_offset_reg(struct ast_private *ast,
+			       struct drm_framebuffer *fb)
 {
-	struct ast_private *ast = crtc->dev->dev_private;
-	const struct drm_framebuffer *fb = crtc->primary->fb;
-
 	u16 offset;
 
 	offset = fb->pitches[0] >> 3;
@@ -397,10 +430,10 @@ static void ast_set_offset_reg(struct drm_crtc *crtc)
 	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
 }
 
-static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mode,
+static void ast_set_dclk_reg(struct ast_private *ast,
+			     struct drm_display_mode *mode,
 			     struct ast_vbios_mode_info *vbios_mode)
 {
-	struct ast_private *ast = dev->dev_private;
 	const struct ast_vbios_dclk_info *clk_info;
 
 	if (ast->chip == AST2500)
@@ -415,14 +448,12 @@ static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mo
 			       ((clk_info->param3 & 0x3) << 4));
 }
 
-static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
-			     struct ast_vbios_mode_info *vbios_mode)
+static void ast_set_color_reg(struct ast_private *ast,
+			      const struct drm_format_info *format)
 {
-	struct ast_private *ast = crtc->dev->dev_private;
-	const struct drm_framebuffer *fb = crtc->primary->fb;
 	u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
 
-	switch (fb->format->cpp[0] * 8) {
+	switch (format->cpp[0] * 8) {
 	case 8:
 		jregA0 = 0x70;
 		jregA3 = 0x01;
@@ -444,7 +475,10 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
 	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0);
 	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3);
 	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
+}
 
+static void ast_set_crtthd_reg(struct ast_private *ast)
+{
 	/* Set Threshold */
 	if (ast->chip == AST2300 || ast->chip == AST2400 ||
 	    ast->chip == AST2500) {
@@ -462,10 +496,10 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
 	}
 }
 
-static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
-		      struct ast_vbios_mode_info *vbios_mode)
+static void ast_set_sync_reg(struct ast_private *ast,
+			     struct drm_display_mode *mode,
+			     struct ast_vbios_mode_info *vbios_mode)
 {
-	struct ast_private *ast = dev->dev_private;
 	u8 jreg;
 
 	jreg  = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
@@ -475,23 +509,9 @@ static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mo
 	ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
 }
 
-static bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
-		     struct ast_vbios_mode_info *vbios_mode)
+static void ast_set_start_address_crt1(struct ast_private *ast,
+				       unsigned offset)
 {
-	const struct drm_framebuffer *fb = crtc->primary->fb;
-
-	switch (fb->format->cpp[0] * 8) {
-	case 8:
-		break;
-	default:
-		return false;
-	}
-	return true;
-}
-
-static void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
-{
-	struct ast_private *ast = crtc->dev->dev_private;
 	u32 addr;
 
 	addr = offset >> 2;
@@ -501,6 +521,247 @@ static void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
 
 }
 
+/*
+ * Primary plane
+ */
+
+static const uint32_t ast_primary_plane_formats[] = {
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_C8,
+};
+
+static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
+						 struct drm_plane_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	struct ast_crtc_state *ast_crtc_state;
+	int ret;
+
+	if (!state->crtc)
+		return 0;
+
+	crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
+
+	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  false, true);
+	if (ret)
+		return ret;
+
+	if (!state->visible)
+		return 0;
+
+	ast_crtc_state = to_ast_crtc_state(crtc_state);
+
+	ast_crtc_state->format = state->fb->format;
+
+	return 0;
+}
+
+void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
+					    struct drm_plane_state *old_state)
+{
+	struct ast_private *ast = plane->dev->dev_private;
+	struct drm_plane_state *state = plane->state;
+	struct drm_gem_vram_object *gbo;
+	s64 gpu_addr;
+
+	gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
+	gpu_addr = drm_gem_vram_offset(gbo);
+	if (WARN_ON_ONCE(gpu_addr < 0))
+		return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
+
+	ast_set_offset_reg(ast, state->fb);
+	ast_set_start_address_crt1(ast, (u32)gpu_addr);
+
+	ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
+}
+
+static void
+ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+					struct drm_plane_state *old_state)
+{
+	struct ast_private *ast = plane->dev->dev_private;
+
+	ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
+}
+
+static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = {
+	.prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+	.cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
+	.atomic_check = ast_primary_plane_helper_atomic_check,
+	.atomic_update = ast_primary_plane_helper_atomic_update,
+	.atomic_disable = ast_primary_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs ast_primary_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = drm_plane_cleanup,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/*
+ * Cursor plane
+ */
+
+static const uint32_t ast_cursor_plane_formats[] = {
+	DRM_FORMAT_ARGB8888,
+};
+
+static int
+ast_cursor_plane_helper_prepare_fb(struct drm_plane *plane,
+				   struct drm_plane_state *new_state)
+{
+	struct drm_framebuffer *fb = new_state->fb;
+	struct drm_crtc *crtc = new_state->crtc;
+	struct drm_gem_vram_object *gbo;
+	struct ast_private *ast;
+	int ret;
+	void *src, *dst;
+
+	if (!crtc || !fb)
+		return 0;
+
+	if (WARN_ON_ONCE(fb->width > AST_MAX_HWC_WIDTH) ||
+	    WARN_ON_ONCE(fb->height > AST_MAX_HWC_HEIGHT))
+		return -EINVAL; /* BUG: didn't test in atomic_check() */
+
+	ast = crtc->dev->dev_private;
+
+	gbo = drm_gem_vram_of_gem(fb->obj[0]);
+	src = drm_gem_vram_vmap(gbo);
+	if (IS_ERR(src)) {
+		ret = PTR_ERR(src);
+		goto err_drm_gem_vram_unpin;
+	}
+
+	dst = drm_gem_vram_vmap(ast->cursor.gbo[ast->cursor.next_index]);
+	if (IS_ERR(dst)) {
+		ret = PTR_ERR(dst);
+		goto err_drm_gem_vram_vunmap_src;
+	}
+
+	ret = ast_cursor_update(dst, src, fb->width, fb->height);
+	if (ret)
+		goto err_drm_gem_vram_vunmap_dst;
+
+	/* Always unmap buffers here. Destination buffers are
+	 * perma-pinned while the driver is active. We're only
+	 * changing ref-counters here.
+	 */
+	drm_gem_vram_vunmap(ast->cursor.gbo[ast->cursor.next_index], dst);
+	drm_gem_vram_vunmap(gbo, src);
+
+	return 0;
+
+err_drm_gem_vram_vunmap_dst:
+	drm_gem_vram_vunmap(ast->cursor.gbo[ast->cursor.next_index], dst);
+err_drm_gem_vram_vunmap_src:
+	drm_gem_vram_vunmap(gbo, src);
+err_drm_gem_vram_unpin:
+	drm_gem_vram_unpin(gbo);
+	return ret;
+}
+
+static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
+						struct drm_plane_state *state)
+{
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_crtc_state *crtc_state;
+	int ret;
+
+	if (!state->crtc)
+		return 0;
+
+	crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
+
+	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  true, true);
+	if (ret)
+		return ret;
+
+	if (!state->visible)
+		return 0;
+
+	if (fb->width > AST_MAX_HWC_WIDTH || fb->height > AST_MAX_HWC_HEIGHT)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void
+ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
+				      struct drm_plane_state *old_state)
+{
+	struct drm_plane_state *state = plane->state;
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_framebuffer *fb = state->fb;
+	struct ast_private *ast = plane->dev->dev_private;
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	struct drm_gem_vram_object *gbo;
+	s64 off;
+	u8 jreg;
+
+	ast_crtc->offset_x = AST_MAX_HWC_WIDTH - fb->width;
+	ast_crtc->offset_y = AST_MAX_HWC_WIDTH - fb->height;
+
+	if (state->fb != old_state->fb) {
+		/* A new cursor image was installed. */
+		gbo = ast->cursor.gbo[ast->cursor.next_index];
+		off = drm_gem_vram_offset(gbo);
+		if (WARN_ON_ONCE(off < 0))
+			return; /* Bug: we didn't pin cursor HW BO to VRAM. */
+		ast_cursor_set_base(ast, off);
+
+		++ast->cursor.next_index;
+		ast->cursor.next_index %= ARRAY_SIZE(ast->cursor.gbo);
+	}
+
+	ast_cursor_move(crtc, state->crtc_x, state->crtc_y);
+
+	jreg = 0x2;
+	/* enable ARGB cursor */
+	jreg |= 1;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+}
+
+static void
+ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
+				       struct drm_plane_state *old_state)
+{
+	struct ast_private *ast = plane->dev->dev_private;
+
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
+}
+
+static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = {
+	.prepare_fb = ast_cursor_plane_helper_prepare_fb,
+	.cleanup_fb = NULL, /* not required for cursor plane */
+	.atomic_check = ast_cursor_plane_helper_atomic_check,
+	.atomic_update = ast_cursor_plane_helper_atomic_update,
+	.atomic_disable = ast_cursor_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs ast_cursor_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = drm_plane_cleanup,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/*
+ * CRTC
+ */
+
 static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct ast_private *ast = crtc->dev->dev_private;
@@ -508,179 +769,196 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
 	if (ast->chip == AST1180)
 		return;
 
+	/* TODO: Maybe control display signal generation with
+	 *       Sync Enable (bit CR17.7).
+	 */
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
-		ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
 		if (ast->tx_chip_type == AST_TX_DP501)
 			ast_set_dp501_video_output(crtc->dev, 1);
-		ast_crtc_load_lut(crtc);
+		ast_crtc_load_lut(ast, crtc);
 		break;
 	case DRM_MODE_DPMS_OFF:
 		if (ast->tx_chip_type == AST_TX_DP501)
 			ast_set_dp501_video_output(crtc->dev, 0);
-		ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
 		break;
 	}
 }
 
-static int ast_crtc_do_set_base(struct drm_crtc *crtc,
-				struct drm_framebuffer *fb,
-				int x, int y, int atomic)
+static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
+					struct drm_crtc_state *state)
 {
-	struct drm_gem_vram_object *gbo;
-	int ret;
-	s64 gpu_addr;
-
-	if (!atomic && fb) {
-		gbo = drm_gem_vram_of_gem(fb->obj[0]);
-		drm_gem_vram_unpin(gbo);
-	}
-
-	gbo = drm_gem_vram_of_gem(crtc->primary->fb->obj[0]);
-
-	ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
-	if (ret)
-		return ret;
-	gpu_addr = drm_gem_vram_offset(gbo);
-	if (gpu_addr < 0) {
-		ret = (int)gpu_addr;
-		goto err_drm_gem_vram_unpin;
-	}
-
-	ast_set_offset_reg(crtc);
-	ast_set_start_address_crt1(crtc, (u32)gpu_addr);
-
-	return 0;
-
-err_drm_gem_vram_unpin:
-	drm_gem_vram_unpin(gbo);
-	return ret;
-}
-
-static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-			     struct drm_framebuffer *old_fb)
-{
-	return ast_crtc_do_set_base(crtc, old_fb, x, y, 0);
-}
-
-static int ast_crtc_mode_set(struct drm_crtc *crtc,
-			     struct drm_display_mode *mode,
-			     struct drm_display_mode *adjusted_mode,
-			     int x, int y,
-			     struct drm_framebuffer *old_fb)
-{
-	struct drm_device *dev = crtc->dev;
 	struct ast_private *ast = crtc->dev->dev_private;
-	struct ast_vbios_mode_info vbios_mode;
-	bool ret;
+	struct ast_crtc_state *ast_state;
+	const struct drm_format_info *format;
+	bool succ;
+
 	if (ast->chip == AST1180) {
 		DRM_ERROR("AST 1180 modesetting not supported\n");
 		return -EINVAL;
 	}
 
-	ret = ast_get_vbios_mode_info(crtc, mode, adjusted_mode, &vbios_mode);
-	if (ret == false)
+	ast_state = to_ast_crtc_state(state);
+
+	format = ast_state->format;
+	if (!format)
+		return 0;
+
+	succ = ast_get_vbios_mode_info(format, &state->mode,
+				       &state->adjusted_mode,
+				       &ast_state->vbios_mode_info);
+	if (!succ)
 		return -EINVAL;
-	ast_open_key(ast);
-
-	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
-
-	ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
-	ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
-	ast_set_offset_reg(crtc);
-	ast_set_dclk_reg(dev, adjusted_mode, &vbios_mode);
-	ast_set_ext_reg(crtc, adjusted_mode, &vbios_mode);
-	ast_set_sync_reg(dev, adjusted_mode, &vbios_mode);
-	ast_set_dac_reg(crtc, adjusted_mode, &vbios_mode);
-
-	ast_crtc_mode_set_base(crtc, x, y, old_fb);
 
 	return 0;
 }
 
-static void ast_crtc_disable(struct drm_crtc *crtc)
-{
-	DRM_DEBUG_KMS("\n");
-	ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-	if (crtc->primary->fb) {
-		struct drm_framebuffer *fb = crtc->primary->fb;
-		struct drm_gem_vram_object *gbo =
-			drm_gem_vram_of_gem(fb->obj[0]);
-
-		drm_gem_vram_unpin(gbo);
-	}
-	crtc->primary->fb = NULL;
-}
-
-static void ast_crtc_prepare(struct drm_crtc *crtc)
-{
-
-}
-
-static void ast_crtc_commit(struct drm_crtc *crtc)
+static void ast_crtc_helper_atomic_begin(struct drm_crtc *crtc,
+					 struct drm_crtc_state *old_crtc_state)
 {
 	struct ast_private *ast = crtc->dev->dev_private;
-	ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
-	ast_crtc_load_lut(crtc);
+
+	ast_open_key(ast);
 }
 
+static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
+					 struct drm_crtc_state *old_crtc_state)
+{
+	struct drm_device *dev = crtc->dev;
+	struct ast_private *ast = dev->dev_private;
+	struct ast_crtc_state *ast_state;
+	const struct drm_format_info *format;
+	struct ast_vbios_mode_info *vbios_mode_info;
+	struct drm_display_mode *adjusted_mode;
+
+	crtc->state->no_vblank = true;
+
+	ast_state = to_ast_crtc_state(crtc->state);
+
+	format = ast_state->format;
+	if (!format)
+		return;
+
+	vbios_mode_info = &ast_state->vbios_mode_info;
+
+	ast_set_color_reg(ast, format);
+	ast_set_vbios_color_reg(ast, format, vbios_mode_info);
+
+	if (!crtc->state->mode_changed)
+		return;
+
+	adjusted_mode = &crtc->state->adjusted_mode;
+
+	ast_set_vbios_mode_reg(ast, adjusted_mode, vbios_mode_info);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
+	ast_set_std_reg(ast, adjusted_mode, vbios_mode_info);
+	ast_set_crtc_reg(ast, adjusted_mode, vbios_mode_info);
+	ast_set_dclk_reg(ast, adjusted_mode, vbios_mode_info);
+	ast_set_crtthd_reg(ast);
+	ast_set_sync_reg(ast, adjusted_mode, vbios_mode_info);
+}
+
+static void
+ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+			      struct drm_crtc_state *old_crtc_state)
+{
+	ast_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static void
+ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
+			       struct drm_crtc_state *old_crtc_state)
+{
+	ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
 
 static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
-	.dpms = ast_crtc_dpms,
-	.mode_set = ast_crtc_mode_set,
-	.mode_set_base = ast_crtc_mode_set_base,
-	.disable = ast_crtc_disable,
-	.prepare = ast_crtc_prepare,
-	.commit = ast_crtc_commit,
-
+	.atomic_check = ast_crtc_helper_atomic_check,
+	.atomic_begin = ast_crtc_helper_atomic_begin,
+	.atomic_flush = ast_crtc_helper_atomic_flush,
+	.atomic_enable = ast_crtc_helper_atomic_enable,
+	.atomic_disable = ast_crtc_helper_atomic_disable,
 };
 
-static void ast_crtc_reset(struct drm_crtc *crtc)
-{
-
-}
-
-static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-			      u16 *blue, uint32_t size,
-			      struct drm_modeset_acquire_ctx *ctx)
-{
-	ast_crtc_load_lut(crtc);
-
-	return 0;
-}
-
-
 static void ast_crtc_destroy(struct drm_crtc *crtc)
 {
 	drm_crtc_cleanup(crtc);
 	kfree(crtc);
 }
 
+static struct drm_crtc_state *
+ast_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+	struct ast_crtc_state *new_ast_state, *ast_state;
+
+	if (WARN_ON(!crtc->state))
+		return NULL;
+
+	new_ast_state = kmalloc(sizeof(*new_ast_state), GFP_KERNEL);
+	if (!new_ast_state)
+		return NULL;
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &new_ast_state->base);
+
+	ast_state = to_ast_crtc_state(crtc->state);
+
+	new_ast_state->format = ast_state->format;
+	memcpy(&new_ast_state->vbios_mode_info, &ast_state->vbios_mode_info,
+	       sizeof(new_ast_state->vbios_mode_info));
+
+	return &new_ast_state->base;
+}
+
+static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+					  struct drm_crtc_state *state)
+{
+	struct ast_crtc_state *ast_state = to_ast_crtc_state(state);
+
+	__drm_atomic_helper_crtc_destroy_state(&ast_state->base);
+	kfree(ast_state);
+}
+
 static const struct drm_crtc_funcs ast_crtc_funcs = {
-	.cursor_set = ast_cursor_set,
-	.cursor_move = ast_cursor_move,
-	.reset = ast_crtc_reset,
+	.reset = drm_atomic_helper_crtc_reset,
 	.set_config = drm_crtc_helper_set_config,
-	.gamma_set = ast_crtc_gamma_set,
+	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 	.destroy = ast_crtc_destroy,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.atomic_duplicate_state = ast_crtc_atomic_duplicate_state,
+	.atomic_destroy_state = ast_crtc_atomic_destroy_state,
 };
 
 static int ast_crtc_init(struct drm_device *dev)
 {
+	struct ast_private *ast = dev->dev_private;
 	struct ast_crtc *crtc;
+	int ret;
 
 	crtc = kzalloc(sizeof(struct ast_crtc), GFP_KERNEL);
 	if (!crtc)
 		return -ENOMEM;
 
-	drm_crtc_init(dev, &crtc->base, &ast_crtc_funcs);
+	ret = drm_crtc_init_with_planes(dev, &crtc->base, &ast->primary_plane,
+					&ast->cursor_plane, &ast_crtc_funcs,
+					NULL);
+	if (ret)
+		goto err_kfree;
+
 	drm_mode_crtc_set_gamma_size(&crtc->base, 256);
 	drm_crtc_helper_add(&crtc->base, &ast_crtc_helper_funcs);
 	return 0;
+
+err_kfree:
+	kfree(crtc);
+	return ret;
 }
 
+/*
+ * Encoder
+ */
+
 static void ast_encoder_destroy(struct drm_encoder *encoder)
 {
 	drm_encoder_cleanup(encoder);
@@ -691,35 +969,6 @@ static const struct drm_encoder_funcs ast_enc_funcs = {
 	.destroy = ast_encoder_destroy,
 };
 
-static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-
-}
-
-static void ast_encoder_mode_set(struct drm_encoder *encoder,
-			       struct drm_display_mode *mode,
-			       struct drm_display_mode *adjusted_mode)
-{
-}
-
-static void ast_encoder_prepare(struct drm_encoder *encoder)
-{
-
-}
-
-static void ast_encoder_commit(struct drm_encoder *encoder)
-{
-
-}
-
-
-static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
-	.dpms = ast_encoder_dpms,
-	.prepare = ast_encoder_prepare,
-	.commit = ast_encoder_commit,
-	.mode_set = ast_encoder_mode_set,
-};
-
 static int ast_encoder_init(struct drm_device *dev)
 {
 	struct ast_encoder *ast_encoder;
@@ -730,12 +979,15 @@ static int ast_encoder_init(struct drm_device *dev)
 
 	drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
 			 DRM_MODE_ENCODER_DAC, NULL);
-	drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs);
 
 	ast_encoder->base.possible_crtcs = 1;
 	return 0;
 }
 
+/*
+ * Connector
+ */
+
 static int ast_get_modes(struct drm_connector *connector)
 {
 	struct ast_connector *ast_connector = to_ast_connector(connector);
@@ -834,14 +1086,16 @@ static void ast_connector_destroy(struct drm_connector *connector)
 }
 
 static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
-	.mode_valid = ast_mode_valid,
 	.get_modes = ast_get_modes,
+	.mode_valid = ast_mode_valid,
 };
 
 static const struct drm_connector_funcs ast_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.reset = drm_atomic_helper_connector_reset,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = ast_connector_destroy,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static int ast_connector_init(struct drm_device *dev)
@@ -890,8 +1144,7 @@ static int ast_cursor_init(struct drm_device *dev)
 	size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
 
 	for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
-		gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
-					  size, 0, false);
+		gbo = drm_gem_vram_create(dev, size, 0);
 		if (IS_ERR(gbo)) {
 			ret = PTR_ERR(gbo);
 			goto err_drm_gem_vram_put;
@@ -934,10 +1187,39 @@ static void ast_cursor_fini(struct drm_device *dev)
 
 int ast_mode_init(struct drm_device *dev)
 {
+	struct ast_private *ast = dev->dev_private;
+	int ret;
+
+	memset(&ast->primary_plane, 0, sizeof(ast->primary_plane));
+	ret = drm_universal_plane_init(dev, &ast->primary_plane, 0x01,
+				       &ast_primary_plane_funcs,
+				       ast_primary_plane_formats,
+				       ARRAY_SIZE(ast_primary_plane_formats),
+				       NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+	if (ret) {
+		DRM_ERROR("ast: drm_universal_plane_init() failed: %d\n", ret);
+		return ret;
+	}
+	drm_plane_helper_add(&ast->primary_plane,
+			     &ast_primary_plane_helper_funcs);
+
+	ret = drm_universal_plane_init(dev, &ast->cursor_plane, 0x01,
+				       &ast_cursor_plane_funcs,
+				       ast_cursor_plane_formats,
+				       ARRAY_SIZE(ast_cursor_plane_formats),
+				       NULL, DRM_PLANE_TYPE_CURSOR, NULL);
+	if (ret) {
+		DRM_ERROR("drm_universal_plane_failed(): %d\n", ret);
+		return ret;
+	}
+	drm_plane_helper_add(&ast->cursor_plane,
+			     &ast_cursor_plane_helper_funcs);
+
 	ast_cursor_init(dev);
 	ast_crtc_init(dev);
 	ast_encoder_init(dev);
 	ast_connector_init(dev);
+
 	return 0;
 }
 
@@ -1153,106 +1435,6 @@ static void ast_cursor_set_base(struct ast_private *ast, u64 address)
 	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
 }
 
-static int ast_show_cursor(struct drm_crtc *crtc, void *src,
-			   unsigned int width, unsigned int height)
-{
-	struct ast_private *ast = crtc->dev->dev_private;
-	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
-	struct drm_gem_vram_object *gbo;
-	void *dst;
-	s64 off;
-	int ret;
-	u8 jreg;
-
-	gbo = ast->cursor.gbo[ast->cursor.next_index];
-	dst = drm_gem_vram_vmap(gbo);
-	if (IS_ERR(dst))
-		return PTR_ERR(dst);
-	off = drm_gem_vram_offset(gbo);
-	if (off < 0) {
-		ret = (int)off;
-		goto err_drm_gem_vram_vunmap;
-	}
-
-	ret = ast_cursor_update(dst, src, width, height);
-	if (ret)
-		goto err_drm_gem_vram_vunmap;
-	ast_cursor_set_base(ast, off);
-
-	ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
-	ast_crtc->offset_y = AST_MAX_HWC_WIDTH - height;
-
-	jreg = 0x2;
-	/* enable ARGB cursor */
-	jreg |= 1;
-	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
-
-	++ast->cursor.next_index;
-	ast->cursor.next_index %= ARRAY_SIZE(ast->cursor.gbo);
-
-	drm_gem_vram_vunmap(gbo, dst);
-
-	return 0;
-
-err_drm_gem_vram_vunmap:
-	drm_gem_vram_vunmap(gbo, dst);
-	return ret;
-}
-
-static void ast_hide_cursor(struct drm_crtc *crtc)
-{
-	struct ast_private *ast = crtc->dev->dev_private;
-
-	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
-}
-
-static int ast_cursor_set(struct drm_crtc *crtc,
-			  struct drm_file *file_priv,
-			  uint32_t handle,
-			  uint32_t width,
-			  uint32_t height)
-{
-	struct drm_gem_object *obj;
-	struct drm_gem_vram_object *gbo;
-	u8 *src;
-	int ret;
-
-	if (!handle) {
-		ast_hide_cursor(crtc);
-		return 0;
-	}
-
-	if (width > AST_MAX_HWC_WIDTH || height > AST_MAX_HWC_HEIGHT)
-		return -EINVAL;
-
-	obj = drm_gem_object_lookup(file_priv, handle);
-	if (!obj) {
-		DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
-		return -ENOENT;
-	}
-	gbo = drm_gem_vram_of_gem(obj);
-	src = drm_gem_vram_vmap(gbo);
-	if (IS_ERR(src)) {
-		ret = PTR_ERR(src);
-		goto err_drm_gem_object_put_unlocked;
-	}
-
-	ret = ast_show_cursor(crtc, src, width, height);
-	if (ret)
-		goto err_drm_gem_vram_vunmap;
-
-	drm_gem_vram_vunmap(gbo, src);
-	drm_gem_object_put_unlocked(obj);
-
-	return 0;
-
-err_drm_gem_vram_vunmap:
-	drm_gem_vram_vunmap(gbo, src);
-err_drm_gem_object_put_unlocked:
-	drm_gem_object_put_unlocked(obj);
-	return ret;
-}
-
 static int ast_cursor_move(struct drm_crtc *crtc,
 			   int x, int y)
 {
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
deleted file mode 100644
index 580aa26..0000000
--- a/drivers/gpu/drm/ati_pcigart.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * \file ati_pcigart.c
- * ATI PCI GART support
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-
-#include <drm/ati_pcigart.h>
-#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
-#include <drm/drm_print.h>
-
-# define ATI_PCIGART_PAGE_SIZE		4096	/**< PCI GART page size */
-
-static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
-				       struct drm_ati_pcigart_info *gart_info)
-{
-	gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
-						PAGE_SIZE);
-	if (gart_info->table_handle == NULL)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void drm_ati_free_pcigart_table(struct drm_device *dev,
-				       struct drm_ati_pcigart_info *gart_info)
-{
-	drm_pci_free(dev, gart_info->table_handle);
-	gart_info->table_handle = NULL;
-}
-
-int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
-{
-	struct drm_sg_mem *entry = dev->sg;
-	unsigned long pages;
-	int i;
-	int max_pages;
-
-	/* we need to support large memory configurations */
-	if (!entry) {
-		DRM_ERROR("no scatter/gather memory!\n");
-		return 0;
-	}
-
-	if (gart_info->bus_addr) {
-
-		max_pages = (gart_info->table_size / sizeof(u32));
-		pages = (entry->pages <= max_pages)
-		  ? entry->pages : max_pages;
-
-		for (i = 0; i < pages; i++) {
-			if (!entry->busaddr[i])
-				break;
-			pci_unmap_page(dev->pdev, entry->busaddr[i],
-					 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		}
-
-		if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
-			gart_info->bus_addr = 0;
-	}
-
-	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
-	    gart_info->table_handle) {
-		drm_ati_free_pcigart_table(dev, gart_info);
-	}
-
-	return 1;
-}
-EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
-
-int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
-{
-	struct drm_local_map *map = &gart_info->mapping;
-	struct drm_sg_mem *entry = dev->sg;
-	void *address = NULL;
-	unsigned long pages;
-	u32 *pci_gart = NULL, page_base, gart_idx;
-	dma_addr_t bus_address = 0;
-	int i, j, ret = -ENOMEM;
-	int max_ati_pages, max_real_pages;
-
-	if (!entry) {
-		DRM_ERROR("no scatter/gather memory!\n");
-		goto done;
-	}
-
-	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
-		DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
-
-		if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
-			DRM_ERROR("fail to set dma mask to 0x%Lx\n",
-				  (unsigned long long)gart_info->table_mask);
-			ret = -EFAULT;
-			goto done;
-		}
-
-		ret = drm_ati_alloc_pcigart_table(dev, gart_info);
-		if (ret) {
-			DRM_ERROR("cannot allocate PCI GART page!\n");
-			goto done;
-		}
-
-		pci_gart = gart_info->table_handle->vaddr;
-		address = gart_info->table_handle->vaddr;
-		bus_address = gart_info->table_handle->busaddr;
-	} else {
-		address = gart_info->addr;
-		bus_address = gart_info->bus_addr;
-		DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
-			  (unsigned long long)bus_address,
-			  (unsigned long)address);
-	}
-
-
-	max_ati_pages = (gart_info->table_size / sizeof(u32));
-	max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
-	pages = (entry->pages <= max_real_pages)
-	    ? entry->pages : max_real_pages;
-
-	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
-		memset(pci_gart, 0, max_ati_pages * sizeof(u32));
-	} else {
-		memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32));
-	}
-
-	gart_idx = 0;
-	for (i = 0; i < pages; i++) {
-		/* we need to support large memory configurations */
-		entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
-						 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
-			DRM_ERROR("unable to map PCIGART pages!\n");
-			drm_ati_pcigart_cleanup(dev, gart_info);
-			address = NULL;
-			bus_address = 0;
-			ret = -ENOMEM;
-			goto done;
-		}
-		page_base = (u32) entry->busaddr[i];
-
-		for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
-			u32 offset;
-			u32 val;
-
-			switch(gart_info->gart_reg_if) {
-			case DRM_ATI_GART_IGP:
-				val = page_base | 0xc;
-				break;
-			case DRM_ATI_GART_PCIE:
-				val = (page_base >> 8) | 0xc;
-				break;
-			default:
-			case DRM_ATI_GART_PCI:
-				val = page_base;
-				break;
-			}
-			if (gart_info->gart_table_location ==
-			    DRM_ATI_GART_MAIN) {
-				pci_gart[gart_idx] = cpu_to_le32(val);
-			} else {
-				offset = gart_idx * sizeof(u32);
-				writel(val, (void __iomem *)map->handle + offset);
-			}
-			gart_idx++;
-			page_base += ATI_PCIGART_PAGE_SIZE;
-		}
-	}
-	ret = 0;
-
-#if defined(__i386__) || defined(__x86_64__)
-	wbinvd();
-#else
-	mb();
-#endif
-
-      done:
-	gart_info->addr = address;
-	gart_info->bus_addr = bus_address;
-	return ret;
-}
-EXPORT_SYMBOL(drm_ati_pcigart_init);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index f2e73e6d..1098513 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -73,7 +73,11 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
 	unsigned long prate;
 	unsigned int mask = ATMEL_HLCDC_CLKDIV_MASK | ATMEL_HLCDC_CLKPOL;
 	unsigned int cfg = 0;
-	int div;
+	int div, ret;
+
+	ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
+	if (ret)
+		return;
 
 	vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay;
 	vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end;
@@ -95,14 +99,14 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
 		     (adj->crtc_hdisplay - 1) |
 		     ((adj->crtc_vdisplay - 1) << 16));
 
+	prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
+	mode_rate = adj->crtc_clock * 1000;
 	if (!crtc->dc->desc->fixed_clksrc) {
+		prate *= 2;
 		cfg |= ATMEL_HLCDC_CLKSEL;
 		mask |= ATMEL_HLCDC_CLKSEL;
 	}
 
-	prate = 2 * clk_get_rate(crtc->dc->hlcdc->sys_clk);
-	mode_rate = adj->crtc_clock * 1000;
-
 	div = DIV_ROUND_UP(prate, mode_rate);
 	if (div < 2) {
 		div = 2;
@@ -117,8 +121,8 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
 		int div_low = prate / mode_rate;
 
 		if (div_low >= 2 &&
-		    ((prate / div_low - mode_rate) <
-		     10 * (mode_rate - prate / div)))
+		    (10 * (prate / div_low - mode_rate) <
+		     (mode_rate - prate / div)))
 			/*
 			 * At least 10 times better when using a higher
 			 * frequency than requested, instead of a lower.
@@ -147,6 +151,8 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
 			   ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO |
 			   ATMEL_HLCDC_GUARDTIME_MASK | ATMEL_HLCDC_MODE_MASK,
 			   cfg);
+
+	clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
 }
 
 static enum drm_mode_status
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 9264029..112aa50 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -557,12 +557,6 @@ static irqreturn_t atmel_hlcdc_dc_irq_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static struct drm_framebuffer *atmel_hlcdc_fb_create(struct drm_device *dev,
-		struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
-{
-	return drm_gem_fb_create(dev, file_priv, mode_cmd);
-}
-
 struct atmel_hlcdc_dc_commit {
 	struct work_struct work;
 	struct drm_device *dev;
@@ -657,7 +651,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
 }
 
 static const struct drm_mode_config_funcs mode_config_funcs = {
-	.fb_create = atmel_hlcdc_fb_create,
+	.fb_create = drm_gem_fb_create,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = atmel_hlcdc_dc_atomic_commit,
 };
@@ -727,18 +721,10 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
 	dc->hlcdc = dev_get_drvdata(dev->dev->parent);
 	dev->dev_private = dc;
 
-	if (dc->desc->fixed_clksrc) {
-		ret = clk_prepare_enable(dc->hlcdc->sys_clk);
-		if (ret) {
-			dev_err(dev->dev, "failed to enable sys_clk\n");
-			goto err_destroy_wq;
-		}
-	}
-
 	ret = clk_prepare_enable(dc->hlcdc->periph_clk);
 	if (ret) {
 		dev_err(dev->dev, "failed to enable periph_clk\n");
-		goto err_sys_clk_disable;
+		goto err_destroy_wq;
 	}
 
 	pm_runtime_enable(dev->dev);
@@ -774,9 +760,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
 err_periph_clk_disable:
 	pm_runtime_disable(dev->dev);
 	clk_disable_unprepare(dc->hlcdc->periph_clk);
-err_sys_clk_disable:
-	if (dc->desc->fixed_clksrc)
-		clk_disable_unprepare(dc->hlcdc->sys_clk);
 
 err_destroy_wq:
 	destroy_workqueue(dc->wq);
@@ -801,8 +784,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
 
 	pm_runtime_disable(dev->dev);
 	clk_disable_unprepare(dc->hlcdc->periph_clk);
-	if (dc->desc->fixed_clksrc)
-		clk_disable_unprepare(dc->hlcdc->sys_clk);
 	destroy_workqueue(dc->wq);
 }
 
@@ -916,8 +897,6 @@ static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
 	regmap_read(regmap, ATMEL_HLCDC_IMR, &dc->suspend.imr);
 	regmap_write(regmap, ATMEL_HLCDC_IDR, dc->suspend.imr);
 	clk_disable_unprepare(dc->hlcdc->periph_clk);
-	if (dc->desc->fixed_clksrc)
-		clk_disable_unprepare(dc->hlcdc->sys_clk);
 
 	return 0;
 }
@@ -927,8 +906,6 @@ static int atmel_hlcdc_dc_drm_resume(struct device *dev)
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 	struct atmel_hlcdc_dc *dc = drm_dev->dev_private;
 
-	if (dc->desc->fixed_clksrc)
-		clk_prepare_enable(dc->hlcdc->sys_clk);
 	clk_prepare_enable(dc->hlcdc->periph_clk);
 	regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, dc->suspend.imr);
 
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 034f202..40800ec 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -604,7 +604,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
 	int ret;
 	int i;
 
-	if (!state->base.crtc || !fb)
+	if (!state->base.crtc || WARN_ON(!fb))
 		return 0;
 
 	crtc_state = drm_atomic_get_existing_crtc_state(s->state, s->crtc);
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index e567bdf..b615b7d 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -255,7 +255,7 @@ void bochs_hw_setformat(struct bochs_device *bochs,
 		DRM_ERROR("%s: Huh? Got framebuffer format 0x%x",
 			  __func__, format->format);
 		break;
-	};
+	}
 }
 
 void bochs_hw_setbase(struct bochs_device *bochs,
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 3436297..0b9ca58 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -16,16 +16,6 @@
 menu "Display Interface Bridges"
 	depends on DRM && DRM_BRIDGE
 
-config DRM_ANALOGIX_ANX78XX
-	tristate "Analogix ANX78XX bridge"
-	select DRM_KMS_HELPER
-	select REGMAP_I2C
-	---help---
-	  ANX78XX is an ultra-low power Full-HD SlimPort transmitter
-	  designed for portable devices. The ANX78XX transforms
-	  the HDMI output of an application processor to MyDP
-	  or DisplayPort.
-
 config DRM_CDNS_DSI
 	tristate "Cadence DPI/DSI bridge"
 	select DRM_KMS_HELPER
@@ -45,14 +35,14 @@
 	  Support for non-programmable RGB to VGA DAC bridges, such as ADI
 	  ADV7123, TI THS8134 and THS8135 or passive resistor ladder DACs.
 
-config DRM_LVDS_ENCODER
-	tristate "Transparent parallel to LVDS encoder support"
+config DRM_LVDS_CODEC
+	tristate "Transparent LVDS encoders and decoders support"
 	depends on OF
 	select DRM_KMS_HELPER
 	select DRM_PANEL_BRIDGE
 	help
-	  Support for transparent parallel to LVDS encoders that don't require
-	  any configuration.
+	  Support for transparent LVDS encoders and decoders that don't
+	  require any configuration.
 
 config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
 	tristate "MegaChips stdp4028-ge-b850v3-fw and stdp2690-ge-b850v3-fw"
@@ -60,10 +50,10 @@
 	select DRM_KMS_HELPER
 	select DRM_PANEL
 	---help---
-          This is a driver for the display bridges of
-          GE B850v3 that convert dual channel LVDS
-          to DP++. This is used with the i.MX6 imx-ldb
-          driver. You are likely to say N here.
+	  This is a driver for the display bridges of
+	  GE B850v3 that convert dual channel LVDS
+	  to DP++. This is used with the i.MX6 imx-ldb
+	  driver. You are likely to say N here.
 
 config DRM_NXP_PTN3460
 	tristate "NXP PTN3460 DP/LVDS bridge"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 4934fcf..cd16ce8 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,8 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
 obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
 obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
-obj-$(CONFIG_DRM_LVDS_ENCODER) += lvds-encoder.o
+obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
 obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
 obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
 obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
@@ -12,8 +11,9 @@
 obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
 obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
 obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
-obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
 obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
 obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
 obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
+
+obj-y += analogix/
 obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
deleted file mode 100644
index 274989f..0000000
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ /dev/null
@@ -1,1529 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright(c) 2016, Analogix Semiconductor.
- *
- * Based on anx7808 driver obtained from chromeos with copyright:
- * Copyright(c) 2013, Google Inc.
- */
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/gpio/consumer.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/regmap.h>
-#include <linux/regulator/consumer.h>
-#include <linux/types.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
-
-#include "analogix-anx78xx.h"
-
-#define I2C_NUM_ADDRESSES	5
-#define I2C_IDX_TX_P0		0
-#define I2C_IDX_TX_P1		1
-#define I2C_IDX_TX_P2		2
-#define I2C_IDX_RX_P0		3
-#define I2C_IDX_RX_P1		4
-
-#define XTAL_CLK		270 /* 27M */
-#define AUX_CH_BUFFER_SIZE	16
-#define AUX_WAIT_TIMEOUT_MS	15
-
-static const u8 anx7808_i2c_addresses[] = {
-	[I2C_IDX_TX_P0] = 0x78,
-	[I2C_IDX_TX_P1] = 0x7a,
-	[I2C_IDX_TX_P2] = 0x72,
-	[I2C_IDX_RX_P0] = 0x7e,
-	[I2C_IDX_RX_P1] = 0x80,
-};
-
-static const u8 anx781x_i2c_addresses[] = {
-	[I2C_IDX_TX_P0] = 0x70,
-	[I2C_IDX_TX_P1] = 0x7a,
-	[I2C_IDX_TX_P2] = 0x72,
-	[I2C_IDX_RX_P0] = 0x7e,
-	[I2C_IDX_RX_P1] = 0x80,
-};
-
-struct anx78xx_platform_data {
-	struct regulator *dvdd10;
-	struct gpio_desc *gpiod_hpd;
-	struct gpio_desc *gpiod_pd;
-	struct gpio_desc *gpiod_reset;
-
-	int hpd_irq;
-	int intp_irq;
-};
-
-struct anx78xx {
-	struct drm_dp_aux aux;
-	struct drm_bridge bridge;
-	struct i2c_client *client;
-	struct edid *edid;
-	struct drm_connector connector;
-	struct anx78xx_platform_data pdata;
-	struct mutex lock;
-
-	/*
-	 * I2C Slave addresses of ANX7814 are mapped as TX_P0, TX_P1, TX_P2,
-	 * RX_P0 and RX_P1.
-	 */
-	struct i2c_client *i2c_dummy[I2C_NUM_ADDRESSES];
-	struct regmap *map[I2C_NUM_ADDRESSES];
-
-	u16 chipid;
-	u8 dpcd[DP_RECEIVER_CAP_SIZE];
-
-	bool powered;
-};
-
-static inline struct anx78xx *connector_to_anx78xx(struct drm_connector *c)
-{
-	return container_of(c, struct anx78xx, connector);
-}
-
-static inline struct anx78xx *bridge_to_anx78xx(struct drm_bridge *bridge)
-{
-	return container_of(bridge, struct anx78xx, bridge);
-}
-
-static int anx78xx_set_bits(struct regmap *map, u8 reg, u8 mask)
-{
-	return regmap_update_bits(map, reg, mask, mask);
-}
-
-static int anx78xx_clear_bits(struct regmap *map, u8 reg, u8 mask)
-{
-	return regmap_update_bits(map, reg, mask, 0);
-}
-
-static bool anx78xx_aux_op_finished(struct anx78xx *anx78xx)
-{
-	unsigned int value;
-	int err;
-
-	err = regmap_read(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL2_REG,
-			  &value);
-	if (err < 0)
-		return false;
-
-	return (value & SP_AUX_EN) == 0;
-}
-
-static int anx78xx_aux_wait(struct anx78xx *anx78xx)
-{
-	unsigned long timeout;
-	unsigned int status;
-	int err;
-
-	timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
-
-	while (!anx78xx_aux_op_finished(anx78xx)) {
-		if (time_after(jiffies, timeout)) {
-			if (!anx78xx_aux_op_finished(anx78xx)) {
-				DRM_ERROR("Timed out waiting AUX to finish\n");
-				return -ETIMEDOUT;
-			}
-
-			break;
-		}
-
-		usleep_range(1000, 2000);
-	}
-
-	/* Read the AUX channel access status */
-	err = regmap_read(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_CH_STATUS_REG,
-			  &status);
-	if (err < 0) {
-		DRM_ERROR("Failed to read from AUX channel: %d\n", err);
-		return err;
-	}
-
-	if (status & SP_AUX_STATUS) {
-		DRM_ERROR("Failed to wait for AUX channel (status: %02x)\n",
-			  status);
-		return -ETIMEDOUT;
-	}
-
-	return 0;
-}
-
-static int anx78xx_aux_address(struct anx78xx *anx78xx, unsigned int addr)
-{
-	int err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_ADDR_7_0_REG,
-			   addr & 0xff);
-	if (err)
-		return err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_ADDR_15_8_REG,
-			   (addr & 0xff00) >> 8);
-	if (err)
-		return err;
-
-	/*
-	 * DP AUX CH Address Register #2, only update bits[3:0]
-	 * [7:4] RESERVED
-	 * [3:0] AUX_ADDR[19:16], Register control AUX CH address.
-	 */
-	err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P0],
-				 SP_AUX_ADDR_19_16_REG,
-				 SP_AUX_ADDR_19_16_MASK,
-				 (addr & 0xf0000) >> 16);
-
-	if (err)
-		return err;
-
-	return 0;
-}
-
-static ssize_t anx78xx_aux_transfer(struct drm_dp_aux *aux,
-				    struct drm_dp_aux_msg *msg)
-{
-	struct anx78xx *anx78xx = container_of(aux, struct anx78xx, aux);
-	u8 ctrl1 = msg->request;
-	u8 ctrl2 = SP_AUX_EN;
-	u8 *buffer = msg->buffer;
-	int err;
-
-	/* The DP AUX transmit and receive buffer has 16 bytes. */
-	if (WARN_ON(msg->size > AUX_CH_BUFFER_SIZE))
-		return -E2BIG;
-
-	/* Zero-sized messages specify address-only transactions. */
-	if (msg->size < 1)
-		ctrl2 |= SP_ADDR_ONLY;
-	else	/* For non-zero-sized set the length field. */
-		ctrl1 |= (msg->size - 1) << SP_AUX_LENGTH_SHIFT;
-
-	if ((msg->request & DP_AUX_I2C_READ) == 0) {
-		/* When WRITE | MOT write values to data buffer */
-		err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P0],
-					SP_DP_BUF_DATA0_REG, buffer,
-					msg->size);
-		if (err)
-			return err;
-	}
-
-	/* Write address and request */
-	err = anx78xx_aux_address(anx78xx, msg->address);
-	if (err)
-		return err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL1_REG,
-			   ctrl1);
-	if (err)
-		return err;
-
-	/* Start transaction */
-	err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P0],
-				 SP_DP_AUX_CH_CTRL2_REG, SP_ADDR_ONLY |
-				 SP_AUX_EN, ctrl2);
-	if (err)
-		return err;
-
-	err = anx78xx_aux_wait(anx78xx);
-	if (err)
-		return err;
-
-	msg->reply = DP_AUX_I2C_REPLY_ACK;
-
-	if ((msg->size > 0) && (msg->request & DP_AUX_I2C_READ)) {
-		/* Read values from data buffer */
-		err = regmap_bulk_read(anx78xx->map[I2C_IDX_TX_P0],
-				       SP_DP_BUF_DATA0_REG, buffer,
-				       msg->size);
-		if (err)
-			return err;
-	}
-
-	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
-				 SP_DP_AUX_CH_CTRL2_REG, SP_ADDR_ONLY);
-	if (err)
-		return err;
-
-	return msg->size;
-}
-
-static int anx78xx_set_hpd(struct anx78xx *anx78xx)
-{
-	int err;
-
-	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
-				 SP_TMDS_CTRL_BASE + 7, SP_PD_RT);
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL3_REG,
-			       SP_HPD_OUT);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-static int anx78xx_clear_hpd(struct anx78xx *anx78xx)
-{
-	int err;
-
-	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL3_REG,
-				 SP_HPD_OUT);
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
-			       SP_TMDS_CTRL_BASE + 7, SP_PD_RT);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-static const struct reg_sequence tmds_phy_initialization[] = {
-	{ SP_TMDS_CTRL_BASE +  1, 0x90 },
-	{ SP_TMDS_CTRL_BASE +  2, 0xa9 },
-	{ SP_TMDS_CTRL_BASE +  6, 0x92 },
-	{ SP_TMDS_CTRL_BASE +  7, 0x80 },
-	{ SP_TMDS_CTRL_BASE + 20, 0xf2 },
-	{ SP_TMDS_CTRL_BASE + 22, 0xc4 },
-	{ SP_TMDS_CTRL_BASE + 23, 0x18 },
-};
-
-static int anx78xx_rx_initialization(struct anx78xx *anx78xx)
-{
-	int err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
-			   SP_AUD_MUTE | SP_VID_MUTE);
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], SP_CHIP_CTRL_REG,
-			       SP_MAN_HDMI5V_DET | SP_PLLLOCK_CKDT_EN |
-			       SP_DIGITAL_CKDT_EN);
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
-			       SP_SOFTWARE_RESET1_REG, SP_HDCP_MAN_RST |
-			       SP_SW_MAN_RST | SP_TMDS_RST | SP_VIDEO_RST);
-	if (err)
-		return err;
-
-	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
-				 SP_SOFTWARE_RESET1_REG, SP_HDCP_MAN_RST |
-				 SP_SW_MAN_RST | SP_TMDS_RST | SP_VIDEO_RST);
-	if (err)
-		return err;
-
-	/* Sync detect change, GP set mute */
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
-			       SP_AUD_EXCEPTION_ENABLE_BASE + 1, BIT(5) |
-			       BIT(6));
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
-			       SP_AUD_EXCEPTION_ENABLE_BASE + 3,
-			       SP_AEC_EN21);
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], SP_AUDVID_CTRL_REG,
-			       SP_AVC_EN | SP_AAC_OE | SP_AAC_EN);
-	if (err)
-		return err;
-
-	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
-				 SP_SYSTEM_POWER_DOWN1_REG, SP_PWDN_CTRL);
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
-			       SP_VID_DATA_RANGE_CTRL_REG, SP_R2Y_INPUT_LIMIT);
-	if (err)
-		return err;
-
-	/* Enable DDC stretch */
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
-			   SP_DP_EXTRA_I2C_DEV_ADDR_REG, SP_I2C_EXTRA_ADDR);
-	if (err)
-		return err;
-
-	/* TMDS phy initialization */
-	err = regmap_multi_reg_write(anx78xx->map[I2C_IDX_RX_P0],
-				     tmds_phy_initialization,
-				     ARRAY_SIZE(tmds_phy_initialization));
-	if (err)
-		return err;
-
-	err = anx78xx_clear_hpd(anx78xx);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-static const u8 dp_tx_output_precise_tune_bits[20] = {
-	0x01, 0x03, 0x07, 0x7f, 0x71, 0x6b, 0x7f,
-	0x73, 0x7f, 0x7f, 0x00, 0x00, 0x00, 0x00,
-	0x0c, 0x42, 0x1e, 0x3e, 0x72, 0x7e,
-};
-
-static int anx78xx_link_phy_initialization(struct anx78xx *anx78xx)
-{
-	int err;
-
-	/*
-	 * REVISIT : It is writing to a RESERVED bits in Analog Control 0
-	 * register.
-	 */
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_ANALOG_CTRL0_REG,
-			   0x02);
-	if (err)
-		return err;
-
-	/*
-	 * Write DP TX output emphasis precise tune bits.
-	 */
-	err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P1],
-				SP_DP_TX_LT_CTRL0_REG,
-				dp_tx_output_precise_tune_bits,
-				ARRAY_SIZE(dp_tx_output_precise_tune_bits));
-
-	if (err)
-		return err;
-
-	return 0;
-}
-
-static int anx78xx_xtal_clk_sel(struct anx78xx *anx78xx)
-{
-	unsigned int value;
-	int err;
-
-	err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P2],
-				 SP_ANALOG_DEBUG2_REG,
-				 SP_XTAL_FRQ | SP_FORCE_SW_OFF_BYPASS,
-				 SP_XTAL_FRQ_27M);
-	if (err)
-		return err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL3_REG,
-			   XTAL_CLK & SP_WAIT_COUNTER_7_0_MASK);
-	if (err)
-		return err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL4_REG,
-			   ((XTAL_CLK & 0xff00) >> 2) | (XTAL_CLK / 10));
-	if (err)
-		return err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
-			   SP_I2C_GEN_10US_TIMER0_REG, XTAL_CLK & 0xff);
-	if (err)
-		return err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
-			   SP_I2C_GEN_10US_TIMER1_REG,
-			   (XTAL_CLK & 0xff00) >> 8);
-	if (err)
-		return err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_MISC_CTRL_REG,
-			   XTAL_CLK / 10 - 1);
-	if (err)
-		return err;
-
-	err = regmap_read(anx78xx->map[I2C_IDX_RX_P0],
-			  SP_HDMI_US_TIMER_CTRL_REG,
-			  &value);
-	if (err)
-		return err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0],
-			   SP_HDMI_US_TIMER_CTRL_REG,
-			   (value & SP_MS_TIMER_MARGIN_10_8_MASK) |
-			   ((((XTAL_CLK / 10) >> 1) - 2) << 3));
-	if (err)
-		return err;
-
-	return 0;
-}
-
-static const struct reg_sequence otp_key_protect[] = {
-	{ SP_OTP_KEY_PROTECT1_REG, SP_OTP_PSW1 },
-	{ SP_OTP_KEY_PROTECT2_REG, SP_OTP_PSW2 },
-	{ SP_OTP_KEY_PROTECT3_REG, SP_OTP_PSW3 },
-};
-
-static int anx78xx_tx_initialization(struct anx78xx *anx78xx)
-{
-	int err;
-
-	/* Set terminal resistor to 50 ohm */
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL2_REG,
-			   0x30);
-	if (err)
-		return err;
-
-	/* Enable aux double diff output */
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
-			       SP_DP_AUX_CH_CTRL2_REG, 0x08);
-	if (err)
-		return err;
-
-	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
-				 SP_DP_HDCP_CTRL_REG, SP_AUTO_EN |
-				 SP_AUTO_START);
-	if (err)
-		return err;
-
-	err = regmap_multi_reg_write(anx78xx->map[I2C_IDX_TX_P0],
-				     otp_key_protect,
-				     ARRAY_SIZE(otp_key_protect));
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
-			       SP_HDCP_KEY_COMMAND_REG, SP_DISABLE_SYNC_HDCP);
-	if (err)
-		return err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL8_REG,
-			   SP_VID_VRES_TH);
-	if (err)
-		return err;
-
-	/*
-	 * DP HDCP auto authentication wait timer (when downstream starts to
-	 * auth, DP side will wait for this period then do auth automatically)
-	 */
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_HDCP_AUTO_TIMER_REG,
-			   0x00);
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
-			       SP_DP_HDCP_CTRL_REG, SP_LINK_POLLING);
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
-			       SP_DP_LINK_DEBUG_CTRL_REG, SP_M_VID_DEBUG);
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2],
-			       SP_ANALOG_DEBUG2_REG, SP_POWERON_TIME_1P5MS);
-	if (err)
-		return err;
-
-	err = anx78xx_xtal_clk_sel(anx78xx);
-	if (err)
-		return err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_DEFER_CTRL_REG,
-			   SP_DEFER_CTRL_EN | 0x0c);
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
-			       SP_DP_POLLING_CTRL_REG,
-			       SP_AUTO_POLLING_DISABLE);
-	if (err)
-		return err;
-
-	/*
-	 * Short the link integrity check timer to speed up bstatus
-	 * polling for HDCP CTS item 1A-07
-	 */
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
-			   SP_HDCP_LINK_CHECK_TIMER_REG, 0x1d);
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
-			       SP_DP_MISC_CTRL_REG, SP_EQ_TRAINING_LOOP);
-	if (err)
-		return err;
-
-	/* Power down the main link by default */
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
-			       SP_DP_ANALOG_POWER_DOWN_REG, SP_CH0_PD);
-	if (err)
-		return err;
-
-	err = anx78xx_link_phy_initialization(anx78xx);
-	if (err)
-		return err;
-
-	/* Gen m_clk with downspreading */
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
-			       SP_DP_M_CALCULATION_CTRL_REG, SP_M_GEN_CLK_SEL);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-static int anx78xx_enable_interrupts(struct anx78xx *anx78xx)
-{
-	int err;
-
-	/*
-	 * BIT0: INT pin assertion polarity: 1 = assert high
-	 * BIT1: INT pin output type: 0 = push/pull
-	 */
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_INT_CTRL_REG, 0x01);
-	if (err)
-		return err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2],
-			   SP_COMMON_INT_MASK4_REG, SP_HPD_LOST | SP_HPD_PLUG);
-	if (err)
-		return err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_MASK1_REG,
-			   SP_TRAINING_FINISH);
-	if (err)
-		return err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_INT_MASK1_REG,
-			   SP_CKDT_CHG | SP_SCDT_CHG);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-static void anx78xx_poweron(struct anx78xx *anx78xx)
-{
-	struct anx78xx_platform_data *pdata = &anx78xx->pdata;
-	int err;
-
-	if (WARN_ON(anx78xx->powered))
-		return;
-
-	if (pdata->dvdd10) {
-		err = regulator_enable(pdata->dvdd10);
-		if (err) {
-			DRM_ERROR("Failed to enable DVDD10 regulator: %d\n",
-				  err);
-			return;
-		}
-
-		usleep_range(1000, 2000);
-	}
-
-	gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
-	usleep_range(1000, 2000);
-
-	gpiod_set_value_cansleep(pdata->gpiod_pd, 0);
-	usleep_range(1000, 2000);
-
-	gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
-
-	/* Power on registers module */
-	anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
-			 SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
-	anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
-			   SP_REGISTER_PD | SP_TOTAL_PD);
-
-	anx78xx->powered = true;
-}
-
-static void anx78xx_poweroff(struct anx78xx *anx78xx)
-{
-	struct anx78xx_platform_data *pdata = &anx78xx->pdata;
-	int err;
-
-	if (WARN_ON(!anx78xx->powered))
-		return;
-
-	gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
-	usleep_range(1000, 2000);
-
-	gpiod_set_value_cansleep(pdata->gpiod_pd, 1);
-	usleep_range(1000, 2000);
-
-	if (pdata->dvdd10) {
-		err = regulator_disable(pdata->dvdd10);
-		if (err) {
-			DRM_ERROR("Failed to disable DVDD10 regulator: %d\n",
-				  err);
-			return;
-		}
-
-		usleep_range(1000, 2000);
-	}
-
-	anx78xx->powered = false;
-}
-
-static int anx78xx_start(struct anx78xx *anx78xx)
-{
-	int err;
-
-	/* Power on all modules */
-	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
-				 SP_POWERDOWN_CTRL_REG,
-				 SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD |
-				 SP_LINK_PD);
-
-	err = anx78xx_enable_interrupts(anx78xx);
-	if (err) {
-		DRM_ERROR("Failed to enable interrupts: %d\n", err);
-		goto err_poweroff;
-	}
-
-	err = anx78xx_rx_initialization(anx78xx);
-	if (err) {
-		DRM_ERROR("Failed receiver initialization: %d\n", err);
-		goto err_poweroff;
-	}
-
-	err = anx78xx_tx_initialization(anx78xx);
-	if (err) {
-		DRM_ERROR("Failed transmitter initialization: %d\n", err);
-		goto err_poweroff;
-	}
-
-	/*
-	 * This delay seems to help keep the hardware in a good state. Without
-	 * it, there are times where it fails silently.
-	 */
-	usleep_range(10000, 15000);
-
-	return 0;
-
-err_poweroff:
-	DRM_ERROR("Failed SlimPort transmitter initialization: %d\n", err);
-	anx78xx_poweroff(anx78xx);
-
-	return err;
-}
-
-static int anx78xx_init_pdata(struct anx78xx *anx78xx)
-{
-	struct anx78xx_platform_data *pdata = &anx78xx->pdata;
-	struct device *dev = &anx78xx->client->dev;
-
-	/* 1.0V digital core power regulator  */
-	pdata->dvdd10 = devm_regulator_get(dev, "dvdd10");
-	if (IS_ERR(pdata->dvdd10)) {
-		if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER)
-			DRM_ERROR("DVDD10 regulator not found\n");
-
-		return PTR_ERR(pdata->dvdd10);
-	}
-
-	/* GPIO for HPD */
-	pdata->gpiod_hpd = devm_gpiod_get(dev, "hpd", GPIOD_IN);
-	if (IS_ERR(pdata->gpiod_hpd))
-		return PTR_ERR(pdata->gpiod_hpd);
-
-	/* GPIO for chip power down */
-	pdata->gpiod_pd = devm_gpiod_get(dev, "pd", GPIOD_OUT_HIGH);
-	if (IS_ERR(pdata->gpiod_pd))
-		return PTR_ERR(pdata->gpiod_pd);
-
-	/* GPIO for chip reset */
-	pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
-
-	return PTR_ERR_OR_ZERO(pdata->gpiod_reset);
-}
-
-static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
-{
-	u8 dp_bw, dpcd[2];
-	int err;
-
-	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
-			   0x0);
-	if (err)
-		return err;
-
-	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
-				 SP_POWERDOWN_CTRL_REG,
-				 SP_TOTAL_PD);
-	if (err)
-		return err;
-
-	err = drm_dp_dpcd_readb(&anx78xx->aux, DP_MAX_LINK_RATE, &dp_bw);
-	if (err < 0)
-		return err;
-
-	switch (dp_bw) {
-	case DP_LINK_BW_1_62:
-	case DP_LINK_BW_2_7:
-	case DP_LINK_BW_5_4:
-		break;
-
-	default:
-		DRM_DEBUG_KMS("DP bandwidth (%#02x) not supported\n", dp_bw);
-		return -EINVAL;
-	}
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
-			       SP_VIDEO_MUTE);
-	if (err)
-		return err;
-
-	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
-				 SP_VID_CTRL1_REG, SP_VIDEO_EN);
-	if (err)
-		return err;
-
-	/* Get DPCD info */
-	err = drm_dp_dpcd_read(&anx78xx->aux, DP_DPCD_REV,
-			       &anx78xx->dpcd, DP_RECEIVER_CAP_SIZE);
-	if (err < 0) {
-		DRM_ERROR("Failed to read DPCD: %d\n", err);
-		return err;
-	}
-
-	/* Clear channel x SERDES power down */
-	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
-				 SP_DP_ANALOG_POWER_DOWN_REG, SP_CH0_PD);
-	if (err)
-		return err;
-
-	/*
-	 * Power up the sink (DP_SET_POWER register is only available on DPCD
-	 * v1.1 and later).
-	 */
-	if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
-		err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
-		if (err < 0) {
-			DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
-				  err);
-			return err;
-		}
-
-		dpcd[0] &= ~DP_SET_POWER_MASK;
-		dpcd[0] |= DP_SET_POWER_D0;
-
-		err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
-		if (err < 0) {
-			DRM_ERROR("Failed to power up DisplayPort link: %d\n",
-				  err);
-			return err;
-		}
-
-		/*
-		 * According to the DP 1.1 specification, a "Sink Device must
-		 * exit the power saving state within 1 ms" (Section 2.5.3.1,
-		 * Table 5-52, "Sink Control Field", register 0x600).
-		 */
-		usleep_range(1000, 2000);
-	}
-
-	/* Possibly enable downspread on the sink */
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
-			   SP_DP_DOWNSPREAD_CTRL1_REG, 0);
-	if (err)
-		return err;
-
-	if (anx78xx->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5) {
-		DRM_DEBUG("Enable downspread on the sink\n");
-		/* 4000PPM */
-		err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
-				   SP_DP_DOWNSPREAD_CTRL1_REG, 8);
-		if (err)
-			return err;
-
-		err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_DOWNSPREAD_CTRL,
-					 DP_SPREAD_AMP_0_5);
-		if (err < 0)
-			return err;
-	} else {
-		err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_DOWNSPREAD_CTRL, 0);
-		if (err < 0)
-			return err;
-	}
-
-	/* Set the lane count and the link rate on the sink */
-	if (drm_dp_enhanced_frame_cap(anx78xx->dpcd))
-		err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
-				       SP_DP_SYSTEM_CTRL_BASE + 4,
-				       SP_ENHANCED_MODE);
-	else
-		err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
-					 SP_DP_SYSTEM_CTRL_BASE + 4,
-					 SP_ENHANCED_MODE);
-	if (err)
-		return err;
-
-	dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd);
-	dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
-			   SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
-	if (err)
-		return err;
-
-	dpcd[1] = drm_dp_max_lane_count(anx78xx->dpcd);
-
-	if (drm_dp_enhanced_frame_cap(anx78xx->dpcd))
-		dpcd[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-
-	err = drm_dp_dpcd_write(&anx78xx->aux, DP_LINK_BW_SET, dpcd,
-				sizeof(dpcd));
-	if (err < 0) {
-		DRM_ERROR("Failed to configure link: %d\n", err);
-		return err;
-	}
-
-	/* Start training on the source */
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_LT_CTRL_REG,
-			   SP_LT_EN);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-static int anx78xx_config_dp_output(struct anx78xx *anx78xx)
-{
-	int err;
-
-	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
-				 SP_VIDEO_MUTE);
-	if (err)
-		return err;
-
-	/* Enable DP output */
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
-			       SP_VIDEO_EN);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-static int anx78xx_send_video_infoframe(struct anx78xx *anx78xx,
-					struct hdmi_avi_infoframe *frame)
-{
-	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
-	int err;
-
-	err = hdmi_avi_infoframe_pack(frame, buffer, sizeof(buffer));
-	if (err < 0) {
-		DRM_ERROR("Failed to pack AVI infoframe: %d\n", err);
-		return err;
-	}
-
-	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
-				 SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_EN);
-	if (err)
-		return err;
-
-	err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P2],
-				SP_INFOFRAME_AVI_DB1_REG, buffer,
-				frame->length);
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
-			       SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_UD);
-	if (err)
-		return err;
-
-	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
-			       SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_EN);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-static int anx78xx_get_downstream_info(struct anx78xx *anx78xx)
-{
-	u8 value;
-	int err;
-
-	err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SINK_COUNT, &value);
-	if (err < 0) {
-		DRM_ERROR("Get sink count failed %d\n", err);
-		return err;
-	}
-
-	if (!DP_GET_SINK_COUNT(value)) {
-		DRM_ERROR("Downstream disconnected\n");
-		return -EIO;
-	}
-
-	return 0;
-}
-
-static int anx78xx_get_modes(struct drm_connector *connector)
-{
-	struct anx78xx *anx78xx = connector_to_anx78xx(connector);
-	int err, num_modes = 0;
-
-	if (WARN_ON(!anx78xx->powered))
-		return 0;
-
-	if (anx78xx->edid)
-		return drm_add_edid_modes(connector, anx78xx->edid);
-
-	mutex_lock(&anx78xx->lock);
-
-	err = anx78xx_get_downstream_info(anx78xx);
-	if (err) {
-		DRM_ERROR("Failed to get downstream info: %d\n", err);
-		goto unlock;
-	}
-
-	anx78xx->edid = drm_get_edid(connector, &anx78xx->aux.ddc);
-	if (!anx78xx->edid) {
-		DRM_ERROR("Failed to read EDID\n");
-		goto unlock;
-	}
-
-	err = drm_connector_update_edid_property(connector,
-						 anx78xx->edid);
-	if (err) {
-		DRM_ERROR("Failed to update EDID property: %d\n", err);
-		goto unlock;
-	}
-
-	num_modes = drm_add_edid_modes(connector, anx78xx->edid);
-
-unlock:
-	mutex_unlock(&anx78xx->lock);
-
-	return num_modes;
-}
-
-static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = {
-	.get_modes = anx78xx_get_modes,
-};
-
-static enum drm_connector_status anx78xx_detect(struct drm_connector *connector,
-						bool force)
-{
-	struct anx78xx *anx78xx = connector_to_anx78xx(connector);
-
-	if (!gpiod_get_value(anx78xx->pdata.gpiod_hpd))
-		return connector_status_disconnected;
-
-	return connector_status_connected;
-}
-
-static const struct drm_connector_funcs anx78xx_connector_funcs = {
-	.fill_modes = drm_helper_probe_single_connector_modes,
-	.detect = anx78xx_detect,
-	.destroy = drm_connector_cleanup,
-	.reset = drm_atomic_helper_connector_reset,
-	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int anx78xx_bridge_attach(struct drm_bridge *bridge)
-{
-	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
-	int err;
-
-	if (!bridge->encoder) {
-		DRM_ERROR("Parent encoder object not found");
-		return -ENODEV;
-	}
-
-	/* Register aux channel */
-	anx78xx->aux.name = "DP-AUX";
-	anx78xx->aux.dev = &anx78xx->client->dev;
-	anx78xx->aux.transfer = anx78xx_aux_transfer;
-
-	err = drm_dp_aux_register(&anx78xx->aux);
-	if (err < 0) {
-		DRM_ERROR("Failed to register aux channel: %d\n", err);
-		return err;
-	}
-
-	err = drm_connector_init(bridge->dev, &anx78xx->connector,
-				 &anx78xx_connector_funcs,
-				 DRM_MODE_CONNECTOR_DisplayPort);
-	if (err) {
-		DRM_ERROR("Failed to initialize connector: %d\n", err);
-		return err;
-	}
-
-	drm_connector_helper_add(&anx78xx->connector,
-				 &anx78xx_connector_helper_funcs);
-
-	err = drm_connector_register(&anx78xx->connector);
-	if (err) {
-		DRM_ERROR("Failed to register connector: %d\n", err);
-		return err;
-	}
-
-	anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
-
-	err = drm_connector_attach_encoder(&anx78xx->connector,
-					   bridge->encoder);
-	if (err) {
-		DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
-		return err;
-	}
-
-	return 0;
-}
-
-static enum drm_mode_status
-anx78xx_bridge_mode_valid(struct drm_bridge *bridge,
-			  const struct drm_display_mode *mode)
-{
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		return MODE_NO_INTERLACE;
-
-	/* Max 1200p at 5.4 GHz, one lane */
-	if (mode->clock > 154000)
-		return MODE_CLOCK_HIGH;
-
-	return MODE_OK;
-}
-
-static void anx78xx_bridge_disable(struct drm_bridge *bridge)
-{
-	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
-
-	/* Power off all modules except configuration registers access */
-	anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
-			 SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
-}
-
-static void anx78xx_bridge_mode_set(struct drm_bridge *bridge,
-				const struct drm_display_mode *mode,
-				const struct drm_display_mode *adjusted_mode)
-{
-	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
-	struct hdmi_avi_infoframe frame;
-	int err;
-
-	if (WARN_ON(!anx78xx->powered))
-		return;
-
-	mutex_lock(&anx78xx->lock);
-
-	err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
-						       &anx78xx->connector,
-						       adjusted_mode);
-	if (err) {
-		DRM_ERROR("Failed to setup AVI infoframe: %d\n", err);
-		goto unlock;
-	}
-
-	err = anx78xx_send_video_infoframe(anx78xx, &frame);
-	if (err)
-		DRM_ERROR("Failed to send AVI infoframe: %d\n", err);
-
-unlock:
-	mutex_unlock(&anx78xx->lock);
-}
-
-static void anx78xx_bridge_enable(struct drm_bridge *bridge)
-{
-	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
-	int err;
-
-	err = anx78xx_start(anx78xx);
-	if (err) {
-		DRM_ERROR("Failed to initialize: %d\n", err);
-		return;
-	}
-
-	err = anx78xx_set_hpd(anx78xx);
-	if (err)
-		DRM_ERROR("Failed to set HPD: %d\n", err);
-}
-
-static const struct drm_bridge_funcs anx78xx_bridge_funcs = {
-	.attach = anx78xx_bridge_attach,
-	.mode_valid = anx78xx_bridge_mode_valid,
-	.disable = anx78xx_bridge_disable,
-	.mode_set = anx78xx_bridge_mode_set,
-	.enable = anx78xx_bridge_enable,
-};
-
-static irqreturn_t anx78xx_hpd_threaded_handler(int irq, void *data)
-{
-	struct anx78xx *anx78xx = data;
-	int err;
-
-	if (anx78xx->powered)
-		return IRQ_HANDLED;
-
-	mutex_lock(&anx78xx->lock);
-
-	/* Cable is plugged in, power on the chip */
-	anx78xx_poweron(anx78xx);
-
-	err = anx78xx_enable_interrupts(anx78xx);
-	if (err)
-		DRM_ERROR("Failed to enable interrupts: %d\n", err);
-
-	mutex_unlock(&anx78xx->lock);
-
-	return IRQ_HANDLED;
-}
-
-static int anx78xx_handle_dp_int_1(struct anx78xx *anx78xx, u8 irq)
-{
-	int err;
-
-	DRM_DEBUG_KMS("Handle DP interrupt 1: %02x\n", irq);
-
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_STATUS1_REG,
-			   irq);
-	if (err)
-		return err;
-
-	if (irq & SP_TRAINING_FINISH) {
-		DRM_DEBUG_KMS("IRQ: hardware link training finished\n");
-		err = anx78xx_config_dp_output(anx78xx);
-	}
-
-	return err;
-}
-
-static bool anx78xx_handle_common_int_4(struct anx78xx *anx78xx, u8 irq)
-{
-	bool event = false;
-	int err;
-
-	DRM_DEBUG_KMS("Handle common interrupt 4: %02x\n", irq);
-
-	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2],
-			   SP_COMMON_INT_STATUS4_REG, irq);
-	if (err) {
-		DRM_ERROR("Failed to write SP_COMMON_INT_STATUS4 %d\n", err);
-		return event;
-	}
-
-	if (irq & SP_HPD_LOST) {
-		DRM_DEBUG_KMS("IRQ: Hot plug detect - cable is pulled out\n");
-		event = true;
-		anx78xx_poweroff(anx78xx);
-		/* Free cached EDID */
-		kfree(anx78xx->edid);
-		anx78xx->edid = NULL;
-	} else if (irq & SP_HPD_PLUG) {
-		DRM_DEBUG_KMS("IRQ: Hot plug detect - cable plug\n");
-		event = true;
-	}
-
-	return event;
-}
-
-static void anx78xx_handle_hdmi_int_1(struct anx78xx *anx78xx, u8 irq)
-{
-	unsigned int value;
-	int err;
-
-	DRM_DEBUG_KMS("Handle HDMI interrupt 1: %02x\n", irq);
-
-	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_INT_STATUS1_REG,
-			   irq);
-	if (err) {
-		DRM_ERROR("Write HDMI int 1 failed: %d\n", err);
-		return;
-	}
-
-	if ((irq & SP_CKDT_CHG) || (irq & SP_SCDT_CHG)) {
-		DRM_DEBUG_KMS("IRQ: HDMI input detected\n");
-
-		err = regmap_read(anx78xx->map[I2C_IDX_RX_P0],
-				  SP_SYSTEM_STATUS_REG, &value);
-		if (err) {
-			DRM_ERROR("Read system status reg failed: %d\n", err);
-			return;
-		}
-
-		if (!(value & SP_TMDS_CLOCK_DET)) {
-			DRM_DEBUG_KMS("IRQ: *** Waiting for HDMI clock ***\n");
-			return;
-		}
-
-		if (!(value & SP_TMDS_DE_DET)) {
-			DRM_DEBUG_KMS("IRQ: *** Waiting for HDMI signal ***\n");
-			return;
-		}
-
-		err = anx78xx_dp_link_training(anx78xx);
-		if (err)
-			DRM_ERROR("Failed to start link training: %d\n", err);
-	}
-}
-
-static irqreturn_t anx78xx_intp_threaded_handler(int unused, void *data)
-{
-	struct anx78xx *anx78xx = data;
-	bool event = false;
-	unsigned int irq;
-	int err;
-
-	mutex_lock(&anx78xx->lock);
-
-	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_STATUS1_REG,
-			  &irq);
-	if (err) {
-		DRM_ERROR("Failed to read DP interrupt 1 status: %d\n", err);
-		goto unlock;
-	}
-
-	if (irq)
-		anx78xx_handle_dp_int_1(anx78xx, irq);
-
-	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2],
-			  SP_COMMON_INT_STATUS4_REG, &irq);
-	if (err) {
-		DRM_ERROR("Failed to read common interrupt 4 status: %d\n",
-			  err);
-		goto unlock;
-	}
-
-	if (irq)
-		event = anx78xx_handle_common_int_4(anx78xx, irq);
-
-	/* Make sure we are still powered after handling HPD events */
-	if (!anx78xx->powered)
-		goto unlock;
-
-	err = regmap_read(anx78xx->map[I2C_IDX_RX_P0], SP_INT_STATUS1_REG,
-			  &irq);
-	if (err) {
-		DRM_ERROR("Failed to read HDMI int 1 status: %d\n", err);
-		goto unlock;
-	}
-
-	if (irq)
-		anx78xx_handle_hdmi_int_1(anx78xx, irq);
-
-unlock:
-	mutex_unlock(&anx78xx->lock);
-
-	if (event)
-		drm_helper_hpd_irq_event(anx78xx->connector.dev);
-
-	return IRQ_HANDLED;
-}
-
-static void unregister_i2c_dummy_clients(struct anx78xx *anx78xx)
-{
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(anx78xx->i2c_dummy); i++)
-		i2c_unregister_device(anx78xx->i2c_dummy[i]);
-}
-
-static const struct regmap_config anx78xx_regmap_config = {
-	.reg_bits = 8,
-	.val_bits = 8,
-};
-
-static const u16 anx78xx_chipid_list[] = {
-	0x7808,
-	0x7812,
-	0x7814,
-	0x7818,
-};
-
-static int anx78xx_i2c_probe(struct i2c_client *client,
-			     const struct i2c_device_id *id)
-{
-	struct anx78xx *anx78xx;
-	struct anx78xx_platform_data *pdata;
-	unsigned int i, idl, idh, version;
-	const u8 *i2c_addresses;
-	bool found = false;
-	int err;
-
-	anx78xx = devm_kzalloc(&client->dev, sizeof(*anx78xx), GFP_KERNEL);
-	if (!anx78xx)
-		return -ENOMEM;
-
-	pdata = &anx78xx->pdata;
-
-	mutex_init(&anx78xx->lock);
-
-#if IS_ENABLED(CONFIG_OF)
-	anx78xx->bridge.of_node = client->dev.of_node;
-#endif
-
-	anx78xx->client = client;
-	i2c_set_clientdata(client, anx78xx);
-
-	err = anx78xx_init_pdata(anx78xx);
-	if (err) {
-		if (err != -EPROBE_DEFER)
-			DRM_ERROR("Failed to initialize pdata: %d\n", err);
-
-		return err;
-	}
-
-	pdata->hpd_irq = gpiod_to_irq(pdata->gpiod_hpd);
-	if (pdata->hpd_irq < 0) {
-		DRM_ERROR("Failed to get HPD IRQ: %d\n", pdata->hpd_irq);
-		return -ENODEV;
-	}
-
-	pdata->intp_irq = client->irq;
-	if (!pdata->intp_irq) {
-		DRM_ERROR("Failed to get CABLE_DET and INTP IRQ\n");
-		return -ENODEV;
-	}
-
-	/* Map slave addresses of ANX7814 */
-	i2c_addresses = device_get_match_data(&client->dev);
-	for (i = 0; i < I2C_NUM_ADDRESSES; i++) {
-		struct i2c_client *i2c_dummy;
-
-		i2c_dummy = i2c_new_dummy_device(client->adapter,
-						 i2c_addresses[i] >> 1);
-		if (IS_ERR(i2c_dummy)) {
-			err = PTR_ERR(i2c_dummy);
-			DRM_ERROR("Failed to reserve I2C bus %02x: %d\n",
-				  i2c_addresses[i], err);
-			goto err_unregister_i2c;
-		}
-
-		anx78xx->i2c_dummy[i] = i2c_dummy;
-		anx78xx->map[i] = devm_regmap_init_i2c(anx78xx->i2c_dummy[i],
-						       &anx78xx_regmap_config);
-		if (IS_ERR(anx78xx->map[i])) {
-			err = PTR_ERR(anx78xx->map[i]);
-			DRM_ERROR("Failed regmap initialization %02x\n",
-				  i2c_addresses[i]);
-			goto err_unregister_i2c;
-		}
-	}
-
-	/* Look for supported chip ID */
-	anx78xx_poweron(anx78xx);
-
-	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_IDL_REG,
-			  &idl);
-	if (err)
-		goto err_poweroff;
-
-	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_IDH_REG,
-			  &idh);
-	if (err)
-		goto err_poweroff;
-
-	anx78xx->chipid = (u8)idl | ((u8)idh << 8);
-
-	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_VERSION_REG,
-			  &version);
-	if (err)
-		goto err_poweroff;
-
-	for (i = 0; i < ARRAY_SIZE(anx78xx_chipid_list); i++) {
-		if (anx78xx->chipid == anx78xx_chipid_list[i]) {
-			DRM_INFO("Found ANX%x (ver. %d) SlimPort Transmitter\n",
-				 anx78xx->chipid, version);
-			found = true;
-			break;
-		}
-	}
-
-	if (!found) {
-		DRM_ERROR("ANX%x (ver. %d) not supported by this driver\n",
-			  anx78xx->chipid, version);
-		err = -ENODEV;
-		goto err_poweroff;
-	}
-
-	err = devm_request_threaded_irq(&client->dev, pdata->hpd_irq, NULL,
-					anx78xx_hpd_threaded_handler,
-					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
-					"anx78xx-hpd", anx78xx);
-	if (err) {
-		DRM_ERROR("Failed to request CABLE_DET threaded IRQ: %d\n",
-			  err);
-		goto err_poweroff;
-	}
-
-	err = devm_request_threaded_irq(&client->dev, pdata->intp_irq, NULL,
-					anx78xx_intp_threaded_handler,
-					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
-					"anx78xx-intp", anx78xx);
-	if (err) {
-		DRM_ERROR("Failed to request INTP threaded IRQ: %d\n", err);
-		goto err_poweroff;
-	}
-
-	anx78xx->bridge.funcs = &anx78xx_bridge_funcs;
-
-	drm_bridge_add(&anx78xx->bridge);
-
-	/* If cable is pulled out, just poweroff and wait for HPD event */
-	if (!gpiod_get_value(anx78xx->pdata.gpiod_hpd))
-		anx78xx_poweroff(anx78xx);
-
-	return 0;
-
-err_poweroff:
-	anx78xx_poweroff(anx78xx);
-
-err_unregister_i2c:
-	unregister_i2c_dummy_clients(anx78xx);
-	return err;
-}
-
-static int anx78xx_i2c_remove(struct i2c_client *client)
-{
-	struct anx78xx *anx78xx = i2c_get_clientdata(client);
-
-	drm_bridge_remove(&anx78xx->bridge);
-
-	unregister_i2c_dummy_clients(anx78xx);
-
-	kfree(anx78xx->edid);
-
-	return 0;
-}
-
-static const struct i2c_device_id anx78xx_id[] = {
-	{ "anx7814", 0 },
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(i2c, anx78xx_id);
-
-#if IS_ENABLED(CONFIG_OF)
-static const struct of_device_id anx78xx_match_table[] = {
-	{ .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses },
-	{ .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses },
-	{ .compatible = "analogix,anx7814", .data = anx781x_i2c_addresses },
-	{ .compatible = "analogix,anx7818", .data = anx781x_i2c_addresses },
-	{ /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, anx78xx_match_table);
-#endif
-
-static struct i2c_driver anx78xx_driver = {
-	.driver = {
-		   .name = "anx7814",
-		   .of_match_table = of_match_ptr(anx78xx_match_table),
-		  },
-	.probe = anx78xx_i2c_probe,
-	.remove = anx78xx_i2c_remove,
-	.id_table = anx78xx_id,
-};
-module_i2c_driver(anx78xx_driver);
-
-MODULE_DESCRIPTION("ANX78xx SlimPort Transmitter driver");
-MODULE_AUTHOR("Enric Balletbo i Serra <enric.balletbo@collabora.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.h b/drivers/gpu/drm/bridge/analogix-anx78xx.h
deleted file mode 100644
index 55d6c21..0000000
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.h
+++ /dev/null
@@ -1,703 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright(c) 2016, Analogix Semiconductor. All rights reserved.
- */
-
-#ifndef __ANX78xx_H
-#define __ANX78xx_H
-
-/***************************************************************/
-/* Register definitions for RX_P0                              */
-/***************************************************************/
-
-/*
- * System Control and Status
- */
-
-/* Software Reset Register 1 */
-#define SP_SOFTWARE_RESET1_REG		0x11
-#define SP_VIDEO_RST			BIT(4)
-#define SP_HDCP_MAN_RST			BIT(2)
-#define SP_TMDS_RST			BIT(1)
-#define SP_SW_MAN_RST			BIT(0)
-
-/* System Status Register */
-#define SP_SYSTEM_STATUS_REG		0x14
-#define SP_TMDS_CLOCK_DET		BIT(1)
-#define SP_TMDS_DE_DET			BIT(0)
-
-/* HDMI Status Register */
-#define SP_HDMI_STATUS_REG		0x15
-#define SP_HDMI_AUD_LAYOUT		BIT(3)
-#define SP_HDMI_DET			BIT(0)
-#  define SP_DVI_MODE			0
-#  define SP_HDMI_MODE			1
-
-/* HDMI Mute Control Register */
-#define SP_HDMI_MUTE_CTRL_REG		0x16
-#define SP_AUD_MUTE			BIT(1)
-#define SP_VID_MUTE			BIT(0)
-
-/* System Power Down Register 1 */
-#define SP_SYSTEM_POWER_DOWN1_REG	0x18
-#define SP_PWDN_CTRL			BIT(0)
-
-/*
- * Audio and Video Auto Control
- */
-
-/* Auto Audio and Video Control register */
-#define SP_AUDVID_CTRL_REG		0x20
-#define SP_AVC_OE			BIT(7)
-#define SP_AAC_OE			BIT(6)
-#define SP_AVC_EN			BIT(1)
-#define SP_AAC_EN			BIT(0)
-
-/* Audio Exception Enable Registers */
-#define SP_AUD_EXCEPTION_ENABLE_BASE	(0x24 - 1)
-/* Bits for Audio Exception Enable Register 3 */
-#define SP_AEC_EN21			BIT(5)
-
-/*
- * Interrupt
- */
-
-/* Interrupt Status Register 1 */
-#define SP_INT_STATUS1_REG		0x31
-/* Bits for Interrupt Status Register 1 */
-#define SP_HDMI_DVI			BIT(7)
-#define SP_CKDT_CHG			BIT(6)
-#define SP_SCDT_CHG			BIT(5)
-#define SP_PCLK_CHG			BIT(4)
-#define SP_PLL_UNLOCK			BIT(3)
-#define SP_CABLE_PLUG_CHG		BIT(2)
-#define SP_SET_MUTE			BIT(1)
-#define SP_SW_INTR			BIT(0)
-/* Bits for Interrupt Status Register 2 */
-#define SP_HDCP_ERR			BIT(5)
-#define SP_AUDIO_SAMPLE_CHG		BIT(0)	/* undocumented */
-/* Bits for Interrupt Status Register 3 */
-#define SP_AUD_MODE_CHG			BIT(0)
-/* Bits for Interrupt Status Register 5 */
-#define SP_AUDIO_RCV			BIT(0)
-/* Bits for Interrupt Status Register 6 */
-#define SP_INT_STATUS6_REG		0x36
-#define SP_CTS_RCV			BIT(7)
-#define SP_NEW_AUD_PKT			BIT(4)
-#define SP_NEW_AVI_PKT			BIT(1)
-#define SP_NEW_CP_PKT			BIT(0)
-/* Bits for Interrupt Status Register 7 */
-#define SP_NO_VSI			BIT(7)
-#define SP_NEW_VS			BIT(4)
-
-/* Interrupt Mask 1 Status Registers */
-#define SP_INT_MASK1_REG		0x41
-
-/* HDMI US TIMER Control Register */
-#define SP_HDMI_US_TIMER_CTRL_REG	0x49
-#define SP_MS_TIMER_MARGIN_10_8_MASK	0x07
-
-/*
- * TMDS Control
- */
-
-/* TMDS Control Registers */
-#define SP_TMDS_CTRL_BASE		(0x50 - 1)
-/* Bits for TMDS Control Register 7 */
-#define SP_PD_RT			BIT(0)
-
-/*
- * Video Control
- */
-
-/* Video Status Register */
-#define SP_VIDEO_STATUS_REG		0x70
-#define SP_COLOR_DEPTH_MASK		0xf0
-#define SP_COLOR_DEPTH_SHIFT		4
-#  define SP_COLOR_DEPTH_MODE_LEGACY	0x00
-#  define SP_COLOR_DEPTH_MODE_24BIT	0x04
-#  define SP_COLOR_DEPTH_MODE_30BIT	0x05
-#  define SP_COLOR_DEPTH_MODE_36BIT	0x06
-#  define SP_COLOR_DEPTH_MODE_48BIT	0x07
-
-/* Video Data Range Control Register */
-#define SP_VID_DATA_RANGE_CTRL_REG	0x83
-#define SP_R2Y_INPUT_LIMIT		BIT(1)
-
-/* Pixel Clock High Resolution Counter Registers */
-#define SP_PCLK_HIGHRES_CNT_BASE	(0x8c - 1)
-
-/*
- * Audio Control
- */
-
-/* Number of Audio Channels Status Registers */
-#define SP_AUD_CH_STATUS_REG_NUM	6
-
-/* Audio IN S/PDIF Channel Status Registers */
-#define SP_AUD_SPDIF_CH_STATUS_BASE	0xc7
-
-/* Audio IN S/PDIF Channel Status Register 4 */
-#define SP_FS_FREQ_MASK			0x0f
-#  define SP_FS_FREQ_44100HZ		0x00
-#  define SP_FS_FREQ_48000HZ		0x02
-#  define SP_FS_FREQ_32000HZ		0x03
-#  define SP_FS_FREQ_88200HZ		0x08
-#  define SP_FS_FREQ_96000HZ		0x0a
-#  define SP_FS_FREQ_176400HZ		0x0c
-#  define SP_FS_FREQ_192000HZ		0x0e
-
-/*
- * Miscellaneous Control Block
- */
-
-/* CHIP Control Register */
-#define SP_CHIP_CTRL_REG		0xe3
-#define SP_MAN_HDMI5V_DET		BIT(3)
-#define SP_PLLLOCK_CKDT_EN		BIT(2)
-#define SP_ANALOG_CKDT_EN		BIT(1)
-#define SP_DIGITAL_CKDT_EN		BIT(0)
-
-/* Packet Receiving Status Register */
-#define SP_PACKET_RECEIVING_STATUS_REG	0xf3
-#define SP_AVI_RCVD			BIT(5)
-#define SP_VSI_RCVD			BIT(1)
-
-/***************************************************************/
-/* Register definitions for RX_P1                              */
-/***************************************************************/
-
-/* HDCP BCAPS Shadow Register */
-#define SP_HDCP_BCAPS_SHADOW_REG	0x2a
-#define SP_BCAPS_REPEATER		BIT(5)
-
-/* HDCP Status Register */
-#define SP_RX_HDCP_STATUS_REG		0x3f
-#define SP_AUTH_EN			BIT(4)
-
-/*
- * InfoFrame and Control Packet Registers
- */
-
-/* AVI InfoFrame packet checksum */
-#define SP_AVI_INFOFRAME_CHECKSUM	0xa3
-
-/* AVI InfoFrame Registers */
-#define SP_AVI_INFOFRAME_DATA_BASE	0xa4
-
-#define SP_AVI_COLOR_F_MASK		0x60
-#define SP_AVI_COLOR_F_SHIFT		5
-
-/* Audio InfoFrame Registers */
-#define SP_AUD_INFOFRAME_DATA_BASE	0xc4
-#define SP_AUD_INFOFRAME_LAYOUT_MASK	0x0f
-
-/* MPEG/HDMI Vendor Specific InfoFrame Packet type code */
-#define SP_MPEG_VS_INFOFRAME_TYPE_REG	0xe0
-
-/* MPEG/HDMI Vendor Specific InfoFrame Packet length */
-#define SP_MPEG_VS_INFOFRAME_LEN_REG	0xe2
-
-/* MPEG/HDMI Vendor Specific InfoFrame Packet version number */
-#define SP_MPEG_VS_INFOFRAME_VER_REG	0xe1
-
-/* MPEG/HDMI Vendor Specific InfoFrame Packet content */
-#define SP_MPEG_VS_INFOFRAME_DATA_BASE	0xe4
-
-/* General Control Packet Register */
-#define SP_GENERAL_CTRL_PACKET_REG	0x9f
-#define SP_CLEAR_AVMUTE			BIT(4)
-#define SP_SET_AVMUTE			BIT(0)
-
-/***************************************************************/
-/* Register definitions for TX_P0                              */
-/***************************************************************/
-
-/* HDCP Status Register */
-#define SP_TX_HDCP_STATUS_REG		0x00
-#define SP_AUTH_FAIL			BIT(5)
-#define SP_AUTHEN_PASS			BIT(1)
-
-/* HDCP Control Register 0 */
-#define SP_HDCP_CTRL0_REG		0x01
-#define SP_RX_REPEATER			BIT(6)
-#define SP_RE_AUTH			BIT(5)
-#define SP_SW_AUTH_OK			BIT(4)
-#define SP_HARD_AUTH_EN			BIT(3)
-#define SP_HDCP_ENC_EN			BIT(2)
-#define SP_BKSV_SRM_PASS		BIT(1)
-#define SP_KSVLIST_VLD			BIT(0)
-/* HDCP Function Enabled */
-#define SP_HDCP_FUNCTION_ENABLED	(BIT(0) | BIT(1) | BIT(2) | BIT(3))
-
-/* HDCP Receiver BSTATUS Register 0 */
-#define	SP_HDCP_RX_BSTATUS0_REG		0x1b
-/* HDCP Receiver BSTATUS Register 1 */
-#define	SP_HDCP_RX_BSTATUS1_REG		0x1c
-
-/* HDCP Embedded "Blue Screen" Content Registers */
-#define SP_HDCP_VID0_BLUE_SCREEN_REG	0x2c
-#define SP_HDCP_VID1_BLUE_SCREEN_REG	0x2d
-#define SP_HDCP_VID2_BLUE_SCREEN_REG	0x2e
-
-/* HDCP Wait R0 Timing Register */
-#define SP_HDCP_WAIT_R0_TIME_REG	0x40
-
-/* HDCP Link Integrity Check Timer Register */
-#define SP_HDCP_LINK_CHECK_TIMER_REG	0x41
-
-/* HDCP Repeater Ready Wait Timer Register */
-#define SP_HDCP_RPTR_RDY_WAIT_TIME_REG	0x42
-
-/* HDCP Auto Timer Register */
-#define SP_HDCP_AUTO_TIMER_REG		0x51
-
-/* HDCP Key Status Register */
-#define SP_HDCP_KEY_STATUS_REG		0x5e
-
-/* HDCP Key Command Register */
-#define SP_HDCP_KEY_COMMAND_REG		0x5f
-#define SP_DISABLE_SYNC_HDCP		BIT(2)
-
-/* OTP Memory Key Protection Registers */
-#define SP_OTP_KEY_PROTECT1_REG		0x60
-#define SP_OTP_KEY_PROTECT2_REG		0x61
-#define SP_OTP_KEY_PROTECT3_REG		0x62
-#define SP_OTP_PSW1			0xa2
-#define SP_OTP_PSW2			0x7e
-#define SP_OTP_PSW3			0xc6
-
-/* DP System Control Registers */
-#define SP_DP_SYSTEM_CTRL_BASE		(0x80 - 1)
-/* Bits for DP System Control Register 2 */
-#define SP_CHA_STA			BIT(2)
-/* Bits for DP System Control Register 3 */
-#define SP_HPD_STATUS			BIT(6)
-#define SP_STRM_VALID			BIT(2)
-/* Bits for DP System Control Register 4 */
-#define SP_ENHANCED_MODE		BIT(3)
-
-/* DP Video Control Register */
-#define SP_DP_VIDEO_CTRL_REG		0x84
-#define SP_COLOR_F_MASK			0x06
-#define SP_COLOR_F_SHIFT		1
-#define SP_BPC_MASK			0xe0
-#define SP_BPC_SHIFT			5
-#  define SP_BPC_6BITS			0x00
-#  define SP_BPC_8BITS			0x01
-#  define SP_BPC_10BITS			0x02
-#  define SP_BPC_12BITS			0x03
-
-/* DP Audio Control Register */
-#define SP_DP_AUDIO_CTRL_REG		0x87
-#define SP_AUD_EN			BIT(0)
-
-/* 10us Pulse Generate Timer Registers */
-#define SP_I2C_GEN_10US_TIMER0_REG	0x88
-#define SP_I2C_GEN_10US_TIMER1_REG	0x89
-
-/* Packet Send Control Register */
-#define SP_PACKET_SEND_CTRL_REG		0x90
-#define SP_AUD_IF_UP			BIT(7)
-#define SP_AVI_IF_UD			BIT(6)
-#define SP_MPEG_IF_UD			BIT(5)
-#define SP_SPD_IF_UD			BIT(4)
-#define SP_AUD_IF_EN			BIT(3)
-#define SP_AVI_IF_EN			BIT(2)
-#define SP_MPEG_IF_EN			BIT(1)
-#define SP_SPD_IF_EN			BIT(0)
-
-/* DP HDCP Control Register */
-#define SP_DP_HDCP_CTRL_REG		0x92
-#define SP_AUTO_EN			BIT(7)
-#define SP_AUTO_START			BIT(5)
-#define SP_LINK_POLLING			BIT(1)
-
-/* DP Main Link Bandwidth Setting Register */
-#define SP_DP_MAIN_LINK_BW_SET_REG	0xa0
-#define SP_LINK_BW_SET_MASK		0x1f
-#define SP_INITIAL_SLIM_M_AUD_SEL	BIT(5)
-
-/* DP Training Pattern Set Register */
-#define SP_DP_TRAINING_PATTERN_SET_REG	0xa2
-
-/* DP Lane 0 Link Training Control Register */
-#define SP_DP_LANE0_LT_CTRL_REG		0xa3
-#define SP_TX_SW_SET_MASK		0x1b
-#define SP_MAX_PRE_REACH		BIT(5)
-#define SP_MAX_DRIVE_REACH		BIT(4)
-#define SP_PRE_EMP_LEVEL1		BIT(3)
-#define SP_DRVIE_CURRENT_LEVEL1		BIT(0)
-
-/* DP Link Training Control Register */
-#define SP_DP_LT_CTRL_REG		0xa8
-#define SP_LT_ERROR_TYPE_MASK		0x70
-#  define SP_LT_NO_ERROR		0x00
-#  define SP_LT_AUX_WRITE_ERROR		0x01
-#  define SP_LT_MAX_DRIVE_REACHED	0x02
-#  define SP_LT_WRONG_LANE_COUNT_SET	0x03
-#  define SP_LT_LOOP_SAME_5_TIME	0x04
-#  define SP_LT_CR_FAIL_IN_EQ		0x05
-#  define SP_LT_EQ_LOOP_5_TIME		0x06
-#define SP_LT_EN			BIT(0)
-
-/* DP CEP Training Control Registers */
-#define SP_DP_CEP_TRAINING_CTRL0_REG	0xa9
-#define SP_DP_CEP_TRAINING_CTRL1_REG	0xaa
-
-/* DP Debug Register 1 */
-#define SP_DP_DEBUG1_REG		0xb0
-#define SP_DEBUG_PLL_LOCK		BIT(4)
-#define SP_POLLING_EN			BIT(1)
-
-/* DP Polling Control Register */
-#define SP_DP_POLLING_CTRL_REG		0xb4
-#define SP_AUTO_POLLING_DISABLE		BIT(0)
-
-/* DP Link Debug Control Register */
-#define SP_DP_LINK_DEBUG_CTRL_REG	0xb8
-#define SP_M_VID_DEBUG			BIT(5)
-#define SP_NEW_PRBS7			BIT(4)
-#define SP_INSERT_ER			BIT(1)
-#define SP_PRBS31_EN			BIT(0)
-
-/* AUX Misc control Register */
-#define SP_AUX_MISC_CTRL_REG		0xbf
-
-/* DP PLL control Register */
-#define SP_DP_PLL_CTRL_REG		0xc7
-#define SP_PLL_RST			BIT(6)
-
-/* DP Analog Power Down Register */
-#define SP_DP_ANALOG_POWER_DOWN_REG	0xc8
-#define SP_CH0_PD			BIT(0)
-
-/* DP Misc Control Register */
-#define SP_DP_MISC_CTRL_REG		0xcd
-#define SP_EQ_TRAINING_LOOP		BIT(6)
-
-/* DP Extra I2C Device Address Register */
-#define SP_DP_EXTRA_I2C_DEV_ADDR_REG	0xce
-#define SP_I2C_STRETCH_DISABLE		BIT(7)
-
-#define SP_I2C_EXTRA_ADDR		0x50
-
-/* DP Downspread Control Register 1 */
-#define SP_DP_DOWNSPREAD_CTRL1_REG	0xd0
-
-/* DP M Value Calculation Control Register */
-#define SP_DP_M_CALCULATION_CTRL_REG	0xd9
-#define SP_M_GEN_CLK_SEL		BIT(0)
-
-/* AUX Channel Access Status Register */
-#define SP_AUX_CH_STATUS_REG		0xe0
-#define SP_AUX_STATUS			0x0f
-
-/* AUX Channel DEFER Control Register */
-#define SP_AUX_DEFER_CTRL_REG		0xe2
-#define SP_DEFER_CTRL_EN		BIT(7)
-
-/* DP Buffer Data Count Register */
-#define SP_BUF_DATA_COUNT_REG		0xe4
-#define SP_BUF_DATA_COUNT_MASK		0x1f
-#define SP_BUF_CLR			BIT(7)
-
-/* DP AUX Channel Control Register 1 */
-#define SP_DP_AUX_CH_CTRL1_REG		0xe5
-#define SP_AUX_TX_COMM_MASK		0x0f
-#define SP_AUX_LENGTH_MASK		0xf0
-#define SP_AUX_LENGTH_SHIFT		4
-
-/* DP AUX CH Address Register 0 */
-#define SP_AUX_ADDR_7_0_REG		0xe6
-
-/* DP AUX CH Address Register 1 */
-#define SP_AUX_ADDR_15_8_REG		0xe7
-
-/* DP AUX CH Address Register 2 */
-#define SP_AUX_ADDR_19_16_REG		0xe8
-#define SP_AUX_ADDR_19_16_MASK		0x0f
-
-/* DP AUX Channel Control Register 2 */
-#define SP_DP_AUX_CH_CTRL2_REG		0xe9
-#define SP_AUX_SEL_RXCM			BIT(6)
-#define SP_AUX_CHSEL			BIT(3)
-#define SP_AUX_PN_INV			BIT(2)
-#define SP_ADDR_ONLY			BIT(1)
-#define SP_AUX_EN			BIT(0)
-
-/* DP Video Stream Control InfoFrame Register */
-#define SP_DP_3D_VSC_CTRL_REG		0xea
-#define SP_INFO_FRAME_VSC_EN		BIT(0)
-
-/* DP Video Stream Data Byte 1 Register */
-#define SP_DP_VSC_DB1_REG		0xeb
-
-/* DP AUX Channel Control Register 3 */
-#define SP_DP_AUX_CH_CTRL3_REG		0xec
-#define SP_WAIT_COUNTER_7_0_MASK	0xff
-
-/* DP AUX Channel Control Register 4 */
-#define SP_DP_AUX_CH_CTRL4_REG		0xed
-
-/* DP AUX Buffer Data Registers */
-#define SP_DP_BUF_DATA0_REG		0xf0
-
-/***************************************************************/
-/* Register definitions for TX_P2                              */
-/***************************************************************/
-
-/*
- * Core Register Definitions
- */
-
-/* Device ID Low Byte Register */
-#define SP_DEVICE_IDL_REG		0x02
-
-/* Device ID High Byte Register */
-#define SP_DEVICE_IDH_REG		0x03
-
-/* Device version register */
-#define SP_DEVICE_VERSION_REG		0x04
-
-/* Power Down Control Register */
-#define SP_POWERDOWN_CTRL_REG		0x05
-#define SP_REGISTER_PD			BIT(7)
-#define SP_HDCP_PD			BIT(5)
-#define SP_AUDIO_PD			BIT(4)
-#define SP_VIDEO_PD			BIT(3)
-#define SP_LINK_PD			BIT(2)
-#define SP_TOTAL_PD			BIT(1)
-
-/* Reset Control Register 1 */
-#define SP_RESET_CTRL1_REG		0x06
-#define SP_MISC_RST			BIT(7)
-#define SP_VIDCAP_RST			BIT(6)
-#define SP_VIDFIF_RST			BIT(5)
-#define SP_AUDFIF_RST			BIT(4)
-#define SP_AUDCAP_RST			BIT(3)
-#define SP_HDCP_RST			BIT(2)
-#define SP_SW_RST			BIT(1)
-#define SP_HW_RST			BIT(0)
-
-/* Reset Control Register 2 */
-#define SP_RESET_CTRL2_REG		0x07
-#define SP_AUX_RST			BIT(2)
-#define SP_SERDES_FIFO_RST		BIT(1)
-#define SP_I2C_REG_RST			BIT(0)
-
-/* Video Control Register 1 */
-#define SP_VID_CTRL1_REG		0x08
-#define SP_VIDEO_EN			BIT(7)
-#define SP_VIDEO_MUTE			BIT(2)
-#define SP_DE_GEN			BIT(1)
-#define SP_DEMUX			BIT(0)
-
-/* Video Control Register 2 */
-#define SP_VID_CTRL2_REG		0x09
-#define SP_IN_COLOR_F_MASK		0x03
-#define SP_IN_YC_BIT_SEL		BIT(2)
-#define SP_IN_BPC_MASK			0x70
-#define SP_IN_BPC_SHIFT			4
-#  define SP_IN_BPC_12BIT		0x03
-#  define SP_IN_BPC_10BIT		0x02
-#  define SP_IN_BPC_8BIT		0x01
-#  define SP_IN_BPC_6BIT		0x00
-#define SP_IN_D_RANGE			BIT(7)
-
-/* Video Control Register 3 */
-#define SP_VID_CTRL3_REG		0x0a
-#define SP_HPD_OUT			BIT(6)
-
-/* Video Control Register 5 */
-#define SP_VID_CTRL5_REG		0x0c
-#define SP_CSC_STD_SEL			BIT(7)
-#define SP_XVYCC_RNG_LMT		BIT(6)
-#define SP_RANGE_Y2R			BIT(5)
-#define SP_CSPACE_Y2R			BIT(4)
-#define SP_RGB_RNG_LMT			BIT(3)
-#define SP_Y_RNG_LMT			BIT(2)
-#define SP_RANGE_R2Y			BIT(1)
-#define SP_CSPACE_R2Y			BIT(0)
-
-/* Video Control Register 6 */
-#define SP_VID_CTRL6_REG		0x0d
-#define SP_TEST_PATTERN_EN		BIT(7)
-#define SP_VIDEO_PROCESS_EN		BIT(6)
-#define SP_VID_US_MODE			BIT(3)
-#define SP_VID_DS_MODE			BIT(2)
-#define SP_UP_SAMPLE			BIT(1)
-#define SP_DOWN_SAMPLE			BIT(0)
-
-/* Video Control Register 8 */
-#define SP_VID_CTRL8_REG		0x0f
-#define SP_VID_VRES_TH			BIT(0)
-
-/* Total Line Status Low Byte Register */
-#define SP_TOTAL_LINE_STAL_REG		0x24
-
-/* Total Line Status High Byte Register */
-#define SP_TOTAL_LINE_STAH_REG		0x25
-
-/* Active Line Status Low Byte Register */
-#define SP_ACT_LINE_STAL_REG		0x26
-
-/* Active Line Status High Byte Register */
-#define SP_ACT_LINE_STAH_REG		0x27
-
-/* Vertical Front Porch Status Register */
-#define SP_V_F_PORCH_STA_REG		0x28
-
-/* Vertical SYNC Width Status Register */
-#define SP_V_SYNC_STA_REG		0x29
-
-/* Vertical Back Porch Status Register */
-#define SP_V_B_PORCH_STA_REG		0x2a
-
-/* Total Pixel Status Low Byte Register */
-#define SP_TOTAL_PIXEL_STAL_REG		0x2b
-
-/* Total Pixel Status High Byte Register */
-#define SP_TOTAL_PIXEL_STAH_REG		0x2c
-
-/* Active Pixel Status Low Byte Register */
-#define SP_ACT_PIXEL_STAL_REG		0x2d
-
-/* Active Pixel Status High Byte Register */
-#define SP_ACT_PIXEL_STAH_REG		0x2e
-
-/* Horizontal Front Porch Status Low Byte Register */
-#define SP_H_F_PORCH_STAL_REG		0x2f
-
-/* Horizontal Front Porch Status High Byte Register */
-#define SP_H_F_PORCH_STAH_REG		0x30
-
-/* Horizontal SYNC Width Status Low Byte Register */
-#define SP_H_SYNC_STAL_REG		0x31
-
-/* Horizontal SYNC Width Status High Byte Register */
-#define SP_H_SYNC_STAH_REG		0x32
-
-/* Horizontal Back Porch Status Low Byte Register */
-#define SP_H_B_PORCH_STAL_REG		0x33
-
-/* Horizontal Back Porch Status High Byte Register */
-#define SP_H_B_PORCH_STAH_REG		0x34
-
-/* InfoFrame AVI Packet DB1 Register */
-#define SP_INFOFRAME_AVI_DB1_REG	0x70
-
-/* Bit Control Specific Register */
-#define SP_BIT_CTRL_SPECIFIC_REG	0x80
-#define SP_BIT_CTRL_SELECT_SHIFT	1
-#define SP_ENABLE_BIT_CTRL		BIT(0)
-
-/* InfoFrame Audio Packet DB1 Register */
-#define SP_INFOFRAME_AUD_DB1_REG	0x83
-
-/* InfoFrame MPEG Packet DB1 Register */
-#define SP_INFOFRAME_MPEG_DB1_REG	0xb0
-
-/* Audio Channel Status Registers */
-#define SP_AUD_CH_STATUS_BASE		0xd0
-
-/* Audio Channel Num Register 5 */
-#define SP_I2S_CHANNEL_NUM_MASK		0xe0
-#  define SP_I2S_CH_NUM_1		(0x00 << 5)
-#  define SP_I2S_CH_NUM_2		(0x01 << 5)
-#  define SP_I2S_CH_NUM_3		(0x02 << 5)
-#  define SP_I2S_CH_NUM_4		(0x03 << 5)
-#  define SP_I2S_CH_NUM_5		(0x04 << 5)
-#  define SP_I2S_CH_NUM_6		(0x05 << 5)
-#  define SP_I2S_CH_NUM_7		(0x06 << 5)
-#  define SP_I2S_CH_NUM_8		(0x07 << 5)
-#define SP_EXT_VUCP			BIT(2)
-#define SP_VBIT				BIT(1)
-#define SP_AUDIO_LAYOUT			BIT(0)
-
-/* Analog Debug Register 2 */
-#define SP_ANALOG_DEBUG2_REG		0xdd
-#define SP_FORCE_SW_OFF_BYPASS		0x20
-#define SP_XTAL_FRQ			0x1c
-#  define SP_XTAL_FRQ_19M2		(0x00 << 2)
-#  define SP_XTAL_FRQ_24M		(0x01 << 2)
-#  define SP_XTAL_FRQ_25M		(0x02 << 2)
-#  define SP_XTAL_FRQ_26M		(0x03 << 2)
-#  define SP_XTAL_FRQ_27M		(0x04 << 2)
-#  define SP_XTAL_FRQ_38M4		(0x05 << 2)
-#  define SP_XTAL_FRQ_52M		(0x06 << 2)
-#define SP_POWERON_TIME_1P5MS		0x03
-
-/* Analog Control 0 Register */
-#define SP_ANALOG_CTRL0_REG		0xe1
-
-/* Common Interrupt Status Register 1 */
-#define SP_COMMON_INT_STATUS_BASE	(0xf1 - 1)
-#define SP_PLL_LOCK_CHG			0x40
-
-/* Common Interrupt Status Register 2 */
-#define SP_COMMON_INT_STATUS2		0xf2
-#define SP_HDCP_AUTH_CHG		BIT(1)
-#define SP_HDCP_AUTH_DONE		BIT(0)
-
-#define SP_HDCP_LINK_CHECK_FAIL		BIT(0)
-
-/* Common Interrupt Status Register 4 */
-#define SP_COMMON_INT_STATUS4_REG	0xf4
-#define SP_HPD_IRQ			BIT(6)
-#define SP_HPD_ESYNC_ERR		BIT(4)
-#define SP_HPD_CHG			BIT(2)
-#define SP_HPD_LOST			BIT(1)
-#define SP_HPD_PLUG			BIT(0)
-
-/* DP Interrupt Status Register */
-#define SP_DP_INT_STATUS1_REG		0xf7
-#define SP_TRAINING_FINISH		BIT(5)
-#define SP_POLLING_ERR			BIT(4)
-
-/* Common Interrupt Mask Register */
-#define SP_COMMON_INT_MASK_BASE		(0xf8 - 1)
-
-#define SP_COMMON_INT_MASK4_REG		0xfb
-
-/* DP Interrupts Mask Register */
-#define SP_DP_INT_MASK1_REG		0xfe
-
-/* Interrupt Control Register */
-#define SP_INT_CTRL_REG			0xff
-
-/***************************************************************/
-/* Register definitions for TX_P1                              */
-/***************************************************************/
-
-/* DP TX Link Training Control Register */
-#define SP_DP_TX_LT_CTRL0_REG		0x30
-
-/* DP 1.2 Link Training 80-bit Pattern Register */
-#define SP_DP_LT_80BIT_PATTERN0_REG	0x80
-#define SP_DP_LT_80BIT_PATTERN_REG_NUM	10
-
-/* Audio Interface Control Register 0 */
-#define SP_AUD_INTERFACE_CTRL0_REG	0x5f
-#define SP_AUD_INTERFACE_DISABLE	0x80
-
-/* Audio Interface Control Register 2 */
-#define SP_AUD_INTERFACE_CTRL2_REG	0x60
-#define SP_M_AUD_ADJUST_ST		0x04
-
-/* Audio Interface Control Register 3 */
-#define SP_AUD_INTERFACE_CTRL3_REG	0x62
-
-/* Audio Interface Control Register 4 */
-#define SP_AUD_INTERFACE_CTRL4_REG	0x67
-
-/* Audio Interface Control Register 5 */
-#define SP_AUD_INTERFACE_CTRL5_REG	0x68
-
-/* Audio Interface Control Register 6 */
-#define SP_AUD_INTERFACE_CTRL6_REG	0x69
-
-/* Firmware Version Register */
-#define SP_FW_VER_REG			0xb7
-
-#endif
diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
index e930ff9..e1fa7d8 100644
--- a/drivers/gpu/drm/bridge/analogix/Kconfig
+++ b/drivers/gpu/drm/bridge/analogix/Kconfig
@@ -1,4 +1,27 @@
 # SPDX-License-Identifier: GPL-2.0-only
+config DRM_ANALOGIX_ANX6345
+	tristate "Analogix ANX6345 bridge"
+	depends on OF
+	select DRM_ANALOGIX_DP
+	select DRM_KMS_HELPER
+	select REGMAP_I2C
+	help
+	  ANX6345 is an ultra-low power Full-HD DisplayPort/eDP
+	  transmitter designed for portable devices. The
+	  ANX6345 transforms the LVTTL RGB output of an
+	  application processor to eDP or DisplayPort.
+
+config DRM_ANALOGIX_ANX78XX
+	tristate "Analogix ANX78XX bridge"
+	select DRM_ANALOGIX_DP
+	select DRM_KMS_HELPER
+	select REGMAP_I2C
+	help
+	  ANX78XX is an ultra-low power Full-HD SlimPort transmitter
+	  designed for portable devices. The ANX78XX transforms
+	  the HDMI output of an application processor to MyDP
+	  or DisplayPort.
+
 config DRM_ANALOGIX_DP
 	tristate
 	depends on DRM
diff --git a/drivers/gpu/drm/bridge/analogix/Makefile b/drivers/gpu/drm/bridge/analogix/Makefile
index fdbf3fd..97669b3 100644
--- a/drivers/gpu/drm/bridge/analogix/Makefile
+++ b/drivers/gpu/drm/bridge/analogix/Makefile
@@ -1,3 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
-analogix_dp-objs := analogix_dp_core.o analogix_dp_reg.o
+analogix_dp-objs := analogix_dp_core.o analogix_dp_reg.o analogix-i2c-dptx.o
+obj-$(CONFIG_DRM_ANALOGIX_ANX6345) += analogix-anx6345.o
+obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
 obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix_dp.o
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
new file mode 100644
index 0000000..56f55c5
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -0,0 +1,817 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2016, Analogix Semiconductor.
+ * Copyright(c) 2017, Icenowy Zheng <icenowy@aosc.io>
+ *
+ * Based on anx7808 driver obtained from chromeos with copyright:
+ * Copyright(c) 2013, Google Inc.
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "analogix-i2c-dptx.h"
+#include "analogix-i2c-txcommon.h"
+
+#define POLL_DELAY		50000 /* us */
+#define POLL_TIMEOUT		5000000 /* us */
+
+#define I2C_IDX_DPTX		0
+#define I2C_IDX_TXCOM		1
+
+static const u8 anx6345_i2c_addresses[] = {
+	[I2C_IDX_DPTX]	= 0x70,
+	[I2C_IDX_TXCOM]	= 0x72,
+};
+#define I2C_NUM_ADDRESSES	ARRAY_SIZE(anx6345_i2c_addresses)
+
+struct anx6345 {
+	struct drm_dp_aux aux;
+	struct drm_bridge bridge;
+	struct i2c_client *client;
+	struct edid *edid;
+	struct drm_connector connector;
+	struct drm_panel *panel;
+	struct regulator *dvdd12;
+	struct regulator *dvdd25;
+	struct gpio_desc *gpiod_reset;
+	struct mutex lock;	/* protect EDID access */
+
+	/* I2C Slave addresses of ANX6345 are mapped as DPTX and SYS */
+	struct i2c_client *i2c_clients[I2C_NUM_ADDRESSES];
+	struct regmap *map[I2C_NUM_ADDRESSES];
+
+	u16 chipid;
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+	bool powered;
+};
+
+static inline struct anx6345 *connector_to_anx6345(struct drm_connector *c)
+{
+	return container_of(c, struct anx6345, connector);
+}
+
+static inline struct anx6345 *bridge_to_anx6345(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct anx6345, bridge);
+}
+
+static int anx6345_set_bits(struct regmap *map, u8 reg, u8 mask)
+{
+	return regmap_update_bits(map, reg, mask, mask);
+}
+
+static int anx6345_clear_bits(struct regmap *map, u8 reg, u8 mask)
+{
+	return regmap_update_bits(map, reg, mask, 0);
+}
+
+static ssize_t anx6345_aux_transfer(struct drm_dp_aux *aux,
+				    struct drm_dp_aux_msg *msg)
+{
+	struct anx6345 *anx6345 = container_of(aux, struct anx6345, aux);
+
+	return anx_dp_aux_transfer(anx6345->map[I2C_IDX_DPTX], msg);
+}
+
+static int anx6345_dp_link_training(struct anx6345 *anx6345)
+{
+	unsigned int value;
+	u8 dp_bw, dpcd[2];
+	int err;
+
+	err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
+				 SP_POWERDOWN_CTRL_REG,
+				 SP_TOTAL_PD);
+	if (err)
+		return err;
+
+	err = drm_dp_dpcd_readb(&anx6345->aux, DP_MAX_LINK_RATE, &dp_bw);
+	if (err < 0)
+		return err;
+
+	switch (dp_bw) {
+	case DP_LINK_BW_1_62:
+	case DP_LINK_BW_2_7:
+		break;
+
+	default:
+		DRM_DEBUG_KMS("DP bandwidth (%#02x) not supported\n", dp_bw);
+		return -EINVAL;
+	}
+
+	err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG,
+			       SP_VIDEO_MUTE);
+	if (err)
+		return err;
+
+	err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
+				 SP_VID_CTRL1_REG, SP_VIDEO_EN);
+	if (err)
+		return err;
+
+	/* Get DPCD info */
+	err = drm_dp_dpcd_read(&anx6345->aux, DP_DPCD_REV,
+			       &anx6345->dpcd, DP_RECEIVER_CAP_SIZE);
+	if (err < 0) {
+		DRM_ERROR("Failed to read DPCD: %d\n", err);
+		return err;
+	}
+
+	/* Clear channel x SERDES power down */
+	err = anx6345_clear_bits(anx6345->map[I2C_IDX_DPTX],
+				 SP_DP_ANALOG_POWER_DOWN_REG, SP_CH0_PD);
+	if (err)
+		return err;
+
+	/*
+	 * Power up the sink (DP_SET_POWER register is only available on DPCD
+	 * v1.1 and later).
+	 */
+	if (anx6345->dpcd[DP_DPCD_REV] >= 0x11) {
+		err = drm_dp_dpcd_readb(&anx6345->aux, DP_SET_POWER, &dpcd[0]);
+		if (err < 0) {
+			DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
+				  err);
+			return err;
+		}
+
+		dpcd[0] &= ~DP_SET_POWER_MASK;
+		dpcd[0] |= DP_SET_POWER_D0;
+
+		err = drm_dp_dpcd_writeb(&anx6345->aux, DP_SET_POWER, dpcd[0]);
+		if (err < 0) {
+			DRM_ERROR("Failed to power up DisplayPort link: %d\n",
+				  err);
+			return err;
+		}
+
+		/*
+		 * According to the DP 1.1 specification, a "Sink Device must
+		 * exit the power saving state within 1 ms" (Section 2.5.3.1,
+		 * Table 5-52, "Sink Control Field", register 0x600).
+		 */
+		usleep_range(1000, 2000);
+	}
+
+	/* Possibly enable downspread on the sink */
+	err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+			   SP_DP_DOWNSPREAD_CTRL1_REG, 0);
+	if (err)
+		return err;
+
+	if (anx6345->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5) {
+		DRM_DEBUG("Enable downspread on the sink\n");
+		/* 4000PPM */
+		err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+				   SP_DP_DOWNSPREAD_CTRL1_REG, 8);
+		if (err)
+			return err;
+
+		err = drm_dp_dpcd_writeb(&anx6345->aux, DP_DOWNSPREAD_CTRL,
+					 DP_SPREAD_AMP_0_5);
+		if (err < 0)
+			return err;
+	} else {
+		err = drm_dp_dpcd_writeb(&anx6345->aux, DP_DOWNSPREAD_CTRL, 0);
+		if (err < 0)
+			return err;
+	}
+
+	/* Set the lane count and the link rate on the sink */
+	if (drm_dp_enhanced_frame_cap(anx6345->dpcd))
+		err = anx6345_set_bits(anx6345->map[I2C_IDX_DPTX],
+				       SP_DP_SYSTEM_CTRL_BASE + 4,
+				       SP_ENHANCED_MODE);
+	else
+		err = anx6345_clear_bits(anx6345->map[I2C_IDX_DPTX],
+					 SP_DP_SYSTEM_CTRL_BASE + 4,
+					 SP_ENHANCED_MODE);
+	if (err)
+		return err;
+
+	dpcd[0] = drm_dp_max_link_rate(anx6345->dpcd);
+	dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
+	err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+			   SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
+	if (err)
+		return err;
+
+	dpcd[1] = drm_dp_max_lane_count(anx6345->dpcd);
+
+	err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+			   SP_DP_LANE_COUNT_SET_REG, dpcd[1]);
+	if (err)
+		return err;
+
+	if (drm_dp_enhanced_frame_cap(anx6345->dpcd))
+		dpcd[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+	err = drm_dp_dpcd_write(&anx6345->aux, DP_LINK_BW_SET, dpcd,
+				sizeof(dpcd));
+
+	if (err < 0) {
+		DRM_ERROR("Failed to configure link: %d\n", err);
+		return err;
+	}
+
+	/* Start training on the source */
+	err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_LT_CTRL_REG,
+			   SP_LT_EN);
+	if (err)
+		return err;
+
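+	/* Poll the link training control register until the in-progress flag clears */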
+	return regmap_read_poll_timeout(anx6345->map[I2C_IDX_DPTX],
+				       SP_DP_LT_CTRL_REG,
+				       value, !(value & SP_DP_LT_INPROGRESS),
+				       POLL_DELAY, POLL_TIMEOUT);
+}
+
+static int anx6345_tx_initialization(struct anx6345 *anx6345)
+{
+	int err, i;
+
+	/* FIXME: color depth is hardcoded for now */
+	err = regmap_write(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL2_REG,
+			   SP_IN_BPC_6BIT << SP_IN_BPC_SHIFT);
+	if (err)
+		return err;
+
+	err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_PLL_CTRL_REG, 0);
+	if (err)
+		return err;
+
+	err = regmap_write(anx6345->map[I2C_IDX_TXCOM],
+			   SP_ANALOG_DEBUG1_REG, 0);
+	if (err)
+		return err;
+
+	err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+			   SP_DP_LINK_DEBUG_CTRL_REG,
+			   SP_NEW_PRBS7 | SP_M_VID_DEBUG);
+	if (err)
+		return err;
+
+	err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+			   SP_DP_ANALOG_POWER_DOWN_REG, 0);
+	if (err)
+		return err;
+
+	/* Force HPD */
+	err = anx6345_set_bits(anx6345->map[I2C_IDX_DPTX],
+			       SP_DP_SYSTEM_CTRL_BASE + 3,
+			       SP_HPD_FORCE | SP_HPD_CTRL);
+	if (err)
+		return err;
+
+	for (i = 0; i < 4; i++) {
+		/* 4 lanes */
+		err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+				   SP_DP_LANE0_LT_CTRL_REG + i, 0);
+		if (err)
+			return err;
+	}
+
+	/* Reset AUX */
+	err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM],
+			       SP_RESET_CTRL2_REG, SP_AUX_RST);
+	if (err)
+		return err;
+
+	return anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
+				 SP_RESET_CTRL2_REG, SP_AUX_RST);
+}
+
+static void anx6345_poweron(struct anx6345 *anx6345)
+{
+	int err;
+
+	/* Ensure reset is asserted before starting power on sequence */
+	gpiod_set_value_cansleep(anx6345->gpiod_reset, 1);
+	usleep_range(1000, 2000);
+
+	err = regulator_enable(anx6345->dvdd12);
+	if (err) {
+		DRM_ERROR("Failed to enable dvdd12 regulator: %d\n",
+			  err);
+		return;
+	}
+
+	/* T1 - delay between VDD12 and VDD25 should be 0-2ms */
+	usleep_range(1000, 2000);
+
+	err = regulator_enable(anx6345->dvdd25);
+	if (err) {
+		DRM_ERROR("Failed to enable dvdd25 regulator: %d\n",
+			  err);
+		return;
+	}
+
+	/*
+	 * T2 - delay between RESETN and all power rails stable,
+	 * should be 2-5ms
+	 */
+	usleep_range(2000, 5000);
+
+	gpiod_set_value_cansleep(anx6345->gpiod_reset, 0);
+
+	/* Power on registers module */
+	anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG,
+			 SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
+	anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG,
+			   SP_REGISTER_PD | SP_TOTAL_PD);
+
+	if (anx6345->panel)
+		drm_panel_prepare(anx6345->panel);
+
+	anx6345->powered = true;
+}
+
+static void anx6345_poweroff(struct anx6345 *anx6345)
+{
+	int err;
+
+	gpiod_set_value_cansleep(anx6345->gpiod_reset, 1);
+	usleep_range(1000, 2000);
+
+	if (anx6345->panel)
+		drm_panel_unprepare(anx6345->panel);
+
+	err = regulator_disable(anx6345->dvdd25);
+	if (err) {
+		DRM_ERROR("Failed to disable dvdd25 regulator: %d\n",
+			  err);
+		return;
+	}
+
+	usleep_range(5000, 10000);
+
+	err = regulator_disable(anx6345->dvdd12);
+	if (err) {
+		DRM_ERROR("Failed to disable dvdd12 regulator: %d\n",
+			  err);
+		return;
+	}
+
+	usleep_range(1000, 2000);
+
+	anx6345->powered = false;
+}
+
+static int anx6345_start(struct anx6345 *anx6345)
+{
+	int err;
+
+	if (!anx6345->powered)
+		anx6345_poweron(anx6345);
+
+	/* Power on needed modules */
+	err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
+				SP_POWERDOWN_CTRL_REG,
+				SP_VIDEO_PD | SP_LINK_PD);
+
+	err = anx6345_tx_initialization(anx6345);
+	if (err) {
+		DRM_ERROR("Failed eDP transmitter initialization: %d\n", err);
+		anx6345_poweroff(anx6345);
+		return err;
+	}
+
+	err = anx6345_dp_link_training(anx6345);
+	if (err) {
+		DRM_ERROR("Failed link training: %d\n", err);
+		anx6345_poweroff(anx6345);
+		return err;
+	}
+
+	/*
+	 * This delay seems to help keep the hardware in a good state. Without
+	 * it, there are times when it fails silently.
+	 */
+	usleep_range(10000, 15000);
+
+	return 0;
+}
+
+static int anx6345_config_dp_output(struct anx6345 *anx6345)
+{
+	int err;
+
+	err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG,
+				 SP_VIDEO_MUTE);
+	if (err)
+		return err;
+
+	/* Enable DP output */
+	err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG,
+			       SP_VIDEO_EN);
+	if (err)
+		return err;
+
+	/* Force stream valid */
+	return anx6345_set_bits(anx6345->map[I2C_IDX_DPTX],
+			       SP_DP_SYSTEM_CTRL_BASE + 3,
+			       SP_STRM_FORCE | SP_STRM_CTRL);
+}
+
+static int anx6345_get_downstream_info(struct anx6345 *anx6345)
+{
+	u8 value;
+	int err;
+
+	err = drm_dp_dpcd_readb(&anx6345->aux, DP_SINK_COUNT, &value);
+	if (err < 0) {
+		DRM_ERROR("Get sink count failed %d\n", err);
+		return err;
+	}
+
+	if (!DP_GET_SINK_COUNT(value)) {
+		DRM_ERROR("Downstream disconnected\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int anx6345_get_modes(struct drm_connector *connector)
+{
+	struct anx6345 *anx6345 = connector_to_anx6345(connector);
+	int err, num_modes = 0;
+	bool power_off = false;
+
+	mutex_lock(&anx6345->lock);
+
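+	/* Read and cache the EDID on first use, powering the chip up temporarily if needed */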
+	if (!anx6345->edid) {
+		if (!anx6345->powered) {
+			anx6345_poweron(anx6345);
+			power_off = true;
+		}
+
+		err = anx6345_get_downstream_info(anx6345);
+		if (err) {
+			DRM_ERROR("Failed to get downstream info: %d\n", err);
+			goto unlock;
+		}
+
+		anx6345->edid = drm_get_edid(connector, &anx6345->aux.ddc);
+		if (!anx6345->edid)
+			DRM_ERROR("Failed to read EDID from panel\n");
+
+		err = drm_connector_update_edid_property(connector,
+							 anx6345->edid);
+		if (err) {
+			DRM_ERROR("Failed to update EDID property: %d\n", err);
+			goto unlock;
+		}
+	}
+
+	num_modes += drm_add_edid_modes(connector, anx6345->edid);
+
+unlock:
+	if (power_off)
+		anx6345_poweroff(anx6345);
+
+	mutex_unlock(&anx6345->lock);
+
+	if (!num_modes && anx6345->panel)
+		num_modes += drm_panel_get_modes(anx6345->panel, connector);
+
+	return num_modes;
+}
+
+static const struct drm_connector_helper_funcs anx6345_connector_helper_funcs = {
+	.get_modes = anx6345_get_modes,
+};
+
+static void
+anx6345_connector_destroy(struct drm_connector *connector)
+{
+	struct anx6345 *anx6345 = connector_to_anx6345(connector);
+
+	if (anx6345->panel)
+		drm_panel_detach(anx6345->panel);
+	drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs anx6345_connector_funcs = {
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = anx6345_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int anx6345_bridge_attach(struct drm_bridge *bridge)
+{
+	struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
+	int err;
+
+	if (!bridge->encoder) {
+		DRM_ERROR("Parent encoder object not found");
+		return -ENODEV;
+	}
+
+	/* Register aux channel */
+	anx6345->aux.name = "DP-AUX";
+	anx6345->aux.dev = &anx6345->client->dev;
+	anx6345->aux.transfer = anx6345_aux_transfer;
+
+	err = drm_dp_aux_register(&anx6345->aux);
+	if (err < 0) {
+		DRM_ERROR("Failed to register aux channel: %d\n", err);
+		return err;
+	}
+
+	err = drm_connector_init(bridge->dev, &anx6345->connector,
+				 &anx6345_connector_funcs,
+				 DRM_MODE_CONNECTOR_eDP);
+	if (err) {
+		DRM_ERROR("Failed to initialize connector: %d\n", err);
+		return err;
+	}
+
+	drm_connector_helper_add(&anx6345->connector,
+				 &anx6345_connector_helper_funcs);
+
+	err = drm_connector_register(&anx6345->connector);
+	if (err) {
+		DRM_ERROR("Failed to register connector: %d\n", err);
+		return err;
+	}
+
+	anx6345->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+	err = drm_connector_attach_encoder(&anx6345->connector,
+					   bridge->encoder);
+	if (err) {
+		DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
+		return err;
+	}
+
+	if (anx6345->panel) {
+		err = drm_panel_attach(anx6345->panel, &anx6345->connector);
+		if (err) {
+			DRM_ERROR("Failed to attach panel: %d\n", err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static enum drm_mode_status
+anx6345_bridge_mode_valid(struct drm_bridge *bridge,
+			  const struct drm_display_mode *mode)
+{
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return MODE_NO_INTERLACE;
+
+	/* Max 1200p at 5.4 GHz, one lane */
+	if (mode->clock > 154000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static void anx6345_bridge_disable(struct drm_bridge *bridge)
+{
+	struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
+
+	/* Power off all modules except configuration registers access */
+	anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG,
+			 SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
+	if (anx6345->panel)
+		drm_panel_disable(anx6345->panel);
+
+	if (anx6345->powered)
+		anx6345_poweroff(anx6345);
+}
+
+static void anx6345_bridge_enable(struct drm_bridge *bridge)
+{
+	struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
+	int err;
+
+	if (anx6345->panel)
+		drm_panel_enable(anx6345->panel);
+
+	err = anx6345_start(anx6345);
+	if (err) {
+		DRM_ERROR("Failed to initialize: %d\n", err);
+		return;
+	}
+
+	err = anx6345_config_dp_output(anx6345);
+	if (err)
+		DRM_ERROR("Failed to enable DP output: %d\n", err);
+}
+
+static const struct drm_bridge_funcs anx6345_bridge_funcs = {
+	.attach = anx6345_bridge_attach,
+	.mode_valid = anx6345_bridge_mode_valid,
+	.disable = anx6345_bridge_disable,
+	.enable = anx6345_bridge_enable,
+};
+
+static void unregister_i2c_dummy_clients(struct anx6345 *anx6345)
+{
+	unsigned int i;
+
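+	/* Unregister only the dummies, never the probed main client */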
+	for (i = 1; i < ARRAY_SIZE(anx6345->i2c_clients); i++)
+		if (anx6345->i2c_clients[i] &&
+		    anx6345->i2c_clients[i]->addr != anx6345->client->addr)
+			i2c_unregister_device(anx6345->i2c_clients[i]);
+}
+
+static const struct regmap_config anx6345_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = 0xff,
+	.cache_type = REGCACHE_NONE,
+};
+
+static const u16 anx6345_chipid_list[] = {
+	0x6345,
+};
+
+static bool anx6345_get_chip_id(struct anx6345 *anx6345)
+{
+	unsigned int i, idl, idh, version;
+
+	if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_IDL_REG, &idl))
+		return false;
+
+	if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_IDH_REG, &idh))
+		return false;
+
+	anx6345->chipid = (u8)idl | ((u8)idh << 8);
+
+	if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_VERSION_REG,
+			&version))
+		return false;
+
+	for (i = 0; i < ARRAY_SIZE(anx6345_chipid_list); i++) {
+		if (anx6345->chipid == anx6345_chipid_list[i]) {
+			DRM_INFO("Found ANX%x (ver. %d) eDP Transmitter\n",
+				 anx6345->chipid, version);
+			return true;
+		}
+	}
+
+	DRM_ERROR("ANX%x (ver. %d) not supported by this driver\n",
+		  anx6345->chipid, version);
+
+	return false;
+}
+
+static int anx6345_i2c_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	struct anx6345 *anx6345;
+	struct device *dev;
+	int i, err;
+
+	anx6345 = devm_kzalloc(&client->dev, sizeof(*anx6345), GFP_KERNEL);
+	if (!anx6345)
+		return -ENOMEM;
+
+	mutex_init(&anx6345->lock);
+
+	anx6345->bridge.of_node = client->dev.of_node;
+
+	anx6345->client = client;
+	i2c_set_clientdata(client, anx6345);
+
+	dev = &anx6345->client->dev;
+
+	err = drm_of_find_panel_or_bridge(client->dev.of_node, 1, 0,
+					  &anx6345->panel, NULL);
+	if (err == -EPROBE_DEFER)
+		return err;
+
+	if (err)
+		DRM_DEBUG("No panel found\n");
+
+	/* 1.2V digital core power regulator  */
+	anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12");
+	if (IS_ERR(anx6345->dvdd12)) {
+		DRM_ERROR("dvdd12-supply not found\n");
+		return PTR_ERR(anx6345->dvdd12);
+	}
+
+	/* 2.5V digital core power regulator  */
+	anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25");
+	if (IS_ERR(anx6345->dvdd25)) {
+		DRM_ERROR("dvdd25-supply not found\n");
+		return PTR_ERR(anx6345->dvdd25);
+	}
+
+	/* GPIO for chip reset */
+	anx6345->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(anx6345->gpiod_reset)) {
+		DRM_ERROR("Reset gpio not found\n");
+		return PTR_ERR(anx6345->gpiod_reset);
+	}
+
+	/* Map slave addresses of ANX6345 */
+	for (i = 0; i < I2C_NUM_ADDRESSES; i++) {
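+		/* The probed client covers its own address; the rest get dummies */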
+		if (anx6345_i2c_addresses[i] >> 1 != client->addr)
+			anx6345->i2c_clients[i] = i2c_new_dummy_device(client->adapter,
+						anx6345_i2c_addresses[i] >> 1);
+		else
+			anx6345->i2c_clients[i] = client;
+
+		if (IS_ERR(anx6345->i2c_clients[i])) {
+			err = PTR_ERR(anx6345->i2c_clients[i]);
+			DRM_ERROR("Failed to reserve I2C bus %02x\n",
+				  anx6345_i2c_addresses[i]);
+			goto err_unregister_i2c;
+		}
+
+		anx6345->map[i] = devm_regmap_init_i2c(anx6345->i2c_clients[i],
+						       &anx6345_regmap_config);
+		if (IS_ERR(anx6345->map[i])) {
+			err = PTR_ERR(anx6345->map[i]);
+			DRM_ERROR("Failed regmap initialization %02x\n",
+				  anx6345_i2c_addresses[i]);
+			goto err_unregister_i2c;
+		}
+	}
+
+	/* Look for supported chip ID */
+	anx6345_poweron(anx6345);
+	if (anx6345_get_chip_id(anx6345)) {
+		anx6345->bridge.funcs = &anx6345_bridge_funcs;
+		drm_bridge_add(&anx6345->bridge);
+
+		return 0;
+	} else {
+		anx6345_poweroff(anx6345);
+		err = -ENODEV;
+	}
+
+err_unregister_i2c:
+	unregister_i2c_dummy_clients(anx6345);
+	return err;
+}
+
+static int anx6345_i2c_remove(struct i2c_client *client)
+{
+	struct anx6345 *anx6345 = i2c_get_clientdata(client);
+
+	drm_bridge_remove(&anx6345->bridge);
+
+	unregister_i2c_dummy_clients(anx6345);
+
+	kfree(anx6345->edid);
+
+	mutex_destroy(&anx6345->lock);
+
+	return 0;
+}
+
+static const struct i2c_device_id anx6345_id[] = {
+	{ "anx6345", 0 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, anx6345_id);
+
+static const struct of_device_id anx6345_match_table[] = {
+	{ .compatible = "analogix,anx6345", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, anx6345_match_table);
+
+static struct i2c_driver anx6345_driver = {
+	.driver = {
+		   .name = "anx6345",
+		   .of_match_table = of_match_ptr(anx6345_match_table),
+		  },
+	.probe = anx6345_i2c_probe,
+	.remove = anx6345_i2c_remove,
+	.id_table = anx6345_id,
+};
+module_i2c_driver(anx6345_driver);
+
+MODULE_DESCRIPTION("ANX6345 eDP Transmitter driver");
+MODULE_AUTHOR("Icenowy Zheng <icenowy@aosc.io>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
new file mode 100644
index 0000000..41867be
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -0,0 +1,1385 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2016, Analogix Semiconductor.
+ *
+ * Based on anx7808 driver obtained from chromeos with copyright:
+ * Copyright(c) 2013, Google Inc.
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "analogix-anx78xx.h"
+
+#define I2C_NUM_ADDRESSES	5
+#define I2C_IDX_TX_P0		0
+#define I2C_IDX_TX_P1		1
+#define I2C_IDX_TX_P2		2
+#define I2C_IDX_RX_P0		3
+#define I2C_IDX_RX_P1		4
+
+#define XTAL_CLK		270 /* 27M */
+
+static const u8 anx7808_i2c_addresses[] = {
+	[I2C_IDX_TX_P0] = 0x78,
+	[I2C_IDX_TX_P1] = 0x7a,
+	[I2C_IDX_TX_P2] = 0x72,
+	[I2C_IDX_RX_P0] = 0x7e,
+	[I2C_IDX_RX_P1] = 0x80,
+};
+
+static const u8 anx781x_i2c_addresses[] = {
+	[I2C_IDX_TX_P0] = 0x70,
+	[I2C_IDX_TX_P1] = 0x7a,
+	[I2C_IDX_TX_P2] = 0x72,
+	[I2C_IDX_RX_P0] = 0x7e,
+	[I2C_IDX_RX_P1] = 0x80,
+};
+
+struct anx78xx_platform_data {
+	struct regulator *dvdd10;
+	struct gpio_desc *gpiod_hpd;
+	struct gpio_desc *gpiod_pd;
+	struct gpio_desc *gpiod_reset;
+
+	int hpd_irq;
+	int intp_irq;
+};
+
+struct anx78xx {
+	struct drm_dp_aux aux;
+	struct drm_bridge bridge;
+	struct i2c_client *client;
+	struct edid *edid;
+	struct drm_connector connector;
+	struct anx78xx_platform_data pdata;
+	struct mutex lock;
+
+	/*
+	 * I2C Slave addresses of ANX7814 are mapped as TX_P0, TX_P1, TX_P2,
+	 * RX_P0 and RX_P1.
+	 */
+	struct i2c_client *i2c_dummy[I2C_NUM_ADDRESSES];
+	struct regmap *map[I2C_NUM_ADDRESSES];
+
+	u16 chipid;
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+	bool powered;
+};
+
+static inline struct anx78xx *connector_to_anx78xx(struct drm_connector *c)
+{
+	return container_of(c, struct anx78xx, connector);
+}
+
+static inline struct anx78xx *bridge_to_anx78xx(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct anx78xx, bridge);
+}
+
+static int anx78xx_set_bits(struct regmap *map, u8 reg, u8 mask)
+{
+	return regmap_update_bits(map, reg, mask, mask);
+}
+
+static int anx78xx_clear_bits(struct regmap *map, u8 reg, u8 mask)
+{
+	return regmap_update_bits(map, reg, mask, 0);
+}
+
+static ssize_t anx78xx_aux_transfer(struct drm_dp_aux *aux,
+				    struct drm_dp_aux_msg *msg)
+{
+	struct anx78xx *anx78xx = container_of(aux, struct anx78xx, aux);
+	return anx_dp_aux_transfer(anx78xx->map[I2C_IDX_TX_P0], msg);
+}
+
+static int anx78xx_set_hpd(struct anx78xx *anx78xx)
+{
+	int err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
+				 SP_TMDS_CTRL_BASE + 7, SP_PD_RT);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL3_REG,
+			       SP_HPD_OUT);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int anx78xx_clear_hpd(struct anx78xx *anx78xx)
+{
+	int err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL3_REG,
+				 SP_HPD_OUT);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+			       SP_TMDS_CTRL_BASE + 7, SP_PD_RT);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static const struct reg_sequence tmds_phy_initialization[] = {
+	{ SP_TMDS_CTRL_BASE +  1, 0x90 },
+	{ SP_TMDS_CTRL_BASE +  2, 0xa9 },
+	{ SP_TMDS_CTRL_BASE +  6, 0x92 },
+	{ SP_TMDS_CTRL_BASE +  7, 0x80 },
+	{ SP_TMDS_CTRL_BASE + 20, 0xf2 },
+	{ SP_TMDS_CTRL_BASE + 22, 0xc4 },
+	{ SP_TMDS_CTRL_BASE + 23, 0x18 },
+};
+
+static int anx78xx_rx_initialization(struct anx78xx *anx78xx)
+{
+	int err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
+			   SP_AUD_MUTE | SP_VID_MUTE);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], SP_CHIP_CTRL_REG,
+			       SP_MAN_HDMI5V_DET | SP_PLLLOCK_CKDT_EN |
+			       SP_DIGITAL_CKDT_EN);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+			       SP_SOFTWARE_RESET1_REG, SP_HDCP_MAN_RST |
+			       SP_SW_MAN_RST | SP_TMDS_RST | SP_VIDEO_RST);
+	if (err)
+		return err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
+				 SP_SOFTWARE_RESET1_REG, SP_HDCP_MAN_RST |
+				 SP_SW_MAN_RST | SP_TMDS_RST | SP_VIDEO_RST);
+	if (err)
+		return err;
+
+	/* Sync detect change, GP set mute */
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+			       SP_AUD_EXCEPTION_ENABLE_BASE + 1, BIT(5) |
+			       BIT(6));
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+			       SP_AUD_EXCEPTION_ENABLE_BASE + 3,
+			       SP_AEC_EN21);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], SP_AUDVID_CTRL_REG,
+			       SP_AVC_EN | SP_AAC_OE | SP_AAC_EN);
+	if (err)
+		return err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
+				 SP_SYSTEM_POWER_DOWN1_REG, SP_PWDN_CTRL);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+			       SP_VID_DATA_RANGE_CTRL_REG, SP_R2Y_INPUT_LIMIT);
+	if (err)
+		return err;
+
+	/* Enable DDC stretch */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+			   SP_DP_EXTRA_I2C_DEV_ADDR_REG, SP_I2C_EXTRA_ADDR);
+	if (err)
+		return err;
+
+	/* TMDS phy initialization */
+	err = regmap_multi_reg_write(anx78xx->map[I2C_IDX_RX_P0],
+				     tmds_phy_initialization,
+				     ARRAY_SIZE(tmds_phy_initialization));
+	if (err)
+		return err;
+
+	err = anx78xx_clear_hpd(anx78xx);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static const u8 dp_tx_output_precise_tune_bits[20] = {
+	0x01, 0x03, 0x07, 0x7f, 0x71, 0x6b, 0x7f,
+	0x73, 0x7f, 0x7f, 0x00, 0x00, 0x00, 0x00,
+	0x0c, 0x42, 0x1e, 0x3e, 0x72, 0x7e,
+};
+
+static int anx78xx_link_phy_initialization(struct anx78xx *anx78xx)
+{
+	int err;
+
+	/*
+	 * REVISIT: this writes to RESERVED bits in the Analog Control 0
+	 * register.
+	 */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_ANALOG_CTRL0_REG,
+			   0x02);
+	if (err)
+		return err;
+
+	/*
+	 * Write DP TX output emphasis precise tune bits.
+	 */
+	err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P1],
+				SP_DP_TX_LT_CTRL0_REG,
+				dp_tx_output_precise_tune_bits,
+				ARRAY_SIZE(dp_tx_output_precise_tune_bits));
+
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int anx78xx_xtal_clk_sel(struct anx78xx *anx78xx)
+{
+	unsigned int value;
+	int err;
+
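+	/* Select the 27 MHz crystal and derive the AUX/I2C timer values from it */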
+	err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P2],
+				 SP_ANALOG_DEBUG2_REG,
+				 SP_XTAL_FRQ | SP_FORCE_SW_OFF_BYPASS,
+				 SP_XTAL_FRQ_27M);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL3_REG,
+			   XTAL_CLK & SP_WAIT_COUNTER_7_0_MASK);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL4_REG,
+			   ((XTAL_CLK & 0xff00) >> 2) | (XTAL_CLK / 10));
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+			   SP_I2C_GEN_10US_TIMER0_REG, XTAL_CLK & 0xff);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+			   SP_I2C_GEN_10US_TIMER1_REG,
+			   (XTAL_CLK & 0xff00) >> 8);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_MISC_CTRL_REG,
+			   XTAL_CLK / 10 - 1);
+	if (err)
+		return err;
+
+	err = regmap_read(anx78xx->map[I2C_IDX_RX_P0],
+			  SP_HDMI_US_TIMER_CTRL_REG,
+			  &value);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0],
+			   SP_HDMI_US_TIMER_CTRL_REG,
+			   (value & SP_MS_TIMER_MARGIN_10_8_MASK) |
+			   ((((XTAL_CLK / 10) >> 1) - 2) << 3));
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static const struct reg_sequence otp_key_protect[] = {
+	{ SP_OTP_KEY_PROTECT1_REG, SP_OTP_PSW1 },
+	{ SP_OTP_KEY_PROTECT2_REG, SP_OTP_PSW2 },
+	{ SP_OTP_KEY_PROTECT3_REG, SP_OTP_PSW3 },
+};
+
+static int anx78xx_tx_initialization(struct anx78xx *anx78xx)
+{
+	int err;
+
+	/* Set termination resistor to 50 ohm */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL2_REG,
+			   0x30);
+	if (err)
+		return err;
+
+	/* Enable aux double diff output */
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_DP_AUX_CH_CTRL2_REG, 0x08);
+	if (err)
+		return err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
+				 SP_DP_HDCP_CTRL_REG, SP_AUTO_EN |
+				 SP_AUTO_START);
+	if (err)
+		return err;
+
+	err = regmap_multi_reg_write(anx78xx->map[I2C_IDX_TX_P0],
+				     otp_key_protect,
+				     ARRAY_SIZE(otp_key_protect));
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_HDCP_KEY_COMMAND_REG, SP_DISABLE_SYNC_HDCP);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL8_REG,
+			   SP_VID_VRES_TH);
+	if (err)
+		return err;
+
+	/*
+	 * DP HDCP auto authentication wait timer (when downstream starts to
+	 * auth, DP side will wait for this period then do auth automatically)
+	 */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_HDCP_AUTO_TIMER_REG,
+			   0x00);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_DP_HDCP_CTRL_REG, SP_LINK_POLLING);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_DP_LINK_DEBUG_CTRL_REG, SP_M_VID_DEBUG);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2],
+			       SP_ANALOG_DEBUG2_REG, SP_POWERON_TIME_1P5MS);
+	if (err)
+		return err;
+
+	err = anx78xx_xtal_clk_sel(anx78xx);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_DEFER_CTRL_REG,
+			   SP_DEFER_CTRL_EN | 0x0c);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_DP_POLLING_CTRL_REG,
+			       SP_AUTO_POLLING_DISABLE);
+	if (err)
+		return err;
+
+	/*
+	 * Short the link integrity check timer to speed up bstatus
+	 * polling for HDCP CTS item 1A-07
+	 */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+			   SP_HDCP_LINK_CHECK_TIMER_REG, 0x1d);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_DP_MISC_CTRL_REG, SP_EQ_TRAINING_LOOP);
+	if (err)
+		return err;
+
+	/* Power down the main link by default */
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_DP_ANALOG_POWER_DOWN_REG, SP_CH0_PD);
+	if (err)
+		return err;
+
+	err = anx78xx_link_phy_initialization(anx78xx);
+	if (err)
+		return err;
+
+	/* Gen m_clk with downspreading */
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_DP_M_CALCULATION_CTRL_REG, SP_M_GEN_CLK_SEL);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int anx78xx_enable_interrupts(struct anx78xx *anx78xx)
+{
+	int err;
+
+	/*
+	 * BIT0: INT pin assertion polarity: 1 = assert high
+	 * BIT1: INT pin output type: 0 = push/pull
+	 */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_INT_CTRL_REG, 0x01);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2],
+			   SP_COMMON_INT_MASK4_REG, SP_HPD_LOST | SP_HPD_PLUG);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_MASK1_REG,
+			   SP_TRAINING_FINISH);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_INT_MASK1_REG,
+			   SP_CKDT_CHG | SP_SCDT_CHG);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static void anx78xx_poweron(struct anx78xx *anx78xx)
+{
+	struct anx78xx_platform_data *pdata = &anx78xx->pdata;
+	int err;
+
+	if (WARN_ON(anx78xx->powered))
+		return;
+
+	if (pdata->dvdd10) {
+		err = regulator_enable(pdata->dvdd10);
+		if (err) {
+			DRM_ERROR("Failed to enable DVDD10 regulator: %d\n",
+				  err);
+			return;
+		}
+
+		usleep_range(1000, 2000);
+	}
+
+	gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
+	usleep_range(1000, 2000);
+
+	gpiod_set_value_cansleep(pdata->gpiod_pd, 0);
+	usleep_range(1000, 2000);
+
+	gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
+
+	/* Power on registers module */
+	anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
+			 SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
+	anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
+			   SP_REGISTER_PD | SP_TOTAL_PD);
+
+	anx78xx->powered = true;
+}
+
+static void anx78xx_poweroff(struct anx78xx *anx78xx)
+{
+	struct anx78xx_platform_data *pdata = &anx78xx->pdata;
+	int err;
+
+	if (WARN_ON(!anx78xx->powered))
+		return;
+
+	gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
+	usleep_range(1000, 2000);
+
+	gpiod_set_value_cansleep(pdata->gpiod_pd, 1);
+	usleep_range(1000, 2000);
+
+	if (pdata->dvdd10) {
+		err = regulator_disable(pdata->dvdd10);
+		if (err) {
+			DRM_ERROR("Failed to disable DVDD10 regulator: %d\n",
+				  err);
+			return;
+		}
+
+		usleep_range(1000, 2000);
+	}
+
+	anx78xx->powered = false;
+}
+
+static int anx78xx_start(struct anx78xx *anx78xx)
+{
+	int err;
+
+	/* Power on all modules */
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
+				 SP_POWERDOWN_CTRL_REG,
+				 SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD |
+				 SP_LINK_PD);
+	if (err)
+		goto err_poweroff;
+
+	err = anx78xx_enable_interrupts(anx78xx);
+	if (err) {
+		DRM_ERROR("Failed to enable interrupts: %d\n", err);
+		goto err_poweroff;
+	}
+
+	err = anx78xx_rx_initialization(anx78xx);
+	if (err) {
+		DRM_ERROR("Failed receiver initialization: %d\n", err);
+		goto err_poweroff;
+	}
+
+	err = anx78xx_tx_initialization(anx78xx);
+	if (err) {
+		DRM_ERROR("Failed transmitter initialization: %d\n", err);
+		goto err_poweroff;
+	}
+
+	/*
+	 * This delay seems to help keep the hardware in a good state. Without
+	 * it, there are times when it fails silently.
+	 */
+	usleep_range(10000, 15000);
+
+	return 0;
+
+err_poweroff:
+	DRM_ERROR("Failed SlimPort transmitter initialization: %d\n", err);
+	anx78xx_poweroff(anx78xx);
+
+	return err;
+}
+
+static int anx78xx_init_pdata(struct anx78xx *anx78xx)
+{
+	struct anx78xx_platform_data *pdata = &anx78xx->pdata;
+	struct device *dev = &anx78xx->client->dev;
+
+	/* 1.0V digital core power regulator  */
+	pdata->dvdd10 = devm_regulator_get(dev, "dvdd10");
+	if (IS_ERR(pdata->dvdd10)) {
+		if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER)
+			DRM_ERROR("DVDD10 regulator not found\n");
+
+		return PTR_ERR(pdata->dvdd10);
+	}
+
+	/* GPIO for HPD */
+	pdata->gpiod_hpd = devm_gpiod_get(dev, "hpd", GPIOD_IN);
+	if (IS_ERR(pdata->gpiod_hpd))
+		return PTR_ERR(pdata->gpiod_hpd);
+
+	/* GPIO for chip power down */
+	pdata->gpiod_pd = devm_gpiod_get(dev, "pd", GPIOD_OUT_HIGH);
+	if (IS_ERR(pdata->gpiod_pd))
+		return PTR_ERR(pdata->gpiod_pd);
+
+	/* GPIO for chip reset */
+	pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+
+	return PTR_ERR_OR_ZERO(pdata->gpiod_reset);
+}
+
+static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
+{
+	u8 dp_bw, dpcd[2];
+	int err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
+			   0x0);
+	if (err)
+		return err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
+				 SP_POWERDOWN_CTRL_REG,
+				 SP_TOTAL_PD);
+	if (err)
+		return err;
+
+	err = drm_dp_dpcd_readb(&anx78xx->aux, DP_MAX_LINK_RATE, &dp_bw);
+	if (err < 0)
+		return err;
+
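+	/* Only the standard DP link rates (1.62, 2.7 and 5.4 Gbps) are accepted */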
+	switch (dp_bw) {
+	case DP_LINK_BW_1_62:
+	case DP_LINK_BW_2_7:
+	case DP_LINK_BW_5_4:
+		break;
+
+	default:
+		DRM_DEBUG_KMS("DP bandwidth (%#02x) not supported\n", dp_bw);
+		return -EINVAL;
+	}
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
+			       SP_VIDEO_MUTE);
+	if (err)
+		return err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
+				 SP_VID_CTRL1_REG, SP_VIDEO_EN);
+	if (err)
+		return err;
+
+	/* Get DPCD info */
+	err = drm_dp_dpcd_read(&anx78xx->aux, DP_DPCD_REV,
+			       &anx78xx->dpcd, DP_RECEIVER_CAP_SIZE);
+	if (err < 0) {
+		DRM_ERROR("Failed to read DPCD: %d\n", err);
+		return err;
+	}
+
+	/* Clear channel x SERDES power down */
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
+				 SP_DP_ANALOG_POWER_DOWN_REG, SP_CH0_PD);
+	if (err)
+		return err;
+
+	/*
+	 * Power up the sink (DP_SET_POWER register is only available on DPCD
+	 * v1.1 and later).
+	 */
+	if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
+		err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
+		if (err < 0) {
+			DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
+				  err);
+			return err;
+		}
+
+		dpcd[0] &= ~DP_SET_POWER_MASK;
+		dpcd[0] |= DP_SET_POWER_D0;
+
+		err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
+		if (err < 0) {
+			DRM_ERROR("Failed to power up DisplayPort link: %d\n",
+				  err);
+			return err;
+		}
+
+		/*
+		 * According to the DP 1.1 specification, a "Sink Device must
+		 * exit the power saving state within 1 ms" (Section 2.5.3.1,
+		 * Table 5-52, "Sink Control Field" (register 0x600)).
+		 */
+		usleep_range(1000, 2000);
+	}
+
+	/* Possibly enable downspread on the sink */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+			   SP_DP_DOWNSPREAD_CTRL1_REG, 0);
+	if (err)
+		return err;
+
+	if (anx78xx->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5) {
+		DRM_DEBUG("Enable downspread on the sink\n");
+		/* 4000PPM */
+		err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+				   SP_DP_DOWNSPREAD_CTRL1_REG, 8);
+		if (err)
+			return err;
+
+		err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_DOWNSPREAD_CTRL,
+					 DP_SPREAD_AMP_0_5);
+		if (err < 0)
+			return err;
+	} else {
+		err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_DOWNSPREAD_CTRL, 0);
+		if (err < 0)
+			return err;
+	}
+
+	/* Set the lane count and the link rate on the sink */
+	if (drm_dp_enhanced_frame_cap(anx78xx->dpcd))
+		err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+				       SP_DP_SYSTEM_CTRL_BASE + 4,
+				       SP_ENHANCED_MODE);
+	else
+		err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
+					 SP_DP_SYSTEM_CTRL_BASE + 4,
+					 SP_ENHANCED_MODE);
+	if (err)
+		return err;
+
+	dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd);
+	dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+			   SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
+	if (err)
+		return err;
+
+	dpcd[1] = drm_dp_max_lane_count(anx78xx->dpcd);
+
+	if (drm_dp_enhanced_frame_cap(anx78xx->dpcd))
+		dpcd[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+	err = drm_dp_dpcd_write(&anx78xx->aux, DP_LINK_BW_SET, dpcd,
+				sizeof(dpcd));
+	if (err < 0) {
+		DRM_ERROR("Failed to configure link: %d\n", err);
+		return err;
+	}
+
+	/* Start training on the source */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_LT_CTRL_REG,
+			   SP_LT_EN);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int anx78xx_config_dp_output(struct anx78xx *anx78xx)
+{
+	int err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
+				 SP_VIDEO_MUTE);
+	if (err)
+		return err;
+
+	/* Enable DP output */
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
+			       SP_VIDEO_EN);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int anx78xx_send_video_infoframe(struct anx78xx *anx78xx,
+					struct hdmi_avi_infoframe *frame)
+{
+	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+	int err;
+
+	err = hdmi_avi_infoframe_pack(frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		DRM_ERROR("Failed to pack AVI infoframe: %d\n", err);
+		return err;
+	}
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
+				 SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_EN);
+	if (err)
+		return err;
+
+	err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P2],
+				SP_INFOFRAME_AVI_DB1_REG, buffer,
+				frame->length);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_UD);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_EN);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int anx78xx_get_downstream_info(struct anx78xx *anx78xx)
+{
+	u8 value;
+	int err;
+
+	err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SINK_COUNT, &value);
+	if (err < 0) {
+		DRM_ERROR("Get sink count failed %d\n", err);
+		return err;
+	}
+
+	if (!DP_GET_SINK_COUNT(value)) {
+		DRM_ERROR("Downstream disconnected\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int anx78xx_get_modes(struct drm_connector *connector)
+{
+	struct anx78xx *anx78xx = connector_to_anx78xx(connector);
+	int err, num_modes = 0;
+
+	if (WARN_ON(!anx78xx->powered))
+		return 0;
+
+	if (anx78xx->edid)
+		return drm_add_edid_modes(connector, anx78xx->edid);
+
+	mutex_lock(&anx78xx->lock);
+
+	err = anx78xx_get_downstream_info(anx78xx);
+	if (err) {
+		DRM_ERROR("Failed to get downstream info: %d\n", err);
+		goto unlock;
+	}
+
+	anx78xx->edid = drm_get_edid(connector, &anx78xx->aux.ddc);
+	if (!anx78xx->edid) {
+		DRM_ERROR("Failed to read EDID\n");
+		goto unlock;
+	}
+
+	err = drm_connector_update_edid_property(connector,
+						 anx78xx->edid);
+	if (err) {
+		DRM_ERROR("Failed to update EDID property: %d\n", err);
+		goto unlock;
+	}
+
+	num_modes = drm_add_edid_modes(connector, anx78xx->edid);
+
+unlock:
+	mutex_unlock(&anx78xx->lock);
+
+	return num_modes;
+}
+
+static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = {
+	.get_modes = anx78xx_get_modes,
+};
+
+static enum drm_connector_status anx78xx_detect(struct drm_connector *connector,
+						bool force)
+{
+	struct anx78xx *anx78xx = connector_to_anx78xx(connector);
+
+	if (!gpiod_get_value(anx78xx->pdata.gpiod_hpd))
+		return connector_status_disconnected;
+
+	return connector_status_connected;
+}
+
+static const struct drm_connector_funcs anx78xx_connector_funcs = {
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.detect = anx78xx_detect,
+	.destroy = drm_connector_cleanup,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int anx78xx_bridge_attach(struct drm_bridge *bridge)
+{
+	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
+	int err;
+
+	if (!bridge->encoder) {
+		DRM_ERROR("Parent encoder object not found");
+		return -ENODEV;
+	}
+
+	/* Register aux channel */
+	anx78xx->aux.name = "DP-AUX";
+	anx78xx->aux.dev = &anx78xx->client->dev;
+	anx78xx->aux.transfer = anx78xx_aux_transfer;
+
+	err = drm_dp_aux_register(&anx78xx->aux);
+	if (err < 0) {
+		DRM_ERROR("Failed to register aux channel: %d\n", err);
+		return err;
+	}
+
+	err = drm_connector_init(bridge->dev, &anx78xx->connector,
+				 &anx78xx_connector_funcs,
+				 DRM_MODE_CONNECTOR_DisplayPort);
+	if (err) {
+		DRM_ERROR("Failed to initialize connector: %d\n", err);
+		return err;
+	}
+
+	drm_connector_helper_add(&anx78xx->connector,
+				 &anx78xx_connector_helper_funcs);
+
+	err = drm_connector_register(&anx78xx->connector);
+	if (err) {
+		DRM_ERROR("Failed to register connector: %d\n", err);
+		return err;
+	}
+
+	anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+	err = drm_connector_attach_encoder(&anx78xx->connector,
+					   bridge->encoder);
+	if (err) {
+		DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static enum drm_mode_status
+anx78xx_bridge_mode_valid(struct drm_bridge *bridge,
+			  const struct drm_display_mode *mode)
+{
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return MODE_NO_INTERLACE;
+
+	/* Max 1200p at 5.4 Gbps, one lane */
+	if (mode->clock > 154000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static void anx78xx_bridge_disable(struct drm_bridge *bridge)
+{
+	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
+
+	/* Power off all modules except configuration register access */
+	anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
+			 SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
+}
+
+static void anx78xx_bridge_mode_set(struct drm_bridge *bridge,
+				const struct drm_display_mode *mode,
+				const struct drm_display_mode *adjusted_mode)
+{
+	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
+	struct hdmi_avi_infoframe frame;
+	int err;
+
+	if (WARN_ON(!anx78xx->powered))
+		return;
+
+	mutex_lock(&anx78xx->lock);
+
+	err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+						       &anx78xx->connector,
+						       adjusted_mode);
+	if (err) {
+		DRM_ERROR("Failed to setup AVI infoframe: %d\n", err);
+		goto unlock;
+	}
+
+	err = anx78xx_send_video_infoframe(anx78xx, &frame);
+	if (err)
+		DRM_ERROR("Failed to send AVI infoframe: %d\n", err);
+
+unlock:
+	mutex_unlock(&anx78xx->lock);
+}
+
+static void anx78xx_bridge_enable(struct drm_bridge *bridge)
+{
+	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
+	int err;
+
+	err = anx78xx_start(anx78xx);
+	if (err) {
+		DRM_ERROR("Failed to initialize: %d\n", err);
+		return;
+	}
+
+	err = anx78xx_set_hpd(anx78xx);
+	if (err)
+		DRM_ERROR("Failed to set HPD: %d\n", err);
+}
+
+static const struct drm_bridge_funcs anx78xx_bridge_funcs = {
+	.attach = anx78xx_bridge_attach,
+	.mode_valid = anx78xx_bridge_mode_valid,
+	.disable = anx78xx_bridge_disable,
+	.mode_set = anx78xx_bridge_mode_set,
+	.enable = anx78xx_bridge_enable,
+};
+
+static irqreturn_t anx78xx_hpd_threaded_handler(int irq, void *data)
+{
+	struct anx78xx *anx78xx = data;
+	int err;
+
+	if (anx78xx->powered)
+		return IRQ_HANDLED;
+
+	mutex_lock(&anx78xx->lock);
+
+	/* The cable has been plugged in, power on the chip */
+	anx78xx_poweron(anx78xx);
+
+	err = anx78xx_enable_interrupts(anx78xx);
+	if (err)
+		DRM_ERROR("Failed to enable interrupts: %d\n", err);
+
+	mutex_unlock(&anx78xx->lock);
+
+	return IRQ_HANDLED;
+}
+
+static int anx78xx_handle_dp_int_1(struct anx78xx *anx78xx, u8 irq)
+{
+	int err;
+
+	DRM_DEBUG_KMS("Handle DP interrupt 1: %02x\n", irq);
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_STATUS1_REG,
+			   irq);
+	if (err)
+		return err;
+
+	if (irq & SP_TRAINING_FINISH) {
+		DRM_DEBUG_KMS("IRQ: hardware link training finished\n");
+		err = anx78xx_config_dp_output(anx78xx);
+	}
+
+	return err;
+}
+
+static bool anx78xx_handle_common_int_4(struct anx78xx *anx78xx, u8 irq)
+{
+	bool event = false;
+	int err;
+
+	DRM_DEBUG_KMS("Handle common interrupt 4: %02x\n", irq);
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2],
+			   SP_COMMON_INT_STATUS4_REG, irq);
+	if (err) {
+		DRM_ERROR("Failed to write SP_COMMON_INT_STATUS4 %d\n", err);
+		return event;
+	}
+
+	if (irq & SP_HPD_LOST) {
+		DRM_DEBUG_KMS("IRQ: Hot plug detect - cable is pulled out\n");
+		event = true;
+		anx78xx_poweroff(anx78xx);
+		/* Free cached EDID */
+		kfree(anx78xx->edid);
+		anx78xx->edid = NULL;
+	} else if (irq & SP_HPD_PLUG) {
+		DRM_DEBUG_KMS("IRQ: Hot plug detect - cable plug\n");
+		event = true;
+	}
+
+	return event;
+}
+
+static void anx78xx_handle_hdmi_int_1(struct anx78xx *anx78xx, u8 irq)
+{
+	unsigned int value;
+	int err;
+
+	DRM_DEBUG_KMS("Handle HDMI interrupt 1: %02x\n", irq);
+
+	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_INT_STATUS1_REG,
+			   irq);
+	if (err) {
+		DRM_ERROR("Write HDMI int 1 failed: %d\n", err);
+		return;
+	}
+
+	if ((irq & SP_CKDT_CHG) || (irq & SP_SCDT_CHG)) {
+		DRM_DEBUG_KMS("IRQ: HDMI input detected\n");
+
+		err = regmap_read(anx78xx->map[I2C_IDX_RX_P0],
+				  SP_SYSTEM_STATUS_REG, &value);
+		if (err) {
+			DRM_ERROR("Read system status reg failed: %d\n", err);
+			return;
+		}
+
+		if (!(value & SP_TMDS_CLOCK_DET)) {
+			DRM_DEBUG_KMS("IRQ: *** Waiting for HDMI clock ***\n");
+			return;
+		}
+
+		if (!(value & SP_TMDS_DE_DET)) {
+			DRM_DEBUG_KMS("IRQ: *** Waiting for HDMI signal ***\n");
+			return;
+		}
+
+		err = anx78xx_dp_link_training(anx78xx);
+		if (err)
+			DRM_ERROR("Failed to start link training: %d\n", err);
+	}
+}
+
+static irqreturn_t anx78xx_intp_threaded_handler(int unused, void *data)
+{
+	struct anx78xx *anx78xx = data;
+	bool event = false;
+	unsigned int irq;
+	int err;
+
+	mutex_lock(&anx78xx->lock);
+
+	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_STATUS1_REG,
+			  &irq);
+	if (err) {
+		DRM_ERROR("Failed to read DP interrupt 1 status: %d\n", err);
+		goto unlock;
+	}
+
+	if (irq)
+		anx78xx_handle_dp_int_1(anx78xx, irq);
+
+	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2],
+			  SP_COMMON_INT_STATUS4_REG, &irq);
+	if (err) {
+		DRM_ERROR("Failed to read common interrupt 4 status: %d\n",
+			  err);
+		goto unlock;
+	}
+
+	if (irq)
+		event = anx78xx_handle_common_int_4(anx78xx, irq);
+
+	/* Make sure we are still powered after handling HPD events */
+	if (!anx78xx->powered)
+		goto unlock;
+
+	err = regmap_read(anx78xx->map[I2C_IDX_RX_P0], SP_INT_STATUS1_REG,
+			  &irq);
+	if (err) {
+		DRM_ERROR("Failed to read HDMI int 1 status: %d\n", err);
+		goto unlock;
+	}
+
+	if (irq)
+		anx78xx_handle_hdmi_int_1(anx78xx, irq);
+
+unlock:
+	mutex_unlock(&anx78xx->lock);
+
+	if (event)
+		drm_helper_hpd_irq_event(anx78xx->connector.dev);
+
+	return IRQ_HANDLED;
+}
+
+static void unregister_i2c_dummy_clients(struct anx78xx *anx78xx)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(anx78xx->i2c_dummy); i++)
+		i2c_unregister_device(anx78xx->i2c_dummy[i]);
+}
+
+static const struct regmap_config anx78xx_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
+static const u16 anx78xx_chipid_list[] = {
+	0x7808,
+	0x7812,
+	0x7814,
+	0x7818,
+};
+
+static int anx78xx_i2c_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	struct anx78xx *anx78xx;
+	struct anx78xx_platform_data *pdata;
+	unsigned int i, idl, idh, version;
+	const u8 *i2c_addresses;
+	bool found = false;
+	int err;
+
+	anx78xx = devm_kzalloc(&client->dev, sizeof(*anx78xx), GFP_KERNEL);
+	if (!anx78xx)
+		return -ENOMEM;
+
+	pdata = &anx78xx->pdata;
+
+	mutex_init(&anx78xx->lock);
+
+#if IS_ENABLED(CONFIG_OF)
+	anx78xx->bridge.of_node = client->dev.of_node;
+#endif
+
+	anx78xx->client = client;
+	i2c_set_clientdata(client, anx78xx);
+
+	err = anx78xx_init_pdata(anx78xx);
+	if (err) {
+		if (err != -EPROBE_DEFER)
+			DRM_ERROR("Failed to initialize pdata: %d\n", err);
+
+		return err;
+	}
+
+	pdata->hpd_irq = gpiod_to_irq(pdata->gpiod_hpd);
+	if (pdata->hpd_irq < 0) {
+		DRM_ERROR("Failed to get HPD IRQ: %d\n", pdata->hpd_irq);
+		return -ENODEV;
+	}
+
+	pdata->intp_irq = client->irq;
+	if (!pdata->intp_irq) {
+		DRM_ERROR("Failed to get CABLE_DET and INTP IRQ\n");
+		return -ENODEV;
+	}
+
+	/* Map the slave addresses of the matched ANX78xx variant */
+	i2c_addresses = device_get_match_data(&client->dev);
+	for (i = 0; i < I2C_NUM_ADDRESSES; i++) {
+		struct i2c_client *i2c_dummy;
+
+		i2c_dummy = i2c_new_dummy_device(client->adapter,
+						 i2c_addresses[i] >> 1);
+		if (IS_ERR(i2c_dummy)) {
+			err = PTR_ERR(i2c_dummy);
+			DRM_ERROR("Failed to reserve I2C bus %02x: %d\n",
+				  i2c_addresses[i], err);
+			goto err_unregister_i2c;
+		}
+
+		anx78xx->i2c_dummy[i] = i2c_dummy;
+		anx78xx->map[i] = devm_regmap_init_i2c(anx78xx->i2c_dummy[i],
+						       &anx78xx_regmap_config);
+		if (IS_ERR(anx78xx->map[i])) {
+			err = PTR_ERR(anx78xx->map[i]);
+			DRM_ERROR("Failed regmap initialization %02x\n",
+				  i2c_addresses[i]);
+			goto err_unregister_i2c;
+		}
+	}
+
+	/* Look for supported chip ID */
+	anx78xx_poweron(anx78xx);
+
+	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_IDL_REG,
+			  &idl);
+	if (err)
+		goto err_poweroff;
+
+	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_IDH_REG,
+			  &idh);
+	if (err)
+		goto err_poweroff;
+
+	anx78xx->chipid = (u8)idl | ((u8)idh << 8);
+
+	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_VERSION_REG,
+			  &version);
+	if (err)
+		goto err_poweroff;
+
+	for (i = 0; i < ARRAY_SIZE(anx78xx_chipid_list); i++) {
+		if (anx78xx->chipid == anx78xx_chipid_list[i]) {
+			DRM_INFO("Found ANX%x (ver. %d) SlimPort Transmitter\n",
+				 anx78xx->chipid, version);
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		DRM_ERROR("ANX%x (ver. %d) not supported by this driver\n",
+			  anx78xx->chipid, version);
+		err = -ENODEV;
+		goto err_poweroff;
+	}
+
+	err = devm_request_threaded_irq(&client->dev, pdata->hpd_irq, NULL,
+					anx78xx_hpd_threaded_handler,
+					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+					"anx78xx-hpd", anx78xx);
+	if (err) {
+		DRM_ERROR("Failed to request CABLE_DET threaded IRQ: %d\n",
+			  err);
+		goto err_poweroff;
+	}
+
+	err = devm_request_threaded_irq(&client->dev, pdata->intp_irq, NULL,
+					anx78xx_intp_threaded_handler,
+					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+					"anx78xx-intp", anx78xx);
+	if (err) {
+		DRM_ERROR("Failed to request INTP threaded IRQ: %d\n", err);
+		goto err_poweroff;
+	}
+
+	anx78xx->bridge.funcs = &anx78xx_bridge_funcs;
+
+	drm_bridge_add(&anx78xx->bridge);
+
+	/* If cable is pulled out, just poweroff and wait for HPD event */
+	if (!gpiod_get_value(anx78xx->pdata.gpiod_hpd))
+		anx78xx_poweroff(anx78xx);
+
+	return 0;
+
+err_poweroff:
+	anx78xx_poweroff(anx78xx);
+
+err_unregister_i2c:
+	unregister_i2c_dummy_clients(anx78xx);
+	return err;
+}
+
+static int anx78xx_i2c_remove(struct i2c_client *client)
+{
+	struct anx78xx *anx78xx = i2c_get_clientdata(client);
+
+	drm_bridge_remove(&anx78xx->bridge);
+
+	unregister_i2c_dummy_clients(anx78xx);
+
+	kfree(anx78xx->edid);
+
+	return 0;
+}
+
+static const struct i2c_device_id anx78xx_id[] = {
+	{ "anx7814", 0 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, anx78xx_id);
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id anx78xx_match_table[] = {
+	{ .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses },
+	{ .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses },
+	{ .compatible = "analogix,anx7814", .data = anx781x_i2c_addresses },
+	{ .compatible = "analogix,anx7818", .data = anx781x_i2c_addresses },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, anx78xx_match_table);
+#endif
+
+static struct i2c_driver anx78xx_driver = {
+	.driver = {
+		   .name = "anx7814",
+		   .of_match_table = of_match_ptr(anx78xx_match_table),
+		  },
+	.probe = anx78xx_i2c_probe,
+	.remove = anx78xx_i2c_remove,
+	.id_table = anx78xx_id,
+};
+module_i2c_driver(anx78xx_driver);
+
+MODULE_DESCRIPTION("ANX78xx SlimPort Transmitter driver");
+MODULE_AUTHOR("Enric Balletbo i Serra <enric.balletbo@collabora.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.h b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.h
new file mode 100644
index 0000000..db2a272
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2016, Analogix Semiconductor. All rights reserved.
+ */
+
+#ifndef __ANX78xx_H
+#define __ANX78xx_H
+
+#include "analogix-i2c-dptx.h"
+#include "analogix-i2c-txcommon.h"
+
+/***************************************************************/
+/* Register definitions for RX_P0                              */
+/***************************************************************/
+
+/*
+ * System Control and Status
+ */
+
+/* Software Reset Register 1 */
+#define SP_SOFTWARE_RESET1_REG		0x11
+#define SP_VIDEO_RST			BIT(4)
+#define SP_HDCP_MAN_RST			BIT(2)
+#define SP_TMDS_RST			BIT(1)
+#define SP_SW_MAN_RST			BIT(0)
+
+/* System Status Register */
+#define SP_SYSTEM_STATUS_REG		0x14
+#define SP_TMDS_CLOCK_DET		BIT(1)
+#define SP_TMDS_DE_DET			BIT(0)
+
+/* HDMI Status Register */
+#define SP_HDMI_STATUS_REG		0x15
+#define SP_HDMI_AUD_LAYOUT		BIT(3)
+#define SP_HDMI_DET			BIT(0)
+#  define SP_DVI_MODE			0
+#  define SP_HDMI_MODE			1
+
+/* HDMI Mute Control Register */
+#define SP_HDMI_MUTE_CTRL_REG		0x16
+#define SP_AUD_MUTE			BIT(1)
+#define SP_VID_MUTE			BIT(0)
+
+/* System Power Down Register 1 */
+#define SP_SYSTEM_POWER_DOWN1_REG	0x18
+#define SP_PWDN_CTRL			BIT(0)
+
+/*
+ * Audio and Video Auto Control
+ */
+
+/* Auto Audio and Video Control register */
+#define SP_AUDVID_CTRL_REG		0x20
+#define SP_AVC_OE			BIT(7)
+#define SP_AAC_OE			BIT(6)
+#define SP_AVC_EN			BIT(1)
+#define SP_AAC_EN			BIT(0)
+
+/* Audio Exception Enable Registers */
+#define SP_AUD_EXCEPTION_ENABLE_BASE	(0x24 - 1)
+/* Bits for Audio Exception Enable Register 3 */
+#define SP_AEC_EN21			BIT(5)
+
+/*
+ * Interrupt
+ */
+
+/* Interrupt Status Register 1 */
+#define SP_INT_STATUS1_REG		0x31
+/* Bits for Interrupt Status Register 1 */
+#define SP_HDMI_DVI			BIT(7)
+#define SP_CKDT_CHG			BIT(6)
+#define SP_SCDT_CHG			BIT(5)
+#define SP_PCLK_CHG			BIT(4)
+#define SP_PLL_UNLOCK			BIT(3)
+#define SP_CABLE_PLUG_CHG		BIT(2)
+#define SP_SET_MUTE			BIT(1)
+#define SP_SW_INTR			BIT(0)
+/* Bits for Interrupt Status Register 2 */
+#define SP_HDCP_ERR			BIT(5)
+#define SP_AUDIO_SAMPLE_CHG		BIT(0)	/* undocumented */
+/* Bits for Interrupt Status Register 3 */
+#define SP_AUD_MODE_CHG			BIT(0)
+/* Bits for Interrupt Status Register 5 */
+#define SP_AUDIO_RCV			BIT(0)
+/* Bits for Interrupt Status Register 6 */
+#define SP_INT_STATUS6_REG		0x36
+#define SP_CTS_RCV			BIT(7)
+#define SP_NEW_AUD_PKT			BIT(4)
+#define SP_NEW_AVI_PKT			BIT(1)
+#define SP_NEW_CP_PKT			BIT(0)
+/* Bits for Interrupt Status Register 7 */
+#define SP_NO_VSI			BIT(7)
+#define SP_NEW_VS			BIT(4)
+
+/* Interrupt Mask 1 Status Registers */
+#define SP_INT_MASK1_REG		0x41
+
+/* HDMI US TIMER Control Register */
+#define SP_HDMI_US_TIMER_CTRL_REG	0x49
+#define SP_MS_TIMER_MARGIN_10_8_MASK	0x07
+
+/*
+ * TMDS Control
+ */
+
+/* TMDS Control Registers */
+#define SP_TMDS_CTRL_BASE		(0x50 - 1)
+/* Bits for TMDS Control Register 7 */
+#define SP_PD_RT			BIT(0)
+
+/*
+ * Video Control
+ */
+
+/* Video Status Register */
+#define SP_VIDEO_STATUS_REG		0x70
+#define SP_COLOR_DEPTH_MASK		0xf0
+#define SP_COLOR_DEPTH_SHIFT		4
+#  define SP_COLOR_DEPTH_MODE_LEGACY	0x00
+#  define SP_COLOR_DEPTH_MODE_24BIT	0x04
+#  define SP_COLOR_DEPTH_MODE_30BIT	0x05
+#  define SP_COLOR_DEPTH_MODE_36BIT	0x06
+#  define SP_COLOR_DEPTH_MODE_48BIT	0x07
+
+/* Video Data Range Control Register */
+#define SP_VID_DATA_RANGE_CTRL_REG	0x83
+#define SP_R2Y_INPUT_LIMIT		BIT(1)
+
+/* Pixel Clock High Resolution Counter Registers */
+#define SP_PCLK_HIGHRES_CNT_BASE	(0x8c - 1)
+
+/*
+ * Audio Control
+ */
+
+/* Number of Audio Channels Status Registers */
+#define SP_AUD_CH_STATUS_REG_NUM	6
+
+/* Audio IN S/PDIF Channel Status Registers */
+#define SP_AUD_SPDIF_CH_STATUS_BASE	0xc7
+
+/* Audio IN S/PDIF Channel Status Register 4 */
+#define SP_FS_FREQ_MASK			0x0f
+#  define SP_FS_FREQ_44100HZ		0x00
+#  define SP_FS_FREQ_48000HZ		0x02
+#  define SP_FS_FREQ_32000HZ		0x03
+#  define SP_FS_FREQ_88200HZ		0x08
+#  define SP_FS_FREQ_96000HZ		0x0a
+#  define SP_FS_FREQ_176400HZ		0x0c
+#  define SP_FS_FREQ_192000HZ		0x0e
+
+/*
+ * Miscellaneous Control Block
+ */
+
+/* CHIP Control Register */
+#define SP_CHIP_CTRL_REG		0xe3
+#define SP_MAN_HDMI5V_DET		BIT(3)
+#define SP_PLLLOCK_CKDT_EN		BIT(2)
+#define SP_ANALOG_CKDT_EN		BIT(1)
+#define SP_DIGITAL_CKDT_EN		BIT(0)
+
+/* Packet Receiving Status Register */
+#define SP_PACKET_RECEIVING_STATUS_REG	0xf3
+#define SP_AVI_RCVD			BIT(5)
+#define SP_VSI_RCVD			BIT(1)
+
+/***************************************************************/
+/* Register definitions for RX_P1                              */
+/***************************************************************/
+
+/* HDCP BCAPS Shadow Register */
+#define SP_HDCP_BCAPS_SHADOW_REG	0x2a
+#define SP_BCAPS_REPEATER		BIT(5)
+
+/* HDCP Status Register */
+#define SP_RX_HDCP_STATUS_REG		0x3f
+#define SP_AUTH_EN			BIT(4)
+
+/*
+ * InfoFrame and Control Packet Registers
+ */
+
+/* AVI InfoFrame packet checksum */
+#define SP_AVI_INFOFRAME_CHECKSUM	0xa3
+
+/* AVI InfoFrame Registers */
+#define SP_AVI_INFOFRAME_DATA_BASE	0xa4
+
+#define SP_AVI_COLOR_F_MASK		0x60
+#define SP_AVI_COLOR_F_SHIFT		5
+
+/* Audio InfoFrame Registers */
+#define SP_AUD_INFOFRAME_DATA_BASE	0xc4
+#define SP_AUD_INFOFRAME_LAYOUT_MASK	0x0f
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet type code */
+#define SP_MPEG_VS_INFOFRAME_TYPE_REG	0xe0
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet length */
+#define SP_MPEG_VS_INFOFRAME_LEN_REG	0xe2
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet version number */
+#define SP_MPEG_VS_INFOFRAME_VER_REG	0xe1
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet content */
+#define SP_MPEG_VS_INFOFRAME_DATA_BASE	0xe4
+
+/* General Control Packet Register */
+#define SP_GENERAL_CTRL_PACKET_REG	0x9f
+#define SP_CLEAR_AVMUTE			BIT(4)
+#define SP_SET_AVMUTE			BIT(0)
+
+/***************************************************************/
+/* Register definitions for TX_P1                              */
+/***************************************************************/
+
+/* DP TX Link Training Control Register */
+#define SP_DP_TX_LT_CTRL0_REG		0x30
+
+/* DP 1.2 Link Training 80-bit Pattern Register */
+#define SP_DP_LT_80BIT_PATTERN0_REG	0x80
+#define SP_DP_LT_80BIT_PATTERN_REG_NUM	10
+
+/* Audio Interface Control Register 0 */
+#define SP_AUD_INTERFACE_CTRL0_REG	0x5f
+#define SP_AUD_INTERFACE_DISABLE	0x80
+
+/* Audio Interface Control Register 2 */
+#define SP_AUD_INTERFACE_CTRL2_REG	0x60
+#define SP_M_AUD_ADJUST_ST		0x04
+
+/* Audio Interface Control Register 3 */
+#define SP_AUD_INTERFACE_CTRL3_REG	0x62
+
+/* Audio Interface Control Register 4 */
+#define SP_AUD_INTERFACE_CTRL4_REG	0x67
+
+/* Audio Interface Control Register 5 */
+#define SP_AUD_INTERFACE_CTRL5_REG	0x68
+
+/* Audio Interface Control Register 6 */
+#define SP_AUD_INTERFACE_CTRL6_REG	0x69
+
+/* Firmware Version Register */
+#define SP_FW_VER_REG			0xb7
+
+#endif
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
new file mode 100644
index 0000000..fe40bab
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2016, Analogix Semiconductor.
+ *
+ * Based on anx7808 driver obtained from chromeos with copyright:
+ * Copyright(c) 2013, Google Inc.
+ */
+#include <linux/regmap.h>
+
+#include <drm/drm.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "analogix-i2c-dptx.h"
+
+#define AUX_WAIT_TIMEOUT_MS	15
+#define AUX_CH_BUFFER_SIZE	16
+
+static int anx_i2c_dp_clear_bits(struct regmap *map, u8 reg, u8 mask)
+{
+	return regmap_update_bits(map, reg, mask, 0);
+}
+
+static bool anx_dp_aux_op_finished(struct regmap *map_dptx)
+{
+	unsigned int value;
+	int err;
+
+	err = regmap_read(map_dptx, SP_DP_AUX_CH_CTRL2_REG, &value);
+	if (err < 0)
+		return false;
+
+	return (value & SP_AUX_EN) == 0;
+}
+
+static int anx_dp_aux_wait(struct regmap *map_dptx)
+{
+	unsigned long timeout;
+	unsigned int status;
+	int err;
+
+	timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
+
+	while (!anx_dp_aux_op_finished(map_dptx)) {
+		if (time_after(jiffies, timeout)) {
+			if (!anx_dp_aux_op_finished(map_dptx)) {
+				DRM_ERROR("Timed out waiting AUX to finish\n");
+				return -ETIMEDOUT;
+			}
+
+			break;
+		}
+
+		usleep_range(1000, 2000);
+	}
+
+	/* Read the AUX channel access status */
+	err = regmap_read(map_dptx, SP_AUX_CH_STATUS_REG, &status);
+	if (err < 0) {
+		DRM_ERROR("Failed to read from AUX channel: %d\n", err);
+		return err;
+	}
+
+	if (status & SP_AUX_STATUS) {
+		DRM_ERROR("Failed to wait for AUX channel (status: %02x)\n",
+			  status);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int anx_dp_aux_address(struct regmap *map_dptx, unsigned int addr)
+{
+	int err;
+
+	err = regmap_write(map_dptx, SP_AUX_ADDR_7_0_REG, addr & 0xff);
+	if (err)
+		return err;
+
+	err = regmap_write(map_dptx, SP_AUX_ADDR_15_8_REG,
+			   (addr & 0xff00) >> 8);
+	if (err)
+		return err;
+
+	/*
+	 * DP AUX CH Address Register #2, only update bits[3:0]
+	 * [7:4] RESERVED
+	 * [3:0] AUX_ADDR[19:16], Register control AUX CH address.
+	 */
+	err = regmap_update_bits(map_dptx, SP_AUX_ADDR_19_16_REG,
+				 SP_AUX_ADDR_19_16_MASK,
+				 (addr & 0xf0000) >> 16);
+
+	if (err)
+		return err;
+
+	return 0;
+}
+
+ssize_t anx_dp_aux_transfer(struct regmap *map_dptx,
+				struct drm_dp_aux_msg *msg)
+{
+	u8 ctrl1 = msg->request;
+	u8 ctrl2 = SP_AUX_EN;
+	u8 *buffer = msg->buffer;
+	int err;
+
+	/* The DP AUX transmit and receive buffer has 16 bytes. */
+	if (WARN_ON(msg->size > AUX_CH_BUFFER_SIZE))
+		return -E2BIG;
+
+	/* Zero-sized messages specify address-only transactions. */
+	if (msg->size < 1)
+		ctrl2 |= SP_ADDR_ONLY;
+	else	/* For non-zero-sized set the length field. */
+		ctrl1 |= (msg->size - 1) << SP_AUX_LENGTH_SHIFT;
+
+	if ((msg->size > 0) && ((msg->request & DP_AUX_I2C_READ) == 0)) {
+		/* For write transactions, copy the payload into the data buffer */
+		err = regmap_bulk_write(map_dptx,
+					SP_DP_BUF_DATA0_REG, buffer,
+					msg->size);
+		if (err)
+			return err;
+	}
+
+	/* Write address and request */
+	err = anx_dp_aux_address(map_dptx, msg->address);
+	if (err)
+		return err;
+
+	err = regmap_write(map_dptx, SP_DP_AUX_CH_CTRL1_REG, ctrl1);
+	if (err)
+		return err;
+
+	/* Start transaction */
+	err = regmap_update_bits(map_dptx, SP_DP_AUX_CH_CTRL2_REG,
+				 SP_ADDR_ONLY | SP_AUX_EN, ctrl2);
+	if (err)
+		return err;
+
+	err = anx_dp_aux_wait(map_dptx);
+	if (err)
+		return err;
+
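+	/* A completed transfer is reported back to the DRM core as an ACK */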
+	msg->reply = DP_AUX_I2C_REPLY_ACK;
+
+	if ((msg->size > 0) && (msg->request & DP_AUX_I2C_READ)) {
+		/* Read values from data buffer */
+		err = regmap_bulk_read(map_dptx,
+				       SP_DP_BUF_DATA0_REG, buffer,
+				       msg->size);
+		if (err)
+			return err;
+	}
+
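+	/* Clear the address-only flag so it does not affect later transfers */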
+	err = anx_i2c_dp_clear_bits(map_dptx, SP_DP_AUX_CH_CTRL2_REG,
+				    SP_ADDR_ONLY);
+	if (err)
+		return err;
+
+	return msg->size;
+}
+EXPORT_SYMBOL_GPL(anx_dp_aux_transfer);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.h b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.h
new file mode 100644
index 0000000..663c4be
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2016, Analogix Semiconductor.
+ *
+ * Based on anx7808 driver obtained from chromeos with copyright:
+ * Copyright(c) 2013, Google Inc.
+ */
+#ifndef _ANALOGIX_I2C_DPTX_H_
+#define _ANALOGIX_I2C_DPTX_H_
+
+/***************************************************************/
+/* Register definitions for TX_P0                              */
+/***************************************************************/
+
+/* HDCP Status Register */
+#define SP_TX_HDCP_STATUS_REG		0x00
+#define SP_AUTH_FAIL			BIT(5)
+#define SP_AUTHEN_PASS			BIT(1)
+
+/* HDCP Control Register 0 */
+#define SP_HDCP_CTRL0_REG		0x01
+#define SP_RX_REPEATER			BIT(6)
+#define SP_RE_AUTH			BIT(5)
+#define SP_SW_AUTH_OK			BIT(4)
+#define SP_HARD_AUTH_EN			BIT(3)
+#define SP_HDCP_ENC_EN			BIT(2)
+#define SP_BKSV_SRM_PASS		BIT(1)
+#define SP_KSVLIST_VLD			BIT(0)
+/* HDCP Function Enabled */
+#define SP_HDCP_FUNCTION_ENABLED	(BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* HDCP Receiver BSTATUS Register 0 */
+#define	SP_HDCP_RX_BSTATUS0_REG		0x1b
+/* HDCP Receiver BSTATUS Register 1 */
+#define	SP_HDCP_RX_BSTATUS1_REG		0x1c
+
+/* HDCP Embedded "Blue Screen" Content Registers */
+#define SP_HDCP_VID0_BLUE_SCREEN_REG	0x2c
+#define SP_HDCP_VID1_BLUE_SCREEN_REG	0x2d
+#define SP_HDCP_VID2_BLUE_SCREEN_REG	0x2e
+
+/* HDCP Wait R0 Timing Register */
+#define SP_HDCP_WAIT_R0_TIME_REG	0x40
+
+/* HDCP Link Integrity Check Timer Register */
+#define SP_HDCP_LINK_CHECK_TIMER_REG	0x41
+
+/* HDCP Repeater Ready Wait Timer Register */
+#define SP_HDCP_RPTR_RDY_WAIT_TIME_REG	0x42
+
+/* HDCP Auto Timer Register */
+#define SP_HDCP_AUTO_TIMER_REG		0x51
+
+/* HDCP Key Status Register */
+#define SP_HDCP_KEY_STATUS_REG		0x5e
+
+/* HDCP Key Command Register */
+#define SP_HDCP_KEY_COMMAND_REG		0x5f
+#define SP_DISABLE_SYNC_HDCP		BIT(2)
+
+/* OTP Memory Key Protection Registers */
+#define SP_OTP_KEY_PROTECT1_REG		0x60
+#define SP_OTP_KEY_PROTECT2_REG		0x61
+#define SP_OTP_KEY_PROTECT3_REG		0x62
+#define SP_OTP_PSW1			0xa2
+#define SP_OTP_PSW2			0x7e
+#define SP_OTP_PSW3			0xc6
+
+/* DP System Control Registers */
+#define SP_DP_SYSTEM_CTRL_BASE		(0x80 - 1)
+/* Bits for DP System Control Register 2 */
+#define SP_CHA_STA			BIT(2)
+/* Bits for DP System Control Register 3 */
+#define SP_HPD_STATUS			BIT(6)
+#define SP_HPD_FORCE			BIT(5)
+#define SP_HPD_CTRL			BIT(4)
+#define SP_STRM_VALID			BIT(2)
+#define SP_STRM_FORCE			BIT(1)
+#define SP_STRM_CTRL			BIT(0)
+/* Bits for DP System Control Register 4 */
+#define SP_ENHANCED_MODE		BIT(3)
+
+/* DP Video Control Register */
+#define SP_DP_VIDEO_CTRL_REG		0x84
+#define SP_COLOR_F_MASK			0x06
+#define SP_COLOR_F_SHIFT		1
+#define SP_BPC_MASK			0xe0
+#define SP_BPC_SHIFT			5
+#  define SP_BPC_6BITS			0x00
+#  define SP_BPC_8BITS			0x01
+#  define SP_BPC_10BITS			0x02
+#  define SP_BPC_12BITS			0x03
+
+/* DP Audio Control Register */
+#define SP_DP_AUDIO_CTRL_REG		0x87
+#define SP_AUD_EN			BIT(0)
+
+/* 10us Pulse Generate Timer Registers */
+#define SP_I2C_GEN_10US_TIMER0_REG	0x88
+#define SP_I2C_GEN_10US_TIMER1_REG	0x89
+
+/* Packet Send Control Register */
+#define SP_PACKET_SEND_CTRL_REG		0x90
+#define SP_AUD_IF_UP			BIT(7)
+#define SP_AVI_IF_UD			BIT(6)
+#define SP_MPEG_IF_UD			BIT(5)
+#define SP_SPD_IF_UD			BIT(4)
+#define SP_AUD_IF_EN			BIT(3)
+#define SP_AVI_IF_EN			BIT(2)
+#define SP_MPEG_IF_EN			BIT(1)
+#define SP_SPD_IF_EN			BIT(0)
+
+/* DP HDCP Control Register */
+#define SP_DP_HDCP_CTRL_REG		0x92
+#define SP_AUTO_EN			BIT(7)
+#define SP_AUTO_START			BIT(5)
+#define SP_LINK_POLLING			BIT(1)
+
+/* DP Main Link Bandwidth Setting Register */
+#define SP_DP_MAIN_LINK_BW_SET_REG	0xa0
+#define SP_LINK_BW_SET_MASK		0x1f
+#define SP_INITIAL_SLIM_M_AUD_SEL	BIT(5)
+
+/* DP Lane Count Setting Register */
+#define SP_DP_LANE_COUNT_SET_REG	0xa1
+
+/* DP Training Pattern Set Register */
+#define SP_DP_TRAINING_PATTERN_SET_REG	0xa2
+
+/* DP Lane 0 Link Training Control Register */
+#define SP_DP_LANE0_LT_CTRL_REG		0xa3
+#define SP_TX_SW_SET_MASK		0x1b
+#define SP_MAX_PRE_REACH		BIT(5)
+#define SP_MAX_DRIVE_REACH		BIT(4)
+#define SP_PRE_EMP_LEVEL1		BIT(3)
+#define SP_DRVIE_CURRENT_LEVEL1		BIT(0)
+
+/* DP Link Training Control Register */
+#define SP_DP_LT_CTRL_REG		0xa8
+#define SP_DP_LT_INPROGRESS		0x80
+#define SP_LT_ERROR_TYPE_MASK		0x70
+#  define SP_LT_NO_ERROR		0x00
+#  define SP_LT_AUX_WRITE_ERROR		0x01
+#  define SP_LT_MAX_DRIVE_REACHED	0x02
+#  define SP_LT_WRONG_LANE_COUNT_SET	0x03
+#  define SP_LT_LOOP_SAME_5_TIME	0x04
+#  define SP_LT_CR_FAIL_IN_EQ		0x05
+#  define SP_LT_EQ_LOOP_5_TIME		0x06
+#define SP_LT_EN			BIT(0)
+
+/* DP CEP Training Control Registers */
+#define SP_DP_CEP_TRAINING_CTRL0_REG	0xa9
+#define SP_DP_CEP_TRAINING_CTRL1_REG	0xaa
+
+/* DP Debug Register 1 */
+#define SP_DP_DEBUG1_REG		0xb0
+#define SP_DEBUG_PLL_LOCK		BIT(4)
+#define SP_POLLING_EN			BIT(1)
+
+/* DP Polling Control Register */
+#define SP_DP_POLLING_CTRL_REG		0xb4
+#define SP_AUTO_POLLING_DISABLE		BIT(0)
+
+/* DP Link Debug Control Register */
+#define SP_DP_LINK_DEBUG_CTRL_REG	0xb8
+#define SP_M_VID_DEBUG			BIT(5)
+#define SP_NEW_PRBS7			BIT(4)
+#define SP_INSERT_ER			BIT(1)
+#define SP_PRBS31_EN			BIT(0)
+
+/* AUX Misc control Register */
+#define SP_AUX_MISC_CTRL_REG		0xbf
+
+/* DP PLL control Register */
+#define SP_DP_PLL_CTRL_REG		0xc7
+#define SP_PLL_RST			BIT(6)
+
+/* DP Analog Power Down Register */
+#define SP_DP_ANALOG_POWER_DOWN_REG	0xc8
+#define SP_CH0_PD			BIT(0)
+
+/* DP Misc Control Register */
+#define SP_DP_MISC_CTRL_REG		0xcd
+#define SP_EQ_TRAINING_LOOP		BIT(6)
+
+/* DP Extra I2C Device Address Register */
+#define SP_DP_EXTRA_I2C_DEV_ADDR_REG	0xce
+#define SP_I2C_STRETCH_DISABLE		BIT(7)
+
+#define SP_I2C_EXTRA_ADDR		0x50
+
+/* DP Downspread Control Register 1 */
+#define SP_DP_DOWNSPREAD_CTRL1_REG	0xd0
+
+/* DP M Value Calculation Control Register */
+#define SP_DP_M_CALCULATION_CTRL_REG	0xd9
+#define SP_M_GEN_CLK_SEL		BIT(0)
+
+/* AUX Channel Access Status Register */
+#define SP_AUX_CH_STATUS_REG		0xe0
+#define SP_AUX_STATUS			0x0f
+
+/* AUX Channel DEFER Control Register */
+#define SP_AUX_DEFER_CTRL_REG		0xe2
+#define SP_DEFER_CTRL_EN		BIT(7)
+
+/* DP Buffer Data Count Register */
+#define SP_BUF_DATA_COUNT_REG		0xe4
+#define SP_BUF_DATA_COUNT_MASK		0x1f
+#define SP_BUF_CLR			BIT(7)
+
+/* DP AUX Channel Control Register 1 */
+#define SP_DP_AUX_CH_CTRL1_REG		0xe5
+#define SP_AUX_TX_COMM_MASK		0x0f
+#define SP_AUX_LENGTH_MASK		0xf0
+#define SP_AUX_LENGTH_SHIFT		4
+
+/* DP AUX CH Address Register 0 */
+#define SP_AUX_ADDR_7_0_REG		0xe6
+
+/* DP AUX CH Address Register 1 */
+#define SP_AUX_ADDR_15_8_REG		0xe7
+
+/* DP AUX CH Address Register 2 */
+#define SP_AUX_ADDR_19_16_REG		0xe8
+#define SP_AUX_ADDR_19_16_MASK		0x0f
+
+/* DP AUX Channel Control Register 2 */
+#define SP_DP_AUX_CH_CTRL2_REG		0xe9
+#define SP_AUX_SEL_RXCM			BIT(6)
+#define SP_AUX_CHSEL			BIT(3)
+#define SP_AUX_PN_INV			BIT(2)
+#define SP_ADDR_ONLY			BIT(1)
+#define SP_AUX_EN			BIT(0)
+
+/* DP Video Stream Control InfoFrame Register */
+#define SP_DP_3D_VSC_CTRL_REG		0xea
+#define SP_INFO_FRAME_VSC_EN		BIT(0)
+
+/* DP Video Stream Data Byte 1 Register */
+#define SP_DP_VSC_DB1_REG		0xeb
+
+/* DP AUX Channel Control Register 3 */
+#define SP_DP_AUX_CH_CTRL3_REG		0xec
+#define SP_WAIT_COUNTER_7_0_MASK	0xff
+
+/* DP AUX Channel Control Register 4 */
+#define SP_DP_AUX_CH_CTRL4_REG		0xed
+
+/* DP AUX Buffer Data Registers */
+#define SP_DP_BUF_DATA0_REG		0xf0
+
+ssize_t anx_dp_aux_transfer(struct regmap *map_dptx,
+				struct drm_dp_aux_msg *msg);
+
+#endif
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-txcommon.h b/drivers/gpu/drm/bridge/analogix/analogix-i2c-txcommon.h
new file mode 100644
index 0000000..3c84349
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-txcommon.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2016, Analogix Semiconductor. All rights reserved.
+ */
+#ifndef _ANALOGIX_I2C_TXCOMMON_H_
+#define _ANALOGIX_I2C_TXCOMMON_H_
+
+/***************************************************************/
+/* Register definitions for TX_P2                              */
+/***************************************************************/
+
+/*
+ * Core Register Definitions
+ */
+
+/* Device ID Low Byte Register */
+#define SP_DEVICE_IDL_REG		0x02
+
+/* Device ID High Byte Register */
+#define SP_DEVICE_IDH_REG		0x03
+
+/* Device version register */
+#define SP_DEVICE_VERSION_REG		0x04
+
+/* Power Down Control Register */
+#define SP_POWERDOWN_CTRL_REG		0x05
+#define SP_REGISTER_PD			BIT(7)
+#define SP_HDCP_PD			BIT(5)
+#define SP_AUDIO_PD			BIT(4)
+#define SP_VIDEO_PD			BIT(3)
+#define SP_LINK_PD			BIT(2)
+#define SP_TOTAL_PD			BIT(1)
+
+/* Reset Control Register 1 */
+#define SP_RESET_CTRL1_REG		0x06
+#define SP_MISC_RST			BIT(7)
+#define SP_VIDCAP_RST			BIT(6)
+#define SP_VIDFIF_RST			BIT(5)
+#define SP_AUDFIF_RST			BIT(4)
+#define SP_AUDCAP_RST			BIT(3)
+#define SP_HDCP_RST			BIT(2)
+#define SP_SW_RST			BIT(1)
+#define SP_HW_RST			BIT(0)
+
+/* Reset Control Register 2 */
+#define SP_RESET_CTRL2_REG		0x07
+#define SP_AUX_RST			BIT(2)
+#define SP_SERDES_FIFO_RST		BIT(1)
+#define SP_I2C_REG_RST			BIT(0)
+
+/* Video Control Register 1 */
+#define SP_VID_CTRL1_REG		0x08
+#define SP_VIDEO_EN			BIT(7)
+#define SP_VIDEO_MUTE			BIT(2)
+#define SP_DE_GEN			BIT(1)
+#define SP_DEMUX			BIT(0)
+
+/* Video Control Register 2 */
+#define SP_VID_CTRL2_REG		0x09
+#define SP_IN_COLOR_F_MASK		0x03
+#define SP_IN_YC_BIT_SEL		BIT(2)
+#define SP_IN_BPC_MASK			0x70
+#define SP_IN_BPC_SHIFT			4
+#  define SP_IN_BPC_12BIT		0x03
+#  define SP_IN_BPC_10BIT		0x02
+#  define SP_IN_BPC_8BIT		0x01
+#  define SP_IN_BPC_6BIT		0x00
+#define SP_IN_D_RANGE			BIT(7)
+
+/* Video Control Register 3 */
+#define SP_VID_CTRL3_REG		0x0a
+#define SP_HPD_OUT			BIT(6)
+
+/* Video Control Register 5 */
+#define SP_VID_CTRL5_REG		0x0c
+#define SP_CSC_STD_SEL			BIT(7)
+#define SP_XVYCC_RNG_LMT		BIT(6)
+#define SP_RANGE_Y2R			BIT(5)
+#define SP_CSPACE_Y2R			BIT(4)
+#define SP_RGB_RNG_LMT			BIT(3)
+#define SP_Y_RNG_LMT			BIT(2)
+#define SP_RANGE_R2Y			BIT(1)
+#define SP_CSPACE_R2Y			BIT(0)
+
+/* Video Control Register 6 */
+#define SP_VID_CTRL6_REG		0x0d
+#define SP_TEST_PATTERN_EN		BIT(7)
+#define SP_VIDEO_PROCESS_EN		BIT(6)
+#define SP_VID_US_MODE			BIT(3)
+#define SP_VID_DS_MODE			BIT(2)
+#define SP_UP_SAMPLE			BIT(1)
+#define SP_DOWN_SAMPLE			BIT(0)
+
+/* Video Control Register 8 */
+#define SP_VID_CTRL8_REG		0x0f
+#define SP_VID_VRES_TH			BIT(0)
+
+/* Total Line Status Low Byte Register */
+#define SP_TOTAL_LINE_STAL_REG		0x24
+
+/* Total Line Status High Byte Register */
+#define SP_TOTAL_LINE_STAH_REG		0x25
+
+/* Active Line Status Low Byte Register */
+#define SP_ACT_LINE_STAL_REG		0x26
+
+/* Active Line Status High Byte Register */
+#define SP_ACT_LINE_STAH_REG		0x27
+
+/* Vertical Front Porch Status Register */
+#define SP_V_F_PORCH_STA_REG		0x28
+
+/* Vertical SYNC Width Status Register */
+#define SP_V_SYNC_STA_REG		0x29
+
+/* Vertical Back Porch Status Register */
+#define SP_V_B_PORCH_STA_REG		0x2a
+
+/* Total Pixel Status Low Byte Register */
+#define SP_TOTAL_PIXEL_STAL_REG		0x2b
+
+/* Total Pixel Status High Byte Register */
+#define SP_TOTAL_PIXEL_STAH_REG		0x2c
+
+/* Active Pixel Status Low Byte Register */
+#define SP_ACT_PIXEL_STAL_REG		0x2d
+
+/* Active Pixel Status High Byte Register */
+#define SP_ACT_PIXEL_STAH_REG		0x2e
+
+/* Horizontal Front Porch Status Low Byte Register */
+#define SP_H_F_PORCH_STAL_REG		0x2f
+
+/* Horizontal Front Porch Status High Byte Register */
+#define SP_H_F_PORCH_STAH_REG		0x30
+
+/* Horizontal SYNC Width Status Low Byte Register */
+#define SP_H_SYNC_STAL_REG		0x31
+
+/* Horizontal SYNC Width Status High Byte Register */
+#define SP_H_SYNC_STAH_REG		0x32
+
+/* Horizontal Back Porch Status Low Byte Register */
+#define SP_H_B_PORCH_STAL_REG		0x33
+
+/* Horizontal Back Porch Status High Byte Register */
+#define SP_H_B_PORCH_STAH_REG		0x34
+
+/* InfoFrame AVI Packet DB1 Register */
+#define SP_INFOFRAME_AVI_DB1_REG	0x70
+
+/* Bit Control Specific Register */
+#define SP_BIT_CTRL_SPECIFIC_REG	0x80
+#define SP_BIT_CTRL_SELECT_SHIFT	1
+#define SP_ENABLE_BIT_CTRL		BIT(0)
+
+/* InfoFrame Audio Packet DB1 Register */
+#define SP_INFOFRAME_AUD_DB1_REG	0x83
+
+/* InfoFrame MPEG Packet DB1 Register */
+#define SP_INFOFRAME_MPEG_DB1_REG	0xb0
+
+/* Audio Channel Status Registers */
+#define SP_AUD_CH_STATUS_BASE		0xd0
+
+/* Audio Channel Num Register 5 */
+#define SP_I2S_CHANNEL_NUM_MASK		0xe0
+#  define SP_I2S_CH_NUM_1		(0x00 << 5)
+#  define SP_I2S_CH_NUM_2		(0x01 << 5)
+#  define SP_I2S_CH_NUM_3		(0x02 << 5)
+#  define SP_I2S_CH_NUM_4		(0x03 << 5)
+#  define SP_I2S_CH_NUM_5		(0x04 << 5)
+#  define SP_I2S_CH_NUM_6		(0x05 << 5)
+#  define SP_I2S_CH_NUM_7		(0x06 << 5)
+#  define SP_I2S_CH_NUM_8		(0x07 << 5)
+#define SP_EXT_VUCP			BIT(2)
+#define SP_VBIT				BIT(1)
+#define SP_AUDIO_LAYOUT			BIT(0)
+
+/* Analog Debug Register 1 */
+#define SP_ANALOG_DEBUG1_REG		0xdc
+
+/* Analog Debug Register 2 */
+#define SP_ANALOG_DEBUG2_REG		0xdd
+#define SP_FORCE_SW_OFF_BYPASS		0x20
+#define SP_XTAL_FRQ			0x1c
+#  define SP_XTAL_FRQ_19M2		(0x00 << 2)
+#  define SP_XTAL_FRQ_24M		(0x01 << 2)
+#  define SP_XTAL_FRQ_25M		(0x02 << 2)
+#  define SP_XTAL_FRQ_26M		(0x03 << 2)
+#  define SP_XTAL_FRQ_27M		(0x04 << 2)
+#  define SP_XTAL_FRQ_38M4		(0x05 << 2)
+#  define SP_XTAL_FRQ_52M		(0x06 << 2)
+#define SP_POWERON_TIME_1P5MS		0x03
+
+/* Analog Control 0 Register */
+#define SP_ANALOG_CTRL0_REG		0xe1
+
+/* Common Interrupt Status Register 1 */
+#define SP_COMMON_INT_STATUS_BASE	(0xf1 - 1)
+#define SP_PLL_LOCK_CHG			0x40
+
+/* Common Interrupt Status Register 2 */
+#define SP_COMMON_INT_STATUS2		0xf2
+#define SP_HDCP_AUTH_CHG		BIT(1)
+#define SP_HDCP_AUTH_DONE		BIT(0)
+
+#define SP_HDCP_LINK_CHECK_FAIL		BIT(0)
+
+/* Common Interrupt Status Register 4 */
+#define SP_COMMON_INT_STATUS4_REG	0xf4
+#define SP_HPD_IRQ			BIT(6)
+#define SP_HPD_ESYNC_ERR		BIT(4)
+#define SP_HPD_CHG			BIT(2)
+#define SP_HPD_LOST			BIT(1)
+#define SP_HPD_PLUG			BIT(0)
+
+/* DP Interrupt Status Register */
+#define SP_DP_INT_STATUS1_REG		0xf7
+#define SP_TRAINING_FINISH		BIT(5)
+#define SP_POLLING_ERR			BIT(4)
+
+/* Common Interrupt Mask Register */
+#define SP_COMMON_INT_MASK_BASE		(0xf8 - 1)
+
+#define SP_COMMON_INT_MASK4_REG		0xfb
+
+/* DP Interrupts Mask Register */
+#define SP_DP_INT_MASK1_REG		0xfe
+
+/* Interrupt Control Register */
+#define SP_INT_CTRL_REG			0xff
+
+#endif /* _ANALOGIX_I2C_TXCOMMON_H_ */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index bb411fe..6effe53 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1111,7 +1111,7 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
 	int ret, num_modes = 0;
 
 	if (dp->plat_data->panel) {
-		num_modes += drm_panel_get_modes(dp->plat_data->panel);
+		num_modes += drm_panel_get_modes(dp->plat_data->panel, connector);
 	} else {
 		ret = analogix_dp_prepare_panel(dp, true, false);
 		if (ret) {
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index 3a5bd4e..b7c97f0 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -512,7 +512,7 @@ static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
 	struct cdns_dsi_output *output = &dsi->output;
 	unsigned int tmp;
 	bool sync_pulse = false;
-	int bpp, nlanes;
+	int bpp;
 
 	memset(dsi_cfg, 0, sizeof(*dsi_cfg));
 
@@ -520,7 +520,6 @@ static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
 		sync_pulse = true;
 
 	bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
-	nlanes = output->dev->lanes;
 
 	if (mode_valid_check)
 		tmp = mode->htotal -
@@ -785,13 +784,12 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
 	unsigned long tx_byte_period;
 	struct cdns_dsi_cfg dsi_cfg;
 	u32 tmp, reg_wakeup, div;
-	int bpp, nlanes;
+	int nlanes;
 
 	if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
 		return;
 
 	mode = &bridge->encoder->crtc->state->adjusted_mode;
-	bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
 	nlanes = output->dev->lanes;
 
 	WARN_ON_ONCE(cdns_dsi_check_conf(dsi, mode, &dsi_cfg, false));
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
new file mode 100644
index 0000000..5f04cc1
--- /dev/null
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2019 Renesas Electronics Corporation
+ * Copyright (C) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_panel.h>
+
+struct lvds_codec {
+	struct drm_bridge bridge;
+	struct drm_bridge *panel_bridge;
+	struct gpio_desc *powerdown_gpio;
+	u32 connector_type;
+};
+
+static int lvds_codec_attach(struct drm_bridge *bridge)
+{
+	struct lvds_codec *lvds_codec = container_of(bridge,
+						     struct lvds_codec, bridge);
+
+	return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
+				 bridge);
+}
+
+static void lvds_codec_enable(struct drm_bridge *bridge)
+{
+	struct lvds_codec *lvds_codec = container_of(bridge,
+						     struct lvds_codec, bridge);
+
+	if (lvds_codec->powerdown_gpio)
+		gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0);
+}
+
+static void lvds_codec_disable(struct drm_bridge *bridge)
+{
+	struct lvds_codec *lvds_codec = container_of(bridge,
+						     struct lvds_codec, bridge);
+
+	if (lvds_codec->powerdown_gpio)
+		gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1);
+}
+
+static struct drm_bridge_funcs funcs = {
+	.attach = lvds_codec_attach,
+	.enable = lvds_codec_enable,
+	.disable = lvds_codec_disable,
+};
+
+static int lvds_codec_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *panel_node;
+	struct drm_panel *panel;
+	struct lvds_codec *lvds_codec;
+
+	lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
+	if (!lvds_codec)
+		return -ENOMEM;
+
+	lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev);
+	lvds_codec->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown",
+							     GPIOD_OUT_HIGH);
+	if (IS_ERR(lvds_codec->powerdown_gpio)) {
+		int err = PTR_ERR(lvds_codec->powerdown_gpio);
+
+		if (err != -EPROBE_DEFER)
+			dev_err(dev, "powerdown GPIO failure: %d\n", err);
+		return err;
+	}
+
+	/* Locate the panel DT node. */
+	panel_node = of_graph_get_remote_node(dev->of_node, 1, 0);
+	if (!panel_node) {
+		dev_dbg(dev, "panel DT node not found\n");
+		return -ENXIO;
+	}
+
+	panel = of_drm_find_panel(panel_node);
+	of_node_put(panel_node);
+	if (IS_ERR(panel)) {
+		dev_dbg(dev, "panel not found, deferring probe\n");
+		return PTR_ERR(panel);
+	}
+
+	lvds_codec->panel_bridge =
+		devm_drm_panel_bridge_add_typed(dev, panel,
+						lvds_codec->connector_type);
+	if (IS_ERR(lvds_codec->panel_bridge))
+		return PTR_ERR(lvds_codec->panel_bridge);
+
+	/*
+	 * The panel_bridge bridge is attached to the panel's of_node,
+	 * but we need a bridge attached to our of_node for our user
+	 * to look up.
+	 */
+	lvds_codec->bridge.of_node = dev->of_node;
+	lvds_codec->bridge.funcs = &funcs;
+	drm_bridge_add(&lvds_codec->bridge);
+
+	platform_set_drvdata(pdev, lvds_codec);
+
+	return 0;
+}
+
+static int lvds_codec_remove(struct platform_device *pdev)
+{
+	struct lvds_codec *lvds_codec = platform_get_drvdata(pdev);
+
+	drm_bridge_remove(&lvds_codec->bridge);
+
+	return 0;
+}
+
+static const struct of_device_id lvds_codec_match[] = {
+	{
+		.compatible = "lvds-decoder",
+		.data = (void *)DRM_MODE_CONNECTOR_DPI,
+	},
+	{
+		.compatible = "lvds-encoder",
+		.data = (void *)DRM_MODE_CONNECTOR_LVDS,
+	},
+	{
+		.compatible = "thine,thc63lvdm83d",
+		.data = (void *)DRM_MODE_CONNECTOR_LVDS,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, lvds_codec_match);
+
+static struct platform_driver lvds_codec_driver = {
+	.probe	= lvds_codec_probe,
+	.remove	= lvds_codec_remove,
+	.driver		= {
+		.name		= "lvds-codec",
+		.of_match_table	= lvds_codec_match,
+	},
+};
+module_platform_driver(lvds_codec_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("LVDS encoders and decoders");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
deleted file mode 100644
index e2132a8..0000000
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/platform_device.h>
-
-#include <drm/drm_bridge.h>
-#include <drm/drm_panel.h>
-
-struct lvds_encoder {
-	struct drm_bridge bridge;
-	struct drm_bridge *panel_bridge;
-	struct gpio_desc *powerdown_gpio;
-};
-
-static int lvds_encoder_attach(struct drm_bridge *bridge)
-{
-	struct lvds_encoder *lvds_encoder = container_of(bridge,
-							 struct lvds_encoder,
-							 bridge);
-
-	return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge,
-				 bridge);
-}
-
-static void lvds_encoder_enable(struct drm_bridge *bridge)
-{
-	struct lvds_encoder *lvds_encoder = container_of(bridge,
-							 struct lvds_encoder,
-							 bridge);
-
-	if (lvds_encoder->powerdown_gpio)
-		gpiod_set_value_cansleep(lvds_encoder->powerdown_gpio, 0);
-}
-
-static void lvds_encoder_disable(struct drm_bridge *bridge)
-{
-	struct lvds_encoder *lvds_encoder = container_of(bridge,
-							 struct lvds_encoder,
-							 bridge);
-
-	if (lvds_encoder->powerdown_gpio)
-		gpiod_set_value_cansleep(lvds_encoder->powerdown_gpio, 1);
-}
-
-static struct drm_bridge_funcs funcs = {
-	.attach = lvds_encoder_attach,
-	.enable = lvds_encoder_enable,
-	.disable = lvds_encoder_disable,
-};
-
-static int lvds_encoder_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct device_node *port;
-	struct device_node *endpoint;
-	struct device_node *panel_node;
-	struct drm_panel *panel;
-	struct lvds_encoder *lvds_encoder;
-
-	lvds_encoder = devm_kzalloc(dev, sizeof(*lvds_encoder), GFP_KERNEL);
-	if (!lvds_encoder)
-		return -ENOMEM;
-
-	lvds_encoder->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown",
-							       GPIOD_OUT_HIGH);
-	if (IS_ERR(lvds_encoder->powerdown_gpio)) {
-		int err = PTR_ERR(lvds_encoder->powerdown_gpio);
-
-		if (err != -EPROBE_DEFER)
-			dev_err(dev, "powerdown GPIO failure: %d\n", err);
-		return err;
-	}
-
-	/* Locate the panel DT node. */
-	port = of_graph_get_port_by_id(dev->of_node, 1);
-	if (!port) {
-		dev_dbg(dev, "port 1 not found\n");
-		return -ENXIO;
-	}
-
-	endpoint = of_get_child_by_name(port, "endpoint");
-	of_node_put(port);
-	if (!endpoint) {
-		dev_dbg(dev, "no endpoint for port 1\n");
-		return -ENXIO;
-	}
-
-	panel_node = of_graph_get_remote_port_parent(endpoint);
-	of_node_put(endpoint);
-	if (!panel_node) {
-		dev_dbg(dev, "no remote endpoint for port 1\n");
-		return -ENXIO;
-	}
-
-	panel = of_drm_find_panel(panel_node);
-	of_node_put(panel_node);
-	if (IS_ERR(panel)) {
-		dev_dbg(dev, "panel not found, deferring probe\n");
-		return PTR_ERR(panel);
-	}
-
-	lvds_encoder->panel_bridge =
-		devm_drm_panel_bridge_add_typed(dev, panel,
-						DRM_MODE_CONNECTOR_LVDS);
-	if (IS_ERR(lvds_encoder->panel_bridge))
-		return PTR_ERR(lvds_encoder->panel_bridge);
-
-	/* The panel_bridge bridge is attached to the panel's of_node,
-	 * but we need a bridge attached to our of_node for our user
-	 * to look up.
-	 */
-	lvds_encoder->bridge.of_node = dev->of_node;
-	lvds_encoder->bridge.funcs = &funcs;
-	drm_bridge_add(&lvds_encoder->bridge);
-
-	platform_set_drvdata(pdev, lvds_encoder);
-
-	return 0;
-}
-
-static int lvds_encoder_remove(struct platform_device *pdev)
-{
-	struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev);
-
-	drm_bridge_remove(&lvds_encoder->bridge);
-
-	return 0;
-}
-
-static const struct of_device_id lvds_encoder_match[] = {
-	{ .compatible = "lvds-encoder" },
-	{ .compatible = "thine,thc63lvdm83d" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, lvds_encoder_match);
-
-static struct platform_driver lvds_encoder_driver = {
-	.probe	= lvds_encoder_probe,
-	.remove	= lvds_encoder_remove,
-	.driver		= {
-		.name		= "lvds-encoder",
-		.of_match_table	= lvds_encoder_match,
-	},
-};
-module_platform_driver(lvds_encoder_driver);
-
-MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
-MODULE_DESCRIPTION("Transparent parallel to LVDS encoder");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index f4e293e..f66777e 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -37,7 +37,7 @@ static int panel_bridge_connector_get_modes(struct drm_connector *connector)
 	struct panel_bridge *panel_bridge =
 		drm_connector_to_panel_bridge(connector);
 
-	return drm_panel_get_modes(panel_bridge->panel);
+	return drm_panel_get_modes(panel_bridge->panel, connector);
 }
 
 static const struct drm_connector_helper_funcs
@@ -289,3 +289,21 @@ struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
 	return bridge;
 }
 EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
+
+/**
+ * drm_panel_bridge_connector - return the connector for the panel bridge
+ *
+ * drm_panel_bridge creates the connector.
+ * This function gives external access to the connector.
+ *
+ * Returns: Pointer to drm_connector
+ */
+struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge)
+{
+	struct panel_bridge *panel_bridge;
+
+	panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+	return &panel_bridge->connector;
+}
+EXPORT_SYMBOL(drm_panel_bridge_connector);
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index b7a72df..10c47c00 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -461,7 +461,7 @@ static int ps8622_get_modes(struct drm_connector *connector)
 
 	ps8622 = connector_to_ps8622(connector);
 
-	return drm_panel_get_modes(ps8622->panel);
+	return drm_panel_get_modes(ps8622->panel, connector);
 }
 
 static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index 2b75397..dd56996 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -291,7 +291,7 @@ static irqreturn_t snd_dw_hdmi_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static struct snd_pcm_hardware dw_hdmi_hw = {
+static const struct snd_pcm_hardware dw_hdmi_hw = {
 	.info = SNDRV_PCM_INFO_INTERLEAVED |
 		SNDRV_PCM_INFO_BLOCK_TRANSFER |
 		SNDRV_PCM_INFO_MMAP |
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index b6e793b..b18351b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -719,7 +719,15 @@ static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi,
 
 static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi)
 {
+	const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
+	struct dw_mipi_dsi_dphy_timing timing;
 	u32 hw_version;
+	int ret;
+
+	ret = phy_ops->get_timing(dsi->plat_data->priv_data,
+				  dsi->lane_mbps, &timing);
+	if (ret)
+		DRM_DEV_ERROR(dsi->dev, "Retrieving phy timings failed\n");
 
 	/*
 	 * TODO dw drv improvements
@@ -732,16 +740,20 @@ static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi)
 	hw_version = dsi_read(dsi, DSI_VERSION) & VERSION;
 
 	if (hw_version >= HWVER_131) {
-		dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME_V131(0x40) |
-			  PHY_LP2HS_TIME_V131(0x40));
+		dsi_write(dsi, DSI_PHY_TMR_CFG,
+			  PHY_HS2LP_TIME_V131(timing.data_hs2lp) |
+			  PHY_LP2HS_TIME_V131(timing.data_lp2hs));
 		dsi_write(dsi, DSI_PHY_TMR_RD_CFG, MAX_RD_TIME_V131(10000));
 	} else {
-		dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME(0x40) |
-			  PHY_LP2HS_TIME(0x40) | MAX_RD_TIME(10000));
+		dsi_write(dsi, DSI_PHY_TMR_CFG,
+			  PHY_HS2LP_TIME(timing.data_hs2lp) |
+			  PHY_LP2HS_TIME(timing.data_lp2hs) |
+			  MAX_RD_TIME(10000));
 	}
 
-	dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG, PHY_CLKHS2LP_TIME(0x40)
-		  | PHY_CLKLP2HS_TIME(0x40));
+	dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG,
+		  PHY_CLKHS2LP_TIME(timing.clk_hs2lp) |
+		  PHY_CLKLP2HS_TIME(timing.clk_lp2hs));
 }
 
 static void dw_mipi_dsi_dphy_interface_config(struct dw_mipi_dsi *dsi)
@@ -798,9 +810,6 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
 	struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
 	const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
 
-	if (phy_ops->power_off)
-		phy_ops->power_off(dsi->plat_data->priv_data);
-
 	/*
 	 * Switch to command mode before panel-bridge post_disable &
 	 * panel unprepare.
@@ -817,6 +826,9 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
 	 */
 	dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
 
+	if (phy_ops->power_off)
+		phy_ops->power_off(dsi->plat_data->priv_data);
+
 	if (dsi->slave) {
 		dw_mipi_dsi_disable(dsi->slave);
 		clk_disable_unprepare(dsi->slave->pclk);
@@ -883,6 +895,9 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
 
 	/* Switch to cmd mode for panel-bridge pre_enable & panel prepare */
 	dw_mipi_dsi_set_mode(dsi, 0);
+
+	if (phy_ops->power_on)
+		phy_ops->power_on(dsi->plat_data->priv_data);
 }
 
 static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
@@ -899,15 +914,11 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
 static void dw_mipi_dsi_bridge_enable(struct drm_bridge *bridge)
 {
 	struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
-	const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
 
 	/* Switch to video mode for panel-bridge enable & panel enable */
 	dw_mipi_dsi_set_mode(dsi, MIPI_DSI_MODE_VIDEO);
 	if (dsi->slave)
 		dw_mipi_dsi_set_mode(dsi->slave, MIPI_DSI_MODE_VIDEO);
-
-	if (phy_ops->power_on)
-		phy_ops->power_on(dsi->plat_data->priv_data);
 }
 
 static enum drm_mode_status
@@ -991,7 +1002,8 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
 	dsi->dev = dev;
 	dsi->plat_data = plat_data;
 
-	if (!plat_data->phy_ops->init || !plat_data->phy_ops->get_lane_mbps) {
+	if (!plat_data->phy_ops->init || !plat_data->phy_ops->get_lane_mbps ||
+	    !plat_data->phy_ops->get_timing) {
 		DRM_ERROR("Phy not properly configured\n");
 		return ERR_PTR(-ENODEV);
 	}
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index db298f5..96207fc 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -282,7 +282,7 @@ static int tc358764_get_modes(struct drm_connector *connector)
 {
 	struct tc358764 *ctx = connector_to_tc358764(connector);
 
-	return drm_panel_get_modes(ctx->panel);
+	return drm_panel_get_modes(ctx->panel, connector);
 }
 
 static const
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8029478..3709e5a 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1346,7 +1346,7 @@ static int tc_connector_get_modes(struct drm_connector *connector)
 		return 0;
 	}
 
-	count = drm_panel_get_modes(tc->panel);
+	count = drm_panel_get_modes(tc->panel, connector);
 	if (count > 0)
 		return count;
 
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 43abf01..9a2dd98 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -206,7 +206,7 @@ static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector)
 {
 	struct ti_sn_bridge *pdata = connector_to_ti_sn_bridge(connector);
 
-	return drm_panel_get_modes(pdata->panel);
+	return drm_panel_get_modes(pdata->panel, connector);
 }
 
 static enum drm_mode_status
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 6e09f27..4c7ad46 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -212,7 +212,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 	if (!entry)
 		return -ENOMEM;
 
-	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+	pages = DIV_ROUND_UP(request->size, PAGE_SIZE);
 	type = (u32) request->type;
 	memory = agp_allocate_memory(dev->agp->bridge, pages, type);
 	if (!memory) {
@@ -325,7 +325,7 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
 	entry = drm_agp_lookup_entry(dev, request->handle);
 	if (!entry || entry->bound)
 		return -EINVAL;
-	page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
+	page = DIV_ROUND_UP(request->offset, PAGE_SIZE);
 	retcode = drm_bind_agp(entry->memory, page);
 	if (retcode)
 		return retcode;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 14aeaf7..d336915 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -251,7 +251,7 @@ EXPORT_SYMBOL(drm_atomic_state_clear);
  * @ref: This atomic state to deallocate
  *
  * This frees all memory associated with an atomic state, including all the
- * per-object state for planes, crtcs and connectors.
+ * per-object state for planes, CRTCs and connectors.
  */
 void __drm_atomic_state_free(struct kref *ref)
 {
@@ -272,12 +272,12 @@ void __drm_atomic_state_free(struct kref *ref)
 EXPORT_SYMBOL(__drm_atomic_state_free);
 
 /**
- * drm_atomic_get_crtc_state - get crtc state
+ * drm_atomic_get_crtc_state - get CRTC state
  * @state: global atomic state object
- * @crtc: crtc to get state object for
+ * @crtc: CRTC to get state object for
  *
- * This function returns the crtc state for the given crtc, allocating it if
- * needed. It will also grab the relevant crtc lock to make sure that the state
+ * This function returns the CRTC state for the given CRTC, allocating it if
+ * needed. It will also grab the relevant CRTC lock to make sure that the state
  * is consistent.
  *
  * Returns:
@@ -688,10 +688,12 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
  * associated state struct &drm_private_state.
  *
  * Similar to userspace-exposed objects, private state structures can be
- * acquired by calling drm_atomic_get_private_obj_state(). Since this function
- * does not take care of locking, drivers should wrap it for each type of
- * private state object they have with the required call to drm_modeset_lock()
- * for the corresponding &drm_modeset_lock.
+ * acquired by calling drm_atomic_get_private_obj_state(). This also takes care
+ * of locking, hence drivers should not need to call drm_modeset_lock()
+ * directly. Sequencing of the actual hardware state commit is not handled;
+ * drivers might need to keep track of a struct drm_crtc_commit within their
+ * subclassed structure of &drm_private_state as necessary, e.g. similar to
+ * &drm_plane_state.commit. See also &drm_atomic_state.fake_commit.
  *
  * All private state structures contained in a &drm_atomic_state update can be
  * iterated using for_each_oldnew_private_obj_in_state(),
@@ -1016,14 +1018,14 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
 }
 
 /**
- * drm_atomic_add_affected_connectors - add connectors for crtc
+ * drm_atomic_add_affected_connectors - add connectors for CRTC
  * @state: atomic state
- * @crtc: DRM crtc
+ * @crtc: DRM CRTC
  *
  * This function walks the current configuration and adds all connectors
  * currently using @crtc to the atomic configuration @state. Note that this
  * function must acquire the connection mutex. This can potentially cause
- * unneeded seralization if the update is just for the planes on one crtc. Hence
+ * unneeded serialization if the update is just for the planes on one CRTC. Hence
  * drivers and helpers should only call this when really needed (e.g. when a
  * full modeset needs to happen due to some change).
  *
@@ -1076,9 +1078,9 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
 EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
 
 /**
- * drm_atomic_add_affected_planes - add planes for crtc
+ * drm_atomic_add_affected_planes - add planes for CRTC
  * @state: atomic state
- * @crtc: DRM crtc
+ * @crtc: DRM CRTC
  *
  * This function walks the current configuration and adds all planes
  * currently used by @crtc to the atomic configuration @state. This is useful
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index b191d39..4511c2e 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -150,8 +150,8 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
 	 * is not set, an error is returned. Userspace can provide a solution
 	 * through the atomic ioctl.
 	 *
-	 * If the flag is set conflicting connectors are removed from the crtc
-	 * and the crtc is disabled if no encoder is left. This preserves
+	 * If the flag is set conflicting connectors are removed from the CRTC
+	 * and the CRTC is disabled if no encoder is left. This preserves
 	 * compatibility with the legacy set_config behavior.
 	 */
 	drm_connector_list_iter_begin(state->dev, &conn_iter);
@@ -220,7 +220,7 @@ set_best_encoder(struct drm_atomic_state *state,
 		crtc = conn_state->connector->state->crtc;
 
 		/* A NULL crtc is an error here because we should have
-		 *  duplicated a NULL best_encoder when crtc was NULL.
+		 * duplicated a NULL best_encoder when crtc was NULL.
 		 * As an exception restoring duplicated atomic state
 		 * during resume is allowed, so don't warn when
 		 * best_encoder is equal to encoder we intend to set.
@@ -419,6 +419,7 @@ mode_fixup(struct drm_atomic_state *state)
 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
 		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
+		struct drm_bridge *bridge;
 
 		WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
 
@@ -435,8 +436,10 @@ mode_fixup(struct drm_atomic_state *state)
 		encoder = new_conn_state->best_encoder;
 		funcs = encoder->helper_private;
 
-		ret = drm_bridge_mode_fixup(encoder->bridge, &new_crtc_state->mode,
-				&new_crtc_state->adjusted_mode);
+		bridge = drm_bridge_chain_get_first_bridge(encoder);
+		ret = drm_bridge_chain_mode_fixup(bridge,
+					&new_crtc_state->mode,
+					&new_crtc_state->adjusted_mode);
 		if (!ret) {
 			DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
 			return -EINVAL;
@@ -492,6 +495,7 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
 					    struct drm_crtc *crtc,
 					    const struct drm_display_mode *mode)
 {
+	struct drm_bridge *bridge;
 	enum drm_mode_status ret;
 
 	ret = drm_encoder_mode_valid(encoder, mode);
@@ -501,7 +505,8 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
 		return ret;
 	}
 
-	ret = drm_bridge_mode_valid(encoder->bridge, mode);
+	bridge = drm_bridge_chain_get_first_bridge(encoder);
+	ret = drm_bridge_chain_mode_valid(bridge, mode);
 	if (ret != MODE_OK) {
 		DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
 		return ret;
@@ -556,27 +561,27 @@ mode_valid(struct drm_atomic_state *state)
  * @state: the driver state object
  *
  * Check the state object to see if the requested state is physically possible.
- * This does all the crtc and connector related computations for an atomic
+ * This does all the CRTC and connector related computations for an atomic
  * update and adds any additional connectors needed for full modesets. It calls
  * the various per-object callbacks in the follow order:
  *
  * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
  * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
- * 3. If it's determined a modeset is needed then all connectors on the affected crtc
- *    crtc are added and &drm_connector_helper_funcs.atomic_check is run on them.
+ * 3. If it's determined a modeset is needed then all connectors on the affected
+ *    CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
  * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
  *    &drm_crtc_helper_funcs.mode_valid are called on the affected components.
  * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
  * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
- *    This function is only called when the encoder will be part of a configured crtc,
+ *    This function is only called when the encoder will be part of a configured CRTC,
  *    it must not be used for implementing connector property validation.
  *    If this function is NULL, &drm_atomic_encoder_helper_funcs.mode_fixup is called
  *    instead.
- * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
+ * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
  *
  * &drm_crtc_state.mode_changed is set when the input mode is changed.
  * &drm_crtc_state.connectors_changed is set when a connector is added or
- * removed from the crtc.  &drm_crtc_state.active_changed is set when
+ * removed from the CRTC.  &drm_crtc_state.active_changed is set when
  * &drm_crtc_state.active changes, which is used for DPMS.
  * See also: drm_atomic_crtc_needs_modeset()
  *
@@ -687,7 +692,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 
 	/*
 	 * After all the routing has been prepared we need to add in any
-	 * connector which is itself unchanged, but whose crtc changes its
+	 * connector which is itself unchanged, but whose CRTC changes its
 	 * configuration. This must be done before calling mode_fixup in case a
 	 * crtc only changed its mode but has the same set of connectors.
 	 */
@@ -736,13 +741,13 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
 /**
  * drm_atomic_helper_check_plane_state() - Check plane state for validity
  * @plane_state: plane state to check
- * @crtc_state: crtc state to check
+ * @crtc_state: CRTC state to check
  * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
  * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
  * @can_position: is it legal to position the plane such that it
- *                doesn't cover the entire crtc?  This will generally
+ *                doesn't cover the entire CRTC?  This will generally
  *                only be false for primary planes.
- * @can_update_disabled: can the plane be updated while the crtc
+ * @can_update_disabled: can the plane be updated while the CRTC
  *                       is disabled?
  *
  * Checks that a desired plane update is valid, and updates various
@@ -839,7 +844,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
  * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
  * hooks provided by the driver.
  *
- * It also sets &drm_crtc_state.planes_changed to indicate that a crtc has
+ * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
  * updated planes.
  *
  * RETURNS:
@@ -903,7 +908,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
  * @state: the driver state object
  *
  * Check the state object to see if the requested state is physically possible.
- * Only crtcs and planes have check callbacks, so for any additional (global)
+ * Only CRTCs and planes have check callbacks, so for any additional (global)
  * checking that a driver needs it can simply wrap that around this function.
  * Drivers without such needs can directly use this as their
  * &drm_mode_config_funcs.atomic_check callback.
@@ -956,14 +961,14 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
 		   struct drm_crtc_state *new_state)
 {
 	/*
-	 * No new_state means the crtc is off, so the only criteria is whether
+	 * No new_state means the CRTC is off, so the only criteria is whether
 	 * it's currently active or in self refresh mode.
 	 */
 	if (!new_state)
 		return drm_atomic_crtc_effectively_active(old_state);
 
 	/*
-	 * We need to run through the crtc_funcs->disable() function if the crtc
+	 * We need to run through the crtc_funcs->disable() function if the CRTC
 	 * is currently on, if it's transitioning to self refresh mode, or if
 	 * it's in self refresh mode and needs to be fully disabled.
 	 */
@@ -984,6 +989,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 	for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
 		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
+		struct drm_bridge *bridge;
 
 		/* Shut down everything that's in the changeset and currently
 		 * still on. So need to check the old, saved state. */
@@ -1020,7 +1026,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 		 * Each encoder has at most one connector (since we always steal
 		 * it away), so we won't call disable hooks twice.
 		 */
-		drm_atomic_bridge_disable(encoder->bridge, old_state);
+		bridge = drm_bridge_chain_get_first_bridge(encoder);
+		drm_atomic_bridge_chain_disable(bridge, old_state);
 
 		/* Right function depends upon target state. */
 		if (funcs) {
@@ -1034,7 +1041,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 				funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
 		}
 
-		drm_atomic_bridge_post_disable(encoder->bridge, old_state);
+		drm_atomic_bridge_chain_post_disable(bridge, old_state);
 	}
 
 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -1080,7 +1087,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
  * @old_state: atomic state object with old state structures
  *
  * This function updates all the various legacy modeset state pointers in
- * connectors, encoders and crtcs. It also updates the timestamping constants
+ * connectors, encoders and CRTCs. It also updates the timestamping constants
  * used for precise vblank timestamps by calling
  * drm_calc_timestamping_constants().
  *
@@ -1188,6 +1195,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
 		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
 		struct drm_display_mode *mode, *adjusted_mode;
+		struct drm_bridge *bridge;
 
 		if (!new_conn_state->best_encoder)
 			continue;
@@ -1215,7 +1223,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
 			funcs->mode_set(encoder, mode, adjusted_mode);
 		}
 
-		drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
+		bridge = drm_bridge_chain_get_first_bridge(encoder);
+		drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
 	}
 }
 
@@ -1227,7 +1236,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
  * This function shuts down all the outputs that need to be shut down and
  * prepares them (if required) with the new mode.
  *
- * For compatibility with legacy crtc helpers this should be called before
+ * For compatibility with legacy CRTC helpers this should be called before
  * drm_atomic_helper_commit_planes(), which is what the default commit function
  * does. But drivers with different needs can group the modeset commits together
  * and do the plane commits at the end. This is useful for drivers doing runtime
@@ -1273,7 +1282,7 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
  * This function enables all the outputs with the new configuration which had to
  * be turned off for the update.
  *
- * For compatibility with legacy crtc helpers this should be called after
+ * For compatibility with legacy CRTC helpers this should be called after
  * drm_atomic_helper_commit_planes(), which is what the default commit function
  * does. But drivers with different needs can group the modeset commits together
  * and do the plane commits at the end. This is useful for drivers doing runtime
@@ -1314,6 +1323,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
 		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
+		struct drm_bridge *bridge;
 
 		if (!new_conn_state->best_encoder)
 			continue;
@@ -1332,7 +1342,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 		 * Each encoder has at most one connector (since we always steal
 		 * it away), so we won't call enable hooks twice.
 		 */
-		drm_atomic_bridge_pre_enable(encoder->bridge, old_state);
+		bridge = drm_bridge_chain_get_first_bridge(encoder);
+		drm_atomic_bridge_chain_pre_enable(bridge, old_state);
 
 		if (funcs) {
 			if (funcs->atomic_enable)
@@ -1343,7 +1354,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 				funcs->commit(encoder);
 		}
 
-		drm_atomic_bridge_enable(encoder->bridge, old_state);
+		drm_atomic_bridge_chain_enable(bridge, old_state);
 	}
 
 	drm_atomic_helper_commit_writebacks(dev, old_state);
@@ -1403,12 +1414,12 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
 
 /**
- * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
+ * drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
  * @dev: DRM device
  * @old_state: atomic state object with old state structures
  *
- * Helper to, after atomic commit, wait for vblanks on all effected
- * crtcs (ie. before cleaning up old framebuffers using
+ * Helper to, after atomic commit, wait for vblanks on all affected
+ * CRTCs (ie. before cleaning up old framebuffers using
  * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
  * framebuffers have actually changed to optimize for the legacy cursor and
  * plane update use-case.
@@ -1467,10 +1478,10 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
  * @dev: DRM device
  * @old_state: atomic state object with old state structures
  *
- * Helper to, after atomic commit, wait for page flips on all effected
+ * Helper to, after atomic commit, wait for page flips on all affected
  * crtcs (ie. before cleaning up old framebuffers using
  * drm_atomic_helper_cleanup_planes()). Compared to
- * drm_atomic_helper_wait_for_vblanks() this waits for the completion of on all
+ * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
  * CRTCs, assuming that cursors-only updates are signalling their completion
  * immediately (or using a different path).
  *
@@ -1834,17 +1845,21 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
 /**
  * DOC: implementing nonblocking commit
  *
- * Nonblocking atomic commits have to be implemented in the following sequence:
+ * Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
+ * different operations against each other. Locks, especially struct
+ * &drm_modeset_lock, should not be held in worker threads or any other
+ * asynchronous context used to commit the hardware state.
  *
- * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
- * which commit needs to call which can fail, so we want to run it first and
+ * drm_atomic_helper_commit() implements the recommended sequence for
+ * nonblocking commits, using drm_atomic_helper_setup_commit() internally:
+ *
+ * 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
+ * need to propagate out of memory/VRAM errors to userspace, it must be called
  * synchronously.
  *
  * 2. Synchronize with any outstanding nonblocking commit worker threads which
- * might be affected the new state update. This can be done by either cancelling
- * or flushing the work items, depending upon whether the driver can deal with
- * cancelled updates. Note that it is important to ensure that the framebuffer
- * cleanup is still done when cancelling.
+ * might be affected by the new state update. This is handled by
+ * drm_atomic_helper_setup_commit().
  *
  * Asynchronous workers need to have sufficient parallelism to be able to run
  * different atomic commits on different CRTCs in parallel. The simplest way to
@@ -1855,21 +1870,29 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
  * must be done as one global operation, and enabling or disabling a CRTC can
  * take a long time. But even that is not required.
  *
+ * IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
+ * against all CRTCs therein. Therefore for atomic state updates which only flip
+ * planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs
+ * in its atomic check code: This would prevent committing of atomic updates to
+ * multiple CRTCs in parallel. In general, adding additional state structures
+ * should be avoided as much as possible, because this reduces parallelism in
+ * (nonblocking) commits, both due to locking and due to commit sequencing
+ * requirements.
+ *
  * 3. The software state is updated synchronously with
  * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
- * locks means concurrent callers never see inconsistent state. And doing this
- * while it's guaranteed that no relevant nonblocking worker runs means that
- * nonblocking workers do not need grab any locks. Actually they must not grab
- * locks, for otherwise the work flushing will deadlock.
+ * locks means concurrent callers never see inconsistent state. Note that commit
+ * workers do not hold any locks; their access is only coordinated through
+ * ordering. If workers were to access state only through the pointers in the
+ * free-standing state objects (currently not the case for any driver), then even
+ * multiple pending commits could be in-flight at the same time.
  *
  * 4. Schedule a work item to do all subsequent steps, using the split-out
  * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
  * then cleaning up the framebuffers after the old framebuffer is no longer
- * being displayed.
- *
- * The above scheme is implemented in the atomic helper libraries in
- * drm_atomic_helper_commit() using a bunch of helper functions. See
- * drm_atomic_helper_setup_commit() for a starting point.
+ * being displayed. The scheduled work should synchronize against other workers
+ * using the &drm_crtc_commit infrastructure as needed. See
+ * drm_atomic_helper_setup_commit() for more details.
  */
 
 static int stall_checks(struct drm_crtc *crtc, bool nonblock)
@@ -2098,7 +2121,7 @@ EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
  *
  * This function waits for all preceeding commits that touch the same CRTC as
  * @old_state to both be committed to the hardware (as signalled by
- * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
+ * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
  * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
  *
  * This is part of the atomic helper support for nonblocking commits, see
@@ -2185,7 +2208,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
  * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
  * @old_state: atomic state object with old state structures
  *
- * This function walks all CRTCs and fake VBLANK events on those with
+ * This function walks all CRTCs and fakes VBLANK events on those with
  * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
  * The primary use of this function is writeback connectors working in oneshot
  * mode and faking VBLANK events. In this case they only fake the VBLANK event
@@ -2381,7 +2404,7 @@ static bool plane_crtc_active(const struct drm_plane_state *state)
  * @flags: flags for committing plane state
  *
  * This function commits the new plane state using the plane and atomic helper
- * functions for planes and crtcs. It assumes that the atomic state has already
+ * functions for planes and CRTCs. It assumes that the atomic state has already
  * been pushed into the relevant object state pointers, since this step can no
  * longer fail.
  *
@@ -2502,15 +2525,15 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
 
 /**
- * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a crtc
- * @old_crtc_state: atomic state object with the old crtc state
+ * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
+ * @old_crtc_state: atomic state object with the old CRTC state
  *
  * This function commits the new plane state using the plane and atomic helper
- * functions for planes on the specific crtc. It assumes that the atomic state
+ * functions for planes on the specific CRTC. It assumes that the atomic state
  * has already been pushed into the relevant object state pointers, since this
  * step can no longer fail.
  *
- * This function is useful when plane updates should be done crtc-by-crtc
+ * This function is useful when plane updates should be done CRTC-by-CRTC
  * instead of one global step like drm_atomic_helper_commit_planes() does.
  *
  * This function can only be savely used when planes are not allowed to move
@@ -2800,10 +2823,10 @@ EXPORT_SYMBOL(drm_atomic_helper_swap_state);
  * @plane: plane object to update
  * @crtc: owning CRTC of owning plane
  * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of primary plane on crtc
- * @crtc_y: y offset of primary plane on crtc
- * @crtc_w: width of primary plane rectangle on crtc
- * @crtc_h: height of primary plane rectangle on crtc
+ * @crtc_x: x offset of primary plane on @crtc
+ * @crtc_y: y offset of primary plane on @crtc
+ * @crtc_w: width of primary plane rectangle on @crtc
+ * @crtc_h: height of primary plane rectangle on @crtc
  * @src_x: x offset of @fb for panning
  * @src_y: y offset of @fb for panning
  * @src_w: width of source rectangle in @fb
@@ -2909,7 +2932,7 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
  * @set: mode set configuration
  * @ctx: lock acquisition context
  *
- * Provides a default crtc set_config handler using the atomic driver interface.
+ * Provides a default CRTC set_config handler using the atomic driver interface.
  *
  * NOTE: For backwards compatibility with old userspace this automatically
  * resets the "link-status" property to GOOD, to force any link
@@ -3322,7 +3345,7 @@ static int page_flip_common(struct drm_atomic_state *state,
 
 /**
  * drm_atomic_helper_page_flip - execute a legacy page flip
- * @crtc: DRM crtc
+ * @crtc: DRM CRTC
  * @fb: DRM framebuffer
  * @event: optional DRM event to signal upon completion
  * @flags: flip flags for non-vblank sync'ed updates
@@ -3366,7 +3389,7 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
 
 /**
  * drm_atomic_helper_page_flip_target - do page flip on target vblank period.
- * @crtc: DRM crtc
+ * @crtc: DRM CRTC
  * @fb: DRM framebuffer
  * @event: optional DRM event to signal upon completion
  * @flags: flip flags for non-vblank sync'ed updates
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index d0a937f..7cf3cf9 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -58,6 +58,22 @@
  */
 
 /**
+ * __drm_atomic_helper_crtc_state_reset - reset the CRTC state
+ * @crtc_state: atomic CRTC state, must not be NULL
+ * @crtc: CRTC object, must not be NULL
+ *
+ * Initializes the newly allocated @crtc_state with default
+ * values. This is useful for drivers that subclass the CRTC state.
+ */
+void
+__drm_atomic_helper_crtc_state_reset(struct drm_crtc_state *crtc_state,
+				     struct drm_crtc *crtc)
+{
+	crtc_state->crtc = crtc;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_state_reset);
+
+/**
  * __drm_atomic_helper_crtc_reset - reset state on CRTC
  * @crtc: drm CRTC
  * @crtc_state: CRTC state to assign
@@ -74,7 +90,7 @@ __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
 			       struct drm_crtc_state *crtc_state)
 {
 	if (crtc_state)
-		crtc_state->crtc = crtc;
+		__drm_atomic_helper_crtc_state_reset(crtc_state, crtc);
 
 	crtc->state = crtc_state;
 }
@@ -212,23 +228,43 @@ void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
 EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
 
 /**
- * __drm_atomic_helper_plane_reset - resets planes state to default values
+ * __drm_atomic_helper_plane_state_reset - resets plane state to default values
+ * @plane_state: atomic plane state, must not be NULL
  * @plane: plane object, must not be NULL
- * @state: atomic plane state, must not be NULL
  *
- * Initializes plane state to default. This is useful for drivers that subclass
- * the plane state.
+ * Initializes the newly allocated @plane_state with default
+ * values. This is useful for drivers that subclass the plane state.
+ */
+void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state,
+					   struct drm_plane *plane)
+{
+	plane_state->plane = plane;
+	plane_state->rotation = DRM_MODE_ROTATE_0;
+
+	plane_state->alpha = DRM_BLEND_ALPHA_OPAQUE;
+	plane_state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_state_reset);
+
+/**
+ * __drm_atomic_helper_plane_reset - reset state on plane
+ * @plane: drm plane
+ * @plane_state: plane state to assign
+ *
+ * Initializes the newly allocated @plane_state and assigns it to
+ * the &drm_plane.state pointer of @plane, usually required when
+ * initializing drivers or when called from the &drm_plane_funcs.reset
+ * hook.
+ *
+ * This is useful for drivers that subclass the plane state.
  */
 void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
-				     struct drm_plane_state *state)
+				     struct drm_plane_state *plane_state)
 {
-	state->plane = plane;
-	state->rotation = DRM_MODE_ROTATE_0;
+	if (plane_state)
+		__drm_atomic_helper_plane_state_reset(plane_state, plane);
 
-	state->alpha = DRM_BLEND_ALPHA_OPAQUE;
-	state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
-
-	plane->state = state;
+	plane->state = plane_state;
 }
 EXPORT_SYMBOL(__drm_atomic_helper_plane_reset);
 
@@ -336,6 +372,22 @@ void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
 EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
 
 /**
+ * __drm_atomic_helper_connector_state_reset - reset the connector state
+ * @conn_state: atomic connector state, must not be NULL
+ * @connector: connector object, must not be NULL
+ *
+ * Initializes the newly allocated @conn_state with default
+ * values. This is useful for drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_state_reset(struct drm_connector_state *conn_state,
+					  struct drm_connector *connector)
+{
+	conn_state->connector = connector;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_state_reset);
+
+/**
  * __drm_atomic_helper_connector_reset - reset state on connector
  * @connector: drm connector
  * @conn_state: connector state to assign
@@ -352,7 +404,7 @@ __drm_atomic_helper_connector_reset(struct drm_connector *connector,
 				    struct drm_connector_state *conn_state)
 {
 	if (conn_state)
-		conn_state->connector = connector;
+		__drm_atomic_helper_connector_state_reset(conn_state, connector);
 
 	connector->state = conn_state;
 }
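The new *_state_reset() helpers are meant to be called directly by drivers that embed the core state in a larger structure. Below is a minimal sketch of a driver-side &drm_crtc_funcs.reset hook built on __drm_atomic_helper_crtc_state_reset(); the foo_crtc_state structure and foo_crtc_reset() function are hypothetical and only illustrate the calling pattern, they are not part of this patch.

#include <linux/slab.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>

struct foo_crtc_state {
	struct drm_crtc_state base;
	/* driver-private members would follow here */
	bool needs_modeset_quirk;
};

static void foo_crtc_reset(struct drm_crtc *crtc)
{
	struct foo_crtc_state *state;

	if (crtc->state)
		crtc->funcs->atomic_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return;

	/* Initialize the embedded base state with the new helper ... */
	__drm_atomic_helper_crtc_state_reset(&state->base, crtc);
	/* ... and make it the current state of the CRTC. */
	crtc->state = &state->base;
}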
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 0d466d3..a1e5e26 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -160,12 +160,12 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
 EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
 
 /**
- * drm_atomic_set_crtc_for_plane - set crtc for plane
+ * drm_atomic_set_crtc_for_plane - set CRTC for plane
  * @plane_state: the plane whose incoming state to update
- * @crtc: crtc to use for the plane
+ * @crtc: CRTC to use for the plane
  *
- * Changing the assigned crtc for a plane requires us to grab the lock and state
- * for the new crtc, as needed. This function takes care of all these details
+ * Changing the assigned CRTC for a plane requires us to grab the lock and state
+ * for the new CRTC, as needed. This function takes care of all these details
  * besides updating the pointer in the state object itself.
  *
  * Returns:
@@ -279,12 +279,12 @@ drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
 EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
 
 /**
- * drm_atomic_set_crtc_for_connector - set crtc for connector
+ * drm_atomic_set_crtc_for_connector - set CRTC for connector
  * @conn_state: atomic state object for the connector
- * @crtc: crtc to use for the connector
+ * @crtc: CRTC to use for the connector
  *
- * Changing the assigned crtc for a connector requires us to grab the lock and
- * state for the new crtc, as needed. This function takes care of all these
+ * Changing the assigned CRTC for a connector requires us to grab the lock and
+ * state for the new CRTC, as needed. This function takes care of all these
  * details besides updating the pointer in the state object itself.
  *
  * Returns:
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index cba537c..c2cf0c9 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -55,7 +55,7 @@
  * just provide additional hooks to get the desired output at the end of the
  * encoder chain.
  *
- * Bridges can also be chained up using the &drm_bridge.next pointer.
+ * Bridges can also be chained up using the &drm_bridge.chain_node field.
  *
  * Both legacy CRTC helpers and the new atomic modeset helpers support bridges.
  */
@@ -128,20 +128,21 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
 	bridge->dev = encoder->dev;
 	bridge->encoder = encoder;
 
+	if (previous)
+		list_add(&bridge->chain_node, &previous->chain_node);
+	else
+		list_add(&bridge->chain_node, &encoder->bridge_chain);
+
 	if (bridge->funcs->attach) {
 		ret = bridge->funcs->attach(bridge);
 		if (ret < 0) {
+			list_del(&bridge->chain_node);
 			bridge->dev = NULL;
 			bridge->encoder = NULL;
 			return ret;
 		}
 	}
 
-	if (previous)
-		previous->next = bridge;
-	else
-		encoder->bridge = bridge;
-
 	return 0;
 }
 EXPORT_SYMBOL(drm_bridge_attach);
@@ -157,6 +158,7 @@ void drm_bridge_detach(struct drm_bridge *bridge)
 	if (bridge->funcs->detach)
 		bridge->funcs->detach(bridge);
 
+	list_del(&bridge->chain_node);
 	bridge->dev = NULL;
 }
 
@@ -172,8 +174,8 @@ void drm_bridge_detach(struct drm_bridge *bridge)
  */
 
 /**
- * drm_bridge_mode_fixup - fixup proposed mode for all bridges in the
- *			   encoder chain
+ * drm_bridge_chain_mode_fixup - fixup proposed mode for all bridges in the
+ *				 encoder chain
  * @bridge: bridge control structure
  * @mode: desired mode to be set for the bridge
  * @adjusted_mode: updated mode that works for this bridge
@@ -186,27 +188,31 @@ void drm_bridge_detach(struct drm_bridge *bridge)
  * RETURNS:
  * true on success, false on failure
  */
-bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
-			const struct drm_display_mode *mode,
-			struct drm_display_mode *adjusted_mode)
+bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
 {
-	bool ret = true;
+	struct drm_encoder *encoder;
 
 	if (!bridge)
 		return true;
 
-	if (bridge->funcs->mode_fixup)
-		ret = bridge->funcs->mode_fixup(bridge, mode, adjusted_mode);
+	encoder = bridge->encoder;
+	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+		if (!bridge->funcs->mode_fixup)
+			continue;
 
-	ret = ret && drm_bridge_mode_fixup(bridge->next, mode, adjusted_mode);
+		if (!bridge->funcs->mode_fixup(bridge, mode, adjusted_mode))
+			return false;
+	}
 
-	return ret;
+	return true;
 }
-EXPORT_SYMBOL(drm_bridge_mode_fixup);
+EXPORT_SYMBOL(drm_bridge_chain_mode_fixup);
 
 /**
- * drm_bridge_mode_valid - validate the mode against all bridges in the
- * 			   encoder chain.
+ * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
+ *				 encoder chain.
  * @bridge: bridge control structure
  * @mode: desired mode to be validated
  *
@@ -219,26 +225,33 @@ EXPORT_SYMBOL(drm_bridge_mode_fixup);
  * RETURNS:
  * MODE_OK on success, drm_mode_status Enum error code on failure
  */
-enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
-					   const struct drm_display_mode *mode)
+enum drm_mode_status
+drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
+			    const struct drm_display_mode *mode)
 {
-	enum drm_mode_status ret = MODE_OK;
+	struct drm_encoder *encoder;
 
 	if (!bridge)
-		return ret;
+		return MODE_OK;
 
-	if (bridge->funcs->mode_valid)
+	encoder = bridge->encoder;
+	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+		enum drm_mode_status ret;
+
+		if (!bridge->funcs->mode_valid)
+			continue;
+
 		ret = bridge->funcs->mode_valid(bridge, mode);
+		if (ret != MODE_OK)
+			return ret;
+	}
 
-	if (ret != MODE_OK)
-		return ret;
-
-	return drm_bridge_mode_valid(bridge->next, mode);
+	return MODE_OK;
 }
-EXPORT_SYMBOL(drm_bridge_mode_valid);
+EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
 
 /**
- * drm_bridge_disable - disables all bridges in the encoder chain
+ * drm_bridge_chain_disable - disables all bridges in the encoder chain
  * @bridge: bridge control structure
  *
  * Calls &drm_bridge_funcs.disable op for all the bridges in the encoder
@@ -247,20 +260,28 @@ EXPORT_SYMBOL(drm_bridge_mode_valid);
  *
  * Note: the bridge passed should be the one closest to the encoder
  */
-void drm_bridge_disable(struct drm_bridge *bridge)
+void drm_bridge_chain_disable(struct drm_bridge *bridge)
 {
+	struct drm_encoder *encoder;
+	struct drm_bridge *iter;
+
 	if (!bridge)
 		return;
 
-	drm_bridge_disable(bridge->next);
+	encoder = bridge->encoder;
+	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+		if (iter->funcs->disable)
+			iter->funcs->disable(iter);
 
-	if (bridge->funcs->disable)
-		bridge->funcs->disable(bridge);
+		if (iter == bridge)
+			break;
+	}
 }
-EXPORT_SYMBOL(drm_bridge_disable);
+EXPORT_SYMBOL(drm_bridge_chain_disable);
 
 /**
- * drm_bridge_post_disable - cleans up after disabling all bridges in the encoder chain
+ * drm_bridge_chain_post_disable - cleans up after disabling all bridges in the
+ *				   encoder chain
  * @bridge: bridge control structure
  *
  * Calls &drm_bridge_funcs.post_disable op for all the bridges in the
@@ -269,47 +290,53 @@ EXPORT_SYMBOL(drm_bridge_disable);
  *
  * Note: the bridge passed should be the one closest to the encoder
  */
-void drm_bridge_post_disable(struct drm_bridge *bridge)
+void drm_bridge_chain_post_disable(struct drm_bridge *bridge)
 {
+	struct drm_encoder *encoder;
+
 	if (!bridge)
 		return;
 
-	if (bridge->funcs->post_disable)
-		bridge->funcs->post_disable(bridge);
-
-	drm_bridge_post_disable(bridge->next);
+	encoder = bridge->encoder;
+	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+		if (bridge->funcs->post_disable)
+			bridge->funcs->post_disable(bridge);
+	}
 }
-EXPORT_SYMBOL(drm_bridge_post_disable);
+EXPORT_SYMBOL(drm_bridge_chain_post_disable);
 
 /**
- * drm_bridge_mode_set - set proposed mode for all bridges in the
- *			 encoder chain
+ * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
+ *			       encoder chain
  * @bridge: bridge control structure
- * @mode: desired mode to be set for the bridge
- * @adjusted_mode: updated mode that works for this bridge
+ * @mode: desired mode to be set for the encoder chain
+ * @adjusted_mode: updated mode that works for this encoder chain
  *
  * Calls &drm_bridge_funcs.mode_set op for all the bridges in the
  * encoder chain, starting from the first bridge to the last.
  *
  * Note: the bridge passed should be the one closest to the encoder
  */
-void drm_bridge_mode_set(struct drm_bridge *bridge,
-			 const struct drm_display_mode *mode,
-			 const struct drm_display_mode *adjusted_mode)
+void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
+			       const struct drm_display_mode *mode,
+			       const struct drm_display_mode *adjusted_mode)
 {
+	struct drm_encoder *encoder;
+
 	if (!bridge)
 		return;
 
-	if (bridge->funcs->mode_set)
-		bridge->funcs->mode_set(bridge, mode, adjusted_mode);
-
-	drm_bridge_mode_set(bridge->next, mode, adjusted_mode);
+	encoder = bridge->encoder;
+	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+		if (bridge->funcs->mode_set)
+			bridge->funcs->mode_set(bridge, mode, adjusted_mode);
+	}
 }
-EXPORT_SYMBOL(drm_bridge_mode_set);
+EXPORT_SYMBOL(drm_bridge_chain_mode_set);
 
 /**
- * drm_bridge_pre_enable - prepares for enabling all
- *			   bridges in the encoder chain
+ * drm_bridge_chain_pre_enable - prepares for enabling all bridges in the
+ *				 encoder chain
  * @bridge: bridge control structure
  *
  * Calls &drm_bridge_funcs.pre_enable op for all the bridges in the encoder
@@ -318,20 +345,24 @@ EXPORT_SYMBOL(drm_bridge_mode_set);
  *
  * Note: the bridge passed should be the one closest to the encoder
  */
-void drm_bridge_pre_enable(struct drm_bridge *bridge)
+void drm_bridge_chain_pre_enable(struct drm_bridge *bridge)
 {
+	struct drm_encoder *encoder;
+	struct drm_bridge *iter;
+
 	if (!bridge)
 		return;
 
-	drm_bridge_pre_enable(bridge->next);
-
-	if (bridge->funcs->pre_enable)
-		bridge->funcs->pre_enable(bridge);
+	encoder = bridge->encoder;
+	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+		if (iter->funcs->pre_enable)
+			iter->funcs->pre_enable(iter);
+	}
 }
-EXPORT_SYMBOL(drm_bridge_pre_enable);
+EXPORT_SYMBOL(drm_bridge_chain_pre_enable);
 
 /**
- * drm_bridge_enable - enables all bridges in the encoder chain
+ * drm_bridge_chain_enable - enables all bridges in the encoder chain
  * @bridge: bridge control structure
  *
  * Calls &drm_bridge_funcs.enable op for all the bridges in the encoder
@@ -340,22 +371,25 @@ EXPORT_SYMBOL(drm_bridge_pre_enable);
  *
  * Note that the bridge passed should be the one closest to the encoder
  */
-void drm_bridge_enable(struct drm_bridge *bridge)
+void drm_bridge_chain_enable(struct drm_bridge *bridge)
 {
+	struct drm_encoder *encoder;
+
 	if (!bridge)
 		return;
 
-	if (bridge->funcs->enable)
-		bridge->funcs->enable(bridge);
-
-	drm_bridge_enable(bridge->next);
+	encoder = bridge->encoder;
+	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+		if (bridge->funcs->enable)
+			bridge->funcs->enable(bridge);
+	}
 }
-EXPORT_SYMBOL(drm_bridge_enable);
+EXPORT_SYMBOL(drm_bridge_chain_enable);
 
 /**
- * drm_atomic_bridge_disable - disables all bridges in the encoder chain
+ * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
  * @bridge: bridge control structure
- * @state: atomic state being committed
+ * @old_state: old atomic state
  *
  * Calls &drm_bridge_funcs.atomic_disable (falls back on
  * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
@@ -364,26 +398,33 @@ EXPORT_SYMBOL(drm_bridge_enable);
  *
  * Note: the bridge passed should be the one closest to the encoder
  */
-void drm_atomic_bridge_disable(struct drm_bridge *bridge,
-			       struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
+				     struct drm_atomic_state *old_state)
 {
+	struct drm_encoder *encoder;
+	struct drm_bridge *iter;
+
 	if (!bridge)
 		return;
 
-	drm_atomic_bridge_disable(bridge->next, state);
+	encoder = bridge->encoder;
+	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+		if (iter->funcs->atomic_disable)
+			iter->funcs->atomic_disable(iter, old_state);
+		else if (iter->funcs->disable)
+			iter->funcs->disable(iter);
 
-	if (bridge->funcs->atomic_disable)
-		bridge->funcs->atomic_disable(bridge, state);
-	else if (bridge->funcs->disable)
-		bridge->funcs->disable(bridge);
+		if (iter == bridge)
+			break;
+	}
 }
-EXPORT_SYMBOL(drm_atomic_bridge_disable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
 
 /**
- * drm_atomic_bridge_post_disable - cleans up after disabling all bridges in the
- *				    encoder chain
+ * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
+ *					  in the encoder chain
  * @bridge: bridge control structure
- * @state: atomic state being committed
+ * @old_state: old atomic state
  *
  * Calls &drm_bridge_funcs.atomic_post_disable (falls back on
  * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
@@ -392,26 +433,29 @@ EXPORT_SYMBOL(drm_atomic_bridge_disable);
  *
  * Note: the bridge passed should be the one closest to the encoder
  */
-void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
-				    struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
+					  struct drm_atomic_state *old_state)
 {
+	struct drm_encoder *encoder;
+
 	if (!bridge)
 		return;
 
-	if (bridge->funcs->atomic_post_disable)
-		bridge->funcs->atomic_post_disable(bridge, state);
-	else if (bridge->funcs->post_disable)
-		bridge->funcs->post_disable(bridge);
-
-	drm_atomic_bridge_post_disable(bridge->next, state);
+	encoder = bridge->encoder;
+	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+		if (bridge->funcs->atomic_post_disable)
+			bridge->funcs->atomic_post_disable(bridge, old_state);
+		else if (bridge->funcs->post_disable)
+			bridge->funcs->post_disable(bridge);
+	}
 }
-EXPORT_SYMBOL(drm_atomic_bridge_post_disable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
 
 /**
- * drm_atomic_bridge_pre_enable - prepares for enabling all bridges in the
- *				  encoder chain
+ * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
+ *					the encoder chain
  * @bridge: bridge control structure
- * @state: atomic state being committed
+ * @old_state: old atomic state
  *
  * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
  * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
@@ -420,25 +464,32 @@ EXPORT_SYMBOL(drm_atomic_bridge_post_disable);
  *
  * Note: the bridge passed should be the one closest to the encoder
  */
-void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge,
-				  struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
+					struct drm_atomic_state *old_state)
 {
+	struct drm_encoder *encoder;
+	struct drm_bridge *iter;
+
 	if (!bridge)
 		return;
 
-	drm_atomic_bridge_pre_enable(bridge->next, state);
+	encoder = bridge->encoder;
+	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+		if (iter->funcs->atomic_pre_enable)
+			iter->funcs->atomic_pre_enable(iter, old_state);
+		else if (iter->funcs->pre_enable)
+			iter->funcs->pre_enable(iter);
 
-	if (bridge->funcs->atomic_pre_enable)
-		bridge->funcs->atomic_pre_enable(bridge, state);
-	else if (bridge->funcs->pre_enable)
-		bridge->funcs->pre_enable(bridge);
+		if (iter == bridge)
+			break;
+	}
 }
-EXPORT_SYMBOL(drm_atomic_bridge_pre_enable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
 
 /**
- * drm_atomic_bridge_enable - enables all bridges in the encoder chain
+ * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
  * @bridge: bridge control structure
- * @state: atomic state being committed
+ * @old_state: old atomic state
  *
  * Calls &drm_bridge_funcs.atomic_enable (falls back on
  * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
@@ -447,20 +498,23 @@ EXPORT_SYMBOL(drm_atomic_bridge_pre_enable);
  *
  * Note: the bridge passed should be the one closest to the encoder
  */
-void drm_atomic_bridge_enable(struct drm_bridge *bridge,
-			      struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
+				    struct drm_atomic_state *old_state)
 {
+	struct drm_encoder *encoder;
+
 	if (!bridge)
 		return;
 
-	if (bridge->funcs->atomic_enable)
-		bridge->funcs->atomic_enable(bridge, state);
-	else if (bridge->funcs->enable)
-		bridge->funcs->enable(bridge);
-
-	drm_atomic_bridge_enable(bridge->next, state);
+	encoder = bridge->encoder;
+	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+		if (bridge->funcs->atomic_enable)
+			bridge->funcs->atomic_enable(bridge, old_state);
+		else if (bridge->funcs->enable)
+			bridge->funcs->enable(bridge);
+	}
 }
-EXPORT_SYMBOL(drm_atomic_bridge_enable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
 
 #ifdef CONFIG_OF
 /**
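With &drm_bridge.next gone, code that needs to look at the whole chain now iterates the new &drm_encoder.bridge_chain list through &drm_bridge.chain_node, exactly as the helpers above do. A hypothetical debug helper sketching that iteration pattern (foo_encoder_count_bridges() is not part of this patch):

#include <linux/list.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>

/* Hypothetical helper: count the bridges currently attached to an encoder. */
static unsigned int foo_encoder_count_bridges(struct drm_encoder *encoder)
{
	struct drm_bridge *bridge;
	unsigned int n = 0;

	list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
		n++;

	return n;
}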
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index d9a2e36..b031b45 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -150,7 +150,7 @@ void drm_client_release(struct drm_client_dev *client)
 {
 	struct drm_device *dev = client->dev;
 
-	DRM_DEV_DEBUG_KMS(dev->dev, "%s\n", client->name);
+	drm_dbg_kms(dev, "%s\n", client->name);
 
 	drm_client_modeset_free(client);
 	drm_client_close(client);
@@ -203,7 +203,7 @@ void drm_client_dev_hotplug(struct drm_device *dev)
 			continue;
 
 		ret = client->funcs->hotplug(client);
-		DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret);
+		drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
 	}
 	mutex_unlock(&dev->clientlist_mutex);
 }
@@ -223,7 +223,7 @@ void drm_client_dev_restore(struct drm_device *dev)
 			continue;
 
 		ret = client->funcs->restore(client);
-		DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret);
+		drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
 		if (!ret) /* The first one to return zero gets the privilege to restore */
 			break;
 	}
@@ -351,8 +351,8 @@ static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
 
 	ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file);
 	if (ret)
-		DRM_DEV_ERROR(buffer->client->dev->dev,
-			      "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
+		drm_err(buffer->client->dev,
+			"Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
 
 	buffer->fb = NULL;
 }
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 895b73f..6d4a29e 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -115,6 +115,33 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
 }
 
 static struct drm_display_mode *
+drm_connector_get_tiled_mode(struct drm_connector *connector)
+{
+	struct drm_display_mode *mode;
+
+	list_for_each_entry(mode, &connector->modes, head) {
+		if (mode->hdisplay == connector->tile_h_size &&
+		    mode->vdisplay == connector->tile_v_size)
+			return mode;
+	}
+	return NULL;
+}
+
+static struct drm_display_mode *
+drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
+{
+	struct drm_display_mode *mode;
+
+	list_for_each_entry(mode, &connector->modes, head) {
+		if (mode->hdisplay == connector->tile_h_size &&
+		    mode->vdisplay == connector->tile_v_size)
+			continue;
+		return mode;
+	}
+	return NULL;
+}
+
+static struct drm_display_mode *
 drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height)
 {
 	struct drm_display_mode *mode;
@@ -348,8 +375,15 @@ static bool drm_client_target_preferred(struct drm_connector **connectors,
 	struct drm_connector *connector;
 	u64 conn_configured = 0;
 	int tile_pass = 0;
+	int num_tiled_conns = 0;
 	int i;
 
+	for (i = 0; i < connector_count; i++) {
+		if (connectors[i]->has_tile &&
+		    connectors[i]->status == connector_status_connected)
+			num_tiled_conns++;
+	}
+
 retry:
 	for (i = 0; i < connector_count; i++) {
 		connector = connectors[i];
@@ -399,6 +433,28 @@ static bool drm_client_target_preferred(struct drm_connector **connectors,
 			list_for_each_entry(modes[i], &connector->modes, head)
 				break;
 		}
+		/*
+		 * In case of tiled modes, if not all tiles are present, fall
+		 * back to the first available non-tiled mode. Once all tiles
+		 * are present, try to find the tiled mode for each of them;
+		 * if the tiled mode is not present due to fbcon size
+		 * limitations, use the first non-tiled mode only for tile 0,0
+		 * and set no mode for all other tiles.
+		 */
+		if (connector->has_tile) {
+			if (num_tiled_conns <
+			    connector->num_h_tile * connector->num_v_tile ||
+			    (connector->tile_h_loc == 0 &&
+			     connector->tile_v_loc == 0 &&
+			     !drm_connector_get_tiled_mode(connector))) {
+				DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
+					      connector->base.id);
+				modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+			} else {
+				modes[i] = drm_connector_get_tiled_mode(connector);
+			}
+		}
+
 		DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
 			  "none");
 		conn_configured |= BIT_ULL(i);
@@ -515,6 +571,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
 	bool fallback = true, ret = true;
 	int num_connectors_enabled = 0;
 	int num_connectors_detected = 0;
+	int num_tiled_conns = 0;
 	struct drm_modeset_acquire_ctx ctx;
 
 	if (!drm_drv_uses_atomic_modeset(dev))
@@ -532,6 +589,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
 	memcpy(save_enabled, enabled, count);
 	mask = GENMASK(count - 1, 0);
 	conn_configured = 0;
+	for (i = 0; i < count; i++) {
+		if (connectors[i]->has_tile &&
+		    connectors[i]->status == connector_status_connected)
+			num_tiled_conns++;
+	}
 retry:
 	conn_seq = conn_configured;
 	for (i = 0; i < count; i++) {
@@ -631,6 +693,16 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
 				      connector->name);
 			modes[i] = &connector->state->crtc->mode;
 		}
+		/*
+		 * In case of tiled modes, if not all tiles are present then
+		 * fall back to a non-tiled mode.
+		 */
+		if (connector->has_tile &&
+		    num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
+			DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
+				      connector->base.id);
+			modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+		}
 		crtcs[i] = new_crtc;
 
 		DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 4ce5c6d..c93123f 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -109,28 +109,38 @@
  */
 
 /**
- * drm_color_lut_extract - clamp and round LUT entries
- * @user_input: input value
- * @bit_precision: number of bits the hw LUT supports
+ * drm_color_ctm_s31_32_to_qm_n - convert S31.32 sign-magnitude to Qm.n
  *
- * Extract a degamma/gamma LUT value provided by user (in the form of
- * &drm_color_lut entries) and round it to the precision supported by the
- * hardware.
+ * @user_input: input value
+ * @m: number of integer bits, including the sign bit; only m <= 32 is supported
+ * @n: number of fractional bits; only n <= 32 is supported
+ *
+ * Convert and clamp S31.32 sign-magnitude to Qm.n (signed 2's complement).
+ * The sign bit is BIT(m+n-1): it and the bits above it are 0 for a positive
+ * value and 1 for a negative one. The representable range is
+ * [-2^(m-1), 2^(m-1) - 2^-n].
+ *
+ * For example, a Q3.12 format number:
+ * - requires 3 + 12 = 15 bits
+ * - covers the range [-2^2, 2^2 - 2^-12]
+ *
+ * NOTE: m can be zero if all of the precision bits are used for the
+ *       fractional part, as in Q0.32.
  */
-uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision)
+u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n)
 {
-	uint32_t val = user_input;
-	uint32_t max = 0xffff >> (16 - bit_precision);
+	u64 mag = (user_input & ~BIT_ULL(63)) >> (32 - n);
+	bool negative = !!(user_input & BIT_ULL(63));
+	s64 val;
 
-	/* Round only if we're not using full precision. */
-	if (bit_precision < 16) {
-		val += 1UL << (16 - bit_precision - 1);
-		val >>= 16 - bit_precision;
-	}
+	WARN_ON(m > 32 || n > 32);
 
-	return clamp_val(val, 0, max);
+	val = clamp_val(mag, 0, negative ?
+				BIT_ULL(n + m - 1) : BIT_ULL(n + m - 1) - 1);
+
+	return negative ? -val : val;
 }
-EXPORT_SYMBOL(drm_color_lut_extract);
+EXPORT_SYMBOL(drm_color_ctm_s31_32_to_qm_n);
 
 /**
  * drm_crtc_enable_color_mgmt - enable color management properties
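As a worked example of the new conversion, assume a hypothetical piece of hardware that takes Q4.12 CTM coefficients: the S31.32 sign-magnitude value 1.5 is 0x0000000180000000, so drm_color_ctm_s31_32_to_qm_n(0x0000000180000000ULL, 4, 12) sees a clear sign bit, shifts the magnitude right by 32 - 12 = 20 bits to get 0x1800 (6144), clamps it to the Q4.12 range and returns 6144, i.e. 1.5 * 2^12. A sketch of a driver using it to program such hardware follows; struct foo_crtc, its register layout and the Q4.12 format are assumptions made up for this example.

#include <linux/io.h>
#include <linux/kernel.h>
#include <drm/drm_crtc.h>
#include <drm/drm_color_mgmt.h>

struct foo_crtc {
	void __iomem *ctm_regs;	/* hypothetical 9 x 32-bit coefficient registers */
};

/* Hypothetical: load a 3x3 CTM into hardware that expects Q4.12 coefficients. */
static void foo_crtc_load_ctm(struct foo_crtc *foo, const struct drm_color_ctm *ctm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
		u64 coeff = drm_color_ctm_s31_32_to_qm_n(ctm->matrix[i], 4, 12);

		/* The hypothetical hardware takes the low 16 bits of the Q4.12 value. */
		writel(coeff & 0xffff, foo->ctm_regs + i * 4);
	}
}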
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 499b05a..93a4eec 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -48,6 +48,8 @@
 #include <drm/drm_print.h>
 #include <drm/drm_vblank.h>
 
+#include "drm_crtc_helper_internal.h"
+
 /**
  * DOC: overview
  *
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index ca3c55c..e22b812 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -140,8 +140,8 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
 	if (IS_ERR(source))
 		return PTR_ERR(source);
 
-	if (source[len] == '\n')
-		source[len] = '\0';
+	if (source[len - 1] == '\n')
+		source[len - 1] = '\0';
 
 	ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
 	if (ret)
@@ -258,6 +258,11 @@ static int crtc_crc_release(struct inode *inode, struct file *filep)
 	struct drm_crtc *crtc = filep->f_inode->i_private;
 	struct drm_crtc_crc *crc = &crtc->crc;
 
+	/* terminate drm_dp_aux_crc_work()'s while loop if it is still running */
+	spin_lock_irq(&crc->lock);
+	crc->opened = false;
+	spin_unlock_irq(&crc->lock);
+
 	crtc->funcs->set_crc_source(crtc, NULL);
 
 	spin_lock_irq(&crc->lock);
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 0cfb386..2510717 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -163,11 +163,7 @@ static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 			break;
 		}
 
-		if (aux_dev->aux->is_remote)
-			res = drm_dp_mst_dpcd_read(aux_dev->aux, pos, buf,
-						   todo);
-		else
-			res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
+		res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
 
 		if (res <= 0)
 			break;
@@ -215,11 +211,7 @@ static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
 			break;
 		}
 
-		if (aux_dev->aux->is_remote)
-			res = drm_dp_mst_dpcd_write(aux_dev->aux, pos, buf,
-						    todo);
-		else
-			res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
+		res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
 
 		if (res <= 0)
 			break;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 2c7870a..a5364b5 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -32,6 +32,7 @@
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_print.h>
 #include <drm/drm_vblank.h>
+#include <drm/drm_dp_mst_helper.h>
 
 #include "drm_crtc_helper_internal.h"
 
@@ -266,7 +267,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
 
 /**
  * drm_dp_dpcd_read() - read a series of bytes from the DPCD
- * @aux: DisplayPort AUX channel
+ * @aux: DisplayPort AUX channel (SST or MST)
  * @offset: address of the (first) register to read
  * @buffer: buffer to store the register values
  * @size: number of bytes in @buffer
@@ -295,13 +296,18 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
 	 * We just have to do it before any DPCD access and hope that the
 	 * monitor doesn't power down exactly after the throw away read.
 	 */
-	ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer,
-				 1);
-	if (ret != 1)
-		goto out;
+	if (!aux->is_remote) {
+		ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV,
+					 buffer, 1);
+		if (ret != 1)
+			goto out;
+	}
 
-	ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
-				 size);
+	if (aux->is_remote)
+		ret = drm_dp_mst_dpcd_read(aux, offset, buffer, size);
+	else
+		ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset,
+					 buffer, size);
 
 out:
 	drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret);
@@ -311,7 +317,7 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
 
 /**
  * drm_dp_dpcd_write() - write a series of bytes to the DPCD
- * @aux: DisplayPort AUX channel
+ * @aux: DisplayPort AUX channel (SST or MST)
  * @offset: address of the (first) register to write
  * @buffer: buffer containing the values to write
  * @size: number of bytes in @buffer
@@ -328,8 +334,12 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
 {
 	int ret;
 
-	ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
-				 size);
+	if (aux->is_remote)
+		ret = drm_dp_mst_dpcd_write(aux, offset, buffer, size);
+	else
+		ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset,
+					 buffer, size);
+
 	drm_dp_dump_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, ret);
 	return ret;
 }
@@ -969,6 +979,19 @@ static void drm_dp_aux_crc_work(struct work_struct *work)
 }
 
 /**
+ * drm_dp_remote_aux_init() - minimally initialise a remote aux channel
+ * @aux: DisplayPort AUX channel
+ *
+ * Used for remote aux channels in general. This merely initializes the CRC
+ * work struct.
+ */
+void drm_dp_remote_aux_init(struct drm_dp_aux *aux)
+{
+	INIT_WORK(&aux->crc_work, drm_dp_aux_crc_work);
+}
+EXPORT_SYMBOL(drm_dp_remote_aux_init);
+
+/**
  * drm_dp_aux_init() - minimally initialise an aux channel
  * @aux: DisplayPort AUX channel
  *
@@ -1155,6 +1178,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
 	{ OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) },
 	/* CH7511 seems to leave SINK_COUNT zeroed */
 	{ OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
+	/* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */
+	{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
 };
 
 #undef OUI
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index e6afe4f..20cdaf3 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -76,6 +76,11 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
 
 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 				    struct drm_dp_mst_branch *mstb);
+
+static void
+drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
+				   struct drm_dp_mst_branch *mstb);
+
 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
 					   struct drm_dp_mst_branch *mstb,
 					   struct drm_dp_mst_port *port);
@@ -517,8 +522,10 @@ drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
 			}
 
 			if (failed) {
-				for (i = 0; i < r->num_transactions; i++)
+				for (i = 0; i < r->num_transactions; i++) {
+					tx = &r->transactions[i];
 					kfree(tx->bytes);
+				}
 				return -ENOMEM;
 			}
 
@@ -846,6 +853,7 @@ static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband
 {
 	int idx = 1;
 	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
+	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
 	idx++;
 	if (idx > raw->curlen)
 		goto fail_len;
@@ -950,6 +958,8 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
 	case DP_POWER_DOWN_PHY:
 	case DP_POWER_UP_PHY:
 		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
+	case DP_CLEAR_PAYLOAD_ID_TABLE:
+		return true; /* since there's nothing to parse */
 	default:
 		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
 			  drm_dp_mst_req_type_str(msg->req_type));
@@ -1048,6 +1058,15 @@ static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
 	return 0;
 }
 
+static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
+{
+	struct drm_dp_sideband_msg_req_body req;
+
+	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
+	drm_dp_encode_sideband_req(&req, msg);
+	return 0;
+}
+
 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
 {
 	struct drm_dp_sideband_msg_req_body req;
@@ -2175,6 +2194,7 @@ drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
 				struct drm_dp_mst_port *port)
 {
 	mutex_lock(&mgr->lock);
+	port->parent->num_ports--;
 	list_del(&port->next);
 	mutex_unlock(&mgr->lock);
 	drm_dp_mst_topology_put_port(port);
@@ -2199,6 +2219,9 @@ drm_dp_mst_add_port(struct drm_device *dev,
 	port->aux.dev = dev->dev;
 	port->aux.is_remote = true;
 
+	/* initialize the MST downstream port's AUX crc work queue */
+	drm_dp_remote_aux_init(&port->aux);
+
 	/*
 	 * Make sure the memory allocation for our parent branch stays
 	 * around until our own memory allocation is released
@@ -2275,6 +2298,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
 		mutex_lock(&mgr->lock);
 		drm_dp_mst_topology_get_port(port);
 		list_add(&port->next, &mstb->ports);
+		mstb->num_ports++;
 		mutex_unlock(&mgr->lock);
 	}
 
@@ -2564,10 +2588,14 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
 	struct drm_device *dev = mgr->dev;
 	struct drm_dp_mst_branch *mstb;
 	int ret;
+	bool clear_payload_id_table;
 
 	mutex_lock(&mgr->probe_lock);
 
 	mutex_lock(&mgr->lock);
+	clear_payload_id_table = !mgr->payload_id_table_cleared;
+	mgr->payload_id_table_cleared = true;
+
 	mstb = mgr->mst_primary;
 	if (mstb) {
 		ret = drm_dp_mst_topology_try_get_mstb(mstb);
@@ -2580,6 +2608,19 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
 		return;
 	}
 
+	/*
+	 * Certain branch devices seem to incorrectly report an available_pbn
+	 * of 0 on downstream sinks, even after clearing the
+	 * DP_PAYLOAD_ALLOCATE_* registers in
+	 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
+	 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
+	 * things work again.
+	 */
+	if (clear_payload_id_table) {
+		DRM_DEBUG_KMS("Clearing payload ID table\n");
+		drm_dp_send_clear_payload_id_table(mgr, mstb);
+	}
+
 	ret = drm_dp_check_and_send_link_address(mgr, mstb);
 	drm_dp_mst_topology_put_mstb(mstb);
 
@@ -2906,6 +2947,28 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 	return ret < 0 ? ret : changed;
 }
 
+void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
+					struct drm_dp_mst_branch *mstb)
+{
+	struct drm_dp_sideband_msg_tx *txmsg;
+	int len, ret;
+
+	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+	if (!txmsg)
+		return;
+
+	txmsg->dst = mstb;
+	len = build_clear_payload_id_table(txmsg);
+
+	drm_dp_queue_down_tx(mgr, txmsg);
+
+	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+		DRM_DEBUG_KMS("clear payload table id nak received\n");
+
+	kfree(txmsg);
+}
+
 static int
 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
 				struct drm_dp_mst_branch *mstb,
@@ -2941,6 +3004,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
 				      path_res->avail_payload_bw_number);
 			port->available_pbn =
 				path_res->avail_payload_bw_number;
+			port->fec_capable = path_res->fec_capable;
 		}
 	}
 
@@ -3435,6 +3499,7 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8  dp_link_count)
 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
 {
 	int ret = 0;
+	int i = 0;
 	struct drm_dp_mst_branch *mstb = NULL;
 
 	mutex_lock(&mgr->lock);
@@ -3495,10 +3560,23 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 		/* this can fail if the device is gone */
 		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
 		ret = 0;
+		mutex_lock(&mgr->payload_lock);
 		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
 		mgr->payload_mask = 0;
 		set_bit(0, &mgr->payload_mask);
+		for (i = 0; i < mgr->max_payloads; i++) {
+			struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+
+			if (vcpi) {
+				vcpi->vcpi = 0;
+				vcpi->num_slots = 0;
+			}
+			mgr->proposed_vcpis[i] = NULL;
+		}
 		mgr->vcpi_mask = 0;
+		mutex_unlock(&mgr->payload_lock);
+
+		mgr->payload_id_table_cleared = false;
 	}
 
 out_unlock:
@@ -4071,6 +4149,7 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
  * @mgr: MST topology manager for the port
  * @port: port to find vcpi slots for
  * @pbn: bandwidth required for the mode in PBN
+ * @pbn_div: divider for DSC mode that takes FEC into account
  *
  * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
  * may have had. Any atomic drivers which support MST must call this function
@@ -4097,11 +4176,12 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
  */
 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
 				  struct drm_dp_mst_topology_mgr *mgr,
-				  struct drm_dp_mst_port *port, int pbn)
+				  struct drm_dp_mst_port *port, int pbn,
+				  int pbn_div)
 {
 	struct drm_dp_mst_topology_state *topology_state;
 	struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
-	int prev_slots, req_slots;
+	int prev_slots, prev_bw, req_slots;
 
 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
 	if (IS_ERR(topology_state))
@@ -4112,6 +4192,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
 		if (pos->port == port) {
 			vcpi = pos;
 			prev_slots = vcpi->vcpi;
+			prev_bw = vcpi->pbn;
 
 			/*
 			 * This should never happen, unless the driver tries
@@ -4127,14 +4208,22 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
 			break;
 		}
 	}
-	if (!vcpi)
+	if (!vcpi) {
 		prev_slots = 0;
+		prev_bw = 0;
+	}
 
-	req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+	if (pbn_div <= 0)
+		pbn_div = mgr->pbn_div;
+
+	req_slots = DIV_ROUND_UP(pbn, pbn_div);
 
 	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
 			 port->connector->base.id, port->connector->name,
 			 port, prev_slots, req_slots);
+	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
+			 port->connector->base.id, port->connector->name,
+			 port, prev_bw, pbn);
 
 	/* Add the new allocation to the state */
 	if (!vcpi) {
@@ -4147,6 +4236,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
 		list_add(&vcpi->next, &topology_state->vcpis);
 	}
 	vcpi->vcpi = req_slots;
+	vcpi->pbn = pbn;
 
 	return req_slots;
 }
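A rough sketch of how an MST driver's atomic check might use the new @pbn_div argument; to_foo_connector(), struct foo_connector, its mst_mgr/mst_port/dsc_needed members and foo_dsc_pbn_div() are assumptions invented for this example, and passing 0 (or any non-positive value) keeps the old behaviour of dividing by mgr->pbn_div.

static int foo_mst_encoder_atomic_check(struct drm_encoder *encoder,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state)
{
	struct foo_connector *conn = to_foo_connector(conn_state->connector);
	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
	bool dsc = conn->dsc_needed;
	/* 24 bpp uncompressed, or a 12 bpp DSC target expressed in 1/16 units */
	int bpp = dsc ? 12 * 16 : 24;
	int pbn, pbn_div, slots;

	pbn = drm_dp_calc_pbn_mode(mode->clock, bpp, dsc);

	/* 0 means "divide by mgr->pbn_div", as before this change. */
	pbn_div = dsc ? foo_dsc_pbn_div(conn) : 0;

	slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state, &conn->mst_mgr,
					      conn->mst_port, pbn, pbn_div);
	if (slots < 0)
		return slots;

	return 0;
}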
@@ -4397,10 +4487,11 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
  * @clock: dot clock for the mode
  * @bpp: bpp for the mode.
+ * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
  *
  * This uses the formula in the spec to calculate the PBN value for a mode.
  */
-int drm_dp_calc_pbn_mode(int clock, int bpp)
+int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
 {
 	/*
 	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
@@ -4411,7 +4502,16 @@ int drm_dp_calc_pbn_mode(int clock, int bpp)
 	 * peak_kbps *= (1006/1000)
 	 * peak_kbps *= (64/54)
 	 * peak_kbps *= 8    convert to bytes
+	 *
+	 * If the bpp is in units of 1/16, further divide by 16. Put this
+	 * factor in the numerator rather than the denominator to avoid
+	 * integer overflow
 	 */
+
+	if (dsc)
+		return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
+					8 * 54 * 1000 * 1000);
+
 	return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
 				8 * 54 * 1000 * 1000);
 }
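A quick worked example of the DSC branch (the mode clock and bpp here are illustrative assumptions, not values from this patch): a 533250 kHz mode compressed to a 12 bpp DSC target is passed as bpp = 192 (12 * 16), so the function evaluates DIV_ROUND_UP_ULL(533250 * 12 * 64 * 1006, 8 * 54 * 1000 * 1000) = DIV_ROUND_UP_ULL(411993216000, 432000000) = 954 PBN, the same value the non-DSC formula would return for an uncompressed 12 bpp stream:

	/* Hypothetical 4k mode at 533250 kHz with a 12 bpp DSC target. */
	int pbn = drm_dp_calc_pbn_mode(533250, 12 * 16, true);	/* -> 954 */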
@@ -4713,9 +4813,61 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
 	kfree(mst_state);
 }
 
+static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
+						 struct drm_dp_mst_branch *branch)
+{
+	while (port->parent) {
+		if (port->parent == branch)
+			return true;
+
+		if (port->parent->port_parent)
+			port = port->parent->port_parent;
+		else
+			break;
+	}
+	return false;
+}
+
+static inline
+int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
+				     struct drm_dp_mst_topology_state *mst_state)
+{
+	struct drm_dp_mst_port *port;
+	struct drm_dp_vcpi_allocation *vcpi;
+	int pbn_limit = 0, pbn_used = 0;
+
+	list_for_each_entry(port, &branch->ports, next) {
+		if (port->mstb)
+			if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
+				return -ENOSPC;
+
+		if (port->available_pbn > 0)
+			pbn_limit = port->available_pbn;
+	}
+	DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
+			 branch, pbn_limit);
+
+	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
+		if (!vcpi->pbn)
+			continue;
+
+		if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
+			pbn_used += vcpi->pbn;
+	}
+	DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
+			 branch, pbn_used);
+
+	if (pbn_used > pbn_limit) {
+		DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
+				 branch);
+		return -ENOSPC;
+	}
+	return 0;
+}
+
 static inline int
-drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
-				       struct drm_dp_mst_topology_state *mst_state)
+drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
+					 struct drm_dp_mst_topology_state *mst_state)
 {
 	struct drm_dp_vcpi_allocation *vcpi;
 	int avail_slots = 63, payload_count = 0;
@@ -4753,6 +4905,128 @@ drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
 }
 
 /**
+ * drm_dp_mst_add_affected_dsc_crtcs
+ * @state: Pointer to the new struct drm_dp_mst_topology_state
+ * @mgr: MST topology manager
+ *
+ * Whenever there is a change in the MST topology, the DSC configuration
+ * has to be recalculated, therefore we need to trigger a modeset on all
+ * affected CRTCs in that topology.
+ *
+ * See also:
+ * drm_dp_mst_atomic_enable_dsc()
+ */
+int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct drm_dp_mst_topology_state *mst_state;
+	struct drm_dp_vcpi_allocation *pos;
+	struct drm_connector *connector;
+	struct drm_connector_state *conn_state;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+
+	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
+
+	if (IS_ERR(mst_state))
+		return -EINVAL;
+
+	list_for_each_entry(pos, &mst_state->vcpis, next) {
+
+		connector = pos->port->connector;
+
+		if (!connector)
+			return -EINVAL;
+
+		conn_state = drm_atomic_get_connector_state(state, connector);
+
+		if (IS_ERR(conn_state))
+			return PTR_ERR(conn_state);
+
+		crtc = conn_state->crtc;
+
+		if (WARN_ON(!crtc))
+			return -EINVAL;
+
+		if (!drm_dp_mst_dsc_aux_for_port(pos->port))
+			continue;
+
+		crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
+
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+
+		DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
+				 mgr, crtc);
+
+		crtc_state->mode_changed = true;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
+
+/**
+ * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
+ * @state: Pointer to the new drm_atomic_state
+ * @port: Pointer to the affected MST Port
+ * @pbn: Newly recalculated bw required for link with DSC enabled
+ * @pbn_div: Divider to calculate correct number of pbn per slot
+ * @enable: Boolean flag to enable or disable DSC on the port
+ *
+ * This function enables DSC on the given port by recalculating its VCPI from
+ * the provided PBN, and sets the dsc_enabled flag to keep track of which
+ * ports have DSC enabled.
+ *
+ */
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+				 struct drm_dp_mst_port *port,
+				 int pbn, int pbn_div,
+				 bool enable)
+{
+	struct drm_dp_mst_topology_state *mst_state;
+	struct drm_dp_vcpi_allocation *pos;
+	bool found = false;
+	int vcpi = 0;
+
+	mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
+
+	if (IS_ERR(mst_state))
+		return PTR_ERR(mst_state);
+
+	list_for_each_entry(pos, &mst_state->vcpis, next) {
+		if (pos->port == port) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
+				 port, mst_state);
+		return -EINVAL;
+	}
+
+	if (pos->dsc_enabled == enable) {
+		DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
+				 port, enable, pos->vcpi);
+		vcpi = pos->vcpi;
+	}
+
+	if (enable) {
+		vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
+		DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
+				 port, vcpi);
+		if (vcpi < 0)
+			return -EINVAL;
+	}
+
+	pos->dsc_enabled = enable;
+
+	return vcpi;
+}
+EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
+/**
  * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
  * atomic update is valid
  * @state: Pointer to the new &struct drm_dp_mst_topology_state
@@ -4780,7 +5054,13 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
 	int i, ret = 0;
 
 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
-		ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
+		if (!mgr->mst_state)
+			continue;
+
+		ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
+		if (ret)
+			break;
+		ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
 		if (ret)
 			break;
 	}
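A hypothetical sketch of how a driver's atomic check could tie the new DSC helpers together: drm_dp_mst_add_affected_dsc_crtcs() is called once per topology whenever something changed, then drm_dp_mst_atomic_enable_dsc() per port once the driver has decided whether the stream needs compression. The foo_mst_check_dsc() wrapper and the choice of needs_dsc/pbn/pbn_div are assumptions, not part of this patch.

static int foo_mst_check_dsc(struct drm_atomic_state *state,
			     struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_dp_mst_port *port,
			     int pbn, int pbn_div, bool needs_dsc)
{
	int ret, slots;

	/* Any topology change may require recomputing DSC on other CRTCs. */
	ret = drm_dp_mst_add_affected_dsc_crtcs(state, mgr);
	if (ret)
		return ret;

	/* Returns the (re)allocated VCPI slot count, or a negative error. */
	slots = drm_dp_mst_atomic_enable_dsc(state, port, pbn, pbn_div,
					     needs_dsc);
	if (slots < 0)
		return slots;

	return 0;
}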
@@ -5044,3 +5324,173 @@ static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
 {
 	i2c_del_adapter(&aux->ddc);
 }
+
+/**
+ * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
+ * @port: The port to check
+ *
+ * A single physical MST hub object can be represented in the topology
+ * by multiple branches, with virtual ports between those branches.
+ *
+ * As of DP1.4, an MST hub with internal (virtual) ports must expose
+ * certain DPCD registers over those ports. See sections 2.6.1.1.1
+ * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
+ *
+ * May acquire mgr->lock
+ *
+ * Returns:
+ * true if the port is a virtual DP peer device, false otherwise
+ */
+static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
+{
+	struct drm_dp_mst_port *downstream_port;
+
+	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
+		return false;
+
+	/* Virtual DP Sink (Internal Display Panel) */
+	if (port->port_num >= 8)
+		return true;
+
+	/* DP-to-HDMI Protocol Converter */
+	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
+	    !port->mcs &&
+	    port->ldps)
+		return true;
+
+	/* DP-to-DP */
+	mutex_lock(&port->mgr->lock);
+	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+	    port->mstb &&
+	    port->mstb->num_ports == 2) {
+		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
+			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
+			    !downstream_port->input) {
+				mutex_unlock(&port->mgr->lock);
+				return true;
+			}
+		}
+	}
+	mutex_unlock(&port->mgr->lock);
+
+	return false;
+}
+
+/**
+ * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
+ * @port: The port to check. A leaf of the MST tree with an attached display.
+ *
+ * Depending on the situation, DSC may be enabled via the endpoint aux,
+ * the immediately upstream aux, or the connector's physical aux.
+ *
+ * This is both the correct aux to read DSC_CAPABILITY and the
+ * correct aux to write DSC_ENABLED.
+ *
+ * This operation can be expensive (up to four aux reads), so
+ * the caller should cache the returned pointer.
+ *
+ * Returns:
+ * NULL if DSC cannot be enabled on this port, otherwise the aux device
+ */
+struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
+{
+	struct drm_dp_mst_port *immediate_upstream_port;
+	struct drm_dp_mst_port *fec_port;
+	struct drm_dp_desc desc = { 0 };
+	u8 endpoint_fec;
+	u8 endpoint_dsc;
+
+	if (!port)
+		return NULL;
+
+	if (port->parent->port_parent)
+		immediate_upstream_port = port->parent->port_parent;
+	else
+		immediate_upstream_port = NULL;
+
+	fec_port = immediate_upstream_port;
+	while (fec_port) {
+		/*
+		 * Each physical link (i.e. not a virtual port) between the
+		 * output and the primary device must support FEC
+		 */
+		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
+		    !fec_port->fec_capable)
+			return NULL;
+
+		fec_port = fec_port->parent->port_parent;
+	}
+
+	/* DP-to-DP peer device */
+	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
+		u8 upstream_dsc;
+
+		if (drm_dp_dpcd_read(&port->aux,
+				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+			return NULL;
+		if (drm_dp_dpcd_read(&port->aux,
+				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+			return NULL;
+		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
+				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+			return NULL;
+
+		/* Endpoint decompression with DP-to-DP peer device */
+		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
+		    (endpoint_fec & DP_FEC_CAPABLE) &&
+		    (upstream_dsc & 0x2) /* DSC passthrough */)
+			return &port->aux;
+
+		/* Virtual DPCD decompression with DP-to-DP peer device */
+		return &immediate_upstream_port->aux;
+	}
+
+	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
+	if (drm_dp_mst_is_virtual_dpcd(port))
+		return &port->aux;
+
+	/*
+	 * Synaptics quirk
+	 * Applies to ports for which:
+	 * - Physical aux has Synaptics OUI
+	 * - DPv1.4 or higher
+	 * - Port is on primary branch device
+	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
+	 */
+	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
+		return NULL;
+
+	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
+	    port->parent == port->mgr->mst_primary) {
+		u8 downstreamport;
+
+		if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
+				     &downstreamport, 1) < 0)
+			return NULL;
+
+		if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
+		   ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
+		     != DP_DWN_STRM_PORT_TYPE_ANALOG))
+			return port->mgr->aux;
+	}
+
+	/*
+	 * The check below verifies if the MST sink
+	 * connected to the GPU is capable of DSC -
+	 * therefore the endpoint needs to be
+	 * both DSC and FEC capable.
+	 */
+	if (drm_dp_dpcd_read(&port->aux,
+	   DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+		return NULL;
+	if (drm_dp_dpcd_read(&port->aux,
+	   DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+		return NULL;
+	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
+	   (endpoint_fec & DP_FEC_CAPABLE))
+		return &port->aux;
+
+	return NULL;
+}
+EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
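A sketch of the intended caller pattern for drm_dp_mst_dsc_aux_for_port(): look the AUX channel up once, cache it, and use it for both the DSC capability reads and the DSC_ENABLE write. The foo_mst_set_dsc() wrapper below is hypothetical; only the drm_dp_* calls and the DPCD defines come from the existing helpers.

static int foo_mst_set_dsc(struct drm_dp_mst_port *port, bool enable)
{
	struct drm_dp_aux *dsc_aux;
	u8 val = enable ? DP_DECOMPRESSION_EN : 0;

	/* Up to four AUX reads, so real drivers should cache this result. */
	dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
	if (!dsc_aux)
		return -ENODEV;

	if (drm_dp_dpcd_writeb(dsc_aux, DP_DSC_ENABLE, val) < 0)
		return -EIO;

	return 0;
}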
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 1b9b40a..7c18a98 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -622,7 +622,8 @@ int drm_dev_init(struct drm_device *dev,
 		return -ENODEV;
 	}
 
-	BUG_ON(!parent);
+	if (WARN_ON(!parent))
+		return -EINVAL;
 
 	kref_init(&dev->ref);
 	dev->dev = get_device(parent);
@@ -725,7 +726,7 @@ int devm_drm_dev_init(struct device *parent,
 {
 	int ret;
 
-	if (WARN_ON(!parent || !driver->release))
+	if (WARN_ON(!driver->release))
 		return -EINVAL;
 
 	ret = drm_dev_init(dev, driver, parent);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 474ac04..99769d6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -710,14 +710,11 @@ static const struct minimode extra_modes[] = {
 };
 
 /*
- * Probably taken from CEA-861 spec.
- * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ * From CEA/CTA-861 spec.
  *
- * Index using the VIC.
+ * Do not access directly, instead always use cea_mode_for_vic().
  */
-static const struct drm_display_mode edid_cea_modes[] = {
-	/* 0 - dummy, VICs start at 1 */
-	{ },
+static const struct drm_display_mode edid_cea_modes_1[] = {
 	/* 1 - 640x480@60Hz 4:3 */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
 		   752, 800, 0, 480, 490, 492, 525, 0,
@@ -1381,6 +1378,149 @@ static const struct drm_display_mode edid_cea_modes[] = {
 };
 
 /*
+ * From CEA/CTA-861 spec.
+ *
+ * Do not access directly, instead always use cea_mode_for_vic().
+ */
+static const struct drm_display_mode edid_cea_modes_193[] = {
+	/* 193 - 5120x2160@120Hz 64:27 */
+	{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 1485000, 5120, 5284,
+		   5372, 5500, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 194 - 7680x4320@24Hz 16:9 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10232,
+		   10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 195 - 7680x4320@25Hz 16:9 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10032,
+		   10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 196 - 7680x4320@30Hz 16:9 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 8232,
+		   8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 197 - 7680x4320@48Hz 16:9 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10232,
+		   10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 198 - 7680x4320@50Hz 16:9 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10032,
+		   10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 199 - 7680x4320@60Hz 16:9 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 8232,
+		   8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 200 - 7680x4320@100Hz 16:9 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 9792,
+		   9968, 10560, 0, 4320, 4336, 4356, 4500, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 201 - 7680x4320@120Hz 16:9 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 8032,
+		   8208, 8800, 0, 4320, 4336, 4356, 4500, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 202 - 7680x4320@24Hz 64:27 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10232,
+		   10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 203 - 7680x4320@25Hz 64:27 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10032,
+		   10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 204 - 7680x4320@30Hz 64:27 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 8232,
+		   8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 205 - 7680x4320@48Hz 64:27 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10232,
+		   10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 206 - 7680x4320@50Hz 64:27 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10032,
+		   10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 207 - 7680x4320@60Hz 64:27 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 8232,
+		   8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 208 - 7680x4320@100Hz 64:27 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 9792,
+		   9968, 10560, 0, 4320, 4336, 4356, 4500, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 209 - 7680x4320@120Hz 64:27 */
+	{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 8032,
+		   8208, 8800, 0, 4320, 4336, 4356, 4500, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 210 - 10240x4320@24Hz 64:27 */
+	{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 11732,
+		   11908, 12500, 0, 4320, 4336, 4356, 4950, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 211 - 10240x4320@25Hz 64:27 */
+	{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 12732,
+		   12908, 13500, 0, 4320, 4336, 4356, 4400, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 212 - 10240x4320@30Hz 64:27 */
+	{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 10528,
+		   10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 213 - 10240x4320@48Hz 64:27 */
+	{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 11732,
+		   11908, 12500, 0, 4320, 4336, 4356, 4950, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 214 - 10240x4320@50Hz 64:27 */
+	{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 12732,
+		   12908, 13500, 0, 4320, 4336, 4356, 4400, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 215 - 10240x4320@60Hz 64:27 */
+	{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 10528,
+		   10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 216 - 10240x4320@100Hz 64:27 */
+	{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 5940000, 10240, 12432,
+		   12608, 13200, 0, 4320, 4336, 4356, 4500, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 217 - 10240x4320@120Hz 64:27 */
+	{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 5940000, 10240, 10528,
+		   10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 218 - 4096x2160@100Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 1188000, 4096, 4896,
+		   4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+	/* 219 - 4096x2160@120Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 1188000, 4096, 4184,
+		   4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+};
+
+/*
  * HDMI 1.4 4k modes. Index using the VIC.
  */
 static const struct drm_display_mode edid_4k_modes[] = {
@@ -1391,25 +1531,25 @@ static const struct drm_display_mode edid_4k_modes[] = {
 		   3840, 4016, 4104, 4400, 0,
 		   2160, 2168, 2178, 2250, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 30, },
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 2 - 3840x2160@25Hz */
 	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
 		   3840, 4896, 4984, 5280, 0,
 		   2160, 2168, 2178, 2250, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 25, },
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 3 - 3840x2160@24Hz */
 	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
 		   3840, 5116, 5204, 5500, 0,
 		   2160, 2168, 2178, 2250, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 24, },
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 4 - 4096x2160@24Hz (SMPTE) */
 	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
 		   4096, 5116, 5204, 5500, 0,
 		   2160, 2168, 2178, 2250, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 24, },
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
 };
 
 /*** DDC fetch and block validation ***/
@@ -3071,6 +3211,30 @@ static u8 *drm_find_cea_extension(const struct edid *edid)
 	return cea;
 }
 
+static const struct drm_display_mode *cea_mode_for_vic(u8 vic)
+{
+	BUILD_BUG_ON(1 + ARRAY_SIZE(edid_cea_modes_1) - 1 != 127);
+	BUILD_BUG_ON(193 + ARRAY_SIZE(edid_cea_modes_193) - 1 != 219);
+
+	if (vic >= 1 && vic < 1 + ARRAY_SIZE(edid_cea_modes_1))
+		return &edid_cea_modes_1[vic - 1];
+	if (vic >= 193 && vic < 193 + ARRAY_SIZE(edid_cea_modes_193))
+		return &edid_cea_modes_193[vic - 193];
+	return NULL;
+}
+
+static u8 cea_num_vics(void)
+{
+	return 193 + ARRAY_SIZE(edid_cea_modes_193);
+}
+
+static u8 cea_next_vic(u8 vic)
+{
+	if (++vic == 1 + ARRAY_SIZE(edid_cea_modes_1))
+		vic = 193;
+	return vic;
+}
+
 /*
  * Calculate the alternate clock for the CEA mode
  * (60Hz vs. 59.94Hz etc.)
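[Editor's note] The VIC table above is now split in two (VICs 1-127 and 193-219), so nothing may index it directly any more; every lookup and every walk has to go through cea_mode_for_vic()/cea_next_vic(). A minimal standalone sketch of the same two-range pattern, with the mode data reduced to placeholder ints so it compiles on its own:

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Stand-ins for the two tables; the real entries are struct drm_display_mode. */
static const int modes_1[127]  = { 0 };
static const int modes_193[27] = { 0 };

static const int *mode_for_vic(unsigned int vic)
{
	if (vic >= 1 && vic < 1 + ARRAY_SIZE(modes_1))
		return &modes_1[vic - 1];
	if (vic >= 193 && vic < 193 + ARRAY_SIZE(modes_193))
		return &modes_193[vic - 193];
	return NULL;	/* VICs 128..192 and above 219 have no entry */
}

static unsigned int next_vic(unsigned int vic)
{
	/* Skip the 128..192 hole when walking every defined VIC. */
	if (++vic == 1 + ARRAY_SIZE(modes_1))
		vic = 193;
	return vic;
}

int main(void)
{
	unsigned int vic, defined = 0;

	for (vic = 1; vic < 193 + ARRAY_SIZE(modes_193); vic = next_vic(vic))
		if (mode_for_vic(vic))
			defined++;

	printf("%u VICs defined\n", defined);	/* 127 + 27 = 154 */
	return 0;
}

The point of next_vic() is visible in the loop: it steps through 1..127, then jumps straight to 193, so the undefined 128..192 range is never touched.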
@@ -3108,14 +3272,14 @@ cea_mode_alternate_timings(u8 vic, struct drm_display_mode *mode)
 	 * get the other variants by simply increasing the
 	 * vertical front porch length.
 	 */
-	BUILD_BUG_ON(edid_cea_modes[8].vtotal != 262 ||
-		     edid_cea_modes[9].vtotal != 262 ||
-		     edid_cea_modes[12].vtotal != 262 ||
-		     edid_cea_modes[13].vtotal != 262 ||
-		     edid_cea_modes[23].vtotal != 312 ||
-		     edid_cea_modes[24].vtotal != 312 ||
-		     edid_cea_modes[27].vtotal != 312 ||
-		     edid_cea_modes[28].vtotal != 312);
+	BUILD_BUG_ON(cea_mode_for_vic(8)->vtotal != 262 ||
+		     cea_mode_for_vic(9)->vtotal != 262 ||
+		     cea_mode_for_vic(12)->vtotal != 262 ||
+		     cea_mode_for_vic(13)->vtotal != 262 ||
+		     cea_mode_for_vic(23)->vtotal != 312 ||
+		     cea_mode_for_vic(24)->vtotal != 312 ||
+		     cea_mode_for_vic(27)->vtotal != 312 ||
+		     cea_mode_for_vic(28)->vtotal != 312);
 
 	if (((vic == 8 || vic == 9 ||
 	      vic == 12 || vic == 13) && mode->vtotal < 263) ||
@@ -3143,8 +3307,8 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
 	if (to_match->picture_aspect_ratio)
 		match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
 
-	for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
-		struct drm_display_mode cea_mode = edid_cea_modes[vic];
+	for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
+		struct drm_display_mode cea_mode = *cea_mode_for_vic(vic);
 		unsigned int clock1, clock2;
 
 		/* Check both 60Hz and 59.94Hz */
@@ -3182,8 +3346,8 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 	if (to_match->picture_aspect_ratio)
 		match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
 
-	for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
-		struct drm_display_mode cea_mode = edid_cea_modes[vic];
+	for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
+		struct drm_display_mode cea_mode = *cea_mode_for_vic(vic);
 		unsigned int clock1, clock2;
 
 		/* Check both 60Hz and 59.94Hz */
@@ -3206,28 +3370,31 @@ EXPORT_SYMBOL(drm_match_cea_mode);
 
 static bool drm_valid_cea_vic(u8 vic)
 {
-	return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes);
+	return cea_mode_for_vic(vic) != NULL;
 }
 
 static enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
 {
-	return edid_cea_modes[video_code].picture_aspect_ratio;
+	const struct drm_display_mode *mode = cea_mode_for_vic(video_code);
+
+	if (mode)
+		return mode->picture_aspect_ratio;
+
+	return HDMI_PICTURE_ASPECT_NONE;
+}
+
+static enum hdmi_picture_aspect drm_get_hdmi_aspect_ratio(const u8 video_code)
+{
+	return edid_4k_modes[video_code].picture_aspect_ratio;
 }
 
 /*
  * Calculate the alternate clock for HDMI modes (those from the HDMI vendor
  * specific block).
- *
- * It's almost like cea_mode_alternate_clock(), we just need to add an
- * exception for the VIC 4 mode (4096x2160@24Hz): no alternate clock for this
- * one.
  */
 static unsigned int
 hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
 {
-	if (hdmi_mode->vdisplay == 4096 && hdmi_mode->hdisplay == 2160)
-		return hdmi_mode->clock;
-
 	return cea_mode_alternate_clock(hdmi_mode);
 }
 
@@ -3240,6 +3407,9 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_
 	if (!to_match->clock)
 		return 0;
 
+	if (to_match->picture_aspect_ratio)
+		match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
+
 	for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
 		const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
 		unsigned int clock1, clock2;
@@ -3275,6 +3445,9 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
 	if (!to_match->clock)
 		return 0;
 
+	if (to_match->picture_aspect_ratio)
+		match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
+
 	for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
 		const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
 		unsigned int clock1, clock2;
@@ -3319,7 +3492,7 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
 		unsigned int clock1, clock2;
 
 		if (drm_valid_cea_vic(vic)) {
-			cea_mode = &edid_cea_modes[vic];
+			cea_mode = cea_mode_for_vic(vic);
 			clock2 = cea_mode_alternate_clock(cea_mode);
 		} else {
 			vic = drm_match_hdmi_mode(mode);
@@ -3394,7 +3567,7 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
 	if (!drm_valid_cea_vic(vic))
 		return NULL;
 
-	newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]);
+	newmode = drm_mode_duplicate(dev, cea_mode_for_vic(vic));
 	if (!newmode)
 		return NULL;
 
@@ -3428,7 +3601,7 @@ static int do_y420vdb_modes(struct drm_connector *connector,
 		if (!drm_valid_cea_vic(vic))
 			continue;
 
-		newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]);
+		newmode = drm_mode_duplicate(dev, cea_mode_for_vic(vic));
 		if (!newmode)
 			break;
 		bitmap_set(hdmi->y420_vdb_modes, vic, 1);
@@ -3997,7 +4170,7 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
 	vic = drm_match_cea_mode_clock_tolerance(mode, 5);
 	if (drm_valid_cea_vic(vic)) {
 		type = "CEA";
-		cea_mode = &edid_cea_modes[vic];
+		cea_mode = cea_mode_for_vic(vic);
 		clock1 = cea_mode->clock;
 		clock2 = cea_mode_alternate_clock(cea_mode);
 	} else {
@@ -4279,12 +4452,12 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
 	cea = drm_find_cea_extension(edid);
 	if (!cea) {
 		DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
-		return -ENOENT;
+		return 0;
 	}
 
 	if (cea_revision(cea) < 3) {
 		DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
-		return -EOPNOTSUPP;
+		return 0;
 	}
 
 	if (cea_db_offsets(cea, &start, &end)) {
@@ -4340,12 +4513,12 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
 	cea = drm_find_cea_extension(edid);
 	if (!cea) {
 		DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
-		return -ENOENT;
+		return 0;
 	}
 
 	if (cea_revision(cea) < 3) {
 		DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
-		return -EOPNOTSUPP;
+		return 0;
 	}
 
 	if (cea_db_offsets(cea, &start, &end)) {
@@ -4573,7 +4746,7 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
 		if (scdc->supported) {
 			scdc->scrambling.supported = true;
 
-			/* Few sinks support scrambling for cloks < 340M */
+			/* Few sinks support scrambling for clocks < 340M */
 			if ((hf_vsdb[6] & 0x8))
 				scdc->scrambling.low_rates = true;
 		}
@@ -5222,6 +5395,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 					 const struct drm_display_mode *mode)
 {
 	enum hdmi_picture_aspect picture_aspect;
+	u8 vic, hdmi_vic;
 	int err;
 
 	if (!frame || !mode)
@@ -5234,7 +5408,8 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
 		frame->pixel_repeat = 1;
 
-	frame->video_code = drm_mode_cea_vic(connector, mode);
+	vic = drm_mode_cea_vic(connector, mode);
+	hdmi_vic = drm_mode_hdmi_vic(connector, mode);
 
 	frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
 
@@ -5248,11 +5423,15 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 
 	/*
 	 * Populate picture aspect ratio from either
-	 * user input (if specified) or from the CEA mode list.
+	 * user input (if specified) or from the CEA/HDMI mode lists.
 	 */
 	picture_aspect = mode->picture_aspect_ratio;
-	if (picture_aspect == HDMI_PICTURE_ASPECT_NONE)
-		picture_aspect = drm_get_cea_aspect_ratio(frame->video_code);
+	if (picture_aspect == HDMI_PICTURE_ASPECT_NONE) {
+		if (vic)
+			picture_aspect = drm_get_cea_aspect_ratio(vic);
+		else if (hdmi_vic)
+			picture_aspect = drm_get_hdmi_aspect_ratio(hdmi_vic);
+	}
 
 	/*
 	 * The infoframe can't convey anything but none, 4:3
@@ -5260,12 +5439,20 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 	 * we can only satisfy it by specifying the right VIC.
 	 */
 	if (picture_aspect > HDMI_PICTURE_ASPECT_16_9) {
-		if (picture_aspect !=
-		    drm_get_cea_aspect_ratio(frame->video_code))
+		if (vic) {
+			if (picture_aspect != drm_get_cea_aspect_ratio(vic))
+				return -EINVAL;
+		} else if (hdmi_vic) {
+			if (picture_aspect != drm_get_hdmi_aspect_ratio(hdmi_vic))
+				return -EINVAL;
+		} else {
 			return -EINVAL;
+		}
+
 		picture_aspect = HDMI_PICTURE_ASPECT_NONE;
 	}
 
+	frame->video_code = vic;
 	frame->picture_aspect = picture_aspect;
 	frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
 	frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
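[Editor's note] The infoframe hunk above defers writing frame->video_code until the aspect checks pass, and falls back to the aspect implied by either the CEA VIC or the HDMI 1.4 VIC. A hedged sketch of that decision in isolation, with the two VIC sources collapsed into a single have_vic/from_vic pair and a local enum standing in for enum hdmi_picture_aspect:

/* Values mirror enum hdmi_picture_aspect in <linux/hdmi.h>. */
enum pic_aspect { ASPECT_NONE, ASPECT_4_3, ASPECT_16_9, ASPECT_64_27, ASPECT_256_135 };

static int pick_frame_aspect(enum pic_aspect user, enum pic_aspect from_vic,
			     int have_vic, enum pic_aspect *out)
{
	enum pic_aspect aspect = user;

	/* Fall back to the aspect implied by the (CEA or HDMI) VIC. */
	if (aspect == ASPECT_NONE && have_vic)
		aspect = from_vic;

	/*
	 * The AVI infoframe itself can only carry none/4:3/16:9; wider
	 * ratios must be implied by the VIC and are then sent as "none".
	 */
	if (aspect > ASPECT_16_9) {
		if (!have_vic || aspect != from_vic)
			return -1;	/* cannot be conveyed by the frame */
		aspect = ASPECT_NONE;
	}

	*out = aspect;
	return 0;
}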
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 80d88a55..e555281 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -140,6 +140,7 @@ int drm_encoder_init(struct drm_device *dev,
 		goto out_put;
 	}
 
+	INIT_LIST_HEAD(&encoder->bridge_chain);
 	list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
 	encoder->index = dev->mode_config.num_encoder++;
 
@@ -160,22 +161,16 @@ EXPORT_SYMBOL(drm_encoder_init);
 void drm_encoder_cleanup(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
+	struct drm_bridge *bridge, *next;
 
 	/* Note that the encoder_list is considered to be static; should we
 	 * remove the drm_encoder at runtime we would have to decrement all
 	 * the indices on the drm_encoder after us in the encoder_list.
 	 */
 
-	if (encoder->bridge) {
-		struct drm_bridge *bridge = encoder->bridge;
-		struct drm_bridge *next;
-
-		while (bridge) {
-			next = bridge->next;
-			drm_bridge_detach(bridge);
-			bridge = next;
-		}
-	}
+	list_for_each_entry_safe(bridge, next, &encoder->bridge_chain,
+				 chain_node)
+		drm_bridge_detach(bridge);
 
 	drm_mode_object_unregister(dev, &encoder->base);
 	kfree(encoder->name);
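[Editor's note] drm_encoder_cleanup() now iterates a real list instead of chasing encoder->bridge->next by hand. The _safe variant is what makes this legal, because drm_bridge_detach() unlinks the bridge that is currently being visited. A kernel-style fragment of the same idiom (not the drm code itself, just the pattern):

struct bridge {
	struct list_head chain_node;
	/* ... */
};

static void detach_all_bridges(struct list_head *chain)
{
	struct bridge *bridge, *next;

	/*
	 * list_for_each_entry_safe() remembers the next entry before the
	 * body runs, so deleting the current entry is fine; the plain
	 * list_for_each_entry() would dereference a node the body just
	 * unlinked.
	 */
	list_for_each_entry_safe(bridge, next, chain, chain_node)
		list_del(&bridge->chain_node);
}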
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index c0b0f60..9801c03 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -9,6 +9,7 @@
  *  Copyright (C) 2012 Red Hat
  */
 
+#include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem_cma_helper.h>
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d8e8f39..4c7cbce 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -95,10 +95,6 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
  * It will automatically set up deferred I/O if the driver requires a shadow
  * buffer.
  *
- * For other drivers, setup fbdev emulation by calling
- * drm_fb_helper_fbdev_setup() and tear it down by calling
- * drm_fb_helper_fbdev_teardown().
- *
  * At runtime drivers should restore the fbdev console by using
  * drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
  * They should also notify the fb helper code from updates to the output
@@ -195,6 +191,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
 {
 	struct drm_fb_helper *helper = info->par;
 	struct drm_client_dev *client = &helper->client;
+	struct drm_device *dev = helper->dev;
 	struct drm_crtc *crtc;
 	const struct drm_crtc_helper_funcs *funcs;
 	struct drm_mode_set *mode_set;
@@ -213,7 +210,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
 			continue;
 
 		if (!fb) {
-			DRM_ERROR("no fb to restore??\n");
+			drm_err(dev, "no fb to restore?\n");
 			continue;
 		}
 
@@ -567,8 +564,7 @@ EXPORT_SYMBOL(drm_fb_helper_unregister_fbi);
  * drm_fb_helper_fini - finialize a &struct drm_fb_helper
  * @fb_helper: driver-allocated fbdev helper, can be NULL
  *
- * This cleans up all remaining resources associated with @fb_helper. Must be
- * called after drm_fb_helper_unlink_fbi() was called.
+ * This cleans up all remaining resources associated with @fb_helper.
  */
 void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
 {
@@ -608,19 +604,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_fini);
 
-/**
- * drm_fb_helper_unlink_fbi - wrapper around unlink_framebuffer
- * @fb_helper: driver-allocated fbdev helper, can be NULL
- *
- * A wrapper around unlink_framebuffer implemented by fbdev core
- */
-void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
-{
-	if (fb_helper && fb_helper->fbdev)
-		unlink_framebuffer(fb_helper->fbdev);
-}
-EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
-
 static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
 {
 	struct drm_device *dev = fb_helper->dev;
@@ -1266,12 +1249,13 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 {
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_framebuffer *fb = fb_helper->fb;
+	struct drm_device *dev = fb_helper->dev;
 
 	if (in_dbg_master())
 		return -EINVAL;
 
 	if (var->pixclock != 0) {
-		DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
+		drm_dbg_kms(dev, "fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
 		var->pixclock = 0;
 	}
 
@@ -1286,7 +1270,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 	if (var->bits_per_pixel > fb->format->cpp[0] * 8 ||
 	    var->xres > fb->width || var->yres > fb->height ||
 	    var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
-		DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
+		drm_dbg_kms(dev, "fb requested width/height/bpp can't fit in current fb "
 			  "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
 			  var->xres, var->yres, var->bits_per_pixel,
 			  var->xres_virtual, var->yres_virtual,
@@ -1318,7 +1302,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 	 * so reject all pixel format changing requests.
 	 */
 	if (!drm_fb_pixel_format_equal(var, &info->var)) {
-		DRM_DEBUG("fbdev emulation doesn't support changing the pixel format\n");
+		drm_dbg_kms(dev, "fbdev emulation doesn't support changing the pixel format\n");
 		return -EINVAL;
 	}
 
@@ -1343,7 +1327,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
 		return -EBUSY;
 
 	if (var->pixclock != 0) {
-		DRM_ERROR("PIXEL CLOCK SET\n");
+		drm_err(fb_helper->dev, "PIXEL CLOCK SET\n");
 		return -EINVAL;
 	}
 
@@ -1453,6 +1437,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 					 int preferred_bpp)
 {
 	struct drm_client_dev *client = &fb_helper->client;
+	struct drm_device *dev = fb_helper->dev;
 	int ret = 0;
 	int crtc_count = 0;
 	struct drm_connector_list_iter conn_iter;
@@ -1516,7 +1501,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 		struct drm_plane *plane = crtc->primary;
 		int j;
 
-		DRM_DEBUG("test CRTC %u primary plane\n", drm_crtc_index(crtc));
+		drm_dbg_kms(dev, "test CRTC %u primary plane\n", drm_crtc_index(crtc));
 
 		for (j = 0; j < plane->format_count; j++) {
 			const struct drm_format_info *fmt;
@@ -1549,7 +1534,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 		}
 	}
 	if (sizes.surface_depth != best_depth && best_depth) {
-		DRM_INFO("requested bpp %d, scaled depth down to %d",
+		drm_info(dev, "requested bpp %d, scaled depth down to %d",
 			 sizes.surface_bpp, best_depth);
 		sizes.surface_depth = best_depth;
 	}
@@ -1581,7 +1566,9 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 		for (j = 0; j < mode_set->num_connectors; j++) {
 			struct drm_connector *connector = mode_set->connectors[j];
 
-			if (connector->has_tile) {
+			if (connector->has_tile &&
+			    desired_mode->hdisplay == connector->tile_h_size &&
+			    desired_mode->vdisplay == connector->tile_v_size) {
 				lasth = (connector->tile_h_loc == (connector->num_h_tile - 1));
 				lastv = (connector->tile_v_loc == (connector->num_v_tile - 1));
 				/* cloning to multiple tiles is just crazy-talk, so: */
@@ -1597,7 +1584,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 	mutex_unlock(&client->modeset_mutex);
 
 	if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
-		DRM_INFO("Cannot find any crtc or sizes\n");
+		drm_info(dev, "Cannot find any crtc or sizes\n");
 
 		/* First time: disable all crtc's.. */
 		if (!fb_helper->deferred_setup)
@@ -1912,7 +1899,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 
 	drm_master_internal_release(fb_helper->dev);
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(fb_helper->dev, "\n");
 
 	drm_client_modeset_probe(&fb_helper->client, fb_helper->fb->width, fb_helper->fb->height);
 	drm_setup_crtcs_fb(fb_helper);
@@ -1925,108 +1912,6 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
 
 /**
- * drm_fb_helper_fbdev_setup() - Setup fbdev emulation
- * @dev: DRM device
- * @fb_helper: fbdev helper structure to set up
- * @funcs: fbdev helper functions
- * @preferred_bpp: Preferred bits per pixel for the device.
- *                 @dev->mode_config.preferred_depth is used if this is zero.
- * @max_conn_count: Maximum number of connectors (not used)
- *
- * This function sets up fbdev emulation and registers fbdev for access by
- * userspace. If all connectors are disconnected, setup is deferred to the next
- * time drm_fb_helper_hotplug_event() is called.
- * The caller must to provide a &drm_fb_helper_funcs->fb_probe callback
- * function.
- *
- * Use drm_fb_helper_fbdev_teardown() to destroy the fbdev.
- *
- * See also: drm_fb_helper_initial_config(), drm_fbdev_generic_setup().
- *
- * Returns:
- * Zero on success or negative error code on failure.
- */
-int drm_fb_helper_fbdev_setup(struct drm_device *dev,
-			      struct drm_fb_helper *fb_helper,
-			      const struct drm_fb_helper_funcs *funcs,
-			      unsigned int preferred_bpp,
-			      unsigned int max_conn_count)
-{
-	int ret;
-
-	if (!preferred_bpp)
-		preferred_bpp = dev->mode_config.preferred_depth;
-	if (!preferred_bpp)
-		preferred_bpp = 32;
-
-	drm_fb_helper_prepare(dev, fb_helper, funcs);
-
-	ret = drm_fb_helper_init(dev, fb_helper, 0);
-	if (ret < 0) {
-		DRM_DEV_ERROR(dev->dev, "fbdev: Failed to initialize (ret=%d)\n", ret);
-		return ret;
-	}
-
-	if (!drm_drv_uses_atomic_modeset(dev))
-		drm_helper_disable_unused_functions(dev);
-
-	ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
-	if (ret < 0) {
-		DRM_DEV_ERROR(dev->dev, "fbdev: Failed to set configuration (ret=%d)\n", ret);
-		goto err_drm_fb_helper_fini;
-	}
-
-	return 0;
-
-err_drm_fb_helper_fini:
-	drm_fb_helper_fbdev_teardown(dev);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_fb_helper_fbdev_setup);
-
-/**
- * drm_fb_helper_fbdev_teardown - Tear down fbdev emulation
- * @dev: DRM device
- *
- * This function unregisters fbdev if not already done and cleans up the
- * associated resources including the &drm_framebuffer.
- * The driver is responsible for freeing the &drm_fb_helper structure which is
- * stored in &drm_device->fb_helper. Do note that this pointer has been cleared
- * when this function returns.
- *
- * In order to support device removal/unplug while file handles are still open,
- * drm_fb_helper_unregister_fbi() should be called on device removal and
- * drm_fb_helper_fbdev_teardown() in the &drm_driver->release callback when
- * file handles are closed.
- */
-void drm_fb_helper_fbdev_teardown(struct drm_device *dev)
-{
-	struct drm_fb_helper *fb_helper = dev->fb_helper;
-	struct fb_ops *fbops = NULL;
-
-	if (!fb_helper)
-		return;
-
-	/* Unregister if it hasn't been done already */
-	if (fb_helper->fbdev && fb_helper->fbdev->dev)
-		drm_fb_helper_unregister_fbi(fb_helper);
-
-	if (fb_helper->fbdev && fb_helper->fbdev->fbdefio) {
-		fb_deferred_io_cleanup(fb_helper->fbdev);
-		kfree(fb_helper->fbdev->fbdefio);
-		fbops = fb_helper->fbdev->fbops;
-	}
-
-	drm_fb_helper_fini(fb_helper);
-	kfree(fbops);
-
-	if (fb_helper->fb)
-		drm_framebuffer_remove(fb_helper->fb);
-}
-EXPORT_SYMBOL(drm_fb_helper_fbdev_teardown);
-
-/**
  * drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation
  * @dev: DRM device
  *
@@ -2079,7 +1964,6 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user)
 static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
 {
 	struct fb_info *fbi = fb_helper->fbdev;
-	struct fb_ops *fbops = NULL;
 	void *shadow = NULL;
 
 	if (!fb_helper->dev)
@@ -2088,15 +1972,11 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
 	if (fbi && fbi->fbdefio) {
 		fb_deferred_io_cleanup(fbi);
 		shadow = fbi->screen_buffer;
-		fbops = fbi->fbops;
 	}
 
 	drm_fb_helper_fini(fb_helper);
 
-	if (shadow) {
-		vfree(shadow);
-		kfree(fbops);
-	}
+	vfree(shadow);
 
 	drm_client_framebuffer_delete(fb_helper->buffer);
 }
@@ -2127,7 +2007,7 @@ static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 		return -ENODEV;
 }
 
-static struct fb_ops drm_fbdev_fb_ops = {
+static const struct fb_ops drm_fbdev_fb_ops = {
 	.owner		= THIS_MODULE,
 	DRM_FB_HELPER_DEFAULT_OPS,
 	.fb_open	= drm_fbdev_fb_open,
@@ -2146,32 +2026,26 @@ static struct fb_deferred_io drm_fbdev_defio = {
 	.deferred_io	= drm_fb_helper_deferred_io,
 };
 
-/**
- * drm_fb_helper_generic_probe - Generic fbdev emulation probe helper
- * @fb_helper: fbdev helper structure
- * @sizes: describes fbdev size and scanout surface size
- *
+/*
  * This function uses the client API to create a framebuffer backed by a dumb buffer.
  *
  * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect,
  * fb_copyarea, fb_imageblit.
- *
- * Returns:
- * Zero on success or negative error code on failure.
  */
-int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
-				struct drm_fb_helper_surface_size *sizes)
+static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+				       struct drm_fb_helper_surface_size *sizes)
 {
 	struct drm_client_dev *client = &fb_helper->client;
+	struct drm_device *dev = fb_helper->dev;
 	struct drm_client_buffer *buffer;
 	struct drm_framebuffer *fb;
 	struct fb_info *fbi;
 	u32 format;
 	void *vaddr;
 
-	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
-		      sizes->surface_width, sizes->surface_height,
-		      sizes->surface_bpp);
+	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
+		    sizes->surface_width, sizes->surface_height,
+		    sizes->surface_bpp);
 
 	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
 	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
@@ -2194,24 +2068,10 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 	drm_fb_helper_fill_info(fbi, fb_helper, sizes);
 
 	if (drm_fbdev_use_shadow_fb(fb_helper)) {
-		struct fb_ops *fbops;
-		void *shadow;
-
-		/*
-		 * fb_deferred_io_cleanup() clears &fbops->fb_mmap so a per
-		 * instance version is necessary.
-		 */
-		fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
-		shadow = vzalloc(fbi->screen_size);
-		if (!fbops || !shadow) {
-			kfree(fbops);
-			vfree(shadow);
+		fbi->screen_buffer = vzalloc(fbi->screen_size);
+		if (!fbi->screen_buffer)
 			return -ENOMEM;
-		}
 
-		*fbops = *fbi->fbops;
-		fbi->fbops = fbops;
-		fbi->screen_buffer = shadow;
 		fbi->fbdefio = &drm_fbdev_defio;
 
 		fb_deferred_io_init(fbi);
@@ -2232,7 +2092,6 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 
 	return 0;
 }
-EXPORT_SYMBOL(drm_fb_helper_generic_probe);
 
 static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
 	.fb_probe = drm_fb_helper_generic_probe,
@@ -2270,7 +2129,7 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
 		return drm_fb_helper_hotplug_event(dev->fb_helper);
 
 	if (!dev->mode_config.num_connector) {
-		DRM_DEV_DEBUG(dev->dev, "No connectors found, will not create framebuffer!\n");
+		drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n");
 		return 0;
 	}
 
@@ -2295,7 +2154,7 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
 	fb_helper->dev = NULL;
 	fb_helper->fbdev = NULL;
 
-	DRM_DEV_ERROR(dev->dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
+	drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
 
 	return ret;
 }
@@ -2314,8 +2173,7 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
  *                 @dev->mode_config.preferred_depth is used if this is zero.
  *
  * This function sets up generic fbdev emulation for drivers that supports
- * dumb buffers with a virtual address and that can be mmap'ed. If the driver
- * does not support these functions, it could use drm_fb_helper_fbdev_setup().
+ * dumb buffers with a virtual address and that can be mmap'ed.
  *
  * Restore, hotplug events and teardown are all taken care of. Drivers that do
  * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
@@ -2353,7 +2211,7 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
 	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
 	if (ret) {
 		kfree(fb_helper);
-		DRM_DEV_ERROR(dev->dev, "Failed to register client: %d\n", ret);
+		drm_err(dev, "Failed to register client: %d\n", ret);
 		return ret;
 	}
 
@@ -2365,7 +2223,7 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
 
 	ret = drm_fbdev_client_hotplug(&fb_helper->client);
 	if (ret)
-		DRM_DEV_DEBUG(dev->dev, "client hotplug ret=%d\n", ret);
+		drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
 
 	drm_client_register(&fb_helper->client);
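[Editor's note] With drm_fb_helper_fbdev_setup()/_teardown() and the exported generic probe gone, the only supported entry point for generic fbdev emulation is drm_fbdev_generic_setup(). A hedged sketch of what a driver probe path reduces to after this series; foo_create_drm_device() is a made-up placeholder for however the driver allocates its drm_device:

static int foo_probe(struct platform_device *pdev)
{
	struct drm_device *drm;
	int ret;

	drm = foo_create_drm_device(pdev);	/* hypothetical driver helper */
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/*
	 * No fb_helper plumbing left in the driver: the generic client
	 * registers itself, defers setup until a connector appears, and
	 * cleans up on unregister. 32 is the preferred bpp; 0 would fall
	 * back to mode_config.preferred_depth.
	 */
	drm_fbdev_generic_setup(drm, 32);

	return 0;
}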
 
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index ea34bc9..92d16724 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -31,7 +31,9 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/anon_inodes.h>
 #include <linux/dma-fence.h>
+#include <linux/file.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/poll.h>
@@ -285,7 +287,7 @@ static int drm_cpu_valid(void)
 }
 
 /*
- * Called whenever a process opens /dev/drm.
+ * Called whenever a process opens a drm node.
  *
  * \param filp file pointer.
  * \param minor acquired minor-object.
@@ -754,3 +756,43 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
 	spin_unlock_irqrestore(&dev->event_lock, irqflags);
 }
 EXPORT_SYMBOL(drm_send_event);
+
+/**
+ * mock_drm_getfile - Create a new struct file for the drm device
+ * @minor: drm minor to wrap (e.g. #drm_device.primary)
+ * @flags: file creation mode (O_RDWR etc)
+ *
+ * This creates a new struct file that wraps a DRM file context around a
+ * DRM minor. This mimics userspace opening e.g. /dev/dri/card0, but without
+ * invoking userspace. The struct file may be operated on using its f_op
+ * (the drm_device.driver.fops) to mimic userspace operations, or be supplied
+ * to userspace-facing functions as an internal/anonymous client.
+ *
+ * RETURNS:
+ * Pointer to newly created struct file, ERR_PTR on failure.
+ */
+struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
+{
+	struct drm_device *dev = minor->dev;
+	struct drm_file *priv;
+	struct file *file;
+
+	priv = drm_file_alloc(minor);
+	if (IS_ERR(priv))
+		return ERR_CAST(priv);
+
+	file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
+	if (IS_ERR(file)) {
+		drm_file_free(priv);
+		return file;
+	}
+
+	/* Everyone shares a single global address space */
+	file->f_mapping = dev->anon_inode->i_mapping;
+
+	drm_dev_get(dev);
+	priv->filp = file;
+
+	return file;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
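[Editor's note] mock_drm_getfile() is exported for tests only, so the expected caller is a selftest that wants a drm_file without opening /dev/dri/*. A hedged fragment of that use; the surrounding selftest scaffolding, and the exact header providing the declaration, are assumed:

static int igt_mock_open(struct drm_device *dev)	/* hypothetical selftest */
{
	struct file *file;

	file = mock_drm_getfile(dev->primary, O_RDWR);
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* file->private_data is the drm_file; file->f_op is the driver fops. */

	fput(file);	/* drops the drm_file and the device reference taken above */
	return 0;
}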
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index c630064..b234bfa 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -253,17 +253,17 @@ const struct drm_format_info *__drm_format_info(u32 format)
 		  .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
 		  .hsub = 2, .vsub = 2, .is_yuv = true },
 		{ .format = DRM_FORMAT_P010,            .depth = 0,  .num_planes = 2,
-		  .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+		  .char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
 		  .hsub = 2, .vsub = 2, .is_yuv = true},
 		{ .format = DRM_FORMAT_P012,		.depth = 0,  .num_planes = 2,
-		  .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+		  .char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
 		   .hsub = 2, .vsub = 2, .is_yuv = true},
 		{ .format = DRM_FORMAT_P016,		.depth = 0,  .num_planes = 2,
-		  .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+		  .char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
 		  .hsub = 2, .vsub = 2, .is_yuv = true},
 		{ .format = DRM_FORMAT_P210,		.depth = 0,
 		  .num_planes = 2, .char_per_block = { 2, 4, 0 },
-		  .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 2,
+		  .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2,
 		  .vsub = 1, .is_yuv = true },
 		{ .format = DRM_FORMAT_VUY101010,	.depth = 0,
 		  .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 1, .vsub = 1,
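[Editor's note] For the P01x formats the chroma plane now advertises 1x1 blocks of 4 bytes (one Cb/Cr pair) instead of a zero block size, so its geometry is spelled out explicitly rather than relying on any implicit fallback. Quick standalone arithmetic for a 3840-pixel-wide P010 buffer, using only the table fields shown above (the raw math, not the in-kernel helper):

#include <stdio.h>

int main(void)
{
	unsigned int width = 3840;

	/* Plane 0 (Y): char_per_block = 2, block_w = 1, no subsampling. */
	unsigned int y_pitch = width * 2;

	/* Plane 1 (CbCr): hsub = 2 halves the plane width; each 1x1 block
	 * of that plane is 4 bytes (one Cb/Cr pair, 10 bits padded to 16). */
	unsigned int uv_pitch = (width / 2) * 4;

	printf("P010 %u wide: Y pitch %u, CbCr pitch %u\n",
	       width, y_pitch, uv_pitch);	/* both 7680 bytes */
	return 0;
}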
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 000fa4a..a9e4a61 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1114,9 +1114,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 	drm_gem_object_get(obj);
 
 	if (obj->funcs && obj->funcs->mmap) {
-		/* Remove the fake offset */
-		vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
-
 		ret = obj->funcs->mmap(obj, vma);
 		if (ret) {
 			drm_gem_object_put_unlocked(obj);
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index b9bcd31..3a7ace1 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -74,8 +74,7 @@ drm_gem_fb_alloc(struct drm_device *dev,
 
 	ret = drm_framebuffer_init(dev, fb, funcs);
 	if (ret) {
-		DRM_DEV_ERROR(dev->dev, "Failed to init framebuffer: %d\n",
-			      ret);
+		drm_err(dev, "Failed to init framebuffer: %d\n", ret);
 		kfree(fb);
 		return ERR_PTR(ret);
 	}
@@ -160,7 +159,7 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
 
 		objs[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
 		if (!objs[i]) {
-			DRM_DEBUG_KMS("Failed to lookup GEM object\n");
+			drm_dbg_kms(dev, "Failed to lookup GEM object\n");
 			ret = -ENOENT;
 			goto err_gem_object_put;
 		}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 0810d3e..a421a2e 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -528,6 +528,9 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	struct drm_gem_shmem_object *shmem;
 	int ret;
 
+	/* Remove the fake offset */
+	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+
 	shmem = to_drm_gem_shmem_obj(obj);
 
 	ret = drm_gem_shmem_get_pages(shmem);
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 666cb4c..a486332 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -2,6 +2,7 @@
 
 #include <drm/drm_debugfs.h>
 #include <drm/drm_device.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem_ttm_helper.h>
@@ -92,14 +93,18 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
 }
 
 static int drm_gem_vram_init(struct drm_device *dev,
-			     struct ttm_bo_device *bdev,
 			     struct drm_gem_vram_object *gbo,
-			     size_t size, unsigned long pg_align,
-			     bool interruptible)
+			     size_t size, unsigned long pg_align)
 {
+	struct drm_vram_mm *vmm = dev->vram_mm;
+	struct ttm_bo_device *bdev;
 	int ret;
 	size_t acc_size;
 
+	if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
+		return -EINVAL;
+	bdev = &vmm->bdev;
+
 	gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
 
 	ret = drm_gem_object_init(dev, &gbo->bo.base, size);
@@ -112,7 +117,7 @@ static int drm_gem_vram_init(struct drm_device *dev,
 	drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
 	ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
-			  &gbo->placement, pg_align, interruptible, acc_size,
+			  &gbo->placement, pg_align, false, acc_size,
 			  NULL, NULL, ttm_buffer_object_destroy);
 	if (ret)
 		goto err_drm_gem_object_release;
@@ -127,29 +132,33 @@ static int drm_gem_vram_init(struct drm_device *dev,
 /**
  * drm_gem_vram_create() - Creates a VRAM-backed GEM object
  * @dev:		the DRM device
- * @bdev:		the TTM BO device backing the object
  * @size:		the buffer size in bytes
  * @pg_align:		the buffer's alignment in multiples of the page size
- * @interruptible:	sleep interruptible if waiting for memory
  *
  * Returns:
  * A new instance of &struct drm_gem_vram_object on success, or
  * an ERR_PTR()-encoded error code otherwise.
  */
 struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
-						struct ttm_bo_device *bdev,
 						size_t size,
-						unsigned long pg_align,
-						bool interruptible)
+						unsigned long pg_align)
 {
 	struct drm_gem_vram_object *gbo;
 	int ret;
 
-	gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
-	if (!gbo)
-		return ERR_PTR(-ENOMEM);
+	if (dev->driver->gem_create_object) {
+		struct drm_gem_object *gem =
+			dev->driver->gem_create_object(dev, size);
+		if (!gem)
+			return ERR_PTR(-ENOMEM);
+		gbo = drm_gem_vram_of_gem(gem);
+	} else {
+		gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
+		if (!gbo)
+			return ERR_PTR(-ENOMEM);
+	}
 
-	ret = drm_gem_vram_init(dev, bdev, gbo, size, pg_align, interruptible);
+	ret = drm_gem_vram_init(dev, gbo, size, pg_align);
 	if (ret < 0)
 		goto err_kfree;
 
@@ -483,9 +492,8 @@ EXPORT_SYMBOL(drm_gem_vram_vunmap);
 	Helper for implementing &struct drm_driver.dumb_create
  * @file:		the DRM file
  * @dev:		the DRM device
- * @bdev:		the TTM BO device managing the buffer object
  * @pg_align:		the buffer's alignment in multiples of the page size
- * @interruptible:	sleep interruptible if waiting for memory
+ * @pitch_align:	the scanline's alignment in powers of 2
  * @args:		the arguments as provided to \
 				&struct drm_driver.dumb_create
  *
@@ -500,9 +508,8 @@ EXPORT_SYMBOL(drm_gem_vram_vunmap);
  */
 int drm_gem_vram_fill_create_dumb(struct drm_file *file,
 				  struct drm_device *dev,
-				  struct ttm_bo_device *bdev,
 				  unsigned long pg_align,
-				  bool interruptible,
+				  unsigned long pitch_align,
 				  struct drm_mode_create_dumb *args)
 {
 	size_t pitch, size;
@@ -510,14 +517,19 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file,
 	int ret;
 	u32 handle;
 
-	pitch = args->width * ((args->bpp + 7) / 8);
+	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+	if (pitch_align) {
+		if (WARN_ON_ONCE(!is_power_of_2(pitch_align)))
+			return -EINVAL;
+		pitch = ALIGN(pitch, pitch_align);
+	}
 	size = pitch * args->height;
 
 	size = roundup(size, PAGE_SIZE);
 	if (!size)
 		return -EINVAL;
 
-	gbo = drm_gem_vram_create(dev, bdev, size, pg_align, interruptible);
+	gbo = drm_gem_vram_create(dev, size, pg_align);
 	if (IS_ERR(gbo))
 		return PTR_ERR(gbo);
 
@@ -612,8 +624,7 @@ int drm_gem_vram_driver_dumb_create(struct drm_file *file,
 	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
 		return -EINVAL;
 
-	return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, 0,
-					     false, args);
+	return drm_gem_vram_fill_create_dumb(file, dev, 0, 0, args);
 }
 EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
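[Editor's note] drm_gem_vram_fill_create_dumb() now computes the pitch with DIV_ROUND_UP() and an optional power-of-two pitch_align instead of taking a TTM bdev and interruptible flag. The arithmetic is small enough to check by hand; a standalone sketch with ALIGN() and DIV_ROUND_UP() expanded (example values only; the real code additionally rounds the total size up to PAGE_SIZE):

#include <stdio.h>

int main(void)
{
	unsigned int width = 1022, height = 768, bpp = 24;
	unsigned int pitch_align = 256;			/* must be a power of two */
	unsigned int cpp, pitch, size;

	cpp = (bpp + 7) / 8;					/* DIV_ROUND_UP(bpp, 8) -> 3 */
	pitch = width * cpp;					/* 3066 */
	pitch = (pitch + pitch_align - 1) & ~(pitch_align - 1);	/* ALIGN -> 3072 */
	size = pitch * height;					/* 2359296 */

	printf("pitch=%u size=%u\n", pitch, size);
	return 0;
}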
 
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 51a2055..6937bf9 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -45,12 +45,34 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor);
 void drm_file_free(struct drm_file *file);
 void drm_lastclose(struct drm_device *dev);
 
+#ifdef CONFIG_PCI
+
 /* drm_pci.c */
 int drm_irq_by_busid(struct drm_device *dev, void *data,
 		     struct drm_file *file_priv);
 void drm_pci_agp_destroy(struct drm_device *dev);
 int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
 
+#else
+
+static inline int drm_irq_by_busid(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv)
+{
+	return -EINVAL;
+}
+
+static inline void drm_pci_agp_destroy(struct drm_device *dev)
+{
+}
+
+static inline int drm_pci_set_busid(struct drm_device *dev,
+				    struct drm_master *master)
+{
+	return -EINVAL;
+}
+
+#endif
+
 /* drm_prime.c */
 int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
 				 struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index fcd728d..5afb3968 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -652,8 +652,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, 0),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_RENDER_ALLOW),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, 0),
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 2e8ce99..2c79e81 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -360,7 +360,8 @@ void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *m
 	/*
 	 * Since the master is disappearing, so is the
 	 * possibility to lock.
-	 */	mutex_lock(&dev->struct_mutex);
+	 */
+	mutex_lock(&dev->struct_mutex);
 	if (master->lock.hw_lock) {
 		if (dev->sigdata.lock == master->lock.hw_lock)
 			dev->sigdata.lock = NULL;
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index e34058c..16bff1b 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -367,9 +367,9 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
 	memset(dbidev->tx_buf, 0, len);
 
 	mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0,
-			 (width >> 8) & 0xFF, (width - 1) & 0xFF);
+			 ((width - 1) >> 8) & 0xFF, (width - 1) & 0xFF);
 	mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0,
-			 (height >> 8) & 0xFF, (height - 1) & 0xFF);
+			 ((height - 1) >> 8) & 0xFF, (height - 1) & 0xFF);
 	mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
 			     (u8 *)dbidev->tx_buf, len);
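[Editor's note] The mipi_dbi_blank() change fixes an off-by-one: the DCS end column/page address is inclusive, so its high byte has to come from width - 1, not width. The two only differ when the size crosses a multiple of 256, which is easy to see with a couple of values (standalone C):

#include <stdio.h>

static void show_end_address(unsigned int width)
{
	unsigned int end = width - 1;	/* DCS end address is inclusive */

	printf("width %4u: end bytes %02X %02X (old high byte: %02X)\n",
	       width, (end >> 8) & 0xFF, end & 0xFF, (width >> 8) & 0xFF);
}

int main(void)
{
	show_end_address(320);	/* 01 3F - old code happened to match */
	show_end_address(256);	/* 00 FF - old code sent 01, i.e. column 511 */
	return 0;
}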
 
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index bd2498b..5553189 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -33,6 +33,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
+#include <drm/drm_dsc.h>
 #include <video/mipi_display.h>
 
 /**
@@ -373,6 +374,7 @@ bool mipi_dsi_packet_format_is_short(u8 type)
 	case MIPI_DSI_V_SYNC_END:
 	case MIPI_DSI_H_SYNC_START:
 	case MIPI_DSI_H_SYNC_END:
+	case MIPI_DSI_COMPRESSION_MODE:
 	case MIPI_DSI_END_OF_TRANSMISSION:
 	case MIPI_DSI_COLOR_MODE_OFF:
 	case MIPI_DSI_COLOR_MODE_ON:
@@ -387,7 +389,7 @@ bool mipi_dsi_packet_format_is_short(u8 type)
 	case MIPI_DSI_DCS_SHORT_WRITE:
 	case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
 	case MIPI_DSI_DCS_READ:
-	case MIPI_DSI_DCS_COMPRESSION_MODE:
+	case MIPI_DSI_EXECUTE_QUEUE:
 	case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
 		return true;
 	}
@@ -406,11 +408,12 @@ EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
 bool mipi_dsi_packet_format_is_long(u8 type)
 {
 	switch (type) {
-	case MIPI_DSI_PPS_LONG_WRITE:
 	case MIPI_DSI_NULL_PACKET:
 	case MIPI_DSI_BLANKING_PACKET:
 	case MIPI_DSI_GENERIC_LONG_WRITE:
 	case MIPI_DSI_DCS_LONG_WRITE:
+	case MIPI_DSI_PICTURE_PARAMETER_SET:
+	case MIPI_DSI_COMPRESSED_PIXEL_STREAM:
 	case MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20:
 	case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24:
 	case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16:
@@ -547,6 +550,56 @@ int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
 EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
 
 /**
+ * mipi_dsi_compression_mode() - enable/disable DSC on the peripheral
+ * @dsi: DSI peripheral device
+ * @enable: Whether to enable or disable the DSC
+ *
+ * Enable or disable Display Stream Compression on the peripheral using the
+ * default Picture Parameter Set and VESA DSC 1.1 algorithm.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
+{
+	/* Note: Needs updating for non-default PPS or algorithm */
+	u8 tx[2] = { enable << 0, 0 };
+	struct mipi_dsi_msg msg = {
+		.channel = dsi->channel,
+		.type = MIPI_DSI_COMPRESSION_MODE,
+		.tx_len = sizeof(tx),
+		.tx_buf = tx,
+	};
+	int ret = mipi_dsi_device_transfer(dsi, &msg);
+
+	return (ret < 0) ? ret : 0;
+}
+EXPORT_SYMBOL(mipi_dsi_compression_mode);
+
+/**
+ * mipi_dsi_picture_parameter_set() - transmit the DSC PPS to the peripheral
+ * @dsi: DSI peripheral device
+ * @pps: VESA DSC 1.1 Picture Parameter Set
+ *
+ * Transmit the VESA DSC 1.1 Picture Parameter Set to the peripheral.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+				       const struct drm_dsc_picture_parameter_set *pps)
+{
+	struct mipi_dsi_msg msg = {
+		.channel = dsi->channel,
+		.type = MIPI_DSI_PICTURE_PARAMETER_SET,
+		.tx_len = sizeof(*pps),
+		.tx_buf = pps,
+	};
+	int ret = mipi_dsi_device_transfer(dsi, &msg);
+
+	return (ret < 0) ? ret : 0;
+}
+EXPORT_SYMBOL(mipi_dsi_picture_parameter_set);
+
+/**
  * mipi_dsi_generic_write() - transmit data using a generic write packet
  * @dsi: DSI peripheral device
  * @payload: buffer containing the payload
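[Editor's note] The two new helpers give panel drivers a uniform way to bring up DSC. A hedged fragment of how a panel driver might sequence them, on the assumption that the PPS is programmed before compression is switched on; the function is illustrative, not taken from an existing driver:

static int panel_enable_dsc(struct mipi_dsi_device *dsi,
			    const struct drm_dsc_picture_parameter_set *pps)
{
	int ret;

	/* Program the VESA DSC 1.1 PPS before switching compression on. */
	ret = mipi_dsi_picture_parameter_set(dsi, pps);
	if (ret < 0)
		return ret;

	return mipi_dsi_compression_mode(dsi, true);
}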
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 3b570a4..08e6eff6 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -27,6 +27,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_mode_config.h>
 #include <drm/drm_print.h>
+#include <linux/dma-resv.h>
 
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
@@ -415,6 +416,33 @@ void drm_mode_config_init(struct drm_device *dev)
 	dev->mode_config.num_crtc = 0;
 	dev->mode_config.num_encoder = 0;
 	dev->mode_config.num_total_plane = 0;
+
+	if (IS_ENABLED(CONFIG_LOCKDEP)) {
+		struct drm_modeset_acquire_ctx modeset_ctx;
+		struct ww_acquire_ctx resv_ctx;
+		struct dma_resv resv;
+		int ret;
+
+		dma_resv_init(&resv);
+
+		drm_modeset_acquire_init(&modeset_ctx, 0);
+		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+				       &modeset_ctx);
+		if (ret == -EDEADLK)
+			ret = drm_modeset_backoff(&modeset_ctx);
+
+		ww_acquire_init(&resv_ctx, &reservation_ww_class);
+		ret = dma_resv_lock(&resv, &resv_ctx);
+		if (ret == -EDEADLK)
+			dma_resv_lock_slow(&resv, &resv_ctx);
+
+		dma_resv_unlock(&resv);
+		ww_acquire_fini(&resv_ctx);
+
+		drm_modeset_drop_locks(&modeset_ctx);
+		drm_modeset_acquire_fini(&modeset_ctx);
+		dma_resv_fini(&resv);
+	}
 }
 EXPORT_SYMBOL(drm_mode_config_init);
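[Editor's note] The CONFIG_LOCKDEP block added above takes the connection mutex and a throw-away dma_resv lock once, in the order the DRM core expects, purely so lockdep learns that ordering at init time rather than on a rare runtime path. The same priming idiom in its simplest form, with two plain mutexes standing in for the modeset/dma_resv locks:

static DEFINE_MUTEX(outer_lock);
static DEFINE_MUTEX(inner_lock);

static void prime_lock_order(void)
{
	/*
	 * Acquire and release once in the intended order; lockdep now knows
	 * outer_lock -> inner_lock, and any later inner_lock -> outer_lock
	 * nesting is flagged immediately instead of on a rare deadlock.
	 */
	mutex_lock(&outer_lock);
	mutex_lock(&inner_lock);
	mutex_unlock(&inner_lock);
	mutex_unlock(&outer_lock);
}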
 
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index 6a23e36..35c2719 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -224,12 +224,26 @@ EXPORT_SYMBOL(drm_mode_object_get);
  * This attaches the given property to the modeset object with the given initial
  * value. Currently this function cannot fail since the properties are stored in
  * a statically sized array.
+ *
+ * Note that all properties must be attached before the object itself is
+ * registered and accessible from userspace.
  */
 void drm_object_attach_property(struct drm_mode_object *obj,
 				struct drm_property *property,
 				uint64_t init_val)
 {
 	int count = obj->properties->count;
+	struct drm_device *dev = property->dev;
+
+
+	if (obj->type == DRM_MODE_OBJECT_CONNECTOR) {
+		struct drm_connector *connector = obj_to_connector(obj);
+
+		WARN_ON(!dev->driver->load &&
+			connector->registration_state == DRM_CONNECTOR_REGISTERED);
+	} else {
+		WARN_ON(!dev->driver->load && dev->registered);
+	}
 
 	if (count == DRM_OBJECT_MAX_PROPERTY) {
 		WARN(1, "Failed to attach object property (type: 0x%x). Please "
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 8823269..10336b1 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -233,7 +233,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
 		/* 3) Nominal HSync width (% of line period) - default 8 */
 #define CVT_HSYNC_PERCENTAGE	8
 		unsigned int hblank_percentage;
-		int vsyncandback_porch, vback_porch, hblank;
+		int vsyncandback_porch, __maybe_unused vback_porch, hblank;
 
 		/* estimated the horizontal period */
 		tmp1 = HV_FACTOR * 1000000  -
@@ -386,9 +386,10 @@ drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
 	int top_margin, bottom_margin;
 	int interlace;
 	unsigned int hfreq_est;
-	int vsync_plus_bp, vback_porch;
-	unsigned int vtotal_lines, vfieldrate_est, hperiod;
-	unsigned int vfield_rate, vframe_rate;
+	int vsync_plus_bp, __maybe_unused vback_porch;
+	unsigned int vtotal_lines, __maybe_unused vfieldrate_est;
+	unsigned int __maybe_unused hperiod;
+	unsigned int vfield_rate, __maybe_unused vframe_rate;
 	int left_margin, right_margin;
 	unsigned int total_active_pixels, ideal_duty_cycle;
 	unsigned int hblank, total_pixels, pixel_freq;
@@ -1568,33 +1569,76 @@ static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length,
 	return 0;
 }
 
-static int drm_mode_parse_cmdline_options(char *str, size_t len,
+static int drm_mode_parse_cmdline_int(const char *delim, unsigned int *int_ret)
+{
+	const char *value;
+	char *endp;
+
+	/*
+	 * delim must point to the '='; otherwise it is a syntax error, and
+	 * if delim points to the terminating zero, then delim + 1 will point
+	 * past the end of the string.
+	 */
+	if (*delim != '=')
+		return -EINVAL;
+
+	value = delim + 1;
+	*int_ret = simple_strtol(value, &endp, 10);
+
+	/* Make sure we have parsed something */
+	if (endp == value)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int drm_mode_parse_panel_orientation(const char *delim,
+					    struct drm_cmdline_mode *mode)
+{
+	const char *value;
+
+	if (*delim != '=')
+		return -EINVAL;
+
+	value = delim + 1;
+	delim = strchr(value, ',');
+	if (!delim)
+		delim = value + strlen(value);
+
+	if (!strncmp(value, "normal", delim - value))
+		mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
+	else if (!strncmp(value, "upside_down", delim - value))
+		mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+	else if (!strncmp(value, "left_side_up", delim - value))
+		mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
+	else if (!strncmp(value, "right_side_up", delim - value))
+		mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+static int drm_mode_parse_cmdline_options(const char *str,
+					  bool freestanding,
 					  const struct drm_connector *connector,
 					  struct drm_cmdline_mode *mode)
 {
-	unsigned int rotation = 0;
-	char *sep = str;
+	unsigned int deg, margin, rotation = 0;
+	const char *delim, *option, *sep;
 
-	while ((sep = strchr(sep, ','))) {
-		char *delim, *option;
-
-		option = sep + 1;
+	option = str;
+	do {
 		delim = strchr(option, '=');
 		if (!delim) {
 			delim = strchr(option, ',');
 
 			if (!delim)
-				delim = str + len;
+				delim = option + strlen(option);
 		}
 
 		if (!strncmp(option, "rotate", delim - option)) {
-			const char *value = delim + 1;
-			unsigned int deg;
-
-			deg = simple_strtol(value, &sep, 10);
-
-			/* Make sure we have parsed something */
-			if (sep == value)
+			if (drm_mode_parse_cmdline_int(delim, &deg))
 				return -EINVAL;
 
 			switch (deg) {
@@ -1619,58 +1663,40 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
 			}
 		} else if (!strncmp(option, "reflect_x", delim - option)) {
 			rotation |= DRM_MODE_REFLECT_X;
-			sep = delim;
 		} else if (!strncmp(option, "reflect_y", delim - option)) {
 			rotation |= DRM_MODE_REFLECT_Y;
-			sep = delim;
 		} else if (!strncmp(option, "margin_right", delim - option)) {
-			const char *value = delim + 1;
-			unsigned int margin;
-
-			margin = simple_strtol(value, &sep, 10);
-
-			/* Make sure we have parsed something */
-			if (sep == value)
+			if (drm_mode_parse_cmdline_int(delim, &margin))
 				return -EINVAL;
 
 			mode->tv_margins.right = margin;
 		} else if (!strncmp(option, "margin_left", delim - option)) {
-			const char *value = delim + 1;
-			unsigned int margin;
-
-			margin = simple_strtol(value, &sep, 10);
-
-			/* Make sure we have parsed something */
-			if (sep == value)
+			if (drm_mode_parse_cmdline_int(delim, &margin))
 				return -EINVAL;
 
 			mode->tv_margins.left = margin;
 		} else if (!strncmp(option, "margin_top", delim - option)) {
-			const char *value = delim + 1;
-			unsigned int margin;
-
-			margin = simple_strtol(value, &sep, 10);
-
-			/* Make sure we have parsed something */
-			if (sep == value)
+			if (drm_mode_parse_cmdline_int(delim, &margin))
 				return -EINVAL;
 
 			mode->tv_margins.top = margin;
 		} else if (!strncmp(option, "margin_bottom", delim - option)) {
-			const char *value = delim + 1;
-			unsigned int margin;
-
-			margin = simple_strtol(value, &sep, 10);
-
-			/* Make sure we have parsed something */
-			if (sep == value)
+			if (drm_mode_parse_cmdline_int(delim, &margin))
 				return -EINVAL;
 
 			mode->tv_margins.bottom = margin;
+		} else if (!strncmp(option, "panel_orientation", delim - option)) {
+			if (drm_mode_parse_panel_orientation(delim, mode))
+				return -EINVAL;
 		} else {
 			return -EINVAL;
 		}
-	}
+		sep = strchr(delim, ',');
+		option = sep + 1;
+	} while (sep);
+
+	if (rotation && freestanding)
+		return -EINVAL;
 
 	mode->rotation_reflection = rotation;
 
@@ -1682,17 +1708,6 @@ static const char * const drm_named_modes_whitelist[] = {
 	"PAL",
 };
 
-static bool drm_named_mode_is_in_whitelist(const char *mode, unsigned int size)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++)
-		if (!strncmp(mode, drm_named_modes_whitelist[i], size))
-			return true;
-
-	return false;
-}
-
 /**
  * drm_mode_parse_command_line_for_connector - parse command line modeline for connector
  * @mode_option: optional per connector mode option
@@ -1723,72 +1738,30 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 					       struct drm_cmdline_mode *mode)
 {
 	const char *name;
-	bool named_mode = false, parse_extras = false;
+	bool freestanding = false, parse_extras = false;
 	unsigned int bpp_off = 0, refresh_off = 0, options_off = 0;
 	unsigned int mode_end = 0;
-	char *bpp_ptr = NULL, *refresh_ptr = NULL, *extra_ptr = NULL;
-	char *options_ptr = NULL;
+	const char *bpp_ptr = NULL, *refresh_ptr = NULL, *extra_ptr = NULL;
+	const char *options_ptr = NULL;
 	char *bpp_end_ptr = NULL, *refresh_end_ptr = NULL;
-	int ret;
+	int i, len, ret;
 
-#ifdef CONFIG_FB
+	memset(mode, 0, sizeof(*mode));
+	mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+
 	if (!mode_option)
-		mode_option = fb_mode_option;
-#endif
-
-	if (!mode_option) {
-		mode->specified = false;
 		return false;
-	}
 
 	name = mode_option;
 
-	/*
-	 * This is a bit convoluted. To differentiate between the
-	 * named modes and poorly formatted resolutions, we need a
-	 * bunch of things:
-	 *   - We need to make sure that the first character (which
-	 *     would be our resolution in X) is a digit.
-	 *   - If not, then it's either a named mode or a force on/off.
-	 *     To distinguish between the two, we need to run the
-	 *     extra parsing function, and if not, then we consider it
-	 *     a named mode.
-	 *
-	 * If this isn't enough, we should add more heuristics here,
-	 * and matching unit-tests.
-	 */
-	if (!isdigit(name[0]) && name[0] != 'x') {
-		unsigned int namelen = strlen(name);
-
-		/*
-		 * Only the force on/off options can be in that case,
-		 * and they all take a single character.
-		 */
-		if (namelen == 1) {
-			ret = drm_mode_parse_cmdline_extra(name, namelen, true,
-							   connector, mode);
-			if (!ret)
-				return true;
-		}
-
-		named_mode = true;
-	}
-
 	/* Try to locate the bpp and refresh specifiers, if any */
 	bpp_ptr = strchr(name, '-');
-	if (bpp_ptr) {
+	if (bpp_ptr)
 		bpp_off = bpp_ptr - name;
-		mode->bpp_specified = true;
-	}
 
 	refresh_ptr = strchr(name, '@');
-	if (refresh_ptr) {
-		if (named_mode)
-			return false;
-
+	if (refresh_ptr)
 		refresh_off = refresh_ptr - name;
-		mode->refresh_specified = true;
-	}
 
 	/* Locate the start of named options */
 	options_ptr = strchr(name, ',');
@@ -1802,33 +1775,58 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 		mode_end = refresh_off;
 	} else if (options_ptr) {
 		mode_end = options_off;
+		parse_extras = true;
 	} else {
 		mode_end = strlen(name);
 		parse_extras = true;
 	}
 
-	if (named_mode) {
-		if (mode_end + 1 > DRM_DISPLAY_MODE_LEN)
-			return false;
+	/* First check for a named mode */
+	for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++) {
+		ret = str_has_prefix(name, drm_named_modes_whitelist[i]);
+		if (ret == mode_end) {
+			if (refresh_ptr)
+				return false; /* named + refresh is invalid */
 
-		if (!drm_named_mode_is_in_whitelist(name, mode_end))
-			return false;
+			strcpy(mode->name, drm_named_modes_whitelist[i]);
+			mode->specified = true;
+			break;
+		}
+	}
 
-		strscpy(mode->name, name, mode_end + 1);
-	} else {
+	/* No named mode? Check for a normal mode argument, e.g. 1024x768 */
+	if (!mode->specified && isdigit(name[0])) {
 		ret = drm_mode_parse_cmdline_res_mode(name, mode_end,
 						      parse_extras,
 						      connector,
 						      mode);
 		if (ret)
 			return false;
+
+		mode->specified = true;
 	}
-	mode->specified = true;
+
+	/* No mode? Check for freestanding extras and/or options */
+	if (!mode->specified) {
+		unsigned int len = strlen(mode_option);
+
+		if (bpp_ptr || refresh_ptr)
+			return false; /* syntax error */
+
+		if (len == 1 || (len >= 2 && mode_option[1] == ','))
+			extra_ptr = mode_option;
+		else
+			options_ptr = mode_option - 1;
+
+		freestanding = true;
+	}
 
 	if (bpp_ptr) {
 		ret = drm_mode_parse_cmdline_bpp(bpp_ptr, &bpp_end_ptr, mode);
 		if (ret)
 			return false;
+
+		mode->bpp_specified = true;
 	}
 
 	if (refresh_ptr) {
@@ -1836,6 +1834,8 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 						     &refresh_end_ptr, mode);
 		if (ret)
 			return false;
+
+		mode->refresh_specified = true;
 	}
 
 	/*
@@ -1849,20 +1849,21 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 	else if (refresh_ptr)
 		extra_ptr = refresh_end_ptr;
 
-	if (extra_ptr &&
-	    extra_ptr != options_ptr) {
-		int len = strlen(name) - (extra_ptr - name);
+	if (extra_ptr) {
+		if (options_ptr)
+			len = options_ptr - extra_ptr;
+		else
+			len = strlen(extra_ptr);
 
-		ret = drm_mode_parse_cmdline_extra(extra_ptr, len, false,
+		ret = drm_mode_parse_cmdline_extra(extra_ptr, len, freestanding,
 						   connector, mode);
 		if (ret)
 			return false;
 	}
 
 	if (options_ptr) {
-		int len = strlen(name) - (options_ptr - name);
-
-		ret = drm_mode_parse_cmdline_options(options_ptr, len,
+		ret = drm_mode_parse_cmdline_options(options_ptr + 1,
+						     freestanding,
 						     connector, mode);
 		if (ret)
 			return false;
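
A minimal sketch of how the reworked parser is exercised; the connector pointer
and the option strings below are illustrative assumptions, not taken from this
change:

static void example_parse_cmdline(const struct drm_connector *connector)
{
	struct drm_cmdline_mode cmdline;

	/* a full mode combined with the new panel_orientation option */
	drm_mode_parse_command_line_for_connector(
		"720x480@60,rotate=180,panel_orientation=upside_down",
		connector, &cmdline);

	/* freestanding options, now accepted without a leading mode */
	drm_mode_parse_command_line_for_connector(
		"margin_right=14,margin_left=24,margin_top=36,margin_bottom=42",
		connector, &cmdline);
}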
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 0ca5880..b50b44e 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -274,3 +274,119 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
 	return ret;
 }
 EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge);
+
+enum drm_of_lvds_pixels {
+	DRM_OF_LVDS_EVEN = BIT(0),
+	DRM_OF_LVDS_ODD = BIT(1),
+};
+
+static int drm_of_lvds_get_port_pixels_type(struct device_node *port_node)
+{
+	bool even_pixels =
+		of_property_read_bool(port_node, "dual-lvds-even-pixels");
+	bool odd_pixels =
+		of_property_read_bool(port_node, "dual-lvds-odd-pixels");
+
+	return (even_pixels ? DRM_OF_LVDS_EVEN : 0) |
+	       (odd_pixels ? DRM_OF_LVDS_ODD : 0);
+}
+
+static int drm_of_lvds_get_remote_pixels_type(
+			const struct device_node *port_node)
+{
+	struct device_node *endpoint = NULL;
+	int pixels_type = -EPIPE;
+
+	for_each_child_of_node(port_node, endpoint) {
+		struct device_node *remote_port;
+		int current_pt;
+
+		if (!of_node_name_eq(endpoint, "endpoint"))
+			continue;
+
+		remote_port = of_graph_get_remote_port(endpoint);
+		if (!remote_port) {
+			of_node_put(endpoint);
+			return -EPIPE;
+		}
+
+		current_pt = drm_of_lvds_get_port_pixels_type(remote_port);
+		of_node_put(remote_port);
+		if (pixels_type < 0)
+			pixels_type = current_pt;
+
+		/*
+		 * Sanity check: ensure that all remote endpoints have the same
+		 * pixel type. We may lift this restriction later if we need to
+		 * support multiple sinks with different dual-link
+		 * configurations by passing the endpoints explicitly to
+		 * drm_of_lvds_get_dual_link_pixel_order().
+		 */
+		if (!current_pt || pixels_type != current_pt) {
+			of_node_put(endpoint);
+			return -EINVAL;
+		}
+	}
+
+	return pixels_type;
+}
+
+/**
+ * drm_of_lvds_get_dual_link_pixel_order - Get LVDS dual-link pixel order
+ * @port1: First DT port node of the Dual-link LVDS source
+ * @port2: Second DT port node of the Dual-link LVDS source
+ *
+ * An LVDS dual-link connection is made of two links, with even pixels
+ * transmitted on one link, and odd pixels on the other link. This function
+ * returns, for two ports of an LVDS dual-link source, which port shall transmit
+ * the even and odd pixels, based on the requirements of the connected sink.
+ *
+ * The pixel order is determined from the dual-lvds-even-pixels and
+ * dual-lvds-odd-pixels properties in the sink's DT port nodes. If those
+ * properties are not present, or if their usage is not valid, this function
+ * returns -EINVAL.
+ *
+ * If either port is not connected, this function returns -EPIPE.
+ *
+ * @port1 and @port2 are typically DT sibling nodes, but may have different
+ * parents when, for instance, two separate LVDS encoders carry the even and odd
+ * pixels.
+ *
+ * Return:
+ * * DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS - @port1 carries even pixels and @port2
+ *   carries odd pixels
+ * * DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS - @port1 carries odd pixels and @port2
+ *   carries even pixels
+ * * -EINVAL - @port1 and @port2 are not connected to a dual-link LVDS sink, or
+ *   the sink configuration is invalid
+ * * -EPIPE - when @port1 or @port2 are not connected
+ */
+int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
+					  const struct device_node *port2)
+{
+	int remote_p1_pt, remote_p2_pt;
+
+	if (!port1 || !port2)
+		return -EINVAL;
+
+	remote_p1_pt = drm_of_lvds_get_remote_pixels_type(port1);
+	if (remote_p1_pt < 0)
+		return remote_p1_pt;
+
+	remote_p2_pt = drm_of_lvds_get_remote_pixels_type(port2);
+	if (remote_p2_pt < 0)
+		return remote_p2_pt;
+
+	/*
+	 * A valid dual-LVDS bus is found when one remote port is marked with
+	 * "dual-lvds-even-pixels" and the other remote port is marked with
+	 * "dual-lvds-odd-pixels"; bail out if the markers are not right.
+	 */
+	if (remote_p1_pt + remote_p2_pt != DRM_OF_LVDS_EVEN + DRM_OF_LVDS_ODD)
+		return -EINVAL;
+
+	return remote_p1_pt == DRM_OF_LVDS_EVEN ?
+		DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS :
+		DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
+}
+EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order);
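
A hedged usage sketch for the new helper; the port lookup by id and the
surrounding encoder-driver context are assumptions, only
drm_of_lvds_get_dual_link_pixel_order() and its return values come from this
patch:

static int example_dual_link_order(struct device *dev)
{
	struct device_node *port1, *port2;
	int order;

	port1 = of_graph_get_port_by_id(dev->of_node, 1);
	port2 = of_graph_get_port_by_id(dev->of_node, 2);

	order = drm_of_lvds_get_dual_link_pixel_order(port1, port2);

	of_node_put(port1);
	of_node_put(port2);

	/*
	 * order is DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS or
	 * DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS on success, -EPIPE if a port
	 * is unconnected, or -EINVAL if the sink markers are inconsistent.
	 */
	return order;
}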
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index ed7985c..8c7bac8 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -21,11 +21,13 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/backlight.h>
 #include <linux/err.h>
 #include <linux/module.h>
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_print.h>
 
 static DEFINE_MUTEX(panel_lock);
 static LIST_HEAD(panel_list);
@@ -112,12 +114,6 @@ EXPORT_SYMBOL(drm_panel_remove);
  */
 int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
 {
-	if (panel->connector)
-		return -EBUSY;
-
-	panel->connector = connector;
-	panel->drm = connector->dev;
-
 	return 0;
 }
 EXPORT_SYMBOL(drm_panel_attach);
@@ -134,8 +130,6 @@ EXPORT_SYMBOL(drm_panel_attach);
  */
 void drm_panel_detach(struct drm_panel *panel)
 {
-	panel->connector = NULL;
-	panel->drm = NULL;
 }
 EXPORT_SYMBOL(drm_panel_detach);
 
@@ -151,10 +145,13 @@ EXPORT_SYMBOL(drm_panel_detach);
  */
 int drm_panel_prepare(struct drm_panel *panel)
 {
-	if (panel && panel->funcs && panel->funcs->prepare)
+	if (!panel)
+		return -EINVAL;
+
+	if (panel->funcs && panel->funcs->prepare)
 		return panel->funcs->prepare(panel);
 
-	return panel ? -ENOSYS : -EINVAL;
+	return 0;
 }
 EXPORT_SYMBOL(drm_panel_prepare);
 
@@ -171,10 +168,13 @@ EXPORT_SYMBOL(drm_panel_prepare);
  */
 int drm_panel_unprepare(struct drm_panel *panel)
 {
-	if (panel && panel->funcs && panel->funcs->unprepare)
+	if (!panel)
+		return -EINVAL;
+
+	if (panel->funcs && panel->funcs->unprepare)
 		return panel->funcs->unprepare(panel);
 
-	return panel ? -ENOSYS : -EINVAL;
+	return 0;
 }
 EXPORT_SYMBOL(drm_panel_unprepare);
 
@@ -190,10 +190,23 @@ EXPORT_SYMBOL(drm_panel_unprepare);
  */
 int drm_panel_enable(struct drm_panel *panel)
 {
-	if (panel && panel->funcs && panel->funcs->enable)
-		return panel->funcs->enable(panel);
+	int ret;
 
-	return panel ? -ENOSYS : -EINVAL;
+	if (!panel)
+		return -EINVAL;
+
+	if (panel->funcs && panel->funcs->enable) {
+		ret = panel->funcs->enable(panel);
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = backlight_enable(panel->backlight);
+	if (ret < 0)
+		DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n",
+			     ret);
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_panel_enable);
 
@@ -209,16 +222,27 @@ EXPORT_SYMBOL(drm_panel_enable);
  */
 int drm_panel_disable(struct drm_panel *panel)
 {
-	if (panel && panel->funcs && panel->funcs->disable)
+	int ret;
+
+	if (!panel)
+		return -EINVAL;
+
+	ret = backlight_disable(panel->backlight);
+	if (ret < 0)
+		DRM_DEV_INFO(panel->dev, "failed to disable backlight: %d\n",
+			     ret);
+
+	if (panel->funcs && panel->funcs->disable)
 		return panel->funcs->disable(panel);
 
-	return panel ? -ENOSYS : -EINVAL;
+	return 0;
 }
 EXPORT_SYMBOL(drm_panel_disable);
 
 /**
  * drm_panel_get_modes - probe the available display modes of a panel
  * @panel: DRM panel
+ * @connector: DRM connector
  *
  * The modes probed from the panel are automatically added to the connector
  * that the panel is attached to.
@@ -226,12 +250,16 @@ EXPORT_SYMBOL(drm_panel_disable);
  * Return: The number of modes available from the panel on success or a
  * negative error code on failure.
  */
-int drm_panel_get_modes(struct drm_panel *panel)
+int drm_panel_get_modes(struct drm_panel *panel,
+			struct drm_connector *connector)
 {
-	if (panel && panel->funcs && panel->funcs->get_modes)
-		return panel->funcs->get_modes(panel);
+	if (!panel)
+		return -EINVAL;
 
-	return panel ? -ENOSYS : -EINVAL;
+	if (panel->funcs && panel->funcs->get_modes)
+		return panel->funcs->get_modes(panel, connector);
+
+	return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(drm_panel_get_modes);
 
@@ -274,6 +302,45 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np)
 EXPORT_SYMBOL(of_drm_find_panel);
 #endif
 
+#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
+/**
+ * drm_panel_of_backlight - use backlight device node for backlight
+ * @panel: DRM panel
+ *
+ * Use this function to enable backlight handling if your panel
+ * uses device tree and has a backlight phandle.
+ *
+ * When the panel is enabled, the backlight will be enabled after a
+ * successful call to &drm_panel_funcs.enable().
+ *
+ * When the panel is disabled, the backlight will be disabled before the
+ * call to &drm_panel_funcs.disable().
+ *
+ * A typical implementation for a panel driver supporting device tree
+ * will call this function at probe time. Backlight will then be handled
+ * transparently without requiring any intervention from the driver.
+ * drm_panel_of_backlight() must be called after the call to drm_panel_init().
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_panel_of_backlight(struct drm_panel *panel)
+{
+	struct backlight_device *backlight;
+
+	if (!panel || !panel->dev)
+		return -EINVAL;
+
+	backlight = devm_of_find_backlight(panel->dev);
+
+	if (IS_ERR(backlight))
+		return PTR_ERR(backlight);
+
+	panel->backlight = backlight;
+	return 0;
+}
+EXPORT_SYMBOL(drm_panel_of_backlight);
+#endif
+
 MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
 MODULE_DESCRIPTION("DRM panel infrastructure");
 MODULE_LICENSE("GPL and additional rights");
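
A minimal sketch of a device-tree panel driver adopting the new backlight
handling; the mypanel names are hypothetical, while drm_panel_init(),
drm_panel_of_backlight() and drm_panel_add() are the real panel API:

struct mypanel {
	struct drm_panel panel;
};

static const struct drm_panel_funcs mypanel_funcs = {
	/* prepare/enable/get_modes callbacks omitted in this sketch */
};

static int mypanel_probe(struct mipi_dsi_device *dsi)
{
	struct mypanel *ctx;
	int ret;

	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	drm_panel_init(&ctx->panel, &dsi->dev, &mypanel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	/* resolves the "backlight" phandle; drm_panel_enable()/disable()
	 * then switch the backlight without further driver code */
	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return ret;

	ret = drm_panel_add(&ctx->panel);
	if (ret)
		return ret;

	mipi_dsi_set_drvdata(dsi, ctx);
	return mipi_dsi_attach(dsi);
}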
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index a86a3ab..f2e43d3 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -125,8 +125,6 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 
 EXPORT_SYMBOL(drm_pci_free);
 
-#ifdef CONFIG_PCI
-
 static int drm_get_pci_domain(struct drm_device *dev)
 {
 #ifndef __alpha__
@@ -284,6 +282,8 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 }
 EXPORT_SYMBOL(drm_get_pci_dev);
 
+#ifdef CONFIG_DRM_LEGACY
+
 /**
  * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
  * @driver: DRM device driver
@@ -331,17 +331,6 @@ int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
 }
 EXPORT_SYMBOL(drm_legacy_pci_init);
 
-#else
-
-void drm_pci_agp_destroy(struct drm_device *dev) {}
-
-int drm_irq_by_busid(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
-{
-	return -EINVAL;
-}
-#endif
-
 /**
  * drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver
  * @driver: DRM device driver
@@ -367,3 +356,5 @@ void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
 	DRM_INFO("Module unloaded\n");
 }
 EXPORT_SYMBOL(drm_legacy_pci_exit);
+
+#endif
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 0814211..86d9b0e 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -240,6 +240,7 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
 struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
 				      struct dma_buf_export_info *exp_info)
 {
+	struct drm_gem_object *obj = exp_info->priv;
 	struct dma_buf *dma_buf;
 
 	dma_buf = dma_buf_export(exp_info);
@@ -247,7 +248,8 @@ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
 		return dma_buf;
 
 	drm_dev_get(dev);
-	drm_gem_object_get(exp_info->priv);
+	drm_gem_object_get(obj);
+	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
 
 	return dma_buf;
 }
@@ -713,6 +715,9 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	struct file *fil;
 	int ret;
 
+	/* Add the fake offset */
+	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
+
 	if (obj->funcs && obj->funcs->mmap) {
 		ret = obj->funcs->mmap(obj, vma);
 		if (ret)
@@ -737,8 +742,6 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	if (ret)
 		goto out;
 
-	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
-
 	ret = obj->dev->driver->fops->mmap(fil, vma);
 
 	drm_vma_node_revoke(&obj->vma_node, priv);
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 9a25d73..111b932 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -37,11 +37,11 @@
 #include <drm/drm_print.h>
 
 /*
- * drm_debug: Enable debug output.
+ * __drm_debug: Enable debug output.
  * Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
  */
-unsigned int drm_debug;
-EXPORT_SYMBOL(drm_debug);
+unsigned int __drm_debug;
+EXPORT_SYMBOL(__drm_debug);
 
 MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
 "\t\tBit 0 (0x01)  will enable CORE messages (drm core code)\n"
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug cat
 "\t\tBit 5 (0x20)  will enable VBL messages (vblank code)\n"
 "\t\tBit 7 (0x80)  will enable LEASE messages (leasing code)\n"
 "\t\tBit 8 (0x100) will enable DP messages (displayport code)");
-module_param_named(debug, drm_debug, int, 0600);
+module_param_named(debug, __drm_debug, int, 0600);
 
 void __drm_puts_coredump(struct drm_printer *p, const char *str)
 {
@@ -256,7 +256,7 @@ void drm_dev_printk(const struct device *dev, const char *level,
 }
 EXPORT_SYMBOL(drm_dev_printk);
 
-void drm_dev_dbg(const struct device *dev, unsigned int category,
+void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
 		 const char *format, ...)
 {
 	struct va_format vaf;
@@ -280,7 +280,7 @@ void drm_dev_dbg(const struct device *dev, unsigned int category,
 }
 EXPORT_SYMBOL(drm_dev_dbg);
 
-void drm_dbg(unsigned int category, const char *format, ...)
+void __drm_dbg(enum drm_debug_category category, const char *format, ...)
 {
 	struct va_format vaf;
 	va_list args;
@@ -297,9 +297,9 @@ void drm_dbg(unsigned int category, const char *format, ...)
 
 	va_end(args);
 }
-EXPORT_SYMBOL(drm_dbg);
+EXPORT_SYMBOL(__drm_dbg);
 
-void drm_err(const char *format, ...)
+void __drm_err(const char *format, ...)
 {
 	struct va_format vaf;
 	va_list args;
@@ -313,7 +313,7 @@ void drm_err(const char *format, ...)
 
 	va_end(args);
 }
-EXPORT_SYMBOL(drm_err);
+EXPORT_SYMBOL(__drm_err);
 
 /**
  * drm_print_regset32 - print the contents of registers to a
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index a7c87ab..576b4b7 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -101,6 +101,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
 
 	/* Step 2: Validate against encoders and crtcs */
 	drm_connector_for_each_possible_encoder(connector, encoder) {
+		struct drm_bridge *bridge;
 		struct drm_crtc *crtc;
 
 		ret = drm_encoder_mode_valid(encoder, mode);
@@ -112,7 +113,8 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
 			continue;
 		}
 
-		ret = drm_bridge_mode_valid(encoder->bridge, mode);
+		bridge = drm_bridge_chain_get_first_bridge(encoder);
+		ret = drm_bridge_chain_mode_valid(bridge, mode);
 		if (ret != MODE_OK) {
 			/* There is also no point in continuing for crtc check
 			 * here. */
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index b8363aaa..0460e87 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -52,9 +52,17 @@ bool drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
 }
 EXPORT_SYMBOL(drm_rect_intersect);
 
-static u32 clip_scaled(u32 src, u32 dst, u32 clip)
+static u32 clip_scaled(int src, int dst, int *clip)
 {
-	u64 tmp = mul_u32_u32(src, dst - clip);
+	u64 tmp;
+
+	if (dst == 0)
+		return 0;
+
+	/* Only clip what we have. Keeps the result bounded. */
+	*clip = min(*clip, dst);
+
+	tmp = mul_u32_u32(src, dst - *clip);
 
 	/*
 	 * Round toward 1.0 when clipping so that we don't accidentally
@@ -73,11 +81,13 @@ static u32 clip_scaled(u32 src, u32 dst, u32 clip)
  * @clip: clip rectangle
  *
  * Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
- * same amounts multiplied by @hscale and @vscale.
+ * corresponding amounts, retaining the vertical and horizontal scaling
+ * factors from @src to @dst.
  *
  * RETURNS:
+ *
  * %true if rectangle @dst is still visible after being clipped,
- * %false otherwise
+ * %false otherwise.
  */
 bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
 			  const struct drm_rect *clip)
@@ -87,34 +97,34 @@ bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
 	diff = clip->x1 - dst->x1;
 	if (diff > 0) {
 		u32 new_src_w = clip_scaled(drm_rect_width(src),
-					    drm_rect_width(dst), diff);
+					    drm_rect_width(dst), &diff);
 
-		src->x1 = clamp_t(int64_t, src->x2 - new_src_w, INT_MIN, INT_MAX);
-		dst->x1 = clip->x1;
+		src->x1 = src->x2 - new_src_w;
+		dst->x1 += diff;
 	}
 	diff = clip->y1 - dst->y1;
 	if (diff > 0) {
 		u32 new_src_h = clip_scaled(drm_rect_height(src),
-					    drm_rect_height(dst), diff);
+					    drm_rect_height(dst), &diff);
 
-		src->y1 = clamp_t(int64_t, src->y2 - new_src_h, INT_MIN, INT_MAX);
-		dst->y1 = clip->y1;
+		src->y1 = src->y2 - new_src_h;
+		dst->y1 += diff;
 	}
 	diff = dst->x2 - clip->x2;
 	if (diff > 0) {
 		u32 new_src_w = clip_scaled(drm_rect_width(src),
-					    drm_rect_width(dst), diff);
+					    drm_rect_width(dst), &diff);
 
-		src->x2 = clamp_t(int64_t, src->x1 + new_src_w, INT_MIN, INT_MAX);
-		dst->x2 = clip->x2;
+		src->x2 = src->x1 + new_src_w;
+		dst->x2 -= diff;
 	}
 	diff = dst->y2 - clip->y2;
 	if (diff > 0) {
 		u32 new_src_h = clip_scaled(drm_rect_height(src),
-					    drm_rect_height(dst), diff);
+					    drm_rect_height(dst), &diff);
 
-		src->y2 = clamp_t(int64_t, src->y1 + new_src_h, INT_MIN, INT_MAX);
-		dst->y2 = clip->y2;
+		src->y2 = src->y1 + new_src_h;
+		dst->y2 -= diff;
 	}
 
 	return drm_rect_visible(dst);
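
A worked example of the new clamping behaviour; the rectangles below are
illustrative assumptions:

	struct drm_rect src = DRM_RECT_INIT(0, 0, 100 << 16, 100 << 16); /* 16.16 */
	struct drm_rect dst = DRM_RECT_INIT(0, 0, 100, 100);
	struct drm_rect clip = DRM_RECT_INIT(-200, 0, 150, 100); /* ends at x = -50 */
	bool visible;

	/*
	 * diff = dst.x2 - clip.x2 = 150 used to be fed to clip_scaled()
	 * unclamped, wrapping the unsigned multiply; it is now limited to the
	 * 100 pixels that actually exist, so src and dst both collapse to zero
	 * width and drm_rect_clip_scaled() returns false (plane fully clipped).
	 */
	visible = drm_rect_clip_scaled(&src, &dst, &clip);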
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 1f9c01b..6b43c1c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -65,12 +65,13 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
 
 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
 		struct etnaviv_gpu *gpu = priv->gpu[i];
-		struct drm_sched_rq *rq;
+		struct drm_gpu_scheduler *sched;
 
 		if (gpu) {
-			rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+			sched = &gpu->sched;
 			drm_sched_entity_init(&ctx->sched_entity[i],
-					      &rq, 1, NULL);
+					      DRM_SCHED_PRIORITY_NORMAL, &sched,
+					      1, NULL);
 			}
 	}
 
@@ -282,11 +283,6 @@ static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
 			args->flags, &args->handle);
 }
 
-#define TS(t) ((struct timespec){ \
-	.tv_sec = (t).tv_sec, \
-	.tv_nsec = (t).tv_nsec \
-})
-
 static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
 		struct drm_file *file)
 {
@@ -301,7 +297,7 @@ static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
 	if (!obj)
 		return -ENOENT;
 
-	ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
+	ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);
 
 	drm_gem_object_put_unlocked(obj);
 
@@ -354,7 +350,7 @@ static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
 {
 	struct drm_etnaviv_wait_fence *args = data;
 	struct etnaviv_drm_private *priv = dev->dev_private;
-	struct timespec *timeout = &TS(args->timeout);
+	struct drm_etnaviv_timespec *timeout = &args->timeout;
 	struct etnaviv_gpu *gpu;
 
 	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
@@ -403,7 +399,7 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
 {
 	struct etnaviv_drm_private *priv = dev->dev_private;
 	struct drm_etnaviv_gem_wait *args = data;
-	struct timespec *timeout = &TS(args->timeout);
+	struct drm_etnaviv_timespec *timeout = &args->timeout;
 	struct drm_gem_object *obj;
 	struct etnaviv_gpu *gpu;
 	int ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 32cfa5a..efc656e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -61,7 +61,7 @@ int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
 void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
 void *etnaviv_gem_vmap(struct drm_gem_object *obj);
 int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
-		struct timespec *timeout);
+		struct drm_etnaviv_timespec *timeout);
 int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
 void etnaviv_gem_free_object(struct drm_gem_object *obj);
 int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
@@ -107,11 +107,12 @@ static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
  * between the specified timeout and the current CLOCK_MONOTONIC time.
  */
 static inline unsigned long etnaviv_timeout_to_jiffies(
-	const struct timespec *timeout)
+	const struct drm_etnaviv_timespec *timeout)
 {
-	struct timespec64 ts, to;
-
-	to = timespec_to_timespec64(*timeout);
+	struct timespec64 ts, to = {
+		.tv_sec = timeout->tv_sec,
+		.tv_nsec = timeout->tv_nsec,
+	};
 
 	ktime_get_ts64(&ts);
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index cb1faaa..6adea18 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -373,7 +373,7 @@ static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
 }
 
 int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
-		struct timespec *timeout)
+		struct drm_etnaviv_timespec *timeout)
 {
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct drm_device *dev = obj->dev;
@@ -431,7 +431,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
 }
 
 int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
-	struct timespec *timeout)
+	struct drm_etnaviv_timespec *timeout)
 {
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index d6270ac..6b68fe1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -112,7 +112,7 @@ struct etnaviv_gem_submit {
 void etnaviv_submit_put(struct etnaviv_gem_submit * submit);
 
 int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
-	struct timespec *timeout);
+	struct drm_etnaviv_timespec *timeout);
 int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
 	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res);
 void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index aa3e4c3..3b0afa1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -113,7 +113,7 @@ static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
 	if (submit->bos[i].flags & BO_LOCKED) {
 		struct drm_gem_object *obj = &submit->bos[i].obj->base;
 
-		ww_mutex_unlock(&obj->resv->lock);
+		dma_resv_unlock(obj->resv);
 		submit->bos[i].flags &= ~BO_LOCKED;
 	}
 }
@@ -133,8 +133,7 @@ static int submit_lock_objects(struct etnaviv_gem_submit *submit,
 		contended = i;
 
 		if (!(submit->bos[i].flags & BO_LOCKED)) {
-			ret = ww_mutex_lock_interruptible(&obj->resv->lock,
-							  ticket);
+			ret = dma_resv_lock_interruptible(obj->resv, ticket);
 			if (ret == -EALREADY)
 				DRM_ERROR("BO at index %u already on submit list\n",
 					  i);
@@ -161,8 +160,7 @@ static int submit_lock_objects(struct etnaviv_gem_submit *submit,
 		obj = &submit->bos[contended].obj->base;
 
 		/* we lost out in a seqno race, lock and retry.. */
-		ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
-						       ticket);
+		ret = dma_resv_lock_slow_interruptible(obj->resv, ticket);
 		if (!ret) {
 			submit->bos[contended].flags |= BO_LOCKED;
 			slow_locked = contended;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index d47d1a8..799ec20 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1132,7 +1132,7 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
  * Cmdstream submission/retirement:
  */
 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
-	u32 id, struct timespec *timeout)
+	u32 id, struct drm_etnaviv_timespec *timeout)
 {
 	struct dma_fence *fence;
 	int ret;
@@ -1179,7 +1179,8 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
  * that lock in this function while waiting.
  */
 int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
-	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
+	struct etnaviv_gem_object *etnaviv_obj,
+	struct drm_etnaviv_timespec *timeout)
 {
 	unsigned long remaining;
 	long ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 8f9bd4e..97bb480 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -169,9 +169,10 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
 void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
 void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
-	u32 fence, struct timespec *timeout);
+	u32 fence, struct drm_etnaviv_timespec *timeout);
 int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
-	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
+	struct etnaviv_gem_object *etnaviv_obj,
+	struct drm_etnaviv_timespec *timeout);
 struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
 int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
 void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 6f7d3b3..6417f37 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,13 +1,13 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config DRM_EXYNOS
-	tristate "DRM Support for Samsung SoC EXYNOS Series"
+	tristate "DRM Support for Samsung SoC Exynos Series"
 	depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
 	depends on MMU
 	select DRM_KMS_HELPER
 	select VIDEOMODE_HELPERS
 	select SND_SOC_HDMI_CODEC if SND_SOC
 	help
-	  Choose this option if you have a Samsung SoC EXYNOS chipset.
+	  Choose this option if you have a Samsung SoC Exynos chipset.
 	  If M is selected the module will be called exynosdrm.
 
 if DRM_EXYNOS
@@ -62,7 +62,7 @@
 	  This enables support for Exynos MIPI-DSI device.
 
 config DRM_EXYNOS_DP
-	bool "EXYNOS specific extensions for Analogix DP driver"
+	bool "Exynos specific extensions for Analogix DP driver"
 	depends on DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON
 	select DRM_ANALOGIX_DP
 	default DRM_EXYNOS
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 2d5cbfd..8428ae1 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -510,7 +510,7 @@ static void decon_swreset(struct decon_context *ctx)
 	       ctx->addr + DECON_CRCCTRL);
 }
 
-static void decon_enable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
 
@@ -523,7 +523,7 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
 	decon_commit(ctx->crtc);
 }
 
-static void decon_disable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
 	int i;
@@ -599,8 +599,8 @@ static enum drm_mode_status decon_mode_valid(struct exynos_drm_crtc *crtc,
 }
 
 static const struct exynos_drm_crtc_ops decon_crtc_ops = {
-	.enable			= decon_enable,
-	.disable		= decon_disable,
+	.atomic_enable		= decon_atomic_enable,
+	.atomic_disable		= decon_atomic_disable,
 	.enable_vblank		= decon_enable_vblank,
 	.disable_vblank		= decon_disable_vblank,
 	.atomic_begin		= decon_atomic_begin,
@@ -651,7 +651,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
 {
 	struct decon_context *ctx = dev_get_drvdata(dev);
 
-	decon_disable(ctx->crtc);
+	decon_atomic_disable(ctx->crtc);
 
 	/* detach this sub driver from iommu mapping if supported. */
 	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index f064095..ff59c64 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -526,7 +526,7 @@ static void decon_init(struct decon_context *ctx)
 		writel(VIDCON1_VCLK_HOLD, ctx->regs + VIDCON1(0));
 }
 
-static void decon_enable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
 
@@ -546,7 +546,7 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
 	ctx->suspended = false;
 }
 
-static void decon_disable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
 	int i;
@@ -568,8 +568,8 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
 }
 
 static const struct exynos_drm_crtc_ops decon_crtc_ops = {
-	.enable = decon_enable,
-	.disable = decon_disable,
+	.atomic_enable = decon_atomic_enable,
+	.atomic_disable = decon_atomic_disable,
 	.enable_vblank = decon_enable_vblank,
 	.disable_vblank = decon_disable_vblank,
 	.atomic_begin = decon_atomic_begin,
@@ -653,7 +653,7 @@ static void decon_unbind(struct device *dev, struct device *master,
 {
 	struct decon_context *ctx = dev_get_drvdata(dev);
 
-	decon_disable(ctx->crtc);
+	decon_atomic_disable(ctx->crtc);
 
 	if (ctx->encoder)
 		exynos_dpi_remove(ctx->encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 1e6aa24..4785885 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -110,7 +110,6 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
 		if (ret) {
 			DRM_DEV_ERROR(dp->dev,
 				      "Failed to attach bridge to drm\n");
-			bridge->next = NULL;
 			return ret;
 		}
 	}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 77ce789..1c03485 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -23,8 +23,8 @@ static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc,
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-	if (exynos_crtc->ops->enable)
-		exynos_crtc->ops->enable(exynos_crtc);
+	if (exynos_crtc->ops->atomic_enable)
+		exynos_crtc->ops->atomic_enable(exynos_crtc);
 
 	drm_crtc_vblank_on(crtc);
 }
@@ -36,8 +36,8 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
 
 	drm_crtc_vblank_off(crtc);
 
-	if (exynos_crtc->ops->disable)
-		exynos_crtc->ops->disable(exynos_crtc);
+	if (exynos_crtc->ops->atomic_disable)
+		exynos_crtc->ops->atomic_disable(exynos_crtc);
 
 	if (crtc->state->event && !crtc->state->active) {
 		spin_lock_irq(&crtc->dev->event_lock);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 3cebb19..43fa0f2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -43,7 +43,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
 {
 	struct exynos_dpi *ctx = connector_to_dpi(connector);
 
-	if (ctx->panel && !ctx->panel->connector)
+	if (ctx->panel)
 		drm_panel_attach(ctx->panel, &ctx->connector);
 
 	return connector_status_connected;
@@ -85,7 +85,7 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
 	}
 
 	if (ctx->panel)
-		return ctx->panel->funcs->get_modes(ctx->panel);
+		return drm_panel_get_modes(ctx->panel, connector);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index d4014ba..d4d21d8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -118,8 +118,8 @@ struct exynos_drm_plane_config {
 /*
  * Exynos drm crtc ops
  *
- * @enable: enable the device
- * @disable: disable the device
+ * @atomic_enable: enable the device
+ * @atomic_disable: disable the device
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
  * @mode_valid: specific driver callback for mode validation
@@ -133,8 +133,8 @@ struct exynos_drm_plane_config {
  */
 struct exynos_drm_crtc;
 struct exynos_drm_crtc_ops {
-	void (*enable)(struct exynos_drm_crtc *crtc);
-	void (*disable)(struct exynos_drm_crtc *crtc);
+	void (*atomic_enable)(struct exynos_drm_crtc *crtc);
+	void (*atomic_disable)(struct exynos_drm_crtc *crtc);
 	int (*enable_vblank)(struct exynos_drm_crtc *crtc);
 	void (*disable_vblank)(struct exynos_drm_crtc *crtc);
 	enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 72726f2..33628d8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -255,6 +255,7 @@ struct exynos_dsi {
 	struct mipi_dsi_host dsi_host;
 	struct drm_connector connector;
 	struct drm_panel *panel;
+	struct list_head bridge_chain;
 	struct drm_bridge *out_bridge;
 	struct device *dev;
 
@@ -1377,6 +1378,7 @@ static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
 static void exynos_dsi_enable(struct drm_encoder *encoder)
 {
 	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+	struct drm_bridge *iter;
 	int ret;
 
 	if (dsi->state & DSIM_STATE_ENABLED)
@@ -1390,7 +1392,11 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
 		if (ret < 0)
 			goto err_put_sync;
 	} else {
-		drm_bridge_pre_enable(dsi->out_bridge);
+		list_for_each_entry_reverse(iter, &dsi->bridge_chain,
+					    chain_node) {
+			if (iter->funcs->pre_enable)
+				iter->funcs->pre_enable(iter);
+		}
 	}
 
 	exynos_dsi_set_display_mode(dsi);
@@ -1401,7 +1407,10 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
 		if (ret < 0)
 			goto err_display_disable;
 	} else {
-		drm_bridge_enable(dsi->out_bridge);
+		list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
+			if (iter->funcs->enable)
+				iter->funcs->enable(iter);
+		}
 	}
 
 	dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
@@ -1419,6 +1428,7 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
 static void exynos_dsi_disable(struct drm_encoder *encoder)
 {
 	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+	struct drm_bridge *iter;
 
 	if (!(dsi->state & DSIM_STATE_ENABLED))
 		return;
@@ -1426,10 +1436,20 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
 	dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
 
 	drm_panel_disable(dsi->panel);
-	drm_bridge_disable(dsi->out_bridge);
+
+	list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
+		if (iter->funcs->disable)
+			iter->funcs->disable(iter);
+	}
+
 	exynos_dsi_set_display_enable(dsi, false);
 	drm_panel_unprepare(dsi->panel);
-	drm_bridge_post_disable(dsi->out_bridge);
+
+	list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
+		if (iter->funcs->post_disable)
+			iter->funcs->post_disable(iter);
+	}
+
 	dsi->state &= ~DSIM_STATE_ENABLED;
 	pm_runtime_put_sync(dsi->dev);
 }
@@ -1461,7 +1481,7 @@ static int exynos_dsi_get_modes(struct drm_connector *connector)
 	struct exynos_dsi *dsi = connector_to_dsi(connector);
 
 	if (dsi->panel)
-		return dsi->panel->funcs->get_modes(dsi->panel);
+		return drm_panel_get_modes(dsi->panel, connector);
 
 	return 0;
 }
@@ -1522,7 +1542,7 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
 	if (out_bridge) {
 		drm_bridge_attach(encoder, out_bridge, NULL);
 		dsi->out_bridge = out_bridge;
-		encoder->bridge = NULL;
+		list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
 	} else {
 		int ret = exynos_dsi_create_connector(encoder);
 
@@ -1588,6 +1608,7 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
 		if (dsi->out_bridge->funcs->detach)
 			dsi->out_bridge->funcs->detach(dsi->out_bridge);
 		dsi->out_bridge = NULL;
+		INIT_LIST_HEAD(&dsi->bridge_chain);
 	}
 
 	if (drm->mode_config.poll_enabled)
@@ -1735,6 +1756,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 	init_completion(&dsi->completed);
 	spin_lock_init(&dsi->transfer_lock);
 	INIT_LIST_HEAD(&dsi->transfer_list);
+	INIT_LIST_HEAD(&dsi->bridge_chain);
 
 	dsi->dsi_host.ops = &exynos_dsi_ops;
 	dsi->dsi_host.dev = dev;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index b0877b9..647a1fd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -60,7 +60,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
 	return 0;
 }
 
-static struct fb_ops exynos_drm_fb_ops = {
+static const struct fb_ops exynos_drm_fb_ops = {
 	.owner		= THIS_MODULE,
 	DRM_FB_HELPER_DEFAULT_OPS,
 	.fb_mmap        = exynos_drm_fb_mmap,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 8d0a929..21aec38 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -894,7 +894,7 @@ static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
 		fimd_enable_shadow_channel_path(ctx, win, false);
 }
 
-static void fimd_enable(struct exynos_drm_crtc *crtc)
+static void fimd_atomic_enable(struct exynos_drm_crtc *crtc)
 {
 	struct fimd_context *ctx = crtc->ctx;
 
@@ -912,7 +912,7 @@ static void fimd_enable(struct exynos_drm_crtc *crtc)
 	fimd_commit(ctx->crtc);
 }
 
-static void fimd_disable(struct exynos_drm_crtc *crtc)
+static void fimd_atomic_disable(struct exynos_drm_crtc *crtc)
 {
 	struct fimd_context *ctx = crtc->ctx;
 	int i;
@@ -1006,8 +1006,8 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable)
 }
 
 static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
-	.enable = fimd_enable,
-	.disable = fimd_disable,
+	.atomic_enable = fimd_atomic_enable,
+	.atomic_disable = fimd_atomic_disable,
 	.enable_vblank = fimd_enable_vblank,
 	.disable_vblank = fimd_disable_vblank,
 	.atomic_begin = fimd_atomic_begin,
@@ -1098,7 +1098,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
 {
 	struct fimd_context *ctx = dev_get_drvdata(dev);
 
-	fimd_disable(ctx->crtc);
+	fimd_atomic_disable(ctx->crtc);
 
 	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 65b891c..b320b3a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -120,7 +120,7 @@ static void vidi_update_plane(struct exynos_drm_crtc *crtc,
 	DRM_DEV_DEBUG_KMS(ctx->dev, "dma_addr = %pad\n", &addr);
 }
 
-static void vidi_enable(struct exynos_drm_crtc *crtc)
+static void vidi_atomic_enable(struct exynos_drm_crtc *crtc)
 {
 	struct vidi_context *ctx = crtc->ctx;
 
@@ -133,7 +133,7 @@ static void vidi_enable(struct exynos_drm_crtc *crtc)
 	drm_crtc_vblank_on(&crtc->base);
 }
 
-static void vidi_disable(struct exynos_drm_crtc *crtc)
+static void vidi_atomic_disable(struct exynos_drm_crtc *crtc)
 {
 	struct vidi_context *ctx = crtc->ctx;
 
@@ -147,8 +147,8 @@ static void vidi_disable(struct exynos_drm_crtc *crtc)
 }
 
 static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
-	.enable = vidi_enable,
-	.disable = vidi_disable,
+	.atomic_enable = vidi_atomic_enable,
+	.atomic_disable = vidi_atomic_disable,
 	.enable_vblank = vidi_enable_vblank,
 	.disable_vblank = vidi_disable_vblank,
 	.update_plane = vidi_update_plane,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 48159d5..9ff921f 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -946,8 +946,10 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
 	connector->interlace_allowed = true;
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
-	ret = drm_connector_init(hdata->drm_dev, connector,
-			&hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
+	ret = drm_connector_init_with_ddc(hdata->drm_dev, connector,
+					  &hdmi_connector_funcs,
+					  DRM_MODE_CONNECTOR_HDMIA,
+					  hdata->ddc_adpt);
 	if (ret) {
 		DRM_DEV_ERROR(hdata->dev,
 			      "Failed to initialize connector with drm\n");
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 6cfdb95..38ae9c3 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -986,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
 	exynos_crtc_handle_event(crtc);
 }
 
-static void mixer_enable(struct exynos_drm_crtc *crtc)
+static void mixer_atomic_enable(struct exynos_drm_crtc *crtc)
 {
 	struct mixer_context *ctx = crtc->ctx;
 
@@ -1015,7 +1015,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 	set_bit(MXR_BIT_POWERED, &ctx->flags);
 }
 
-static void mixer_disable(struct exynos_drm_crtc *crtc)
+static void mixer_atomic_disable(struct exynos_drm_crtc *crtc)
 {
 	struct mixer_context *ctx = crtc->ctx;
 	int i;
@@ -1109,8 +1109,8 @@ static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc,
 }
 
 static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
-	.enable			= mixer_enable,
-	.disable		= mixer_disable,
+	.atomic_enable		= mixer_atomic_enable,
+	.atomic_disable		= mixer_atomic_disable,
 	.enable_vblank		= mixer_enable_vblank,
 	.disable_vblank		= mixer_disable_vblank,
 	.atomic_begin		= mixer_atomic_begin,
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 82c972e..9598ee3 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -68,7 +68,7 @@ static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
 	struct fsl_dcu_drm_connector *fsl_connector;
 
 	fsl_connector = to_fsl_dcu_connector(connector);
-	return drm_panel_get_modes(fsl_connector->panel);
+	return drm_panel_get_modes(fsl_connector->panel, connector);
 }
 
 static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index 45ad5ff..adc0507 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -21,9 +21,9 @@
 
 #include <drm/drm.h>
 #include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 
-#include "framebuffer.h"
 #include "psb_drv.h"
 #include "psb_reg.h"
 
@@ -226,11 +226,10 @@ static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
 static void psbfb_copyarea_accel(struct fb_info *info,
 				 const struct fb_copyarea *a)
 {
-	struct psb_fbdev *fbdev = info->par;
-	struct psb_framebuffer *psbfb = &fbdev->pfb;
-	struct drm_device *dev = psbfb->base.dev;
-	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
-	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_framebuffer *fb = fb_helper->fb;
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
 	uint32_t offset;
 	uint32_t stride;
 	uint32_t src_format;
@@ -239,6 +238,8 @@ static void psbfb_copyarea_accel(struct fb_info *info,
 	if (!fb)
 		return;
 
+	dev = fb->dev;
+	dev_priv = dev->dev_private;
 	offset = to_gtt_range(fb->obj[0])->offset;
 	stride = fb->pitches[0];
 
@@ -309,9 +310,9 @@ void psbfb_copyarea(struct fb_info *info,
  */
 int psbfb_sync(struct fb_info *info)
 {
-	struct psb_fbdev *fbdev = info->par;
-	struct psb_framebuffer *psbfb = &fbdev->pfb;
-	struct drm_device *dev = psbfb->base.dev;
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_framebuffer *fb = fb_helper->fb;
+	struct drm_device *dev = fb->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	unsigned long _end = jiffies + HZ;
 	int busy = 0;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 8b78494..1ed854f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -582,8 +582,8 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 	struct gma_clock_t clock;
 	u32 dpll = 0, dspcntr, pipeconf;
 	bool ok;
-	bool is_crt = false, is_lvds = false, is_tv = false;
-	bool is_hdmi = false, is_dp = false;
+	bool is_lvds = false, is_tv = false;
+	bool is_dp = false;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
 	const struct gma_limit_t *limit;
@@ -607,10 +607,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 			is_tv = true;
 			break;
 		case INTEL_OUTPUT_ANALOG:
-			is_crt = true;
-			break;
 		case INTEL_OUTPUT_HDMI:
-			is_hdmi = true;
 			break;
 		case INTEL_OUTPUT_DISPLAYPORT:
 			is_dp = true;
@@ -979,6 +976,7 @@ const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
 	.gamma_set = gma_crtc_gamma_set,
 	.set_config = gma_crtc_set_config,
 	.destroy = gma_crtc_destroy,
+	.page_flip = gma_crtc_page_flip,
 };
 
 const struct gma_clock_funcs cdv_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 570b595..5772b2d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1594,7 +1594,6 @@ cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
-	bool channel_eq = false;
 	int tries, cr_tries;
 	u32 reg;
 	uint32_t DP = intel_dp->DP;
@@ -1602,7 +1601,6 @@ cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
 	/* channel equalization */
 	tries = 0;
 	cr_tries = 0;
-	channel_eq = false;
 
 	DRM_DEBUG_KMS("\n");
 		reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1648,7 +1646,6 @@ cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
 
 		if (cdv_intel_channel_eq_ok(encoder)) {
 			DRM_DEBUG_KMS("PT2 train is done\n");
-			channel_eq = true;
 			break;
 		}
 
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 218f3bb..1459076 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -40,8 +40,8 @@ static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
 			   unsigned blue, unsigned transp,
 			   struct fb_info *info)
 {
-	struct psb_fbdev *fbdev = info->par;
-	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_framebuffer *fb = fb_helper->fb;
 	uint32_t v;
 
 	if (!fb)
@@ -77,10 +77,10 @@ static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
 
 static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
 {
-	struct psb_fbdev *fbdev = info->par;
-	struct psb_framebuffer *psbfb = &fbdev->pfb;
-	struct drm_device *dev = psbfb->base.dev;
-	struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_framebuffer *fb = fb_helper->fb;
+	struct drm_device *dev = fb->dev;
+	struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
 
 	/*
 	 *	We have to poke our nose in here. The core fb code assumes
@@ -99,10 +99,10 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
 static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct psb_framebuffer *psbfb = vma->vm_private_data;
-	struct drm_device *dev = psbfb->base.dev;
+	struct drm_framebuffer *fb = vma->vm_private_data;
+	struct drm_device *dev = fb->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
+	struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
 	int page_num;
 	int i;
 	unsigned long address;
@@ -145,23 +145,21 @@ static const struct vm_operations_struct psbfb_vm_ops = {
 
 static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
-	struct psb_fbdev *fbdev = info->par;
-	struct psb_framebuffer *psbfb = &fbdev->pfb;
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_framebuffer *fb = fb_helper->fb;
 
 	if (vma->vm_pgoff != 0)
 		return -EINVAL;
 	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
 		return -EINVAL;
 
-	if (!psbfb->addr_space)
-		psbfb->addr_space = vma->vm_file->f_mapping;
 	/*
 	 * If this is a GEM object then info->screen_base is the virtual
 	 * kernel remapping of the object. FIXME: Review if this is
 	 * suitable for our mmap work
 	 */
 	vma->vm_ops = &psbfb_vm_ops;
-	vma->vm_private_data = (void *)psbfb;
+	vma->vm_private_data = (void *)fb;
 	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
 	return 0;
 }
@@ -209,9 +207,9 @@ static struct fb_ops psbfb_unaccel_ops = {
  *	0 on success or an error code if we fail.
  */
 static int psb_framebuffer_init(struct drm_device *dev,
-					struct psb_framebuffer *fb,
+					struct drm_framebuffer *fb,
 					const struct drm_mode_fb_cmd2 *mode_cmd,
-					struct gtt_range *gt)
+					struct drm_gem_object *obj)
 {
 	const struct drm_format_info *info;
 	int ret;
@@ -227,9 +225,9 @@ static int psb_framebuffer_init(struct drm_device *dev,
 	if (mode_cmd->pitches[0] & 63)
 		return -EINVAL;
 
-	drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
-	fb->base.obj[0] = &gt->gem;
-	ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
+	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+	fb->obj[0] = obj;
+	ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
 	if (ret) {
 		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
 		return ret;
@@ -252,21 +250,21 @@ static int psb_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *psb_framebuffer_create
 			(struct drm_device *dev,
 			 const struct drm_mode_fb_cmd2 *mode_cmd,
-			 struct gtt_range *gt)
+			 struct drm_gem_object *obj)
 {
-	struct psb_framebuffer *fb;
+	struct drm_framebuffer *fb;
 	int ret;
 
 	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
 	if (!fb)
 		return ERR_PTR(-ENOMEM);
 
-	ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
+	ret = psb_framebuffer_init(dev, fb, mode_cmd, obj);
 	if (ret) {
 		kfree(fb);
 		return ERR_PTR(ret);
 	}
-	return &fb->base;
+	return fb;
 }
 
 /**
@@ -300,14 +298,13 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
  *
  *	Create a framebuffer to the specifications provided
  */
-static int psbfb_create(struct psb_fbdev *fbdev,
+static int psbfb_create(struct drm_fb_helper *fb_helper,
 				struct drm_fb_helper_surface_size *sizes)
 {
-	struct drm_device *dev = fbdev->psb_fb_helper.dev;
+	struct drm_device *dev = fb_helper->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
-	struct psb_framebuffer *psbfb = &fbdev->pfb;
 	struct drm_mode_fb_cmd2 mode_cmd;
 	int size;
 	int ret;
@@ -372,7 +369,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
 
 	memset(dev_priv->vram_addr + backing->offset, 0, size);
 
-	info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
+	info = drm_fb_helper_alloc_fbi(fb_helper);
 	if (IS_ERR(info)) {
 		ret = PTR_ERR(info);
 		goto out;
@@ -380,14 +377,13 @@ static int psbfb_create(struct psb_fbdev *fbdev,
 
 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
 
-	ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
-	if (ret)
+	fb = psb_framebuffer_create(dev, &mode_cmd, &backing->gem);
+	if (IS_ERR(fb)) {
+		ret = PTR_ERR(fb);
 		goto out;
+	}
 
-	fb = &psbfb->base;
-	psbfb->fbdev = info;
-
-	fbdev->psb_fb_helper.fb = fb;
+	fb_helper->fb = fb;
 
 	if (dev_priv->ops->accel_2d && pitch_lines > 8)	/* 2D engine */
 		info->fbops = &psbfb_ops;
@@ -411,15 +407,14 @@ static int psbfb_create(struct psb_fbdev *fbdev,
 		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
 	}
 
-	drm_fb_helper_fill_info(info, &fbdev->psb_fb_helper, sizes);
+	drm_fb_helper_fill_info(info, fb_helper, sizes);
 
 	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
 	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
 
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
-	dev_dbg(dev->dev, "allocated %dx%d fb\n",
-					psbfb->base.width, psbfb->base.height);
+	dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);
 
 	return 0;
 out:
@@ -439,7 +434,6 @@ static struct drm_framebuffer *psb_user_framebuffer_create
 			(struct drm_device *dev, struct drm_file *filp,
 			 const struct drm_mode_fb_cmd2 *cmd)
 {
-	struct gtt_range *r;
 	struct drm_gem_object *obj;
 
 	/*
@@ -451,17 +445,15 @@ static struct drm_framebuffer *psb_user_framebuffer_create
 		return ERR_PTR(-ENOENT);
 
 	/* Let the core code do all the work */
-	r = container_of(obj, struct gtt_range, gem);
-	return psb_framebuffer_create(dev, cmd, r);
+	return psb_framebuffer_create(dev, cmd, obj);
 }
 
-static int psbfb_probe(struct drm_fb_helper *helper,
+static int psbfb_probe(struct drm_fb_helper *fb_helper,
 				struct drm_fb_helper_surface_size *sizes)
 {
-	struct psb_fbdev *psb_fbdev =
-		container_of(helper, struct psb_fbdev, psb_fb_helper);
-	struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
+	struct drm_device *dev = fb_helper->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned int fb_size;
 	int bytespp;
 
 	bytespp = sizes->surface_bpp / 8;
@@ -471,72 +463,77 @@ static int psbfb_probe(struct drm_fb_helper *helper,
 	/* If the mode will not fit in 32bit then switch to 16bit to get
 	   a console on full resolution. The X mode setting server will
 	   allocate its own 32bit GEM framebuffer */
-	if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
-	                dev_priv->vram_stolen_size) {
+	fb_size = ALIGN(sizes->surface_width * bytespp, 64) *
+		  sizes->surface_height;
+	fb_size = ALIGN(fb_size, PAGE_SIZE);
+
+	if (fb_size > dev_priv->vram_stolen_size) {
                 sizes->surface_bpp = 16;
                 sizes->surface_depth = 16;
         }
 
-	return psbfb_create(psb_fbdev, sizes);
+	return psbfb_create(fb_helper, sizes);
 }
 
 static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
 	.fb_probe = psbfb_probe,
 };
 
-static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
+static int psb_fbdev_destroy(struct drm_device *dev,
+			     struct drm_fb_helper *fb_helper)
 {
-	struct psb_framebuffer *psbfb = &fbdev->pfb;
+	struct drm_framebuffer *fb = fb_helper->fb;
 
-	drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);
+	drm_fb_helper_unregister_fbi(fb_helper);
 
-	drm_fb_helper_fini(&fbdev->psb_fb_helper);
-	drm_framebuffer_unregister_private(&psbfb->base);
-	drm_framebuffer_cleanup(&psbfb->base);
+	drm_fb_helper_fini(fb_helper);
+	drm_framebuffer_unregister_private(fb);
+	drm_framebuffer_cleanup(fb);
 
-	if (psbfb->base.obj[0])
-		drm_gem_object_put_unlocked(psbfb->base.obj[0]);
+	if (fb->obj[0])
+		drm_gem_object_put_unlocked(fb->obj[0]);
+	kfree(fb);
+
 	return 0;
 }
 
 int psb_fbdev_init(struct drm_device *dev)
 {
-	struct psb_fbdev *fbdev;
+	struct drm_fb_helper *fb_helper;
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	int ret;
 
-	fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
-	if (!fbdev) {
+	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+	if (!fb_helper) {
 		dev_err(dev->dev, "no memory\n");
 		return -ENOMEM;
 	}
 
-	dev_priv->fbdev = fbdev;
+	dev_priv->fb_helper = fb_helper;
 
-	drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
+	drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
-				 INTELFB_CONN_LIMIT);
+	ret = drm_fb_helper_init(dev, fb_helper, INTELFB_CONN_LIMIT);
 	if (ret)
 		goto free;
 
-	ret = drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+	ret = drm_fb_helper_single_add_all_connectors(fb_helper);
 	if (ret)
 		goto fini;
 
 	/* disable all the possible outputs/crtcs before entering KMS mode */
 	drm_helper_disable_unused_functions(dev);
 
-	ret = drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+	ret = drm_fb_helper_initial_config(fb_helper, 32);
 	if (ret)
 		goto fini;
 
 	return 0;
 
 fini:
-	drm_fb_helper_fini(&fbdev->psb_fb_helper);
+	drm_fb_helper_fini(fb_helper);
 free:
-	kfree(fbdev);
+	kfree(fb_helper);
 	return ret;
 }
 
@@ -544,12 +541,12 @@ static void psb_fbdev_fini(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 
-	if (!dev_priv->fbdev)
+	if (!dev_priv->fb_helper)
 		return;
 
-	psb_fbdev_destroy(dev, dev_priv->fbdev);
-	kfree(dev_priv->fbdev);
-	dev_priv->fbdev = NULL;
+	psb_fbdev_destroy(dev, dev_priv->fb_helper);
+	kfree(dev_priv->fb_helper);
+	dev_priv->fb_helper = NULL;
 }
 
 static const struct drm_mode_config_funcs psb_mode_funcs = {
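[Editor's note: a minimal sketch of the access pattern the conversion above relies on. With the psb_fbdev/psb_framebuffer wrappers gone, fbdev callbacks recover the generic struct drm_fb_helper from info->par (set by drm_fb_helper_alloc_fbi()) and reach the framebuffer and DRM device through it. example_fb_blank is a hypothetical callback, not part of the patch.]

static int example_fb_blank(int blank, struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;	/* filled in by drm_fb_helper_alloc_fbi() */
	struct drm_framebuffer *fb = fb_helper->fb;	/* assigned in the driver's .fb_probe */
	struct drm_device *dev = fb_helper->dev;

	dev_dbg(dev->dev, "%ux%u framebuffer, blank=%d\n",
		fb->width, fb->height, blank);
	return 0;
}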
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index ae8a026..2fbba4b 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -9,23 +9,8 @@
 #ifndef _FRAMEBUFFER_H_
 #define _FRAMEBUFFER_H_
 
-#include <drm/drm_fb_helper.h>
-
 #include "psb_drv.h"
 
-struct psb_framebuffer {
-	struct drm_framebuffer base;
-	struct address_space *addr_space;
-	struct fb_info *fbdev;
-};
-
-struct psb_fbdev {
-	struct drm_fb_helper psb_fb_helper; /* must be first */
-	struct psb_framebuffer pfb;
-};
-
-#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
-
 extern int gma_connector_clones(struct drm_device *dev, int type_mask);
 
 #endif
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index e20ccb5..17f1369 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -255,6 +255,8 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
 		/* Give the overlay scaler a chance to enable
 		 * if it's on this pipe */
 		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+
+		drm_crtc_vblank_on(crtc);
 		break;
 	case DRM_MODE_DPMS_OFF:
 		if (!gma_crtc->active)
@@ -501,6 +503,52 @@ void gma_crtc_destroy(struct drm_crtc *crtc)
 	kfree(gma_crtc);
 }
 
+int gma_crtc_page_flip(struct drm_crtc *crtc,
+		       struct drm_framebuffer *fb,
+		       struct drm_pending_vblank_event *event,
+		       uint32_t page_flip_flags,
+		       struct drm_modeset_acquire_ctx *ctx)
+{
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+	struct drm_framebuffer *current_fb = crtc->primary->fb;
+	struct drm_framebuffer *old_fb = crtc->primary->old_fb;
+	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	struct drm_device *dev = crtc->dev;
+	unsigned long flags;
+	int ret;
+
+	if (!crtc_funcs->mode_set_base)
+		return -EINVAL;
+
+	/* Using mode_set_base requires the new fb to be set already. */
+	crtc->primary->fb = fb;
+
+	if (event) {
+		spin_lock_irqsave(&dev->event_lock, flags);
+
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+		gma_crtc->page_flip_event = event;
+
+		/* Call this locked if we want an event at vblank interrupt. */
+		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+		if (ret) {
+			gma_crtc->page_flip_event = NULL;
+			drm_crtc_vblank_put(crtc);
+		}
+
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	} else {
+		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+	}
+
+	/* Restore previous fb in case of failure. */
+	if (ret)
+		crtc->primary->fb = current_fb;
+
+	return ret;
+}
+
 int gma_crtc_set_config(struct drm_mode_set *set,
 			struct drm_modeset_acquire_ctx *ctx)
 {
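[Editor's note: condensed sketch (not the literal driver code) of the event hand-off gma_crtc_page_flip() above sets up. Each pending event pins one vblank reference, the pointer hand-off happens under dev->event_lock, and whichever side consumes the event drops that reference: the vblank/TE interrupt on success, the flip path itself on failure.]

static void example_arm_flip(struct gma_crtc *gma_crtc,
			     struct drm_pending_vblank_event *event)
{
	struct drm_crtc *crtc = &gma_crtc->base;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	WARN_ON(drm_crtc_vblank_get(crtc) != 0);	/* keep vblank IRQs running */
	gma_crtc->page_flip_event = event;
	/* ... program the new scanout base while the lock is held ... */
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static void example_complete_flip(struct gma_crtc *gma_crtc)
{
	struct drm_crtc *crtc = &gma_crtc->base;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (gma_crtc->page_flip_event) {
		drm_crtc_send_vblank_event(crtc, gma_crtc->page_flip_event);
		gma_crtc->page_flip_event = NULL;
		drm_crtc_vblank_put(crtc);		/* balance the get above */
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}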
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index fdbd7ec..7bd6c1e 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -11,6 +11,7 @@
 #define _GMA_DISPLAY_H_
 
 #include <linux/pm_runtime.h>
+#include <drm/drm_vblank.h>
 
 struct drm_encoder;
 struct drm_mode_set;
@@ -71,6 +72,11 @@ extern void gma_crtc_prepare(struct drm_crtc *crtc);
 extern void gma_crtc_commit(struct drm_crtc *crtc);
 extern void gma_crtc_disable(struct drm_crtc *crtc);
 extern void gma_crtc_destroy(struct drm_crtc *crtc);
+extern int gma_crtc_page_flip(struct drm_crtc *crtc,
+			      struct drm_framebuffer *fb,
+			      struct drm_pending_vblank_event *event,
+			      uint32_t page_flip_flags,
+			      struct drm_modeset_acquire_ctx *ctx);
 extern int gma_crtc_set_config(struct drm_mode_set *set,
 			       struct drm_modeset_acquire_ctx *ctx);
 
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index b8bfb96..4fff110 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -113,27 +113,6 @@ static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
 	return (pfit_control >> 29) & 0x3;
 }
 
-static struct drm_device globle_dev;
-
-void mdfld__intel_plane_set_alpha(int enable)
-{
-	struct drm_device *dev = &globle_dev;
-	int dspcntr_reg = DSPACNTR;
-	u32 dspcntr;
-
-	dspcntr = REG_READ(dspcntr_reg);
-
-	if (enable) {
-		dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA;
-		dspcntr |= DISPPLANE_32BPP;
-	} else {
-		dspcntr &= ~DISPPLANE_32BPP;
-		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
-	}
-
-	REG_WRITE(dspcntr_reg, dspcntr);
-}
-
 static int check_fb(struct drm_framebuffer *fb)
 {
 	if (!fb)
@@ -164,8 +143,6 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	u32 dspcntr;
 	int ret;
 
-	memcpy(&globle_dev, dev, sizeof(struct drm_device));
-
 	dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);
 
 	/* no fb bound */
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f4c5208..f437023 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -159,9 +159,7 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
 
 static unsigned int htotal_calculate(struct drm_display_mode *mode)
 {
-	u32 htotal, new_crtc_htotal;
-
-	htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
+	u32 new_crtc_htotal;
 
 	/*
 	 * 1024 x 768  new_crtc_htotal = 0x1024;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 7390403..582e095 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -117,6 +117,7 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
 
 	if (!connector) {
 		DRM_ERROR("Couldn't find connector when setting mode");
+		gma_power_end(dev);
 		return;
 	}
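[Editor's note: sketch of the acquire/release rule the one-line fix above enforces. Every successful gma_power_begin() must be paired with gma_power_end() on every exit path, including early error returns; the original code leaked the power reference when the connector lookup failed. find_connector() is a hypothetical placeholder.]

static void example_mode_set(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;

	if (!gma_power_begin(dev, true))
		return;				/* nothing acquired, nothing to release */

	if (!find_connector(encoder)) {		/* hypothetical lookup */
		gma_power_end(dev);		/* release before bailing out */
		return;
	}

	/* ... program the hardware ... */

	gma_power_end(dev);
}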
 
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 0900052..6956c8e 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -19,10 +19,10 @@
 
 #include <drm/drm.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_file.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_irq.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_pciids.h>
 #include <drm/drm_vblank.h>
 
@@ -426,14 +426,48 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
 
 static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	return drm_get_pci_dev(pdev, ent, &driver);
-}
+	struct drm_device *dev;
+	int ret;
 
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	dev = drm_dev_alloc(&driver, &pdev->dev);
+	if (IS_ERR(dev)) {
+		ret = PTR_ERR(dev);
+		goto err_pci_disable_device;
+	}
+
+	dev->pdev = pdev;
+	pci_set_drvdata(pdev, dev);
+
+	ret = psb_driver_load(dev, ent->driver_data);
+	if (ret)
+		goto err_drm_dev_put;
+
+	ret = drm_dev_register(dev, ent->driver_data);
+	if (ret)
+		goto err_psb_driver_unload;
+
+	return 0;
+
+err_psb_driver_unload:
+	psb_driver_unload(dev);
+err_drm_dev_put:
+	drm_dev_put(dev);
+err_pci_disable_device:
+	pci_disable_device(pdev);
+	return ret;
+}
 
 static void psb_pci_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
-	drm_put_dev(dev);
+
+	drm_dev_unregister(dev);
+	psb_driver_unload(dev);
+	drm_dev_put(dev);
 }
 
 static const struct dev_pm_ops psb_pm_ops = {
@@ -466,8 +500,6 @@ static const struct file_operations psb_gem_fops = {
 
 static struct drm_driver driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM,
-	.load = psb_driver_load,
-	.unload = psb_driver_unload,
 	.lastclose = drm_fb_helper_lastclose,
 
 	.num_ioctls = ARRAY_SIZE(psb_ioctls),
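[Editor's note: generic shape of the manual probe/remove flow the psb_drv.c change adopts in place of drm_get_pci_dev() and the deprecated .load/.unload hooks (a sketch, not the literal gma500 code). The point of the ordering is that the driver is fully initialized before drm_dev_register() makes the device visible to userspace, and teardown runs in exactly the reverse order. example_driver, example_hw_init and example_hw_fini are hypothetical names.]

static int example_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_device *ddev;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ddev = drm_dev_alloc(&example_driver, &pdev->dev);
	if (IS_ERR(ddev)) {
		ret = PTR_ERR(ddev);
		goto err_disable;
	}

	ret = example_hw_init(ddev);		/* replaces the old .load hook */
	if (ret)
		goto err_put;

	ret = drm_dev_register(ddev, 0);	/* userspace sees the device only now */
	if (ret)
		goto err_hw_fini;
	return 0;

err_hw_fini:
	example_hw_fini(ddev);
err_put:
	drm_dev_put(ddev);
err_disable:
	pci_disable_device(pdev);
	return ret;
}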
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 9b3c03f..3d4ef30 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -229,6 +229,8 @@ enum {
 #define KSEL_BYPASS_25 6
 #define KSEL_BYPASS_83_100 7
 
+struct drm_fb_helper;
+
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -432,7 +434,7 @@ struct drm_psb_private {
 	struct pci_dev *lpc_pdev; /* Currently only used by mrst */
 	const struct psb_ops *ops;
 	const struct psb_offset *regmap;
-	
+
 	struct child_device_config *child_dev;
 	int child_dev_num;
 
@@ -540,7 +542,7 @@ struct drm_psb_private {
 
 	/* Oaktrail HDMI state */
 	struct oaktrail_hdmi_dev *hdmi_priv;
-	
+
 	/* Register state */
 	struct psb_save_area regs;
 
@@ -572,7 +574,7 @@ struct drm_psb_private {
 	uint32_t blc_adj1;
 	uint32_t blc_adj2;
 
-	void *fbdev;
+	struct drm_fb_helper *fb_helper;
 
 	/* 2D acceleration */
 	spinlock_t lock_2d;
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 4256410..fed3b56 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -432,6 +432,7 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
 	.gamma_set = gma_crtc_gamma_set,
 	.set_config = gma_crtc_set_config,
 	.destroy = gma_crtc_destroy,
+	.page_flip = gma_crtc_page_flip,
 };
 
 const struct gma_clock_funcs psb_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index cdf1033..16c6136 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -12,6 +12,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
 #include <linux/gpio.h>
 #include "gma_display.h"
 
@@ -182,6 +183,8 @@ struct gma_crtc {
 	struct psb_intel_crtc_state *crtc_state;
 
 	const struct gma_clock_funcs *clock_funcs;
+
+	struct drm_pending_vblank_event *page_flip_event;
 };
 
 #define to_gma_crtc(x)	\
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index e6265fb..91f9001 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -165,11 +165,23 @@ static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
 		"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
 		__func__, pipe, PSB_RVDC32(pipe_stat_reg));
 
-	if (pipe_stat_val & PIPE_VBLANK_STATUS)
+	if (pipe_stat_val & PIPE_VBLANK_STATUS ||
+	    (IS_MFLD(dev) && pipe_stat_val & PIPE_TE_STATUS)) {
+		struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+		struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+		unsigned long flags;
+
 		drm_handle_vblank(dev, pipe);
 
-	if (pipe_stat_val & PIPE_TE_STATUS)
-		drm_handle_vblank(dev, pipe);
+		spin_lock_irqsave(&dev->event_lock, flags);
+		if (gma_crtc->page_flip_event) {
+			drm_crtc_send_vblank_event(crtc,
+						   gma_crtc->page_flip_event);
+			gma_crtc->page_flip_event = NULL;
+			drm_crtc_vblank_put(crtc);
+		}
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
 }
 
 /*
@@ -194,7 +206,6 @@ static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	u32 val, addr;
-	int error = false;
 
 	if (stat_1 & _PSB_CE_TWOD_COMPLETE)
 		val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);
@@ -229,7 +240,6 @@ static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
 
 			DRM_ERROR("\tMMU failing address is 0x%08x.\n",
 				  (unsigned int)addr);
-			error = true;
 		}
 	}
 
@@ -460,12 +470,11 @@ void psb_irq_turn_off_dpst(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv =
 	    (struct drm_psb_private *) dev->dev_private;
-	u32 hist_reg;
 	u32 pwm_reg;
 
 	if (gma_power_begin(dev, false)) {
 		PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
-		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+		PSB_RVDC32(HISTOGRAM_INT_CONTROL);
 
 		psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
 
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
index 7de3ce6..9e82244 100644
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -25,7 +25,7 @@
 #include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/platform_data/tc35876x.h>
+#include <linux/gpio/consumer.h>
 
 #include <asm/intel_scu_ipc.h>
 
@@ -36,6 +36,11 @@
 
 static struct i2c_client *tc35876x_client;
 static struct i2c_client *cmi_lcd_i2c_client;
+/* Panel GPIOs */
+static struct gpio_desc *bridge_reset;
+static struct gpio_desc *bridge_bl_enable;
+static struct gpio_desc *backlight_voltage;
+
 
 #define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
 #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
@@ -316,27 +321,23 @@ static int tc35876x_regr(struct i2c_client *client, u16 reg, u32 *value)
 
 void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state)
 {
-	struct tc35876x_platform_data *pdata;
-
 	if (WARN(!tc35876x_client, "%s called before probe", __func__))
 		return;
 
 	dev_dbg(&tc35876x_client->dev, "%s: state %d\n", __func__, state);
 
-	pdata = dev_get_platdata(&tc35876x_client->dev);
-
-	if (pdata->gpio_bridge_reset == -1)
+	if (!bridge_reset)
 		return;
 
 	if (state) {
-		gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0);
+		gpiod_set_value_cansleep(bridge_reset, 0);
 		mdelay(10);
 	} else {
 		/* Pull MIPI Bridge reset pin to Low */
-		gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0);
+		gpiod_set_value_cansleep(bridge_reset, 0);
 		mdelay(20);
 		/* Pull MIPI Bridge reset pin to High */
-		gpio_set_value_cansleep(pdata->gpio_bridge_reset, 1);
+		gpiod_set_value_cansleep(bridge_reset, 1);
 		mdelay(40);
 	}
 }
@@ -510,25 +511,20 @@ void tc35876x_brightness_control(struct drm_device *dev, int level)
 
 void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev)
 {
-	struct tc35876x_platform_data *pdata;
-
 	if (WARN(!tc35876x_client, "%s called before probe", __func__))
 		return;
 
 	dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
 
-	pdata = dev_get_platdata(&tc35876x_client->dev);
+	if (bridge_bl_enable)
+		gpiod_set_value_cansleep(bridge_bl_enable, 0);
 
-	if (pdata->gpio_panel_bl_en != -1)
-		gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 0);
-
-	if (pdata->gpio_panel_vadd != -1)
-		gpio_set_value_cansleep(pdata->gpio_panel_vadd, 0);
+	if (backlight_voltage)
+		gpiod_set_value_cansleep(backlight_voltage, 0);
 }
 
 void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
 {
-	struct tc35876x_platform_data *pdata;
 	struct drm_psb_private *dev_priv = dev->dev_private;
 
 	if (WARN(!tc35876x_client, "%s called before probe", __func__))
@@ -536,10 +532,8 @@ void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
 
 	dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
 
-	pdata = dev_get_platdata(&tc35876x_client->dev);
-
-	if (pdata->gpio_panel_vadd != -1) {
-		gpio_set_value_cansleep(pdata->gpio_panel_vadd, 1);
+	if (backlight_voltage) {
+		gpiod_set_value_cansleep(backlight_voltage, 1);
 		msleep(260);
 	}
 
@@ -571,8 +565,8 @@ void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
 				"i2c write failed (%d)\n", ret);
 	}
 
-	if (pdata->gpio_panel_bl_en != -1)
-		gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 1);
+	if (bridge_bl_enable)
+		gpiod_set_value_cansleep(bridge_bl_enable, 1);
 
 	tc35876x_brightness_control(dev, dev_priv->brightness_adjusted);
 }
@@ -635,8 +629,6 @@ static int tc35876x_get_panel_info(struct drm_device *dev, int pipe,
 static int tc35876x_bridge_probe(struct i2c_client *client,
 				const struct i2c_device_id *id)
 {
-	struct tc35876x_platform_data *pdata;
-
 	dev_info(&client->dev, "%s\n", __func__);
 
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
@@ -645,26 +637,23 @@ static int tc35876x_bridge_probe(struct i2c_client *client,
 		return -ENODEV;
 	}
 
-	pdata = dev_get_platdata(&client->dev);
-	if (!pdata) {
-		dev_err(&client->dev, "%s: no platform data\n", __func__);
-		return -ENODEV;
-	}
+	bridge_reset = devm_gpiod_get_optional(&client->dev, "bridge-reset", GPIOD_OUT_LOW);
+	if (IS_ERR(bridge_reset))
+		return PTR_ERR(bridge_reset);
+	if (bridge_reset)
+		gpiod_set_consumer_name(bridge_reset, "tc35876x bridge reset");
 
-	if (pdata->gpio_bridge_reset != -1) {
-		gpio_request(pdata->gpio_bridge_reset, "tc35876x bridge reset");
-		gpio_direction_output(pdata->gpio_bridge_reset, 0);
-	}
+	bridge_bl_enable = devm_gpiod_get_optional(&client->dev, "bl-en", GPIOD_OUT_LOW);
+	if (IS_ERR(bridge_bl_enable))
+		return PTR_ERR(bridge_bl_enable);
+	if (bridge_bl_enable)
+		gpiod_set_consumer_name(bridge_bl_enable, "tc35876x panel bl en");
 
-	if (pdata->gpio_panel_bl_en != -1) {
-		gpio_request(pdata->gpio_panel_bl_en, "tc35876x panel bl en");
-		gpio_direction_output(pdata->gpio_panel_bl_en, 0);
-	}
-
-	if (pdata->gpio_panel_vadd != -1) {
-		gpio_request(pdata->gpio_panel_vadd, "tc35876x panel vadd");
-		gpio_direction_output(pdata->gpio_panel_vadd, 0);
-	}
+	backlight_voltage = devm_gpiod_get_optional(&client->dev, "vadd", GPIOD_OUT_LOW);
+	if (IS_ERR(backlight_voltage))
+		return PTR_ERR(backlight_voltage);
+	if (backlight_voltage)
+		gpiod_set_consumer_name(backlight_voltage, "tc35876x panel vadd");
 
 	tc35876x_client = client;
 
@@ -673,19 +662,8 @@ static int tc35876x_bridge_probe(struct i2c_client *client,
 
 static int tc35876x_bridge_remove(struct i2c_client *client)
 {
-	struct tc35876x_platform_data *pdata = dev_get_platdata(&client->dev);
-
 	dev_dbg(&client->dev, "%s\n", __func__);
 
-	if (pdata->gpio_bridge_reset != -1)
-		gpio_free(pdata->gpio_bridge_reset);
-
-	if (pdata->gpio_panel_bl_en != -1)
-		gpio_free(pdata->gpio_panel_bl_en);
-
-	if (pdata->gpio_panel_vadd != -1)
-		gpio_free(pdata->gpio_panel_vadd);
-
 	tc35876x_client = NULL;
 
 	return 0;
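[Editor's note: sketch of the descriptor-based GPIO pattern the tc35876x conversion above uses. devm_gpiod_get_optional() returns NULL when the platform simply does not wire the line up, an ERR_PTR on a real error, and the descriptor is released automatically by devm, which is why the remove path no longer needs gpio_free(). "panel-enable" and example_probe are invented example names.]

static struct gpio_desc *panel_enable;

static int example_probe(struct i2c_client *client)
{
	panel_enable = devm_gpiod_get_optional(&client->dev, "panel-enable",
					       GPIOD_OUT_LOW);
	if (IS_ERR(panel_enable))
		return PTR_ERR(panel_enable);

	if (panel_enable)			/* may legitimately be absent */
		gpiod_set_value_cansleep(panel_enable, 1);

	return 0;
}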
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index 0c2d429..f991327 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_fbdev.o hibmc_ttm.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o
 
 obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 6527a97..7fa7d49 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -99,14 +99,12 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
 	s64 gpu_addr = 0;
 	unsigned int line_l;
 	struct hibmc_drm_private *priv = plane->dev->dev_private;
-	struct hibmc_framebuffer *hibmc_fb;
 	struct drm_gem_vram_object *gbo;
 
 	if (!state->fb)
 		return;
 
-	hibmc_fb = to_hibmc_framebuffer(state->fb);
-	gbo = drm_gem_vram_of_gem(hibmc_fb->obj);
+	gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
 
 	gpu_addr = drm_gem_vram_offset(gbo);
 	if (WARN_ON_ONCE(gpu_addr < 0))
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 8dd5a43..4a8a4cf 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -17,6 +17,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_vram_helper.h>
 #include <drm/drm_irq.h>
 #include <drm/drm_print.h>
@@ -54,6 +55,7 @@ static struct drm_driver hibmc_driver = {
 	.desc			= "hibmc drm driver",
 	.major			= 1,
 	.minor			= 0,
+	.debugfs_init		= drm_vram_mm_debugfs_init,
 	.dumb_create            = hibmc_dumb_create,
 	.dumb_map_offset        = drm_gem_vram_driver_dumb_mmap_offset,
 	.gem_prime_mmap		= drm_gem_prime_mmap,
@@ -247,8 +249,6 @@ static int hibmc_unload(struct drm_device *dev)
 {
 	struct hibmc_drm_private *priv = dev->dev_private;
 
-	hibmc_fbdev_fini(priv);
-
 	drm_atomic_helper_shutdown(dev);
 
 	if (dev->irq_enabled)
@@ -307,7 +307,7 @@ static int hibmc_load(struct drm_device *dev)
 	/* reset all the states of crtc/plane/encoder/connector */
 	drm_mode_config_reset(dev);
 
-	ret = hibmc_fbdev_init(priv);
+	ret = drm_fbdev_generic_setup(dev, 16);
 	if (ret) {
 		DRM_ERROR("failed to initialize fbdev: %d\n", ret);
 		goto err;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index e58ecd7..50a0c1f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -18,18 +18,6 @@
 #include <drm/drm_framebuffer.h>
 
 struct drm_device;
-struct drm_gem_object;
-
-struct hibmc_framebuffer {
-	struct drm_framebuffer fb;
-	struct drm_gem_object *obj;
-};
-
-struct hibmc_fbdev {
-	struct drm_fb_helper helper; /* must be first */
-	struct hibmc_framebuffer *fb;
-	int size;
-};
 
 struct hibmc_drm_private {
 	/* hw */
@@ -42,13 +30,8 @@ struct hibmc_drm_private {
 	/* drm */
 	struct drm_device  *dev;
 	bool mode_config_initialized;
-
-	/* fbdev */
-	struct hibmc_fbdev *fbdev;
 };
 
-#define to_hibmc_framebuffer(x) container_of(x, struct hibmc_framebuffer, fb)
-
 void hibmc_set_power_mode(struct hibmc_drm_private *priv,
 			  unsigned int power_mode);
 void hibmc_set_current_gate(struct hibmc_drm_private *priv,
@@ -56,15 +39,6 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv,
 
 int hibmc_de_init(struct hibmc_drm_private *priv);
 int hibmc_vdac_init(struct hibmc_drm_private *priv);
-int hibmc_fbdev_init(struct hibmc_drm_private *priv);
-void hibmc_fbdev_fini(struct hibmc_drm_private *priv);
-
-int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
-		     struct drm_gem_object **obj);
-struct hibmc_framebuffer *
-hibmc_framebuffer_init(struct drm_device *dev,
-		       const struct drm_mode_fb_cmd2 *mode_cmd,
-		       struct drm_gem_object *obj);
 
 int hibmc_mm_init(struct hibmc_drm_private *hibmc);
 void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
deleted file mode 100644
index b4c1cea..0000000
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+++ /dev/null
@@ -1,240 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Hisilicon Hibmc SoC drm driver
- *
- * Based on the bochs drm driver.
- *
- * Copyright (c) 2016 Huawei Limited.
- *
- * Author:
- *	Rongrong Zou <zourongrong@huawei.com>
- *	Rongrong Zou <zourongrong@gmail.com>
- *	Jianhua Li <lijianhua@huawei.com>
- */
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_probe_helper.h>
-
-#include "hibmc_drm_drv.h"
-
-static int hibmcfb_create_object(
-				struct hibmc_drm_private *priv,
-				const struct drm_mode_fb_cmd2 *mode_cmd,
-				struct drm_gem_object **gobj_p)
-{
-	struct drm_gem_object *gobj;
-	struct drm_device *dev = priv->dev;
-	u32 size;
-	int ret = 0;
-
-	size = mode_cmd->pitches[0] * mode_cmd->height;
-	ret = hibmc_gem_create(dev, size, true, &gobj);
-	if (ret)
-		return ret;
-
-	*gobj_p = gobj;
-	return ret;
-}
-
-static struct fb_ops hibmc_drm_fb_ops = {
-	.owner = THIS_MODULE,
-	.fb_check_var = drm_fb_helper_check_var,
-	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = drm_fb_helper_sys_fillrect,
-	.fb_copyarea = drm_fb_helper_sys_copyarea,
-	.fb_imageblit = drm_fb_helper_sys_imageblit,
-	.fb_pan_display = drm_fb_helper_pan_display,
-	.fb_blank = drm_fb_helper_blank,
-	.fb_setcmap = drm_fb_helper_setcmap,
-};
-
-static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
-			       struct drm_fb_helper_surface_size *sizes)
-{
-	struct hibmc_fbdev *hi_fbdev =
-		container_of(helper, struct hibmc_fbdev, helper);
-	struct hibmc_drm_private *priv = helper->dev->dev_private;
-	struct fb_info *info;
-	struct drm_mode_fb_cmd2 mode_cmd;
-	struct drm_gem_object *gobj = NULL;
-	int ret = 0;
-	size_t size;
-	unsigned int bytes_per_pixel;
-	struct drm_gem_vram_object *gbo = NULL;
-	void *base;
-
-	DRM_DEBUG_DRIVER("surface width(%d), height(%d) and bpp(%d)\n",
-			 sizes->surface_width, sizes->surface_height,
-			 sizes->surface_bpp);
-
-	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
-
-	mode_cmd.width = sizes->surface_width;
-	mode_cmd.height = sizes->surface_height;
-	mode_cmd.pitches[0] = mode_cmd.width * bytes_per_pixel;
-	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
-							  sizes->surface_depth);
-
-	size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height);
-
-	ret = hibmcfb_create_object(priv, &mode_cmd, &gobj);
-	if (ret) {
-		DRM_ERROR("failed to create fbcon backing object: %d\n", ret);
-		return -ENOMEM;
-	}
-
-	gbo = drm_gem_vram_of_gem(gobj);
-
-	ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
-	if (ret) {
-		DRM_ERROR("failed to pin fbcon: %d\n", ret);
-		goto out_unref_gem;
-	}
-
-	base = drm_gem_vram_kmap(gbo, true, NULL);
-	if (IS_ERR(base)) {
-		ret = PTR_ERR(base);
-		DRM_ERROR("failed to kmap fbcon: %d\n", ret);
-		goto out_unpin_bo;
-	}
-
-	info = drm_fb_helper_alloc_fbi(helper);
-	if (IS_ERR(info)) {
-		ret = PTR_ERR(info);
-		DRM_ERROR("failed to allocate fbi: %d\n", ret);
-		goto out_release_fbi;
-	}
-
-	hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj);
-	if (IS_ERR(hi_fbdev->fb)) {
-		ret = PTR_ERR(hi_fbdev->fb);
-		hi_fbdev->fb = NULL;
-		DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
-		goto out_release_fbi;
-	}
-
-	priv->fbdev->size = size;
-	hi_fbdev->helper.fb = &hi_fbdev->fb->fb;
-
-	info->fbops = &hibmc_drm_fb_ops;
-
-	drm_fb_helper_fill_info(info, &priv->fbdev->helper, sizes);
-
-	info->screen_base = base;
-	info->screen_size = size;
-
-	info->fix.smem_start = gbo->bo.mem.bus.offset + gbo->bo.mem.bus.base;
-	info->fix.smem_len = size;
-	return 0;
-
-out_release_fbi:
-	drm_gem_vram_kunmap(gbo);
-out_unpin_bo:
-	drm_gem_vram_unpin(gbo);
-out_unref_gem:
-	drm_gem_object_put_unlocked(gobj);
-
-	return ret;
-}
-
-static void hibmc_fbdev_destroy(struct hibmc_fbdev *fbdev)
-{
-	struct hibmc_framebuffer *gfb = fbdev->fb;
-	struct drm_fb_helper *fbh = &fbdev->helper;
-
-	drm_fb_helper_unregister_fbi(fbh);
-
-	drm_fb_helper_fini(fbh);
-
-	if (gfb)
-		drm_framebuffer_put(&gfb->fb);
-}
-
-static const struct drm_fb_helper_funcs hibmc_fbdev_helper_funcs = {
-	.fb_probe = hibmc_drm_fb_create,
-};
-
-int hibmc_fbdev_init(struct hibmc_drm_private *priv)
-{
-	int ret;
-	struct fb_var_screeninfo *var;
-	struct fb_fix_screeninfo *fix;
-	struct hibmc_fbdev *hifbdev;
-
-	hifbdev = devm_kzalloc(priv->dev->dev, sizeof(*hifbdev), GFP_KERNEL);
-	if (!hifbdev) {
-		DRM_ERROR("failed to allocate hibmc_fbdev\n");
-		return -ENOMEM;
-	}
-
-	priv->fbdev = hifbdev;
-	drm_fb_helper_prepare(priv->dev, &hifbdev->helper,
-			      &hibmc_fbdev_helper_funcs);
-
-	/* Now just one crtc and one channel */
-	ret = drm_fb_helper_init(priv->dev, &hifbdev->helper, 1);
-	if (ret) {
-		DRM_ERROR("failed to initialize fb helper: %d\n", ret);
-		return ret;
-	}
-
-	ret = drm_fb_helper_single_add_all_connectors(&hifbdev->helper);
-	if (ret) {
-		DRM_ERROR("failed to add all connectors: %d\n", ret);
-		goto fini;
-	}
-
-	ret = drm_fb_helper_initial_config(&hifbdev->helper, 16);
-	if (ret) {
-		DRM_ERROR("failed to setup initial conn config: %d\n", ret);
-		goto fini;
-	}
-
-	var = &hifbdev->helper.fbdev->var;
-	fix = &hifbdev->helper.fbdev->fix;
-
-	DRM_DEBUG_DRIVER("Member of info->var is :\n"
-			 "xres=%d\n"
-			 "yres=%d\n"
-			 "xres_virtual=%d\n"
-			 "yres_virtual=%d\n"
-			 "xoffset=%d\n"
-			 "yoffset=%d\n"
-			 "bits_per_pixel=%d\n"
-			 "...\n", var->xres, var->yres, var->xres_virtual,
-			 var->yres_virtual, var->xoffset, var->yoffset,
-			 var->bits_per_pixel);
-	DRM_DEBUG_DRIVER("Member of info->fix is :\n"
-			 "smem_start=%lx\n"
-			 "smem_len=%d\n"
-			 "type=%d\n"
-			 "type_aux=%d\n"
-			 "visual=%d\n"
-			 "xpanstep=%d\n"
-			 "ypanstep=%d\n"
-			 "ywrapstep=%d\n"
-			 "line_length=%d\n"
-			 "accel=%d\n"
-			 "capabilities=%d\n"
-			 "...\n", fix->smem_start, fix->smem_len, fix->type,
-			 fix->type_aux, fix->visual, fix->xpanstep,
-			 fix->ypanstep, fix->ywrapstep, fix->line_length,
-			 fix->accel, fix->capabilities);
-
-	return 0;
-
-fini:
-	drm_fb_helper_fini(&hifbdev->helper);
-	return ret;
-}
-
-void hibmc_fbdev_fini(struct hibmc_drm_private *priv)
-{
-	if (!priv->fbdev)
-		return;
-
-	hibmc_fbdev_destroy(priv->fbdev);
-	priv->fbdev = NULL;
-}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 21b684e..50b988f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -15,6 +15,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_gem_vram_helper.h>
 #include <drm/drm_print.h>
 
@@ -46,125 +47,14 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
 	drm_vram_helper_release_mm(hibmc->dev);
 }
 
-int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
-		     struct drm_gem_object **obj)
-{
-	struct drm_gem_vram_object *gbo;
-	int ret;
-
-	*obj = NULL;
-
-	size = roundup(size, PAGE_SIZE);
-	if (size == 0)
-		return -EINVAL;
-
-	gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
-	if (IS_ERR(gbo)) {
-		ret = PTR_ERR(gbo);
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("failed to allocate GEM object: %d\n", ret);
-		return ret;
-	}
-	*obj = &gbo->bo.base;
-	return 0;
-}
-
 int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
 		      struct drm_mode_create_dumb *args)
 {
-	struct drm_gem_object *gobj;
-	u32 handle;
-	int ret;
-
-	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 16);
-	args->size = args->pitch * args->height;
-
-	ret = hibmc_gem_create(dev, args->size, false,
-			       &gobj);
-	if (ret) {
-		DRM_ERROR("failed to create GEM object: %d\n", ret);
-		return ret;
-	}
-
-	ret = drm_gem_handle_create(file, gobj, &handle);
-	drm_gem_object_put_unlocked(gobj);
-	if (ret) {
-		DRM_ERROR("failed to unreference GEM object: %d\n", ret);
-		return ret;
-	}
-
-	args->handle = handle;
-	return 0;
-}
-
-static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-	struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb);
-
-	drm_gem_object_put_unlocked(hibmc_fb->obj);
-	drm_framebuffer_cleanup(fb);
-	kfree(hibmc_fb);
-}
-
-static const struct drm_framebuffer_funcs hibmc_fb_funcs = {
-	.destroy = hibmc_user_framebuffer_destroy,
-};
-
-struct hibmc_framebuffer *
-hibmc_framebuffer_init(struct drm_device *dev,
-		       const struct drm_mode_fb_cmd2 *mode_cmd,
-		       struct drm_gem_object *obj)
-{
-	struct hibmc_framebuffer *hibmc_fb;
-	int ret;
-
-	hibmc_fb = kzalloc(sizeof(*hibmc_fb), GFP_KERNEL);
-	if (!hibmc_fb) {
-		DRM_ERROR("failed to allocate hibmc_fb\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	drm_helper_mode_fill_fb_struct(dev, &hibmc_fb->fb, mode_cmd);
-	hibmc_fb->obj = obj;
-	ret = drm_framebuffer_init(dev, &hibmc_fb->fb, &hibmc_fb_funcs);
-	if (ret) {
-		DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
-		kfree(hibmc_fb);
-		return ERR_PTR(ret);
-	}
-
-	return hibmc_fb;
-}
-
-static struct drm_framebuffer *
-hibmc_user_framebuffer_create(struct drm_device *dev,
-			      struct drm_file *filp,
-			      const struct drm_mode_fb_cmd2 *mode_cmd)
-{
-	struct drm_gem_object *obj;
-	struct hibmc_framebuffer *hibmc_fb;
-
-	DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
-			 mode_cmd->width, mode_cmd->height,
-			 (mode_cmd->pixel_format) & 0xff,
-			 (mode_cmd->pixel_format >> 8)  & 0xff,
-			 (mode_cmd->pixel_format >> 16) & 0xff,
-			 (mode_cmd->pixel_format >> 24) & 0xff);
-
-	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
-	if (!obj)
-		return ERR_PTR(-ENOENT);
-
-	hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj);
-	if (IS_ERR(hibmc_fb)) {
-		drm_gem_object_put_unlocked(obj);
-		return ERR_PTR((long)hibmc_fb);
-	}
-	return &hibmc_fb->fb;
+	return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args);
 }
 
 const struct drm_mode_config_funcs hibmc_mode_funcs = {
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = drm_atomic_helper_commit,
-	.fb_create = hibmc_user_framebuffer_create,
+	.fb_create = drm_gem_fb_create,
 };
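[Editor's note: sketch of the generic wiring that replaces hibmc's private framebuffer, GEM-create and fbdev code above. Dumb buffers come from the VRAM helper, userspace framebuffers wrap the GEM object directly via drm_gem_fb_create(), and the console is provided by the generic fbdev emulation. The example_* names are illustrative, not from the patch.]

static const struct drm_mode_config_funcs example_mode_funcs = {
	.fb_create     = drm_gem_fb_create,	/* plain drm_framebuffer over fb->obj[0] */
	.atomic_check  = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static int example_dumb_create(struct drm_file *file, struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	/* 0 = default page alignment, 16 = scanout pitch alignment in bytes */
	return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args);
}

/* and once during driver initialization, a 16 bpp console as in hibmc_load():
 *	drm_fbdev_generic_setup(dev, 16);
 */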
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index e66c383..b88c3d5 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -32,6 +32,7 @@
 
 #include <linux/delay.h>
 #include <linux/mman.h>
+#include <linux/pci.h>
 
 #include <drm/drm_agpsupport.h>
 #include <drm/drm_device.h>
@@ -39,7 +40,6 @@
 #include <drm/drm_file.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_irq.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_print.h>
 #include <drm/i810_drm.h>
 
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 5dd26a0..0e53a06 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -31,11 +31,12 @@
  */
 
 #include "i810_drv.h"
+
 #include <linux/module.h>
+#include <linux/pci.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_pciids.h>
 #include <drm/i810_drm.h>
 
diff --git a/drivers/gpu/drm/i915/.gitignore b/drivers/gpu/drm/i915/.gitignore
new file mode 100644
index 0000000..d9a77f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/.gitignore
@@ -0,0 +1 @@
+*.hdrtest
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 438040f..1cb28c2 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -27,6 +27,7 @@
 	select X86_MSR # used by igt/pm_rpm
 	select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
 	select DRM_DEBUG_MM if DRM=y
+	select DRM_EXPORT_FOR_TESTS if m
 	select DRM_DEBUG_SELFTEST
 	select DMABUF_SELFTESTS
 	select SW_SYNC # signaling validation framework (igt/syncobj*)
@@ -149,6 +150,7 @@
 	bool "Enable selftests upon driver load"
 	depends on DRM_I915
 	default n
+	select DRM_EXPORT_FOR_TESTS if m
 	select FAULT_INJECTION
 	select PRIME_NUMBERS
 	help
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 90dcf09..b8c5f89 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -31,9 +31,6 @@
 subdir-ccflags-y += \
 	$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
 
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
-
 subdir-ccflags-y += -I$(srctree)/$(src)
 
 # Please keep these build lists sorted!
@@ -73,8 +70,12 @@
 i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
 
 # "Graphics Technology" (aka we talk to the gpu)
-obj-y += gt/
 gt-y += \
+	gt/debugfs_engines.o \
+	gt/debugfs_gt.o \
+	gt/debugfs_gt_pm.o \
+	gt/gen6_ppgtt.o \
+	gt/gen8_ppgtt.o \
 	gt/intel_breadcrumbs.o \
 	gt/intel_context.o \
 	gt/intel_engine_cs.o \
@@ -82,14 +83,17 @@
 	gt/intel_engine_pm.o \
 	gt/intel_engine_pool.o \
 	gt/intel_engine_user.o \
+	gt/intel_ggtt.o \
 	gt/intel_gt.o \
 	gt/intel_gt_irq.o \
 	gt/intel_gt_pm.o \
 	gt/intel_gt_pm_irq.o \
 	gt/intel_gt_requests.o \
+	gt/intel_gtt.o \
 	gt/intel_llc.o \
 	gt/intel_lrc.o \
 	gt/intel_mocs.o \
+	gt/intel_ppgtt.o \
 	gt/intel_rc6.o \
 	gt/intel_renderstate.o \
 	gt/intel_reset.o \
@@ -108,7 +112,6 @@
 i915-y += $(gt-y)
 
 # GEM (Graphics Execution Management) code
-obj-y += gem/
 gem-y += \
 	gem/i915_gem_busy.o \
 	gem/i915_gem_clflush.o \
@@ -154,7 +157,6 @@
 	  intel_wopcm.o
 
 # general-purpose microcontroller (GuC) support
-obj-y += gt/uc/
 i915-y += gt/uc/intel_uc.o \
 	  gt/uc/intel_uc_fw.o \
 	  gt/uc/intel_guc.o \
@@ -167,7 +169,6 @@
 	  gt/uc/intel_huc_fw.o
 
 # modesetting core code
-obj-y += display/
 i915-y += \
 	display/intel_atomic.o \
 	display/intel_atomic_plane.o \
@@ -232,7 +233,6 @@
 	display/vlv_dsi_pll.o
 
 # perf code
-obj-y += oa/
 i915-y += \
 	oa/i915_oa_hsw.o \
 	oa/i915_oa_bdw.o \
@@ -257,8 +257,10 @@
 	gem/selftests/igt_gem_utils.o \
 	selftests/i915_random.o \
 	selftests/i915_selftest.o \
+	selftests/igt_atomic.o \
 	selftests/igt_flush_test.o \
 	selftests/igt_live_test.o \
+	selftests/igt_mmap.o \
 	selftests/igt_reset.o \
 	selftests/igt_spinner.o
 
@@ -272,3 +274,27 @@
 
 obj-$(CONFIG_DRM_I915) += i915.o
 obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
+
+# header test
+
+# exclude some broken headers from the test coverage
+no-header-test := \
+	display/intel_vbt_defs.h \
+	gvt/execlist.h \
+	gvt/fb_decoder.h \
+	gvt/gtt.h \
+	gvt/gvt.h \
+	gvt/interrupt.h \
+	gvt/mmio_context.h \
+	gvt/mpt.h \
+	gvt/scheduler.h
+
+extra-$(CONFIG_DRM_I915_WERROR) += \
+	$(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
+		$(shell cd $(srctree)/$(src) && find * -name '*.h')))
+
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+      cmd_hdrtest = $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include $<; touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+	$(call if_changed_dep,hdrtest)
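[Editor's note: illustration of what the hdrtest rule above enforces. Each header is compiled on its own ("$(CC) ... -include header.h" against an empty translation unit), so every header must be self-contained and include what it uses instead of relying on whoever includes it first. example_widget.h is a hypothetical header, not an i915 file.]

/* example_widget.h */
#ifndef EXAMPLE_WIDGET_H
#define EXAMPLE_WIDGET_H

#include <linux/types.h>	/* u32 -- without this the standalone build fails */

struct example_widget {
	u32 flags;
};

#endif /* EXAMPLE_WIDGET_H */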
diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile
deleted file mode 100644
index 173c305..0000000
--- a/drivers/gpu/drm/i915/display/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
-header-test- := intel_vbt_defs.h
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 325df29..f8e8821 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -34,6 +34,7 @@
 #include "intel_ddi.h"
 #include "intel_dsi.h"
 #include "intel_panel.h"
+#include "intel_vdsc.h"
 
 static inline int header_credits_available(struct drm_i915_private *dev_priv,
 					   enum transcoder dsi_trans)
@@ -76,7 +77,7 @@ static enum transcoder dsi_port_to_transcoder(enum port port)
 static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct mipi_dsi_device *dsi;
 	enum port port;
 	enum transcoder dsi_trans;
@@ -201,7 +202,7 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host,
 static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum phy phy;
 	u32 tmp;
 	int lane;
@@ -266,7 +267,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
 				     const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 dss_ctl1;
 
 	dss_ctl1 = I915_READ(DSS_CTL1);
@@ -276,7 +277,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
 
 	if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
 		const struct drm_display_mode *adjusted_mode =
-					&pipe_config->base.adjusted_mode;
+					&pipe_config->hw.adjusted_mode;
 		u32 dss_ctl2;
 		u16 hactive = adjusted_mode->crtc_hdisplay;
 		u16 dl_buffer_depth;
@@ -301,18 +302,31 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
 	I915_WRITE(DSS_CTL1, dss_ctl1);
 }
 
-static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
+/* aka DSI 8X clock */
+static int afe_clk(struct intel_encoder *encoder,
+		   const struct intel_crtc_state *crtc_state)
+{
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	int bpp;
+
+	if (crtc_state->dsc.compression_enable)
+		bpp = crtc_state->dsc.compressed_bpp;
+	else
+		bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+	return DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp, intel_dsi->lane_count);
+}
+
+static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
+					  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
-	u32 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
-	u32 afe_clk_khz; /* 8X Clock */
+	int afe_clk_khz;
 	u32 esc_clk_div_m;
 
-	afe_clk_khz = DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp,
-					intel_dsi->lane_count);
-
+	afe_clk_khz = afe_clk(encoder, crtc_state);
 	esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
 
 	for_each_dsi_port(port, intel_dsi->ports) {
@@ -346,7 +360,7 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
 static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	u32 tmp;
 
@@ -362,7 +376,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
 static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum phy phy;
 
 	for_each_dsi_phy(phy, intel_dsi->phys)
@@ -373,7 +387,7 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
 static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum phy phy;
 	u32 tmp;
 	int lane;
@@ -422,7 +436,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
 static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 tmp;
 	enum phy phy;
 
@@ -474,7 +488,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
 static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 tmp;
 	enum port port;
 
@@ -490,10 +504,12 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
 	}
 }
 
-static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
+static void
+gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
+			     const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 tmp;
 	enum port port;
 	enum phy phy;
@@ -531,7 +547,7 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
 	 * leave all fields at HW default values.
 	 */
 	if (IS_GEN(dev_priv, 11)) {
-		if (intel_dsi_bitrate(intel_dsi) <= 800000) {
+		if (afe_clk(encoder, crtc_state) <= 800000) {
 			for_each_dsi_port(port, intel_dsi->ports) {
 				tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
 				tmp &= ~TA_SURE_MASK;
@@ -559,7 +575,7 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
 static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 tmp;
 	enum phy phy;
 
@@ -575,7 +591,7 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
 static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 tmp;
 	enum phy phy;
 
@@ -592,7 +608,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
 			      const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 	enum phy phy;
 	u32 val;
@@ -624,8 +640,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
 			       const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	enum pipe pipe = intel_crtc->pipe;
 	u32 tmp;
 	enum port port;
@@ -641,7 +657,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
 			tmp |= EOTP_DISABLED;
 
 		/* enable link calibration if freq > 1.5Gbps */
-		if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) {
+		if (afe_clk(encoder, pipe_config) >= 1500 * 1000) {
 			tmp &= ~LINK_CALIBRATION_MASK;
 			tmp |= CALIBRATION_ENABLED_INITIAL_ONLY;
 		}
@@ -667,22 +683,26 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
 
 		/* select pixel format */
 		tmp &= ~PIX_FMT_MASK;
-		switch (intel_dsi->pixel_format) {
-		default:
-			MISSING_CASE(intel_dsi->pixel_format);
-			/* fallthrough */
-		case MIPI_DSI_FMT_RGB565:
-			tmp |= PIX_FMT_RGB565;
-			break;
-		case MIPI_DSI_FMT_RGB666_PACKED:
-			tmp |= PIX_FMT_RGB666_PACKED;
-			break;
-		case MIPI_DSI_FMT_RGB666:
-			tmp |= PIX_FMT_RGB666_LOOSE;
-			break;
-		case MIPI_DSI_FMT_RGB888:
-			tmp |= PIX_FMT_RGB888;
-			break;
+		if (pipe_config->dsc.compression_enable) {
+			tmp |= PIX_FMT_COMPRESSED;
+		} else {
+			switch (intel_dsi->pixel_format) {
+			default:
+				MISSING_CASE(intel_dsi->pixel_format);
+				/* fallthrough */
+			case MIPI_DSI_FMT_RGB565:
+				tmp |= PIX_FMT_RGB565;
+				break;
+			case MIPI_DSI_FMT_RGB666_PACKED:
+				tmp |= PIX_FMT_RGB666_PACKED;
+				break;
+			case MIPI_DSI_FMT_RGB666:
+				tmp |= PIX_FMT_RGB666_LOOSE;
+				break;
+			case MIPI_DSI_FMT_RGB888:
+				tmp |= PIX_FMT_RGB888;
+				break;
+			}
 		}
 
 		if (INTEL_GEN(dev_priv) >= 12) {
@@ -745,6 +765,9 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
 		case PIPE_C:
 			tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
 			break;
+		case PIPE_D:
+			tmp |= TRANS_DDI_EDP_INPUT_D_ONOFF;
+			break;
 		}
 
 		/* enable DDI buffer */
@@ -763,12 +786,12 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
 
 static void
 gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
-				 const struct intel_crtc_state *pipe_config)
+				 const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	const struct drm_display_mode *adjusted_mode =
-					&pipe_config->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 	enum port port;
 	enum transcoder dsi_trans;
 	/* horizontal timings */
@@ -776,11 +799,25 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
 	u16 hback_porch;
 	/* vertical timings */
 	u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
+	int mul = 1, div = 1;
+
+	/*
+	 * Adjust horizontal timings (htotal, hsync_start, hsync_end) to account
+	 * for slower link speed if DSC is enabled.
+	 *
+	 * The compression frequency ratio is the ratio between compressed and
+	 * non-compressed link speeds, and simplifies down to the ratio between
+	 * compressed and non-compressed bpp.
+	 */
+	if (crtc_state->dsc.compression_enable) {
+		mul = crtc_state->dsc.compressed_bpp;
+		div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+	}
 
 	hactive = adjusted_mode->crtc_hdisplay;
-	htotal = adjusted_mode->crtc_htotal;
-	hsync_start = adjusted_mode->crtc_hsync_start;
-	hsync_end = adjusted_mode->crtc_hsync_end;
+	htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+	hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
+	hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
 	hsync_size  = hsync_end - hsync_start;
 	hback_porch = (adjusted_mode->crtc_htotal -
 		       adjusted_mode->crtc_hsync_end);
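[Editor's note: worked example of the DSC scaling introduced above, with illustrative numbers that are not from the patch. afe_clk() returns pclk * bpp / lane_count, using the compressed bpp when DSC is enabled, and the horizontal timings are shrunk by the same compressed/uncompressed bpp ratio because the link runs correspondingly slower; hactive itself is not touched.]

/*
 * Assume a 1920x1080 RGB888 panel, pclk = 148500 kHz, 4 DSI lanes, and DSC
 * compressing 24 bpp down to 8 bpp:
 *
 *   afe_clk = pclk * bpp / lanes
 *           = 148500 * 24 / 4 = 891000 kHz uncompressed
 *           = 148500 *  8 / 4 = 297000 kHz with DSC
 *
 * so htotal, hsync_start and hsync_end scale by 8/24, e.g. 2200 -> 734.
 */
static u16 example_scaled_htotal(u16 htotal, int compressed_bpp, int pipe_bpp)
{
	/* e.g. DIV_ROUND_UP(2200 * 8, 24) == 734 */
	return DIV_ROUND_UP(htotal * compressed_bpp, pipe_bpp);
}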
@@ -886,7 +923,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
 static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	enum transcoder dsi_trans;
 	u32 tmp;
@@ -904,10 +941,11 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
 	}
 }
 
-static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
+static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
+				     const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	enum transcoder dsi_trans;
 	u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
@@ -919,7 +957,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
 	 * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate
 	 * ESCAPE_CLK_COUNT  = TIME_NS/ESC_CLK_NS
 	 */
-	divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000;
+	divisor = intel_dsi_tlpx_ns(intel_dsi) * afe_clk(encoder, crtc_state) * 1000;
 	mul = 8 * 1000000;
 	hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul,
 				     divisor);
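The byte-clock to escape-clock conversion spelled out in the comment can be exercised on its own. A minimal sketch with made-up timeout and clock numbers; intel_dsi_tlpx_ns() and afe_clk() are just treated as plain inputs here, and 64-bit math is used to keep the intermediate products from overflowing:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* hypothetical inputs, not real panel/VBT data */
	unsigned long long hs_tx_timeout_byteclk = 16000;	/* byte clocks */
	unsigned long long tlpx_ns = 62;			/* escape clock period, ns */
	unsigned long long afe_clk_khz = 750000;		/* AFE (bit) clock, kHz */

	/*
	 * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6) / bitrate
	 * ESCAPE_CLK_COUNT = TIME_NS / ESC_CLK_NS
	 * folded into a single rounded-up division, as in the driver.
	 */
	unsigned long long mul = 8 * 1000000ULL;
	unsigned long long divisor = tlpx_ns * afe_clk_khz * 1000;
	unsigned long long hs_tx_timeout =
		DIV_ROUND_UP(hs_tx_timeout_byteclk * mul, divisor);

	printf("hs_tx_timeout = %llu escape clocks\n", hs_tx_timeout);
	return 0;
}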
@@ -955,7 +993,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
 
 static void
 gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
-			      const struct intel_crtc_state *pipe_config)
+			      const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
@@ -972,13 +1010,13 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
 	gen11_dsi_enable_ddi_buffer(encoder);
 
 	/* setup D-PHY timings */
-	gen11_dsi_setup_dphy_timings(encoder);
+	gen11_dsi_setup_dphy_timings(encoder, crtc_state);
 
 	/* step 4h: setup DSI protocol timeouts */
-	gen11_dsi_setup_timeouts(encoder);
+	gen11_dsi_setup_timeouts(encoder, crtc_state);
 
 	/* Step (4h, 4i, 4j, 4k): Configure transcoder */
-	gen11_dsi_configure_transcoder(encoder, pipe_config);
+	gen11_dsi_configure_transcoder(encoder, crtc_state);
 
 	/* Step 4l: Gate DDI clocks */
 	if (IS_GEN(dev_priv, 11))
@@ -988,7 +1026,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
 static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct mipi_dsi_device *dsi;
 	enum port port;
 	enum transcoder dsi_trans;
@@ -1025,21 +1063,21 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
 }
 
 static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
-				     const struct intel_crtc_state *pipe_config,
+				     const struct intel_crtc_state *crtc_state,
 				     const struct drm_connector_state *conn_state)
 {
 	/* step2: enable IO power */
 	gen11_dsi_enable_io_power(encoder);
 
 	/* step3: enable DSI PLL */
-	gen11_dsi_program_esc_clk_div(encoder);
+	gen11_dsi_program_esc_clk_div(encoder, crtc_state);
 }
 
 static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
 				 const struct intel_crtc_state *pipe_config,
 				 const struct drm_connector_state *conn_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
 	/* step3b */
 	gen11_dsi_map_pll(encoder, pipe_config);
@@ -1050,6 +1088,8 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
 	/* step5: program and powerup panel */
 	gen11_dsi_powerup_panel(encoder);
 
+	intel_dsc_enable(encoder, pipe_config);
+
 	/* step6c: configure transcoder timings */
 	gen11_dsi_set_transcoder_timings(encoder, pipe_config);
 
@@ -1064,7 +1104,7 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
 static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	enum transcoder dsi_trans;
 	u32 tmp;
@@ -1086,7 +1126,7 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
 
 static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
@@ -1099,7 +1139,7 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
 static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	enum transcoder dsi_trans;
 	u32 tmp;
@@ -1140,7 +1180,7 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
 static void gen11_dsi_disable_port(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 tmp;
 	enum port port;
 
@@ -1162,7 +1202,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
 static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	u32 tmp;
 
@@ -1189,7 +1229,7 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
 			      const struct intel_crtc_state *old_crtc_state,
 			      const struct drm_connector_state *old_conn_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
 	/* step1: turn off backlight */
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
@@ -1211,12 +1251,42 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
 	gen11_dsi_disable_io_power(encoder);
 }
 
+static void gen11_dsi_post_disable(struct intel_encoder *encoder,
+				   const struct intel_crtc_state *old_crtc_state,
+				   const struct drm_connector_state *old_conn_state)
+{
+	intel_crtc_vblank_off(old_crtc_state);
+
+	intel_dsc_disable(old_crtc_state);
+
+	skl_scaler_disable(old_crtc_state);
+}
+
+static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
+						 struct drm_display_mode *mode)
+{
+	/* FIXME: DSC? */
+	return intel_dsi_mode_valid(connector, mode);
+}
+
 static void gen11_dsi_get_timings(struct intel_encoder *encoder,
 				  struct intel_crtc_state *pipe_config)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct drm_display_mode *adjusted_mode =
-					&pipe_config->base.adjusted_mode;
+					&pipe_config->hw.adjusted_mode;
+
+	if (pipe_config->dsc.compressed_bpp) {
+		int div = pipe_config->dsc.compressed_bpp;
+		int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+		adjusted_mode->crtc_htotal =
+			DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+		adjusted_mode->crtc_hsync_start =
+			DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
+		adjusted_mode->crtc_hsync_end =
+			DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
+	}
 
 	if (intel_dsi->dual_link) {
 		adjusted_mode->crtc_hdisplay *= 2;
@@ -1242,22 +1312,66 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
 				 struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+	intel_dsc_get_config(encoder, pipe_config);
 
 	/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
 	pipe_config->port_clock =
 		cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
 
-	pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
+	pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
 	if (intel_dsi->dual_link)
-		pipe_config->base.adjusted_mode.crtc_clock *= 2;
+		pipe_config->hw.adjusted_mode.crtc_clock *= 2;
 
 	gen11_dsi_get_timings(encoder, pipe_config);
 	pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
 	pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
 }
 
+static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
+					struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+	int dsc_max_bpc = INTEL_GEN(dev_priv) >= 12 ? 12 : 10;
+	bool use_dsc;
+	int ret;
+
+	use_dsc = intel_bios_get_dsc_params(encoder, crtc_state, dsc_max_bpc);
+	if (!use_dsc)
+		return 0;
+
+	if (crtc_state->pipe_bpp < 8 * 3)
+		return -EINVAL;
+
+	/* FIXME: split only when necessary */
+	if (crtc_state->dsc.slice_count > 1)
+		crtc_state->dsc.dsc_split = true;
+
+	vdsc_cfg->convert_rgb = true;
+
+	ret = intel_dsc_compute_params(encoder, crtc_state);
+	if (ret)
+		return ret;
+
+	/* DSI specific sanity checks on the common code */
+	WARN_ON(vdsc_cfg->vbr_enable);
+	WARN_ON(vdsc_cfg->simple_422);
+	WARN_ON(vdsc_cfg->pic_width % vdsc_cfg->slice_width);
+	WARN_ON(vdsc_cfg->slice_height < 8);
+	WARN_ON(vdsc_cfg->pic_height % vdsc_cfg->slice_height);
+
+	ret = drm_dsc_compute_rc_parameters(vdsc_cfg);
+	if (ret)
+		return ret;
+
+	crtc_state->dsc.compression_enable = true;
+
+	return 0;
+}
+
 static int gen11_dsi_compute_config(struct intel_encoder *encoder,
 				    struct intel_crtc_state *pipe_config,
 				    struct drm_connector_state *conn_state)
@@ -1265,11 +1379,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
 	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
 						   base);
 	struct intel_connector *intel_connector = intel_dsi->attached_connector;
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	const struct drm_display_mode *fixed_mode =
 					intel_connector->panel.fixed_mode;
 	struct drm_display_mode *adjusted_mode =
-					&pipe_config->base.adjusted_mode;
+					&pipe_config->hw.adjusted_mode;
 
 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -1283,8 +1397,17 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
 	else
 		pipe_config->cpu_transcoder = TRANSCODER_DSI_0;
 
+	if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888)
+		pipe_config->pipe_bpp = 24;
+	else
+		pipe_config->pipe_bpp = 18;
+
 	pipe_config->clock_set = true;
-	pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5;
+
+	if (gen11_dsi_dsc_compute_config(encoder, pipe_config))
+		DRM_DEBUG_KMS("Attempting to use DSC failed\n");
+
+	pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;
 
 	return 0;
 }
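A rough standalone illustration of the bpp/port-clock derivation added above. The format enum and the AFE clock number are hypothetical stand-ins; only the "pipe_bpp from pixel format" and "port clock = AFE clock / 5" relations come from the patch itself:

#include <stdio.h>

/* illustrative stand-in for the MIPI DSI pixel formats used above */
enum dsi_fmt { FMT_RGB888, FMT_RGB666 };

int main(void)
{
	enum dsi_fmt fmt = FMT_RGB888;
	int pipe_bpp = (fmt == FMT_RGB888) ? 24 : 18;

	/* pretend afe_clk() reported this AFE/bit clock, in kHz */
	int afe_clk_khz = 750000;

	/* the DDI/port clock runs at one fifth of the AFE clock */
	int port_clock = afe_clk_khz / 5;

	printf("pipe_bpp=%d port_clock=%d kHz\n", pipe_bpp, port_clock);
	return 0;
}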
@@ -1292,15 +1415,21 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
 static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
 					struct intel_crtc_state *crtc_state)
 {
-	get_dsi_io_power_domains(to_i915(encoder->base.dev),
-				 enc_to_intel_dsi(&encoder->base));
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+	get_dsi_io_power_domains(i915,
+				 enc_to_intel_dsi(encoder));
+
+	if (crtc_state->dsc.compression_enable)
+		intel_display_power_get(i915,
+					intel_dsc_power_domain(crtc_state));
 }
 
 static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
 				   enum pipe *pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum transcoder dsi_trans;
 	intel_wakeref_t wakeref;
 	enum port port;
@@ -1325,6 +1454,9 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
 			*pipe = PIPE_C;
 			break;
+		case TRANS_DDI_EDP_INPUT_D_ONOFF:
+			*pipe = PIPE_D;
+			break;
 		default:
 			DRM_ERROR("Invalid PIPE input\n");
 			goto out;
@@ -1360,7 +1492,7 @@ static const struct drm_connector_funcs gen11_dsi_connector_funcs = {
 
 static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = {
 	.get_modes = intel_dsi_get_modes,
-	.mode_valid = intel_dsi_mode_valid,
+	.mode_valid = gen11_dsi_mode_valid,
 	.atomic_check = intel_digital_connector_atomic_check,
 };
 
@@ -1577,6 +1709,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
 	encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
 	encoder->pre_enable = gen11_dsi_pre_enable;
 	encoder->disable = gen11_dsi_disable;
+	encoder->post_disable = gen11_dsi_post_disable;
 	encoder->port = port;
 	encoder->get_config = gen11_dsi_get_config;
 	encoder->update_pipe = intel_panel_update_backlight;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index c2875b1..c362eec 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -37,6 +37,7 @@
 #include "intel_atomic.h"
 #include "intel_display_types.h"
 #include "intel_hdcp.h"
+#include "intel_psr.h"
 #include "intel_sprite.h"
 
 /**
@@ -129,6 +130,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
 	struct drm_crtc_state *crtc_state;
 
 	intel_hdcp_atomic_check(conn, old_state, new_state);
+	intel_psr_atomic_check(conn, old_state, new_state);
 
 	if (!new_state->crtc)
 		return 0;
@@ -175,6 +177,38 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector)
 }
 
 /**
+ * intel_connector_needs_modeset - check if connector needs a modeset
+ */
+bool
+intel_connector_needs_modeset(struct intel_atomic_state *state,
+			      struct drm_connector *connector)
+{
+	const struct drm_connector_state *old_conn_state, *new_conn_state;
+
+	old_conn_state = drm_atomic_get_old_connector_state(&state->base, connector);
+	new_conn_state = drm_atomic_get_new_connector_state(&state->base, connector);
+
+	return old_conn_state->crtc != new_conn_state->crtc ||
+	       (new_conn_state->crtc &&
+		drm_atomic_crtc_needs_modeset(drm_atomic_get_new_crtc_state(&state->base,
+									    new_conn_state->crtc)));
+}
+
+struct intel_digital_connector_state *
+intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
+					 struct intel_connector *connector)
+{
+	struct drm_connector_state *conn_state;
+
+	conn_state = drm_atomic_get_connector_state(&state->base,
+						    &connector->base);
+	if (IS_ERR(conn_state))
+		return ERR_CAST(conn_state);
+
+	return to_intel_digital_connector_state(conn_state);
+}
+
+/**
  * intel_crtc_duplicate_state - duplicate crtc state
  * @crtc: drm crtc
  *
@@ -186,13 +220,22 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector)
 struct drm_crtc_state *
 intel_crtc_duplicate_state(struct drm_crtc *crtc)
 {
+	const struct intel_crtc_state *old_crtc_state = to_intel_crtc_state(crtc->state);
 	struct intel_crtc_state *crtc_state;
 
-	crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
+	crtc_state = kmemdup(old_crtc_state, sizeof(*crtc_state), GFP_KERNEL);
 	if (!crtc_state)
 		return NULL;
 
-	__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->uapi);
+
+	/* copy color blobs */
+	if (crtc_state->hw.degamma_lut)
+		drm_property_blob_get(crtc_state->hw.degamma_lut);
+	if (crtc_state->hw.ctm)
+		drm_property_blob_get(crtc_state->hw.ctm);
+	if (crtc_state->hw.gamma_lut)
+		drm_property_blob_get(crtc_state->hw.gamma_lut);
 
 	crtc_state->update_pipe = false;
 	crtc_state->disable_lp_wm = false;
@@ -205,7 +248,29 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
 	crtc_state->fb_bits = 0;
 	crtc_state->update_planes = 0;
 
-	return &crtc_state->base;
+	return &crtc_state->uapi;
+}
+
+static void intel_crtc_put_color_blobs(struct intel_crtc_state *crtc_state)
+{
+	drm_property_blob_put(crtc_state->hw.degamma_lut);
+	drm_property_blob_put(crtc_state->hw.gamma_lut);
+	drm_property_blob_put(crtc_state->hw.ctm);
+}
+
+void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state)
+{
+	intel_crtc_put_color_blobs(crtc_state);
+}
+
+void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state)
+{
+	drm_property_replace_blob(&crtc_state->hw.degamma_lut,
+				  crtc_state->uapi.degamma_lut);
+	drm_property_replace_blob(&crtc_state->hw.gamma_lut,
+				  crtc_state->uapi.gamma_lut);
+	drm_property_replace_blob(&crtc_state->hw.ctm,
+				  crtc_state->uapi.ctm);
 }
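kmemdup() copies the blob pointers by value, so the duplicated state has to take its own references or a later destroy would drop the owner's reference a second time. A toy, self-contained illustration of that ownership rule; the struct names are invented, only the drm_property_blob_get()/put() get-on-duplicate, put-on-destroy pattern is being mimicked:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* toy stand-in for a refcounted drm_property_blob */
struct blob { int refcount; };

static struct blob *blob_get(struct blob *b) { if (b) b->refcount++; return b; }
static void blob_put(struct blob *b)
{
	if (b && --b->refcount == 0)
		free(b);
}

struct crtc_state { struct blob *gamma_lut; };

/* a duplicate must take its own reference, as in the i915 change above */
static struct crtc_state *duplicate_state(const struct crtc_state *old)
{
	struct crtc_state *new = malloc(sizeof(*new));

	if (!new)
		return NULL;
	memcpy(new, old, sizeof(*new));
	blob_get(new->gamma_lut);
	return new;
}

static void destroy_state(struct crtc_state *s)
{
	blob_put(s->gamma_lut);
	free(s);
}

int main(void)
{
	struct blob *lut = calloc(1, sizeof(*lut));
	struct crtc_state old = { .gamma_lut = lut };
	struct crtc_state *dup;

	lut->refcount = 1;
	dup = duplicate_state(&old);

	destroy_state(dup);		/* drops only the duplicate's reference */
	blob_put(old.gamma_lut);	/* original owner's reference, frees the blob */
	return 0;
}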
 
 /**
@@ -220,7 +285,11 @@ void
 intel_crtc_destroy_state(struct drm_crtc *crtc,
 			 struct drm_crtc_state *state)
 {
-	drm_atomic_helper_crtc_destroy_state(crtc, state);
+	struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
+
+	__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
+	intel_crtc_free_hw_state(crtc_state);
+	kfree(crtc_state);
 }
 
 static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
@@ -249,10 +318,10 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
 		return;
 
 	/* set scaler mode */
-	if (plane_state && plane_state->base.fb &&
-	    plane_state->base.fb->format->is_yuv &&
-	    plane_state->base.fb->format->num_planes > 1) {
-		struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	if (plane_state && plane_state->hw.fb &&
+	    plane_state->hw.fb->format->is_yuv &&
+	    plane_state->hw.fb->format->num_planes > 1) {
+		struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 		if (IS_GEN(dev_priv, 9) &&
 		    !IS_GEMINILAKE(dev_priv)) {
 			mode = SKL_PS_SCALER_MODE_NV12;
@@ -319,7 +388,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 	struct intel_plane_state *plane_state = NULL;
 	struct intel_crtc_scaler_state *scaler_state =
 		&crtc_state->scaler_state;
-	struct drm_atomic_state *drm_state = crtc_state->base.state;
+	struct drm_atomic_state *drm_state = crtc_state->uapi.state;
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
 	int num_scalers_need;
 	int i;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 49d5cb1..74c749d 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -17,6 +17,7 @@ struct drm_device;
 struct drm_i915_private;
 struct drm_property;
 struct intel_atomic_state;
+struct intel_connector;
 struct intel_crtc;
 struct intel_crtc_state;
 
@@ -32,10 +33,17 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
 					 struct drm_atomic_state *state);
 struct drm_connector_state *
 intel_digital_connector_duplicate_state(struct drm_connector *connector);
+bool intel_connector_needs_modeset(struct intel_atomic_state *state,
+				   struct drm_connector *connector);
+struct intel_digital_connector_state *
+intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
+					 struct intel_connector *connector);
 
 struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
 void intel_crtc_destroy_state(struct drm_crtc *crtc,
 			       struct drm_crtc_state *state);
+void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state);
+void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state);
 struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
 void intel_atomic_state_clear(struct drm_atomic_state *state);
 
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 98f557a..3e97af6 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -41,6 +41,16 @@
 #include "intel_pm.h"
 #include "intel_sprite.h"
 
+static void intel_plane_state_reset(struct intel_plane_state *plane_state,
+				    struct intel_plane *plane)
+{
+	memset(plane_state, 0, sizeof(*plane_state));
+
+	__drm_atomic_helper_plane_state_reset(&plane_state->uapi, &plane->base);
+
+	plane_state->scaler_id = -1;
+}
+
 struct intel_plane *intel_plane_alloc(void)
 {
 	struct intel_plane_state *plane_state;
@@ -56,8 +66,9 @@ struct intel_plane *intel_plane_alloc(void)
 		return ERR_PTR(-ENOMEM);
 	}
 
-	__drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
-	plane_state->scaler_id = -1;
+	intel_plane_state_reset(plane_state, plane);
+
+	plane->base.state = &plane_state->uapi;
 
 	return plane;
 }
@@ -80,22 +91,24 @@ void intel_plane_free(struct intel_plane *plane)
 struct drm_plane_state *
 intel_plane_duplicate_state(struct drm_plane *plane)
 {
-	struct drm_plane_state *state;
 	struct intel_plane_state *intel_state;
 
-	intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);
+	intel_state = to_intel_plane_state(plane->state);
+	intel_state = kmemdup(intel_state, sizeof(*intel_state), GFP_KERNEL);
 
 	if (!intel_state)
 		return NULL;
 
-	state = &intel_state->base;
-
-	__drm_atomic_helper_plane_duplicate_state(plane, state);
+	__drm_atomic_helper_plane_duplicate_state(plane, &intel_state->uapi);
 
 	intel_state->vma = NULL;
 	intel_state->flags = 0;
 
-	return state;
+	/* add reference to fb */
+	if (intel_state->hw.fb)
+		drm_framebuffer_get(intel_state->hw.fb);
+
+	return &intel_state->uapi;
 }
 
 /**
@@ -110,18 +123,22 @@ void
 intel_plane_destroy_state(struct drm_plane *plane,
 			  struct drm_plane_state *state)
 {
-	WARN_ON(to_intel_plane_state(state)->vma);
+	struct intel_plane_state *plane_state = to_intel_plane_state(state);
+	WARN_ON(plane_state->vma);
 
-	drm_atomic_helper_plane_destroy_state(plane, state);
+	__drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
+	if (plane_state->hw.fb)
+		drm_framebuffer_put(plane_state->hw.fb);
+	kfree(plane_state);
 }
 
 unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
 				   const struct intel_plane_state *plane_state)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	unsigned int cpp;
 
-	if (!plane_state->base.visible)
+	if (!plane_state->uapi.visible)
 		return 0;
 
 	cpp = fb->format->cpp[0];
@@ -144,10 +161,10 @@ bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct intel_plane_state *plane_state =
 		intel_atomic_get_new_plane_state(state, plane);
-	struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
 	struct intel_crtc_state *crtc_state;
 
-	if (!plane_state->base.visible || !plane->min_cdclk)
+	if (!plane_state->uapi.visible || !plane->min_cdclk)
 		return false;
 
 	crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -176,23 +193,52 @@ bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
 	return false;
 }
 
+static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
+{
+	if (plane_state->hw.fb)
+		drm_framebuffer_put(plane_state->hw.fb);
+
+	memset(&plane_state->hw, 0, sizeof(plane_state->hw));
+}
+
+void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+				       const struct intel_plane_state *from_plane_state)
+{
+	intel_plane_clear_hw_state(plane_state);
+
+	plane_state->hw.crtc = from_plane_state->uapi.crtc;
+	plane_state->hw.fb = from_plane_state->uapi.fb;
+	if (plane_state->hw.fb)
+		drm_framebuffer_get(plane_state->hw.fb);
+
+	plane_state->hw.alpha = from_plane_state->uapi.alpha;
+	plane_state->hw.pixel_blend_mode =
+		from_plane_state->uapi.pixel_blend_mode;
+	plane_state->hw.rotation = from_plane_state->uapi.rotation;
+	plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding;
+	plane_state->hw.color_range = from_plane_state->uapi.color_range;
+}
+
 int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
 					struct intel_crtc_state *new_crtc_state,
 					const struct intel_plane_state *old_plane_state,
 					struct intel_plane_state *new_plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
-	const struct drm_framebuffer *fb = new_plane_state->base.fb;
+	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
+	const struct drm_framebuffer *fb;
 	int ret;
 
+	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
+	fb = new_plane_state->hw.fb;
+
 	new_crtc_state->active_planes &= ~BIT(plane->id);
 	new_crtc_state->nv12_planes &= ~BIT(plane->id);
 	new_crtc_state->c8_planes &= ~BIT(plane->id);
 	new_crtc_state->data_rate[plane->id] = 0;
 	new_crtc_state->min_cdclk[plane->id] = 0;
-	new_plane_state->base.visible = false;
+	new_plane_state->uapi.visible = false;
 
-	if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
+	if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
 		return 0;
 
 	ret = plane->check_plane(new_crtc_state, new_plane_state);
@@ -200,18 +246,18 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
 		return ret;
 
 	/* FIXME pre-g4x don't work like this */
-	if (new_plane_state->base.visible)
+	if (new_plane_state->uapi.visible)
 		new_crtc_state->active_planes |= BIT(plane->id);
 
-	if (new_plane_state->base.visible &&
-	    drm_format_info_is_yuv_semiplanar(fb->format))
+	if (new_plane_state->uapi.visible &&
+	    intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
 		new_crtc_state->nv12_planes |= BIT(plane->id);
 
-	if (new_plane_state->base.visible &&
+	if (new_plane_state->uapi.visible &&
 	    fb->format->format == DRM_FORMAT_C8)
 		new_crtc_state->c8_planes |= BIT(plane->id);
 
-	if (new_plane_state->base.visible || old_plane_state->base.visible)
+	if (new_plane_state->uapi.visible || old_plane_state->uapi.visible)
 		new_crtc_state->update_planes |= BIT(plane->id);
 
 	new_crtc_state->data_rate[plane->id] =
@@ -225,11 +271,11 @@ static struct intel_crtc *
 get_crtc_from_states(const struct intel_plane_state *old_plane_state,
 		     const struct intel_plane_state *new_plane_state)
 {
-	if (new_plane_state->base.crtc)
-		return to_intel_crtc(new_plane_state->base.crtc);
+	if (new_plane_state->uapi.crtc)
+		return to_intel_crtc(new_plane_state->uapi.crtc);
 
-	if (old_plane_state->base.crtc)
-		return to_intel_crtc(old_plane_state->base.crtc);
+	if (old_plane_state->uapi.crtc)
+		return to_intel_crtc(old_plane_state->uapi.crtc);
 
 	return NULL;
 }
@@ -246,7 +292,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
 	const struct intel_crtc_state *old_crtc_state;
 	struct intel_crtc_state *new_crtc_state;
 
-	new_plane_state->base.visible = false;
+	new_plane_state->uapi.visible = false;
 	if (!crtc)
 		return 0;
 
@@ -307,26 +353,16 @@ void intel_update_plane(struct intel_plane *plane,
 			const struct intel_crtc_state *crtc_state,
 			const struct intel_plane_state *plane_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 
 	trace_intel_update_plane(&plane->base, crtc);
 	plane->update_plane(plane, crtc_state, plane_state);
 }
 
-void intel_update_slave(struct intel_plane *plane,
-			const struct intel_crtc_state *crtc_state,
-			const struct intel_plane_state *plane_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
-	trace_intel_update_plane(&plane->base, crtc);
-	plane->update_slave(plane, crtc_state, plane_state);
-}
-
 void intel_disable_plane(struct intel_plane *plane,
 			 const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 
 	trace_intel_disable_plane(&plane->base, crtc);
 	plane->disable_plane(plane, crtc_state);
@@ -355,25 +391,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
 		struct intel_plane_state *new_plane_state =
 			intel_atomic_get_new_plane_state(state, plane);
 
-		if (new_plane_state->base.visible) {
+		if (new_plane_state->uapi.visible ||
+		    new_plane_state->planar_slave) {
 			intel_update_plane(plane, new_crtc_state, new_plane_state);
-		} else if (new_plane_state->planar_slave) {
-			struct intel_plane *master =
-				new_plane_state->planar_linked_plane;
-
-			/*
-			 * We update the slave plane from this function because
-			 * programming it from the master plane's update_plane
-			 * callback runs into issues when the Y plane is
-			 * reassigned, disabled or used by a different plane.
-			 *
-			 * The slave plane is updated with the master plane's
-			 * plane_state.
-			 */
-			new_plane_state =
-				intel_atomic_get_new_plane_state(state, master);
-
-			intel_update_slave(plane, new_crtc_state, new_plane_state);
 		} else {
 			intel_disable_plane(plane, new_crtc_state);
 		}
@@ -395,7 +415,7 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
 		    !(update_mask & BIT(plane->id)))
 			continue;
 
-		if (new_plane_state->base.visible)
+		if (new_plane_state->uapi.visible)
 			intel_update_plane(plane, new_crtc_state, new_plane_state);
 		else
 			intel_disable_plane(plane, new_crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index e61e9a8..5cedafd 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -20,12 +20,11 @@ extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
 
 unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
 				   const struct intel_plane_state *plane_state);
+void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+				       const struct intel_plane_state *from_plane_state);
 void intel_update_plane(struct intel_plane *plane,
 			const struct intel_crtc_state *crtc_state,
 			const struct intel_plane_state *plane_state);
-void intel_update_slave(struct intel_plane *plane,
-			const struct intel_crtc_state *crtc_state,
-			const struct intel_plane_state *plane_state);
 void intel_disable_plane(struct intel_plane *plane,
 			 const struct intel_crtc_state *crtc_state);
 struct intel_plane *intel_plane_alloc(void);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 3a5ac13..b180407 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -234,7 +234,7 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
 static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
 {
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
@@ -555,7 +555,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
 				    const struct drm_connector_state *old_conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	enum pipe pipe = crtc->pipe;
 	enum port port = encoder->port;
 	u32 tmp, eldv;
@@ -602,7 +602,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
 				   const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_connector *connector = conn_state->connector;
 	enum pipe pipe = crtc->pipe;
 	enum port port = encoder->port;
@@ -692,10 +692,10 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct i915_audio_component *acomp = dev_priv->audio_component;
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_connector *connector = conn_state->connector;
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 	enum port port = encoder->port;
 	enum pipe pipe = crtc->pipe;
 
@@ -707,8 +707,8 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
 	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
 			 connector->base.id,
 			 connector->name,
-			 connector->encoder->base.id,
-			 connector->encoder->name);
+			 encoder->base.base.id,
+			 encoder->base.name);
 
 	connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
 
@@ -753,7 +753,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct i915_audio_component *acomp = dev_priv->audio_component;
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	enum port port = encoder->port;
 	enum pipe pipe = crtc->pipe;
 
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 63c1bd4..8beac06 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -29,6 +29,7 @@
 #include <drm/i915_drm.h>
 
 #include "display/intel_display.h"
+#include "display/intel_display_types.h"
 #include "display/intel_gmbus.h"
 
 #include "i915_drv.h"
@@ -58,6 +59,13 @@
  * that.
  */
 
+/* Wrapper for VBT child device config */
+struct display_device_data {
+	struct child_device_config child;
+	struct dsc_compression_parameters_entry *dsc;
+	struct list_head node;
+};
+
 #define	SLAVE_ADDR1	0x70
 #define	SLAVE_ADDR2	0x72
 
@@ -202,17 +210,12 @@ get_lvds_fp_timing(const struct bdb_header *bdb,
 	return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
 }
 
-/* Try to find integrated panel data */
+/* Parse general panel options */
 static void
-parse_lfp_panel_data(struct drm_i915_private *dev_priv,
-		     const struct bdb_header *bdb)
+parse_panel_options(struct drm_i915_private *dev_priv,
+		    const struct bdb_header *bdb)
 {
 	const struct bdb_lvds_options *lvds_options;
-	const struct bdb_lvds_lfp_data *lvds_lfp_data;
-	const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
-	const struct lvds_dvo_timing *panel_dvo_timing;
-	const struct lvds_fp_timing *fp_timing;
-	struct drm_display_mode *panel_fixed_mode;
 	int panel_type;
 	int drrs_mode;
 	int ret;
@@ -261,6 +264,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 		DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
 		break;
 	}
+}
+
+/* Try to find integrated panel timing data */
+static void
+parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
+		    const struct bdb_header *bdb)
+{
+	const struct bdb_lvds_lfp_data *lvds_lfp_data;
+	const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+	const struct lvds_dvo_timing *panel_dvo_timing;
+	const struct lvds_fp_timing *fp_timing;
+	struct drm_display_mode *panel_fixed_mode;
+	int panel_type = dev_priv->vbt.panel_type;
 
 	lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
 	if (!lvds_lfp_data)
@@ -282,7 +298,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 
 	dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
 
-	DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
+	DRM_DEBUG_KMS("Found panel mode in BIOS VBT legacy lfp table:\n");
 	drm_mode_debug_printmodeline(panel_fixed_mode);
 
 	fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
@@ -300,6 +316,98 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 }
 
 static void
+parse_generic_dtd(struct drm_i915_private *dev_priv,
+		  const struct bdb_header *bdb)
+{
+	const struct bdb_generic_dtd *generic_dtd;
+	const struct generic_dtd_entry *dtd;
+	struct drm_display_mode *panel_fixed_mode;
+	int num_dtd;
+
+	generic_dtd = find_section(bdb, BDB_GENERIC_DTD);
+	if (!generic_dtd)
+		return;
+
+	if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) {
+		DRM_ERROR("GDTD size %u is too small.\n",
+			  generic_dtd->gdtd_size);
+		return;
+	} else if (generic_dtd->gdtd_size !=
+		   sizeof(struct generic_dtd_entry)) {
+		DRM_ERROR("Unexpected GDTD size %u\n", generic_dtd->gdtd_size);
+		/* DTD has unknown fields, but keep going */
+	}
+
+	num_dtd = (get_blocksize(generic_dtd) -
+		   sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
+	if (dev_priv->vbt.panel_type >= num_dtd) {
+		DRM_ERROR("Panel type %d not found in table of %d DTD's\n",
+			  dev_priv->vbt.panel_type, num_dtd);
+		return;
+	}
+
+	dtd = &generic_dtd->dtd[dev_priv->vbt.panel_type];
+
+	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+	if (!panel_fixed_mode)
+		return;
+
+	panel_fixed_mode->hdisplay = dtd->hactive;
+	panel_fixed_mode->hsync_start =
+		panel_fixed_mode->hdisplay + dtd->hfront_porch;
+	panel_fixed_mode->hsync_end =
+		panel_fixed_mode->hsync_start + dtd->hsync;
+	panel_fixed_mode->htotal = panel_fixed_mode->hsync_end;
+
+	panel_fixed_mode->vdisplay = dtd->vactive;
+	panel_fixed_mode->vsync_start =
+		panel_fixed_mode->vdisplay + dtd->vfront_porch;
+	panel_fixed_mode->vsync_end =
+		panel_fixed_mode->vsync_start + dtd->vsync;
+	panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end;
+
+	panel_fixed_mode->clock = dtd->pixel_clock;
+	panel_fixed_mode->width_mm = dtd->width_mm;
+	panel_fixed_mode->height_mm = dtd->height_mm;
+
+	panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+	drm_mode_set_name(panel_fixed_mode);
+
+	if (dtd->hsync_positive_polarity)
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (dtd->vsync_positive_polarity)
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+	DRM_DEBUG_KMS("Found panel mode in BIOS VBT generic dtd table:\n");
+	drm_mode_debug_printmodeline(panel_fixed_mode);
+
+	dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+}
+
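The generic DTD block stores front-porch and sync widths rather than absolute sync positions, so the parser above rebuilds the mode edges additively. The same arithmetic as a self-contained sketch; the struct below is a simplified stand-in with invented values, not the real generic_dtd_entry layout:

#include <stdio.h>

/* simplified mock of the VBT generic DTD fields actually used above */
struct dtd {
	int pixel_clock;
	int hactive, hfront_porch, hsync;
	int vactive, vfront_porch, vsync;
};

int main(void)
{
	/* hypothetical 1920x1080 panel entry */
	struct dtd d = {
		.pixel_clock = 148500,
		.hactive = 1920, .hfront_porch = 88, .hsync = 44,
		.vactive = 1080, .vfront_porch = 4, .vsync = 5,
	};

	int hsync_start = d.hactive + d.hfront_porch;
	int hsync_end = hsync_start + d.hsync;
	int htotal = hsync_end;		/* mirrors the parser above */

	int vsync_start = d.vactive + d.vfront_porch;
	int vsync_end = vsync_start + d.vsync;
	int vtotal = vsync_end;

	printf("%dx%d @ %d kHz, htotal=%d vtotal=%d\n",
	       d.hactive, d.vactive, d.pixel_clock, htotal, vtotal);
	return 0;
}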
+static void
+parse_panel_dtd(struct drm_i915_private *dev_priv,
+		const struct bdb_header *bdb)
+{
+	/*
+	 * Older VBTs provided DTD information for internal displays

+	 * through the "LFP panel DTD" block (42).  As of VBT revision 229,
+	 * that block is now deprecated and DTD information should be provided
+	 * via a newer "generic DTD" block (58).  Just to be safe, we'll
+	 * try the new generic DTD block first on VBT >= 229, but still fall
+	 * back to trying the old LFP block if that fails.
+	 */
+	if (bdb->version >= 229)
+		parse_generic_dtd(dev_priv, bdb);
+	if (!dev_priv->vbt.lfp_lvds_vbt_mode)
+		parse_lfp_panel_dtd(dev_priv, bdb);
+}
+
+static void
 parse_lfp_backlight(struct drm_i915_private *dev_priv,
 		    const struct bdb_header *bdb)
 {
@@ -449,8 +557,9 @@ static void
 parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
 {
 	struct sdvo_device_mapping *mapping;
+	const struct display_device_data *devdata;
 	const struct child_device_config *child;
-	int i, count = 0;
+	int count = 0;
 
 	/*
 	 * Only parse SDVO mappings on gens that could have SDVO. This isn't
@@ -461,8 +570,8 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
 		return;
 	}
 
-	for (i = 0, count = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
+	list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+		child = &devdata->child;
 
 		if (child->slave_addr != SLAVE_ADDR1 &&
 		    child->slave_addr != SLAVE_ADDR2) {
@@ -552,16 +661,45 @@ parse_driver_features(struct drm_i915_private *dev_priv,
 			dev_priv->vbt.int_lvds_support = 0;
 	}
 
-	DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+	if (bdb->version < 228) {
+		DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+		/*
+		 * If DRRS is not supported, drrs_type has to be set to 0.
+		 * This is because, VBT is configured in such a way that
+		 * static DRRS is 0 and DRRS not supported is represented by
+		 * driver->drrs_enabled=false
+		 */
+		if (!driver->drrs_enabled)
+			dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+
+		dev_priv->vbt.psr.enable = driver->psr_enabled;
+	}
+}
+
+static void
+parse_power_conservation_features(struct drm_i915_private *dev_priv,
+				  const struct bdb_header *bdb)
+{
+	const struct bdb_lfp_power *power;
+	u8 panel_type = dev_priv->vbt.panel_type;
+
+	if (bdb->version < 228)
+		return;
+
+	power = find_section(bdb, BDB_LVDS_POWER);
+	if (!power)
+		return;
+
+	dev_priv->vbt.psr.enable = power->psr & BIT(panel_type);
+
 	/*
 	 * If DRRS is not supported, drrs_type has to be set to 0.
 	 * This is because, VBT is configured in such a way that
 	 * static DRRS is 0 and DRRS not supported is represented by
-	 * driver->drrs_enabled=false
+	 * power->drrs & BIT(panel_type)=false
 	 */
-	if (!driver->drrs_enabled)
+	if (!(power->drrs & BIT(panel_type)))
 		dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
-	dev_priv->vbt.psr.enable = driver->psr_enabled;
 }
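From VBT version 228 on, PSR and DRRS enabling is per panel type, one bit per panel, instead of the single driver-features flag. A trivial standalone decode of those bitmasks (the raw values are invented):

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	/* hypothetical VBT lfp_power block contents */
	unsigned int psr_bits  = 0x0003;	/* power->psr  */
	unsigned int drrs_bits = 0x0001;	/* power->drrs */
	int panel_type = 1;			/* dev_priv->vbt.panel_type */

	int psr_enable = !!(psr_bits & BIT(panel_type));
	int drrs_supported = !!(drrs_bits & BIT(panel_type));

	printf("panel %d: PSR %d, DRRS %d\n",
	       panel_type, psr_enable, drrs_supported);
	return 0;
}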
 
 static void
@@ -1230,6 +1368,57 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
 	memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
 }
 
+static void
+parse_compression_parameters(struct drm_i915_private *i915,
+			     const struct bdb_header *bdb)
+{
+	const struct bdb_compression_parameters *params;
+	struct display_device_data *devdata;
+	const struct child_device_config *child;
+	u16 block_size;
+	int index;
+
+	if (bdb->version < 198)
+		return;
+
+	params = find_section(bdb, BDB_COMPRESSION_PARAMETERS);
+	if (params) {
+		/* Sanity checks */
+		if (params->entry_size != sizeof(params->data[0])) {
+			DRM_DEBUG_KMS("VBT: unsupported compression param entry size\n");
+			return;
+		}
+
+		block_size = get_blocksize(params);
+		if (block_size < sizeof(*params)) {
+			DRM_DEBUG_KMS("VBT: expected 16 compression param entries\n");
+			return;
+		}
+	}
+
+	list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+		child = &devdata->child;
+
+		if (!child->compression_enable)
+			continue;
+
+		if (!params) {
+			DRM_DEBUG_KMS("VBT: compression params not available\n");
+			continue;
+		}
+
+		if (child->compression_method_cps) {
+			DRM_DEBUG_KMS("VBT: CPS compression not supported\n");
+			continue;
+		}
+
+		index = child->compression_structure_index;
+
+		devdata->dsc = kmemdup(&params->data[index],
+				       sizeof(*devdata->dsc), GFP_KERNEL);
+	}
+}
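Each child device that enables compression points into the fixed 16-entry parameter table via compression_structure_index, which is what the new parser walks. A stripped-down sketch of that indexing; the struct layouts here are illustrative placeholders, not the real VBT definitions:

#include <stdio.h>

/* illustrative shapes only, not the real VBT structs */
struct dsc_entry { int version_major, version_minor, max_bpp; };

struct child_cfg {
	int compression_enable;
	int compression_method_cps;	/* CPS rather than VESA DSC */
	int compression_structure_index;
};

int main(void)
{
	struct dsc_entry table[16] = { [2] = { 1, 1, 12 } };
	struct child_cfg child = {
		.compression_enable = 1,
		.compression_method_cps = 0,
		.compression_structure_index = 2,
	};

	if (child.compression_enable && !child.compression_method_cps) {
		const struct dsc_entry *dsc =
			&table[child.compression_structure_index];

		printf("DSC %d.%d, max %d bpp\n",
		       dsc->version_major, dsc->version_minor, dsc->max_bpp);
	}
	return 0;
}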
+
 static u8 translate_iboost(u8 val)
 {
 	static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
@@ -1246,7 +1435,7 @@ static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
 	const struct ddi_vbt_port_info *info;
 	enum port port;
 
-	for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+	for_each_port(port) {
 		info = &i915->vbt.ddi_port_info[port];
 
 		if (info->child && ddc_pin == info->alternate_ddc_pin)
@@ -1297,7 +1486,7 @@ static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
 	const struct ddi_vbt_port_info *info;
 	enum port port;
 
-	for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+	for_each_port(port) {
 		info = &i915->vbt.ddi_port_info[port];
 
 		if (info->child && aux_ch == info->alternate_aux_channel)
@@ -1418,9 +1607,10 @@ static enum port dvo_port_to_port(u8 dvo_port)
 }
 
 static void parse_ddi_port(struct drm_i915_private *dev_priv,
-			   const struct child_device_config *child,
+			   struct display_device_data *devdata,
 			   u8 bdb_version)
 {
+	const struct child_device_config *child = &devdata->child;
 	struct ddi_vbt_port_info *info;
 	bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
 	enum port port;
@@ -1443,7 +1633,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
 	is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
 	is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
 
-	if (port == PORT_A && is_dvi) {
+	if (port == PORT_A && is_dvi && INTEL_GEN(dev_priv) < 12) {
 		DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
 			      is_hdmi ? "/HDMI" : "");
 		is_dvi = false;
@@ -1461,26 +1651,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
 	if (bdb_version >= 209)
 		info->supports_tbt = child->tbt;
 
-	DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d\n",
+	DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
 		      port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
 		      HAS_LSPCON(dev_priv) && child->lspcon,
-		      info->supports_typec_usb, info->supports_tbt);
-
-	if (is_edp && is_dvi)
-		DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
-			      port_name(port));
-	if (is_crt && port != PORT_E)
-		DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
-	if (is_crt && (is_dvi || is_dp))
-		DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
-			      port_name(port));
-	if (is_dvi && (port == PORT_A || port == PORT_E))
-		DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
-	if (!is_dvi && !is_dp && !is_crt)
-		DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
-			      port_name(port));
-	if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
-		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+		      info->supports_typec_usb, info->supports_tbt,
+		      devdata->dsc != NULL);
 
 	if (is_dvi) {
 		u8 ddc_pin;
@@ -1509,6 +1684,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
 			      port_name(port),
 			      hdmi_level_shift);
 		info->hdmi_level_shift = hdmi_level_shift;
+		info->hdmi_level_shift_set = true;
 	}
 
 	if (bdb_version >= 204) {
@@ -1571,8 +1747,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
 
 static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
 {
-	const struct child_device_config *child;
-	int i;
+	struct display_device_data *devdata;
 
 	if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
 		return;
@@ -1580,11 +1755,8 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
 	if (bdb_version < 155)
 		return;
 
-	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
-
-		parse_ddi_port(dev_priv, child, bdb_version);
-	}
+	list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node)
+		parse_ddi_port(dev_priv, devdata, bdb_version);
 }
 
 static void
@@ -1592,8 +1764,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 			  const struct bdb_header *bdb)
 {
 	const struct bdb_general_definitions *defs;
+	struct display_device_data *devdata;
 	const struct child_device_config *child;
-	int i, child_device_num, count;
+	int i, child_device_num;
 	u8 expected_size;
 	u16 block_size;
 	int bus_pin;
@@ -1649,26 +1822,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 
 	/* get the number of child device */
 	child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
-	count = 0;
-	/* get the number of child device that is present */
-	for (i = 0; i < child_device_num; i++) {
-		child = child_device_ptr(defs, i);
-		if (!child->device_type)
-			continue;
-		count++;
-	}
-	if (!count) {
-		DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
-		return;
-	}
-	dev_priv->vbt.child_dev = kcalloc(count, sizeof(*child), GFP_KERNEL);
-	if (!dev_priv->vbt.child_dev) {
-		DRM_DEBUG_KMS("No memory space for child device\n");
-		return;
-	}
 
-	dev_priv->vbt.child_dev_num = count;
-	count = 0;
 	for (i = 0; i < child_device_num; i++) {
 		child = child_device_ptr(defs, i);
 		if (!child->device_type)
@@ -1677,23 +1831,29 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 		DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n",
 			      child->device_type);
 
+		devdata = kzalloc(sizeof(*devdata), GFP_KERNEL);
+		if (!devdata)
+			break;
+
 		/*
 		 * Copy as much as we know (sizeof) and is available
-		 * (child_dev_size) of the child device. Accessing the data must
-		 * depend on VBT version.
+		 * (child_dev_size) of the child device config. Accessing the
+		 * data must depend on VBT version.
 		 */
-		memcpy(dev_priv->vbt.child_dev + count, child,
+		memcpy(&devdata->child, child,
 		       min_t(size_t, defs->child_dev_size, sizeof(*child)));
-		count++;
+
+		list_add_tail(&devdata->node, &dev_priv->vbt.display_devices);
 	}
+
+	if (list_empty(&dev_priv->vbt.display_devices))
+		DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
 }
 
 /* Common defaults which may be overridden by VBT. */
 static void
 init_vbt_defaults(struct drm_i915_private *dev_priv)
 {
-	enum port port;
-
 	dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
 
 	/* Default to having backlight */
@@ -1721,13 +1881,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 	dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv,
 			!HAS_PCH_SPLIT(dev_priv));
 	DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
-
-	for (port = PORT_A; port < I915_MAX_PORTS; port++) {
-		struct ddi_vbt_port_info *info =
-			&dev_priv->vbt.ddi_port_info[port];
-
-		info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
-	}
 }
 
 /* Defaults to initialize only if there is no VBT. */
@@ -1736,7 +1889,7 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
 {
 	enum port port;
 
-	for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+	for_each_port(port) {
 		struct ddi_vbt_port_info *info =
 			&dev_priv->vbt.ddi_port_info[port];
 		enum phy phy = intel_port_to_phy(dev_priv, port);
@@ -1787,6 +1940,13 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
 		return false;
 	}
 
+	if (vbt->vbt_size > size) {
+		DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+		return false;
+	}
+
+	size = vbt->vbt_size;
+
 	if (range_overflows_t(size_t,
 			      vbt->bdb_offset,
 			      sizeof(struct bdb_header),
@@ -1804,28 +1964,61 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
 	return vbt;
 }
 
-static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
+static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv)
 {
-	size_t i;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	void __iomem *p = NULL, *oprom;
+	struct vbt_header *vbt;
+	u16 vbt_size;
+	size_t i, size;
+
+	oprom = pci_map_rom(pdev, &size);
+	if (!oprom)
+		return NULL;
 
 	/* Scour memory looking for the VBT signature. */
-	for (i = 0; i + 4 < size; i++) {
-		void *vbt;
-
-		if (ioread32(bios + i) != *((const u32 *) "$VBT"))
+	for (i = 0; i + 4 < size; i += 4) {
+		if (ioread32(oprom + i) != *((const u32 *)"$VBT"))
 			continue;
 
-		/*
-		 * This is the one place where we explicitly discard the address
-		 * space (__iomem) of the BIOS/VBT.
-		 */
-		vbt = (void __force *) bios + i;
-		if (intel_bios_is_valid_vbt(vbt, size - i))
-			return vbt;
-
+		p = oprom + i;
+		size -= i;
 		break;
 	}
 
+	if (!p)
+		goto err_unmap_oprom;
+
+	if (sizeof(struct vbt_header) > size) {
+		DRM_DEBUG_DRIVER("VBT header incomplete\n");
+		goto err_unmap_oprom;
+	}
+
+	vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size));
+	if (vbt_size > size) {
+		DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+		goto err_unmap_oprom;
+	}
+
+	/* The rest will be validated by intel_bios_is_valid_vbt() */
+	vbt = kmalloc(vbt_size, GFP_KERNEL);
+	if (!vbt)
+		goto err_unmap_oprom;
+
+	memcpy_fromio(vbt, p, vbt_size);
+
+	if (!intel_bios_is_valid_vbt(vbt, vbt_size))
+		goto err_free_vbt;
+
+	pci_unmap_rom(pdev, oprom);
+
+	return vbt;
+
+err_free_vbt:
+	kfree(vbt);
+err_unmap_oprom:
+	pci_unmap_rom(pdev, oprom);
+
 	return NULL;
 }
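oprom_get_vbt() now copies the VBT out of the ROM mapping instead of handing out an __iomem pointer: locate "$VBT" on a dword boundary, bounds-check the advertised size, then copy. A self-contained mock of that scan over an ordinary buffer; the offsets and sizes below are fabricated (the real header layout lives in intel_vbt_defs.h) and the sketch assumes a little-endian host:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint8_t rom[64] = { 0 };
	size_t i;

	/* fabricated "ROM" with a signature and a size field */
	memcpy(rom + 8, "$VBT", 4);
	rom[8 + 30] = 0x20;	/* pretend the 16-bit size lives at offset 30 */

	for (i = 0; i + 4 < sizeof(rom); i += 4) {
		uint16_t vbt_size;

		if (memcmp(rom + i, "$VBT", 4))
			continue;

		memcpy(&vbt_size, rom + i + 30, sizeof(vbt_size));
		if (vbt_size > sizeof(rom) - i) {
			printf("VBT incomplete\n");
			return 1;
		}

		printf("VBT at offset %zu, %u bytes\n", i, vbt_size);
		return 0;
	}

	printf("no VBT signature\n");
	return 1;
}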
 
@@ -1839,10 +2032,11 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
  */
 void intel_bios_init(struct drm_i915_private *dev_priv)
 {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
 	const struct vbt_header *vbt = dev_priv->opregion.vbt;
+	struct vbt_header *oprom_vbt = NULL;
 	const struct bdb_header *bdb;
-	u8 __iomem *bios = NULL;
+
+	INIT_LIST_HEAD(&dev_priv->vbt.display_devices);
 
 	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) {
 		DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
@@ -1853,15 +2047,11 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
 
 	/* If the OpRegion does not have VBT, look in PCI ROM. */
 	if (!vbt) {
-		size_t size;
-
-		bios = pci_map_rom(pdev, &size);
-		if (!bios)
+		oprom_vbt = oprom_get_vbt(dev_priv);
+		if (!oprom_vbt)
 			goto out;
 
-		vbt = find_vbt(bios, size);
-		if (!vbt)
-			goto out;
+		vbt = oprom_vbt;
 
 		DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
 	}
@@ -1874,15 +2064,20 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
 	/* Grab useful general definitions */
 	parse_general_features(dev_priv, bdb);
 	parse_general_definitions(dev_priv, bdb);
-	parse_lfp_panel_data(dev_priv, bdb);
+	parse_panel_options(dev_priv, bdb);
+	parse_panel_dtd(dev_priv, bdb);
 	parse_lfp_backlight(dev_priv, bdb);
 	parse_sdvo_panel_data(dev_priv, bdb);
 	parse_driver_features(dev_priv, bdb);
+	parse_power_conservation_features(dev_priv, bdb);
 	parse_edp(dev_priv, bdb);
 	parse_psr(dev_priv, bdb);
 	parse_mipi_config(dev_priv, bdb);
 	parse_mipi_sequence(dev_priv, bdb);
 
+	/* Depends on child device list */
+	parse_compression_parameters(dev_priv, bdb);
+
 	/* Further processing on pre-parsed data */
 	parse_sdvo_device_mapping(dev_priv, bdb->version);
 	parse_ddi_ports(dev_priv, bdb->version);
@@ -1893,8 +2088,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
 		init_vbt_missing_defaults(dev_priv);
 	}
 
-	if (bios)
-		pci_unmap_rom(pdev, bios);
+	kfree(oprom_vbt);
 }
 
 /**
@@ -1903,9 +2097,14 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
  */
 void intel_bios_driver_remove(struct drm_i915_private *dev_priv)
 {
-	kfree(dev_priv->vbt.child_dev);
-	dev_priv->vbt.child_dev = NULL;
-	dev_priv->vbt.child_dev_num = 0;
+	struct display_device_data *devdata, *n;
+
+	list_for_each_entry_safe(devdata, n, &dev_priv->vbt.display_devices, node) {
+		list_del(&devdata->node);
+		kfree(devdata->dsc);
+		kfree(devdata);
+	}
+
 	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
 	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
 	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
@@ -1929,17 +2128,18 @@ void intel_bios_driver_remove(struct drm_i915_private *dev_priv)
  */
 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
 {
+	const struct display_device_data *devdata;
 	const struct child_device_config *child;
-	int i;
 
 	if (!dev_priv->vbt.int_tv_support)
 		return false;
 
-	if (!dev_priv->vbt.child_dev_num)
+	if (list_empty(&dev_priv->vbt.display_devices))
 		return true;
 
-	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
+	list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+		child = &devdata->child;
+
 		/*
 		 * If the device type is not TV, continue.
 		 */
@@ -1971,14 +2171,14 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
  */
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
 {
+	const struct display_device_data *devdata;
 	const struct child_device_config *child;
-	int i;
 
-	if (!dev_priv->vbt.child_dev_num)
+	if (list_empty(&dev_priv->vbt.display_devices))
 		return true;
 
-	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
+	list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+		child = &devdata->child;
 
 		/* If the device type is not LFP, continue.
 		 * We have to check both the new identifiers as well as the
@@ -2020,6 +2220,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
  */
 bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
 {
+	const struct display_device_data *devdata;
 	const struct child_device_config *child;
 	static const struct {
 		u16 dp, hdmi;
@@ -2030,7 +2231,6 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
 		[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
 		[PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
 	};
-	int i;
 
 	if (HAS_DDI(dev_priv)) {
 		const struct ddi_vbt_port_info *port_info =
@@ -2045,11 +2245,8 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
 	if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
 		return false;
 
-	if (!dev_priv->vbt.child_dev_num)
-		return false;
-
-	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
+	list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+		child = &devdata->child;
 
 		if ((child->dvo_port == port_mapping[port].dp ||
 		     child->dvo_port == port_mapping[port].hdmi) &&
@@ -2070,6 +2267,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
  */
 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
 {
+	const struct display_device_data *devdata;
 	const struct child_device_config *child;
 	static const short port_mapping[] = {
 		[PORT_B] = DVO_PORT_DPB,
@@ -2078,16 +2276,12 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
 		[PORT_E] = DVO_PORT_DPE,
 		[PORT_F] = DVO_PORT_DPF,
 	};
-	int i;
 
 	if (HAS_DDI(dev_priv))
 		return dev_priv->vbt.ddi_port_info[port].supports_edp;
 
-	if (!dev_priv->vbt.child_dev_num)
-		return false;
-
-	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
+	list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+		child = &devdata->child;
 
 		if (child->dvo_port == port_mapping[port] &&
 		    (child->device_type & DEVICE_TYPE_eDP_BITS) ==
@@ -2136,13 +2330,10 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
 				     enum port port)
 {
-	const struct child_device_config *child;
-	int i;
+	const struct display_device_data *devdata;
 
-	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
-
-		if (child_dev_is_dp_dual_mode(child, port))
+	list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+		if (child_dev_is_dp_dual_mode(&devdata->child, port))
 			return true;
 	}
 
@@ -2159,12 +2350,12 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
 			       enum port *port)
 {
+	const struct display_device_data *devdata;
 	const struct child_device_config *child;
 	u8 dvo_port;
-	int i;
 
-	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		child = dev_priv->vbt.child_dev + i;
+	list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+		child = &devdata->child;
 
 		if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
 			continue;
@@ -2188,6 +2379,104 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
 	return false;
 }
 
+static void fill_dsc(struct intel_crtc_state *crtc_state,
+		     struct dsc_compression_parameters_entry *dsc,
+		     int dsc_max_bpc)
+{
+	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+	int bpc = 8;
+
+	vdsc_cfg->dsc_version_major = dsc->version_major;
+	vdsc_cfg->dsc_version_minor = dsc->version_minor;
+
+	if (dsc->support_12bpc && dsc_max_bpc >= 12)
+		bpc = 12;
+	else if (dsc->support_10bpc && dsc_max_bpc >= 10)
+		bpc = 10;
+	else if (dsc->support_8bpc && dsc_max_bpc >= 8)
+		bpc = 8;
+	else
+		DRM_DEBUG_KMS("VBT: Unsupported BPC %d for DSC\n",
+			      dsc_max_bpc);
+
+	crtc_state->pipe_bpp = bpc * 3;
+
+	crtc_state->dsc.compressed_bpp = min(crtc_state->pipe_bpp,
+					     VBT_DSC_MAX_BPP(dsc->max_bpp));
+
+	/*
+	 * FIXME: This is ugly, and slice count should take DSC engine
+	 * throughput etc. into account.
+	 *
+	 * Also, per spec DSI supports 1, 2, 3 or 4 horizontal slices.
+	 */
+	if (dsc->slices_per_line & BIT(2)) {
+		crtc_state->dsc.slice_count = 4;
+	} else if (dsc->slices_per_line & BIT(1)) {
+		crtc_state->dsc.slice_count = 2;
+	} else {
+		/* FIXME */
+		if (!(dsc->slices_per_line & BIT(0)))
+			DRM_DEBUG_KMS("VBT: Unsupported DSC slice count for DSI\n");
+
+		crtc_state->dsc.slice_count = 1;
+	}
+
+	if (crtc_state->hw.adjusted_mode.crtc_hdisplay %
+	    crtc_state->dsc.slice_count != 0)
+		DRM_DEBUG_KMS("VBT: DSC hdisplay %d not divisible by slice count %d\n",
+			      crtc_state->hw.adjusted_mode.crtc_hdisplay,
+			      crtc_state->dsc.slice_count);
+
+	/*
+	 * FIXME: Use VBT rc_buffer_block_size and rc_buffer_size for the
+	 * implementation specific physical rate buffer size. Currently we use
+	 * the required rate buffer model size calculated in
+	 * drm_dsc_compute_rc_parameters() according to VESA DSC Annex E.
+	 *
+	 * The VBT rc_buffer_block_size and rc_buffer_size definitions
+	 * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. The DP DSC
+	 * implementation should also use the DPCD (or perhaps VBT for eDP)
+	 * provided value for the buffer size.
+	 */
+
+	/* FIXME: DSI spec says bpc + 1 for this one */
+	vdsc_cfg->line_buf_depth = VBT_DSC_LINE_BUFFER_DEPTH(dsc->line_buffer_depth);
+
+	vdsc_cfg->block_pred_enable = dsc->block_prediction_enable;
+
+	vdsc_cfg->slice_height = dsc->slice_height;
+}
+
+/* FIXME: initially DSI specific */
+bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
+			       struct intel_crtc_state *crtc_state,
+			       int dsc_max_bpc)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	const struct display_device_data *devdata;
+	const struct child_device_config *child;
+
+	list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+		child = &devdata->child;
+
+		if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
+			continue;
+
+		if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) {
+			if (!devdata->dsc)
+				return false;
+
+			if (crtc_state)
+				fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
+
+			return true;
+		}
+	}
+
+	return false;
+}
+
 /**
  * intel_bios_is_port_hpd_inverted - is HPD inverted for %port
  * @i915:	i915 device instance
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 98f0648..d6a0c29 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -35,6 +35,8 @@
 #include <drm/i915_drm.h>
 
 struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
 enum port;
 
 enum intel_backlight_type {
@@ -242,5 +244,8 @@ bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
 bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
 				  enum port port);
 enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
+			       struct intel_crtc_state *crtc_state,
+			       int dsc_max_bpc);
 
 #endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 22e83f8..b228671 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -15,7 +15,7 @@ struct intel_qgv_point {
 };
 
 struct intel_qgv_info {
-	struct intel_qgv_point points[3];
+	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
 	u8 num_points;
 	u8 num_channels;
 	u8 t_bl;
@@ -264,6 +264,9 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
 
 void intel_bw_init_hw(struct drm_i915_private *dev_priv)
 {
+	if (!HAS_DISPLAY(dev_priv))
+		return;
+
 	if (IS_GEN(dev_priv, 12))
 		icl_get_bw_info(dev_priv, &tgl_sa_info);
 	else if (IS_GEN(dev_priv, 11))
@@ -273,17 +276,29 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
 static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
 					int num_planes)
 {
-	if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(dev_priv) >= 11) {
+		/*
+		 * Each bw group has the same number of QGV points
+		 */
+		const struct intel_bw_info *bi =
+			&dev_priv->max_bw[0];
+		unsigned int min_bw = UINT_MAX;
+		int i;
+
 		/*
 		 * FIXME with SAGV disabled maybe we can assume
 		 * point 1 will always be used? Seems to match
 		 * the behaviour observed in the wild.
 		 */
-		return min3(icl_max_bw(dev_priv, num_planes, 0),
-			    icl_max_bw(dev_priv, num_planes, 1),
-			    icl_max_bw(dev_priv, num_planes, 2));
-	else
+		for (i = 0; i < bi->num_qgv_points; i++) {
+			unsigned int bw = icl_max_bw(dev_priv, num_planes, i);
+
+			min_bw = min(bw, min_bw);
+		}
+		return min_bw;
+	} else {
 		return UINT_MAX;
+	}
 }
 
 static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
@@ -297,7 +312,7 @@ static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_stat
 
 static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	unsigned int data_rate = 0;
 	enum plane_id plane_id;
 
@@ -318,7 +333,7 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
 void intel_bw_crtc_update(struct intel_bw_state *bw_state,
 			  const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 
 	bw_state->data_rate[crtc->pipe] =
 		intel_bw_crtc_data_rate(crtc_state);
@@ -471,3 +486,8 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
 
 	return 0;
 }
+
+void intel_bw_cleanup(struct drm_i915_private *dev_priv)
+{
+	drm_atomic_private_obj_fini(&dev_priv->bw_obj);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 9db10af..20b9ad2 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -25,6 +25,7 @@ struct intel_bw_state {
 
 void intel_bw_init_hw(struct drm_i915_private *dev_priv);
 int intel_bw_init(struct drm_i915_private *dev_priv);
+void intel_bw_cleanup(struct drm_i915_private *dev_priv);
 int intel_bw_atomic_check(struct intel_atomic_state *state);
 void intel_bw_crtc_update(struct intel_bw_state *bw_state,
 			  const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index ed8c7ce..0ce5926 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1904,7 +1904,7 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
 
 static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	int pixel_rate = crtc_state->pixel_rate;
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -1922,7 +1922,7 @@ static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
 
 static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_plane *plane;
 	int min_cdclk = 0;
@@ -1936,10 +1936,10 @@ static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
 int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(crtc_state->base.crtc->dev);
+		to_i915(crtc_state->uapi.crtc->dev);
 	int min_cdclk;
 
-	if (!crtc_state->base.enable)
+	if (!crtc_state->hw.enable)
 		return 0;
 
 	min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
@@ -2004,6 +2004,18 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
 	/* Account for additional needs from the planes */
 	min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
 
+	/*
+	 * HACK. Currently for TGL platforms we calculate
+	 * min_cdclk initially based on pixel_rate divided
+	 * by 2, also accounting for plane requirements.
+	 * However, in some cases the lowest possible CDCLK
+	 * doesn't work and causes underruns.
+	 * Explicitly stating here that this is currently
+	 * a hack rather than a final solution.
+	 */
+	if (IS_TIGERLAKE(dev_priv))
+		min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
+
 	if (min_cdclk > dev_priv->max_cdclk_freq) {
 		DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
 			      min_cdclk, dev_priv->max_cdclk_freq);
@@ -2076,7 +2088,7 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
 		int ret;
 
-		if (crtc_state->base.enable)
+		if (crtc_state->hw.enable)
 			min_voltage_level = crtc_state->min_voltage_level;
 		else
 			min_voltage_level = 0;
@@ -2170,7 +2182,7 @@ static int skl_dpll0_vco(struct intel_atomic_state *state)
 		vco = dev_priv->skl_preferred_vco_freq;
 
 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!crtc_state->base.enable)
+		if (!crtc_state->hw.enable)
 			continue;
 
 		if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
@@ -2283,11 +2295,11 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state)
 		if (IS_ERR(crtc_state))
 			return PTR_ERR(crtc_state);
 
-		if (!crtc_state->base.active ||
-		    drm_atomic_crtc_needs_modeset(&crtc_state->base))
+		if (!crtc_state->hw.active ||
+		    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
 			continue;
 
-		crtc_state->base.mode_changed = true;
+		crtc_state->uapi.mode_changed = true;
 
 		ret = drm_atomic_add_affected_connectors(&state->base,
 							 &crtc->base);
@@ -2368,7 +2380,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
 		if (IS_ERR(crtc_state))
 			return PTR_ERR(crtc_state);
 
-		if (drm_atomic_crtc_needs_modeset(&crtc_state->base))
+		if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
 			pipe = INVALID_PIPE;
 	} else {
 		pipe = INVALID_PIPE;
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index aa3a063..3980e8b 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -117,10 +117,10 @@ static bool lut_is_legacy(const struct drm_property_blob *lut)
 
 static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state)
 {
-	return !crtc_state->base.degamma_lut &&
-		!crtc_state->base.ctm &&
-		crtc_state->base.gamma_lut &&
-		lut_is_legacy(crtc_state->base.gamma_lut);
+	return !crtc_state->hw.degamma_lut &&
+		!crtc_state->hw.ctm &&
+		crtc_state->hw.gamma_lut &&
+		lut_is_legacy(crtc_state->hw.gamma_lut);
 }
 
 /*
@@ -205,7 +205,7 @@ static void icl_update_output_csc(struct intel_crtc *crtc,
 
 static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 
 	/*
 	 * FIXME if there's a gamma LUT after the CSC, we should
@@ -219,7 +219,7 @@ static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
 static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
 				u16 coeffs[9])
 {
-	const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+	const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
 	const u64 *input;
 	u64 temp[9];
 	int i;
@@ -270,11 +270,11 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
 
 static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	bool limited_color_range = ilk_csc_limited_range(crtc_state);
 
-	if (crtc_state->base.ctm) {
+	if (crtc_state->hw.ctm) {
 		u16 coeff[9];
 
 		ilk_csc_convert_ctm(crtc_state, coeff);
@@ -309,10 +309,10 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
 
 static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	if (crtc_state->base.ctm) {
+	if (crtc_state->hw.ctm) {
 		u16 coeff[9];
 
 		ilk_csc_convert_ctm(crtc_state, coeff);
@@ -338,12 +338,12 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
  */
 static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
-	if (crtc_state->base.ctm) {
-		const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+	if (crtc_state->hw.ctm) {
+		const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
 		u16 coeffs[9] = {};
 		int i;
 
@@ -404,7 +404,7 @@ static u32 ilk_lut_10(const struct drm_color_lut *color)
 static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
 				    const struct drm_property_blob *blob)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	int i;
@@ -435,12 +435,12 @@ static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
 
 static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
 {
-	i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
+	i9xx_load_luts_internal(crtc_state, crtc_state->hw.gamma_lut);
 }
 
 static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	u32 val;
@@ -453,7 +453,7 @@ static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
 
 static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	u32 val;
@@ -468,7 +468,7 @@ static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
 
 static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
@@ -478,7 +478,7 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
 
 static void skl_color_commit(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	u32 val = 0;
@@ -524,8 +524,8 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
 
 static void i965_load_luts(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
 
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
 		i9xx_load_luts(crtc_state);
@@ -547,8 +547,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc,
 
 static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
 
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
 		i9xx_load_luts(crtc_state);
@@ -654,9 +654,9 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
 
 static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-	const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+	const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
 
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
 		i9xx_load_luts(crtc_state);
@@ -677,9 +677,9 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
 
 static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-	const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+	const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
 
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
 		i9xx_load_luts(crtc_state);
@@ -700,11 +700,11 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
 
 static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
-	const struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
+	const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data;
 	u32 i;
 
 	/*
@@ -739,7 +739,7 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
 
 static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
@@ -766,8 +766,8 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat
 
 static void glk_load_luts(const struct intel_crtc_state *crtc_state)
 {
-	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 
 	/*
 	 * On GLK+ both pipe CSC and degamma LUT are controlled
@@ -777,7 +777,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
 	 * the degamma LUT so that we don't have to reload
 	 * it every time the pipe CSC is being enabled.
 	 */
-	if (crtc_state->base.degamma_lut)
+	if (crtc_state->hw.degamma_lut)
 		glk_load_degamma_lut(crtc_state);
 	else
 		glk_load_degamma_lut_linear(crtc_state);
@@ -808,7 +808,7 @@ static void
 icl_load_gcmax(const struct intel_crtc_state *crtc_state,
 	       const struct drm_color_lut *color)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct intel_dsb *dsb = intel_dsb_get(crtc);
 	enum pipe pipe = crtc->pipe;
 
@@ -822,8 +822,8 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
 static void
 icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
 	const struct drm_color_lut *lut = blob->data;
 	struct intel_dsb *dsb = intel_dsb_get(crtc);
 	enum pipe pipe = crtc->pipe;
@@ -854,8 +854,8 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
 static void
 icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
 	const struct drm_color_lut *lut = blob->data;
 	const struct drm_color_lut *entry;
 	struct intel_dsb *dsb = intel_dsb_get(crtc);
@@ -910,11 +910,11 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
 
 static void icl_load_luts(const struct intel_crtc_state *crtc_state)
 {
-	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct intel_dsb *dsb = intel_dsb_get(crtc);
 
-	if (crtc_state->base.degamma_lut)
+	if (crtc_state->hw.degamma_lut)
 		glk_load_degamma_lut(crtc_state);
 
 	switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
@@ -990,9 +990,9 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc,
 
 static void chv_load_luts(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-	const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+	const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
 
 	cherryview_load_csc_matrix(crtc_state);
 
@@ -1010,35 +1010,35 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state)
 
 void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 
 	dev_priv->display.load_luts(crtc_state);
 }
 
 void intel_color_commit(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 
 	dev_priv->display.color_commit(crtc_state);
 }
 
 static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct intel_atomic_state *state =
-		to_intel_atomic_state(new_crtc_state->base.state);
+		to_intel_atomic_state(new_crtc_state->uapi.state);
 	const struct intel_crtc_state *old_crtc_state =
 		intel_atomic_get_old_crtc_state(state, crtc);
 
-	return !old_crtc_state->base.gamma_lut &&
-		!old_crtc_state->base.degamma_lut;
+	return !old_crtc_state->hw.gamma_lut &&
+		!old_crtc_state->hw.degamma_lut;
 }
 
 static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct intel_atomic_state *state =
-		to_intel_atomic_state(new_crtc_state->base.state);
+		to_intel_atomic_state(new_crtc_state->uapi.state);
 	const struct intel_crtc_state *old_crtc_state =
 		intel_atomic_get_old_crtc_state(state, crtc);
 
@@ -1050,14 +1050,14 @@ static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
 	if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode)
 		return false;
 
-	return !old_crtc_state->base.gamma_lut;
+	return !old_crtc_state->hw.gamma_lut;
 }
 
 static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct intel_atomic_state *state =
-		to_intel_atomic_state(new_crtc_state->base.state);
+		to_intel_atomic_state(new_crtc_state->uapi.state);
 	const struct intel_crtc_state *old_crtc_state =
 		intel_atomic_get_old_crtc_state(state, crtc);
 
@@ -1068,19 +1068,19 @@ static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
 	 * linear hardware degamma mid scanout.
 	 */
 	return !old_crtc_state->csc_enable &&
-		!old_crtc_state->base.gamma_lut;
+		!old_crtc_state->hw.gamma_lut;
 }
 
 int intel_color_check(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 
 	return dev_priv->display.color_check(crtc_state);
 }
 
 void intel_color_get_config(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 
 	if (dev_priv->display.read_luts)
 		dev_priv->display.read_luts(crtc_state);
@@ -1104,16 +1104,16 @@ static bool need_plane_update(struct intel_plane *plane,
 static int
 intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_atomic_state *state =
-		to_intel_atomic_state(new_crtc_state->base.state);
+		to_intel_atomic_state(new_crtc_state->uapi.state);
 	const struct intel_crtc_state *old_crtc_state =
 		intel_atomic_get_old_crtc_state(state, crtc);
 	struct intel_plane *plane;
 
-	if (!new_crtc_state->base.active ||
-	    drm_atomic_crtc_needs_modeset(&new_crtc_state->base))
+	if (!new_crtc_state->hw.active ||
+	    drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi))
 		return 0;
 
 	if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable &&
@@ -1155,9 +1155,9 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
 
 static int check_luts(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-	const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+	const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
 	int gamma_length, degamma_length;
 	u32 gamma_tests, degamma_tests;
 
@@ -1205,7 +1205,7 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state)
 		return ret;
 
 	crtc_state->gamma_enable =
-		crtc_state->base.gamma_lut &&
+		crtc_state->hw.gamma_lut &&
 		!crtc_state->c8_planes;
 
 	crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);
@@ -1226,11 +1226,11 @@ static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
 	if (crtc_state_is_legacy_gamma(crtc_state))
 		return 0;
 
-	if (crtc_state->base.degamma_lut)
+	if (crtc_state->hw.degamma_lut)
 		cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
-	if (crtc_state->base.ctm)
+	if (crtc_state->hw.ctm)
 		cgm_mode |= CGM_PIPE_MODE_CSC;
-	if (crtc_state->base.gamma_lut)
+	if (crtc_state->hw.gamma_lut)
 		cgm_mode |= CGM_PIPE_MODE_GAMMA;
 
 	return cgm_mode;
@@ -1306,7 +1306,7 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
 		return ret;
 
 	crtc_state->gamma_enable =
-		crtc_state->base.gamma_lut &&
+		crtc_state->hw.gamma_lut &&
 		!crtc_state->c8_planes;
 
 	/*
@@ -1334,8 +1334,8 @@ static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state)
 	if (!crtc_state->gamma_enable ||
 	    crtc_state_is_legacy_gamma(crtc_state))
 		return GAMMA_MODE_MODE_8BIT;
-	else if (crtc_state->base.gamma_lut &&
-		 crtc_state->base.degamma_lut)
+	else if (crtc_state->hw.gamma_lut &&
+		 crtc_state->hw.degamma_lut)
 		return GAMMA_MODE_MODE_SPLIT;
 	else
 		return GAMMA_MODE_MODE_10BIT;
@@ -1349,7 +1349,7 @@ static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
 	 * CSC comes after the LUT in degamma, RGB->YCbCr,
 	 * and RGB full->limited range mode.
 	 */
-	if (crtc_state->base.degamma_lut ||
+	if (crtc_state->hw.degamma_lut ||
 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
 	    limited_color_range)
 		return 0;
@@ -1367,13 +1367,13 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
 		return ret;
 
 	crtc_state->gamma_enable =
-		(crtc_state->base.gamma_lut ||
-		 crtc_state->base.degamma_lut) &&
+		(crtc_state->hw.gamma_lut ||
+		 crtc_state->hw.degamma_lut) &&
 		!crtc_state->c8_planes;
 
 	crtc_state->csc_enable =
 		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
-		crtc_state->base.ctm || limited_color_range;
+		crtc_state->hw.ctm || limited_color_range;
 
 	crtc_state->gamma_mode = ivb_gamma_mode(crtc_state);
 
@@ -1406,14 +1406,14 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
 		return ret;
 
 	crtc_state->gamma_enable =
-		crtc_state->base.gamma_lut &&
+		crtc_state->hw.gamma_lut &&
 		!crtc_state->c8_planes;
 
 	/* On GLK+ degamma LUT is controlled by csc_enable */
 	crtc_state->csc_enable =
-		crtc_state->base.degamma_lut ||
+		crtc_state->hw.degamma_lut ||
 		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
-		crtc_state->base.ctm || crtc_state->limited_color_range;
+		crtc_state->hw.ctm || crtc_state->limited_color_range;
 
 	crtc_state->gamma_mode = glk_gamma_mode(crtc_state);
 
@@ -1432,14 +1432,14 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
 {
 	u32 gamma_mode = 0;
 
-	if (crtc_state->base.degamma_lut)
+	if (crtc_state->hw.degamma_lut)
 		gamma_mode |= PRE_CSC_GAMMA_ENABLE;
 
-	if (crtc_state->base.gamma_lut &&
+	if (crtc_state->hw.gamma_lut &&
 	    !crtc_state->c8_planes)
 		gamma_mode |= POST_CSC_GAMMA_ENABLE;
 
-	if (!crtc_state->base.gamma_lut ||
+	if (!crtc_state->hw.gamma_lut ||
 	    crtc_state_is_legacy_gamma(crtc_state))
 		gamma_mode |= GAMMA_MODE_MODE_8BIT;
 	else
@@ -1452,7 +1452,7 @@ static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state)
 {
 	u32 csc_mode = 0;
 
-	if (crtc_state->base.ctm)
+	if (crtc_state->hw.ctm)
 		csc_mode |= ICL_CSC_ENABLE;
 
 	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
@@ -1540,7 +1540,7 @@ static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
 
 int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	if (HAS_GMCH(dev_priv)) {
@@ -1646,7 +1646,7 @@ static u32 intel_color_lut_pack(u32 val, u32 bit_precision)
 static struct drm_property_blob *
 i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
@@ -1683,13 +1683,13 @@ static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
 	if (!crtc_state->gamma_enable)
 		return;
 
-	crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+	crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
 }
 
 static struct drm_property_blob *
 i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
 	enum pipe pipe = crtc->pipe;
@@ -1733,15 +1733,15 @@ static void i965_read_luts(struct intel_crtc_state *crtc_state)
 		return;
 
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
-		crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+		crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
 	else
-		crtc_state->base.gamma_lut = i965_read_lut_10p6(crtc_state);
+		crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc_state);
 }
 
 static struct drm_property_blob *
 chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
 	enum pipe pipe = crtc->pipe;
@@ -1775,7 +1775,7 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
 static void chv_read_luts(struct intel_crtc_state *crtc_state)
 {
 	if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
-		crtc_state->base.gamma_lut = chv_read_cgm_lut(crtc_state);
+		crtc_state->hw.gamma_lut = chv_read_cgm_lut(crtc_state);
 	else
 		i965_read_luts(crtc_state);
 }
@@ -1783,7 +1783,7 @@ static void chv_read_luts(struct intel_crtc_state *crtc_state)
 static struct drm_property_blob *
 ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
 	enum pipe pipe = crtc->pipe;
@@ -1822,15 +1822,15 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state)
 		return;
 
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
-		crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+		crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
 	else
-		crtc_state->base.gamma_lut = ilk_read_lut_10(crtc_state);
+		crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc_state);
 }
 
 static struct drm_property_blob *
 glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	int hw_lut_size = ivb_lut_10_size(prec_index);
 	enum pipe pipe = crtc->pipe;
@@ -1871,9 +1871,9 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
 		return;
 
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
-		crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+		crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
 	else
-		crtc_state->base.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
+		crtc_state->hw.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
 }
 
 void intel_color_init(struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 39cc6d7..f976b80 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -65,7 +65,7 @@ static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
 	return container_of(encoder, struct intel_crt, base);
 }
 
-static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+static struct intel_crt *intel_attached_crt(struct intel_connector *connector)
 {
 	return intel_encoder_to_crt(intel_attached_encoder(connector));
 }
@@ -132,9 +132,9 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
 {
 	pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
 
-	pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+	pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
 
-	pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+	pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
 static void hsw_crt_get_config(struct intel_encoder *encoder,
@@ -144,13 +144,13 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
 
 	intel_ddi_get_config(encoder, pipe_config);
 
-	pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+	pipe_config->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
 					      DRM_MODE_FLAG_NHSYNC |
 					      DRM_MODE_FLAG_PVSYNC |
 					      DRM_MODE_FLAG_NVSYNC);
-	pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+	pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
 
-	pipe_config->base.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
+	pipe_config->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
 }
 
 /* Note: The caller is required to filter out dpms modes not supported by the
@@ -161,8 +161,8 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
 	u32 adpa;
 
 	if (INTEL_GEN(dev_priv) >= 5)
@@ -241,6 +241,14 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
+	intel_crtc_vblank_off(old_crtc_state);
+
+	intel_disable_pipe(old_crtc_state);
+
+	intel_ddi_disable_transcoder_func(old_crtc_state);
+
+	ilk_pfit_disable(old_crtc_state);
+
 	intel_ddi_disable_pipe_clock(old_crtc_state);
 
 	pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
@@ -271,14 +279,14 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
 			       const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	enum pipe pipe = crtc->pipe;
 
 	WARN_ON(!crtc_state->has_pch_encoder);
 
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
-	dev_priv->display.fdi_link_train(crtc, crtc_state);
+	hsw_fdi_link_train(encoder, crtc_state);
 
 	intel_ddi_enable_pipe_clock(crtc_state);
 }
@@ -288,7 +296,7 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
 			   const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	enum pipe pipe = crtc->pipe;
 
 	WARN_ON(!crtc_state->has_pch_encoder);
@@ -343,7 +351,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
 
 	/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
 	if (HAS_PCH_LPT(dev_priv) &&
-	    (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+	    ilk_get_lanes_required(mode->clock, 270000, 24) > 2)
 		return MODE_CLOCK_HIGH;
 
 	/* HSW/BDW FDI limited to 4k */
@@ -358,7 +366,7 @@ static int intel_crt_compute_config(struct intel_encoder *encoder,
 				    struct drm_connector_state *conn_state)
 {
 	struct drm_display_mode *adjusted_mode =
-		&pipe_config->base.adjusted_mode;
+		&pipe_config->hw.adjusted_mode;
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return -EINVAL;
@@ -373,7 +381,7 @@ static int pch_crt_compute_config(struct intel_encoder *encoder,
 				  struct drm_connector_state *conn_state)
 {
 	struct drm_display_mode *adjusted_mode =
-		&pipe_config->base.adjusted_mode;
+		&pipe_config->hw.adjusted_mode;
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return -EINVAL;
@@ -390,7 +398,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct drm_display_mode *adjusted_mode =
-		&pipe_config->base.adjusted_mode;
+		&pipe_config->hw.adjusted_mode;
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return -EINVAL;
@@ -419,10 +427,10 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
 	return 0;
 }
 
-static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
+static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 adpa;
 	bool ret;
@@ -432,7 +440,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 		bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
 		u32 save_adpa;
 
-		crt->force_hotplug_required = 0;
+		crt->force_hotplug_required = false;
 
 		save_adpa = adpa = I915_READ(crt->adpa_reg);
 		DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
@@ -469,7 +477,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool reenable_hpd;
 	u32 adpa;
@@ -527,7 +535,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 	int i, tries = 0;
 
 	if (HAS_PCH_SPLIT(dev_priv))
-		return intel_ironlake_crt_detect_hotplug(connector);
+		return ilk_crt_detect_hotplug(connector);
 
 	if (IS_VALLEYVIEW(dev_priv))
 		return valleyview_crt_detect_hotplug(connector);
@@ -601,7 +609,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
 
 static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
-	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
 	struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
 	struct edid *edid;
 	struct i2c_adapter *i2c;
@@ -787,7 +795,7 @@ intel_crt_detect(struct drm_connector *connector,
 		 bool force)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
 	struct intel_encoder *intel_encoder = &crt->base;
 	intel_wakeref_t wakeref;
 	int status, ret;
@@ -878,7 +886,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
 	struct intel_encoder *intel_encoder = &crt->base;
 	intel_wakeref_t wakeref;
 	struct i2c_adapter *i2c;
@@ -917,7 +925,7 @@ void intel_crt_reset(struct drm_encoder *encoder)
 		POSTING_READ(crt->adpa_reg);
 
 		DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
-		crt->force_hotplug_required = 1;
+		crt->force_hotplug_required = true;
 	}
 
 }
@@ -1055,7 +1063,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 	/*
 	 * Configure the automatic hotplug detection stuff
 	 */
-	crt->force_hotplug_required = 0;
+	crt->force_hotplug_required = false;
 
 	/*
 	 * TODO: find a proper way to discover whether we need to set the
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 2a27fb5..33f1dc3 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -34,6 +34,7 @@
 #include "intel_ddi.h"
 #include "intel_display_types.h"
 #include "intel_dp.h"
+#include "intel_dp_mst.h"
 #include "intel_dp_link_training.h"
 #include "intel_dpio_phy.h"
 #include "intel_dsi.h"
@@ -902,11 +903,10 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
 
 static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
 {
+	struct ddi_vbt_port_info *port_info = &dev_priv->vbt.ddi_port_info[port];
 	int n_entries, level, default_entry;
 	enum phy phy = intel_port_to_phy(dev_priv, port);
 
-	level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
-
 	if (INTEL_GEN(dev_priv) >= 12) {
 		if (intel_phy_is_combo(dev_priv, phy))
 			icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
@@ -941,12 +941,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
 		return 0;
 	}
 
-	/* Choose a good default if VBT is badly populated */
-	if (level == HDMI_LEVEL_SHIFT_UNKNOWN || level >= n_entries)
-		level = default_entry;
-
 	if (WARN_ON_ONCE(n_entries == 0))
 		return 0;
+
+	if (port_info->hdmi_level_shift_set)
+		level = port_info->hdmi_level_shift;
+	else
+		level = default_entry;
+
 	if (WARN_ON_ONCE(level >= n_entries))
 		level = n_entries - 1;
 
@@ -1106,18 +1108,14 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
  * DDI A (which is used for eDP)
  */
 
-void hsw_fdi_link_train(struct intel_crtc *crtc,
+void hsw_fdi_link_train(struct intel_encoder *encoder,
 			const struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_encoder *encoder;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 temp, i, rx_ctl_val, ddi_pll_sel;
 
-	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
-		WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
-		intel_prepare_dp_ddi_buffers(encoder, crtc_state);
-	}
+	intel_prepare_dp_ddi_buffers(encoder, crtc_state);
 
 	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
 	 * mode set "sequence for CRT port" document:
@@ -1240,9 +1238,9 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
 
 static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct intel_digital_port *intel_dig_port =
-		enc_to_dig_port(&encoder->base);
+		enc_to_dig_port(encoder);
 
 	intel_dp->DP = intel_dig_port->saved_port_bits |
 		DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
@@ -1542,7 +1540,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
 	if (pipe_config->pixel_multiplier)
 		dotclock /= pipe_config->pixel_multiplier;
 
-	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+	pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
 }
 
 static void icl_ddi_clock_get(struct intel_encoder *encoder,
@@ -1758,7 +1756,7 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
 void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
 			  const struct drm_connector_state *conn_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	u32 temp;
@@ -1815,22 +1813,6 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
 	I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
 }
 
-void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
-				    bool state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-	u32 temp;
-
-	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
-	if (state == true)
-		temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
-	else
-		temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
-	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
-}
-
 /*
  * Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
  *
@@ -1840,7 +1822,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
 static u32
 intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
@@ -1872,9 +1854,9 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
 		BUG();
 	}
 
-	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
+	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
 		temp |= TRANS_DDI_PVSYNC;
-	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
+	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
 		temp |= TRANS_DDI_PHSYNC;
 
 	if (cpu_transcoder == TRANSCODER_EDP) {
@@ -1918,8 +1900,13 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
 		temp |= TRANS_DDI_MODE_SELECT_DP_MST;
 		temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
 
-		if (INTEL_GEN(dev_priv) >= 12)
-			temp |= TRANS_DDI_MST_TRANSPORT_SELECT(crtc_state->cpu_transcoder);
+		if (INTEL_GEN(dev_priv) >= 12) {
+			enum transcoder master;
+
+			master = crtc_state->mst_master_transcoder;
+			WARN_ON(master == INVALID_TRANSCODER);
+			temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
+		}
 	} else {
 		temp |= TRANS_DDI_MODE_SELECT_DP_SST;
 		temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
@@ -1930,12 +1917,14 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
 
 void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	u32 temp;
 
 	temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+		temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
 	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
 }
 
@@ -1946,7 +1935,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
 static void
 intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	u32 temp;
@@ -1958,20 +1947,21 @@ intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
 
 void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-	i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
-	u32 val = I915_READ(reg);
+	u32 val;
+
+	val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+	val &= ~TRANS_DDI_FUNC_ENABLE;
 
 	if (INTEL_GEN(dev_priv) >= 12) {
-		val &= ~(TRANS_DDI_FUNC_ENABLE | TGL_TRANS_DDI_PORT_MASK |
-			 TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+		if (!intel_dp_mst_is_master_trans(crtc_state))
+			val &= ~TGL_TRANS_DDI_PORT_MASK;
 	} else {
-		val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK |
-			 TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+		val &= ~TRANS_DDI_PORT_MASK;
 	}
-	I915_WRITE(reg, val);
+	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
 
 	if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -2234,7 +2224,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
 	if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
 		return;
 
-	dig_port = enc_to_dig_port(&encoder->base);
+	dig_port = enc_to_dig_port(encoder);
 	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
 	/*
@@ -2256,7 +2246,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
 
 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
 	enum port port = encoder->port;
@@ -2274,7 +2264,7 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
 
 void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
 	if (cpu_transcoder != TRANSCODER_EDP) {
@@ -2304,7 +2294,7 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
 static void skl_ddi_set_iboost(struct intel_encoder *encoder,
 			       int level, enum intel_output_type type)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
 	u8 iboost;
@@ -2375,7 +2365,7 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
 u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	enum port port = encoder->port;
 	enum phy phy = intel_port_to_phy(dev_priv, port);
 	int n_entries;
@@ -2514,7 +2504,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
 		width = 4;
 		rate = 0; /* Rate is always < than 6GHz for HDMI */
 	} else {
-		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 		width = intel_dp->lane_count;
 		rate = intel_dp->link_rate;
@@ -2640,7 +2630,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 		width = 4;
 		/* Rate is always < than 6GHz for HDMI */
 	} else {
-		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 		width = intel_dp->lane_count;
 		rate = intel_dp->link_rate;
@@ -3016,11 +3006,38 @@ static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
 	mutex_unlock(&dev_priv->dpll_lock);
 }
 
+static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
+				      u32 port_mask, bool ddi_clk_needed)
+{
+	enum port port;
+	u32 val;
+
+	val = I915_READ(ICL_DPCLKA_CFGCR0);
+	for_each_port_masked(port, port_mask) {
+		enum phy phy = intel_port_to_phy(dev_priv, port);
+		bool ddi_clk_off = val & icl_dpclka_cfgcr0_clk_off(dev_priv,
+								   phy);
+
+		if (ddi_clk_needed == !ddi_clk_off)
+			continue;
+
+		/*
+		 * Punt for now on the case where the clock is gated but would
+		 * be needed by the port; something else is really broken then.
+		 */
+		if (WARN_ON(ddi_clk_needed))
+			continue;
+
+		DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+			 phy_name(phy));
+		val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
+		I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+	}
+}
+
 void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	u32 val;
-	enum port port;
 	u32 port_mask;
 	bool ddi_clk_needed;
 
@@ -3069,29 +3086,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
 		ddi_clk_needed = false;
 	}
 
-	val = I915_READ(ICL_DPCLKA_CFGCR0);
-	for_each_port_masked(port, port_mask) {
-		enum phy phy = intel_port_to_phy(dev_priv, port);
-
-		bool ddi_clk_ungated = !(val &
-					 icl_dpclka_cfgcr0_clk_off(dev_priv,
-								   phy));
-
-		if (ddi_clk_needed == ddi_clk_ungated)
-			continue;
-
-		/*
-		 * Punt on the case now where clock is gated, but it would
-		 * be needed by the port. Something else is really broken then.
-		 */
-		if (WARN_ON(ddi_clk_needed))
-			continue;
-
-		DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
-			 phy_name(port));
-		val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
-		I915_WRITE(ICL_DPCLKA_CFGCR0, val);
-	}
+	icl_sanitize_port_clk_off(dev_priv, port_mask, ddi_clk_needed);
 }
 
 static void intel_ddi_clk_select(struct intel_encoder *encoder,
@@ -3173,57 +3168,6 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
 }
 
 static void
-icl_phy_set_clock_gating(struct intel_digital_port *dig_port, bool enable)
-{
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
-	u32 val, bits;
-	int ln;
-
-	if (tc_port == PORT_TC_NONE)
-		return;
-
-	bits = MG_DP_MODE_CFG_TR2PWR_GATING | MG_DP_MODE_CFG_TRPWR_GATING |
-	       MG_DP_MODE_CFG_CLNPWR_GATING | MG_DP_MODE_CFG_DIGPWR_GATING |
-	       MG_DP_MODE_CFG_GAONPWR_GATING;
-
-	for (ln = 0; ln < 2; ln++) {
-		if (INTEL_GEN(dev_priv) >= 12) {
-			I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
-			val = I915_READ(DKL_DP_MODE(tc_port));
-		} else {
-			val = I915_READ(MG_DP_MODE(ln, tc_port));
-		}
-
-		if (enable)
-			val |= bits;
-		else
-			val &= ~bits;
-
-		if (INTEL_GEN(dev_priv) >= 12)
-			I915_WRITE(DKL_DP_MODE(tc_port), val);
-		else
-			I915_WRITE(MG_DP_MODE(ln, tc_port), val);
-	}
-
-	if (INTEL_GEN(dev_priv) == 11) {
-		bits = MG_MISC_SUS0_CFG_TR2PWR_GATING |
-		       MG_MISC_SUS0_CFG_CL2PWR_GATING |
-		       MG_MISC_SUS0_CFG_GAONPWR_GATING |
-		       MG_MISC_SUS0_CFG_TRPWR_GATING |
-		       MG_MISC_SUS0_CFG_CL1PWR_GATING |
-		       MG_MISC_SUS0_CFG_DGPWR_GATING;
-
-		val = I915_READ(MG_MISC_SUS0(tc_port));
-		if (enable)
-			val |= (bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3));
-		else
-			val &= ~(bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK);
-		I915_WRITE(MG_MISC_SUS0(tc_port), val);
-	}
-}
-
-static void
 icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
 		       const struct intel_crtc_state *crtc_state)
 {
@@ -3329,7 +3273,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
 	if (!crtc_state->fec_enable)
 		return;
 
-	intel_dp = enc_to_intel_dp(&encoder->base);
+	intel_dp = enc_to_intel_dp(encoder);
 	val = I915_READ(intel_dp->regs.dp_tp_ctl);
 	val |= DP_TP_CTL_FEC_ENABLE;
 	I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
@@ -3349,7 +3293,7 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
 	if (!crtc_state->fec_enable)
 		return;
 
-	intel_dp = enc_to_intel_dp(&encoder->base);
+	intel_dp = enc_to_intel_dp(encoder);
 	val = I915_READ(intel_dp->regs.dp_tp_ctl);
 	val &= ~DP_TP_CTL_FEC_ENABLE;
 	I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
@@ -3359,7 +3303,7 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
 static void
 tgl_clear_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
 {
-	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
 	u32 val;
 
 	if (!cstate->dc3co_exitline)
@@ -3374,7 +3318,7 @@ static void
 tgl_set_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
 {
 	u32 val, exit_scanlines;
-	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
 
 	if (!cstate->dc3co_exitline)
 		return;
@@ -3392,8 +3336,8 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
 					      struct intel_crtc_state *cstate)
 {
 	u32 exit_scanlines;
-	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
-	u32 crtc_vdisplay = cstate->base.adjusted_mode.crtc_vdisplay;
+	struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
+	u32 crtc_vdisplay = cstate->hw.adjusted_mode.crtc_vdisplay;
 
 	cstate->dc3co_exitline = 0;
 
@@ -3401,11 +3345,11 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
 		return;
 
 	/* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/
-	if (to_intel_crtc(cstate->base.crtc)->pipe != PIPE_A ||
+	if (to_intel_crtc(cstate->uapi.crtc)->pipe != PIPE_A ||
 	    encoder->port != PORT_A)
 		return;
 
-	if (!cstate->has_psr2 || !cstate->base.active)
+	if (!cstate->has_psr2 || !cstate->hw.active)
 		return;
 
 	/*
@@ -3413,7 +3357,7 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
 	 */
 	exit_scanlines =
-		intel_usecs_to_scanlines(&cstate->base.adjusted_mode, 200) + 1;
+		intel_usecs_to_scanlines(&cstate->hw.adjusted_mode, 200) + 1;
 
 	if (WARN_ON(exit_scanlines > crtc_vdisplay))
 		return;
@@ -3425,7 +3369,7 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
 static void tgl_dc3co_exitline_get_config(struct intel_crtc_state *crtc_state)
 {
 	u32 val;
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 
 	if (INTEL_GEN(dev_priv) < 12)
 		return;
@@ -3440,10 +3384,10 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *crtc_state,
 				  const struct drm_connector_state *conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
 	int level = intel_ddi_dp_level(intel_dp);
 	enum transcoder transcoder = crtc_state->cpu_transcoder;
@@ -3455,47 +3399,80 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder);
 	intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder);
 
-	/* 1.a got on intel_atomic_commit_tail() */
+	/*
+	 * 1. Enable Power Wells
+	 *
+	 * This was handled at the beginning of intel_atomic_commit_tail(),
+	 * before we called down into this function.
+	 */
 
-	/* 2. */
+	/* 2. Enable Panel Power if PPS is required */
 	intel_edp_panel_on(intel_dp);
 
 	/*
-	 * 1.b, 3. and 4.a is done before tgl_ddi_pre_enable_dp() by:
-	 * haswell_crtc_enable()->intel_encoders_pre_pll_enable() and
-	 * haswell_crtc_enable()->intel_enable_shared_dpll()
+	 * 3. For non-TBT Type-C ports, set FIA lane count
+	 * (DFLEXDPSP.DPX4TXLATC)
+	 *
+	 * This was done before tgl_ddi_pre_enable_dp() by
+	 * hsw_crtc_enable()->intel_encoders_pre_pll_enable().
 	 */
 
-	/* 4.b */
+	/*
+	 * 4. Enable the port PLL.
+	 *
+	 * The PLL enabling itself was already done before this function by
+	 * hsw_crtc_enable()->intel_enable_shared_dpll().  We need only
+	 * configure the PLL-to-port mapping here.
+	 */
 	intel_ddi_clk_select(encoder, crtc_state);
 
-	/* 5. */
+	/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
 	if (!intel_phy_is_tc(dev_priv, phy) ||
 	    dig_port->tc_mode != TC_PORT_TBT_ALT)
 		intel_display_power_get(dev_priv,
 					dig_port->ddi_io_power_domain);
 
-	/* 6. */
+	/* 6. Program DP_MODE */
 	icl_program_mg_dp_mode(dig_port, crtc_state);
 
 	/*
-	 * 7.a - Steps in this function should only be executed over MST
-	 * master, what will be taken in care by MST hook
-	 * intel_mst_pre_enable_dp()
+	 * 7. The remaining steps are substeps under the bspec's "Enable and
+	 * Train Display Port" step.  Note that steps that are specific to
+	 * MST will be handled by intel_mst_pre_enable_dp() before/after it
+	 * calls into this function.  Also intel_mst_pre_enable_dp() only calls
+	 * us when active_mst_links==0, so any steps designated for "single
+	 * stream or multi-stream master transcoder" can just be performed
+	 * unconditionally here.
+	 */
+
+	/*
+	 * 7.a Configure Transcoder Clock Select to direct the Port clock to the
+	 * Transcoder.
 	 */
 	intel_ddi_enable_pipe_clock(crtc_state);
 
-	/* 7.b */
+	/*
+	 * 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
+	 * Transport Select
+	 */
 	intel_ddi_config_transcoder_func(crtc_state);
 
-	/* 7.d */
-	icl_phy_set_clock_gating(dig_port, false);
+	/*
+	 * 7.c Configure & enable DP_TP_CTL with link training pattern 1
+	 * selected
+	 *
+	 * This will be handled by the intel_dp_start_link_train() farther
+	 * down this function.
+	 */
 
-	/* 7.e */
+	/* 7.e Configure voltage swing and related IO settings */
 	tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level,
 				encoder->type);
 
-	/* 7.f */
+	/*
+	 * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
+	 * the used lanes of the DDI.
+	 */
 	if (intel_phy_is_combo(dev_priv, phy)) {
 		bool lane_reversal =
 			dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
@@ -3505,7 +3482,14 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
 					       lane_reversal);
 	}
 
-	/* 7.g */
+	/*
+	 * 7.g Configure and enable DDI_BUF_CTL
+	 * 7.h Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout
+	 *     after 500 us.
+	 *
+	 * We only configure what the register value will be here.  Actual
+	 * enabling happens during link training farther down.
+	 */
 	intel_ddi_init_dp_buf_reg(encoder);
 
 	if (!is_mst)
@@ -3518,23 +3502,21 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	 * training
 	 */
 	intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
-	/* 7.c, 7.h, 7.i, 7.j */
+
+	/*
+	 * 7.i Follow DisplayPort specification training sequence (see notes for
+	 *     failure handling)
+	 * 7.j If DisplayPort multi-stream - Set DP_TP_CTL link training to Idle
+	 *     Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent)
+	 *     (timeout after 800 us)
+	 */
 	intel_dp_start_link_train(intel_dp);
 
-	/* 7.k */
+	/* 7.k Set DP_TP_CTL link training to Normal */
 	if (!is_trans_port_sync_mode(crtc_state))
 		intel_dp_stop_link_train(intel_dp);
 
-	/*
-	 * TODO: enable clock gating
-	 *
-	 * It is not written in DP enabling sequence but "PHY Clockgating
-	 * programming" states that clock gating should be enabled after the
-	 * link training but doing so causes all the following trainings to fail
-	 * so not enabling it for now.
-	 */
-
-	/* 7.l */
+	/* 7.l Configure and enable FEC if needed */
 	intel_ddi_enable_fec(encoder, crtc_state);
 	intel_dsc_enable(encoder, crtc_state);
 }
@@ -3543,15 +3525,18 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *crtc_state,
 				  const struct drm_connector_state *conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
 	enum phy phy = intel_port_to_phy(dev_priv, port);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
 	int level = intel_ddi_dp_level(intel_dp);
 
-	WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+	if (INTEL_GEN(dev_priv) < 11)
+		WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+	else
+		WARN_ON(is_mst && port == PORT_A);
 
 	intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
 				 crtc_state->lane_count, is_mst);
@@ -3569,7 +3554,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
 					dig_port->ddi_io_power_domain);
 
 	icl_program_mg_dp_mode(dig_port, crtc_state);
-	icl_phy_set_clock_gating(dig_port, false);
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
@@ -3603,8 +3587,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
 
 	intel_ddi_enable_fec(encoder, crtc_state);
 
-	icl_phy_set_clock_gating(dig_port, true);
-
 	if (!is_mst)
 		intel_ddi_enable_pipe_clock(crtc_state);
 
@@ -3633,12 +3615,12 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 				      const struct intel_crtc_state *crtc_state,
 				      const struct drm_connector_state *conn_state)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
 	int level = intel_ddi_hdmi_level(dev_priv, port);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
 	intel_ddi_clk_select(encoder, crtc_state);
@@ -3646,7 +3628,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
 	icl_program_mg_dp_mode(dig_port, crtc_state);
-	icl_phy_set_clock_gating(dig_port, false);
 
 	if (INTEL_GEN(dev_priv) >= 12)
 		tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
@@ -3661,8 +3642,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 	else
 		intel_prepare_hdmi_ddi_buffers(encoder, level);
 
-	icl_phy_set_clock_gating(dig_port, true);
-
 	if (IS_GEN9_BC(dev_priv))
 		skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
 
@@ -3677,7 +3656,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
 				 const struct intel_crtc_state *crtc_state,
 				 const struct drm_connector_state *conn_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
@@ -3705,12 +3684,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
 		intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
 	} else {
 		struct intel_lspcon *lspcon =
-				enc_to_intel_lspcon(&encoder->base);
+				enc_to_intel_lspcon(encoder);
 
 		intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
 		if (lspcon->active) {
 			struct intel_digital_port *dig_port =
-					enc_to_dig_port(&encoder->base);
+					enc_to_dig_port(encoder);
 
 			dig_port->set_infoframes(encoder,
 						 crtc_state->has_infoframe,
@@ -3735,7 +3714,7 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
 	}
 
 	if (intel_crtc_has_dp_encoder(crtc_state)) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 		val = I915_READ(intel_dp->regs.dp_tp_ctl);
 		val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
@@ -3755,23 +3734,42 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
 				      const struct drm_connector_state *old_conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct intel_dp *intel_dp = &dig_port->dp;
 	bool is_mst = intel_crtc_has_type(old_crtc_state,
 					  INTEL_OUTPUT_DP_MST);
 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 
-	if (!is_mst) {
-		intel_ddi_disable_pipe_clock(old_crtc_state);
-		/*
-		 * Power down sink before disabling the port, otherwise we end
-		 * up getting interrupts from the sink on detecting link loss.
-		 */
-		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+	/*
+	 * Power down sink before disabling the port, otherwise we end
+	 * up getting interrupts from the sink on detecting link loss.
+	 */
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+
+	if (INTEL_GEN(dev_priv) >= 12) {
+		if (is_mst) {
+			enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+			u32 val;
+
+			val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+			val &= ~TGL_TRANS_DDI_PORT_MASK;
+			I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+		}
+	} else {
+		if (!is_mst)
+			intel_ddi_disable_pipe_clock(old_crtc_state);
 	}
 
 	intel_disable_ddi_buf(encoder, old_crtc_state);
 
+	/*
+	 * From TGL spec: "If single stream or multi-stream master transcoder:
+	 * Configure Transcoder Clock select to direct no clock to the
+	 * transcoder"
+	 */
+	if (INTEL_GEN(dev_priv) >= 12)
+		intel_ddi_disable_pipe_clock(old_crtc_state);
+
 	intel_edp_panel_vdd_on(intel_dp);
 	intel_edp_panel_off(intel_dp);
 
@@ -3789,7 +3787,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
 					const struct drm_connector_state *old_conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
 
 	dig_port->set_infoframes(encoder, false,
@@ -3807,11 +3805,46 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
 }
 
+static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+	if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
+		return;
+
+	DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
+		      transcoder_name(old_crtc_state->cpu_transcoder));
+
+	I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
+}
+
 static void intel_ddi_post_disable(struct intel_encoder *encoder,
 				   const struct intel_crtc_state *old_crtc_state,
 				   const struct drm_connector_state *old_conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+	bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
+
+	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
+		intel_crtc_vblank_off(old_crtc_state);
+
+		intel_disable_pipe(old_crtc_state);
+
+		if (INTEL_GEN(dev_priv) >= 11)
+			icl_disable_transcoder_port_sync(old_crtc_state);
+
+		intel_ddi_disable_transcoder_func(old_crtc_state);
+
+		intel_dsc_disable(old_crtc_state);
+
+		if (INTEL_GEN(dev_priv) >= 9)
+			skl_scaler_disable(old_crtc_state);
+		else
+			ilk_pfit_disable(old_crtc_state);
+	}
 
 	/*
 	 * When called from DP MST code:
@@ -3835,6 +3868,13 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		icl_unmap_plls_to_ports(encoder);
+
+	if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port)
+		intel_display_power_put_unchecked(dev_priv,
+						  intel_ddi_main_link_aux_domain(dig_port));
+
+	if (is_tc_port)
+		intel_tc_port_put_link(dig_port);
 }
 
 void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
@@ -3876,7 +3916,7 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
 				const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	enum port port = encoder->port;
 
 	if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
@@ -3917,7 +3957,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
 				  const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct drm_connector *connector = conn_state->connector;
 	enum port port = encoder->port;
 
@@ -3994,7 +4034,7 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
 				 const struct intel_crtc_state *old_crtc_state,
 				 const struct drm_connector_state *old_conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 	intel_dp->link_trained = false;
 
@@ -4042,7 +4082,7 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
 				     const struct intel_crtc_state *crtc_state,
 				     const struct drm_connector_state *conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 	intel_ddi_set_dp_msa(crtc_state, conn_state);
 
@@ -4106,8 +4146,9 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
 
 	WARN_ON(crtc && crtc->active);
 
-	intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes);
-	if (crtc_state && crtc_state->base.active)
+	intel_tc_port_get_link(enc_to_dig_port(encoder),
+		               required_lanes);
+	if (crtc_state && crtc_state->hw.active)
 		intel_update_active_dpll(state, crtc, encoder);
 }
 
@@ -4116,7 +4157,7 @@ intel_ddi_update_complete(struct intel_atomic_state *state,
 			  struct intel_encoder *encoder,
 			  struct intel_crtc *crtc)
 {
-	intel_tc_port_put_link(enc_to_dig_port(&encoder->base));
+	intel_tc_port_put_link(enc_to_dig_port(encoder));
 }
 
 static void
@@ -4125,7 +4166,7 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
 			 const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 	bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
 
@@ -4147,61 +4188,44 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
 						crtc_state->lane_lat_optim_mask);
 }
 
-static void
-intel_ddi_post_pll_disable(struct intel_encoder *encoder,
-			   const struct intel_crtc_state *crtc_state,
-			   const struct drm_connector_state *conn_state)
-{
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
-	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-	bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
-
-	if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
-		intel_display_power_put_unchecked(dev_priv,
-						  intel_ddi_main_link_aux_domain(dig_port));
-
-	if (is_tc_port)
-		intel_tc_port_put_link(dig_port);
-}
-
 static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *dev_priv =
 		to_i915(intel_dig_port->base.base.dev);
 	enum port port = intel_dig_port->base.port;
-	u32 val;
+	u32 dp_tp_ctl, ddi_buf_ctl;
 	bool wait = false;
 
-	if (I915_READ(intel_dp->regs.dp_tp_ctl) & DP_TP_CTL_ENABLE) {
-		val = I915_READ(DDI_BUF_CTL(port));
-		if (val & DDI_BUF_CTL_ENABLE) {
-			val &= ~DDI_BUF_CTL_ENABLE;
-			I915_WRITE(DDI_BUF_CTL(port), val);
+	dp_tp_ctl = I915_READ(intel_dp->regs.dp_tp_ctl);
+
+	if (dp_tp_ctl & DP_TP_CTL_ENABLE) {
+		ddi_buf_ctl = I915_READ(DDI_BUF_CTL(port));
+		if (ddi_buf_ctl & DDI_BUF_CTL_ENABLE) {
+			I915_WRITE(DDI_BUF_CTL(port),
+				   ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
 			wait = true;
 		}
 
-		val = I915_READ(intel_dp->regs.dp_tp_ctl);
-		val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
-		val |= DP_TP_CTL_LINK_TRAIN_PAT1;
-		I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+		dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+		dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1;
+		I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
 		POSTING_READ(intel_dp->regs.dp_tp_ctl);
 
 		if (wait)
 			intel_wait_ddi_buf_idle(dev_priv, port);
 	}
 
-	val = DP_TP_CTL_ENABLE |
-	      DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+	dp_tp_ctl = DP_TP_CTL_ENABLE |
+		    DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
 	if (intel_dp->link_mst)
-		val |= DP_TP_CTL_MODE_MST;
+		dp_tp_ctl |= DP_TP_CTL_MODE_MST;
 	else {
-		val |= DP_TP_CTL_MODE_SST;
+		dp_tp_ctl |= DP_TP_CTL_MODE_SST;
 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
-			val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+			dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
 	}
-	I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+	I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
 	POSTING_READ(intel_dp->regs.dp_tp_ctl);
 
 	intel_dp->DP |= DDI_BUF_CTL_ENABLE;
@@ -4237,7 +4261,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 			  struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
 	u32 temp, flags = 0;
 
@@ -4245,6 +4269,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 	if (WARN_ON(transcoder_is_dsi(cpu_transcoder)))
 		return;
 
+	intel_dsc_get_config(encoder, pipe_config);
+
 	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 	if (temp & TRANS_DDI_PHSYNC)
 		flags |= DRM_MODE_FLAG_PHSYNC;
@@ -4255,7 +4281,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 	else
 		flags |= DRM_MODE_FLAG_NVSYNC;
 
-	pipe_config->base.adjusted_mode.flags |= flags;
+	pipe_config->hw.adjusted_mode.flags |= flags;
 
 	switch (temp & TRANS_DDI_BPC_MASK) {
 	case TRANS_DDI_BPC_6:
@@ -4326,6 +4352,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
 		pipe_config->lane_count =
 			((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
+		if (INTEL_GEN(dev_priv) >= 12)
+			pipe_config->mst_master_transcoder =
+					REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
+
 		intel_dp_get_m_n(intel_crtc, pipe_config);
 		break;
 	default:
@@ -4404,7 +4435,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
 				    struct intel_crtc_state *pipe_config,
 				    struct drm_connector_state *conn_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
 	int ret;
@@ -4439,7 +4470,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
 
 static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
 {
-	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
 
 	intel_dp_encoder_flush_work(encoder);
 
@@ -4506,7 +4537,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
 				 struct drm_modeset_acquire_ctx *ctx)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_hdmi *hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
 	struct intel_connector *connector = hdmi->attached_connector;
 	struct i2c_adapter *adapter =
 		intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
@@ -4538,7 +4569,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
 
 	WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
 
-	if (!crtc_state->base.active)
+	if (!crtc_state->hw.active)
 		return 0;
 
 	if (!crtc_state->hdmi_high_tmds_clock_ratio &&
@@ -4578,7 +4609,7 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
 		  struct intel_connector *connector,
 		  bool irq_received)
 {
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct drm_modeset_acquire_ctx ctx;
 	enum intel_hotplug_state state;
 	int ret;
@@ -4709,8 +4740,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	struct ddi_vbt_port_info *port_info =
 		&dev_priv->vbt.ddi_port_info[port];
 	struct intel_digital_port *intel_dig_port;
-	struct intel_encoder *intel_encoder;
-	struct drm_encoder *encoder;
+	struct intel_encoder *encoder;
 	bool init_hdmi, init_dp, init_lspcon = false;
 	enum phy phy = intel_port_to_phy(dev_priv, port);
 
@@ -4739,31 +4769,30 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	if (!intel_dig_port)
 		return;
 
-	intel_encoder = &intel_dig_port->base;
-	encoder = &intel_encoder->base;
+	encoder = &intel_dig_port->base;
 
-	drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs,
+	drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
 			 DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
 
-	intel_encoder->hotplug = intel_ddi_hotplug;
-	intel_encoder->compute_output_type = intel_ddi_compute_output_type;
-	intel_encoder->compute_config = intel_ddi_compute_config;
-	intel_encoder->enable = intel_enable_ddi;
-	intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
-	intel_encoder->post_pll_disable = intel_ddi_post_pll_disable;
-	intel_encoder->pre_enable = intel_ddi_pre_enable;
-	intel_encoder->disable = intel_disable_ddi;
-	intel_encoder->post_disable = intel_ddi_post_disable;
-	intel_encoder->update_pipe = intel_ddi_update_pipe;
-	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
-	intel_encoder->get_config = intel_ddi_get_config;
-	intel_encoder->suspend = intel_dp_encoder_suspend;
-	intel_encoder->get_power_domains = intel_ddi_get_power_domains;
-	intel_encoder->type = INTEL_OUTPUT_DDI;
-	intel_encoder->power_domain = intel_port_to_power_domain(port);
-	intel_encoder->port = port;
-	intel_encoder->cloneable = 0;
-	intel_encoder->pipe_mask = ~0;
+	encoder->hotplug = intel_ddi_hotplug;
+	encoder->compute_output_type = intel_ddi_compute_output_type;
+	encoder->compute_config = intel_ddi_compute_config;
+	encoder->enable = intel_enable_ddi;
+	encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
+	encoder->pre_enable = intel_ddi_pre_enable;
+	encoder->disable = intel_disable_ddi;
+	encoder->post_disable = intel_ddi_post_disable;
+	encoder->update_pipe = intel_ddi_update_pipe;
+	encoder->get_hw_state = intel_ddi_get_hw_state;
+	encoder->get_config = intel_ddi_get_config;
+	encoder->suspend = intel_dp_encoder_suspend;
+	encoder->get_power_domains = intel_ddi_get_power_domains;
+
+	encoder->type = INTEL_OUTPUT_DDI;
+	encoder->power_domain = intel_port_to_power_domain(port);
+	encoder->port = port;
+	encoder->cloneable = 0;
+	encoder->pipe_mask = ~0;
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@@ -4771,6 +4800,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	else
 		intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
 			(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+
 	intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
 	intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
 	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
@@ -4781,8 +4811,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 
 		intel_tc_port_init(intel_dig_port, is_legacy);
 
-		intel_encoder->update_prepare = intel_ddi_update_prepare;
-		intel_encoder->update_complete = intel_ddi_update_complete;
+		encoder->update_prepare = intel_ddi_update_prepare;
+		encoder->update_complete = intel_ddi_update_complete;
 	}
 
 	WARN_ON(port > PORT_I);
@@ -4798,7 +4828,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 
 	/* In theory we don't need the encoder->type check, but leave it just in
 	 * case we have some really bad VBTs... */
-	if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+	if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
 		if (!intel_ddi_init_hdmi_connector(intel_dig_port))
 			goto err;
 	}
@@ -4822,6 +4852,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	return;
 
 err:
-	drm_encoder_cleanup(encoder);
+	drm_encoder_cleanup(&encoder->base);
 	kfree(intel_dig_port);
 }
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 19aeab1..167c657 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -22,7 +22,7 @@ struct intel_encoder;
 void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
 				const struct intel_crtc_state *old_crtc_state,
 				const struct drm_connector_state *old_conn_state);
-void hsw_fdi_link_train(struct intel_crtc *crtc,
+void hsw_fdi_link_train(struct intel_encoder *encoder,
 			const struct intel_crtc_state *crtc_state);
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3018977..19ea842 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -46,6 +46,7 @@
 #include "display/intel_crt.h"
 #include "display/intel_ddi.h"
 #include "display/intel_dp.h"
+#include "display/intel_dp_mst.h"
 #include "display/intel_dsi.h"
 #include "display/intel_dvo.h"
 #include "display/intel_gmbus.h"
@@ -86,8 +87,8 @@
 /* Primary plane formats for gen <= 3 */
 static const u32 i8xx_primary_formats[] = {
 	DRM_FORMAT_C8,
-	DRM_FORMAT_RGB565,
 	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_RGB565,
 	DRM_FORMAT_XRGB8888,
 };
 
@@ -112,6 +113,21 @@ static const u32 i965_primary_formats[] = {
 	DRM_FORMAT_XBGR16161616F,
 };
 
+/* Primary plane formats for vlv/chv */
+static const u32 vlv_primary_formats[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ARGB2101010,
+	DRM_FORMAT_ABGR2101010,
+	DRM_FORMAT_XBGR16161616F,
+};
+
 static const u64 i9xx_format_modifiers[] = {
 	I915_FORMAT_MOD_X_TILED,
 	DRM_FORMAT_MOD_LINEAR,
@@ -130,8 +146,8 @@ static const u64 cursor_format_modifiers[] = {
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 				struct intel_crtc_state *pipe_config);
-static void ironlake_pch_clock_get(struct intel_crtc *crtc,
-				   struct intel_crtc_state *pipe_config);
+static void ilk_pch_clock_get(struct intel_crtc *crtc,
+			      struct intel_crtc_state *pipe_config);
 
 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
 				  struct drm_i915_gem_object *obj,
@@ -142,21 +158,18 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
 					 const struct intel_link_m_n *m_n,
 					 const struct intel_link_m_n *m2_n2);
 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
 static void vlv_prepare_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *pipe_config);
 static void chv_prepare_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *pipe_config);
-static void intel_crtc_init_scalers(struct intel_crtc *crtc,
-				    struct intel_crtc_state *crtc_state);
-static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
-static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
-static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
 static void intel_modeset_setup_hw_state(struct drm_device *dev,
 					 struct drm_modeset_acquire_ctx *ctx);
-static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
+static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
 
 struct intel_limit {
 	struct {
@@ -357,7 +370,7 @@ static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
 	},
 };
 
-static const struct intel_limit intel_limits_pineview_sdvo = {
+static const struct intel_limit pnv_limits_sdvo = {
 	.dot = { .min = 20000, .max = 400000},
 	.vco = { .min = 1700000, .max = 3500000 },
 	/* Pineview's Ncounter is a ring counter */
@@ -372,7 +385,7 @@ static const struct intel_limit intel_limits_pineview_sdvo = {
 		.p2_slow = 10, .p2_fast = 5 },
 };
 
-static const struct intel_limit intel_limits_pineview_lvds = {
+static const struct intel_limit pnv_limits_lvds = {
 	.dot = { .min = 20000, .max = 400000 },
 	.vco = { .min = 1700000, .max = 3500000 },
 	.n = { .min = 3, .max = 6 },
@@ -390,7 +403,7 @@ static const struct intel_limit intel_limits_pineview_lvds = {
  * We calculate clock using (register_value + 2) for N/M1/M2, so here
  * the range value for them is (actual_value - 2).
  */
-static const struct intel_limit intel_limits_ironlake_dac = {
+static const struct intel_limit ilk_limits_dac = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 5 },
@@ -403,7 +416,7 @@ static const struct intel_limit intel_limits_ironlake_dac = {
 		.p2_slow = 10, .p2_fast = 5 },
 };
 
-static const struct intel_limit intel_limits_ironlake_single_lvds = {
+static const struct intel_limit ilk_limits_single_lvds = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 3 },
@@ -416,7 +429,7 @@ static const struct intel_limit intel_limits_ironlake_single_lvds = {
 		.p2_slow = 14, .p2_fast = 14 },
 };
 
-static const struct intel_limit intel_limits_ironlake_dual_lvds = {
+static const struct intel_limit ilk_limits_dual_lvds = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 3 },
@@ -430,7 +443,7 @@ static const struct intel_limit intel_limits_ironlake_dual_lvds = {
 };
 
 /* LVDS 100mhz refclk limits. */
-static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
+static const struct intel_limit ilk_limits_single_lvds_100m = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 2 },
@@ -443,7 +456,7 @@ static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
 		.p2_slow = 14, .p2_fast = 14 },
 };
 
-static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
+static const struct intel_limit ilk_limits_dual_lvds_100m = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 3 },
@@ -530,7 +543,7 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
 static bool
 needs_modeset(const struct intel_crtc_state *state)
 {
-	return drm_atomic_crtc_needs_modeset(&state->base);
+	return drm_atomic_crtc_needs_modeset(&state->uapi);
 }
 
 bool
@@ -541,10 +554,9 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
 }
 
 static bool
-is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
+is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
 {
-	return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
-		crtc_state->sync_mode_slaves_mask);
+	return crtc_state->master_transcoder != INVALID_TRANSCODER;
 }
 
 /*
@@ -658,7 +670,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
 		   const struct intel_crtc_state *crtc_state,
 		   int target)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		/*
@@ -694,7 +706,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
 		    int target, int refclk, struct dpll *match_clock,
 		    struct dpll *best_clock)
 {
-	struct drm_device *dev = crtc_state->base.crtc->dev;
+	struct drm_device *dev = crtc_state->uapi.crtc->dev;
 	struct dpll clock;
 	int err = target;
 
@@ -752,7 +764,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
 		   int target, int refclk, struct dpll *match_clock,
 		   struct dpll *best_clock)
 {
-	struct drm_device *dev = crtc_state->base.crtc->dev;
+	struct drm_device *dev = crtc_state->uapi.crtc->dev;
 	struct dpll clock;
 	int err = target;
 
@@ -808,7 +820,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
 		   int target, int refclk, struct dpll *match_clock,
 		   struct dpll *best_clock)
 {
-	struct drm_device *dev = crtc_state->base.crtc->dev;
+	struct drm_device *dev = crtc_state->uapi.crtc->dev;
 	struct dpll clock;
 	int max_n;
 	bool found = false;
@@ -902,7 +914,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
 		   int target, int refclk, struct dpll *match_clock,
 		   struct dpll *best_clock)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	struct dpll clock;
 	unsigned int bestppm = 1000000;
@@ -962,7 +974,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
 		   int target, int refclk, struct dpll *match_clock,
 		   struct dpll *best_clock)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	unsigned int best_error_ppm;
 	struct dpll clock;
@@ -1025,33 +1037,6 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
 				  NULL, best_clock);
 }
 
-bool intel_crtc_active(struct intel_crtc *crtc)
-{
-	/* Be paranoid as we can arrive here with only partial
-	 * state retrieved from the hardware during setup.
-	 *
-	 * We can ditch the adjusted_mode.crtc_clock check as soon
-	 * as Haswell has gained clock readout/fastboot support.
-	 *
-	 * We can ditch the crtc->primary->state->fb check as soon as we can
-	 * properly reconstruct framebuffers.
-	 *
-	 * FIXME: The intel_crtc->active here should be switched to
-	 * crtc->state->active once we have proper CRTC states wired up
-	 * for atomic.
-	 */
-	return crtc->active && crtc->base.primary->state->fb &&
-		crtc->config->base.adjusted_mode.crtc_clock;
-}
-
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
-					     enum pipe pipe)
-{
-	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
-	return crtc->config->cpu_transcoder;
-}
-
 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
 				    enum pipe pipe)
 {
@@ -1095,7 +1080,7 @@ static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
 static void
 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	if (INTEL_GEN(dev_priv) >= 4) {
@@ -1145,11 +1130,15 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
 			  enum pipe pipe, bool state)
 {
 	bool cur_state;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
 
 	if (HAS_DDI(dev_priv)) {
-		/* DDI does not have a specific FDI_TX register */
+		/*
+		 * DDI does not have a specific FDI_TX register.
+		 *
+		 * FDI is never fed from the EDP transcoder,
+		 * so the pipe->transcoder cast is fine here.
+		 */
+		enum transcoder cpu_transcoder = (enum transcoder)pipe;
 		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
 	} else {
@@ -1266,11 +1255,9 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
 }
 
 void assert_pipe(struct drm_i915_private *dev_priv,
-		 enum pipe pipe, bool state)
+		 enum transcoder cpu_transcoder, bool state)
 {
 	bool cur_state;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
 	enum intel_display_power_domain power_domain;
 	intel_wakeref_t wakeref;
 
@@ -1290,8 +1277,9 @@ void assert_pipe(struct drm_i915_private *dev_priv,
 	}
 
 	I915_STATE_WARN(cur_state != state,
-	     "pipe %c assertion failure (expected %s, current %s)\n",
-			pipe_name(pipe), onoff(state), onoff(cur_state));
+			"transcoder %s assertion failure (expected %s, current %s)\n",
+			transcoder_name(cpu_transcoder),
+			onoff(state), onoff(cur_state));
 }
 
 static void assert_plane(struct intel_plane *plane, bool state)
@@ -1418,7 +1406,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
-	assert_pipe_disabled(dev_priv, pipe);
+	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
 
 	/* PLL is protected by panel, make sure we can write it */
 	assert_panel_unlocked(dev_priv, pipe);
@@ -1467,7 +1455,7 @@ static void chv_enable_pll(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
-	assert_pipe_disabled(dev_priv, pipe);
+	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
 
 	/* PLL is protected by panel, make sure we can write it */
 	assert_panel_unlocked(dev_priv, pipe);
@@ -1514,7 +1502,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
 	u32 dpll = crtc_state->dpll_hw_state.dpll;
 	int i;
 
-	assert_pipe_disabled(dev_priv, crtc->pipe);
+	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
 
 	/* PLL is protected by panel, make sure we can write it */
 	if (i9xx_has_pps(dev_priv))
@@ -1554,7 +1542,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
 
 static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
@@ -1563,7 +1551,7 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
 		return;
 
 	/* Make sure the pipe isn't still relying on us */
-	assert_pipe_disabled(dev_priv, pipe);
+	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
 
 	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
 	POSTING_READ(DPLL(pipe));
@@ -1574,7 +1562,7 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 	u32 val;
 
 	/* Make sure the pipe isn't still relying on us */
-	assert_pipe_disabled(dev_priv, pipe);
+	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
 
 	val = DPLL_INTEGRATED_REF_CLK_VLV |
 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1591,7 +1579,7 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 	u32 val;
 
 	/* Make sure the pipe isn't still relying on us */
-	assert_pipe_disabled(dev_priv, pipe);
+	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
 
 	val = DPLL_SSC_REF_CLK_CHV |
 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1643,9 +1631,9 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
 		     I915_READ(dpll_reg) & port_mask, expected_mask);
 }
 
-static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
+static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	i915_reg_t reg;
@@ -1659,11 +1647,16 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
 	assert_fdi_rx_enabled(dev_priv, pipe);
 
 	if (HAS_PCH_CPT(dev_priv)) {
-		/* Workaround: Set the timing override bit before enabling the
-		 * pch transcoder. */
 		reg = TRANS_CHICKEN2(pipe);
 		val = I915_READ(reg);
+		/*
+		 * Workaround: Set the timing override bit
+		 * before enabling the pch transcoder.
+		 */
 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+		/* Configure frame start delay to match the CPU */
+		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
 		I915_WRITE(reg, val);
 	}
 
@@ -1672,6 +1665,10 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
 	pipeconf_val = I915_READ(PIPECONF(pipe));
 
 	if (HAS_PCH_IBX(dev_priv)) {
+		/* Configure frame start delay to match the CPU */
+		val &= ~TRANS_FRAME_START_DELAY_MASK;
+		val |= TRANS_FRAME_START_DELAY(0);
+
 		/*
 		 * Make the BPC in transcoder be consistent with
 		 * that in pipeconf reg. For HDMI we must use 8bpc
@@ -1709,9 +1706,12 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
 	assert_fdi_rx_enabled(dev_priv, PIPE_A);
 
-	/* Workaround: set timing override bit. */
 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+	/* Workaround: set timing override bit. */
 	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+	/* Configure frame start delay to match the CPU */
+	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
 
 	val = TRANS_ENABLE;
@@ -1729,8 +1729,8 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 		DRM_ERROR("Failed to enable PCH transcoder\n");
 }
 
-static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
-					    enum pipe pipe)
+static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+				       enum pipe pipe)
 {
 	i915_reg_t reg;
 	u32 val;
@@ -1789,7 +1789,7 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
 
 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 
 	/*
 	 * On i965gm the hardware frame counter reads
@@ -1809,16 +1809,25 @@ static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state
 
 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 
+	assert_vblank_disabled(&crtc->base);
 	drm_crtc_set_max_vblank_count(&crtc->base,
 				      intel_crtc_max_vblank_count(crtc_state));
 	drm_crtc_vblank_on(&crtc->base);
 }
 
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+	drm_crtc_vblank_off(&crtc->base);
+	assert_vblank_disabled(&crtc->base);
+}
+
 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
 	enum pipe pipe = crtc->pipe;
@@ -1874,9 +1883,9 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
 		intel_wait_for_pipe_scanline_moving(crtc);
 }
 
-static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
+void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
 	enum pipe pipe = crtc->pipe;
@@ -1919,6 +1928,74 @@ static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
 	return IS_GEN(dev_priv, 2) ? 2048 : 4096;
 }
 
+static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
+{
+	if (!is_ccs_modifier(fb->modifier))
+		return false;
+
+	return plane >= fb->format->num_planes / 2;
+}
+
+static bool is_gen12_ccs_modifier(u64 modifier)
+{
+	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
+
+}
+
+static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
+{
+	return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
+}
+
+static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
+{
+	if (is_ccs_modifier(fb->modifier))
+		return is_ccs_plane(fb, plane);
+
+	return plane == 1;
+}
+
+static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
+{
+	WARN_ON(!is_ccs_modifier(fb->modifier) ||
+		(main_plane && main_plane >= fb->format->num_planes / 2));
+
+	return fb->format->num_planes / 2 + main_plane;
+}
+
+static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
+{
+	WARN_ON(!is_ccs_modifier(fb->modifier) ||
+		ccs_plane < fb->format->num_planes / 2);
+
+	return ccs_plane - fb->format->num_planes / 2;
+}
+
+/* Return either the main plane's CCS plane or - if not a CCS FB - the UV plane */
+int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
+{
+	if (is_ccs_modifier(fb->modifier))
+		return main_to_ccs_plane(fb, main_plane);
+
+	return 1;
+}
+
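+/*
+ * A semiplanar YUV format has a Y and a UV plane; with a CCS modifier each
+ * of them also carries its own CCS plane, giving four planes in total.
+ */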
+bool
+intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
+				    uint64_t modifier)
+{
+	return info->is_yuv &&
+	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
+}
+
+static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
+				   int color_plane)
+{
+	return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+	       color_plane == 1;
+}
+
 static unsigned int
 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
 {
@@ -1934,16 +2011,21 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
 		else
 			return 512;
 	case I915_FORMAT_MOD_Y_TILED_CCS:
-		if (color_plane == 1)
+		if (is_ccs_plane(fb, color_plane))
 			return 128;
 		/* fall through */
+	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+		if (is_ccs_plane(fb, color_plane))
+			return 64;
+		/* fall through */
 	case I915_FORMAT_MOD_Y_TILED:
 		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
 			return 128;
 		else
 			return 512;
 	case I915_FORMAT_MOD_Yf_TILED_CCS:
-		if (color_plane == 1)
+		if (is_ccs_plane(fb, color_plane))
 			return 128;
 		/* fall through */
 	case I915_FORMAT_MOD_Yf_TILED:
@@ -1970,6 +2052,9 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
 static unsigned int
 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
 {
+	if (is_gen12_ccs_plane(fb, color_plane))
+		return 1;
+
 	return intel_tile_size(to_i915(fb->dev)) /
 		intel_tile_width_bytes(fb, color_plane);
 }
@@ -1983,7 +2068,17 @@ static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
 	unsigned int cpp = fb->format->cpp[color_plane];
 
 	*tile_width = tile_width_bytes / cpp;
-	*tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
+	*tile_height = intel_tile_height(fb, color_plane);
+}
+
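+/* Bytes covered by one horizontal row of tiles in the given FB plane. */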
+static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
+					int color_plane)
+{
+	unsigned int tile_width, tile_height;
+
+	intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
+
+	return fb->pitches[color_plane] * tile_height;
 }
 
 unsigned int
@@ -2060,7 +2155,8 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
 
 	/* AUX_DIST needs only 4K alignment */
-	if (color_plane == 1)
+	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
+	    is_ccs_plane(fb, color_plane))
 		return 4096;
 
 	switch (fb->modifier) {
@@ -2070,9 +2166,19 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
 		if (INTEL_GEN(dev_priv) >= 9)
 			return 256 * 1024;
 		return 0;
+	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+		if (is_semiplanar_uv_plane(fb, color_plane))
+			return intel_tile_row_size(fb, color_plane);
+		/* Fall-through */
+	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+		return 16 * 1024;
 	case I915_FORMAT_MOD_Y_TILED_CCS:
 	case I915_FORMAT_MOD_Yf_TILED_CCS:
 	case I915_FORMAT_MOD_Y_TILED:
+		if (INTEL_GEN(dev_priv) >= 12 &&
+		    is_semiplanar_uv_plane(fb, color_plane))
+			return intel_tile_row_size(fb, color_plane);
+		/* Fall-through */
 	case I915_FORMAT_MOD_Yf_TILED:
 		return 1 * 1024 * 1024;
 	default:
@@ -2083,7 +2189,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
 
 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 
 	return INTEL_GEN(dev_priv) < 4 ||
@@ -2109,6 +2215,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
 		return ERR_PTR(-EINVAL);
 
 	alignment = intel_surf_alignment(fb, 0);
+	if (WARN_ON(alignment && !is_power_of_2(alignment)))
+		return ERR_PTR(-EINVAL);
 
 	/* Note that the w/a also requires 64 PTE of padding following the
 	 * bo. We currently fill all unused PTE with the shadow page and so
@@ -2126,19 +2234,18 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
 	 * pin/unpin/fence and not more.
 	 */
 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-	i915_gem_object_lock(obj);
 
 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
 
-	pinctl = 0;
-
-	/* Valleyview is definitely limited to scanning out the first
+	/*
+	 * Valleyview is definitely limited to scanning out the first
 	 * 512MiB. Lets presume this behaviour was inherited from the
 	 * g4x display engine and that all earlier gen are similarly
 	 * limited. Testing suggests that it is a little more
 	 * complicated than this. For example, Cherryview appears quite
 	 * happy to scanout from anywhere within its global aperture.
 	 */
+	pinctl = 0;
 	if (HAS_GMCH(dev_priv))
 		pinctl |= PIN_MAPPABLE;
 
@@ -2150,7 +2257,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
 	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
 		int ret;
 
-		/* Install a fence for tiled scan-out. Pre-i965 always needs a
+		/*
+		 * Install a fence for tiled scan-out. Pre-i965 always needs a
 		 * fence, whereas 965+ only requires a fence if using
 		 * framebuffer compression.  For simplicity, we always, when
 		 * possible, install a fence as the cost is not that onerous.
@@ -2180,8 +2288,6 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
 	i915_vma_get(vma);
 err:
 	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
-
-	i915_gem_object_unlock(obj);
 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 	return vma;
 }
@@ -2216,7 +2322,7 @@ u32 intel_fb_xy_to_linear(int x, int y,
 			  const struct intel_plane_state *state,
 			  int color_plane)
 {
-	const struct drm_framebuffer *fb = state->base.fb;
+	const struct drm_framebuffer *fb = state->hw.fb;
 	unsigned int cpp = fb->format->cpp[color_plane];
 	unsigned int pitch = state->color_plane[color_plane].stride;
 
@@ -2264,9 +2370,10 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
 	return new_offset;
 }
 
-static bool is_surface_linear(u64 modifier, int color_plane)
+static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
 {
-	return modifier == DRM_FORMAT_MOD_LINEAR;
+	return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
+	       is_gen12_ccs_plane(fb, color_plane);
 }
 
 static u32 intel_adjust_aligned_offset(int *x, int *y,
@@ -2281,7 +2388,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
 
 	WARN_ON(new_offset > old_offset);
 
-	if (!is_surface_linear(fb->modifier, color_plane)) {
+	if (!is_surface_linear(fb, color_plane)) {
 		unsigned int tile_size, tile_width, tile_height;
 		unsigned int pitch_tiles;
 
@@ -2317,8 +2424,8 @@ static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
 					     int color_plane,
 					     u32 old_offset, u32 new_offset)
 {
-	return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
-					   state->base.rotation,
+	return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
+					   state->hw.rotation,
 					   state->color_plane[color_plane].stride,
 					   old_offset, new_offset);
 }
@@ -2348,10 +2455,7 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
 	unsigned int cpp = fb->format->cpp[color_plane];
 	u32 offset, offset_aligned;
 
-	if (alignment)
-		alignment--;
-
-	if (!is_surface_linear(fb->modifier, color_plane)) {
+	if (!is_surface_linear(fb, color_plane)) {
 		unsigned int tile_size, tile_width, tile_height;
 		unsigned int tile_rows, tiles, pitch_tiles;
 
@@ -2372,17 +2476,24 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
 		*x %= tile_width;
 
 		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
-		offset_aligned = offset & ~alignment;
+
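+		/*
+		 * The alignment need not be a power of two here, so round
+		 * down explicitly instead of masking.
+		 */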
+		offset_aligned = offset;
+		if (alignment)
+			offset_aligned = rounddown(offset_aligned, alignment);
 
 		intel_adjust_tile_offset(x, y, tile_width, tile_height,
 					 tile_size, pitch_tiles,
 					 offset, offset_aligned);
 	} else {
 		offset = *y * pitch + *x * cpp;
-		offset_aligned = offset & ~alignment;
-
-		*y = (offset & alignment) / pitch;
-		*x = ((offset & alignment) - *y * pitch) / cpp;
+		offset_aligned = offset;
+		if (alignment) {
+			offset_aligned = rounddown(offset_aligned, alignment);
+			*y = (offset % alignment) / pitch;
+			*x = ((offset % alignment) - *y * pitch) / cpp;
+		} else {
+			*y = *x = 0;
+		}
 	}
 
 	return offset_aligned;
@@ -2392,10 +2503,10 @@ static u32 intel_plane_compute_aligned_offset(int *x, int *y,
 					      const struct intel_plane_state *state,
 					      int color_plane)
 {
-	struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
+	struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
-	const struct drm_framebuffer *fb = state->base.fb;
-	unsigned int rotation = state->base.rotation;
+	const struct drm_framebuffer *fb = state->hw.fb;
+	unsigned int rotation = state->hw.rotation;
 	int pitch = state->color_plane[color_plane].stride;
 	u32 alignment;
 
@@ -2415,9 +2526,17 @@ static int intel_fb_offset_to_xy(int *x, int *y,
 {
 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
 	unsigned int height;
+	u32 alignment;
 
-	if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
-	    fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 12 &&
+	    is_semiplanar_uv_plane(fb, color_plane))
+		alignment = intel_tile_row_size(fb, color_plane);
+	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
+		alignment = intel_tile_size(dev_priv);
+	else
+		alignment = 0;
+
+	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
 		DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
 			      fb->offsets[color_plane], color_plane);
 		return -EINVAL;
@@ -2453,6 +2572,8 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
 		return I915_TILING_X;
 	case I915_FORMAT_MOD_Y_TILED:
 	case I915_FORMAT_MOD_Y_TILED_CCS:
+	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
 		return I915_TILING_Y;
 	default:
 		return I915_TILING_NONE;
@@ -2473,7 +2594,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
  * us a ratio of one byte in the CCS for each 8x16 pixels in the
  * main surface.
  */
-static const struct drm_format_info ccs_formats[] = {
+static const struct drm_format_info skl_ccs_formats[] = {
 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
@@ -2484,6 +2605,52 @@ static const struct drm_format_info ccs_formats[] = {
 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
 };
 
+/*
+ * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
+ * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
+ * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
+ * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in
+ * the main surface.
+ */
+static const struct drm_format_info gen12_ccs_formats[] = {
+	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+	  .hsub = 1, .vsub = 1, },
+	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+	  .hsub = 1, .vsub = 1, },
+	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+	  .hsub = 1, .vsub = 1, .has_alpha = true },
+	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+	  .hsub = 1, .vsub = 1, .has_alpha = true },
+	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
+	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+	  .hsub = 2, .vsub = 1, .is_yuv = true },
+	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
+	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+	  .hsub = 2, .vsub = 1, .is_yuv = true },
+	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
+	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+	  .hsub = 2, .vsub = 1, .is_yuv = true },
+	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
+	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+	  .hsub = 2, .vsub = 1, .is_yuv = true },
+	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
+	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
+	  .hsub = 2, .vsub = 2, .is_yuv = true },
+	{ .format = DRM_FORMAT_P010, .num_planes = 4,
+	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+	  .hsub = 2, .vsub = 2, .is_yuv = true },
+	{ .format = DRM_FORMAT_P012, .num_planes = 4,
+	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+	  .hsub = 2, .vsub = 2, .is_yuv = true },
+	{ .format = DRM_FORMAT_P016, .num_planes = 4,
+	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+	  .hsub = 2, .vsub = 2, .is_yuv = true },
+};
+
 static const struct drm_format_info *
 lookup_format_info(const struct drm_format_info formats[],
 		   int num_formats, u32 format)
@@ -2504,8 +2671,13 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
 	switch (cmd->modifier[0]) {
 	case I915_FORMAT_MOD_Y_TILED_CCS:
 	case I915_FORMAT_MOD_Yf_TILED_CCS:
-		return lookup_format_info(ccs_formats,
-					  ARRAY_SIZE(ccs_formats),
+		return lookup_format_info(skl_ccs_formats,
+					  ARRAY_SIZE(skl_ccs_formats),
+					  cmd->pixel_format);
+	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+		return lookup_format_info(gen12_ccs_formats,
+					  ARRAY_SIZE(gen12_ccs_formats),
 					  cmd->pixel_format);
 	default:
 		return NULL;
@@ -2514,10 +2686,18 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
 
 bool is_ccs_modifier(u64 modifier)
 {
-	return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+	       modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
 	       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
 }
 
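+/*
+ * Gen12 CCS plane stride: 64 bytes of CCS data for every 512 bytes (rounded
+ * up) of the corresponding main plane's stride.
+ */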
+static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
+{
+	return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
+			    512) * 64;
+}
+
 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
 			      u32 pixel_format, u64 modifier)
 {
@@ -2562,8 +2742,9 @@ static u32
 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
 {
 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
+	u32 tile_width;
 
-	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+	if (is_surface_linear(fb, color_plane)) {
 		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
 							   fb->format->format,
 							   fb->modifier);
@@ -2572,20 +2753,41 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
 		 * To make remapping with linear generally feasible
 		 * we need the stride to be page aligned.
 		 */
-		if (fb->pitches[color_plane] > max_stride)
+		if (fb->pitches[color_plane] > max_stride &&
+		    !is_ccs_modifier(fb->modifier))
 			return intel_tile_size(dev_priv);
 		else
 			return 64;
-	} else {
-		return intel_tile_width_bytes(fb, color_plane);
 	}
+
+	tile_width = intel_tile_width_bytes(fb, color_plane);
+	if (is_ccs_modifier(fb->modifier)) {
+		/*
+		 * Display WA #0531: skl,bxt,kbl,glk
+		 *
+		 * Render decompression and plane width > 3840
+		 * combined with horizontal panning requires the
+		 * plane stride to be a multiple of 4. We'll just
+		 * require the entire fb to accommodate that to avoid
+		 * potential runtime errors at plane configuration time.
+		 */
+		if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
+			tile_width *= 4;
+		/*
+		 * The main surface pitch must be padded to a multiple of four
+		 * tile widths.
+		 */
+		else if (INTEL_GEN(dev_priv) >= 12)
+			tile_width *= 4;
+	}
+	return tile_width;
 }
 
 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	int i;
 
 	/* We don't want to deal with remapping with cursors */
@@ -2623,16 +2825,16 @@ bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
 
 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	unsigned int rotation = plane_state->base.rotation;
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	unsigned int rotation = plane_state->hw.rotation;
 	u32 stride, max_stride;
 
 	/*
 	 * No remapping for invisible planes since we don't have
 	 * an actual source viewport to remap.
 	 */
-	if (!plane_state->base.visible)
+	if (!plane_state->uapi.visible)
 		return false;
 
 	if (!intel_plane_can_remap(plane_state))
@@ -2649,12 +2851,171 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
 	return stride > max_stride;
 }
 
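+/*
+ * Return the hsub/vsub subsampling factors of the given FB plane relative
+ * to plane 0. For gen12 CCS planes the horizontal factor is derived from
+ * the CCS and main plane block widths and the vertical factor is 32.
+ */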
+static void
+intel_fb_plane_get_subsampling(int *hsub, int *vsub,
+			       const struct drm_framebuffer *fb,
+			       int color_plane)
+{
+	int main_plane;
+
+	if (color_plane == 0) {
+		*hsub = 1;
+		*vsub = 1;
+
+		return;
+	}
+
+	/*
+	 * TODO: Deduce the subsampling from the char block for all CCS
+	 * formats and planes.
+	 */
+	if (!is_gen12_ccs_plane(fb, color_plane)) {
+		*hsub = fb->format->hsub;
+		*vsub = fb->format->vsub;
+
+		return;
+	}
+
+	main_plane = ccs_to_main_plane(fb, color_plane);
+	*hsub = drm_format_info_block_width(fb->format, color_plane) /
+		drm_format_info_block_width(fb->format, main_plane);
+
+	/*
+	 * The min stride check in the core framebuffer_check() function
+	 * assumes that format->hsub applies to every plane except for the
+	 * first plane. That's incorrect for the CCS AUX plane of the first
+	 * plane, but for that check to pass we must define the block
+	 * width with that subsampling applied to it. Adjust the width here
+	 * accordingly, so we can calculate the actual subsampling factor.
+	 */
+	if (main_plane == 0)
+		*hsub *= fb->format->hsub;
+
+	*vsub = 32;
+}
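+
+/*
+ * Check that the x/y offsets of a CCS plane stay in sync with its main
+ * plane within a tile; CCS planes have no x/y offset register of their own.
+ */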
+static int
+intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
+{
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	int main_plane;
+	int hsub, vsub;
+	int tile_width, tile_height;
+	int ccs_x, ccs_y;
+	int main_x, main_y;
+
+	if (!is_ccs_plane(fb, ccs_plane))
+		return 0;
+
+	intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
+	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
+
+	tile_width *= hsub;
+	tile_height *= vsub;
+
+	ccs_x = (x * hsub) % tile_width;
+	ccs_y = (y * vsub) % tile_height;
+
+	main_plane = ccs_to_main_plane(fb, ccs_plane);
+	main_x = intel_fb->normal[main_plane].x % tile_width;
+	main_y = intel_fb->normal[main_plane].y % tile_height;
+
+	/*
+	 * CCS doesn't have its own x/y offset register, so the intra CCS tile
+	 * x/y offsets must match between CCS and the main surface.
+	 */
+	if (main_x != ccs_x || main_y != ccs_y) {
+		DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+			      main_x, main_y,
+			      ccs_x, ccs_y,
+			      intel_fb->normal[main_plane].x,
+			      intel_fb->normal[main_plane].y,
+			      x, y);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
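+/*
+ * Plane dimensions in pixels: the FB width/height scaled down by both the
+ * main plane's and the given plane's subsampling factors.
+ */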
+static void
+intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
+{
+	int main_plane = is_ccs_plane(fb, color_plane) ?
+			 ccs_to_main_plane(fb, color_plane) : 0;
+	int main_hsub, main_vsub;
+	int hsub, vsub;
+
+	intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
+	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
+	*w = fb->width / main_hsub / hsub;
+	*h = fb->height / main_vsub / vsub;
+}
+
+/*
+ * Set up the rotated view for an FB plane and return the size the GTT mapping
+ * requires for this view.
+ */
+static u32
+setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
+		  u32 gtt_offset_rotated, int x, int y,
+		  unsigned int width, unsigned int height,
+		  unsigned int tile_size,
+		  unsigned int tile_width, unsigned int tile_height,
+		  struct drm_framebuffer *fb)
+{
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
+	unsigned int pitch_tiles;
+	struct drm_rect r;
+
+	/* Y or Yf modifiers required for 90/270 rotation */
+	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
+	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
+		return 0;
+
+	if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane)))
+		return 0;
+
+	rot_info->plane[plane] = *plane_info;
+
+	intel_fb->rotated[plane].pitch = plane_info->height * tile_height;
+
+	/* rotate the x/y offsets to match the GTT view */
+	drm_rect_init(&r, x, y, width, height);
+	drm_rect_rotate(&r,
+			plane_info->width * tile_width,
+			plane_info->height * tile_height,
+			DRM_MODE_ROTATE_270);
+	x = r.x1;
+	y = r.y1;
+
+	/* rotate the tile dimensions to match the GTT view */
+	pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
+	swap(tile_width, tile_height);
+
+	/*
+	 * We only keep the x/y offsets, so push all of the
+	 * gtt offset into the x/y offsets.
+	 */
+	intel_adjust_tile_offset(&x, &y,
+				 tile_width, tile_height,
+				 tile_size, pitch_tiles,
+				 gtt_offset_rotated * tile_size, 0);
+
+	/*
+	 * First pixel of the framebuffer from
+	 * the start of the rotated gtt mapping.
+	 */
+	intel_fb->rotated[plane].x = x;
+	intel_fb->rotated[plane].y = y;
+
+	return plane_info->width * plane_info->height;
+}
+
 static int
 intel_fill_fb_info(struct drm_i915_private *dev_priv,
 		   struct drm_framebuffer *fb)
 {
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	u32 gtt_offset_rotated = 0;
 	unsigned int max_size = 0;
@@ -2669,8 +3030,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 		int ret;
 
 		cpp = fb->format->cpp[i];
-		width = drm_framebuffer_plane_width(fb->width, fb, i);
-		height = drm_framebuffer_plane_height(fb->height, fb, i);
+		intel_fb_plane_dims(&width, &height, fb, i);
 
 		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
 		if (ret) {
@@ -2679,36 +3039,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 			return ret;
 		}
 
-		if (is_ccs_modifier(fb->modifier) && i == 1) {
-			int hsub = fb->format->hsub;
-			int vsub = fb->format->vsub;
-			int tile_width, tile_height;
-			int main_x, main_y;
-			int ccs_x, ccs_y;
-
-			intel_tile_dims(fb, i, &tile_width, &tile_height);
-			tile_width *= hsub;
-			tile_height *= vsub;
-
-			ccs_x = (x * hsub) % tile_width;
-			ccs_y = (y * vsub) % tile_height;
-			main_x = intel_fb->normal[0].x % tile_width;
-			main_y = intel_fb->normal[0].y % tile_height;
-
-			/*
-			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
-			 * x/y offsets must match between CCS and the main surface.
-			 */
-			if (main_x != ccs_x || main_y != ccs_y) {
-				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
-					      main_x, main_y,
-					      ccs_x, ccs_y,
-					      intel_fb->normal[0].x,
-					      intel_fb->normal[0].y,
-					      x, y);
-				return -EINVAL;
-			}
-		}
+		ret = intel_fb_check_ccs_xy(fb, i, x, y);
+		if (ret)
+			return ret;
 
 		/*
 		 * The fence (if used) is aligned to the start of the object
@@ -2739,23 +3072,21 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 						      tile_size);
 		offset /= tile_size;
 
-		if (!is_surface_linear(fb->modifier, i)) {
+		if (!is_surface_linear(fb, i)) {
+			struct intel_remapped_plane_info plane_info;
 			unsigned int tile_width, tile_height;
-			unsigned int pitch_tiles;
-			struct drm_rect r;
 
 			intel_tile_dims(fb, i, &tile_width, &tile_height);
 
-			rot_info->plane[i].offset = offset;
-			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
-			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
-			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
-
-			intel_fb->rotated[i].pitch =
-				rot_info->plane[i].height * tile_height;
+			plane_info.offset = offset;
+			plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
+							 tile_width * cpp);
+			plane_info.width = DIV_ROUND_UP(x + width, tile_width);
+			plane_info.height = DIV_ROUND_UP(y + height,
+							 tile_height);
 
 			/* how many tiles does this plane need */
-			size = rot_info->plane[i].stride * rot_info->plane[i].height;
+			size = plane_info.stride * plane_info.height;
 			/*
 			 * If the plane isn't horizontally tile aligned,
 			 * we need one more tile.
@@ -2763,36 +3094,13 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 			if (x != 0)
 				size++;
 
-			/* rotate the x/y offsets to match the GTT view */
-			drm_rect_init(&r, x, y, width, height);
-			drm_rect_rotate(&r,
-					rot_info->plane[i].width * tile_width,
-					rot_info->plane[i].height * tile_height,
-					DRM_MODE_ROTATE_270);
-			x = r.x1;
-			y = r.y1;
-
-			/* rotate the tile dimensions to match the GTT view */
-			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
-			swap(tile_width, tile_height);
-
-			/*
-			 * We only keep the x/y offsets, so push all of the
-			 * gtt offset into the x/y offsets.
-			 */
-			intel_adjust_tile_offset(&x, &y,
-						 tile_width, tile_height,
-						 tile_size, pitch_tiles,
-						 gtt_offset_rotated * tile_size, 0);
-
-			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
-
-			/*
-			 * First pixel of the framebuffer from
-			 * the start of the rotated gtt mapping.
-			 */
-			intel_fb->rotated[i].x = x;
-			intel_fb->rotated[i].y = y;
+			gtt_offset_rotated +=
+				setup_fb_rotation(i, &plane_info,
+						  gtt_offset_rotated,
+						  x, y, width, height,
+						  tile_size,
+						  tile_width, tile_height,
+						  fb);
 		} else {
 			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
 					    x * cpp, tile_size);
@@ -2815,11 +3123,11 @@ static void
 intel_plane_remap_gtt(struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->base.plane->dev);
-	struct drm_framebuffer *fb = plane_state->base.fb;
+		to_i915(plane_state->uapi.plane->dev);
+	struct drm_framebuffer *fb = plane_state->hw.fb;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 	struct intel_rotation_info *info = &plane_state->view.rotated;
-	unsigned int rotation = plane_state->base.rotation;
+	unsigned int rotation = plane_state->hw.rotation;
 	int i, num_planes = fb->format->num_planes;
 	unsigned int tile_size = intel_tile_size(dev_priv);
 	unsigned int src_x, src_y;
@@ -2830,20 +3138,20 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
 	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
 		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
 
-	src_x = plane_state->base.src.x1 >> 16;
-	src_y = plane_state->base.src.y1 >> 16;
-	src_w = drm_rect_width(&plane_state->base.src) >> 16;
-	src_h = drm_rect_height(&plane_state->base.src) >> 16;
+	src_x = plane_state->uapi.src.x1 >> 16;
+	src_y = plane_state->uapi.src.y1 >> 16;
+	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
 
 	WARN_ON(is_ccs_modifier(fb->modifier));
 
 	/* Make src coordinates relative to the viewport */
-	drm_rect_translate(&plane_state->base.src,
+	drm_rect_translate(&plane_state->uapi.src,
 			   -(src_x << 16), -(src_y << 16));
 
 	/* Rotate src coordinates to match rotated GTT view */
 	if (drm_rotation_90_or_270(rotation))
-		drm_rect_rotate(&plane_state->base.src,
+		drm_rect_rotate(&plane_state->uapi.src,
 				src_w << 16, src_h << 16,
 				DRM_MODE_ROTATE_270);
 
@@ -2876,6 +3184,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
 						      DRM_MODE_ROTATE_0, tile_size);
 		offset /= tile_size;
 
+		WARN_ON(i >= ARRAY_SIZE(info->plane));
 		info->plane[i].offset = offset;
 		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
 						     tile_width * cpp);
@@ -2925,8 +3234,8 @@ static int
 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
 {
 	const struct intel_framebuffer *fb =
-		to_intel_framebuffer(plane_state->base.fb);
-	unsigned int rotation = plane_state->base.rotation;
+		to_intel_framebuffer(plane_state->hw.fb);
+	unsigned int rotation = plane_state->hw.rotation;
 	int i, num_planes;
 
 	if (!fb)
@@ -2963,7 +3272,7 @@ intel_plane_compute_gtt(struct intel_plane_state *plane_state)
 
 	/* Rotate src coordinates to match rotated GTT view */
 	if (drm_rotation_90_or_270(rotation))
-		drm_rect_rotate(&plane_state->base.src,
+		drm_rect_rotate(&plane_state->uapi.src,
 				fb->base.width << 16, fb->base.height << 16,
 				DRM_MODE_ROTATE_270);
 
@@ -2975,6 +3284,8 @@ static int i9xx_format_to_fourcc(int format)
 	switch (format) {
 	case DISPPLANE_8BPP:
 		return DRM_FORMAT_C8;
+	case DISPPLANE_BGRA555:
+		return DRM_FORMAT_ARGB1555;
 	case DISPPLANE_BGRX555:
 		return DRM_FORMAT_XRGB1555;
 	case DISPPLANE_BGRX565:
@@ -2984,10 +3295,18 @@ static int i9xx_format_to_fourcc(int format)
 		return DRM_FORMAT_XRGB8888;
 	case DISPPLANE_RGBX888:
 		return DRM_FORMAT_XBGR8888;
+	case DISPPLANE_BGRA888:
+		return DRM_FORMAT_ARGB8888;
+	case DISPPLANE_RGBA888:
+		return DRM_FORMAT_ABGR8888;
 	case DISPPLANE_BGRX101010:
 		return DRM_FORMAT_XRGB2101010;
 	case DISPPLANE_RGBX101010:
 		return DRM_FORMAT_XBGR2101010;
+	case DISPPLANE_BGRA101010:
+		return DRM_FORMAT_ARGB2101010;
+	case DISPPLANE_RGBA101010:
+		return DRM_FORMAT_ABGR2101010;
 	case DISPPLANE_RGBX161616:
 		return DRM_FORMAT_XBGR16161616F;
 	}
@@ -3032,10 +3351,17 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
 				return DRM_FORMAT_XRGB8888;
 		}
 	case PLANE_CTL_FORMAT_XRGB_2101010:
-		if (rgb_order)
-			return DRM_FORMAT_XBGR2101010;
-		else
-			return DRM_FORMAT_XRGB2101010;
+		if (rgb_order) {
+			if (alpha)
+				return DRM_FORMAT_ABGR2101010;
+			else
+				return DRM_FORMAT_XBGR2101010;
+		} else {
+			if (alpha)
+				return DRM_FORMAT_ARGB2101010;
+			else
+				return DRM_FORMAT_XRGB2101010;
+		}
 	case PLANE_CTL_FORMAT_XRGB_16161616F:
 		if (rgb_order) {
 			if (alpha)
@@ -3131,19 +3457,19 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
 			struct intel_plane_state *plane_state,
 			bool visible)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 
-	plane_state->base.visible = visible;
+	plane_state->uapi.visible = visible;
 
 	if (visible)
-		crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
+		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
 	else
-		crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
+		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
 }
 
 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	struct drm_plane *plane;
 
 	/*
@@ -3154,13 +3480,14 @@ static void fixup_active_planes(struct intel_crtc_state *crtc_state)
 	crtc_state->active_planes = 0;
 
 	drm_for_each_plane_mask(plane, &dev_priv->drm,
-				crtc_state->base.plane_mask)
+				crtc_state->uapi.plane_mask)
 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
 }
 
 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
 					 struct intel_plane *plane)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_crtc_state *crtc_state =
 		to_intel_crtc_state(crtc->base.state);
 	struct intel_plane_state *plane_state =
@@ -3176,7 +3503,27 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
 	crtc_state->min_cdclk[plane->id] = 0;
 
 	if (plane->id == PLANE_PRIMARY)
-		intel_pre_disable_primary_noatomic(&crtc->base);
+		hsw_disable_ips(crtc_state);
+
+	/*
+	 * Vblank time updates from the shadow to live plane control register
+	 * are blocked if the memory self-refresh mode is active at that
+	 * moment. So to make sure the plane gets truly disabled, disable
+	 * first the self-refresh mode. The self-refresh enable bit in turn
+	 * will be checked/applied by the HW only at the next frame start
+	 * event which is after the vblank start event, so we need to have a
+	 * wait-for-vblank between disabling the plane and the pipe.
+	 */
+	if (HAS_GMCH(dev_priv) &&
+	    intel_set_memory_cxsr(dev_priv, false))
+		intel_wait_for_vblank(dev_priv, crtc->pipe);
+
+	/*
+	 * Gen2 reports pipe underruns whenever all planes are disabled.
+	 * So disable underrun reporting before all the planes get disabled.
+	 */
+	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
 
 	intel_disable_plane(plane, crtc_state);
 }
@@ -3229,7 +3576,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 			continue;
 
 		if (intel_plane_ggtt_offset(state) == plane_config->base) {
-			fb = state->base.fb;
+			fb = state->hw.fb;
 			drm_framebuffer_get(fb);
 			goto valid_fb;
 		}
@@ -3247,11 +3594,11 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	return;
 
 valid_fb:
-	intel_state->base.rotation = plane_config->rotation;
+	intel_state->hw.rotation = plane_config->rotation;
 	intel_fill_fb_ggtt_view(&intel_state->view, fb,
-				intel_state->base.rotation);
+				intel_state->hw.rotation);
 	intel_state->color_plane[0].stride =
-		intel_fb_pitch(fb, 0, intel_state->base.rotation);
+		intel_fb_pitch(fb, 0, intel_state->hw.rotation);
 
 	intel_state->vma =
 		intel_pin_and_fence_fb_obj(fb,
@@ -3279,14 +3626,15 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	plane_state->crtc_w = fb->width;
 	plane_state->crtc_h = fb->height;
 
-	intel_state->base.src = drm_plane_state_src(plane_state);
-	intel_state->base.dst = drm_plane_state_dest(plane_state);
+	intel_state->uapi.src = drm_plane_state_src(plane_state);
+	intel_state->uapi.dst = drm_plane_state_dest(plane_state);
 
 	if (plane_config->tiling)
 		dev_priv->preserve_bios_swizzle = true;
 
 	plane_state->fb = fb;
 	plane_state->crtc = &intel_crtc->base;
+	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);
 
 	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
 		  &to_intel_frontbuffer(fb)->bits);
@@ -3317,6 +3665,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb,
 			return 5120;
 	case I915_FORMAT_MOD_Y_TILED_CCS:
 	case I915_FORMAT_MOD_Yf_TILED_CCS:
+	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
 		/* FIXME AUX plane? */
 	case I915_FORMAT_MOD_Y_TILED:
 	case I915_FORMAT_MOD_Yf_TILED:
@@ -3375,17 +3724,20 @@ static int icl_max_plane_height(void)
 	return 4320;
 }
 
-static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
-					   int main_x, int main_y, u32 main_offset)
+static bool
+skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
+			       int main_x, int main_y, u32 main_offset,
+			       int ccs_plane)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	int hsub = fb->format->hsub;
-	int vsub = fb->format->vsub;
-	int aux_x = plane_state->color_plane[1].x;
-	int aux_y = plane_state->color_plane[1].y;
-	u32 aux_offset = plane_state->color_plane[1].offset;
-	u32 alignment = intel_surf_alignment(fb, 1);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	int aux_x = plane_state->color_plane[ccs_plane].x;
+	int aux_y = plane_state->color_plane[ccs_plane].y;
+	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
+	u32 alignment = intel_surf_alignment(fb, ccs_plane);
+	int hsub;
+	int vsub;
 
+	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
 	while (aux_offset >= main_offset && aux_y <= main_y) {
 		int x, y;
 
@@ -3397,8 +3749,12 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
 
 		x = aux_x / hsub;
 		y = aux_y / vsub;
-		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
-							       aux_offset, aux_offset - alignment);
+		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
+							       plane_state,
+							       ccs_plane,
+							       aux_offset,
+							       aux_offset -
+								alignment);
 		aux_x = x * hsub + aux_x % hsub;
 		aux_y = y * vsub + aux_y % vsub;
 	}
@@ -3406,25 +3762,28 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
 	if (aux_x != main_x || aux_y != main_y)
 		return false;
 
-	plane_state->color_plane[1].offset = aux_offset;
-	plane_state->color_plane[1].x = aux_x;
-	plane_state->color_plane[1].y = aux_y;
+	plane_state->color_plane[ccs_plane].offset = aux_offset;
+	plane_state->color_plane[ccs_plane].x = aux_x;
+	plane_state->color_plane[ccs_plane].y = aux_y;
 
 	return true;
 }
 
 static int skl_check_main_surface(struct intel_plane_state *plane_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	unsigned int rotation = plane_state->base.rotation;
-	int x = plane_state->base.src.x1 >> 16;
-	int y = plane_state->base.src.y1 >> 16;
-	int w = drm_rect_width(&plane_state->base.src) >> 16;
-	int h = drm_rect_height(&plane_state->base.src) >> 16;
+	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	unsigned int rotation = plane_state->hw.rotation;
+	int x = plane_state->uapi.src.x1 >> 16;
+	int y = plane_state->uapi.src.y1 >> 16;
+	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
 	int max_width;
 	int max_height;
-	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
+	u32 alignment;
+	u32 offset;
+	int aux_plane = intel_main_to_aux_plane(fb, 0);
+	u32 aux_offset = plane_state->color_plane[aux_plane].offset;
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		max_width = icl_max_plane_width(fb, 0, rotation);
@@ -3447,6 +3806,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
 	intel_add_fb_offsets(&x, &y, plane_state, 0);
 	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
 	alignment = intel_surf_alignment(fb, 0);
+	if (WARN_ON(alignment && !is_power_of_2(alignment)))
+		return -EINVAL;
 
 	/*
 	 * AUX surface offset is specified as the distance from the
@@ -3482,7 +3843,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
 	 * they match with the main surface x/y offsets.
 	 */
 	if (is_ccs_modifier(fb->modifier)) {
-		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
+		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
+						       offset, aux_plane)) {
 			if (offset == 0)
 				break;
 
@@ -3490,7 +3852,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
 								   offset, offset - alignment);
 		}
 
-		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
+		if (x != plane_state->color_plane[aux_plane].x ||
+		    y != plane_state->color_plane[aux_plane].y) {
 			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
 			return -EINVAL;
 		}
@@ -3504,7 +3867,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
 	 * Put the final coordinates back so that the src
 	 * coordinate checks will see the right values.
 	 */
-	drm_rect_translate_to(&plane_state->base.src,
+	drm_rect_translate_to(&plane_state->uapi.src,
 			      x << 16, y << 16);
 
 	return 0;
@@ -3512,18 +3875,20 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
 
 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	unsigned int rotation = plane_state->base.rotation;
-	int max_width = skl_max_plane_width(fb, 1, rotation);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	unsigned int rotation = plane_state->hw.rotation;
+	int uv_plane = 1;
+	int max_width = skl_max_plane_width(fb, uv_plane, rotation);
 	int max_height = 4096;
-	int x = plane_state->base.src.x1 >> 17;
-	int y = plane_state->base.src.y1 >> 17;
-	int w = drm_rect_width(&plane_state->base.src) >> 17;
-	int h = drm_rect_height(&plane_state->base.src) >> 17;
+	int x = plane_state->uapi.src.x1 >> 17;
+	int y = plane_state->uapi.src.y1 >> 17;
+	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
+	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
 	u32 offset;
 
-	intel_add_fb_offsets(&x, &y, plane_state, 1);
-	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
+	intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
+	offset = intel_plane_compute_aligned_offset(&x, &y,
+						    plane_state, uv_plane);
 
 	/* FIXME not quite sure how/if these apply to the chroma plane */
 	if (w > max_width || h > max_height) {
@@ -3532,62 +3897,126 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
 		return -EINVAL;
 	}
 
-	plane_state->color_plane[1].offset = offset;
-	plane_state->color_plane[1].x = x;
-	plane_state->color_plane[1].y = y;
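+	/*
+	 * With a CCS modifier keep the UV plane offset and x/y in sync with
+	 * its CCS plane, mirroring the main surface handling.
+	 */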
+	if (is_ccs_modifier(fb->modifier)) {
+		int ccs_plane = main_to_ccs_plane(fb, uv_plane);
+		int aux_offset = plane_state->color_plane[ccs_plane].offset;
+		int alignment = intel_surf_alignment(fb, uv_plane);
+
+		if (offset > aux_offset)
+			offset = intel_plane_adjust_aligned_offset(&x, &y,
+								   plane_state,
+								   uv_plane,
+								   offset,
+								   aux_offset & ~(alignment - 1));
+
+		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
+						       offset, ccs_plane)) {
+			if (offset == 0)
+				break;
+
+			offset = intel_plane_adjust_aligned_offset(&x, &y,
+								   plane_state,
+								   uv_plane,
+								   offset, offset - alignment);
+		}
+
+		if (x != plane_state->color_plane[ccs_plane].x ||
+		    y != plane_state->color_plane[ccs_plane].y) {
+			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+			return -EINVAL;
+		}
+	}
+
+	plane_state->color_plane[uv_plane].offset = offset;
+	plane_state->color_plane[uv_plane].x = x;
+	plane_state->color_plane[uv_plane].y = y;
 
 	return 0;
 }
 
 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	int src_x = plane_state->base.src.x1 >> 16;
-	int src_y = plane_state->base.src.y1 >> 16;
-	int hsub = fb->format->hsub;
-	int vsub = fb->format->vsub;
-	int x = src_x / hsub;
-	int y = src_y / vsub;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	int src_x = plane_state->uapi.src.x1 >> 16;
+	int src_y = plane_state->uapi.src.y1 >> 16;
 	u32 offset;
+	int ccs_plane;
 
-	intel_add_fb_offsets(&x, &y, plane_state, 1);
-	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
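+	/* Set up x/y and the aligned offset for each CCS plane of the FB. */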
+	for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
+		int main_hsub, main_vsub;
+		int hsub, vsub;
+		int x, y;
 
-	plane_state->color_plane[1].offset = offset;
-	plane_state->color_plane[1].x = x * hsub + src_x % hsub;
-	plane_state->color_plane[1].y = y * vsub + src_y % vsub;
+		if (!is_ccs_plane(fb, ccs_plane))
+			continue;
+
+		intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
+					       ccs_to_main_plane(fb, ccs_plane));
+		intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
+
+		hsub *= main_hsub;
+		vsub *= main_vsub;
+		x = src_x / hsub;
+		y = src_y / vsub;
+
+		intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);
+
+		offset = intel_plane_compute_aligned_offset(&x, &y,
+							    plane_state,
+							    ccs_plane);
+
+		plane_state->color_plane[ccs_plane].offset = offset;
+		plane_state->color_plane[ccs_plane].x = (x * hsub +
+							 src_x % hsub) /
+							main_hsub;
+		plane_state->color_plane[ccs_plane].y = (y * vsub +
+							 src_y % vsub) /
+							main_vsub;
+	}
 
 	return 0;
 }
 
 int skl_check_plane_surface(struct intel_plane_state *plane_state)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	int ret;
+	bool needs_aux = false;
 
 	ret = intel_plane_compute_gtt(plane_state);
 	if (ret)
 		return ret;
 
-	if (!plane_state->base.visible)
+	if (!plane_state->uapi.visible)
 		return 0;
 
 	/*
-	 * Handle the AUX surface first since
-	 * the main surface setup depends on it.
+	 * Handle the AUX surface first since the main surface setup depends on
+	 * it.
 	 */
-	if (drm_format_info_is_yuv_semiplanar(fb->format)) {
-		ret = skl_check_nv12_aux_surface(plane_state);
-		if (ret)
-			return ret;
-	} else if (is_ccs_modifier(fb->modifier)) {
+	if (is_ccs_modifier(fb->modifier)) {
+		needs_aux = true;
 		ret = skl_check_ccs_aux_surface(plane_state);
 		if (ret)
 			return ret;
-	} else {
-		plane_state->color_plane[1].offset = ~0xfff;
-		plane_state->color_plane[1].x = 0;
-		plane_state->color_plane[1].y = 0;
+	}
+
+	if (intel_format_info_is_yuv_semiplanar(fb->format,
+						fb->modifier)) {
+		needs_aux = true;
+		ret = skl_check_nv12_aux_surface(plane_state);
+		if (ret)
+			return ret;
+	}
+
+	if (!needs_aux) {
+		int i;
+
+		for (i = 1; i < fb->format->num_planes; i++) {
+			plane_state->color_plane[i].offset = ~0xfff;
+			plane_state->color_plane[i].x = 0;
+			plane_state->color_plane[i].y = 0;
+		}
 	}
 
 	ret = skl_check_main_surface(plane_state);
@@ -3601,7 +4030,7 @@ static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
 			     const struct intel_plane_state *plane_state,
 			     unsigned int *num, unsigned int *den)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	unsigned int cpp = fb->format->cpp[0];
 
 	/*
@@ -3673,7 +4102,7 @@ i9xx_plane_max_stride(struct intel_plane *plane,
 
 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 dspcntr = 0;
 
@@ -3693,9 +4122,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
 			  const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->base.plane->dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	unsigned int rotation = plane_state->base.rotation;
+		to_i915(plane_state->uapi.plane->dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	unsigned int rotation = plane_state->hw.rotation;
 	u32 dspcntr;
 
 	dspcntr = DISPLAY_PLANE_ENABLE;
@@ -3711,6 +4140,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
 	case DRM_FORMAT_XRGB1555:
 		dspcntr |= DISPPLANE_BGRX555;
 		break;
+	case DRM_FORMAT_ARGB1555:
+		dspcntr |= DISPPLANE_BGRA555;
+		break;
 	case DRM_FORMAT_RGB565:
 		dspcntr |= DISPPLANE_BGRX565;
 		break;
@@ -3720,12 +4152,24 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
 	case DRM_FORMAT_XBGR8888:
 		dspcntr |= DISPPLANE_RGBX888;
 		break;
+	case DRM_FORMAT_ARGB8888:
+		dspcntr |= DISPPLANE_BGRA888;
+		break;
+	case DRM_FORMAT_ABGR8888:
+		dspcntr |= DISPPLANE_RGBA888;
+		break;
 	case DRM_FORMAT_XRGB2101010:
 		dspcntr |= DISPPLANE_BGRX101010;
 		break;
 	case DRM_FORMAT_XBGR2101010:
 		dspcntr |= DISPPLANE_RGBX101010;
 		break;
+	case DRM_FORMAT_ARGB2101010:
+		dspcntr |= DISPPLANE_BGRA101010;
+		break;
+	case DRM_FORMAT_ABGR2101010:
+		dspcntr |= DISPPLANE_RGBA101010;
+		break;
 	case DRM_FORMAT_XBGR16161616F:
 		dspcntr |= DISPPLANE_RGBX161616;
 		break;
@@ -3750,8 +4194,8 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->base.plane->dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+		to_i915(plane_state->uapi.plane->dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	int src_x, src_y, src_w;
 	u32 offset;
 	int ret;
@@ -3760,12 +4204,12 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
 	if (ret)
 		return ret;
 
-	if (!plane_state->base.visible)
+	if (!plane_state->uapi.visible)
 		return 0;
 
-	src_w = drm_rect_width(&plane_state->base.src) >> 16;
-	src_x = plane_state->base.src.x1 >> 16;
-	src_y = plane_state->base.src.y1 >> 16;
+	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	src_x = plane_state->uapi.src.x1 >> 16;
+	src_y = plane_state->uapi.src.y1 >> 16;
 
 	/* Undocumented hardware limit on i965/g4x/vlv/chv */
 	if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
@@ -3783,14 +4227,14 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
 	 * Put the final coordinates back so that the src
 	 * coordinate checks will see the right values.
 	 */
-	drm_rect_translate_to(&plane_state->base.src,
+	drm_rect_translate_to(&plane_state->uapi.src,
 			      src_x << 16, src_y << 16);
 
 	/* HSW/BDW do this automagically in hardware */
 	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
-		unsigned int rotation = plane_state->base.rotation;
-		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
-		int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+		unsigned int rotation = plane_state->hw.rotation;
+		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
 
 		if (rotation & DRM_MODE_ROTATE_180) {
 			src_x += src_w - 1;
@@ -3827,15 +4271,15 @@ static int
 i9xx_plane_check(struct intel_crtc_state *crtc_state,
 		 struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	int ret;
 
 	ret = chv_plane_check_rotation(plane_state);
 	if (ret)
 		return ret;
 
-	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
-						  &crtc_state->base,
+	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+						  &crtc_state->uapi,
 						  DRM_PLANE_HELPER_NO_SCALING,
 						  DRM_PLANE_HELPER_NO_SCALING,
 						  i9xx_plane_has_windowing(plane),
@@ -3847,7 +4291,7 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
 	if (ret)
 		return ret;
 
-	if (!plane_state->base.visible)
+	if (!plane_state->uapi.visible)
 		return 0;
 
 	ret = intel_plane_check_src_coordinates(plane_state);
@@ -3868,10 +4312,10 @@ static void i9xx_update_plane(struct intel_plane *plane,
 	u32 linear_offset;
 	int x = plane_state->color_plane[0].x;
 	int y = plane_state->color_plane[0].y;
-	int crtc_x = plane_state->base.dst.x1;
-	int crtc_y = plane_state->base.dst.y1;
-	int crtc_w = drm_rect_width(&plane_state->base.dst);
-	int crtc_h = drm_rect_height(&plane_state->base.dst);
+	int crtc_x = plane_state->uapi.dst.x1;
+	int crtc_y = plane_state->uapi.dst.y1;
+	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
 	unsigned long irqflags;
 	u32 dspaddr_offset;
 	u32 dspcntr;
@@ -4011,7 +4455,7 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
  */
 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	const struct intel_crtc_scaler_state *scaler_state =
 		&crtc_state->scaler_state;
 	int i;
@@ -4030,7 +4474,7 @@ static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
 	 * The stride is either expressed as a multiple of 64 bytes chunks for
 	 * linear buffers or in number of tiles for tiled buffers.
 	 */
-	if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+	if (is_surface_linear(fb, color_plane))
 		return 64;
 	else if (drm_rotation_90_or_270(rotation))
 		return intel_tile_height(fb, color_plane);
@@ -4041,8 +4485,8 @@ static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
 		     int color_plane)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	unsigned int rotation = plane_state->base.rotation;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	unsigned int rotation = plane_state->hw.rotation;
 	u32 stride = plane_state->color_plane[color_plane].stride;
 
 	if (color_plane >= fb->format->num_planes)
@@ -4065,8 +4509,10 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
 	case DRM_FORMAT_ARGB8888:
 		return PLANE_CTL_FORMAT_XRGB_8888;
 	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ABGR2101010:
 		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
 	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
 		return PLANE_CTL_FORMAT_XRGB_2101010;
 	case DRM_FORMAT_XBGR16161616F:
 	case DRM_FORMAT_ABGR16161616F:
@@ -4111,10 +4557,10 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
 
 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
 {
-	if (!plane_state->base.fb->format->has_alpha)
+	if (!plane_state->hw.fb->format->has_alpha)
 		return PLANE_CTL_ALPHA_DISABLE;
 
-	switch (plane_state->base.pixel_blend_mode) {
+	switch (plane_state->hw.pixel_blend_mode) {
 	case DRM_MODE_BLEND_PIXEL_NONE:
 		return PLANE_CTL_ALPHA_DISABLE;
 	case DRM_MODE_BLEND_PREMULTI:
@@ -4122,17 +4568,17 @@ static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
 	case DRM_MODE_BLEND_COVERAGE:
 		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
 	default:
-		MISSING_CASE(plane_state->base.pixel_blend_mode);
+		MISSING_CASE(plane_state->hw.pixel_blend_mode);
 		return PLANE_CTL_ALPHA_DISABLE;
 	}
 }
 
 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
 {
-	if (!plane_state->base.fb->format->has_alpha)
+	if (!plane_state->hw.fb->format->has_alpha)
 		return PLANE_COLOR_ALPHA_DISABLE;
 
-	switch (plane_state->base.pixel_blend_mode) {
+	switch (plane_state->hw.pixel_blend_mode) {
 	case DRM_MODE_BLEND_PIXEL_NONE:
 		return PLANE_COLOR_ALPHA_DISABLE;
 	case DRM_MODE_BLEND_PREMULTI:
@@ -4140,7 +4586,7 @@ static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state
 	case DRM_MODE_BLEND_COVERAGE:
 		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
 	default:
-		MISSING_CASE(plane_state->base.pixel_blend_mode);
+		MISSING_CASE(plane_state->hw.pixel_blend_mode);
 		return PLANE_COLOR_ALPHA_DISABLE;
 	}
 }
@@ -4156,6 +4602,12 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier)
 		return PLANE_CTL_TILED_Y;
 	case I915_FORMAT_MOD_Y_TILED_CCS:
 		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+		return PLANE_CTL_TILED_Y |
+		       PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
+		       PLANE_CTL_CLEAR_COLOR_DISABLE;
+	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+		return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
 	case I915_FORMAT_MOD_Yf_TILED:
 		return PLANE_CTL_TILED_YF;
 	case I915_FORMAT_MOD_Yf_TILED_CCS:
@@ -4206,7 +4658,7 @@ static u32 cnl_plane_ctl_flip(unsigned int reflect)
 
 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	u32 plane_ctl = 0;
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -4225,9 +4677,9 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
 		  const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->base.plane->dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	unsigned int rotation = plane_state->base.rotation;
+		to_i915(plane_state->uapi.plane->dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	unsigned int rotation = plane_state->hw.rotation;
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 	u32 plane_ctl;
 
@@ -4237,10 +4689,10 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
 		plane_ctl |= skl_plane_ctl_alpha(plane_state);
 		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
 
-		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
 			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
 
-		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
 			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
 	}
 
@@ -4262,7 +4714,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
 
 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	u32 plane_color_ctl = 0;
 
 	if (INTEL_GEN(dev_priv) >= 11)
@@ -4281,21 +4733,21 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
 			const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->base.plane->dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+		to_i915(plane_state->uapi.plane->dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	u32 plane_color_ctl = 0;
 
 	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
 	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
 
 	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
-		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
 			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
 		else
 			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
 
-		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
 			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
 	} else if (fb->format->is_yuv) {
 		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
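Editor's note on the rename that recurs through these hunks: the old single "base" state is split into a "uapi" part and a "hw" part. Judging from the accesses above, uapi carries what userspace handed in through the atomic ioctl (crtc and plane pointers, src/dst rectangles, visibility, encoder mask), while hw carries what is actually programmed (fb, rotation, adjusted mode, color encoding and range). A minimal reading sketch; the split itself is taken from this diff, the interpretation is an assumption:

	/* uapi = requested by userspace, hw = what gets programmed (assumed) */
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;

For an ordinary plane the two views are expected to agree; the split presumably exists so that derived or copied hardware state can diverge from the userspace request without confusing state readback and checking.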
@@ -4483,7 +4935,7 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
 
 static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 trans_ddi_func_ctl2_val;
 	u8 master_select;
@@ -4511,20 +4963,6 @@ static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state
 		   trans_ddi_func_ctl2_val);
 }
 
-static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-	if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
-		return;
-
-	DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
-		      transcoder_name(old_crtc_state->cpu_transcoder));
-
-	I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
-}
-
 static void intel_fdi_normal_train(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -4567,8 +5005,8 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
 }
 
 /* The FDI link training functions for ILK/Ibexpeak. */
-static void ironlake_fdi_link_train(struct intel_crtc *crtc,
-				    const struct intel_crtc_state *crtc_state)
+static void ilk_fdi_link_train(struct intel_crtc *crtc,
+			       const struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4577,7 +5015,7 @@ static void ironlake_fdi_link_train(struct intel_crtc *crtc,
 	u32 temp, tries;
 
 	/* FDI needs bits from pipe first */
-	assert_pipe_enabled(dev_priv, pipe);
+	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
 
 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
 	   for train result */
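Editor's note: the assert above is now keyed by the CPU transcoder rather than the pipe; the same change appears later for assert_pipe_disabled() in i9xx_pfit_enable(). The assumed signatures after this change, inferred only from the call sites and not quoted from the header:

	void assert_pipe_enabled(struct drm_i915_private *dev_priv,
				 enum transcoder cpu_transcoder);
	void assert_pipe_disabled(struct drm_i915_private *dev_priv,
				  enum transcoder cpu_transcoder);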
@@ -4920,9 +5358,9 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
 	DRM_DEBUG_KMS("FDI train done.\n");
 }
 
-static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
+static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
 	enum pipe pipe = intel_crtc->pipe;
 	i915_reg_t reg;
@@ -4957,7 +5395,7 @@ static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
 	}
 }
 
-static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4987,12 +5425,10 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
 	udelay(100);
 }
 
-static void ironlake_fdi_disable(struct drm_crtc *crtc)
+static void ilk_fdi_disable(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 	i915_reg_t reg;
 	u32 temp;
 
@@ -5083,9 +5519,9 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
 /* Program iCLKIP clock to the desired frequency */
 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	int clock = crtc_state->base.adjusted_mode.crtc_clock;
+	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
 	u32 temp;
 
@@ -5196,10 +5632,10 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
 				 desired_divisor << auxdiv);
 }
 
-static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
-						enum pipe pch_transcoder)
+static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
+					   enum pipe pch_transcoder)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
@@ -5240,9 +5676,9 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool e
 	POSTING_READ(SOUTH_CHICKEN1);
 }
 
-static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
+static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	switch (crtc->pipe) {
@@ -5272,7 +5708,7 @@ static struct intel_encoder *
 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
 			   const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	const struct drm_connector_state *connector_state;
 	const struct drm_connector *connector;
 	struct intel_encoder *encoder = NULL;
@@ -5301,10 +5737,10 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
  *   - DP transcoding bits
  *   - transcoder
  */
-static void ironlake_pch_enable(const struct intel_atomic_state *state,
-				const struct intel_crtc_state *crtc_state)
+static void ilk_pch_enable(const struct intel_atomic_state *state,
+			   const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe = crtc->pipe;
@@ -5313,7 +5749,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
 	assert_pch_transcoder_disabled(dev_priv, pipe);
 
 	if (IS_IVYBRIDGE(dev_priv))
-		ivybridge_update_fdi_bc_bifurcation(crtc_state);
+		ivb_update_fdi_bc_bifurcation(crtc_state);
 
 	/* Write the TU size bits before fdi link training, so that error
 	 * detection works. */
@@ -5350,7 +5786,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
 
 	/* set transcoder timing, panel must allow it */
 	assert_panel_unlocked(dev_priv, pipe);
-	ironlake_pch_transcoder_set_timings(crtc_state, pipe);
+	ilk_pch_transcoder_set_timings(crtc_state, pipe);
 
 	intel_fdi_normal_train(crtc);
 
@@ -5358,7 +5794,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
 	if (HAS_PCH_CPT(dev_priv) &&
 	    intel_crtc_has_dp_encoder(crtc_state)) {
 		const struct drm_display_mode *adjusted_mode =
-			&crtc_state->base.adjusted_mode;
+			&crtc_state->hw.adjusted_mode;
 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
 		i915_reg_t reg = TRANS_DP_CTL(pipe);
 		enum port port;
@@ -5382,13 +5818,13 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
 		I915_WRITE(reg, temp);
 	}
 
-	ironlake_enable_pch_transcoder(crtc_state);
+	ilk_enable_pch_transcoder(crtc_state);
 }
 
 static void lpt_pch_enable(const struct intel_atomic_state *state,
 			   const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
@@ -5397,14 +5833,14 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
 	lpt_program_iclkip(crtc_state);
 
 	/* Set transcoder timing. */
-	ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
+	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
 
 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
 
-static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe)
+static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
+			       enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	i915_reg_t dslreg = PIPEDSL(pipe);
 	u32 temp;
 
@@ -5500,15 +5936,16 @@ static int
 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 		  unsigned int scaler_user, int *scaler_id,
 		  int src_w, int src_h, int dst_w, int dst_h,
-		  const struct drm_format_info *format, bool need_scaler)
+		  const struct drm_format_info *format,
+		  u64 modifier, bool need_scaler)
 {
 	struct intel_crtc_scaler_state *scaler_state =
 		&crtc_state->scaler_state;
 	struct intel_crtc *intel_crtc =
-		to_intel_crtc(crtc_state->base.crtc);
+		to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 
 	/*
 	 * Src coordinates are already rotated by 270 degrees for
@@ -5524,7 +5961,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 	 * Once NV12 is enabled, handle it here while allocating scaler
 	 * for NV12.
 	 */
-	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
+	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
 	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
 		return -EINVAL;
@@ -5554,7 +5991,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 		return 0;
 	}
 
-	if (format && drm_format_info_is_yuv_semiplanar(format) &&
+	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
 	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
 		DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
 		return -EINVAL;
@@ -5596,17 +6033,18 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
  */
 int skl_update_scaler_crtc(struct intel_crtc_state *state)
 {
-	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
 	bool need_scaler = false;
 
 	if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
 		need_scaler = true;
 
-	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+	return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
 				 &state->scaler_state.scaler_id,
 				 state->pipe_src_w, state->pipe_src_h,
 				 adjusted_mode->crtc_hdisplay,
-				 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
+				 adjusted_mode->crtc_vdisplay, NULL, 0,
+				 need_scaler);
 }
 
 /**
@@ -5622,26 +6060,28 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 				   struct intel_plane_state *plane_state)
 {
 	struct intel_plane *intel_plane =
-		to_intel_plane(plane_state->base.plane);
+		to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
-	struct drm_framebuffer *fb = plane_state->base.fb;
+	struct drm_framebuffer *fb = plane_state->hw.fb;
 	int ret;
-	bool force_detach = !fb || !plane_state->base.visible;
+	bool force_detach = !fb || !plane_state->uapi.visible;
 	bool need_scaler = false;
 
 	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
 	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
-	    fb && drm_format_info_is_yuv_semiplanar(fb->format))
+	    fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
 		need_scaler = true;
 
 	ret = skl_update_scaler(crtc_state, force_detach,
 				drm_plane_index(&intel_plane->base),
 				&plane_state->scaler_id,
-				drm_rect_width(&plane_state->base.src) >> 16,
-				drm_rect_height(&plane_state->base.src) >> 16,
-				drm_rect_width(&plane_state->base.dst),
-				drm_rect_height(&plane_state->base.dst),
-				fb ? fb->format : NULL, need_scaler);
+				drm_rect_width(&plane_state->uapi.src) >> 16,
+				drm_rect_height(&plane_state->uapi.src) >> 16,
+				drm_rect_width(&plane_state->uapi.dst),
+				drm_rect_height(&plane_state->uapi.dst),
+				fb ? fb->format : NULL,
+				fb ? fb->modifier : 0,
+				need_scaler);
 
 	if (ret || plane_state->scaler_id < 0)
 		return ret;
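Editor's note: with the GEN12 CCS modifiers in the mix, whether a framebuffer is YUV semi-planar is no longer decided from the fourcc alone, presumably because those modifiers change the plane layout that the generic format info describes, so the scaler checks now pass the modifier as well (and 0 when there is no fb). A minimal caller sketch using only what this hunk already shows:

	/* NULL format and 0 modifier mean "no fb"; otherwise pass both. */
	bool semiplanar = fb &&
		intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier);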
@@ -5663,6 +6103,8 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 	case DRM_FORMAT_ARGB8888:
 	case DRM_FORMAT_XRGB2101010:
 	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
 	case DRM_FORMAT_YUYV:
 	case DRM_FORMAT_YVYU:
 	case DRM_FORMAT_UYVY:
@@ -5695,17 +6137,18 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 	return 0;
 }
 
-static void skylake_scaler_disable(struct intel_crtc *crtc)
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	int i;
 
 	for (i = 0; i < crtc->num_scalers; i++)
 		skl_detach_scaler(crtc, i);
 }
 
-static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
+static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	const struct intel_crtc_scaler_state *scaler_state =
@@ -5740,9 +6183,9 @@ static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
 	}
 }
 
-static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
+static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
@@ -5763,7 +6206,7 @@ static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
 
 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
@@ -5799,7 +6242,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
 
 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
@@ -5834,77 +6277,10 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
 	 */
 }
 
-/**
- * intel_post_enable_primary - Perform operations after enabling primary plane
- * @crtc: the CRTC whose primary plane was just enabled
- * @new_crtc_state: the enabling state
- *
- * Performs potentially sleeping operations that must be done after the primary
- * plane is enabled, such as updating FBC and IPS.  Note that this may be
- * called due to an explicit primary plane update, or due to an implicit
- * re-enable that is caused when a sprite plane is updated to no longer
- * completely hide the primary plane.
- */
-static void
-intel_post_enable_primary(struct drm_crtc *crtc,
-			  const struct intel_crtc_state *new_crtc_state)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
-
-	/*
-	 * Gen2 reports pipe underruns whenever all planes are disabled.
-	 * So don't enable underrun reporting before at least some planes
-	 * are enabled.
-	 * FIXME: Need to fix the logic to work when we turn off all planes
-	 * but leave the pipe running.
-	 */
-	if (IS_GEN(dev_priv, 2))
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
-	/* Underruns don't always raise interrupts, so check manually. */
-	intel_check_cpu_fifo_underruns(dev_priv);
-	intel_check_pch_fifo_underruns(dev_priv);
-}
-
-/* FIXME get rid of this and use pre_plane_update */
-static void
-intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
-
-	/*
-	 * Gen2 reports pipe underruns whenever all planes are disabled.
-	 * So disable underrun reporting before all the planes get disabled.
-	 */
-	if (IS_GEN(dev_priv, 2))
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
-	hsw_disable_ips(to_intel_crtc_state(crtc->state));
-
-	/*
-	 * Vblank time updates from the shadow to live plane control register
-	 * are blocked if the memory self-refresh mode is active at that
-	 * moment. So to make sure the plane gets truly disabled, disable
-	 * first the self-refresh mode. The self-refresh enable bit in turn
-	 * will be checked/applied by the HW only at the next frame start
-	 * event which is after the vblank start event, so we need to have a
-	 * wait-for-vblank between disabling the plane and the pipe.
-	 */
-	if (HAS_GMCH(dev_priv) &&
-	    intel_set_memory_cxsr(dev_priv, false))
-		intel_wait_for_vblank(dev_priv, pipe);
-}
-
 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
 				       const struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	if (!old_crtc_state->ips_enabled)
@@ -5920,7 +6296,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
 	 * Disable IPS before we program the LUT.
 	 */
 	if (IS_HASWELL(dev_priv) &&
-	    (new_crtc_state->base.color_mgmt_changed ||
+	    (new_crtc_state->uapi.color_mgmt_changed ||
 	     new_crtc_state->update_pipe) &&
 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
 		return true;
@@ -5931,7 +6307,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
 				       const struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	if (!new_crtc_state->ips_enabled)
@@ -5947,7 +6323,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
 	 * Re-enable IPS after the LUT has been programmed.
 	 */
 	if (IS_HASWELL(dev_priv) &&
-	    (new_crtc_state->base.color_mgmt_changed ||
+	    (new_crtc_state->uapi.color_mgmt_changed ||
 	     new_crtc_state->update_pipe) &&
 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
 		return true;
@@ -5957,15 +6333,16 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
 	 * forcibly enable IPS on the first fastset.
 	 */
 	if (new_crtc_state->update_pipe &&
-	    old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
+	    old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
 		return true;
 
 	return !old_crtc_state->ips_enabled;
 }
 
-static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
-			  const struct intel_crtc_state *crtc_state)
+static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
 	if (!crtc_state->nv12_planes)
 		return false;
 
@@ -5976,9 +6353,10 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
 	return false;
 }
 
-static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
-			       const struct intel_crtc_state *crtc_state)
+static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
 	/* Wa_2006604312:icl */
 	if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
 		return true;
@@ -5986,89 +6364,82 @@ static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
 	return false;
 }
 
-static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
+static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
+			    const struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_atomic_state *state = old_crtc_state->base.state;
-	struct intel_crtc_state *pipe_config =
-		intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
-						crtc);
-	struct drm_plane *primary = crtc->base.primary;
-	struct drm_plane_state *old_primary_state =
-		drm_atomic_get_old_plane_state(state, primary);
-
-	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
-
-	if (pipe_config->update_wm_post && pipe_config->base.active)
-		intel_update_watermarks(crtc);
-
-	if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
-		hsw_enable_ips(pipe_config);
-
-	if (old_primary_state) {
-		struct drm_plane_state *new_primary_state =
-			drm_atomic_get_new_plane_state(state, primary);
-
-		intel_fbc_post_update(crtc);
-
-		if (new_primary_state->visible &&
-		    (needs_modeset(pipe_config) ||
-		     !old_primary_state->visible))
-			intel_post_enable_primary(&crtc->base, pipe_config);
-	}
-
-	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
-	    !needs_nv12_wa(dev_priv, pipe_config))
-		skl_wa_827(dev_priv, crtc->pipe, false);
-
-	if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
-	    !needs_scalerclk_wa(dev_priv, pipe_config))
-		icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
+	return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
+		new_crtc_state->active_planes;
 }
 
-static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
-				   struct intel_crtc_state *pipe_config)
+static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
+			     const struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_atomic_state *state = old_crtc_state->base.state;
-	struct drm_plane *primary = crtc->base.primary;
-	struct drm_plane_state *old_primary_state =
-		drm_atomic_get_old_plane_state(state, primary);
-	bool modeset = needs_modeset(pipe_config);
-	struct intel_atomic_state *intel_state =
-		to_intel_atomic_state(state);
+	return old_crtc_state->active_planes &&
+		(!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
+}
 
-	if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
+static void intel_post_plane_update(struct intel_atomic_state *state,
+				    struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_plane *primary = to_intel_plane(crtc->base.primary);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	const struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	const struct intel_plane_state *new_primary_state =
+		intel_atomic_get_new_plane_state(state, primary);
+	enum pipe pipe = crtc->pipe;
+
+	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
+
+	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
+		intel_update_watermarks(crtc);
+
+	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
+		hsw_enable_ips(new_crtc_state);
+
+	if (new_primary_state)
+		intel_fbc_post_update(crtc);
+
+	if (needs_nv12_wa(old_crtc_state) &&
+	    !needs_nv12_wa(new_crtc_state))
+		skl_wa_827(dev_priv, pipe, false);
+
+	if (needs_scalerclk_wa(old_crtc_state) &&
+	    !needs_scalerclk_wa(new_crtc_state))
+		icl_wa_scalerclkgating(dev_priv, pipe, false);
+}
+
+static void intel_pre_plane_update(struct intel_atomic_state *state,
+				   struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_plane *primary = to_intel_plane(crtc->base.primary);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	const struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	const struct intel_plane_state *new_primary_state =
+		intel_atomic_get_new_plane_state(state, primary);
+	enum pipe pipe = crtc->pipe;
+
+	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
 		hsw_disable_ips(old_crtc_state);
 
-	if (old_primary_state) {
-		struct intel_plane_state *new_primary_state =
-			intel_atomic_get_new_plane_state(intel_state,
-							 to_intel_plane(primary));
-
-		intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
-		/*
-		 * Gen2 reports pipe underruns whenever all planes are disabled.
-		 * So disable underrun reporting before all the planes get disabled.
-		 */
-		if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
-		    (modeset || !new_primary_state->base.visible))
-			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
-	}
+	if (new_primary_state &&
+	    intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state))
+		intel_wait_for_vblank(dev_priv, pipe);
 
 	/* Display WA 827 */
-	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
-	    needs_nv12_wa(dev_priv, pipe_config))
-		skl_wa_827(dev_priv, crtc->pipe, true);
+	if (!needs_nv12_wa(old_crtc_state) &&
+	    needs_nv12_wa(new_crtc_state))
+		skl_wa_827(dev_priv, pipe, true);
 
 	/* Wa_2006604312:icl */
-	if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
-	    needs_scalerclk_wa(dev_priv, pipe_config))
-		icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
+	if (!needs_scalerclk_wa(old_crtc_state) &&
+	    needs_scalerclk_wa(new_crtc_state))
+		icl_wa_scalerclkgating(dev_priv, pipe, true);
 
 	/*
 	 * Vblank time updates from the shadow to live plane control register
@@ -6079,9 +6450,9 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 	 * event which is after the vblank start event, so we need to have a
 	 * wait-for-vblank between disabling the plane and the pipe.
 	 */
-	if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
-	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
-		intel_wait_for_vblank(dev_priv, crtc->pipe);
+	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
+	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+		intel_wait_for_vblank(dev_priv, pipe);
 
 	/*
 	 * IVB workaround: must disable low power watermarks for at least
@@ -6090,36 +6461,45 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 	 *
 	 * WaCxSRDisabledForSpriteScaling:ivb
 	 */
-	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
-	    old_crtc_state->base.active)
-		intel_wait_for_vblank(dev_priv, crtc->pipe);
+	if (old_crtc_state->hw.active &&
+	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
+		intel_wait_for_vblank(dev_priv, pipe);
 
 	/*
-	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
-	 * watermark programming here.
+	 * If we're doing a modeset we don't need to do any
+	 * pre-vblank watermark programming here.
 	 */
-	if (needs_modeset(pipe_config))
-		return;
+	if (!needs_modeset(new_crtc_state)) {
+		/*
+		 * For platforms that support atomic watermarks, program the
+		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
+		 * will be the intermediate values that are safe for both pre- and
+		 * post- vblank; when vblank happens, the 'active' values will be set
+		 * to the final 'target' values and we'll do this again to get the
+		 * optimal watermarks.  For gen9+ platforms, the values we program here
+		 * will be the final target values which will get automatically latched
+		 * at vblank time; no further programming will be necessary.
+		 *
+		 * If a platform hasn't been transitioned to atomic watermarks yet,
+		 * we'll continue to update watermarks the old way, if flags tell
+		 * us to.
+		 */
+		if (dev_priv->display.initial_watermarks)
+			dev_priv->display.initial_watermarks(state, crtc);
+		else if (new_crtc_state->update_wm_pre)
+			intel_update_watermarks(crtc);
+	}
 
 	/*
-	 * For platforms that support atomic watermarks, program the
-	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
-	 * will be the intermediate values that are safe for both pre- and
-	 * post- vblank; when vblank happens, the 'active' values will be set
-	 * to the final 'target' values and we'll do this again to get the
-	 * optimal watermarks.  For gen9+ platforms, the values we program here
-	 * will be the final target values which will get automatically latched
-	 * at vblank time; no further programming will be necessary.
+	 * Gen2 reports pipe underruns whenever all planes are disabled.
+	 * So disable underrun reporting before all the planes get disabled.
 	 *
-	 * If a platform hasn't been transitioned to atomic watermarks yet,
-	 * we'll continue to update watermarks the old way, if flags tell
-	 * us to.
+	 * We do this after .initial_watermarks() so that we have a
+	 * chance of catching underruns with the intermediate watermarks
+	 * vs. the old plane configuration.
 	 */
-	if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(intel_state,
-						     pipe_config);
-	else if (pipe_config->update_wm_pre)
-		intel_update_watermarks(crtc);
+	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 }
 
 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
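Editor's note: the new planes_enabling()/planes_disabling() predicates capture "all planes coming up" and "all planes going away" across both fastsets and full modesets, so the gen2 underrun workaround no longer needs to reason about the primary plane specifically. A minimal usage sketch mirroring the pre-plane-update call above; the symmetric post-update use is assumed, not shown in this hunk:

	/* Re-enable underrun reporting once some planes are active again. */
	if (IS_GEN(dev_priv, 2) &&
	    planes_enabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);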
@@ -6143,7 +6523,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
 
 		intel_disable_plane(plane, new_crtc_state);
 
-		if (old_plane_state->base.visible)
+		if (old_plane_state->uapi.visible)
 			fb_bits |= plane->frontbuffer_bit;
 	}
 
@@ -6167,45 +6547,29 @@ intel_connector_primary_encoder(struct intel_connector *connector)
 	if (connector->mst_port)
 		return &dp_to_dig_port(connector->mst_port)->base;
 
-	encoder = intel_attached_encoder(&connector->base);
+	encoder = intel_attached_encoder(connector);
 	WARN_ON(!encoder);
 
 	return encoder;
 }
 
-static bool
-intel_connector_needs_modeset(struct intel_atomic_state *state,
-			      const struct drm_connector_state *old_conn_state,
-			      const struct drm_connector_state *new_conn_state)
-{
-	struct intel_crtc *old_crtc = old_conn_state->crtc ?
-				      to_intel_crtc(old_conn_state->crtc) : NULL;
-	struct intel_crtc *new_crtc = new_conn_state->crtc ?
-				      to_intel_crtc(new_conn_state->crtc) : NULL;
-
-	return new_crtc != old_crtc ||
-	       (new_crtc &&
-		needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
-}
-
 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
 {
-	struct drm_connector_state *old_conn_state;
 	struct drm_connector_state *new_conn_state;
-	struct drm_connector *conn;
+	struct drm_connector *connector;
 	int i;
 
-	for_each_oldnew_connector_in_state(&state->base, conn,
-					   old_conn_state, new_conn_state, i) {
+	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
+					i) {
+		struct intel_connector *intel_connector;
 		struct intel_encoder *encoder;
 		struct intel_crtc *crtc;
 
-		if (!intel_connector_needs_modeset(state,
-						   old_conn_state,
-						   new_conn_state))
+		if (!intel_connector_needs_modeset(state, connector))
 			continue;
 
-		encoder = intel_connector_primary_encoder(to_intel_connector(conn));
+		intel_connector = to_intel_connector(connector);
+		encoder = intel_connector_primary_encoder(intel_connector);
 		if (!encoder->update_prepare)
 			continue;
 
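Editor's note: the open-coded three-argument helper removed above is replaced by a two-argument intel_connector_needs_modeset(state, connector) used in this loop and the one that follows; its definition is not part of these hunks. Assumed prototype, inferred only from the call sites:

	bool intel_connector_needs_modeset(struct intel_atomic_state *state,
					   struct drm_connector *connector);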
@@ -6217,22 +6581,21 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state)
 
 static void intel_encoders_update_complete(struct intel_atomic_state *state)
 {
-	struct drm_connector_state *old_conn_state;
 	struct drm_connector_state *new_conn_state;
-	struct drm_connector *conn;
+	struct drm_connector *connector;
 	int i;
 
-	for_each_oldnew_connector_in_state(&state->base, conn,
-					   old_conn_state, new_conn_state, i) {
+	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
+					i) {
+		struct intel_connector *intel_connector;
 		struct intel_encoder *encoder;
 		struct intel_crtc *crtc;
 
-		if (!intel_connector_needs_modeset(state,
-						   old_conn_state,
-						   new_conn_state))
+		if (!intel_connector_needs_modeset(state, connector))
 			continue;
 
-		encoder = intel_connector_primary_encoder(to_intel_connector(conn));
+		intel_connector = to_intel_connector(connector);
+		encoder = intel_connector_primary_encoder(intel_connector);
 		if (!encoder->update_complete)
 			continue;
 
@@ -6242,11 +6605,12 @@ static void intel_encoders_update_complete(struct intel_atomic_state *state)
 	}
 }
 
-static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
-					  struct intel_crtc_state *crtc_state,
-					  struct intel_atomic_state *state)
+static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
+					  struct intel_crtc *crtc)
 {
-	struct drm_connector_state *conn_state;
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	const struct drm_connector_state *conn_state;
 	struct drm_connector *conn;
 	int i;
 
@@ -6262,11 +6626,12 @@ static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
 	}
 }
 
-static void intel_encoders_pre_enable(struct intel_crtc *crtc,
-				      struct intel_crtc_state *crtc_state,
-				      struct intel_atomic_state *state)
+static void intel_encoders_pre_enable(struct intel_atomic_state *state,
+				      struct intel_crtc *crtc)
 {
-	struct drm_connector_state *conn_state;
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	const struct drm_connector_state *conn_state;
 	struct drm_connector *conn;
 	int i;
 
@@ -6282,11 +6647,12 @@ static void intel_encoders_pre_enable(struct intel_crtc *crtc,
 	}
 }
 
-static void intel_encoders_enable(struct intel_crtc *crtc,
-				  struct intel_crtc_state *crtc_state,
-				  struct intel_atomic_state *state)
+static void intel_encoders_enable(struct intel_atomic_state *state,
+				  struct intel_crtc *crtc)
 {
-	struct drm_connector_state *conn_state;
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	const struct drm_connector_state *conn_state;
 	struct drm_connector *conn;
 	int i;
 
@@ -6303,11 +6669,12 @@ static void intel_encoders_enable(struct intel_crtc *crtc,
 	}
 }
 
-static void intel_encoders_disable(struct intel_crtc *crtc,
-				   struct intel_crtc_state *old_crtc_state,
-				   struct intel_atomic_state *state)
+static void intel_encoders_disable(struct intel_atomic_state *state,
+				   struct intel_crtc *crtc)
 {
-	struct drm_connector_state *old_conn_state;
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	const struct drm_connector_state *old_conn_state;
 	struct drm_connector *conn;
 	int i;
 
@@ -6324,11 +6691,12 @@ static void intel_encoders_disable(struct intel_crtc *crtc,
 	}
 }
 
-static void intel_encoders_post_disable(struct intel_crtc *crtc,
-					struct intel_crtc_state *old_crtc_state,
-					struct intel_atomic_state *state)
+static void intel_encoders_post_disable(struct intel_atomic_state *state,
+					struct intel_crtc *crtc)
 {
-	struct drm_connector_state *old_conn_state;
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	const struct drm_connector_state *old_conn_state;
 	struct drm_connector *conn;
 	int i;
 
@@ -6344,11 +6712,12 @@ static void intel_encoders_post_disable(struct intel_crtc *crtc,
 	}
 }
 
-static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
-					    struct intel_crtc_state *old_crtc_state,
-					    struct intel_atomic_state *state)
+static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
+					    struct intel_crtc *crtc)
 {
-	struct drm_connector_state *old_conn_state;
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	const struct drm_connector_state *old_conn_state;
 	struct drm_connector *conn;
 	int i;
 
@@ -6364,11 +6733,12 @@ static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
 	}
 }
 
-static void intel_encoders_update_pipe(struct intel_crtc *crtc,
-				       struct intel_crtc_state *crtc_state,
-				       struct intel_atomic_state *state)
+static void intel_encoders_update_pipe(struct intel_atomic_state *state,
+				       struct intel_crtc *crtc)
 {
-	struct drm_connector_state *conn_state;
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	const struct drm_connector_state *conn_state;
 	struct drm_connector *conn;
 	int i;
 
@@ -6386,22 +6756,21 @@ static void intel_encoders_update_pipe(struct intel_crtc *crtc,
 
 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
 
 	plane->disable_plane(plane, crtc_state);
 }
 
-static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
-				 struct intel_atomic_state *state)
+static void ilk_crtc_enable(struct intel_atomic_state *state,
+			    struct intel_crtc *crtc)
 {
-	struct drm_crtc *crtc = pipe_config->base.crtc;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
+	const struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 
-	if (WARN_ON(intel_crtc->active))
+	if (WARN_ON(crtc->active))
 		return;
 
 	/*
@@ -6417,61 +6786,59 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
-	if (pipe_config->has_pch_encoder)
-		intel_prepare_shared_dpll(pipe_config);
+	if (new_crtc_state->has_pch_encoder)
+		intel_prepare_shared_dpll(new_crtc_state);
 
-	if (intel_crtc_has_dp_encoder(pipe_config))
-		intel_dp_set_m_n(pipe_config, M1_N1);
+	if (intel_crtc_has_dp_encoder(new_crtc_state))
+		intel_dp_set_m_n(new_crtc_state, M1_N1);
 
-	intel_set_pipe_timings(pipe_config);
-	intel_set_pipe_src_size(pipe_config);
+	intel_set_pipe_timings(new_crtc_state);
+	intel_set_pipe_src_size(new_crtc_state);
 
-	if (pipe_config->has_pch_encoder) {
-		intel_cpu_transcoder_set_m_n(pipe_config,
-					     &pipe_config->fdi_m_n, NULL);
-	}
+	if (new_crtc_state->has_pch_encoder)
+		intel_cpu_transcoder_set_m_n(new_crtc_state,
+					     &new_crtc_state->fdi_m_n, NULL);
 
-	ironlake_set_pipeconf(pipe_config);
+	ilk_set_pipeconf(new_crtc_state);
 
-	intel_crtc->active = true;
+	crtc->active = true;
 
-	intel_encoders_pre_enable(intel_crtc, pipe_config, state);
+	intel_encoders_pre_enable(state, crtc);
 
-	if (pipe_config->has_pch_encoder) {
+	if (new_crtc_state->has_pch_encoder) {
 		/* Note: FDI PLL enabling _must_ be done before we enable the
 		 * cpu pipes, hence this is separate from all the other fdi/pch
 		 * enabling. */
-		ironlake_fdi_pll_enable(pipe_config);
+		ilk_fdi_pll_enable(new_crtc_state);
 	} else {
 		assert_fdi_tx_disabled(dev_priv, pipe);
 		assert_fdi_rx_disabled(dev_priv, pipe);
 	}
 
-	ironlake_pfit_enable(pipe_config);
+	ilk_pfit_enable(new_crtc_state);
 
 	/*
 	 * On ILK+ LUT must be loaded before the pipe is running but with
 	 * clocks enabled
 	 */
-	intel_color_load_luts(pipe_config);
-	intel_color_commit(pipe_config);
+	intel_color_load_luts(new_crtc_state);
+	intel_color_commit(new_crtc_state);
 	/* update DSPCNTR to configure gamma for pipe bottom color */
-	intel_disable_primary_plane(pipe_config);
+	intel_disable_primary_plane(new_crtc_state);
 
-	if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(state, pipe_config);
-	intel_enable_pipe(pipe_config);
+	if (dev_priv->display.initial_watermarks)
+		dev_priv->display.initial_watermarks(state, crtc);
+	intel_enable_pipe(new_crtc_state);
 
-	if (pipe_config->has_pch_encoder)
-		ironlake_pch_enable(state, pipe_config);
+	if (new_crtc_state->has_pch_encoder)
+		ilk_pch_enable(state, new_crtc_state);
 
-	assert_vblank_disabled(crtc);
-	intel_crtc_vblank_on(pipe_config);
+	intel_crtc_vblank_on(new_crtc_state);
 
-	intel_encoders_enable(intel_crtc, pipe_config, state);
+	intel_encoders_enable(state, crtc);
 
 	if (HAS_PCH_CPT(dev_priv))
-		cpt_verify_modeset(dev, intel_crtc->pipe);
+		cpt_verify_modeset(dev_priv, pipe);
 
 	/*
 	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
@@ -6479,7 +6846,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
 	 * some interlaced HDMI modes. Let's do the double wait always
 	 * in case there are more corner cases we don't know about.
 	 */
-	if (pipe_config->has_pch_encoder) {
+	if (new_crtc_state->has_pch_encoder) {
 		intel_wait_for_vblank(dev_priv, pipe);
 		intel_wait_for_vblank(dev_priv, pipe);
 	}
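Editor's note: assert_vblank_disabled(crtc) disappears from the enable path above; the check is presumably folded into intel_crtc_vblank_on(), and the disable paths below likewise switch to an intel_crtc_vblank_off() helper. A hedged sketch of the assumed shape, not quoted from the tree:

	void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
	{
		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

		assert_vblank_disabled(&crtc->base);
		drm_crtc_vblank_on(&crtc->base);
	}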
@@ -6526,103 +6893,112 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
 	I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
 }
 
-static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
-				struct intel_atomic_state *state)
+static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = pipe_config->base.crtc;
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe;
-	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
+	u32 val;
+
+	val = I915_READ(reg);
+	val &= ~HSW_FRAME_START_DELAY_MASK;
+	val |= HSW_FRAME_START_DELAY(0);
+	I915_WRITE(reg, val);
+}
+
+static void hsw_crtc_enable(struct intel_atomic_state *state,
+			    struct intel_crtc *crtc)
+{
+	const struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
+	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
 	bool psl_clkgate_wa;
 
-	if (WARN_ON(intel_crtc->active))
+	if (WARN_ON(crtc->active))
 		return;
 
-	intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
+	intel_encoders_pre_pll_enable(state, crtc);
 
-	if (pipe_config->shared_dpll)
-		intel_enable_shared_dpll(pipe_config);
+	if (new_crtc_state->shared_dpll)
+		intel_enable_shared_dpll(new_crtc_state);
 
-	intel_encoders_pre_enable(intel_crtc, pipe_config, state);
+	intel_encoders_pre_enable(state, crtc);
 
-	if (intel_crtc_has_dp_encoder(pipe_config))
-		intel_dp_set_m_n(pipe_config, M1_N1);
+	if (intel_crtc_has_dp_encoder(new_crtc_state))
+		intel_dp_set_m_n(new_crtc_state, M1_N1);
 
 	if (!transcoder_is_dsi(cpu_transcoder))
-		intel_set_pipe_timings(pipe_config);
+		intel_set_pipe_timings(new_crtc_state);
 
 	if (INTEL_GEN(dev_priv) >= 11)
-		icl_enable_trans_port_sync(pipe_config);
+		icl_enable_trans_port_sync(new_crtc_state);
 
-	intel_set_pipe_src_size(pipe_config);
+	intel_set_pipe_src_size(new_crtc_state);
 
 	if (cpu_transcoder != TRANSCODER_EDP &&
-	    !transcoder_is_dsi(cpu_transcoder)) {
+	    !transcoder_is_dsi(cpu_transcoder))
 		I915_WRITE(PIPE_MULT(cpu_transcoder),
-			   pipe_config->pixel_multiplier - 1);
-	}
+			   new_crtc_state->pixel_multiplier - 1);
 
-	if (pipe_config->has_pch_encoder) {
-		intel_cpu_transcoder_set_m_n(pipe_config,
-					     &pipe_config->fdi_m_n, NULL);
-	}
+	if (new_crtc_state->has_pch_encoder)
+		intel_cpu_transcoder_set_m_n(new_crtc_state,
+					     &new_crtc_state->fdi_m_n, NULL);
 
-	if (!transcoder_is_dsi(cpu_transcoder))
-		haswell_set_pipeconf(pipe_config);
+	if (!transcoder_is_dsi(cpu_transcoder)) {
+		hsw_set_frame_start_delay(new_crtc_state);
+		hsw_set_pipeconf(new_crtc_state);
+	}
 
 	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
-		bdw_set_pipemisc(pipe_config);
+		bdw_set_pipemisc(new_crtc_state);
 
-	intel_crtc->active = true;
+	crtc->active = true;
 
 	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
 	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
-			 pipe_config->pch_pfit.enabled;
+		new_crtc_state->pch_pfit.enabled;
 	if (psl_clkgate_wa)
 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
 
 	if (INTEL_GEN(dev_priv) >= 9)
-		skylake_pfit_enable(pipe_config);
+		skl_pfit_enable(new_crtc_state);
 	else
-		ironlake_pfit_enable(pipe_config);
+		ilk_pfit_enable(new_crtc_state);
 
 	/*
 	 * On ILK+ LUT must be loaded before the pipe is running but with
 	 * clocks enabled
 	 */
-	intel_color_load_luts(pipe_config);
-	intel_color_commit(pipe_config);
+	intel_color_load_luts(new_crtc_state);
+	intel_color_commit(new_crtc_state);
 	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
 	if (INTEL_GEN(dev_priv) < 9)
-		intel_disable_primary_plane(pipe_config);
+		intel_disable_primary_plane(new_crtc_state);
 
 	if (INTEL_GEN(dev_priv) >= 11)
-		icl_set_pipe_chicken(intel_crtc);
+		icl_set_pipe_chicken(crtc);
 
 	if (!transcoder_is_dsi(cpu_transcoder))
-		intel_ddi_enable_transcoder_func(pipe_config);
+		intel_ddi_enable_transcoder_func(new_crtc_state);
 
-	if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(state, pipe_config);
+	if (dev_priv->display.initial_watermarks)
+		dev_priv->display.initial_watermarks(state, crtc);
 
 	if (INTEL_GEN(dev_priv) >= 11)
-		icl_pipe_mbus_enable(intel_crtc);
+		icl_pipe_mbus_enable(crtc);
 
 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
 	if (!transcoder_is_dsi(cpu_transcoder))
-		intel_enable_pipe(pipe_config);
+		intel_enable_pipe(new_crtc_state);
 
-	if (pipe_config->has_pch_encoder)
-		lpt_pch_enable(state, pipe_config);
+	if (new_crtc_state->has_pch_encoder)
+		lpt_pch_enable(state, new_crtc_state);
 
-	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
-		intel_ddi_set_vc_payload_alloc(pipe_config, true);
+	intel_crtc_vblank_on(new_crtc_state);
 
-	assert_vblank_disabled(crtc);
-	intel_crtc_vblank_on(pipe_config);
-
-	intel_encoders_enable(intel_crtc, pipe_config, state);
+	intel_encoders_enable(state, crtc);
 
 	if (psl_clkgate_wa) {
 		intel_wait_for_vblank(dev_priv, pipe);
@@ -6631,16 +7007,16 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 
 	/* If we change the relative order between pipe/planes enabling, we need
 	 * to change the workaround. */
-	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
+	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
 	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
 	}
 }
 
-static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
@@ -6653,14 +7029,13 @@ static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
 	}
 }
 
-static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
-				  struct intel_atomic_state *state)
+static void ilk_crtc_disable(struct intel_atomic_state *state,
+			     struct intel_crtc *crtc)
 {
-	struct drm_crtc *crtc = old_crtc_state->base.crtc;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 
 	/*
 	 * Sometimes spurious CPU pipe underruns happen when the
@@ -6670,22 +7045,21 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
-	intel_encoders_disable(intel_crtc, old_crtc_state, state);
+	intel_encoders_disable(state, crtc);
 
-	drm_crtc_vblank_off(crtc);
-	assert_vblank_disabled(crtc);
+	intel_crtc_vblank_off(old_crtc_state);
 
 	intel_disable_pipe(old_crtc_state);
 
-	ironlake_pfit_disable(old_crtc_state);
+	ilk_pfit_disable(old_crtc_state);
 
 	if (old_crtc_state->has_pch_encoder)
-		ironlake_fdi_disable(crtc);
+		ilk_fdi_disable(crtc);
 
-	intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
+	intel_encoders_post_disable(state, crtc);
 
 	if (old_crtc_state->has_pch_encoder) {
-		ironlake_disable_pch_transcoder(dev_priv, pipe);
+		ilk_disable_pch_transcoder(dev_priv, pipe);
 
 		if (HAS_PCH_CPT(dev_priv)) {
 			i915_reg_t reg;
@@ -6705,54 +7079,27 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
 			I915_WRITE(PCH_DPLL_SEL, temp);
 		}
 
-		ironlake_fdi_pll_disable(intel_crtc);
+		ilk_fdi_pll_disable(crtc);
 	}
 
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 }
 
-static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
-				 struct intel_atomic_state *state)
+static void hsw_crtc_disable(struct intel_atomic_state *state,
+			     struct intel_crtc *crtc)
 {
-	struct drm_crtc *crtc = old_crtc_state->base.crtc;
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
-
-	intel_encoders_disable(intel_crtc, old_crtc_state, state);
-
-	drm_crtc_vblank_off(crtc);
-	assert_vblank_disabled(crtc);
-
-	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
-	if (!transcoder_is_dsi(cpu_transcoder))
-		intel_disable_pipe(old_crtc_state);
-
-	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
-		intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
-
-	if (INTEL_GEN(dev_priv) >= 11)
-		icl_disable_transcoder_port_sync(old_crtc_state);
-
-	if (!transcoder_is_dsi(cpu_transcoder))
-		intel_ddi_disable_transcoder_func(old_crtc_state);
-
-	intel_dsc_disable(old_crtc_state);
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		skylake_scaler_disable(intel_crtc);
-	else
-		ironlake_pfit_disable(old_crtc_state);
-
-	intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
-
-	intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
+	/*
+	 * FIXME collapse everything to one hook.
+	 * Need care with mst->ddi interactions.
+	 */
+	intel_encoders_disable(state, crtc);
+	intel_encoders_post_disable(state, crtc);
 }
 
 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	if (!crtc_state->gmch_pfit.control)
@@ -6763,7 +7110,7 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
 	 * according to register description and PRM.
 	 */
 	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
-	assert_pipe_disabled(dev_priv, crtc->pipe);
+	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
 
 	I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
 	I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
@@ -6888,14 +7235,14 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
 
 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct drm_encoder *encoder;
 	enum pipe pipe = crtc->pipe;
 	u64 mask;
 	enum transcoder transcoder = crtc_state->cpu_transcoder;
 
-	if (!crtc_state->base.active)
+	if (!crtc_state->hw.active)
 		return 0;
 
 	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
@@ -6905,7 +7252,7 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
 
 	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
-				  crtc_state->base.encoder_mask) {
+				  crtc_state->uapi.encoder_mask) {
 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
 
 		mask |= BIT_ULL(intel_encoder->power_domain);
@@ -6923,7 +7270,7 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
 static u64
 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum intel_display_power_domain domain;
 	u64 domains, new_domains, old_domains;
@@ -6949,146 +7296,140 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
 		intel_display_power_put_unchecked(dev_priv, domain);
 }
 
-static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
-				   struct intel_atomic_state *state)
+static void valleyview_crtc_enable(struct intel_atomic_state *state,
+				   struct intel_crtc *crtc)
 {
-	struct drm_crtc *crtc = pipe_config->base.crtc;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
+	const struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 
-	if (WARN_ON(intel_crtc->active))
+	if (WARN_ON(crtc->active))
 		return;
 
-	if (intel_crtc_has_dp_encoder(pipe_config))
-		intel_dp_set_m_n(pipe_config, M1_N1);
+	if (intel_crtc_has_dp_encoder(new_crtc_state))
+		intel_dp_set_m_n(new_crtc_state, M1_N1);
 
-	intel_set_pipe_timings(pipe_config);
-	intel_set_pipe_src_size(pipe_config);
+	intel_set_pipe_timings(new_crtc_state);
+	intel_set_pipe_src_size(new_crtc_state);
 
 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
 		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
 		I915_WRITE(CHV_CANVAS(pipe), 0);
 	}
 
-	i9xx_set_pipeconf(pipe_config);
+	i9xx_set_pipeconf(new_crtc_state);
 
-	intel_crtc->active = true;
+	crtc->active = true;
 
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-	intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
+	intel_encoders_pre_pll_enable(state, crtc);
 
 	if (IS_CHERRYVIEW(dev_priv)) {
-		chv_prepare_pll(intel_crtc, pipe_config);
-		chv_enable_pll(intel_crtc, pipe_config);
+		chv_prepare_pll(crtc, new_crtc_state);
+		chv_enable_pll(crtc, new_crtc_state);
 	} else {
-		vlv_prepare_pll(intel_crtc, pipe_config);
-		vlv_enable_pll(intel_crtc, pipe_config);
+		vlv_prepare_pll(crtc, new_crtc_state);
+		vlv_enable_pll(crtc, new_crtc_state);
 	}
 
-	intel_encoders_pre_enable(intel_crtc, pipe_config, state);
+	intel_encoders_pre_enable(state, crtc);
 
-	i9xx_pfit_enable(pipe_config);
+	i9xx_pfit_enable(new_crtc_state);
 
-	intel_color_load_luts(pipe_config);
-	intel_color_commit(pipe_config);
+	intel_color_load_luts(new_crtc_state);
+	intel_color_commit(new_crtc_state);
 	/* update DSPCNTR to configure gamma for pipe bottom color */
-	intel_disable_primary_plane(pipe_config);
+	intel_disable_primary_plane(new_crtc_state);
 
-	dev_priv->display.initial_watermarks(state, pipe_config);
-	intel_enable_pipe(pipe_config);
+	dev_priv->display.initial_watermarks(state, crtc);
+	intel_enable_pipe(new_crtc_state);
 
-	assert_vblank_disabled(crtc);
-	intel_crtc_vblank_on(pipe_config);
+	intel_crtc_vblank_on(new_crtc_state);
 
-	intel_encoders_enable(intel_crtc, pipe_config, state);
+	intel_encoders_enable(state, crtc);
 }
 
 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
 	I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
 }
 
-static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
-			     struct intel_atomic_state *state)
+static void i9xx_crtc_enable(struct intel_atomic_state *state,
+			     struct intel_crtc *crtc)
 {
-	struct drm_crtc *crtc = pipe_config->base.crtc;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
+	const struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 
-	if (WARN_ON(intel_crtc->active))
+	if (WARN_ON(crtc->active))
 		return;
 
-	i9xx_set_pll_dividers(pipe_config);
+	i9xx_set_pll_dividers(new_crtc_state);
 
-	if (intel_crtc_has_dp_encoder(pipe_config))
-		intel_dp_set_m_n(pipe_config, M1_N1);
+	if (intel_crtc_has_dp_encoder(new_crtc_state))
+		intel_dp_set_m_n(new_crtc_state, M1_N1);
 
-	intel_set_pipe_timings(pipe_config);
-	intel_set_pipe_src_size(pipe_config);
+	intel_set_pipe_timings(new_crtc_state);
+	intel_set_pipe_src_size(new_crtc_state);
 
-	i9xx_set_pipeconf(pipe_config);
+	i9xx_set_pipeconf(new_crtc_state);
 
-	intel_crtc->active = true;
+	crtc->active = true;
 
 	if (!IS_GEN(dev_priv, 2))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-	intel_encoders_pre_enable(intel_crtc, pipe_config, state);
+	intel_encoders_pre_enable(state, crtc);
 
-	i9xx_enable_pll(intel_crtc, pipe_config);
+	i9xx_enable_pll(crtc, new_crtc_state);
 
-	i9xx_pfit_enable(pipe_config);
+	i9xx_pfit_enable(new_crtc_state);
 
-	intel_color_load_luts(pipe_config);
-	intel_color_commit(pipe_config);
+	intel_color_load_luts(new_crtc_state);
+	intel_color_commit(new_crtc_state);
 	/* update DSPCNTR to configure gamma for pipe bottom color */
-	intel_disable_primary_plane(pipe_config);
+	intel_disable_primary_plane(new_crtc_state);
 
-	if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(state,
-						     pipe_config);
+	if (dev_priv->display.initial_watermarks)
+		dev_priv->display.initial_watermarks(state, crtc);
 	else
-		intel_update_watermarks(intel_crtc);
-	intel_enable_pipe(pipe_config);
+		intel_update_watermarks(crtc);
+	intel_enable_pipe(new_crtc_state);
 
-	assert_vblank_disabled(crtc);
-	intel_crtc_vblank_on(pipe_config);
+	intel_crtc_vblank_on(new_crtc_state);
 
-	intel_encoders_enable(intel_crtc, pipe_config, state);
+	intel_encoders_enable(state, crtc);
 }
 
 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	if (!old_crtc_state->gmch_pfit.control)
 		return;
 
-	assert_pipe_disabled(dev_priv, crtc->pipe);
+	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
 
 	DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
 		      I915_READ(PFIT_CONTROL));
 	I915_WRITE(PFIT_CONTROL, 0);
 }
 
-static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
-			      struct intel_atomic_state *state)
+static void i9xx_crtc_disable(struct intel_atomic_state *state,
+			      struct intel_crtc *crtc)
 {
-	struct drm_crtc *crtc = old_crtc_state->base.crtc;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
+	struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 
 	/*
 	 * On gen2 planes are double buffered but the pipe isn't, so we must
@@ -7097,16 +7438,15 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
 	if (IS_GEN(dev_priv, 2))
 		intel_wait_for_vblank(dev_priv, pipe);
 
-	intel_encoders_disable(intel_crtc, old_crtc_state, state);
+	intel_encoders_disable(state, crtc);
 
-	drm_crtc_vblank_off(crtc);
-	assert_vblank_disabled(crtc);
+	intel_crtc_vblank_off(old_crtc_state);
 
 	intel_disable_pipe(old_crtc_state);
 
 	i9xx_pfit_disable(old_crtc_state);
 
-	intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
+	intel_encoders_post_disable(state, crtc);
 
 	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
 		if (IS_CHERRYVIEW(dev_priv))
@@ -7117,92 +7457,97 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
 			i9xx_disable_pll(old_crtc_state);
 	}
 
-	intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
+	intel_encoders_post_pll_disable(state, crtc);
 
 	if (!IS_GEN(dev_priv, 2))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
 	if (!dev_priv->display.initial_watermarks)
-		intel_update_watermarks(intel_crtc);
+		intel_update_watermarks(crtc);
 
 	/* clock the pipe down to 640x480@60 to potentially save power */
 	if (IS_I830(dev_priv))
 		i830_enable_pipe(dev_priv, pipe);
 }
 
-static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
+static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
 					struct drm_modeset_acquire_ctx *ctx)
 {
 	struct intel_encoder *encoder;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_bw_state *bw_state =
 		to_intel_bw_state(dev_priv->bw_obj.state);
+	struct intel_crtc_state *crtc_state =
+		to_intel_crtc_state(crtc->base.state);
 	enum intel_display_power_domain domain;
 	struct intel_plane *plane;
-	u64 domains;
 	struct drm_atomic_state *state;
-	struct intel_crtc_state *crtc_state;
+	struct intel_crtc_state *temp_crtc_state;
+	enum pipe pipe = crtc->pipe;
+	u64 domains;
 	int ret;
 
-	if (!intel_crtc->active)
+	if (!crtc_state->hw.active)
 		return;
 
-	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
 		const struct intel_plane_state *plane_state =
 			to_intel_plane_state(plane->base.state);
 
-		if (plane_state->base.visible)
-			intel_plane_disable_noatomic(intel_crtc, plane);
+		if (plane_state->uapi.visible)
+			intel_plane_disable_noatomic(crtc, plane);
 	}
 
-	state = drm_atomic_state_alloc(crtc->dev);
+	state = drm_atomic_state_alloc(&dev_priv->drm);
 	if (!state) {
 		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
-			      crtc->base.id, crtc->name);
+			      crtc->base.base.id, crtc->base.name);
 		return;
 	}
 
 	state->acquire_ctx = ctx;
 
 	/* Everything's already locked, -EDEADLK can't happen. */
-	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
-	ret = drm_atomic_add_affected_connectors(state, crtc);
+	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
+	ret = drm_atomic_add_affected_connectors(state, &crtc->base);
 
-	WARN_ON(IS_ERR(crtc_state) || ret);
+	WARN_ON(IS_ERR(temp_crtc_state) || ret);
 
-	dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state));
+	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
 
 	drm_atomic_state_put(state);
 
 	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
-		      crtc->base.id, crtc->name);
+		      crtc->base.base.id, crtc->base.name);
 
-	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
-	crtc->state->active = false;
-	intel_crtc->active = false;
-	crtc->enabled = false;
-	crtc->state->connector_mask = 0;
-	crtc->state->encoder_mask = 0;
+	crtc->active = false;
+	crtc->base.enabled = false;
 
-	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
+	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
+	crtc_state->uapi.active = false;
+	crtc_state->uapi.connector_mask = 0;
+	crtc_state->uapi.encoder_mask = 0;
+	intel_crtc_free_hw_state(crtc_state);
+	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
+
+	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
 		encoder->base.crtc = NULL;
 
-	intel_fbc_disable(intel_crtc);
-	intel_update_watermarks(intel_crtc);
-	intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
+	intel_fbc_disable(crtc);
+	intel_update_watermarks(crtc);
+	intel_disable_shared_dpll(crtc_state);
 
-	domains = intel_crtc->enabled_power_domains;
+	domains = crtc->enabled_power_domains;
 	for_each_power_domain(domain, domains)
 		intel_display_power_put_unchecked(dev_priv, domain);
-	intel_crtc->enabled_power_domains = 0;
+	crtc->enabled_power_domains = 0;
 
-	dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
-	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
-	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
+	dev_priv->active_pipes &= ~BIT(pipe);
+	dev_priv->min_cdclk[pipe] = 0;
+	dev_priv->min_voltage_level[pipe] = 0;
 
-	bw_state->data_rate[intel_crtc->pipe] = 0;
-	bw_state->num_active_planes[intel_crtc->pipe] = 0;
+	bw_state->data_rate[pipe] = 0;
+	bw_state->num_active_planes[pipe] = 0;
 }
 
 /*
@@ -7252,8 +7597,8 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
 		if (!crtc_state)
 			return;
 
-		I915_STATE_WARN(!crtc_state->base.active,
-		      "connector is active, but attached crtc isn't\n");
+		I915_STATE_WARN(!crtc_state->hw.active,
+				"connector is active, but attached crtc isn't\n");
 
 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
 			return;
@@ -7264,8 +7609,8 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
 			"attached encoder crtc differs from connector crtc\n");
 	} else {
-		I915_STATE_WARN(crtc_state && crtc_state->base.active,
-			"attached crtc is active, but connector isn't\n");
+		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
+				"attached crtc is active, but connector isn't\n");
 		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
 			"best encoder set without crtc!\n");
 	}
@@ -7273,17 +7618,17 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
 
 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
 {
-	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
+	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
 		return crtc_state->fdi_lanes;
 
 	return 0;
 }
 
-static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
-				     struct intel_crtc_state *pipe_config)
+static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+			       struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_atomic_state *state = pipe_config->base.state;
+	struct drm_atomic_state *state = pipe_config->uapi.state;
 	struct intel_crtc *other_crtc;
 	struct intel_crtc_state *other_crtc_state;
 
@@ -7352,11 +7697,11 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
 }
 
 #define RETRY 1
-static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
-				       struct intel_crtc_state *pipe_config)
+static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+				  struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
-	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	int lane, link_bw, fdi_dotclock, ret;
 	bool needs_recompute = false;
 
@@ -7372,15 +7717,15 @@ static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
 
 	fdi_dotclock = adjusted_mode->crtc_clock;
 
-	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
-					   pipe_config->pipe_bpp);
+	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
+				      pipe_config->pipe_bpp);
 
 	pipe_config->fdi_lanes = lane;
 
 	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
 			       link_bw, &pipe_config->fdi_m_n, false, false);
 
-	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
 	if (ret == -EDEADLK)
 		return ret;
 
@@ -7402,7 +7747,7 @@ static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
 
 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -7432,9 +7777,9 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(crtc_state->base.crtc->dev);
+		to_i915(crtc_state->uapi.crtc->dev);
 	struct intel_atomic_state *intel_state =
-		to_intel_atomic_state(crtc_state->base.state);
+		to_intel_atomic_state(crtc_state->uapi.state);
 
 	if (!hsw_crtc_state_ips_capable(crtc_state))
 		return false;
@@ -7473,7 +7818,7 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
 {
 	u32 pixel_rate;
 
-	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
+	pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
 
 	/*
 	 * We only use IF-ID interlacing. If we ever use
@@ -7506,12 +7851,12 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
 
 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 
 	if (HAS_GMCH(dev_priv))
 		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
 		crtc_state->pixel_rate =
-			crtc_state->base.adjusted_mode.crtc_clock;
+			crtc_state->hw.adjusted_mode.crtc_clock;
 	else
 		crtc_state->pixel_rate =
 			ilk_pipe_pixel_rate(crtc_state);
@@ -7521,7 +7866,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 				     struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	int clock_limit = dev_priv->max_dotclk_freq;
 
 	if (INTEL_GEN(dev_priv) < 4) {
@@ -7547,7 +7892,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 
 	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
 	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
-	     pipe_config->base.ctm) {
+	     pipe_config->hw.ctm) {
 		/*
 		 * There is only one pipe CSC unit per pipe, and we need that
 		 * for output conversion from RGB->YCBCR. So if CTM is already
@@ -7586,7 +7931,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 	intel_crtc_compute_pixel_rate(pipe_config);
 
 	if (pipe_config->has_pch_encoder)
-		return ironlake_fdi_compute_config(crtc, pipe_config);
+		return ilk_fdi_compute_config(crtc, pipe_config);
 
 	return 0;
 }
@@ -7741,7 +8086,7 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
 					 const struct intel_link_m_n *m_n)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
@@ -7768,7 +8113,7 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
 					 const struct intel_link_m_n *m_n,
 					 const struct intel_link_m_n *m2_n2)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	enum transcoder transcoder = crtc_state->cpu_transcoder;
@@ -8077,11 +8422,11 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 	struct intel_crtc_state *pipe_config;
 
-	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+	pipe_config = intel_crtc_state_alloc(crtc);
 	if (!pipe_config)
 		return -ENOMEM;
 
-	pipe_config->base.crtc = &crtc->base;
+	pipe_config->cpu_transcoder = (enum transcoder)pipe;
 	pipe_config->pixel_multiplier = 1;
 	pipe_config->dpll = *dpll;
 
@@ -8241,11 +8586,11 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
 
 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
 	u32 crtc_vtotal, crtc_vblank_end;
 	int vsyncshift = 0;
 
@@ -8303,7 +8648,7 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
 
 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
@@ -8317,7 +8662,7 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
 
 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
 	if (IS_GEN(dev_priv, 2))
@@ -8339,39 +8684,39 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
 	u32 tmp;
 
 	tmp = I915_READ(HTOTAL(cpu_transcoder));
-	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
-	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
+	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
 
 	if (!transcoder_is_dsi(cpu_transcoder)) {
 		tmp = I915_READ(HBLANK(cpu_transcoder));
-		pipe_config->base.adjusted_mode.crtc_hblank_start =
+		pipe_config->hw.adjusted_mode.crtc_hblank_start =
 							(tmp & 0xffff) + 1;
-		pipe_config->base.adjusted_mode.crtc_hblank_end =
+		pipe_config->hw.adjusted_mode.crtc_hblank_end =
 						((tmp >> 16) & 0xffff) + 1;
 	}
 	tmp = I915_READ(HSYNC(cpu_transcoder));
-	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
-	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
+	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
+	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
 
 	tmp = I915_READ(VTOTAL(cpu_transcoder));
-	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
-	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
+	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
 
 	if (!transcoder_is_dsi(cpu_transcoder)) {
 		tmp = I915_READ(VBLANK(cpu_transcoder));
-		pipe_config->base.adjusted_mode.crtc_vblank_start =
+		pipe_config->hw.adjusted_mode.crtc_vblank_start =
 							(tmp & 0xffff) + 1;
-		pipe_config->base.adjusted_mode.crtc_vblank_end =
+		pipe_config->hw.adjusted_mode.crtc_vblank_end =
 						((tmp >> 16) & 0xffff) + 1;
 	}
 	tmp = I915_READ(VSYNC(cpu_transcoder));
-	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
-	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
+	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
 
 	if (intel_pipe_is_interlaced(pipe_config)) {
-		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
-		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
-		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
+		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
+		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
 	}
 }
 
@@ -8386,27 +8731,27 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
 
-	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
-	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
+	pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
+	pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
 }
 
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 				 struct intel_crtc_state *pipe_config)
 {
-	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
-	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
-	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
-	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
+	mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
+	mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
+	mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
+	mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
 
-	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
-	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
-	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
-	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
+	mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
+	mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
+	mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
+	mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
 
-	mode->flags = pipe_config->base.adjusted_mode.flags;
+	mode->flags = pipe_config->hw.adjusted_mode.flags;
 	mode->type = DRM_MODE_TYPE_DRIVER;
 
-	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
+	mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
 
 	mode->hsync = drm_mode_hsync(mode);
 	mode->vrefresh = drm_mode_vrefresh(mode);
@@ -8415,7 +8760,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 
 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 pipeconf;
 
@@ -8452,7 +8797,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
 		}
 	}
 
-	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
 		if (INTEL_GEN(dev_priv) < 4 ||
 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
@@ -8468,6 +8813,8 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
 
 	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
 
+	pipeconf |= PIPECONF_FRAME_START_DELAY(0);
+
 	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
 	POSTING_READ(PIPECONF(crtc->pipe));
 }
@@ -8567,9 +8914,9 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
 		}
 
-		limit = &intel_limits_pineview_lvds;
+		limit = &pnv_limits_lvds;
 	} else {
-		limit = &intel_limits_pineview_sdvo;
+		limit = &pnv_limits_sdvo;
 	}
 
 	if (!crtc_state->clock_set &&
@@ -8861,7 +9208,7 @@ bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
 
 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
@@ -8985,7 +9332,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 	 * but in case the pipe is enabled w/o any ports we need a sane
 	 * default.
 	 */
-	pipe_config->base.adjusted_mode.crtc_clock =
+	pipe_config->hw.adjusted_mode.crtc_clock =
 		pipe_config->port_clock / pipe_config->pixel_multiplier;
 
 	ret = true;
@@ -8996,7 +9343,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 	return ret;
 }
 
-static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
 {
 	struct intel_encoder *encoder;
 	int i;
@@ -9494,14 +9841,14 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
 {
 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
-		ironlake_init_pch_refclk(dev_priv);
+		ilk_init_pch_refclk(dev_priv);
 	else if (HAS_PCH_LPT(dev_priv))
 		lpt_init_pch_refclk(dev_priv);
 }
 
-static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
+static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	u32 val;
@@ -9529,7 +9876,7 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
 	if (crtc_state->dither)
 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
-	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 		val |= PIPECONF_INTERLACED_ILK;
 	else
 		val |= PIPECONF_PROGRESSIVE;
@@ -9549,13 +9896,15 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
 
 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
 
+	val |= PIPECONF_FRAME_START_DELAY(0);
+
 	I915_WRITE(PIPECONF(pipe), val);
 	POSTING_READ(PIPECONF(pipe));
 }
 
-static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
+static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	u32 val = 0;
@@ -9563,7 +9912,7 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
-	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 		val |= PIPECONF_INTERLACED_ILK;
 	else
 		val |= PIPECONF_PROGRESSIVE;
@@ -9578,7 +9927,7 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
 
 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 val = 0;
 
@@ -9641,7 +9990,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
 	}
 }
 
-int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
 {
 	/*
 	 * Account for spread spectrum to avoid
@@ -9652,14 +10001,14 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
 	return DIV_ROUND_UP(bps, link_bw * 8);
 }
 
-static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
+static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
 {
 	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
 }
 
-static void ironlake_compute_dpll(struct intel_crtc *crtc,
-				  struct intel_crtc_state *crtc_state,
-				  struct dpll *reduced_clock)
+static void ilk_compute_dpll(struct intel_crtc *crtc,
+			     struct intel_crtc_state *crtc_state,
+			     struct dpll *reduced_clock)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 dpll, fp, fp2;
@@ -9679,7 +10028,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
 
 	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
 
-	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
+	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
 		fp |= FP_CB_TUNE;
 
 	if (reduced_clock) {
@@ -9759,12 +10108,12 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
 	crtc_state->dpll_hw_state.fp1 = fp2;
 }
 
-static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
-				       struct intel_crtc_state *crtc_state)
+static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_atomic_state *state =
-		to_intel_atomic_state(crtc_state->base.state);
+		to_intel_atomic_state(crtc_state->uapi.state);
 	const struct intel_limit *limit;
 	int refclk = 120000;
 
@@ -9784,17 +10133,17 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 
 		if (intel_is_dual_link_lvds(dev_priv)) {
 			if (refclk == 100000)
-				limit = &intel_limits_ironlake_dual_lvds_100m;
+				limit = &ilk_limits_dual_lvds_100m;
 			else
-				limit = &intel_limits_ironlake_dual_lvds;
+				limit = &ilk_limits_dual_lvds;
 		} else {
 			if (refclk == 100000)
-				limit = &intel_limits_ironlake_single_lvds_100m;
+				limit = &ilk_limits_single_lvds_100m;
 			else
-				limit = &intel_limits_ironlake_single_lvds;
+				limit = &ilk_limits_single_lvds;
 		}
 	} else {
-		limit = &intel_limits_ironlake_dac;
+		limit = &ilk_limits_dac;
 	}
 
 	if (!crtc_state->clock_set &&
@@ -9804,7 +10153,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 		return -EINVAL;
 	}
 
-	ironlake_compute_dpll(crtc, crtc_state, NULL);
+	ilk_compute_dpll(crtc, crtc_state, NULL);
 
 	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
 		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
@@ -9879,15 +10228,15 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
 					     &pipe_config->dp_m2_n2);
 }
 
-static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
-					struct intel_crtc_state *pipe_config)
+static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
+				   struct intel_crtc_state *pipe_config)
 {
 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
 				     &pipe_config->fdi_m_n, NULL);
 }
 
-static void skylake_get_pfit_config(struct intel_crtc *crtc,
-				    struct intel_crtc_state *pipe_config)
+static void skl_get_pfit_config(struct intel_crtc *crtc,
+				struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -9918,8 +10267,8 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
 }
 
 static void
-skylake_get_initial_plane_config(struct intel_crtc *crtc,
-				 struct intel_initial_plane_config *plane_config)
+skl_get_initial_plane_config(struct intel_crtc *crtc,
+			     struct intel_initial_plane_config *plane_config)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -9977,7 +10326,11 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	case PLANE_CTL_TILED_Y:
 		plane_config->tiling = I915_TILING_Y;
 		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
-			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
+			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
+				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
+				I915_FORMAT_MOD_Y_TILED_CCS;
+		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
+			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
 		else
 			fb->modifier = I915_FORMAT_MOD_Y_TILED;
 		break;
@@ -10044,8 +10397,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	kfree(intel_fb);
 }
 
-static void ironlake_get_pfit_config(struct intel_crtc *crtc,
-				     struct intel_crtc_state *pipe_config)
+static void ilk_get_pfit_config(struct intel_crtc *crtc,
+				struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -10068,8 +10421,8 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
 	}
 }
 
-static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
-				     struct intel_crtc_state *pipe_config)
+static bool ilk_get_pipe_config(struct intel_crtc *crtc,
+				struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -10140,7 +10493,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
 
-		ironlake_get_fdi_m_n_config(crtc, pipe_config);
+		ilk_get_fdi_m_n_config(crtc, pipe_config);
 
 		if (HAS_PCH_IBX(dev_priv)) {
 			/*
@@ -10168,7 +10521,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
 
-		ironlake_pch_clock_get(crtc, pipe_config);
+		ilk_pch_clock_get(crtc, pipe_config);
 	} else {
 		pipe_config->pixel_multiplier = 1;
 	}
@@ -10176,7 +10529,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 	intel_get_pipe_timings(crtc, pipe_config);
 	intel_get_pipe_src_size(crtc, pipe_config);
 
-	ironlake_get_pfit_config(crtc, pipe_config);
+	ilk_get_pfit_config(crtc, pipe_config);
 
 	ret = true;
 
@@ -10185,12 +10538,13 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 
 	return ret;
 }
-static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
-				      struct intel_crtc_state *crtc_state)
+
+static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_atomic_state *state =
-		to_intel_atomic_state(crtc_state->base.state);
+		to_intel_atomic_state(crtc_state->uapi.state);
 
 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
 	    INTEL_GEN(dev_priv) >= 11) {
@@ -10207,9 +10561,8 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
 	return 0;
 }
 
-static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
-				   enum port port,
-				   struct intel_crtc_state *pipe_config)
+static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+			    struct intel_crtc_state *pipe_config)
 {
 	enum intel_dpll_id id;
 	u32 temp;
@@ -10223,9 +10576,8 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
 }
 
-static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
-				enum port port,
-				struct intel_crtc_state *pipe_config)
+static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+			    struct intel_crtc_state *pipe_config)
 {
 	enum phy phy = intel_port_to_phy(dev_priv, port);
 	enum icl_port_dpll_id port_dpll_id;
@@ -10284,9 +10636,8 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
 }
 
-static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
-				enum port port,
-				struct intel_crtc_state *pipe_config)
+static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+			    struct intel_crtc_state *pipe_config)
 {
 	enum intel_dpll_id id;
 	u32 temp;
@@ -10300,9 +10651,8 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
 }
 
-static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
-				enum port port,
-				struct intel_crtc_state *pipe_config)
+static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+			    struct intel_crtc_state *pipe_config)
 {
 	enum intel_dpll_id id;
 	u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
@@ -10403,6 +10753,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
 			trans_pipe = PIPE_C;
 			break;
+		case TRANS_DDI_EDP_INPUT_D_ONOFF:
+			trans_pipe = PIPE_D;
+			break;
 		}
 
 		if (trans_pipe == crtc->pipe) {
@@ -10487,31 +10840,36 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
 	return transcoder_is_dsi(pipe_config->cpu_transcoder);
 }
 
-static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
-				       struct intel_crtc_state *pipe_config)
+static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
+				   struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
 	struct intel_shared_dpll *pll;
 	enum port port;
 	u32 tmp;
 
-	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
-
-	if (INTEL_GEN(dev_priv) >= 12)
-		port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
-	else
-		port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
+	if (transcoder_is_dsi(cpu_transcoder)) {
+		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
+						PORT_A : PORT_B;
+	} else {
+		tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+		if (INTEL_GEN(dev_priv) >= 12)
+			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
+		else
+			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
+	}
 
 	if (INTEL_GEN(dev_priv) >= 11)
-		icelake_get_ddi_pll(dev_priv, port, pipe_config);
+		icl_get_ddi_pll(dev_priv, port, pipe_config);
 	else if (IS_CANNONLAKE(dev_priv))
-		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
+		cnl_get_ddi_pll(dev_priv, port, pipe_config);
 	else if (IS_GEN9_BC(dev_priv))
-		skylake_get_ddi_pll(dev_priv, port, pipe_config);
+		skl_get_ddi_pll(dev_priv, port, pipe_config);
 	else if (IS_GEN9_LP(dev_priv))
 		bxt_get_ddi_pll(dev_priv, port, pipe_config);
 	else
-		haswell_get_ddi_pll(dev_priv, port, pipe_config);
+		hsw_get_ddi_pll(dev_priv, port, pipe_config);
 
 	pll = pipe_config->shared_dpll;
 	if (pll) {
@@ -10532,7 +10890,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
 
-		ironlake_get_fdi_m_n_config(crtc, pipe_config);
+		ilk_get_fdi_m_n_config(crtc, pipe_config);
 	}
 }
 
@@ -10554,9 +10912,9 @@ static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_pr
 		return master_select - 1;
 }
 
-static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
+static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	u32 transcoders;
 	enum transcoder cpu_transcoder;
 
@@ -10589,8 +10947,8 @@ static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_sta
 		crtc_state->sync_mode_slaves_mask);
 }
 
-static bool haswell_get_pipe_config(struct intel_crtc *crtc,
-				    struct intel_crtc_state *pipe_config)
+static bool hsw_get_pipe_config(struct intel_crtc *crtc,
+				struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
@@ -10598,8 +10956,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	u64 power_domain_mask;
 	bool active;
 
-	intel_crtc_init_scalers(crtc, pipe_config);
-
 	pipe_config->master_transcoder = INVALID_TRANSCODER;
 
 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
@@ -10627,7 +10983,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 
 	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
 	    INTEL_GEN(dev_priv) >= 11) {
-		haswell_get_ddi_port_state(crtc, pipe_config);
+		hsw_get_ddi_port_state(crtc, pipe_config);
 		intel_get_pipe_timings(crtc, pipe_config);
 	}
 
@@ -10684,9 +11040,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 		power_domain_mask |= BIT_ULL(power_domain);
 
 		if (INTEL_GEN(dev_priv) >= 9)
-			skylake_get_pfit_config(crtc, pipe_config);
+			skl_get_pfit_config(crtc, pipe_config);
 		else
-			ironlake_get_pfit_config(crtc, pipe_config);
+			ilk_get_pfit_config(crtc, pipe_config);
 	}
 
 	if (hsw_crtc_supports_ips(crtc)) {
@@ -10712,7 +11068,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 
 	if (INTEL_GEN(dev_priv) >= 11 &&
 	    !transcoder_is_dsi(pipe_config->cpu_transcoder))
-		icelake_get_trans_port_sync_config(pipe_config);
+		icl_get_trans_port_sync_config(pipe_config);
 
 out:
 	for_each_power_domain(power_domain, power_domain_mask)
@@ -10725,8 +11081,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->base.plane->dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+		to_i915(plane_state->uapi.plane->dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	u32 base;
 
@@ -10740,8 +11096,8 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
 
 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
 {
-	int x = plane_state->base.dst.x1;
-	int y = plane_state->base.dst.y1;
+	int x = plane_state->uapi.dst.x1;
+	int y = plane_state->uapi.dst.y1;
 	u32 pos = 0;
 
 	if (x < 0) {
@@ -10762,9 +11118,9 @@ static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
 {
 	const struct drm_mode_config *config =
-		&plane_state->base.plane->dev->mode_config;
-	int width = drm_rect_width(&plane_state->base.dst);
-	int height = drm_rect_height(&plane_state->base.dst);
+		&plane_state->uapi.plane->dev->mode_config;
+	int width = drm_rect_width(&plane_state->uapi.dst);
+	int height = drm_rect_height(&plane_state->uapi.dst);
 
 	return width > 0 && width <= config->cursor_width &&
 		height > 0 && height <= config->cursor_height;
@@ -10773,8 +11129,8 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
 static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->base.plane->dev);
-	unsigned int rotation = plane_state->base.rotation;
+		to_i915(plane_state->uapi.plane->dev);
+	unsigned int rotation = plane_state->hw.rotation;
 	int src_x, src_y;
 	u32 offset;
 	int ret;
@@ -10783,11 +11139,11 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
 	if (ret)
 		return ret;
 
-	if (!plane_state->base.visible)
+	if (!plane_state->uapi.visible)
 		return 0;
 
-	src_x = plane_state->base.src.x1 >> 16;
-	src_y = plane_state->base.src.y1 >> 16;
+	src_x = plane_state->uapi.src.x1 >> 16;
+	src_y = plane_state->uapi.src.y1 >> 16;
 
 	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
 	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
@@ -10802,14 +11158,14 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
 	 * Put the final coordinates back so that the src
 	 * coordinate checks will see the right values.
 	 */
-	drm_rect_translate_to(&plane_state->base.src,
+	drm_rect_translate_to(&plane_state->uapi.src,
 			      src_x << 16, src_y << 16);
 
 	/* ILK+ do this automagically in hardware */
 	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
-		const struct drm_framebuffer *fb = plane_state->base.fb;
-		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
-		int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+		const struct drm_framebuffer *fb = plane_state->hw.fb;
+		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
 
 		offset += (src_h * src_w - 1) * fb->format->cpp[0];
 	}
@@ -10824,7 +11180,7 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
 			      struct intel_plane_state *plane_state)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	int ret;
 
 	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
@@ -10832,8 +11188,8 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
 		return -EINVAL;
 	}
 
-	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
-						  &crtc_state->base,
+	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+						  &crtc_state->uapi,
 						  DRM_PLANE_HELPER_NO_SCALING,
 						  DRM_PLANE_HELPER_NO_SCALING,
 						  true, true);
@@ -10841,14 +11197,14 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
 		return ret;
 
 	/* Use the unclipped src/dst rectangles, which we program to hw */
-	plane_state->base.src = drm_plane_state_src(&plane_state->base);
-	plane_state->base.dst = drm_plane_state_dest(&plane_state->base);
+	plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
+	plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
 
 	ret = intel_cursor_check_surface(plane_state);
 	if (ret)
 		return ret;
 
-	if (!plane_state->base.visible)
+	if (!plane_state->uapi.visible)
 		return 0;
 
 	ret = intel_plane_check_src_coordinates(plane_state);
@@ -10886,7 +11242,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
 
 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
 {
-	int width = drm_rect_width(&plane_state->base.dst);
+	int width = drm_rect_width(&plane_state->uapi.dst);
 
 	/*
 	 * 845g/865g are only limited by the width of their cursors,
@@ -10898,7 +11254,7 @@ static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
 			     struct intel_plane_state *plane_state)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	int ret;
 
 	ret = intel_check_cursor(crtc_state, plane_state);
@@ -10912,12 +11268,12 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
 	/* Check for which cursor types we support */
 	if (!i845_cursor_size_ok(plane_state)) {
 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
-			  drm_rect_width(&plane_state->base.dst),
-			  drm_rect_height(&plane_state->base.dst));
+			  drm_rect_width(&plane_state->uapi.dst),
+			  drm_rect_height(&plane_state->uapi.dst));
 		return -EINVAL;
 	}
 
-	WARN_ON(plane_state->base.visible &&
+	WARN_ON(plane_state->uapi.visible &&
 		plane_state->color_plane[0].stride != fb->pitches[0]);
 
 	switch (fb->pitches[0]) {
@@ -10945,9 +11301,9 @@ static void i845_update_cursor(struct intel_plane *plane,
 	u32 cntl = 0, base = 0, pos = 0, size = 0;
 	unsigned long irqflags;
 
-	if (plane_state && plane_state->base.visible) {
-		unsigned int width = drm_rect_width(&plane_state->base.dst);
-		unsigned int height = drm_rect_height(&plane_state->base.dst);
+	if (plane_state && plane_state->uapi.visible) {
+		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
+		unsigned int height = drm_rect_height(&plane_state->uapi.dst);
 
 		cntl = plane_state->ctl |
 			i845_cursor_ctl_crtc(crtc_state);
@@ -11020,7 +11376,7 @@ i9xx_cursor_max_stride(struct intel_plane *plane,
 
 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 cntl = 0;
 
@@ -11043,13 +11399,13 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
 			   const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->base.plane->dev);
+		to_i915(plane_state->uapi.plane->dev);
 	u32 cntl = 0;
 
 	if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
 		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
 
-	switch (drm_rect_width(&plane_state->base.dst)) {
+	switch (drm_rect_width(&plane_state->uapi.dst)) {
 	case 64:
 		cntl |= MCURSOR_MODE_64_ARGB_AX;
 		break;
@@ -11060,11 +11416,11 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
 		cntl |= MCURSOR_MODE_256_ARGB_AX;
 		break;
 	default:
-		MISSING_CASE(drm_rect_width(&plane_state->base.dst));
+		MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
 		return 0;
 	}
 
-	if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
+	if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
 		cntl |= MCURSOR_ROTATE_180;
 
 	return cntl;
@@ -11073,9 +11429,9 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->base.plane->dev);
-	int width = drm_rect_width(&plane_state->base.dst);
-	int height = drm_rect_height(&plane_state->base.dst);
+		to_i915(plane_state->uapi.plane->dev);
+	int width = drm_rect_width(&plane_state->uapi.dst);
+	int height = drm_rect_height(&plane_state->uapi.dst);
 
 	if (!intel_cursor_size_ok(plane_state))
 		return false;
@@ -11097,7 +11453,7 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
 	 * cursors.
 	 */
 	if (HAS_CUR_FBC(dev_priv) &&
-	    plane_state->base.rotation & DRM_MODE_ROTATE_0) {
+	    plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
 		if (height < 8 || height > width)
 			return false;
 	} else {
@@ -11111,9 +11467,9 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
 			     struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	enum pipe pipe = plane->pipe;
 	int ret;
 
@@ -11128,19 +11484,19 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
 	/* Check for which cursor types we support */
 	if (!i9xx_cursor_size_ok(plane_state)) {
 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
-			  drm_rect_width(&plane_state->base.dst),
-			  drm_rect_height(&plane_state->base.dst));
+			  drm_rect_width(&plane_state->uapi.dst),
+			  drm_rect_height(&plane_state->uapi.dst));
 		return -EINVAL;
 	}
 
-	WARN_ON(plane_state->base.visible &&
+	WARN_ON(plane_state->uapi.visible &&
 		plane_state->color_plane[0].stride != fb->pitches[0]);
 
 	if (fb->pitches[0] !=
-	    drm_rect_width(&plane_state->base.dst) * fb->format->cpp[0]) {
+	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
 		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
 			      fb->pitches[0],
-			      drm_rect_width(&plane_state->base.dst));
+			      drm_rect_width(&plane_state->uapi.dst));
 		return -EINVAL;
 	}
 
@@ -11155,7 +11511,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
 	 * Refuse to put the cursor into that compromised position.
 	 */
 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
-	    plane_state->base.visible && plane_state->base.dst.x1 < 0) {
+	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
 		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
 		return -EINVAL;
 	}
@@ -11174,9 +11530,9 @@ static void i9xx_update_cursor(struct intel_plane *plane,
 	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
 	unsigned long irqflags;
 
-	if (plane_state && plane_state->base.visible) {
-		unsigned width = drm_rect_width(&plane_state->base.dst);
-		unsigned height = drm_rect_height(&plane_state->base.dst);
+	if (plane_state && plane_state->uapi.visible) {
+		unsigned width = drm_rect_width(&plane_state->uapi.dst);
+		unsigned height = drm_rect_height(&plane_state->uapi.dst);
 
 		cntl = plane_state->ctl |
 			i9xx_cursor_ctl_crtc(crtc_state);
@@ -11332,7 +11688,7 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
 {
 	struct intel_crtc *intel_crtc;
 	struct intel_encoder *intel_encoder =
-		intel_attached_encoder(connector);
+		intel_attached_encoder(to_intel_connector(connector));
 	struct drm_crtc *possible_crtc;
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_crtc *crtc = NULL;
@@ -11431,9 +11787,9 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
 		goto fail;
 	}
 
-	crtc_state->base.active = crtc_state->base.enable = true;
+	crtc_state->uapi.active = true;
 
-	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base,
+	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
 					   &load_detect_mode);
 	if (ret)
 		goto fail;
@@ -11486,7 +11842,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 				    struct drm_modeset_acquire_ctx *ctx)
 {
 	struct intel_encoder *intel_encoder =
-		intel_attached_encoder(connector);
+		intel_attached_encoder(to_intel_connector(connector));
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_atomic_state *state = old->restore_state;
 	int ret;
@@ -11629,8 +11985,8 @@ int intel_dotclock_calculate(int link_freq,
 	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
 }
 
-static void ironlake_pch_clock_get(struct intel_crtc *crtc,
-				   struct intel_crtc_state *pipe_config)
+static void ilk_pch_clock_get(struct intel_crtc *crtc,
+			      struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
@@ -11642,11 +11998,38 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 	 * we may need some idea for the dotclock anyway.
 	 * Calculate one based on the FDI configuration.
 	 */
-	pipe_config->base.adjusted_mode.crtc_clock =
+	pipe_config->hw.adjusted_mode.crtc_clock =
 		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
 					 &pipe_config->fdi_m_n);
 }
 
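+/*
+ * Zero the crtc state and re-initialize the members that have non-zero
+ * defaults (invalid transcoders/pipe, invalid output format, scaler id).
+ */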
+static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+				   struct intel_crtc *crtc)
+{
+	memset(crtc_state, 0, sizeof(*crtc_state));
+
+	__drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
+
+	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
+	crtc_state->master_transcoder = INVALID_TRANSCODER;
+	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
+	crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
+	crtc_state->scaler_state.scaler_id = -1;
+	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
+}
+
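+/* Allocate a new crtc state and initialize it with intel_crtc_state_reset(). */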
+static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
+{
+	struct intel_crtc_state *crtc_state;
+
+	crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
+
+	if (crtc_state)
+		intel_crtc_state_reset(crtc_state, crtc);
+
+	return crtc_state;
+}
+
 /* Returns the currently programmed mode of the given encoder. */
 struct drm_display_mode *
 intel_encoder_current_mode(struct intel_encoder *encoder)
@@ -11666,14 +12049,12 @@ intel_encoder_current_mode(struct intel_encoder *encoder)
 	if (!mode)
 		return NULL;
 
-	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+	crtc_state = intel_crtc_state_alloc(crtc);
 	if (!crtc_state) {
 		kfree(mode);
 		return NULL;
 	}
 
-	crtc_state->base.crtc = &crtc->base;
-
 	if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
 		kfree(crtc_state);
 		kfree(mode);
@@ -11711,18 +12092,18 @@ static bool intel_wm_need_update(const struct intel_plane_state *cur,
 				 struct intel_plane_state *new)
 {
 	/* Update watermarks on tiling or size changes. */
-	if (new->base.visible != cur->base.visible)
+	if (new->uapi.visible != cur->uapi.visible)
 		return true;
 
-	if (!cur->base.fb || !new->base.fb)
+	if (!cur->hw.fb || !new->hw.fb)
 		return false;
 
-	if (cur->base.fb->modifier != new->base.fb->modifier ||
-	    cur->base.rotation != new->base.rotation ||
-	    drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
-	    drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
-	    drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
-	    drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
+	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
+	    cur->hw.rotation != new->hw.rotation ||
+	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
+	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
+	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
+	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
 		return true;
 
 	return false;
@@ -11730,10 +12111,10 @@ static bool intel_wm_need_update(const struct intel_plane_state *cur,
 
 static bool needs_scaling(const struct intel_plane_state *state)
 {
-	int src_w = drm_rect_width(&state->base.src) >> 16;
-	int src_h = drm_rect_height(&state->base.src) >> 16;
-	int dst_w = drm_rect_width(&state->base.dst);
-	int dst_h = drm_rect_height(&state->base.dst);
+	int src_w = drm_rect_width(&state->uapi.src) >> 16;
+	int src_h = drm_rect_height(&state->uapi.src) >> 16;
+	int dst_w = drm_rect_width(&state->uapi.dst);
+	int dst_h = drm_rect_height(&state->uapi.dst);
 
 	return (src_w != dst_w || src_h != dst_h);
 }
@@ -11743,12 +12124,12 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
 				    const struct intel_plane_state *old_plane_state,
 				    struct intel_plane_state *plane_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	bool mode_changed = needs_modeset(crtc_state);
-	bool was_crtc_enabled = old_crtc_state->base.active;
-	bool is_crtc_enabled = crtc_state->base.active;
+	bool was_crtc_enabled = old_crtc_state->hw.active;
+	bool is_crtc_enabled = crtc_state->hw.active;
 	bool turn_off, turn_on, visible, was_visible;
 	int ret;
 
@@ -11758,8 +12139,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
 			return ret;
 	}
 
-	was_visible = old_plane_state->base.visible;
-	visible = plane_state->base.visible;
+	was_visible = old_plane_state->uapi.visible;
+	visible = plane_state->uapi.visible;
 
 	if (!was_crtc_enabled && WARN_ON(was_visible))
 		was_visible = false;
@@ -11775,7 +12156,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
 	 * only combine the results from all planes in the current place?
 	 */
 	if (!is_crtc_enabled) {
-		plane_state->base.visible = visible = false;
+		plane_state->uapi.visible = visible = false;
 		crtc_state->active_planes &= ~BIT(plane->id);
 		crtc_state->data_rate[plane->id] = 0;
 		crtc_state->min_cdclk[plane->id] = 0;
@@ -11916,9 +12297,9 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
 
 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
+	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
 	struct intel_plane *plane, *linked;
 	struct intel_plane_state *plane_state;
 	int i;
@@ -11935,7 +12316,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
 			continue;
 
 		plane_state->planar_linked_plane = NULL;
-		if (plane_state->planar_slave && !plane_state->base.visible) {
+		if (plane_state->planar_slave && !plane_state->uapi.visible) {
 			crtc_state->active_planes &= ~BIT(plane->id);
 			crtc_state->update_planes |= BIT(plane->id);
 		}
@@ -11981,6 +12362,25 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
 		crtc_state->active_planes |= BIT(linked->id);
 		crtc_state->update_planes |= BIT(linked->id);
 		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+
+		/* Copy parameters to slave plane */
+		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
+		linked_state->color_ctl = plane_state->color_ctl;
+		memcpy(linked_state->color_plane, plane_state->color_plane,
+		       sizeof(linked_state->color_plane));
+
+		intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
+		linked_state->uapi.src = plane_state->uapi.src;
+		linked_state->uapi.dst = plane_state->uapi.dst;
+
+		if (icl_is_hdr_plane(dev_priv, plane->id)) {
+			if (linked->id == PLANE_SPRITE5)
+				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
+			else if (linked->id == PLANE_SPRITE4)
+				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
+			else
+				MISSING_CASE(linked->id);
+		}
 	}
 
 	return 0;
@@ -11988,96 +12388,129 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
 
 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct intel_atomic_state *state =
-		to_intel_atomic_state(new_crtc_state->base.state);
+		to_intel_atomic_state(new_crtc_state->uapi.state);
 	const struct intel_crtc_state *old_crtc_state =
 		intel_atomic_get_old_crtc_state(state, crtc);
 
 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
 }
 
-static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
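+/*
+ * Check whether any connector attached to this CRTC in the atomic state is
+ * the last horizontal and last vertical tile of its tile group, i.e. whether
+ * this CRTC drives the port sync master.
+ */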
+static bool
+intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = crtc_state->base.crtc;
-	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	struct drm_connector *master_connector, *connector;
+	struct drm_crtc *crtc = crtc_state->uapi.crtc;
+	struct drm_atomic_state *state = crtc_state->uapi.state;
+	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
+	int i;
+
+	for_each_new_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != crtc)
+			continue;
+		if (connector->has_tile &&
+		    connector->tile_h_loc == connector->num_h_tile - 1 &&
+		    connector->tile_v_loc == connector->num_v_tile - 1)
+			return true;
+	}
+
+	return false;
+}
+
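+/* Reset the port sync bookkeeping to its "no sync" defaults. */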
+static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state)
+{
+	crtc_state->master_transcoder = INVALID_TRANSCODER;
+	crtc_state->sync_mode_slaves_mask = 0;
+}
+
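+/*
+ * For tiled displays, make the CRTC of a slave tile point at the master CRTC
+ * (the one driving the last horizontal/vertical tile) and add the slave's
+ * transcoder to the master's sync_mode_slaves_mask.
+ */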
+static int icl_compute_port_sync_crtc_state(struct drm_connector *connector,
+					    struct intel_crtc_state *crtc_state,
+					    int num_tiled_conns)
+{
+	struct drm_crtc *crtc = crtc_state->uapi.crtc;
+	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+	struct drm_connector *master_connector;
 	struct drm_connector_list_iter conn_iter;
 	struct drm_crtc *master_crtc = NULL;
 	struct drm_crtc_state *master_crtc_state;
 	struct intel_crtc_state *master_pipe_config;
-	int i, tile_group_id;
 
 	if (INTEL_GEN(dev_priv) < 11)
 		return 0;
 
+	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
+		return 0;
+
 	/*
 	 * In case of tiled displays there could be one or more slaves but there is
 	 * only one master. Let's make the CRTC used by the connector corresponding
 	 * to the last horizontal and last vertical tile a master/genlock CRTC.
 	 * All the other CRTCs corresponding to other tiles of the same Tile group
 	 * are the slave CRTCs and hold a pointer to their genlock CRTC.
+	 * If all tiles are not present, do not make master/slave assignments.
 	 */
-	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
-		if (connector_state->crtc != crtc)
-			continue;
-		if (!connector->has_tile)
-			continue;
-		if (crtc_state->base.mode.hdisplay != connector->tile_h_size ||
-		    crtc_state->base.mode.vdisplay != connector->tile_v_size)
-			return 0;
-		if (connector->tile_h_loc == connector->num_h_tile - 1 &&
-		    connector->tile_v_loc == connector->num_v_tile - 1)
-			continue;
-		crtc_state->sync_mode_slaves_mask = 0;
-		tile_group_id = connector->tile_group->id;
-		drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
-		drm_for_each_connector_iter(master_connector, &conn_iter) {
-			struct drm_connector_state *master_conn_state = NULL;
-
-			if (!master_connector->has_tile)
-				continue;
-			if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
-			    master_connector->tile_v_loc != master_connector->num_v_tile - 1)
-				continue;
-			if (master_connector->tile_group->id != tile_group_id)
-				continue;
-
-			master_conn_state = drm_atomic_get_connector_state(&state->base,
-									   master_connector);
-			if (IS_ERR(master_conn_state)) {
-				drm_connector_list_iter_end(&conn_iter);
-				return PTR_ERR(master_conn_state);
-			}
-			if (master_conn_state->crtc) {
-				master_crtc = master_conn_state->crtc;
-				break;
-			}
-		}
-		drm_connector_list_iter_end(&conn_iter);
-
-		if (!master_crtc) {
-			DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
-				      connector_state->crtc->base.id);
-			return -EINVAL;
-		}
-
-		master_crtc_state = drm_atomic_get_crtc_state(&state->base,
-							      master_crtc);
-		if (IS_ERR(master_crtc_state))
-			return PTR_ERR(master_crtc_state);
-
-		master_pipe_config = to_intel_crtc_state(master_crtc_state);
-		crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
-		master_pipe_config->sync_mode_slaves_mask |=
-			BIT(crtc_state->cpu_transcoder);
-		DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
-			      transcoder_name(crtc_state->master_transcoder),
-			      crtc_state->base.crtc->base.id,
-			      master_pipe_config->sync_mode_slaves_mask);
+	if (!connector->has_tile ||
+	    crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
+	    crtc_state->hw.mode.vdisplay != connector->tile_v_size ||
+	    num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
+		reset_port_sync_mode_state(crtc_state);
+		return 0;
 	}
+	/*
+	 * The connector with the last horizontal and last vertical tile is the
+	 * master; the master's crtc state is already populated in the slaves
+	 * for port sync.
+	 */
+	if (connector->tile_h_loc == connector->num_h_tile - 1 &&
+	    connector->tile_v_loc == connector->num_v_tile - 1)
+		return 0;
+
+	/*
+	 * Loop through all connectors and configure the slave crtc_state
+	 * to point to the correct master.
+	 */
+	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+	drm_for_each_connector_iter(master_connector, &conn_iter) {
+		struct drm_connector_state *master_conn_state = NULL;
+
+		if (!(master_connector->has_tile &&
+		      master_connector->tile_group->id == connector->tile_group->id))
+			continue;
+		if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
+		    master_connector->tile_v_loc != master_connector->num_v_tile - 1)
+			continue;
+
+		master_conn_state = drm_atomic_get_connector_state(&state->base,
+								   master_connector);
+		if (IS_ERR(master_conn_state)) {
+			drm_connector_list_iter_end(&conn_iter);
+			return PTR_ERR(master_conn_state);
+		}
+		if (master_conn_state->crtc) {
+			master_crtc = master_conn_state->crtc;
+			break;
+		}
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	if (!master_crtc) {
+		DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
+			      crtc->base.id);
+		return -EINVAL;
+	}
+
+	master_crtc_state = drm_atomic_get_crtc_state(&state->base,
+						      master_crtc);
+	if (IS_ERR(master_crtc_state))
+		return PTR_ERR(master_crtc_state);
+
+	master_pipe_config = to_intel_crtc_state(master_crtc_state);
+	crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
+	master_pipe_config->sync_mode_slaves_mask |=
+		BIT(crtc_state->cpu_transcoder);
+	DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
+		      transcoder_name(crtc_state->master_transcoder),
+		      crtc->base.id,
+		      master_pipe_config->sync_mode_slaves_mask);
 
 	return 0;
 }
@@ -12092,10 +12525,10 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
 	int ret;
 
 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
-	    mode_changed && !crtc_state->base.active)
+	    mode_changed && !crtc_state->hw.active)
 		crtc_state->update_wm_post = true;
 
-	if (mode_changed && crtc_state->base.enable &&
+	if (mode_changed && crtc_state->hw.enable &&
 	    dev_priv->display.crtc_compute_clock &&
 	    !WARN_ON(crtc_state->shared_dpll)) {
 		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
@@ -12108,10 +12541,10 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
 	 * when C8 planes are getting enabled/disabled.
 	 */
 	if (c8_planes_changed(crtc_state))
-		crtc_state->base.color_mgmt_changed = true;
+		crtc_state->uapi.color_mgmt_changed = true;
 
 	if (mode_changed || crtc_state->update_pipe ||
-	    crtc_state->base.color_mgmt_changed) {
+	    crtc_state->uapi.color_mgmt_changed) {
 		ret = intel_color_check(crtc_state);
 		if (ret)
 			return ret;
@@ -12224,7 +12657,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
 			  struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct drm_atomic_state *state = pipe_config->base.state;
+	struct drm_atomic_state *state = pipe_config->uapi.state;
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
 	int bpp, i;
@@ -12281,7 +12714,7 @@ static void
 intel_dump_infoframe(struct drm_i915_private *dev_priv,
 		     const union hdmi_infoframe *frame)
 {
-	if ((drm_debug & DRM_UT_KMS) == 0)
+	if (!drm_debug_enabled(DRM_UT_KMS))
 		return;
 
 	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
@@ -12349,14 +12782,14 @@ static const char *output_formats(enum intel_output_format format)
 
 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	struct drm_format_name_buf format_name;
 
 	if (!fb) {
 		DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
 			      plane->base.base.id, plane->base.name,
-			      yesno(plane_state->base.visible));
+			      yesno(plane_state->uapi.visible));
 		return;
 	}
 
@@ -12364,20 +12797,20 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
 		      plane->base.base.id, plane->base.name,
 		      fb->base.id, fb->width, fb->height,
 		      drm_get_format_name(fb->format->format, &format_name),
-		      yesno(plane_state->base.visible));
+		      yesno(plane_state->uapi.visible));
 	DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
-		      plane_state->base.rotation, plane_state->scaler_id);
-	if (plane_state->base.visible)
+		      plane_state->hw.rotation, plane_state->scaler_id);
+	if (plane_state->uapi.visible)
 		DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
-			      DRM_RECT_FP_ARG(&plane_state->base.src),
-			      DRM_RECT_ARG(&plane_state->base.dst));
+			      DRM_RECT_FP_ARG(&plane_state->uapi.src),
+			      DRM_RECT_ARG(&plane_state->uapi.dst));
 }
 
 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
 				   struct intel_atomic_state *state,
 				   const char *context)
 {
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	const struct intel_plane_state *plane_state;
 	struct intel_plane *plane;
@@ -12386,14 +12819,14 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
 
 	DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
 		      crtc->base.base.id, crtc->base.name,
-		      yesno(pipe_config->base.enable), context);
+		      yesno(pipe_config->hw.enable), context);
 
-	if (!pipe_config->base.enable)
+	if (!pipe_config->hw.enable)
 		goto dump_planes;
 
 	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
 	DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
-		      yesno(pipe_config->base.active),
+		      yesno(pipe_config->hw.active),
 		      buf, pipe_config->output_types,
 		      output_formats(pipe_config->output_format));
 
@@ -12433,10 +12866,10 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
 
 	DRM_DEBUG_KMS("requested mode:\n");
-	drm_mode_debug_printmodeline(&pipe_config->base.mode);
+	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
 	DRM_DEBUG_KMS("adjusted mode:\n");
-	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
-	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
+	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
+	intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
 	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
 		      pipe_config->port_clock,
 		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
@@ -12474,6 +12907,9 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
 			      pipe_config->csc_mode, pipe_config->gamma_mode,
 			      pipe_config->gamma_enable, pipe_config->csc_enable);
 
+	DRM_DEBUG_KMS("MST master transcoder: %s\n",
+		      transcoder_name(pipe_config->mst_master_transcoder));
+
 dump_planes:
 	if (!state)
 		return;
@@ -12556,22 +12992,59 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
 	return ret;
 }
 
-static int
-clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
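+/*
+ * Copy the uapi (drm core) state into the hw state that is actually
+ * programmed; the _nomodeset variant only copies what may change without a
+ * full modeset (currently the color management blobs).
+ */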
+static void
+intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv =
-		to_i915(crtc_state->base.crtc->dev);
+	intel_crtc_copy_color_blobs(crtc_state);
+}
+
+static void
+intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
+{
+	crtc_state->hw.enable = crtc_state->uapi.enable;
+	crtc_state->hw.active = crtc_state->uapi.active;
+	crtc_state->hw.mode = crtc_state->uapi.mode;
+	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
+	intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
+}
+
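+/*
+ * Propagate the hw state back into the uapi state: enable/active, the modes
+ * and the color management blobs.
+ */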
+static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
+{
+	crtc_state->uapi.enable = crtc_state->hw.enable;
+	crtc_state->uapi.active = crtc_state->hw.active;
+	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
+
+	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
+
+	/* copy color blobs to uapi */
+	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
+				  crtc_state->hw.degamma_lut);
+	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
+				  crtc_state->hw.gamma_lut);
+	drm_property_replace_blob(&crtc_state->uapi.ctm,
+				  crtc_state->hw.ctm);
+}
+
+static int
+intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_crtc_state *saved_state;
 
-	saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
+	saved_state = intel_crtc_state_alloc(crtc);
 	if (!saved_state)
 		return -ENOMEM;
 
+	/* free the old crtc_state->hw members */
+	intel_crtc_free_hw_state(crtc_state);
+
 	/* FIXME: before the switch to atomic started, a new pipe_config was
 	 * kzalloc'd. Code that depends on any field being zero should be
 	 * fixed, so that the crtc_state can be safely duplicated. For now,
 	 * only fields that are known to not cause problems are preserved. */
 
+	saved_state->uapi = crtc_state->uapi;
 	saved_state->scaler_state = crtc_state->scaler_state;
 	saved_state->shared_dpll = crtc_state->shared_dpll;
 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
@@ -12583,37 +13056,34 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
 		saved_state->wm = crtc_state->wm;
 	/*
 	 * Save the slave bitmask which gets filled for master crtc state during
-	 * slave atomic check call.
+	 * slave atomic check call. For all other CRTCs reset the port sync
+	 * variables; crtc_state->master_transcoder needs to be set to
+	 * INVALID_TRANSCODER.
 	 */
-	if (is_trans_port_sync_master(crtc_state))
+	reset_port_sync_mode_state(saved_state);
+	if (intel_atomic_is_master_connector(crtc_state))
 		saved_state->sync_mode_slaves_mask =
 			crtc_state->sync_mode_slaves_mask;
 
-	/* Keep base drm_crtc_state intact, only clear our extended struct */
-	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
-	memcpy(&crtc_state->base + 1, &saved_state->base + 1,
-	       sizeof(*crtc_state) - sizeof(crtc_state->base));
-
+	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
 	kfree(saved_state);
+
+	intel_crtc_copy_uapi_to_hw_state(crtc_state);
+
 	return 0;
 }
 
 static int
 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
 {
-	struct drm_crtc *crtc = pipe_config->base.crtc;
-	struct drm_atomic_state *state = pipe_config->base.state;
+	struct drm_crtc *crtc = pipe_config->uapi.crtc;
+	struct drm_atomic_state *state = pipe_config->uapi.state;
 	struct intel_encoder *encoder;
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
 	int base_bpp, ret;
-	int i;
+	int i, tile_group_id = -1, num_tiled_conns = 0;
 	bool retry = true;
 
-	ret = clear_intel_crtc_state(pipe_config);
-	if (ret)
-		return ret;
-
 	pipe_config->cpu_transcoder =
 		(enum transcoder) to_intel_crtc(crtc)->pipe;
 
@@ -12622,13 +13092,13 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
 	 * positive or negative polarity is requested, treat this as meaning
 	 * negative polarity.
 	 */
-	if (!(pipe_config->base.adjusted_mode.flags &
+	if (!(pipe_config->hw.adjusted_mode.flags &
 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
-		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
 
-	if (!(pipe_config->base.adjusted_mode.flags &
+	if (!(pipe_config->hw.adjusted_mode.flags &
 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
-		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
 
 	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
 					pipe_config);
@@ -12645,7 +13115,7 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
 	 * computation to clearly distinguish it from the adjusted mode, which
 	 * can be changed by the connectors in the below retry loop.
 	 */
-	drm_mode_get_hv_timing(&pipe_config->base.mode,
+	drm_mode_get_hv_timing(&pipe_config->hw.mode,
 			       &pipe_config->pipe_src_w,
 			       &pipe_config->pipe_src_h);
 
@@ -12678,16 +13148,25 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
 	pipe_config->pixel_multiplier = 1;
 
 	/* Fill in default crtc timings, allow encoders to overwrite them. */
-	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
+	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
 			      CRTC_STEREO_DOUBLE);
 
-	/* Set the crtc_state defaults for trans_port_sync */
-	pipe_config->master_transcoder = INVALID_TRANSCODER;
-	ret = icl_add_sync_mode_crtcs(pipe_config);
-	if (ret) {
-		DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
-			      ret);
-		return ret;
+	/* Get tile_group_id of tiled connector */
+	for_each_new_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc == crtc &&
+		    connector->has_tile) {
+			tile_group_id = connector->tile_group->id;
+			break;
+		}
+	}
+
+	/*
+	 * Get the total number of tiled connectors in the state that belong
+	 * to this tile group.
+	 */
+	for_each_new_connector_in_state(state, connector, connector_state, i) {
+		if (connector->has_tile &&
+		    connector->tile_group->id == tile_group_id)
+			num_tiled_conns++;
 	}
 
 	/* Pass our mode to the connectors and the CRTC to give them a chance to
@@ -12698,6 +13177,14 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
 		if (connector_state->crtc != crtc)
 			continue;
 
+		ret = icl_compute_port_sync_crtc_state(connector, pipe_config,
+						       num_tiled_conns);
+		if (ret) {
+			DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
+				      ret);
+			return ret;
+		}
+
 		encoder = to_intel_encoder(connector_state->best_encoder);
 		ret = encoder->compute_config(encoder, pipe_config,
 					      connector_state);
@@ -12712,7 +13199,7 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
 	/* Set default port clock if not overwritten by the encoder. Needs to be
 	 * done afterwards in case the encoder adjusts the mode. */
 	if (!pipe_config->port_clock)
-		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
+		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
 			* pipe_config->pixel_multiplier;
 
 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
@@ -12741,6 +13228,12 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
 	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
+	/*
+	 * Make drm_calc_timestamping_constants in
+	 * drm_atomic_helper_update_legacy_modeset_state() happy
+	 */
+	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;
+
 	return 0;
 }
 
@@ -12819,7 +13312,7 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
 			       const union hdmi_infoframe *b)
 {
 	if (fastset) {
-		if ((drm_debug & DRM_UT_KMS) == 0)
+		if (!drm_debug_enabled(DRM_UT_KMS))
 			return;
 
 		DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
@@ -12879,13 +13372,13 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 			  const struct intel_crtc_state *pipe_config,
 			  bool fastset)
 {
-	struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev);
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	bool ret = true;
 	u32 bp_gamma = 0;
 	bool fixup_inherited = fastset &&
-		(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
-		!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
+		(current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
+		!(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
 
 	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
 		DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
@@ -13074,19 +13567,19 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 
 	PIPE_CONF_CHECK_X(output_types);
 
-	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
-	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
-	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
-	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
-	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
-	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
+	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
+	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
+	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
+	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
+	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
+	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
 
-	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
-	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
-	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
-	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
-	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
-	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
+	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
+	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
+	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
+	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
+	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
+	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
 
 	PIPE_CONF_CHECK_I(pixel_multiplier);
 	PIPE_CONF_CHECK_I(output_format);
@@ -13103,17 +13596,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 
 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
 
-	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
 			      DRM_MODE_FLAG_INTERLACE);
 
 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
-		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
 				      DRM_MODE_FLAG_PHSYNC);
-		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
 				      DRM_MODE_FLAG_NHSYNC);
-		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
 				      DRM_MODE_FLAG_PVSYNC);
-		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
 				      DRM_MODE_FLAG_NVSYNC);
 	}
 
@@ -13152,7 +13645,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 
 		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
 		if (bp_gamma)
-			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, base.gamma_lut, bp_gamma);
+			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
 
 	}
 
@@ -13197,7 +13690,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
 		PIPE_CONF_CHECK_I(pipe_bpp);
 
-	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
+	PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
 	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
 
 	PIPE_CONF_CHECK_I(min_voltage_level);
@@ -13212,6 +13705,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 	PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
 	PIPE_CONF_CHECK_I(master_transcoder);
 
+	PIPE_CONF_CHECK_I(dsc.compression_enable);
+	PIPE_CONF_CHECK_I(dsc.dsc_split);
+	PIPE_CONF_CHECK_I(dsc.compressed_bpp);
+
+	PIPE_CONF_CHECK_I(mst_master_transcoder);
+
 #undef PIPE_CONF_CHECK_X
 #undef PIPE_CONF_CHECK_I
 #undef PIPE_CONF_CHECK_BOOL
@@ -13231,7 +13730,7 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
 	if (pipe_config->has_pch_encoder) {
 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
 							    &pipe_config->fdi_m_n);
-		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
+		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
 
 		/*
 		 * FDI already provided one idea for the dotclock.
@@ -13259,7 +13758,7 @@ static void verify_wm_state(struct intel_crtc *crtc,
 	const enum pipe pipe = crtc->pipe;
 	int plane, level, max_level = ilk_wm_max_level(dev_priv);
 
-	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active)
+	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
 		return;
 
 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
@@ -13464,16 +13963,14 @@ verify_crtc_state(struct intel_crtc *crtc,
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_encoder *encoder;
-	struct intel_crtc_state *pipe_config;
-	struct drm_atomic_state *state;
+	struct intel_crtc_state *pipe_config = old_crtc_state;
+	struct drm_atomic_state *state = old_crtc_state->uapi.state;
 	bool active;
 
-	state = old_crtc_state->base.state;
-	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base);
-	pipe_config = old_crtc_state;
-	memset(pipe_config, 0, sizeof(*pipe_config));
-	pipe_config->base.crtc = &crtc->base;
-	pipe_config->base.state = state;
+	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
+	intel_crtc_free_hw_state(old_crtc_state);
+	intel_crtc_state_reset(old_crtc_state, crtc);
+	old_crtc_state->uapi.state = state;
 
 	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
 
@@ -13481,23 +13978,26 @@ verify_crtc_state(struct intel_crtc *crtc,
 
 	/* we keep both pipes enabled on 830 */
 	if (IS_I830(dev_priv))
-		active = new_crtc_state->base.active;
+		active = new_crtc_state->hw.active;
 
-	I915_STATE_WARN(new_crtc_state->base.active != active,
-	     "crtc active state doesn't match with hw state "
-	     "(expected %i, found %i)\n", new_crtc_state->base.active, active);
+	I915_STATE_WARN(new_crtc_state->hw.active != active,
+			"crtc active state doesn't match with hw state "
+			"(expected %i, found %i)\n",
+			new_crtc_state->hw.active, active);
 
-	I915_STATE_WARN(crtc->active != new_crtc_state->base.active,
-	     "transitional active state does not match atomic hw state "
-	     "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active);
+	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
+			"transitional active state does not match atomic hw state "
+			"(expected %i, found %i)\n",
+			new_crtc_state->hw.active, crtc->active);
 
 	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
 		enum pipe pipe;
 
 		active = encoder->get_hw_state(encoder, &pipe);
-		I915_STATE_WARN(active != new_crtc_state->base.active,
-			"[ENCODER:%i] active %i with crtc active %i\n",
-			encoder->base.base.id, active, new_crtc_state->base.active);
+		I915_STATE_WARN(active != new_crtc_state->hw.active,
+				"[ENCODER:%i] active %i with crtc active %i\n",
+				encoder->base.base.id, active,
+				new_crtc_state->hw.active);
 
 		I915_STATE_WARN(active && crtc->pipe != pipe,
 				"Encoder connected to wrong pipe %c\n",
@@ -13509,7 +14009,7 @@ verify_crtc_state(struct intel_crtc *crtc,
 
 	intel_crtc_compute_pixel_rate(pipe_config);
 
-	if (!new_crtc_state->base.active)
+	if (!new_crtc_state->hw.active)
 		return;
 
 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
@@ -13532,7 +14032,7 @@ intel_verify_planes(struct intel_atomic_state *state)
 	for_each_new_intel_plane_in_state(state, plane,
 					  plane_state, i)
 		assert_plane(plane, plane_state->planar_slave ||
-			     plane_state->base.visible);
+			     plane_state->uapi.visible);
 }
 
 static void
@@ -13571,7 +14071,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
 
 	crtc_mask = drm_crtc_mask(&crtc->base);
 
-	if (new_crtc_state->base.active)
+	if (new_crtc_state->hw.active)
 		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
 				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
 				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
@@ -13650,10 +14150,10 @@ intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
 static void
 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 
 	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
 
@@ -13724,7 +14224,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state)
  * multiple pipes, and planes are enabled after the pipe, we need to wait at
  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
  */
-static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
+static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
 {
 	struct intel_crtc_state *crtc_state;
 	struct intel_crtc *crtc;
@@ -13735,7 +14235,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
 
 	/* look at all crtc's that are going to be enabled in during modeset */
 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!crtc_state->base.active ||
+		if (!crtc_state->hw.active ||
 		    !needs_modeset(crtc_state))
 			continue;
 
@@ -13760,7 +14260,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
 
 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
 
-		if (!crtc_state->base.active ||
+		if (!crtc_state->hw.active ||
 		    needs_modeset(crtc_state))
 			continue;
 
@@ -13797,12 +14297,12 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		if (new_crtc_state->base.active)
+		if (new_crtc_state->hw.active)
 			state->active_pipes |= BIT(crtc->pipe);
 		else
 			state->active_pipes &= ~BIT(crtc->pipe);
 
-		if (old_crtc_state->base.active != new_crtc_state->base.active)
+		if (old_crtc_state->hw.active != new_crtc_state->hw.active)
 			state->active_pipe_changes |= BIT(crtc->pipe);
 	}
 
@@ -13819,7 +14319,7 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
 	intel_modeset_clear_plls(state);
 
 	if (IS_HASWELL(dev_priv))
-		return haswell_mode_set_planes_workaround(state);
+		return hsw_mode_set_planes_workaround(state);
 
 	return 0;
 }
@@ -13847,9 +14347,13 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
 		return;
 
-	new_crtc_state->base.mode_changed = false;
+	new_crtc_state->uapi.mode_changed = false;
 	new_crtc_state->update_pipe = true;
+}
 
+static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
+				    struct intel_crtc_state *new_crtc_state)
+{
 	/*
 	 * If we're not doing the full modeset we want to
 	 * keep the current M/N values as they may be
@@ -13972,6 +14476,107 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
 	return 0;
 }
 
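+/*
+ * Check whether the CRTC using the given CPU transcoder in this atomic state
+ * needs a full modeset.
+ */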
+static bool intel_cpu_transcoder_needs_modeset(struct intel_atomic_state *state,
+					       enum transcoder transcoder)
+{
+	struct intel_crtc_state *new_crtc_state;
+	struct intel_crtc *crtc;
+	int i;
+
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+		if (new_crtc_state->cpu_transcoder == transcoder)
+			return needs_modeset(new_crtc_state);
+
+	return false;
+}
+
+static void
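+/*
+ * Force a full modeset (and drop any pending fastset) on every CRTC whose
+ * CPU transcoder is part of the given transcoder mask.
+ */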
+intel_modeset_synced_crtcs(struct intel_atomic_state *state,
+			   u8 transcoders)
+{
+	struct intel_crtc_state *new_crtc_state;
+	struct intel_crtc *crtc;
+	int i;
+
+	for_each_new_intel_crtc_in_state(state, crtc,
+					 new_crtc_state, i) {
+		if (transcoders & BIT(new_crtc_state->cpu_transcoder)) {
+			new_crtc_state->uapi.mode_changed = true;
+			new_crtc_state->update_pipe = false;
+		}
+	}
+}
+
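+/*
+ * Pull every connector of the given tile group, and the CRTC it is attached
+ * to, into the atomic state and mark those CRTCs as needing a modeset.
+ */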
+static int
+intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+	int ret = 0;
+
+	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		struct drm_connector_state *conn_state;
+		struct drm_crtc_state *crtc_state;
+
+		if (!connector->has_tile ||
+		    connector->tile_group->id != tile_grp_id)
+			continue;
+		conn_state = drm_atomic_get_connector_state(&state->base,
+							    connector);
+		if (IS_ERR(conn_state)) {
+			ret =  PTR_ERR(conn_state);
+			break;
+		}
+
+		if (!conn_state->crtc)
+			continue;
+
+		crtc_state = drm_atomic_get_crtc_state(&state->base,
+						       conn_state->crtc);
+		if (IS_ERR(crtc_state)) {
+			ret = PTR_ERR(crtc_state);
+			break;
+		}
+		crtc_state->mode_changed = true;
+		ret = drm_atomic_add_affected_connectors(&state->base,
+							 conn_state->crtc);
+		if (ret)
+			break;
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	return ret;
+}
+
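+/*
+ * On gen11+, whenever a tiled connector changes in this state, add all the
+ * other connectors of the same tile group (and their CRTCs) so that the
+ * whole tile group is modeset together.
+ */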
+static int
+intel_atomic_check_tiled_conns(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_connector *connector;
+	struct drm_connector_state *old_conn_state, *new_conn_state;
+	int i, ret;
+
+	if (INTEL_GEN(dev_priv) < 11)
+		return 0;
+
+	/* If a connector is tiled, mark all other tiled CRTCs as needing a modeset */
+	for_each_oldnew_connector_in_state(&state->base, connector,
+					   old_conn_state, new_conn_state, i) {
+		if (!connector->has_tile)
+			continue;
+		if (!intel_connector_needs_modeset(state, connector))
+			continue;
+
+		ret = intel_modeset_all_tiles(state, connector->tile_group->id);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 /**
  * intel_atomic_check - validate state object
  * @dev: drm device
@@ -13990,33 +14595,96 @@ static int intel_atomic_check(struct drm_device *dev,
 	/* Catch I915_MODE_FLAG_INHERITED */
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		if (new_crtc_state->base.mode.private_flags !=
-		    old_crtc_state->base.mode.private_flags)
-			new_crtc_state->base.mode_changed = true;
+		if (new_crtc_state->hw.mode.private_flags !=
+		    old_crtc_state->hw.mode.private_flags)
+			new_crtc_state->uapi.mode_changed = true;
 	}
 
 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
 	if (ret)
 		goto fail;
 
+	/*
+	 * This check adds all the connectors in the current state that belong
+	 * to the same tile group to a full modeset. It sets mode_changed
+	 * directly and also calls drm_atomic_add_affected_connectors(), hence
+	 * we don't explicitly call drm_atomic_helper_check_modeset() again
+	 * after this.
+	 *
+	 * FIXME: Handle the corner case where one of the tiled connectors gets
+	 * disconnected and its tile info is lost, but since it was previously
+	 * synced to the other connectors it still needs to be added to the
+	 * modeset.
+	 */
+	ret = intel_atomic_check_tiled_conns(state);
+	if (ret)
+		goto fail;
+
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		if (!needs_modeset(new_crtc_state))
-			continue;
+		if (!needs_modeset(new_crtc_state)) {
+			/* Light copy */
+			intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
 
-		if (!new_crtc_state->base.enable) {
-			any_ms = true;
 			continue;
 		}
 
+		if (!new_crtc_state->uapi.enable) {
+			intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
+			continue;
+		}
+
+		ret = intel_crtc_prepare_cleared_state(new_crtc_state);
+		if (ret)
+			goto fail;
+
 		ret = intel_modeset_pipe_config(new_crtc_state);
 		if (ret)
 			goto fail;
 
 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
+	}
 
-		if (needs_modeset(new_crtc_state))
+	/*
+	 * Check if fastset is allowed by external dependencies like other
+	 * pipes and transcoders.
+	 *
+	 * Right now it only forces a full modeset when the MST master
+	 * transcoder did not change but the pipe of the master transcoder
+	 * needs a full modeset, in which case all the slaves also need a
+	 * full modeset. Similarly, for port synced crtcs, if one of the
+	 * synced crtcs needs a full modeset, all the other synced crtcs
+	 * should be forced to do a full modeset as well.
+	 */
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+		if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
+			continue;
+
+		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
+			enum transcoder master = new_crtc_state->mst_master_transcoder;
+
+			if (intel_cpu_transcoder_needs_modeset(state, master)) {
+				new_crtc_state->uapi.mode_changed = true;
+				new_crtc_state->update_pipe = false;
+			}
+		} else if (is_trans_port_sync_mode(new_crtc_state)) {
+			u8 trans = new_crtc_state->sync_mode_slaves_mask |
+				   BIT(new_crtc_state->master_transcoder);
+
+			intel_modeset_synced_crtcs(state, trans);
+		}
+	}
+
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		if (needs_modeset(new_crtc_state)) {
 			any_ms = true;
+			continue;
+		}
+
+		if (!new_crtc_state->update_pipe)
+			continue;
+
+		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
 	}
 
 	if (any_ms && !check_digital_port_conflicts(state)) {
@@ -14106,7 +14774,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	if (!IS_GEN(dev_priv, 2))
+	if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
 
 	if (crtc_state->has_pch_encoder) {
@@ -14120,7 +14788,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
 			       const struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	/*
@@ -14138,12 +14806,12 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
 		skl_detach_scalers(new_crtc_state);
 
 		if (new_crtc_state->pch_pfit.enabled)
-			skylake_pfit_enable(new_crtc_state);
+			skl_pfit_enable(new_crtc_state);
 	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		if (new_crtc_state->pch_pfit.enabled)
-			ironlake_pfit_enable(new_crtc_state);
+			ilk_pfit_enable(new_crtc_state);
 		else if (old_crtc_state->pch_pfit.enabled)
-			ironlake_pfit_disable(old_crtc_state);
+			ilk_pfit_disable(old_crtc_state);
 	}
 
 	if (INTEL_GEN(dev_priv) >= 11)
@@ -14154,6 +14822,7 @@ static void commit_pipe_config(struct intel_atomic_state *state,
 			       struct intel_crtc_state *old_crtc_state,
 			       struct intel_crtc_state *new_crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	bool modeset = needs_modeset(new_crtc_state);
 
@@ -14162,7 +14831,7 @@ static void commit_pipe_config(struct intel_atomic_state *state,
 	 * CRTC was enabled.
 	 */
 	if (!modeset) {
-		if (new_crtc_state->base.color_mgmt_changed ||
+		if (new_crtc_state->uapi.color_mgmt_changed ||
 		    new_crtc_state->update_pipe)
 			intel_color_commit(new_crtc_state);
 
@@ -14177,8 +14846,7 @@ static void commit_pipe_config(struct intel_atomic_state *state,
 	}
 
 	if (dev_priv->display.atomic_update_watermarks)
-		dev_priv->display.atomic_update_watermarks(state,
-							   new_crtc_state);
+		dev_priv->display.atomic_update_watermarks(state, crtc);
 }
 
 static void intel_update_crtc(struct intel_crtc *crtc,
@@ -14195,20 +14863,20 @@ static void intel_update_crtc(struct intel_crtc *crtc,
 	if (modeset) {
 		intel_crtc_update_active_timings(new_crtc_state);
 
-		dev_priv->display.crtc_enable(new_crtc_state, state);
+		dev_priv->display.crtc_enable(state, crtc);
 
 		/* vblanks work again, re-enable pipe CRC. */
 		intel_crtc_enable_pipe_crc(crtc);
 	} else {
 		if (new_crtc_state->preload_luts &&
-		    (new_crtc_state->base.color_mgmt_changed ||
+		    (new_crtc_state->uapi.color_mgmt_changed ||
 		     new_crtc_state->update_pipe))
 			intel_color_load_luts(new_crtc_state);
 
-		intel_pre_plane_update(old_crtc_state, new_crtc_state);
+		intel_pre_plane_update(state, crtc);
 
 		if (new_crtc_state->update_pipe)
-			intel_encoders_update_pipe(crtc, new_crtc_state, state);
+			intel_encoders_update_pipe(state, crtc);
 	}
 
 	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
@@ -14235,13 +14903,13 @@ static void intel_update_crtc(struct intel_crtc *crtc,
 	 * of enabling them on the CRTC's first fastset.
 	 */
 	if (new_crtc_state->update_pipe && !modeset &&
-	    old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+	    old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
 }
 
 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
 	enum transcoder slave_transcoder;
 
 	WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
@@ -14266,97 +14934,60 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
 	 */
 	intel_crtc_disable_pipe_crc(crtc);
 
-	dev_priv->display.crtc_disable(old_crtc_state, state);
+	dev_priv->display.crtc_disable(state, crtc);
 	crtc->active = false;
 	intel_fbc_disable(crtc);
 	intel_disable_shared_dpll(old_crtc_state);
 
-	/*
-	 * Underruns don't always raise interrupts,
-	 * so check manually.
-	 */
-	intel_check_cpu_fifo_underruns(dev_priv);
-	intel_check_pch_fifo_underruns(dev_priv);
-
 	/* FIXME unify this for all platforms */
-	if (!new_crtc_state->base.active &&
+	if (!new_crtc_state->hw.active &&
 	    !HAS_GMCH(dev_priv) &&
 	    dev_priv->display.initial_watermarks)
-		dev_priv->display.initial_watermarks(state,
-						     new_crtc_state);
-}
-
-static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state,
-						   struct intel_crtc *crtc,
-						   struct intel_crtc_state *old_crtc_state,
-						   struct intel_crtc_state *new_crtc_state)
-{
-	struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
-	struct intel_crtc_state *new_slave_crtc_state =
-		intel_atomic_get_new_crtc_state(state, slave_crtc);
-	struct intel_crtc_state *old_slave_crtc_state =
-		intel_atomic_get_old_crtc_state(state, slave_crtc);
-
-	WARN_ON(!slave_crtc || !new_slave_crtc_state ||
-		!old_slave_crtc_state);
-
-	/* Disable Slave first */
-	intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state);
-	if (old_slave_crtc_state->base.active)
-		intel_old_crtc_state_disables(state,
-					      old_slave_crtc_state,
-					      new_slave_crtc_state,
-					      slave_crtc);
-
-	/* Disable Master */
-	intel_pre_plane_update(old_crtc_state, new_crtc_state);
-	if (old_crtc_state->base.active)
-		intel_old_crtc_state_disables(state,
-					      old_crtc_state,
-					      new_crtc_state,
-					      crtc);
+		dev_priv->display.initial_watermarks(state, crtc);
 }
 
 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
 {
 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
 	struct intel_crtc *crtc;
+	u32 handled = 0;
 	int i;
 
-	/*
-	 * Disable CRTC/pipes in reverse order because some features(MST in
-	 * TGL+) requires master and slave relationship between pipes, so it
-	 * should always pick the lowest pipe as master as it will be enabled
-	 * first and disable in the reverse order so the master will be the
-	 * last one to be disabled.
-	 */
-	for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
-						    new_crtc_state, i) {
+	/* Only disable port sync and MST slaves */
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
 		if (!needs_modeset(new_crtc_state))
 			continue;
 
+		if (!old_crtc_state->hw.active)
+			continue;
+
 		/* In case of Transcoder port Sync master slave CRTCs can be
 		 * assigned in any order and we need to make sure that
 		 * slave CRTCs are disabled first and then master CRTC since
 		 * Slave vblanks are masked till Master Vblanks.
 		 */
-		if (is_trans_port_sync_mode(new_crtc_state)) {
-			if (is_trans_port_sync_master(new_crtc_state))
-				intel_trans_port_sync_modeset_disables(state,
-								       crtc,
-								       old_crtc_state,
-								       new_crtc_state);
-			else
-				continue;
-		} else {
-			intel_pre_plane_update(old_crtc_state, new_crtc_state);
+		if (!is_trans_port_sync_slave(old_crtc_state) &&
+		    !intel_dp_mst_is_slave_trans(old_crtc_state))
+			continue;
 
-			if (old_crtc_state->base.active)
-				intel_old_crtc_state_disables(state,
-							      old_crtc_state,
-							      new_crtc_state,
-							      crtc);
-		}
+		intel_pre_plane_update(state, crtc);
+		intel_old_crtc_state_disables(state, old_crtc_state,
+					      new_crtc_state, crtc);
+		handled |= BIT(crtc->pipe);
+	}
+
+	/* Disable everything else left on */
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		if (!needs_modeset(new_crtc_state) ||
+		    (handled & BIT(crtc->pipe)))
+			continue;
+
+		intel_pre_plane_update(state, crtc);
+		if (old_crtc_state->hw.active)
+			intel_old_crtc_state_disables(state, old_crtc_state,
+						      new_crtc_state, crtc);
 	}
 }
 
@@ -14367,7 +14998,7 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state)
 	int i;
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-		if (!new_crtc_state->base.active)
+		if (!new_crtc_state->hw.active)
 			continue;
 
 		intel_update_crtc(crtc, state, old_crtc_state,
@@ -14382,7 +15013,7 @@ static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 
 	intel_crtc_update_active_timings(new_crtc_state);
-	dev_priv->display.crtc_enable(new_crtc_state, state);
+	dev_priv->display.crtc_enable(state, crtc);
 	intel_crtc_enable_pipe_crc(crtc);
 }
 
@@ -14398,10 +15029,14 @@ static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
 		if (conn_state->crtc == &crtc->base)
 			break;
 	}
-	intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
+	intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(conn)));
 	intel_dp_stop_link_train(intel_dp);
 }
 
+/*
+ * TODO: This is only called from port sync and it is identical to what will be
+ * executed again in intel_update_crtc() over port sync pipes
+ */
 static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
 					   struct intel_atomic_state *state)
 {
@@ -14432,7 +15067,7 @@ static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
 	 * of enabling them on the CRTC's first fastset.
 	 */
 	if (new_crtc_state->update_pipe && !modeset &&
-	    old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+	    old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
 }
 
@@ -14487,17 +15122,25 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct intel_crtc *crtc;
 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
-	unsigned int updated = 0;
-	bool progress;
-	int i;
 	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
 	u8 required_slices = state->wm_results.ddb.enabled_slices;
 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+	const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
+	u8 update_pipes = 0, modeset_pipes = 0;
+	int i;
 
-	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		if (!new_crtc_state->hw.active)
+			continue;
+
 		/* ignore allocations for crtc's that have been turned off. */
-		if (new_crtc_state->base.active)
+		if (!needs_modeset(new_crtc_state)) {
 			entries[i] = old_crtc_state->wm.skl.ddb;
+			update_pipes |= BIT(crtc->pipe);
+		} else {
+			modeset_pipes |= BIT(crtc->pipe);
+		}
+	}
 
 	/* If 2nd DBuf slice required, enable it here */
 	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
@@ -14506,27 +15149,29 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
 	/*
 	 * Whenever the number of active pipes changes, we need to make sure we
 	 * update the pipes in the right order so that their ddb allocations
-	 * never overlap with eachother inbetween CRTC updates. Otherwise we'll
+	 * never overlap with each other between CRTC updates. Otherwise we'll
 	 * cause pipe underruns and other bad stuff.
+	 *
+	 * So first let's enable all pipes that do not need a full modeset, as
+	 * those don't have any external dependency.
 	 */
-	do {
-		progress = false;
-
-		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+	while (update_pipes) {
+		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+						    new_crtc_state, i) {
 			enum pipe pipe = crtc->pipe;
-			bool vbl_wait = false;
-			bool modeset = needs_modeset(new_crtc_state);
 
-			if (updated & BIT(crtc->pipe) || !new_crtc_state->base.active)
+			if ((update_pipes & BIT(pipe)) == 0)
 				continue;
 
 			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
-							entries,
-							INTEL_NUM_PIPES(dev_priv), i))
+							entries, num_pipes, i))
 				continue;
 
-			updated |= BIT(pipe);
 			entries[i] = new_crtc_state->wm.skl.ddb;
+			update_pipes &= ~BIT(pipe);
+
+			intel_update_crtc(crtc, state, old_crtc_state,
+					  new_crtc_state);
 
 			/*
 			 * If this is an already active pipe, it's DDB changed,
@@ -14536,29 +15181,71 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
 			 */
 			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
 						 &old_crtc_state->wm.skl.ddb) &&
-			    !modeset &&
-			    state->wm_results.dirty_pipes != updated)
-				vbl_wait = true;
-
-			if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
-				if (is_trans_port_sync_master(new_crtc_state))
-					intel_update_trans_port_sync_crtcs(crtc,
-									   state,
-									   old_crtc_state,
-									   new_crtc_state);
-				else
-					continue;
-			} else {
-				intel_update_crtc(crtc, state, old_crtc_state,
-						  new_crtc_state);
-			}
-
-			if (vbl_wait)
+			    (update_pipes | modeset_pipes))
 				intel_wait_for_vblank(dev_priv, pipe);
-
-			progress = true;
 		}
-	} while (progress);
+	}
+
+	/*
+	 * Enable all pipes that need a modeset and do not depend on other
+	 * pipes
+	 */
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		enum pipe pipe = crtc->pipe;
+
+		if ((modeset_pipes & BIT(pipe)) == 0)
+			continue;
+
+		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
+		    is_trans_port_sync_slave(new_crtc_state))
+			continue;
+
+		WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+						    entries, num_pipes, i));
+
+		entries[i] = new_crtc_state->wm.skl.ddb;
+		modeset_pipes &= ~BIT(pipe);
+
+		if (is_trans_port_sync_mode(new_crtc_state)) {
+			struct intel_crtc *slave_crtc;
+
+			intel_update_trans_port_sync_crtcs(crtc, state,
+							   old_crtc_state,
+							   new_crtc_state);
+
+			slave_crtc = intel_get_slave_crtc(new_crtc_state);
+			/* TODO: update entries[] of slave */
+			modeset_pipes &= ~BIT(slave_crtc->pipe);
+
+		} else {
+			intel_update_crtc(crtc, state, old_crtc_state,
+					  new_crtc_state);
+		}
+	}
+
+	/*
+	 * Finally, enable all pipes that need a modeset and depend on
+	 * other pipes; right now these are only MST slaves, as both port sync
+	 * slave and master are enabled together
+	 */
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		enum pipe pipe = crtc->pipe;
+
+		if ((modeset_pipes & BIT(pipe)) == 0)
+			continue;
+
+		WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+						    entries, num_pipes, i));
+
+		entries[i] = new_crtc_state->wm.skl.ddb;
+		modeset_pipes &= ~BIT(pipe);
+
+		intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
+	}
+
+	WARN_ON(modeset_pipes);
 
 	/* If 2nd DBuf slice is no more required disable it */
 	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
@@ -14679,12 +15366,13 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 		bool modeset = needs_modeset(new_crtc_state);
 
 		/* Complete events for now disable pipes here. */
-		if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) {
+		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
 			spin_lock_irq(&dev->event_lock);
-			drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event);
+			drm_crtc_send_vblank_event(&crtc->base,
+						   new_crtc_state->uapi.event);
 			spin_unlock_irq(&dev->event_lock);
 
-			new_crtc_state->base.event = NULL;
+			new_crtc_state->uapi.event = NULL;
 		}
 	}
 
@@ -14715,10 +15403,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 	drm_atomic_helper_wait_for_flip_done(dev, &state->base);
 
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
-		if (new_crtc_state->base.active &&
+		if (new_crtc_state->hw.active &&
 		    !needs_modeset(new_crtc_state) &&
 		    !new_crtc_state->preload_luts &&
-		    (new_crtc_state->base.color_mgmt_changed ||
+		    (new_crtc_state->uapi.color_mgmt_changed ||
 		     new_crtc_state->update_pipe))
 			intel_color_load_luts(new_crtc_state);
 	}
@@ -14730,14 +15418,25 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 	 *
 	 * TODO: Move this (and other cleanup) to an async worker eventually.
 	 */
-	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		/*
+		 * Gen2 reports pipe underruns whenever all planes are disabled.
+		 * So re-enable underrun reporting after some planes get enabled.
+		 *
+		 * We do this before .optimize_watermarks() so that we have a
+		 * chance of catching underruns with the intermediate watermarks
+		 * vs. the new plane configuration.
+		 */
+		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
+			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+
 		if (dev_priv->display.optimize_watermarks)
-			dev_priv->display.optimize_watermarks(state,
-							      new_crtc_state);
+			dev_priv->display.optimize_watermarks(state, crtc);
 	}
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-		intel_post_plane_update(old_crtc_state);
+		intel_post_plane_update(state, crtc);
 
 		if (put_domains[i])
 			modeset_put_power_domains(dev_priv, put_domains[i]);
@@ -14745,6 +15444,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
 	}
 
+	/* Underruns don't always raise interrupts, so check manually */
+	intel_check_cpu_fifo_underruns(dev_priv);
+	intel_check_pch_fifo_underruns(dev_priv);
+
 	if (state->modeset)
 		intel_verify_planes(state);
 
@@ -14818,8 +15521,8 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
 
 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
 					     new_plane_state, i)
-		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
-					to_intel_frontbuffer(new_plane_state->base.fb),
+		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+					to_intel_frontbuffer(new_plane_state->hw.fb),
 					plane->frontbuffer_bit);
 }
 
@@ -14986,9 +15689,9 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
 
 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	struct drm_framebuffer *fb = plane_state->base.fb;
+	struct drm_framebuffer *fb = plane_state->hw.fb;
 	struct i915_vma *vma;
 
 	if (plane->id == PLANE_CURSOR &&
@@ -15051,9 +15754,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	struct intel_plane_state *new_plane_state =
 		to_intel_plane_state(_new_plane_state);
 	struct intel_atomic_state *intel_state =
-		to_intel_atomic_state(new_plane_state->base.state);
+		to_intel_atomic_state(new_plane_state->uapi.state);
 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
-	struct drm_framebuffer *fb = new_plane_state->base.fb;
+	struct drm_framebuffer *fb = new_plane_state->hw.fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
 	int ret;
@@ -15084,9 +15787,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 		}
 	}
 
-	if (new_plane_state->base.fence) { /* explicit fencing */
+	if (new_plane_state->uapi.fence) { /* explicit fencing */
 		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
-						    new_plane_state->base.fence,
+						    new_plane_state->uapi.fence,
 						    I915_FENCE_TIMEOUT,
 						    GFP_KERNEL);
 		if (ret < 0)
@@ -15109,7 +15812,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	fb_obj_bump_render_priority(obj);
 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
 
-	if (!new_plane_state->base.fence) { /* implicit fencing */
+	if (!new_plane_state->uapi.fence) { /* implicit fencing */
 		struct dma_fence *fence;
 
 		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
@@ -15121,13 +15824,13 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 
 		fence = dma_resv_get_excl_rcu(obj->base.resv);
 		if (fence) {
-			add_rps_boost_after_vblank(new_plane_state->base.crtc,
+			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
 						   fence);
 			dma_fence_put(fence);
 		}
 	} else {
-		add_rps_boost_after_vblank(new_plane_state->base.crtc,
-					   new_plane_state->base.fence);
+		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+					   new_plane_state->uapi.fence);
 	}
 
 	/*
@@ -15160,7 +15863,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
 	struct intel_plane_state *old_plane_state =
 		to_intel_plane_state(_old_plane_state);
 	struct intel_atomic_state *intel_state =
-		to_intel_atomic_state(old_plane_state->base.state);
+		to_intel_atomic_state(old_plane_state->uapi.state);
 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
 
 	if (intel_state->rps_interactive) {
@@ -15224,8 +15927,12 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
 	case DRM_FORMAT_RGB565:
 	case DRM_FORMAT_XRGB8888:
 	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
 	case DRM_FORMAT_XRGB2101010:
 	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
 	case DRM_FORMAT_XBGR16161616F:
 		return modifier == DRM_FORMAT_MOD_LINEAR ||
 			modifier == I915_FORMAT_MOD_X_TILED;
@@ -15283,7 +15990,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
 	 * When crtc is inactive or there is a modeset pending,
 	 * wait for it to complete in the slowpath
 	 */
-	if (!crtc_state->base.active || needs_modeset(crtc_state) ||
+	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
 	    crtc_state->update_pipe)
 		goto slow;
 
@@ -15292,8 +15999,8 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
 	 * the plane.  This prevents our async update's changes from getting
 	 * overridden by a previous synchronous update's state.
 	 */
-	if (old_plane_state->base.commit &&
-	    !try_wait_for_completion(&old_plane_state->base.commit->hw_done))
+	if (old_plane_state->uapi.commit &&
+	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
 		goto slow;
 
 	/*
@@ -15301,12 +16008,12 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
 	 * take the slowpath. Only changing fb or position should be
 	 * in the fastpath.
 	 */
-	if (old_plane_state->base.crtc != &crtc->base ||
-	    old_plane_state->base.src_w != src_w ||
-	    old_plane_state->base.src_h != src_h ||
-	    old_plane_state->base.crtc_w != crtc_w ||
-	    old_plane_state->base.crtc_h != crtc_h ||
-	    !old_plane_state->base.fb != !fb)
+	if (old_plane_state->uapi.crtc != &crtc->base ||
+	    old_plane_state->uapi.src_w != src_w ||
+	    old_plane_state->uapi.src_h != src_h ||
+	    old_plane_state->uapi.crtc_w != crtc_w ||
+	    old_plane_state->uapi.crtc_h != crtc_h ||
+	    !old_plane_state->uapi.fb != !fb)
 		goto slow;
 
 	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
@@ -15319,16 +16026,16 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
 		goto out_free;
 	}
 
-	drm_atomic_set_fb_for_plane(&new_plane_state->base, fb);
+	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
 
-	new_plane_state->base.src_x = src_x;
-	new_plane_state->base.src_y = src_y;
-	new_plane_state->base.src_w = src_w;
-	new_plane_state->base.src_h = src_h;
-	new_plane_state->base.crtc_x = crtc_x;
-	new_plane_state->base.crtc_y = crtc_y;
-	new_plane_state->base.crtc_w = crtc_w;
-	new_plane_state->base.crtc_h = crtc_h;
+	new_plane_state->uapi.src_x = src_x;
+	new_plane_state->uapi.src_y = src_y;
+	new_plane_state->uapi.src_w = src_w;
+	new_plane_state->uapi.src_h = src_h;
+	new_plane_state->uapi.crtc_x = crtc_x;
+	new_plane_state->uapi.crtc_y = crtc_y;
+	new_plane_state->uapi.crtc_w = crtc_w;
+	new_plane_state->uapi.crtc_h = crtc_h;
 
 	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
 						  old_plane_state, new_plane_state);
@@ -15339,13 +16046,14 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
 	if (ret)
 		goto out_free;
 
-	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->base.fb), ORIGIN_FLIP);
-	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
-				to_intel_frontbuffer(new_plane_state->base.fb),
+	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
+				ORIGIN_FLIP);
+	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+				to_intel_frontbuffer(new_plane_state->hw.fb),
 				plane->frontbuffer_bit);
 
 	/* Swap plane state */
-	plane->base.state = &new_plane_state->base;
+	plane->base.state = &new_plane_state->uapi;
 
 	/*
 	 * We cannot swap crtc_state as it may be in use by an atomic commit or
@@ -15359,7 +16067,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
 	 */
 	crtc_state->active_planes = new_crtc_state->active_planes;
 
-	if (new_plane_state->base.visible)
+	if (new_plane_state->uapi.visible)
 		intel_update_plane(plane, crtc_state, new_plane_state);
 	else
 		intel_disable_plane(plane, crtc_state);
@@ -15368,11 +16076,11 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
 
 out_free:
 	if (new_crtc_state)
-		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->base);
+		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
 	if (ret)
-		intel_plane_destroy_state(&plane->base, &new_plane_state->base);
+		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
 	else
-		intel_plane_destroy_state(&plane->base, &old_plane_state->base);
+		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
 	return ret;
 
 slow:
@@ -15414,7 +16122,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 	const struct drm_plane_funcs *plane_funcs;
 	unsigned int supported_rotations;
 	unsigned int possible_crtcs;
-	const u64 *modifiers;
 	const u32 *formats;
 	int num_formats;
 	int ret, zpos;
@@ -15446,7 +16153,10 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
 	}
 
-	if (INTEL_GEN(dev_priv) >= 4) {
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		formats = vlv_primary_formats;
+		num_formats = ARRAY_SIZE(vlv_primary_formats);
+	} else if (INTEL_GEN(dev_priv) >= 4) {
 		/*
 		 * WaFP16GammaEnabling:ivb
 		 * "Workaround : When using the 64-bit format, the plane
@@ -15467,51 +16177,45 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 			formats = i965_primary_formats;
 			num_formats = ARRAY_SIZE(i965_primary_formats);
 		}
-		modifiers = i9xx_format_modifiers;
-
-		plane->max_stride = i9xx_plane_max_stride;
-		plane->update_plane = i9xx_update_plane;
-		plane->disable_plane = i9xx_disable_plane;
-		plane->get_hw_state = i9xx_plane_get_hw_state;
-		plane->check_plane = i9xx_plane_check;
-
-		if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
-			plane->min_cdclk = hsw_plane_min_cdclk;
-		else if (IS_IVYBRIDGE(dev_priv))
-			plane->min_cdclk = ivb_plane_min_cdclk;
-		else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv))
-			plane->min_cdclk = vlv_plane_min_cdclk;
-		else
-			plane->min_cdclk = i9xx_plane_min_cdclk;
-
-		plane_funcs = &i965_plane_funcs;
 	} else {
 		formats = i8xx_primary_formats;
 		num_formats = ARRAY_SIZE(i8xx_primary_formats);
-		modifiers = i9xx_format_modifiers;
+	}
 
-		plane->max_stride = i9xx_plane_max_stride;
-		plane->update_plane = i9xx_update_plane;
-		plane->disable_plane = i9xx_disable_plane;
-		plane->get_hw_state = i9xx_plane_get_hw_state;
-		plane->check_plane = i9xx_plane_check;
+	if (INTEL_GEN(dev_priv) >= 4)
+		plane_funcs = &i965_plane_funcs;
+	else
+		plane_funcs = &i8xx_plane_funcs;
+
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		plane->min_cdclk = vlv_plane_min_cdclk;
+	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+		plane->min_cdclk = hsw_plane_min_cdclk;
+	else if (IS_IVYBRIDGE(dev_priv))
+		plane->min_cdclk = ivb_plane_min_cdclk;
+	else
 		plane->min_cdclk = i9xx_plane_min_cdclk;
 
-		plane_funcs = &i8xx_plane_funcs;
-	}
+	plane->max_stride = i9xx_plane_max_stride;
+	plane->update_plane = i9xx_update_plane;
+	plane->disable_plane = i9xx_disable_plane;
+	plane->get_hw_state = i9xx_plane_get_hw_state;
+	plane->check_plane = i9xx_plane_check;
 
 	possible_crtcs = BIT(pipe);
 
 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
 					       possible_crtcs, plane_funcs,
-					       formats, num_formats, modifiers,
+					       formats, num_formats,
+					       i9xx_format_modifiers,
 					       DRM_PLANE_TYPE_PRIMARY,
 					       "primary %c", pipe_name(pipe));
 	else
 		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
 					       possible_crtcs, plane_funcs,
-					       formats, num_formats, modifiers,
+					       formats, num_formats,
+					       i9xx_format_modifiers,
 					       DRM_PLANE_TYPE_PRIMARY,
 					       "plane %c",
 					       plane_name(plane->i9xx_plane));
@@ -15615,28 +16319,6 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
 	return ERR_PTR(ret);
 }
 
-static void intel_crtc_init_scalers(struct intel_crtc *crtc,
-				    struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc_scaler_state *scaler_state =
-		&crtc_state->scaler_state;
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	int i;
-
-	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
-	if (!crtc->num_scalers)
-		return;
-
-	for (i = 0; i < crtc->num_scalers; i++) {
-		struct intel_scaler *scaler = &scaler_state->scalers[i];
-
-		scaler->in_use = 0;
-		scaler->mode = 0;
-	}
-
-	scaler_state->scaler_id = -1;
-}
-
 #define INTEL_CRTC_FUNCS \
 	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
 	.set_config = drm_atomic_helper_set_config, \
@@ -15704,33 +16386,53 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
 	.disable_vblank = i8xx_disable_vblank,
 };
 
+static struct intel_crtc *intel_crtc_alloc(void)
+{
+	struct intel_crtc_state *crtc_state;
+	struct intel_crtc *crtc;
+
+	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+	if (!crtc)
+		return ERR_PTR(-ENOMEM);
+
+	crtc_state = intel_crtc_state_alloc(crtc);
+	if (!crtc_state) {
+		kfree(crtc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	crtc->base.state = &crtc_state->uapi;
+	crtc->config = crtc_state;
+
+	return crtc;
+}
+
+static void intel_crtc_free(struct intel_crtc *crtc)
+{
+	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
+	kfree(crtc);
+}
+
 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
+	struct intel_plane *primary, *cursor;
 	const struct drm_crtc_funcs *funcs;
-	struct intel_crtc *intel_crtc;
-	struct intel_crtc_state *crtc_state = NULL;
-	struct intel_plane *primary = NULL;
-	struct intel_plane *cursor = NULL;
+	struct intel_crtc *crtc;
 	int sprite, ret;
 
-	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
-	if (!intel_crtc)
-		return -ENOMEM;
+	crtc = intel_crtc_alloc();
+	if (IS_ERR(crtc))
+		return PTR_ERR(crtc);
 
-	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
-	if (!crtc_state) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-	__drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
-	intel_crtc->config = crtc_state;
+	crtc->pipe = pipe;
+	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
 
 	primary = intel_primary_plane_create(dev_priv, pipe);
 	if (IS_ERR(primary)) {
 		ret = PTR_ERR(primary);
 		goto fail;
 	}
-	intel_crtc->plane_ids_mask |= BIT(primary->id);
+	crtc->plane_ids_mask |= BIT(primary->id);
 
 	for_each_sprite(dev_priv, pipe, sprite) {
 		struct intel_plane *plane;
@@ -15740,7 +16442,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
 			ret = PTR_ERR(plane);
 			goto fail;
 		}
-		intel_crtc->plane_ids_mask |= BIT(plane->id);
+		crtc->plane_ids_mask |= BIT(plane->id);
 	}
 
 	cursor = intel_cursor_plane_create(dev_priv, pipe);
@@ -15748,7 +16450,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
 		ret = PTR_ERR(cursor);
 		goto fail;
 	}
-	intel_crtc->plane_ids_mask |= BIT(cursor->id);
+	crtc->plane_ids_mask |= BIT(cursor->id);
 
 	if (HAS_GMCH(dev_priv)) {
 		if (IS_CHERRYVIEW(dev_priv) ||
@@ -15769,42 +16471,32 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
 			funcs = &ilk_crtc_funcs;
 	}
 
-	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
+	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
 					&primary->base, &cursor->base,
 					funcs, "pipe %c", pipe_name(pipe));
 	if (ret)
 		goto fail;
 
-	intel_crtc->pipe = pipe;
-
-	/* initialize shared scalers */
-	intel_crtc_init_scalers(intel_crtc, crtc_state);
-
 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
 	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
-	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
+	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
 
 	if (INTEL_GEN(dev_priv) < 9) {
 		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
 
 		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
 		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
-		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
+		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
 	}
 
-	intel_color_init(intel_crtc);
+	intel_color_init(crtc);
 
-	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
+	WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);
 
 	return 0;
 
 fail:
-	/*
-	 * drm_mode_config_cleanup() will free up any
-	 * crtcs/planes already initialized.
-	 */
-	kfree(crtc_state);
-	kfree(intel_crtc);
+	intel_crtc_free(crtc);
 
 	return ret;
 }
@@ -16291,8 +16983,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 	}
 
 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
-	if (mode_cmd->offsets[0] != 0)
+	if (mode_cmd->offsets[0] != 0) {
+		DRM_DEBUG_KMS("plane 0 offset (0x%08x) must be 0\n",
+			      mode_cmd->offsets[0]);
 		goto err;
+	}
 
 	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
 
@@ -16305,26 +17000,23 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 		}
 
 		stride_alignment = intel_fb_stride_alignment(fb, i);
-
-		/*
-		 * Display WA #0531: skl,bxt,kbl,glk
-		 *
-		 * Render decompression and plane width > 3840
-		 * combined with horizontal panning requires the
-		 * plane stride to be a multiple of 4. We'll just
-		 * require the entire fb to accommodate that to avoid
-		 * potential runtime errors at plane configuration time.
-		 */
-		if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
-		    is_ccs_modifier(fb->modifier))
-			stride_alignment *= 4;
-
 		if (fb->pitches[i] & (stride_alignment - 1)) {
 			DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
 				      i, fb->pitches[i], stride_alignment);
 			goto err;
 		}
 
+		if (is_gen12_ccs_plane(fb, i)) {
+			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
+
+			if (fb->pitches[i] != ccs_aux_stride) {
+				DRM_DEBUG_KMS("ccs aux plane %d pitch (%d) must be %d\n",
+					      i,
+					      fb->pitches[i], ccs_aux_stride);
+				goto err;
+			}
+		}
+
 		fb->obj[i] = &obj->base;
 	}
 
@@ -16522,29 +17214,28 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
 	intel_init_cdclk_hooks(dev_priv);
 
 	if (INTEL_GEN(dev_priv) >= 9) {
-		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
 		dev_priv->display.get_initial_plane_config =
-			skylake_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock =
-			haswell_crtc_compute_clock;
-		dev_priv->display.crtc_enable = haswell_crtc_enable;
-		dev_priv->display.crtc_disable = haswell_crtc_disable;
+			skl_get_initial_plane_config;
+		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
+		dev_priv->display.crtc_enable = hsw_crtc_enable;
+		dev_priv->display.crtc_disable = hsw_crtc_disable;
 	} else if (HAS_DDI(dev_priv)) {
-		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
 		dev_priv->display.get_initial_plane_config =
 			i9xx_get_initial_plane_config;
 		dev_priv->display.crtc_compute_clock =
-			haswell_crtc_compute_clock;
-		dev_priv->display.crtc_enable = haswell_crtc_enable;
-		dev_priv->display.crtc_disable = haswell_crtc_disable;
+			hsw_crtc_compute_clock;
+		dev_priv->display.crtc_enable = hsw_crtc_enable;
+		dev_priv->display.crtc_disable = hsw_crtc_disable;
 	} else if (HAS_PCH_SPLIT(dev_priv)) {
-		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
 		dev_priv->display.get_initial_plane_config =
 			i9xx_get_initial_plane_config;
 		dev_priv->display.crtc_compute_clock =
-			ironlake_crtc_compute_clock;
-		dev_priv->display.crtc_enable = ironlake_crtc_enable;
-		dev_priv->display.crtc_disable = ironlake_crtc_disable;
+			ilk_crtc_compute_clock;
+		dev_priv->display.crtc_enable = ilk_crtc_enable;
+		dev_priv->display.crtc_disable = ilk_crtc_disable;
 	} else if (IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
 		dev_priv->display.get_initial_plane_config =
@@ -16590,14 +17281,12 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
 	}
 
 	if (IS_GEN(dev_priv, 5)) {
-		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
 	} else if (IS_GEN(dev_priv, 6)) {
 		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
 	} else if (IS_IVYBRIDGE(dev_priv)) {
 		/* FIXME: detect B0+ stepping and use auto training */
 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
 	}
 
 	if (INTEL_GEN(dev_priv) >= 9)
@@ -16687,7 +17376,7 @@ static void sanitize_watermarks(struct drm_device *dev)
 	/* Write calculated watermark values back */
 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
 		crtc_state->wm.need_postvbl_update = true;
-		dev_priv->display.optimize_watermarks(intel_state, crtc_state);
+		dev_priv->display.optimize_watermarks(intel_state, crtc);
 
 		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
 	}
@@ -16719,8 +17408,7 @@ static int intel_initial_commit(struct drm_device *dev)
 {
 	struct drm_atomic_state *state = NULL;
 	struct drm_modeset_acquire_ctx ctx;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
+	struct intel_crtc *crtc;
 	int ret = 0;
 
 	state = drm_atomic_state_alloc(dev);
@@ -16732,15 +17420,17 @@ static int intel_initial_commit(struct drm_device *dev)
 retry:
 	state->acquire_ctx = &ctx;
 
-	drm_for_each_crtc(crtc, dev) {
-		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	for_each_intel_crtc(dev, crtc) {
+		struct intel_crtc_state *crtc_state =
+			intel_atomic_get_crtc_state(state, crtc);
+
 		if (IS_ERR(crtc_state)) {
 			ret = PTR_ERR(crtc_state);
 			goto out;
 		}
 
-		if (crtc_state->active) {
-			ret = drm_atomic_add_affected_planes(state, crtc);
+		if (crtc_state->hw.active) {
+			ret = drm_atomic_add_affected_planes(state, &crtc->base);
 			if (ret)
 				goto out;
 
@@ -16750,7 +17440,7 @@ static int intel_initial_commit(struct drm_device *dev)
 			 * having a proper LUT loaded. Remove once we
 			 * have readout for pipe gamma enable.
 			 */
-			crtc_state->color_mgmt_changed = true;
+			crtc_state->uapi.color_mgmt_changed = true;
 		}
 	}
 
@@ -17073,31 +17763,76 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
 }
 
+static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+	if (INTEL_GEN(dev_priv) >= 9 ||
+	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
+		u32 val;
+
+		if (transcoder_is_dsi(cpu_transcoder))
+			return;
+
+		val = I915_READ(reg);
+		val &= ~HSW_FRAME_START_DELAY_MASK;
+		val |= HSW_FRAME_START_DELAY(0);
+		I915_WRITE(reg, val);
+	} else {
+		i915_reg_t reg = PIPECONF(cpu_transcoder);
+		u32 val;
+
+		val = I915_READ(reg);
+		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
+		val |= PIPECONF_FRAME_START_DELAY(0);
+		I915_WRITE(reg, val);
+	}
+
+	if (!crtc_state->has_pch_encoder)
+		return;
+
+	if (HAS_PCH_IBX(dev_priv)) {
+		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
+		u32 val;
+
+		val = I915_READ(reg);
+		val &= ~TRANS_FRAME_START_DELAY_MASK;
+		val |= TRANS_FRAME_START_DELAY(0);
+		I915_WRITE(reg, val);
+	} else {
+		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
+		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
+		u32 val;
+
+		val = I915_READ(reg);
+		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+		I915_WRITE(reg, val);
+	}
+}
+
 static void intel_sanitize_crtc(struct intel_crtc *crtc,
 				struct drm_modeset_acquire_ctx *ctx)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
-	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
-	/* Clear any frame start delays used for debugging left by the BIOS */
-	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
-		i915_reg_t reg = PIPECONF(cpu_transcoder);
-
-		I915_WRITE(reg,
-			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
-	}
-
-	if (crtc_state->base.active) {
+	if (crtc_state->hw.active) {
 		struct intel_plane *plane;
 
+		/* Clear any frame start delays used for debugging left by the BIOS */
+		intel_sanitize_frame_start_delay(crtc_state);
+
 		/* Disable everything but the primary plane */
 		for_each_intel_plane_on_crtc(dev, crtc, plane) {
 			const struct intel_plane_state *plane_state =
 				to_intel_plane_state(plane->base.state);
 
-			if (plane_state->base.visible &&
+			if (plane_state->uapi.visible &&
 			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
 				intel_plane_disable_noatomic(crtc, plane);
 		}
@@ -17114,10 +17849,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 
 	/* Adjust the state of the output pipe according to whether we
 	 * have active connectors/encoders. */
-	if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
-		intel_crtc_disable_noatomic(&crtc->base, ctx);
+	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
+		intel_crtc_disable_noatomic(crtc, ctx);
 
-	if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
+	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
 		/*
 		 * We start out with underrun reporting disabled to avoid races.
 		 * For correct bookkeeping mark this on active crtcs.
@@ -17148,7 +17883,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 
 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 
 	/*
 	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
@@ -17161,7 +17896,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
 	 * road.
 	 */
 	return IS_GEN(dev_priv, 6) &&
-		crtc_state->base.active &&
+		crtc_state->hw.active &&
 		crtc_state->shared_dpll &&
 		crtc_state->port_clock == 0;
 }
@@ -17178,7 +17913,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 	 * encoder is active and trying to read from a pipe) and the
 	 * pipe itself being active. */
 	bool has_active_crtc = crtc_state &&
-		crtc_state->base.active;
+		crtc_state->hw.active;
 
 	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
 		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
@@ -17282,22 +18017,22 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 		struct intel_crtc_state *crtc_state =
 			to_intel_crtc_state(crtc->base.state);
 
-		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
-		memset(crtc_state, 0, sizeof(*crtc_state));
-		__drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
+		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
+		intel_crtc_free_hw_state(crtc_state);
+		intel_crtc_state_reset(crtc_state, crtc);
 
-		crtc_state->base.active = crtc_state->base.enable =
+		crtc_state->hw.active = crtc_state->hw.enable =
 			dev_priv->display.get_pipe_config(crtc, crtc_state);
 
-		crtc->base.enabled = crtc_state->base.enable;
-		crtc->active = crtc_state->base.active;
+		crtc->base.enabled = crtc_state->hw.enable;
+		crtc->active = crtc_state->hw.active;
 
-		if (crtc_state->base.active)
+		if (crtc_state->hw.active)
 			dev_priv->active_pipes |= BIT(crtc->pipe);
 
 		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
 			      crtc->base.base.id, crtc->base.name,
-			      enableddisabled(crtc_state->base.active));
+			      enableddisabled(crtc_state->hw.active));
 	}
 
 	readout_plane_state(dev_priv);
@@ -17319,7 +18054,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 			struct intel_crtc_state *crtc_state =
 				to_intel_crtc_state(crtc->base.state);
 
-			if (crtc_state->base.active &&
+			if (crtc_state->hw.active &&
 			    crtc_state->shared_dpll == pll)
 				pll->state.crtc_mask |= 1 << crtc->pipe;
 		}
@@ -17364,15 +18099,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 			crtc = to_intel_crtc(encoder->base.crtc);
 			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
 
-			if (crtc_state && crtc_state->base.active) {
+			if (crtc_state && crtc_state->hw.active) {
 				/*
 				 * This has to be done during hardware readout
 				 * because anything calling .crtc_disable may
 				 * rely on the connector_mask being accurate.
 				 */
-				crtc_state->base.connector_mask |=
+				crtc_state->uapi.connector_mask |=
 					drm_connector_mask(&connector->base);
-				crtc_state->base.encoder_mask |=
+				crtc_state->uapi.encoder_mask |=
 					drm_encoder_mask(&encoder->base);
 			}
 		} else {
@@ -17393,16 +18128,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 		struct intel_plane *plane;
 		int min_cdclk = 0;
 
-		if (crtc_state->base.active) {
-			struct drm_display_mode mode;
+		if (crtc_state->hw.active) {
+			struct drm_display_mode *mode = &crtc_state->hw.mode;
 
-			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode,
+			intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
 						    crtc_state);
 
-			mode = crtc_state->base.adjusted_mode;
-			mode.hdisplay = crtc_state->pipe_src_w;
-			mode.vdisplay = crtc_state->pipe_src_h;
-			WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->base, &mode));
+			*mode = crtc_state->hw.adjusted_mode;
+			mode->hdisplay = crtc_state->pipe_src_w;
+			mode->vdisplay = crtc_state->pipe_src_h;
 
 			/*
 			 * The initial mode needs to be set in order to keep
@@ -17413,11 +18147,13 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 			 * set a flag to indicate that a full recalculation is
 			 * needed on the next commit.
 			 */
-			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
+			mode->private_flags = I915_MODE_FLAG_INHERITED;
 
 			intel_crtc_compute_pixel_rate(crtc_state);
 
 			intel_crtc_update_active_timings(crtc_state);
+
+			intel_crtc_copy_hw_to_uapi_state(crtc_state);
 		}
 
 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
@@ -17428,14 +18164,14 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 			 * FIXME don't have the fb yet, so can't
 			 * use intel_plane_data_rate() :(
 			 */
-			if (plane_state->base.visible)
+			if (plane_state->uapi.visible)
 				crtc_state->data_rate[plane->id] =
 					4 * crtc_state->pixel_rate;
 			/*
 			 * FIXME don't have the fb yet, so can't
 			 * use plane->min_cdclk() :(
 			 */
-			if (plane_state->base.visible && plane->min_cdclk) {
+			if (plane_state->uapi.visible && plane->min_cdclk) {
 				if (crtc_state->double_wide ||
 				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 					crtc_state->min_cdclk[plane->id] =
@@ -17449,7 +18185,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 				      crtc_state->min_cdclk[plane->id]);
 		}
 
-		if (crtc_state->base.active) {
+		if (crtc_state->hw.active) {
 			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
 			if (WARN_ON(min_cdclk < 0))
 				min_cdclk = 0;
@@ -17490,8 +18226,11 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
 
 static void intel_early_display_was(struct drm_i915_private *dev_priv)
 {
-	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
-	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+	/*
+	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
+	 * Also known as Wa_14010480278.
+	 */
+	if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
 		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
 			   DARBF_GATING_DIS);
 
@@ -17572,7 +18311,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 			     struct drm_modeset_acquire_ctx *ctx)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc_state *crtc_state;
 	struct intel_encoder *encoder;
 	struct intel_crtc *crtc;
 	intel_wakeref_t wakeref;
@@ -17592,7 +18330,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 		/* We need to sanitize only the MST primary port. */
 		if (encoder->type != INTEL_OUTPUT_DP_MST &&
 		    intel_phy_is_tc(dev_priv, phy))
-			intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
+			intel_tc_port_sanitize(enc_to_dig_port(encoder));
 	}
 
 	get_encoder_power_domains(dev_priv);
@@ -17605,11 +18343,12 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 	 * waits, so we need vblank interrupts restored beforehand.
 	 */
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		crtc_state = to_intel_crtc_state(crtc->base.state);
+		struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
 
 		drm_crtc_vblank_reset(&crtc->base);
 
-		if (crtc_state->base.active)
+		if (crtc_state->hw.active)
 			intel_crtc_vblank_on(crtc_state);
 	}
 
@@ -17619,7 +18358,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 		intel_sanitize_encoder(encoder);
 
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		crtc_state = to_intel_crtc_state(crtc->base.state);
+		struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
+
 		intel_sanitize_crtc(crtc, ctx);
 		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
 	}
@@ -17652,17 +18393,16 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 	}
 
 	for_each_intel_crtc(dev, crtc) {
+		struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
 		u64 put_domains;
 
-		crtc_state = to_intel_crtc_state(crtc->base.state);
 		put_domains = modeset_get_crtc_power_domains(crtc_state);
 		if (WARN_ON(put_domains))
 			modeset_put_power_domains(dev_priv, put_domains);
 	}
 
 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
-
-	intel_fbc_init_pipe_state(dev_priv);
 }
 
 void intel_display_resume(struct drm_device *dev)
@@ -17738,6 +18478,13 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
 	 */
 	intel_hpd_poll_fini(i915);
 
+	/*
+	 * MST topology needs to be suspended so we don't have any calls to
+	 * fbdev after it's finalized. MST will be destroyed later as part of
+	 * drm_mode_config_cleanup()
+	 */
+	intel_dp_mst_suspend(i915);
+
 	/* poll work can call into fbdev, hence clean that up afterwards */
 	intel_fbdev_fini(i915);
 
@@ -17756,6 +18503,8 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
 
 	intel_gmbus_teardown(i915);
 
+	intel_bw_cleanup(i915);
+
 	destroy_workqueue(i915->flip_wq);
 	destroy_workqueue(i915->modeset_wq);
 
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index f417e09..028aab7 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -332,8 +332,11 @@ enum phy_fia {
 	     (__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)];	\
 	     (__s)++)
 
-#define for_each_port_masked(__port, __ports_mask) \
-	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)	\
+#define for_each_port(__port) \
+	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)
+
+#define for_each_port_masked(__port, __ports_mask)			\
+	for_each_port(__port)						\
 		for_each_if((__ports_mask) & BIT(__port))
 
 #define for_each_phy_masked(__phy, __phys_mask) \
@@ -377,6 +380,13 @@ enum phy_fia {
 			    &(dev)->mode_config.encoder_list,	\
 			    base.head)
 
+#define for_each_intel_encoder_mask(dev, intel_encoder, encoder_mask)	\
+	list_for_each_entry(intel_encoder,				\
+			    &(dev)->mode_config.encoder_list,		\
+			    base.head)					\
+		for_each_if((encoder_mask) &				\
+			    drm_encoder_mask(&intel_encoder->base))
+
 #define for_each_intel_dp(dev, intel_encoder)			\
 	for_each_intel_encoder(dev, intel_encoder)		\
 		for_each_if(intel_encoder_is_dp(intel_encoder))
@@ -446,16 +456,25 @@ enum phy_fia {
 #define intel_atomic_crtc_state_for_each_plane_state( \
 		  plane, plane_state, \
 		  crtc_state) \
-	for_each_intel_plane_mask(((crtc_state)->base.state->dev), (plane), \
-				((crtc_state)->base.plane_mask)) \
+	for_each_intel_plane_mask(((crtc_state)->uapi.state->dev), (plane), \
+				((crtc_state)->uapi.plane_mask)) \
 		for_each_if ((plane_state = \
-			      to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->base.state, &plane->base))))
+			      to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->uapi.state, &plane->base))))
+
+#define for_each_new_intel_connector_in_state(__state, connector, new_connector_state, __i) \
+	for ((__i) = 0; \
+	     (__i) < (__state)->base.num_connector; \
+	     (__i)++) \
+		for_each_if ((__state)->base.connectors[__i].ptr && \
+			     ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
+			     (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
 
 void intel_link_compute_m_n(u16 bpp, int nlanes,
 			    int pixel_clock, int link_clock,
 			    struct intel_link_m_n *m_n,
 			    bool constant_n, bool fec_enable);
 bool is_ccs_modifier(u64 modifier);
+int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane);
 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
 			      u32 pixel_format, u64 modifier);
@@ -467,6 +486,7 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
 bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
 
 void intel_plane_destroy(struct drm_plane *plane);
+void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state);
 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
@@ -499,11 +519,10 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
 			      enum port port);
 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file_priv);
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
-					     enum pipe pipe);
 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
 
-int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
 			 struct intel_digital_port *dport,
 			 unsigned int expected_mask);
@@ -547,7 +566,6 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
 			struct dpll *best_clock);
 int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
 
-bool intel_crtc_active(struct intel_crtc *crtc);
 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
 void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
 void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
@@ -561,6 +579,8 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
 
 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
 			const struct intel_plane_state *plane_state);
 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
@@ -582,6 +602,10 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv);
 void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 				     struct intel_display_error_state *error);
 
+bool
+intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
+				    uint64_t modifier);
+
 /* modesetting */
 void intel_modeset_init_hw(struct drm_i915_private *i915);
 int intel_modeset_init(struct drm_i915_private *i915);
@@ -603,9 +627,10 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
 		       enum pipe pipe, bool state);
 #define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
 #define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
-#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
-#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+void assert_pipe(struct drm_i915_private *dev_priv,
+		 enum transcoder cpu_transcoder, bool state);
+#define assert_pipe_enabled(d, t) assert_pipe(d, t, true)
+#define assert_pipe_disabled(d, t) assert_pipe(d, t, false)
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 12ba747..21561ac 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -418,7 +418,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 	int pw_idx = power_well->desc->hsw.idx;
 	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
 	u32 val;
-	int wa_idx_max;
+
+	WARN_ON(!IS_ICELAKE(dev_priv));
 
 	val = I915_READ(regs->driver);
 	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
@@ -430,14 +431,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 
 	hsw_wait_for_power_well_enable(dev_priv, power_well);
 
-	/* Display WA #1178: icl, tgl */
-	if (IS_TIGERLAKE(dev_priv))
-		wa_idx_max = ICL_PW_CTL_IDX_AUX_C;
-	else
-		wa_idx_max = ICL_PW_CTL_IDX_AUX_B;
-
-	if (!IS_ELKHARTLAKE(dev_priv) &&
-	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max &&
+	/* Display WA #1178: icl */
+	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
 	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
 		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
 		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
@@ -454,10 +449,10 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
 	u32 val;
 
-	if (INTEL_GEN(dev_priv) < 12) {
-		val = I915_READ(ICL_PORT_CL_DW12(phy));
-		I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
-	}
+	WARN_ON(!IS_ICELAKE(dev_priv));
+
+	val = I915_READ(ICL_PORT_CL_DW12(phy));
+	I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
 
 	val = I915_READ(regs->driver);
 	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
@@ -519,7 +514,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
 		if (encoder->type == INTEL_OUTPUT_DP_MST)
 			continue;
 
-		dig_port = enc_to_dig_port(&encoder->base);
+		dig_port = enc_to_dig_port(encoder);
 		if (WARN_ON(!dig_port))
 			continue;
 
@@ -1669,8 +1664,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
-	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
+	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
 
 	mutex_lock(&power_domains->lock);
 
@@ -4928,6 +4923,56 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
 	intel_combo_phy_uninit(dev_priv);
 }
 
+struct buddy_page_mask {
+	u32 page_mask;
+	u8 type;
+	u8 num_channels;
+};
+
+static const struct buddy_page_mask tgl_buddy_page_masks[] = {
+	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
+	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
+	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
+	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
+	{}
+};
+
+static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
+	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
+	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
+	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
+	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
+	{}
+};
+
+static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
+{
+	enum intel_dram_type type = dev_priv->dram_info.type;
+	u8 num_channels = dev_priv->dram_info.num_channels;
+	const struct buddy_page_mask *table;
+	int i;
+
+	if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+		/* Wa_1409767108: tgl */
+		table = wa_1409767108_buddy_page_masks;
+	else
+		table = tgl_buddy_page_masks;
+
+	for (i = 0; table[i].page_mask != 0; i++)
+		if (table[i].num_channels == num_channels &&
+		    table[i].type == type)
+			break;
+
+	if (table[i].page_mask == 0) {
+		DRM_DEBUG_DRIVER("Unknown memory configuration; disabling address buddy logic.\n");
+		I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
+		I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
+	} else {
+		I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask);
+		I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask);
+	}
+}
+
 static void icl_display_core_init(struct drm_i915_private *dev_priv,
 				  bool resume)
 {
@@ -4960,6 +5005,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
 	/* 6. Setup MBUS. */
 	icl_mbus_init(dev_priv);
 
+	/* 7. Program arbiter BW_BUDDY registers */
+	if (INTEL_GEN(dev_priv) >= 12)
+		tgl_bw_buddy_init(dev_priv);
+
 	if (resume && dev_priv->csr.dmc_payload)
 		intel_csr_load_program(dev_priv);
 }
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 1da04f3e..2608a65a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -28,7 +28,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_TRANSCODER_C,
 	POWER_DOMAIN_TRANSCODER_D,
 	POWER_DOMAIN_TRANSCODER_EDP,
-	/* VDSC/joining for TRANSCODER_EDP (ICL) or TRANSCODER_A (TGL) */
+	/* VDSC/joining for eDP/DSI transcoder (ICL) or pipe A (TGL) */
 	POWER_DOMAIN_TRANSCODER_VDSC_PW2,
 	POWER_DOMAIN_TRANSCODER_DSI_A,
 	POWER_DOMAIN_TRANSCODER_DSI_C,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 1a7334d..888ea8a1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -90,8 +90,8 @@ struct intel_framebuffer {
 	/* for each plane in the normal GTT view */
 	struct {
 		unsigned int x, y;
-	} normal[2];
-	/* for each plane in the rotated GTT view */
+	} normal[4];
+	/* for each plane in the rotated GTT view for no-CCS formats */
 	struct {
 		unsigned int x, y;
 		unsigned int pitch; /* pixels */
@@ -523,7 +523,24 @@ struct intel_atomic_state {
 };
 
 struct intel_plane_state {
-	struct drm_plane_state base;
+	struct drm_plane_state uapi;
+
+	/*
+	 * actual hardware state, the state we program to the hardware.
+	 * The following members are used to verify the hardware state:
+	 * During initial hw readout, they need to be copied from uapi.
+	 */
+	struct {
+		struct drm_crtc *crtc;
+		struct drm_framebuffer *fb;
+
+		u16 alpha;
+		u16 pixel_blend_mode;
+		unsigned int rotation;
+		enum drm_color_encoding color_encoding;
+		enum drm_color_range color_range;
+	} hw;
+
 	struct i915_ggtt_view view;
 	struct i915_vma *vma;
 	unsigned long flags;
@@ -538,7 +555,7 @@ struct intel_plane_state {
 		 */
 		u32 stride;
 		int x, y;
-	} color_plane[2];
+	} color_plane[4];
 
 	/* plane control register */
 	u32 ctl;
@@ -546,6 +563,9 @@ struct intel_plane_state {
 	/* plane color control register */
 	u32 color_ctl;
 
+	/* chroma upsampler control register */
+	u32 cus_ctl;
+
 	/*
 	 * scaler_id
 	 *    = -1 : not using a scaler
@@ -757,7 +777,33 @@ enum intel_output_format {
 };
 
 struct intel_crtc_state {
-	struct drm_crtc_state base;
+	/*
+	 * uapi (drm) state. This is the software state shown to userspace.
+	 * In particular, the following members are used for bookkeeping:
+	 * - crtc
+	 * - state
+	 * - *_changed
+	 * - event
+	 * - commit
+	 * - mode_blob
+	 */
+	struct drm_crtc_state uapi;
+
+	/*
+	 * actual hardware state, the state we program to the hardware.
+	 * The following members are used to verify the hardware state:
+	 * - enable
+	 * - active
+	 * - mode / adjusted_mode
+	 * - color property blobs.
+	 *
+	 * During initial hw readout, they need to be copied to uapi.
+	 */
+	struct {
+		bool active, enable;
+		struct drm_property_blob *degamma_lut, *gamma_lut, *ctm;
+		struct drm_display_mode mode, adjusted_mode;
+	} hw;
 
 	/**
 	 * quirks - bitfield with hw state readout quirks
@@ -1008,6 +1054,9 @@ struct intel_crtc_state {
 
 	/* Bitmask to indicate slaves attached */
 	u8 sync_mode_slaves_mask;
+
+	/* Only valid on TGL+ */
+	enum transcoder mst_master_transcoder;
 };
 
 struct intel_crtc {
@@ -1080,9 +1129,6 @@ struct intel_plane {
 	void (*update_plane)(struct intel_plane *plane,
 			     const struct intel_crtc_state *crtc_state,
 			     const struct intel_plane_state *plane_state);
-	void (*update_slave)(struct intel_plane *plane,
-			     const struct intel_crtc_state *crtc_state,
-			     const struct intel_plane_state *plane_state);
 	void (*disable_plane)(struct intel_plane *plane,
 			      const struct intel_crtc_state *crtc_state);
 	bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
@@ -1113,12 +1159,12 @@ struct cxsr_latency {
 
 #define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
+#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi)
 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
 #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 #define to_intel_plane(x) container_of(x, struct intel_plane, base)
-#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
+#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, uapi)
 #define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
 
 struct intel_hdmi {
@@ -1392,9 +1438,9 @@ struct intel_load_detect_pipe {
 };
 
 static inline struct intel_encoder *
-intel_attached_encoder(struct drm_connector *connector)
+intel_attached_encoder(struct intel_connector *connector)
 {
-	return to_intel_connector(connector)->encoder;
+	return connector->encoder;
 }
 
 static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
@@ -1411,12 +1457,12 @@ static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
 }
 
 static inline struct intel_digital_port *
-enc_to_dig_port(struct drm_encoder *encoder)
+enc_to_dig_port(struct intel_encoder *encoder)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+	struct intel_encoder *intel_encoder = encoder;
 
 	if (intel_encoder_is_dig_port(intel_encoder))
-		return container_of(encoder, struct intel_digital_port,
+		return container_of(&encoder->base, struct intel_digital_port,
 				    base.base);
 	else
 		return NULL;
@@ -1425,16 +1471,17 @@ enc_to_dig_port(struct drm_encoder *encoder)
 static inline struct intel_digital_port *
 conn_to_dig_port(struct intel_connector *connector)
 {
-	return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+	return enc_to_dig_port(intel_attached_encoder(connector));
 }
 
 static inline struct intel_dp_mst_encoder *
-enc_to_mst(struct drm_encoder *encoder)
+enc_to_mst(struct intel_encoder *encoder)
 {
-	return container_of(encoder, struct intel_dp_mst_encoder, base.base);
+	return container_of(&encoder->base, struct intel_dp_mst_encoder,
+			    base.base);
 }
 
-static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
 {
 	return &enc_to_dig_port(encoder)->dp;
 }
@@ -1447,14 +1494,14 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
 		return true;
 	case INTEL_OUTPUT_DDI:
 		/* Skip pure HDMI/DVI DDI encoders */
-		return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg);
+		return i915_mmio_reg_valid(enc_to_intel_dp(encoder)->output_reg);
 	default:
 		return false;
 	}
 }
 
 static inline struct intel_lspcon *
-enc_to_intel_lspcon(struct drm_encoder *encoder)
+enc_to_intel_lspcon(struct intel_encoder *encoder)
 {
 	return &enc_to_dig_port(encoder)->lspcon;
 }
@@ -1528,6 +1575,24 @@ intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
 								 &crtc->base));
 }
 
+static inline struct intel_digital_connector_state *
+intel_atomic_get_new_connector_state(struct intel_atomic_state *state,
+				     struct intel_connector *connector)
+{
+	return to_intel_digital_connector_state(
+			drm_atomic_get_new_connector_state(&state->base,
+			&connector->base));
+}
+
+static inline struct intel_digital_connector_state *
+intel_atomic_get_old_connector_state(struct intel_atomic_state *state,
+				     struct intel_connector *connector)
+{
+	return to_intel_digital_connector_state(
+			drm_atomic_get_old_connector_state(&state->base,
+			&connector->base));
+}
+
 /* intel_display.c */
 static inline bool
 intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index b05b219..c7424e2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -146,9 +146,9 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
 	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
 }
 
-static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+static struct intel_dp *intel_attached_dp(struct intel_connector *connector)
 {
-	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+	return enc_to_intel_dp(intel_attached_encoder(connector));
 }
 
 static void intel_dp_link_down(struct intel_encoder *encoder,
@@ -614,7 +614,7 @@ static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
 {
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
@@ -834,7 +834,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
 	 * Pick one that's not used by other ports.
 	 */
 	for_each_intel_dp(&dev_priv->drm, encoder) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 		if (encoder->type == INTEL_OUTPUT_EDP) {
 			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
@@ -1031,7 +1031,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
 	 */
 
 	for_each_intel_dp(&dev_priv->drm, encoder) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
 
@@ -1814,7 +1814,7 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
 {
 	char str[128]; /* FIXME: too big for stack? */
 
-	if ((drm_debug & DRM_UT_KMS) == 0)
+	if (!drm_debug_enabled(DRM_UT_KMS))
 		return;
 
 	snprintf_int_array(str, sizeof(str),
@@ -1889,32 +1889,15 @@ static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
 		drm_dp_sink_supports_fec(intel_dp->fec_capable);
 }
 
-static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
-					 const struct intel_crtc_state *pipe_config)
-{
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
-	if (!INTEL_INFO(dev_priv)->display.has_dsc)
-		return false;
-
-	/* On TGL, DSC is supported on all Pipes */
-	if (INTEL_GEN(dev_priv) >= 12)
-		return true;
-
-	if (INTEL_GEN(dev_priv) >= 10 &&
-	    pipe_config->cpu_transcoder != TRANSCODER_A)
-		return true;
-
-	return false;
-}
-
 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
-				  const struct intel_crtc_state *pipe_config)
+				  const struct intel_crtc_state *crtc_state)
 {
-	if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+	if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable)
 		return false;
 
-	return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
+	return intel_dsc_source_support(encoder, crtc_state) &&
 		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
 }
 
@@ -1999,7 +1982,7 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
 				  struct intel_crtc_state *pipe_config,
 				  const struct link_config_limits *limits)
 {
-	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	int bpp, clock, lane_count;
 	int mode_rate, link_clock, link_avail;
 
@@ -2046,6 +2029,63 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
 	return 0;
 }
 
+#define DSC_SUPPORTED_VERSION_MIN		1
+
+static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
+				       struct intel_crtc_state *crtc_state)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+	u8 line_buf_depth;
+	int ret;
+
+	ret = intel_dsc_compute_params(encoder, crtc_state);
+	if (ret)
+		return ret;
+
+	/*
+	 * Slice Height of 8 works for all currently available panels. So start
+	 * with that if pic_height is an integral multiple of 8. Eventually add
+	 * logic to try multiple slice heights.
+	 */
+	if (vdsc_cfg->pic_height % 8 == 0)
+		vdsc_cfg->slice_height = 8;
+	else if (vdsc_cfg->pic_height % 4 == 0)
+		vdsc_cfg->slice_height = 4;
+	else
+		vdsc_cfg->slice_height = 2;
+
+	vdsc_cfg->dsc_version_major =
+		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
+	vdsc_cfg->dsc_version_minor =
+		min(DSC_SUPPORTED_VERSION_MIN,
+		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
+
+	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
+		DP_DSC_RGB;
+
+	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
+	if (!line_buf_depth) {
+		DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+		return -EINVAL;
+	}
+
+	if (vdsc_cfg->dsc_version_minor == 2)
+		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
+			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
+	else
+		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
+			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
+
+	vdsc_cfg->block_pred_enable =
+		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
+		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
+
+	return drm_dsc_compute_rc_parameters(vdsc_cfg);
+}
+
 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
 				       struct intel_crtc_state *pipe_config,
 				       struct drm_connector_state *conn_state,
@@ -2053,7 +2093,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	u8 dsc_max_bpc;
 	int pipe_bpp;
 	int ret;
@@ -2132,7 +2172,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
 		}
 	}
 
-	ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
+	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
 	if (ret < 0) {
 		DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
 			      "Compressed BPP = %d\n",
@@ -2164,8 +2204,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 			     struct intel_crtc_state *pipe_config,
 			     struct drm_connector_state *conn_state)
 {
-	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct link_config_limits limits;
 	int common_len;
 	int ret;
@@ -2252,8 +2292,8 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
 {
 	const struct drm_display_info *info = &connector->display_info;
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+		&crtc_state->hw.adjusted_mode;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	int ret;
 
 	if (!drm_mode_is_420_only(info, adjusted_mode) ||
@@ -2281,7 +2321,7 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
 	const struct intel_digital_connector_state *intel_conn_state =
 		to_intel_digital_connector_state(conn_state);
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 
 	/*
 	 * Our YCbCr output is always limited range.
@@ -2308,17 +2348,28 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
 	}
 }
 
+static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
+				    enum port port)
+{
+	if (IS_G4X(dev_priv))
+		return false;
+	if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
+		return false;
+
+	return true;
+}
+
 int
 intel_dp_compute_config(struct intel_encoder *encoder,
 			struct intel_crtc_state *pipe_config,
 			struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
 	enum port port = encoder->port;
-	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	struct intel_connector *intel_connector = intel_dp->attached_connector;
 	struct intel_digital_connector_state *intel_conn_state =
 		to_intel_digital_connector_state(conn_state);
@@ -2341,7 +2392,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 		return ret;
 
 	pipe_config->has_drrs = false;
-	if (IS_G4X(dev_priv) || port == PORT_A)
+	if (!intel_dp_port_has_audio(dev_priv, port))
 		pipe_config->has_audio = false;
 	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
 		pipe_config->has_audio = intel_dp->has_audio;
@@ -2431,10 +2482,10 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
 			     const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	enum port port = encoder->port;
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 
 	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
 				 pipe_config->lane_count,
@@ -2458,7 +2509,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
 	 *
 	 * CPT PCH is quite different, having many bits moved
 	 * to the TRANS_DP_CTL register instead. That
-	 * configuration happens (oddly) in ironlake_pch_enable
+	 * configuration happens (oddly) in ilk_pch_enable
 	 */
 
 	/* Preserve the BIOS-computed detected bit. This is
@@ -2602,7 +2653,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
  * is locked
  */
 
-static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
+static  u32 ilk_get_pp_control(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	u32 control;
@@ -2652,7 +2703,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 	if (!edp_have_panel_power(intel_dp))
 		wait_panel_power_cycle(intel_dp);
 
-	pp = ironlake_get_pp_control(intel_dp);
+	pp = ilk_get_pp_control(intel_dp);
 	pp |= EDP_FORCE_VDD;
 
 	pp_stat_reg = _pp_stat_reg(intel_dp);
@@ -2717,7 +2768,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 		      intel_dig_port->base.base.base.id,
 		      intel_dig_port->base.base.name);
 
-	pp = ironlake_get_pp_control(intel_dp);
+	pp = ilk_get_pp_control(intel_dp);
 	pp &= ~EDP_FORCE_VDD;
 
 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
@@ -2813,7 +2864,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
 	wait_panel_power_cycle(intel_dp);
 
 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-	pp = ironlake_get_pp_control(intel_dp);
+	pp = ilk_get_pp_control(intel_dp);
 	if (IS_GEN(dev_priv, 5)) {
 		/* ILK workaround: disable reset around power sequence */
 		pp &= ~PANEL_POWER_RESET;
@@ -2868,7 +2919,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
 	WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
 	     dig_port->base.base.base.id, dig_port->base.base.name);
 
-	pp = ironlake_get_pp_control(intel_dp);
+	pp = ilk_get_pp_control(intel_dp);
 	/* We need to switch off panel power _and_ force vdd, for otherwise some
 	 * panels get very unhappy and cease to work. */
 	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
@@ -2917,7 +2968,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
 		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 		u32 pp;
 
-		pp = ironlake_get_pp_control(intel_dp);
+		pp = ilk_get_pp_control(intel_dp);
 		pp |= EDP_BLC_ENABLE;
 
 		I915_WRITE(pp_ctrl_reg, pp);
@@ -2929,7 +2980,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
 			    const struct drm_connector_state *conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
+	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
 
 	if (!intel_dp_is_edp(intel_dp))
 		return;
@@ -2953,7 +3004,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
 		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 		u32 pp;
 
-		pp = ironlake_get_pp_control(intel_dp);
+		pp = ilk_get_pp_control(intel_dp);
 		pp &= ~EDP_BLC_ENABLE;
 
 		I915_WRITE(pp_ctrl_reg, pp);
@@ -2967,7 +3018,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
 /* Disable backlight PP control and backlight PWM. */
 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
+	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
 
 	if (!intel_dp_is_edp(intel_dp))
 		return;
@@ -2985,13 +3036,13 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
 static void intel_edp_backlight_power(struct intel_connector *connector,
 				      bool enable)
 {
-	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	intel_wakeref_t wakeref;
 	bool is_enabled;
 
 	is_enabled = false;
 	with_pps_lock(intel_dp, wakeref)
-		is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
+		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
 	if (is_enabled == enable)
 		return;
 
@@ -3028,13 +3079,13 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
 
-static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
-				const struct intel_crtc_state *pipe_config)
+static void ilk_edp_pll_on(struct intel_dp *intel_dp,
+			   const struct intel_crtc_state *pipe_config)
 {
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	assert_pipe_disabled(dev_priv, crtc->pipe);
+	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
 	assert_dp_port_disabled(intel_dp);
 	assert_edp_pll_disabled(dev_priv);
 
@@ -3068,13 +3119,13 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
 	udelay(200);
 }
 
-static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
-				 const struct intel_crtc_state *old_crtc_state)
+static void ilk_edp_pll_off(struct intel_dp *intel_dp,
+			    const struct intel_crtc_state *old_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	assert_pipe_disabled(dev_priv, crtc->pipe);
+	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
 	assert_dp_port_disabled(intel_dp);
 	assert_edp_pll_enabled(dev_priv);
 
@@ -3207,7 +3258,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
 				  enum pipe *pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	intel_wakeref_t wakeref;
 	bool ret;
 
@@ -3228,10 +3279,10 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 				struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	u32 tmp, flags = 0;
 	enum port port = encoder->port;
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
 
 	if (encoder->type == INTEL_OUTPUT_EDP)
 		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
@@ -3266,7 +3317,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 			flags |= DRM_MODE_FLAG_NVSYNC;
 	}
 
-	pipe_config->base.adjusted_mode.flags |= flags;
+	pipe_config->hw.adjusted_mode.flags |= flags;
 
 	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
 		pipe_config->limited_color_range = true;
@@ -3283,7 +3334,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 			pipe_config->port_clock = 270000;
 	}
 
-	pipe_config->base.adjusted_mode.crtc_clock =
+	pipe_config->hw.adjusted_mode.crtc_clock =
 		intel_dotclock_calculate(pipe_config->port_clock,
 					 &pipe_config->dp_m_n);
 
@@ -3312,7 +3363,7 @@ static void intel_disable_dp(struct intel_encoder *encoder,
 			     const struct intel_crtc_state *old_crtc_state,
 			     const struct drm_connector_state *old_conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 	intel_dp->link_trained = false;
 
@@ -3346,7 +3397,7 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder,
 				const struct intel_crtc_state *old_crtc_state,
 				const struct drm_connector_state *old_conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	enum port port = encoder->port;
 
 	/*
@@ -3359,7 +3410,7 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder,
 
 	/* Only ilk+ has port A */
 	if (port == PORT_A)
-		ironlake_edp_pll_off(intel_dp, old_crtc_state);
+		ilk_edp_pll_off(intel_dp, old_crtc_state);
 }
 
 static void vlv_post_disable_dp(struct intel_encoder *encoder,
@@ -3497,8 +3548,8 @@ static void intel_enable_dp(struct intel_encoder *encoder,
 			    const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	u32 dp_reg = I915_READ(intel_dp->output_reg);
 	enum pipe pipe = crtc->pipe;
 	intel_wakeref_t wakeref;
@@ -3557,14 +3608,14 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder,
 			      const struct intel_crtc_state *pipe_config,
 			      const struct drm_connector_state *conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	enum port port = encoder->port;
 
 	intel_dp_prepare(encoder, pipe_config);
 
 	/* Only ilk+ has port A */
 	if (port == PORT_A)
-		ironlake_edp_pll_on(intel_dp, pipe_config);
+		ilk_edp_pll_on(intel_dp, pipe_config);
 }
 
 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
@@ -3607,7 +3658,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
 	for_each_intel_dp(&dev_priv->drm, encoder) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 		WARN(intel_dp->active_pipe == pipe,
 		     "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
@@ -3630,8 +3681,8 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
 					   const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -4152,8 +4203,8 @@ intel_dp_link_down(struct intel_encoder *encoder,
 		   const struct intel_crtc_state *old_crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	enum port port = encoder->port;
 	u32 DP = intel_dp->DP;
 
@@ -4852,7 +4903,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
 	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
 	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
 	/* Set test active flag here so userspace doesn't interrupt things */
-	intel_dp->compliance.test_active = 1;
+	intel_dp->compliance.test_active = true;
 
 	return DP_TEST_ACK;
 }
@@ -4896,7 +4947,7 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
 	}
 
 	/* Set test active flag here so userspace doesn't interrupt things */
-	intel_dp->compliance.test_active = 1;
+	intel_dp->compliance.test_active = true;
 
 	return test_result;
 }
@@ -5045,7 +5096,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
 			  struct drm_modeset_acquire_ctx *ctx)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct intel_connector *connector = intel_dp->attached_connector;
 	struct drm_connector_state *conn_state;
 	struct intel_crtc_state *crtc_state;
@@ -5076,7 +5127,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
 
 	WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
 
-	if (!crtc_state->base.active)
+	if (!crtc_state->hw.active)
 		return 0;
 
 	if (conn_state->commit &&
@@ -5482,10 +5533,10 @@ static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
 	return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
 }
 
-static bool icl_digital_port_connected(struct intel_encoder *encoder)
+static bool icp_digital_port_connected(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 
 	if (intel_phy_is_combo(dev_priv, phy))
@@ -5520,9 +5571,9 @@ static bool __intel_digital_port_connected(struct intel_encoder *encoder)
 			return g4x_digital_port_connected(encoder);
 	}
 
-	if (INTEL_GEN(dev_priv) >= 11)
-		return icl_digital_port_connected(encoder);
-	else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
+	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+		return icp_digital_port_connected(encoder);
+	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
 		return spt_digital_port_connected(encoder);
 	else if (IS_GEN9_LP(dev_priv))
 		return bxt_digital_port_connected(encoder);
@@ -5600,7 +5651,7 @@ intel_dp_detect(struct drm_connector *connector,
 		bool force)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct intel_encoder *encoder = &dig_port->base;
 	enum drm_connector_status status;
@@ -5704,7 +5755,7 @@ intel_dp_detect(struct drm_connector *connector,
 static void
 intel_dp_force(struct drm_connector *connector)
 {
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct intel_encoder *intel_encoder = &dig_port->base;
 	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
@@ -5739,7 +5790,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
 	}
 
 	/* if eDP has no EDID, fall back to fixed mode */
-	if (intel_dp_is_edp(intel_attached_dp(connector)) &&
+	if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) &&
 	    intel_connector->panel.fixed_mode) {
 		struct drm_display_mode *mode;
 
@@ -5757,7 +5808,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
 static int
 intel_dp_connector_register(struct drm_connector *connector)
 {
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
 	int ret;
 
 	ret = intel_connector_register(connector);
@@ -5779,7 +5830,7 @@ intel_dp_connector_register(struct drm_connector *connector)
 static void
 intel_dp_connector_unregister(struct drm_connector *connector)
 {
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
 
 	drm_dp_cec_unregister_connector(&intel_dp->aux);
 	drm_dp_aux_unregister(&intel_dp->aux);
@@ -5788,7 +5839,7 @@ intel_dp_connector_unregister(struct drm_connector *connector)
 
 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder));
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 
 	intel_dp_mst_encoder_cleanup(intel_dig_port);
@@ -5817,12 +5868,12 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 	intel_dp_encoder_flush_work(encoder);
 
 	drm_encoder_cleanup(encoder);
-	kfree(enc_to_dig_port(encoder));
+	kfree(enc_to_dig_port(to_intel_encoder(encoder)));
 }
 
 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
 	intel_wakeref_t wakeref;
 
 	if (!intel_dp_is_edp(intel_dp))
@@ -5853,7 +5904,7 @@ static
 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
 				u8 *an)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
 	static const struct drm_dp_aux_msg msg = {
 		.request = DP_AUX_NATIVE_WRITE,
 		.address = DP_AUX_HDCP_AKSV,
@@ -6463,7 +6514,7 @@ static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
 void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
 	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
 	intel_wakeref_t wakeref;
 
@@ -6642,7 +6693,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
 
 	intel_pps_get_registers(intel_dp, &regs);
 
-	pp_ctl = ironlake_get_pp_control(intel_dp);
+	pp_ctl = ilk_get_pp_control(intel_dp);
 
 	/* Ensure PPS is unlocked */
 	if (!HAS_DDI(dev_priv))
@@ -6812,7 +6863,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 	 * soon as the new power sequencer gets initialized.
 	 */
 	if (force_disable_vdd) {
-		u32 pp = ironlake_get_pp_control(intel_dp);
+		u32 pp = ilk_get_pp_control(intel_dp);
 
 		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
 
@@ -6909,7 +6960,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
 				    int refresh_rate)
 {
 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
 
 	if (refresh_rate <= 0) {
@@ -6942,7 +6993,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	if (!crtc_state->base.active) {
+	if (!crtc_state->hw.active) {
 		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
 		return;
 	}
@@ -7609,7 +7660,7 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
 		if (encoder->type != INTEL_OUTPUT_DDI)
 			continue;
 
-		intel_dp = enc_to_intel_dp(&encoder->base);
+		intel_dp = enc_to_intel_dp(encoder);
 
 		if (!intel_dp->can_mst)
 			continue;
@@ -7630,7 +7681,7 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
 		if (encoder->type != INTEL_OUTPUT_DDI)
 			continue;
 
-		intel_dp = enc_to_intel_dp(&encoder->base);
+		intel_dp = enc_to_intel_dp(encoder);
 
 		if (!intel_dp->can_mst)
 			continue;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 020422d..7c653f8 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -57,7 +57,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
  */
 static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 	u8 read_val[2] = { 0x0 };
 	u16 level = 0;
 
@@ -82,7 +82,7 @@ static void
 intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 	u8 vals[2] = { 0x0 };
 
 	vals[0] = level;
@@ -110,7 +110,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
 static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 	int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
 	u8 pn, pn_min, pn_max;
 
@@ -178,7 +178,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
 					  const struct drm_connector_state *conn_state)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 	u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
 
 	if (drm_dp_dpcd_readb(&intel_dp->aux,
@@ -222,13 +222,14 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
 
 static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
 {
-	set_aux_backlight_enable(enc_to_intel_dp(old_conn_state->best_encoder), false);
+	set_aux_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
+				 false);
 }
 
 static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
 					enum pipe pipe)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 	struct intel_panel *panel = &connector->panel;
 
 	if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
@@ -247,7 +248,7 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
 static bool
 intel_dp_aux_display_control_capable(struct intel_connector *connector)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 
 	/* Check the eDP Display control capabilities registers to determine if
 	 * the panel can support backlight control over the aux channel
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 03d1cba..cba68c5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -42,13 +42,13 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
 					    struct drm_connector_state *conn_state,
 					    struct link_config_limits *limits)
 {
-	struct drm_atomic_state *state = crtc_state->base.state;
-	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct drm_atomic_state *state = crtc_state->uapi.state;
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
 	struct intel_dp *intel_dp = &intel_mst->primary->dp;
 	struct intel_connector *connector =
 		to_intel_connector(conn_state->connector);
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 	void *port = connector->port;
 	bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
 					   DP_DPCD_QUIRK_CONSTANT_N);
@@ -61,10 +61,11 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
 		crtc_state->pipe_bpp = bpp;
 
 		crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
-						       crtc_state->pipe_bpp);
+						       crtc_state->pipe_bpp,
+						       false);
 
 		slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
-						      port, crtc_state->pbn);
+						      port, crtc_state->pbn, 0);
 		if (slots == -EDEADLK)
 			return slots;
 		if (slots >= 0)
@@ -87,19 +88,65 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
 	return 0;
 }
 
+/*
+ * Iterate over all connectors and return the smallest transcoder in the MST
+ * stream
+ */
+static enum transcoder
+intel_dp_mst_master_trans_compute(struct intel_atomic_state *state,
+				  struct intel_dp *mst_port)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_digital_connector_state *conn_state;
+	struct intel_connector *connector;
+	enum pipe ret = I915_MAX_PIPES;
+	int i;
+
+	if (INTEL_GEN(dev_priv) < 12)
+		return INVALID_TRANSCODER;
+
+	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+		struct intel_crtc_state *crtc_state;
+		struct intel_crtc *crtc;
+
+		if (connector->mst_port != mst_port || !conn_state->base.crtc)
+			continue;
+
+		crtc = to_intel_crtc(conn_state->base.crtc);
+		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+		if (!crtc_state->uapi.active)
+			continue;
+
+		/*
+		 * Using crtc->pipe because crtc_state->cpu_transcoder is
+		 * computed, so other CRTCs could still have a non-computed
+		 * cpu_transcoder.
+		 */
+		if (crtc->pipe < ret)
+			ret = crtc->pipe;
+	}
+
+	if (ret == I915_MAX_PIPES)
+		return INVALID_TRANSCODER;
+
+	/* Simple cast works because TGL doesn't have an eDP transcoder */
+	return (enum transcoder)ret;
+}
+
 static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
 				       struct intel_crtc_state *pipe_config,
 				       struct drm_connector_state *conn_state)
 {
+	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
 	struct intel_dp *intel_dp = &intel_mst->primary->dp;
 	struct intel_connector *connector =
 		to_intel_connector(conn_state->connector);
 	struct intel_digital_connector_state *intel_conn_state =
 		to_intel_digital_connector_state(conn_state);
 	const struct drm_display_mode *adjusted_mode =
-		&pipe_config->base.adjusted_mode;
+		&pipe_config->hw.adjusted_mode;
 	void *port = connector->port;
 	struct link_config_limits limits;
 	int ret;
@@ -154,25 +201,91 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
 	intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
 
+	pipe_config->mst_master_transcoder = intel_dp_mst_master_trans_compute(state, intel_dp);
+
+	return 0;
+}
+
+/*
+ * If one of the connectors in an MST stream needs a modeset, mark all CRTCs
+ * that share the same MST stream as mode changed;
+ * intel_modeset_pipe_config() + intel_crtc_check_fastset() will then take
+ * care of doing a fastset when possible.
+ */
+static int
+intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
+				       struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_connector_list_iter connector_list_iter;
+	struct intel_connector *connector_iter;
+
+	if (INTEL_GEN(dev_priv) < 12)
+		return 0;
+
+	if (!intel_connector_needs_modeset(state, &connector->base))
+		return 0;
+
+	drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter);
+	for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
+		struct intel_digital_connector_state *conn_iter_state;
+		struct intel_crtc_state *crtc_state;
+		struct intel_crtc *crtc;
+		int ret;
+
+		if (connector_iter->mst_port != connector->mst_port ||
+		    connector_iter == connector)
+			continue;
+
+		conn_iter_state = intel_atomic_get_digital_connector_state(state,
+									   connector_iter);
+		if (IS_ERR(conn_iter_state)) {
+			drm_connector_list_iter_end(&connector_list_iter);
+			return PTR_ERR(conn_iter_state);
+		}
+
+		if (!conn_iter_state->base.crtc)
+			continue;
+
+		crtc = to_intel_crtc(conn_iter_state->base.crtc);
+		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+		if (IS_ERR(crtc_state)) {
+			drm_connector_list_iter_end(&connector_list_iter);
+			return PTR_ERR(crtc_state);
+		}
+
+		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+		if (ret) {
+			drm_connector_list_iter_end(&connector_list_iter);
+			return ret;
+		}
+		crtc_state->uapi.mode_changed = true;
+	}
+	drm_connector_list_iter_end(&connector_list_iter);
+
 	return 0;
 }
 
 static int
 intel_dp_mst_atomic_check(struct drm_connector *connector,
-			  struct drm_atomic_state *state)
+			  struct drm_atomic_state *_state)
 {
+	struct intel_atomic_state *state = to_intel_atomic_state(_state);
 	struct drm_connector_state *new_conn_state =
-		drm_atomic_get_new_connector_state(state, connector);
+		drm_atomic_get_new_connector_state(&state->base, connector);
 	struct drm_connector_state *old_conn_state =
-		drm_atomic_get_old_connector_state(state, connector);
+		drm_atomic_get_old_connector_state(&state->base, connector);
 	struct intel_connector *intel_connector =
 		to_intel_connector(connector);
 	struct drm_crtc *new_crtc = new_conn_state->crtc;
-	struct drm_crtc_state *crtc_state;
 	struct drm_dp_mst_topology_mgr *mgr;
 	int ret;
 
-	ret = intel_digital_connector_atomic_check(connector, state);
+	ret = intel_digital_connector_atomic_check(connector, &state->base);
+	if (ret)
+		return ret;
+
+	ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state);
 	if (ret)
 		return ret;
 
@@ -183,16 +296,18 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
 	 * connector
 	 */
 	if (new_crtc) {
-		crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+		struct intel_crtc *intel_crtc = to_intel_crtc(new_crtc);
+		struct intel_crtc_state *crtc_state =
+			intel_atomic_get_new_crtc_state(state, intel_crtc);
 
 		if (!crtc_state ||
-		    !drm_atomic_crtc_needs_modeset(crtc_state) ||
-		    crtc_state->enable)
+		    !drm_atomic_crtc_needs_modeset(&crtc_state->uapi) ||
+		    crtc_state->uapi.enable)
 			return 0;
 	}
 
-	mgr = &enc_to_mst(old_conn_state->best_encoder)->primary->dp.mst_mgr;
-	ret = drm_dp_atomic_release_vcpi_slots(state, mgr,
+	mgr = &enc_to_mst(to_intel_encoder(old_conn_state->best_encoder))->primary->dp.mst_mgr;
+	ret = drm_dp_atomic_release_vcpi_slots(&state->base, mgr,
 					       intel_connector->port);
 
 	return ret;
@@ -202,7 +317,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
 				 const struct intel_crtc_state *old_crtc_state,
 				 const struct drm_connector_state *old_conn_state)
 {
-	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	struct intel_connector *connector =
@@ -226,36 +341,65 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
 				      const struct intel_crtc_state *old_crtc_state,
 				      const struct drm_connector_state *old_conn_state)
 {
-	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	struct intel_connector *connector =
 		to_intel_connector(old_conn_state->connector);
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	bool last_mst_stream;
+	u32 val;
 
-	intel_ddi_disable_pipe_clock(old_crtc_state);
+	intel_dp->active_mst_links--;
+	last_mst_stream = intel_dp->active_mst_links == 0;
+	WARN_ON(INTEL_GEN(dev_priv) >= 12 && last_mst_stream &&
+		!intel_dp_mst_is_master_trans(old_crtc_state));
 
-	/* this can fail */
-	drm_dp_check_act_status(&intel_dp->mst_mgr);
-	/* and this can also fail */
+	intel_crtc_vblank_off(old_crtc_state);
+
+	intel_disable_pipe(old_crtc_state);
+
 	drm_dp_update_payload_part2(&intel_dp->mst_mgr);
 
+	val = I915_READ(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
+	val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+	I915_WRITE(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), val);
+
+	if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
+				  DP_TP_STATUS_ACT_SENT, 1))
+		DRM_ERROR("Timed out waiting for ACT sent when disabling\n");
+	drm_dp_check_act_status(&intel_dp->mst_mgr);
+
 	drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
 
+	intel_ddi_disable_transcoder_func(old_crtc_state);
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		skl_scaler_disable(old_crtc_state);
+	else
+		ilk_pfit_disable(old_crtc_state);
+
 	/*
 	 * Power down mst path before disabling the port, otherwise we end
 	 * up getting interrupts from the sink upon detecting link loss.
 	 */
 	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
 				     false);
+	/*
+	 * From TGL spec: "If multi-stream slave transcoder: Configure
+	 * Transcoder Clock Select to direct no clock to the transcoder"
+	 *
+	 * From older GENs spec: "Configure Transcoder Clock Select to direct
+	 * no clock to the transcoder"
+	 */
+	if (INTEL_GEN(dev_priv) < 12 || !last_mst_stream)
+		intel_ddi_disable_pipe_clock(old_crtc_state);
 
-	intel_dp->active_mst_links--;
 
 	intel_mst->connector = NULL;
-	if (intel_dp->active_mst_links == 0) {
-		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+	if (last_mst_stream)
 		intel_dig_port->base.post_disable(&intel_dig_port->base,
 						  old_crtc_state, NULL);
-	}
 
 	DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
 }
@@ -264,7 +408,7 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
 					const struct intel_crtc_state *pipe_config,
 					const struct drm_connector_state *conn_state)
 {
-	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 
@@ -273,25 +417,11 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
 						    pipe_config, NULL);
 }
 
-static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder,
-					  const struct intel_crtc_state *old_crtc_state,
-					  const struct drm_connector_state *old_conn_state)
-{
-	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
-	struct intel_digital_port *intel_dig_port = intel_mst->primary;
-	struct intel_dp *intel_dp = &intel_dig_port->dp;
-
-	if (intel_dp->active_mst_links == 0)
-		intel_dig_port->base.post_pll_disable(&intel_dig_port->base,
-						      old_crtc_state,
-						      old_conn_state);
-}
-
 static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
 				    const struct intel_crtc_state *pipe_config,
 				    const struct drm_connector_state *conn_state)
 {
-	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -299,21 +429,25 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
 		to_intel_connector(conn_state->connector);
 	int ret;
 	u32 temp;
+	bool first_mst_stream;
 
 	/* MST encoders are bound to a crtc, not to a connector,
 	 * force the mapping here for get_hw_state.
 	 */
 	connector->encoder = encoder;
 	intel_mst->connector = connector;
+	first_mst_stream = intel_dp->active_mst_links == 0;
+	WARN_ON(INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
+		!intel_dp_mst_is_master_trans(pipe_config));
 
 	DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
 
-	if (intel_dp->active_mst_links == 0)
+	if (first_mst_stream)
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 
 	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
 
-	if (intel_dp->active_mst_links == 0)
+	if (first_mst_stream)
 		intel_dig_port->base.pre_enable(&intel_dig_port->base,
 						pipe_config, NULL);
 
@@ -330,7 +464,15 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
 
 	ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
 
-	intel_ddi_enable_pipe_clock(pipe_config);
+	/*
+	 * Before Gen 12 this is not done as part of
+	 * intel_dig_port->base.pre_enable() and should be done here. For
+	 * Gen 12+ the step in which this should be done is different for the
+	 * first MST stream, so it's done on the DDI for the first stream and
+	 * here for the following ones.
+	 */
+	if (INTEL_GEN(dev_priv) < 12 || !first_mst_stream)
+		intel_ddi_enable_pipe_clock(pipe_config);
 
 	intel_ddi_set_dp_msa(pipe_config, conn_state);
 }
@@ -339,7 +481,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
 				const struct intel_crtc_state *pipe_config,
 				const struct drm_connector_state *conn_state)
 {
-	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -360,7 +502,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
 static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
 				      enum pipe *pipe)
 {
-	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
 	*pipe = intel_mst->pipe;
 	if (intel_mst->connector)
 		return true;
@@ -370,7 +512,7 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
 static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
 					struct intel_crtc_state *pipe_config)
 {
-	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 
 	intel_ddi_get_config(&intel_dig_port->base, pipe_config);
@@ -478,7 +620,7 @@ static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_fun
 
 static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
 {
-	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder));
 
 	drm_encoder_cleanup(encoder);
 	kfree(intel_mst);
@@ -633,7 +775,6 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
 	intel_encoder->disable = intel_mst_disable_dp;
 	intel_encoder->post_disable = intel_mst_post_disable_dp;
 	intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
-	intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
 	intel_encoder->pre_enable = intel_mst_pre_enable_dp;
 	intel_encoder->enable = intel_mst_enable_dp;
 	intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
@@ -703,3 +844,14 @@ intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
 	drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
 	/* encoders will get killed by normal cleanup */
 }
+
+bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
+{
+	return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder;
+}
+
+bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
+{
+	return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
+	       crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index f660ad8..854724f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -6,10 +6,15 @@
 #ifndef __INTEL_DP_MST_H__
 #define __INTEL_DP_MST_H__
 
+#include <linux/types.h>
+
 struct intel_digital_port;
+struct intel_crtc_state;
 
 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
 void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
 int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port);
+bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
+bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
 
 #endif /* __INTEL_DP_MST_H__ */
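
For illustration only (not part of the patch): the intel_dp_mst_is_master_trans()/intel_dp_mst_is_slave_trans() helpers added above reduce to comparisons against the MST master transcoder cached in the crtc state. A minimal standalone sketch of the same check, using simplified stand-in types rather than the real intel_display_types.h definitions:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins; the driver uses enum transcoder and struct intel_crtc_state. */
enum transcoder { INVALID_TRANSCODER = -1, TRANSCODER_A, TRANSCODER_B };

struct crtc_state {
	enum transcoder cpu_transcoder;
	enum transcoder mst_master_transcoder;
};

/* Master: this pipe's transcoder is the one driving the MST link. */
static bool mst_is_master(const struct crtc_state *s)
{
	return s->mst_master_transcoder == s->cpu_transcoder;
}

/* Slave: an MST master exists, and it is some other transcoder. */
static bool mst_is_slave(const struct crtc_state *s)
{
	return s->mst_master_transcoder != INVALID_TRANSCODER &&
	       s->mst_master_transcoder != s->cpu_transcoder;
}

int main(void)
{
	struct crtc_state master = { TRANSCODER_A, TRANSCODER_A };
	struct crtc_state slave  = { TRANSCODER_B, TRANSCODER_A };
	struct crtc_state sst    = { TRANSCODER_A, INVALID_TRANSCODER };

	printf("%d %d %d\n", mst_is_master(&master),
	       mst_is_slave(&slave), mst_is_slave(&sst)); /* 1 1 0 */
	return 0;
}

The SST case (no MST master recorded) is neither master nor slave, which is why the slave check also tests against INVALID_TRANSCODER.
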
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 556d1b3..6fb1f7a 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -642,7 +642,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
 			      bool uniq_trans_scale)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dport = enc_to_dig_port(encoder);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	enum dpio_channel ch = vlv_dport_to_channel(dport);
 	enum pipe pipe = intel_crtc->pipe;
@@ -738,8 +738,8 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
 			      bool reset)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	enum pipe pipe = crtc->pipe;
 	u32 val;
 
@@ -781,9 +781,9 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
 void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
 			    const struct intel_crtc_state *crtc_state)
 {
-	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dport = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	enum dpio_channel ch = vlv_dport_to_channel(dport);
 	enum pipe pipe = crtc->pipe;
 	unsigned int lane_mask =
@@ -861,10 +861,10 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
 void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
 				const struct intel_crtc_state *crtc_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	enum dpio_channel ch = vlv_dport_to_channel(dport);
 	enum pipe pipe = crtc->pipe;
 	int data, i, stagger;
@@ -940,7 +940,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
 
 void chv_phy_release_cl2_override(struct intel_encoder *encoder)
 {
-	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dport = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
 	if (dport->release_cl2_override) {
@@ -953,7 +953,7 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
 			      const struct intel_crtc_state *old_crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	enum pipe pipe = to_intel_crtc(old_crtc_state->base.crtc)->pipe;
+	enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
 	u32 val;
 
 	vlv_dpio_get(dev_priv);
@@ -989,7 +989,7 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dport = enc_to_dig_port(encoder);
 	enum dpio_channel port = vlv_dport_to_channel(dport);
 	enum pipe pipe = intel_crtc->pipe;
 
@@ -1014,9 +1014,9 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
 void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
 			    const struct intel_crtc_state *crtc_state)
 {
-	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dport = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	enum dpio_channel port = vlv_dport_to_channel(dport);
 	enum pipe pipe = crtc->pipe;
 
@@ -1043,10 +1043,10 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
 void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
 				const struct intel_crtc_state *crtc_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	enum dpio_channel port = vlv_dport_to_channel(dport);
 	enum pipe pipe = crtc->pipe;
 	u32 val;
@@ -1073,9 +1073,9 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
 void vlv_phy_reset_lanes(struct intel_encoder *encoder,
 			 const struct intel_crtc_state *old_crtc_state)
 {
-	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dport = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	enum dpio_channel port = vlv_dport_to_channel(dport);
 	enum pipe pipe = crtc->pipe;
 
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 3ce0a02..c75e34d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -136,7 +136,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
  */
 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 
@@ -163,7 +163,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
  */
 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
@@ -208,7 +208,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
  */
 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
@@ -842,7 +842,7 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
 static struct intel_shared_dpll *
 hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	struct intel_shared_dpll *pll;
 	enum intel_dpll_id pll_id;
 	int clock = crtc_state->port_clock;
@@ -1751,7 +1751,7 @@ static bool
 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
 			  struct bxt_clk_div *clk_div)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct dpll best_clock;
 
 	/* Calculate HDMI div */
@@ -2274,7 +2274,7 @@ static bool
 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
 			struct skl_wrpll_params *wrpll_params)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	u32 afe_clock = crtc_state->port_clock * 5;
 	u32 ref_clock;
 	u32 dco_min = 7998000;
@@ -2553,7 +2553,7 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
 				  struct skl_wrpll_params *pll_params)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	const struct icl_combo_pll_params *params =
 		dev_priv->cdclk.hw.ref == 24000 ?
 		icl_dp_combo_pll_24MHz_values :
@@ -2575,7 +2575,7 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
 			     struct skl_wrpll_params *pll_params)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 
 	if (INTEL_GEN(dev_priv) >= 12) {
 		switch (dev_priv->cdclk.hw.ref) {
@@ -2612,7 +2612,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
 				struct intel_encoder *encoder,
 				struct intel_dpll_hw_state *pll_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	u32 cfgcr0, cfgcr1;
 	struct skl_wrpll_params pll_params = { 0 };
 	bool ret;
@@ -2744,7 +2744,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
 				  struct intel_dpll_hw_state *pll_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	int refclk_khz = dev_priv->cdclk.hw.ref;
 	int clock = crtc_state->port_clock;
 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
@@ -2972,8 +2972,8 @@ static void icl_update_active_dpll(struct intel_atomic_state *state,
 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
 
 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
-		enc_to_mst(&encoder->base)->primary :
-		enc_to_dig_port(&encoder->base);
+		enc_to_mst(encoder)->primary :
+		enc_to_dig_port(encoder);
 
 	if (primary_port &&
 	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index bb5a0e9..ada006a 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -102,43 +102,50 @@ intel_dsb_get(struct intel_crtc *crtc)
 	struct intel_dsb *dsb = &crtc->dsb;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
+	u32 *buf;
 	intel_wakeref_t wakeref;
 
 	if (!HAS_DSB(i915))
 		return dsb;
 
-	if (atomic_add_return(1, &dsb->refcount) != 1)
+	if (dsb->refcount++ != 0)
 		return dsb;
 
-	dsb->id = DSB1;
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
 	obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
 	if (IS_ERR(obj)) {
 		DRM_ERROR("Gem object creation failed\n");
-		goto err;
+		goto out;
 	}
 
-	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
 	if (IS_ERR(vma)) {
 		DRM_ERROR("Vma creation failed\n");
 		i915_gem_object_put(obj);
-		atomic_dec(&dsb->refcount);
-		goto err;
+		goto out;
 	}
 
-	dsb->cmd_buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
-	if (IS_ERR(dsb->cmd_buf)) {
+	buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+	if (IS_ERR(buf)) {
 		DRM_ERROR("Command buffer creation failed\n");
-		i915_vma_unpin_and_release(&vma, 0);
-		dsb->cmd_buf = NULL;
-		atomic_dec(&dsb->refcount);
-		goto err;
+		goto out;
 	}
-	dsb->vma = vma;
 
-err:
+	dsb->id = DSB1;
+	dsb->vma = vma;
+	dsb->cmd_buf = buf;
+
+out:
+	/*
+	 * On error dsb->cmd_buf will continue to be NULL, making the writes
+	 * pass-through. Leave the dangling ref to be removed later by the
+	 * corresponding intel_dsb_put(): the important error message will
+	 * already be logged above.
+	 */
+
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
 	return dsb;
 }
 
@@ -158,10 +165,10 @@ void intel_dsb_put(struct intel_dsb *dsb)
 	if (!HAS_DSB(i915))
 		return;
 
-	if (WARN_ON(atomic_read(&dsb->refcount) == 0))
+	if (WARN_ON(dsb->refcount == 0))
 		return;
 
-	if (atomic_dec_and_test(&dsb->refcount)) {
+	if (--dsb->refcount == 0) {
 		i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
 		dsb->cmd_buf = NULL;
 		dsb->free_pos = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 6f95c8e..395ef9c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -22,7 +22,7 @@ enum dsb_id {
 };
 
 struct intel_dsb {
-	atomic_t refcount;
+	long refcount;
 	enum dsb_id id;
 	u32 *cmd_buf;
 	struct i915_vma *vma;
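
For illustration (not part of the patch): dropping atomic_t here means the DSB refcount is now a plain counter, which is safe only because intel_dsb_get()/intel_dsb_put() callers are presumably already serialized (the DSB is used from the modeset commit path). A minimal standalone sketch of the "plain counter under an external lock" pattern, with hypothetical names and an explicit mutex standing in for that serialization:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical resource guarded by a plain (non-atomic) refcount.
 * The counter is only touched with 'lock' held, so no atomics are needed. */
struct dsb_like {
	long refcount;
	int prepared;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void dsb_like_get(struct dsb_like *d)
{
	pthread_mutex_lock(&lock);
	if (d->refcount++ == 0)
		d->prepared = 1;	/* first user sets things up */
	pthread_mutex_unlock(&lock);
}

static void dsb_like_put(struct dsb_like *d)
{
	pthread_mutex_lock(&lock);
	if (--d->refcount == 0)
		d->prepared = 0;	/* last user tears things down */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct dsb_like d = { 0, 0 };

	dsb_like_get(&d);
	dsb_like_get(&d);
	dsb_like_put(&d);
	printf("ref=%ld prepared=%d\n", d.refcount, d.prepared); /* ref=1 prepared=1 */
	dsb_like_put(&d);
	printf("ref=%ld prepared=%d\n", d.refcount, d.prepared); /* ref=0 prepared=0 */
	return 0;
}
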
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index b15be58..19f78a4 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -45,8 +45,9 @@ struct intel_dsi {
 	struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
 	intel_wakeref_t io_wakeref[I915_MAX_PORTS];
 
-	/* GPIO Desc for CRC based Panel control */
+	/* GPIO Desc for panel and backlight control */
 	struct gpio_desc *gpio_panel;
+	struct gpio_desc *gpio_backlight;
 
 	struct intel_connector *attached_connector;
 
@@ -68,6 +69,9 @@ struct intel_dsi {
 	/* number of DSI lanes */
 	unsigned int lane_count;
 
+	/* i2c bus associated with the slave device */
+	int i2c_bus_num;
+
 	/*
 	 * video mode pixel format
 	 *
@@ -141,9 +145,9 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
 #define for_each_dsi_phy(__phy, __phys_mask) \
 	for_each_phy_masked(__phy, __phys_mask)
 
-static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
+static inline struct intel_dsi *enc_to_intel_dsi(struct intel_encoder *encoder)
 {
-	return container_of(encoder, struct intel_dsi, base.base);
+	return container_of(&encoder->base, struct intel_dsi, base.base);
 }
 
 static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
@@ -158,7 +162,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
 
 static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
 {
-	return enc_to_intel_dsi(&encoder->base)->ports;
+	return enc_to_intel_dsi(encoder)->ports;
 }
 
 /* icl_dsi.c */
@@ -203,6 +207,8 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
 
 /* intel_dsi_vbt.c */
 bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
+void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
+void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
 void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
 				 enum mipi_seq seq_id);
 void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
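
For illustration (not part of the patch): the enc_to_*() helpers in this series now take struct intel_encoder * and recover the containing structure via container_of() on the embedded drm_encoder. A minimal standalone sketch of that pointer round trip, with stand-in structs mirroring the drm_encoder/intel_encoder/intel_dsi nesting:

#include <stddef.h>
#include <stdio.h>

/* Generic container_of(), as in include/linux/kernel.h. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-ins for the real driver structs. */
struct drm_encoder { int id; };
struct intel_encoder { struct drm_encoder base; };
struct intel_dsi { struct intel_encoder base; unsigned int ports; };

/* New-style helper: takes the intel_encoder and walks back through its
 * embedded drm_encoder to the outer intel_dsi. */
static struct intel_dsi *enc_to_intel_dsi(struct intel_encoder *encoder)
{
	return container_of(&encoder->base, struct intel_dsi, base.base);
}

int main(void)
{
	struct intel_dsi dsi = { .ports = 0x3 };

	/* Round trip: pointer to the inner object back to the outer one. */
	printf("%u\n", enc_to_intel_dsi(&dsi.base)->ports); /* 3 */
	return 0;
}

Taking intel_encoder directly removes the repeated &encoder->base noise at every call site, as the surrounding hunks show.
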
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index bb3fd8b..c878388 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -46,7 +46,7 @@
 static u32 dcs_get_backlight(struct intel_connector *connector)
 {
 	struct intel_encoder *encoder = connector->encoder;
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct mipi_dsi_device *dsi_device;
 	u8 data = 0;
 	enum port port;
@@ -64,7 +64,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
 
 static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 level)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
 	struct mipi_dsi_device *dsi_device;
 	u8 data = level;
 	enum port port;
@@ -79,7 +79,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
 
 static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
 	struct mipi_dsi_device *dsi_device;
 	enum port port;
 
@@ -113,7 +113,7 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
 static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
 				 const struct drm_connector_state *conn_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
 	struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
 	struct mipi_dsi_device *dsi_device;
 	enum port port;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index f90946c..89fb0d9 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -25,7 +25,10 @@
  */
 
 #include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
 #include <linux/mfd/intel_soc_pmic.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
 #include <linux/slab.h>
 
 #include <asm/intel-mid.h>
@@ -83,6 +86,12 @@ static struct gpio_map vlv_gpio_table[] = {
 	{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
 };
 
+struct i2c_adapter_lookup {
+	u16 slave_addr;
+	struct intel_dsi *intel_dsi;
+	acpi_handle dev_handle;
+};
+
 #define CHV_GPIO_IDX_START_N		0
 #define CHV_GPIO_IDX_START_E		73
 #define CHV_GPIO_IDX_START_SW		100
@@ -375,11 +384,98 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 	return data;
 }
 
+static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
+{
+	struct i2c_adapter_lookup *lookup = data;
+	struct intel_dsi *intel_dsi = lookup->intel_dsi;
+	struct acpi_resource_i2c_serialbus *sb;
+	struct i2c_adapter *adapter;
+	acpi_handle adapter_handle;
+	acpi_status status;
+
+	if (intel_dsi->i2c_bus_num >= 0 ||
+	    !i2c_acpi_get_i2c_resource(ares, &sb))
+		return 1;
+
+	if (lookup->slave_addr != sb->slave_address)
+		return 1;
+
+	status = acpi_get_handle(lookup->dev_handle,
+				 sb->resource_source.string_ptr,
+				 &adapter_handle);
+	if (ACPI_FAILURE(status))
+		return 1;
+
+	adapter = i2c_acpi_find_adapter_by_handle(adapter_handle);
+	if (adapter)
+		intel_dsi->i2c_bus_num = adapter->nr;
+
+	return 1;
+}
+
 static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
 {
-	DRM_DEBUG_KMS("Skipping I2C element execution\n");
+	struct drm_device *drm_dev = intel_dsi->base.base.dev;
+	struct device *dev = &drm_dev->pdev->dev;
+	struct i2c_adapter *adapter;
+	struct acpi_device *acpi_dev;
+	struct list_head resource_list;
+	struct i2c_adapter_lookup lookup;
+	struct i2c_msg msg;
+	int ret;
+	u8 vbt_i2c_bus_num = *(data + 2);
+	u16 slave_addr = *(u16 *)(data + 3);
+	u8 reg_offset = *(data + 5);
+	u8 payload_size = *(data + 6);
+	u8 *payload_data;
 
-	return data + *(data + 6) + 7;
+	if (intel_dsi->i2c_bus_num < 0) {
+		intel_dsi->i2c_bus_num = vbt_i2c_bus_num;
+
+		acpi_dev = ACPI_COMPANION(dev);
+		if (acpi_dev) {
+			memset(&lookup, 0, sizeof(lookup));
+			lookup.slave_addr = slave_addr;
+			lookup.intel_dsi = intel_dsi;
+			lookup.dev_handle = acpi_device_handle(acpi_dev);
+
+			INIT_LIST_HEAD(&resource_list);
+			acpi_dev_get_resources(acpi_dev, &resource_list,
+					       i2c_adapter_lookup,
+					       &lookup);
+			acpi_dev_free_resource_list(&resource_list);
+		}
+	}
+
+	adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
+	if (!adapter) {
+		DRM_DEV_ERROR(dev, "Cannot find a valid i2c bus for xfer\n");
+		goto err_bus;
+	}
+
+	payload_data = kzalloc(payload_size + 1, GFP_KERNEL);
+	if (!payload_data)
+		goto err_alloc;
+
+	payload_data[0] = reg_offset;
+	memcpy(&payload_data[1], (data + 7), payload_size);
+
+	msg.addr = slave_addr;
+	msg.flags = 0;
+	msg.len = payload_size + 1;
+	msg.buf = payload_data;
+
+	ret = i2c_transfer(adapter, &msg, 1);
+	if (ret < 0)
+		DRM_DEV_ERROR(dev,
+			      "Failed to xfer payload of size (%u) to reg (%u)\n",
+			      payload_size, reg_offset);
+
+	kfree(payload_data);
+err_alloc:
+	i2c_put_adapter(adapter);
+err_bus:
+	return data + payload_size + 7;
 }
 
 static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
@@ -453,8 +549,8 @@ static const char *sequence_name(enum mipi_seq seq_id)
 		return "(unknown)";
 }
 
-void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
-				 enum mipi_seq seq_id)
+static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
+			       enum mipi_seq seq_id)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
 	const u8 *data;
@@ -519,6 +615,22 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
 	}
 }
 
+void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+				 enum mipi_seq seq_id)
+{
+	if (seq_id == MIPI_SEQ_POWER_ON && intel_dsi->gpio_panel)
+		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+	if (seq_id == MIPI_SEQ_BACKLIGHT_ON && intel_dsi->gpio_backlight)
+		gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 1);
+
+	intel_dsi_vbt_exec(intel_dsi, seq_id);
+
+	if (seq_id == MIPI_SEQ_POWER_OFF && intel_dsi->gpio_panel)
+		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
+	if (seq_id == MIPI_SEQ_BACKLIGHT_OFF && intel_dsi->gpio_backlight)
+		gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 0);
+}
+
 void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
@@ -664,6 +776,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 	intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
 	intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
 
+	intel_dsi->i2c_bus_num = -1;
+
 	/* a regular driver would get the device in probe */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device);
@@ -671,3 +785,110 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 
 	return true;
 }
+
+/*
+ * On some BYT/CHT devs some sequences are incomplete and we need to manually
+ * control some GPIOs. We need to add a GPIO lookup table before we get these.
+ * If the GOP did not initialize the panel (HDMI inserted) we may need to also
+ * change the pinmux for the SoC's PWM0 pin from GPIO to PWM.
+ */
+static struct gpiod_lookup_table pmic_panel_gpio_table = {
+	/* Intel GFX is consumer */
+	.dev_id = "0000:00:02.0",
+	.table = {
+		/* Panel EN/DISABLE */
+		GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
+		{ }
+	},
+};
+
+static struct gpiod_lookup_table soc_panel_gpio_table = {
+	.dev_id = "0000:00:02.0",
+	.table = {
+		GPIO_LOOKUP("INT33FC:01", 10, "backlight", GPIO_ACTIVE_HIGH),
+		GPIO_LOOKUP("INT33FC:01", 11, "panel", GPIO_ACTIVE_HIGH),
+		{ }
+	},
+};
+
+static const struct pinctrl_map soc_pwm_pinctrl_map[] = {
+	PIN_MAP_MUX_GROUP("0000:00:02.0", "soc_pwm0", "INT33FC:00",
+			  "pwm0_grp", "pwm"),
+};
+
+void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
+{
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+	enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+	bool want_backlight_gpio = false;
+	bool want_panel_gpio = false;
+	struct pinctrl *pinctrl;
+	int ret;
+
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	    mipi_config->pwm_blc == PPS_BLC_PMIC) {
+		gpiod_add_lookup_table(&pmic_panel_gpio_table);
+		want_panel_gpio = true;
+	}
+
+	if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+		gpiod_add_lookup_table(&soc_panel_gpio_table);
+		want_panel_gpio = true;
+		want_backlight_gpio = true;
+
+		/* Ensure PWM0 pin is muxed as PWM instead of GPIO */
+		ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
+					     ARRAY_SIZE(soc_pwm_pinctrl_map));
+		if (ret)
+			DRM_ERROR("Failed to register pwm0 pinmux mapping\n");
+
+		pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
+		if (IS_ERR(pinctrl))
+			DRM_ERROR("Failed to set pinmux to PWM\n");
+	}
+
+	if (want_panel_gpio) {
+		intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
+		if (IS_ERR(intel_dsi->gpio_panel)) {
+			DRM_ERROR("Failed to own gpio for panel control\n");
+			intel_dsi->gpio_panel = NULL;
+		}
+	}
+
+	if (want_backlight_gpio) {
+		intel_dsi->gpio_backlight =
+			gpiod_get(dev->dev, "backlight", flags);
+		if (IS_ERR(intel_dsi->gpio_backlight)) {
+			DRM_ERROR("Failed to own gpio for backlight control\n");
+			intel_dsi->gpio_backlight = NULL;
+		}
+	}
+}
+
+void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
+{
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+
+	if (intel_dsi->gpio_panel) {
+		gpiod_put(intel_dsi->gpio_panel);
+		intel_dsi->gpio_panel = NULL;
+	}
+
+	if (intel_dsi->gpio_backlight) {
+		gpiod_put(intel_dsi->gpio_backlight);
+		intel_dsi->gpio_backlight = NULL;
+	}
+
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	    mipi_config->pwm_blc == PPS_BLC_PMIC)
+		gpiod_remove_lookup_table(&pmic_panel_gpio_table);
+
+	if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+		pinctrl_unregister_mappings(soc_pwm_pinctrl_map);
+		gpiod_remove_lookup_table(&soc_panel_gpio_table);
+	}
+}
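
For illustration (not part of the patch): the new mipi_exec_i2c() above decodes a fixed byte layout from the VBT sequence element (bus number at offset 2, 16-bit slave address at offsets 3-4, register offset at 5, payload size at 6, payload from 7 onward) and advances by payload_size + 7. A standalone sketch of just that parsing step, under the same layout assumption and with an explicit little-endian read in place of the driver's direct u16 load:

#include <stdint.h>
#include <stdio.h>

/* Decoded form of one VBT MIPI-sequence I2C element. */
struct i2c_element {
	uint8_t  bus_num;	/* offset 2 */
	uint16_t slave_addr;	/* offsets 3-4, little endian */
	uint8_t  reg_offset;	/* offset 5 */
	uint8_t  payload_size;	/* offset 6 */
	const uint8_t *payload;	/* offset 7.. */
};

/* Parse one element; returns a pointer just past it, mirroring the
 * 'return data + payload_size + 7' in the driver. The first two bytes
 * of the element are not interpreted here. */
static const uint8_t *parse_i2c_element(const uint8_t *data,
					struct i2c_element *out)
{
	out->bus_num = data[2];
	out->slave_addr = (uint16_t)(data[3] | (data[4] << 8));
	out->reg_offset = data[5];
	out->payload_size = data[6];
	out->payload = data + 7;

	return data + out->payload_size + 7;
}

int main(void)
{
	/* bytes 0-1 unused here, bus 0, addr 0x2c, reg 0x10, 2-byte payload */
	const uint8_t blob[] = { 0, 0, 0x00, 0x2c, 0x00, 0x10, 0x02, 0xaa, 0xbb };
	struct i2c_element el;
	const uint8_t *next = parse_i2c_element(blob, &el);

	printf("bus=%u addr=0x%02x reg=0x%02x len=%u consumed=%zu\n",
	       el.bus_num, el.slave_addr, el.reg_offset, el.payload_size,
	       (size_t)(next - blob)); /* consumed=9 */
	return 0;
}
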
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index bcfbcb7..86a337c 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -125,7 +125,7 @@ static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
 	return container_of(encoder, struct intel_dvo, base);
 }
 
-static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector)
 {
 	return enc_to_dvo(intel_attached_encoder(connector));
 }
@@ -134,7 +134,7 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
 	u32 tmp;
 
 	tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -178,9 +178,9 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
 	else
 		flags |= DRM_MODE_FLAG_NVSYNC;
 
-	pipe_config->base.adjusted_mode.flags |= flags;
+	pipe_config->hw.adjusted_mode.flags |= flags;
 
-	pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+	pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
 static void intel_disable_dvo(struct intel_encoder *encoder,
@@ -207,8 +207,8 @@ static void intel_enable_dvo(struct intel_encoder *encoder,
 	u32 temp = I915_READ(dvo_reg);
 
 	intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
-					 &pipe_config->base.mode,
-					 &pipe_config->base.adjusted_mode);
+					 &pipe_config->hw.mode,
+					 &pipe_config->hw.adjusted_mode);
 
 	I915_WRITE(dvo_reg, temp | DVO_ENABLE);
 	I915_READ(dvo_reg);
@@ -220,7 +220,7 @@ static enum drm_mode_status
 intel_dvo_mode_valid(struct drm_connector *connector,
 		     struct drm_display_mode *mode)
 {
-	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
 	const struct drm_display_mode *fixed_mode =
 		to_intel_connector(connector)->panel.fixed_mode;
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -253,7 +253,7 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
 	const struct drm_display_mode *fixed_mode =
 		intel_dvo->attached_connector->panel.fixed_mode;
-	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 
 	/*
 	 * If we have timings from the BIOS for the panel, put them in
@@ -277,8 +277,8 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
 				 const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
 	enum pipe pipe = crtc->pipe;
 	u32 dvo_val;
@@ -311,7 +311,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
 static enum drm_connector_status
 intel_dvo_detect(struct drm_connector *connector, bool force)
 {
-	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, connector->name);
 	return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
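
For illustration (not part of the patch): the base -> uapi/hw renames that run through this and the neighbouring files follow the intel_crtc_state split, where the state visible to the drm core lives under .uapi and the values actually programmed into hardware live under .hw. A minimal standalone sketch of that split, with stand-in types in place of the real intel_display_types.h definitions:

#include <stdio.h>

/* Stand-ins for the real drm/i915 structs. */
struct drm_display_mode { int crtc_clock; };
struct drm_crtc { int index; };

struct drm_crtc_state {
	struct drm_crtc *crtc;		/* what userspace asked for */
};

struct intel_crtc_state {
	struct drm_crtc_state uapi;	/* uapi-visible state */
	struct {
		struct drm_display_mode adjusted_mode;	/* what the hw will run */
	} hw;
	int port_clock;
};

int main(void)
{
	struct drm_crtc crtc = { .index = 0 };
	struct intel_crtc_state state = {
		.uapi = { .crtc = &crtc },
		.port_clock = 270000,
	};

	/* As in intel_dvo_get_config(): derive the hw mode clock from the
	 * port clock while leaving the uapi side untouched. */
	state.hw.adjusted_mode.crtc_clock = state.port_clock;

	printf("crtc %d: hw clock %d\n", state.uapi.crtc->index,
	       state.hw.adjusted_mode.crtc_clock);
	return 0;
}
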
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 2061663..a1048ec 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -50,11 +50,6 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv)
 	return HAS_FBC(dev_priv);
 }
 
-static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
-{
-	return INTEL_GEN(dev_priv) <= 3;
-}
-
 /*
  * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
  * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
@@ -73,7 +68,7 @@ static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
  * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
  * we wrote to PIPESRC.
  */
-static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
+static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache,
 					    int *width, int *height)
 {
 	if (width)
@@ -83,7 +78,7 @@ static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
 }
 
 static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
-					struct intel_fbc_state_cache *cache)
+					const struct intel_fbc_state_cache *cache)
 {
 	int lines;
 
@@ -143,8 +138,10 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
 		u32 fbc_ctl2;
 
 		/* Set it up... */
-		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM;
 		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
+		if (params->fence_id >= 0)
+			fbc_ctl2 |= FBC_CTL_CPU_FENCE;
 		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
 		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
 	}
@@ -156,7 +153,8 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
 	if (IS_I945GM(dev_priv))
 		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-	fbc_ctl |= params->vma->fence->id;
+	if (params->fence_id >= 0)
+		fbc_ctl |= params->fence_id;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 }
 
@@ -176,8 +174,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
 	else
 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 
-	if (params->flags & PLANE_HAS_FENCE) {
-		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
+	if (params->fence_id >= 0) {
+		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
 		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
 	} else {
 		I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -234,14 +232,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
 		break;
 	}
 
-	if (params->flags & PLANE_HAS_FENCE) {
+	if (params->fence_id >= 0) {
 		dpfc_ctl |= DPFC_CTL_FENCE_EN;
 		if (IS_GEN(dev_priv, 5))
-			dpfc_ctl |= params->vma->fence->id;
+			dpfc_ctl |= params->fence_id;
 		if (IS_GEN(dev_priv, 6)) {
 			I915_WRITE(SNB_DPFC_CTL_SA,
 				   SNB_CPU_FENCE_ENABLE |
-				   params->vma->fence->id);
+				   params->fence_id);
 			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
 				   params->crtc.fence_y_offset);
 		}
@@ -253,8 +251,6 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
 	}
 
 	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
-	I915_WRITE(ILK_FBC_RT_BASE,
-		   i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
 	/* enable it... */
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -285,13 +281,12 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
 	int threshold = dev_priv->fbc.threshold;
 
 	/* Display WA #0529: skl, kbl, bxt. */
-	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
 		u32 val = I915_READ(CHICKEN_MISC_4);
 
 		val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
 
-		if (i915_gem_object_get_tiling(params->vma->obj) !=
-		    I915_TILING_X)
+		if (params->gen9_wa_cfb_stride)
 			val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
 
 		I915_WRITE(CHICKEN_MISC_4, val);
@@ -317,11 +312,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
 		break;
 	}
 
-	if (params->flags & PLANE_HAS_FENCE) {
+	if (params->fence_id >= 0) {
 		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 		I915_WRITE(SNB_DPFC_CTL_SA,
 			   SNB_CPU_FENCE_ENABLE |
-			   params->vma->fence->id);
+			   params->fence_id);
 		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
 	} else {
 		I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -367,6 +362,7 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
 	struct intel_fbc *fbc = &dev_priv->fbc;
 
 	fbc->active = true;
+	fbc->activated = true;
 
 	if (INTEL_GEN(dev_priv) >= 7)
 		gen7_fbc_activate(dev_priv);
@@ -419,29 +415,10 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
 	fbc->no_fbc_reason = reason;
 }
 
-static bool multiple_pipes_ok(struct intel_crtc *crtc,
-			      struct intel_plane_state *plane_state)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_fbc *fbc = &dev_priv->fbc;
-	enum pipe pipe = crtc->pipe;
-
-	/* Don't even bother tracking anything we don't need. */
-	if (!no_fbc_on_multiple_pipes(dev_priv))
-		return true;
-
-	if (plane_state->base.visible)
-		fbc->visible_pipes_mask |= (1 << pipe);
-	else
-		fbc->visible_pipes_mask &= ~(1 << pipe);
-
-	return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0;
-}
-
 static int find_compression_threshold(struct drm_i915_private *dev_priv,
 				      struct drm_mm_node *node,
-				      int size,
-				      int fb_cpp)
+				      unsigned int size,
+				      unsigned int fb_cpp)
 {
 	int compression_threshold = 1;
 	int ret;
@@ -487,18 +464,15 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
 	}
 }
 
-static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
+static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
+			       unsigned int size, unsigned int fb_cpp)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_fbc *fbc = &dev_priv->fbc;
 	struct drm_mm_node *uninitialized_var(compressed_llb);
-	int size, fb_cpp, ret;
+	int ret;
 
 	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
 
-	size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
-	fb_cpp = fbc->state_cache.fb.format->cpp[0];
-
 	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
 					 size, fb_cpp);
 	if (!ret)
@@ -656,46 +630,55 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 }
 
 static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
-					 struct intel_crtc_state *crtc_state,
-					 struct intel_plane_state *plane_state)
+					 const struct intel_crtc_state *crtc_state,
+					 const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_fbc *fbc = &dev_priv->fbc;
 	struct intel_fbc_state_cache *cache = &fbc->state_cache;
-	struct drm_framebuffer *fb = plane_state->base.fb;
+	struct drm_framebuffer *fb = plane_state->hw.fb;
 
-	cache->vma = NULL;
-	cache->flags = 0;
+	cache->plane.visible = plane_state->uapi.visible;
+	if (!cache->plane.visible)
+		return;
 
-	cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
+	cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags;
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
 
-	cache->plane.rotation = plane_state->base.rotation;
+	cache->plane.rotation = plane_state->hw.rotation;
 	/*
 	 * Src coordinates are already rotated by 270 degrees for
 	 * the 90/270 degree plane rotation cases (to match the
 	 * GTT mapping), hence no need to account for rotation here.
 	 */
-	cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
-	cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
-	cache->plane.visible = plane_state->base.visible;
+	cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
 	cache->plane.adjusted_x = plane_state->color_plane[0].x;
 	cache->plane.adjusted_y = plane_state->color_plane[0].y;
-	cache->plane.y = plane_state->base.src.y1 >> 16;
+	cache->plane.y = plane_state->uapi.src.y1 >> 16;
 
-	cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;
-
-	if (!cache->plane.visible)
-		return;
+	cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
 
 	cache->fb.format = fb->format;
 	cache->fb.stride = fb->pitches[0];
 
-	cache->vma = plane_state->vma;
-	cache->flags = plane_state->flags;
-	if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
-		cache->flags &= ~PLANE_HAS_FENCE;
+	WARN_ON(plane_state->flags & PLANE_HAS_FENCE &&
+		!plane_state->vma->fence);
+
+	if (plane_state->flags & PLANE_HAS_FENCE &&
+	    plane_state->vma->fence)
+		cache->fence_id = plane_state->vma->fence->id;
+	else
+		cache->fence_id = -1;
+}
+
+static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
+		fbc->compressed_fb.size * fbc->threshold;
 }
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -704,6 +687,11 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 	struct intel_fbc *fbc = &dev_priv->fbc;
 	struct intel_fbc_state_cache *cache = &fbc->state_cache;
 
+	if (!cache->plane.visible) {
+		fbc->no_fbc_reason = "primary plane not visible";
+		return false;
+	}
+
 	/* We don't need to use a state cache here since this information is
 	 * global for all CRTC.
 	 */
@@ -712,11 +700,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 		return false;
 	}
 
-	if (!cache->vma) {
-		fbc->no_fbc_reason = "primary plane not visible";
-		return false;
-	}
-
 	if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
 		fbc->no_fbc_reason = "incompatible mode";
 		return false;
@@ -740,7 +723,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 	 * For now this will effecively disable FBC with 90/270 degree
 	 * rotation.
 	 */
-	if (!(cache->flags & PLANE_HAS_FENCE)) {
+	if (cache->fence_id < 0) {
 		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
 		return false;
 	}
@@ -783,8 +766,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 	 * we didn't get any invalidate/deactivate calls, but this would require
 	 * a lot of tracking just for a specific case. If we conclude it's an
 	 * important case, we can implement it later. */
-	if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
-	    fbc->compressed_fb.size * fbc->threshold) {
+	if (intel_fbc_cfb_size_changed(dev_priv)) {
 		fbc->no_fbc_reason = "CFB requirements changed";
 		return false;
 	}
@@ -794,7 +776,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
 	 * and screen flicker.
 	 */
-	if (IS_GEN_RANGE(dev_priv, 9, 10) &&
+	if (INTEL_GEN(dev_priv) >= 9 &&
 	    (fbc->state_cache.plane.adjusted_y & 3)) {
 		fbc->no_fbc_reason = "plane Y offset is misaligned";
 		return false;
@@ -837,8 +819,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
 	 * zero. */
 	memset(params, 0, sizeof(*params));
 
-	params->vma = cache->vma;
-	params->flags = cache->flags;
+	params->fence_id = cache->fence_id;
 
 	params->crtc.pipe = crtc->pipe;
 	params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
@@ -849,39 +830,88 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
 
 	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
 
-	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
-		params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
-						32 * fbc->threshold) * 8;
+	params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride;
+
+	params->plane_visible = cache->plane.visible;
 }
 
-void intel_fbc_pre_update(struct intel_crtc *crtc,
-			  struct intel_crtc_state *crtc_state,
-			  struct intel_plane_state *plane_state)
+static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_fbc *fbc = &dev_priv->fbc;
+	const struct intel_fbc_state_cache *cache = &fbc->state_cache;
+	const struct intel_fbc_reg_params *params = &fbc->params;
+
+	if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
+		return false;
+
+	if (!params->plane_visible)
+		return false;
+
+	if (!intel_fbc_can_activate(crtc))
+		return false;
+
+	if (params->fb.format != cache->fb.format)
+		return false;
+
+	if (params->fb.stride != cache->fb.stride)
+		return false;
+
+	if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache))
+		return false;
+
+	if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride)
+		return false;
+
+	return true;
+}
+
+bool intel_fbc_pre_update(struct intel_crtc *crtc,
+			  const struct intel_crtc_state *crtc_state,
+			  const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_fbc *fbc = &dev_priv->fbc;
 	const char *reason = "update pending";
+	bool need_vblank_wait = false;
 
 	if (!fbc_supported(dev_priv))
-		return;
+		return need_vblank_wait;
 
 	mutex_lock(&fbc->lock);
 
-	if (!multiple_pipes_ok(crtc, plane_state)) {
-		reason = "more than one pipe active";
-		goto deactivate;
-	}
-
-	if (!fbc->enabled || fbc->crtc != crtc)
+	if (fbc->crtc != crtc)
 		goto unlock;
 
 	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
 	fbc->flip_pending = true;
 
-deactivate:
-	intel_fbc_deactivate(dev_priv, reason);
+	if (!intel_fbc_can_flip_nuke(crtc_state)) {
+		intel_fbc_deactivate(dev_priv, reason);
+
+		/*
+		 * Display WA #1198: glk+
+		 * Need an extra vblank wait between FBC disable and most plane
+		 * updates. Bspec says this is only needed for plane disable, but
+		 * that is not true. Touching most plane registers will cause the
+		 * corruption to appear. Also SKL/derivatives do not seem to be
+		 * affected.
+		 *
+		 * TODO: could optimize this a bit by sampling the frame
+		 * counter when we disable FBC (if it was already done earlier)
+		 * and skipping the extra vblank wait before the plane update
+		 * if at least one frame has already passed.
+		 */
+		if (fbc->activated &&
+		    (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)))
+			need_vblank_wait = true;
+		fbc->activated = false;
+	}
 unlock:
 	mutex_unlock(&fbc->lock);
+
+	return need_vblank_wait;
 }
 
 /**
@@ -897,14 +927,13 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
 	struct intel_crtc *crtc = fbc->crtc;
 
 	WARN_ON(!mutex_is_locked(&fbc->lock));
-	WARN_ON(!fbc->enabled);
+	WARN_ON(!fbc->crtc);
 	WARN_ON(fbc->active);
 
 	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
 
 	__intel_fbc_cleanup_cfb(dev_priv);
 
-	fbc->enabled = false;
 	fbc->crtc = NULL;
 }
 
@@ -915,11 +944,10 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
 
 	WARN_ON(!mutex_is_locked(&fbc->lock));
 
-	if (!fbc->enabled || fbc->crtc != crtc)
+	if (fbc->crtc != crtc)
 		return;
 
 	fbc->flip_pending = false;
-	WARN_ON(fbc->active);
 
 	if (!i915_modparams.enable_fbc) {
 		intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
@@ -933,10 +961,9 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
 	if (!intel_fbc_can_activate(crtc))
 		return;
 
-	if (!fbc->busy_bits) {
-		intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
+	if (!fbc->busy_bits)
 		intel_fbc_hw_activate(dev_priv);
-	} else
+	else
 		intel_fbc_deactivate(dev_priv, "frontbuffer write");
 }
 
@@ -955,7 +982,7 @@ void intel_fbc_post_update(struct intel_crtc *crtc)
 
 static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
 {
-	if (fbc->enabled)
+	if (fbc->crtc)
 		return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
 	else
 		return fbc->possible_framebuffer_bits;
@@ -977,7 +1004,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 
 	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
 
-	if (fbc->enabled && fbc->busy_bits)
+	if (fbc->crtc && fbc->busy_bits)
 		intel_fbc_deactivate(dev_priv, "frontbuffer write");
 
 	mutex_unlock(&fbc->lock);
@@ -998,7 +1025,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
 	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
 		goto out;
 
-	if (!fbc->busy_bits && fbc->enabled &&
+	if (!fbc->busy_bits && fbc->crtc &&
 	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
 		if (fbc->active)
 			intel_fbc_recompress(dev_priv);
@@ -1047,12 +1074,12 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
 	 * to pipe or plane A. */
 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
 		struct intel_crtc_state *crtc_state;
-		struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
+		struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
 
 		if (!plane->has_fbc)
 			continue;
 
-		if (!plane_state->base.visible)
+		if (!plane_state->uapi.visible)
 			continue;
 
 		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -1081,42 +1108,53 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
  * intel_fbc_disable in the middle, as long as it is deactivated.
  */
 void intel_fbc_enable(struct intel_crtc *crtc,
-		      struct intel_crtc_state *crtc_state,
-		      struct intel_plane_state *plane_state)
+		      const struct intel_crtc_state *crtc_state,
+		      const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc_state_cache *cache = &fbc->state_cache;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 
 	if (!fbc_supported(dev_priv))
 		return;
 
 	mutex_lock(&fbc->lock);
 
-	if (fbc->enabled) {
-		WARN_ON(fbc->crtc == NULL);
-		if (fbc->crtc == crtc) {
-			WARN_ON(!crtc_state->enable_fbc);
-			WARN_ON(fbc->active);
-		}
-		goto out;
+	if (fbc->crtc) {
+		if (fbc->crtc != crtc ||
+		    !intel_fbc_cfb_size_changed(dev_priv))
+			goto out;
+
+		__intel_fbc_disable(dev_priv);
 	}
 
-	if (!crtc_state->enable_fbc)
-		goto out;
-
 	WARN_ON(fbc->active);
-	WARN_ON(fbc->crtc != NULL);
 
 	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
-	if (intel_fbc_alloc_cfb(crtc)) {
+
+	/* FIXME crtc_state->enable_fbc lies :( */
+	if (!cache->plane.visible)
+		goto out;
+
+	if (intel_fbc_alloc_cfb(dev_priv,
+				intel_fbc_calculate_cfb_size(dev_priv, cache),
+				fb->format->cpp[0])) {
+		cache->plane.visible = false;
 		fbc->no_fbc_reason = "not enough stolen memory";
 		goto out;
 	}
 
+	if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
+	    fb->modifier != I915_FORMAT_MOD_X_TILED)
+		cache->gen9_wa_cfb_stride =
+			DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
+	else
+		cache->gen9_wa_cfb_stride = 0;
+
 	DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
 	fbc->no_fbc_reason = "FBC enabled but not active yet\n";
 
-	fbc->enabled = true;
 	fbc->crtc = crtc;
 out:
 	mutex_unlock(&fbc->lock);
@@ -1156,7 +1194,7 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
 		return;
 
 	mutex_lock(&fbc->lock);
-	if (fbc->enabled) {
+	if (fbc->crtc) {
 		WARN_ON(fbc->crtc->active);
 		__intel_fbc_disable(dev_priv);
 	}
@@ -1172,7 +1210,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
 	mutex_lock(&fbc->lock);
 
 	/* Maybe we were scheduled twice. */
-	if (fbc->underrun_detected || !fbc->enabled)
+	if (fbc->underrun_detected || !fbc->crtc)
 		goto out;
 
 	DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
@@ -1244,28 +1282,6 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
 	schedule_work(&fbc->underrun_work);
 }
 
-/**
- * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
- * @dev_priv: i915 device instance
- *
- * The FBC code needs to track CRTC visibility since the older platforms can't
- * have FBC enabled while multiple pipes are used. This function does the
- * initial setup at driver load to make sure FBC is matching the real hardware.
- */
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
-{
-	struct intel_crtc *crtc;
-
-	/* Don't even bother tracking anything if we don't need. */
-	if (!no_fbc_on_multiple_pipes(dev_priv))
-		return;
-
-	for_each_intel_crtc(&dev_priv->drm, crtc)
-		if (intel_crtc_active(crtc) &&
-		    crtc->base.primary->state->visible)
-			dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
-}
-
 /*
  * The DDX driver changes its behavior depending on the value it reads from
  * i915.enable_fbc, so sanitize it by translating the default value into either
@@ -1283,10 +1299,6 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
 	if (!HAS_FBC(dev_priv))
 		return 0;
 
-	/* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
-	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-		return 0;
-
 	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
 		return 1;
 
@@ -1317,7 +1329,6 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
 
 	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
 	mutex_init(&fbc->lock);
-	fbc->enabled = false;
 	fbc->active = false;
 
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
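
For illustration (not part of the patch): the FBC rework above caches a plain fence id (-1 meaning "no fence") instead of carrying the vma and PLANE_HAS_FENCE flags into the register parameters, so the activate paths simply gate fence programming on fence_id >= 0. A standalone sketch of that convention, with an illustrative control bit standing in for the real i915_reg.h value:

#include <stdint.h>
#include <stdio.h>

#define DPFC_CTL_FENCE_EN (1u << 29)	/* illustrative; see i915_reg.h */

/* Cached per-plane FBC state: fence_id >= 0 iff a GGTT fence is attached. */
struct fbc_params {
	int fence_id;
	uint32_t fence_y_offset;
};

/* Build a simplified DPFC control value the way g4x_fbc_activate() does:
 * only enable and encode the fence when one exists. */
static uint32_t build_dpfc_ctl(const struct fbc_params *p)
{
	uint32_t ctl = 0;

	if (p->fence_id >= 0)
		ctl |= DPFC_CTL_FENCE_EN | (uint32_t)p->fence_id;

	return ctl;
}

int main(void)
{
	struct fbc_params fenced = { .fence_id = 3, .fence_y_offset = 64 };
	struct fbc_params unfenced = { .fence_id = -1 };

	printf("fenced=0x%08x unfenced=0x%08x\n",
	       (unsigned int)build_dpfc_ctl(&fenced),
	       (unsigned int)build_dpfc_ctl(&unfenced));
	return 0;
}

Keeping only the id (rather than the vma) in the cached parameters matches the diff's removal of params->vma/params->flags and the ILK_FBC_RT_BASE write.
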
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index 50272ed..c8a5e50 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -19,15 +19,14 @@ struct intel_plane_state;
 void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
 			   struct intel_atomic_state *state);
 bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_pre_update(struct intel_crtc *crtc,
-			  struct intel_crtc_state *crtc_state,
-			  struct intel_plane_state *plane_state);
+bool intel_fbc_pre_update(struct intel_crtc *crtc,
+			  const struct intel_crtc_state *crtc_state,
+			  const struct intel_plane_state *plane_state);
 void intel_fbc_post_update(struct intel_crtc *crtc);
 void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
 void intel_fbc_enable(struct intel_crtc *crtc,
-		      struct intel_crtc_state *crtc_state,
-		      struct intel_plane_state *plane_state);
+		      const struct intel_crtc_state *crtc_state,
+		      const struct intel_plane_state *plane_state);
 void intel_fbc_disable(struct intel_crtc *crtc);
 void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 48c960c..1e98e43 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -100,7 +100,7 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
 	return ret;
 }
 
-static struct fb_ops intelfb_ops = {
+static const struct fb_ops intelfb_ops = {
 	.owner = THIS_MODULE,
 	DRM_FB_HELPER_DEFAULT_OPS,
 	.fb_set_par = intel_fbdev_set_par,
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index ab61f88..6c83b35 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -126,8 +126,8 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
 	}
 }
 
-static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
-						 enum pipe pipe, bool enable)
+static void ilk_set_fifo_underrun_reporting(struct drm_device *dev,
+					    enum pipe pipe, bool enable)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 bit = (pipe == PIPE_A) ?
@@ -139,7 +139,7 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
 		ilk_disable_display_irq(dev_priv, bit);
 }
 
-static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
+static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
@@ -157,9 +157,9 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
 	DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
 }
 
-static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
-						  enum pipe pipe,
-						  bool enable, bool old)
+static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
+					    enum pipe pipe, bool enable,
+					    bool old)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	if (enable) {
@@ -180,8 +180,8 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
 	}
 }
 
-static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
-						  enum pipe pipe, bool enable)
+static void bdw_set_fifo_underrun_reporting(struct drm_device *dev,
+					    enum pipe pipe, bool enable)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
@@ -264,11 +264,11 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
 	if (HAS_GMCH(dev_priv))
 		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
 	else if (IS_GEN_RANGE(dev_priv, 5, 6))
-		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+		ilk_set_fifo_underrun_reporting(dev, pipe, enable);
 	else if (IS_GEN(dev_priv, 7))
-		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
+		ivb_set_fifo_underrun_reporting(dev, pipe, enable, old);
 	else if (INTEL_GEN(dev_priv) >= 8)
-		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
+		bdw_set_fifo_underrun_reporting(dev, pipe, enable);
 
 	return old;
 }
@@ -427,7 +427,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
 		if (HAS_GMCH(dev_priv))
 			i9xx_check_fifo_underruns(crtc);
 		else if (IS_GEN(dev_priv, 7))
-			ivybridge_check_fifo_underruns(crtc);
+			ivb_check_fifo_underruns(crtc);
 	}
 
 	spin_unlock_irq(&dev_priv->irq_lock);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index a448815..0fdbd39 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -1870,7 +1870,7 @@ static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
 		return false;
 
 	return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
-		IS_KABYLAKE(dev_priv));
+		IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv));
 }
 
 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index f56fffc..93ac0f2 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -85,16 +85,17 @@ assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
 	     "HDMI transcoder function enabled, expecting disabled\n");
 }
 
-struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *intel_dig_port =
-		container_of(encoder, struct intel_digital_port, base.base);
+		container_of(&encoder->base, struct intel_digital_port,
+			     base.base);
 	return &intel_dig_port->hdmi;
 }
 
-static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+static struct intel_hdmi *intel_attached_hdmi(struct intel_connector *connector)
 {
-	return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
+	return enc_to_intel_hdmi(intel_attached_encoder(connector));
 }
 
 static u32 g4x_infoframe_index(unsigned int type)
@@ -285,7 +286,7 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
 {
 	const u32 *data = frame;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 	int i;
@@ -321,7 +322,7 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
 			       void *frame, ssize_t len)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	u32 val, *data = frame;
 	int i;
 
@@ -340,7 +341,7 @@ static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+	enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
 	i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
 	u32 val = I915_READ(reg);
 
@@ -362,7 +363,7 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
 {
 	const u32 *data = frame;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 	int i;
@@ -401,7 +402,7 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
 			       void *frame, ssize_t len)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	u32 val, *data = frame;
 	int i;
 
@@ -420,7 +421,7 @@ static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+	enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
 	u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
@@ -438,7 +439,7 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
 {
 	const u32 *data = frame;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 	int i;
@@ -474,7 +475,7 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
 			       void *frame, ssize_t len)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	u32 val, *data = frame;
 	int i;
 
@@ -493,7 +494,7 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+	enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
 	u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
@@ -602,7 +603,7 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	u32 val, ret = 0;
 	int i;
 
@@ -646,7 +647,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
 				  enum hdmi_infoframe_type type,
 				  const union hdmi_infoframe *frame)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	u8 buffer[VIDEO_DIP_DATA_SIZE];
 	ssize_t len;
 
@@ -675,7 +676,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
 			  enum hdmi_infoframe_type type,
 			  union hdmi_infoframe *frame)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	u8 buffer[VIDEO_DIP_DATA_SIZE];
 	int ret;
 
@@ -708,7 +709,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
 {
 	struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 	struct drm_connector *connector = conn_state->connector;
 	int ret;
 
@@ -804,7 +805,7 @@ intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
 
 	ret = drm_hdmi_vendor_infoframe_from_display_mode(frame,
 							  conn_state->connector,
-							  &crtc_state->base.adjusted_mode);
+							  &crtc_state->hw.adjusted_mode);
 	if (WARN_ON(ret))
 		return false;
 
@@ -855,7 +856,7 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
 			       const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
 	i915_reg_t reg = VIDEO_DIP_CTL;
 	u32 val = I915_READ(reg);
@@ -965,7 +966,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
 					 const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	i915_reg_t reg;
 
 	if ((crtc_state->infoframes.enable &
@@ -990,7 +991,7 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
 				   struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	i915_reg_t reg;
 
 	if ((crtc_state->infoframes.enable &
@@ -1027,7 +1028,7 @@ static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
 
 	/* Enable default_phase whenever the display mode is suitably aligned */
 	if (gcp_default_phase_possible(crtc_state->pipe_bpp,
-				       &crtc_state->base.adjusted_mode))
+				       &crtc_state->hw.adjusted_mode))
 		crtc_state->infoframes.gcp |= GCP_DEFAULT_PHASE_ENABLE;
 }
 
@@ -1037,8 +1038,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
 			       const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
@@ -1096,8 +1097,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
 			       const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
@@ -1145,8 +1146,8 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
 			       const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 	u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1736,9 +1737,9 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
 	u32 hdmi_val;
 
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
@@ -1774,7 +1775,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
 				    enum pipe *pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	intel_wakeref_t wakeref;
 	bool ret;
 
@@ -1793,7 +1794,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
 static void intel_hdmi_get_config(struct intel_encoder *encoder,
 				  struct intel_crtc_state *pipe_config)
 {
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tmp, flags = 0;
@@ -1829,7 +1830,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 	    tmp & HDMI_COLOR_RANGE_16_235)
 		pipe_config->limited_color_range = true;
 
-	pipe_config->base.adjusted_mode.flags |= flags;
+	pipe_config->hw.adjusted_mode.flags |= flags;
 
 	if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
 		dotclock = pipe_config->port_clock * 2 / 3;
@@ -1839,7 +1840,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 	if (pipe_config->pixel_multiplier)
 		dotclock /= pipe_config->pixel_multiplier;
 
-	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+	pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
 
 	pipe_config->lane_count = 4;
 
@@ -1860,7 +1861,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
 				    const struct intel_crtc_state *pipe_config,
 				    const struct drm_connector_state *conn_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
 
 	WARN_ON(!pipe_config->has_hdmi_sink);
 	DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
@@ -1874,7 +1875,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	u32 temp;
 
 	temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -1896,7 +1897,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	u32 temp;
 
 	temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -1946,8 +1947,8 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	enum pipe pipe = crtc->pipe;
 	u32 temp;
 
@@ -2007,10 +2008,10 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	struct intel_digital_port *intel_dig_port =
 		hdmi_to_dig_port(intel_hdmi);
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	u32 temp;
 
 	temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -2160,7 +2161,7 @@ static enum drm_mode_status
 intel_hdmi_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
 {
-	struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+	struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum drm_mode_status status;
@@ -2210,12 +2211,12 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
 				     int bpc)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(crtc_state->base.crtc->dev);
-	struct drm_atomic_state *state = crtc_state->base.state;
+		to_i915(crtc_state->uapi.crtc->dev);
+	struct drm_atomic_state *state = crtc_state->uapi.state;
 	struct drm_connector_state *connector_state;
 	struct drm_connector *connector;
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 	int i;
 
 	if (HAS_GMCH(dev_priv))
@@ -2240,7 +2241,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
 	for_each_new_connector_in_state(state, connector, connector_state, i) {
 		const struct drm_display_info *info = &connector->display_info;
 
-		if (connector_state->crtc != crtc_state->base.crtc)
+		if (connector_state->crtc != crtc_state->uapi.crtc)
 			continue;
 
 		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
@@ -2281,7 +2282,7 @@ static bool
 intel_hdmi_ycbcr420_config(struct drm_connector *connector,
 			   struct intel_crtc_state *config)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(config->uapi.crtc);
 
 	if (!connector->ycbcr_420_allowed) {
 		DRM_ERROR("Platform doesn't support YCBCR420 output\n");
@@ -2316,7 +2317,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
 				  struct intel_crtc_state *crtc_state,
 				  int clock, bool force_dvi)
 {
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	int bpc;
 
 	for (bpc = 12; bpc >= 10; bpc -= 2) {
@@ -2334,9 +2335,9 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
 				    struct intel_crtc_state *crtc_state,
 				    bool force_dvi)
 {
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 	int bpc, clock = adjusted_mode->crtc_clock;
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
@@ -2378,7 +2379,7 @@ static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_s
 	const struct intel_digital_connector_state *intel_conn_state =
 		to_intel_digital_connector_state(conn_state);
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 
 	/*
 	 * Our YCbCr output is always limited range.
@@ -2404,9 +2405,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
 			      struct intel_crtc_state *pipe_config,
 			      struct drm_connector_state *conn_state)
 {
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	struct drm_connector *connector = conn_state->connector;
 	struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
 	struct intel_digital_connector_state *intel_conn_state =
@@ -2451,8 +2452,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
 	if (ret)
 		return ret;
 
-	/* Set user selected PAR to incoming mode's member */
-	adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
+	if (conn_state->picture_aspect_ratio)
+		adjusted_mode->picture_aspect_ratio =
+			conn_state->picture_aspect_ratio;
 
 	pipe_config->lane_count = 4;
 
@@ -2495,7 +2497,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
 static void
 intel_hdmi_unset_edid(struct drm_connector *connector)
 {
-	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
 
 	intel_hdmi->has_hdmi_sink = false;
 	intel_hdmi->has_audio = false;
@@ -2511,7 +2513,7 @@ static void
 intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+	struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
 	enum port port = hdmi_to_dig_port(hdmi)->base.port;
 	struct i2c_adapter *adapter =
 		intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
@@ -2558,7 +2560,7 @@ static bool
 intel_hdmi_set_edid(struct drm_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
 	intel_wakeref_t wakeref;
 	struct edid *edid;
 	bool connected = false;
@@ -2599,7 +2601,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
 	enum drm_connector_status status = connector_status_disconnected;
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
 	struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
 	intel_wakeref_t wakeref;
 
@@ -2662,7 +2664,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
 				  const struct drm_connector_state *conn_state)
 {
 	struct intel_digital_port *intel_dig_port =
-		enc_to_dig_port(&encoder->base);
+		enc_to_dig_port(encoder);
 
 	intel_hdmi_prepare(encoder, pipe_config);
 
@@ -2675,7 +2677,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
 				const struct intel_crtc_state *pipe_config,
 				const struct drm_connector_state *conn_state)
 {
-	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dport = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
 	vlv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -2745,7 +2747,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
 				const struct intel_crtc_state *pipe_config,
 				const struct drm_connector_state *conn_state)
 {
-	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dport = enc_to_dig_port(encoder);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
@@ -2771,7 +2773,7 @@ static struct i2c_adapter *
 intel_hdmi_get_i2c_adapter(struct drm_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
 
 	return intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
 }
@@ -2815,7 +2817,7 @@ intel_hdmi_connector_register(struct drm_connector *connector)
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
-	struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier;
+	struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier;
 
 	cec_notifier_conn_unregister(n);
 
@@ -2872,7 +2874,6 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
 		intel_attach_colorspace_property(connector);
 
 	drm_connector_attach_content_type_property(connector);
-	connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		drm_object_attach_property(&connector->base,
@@ -2906,7 +2907,7 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
 				       bool scrambling)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	struct drm_scrambling *sink_scrambling =
 		&connector->display_info.hdmi.scdc.scrambling;
 	struct i2c_adapter *adapter =
@@ -3131,20 +3132,29 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i2c_adapter *ddc;
 	enum port port = intel_encoder->port;
 	struct cec_connector_info conn_info;
 
 	DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
 		      intel_encoder->base.base.id, intel_encoder->base.name);
 
+	if (INTEL_GEN(dev_priv) < 12 && WARN_ON(port == PORT_A))
+		return;
+
 	if (WARN(intel_dig_port->max_lanes < 4,
 		 "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
 		 intel_dig_port->max_lanes, intel_encoder->base.base.id,
 		 intel_encoder->base.name))
 		return;
 
-	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
-			   DRM_MODE_CONNECTOR_HDMIA);
+	intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+	ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+
+	drm_connector_init_with_ddc(dev, connector,
+				    &intel_hdmi_connector_funcs,
+				    DRM_MODE_CONNECTOR_HDMIA,
+				    ddc);
 	drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
 
 	connector->interlace_allowed = 1;
@@ -3154,10 +3164,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		connector->ycbcr_420_allowed = true;
 
-	intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
-
-	if (WARN_ON(port == PORT_A))
-		return;
 	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
 
 	if (HAS_DDI(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index cf1ea54..d3659d0 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -29,7 +29,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
 		     enum port port);
 void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 			       struct intel_connector *intel_connector);
-struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder);
 int intel_hdmi_compute_config(struct intel_encoder *encoder,
 			      struct intel_crtc_state *pipe_config,
 			      struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index fc29046..99d3a3c 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -302,7 +302,7 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
 static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
 {
 	return intel_encoder_is_dig_port(encoder) &&
-		enc_to_dig_port(&encoder->base)->hpd_pulse != NULL;
+		enc_to_dig_port(encoder)->hpd_pulse != NULL;
 }
 
 static void i915_digport_work_func(struct work_struct *work)
@@ -335,7 +335,7 @@ static void i915_digport_work_func(struct work_struct *work)
 		if (!long_hpd && !short_hpd)
 			continue;
 
-		dig_port = enc_to_dig_port(&encoder->base);
+		dig_port = enc_to_dig_port(encoder);
 
 		ret = dig_port->hpd_pulse(dig_port, long_hpd);
 		if (ret == IRQ_NONE) {
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index f8f1308..d807c56 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -189,7 +189,7 @@ void lspcon_ycbcr420_config(struct drm_connector *connector,
 {
 	const struct drm_display_info *info = &connector->display_info;
 	const struct drm_display_mode *adjusted_mode =
-					&crtc_state->base.adjusted_mode;
+					&crtc_state->hw.adjusted_mode;
 
 	if (drm_mode_is_420_only(info, adjusted_mode) &&
 	    connector->ycbcr_420_allowed) {
@@ -434,8 +434,8 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
 			    const void *frame, ssize_t len)
 {
 	bool ret;
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
 
 	/* LSPCON only needs AVI IF */
 	if (type != HDMI_INFOFRAME_TYPE_AVI)
@@ -472,10 +472,10 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
 	ssize_t ret;
 	union hdmi_infoframe frame;
 	u8 buf[VIDEO_DIP_DATA_SIZE];
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct intel_lspcon *lspcon = &dig_port->lspcon;
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 
 	if (!lspcon->active) {
 		DRM_ERROR("Writing infoframes while LSPCON disabled ?\n");
@@ -522,7 +522,7 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
 			      const struct intel_crtc_state *pipe_config)
 {
 	/* FIXME actually read this from the hw */
-	return enc_to_intel_lspcon(&encoder->base)->active;
+	return enc_to_intel_lspcon(encoder)->active;
 }
 
 void lspcon_resume(struct intel_lspcon *lspcon)
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index b1bc786..10696bb 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -135,7 +135,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 	else
 		flags |= DRM_MODE_FLAG_PVSYNC;
 
-	pipe_config->base.adjusted_mode.flags |= flags;
+	pipe_config->hw.adjusted_mode.flags |= flags;
 
 	if (INTEL_GEN(dev_priv) < 5)
 		pipe_config->gmch_pfit.lvds_border_bits =
@@ -148,7 +148,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 		pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
 	}
 
-	pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+	pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
 static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
@@ -230,8 +230,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
 {
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	enum pipe pipe = crtc->pipe;
 	u32 temp;
 
@@ -392,8 +392,8 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 		to_lvds_encoder(&intel_encoder->base);
 	struct intel_connector *intel_connector =
 		lvds_encoder->attached_connector;
-	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	unsigned int lvds_bpp;
 
 	/* Should never happen!! */
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 969ade6..e59b499 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -941,6 +941,13 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 	if (mboxes & MBOX_ACPI) {
 		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
 		opregion->acpi = base + OPREGION_ACPI_OFFSET;
+		/*
+		 * Indicate we handle monitor hotplug events ourselves so we do
+		 * not need ACPI notifications for them. Disabling these avoids
+		 * triggering the AML code doing the notification, which may be
+		 * broken as Windows also seems to disable these.
+		 */
+		opregion->acpi->chpd = 1;
 	}
 
 	if (mboxes & MBOX_SWSCI) {
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 8a98a1a..e40c3a0 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -677,8 +677,8 @@ static void update_colorkey(struct intel_overlay *overlay,
 	if (overlay->color_key_enabled)
 		flags |= DST_KEY_ENABLE;
 
-	if (state->base.visible)
-		format = state->base.fb->format->format;
+	if (state->uapi.visible)
+		format = state->hw.fb->format->format;
 
 	switch (format) {
 	case DRM_FORMAT_C8:
@@ -767,10 +767,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
 
-	i915_gem_object_lock(new_bo);
 	vma = i915_gem_object_pin_to_display_plane(new_bo,
 						   0, NULL, PIN_MAPPABLE);
-	i915_gem_object_unlock(new_bo);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto out_pin_section;
@@ -1335,12 +1333,14 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
 void intel_overlay_setup(struct drm_i915_private *dev_priv)
 {
 	struct intel_overlay *overlay;
+	struct intel_engine_cs *engine;
 	int ret;
 
 	if (!HAS_OVERLAY(dev_priv))
 		return;
 
-	if (!HAS_ENGINE(dev_priv, RCS0))
+	engine = dev_priv->engine[RCS0];
+	if (!engine || !engine->kernel_context)
 		return;
 
 	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
@@ -1348,7 +1348,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
 		return;
 
 	overlay->i915 = dev_priv;
-	overlay->context = dev_priv->engine[RCS0]->kernel_context;
+	overlay->context = engine->kernel_context;
 	GEM_BUG_ON(!overlay->context);
 
 	overlay->color_key = 0x0101fe;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index bc14e9c..7b3ec6eb 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -178,7 +178,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
 			struct intel_crtc_state *pipe_config,
 			int fitting_mode)
 {
-	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	int x = 0, y = 0, width = 0, height = 0;
 
 	/* Native modes don't need fitting */
@@ -300,7 +300,7 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
 static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
 			      u32 *pfit_control)
 {
-	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	u32 scaled_width = adjusted_mode->crtc_hdisplay *
 		pipe_config->pipe_src_h;
 	u32 scaled_height = pipe_config->pipe_src_w *
@@ -321,7 +321,7 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
 			      u32 *pfit_control, u32 *pfit_pgm_ratios,
 			      u32 *border)
 {
-	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	u32 scaled_width = adjusted_mode->crtc_hdisplay *
 		pipe_config->pipe_src_h;
 	u32 scaled_height = pipe_config->pipe_src_w *
@@ -380,7 +380,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
-	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 
 	/* Native modes don't need fitting */
 	if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
@@ -1047,7 +1047,7 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
-	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
 	u32 ctl, ctl2;
 
 	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
@@ -1077,7 +1077,7 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
-	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
 	u32 pwm_ctl, val;
 
 	/* Controller 1 uses the utility pin. */
@@ -1189,7 +1189,7 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
-	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
 
 	if (!panel->backlight.present)
 		return;
@@ -1840,13 +1840,22 @@ static int pwm_setup_backlight(struct intel_connector *connector,
 			       enum pipe pipe)
 {
 	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_panel *panel = &connector->panel;
+	const char *desc;
 	int retval;
 
-	/* Get the PWM chip for backlight control */
-	panel->backlight.pwm = pwm_get(dev->dev, "pwm_backlight");
+	/* Get the right PWM chip for DSI backlight according to VBT */
+	if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+		panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
+		desc = "PMIC";
+	} else {
+		panel->backlight.pwm = pwm_get(dev->dev, "pwm_soc_backlight");
+		desc = "SoC";
+	}
+
 	if (IS_ERR(panel->backlight.pwm)) {
-		DRM_ERROR("Failed to own the pwm chip\n");
+		DRM_ERROR("Failed to get the %s PWM chip\n", desc);
 		panel->backlight.pwm = NULL;
 		return -ENODEV;
 	}
@@ -1873,6 +1882,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
 				 CRC_PMIC_PWM_PERIOD_NS);
 	panel->backlight.enabled = panel->backlight.level != 0;
 
+	DRM_INFO("Using %s PWM for LCD backlight control\n", desc);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 6260a20..520408e 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -98,7 +98,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
 			break;
 		case INTEL_OUTPUT_DP:
 		case INTEL_OUTPUT_EDP:
-			dig_port = enc_to_dig_port(&encoder->base);
+			dig_port = enc_to_dig_port(encoder);
 			switch (dig_port->base.port) {
 			case PORT_B:
 				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
@@ -309,13 +309,13 @@ intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
 		goto put_state;
 	}
 
-	pipe_config->base.mode_changed = pipe_config->has_psr;
+	pipe_config->uapi.mode_changed = pipe_config->has_psr;
 	pipe_config->crc_enabled = enable;
 
 	if (IS_HASWELL(dev_priv) &&
-	    pipe_config->base.active && crtc->pipe == PIPE_A &&
+	    pipe_config->hw.active && crtc->pipe == PIPE_A &&
 	    pipe_config->cpu_transcoder == TRANSCODER_EDP)
-		pipe_config->base.mode_changed = true;
+		pipe_config->uapi.mode_changed = true;
 
 	ret = drm_atomic_commit(state);
 
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 6a9f322..89c9cf5 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -26,6 +26,7 @@
 #include "display/intel_dp.h"
 
 #include "i915_drv.h"
+#include "intel_atomic.h"
 #include "intel_display_types.h"
 #include "intel_psr.h"
 #include "intel_sprite.h"
@@ -401,7 +402,9 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
 	/* Enable ALPM at sink for psr2 */
 	if (dev_priv->psr.psr2_enabled) {
 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
-				   DP_ALPM_ENABLE);
+				   DP_ALPM_ENABLE |
+				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
+
 		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
 	} else {
 		if (dev_priv->psr.link_standby)
@@ -536,11 +539,11 @@ transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
 
 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
 {
-	if (!cstate || !cstate->base.active)
+	if (!cstate || !cstate->hw.active)
 		return 0;
 
 	return DIV_ROUND_UP(1000 * 1000,
-			    drm_mode_vrefresh(&cstate->base.adjusted_mode));
+			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
 }
 
 static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
@@ -605,9 +608,9 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 				    struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
-	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
-	int psr_max_h = 0, psr_max_v = 0;
+	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
+	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
 
 	if (!dev_priv->psr.sink_psr2_support)
 		return false;
@@ -631,12 +634,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 	if (INTEL_GEN(dev_priv) >= 12) {
 		psr_max_h = 5120;
 		psr_max_v = 3200;
+		max_bpp = 30;
 	} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
 		psr_max_h = 4096;
 		psr_max_v = 2304;
+		max_bpp = 24;
 	} else if (IS_GEN(dev_priv, 9)) {
 		psr_max_h = 3640;
 		psr_max_v = 2304;
+		max_bpp = 24;
 	}
 
 	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
@@ -646,6 +652,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		return false;
 	}
 
+	if (crtc_state->pipe_bpp > max_bpp) {
+		DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n",
+			      crtc_state->pipe_bpp, max_bpp);
+		return false;
+	}
+
 	/*
 	 * HW sends SU blocks of size four scan lines, which means the starting
 	 * X coordinate and Y granularity requirements will always be met. We
@@ -672,7 +684,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 	int psr_setup_time;
 
 	if (!CAN_PSR(dev_priv))
@@ -792,7 +804,7 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
 
 	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
 	dev_priv->psr.busy_frontbuffer_bits = 0;
-	dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+	dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
 	dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
 	dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
 	dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
@@ -924,6 +936,9 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 	/* Disable PSR on Sink */
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
 
+	if (dev_priv->psr.psr2_enabled)
+		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
+
 	dev_priv->psr.enabled = false;
 }
 
@@ -1039,7 +1054,7 @@ void intel_psr_update(struct intel_dp *intel_dp,
 int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
 			    u32 *out_value)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
@@ -1096,7 +1111,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
 	struct drm_device *dev = &dev_priv->drm;
 	struct drm_modeset_acquire_ctx ctx;
 	struct drm_atomic_state *state;
-	struct drm_crtc *crtc;
+	struct intel_crtc *crtc;
 	int err;
 
 	state = drm_atomic_state_alloc(dev);
@@ -1107,21 +1122,18 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
 	state->acquire_ctx = &ctx;
 
 retry:
-	drm_for_each_crtc(crtc, dev) {
-		struct drm_crtc_state *crtc_state;
-		struct intel_crtc_state *intel_crtc_state;
+	for_each_intel_crtc(dev, crtc) {
+		struct intel_crtc_state *crtc_state =
+			intel_atomic_get_crtc_state(state, crtc);
 
-		crtc_state = drm_atomic_get_crtc_state(state, crtc);
 		if (IS_ERR(crtc_state)) {
 			err = PTR_ERR(crtc_state);
 			goto error;
 		}
 
-		intel_crtc_state = to_intel_crtc_state(crtc_state);
-
-		if (crtc_state->active && intel_crtc_state->has_psr) {
+		if (crtc_state->hw.active && crtc_state->has_psr) {
 			/* Mark mode as changed to trigger a pipe->update() */
-			crtc_state->mode_changed = true;
+			crtc_state->uapi.mode_changed = true;
 			break;
 		}
 	}
@@ -1379,11 +1391,80 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 	mutex_init(&dev_priv->psr.lock);
 }
 
-void intel_psr_short_pulse(struct intel_dp *intel_dp)
+static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
+					   u8 *status, u8 *error_status)
+{
+	struct drm_dp_aux *aux = &intel_dp->aux;
+	int ret;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
+	if (ret != 1)
+		return ret;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
+	if (ret != 1)
+		return ret;
+
+	*status = *status & DP_PSR_SINK_STATE_MASK;
+
+	return 0;
+}
+
+static void psr_alpm_check(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct drm_dp_aux *aux = &intel_dp->aux;
+	struct i915_psr *psr = &dev_priv->psr;
+	u8 val;
+	int r;
+
+	if (!psr->psr2_enabled)
+		return;
+
+	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
+	if (r != 1) {
+		DRM_ERROR("Error reading ALPM status\n");
+		return;
+	}
+
+	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
+		intel_psr_disable_locked(intel_dp);
+		psr->sink_not_reliable = true;
+		DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n");
+
+		/* Clearing error */
+		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
+	}
+}
+
+static void psr_capability_changed_check(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	struct i915_psr *psr = &dev_priv->psr;
 	u8 val;
+	int r;
+
+	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
+	if (r != 1) {
+		DRM_ERROR("Error reading DP_PSR_ESI\n");
+		return;
+	}
+
+	if (val & DP_PSR_CAPS_CHANGE) {
+		intel_psr_disable_locked(intel_dp);
+		psr->sink_not_reliable = true;
+		DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n");
+
+		/* Clearing it */
+		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
+	}
+}
+
+void intel_psr_short_pulse(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct i915_psr *psr = &dev_priv->psr;
+	u8 status, error_status;
 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
 			  DP_PSR_LINK_CRC_ERROR;
@@ -1396,38 +1477,34 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
 	if (!psr->enabled || psr->dp != intel_dp)
 		goto exit;
 
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
-		DRM_ERROR("PSR_STATUS dpcd read failed\n");
+	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
+		DRM_ERROR("Error reading PSR status or error status\n");
 		goto exit;
 	}
 
-	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
-		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
 		intel_psr_disable_locked(intel_dp);
 		psr->sink_not_reliable = true;
 	}
 
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
-		DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
-		goto exit;
-	}
-
-	if (val & DP_PSR_RFB_STORAGE_ERROR)
+	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
+		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
 		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
-	if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
+	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
 		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
-	if (val & DP_PSR_LINK_CRC_ERROR)
+	if (error_status & DP_PSR_LINK_CRC_ERROR)
 		DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");
 
-	if (val & ~errors)
+	if (error_status & ~errors)
 		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
-			  val & ~errors);
-	if (val & errors) {
-		intel_psr_disable_locked(intel_dp);
-		psr->sink_not_reliable = true;
-	}
+			  error_status & ~errors);
 	/* clear status register */
-	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
+
+	psr_alpm_check(intel_dp);
+	psr_capability_changed_check(intel_dp);
+
 exit:
 	mutex_unlock(&psr->lock);
 }
@@ -1446,3 +1523,27 @@ bool intel_psr_enabled(struct intel_dp *intel_dp)
 
 	return ret;
 }
+
+void intel_psr_atomic_check(struct drm_connector *connector,
+			    struct drm_connector_state *old_state,
+			    struct drm_connector_state *new_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct intel_connector *intel_connector;
+	struct intel_digital_port *dig_port;
+	struct drm_crtc_state *crtc_state;
+
+	if (!CAN_PSR(dev_priv) || !new_state->crtc ||
+	    dev_priv->psr.initially_probed)
+		return;
+
+	intel_connector = to_intel_connector(connector);
+	dig_port = enc_to_dig_port(intel_connector->encoder);
+	if (dev_priv->psr.dp != &dig_port->dp)
+		return;
+
+	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+						   new_state->crtc);
+	crtc_state->mode_changed = true;
+	dev_priv->psr.initially_probed = true;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 46e4de8..c58a1d4 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -8,6 +8,8 @@
 
 #include "intel_frontbuffer.h"
 
+struct drm_connector;
+struct drm_connector_state;
 struct drm_i915_private;
 struct intel_crtc_state;
 struct intel_dp;
@@ -35,5 +37,8 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp);
 int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
 			    u32 *out_value);
 bool intel_psr_enabled(struct intel_dp *intel_dp);
+void intel_psr_atomic_check(struct drm_connector *connector,
+			    struct drm_connector_state *old_state,
+			    struct drm_connector_state *new_state);
 
 #endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 5b7f4ba..e8819fd 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -180,7 +180,7 @@ static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
 	return container_of(encoder, struct intel_sdvo, base);
 }
 
-static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+static struct intel_sdvo *intel_attached_sdvo(struct intel_connector *connector)
 {
 	return to_sdvo(intel_attached_encoder(connector));
 }
@@ -1087,7 +1087,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
 {
 	struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 	int ret;
 
 	if (!crtc_state->has_hdmi_sink)
@@ -1276,8 +1276,8 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
 		to_intel_sdvo_connector_state(conn_state);
 	struct intel_sdvo_connector *intel_sdvo_connector =
 		to_intel_sdvo_connector(conn_state->connector);
-	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	struct drm_display_mode *mode = &pipe_config->base.mode;
+	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+	struct drm_display_mode *mode = &pipe_config->hw.mode;
 
 	DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
 	pipe_config->pipe_bpp = 8*3;
@@ -1349,9 +1349,9 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
 	if (IS_TV(intel_sdvo_connector))
 		i9xx_adjust_sdvo_tv_clock(pipe_config);
 
-	/* Set user selected PAR to incoming mode's member */
-	if (intel_sdvo_connector->is_hdmi)
-		adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
+	if (conn_state->picture_aspect_ratio)
+		adjusted_mode->picture_aspect_ratio =
+			conn_state->picture_aspect_ratio;
 
 	if (!intel_sdvo_compute_avi_infoframe(intel_sdvo,
 					      pipe_config, conn_state)) {
@@ -1429,13 +1429,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
 				  const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
 	const struct intel_sdvo_connector_state *sdvo_state =
 		to_intel_sdvo_connector_state(conn_state);
 	const struct intel_sdvo_connector *intel_sdvo_connector =
 		to_intel_sdvo_connector(conn_state->connector);
-	const struct drm_display_mode *mode = &crtc_state->base.mode;
+	const struct drm_display_mode *mode = &crtc_state->hw.mode;
 	struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
 	u32 sdvox;
 	struct intel_sdvo_in_out_map in_out;
@@ -1551,7 +1551,7 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
 {
 	struct intel_sdvo_connector *intel_sdvo_connector =
 		to_intel_sdvo_connector(&connector->base);
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 	u16 active_outputs = 0;
 
 	intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
@@ -1629,7 +1629,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 			flags |= DRM_MODE_FLAG_NVSYNC;
 	}
 
-	pipe_config->base.adjusted_mode.flags |= flags;
+	pipe_config->hw.adjusted_mode.flags |= flags;
 
 	/*
 	 * pixel multiplier readout is tricky: Only on i915g/gm it is stored in
@@ -1649,7 +1649,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 	if (pipe_config->pixel_multiplier)
 		dotclock /= pipe_config->pixel_multiplier;
 
-	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+	pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
 
 	/* Cross check the port pixel multiplier with the sdvo encoder state. */
 	if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
@@ -1701,7 +1701,7 @@ static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
 				    const struct drm_connector_state *conn_state)
 {
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 	struct drm_connector *connector = conn_state->connector;
 	u8 *eld = connector->eld;
 
@@ -1723,7 +1723,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	u32 temp;
 
 	if (old_crtc_state->has_audio)
@@ -1785,7 +1785,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
-	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	u32 temp;
 	bool input1, input2;
 	int i;
@@ -1823,7 +1823,7 @@ static enum drm_mode_status
 intel_sdvo_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
 {
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	struct intel_sdvo_connector *intel_sdvo_connector =
 		to_intel_sdvo_connector(connector);
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -1941,7 +1941,7 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
 static struct edid *
 intel_sdvo_get_edid(struct drm_connector *connector)
 {
-	struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	return drm_get_edid(connector, &sdvo->ddc);
 }
 
@@ -1959,7 +1959,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
 static enum drm_connector_status
 intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
 {
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	struct intel_sdvo_connector *intel_sdvo_connector =
 		to_intel_sdvo_connector(connector);
 	enum drm_connector_status status;
@@ -2028,7 +2028,7 @@ static enum drm_connector_status
 intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
 	u16 response;
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 	enum drm_connector_status ret;
 
@@ -2175,7 +2175,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
 
 static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 {
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	const struct drm_connector_state *conn_state = connector->state;
 	struct intel_sdvo_sdtv_resolution_request tv_res;
 	u32 reply = 0, format_map = 0;
@@ -2215,7 +2215,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct drm_display_mode *newmode;
 
@@ -2379,7 +2379,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
 static int
 intel_sdvo_connector_register(struct drm_connector *connector)
 {
-	struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	int ret;
 
 	ret = intel_connector_register(connector);
@@ -2394,7 +2394,7 @@ intel_sdvo_connector_register(struct drm_connector *connector)
 static void
 intel_sdvo_connector_unregister(struct drm_connector *connector)
 {
-	struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
 
 	sysfs_remove_link(&connector->kdev->kobj,
 			  sdvo->ddc.dev.kobj.name);
@@ -2654,7 +2654,6 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
 		intel_attach_broadcast_rgb_property(&connector->base.base);
 	}
 	intel_attach_aspect_ratio_property(&connector->base.base);
-	connector->base.base.state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 }
 
 static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
@@ -2933,7 +2932,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
 
 	list_for_each_entry_safe(connector, tmp,
 				 &dev->mode_config.connector_list, head) {
-		if (intel_attached_encoder(connector) == &intel_sdvo->base) {
+		if (intel_attached_encoder(to_intel_connector(connector)) == &intel_sdvo->base) {
 			drm_connector_unregister(connector);
 			intel_connector_destroy(connector);
 		}
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 72fda04..fca77ec1 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -81,9 +81,9 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
  */
 void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct drm_display_mode *adjusted_mode = &new_crtc_state->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
 	long timeout = msecs_to_jiffies_timeout(1);
 	int scanline, min, max, vblank_start;
 	wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -120,7 +120,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 
 	crtc->debug.min_vbl = min;
 	crtc->debug.max_vbl = max;
-	trace_i915_pipe_update_start(crtc);
+	trace_intel_pipe_update_start(crtc);
 
 	for (;;) {
 		/*
@@ -173,7 +173,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 	crtc->debug.start_vbl_time = ktime_get();
 	crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
 
-	trace_i915_pipe_update_vblank_evaded(crtc);
+	trace_intel_pipe_update_vblank_evaded(crtc);
 	return;
 
 irq_disable:
@@ -190,27 +190,28 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
  */
 void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	enum pipe pipe = crtc->pipe;
 	int scanline_end = intel_get_crtc_scanline(crtc);
 	u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
 	ktime_t end_vbl_time = ktime_get();
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
+	trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);
 
 	/* We're still in the vblank-evade critical section, this can't race.
 	 * Would be slightly nice to just grab the vblank count and arm the
 	 * event outside of the critical section - the spinlock might spin for a
 	 * while ... */
-	if (new_crtc_state->base.event) {
+	if (new_crtc_state->uapi.event) {
 		WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
 
 		spin_lock(&crtc->base.dev->event_lock);
-		drm_crtc_arm_vblank_event(&crtc->base, new_crtc_state->base.event);
+		drm_crtc_arm_vblank_event(&crtc->base,
+				          new_crtc_state->uapi.event);
 		spin_unlock(&crtc->base.dev->event_lock);
 
-		new_crtc_state->base.event = NULL;
+		new_crtc_state->uapi.event = NULL;
 	}
 
 	local_irq_enable();
@@ -239,9 +240,9 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
 
 int intel_plane_check_stride(const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	unsigned int rotation = plane_state->base.rotation;
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	unsigned int rotation = plane_state->hw.rotation;
 	u32 stride, max_stride;
 
 	/*
@@ -251,7 +252,7 @@ int intel_plane_check_stride(const struct intel_plane_state *plane_state)
 	 * kick in due the plane being invisible.
 	 */
 	if (intel_plane_can_remap(plane_state) &&
-	    !plane_state->base.visible)
+	    !plane_state->uapi.visible)
 		return 0;
 
 	/* FIXME other color planes? */
@@ -271,10 +272,10 @@ int intel_plane_check_stride(const struct intel_plane_state *plane_state)
 
 int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	struct drm_rect *src = &plane_state->base.src;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	struct drm_rect *src = &plane_state->uapi.src;
 	u32 src_x, src_y, src_w, src_h, hsub, vsub;
-	bool rotated = drm_rotation_90_or_270(plane_state->base.rotation);
+	bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
 
 	/*
 	 * Hardware doesn't handle subpixel coordinates.
@@ -327,8 +328,8 @@ skl_plane_ratio(const struct intel_crtc_state *crtc_state,
 		const struct intel_plane_state *plane_state,
 		unsigned int *num, unsigned int *den)
 {
-	struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 
 	if (fb->format->cpp[0] == 8) {
 		if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
@@ -347,7 +348,7 @@ skl_plane_ratio(const struct intel_crtc_state *crtc_state,
 static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
 			       const struct intel_plane_state *plane_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
+	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
 	unsigned int pixel_rate = crtc_state->pixel_rate;
 	unsigned int src_w, src_h, dst_w, dst_h;
 	unsigned int num, den;
@@ -358,10 +359,10 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		den *= 2;
 
-	src_w = drm_rect_width(&plane_state->base.src) >> 16;
-	src_h = drm_rect_height(&plane_state->base.src) >> 16;
-	dst_w = drm_rect_width(&plane_state->base.dst);
-	dst_h = drm_rect_height(&plane_state->base.dst);
+	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+	dst_w = drm_rect_width(&plane_state->uapi.dst);
+	dst_h = drm_rect_height(&plane_state->uapi.dst);
 
 	/* Downscaling limits the maximum pixel rate */
 	dst_w = min(src_w, dst_w);
@@ -395,28 +396,28 @@ skl_program_scaler(struct intel_plane *plane,
 		   const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	enum pipe pipe = plane->pipe;
 	int scaler_id = plane_state->scaler_id;
 	const struct intel_scaler *scaler =
 		&crtc_state->scaler_state.scalers[scaler_id];
-	int crtc_x = plane_state->base.dst.x1;
-	int crtc_y = plane_state->base.dst.y1;
-	u32 crtc_w = drm_rect_width(&plane_state->base.dst);
-	u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+	int crtc_x = plane_state->uapi.dst.x1;
+	int crtc_y = plane_state->uapi.dst.y1;
+	u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+	u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
 	u16 y_hphase, uv_rgb_hphase;
 	u16 y_vphase, uv_rgb_vphase;
 	int hscale, vscale;
 
-	hscale = drm_rect_calc_hscale(&plane_state->base.src,
-				      &plane_state->base.dst,
+	hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
+				      &plane_state->uapi.dst,
 				      0, INT_MAX);
-	vscale = drm_rect_calc_vscale(&plane_state->base.src,
-				      &plane_state->base.dst,
+	vscale = drm_rect_calc_vscale(&plane_state->uapi.src,
+				      &plane_state->uapi.dst,
 				      0, INT_MAX);
 
 	/* TODO: handle sub-pixel coordinates */
-	if (drm_format_info_is_yuv_semiplanar(fb->format) &&
+	if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
 	    !icl_is_hdr_plane(dev_priv, plane->id)) {
 		y_hphase = skl_scaler_calc_phase(1, hscale, false);
 		y_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -541,10 +542,10 @@ icl_program_input_csc(struct intel_plane *plane,
 	};
 	const u16 *csc;
 
-	if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
-		csc = input_csc_matrix[plane_state->base.color_encoding];
+	if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+		csc = input_csc_matrix[plane_state->hw.color_encoding];
 	else
-		csc = input_csc_matrix_lr[plane_state->base.color_encoding];
+		csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
 
 	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
 		      GOFF(csc[1]));
@@ -558,7 +559,7 @@ icl_program_input_csc(struct intel_plane *plane,
 
 	I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
 		      PREOFF_YUV_TO_RGB_HI);
-	if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+	if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
 		I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0);
 	else
 		I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
@@ -574,7 +575,7 @@ static void
 skl_program_plane(struct intel_plane *plane,
 		  const struct intel_crtc_state *crtc_state,
 		  const struct intel_plane_state *plane_state,
-		  int color_plane, bool slave, u32 plane_ctl)
+		  int color_plane)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum plane_id plane_id = plane->id;
@@ -582,19 +583,21 @@ skl_program_plane(struct intel_plane *plane,
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 	u32 surf_addr = plane_state->color_plane[color_plane].offset;
 	u32 stride = skl_plane_stride(plane_state, color_plane);
-	u32 aux_stride = skl_plane_stride(plane_state, 1);
-	int crtc_x = plane_state->base.dst.x1;
-	int crtc_y = plane_state->base.dst.y1;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	int aux_plane = intel_main_to_aux_plane(fb, color_plane);
+	u32 aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr;
+	u32 aux_stride = skl_plane_stride(plane_state, aux_plane);
+	int crtc_x = plane_state->uapi.dst.x1;
+	int crtc_y = plane_state->uapi.dst.y1;
 	u32 x = plane_state->color_plane[color_plane].x;
 	u32 y = plane_state->color_plane[color_plane].y;
-	u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
-	u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
-	struct intel_plane *linked = plane_state->planar_linked_plane;
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	u8 alpha = plane_state->base.alpha >> 8;
+	u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+	u8 alpha = plane_state->hw.alpha >> 8;
 	u32 plane_color_ctl = 0;
 	unsigned long irqflags;
 	u32 keymsk, keymax;
+	u32 plane_ctl = plane_state->ctl;
 
 	plane_ctl |= skl_plane_ctl_crtc(crtc_state);
 
@@ -623,29 +626,13 @@ skl_program_plane(struct intel_plane *plane,
 	I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
 	I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
 	I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
-	I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
-		      (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
 
-	if (icl_is_hdr_plane(dev_priv, plane_id)) {
-		u32 cus_ctl = 0;
+	if (INTEL_GEN(dev_priv) < 12)
+		aux_dist |= aux_stride;
+	I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), aux_dist);
 
-		if (linked) {
-			/* Enable and use MPEG-2 chroma siting */
-			cus_ctl = PLANE_CUS_ENABLE |
-				PLANE_CUS_HPHASE_0 |
-				PLANE_CUS_VPHASE_SIGN_NEGATIVE |
-				PLANE_CUS_VPHASE_0_25;
-
-			if (linked->id == PLANE_SPRITE5)
-				cus_ctl |= PLANE_CUS_PLANE_7;
-			else if (linked->id == PLANE_SPRITE4)
-				cus_ctl |= PLANE_CUS_PLANE_6;
-			else
-				MISSING_CASE(linked->id);
-		}
-
-		I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
-	}
+	if (icl_is_hdr_plane(dev_priv, plane_id))
+		I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), plane_state->cus_ctl);
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
@@ -675,7 +662,7 @@ skl_program_plane(struct intel_plane *plane,
 	I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
 		      intel_plane_ggtt_offset(plane_state) + surf_addr);
 
-	if (!slave && plane_state->scaler_id >= 0)
+	if (plane_state->scaler_id >= 0)
 		skl_program_scaler(plane, crtc_state, plane_state);
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -688,24 +675,12 @@ skl_update_plane(struct intel_plane *plane,
 {
 	int color_plane = 0;
 
-	if (plane_state->planar_linked_plane) {
-		/* Program the UV plane */
+	if (plane_state->planar_linked_plane && !plane_state->planar_slave)
+		/* Program the UV plane on planar master */
 		color_plane = 1;
-	}
 
-	skl_program_plane(plane, crtc_state, plane_state,
-			  color_plane, false, plane_state->ctl);
+	skl_program_plane(plane, crtc_state, plane_state, color_plane);
 }
-
-static void
-icl_update_slave(struct intel_plane *plane,
-		 const struct intel_crtc_state *crtc_state,
-		 const struct intel_plane_state *plane_state)
-{
-	skl_program_plane(plane, crtc_state, plane_state, 0, true,
-			  plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE);
-}
-
 static void
 skl_disable_plane(struct intel_plane *plane,
 		  const struct intel_crtc_state *crtc_state)
@@ -765,9 +740,9 @@ static void i9xx_plane_linear_gamma(u16 gamma[8])
 static void
 chv_update_csc(const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	enum plane_id plane_id = plane->id;
 	/*
 	 * |r|   | c0 c1 c2 |   |cr|
@@ -793,7 +768,7 @@ chv_update_csc(const struct intel_plane_state *plane_state)
 			    0, 4096,  7601,
 		},
 	};
-	const s16 *csc = csc_matrix[plane_state->base.color_encoding];
+	const s16 *csc = csc_matrix[plane_state->hw.color_encoding];
 
 	/* Seems RGB data bypasses the CSC always */
 	if (!fb->format->is_yuv)
@@ -824,15 +799,15 @@ chv_update_csc(const struct intel_plane_state *plane_state)
 static void
 vlv_update_clrc(const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	enum pipe pipe = plane->pipe;
 	enum plane_id plane_id = plane->id;
 	int contrast, brightness, sh_scale, sh_sin, sh_cos;
 
 	if (fb->format->is_yuv &&
-	    plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
+	    plane_state->hw.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
 		/*
 		 * Expand limited range to full range:
 		 * Contrast is applied first and is used to expand Y range.
@@ -866,7 +841,7 @@ vlv_plane_ratio(const struct intel_crtc_state *crtc_state,
 		unsigned int *num, unsigned int *den)
 {
 	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	unsigned int cpp = fb->format->cpp[0];
 
 	/*
@@ -952,8 +927,8 @@ static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
 static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
 			  const struct intel_plane_state *plane_state)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	unsigned int rotation = plane_state->base.rotation;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	unsigned int rotation = plane_state->hw.rotation;
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 	u32 sprctl;
 
@@ -972,6 +947,9 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
 	case DRM_FORMAT_VYUY:
 		sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY;
 		break;
+	case DRM_FORMAT_C8:
+		sprctl |= SP_FORMAT_8BPP;
+		break;
 	case DRM_FORMAT_RGB565:
 		sprctl |= SP_FORMAT_BGR565;
 		break;
@@ -987,6 +965,12 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
 	case DRM_FORMAT_ABGR2101010:
 		sprctl |= SP_FORMAT_RGBA1010102;
 		break;
+	case DRM_FORMAT_XRGB2101010:
+		sprctl |= SP_FORMAT_BGRX1010102;
+		break;
+	case DRM_FORMAT_ARGB2101010:
+		sprctl |= SP_FORMAT_BGRA1010102;
+		break;
 	case DRM_FORMAT_XBGR8888:
 		sprctl |= SP_FORMAT_RGBX8888;
 		break;
@@ -998,7 +982,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
 		return 0;
 	}
 
-	if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+	if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
 		sprctl |= SP_YUV_FORMAT_BT709;
 
 	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -1018,9 +1002,9 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
 
 static void vlv_update_gamma(const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	enum pipe pipe = plane->pipe;
 	enum plane_id plane_id = plane->id;
 	u16 gamma[8];
@@ -1052,10 +1036,10 @@ vlv_update_plane(struct intel_plane *plane,
 	u32 sprsurf_offset = plane_state->color_plane[0].offset;
 	u32 linear_offset;
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-	int crtc_x = plane_state->base.dst.x1;
-	int crtc_y = plane_state->base.dst.y1;
-	u32 crtc_w = drm_rect_width(&plane_state->base.dst);
-	u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+	int crtc_x = plane_state->uapi.dst.x1;
+	int crtc_y = plane_state->uapi.dst.y1;
+	u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+	u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
 	u32 x = plane_state->color_plane[0].x;
 	u32 y = plane_state->color_plane[0].y;
 	unsigned long irqflags;
@@ -1150,7 +1134,7 @@ static void ivb_plane_ratio(const struct intel_crtc_state *crtc_state,
 			    unsigned int *num, unsigned int *den)
 {
 	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	unsigned int cpp = fb->format->cpp[0];
 
 	if (hweight8(active_planes) == 2) {
@@ -1186,7 +1170,7 @@ static void ivb_plane_ratio_scaling(const struct intel_crtc_state *crtc_state,
 				    const struct intel_plane_state *plane_state,
 				    unsigned int *num, unsigned int *den)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	unsigned int cpp = fb->format->cpp[0];
 
 	switch (cpp) {
@@ -1244,8 +1228,8 @@ static int ivb_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
 	 */
 	pixel_rate = crtc_state->pixel_rate;
 
-	src_w = drm_rect_width(&plane_state->base.src) >> 16;
-	dst_w = drm_rect_width(&plane_state->base.dst);
+	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	dst_w = drm_rect_width(&plane_state->uapi.dst);
 
 	if (src_w != dst_w)
 		ivb_plane_ratio_scaling(crtc_state, plane_state, &num, &den);
@@ -1264,7 +1248,7 @@ static void hsw_plane_ratio(const struct intel_crtc_state *crtc_state,
 			    unsigned int *num, unsigned int *den)
 {
 	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	unsigned int cpp = fb->format->cpp[0];
 
 	if (hweight8(active_planes) == 2) {
@@ -1319,8 +1303,8 @@ static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
 static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->base.plane->dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+		to_i915(plane_state->uapi.plane->dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 
 	return fb->format->cpp[0] == 8 &&
 		(IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv));
@@ -1330,9 +1314,9 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
 			  const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->base.plane->dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	unsigned int rotation = plane_state->base.rotation;
+		to_i915(plane_state->uapi.plane->dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	unsigned int rotation = plane_state->hw.rotation;
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 	u32 sprctl;
 
@@ -1348,6 +1332,12 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
 	case DRM_FORMAT_XRGB8888:
 		sprctl |= SPRITE_FORMAT_RGBX888;
 		break;
+	case DRM_FORMAT_XBGR2101010:
+		sprctl |= SPRITE_FORMAT_RGBX101010 | SPRITE_RGB_ORDER_RGBX;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+		sprctl |= SPRITE_FORMAT_RGBX101010;
+		break;
 	case DRM_FORMAT_XBGR16161616F:
 		sprctl |= SPRITE_FORMAT_RGBX161616 | SPRITE_RGB_ORDER_RGBX;
 		break;
@@ -1374,10 +1364,10 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
 	if (!ivb_need_sprite_gamma(plane_state))
 		sprctl |= SPRITE_INT_GAMMA_DISABLE;
 
-	if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+	if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
 		sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
 
-	if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+	if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
 		sprctl |= SPRITE_YUV_RANGE_CORRECTION_DISABLE;
 
 	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -1421,7 +1411,7 @@ static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state,
 
 static void ivb_update_gamma(const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum pipe pipe = plane->pipe;
 	u16 gamma[18];
@@ -1460,14 +1450,14 @@ ivb_update_plane(struct intel_plane *plane,
 	u32 sprsurf_offset = plane_state->color_plane[0].offset;
 	u32 linear_offset;
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-	int crtc_x = plane_state->base.dst.x1;
-	int crtc_y = plane_state->base.dst.y1;
-	u32 crtc_w = drm_rect_width(&plane_state->base.dst);
-	u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+	int crtc_x = plane_state->uapi.dst.x1;
+	int crtc_y = plane_state->uapi.dst.y1;
+	u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+	u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
 	u32 x = plane_state->color_plane[0].x;
 	u32 y = plane_state->color_plane[0].y;
-	u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
-	u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+	u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
 	u32 sprctl, sprscale = 0;
 	unsigned long irqflags;
 
@@ -1566,7 +1556,7 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
 static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
 				const struct intel_plane_state *plane_state)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	unsigned int hscale, pixel_rate;
 	unsigned int limit, decimate;
 
@@ -1580,8 +1570,8 @@ static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
 	pixel_rate = crtc_state->pixel_rate;
 
 	/* Horizontal downscaling limits the maximum pixel rate */
-	hscale = drm_rect_calc_hscale(&plane_state->base.src,
-				      &plane_state->base.dst,
+	hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
+				      &plane_state->uapi.dst,
 				      0, INT_MAX);
 	if (hscale < 0x10000)
 		return pixel_rate;
@@ -1635,9 +1625,9 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
 			  const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->base.plane->dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	unsigned int rotation = plane_state->base.rotation;
+		to_i915(plane_state->uapi.plane->dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	unsigned int rotation = plane_state->hw.rotation;
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 	u32 dvscntr;
 
@@ -1653,6 +1643,12 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
 	case DRM_FORMAT_XRGB8888:
 		dvscntr |= DVS_FORMAT_RGBX888;
 		break;
+	case DRM_FORMAT_XBGR2101010:
+		dvscntr |= DVS_FORMAT_RGBX101010 | DVS_RGB_ORDER_XBGR;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+		dvscntr |= DVS_FORMAT_RGBX101010;
+		break;
 	case DRM_FORMAT_XBGR16161616F:
 		dvscntr |= DVS_FORMAT_RGBX161616 | DVS_RGB_ORDER_XBGR;
 		break;
@@ -1676,10 +1672,10 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
 		return 0;
 	}
 
-	if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+	if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
 		dvscntr |= DVS_YUV_FORMAT_BT709;
 
-	if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+	if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
 		dvscntr |= DVS_YUV_RANGE_CORRECTION_DISABLE;
 
 	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -1698,9 +1694,9 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
 
 static void g4x_update_gamma(const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	enum pipe pipe = plane->pipe;
 	u16 gamma[8];
 	int i;
@@ -1730,9 +1726,9 @@ static void ilk_sprite_linear_gamma(u16 gamma[17])
 
 static void ilk_update_gamma(const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	enum pipe pipe = plane->pipe;
 	u16 gamma[17];
 	int i;
@@ -1766,14 +1762,14 @@ g4x_update_plane(struct intel_plane *plane,
 	u32 dvssurf_offset = plane_state->color_plane[0].offset;
 	u32 linear_offset;
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-	int crtc_x = plane_state->base.dst.x1;
-	int crtc_y = plane_state->base.dst.y1;
-	u32 crtc_w = drm_rect_width(&plane_state->base.dst);
-	u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+	int crtc_x = plane_state->uapi.dst.x1;
+	int crtc_y = plane_state->uapi.dst.y1;
+	u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+	u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
 	u32 x = plane_state->color_plane[0].x;
 	u32 y = plane_state->color_plane[0].y;
-	u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
-	u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+	u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
 	u32 dvscntr, dvsscale = 0;
 	unsigned long irqflags;
 
@@ -1886,12 +1882,12 @@ static int
 g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
 			 struct intel_plane_state *plane_state)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	const struct drm_rect *src = &plane_state->base.src;
-	const struct drm_rect *dst = &plane_state->base.dst;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	const struct drm_rect *src = &plane_state->uapi.src;
+	const struct drm_rect *dst = &plane_state->uapi.dst;
 	int src_x, src_w, src_h, crtc_w, crtc_h;
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 	unsigned int stride = plane_state->color_plane[0].stride;
 	unsigned int cpp = fb->format->cpp[0];
 	unsigned int width_bytes;
@@ -1947,13 +1943,13 @@ static int
 g4x_sprite_check(struct intel_crtc_state *crtc_state,
 		 struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
 	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
 	int ret;
 
-	if (intel_fb_scalable(plane_state->base.fb)) {
+	if (intel_fb_scalable(plane_state->hw.fb)) {
 		if (INTEL_GEN(dev_priv) < 7) {
 			min_scale = 1;
 			max_scale = 16 << 16;
@@ -1963,8 +1959,8 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
 		}
 	}
 
-	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
-						  &crtc_state->base,
+	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+						  &crtc_state->uapi,
 						  min_scale, max_scale,
 						  true, true);
 	if (ret)
@@ -1974,7 +1970,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
 	if (ret)
 		return ret;
 
-	if (!plane_state->base.visible)
+	if (!plane_state->uapi.visible)
 		return 0;
 
 	ret = intel_plane_check_src_coordinates(plane_state);
@@ -1995,9 +1991,9 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
 
 int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	unsigned int rotation = plane_state->base.rotation;
+	unsigned int rotation = plane_state->hw.rotation;
 
 	/* CHV ignores the mirror bit when the rotate bit is set :( */
 	if (IS_CHERRYVIEW(dev_priv) &&
@@ -2020,8 +2016,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
 	if (ret)
 		return ret;
 
-	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
-						  &crtc_state->base,
+	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+						  &crtc_state->uapi,
 						  DRM_PLANE_HELPER_NO_SCALING,
 						  DRM_PLANE_HELPER_NO_SCALING,
 						  true, true);
@@ -2032,7 +2028,7 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
 	if (ret)
 		return ret;
 
-	if (!plane_state->base.visible)
+	if (!plane_state->uapi.visible)
 		return 0;
 
 	ret = intel_plane_check_src_coordinates(plane_state);
@@ -2047,10 +2043,10 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
 static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
 			      const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	unsigned int rotation = plane_state->base.rotation;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	unsigned int rotation = plane_state->hw.rotation;
 	struct drm_format_name_buf format_name;
 
 	if (!fb)
@@ -2105,12 +2101,14 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
 	}
 
 	/* Y-tiling is not supported in IF-ID Interlace mode */
-	if (crtc_state->base.enable &&
-	    crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
+	if (crtc_state->hw.enable &&
+	    crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
 	    (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
 	     fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
 	     fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
-	     fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) {
+	     fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+	     fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+	     fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) {
 		DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
 		return -EINVAL;
 	}
@@ -2122,9 +2120,9 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
 					   const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
-		to_i915(plane_state->base.plane->dev);
-	int crtc_x = plane_state->base.dst.x1;
-	int crtc_w = drm_rect_width(&plane_state->base.dst);
+		to_i915(plane_state->uapi.plane->dev);
+	int crtc_x = plane_state->uapi.dst.x1;
+	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
 	int pipe_src_w = crtc_state->pipe_src_w;
 
 	/*
@@ -2150,12 +2148,13 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
 
 static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	unsigned int rotation = plane_state->base.rotation;
-	int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	unsigned int rotation = plane_state->hw.rotation;
+	int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
 
 	/* Display WA #1106 */
-	if (drm_format_info_is_yuv_semiplanar(fb->format) && src_w & 3 &&
+	if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+	    src_w & 3 &&
 	    (rotation == DRM_MODE_ROTATE_270 ||
 	     rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
 		DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
@@ -2175,7 +2174,7 @@ static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
 	 * FIXME need to properly check this later.
 	 */
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
-	    !drm_format_info_is_yuv_semiplanar(fb->format))
+	    !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
 		return 0x30000 - 1;
 	else
 		return 0x20000 - 1;
@@ -2184,9 +2183,9 @@ static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
 static int skl_plane_check(struct intel_crtc_state *crtc_state,
 			   struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
 	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
 	int ret;
@@ -2201,8 +2200,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
 		max_scale = skl_plane_max_scale(dev_priv, fb);
 	}
 
-	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
-						  &crtc_state->base,
+	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+						  &crtc_state->uapi,
 						  min_scale, max_scale,
 						  true, true);
 	if (ret)
@@ -2212,7 +2211,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
 	if (ret)
 		return ret;
 
-	if (!plane_state->base.visible)
+	if (!plane_state->uapi.visible)
 		return 0;
 
 	ret = skl_plane_check_dst_coordinates(crtc_state, plane_state);
@@ -2228,8 +2227,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
 		return ret;
 
 	/* HW only has 8 bits pixel precision, disable plane if invisible */
-	if (!(plane_state->base.alpha >> 8))
-		plane_state->base.visible = false;
+	if (!(plane_state->hw.alpha >> 8))
+		plane_state->uapi.visible = false;
 
 	plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
 
@@ -2237,6 +2236,15 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
 		plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
 							     plane_state);
 
+	if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+	    icl_is_hdr_plane(dev_priv, plane->id))
+		/* Enable and use MPEG-2 chroma siting */
+		plane_state->cus_ctl = PLANE_CUS_ENABLE |
+			PLANE_CUS_HPHASE_0 |
+			PLANE_CUS_VPHASE_SIGN_NEGATIVE | PLANE_CUS_VPHASE_0_25;
+	else
+		plane_state->cus_ctl = 0;
+
 	return 0;
 }
 
@@ -2248,7 +2256,7 @@ static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
 static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
 				 const struct drm_intel_sprite_colorkey *set)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 
@@ -2375,6 +2383,8 @@ static const u64 i9xx_plane_format_modifiers[] = {
 static const u32 snb_plane_formats[] = {
 	DRM_FORMAT_XRGB8888,
 	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
 	DRM_FORMAT_XRGB16161616F,
 	DRM_FORMAT_XBGR16161616F,
 	DRM_FORMAT_YUYV,
@@ -2384,11 +2394,12 @@ static const u32 snb_plane_formats[] = {
 };
 
 static const u32 vlv_plane_formats[] = {
+	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
-	DRM_FORMAT_ABGR8888,
-	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_XBGR8888,
 	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
 	DRM_FORMAT_XBGR2101010,
 	DRM_FORMAT_ABGR2101010,
 	DRM_FORMAT_YUYV,
@@ -2397,6 +2408,23 @@ static const u32 vlv_plane_formats[] = {
 	DRM_FORMAT_VYUY,
 };
 
+static const u32 chv_pipe_b_sprite_formats[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ARGB2101010,
+	DRM_FORMAT_ABGR2101010,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+};
+
 static const u32 skl_plane_formats[] = {
 	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
@@ -2462,6 +2490,8 @@ static const u32 icl_sdr_y_plane_formats[] = {
 	DRM_FORMAT_ABGR8888,
 	DRM_FORMAT_XRGB2101010,
 	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ARGB2101010,
+	DRM_FORMAT_ABGR2101010,
 	DRM_FORMAT_YUYV,
 	DRM_FORMAT_YVYU,
 	DRM_FORMAT_UYVY,
@@ -2483,6 +2513,8 @@ static const u32 icl_sdr_uv_plane_formats[] = {
 	DRM_FORMAT_ABGR8888,
 	DRM_FORMAT_XRGB2101010,
 	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ARGB2101010,
+	DRM_FORMAT_ABGR2101010,
 	DRM_FORMAT_YUYV,
 	DRM_FORMAT_YVYU,
 	DRM_FORMAT_UYVY,
@@ -2508,6 +2540,8 @@ static const u32 icl_hdr_plane_formats[] = {
 	DRM_FORMAT_ABGR8888,
 	DRM_FORMAT_XRGB2101010,
 	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ARGB2101010,
+	DRM_FORMAT_ABGR2101010,
 	DRM_FORMAT_XRGB16161616F,
 	DRM_FORMAT_XBGR16161616F,
 	DRM_FORMAT_ARGB16161616F,
@@ -2546,7 +2580,17 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
 	DRM_FORMAT_MOD_INVALID
 };
 
-static const u64 gen12_plane_format_modifiers_noccs[] = {
+static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
+	I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
+	I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+	I915_FORMAT_MOD_Y_TILED,
+	I915_FORMAT_MOD_X_TILED,
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID
+};
+
+static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
+	I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
 	I915_FORMAT_MOD_Y_TILED,
 	I915_FORMAT_MOD_X_TILED,
 	DRM_FORMAT_MOD_LINEAR,
@@ -2593,6 +2637,8 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
 	switch (format) {
 	case DRM_FORMAT_XRGB8888:
 	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
 	case DRM_FORMAT_XRGB16161616F:
 	case DRM_FORMAT_XBGR16161616F:
 	case DRM_FORMAT_YUYV:
@@ -2620,6 +2666,7 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
 	}
 
 	switch (format) {
+	case DRM_FORMAT_C8:
 	case DRM_FORMAT_RGB565:
 	case DRM_FORMAT_ABGR8888:
 	case DRM_FORMAT_ARGB8888:
@@ -2627,6 +2674,8 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
 	case DRM_FORMAT_XRGB8888:
 	case DRM_FORMAT_XBGR2101010:
 	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
 	case DRM_FORMAT_YUYV:
 	case DRM_FORMAT_YVYU:
 	case DRM_FORMAT_UYVY:
@@ -2671,6 +2720,8 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
 	case DRM_FORMAT_RGB565:
 	case DRM_FORMAT_XRGB2101010:
 	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
 	case DRM_FORMAT_YUYV:
 	case DRM_FORMAT_YVYU:
 	case DRM_FORMAT_UYVY:
@@ -2703,13 +2754,25 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
 	}
 }
 
+static bool gen12_plane_supports_mc_ccs(enum plane_id plane_id)
+{
+	return plane_id < PLANE_SPRITE4;
+}
+
 static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
 					     u32 format, u64 modifier)
 {
+	struct intel_plane *plane = to_intel_plane(_plane);
+
 	switch (modifier) {
+	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+		if (!gen12_plane_supports_mc_ccs(plane->id))
+			return false;
+		/* fall through */
 	case DRM_FORMAT_MOD_LINEAR:
 	case I915_FORMAT_MOD_X_TILED:
 	case I915_FORMAT_MOD_Y_TILED:
+	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
 		break;
 	default:
 		return false;
@@ -2720,9 +2783,9 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
 	case DRM_FORMAT_XBGR8888:
 	case DRM_FORMAT_ARGB8888:
 	case DRM_FORMAT_ABGR8888:
-	case DRM_FORMAT_RGB565:
-	case DRM_FORMAT_XRGB2101010:
-	case DRM_FORMAT_XBGR2101010:
+		if (is_ccs_modifier(modifier))
+			return true;
+		/* fall through */
 	case DRM_FORMAT_YUYV:
 	case DRM_FORMAT_YVYU:
 	case DRM_FORMAT_UYVY:
@@ -2731,6 +2794,14 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
 	case DRM_FORMAT_P010:
 	case DRM_FORMAT_P012:
 	case DRM_FORMAT_P016:
+		if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)
+			return true;
+		/* fall through */
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
 	case DRM_FORMAT_XVYU2101010:
 	case DRM_FORMAT_C8:
 	case DRM_FORMAT_XBGR16161616F:
@@ -2864,6 +2935,14 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
 	}
 }
 
+static const u64 *gen12_get_plane_modifiers(enum plane_id plane_id)
+{
+	if (gen12_plane_supports_mc_ccs(plane_id))
+		return gen12_plane_format_modifiers_mc_ccs;
+	else
+		return gen12_plane_format_modifiers_rc_ccs;
+}
+
 static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
 			      enum pipe pipe, enum plane_id plane_id)
 {
@@ -2916,8 +2995,6 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
 	plane->get_hw_state = skl_plane_get_hw_state;
 	plane->check_plane = skl_plane_check;
 	plane->min_cdclk = skl_plane_min_cdclk;
-	if (icl_is_nv12_y_plane(plane_id))
-		plane->update_slave = icl_update_slave;
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		formats = icl_get_plane_formats(dev_priv, pipe,
@@ -2929,13 +3006,11 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
 		formats = skl_get_plane_formats(dev_priv, pipe,
 						plane_id, &num_formats);
 
+	plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
 	if (INTEL_GEN(dev_priv) >= 12) {
-		/* TODO: Implement support for gen-12 CCS modifiers */
-		plane->has_ccs = false;
-		modifiers = gen12_plane_format_modifiers_noccs;
+		modifiers = gen12_get_plane_modifiers(plane_id);
 		plane_funcs = &gen12_plane_funcs;
 	} else {
-		plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
 		if (plane->has_ccs)
 			modifiers = skl_plane_format_modifiers_ccs;
 		else
@@ -3025,8 +3100,13 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 		plane->check_plane = vlv_sprite_check;
 		plane->min_cdclk = vlv_plane_min_cdclk;
 
-		formats = vlv_plane_formats;
-		num_formats = ARRAY_SIZE(vlv_plane_formats);
+		if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+			formats = chv_pipe_b_sprite_formats;
+			num_formats = ARRAY_SIZE(chv_pipe_b_sprite_formats);
+		} else {
+			formats = vlv_plane_formats;
+			num_formats = ARRAY_SIZE(vlv_plane_formats);
+		}
 		modifiers = i9xx_plane_format_modifiers;
 
 		plane_funcs = &vlv_sprite_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 9983fad..c75e0ce 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -898,7 +898,7 @@ static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
 	return container_of(encoder, struct intel_tv, base);
 }
 
-static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+static struct intel_tv *intel_attached_tv(struct intel_connector *connector)
 {
 	return enc_to_tv(intel_attached_encoder(connector));
 }
@@ -924,7 +924,7 @@ intel_enable_tv(struct intel_encoder *encoder,
 
 	/* Prevents vblank waits from timing out in intel_tv_detect_type() */
 	intel_wait_for_vblank(dev_priv,
-			      to_intel_crtc(pipe_config->base.crtc)->pipe);
+			      to_intel_crtc(pipe_config->uapi.crtc)->pipe);
 
 	I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
 }
@@ -1085,7 +1085,7 @@ intel_tv_get_config(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct drm_display_mode *adjusted_mode =
-		&pipe_config->base.adjusted_mode;
+		&pipe_config->hw.adjusted_mode;
 	struct drm_display_mode mode = {};
 	u32 tv_ctl, hctl1, hctl3, vctl1, vctl2, tmp;
 	struct tv_mode tv_mode = {};
@@ -1188,7 +1188,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
 		to_intel_tv_connector_state(conn_state);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
 	struct drm_display_mode *adjusted_mode =
-		&pipe_config->base.adjusted_mode;
+		&pipe_config->hw.adjusted_mode;
 	int hdisplay = adjusted_mode->crtc_hdisplay;
 	int vdisplay = adjusted_mode->crtc_vdisplay;
 
@@ -1417,7 +1417,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
 				const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	struct intel_tv *intel_tv = enc_to_tv(encoder);
 	const struct intel_tv_connector_state *tv_conn_state =
 		to_intel_tv_connector_state(conn_state);
@@ -1527,7 +1527,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
 			   ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
 			    (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
 
-	assert_pipe_disabled(dev_priv, intel_crtc->pipe);
+	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
 
 	/* Filter ctl must be set before TV_WIN_SIZE */
 	tv_filter_ctl = TV_AUTO_SCALE;
@@ -1662,7 +1662,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
  */
 static void intel_tv_find_better_format(struct drm_connector *connector)
 {
-	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
 	const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
 	int i;
 
@@ -1689,7 +1689,7 @@ intel_tv_detect(struct drm_connector *connector,
 		struct drm_modeset_acquire_ctx *ctx,
 		bool force)
 {
-	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
 	enum drm_connector_status status;
 	int type;
 
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 69a7cb1..4d0c23b 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -115,6 +115,7 @@ enum bdb_block_id {
 	BDB_MIPI_CONFIG			= 52,
 	BDB_MIPI_SEQUENCE		= 53,
 	BDB_COMPRESSION_PARAMETERS	= 56,
+	BDB_GENERIC_DTD			= 58,
 	BDB_SKIP			= 254, /* VBIOS private block, ignore */
 };
 
@@ -368,7 +369,7 @@ struct child_device_config {
 			u16 dtd_buf_ptr;			/* 161 */
 			u8 edidless_efp:1;			/* 161 */
 			u8 compression_enable:1;		/* 198 */
-			u8 compression_method:1;		/* 198 */
+			u8 compression_method_cps:1;		/* 198 */
 			u8 ganged_edp:1;			/* 202 */
 			u8 reserved0:4;
 			u8 compression_structure_index:4;	/* 198 */
@@ -793,6 +794,35 @@ struct bdb_lfp_backlight_data {
 } __packed;
 
 /*
+ * Block 44 - LFP Power Conservation Features Block
+ */
+
+struct als_data_entry {
+	u16 backlight_adjust;
+	u16 lux;
+} __packed;
+
+struct agressiveness_profile_entry {
+	u8 dpst_agressiveness : 4;
+	u8 lace_agressiveness : 4;
+} __packed;
+
+struct bdb_lfp_power {
+	u8 lfp_feature_bits;
+	struct als_data_entry als[5];
+	u8 lace_aggressiveness_profile;
+	u16 dpst;
+	u16 psr;
+	u16 drrs;
+	u16 lace_support;
+	u16 adt;
+	u16 dmrrs;
+	u16 adb;
+	u16 lace_enabled_status;
+	struct agressiveness_profile_entry aggressivenes[16];
+} __packed;
+
+/*
  * Block 52 - MIPI Configuration Block
  */
 
@@ -863,4 +893,34 @@ struct bdb_compression_parameters {
 	struct dsc_compression_parameters_entry data[16];
 } __packed;
 
+/*
+ * Block 58 - Generic DTD Block
+ */
+
+struct generic_dtd_entry {
+	u32 pixel_clock;
+	u16 hactive;
+	u16 hblank;
+	u16 hfront_porch;
+	u16 hsync;
+	u16 vactive;
+	u16 vblank;
+	u16 vfront_porch;
+	u16 vsync;
+	u16 width_mm;
+	u16 height_mm;
+
+	/* Flags */
+	u8 rsvd_flags:6;
+	u8 vsync_positive_polarity:1;
+	u8 hsync_positive_polarity:1;
+
+	u8 rsvd[3];
+} __packed;
+
+struct bdb_generic_dtd {
+	u16 gdtd_size;
+	struct generic_dtd_entry dtd[];	/* up to 24 DTD's */
+} __packed;
+
 #endif /* _INTEL_VBT_DEFS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 896b0c3..9e6aaa3 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -10,6 +10,7 @@
 
 #include "i915_drv.h"
 #include "intel_display_types.h"
+#include "intel_dsi.h"
 #include "intel_vdsc.h"
 
 enum ROW_INDEX_BPP {
@@ -30,10 +31,8 @@ enum COLUMN_INDEX_BPC {
 	MAX_COLUMN_INDEX
 };
 
-#define DSC_SUPPORTED_VERSION_MIN		1
-
 /* From DSC_v1.11 spec, rc_parameter_Set syntax element typically constant */
-static u16 rc_buf_thresh[] = {
+static const u16 rc_buf_thresh[] = {
 	896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616,
 	7744, 7872, 8000, 8064
 };
@@ -53,7 +52,7 @@ struct rc_parameters {
  * Selected Rate Control Related Parameter Recommended Values
  * from DSC_v1.11 spec & C Model release: DSC_model_20161212
  */
-static struct rc_parameters rc_params[][MAX_COLUMN_INDEX] = {
+static const struct rc_parameters rc_parameters[][MAX_COLUMN_INDEX] = {
 {
 	/* 6BPP/8BPC */
 	{ 768, 15, 6144, 3, 13, 11, 11, {
@@ -319,63 +318,84 @@ static int get_column_index_for_rc_params(u8 bits_per_component)
 	}
 }
 
-int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
-				struct intel_crtc_state *pipe_config)
+static const struct rc_parameters *get_rc_params(u16 compressed_bpp,
+						 u8 bits_per_component)
+{
+	int row_index, column_index;
+
+	row_index = get_row_index_for_rc_params(compressed_bpp);
+	if (row_index < 0)
+		return NULL;
+
+	column_index = get_column_index_for_rc_params(bits_per_component);
+	if (column_index < 0)
+		return NULL;
+
+	return &rc_parameters[row_index][column_index];
+}
+
+bool intel_dsc_source_support(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *crtc_state)
+{
+	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+	enum pipe pipe = crtc->pipe;
+
+	if (!INTEL_INFO(i915)->display.has_dsc)
+		return false;
+
+	/* On TGL, DSC is supported on all Pipes */
+	if (INTEL_GEN(i915) >= 12)
+		return true;
+
+	if (INTEL_GEN(i915) >= 10 &&
+	    (pipe != PIPE_A ||
+	     (cpu_transcoder == TRANSCODER_EDP ||
+	      cpu_transcoder == TRANSCODER_DSI_0 ||
+	      cpu_transcoder == TRANSCODER_DSI_1)))
+		return true;
+
+	return false;
+}
+
+static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state)
+{
+	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	const struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+	if (INTEL_GEN(i915) >= 12)
+		return true;
+
+	if (cpu_transcoder == TRANSCODER_EDP ||
+	    cpu_transcoder == TRANSCODER_DSI_0 ||
+	    cpu_transcoder == TRANSCODER_DSI_1)
+		return false;
+
+	/* There's no pipe A DSC engine on ICL */
+	WARN_ON(crtc->pipe == PIPE_A);
+
+	return true;
+}
+
+int intel_dsc_compute_params(struct intel_encoder *encoder,
+			     struct intel_crtc_state *pipe_config)
 {
 	struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
 	u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
+	const struct rc_parameters *rc_params;
 	u8 i = 0;
-	int row_index = 0;
-	int column_index = 0;
-	u8 line_buf_depth = 0;
 
-	vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay;
-	vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay;
+	vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
+	vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay;
 	vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
 					     pipe_config->dsc.slice_count);
-	/*
-	 * Slice Height of 8 works for all currently available panels. So start
-	 * with that if pic_height is an integral multiple of 8.
-	 * Eventually add logic to try multiple slice heights.
-	 */
-	if (vdsc_cfg->pic_height % 8 == 0)
-		vdsc_cfg->slice_height = 8;
-	else if (vdsc_cfg->pic_height % 4 == 0)
-		vdsc_cfg->slice_height = 4;
-	else
-		vdsc_cfg->slice_height = 2;
-
-	/* Values filled from DSC Sink DPCD */
-	vdsc_cfg->dsc_version_major =
-		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
-		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
-	vdsc_cfg->dsc_version_minor =
-		min(DSC_SUPPORTED_VERSION_MIN,
-		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
-		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
-
-	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
-		DP_DSC_RGB;
-
-	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
-	if (!line_buf_depth) {
-		DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
-		return -EINVAL;
-	}
-	if (vdsc_cfg->dsc_version_minor == 2)
-		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
-			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
-	else
-		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
-			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
 
 	/* Gen 11 does not support YCbCr */
 	vdsc_cfg->simple_422 = false;
 	/* Gen 11 does not support VBR */
 	vdsc_cfg->vbr_enable = false;
-	vdsc_cfg->block_pred_enable =
-			intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
-		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
 
 	/* Gen 11 only supports integral values of bpp */
 	vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
@@ -399,39 +419,29 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
 		vdsc_cfg->rc_buf_thresh[13] = 0x7D;
 	}
 
-	row_index = get_row_index_for_rc_params(compressed_bpp);
-	column_index =
-		get_column_index_for_rc_params(vdsc_cfg->bits_per_component);
-
-	if (row_index < 0 || column_index < 0)
+	rc_params = get_rc_params(compressed_bpp, vdsc_cfg->bits_per_component);
+	if (!rc_params)
 		return -EINVAL;
 
-	vdsc_cfg->first_line_bpg_offset =
-		rc_params[row_index][column_index].first_line_bpg_offset;
-	vdsc_cfg->initial_xmit_delay =
-		rc_params[row_index][column_index].initial_xmit_delay;
-	vdsc_cfg->initial_offset =
-		rc_params[row_index][column_index].initial_offset;
-	vdsc_cfg->flatness_min_qp =
-		rc_params[row_index][column_index].flatness_min_qp;
-	vdsc_cfg->flatness_max_qp =
-		rc_params[row_index][column_index].flatness_max_qp;
-	vdsc_cfg->rc_quant_incr_limit0 =
-		rc_params[row_index][column_index].rc_quant_incr_limit0;
-	vdsc_cfg->rc_quant_incr_limit1 =
-		rc_params[row_index][column_index].rc_quant_incr_limit1;
+	vdsc_cfg->first_line_bpg_offset = rc_params->first_line_bpg_offset;
+	vdsc_cfg->initial_xmit_delay = rc_params->initial_xmit_delay;
+	vdsc_cfg->initial_offset = rc_params->initial_offset;
+	vdsc_cfg->flatness_min_qp = rc_params->flatness_min_qp;
+	vdsc_cfg->flatness_max_qp = rc_params->flatness_max_qp;
+	vdsc_cfg->rc_quant_incr_limit0 = rc_params->rc_quant_incr_limit0;
+	vdsc_cfg->rc_quant_incr_limit1 = rc_params->rc_quant_incr_limit1;
 
 	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
 		vdsc_cfg->rc_range_params[i].range_min_qp =
-			rc_params[row_index][column_index].rc_range_params[i].range_min_qp;
+			rc_params->rc_range_params[i].range_min_qp;
 		vdsc_cfg->rc_range_params[i].range_max_qp =
-			rc_params[row_index][column_index].rc_range_params[i].range_max_qp;
+			rc_params->rc_range_params[i].range_max_qp;
 		/*
 		 * Range BPG Offset uses 2's complement and is only 6 bits. So
 		 * mask it to get only 6 bits.
 		 */
 		vdsc_cfg->rc_range_params[i].range_bpg_offset =
-			rc_params[row_index][column_index].rc_range_params[i].range_bpg_offset &
+			rc_params->rc_range_params[i].range_bpg_offset &
 			DSC_RANGE_BPG_OFFSET_MASK;
 	}
 
@@ -453,41 +463,42 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
 	vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
 		(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
 
-	return drm_dsc_compute_rc_parameters(vdsc_cfg);
+	return 0;
 }
 
 enum intel_display_power_domain
 intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
-	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 
 	/*
-	 * On ICL VDSC/joining for eDP transcoder uses a separate power well,
-	 * PW2. This requires POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain.
-	 * For any other transcoder, VDSC/joining uses the power well associated
-	 * with the pipe/transcoder in use. Hence another reference on the
-	 * transcoder power domain will suffice.
+	 * VDSC/joining uses a separate power well, PW2, and requires
+	 * POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain in two cases:
 	 *
-	 * On TGL we have the same mapping, but for transcoder A (the special
-	 * TRANSCODER_EDP is gone).
+	 *  - ICL eDP/DSI transcoder
+	 *  - TGL pipe A
+	 *
+	 * For any other pipe, VDSC/joining uses the power well associated with
+	 * the pipe in use. Hence another reference on the pipe power domain
+	 * will suffice. (Except no VDSC/joining on ICL pipe A.)
 	 */
-	if (INTEL_GEN(i915) >= 12 && cpu_transcoder == TRANSCODER_A)
+	if (INTEL_GEN(i915) >= 12 && pipe == PIPE_A)
 		return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
-	else if (cpu_transcoder == TRANSCODER_EDP)
-		return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
+	else if (is_pipe_dsc(crtc_state))
+		return POWER_DOMAIN_PIPE(pipe);
 	else
-		return POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+		return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
 }
 
-static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
-						const struct intel_crtc_state *crtc_state)
+static void intel_dsc_pps_configure(struct intel_encoder *encoder,
+				    const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
 	enum pipe pipe = crtc->pipe;
-	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	u32 pps_val = 0;
 	u32 rc_buf_thresh_dword[4];
 	u32 rc_range_params_dword[8];
@@ -508,7 +519,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 	if (vdsc_cfg->vbr_enable)
 		pps_val |= DSC_VBR_ENABLE;
 	DRM_INFO("PPS0 = 0x%08x\n", pps_val);
-	if (cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
@@ -527,7 +538,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 	pps_val = 0;
 	pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
 	DRM_INFO("PPS1 = 0x%08x\n", pps_val);
-	if (cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
@@ -547,7 +558,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 	pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
 		DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
 	DRM_INFO("PPS2 = 0x%08x\n", pps_val);
-	if (cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
@@ -567,7 +578,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 	pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
 		DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
 	DRM_INFO("PPS3 = 0x%08x\n", pps_val);
-	if (cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
@@ -587,7 +598,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 	pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
 		DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
 	DRM_INFO("PPS4 = 0x%08x\n", pps_val);
-	if (cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
@@ -607,7 +618,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 	pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
 		DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
 	DRM_INFO("PPS5 = 0x%08x\n", pps_val);
-	if (cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
@@ -629,7 +640,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 		DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
 		DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
 	DRM_INFO("PPS6 = 0x%08x\n", pps_val);
-	if (cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
@@ -649,7 +660,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 	pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
 		DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
 	DRM_INFO("PPS7 = 0x%08x\n", pps_val);
-	if (cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
@@ -669,7 +680,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 	pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
 		DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
 	DRM_INFO("PPS8 = 0x%08x\n", pps_val);
-	if (cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
@@ -689,7 +700,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 	pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
 		DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
 	DRM_INFO("PPS9 = 0x%08x\n", pps_val);
-	if (cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
@@ -711,7 +722,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 		DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
 		DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
 	DRM_INFO("PPS10 = 0x%08x\n", pps_val);
-	if (cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
@@ -734,7 +745,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 		DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
 					vdsc_cfg->slice_height);
 	DRM_INFO("PPS16 = 0x%08x\n", pps_val);
-	if (cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
@@ -758,7 +769,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 		DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
 			 rc_buf_thresh_dword[i / 4]);
 	}
-	if (cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
 		I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
 		I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
@@ -807,7 +818,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 		DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
 			 rc_range_params_dword[i / 2]);
 	}
-	if (cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
 			   rc_range_params_dword[0]);
 		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
@@ -880,10 +891,77 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 	}
 }
 
-static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
-				       const struct intel_crtc_state *crtc_state)
+void intel_dsc_get_config(struct intel_encoder *encoder,
+			  struct intel_crtc_state *crtc_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	enum pipe pipe = crtc->pipe;
+	enum intel_display_power_domain power_domain;
+	intel_wakeref_t wakeref;
+	u32 dss_ctl1, dss_ctl2, val;
+
+	if (!intel_dsc_source_support(encoder, crtc_state))
+		return;
+
+	power_domain = intel_dsc_power_domain(crtc_state);
+
+	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	if (!wakeref)
+		return;
+
+	if (!is_pipe_dsc(crtc_state)) {
+		dss_ctl1 = I915_READ(DSS_CTL1);
+		dss_ctl2 = I915_READ(DSS_CTL2);
+	} else {
+		dss_ctl1 = I915_READ(ICL_PIPE_DSS_CTL1(pipe));
+		dss_ctl2 = I915_READ(ICL_PIPE_DSS_CTL2(pipe));
+	}
+
+	crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE;
+	if (!crtc_state->dsc.compression_enable)
+		goto out;
+
+	crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) &&
+		(dss_ctl1 & JOINER_ENABLE);
+
+	/* FIXME: add more state readout as needed */
+
+	/* PPS1 */
+	if (!is_pipe_dsc(crtc_state))
+		val = I915_READ(DSCA_PICTURE_PARAMETER_SET_1);
+	else
+		val = I915_READ(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
+	vdsc_cfg->bits_per_pixel = val;
+	crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;
+out:
+	intel_display_power_put(dev_priv, power_domain, wakeref);
+}
+
+static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
+				    const struct intel_crtc_state *crtc_state)
+{
+	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	struct mipi_dsi_device *dsi;
+	struct drm_dsc_picture_parameter_set pps;
+	enum port port;
+
+	drm_dsc_pps_payload_pack(&pps, vdsc_cfg);
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi = intel_dsi->dsi_hosts[port]->device;
+
+		mipi_dsi_picture_parameter_set(dsi, &pps);
+		mipi_dsi_compression_mode(dsi, true);
+	}
+}
+
+static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
+				   const struct intel_crtc_state *crtc_state)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
 	struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
@@ -902,7 +980,7 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
 void intel_dsc_enable(struct intel_encoder *encoder,
 		      const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum pipe pipe = crtc->pipe;
 	i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
@@ -916,11 +994,14 @@ void intel_dsc_enable(struct intel_encoder *encoder,
 	intel_display_power_get(dev_priv,
 				intel_dsc_power_domain(crtc_state));
 
-	intel_configure_pps_for_dsc_encoder(encoder, crtc_state);
+	intel_dsc_pps_configure(encoder, crtc_state);
 
-	intel_dp_write_dsc_pps_sdp(encoder, crtc_state);
+	if (encoder->type == INTEL_OUTPUT_DSI)
+		intel_dsc_dsi_pps_write(encoder, crtc_state);
+	else
+		intel_dsc_dp_pps_write(encoder, crtc_state);
 
-	if (crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(crtc_state)) {
 		dss_ctl1_reg = DSS_CTL1;
 		dss_ctl2_reg = DSS_CTL2;
 	} else {
@@ -938,7 +1019,7 @@ void intel_dsc_enable(struct intel_encoder *encoder,
 
 void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
@@ -947,7 +1028,7 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
 	if (!old_crtc_state->dsc.compression_enable)
 		return;
 
-	if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+	if (!is_pipe_dsc(old_crtc_state)) {
 		dss_ctl1_reg = DSS_CTL1;
 		dss_ctl2_reg = DSS_CTL2;
 	} else {
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 90d3f60..e56a325 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -6,15 +6,20 @@
 #ifndef __INTEL_VDSC_H__
 #define __INTEL_VDSC_H__
 
+#include <linux/types.h>
+
 struct intel_encoder;
 struct intel_crtc_state;
-struct intel_dp;
 
+bool intel_dsc_source_support(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *crtc_state);
 void intel_dsc_enable(struct intel_encoder *encoder,
 		      const struct intel_crtc_state *crtc_state);
 void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
-int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
-				struct intel_crtc_state *pipe_config);
+int intel_dsc_compute_params(struct intel_encoder *encoder,
+			     struct intel_crtc_state *pipe_config);
+void intel_dsc_get_config(struct intel_encoder *encoder,
+			  struct intel_crtc_state *crtc_state);
 enum intel_display_power_domain
 intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
 
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 0ca49b1..daf4fc3 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -23,7 +23,6 @@
  * Author: Jani Nikula <jani.nikula@intel.com>
  */
 
-#include <linux/gpio/consumer.h>
 #include <linux/slab.h>
 
 #include <drm/drm_atomic_helper.h>
@@ -261,9 +260,9 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
 	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
 						   base);
 	struct intel_connector *intel_connector = intel_dsi->attached_connector;
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
-	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	int ret;
 
 	DRM_DEBUG_KMS("\n");
@@ -319,7 +318,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
 static bool glk_dsi_enable_io(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	u32 tmp;
 	bool cold_boot = false;
@@ -367,7 +366,7 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
 static void glk_dsi_device_ready(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	u32 val;
 
@@ -438,7 +437,7 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
 static void bxt_dsi_device_ready(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	u32 val;
 
@@ -465,7 +464,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
 static void vlv_dsi_device_ready(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	u32 val;
 
@@ -516,7 +515,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
 static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	u32 val;
 
@@ -546,7 +545,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
 static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	u32 tmp;
 
@@ -579,7 +578,7 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
 static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 
 	DRM_DEBUG_KMS("\n");
@@ -624,8 +623,8 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 
 	if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
@@ -681,7 +680,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 
 	for_each_dsi_port(port, intel_dsi->ports) {
@@ -745,8 +744,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
 				 const struct intel_crtc_state *pipe_config,
 				 const struct drm_connector_state *conn_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-	struct drm_crtc *crtc = pipe_config->base.crtc;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	struct drm_crtc *crtc = pipe_config->uapi.crtc;
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
@@ -793,9 +792,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
 	if (!IS_GEMINILAKE(dev_priv))
 		intel_dsi_prepare(encoder, pipe_config);
 
-	/* Power on, try both CRC pmic gpio and VBT */
-	if (intel_dsi->gpio_panel)
-		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
 	intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
 
@@ -850,7 +846,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
 			      const struct intel_crtc_state *old_crtc_state,
 			      const struct drm_connector_state *old_conn_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 
 	DRM_DEBUG_KMS("\n");
@@ -882,16 +878,22 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 }
 
 static void intel_dsi_post_disable(struct intel_encoder *encoder,
-				   const struct intel_crtc_state *pipe_config,
-				   const struct drm_connector_state *conn_state)
+				   const struct intel_crtc_state *old_crtc_state,
+				   const struct drm_connector_state *old_conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	u32 val;
 
 	DRM_DEBUG_KMS("\n");
 
+	if (IS_GEN9_LP(dev_priv)) {
+		intel_crtc_vblank_off(old_crtc_state);
+
+		skl_scaler_disable(old_crtc_state);
+	}
+
 	if (is_vid_mode(intel_dsi)) {
 		for_each_dsi_port(port, intel_dsi->ports)
 			vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
@@ -939,11 +941,8 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
 	/* Assert reset */
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
 
-	/* Power off, try both CRC pmic gpio and VBT */
 	intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay);
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
-	if (intel_dsi->gpio_panel)
-		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
 
 	/*
 	 * FIXME As we do with eDP, just make a note of the time here
@@ -956,7 +955,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 				   enum pipe *pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	intel_wakeref_t wakeref;
 	enum port port;
 	bool active = false;
@@ -1032,10 +1031,10 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_display_mode *adjusted_mode =
-					&pipe_config->base.adjusted_mode;
+					&pipe_config->hw.adjusted_mode;
 	struct drm_display_mode *adjusted_mode_sw;
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	unsigned int lane_count = intel_dsi->lane_count;
 	unsigned int bpp, fmt;
 	enum port port;
@@ -1045,7 +1044,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
 				crtc_hblank_start_sw, crtc_hblank_end_sw;
 
 	/* FIXME: hw readout should not depend on SW state */
-	adjusted_mode_sw = &crtc->config->base.adjusted_mode;
+	adjusted_mode_sw = &crtc->config->hw.adjusted_mode;
 
 	/*
 	 * At least one port is active as encoder->get_config is called only if
@@ -1204,7 +1203,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
 	}
 
 	if (pclk) {
-		pipe_config->base.adjusted_mode.crtc_clock = pclk;
+		pipe_config->hw.adjusted_mode.crtc_clock = pclk;
 		pipe_config->port_clock = pclk;
 	}
 }
@@ -1228,7 +1227,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
 	enum port port;
 	unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
 	unsigned int lane_count = intel_dsi->lane_count;
@@ -1315,9 +1314,9 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	enum port port;
 	unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
 	u32 val, tmp;
@@ -1506,7 +1505,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 static void intel_dsi_unprepare(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	u32 val;
 
@@ -1533,12 +1532,9 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
 
 static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
 
-	/* dispose of the gpios */
-	if (intel_dsi->gpio_panel)
-		gpiod_put(intel_dsi->gpio_panel);
-
+	intel_dsi_vbt_gpio_cleanup(intel_dsi);
 	intel_encoder_destroy(encoder);
 }
 
@@ -1819,6 +1815,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 	struct drm_connector *connector;
 	struct drm_display_mode *current_mode, *fixed_mode;
 	enum port port;
+	enum pipe pipe;
 
 	DRM_DEBUG_KMS("\n");
 
@@ -1917,20 +1914,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 
 	vlv_dphy_param_init(intel_dsi);
 
-	/*
-	 * In case of BYT with CRC PMIC, we need to use GPIO for
-	 * Panel control.
-	 */
-	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-	    (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC)) {
-		intel_dsi->gpio_panel =
-			gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
-
-		if (IS_ERR(intel_dsi->gpio_panel)) {
-			DRM_ERROR("Failed to own gpio for panel control\n");
-			intel_dsi->gpio_panel = NULL;
-		}
-	}
+	intel_dsi_vbt_gpio_init(intel_dsi,
+				intel_dsi_get_hw_state(intel_encoder, &pipe));
 
 	drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
 			   DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 95f39cd..6b89e67 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -117,7 +117,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
 			struct intel_crtc_state *config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	int ret;
 	u32 dsi_clk;
 
@@ -255,7 +255,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
 		     struct intel_crtc_state *config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
 	u32 dsi_clock, pclk;
 	u32 pll_ctl, pll_div;
@@ -321,7 +321,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
 	u32 pclk;
 	u32 dsi_clk;
 	u32 dsi_ratio;
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
 
@@ -341,7 +341,7 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 {
 	u32 temp;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
 	temp = I915_READ(MIPI_CTRL(port));
 	temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
@@ -455,7 +455,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
 			struct intel_crtc_state *config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
 	u32 dsi_clk;
 
@@ -503,7 +503,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
 			const struct intel_crtc_state *config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	u32 val;
 
diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile
deleted file mode 100644
index 7e73aa5..0000000
--- a/drivers/gpu/drm/i915/gem/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 18ee708..34be4c0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -27,27 +27,24 @@ static void __do_clflush(struct drm_i915_gem_object *obj)
 static int clflush_work(struct dma_fence_work *base)
 {
 	struct clflush *clflush = container_of(base, typeof(*clflush), base);
-	struct drm_i915_gem_object *obj = fetch_and_zero(&clflush->obj);
+	struct drm_i915_gem_object *obj = clflush->obj;
 	int err;
 
 	err = i915_gem_object_pin_pages(obj);
 	if (err)
-		goto put;
+		return err;
 
 	__do_clflush(obj);
 	i915_gem_object_unpin_pages(obj);
 
-put:
-	i915_gem_object_put(obj);
-	return err;
+	return 0;
 }
 
 static void clflush_release(struct dma_fence_work *base)
 {
 	struct clflush *clflush = container_of(base, typeof(*clflush), base);
 
-	if (clflush->obj)
-		i915_gem_object_put(clflush->obj);
+	i915_gem_object_put(clflush->obj);
 }
 
 static const struct dma_fence_work_ops clflush_ops = {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 4238527..a2e57e62 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -69,7 +69,10 @@
 
 #include <drm/i915_drm.h>
 
+#include "gt/gen6_ppgtt.h"
+#include "gt/intel_context.h"
 #include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_engine_pm.h"
 #include "gt/intel_engine_user.h"
 #include "gt/intel_lrc_reg.h"
 #include "gt/intel_ring.h"
@@ -169,12 +172,80 @@ lookup_user_engine(struct i915_gem_context *ctx,
 	return i915_gem_context_get_engine(ctx, idx);
 }
 
+static struct i915_address_space *
+context_get_vm_rcu(struct i915_gem_context *ctx)
+{
+	GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
+
+	do {
+		struct i915_address_space *vm;
+
+		/*
+		 * We do not allow downgrading from full-ppgtt [to a shared
+		 * global gtt], so ctx->vm cannot become NULL.
+		 */
+		vm = rcu_dereference(ctx->vm);
+		if (!kref_get_unless_zero(&vm->ref))
+			continue;
+
+		/*
+		 * This ppgtt may have been reallocated between
+		 * the read and the kref, and reassigned to a third
+		 * context. In order to avoid inadvertent sharing
+		 * of this ppgtt with that third context (and not
+		 * src), we have to confirm that we have the same
+		 * ppgtt after passing through the strong memory
+		 * barrier implied by a successful
+		 * kref_get_unless_zero().
+		 *
+		 * Once we have acquired the current ppgtt of ctx,
+		 * we no longer care if it is released from ctx, as
+		 * it cannot be reallocated elsewhere.
+		 */
+
+		if (vm == rcu_access_pointer(ctx->vm))
+			return rcu_pointer_handoff(vm);
+
+		i915_vm_put(vm);
+	} while (1);
+}
+
+static void intel_context_set_gem(struct intel_context *ce,
+				  struct i915_gem_context *ctx)
+{
+	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
+	RCU_INIT_POINTER(ce->gem_context, ctx);
+
+	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
+		ce->ring = __intel_context_ring_size(SZ_16K);
+
+	if (rcu_access_pointer(ctx->vm)) {
+		struct i915_address_space *vm;
+
+		rcu_read_lock();
+		vm = context_get_vm_rcu(ctx); /* hmm */
+		rcu_read_unlock();
+
+		i915_vm_put(ce->vm);
+		ce->vm = vm;
+	}
+
+	GEM_BUG_ON(ce->timeline);
+	if (ctx->timeline)
+		ce->timeline = intel_timeline_get(ctx->timeline);
+
+	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
+	    intel_engine_has_semaphores(ce->engine))
+		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
 {
 	while (count--) {
 		if (!e->engines[count])
 			continue;
 
+		RCU_INIT_POINTER(e->engines[count]->gem_context, NULL);
 		intel_context_put(e->engines[count]);
 	}
 	kfree(e);
@@ -211,12 +282,14 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
 		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
 		GEM_BUG_ON(e->engines[engine->legacy_idx]);
 
-		ce = intel_context_create(ctx, engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			__free_engines(e, e->num_engines + 1);
 			return ERR_CAST(ce);
 		}
 
+		intel_context_set_gem(ce, ctx);
+
 		e->engines[engine->legacy_idx] = ce;
 		e->num_engines = max(e->num_engines, engine->legacy_idx);
 	}
@@ -236,14 +309,10 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	free_engines(rcu_access_pointer(ctx->engines));
 	mutex_destroy(&ctx->engines_mutex);
 
-	kfree(ctx->jump_whitelist);
-
 	if (ctx->timeline)
 		intel_timeline_put(ctx->timeline);
 
-	kfree(ctx->name);
 	put_pid(ctx->pid);
-
 	mutex_destroy(&ctx->mutex);
 
 	kfree_rcu(ctx, rcu);
@@ -389,15 +458,6 @@ static void kill_context(struct i915_gem_context *ctx)
 	struct intel_context *ce;
 
 	/*
-	 * If we are already banned, it was due to a guilty request causing
-	 * a reset and the entire context being evicted from the GPU.
-	 */
-	if (i915_gem_context_is_banned(ctx))
-		return;
-
-	i915_gem_context_set_banned(ctx);
-
-	/*
 	 * Map the user's engine back to the actual engines; one virtual
 	 * engine will be mapped to multiple engines, and using ctx->engine[]
 	 * the same engine may be have multiple instances in the user's map.
@@ -407,6 +467,9 @@ static void kill_context(struct i915_gem_context *ctx)
 	for_each_gem_engine(ce, __context_engines_static(ctx), it) {
 		struct intel_engine_cs *engine;
 
+		if (intel_context_set_banned(ce))
+			continue;
+
 		/*
 		 * Check the current active state of this context; if we
 		 * are currently executing on the GPU we need to evict
@@ -427,11 +490,29 @@ static void kill_context(struct i915_gem_context *ctx)
 	}
 }
 
+static void set_closed_name(struct i915_gem_context *ctx)
+{
+	char *s;
+
+	/* Replace '[]' with '<>' to indicate closed in debug prints */
+
+	s = strrchr(ctx->name, '[');
+	if (!s)
+		return;
+
+	*s = '<';
+
+	s = strchr(s + 1, ']');
+	if (s)
+		*s = '>';
+}
+
 static void context_close(struct i915_gem_context *ctx)
 {
 	struct i915_address_space *vm;
 
 	i915_gem_context_set_closed(ctx);
+	set_closed_name(ctx);
 
 	mutex_lock(&ctx->mutex);
 
@@ -529,9 +610,6 @@ __create_context(struct drm_i915_private *i915)
 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
-	ctx->jump_whitelist = NULL;
-	ctx->jump_whitelist_cmds = 0;
-
 	spin_lock(&i915->gem.contexts.lock);
 	list_add_tail(&ctx->link, &i915->gem.contexts.list);
 	spin_unlock(&i915->gem.contexts.lock);
@@ -628,7 +706,7 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 	if (HAS_FULL_PPGTT(i915)) {
 		struct i915_ppgtt *ppgtt;
 
-		ppgtt = i915_ppgtt_create(i915);
+		ppgtt = i915_ppgtt_create(&i915->gt);
 		if (IS_ERR(ppgtt)) {
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 					 PTR_ERR(ppgtt));
@@ -661,37 +739,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 	return ctx;
 }
 
-static void
-destroy_kernel_context(struct i915_gem_context **ctxp)
-{
-	struct i915_gem_context *ctx;
-
-	/* Keep the context ref so that we can free it immediately ourselves */
-	ctx = i915_gem_context_get(fetch_and_zero(ctxp));
-	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-
-	context_close(ctx);
-	i915_gem_context_free(ctx);
-}
-
-struct i915_gem_context *
-i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
-{
-	struct i915_gem_context *ctx;
-
-	ctx = i915_gem_create_context(i915, 0);
-	if (IS_ERR(ctx))
-		return ctx;
-
-	i915_gem_context_clear_bannable(ctx);
-	i915_gem_context_set_persistence(ctx);
-	ctx->sched.priority = I915_USER_PRIORITY(prio);
-
-	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-
-	return ctx;
-}
-
 static void init_contexts(struct i915_gem_contexts *gc)
 {
 	spin_lock_init(&gc->lock);
@@ -701,41 +748,19 @@ static void init_contexts(struct i915_gem_contexts *gc)
 	init_llist_head(&gc->free_list);
 }
 
-int i915_gem_init_contexts(struct drm_i915_private *i915)
+void i915_gem_init__contexts(struct drm_i915_private *i915)
 {
-	struct i915_gem_context *ctx;
-
-	/* Reassure ourselves we are only called once */
-	GEM_BUG_ON(i915->kernel_context);
-
 	init_contexts(&i915->gem.contexts);
-
-	/* lowest priority; idle task */
-	ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
-	if (IS_ERR(ctx)) {
-		DRM_ERROR("Failed to create default global context\n");
-		return PTR_ERR(ctx);
-	}
-	i915->kernel_context = ctx;
-
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
 			 DRIVER_CAPS(i915)->has_logical_contexts ?
 			 "logical" : "fake");
-	return 0;
 }
 
 void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
 {
-	destroy_kernel_context(&i915->kernel_context);
 	flush_work(&i915->gem.contexts.free_work);
 }
 
-static int context_idr_cleanup(int id, void *p, void *data)
-{
-	context_close(p);
-	return 0;
-}
-
 static int vm_idr_cleanup(int id, void *p, void *data)
 {
 	i915_vm_put(p);
@@ -743,7 +768,8 @@ static int vm_idr_cleanup(int id, void *p, void *data)
 }
 
 static int gem_context_register(struct i915_gem_context *ctx,
-				struct drm_i915_file_private *fpriv)
+				struct drm_i915_file_private *fpriv,
+				u32 *id)
 {
 	struct i915_address_space *vm;
 	int ret;
@@ -757,24 +783,14 @@ static int gem_context_register(struct i915_gem_context *ctx,
 	mutex_unlock(&ctx->mutex);
 
 	ctx->pid = get_task_pid(current, PIDTYPE_PID);
-	ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
-			      current->comm, pid_nr(ctx->pid));
-	if (!ctx->name) {
-		ret = -ENOMEM;
-		goto err_pid;
-	}
+	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
+		 current->comm, pid_nr(ctx->pid));
 
 	/* And finally expose ourselves to userspace via the idr */
-	mutex_lock(&fpriv->context_idr_lock);
-	ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
-	mutex_unlock(&fpriv->context_idr_lock);
-	if (ret >= 0)
-		goto out;
+	ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
+	if (ret)
+		put_pid(fetch_and_zero(&ctx->pid));
 
-	kfree(fetch_and_zero(&ctx->name));
-err_pid:
-	put_pid(fetch_and_zero(&ctx->pid));
-out:
 	return ret;
 }
 
@@ -784,11 +800,11 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct i915_gem_context *ctx;
 	int err;
+	u32 id;
 
-	mutex_init(&file_priv->context_idr_lock);
+	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
+
 	mutex_init(&file_priv->vm_idr_lock);
-
-	idr_init(&file_priv->context_idr);
 	idr_init_base(&file_priv->vm_idr, 1);
 
 	ctx = i915_gem_create_context(i915, 0);
@@ -797,22 +813,19 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 		goto err;
 	}
 
-	err = gem_context_register(ctx, file_priv);
+	err = gem_context_register(ctx, file_priv, &id);
 	if (err < 0)
 		goto err_ctx;
 
-	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
-	GEM_BUG_ON(err > 0);
-
+	GEM_BUG_ON(id);
 	return 0;
 
 err_ctx:
 	context_close(ctx);
 err:
 	idr_destroy(&file_priv->vm_idr);
-	idr_destroy(&file_priv->context_idr);
+	xa_destroy(&file_priv->context_xa);
 	mutex_destroy(&file_priv->vm_idr_lock);
-	mutex_destroy(&file_priv->context_idr_lock);
 	return err;
 }
 
@@ -820,10 +833,12 @@ void i915_gem_context_close(struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct drm_i915_private *i915 = file_priv->dev_priv;
+	struct i915_gem_context *ctx;
+	unsigned long idx;
 
-	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
-	idr_destroy(&file_priv->context_idr);
-	mutex_destroy(&file_priv->context_idr_lock);
+	xa_for_each(&file_priv->context_xa, idx, ctx)
+		context_close(ctx);
+	xa_destroy(&file_priv->context_xa);
 
 	idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
 	idr_destroy(&file_priv->vm_idr);
@@ -847,7 +862,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
 	if (args->flags)
 		return -EINVAL;
 
-	ppgtt = i915_ppgtt_create(i915);
+	ppgtt = i915_ppgtt_create(&i915->gt);
 	if (IS_ERR(ppgtt))
 		return PTR_ERR(ppgtt);
 
@@ -1012,7 +1027,7 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
 		return -ENODEV;
 
 	rcu_read_lock();
-	vm = i915_vm_get(ctx->vm);
+	vm = context_get_vm_rcu(ctx);
 	rcu_read_unlock();
 
 	ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
@@ -1049,7 +1064,7 @@ static void set_ppgtt_barrier(void *data)
 
 static int emit_ppgtt_update(struct i915_request *rq, void *data)
 {
-	struct i915_address_space *vm = rq->hw_context->vm;
+	struct i915_address_space *vm = rq->context->vm;
 	struct intel_engine_cs *engine = rq->engine;
 	u32 base = engine->mmio_base;
 	u32 *cs;
@@ -1096,9 +1111,6 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
 		}
 		*cs++ = MI_NOOP;
 		intel_ring_advance(rq, cs);
-	} else {
-		/* ppGTT is not part of the legacy context image */
-		gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
 	}
 
 	return 0;
@@ -1106,10 +1118,20 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
 
 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
 {
+	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
+		return true;
+
 	if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
-		return !ce->state;
-	else
-		return !atomic_read(&ce->pin_count);
+		return false;
+
+	if (!atomic_read(&ce->pin_count))
+		return true;
+
+	/* ppGTT is not part of the legacy context image */
+	if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm)))
+		return true;
+
+	return false;
 }
 
 static int set_ppgtt(struct drm_i915_file_private *file_priv,
@@ -1214,12 +1236,14 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 	 * image, or into the registers directory, does not stick). Pristine
 	 * and idle contexts will be configured on pinning.
 	 */
-	if (!intel_context_is_pinned(ce))
+	if (!intel_context_pin_if_active(ce))
 		return 0;
 
-	rq = i915_request_create(ce->engine->kernel_context);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
+	rq = intel_engine_create_kernel_request(ce->engine);
+	if (IS_ERR(rq)) {
+		ret = PTR_ERR(rq);
+		goto out_unpin;
+	}
 
 	/* Serialise with the remote context */
 	ret = intel_context_prepare_remote_request(ce, rq);
@@ -1227,6 +1251,8 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 		ret = gen8_emit_rpcs_config(rq, ce, sseu);
 
 	i915_request_add(rq);
+out_unpin:
+	intel_context_unpin(ce);
 	return ret;
 }
 
@@ -1485,12 +1511,14 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
 		}
 	}
 
-	ce = intel_execlists_create_virtual(set->ctx, siblings, n);
+	ce = intel_execlists_create_virtual(siblings, n);
 	if (IS_ERR(ce)) {
 		err = PTR_ERR(ce);
 		goto out_siblings;
 	}
 
+	intel_context_set_gem(ce, set->ctx);
+
 	if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
 		intel_context_put(ce);
 		err = -EEXIST;
@@ -1660,12 +1688,14 @@ set_engines(struct i915_gem_context *ctx,
 			return -ENOENT;
 		}
 
-		ce = intel_context_create(ctx, engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			__free_engines(set.engines, n);
 			return PTR_ERR(ce);
 		}
 
+		intel_context_set_gem(ce, ctx);
+
 		set.engines->engines[n] = ce;
 	}
 	set.engines->num_engines = num_engines;
@@ -1806,6 +1836,44 @@ set_persistence(struct i915_gem_context *ctx,
 	return __context_set_persistence(ctx, args->value);
 }
 
+static void __apply_priority(struct intel_context *ce, void *arg)
+{
+	struct i915_gem_context *ctx = arg;
+
+	if (!intel_engine_has_semaphores(ce->engine))
+		return;
+
+	if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
+		intel_context_set_use_semaphores(ce);
+	else
+		intel_context_clear_use_semaphores(ce);
+}
+
+static int set_priority(struct i915_gem_context *ctx,
+			const struct drm_i915_gem_context_param *args)
+{
+	s64 priority = args->value;
+
+	if (args->size)
+		return -EINVAL;
+
+	if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+		return -ENODEV;
+
+	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
+	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
+		return -EINVAL;
+
+	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
+	    !capable(CAP_SYS_NICE))
+		return -EPERM;
+
+	ctx->sched.priority = I915_USER_PRIORITY(priority);
+	context_apply_all(ctx, __apply_priority, ctx);
+
+	return 0;
+}
+
 static int ctx_setparam(struct drm_i915_file_private *fpriv,
 			struct i915_gem_context *ctx,
 			struct drm_i915_gem_context_param *args)
@@ -1852,23 +1920,7 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
 		break;
 
 	case I915_CONTEXT_PARAM_PRIORITY:
-		{
-			s64 priority = args->value;
-
-			if (args->size)
-				ret = -EINVAL;
-			else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
-				ret = -ENODEV;
-			else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
-				 priority < I915_CONTEXT_MIN_USER_PRIORITY)
-				ret = -EINVAL;
-			else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
-				 !capable(CAP_SYS_NICE))
-				ret = -EPERM;
-			else
-				ctx->sched.priority =
-					I915_USER_PRIORITY(priority);
-		}
+		ret = set_priority(ctx, args);
 		break;
 
 	case I915_CONTEXT_PARAM_SSEU:
@@ -1948,20 +2000,23 @@ static int clone_engines(struct i915_gem_context *dst,
 		 */
 		if (intel_engine_is_virtual(engine))
 			clone->engines[n] =
-				intel_execlists_clone_virtual(dst, engine);
+				intel_execlists_clone_virtual(engine);
 		else
-			clone->engines[n] = intel_context_create(dst, engine);
+			clone->engines[n] = intel_context_create(engine);
 		if (IS_ERR_OR_NULL(clone->engines[n])) {
 			__free_engines(clone, n);
 			goto err_unlock;
 		}
+
+		intel_context_set_gem(clone->engines[n], dst);
 	}
 	clone->num_engines = n;
 
 	user_engines = i915_gem_context_user_engines(src);
 	i915_gem_context_unlock_engines(src);
 
-	free_engines(dst->engines);
+	/* Serialised by constructor */
+	free_engines(__context_engines_static(dst));
 	RCU_INIT_POINTER(dst->engines, clone);
 	if (user_engines)
 		i915_gem_context_set_user_engines(dst);
@@ -1996,7 +2051,8 @@ static int clone_sseu(struct i915_gem_context *dst,
 	unsigned long n;
 	int err;
 
-	clone = dst->engines; /* no locking required; sole access */
+	/* no locking required; sole access under constructor */
+	clone = __context_engines_static(dst);
 	if (e->num_engines != clone->num_engines) {
 		err = -EINVAL;
 		goto unlock;
@@ -2041,47 +2097,21 @@ static int clone_vm(struct i915_gem_context *dst,
 	struct i915_address_space *vm;
 	int err = 0;
 
+	if (!rcu_access_pointer(src->vm))
+		return 0;
+
 	rcu_read_lock();
-	do {
-		vm = rcu_dereference(src->vm);
-		if (!vm)
-			break;
-
-		if (!kref_get_unless_zero(&vm->ref))
-			continue;
-
-		/*
-		 * This ppgtt may have be reallocated between
-		 * the read and the kref, and reassigned to a third
-		 * context. In order to avoid inadvertent sharing
-		 * of this ppgtt with that third context (and not
-		 * src), we have to confirm that we have the same
-		 * ppgtt after passing through the strong memory
-		 * barrier implied by a successful
-		 * kref_get_unless_zero().
-		 *
-		 * Once we have acquired the current ppgtt of src,
-		 * we no longer care if it is released from src, as
-		 * it cannot be reallocated elsewhere.
-		 */
-
-		if (vm == rcu_access_pointer(src->vm))
-			break;
-
-		i915_vm_put(vm);
-	} while (1);
+	vm = context_get_vm_rcu(src);
 	rcu_read_unlock();
 
-	if (vm) {
-		if (!mutex_lock_interruptible(&dst->mutex)) {
-			__assign_ppgtt(dst, vm);
-			mutex_unlock(&dst->mutex);
-		} else {
-			err = -EINTR;
-		}
-		i915_vm_put(vm);
+	if (!mutex_lock_interruptible(&dst->mutex)) {
+		__assign_ppgtt(dst, vm);
+		mutex_unlock(&dst->mutex);
+	} else {
+		err = -EINTR;
 	}
 
+	i915_vm_put(vm);
 	return err;
 }
 
@@ -2153,6 +2183,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_context_create_ext *args = data;
 	struct create_ext ext_data;
 	int ret;
+	u32 id;
 
 	if (!DRIVER_CAPS(i915)->has_logical_contexts)
 		return -ENODEV;
@@ -2184,11 +2215,11 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 			goto err_ctx;
 	}
 
-	ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
+	ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
 	if (ret < 0)
 		goto err_ctx;
 
-	args->ctx_id = ret;
+	args->ctx_id = id;
 	DRM_DEBUG("HW context %d created\n", args->ctx_id);
 
 	return 0;
@@ -2211,11 +2242,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 	if (!args->ctx_id)
 		return -ENOENT;
 
-	if (mutex_lock_interruptible(&file_priv->context_idr_lock))
-		return -EINTR;
-
-	ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
-	mutex_unlock(&file_priv->context_idr_lock);
+	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
 	if (!ctx)
 		return -ENOENT;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 18e50a7..3ae61a3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -13,7 +13,6 @@
 
 #include "i915_drv.h"
 #include "i915_gem.h"
-#include "i915_gem_gtt.h"
 #include "i915_scheduler.h"
 #include "intel_device_info.h"
 
@@ -91,26 +90,6 @@ static inline void i915_gem_context_clear_persistence(struct i915_gem_context *c
 	clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
 }
 
-static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
-{
-	return test_bit(CONTEXT_BANNED, &ctx->flags);
-}
-
-static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
-{
-	set_bit(CONTEXT_BANNED, &ctx->flags);
-}
-
-static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
-{
-	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
-}
-
-static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
-{
-	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
-}
-
 static inline bool
 i915_gem_context_user_engines(const struct i915_gem_context *ctx)
 {
@@ -129,31 +108,8 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
 	clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
 }
 
-static inline bool
-i915_gem_context_nopreempt(const struct i915_gem_context *ctx)
-{
-	return test_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
-static inline void
-i915_gem_context_set_nopreempt(struct i915_gem_context *ctx)
-{
-	set_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
-static inline void
-i915_gem_context_clear_nopreempt(struct i915_gem_context *ctx)
-{
-	clear_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
-static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
-{
-	return !ctx->file_priv;
-}
-
 /* i915_gem_context.c */
-int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
+void i915_gem_init__contexts(struct drm_i915_private *i915);
 void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
 
 int i915_gem_context_open(struct drm_i915_private *i915,
@@ -178,9 +134,6 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
 				       struct drm_file *file);
 
-struct i915_gem_context *
-i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);
-
 static inline struct i915_gem_context *
 i915_gem_context_get(struct i915_gem_context *ctx)
 {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 3870dd5..017ca80 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -100,15 +100,6 @@ struct i915_gem_context {
 	 */
 	struct pid *pid;
 
-	/**
-	 * @name: arbitrary name
-	 *
-	 * A name is constructed for the context from the creator's process
-	 * name, pid and user handle in order to uniquely identify the
-	 * context in messages.
-	 */
-	const char *name;
-
 	/** link: place with &drm_i915_private.context_list */
 	struct list_head link;
 	struct llist_node free_link;
@@ -143,11 +134,8 @@ struct i915_gem_context {
 	 * @flags: small set of booleans
 	 */
 	unsigned long flags;
-#define CONTEXT_BANNED			0
-#define CONTEXT_CLOSED			1
-#define CONTEXT_FORCE_SINGLE_SUBMISSION	2
-#define CONTEXT_USER_ENGINES		3
-#define CONTEXT_NOPREEMPT		4
+#define CONTEXT_CLOSED			0
+#define CONTEXT_USER_ENGINES		1
 
 	struct mutex mutex;
 
@@ -177,12 +165,14 @@ struct i915_gem_context {
 	 */
 	struct radix_tree_root handles_vma;
 
-	/** jump_whitelist: Bit array for tracking cmds during cmdparsing
-	 *  Guarded by struct_mutex
+	/**
+	 * @name: arbitrary name, used for user debug
+	 *
+	 * A name is constructed for the context from the creator's process
+	 * name, pid and user handle in order to uniquely identify the
+	 * context in messages.
 	 */
-	unsigned long *jump_whitelist;
-	/** jump_whitelist_cmds: No of cmd slots available */
-	u32 jump_whitelist_cmds;
+	char name[TASK_COMM_LEN + 8];
 };
 
 #endif /* __I915_GEM_CONTEXT_TYPES_H__ */
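A side note on the @name change in this hunk: the context name moves from a separately allocated string to a buffer embedded in the struct, sized TASK_COMM_LEN + 8. A rough sketch of how such a buffer can be filled without any allocation follows; the actual construction lives in i915_gem_context.c (outside this hunk), and example_set_context_name plus the exact "%s[%d]" decoration are illustrative assumptions, not the driver's code.

    /* Illustrative only: compose "<comm>[<pid>]" into the embedded buffer.
     * Needs <linux/sched.h> for current->comm and <linux/pid.h> for pid_nr().
     */
    static void example_set_context_name(struct i915_gem_context *ctx)
    {
            snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
                     current->comm, pid_nr(ctx->pid));
    }

With the buffer embedded, context teardown no longer needs a matching kfree() and the name stays valid for the whole lifetime of the context.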
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index eaea49d..372b57c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -93,40 +93,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 	i915_gem_object_unpin_map(obj);
 }
 
-static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
-	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
-	struct page *page;
-
-	if (page_num >= obj->base.size >> PAGE_SHIFT)
-		return NULL;
-
-	if (!i915_gem_object_has_struct_page(obj))
-		return NULL;
-
-	if (i915_gem_object_pin_pages(obj))
-		return NULL;
-
-	/* Synchronisation is left to the caller (via .begin_cpu_access()) */
-	page = i915_gem_object_get_page(obj, page_num);
-	if (IS_ERR(page))
-		goto err_unpin;
-
-	return kmap(page);
-
-err_unpin:
-	i915_gem_object_unpin_pages(obj);
-	return NULL;
-}
-
-static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
-
-	kunmap(virt_to_page(addr));
-	i915_gem_object_unpin_pages(obj);
-}
-
 static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
@@ -195,8 +161,6 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
 	.map_dma_buf = i915_gem_map_dma_buf,
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
-	.map = i915_gem_dmabuf_kmap,
-	.unmap = i915_gem_dmabuf_kunmap,
 	.mmap = i915_gem_dmabuf_mmap,
 	.vmap = i915_gem_dmabuf_vmap,
 	.vunmap = i915_gem_dmabuf_vunmap,
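With the per-page .map/.unmap (kmap) callbacks dropped from i915_dmabuf_ops above, CPU access from an importer goes through begin/end_cpu_access plus the whole-object vmap entry points that remain in the ops table. A minimal importer-side sketch, assuming the dma-buf API of this kernel generation (dma_buf_vmap() returning a plain pointer); read_first_word, buf and out are illustrative names, not part of this series.

    #include <linux/dma-buf.h>
    #include <linux/dma-direction.h>
    #include <linux/types.h>

    /* Illustrative importer: bracket the CPU access and map the whole
     * object instead of kmapping individual pages. */
    static int read_first_word(struct dma_buf *buf, u32 *out)
    {
            void *vaddr;
            int err;

            err = dma_buf_begin_cpu_access(buf, DMA_FROM_DEVICE);
            if (err)
                    return err;

            vaddr = dma_buf_vmap(buf);
            if (!vaddr) {
                    dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);
                    return -ENOMEM;
            }

            *out = *(u32 *)vaddr;

            dma_buf_vunmap(buf, vaddr);
            return dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);
    }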
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index f86400a..0cc40e7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -12,6 +12,8 @@
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
 #include "i915_vma.h"
+#include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
 
 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
 {
@@ -148,9 +150,17 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
 	obj->read_domains |= I915_GEM_DOMAIN_GTT;
 	if (write) {
+		struct i915_vma *vma;
+
 		obj->read_domains = I915_GEM_DOMAIN_GTT;
 		obj->write_domain = I915_GEM_DOMAIN_GTT;
 		obj->mm.dirty = true;
+
+		spin_lock(&obj->vma.lock);
+		for_each_ggtt_vma(vma, obj)
+			if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+				i915_vma_set_ggtt_write(vma);
+		spin_unlock(&obj->vma.lock);
 	}
 
 	i915_gem_object_unpin_pages(obj);
@@ -175,138 +185,34 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level)
 {
-	struct i915_vma *vma;
 	int ret;
 
-	assert_object_held(obj);
-
 	if (obj->cache_level == cache_level)
 		return 0;
 
-	/* Inspect the list of currently bound VMA and unbind any that would
-	 * be invalid given the new cache-level. This is principally to
-	 * catch the issue of the CS prefetch crossing page boundaries and
-	 * reading an invalid PTE on older architectures.
-	 */
-restart:
-	list_for_each_entry(vma, &obj->vma.list, obj_link) {
-		if (!drm_mm_node_allocated(&vma->node))
-			continue;
+	ret = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_ALL,
+				   MAX_SCHEDULE_TIMEOUT);
+	if (ret)
+		return ret;
 
-		if (i915_vma_is_pinned(vma)) {
-			DRM_DEBUG("can not change the cache level of pinned objects\n");
-			return -EBUSY;
-		}
+	ret = i915_gem_object_lock_interruptible(obj);
+	if (ret)
+		return ret;
 
-		if (!i915_vma_is_closed(vma) &&
-		    i915_gem_valid_gtt_space(vma, cache_level))
-			continue;
-
-		ret = i915_vma_unbind(vma);
-		if (ret)
-			return ret;
-
-		/* As unbinding may affect other elements in the
-		 * obj->vma_list (due to side-effects from retiring
-		 * an active vma), play safe and restart the iterator.
-		 */
-		goto restart;
+	/* Always invalidate stale cachelines */
+	if (obj->cache_level != cache_level) {
+		i915_gem_object_set_cache_coherency(obj, cache_level);
+		obj->cache_dirty = true;
 	}
 
-	/* We can reuse the existing drm_mm nodes but need to change the
-	 * cache-level on the PTE. We could simply unbind them all and
-	 * rebind with the correct cache-level on next use. However since
-	 * we already have a valid slot, dma mapping, pages etc, we may as
-	 * rewrite the PTE in the belief that doing so tramples upon less
-	 * state and so involves less work.
-	 */
-	if (atomic_read(&obj->bind_count)) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	i915_gem_object_unlock(obj);
 
-		/* Before we change the PTE, the GPU must not be accessing it.
-		 * If we wait upon the object, we know that all the bound
-		 * VMA are no longer active.
-		 */
-		ret = i915_gem_object_wait(obj,
-					   I915_WAIT_INTERRUPTIBLE |
-					   I915_WAIT_ALL,
-					   MAX_SCHEDULE_TIMEOUT);
-		if (ret)
-			return ret;
-
-		if (!HAS_LLC(i915) && cache_level != I915_CACHE_NONE) {
-			intel_wakeref_t wakeref =
-				intel_runtime_pm_get(&i915->runtime_pm);
-
-			/*
-			 * Access to snoopable pages through the GTT is
-			 * incoherent and on some machines causes a hard
-			 * lockup. Relinquish the CPU mmaping to force
-			 * userspace to refault in the pages and we can
-			 * then double check if the GTT mapping is still
-			 * valid for that pointer access.
-			 */
-			ret = mutex_lock_interruptible(&i915->ggtt.vm.mutex);
-			if (ret) {
-				intel_runtime_pm_put(&i915->runtime_pm,
-						     wakeref);
-				return ret;
-			}
-
-			if (obj->userfault_count)
-				__i915_gem_object_release_mmap(obj);
-
-			/*
-			 * As we no longer need a fence for GTT access,
-			 * we can relinquish it now (and so prevent having
-			 * to steal a fence from someone else on the next
-			 * fence request). Note GPU activity would have
-			 * dropped the fence as all snoopable access is
-			 * supposed to be linear.
-			 */
-			for_each_ggtt_vma(vma, obj) {
-				ret = i915_vma_revoke_fence(vma);
-				if (ret)
-					break;
-			}
-			mutex_unlock(&i915->ggtt.vm.mutex);
-			intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-			if (ret)
-				return ret;
-		} else {
-			/*
-			 * We either have incoherent backing store and
-			 * so no GTT access or the architecture is fully
-			 * coherent. In such cases, existing GTT mmaps
-			 * ignore the cache bit in the PTE and we can
-			 * rewrite it without confusing the GPU or having
-			 * to force userspace to fault back in its mmaps.
-			 */
-		}
-
-		list_for_each_entry(vma, &obj->vma.list, obj_link) {
-			if (!drm_mm_node_allocated(&vma->node))
-				continue;
-
-			/* Wait for an earlier async bind, need to rewrite it */
-			ret = i915_vma_sync(vma);
-			if (ret)
-				return ret;
-
-			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE, NULL);
-			if (ret)
-				return ret;
-		}
-	}
-
-	list_for_each_entry(vma, &obj->vma.list, obj_link) {
-		if (i915_vm_has_cache_coloring(vma->vm))
-			vma->node.color = cache_level;
-	}
-	i915_gem_object_set_cache_coherency(obj, cache_level);
-	obj->cache_dirty = true; /* Always invalidate stale cachelines */
-
-	return 0;
+	/* The cache-level will be applied when each vma is rebound. */
+	return i915_gem_object_unbind(obj,
+				      I915_GEM_OBJECT_UNBIND_ACTIVE |
+				      I915_GEM_OBJECT_UNBIND_BARRIER);
 }
 
 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
@@ -387,20 +293,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	if (obj->cache_level == level)
-		goto out;
-
-	ret = i915_gem_object_wait(obj,
-				   I915_WAIT_INTERRUPTIBLE,
-				   MAX_SCHEDULE_TIMEOUT);
-	if (ret)
-		goto out;
-
-	ret = i915_gem_object_lock_interruptible(obj);
-	if (ret == 0) {
-		ret = i915_gem_object_set_cache_level(obj, level);
-		i915_gem_object_unlock(obj);
-	}
+	ret = i915_gem_object_set_cache_level(obj, level);
 
 out:
 	i915_gem_object_put(obj);
@@ -419,10 +312,13 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     const struct i915_ggtt_view *view,
 				     unsigned int flags)
 {
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_vma *vma;
 	int ret;
 
-	assert_object_held(obj);
+	/* Frame buffer must be in LMEM (no migration yet) */
+	if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj))
+		return ERR_PTR(-EINVAL);
 
 	/*
 	 * The display engine is not coherent with the LLC cache on gen6.  As
@@ -435,7 +331,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
 	 */
 	ret = i915_gem_object_set_cache_level(obj,
-					      HAS_WT(to_i915(obj->base.dev)) ?
+					      HAS_WT(i915) ?
 					      I915_CACHE_WT : I915_CACHE_NONE);
 	if (ret)
 		return ERR_PTR(ret);
@@ -462,13 +358,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 
 	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
 
-	__i915_gem_object_flush_for_display(obj);
-
-	/*
-	 * It should now be out of any other write domains, and we can update
-	 * the domain values for our changes.
-	 */
-	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	i915_gem_object_flush_if_display(obj);
 
 	return vma;
 }
@@ -479,8 +369,11 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	struct i915_vma *vma;
 
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+	if (!atomic_read(&obj->bind_count))
+		return;
 
 	mutex_lock(&i915->ggtt.vm.mutex);
+	spin_lock(&obj->vma.lock);
 	for_each_ggtt_vma(vma, obj) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
@@ -488,6 +381,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 		GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
 		list_move_tail(&vma->vm_link, &vma->vm->bound_list);
 	}
+	spin_unlock(&obj->vma.lock);
 	mutex_unlock(&i915->ggtt.vm.mutex);
 
 	if (i915_gem_object_is_shrinkable(obj)) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index bc3a672..d5a0f5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -25,6 +25,7 @@
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
 #include "i915_gem_ioctls.h"
+#include "i915_sw_fence_work.h"
 #include "i915_trace.h"
 
 enum {
@@ -228,6 +229,7 @@ struct i915_execbuffer {
 
 	struct i915_request *request; /** our request to build */
 	struct i915_vma *batch; /** identity of the batch obj/vma */
+	struct i915_vma *trampoline; /** trampoline used for chaining */
 
 	/** actual size of execobj[] as we may extend it for the cmdparser */
 	unsigned int buffer_count;
@@ -253,7 +255,6 @@ struct i915_execbuffer {
 		bool has_fence : 1;
 		bool needs_unfenced : 1;
 
-		struct intel_context *ce;
 		struct i915_request *rq;
 		u32 *rq_cmd;
 		unsigned int rq_size;
@@ -277,25 +278,6 @@ struct i915_execbuffer {
 
 #define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
 
-/*
- * Used to convert any address to canonical form.
- * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
- * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
- * addresses to be in a canonical form:
- * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
- * canonical form [63:48] == [47]."
- */
-#define GEN8_HIGH_ADDRESS_BIT 47
-static inline u64 gen8_canonical_addr(u64 address)
-{
-	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
-}
-
-static inline u64 gen8_noncanonical_addr(u64 address)
-{
-	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
-}
-
 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
 {
 	return intel_engine_requires_cmd_parser(eb->engine) ||
@@ -748,9 +730,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 	unsigned int i, batch;
 	int err;
 
-	if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
-		return -EIO;
-
 	INIT_LIST_HEAD(&eb->relocs);
 	INIT_LIST_HEAD(&eb->unbound);
 
@@ -886,9 +865,6 @@ static void eb_destroy(const struct i915_execbuffer *eb)
 {
 	GEM_BUG_ON(eb->reloc_cache.rq);
 
-	if (eb->reloc_cache.ce)
-		intel_context_put(eb->reloc_cache.ce);
-
 	if (eb->lut_size > 0)
 		kfree(eb->buckets);
 }
@@ -912,7 +888,6 @@ static void reloc_cache_init(struct reloc_cache *cache,
 	cache->has_fence = cache->gen < 4;
 	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
 	cache->node.flags = 0;
-	cache->ce = NULL;
 	cache->rq = NULL;
 	cache->rq_size = 0;
 }
@@ -1182,7 +1157,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	if (err)
 		goto err_unmap;
 
-	rq = intel_context_create_request(cache->ce);
+	rq = i915_request_create(eb->context);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_unpin;
@@ -1246,36 +1221,9 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
 	if (unlikely(!cache->rq)) {
 		int err;
 
-		/* If we need to copy for the cmdparser, we will stall anyway */
-		if (eb_use_cmdparser(eb))
-			return ERR_PTR(-EWOULDBLOCK);
-
 		if (!intel_engine_can_store_dword(eb->engine))
 			return ERR_PTR(-ENODEV);
 
-		if (!cache->ce) {
-			struct intel_context *ce;
-
-			/*
-			 * The CS pre-parser can pre-fetch commands across
-			 * memory sync points and starting gen12 it is able to
-			 * pre-fetch across BB_START and BB_END boundaries
-			 * (within the same context). We therefore use a
-			 * separate context gen12+ to guarantee that the reloc
-			 * writes land before the parser gets to the target
-			 * memory location.
-			 */
-			if (cache->gen >= 12)
-				ce = intel_context_create(eb->context->gem_context,
-							  eb->engine);
-			else
-				ce = intel_context_get(eb->context);
-			if (IS_ERR(ce))
-				return ERR_CAST(ce);
-
-			cache->ce = ce;
-		}
-
 		err = __reloc_gpu_alloc(eb, vma, len);
 		if (unlikely(err))
 			return ERR_PTR(err);
@@ -1943,15 +1891,15 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 	return err;
 }
 
-static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 {
 	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
-		return false;
+		return -EINVAL;
 
 	/* Kernel clipping was a DRI1 misfeature */
 	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
 		if (exec->num_cliprects || exec->cliprects_ptr)
-			return false;
+			return -EINVAL;
 	}
 
 	if (exec->DR4 == 0xffffffff) {
@@ -1959,12 +1907,12 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 		exec->DR4 = 0;
 	}
 	if (exec->DR1 || exec->DR4)
-		return false;
+		return -EINVAL;
 
 	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
-		return false;
+		return -EINVAL;
 
-	return true;
+	return 0;
 }
 
 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
@@ -1993,99 +1941,179 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 }
 
 static struct i915_vma *
-shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
+shadow_batch_pin(struct drm_i915_gem_object *obj,
+		 struct i915_address_space *vm,
+		 unsigned int flags)
 {
-	struct drm_i915_private *dev_priv = eb->i915;
-	struct i915_vma * const vma = *eb->vma;
-	struct i915_address_space *vm;
-	u64 flags;
-
-	/*
-	 * PPGTT backed shadow buffers must be mapped RO, to prevent
-	 * post-scan tampering
-	 */
-	if (CMDPARSER_USES_GGTT(dev_priv)) {
-		flags = PIN_GLOBAL;
-		vm = &dev_priv->ggtt.vm;
-	} else if (vma->vm->has_read_only) {
-		flags = PIN_USER;
-		vm = vma->vm;
-		i915_gem_object_set_readonly(obj);
-	} else {
-		DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
-}
-
-static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
-{
-	struct intel_engine_pool_node *pool;
 	struct i915_vma *vma;
-	u64 batch_start;
-	u64 shadow_batch_start;
 	int err;
 
-	pool = intel_engine_get_pool(eb->engine, eb->batch_len);
-	if (IS_ERR(pool))
-		return ERR_CAST(pool);
-
-	vma = shadow_batch_pin(eb, pool->obj);
+	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma))
-		goto err;
+		return vma;
 
-	batch_start = gen8_canonical_addr(eb->batch->node.start) +
-		      eb->batch_start_offset;
+	err = i915_vma_pin(vma, 0, 0, flags);
+	if (err)
+		return ERR_PTR(err);
 
-	shadow_batch_start = gen8_canonical_addr(vma->node.start);
+	return vma;
+}
 
-	err = intel_engine_cmd_parser(eb->gem_context,
-				      eb->engine,
-				      eb->batch->obj,
-				      batch_start,
-				      eb->batch_start_offset,
-				      eb->batch_len,
-				      pool->obj,
-				      shadow_batch_start);
+struct eb_parse_work {
+	struct dma_fence_work base;
+	struct intel_engine_cs *engine;
+	struct i915_vma *batch;
+	struct i915_vma *shadow;
+	struct i915_vma *trampoline;
+	unsigned int batch_offset;
+	unsigned int batch_length;
+};
 
-	if (err) {
-		i915_vma_unpin(vma);
+static int __eb_parse(struct dma_fence_work *work)
+{
+	struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
 
+	return intel_engine_cmd_parser(pw->engine,
+				       pw->batch,
+				       pw->batch_offset,
+				       pw->batch_length,
+				       pw->shadow,
+				       pw->trampoline);
+}
+
+static const struct dma_fence_work_ops eb_parse_ops = {
+	.name = "eb_parse",
+	.work = __eb_parse,
+};
+
+static int eb_parse_pipeline(struct i915_execbuffer *eb,
+			     struct i915_vma *shadow,
+			     struct i915_vma *trampoline)
+{
+	struct eb_parse_work *pw;
+	int err;
+
+	pw = kzalloc(sizeof(*pw), GFP_KERNEL);
+	if (!pw)
+		return -ENOMEM;
+
+	dma_fence_work_init(&pw->base, &eb_parse_ops);
+
+	pw->engine = eb->engine;
+	pw->batch = eb->batch;
+	pw->batch_offset = eb->batch_start_offset;
+	pw->batch_length = eb->batch_len;
+	pw->shadow = shadow;
+	pw->trampoline = trampoline;
+
+	dma_resv_lock(pw->batch->resv, NULL);
+
+	err = dma_resv_reserve_shared(pw->batch->resv, 1);
+	if (err)
+		goto err_batch_unlock;
+
+	/* Wait for all writes (and relocs) into the batch to complete */
+	err = i915_sw_fence_await_reservation(&pw->base.chain,
+					      pw->batch->resv, NULL, false,
+					      0, I915_FENCE_GFP);
+	if (err < 0)
+		goto err_batch_unlock;
+
+	/* Keep the batch alive and unwritten as we parse */
+	dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
+
+	dma_resv_unlock(pw->batch->resv);
+
+	/* Force execution to wait for completion of the parser */
+	dma_resv_lock(shadow->resv, NULL);
+	dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
+	dma_resv_unlock(shadow->resv);
+
+	dma_fence_work_commit(&pw->base);
+	return 0;
+
+err_batch_unlock:
+	dma_resv_unlock(pw->batch->resv);
+	kfree(pw);
+	return err;
+}
+
+static int eb_parse(struct i915_execbuffer *eb)
+{
+	struct intel_engine_pool_node *pool;
+	struct i915_vma *shadow, *trampoline;
+	unsigned int len;
+	int err;
+
+	if (!eb_use_cmdparser(eb))
+		return 0;
+
+	len = eb->batch_len;
+	if (!CMDPARSER_USES_GGTT(eb->i915)) {
 		/*
-		 * Unsafe GGTT-backed buffers can still be submitted safely
-		 * as non-secure.
-		 * For PPGTT backing however, we have no choice but to forcibly
-		 * reject unsafe buffers
+		 * ppGTT backed shadow buffers must be mapped RO, to prevent
+		 * post-scan tampering
 		 */
-		if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
-			/* Execute original buffer non-secure */
-			vma = NULL;
-		else
-			vma = ERR_PTR(err);
-		goto err;
+		if (!eb->context->vm->has_read_only) {
+			DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+			return -EINVAL;
+		}
+	} else {
+		len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
 	}
 
-	eb->vma[eb->buffer_count] = i915_vma_get(vma);
+	pool = intel_engine_get_pool(eb->engine, len);
+	if (IS_ERR(pool))
+		return PTR_ERR(pool);
+
+	shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER);
+	if (IS_ERR(shadow)) {
+		err = PTR_ERR(shadow);
+		goto err;
+	}
+	i915_gem_object_set_readonly(shadow->obj);
+
+	trampoline = NULL;
+	if (CMDPARSER_USES_GGTT(eb->i915)) {
+		trampoline = shadow;
+
+		shadow = shadow_batch_pin(pool->obj,
+					  &eb->engine->gt->ggtt->vm,
+					  PIN_GLOBAL);
+		if (IS_ERR(shadow)) {
+			err = PTR_ERR(shadow);
+			shadow = trampoline;
+			goto err_shadow;
+		}
+
+		eb->batch_flags |= I915_DISPATCH_SECURE;
+	}
+
+	err = eb_parse_pipeline(eb, shadow, trampoline);
+	if (err)
+		goto err_trampoline;
+
+	eb->vma[eb->buffer_count] = i915_vma_get(shadow);
 	eb->flags[eb->buffer_count] =
 		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
-	vma->exec_flags = &eb->flags[eb->buffer_count];
+	shadow->exec_flags = &eb->flags[eb->buffer_count];
 	eb->buffer_count++;
 
+	eb->trampoline = trampoline;
 	eb->batch_start_offset = 0;
-	eb->batch = vma;
+	eb->batch = shadow;
 
-	if (CMDPARSER_USES_GGTT(eb->i915))
-		eb->batch_flags |= I915_DISPATCH_SECURE;
+	shadow->private = pool;
+	return 0;
 
-	/* eb->batch_len unchanged */
-
-	vma->private = pool;
-	return vma;
-
+err_trampoline:
+	if (trampoline)
+		i915_vma_unpin(trampoline);
+err_shadow:
+	i915_vma_unpin(shadow);
 err:
 	intel_engine_pool_put(pool);
-	return vma;
+	return err;
 }
 
 static void
@@ -2134,8 +2162,18 @@ static int eb_submit(struct i915_execbuffer *eb)
 	if (err)
 		return err;
 
-	if (i915_gem_context_nopreempt(eb->gem_context))
-		eb->request->flags |= I915_REQUEST_NOPREEMPT;
+	if (eb->trampoline) {
+		GEM_BUG_ON(eb->batch_start_offset);
+		err = eb->engine->emit_bb_start(eb->request,
+						eb->trampoline->node.start +
+						eb->batch_len,
+						0, 0);
+		if (err)
+			return err;
+	}
+
+	if (intel_context_nopreempt(eb->context))
+		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
 
 	return 0;
 }
@@ -2220,6 +2258,9 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
 	if (err)
 		return err;
 
+	if (unlikely(intel_context_is_banned(ce)))
+		return -EIO;
+
 	/*
 	 * Pinning the contexts may generate requests in order to acquire
 	 * GGTT space, so do this first before we reserve a seqno for
@@ -2515,6 +2556,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	eb.buffer_count = args->buffer_count;
 	eb.batch_start_offset = args->batch_start_offset;
 	eb.batch_len = args->batch_len;
+	eb.trampoline = NULL;
 
 	eb.batch_flags = 0;
 	if (args->flags & I915_EXEC_SECURE) {
@@ -2606,15 +2648,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (eb.batch_len == 0)
 		eb.batch_len = eb.batch->size - eb.batch_start_offset;
 
-	if (eb_use_cmdparser(&eb)) {
-		struct i915_vma *vma;
-
-		vma = eb_parse(&eb);
-		if (IS_ERR(vma)) {
-			err = PTR_ERR(vma);
-			goto err_vma;
-		}
-	}
+	err = eb_parse(&eb);
+	if (err)
+		goto err_vma;
 
 	/*
 	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
@@ -2720,6 +2756,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_vma:
 	if (eb.exec)
 		eb_release_vmas(&eb);
+	if (eb.trampoline)
+		i915_vma_unpin(eb.trampoline);
 	mutex_unlock(&dev->struct_mutex);
 err_engine:
 	eb_unpin_engine(&eb);
@@ -2789,8 +2827,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
 	exec2.flags = I915_EXEC_RENDER;
 	i915_execbuffer2_set_context_id(exec2, 0);
 
-	if (!i915_gem_check_execbuffer(&exec2))
-		return -EINVAL;
+	err = i915_gem_check_execbuffer(&exec2);
+	if (err)
+		return err;
 
 	/* Copy in the exec list from userland */
 	exec_list = kvmalloc_array(count, sizeof(*exec_list),
@@ -2867,8 +2906,9 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	if (!i915_gem_check_execbuffer(args))
-		return -EINVAL;
+	err = i915_gem_check_execbuffer(args);
+	if (err)
+		return err;
 
 	/* Allocate an extra slot for use by the command parser */
 	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
index ddc7f2a..87d8b27 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -28,8 +28,8 @@ int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file);
 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
-int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
-			    struct drm_file *file);
+int i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file);
 int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file);
 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 0e2bf6b..70543c8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -16,46 +16,6 @@ const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
 	.release = i915_gem_object_release_memory_region,
 };
 
-/* XXX: Time to vfunc your life up? */
-void __iomem *
-i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
-				 unsigned long n)
-{
-	resource_size_t offset;
-
-	offset = i915_gem_object_get_dma_address(obj, n);
-	offset -= obj->mm.region->region.start;
-
-	return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
-}
-
-void __iomem *
-i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
-					unsigned long n)
-{
-	resource_size_t offset;
-
-	offset = i915_gem_object_get_dma_address(obj, n);
-	offset -= obj->mm.region->region.start;
-
-	return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
-}
-
-void __iomem *
-i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
-			    unsigned long n,
-			    unsigned long size)
-{
-	resource_size_t offset;
-
-	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
-
-	offset = i915_gem_object_get_dma_address(obj, n);
-	offset -= obj->mm.region->region.start;
-
-	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
-}
-
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 {
 	return obj->ops == &i915_gem_lmem_obj_ops;
@@ -79,9 +39,6 @@ __i915_gem_lmem_object_create(struct intel_memory_region *mem,
 	struct drm_i915_private *i915 = mem->i915;
 	struct drm_i915_gem_object *obj;
 
-	if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
-		return ERR_PTR(-E2BIG);
-
 	obj = i915_gem_object_alloc();
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 7c176b8..fc3f155 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -14,14 +14,6 @@ struct intel_memory_region;
 
 extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
 
-void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
-					  unsigned long n, unsigned long size);
-void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
-					       unsigned long n);
-void __iomem *
-i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
-					unsigned long n);
-
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index e300284..b9fdac2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -4,7 +4,9 @@
  * Copyright © 2014-2016 Intel Corporation
  */
 
+#include <linux/anon_inodes.h>
 #include <linux/mman.h>
+#include <linux/pfn_t.h>
 #include <linux/sizes.h>
 
 #include "gt/intel_gt.h"
@@ -14,7 +16,9 @@
 #include "i915_gem_gtt.h"
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
+#include "i915_gem_mman.h"
 #include "i915_trace.h"
+#include "i915_user_extensions.h"
 #include "i915_vma.h"
 
 static inline bool
@@ -144,6 +148,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
  * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
  *     pagefault; swapin remains transparent.
  *
+ * 4 - Support multiple fault handlers per object depending on object's
+ *     backing storage (a.k.a. MMAP_OFFSET).
+ *
  * Restrictions:
  *
  *  * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -171,7 +178,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
  */
 int i915_gem_mmap_gtt_version(void)
 {
-	return 3;
+	return 4;
 }
 
 static inline struct i915_ggtt_view
@@ -197,29 +204,80 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
 	return view;
 }
 
-/**
- * i915_gem_fault - fault a page into the GTT
- * @vmf: fault info
- *
- * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
- * from userspace.  The fault handler takes care of binding the object to
- * the GTT (if needed), allocating and programming a fence register (again,
- * only if needed based on whether the old reg is still valid or the object
- * is tiled) and inserting a new PTE into the faulting process.
- *
- * Note that the faulting process may involve evicting existing objects
- * from the GTT and/or fence registers to make room.  So performance may
- * suffer if the GTT working set is large or there are few fence registers
- * left.
- *
- * The current feature set supported by i915_gem_fault() and thus GTT mmaps
- * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
- */
-vm_fault_t i915_gem_fault(struct vm_fault *vmf)
+static vm_fault_t i915_error_to_vmf_fault(int err)
+{
+	switch (err) {
+	default:
+		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
+		/* fallthrough */
+	case -EIO: /* shmemfs failure from swap device */
+	case -EFAULT: /* purged object */
+	case -ENODEV: /* bad object, how did you get here! */
+	case -ENXIO: /* unable to access backing store (on device) */
+		return VM_FAULT_SIGBUS;
+
+	case -ENOSPC: /* shmemfs allocation failure */
+	case -ENOMEM: /* our allocation failure */
+		return VM_FAULT_OOM;
+
+	case 0:
+	case -EAGAIN:
+	case -ERESTARTSYS:
+	case -EINTR:
+	case -EBUSY:
+		/*
+		 * EBUSY is ok: this just means that another thread
+		 * already did the job.
+		 */
+		return VM_FAULT_NOPAGE;
+	}
+}
+
+static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
+{
+	struct vm_area_struct *area = vmf->vma;
+	struct i915_mmap_offset *mmo = area->vm_private_data;
+	struct drm_i915_gem_object *obj = mmo->obj;
+	resource_size_t iomap;
+	int err;
+
+	/* Sanity check that we allow writing into this object */
+	if (unlikely(i915_gem_object_is_readonly(obj) &&
+		     area->vm_flags & VM_WRITE))
+		return VM_FAULT_SIGBUS;
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto out;
+
+	iomap = -1;
+	if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
+		iomap = obj->mm.region->iomap.base;
+		iomap -= obj->mm.region->region.start;
+	}
+
+	/* PTEs are revoked in obj->ops->put_pages() */
+	err = remap_io_sg(area,
+			  area->vm_start, area->vm_end - area->vm_start,
+			  obj->mm.pages->sgl, iomap);
+
+	if (area->vm_flags & VM_WRITE) {
+		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+		obj->mm.dirty = true;
+	}
+
+	i915_gem_object_unpin_pages(obj);
+
+out:
+	return i915_error_to_vmf_fault(err);
+}
+
+static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
 	struct vm_area_struct *area = vmf->vma;
-	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
+	struct i915_mmap_offset *mmo = area->vm_private_data;
+	struct drm_i915_gem_object *obj = mmo->obj;
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *i915 = to_i915(dev);
 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
@@ -312,6 +370,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
 	mutex_unlock(&i915->ggtt.vm.mutex);
 
+	/* Track the mmo associated with the fenced vma */
+	vma->mmo = mmo;
+
 	if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
 		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
@@ -332,67 +393,36 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	intel_runtime_pm_put(rpm, wakeref);
 	i915_gem_object_unpin_pages(obj);
 err:
-	switch (ret) {
-	default:
-		WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
-		/* fallthrough */
-	case -EIO: /* shmemfs failure from swap device */
-	case -EFAULT: /* purged object */
-	case -ENODEV: /* bad object, how did you get here! */
-		return VM_FAULT_SIGBUS;
-
-	case -ENOSPC: /* shmemfs allocation failure */
-	case -ENOMEM: /* our allocation failure */
-		return VM_FAULT_OOM;
-
-	case 0:
-	case -EAGAIN:
-	case -ERESTARTSYS:
-	case -EINTR:
-	case -EBUSY:
-		/*
-		 * EBUSY is ok: this just means that another thread
-		 * already did the job.
-		 */
-		return VM_FAULT_NOPAGE;
-	}
+	return i915_error_to_vmf_fault(ret);
 }
 
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
 
 	GEM_BUG_ON(!obj->userfault_count);
 
-	obj->userfault_count = 0;
-	list_del(&obj->userfault_link);
-	drm_vma_node_unmap(&obj->base.vma_node,
-			   obj->base.dev->anon_inode->i_mapping);
-
 	for_each_ggtt_vma(vma, obj)
-		i915_vma_unset_userfault(vma);
+		i915_vma_revoke_mmap(vma);
+
+	GEM_BUG_ON(obj->userfault_count);
 }
 
-/**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- *
+/*
  * It is vital that we remove the page mapping if we have mapped a tiled
  * object through the GTT and then lose the fence register due to
  * resource pressure. Similarly if the object has been moved out of the
  * aperture, than pages mapped into userspace must be revoked. Removing the
  * mapping will then trigger a page fault on the next user access, allowing
- * fixup by i915_gem_fault().
+ * fixup by vm_fault_gtt().
  */
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	intel_wakeref_t wakeref;
 
-	/* Serialisation between user GTT access and our code depends upon
+	/*
+	 * Serialisation between user GTT access and our code depends upon
 	 * revoking the CPU's PTE whilst the mutex is held. The next user
 	 * pagefault then has to wait until we release the mutex.
 	 *
@@ -406,9 +436,10 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 	if (!obj->userfault_count)
 		goto out;
 
-	__i915_gem_object_release_mmap(obj);
+	__i915_gem_object_release_mmap_gtt(obj);
 
-	/* Ensure that the CPU's PTE are revoked and there are not outstanding
+	/*
+	 * Ensure that the CPU's PTE are revoked and there are not outstanding
 	 * memory transactions from userspace before we return. The TLB
 	 * flushing implied above by changing the PTE above *should* be
 	 * sufficient, an extra barrier here just provides us with a bit
@@ -422,54 +453,151 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
-static int create_mmap_offset(struct drm_i915_gem_object *obj)
+void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct intel_gt *gt = &i915->gt;
-	int err;
+	struct i915_mmap_offset *mmo;
 
-	err = drm_gem_create_mmap_offset(&obj->base);
-	if (likely(!err))
-		return 0;
+	spin_lock(&obj->mmo.lock);
+	list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+		/*
+		 * vma_node_unmap for GTT mmaps handled already in
+		 * __i915_gem_object_release_mmap_gtt
+		 */
+		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
+			continue;
 
-	/* Attempt to reap some mmap space from dead objects */
-	err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT);
-	if (err)
-		return err;
-
-	i915_gem_drain_freed_objects(i915);
-	return drm_gem_create_mmap_offset(&obj->base);
+		spin_unlock(&obj->mmo.lock);
+		drm_vma_node_unmap(&mmo->vma_node,
+				   obj->base.dev->anon_inode->i_mapping);
+		spin_lock(&obj->mmo.lock);
+	}
+	spin_unlock(&obj->mmo.lock);
 }
 
-int
-i915_gem_mmap_gtt(struct drm_file *file,
-		  struct drm_device *dev,
-		  u32 handle,
-		  u64 *offset)
+/**
+ * i915_gem_object_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ */
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_release_mmap_gtt(obj);
+	i915_gem_object_release_mmap_offset(obj);
+}
+
+static struct i915_mmap_offset *
+mmap_offset_attach(struct drm_i915_gem_object *obj,
+		   enum i915_mmap_type mmap_type,
+		   struct drm_file *file)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_mmap_offset *mmo;
+	int err;
+
+	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
+	if (!mmo)
+		return ERR_PTR(-ENOMEM);
+
+	mmo->obj = obj;
+	mmo->dev = obj->base.dev;
+	mmo->file = file;
+	mmo->mmap_type = mmap_type;
+	drm_vma_node_reset(&mmo->vma_node);
+
+	err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
+				 obj->base.size / PAGE_SIZE);
+	if (likely(!err))
+		goto out;
+
+	/* Attempt to reap some mmap space from dead objects */
+	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
+	if (err)
+		goto err;
+
+	i915_gem_drain_freed_objects(i915);
+	err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
+				 obj->base.size / PAGE_SIZE);
+	if (err)
+		goto err;
+
+out:
+	if (file)
+		drm_vma_node_allow(&mmo->vma_node, file);
+
+	spin_lock(&obj->mmo.lock);
+	list_add(&mmo->offset, &obj->mmo.offsets);
+	spin_unlock(&obj->mmo.lock);
+
+	return mmo;
+
+err:
+	kfree(mmo);
+	return ERR_PTR(err);
+}
+
+static int
+__assign_mmap_offset(struct drm_file *file,
+		     u32 handle,
+		     enum i915_mmap_type mmap_type,
+		     u64 *offset)
 {
 	struct drm_i915_gem_object *obj;
-	int ret;
+	struct i915_mmap_offset *mmo;
+	int err;
 
 	obj = i915_gem_object_lookup(file, handle);
 	if (!obj)
 		return -ENOENT;
 
-	if (i915_gem_object_never_bind_ggtt(obj)) {
-		ret = -ENODEV;
+	if (mmap_type == I915_MMAP_TYPE_GTT &&
+	    i915_gem_object_never_bind_ggtt(obj)) {
+		err = -ENODEV;
 		goto out;
 	}
 
-	ret = create_mmap_offset(obj);
-	if (ret == 0)
-		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+	if (mmap_type != I915_MMAP_TYPE_GTT &&
+	    !i915_gem_object_type_has(obj,
+				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+				      I915_GEM_OBJECT_HAS_IOMEM)) {
+		err = -ENODEV;
+		goto out;
+	}
 
+	mmo = mmap_offset_attach(obj, mmap_type, file);
+	if (IS_ERR(mmo)) {
+		err = PTR_ERR(mmo);
+		goto out;
+	}
+
+	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
+	err = 0;
 out:
 	i915_gem_object_put(obj);
-	return ret;
+	return err;
+}
+
+int
+i915_gem_dumb_mmap_offset(struct drm_file *file,
+			  struct drm_device *dev,
+			  u32 handle,
+			  u64 *offset)
+{
+	enum i915_mmap_type mmap_type;
+
+	if (boot_cpu_has(X86_FEATURE_PAT))
+		mmap_type = I915_MMAP_TYPE_WC;
+	else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
+		return -ENODEV;
+	else
+		mmap_type = I915_MMAP_TYPE_GTT;
+
+	return __assign_mmap_offset(file, handle, mmap_type, offset);
 }
 
 /**
- * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
  * @dev: DRM device
  * @data: GTT mapping ioctl data
  * @file: GEM object info
@@ -484,12 +612,237 @@ i915_gem_mmap_gtt(struct drm_file *file,
  * userspace.
  */
 int
-i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
-			struct drm_file *file)
+i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file)
 {
-	struct drm_i915_gem_mmap_gtt *args = data;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct drm_i915_gem_mmap_offset *args = data;
+	enum i915_mmap_type type;
+	int err;
 
-	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+	/*
+	 * Historically we failed to check args.pad and args.offset
+	 * and so we cannot use those fields for user input and we cannot
+	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
+	 * may be feeding in garbage in those fields.
+	 *
+	 * if (args->pad) return -EINVAL; is verboten!
+	 */
+
+	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
+				   NULL, 0, NULL);
+	if (err)
+		return err;
+
+	switch (args->flags) {
+	case I915_MMAP_OFFSET_GTT:
+		if (!i915_ggtt_has_aperture(&i915->ggtt))
+			return -ENODEV;
+		type = I915_MMAP_TYPE_GTT;
+		break;
+
+	case I915_MMAP_OFFSET_WC:
+		if (!boot_cpu_has(X86_FEATURE_PAT))
+			return -ENODEV;
+		type = I915_MMAP_TYPE_WC;
+		break;
+
+	case I915_MMAP_OFFSET_WB:
+		type = I915_MMAP_TYPE_WB;
+		break;
+
+	case I915_MMAP_OFFSET_UC:
+		if (!boot_cpu_has(X86_FEATURE_PAT))
+			return -ENODEV;
+		type = I915_MMAP_TYPE_UC;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return __assign_mmap_offset(file, args->handle, type, &args->offset);
+}
+
+static void vm_open(struct vm_area_struct *vma)
+{
+	struct i915_mmap_offset *mmo = vma->vm_private_data;
+	struct drm_i915_gem_object *obj = mmo->obj;
+
+	GEM_BUG_ON(!obj);
+	i915_gem_object_get(obj);
+}
+
+static void vm_close(struct vm_area_struct *vma)
+{
+	struct i915_mmap_offset *mmo = vma->vm_private_data;
+	struct drm_i915_gem_object *obj = mmo->obj;
+
+	GEM_BUG_ON(!obj);
+	i915_gem_object_put(obj);
+}
+
+static const struct vm_operations_struct vm_ops_gtt = {
+	.fault = vm_fault_gtt,
+	.open = vm_open,
+	.close = vm_close,
+};
+
+static const struct vm_operations_struct vm_ops_cpu = {
+	.fault = vm_fault_cpu,
+	.open = vm_open,
+	.close = vm_close,
+};
+
+static int singleton_release(struct inode *inode, struct file *file)
+{
+	struct drm_i915_private *i915 = file->private_data;
+
+	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
+	drm_dev_put(&i915->drm);
+
+	return 0;
+}
+
+static const struct file_operations singleton_fops = {
+	.owner = THIS_MODULE,
+	.release = singleton_release,
+};
+
+static struct file *mmap_singleton(struct drm_i915_private *i915)
+{
+	struct file *file;
+
+	rcu_read_lock();
+	file = i915->gem.mmap_singleton;
+	if (file && !get_file_rcu(file))
+		file = NULL;
+	rcu_read_unlock();
+	if (file)
+		return file;
+
+	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
+	if (IS_ERR(file))
+		return file;
+
+	/* Everyone shares a single global address space */
+	file->f_mapping = i915->drm.anon_inode->i_mapping;
+
+	smp_store_mb(i915->gem.mmap_singleton, file);
+	drm_dev_get(&i915->drm);
+
+	return file;
+}
+
+/*
+ * This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data, since we need to
+ * be able to resolve multiple mmap offsets which could be tied
+ * to a single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_vma_offset_node *node;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct i915_mmap_offset *mmo = NULL;
+	struct drm_gem_object *obj = NULL;
+	struct file *anon;
+
+	if (drm_dev_is_unplugged(dev))
+		return -ENODEV;
+
+	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+						  vma->vm_pgoff,
+						  vma_pages(vma));
+	if (likely(node)) {
+		mmo = container_of(node, struct i915_mmap_offset,
+				   vma_node);
+		/*
+		 * In our dependency chain, the drm_vma_offset_node
+		 * depends on the validity of the mmo, which depends on
+		 * the gem object. However the only reference we have
+		 * at this point is the mmo (as the parent of the node).
+		 * Try to check if the gem object was at least cleared.
+		 */
+		if (!mmo || !mmo->obj) {
+			drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+			return -EINVAL;
+		}
+		/*
+		 * Skip 0-refcnted objects as they are in the process of being
+		 * destroyed and will be invalid when the vma manager lock
+		 * is released.
+		 */
+		obj = &mmo->obj->base;
+		if (!kref_get_unless_zero(&obj->refcount))
+			obj = NULL;
+	}
+	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+	if (!obj)
+		return -EINVAL;
+
+	if (!drm_vma_node_is_allowed(node, priv)) {
+		drm_gem_object_put_unlocked(obj);
+		return -EACCES;
+	}
+
+	if (i915_gem_object_is_readonly(to_intel_bo(obj))) {
+		if (vma->vm_flags & VM_WRITE) {
+			drm_gem_object_put_unlocked(obj);
+			return -EINVAL;
+		}
+		vma->vm_flags &= ~VM_MAYWRITE;
+	}
+
+	anon = mmap_singleton(to_i915(obj->dev));
+	if (IS_ERR(anon)) {
+		drm_gem_object_put_unlocked(obj);
+		return PTR_ERR(anon);
+	}
+
+	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_private_data = mmo;
+
+	/*
+	 * We keep the ref on mmo->obj, not vm_file, but we require
+	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
+	 * Our userspace is accustomed to having per-file resource cleanup
+	 * (i.e. contexts, objects and requests) on their close(fd), which
+	 * requires avoiding extraneous references to their filp, hence why
+	 * we prefer to use an anonymous file for their mmaps.
+	 */
+	fput(vma->vm_file);
+	vma->vm_file = anon;
+
+	switch (mmo->mmap_type) {
+	case I915_MMAP_TYPE_WC:
+		vma->vm_page_prot =
+			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+		vma->vm_ops = &vm_ops_cpu;
+		break;
+
+	case I915_MMAP_TYPE_WB:
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+		vma->vm_ops = &vm_ops_cpu;
+		break;
+
+	case I915_MMAP_TYPE_UC:
+		vma->vm_page_prot =
+			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+		vma->vm_ops = &vm_ops_cpu;
+		break;
+
+	case I915_MMAP_TYPE_GTT:
+		vma->vm_page_prot =
+			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+		vma->vm_ops = &vm_ops_gtt;
+		break;
+	}
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
+	return 0;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
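The ioctl above replaces the single GTT-only fake offset with one per mapping type (GTT, WC, WB, UC). A minimal userspace sketch of the new path, assuming the uapi additions that accompany this change in include/uapi/drm/i915_drm.h (struct drm_i915_gem_mmap_offset, DRM_IOCTL_I915_GEM_MMAP_OFFSET and the I915_MMAP_OFFSET_* flags); map_bo_wc and drm_fd are illustrative names.

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/i915_drm.h>

    /* Illustrative only: ask for a write-combining mmap offset for a GEM
     * handle, then map it. The returned offset is a fake pgoff cookie
     * resolved by i915_gem_mmap() above, not a physical address. */
    static void *map_bo_wc(int drm_fd, uint32_t handle, size_t size)
    {
            struct drm_i915_gem_mmap_offset arg;

            memset(&arg, 0, sizeof(arg));
            arg.handle = handle;
            arg.flags = I915_MMAP_OFFSET_WC;

            if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
                    return MAP_FAILED;

            return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        drm_fd, arg.offset);
    }

The WC/WB/UC types are served by vm_ops_cpu and remap the object's own pages, so they do not depend on a mappable GTT aperture; only I915_MMAP_OFFSET_GTT does, which is what the i915_ggtt_has_aperture() check in the ioctl enforces.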
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
new file mode 100644
index 0000000..862e01b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -0,0 +1,31 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_MMAN_H__
+#define __I915_GEM_MMAN_H__
+
+#include <linux/mm_types.h>
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_file;
+struct drm_i915_gem_object;
+struct file;
+struct i915_mmap_offset;
+struct mutex;
+
+int i915_gem_mmap_gtt_version(void);
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
+			      struct drm_device *dev,
+			      u32 handle, u64 *offset);
+
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index a596548..46bacc8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -22,11 +22,14 @@
  *
  */
 
+#include <linux/sched/mm.h>
+
 #include "display/intel_frontbuffer.h"
 #include "gt/intel_gt.h"
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
+#include "i915_gem_mman.h"
 #include "i915_gem_object.h"
 #include "i915_globals.h"
 #include "i915_trace.h"
@@ -59,6 +62,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
 	INIT_LIST_HEAD(&obj->lut_list);
 
+	spin_lock_init(&obj->mmo.lock);
+	INIT_LIST_HEAD(&obj->mmo.offsets);
+
 	init_rcu_head(&obj->rcu);
 
 	obj->ops = ops;
@@ -95,6 +101,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 	struct drm_i915_gem_object *obj = to_intel_bo(gem);
 	struct drm_i915_file_private *fpriv = file->driver_priv;
 	struct i915_lut_handle *lut, *ln;
+	struct i915_mmap_offset *mmo;
 	LIST_HEAD(close);
 
 	i915_gem_object_lock(obj);
@@ -109,6 +116,17 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 	}
 	i915_gem_object_unlock(obj);
 
+	spin_lock(&obj->mmo.lock);
+	list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+		if (mmo->file != file)
+			continue;
+
+		spin_unlock(&obj->mmo.lock);
+		drm_vma_node_revoke(&mmo->vma_node, file);
+		spin_lock(&obj->mmo.lock);
+	}
+	spin_unlock(&obj->mmo.lock);
+
 	list_for_each_entry_safe(lut, ln, &close, obj_link) {
 		struct i915_gem_context *ctx = lut->ctx;
 		struct i915_vma *vma;
@@ -156,6 +174,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	llist_for_each_entry_safe(obj, on, freed, freed) {
+		struct i915_mmap_offset *mmo, *mn;
+
 		trace_i915_gem_object_destroy(obj);
 
 		if (!list_empty(&obj->vma.list)) {
@@ -174,19 +194,28 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 				GEM_BUG_ON(vma->obj != obj);
 				spin_unlock(&obj->vma.lock);
 
-				i915_vma_destroy(vma);
+				__i915_vma_put(vma);
 
 				spin_lock(&obj->vma.lock);
 			}
 			spin_unlock(&obj->vma.lock);
 		}
 
+		i915_gem_object_release_mmap(obj);
+
+		list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) {
+			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+					      &mmo->vma_node);
+			kfree(mmo);
+		}
+		INIT_LIST_HEAD(&obj->mmo.offsets);
+
 		GEM_BUG_ON(atomic_read(&obj->bind_count));
 		GEM_BUG_ON(obj->userfault_count);
 		GEM_BUG_ON(!list_empty(&obj->lut_list));
 
 		atomic_set(&obj->mm.pages_pin_count, 0);
-		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+		__i915_gem_object_put_pages(obj);
 		GEM_BUG_ON(i915_gem_object_has_pages(obj));
 		bitmap_free(obj->bit_17);
 
@@ -277,18 +306,14 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
 
 	switch (obj->write_domain) {
 	case I915_GEM_DOMAIN_GTT:
-		for_each_ggtt_vma(vma, obj)
-			intel_gt_flush_ggtt_writes(vma->vm->gt);
+		spin_lock(&obj->vma.lock);
+		for_each_ggtt_vma(vma, obj) {
+			if (i915_vma_unset_ggtt_write(vma))
+				intel_gt_flush_ggtt_writes(vma->vm->gt);
+		}
+		spin_unlock(&obj->vma.lock);
 
 		i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
-
-		for_each_ggtt_vma(vma, obj) {
-			if (vma->iomap)
-				continue;
-
-			i915_vma_unset_ggtt_write(vma);
-		}
-
 		break;
 
 	case I915_GEM_DOMAIN_WC:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 4b93591..db70a33 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -16,6 +16,7 @@
 #include "display/intel_frontbuffer.h"
 #include "i915_gem_object_types.h"
 #include "i915_gem_gtt.h"
+#include "i915_vma_types.h"
 
 void i915_gem_init__objects(struct drm_i915_private *i915);
 
@@ -132,13 +133,13 @@ void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
 static inline void
 i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
 {
-	obj->base.vma_node.readonly = true;
+	obj->flags |= I915_BO_READONLY;
 }
 
 static inline bool
 i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
 {
-	return obj->base.vma_node.readonly;
+	return obj->flags & I915_BO_READONLY;
 }
 
 static inline bool
@@ -271,10 +272,27 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
+	I915_MM_NORMAL = 0,
+	/*
+	 * Only used by struct_mutex, when called "recursively" from
+	 * direct-reclaim-esque paths. Safe because there is only ever one
+	 * struct_mutex in the entire system.
+	 */
+	I915_MM_SHRINKER = 1,
+	/*
+	 * Used for obj->mm.lock when allocating pages. Safe because the object
+	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
+	 * it. As soon as the object has pages, obj->mm.lock nests within
+	 * fs_reclaim.
+	 */
+	I915_MM_GET_PAGES = 1,
+};
+
 static inline int __must_check
 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-	might_lock(&obj->mm.lock);
+	might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
 
 	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
 		return 0;
@@ -317,13 +335,7 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 	__i915_gem_object_unpin_pages(obj);
 }
 
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
-	I915_MM_NORMAL = 0,
-	I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
-};
-
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-				enum i915_mm_subclass subclass);
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
 
@@ -376,9 +388,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
 	i915_gem_object_unpin_pages(obj);
 }
 
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
-
 void
 i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
 				   unsigned int flush_domains);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index e3f3944..88e2686 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -63,6 +63,23 @@ struct drm_i915_gem_object_ops {
 	void (*release)(struct drm_i915_gem_object *obj);
 };
 
+enum i915_mmap_type {
+	I915_MMAP_TYPE_GTT = 0,
+	I915_MMAP_TYPE_WC,
+	I915_MMAP_TYPE_WB,
+	I915_MMAP_TYPE_UC,
+};
+
+struct i915_mmap_offset {
+	struct drm_device *dev;
+	struct drm_vma_offset_node vma_node;
+	struct drm_i915_gem_object *obj;
+	struct drm_file *file;
+	enum i915_mmap_type mmap_type;
+
+	struct list_head offset;
+};
+
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
 
@@ -118,12 +135,18 @@ struct drm_i915_gem_object {
 	unsigned int userfault_count;
 	struct list_head userfault_link;
 
+	struct {
+		spinlock_t lock; /* Protects access to mmo offsets */
+		struct list_head offsets;
+	} mmo;
+
 	I915_SELFTEST_DECLARE(struct list_head st_link);
 
 	unsigned long flags;
 #define I915_BO_ALLOC_CONTIGUOUS BIT(0)
 #define I915_BO_ALLOC_VOLATILE   BIT(1)
 #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
+#define I915_BO_READONLY         BIT(2)
 
 	/*
 	 * Is the object to be mapped as read-only to the GPU
@@ -162,7 +185,11 @@ struct drm_i915_gem_object {
 	atomic_t bind_count;
 
 	struct {
-		struct mutex lock; /* protects the pages and their use */
+		/*
+		 * Protects the pages and their use. Do not use directly, but
+		 * instead go through the pin/unpin interfaces.
+		 */
+		struct mutex lock;
 		atomic_t pages_pin_count;
 		atomic_t shrink_pin;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 29f4c28..54aca5c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -8,6 +8,7 @@
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 #include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 				 struct sg_table *pages,
@@ -106,7 +107,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
 	int err;
 
-	err = mutex_lock_interruptible(&obj->mm.lock);
+	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
 	if (err)
 		return err;
 
@@ -157,9 +158,7 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 
 static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
 {
-	if (i915_gem_object_is_lmem(obj))
-		io_mapping_unmap((void __force __iomem *)ptr);
-	else if (is_vmalloc_addr(ptr))
+	if (is_vmalloc_addr(ptr))
 		vunmap(ptr);
 	else
 		kunmap(kmap_to_page(ptr));
@@ -190,8 +189,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 	return pages;
 }
 
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-				enum i915_mm_subclass subclass)
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 {
 	struct sg_table *pages;
 	int err;
@@ -202,12 +200,14 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	GEM_BUG_ON(atomic_read(&obj->bind_count));
 
 	/* May be called by shrinker from within get_pages() (on another bo) */
-	mutex_lock_nested(&obj->mm.lock, subclass);
+	mutex_lock(&obj->mm.lock);
 	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
 		err = -EBUSY;
 		goto unlock;
 	}
 
+	i915_gem_object_release_mmap_offset(obj);
+
 	/*
 	 * ->put_pages might need to allocate memory for the bit17 swizzle
 	 * array, hence protect them from being reaped by removing them from gtt
@@ -234,46 +234,44 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	return err;
 }
 
+static inline pte_t iomap_pte(resource_size_t base,
+			      dma_addr_t offset,
+			      pgprot_t prot)
+{
+	return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
+}
+
 /* The 'mapping' part of i915_gem_object_pin_map() below */
 static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
 				 enum i915_map_type type)
 {
-	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+	unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
 	struct sg_table *sgt = obj->mm.pages;
-	struct sgt_iter sgt_iter;
-	struct page *page;
-	struct page *stack_pages[32];
-	struct page **pages = stack_pages;
-	unsigned long i = 0;
+	pte_t *stack[32], **mem;
+	struct vm_struct *area;
 	pgprot_t pgprot;
-	void *addr;
 
-	if (i915_gem_object_is_lmem(obj)) {
-		void __iomem *io;
-
-		if (type != I915_MAP_WC)
-			return NULL;
-
-		io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
-		return (void __force *)io;
-	}
+	if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
+		return NULL;
 
 	/* A single page can always be kmapped */
-	if (n_pages == 1 && type == I915_MAP_WB)
+	if (n_pte == 1 && type == I915_MAP_WB)
 		return kmap(sg_page(sgt->sgl));
 
-	if (n_pages > ARRAY_SIZE(stack_pages)) {
+	mem = stack;
+	if (n_pte > ARRAY_SIZE(stack)) {
 		/* Too big for stack -- allocate temporary array instead */
-		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
-		if (!pages)
+		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
+		if (!mem)
 			return NULL;
 	}
 
-	for_each_sgt_page(page, sgt_iter, sgt)
-		pages[i++] = page;
-
-	/* Check that we have the expected number of pages */
-	GEM_BUG_ON(i != n_pages);
+	area = alloc_vm_area(obj->base.size, mem);
+	if (!area) {
+		if (mem != stack)
+			kvfree(mem);
+		return NULL;
+	}
 
 	switch (type) {
 	default:
@@ -286,12 +284,31 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
 		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
 		break;
 	}
-	addr = vmap(pages, n_pages, 0, pgprot);
 
-	if (pages != stack_pages)
-		kvfree(pages);
+	if (i915_gem_object_has_struct_page(obj)) {
+		struct sgt_iter iter;
+		struct page *page;
+		pte_t **ptes = mem;
 
-	return addr;
+		for_each_sgt_page(page, iter, sgt)
+			**ptes++ = mk_pte(page, pgprot);
+	} else {
+		resource_size_t iomap;
+		struct sgt_iter iter;
+		pte_t **ptes = mem;
+		dma_addr_t addr;
+
+		iomap = obj->mm.region->iomap.base;
+		iomap -= obj->mm.region->region.start;
+
+		for_each_sgt_daddr(addr, iter, sgt)
+			**ptes++ = iomap_pte(iomap, addr, pgprot);
+	}
+
+	if (mem != stack)
+		kvfree(mem);
+
+	return area->addr;
 }
 
 /* get, pin, and map the pages of the object into kernel space */
@@ -308,7 +325,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	if (!i915_gem_object_type_has(obj, flags))
 		return ERR_PTR(-ENXIO);
 
-	err = mutex_lock_interruptible(&obj->mm.lock);
+	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
 	if (err)
 		return ERR_PTR(err);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 8043ff6..b1b7c1b3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -164,7 +164,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 	if (err)
 		return err;
 
-	mutex_lock(&obj->mm.lock);
+	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
 
 	if (obj->mm.madv != I915_MADV_WILLNEED) {
 		err = -EFAULT;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index f88ee13..c8264eb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -13,7 +13,7 @@
 
 void i915_gem_suspend(struct drm_i915_private *i915)
 {
-	GEM_TRACE("\n");
+	GEM_TRACE("%s\n", dev_name(i915->drm.dev));
 
 	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
 	flush_workqueue(i915->wq);
@@ -99,30 +99,12 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
 
 void i915_gem_resume(struct drm_i915_private *i915)
 {
-	GEM_TRACE("\n");
-
-	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
-	if (intel_gt_init_hw(&i915->gt))
-		goto err_wedged;
+	GEM_TRACE("%s\n", dev_name(i915->drm.dev));
 
 	/*
 	 * As we didn't flush the kernel context before suspend, we cannot
 	 * guarantee that the context image is complete. So let's just reset
 	 * it and start again.
 	 */
-	if (intel_gt_resume(&i915->gt))
-		goto err_wedged;
-
-out_unlock:
-	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
-	return;
-
-err_wedged:
-	if (!intel_gt_is_wedged(&i915->gt)) {
-		dev_err(i915->drm.dev,
-			"Failed to re-initialize GPU, declaring it wedged!\n");
-		intel_gt_set_wedged(&i915->gt);
-	}
-	goto out_unlock;
+	intel_gt_resume(&i915->gt);
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 2f7bcfb..1515384 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -85,7 +85,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 		}
 
 		prev_end = offset + block_size;
-	};
+	}
 
 	sg_page_sizes |= sg->length;
 	sg_mark_end(sg);
@@ -107,7 +107,10 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
 {
 	INIT_LIST_HEAD(&obj->mm.blocks);
 	obj->mm.region = intel_memory_region_get(mem);
+
 	obj->flags |= flags;
+	if (obj->base.size <= mem->min_page_size)
+		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
 
 	mutex_lock(&mem->objects.lock);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4d69c3f..a2a980d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -594,6 +594,8 @@ static int init_shmem(struct intel_memory_region *mem)
 			 err);
 	}
 
+	intel_memory_region_set_name(mem, "system");
+
 	return 0; /* Don't error, we can simply fallback to the kernel mnt */
 }
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index f2418a1..f7e4b39 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -57,7 +57,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
 		flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
 
 	if (i915_gem_object_unbind(obj, flags) == 0)
-		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+		__i915_gem_object_put_pages(obj);
 
 	return !i915_gem_object_has_pages(obj);
 }
@@ -209,8 +209,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 
 			if (unsafe_drop_pages(obj, shrink)) {
 				/* May arrive from get_pages on another bo */
-				mutex_lock_nested(&obj->mm.lock,
-						  I915_MM_SHRINKER);
+				mutex_lock(&obj->mm.lock);
 				if (!i915_gem_object_has_pages(obj)) {
 					try_to_writeback(obj, shrink);
 					count += obj->base.size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index a2d49c0..451f307 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -26,48 +26,49 @@
  * for is a boon.
  */
 
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
 					 struct drm_mm_node *node, u64 size,
 					 unsigned alignment, u64 start, u64 end)
 {
 	int ret;
 
-	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+	if (!drm_mm_initialized(&i915->mm.stolen))
 		return -ENODEV;
 
 	/* WaSkipStolenMemoryFirstPage:bdw+ */
-	if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
+	if (INTEL_GEN(i915) >= 8 && start < 4096)
 		start = 4096;
 
-	mutex_lock(&dev_priv->mm.stolen_lock);
-	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
+	mutex_lock(&i915->mm.stolen_lock);
+	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
 					  size, alignment, 0,
 					  start, end, DRM_MM_INSERT_BEST);
-	mutex_unlock(&dev_priv->mm.stolen_lock);
+	mutex_unlock(&i915->mm.stolen_lock);
 
 	return ret;
 }
 
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
 				struct drm_mm_node *node, u64 size,
 				unsigned alignment)
 {
-	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
+	return i915_gem_stolen_insert_node_in_range(i915, node, size,
 						    alignment, 0, U64_MAX);
 }
 
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
 				 struct drm_mm_node *node)
 {
-	mutex_lock(&dev_priv->mm.stolen_lock);
+	mutex_lock(&i915->mm.stolen_lock);
 	drm_mm_remove_node(node);
-	mutex_unlock(&dev_priv->mm.stolen_lock);
+	mutex_unlock(&i915->mm.stolen_lock);
 }
 
-static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
+static int i915_adjust_stolen(struct drm_i915_private *i915,
 			      struct resource *dsm)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
 	struct resource *r;
 
 	if (dsm->start == 0 || dsm->end <= dsm->start)
@@ -79,14 +80,14 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
 	 */
 
 	/* Make sure we don't clobber the GTT if it's within stolen memory */
-	if (INTEL_GEN(dev_priv) <= 4 &&
-	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
+	if (INTEL_GEN(i915) <= 4 &&
+	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
 		struct resource stolen[2] = {*dsm, *dsm};
 		struct resource ggtt_res;
 		resource_size_t ggtt_start;
 
-		ggtt_start = I915_READ(PGTBL_CTL);
-		if (IS_GEN(dev_priv, 4))
+		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
+		if (IS_GEN(i915, 4))
 			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
 				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
 		else
@@ -120,7 +121,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
 	 * kernel. So if the region is already marked as busy, something
 	 * is seriously wrong.
 	 */
-	r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
+	r = devm_request_mem_region(i915->drm.dev, dsm->start,
 				    resource_size(dsm),
 				    "Graphics Stolen Memory");
 	if (r == NULL) {
@@ -133,14 +134,14 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
 		 * reservation starting from 1 instead of 0.
 		 * There's also BIOS with off-by-one on the other end.
 		 */
-		r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
+		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
 					    resource_size(dsm) - 2,
 					    "Graphics Stolen Memory");
 		/*
 		 * GEN3 firmware likes to smash pci bridges into the stolen
 		 * range. Apparently this works.
 		 */
-		if (r == NULL && !IS_GEN(dev_priv, 3)) {
+		if (!r && !IS_GEN(i915, 3)) {
 			DRM_ERROR("conflict detected with stolen region: %pR\n",
 				  dsm);
 
@@ -151,25 +152,27 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
 	return 0;
 }
 
-static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
+static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
 {
-	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+	if (!drm_mm_initialized(&i915->mm.stolen))
 		return;
 
-	drm_mm_takedown(&dev_priv->mm.stolen);
+	drm_mm_takedown(&i915->mm.stolen);
 }
 
-static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
+				    struct intel_uncore *uncore,
 				    resource_size_t *base,
 				    resource_size_t *size)
 {
-	u32 reg_val = I915_READ(IS_GM45(dev_priv) ?
-				CTG_STOLEN_RESERVED :
-				ELK_STOLEN_RESERVED);
-	resource_size_t stolen_top = dev_priv->dsm.end + 1;
+	u32 reg_val = intel_uncore_read(uncore,
+					IS_GM45(i915) ?
+					CTG_STOLEN_RESERVED :
+					ELK_STOLEN_RESERVED);
+	resource_size_t stolen_top = i915->dsm.end + 1;
 
 	DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
-			 IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);
+			 IS_GM45(i915) ? "CTG" : "ELK", reg_val);
 
 	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
 		return;
@@ -178,7 +181,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
 	 * Whether ILK really reuses the ELK register for this is unclear.
 	 * Let's see if we catch anyone with this supposedly enabled on ILK.
 	 */
-	WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
+	WARN(IS_GEN(i915, 5), "ILK stolen reserved found? 0x%08x\n",
 	     reg_val);
 
 	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
@@ -190,11 +193,12 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
 	*size = stolen_top - *base;
 }
 
-static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
+				     struct intel_uncore *uncore,
 				     resource_size_t *base,
 				     resource_size_t *size)
 {
-	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
 
 	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
@@ -222,12 +226,13 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
 	}
 }
 
-static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
+				    struct intel_uncore *uncore,
 				    resource_size_t *base,
 				    resource_size_t *size)
 {
-	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
-	resource_size_t stolen_top = dev_priv->dsm.end + 1;
+	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
+	resource_size_t stolen_top = i915->dsm.end + 1;
 
 	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
@@ -250,11 +255,12 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
 	*base = stolen_top - *size;
 }
 
-static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
+				     struct intel_uncore *uncore,
 				     resource_size_t *base,
 				     resource_size_t *size)
 {
-	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
 
 	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
@@ -276,11 +282,12 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
 	}
 }
 
-static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void chv_get_stolen_reserved(struct drm_i915_private *i915,
+				    struct intel_uncore *uncore,
 				    resource_size_t *base,
 				    resource_size_t *size)
 {
-	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
 
 	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
@@ -308,12 +315,13 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
 	}
 }
 
-static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
+				    struct intel_uncore *uncore,
 				    resource_size_t *base,
 				    resource_size_t *size)
 {
-	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
-	resource_size_t stolen_top = dev_priv->dsm.end + 1;
+	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
+	resource_size_t stolen_top = i915->dsm.end + 1;
 
 	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
@@ -328,10 +336,11 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
 }
 
 static void icl_get_stolen_reserved(struct drm_i915_private *i915,
+				    struct intel_uncore *uncore,
 				    resource_size_t *base,
 				    resource_size_t *size)
 {
-	u64 reg_val = intel_uncore_read64(&i915->uncore, GEN6_STOLEN_RESERVED);
+	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);
 
 	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
 
@@ -356,22 +365,23 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
 	}
 }
 
-static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+static int i915_gem_init_stolen(struct drm_i915_private *i915)
 {
+	struct intel_uncore *uncore = &i915->uncore;
 	resource_size_t reserved_base, stolen_top;
 	resource_size_t reserved_total, reserved_size;
 
-	mutex_init(&dev_priv->mm.stolen_lock);
+	mutex_init(&i915->mm.stolen_lock);
 
-	if (intel_vgpu_active(dev_priv)) {
-		dev_notice(dev_priv->drm.dev,
+	if (intel_vgpu_active(i915)) {
+		dev_notice(i915->drm.dev,
 			   "%s, disabling use of stolen memory\n",
 			   "iGVT-g active");
 		return 0;
 	}
 
-	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
-		dev_notice(dev_priv->drm.dev,
+	if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
+		dev_notice(i915->drm.dev,
 			   "%s, disabling use of stolen memory\n",
 			   "DMAR active");
 		return 0;
@@ -380,58 +390,59 @@ static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 	if (resource_size(&intel_graphics_stolen_res) == 0)
 		return 0;
 
-	dev_priv->dsm = intel_graphics_stolen_res;
+	i915->dsm = intel_graphics_stolen_res;
 
-	if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
+	if (i915_adjust_stolen(i915, &i915->dsm))
 		return 0;
 
-	GEM_BUG_ON(dev_priv->dsm.start == 0);
-	GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);
+	GEM_BUG_ON(i915->dsm.start == 0);
+	GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);
 
-	stolen_top = dev_priv->dsm.end + 1;
+	stolen_top = i915->dsm.end + 1;
 	reserved_base = stolen_top;
 	reserved_size = 0;
 
-	switch (INTEL_GEN(dev_priv)) {
+	switch (INTEL_GEN(i915)) {
 	case 2:
 	case 3:
 		break;
 	case 4:
-		if (!IS_G4X(dev_priv))
+		if (!IS_G4X(i915))
 			break;
 		/* fall through */
 	case 5:
-		g4x_get_stolen_reserved(dev_priv,
+		g4x_get_stolen_reserved(i915, uncore,
 					&reserved_base, &reserved_size);
 		break;
 	case 6:
-		gen6_get_stolen_reserved(dev_priv,
+		gen6_get_stolen_reserved(i915, uncore,
 					 &reserved_base, &reserved_size);
 		break;
 	case 7:
-		if (IS_VALLEYVIEW(dev_priv))
-			vlv_get_stolen_reserved(dev_priv,
+		if (IS_VALLEYVIEW(i915))
+			vlv_get_stolen_reserved(i915, uncore,
 						&reserved_base, &reserved_size);
 		else
-			gen7_get_stolen_reserved(dev_priv,
+			gen7_get_stolen_reserved(i915, uncore,
 						 &reserved_base, &reserved_size);
 		break;
 	case 8:
 	case 9:
 	case 10:
-		if (IS_LP(dev_priv))
-			chv_get_stolen_reserved(dev_priv,
+		if (IS_LP(i915))
+			chv_get_stolen_reserved(i915, uncore,
 						&reserved_base, &reserved_size);
 		else
-			bdw_get_stolen_reserved(dev_priv,
+			bdw_get_stolen_reserved(i915, uncore,
 						&reserved_base, &reserved_size);
 		break;
 	default:
-		MISSING_CASE(INTEL_GEN(dev_priv));
+		MISSING_CASE(INTEL_GEN(i915));
 		/* fall-through */
 	case 11:
 	case 12:
-		icl_get_stolen_reserved(dev_priv, &reserved_base,
+		icl_get_stolen_reserved(i915, uncore,
+					&reserved_base,
 					&reserved_size);
 		break;
 	}
@@ -448,12 +459,12 @@ static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 		reserved_size = 0;
 	}
 
-	dev_priv->dsm_reserved =
-		(struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);
+	i915->dsm_reserved =
+		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);
 
-	if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
+	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
 		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
-			  &dev_priv->dsm_reserved, &dev_priv->dsm);
+			  &i915->dsm_reserved, &i915->dsm);
 		return 0;
 	}
 
@@ -462,14 +473,14 @@ static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 	reserved_total = stolen_top - reserved_base;
 
 	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
-			 (u64)resource_size(&dev_priv->dsm) >> 10,
-			 ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);
+			 (u64)resource_size(&i915->dsm) >> 10,
+			 ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);
 
-	dev_priv->stolen_usable_size =
-		resource_size(&dev_priv->dsm) - reserved_total;
+	i915->stolen_usable_size =
+		resource_size(&i915->dsm) - reserved_total;
 
 	/* Basic memrange allocator for stolen space. */
-	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);
+	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);
 
 	return 0;
 }
@@ -478,11 +489,11 @@ static struct sg_table *
 i915_pages_create_for_stolen(struct drm_device *dev,
 			     resource_size_t offset, resource_size_t size)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct sg_table *st;
 	struct scatterlist *sg;
 
-	GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));
+	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
 
 	/* We hide that we have no struct page backing our stolen object
 	 * by wrapping the contiguous physical allocation with a fake
@@ -502,7 +513,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	sg->offset = 0;
 	sg->length = size;
 
-	sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
+	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
 	sg_dma_len(sg) = size;
 
 	return st;
@@ -533,16 +544,15 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
 static void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
 
 	GEM_BUG_ON(!stolen);
 
-	i915_gem_stolen_remove_node(dev_priv, stolen);
-	kfree(stolen);
+	i915_gem_object_release_memory_region(obj);
 
-	if (obj->mm.region)
-		i915_gem_object_release_memory_region(obj);
+	i915_gem_stolen_remove_node(i915, stolen);
+	kfree(stolen);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -552,9 +562,8 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
 };
 
 static struct drm_i915_gem_object *
-__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
-				struct drm_mm_node *stolen,
-				struct intel_memory_region *mem)
+__i915_gem_object_create_stolen(struct intel_memory_region *mem,
+				struct drm_mm_node *stolen)
 {
 	static struct lock_class_key lock_class;
 	struct drm_i915_gem_object *obj;
@@ -565,20 +574,19 @@ __i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
 	if (!obj)
 		goto err;
 
-	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
+	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
 	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
 
 	obj->stolen = stolen;
 	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
-	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
+	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
 	i915_gem_object_set_cache_coherency(obj, cache_level);
 
 	err = i915_gem_object_pin_pages(obj);
 	if (err)
 		goto cleanup;
 
-	if (mem)
-		i915_gem_object_init_memory_region(obj, mem, 0);
+	i915_gem_object_init_memory_region(obj, mem, 0);
 
 	return obj;
 
@@ -593,12 +601,12 @@ _i915_gem_object_create_stolen(struct intel_memory_region *mem,
 			       resource_size_t size,
 			       unsigned int flags)
 {
-	struct drm_i915_private *dev_priv = mem->i915;
+	struct drm_i915_private *i915 = mem->i915;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
 	int ret;
 
-	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+	if (!drm_mm_initialized(&i915->mm.stolen))
 		return ERR_PTR(-ENODEV);
 
 	if (size == 0)
@@ -608,35 +616,37 @@ _i915_gem_object_create_stolen(struct intel_memory_region *mem,
 	if (!stolen)
 		return ERR_PTR(-ENOMEM);
 
-	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
+	ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
 	if (ret) {
 		obj = ERR_PTR(ret);
 		goto err_free;
 	}
 
-	obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem);
+	obj = __i915_gem_object_create_stolen(mem, stolen);
 	if (IS_ERR(obj))
 		goto err_remove;
 
 	return obj;
 
 err_remove:
-	i915_gem_stolen_remove_node(dev_priv, stolen);
+	i915_gem_stolen_remove_node(i915, stolen);
 err_free:
 	kfree(stolen);
 	return obj;
 }
 
 struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen(struct drm_i915_private *i915,
 			      resource_size_t size)
 {
-	return i915_gem_object_create_region(dev_priv->mm.regions[INTEL_REGION_STOLEN],
+	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
 					     size, I915_BO_ALLOC_CONTIGUOUS);
 }
 
 static int init_stolen(struct intel_memory_region *mem)
 {
+	intel_memory_region_set_name(mem, "stolen");
+
 	/*
 	 * Initialise stolen early so that we may reserve preallocated
 	 * objects for the BIOS to KMS transition.
@@ -665,18 +675,19 @@ struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
 }
 
 struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
 					       resource_size_t stolen_offset,
 					       resource_size_t gtt_offset,
 					       resource_size_t size)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
+	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
 	struct i915_vma *vma;
 	int ret;
 
-	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+	if (!drm_mm_initialized(&i915->mm.stolen))
 		return ERR_PTR(-ENODEV);
 
 	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
@@ -694,19 +705,19 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 
 	stolen->start = stolen_offset;
 	stolen->size = size;
-	mutex_lock(&dev_priv->mm.stolen_lock);
-	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
-	mutex_unlock(&dev_priv->mm.stolen_lock);
+	mutex_lock(&i915->mm.stolen_lock);
+	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
+	mutex_unlock(&i915->mm.stolen_lock);
 	if (ret) {
 		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
 		kfree(stolen);
 		return ERR_PTR(ret);
 	}
 
-	obj = __i915_gem_object_create_stolen(dev_priv, stolen, NULL);
+	obj = __i915_gem_object_create_stolen(mem, stolen);
 	if (IS_ERR(obj)) {
 		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
-		i915_gem_stolen_remove_node(dev_priv, stolen);
+		i915_gem_stolen_remove_node(i915, stolen);
 		kfree(stolen);
 		return obj;
 	}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 1fa592d..6c7825a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -11,6 +11,7 @@
 #include "i915_drv.h"
 #include "i915_gem.h"
 #include "i915_gem_ioctls.h"
+#include "i915_gem_mman.h"
 #include "i915_gem_object.h"
 
 /**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 0dbb44d..580319b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -129,9 +129,10 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 		spin_unlock(&mn->lock);
 
 		ret = i915_gem_object_unbind(obj,
-					     I915_GEM_OBJECT_UNBIND_ACTIVE);
+					     I915_GEM_OBJECT_UNBIND_ACTIVE |
+					     I915_GEM_OBJECT_UNBIND_BARRIER);
 		if (ret == 0)
-			ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+			ret = __i915_gem_object_put_pages(obj);
 		i915_gem_object_put(obj);
 		if (ret)
 			return ret;
@@ -460,31 +461,36 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
 		unsigned int flags = 0;
+		int locked = 0;
 
 		if (!i915_gem_object_is_readonly(obj))
 			flags |= FOLL_WRITE;
 
 		ret = -EFAULT;
 		if (mmget_not_zero(mm)) {
-			down_read(&mm->mmap_sem);
 			while (pinned < npages) {
+				if (!locked) {
+					down_read(&mm->mmap_sem);
+					locked = 1;
+				}
 				ret = get_user_pages_remote
 					(work->task, mm,
 					 obj->userptr.ptr + pinned * PAGE_SIZE,
 					 npages - pinned,
 					 flags,
-					 pvec + pinned, NULL, NULL);
+					 pvec + pinned, NULL, &locked);
 				if (ret < 0)
 					break;
 
 				pinned += ret;
 			}
-			up_read(&mm->mmap_sem);
+			if (locked)
+				up_read(&mm->mmap_sem);
 			mmput(mm);
 		}
 	}
 
-	mutex_lock(&obj->mm.lock);
+	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
 	if (obj->userptr.work == &work->work) {
 		struct sg_table *pages = ERR_PTR(ret);
 
@@ -774,15 +780,11 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 		return -EFAULT;
 
 	if (args->flags & I915_USERPTR_READ_ONLY) {
-		struct i915_address_space *vm;
-
 		/*
 		 * On almost all of the older hw, we cannot tell the GPU that
 		 * a page is readonly.
 		 */
-		vm = rcu_dereference_protected(dev_priv->kernel_context->vm,
-					       true); /* static vm */
-		if (!vm || !vm->has_read_only)
+		if (!dev_priv->gt.vm->has_read_only)
 			return -ENODEV;
 	}
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index 892d12d..fa16f2c3 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -12,10 +12,14 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
 			    struct sg_table *pages)
 {
 	unsigned long nreal = obj->scratch / PAGE_SIZE;
-	struct scatterlist *sg;
+	struct sgt_iter sgt_iter;
+	struct page *page;
 
-	for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
-		__free_page(sg_page(sg));
+	for_each_sgt_page(page, sgt_iter, pages) {
+		__free_page(page);
+		if (!--nreal)
+			break;
+	}
 
 	sg_free_table(pages);
 	kfree(pages);
@@ -70,7 +74,6 @@ static int huge_get_pages(struct drm_i915_gem_object *obj)
 
 err:
 	huge_free_pages(obj, pages);
-
 	return -ENOMEM;
 #undef GFP
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
index 549c139..b8cf31b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
@@ -7,6 +7,12 @@
 #ifndef __HUGE_GEM_OBJECT_H
 #define __HUGE_GEM_OBJECT_H
 
+#include <linux/types.h>
+
+#include "gem/i915_gem_object_types.h"
+
+struct drm_i915_private;
+
 struct drm_i915_gem_object *
 huge_gem_object(struct drm_i915_private *i915,
 		phys_addr_t phys_size,
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 688c49a..9311250 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -517,7 +517,7 @@ static int igt_mock_memory_region_huge_pages(void *arg)
 			i915_vma_unpin(vma);
 			i915_vma_close(vma);
 
-			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+			__i915_gem_object_put_pages(obj);
 			i915_gem_object_put(obj);
 		}
 	}
@@ -650,7 +650,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
 		i915_vma_close(vma);
 
 		i915_gem_object_unpin_pages(obj);
-		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+		__i915_gem_object_put_pages(obj);
 		i915_gem_object_put(obj);
 	}
 
@@ -678,7 +678,7 @@ static void close_object_list(struct list_head *objects,
 
 		list_del(&obj->st_link);
 		i915_gem_object_unpin_pages(obj);
-		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+		__i915_gem_object_put_pages(obj);
 		i915_gem_object_put(obj);
 	}
 }
@@ -948,7 +948,7 @@ static int igt_mock_ppgtt_64K(void *arg)
 			i915_vma_close(vma);
 
 			i915_gem_object_unpin_pages(obj);
-			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+			__i915_gem_object_put_pages(obj);
 			i915_gem_object_put(obj);
 		}
 	}
@@ -1017,38 +1017,33 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	return err;
 }
 
-static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 {
-	unsigned long n;
+	unsigned long n = obj->base.size >> PAGE_SHIFT;
+	u32 *ptr;
 	int err;
 
-	i915_gem_object_lock(obj);
-	err = i915_gem_object_set_to_wc_domain(obj, false);
-	i915_gem_object_unlock(obj);
+	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
 	if (err)
 		return err;
 
-	err = i915_gem_object_pin_pages(obj);
-	if (err)
-		return err;
+	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(ptr))
+		return PTR_ERR(ptr);
 
-	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
-		u32 __iomem *base;
-		u32 read_val;
-
-		base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
-
-		read_val = ioread32(base + dword);
-		io_mapping_unmap_atomic(base);
-		if (read_val != val) {
-			pr_err("n=%lu base[%u]=%u, val=%u\n",
-			       n, dword, read_val, val);
+	ptr += dword;
+	while (n--) {
+		if (*ptr != val) {
+			pr_err("base[%u]=%08x, val=%08x\n",
+			       dword, *ptr, val);
 			err = -EINVAL;
 			break;
 		}
+
+		ptr += PAGE_SIZE / sizeof(*ptr);
 	}
 
-	i915_gem_object_unpin_pages(obj);
+	i915_gem_object_unpin_map(obj);
 	return err;
 }
 
@@ -1056,10 +1051,8 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 {
 	if (i915_gem_object_has_struct_page(obj))
 		return __cpu_check_shmem(obj, dword, val);
-	else if (i915_gem_object_is_lmem(obj))
-		return __cpu_check_lmem(obj, dword, val);
-
-	return -ENODEV;
+	else
+		return __cpu_check_vmap(obj, dword, val);
 }
 
 static int __igt_write_huge(struct intel_context *ce,
@@ -1110,8 +1103,7 @@ static int __igt_write_huge(struct intel_context *ce,
 out_vma_unpin:
 	i915_vma_unpin(vma);
 out_vma_close:
-	i915_vma_destroy(vma);
-
+	__i915_vma_put(vma);
 	return err;
 }
 
@@ -1301,7 +1293,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
 			}
 
 			i915_gem_object_unpin_pages(obj);
-			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+			__i915_gem_object_put_pages(obj);
 			i915_gem_object_put(obj);
 		}
 	}
@@ -1420,7 +1412,7 @@ static int igt_ppgtt_smoke_huge(void *arg)
 
 		err = i915_gem_object_pin_pages(obj);
 		if (err) {
-			if (err == -ENXIO) {
+			if (err == -ENXIO || err == -E2BIG) {
 				i915_gem_object_put(obj);
 				size >>= 1;
 				goto try_again;
@@ -1442,7 +1434,7 @@ static int igt_ppgtt_smoke_huge(void *arg)
 		}
 out_unpin:
 		i915_gem_object_unpin_pages(obj);
-		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+		__i915_gem_object_put_pages(obj);
 out_put:
 		i915_gem_object_put(obj);
 
@@ -1530,7 +1522,7 @@ static int igt_ppgtt_sanity_check(void *arg)
 			err = igt_write_huge(ctx, obj);
 
 			i915_gem_object_unpin_pages(obj);
-			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+			__i915_gem_object_put_pages(obj);
 			i915_gem_object_put(obj);
 
 			if (err) {
@@ -1873,7 +1865,7 @@ int i915_gem_huge_page_mock_selftests(void)
 	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
 	mkwrite_device_info(dev_priv)->ppgtt_size = 48;
 
-	ppgtt = i915_ppgtt_create(dev_priv);
+	ppgtt = i915_ppgtt_create(&dev_priv->gt);
 	if (IS_ERR(ppgtt)) {
 		err = PTR_ERR(ppgtt);
 		goto out_unlock;
@@ -1912,9 +1904,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_ppgtt_smoke_huge),
 		SUBTEST(igt_ppgtt_sanity_check),
 	};
-	struct drm_file *file;
 	struct i915_gem_context *ctx;
 	struct i915_address_space *vm;
+	struct file *file;
 	int err;
 
 	if (!HAS_PPGTT(i915)) {
@@ -1944,6 +1936,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 	err = i915_subtests(tests, ctx);
 
 out_file:
-	mock_file_free(i915, file);
+	fput(file);
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index da8edee..b972be1 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -24,6 +24,7 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
 
 	prandom_seed_state(&prng, i915_selftest.random_seed);
 
+	intel_engine_pm_get(engine);
 	do {
 		const u32 max_block_size = S16_MAX * PAGE_SIZE;
 		u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
@@ -99,6 +100,7 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
 err_flush:
 	if (err == -ENOMEM)
 		err = 0;
+	intel_engine_pm_put(engine);
 
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 2b29f6b..3f6079e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -6,6 +6,7 @@
 
 #include <linux/prime_numbers.h>
 
+#include "gt/intel_engine_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_ring.h"
@@ -200,7 +201,7 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	rq = i915_request_create(ctx->engine->kernel_context);
+	rq = intel_engine_create_kernel_request(ctx->engine);
 	if (IS_ERR(rq)) {
 		i915_vma_unpin(vma);
 		return PTR_ERR(rq);
@@ -324,8 +325,12 @@ static int igt_gem_coherency(void *arg)
 	values = offsets + ncachelines;
 
 	ctx.engine = random_engine(i915, &prng);
-	GEM_BUG_ON(!ctx.engine);
+	if (!ctx.engine) {
+		err = -ENODEV;
+		goto out_free;
+	}
 	pr_info("%s: using %s\n", __func__, ctx.engine->name);
+	intel_engine_pm_get(ctx.engine);
 
 	for (over = igt_coherency_mode; over->name; over++) {
 		if (!over->set)
@@ -352,7 +357,7 @@ static int igt_gem_coherency(void *arg)
 					ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
 					if (IS_ERR(ctx.obj)) {
 						err = PTR_ERR(ctx.obj);
-						goto free;
+						goto out_pm;
 					}
 
 					i915_random_reorder(offsets, ncachelines, &prng);
@@ -403,13 +408,15 @@ static int igt_gem_coherency(void *arg)
 			}
 		}
 	}
-free:
+out_pm:
+	intel_engine_pm_put(ctx.engine);
+out_free:
 	kfree(offsets);
 	return err;
 
 put_object:
 	i915_gem_object_put(ctx.obj);
-	goto free;
+	goto out_pm;
 }
 
 int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 62fabc0..7fc4686 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -7,6 +7,7 @@
 #include <linux/prime_numbers.h>
 
 #include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_requests.h"
 #include "gt/intel_reset.h"
@@ -26,6 +27,12 @@
 
 #define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
 
+static inline struct i915_address_space *ctx_vm(struct i915_gem_context *ctx)
+{
+	/* single threaded, private ctx */
+	return rcu_dereference_protected(ctx->vm, true);
+}
+
 static int live_nop_switch(void *arg)
 {
 	const unsigned int nctx = 1024;
@@ -33,7 +40,7 @@ static int live_nop_switch(void *arg)
 	struct intel_engine_cs *engine;
 	struct i915_gem_context **ctx;
 	struct igt_live_test t;
-	struct drm_file *file;
+	struct file *file;
 	unsigned long n;
 	int err = -ENODEV;
 
@@ -67,25 +74,34 @@ static int live_nop_switch(void *arg)
 	}
 
 	for_each_uabi_engine(engine, i915) {
-		struct i915_request *rq;
+		struct i915_request *rq = NULL;
 		unsigned long end_time, prime;
 		ktime_t times[2] = {};
 
 		times[0] = ktime_get_raw();
 		for (n = 0; n < nctx; n++) {
-			rq = igt_request_alloc(ctx[n], engine);
-			if (IS_ERR(rq)) {
-				err = PTR_ERR(rq);
+			struct i915_request *this;
+
+			this = igt_request_alloc(ctx[n], engine);
+			if (IS_ERR(this)) {
+				err = PTR_ERR(this);
 				goto out_file;
 			}
-			i915_request_add(rq);
+			if (rq) {
+				i915_request_await_dma_fence(this, &rq->fence);
+				i915_request_put(rq);
+			}
+			rq = i915_request_get(this);
+			i915_request_add(this);
 		}
 		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 			pr_err("Failed to populated %d contexts\n", nctx);
 			intel_gt_set_wedged(&i915->gt);
+			i915_request_put(rq);
 			err = -EIO;
 			goto out_file;
 		}
+		i915_request_put(rq);
 
 		times[1] = ktime_get_raw();
 
@@ -100,13 +116,21 @@ static int live_nop_switch(void *arg)
 		for_each_prime_number_from(prime, 2, 8192) {
 			times[1] = ktime_get_raw();
 
+			rq = NULL;
 			for (n = 0; n < prime; n++) {
-				rq = igt_request_alloc(ctx[n % nctx], engine);
-				if (IS_ERR(rq)) {
-					err = PTR_ERR(rq);
+				struct i915_request *this;
+
+				this = igt_request_alloc(ctx[n % nctx], engine);
+				if (IS_ERR(this)) {
+					err = PTR_ERR(this);
 					goto out_file;
 				}
 
+				if (rq) { /* Force submission order */
+					i915_request_await_dma_fence(this, &rq->fence);
+					i915_request_put(rq);
+				}
+
 				/*
 				 * This space is left intentionally blank.
 				 *
@@ -121,14 +145,18 @@ static int live_nop_switch(void *arg)
 				 * for latency.
 				 */
 
-				i915_request_add(rq);
+				rq = i915_request_get(this);
+				i915_request_add(this);
 			}
+			GEM_BUG_ON(!rq);
 			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 				pr_err("Switching between %ld contexts timed out\n",
 				       prime);
 				intel_gt_set_wedged(&i915->gt);
+				i915_request_put(rq);
 				break;
 			}
+			i915_request_put(rq);
 
 			times[1] = ktime_sub(ktime_get_raw(), times[1]);
 			if (prime == 2)
@@ -149,7 +177,7 @@ static int live_nop_switch(void *arg)
 	}
 
 out_file:
-	mock_file_free(i915, file);
+	fput(file);
 	return err;
 }
 
@@ -255,7 +283,7 @@ static int live_parallel_switch(void *arg)
 	int (* const *fn)(void *arg);
 	struct i915_gem_context *ctx;
 	struct intel_context *ce;
-	struct drm_file *file;
+	struct file *file;
 	int n, m, count;
 	int err = 0;
 
@@ -309,7 +337,7 @@ static int live_parallel_switch(void *arg)
 			if (!data[m].ce[0])
 				continue;
 
-			ce = intel_context_create(ctx, data[m].ce[0]->engine);
+			ce = intel_context_create(data[m].ce[0]->engine);
 			if (IS_ERR(ce))
 				goto out;
 
@@ -377,7 +405,7 @@ static int live_parallel_switch(void *arg)
 	}
 	kfree(data);
 out_file:
-	mock_file_free(i915, file);
+	fput(file);
 	return err;
 }
 
@@ -502,17 +530,17 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
 	return err;
 }
 
-static int file_add_object(struct drm_file *file,
-			    struct drm_i915_gem_object *obj)
+static int file_add_object(struct file *file, struct drm_i915_gem_object *obj)
 {
 	int err;
 
 	GEM_BUG_ON(obj->base.handle_count);
 
 	/* tie the object to the drm_file for easy reaping */
-	err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
+	err = idr_alloc(&to_drm_file(file)->object_idr,
+			&obj->base, 1, 0, GFP_KERNEL);
 	if (err < 0)
-		return  err;
+		return err;
 
 	i915_gem_object_get(obj);
 	obj->base.handle_count++;
@@ -521,7 +549,7 @@ static int file_add_object(struct drm_file *file,
 
 static struct drm_i915_gem_object *
 create_test_object(struct i915_address_space *vm,
-		   struct drm_file *file,
+		   struct file *file,
 		   struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
@@ -621,9 +649,9 @@ static int igt_ctx_exec(void *arg)
 		unsigned long ncontexts, ndwords, dw;
 		struct i915_request *tq[5] = {};
 		struct igt_live_test t;
-		struct drm_file *file;
 		IGT_TIMEOUT(end_time);
 		LIST_HEAD(objects);
+		struct file *file;
 
 		if (!intel_engine_can_store_dword(engine))
 			continue;
@@ -716,7 +744,7 @@ static int igt_ctx_exec(void *arg)
 		if (igt_live_test_end(&t))
 			err = -EIO;
 
-		mock_file_free(i915, file);
+		fput(file);
 		if (err)
 			return err;
 
@@ -733,7 +761,7 @@ static int igt_shared_ctx_exec(void *arg)
 	struct i915_gem_context *parent;
 	struct intel_engine_cs *engine;
 	struct igt_live_test t;
-	struct drm_file *file;
+	struct file *file;
 	int err = 0;
 
 	/*
@@ -786,14 +814,15 @@ static int igt_shared_ctx_exec(void *arg)
 			}
 
 			mutex_lock(&ctx->mutex);
-			__assign_ppgtt(ctx, parent->vm);
+			__assign_ppgtt(ctx, ctx_vm(parent));
 			mutex_unlock(&ctx->mutex);
 
 			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
 			GEM_BUG_ON(IS_ERR(ce));
 
 			if (!obj) {
-				obj = create_test_object(parent->vm, file, &objects);
+				obj = create_test_object(ctx_vm(parent),
+							 file, &objects);
 				if (IS_ERR(obj)) {
 					err = PTR_ERR(obj);
 					intel_context_put(ce);
@@ -854,7 +883,7 @@ static int igt_shared_ctx_exec(void *arg)
 	if (igt_live_test_end(&t))
 		err = -EIO;
 out_file:
-	mock_file_free(i915, file);
+	fput(file);
 	return err;
 }
 
@@ -1140,8 +1169,7 @@ __sseu_finish(const char *name,
 		igt_spinner_end(spin);
 
 	if ((flags & TEST_IDLE) && ret == 0) {
-		ret = intel_gt_wait_for_idle(ce->engine->gt,
-					     MAX_SCHEDULE_TIMEOUT);
+		ret = igt_flush_test(ce->engine->i915);
 		if (ret)
 			return ret;
 
@@ -1163,9 +1191,11 @@ __sseu_test(const char *name,
 	struct igt_spinner *spin = NULL;
 	int ret;
 
+	intel_engine_pm_get(ce->engine);
+
 	ret = __sseu_prepare(name, flags, ce, &spin);
 	if (ret)
-		return ret;
+		goto out_pm;
 
 	ret = intel_context_reconfigure_sseu(ce, sseu);
 	if (ret)
@@ -1180,6 +1210,8 @@ __sseu_test(const char *name,
 		igt_spinner_fini(spin);
 		kfree(spin);
 	}
+out_pm:
+	intel_engine_pm_put(ce->engine);
 	return ret;
 }
 
@@ -1232,8 +1264,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 			hweight32(engine->sseu.slice_mask),
 			hweight32(pg_sseu.slice_mask));
 
-		ce = intel_context_create(engine->kernel_context->gem_context,
-					  engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			ret = PTR_ERR(ce);
 			goto out_put;
@@ -1311,16 +1342,18 @@ static int igt_ctx_sseu(void *arg)
 static int igt_ctx_readonly(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
+	unsigned long idx, ndwords, dw, num_engines;
 	struct drm_i915_gem_object *obj = NULL;
 	struct i915_request *tq[5] = {};
+	struct i915_gem_engines_iter it;
 	struct i915_address_space *vm;
 	struct i915_gem_context *ctx;
-	unsigned long idx, ndwords, dw;
+	struct intel_context *ce;
 	struct igt_live_test t;
-	struct drm_file *file;
 	I915_RND_STATE(prng);
 	IGT_TIMEOUT(end_time);
 	LIST_HEAD(objects);
+	struct file *file;
 	int err = -ENODEV;
 
 	/*
@@ -1343,21 +1376,21 @@ static int igt_ctx_readonly(void *arg)
 		goto out_file;
 	}
 
-	rcu_read_lock();
-	vm = rcu_dereference(ctx->vm) ?: &i915->ggtt.alias->vm;
+	vm = ctx_vm(ctx) ?: &i915->ggtt.alias->vm;
 	if (!vm || !vm->has_read_only) {
-		rcu_read_unlock();
 		err = 0;
 		goto out_file;
 	}
-	rcu_read_unlock();
+
+	num_engines = 0;
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+		if (intel_engine_can_store_dword(ce->engine))
+			num_engines++;
+	i915_gem_context_unlock_engines(ctx);
 
 	ndwords = 0;
 	dw = 0;
 	while (!time_after(jiffies, end_time)) {
-		struct i915_gem_engines_iter it;
-		struct intel_context *ce;
-
 		for_each_gem_engine(ce,
 				    i915_gem_context_lock_engines(ctx), it) {
 			if (!intel_engine_can_store_dword(ce->engine))
@@ -1380,7 +1413,7 @@ static int igt_ctx_readonly(void *arg)
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
 				       ce->engine->name,
-				       yesno(!!rcu_access_pointer(ctx->vm)),
+				       yesno(!!ctx_vm(ctx)),
 				       err);
 				i915_gem_context_unlock_engines(ctx);
 				goto out_file;
@@ -1400,8 +1433,8 @@ static int igt_ctx_readonly(void *arg)
 		}
 		i915_gem_context_unlock_engines(ctx);
 	}
-	pr_info("Submitted %lu dwords (across %u engines)\n",
-		ndwords, RUNTIME_INFO(i915)->num_engines);
+	pr_info("Submitted %lu dwords (across %lu engines)\n",
+		ndwords, num_engines);
 
 	dw = 0;
 	idx = 0;
@@ -1426,7 +1459,7 @@ static int igt_ctx_readonly(void *arg)
 	if (igt_live_test_end(&t))
 		err = -EIO;
 
-	mock_file_free(i915, file);
+	fput(file);
 	return err;
 }
 
@@ -1466,7 +1499,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
-		goto err;
+		goto out;
 	}
 
 	*cmd++ = MI_STORE_DWORD_IMM_GEN4;
@@ -1488,12 +1521,12 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
-		goto err_vm;
+		goto out_vm;
 	}
 
 	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
 	if (err)
-		goto err_vm;
+		goto out_vm;
 
 	err = check_scratch(vm, offset);
 	if (err)
@@ -1517,22 +1550,20 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	if (err)
 		goto skip_request;
 
-	i915_vma_unpin_and_release(&vma, 0);
+	i915_vma_unpin(vma);
 
 	i915_request_add(rq);
 
-	i915_vm_put(vm);
-	return 0;
-
+	goto out_vm;
 skip_request:
 	i915_request_skip(rq, err);
 err_request:
 	i915_request_add(rq);
 err_unpin:
 	i915_vma_unpin(vma);
-err_vm:
+out_vm:
 	i915_vm_put(vm);
-err:
+out:
 	i915_gem_object_put(obj);
 	return err;
 }
@@ -1560,7 +1591,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
-		goto err;
+		goto out;
 	}
 
 	memset(cmd, POISON_INUSE, PAGE_SIZE);
@@ -1592,12 +1623,12 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
-		goto err_vm;
+		goto out_vm;
 	}
 
 	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
 	if (err)
-		goto err_vm;
+		goto out_vm;
 
 	err = check_scratch(vm, offset);
 	if (err)
@@ -1630,29 +1661,27 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	err = i915_gem_object_set_to_cpu_domain(obj, false);
 	i915_gem_object_unlock(obj);
 	if (err)
-		goto err_vm;
+		goto out_vm;
 
 	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
-		goto err_vm;
+		goto out_vm;
 	}
 
 	*value = cmd[result / sizeof(*cmd)];
 	i915_gem_object_unpin_map(obj);
-	i915_gem_object_put(obj);
 
-	return 0;
-
+	goto out_vm;
 skip_request:
 	i915_request_skip(rq, err);
 err_request:
 	i915_request_add(rq);
 err_unpin:
 	i915_vma_unpin(vma);
-err_vm:
+out_vm:
 	i915_vm_put(vm);
-err:
+out:
 	i915_gem_object_put(obj);
 	return err;
 }
@@ -1661,11 +1690,11 @@ static int igt_vm_isolation(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_a, *ctx_b;
+	unsigned long num_engines, count;
 	struct intel_engine_cs *engine;
 	struct igt_live_test t;
-	struct drm_file *file;
 	I915_RND_STATE(prng);
-	unsigned long count;
+	struct file *file;
 	u64 vm_total;
 	int err;
 
@@ -1698,14 +1727,15 @@ static int igt_vm_isolation(void *arg)
 	}
 
 	/* We can only test vm isolation, if the vm are distinct */
-	if (ctx_a->vm == ctx_b->vm)
+	if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
 		goto out_file;
 
-	vm_total = ctx_a->vm->total;
-	GEM_BUG_ON(ctx_b->vm->total != vm_total);
+	vm_total = ctx_vm(ctx_a)->total;
+	GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
 	vm_total -= I915_GTT_PAGE_SIZE;
 
 	count = 0;
+	num_engines = 0;
 	for_each_uabi_engine(engine, i915) {
 		IGT_TIMEOUT(end_time);
 		unsigned long this = 0;
@@ -1743,14 +1773,15 @@ static int igt_vm_isolation(void *arg)
 			this++;
 		}
 		count += this;
+		num_engines++;
 	}
-	pr_info("Checked %lu scratch offsets across %d engines\n",
-		count, RUNTIME_INFO(i915)->num_engines);
+	pr_info("Checked %lu scratch offsets across %lu engines\n",
+		count, num_engines);
 
 out_file:
 	if (igt_live_test_end(&t))
 		err = -EIO;
-	mock_file_free(i915, file);
+	fput(file);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index d85d1ce..2a52b92 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -254,106 +254,6 @@ static int igt_dmabuf_export_vmap(void *arg)
 	return err;
 }
 
-static int igt_dmabuf_export_kmap(void *arg)
-{
-	struct drm_i915_private *i915 = arg;
-	struct drm_i915_gem_object *obj;
-	struct dma_buf *dmabuf;
-	void *ptr;
-	int err;
-
-	obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
-
-	dmabuf = i915_gem_prime_export(&obj->base, 0);
-	i915_gem_object_put(obj);
-	if (IS_ERR(dmabuf)) {
-		err = PTR_ERR(dmabuf);
-		pr_err("i915_gem_prime_export failed with err=%d\n", err);
-		return err;
-	}
-
-	ptr = dma_buf_kmap(dmabuf, 0);
-	if (!ptr) {
-		pr_err("dma_buf_kmap failed\n");
-		err = -ENOMEM;
-		goto err;
-	}
-
-	if (memchr_inv(ptr, 0, PAGE_SIZE)) {
-		dma_buf_kunmap(dmabuf, 0, ptr);
-		pr_err("Exported page[0] not initialiased to zero!\n");
-		err = -EINVAL;
-		goto err;
-	}
-
-	memset(ptr, 0xc5, PAGE_SIZE);
-	dma_buf_kunmap(dmabuf, 0, ptr);
-
-	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
-	if (IS_ERR(ptr)) {
-		err = PTR_ERR(ptr);
-		pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
-		goto err;
-	}
-	memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
-	i915_gem_object_flush_map(obj);
-	i915_gem_object_unpin_map(obj);
-
-	ptr = dma_buf_kmap(dmabuf, 1);
-	if (!ptr) {
-		pr_err("dma_buf_kmap failed\n");
-		err = -ENOMEM;
-		goto err;
-	}
-
-	if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) {
-		dma_buf_kunmap(dmabuf, 1, ptr);
-		pr_err("Exported page[1] not set to 0xaa!\n");
-		err = -EINVAL;
-		goto err;
-	}
-
-	memset(ptr, 0xc5, PAGE_SIZE);
-	dma_buf_kunmap(dmabuf, 1, ptr);
-
-	ptr = dma_buf_kmap(dmabuf, 0);
-	if (!ptr) {
-		pr_err("dma_buf_kmap failed\n");
-		err = -ENOMEM;
-		goto err;
-	}
-	if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) {
-		dma_buf_kunmap(dmabuf, 0, ptr);
-		pr_err("Exported page[0] did not retain 0xc5!\n");
-		err = -EINVAL;
-		goto err;
-	}
-	dma_buf_kunmap(dmabuf, 0, ptr);
-
-	ptr = dma_buf_kmap(dmabuf, 2);
-	if (ptr) {
-		pr_err("Erroneously kmapped beyond the end of the object!\n");
-		dma_buf_kunmap(dmabuf, 2, ptr);
-		err = -EINVAL;
-		goto err;
-	}
-
-	ptr = dma_buf_kmap(dmabuf, -1);
-	if (ptr) {
-		pr_err("Erroneously kmapped before the start of the object!\n");
-		dma_buf_kunmap(dmabuf, -1, ptr);
-		err = -EINVAL;
-		goto err;
-	}
-
-	err = 0;
-err:
-	dma_buf_put(dmabuf);
-	return err;
-}
-
 int i915_gem_dmabuf_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -362,7 +262,6 @@ int i915_gem_dmabuf_mock_selftests(void)
 		SUBTEST(igt_dmabuf_import),
 		SUBTEST(igt_dmabuf_import_ownership),
 		SUBTEST(igt_dmabuf_export_vmap),
-		SUBTEST(igt_dmabuf_export_kmap),
 	};
 	struct drm_i915_private *i915;
 	int err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 29b2077..ef7c74c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -6,12 +6,15 @@
 
 #include <linux/prime_numbers.h>
 
+#include "gt/intel_engine_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
+#include "gem/i915_gem_region.h"
 #include "huge_gem_object.h"
 #include "i915_selftest.h"
 #include "selftests/i915_random.h"
 #include "selftests/igt_flush_test.h"
+#include "selftests/igt_mmap.h"
 
 struct tile {
 	unsigned int width;
@@ -161,7 +164,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
 	kunmap(p);
 
 out:
-	i915_vma_destroy(vma);
+	__i915_vma_put(vma);
 	return err;
 }
 
@@ -255,7 +258,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
 		if (err)
 			return err;
 
-		i915_vma_destroy(vma);
+		__i915_vma_put(vma);
 
 		if (igt_timeout(end_time,
 				"%s: timed out after tiling=%d stride=%d\n",
@@ -535,7 +538,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 		if (err)
 			return err;
 
-		rq = i915_request_create(engine->kernel_context);
+		rq = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(rq)) {
 			i915_vma_unpin(vma);
 			return PTR_ERR(rq);
@@ -563,16 +566,16 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
 			       int expected)
 {
 	struct drm_i915_gem_object *obj;
-	int err;
+	struct i915_mmap_offset *mmo;
 
 	obj = i915_gem_object_create_internal(i915, size);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	err = create_mmap_offset(obj);
+	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
 	i915_gem_object_put(obj);
 
-	return err == expected;
+	return PTR_ERR_OR_ZERO(mmo) == expected;
 }
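
assert_mmap_offset() now gets an ERR_PTR-encoded pointer back from mmap_offset_attach() instead of a plain errno, and PTR_ERR_OR_ZERO() folds that pointer into an int for the comparison against the expected value. A short illustration of the idiom (create_widget() and expected are hypothetical):

    struct widget *w = create_widget();   /* valid pointer or ERR_PTR(-errno) */
    bool ok;

    if (IS_ERR(w))
        pr_err("creation failed: %ld\n", PTR_ERR(w));

    /* PTR_ERR_OR_ZERO() is 0 for a valid pointer and the negative errno
     * otherwise, which is what gets compared against 'expected' above. */
    ok = PTR_ERR_OR_ZERO(w) == expected;
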
 
 static void disable_retire_worker(struct drm_i915_private *i915)
@@ -606,28 +609,50 @@ static int igt_mmap_offset_exhaustion(void *arg)
 	struct drm_i915_private *i915 = arg;
 	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
 	struct drm_i915_gem_object *obj;
-	struct drm_mm_node resv, *hole;
-	u64 hole_start, hole_end;
-	int loop, err;
+	struct drm_mm_node *hole, *next;
+	struct i915_mmap_offset *mmo;
+	int loop, err = 0;
 
 	/* Disable background reaper */
 	disable_retire_worker(i915);
 	GEM_BUG_ON(!i915->gt.awake);
+	intel_gt_retire_requests(&i915->gt);
+	i915_gem_drain_freed_objects(i915);
 
 	/* Trim the device mmap space to only a page */
-	memset(&resv, 0, sizeof(resv));
-	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
-		resv.start = hole_start;
-		resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
-		mmap_offset_lock(i915);
-		err = drm_mm_reserve_node(mm, &resv);
-		mmap_offset_unlock(i915);
-		if (err) {
-			pr_err("Failed to trim VMA manager, err=%d\n", err);
+	mmap_offset_lock(i915);
+	loop = 1; /* PAGE_SIZE units */
+	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
+		struct drm_mm_node *resv;
+
+		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
+		if (!resv) {
+			err = -ENOMEM;
 			goto out_park;
 		}
-		break;
+
+		resv->start = drm_mm_hole_node_start(hole) + loop;
+		resv->size = hole->hole_size - loop;
+		resv->color = -1ul;
+		loop = 0;
+
+		if (!resv->size) {
+			kfree(resv);
+			continue;
+		}
+
+		pr_debug("Reserving hole [%llx + %llx]\n",
+			 resv->start, resv->size);
+
+		err = drm_mm_reserve_node(mm, resv);
+		if (err) {
+			pr_err("Failed to trim VMA manager, err=%d\n", err);
+			kfree(resv);
+			goto out_park;
+		}
 	}
+	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
+	mmap_offset_unlock(i915);
 
 	/* Just fits! */
 	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
@@ -650,9 +675,10 @@ static int igt_mmap_offset_exhaustion(void *arg)
 		goto out;
 	}
 
-	err = create_mmap_offset(obj);
-	if (err) {
+	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
+	if (IS_ERR(mmo)) {
 		pr_err("Unable to insert object into reclaimed hole\n");
+		err = PTR_ERR(mmo);
 		goto err_obj;
 	}
 
@@ -684,9 +710,15 @@ static int igt_mmap_offset_exhaustion(void *arg)
 
 out:
 	mmap_offset_lock(i915);
-	drm_mm_remove_node(&resv);
-	mmap_offset_unlock(i915);
 out_park:
+	drm_mm_for_each_node_safe(hole, next, mm) {
+		if (hole->color != -1ul)
+			continue;
+
+		drm_mm_remove_node(hole);
+		kfree(hole);
+	}
+	mmap_offset_unlock(i915);
 	restore_retire_worker(i915);
 	return err;
 err_obj:
@@ -694,12 +726,515 @@ static int igt_mmap_offset_exhaustion(void *arg)
 	goto out;
 }
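
The reworked trimming loop in igt_mmap_offset_exhaustion() above swaps the single on-stack reservation for one kzalloc'ed drm_mm_node per remaining hole, tagging each with color = -1ul so the teardown path can tell its own reservations apart from real mmap offsets. A condensed, hedged sketch of that tag-and-sweep pattern (locking and the one page left free for the test are omitted):

    list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
        struct drm_mm_node *resv = kzalloc(sizeof(*resv), GFP_KERNEL);

        if (!resv)
            break;

        resv->start = drm_mm_hole_node_start(hole);
        resv->size = hole->hole_size;
        resv->color = -1ul;                     /* marks the node as ours */
        if (drm_mm_reserve_node(mm, resv))
            kfree(resv);
    }

    /* ... exercise the offset manager ... */

    drm_mm_for_each_node_safe(hole, next, mm) {
        if (hole->color != -1ul)                /* not ours, leave it alone */
            continue;
        drm_mm_remove_node(hole);
        kfree(hole);
    }
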
 
+static int gtt_set(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	void __iomem *map;
+	int err = 0;
+
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	intel_gt_pm_get(vma->vm->gt);
+	map = i915_vma_pin_iomap(vma);
+	i915_vma_unpin(vma);
+	if (IS_ERR(map)) {
+		err = PTR_ERR(map);
+		goto out;
+	}
+
+	memset_io(map, POISON_INUSE, obj->base.size);
+	i915_vma_unpin_iomap(vma);
+
+out:
+	intel_gt_pm_put(vma->vm->gt);
+	return err;
+}
+
+static int gtt_check(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	void __iomem *map;
+	int err = 0;
+
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	intel_gt_pm_get(vma->vm->gt);
+	map = i915_vma_pin_iomap(vma);
+	i915_vma_unpin(vma);
+	if (IS_ERR(map)) {
+		err = PTR_ERR(map);
+		goto out;
+	}
+
+	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
+		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
+		       obj->mm.region->name);
+		err = -EINVAL;
+	}
+	i915_vma_unpin_iomap(vma);
+
+out:
+	intel_gt_pm_put(vma->vm->gt);
+	return err;
+}
+
+static int wc_set(struct drm_i915_gem_object *obj)
+{
+	void *vaddr;
+
+	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
+
+	memset(vaddr, POISON_INUSE, obj->base.size);
+	i915_gem_object_flush_map(obj);
+	i915_gem_object_unpin_map(obj);
+
+	return 0;
+}
+
+static int wc_check(struct drm_i915_gem_object *obj)
+{
+	void *vaddr;
+	int err = 0;
+
+	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
+
+	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
+		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
+		       obj->mm.region->name);
+		err = -EINVAL;
+	}
+	i915_gem_object_unpin_map(obj);
+
+	return err;
+}
+
+static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
+{
+	if (type == I915_MMAP_TYPE_GTT &&
+	    !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
+		return false;
+
+	if (type != I915_MMAP_TYPE_GTT &&
+	    !i915_gem_object_type_has(obj,
+				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+				      I915_GEM_OBJECT_HAS_IOMEM))
+		return false;
+
+	return true;
+}
+
+#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
+static int __igt_mmap(struct drm_i915_private *i915,
+		      struct drm_i915_gem_object *obj,
+		      enum i915_mmap_type type)
+{
+	struct i915_mmap_offset *mmo;
+	struct vm_area_struct *area;
+	unsigned long addr;
+	int err, i;
+
+	if (!can_mmap(obj, type))
+		return 0;
+
+	err = wc_set(obj);
+	if (err == -ENXIO)
+		err = gtt_set(obj);
+	if (err)
+		return err;
+
+	mmo = mmap_offset_attach(obj, type, NULL);
+	if (IS_ERR(mmo))
+		return PTR_ERR(mmo);
+
+	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+	if (IS_ERR_VALUE(addr))
+		return addr;
+
+	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
+
+	area = find_vma(current->mm, addr);
+	if (!area) {
+		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
+		       obj->mm.region->name);
+		err = -EINVAL;
+		goto out_unmap;
+	}
+
+	if (area->vm_private_data != mmo) {
+		pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
+		       obj->mm.region->name);
+		err = -EINVAL;
+		goto out_unmap;
+	}
+
+	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
+		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
+		u32 x;
+
+		if (get_user(x, ux)) {
+			pr_err("%s: Unable to read from mmap, offset:%zd\n",
+			       obj->mm.region->name, i * sizeof(x));
+			err = -EFAULT;
+			goto out_unmap;
+		}
+
+		if (x != expand32(POISON_INUSE)) {
+			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
+			       obj->mm.region->name,
+			       i * sizeof(x), x, expand32(POISON_INUSE));
+			err = -EINVAL;
+			goto out_unmap;
+		}
+
+		x = expand32(POISON_FREE);
+		if (put_user(x, ux)) {
+			pr_err("%s: Unable to write to mmap, offset:%zd\n",
+			       obj->mm.region->name, i * sizeof(x));
+			err = -EFAULT;
+			goto out_unmap;
+		}
+	}
+
+	if (type == I915_MMAP_TYPE_GTT)
+		intel_gt_flush_ggtt_writes(&i915->gt);
+
+	err = wc_check(obj);
+	if (err == -ENXIO)
+		err = gtt_check(obj);
+out_unmap:
+	vm_munmap(addr, obj->base.size);
+	return err;
+}
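
expand32() widens a poison byte into the u32 word that the loop above reads and writes through the user mapping (POISON_INUSE is 0x5a and POISON_FREE is 0x6b in include/linux/poison.h). A self-contained userspace illustration of the same fill/verify/overwrite round trip over an anonymous shared mapping (not part of the selftest):

    #define _DEFAULT_SOURCE
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))

    int main(void)
    {
        const size_t sz = 4096;
        uint32_t *map = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                             MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        size_t i;

        if (map == MAP_FAILED)
            return 1;

        memset(map, 0x5a, sz);                   /* POISON_INUSE-style fill */
        for (i = 0; i < sz / sizeof(*map); i++) {
            if (map[i] != expand32(0x5au)) {     /* expect 0x5a5a5a5a */
                fprintf(stderr, "mismatch at word %zu\n", i);
                return 1;
            }
            map[i] = expand32(0x6bu);            /* POISON_FREE-style overwrite */
        }

        munmap(map, sz);
        return 0;
    }
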
+
+static int igt_mmap(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_memory_region *mr;
+	enum intel_region_id id;
+
+	for_each_memory_region(mr, i915, id) {
+		unsigned long sizes[] = {
+			PAGE_SIZE,
+			mr->min_page_size,
+			SZ_4M,
+		};
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+			struct drm_i915_gem_object *obj;
+			int err;
+
+			obj = i915_gem_object_create_region(mr, sizes[i], 0);
+			if (obj == ERR_PTR(-ENODEV))
+				continue;
+
+			if (IS_ERR(obj))
+				return PTR_ERR(obj);
+
+			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
+			if (err == 0)
+				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
+
+			i915_gem_object_put(obj);
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
+static int __igt_mmap_gpu(struct drm_i915_private *i915,
+			  struct drm_i915_gem_object *obj,
+			  enum i915_mmap_type type)
+{
+	struct intel_engine_cs *engine;
+	struct i915_mmap_offset *mmo;
+	unsigned long addr;
+	u32 __user *ux;
+	u32 bbe;
+	int err;
+
+	/*
+	 * Verify that the mmap access into the backing store aligns with
+	 * that of the GPU, i.e. that mmap is indeed writing into the same
+	 * page as being read by the GPU.
+	 */
+
+	if (!can_mmap(obj, type))
+		return 0;
+
+	err = wc_set(obj);
+	if (err == -ENXIO)
+		err = gtt_set(obj);
+	if (err)
+		return err;
+
+	mmo = mmap_offset_attach(obj, type, NULL);
+	if (IS_ERR(mmo))
+		return PTR_ERR(mmo);
+
+	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+	if (IS_ERR_VALUE(addr))
+		return addr;
+
+	ux = u64_to_user_ptr((u64)addr);
+	bbe = MI_BATCH_BUFFER_END;
+	if (put_user(bbe, ux)) {
+		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
+		err = -EFAULT;
+		goto out_unmap;
+	}
+
+	if (type == I915_MMAP_TYPE_GTT)
+		intel_gt_flush_ggtt_writes(&i915->gt);
+
+	for_each_uabi_engine(engine, i915) {
+		struct i915_request *rq;
+		struct i915_vma *vma;
+
+		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
+		if (IS_ERR(vma)) {
+			err = PTR_ERR(vma);
+			goto out_unmap;
+		}
+
+		err = i915_vma_pin(vma, 0, 0, PIN_USER);
+		if (err)
+			goto out_unmap;
+
+		rq = i915_request_create(engine->kernel_context);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out_unpin;
+		}
+
+		i915_vma_lock(vma);
+		err = i915_request_await_object(rq, vma->obj, false);
+		if (err == 0)
+			err = i915_vma_move_to_active(vma, rq, 0);
+		i915_vma_unlock(vma);
+
+		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
+		i915_request_get(rq);
+		i915_request_add(rq);
+
+		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+			struct drm_printer p =
+				drm_info_printer(engine->i915->drm.dev);
+
+			pr_err("%s(%s, %s): Failed to execute batch\n",
+			       __func__, engine->name, obj->mm.region->name);
+			intel_engine_dump(engine, &p,
+					  "%s\n", engine->name);
+
+			intel_gt_set_wedged(engine->gt);
+			err = -EIO;
+		}
+		i915_request_put(rq);
+
+out_unpin:
+		i915_vma_unpin(vma);
+		if (err)
+			goto out_unmap;
+	}
+
+out_unmap:
+	vm_munmap(addr, obj->base.size);
+	return err;
+}
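
The only write done through the user mapping in __igt_mmap_gpu() is a single MI_BATCH_BUFFER_END dword, the smallest batch the command streamer accepts, and every engine's kernel context is then pointed at that page. A hedged kernel-style sketch of assembling such a batch through a CPU map (vaddr assumed to come from i915_gem_object_pin_map()):

    u32 *cs = vaddr;

    *cs++ = MI_BATCH_BUFFER_END;    /* terminate the batch immediately */
    *cs++ = MI_NOOP;                /* keep the length qword-aligned */

    i915_gem_object_flush_map(obj);
    i915_gem_object_unpin_map(obj);
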
+
+static int igt_mmap_gpu(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_memory_region *mr;
+	enum intel_region_id id;
+
+	for_each_memory_region(mr, i915, id) {
+		struct drm_i915_gem_object *obj;
+		int err;
+
+		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+		if (obj == ERR_PTR(-ENODEV))
+			continue;
+
+		if (IS_ERR(obj))
+			return PTR_ERR(obj);
+
+		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
+		if (err == 0)
+			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
+
+		i915_gem_object_put(obj);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
+{
+	if (!pte_present(*pte) || pte_none(*pte)) {
+		pr_err("missing PTE:%lx\n",
+		       (addr - (unsigned long)data) >> PAGE_SHIFT);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
+{
+	if (pte_present(*pte) && !pte_none(*pte)) {
+		pr_err("present PTE:%lx; expected to be revoked\n",
+		       (addr - (unsigned long)data) >> PAGE_SHIFT);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int check_present(unsigned long addr, unsigned long len)
+{
+	return apply_to_page_range(current->mm, addr, len,
+				   check_present_pte, (void *)addr);
+}
+
+static int check_absent(unsigned long addr, unsigned long len)
+{
+	return apply_to_page_range(current->mm, addr, len,
+				   check_absent_pte, (void *)addr);
+}
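
check_present() and check_absent() lean on apply_to_page_range(), which walks every PTE covering [addr, addr + len) and calls the supplied callback on each. The same walker can just as easily gather statistics; a small sketch with made-up count_present* names:

    static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
    {
        if (pte_present(*pte))
            (*(unsigned long *)data)++;

        return 0;
    }

    static unsigned long count_present(unsigned long addr, unsigned long len)
    {
        unsigned long n = 0;

        apply_to_page_range(current->mm, addr, len, count_present_pte, &n);
        return n;
    }
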
+
+static int prefault_range(u64 start, u64 len)
+{
+	const char __user *addr, *end;
+	char __maybe_unused c;
+	int err;
+
+	addr = u64_to_user_ptr(start);
+	end = addr + len;
+
+	for (; addr < end; addr += PAGE_SIZE) {
+		err = __get_user(c, addr);
+		if (err)
+			return err;
+	}
+
+	return __get_user(c, end - 1);
+}
+
+static int __igt_mmap_revoke(struct drm_i915_private *i915,
+			     struct drm_i915_gem_object *obj,
+			     enum i915_mmap_type type)
+{
+	struct i915_mmap_offset *mmo;
+	unsigned long addr;
+	int err;
+
+	if (!can_mmap(obj, type))
+		return 0;
+
+	mmo = mmap_offset_attach(obj, type, NULL);
+	if (IS_ERR(mmo))
+		return PTR_ERR(mmo);
+
+	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+	if (IS_ERR_VALUE(addr))
+		return addr;
+
+	err = prefault_range(addr, obj->base.size);
+	if (err)
+		goto out_unmap;
+
+	GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
+		   !atomic_read(&obj->bind_count));
+
+	err = check_present(addr, obj->base.size);
+	if (err) {
+		pr_err("%s: was not present\n", obj->mm.region->name);
+		goto out_unmap;
+	}
+
+	/*
+	 * After unbinding the object from the GGTT, its address may be reused
+	 * for other objects. Ergo we have to revoke the previous mmap PTE
+	 * access as it no longer points to the same object.
+	 */
+	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+	if (err) {
+		pr_err("Failed to unbind object!\n");
+		goto out_unmap;
+	}
+	GEM_BUG_ON(atomic_read(&obj->bind_count));
+
+	if (type != I915_MMAP_TYPE_GTT) {
+		__i915_gem_object_put_pages(obj);
+		if (i915_gem_object_has_pages(obj)) {
+			pr_err("Failed to put-pages object!\n");
+			err = -EINVAL;
+			goto out_unmap;
+		}
+	}
+
+	err = check_absent(addr, obj->base.size);
+	if (err) {
+		pr_err("%s: was not absent\n", obj->mm.region->name);
+		goto out_unmap;
+	}
+
+out_unmap:
+	vm_munmap(addr, obj->base.size);
+	return err;
+}
+
+static int igt_mmap_revoke(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_memory_region *mr;
+	enum intel_region_id id;
+
+	for_each_memory_region(mr, i915, id) {
+		struct drm_i915_gem_object *obj;
+		int err;
+
+		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+		if (obj == ERR_PTR(-ENODEV))
+			continue;
+
+		if (IS_ERR(obj))
+			return PTR_ERR(obj);
+
+		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
+		if (err == 0)
+			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
+
+		i915_gem_object_put(obj);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_partial_tiling),
 		SUBTEST(igt_smoke_tiling),
 		SUBTEST(igt_mmap_offset_exhaustion),
+		SUBTEST(igt_mmap),
+		SUBTEST(igt_mmap_revoke),
+		SUBTEST(igt_mmap_gpu),
 	};
 
 	return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index e8132ac..62077fe 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -41,6 +41,7 @@ static int __perf_fill_blt(struct drm_i915_gem_object *obj)
 		if (!engine)
 			return 0;
 
+		intel_engine_pm_get(engine);
 		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
 			struct intel_context *ce = engine->kernel_context;
 			ktime_t t0, t1;
@@ -49,17 +50,20 @@ static int __perf_fill_blt(struct drm_i915_gem_object *obj)
 
 			err = i915_gem_object_fill_blt(obj, ce, 0);
 			if (err)
-				return err;
+				break;
 
 			err = i915_gem_object_wait(obj,
 						   I915_WAIT_ALL,
 						   MAX_SCHEDULE_TIMEOUT);
 			if (err)
-				return err;
+				break;
 
 			t1 = ktime_get();
 			t[pass] = ktime_sub(t1, t0);
 		}
+		intel_engine_pm_put(engine);
+		if (err)
+			return err;
 
 		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
 		pr_info("%s: blt %zd KiB fill: %lld MiB/s\n",
@@ -109,6 +113,7 @@ static int __perf_copy_blt(struct drm_i915_gem_object *src,
 		struct intel_engine_cs *engine;
 		ktime_t t[5];
 		int pass;
+		int err = 0;
 
 		engine = intel_engine_lookup_user(i915,
 						  I915_ENGINE_CLASS_COPY,
@@ -116,26 +121,29 @@ static int __perf_copy_blt(struct drm_i915_gem_object *src,
 		if (!engine)
 			return 0;
 
+		intel_engine_pm_get(engine);
 		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
 			struct intel_context *ce = engine->kernel_context;
 			ktime_t t0, t1;
-			int err;
 
 			t0 = ktime_get();
 
 			err = i915_gem_object_copy_blt(src, dst, ce);
 			if (err)
-				return err;
+				break;
 
 			err = i915_gem_object_wait(dst,
 						   I915_WAIT_ALL,
 						   MAX_SCHEDULE_TIMEOUT);
 			if (err)
-				return err;
+				break;
 
 			t1 = ktime_get();
 			t[pass] = ktime_sub(t1, t0);
 		}
+		intel_engine_pm_put(engine);
+		if (err)
+			return err;
 
 		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
 		pr_info("%s: blt %zd KiB copy: %lld MiB/s\n",
@@ -186,6 +194,8 @@ static int perf_copy_blt(void *arg)
 
 struct igt_thread_arg {
 	struct drm_i915_private *i915;
+	struct i915_gem_context *ctx;
+	struct file *file;
 	struct rnd_state prng;
 	unsigned int n_cpus;
 };
@@ -198,24 +208,20 @@ static int igt_fill_blt_thread(void *arg)
 	struct drm_i915_gem_object *obj;
 	struct i915_gem_context *ctx;
 	struct intel_context *ce;
-	struct drm_file *file;
 	unsigned int prio;
 	IGT_TIMEOUT(end);
 	int err;
 
-	file = mock_file(i915);
-	if (IS_ERR(file))
-		return PTR_ERR(file);
+	ctx = thread->ctx;
+	if (!ctx) {
+		ctx = live_context(i915, thread->file);
+		if (IS_ERR(ctx))
+			return PTR_ERR(ctx);
 
-	ctx = live_context(i915, file);
-	if (IS_ERR(ctx)) {
-		err = PTR_ERR(ctx);
-		goto out_file;
+		prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
+		ctx->sched.priority = I915_USER_PRIORITY(prio);
 	}
 
-	prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
-	ctx->sched.priority = I915_USER_PRIORITY(prio);
-
 	ce = i915_gem_context_get_engine(ctx, BCS0);
 	GEM_BUG_ON(IS_ERR(ce));
 
@@ -300,8 +306,6 @@ static int igt_fill_blt_thread(void *arg)
 		err = 0;
 
 	intel_context_put(ce);
-out_file:
-	mock_file_free(i915, file);
 	return err;
 }
 
@@ -313,24 +317,20 @@ static int igt_copy_blt_thread(void *arg)
 	struct drm_i915_gem_object *src, *dst;
 	struct i915_gem_context *ctx;
 	struct intel_context *ce;
-	struct drm_file *file;
 	unsigned int prio;
 	IGT_TIMEOUT(end);
 	int err;
 
-	file = mock_file(i915);
-	if (IS_ERR(file))
-		return PTR_ERR(file);
+	ctx = thread->ctx;
+	if (!ctx) {
+		ctx = live_context(i915, thread->file);
+		if (IS_ERR(ctx))
+			return PTR_ERR(ctx);
 
-	ctx = live_context(i915, file);
-	if (IS_ERR(ctx)) {
-		err = PTR_ERR(ctx);
-		goto out_file;
+		prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
+		ctx->sched.priority = I915_USER_PRIORITY(prio);
 	}
 
-	prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
-	ctx->sched.priority = I915_USER_PRIORITY(prio);
-
 	ce = i915_gem_context_get_engine(ctx, BCS0);
 	GEM_BUG_ON(IS_ERR(ce));
 
@@ -431,19 +431,18 @@ static int igt_copy_blt_thread(void *arg)
 		err = 0;
 
 	intel_context_put(ce);
-out_file:
-	mock_file_free(i915, file);
 	return err;
 }
 
 static int igt_threaded_blt(struct drm_i915_private *i915,
-			    int (*blt_fn)(void *arg))
+			    int (*blt_fn)(void *arg),
+			    unsigned int flags)
+#define SINGLE_CTX BIT(0)
 {
 	struct igt_thread_arg *thread;
 	struct task_struct **tsk;
+	unsigned int n_cpus, i;
 	I915_RND_STATE(prng);
-	unsigned int n_cpus;
-	unsigned int i;
 	int err = 0;
 
 	n_cpus = num_online_cpus() + 1;
@@ -453,13 +452,27 @@ static int igt_threaded_blt(struct drm_i915_private *i915,
 		return 0;
 
 	thread = kcalloc(n_cpus, sizeof(struct igt_thread_arg), GFP_KERNEL);
-	if (!thread) {
-		kfree(tsk);
-		return 0;
+	if (!thread)
+		goto out_tsk;
+
+	thread[0].file = mock_file(i915);
+	if (IS_ERR(thread[0].file)) {
+		err = PTR_ERR(thread[0].file);
+		goto out_thread;
+	}
+
+	if (flags & SINGLE_CTX) {
+		thread[0].ctx = live_context(i915, thread[0].file);
+		if (IS_ERR(thread[0].ctx)) {
+			err = PTR_ERR(thread[0].ctx);
+			goto out_file;
+		}
 	}
 
 	for (i = 0; i < n_cpus; ++i) {
 		thread[i].i915 = i915;
+		thread[i].file = thread[0].file;
+		thread[i].ctx = thread[0].ctx;
 		thread[i].n_cpus = n_cpus;
 		thread[i].prng =
 			I915_RND_STATE_INITIALIZER(prandom_u32_state(&prng));
@@ -488,29 +501,42 @@ static int igt_threaded_blt(struct drm_i915_private *i915,
 		put_task_struct(tsk[i]);
 	}
 
-	kfree(tsk);
+out_file:
+	fput(thread[0].file);
+out_thread:
 	kfree(thread);
-
+out_tsk:
+	kfree(tsk);
 	return err;
 }
 
 static int igt_fill_blt(void *arg)
 {
-	return igt_threaded_blt(arg, igt_fill_blt_thread);
+	return igt_threaded_blt(arg, igt_fill_blt_thread, 0);
+}
+
+static int igt_fill_blt_ctx0(void *arg)
+{
+	return igt_threaded_blt(arg, igt_fill_blt_thread, SINGLE_CTX);
 }
 
 static int igt_copy_blt(void *arg)
 {
-	return igt_threaded_blt(arg, igt_copy_blt_thread);
+	return igt_threaded_blt(arg, igt_copy_blt_thread, 0);
+}
+
+static int igt_copy_blt_ctx0(void *arg)
+{
+	return igt_threaded_blt(arg, igt_copy_blt_thread, SINGLE_CTX);
 }
 
 int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
-		SUBTEST(perf_fill_blt),
-		SUBTEST(perf_copy_blt),
 		SUBTEST(igt_fill_blt),
+		SUBTEST(igt_fill_blt_ctx0),
 		SUBTEST(igt_copy_blt),
+		SUBTEST(igt_copy_blt_ctx0),
 	};
 
 	if (intel_gt_is_wedged(&i915->gt))
@@ -521,3 +547,16 @@ int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
 
 	return i915_live_subtests(tests, i915);
 }
+
+int i915_gem_object_blt_perf_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(perf_fill_blt),
+		SUBTEST(perf_copy_blt),
+	};
+
+	if (intel_gt_is_wedged(&i915->gt))
+		return 0;
+
+	return i915_live_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 29b8984..384143aa 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -5,6 +5,7 @@
  */
 
 #include "mock_context.h"
+#include "selftests/mock_drm.h"
 #include "selftests/mock_gtt.h"
 
 struct i915_gem_context *
@@ -36,9 +37,7 @@ mock_context(struct drm_i915_private *i915,
 	if (name) {
 		struct i915_ppgtt *ppgtt;
 
-		ctx->name = kstrdup(name, GFP_KERNEL);
-		if (!ctx->name)
-			goto err_put;
+		strncpy(ctx->name, name, sizeof(ctx->name));
 
 		ppgtt = mock_ppgtt(i915, name);
 		if (!ppgtt)
@@ -74,16 +73,17 @@ void mock_init_contexts(struct drm_i915_private *i915)
 }
 
 struct i915_gem_context *
-live_context(struct drm_i915_private *i915, struct drm_file *file)
+live_context(struct drm_i915_private *i915, struct file *file)
 {
 	struct i915_gem_context *ctx;
 	int err;
+	u32 id;
 
 	ctx = i915_gem_create_context(i915, 0);
 	if (IS_ERR(ctx))
 		return ctx;
 
-	err = gem_context_register(ctx, file->driver_priv);
+	err = gem_context_register(ctx, to_drm_file(file)->driver_priv, &id);
 	if (err < 0)
 		goto err_ctx;
 
@@ -97,7 +97,16 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
 struct i915_gem_context *
 kernel_context(struct drm_i915_private *i915)
 {
-	return i915_gem_context_create_kernel(i915, I915_PRIORITY_NORMAL);
+	struct i915_gem_context *ctx;
+
+	ctx = i915_gem_create_context(i915, 0);
+	if (IS_ERR(ctx))
+		return ctx;
+
+	i915_gem_context_clear_bannable(ctx);
+	i915_gem_context_set_persistence(ctx);
+
+	return ctx;
 }
 
 void kernel_context_close(struct i915_gem_context *ctx)
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.h b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
index 0b92665..fb83d2f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
@@ -7,6 +7,9 @@
 #ifndef __MOCK_CONTEXT_H
 #define __MOCK_CONTEXT_H
 
+struct file;
+struct drm_i915_private;
+
 void mock_init_contexts(struct drm_i915_private *i915);
 
 struct i915_gem_context *
@@ -16,7 +19,7 @@ mock_context(struct drm_i915_private *i915,
 void mock_context_close(struct i915_gem_context *ctx);
 
 struct i915_gem_context *
-live_context(struct drm_i915_private *i915, struct drm_file *file);
+live_context(struct drm_i915_private *i915, struct file *file);
 
 struct i915_gem_context *kernel_context(struct drm_i915_private *i915);
 void kernel_context_close(struct i915_gem_context *ctx);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index b9e059d..9272bef 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -76,20 +76,6 @@ static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 	vm_unmap_ram(vaddr, mock->npages);
 }
 
-static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
-	struct mock_dmabuf *mock = to_mock(dma_buf);
-
-	return kmap(mock->pages[page_num]);
-}
-
-static void mock_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-	struct mock_dmabuf *mock = to_mock(dma_buf);
-
-	return kunmap(mock->pages[page_num]);
-}
-
 static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
 {
 	return -ENODEV;
@@ -99,8 +85,6 @@ static const struct dma_buf_ops mock_dmabuf_ops =  {
 	.map_dma_buf = mock_map_dma_buf,
 	.unmap_dma_buf = mock_unmap_dma_buf,
 	.release = mock_dmabuf_release,
-	.map = mock_dmabuf_kmap,
-	.unmap = mock_dmabuf_kunmap,
 	.mmap = mock_dmabuf_mmap,
 	.vmap = mock_dmabuf_vmap,
 	.vunmap = mock_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
index f0f8bbd..22818bb 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
@@ -14,7 +14,7 @@ struct mock_dmabuf {
 	struct page *pages[];
 };
 
-static struct mock_dmabuf *to_mock(struct dma_buf *buf)
+static inline struct mock_dmabuf *to_mock(struct dma_buf *buf)
 {
 	return buf->priv;
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
index 370360b..688511a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
@@ -7,6 +7,8 @@
 #ifndef __MOCK_GEM_OBJECT_H__
 #define __MOCK_GEM_OBJECT_H__
 
+#include "gem/i915_gem_object_types.h"
+
 struct mock_object {
 	struct drm_i915_gem_object base;
 };
diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile
deleted file mode 100644
index 7e73aa5..0000000
--- a/drivers/gpu/drm/i915/gt/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.c b/drivers/gpu/drm/i915/gt/debugfs_engines.c
new file mode 100644
index 0000000..6a5e9ab
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_engines.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "debugfs_engines.h"
+#include "debugfs_gt.h"
+#include "i915_drv.h" /* for_each_engine! */
+#include "intel_engine.h"
+
+static int engines_show(struct seq_file *m, void *data)
+{
+	struct intel_gt *gt = m->private;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	struct drm_printer p;
+
+	p = drm_seq_file_printer(m);
+	for_each_engine(engine, gt, id)
+		intel_engine_dump(engine, &p, "%s\n", engine->name);
+
+	return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(engines);
+
+void debugfs_engines_register(struct intel_gt *gt, struct dentry *root)
+{
+	static const struct debugfs_gt_file files[] = {
+		{ "engines", &engines_fops },
+	};
+
+	debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.h b/drivers/gpu/drm/i915/gt/debugfs_engines.h
new file mode 100644
index 0000000..f69257e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_engines.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef DEBUGFS_ENGINES_H
+#define DEBUGFS_ENGINES_H
+
+struct intel_gt;
+struct dentry;
+
+void debugfs_engines_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* DEBUGFS_ENGINES_H */
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c
new file mode 100644
index 0000000..75255aa
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include "debugfs_engines.h"
+#include "debugfs_gt.h"
+#include "debugfs_gt_pm.h"
+#include "i915_drv.h"
+
+void debugfs_gt_register(struct intel_gt *gt)
+{
+	struct dentry *root;
+
+	if (!gt->i915->drm.primary->debugfs_root)
+		return;
+
+	root = debugfs_create_dir("gt", gt->i915->drm.primary->debugfs_root);
+	if (IS_ERR(root))
+		return;
+
+	debugfs_engines_register(gt, root);
+	debugfs_gt_pm_register(gt, root);
+}
+
+void debugfs_gt_register_files(struct intel_gt *gt,
+			       struct dentry *root,
+			       const struct debugfs_gt_file *files,
+			       unsigned long count)
+{
+	while (count--) {
+		if (!files->eval || files->eval(gt))
+			debugfs_create_file(files->name,
+					    0444, root, gt,
+					    files->fops);
+
+		files++;
+	}
+}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.h b/drivers/gpu/drm/i915/gt/debugfs_gt.h
new file mode 100644
index 0000000..4ea0f06
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GT_H
+#define DEBUGFS_GT_H
+
+#include <linux/file.h>
+
+struct intel_gt;
+
+#define DEFINE_GT_DEBUGFS_ATTRIBUTE(__name)				\
+	static int __name ## _open(struct inode *inode, struct file *file) \
+{									\
+	return single_open(file, __name ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __name ## _fops = {			\
+	.owner = THIS_MODULE,						\
+	.open = __name ## _open,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+	.release = single_release,					\
+}
+
+void debugfs_gt_register(struct intel_gt *gt);
+
+struct debugfs_gt_file {
+	const char *name;
+	const struct file_operations *fops;
+	bool (*eval)(const struct intel_gt *gt);
+};
+
+void debugfs_gt_register_files(struct intel_gt *gt,
+			       struct dentry *root,
+			       const struct debugfs_gt_file *files,
+			       unsigned long count);
+
+#endif /* DEBUGFS_GT_H */
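
Adding a new per-GT debugfs file with these helpers takes three pieces: a seq_file show routine, the DEFINE_GT_DEBUGFS_ATTRIBUTE() wrapper to generate the file_operations, and one entry in a debugfs_gt_file table handed to debugfs_gt_register_files(). A hypothetical example (there is no "awake" file in the tree; yesno() comes in via i915_drv.h):

    static int awake_show(struct seq_file *m, void *data)
    {
        struct intel_gt *gt = m->private;

        seq_printf(m, "%s\n", yesno(gt->awake));
        return 0;
    }
    DEFINE_GT_DEBUGFS_ATTRIBUTE(awake);

    void debugfs_awake_register(struct intel_gt *gt, struct dentry *root)
    {
        static const struct debugfs_gt_file files[] = {
            { "awake", &awake_fops },
        };

        debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
    }
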
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
new file mode 100644
index 0000000..059c9e5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/seq_file.h>
+
+#include "debugfs_gt.h"
+#include "debugfs_gt_pm.h"
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_llc.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
+#include "intel_runtime_pm.h"
+#include "intel_sideband.h"
+#include "intel_uncore.h"
+
+static int fw_domains_show(struct seq_file *m, void *data)
+{
+	struct intel_gt *gt = m->private;
+	struct intel_uncore *uncore = gt->uncore;
+	struct intel_uncore_forcewake_domain *fw_domain;
+	unsigned int tmp;
+
+	seq_printf(m, "user.bypass_count = %u\n",
+		   uncore->user_forcewake_count);
+
+	for_each_fw_domain(fw_domain, uncore, tmp)
+		seq_printf(m, "%s.wake_count = %u\n",
+			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
+			   READ_ONCE(fw_domain->wake_count));
+
+	return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(fw_domains);
+
+static void print_rc6_res(struct seq_file *m,
+			  const char *title,
+			  const i915_reg_t reg)
+{
+	struct intel_gt *gt = m->private;
+	intel_wakeref_t wakeref;
+
+	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+		seq_printf(m, "%s %u (%llu us)\n", title,
+			   intel_uncore_read(gt->uncore, reg),
+			   intel_rc6_residency_us(&gt->rc6, reg));
+}
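
print_rc6_res() wraps its MMIO read in with_intel_runtime_pm(), a scoped helper that takes a runtime-PM wakeref for the duration of the statement body and releases it on exit, guaranteeing the device is out of runtime suspend while the register is read. The open-coded equivalent, as used further down in frequency_show() (reg stands for any i915_reg_t):

    intel_wakeref_t wakeref;
    u32 val;

    wakeref = intel_runtime_pm_get(gt->uncore->rpm);
    val = intel_uncore_read(gt->uncore, reg);
    intel_runtime_pm_put(gt->uncore->rpm, wakeref);
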
+
+static int vlv_drpc(struct seq_file *m)
+{
+	struct intel_gt *gt = m->private;
+	struct intel_uncore *uncore = gt->uncore;
+	u32 rcctl1, pw_status;
+
+	pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS);
+	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+
+	seq_printf(m, "RC6 Enabled: %s\n",
+		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
+					GEN6_RC_CTL_EI_MODE(1))));
+	seq_printf(m, "Render Power Well: %s\n",
+		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+	seq_printf(m, "Media Power Well: %s\n",
+		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+
+	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
+	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
+
+	return fw_domains_show(m, NULL);
+}
+
+static int gen6_drpc(struct seq_file *m)
+{
+	struct intel_gt *gt = m->private;
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
+	u32 gt_core_status, rcctl1, rc6vids = 0;
+	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
+
+	gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);
+
+	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+	if (INTEL_GEN(i915) >= 9) {
+		gen9_powergate_enable =
+			intel_uncore_read(uncore, GEN9_PG_ENABLE);
+		gen9_powergate_status =
+			intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
+	}
+
+	if (INTEL_GEN(i915) <= 7)
+		sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
+				       &rc6vids, NULL);
+
+	seq_printf(m, "RC1e Enabled: %s\n",
+		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+	seq_printf(m, "RC6 Enabled: %s\n",
+		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+	if (INTEL_GEN(i915) >= 9) {
+		seq_printf(m, "Render Well Gating Enabled: %s\n",
+			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
+		seq_printf(m, "Media Well Gating Enabled: %s\n",
+			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
+	}
+	seq_printf(m, "Deep RC6 Enabled: %s\n",
+		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+	seq_printf(m, "Deepest RC6 Enabled: %s\n",
+		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+	seq_puts(m, "Current RC state: ");
+	switch (gt_core_status & GEN6_RCn_MASK) {
+	case GEN6_RC0:
+		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
+			seq_puts(m, "Core Power Down\n");
+		else
+			seq_puts(m, "on\n");
+		break;
+	case GEN6_RC3:
+		seq_puts(m, "RC3\n");
+		break;
+	case GEN6_RC6:
+		seq_puts(m, "RC6\n");
+		break;
+	case GEN6_RC7:
+		seq_puts(m, "RC7\n");
+		break;
+	default:
+		seq_puts(m, "Unknown\n");
+		break;
+	}
+
+	seq_printf(m, "Core Power Down: %s\n",
+		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+	if (INTEL_GEN(i915) >= 9) {
+		seq_printf(m, "Render Power Well: %s\n",
+			   (gen9_powergate_status &
+			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
+		seq_printf(m, "Media Power Well: %s\n",
+			   (gen9_powergate_status &
+			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
+	}
+
+	/* Not exactly sure what this is */
+	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
+		      GEN6_GT_GFX_RC6_LOCKED);
+	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
+	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
+	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
+
+	if (INTEL_GEN(i915) <= 7) {
+		seq_printf(m, "RC6   voltage: %dmV\n",
+			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+		seq_printf(m, "RC6+  voltage: %dmV\n",
+			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+		seq_printf(m, "RC6++ voltage: %dmV\n",
+			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
+	}
+
+	return fw_domains_show(m, NULL);
+}
+
+static int ilk_drpc(struct seq_file *m)
+{
+	struct intel_gt *gt = m->private;
+	struct intel_uncore *uncore = gt->uncore;
+	u32 rgvmodectl, rstdbyctl;
+	u16 crstandvid;
+
+	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
+	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
+
+	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
+	seq_printf(m, "Boost freq: %d\n",
+		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+		   MEMMODE_BOOST_FREQ_SHIFT);
+	seq_printf(m, "HW control enabled: %s\n",
+		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
+	seq_printf(m, "SW control enabled: %s\n",
+		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
+	seq_printf(m, "Gated voltage change: %s\n",
+		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
+	seq_printf(m, "Starting frequency: P%d\n",
+		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+	seq_printf(m, "Max P-state: P%d\n",
+		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+	seq_printf(m, "Render standby enabled: %s\n",
+		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
+	seq_puts(m, "Current RS state: ");
+	switch (rstdbyctl & RSX_STATUS_MASK) {
+	case RSX_STATUS_ON:
+		seq_puts(m, "on\n");
+		break;
+	case RSX_STATUS_RC1:
+		seq_puts(m, "RC1\n");
+		break;
+	case RSX_STATUS_RC1E:
+		seq_puts(m, "RC1E\n");
+		break;
+	case RSX_STATUS_RS1:
+		seq_puts(m, "RS1\n");
+		break;
+	case RSX_STATUS_RS2:
+		seq_puts(m, "RS2 (RC6)\n");
+		break;
+	case RSX_STATUS_RS3:
+		seq_puts(m, "RC3 (RC6+)\n");
+		break;
+	default:
+		seq_puts(m, "unknown\n");
+		break;
+	}
+
+	return 0;
+}
+
+static int drpc_show(struct seq_file *m, void *unused)
+{
+	struct intel_gt *gt = m->private;
+	struct drm_i915_private *i915 = gt->i915;
+	intel_wakeref_t wakeref;
+	int err = -ENODEV;
+
+	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+			err = vlv_drpc(m);
+		else if (INTEL_GEN(i915) >= 6)
+			err = gen6_drpc(m);
+		else
+			err = ilk_drpc(m);
+	}
+
+	return err;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(drpc);
+
+static int frequency_show(struct seq_file *m, void *unused)
+{
+	struct intel_gt *gt = m->private;
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
+	struct intel_rps *rps = &gt->rps;
+	intel_wakeref_t wakeref;
+
+	wakeref = intel_runtime_pm_get(uncore->rpm);
+
+	if (IS_GEN(i915, 5)) {
+		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
+
+		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+			   MEMSTAT_VID_SHIFT);
+		seq_printf(m, "Current P-state: %d\n",
+			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		u32 rpmodectl, freq_sts;
+
+		rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
+		seq_printf(m, "Video Turbo Mode: %s\n",
+			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+		seq_printf(m, "HW control enabled: %s\n",
+			   yesno(rpmodectl & GEN6_RP_ENABLE));
+		seq_printf(m, "SW control enabled: %s\n",
+			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
+				  GEN6_RP_MEDIA_SW_MODE));
+
+		vlv_punit_get(i915);
+		freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+		vlv_punit_put(i915);
+
+		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
+		seq_printf(m, "DDR freq: %d MHz\n", i915->mem_freq);
+
+		seq_printf(m, "actual GPU freq: %d MHz\n",
+			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
+
+		seq_printf(m, "current GPU freq: %d MHz\n",
+			   intel_gpu_freq(rps, rps->cur_freq));
+
+		seq_printf(m, "max GPU freq: %d MHz\n",
+			   intel_gpu_freq(rps, rps->max_freq));
+
+		seq_printf(m, "min GPU freq: %d MHz\n",
+			   intel_gpu_freq(rps, rps->min_freq));
+
+		seq_printf(m, "idle GPU freq: %d MHz\n",
+			   intel_gpu_freq(rps, rps->idle_freq));
+
+		seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
+			   intel_gpu_freq(rps, rps->efficient_freq));
+	} else if (INTEL_GEN(i915) >= 6) {
+		u32 rp_state_limits;
+		u32 gt_perf_status;
+		u32 rp_state_cap;
+		u32 rpmodectl, rpinclimit, rpdeclimit;
+		u32 rpstat, cagf, reqf;
+		u32 rpupei, rpcurup, rpprevup;
+		u32 rpdownei, rpcurdown, rpprevdown;
+		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
+		int max_freq;
+
+		rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
+		if (IS_GEN9_LP(i915)) {
+			rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+			gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
+		} else {
+			rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+			gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS);
+		}
+
+		/* RPSTAT1 is in the GT power well */
+		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+		reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+		if (INTEL_GEN(i915) >= 9) {
+			reqf >>= 23;
+		} else {
+			reqf &= ~GEN6_TURBO_DISABLE;
+			if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+				reqf >>= 24;
+			else
+				reqf >>= 25;
+		}
+		reqf = intel_gpu_freq(rps, reqf);
+
+		rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
+		rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
+		rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);
+
+		rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1);
+		rpupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+		rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
+		rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
+		rpdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+		rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
+		rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
+		cagf = intel_rps_read_actual_frequency(rps);
+
+		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+		if (INTEL_GEN(i915) >= 11) {
+			pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+			pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
+			/*
+			 * The equivalent to the PM ISR & IIR cannot be read
+			 * without affecting the current state of the system
+			 */
+			pm_isr = 0;
+			pm_iir = 0;
+		} else if (INTEL_GEN(i915) >= 8) {
+			pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
+			pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
+			pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
+			pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2));
+		} else {
+			pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
+			pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
+			pm_isr = intel_uncore_read(uncore, GEN6_PMISR);
+			pm_iir = intel_uncore_read(uncore, GEN6_PMIIR);
+		}
+		pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);
+
+		seq_printf(m, "Video Turbo Mode: %s\n",
+			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+		seq_printf(m, "HW control enabled: %s\n",
+			   yesno(rpmodectl & GEN6_RP_ENABLE));
+		seq_printf(m, "SW control enabled: %s\n",
+			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
+				  GEN6_RP_MEDIA_SW_MODE));
+
+		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+			   pm_ier, pm_imr, pm_mask);
+		if (INTEL_GEN(i915) <= 10)
+			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
+				   pm_isr, pm_iir);
+		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
+			   rps->pm_intrmsk_mbz);
+		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+		seq_printf(m, "Render p-state ratio: %d\n",
+			   (gt_perf_status & (INTEL_GEN(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
+		seq_printf(m, "Render p-state VID: %d\n",
+			   gt_perf_status & 0xff);
+		seq_printf(m, "Render p-state limit: %d\n",
+			   rp_state_limits & 0xff);
+		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
+		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
+		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
+		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
+		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
+		seq_printf(m, "CAGF: %dMHz\n", cagf);
+		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
+			   rpupei, GT_PM_INTERVAL_TO_US(i915, rpupei));
+		seq_printf(m, "RP CUR UP: %d (%dus)\n",
+			   rpcurup, GT_PM_INTERVAL_TO_US(i915, rpcurup));
+		seq_printf(m, "RP PREV UP: %d (%dus)\n",
+			   rpprevup, GT_PM_INTERVAL_TO_US(i915, rpprevup));
+		seq_printf(m, "Up threshold: %d%%\n",
+			   rps->power.up_threshold);
+
+		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
+			   rpdownei, GT_PM_INTERVAL_TO_US(i915, rpdownei));
+		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
+			   rpcurdown, GT_PM_INTERVAL_TO_US(i915, rpcurdown));
+		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
+			   rpprevdown, GT_PM_INTERVAL_TO_US(i915, rpprevdown));
+		seq_printf(m, "Down threshold: %d%%\n",
+			   rps->power.down_threshold);
+
+		max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
+			    rp_state_cap >> 16) & 0xff;
+		max_freq *= (IS_GEN9_BC(i915) ||
+			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+			   intel_gpu_freq(rps, max_freq));
+
+		max_freq = (rp_state_cap & 0xff00) >> 8;
+		max_freq *= (IS_GEN9_BC(i915) ||
+			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+			   intel_gpu_freq(rps, max_freq));
+
+		max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
+			    rp_state_cap >> 0) & 0xff;
+		max_freq *= (IS_GEN9_BC(i915) ||
+			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+			   intel_gpu_freq(rps, max_freq));
+		seq_printf(m, "Max overclocked frequency: %dMHz\n",
+			   intel_gpu_freq(rps, rps->max_freq));
+
+		seq_printf(m, "Current freq: %d MHz\n",
+			   intel_gpu_freq(rps, rps->cur_freq));
+		seq_printf(m, "Actual freq: %d MHz\n", cagf);
+		seq_printf(m, "Idle freq: %d MHz\n",
+			   intel_gpu_freq(rps, rps->idle_freq));
+		seq_printf(m, "Min freq: %d MHz\n",
+			   intel_gpu_freq(rps, rps->min_freq));
+		seq_printf(m, "Boost freq: %d MHz\n",
+			   intel_gpu_freq(rps, rps->boost_freq));
+		seq_printf(m, "Max freq: %d MHz\n",
+			   intel_gpu_freq(rps, rps->max_freq));
+		seq_printf(m,
+			   "efficient (RPe) frequency: %d MHz\n",
+			   intel_gpu_freq(rps, rps->efficient_freq));
+	} else {
+		seq_puts(m, "no P-state info available\n");
+	}
+
+	seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
+	seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
+	seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
+
+	intel_runtime_pm_put(uncore->rpm, wakeref);
+
+	return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(frequency);
+
+static int llc_show(struct seq_file *m, void *data)
+{
+	struct intel_gt *gt = m->private;
+	struct drm_i915_private *i915 = gt->i915;
+	const bool edram = INTEL_GEN(i915) > 8;
+	struct intel_rps *rps = &gt->rps;
+	unsigned int max_gpu_freq, min_gpu_freq;
+	intel_wakeref_t wakeref;
+	int gpu_freq, ia_freq;
+
+	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(i915)));
+	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
+		   i915->edram_size_mb);
+
+	min_gpu_freq = rps->min_freq;
+	max_gpu_freq = rps->max_freq;
+	if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+		/* Convert GT frequency to 50 HZ units */
+		min_gpu_freq /= GEN9_FREQ_SCALER;
+		max_gpu_freq /= GEN9_FREQ_SCALER;
+	}
+
+	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+
+	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
+		ia_freq = gpu_freq;
+		sandybridge_pcode_read(i915,
+				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
+				       &ia_freq, NULL);
+		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
+			   intel_gpu_freq(rps,
+					  (gpu_freq *
+					   (IS_GEN9_BC(i915) ||
+					    INTEL_GEN(i915) >= 10 ?
+					    GEN9_FREQ_SCALER : 1))),
+			   ((ia_freq >> 0) & 0xff) * 100,
+			   ((ia_freq >> 8) & 0xff) * 100);
+	}
+	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+
+	return 0;
+}
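
The GEN6_PCODE_READ_MIN_FREQ_TABLE reply printed in llc_show() packs two ratios into one word: per the table header above, bits 7:0 are the effective CPU (IA) ratio and bits 15:8 the effective ring ratio, both in 100 MHz units. Decoding sketch:

    unsigned int ia_mhz   = ((ia_freq >> 0) & 0xff) * 100;  /* effective CPU freq */
    unsigned int ring_mhz = ((ia_freq >> 8) & 0xff) * 100;  /* effective ring freq */
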
+
+static bool llc_eval(const struct intel_gt *gt)
+{
+	return HAS_LLC(gt->i915);
+}
+
+DEFINE_GT_DEBUGFS_ATTRIBUTE(llc);
+
+static const char *rps_power_to_str(unsigned int power)
+{
+	static const char * const strings[] = {
+		[LOW_POWER] = "low power",
+		[BETWEEN] = "mixed",
+		[HIGH_POWER] = "high power",
+	};
+
+	if (power >= ARRAY_SIZE(strings) || !strings[power])
+		return "unknown";
+
+	return strings[power];
+}
+
+static int rps_boost_show(struct seq_file *m, void *data)
+{
+	struct intel_gt *gt = m->private;
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_rps *rps = &gt->rps;
+
+	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
+	seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
+	seq_printf(m, "Boosts outstanding? %d\n",
+		   atomic_read(&rps->num_waiters));
+	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
+	seq_printf(m, "Frequency requested %d, actual %d\n",
+		   intel_gpu_freq(rps, rps->cur_freq),
+		   intel_rps_read_actual_frequency(rps));
+	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
+		   intel_gpu_freq(rps, rps->min_freq),
+		   intel_gpu_freq(rps, rps->min_freq_softlimit),
+		   intel_gpu_freq(rps, rps->max_freq_softlimit),
+		   intel_gpu_freq(rps, rps->max_freq));
+	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
+		   intel_gpu_freq(rps, rps->idle_freq),
+		   intel_gpu_freq(rps, rps->efficient_freq),
+		   intel_gpu_freq(rps, rps->boost_freq));
+
+	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
+
+	if (INTEL_GEN(i915) >= 6 && rps->enabled && gt->awake) {
+		struct intel_uncore *uncore = gt->uncore;
+		u32 rpup, rpupei;
+		u32 rpdown, rpdownei;
+
+		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+		rpup = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
+		rpupei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
+		rpdown = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
+		rpdownei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
+		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
+			   rps_power_to_str(rps->power.mode));
+		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
+			   rpup && rpupei ? 100 * rpup / rpupei : 0,
+			   rps->power.up_threshold);
+		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
+			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
+			   rps->power.down_threshold);
+	} else {
+		seq_puts(m, "\nRPS Autotuning inactive\n");
+	}
+
+	return 0;
+}
+
+static bool rps_eval(const struct intel_gt *gt)
+{
+	return HAS_RPS(gt->i915);
+}
+
+DEFINE_GT_DEBUGFS_ATTRIBUTE(rps_boost);
+
+void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root)
+{
+	static const struct debugfs_gt_file files[] = {
+		{ "drpc", &drpc_fops, NULL },
+		{ "frequency", &frequency_fops, NULL },
+		{ "forcewake", &fw_domains_fops, NULL },
+		{ "llc", &llc_fops, llc_eval },
+		{ "rps_boost", &rps_boost_fops, rps_eval },
+	};
+
+	debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h
new file mode 100644
index 0000000..4cf5f5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GT_PM_H
+#define DEBUGFS_GT_PM_H
+
+struct intel_gt;
+struct dentry;
+
+void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* DEBUGFS_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
new file mode 100644
index 0000000..f4fec7e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/log2.h>
+
+#include "gen6_ppgtt.h"
+#include "i915_scatterlist.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_gt.h"
+
+/* Write pde (index) from the page directory @pd to the page table @pt */
+static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
+				  const unsigned int pde,
+				  const struct i915_page_table *pt)
+{
+	/* Caller needs to make sure the write completes if necessary */
+	iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
+		  ppgtt->pd_addr + pde);
+}
+
+void gen7_ppgtt_enable(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	u32 ecochk;
+
+	intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
+
+	ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+	if (IS_HASWELL(i915)) {
+		ecochk |= ECOCHK_PPGTT_WB_HSW;
+	} else {
+		ecochk |= ECOCHK_PPGTT_LLC_IVB;
+		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
+	}
+	intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
+
+	for_each_engine(engine, gt, id) {
+		/* GFX_MODE is per-ring on gen7+ */
+		ENGINE_WRITE(engine,
+			     RING_MODE_GEN7,
+			     _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+	}
+}
+
+void gen6_ppgtt_enable(struct intel_gt *gt)
+{
+	struct intel_uncore *uncore = gt->uncore;
+
+	intel_uncore_rmw(uncore,
+			 GAC_ECO_BITS,
+			 0,
+			 ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
+
+	intel_uncore_rmw(uncore,
+			 GAB_CTL,
+			 0,
+			 GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+	intel_uncore_rmw(uncore,
+			 GAM_ECOCHK,
+			 0,
+			 ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
+
+	if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
+		intel_uncore_write(uncore,
+				   GFX_MODE,
+				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+}
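
Both enable routines use intel_uncore_rmw(uncore, reg, clear, set), a read-modify-write helper: read the register, drop the bits in 'clear', OR in 'set', and write the result back. With clear == 0 it simply sets bits, so the first call in gen6_ppgtt_enable() is equivalent in effect to:

    u32 eco = intel_uncore_read(uncore, GAC_ECO_BITS);

    intel_uncore_write(uncore, GAC_ECO_BITS,
                       eco | ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
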
+
+/* PPGTT support for Sandybridge/Gen6 and later */
+static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
+				   u64 start, u64 length)
+{
+	struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+	const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+	const gen6_pte_t scratch_pte = vm->scratch[0].encode;
+	unsigned int pde = first_entry / GEN6_PTES;
+	unsigned int pte = first_entry % GEN6_PTES;
+	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+
+	while (num_entries) {
+		struct i915_page_table * const pt =
+			i915_pt_entry(ppgtt->base.pd, pde++);
+		const unsigned int count = min(num_entries, GEN6_PTES - pte);
+		gen6_pte_t *vaddr;
+
+		GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
+
+		num_entries -= count;
+
+		GEM_BUG_ON(count > atomic_read(&pt->used));
+		if (!atomic_sub_return(count, &pt->used))
+			ppgtt->scan_for_unused_pt = true;
+
+		/*
+		 * Note that the hw doesn't support removing PDE on the fly
+		 * (they are cached inside the context with no means to
+		 * invalidate the cache), so we can only reset the PTE
+		 * entries back to scratch.
+		 */
+
+		vaddr = kmap_atomic_px(pt);
+		memset32(vaddr + pte, scratch_pte, count);
+		kunmap_atomic(vaddr);
+
+		pte = 0;
+	}
+}
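
The clear and insert paths convert a GTT byte offset into a (pde, pte) pair: the offset selects a page-table entry, GEN6_PTES entries (1024 with 4 KiB pages and 4-byte PTEs) fill one page table, and the PDE index picks which table. A minimal sketch of that index math (gen6_offset_to_index is a made-up helper):

    static inline void gen6_offset_to_index(u64 offset,
                                            unsigned int *pde,
                                            unsigned int *pte)
    {
        const u64 entry = offset / I915_GTT_PAGE_SIZE;

        *pde = entry / GEN6_PTES;   /* which page table */
        *pte = entry % GEN6_PTES;   /* slot within that table */
    }
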
+
+static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
+				      struct i915_vma *vma,
+				      enum i915_cache_level cache_level,
+				      u32 flags)
+{
+	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+	struct i915_page_directory * const pd = ppgtt->pd;
+	unsigned int first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
+	unsigned int act_pt = first_entry / GEN6_PTES;
+	unsigned int act_pte = first_entry % GEN6_PTES;
+	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
+	struct sgt_dma iter = sgt_dma(vma);
+	gen6_pte_t *vaddr;
+
+	GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
+
+	vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
+	do {
+		GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
+		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
+
+		iter.dma += I915_GTT_PAGE_SIZE;
+		if (iter.dma == iter.max) {
+			iter.sg = __sg_next(iter.sg);
+			if (!iter.sg)
+				break;
+
+			iter.dma = sg_dma_address(iter.sg);
+			iter.max = iter.dma + iter.sg->length;
+		}
+
+		if (++act_pte == GEN6_PTES) {
+			kunmap_atomic(vaddr);
+			vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
+			act_pte = 0;
+		}
+	} while (1);
+	kunmap_atomic(vaddr);
+
+	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+}
+
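+/*
+ * Write the live PDEs for the given range into the page directory that
+ * lives in the GGTT, then invalidate the TLBs so the hw picks them up.
+ */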
+static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
+{
+	struct i915_page_directory * const pd = ppgtt->base.pd;
+	struct i915_page_table *pt;
+	unsigned int pde;
+
+	start = round_down(start, SZ_64K);
+	end = round_up(end, SZ_64K) - start;
+
+	mutex_lock(&ppgtt->flush);
+
+	gen6_for_each_pde(pt, pd, start, end, pde)
+		gen6_write_pde(ppgtt, pde, pt);
+
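+	/* Flush the PDE writes with a posting read before invalidating */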
+	mb();
+	ioread32(ppgtt->pd_addr + pde - 1);
+	gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
+	mb();
+
+	mutex_unlock(&ppgtt->flush);
+}
+
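+/*
+ * Allocate (or reuse) the page tables covering [start, start + length) and
+ * bump their use counts; on failure the partially populated range is reset
+ * back to scratch.
+ */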
+static int gen6_alloc_va_range(struct i915_address_space *vm,
+			       u64 start, u64 length)
+{
+	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+	struct i915_page_directory * const pd = ppgtt->base.pd;
+	struct i915_page_table *pt, *alloc = NULL;
+	intel_wakeref_t wakeref;
+	u64 from = start;
+	unsigned int pde;
+	int ret = 0;
+
+	wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
+	spin_lock(&pd->lock);
+	gen6_for_each_pde(pt, pd, start, length, pde) {
+		const unsigned int count = gen6_pte_count(start, length);
+
+		if (px_base(pt) == px_base(&vm->scratch[1])) {
+			spin_unlock(&pd->lock);
+
+			pt = fetch_and_zero(&alloc);
+			if (!pt)
+				pt = alloc_pt(vm);
+			if (IS_ERR(pt)) {
+				ret = PTR_ERR(pt);
+				goto unwind_out;
+			}
+
+			fill32_px(pt, vm->scratch[0].encode);
+
+			spin_lock(&pd->lock);
+			if (pd->entry[pde] == &vm->scratch[1]) {
+				pd->entry[pde] = pt;
+			} else {
+				alloc = pt;
+				pt = pd->entry[pde];
+			}
+		}
+
+		atomic_add(count, &pt->used);
+	}
+	spin_unlock(&pd->lock);
+
+	if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND))
+		gen6_flush_pd(ppgtt, from, start);
+
+	goto out;
+
+unwind_out:
+	gen6_ppgtt_clear_range(vm, from, start - from);
+out:
+	if (alloc)
+		free_px(vm, alloc);
+	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+	return ret;
+}
+
+static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
+{
+	struct i915_address_space * const vm = &ppgtt->base.vm;
+	struct i915_page_directory * const pd = ppgtt->base.pd;
+	int ret;
+
+	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
+	if (ret)
+		return ret;
+
+	vm->scratch[0].encode =
+		vm->pte_encode(px_dma(&vm->scratch[0]),
+			       I915_CACHE_NONE, PTE_READ_ONLY);
+
+	if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
+		cleanup_scratch_page(vm);
+		return -ENOMEM;
+	}
+
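+	/*
+	 * Fill the scratch page table with scratch PTEs and point every
+	 * PDE at it.
+	 */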
+	fill32_px(&vm->scratch[1], vm->scratch[0].encode);
+	memset_p(pd->entry, &vm->scratch[1], I915_PDES);
+
+	return 0;
+}
+
+static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
+{
+	struct i915_page_directory * const pd = ppgtt->base.pd;
+	struct i915_page_dma * const scratch =
+		px_base(&ppgtt->base.vm.scratch[1]);
+	struct i915_page_table *pt;
+	u32 pde;
+
+	gen6_for_all_pdes(pt, pd, pde)
+		if (px_base(pt) != scratch)
+			free_px(&ppgtt->base.vm, pt);
+}
+
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+
+	__i915_vma_put(ppgtt->vma);
+
+	gen6_ppgtt_free_pd(ppgtt);
+	free_scratch(vm);
+
+	mutex_destroy(&ppgtt->flush);
+	mutex_destroy(&ppgtt->pin_mutex);
+	kfree(ppgtt->base.pd);
+}
+
+static int pd_vma_set_pages(struct i915_vma *vma)
+{
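+	/* No backing pages for the PD; use an error pointer as a marker */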
+	vma->pages = ERR_PTR(-ENODEV);
+	return 0;
+}
+
+static void pd_vma_clear_pages(struct i915_vma *vma)
+{
+	GEM_BUG_ON(!vma->pages);
+
+	vma->pages = NULL;
+}
+
+static int pd_vma_bind(struct i915_vma *vma,
+		       enum i915_cache_level cache_level,
+		       u32 unused)
+{
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+	struct gen6_ppgtt *ppgtt = vma->private;
+	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
+
+	px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
+
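+	/* The PD is now resident in the GGTT; write out all live PDEs */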
+	gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
+	return 0;
+}
+
+static void pd_vma_unbind(struct i915_vma *vma)
+{
+	struct gen6_ppgtt *ppgtt = vma->private;
+	struct i915_page_directory * const pd = ppgtt->base.pd;
+	struct i915_page_dma * const scratch =
+		px_base(&ppgtt->base.vm.scratch[1]);
+	struct i915_page_table *pt;
+	unsigned int pde;
+
+	if (!ppgtt->scan_for_unused_pt)
+		return;
+
+	/* Free all page tables that are no longer in use */
+	gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
+		if (px_base(pt) == scratch || atomic_read(&pt->used))
+			continue;
+
+		free_px(&ppgtt->base.vm, pt);
+		pd->entry[pde] = scratch;
+	}
+
+	ppgtt->scan_for_unused_pt = false;
+}
+
+static const struct i915_vma_ops pd_vma_ops = {
+	.set_pages = pd_vma_set_pages,
+	.clear_pages = pd_vma_clear_pages,
+	.bind_vma = pd_vma_bind,
+	.unbind_vma = pd_vma_unbind,
+};
+
+static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
+{
+	struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
+	struct i915_vma *vma;
+
+	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+	GEM_BUG_ON(size > ggtt->vm.total);
+
+	vma = i915_vma_alloc();
+	if (!vma)
+		return ERR_PTR(-ENOMEM);
+
+	i915_active_init(&vma->active, NULL, NULL);
+
+	kref_init(&vma->ref);
+	mutex_init(&vma->pages_mutex);
+	vma->vm = i915_vm_get(&ggtt->vm);
+	vma->ops = &pd_vma_ops;
+	vma->private = ppgtt;
+
+	vma->size = size;
+	vma->fence_size = size;
+	atomic_set(&vma->flags, I915_VMA_GGTT);
+	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
+
+	INIT_LIST_HEAD(&vma->obj_link);
+	INIT_LIST_HEAD(&vma->closed_link);
+
+	return vma;
+}
+
+int gen6_ppgtt_pin(struct i915_ppgtt *base)
+{
+	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+	int err;
+
+	GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));
+
+	/*
+	 * Work around the limited maximum vma->pin_count and the aliasing_ppgtt
+	 * which will be pinned into every active context.
+	 * (When vma->pin_count becomes atomic, I expect we will naturally
+	 * need a larger, unpacked, type and kill this redundancy.)
+	 */
+	if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
+		return 0;
+
+	if (mutex_lock_interruptible(&ppgtt->pin_mutex))
+		return -EINTR;
+
+	/*
+	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+	 * allocator works in address space sizes, so it's multiplied by page
+	 * size. We allocate at the top of the GTT to avoid fragmentation.
+	 */
+	err = 0;
+	if (!atomic_read(&ppgtt->pin_count))
+		err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH);
+	if (!err)
+		atomic_inc(&ppgtt->pin_count);
+	mutex_unlock(&ppgtt->pin_mutex);
+
+	return err;
+}
+
+void gen6_ppgtt_unpin(struct i915_ppgtt *base)
+{
+	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+	GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
+	if (atomic_dec_and_test(&ppgtt->pin_count))
+		i915_vma_unpin(ppgtt->vma);
+}
+
+void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
+{
+	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+	if (!atomic_read(&ppgtt->pin_count))
+		return;
+
+	i915_vma_unpin(ppgtt->vma);
+	atomic_set(&ppgtt->pin_count, 0);
+}
+
+struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
+{
+	struct i915_ggtt * const ggtt = gt->ggtt;
+	struct gen6_ppgtt *ppgtt;
+	int err;
+
+	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+	if (!ppgtt)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&ppgtt->flush);
+	mutex_init(&ppgtt->pin_mutex);
+
+	ppgtt_init(&ppgtt->base, gt);
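+	/* gen6 uses a two-level page table: pd -> pt */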
+	ppgtt->base.vm.top = 1;
+
+	ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
+	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
+	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
+	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
+	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
+
+	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
+
+	ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
+	if (!ppgtt->base.pd) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	err = gen6_ppgtt_init_scratch(ppgtt);
+	if (err)
+		goto err_pd;
+
+	ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
+	if (IS_ERR(ppgtt->vma)) {
+		err = PTR_ERR(ppgtt->vma);
+		goto err_scratch;
+	}
+
+	return &ppgtt->base;
+
+err_scratch:
+	free_scratch(&ppgtt->base.vm);
+err_pd:
+	kfree(ppgtt->base.pd);
+err_free:
+	mutex_destroy(&ppgtt->pin_mutex);
+	kfree(ppgtt);
+	return ERR_PTR(err);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
new file mode 100644
index 0000000..72e4818
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN6_PPGTT_H__
+#define __GEN6_PPGTT_H__
+
+#include "intel_gtt.h"
+
+struct gen6_ppgtt {
+	struct i915_ppgtt base;
+
+	struct mutex flush;
+	struct i915_vma *vma;
+	gen6_pte_t __iomem *pd_addr;
+
+	atomic_t pin_count;
+	struct mutex pin_mutex;
+
+	bool scan_for_unused_pt;
+};
+
+static inline u32 gen6_pte_index(u32 addr)
+{
+	return i915_pte_index(addr, GEN6_PDE_SHIFT);
+}
+
+static inline u32 gen6_pte_count(u32 addr, u32 length)
+{
+	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
+}
+
+static inline u32 gen6_pde_index(u32 addr)
+{
+	return i915_pde_index(addr, GEN6_PDE_SHIFT);
+}
+
+#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
+
+static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
+{
+	BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
+	return __to_gen6_ppgtt(base);
+}
+
+/*
+ * gen6_for_each_pde() iterates over every pde from start until start+length.
+ * If start and start+length are not aligned to a PDE boundary, the macro
+ * rounds down and up as needed. With start=0 and length=2G it effectively
+ * iterates over every PDE in the system. The macro modifies ALL its
+ * parameters except 'pd', so each of the other parameters should preferably
+ * be a simple variable, or at most an lvalue with no side-effects!
+ */
+#define gen6_for_each_pde(pt, pd, start, length, iter)			\
+	for (iter = gen6_pde_index(start);				\
+	     length > 0 && iter < I915_PDES &&				\
+		     (pt = i915_pt_entry(pd, iter), true);		\
+	     ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT);		\
+		    temp = min(temp - start, length);			\
+		    start += temp, length -= temp; }), ++iter)
+
+#define gen6_for_all_pdes(pt, pd, iter)					\
+	for (iter = 0;							\
+	     iter < I915_PDES &&					\
+		     (pt = i915_pt_entry(pd, iter), true);		\
+	     ++iter)
+
+int gen6_ppgtt_pin(struct i915_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_ppgtt *base);
+void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
+void gen6_ppgtt_enable(struct intel_gt *gt);
+void gen7_ppgtt_enable(struct intel_gt *gt);
+struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
new file mode 100644
index 0000000..4d1de2d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/log2.h>
+
+#include "gen8_ppgtt.h"
+#include "i915_scatterlist.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_gt.h"
+#include "intel_gtt.h"
+
+static u64 gen8_pde_encode(const dma_addr_t addr,
+			   const enum i915_cache_level level)
+{
+	u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;
+
+	if (level != I915_CACHE_NONE)
+		pde |= PPAT_CACHED_PDE;
+	else
+		pde |= PPAT_UNCACHED;
+
+	return pde;
+}
+
+static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
+{
+	struct drm_i915_private *i915 = ppgtt->vm.i915;
+	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
+	enum vgt_g2v_type msg;
+	int i;
+
+	if (create)
+		atomic_inc(px_used(ppgtt->pd)); /* never remove */
+	else
+		atomic_dec(px_used(ppgtt->pd));
+
+	mutex_lock(&i915->vgpu.lock);
+
+	if (i915_vm_is_4lvl(&ppgtt->vm)) {
+		const u64 daddr = px_dma(ppgtt->pd);
+
+		intel_uncore_write(uncore,
+				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
+		intel_uncore_write(uncore,
+				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
+
+		msg = create ?
+			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
+			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
+	} else {
+		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+			intel_uncore_write(uncore,
+					   vgtif_reg(pdp[i].lo),
+					   lower_32_bits(daddr));
+			intel_uncore_write(uncore,
+					   vgtif_reg(pdp[i].hi),
+					   upper_32_bits(daddr));
+		}
+
+		msg = create ?
+			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
+			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
+	}
+
+	/* g2v_notify atomically (via hv trap) consumes the message packet. */
+	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);
+
+	mutex_unlock(&i915->vgpu.lock);
+}
+
+/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
+#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
+#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
+#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
+#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
+#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
+#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
+#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
+
+#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
+
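+/*
+ * Return in *idx the first entry index at level @lvl for @start, and return
+ * the number of entries needed to reach @end, clamped so that we never step
+ * past the parent entry covering @start.
+ */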
+static inline unsigned int
+gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
+{
+	const int shift = gen8_pd_shift(lvl);
+	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
+
+	GEM_BUG_ON(start >= end);
+	end += ~mask >> gen8_pd_shift(1);
+
+	*idx = i915_pde_index(start, shift);
+	if ((start ^ end) & mask)
+		return GEN8_PDES - *idx;
+	else
+		return i915_pde_index(end, shift) - *idx;
+}
+
+static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
+{
+	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
+
+	GEM_BUG_ON(start >= end);
+	return (start ^ end) & mask && (start & ~mask) == 0;
+}
+
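+/* Number of PTEs between @start and @end, clamped to a single page table */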
+static inline unsigned int gen8_pt_count(u64 start, u64 end)
+{
+	GEM_BUG_ON(start >= end);
+	if ((start ^ end) >> gen8_pd_shift(1))
+		return GEN8_PDES - (start & (GEN8_PDES - 1));
+	else
+		return end - start;
+}
+
+static inline unsigned int
+gen8_pd_top_count(const struct i915_address_space *vm)
+{
+	unsigned int shift = __gen8_pte_shift(vm->top);
+	return (vm->total + (1ull << shift) - 1) >> shift;
+}
+
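+/* With a 3-level ppgtt, the top-level pd is itself the only pdp */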
+static inline struct i915_page_directory *
+gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
+{
+	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
+
+	if (vm->top == 2)
+		return ppgtt->pd;
+	else
+		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
+}
+
+static inline struct i915_page_directory *
+gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
+{
+	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
+}
+
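+/* Recursively free the page directory tree, depth first */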
+static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
+				 struct i915_page_directory *pd,
+				 int count, int lvl)
+{
+	if (lvl) {
+		void **pde = pd->entry;
+
+		do {
+			if (!*pde)
+				continue;
+
+			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
+		} while (pde++, --count);
+	}
+
+	free_px(vm, pd);
+}
+
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+{
+	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+	if (intel_vgpu_active(vm->i915))
+		gen8_ppgtt_notify_vgt(ppgtt, false);
+
+	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
+	free_scratch(vm);
+}
+
+static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
+			      struct i915_page_directory * const pd,
+			      u64 start, const u64 end, int lvl)
+{
+	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+	unsigned int idx, len;
+
+	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
+
+	len = gen8_pd_range(start, end, lvl--, &idx);
+	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
+	    __func__, vm, lvl + 1, start, end,
+	    idx, len, atomic_read(px_used(pd)));
+	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
+
+	do {
+		struct i915_page_table *pt = pd->entry[idx];
+
+		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
+		    gen8_pd_contains(start, end, lvl)) {
+			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
+			    __func__, vm, lvl + 1, idx, start, end);
+			clear_pd_entry(pd, idx, scratch);
+			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
+			start += (u64)I915_PDES << gen8_pd_shift(lvl);
+			continue;
+		}
+
+		if (lvl) {
+			start = __gen8_ppgtt_clear(vm, as_pd(pt),
+						   start, end, lvl);
+		} else {
+			unsigned int count;
+			u64 *vaddr;
+
+			count = gen8_pt_count(start, end);
+			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
+			    __func__, vm, lvl, start, end,
+			    gen8_pd_index(start, 0), count,
+			    atomic_read(&pt->used));
+			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
+
+			vaddr = kmap_atomic_px(pt);
+			memset64(vaddr + gen8_pd_index(start, 0),
+				 vm->scratch[0].encode,
+				 count);
+			kunmap_atomic(vaddr);
+
+			atomic_sub(count, &pt->used);
+			start += count;
+		}
+
+		if (release_pd_entry(pd, idx, pt, scratch))
+			free_px(vm, pt);
+	} while (idx++, --len);
+
+	return start;
+}
+
+static void gen8_ppgtt_clear(struct i915_address_space *vm,
+			     u64 start, u64 length)
+{
+	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+	GEM_BUG_ON(range_overflows(start, length, vm->total));
+
+	start >>= GEN8_PTE_SHIFT;
+	length >>= GEN8_PTE_SHIFT;
+	GEM_BUG_ON(length == 0);
+
+	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
+			   start, start + length, vm->top);
+}
+
+static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
+			      struct i915_page_directory * const pd,
+			      u64 * const start, const u64 end, int lvl)
+{
+	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+	struct i915_page_table *alloc = NULL;
+	unsigned int idx, len;
+	int ret = 0;
+
+	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
+
+	len = gen8_pd_range(*start, end, lvl--, &idx);
+	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
+	    __func__, vm, lvl + 1, *start, end,
+	    idx, len, atomic_read(px_used(pd)));
+	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
+
+	spin_lock(&pd->lock);
+	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
+	do {
+		struct i915_page_table *pt = pd->entry[idx];
+
+		if (!pt) {
+			spin_unlock(&pd->lock);
+
+			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
+			    __func__, vm, lvl + 1, idx);
+
+			pt = fetch_and_zero(&alloc);
+			if (lvl) {
+				if (!pt) {
+					pt = &alloc_pd(vm)->pt;
+					if (IS_ERR(pt)) {
+						ret = PTR_ERR(pt);
+						goto out;
+					}
+				}
+
+				fill_px(pt, vm->scratch[lvl].encode);
+			} else {
+				if (!pt) {
+					pt = alloc_pt(vm);
+					if (IS_ERR(pt)) {
+						ret = PTR_ERR(pt);
+						goto out;
+					}
+				}
+
+				if (intel_vgpu_active(vm->i915) ||
+				    gen8_pt_count(*start, end) < I915_PDES)
+					fill_px(pt, vm->scratch[lvl].encode);
+			}
+
+			spin_lock(&pd->lock);
+			if (likely(!pd->entry[idx]))
+				set_pd_entry(pd, idx, pt);
+			else
+				alloc = pt, pt = pd->entry[idx];
+		}
+
+		if (lvl) {
+			atomic_inc(&pt->used);
+			spin_unlock(&pd->lock);
+
+			ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
+						 start, end, lvl);
+			if (unlikely(ret)) {
+				if (release_pd_entry(pd, idx, pt, scratch))
+					free_px(vm, pt);
+				goto out;
+			}
+
+			spin_lock(&pd->lock);
+			atomic_dec(&pt->used);
+			GEM_BUG_ON(!atomic_read(&pt->used));
+		} else {
+			unsigned int count = gen8_pt_count(*start, end);
+
+			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
+			    __func__, vm, lvl, *start, end,
+			    gen8_pd_index(*start, 0), count,
+			    atomic_read(&pt->used));
+
+			atomic_add(count, &pt->used);
+			/* All other pdes may be simultaneously removed */
+			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
+			*start += count;
+		}
+	} while (idx++, --len);
+	spin_unlock(&pd->lock);
+out:
+	if (alloc)
+		free_px(vm, alloc);
+	return ret;
+}
+
+static int gen8_ppgtt_alloc(struct i915_address_space *vm,
+			    u64 start, u64 length)
+{
+	u64 from;
+	int err;
+
+	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+	GEM_BUG_ON(range_overflows(start, length, vm->total));
+
+	start >>= GEN8_PTE_SHIFT;
+	length >>= GEN8_PTE_SHIFT;
+	GEM_BUG_ON(length == 0);
+	from = start;
+
+	err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
+				 &start, start + length, vm->top);
+	if (unlikely(err && from != start))
+		__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
+				   from, start, vm->top);
+
+	return err;
+}
+
+static __always_inline u64
+gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
+		      struct i915_page_directory *pdp,
+		      struct sgt_dma *iter,
+		      u64 idx,
+		      enum i915_cache_level cache_level,
+		      u32 flags)
+{
+	struct i915_page_directory *pd;
+	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+	gen8_pte_t *vaddr;
+
+	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
+	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+	do {
+		GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
+		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
+
+		iter->dma += I915_GTT_PAGE_SIZE;
+		if (iter->dma >= iter->max) {
+			iter->sg = __sg_next(iter->sg);
+			if (!iter->sg) {
+				idx = 0;
+				break;
+			}
+
+			iter->dma = sg_dma_address(iter->sg);
+			iter->max = iter->dma + iter->sg->length;
+		}
+
+		if (gen8_pd_index(++idx, 0) == 0) {
+			if (gen8_pd_index(idx, 1) == 0) {
+				/* Limited by sg length for 3lvl */
+				if (gen8_pd_index(idx, 2) == 0)
+					break;
+
+				pd = pdp->entry[gen8_pd_index(idx, 2)];
+			}
+
+			kunmap_atomic(vaddr);
+			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+		}
+	} while (1);
+	kunmap_atomic(vaddr);
+
+	return idx;
+}
+
+static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
+				   struct sgt_dma *iter,
+				   enum i915_cache_level cache_level,
+				   u32 flags)
+{
+	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+	u64 start = vma->node.start;
+	dma_addr_t rem = iter->sg->length;
+
+	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
+
+	do {
+		struct i915_page_directory * const pdp =
+			gen8_pdp_for_page_address(vma->vm, start);
+		struct i915_page_directory * const pd =
+			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
+		gen8_pte_t encode = pte_encode;
+		unsigned int maybe_64K = -1;
+		unsigned int page_size;
+		gen8_pte_t *vaddr;
+		u16 index;
+
+		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
+		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
+		    rem >= I915_GTT_PAGE_SIZE_2M &&
+		    !__gen8_pte_index(start, 0)) {
+			index = __gen8_pte_index(start, 1);
+			encode |= GEN8_PDE_PS_2M;
+			page_size = I915_GTT_PAGE_SIZE_2M;
+
+			vaddr = kmap_atomic_px(pd);
+		} else {
+			struct i915_page_table *pt =
+				i915_pt_entry(pd, __gen8_pte_index(start, 1));
+
+			index = __gen8_pte_index(start, 0);
+			page_size = I915_GTT_PAGE_SIZE;
+
+			if (!index &&
+			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
+			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
+				maybe_64K = __gen8_pte_index(start, 1);
+
+			vaddr = kmap_atomic_px(pt);
+		}
+
+		do {
+			GEM_BUG_ON(iter->sg->length < page_size);
+			vaddr[index++] = encode | iter->dma;
+
+			start += page_size;
+			iter->dma += page_size;
+			rem -= page_size;
+			if (iter->dma >= iter->max) {
+				iter->sg = __sg_next(iter->sg);
+				if (!iter->sg)
+					break;
+
+				rem = iter->sg->length;
+				iter->dma = sg_dma_address(iter->sg);
+				iter->max = iter->dma + rem;
+
+				if (maybe_64K != -1 && index < I915_PDES &&
+				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
+					maybe_64K = -1;
+
+				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
+					break;
+			}
+		} while (rem >= page_size && index < I915_PDES);
+
+		kunmap_atomic(vaddr);
+
+		/*
+		 * Is it safe to mark the 2M block as 64K? -- Either we have
+		 * filled the whole page table with 64K entries, or we have
+		 * filled part of it, reached the end of the sg table, and
+		 * have enough padding.
+		 */
+		if (maybe_64K != -1 &&
+		    (index == I915_PDES ||
+		     (i915_vm_has_scratch_64K(vma->vm) &&
+		      !iter->sg && IS_ALIGNED(vma->node.start +
+					      vma->node.size,
+					      I915_GTT_PAGE_SIZE_2M)))) {
+			vaddr = kmap_atomic_px(pd);
+			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
+			kunmap_atomic(vaddr);
+			page_size = I915_GTT_PAGE_SIZE_64K;
+
+			/*
+			 * We write all 4K page entries, even when using 64K
+			 * pages. In order to verify that the HW isn't cheating
+			 * by using the 4K PTE instead of the 64K PTE, we want
+			 * to remove all the surplus entries. If the HW skipped
+			 * the 64K PTE, it will read/write into the scratch page
+			 * instead - which we detect as missing results during
+			 * selftests.
+			 */
+			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
+				u16 i;
+
+				encode = vma->vm->scratch[0].encode;
+				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
+
+				for (i = 1; i < index; i += 16)
+					memset64(vaddr + i, encode, 15);
+
+				kunmap_atomic(vaddr);
+			}
+		}
+
+		vma->page_sizes.gtt |= page_size;
+	} while (iter->sg);
+}
+
+static void gen8_ppgtt_insert(struct i915_address_space *vm,
+			      struct i915_vma *vma,
+			      enum i915_cache_level cache_level,
+			      u32 flags)
+{
+	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
+	struct sgt_dma iter = sgt_dma(vma);
+
+	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
+		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
+	} else  {
+		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
+
+		do {
+			struct i915_page_directory * const pdp =
+				gen8_pdp_for_page_index(vm, idx);
+
+			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
+						    cache_level, flags);
+		} while (idx);
+
+		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+	}
+}
+
+static int gen8_init_scratch(struct i915_address_space *vm)
+{
+	int ret;
+	int i;
+
+	/*
+	 * If everybody agrees not to write into the scratch page,
+	 * we can reuse it for all VMs, keeping contexts and processes separate.
+	 */
+	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
+		struct i915_address_space *clone = vm->gt->vm;
+
+		GEM_BUG_ON(!clone->has_read_only);
+
+		vm->scratch_order = clone->scratch_order;
+		memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
+		px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
+		return 0;
+	}
+
+	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
+	if (ret)
+		return ret;
+
+	vm->scratch[0].encode =
+		gen8_pte_encode(px_dma(&vm->scratch[0]),
+				I915_CACHE_LLC, vm->has_read_only);
+
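+	/* Each higher scratch level is a page filled with the level below */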
+	for (i = 1; i <= vm->top; i++) {
+		if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
+			goto free_scratch;
+
+		fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
+		vm->scratch[i].encode =
+			gen8_pde_encode(px_dma(&vm->scratch[i]),
+					I915_CACHE_LLC);
+	}
+
+	return 0;
+
+free_scratch:
+	free_scratch(vm);
+	return -ENOMEM;
+}
+
+static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
+{
+	struct i915_address_space *vm = &ppgtt->vm;
+	struct i915_page_directory *pd = ppgtt->pd;
+	unsigned int idx;
+
+	GEM_BUG_ON(vm->top != 2);
+	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
+
+	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
+		struct i915_page_directory *pde;
+
+		pde = alloc_pd(vm);
+		if (IS_ERR(pde))
+			return PTR_ERR(pde);
+
+		fill_px(pde, vm->scratch[1].encode);
+		set_pd_entry(pd, idx, pde);
+		atomic_inc(px_used(pde)); /* keep pinned */
+	}
+	wmb();
+
+	return 0;
+}
+
+static struct i915_page_directory *
+gen8_alloc_top_pd(struct i915_address_space *vm)
+{
+	const unsigned int count = gen8_pd_top_count(vm);
+	struct i915_page_directory *pd;
+
+	GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
+
+	pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
+	if (unlikely(!pd))
+		return ERR_PTR(-ENOMEM);
+
+	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
+		kfree(pd);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
+	atomic_inc(px_used(pd)); /* mark as pinned */
+	return pd;
+}
+
+/*
+ * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
+ * registers, with a net effect resembling a 2-level page table in normal x86
+ * terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of
+ * legacy 32b address space.
+ */
+struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
+{
+	struct i915_ppgtt *ppgtt;
+	int err;
+
+	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+	if (!ppgtt)
+		return ERR_PTR(-ENOMEM);
+
+	ppgtt_init(ppgtt, gt);
+	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
+
+	/*
+	 * From bdw, there is hw support for read-only pages in the PPGTT.
+	 *
+	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
+	 * for now.
+	 *
+	 * Gen12 has inherited the same read-only fault issue from gen11.
+	 */
+	ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);
+
+	/*
+	 * There are only a few exceptions for gen >= 6: chv and bxt.
+	 * And we are not sure about the latter, so play it safe for now.
+	 */
+	if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
+		ppgtt->vm.pt_kmap_wc = true;
+
+	err = gen8_init_scratch(&ppgtt->vm);
+	if (err)
+		goto err_free;
+
+	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
+	if (IS_ERR(ppgtt->pd)) {
+		err = PTR_ERR(ppgtt->pd);
+		goto err_free_scratch;
+	}
+
+	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
+		err = gen8_preallocate_top_level_pdp(ppgtt);
+		if (err)
+			goto err_free_pd;
+	}
+
+	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
+	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
+	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
+	ppgtt->vm.clear_range = gen8_ppgtt_clear;
+
+	if (intel_vgpu_active(gt->i915))
+		gen8_ppgtt_notify_vgt(ppgtt, true);
+
+	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
+
+	return ppgtt;
+
+err_free_pd:
+	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
+			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
+err_free_scratch:
+	free_scratch(&ppgtt->vm);
+err_free:
+	kfree(ppgtt);
+	return ERR_PTR(err);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
new file mode 100644
index 0000000..76a08b9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN8_PPGTT_H__
+#define __GEN8_PPGTT_H__
+
+struct intel_gt;
+
+struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 5531708..0ba524a 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -28,6 +28,8 @@
 
 #include "i915_drv.h"
 #include "i915_trace.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
 
 static void irq_enable(struct intel_engine_cs *engine)
 {
@@ -53,15 +55,17 @@ static void irq_disable(struct intel_engine_cs *engine)
 
 static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
 {
+	struct intel_engine_cs *engine =
+		container_of(b, struct intel_engine_cs, breadcrumbs);
+
 	lockdep_assert_held(&b->irq_lock);
 
 	GEM_BUG_ON(!b->irq_enabled);
 	if (!--b->irq_enabled)
-		irq_disable(container_of(b,
-					 struct intel_engine_cs,
-					 breadcrumbs));
+		irq_disable(engine);
 
 	b->irq_armed = false;
+	intel_gt_pm_put_async(engine->gt);
 }
 
 void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
@@ -127,16 +131,23 @@ __dma_fence_signal__notify(struct dma_fence *fence,
 	}
 }
 
-void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
+static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
 {
-	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+	struct intel_engine_cs *engine =
+		container_of(b, struct intel_engine_cs, breadcrumbs);
+
+	intel_engine_add_retire(engine, tl);
+}
+
+static void signal_irq_work(struct irq_work *work)
+{
+	struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
 	const ktime_t timestamp = ktime_get();
 	struct intel_context *ce, *cn;
 	struct list_head *pos, *next;
-	unsigned long flags;
 	LIST_HEAD(signal);
 
-	spin_lock_irqsave(&b->irq_lock, flags);
+	spin_lock(&b->irq_lock);
 
 	if (b->irq_armed && list_empty(&b->signalers))
 		__intel_breadcrumbs_disarm_irq(b);
@@ -177,44 +188,41 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 		if (!list_is_first(pos, &ce->signals)) {
 			/* Advance the list to the first incomplete request */
 			__list_del_many(&ce->signals, pos);
-			if (&ce->signals == pos) /* now empty */
+			if (&ce->signals == pos) { /* now empty */
 				list_del_init(&ce->signal_link);
+				add_retire(b, ce->timeline);
+			}
 		}
 	}
 
-	spin_unlock_irqrestore(&b->irq_lock, flags);
+	spin_unlock(&b->irq_lock);
 
 	list_for_each_safe(pos, next, &signal) {
 		struct i915_request *rq =
 			list_entry(pos, typeof(*rq), signal_link);
 		struct list_head cb_list;
 
-		spin_lock_irqsave(&rq->lock, flags);
+		spin_lock(&rq->lock);
 		list_replace(&rq->fence.cb_list, &cb_list);
 		__dma_fence_signal__timestamp(&rq->fence, timestamp);
 		__dma_fence_signal__notify(&rq->fence, &cb_list);
-		spin_unlock_irqrestore(&rq->lock, flags);
+		spin_unlock(&rq->lock);
 
 		i915_request_put(rq);
 	}
 }
 
-static void signal_irq_work(struct irq_work *work)
-{
-	struct intel_engine_cs *engine =
-		container_of(work, typeof(*engine), breadcrumbs.irq_work);
-
-	intel_engine_breadcrumbs_irq(engine);
-}
-
-static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
+static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
 {
 	struct intel_engine_cs *engine =
 		container_of(b, struct intel_engine_cs, breadcrumbs);
 
 	lockdep_assert_held(&b->irq_lock);
 	if (b->irq_armed)
-		return;
+		return true;
+
+	if (!intel_gt_pm_get_if_awake(engine->gt))
+		return false;
 
 	/*
 	 * The breadcrumb irq will be disarmed on the interrupt after the
@@ -234,6 +242,8 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
 
 	if (!b->irq_enabled++)
 		irq_enable(engine);
+
+	return true;
 }
 
 void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
@@ -271,19 +281,20 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
 
 	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
 		struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
-		struct intel_context *ce = rq->hw_context;
+		struct intel_context *ce = rq->context;
 		struct list_head *pos;
 
 		spin_lock(&b->irq_lock);
 		GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
 
-		__intel_breadcrumbs_arm_irq(b);
+		if (!__intel_breadcrumbs_arm_irq(b))
+			goto unlock;
 
 		/*
 		 * We keep the seqno in retirement order, so we can break
-		 * inside intel_engine_breadcrumbs_irq as soon as we've passed
-		 * the last completed request (or seen a request that hasn't
-		 * event started). We could iterate the timeline->requests list,
+		 * inside intel_engine_signal_breadcrumbs as soon as we've
+		 * passed the last completed request (or seen a request that
+		 * hasn't even started). We could walk the timeline->requests,
 		 * but keeping a separate signalers_list has the advantage of
 		 * hopefully being much smaller than the full list and so
 		 * provides faster iteration and detection when there are no
@@ -306,6 +317,7 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
 		GEM_BUG_ON(!check_signal_order(ce, rq));
 
 		set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+unlock:
 		spin_unlock(&b->irq_lock);
 	}
 
@@ -326,7 +338,7 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
 	 */
 	spin_lock(&b->irq_lock);
 	if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
-		struct intel_context *ce = rq->hw_context;
+		struct intel_context *ce = rq->context;
 
 		list_del(&rq->signal_link);
 		if (list_empty(&ce->signals))
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 5b7ff3c..23137b2 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -31,8 +31,7 @@ void intel_context_free(struct intel_context *ce)
 }
 
 struct intel_context *
-intel_context_create(struct i915_gem_context *ctx,
-		     struct intel_engine_cs *engine)
+intel_context_create(struct intel_engine_cs *engine)
 {
 	struct intel_context *ce;
 
@@ -40,39 +39,82 @@ intel_context_create(struct i915_gem_context *ctx,
 	if (!ce)
 		return ERR_PTR(-ENOMEM);
 
-	intel_context_init(ce, ctx, engine);
+	intel_context_init(ce, engine);
 	return ce;
 }
 
+int intel_context_alloc_state(struct intel_context *ce)
+{
+	int err = 0;
+
+	if (mutex_lock_interruptible(&ce->pin_mutex))
+		return -EINTR;
+
+	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+		err = ce->ops->alloc(ce);
+		if (unlikely(err))
+			goto unlock;
+
+		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
+	}
+
+unlock:
+	mutex_unlock(&ce->pin_mutex);
+	return err;
+}
+
+static int intel_context_active_acquire(struct intel_context *ce)
+{
+	int err;
+
+	err = i915_active_acquire(&ce->active);
+	if (err)
+		return err;
+
+	/* Preallocate tracking nodes */
+	if (!intel_context_is_barrier(ce)) {
+		err = i915_active_acquire_preallocate_barrier(&ce->active,
+							      ce->engine);
+		if (err) {
+			i915_active_release(&ce->active);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void intel_context_active_release(struct intel_context *ce)
+{
+	/* Nodes preallocated in intel_context_active() */
+	i915_active_acquire_barrier(&ce->active);
+	i915_active_release(&ce->active);
+}
+
 int __intel_context_do_pin(struct intel_context *ce)
 {
 	int err;
 
+	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
+		err = intel_context_alloc_state(ce);
+		if (err)
+			return err;
+	}
+
 	if (mutex_lock_interruptible(&ce->pin_mutex))
 		return -EINTR;
 
 	if (likely(!atomic_read(&ce->pin_count))) {
-		intel_wakeref_t wakeref;
-
-		if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
-			err = ce->ops->alloc(ce);
-			if (unlikely(err))
-				goto err;
-
-			__set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
-		}
-
-		err = 0;
-		with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref)
-			err = ce->ops->pin(ce);
-		if (err)
+		err = intel_context_active_acquire(ce);
+		if (unlikely(err))
 			goto err;
 
-		GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
-			  ce->engine->name, ce->timeline->fence_context,
-			  ce->ring->head, ce->ring->tail);
+		err = ce->ops->pin(ce);
+		if (unlikely(err))
+			goto err_active;
 
-		i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
+		CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
+			 ce->ring->head, ce->ring->tail);
 
 		smp_mb__before_atomic(); /* flush pin before it is visible */
 	}
@@ -83,6 +125,8 @@ int __intel_context_do_pin(struct intel_context *ce)
 	mutex_unlock(&ce->pin_mutex);
 	return 0;
 
+err_active:
+	intel_context_active_release(ce);
 err:
 	mutex_unlock(&ce->pin_mutex);
 	return err;
@@ -90,36 +134,29 @@ int __intel_context_do_pin(struct intel_context *ce)
 
 void intel_context_unpin(struct intel_context *ce)
 {
-	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+	if (!atomic_dec_and_test(&ce->pin_count))
 		return;
 
-	/* We may be called from inside intel_context_pin() to evict another */
+	CE_TRACE(ce, "unpin\n");
+	ce->ops->unpin(ce);
+
+	/*
+	 * Once released, we may asynchronously drop the active reference.
+	 * As that may be the only reference keeping the context alive,
+	 * take an extra reference now so that it is not freed before we finish
+	 * dereferencing it.
+	 */
 	intel_context_get(ce);
-	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
-
-	if (likely(atomic_dec_and_test(&ce->pin_count))) {
-		GEM_TRACE("%s context:%llx retire\n",
-			  ce->engine->name, ce->timeline->fence_context);
-
-		ce->ops->unpin(ce);
-
-		i915_gem_context_put(ce->gem_context);
-		intel_context_active_release(ce);
-	}
-
-	mutex_unlock(&ce->pin_mutex);
+	intel_context_active_release(ce);
 	intel_context_put(ce);
 }
 
 static int __context_pin_state(struct i915_vma *vma)
 {
-	u64 flags;
+	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
 	int err;
 
-	flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
-	flags |= PIN_HIGH | PIN_GLOBAL;
-
-	err = i915_vma_pin(vma, 0, 0, flags);
+	err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
 	if (err)
 		return err;
 
@@ -178,9 +215,9 @@ static void __intel_context_retire(struct i915_active *active)
 {
 	struct intel_context *ce = container_of(active, typeof(*ce), active);
 
-	GEM_TRACE("%s context:%llx retire\n",
-		  ce->engine->name, ce->timeline->fence_context);
+	CE_TRACE(ce, "retire\n");
 
+	set_bit(CONTEXT_VALID_BIT, &ce->flags);
 	if (ce->state)
 		__context_unpin_state(ce->state);
 
@@ -195,6 +232,8 @@ static int __intel_context_active(struct i915_active *active)
 	struct intel_context *ce = container_of(active, typeof(*ce), active);
 	int err;
 
+	CE_TRACE(ce, "active\n");
+
 	intel_context_get(ce);
 
 	err = __ring_active(ce->ring);
@@ -223,60 +262,21 @@ static int __intel_context_active(struct i915_active *active)
 	return err;
 }
 
-int intel_context_active_acquire(struct intel_context *ce)
-{
-	int err;
-
-	err = i915_active_acquire(&ce->active);
-	if (err)
-		return err;
-
-	/* Preallocate tracking nodes */
-	if (!i915_gem_context_is_kernel(ce->gem_context)) {
-		err = i915_active_acquire_preallocate_barrier(&ce->active,
-							      ce->engine);
-		if (err) {
-			i915_active_release(&ce->active);
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-void intel_context_active_release(struct intel_context *ce)
-{
-	/* Nodes preallocated in intel_context_active() */
-	i915_active_acquire_barrier(&ce->active);
-	i915_active_release(&ce->active);
-}
-
 void
 intel_context_init(struct intel_context *ce,
-		   struct i915_gem_context *ctx,
 		   struct intel_engine_cs *engine)
 {
-	struct i915_address_space *vm;
-
 	GEM_BUG_ON(!engine->cops);
+	GEM_BUG_ON(!engine->gt->vm);
 
 	kref_init(&ce->ref);
 
-	ce->gem_context = ctx;
-	rcu_read_lock();
-	vm = rcu_dereference(ctx->vm);
-	if (vm)
-		ce->vm = i915_vm_get(vm);
-	else
-		ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
-	rcu_read_unlock();
-	if (ctx->timeline)
-		ce->timeline = intel_timeline_get(ctx->timeline);
-
 	ce->engine = engine;
 	ce->ops = engine->cops;
 	ce->sseu = engine->sseu;
-	ce->ring = __intel_context_ring_size(SZ_16K);
+	ce->ring = __intel_context_ring_size(SZ_4K);
+
+	ce->vm = i915_vm_get(engine->gt->vm);
 
 	INIT_LIST_HEAD(&ce->signal_link);
 	INIT_LIST_HEAD(&ce->signals);
@@ -341,30 +341,11 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 	int err;
 
 	/* Only suitable for use in remotely modifying this context */
-	GEM_BUG_ON(rq->hw_context == ce);
+	GEM_BUG_ON(rq->context == ce);
 
 	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
-		/*
-		 * Ideally, we just want to insert our foreign fence as
-		 * a barrier into the remove context, such that this operation
-		 * occurs after all current operations in that context, and
-		 * all future operations must occur after this.
-		 *
-		 * Currently, the timeline->last_request tracking is guarded
-		 * by its mutex and so we must obtain that to atomically
-		 * insert our barrier. However, since we already hold our
-		 * timeline->mutex, we must be careful against potential
-		 * inversion if we are the kernel_context as the remote context
-		 * will itself poke at the kernel_context when it needs to
-		 * unpin. Ergo, if already locked, we drop both locks and
-		 * try again (through the magic of userspace repeating EAGAIN).
-		 */
-		if (!mutex_trylock(&tl->mutex))
-			return -EAGAIN;
-
 		/* Queue this switch after current activity by this context. */
 		err = i915_active_fence_set(&tl->last_request, rq);
-		mutex_unlock(&tl->mutex);
 		if (err)
 			return err;
 	}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 68b3d31..30bd248 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -7,7 +7,9 @@
 #ifndef __INTEL_CONTEXT_H__
 #define __INTEL_CONTEXT_H__
 
+#include <linux/bitops.h>
 #include <linux/lockdep.h>
+#include <linux/types.h>
 
 #include "i915_active.h"
 #include "intel_context_types.h"
@@ -15,14 +17,21 @@
 #include "intel_ring_types.h"
 #include "intel_timeline_types.h"
 
+#define CE_TRACE(ce, fmt, ...) do {					\
+	const struct intel_context *ce__ = (ce);			\
+	ENGINE_TRACE(ce__->engine, "context:%llx " fmt,			\
+		     ce__->timeline->fence_context,			\
+		     ##__VA_ARGS__);					\
+} while (0)
+
 void intel_context_init(struct intel_context *ce,
-			struct i915_gem_context *ctx,
 			struct intel_engine_cs *engine);
 void intel_context_fini(struct intel_context *ce);
 
 struct intel_context *
-intel_context_create(struct i915_gem_context *ctx,
-		     struct intel_engine_cs *engine);
+intel_context_create(struct intel_engine_cs *engine);
+
+int intel_context_alloc_state(struct intel_context *ce);
 
 void intel_context_free(struct intel_context *ce);
 
@@ -69,9 +78,14 @@ static inline void intel_context_unlock_pinned(struct intel_context *ce)
 
 int __intel_context_do_pin(struct intel_context *ce);
 
+static inline bool intel_context_pin_if_active(struct intel_context *ce)
+{
+	return atomic_inc_not_zero(&ce->pin_count);
+}
+
 static inline int intel_context_pin(struct intel_context *ce)
 {
-	if (likely(atomic_inc_not_zero(&ce->pin_count)))
+	if (likely(intel_context_pin_if_active(ce)))
 		return 0;
 
 	return __intel_context_do_pin(ce);
@@ -109,9 +123,6 @@ static inline void intel_context_exit(struct intel_context *ce)
 		ce->ops->exit(ce);
 }
 
-int intel_context_active_acquire(struct intel_context *ce);
-void intel_context_active_release(struct intel_context *ce);
-
 static inline struct intel_context *intel_context_get(struct intel_context *ce)
 {
 	kref_get(&ce->ref);
@@ -153,4 +164,64 @@ static inline struct intel_ring *__intel_context_ring_size(u64 sz)
 	return u64_to_ptr(struct intel_ring, sz);
 }
 
+static inline bool intel_context_is_barrier(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
+}
+
+static inline bool intel_context_use_semaphores(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline void intel_context_set_use_semaphores(struct intel_context *ce)
+{
+	set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
+{
+	clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline bool intel_context_is_banned(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+static inline bool intel_context_set_banned(struct intel_context *ce)
+{
+	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+static inline bool
+intel_context_force_single_submission(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
+}
+
+static inline void
+intel_context_set_single_submission(struct intel_context *ce)
+{
+	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
+}
+
+static inline bool
+intel_context_nopreempt(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+static inline void
+intel_context_set_nopreempt(struct intel_context *ce)
+{
+	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+static inline void
+intel_context_clear_nopreempt(struct intel_context *ce)
+{
+	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
 #endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 6959b05..ca1420f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -17,6 +17,8 @@
 #include "intel_engine_types.h"
 #include "intel_sseu.h"
 
+#define CONTEXT_REDZONE POISON_INUSE
+
 struct i915_gem_context;
 struct i915_vma;
 struct intel_context;
@@ -44,7 +46,7 @@ struct intel_context {
 #define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
 
 	struct i915_address_space *vm;
-	struct i915_gem_context *gem_context;
+	struct i915_gem_context __rcu *gem_context;
 
 	struct list_head signal_link;
 	struct list_head signals;
@@ -54,7 +56,13 @@ struct intel_context {
 	struct intel_timeline *timeline;
 
 	unsigned long flags;
-#define CONTEXT_ALLOC_BIT 0
+#define CONTEXT_BARRIER_BIT		0
+#define CONTEXT_ALLOC_BIT		1
+#define CONTEXT_VALID_BIT		2
+#define CONTEXT_USE_SEMAPHORES		3
+#define CONTEXT_BANNED			4
+#define CONTEXT_FORCE_SINGLE_SUBMISSION	5
+#define CONTEXT_NOPREEMPT		6
 
 	u32 *lrc_reg_state;
 	u64 lrc_desc;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 01765a7..5df0030 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -29,6 +29,13 @@ struct intel_gt;
 #define CACHELINE_BYTES 64
 #define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
 
+#define ENGINE_TRACE(e, fmt, ...) do {					\
+	const struct intel_engine_cs *e__ __maybe_unused = (e);		\
+	GEM_TRACE("%s %s: " fmt,					\
+		  dev_name(e__->i915->drm.dev), e__->name,		\
+		  ##__VA_ARGS__);					\
+} while (0)
+
 /*
  * The register defines to be used with the following macros need to accept a
  * base param, e.g:
@@ -177,15 +184,15 @@ void intel_engine_stop(struct intel_engine_cs *engine);
 void intel_engine_cleanup(struct intel_engine_cs *engine);
 
 int intel_engines_init_mmio(struct intel_gt *gt);
-int intel_engines_setup(struct intel_gt *gt);
 int intel_engines_init(struct intel_gt *gt);
-void intel_engines_cleanup(struct intel_gt *gt);
+
+void intel_engines_release(struct intel_gt *gt);
+void intel_engines_free(struct intel_gt *gt);
 
 int intel_engine_init_common(struct intel_engine_cs *engine);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
 int intel_ring_submission_setup(struct intel_engine_cs *engine);
-int intel_ring_submission_init(struct intel_engine_cs *engine);
 
 int intel_engine_stop_cs(struct intel_engine_cs *engine);
 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
@@ -195,7 +202,7 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
 
-void intel_engine_get_instdone(struct intel_engine_cs *engine,
+void intel_engine_get_instdone(const struct intel_engine_cs *engine,
 			       struct intel_instdone *instdone);
 
 void intel_engine_init_execlists(struct intel_engine_cs *engine);
@@ -206,13 +213,11 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
 
 static inline void
-intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
+intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
 {
 	irq_work_queue(&engine->breadcrumbs.irq_work);
 }
 
-void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);
-
 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 
@@ -270,8 +275,8 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
 static inline void __intel_engine_reset(struct intel_engine_cs *engine,
 					bool stalled)
 {
-	if (engine->reset.reset)
-		engine->reset.reset(engine, stalled);
+	if (engine->reset.rewind)
+		engine->reset.rewind(engine, stalled);
 	engine->serial++; /* contexts lost */
 }
 
@@ -296,7 +301,7 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
 struct i915_request *
 intel_engine_find_active_request(struct intel_engine_cs *engine);
 
-u32 intel_engine_context_size(struct drm_i915_private *i915, u8 class);
+u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 813bd3a..f451ef3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -141,7 +141,7 @@ static const struct engine_info intel_engines[] = {
 
 /**
  * intel_engine_context_size() - return the size of the context for an engine
- * @dev_priv: i915 device private
+ * @gt: the gt
  * @class: engine class
  *
  * Each engine class may require a different amount of space for a context
@@ -153,17 +153,18 @@ static const struct engine_info intel_engines[] = {
  * in LRC mode, but does not include the "shared data page" used with
  * GuC submission. The caller should account for this if using the GuC.
  */
-u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
+u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
 {
+	struct intel_uncore *uncore = gt->uncore;
 	u32 cxt_size;
 
 	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
 
 	switch (class) {
 	case RENDER_CLASS:
-		switch (INTEL_GEN(dev_priv)) {
+		switch (INTEL_GEN(gt->i915)) {
 		default:
-			MISSING_CASE(INTEL_GEN(dev_priv));
+			MISSING_CASE(INTEL_GEN(gt->i915));
 			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
 		case 12:
 		case 11:
@@ -175,14 +176,14 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
 		case 8:
 			return GEN8_LR_CONTEXT_RENDER_SIZE;
 		case 7:
-			if (IS_HASWELL(dev_priv))
+			if (IS_HASWELL(gt->i915))
 				return HSW_CXT_TOTAL_SIZE;
 
-			cxt_size = I915_READ(GEN7_CXT_SIZE);
+			cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
 			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
 					PAGE_SIZE);
 		case 6:
-			cxt_size = I915_READ(CXT_SIZE);
+			cxt_size = intel_uncore_read(uncore, CXT_SIZE);
 			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
 					PAGE_SIZE);
 		case 5:
@@ -197,9 +198,9 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
 			 * minimum allocation anyway so it should all come
 			 * out in the wash.
 			 */
-			cxt_size = I915_READ(CXT_SIZE) + 1;
+			cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
 			DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
-					 INTEL_GEN(dev_priv),
+					 INTEL_GEN(gt->i915),
 					 cxt_size * 64,
 					 cxt_size - 1);
 			return round_up(cxt_size * 64, PAGE_SIZE);
@@ -216,7 +217,7 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
 	case VIDEO_DECODE_CLASS:
 	case VIDEO_ENHANCEMENT_CLASS:
 	case COPY_ENGINE_CLASS:
-		if (INTEL_GEN(dev_priv) < 8)
+		if (INTEL_GEN(gt->i915) < 8)
 			return 0;
 		return GEN8_LR_CONTEXT_OTHER_SIZE;
 	}
@@ -318,14 +319,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	engine->props.timeslice_duration_ms =
 		CONFIG_DRM_I915_TIMESLICE_DURATION;
 
-	/*
-	 * To be overridden by the backend on setup. However to facilitate
-	 * cleanup on error during setup, we always provide the destroy vfunc.
-	 */
-	engine->destroy = (typeof(engine->destroy))kfree;
-
-	engine->context_size = intel_engine_context_size(gt->i915,
-							 engine->class);
+	engine->context_size = intel_engine_context_size(gt, engine->class);
 	if (WARN_ON(engine->context_size > BIT(20)))
 		engine->context_size = 0;
 	if (engine->context_size)
@@ -334,6 +328,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	/* Nothing to do here, execute in order of dependencies */
 	engine->schedule = NULL;
 
+	ewma__engine_latency_init(&engine->latency);
 	seqlock_init(&engine->stats.lock);
 
 	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
@@ -344,7 +339,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	gt->engine_class[info->class][info->instance] = engine;
 	gt->engine[id] = engine;
 
-	intel_engine_add_user(engine);
 	gt->i915->engine[id] = engine;
 
 	return 0;
@@ -390,18 +384,36 @@ static void intel_setup_engine_capabilities(struct intel_gt *gt)
 }
 
 /**
- * intel_engines_cleanup() - free the resources allocated for Command Streamers
+ * intel_engines_release() - free the resources allocated for Command Streamers
  * @gt: pointer to struct intel_gt
  */
-void intel_engines_cleanup(struct intel_gt *gt)
+void intel_engines_release(struct intel_gt *gt)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/* Decouple the backend; but keep the layout for late GPU resets */
+	for_each_engine(engine, gt, id) {
+		if (!engine->release)
+			continue;
+
+		engine->release(engine);
+		engine->release = NULL;
+
+		memset(&engine->reset, 0, sizeof(engine->reset));
+
+		gt->i915->engine[id] = NULL;
+	}
+}
+
+void intel_engines_free(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
 	for_each_engine(engine, gt, id) {
-		engine->destroy(engine);
+		kfree(engine);
 		gt->engine[id] = NULL;
-		gt->i915->engine[id] = NULL;
 	}
 }
 
@@ -455,38 +467,7 @@ int intel_engines_init_mmio(struct intel_gt *gt)
 	return 0;
 
 cleanup:
-	intel_engines_cleanup(gt);
-	return err;
-}
-
-/**
- * intel_engines_init() - init the Engine Command Streamers
- * @gt: pointer to struct intel_gt
- *
- * Return: non-zero if the initialization failed.
- */
-int intel_engines_init(struct intel_gt *gt)
-{
-	int (*init)(struct intel_engine_cs *engine);
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int err;
-
-	if (HAS_EXECLISTS(gt->i915))
-		init = intel_execlists_submission_init;
-	else
-		init = intel_ring_submission_init;
-
-	for_each_engine(engine, gt, id) {
-		err = init(engine);
-		if (err)
-			goto cleanup;
-	}
-
-	return 0;
-
-cleanup:
-	intel_engines_cleanup(gt);
+	intel_engines_free(gt);
 	return err;
 }
 
@@ -601,7 +582,7 @@ static int init_status_page(struct intel_engine_cs *engine)
 	return ret;
 }
 
-static int intel_engine_setup_common(struct intel_engine_cs *engine)
+static int engine_setup_common(struct intel_engine_cs *engine)
 {
 	int err;
 
@@ -631,49 +612,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
 	return 0;
 }
 
-/**
- * intel_engines_setup- setup engine state not requiring hw access
- * @gt: pointer to struct intel_gt
- *
- * Initializes engine structure members shared between legacy and execlists
- * submission modes which do not require hardware access.
- *
- * Typically done early in the submission mode specific engine setup stage.
- */
-int intel_engines_setup(struct intel_gt *gt)
-{
-	int (*setup)(struct intel_engine_cs *engine);
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int err;
-
-	if (HAS_EXECLISTS(gt->i915))
-		setup = intel_execlists_submission_setup;
-	else
-		setup = intel_ring_submission_setup;
-
-	for_each_engine(engine, gt, id) {
-		err = intel_engine_setup_common(engine);
-		if (err)
-			goto cleanup;
-
-		err = setup(engine);
-		if (err)
-			goto cleanup;
-
-		/* We expect the backend to take control over its state */
-		GEM_BUG_ON(engine->destroy == (typeof(engine->destroy))kfree);
-
-		GEM_BUG_ON(!engine->cops);
-	}
-
-	return 0;
-
-cleanup:
-	intel_engines_cleanup(gt);
-	return err;
-}
-
 struct measure_breadcrumb {
 	struct i915_request rq;
 	struct intel_timeline timeline;
@@ -757,13 +695,13 @@ create_kernel_context(struct intel_engine_cs *engine)
 	struct intel_context *ce;
 	int err;
 
-	ce = intel_context_create(engine->i915->kernel_context, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return ce;
 
-	ce->ring = __intel_context_ring_size(SZ_4K);
+	__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
 
-	err = intel_context_pin(ce);
+	err = intel_context_pin(ce); /* perma-pin so it is always available */
 	if (err) {
 		intel_context_put(ce);
 		return ERR_PTR(err);
@@ -791,13 +729,19 @@ create_kernel_context(struct intel_engine_cs *engine)
  *
  * Returns zero on success or an error code on failure.
  */
-int intel_engine_init_common(struct intel_engine_cs *engine)
+static int engine_init_common(struct intel_engine_cs *engine)
 {
 	struct intel_context *ce;
 	int ret;
 
 	engine->set_default_submission(engine);
 
+	ret = measure_breadcrumb_dw(engine);
+	if (ret < 0)
+		return ret;
+
+	engine->emit_fini_breadcrumb_dw = ret;
+
 	/*
 	 * We may need to do things with the shrinker which
 	 * require us to immediately switch back to the default
@@ -812,18 +756,38 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 
 	engine->kernel_context = ce;
 
-	ret = measure_breadcrumb_dw(engine);
-	if (ret < 0)
-		goto err_unpin;
+	return 0;
+}
 
-	engine->emit_fini_breadcrumb_dw = ret;
+int intel_engines_init(struct intel_gt *gt)
+{
+	int (*setup)(struct intel_engine_cs *engine);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err;
+
+	if (HAS_EXECLISTS(gt->i915))
+		setup = intel_execlists_submission_setup;
+	else
+		setup = intel_ring_submission_setup;
+
+	for_each_engine(engine, gt, id) {
+		err = engine_setup_common(engine);
+		if (err)
+			return err;
+
+		err = setup(engine);
+		if (err)
+			return err;
+
+		err = engine_init_common(engine);
+		if (err)
+			return err;
+
+		intel_engine_add_user(engine);
+	}
 
 	return 0;
-
-err_unpin:
-	intel_context_unpin(ce);
-	intel_context_put(ce);
-	return ret;
 }
 
 /**
@@ -836,6 +800,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
 	GEM_BUG_ON(!list_empty(&engine->active.requests));
+	tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
 
 	cleanup_status_page(engine);
 
@@ -911,7 +876,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
 	if (INTEL_GEN(engine->i915) < 3)
 		return -ENODEV;
 
-	GEM_TRACE("%s\n", engine->name);
+	ENGINE_TRACE(engine, "\n");
 
 	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
 
@@ -920,7 +885,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
 					 mode, MODE_IDLE, MODE_IDLE,
 					 1000, stop_timeout(engine),
 					 NULL)) {
-		GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
+		ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
 		err = -ETIMEDOUT;
 	}
 
@@ -932,7 +897,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
 
 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
 {
-	GEM_TRACE("%s\n", engine->name);
+	ENGINE_TRACE(engine, "\n");
 
 	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
 }
@@ -949,8 +914,8 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
 }
 
 static u32
-read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
-		  i915_reg_t reg)
+read_subslice_reg(const struct intel_engine_cs *engine,
+		  int slice, int subslice, i915_reg_t reg)
 {
 	struct drm_i915_private *i915 = engine->i915;
 	struct intel_uncore *uncore = engine->uncore;
@@ -994,7 +959,7 @@ read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
 }
 
 /* NB: please notice the memset */
-void intel_engine_get_instdone(struct intel_engine_cs *engine,
+void intel_engine_get_instdone(const struct intel_engine_cs *engine,
 			       struct intel_instdone *instdone)
 {
 	struct drm_i915_private *i915 = engine->i915;
@@ -1478,6 +1443,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		drm_printf(m, "*** WEDGED ***\n");
 
 	drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
+	drm_printf(m, "\tBarriers?: %s\n",
+		   yesno(!llist_empty(&engine->barrier_tasks)));
+	drm_printf(m, "\tLatency: %luus\n",
+		   ewma__engine_latency_read(&engine->latency));
 
 	rcu_read_lock();
 	rq = READ_ONCE(engine->heartbeat.systole);
@@ -1517,9 +1486,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
 		print_request_ring(m, rq);
 
-		if (rq->hw_context->lrc_reg_state) {
+		if (rq->context->lrc_reg_state) {
 			drm_printf(m, "Logical Ring Context:\n");
-			hexdump(m, rq->hw_context->lrc_reg_state, PAGE_SIZE);
+			hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
 		}
 	}
 	spin_unlock_irqrestore(&engine->active.lock, flags);
@@ -1580,7 +1549,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 
 		for (port = execlists->pending; (rq = *port); port++) {
 			/* Exclude any contexts already counted in active */
-			if (!intel_context_inflight_count(rq->hw_context))
+			if (!intel_context_inflight_count(rq->context))
 				engine->stats.active++;
 		}
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 06aa14c..6c6fd18 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -63,15 +63,15 @@ static void heartbeat(struct work_struct *wrk)
 	struct intel_context *ce = engine->kernel_context;
 	struct i915_request *rq;
 
-	if (!intel_engine_pm_get_if_awake(engine))
-		return;
-
 	rq = engine->heartbeat.systole;
 	if (rq && i915_request_completed(rq)) {
 		i915_request_put(rq);
 		engine->heartbeat.systole = NULL;
 	}
 
+	if (!intel_engine_pm_get_if_awake(engine))
+		return;
+
 	if (intel_gt_is_wedged(engine->gt))
 		goto out;
 
@@ -199,7 +199,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
 		goto out_unlock;
 	}
 
-	rq->flags |= I915_REQUEST_SENTINEL;
+	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
 	idle_pulse(engine, rq);
 
 	__i915_request_commit(rq);
@@ -215,18 +215,26 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
 int intel_engine_flush_barriers(struct intel_engine_cs *engine)
 {
 	struct i915_request *rq;
+	int err = 0;
 
 	if (llist_empty(&engine->barrier_tasks))
 		return 0;
 
+	if (!intel_engine_pm_get_if_awake(engine))
+		return 0;
+
 	rq = i915_request_create(engine->kernel_context);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_rpm;
+	}
 
 	idle_pulse(engine, rq);
 	i915_request_add(rq);
 
-	return 0;
+out_rpm:
+	intel_engine_pm_put(engine);
+	return err;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index c1dd0cd..ea90ab3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -6,6 +6,7 @@
 
 #include "i915_drv.h"
 
+#include "intel_context.h"
 #include "intel_engine.h"
 #include "intel_engine_heartbeat.h"
 #include "intel_engine_pm.h"
@@ -19,9 +20,10 @@ static int __engine_unpark(struct intel_wakeref *wf)
 {
 	struct intel_engine_cs *engine =
 		container_of(wf, typeof(*engine), wakeref);
+	struct intel_context *ce;
 	void *map;
 
-	GEM_TRACE("%s\n", engine->name);
+	ENGINE_TRACE(engine, "\n");
 
 	intel_gt_pm_get(engine->gt);
 
@@ -33,6 +35,27 @@ static int __engine_unpark(struct intel_wakeref *wf)
 	if (!IS_ERR_OR_NULL(map))
 		engine->pinned_default_state = map;
 
+	/* Discard stale context state from across idling */
+	ce = engine->kernel_context;
+	if (ce) {
+		GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));
+
+		/* First poison the image to verify we never fully trust it */
+		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
+			struct drm_i915_gem_object *obj = ce->state->obj;
+			int type = i915_coherent_map_type(engine->i915);
+
+			map = i915_gem_object_pin_map(obj, type);
+			if (!IS_ERR(map)) {
+				memset(map, CONTEXT_REDZONE, obj->base.size);
+				i915_gem_object_flush_map(obj);
+				i915_gem_object_unpin_map(obj);
+			}
+		}
+
+		ce->ops->reset(ce);
+	}
+
 	if (engine->unpark)
 		engine->unpark(engine);
 
@@ -73,6 +96,15 @@ static inline void __timeline_mark_unlock(struct intel_context *ce,
 
 #endif /* !IS_ENABLED(CONFIG_LOCKDEP) */
 
+static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct i915_request *rq = to_request(fence);
+
+	ewma__engine_latency_add(&rq->engine->latency,
+				 ktime_us_delta(rq->fence.timestamp,
+						rq->duration.emitted));
+}
+
 static void
 __queue_and_release_pm(struct i915_request *rq,
 		       struct intel_timeline *tl,
@@ -80,7 +112,7 @@ __queue_and_release_pm(struct i915_request *rq,
 {
 	struct intel_gt_timelines *timelines = &engine->gt->timelines;
 
-	GEM_TRACE("%s\n", engine->name);
+	ENGINE_TRACE(engine, "\n");
 
 	/*
 	 * We have to serialise all potential retirement paths with our
@@ -113,14 +145,16 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 	unsigned long flags;
 	bool result = true;
 
-	/* Already inside the kernel context, safe to power down. */
-	if (engine->wakeref_serial == engine->serial)
-		return true;
-
 	/* GPU is pointing to the void, as good as in the kernel context. */
 	if (intel_gt_is_wedged(engine->gt))
 		return true;
 
+	GEM_BUG_ON(!intel_context_is_barrier(ce));
+
+	/* Already inside the kernel context, safe to power down. */
+	if (engine->wakeref_serial == engine->serial)
+		return true;
+
 	/*
 	 * Note, we do this without taking the timeline->mutex. We cannot
 	 * as we may be called while retiring the kernel context and so
@@ -163,7 +197,18 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 
 	/* Install ourselves as a preemption barrier */
 	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
-	__i915_request_commit(rq);
+	if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
+		/*
+		 * Use an interrupt for precise measurement of duration,
+		 * otherwise we rely on someone else retiring all the requests
+		 * which may delay the signaling (i.e. we will likely wait
+		 * until background request retirement, which runs every
+		 * second or two).
+		 */
+		BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
+		dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
+		rq->duration.emitted = ktime_get();
+	}
 
 	/* Expose ourselves to the world */
 	__queue_and_release_pm(rq, ce->timeline, engine);
@@ -183,7 +228,7 @@ static void call_idle_barriers(struct intel_engine_cs *engine)
 			container_of((struct list_head *)node,
 				     typeof(*cb), node);
 
-		cb->func(NULL, cb);
+		cb->func(ERR_PTR(-EAGAIN), cb);
 	}
 }
 
@@ -204,7 +249,7 @@ static int __engine_park(struct intel_wakeref *wf)
 	if (!switch_to_kernel_context(engine))
 		return -EBUSY;
 
-	GEM_TRACE("%s\n", engine->name);
+	ENGINE_TRACE(engine, "\n");
 
 	call_idle_barriers(engine); /* cleanup after wedging */
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index 24e2034..e52c2b0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -7,6 +7,7 @@
 #ifndef INTEL_ENGINE_PM_H
 #define INTEL_ENGINE_PM_H
 
+#include "i915_request.h"
 #include "intel_engine_types.h"
 #include "intel_wakeref.h"
 
@@ -41,6 +42,26 @@ static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
 	intel_wakeref_unlock_wait(&engine->wakeref);
 }
 
+static inline struct i915_request *
+intel_engine_create_kernel_request(struct intel_engine_cs *engine)
+{
+	struct i915_request *rq;
+
+	/*
+	 * The engine->kernel_context is special as it is used inside
+	 * the engine-pm barrier (see __engine_park()), circumventing
+	 * the usual mutexes and relying on the engine-pm barrier
+	 * instead. So whenever we use the engine->kernel_context
+	 * outside of the barrier, we must manually handle the
+	 * engine wakeref to serialise with the use inside.
+	 */
+	intel_engine_pm_get(engine);
+	rq = i915_request_create(engine->kernel_context);
+	intel_engine_pm_put(engine);
+
+	return rq;
+}
+
 void intel_engine_init__pm(struct intel_engine_cs *engine);
 
 #endif /* INTEL_ENGINE_PM_H */
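
A minimal sketch of how the pm-aware helper above is intended to be used
(the emit_kernel_nop() wrapper is hypothetical and not part of the patch;
it only illustrates the call pattern):

	/* Illustrative only: emit a simple request on the kernel context. */
	static int emit_kernel_nop(struct intel_engine_cs *engine)
	{
		struct i915_request *rq;

		/* takes and drops an engine wakeref around request creation */
		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		i915_request_add(rq);
		return 0;
	}
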
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 2b44647..350da59 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -7,6 +7,7 @@
 #ifndef __INTEL_ENGINE_TYPES__
 #define __INTEL_ENGINE_TYPES__
 
+#include <linux/average.h>
 #include <linux/hashtable.h>
 #include <linux/irq_work.h>
 #include <linux/kref.h>
@@ -119,6 +120,9 @@ enum intel_engine_id {
 #define INVALID_ENGINE ((enum intel_engine_id)-1)
 };
 
+/* A simple estimator for the round-trip latency of an engine */
+DECLARE_EWMA(_engine_latency, 6, 4)
+
 struct st_preempt_hang {
 	struct completion completion;
 	unsigned int count;
@@ -316,6 +320,13 @@ struct intel_engine_cs {
 		struct intel_timeline *timeline;
 	} legacy;
 
+	/*
+	 * We track the average duration of the idle pulse on parking the
+	 * engine to keep an estimate of how fast the engine is
+	 * under ideal conditions.
+	 */
+	struct ewma__engine_latency latency;
+
 	/* Rather than have every client wait upon all user interrupts,
 	 * with the herd waking after every interrupt and each doing the
 	 * heavyweight seqno dance, we delegate the task (of being the
@@ -389,7 +400,10 @@ struct intel_engine_cs {
 
 	struct {
 		void (*prepare)(struct intel_engine_cs *engine);
-		void (*reset)(struct intel_engine_cs *engine, bool stalled);
+
+		void (*rewind)(struct intel_engine_cs *engine, bool stalled);
+		void (*cancel)(struct intel_engine_cs *engine);
+
 		void (*finish)(struct intel_engine_cs *engine);
 	} reset;
 
@@ -439,15 +453,7 @@ struct intel_engine_cs {
 	void		(*schedule)(struct i915_request *request,
 				    const struct i915_sched_attr *attr);
 
-	/*
-	 * Cancel all requests on the hardware, or queued for execution.
-	 * This should only cancel the ready requests that have been
-	 * submitted to the engine (via the engine->submit_request callback).
-	 * This is called when marking the device as wedged.
-	 */
-	void		(*cancel_requests)(struct intel_engine_cs *engine);
-
-	void		(*destroy)(struct intel_engine_cs *engine);
+	void		(*release)(struct intel_engine_cs *engine);
 
 	struct intel_engine_execlists execlists;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 7f7150a..9e7f12b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -11,6 +11,7 @@
 #include "i915_drv.h"
 #include "intel_engine.h"
 #include "intel_engine_user.h"
+#include "intel_gt.h"
 
 struct intel_engine_cs *
 intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
@@ -200,6 +201,9 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
 				     uabi_node);
 		char old[sizeof(engine->name)];
 
+		if (intel_gt_has_init_error(engine->gt))
+			continue; /* ignore incomplete engines */
+
 		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
 		engine->uabi_class = uabi_classes[engine->class];
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
new file mode 100644
index 0000000..531d501
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -0,0 +1,1486 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/stop_machine.h>
+
+#include <asm/set_memory.h>
+#include <asm/smp.h>
+
+#include "intel_gt.h"
+#include "i915_drv.h"
+#include "i915_scatterlist.h"
+#include "i915_vgpu.h"
+
+#include "intel_gtt.h"
+
+static int
+i915_get_ggtt_vma_pages(struct i915_vma *vma);
+
+static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
+				   unsigned long color,
+				   u64 *start,
+				   u64 *end)
+{
+	if (i915_node_color_differs(node, color))
+		*start += I915_GTT_PAGE_SIZE;
+
+	/*
+	 * Also leave a space between the unallocated reserved node after the
+	 * GTT and any objects within the GTT, i.e. we use the color adjustment
+	 * to insert a guard page to prevent prefetches crossing over the
+	 * GTT boundary.
+	 */
+	node = list_next_entry(node, node_list);
+	if (node->color != color)
+		*end -= I915_GTT_PAGE_SIZE;
+}
+
+static int ggtt_init_hw(struct i915_ggtt *ggtt)
+{
+	struct drm_i915_private *i915 = ggtt->vm.i915;
+
+	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
+
+	ggtt->vm.is_ggtt = true;
+
+	/* Only VLV supports read-only GGTT mappings */
+	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
+
+	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
+		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
+
+	if (ggtt->mappable_end) {
+		if (!io_mapping_init_wc(&ggtt->iomap,
+					ggtt->gmadr.start,
+					ggtt->mappable_end)) {
+			ggtt->vm.cleanup(&ggtt->vm);
+			return -EIO;
+		}
+
+		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
+					      ggtt->mappable_end);
+	}
+
+	i915_ggtt_init_fences(ggtt);
+
+	return 0;
+}
+
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @i915: i915 device
+ */
+int i915_ggtt_init_hw(struct drm_i915_private *i915)
+{
+	int ret;
+
+	stash_init(&i915->mm.wc_stash);
+
+	/*
+	 * Note that we use page colouring to enforce a guard page at the
+	 * end of the address space. This is required as the CS may prefetch
+	 * beyond the end of the batch buffer, across the page boundary,
+	 * and beyond the end of the GTT if we do not provide a guard.
+	 */
+	ret = ggtt_init_hw(&i915->ggtt);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static bool needs_idle_maps(struct drm_i915_private *i915)
+{
+	/*
+	 * Query intel_iommu to see if we need the workaround. Presumably that
+	 * was loaded first.
+	 */
+	return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
+}
+
+static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
+{
+	struct drm_i915_private *i915 = ggtt->vm.i915;
+
+	/*
+	 * Don't bother messing with faults pre GEN6 as we have little
+	 * documentation supporting that it's a good idea.
+	 */
+	if (INTEL_GEN(i915) < 6)
+		return;
+
+	intel_gt_check_and_clear_faults(ggtt->vm.gt);
+
+	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
+
+	ggtt->invalidate(ggtt);
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
+{
+	ggtt_suspend_mappings(&i915->ggtt);
+}
+
+void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+
+	spin_lock_irq(&uncore->lock);
+	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+	intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
+	spin_unlock_irq(&uncore->lock);
+}
+
+static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+
+	/*
+	 * Note that as an uncached mmio write, this will flush the
+	 * WCB of the writes into the GGTT before it triggers the invalidate.
+	 */
+	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+}
+
+static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+	struct drm_i915_private *i915 = ggtt->vm.i915;
+
+	gen8_ggtt_invalidate(ggtt);
+
+	if (INTEL_GEN(i915) >= 12)
+		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
+				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
+	else
+		intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+}
+
+static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+	intel_gtt_chipset_flush();
+}
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+	writeq(pte, addr);
+}
+
+static void gen8_ggtt_insert_page(struct i915_address_space *vm,
+				  dma_addr_t addr,
+				  u64 offset,
+				  enum i915_cache_level level,
+				  u32 unused)
+{
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+	gen8_pte_t __iomem *pte =
+		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+	gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
+
+	ggtt->invalidate(ggtt);
+}
+
+static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+				     struct i915_vma *vma,
+				     enum i915_cache_level level,
+				     u32 flags)
+{
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+	struct sgt_iter sgt_iter;
+	gen8_pte_t __iomem *gtt_entries;
+	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
+	dma_addr_t addr;
+
+	/*
+	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+	 * not to allow the user to override access to a read only page.
+	 */
+
+	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
+	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
+	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
+		gen8_set_pte(gtt_entries++, pte_encode | addr);
+
+	/*
+	 * We want to flush the TLBs only after we're certain all the PTE
+	 * updates have finished.
+	 */
+	ggtt->invalidate(ggtt);
+}
+
+static void gen6_ggtt_insert_page(struct i915_address_space *vm,
+				  dma_addr_t addr,
+				  u64 offset,
+				  enum i915_cache_level level,
+				  u32 flags)
+{
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+	gen6_pte_t __iomem *pte =
+		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+	iowrite32(vm->pte_encode(addr, level, flags), pte);
+
+	ggtt->invalidate(ggtt);
+}
+
+/*
+ * Binds an object into the global gtt with the specified cache level.
+ * The object will be accessible to the GPU via commands whose operands
+ * reference offsets within the global GTT as well as accessible by the GPU
+ * through the GMADR mapped BAR (i915->mm.gtt->gtt).
+ */
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
+				     struct i915_vma *vma,
+				     enum i915_cache_level level,
+				     u32 flags)
+{
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
+	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
+	struct sgt_iter iter;
+	dma_addr_t addr;
+
+	for_each_sgt_daddr(addr, iter, vma->pages)
+		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
+
+	/*
+	 * We want to flush the TLBs only after we're certain all the PTE
+	 * updates have finished.
+	 */
+	ggtt->invalidate(ggtt);
+}
+
+static void nop_clear_range(struct i915_address_space *vm,
+			    u64 start, u64 length)
+{
+}
+
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+				  u64 start, u64 length)
+{
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+	const gen8_pte_t scratch_pte = vm->scratch[0].encode;
+	gen8_pte_t __iomem *gtt_base =
+		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+	int i;
+
+	if (WARN(num_entries > max_entries,
+		 "First entry = %d; Num entries = %d (max=%d)\n",
+		 first_entry, num_entries, max_entries))
+		num_entries = max_entries;
+
+	for (i = 0; i < num_entries; i++)
+		gen8_set_pte(&gtt_base[i], scratch_pte);
+}
+
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+	/*
+	 * Make sure the internal GAM fifo has been cleared of all GTT
+	 * writes before exiting stop_machine(). This guarantees that
+	 * any aperture accesses waiting to start in another process
+	 * cannot back up behind the GTT writes causing a hang.
+	 * The register can be any arbitrary GAM register.
+	 */
+	intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
+}
+
+struct insert_page {
+	struct i915_address_space *vm;
+	dma_addr_t addr;
+	u64 offset;
+	enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+	struct insert_page *arg = _arg;
+
+	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+	bxt_vtd_ggtt_wa(arg->vm);
+
+	return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+					  dma_addr_t addr,
+					  u64 offset,
+					  enum i915_cache_level level,
+					  u32 unused)
+{
+	struct insert_page arg = { vm, addr, offset, level };
+
+	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+	struct i915_address_space *vm;
+	struct i915_vma *vma;
+	enum i915_cache_level level;
+	u32 flags;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+	struct insert_entries *arg = _arg;
+
+	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
+	bxt_vtd_ggtt_wa(arg->vm);
+
+	return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+					     struct i915_vma *vma,
+					     enum i915_cache_level level,
+					     u32 flags)
+{
+	struct insert_entries arg = { vm, vma, level, flags };
+
+	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+struct clear_range {
+	struct i915_address_space *vm;
+	u64 start;
+	u64 length;
+};
+
+static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
+{
+	struct clear_range *arg = _arg;
+
+	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+	bxt_vtd_ggtt_wa(arg->vm);
+
+	return 0;
+}
+
+static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
+					  u64 start,
+					  u64 length)
+{
+	struct clear_range arg = { vm, start, length };
+
+	stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
+}
+
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
+				  u64 start, u64 length)
+{
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+	gen6_pte_t scratch_pte, __iomem *gtt_base =
+		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
+	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+	int i;
+
+	if (WARN(num_entries > max_entries,
+		 "First entry = %d; Num entries = %d (max=%d)\n",
+		 first_entry, num_entries, max_entries))
+		num_entries = max_entries;
+
+	scratch_pte = vm->scratch[0].encode;
+	for (i = 0; i < num_entries; i++)
+		iowrite32(scratch_pte, &gtt_base[i]);
+}
+
+static void i915_ggtt_insert_page(struct i915_address_space *vm,
+				  dma_addr_t addr,
+				  u64 offset,
+				  enum i915_cache_level cache_level,
+				  u32 unused)
+{
+	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
+}
+
+static void i915_ggtt_insert_entries(struct i915_address_space *vm,
+				     struct i915_vma *vma,
+				     enum i915_cache_level cache_level,
+				     u32 unused)
+{
+	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
+				    flags);
+}
+
+static void i915_ggtt_clear_range(struct i915_address_space *vm,
+				  u64 start, u64 length)
+{
+	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
+}
+
+static int ggtt_bind_vma(struct i915_vma *vma,
+			 enum i915_cache_level cache_level,
+			 u32 flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	u32 pte_flags;
+
+	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
+	pte_flags = 0;
+	if (i915_gem_object_is_readonly(obj))
+		pte_flags |= PTE_READ_ONLY;
+
+	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+
+	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+
+	/*
+	 * Without aliasing PPGTT there's no difference between
+	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
+	 * upgrade to both bound if we bind either to avoid double-binding.
+	 */
+	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
+
+	return 0;
+}
+
+static void ggtt_unbind_vma(struct i915_vma *vma)
+{
+	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+}
+
+static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
+{
+	u64 size;
+	int ret;
+
+	if (!USES_GUC(ggtt->vm.i915))
+		return 0;
+
+	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
+	size = ggtt->vm.total - GUC_GGTT_TOP;
+
+	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
+				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
+				   PIN_NOEVICT);
+	if (ret)
+		DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
+
+	return ret;
+}
+
+static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
+{
+	if (drm_mm_node_allocated(&ggtt->uc_fw))
+		drm_mm_remove_node(&ggtt->uc_fw);
+}
+
+static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
+{
+	ggtt_release_guc_top(ggtt);
+	if (drm_mm_node_allocated(&ggtt->error_capture))
+		drm_mm_remove_node(&ggtt->error_capture);
+	mutex_destroy(&ggtt->error_mutex);
+}
+
+static int init_ggtt(struct i915_ggtt *ggtt)
+{
+	/*
+	 * Let GEM Manage all of the aperture.
+	 *
+	 * However, leave one page at the end still bound to the scratch page.
+	 * There are a number of places where the hardware apparently prefetches
+	 * past the end of the object, and we've seen multiple hangs with the
+	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
+	 * aperture.  One page should be enough to keep any prefetching inside
+	 * of the aperture.
+	 */
+	unsigned long hole_start, hole_end;
+	struct drm_mm_node *entry;
+	int ret;
+
+	/*
+	 * GuC requires all resources that we're sharing with it to be placed in
+	 * non-WOPCM memory. If GuC is not present or not in use we still need a
+	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
+	 * why.
+	 */
+	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
+			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
+
+	ret = intel_vgt_balloon(ggtt);
+	if (ret)
+		return ret;
+
+	mutex_init(&ggtt->error_mutex);
+	if (ggtt->mappable_end) {
+		/* Reserve a mappable slot for our lockless error capture */
+		ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
+						  &ggtt->error_capture,
+						  PAGE_SIZE, 0,
+						  I915_COLOR_UNEVICTABLE,
+						  0, ggtt->mappable_end,
+						  DRM_MM_INSERT_LOW);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * The upper portion of the GuC address space has a sizeable hole
+	 * (several MB) that is inaccessible by GuC. Reserve this range within
+	 * GGTT as it can comfortably hold GuC/HuC firmware images.
+	 */
+	ret = ggtt_reserve_guc_top(ggtt);
+	if (ret)
+		goto err;
+
+	/* Clear any non-preallocated blocks */
+	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
+		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
+			      hole_start, hole_end);
+		ggtt->vm.clear_range(&ggtt->vm, hole_start,
+				     hole_end - hole_start);
+	}
+
+	/* And finally clear the reserved guard page */
+	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
+
+	return 0;
+
+err:
+	cleanup_init_ggtt(ggtt);
+	return ret;
+}
+
+static int aliasing_gtt_bind_vma(struct i915_vma *vma,
+				 enum i915_cache_level cache_level,
+				 u32 flags)
+{
+	u32 pte_flags;
+	int ret;
+
+	/* Currently applicable only to VLV */
+	pte_flags = 0;
+	if (i915_gem_object_is_readonly(vma->obj))
+		pte_flags |= PTE_READ_ONLY;
+
+	if (flags & I915_VMA_LOCAL_BIND) {
+		struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
+
+		if (flags & I915_VMA_ALLOC) {
+			ret = alias->vm.allocate_va_range(&alias->vm,
+							  vma->node.start,
+							  vma->size);
+			if (ret)
+				return ret;
+
+			set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
+		}
+
+		GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
+				     __i915_vma_flags(vma)));
+		alias->vm.insert_entries(&alias->vm, vma,
+					 cache_level, pte_flags);
+	}
+
+	if (flags & I915_VMA_GLOBAL_BIND)
+		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+
+	return 0;
+}
+
+static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
+{
+	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
+		struct i915_address_space *vm = vma->vm;
+
+		vm->clear_range(vm, vma->node.start, vma->size);
+	}
+
+	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
+		struct i915_address_space *vm =
+			&i915_vm_to_ggtt(vma->vm)->alias->vm;
+
+		vm->clear_range(vm, vma->node.start, vma->size);
+	}
+}
+
+static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
+{
+	struct i915_ppgtt *ppgtt;
+	int err;
+
+	ppgtt = i915_ppgtt_create(ggtt->vm.gt);
+	if (IS_ERR(ppgtt))
+		return PTR_ERR(ppgtt);
+
+	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
+		err = -ENODEV;
+		goto err_ppgtt;
+	}
+
+	/*
+	 * Note we only pre-allocate as far as the end of the global
+	 * GTT. On 48b / 4-level page-tables, the difference is very,
+	 * very significant! We have to preallocate as GVT/vgpu does
+	 * not like the page directory disappearing.
+	 */
+	err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
+	if (err)
+		goto err_ppgtt;
+
+	ggtt->alias = ppgtt;
+	ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
+
+	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
+	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
+
+	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
+	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
+
+	return 0;
+
+err_ppgtt:
+	i915_vm_put(&ppgtt->vm);
+	return err;
+}
+
+static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
+{
+	struct i915_ppgtt *ppgtt;
+
+	ppgtt = fetch_and_zero(&ggtt->alias);
+	if (!ppgtt)
+		return;
+
+	i915_vm_put(&ppgtt->vm);
+
+	ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
+	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+}
+
+int i915_init_ggtt(struct drm_i915_private *i915)
+{
+	int ret;
+
+	ret = init_ggtt(&i915->ggtt);
+	if (ret)
+		return ret;
+
+	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
+		ret = init_aliasing_ppgtt(&i915->ggtt);
+		if (ret)
+			cleanup_init_ggtt(&i915->ggtt);
+	}
+
+	return 0;
+}
+
+static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
+{
+	struct i915_vma *vma, *vn;
+
+	atomic_set(&ggtt->vm.open, 0);
+
+	rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
+	flush_workqueue(ggtt->vm.i915->wq);
+
+	mutex_lock(&ggtt->vm.mutex);
+
+	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
+		WARN_ON(__i915_vma_unbind(vma));
+
+	if (drm_mm_node_allocated(&ggtt->error_capture))
+		drm_mm_remove_node(&ggtt->error_capture);
+	mutex_destroy(&ggtt->error_mutex);
+
+	ggtt_release_guc_top(ggtt);
+	intel_vgt_deballoon(ggtt);
+
+	ggtt->vm.cleanup(&ggtt->vm);
+
+	mutex_unlock(&ggtt->vm.mutex);
+	i915_address_space_fini(&ggtt->vm);
+
+	arch_phys_wc_del(ggtt->mtrr);
+
+	if (ggtt->iomap.size)
+		io_mapping_fini(&ggtt->iomap);
+}
+
+/**
+ * i915_ggtt_driver_release - Clean up GGTT hardware initialization
+ * @i915: i915 device
+ */
+void i915_ggtt_driver_release(struct drm_i915_private *i915)
+{
+	struct pagevec *pvec;
+
+	fini_aliasing_ppgtt(&i915->ggtt);
+
+	ggtt_cleanup_hw(&i915->ggtt);
+
+	pvec = &i915->mm.wc_stash.pvec;
+	if (pvec->nr) {
+		set_pages_array_wb(pvec->pages, pvec->nr);
+		__pagevec_release(pvec);
+	}
+}
+
+static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+	return snb_gmch_ctl << 20;
+}
+
+static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+{
+	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
+	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
+	if (bdw_gmch_ctl)
+		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+
+#ifdef CONFIG_X86_32
+	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
+	if (bdw_gmch_ctl > 4)
+		bdw_gmch_ctl = 4;
+#endif
+
+	return bdw_gmch_ctl << 20;
+}
+
+static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+{
+	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
+	gmch_ctrl &= SNB_GMCH_GGMS_MASK;
+
+	if (gmch_ctrl)
+		return 1 << (20 + gmch_ctrl);
+
+	return 0;
+}
+
+static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
+{
+	struct drm_i915_private *i915 = ggtt->vm.i915;
+	struct pci_dev *pdev = i915->drm.pdev;
+	phys_addr_t phys_addr;
+	int ret;
+
+	/* For Modern GENs the PTEs and register space are split in the BAR */
+	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
+
+	/*
+	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
+	 * will be dropped. For WC mappings in general we have 64 byte burst
+	 * writes when the WC buffer is flushed, so we can't use it, but have to
+	 * resort to an uncached mapping. The WC issue is easily caught by the
+	 * readback check when writing GTT PTE entries.
+	 */
+	if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
+		ggtt->gsm = ioremap(phys_addr, size);
+	else
+		ggtt->gsm = ioremap_wc(phys_addr, size);
+	if (!ggtt->gsm) {
+		DRM_ERROR("Failed to map the ggtt page table\n");
+		return -ENOMEM;
+	}
+
+	ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
+	if (ret) {
+		DRM_ERROR("Scratch setup failed\n");
+		/* iounmap will also get called at remove, but meh */
+		iounmap(ggtt->gsm);
+		return ret;
+	}
+
+	ggtt->vm.scratch[0].encode =
+		ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
+				    I915_CACHE_NONE, 0);
+
+	return 0;
+}
+
+int ggtt_set_pages(struct i915_vma *vma)
+{
+	int ret;
+
+	GEM_BUG_ON(vma->pages);
+
+	ret = i915_get_ggtt_vma_pages(vma);
+	if (ret)
+		return ret;
+
+	vma->page_sizes = vma->obj->mm.page_sizes;
+
+	return 0;
+}
+
+static void gen6_gmch_remove(struct i915_address_space *vm)
+{
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+	iounmap(ggtt->gsm);
+	cleanup_scratch_page(vm);
+}
+
+static struct resource pci_resource(struct pci_dev *pdev, int bar)
+{
+	return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
+					       pci_resource_len(pdev, bar));
+}
+
+static int gen8_gmch_probe(struct i915_ggtt *ggtt)
+{
+	struct drm_i915_private *i915 = ggtt->vm.i915;
+	struct pci_dev *pdev = i915->drm.pdev;
+	unsigned int size;
+	u16 snb_gmch_ctl;
+	int err;
+
+	/* TODO: We're not aware of mappable constraints on gen8 yet */
+	if (!IS_DGFX(i915)) {
+		ggtt->gmadr = pci_resource(pdev, 2);
+		ggtt->mappable_end = resource_size(&ggtt->gmadr);
+	}
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
+	if (!err)
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
+	if (err)
+		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
+
+	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+	if (IS_CHERRYVIEW(i915))
+		size = chv_get_total_gtt_size(snb_gmch_ctl);
+	else
+		size = gen8_get_total_gtt_size(snb_gmch_ctl);
+
+	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+	ggtt->vm.cleanup = gen6_gmch_remove;
+	ggtt->vm.insert_page = gen8_ggtt_insert_page;
+	ggtt->vm.clear_range = nop_clear_range;
+	if (intel_scanout_needs_vtd_wa(i915))
+		ggtt->vm.clear_range = gen8_ggtt_clear_range;
+
+	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+
+	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
+	if (intel_ggtt_update_needs_vtd_wa(i915) ||
+	    IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
+		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
+		if (ggtt->vm.clear_range != nop_clear_range)
+			ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+	}
+
+	ggtt->invalidate = gen8_ggtt_invalidate;
+
+	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
+	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
+	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+	ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+	ggtt->vm.pte_encode = gen8_pte_encode;
+
+	setup_private_pat(ggtt->vm.gt->uncore);
+
+	return ggtt_probe_common(ggtt, size);
+}
+
+static u64 snb_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
+{
+	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+	switch (level) {
+	case I915_CACHE_L3_LLC:
+	case I915_CACHE_LLC:
+		pte |= GEN6_PTE_CACHE_LLC;
+		break;
+	case I915_CACHE_NONE:
+		pte |= GEN6_PTE_UNCACHED;
+		break;
+	default:
+		MISSING_CASE(level);
+	}
+
+	return pte;
+}
+
+static u64 ivb_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
+{
+	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+	switch (level) {
+	case I915_CACHE_L3_LLC:
+		pte |= GEN7_PTE_CACHE_L3_LLC;
+		break;
+	case I915_CACHE_LLC:
+		pte |= GEN6_PTE_CACHE_LLC;
+		break;
+	case I915_CACHE_NONE:
+		pte |= GEN6_PTE_UNCACHED;
+		break;
+	default:
+		MISSING_CASE(level);
+	}
+
+	return pte;
+}
+
+static u64 byt_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
+{
+	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+	if (!(flags & PTE_READ_ONLY))
+		pte |= BYT_PTE_WRITEABLE;
+
+	if (level != I915_CACHE_NONE)
+		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
+
+	return pte;
+}
+
+static u64 hsw_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
+{
+	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+	if (level != I915_CACHE_NONE)
+		pte |= HSW_WB_LLC_AGE3;
+
+	return pte;
+}
+
+static u64 iris_pte_encode(dma_addr_t addr,
+			   enum i915_cache_level level,
+			   u32 flags)
+{
+	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+	switch (level) {
+	case I915_CACHE_NONE:
+		break;
+	case I915_CACHE_WT:
+		pte |= HSW_WT_ELLC_LLC_AGE3;
+		break;
+	default:
+		pte |= HSW_WB_ELLC_LLC_AGE3;
+		break;
+	}
+
+	return pte;
+}
+
+static int gen6_gmch_probe(struct i915_ggtt *ggtt)
+{
+	struct drm_i915_private *i915 = ggtt->vm.i915;
+	struct pci_dev *pdev = i915->drm.pdev;
+	unsigned int size;
+	u16 snb_gmch_ctl;
+	int err;
+
+	ggtt->gmadr = pci_resource(pdev, 2);
+	ggtt->mappable_end = resource_size(&ggtt->gmadr);
+
+	/*
+	 * 64/512MB is the current min/max we actually know of, but this is
+	 * just a coarse sanity check.
+	 */
+	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
+		DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
+		return -ENXIO;
+	}
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+	if (!err)
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+	if (err)
+		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
+	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+	size = gen6_get_total_gtt_size(snb_gmch_ctl);
+	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
+
+	ggtt->vm.clear_range = nop_clear_range;
+	if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
+		ggtt->vm.clear_range = gen6_ggtt_clear_range;
+	ggtt->vm.insert_page = gen6_ggtt_insert_page;
+	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+	ggtt->vm.cleanup = gen6_gmch_remove;
+
+	ggtt->invalidate = gen6_ggtt_invalidate;
+
+	if (HAS_EDRAM(i915))
+		ggtt->vm.pte_encode = iris_pte_encode;
+	else if (IS_HASWELL(i915))
+		ggtt->vm.pte_encode = hsw_pte_encode;
+	else if (IS_VALLEYVIEW(i915))
+		ggtt->vm.pte_encode = byt_pte_encode;
+	else if (INTEL_GEN(i915) >= 7)
+		ggtt->vm.pte_encode = ivb_pte_encode;
+	else
+		ggtt->vm.pte_encode = snb_pte_encode;
+
+	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
+	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
+	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+	ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+	return ggtt_probe_common(ggtt, size);
+}
+
+static void i915_gmch_remove(struct i915_address_space *vm)
+{
+	intel_gmch_remove();
+}
+
+static int i915_gmch_probe(struct i915_ggtt *ggtt)
+{
+	struct drm_i915_private *i915 = ggtt->vm.i915;
+	phys_addr_t gmadr_base;
+	int ret;
+
+	ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
+	if (!ret) {
+		DRM_ERROR("failed to set up gmch\n");
+		return -EIO;
+	}
+
+	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
+
+	ggtt->gmadr =
+		(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
+
+	ggtt->do_idle_maps = needs_idle_maps(i915);
+	ggtt->vm.insert_page = i915_ggtt_insert_page;
+	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
+	ggtt->vm.clear_range = i915_ggtt_clear_range;
+	ggtt->vm.cleanup = i915_gmch_remove;
+
+	ggtt->invalidate = gmch_ggtt_invalidate;
+
+	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
+	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
+	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+	ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+	if (unlikely(ggtt->do_idle_maps))
+		dev_notice(i915->drm.dev,
+			   "Applying Ironlake quirks for intel_iommu\n");
+
+	return 0;
+}
+
+static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	int ret;
+
+	ggtt->vm.gt = gt;
+	ggtt->vm.i915 = i915;
+	ggtt->vm.dma = &i915->drm.pdev->dev;
+
+	if (INTEL_GEN(i915) <= 5)
+		ret = i915_gmch_probe(ggtt);
+	else if (INTEL_GEN(i915) < 8)
+		ret = gen6_gmch_probe(ggtt);
+	else
+		ret = gen8_gmch_probe(ggtt);
+	if (ret)
+		return ret;
+
+	if ((ggtt->vm.total - 1) >> 32) {
+		DRM_ERROR("We never expected a Global GTT with more than 32bits"
+			  " of address space! Found %lldM!\n",
+			  ggtt->vm.total >> 20);
+		ggtt->vm.total = 1ULL << 32;
+		ggtt->mappable_end =
+			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
+	}
+
+	if (ggtt->mappable_end > ggtt->vm.total) {
+		DRM_ERROR("mappable aperture extends past end of GGTT,"
+			  " aperture=%pa, total=%llx\n",
+			  &ggtt->mappable_end, ggtt->vm.total);
+		ggtt->mappable_end = ggtt->vm.total;
+	}
+
+	/* GMADR is the PCI mmio aperture into the global GTT. */
+	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
+	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
+	DRM_DEBUG_DRIVER("DSM size = %lluM\n",
+			 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
+
+	return 0;
+}
+
+/**
+ * i915_ggtt_probe_hw - Probe GGTT hardware location
+ * @i915: i915 device
+ */
+int i915_ggtt_probe_hw(struct drm_i915_private *i915)
+{
+	int ret;
+
+	ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
+	if (ret)
+		return ret;
+
+	if (intel_vtd_active())
+		dev_info(i915->drm.dev, "VT-d active for gfx access\n");
+
+	return 0;
+}
+
+int i915_ggtt_enable_hw(struct drm_i915_private *i915)
+{
+	if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
+		return -EIO;
+
+	return 0;
+}
+
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
+{
+	GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
+
+	ggtt->invalidate = guc_ggtt_invalidate;
+
+	ggtt->invalidate(ggtt);
+}
+
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
+{
+	/* XXX Temporary pardon for error unload */
+	if (ggtt->invalidate == gen8_ggtt_invalidate)
+		return;
+
+	/* We should only be called after i915_ggtt_enable_guc() */
+	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
+
+	ggtt->invalidate = gen8_ggtt_invalidate;
+
+	ggtt->invalidate(ggtt);
+}
+
+static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
+{
+	struct i915_vma *vma;
+	bool flush = false;
+	int open;
+
+	intel_gt_check_and_clear_faults(ggtt->vm.gt);
+
+	mutex_lock(&ggtt->vm.mutex);
+
+	/* First fill our portion of the GTT with scratch pages */
+	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
+
+	/* Skip rewriting PTE on VMA unbind. */
+	open = atomic_xchg(&ggtt->vm.open, 0);
+
+	/* clflush objects bound into the GGTT and rebind them. */
+	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
+		struct drm_i915_gem_object *obj = vma->obj;
+
+		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+			continue;
+
+		clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
+		WARN_ON(i915_vma_bind(vma,
+				      obj ? obj->cache_level : 0,
+				      PIN_GLOBAL, NULL));
+		if (obj) { /* only used during resume => exclusive access */
+			flush |= fetch_and_zero(&obj->write_domain);
+			obj->read_domains |= I915_GEM_DOMAIN_GTT;
+		}
+	}
+
+	atomic_set(&ggtt->vm.open, open);
+	ggtt->invalidate(ggtt);
+
+	mutex_unlock(&ggtt->vm.mutex);
+
+	if (flush)
+		wbinvd_on_all_cpus();
+}
+
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
+{
+	struct i915_ggtt *ggtt = &i915->ggtt;
+
+	ggtt_restore_mappings(ggtt);
+
+	if (INTEL_GEN(i915) >= 8)
+		setup_private_pat(ggtt->vm.gt->uncore);
+}
+
+static struct scatterlist *
+rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
+	     unsigned int width, unsigned int height,
+	     unsigned int stride,
+	     struct sg_table *st, struct scatterlist *sg)
+{
+	unsigned int column, row;
+	unsigned int src_idx;
+
+	for (column = 0; column < width; column++) {
+		src_idx = stride * (height - 1) + column + offset;
+		for (row = 0; row < height; row++) {
+			st->nents++;
+			/*
+			 * We don't need the pages, but need to initialize
+			 * the entries so the sg list can be happily traversed.
+			 * All we need are the DMA addresses.
+			 */
+			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
+			sg_dma_address(sg) =
+				i915_gem_object_get_dma_address(obj, src_idx);
+			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
+			sg = sg_next(sg);
+			src_idx -= stride;
+		}
+	}
+
+	return sg;
+}
+
+static noinline struct sg_table *
+intel_rotate_pages(struct intel_rotation_info *rot_info,
+		   struct drm_i915_gem_object *obj)
+{
+	unsigned int size = intel_rotation_info_size(rot_info);
+	struct sg_table *st;
+	struct scatterlist *sg;
+	int ret = -ENOMEM;
+	int i;
+
+	/* Allocate target SG list. */
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (!st)
+		goto err_st_alloc;
+
+	ret = sg_alloc_table(st, size, GFP_KERNEL);
+	if (ret)
+		goto err_sg_alloc;
+
+	st->nents = 0;
+	sg = st->sgl;
+
+	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
+		sg = rotate_pages(obj, rot_info->plane[i].offset,
+				  rot_info->plane[i].width, rot_info->plane[i].height,
+				  rot_info->plane[i].stride, st, sg);
+	}
+
+	return st;
+
+err_sg_alloc:
+	kfree(st);
+err_st_alloc:
+
+	DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+			 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
+
+	return ERR_PTR(ret);
+}
+
+static struct scatterlist *
+remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
+	    unsigned int width, unsigned int height,
+	    unsigned int stride,
+	    struct sg_table *st, struct scatterlist *sg)
+{
+	unsigned int row;
+
+	for (row = 0; row < height; row++) {
+		unsigned int left = width * I915_GTT_PAGE_SIZE;
+
+		while (left) {
+			dma_addr_t addr;
+			unsigned int length;
+
+			/*
+			 * We don't need the pages, but need to initialize
+			 * the entries so the sg list can be happily traversed.
+			 * All we need are the DMA addresses.
+			 */
+
+			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
+
+			length = min(left, length);
+
+			st->nents++;
+
+			sg_set_page(sg, NULL, length, 0);
+			sg_dma_address(sg) = addr;
+			sg_dma_len(sg) = length;
+			sg = sg_next(sg);
+
+			offset += length / I915_GTT_PAGE_SIZE;
+			left -= length;
+		}
+
+		offset += stride - width;
+	}
+
+	return sg;
+}
+
+static noinline struct sg_table *
+intel_remap_pages(struct intel_remapped_info *rem_info,
+		  struct drm_i915_gem_object *obj)
+{
+	unsigned int size = intel_remapped_info_size(rem_info);
+	struct sg_table *st;
+	struct scatterlist *sg;
+	int ret = -ENOMEM;
+	int i;
+
+	/* Allocate target SG list. */
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (!st)
+		goto err_st_alloc;
+
+	ret = sg_alloc_table(st, size, GFP_KERNEL);
+	if (ret)
+		goto err_sg_alloc;
+
+	st->nents = 0;
+	sg = st->sgl;
+
+	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
+		sg = remap_pages(obj, rem_info->plane[i].offset,
+				 rem_info->plane[i].width, rem_info->plane[i].height,
+				 rem_info->plane[i].stride, st, sg);
+	}
+
+	i915_sg_trim(st);
+
+	return st;
+
+err_sg_alloc:
+	kfree(st);
+err_st_alloc:
+
+	DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+			 obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
+
+	return ERR_PTR(ret);
+}
+
+static noinline struct sg_table *
+intel_partial_pages(const struct i915_ggtt_view *view,
+		    struct drm_i915_gem_object *obj)
+{
+	struct sg_table *st;
+	struct scatterlist *sg, *iter;
+	unsigned int count = view->partial.size;
+	unsigned int offset;
+	int ret = -ENOMEM;
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (!st)
+		goto err_st_alloc;
+
+	ret = sg_alloc_table(st, count, GFP_KERNEL);
+	if (ret)
+		goto err_sg_alloc;
+
+	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
+	GEM_BUG_ON(!iter);
+
+	sg = st->sgl;
+	st->nents = 0;
+	do {
+		unsigned int len;
+
+		len = min(iter->length - (offset << PAGE_SHIFT),
+			  count << PAGE_SHIFT);
+		sg_set_page(sg, NULL, len, 0);
+		sg_dma_address(sg) =
+			sg_dma_address(iter) + (offset << PAGE_SHIFT);
+		sg_dma_len(sg) = len;
+
+		st->nents++;
+		count -= len >> PAGE_SHIFT;
+		if (count == 0) {
+			sg_mark_end(sg);
+			i915_sg_trim(st); /* Drop any unused tail entries. */
+
+			return st;
+		}
+
+		sg = __sg_next(sg);
+		iter = __sg_next(iter);
+		offset = 0;
+	} while (1);
+
+err_sg_alloc:
+	kfree(st);
+err_st_alloc:
+	return ERR_PTR(ret);
+}
+
+static int
+i915_get_ggtt_vma_pages(struct i915_vma *vma)
+{
+	int ret;
+
+	/*
+	 * The vma->pages are only valid within the lifespan of the borrowed
+	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
+	 * must be the vma->pages. A simple rule is that vma->pages must only
+	 * be accessed when the obj->mm.pages are pinned.
+	 */
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+
+	switch (vma->ggtt_view.type) {
+	default:
+		GEM_BUG_ON(vma->ggtt_view.type);
+		/* fall through */
+	case I915_GGTT_VIEW_NORMAL:
+		vma->pages = vma->obj->mm.pages;
+		return 0;
+
+	case I915_GGTT_VIEW_ROTATED:
+		vma->pages =
+			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
+		break;
+
+	case I915_GGTT_VIEW_REMAPPED:
+		vma->pages =
+			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
+		break;
+
+	case I915_GGTT_VIEW_PARTIAL:
+		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
+		break;
+	}
+
+	ret = 0;
+	if (IS_ERR(vma->pages)) {
+		ret = PTR_ERR(vma->pages);
+		vma->pages = NULL;
+		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
+			  vma->ggtt_view.type, ret);
+	}
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 4294f14..51b8718 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -7,6 +7,8 @@
 #ifndef _INTEL_GPU_COMMANDS_H_
 #define _INTEL_GPU_COMMANDS_H_
 
+#include <linux/bitops.h>
+
 /*
  * Target address alignments required for GPU access e.g.
  * MI_STORE_DWORD_IMM.
@@ -319,4 +321,31 @@
 #define COLOR_BLT     ((0x2<<29)|(0x40<<22))
 #define SRC_COPY_BLT  ((0x2<<29)|(0x43<<22))
 
+/*
+ * Used to convert any address to canonical form.
+ * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
+ * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
+ * addresses to be in a canonical form:
+ * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
+ * canonical form [63:48] == [47]."
+ */
+#define GEN8_HIGH_ADDRESS_BIT 47
+static inline u64 gen8_canonical_addr(u64 address)
+{
+	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
+}
+
+static inline u64 gen8_noncanonical_addr(u64 address)
+{
+	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
+}
+
+static inline u32 *__gen6_emit_bb_start(u32 *cs, u32 addr, unsigned int flags)
+{
+	*cs++ = MI_BATCH_BUFFER_START | flags;
+	*cs++ = addr;
+
+	return cs;
+}
+
 #endif /* _INTEL_GPU_COMMANDS_H_ */
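
For illustration of the canonical-form rule quoted above: the sketch below is a standalone userspace reproduction (not part of the patch; sign_extend64() is reimplemented here only for demonstration) showing how gen8_canonical_addr() replicates bit 47 into bits 63:48 and how gen8_noncanonical_addr() masks them back off.

#include <stdint.h>
#include <stdio.h>

#define HIGH_ADDRESS_BIT 47

/* Userspace stand-in for the kernel's sign_extend64(). */
static uint64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (uint64_t)((int64_t)(value << shift) >> shift);
}

int main(void)
{
	uint64_t addr = 0x0000800000000000ull;	/* bit 47 set */
	uint64_t canonical = sign_extend64(addr, HIGH_ADDRESS_BIT);
	uint64_t noncanonical = canonical & ((1ull << (HIGH_ADDRESS_BIT + 1)) - 1);

	/* prints ffff800000000000 then 0000800000000000 */
	printf("%016llx\n%016llx\n",
	       (unsigned long long)canonical,
	       (unsigned long long)noncanonical);
	return 0;
}
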
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 4c26daf..da2b6e2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -3,12 +3,15 @@
  * Copyright © 2019 Intel Corporation
  */
 
+#include "debugfs_gt.h"
 #include "i915_drv.h"
+#include "intel_context.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
 #include "intel_mocs.h"
 #include "intel_rc6.h"
+#include "intel_renderstate.h"
 #include "intel_rps.h"
 #include "intel_uncore.h"
 #include "intel_pm.h"
@@ -25,6 +28,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 
 	intel_gt_init_reset(gt);
 	intel_gt_init_requests(gt);
+	intel_gt_init_timelines(gt);
 	intel_gt_pm_init_early(gt);
 
 	intel_rps_init_early(&gt->rps);
@@ -34,8 +38,6 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
 {
 	gt->ggtt = ggtt;
-
-	intel_gt_sanitize(gt, false);
 }
 
 static void init_unused_ring(struct intel_gt *gt, u32 base)
@@ -73,11 +75,6 @@ int intel_gt_init_hw(struct intel_gt *gt)
 	struct intel_uncore *uncore = gt->uncore;
 	int ret;
 
-	BUG_ON(!i915->kernel_context);
-	ret = intel_gt_terminally_wedged(gt);
-	if (ret)
-		return ret;
-
 	gt->last_init_time = ktime_get();
 
 	/* Double layer security blanket, see i915_gem_init() */
@@ -303,7 +300,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
 
 	intel_gt_chipset_flush(gt);
 
-	with_intel_runtime_pm(uncore->rpm, wakeref) {
+	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
 		unsigned long flags;
 
 		spin_lock_irqsave(&uncore->lock, flags);
@@ -323,6 +320,8 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
 void intel_gt_driver_register(struct intel_gt *gt)
 {
 	intel_rps_driver_register(&gt->rps);
+
+	debugfs_gt_register(gt);
 }
 
 static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
@@ -364,22 +363,272 @@ static void intel_gt_fini_scratch(struct intel_gt *gt)
 	i915_vma_unpin_and_release(&gt->scratch, 0);
 }
 
+static struct i915_address_space *kernel_vm(struct intel_gt *gt)
+{
+	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
+		return &i915_ppgtt_create(gt)->vm;
+	else
+		return i915_vm_get(&gt->ggtt->vm);
+}
+
+static int __intel_context_flush_retire(struct intel_context *ce)
+{
+	struct intel_timeline *tl;
+
+	tl = intel_context_timeline_lock(ce);
+	if (IS_ERR(tl))
+		return PTR_ERR(tl);
+
+	intel_context_timeline_unlock(tl);
+	return 0;
+}
+
+static int __engines_record_defaults(struct intel_gt *gt)
+{
+	struct i915_request *requests[I915_NUM_ENGINES] = {};
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = 0;
+
+	/*
+	 * As we reset the gpu during very early sanitisation, the current
+	 * register state on the GPU should reflect its default values.
+	 * We load a context onto the hw (with restore-inhibit), then switch
+	 * over to a second context to save that default register state. We
+	 * can then prime every new context with that state so they all start
+	 * from the same default HW values.
+	 */
+
+	for_each_engine(engine, gt, id) {
+		struct intel_renderstate so;
+		struct intel_context *ce;
+		struct i915_request *rq;
+
+		/* We must be able to switch to something! */
+		GEM_BUG_ON(!engine->kernel_context);
+
+		err = intel_renderstate_init(&so, engine);
+		if (err)
+			goto out;
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
+		rq = intel_context_create_request(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			intel_context_put(ce);
+			goto out;
+		}
+
+		err = intel_engine_emit_ctx_wa(rq);
+		if (err)
+			goto err_rq;
+
+		err = intel_renderstate_emit(&so, rq);
+		if (err)
+			goto err_rq;
+
+err_rq:
+		requests[id] = i915_request_get(rq);
+		i915_request_add(rq);
+		intel_renderstate_fini(&so);
+		if (err)
+			goto out;
+	}
+
+	/* Flush the default context image to memory, and enable powersaving. */
+	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+		err = -EIO;
+		goto out;
+	}
+
+	for (id = 0; id < ARRAY_SIZE(requests); id++) {
+		struct i915_request *rq;
+		struct i915_vma *state;
+		void *vaddr;
+
+		rq = requests[id];
+		if (!rq)
+			continue;
+
+		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
+		state = rq->context->state;
+		if (!state)
+			continue;
+
+		/* Serialise with retirement on another CPU */
+		GEM_BUG_ON(!i915_request_completed(rq));
+		err = __intel_context_flush_retire(rq->context);
+		if (err)
+			goto out;
+
+		/* We want to be able to unbind the state from the GGTT */
+		GEM_BUG_ON(intel_context_is_pinned(rq->context));
+
+		/*
+		 * As we will hold a reference to the logical state, it will
+		 * not be torn down with the context, and importantly the
+		 * object will hold onto its vma (making it possible for a
+		 * stray GTT write to corrupt our defaults). Unmap the vma
+		 * from the GTT to prevent such accidents and reclaim the
+		 * space.
+		 */
+		err = i915_vma_unbind(state);
+		if (err)
+			goto out;
+
+		i915_gem_object_lock(state->obj);
+		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
+		i915_gem_object_unlock(state->obj);
+		if (err)
+			goto out;
+
+		i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
+
+		/* Check we can acquire the image of the context state */
+		vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
+		if (IS_ERR(vaddr)) {
+			err = PTR_ERR(vaddr);
+			goto out;
+		}
+
+		rq->engine->default_state = i915_gem_object_get(state->obj);
+		i915_gem_object_unpin_map(state->obj);
+	}
+
+out:
+	/*
+	 * If we have to abandon now, we expect the engines to be idle
+	 * and ready to be torn-down. The quickest way we can accomplish
+	 * this is by declaring ourselves wedged.
+	 */
+	if (err)
+		intel_gt_set_wedged(gt);
+
+	for (id = 0; id < ARRAY_SIZE(requests); id++) {
+		struct intel_context *ce;
+		struct i915_request *rq;
+
+		rq = requests[id];
+		if (!rq)
+			continue;
+
+		ce = rq->context;
+		i915_request_put(rq);
+		intel_context_put(ce);
+	}
+	return err;
+}
+
+static int __engines_verify_workarounds(struct intel_gt *gt)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = 0;
+
+	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		return 0;
+
+	for_each_engine(engine, gt, id) {
+		if (intel_engine_verify_workarounds(engine, "load"))
+			err = -EIO;
+	}
+
+	return err;
+}
+
+static void __intel_gt_disable(struct intel_gt *gt)
+{
+	intel_gt_set_wedged_on_init(gt);
+
+	intel_gt_suspend_prepare(gt);
+	intel_gt_suspend_late(gt);
+
+	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+}
+
 int intel_gt_init(struct intel_gt *gt)
 {
 	int err;
 
-	err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+	err = i915_inject_probe_error(gt->i915, -ENODEV);
 	if (err)
 		return err;
 
+	/*
+	 * This is just a security blanket to placate dragons.
+	 * On some systems, we very sporadically observe that the first TLBs
+	 * used by the CS may be stale, despite us poking the TLB reset. If
+	 * we hold the forcewake during initialisation these problems
+	 * just magically go away.
+	 */
+	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+	err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+	if (err)
+		goto out_fw;
+
 	intel_gt_pm_init(gt);
 
-	return 0;
+	gt->vm = kernel_vm(gt);
+	if (!gt->vm) {
+		err = -ENOMEM;
+		goto err_pm;
+	}
+
+	err = intel_engines_init(gt);
+	if (err)
+		goto err_engines;
+
+	intel_uc_init(&gt->uc);
+
+	err = intel_gt_resume(gt);
+	if (err)
+		goto err_uc_init;
+
+	err = __engines_record_defaults(gt);
+	if (err)
+		goto err_gt;
+
+	err = __engines_verify_workarounds(gt);
+	if (err)
+		goto err_gt;
+
+	err = i915_inject_probe_error(gt->i915, -EIO);
+	if (err)
+		goto err_gt;
+
+	goto out_fw;
+err_gt:
+	__intel_gt_disable(gt);
+	intel_uc_fini_hw(&gt->uc);
+err_uc_init:
+	intel_uc_fini(&gt->uc);
+err_engines:
+	intel_engines_release(gt);
+	i915_vm_put(fetch_and_zero(&gt->vm));
+err_pm:
+	intel_gt_pm_fini(gt);
+	intel_gt_fini_scratch(gt);
+out_fw:
+	if (err)
+		intel_gt_set_wedged_on_init(gt);
+	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+	return err;
 }
 
 void intel_gt_driver_remove(struct intel_gt *gt)
 {
-	GEM_BUG_ON(gt->awake);
+	__intel_gt_disable(gt);
+
+	intel_uc_fini_hw(&gt->uc);
+	intel_uc_fini(&gt->uc);
+
+	intel_engines_release(gt);
 }
 
 void intel_gt_driver_unregister(struct intel_gt *gt)
@@ -389,6 +638,12 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
 
 void intel_gt_driver_release(struct intel_gt *gt)
 {
+	struct i915_address_space *vm;
+
+	vm = fetch_and_zero(&gt->vm);
+	if (vm) /* FIXME being called twice on error paths :( */
+		i915_vm_put(vm);
+
 	intel_gt_pm_fini(gt);
 	intel_gt_fini_scratch(gt);
 }
@@ -396,5 +651,8 @@ void intel_gt_driver_release(struct intel_gt *gt)
 void intel_gt_driver_late_release(struct intel_gt *gt)
 {
 	intel_uc_driver_late_release(&gt->uc);
+	intel_gt_fini_requests(gt);
 	intel_gt_fini_reset(gt);
+	intel_gt_fini_timelines(gt);
+	intel_engines_free(gt);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 5436f8c..1dac441 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -12,6 +12,12 @@
 
 struct drm_i915_private;
 
+#define GT_TRACE(gt, fmt, ...) do {					\
+	const struct intel_gt *gt__ __maybe_unused = (gt);		\
+	GEM_TRACE("%s  " fmt, dev_name(gt__->i915->drm.dev),		\
+		  ##__VA_ARGS__);					\
+} while (0)
+
 static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
 {
 	return container_of(uc, struct intel_gt, uc);
@@ -52,9 +58,14 @@ static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
 	return i915_ggtt_offset(gt->scratch) + field;
 }
 
-static inline bool intel_gt_is_wedged(struct intel_gt *gt)
+static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
 {
 	return __intel_reset_failed(&gt->reset);
 }
 
+static inline bool intel_gt_has_init_error(const struct intel_gt *gt)
+{
+	return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+}
+
 #endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 973ee7e..f796bdf 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -28,7 +28,7 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 		tasklet = true;
 
 	if (iir & GT_RENDER_USER_INTERRUPT) {
-		intel_engine_breadcrumbs_irq(engine);
+		intel_engine_signal_breadcrumbs(engine);
 		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
 	}
 
@@ -245,9 +245,9 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
 void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
 {
 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
+		intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+		intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
 }
 
 static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
@@ -271,11 +271,11 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
 void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
 {
 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
+		intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
 	if (gt_iir & GT_BSD_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+		intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
 	if (gt_iir & GT_BLT_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(gt->engine_class[COPY_ENGINE_CLASS][0]);
+		intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]);
 
 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
 		      GT_BSD_CS_ERROR_INTERRUPT |
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 7e64b7d..d1c2f03 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -43,7 +43,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
 	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
 	struct drm_i915_private *i915 = gt->i915;
 
-	GEM_TRACE("\n");
+	GT_TRACE(gt, "\n");
 
 	i915_globals_unpark();
 
@@ -61,9 +61,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
 	gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
 	GEM_BUG_ON(!gt->awake);
 
-	if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
-		intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
+	intel_rc6_unpark(&gt->rc6);
 	intel_rps_unpark(&gt->rps);
 	i915_pmu_gt_unparked(i915);
 
@@ -78,22 +76,18 @@ static int __gt_park(struct intel_wakeref *wf)
 	intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
 	struct drm_i915_private *i915 = gt->i915;
 
-	GEM_TRACE("\n");
+	GT_TRACE(gt, "\n");
 
 	intel_gt_park_requests(gt);
 
 	i915_vma_parked(gt);
 	i915_pmu_gt_parked(i915);
 	intel_rps_park(&gt->rps);
+	intel_rc6_park(&gt->rc6);
 
 	/* Everything switched off, flush any residual interrupt just in case */
 	intel_synchronize_irq(i915);
 
-	if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) {
-		intel_rc6_ctx_wa_check(&i915->gt.rc6);
-		intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
-	}
-
 	/* Defer dropping the display power well for 100ms, it's slow! */
 	GEM_BUG_ON(!wakeref);
 	intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
@@ -132,23 +126,13 @@ static bool reset_engines(struct intel_gt *gt)
 	return __intel_gt_reset(gt, ALL_ENGINES) == 0;
 }
 
-/**
- * intel_gt_sanitize: called after the GPU has lost power
- * @gt: the i915 GT container
- * @force: ignore a failed reset and sanitize engine state anyway
- *
- * Anytime we reset the GPU, either with an explicit GPU reset or through a
- * PCI power cycle, the GPU loses state and we must reset our state tracking
- * to match. Note that calling intel_gt_sanitize() if the GPU has not
- * been reset results in much confusion!
- */
-void intel_gt_sanitize(struct intel_gt *gt, bool force)
+static void gt_sanitize(struct intel_gt *gt, bool force)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	intel_wakeref_t wakeref;
 
-	GEM_TRACE("force:%s\n", yesno(force));
+	GT_TRACE(gt, "force:%s", yesno(force));
 
 	/* Use a raw wakeref to avoid calling intel_display_power_get early */
 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
@@ -193,9 +177,13 @@ int intel_gt_resume(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	int err = 0;
+	int err;
 
-	GEM_TRACE("\n");
+	err = intel_gt_has_init_error(gt);
+	if (err)
+		return err;
+
+	GT_TRACE(gt, "\n");
 
 	/*
 	 * After resume, we may need to poke into the pinned kernel
@@ -207,21 +195,26 @@ int intel_gt_resume(struct intel_gt *gt)
 
 	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
 	intel_rc6_sanitize(&gt->rc6);
+	gt_sanitize(gt, true);
+	if (intel_gt_is_wedged(gt)) {
+		err = -EIO;
+		goto out_fw;
+	}
+
+	/* Only when the HW is re-initialised, can we replay the requests */
+	err = intel_gt_init_hw(gt);
+	if (err) {
+		dev_err(gt->i915->drm.dev,
+			"Failed to initialize GPU, declaring it wedged!\n");
+		goto err_wedged;
+	}
 
 	intel_rps_enable(&gt->rps);
 	intel_llc_enable(&gt->llc);
 
 	for_each_engine(engine, gt, id) {
-		struct intel_context *ce;
-
 		intel_engine_pm_get(engine);
 
-		ce = engine->kernel_context;
-		if (ce) {
-			GEM_BUG_ON(!intel_context_is_pinned(ce));
-			ce->ops->reset(ce);
-		}
-
 		engine->serial++; /* kernel context lost */
 		err = engine->resume(engine);
 
@@ -230,7 +223,7 @@ int intel_gt_resume(struct intel_gt *gt)
 			dev_err(gt->i915->drm.dev,
 				"Failed to restart %s (%d)\n",
 				engine->name, err);
-			break;
+			goto err_wedged;
 		}
 	}
 
@@ -240,10 +233,14 @@ int intel_gt_resume(struct intel_gt *gt)
 
 	user_forcewake(gt, false);
 
+out_fw:
 	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
 	intel_gt_pm_put(gt);
-
 	return err;
+
+err_wedged:
+	intel_gt_set_wedged(gt);
+	goto out_fw;
 }
 
 static void wait_for_suspend(struct intel_gt *gt)
@@ -257,6 +254,7 @@ static void wait_for_suspend(struct intel_gt *gt)
 		 * the gpu quiet.
 		 */
 		intel_gt_set_wedged(gt);
+		intel_gt_retire_requests(gt);
 	}
 
 	intel_gt_pm_wait_for_idle(gt);
@@ -286,6 +284,11 @@ void intel_gt_suspend_late(struct intel_gt *gt)
 	/* We expect to be idle already; but also want to be independent */
 	wait_for_suspend(gt);
 
+	if (is_mock_gt(gt))
+		return;
+
+	GEM_BUG_ON(gt->awake);
+
 	/*
 	 * On disabling the device, we want to turn off HW access to memory
 	 * that we no longer own.
@@ -305,22 +308,21 @@ void intel_gt_suspend_late(struct intel_gt *gt)
 		intel_llc_disable(&gt->llc);
 	}
 
-	intel_gt_sanitize(gt, false);
+	gt_sanitize(gt, false);
 
-	GEM_TRACE("\n");
+	GT_TRACE(gt, "\n");
 }
 
 void intel_gt_runtime_suspend(struct intel_gt *gt)
 {
 	intel_uc_runtime_suspend(&gt->uc);
 
-	GEM_TRACE("\n");
+	GT_TRACE(gt, "\n");
 }
 
 int intel_gt_runtime_resume(struct intel_gt *gt)
 {
-	GEM_TRACE("\n");
-
+	GT_TRACE(gt, "\n");
 	intel_gt_init_swizzling(gt);
 
 	return intel_uc_runtime_resume(&gt->uc);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 990efc2..60f0e2f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -22,6 +22,11 @@ static inline void intel_gt_pm_get(struct intel_gt *gt)
 	intel_wakeref_get(&gt->wakeref);
 }
 
+static inline void __intel_gt_pm_get(struct intel_gt *gt)
+{
+	__intel_wakeref_get(&gt->wakeref);
+}
+
 static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
 {
 	return intel_wakeref_get_if_active(&gt->wakeref);
@@ -46,8 +51,6 @@ void intel_gt_pm_init_early(struct intel_gt *gt);
 void intel_gt_pm_init(struct intel_gt *gt);
 void intel_gt_pm_fini(struct intel_gt *gt);
 
-void intel_gt_sanitize(struct intel_gt *gt, bool force);
-
 void intel_gt_suspend_prepare(struct intel_gt *gt);
 void intel_gt_suspend_late(struct intel_gt *gt);
 int intel_gt_resume(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 3dc13ec..7ef1d37 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -8,27 +8,40 @@
 
 #include "i915_drv.h" /* for_each_engine() */
 #include "i915_request.h"
+#include "intel_engine_heartbeat.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
 #include "intel_timeline.h"
 
-static void retire_requests(struct intel_timeline *tl)
+static bool retire_requests(struct intel_timeline *tl)
 {
 	struct i915_request *rq, *rn;
 
 	list_for_each_entry_safe(rq, rn, &tl->requests, link)
 		if (!i915_request_retire(rq))
-			break;
+			return false;
+
+	/* And check nothing new was submitted */
+	return !i915_active_fence_isset(&tl->last_request);
 }
 
-static void flush_submission(struct intel_gt *gt)
+static bool flush_submission(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
+	bool active = false;
 
-	for_each_engine(engine, gt, id)
+	if (!intel_gt_pm_is_awake(gt))
+		return false;
+
+	for_each_engine(engine, gt, id) {
 		intel_engine_flush_submission(engine);
+		active |= flush_work(&engine->retire_work);
+		active |= flush_work(&engine->wakeref.work);
+	}
+
+	return active;
 }
 
 static void engine_retire(struct work_struct *work)
@@ -62,19 +75,16 @@ static void engine_retire(struct work_struct *work)
 static bool add_retire(struct intel_engine_cs *engine,
 		       struct intel_timeline *tl)
 {
+#define STUB ((struct intel_timeline *)1)
 	struct intel_timeline *first;
 
 	/*
 	 * We open-code a llist here to include the additional tag [BIT(0)]
 	 * so that we know when the timeline is already on a
 	 * retirement queue: either this engine or another.
-	 *
-	 * However, we rely on that a timeline can only be active on a single
-	 * engine at any one time and that add_retire() is called before the
-	 * engine releases the timeline and transferred to another to retire.
 	 */
 
-	if (READ_ONCE(tl->retire)) /* already queued */
+	if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
 		return false;
 
 	intel_timeline_get(tl);
@@ -109,7 +119,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 	struct intel_gt_timelines *timelines = &gt->timelines;
 	struct intel_timeline *tl, *tn;
 	unsigned long active_count = 0;
-	unsigned long flags;
 	bool interruptible;
 	LIST_HEAD(free);
 
@@ -118,8 +127,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 		timeout = -timeout, interruptible = false;
 
 	flush_submission(gt); /* kick the ksoftirqd tasklets */
-
-	spin_lock_irqsave(&timelines->lock, flags);
+	spin_lock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
 		if (!mutex_trylock(&tl->mutex)) {
 			active_count++; /* report busy to caller, try again? */
@@ -129,7 +137,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 		intel_timeline_get(tl);
 		GEM_BUG_ON(!atomic_read(&tl->active_count));
 		atomic_inc(&tl->active_count); /* pin the list element */
-		spin_unlock_irqrestore(&timelines->lock, flags);
+		spin_unlock(&timelines->lock);
 
 		if (timeout > 0) {
 			struct dma_fence *fence;
@@ -143,16 +151,15 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 			}
 		}
 
-		retire_requests(tl);
+		if (!retire_requests(tl) || flush_submission(gt))
+			active_count++;
 
-		spin_lock_irqsave(&timelines->lock, flags);
+		spin_lock(&timelines->lock);
 
 		/* Resume iteration after dropping lock */
 		list_safe_reset_next(tl, tn, link);
 		if (atomic_dec_and_test(&tl->active_count))
 			list_del(&tl->link);
-		else
-			active_count += !!rcu_access_pointer(tl->last_request.fence);
 
 		mutex_unlock(&tl->mutex);
 
@@ -162,7 +169,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 			list_add(&tl->link, &free);
 		}
 	}
-	spin_unlock_irqrestore(&timelines->lock, flags);
+	spin_unlock(&timelines->lock);
 
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
@@ -190,9 +197,9 @@ static void retire_work_handler(struct work_struct *work)
 	struct intel_gt *gt =
 		container_of(work, typeof(*gt), requests.retire_work.work);
 
-	intel_gt_retire_requests(gt);
 	schedule_delayed_work(&gt->requests.retire_work,
 			      round_jiffies_up_relative(HZ));
+	intel_gt_retire_requests(gt);
 }
 
 void intel_gt_init_requests(struct intel_gt *gt)
@@ -210,3 +217,9 @@ void intel_gt_unpark_requests(struct intel_gt *gt)
 	schedule_delayed_work(&gt->requests.retire_work,
 			      round_jiffies_up_relative(HZ));
 }
+
+void intel_gt_fini_requests(struct intel_gt *gt)
+{
+	/* Wait until the work is marked as finished before unloading! */
+	cancel_delayed_work_sync(&gt->requests.retire_work);
+}
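
The cmpxchg(&tl->retire, NULL, STUB) in add_retire() above is a claim-once guard: only the first caller queues the timeline, while later callers see a non-NULL value and back off. A minimal standalone sketch of that idiom, using C11 atomics in place of the kernel's cmpxchg() (the names below are illustrative, not taken from the patch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define STUB ((void *)1)	/* placeholder, mirrors the STUB tag above */

struct timeline { _Atomic(void *) retire; };

/* Returns true only for the caller that actually claims the timeline. */
static bool claim_for_retirement(struct timeline *tl)
{
	void *expected = NULL;

	/* Spirit of: if (cmpxchg(&tl->retire, NULL, STUB)) return false; */
	return atomic_compare_exchange_strong(&tl->retire, &expected, STUB);
}

int main(void)
{
	struct timeline tl = { .retire = NULL };

	printf("first:  %d\n", claim_for_retirement(&tl));	/* 1 */
	printf("second: %d\n", claim_for_retirement(&tl));	/* 0, already queued */
	return 0;
}
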
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
index d626fb1..dbac53b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -27,5 +27,6 @@ int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
 void intel_gt_init_requests(struct intel_gt *gt);
 void intel_gt_park_requests(struct intel_gt *gt);
 void intel_gt_unpark_requests(struct intel_gt *gt);
+void intel_gt_fini_requests(struct intel_gt *gt);
 
 #endif /* INTEL_GT_REQUESTS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index d4e14db..96890dd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -90,6 +90,13 @@ struct intel_gt {
 	struct intel_engine_cs *engine[I915_NUM_ENGINES];
 	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
 					    [MAX_ENGINE_INSTANCE + 1];
+
+	/*
+	 * Default address space (either GGTT or ppGTT depending on arch).
+	 *
+	 * Reserved for exclusive use by the kernel.
+	 */
+	struct i915_address_space *vm;
 };
 
 enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
new file mode 100644
index 0000000..16acdc5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/slab.h> /* fault-inject.h is not standalone! */
+
+#include <linux/fault-inject.h>
+
+#include "i915_trace.h"
+#include "intel_gt.h"
+#include "intel_gtt.h"
+
+void stash_init(struct pagestash *stash)
+{
+	pagevec_init(&stash->pvec);
+	spin_lock_init(&stash->lock);
+}
+
+static struct page *stash_pop_page(struct pagestash *stash)
+{
+	struct page *page = NULL;
+
+	spin_lock(&stash->lock);
+	if (likely(stash->pvec.nr))
+		page = stash->pvec.pages[--stash->pvec.nr];
+	spin_unlock(&stash->lock);
+
+	return page;
+}
+
+static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
+{
+	unsigned int nr;
+
+	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
+
+	nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
+	memcpy(stash->pvec.pages + stash->pvec.nr,
+	       pvec->pages + pvec->nr - nr,
+	       sizeof(pvec->pages[0]) * nr);
+	stash->pvec.nr += nr;
+
+	spin_unlock(&stash->lock);
+
+	pvec->nr -= nr;
+}
+
+static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
+{
+	struct pagevec stack;
+	struct page *page;
+
+	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
+		i915_gem_shrink_all(vm->i915);
+
+	page = stash_pop_page(&vm->free_pages);
+	if (page)
+		return page;
+
+	if (!vm->pt_kmap_wc)
+		return alloc_page(gfp);
+
+	/* Look in our global stash of WC pages... */
+	page = stash_pop_page(&vm->i915->mm.wc_stash);
+	if (page)
+		return page;
+
+	/*
+	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
+	 *
+	 * We have to be careful as page allocation may trigger the shrinker
+	 * (via direct reclaim) which will fill up the WC stash underneath us.
+	 * So we add our WB pages into a temporary pvec on the stack and merge
+	 * them into the WC stash after all the allocations are complete.
+	 */
+	pagevec_init(&stack);
+	do {
+		struct page *page;
+
+		page = alloc_page(gfp);
+		if (unlikely(!page))
+			break;
+
+		stack.pages[stack.nr++] = page;
+	} while (pagevec_space(&stack));
+
+	if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
+		page = stack.pages[--stack.nr];
+
+		/* Merge spare WC pages to the global stash */
+		if (stack.nr)
+			stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
+
+		/* Push any surplus WC pages onto the local VM stash */
+		if (stack.nr)
+			stash_push_pagevec(&vm->free_pages, &stack);
+	}
+
+	/* Return unwanted leftovers */
+	if (unlikely(stack.nr)) {
+		WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
+		__pagevec_release(&stack);
+	}
+
+	return page;
+}
+
+static void vm_free_pages_release(struct i915_address_space *vm,
+				  bool immediate)
+{
+	struct pagevec *pvec = &vm->free_pages.pvec;
+	struct pagevec stack;
+
+	lockdep_assert_held(&vm->free_pages.lock);
+	GEM_BUG_ON(!pagevec_count(pvec));
+
+	if (vm->pt_kmap_wc) {
+		/*
+		 * When we use WC, first fill up the global stash and then
+		 * only if full immediately free the overflow.
+		 */
+		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
+
+		/*
+		 * As we have made some room in the VM's free_pages,
+		 * we can wait for it to fill again. Unless we are
+		 * inside i915_address_space_fini() and must
+		 * immediately release the pages!
+		 */
+		if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
+			return;
+
+		/*
+		 * We have to drop the lock to allow ourselves to sleep,
+		 * so take a copy of the pvec and clear the stash for
+		 * others to use it as we sleep.
+		 */
+		stack = *pvec;
+		pagevec_reinit(pvec);
+		spin_unlock(&vm->free_pages.lock);
+
+		pvec = &stack;
+		set_pages_array_wb(pvec->pages, pvec->nr);
+
+		spin_lock(&vm->free_pages.lock);
+	}
+
+	__pagevec_release(pvec);
+}
+
+static void vm_free_page(struct i915_address_space *vm, struct page *page)
+{
+	/*
+	 * On !llc, we need to change the pages back to WB. We only do so
+	 * in bulk, so we rarely need to change the page attributes here,
+	 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
+	 * To make detection of the possible sleep more likely, use an
+	 * unconditional might_sleep() for everybody.
+	 */
+	might_sleep();
+	spin_lock(&vm->free_pages.lock);
+	while (!pagevec_space(&vm->free_pages.pvec))
+		vm_free_pages_release(vm, false);
+	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
+	pagevec_add(&vm->free_pages.pvec, page);
+	spin_unlock(&vm->free_pages.lock);
+}
+
+void __i915_vm_close(struct i915_address_space *vm)
+{
+	struct i915_vma *vma, *vn;
+
+	mutex_lock(&vm->mutex);
+	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
+		struct drm_i915_gem_object *obj = vma->obj;
+
+		/* Keep the obj (and hence the vma) alive as _we_ destroy it */
+		if (!kref_get_unless_zero(&obj->base.refcount))
+			continue;
+
+		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+		WARN_ON(__i915_vma_unbind(vma));
+		__i915_vma_put(vma);
+
+		i915_gem_object_put(obj);
+	}
+	GEM_BUG_ON(!list_empty(&vm->bound_list));
+	mutex_unlock(&vm->mutex);
+}
+
+void i915_address_space_fini(struct i915_address_space *vm)
+{
+	spin_lock(&vm->free_pages.lock);
+	if (pagevec_count(&vm->free_pages.pvec))
+		vm_free_pages_release(vm, true);
+	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
+	spin_unlock(&vm->free_pages.lock);
+
+	drm_mm_takedown(&vm->mm);
+
+	mutex_destroy(&vm->mutex);
+}
+
+static void __i915_vm_release(struct work_struct *work)
+{
+	struct i915_address_space *vm =
+		container_of(work, struct i915_address_space, rcu.work);
+
+	vm->cleanup(vm);
+	i915_address_space_fini(vm);
+
+	kfree(vm);
+}
+
+void i915_vm_release(struct kref *kref)
+{
+	struct i915_address_space *vm =
+		container_of(kref, struct i915_address_space, ref);
+
+	GEM_BUG_ON(i915_is_ggtt(vm));
+	trace_i915_ppgtt_release(vm);
+
+	queue_rcu_work(vm->i915->wq, &vm->rcu);
+}
+
+void i915_address_space_init(struct i915_address_space *vm, int subclass)
+{
+	kref_init(&vm->ref);
+	INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
+	atomic_set(&vm->open, 1);
+
+	/*
+	 * The vm->mutex must be reclaim safe (for use in the shrinker).
+	 * Do a dummy acquire now under fs_reclaim so that any allocation
+	 * attempt holding the lock is immediately reported by lockdep.
+	 */
+	mutex_init(&vm->mutex);
+	lockdep_set_subclass(&vm->mutex, subclass);
+	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+
+	GEM_BUG_ON(!vm->total);
+	drm_mm_init(&vm->mm, 0, vm->total);
+	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+
+	stash_init(&vm->free_pages);
+
+	INIT_LIST_HEAD(&vm->bound_list);
+}
+
+void clear_pages(struct i915_vma *vma)
+{
+	GEM_BUG_ON(!vma->pages);
+
+	if (vma->pages != vma->obj->mm.pages) {
+		sg_free_table(vma->pages);
+		kfree(vma->pages);
+	}
+	vma->pages = NULL;
+
+	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
+}
+
+static int __setup_page_dma(struct i915_address_space *vm,
+			    struct i915_page_dma *p,
+			    gfp_t gfp)
+{
+	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
+	if (unlikely(!p->page))
+		return -ENOMEM;
+
+	p->daddr = dma_map_page_attrs(vm->dma,
+				      p->page, 0, PAGE_SIZE,
+				      PCI_DMA_BIDIRECTIONAL,
+				      DMA_ATTR_SKIP_CPU_SYNC |
+				      DMA_ATTR_NO_WARN);
+	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
+		vm_free_page(vm, p->page);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
+{
+	return __setup_page_dma(vm, p, __GFP_HIGHMEM);
+}
+
+void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
+{
+	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	vm_free_page(vm, p->page);
+}
+
+void
+fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
+{
+	kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
+}
+
+int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
+{
+	unsigned long size;
+
+	/*
+	 * In order to utilize 64K pages for an object with a size < 2M, we will
+	 * need to support a 64K scratch page, given that every 16th entry for a
+	 * page-table operating in 64K mode must point to a properly aligned 64K
+	 * region, including any PTEs which happen to point to scratch.
+	 *
+	 * This is only relevant for the 48b PPGTT where we support
+	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
+	 * scratch (read-only) between all vm, we create one 64k scratch page
+	 * for all.
+	 */
+	size = I915_GTT_PAGE_SIZE_4K;
+	if (i915_vm_is_4lvl(vm) &&
+	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
+		size = I915_GTT_PAGE_SIZE_64K;
+		gfp |= __GFP_NOWARN;
+	}
+	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
+
+	do {
+		unsigned int order = get_order(size);
+		struct page *page;
+		dma_addr_t addr;
+
+		page = alloc_pages(gfp, order);
+		if (unlikely(!page))
+			goto skip;
+
+		addr = dma_map_page_attrs(vm->dma,
+					  page, 0, size,
+					  PCI_DMA_BIDIRECTIONAL,
+					  DMA_ATTR_SKIP_CPU_SYNC |
+					  DMA_ATTR_NO_WARN);
+		if (unlikely(dma_mapping_error(vm->dma, addr)))
+			goto free_page;
+
+		if (unlikely(!IS_ALIGNED(addr, size)))
+			goto unmap_page;
+
+		vm->scratch[0].base.page = page;
+		vm->scratch[0].base.daddr = addr;
+		vm->scratch_order = order;
+		return 0;
+
+unmap_page:
+		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
+free_page:
+		__free_pages(page, order);
+skip:
+		if (size == I915_GTT_PAGE_SIZE_4K)
+			return -ENOMEM;
+
+		size = I915_GTT_PAGE_SIZE_4K;
+		gfp &= ~__GFP_NOWARN;
+	} while (1);
+}
+
+void cleanup_scratch_page(struct i915_address_space *vm)
+{
+	struct i915_page_dma *p = px_base(&vm->scratch[0]);
+	unsigned int order = vm->scratch_order;
+
+	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
+		       PCI_DMA_BIDIRECTIONAL);
+	__free_pages(p->page, order);
+}
+
+void free_scratch(struct i915_address_space *vm)
+{
+	int i;
+
+	if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
+		return;
+
+	for (i = 1; i <= vm->top; i++) {
+		if (!px_dma(&vm->scratch[i]))
+			break;
+		cleanup_page_dma(vm, px_base(&vm->scratch[i]));
+	}
+
+	cleanup_scratch_page(vm);
+}
+
+void gtt_write_workarounds(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
+
+	/*
+	 * This function is for gtt related workarounds. This function is
+	 * called on driver load and after a GPU reset, so you can place
+	 * workarounds here even if they get overwritten by GPU reset.
+	 */
+	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
+	if (IS_BROADWELL(i915))
+		intel_uncore_write(uncore,
+				   GEN8_L3_LRA_1_GPGPU,
+				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
+	else if (IS_CHERRYVIEW(i915))
+		intel_uncore_write(uncore,
+				   GEN8_L3_LRA_1_GPGPU,
+				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
+	else if (IS_GEN9_LP(i915))
+		intel_uncore_write(uncore,
+				   GEN8_L3_LRA_1_GPGPU,
+				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+	else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
+		intel_uncore_write(uncore,
+				   GEN8_L3_LRA_1_GPGPU,
+				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
+
+	/*
+	 * To support 64K PTEs we need to first enable the use of the
+	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
+	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
+	 * shouldn't be needed after GEN10.
+	 *
+	 * 64K pages were first introduced from BDW+, although technically they
+	 * only *work* from gen9+. For pre-BDW we instead have the option for
+	 * 32K pages, but we don't currently have any support for it in our
+	 * driver.
+	 */
+	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
+	    INTEL_GEN(i915) <= 10)
+		intel_uncore_rmw(uncore,
+				 GEN8_GAMW_ECO_DEV_RW_IA,
+				 0,
+				 GAMW_ECO_ENABLE_64K_IPS_FIELD);
+
+	if (IS_GEN_RANGE(i915, 8, 11)) {
+		bool can_use_gtt_cache = true;
+
+		/*
+		 * According to the BSpec if we use 2M/1G pages then we also
+		 * need to disable the GTT cache. At least on BDW we can see
+		 * visual corruption when using 2M pages, and not disabling the
+		 * GTT cache.
+		 */
+		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
+			can_use_gtt_cache = false;
+
+		/* WaGttCachingOffByDefault */
+		intel_uncore_write(uncore,
+				   HSW_GTT_CACHE_EN,
+				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
+		WARN_ON_ONCE(can_use_gtt_cache &&
+			     intel_uncore_read(uncore,
+					       HSW_GTT_CACHE_EN) == 0);
+	}
+}
+
+u64 gen8_pte_encode(dma_addr_t addr,
+		    enum i915_cache_level level,
+		    u32 flags)
+{
+	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+	if (unlikely(flags & PTE_READ_ONLY))
+		pte &= ~_PAGE_RW;
+
+	switch (level) {
+	case I915_CACHE_NONE:
+		pte |= PPAT_UNCACHED;
+		break;
+	case I915_CACHE_WT:
+		pte |= PPAT_DISPLAY_ELLC;
+		break;
+	default:
+		pte |= PPAT_CACHED;
+		break;
+	}
+
+	return pte;
+}
+
+static void tgl_setup_private_ppat(struct intel_uncore *uncore)
+{
+	/* TGL doesn't support LLC or AGE settings */
+	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
+	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
+	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
+	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
+	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
+	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
+	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
+	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
+}
+
+static void cnl_setup_private_ppat(struct intel_uncore *uncore)
+{
+	intel_uncore_write(uncore,
+			   GEN10_PAT_INDEX(0),
+			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
+	intel_uncore_write(uncore,
+			   GEN10_PAT_INDEX(1),
+			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+	intel_uncore_write(uncore,
+			   GEN10_PAT_INDEX(2),
+			   GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+	intel_uncore_write(uncore,
+			   GEN10_PAT_INDEX(3),
+			   GEN8_PPAT_UC);
+	intel_uncore_write(uncore,
+			   GEN10_PAT_INDEX(4),
+			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+	intel_uncore_write(uncore,
+			   GEN10_PAT_INDEX(5),
+			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+	intel_uncore_write(uncore,
+			   GEN10_PAT_INDEX(6),
+			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+	intel_uncore_write(uncore,
+			   GEN10_PAT_INDEX(7),
+			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+}
+
+/*
+ * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
+ * bits. When using advanced contexts each context stores its own PAT, but
+ * writing this data shouldn't be harmful even in those cases.
+ */
+static void bdw_setup_private_ppat(struct intel_uncore *uncore)
+{
+	u64 pat;
+
+	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
+	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
+	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |	/* for scanout with eLLC */
+	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
+	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
+	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
+	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
+	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+
+	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+}
+
+static void chv_setup_private_ppat(struct intel_uncore *uncore)
+{
+	u64 pat;
+
+	/*
+	 * Map WB on BDW to snooped on CHV.
+	 *
+	 * Only the snoop bit has meaning for CHV, the rest is
+	 * ignored.
+	 *
+	 * The hardware will never snoop for certain types of accesses:
+	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
+	 * - PPGTT page tables
+	 * - some other special cycles
+	 *
+	 * As with BDW, we also need to consider the following for GT accesses:
+	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
+	 * so RTL will always use the value corresponding to
+	 * pat_sel = 000".
+	 * Which means we must set the snoop bit in PAT entry 0
+	 * in order to keep the global status page working.
+	 */
+
+	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+	      GEN8_PPAT(1, 0) |
+	      GEN8_PPAT(2, 0) |
+	      GEN8_PPAT(3, 0) |
+	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+	      GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+}
+
+void setup_private_pat(struct intel_uncore *uncore)
+{
+	struct drm_i915_private *i915 = uncore->i915;
+
+	GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+	if (INTEL_GEN(i915) >= 12)
+		tgl_setup_private_ppat(uncore);
+	else if (INTEL_GEN(i915) >= 10)
+		cnl_setup_private_ppat(uncore);
+	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
+		chv_setup_private_ppat(uncore);
+	else
+		bdw_setup_private_ppat(uncore);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_gtt.c"
+#endif
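
bdw_setup_private_ppat() above builds the whole PAT as a single u64, one byte per index, before splitting it across the lo/hi registers. A small standalone sketch (the GEN8_PPAT* values are copied from the defines added in intel_gtt.h by this patch) reproduces the packing and prints the two halves:

#include <stdint.h>
#include <stdio.h>

/* Values as defined in intel_gtt.h in this patch. */
#define GEN8_PPAT_AGE(x)	((x) << 4)
#define GEN8_PPAT_LLCELLC	(2 << 2)
#define GEN8_PPAT_LLC		(1 << 2)
#define GEN8_PPAT_WB		(3 << 0)
#define GEN8_PPAT_WT		(2 << 0)
#define GEN8_PPAT_WC		(1 << 0)
#define GEN8_PPAT_UC		(0 << 0)
#define GEN8_PPAT(i, x)		((uint64_t)(x) << ((i) * 8))

int main(void)
{
	uint64_t pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |
	      GEN8_PPAT(3, GEN8_PPAT_UC) |
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	/* GEN8_PRIVATE_PAT_LO <- 0x000a0907, GEN8_PRIVATE_PAT_HI <- 0x3b2b1b0b */
	printf("lo=%08llx hi=%08llx\n",
	       (unsigned long long)(pat & 0xffffffff),
	       (unsigned long long)(pat >> 32));
	return 0;
}
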
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
new file mode 100644
index 0000000..7da7681
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -0,0 +1,587 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Please try to maintain the following order within this file unless it makes
+ * sense to do otherwise. From top to bottom:
+ * 1. typedefs
+ * 2. #defines, and macros
+ * 3. structure definitions
+ * 4. function prototypes
+ *
+ * Within each section, please try to order by generation in ascending order,
+ * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
+ */
+
+#ifndef __INTEL_GTT_H__
+#define __INTEL_GTT_H__
+
+#include <linux/io-mapping.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/pagevec.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_mm.h>
+
+#include "gt/intel_reset.h"
+#include "i915_gem_fence_reg.h"
+#include "i915_selftest.h"
+#include "i915_vma_types.h"
+
+#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+
+#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
+#define DBG(...) trace_printk(__VA_ARGS__)
+#else
+#define DBG(...)
+#endif
+
+#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */
+
+#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
+#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
+#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)
+
+#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
+#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
+
+#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
+
+#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
+
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
+
+typedef u32 gen6_pte_t;
+typedef u64 gen8_pte_t;
+
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
+
+#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
+#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
+#define I915_PDES			512
+#define I915_PDE_MASK			(I915_PDES - 1)
+
+/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
+#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PTE_CACHE_LLC		(2 << 1)
+#define GEN6_PTE_UNCACHED		(1 << 1)
+#define GEN6_PTE_VALID			REG_BIT(0)
+
+#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
+#define GEN6_PD_SIZE		        (I915_PDES * PAGE_SIZE)
+#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
+#define GEN6_PDE_SHIFT			22
+#define GEN6_PDE_VALID			REG_BIT(0)
+#define NUM_PTE(pde_shift)     (1 << (pde_shift - PAGE_SHIFT))
+
+#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)
+
+#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
+#define BYT_PTE_WRITEABLE		REG_BIT(1)
+
+/*
+ * Cacheability Control is a 4-bit value. The low three bits are stored in bits
+ * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
+ */
+#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
+					 (((bits) & 0x8) << (11 - 3)))
+#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
+#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
+#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
+#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
+#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_PTE_UNCACHED		(0)
+#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
+#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
+
+/*
+ * GEN8 32b style address is defined as a 3 level page table:
+ * 31:30 | 29:21 | 20:12 |  11:0
+ * PDPE  |  PDE  |  PTE  | offset
+ * The difference as compared to normal x86 3 level page table is the PDPEs are
+ * programmed via register.
+ *
+ * GEN8 48b style address is defined as a 4 level page table:
+ * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
+ * PML4E | PDPE  |  PDE  |  PTE  | offset
+ */
+#define GEN8_3LVL_PDPES			4
+
+#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
+#define PPAT_CACHED_PDE			0 /* WB LLC */
+#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
+#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */
+
+#define CHV_PPAT_SNOOP			REG_BIT(6)
+#define GEN8_PPAT_AGE(x)		((x)<<4)
+#define GEN8_PPAT_LLCeLLC		(3<<2)
+#define GEN8_PPAT_LLCELLC		(2<<2)
+#define GEN8_PPAT_LLC			(1<<2)
+#define GEN8_PPAT_WB			(3<<0)
+#define GEN8_PPAT_WT			(2<<0)
+#define GEN8_PPAT_WC			(1<<0)
+#define GEN8_PPAT_UC			(0<<0)
+#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
+#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))
+
+#define GEN8_PDE_IPS_64K BIT(11)
+#define GEN8_PDE_PS_2M   BIT(7)
+
+#define for_each_sgt_daddr(__dp, __iter, __sgt) \
+	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
+
+struct i915_page_dma {
+	struct page *page;
+	union {
+		dma_addr_t daddr;
+
+		/*
+		 * For gen6/gen7 only. This is the offset in the GGTT
+		 * where the page directory entries for PPGTT begin
+		 */
+		u32 ggtt_offset;
+	};
+};
+
+struct i915_page_scratch {
+	struct i915_page_dma base;
+	u64 encode;
+};
+
+struct i915_page_table {
+	struct i915_page_dma base;
+	atomic_t used;
+};
+
+struct i915_page_directory {
+	struct i915_page_table pt;
+	spinlock_t lock;
+	void *entry[512];
+};
+
+#define __px_choose_expr(x, type, expr, other) \
+	__builtin_choose_expr( \
+	__builtin_types_compatible_p(typeof(x), type) || \
+	__builtin_types_compatible_p(typeof(x), const type), \
+	({ type __x = (type)(x); expr; }), \
+	other)
+
+#define px_base(px) \
+	__px_choose_expr(px, struct i915_page_dma *, __x, \
+	__px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
+	__px_choose_expr(px, struct i915_page_table *, &__x->base, \
+	__px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
+	(void)0))))
+#define px_dma(px) (px_base(px)->daddr)
+
+#define px_pt(px) \
+	__px_choose_expr(px, struct i915_page_table *, __x, \
+	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
+	(void)0))
+#define px_used(px) (&px_pt(px)->used)
+
+enum i915_cache_level;
+
+struct drm_i915_file_private;
+struct drm_i915_gem_object;
+struct i915_vma;
+struct intel_gt;
+
+struct i915_vma_ops {
+	/* Map an object into an address space with the given cache flags. */
+	int (*bind_vma)(struct i915_vma *vma,
+			enum i915_cache_level cache_level,
+			u32 flags);
+	/*
+	 * Unmap an object from an address space. This usually consists of
+	 * setting the valid PTE entries to a reserved scratch page.
+	 */
+	void (*unbind_vma)(struct i915_vma *vma);
+
+	int (*set_pages)(struct i915_vma *vma);
+	void (*clear_pages)(struct i915_vma *vma);
+};
+
+struct pagestash {
+	spinlock_t lock;
+	struct pagevec pvec;
+};
+
+void stash_init(struct pagestash *stash);
+
+struct i915_address_space {
+	struct kref ref;
+	struct rcu_work rcu;
+
+	struct drm_mm mm;
+	struct intel_gt *gt;
+	struct drm_i915_private *i915;
+	struct device *dma;
+	/*
+	 * Every address space belongs to a struct file - except for the global
+	 * GTT that is owned by the driver (and so @file is set to NULL). In
+	 * principle, no information should leak from one context to another
+	 * (or between files/processes etc) unless explicitly shared by the
+	 * owner. Tracking the owner is important in order to free up per-file
+	 * objects along with the file, to aid resource tracking, and to
+	 * assign blame.
+	 */
+	struct drm_i915_file_private *file;
+	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
+	u64 reserved;		/* size addr space reserved */
+
+	unsigned int bind_async_flags;
+
+	/*
+	 * Each active user context has its own address space (in full-ppgtt).
+	 * Since the vm may be shared between multiple contexts, we count how
+	 * many contexts keep us "open". Once open hits zero, we are closed
+	 * and do not allow any new attachments, and proceed to shutdown our
+	 * vma and page directories.
+	 */
+	atomic_t open;
+
+	struct mutex mutex; /* protects vma and our lists */
+#define VM_CLASS_GGTT 0
+#define VM_CLASS_PPGTT 1
+
+	struct i915_page_scratch scratch[4];
+	unsigned int scratch_order;
+	unsigned int top;
+
+	/**
+	 * List of vma currently bound.
+	 */
+	struct list_head bound_list;
+
+	struct pagestash free_pages;
+
+	/* Global GTT */
+	bool is_ggtt:1;
+
+	/* Some systems require uncached updates of the page directories */
+	bool pt_kmap_wc:1;
+
+	/* Some systems support read-only mappings for GGTT and/or PPGTT */
+	bool has_read_only:1;
+
+	u64 (*pte_encode)(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags); /* Create a valid PTE */
+#define PTE_READ_ONLY	BIT(0)
+
+	int (*allocate_va_range)(struct i915_address_space *vm,
+				 u64 start, u64 length);
+	void (*clear_range)(struct i915_address_space *vm,
+			    u64 start, u64 length);
+	void (*insert_page)(struct i915_address_space *vm,
+			    dma_addr_t addr,
+			    u64 offset,
+			    enum i915_cache_level cache_level,
+			    u32 flags);
+	void (*insert_entries)(struct i915_address_space *vm,
+			       struct i915_vma *vma,
+			       enum i915_cache_level cache_level,
+			       u32 flags);
+	void (*cleanup)(struct i915_address_space *vm);
+
+	struct i915_vma_ops vma_ops;
+
+	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
+	I915_SELFTEST_DECLARE(bool scrub_64K);
+};
+
+/*
+ * The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_ggtt {
+	struct i915_address_space vm;
+
+	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
+	struct resource gmadr;          /* GMADR resource */
+	resource_size_t mappable_end;	/* End offset that we can CPU map */
+
+	/** "Graphics Stolen Memory" holds the global PTEs */
+	void __iomem *gsm;
+	void (*invalidate)(struct i915_ggtt *ggtt);
+
+	/** PPGTT used for aliasing the PPGTT with the GTT */
+	struct i915_ppgtt *alias;
+
+	bool do_idle_maps;
+
+	int mtrr;
+
+	/** Bit 6 swizzling required for X tiling */
+	u32 bit_6_swizzle_x;
+	/** Bit 6 swizzling required for Y tiling */
+	u32 bit_6_swizzle_y;
+
+	u32 pin_bias;
+
+	unsigned int num_fences;
+	struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
+	struct list_head fence_list;
+
+	/**
+	 * List of all objects in gtt_space, currently mmaped by userspace.
+	 * All objects within this list must also be on bound_list.
+	 */
+	struct list_head userfault_list;
+
+	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
+	struct intel_wakeref_auto userfault_wakeref;
+
+	struct mutex error_mutex;
+	struct drm_mm_node error_capture;
+	struct drm_mm_node uc_fw;
+};
+
+struct i915_ppgtt {
+	struct i915_address_space vm;
+
+	struct i915_page_directory *pd;
+};
+
+#define i915_is_ggtt(vm) ((vm)->is_ggtt)
+
+static inline bool
+i915_vm_is_4lvl(const struct i915_address_space *vm)
+{
+	return (vm->total - 1) >> 32;
+}
+
+static inline bool
+i915_vm_has_scratch_64K(struct i915_address_space *vm)
+{
+	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
+}
+
+static inline bool
+i915_vm_has_cache_coloring(struct i915_address_space *vm)
+{
+	return i915_is_ggtt(vm) && vm->mm.color_adjust;
+}
+
+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
+	GEM_BUG_ON(!i915_is_ggtt(vm));
+	return container_of(vm, struct i915_ggtt, vm);
+}
+
+static inline struct i915_ppgtt *
+i915_vm_to_ppgtt(struct i915_address_space *vm)
+{
+	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
+	GEM_BUG_ON(i915_is_ggtt(vm));
+	return container_of(vm, struct i915_ppgtt, vm);
+}
+
+static inline struct i915_address_space *
+i915_vm_get(struct i915_address_space *vm)
+{
+	kref_get(&vm->ref);
+	return vm;
+}
+
+void i915_vm_release(struct kref *kref);
+
+static inline void i915_vm_put(struct i915_address_space *vm)
+{
+	kref_put(&vm->ref, i915_vm_release);
+}
+
+static inline struct i915_address_space *
+i915_vm_open(struct i915_address_space *vm)
+{
+	GEM_BUG_ON(!atomic_read(&vm->open));
+	atomic_inc(&vm->open);
+	return i915_vm_get(vm);
+}
+
+static inline bool
+i915_vm_tryopen(struct i915_address_space *vm)
+{
+	if (atomic_add_unless(&vm->open, 1, 0))
+		return i915_vm_get(vm);
+
+	return false;
+}
+
+void __i915_vm_close(struct i915_address_space *vm);
+
+static inline void
+i915_vm_close(struct i915_address_space *vm)
+{
+	GEM_BUG_ON(!atomic_read(&vm->open));
+	if (atomic_dec_and_test(&vm->open))
+		__i915_vm_close(vm);
+
+	i915_vm_put(vm);
+}
+
+void i915_address_space_init(struct i915_address_space *vm, int subclass);
+void i915_address_space_fini(struct i915_address_space *vm);
+
+static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
+{
+	const u32 mask = NUM_PTE(pde_shift) - 1;
+
+	return (address >> PAGE_SHIFT) & mask;
+}
+
+/*
+ * Helper to count the number of PTEs within the given length. This count
+ * does not cross a page table boundary, so the max value would be
+ * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
+ */
+static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
+{
+	const u64 mask = ~((1ULL << pde_shift) - 1);
+	u64 end;
+
+	GEM_BUG_ON(length == 0);
+	GEM_BUG_ON(offset_in_page(addr | length));
+
+	end = addr + length;
+
+	if ((addr & mask) != (end & mask))
+		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
+
+	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
+}
+
+static inline u32 i915_pde_index(u64 addr, u32 shift)
+{
+	return (addr >> shift) & I915_PDE_MASK;
+}
+
+static inline struct i915_page_table *
+i915_pt_entry(const struct i915_page_directory * const pd,
+	      const unsigned short n)
+{
+	return pd->entry[n];
+}
+
+static inline struct i915_page_directory *
+i915_pd_entry(const struct i915_page_directory * const pdp,
+	      const unsigned short n)
+{
+	return pdp->entry[n];
+}
+
+static inline dma_addr_t
+i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
+{
+	struct i915_page_dma *pt = ppgtt->pd->entry[n];
+
+	return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
+}
+
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);
+
+int i915_ggtt_probe_hw(struct drm_i915_private *i915);
+int i915_ggtt_init_hw(struct drm_i915_private *i915);
+int i915_ggtt_enable_hw(struct drm_i915_private *i915);
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
+int i915_init_ggtt(struct drm_i915_private *i915);
+void i915_ggtt_driver_release(struct drm_i915_private *i915);
+
+static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
+{
+	return ggtt->mappable_end > 0;
+}
+
+int i915_ppgtt_init_hw(struct intel_gt *gt);
+
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
+
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915);
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915);
+
+u64 gen8_pte_encode(dma_addr_t addr,
+		    enum i915_cache_level level,
+		    u32 flags);
+
+int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
+void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
+
+#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
+
+void
+fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count);
+
+#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
+#define fill32_px(px, v) do {						\
+	u64 v__ = lower_32_bits(v);					\
+	fill_px((px), v__ << 32 | v__);					\
+} while (0)
+
+int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp);
+void cleanup_scratch_page(struct i915_address_space *vm);
+void free_scratch(struct i915_address_space *vm);
+
+struct i915_page_table *alloc_pt(struct i915_address_space *vm);
+struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
+struct i915_page_directory *__alloc_pd(size_t sz);
+
+void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd);
+
+#define free_px(vm, px) free_pd(vm, px_base(px))
+
+void
+__set_pd_entry(struct i915_page_directory * const pd,
+	       const unsigned short idx,
+	       struct i915_page_dma * const to,
+	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));
+
+#define set_pd_entry(pd, idx, to) \
+	__set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)
+
+void
+clear_pd_entry(struct i915_page_directory * const pd,
+	       const unsigned short idx,
+	       const struct i915_page_scratch * const scratch);
+
+bool
+release_pd_entry(struct i915_page_directory * const pd,
+		 const unsigned short idx,
+		 struct i915_page_table * const pt,
+		 const struct i915_page_scratch * const scratch);
+void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
+
+int ggtt_set_pages(struct i915_vma *vma);
+int ppgtt_set_pages(struct i915_vma *vma);
+void clear_pages(struct i915_vma *vma);
+
+void gtt_write_workarounds(struct intel_gt *gt);
+
+void setup_private_pat(struct intel_uncore *uncore);
+
+static inline struct sgt_dma {
+	struct scatterlist *sg;
+	dma_addr_t dma, max;
+} sgt_dma(struct i915_vma *vma) {
+	struct scatterlist *sg = vma->pages->sgl;
+	dma_addr_t addr = sg_dma_address(sg);
+
+	return (struct sgt_dma){ sg, addr, addr + sg->length };
+}
+
+#endif
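
[Editor's note] For reference, a standalone userspace sketch of the PTE-counting math added in intel_gtt.h above. It is not kernel code: PAGE_SHIFT, the gen8-style pde_shift of 21 (512 PTEs per page table) and the demo program itself are assumptions made only for illustration.

/*
 * Minimal sketch of i915_pte_index()/i915_pte_count(): count how many PTEs
 * a [addr, addr + length) range touches without crossing one page-table
 * (PD-entry) boundary.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define NUM_PTE(pde_shift)	(1u << ((pde_shift) - PAGE_SHIFT))

static uint32_t pte_index(uint64_t address, unsigned int pde_shift)
{
	return (address >> PAGE_SHIFT) & (NUM_PTE(pde_shift) - 1);
}

/* Clamp the count at the end of the current page table. */
static uint32_t pte_count(uint64_t addr, uint64_t length, unsigned int pde_shift)
{
	const uint64_t mask = ~((1ull << pde_shift) - 1);
	const uint64_t end = addr + length;

	assert(length && !((addr | length) & ((1ull << PAGE_SHIFT) - 1)));

	if ((addr & mask) != (end & mask))	/* range spills into the next PD entry */
		return NUM_PTE(pde_shift) - pte_index(addr, pde_shift);

	return pte_index(end, pde_shift) - pte_index(addr, pde_shift);
}

int main(void)
{
	/* 8 MiB starting 16 KiB below a 2 MiB boundary: only 4 PTEs fit here. */
	printf("%u\n", (unsigned int)pte_count((2ull << 20) - (16 << 10), 8ull << 20, 21));
	/* A fully aligned 2 MiB chunk uses all 512 PTEs of one page table. */
	printf("%u\n", (unsigned int)pte_count(4ull << 20, 2ull << 20, 21));
	return 0;
}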
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index d925a10..0cf0f6f 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -133,12 +133,11 @@
  */
 #include <linux/interrupt.h>
 
-#include "gem/i915_gem_context.h"
-
 #include "i915_drv.h"
 #include "i915_perf.h"
 #include "i915_trace.h"
 #include "i915_vgpu.h"
+#include "intel_context.h"
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
@@ -489,17 +488,23 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
 	return desc;
 }
 
-static u32 *set_offsets(u32 *regs,
+static inline unsigned int dword_in_page(void *addr)
+{
+	return offset_in_page(addr) / sizeof(u32);
+}
+
+static void set_offsets(u32 *regs,
 			const u8 *data,
-			const struct intel_engine_cs *engine)
+			const struct intel_engine_cs *engine,
+			bool clear)
 #define NOP(x) (BIT(7) | (x))
-#define LRI(count, flags) ((flags) << 6 | (count))
+#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
 #define POSTED BIT(0)
 #define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
 #define REG16(x) \
 	(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
 	(((x) >> 2) & 0x7f)
-#define END() 0
+#define END(x) 0, (x)
 {
 	const u32 base = engine->mmio_base;
 
@@ -507,7 +512,10 @@ static u32 *set_offsets(u32 *regs,
 		u8 count, flags;
 
 		if (*data & BIT(7)) { /* skip */
-			regs += *data++ & ~BIT(7);
+			count = *data++ & ~BIT(7);
+			if (clear)
+				memset32(regs, MI_NOOP, count);
+			regs += count;
 			continue;
 		}
 
@@ -533,12 +541,25 @@ static u32 *set_offsets(u32 *regs,
 				offset |= v & ~BIT(7);
 			} while (v & BIT(7));
 
-			*regs = base + (offset << 2);
+			regs[0] = base + (offset << 2);
+			if (clear)
+				regs[1] = 0;
 			regs += 2;
 		} while (--count);
 	}
 
-	return regs;
+	if (clear) {
+		u8 count = *++data;
+
+		/* Clear past the tail for HW access */
+		GEM_BUG_ON(dword_in_page(regs) > count);
+		memset32(regs, MI_NOOP, count - dword_in_page(regs));
+
+		/* Close the batch; used mainly by live_lrc_layout() */
+		*regs = MI_BATCH_BUFFER_END;
+		if (INTEL_GEN(engine->i915) >= 10)
+			*regs |= BIT(0);
+	}
 }
 
 static const u8 gen8_xcs_offsets[] = {
@@ -573,7 +594,7 @@ static const u8 gen8_xcs_offsets[] = {
 	REG16(0x200),
 	REG(0x028),
 
-	END(),
+	END(80)
 };
 
 static const u8 gen9_xcs_offsets[] = {
@@ -657,7 +678,7 @@ static const u8 gen9_xcs_offsets[] = {
 	REG16(0x67c),
 	REG(0x068),
 
-	END(),
+	END(176)
 };
 
 static const u8 gen12_xcs_offsets[] = {
@@ -689,7 +710,7 @@ static const u8 gen12_xcs_offsets[] = {
 	REG16(0x274),
 	REG16(0x270),
 
-	END(),
+	END(80)
 };
 
 static const u8 gen8_rcs_offsets[] = {
@@ -726,7 +747,91 @@ static const u8 gen8_rcs_offsets[] = {
 	LRI(1, 0),
 	REG(0x0c8),
 
-	END(),
+	END(80)
+};
+
+static const u8 gen9_rcs_offsets[] = {
+	NOP(1),
+	LRI(14, POSTED),
+	REG16(0x244),
+	REG(0x34),
+	REG(0x30),
+	REG(0x38),
+	REG(0x3c),
+	REG(0x168),
+	REG(0x140),
+	REG(0x110),
+	REG(0x11c),
+	REG(0x114),
+	REG(0x118),
+	REG(0x1c0),
+	REG(0x1c4),
+	REG(0x1c8),
+
+	NOP(3),
+	LRI(9, POSTED),
+	REG16(0x3a8),
+	REG16(0x28c),
+	REG16(0x288),
+	REG16(0x284),
+	REG16(0x280),
+	REG16(0x27c),
+	REG16(0x278),
+	REG16(0x274),
+	REG16(0x270),
+
+	NOP(13),
+	LRI(1, 0),
+	REG(0xc8),
+
+	NOP(13),
+	LRI(44, POSTED),
+	REG(0x28),
+	REG(0x9c),
+	REG(0xc0),
+	REG(0x178),
+	REG(0x17c),
+	REG16(0x358),
+	REG(0x170),
+	REG(0x150),
+	REG(0x154),
+	REG(0x158),
+	REG16(0x41c),
+	REG16(0x600),
+	REG16(0x604),
+	REG16(0x608),
+	REG16(0x60c),
+	REG16(0x610),
+	REG16(0x614),
+	REG16(0x618),
+	REG16(0x61c),
+	REG16(0x620),
+	REG16(0x624),
+	REG16(0x628),
+	REG16(0x62c),
+	REG16(0x630),
+	REG16(0x634),
+	REG16(0x638),
+	REG16(0x63c),
+	REG16(0x640),
+	REG16(0x644),
+	REG16(0x648),
+	REG16(0x64c),
+	REG16(0x650),
+	REG16(0x654),
+	REG16(0x658),
+	REG16(0x65c),
+	REG16(0x660),
+	REG16(0x664),
+	REG16(0x668),
+	REG16(0x66c),
+	REG16(0x670),
+	REG16(0x674),
+	REG16(0x678),
+	REG16(0x67c),
+	REG(0x68),
+
+	END(176)
 };
 
 static const u8 gen11_rcs_offsets[] = {
@@ -767,7 +872,7 @@ static const u8 gen11_rcs_offsets[] = {
 	LRI(1, 0),
 	REG(0x0c8),
 
-	END(),
+	END(80)
 };
 
 static const u8 gen12_rcs_offsets[] = {
@@ -808,7 +913,7 @@ static const u8 gen12_rcs_offsets[] = {
 	LRI(1, 0),
 	REG(0x0c8),
 
-	END(),
+	END(80)
 };
 
 #undef END
@@ -833,6 +938,8 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
 			return gen12_rcs_offsets;
 		else if (INTEL_GEN(engine->i915) >= 11)
 			return gen11_rcs_offsets;
+		else if (INTEL_GEN(engine->i915) >= 9)
+			return gen9_rcs_offsets;
 		else
 			return gen8_rcs_offsets;
 	} else {
@@ -880,7 +987,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 			list_move(&rq->sched.link, pl);
 			active = rq;
 		} else {
-			struct intel_engine_cs *owner = rq->hw_context->engine;
+			struct intel_engine_cs *owner = rq->context->engine;
 
 			/*
 			 * Decouple the virtual breadcrumb before moving it
@@ -983,6 +1090,58 @@ static void intel_engine_context_out(struct intel_engine_cs *engine)
 	write_sequnlock_irqrestore(&engine->stats.lock, flags);
 }
 
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
+{
+	if (INTEL_GEN(engine->i915) >= 12)
+		return 0x60;
+	else if (INTEL_GEN(engine->i915) >= 9)
+		return 0x54;
+	else if (engine->class == RENDER_CLASS)
+		return 0x58;
+	else
+		return -1;
+}
+
+static void
+execlists_check_context(const struct intel_context *ce,
+			const struct intel_engine_cs *engine)
+{
+	const struct intel_ring *ring = ce->ring;
+	u32 *regs = ce->lrc_reg_state;
+	bool valid = true;
+	int x;
+
+	if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
+		pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
+		       engine->name,
+		       regs[CTX_RING_START],
+		       i915_ggtt_offset(ring->vma));
+		regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+		valid = false;
+	}
+
+	if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
+	    (RING_CTL_SIZE(ring->size) | RING_VALID)) {
+		pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
+		       engine->name,
+		       regs[CTX_RING_CTL],
+		       (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
+		regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+		valid = false;
+	}
+
+	x = lrc_ring_mi_mode(engine);
+	if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
+		pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
+		       engine->name, regs[x + 1]);
+		regs[x + 1] &= ~STOP_RING;
+		regs[x + 1] |= STOP_RING << 16;
+		valid = false;
+	}
+
+	WARN_ONCE(!valid, "Invalid lrc state found before submission\n");
+}
+
 static void restore_default_state(struct intel_context *ce,
 				  struct intel_engine_cs *engine)
 {
@@ -999,7 +1158,7 @@ static void restore_default_state(struct intel_context *ce,
 static void reset_active(struct i915_request *rq,
 			 struct intel_engine_cs *engine)
 {
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 	u32 head;
 
 	/*
@@ -1017,8 +1176,8 @@ static void reset_active(struct i915_request *rq,
 	 * remain correctly ordered. And we defer to __i915_request_submit()
 	 * so that all asynchronous waits are correctly handled.
 	 */
-	GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
-		  __func__, engine->name, rq->fence.context, rq->fence.seqno);
+	ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
+		     rq->fence.context, rq->fence.seqno);
 
 	/* On resubmission of the active request, payload will be scrubbed */
 	if (i915_request_completed(rq))
@@ -1040,13 +1199,16 @@ static inline struct intel_engine_cs *
 __execlists_schedule_in(struct i915_request *rq)
 {
 	struct intel_engine_cs * const engine = rq->engine;
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 
 	intel_context_get(ce);
 
-	if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
+	if (unlikely(intel_context_is_banned(ce)))
 		reset_active(rq, engine);
 
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		execlists_check_context(ce, engine);
+
 	if (ce->tag) {
 		/* Use a fixed tag for OA and friends */
 		ce->lrc_desc |= (u64)ce->tag << 32;
@@ -1054,12 +1216,12 @@ __execlists_schedule_in(struct i915_request *rq)
 		/* We don't need a strict matching tag, just different values */
 		ce->lrc_desc &= ~GENMASK_ULL(47, 37);
 		ce->lrc_desc |=
-			(u64)(engine->context_tag++ % NUM_CONTEXT_TAG) <<
+			(u64)(++engine->context_tag % NUM_CONTEXT_TAG) <<
 			GEN11_SW_CTX_ID_SHIFT;
 		BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
 	}
 
-	intel_gt_pm_get(engine->gt);
+	__intel_gt_pm_get(engine->gt);
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
 	intel_engine_context_in(engine);
 
@@ -1069,7 +1231,7 @@ __execlists_schedule_in(struct i915_request *rq)
 static inline struct i915_request *
 execlists_schedule_in(struct i915_request *rq, int idx)
 {
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 	struct intel_engine_cs *old;
 
 	GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
@@ -1100,7 +1262,7 @@ static inline void
 __execlists_schedule_out(struct i915_request *rq,
 			 struct intel_engine_cs * const engine)
 {
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 
 	/*
 	 * NB process_csb() is not under the engine->active.lock and hence
@@ -1138,7 +1300,7 @@ __execlists_schedule_out(struct i915_request *rq,
 static inline void
 execlists_schedule_out(struct i915_request *rq)
 {
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 	struct intel_engine_cs *cur, *old;
 
 	trace_i915_request_out(rq);
@@ -1155,7 +1317,7 @@ execlists_schedule_out(struct i915_request *rq)
 
 static u64 execlists_update_context(struct i915_request *rq)
 {
-	struct intel_context *ce = rq->hw_context;
+	struct intel_context *ce = rq->context;
 	u64 desc = ce->lrc_desc;
 	u32 tail;
 
@@ -1186,17 +1348,8 @@ static u64 execlists_update_context(struct i915_request *rq)
 	 * may not be visible to the HW prior to the completion of the UC
 	 * register write and that we may begin execution from the context
 	 * before its image is complete leading to invalid PD chasing.
-	 *
-	 * Furthermore, Braswell, at least, wants a full mb to be sure that
-	 * the writes are coherent in memory (visible to the GPU) prior to
-	 * execution, and not just visible to other CPUs (as is the result of
-	 * wmb).
 	 */
-	mb();
-
-	/* Wa_1607138340:tgl */
-	if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0))
-		desc |= CTX_DESC_FORCE_RESTORE;
+	wmb();
 
 	ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
 	return desc;
@@ -1224,15 +1377,14 @@ trace_ports(const struct intel_engine_execlists *execlists,
 	if (!ports[0])
 		return;
 
-	GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n",
-		  engine->name, msg,
-		  ports[0]->fence.context,
-		  ports[0]->fence.seqno,
-		  i915_request_completed(ports[0]) ? "!" :
-		  i915_request_started(ports[0]) ? "*" :
-		  "",
-		  ports[1] ? ports[1]->fence.context : 0,
-		  ports[1] ? ports[1]->fence.seqno : 0);
+	ENGINE_TRACE(engine, "%s { %llx:%lld%s, %llx:%lld }\n", msg,
+		     ports[0]->fence.context,
+		     ports[0]->fence.seqno,
+		     i915_request_completed(ports[0]) ? "!" :
+		     i915_request_started(ports[0]) ? "*" :
+		     "",
+		     ports[1] ? ports[1]->fence.context : 0,
+		     ports[1] ? ports[1]->fence.seqno : 0);
 }
 
 static __maybe_unused bool
@@ -1256,33 +1408,56 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
 	}
 
 	for (port = execlists->pending; (rq = *port); port++) {
-		if (ce == rq->hw_context) {
-			GEM_TRACE_ERR("Duplicate context in pending[%zd]\n",
+		unsigned long flags;
+		bool ok = true;
+
+		GEM_BUG_ON(!kref_read(&rq->fence.refcount));
+		GEM_BUG_ON(!i915_request_is_active(rq));
+
+		if (ce == rq->context) {
+			GEM_TRACE_ERR("Dup context:%llx in pending[%zd]\n",
+				      ce->timeline->fence_context,
 				      port - execlists->pending);
 			return false;
 		}
+		ce = rq->context;
 
-		ce = rq->hw_context;
-		if (i915_request_completed(rq))
+		/* Hold tightly onto the lock to prevent concurrent retires! */
+		if (!spin_trylock_irqsave(&rq->lock, flags))
 			continue;
 
-		if (i915_active_is_idle(&ce->active)) {
-			GEM_TRACE_ERR("Inactive context in pending[%zd]\n",
+		if (i915_request_completed(rq))
+			goto unlock;
+
+		if (i915_active_is_idle(&ce->active) &&
+		    !intel_context_is_barrier(ce)) {
+			GEM_TRACE_ERR("Inactive context:%llx in pending[%zd]\n",
+				      ce->timeline->fence_context,
 				      port - execlists->pending);
-			return false;
+			ok = false;
+			goto unlock;
 		}
 
 		if (!i915_vma_is_pinned(ce->state)) {
-			GEM_TRACE_ERR("Unpinned context in pending[%zd]\n",
+			GEM_TRACE_ERR("Unpinned context:%llx in pending[%zd]\n",
+				      ce->timeline->fence_context,
 				      port - execlists->pending);
-			return false;
+			ok = false;
+			goto unlock;
 		}
 
 		if (!i915_vma_is_pinned(ce->ring->vma)) {
-			GEM_TRACE_ERR("Unpinned ringbuffer in pending[%zd]\n",
+			GEM_TRACE_ERR("Unpinned ring:%llx in pending[%zd]\n",
+				      ce->timeline->fence_context,
 				      port - execlists->pending);
-			return false;
+			ok = false;
+			goto unlock;
 		}
+
+unlock:
+		spin_unlock_irqrestore(&rq->lock, flags);
+		if (!ok)
+			return false;
 	}
 
 	return ce;
@@ -1327,7 +1502,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 static bool ctx_single_port_submission(const struct intel_context *ce)
 {
 	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
-		i915_gem_context_force_single_submission(ce->gem_context));
+		intel_context_force_single_submission(ce));
 }
 
 static bool can_merge_ctx(const struct intel_context *prev,
@@ -1359,11 +1534,11 @@ static bool can_merge_rq(const struct i915_request *prev,
 	if (i915_request_completed(next))
 		return true;
 
-	if (unlikely((prev->flags ^ next->flags) &
-		     (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL)))
+	if (unlikely((prev->fence.flags ^ next->fence.flags) &
+		     (I915_FENCE_FLAG_NOPREEMPT | I915_FENCE_FLAG_SENTINEL)))
 		return false;
 
-	if (!can_merge_ctx(prev->hw_context, next->hw_context))
+	if (!can_merge_ctx(prev->context, next->context))
 		return false;
 
 	return true;
@@ -1372,7 +1547,7 @@ static bool can_merge_rq(const struct i915_request *prev,
 static void virtual_update_register_offsets(u32 *regs,
 					    struct intel_engine_cs *engine)
 {
-	set_offsets(regs, reg_offsets(engine), engine);
+	set_offsets(regs, reg_offsets(engine), engine, false);
 }
 
 static bool virtual_matches(const struct virtual_engine *ve,
@@ -1411,7 +1586,7 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 	if (!list_empty(&ve->context.signal_link)) {
 		list_move_tail(&ve->context.signal_link,
 			       &engine->breadcrumbs.signalers);
-		intel_engine_queue_breadcrumbs(engine);
+		intel_engine_signal_breadcrumbs(engine);
 	}
 	spin_unlock(&old->breadcrumbs.irq_lock);
 }
@@ -1519,7 +1694,7 @@ active_timeslice(const struct intel_engine_cs *engine)
 {
 	const struct i915_request *rq = *engine->execlists.active;
 
-	if (i915_request_completed(rq))
+	if (!rq || i915_request_completed(rq))
 		return 0;
 
 	if (engine->execlists.switch_priority_hint < effective_prio(rq))
@@ -1550,7 +1725,7 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
 		return 0;
 
 	/* Force a fast reset for terminated contexts (ignoring sysfs!) */
-	if (unlikely(i915_gem_context_is_banned(rq->gem_context)))
+	if (unlikely(intel_context_is_banned(rq->context)))
 		return 1;
 
 	return READ_ONCE(engine->props.preempt_timeout_ms);
@@ -1565,6 +1740,11 @@ static void set_preempt_timeout(struct intel_engine_cs *engine)
 		     active_preempt_timeout(engine));
 }
 
+static inline void clear_ports(struct i915_request **ports, int count)
+{
+	memset_p((void **)ports, NULL, count);
+}
+
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -1627,12 +1807,12 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	last = last_active(execlists);
 	if (last) {
 		if (need_preempt(engine, last, rb)) {
-			GEM_TRACE("%s: preempting last=%llx:%lld, prio=%d, hint=%d\n",
-				  engine->name,
-				  last->fence.context,
-				  last->fence.seqno,
-				  last->sched.attr.priority,
-				  execlists->queue_priority_hint);
+			ENGINE_TRACE(engine,
+				     "preempting last=%llx:%lld, prio=%d, hint=%d\n",
+				     last->fence.context,
+				     last->fence.seqno,
+				     last->sched.attr.priority,
+				     execlists->queue_priority_hint);
 			record_preemption(execlists);
 
 			/*
@@ -1658,16 +1838,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * tendency to ignore us rewinding the TAIL to the
 			 * end of an earlier request.
 			 */
-			last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+			last->context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
 			last = NULL;
 		} else if (need_timeslice(engine, last) &&
 			   timer_expired(&engine->execlists.timer)) {
-			GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n",
-				  engine->name,
-				  last->fence.context,
-				  last->fence.seqno,
-				  last->sched.attr.priority,
-				  execlists->queue_priority_hint);
+			ENGINE_TRACE(engine,
+				     "expired last=%llx:%lld, prio=%d, hint=%d\n",
+				     last->fence.context,
+				     last->fence.seqno,
+				     last->sched.attr.priority,
+				     execlists->queue_priority_hint);
 
 			ring_set_paused(engine, 1);
 			defer_active(engine);
@@ -1730,7 +1910,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 		GEM_BUG_ON(rq != ve->request);
 		GEM_BUG_ON(rq->engine != &ve->base);
-		GEM_BUG_ON(rq->hw_context != &ve->context);
+		GEM_BUG_ON(rq->context != &ve->context);
 
 		if (rq_prio(rq) >= queue_prio(execlists)) {
 			if (!virtual_matches(ve, rq, engine)) {
@@ -1744,14 +1924,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				return; /* leave this for another */
 			}
 
-			GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n",
-				  engine->name,
-				  rq->fence.context,
-				  rq->fence.seqno,
-				  i915_request_completed(rq) ? "!" :
-				  i915_request_started(rq) ? "*" :
-				  "",
-				  yesno(engine != ve->siblings[0]));
+			ENGINE_TRACE(engine,
+				     "virtual rq=%llx:%lld%s, new engine? %s\n",
+				     rq->fence.context,
+				     rq->fence.seqno,
+				     i915_request_completed(rq) ? "!" :
+				     i915_request_started(rq) ? "*" :
+				     "",
+				     yesno(engine != ve->siblings[0]));
 
 			ve->request = NULL;
 			ve->base.execlists.queue_priority_hint = INT_MIN;
@@ -1849,7 +2029,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * same LRCA, i.e. we must submit 2 different
 				 * contexts if we submit 2 ELSP.
 				 */
-				if (last->hw_context == rq->hw_context)
+				if (last->context == rq->context)
 					goto done;
 
 				if (i915_request_has_sentinel(last))
@@ -1862,8 +2042,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * the same context (even though a different
 				 * request) to the second port.
 				 */
-				if (ctx_single_port_submission(last->hw_context) ||
-				    ctx_single_port_submission(rq->hw_context))
+				if (ctx_single_port_submission(last->context) ||
+				    ctx_single_port_submission(rq->context))
 					goto done;
 
 				merge = false;
@@ -1877,8 +2057,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				}
 
 				GEM_BUG_ON(last &&
-					   !can_merge_ctx(last->hw_context,
-							  rq->hw_context));
+					   !can_merge_ctx(last->context,
+							  rq->context));
 
 				submit = true;
 				last = rq;
@@ -1907,9 +2087,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * interrupt for secondary ports).
 	 */
 	execlists->queue_priority_hint = queue_prio(execlists);
-	GEM_TRACE("%s: queue_priority_hint:%d, submit:%s\n",
-		  engine->name, execlists->queue_priority_hint,
-		  yesno(submit));
 
 	if (submit) {
 		*port = execlists_schedule_in(last, port - execlists->pending);
@@ -1928,10 +2105,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 			goto skip_submit;
 		}
+		clear_ports(port + 1, last_port - port);
 
-		memset(port + 1, 0, (last_port - port) * sizeof(*port));
 		execlists_submit_ports(engine);
-
 		set_preempt_timeout(engine);
 	} else {
 skip_submit:
@@ -1946,13 +2122,14 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
 
 	for (port = execlists->pending; *port; port++)
 		execlists_schedule_out(*port);
-	memset(execlists->pending, 0, sizeof(execlists->pending));
+	clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
 
 	/* Mark the end of active before we overwrite *active */
 	for (port = xchg(&execlists->active, execlists->pending); *port; port++)
 		execlists_schedule_out(*port);
-	WRITE_ONCE(execlists->active,
-		   memset(execlists->inflight, 0, sizeof(execlists->inflight)));
+	clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+
+	WRITE_ONCE(execlists->active, execlists->inflight);
 }
 
 static inline void
@@ -2058,7 +2235,7 @@ static void process_csb(struct intel_engine_cs *engine)
 	 */
 	head = execlists->csb_head;
 	tail = READ_ONCE(*execlists->csb_write);
-	GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail);
+	ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
 	if (unlikely(head == tail))
 		return;
 
@@ -2096,9 +2273,8 @@ static void process_csb(struct intel_engine_cs *engine)
 		 * status notifier.
 		 */
 
-		GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n",
-			  engine->name, head,
-			  buf[2 * head + 0], buf[2 * head + 1]);
+		ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
+			     head, buf[2 * head + 0], buf[2 * head + 1]);
 
 		if (INTEL_GEN(engine->i915) >= 12)
 			promote = gen12_csb_parse(execlists, buf + 2 * head);
@@ -2109,7 +2285,6 @@ static void process_csb(struct intel_engine_cs *engine)
 
 			/* Point active to the new ELSP; prevent overwriting */
 			WRITE_ONCE(execlists->active, execlists->pending);
-			set_timeslice(engine);
 
 			if (!inject_preempt_hang(execlists))
 				ring_set_paused(engine, 0);
@@ -2150,6 +2325,7 @@ static void process_csb(struct intel_engine_cs *engine)
 	} while (head != tail);
 
 	execlists->csb_head = head;
+	set_timeslice(engine);
 
 	/*
 	 * Gen11 has proven to fail wrt global observation point between
@@ -2189,10 +2365,9 @@ static noinline void preempt_reset(struct intel_engine_cs *engine)
 	/* Mark this tasklet as disabled to avoid waiting for it to complete */
 	tasklet_disable_nosync(&engine->execlists.tasklet);
 
-	GEM_TRACE("%s: preempt timeout %lu+%ums\n",
-		  engine->name,
-		  READ_ONCE(engine->props.preempt_timeout_ms),
-		  jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
+	ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n",
+		     READ_ONCE(engine->props.preempt_timeout_ms),
+		     jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
 	intel_engine_reset(engine, "preemption time out");
 
 	tasklet_enable(&engine->execlists.tasklet);
@@ -2333,7 +2508,7 @@ set_redzone(void *vaddr, const struct intel_engine_cs *engine)
 
 	vaddr += engine->context_size;
 
-	memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
+	memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
 }
 
 static void
@@ -2344,7 +2519,7 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
 
 	vaddr += engine->context_size;
 
-	if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE))
+	if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
 		dev_err_once(engine->i915->drm.dev,
 			     "%s context redzone overwritten!\n",
 			     engine->name);
@@ -2369,7 +2544,7 @@ __execlists_update_reg_state(const struct intel_context *ce,
 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
 
-	regs[CTX_RING_BUFFER_START] = i915_ggtt_offset(ring->vma);
+	regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
 	regs[CTX_RING_HEAD] = ring->head;
 	regs[CTX_RING_TAIL] = ring->tail;
 
@@ -2387,33 +2562,21 @@ __execlists_context_pin(struct intel_context *ce,
 			struct intel_engine_cs *engine)
 {
 	void *vaddr;
-	int ret;
 
 	GEM_BUG_ON(!ce->state);
-
-	ret = intel_context_active_acquire(ce);
-	if (ret)
-		goto err;
 	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
 
 	vaddr = i915_gem_object_pin_map(ce->state->obj,
 					i915_coherent_map_type(engine->i915) |
 					I915_MAP_OVERRIDE);
-	if (IS_ERR(vaddr)) {
-		ret = PTR_ERR(vaddr);
-		goto unpin_active;
-	}
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
 
-	ce->lrc_desc = lrc_descriptor(ce, engine);
+	ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 	__execlists_update_reg_state(ce, engine);
 
 	return 0;
-
-unpin_active:
-	intel_context_active_release(ce);
-err:
-	return ret;
 }
 
 static int execlists_context_pin(struct intel_context *ce)
@@ -2428,6 +2591,9 @@ static int execlists_context_alloc(struct intel_context *ce)
 
 static void execlists_context_reset(struct intel_context *ce)
 {
+	CE_TRACE(ce, "reset\n");
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
+
 	/*
 	 * Because we emit WA_TAIL_DWORDS there may be a disparity
 	 * between our bookkeeping in ce->ring->head and ce->ring->tail and
@@ -2444,8 +2610,14 @@ static void execlists_context_reset(struct intel_context *ce)
 	 * So to avoid that we reset the context images upon resume. For
 	 * simplicity, we just zero everything out.
 	 */
-	intel_ring_reset(ce->ring, 0);
+	intel_ring_reset(ce->ring, ce->ring->emit);
+
+	/* Scrub away the garbage */
+	execlists_init_reg_state(ce->lrc_reg_state,
+				 ce, ce->engine, ce->ring, true);
 	__execlists_update_reg_state(ce, ce->engine);
+
+	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
 }
 
 static const struct intel_context_ops execlists_context_ops = {
@@ -2497,7 +2669,7 @@ static int execlists_request_alloc(struct i915_request *request)
 {
 	int ret;
 
-	GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
+	GEM_BUG_ON(!intel_context_is_pinned(request->context));
 
 	/*
 	 * Flush enough space to reduce the likelihood of waiting after
@@ -2867,6 +3039,8 @@ static void enable_execlists(struct intel_engine_cs *engine)
 			RING_HWS_PGA,
 			i915_ggtt_offset(engine->status_page.vma));
 	ENGINE_POSTING_READ(engine, RING_HWS_PGA);
+
+	engine->context_tag = 0;
 }
 
 static bool unexpected_starting_state(struct intel_engine_cs *engine)
@@ -2906,8 +3080,8 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	unsigned long flags;
 
-	GEM_TRACE("%s: depth<-%d\n", engine->name,
-		  atomic_read(&execlists->tasklet.count));
+	ENGINE_TRACE(engine, "depth<-%d\n",
+		     atomic_read(&execlists->tasklet.count));
 
 	/*
 	 * Prevent request submission to the hardware until we have
@@ -2960,26 +3134,20 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
 	WRITE_ONCE(*execlists->csb_write, reset_value);
 	wmb(); /* Make sure this is visible to HW (paranoia?) */
 
+	/*
+	 * Sometimes Icelake forgets to reset its pointers on a GPU reset.
+	 * Bludgeon them with a mmio update to be sure.
+	 */
+	ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+		     reset_value << 8 | reset_value);
+	ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
 	invalidate_csb_entries(&execlists->csb_status[0],
 			       &execlists->csb_status[reset_value]);
 }
 
-static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
+static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
 {
-	if (INTEL_GEN(engine->i915) >= 12)
-		return 0x60;
-	else if (INTEL_GEN(engine->i915) >= 9)
-		return 0x54;
-	else if (engine->class == RENDER_CLASS)
-		return 0x58;
-	else
-		return -1;
-}
-
-static void __execlists_reset_reg_state(const struct intel_context *ce,
-					const struct intel_engine_cs *engine)
-{
-	u32 *regs = ce->lrc_reg_state;
 	int x;
 
 	x = lrc_ring_mi_mode(engine);
@@ -2989,6 +3157,14 @@ static void __execlists_reset_reg_state(const struct intel_context *ce,
 	}
 }
 
+static void __execlists_reset_reg_state(const struct intel_context *ce,
+					const struct intel_engine_cs *engine)
+{
+	u32 *regs = ce->lrc_reg_state;
+
+	__reset_stop_ring(regs, engine);
+}
+
 static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -3016,7 +3192,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	/* We still have requests in-flight; the engine should be active */
 	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
 
-	ce = rq->hw_context;
+	ce = rq->context;
 	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
 
 	if (i915_request_completed(rq)) {
@@ -3073,8 +3249,8 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	restore_default_state(ce, engine);
 
 out_replay:
-	GEM_TRACE("%s replay {head:%04x, tail:%04x}\n",
-		  engine->name, ce->ring->head, ce->ring->tail);
+	ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
+		     ce->ring->head, ce->ring->tail);
 	intel_ring_update_space(ce->ring);
 	__execlists_reset_reg_state(ce, engine);
 	__execlists_update_reg_state(ce, engine);
@@ -3086,11 +3262,11 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	__unwind_incomplete_requests(engine);
 }
 
-static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
+static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 {
 	unsigned long flags;
 
-	GEM_TRACE("%s\n", engine->name);
+	ENGINE_TRACE(engine, "\n");
 
 	spin_lock_irqsave(&engine->active.lock, flags);
 
@@ -3104,14 +3280,14 @@ static void nop_submission_tasklet(unsigned long data)
 	/* The driver is wedged; don't process any more events. */
 }
 
-static void execlists_cancel_requests(struct intel_engine_cs *engine)
+static void execlists_reset_cancel(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request *rq, *rn;
 	struct rb_node *rb;
 	unsigned long flags;
 
-	GEM_TRACE("%s\n", engine->name);
+	ENGINE_TRACE(engine, "\n");
 
 	/*
 	 * Before we call engine->cancel_requests(), we should have exclusive
@@ -3198,13 +3374,13 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
 	if (__tasklet_enable(&execlists->tasklet))
 		/* And kick in case we missed a new request submission. */
 		tasklet_hi_schedule(&execlists->tasklet);
-	GEM_TRACE("%s: depth->%d\n", engine->name,
-		  atomic_read(&execlists->tasklet.count));
+	ENGINE_TRACE(engine, "depth->%d\n",
+		     atomic_read(&execlists->tasklet.count));
 }
 
-static int gen8_emit_bb_start(struct i915_request *rq,
-			      u64 offset, u32 len,
-			      const unsigned int flags)
+static int gen8_emit_bb_start_noarb(struct i915_request *rq,
+				    u64 offset, u32 len,
+				    const unsigned int flags)
 {
 	u32 *cs;
 
@@ -3238,7 +3414,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
 	return 0;
 }
 
-static int gen9_emit_bb_start(struct i915_request *rq,
+static int gen8_emit_bb_start(struct i915_request *rq,
 			      u64 offset, u32 len,
 			      const unsigned int flags)
 {
@@ -3693,12 +3869,12 @@ static void execlists_park(struct intel_engine_cs *engine)
 void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
 {
 	engine->submit_request = execlists_submit_request;
-	engine->cancel_requests = execlists_cancel_requests;
 	engine->schedule = i915_schedule;
 	engine->execlists.tasklet.func = execlists_submission_tasklet;
 
 	engine->reset.prepare = execlists_reset_prepare;
-	engine->reset.reset = execlists_reset;
+	engine->reset.rewind = execlists_reset_rewind;
+	engine->reset.cancel = execlists_reset_cancel;
 	engine->reset.finish = execlists_reset_finish;
 
 	engine->park = execlists_park;
@@ -3713,13 +3889,27 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
 
 	if (INTEL_GEN(engine->i915) >= 12)
 		engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
+
+	if (intel_engine_has_preemption(engine))
+		engine->emit_bb_start = gen8_emit_bb_start;
+	else
+		engine->emit_bb_start = gen8_emit_bb_start_noarb;
 }
 
-static void execlists_destroy(struct intel_engine_cs *engine)
+static void execlists_shutdown(struct intel_engine_cs *engine)
 {
+	/* Synchronise with residual timers and any softirq they raise */
+	del_timer_sync(&engine->execlists.timer);
+	del_timer_sync(&engine->execlists.preempt);
+	tasklet_kill(&engine->execlists.tasklet);
+}
+
+static void execlists_release(struct intel_engine_cs *engine)
+{
+	execlists_shutdown(engine);
+
 	intel_engine_cleanup_common(engine);
 	lrc_destroy_wa_ctx(engine);
-	kfree(engine);
 }
 
 static void
@@ -3727,13 +3917,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
 	/* Default vfuncs which can be overriden by each engine. */
 
-	engine->destroy = execlists_destroy;
 	engine->resume = execlists_resume;
 
-	engine->reset.prepare = execlists_reset_prepare;
-	engine->reset.reset = execlists_reset;
-	engine->reset.finish = execlists_reset_finish;
-
 	engine->cops = &execlists_context_ops;
 	engine->request_alloc = execlists_request_alloc;
 
@@ -3756,10 +3941,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 		 * until a more refined solution exists.
 		 */
 	}
-	if (IS_GEN(engine->i915, 8))
-		engine->emit_bb_start = gen8_emit_bb_start;
-	else
-		engine->emit_bb_start = gen9_emit_bb_start;
 }
 
 static inline void
@@ -3803,6 +3984,11 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
 
 int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 {
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct drm_i915_private *i915 = engine->i915;
+	struct intel_uncore *uncore = engine->uncore;
+	u32 base = engine->mmio_base;
+
 	tasklet_init(&engine->execlists.tasklet,
 		     execlists_submission_tasklet, (unsigned long)engine);
 	timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
@@ -3814,21 +4000,6 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 	if (engine->class == RENDER_CLASS)
 		rcs_submission_override(engine);
 
-	return 0;
-}
-
-int intel_execlists_submission_init(struct intel_engine_cs *engine)
-{
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct drm_i915_private *i915 = engine->i915;
-	struct intel_uncore *uncore = engine->uncore;
-	u32 base = engine->mmio_base;
-	int ret;
-
-	ret = intel_engine_init_common(engine);
-	if (ret)
-		return ret;
-
 	if (intel_init_workaround_bb(engine))
 		/*
 		 * We continue even if we fail to initialize WA batch
@@ -3860,6 +4031,9 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
 
 	reset_csb_pointers(engine);
 
+	/* Finally, take ownership and responsibility for cleanup! */
+	engine->release = execlists_release;
+
 	return 0;
 }
 
@@ -3899,18 +4073,21 @@ static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine)
 
 static void init_common_reg_state(u32 * const regs,
 				  const struct intel_engine_cs *engine,
-				  const struct intel_ring *ring)
+				  const struct intel_ring *ring,
+				  bool inhibit)
 {
-	regs[CTX_CONTEXT_CONTROL] =
-		_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
-		_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
-	if (INTEL_GEN(engine->i915) < 11)
-		regs[CTX_CONTEXT_CONTROL] |=
-			_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
-					    CTX_CTRL_RS_CTX_ENABLE);
+	u32 ctl;
 
-	regs[CTX_RING_BUFFER_CONTROL] = RING_CTL_SIZE(ring->size) | RING_VALID;
-	regs[CTX_BB_STATE] = RING_BB_PPGTT;
+	ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+	ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+	if (inhibit)
+		ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
+	if (INTEL_GEN(engine->i915) < 11)
+		ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+					   CTX_CTRL_RS_CTX_ENABLE);
+	regs[CTX_CONTEXT_CONTROL] = ctl;
+
+	regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
 }
 
 static void init_wa_bb_reg_state(u32 * const regs,
@@ -3966,7 +4143,7 @@ static void execlists_init_reg_state(u32 *regs,
 				     const struct intel_context *ce,
 				     const struct intel_engine_cs *engine,
 				     const struct intel_ring *ring,
-				     bool close)
+				     bool inhibit)
 {
 	/*
 	 * A context is actually a big batch buffer with several
@@ -3978,21 +4155,17 @@ static void execlists_init_reg_state(u32 *regs,
 	 *
 	 * Must keep consistent with virtual_update_register_offsets().
 	 */
-	u32 *bbe = set_offsets(regs, reg_offsets(engine), engine);
+	set_offsets(regs, reg_offsets(engine), engine, inhibit);
 
-	if (close) { /* Close the batch; used mainly by live_lrc_layout() */
-		*bbe = MI_BATCH_BUFFER_END;
-		if (INTEL_GEN(engine->i915) >= 10)
-			*bbe |= BIT(0);
-	}
-
-	init_common_reg_state(regs, engine, ring);
+	init_common_reg_state(regs, engine, ring, inhibit);
 	init_ppgtt_reg_state(regs, vm_alias(ce->vm));
 
 	init_wa_bb_reg_state(regs, engine,
 			     INTEL_GEN(engine->i915) >= 12 ?
 			     GEN12_CTX_BB_PER_CTX_PTR :
 			     CTX_BB_PER_CTX_PTR);
+
+	__reset_stop_ring(regs, engine);
 }
 
 static int
@@ -4003,7 +4176,6 @@ populate_lr_context(struct intel_context *ce,
 {
 	bool inhibit = true;
 	void *vaddr;
-	u32 *regs;
 	int ret;
 
 	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
@@ -4027,16 +4199,14 @@ populate_lr_context(struct intel_context *ce,
 
 		memcpy(vaddr, defaults, engine->context_size);
 		i915_gem_object_unpin_map(engine->default_state);
+		__set_bit(CONTEXT_VALID_BIT, &ce->flags);
 		inhibit = false;
 	}
 
 	/* The second page of the context object contains some fields which must
 	 * be set up prior to the first execution. */
-	regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
-	execlists_init_reg_state(regs, ce, engine, ring, inhibit);
-	if (inhibit)
-		regs[CTX_CONTEXT_CONTROL] |=
-			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+	execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
+				 ce, engine, ring, inhibit);
 
 	ret = 0;
 err_unpin_ctx:
@@ -4174,6 +4344,13 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve)
 						ve->siblings[0]);
 }
 
+static int virtual_context_alloc(struct intel_context *ce)
+{
+	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+	return __execlists_context_alloc(ce, ve->siblings[0]);
+}
+
 static int virtual_context_pin(struct intel_context *ce)
 {
 	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
@@ -4211,6 +4388,8 @@ static void virtual_context_exit(struct intel_context *ce)
 }
 
 static const struct intel_context_ops virtual_context_ops = {
+	.alloc = virtual_context_alloc,
+
 	.pin = virtual_context_pin,
 	.unpin = execlists_context_unpin,
 
@@ -4237,10 +4416,9 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
 		mask = ve->siblings[0]->mask;
 	}
 
-	GEM_TRACE("%s: rq=%llx:%lld, mask=%x, prio=%d\n",
-		  ve->base.name,
-		  rq->fence.context, rq->fence.seqno,
-		  mask, ve->base.execlists.queue_priority_hint);
+	ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
+		     rq->fence.context, rq->fence.seqno,
+		     mask, ve->base.execlists.queue_priority_hint);
 
 	return mask;
 }
@@ -4331,10 +4509,9 @@ static void virtual_submit_request(struct i915_request *rq)
 	struct i915_request *old;
 	unsigned long flags;
 
-	GEM_TRACE("%s: rq=%llx:%lld\n",
-		  ve->base.name,
-		  rq->fence.context,
-		  rq->fence.seqno);
+	ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
+		     rq->fence.context,
+		     rq->fence.seqno);
 
 	GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
 
@@ -4402,8 +4579,7 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 }
 
 struct intel_context *
-intel_execlists_create_virtual(struct i915_gem_context *ctx,
-			       struct intel_engine_cs **siblings,
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 			       unsigned int count)
 {
 	struct virtual_engine *ve;
@@ -4414,13 +4590,13 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 		return ERR_PTR(-EINVAL);
 
 	if (count == 1)
-		return intel_context_create(ctx, siblings[0]);
+		return intel_context_create(siblings[0]);
 
 	ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
 	if (!ve)
 		return ERR_PTR(-ENOMEM);
 
-	ve->base.i915 = ctx->i915;
+	ve->base.i915 = siblings[0]->i915;
 	ve->base.gt = siblings[0]->gt;
 	ve->base.uncore = siblings[0]->uncore;
 	ve->base.id = -1;
@@ -4449,7 +4625,6 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 
 	intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
 	intel_engine_init_breadcrumbs(&ve->base);
-
 	intel_engine_init_execlists(&ve->base);
 
 	ve->base.cops = &virtual_context_ops;
@@ -4465,7 +4640,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 		     virtual_submission_tasklet,
 		     (unsigned long)ve);
 
-	intel_context_init(&ve->context, ctx, &ve->base);
+	intel_context_init(&ve->context, &ve->base);
 
 	for (n = 0; n < count; n++) {
 		struct intel_engine_cs *sibling = siblings[n];
@@ -4532,12 +4707,6 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 
 	ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
 
-	err = __execlists_context_alloc(&ve->context, siblings[0]);
-	if (err)
-		goto err_put;
-
-	__set_bit(CONTEXT_ALLOC_BIT, &ve->context.flags);
-
 	return &ve->context;
 
 err_put:
@@ -4546,14 +4715,12 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 }
 
 struct intel_context *
-intel_execlists_clone_virtual(struct i915_gem_context *ctx,
-			      struct intel_engine_cs *src)
+intel_execlists_clone_virtual(struct intel_engine_cs *src)
 {
 	struct virtual_engine *se = to_virtual_engine(src);
 	struct intel_context *dst;
 
-	dst = intel_execlists_create_virtual(ctx,
-					     se->siblings,
+	dst = intel_execlists_create_virtual(se->siblings,
 					     se->num_siblings);
 	if (IS_ERR(dst))
 		return dst;
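
[Editor's note] For reference, a userspace sketch of the variable-length register-offset encoding that set_offsets() consumes from the NOP()/LRI()/REG()/REG16()/END() tables above: REG() emits a single byte (dword index, high bit clear), REG16() emits two bytes with a continuation bit (BIT(7)) on the first. The decoder below mirrors the inner loop of set_offsets(); names and the demo table are local to this sketch, not part of the patch.

/*
 * Reassemble register offsets from the 7-bit, continuation-bit encoded
 * stream used by the gen8+ context-image offset tables.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static uint32_t decode_offset(const uint8_t **data)
{
	uint32_t offset = 0;
	uint8_t v;

	do {
		v = *(*data)++;
		offset <<= 7;
		offset |= v & ~BIT(7);
	} while (v & BIT(7));

	return offset << 2;	/* stored as a dword index, used as a byte offset */
}

int main(void)
{
	/* REG(0x0c8) -> 0x32;  REG16(0x3a8) -> { 0x81, 0x6a } */
	const uint8_t table[] = { 0x0c8 >> 2,
				  (0x3a8 >> 9) | BIT(7), (0x3a8 >> 2) & 0x7f };
	const uint8_t *p = table;

	assert(decode_offset(&p) == 0x0c8);
	assert(decode_offset(&p) == 0x3a8);
	puts("offsets decoded");
	return 0;
}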
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index 04511d8..dfbc214 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -83,7 +83,6 @@ enum {
 void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
 
 int intel_execlists_submission_setup(struct intel_engine_cs *engine);
-int intel_execlists_submission_init(struct intel_engine_cs *engine);
 
 /* Logical Ring Contexts */
 /* At the start of the context image is its per-process HWS page */
@@ -111,13 +110,11 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 				   unsigned int max);
 
 struct intel_context *
-intel_execlists_create_virtual(struct i915_gem_context *ctx,
-			       struct intel_engine_cs **siblings,
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 			       unsigned int count);
 
 struct intel_context *
-intel_execlists_clone_virtual(struct i915_gem_context *ctx,
-			      struct intel_engine_cs *src);
+intel_execlists_clone_virtual(struct intel_engine_cs *src);
 
 int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 				     const struct intel_engine_cs *master,
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 06ab027..08a3be6 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -13,8 +13,8 @@
 #define CTX_CONTEXT_CONTROL		(0x02 + 1)
 #define CTX_RING_HEAD			(0x04 + 1)
 #define CTX_RING_TAIL			(0x06 + 1)
-#define CTX_RING_BUFFER_START		(0x08 + 1)
-#define CTX_RING_BUFFER_CONTROL		(0x0a + 1)
+#define CTX_RING_START			(0x08 + 1)
+#define CTX_RING_CTL			(0x0a + 1)
 #define CTX_BB_STATE			(0x10 + 1)
 #define CTX_BB_PER_CTX_PTR		(0x18 + 1)
 #define CTX_PDP3_UDW			(0x24 + 1)
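
[Editor's note] A toy illustration of the "(offset + 1)" convention in the CTX_* indices above: the logical ring context image is a stream of MI_LOAD_REGISTER_IMM pairs, so dword N holds a register offset and dword N + 1 holds the value loaded into it. The miniature image and the register offsets below are examples only, not the real layout.

#include <stdint.h>
#include <stdio.h>

#define CTX_RING_HEAD (0x04 + 1)
#define CTX_RING_TAIL (0x06 + 1)

int main(void)
{
	uint32_t regs[0x10] = { 0 };

	/* A context restore writes regs[N + 1] into the MMIO register named by regs[N]. */
	regs[CTX_RING_HEAD - 1] = 0x2034;	/* example RING_HEAD offset */
	regs[CTX_RING_HEAD]     = 0x0;
	regs[CTX_RING_TAIL - 1] = 0x2030;	/* example RING_TAIL offset */
	regs[CTX_RING_TAIL]     = 0x80;

	for (int i = 4; i <= 7; i++)
		printf("dword %#x = %#x\n", i, (unsigned int)regs[i]);
	return 0;
}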
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 2b97799..eeef90b 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -127,7 +127,7 @@ struct drm_i915_mocs_table {
 		   LE_0_PAGETABLE | LE_TC_2_LLC_ELLC | LE_LRUM(3), \
 		   L3_3_WB)
 
-static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
+static const struct drm_i915_mocs_entry skl_mocs_table[] = {
 	GEN9_MOCS_ENTRIES,
 	MOCS_ENTRY(I915_MOCS_CACHED,
 		   LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
@@ -233,7 +233,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
 		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
 		   L3_1_UC)
 
-static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
+static const struct drm_i915_mocs_entry tgl_mocs_table[] = {
 	/* Base - Error (Reserved for Non-Use) */
 	MOCS_ENTRY(0, 0x0, 0x0),
 	/* Base - Reserved */
@@ -267,7 +267,7 @@ static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
 		   L3_3_WB),
 };
 
-static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
+static const struct drm_i915_mocs_entry icl_mocs_table[] = {
 	/* Base - Uncached (Deprecated) */
 	MOCS_ENTRY(I915_MOCS_UNCACHED,
 		   LE_1_UC | LE_TC_1_LLC,
@@ -283,65 +283,42 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
 static bool get_mocs_settings(const struct drm_i915_private *i915,
 			      struct drm_i915_mocs_table *table)
 {
-	bool result = false;
-
 	if (INTEL_GEN(i915) >= 12) {
-		table->size  = ARRAY_SIZE(tigerlake_mocs_table);
-		table->table = tigerlake_mocs_table;
+		table->size  = ARRAY_SIZE(tgl_mocs_table);
+		table->table = tgl_mocs_table;
 		table->n_entries = GEN11_NUM_MOCS_ENTRIES;
-		result = true;
 	} else if (IS_GEN(i915, 11)) {
-		table->size  = ARRAY_SIZE(icelake_mocs_table);
-		table->table = icelake_mocs_table;
+		table->size  = ARRAY_SIZE(icl_mocs_table);
+		table->table = icl_mocs_table;
 		table->n_entries = GEN11_NUM_MOCS_ENTRIES;
-		result = true;
 	} else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
-		table->size  = ARRAY_SIZE(skylake_mocs_table);
+		table->size  = ARRAY_SIZE(skl_mocs_table);
 		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
-		table->table = skylake_mocs_table;
-		result = true;
+		table->table = skl_mocs_table;
 	} else if (IS_GEN9_LP(i915)) {
 		table->size  = ARRAY_SIZE(broxton_mocs_table);
 		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
 		table->table = broxton_mocs_table;
-		result = true;
 	} else {
 		WARN_ONCE(INTEL_GEN(i915) >= 9,
 			  "Platform that should have a MOCS table does not.\n");
+		return false;
 	}
 
+	if (GEM_DEBUG_WARN_ON(table->size > table->n_entries))
+		return false;
+
 	/* WaDisableSkipCaching:skl,bxt,kbl,glk */
 	if (IS_GEN(i915, 9)) {
 		int i;
 
 		for (i = 0; i < table->size; i++)
-			if (WARN_ON(table->table[i].l3cc_value &
-				    (L3_ESC(1) | L3_SCC(0x7))))
+			if (GEM_DEBUG_WARN_ON(table->table[i].l3cc_value &
+					      (L3_ESC(1) | L3_SCC(0x7))))
 				return false;
 	}
 
-	return result;
-}
-
-static i915_reg_t mocs_register(const struct intel_engine_cs *engine, int index)
-{
-	switch (engine->id) {
-	case RCS0:
-		return GEN9_GFX_MOCS(index);
-	case VCS0:
-		return GEN9_MFX0_MOCS(index);
-	case BCS0:
-		return GEN9_BLT_MOCS(index);
-	case VECS0:
-		return GEN9_VEBOX_MOCS(index);
-	case VCS1:
-		return GEN9_MFX1_MOCS(index);
-	case VCS2:
-		return GEN11_MFX2_MOCS(index);
-	default:
-		MISSING_CASE(engine->id);
-		return INVALID_MMIO_REG;
-	}
+	return true;
 }
 
 /*
@@ -351,29 +328,47 @@ static i915_reg_t mocs_register(const struct intel_engine_cs *engine, int index)
 static u32 get_entry_control(const struct drm_i915_mocs_table *table,
 			     unsigned int index)
 {
-	if (table->table[index].used)
+	if (index < table->size && table->table[index].used)
 		return table->table[index].control_value;
 
 	return table->table[I915_MOCS_PTE].control_value;
 }
 
+#define for_each_mocs(mocs, t, i) \
+	for (i = 0; \
+	     i < (t)->n_entries ? (mocs = get_entry_control((t), i)), 1 : 0;\
+	     i++)
+
+static void __init_mocs_table(struct intel_uncore *uncore,
+			      const struct drm_i915_mocs_table *table,
+			      u32 addr)
+{
+	unsigned int i;
+	u32 mocs;
+
+	for_each_mocs(mocs, table, i)
+		intel_uncore_write_fw(uncore, _MMIO(addr + i * 4), mocs);
+}
+
+static u32 mocs_offset(const struct intel_engine_cs *engine)
+{
+	static const u32 offset[] = {
+		[RCS0]  =  __GEN9_RCS0_MOCS0,
+		[VCS0]  =  __GEN9_VCS0_MOCS0,
+		[VCS1]  =  __GEN9_VCS1_MOCS0,
+		[VECS0] =  __GEN9_VECS0_MOCS0,
+		[BCS0]  =  __GEN9_BCS0_MOCS0,
+		[VCS2]  = __GEN11_VCS2_MOCS0,
+	};
+
+	GEM_BUG_ON(engine->id >= ARRAY_SIZE(offset));
+	return offset[engine->id];
+}
+
 static void init_mocs_table(struct intel_engine_cs *engine,
 			    const struct drm_i915_mocs_table *table)
 {
-	struct intel_uncore *uncore = engine->uncore;
-	u32 unused_value = table->table[I915_MOCS_PTE].control_value;
-	unsigned int i;
-
-	for (i = 0; i < table->size; i++)
-		intel_uncore_write_fw(uncore,
-				      mocs_register(engine, i),
-				      get_entry_control(table, i));
-
-	/* All remaining entries are unused */
-	for (; i < table->n_entries; i++)
-		intel_uncore_write_fw(uncore,
-				      mocs_register(engine, i),
-				      unused_value);
+	__init_mocs_table(engine->uncore, table, mocs_offset(engine));
 }
 
 /*
@@ -383,51 +378,34 @@ static void init_mocs_table(struct intel_engine_cs *engine,
 static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
 			  unsigned int index)
 {
-	if (table->table[index].used)
+	if (index < table->size && table->table[index].used)
 		return table->table[index].l3cc_value;
 
 	return table->table[I915_MOCS_PTE].l3cc_value;
 }
 
-static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
-			       u16 low,
-			       u16 high)
+static inline u32 l3cc_combine(u16 low, u16 high)
 {
 	return low | (u32)high << 16;
 }
 
+#define for_each_l3cc(l3cc, t, i) \
+	for (i = 0; \
+	     i < ((t)->n_entries + 1) / 2 ? \
+	     (l3cc = l3cc_combine(get_entry_l3cc((t), 2 * i), \
+				  get_entry_l3cc((t), 2 * i + 1))), 1 : \
+	     0; \
+	     i++)
+
 static void init_l3cc_table(struct intel_engine_cs *engine,
 			    const struct drm_i915_mocs_table *table)
 {
 	struct intel_uncore *uncore = engine->uncore;
-	u16 unused_value = table->table[I915_MOCS_PTE].l3cc_value;
 	unsigned int i;
+	u32 l3cc;
 
-	for (i = 0; i < table->size / 2; i++) {
-		u16 low = get_entry_l3cc(table, 2 * i);
-		u16 high = get_entry_l3cc(table, 2 * i + 1);
-
-		intel_uncore_write(uncore,
-				   GEN9_LNCFCMOCS(i),
-				   l3cc_combine(table, low, high));
-	}
-
-	/* Odd table size - 1 left over */
-	if (table->size & 1) {
-		u16 low = get_entry_l3cc(table, 2 * i);
-
-		intel_uncore_write(uncore,
-				   GEN9_LNCFCMOCS(i),
-				   l3cc_combine(table, low, unused_value));
-		i++;
-	}
-
-	/* All remaining entries are also unused */
-	for (; i < table->n_entries / 2; i++)
-		intel_uncore_write(uncore,
-				   GEN9_LNCFCMOCS(i),
-				   l3cc_combine(table, unused_value,
-						unused_value));
+	for_each_l3cc(l3cc, table, i)
+		intel_uncore_write_fw(uncore, GEN9_LNCFCMOCS(i), l3cc);
 }
 
 void intel_mocs_init_engine(struct intel_engine_cs *engine)
@@ -448,11 +426,14 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
 		init_l3cc_table(engine, &table);
 }
 
-static void intel_mocs_init_global(struct intel_gt *gt)
+static u32 global_mocs_offset(void)
 {
-	struct intel_uncore *uncore = gt->uncore;
+	return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0));
+}
+
+static void init_global_mocs(struct intel_gt *gt)
+{
 	struct drm_i915_mocs_table table;
-	unsigned int index;
 
 	/*
 	 * LLC and eDRAM control values are not applicable to dgfx
@@ -460,32 +441,18 @@ static void intel_mocs_init_global(struct intel_gt *gt)
 	if (IS_DGFX(gt->i915))
 		return;
 
-	GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915));
-
 	if (!get_mocs_settings(gt->i915, &table))
 		return;
 
-	if (GEM_DEBUG_WARN_ON(table.size > table.n_entries))
-		return;
-
-	for (index = 0; index < table.size; index++)
-		intel_uncore_write(uncore,
-				   GEN12_GLOBAL_MOCS(index),
-				   table.table[index].control_value);
-
-	/*
-	 * Ok, now set the unused entries to the invalid entry (index 0). These
-	 * entries are officially undefined and no contract for the contents and
-	 * settings is given for these entries.
-	 */
-	for (; index < table.n_entries; index++)
-		intel_uncore_write(uncore,
-				   GEN12_GLOBAL_MOCS(index),
-				   table.table[0].control_value);
+	__init_mocs_table(gt->uncore, &table, global_mocs_offset());
 }
 
 void intel_mocs_init(struct intel_gt *gt)
 {
 	if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
-		intel_mocs_init_global(gt);
+		init_global_mocs(gt);
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_mocs.c"
+#endif
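
[Editor's note] For reference, a userspace sketch of the pairwise packing performed by the new for_each_l3cc() walk in init_l3cc_table() above: each 32-bit LNCFCMOCS register holds two 16-bit L3 control entries, n_entries is rounded up to cover the odd slot, and out-of-range or unused slots fall back to the I915_MOCS_PTE entry. The table contents here are invented for the demo.

#include <stdint.h>
#include <stdio.h>

#define I915_MOCS_PTE 1

struct mocs_entry { uint16_t l3cc; int used; };

static const struct mocs_entry table[] = {
	[0]             = { 0x0010, 1 },
	[I915_MOCS_PTE] = { 0x0030, 1 },
	[2]             = { 0x0033, 1 },
};
static const unsigned int table_size = 3;
static const unsigned int n_entries = 6;	/* hardware slots to program */

static uint16_t entry_l3cc(unsigned int i)
{
	if (i < table_size && table[i].used)
		return table[i].l3cc;
	return table[I915_MOCS_PTE].l3cc;	/* unused slots take the PTE entry */
}

int main(void)
{
	for (unsigned int i = 0; i < (n_entries + 1) / 2; i++) {
		uint32_t v = entry_l3cc(2 * i) | (uint32_t)entry_l3cc(2 * i + 1) << 16;

		printf("LNCFCMOCS(%u) = 0x%08x\n", i, (unsigned int)v);
	}
	return 0;
}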
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
new file mode 100644
index 0000000..f86f7e6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/slab.h>
+
+#include "i915_trace.h"
+#include "intel_gtt.h"
+#include "gen6_ppgtt.h"
+#include "gen8_ppgtt.h"
+
+struct i915_page_table *alloc_pt(struct i915_address_space *vm)
+{
+	struct i915_page_table *pt;
+
+	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
+	if (unlikely(!pt))
+		return ERR_PTR(-ENOMEM);
+
+	if (unlikely(setup_page_dma(vm, &pt->base))) {
+		kfree(pt);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	atomic_set(&pt->used, 0);
+	return pt;
+}
+
+struct i915_page_directory *__alloc_pd(size_t sz)
+{
+	struct i915_page_directory *pd;
+
+	pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
+	if (unlikely(!pd))
+		return NULL;
+
+	spin_lock_init(&pd->lock);
+	return pd;
+}
+
+struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
+{
+	struct i915_page_directory *pd;
+
+	pd = __alloc_pd(sizeof(*pd));
+	if (unlikely(!pd))
+		return ERR_PTR(-ENOMEM);
+
+	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
+		kfree(pd);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return pd;
+}
+
+void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
+{
+	cleanup_page_dma(vm, pd);
+	kfree(pd);
+}
+
+static inline void
+write_dma_entry(struct i915_page_dma * const pdma,
+		const unsigned short idx,
+		const u64 encoded_entry)
+{
+	u64 * const vaddr = kmap_atomic(pdma->page);
+
+	vaddr[idx] = encoded_entry;
+	kunmap_atomic(vaddr);
+}
+
+void
+__set_pd_entry(struct i915_page_directory * const pd,
+	       const unsigned short idx,
+	       struct i915_page_dma * const to,
+	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
+{
+	/* Each thread pre-pins the pd, and we may have a thread per pde. */
+	GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry));
+
+	atomic_inc(px_used(pd));
+	pd->entry[idx] = to;
+	write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
+}
+
+void
+clear_pd_entry(struct i915_page_directory * const pd,
+	       const unsigned short idx,
+	       const struct i915_page_scratch * const scratch)
+{
+	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
+
+	write_dma_entry(px_base(pd), idx, scratch->encode);
+	pd->entry[idx] = NULL;
+	atomic_dec(px_used(pd));
+}
+
+bool
+release_pd_entry(struct i915_page_directory * const pd,
+		 const unsigned short idx,
+		 struct i915_page_table * const pt,
+		 const struct i915_page_scratch * const scratch)
+{
+	bool free = false;
+
+	if (atomic_add_unless(&pt->used, -1, 1))
+		return false;
+
+	spin_lock(&pd->lock);
+	if (atomic_dec_and_test(&pt->used)) {
+		clear_pd_entry(pd, idx, scratch);
+		free = true;
+	}
+	spin_unlock(&pd->lock);
+
+	return free;
+}
+
+int i915_ppgtt_init_hw(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+
+	gtt_write_workarounds(gt);
+
+	if (IS_GEN(i915, 6))
+		gen6_ppgtt_enable(gt);
+	else if (IS_GEN(i915, 7))
+		gen7_ppgtt_enable(gt);
+
+	return 0;
+}
+
+static struct i915_ppgtt *
+__ppgtt_create(struct intel_gt *gt)
+{
+	if (INTEL_GEN(gt->i915) < 8)
+		return gen6_ppgtt_create(gt);
+	else
+		return gen8_ppgtt_create(gt);
+}
+
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
+{
+	struct i915_ppgtt *ppgtt;
+
+	ppgtt = __ppgtt_create(gt);
+	if (IS_ERR(ppgtt))
+		return ppgtt;
+
+	trace_i915_ppgtt_create(&ppgtt->vm);
+
+	return ppgtt;
+}
+
+static int ppgtt_bind_vma(struct i915_vma *vma,
+			  enum i915_cache_level cache_level,
+			  u32 flags)
+{
+	u32 pte_flags;
+	int err;
+
+	if (flags & I915_VMA_ALLOC) {
+		err = vma->vm->allocate_va_range(vma->vm,
+						 vma->node.start, vma->size);
+		if (err)
+			return err;
+
+		set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
+	}
+
+	/* Applicable to VLV, and gen8+ */
+	pte_flags = 0;
+	if (i915_gem_object_is_readonly(vma->obj))
+		pte_flags |= PTE_READ_ONLY;
+
+	GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
+	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+	wmb();
+
+	return 0;
+}
+
+static void ppgtt_unbind_vma(struct i915_vma *vma)
+{
+	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
+		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+}
+
+int ppgtt_set_pages(struct i915_vma *vma)
+{
+	GEM_BUG_ON(vma->pages);
+
+	vma->pages = vma->obj->mm.pages;
+
+	vma->page_sizes = vma->obj->mm.page_sizes;
+
+	return 0;
+}
+
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+
+	ppgtt->vm.gt = gt;
+	ppgtt->vm.i915 = i915;
+	ppgtt->vm.dma = &i915->drm.pdev->dev;
+	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+
+	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+
+	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
+	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
+	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
+	ppgtt->vm.vma_ops.clear_pages = clear_pages;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 700104b..9e303c29 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -88,21 +88,18 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
 	 * do not want the enable hysteresis to be less than the wakeup latency.
 	 *
 	 * igt/gem_exec_nop/sequential provides a rough estimate for the
-	 * service latency, and puts it around 10us for Broadwell (and other
-	 * big core) and around 40us for Broxton (and other low power cores).
-	 * [Note that for legacy ringbuffer submission, this is less than 1us!]
-	 * However, the wakeup latency on Broxton is closer to 100us. To be
-	 * conservative, we have to factor in a context switch on top (due
-	 * to ksoftirqd).
+	 * service latency, and puts it under 10us for Icelake, similar to
+	 * Broadwell+. To be conservative, we want to factor in a context
+	 * switch on top (due to ksoftirqd).
 	 */
-	set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
-	set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+	set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 60);
+	set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 60);
 
 	/* 3a: Enable RC6 */
-	set(uncore, GEN6_RC_CONTROL,
-	    GEN6_RC_CTL_HW_ENABLE |
-	    GEN6_RC_CTL_RC6_ENABLE |
-	    GEN6_RC_CTL_EI_MODE(1));
+	rc6->ctl_enable =
+		GEN6_RC_CTL_HW_ENABLE |
+		GEN6_RC_CTL_RC6_ENABLE |
+		GEN6_RC_CTL_EI_MODE(1);
 
 	set(uncore, GEN9_PG_ENABLE,
 	    GEN9_RENDER_PG_ENABLE |
@@ -173,10 +170,10 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
 	else
 		rc6_mode = GEN6_RC_CTL_EI_MODE(1);
 
-	set(uncore, GEN6_RC_CONTROL,
-	    GEN6_RC_CTL_HW_ENABLE |
-	    GEN6_RC_CTL_RC6_ENABLE |
-	    rc6_mode);
+	rc6->ctl_enable =
+		GEN6_RC_CTL_HW_ENABLE |
+		GEN6_RC_CTL_RC6_ENABLE |
+		rc6_mode;
 
 	/*
 	 * WaRsDisableCoarsePowerGating:skl,cnl
@@ -203,10 +200,10 @@ static void gen8_rc6_enable(struct intel_rc6 *rc6)
 	set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
 
 	/* 3: Enable RC6 */
-	set(uncore, GEN6_RC_CONTROL,
+	rc6->ctl_enable =
 	    GEN6_RC_CTL_HW_ENABLE |
 	    GEN7_RC_CTL_TO_MODE |
-	    GEN6_RC_CTL_RC6_ENABLE);
+	    GEN6_RC_CTL_RC6_ENABLE;
 }
 
 static void gen6_rc6_enable(struct intel_rc6 *rc6)
@@ -242,10 +239,10 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
 		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
 	if (HAS_RC6pp(i915))
 		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
-	set(uncore, GEN6_RC_CONTROL,
+	rc6->ctl_enable =
 	    rc6_mask |
 	    GEN6_RC_CTL_EI_MODE(1) |
-	    GEN6_RC_CTL_HW_ENABLE);
+	    GEN6_RC_CTL_HW_ENABLE;
 
 	rc6vids = 0;
 	ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
@@ -363,7 +360,7 @@ static void chv_rc6_enable(struct intel_rc6 *rc6)
 			       VLV_RENDER_RC6_COUNT_EN));
 
 	/* 3: Enable RC6 */
-	set(uncore, GEN6_RC_CONTROL, GEN7_RC_CTL_TO_MODE);
+	rc6->ctl_enable = GEN7_RC_CTL_TO_MODE;
 }
 
 static void vlv_rc6_enable(struct intel_rc6 *rc6)
@@ -389,8 +386,8 @@ static void vlv_rc6_enable(struct intel_rc6 *rc6)
 			       VLV_MEDIA_RC6_COUNT_EN |
 			       VLV_RENDER_RC6_COUNT_EN));
 
-	set(uncore, GEN6_RC_CONTROL,
-	    GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
+	rc6->ctl_enable =
+	    GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
 }
 
 static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
@@ -491,64 +488,19 @@ static void rpm_put(struct intel_rc6 *rc6)
 	rc6->wakeref = false;
 }
 
-static bool intel_rc6_ctx_corrupted(struct intel_rc6 *rc6)
-{
-	return !intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO);
-}
-
-static void intel_rc6_ctx_wa_init(struct intel_rc6 *rc6)
+static bool pctx_corrupted(struct intel_rc6 *rc6)
 {
 	struct drm_i915_private *i915 = rc6_to_i915(rc6);
 
 	if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
-		return;
+		return false;
 
-	if (intel_rc6_ctx_corrupted(rc6)) {
-		DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
-		rc6->ctx_corrupted = true;
-	}
-}
+	if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO))
+		return false;
 
-/**
- * intel_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
- * @rc6: rc6 state
- *
- * Perform any steps needed to re-init the RC6 CTX WA after system resume.
- */
-void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6)
-{
-	if (rc6->ctx_corrupted && !intel_rc6_ctx_corrupted(rc6)) {
-		DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
-		rc6->ctx_corrupted = false;
-	}
-}
-
-/**
- * intel_rc6_ctx_wa_check - check for a new RC6 CTX corruption
- * @rc6: rc6 state
- *
- * Check if an RC6 CTX corruption has happened since the last check and if so
- * disable RC6 and runtime power management.
-*/
-void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6)
-{
-	struct drm_i915_private *i915 = rc6_to_i915(rc6);
-
-	if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
-		return;
-
-	if (rc6->ctx_corrupted)
-		return;
-
-	if (!intel_rc6_ctx_corrupted(rc6))
-		return;
-
-	DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
-
-	intel_rc6_disable(rc6);
-	rc6->ctx_corrupted = true;
-
-	return;
+	dev_notice(i915->drm.dev,
+		   "RC6 context corruption, disabling runtime power management\n");
+	return true;
 }
 
 static void __intel_rc6_disable(struct intel_rc6 *rc6)
@@ -575,8 +527,6 @@ void intel_rc6_init(struct intel_rc6 *rc6)
 	if (!rc6_supported(rc6))
 		return;
 
-	intel_rc6_ctx_wa_init(rc6);
-
 	if (IS_CHERRYVIEW(i915))
 		err = chv_rc6_init(rc6);
 	else if (IS_VALLEYVIEW(i915))
@@ -611,9 +561,6 @@ void intel_rc6_enable(struct intel_rc6 *rc6)
 
 	GEM_BUG_ON(rc6->enabled);
 
-	if (rc6->ctx_corrupted)
-		return;
-
 	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
 	if (IS_CHERRYVIEW(i915))
@@ -629,13 +576,51 @@ void intel_rc6_enable(struct intel_rc6 *rc6)
 	else if (INTEL_GEN(i915) >= 6)
 		gen6_rc6_enable(rc6);
 
+	rc6->manual = rc6->ctl_enable & GEN6_RC_CTL_RC6_ENABLE;
+	if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+		rc6->ctl_enable = 0;
+
 	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
 
+	if (unlikely(pctx_corrupted(rc6)))
+		return;
+
 	/* rc6 is ready, runtime-pm is go! */
 	rpm_put(rc6);
 	rc6->enabled = true;
 }
 
+void intel_rc6_unpark(struct intel_rc6 *rc6)
+{
+	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+	if (!rc6->enabled)
+		return;
+
+	/* Restore HW timers for automatic RC6 entry while busy */
+	set(uncore, GEN6_RC_CONTROL, rc6->ctl_enable);
+}
+
+void intel_rc6_park(struct intel_rc6 *rc6)
+{
+	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+	if (!rc6->enabled)
+		return;
+
+	if (unlikely(pctx_corrupted(rc6))) {
+		intel_rc6_disable(rc6);
+		return;
+	}
+
+	if (!rc6->manual)
+		return;
+
+	/* Turn off the HW timers and go directly to rc6 */
+	set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
+	set(uncore, GEN6_RC_STATE, 0x4 << RC_SW_TARGET_STATE_SHIFT);
+}
+
 void intel_rc6_disable(struct intel_rc6 *rc6)
 {
 	if (!rc6->enabled)
@@ -785,3 +770,7 @@ u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg)
 {
 	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000);
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_rc6.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h
index 1370f68..9f0f23f 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.h
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.h
@@ -15,6 +15,9 @@ struct intel_rc6;
 void intel_rc6_init(struct intel_rc6 *rc6);
 void intel_rc6_fini(struct intel_rc6 *rc6);
 
+void intel_rc6_unpark(struct intel_rc6 *rc6);
+void intel_rc6_park(struct intel_rc6 *rc6);
+
 void intel_rc6_sanitize(struct intel_rc6 *rc6);
 void intel_rc6_enable(struct intel_rc6 *rc6);
 void intel_rc6_disable(struct intel_rc6 *rc6);
@@ -22,7 +25,4 @@ void intel_rc6_disable(struct intel_rc6 *rc6);
 u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, i915_reg_t reg);
 u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg);
 
-void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6);
-void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6);
-
 #endif /* INTEL_RC6_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
index 89ad5697..bfbb623 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
@@ -18,12 +18,14 @@ struct intel_rc6 {
 	u64 prev_hw_residency[4];
 	u64 cur_residency[4];
 
+	u32 ctl_enable;
+
 	struct drm_i915_gem_object *pctx;
 
 	bool supported : 1;
 	bool enabled : 1;
+	bool manual : 1;
 	bool wakeref : 1;
-	bool ctx_corrupted : 1;
 };
 
 #endif /* INTEL_RC6_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index c4edc35..5954ecc 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -29,16 +29,6 @@
 #include "intel_renderstate.h"
 #include "intel_ring.h"
 
-struct intel_renderstate {
-	const struct intel_renderstate_rodata *rodata;
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	u32 batch_offset;
-	u32 batch_size;
-	u32 aux_offset;
-	u32 aux_size;
-};
-
 static const struct intel_renderstate_rodata *
 render_state_get_rodata(const struct intel_engine_cs *engine)
 {
@@ -84,11 +74,11 @@ static int render_state_setup(struct intel_renderstate *so,
 	u32 *d;
 	int ret;
 
-	ret = i915_gem_object_prepare_write(so->obj, &needs_clflush);
+	ret = i915_gem_object_prepare_write(so->vma->obj, &needs_clflush);
 	if (ret)
 		return ret;
 
-	d = kmap_atomic(i915_gem_object_get_dirty_page(so->obj, 0));
+	d = kmap_atomic(i915_gem_object_get_dirty_page(so->vma->obj, 0));
 
 	while (i < rodata->batch_items) {
 		u32 s = rodata->batch[i];
@@ -166,7 +156,7 @@ static int render_state_setup(struct intel_renderstate *so,
 
 	ret = 0;
 out:
-	i915_gem_object_finish_access(so->obj);
+	i915_gem_object_finish_access(so->vma->obj);
 	return ret;
 
 err:
@@ -177,61 +167,84 @@ static int render_state_setup(struct intel_renderstate *so,
 
 #undef OUT_BATCH
 
-int intel_renderstate_emit(struct i915_request *rq)
+int intel_renderstate_init(struct intel_renderstate *so,
+			   struct intel_engine_cs *engine)
 {
-	struct intel_engine_cs *engine = rq->engine;
-	struct intel_renderstate so = {}; /* keep the compiler happy */
+	struct drm_i915_gem_object *obj;
 	int err;
 
-	so.rodata = render_state_get_rodata(engine);
-	if (!so.rodata)
+	memset(so, 0, sizeof(*so));
+
+	so->rodata = render_state_get_rodata(engine);
+	if (!so->rodata)
 		return 0;
 
-	if (so.rodata->batch_items * 4 > PAGE_SIZE)
+	if (so->rodata->batch_items * 4 > PAGE_SIZE)
 		return -EINVAL;
 
-	so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
-	if (IS_ERR(so.obj))
-		return PTR_ERR(so.obj);
+	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 
-	so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL);
-	if (IS_ERR(so.vma)) {
-		err = PTR_ERR(so.vma);
+	so->vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+	if (IS_ERR(so->vma)) {
+		err = PTR_ERR(so->vma);
 		goto err_obj;
 	}
 
-	err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
 	if (err)
 		goto err_vma;
 
-	err = render_state_setup(&so, rq->i915);
+	err = render_state_setup(so, engine->i915);
 	if (err)
 		goto err_unpin;
 
+	return 0;
+
+err_unpin:
+	i915_vma_unpin(so->vma);
+err_vma:
+	i915_vma_close(so->vma);
+err_obj:
+	i915_gem_object_put(obj);
+	so->vma = NULL;
+	return err;
+}
+
+int intel_renderstate_emit(struct intel_renderstate *so,
+			   struct i915_request *rq)
+{
+	struct intel_engine_cs *engine = rq->engine;
+	int err;
+
+	if (!so->vma)
+		return 0;
+
 	err = engine->emit_bb_start(rq,
-				    so.batch_offset, so.batch_size,
+				    so->batch_offset, so->batch_size,
 				    I915_DISPATCH_SECURE);
 	if (err)
-		goto err_unpin;
+		return err;
 
-	if (so.aux_size > 8) {
+	if (so->aux_size > 8) {
 		err = engine->emit_bb_start(rq,
-					    so.aux_offset, so.aux_size,
+					    so->aux_offset, so->aux_size,
 					    I915_DISPATCH_SECURE);
 		if (err)
-			goto err_unpin;
+			return err;
 	}
 
-	i915_vma_lock(so.vma);
-	err = i915_request_await_object(rq, so.vma->obj, false);
+	i915_vma_lock(so->vma);
+	err = i915_request_await_object(rq, so->vma->obj, false);
 	if (err == 0)
-		err = i915_vma_move_to_active(so.vma, rq, 0);
-	i915_vma_unlock(so.vma);
-err_unpin:
-	i915_vma_unpin(so.vma);
-err_vma:
-	i915_vma_close(so.vma);
-err_obj:
-	i915_gem_object_put(so.obj);
+		err = i915_vma_move_to_active(so->vma, rq, 0);
+	i915_vma_unlock(so->vma);
+
 	return err;
 }
+
+void intel_renderstate_fini(struct intel_renderstate *so)
+{
+	i915_vma_unpin_and_release(&so->vma, 0);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h
index 8d50791..5700be6 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h
@@ -27,6 +27,8 @@
 #include <linux/types.h>
 
 struct i915_request;
+struct intel_engine_cs;
+struct i915_vma;
 
 struct intel_renderstate_rodata {
 	const u32 *reloc;
@@ -46,6 +48,19 @@ extern const struct intel_renderstate_rodata gen7_null_state;
 extern const struct intel_renderstate_rodata gen8_null_state;
 extern const struct intel_renderstate_rodata gen9_null_state;
 
-int intel_renderstate_emit(struct i915_request *rq);
+struct intel_renderstate {
+	const struct intel_renderstate_rodata *rodata;
+	struct i915_vma *vma;
+	u32 batch_offset;
+	u32 batch_size;
+	u32 aux_offset;
+	u32 aux_size;
+};
+
+int intel_renderstate_init(struct intel_renderstate *so,
+			   struct intel_engine_cs *engine);
+int intel_renderstate_emit(struct intel_renderstate *so,
+			   struct i915_request *rq);
+void intel_renderstate_fini(struct intel_renderstate *so);
 
 #endif /* _INTEL_RENDERSTATE_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index c97423a..beee0cf 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -21,6 +21,7 @@
 #include "intel_reset.h"
 
 #include "uc/intel_guc.h"
+#include "uc/intel_guc_submission.h"
 
 #define RESET_MAX_RETRIES 3
 
@@ -40,27 +41,29 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
 static void engine_skip_context(struct i915_request *rq)
 {
 	struct intel_engine_cs *engine = rq->engine;
-	struct i915_gem_context *hung_ctx = rq->gem_context;
+	struct intel_context *hung_ctx = rq->context;
 
 	if (!i915_request_is_active(rq))
 		return;
 
 	lockdep_assert_held(&engine->active.lock);
 	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
-		if (rq->gem_context == hung_ctx)
+		if (rq->context == hung_ctx)
 			i915_request_skip(rq, -EIO);
 }
 
-static void client_mark_guilty(struct drm_i915_file_private *file_priv,
-			       const struct i915_gem_context *ctx)
+static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
 {
-	unsigned int score;
+	struct drm_i915_file_private *file_priv = ctx->file_priv;
 	unsigned long prev_hang;
+	unsigned int score;
 
-	if (i915_gem_context_is_banned(ctx))
+	if (IS_ERR_OR_NULL(file_priv))
+		return;
+
+	score = 0;
+	if (banned)
 		score = I915_CLIENT_SCORE_CONTEXT_BAN;
-	else
-		score = 0;
 
 	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
 	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
@@ -75,17 +78,38 @@ static void client_mark_guilty(struct drm_i915_file_private *file_priv,
 	}
 }
 
-static bool context_mark_guilty(struct i915_gem_context *ctx)
+static bool mark_guilty(struct i915_request *rq)
 {
+	struct i915_gem_context *ctx;
 	unsigned long prev_hang;
 	bool banned;
 	int i;
 
+	rcu_read_lock();
+	ctx = rcu_dereference(rq->context->gem_context);
+	if (ctx && !kref_get_unless_zero(&ctx->ref))
+		ctx = NULL;
+	rcu_read_unlock();
+	if (!ctx)
+		return false;
+
+	if (i915_gem_context_is_closed(ctx)) {
+		intel_context_set_banned(rq->context);
+		banned = true;
+		goto out;
+	}
+
 	atomic_inc(&ctx->guilty_count);
 
 	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
-	if (!i915_gem_context_is_bannable(ctx))
-		return false;
+	if (!i915_gem_context_is_bannable(ctx)) {
+		banned = false;
+		goto out;
+	}
+
+	dev_notice(ctx->i915->drm.dev,
+		   "%s context reset due to GPU hang\n",
+		   ctx->name);
 
 	/* Record the timestamp for the last N hangs */
 	prev_hang = ctx->hang_timestamp[0];
@@ -100,38 +124,43 @@ static bool context_mark_guilty(struct i915_gem_context *ctx)
 	if (banned) {
 		DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
 				 ctx->name, atomic_read(&ctx->guilty_count));
-		i915_gem_context_set_banned(ctx);
+		intel_context_set_banned(rq->context);
 	}
 
-	if (!IS_ERR_OR_NULL(ctx->file_priv))
-		client_mark_guilty(ctx->file_priv, ctx);
+	client_mark_guilty(ctx, banned);
 
+out:
+	i915_gem_context_put(ctx);
 	return banned;
 }
 
-static void context_mark_innocent(struct i915_gem_context *ctx)
+static void mark_innocent(struct i915_request *rq)
 {
-	atomic_inc(&ctx->active_count);
+	struct i915_gem_context *ctx;
+
+	rcu_read_lock();
+	ctx = rcu_dereference(rq->context->gem_context);
+	if (ctx)
+		atomic_inc(&ctx->active_count);
+	rcu_read_unlock();
 }
 
 void __i915_request_reset(struct i915_request *rq, bool guilty)
 {
-	GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
-		  rq->engine->name,
-		  rq->fence.context,
-		  rq->fence.seqno,
-		  yesno(guilty));
+	RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
 
 	GEM_BUG_ON(i915_request_completed(rq));
 
+	rcu_read_lock(); /* protect the GEM context */
 	if (guilty) {
 		i915_request_skip(rq, -EIO);
-		if (context_mark_guilty(rq->gem_context))
+		if (mark_guilty(rq))
 			engine_skip_context(rq);
 	} else {
 		dma_fence_set_error(&rq->fence, -EAGAIN);
-		context_mark_innocent(rq->gem_context);
+		mark_innocent(rq);
 	}
+	rcu_read_unlock();
 }
 
 static bool i915_in_reset(struct pci_dev *pdev)
@@ -218,9 +247,8 @@ static int g4x_do_reset(struct intel_gt *gt,
 	return ret;
 }
 
-static int ironlake_do_reset(struct intel_gt *gt,
-			     intel_engine_mask_t engine_mask,
-			     unsigned int retry)
+static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
+			unsigned int retry)
 {
 	struct intel_uncore *uncore = gt->uncore;
 	int ret;
@@ -564,7 +592,7 @@ static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
 	else if (INTEL_GEN(i915) >= 6)
 		return gen6_reset_engines;
 	else if (INTEL_GEN(i915) >= 5)
-		return ironlake_do_reset;
+		return ilk_do_reset;
 	else if (IS_G4X(i915))
 		return g4x_do_reset;
 	else if (IS_G33(i915) || IS_PINEVIEW(i915))
@@ -592,7 +620,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
 	 */
 	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
 	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
-		GEM_TRACE("engine_mask=%x\n", engine_mask);
+		GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
 		preempt_disable();
 		ret = reset(gt, engine_mask, retry);
 		preempt_enable();
@@ -647,7 +675,8 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
 	 * GPU state upon resume, i.e. fail to restart after a reset.
 	 */
 	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
-	engine->reset.prepare(engine);
+	if (engine->reset.prepare)
+		engine->reset.prepare(engine);
 }
 
 static void revoke_mmaps(struct intel_gt *gt)
@@ -667,8 +696,13 @@ static void revoke_mmaps(struct intel_gt *gt)
 			continue;
 
 		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
-		node = &vma->obj->base.vma_node;
+
+		if (!vma->mmo)
+			continue;
+
+		node = &vma->mmo->vma_node;
 		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+
 		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
 				    drm_vma_node_offset_addr(node) + vma_offset,
 				    vma->size,
@@ -722,10 +756,11 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
 
 static void reset_finish_engine(struct intel_engine_cs *engine)
 {
-	engine->reset.finish(engine);
+	if (engine->reset.finish)
+		engine->reset.finish(engine);
 	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
 
-	intel_engine_breadcrumbs_irq(engine);
+	intel_engine_signal_breadcrumbs(engine);
 }
 
 static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
@@ -745,8 +780,7 @@ static void nop_submit_request(struct i915_request *request)
 	struct intel_engine_cs *engine = request->engine;
 	unsigned long flags;
 
-	GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
-		  engine->name, request->fence.context, request->fence.seqno);
+	RQ_TRACE(request, "-EIO\n");
 	dma_fence_set_error(&request->fence, -EIO);
 
 	spin_lock_irqsave(&engine->active.lock, flags);
@@ -754,7 +788,7 @@ static void nop_submit_request(struct i915_request *request)
 	i915_request_mark_complete(request);
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 
-	intel_engine_queue_breadcrumbs(engine);
+	intel_engine_signal_breadcrumbs(engine);
 }
 
 static void __intel_gt_set_wedged(struct intel_gt *gt)
@@ -773,7 +807,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
 			intel_engine_dump(engine, &p, "%s\n", engine->name);
 	}
 
-	GEM_TRACE("start\n");
+	GT_TRACE(gt, "start\n");
 
 	/*
 	 * First, stop submission to hw, but do not yet complete requests by
@@ -799,11 +833,12 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
 
 	/* Mark all executing requests as skipped */
 	for_each_engine(engine, gt, id)
-		engine->cancel_requests(engine);
+		if (engine->reset.cancel)
+			engine->reset.cancel(engine);
 
 	reset_finish(gt, awake);
 
-	GEM_TRACE("end\n");
+	GT_TRACE(gt, "end\n");
 }
 
 void intel_gt_set_wedged(struct intel_gt *gt)
@@ -820,7 +855,6 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 	struct intel_timeline *tl;
-	unsigned long flags;
 	bool ok;
 
 	if (!test_bit(I915_WEDGED, &gt->reset.flags))
@@ -830,7 +864,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 	if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
 		return false;
 
-	GEM_TRACE("start\n");
+	GT_TRACE(gt, "start\n");
 
 	/*
 	 * Before unwedging, make sure that all pending operations
@@ -842,7 +876,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 	 *
 	 * No more can be submitted until we reset the wedged bit.
 	 */
-	spin_lock_irqsave(&timelines->lock, flags);
+	spin_lock(&timelines->lock);
 	list_for_each_entry(tl, &timelines->active_list, link) {
 		struct dma_fence *fence;
 
@@ -850,7 +884,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 		if (!fence)
 			continue;
 
-		spin_unlock_irqrestore(&timelines->lock, flags);
+		spin_unlock(&timelines->lock);
 
 		/*
 		 * All internal dependencies (i915_requests) will have
@@ -863,10 +897,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 		dma_fence_put(fence);
 
 		/* Restart iteration after dropping lock */
-		spin_lock_irqsave(&timelines->lock, flags);
+		spin_lock(&timelines->lock);
 		tl = list_entry(&timelines->active_list, typeof(*tl), link);
 	}
-	spin_unlock_irqrestore(&timelines->lock, flags);
+	spin_unlock(&timelines->lock);
 
 	/* We must reset pending GPU events before restoring our submission */
 	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
@@ -892,7 +926,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 	 */
 	intel_engines_reset_default_submission(gt);
 
-	GEM_TRACE("end\n");
+	GT_TRACE(gt, "end\n");
 
 	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
 	clear_bit(I915_WEDGED, &gt->reset.flags);
@@ -967,7 +1001,7 @@ void intel_gt_reset(struct intel_gt *gt,
 	intel_engine_mask_t awake;
 	int ret;
 
-	GEM_TRACE("flags=%lx\n", gt->reset.flags);
+	GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
 
 	might_sleep();
 	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
@@ -1070,9 +1104,10 @@ static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
 int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
 {
 	struct intel_gt *gt = engine->gt;
+	bool uses_guc = intel_engine_in_guc_submission_mode(engine);
 	int ret;
 
-	GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags);
+	ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
 	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
 
 	if (!intel_engine_pm_get_if_awake(engine))
@@ -1085,14 +1120,14 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
 			   "Resetting %s for %s\n", engine->name, msg);
 	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
 
-	if (!engine->gt->uc.guc.execbuf_client)
+	if (!uses_guc)
 		ret = intel_gt_reset_engine(engine);
 	else
 		ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
 	if (ret) {
 		/* If we fail here, we expect to fallback to a global reset */
 		DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
-				 engine->gt->uc.guc.execbuf_client ? "GuC " : "",
+				 uses_guc ? "GuC " : "",
 				 engine->name, ret);
 		goto out;
 	}
@@ -1195,7 +1230,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
 	engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
 
 	if (flags & I915_ERROR_CAPTURE) {
-		i915_capture_error_state(gt->i915, engine_mask, msg);
+		i915_capture_error_state(gt->i915);
 		intel_gt_clear_error_registers(gt, engine_mask);
 	}
 
@@ -1288,10 +1323,10 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
 	if (!intel_gt_is_wedged(gt))
 		return 0;
 
-	/* Reset still in progress? Maybe we will recover? */
-	if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
+	if (intel_gt_has_init_error(gt))
 		return -EIO;
 
+	/* Reset still in progress? Maybe we will recover? */
 	if (wait_event_interruptible(gt->reset.queue,
 				     !test_bit(I915_RESET_BACKOFF,
 					       &gt->reset.flags)))
@@ -1313,6 +1348,9 @@ void intel_gt_init_reset(struct intel_gt *gt)
 	init_waitqueue_head(&gt->reset.queue);
 	mutex_init(&gt->reset.mutex);
 	init_srcu_struct(&gt->reset.backoff_srcu);
+
+	/* no GPU until we are ready! */
+	__set_bit(I915_WEDGED, &gt->reset.flags);
 }
 
 void intel_gt_fini_reset(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 9302621..bc44fe8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -33,6 +33,7 @@
 
 #include "gem/i915_gem_context.h"
 
+#include "gen6_ppgtt.h"
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_context.h"
@@ -362,6 +363,12 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 	 */
 	flags |= PIPE_CONTROL_CS_STALL;
 
+	/*
+	 * CS_STALL suggests at least a post-sync write.
+	 */
+	flags |= PIPE_CONTROL_QW_WRITE;
+	flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
 	/* Just flush everything.  Experiments have shown that reducing the
 	 * number of bits based on the write domains has little performance
 	 * impact.
@@ -380,13 +387,6 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
-		/*
-		 * TLB invalidate requires a post-sync write.
-		 */
-		flags |= PIPE_CONTROL_QW_WRITE;
-		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
-
-		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
 
 		/* Workaround: we must issue a pipe_control with CS-stall bit
 		 * set before a pipe_control command that has the state cache
@@ -454,7 +454,8 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
 	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
 
-	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+	*cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
+		MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
 	*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
 	*cs++ = rq->fence.seqno;
 
@@ -496,14 +497,13 @@ static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
 
 static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
 	u32 addr;
 
 	addr = lower_32_bits(phys);
-	if (INTEL_GEN(dev_priv) >= 4)
+	if (INTEL_GEN(engine->i915) >= 4)
 		addr |= (phys >> 28) & 0xf0;
 
-	I915_WRITE(HWS_PGA, addr);
+	intel_uncore_write(engine->uncore, HWS_PGA, addr);
 }
 
 static struct page *status_page(struct intel_engine_cs *engine)
@@ -522,14 +522,13 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 
 static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
 	i915_reg_t hwsp;
 
 	/*
 	 * The ring status page addresses are no longer next to the rest of
 	 * the ring registers as of gen7.
 	 */
-	if (IS_GEN(dev_priv, 7)) {
+	if (IS_GEN(engine->i915, 7)) {
 		switch (engine->id) {
 		/*
 		 * No more rings exist on Gen7. Default case is only to shut up
@@ -551,14 +550,14 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
 			hwsp = VEBOX_HWS_PGA_GEN7;
 			break;
 		}
-	} else if (IS_GEN(dev_priv, 6)) {
+	} else if (IS_GEN(engine->i915, 6)) {
 		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
 	} else {
 		hwsp = RING_HWS_PGA(engine->mmio_base);
 	}
 
-	I915_WRITE(hwsp, offset);
-	POSTING_READ(hwsp);
+	intel_uncore_write(engine->uncore, hwsp, offset);
+	intel_uncore_posting_read(engine->uncore, hwsp);
 }
 
 static void flush_cs_tlb(struct intel_engine_cs *engine)
@@ -633,8 +632,8 @@ static int xcs_resume(struct intel_engine_cs *engine)
 	struct intel_ring *ring = engine->legacy.ring;
 	int ret = 0;
 
-	GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
-		  engine->name, ring->head, ring->tail);
+	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
+		     ring->head, ring->tail);
 
 	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
 
@@ -721,7 +720,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
 	}
 
 	/* Papering over lost _interrupts_ immediately following the restart */
-	intel_engine_queue_breadcrumbs(engine);
+	intel_engine_signal_breadcrumbs(engine);
 out:
 	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
 
@@ -747,10 +746,10 @@ static void reset_prepare(struct intel_engine_cs *engine)
 	 *
 	 * FIXME: Wa for more modern gens needs to be validated
 	 */
-	GEM_TRACE("%s\n", engine->name);
+	ENGINE_TRACE(engine, "\n");
 
 	if (intel_engine_stop_cs(engine))
-		GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
+		ENGINE_TRACE(engine, "timed out on STOP_RING\n");
 
 	intel_uncore_write_fw(uncore,
 			      RING_HEAD(base),
@@ -766,12 +765,11 @@ static void reset_prepare(struct intel_engine_cs *engine)
 
 	/* Check acts as a post */
 	if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
-		GEM_TRACE("%s: ring head [%x] not parked\n",
-			  engine->name,
-			  intel_uncore_read_fw(uncore, RING_HEAD(base)));
+		ENGINE_TRACE(engine, "ring head [%x] not parked\n",
+			     intel_uncore_read_fw(uncore, RING_HEAD(base)));
 }
 
-static void reset_ring(struct intel_engine_cs *engine, bool stalled)
+static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
 {
 	struct i915_request *pos, *rq;
 	unsigned long flags;
@@ -842,7 +840,8 @@ static void reset_finish(struct intel_engine_cs *engine)
 
 static int rcs_resume(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
+	struct intel_uncore *uncore = engine->uncore;
 
 	/*
 	 * Disable CONSTANT_BUFFER before it is loaded from the context
@@ -854,13 +853,14 @@ static int rcs_resume(struct intel_engine_cs *engine)
 	 * they are already accustomed to from before contexts were
 	 * enabled.
 	 */
-	if (IS_GEN(dev_priv, 4))
-		I915_WRITE(ECOSKPD,
+	if (IS_GEN(i915, 4))
+		intel_uncore_write(uncore, ECOSKPD,
 			   _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
 
 	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
-	if (IS_GEN_RANGE(dev_priv, 4, 6))
-		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
+	if (IS_GEN_RANGE(i915, 4, 6))
+		intel_uncore_write(uncore, MI_MODE,
+				   _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 
 	/* We need to disable the AsyncFlip performance optimisations in order
 	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
@@ -868,38 +868,40 @@ static int rcs_resume(struct intel_engine_cs *engine)
 	 *
 	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
 	 */
-	if (IS_GEN_RANGE(dev_priv, 6, 7))
-		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+	if (IS_GEN_RANGE(i915, 6, 7))
+		intel_uncore_write(uncore, MI_MODE,
+				   _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
 	/* Required for the hardware to program scanline values for waiting */
 	/* WaEnableFlushTlbInvalidationMode:snb */
-	if (IS_GEN(dev_priv, 6))
-		I915_WRITE(GFX_MODE,
+	if (IS_GEN(i915, 6))
+		intel_uncore_write(uncore, GFX_MODE,
 			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
 
 	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
-	if (IS_GEN(dev_priv, 7))
-		I915_WRITE(GFX_MODE_GEN7,
+	if (IS_GEN(i915, 7))
+		intel_uncore_write(uncore, GFX_MODE_GEN7,
 			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
 			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
-	if (IS_GEN(dev_priv, 6)) {
+	if (IS_GEN(i915, 6)) {
 		/* From the Sandybridge PRM, volume 1 part 3, page 24:
 		 * "If this bit is set, STCunit will have LRA as replacement
 		 *  policy. [...] This bit must be reset.  LRA replacement
 		 *  policy is not supported."
 		 */
-		I915_WRITE(CACHE_MODE_0,
+		intel_uncore_write(uncore, CACHE_MODE_0,
 			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 	}
 
-	if (IS_GEN_RANGE(dev_priv, 6, 7))
-		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+	if (IS_GEN_RANGE(i915, 6, 7))
+		intel_uncore_write(uncore, INSTPM,
+				   _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
 	return xcs_resume(engine);
 }
 
-static void cancel_requests(struct intel_engine_cs *engine)
+static void reset_cancel(struct intel_engine_cs *engine)
 {
 	struct i915_request *request;
 	unsigned long flags;
@@ -1318,6 +1320,8 @@ static int ring_context_alloc(struct intel_context *ce)
 			return PTR_ERR(vma);
 
 		ce->state = vma;
+		if (engine->default_state)
+			__set_bit(CONTEXT_VALID_BIT, &ce->flags);
 	}
 
 	return 0;
@@ -1325,26 +1329,12 @@ static int ring_context_alloc(struct intel_context *ce)
 
 static int ring_context_pin(struct intel_context *ce)
 {
-	int err;
-
-	err = intel_context_active_acquire(ce);
-	if (err)
-		return err;
-
-	err = __context_pin_ppgtt(ce);
-	if (err)
-		goto err_active;
-
-	return 0;
-
-err_active:
-	intel_context_active_release(ce);
-	return err;
+	return __context_pin_ppgtt(ce);
 }
 
 static void ring_context_reset(struct intel_context *ce)
 {
-	intel_ring_reset(ce->ring, 0);
+	intel_ring_reset(ce->ring, ce->ring->emit);
 }
 
 static const struct intel_context_ops ring_context_ops = {
@@ -1360,46 +1350,38 @@ static const struct intel_context_ops ring_context_ops = {
 	.destroy = ring_context_destroy,
 };
 
-static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt)
+static int load_pd_dir(struct i915_request *rq,
+		       const struct i915_ppgtt *ppgtt,
+		       u32 valid)
 {
 	const struct intel_engine_cs * const engine = rq->engine;
 	u32 *cs;
 
-	cs = intel_ring_begin(rq, 6);
+	cs = intel_ring_begin(rq, 12);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
 	*cs++ = MI_LOAD_REGISTER_IMM(1);
 	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
-	*cs++ = PP_DIR_DCLV_2G;
+	*cs++ = valid;
 
 	*cs++ = MI_LOAD_REGISTER_IMM(1);
 	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
 	*cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;
 
-	intel_ring_advance(rq, cs);
-
-	return 0;
-}
-
-static int flush_pd_dir(struct i915_request *rq)
-{
-	const struct intel_engine_cs * const engine = rq->engine;
-	u32 *cs;
-
-	cs = intel_ring_begin(rq, 4);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	/* Stall until the page table load is complete */
+	/* Stall until the page table load is complete? */
 	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
 	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
-	*cs++ = intel_gt_scratch_offset(rq->engine->gt,
+	*cs++ = intel_gt_scratch_offset(engine->gt,
 					INTEL_GT_SCRATCH_FIELD_DEFAULT);
-	*cs++ = MI_NOOP;
+
+	*cs++ = MI_LOAD_REGISTER_IMM(1);
+	*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
+	*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);
 
 	intel_ring_advance(rq, cs);
-	return 0;
+
+	return rq->engine->emit_flush(rq, EMIT_FLUSH);
 }
 
 static inline int mi_set_context(struct i915_request *rq, u32 flags)
@@ -1477,7 +1459,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 
 	*cs++ = MI_NOOP;
 	*cs++ = MI_SET_CONTEXT;
-	*cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
+	*cs++ = i915_ggtt_offset(rq->context->state) | flags;
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -1547,10 +1529,10 @@ static int remap_l3_slice(struct i915_request *rq, int slice)
 
 static int remap_l3(struct i915_request *rq)
 {
-	struct i915_gem_context *ctx = rq->gem_context;
+	struct i915_gem_context *ctx = i915_request_gem_context(rq);
 	int i, err;
 
-	if (!ctx->remap_slice)
+	if (!ctx || !ctx->remap_slice)
 		return 0;
 
 	for (i = 0; i < MAX_L3_SLICES; i++) {
@@ -1566,19 +1548,42 @@ static int remap_l3(struct i915_request *rq)
 	return 0;
 }
 
+static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
+{
+	int ret;
+
+	if (!vm)
+		return 0;
+
+	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
+	if (ret)
+		return ret;
+
+	/*
+	 * Not only do we need a full barrier (post-sync write) after
+	 * invalidating the TLBs, but we need to wait a little bit
+	 * longer. Whether this is merely delaying us, or the
+	 * subsequent flush is a key part of serialising with the
+	 * post-sync op, this extra pass appears vital before a
+	 * mm switch!
+	 */
+	ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm), PP_DIR_DCLV_2G);
+	if (ret)
+		return ret;
+
+	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+}
+
 static int switch_context(struct i915_request *rq)
 {
-	struct intel_context *ce = rq->hw_context;
-	struct i915_address_space *vm = vm_alias(ce);
+	struct intel_context *ce = rq->context;
 	int ret;
 
 	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
 
-	if (vm) {
-		ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm));
-		if (ret)
-			return ret;
-	}
+	ret = switch_mm(rq, vm_alias(ce));
+	if (ret)
+		return ret;
 
 	if (ce->state) {
 		u32 flags;
@@ -1590,7 +1595,7 @@ static int switch_context(struct i915_request *rq)
 		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);
 
 		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
-		if (!i915_gem_context_is_kernel(rq->gem_context))
+		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
 			flags |= MI_RESTORE_EXT_STATE_EN;
 		else
 			flags |= MI_RESTORE_INHIBIT;
@@ -1600,34 +1605,6 @@ static int switch_context(struct i915_request *rq)
 			return ret;
 	}
 
-	if (vm) {
-		struct intel_engine_cs *engine = rq->engine;
-
-		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
-		if (ret)
-			return ret;
-
-		ret = flush_pd_dir(rq);
-		if (ret)
-			return ret;
-
-		/*
-		 * Not only do we need a full barrier (post-sync write) after
-		 * invalidating the TLBs, but we need to wait a little bit
-		 * longer. Whether this is merely delaying us, or the
-		 * subsequent flush is a key part of serialising with the
-		 * post-sync op, this extra pass appears vital before a
-		 * mm switch!
-		 */
-		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
-		if (ret)
-			return ret;
-
-		ret = engine->emit_flush(rq, EMIT_FLUSH);
-		if (ret)
-			return ret;
-	}
-
 	ret = remap_l3(rq);
 	if (ret)
 		return ret;
@@ -1639,7 +1616,7 @@ static int ring_request_alloc(struct i915_request *request)
 {
 	int ret;
 
-	GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
+	GEM_BUG_ON(!intel_context_is_pinned(request->context));
 	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);
 
 	/*
@@ -1795,7 +1772,6 @@ static int gen6_ring_flush(struct i915_request *rq, u32 mode)
 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
 {
 	engine->submit_request = i9xx_submit_request;
-	engine->cancel_requests = cancel_requests;
 
 	engine->park = NULL;
 	engine->unpark = NULL;
@@ -1807,7 +1783,7 @@ static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
 	engine->submit_request = gen6_bsd_submit_request;
 }
 
-static void ring_destroy(struct intel_engine_cs *engine)
+static void ring_release(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 
@@ -1821,8 +1797,6 @@ static void ring_destroy(struct intel_engine_cs *engine)
 
 	intel_timeline_unpin(engine->legacy.timeline);
 	intel_timeline_put(engine->legacy.timeline);
-
-	kfree(engine);
 }
 
 static void setup_irq(struct intel_engine_cs *engine)
@@ -1853,11 +1827,10 @@ static void setup_common(struct intel_engine_cs *engine)
 
 	setup_irq(engine);
 
-	engine->destroy = ring_destroy;
-
 	engine->resume = xcs_resume;
 	engine->reset.prepare = reset_prepare;
-	engine->reset.reset = reset_ring;
+	engine->reset.rewind = reset_rewind;
+	engine->reset.cancel = reset_cancel;
 	engine->reset.finish = reset_finish;
 
 	engine->cops = &ring_context_ops;
@@ -1968,6 +1941,10 @@ static void setup_vecs(struct intel_engine_cs *engine)
 
 int intel_ring_submission_setup(struct intel_engine_cs *engine)
 {
+	struct intel_timeline *timeline;
+	struct intel_ring *ring;
+	int err;
+
 	setup_common(engine);
 
 	switch (engine->class) {
@@ -1988,15 +1965,6 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 		return -ENODEV;
 	}
 
-	return 0;
-}
-
-int intel_ring_submission_init(struct intel_engine_cs *engine)
-{
-	struct intel_timeline *timeline;
-	struct intel_ring *ring;
-	int err;
-
 	timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
 	if (IS_ERR(timeline)) {
 		err = PTR_ERR(timeline);
@@ -2022,16 +1990,13 @@ int intel_ring_submission_init(struct intel_engine_cs *engine)
 	engine->legacy.ring = ring;
 	engine->legacy.timeline = timeline;
 
-	err = intel_engine_init_common(engine);
-	if (err)
-		goto err_ring_unpin;
-
 	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
 
+	/* Finally, take ownership and responsibility for cleanup! */
+	engine->release = ring_release;
+
 	return 0;
 
-err_ring_unpin:
-	intel_ring_unpin(ring);
 err_ring:
 	intel_ring_put(ring);
 err_timeline_unpin:
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 20d6ee1..d2a3d93 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -37,6 +37,11 @@ static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
 	return mask & ~rps->pm_intrmsk_mbz;
 }
 
+static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+{
+	intel_uncore_write_fw(uncore, reg, val);
+}
+
 static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
 {
 	u32 mask = 0;
@@ -78,8 +83,7 @@ static void rps_enable_interrupts(struct intel_rps *rps)
 	gen6_gt_pm_enable_irq(gt, rps->pm_events);
 	spin_unlock_irq(&gt->irq_lock);
 
-	intel_uncore_write(gt->uncore, GEN6_PMINTRMSK,
-			   rps_pm_mask(rps, rps->cur_freq));
+	set(gt->uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, rps->cur_freq));
 }
 
 static void gen6_rps_reset_interrupts(struct intel_rps *rps)
@@ -113,8 +117,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
 
 	rps->pm_events = 0;
 
-	intel_uncore_write(gt->uncore, GEN6_PMINTRMSK,
-			   rps_pm_sanitize_mask(rps, ~0u));
+	set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
 
 	spin_lock_irq(&gt->irq_lock);
 	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
@@ -573,25 +576,21 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
 	if (IS_VALLEYVIEW(i915))
 		goto skip_hw_write;
 
-	intel_uncore_write(uncore, GEN6_RP_UP_EI,
-			   GT_INTERVAL_FROM_US(i915, ei_up));
-	intel_uncore_write(uncore, GEN6_RP_UP_THRESHOLD,
-			   GT_INTERVAL_FROM_US(i915,
-					       ei_up * threshold_up / 100));
+	set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up));
+	set(uncore, GEN6_RP_UP_THRESHOLD,
+	    GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100));
 
-	intel_uncore_write(uncore, GEN6_RP_DOWN_EI,
-			   GT_INTERVAL_FROM_US(i915, ei_down));
-	intel_uncore_write(uncore, GEN6_RP_DOWN_THRESHOLD,
-			   GT_INTERVAL_FROM_US(i915,
-					       ei_down * threshold_down / 100));
+	set(uncore, GEN6_RP_DOWN_EI, GT_INTERVAL_FROM_US(i915, ei_down));
+	set(uncore, GEN6_RP_DOWN_THRESHOLD,
+	    GT_INTERVAL_FROM_US(i915, ei_down * threshold_down / 100));
 
-	intel_uncore_write(uncore, GEN6_RP_CONTROL,
-			   (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
-			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-			   GEN6_RP_MEDIA_IS_GFX |
-			   GEN6_RP_ENABLE |
-			   GEN6_RP_UP_BUSY_AVG |
-			   GEN6_RP_DOWN_IDLE_AVG);
+	set(uncore, GEN6_RP_CONTROL,
+	    (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+	    GEN6_RP_MEDIA_HW_NORMAL_MODE |
+	    GEN6_RP_MEDIA_IS_GFX |
+	    GEN6_RP_ENABLE |
+	    GEN6_RP_UP_BUSY_AVG |
+	    GEN6_RP_DOWN_IDLE_AVG);
 
 skip_hw_write:
 	rps->power.mode = new_power;
@@ -666,7 +665,7 @@ static int gen6_rps_set(struct intel_rps *rps, u8 val)
 		swreq = (GEN6_FREQUENCY(val) |
 			 GEN6_OFFSET(0) |
 			 GEN6_AGGRESSIVE_TURBO);
-	intel_uncore_write(uncore, GEN6_RPNSWREQ, swreq);
+	set(uncore, GEN6_RPNSWREQ, swreq);
 
 	return 0;
 }
@@ -683,7 +682,7 @@ static int vlv_rps_set(struct intel_rps *rps, u8 val)
 	return err;
 }
 
-static int rps_set(struct intel_rps *rps, u8 val)
+static int rps_set(struct intel_rps *rps, u8 val, bool update)
 {
 	struct drm_i915_private *i915 = rps_to_i915(rps);
 	int err;
@@ -701,7 +700,8 @@ static int rps_set(struct intel_rps *rps, u8 val)
 	if (err)
 		return err;
 
-	gen6_rps_set_thresholds(rps, val);
+	if (update)
+		gen6_rps_set_thresholds(rps, val);
 	rps->last_freq = val;
 
 	return 0;
@@ -761,7 +761,7 @@ void intel_rps_park(struct intel_rps *rps)
 	 * power than the render powerwell.
 	 */
 	intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
-	rps_set(rps, rps->idle_freq);
+	rps_set(rps, rps->idle_freq, false);
 	intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
 }
 
@@ -777,7 +777,7 @@ void intel_rps_boost(struct i915_request *rq)
 	spin_lock_irqsave(&rq->lock, flags);
 	if (!i915_request_has_waitboost(rq) &&
 	    !dma_fence_is_signaled_locked(&rq->fence)) {
-		rq->flags |= I915_REQUEST_WAITBOOST;
+		set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
 
 		if (!atomic_fetch_inc(&rps->num_waiters) &&
 		    READ_ONCE(rps->cur_freq) < rps->boost_freq)
@@ -790,14 +790,16 @@ void intel_rps_boost(struct i915_request *rq)
 
 int intel_rps_set(struct intel_rps *rps, u8 val)
 {
-	int err = 0;
+	int err;
 
 	lockdep_assert_held(&rps->lock);
 	GEM_BUG_ON(val > rps->max_freq);
 	GEM_BUG_ON(val < rps->min_freq);
 
 	if (rps->active) {
-		err = rps_set(rps, val);
+		err = rps_set(rps, val, true);
+		if (err)
+			return err;
 
 		/*
 		 * Make sure we continue to get interrupts
@@ -806,18 +808,15 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
 		if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
 			struct intel_uncore *uncore = rps_to_uncore(rps);
 
-			intel_uncore_write(uncore, GEN6_RP_INTERRUPT_LIMITS,
-					   rps_limits(rps, val));
+			set(uncore,
+			    GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));
 
-			intel_uncore_write(uncore, GEN6_PMINTRMSK,
-					   rps_pm_mask(rps, val));
+			set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
 		}
 	}
 
-	if (err == 0)
-		rps->cur_freq = val;
-
-	return err;
+	rps->cur_freq = val;
+	return 0;
 }
 
 static void gen6_rps_init(struct intel_rps *rps)
@@ -878,7 +877,7 @@ static bool rps_reset(struct intel_rps *rps)
 	rps->power.mode = -1;
 	rps->last_freq = -1;
 
-	if (rps_set(rps, rps->min_freq)) {
+	if (rps_set(rps, rps->min_freq, true)) {
 		DRM_ERROR("Failed to reset RPS to initial values\n");
 		return false;
 	}
@@ -1201,7 +1200,7 @@ void intel_rps_enable(struct intel_rps *rps)
 
 static void gen6_rps_disable(struct intel_rps *rps)
 {
-	intel_uncore_write(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
+	set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
 }
 
 void intel_rps_disable(struct intel_rps *rps)
@@ -1566,7 +1565,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
 		return;
 
 	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(gt->engine[VECS0]);
+		intel_engine_signal_breadcrumbs(gt->engine[VECS0]);
 
 	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
 		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@@ -1663,23 +1662,53 @@ void intel_rps_init(struct intel_rps *rps)
 	if (INTEL_GEN(i915) <= 7)
 		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
 
-	if (INTEL_GEN(i915) >= 8)
+	if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11)
 		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 }
 
-u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat)
+u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
 {
 	struct drm_i915_private *i915 = rps_to_i915(rps);
 	u32 cagf;
 
-	if (INTEL_GEN(i915) >= 9)
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		cagf = (rpstat >> 8) & 0xff;
+	else if (INTEL_GEN(i915) >= 9)
 		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
 	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
 		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
 	else
 		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
 
-	return  cagf;
+	return cagf;
+}
+
+static u32 read_cagf(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 freq;
+
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		vlv_punit_get(i915);
+		freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+		vlv_punit_put(i915);
+	} else {
+		freq = intel_uncore_read(rps_to_gt(rps)->uncore, GEN6_RPSTAT1);
+	}
+
+	return intel_rps_get_cagf(rps, freq);
+}
+
+u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
+{
+	struct intel_runtime_pm *rpm = rps_to_gt(rps)->uncore->rpm;
+	intel_wakeref_t wakeref;
+	u32 freq = 0;
+
+	with_intel_runtime_pm_if_in_use(rpm, wakeref)
+		freq = intel_gpu_freq(rps, read_cagf(rps));
+
+	return freq;
 }
 
 /* External interface for intel_ips.ko */
@@ -1715,6 +1744,7 @@ void intel_rps_driver_register(struct intel_rps *rps)
 	 * set up, to avoid intel-ips sneaking in and reading bogus values.
 	 */
 	if (IS_GEN(gt->i915, 5)) {
+		GEM_BUG_ON(ips_mchdev);
 		rcu_assign_pointer(ips_mchdev, gt->i915);
 		ips_ping_for_i915_load();
 	}
@@ -1722,7 +1752,8 @@ void intel_rps_driver_register(struct intel_rps *rps)
 
 void intel_rps_driver_unregister(struct intel_rps *rps)
 {
-	rcu_assign_pointer(ips_mchdev, NULL);
+	if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
+		rcu_assign_pointer(ips_mchdev, NULL);
 }
 
 static struct drm_i915_private *mchdev_get(void)
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 9518c66c..dfa98194 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -29,7 +29,8 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
 
 int intel_gpu_freq(struct intel_rps *rps, int val);
 int intel_freq_opcode(struct intel_rps *rps, int val);
-u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat1);
+u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
+u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
 
 void gen5_rps_irq_handler(struct intel_rps *rps);
 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 649798c..8771652 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -15,6 +15,9 @@
 #define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
 #define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
 
+#define CACHELINE_BITS 6
+#define CACHELINE_FREE CACHELINE_BITS
+
 struct intel_timeline_hwsp {
 	struct intel_gt *gt;
 	struct intel_gt_timelines *gt_timelines;
@@ -23,14 +26,6 @@ struct intel_timeline_hwsp {
 	u64 free_bitmap;
 };
 
-struct intel_timeline_cacheline {
-	struct i915_active active;
-	struct intel_timeline_hwsp *hwsp;
-	void *vaddr;
-#define CACHELINE_BITS 6
-#define CACHELINE_FREE CACHELINE_BITS
-};
-
 static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
 {
 	struct drm_i915_private *i915 = gt->i915;
@@ -133,7 +128,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
 	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
 
 	i915_active_fini(&cl->active);
-	kfree(cl);
+	kfree_rcu(cl, rcu);
 }
 
 __i915_active_call
@@ -254,7 +249,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
 
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
+	INIT_ACTIVE_FENCE(&timeline->last_request);
 	INIT_LIST_HEAD(&timeline->requests);
 
 	i915_syncmap_init(&timeline->sync);
@@ -262,7 +257,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
 	return 0;
 }
 
-static void timelines_init(struct intel_gt *gt)
+void intel_gt_init_timelines(struct intel_gt *gt)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 
@@ -273,11 +268,6 @@ static void timelines_init(struct intel_gt *gt)
 	INIT_LIST_HEAD(&timelines->hwsp_free_list);
 }
 
-void intel_timelines_init(struct drm_i915_private *i915)
-{
-	timelines_init(&i915->gt);
-}
-
 void intel_timeline_fini(struct intel_timeline *timeline)
 {
 	GEM_BUG_ON(atomic_read(&timeline->pin_count));
@@ -338,7 +328,6 @@ int intel_timeline_pin(struct intel_timeline *tl)
 void intel_timeline_enter(struct intel_timeline *tl)
 {
 	struct intel_gt_timelines *timelines = &tl->gt->timelines;
-	unsigned long flags;
 
 	/*
 	 * Pretend we are serialised by the timeline->mutex.
@@ -359,21 +348,19 @@ void intel_timeline_enter(struct intel_timeline *tl)
 	 * use atomic to manipulate tl->active_count.
 	 */
 	lockdep_assert_held(&tl->mutex);
-	GEM_BUG_ON(!atomic_read(&tl->pin_count));
 
 	if (atomic_add_unless(&tl->active_count, 1, 0))
 		return;
 
-	spin_lock_irqsave(&timelines->lock, flags);
+	spin_lock(&timelines->lock);
 	if (!atomic_fetch_inc(&tl->active_count))
 		list_add_tail(&tl->link, &timelines->active_list);
-	spin_unlock_irqrestore(&timelines->lock, flags);
+	spin_unlock(&timelines->lock);
 }
 
 void intel_timeline_exit(struct intel_timeline *tl)
 {
 	struct intel_gt_timelines *timelines = &tl->gt->timelines;
-	unsigned long flags;
 
 	/* See intel_timeline_enter() */
 	lockdep_assert_held(&tl->mutex);
@@ -382,10 +369,10 @@ void intel_timeline_exit(struct intel_timeline *tl)
 	if (atomic_add_unless(&tl->active_count, -1, 1))
 		return;
 
-	spin_lock_irqsave(&timelines->lock, flags);
+	spin_lock(&timelines->lock);
 	if (atomic_dec_and_test(&tl->active_count))
 		list_del(&tl->link);
-	spin_unlock_irqrestore(&timelines->lock, flags);
+	spin_unlock(&timelines->lock);
 
 	/*
 	 * Since this timeline is idle, all barriers upon which we were waiting
@@ -521,46 +508,35 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 			     struct i915_request *to,
 			     u32 *hwsp)
 {
-	struct intel_timeline *tl;
+	struct intel_timeline_cacheline *cl;
 	int err;
 
+	GEM_BUG_ON(!rcu_access_pointer(from->hwsp_cacheline));
+
 	rcu_read_lock();
-	tl = rcu_dereference(from->timeline);
-	if (i915_request_completed(from) || !kref_get_unless_zero(&tl->kref))
-		tl = NULL;
+	cl = rcu_dereference(from->hwsp_cacheline);
+	if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
+		goto unlock; /* seqno wrapped and completed! */
+	if (unlikely(i915_request_completed(from)))
+		goto release;
 	rcu_read_unlock();
-	if (!tl) /* already completed */
-		return 1;
 
-	GEM_BUG_ON(rcu_access_pointer(to->timeline) == tl);
+	err = cacheline_ref(cl, to);
+	if (err)
+		goto out;
 
-	err = -EBUSY;
-	if (mutex_trylock(&tl->mutex)) {
-		struct intel_timeline_cacheline *cl = from->hwsp_cacheline;
+	*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
+		ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;
 
-		if (i915_request_completed(from)) {
-			err = 1;
-			goto unlock;
-		}
-
-		err = cacheline_ref(cl, to);
-		if (err)
-			goto unlock;
-
-		if (likely(cl == tl->hwsp_cacheline)) {
-			*hwsp = tl->hwsp_offset;
-		} else { /* across a seqno wrap, recover the original offset */
-			*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
-				ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
-				CACHELINE_BYTES;
-		}
-
-unlock:
-		mutex_unlock(&tl->mutex);
-	}
-	intel_timeline_put(tl);
-
+out:
+	i915_active_release(&cl->active);
 	return err;
+
+release:
+	i915_active_release(&cl->active);
+unlock:
+	rcu_read_unlock();
+	return 1;
 }
 
 void intel_timeline_unpin(struct intel_timeline *tl)
@@ -583,7 +559,7 @@ void __intel_timeline_free(struct kref *kref)
 	kfree_rcu(timeline, rcu);
 }
 
-static void timelines_fini(struct intel_gt *gt)
+void intel_gt_fini_timelines(struct intel_gt *gt)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 
@@ -591,11 +567,6 @@ static void timelines_fini(struct intel_gt *gt)
 	GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
 }
 
-void intel_timelines_fini(struct drm_i915_private *i915)
-{
-	timelines_fini(&i915->gt);
-}
-
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "gt/selftests/mock_timeline.c"
 #include "gt/selftest_timeline.c"
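Note on the hunk above: the rewritten intel_timeline_read_hwsp() relies on a lookup pattern that recurs in this series, namely dereference an RCU-protected pointer, then take a reference only if the object is still in use (i915_active_acquire_if_busy()), and bail out cleanly if it has already gone idle. The userspace sketch below illustrates only the "increment the refcount if it is still non-zero" step with C11 atomics; it is not the driver code and the names are invented.

/* Illustration only: take a reference only while the object is still busy. */
#include <stdatomic.h>
#include <stdio.h>

struct busy_object {
	atomic_uint count;	/* 0 means idle: no new references allowed */
};

static int acquire_if_busy(struct busy_object *obj)
{
	unsigned int old = atomic_load(&obj->count);

	do {
		if (old == 0)
			return 0;	/* already idle, caller must bail */
	} while (!atomic_compare_exchange_weak(&obj->count, &old, old + 1));

	return 1;
}

static void release(struct busy_object *obj)
{
	atomic_fetch_sub(&obj->count, 1);
}

int main(void)
{
	struct busy_object obj = { .count = 1 };

	if (acquire_if_busy(&obj)) {
		printf("acquired, count now %u\n", atomic_load(&obj.count));
		release(&obj);
	}
	release(&obj);	/* drop the initial reference: the object goes idle */
	printf("after idle, acquire_if_busy() = %d\n", acquire_if_busy(&obj));
	return 0;
}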
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index f583af1..f5b7ead 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -88,7 +88,7 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 			     struct i915_request *until,
 			     u32 *hwsp_offset);
 
-void intel_timelines_init(struct drm_i915_private *i915);
-void intel_timelines_fini(struct drm_i915_private *i915);
+void intel_gt_init_timelines(struct intel_gt *gt);
+void intel_gt_fini_timelines(struct intel_gt *gt);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index aaf15cb..02181c5 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -10,14 +10,15 @@
 #include <linux/list.h>
 #include <linux/kref.h>
 #include <linux/mutex.h>
+#include <linux/rcupdate.h>
 #include <linux/types.h>
 
 #include "i915_active_types.h"
 
-struct drm_i915_private;
 struct i915_vma;
-struct intel_timeline_cacheline;
 struct i915_syncmap;
+struct intel_gt;
+struct intel_timeline_hwsp;
 
 struct intel_timeline {
 	u64 fence_context;
@@ -87,4 +88,13 @@ struct intel_timeline {
 	struct rcu_head rcu;
 };
 
+struct intel_timeline_cacheline {
+	struct i915_active active;
+
+	struct intel_timeline_hwsp *hwsp;
+	void *vaddr;
+
+	struct rcu_head rcu;
+};
+
 #endif /* __I915_TIMELINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index e4bccc1..4e292d4 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -6,6 +6,7 @@
 
 #include "i915_drv.h"
 #include "intel_context.h"
+#include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_ring.h"
 #include "intel_workarounds.h"
@@ -146,21 +147,27 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
 	}
 }
 
-static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
-		   u32 val)
+static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+		   u32 val, u32 read_mask)
 {
 	struct i915_wa wa = {
 		.reg  = reg,
 		.mask = mask,
 		.val  = val,
-		.read = mask,
+		.read = read_mask,
 	};
 
 	_wa_add(wal, &wa);
 }
 
 static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+		   u32 val)
+{
+	wa_add(wal, reg, mask, val, mask);
+}
+
+static void
 wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
 	wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
@@ -247,7 +254,7 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
 
 	/* WaDisableDopClockGating:bdw
 	 *
-	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
+	 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
 	 * to disable EUTC clock gating.
 	 */
 	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
@@ -568,9 +575,24 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
 static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
 				     struct i915_wa_list *wal)
 {
+	u32 val;
+
 	/* Wa_1409142259:tgl */
 	WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
 			  GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+
+	/* Wa_1604555607:tgl */
+	val = intel_uncore_read(engine->uncore, FF_MODE2);
+	val &= ~FF_MODE2_TDS_TIMER_MASK;
+	val |= FF_MODE2_TDS_TIMER_128;
+	/*
+	 * FIXME: the FF_MODE2 register is not readable until TGL B0, so
+	 * verification of this workaround can only be enabled from later
+	 * steppings, where FF_MODE2 can be read back.
+	 */
+	wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, val,
+	       IS_TGL_REVID(engine->i915, TGL_REVID_A0, TGL_REVID_A0) ? 0 :
+			    FF_MODE2_TDS_TIMER_MASK);
 }
 
 static void
@@ -1315,6 +1337,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 			     GEN6_RC_SLEEP_PSMI_CONTROL,
 			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
 			     GEN8_RC_SEMA_IDLE_MSG_DISABLE);
+
+		/*
+		 * Wa_1606679103:tgl
+		 * (see also Wa_1606682166:icl)
+		 */
+		wa_write_or(wal,
+			    GEN7_SARCHKMD,
+			    GEN7_DISABLE_SAMPLER_PREFETCH);
 	}
 
 	if (IS_GEN(i915, 11)) {
@@ -1574,7 +1604,9 @@ static int engine_wa_list_verify(struct intel_context *ce,
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
+	intel_engine_pm_get(ce->engine);
 	rq = intel_context_create_request(ce);
+	intel_engine_pm_put(ce->engine);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_vma;
@@ -1584,16 +1616,17 @@ static int engine_wa_list_verify(struct intel_context *ce,
 	if (err)
 		goto err_vma;
 
+	i915_request_get(rq);
 	i915_request_add(rq);
 	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 		err = -ETIME;
-		goto err_vma;
+		goto err_rq;
 	}
 
 	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
 	if (IS_ERR(results)) {
 		err = PTR_ERR(results);
-		goto err_vma;
+		goto err_rq;
 	}
 
 	err = 0;
@@ -1607,6 +1640,8 @@ static int engine_wa_list_verify(struct intel_context *ce,
 
 	i915_gem_object_unpin_map(vma->obj);
 
+err_rq:
+	i915_request_put(rq);
 err_vma:
 	i915_vma_unpin(vma);
 	i915_vma_put(vma);
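Note on the hunk above: the wa_add() refactor separates the bits a workaround writes from the bits that are verified on readback, so a workaround such as Wa_1604555607 can pass a zero read mask on steppings where FF_MODE2 cannot be read. A small stand-alone model of that verification step follows; the struct name and values are illustrative, not the driver's.

/* Illustration: verify a workaround only through its read mask. */
#include <stdint.h>
#include <stdio.h>

struct fake_wa {
	uint32_t val;	/* value the workaround wrote */
	uint32_t read;	/* bits expected to read back; 0 = skip verification */
};

static int fake_wa_verify(const struct fake_wa *wa, uint32_t cur)
{
	/* With a zero read mask both sides collapse to 0, so this passes. */
	return (cur & wa->read) == (wa->val & wa->read);
}

int main(void)
{
	struct fake_wa checked = { .val = 0x00000080u, .read = 0x000000c0u };
	struct fake_wa skipped = { .val = 0x00000080u, .read = 0 };

	printf("checked ok=%d, skipped ok=%d\n",
	       fake_wa_verify(&checked, 0x00000080u),
	       fake_wa_verify(&skipped, 0xdeadbeefu));
	return 0;
}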
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 83f549d..a560b7e 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -77,7 +77,7 @@ static void advance(struct i915_request *request)
 	i915_request_mark_complete(request);
 	GEM_BUG_ON(!i915_request_completed(request));
 
-	intel_engine_queue_breadcrumbs(request->engine);
+	intel_engine_signal_breadcrumbs(request->engine);
 }
 
 static void hw_delay_complete(struct timer_list *t)
@@ -149,7 +149,11 @@ static int mock_context_alloc(struct intel_context *ce)
 
 static int mock_context_pin(struct intel_context *ce)
 {
-	return intel_context_active_acquire(ce);
+	return 0;
+}
+
+static void mock_context_reset(struct intel_context *ce)
+{
 }
 
 static const struct intel_context_ops mock_context_ops = {
@@ -161,6 +165,7 @@ static const struct intel_context_ops mock_context_ops = {
 	.enter = intel_context_enter_engine,
 	.exit = intel_context_exit_engine,
 
+	.reset = mock_context_reset,
 	.destroy = mock_context_destroy,
 };
 
@@ -207,16 +212,12 @@ static void mock_reset_prepare(struct intel_engine_cs *engine)
 {
 }
 
-static void mock_reset(struct intel_engine_cs *engine, bool stalled)
+static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 {
 	GEM_BUG_ON(stalled);
 }
 
-static void mock_reset_finish(struct intel_engine_cs *engine)
-{
-}
-
-static void mock_cancel_requests(struct intel_engine_cs *engine)
+static void mock_reset_cancel(struct intel_engine_cs *engine)
 {
 	struct i915_request *request;
 	unsigned long flags;
@@ -234,6 +235,24 @@ static void mock_cancel_requests(struct intel_engine_cs *engine)
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
+static void mock_reset_finish(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_engine_release(struct intel_engine_cs *engine)
+{
+	struct mock_engine *mock =
+		container_of(engine, typeof(*mock), base);
+
+	GEM_BUG_ON(timer_pending(&mock->hw_delay));
+
+	intel_context_unpin(engine->kernel_context);
+	intel_context_put(engine->kernel_context);
+
+	intel_engine_fini_retire(engine);
+	intel_engine_fini_breadcrumbs(engine);
+}
+
 struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 				    const char *name,
 				    int id)
@@ -265,9 +284,11 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.submit_request = mock_submit_request;
 
 	engine->base.reset.prepare = mock_reset_prepare;
-	engine->base.reset.reset = mock_reset;
+	engine->base.reset.rewind = mock_reset_rewind;
+	engine->base.reset.cancel = mock_reset_cancel;
 	engine->base.reset.finish = mock_reset_finish;
-	engine->base.cancel_requests = mock_cancel_requests;
+
+	engine->base.release = mock_engine_release;
 
 	i915->gt.engine[id] = &engine->base;
 	i915->gt.engine_class[0][id] = &engine->base;
@@ -290,6 +311,7 @@ int mock_engine_init(struct intel_engine_cs *engine)
 	intel_engine_init_breadcrumbs(engine);
 	intel_engine_init_execlists(engine);
 	intel_engine_init__pm(engine);
+	intel_engine_init_retire(engine);
 	intel_engine_pool_init(&engine->pool);
 
 	ce = create_kernel_context(engine);
@@ -321,18 +343,3 @@ void mock_engine_flush(struct intel_engine_cs *engine)
 void mock_engine_reset(struct intel_engine_cs *engine)
 {
 }
-
-void mock_engine_free(struct intel_engine_cs *engine)
-{
-	struct mock_engine *mock =
-		container_of(engine, typeof(*mock), base);
-
-	GEM_BUG_ON(timer_pending(&mock->hw_delay));
-
-	intel_context_unpin(engine->kernel_context);
-	intel_context_put(engine->kernel_context);
-
-	intel_engine_fini_breadcrumbs(engine);
-
-	kfree(engine);
-}
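Note on the hunk above: mock_engine_free() disappears and its teardown moves into engine->base.release, so the common engine destruction path invokes the mock backend's cleanup like any other backend. The generic shape of that pattern, an ops/base structure whose release hook frees backend-private state, is sketched below with invented names.

/* Illustration of a release() hook owned by the backend that created it. */
#include <stdio.h>
#include <stdlib.h>

struct fake_engine {
	void (*release)(struct fake_engine *engine);
	int id;
};

struct fake_mock_engine {
	struct fake_engine base;	/* must stay the first member */
	int private_state;
};

static void fake_mock_release(struct fake_engine *engine)
{
	/* Recover the containing mock engine and free its private state. */
	struct fake_mock_engine *mock = (struct fake_mock_engine *)engine;

	printf("releasing mock engine %d (state %d)\n",
	       engine->id, mock->private_state);
	free(mock);
}

static void fake_engine_destroy(struct fake_engine *engine)
{
	if (engine->release)
		engine->release(engine);	/* backend-specific teardown */
}

int main(void)
{
	struct fake_mock_engine *mock = calloc(1, sizeof(*mock));

	if (!mock)
		return 1;
	mock->base.id = 0;
	mock->base.release = fake_mock_release;
	mock->private_state = 42;

	fake_engine_destroy(&mock->base);
	return 0;
}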
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index bc720de..e874dfa 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -5,6 +5,7 @@
  */
 
 #include "i915_selftest.h"
+#include "intel_engine_heartbeat.h"
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 
@@ -47,35 +48,36 @@ static int context_sync(struct intel_context *ce)
 
 	mutex_lock(&tl->mutex);
 	do {
-		struct dma_fence *fence;
+		struct i915_request *rq;
 		long timeout;
 
-		fence = i915_active_fence_get(&tl->last_request);
-		if (!fence)
+		if (list_empty(&tl->requests))
 			break;
 
-		timeout = dma_fence_wait_timeout(fence, false, HZ / 10);
+		rq = list_last_entry(&tl->requests, typeof(*rq), link);
+		i915_request_get(rq);
+
+		timeout = i915_request_wait(rq, 0, HZ / 10);
 		if (timeout < 0)
 			err = timeout;
 		else
-			i915_request_retire_upto(to_request(fence));
+			i915_request_retire_upto(rq);
 
-		dma_fence_put(fence);
+		i915_request_put(rq);
 	} while (!err);
 	mutex_unlock(&tl->mutex);
 
 	return err;
 }
 
-static int __live_context_size(struct intel_engine_cs *engine,
-			       struct i915_gem_context *fixme)
+static int __live_context_size(struct intel_engine_cs *engine)
 {
 	struct intel_context *ce;
 	struct i915_request *rq;
 	void *vaddr;
 	int err;
 
-	ce = intel_context_create(fixme, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
@@ -118,7 +120,7 @@ static int __live_context_size(struct intel_engine_cs *engine,
 		goto err_unpin;
 
 	/* Force the context switch */
-	rq = i915_request_create(engine->kernel_context);
+	rq = intel_engine_create_kernel_request(engine);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_unpin;
@@ -143,7 +145,6 @@ static int live_context_size(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	enum intel_engine_id id;
 	int err = 0;
 
@@ -152,10 +153,6 @@ static int live_context_size(void *arg)
 	 * HW tries to write past the end of one.
 	 */
 
-	fixme = kernel_context(gt->i915);
-	if (IS_ERR(fixme))
-		return PTR_ERR(fixme);
-
 	for_each_engine(engine, gt, id) {
 		struct {
 			struct drm_i915_gem_object *state;
@@ -180,7 +177,7 @@ static int live_context_size(void *arg)
 		/* Overlaps with the execlists redzone */
 		engine->context_size += I915_GTT_PAGE_SIZE;
 
-		err = __live_context_size(engine, fixme);
+		err = __live_context_size(engine);
 
 		engine->context_size -= I915_GTT_PAGE_SIZE;
 
@@ -193,13 +190,12 @@ static int live_context_size(void *arg)
 			break;
 	}
 
-	kernel_context_close(fixme);
 	return err;
 }
 
-static int __live_active_context(struct intel_engine_cs *engine,
-				 struct i915_gem_context *fixme)
+static int __live_active_context(struct intel_engine_cs *engine)
 {
+	unsigned long saved_heartbeat;
 	struct intel_context *ce;
 	int pass;
 	int err;
@@ -223,40 +219,55 @@ static int __live_active_context(struct intel_engine_cs *engine,
 		return -EINVAL;
 	}
 
-	ce = intel_context_create(fixme, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
+	saved_heartbeat = engine->props.heartbeat_interval_ms;
+	engine->props.heartbeat_interval_ms = 0;
+
 	for (pass = 0; pass <= 2; pass++) {
 		struct i915_request *rq;
 
+		intel_engine_pm_get(engine);
+
 		rq = intel_context_create_request(ce);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
-			goto err;
+			goto out_engine;
 		}
 
 		err = request_sync(rq);
 		if (err)
-			goto err;
+			goto out_engine;
 
 		/* Context will be kept active until after an idle-barrier. */
 		if (i915_active_is_idle(&ce->active)) {
 			pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
 			       engine->name, pass);
 			err = -EINVAL;
-			goto err;
+			goto out_engine;
 		}
 
 		if (!intel_engine_pm_is_awake(engine)) {
 			pr_err("%s is asleep before idle-barrier\n",
 			       engine->name);
 			err = -EINVAL;
-			goto err;
+			goto out_engine;
 		}
+
+out_engine:
+		intel_engine_pm_put(engine);
+		if (err)
+			goto err;
 	}
 
 	/* Now make sure our idle-barriers are flushed */
+	err = intel_engine_flush_barriers(engine);
+	if (err)
+		goto err;
+
+	/* Wait for the barrier and in the process wait for engine to park */
 	err = context_sync(engine->kernel_context);
 	if (err)
 		goto err;
@@ -266,12 +277,15 @@ static int __live_active_context(struct intel_engine_cs *engine,
 		err = -EINVAL;
 	}
 
+	intel_engine_pm_flush(engine);
+
 	if (intel_engine_pm_is_awake(engine)) {
 		struct drm_printer p = drm_debug_printer(__func__);
 
 		intel_engine_dump(engine, &p,
-				  "%s is still awake after idle-barriers\n",
-				  engine->name);
+				  "%s is still awake:%d after idle-barriers\n",
+				  engine->name,
+				  atomic_read(&engine->wakeref.count));
 		GEM_TRACE_DUMP();
 
 		err = -EINVAL;
@@ -279,6 +293,7 @@ static int __live_active_context(struct intel_engine_cs *engine,
 	}
 
 err:
+	engine->props.heartbeat_interval_ms = saved_heartbeat;
 	intel_context_put(ce);
 	return err;
 }
@@ -287,23 +302,11 @@ static int live_active_context(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	enum intel_engine_id id;
-	struct drm_file *file;
 	int err = 0;
 
-	file = mock_file(gt->i915);
-	if (IS_ERR(file))
-		return PTR_ERR(file);
-
-	fixme = live_context(gt->i915, file);
-	if (IS_ERR(fixme)) {
-		err = PTR_ERR(fixme);
-		goto out_file;
-	}
-
 	for_each_engine(engine, gt, id) {
-		err = __live_active_context(engine, fixme);
+		err = __live_active_context(engine);
 		if (err)
 			break;
 
@@ -312,8 +315,6 @@ static int live_active_context(void *arg)
 			break;
 	}
 
-out_file:
-	mock_file_free(gt->i915, file);
 	return err;
 }
 
@@ -345,10 +346,10 @@ static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
 	return err;
 }
 
-static int __live_remote_context(struct intel_engine_cs *engine,
-				 struct i915_gem_context *fixme)
+static int __live_remote_context(struct intel_engine_cs *engine)
 {
 	struct intel_context *local, *remote;
+	unsigned long saved_heartbeat;
 	int pass;
 	int err;
 
@@ -360,16 +361,26 @@ static int __live_remote_context(struct intel_engine_cs *engine,
 	 * clobber the idle-barrier.
 	 */
 
-	remote = intel_context_create(fixme, engine);
+	if (intel_engine_pm_is_awake(engine)) {
+		pr_err("%s is awake before starting %s!\n",
+		       engine->name, __func__);
+		return -EINVAL;
+	}
+
+	remote = intel_context_create(engine);
 	if (IS_ERR(remote))
 		return PTR_ERR(remote);
 
-	local = intel_context_create(fixme, engine);
+	local = intel_context_create(engine);
 	if (IS_ERR(local)) {
 		err = PTR_ERR(local);
 		goto err_remote;
 	}
 
+	saved_heartbeat = engine->props.heartbeat_interval_ms;
+	engine->props.heartbeat_interval_ms = 0;
+	intel_engine_pm_get(engine);
+
 	for (pass = 0; pass <= 2; pass++) {
 		err = __remote_sync(local, remote);
 		if (err)
@@ -387,6 +398,9 @@ static int __live_remote_context(struct intel_engine_cs *engine,
 		}
 	}
 
+	intel_engine_pm_put(engine);
+	engine->props.heartbeat_interval_ms = saved_heartbeat;
+
 	intel_context_put(local);
 err_remote:
 	intel_context_put(remote);
@@ -397,23 +411,11 @@ static int live_remote_context(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	enum intel_engine_id id;
-	struct drm_file *file;
 	int err = 0;
 
-	file = mock_file(gt->i915);
-	if (IS_ERR(file))
-		return PTR_ERR(file);
-
-	fixme = live_context(gt->i915, file);
-	if (IS_ERR(fixme)) {
-		err = PTR_ERR(fixme);
-		goto out_file;
-	}
-
 	for_each_engine(engine, gt, id) {
-		err = __live_remote_context(engine, fixme);
+		err = __live_remote_context(engine);
 		if (err)
 			break;
 
@@ -422,8 +424,6 @@ static int live_remote_context(void *arg)
 			break;
 	}
 
-out_file:
-	mock_file_free(gt->i915, file);
 	return err;
 }
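Note on the hunks above: the reworked selftests bracket their work with a recurring idiom, that is save engine->props.heartbeat_interval_ms, force it to zero so the heartbeat cannot inject requests mid-test, hold an engine PM reference for the duration, and restore the saved interval afterwards. A trivial stand-alone model of the save/disable/restore bracket follows, for illustration only.

/* Illustration: disable a periodic heartbeat around a critical section. */
#include <stdio.h>

struct fake_engine_props {
	unsigned long heartbeat_interval_ms;
};

static unsigned long heartbeat_disable(struct fake_engine_props *props)
{
	unsigned long saved = props->heartbeat_interval_ms;

	props->heartbeat_interval_ms = 0;	/* 0 == no heartbeat pulses */
	return saved;
}

static void heartbeat_enable(struct fake_engine_props *props,
			     unsigned long saved)
{
	props->heartbeat_interval_ms = saved;
}

int main(void)
{
	struct fake_engine_props props = { .heartbeat_interval_ms = 2500 };
	unsigned long saved = heartbeat_disable(&props);

	printf("during test: %lu ms\n", props.heartbeat_interval_ms);
	heartbeat_enable(&props, saved);
	printf("after test:  %lu ms\n", props.heartbeat_interval_ms);
	return 0;
}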
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 3880f07..f88e445 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -4,7 +4,365 @@
  * Copyright © 2018 Intel Corporation
  */
 
-#include "../i915_selftest.h"
+#include <linux/sort.h>
+
+#include "intel_gt_pm.h"
+#include "intel_rps.h"
+
+#include "i915_selftest.h"
+#include "selftests/igt_flush_test.h"
+
+#define COUNT 5
+
+static int cmp_u32(const void *A, const void *B)
+{
+	const u32 *a = A, *b = B;
+
+	return *a - *b;
+}
+
+static void perf_begin(struct intel_gt *gt)
+{
+	intel_gt_pm_get(gt);
+
+	/* Boost gpufreq to max [waitboost] and keep it fixed */
+	atomic_inc(&gt->rps.num_waiters);
+	schedule_work(&gt->rps.work);
+	flush_work(&gt->rps.work);
+}
+
+static int perf_end(struct intel_gt *gt)
+{
+	atomic_dec(&gt->rps.num_waiters);
+	intel_gt_pm_put(gt);
+
+	return igt_flush_test(gt->i915);
+}
+
+static int write_timestamp(struct i915_request *rq, int slot)
+{
+	u32 cmd;
+	u32 *cs;
+
+	cs = intel_ring_begin(rq, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
+	if (INTEL_GEN(rq->i915) >= 8)
+		cmd++;
+	*cs++ = cmd;
+	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+	*cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32);
+	*cs++ = 0;
+
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+static struct i915_vma *create_empty_batch(struct intel_context *ce)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	u32 *cs;
+	int err;
+
+	obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err_put;
+	}
+
+	cs[0] = MI_BATCH_BUFFER_END;
+
+	i915_gem_object_flush_map(obj);
+
+	vma = i915_vma_instance(obj, ce->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_unpin;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		goto err_unpin;
+
+	i915_gem_object_unpin_map(obj);
+	return vma;
+
+err_unpin:
+	i915_gem_object_unpin_map(obj);
+err_put:
+	i915_gem_object_put(obj);
+	return ERR_PTR(err);
+}
+
+static u32 trifilter(u32 *a)
+{
+	u64 sum;
+
+	sort(a, COUNT, sizeof(*a), cmp_u32, NULL);
+
+	sum = mul_u32_u32(a[2], 2);
+	sum += a[1];
+	sum += a[3];
+
+	return sum >> 2;
+}
+
+static int perf_mi_bb_start(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = 0;
+
+	if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+		return 0;
+
+	perf_begin(gt);
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce = engine->kernel_context;
+		struct i915_vma *batch;
+		u32 cycles[COUNT];
+		int i;
+
+		intel_engine_pm_get(engine);
+
+		batch = create_empty_batch(ce);
+		if (IS_ERR(batch)) {
+			err = PTR_ERR(batch);
+			intel_engine_pm_put(engine);
+			break;
+		}
+
+		err = i915_vma_sync(batch);
+		if (err) {
+			intel_engine_pm_put(engine);
+			i915_vma_put(batch);
+			break;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(cycles); i++) {
+			struct i915_request *rq;
+
+			rq = i915_request_create(ce);
+			if (IS_ERR(rq)) {
+				err = PTR_ERR(rq);
+				break;
+			}
+
+			err = write_timestamp(rq, 2);
+			if (err)
+				goto out;
+
+			err = rq->engine->emit_bb_start(rq,
+							batch->node.start, 8,
+							0);
+			if (err)
+				goto out;
+
+			err = write_timestamp(rq, 3);
+			if (err)
+				goto out;
+
+out:
+			i915_request_get(rq);
+			i915_request_add(rq);
+
+			if (i915_request_wait(rq, 0, HZ / 5) < 0)
+				err = -EIO;
+			i915_request_put(rq);
+			if (err)
+				break;
+
+			cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2];
+		}
+		i915_vma_put(batch);
+		intel_engine_pm_put(engine);
+		if (err)
+			break;
+
+		pr_info("%s: MI_BB_START cycles: %u\n",
+			engine->name, trifilter(cycles));
+	}
+	if (perf_end(gt))
+		err = -EIO;
+
+	return err;
+}
+
+static struct i915_vma *create_nop_batch(struct intel_context *ce)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	u32 *cs;
+	int err;
+
+	obj = i915_gem_object_create_internal(ce->engine->i915, SZ_64K);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err_put;
+	}
+
+	memset(cs, 0, SZ_64K);
+	cs[SZ_64K / sizeof(*cs) - 1] = MI_BATCH_BUFFER_END;
+
+	i915_gem_object_flush_map(obj);
+
+	vma = i915_vma_instance(obj, ce->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_unpin;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		goto err_unpin;
+
+	i915_gem_object_unpin_map(obj);
+	return vma;
+
+err_unpin:
+	i915_gem_object_unpin_map(obj);
+err_put:
+	i915_gem_object_put(obj);
+	return ERR_PTR(err);
+}
+
+static int perf_mi_noop(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = 0;
+
+	if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+		return 0;
+
+	perf_begin(gt);
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce = engine->kernel_context;
+		struct i915_vma *base, *nop;
+		u32 cycles[COUNT];
+		int i;
+
+		intel_engine_pm_get(engine);
+
+		base = create_empty_batch(ce);
+		if (IS_ERR(base)) {
+			err = PTR_ERR(base);
+			intel_engine_pm_put(engine);
+			break;
+		}
+
+		err = i915_vma_sync(base);
+		if (err) {
+			i915_vma_put(base);
+			intel_engine_pm_put(engine);
+			break;
+		}
+
+		nop = create_nop_batch(ce);
+		if (IS_ERR(nop)) {
+			err = PTR_ERR(nop);
+			i915_vma_put(base);
+			intel_engine_pm_put(engine);
+			break;
+		}
+
+		err = i915_vma_sync(nop);
+		if (err) {
+			i915_vma_put(nop);
+			i915_vma_put(base);
+			intel_engine_pm_put(engine);
+			break;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(cycles); i++) {
+			struct i915_request *rq;
+
+			rq = i915_request_create(ce);
+			if (IS_ERR(rq)) {
+				err = PTR_ERR(rq);
+				break;
+			}
+
+			err = write_timestamp(rq, 2);
+			if (err)
+				goto out;
+
+			err = rq->engine->emit_bb_start(rq,
+							base->node.start, 8,
+							0);
+			if (err)
+				goto out;
+
+			err = write_timestamp(rq, 3);
+			if (err)
+				goto out;
+
+			err = rq->engine->emit_bb_start(rq,
+							nop->node.start,
+							nop->node.size,
+							0);
+			if (err)
+				goto out;
+
+			err = write_timestamp(rq, 4);
+			if (err)
+				goto out;
+
+out:
+			i915_request_get(rq);
+			i915_request_add(rq);
+
+			if (i915_request_wait(rq, 0, HZ / 5) < 0)
+				err = -EIO;
+			i915_request_put(rq);
+			if (err)
+				break;
+
+			cycles[i] =
+				(rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) -
+				(rq->hwsp_seqno[3] - rq->hwsp_seqno[2]);
+		}
+		i915_vma_put(nop);
+		i915_vma_put(base);
+		intel_engine_pm_put(engine);
+		if (err)
+			break;
+
+		pr_info("%s: 16K MI_NOOP cycles: %u\n",
+			engine->name, trifilter(cycles));
+	}
+	if (perf_end(gt))
+		err = -EIO;
+
+	return err;
+}
+
+int intel_engine_cs_perf_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(perf_mi_bb_start),
+		SUBTEST(perf_mi_noop),
+	};
+
+	if (intel_gt_is_wedged(&i915->gt))
+		return 0;
+
+	return intel_gt_live_subtests(tests, &i915->gt);
+}
 
 static int intel_mmio_bases_check(void *arg)
 {
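Note on the hunk above: the new perf selftests reduce the COUNT (five) timestamp deltas with trifilter(), which sorts the samples and returns a weighted average of the three middle values, (a[1] + 2*a[2] + a[3]) / 4, discarding the two outliers. A stand-alone version of that filter, using libc qsort() instead of the kernel's sort(), is shown below for illustration.

/* Illustration of the trifilter reduction over five noisy samples. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define COUNT 5

static int cmp_u32(const void *A, const void *B)
{
	const uint32_t *a = A, *b = B;

	return (*a > *b) - (*a < *b);	/* avoids overflow of *a - *b */
}

static uint32_t trifilter(uint32_t *a)
{
	uint64_t sum;

	qsort(a, COUNT, sizeof(*a), cmp_u32);

	sum = 2ull * a[2];	/* the median is counted twice */
	sum += a[1];
	sum += a[3];

	return (uint32_t)(sum >> 2);
}

int main(void)
{
	uint32_t cycles[COUNT] = { 120, 95, 3000, 101, 99 };

	/* Sorted: 95 99 101 120 3000 -> (99 + 2*101 + 120) / 4 = 105 */
	printf("filtered cycles: %u\n", trifilter(cycles));
	return 0;
}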
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index e864406..43d4d58 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -11,6 +11,28 @@
 #include "intel_gt_requests.h"
 #include "i915_selftest.h"
 
+static int timeline_sync(struct intel_timeline *tl)
+{
+	struct dma_fence *fence;
+	long timeout;
+
+	fence = i915_active_fence_get(&tl->last_request);
+	if (!fence)
+		return 0;
+
+	timeout = dma_fence_wait_timeout(fence, true, HZ / 2);
+	dma_fence_put(fence);
+	if (timeout < 0)
+		return timeout;
+
+	return 0;
+}
+
+static int engine_sync_barrier(struct intel_engine_cs *engine)
+{
+	return timeline_sync(engine->kernel_context->timeline);
+}
+
 struct pulse {
 	struct i915_active active;
 	struct kref kref;
@@ -53,9 +75,7 @@ static struct pulse *pulse_create(void)
 
 static void pulse_unlock_wait(struct pulse *p)
 {
-	mutex_lock(&p->active.mutex);
-	mutex_unlock(&p->active.mutex);
-	flush_work(&p->active.work);
+	i915_active_unlock_wait(&p->active);
 }
 
 static int __live_idle_pulse(struct intel_engine_cs *engine,
@@ -92,7 +112,12 @@ static int __live_idle_pulse(struct intel_engine_cs *engine,
 
 	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
 
-	if (intel_gt_retire_requests_timeout(engine->gt, HZ / 5)) {
+	if (engine_sync_barrier(engine)) {
+		struct drm_printer m = drm_err_printer("pulse");
+
+		pr_err("%s: no heartbeat pulse?\n", engine->name);
+		intel_engine_dump(engine, &m, "%s", engine->name);
+
 		err = -ETIME;
 		goto out;
 	}
@@ -175,8 +200,7 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine)
 	int err;
 	int i;
 
-	ce = intel_context_create(engine->kernel_context->gem_context,
-				  engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index d1752f1..09ff8e4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -6,6 +6,7 @@
  */
 
 #include "selftest_llc.h"
+#include "selftest_rc6.h"
 
 static int live_gt_resume(void *arg)
 {
@@ -50,6 +51,7 @@ static int live_gt_resume(void *arg)
 int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
+		SUBTEST(live_rc6_manual),
 		SUBTEST(live_gt_resume),
 	};
 
@@ -58,3 +60,20 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
 
 	return intel_gt_live_subtests(tests, &i915->gt);
 }
+
+int intel_gt_pm_late_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		/*
+		 * These tests may leave the system in an undesirable state.
+		 * They are intended to be run last in CI and the system
+		 * rebooted afterwards.
+		 */
+		SUBTEST(live_rc6_ctx_wa),
+	};
+
+	if (intel_gt_is_wedged(&i915->gt))
+		return 0;
+
+	return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 85e9ccf5c..3e5e6c8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -25,7 +25,9 @@
 #include <linux/kthread.h>
 
 #include "gem/i915_gem_context.h"
-#include "gt/intel_gt.h"
+
+#include "intel_gt.h"
+#include "intel_engine_heartbeat.h"
 #include "intel_engine_pm.h"
 
 #include "i915_selftest.h"
@@ -308,6 +310,24 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq)
 			  1000));
 }
 
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+				     unsigned long *saved)
+{
+	*saved = engine->props.heartbeat_interval_ms;
+	engine->props.heartbeat_interval_ms = 0;
+
+	intel_engine_pm_get(engine);
+	intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+				    unsigned long saved)
+{
+	intel_engine_pm_put(engine);
+
+	engine->props.heartbeat_interval_ms = saved;
+}
+
 static int igt_hang_sanitycheck(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -377,36 +397,30 @@ static int igt_reset_nop(void *arg)
 	struct intel_gt *gt = arg;
 	struct i915_gpu_error *global = &gt->i915->gpu_error;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *ctx;
 	unsigned int reset_count, count;
 	enum intel_engine_id id;
-	struct drm_file *file;
 	IGT_TIMEOUT(end_time);
 	int err = 0;
 
 	/* Check that we can reset during non-user portions of requests */
 
-	file = mock_file(gt->i915);
-	if (IS_ERR(file))
-		return PTR_ERR(file);
-
-	ctx = live_context(gt->i915, file);
-	if (IS_ERR(ctx)) {
-		err = PTR_ERR(ctx);
-		goto out;
-	}
-
-	i915_gem_context_clear_bannable(ctx);
 	reset_count = i915_reset_count(global);
 	count = 0;
 	do {
 		for_each_engine(engine, gt, id) {
+			struct intel_context *ce;
 			int i;
 
+			ce = intel_context_create(engine);
+			if (IS_ERR(ce)) {
+				err = PTR_ERR(ce);
+				break;
+			}
+
 			for (i = 0; i < 16; i++) {
 				struct i915_request *rq;
 
-				rq = igt_request_alloc(ctx, engine);
+				rq = intel_context_create_request(ce);
 				if (IS_ERR(rq)) {
 					err = PTR_ERR(rq);
 					break;
@@ -414,6 +428,8 @@ static int igt_reset_nop(void *arg)
 
 				i915_request_add(rq);
 			}
+
+			intel_context_put(ce);
 		}
 
 		igt_global_reset_lock(gt);
@@ -437,10 +453,7 @@ static int igt_reset_nop(void *arg)
 	} while (time_before(jiffies, end_time));
 	pr_info("%s: %d resets\n", __func__, count);
 
-	err = igt_flush_test(gt->i915);
-out:
-	mock_file_free(gt->i915, file);
-	if (intel_gt_is_wedged(gt))
+	if (igt_flush_test(gt->i915))
 		err = -EIO;
 	return err;
 }
@@ -450,36 +463,29 @@ static int igt_reset_nop_engine(void *arg)
 	struct intel_gt *gt = arg;
 	struct i915_gpu_error *global = &gt->i915->gpu_error;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *ctx;
 	enum intel_engine_id id;
-	struct drm_file *file;
-	int err = 0;
 
 	/* Check that we can engine-reset during non-user portions */
 
 	if (!intel_has_reset_engine(gt))
 		return 0;
 
-	file = mock_file(gt->i915);
-	if (IS_ERR(file))
-		return PTR_ERR(file);
-
-	ctx = live_context(gt->i915, file);
-	if (IS_ERR(ctx)) {
-		err = PTR_ERR(ctx);
-		goto out;
-	}
-
-	i915_gem_context_clear_bannable(ctx);
 	for_each_engine(engine, gt, id) {
-		unsigned int reset_count, reset_engine_count;
-		unsigned int count;
+		unsigned int reset_count, reset_engine_count, count;
+		struct intel_context *ce;
+		unsigned long heartbeat;
 		IGT_TIMEOUT(end_time);
+		int err;
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce))
+			return PTR_ERR(ce);
 
 		reset_count = i915_reset_count(global);
 		reset_engine_count = i915_reset_engine_count(global, engine);
 		count = 0;
 
+		engine_heartbeat_disable(engine, &heartbeat);
 		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 		do {
 			int i;
@@ -494,7 +500,7 @@ static int igt_reset_nop_engine(void *arg)
 			for (i = 0; i < 16; i++) {
 				struct i915_request *rq;
 
-				rq = igt_request_alloc(ctx, engine);
+				rq = intel_context_create_request(ce);
 				if (IS_ERR(rq)) {
 					err = PTR_ERR(rq);
 					break;
@@ -523,22 +529,18 @@ static int igt_reset_nop_engine(void *arg)
 			}
 		} while (time_before(jiffies, end_time));
 		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+		engine_heartbeat_enable(engine, heartbeat);
+
 		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
 
+		intel_context_put(ce);
+		if (igt_flush_test(gt->i915))
+			err = -EIO;
 		if (err)
-			break;
-
-		err = igt_flush_test(gt->i915);
-		if (err)
-			break;
+			return err;
 	}
 
-	err = igt_flush_test(gt->i915);
-out:
-	mock_file_free(gt->i915, file);
-	if (intel_gt_is_wedged(gt))
-		err = -EIO;
-	return err;
+	return 0;
 }
 
 static int __igt_reset_engine(struct intel_gt *gt, bool active)
@@ -562,6 +564,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 
 	for_each_engine(engine, gt, id) {
 		unsigned int reset_count, reset_engine_count;
+		unsigned long heartbeat;
 		IGT_TIMEOUT(end_time);
 
 		if (active && !intel_engine_can_store_dword(engine))
@@ -577,7 +580,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 		reset_count = i915_reset_count(global);
 		reset_engine_count = i915_reset_engine_count(global, engine);
 
-		intel_engine_pm_get(engine);
+		engine_heartbeat_disable(engine, &heartbeat);
 		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 		do {
 			if (active) {
@@ -629,7 +632,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 			}
 		} while (time_before(jiffies, end_time));
 		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-		intel_engine_pm_put(engine);
+		engine_heartbeat_enable(engine, heartbeat);
 
 		if (err)
 			break;
@@ -699,43 +702,43 @@ static int active_engine(void *data)
 	struct active_engine *arg = data;
 	struct intel_engine_cs *engine = arg->engine;
 	struct i915_request *rq[8] = {};
-	struct i915_gem_context *ctx[ARRAY_SIZE(rq)];
-	struct drm_file *file;
-	unsigned long count = 0;
+	struct intel_context *ce[ARRAY_SIZE(rq)];
+	unsigned long count;
 	int err = 0;
 
-	file = mock_file(engine->i915);
-	if (IS_ERR(file))
-		return PTR_ERR(file);
-
-	for (count = 0; count < ARRAY_SIZE(ctx); count++) {
-		ctx[count] = live_context(engine->i915, file);
-		if (IS_ERR(ctx[count])) {
-			err = PTR_ERR(ctx[count]);
+	for (count = 0; count < ARRAY_SIZE(ce); count++) {
+		ce[count] = intel_context_create(engine);
+		if (IS_ERR(ce[count])) {
+			err = PTR_ERR(ce[count]);
 			while (--count)
-				i915_gem_context_put(ctx[count]);
-			goto err_file;
+				intel_context_put(ce[count]);
+			return err;
 		}
 	}
 
+	count = 0;
 	while (!kthread_should_stop()) {
 		unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
 		struct i915_request *old = rq[idx];
 		struct i915_request *new;
 
-		new = igt_request_alloc(ctx[idx], engine);
+		new = intel_context_create_request(ce[idx]);
 		if (IS_ERR(new)) {
 			err = PTR_ERR(new);
 			break;
 		}
 
-		if (arg->flags & TEST_PRIORITY)
-			ctx[idx]->sched.priority =
-				i915_prandom_u32_max_state(512, &prng);
-
 		rq[idx] = i915_request_get(new);
 		i915_request_add(new);
 
+		if (engine->schedule && arg->flags & TEST_PRIORITY) {
+			struct i915_sched_attr attr = {
+				.priority =
+					i915_prandom_u32_max_state(512, &prng),
+			};
+			engine->schedule(rq[idx], &attr);
+		}
+
 		err = active_request_put(old);
 		if (err)
 			break;
@@ -749,10 +752,10 @@ static int active_engine(void *data)
 		/* Keep the first error */
 		if (!err)
 			err = err__;
+
+		intel_context_put(ce[count]);
 	}
 
-err_file:
-	mock_file_free(engine->i915, file);
 	return err;
 }
 
@@ -786,6 +789,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 		struct active_engine threads[I915_NUM_ENGINES] = {};
 		unsigned long device = i915_reset_count(global);
 		unsigned long count = 0, reported;
+		unsigned long heartbeat;
 		IGT_TIMEOUT(end_time);
 
 		if (flags & TEST_ACTIVE &&
@@ -828,7 +832,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 
 		yield(); /* start all threads before we begin */
 
-		intel_engine_pm_get(engine);
+		engine_heartbeat_disable(engine, &heartbeat);
 		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 		do {
 			struct i915_request *rq = NULL;
@@ -902,7 +906,8 @@ static int __igt_reset_engines(struct intel_gt *gt,
 			}
 		} while (time_before(jiffies, end_time));
 		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-		intel_engine_pm_put(engine);
+		engine_heartbeat_enable(engine, heartbeat);
+
 		pr_info("i915_reset_engine(%s:%s): %lu resets\n",
 			engine->name, test_name, count);
 
@@ -1300,32 +1305,21 @@ static int igt_reset_evict_ggtt(void *arg)
 static int igt_reset_evict_ppgtt(void *arg)
 {
 	struct intel_gt *gt = arg;
-	struct i915_gem_context *ctx;
-	struct i915_address_space *vm;
-	struct drm_file *file;
+	struct i915_ppgtt *ppgtt;
 	int err;
 
-	file = mock_file(gt->i915);
-	if (IS_ERR(file))
-		return PTR_ERR(file);
+	/* aliasing == global gtt locking, covered above */
+	if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
+		return 0;
 
-	ctx = live_context(gt->i915, file);
-	if (IS_ERR(ctx)) {
-		err = PTR_ERR(ctx);
-		goto out;
-	}
+	ppgtt = i915_ppgtt_create(gt);
+	if (IS_ERR(ppgtt))
+		return PTR_ERR(ppgtt);
 
-	err = 0;
-	vm = i915_gem_context_get_vm_rcu(ctx);
-	if (!i915_is_ggtt(vm)) {
-		/* aliasing == global gtt locking, covered above */
-		err = __igt_reset_evict_vma(gt, vm,
-					    evict_vma, EXEC_OBJECT_WRITE);
-	}
-	i915_vm_put(vm);
+	err = __igt_reset_evict_vma(gt, &ppgtt->vm,
+				    evict_vma, EXEC_OBJECT_WRITE);
+	i915_vm_put(&ppgtt->vm);
 
-out:
-	mock_file_free(gt->i915, file);
 	return err;
 }
 
@@ -1504,7 +1498,7 @@ static int igt_handle_error(void *arg)
 	struct intel_engine_cs *engine = gt->engine[RCS0];
 	struct hang h;
 	struct i915_request *rq;
-	struct i915_gpu_state *error;
+	struct i915_gpu_coredump *error;
 	int err;
 
 	/* Check that we can issue a global GPU and engine reset */
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index eb71ac2..15cda02 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -50,14 +50,31 @@ static struct i915_vma *create_scratch(struct intel_gt *gt)
 	return vma;
 }
 
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+				     unsigned long *saved)
+{
+	*saved = engine->props.heartbeat_interval_ms;
+	engine->props.heartbeat_interval_ms = 0;
+
+	intel_engine_pm_get(engine);
+	intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+				    unsigned long saved)
+{
+	intel_engine_pm_put(engine);
+
+	engine->props.heartbeat_interval_ms = saved;
+}
+
 static int live_sanitycheck(void *arg)
 {
 	struct intel_gt *gt = arg;
-	struct i915_gem_engines_iter it;
-	struct i915_gem_context *ctx;
-	struct intel_context *ce;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	struct igt_spinner spin;
-	int err = -ENOMEM;
+	int err = 0;
 
 	if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
 		return 0;
@@ -65,17 +82,20 @@ static int live_sanitycheck(void *arg)
 	if (igt_spinner_init(&spin, gt))
 		return -ENOMEM;
 
-	ctx = kernel_context(gt->i915);
-	if (!ctx)
-		goto err_spin;
-
-	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
 		struct i915_request *rq;
 
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			break;
+		}
+
 		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
-			goto err_ctx;
+			goto out_ctx;
 		}
 
 		i915_request_add(rq);
@@ -84,21 +104,21 @@ static int live_sanitycheck(void *arg)
 			GEM_TRACE_DUMP();
 			intel_gt_set_wedged(gt);
 			err = -EIO;
-			goto err_ctx;
+			goto out_ctx;
 		}
 
 		igt_spinner_end(&spin);
 		if (igt_flush_test(gt->i915)) {
 			err = -EIO;
-			goto err_ctx;
+			goto out_ctx;
 		}
+
+out_ctx:
+		intel_context_put(ce);
+		if (err)
+			break;
 	}
 
-	err = 0;
-err_ctx:
-	i915_gem_context_unlock_engines(ctx);
-	kernel_context_close(ctx);
-err_spin:
 	igt_spinner_fini(&spin);
 	return err;
 }
@@ -106,7 +126,6 @@ static int live_sanitycheck(void *arg)
 static int live_unlite_restore(struct intel_gt *gt, int prio)
 {
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *ctx;
 	enum intel_engine_id id;
 	struct igt_spinner spin;
 	int err = -ENOMEM;
@@ -119,15 +138,12 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
 	if (igt_spinner_init(&spin, gt))
 		return err;
 
-	ctx = kernel_context(gt->i915);
-	if (!ctx)
-		goto err_spin;
-
 	err = 0;
 	for_each_engine(engine, gt, id) {
 		struct intel_context *ce[2] = {};
 		struct i915_request *rq[2];
 		struct igt_live_test t;
+		unsigned long saved;
 		int n;
 
 		if (prio && !intel_engine_has_preemption(engine))
@@ -140,11 +156,12 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
 			err = -EIO;
 			break;
 		}
+		engine_heartbeat_disable(engine, &saved);
 
 		for (n = 0; n < ARRAY_SIZE(ce); n++) {
 			struct intel_context *tmp;
 
-			tmp = intel_context_create(ctx, engine);
+			tmp = intel_context_create(engine);
 			if (IS_ERR(tmp)) {
 				err = PTR_ERR(tmp);
 				goto err_ce;
@@ -247,14 +264,13 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
 			intel_context_put(ce[n]);
 		}
 
+		engine_heartbeat_enable(engine, saved);
 		if (igt_live_test_end(&t))
 			err = -EIO;
 		if (err)
 			break;
 	}
 
-	kernel_context_close(ctx);
-err_spin:
 	igt_spinner_fini(&spin);
 	return err;
 }
@@ -309,17 +325,17 @@ emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
 static struct i915_request *
 semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
 {
-	struct i915_gem_context *ctx;
+	struct intel_context *ce;
 	struct i915_request *rq;
 	int err;
 
-	ctx = kernel_context(engine->i915);
-	if (!ctx)
-		return ERR_PTR(-ENOMEM);
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return ERR_CAST(ce);
 
-	rq = igt_request_alloc(ctx, engine);
+	rq = intel_context_create_request(ce);
 	if (IS_ERR(rq))
-		goto out_ctx;
+		goto out_ce;
 
 	err = 0;
 	if (rq->engine->emit_init_breadcrumb)
@@ -332,8 +348,8 @@ semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
 	if (err)
 		rq = ERR_PTR(err);
 
-out_ctx:
-	kernel_context_close(ctx);
+out_ce:
+	intel_context_put(ce);
 	return rq;
 }
 
@@ -348,7 +364,7 @@ release_queue(struct intel_engine_cs *engine,
 	struct i915_request *rq;
 	u32 *cs;
 
-	rq = i915_request_create(engine->kernel_context);
+	rq = intel_engine_create_kernel_request(engine);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
@@ -468,12 +484,16 @@ static int live_timeslice_preempt(void *arg)
 		enum intel_engine_id id;
 
 		for_each_engine(engine, gt, id) {
+			unsigned long saved;
+
 			if (!intel_engine_has_preemption(engine))
 				continue;
 
 			memset(vaddr, 0, PAGE_SIZE);
 
+			engine_heartbeat_disable(engine, &saved);
 			err = slice_semaphore_queue(engine, vma, count);
+			engine_heartbeat_enable(engine, saved);
 			if (err)
 				goto err_pin;
 
@@ -497,7 +517,7 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine)
 {
 	struct i915_request *rq;
 
-	rq = i915_request_create(engine->kernel_context);
+	rq = intel_engine_create_kernel_request(engine);
 	if (IS_ERR(rq))
 		return rq;
 
@@ -507,13 +527,19 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine)
 	return rq;
 }
 
-static void wait_for_submit(struct intel_engine_cs *engine,
-			    struct i915_request *rq)
+static int wait_for_submit(struct intel_engine_cs *engine,
+			   struct i915_request *rq,
+			   unsigned long timeout)
 {
+	timeout += jiffies;
 	do {
 		cond_resched();
 		intel_engine_flush_submission(engine);
-	} while (!i915_request_is_active(rq));
+		if (i915_request_is_active(rq))
+			return 0;
+	} while (time_before(jiffies, timeout));
+
+	return -ETIME;
 }
 
 static long timeslice_threshold(const struct intel_engine_cs *engine)
@@ -566,40 +592,49 @@ static int live_timeslice_queue(void *arg)
 			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
 		};
 		struct i915_request *rq, *nop;
+		unsigned long saved;
 
 		if (!intel_engine_has_preemption(engine))
 			continue;
 
+		engine_heartbeat_disable(engine, &saved);
 		memset(vaddr, 0, PAGE_SIZE);
 
 		/* ELSP[0]: semaphore wait */
 		rq = semaphore_queue(engine, vma, 0);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
-			goto err_pin;
+			goto err_heartbeat;
 		}
 		engine->schedule(rq, &attr);
-		wait_for_submit(engine, rq);
+		err = wait_for_submit(engine, rq, HZ / 2);
+		if (err) {
+			pr_err("%s: Timed out trying to submit semaphores\n",
+			       engine->name);
+			goto err_rq;
+		}
 
 		/* ELSP[1]: nop request */
 		nop = nop_request(engine);
 		if (IS_ERR(nop)) {
 			err = PTR_ERR(nop);
-			i915_request_put(rq);
-			goto err_pin;
+			goto err_rq;
 		}
-		wait_for_submit(engine, nop);
+		err = wait_for_submit(engine, nop, HZ / 2);
 		i915_request_put(nop);
+		if (err) {
+			pr_err("%s: Timed out trying to submit nop\n",
+			       engine->name);
+			goto err_rq;
+		}
 
 		GEM_BUG_ON(i915_request_completed(rq));
 		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
 		/* Queue: semaphore signal, matching priority as semaphore */
 		err = release_queue(engine, vma, 1, effective_prio(rq));
-		if (err) {
-			i915_request_put(rq);
-			goto err_pin;
-		}
+		if (err)
+			goto err_rq;
 
 		intel_engine_flush_submission(engine);
 		if (!READ_ONCE(engine->execlists.timer.expires) &&
@@ -630,12 +665,14 @@ static int live_timeslice_queue(void *arg)
 			memset(vaddr, 0xff, PAGE_SIZE);
 			err = -EIO;
 		}
+err_rq:
 		i915_request_put(rq);
+err_heartbeat:
+		engine_heartbeat_enable(engine, saved);
 		if (err)
 			break;
 	}
 
-err_pin:
 	i915_vma_unpin(vma);
 err_map:
 	i915_gem_object_unpin_map(obj);
@@ -748,15 +785,19 @@ static int live_busywait_preempt(void *arg)
 		*cs++ = 0;
 
 		intel_ring_advance(lo, cs);
+
+		i915_request_get(lo);
 		i915_request_add(lo);
 
 		if (wait_for(READ_ONCE(*map), 10)) {
+			i915_request_put(lo);
 			err = -ETIMEDOUT;
 			goto err_vma;
 		}
 
 		/* Low priority request should be busywaiting now */
 		if (i915_request_wait(lo, 0, 1) != -ETIME) {
+			i915_request_put(lo);
 			pr_err("%s: Busywaiting request did not!\n",
 			       engine->name);
 			err = -EIO;
@@ -766,6 +807,7 @@ static int live_busywait_preempt(void *arg)
 		hi = igt_request_alloc(ctx_hi, engine);
 		if (IS_ERR(hi)) {
 			err = PTR_ERR(hi);
+			i915_request_put(lo);
 			goto err_vma;
 		}
 
@@ -773,6 +815,7 @@ static int live_busywait_preempt(void *arg)
 		if (IS_ERR(cs)) {
 			err = PTR_ERR(cs);
 			i915_request_add(hi);
+			i915_request_put(lo);
 			goto err_vma;
 		}
 
@@ -793,11 +836,13 @@ static int live_busywait_preempt(void *arg)
 			intel_engine_dump(engine, &p, "%s\n", engine->name);
 			GEM_TRACE_DUMP();
 
+			i915_request_put(lo);
 			intel_gt_set_wedged(gt);
 			err = -EIO;
 			goto err_vma;
 		}
 		GEM_BUG_ON(READ_ONCE(*map));
+		i915_request_put(lo);
 
 		if (igt_live_test_end(&t)) {
 			err = -EIO;
@@ -1108,7 +1153,7 @@ static int live_nopreempt(void *arg)
 		}
 
 		/* Low priority client, but unpreemptable! */
-		rq_a->flags |= I915_REQUEST_NOPREEMPT;
+		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
 
 		i915_request_add(rq_a);
 		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
@@ -1187,13 +1232,13 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
 				__func__, arg->engine->name))
 		return -EIO;
 
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq = spinner_create_request(&arg->a.spin,
 				    arg->a.ctx, arg->engine,
 				    MI_ARB_CHECK);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
+	clear_bit(CONTEXT_BANNED, &rq->context->flags);
 	i915_request_get(rq);
 	i915_request_add(rq);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
@@ -1201,7 +1246,7 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	i915_gem_context_set_banned(arg->a.ctx);
+	intel_context_set_banned(rq->context);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -1236,13 +1281,13 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
 				__func__, arg->engine->name))
 		return -EIO;
 
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq[0] = spinner_create_request(&arg->a.spin,
 				       arg->a.ctx, arg->engine,
 				       MI_NOOP); /* no preemption */
 	if (IS_ERR(rq[0]))
 		return PTR_ERR(rq[0]);
 
+	clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
 	i915_request_get(rq[0]);
 	i915_request_add(rq[0]);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
@@ -1250,7 +1295,6 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
 	rq[1] = spinner_create_request(&arg->b.spin,
 				       arg->b.ctx, arg->engine,
 				       MI_ARB_CHECK);
@@ -1259,13 +1303,14 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
+	clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
 	i915_request_get(rq[1]);
 	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
 	i915_request_add(rq[1]);
 	if (err)
 		goto out;
 
-	i915_gem_context_set_banned(arg->b.ctx);
+	intel_context_set_banned(rq[1]->context);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -1308,13 +1353,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 				__func__, arg->engine->name))
 		return -EIO;
 
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq[0] = spinner_create_request(&arg->a.spin,
 				       arg->a.ctx, arg->engine,
 				       MI_ARB_CHECK);
 	if (IS_ERR(rq[0]))
 		return PTR_ERR(rq[0]);
 
+	clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
 	i915_request_get(rq[0]);
 	i915_request_add(rq[0]);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
@@ -1322,13 +1367,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
 	rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
 	if (IS_ERR(rq[1])) {
 		err = PTR_ERR(rq[1]);
 		goto out;
 	}
 
+	clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
 	i915_request_get(rq[1]);
 	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
 	i915_request_add(rq[1]);
@@ -1349,7 +1394,7 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 	if (err)
 		goto out;
 
-	i915_gem_context_set_banned(arg->a.ctx);
+	intel_context_set_banned(rq[2]->context);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -1396,13 +1441,13 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
 		return 0;
 
 	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq = spinner_create_request(&arg->a.spin,
 				    arg->a.ctx, arg->engine,
 				    MI_NOOP); /* preemption disabled */
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
+	clear_bit(CONTEXT_BANNED, &rq->context->flags);
 	i915_request_get(rq);
 	i915_request_add(rq);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
@@ -1410,7 +1455,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	i915_gem_context_set_banned(arg->a.ctx);
+	intel_context_set_banned(rq->context);
 	err = intel_engine_pulse(arg->engine); /* force reset */
 	if (err)
 		goto out;
@@ -1665,6 +1710,7 @@ static int live_suppress_wait_preempt(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct preempt_client client[4];
+	struct i915_request *rq[ARRAY_SIZE(client)] = {};
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int err = -ENOMEM;
@@ -1698,7 +1744,6 @@ static int live_suppress_wait_preempt(void *arg)
 			continue;
 
 		for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
-			struct i915_request *rq[ARRAY_SIZE(client)];
 			struct i915_request *dummy;
 
 			engine->execlists.preempt_hang.count = 0;
@@ -1708,18 +1753,22 @@ static int live_suppress_wait_preempt(void *arg)
 				goto err_client_3;
 
 			for (i = 0; i < ARRAY_SIZE(client); i++) {
-				rq[i] = spinner_create_request(&client[i].spin,
-							       client[i].ctx, engine,
-							       MI_NOOP);
-				if (IS_ERR(rq[i])) {
-					err = PTR_ERR(rq[i]);
+				struct i915_request *this;
+
+				this = spinner_create_request(&client[i].spin,
+							      client[i].ctx, engine,
+							      MI_NOOP);
+				if (IS_ERR(this)) {
+					err = PTR_ERR(this);
 					goto err_wedged;
 				}
 
 				/* Disable NEWCLIENT promotion */
-				__i915_active_fence_set(&i915_request_timeline(rq[i])->last_request,
+				__i915_active_fence_set(&i915_request_timeline(this)->last_request,
 							&dummy->fence);
-				i915_request_add(rq[i]);
+
+				rq[i] = i915_request_get(this);
+				i915_request_add(this);
 			}
 
 			dummy_request_free(dummy);
@@ -1740,8 +1789,11 @@ static int live_suppress_wait_preempt(void *arg)
 				goto err_wedged;
 			}
 
-			for (i = 0; i < ARRAY_SIZE(client); i++)
+			for (i = 0; i < ARRAY_SIZE(client); i++) {
 				igt_spinner_end(&client[i].spin);
+				i915_request_put(rq[i]);
+				rq[i] = NULL;
+			}
 
 			if (igt_flush_test(gt->i915))
 				goto err_wedged;
@@ -1769,8 +1821,10 @@ static int live_suppress_wait_preempt(void *arg)
 	return err;
 
 err_wedged:
-	for (i = 0; i < ARRAY_SIZE(client); i++)
+	for (i = 0; i < ARRAY_SIZE(client); i++) {
 		igt_spinner_end(&client[i].spin);
+		i915_request_put(rq[i]);
+	}
 	intel_gt_set_wedged(gt);
 	err = -EIO;
 	goto err_client_3;
@@ -1815,6 +1869,8 @@ static int live_chain_preempt(void *arg)
 					    MI_ARB_CHECK);
 		if (IS_ERR(rq))
 			goto err_wedged;
+
+		i915_request_get(rq);
 		i915_request_add(rq);
 
 		ring_size = rq->wa_tail - rq->head;
@@ -1827,8 +1883,10 @@ static int live_chain_preempt(void *arg)
 		igt_spinner_end(&lo.spin);
 		if (i915_request_wait(rq, 0, HZ / 2) < 0) {
 			pr_err("Timed out waiting to flush %s\n", engine->name);
+			i915_request_put(rq);
 			goto err_wedged;
 		}
+		i915_request_put(rq);
 
 		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
 			err = -EIO;
@@ -1862,6 +1920,8 @@ static int live_chain_preempt(void *arg)
 			rq = igt_request_alloc(hi.ctx, engine);
 			if (IS_ERR(rq))
 				goto err_wedged;
+
+			i915_request_get(rq);
 			i915_request_add(rq);
 			engine->schedule(rq, &attr);
 
@@ -1874,14 +1934,19 @@ static int live_chain_preempt(void *arg)
 				       count);
 				intel_engine_dump(engine, &p,
 						  "%s\n", engine->name);
+				i915_request_put(rq);
 				goto err_wedged;
 			}
 			igt_spinner_end(&lo.spin);
+			i915_request_put(rq);
 
 			rq = igt_request_alloc(lo.ctx, engine);
 			if (IS_ERR(rq))
 				goto err_wedged;
+
+			i915_request_get(rq);
 			i915_request_add(rq);
+
 			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 				struct drm_printer p =
 					drm_info_printer(gt->i915->drm.dev);
@@ -1890,8 +1955,11 @@ static int live_chain_preempt(void *arg)
 				       count);
 				intel_engine_dump(engine, &p,
 						  "%s\n", engine->name);
+
+				i915_request_put(rq);
 				goto err_wedged;
 			}
+			i915_request_put(rq);
 		}
 
 		if (igt_live_test_end(&t)) {
@@ -1915,6 +1983,201 @@ static int live_chain_preempt(void *arg)
 	goto err_client_lo;
 }
 
+static int create_gang(struct intel_engine_cs *engine,
+		       struct i915_request **prev)
+{
+	struct drm_i915_gem_object *obj;
+	struct intel_context *ce;
+	struct i915_request *rq;
+	struct i915_vma *vma;
+	u32 *cs;
+	int err;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	obj = i915_gem_object_create_internal(engine->i915, 4096);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_ce;
+	}
+
+	vma = i915_vma_instance(obj, ce->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_obj;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		goto err_obj;
+
+	cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(cs))
+		goto err_obj;
+
+	/* Semaphore target: spin until zero */
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_EQ_SDD;
+	*cs++ = 0;
+	*cs++ = lower_32_bits(vma->node.start);
+	*cs++ = upper_32_bits(vma->node.start);
+
+	if (*prev) {
+		u64 offset = (*prev)->batch->node.start;
+
+		/* Terminate the spinner in the next lower priority batch. */
+		*cs++ = MI_STORE_DWORD_IMM_GEN4;
+		*cs++ = lower_32_bits(offset);
+		*cs++ = upper_32_bits(offset);
+		*cs++ = 0;
+	}
+
+	*cs++ = MI_BATCH_BUFFER_END;
+	i915_gem_object_flush_map(obj);
+	i915_gem_object_unpin_map(obj);
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		goto err_obj;
+
+	rq->batch = vma;
+	i915_request_get(rq);
+
+	i915_vma_lock(vma);
+	err = i915_request_await_object(rq, vma->obj, false);
+	if (!err)
+		err = i915_vma_move_to_active(vma, rq, 0);
+	if (!err)
+		err = rq->engine->emit_bb_start(rq,
+						vma->node.start,
+						PAGE_SIZE, 0);
+	i915_vma_unlock(vma);
+	i915_request_add(rq);
+	if (err)
+		goto err_rq;
+
+	i915_gem_object_put(obj);
+	intel_context_put(ce);
+
+	rq->client_link.next = &(*prev)->client_link;
+	*prev = rq;
+	return 0;
+
+err_rq:
+	i915_request_put(rq);
+err_obj:
+	i915_gem_object_put(obj);
+err_ce:
+	intel_context_put(ce);
+	return err;
+}
+
+static int live_preempt_gang(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+		return 0;
+
+	/*
+	 * Build as long a chain of preempters as we can, with each
+	 * request higher priority than the last. Once we are ready, we release
+	 * the last batch which then percolates down the chain, each releasing
+	 * the next oldest in turn. The intent is to simply push as hard as we
+	 * can with the number of preemptions, trying to exceed narrow HW
+	 * limits. At a minimum, we insist that we can sort all the user
+	 * high priority levels into execution order.
+	 */
+
+	for_each_engine(engine, gt, id) {
+		struct i915_request *rq = NULL;
+		struct igt_live_test t;
+		IGT_TIMEOUT(end_time);
+		int prio = 0;
+		int err = 0;
+		u32 *cs;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
+			return -EIO;
+
+		do {
+			struct i915_sched_attr attr = {
+				.priority = I915_USER_PRIORITY(prio++),
+			};
+
+			err = create_gang(engine, &rq);
+			if (err)
+				break;
+
+			/* Submit each spinner at increasing priority */
+			engine->schedule(rq, &attr);
+
+			if (prio <= I915_PRIORITY_MAX)
+				continue;
+
+			if (prio > (INT_MAX >> I915_USER_PRIORITY_SHIFT))
+				break;
+
+			if (__igt_timeout(end_time, NULL))
+				break;
+		} while (1);
+		pr_debug("%s: Preempt chain of %d requests\n",
+			 engine->name, prio);
+
+		/*
+		 * Such that the last spinner is the highest priority and
+		 * should execute first. When that spinner completes,
+		 * it will terminate the next lowest spinner until there
+		 * are no more spinners and the gang is complete.
+		 */
+		cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+		if (!IS_ERR(cs)) {
+			*cs = 0;
+			i915_gem_object_unpin_map(rq->batch->obj);
+		} else {
+			err = PTR_ERR(cs);
+			intel_gt_set_wedged(gt);
+		}
+
+		while (rq) { /* wait for each rq from highest to lowest prio */
+			struct i915_request *n =
+				list_next_entry(rq, client_link);
+
+			if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
+				struct drm_printer p =
+					drm_info_printer(engine->i915->drm.dev);
+
+				pr_err("Failed to flush chain of %d requests, at %d\n",
+				       prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
+				intel_engine_dump(engine, &p,
+						  "%s\n", engine->name);
+
+				err = -ETIME;
+			}
+
+			i915_request_put(rq);
+			rq = n;
+		}
+
+		if (igt_live_test_end(&t))
+			err = -EIO;
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int live_preempt_hang(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -2391,28 +2654,18 @@ static int nop_virtual_engine(struct intel_gt *gt,
 #define CHAIN BIT(0)
 {
 	IGT_TIMEOUT(end_time);
-	struct i915_request *request[16];
-	struct i915_gem_context *ctx[16];
+	struct i915_request *request[16] = {};
 	struct intel_context *ve[16];
 	unsigned long n, prime, nc;
 	struct igt_live_test t;
 	ktime_t times[2] = {};
 	int err;
 
-	GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
+	GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
 
 	for (n = 0; n < nctx; n++) {
-		ctx[n] = kernel_context(gt->i915);
-		if (!ctx[n]) {
-			err = -ENOMEM;
-			nctx = n;
-			goto out;
-		}
-
-		ve[n] = intel_execlists_create_virtual(ctx[n],
-						       siblings, nsibling);
+		ve[n] = intel_execlists_create_virtual(siblings, nsibling);
 		if (IS_ERR(ve[n])) {
-			kernel_context_close(ctx[n]);
 			err = PTR_ERR(ve[n]);
 			nctx = n;
 			goto out;
@@ -2421,7 +2674,6 @@ static int nop_virtual_engine(struct intel_gt *gt,
 		err = intel_context_pin(ve[n]);
 		if (err) {
 			intel_context_put(ve[n]);
-			kernel_context_close(ctx[n]);
 			nctx = n;
 			goto out;
 		}
@@ -2437,27 +2689,35 @@ static int nop_virtual_engine(struct intel_gt *gt,
 		if (flags & CHAIN) {
 			for (nc = 0; nc < nctx; nc++) {
 				for (n = 0; n < prime; n++) {
-					request[nc] =
-						i915_request_create(ve[nc]);
-					if (IS_ERR(request[nc])) {
-						err = PTR_ERR(request[nc]);
+					struct i915_request *rq;
+
+					rq = i915_request_create(ve[nc]);
+					if (IS_ERR(rq)) {
+						err = PTR_ERR(rq);
 						goto out;
 					}
 
-					i915_request_add(request[nc]);
+					if (request[nc])
+						i915_request_put(request[nc]);
+					request[nc] = i915_request_get(rq);
+					i915_request_add(rq);
 				}
 			}
 		} else {
 			for (n = 0; n < prime; n++) {
 				for (nc = 0; nc < nctx; nc++) {
-					request[nc] =
-						i915_request_create(ve[nc]);
-					if (IS_ERR(request[nc])) {
-						err = PTR_ERR(request[nc]);
+					struct i915_request *rq;
+
+					rq = i915_request_create(ve[nc]);
+					if (IS_ERR(rq)) {
+						err = PTR_ERR(rq);
 						goto out;
 					}
 
-					i915_request_add(request[nc]);
+					if (request[nc])
+						i915_request_put(request[nc]);
+					request[nc] = i915_request_get(rq);
+					i915_request_add(rq);
 				}
 			}
 		}
@@ -2483,6 +2743,11 @@ static int nop_virtual_engine(struct intel_gt *gt,
 		if (prime == 1)
 			times[0] = times[1];
 
+		for (nc = 0; nc < nctx; nc++) {
+			i915_request_put(request[nc]);
+			request[nc] = NULL;
+		}
+
 		if (__igt_timeout(end_time, NULL))
 			break;
 	}
@@ -2500,9 +2765,9 @@ static int nop_virtual_engine(struct intel_gt *gt,
 		err = -EIO;
 
 	for (nc = 0; nc < nctx; nc++) {
+		i915_request_put(request[nc]);
 		intel_context_unpin(ve[nc]);
 		intel_context_put(ve[nc]);
-		kernel_context_close(ctx[nc]);
 	}
 	return err;
 }
@@ -2561,7 +2826,6 @@ static int mask_virtual_engine(struct intel_gt *gt,
 			       unsigned int nsibling)
 {
 	struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
-	struct i915_gem_context *ctx;
 	struct intel_context *ve;
 	struct igt_live_test t;
 	unsigned int n;
@@ -2572,11 +2836,7 @@ static int mask_virtual_engine(struct intel_gt *gt,
 	 * restrict it to our desired engine within the virtual engine.
 	 */
 
-	ctx = kernel_context(gt->i915);
-	if (!ctx)
-		return -ENOMEM;
-
-	ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+	ve = intel_execlists_create_virtual(siblings, nsibling);
 	if (IS_ERR(ve)) {
 		err = PTR_ERR(ve);
 		goto out_close;
@@ -2644,7 +2904,6 @@ static int mask_virtual_engine(struct intel_gt *gt,
 out_put:
 	intel_context_put(ve);
 out_close:
-	kernel_context_close(ctx);
 	return err;
 }
 
@@ -2684,7 +2943,6 @@ static int preserved_virtual_engine(struct intel_gt *gt,
 				    unsigned int nsibling)
 {
 	struct i915_request *last = NULL;
-	struct i915_gem_context *ctx;
 	struct intel_context *ve;
 	struct i915_vma *scratch;
 	struct igt_live_test t;
@@ -2692,17 +2950,11 @@ static int preserved_virtual_engine(struct intel_gt *gt,
 	int err = 0;
 	u32 *cs;
 
-	ctx = kernel_context(gt->i915);
-	if (!ctx)
-		return -ENOMEM;
-
 	scratch = create_scratch(siblings[0]->gt);
-	if (IS_ERR(scratch)) {
-		err = PTR_ERR(scratch);
-		goto out_close;
-	}
+	if (IS_ERR(scratch))
+		return PTR_ERR(scratch);
 
-	ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+	ve = intel_execlists_create_virtual(siblings, nsibling);
 	if (IS_ERR(ve)) {
 		err = PTR_ERR(ve);
 		goto out_scratch;
@@ -2785,8 +3037,6 @@ static int preserved_virtual_engine(struct intel_gt *gt,
 	intel_context_put(ve);
 out_scratch:
 	i915_vma_unpin_and_release(&scratch, 0);
-out_close:
-	kernel_context_close(ctx);
 	return err;
 }
 
@@ -2838,16 +3088,54 @@ static int bond_virtual_engine(struct intel_gt *gt,
 #define BOND_SCHEDULE BIT(0)
 {
 	struct intel_engine_cs *master;
-	struct i915_gem_context *ctx;
 	struct i915_request *rq[16];
 	enum intel_engine_id id;
+	struct igt_spinner spin;
 	unsigned long n;
 	int err;
 
+	/*
+	 * A set of bonded requests is intended to be run concurrently
+	 * across a number of engines. We use one request per-engine
+	 * and a magic fence to schedule each of the bonded requests
+	 * at the same time. A consequence of our current scheduler is that
+	 * we only move requests to the HW ready queue when the request
+	 * becomes ready, that is when all of its prerequisite fences have
+	 * been signaled. As one of those fences is the master submit fence,
+	 * there is a delay on all secondary fences as the HW may be
+	 * currently busy. Equally, as all the requests are independent,
+	 * they may have other fences that delay individual request
+	 * submission to HW. Ergo, we do not guarantee that all requests are
+	 * immediately submitted to HW at the same time, just that if the
+	 * rules are abided by, they are ready at the same time as the
+	 * first is submitted. Userspace can embed semaphores in its batch
+	 * to ensure parallel execution of its phases as it requires.
+	 * Though naturally it gets requested that perhaps the scheduler should
+	 * take care of parallel execution, even across preemption events on
+	 * different HW. (The proper answer is of course "lalalala".)
+	 *
+	 * With the submit-fence, we have identified three possible phases
+	 * of synchronisation depending on the master fence: queued (not
+	 * ready), executing, and signaled. The first two are quite simple
+	 * and checked below. However, the signaled master fence handling is
+	 * contentious. Currently we do not distinguish between a signaled
+	 * fence and an expired fence, as once signaled it does not convey
+	 * any information about the previous execution. It may even be freed
+	 * and hence checking later it may not exist at all. Ergo we currently
+	 * do not apply the bonding constraint for an already signaled fence,
+	 * as our expectation is that it should not constrain the secondaries
+	 * and is outside of the scope of the bonded request API (i.e. all
+	 * userspace requests are meant to be running in parallel). As
+	 * it imposes no constraint, and is effectively a no-op, we do not
+	 * check below as normal execution flows are checked extensively above.
+	 *
+	 * XXX Is the degenerate handling of signaled submit fences the
+	 * expected behaviour for userspace?
+	 */
+
 	GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
 
-	ctx = kernel_context(gt->i915);
-	if (!ctx)
+	if (igt_spinner_init(&spin, gt))
 		return -ENOMEM;
 
 	err = 0;
@@ -2860,7 +3148,9 @@ static int bond_virtual_engine(struct intel_gt *gt,
 
 		memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
 
-		rq[0] = igt_request_alloc(ctx, master);
+		rq[0] = igt_spinner_create_request(&spin,
+						   master->kernel_context,
+						   MI_NOOP);
 		if (IS_ERR(rq[0])) {
 			err = PTR_ERR(rq[0]);
 			goto out;
@@ -2873,16 +3163,21 @@ static int bond_virtual_engine(struct intel_gt *gt,
 							       &fence,
 							       GFP_KERNEL);
 		}
+
 		i915_request_add(rq[0]);
 		if (err < 0)
 			goto out;
 
+		if (!(flags & BOND_SCHEDULE) &&
+		    !igt_wait_for_spinner(&spin, rq[0])) {
+			err = -EIO;
+			goto out;
+		}
+
 		for (n = 0; n < nsibling; n++) {
 			struct intel_context *ve;
 
-			ve = intel_execlists_create_virtual(ctx,
-							    siblings,
-							    nsibling);
+			ve = intel_execlists_create_virtual(siblings, nsibling);
 			if (IS_ERR(ve)) {
 				err = PTR_ERR(ve);
 				onstack_fence_fini(&fence);
@@ -2924,6 +3219,8 @@ static int bond_virtual_engine(struct intel_gt *gt,
 			}
 		}
 		onstack_fence_fini(&fence);
+		intel_engine_flush_submission(master);
+		igt_spinner_end(&spin);
 
 		if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
 			pr_err("Master request did not execute (on %s)!\n",
@@ -2960,7 +3257,7 @@ static int bond_virtual_engine(struct intel_gt *gt,
 	if (igt_flush_test(gt->i915))
 		err = -EIO;
 
-	kernel_context_close(ctx);
+	igt_spinner_fini(&spin);
 	return err;
 }
 
@@ -3028,6 +3325,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_suppress_self_preempt),
 		SUBTEST(live_suppress_wait_preempt),
 		SUBTEST(live_chain_preempt),
+		SUBTEST(live_preempt_gang),
 		SUBTEST(live_preempt_hang),
 		SUBTEST(live_preempt_timeout),
 		SUBTEST(live_preempt_smoke),
@@ -3080,7 +3378,7 @@ static int live_lrc_layout(void *arg)
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	u32 *mem;
+	u32 *lrc;
 	int err;
 
 	/*
@@ -3088,13 +3386,13 @@ static int live_lrc_layout(void *arg)
 	 * match the layout saved by HW.
 	 */
 
-	mem = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!mem)
+	lrc = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!lrc)
 		return -ENOMEM;
 
 	err = 0;
 	for_each_engine(engine, gt, id) {
-		u32 *hw, *lrc;
+		u32 *hw;
 		int dw;
 
 		if (!engine->default_state)
@@ -3108,8 +3406,7 @@ static int live_lrc_layout(void *arg)
 		}
 		hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
 
-		lrc = memset(mem, 0, PAGE_SIZE);
-		execlists_init_reg_state(lrc,
+		execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
 					 engine->kernel_context,
 					 engine,
 					 engine->kernel_context->ring,
@@ -3124,6 +3421,13 @@ static int live_lrc_layout(void *arg)
 				continue;
 			}
 
+			if (lrc[dw] == 0) {
+				pr_debug("%s: skipped instruction %x at dword %d\n",
+					 engine->name, lri, dw);
+				dw++;
+				continue;
+			}
+
 			if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
 				pr_err("%s: Expected LRI command at dword %d, found %08x\n",
 				       engine->name, dw, lri);
@@ -3172,7 +3476,7 @@ static int live_lrc_layout(void *arg)
 			break;
 	}
 
-	kfree(mem);
+	kfree(lrc);
 	return err;
 }
 
@@ -3207,12 +3511,12 @@ static int live_lrc_fixed(void *arg)
 		} tbl[] = {
 			{
 				i915_mmio_reg_offset(RING_START(engine->mmio_base)),
-				CTX_RING_BUFFER_START - 1,
+				CTX_RING_START - 1,
 				"RING_START"
 			},
 			{
 				i915_mmio_reg_offset(RING_CTL(engine->mmio_base)),
-				CTX_RING_BUFFER_CONTROL - 1,
+				CTX_RING_CTL - 1,
 				"RING_CTL"
 			},
 			{
@@ -3231,7 +3535,7 @@ static int live_lrc_fixed(void *arg)
 				"RING_MI_MODE"
 			},
 			{
-				engine->mmio_base + 0x110,
+				i915_mmio_reg_offset(RING_BBSTATE(engine->mmio_base)),
 				CTX_BB_STATE - 1,
 				"BB_STATE"
 			},
@@ -3270,8 +3574,7 @@ static int live_lrc_fixed(void *arg)
 	return err;
 }
 
-static int __live_lrc_state(struct i915_gem_context *fixme,
-			    struct intel_engine_cs *engine,
+static int __live_lrc_state(struct intel_engine_cs *engine,
 			    struct i915_vma *scratch)
 {
 	struct intel_context *ce;
@@ -3286,7 +3589,7 @@ static int __live_lrc_state(struct i915_gem_context *fixme,
 	int err;
 	int n;
 
-	ce = intel_context_create(fixme, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
@@ -3360,7 +3663,6 @@ static int live_lrc_state(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	struct i915_vma *scratch;
 	enum intel_engine_id id;
 	int err = 0;
@@ -3370,18 +3672,12 @@ static int live_lrc_state(void *arg)
 	 * intel_context.
 	 */
 
-	fixme = kernel_context(gt->i915);
-	if (!fixme)
-		return -ENOMEM;
-
 	scratch = create_scratch(gt);
-	if (IS_ERR(scratch)) {
-		err = PTR_ERR(scratch);
-		goto out_close;
-	}
+	if (IS_ERR(scratch))
+		return PTR_ERR(scratch);
 
 	for_each_engine(engine, gt, id) {
-		err = __live_lrc_state(fixme, engine, scratch);
+		err = __live_lrc_state(engine, scratch);
 		if (err)
 			break;
 	}
@@ -3390,8 +3686,6 @@ static int live_lrc_state(void *arg)
 		err = -EIO;
 
 	i915_vma_unpin_and_release(&scratch, 0);
-out_close:
-	kernel_context_close(fixme);
 	return err;
 }
 
@@ -3401,7 +3695,7 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
 	u32 *cs;
 	int n;
 
-	rq = i915_request_create(engine->kernel_context);
+	rq = intel_engine_create_kernel_request(engine);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
@@ -3424,8 +3718,7 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
 	return 0;
 }
 
-static int __live_gpr_clear(struct i915_gem_context *fixme,
-			    struct intel_engine_cs *engine,
+static int __live_gpr_clear(struct intel_engine_cs *engine,
 			    struct i915_vma *scratch)
 {
 	struct intel_context *ce;
@@ -3441,7 +3734,7 @@ static int __live_gpr_clear(struct i915_gem_context *fixme,
 	if (err)
 		return err;
 
-	ce = intel_context_create(fixme, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
@@ -3503,7 +3796,6 @@ static int live_gpr_clear(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	struct i915_vma *scratch;
 	enum intel_engine_id id;
 	int err = 0;
@@ -3513,18 +3805,12 @@ static int live_gpr_clear(void *arg)
 	 * to avoid leaking any information from previous contexts.
 	 */
 
-	fixme = kernel_context(gt->i915);
-	if (!fixme)
-		return -ENOMEM;
-
 	scratch = create_scratch(gt);
-	if (IS_ERR(scratch)) {
-		err = PTR_ERR(scratch);
-		goto out_close;
-	}
+	if (IS_ERR(scratch))
+		return PTR_ERR(scratch);
 
 	for_each_engine(engine, gt, id) {
-		err = __live_gpr_clear(fixme, engine, scratch);
+		err = __live_gpr_clear(engine, scratch);
 		if (err)
 			break;
 	}
@@ -3533,8 +3819,6 @@ static int live_gpr_clear(void *arg)
 		err = -EIO;
 
 	i915_vma_unpin_and_release(&scratch, 0);
-out_close:
-	kernel_context_close(fixme);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
new file mode 100644
index 0000000..de1f831
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -0,0 +1,419 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gt/intel_engine_pm.h"
+#include "i915_selftest.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+
+struct live_mocs {
+	struct drm_i915_mocs_table table;
+	struct i915_vma *scratch;
+	void *vaddr;
+};
+
+static int request_add_sync(struct i915_request *rq, int err)
+{
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (i915_request_wait(rq, 0, HZ / 5) < 0)
+		err = -ETIME;
+	i915_request_put(rq);
+
+	return err;
+}
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+	int err = 0;
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (spin && !igt_wait_for_spinner(spin, rq))
+		err = -ETIME;
+	i915_request_put(rq);
+
+	return err;
+}
+
+static struct i915_vma *create_scratch(struct intel_gt *gt)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int err;
+
+	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+	if (IS_ERR(vma)) {
+		i915_gem_object_put(obj);
+		return vma;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+	if (err) {
+		i915_gem_object_put(obj);
+		return ERR_PTR(err);
+	}
+
+	return vma;
+}
+
+static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
+{
+	int err;
+
+	if (!get_mocs_settings(gt->i915, &arg->table))
+		return -EINVAL;
+
+	arg->scratch = create_scratch(gt);
+	if (IS_ERR(arg->scratch))
+		return PTR_ERR(arg->scratch);
+
+	arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
+	if (IS_ERR(arg->vaddr)) {
+		err = PTR_ERR(arg->vaddr);
+		goto err_scratch;
+	}
+
+	return 0;
+
+err_scratch:
+	i915_vma_unpin_and_release(&arg->scratch, 0);
+	return err;
+}
+
+static void live_mocs_fini(struct live_mocs *arg)
+{
+	i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
+}
+
+static int read_regs(struct i915_request *rq,
+		     u32 addr, unsigned int count,
+		     uint32_t *offset)
+{
+	unsigned int i;
+	u32 *cs;
+
+	GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));
+
+	cs = intel_ring_begin(rq, 4 * count);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	for (i = 0; i < count; i++) {
+		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+		*cs++ = addr;
+		*cs++ = *offset;
+		*cs++ = 0;
+
+		addr += sizeof(u32);
+		*offset += sizeof(u32);
+	}
+
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+static int read_mocs_table(struct i915_request *rq,
+			   const struct drm_i915_mocs_table *table,
+			   uint32_t *offset)
+{
+	u32 addr;
+
+	if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
+		addr = global_mocs_offset();
+	else
+		addr = mocs_offset(rq->engine);
+
+	return read_regs(rq, addr, table->n_entries, offset);
+}
+
+static int read_l3cc_table(struct i915_request *rq,
+			   const struct drm_i915_mocs_table *table,
+			   uint32_t *offset)
+{
+	u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
+
+	return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
+}
+
+static int check_mocs_table(struct intel_engine_cs *engine,
+			    const struct drm_i915_mocs_table *table,
+			    uint32_t **vaddr)
+{
+	unsigned int i;
+	u32 expect;
+
+	for_each_mocs(expect, table, i) {
+		if (**vaddr != expect) {
+			pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
+			       engine->name, i, **vaddr, expect);
+			return -EINVAL;
+		}
+		++*vaddr;
+	}
+
+	return 0;
+}
+
+static bool mcr_range(struct drm_i915_private *i915, u32 offset)
+{
+	/*
+	 * Registers in this range are affected by the MCR selector
+	 * which only controls CPU initiated MMIO. Routing does not
+	 * work for CS access so we cannot verify them on this path.
+	 */
+	return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
+}
+
+static int check_l3cc_table(struct intel_engine_cs *engine,
+			    const struct drm_i915_mocs_table *table,
+			    uint32_t **vaddr)
+{
+	/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
+	u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
+	unsigned int i;
+	u32 expect;
+
+	for_each_l3cc(expect, table, i) {
+		if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
+			pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
+			       engine->name, i, **vaddr, expect);
+			return -EINVAL;
+		}
+		++*vaddr;
+		reg += 4;
+	}
+
+	return 0;
+}
+
+static int check_mocs_engine(struct live_mocs *arg,
+			     struct intel_context *ce)
+{
+	struct i915_vma *vma = arg->scratch;
+	struct i915_request *rq;
+	u32 offset;
+	u32 *vaddr;
+	int err;
+
+	memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	i915_vma_lock(vma);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (!err)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(vma);
+
+	/* Read the mocs tables back using SRM */
+	offset = i915_ggtt_offset(vma);
+	if (!err)
+		err = read_mocs_table(rq, &arg->table, &offset);
+	if (!err && ce->engine->class == RENDER_CLASS)
+		err = read_l3cc_table(rq, &arg->table, &offset);
+	offset -= i915_ggtt_offset(vma);
+	GEM_BUG_ON(offset > PAGE_SIZE);
+
+	err = request_add_sync(rq, err);
+	if (err)
+		return err;
+
+	/* Compare the results against the expected tables */
+	vaddr = arg->vaddr;
+	if (!err)
+		err = check_mocs_table(ce->engine, &arg->table, &vaddr);
+	if (!err && ce->engine->class == RENDER_CLASS)
+		err = check_l3cc_table(ce->engine, &arg->table, &vaddr);
+	if (err)
+		return err;
+
+	GEM_BUG_ON(arg->vaddr + offset != vaddr);
+	return 0;
+}
+
+static int live_mocs_kernel(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	struct live_mocs mocs;
+	int err;
+
+	/* Basic check the system is configured with the expected mocs table */
+
+	err = live_mocs_init(&mocs, gt);
+	if (err)
+		return err;
+
+	for_each_engine(engine, gt, id) {
+		intel_engine_pm_get(engine);
+		err = check_mocs_engine(&mocs, engine->kernel_context);
+		intel_engine_pm_put(engine);
+		if (err)
+			break;
+	}
+
+	live_mocs_fini(&mocs);
+	return err;
+}
+
+static int live_mocs_clean(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	struct live_mocs mocs;
+	int err;
+
+	/* Every new context should see the same mocs table */
+
+	err = live_mocs_init(&mocs, gt);
+	if (err)
+		return err;
+
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			break;
+		}
+
+		err = check_mocs_engine(&mocs, ce);
+		intel_context_put(ce);
+		if (err)
+			break;
+	}
+
+	live_mocs_fini(&mocs);
+	return err;
+}
+
+static int active_engine_reset(struct intel_context *ce,
+			       const char *reason)
+{
+	struct igt_spinner spin;
+	struct i915_request *rq;
+	int err;
+
+	err = igt_spinner_init(&spin, ce->engine->gt);
+	if (err)
+		return err;
+
+	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+	if (IS_ERR(rq)) {
+		igt_spinner_fini(&spin);
+		return PTR_ERR(rq);
+	}
+
+	err = request_add_spin(rq, &spin);
+	if (err == 0)
+		err = intel_engine_reset(ce->engine, reason);
+
+	igt_spinner_end(&spin);
+	igt_spinner_fini(&spin);
+
+	return err;
+}
+
+static int __live_mocs_reset(struct live_mocs *mocs,
+			     struct intel_context *ce)
+{
+	int err;
+
+	err = intel_engine_reset(ce->engine, "mocs");
+	if (err)
+		return err;
+
+	err = check_mocs_engine(mocs, ce);
+	if (err)
+		return err;
+
+	err = active_engine_reset(ce, "mocs");
+	if (err)
+		return err;
+
+	err = check_mocs_engine(mocs, ce);
+	if (err)
+		return err;
+
+	intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");
+
+	err = check_mocs_engine(mocs, ce);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int live_mocs_reset(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	struct live_mocs mocs;
+	int err = 0;
+
+	/* Check the mocs setup is retained over per-engine and global resets */
+
+	if (!intel_has_reset_engine(gt))
+		return 0;
+
+	err = live_mocs_init(&mocs, gt);
+	if (err)
+		return err;
+
+	igt_global_reset_lock(gt);
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			break;
+		}
+
+		intel_engine_pm_get(engine);
+		err = __live_mocs_reset(&mocs, ce);
+		intel_engine_pm_put(engine);
+
+		intel_context_put(ce);
+		if (err)
+			break;
+	}
+	igt_global_reset_unlock(gt);
+
+	live_mocs_fini(&mocs);
+	return err;
+}
+
+int intel_mocs_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(live_mocs_kernel),
+		SUBTEST(live_mocs_clean),
+		SUBTEST(live_mocs_reset),
+	};
+	struct drm_i915_mocs_table table;
+
+	if (!get_mocs_settings(i915, &table))
+		return 0;
+
+	return intel_gt_live_subtests(tests, &i915->gt);
+}
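
The new MOCS selftest leans on the small request_add_sync() helper above: take a reference, submit the request, wait with a bounded timeout, then drop the reference, threading the err parameter through so that an earlier failure is still reported once the request has been flushed. A minimal sketch of that pattern, using a hypothetical caller name and only the request API already used in this patch, looks roughly like this:

static int submit_and_wait_example(struct intel_context *ce)
{
	struct i915_request *rq;
	int err = 0;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Keep a reference across i915_request_add(), which hands the
	 * request over to the scheduler.
	 */
	i915_request_get(rq);
	i915_request_add(rq);

	/* Bounded wait: a stuck request yields -ETIME instead of hanging
	 * the selftest forever.
	 */
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;

	i915_request_put(rq);
	return err;
}
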
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
new file mode 100644
index 0000000..8cc55a0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -0,0 +1,203 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_ring.h"
+#include "selftest_rc6.h"
+
+#include "selftests/i915_random.h"
+
+int live_rc6_manual(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_rc6 *rc6 = &gt->rc6;
+	intel_wakeref_t wakeref;
+	u64 res[2];
+	int err = 0;
+
+	/*
+	 * Our claim is that we can "encourage" the GPU to enter rc6 at will.
+	 * Let's try it!
+	 */
+
+	if (!rc6->enabled)
+		return 0;
+
+	/* bsw/byt use a PCU and decouple RC6 from our manual control */
+	if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
+		return 0;
+
+	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+	/* Force RC6 off for starters */
+	__intel_rc6_disable(rc6);
+	msleep(1); /* wakeup is not immediate, takes about 100us on icl */
+
+	res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+	msleep(250);
+	res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+	if ((res[1] - res[0]) >> 10) {
+		pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
+		       (res[1] - res[0]) >> 10);
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	/* Manually enter RC6 */
+	intel_rc6_park(rc6);
+
+	res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+	msleep(100);
+	res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+
+	if (res[1] == res[0]) {
+		pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x\n",
+		       intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
+		       intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL));
+		err = -EINVAL;
+	}
+
+	/* Restore what should have been the original state! */
+	intel_rc6_unpark(rc6);
+
+out_unlock:
+	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+	return err;
+}
+
+static const u32 *__live_rc6_ctx(struct intel_context *ce)
+{
+	struct i915_request *rq;
+	const u32 *result;
+	u32 cmd;
+	u32 *cs;
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return ERR_CAST(rq);
+
+	cs = intel_ring_begin(rq, 4);
+	if (IS_ERR(cs)) {
+		i915_request_add(rq);
+		return cs;
+	}
+
+	cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
+	if (INTEL_GEN(rq->i915) >= 8)
+		cmd++;
+
+	*cs++ = cmd;
+	*cs++ = i915_mmio_reg_offset(GEN8_RC6_CTX_INFO);
+	*cs++ = ce->timeline->hwsp_offset + 8;
+	*cs++ = 0;
+	intel_ring_advance(rq, cs);
+
+	result = rq->hwsp_seqno + 2;
+	i915_request_add(rq);
+
+	return result;
+}
+
+static struct intel_engine_cs **
+randomised_engines(struct intel_gt *gt,
+		   struct rnd_state *prng,
+		   unsigned int *count)
+{
+	struct intel_engine_cs *engine, **engines;
+	enum intel_engine_id id;
+	int n;
+
+	n = 0;
+	for_each_engine(engine, gt, id)
+		n++;
+	if (!n)
+		return NULL;
+
+	engines = kmalloc_array(n, sizeof(*engines), GFP_KERNEL);
+	if (!engines)
+		return NULL;
+
+	n = 0;
+	for_each_engine(engine, gt, id)
+		engines[n++] = engine;
+
+	i915_prandom_shuffle(engines, sizeof(*engines), n, prng);
+
+	*count = n;
+	return engines;
+}
+
+int live_rc6_ctx_wa(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs **engines;
+	unsigned int n, count;
+	I915_RND_STATE(prng);
+	int err = 0;
+
+	/* A read of CTX_INFO upsets rc6. Poke the bear! */
+	if (INTEL_GEN(gt->i915) < 8)
+		return 0;
+
+	engines = randomised_engines(gt, &prng, &count);
+	if (!engines)
+		return 0;
+
+	for (n = 0; n < count; n++) {
+		struct intel_engine_cs *engine = engines[n];
+		int pass;
+
+		for (pass = 0; pass < 2; pass++) {
+			struct intel_context *ce;
+			unsigned int resets =
+				i915_reset_engine_count(&gt->i915->gpu_error,
+							engine);
+			const u32 *res;
+
+			/* Use a sacrificial context */
+			ce = intel_context_create(engine);
+			if (IS_ERR(ce)) {
+				err = PTR_ERR(ce);
+				goto out;
+			}
+
+			intel_engine_pm_get(engine);
+			res = __live_rc6_ctx(ce);
+			intel_engine_pm_put(engine);
+			intel_context_put(ce);
+			if (IS_ERR(res)) {
+				err = PTR_ERR(res);
+				goto out;
+			}
+
+			if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
+				intel_gt_set_wedged(gt);
+				err = -ETIME;
+				goto out;
+			}
+
+			intel_gt_pm_wait_for_idle(gt);
+			pr_debug("%s: CTX_INFO=%0x\n",
+				 engine->name, READ_ONCE(*res));
+
+			if (resets !=
+			    i915_reset_engine_count(&gt->i915->gpu_error,
+						    engine)) {
+				pr_err("%s: GPU reset required\n",
+				       engine->name);
+				add_taint_for_CI(TAINT_WARN);
+				err = -EIO;
+				goto out;
+			}
+		}
+	}
+
+out:
+	kfree(engines);
+	return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.h b/drivers/gpu/drm/i915/gt/selftest_rc6.h
new file mode 100644
index 0000000..762fd44
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.h
@@ -0,0 +1,13 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_RC6_H
+#define SELFTEST_RC6_H
+
+int live_rc6_ctx_wa(void *arg);
+int live_rc6_manual(void *arg);
+
+#endif /* SELFTEST_RC6_H */
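
The claim in live_rc6_manual() is straightforward: with RC6 forced off the residency counter should not advance, and after intel_rc6_park() it should. Stripped of the selftest plumbing, the measurement is just two residency samples taken around a sleep; a rough sketch (the function name is illustrative, the helpers are the ones used in the patch above):

static bool rc6_residency_advanced(struct intel_rc6 *rc6)
{
	u64 start, end;

	/* GEN6_GT_GFX_RC6 reports accumulated RC6 time in nanoseconds. */
	start = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
	msleep(100);
	end = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);

	/* Any increase means the GT spent part of the sleep in RC6. */
	return end > start;
}
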
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index f04a59f..e2d78cc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -458,7 +458,7 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
 		goto out;
 	}
 
-	rq = i915_request_create(engine->kernel_context);
+	rq = intel_engine_create_kernel_request(engine);
 	if (IS_ERR(rq))
 		goto out_unpin;
 
@@ -675,9 +675,7 @@ static int live_hwsp_wrap(void *arg)
 		if (!intel_engine_can_store_dword(engine))
 			continue;
 
-		intel_engine_pm_get(engine);
-		rq = i915_request_create(engine->kernel_context);
-		intel_engine_pm_put(engine);
+		rq = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto out;
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index abce6e4..ac19218 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -264,22 +264,15 @@ static int
 switch_to_scratch_context(struct intel_engine_cs *engine,
 			  struct igt_spinner *spin)
 {
-	struct i915_gem_context *ctx;
 	struct intel_context *ce;
 	struct i915_request *rq;
 	int err = 0;
 
-	ctx = kernel_context(engine->i915);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
-
-	GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
-
-	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
-	GEM_BUG_ON(IS_ERR(ce));
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
 
 	rq = igt_spinner_create_request(spin, ce, MI_NOOP);
-
 	intel_context_put(ce);
 
 	if (IS_ERR(rq)) {
@@ -293,7 +286,6 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
 	if (err && spin)
 		igt_spinner_end(spin);
 
-	kernel_context_close(ctx);
 	return err;
 }
 
@@ -367,20 +359,17 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 	return err;
 }
 
-static struct i915_vma *create_batch(struct i915_gem_context *ctx)
+static struct i915_vma *create_batch(struct i915_address_space *vm)
 {
 	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm;
 	struct i915_vma *vma;
 	int err;
 
-	obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
+	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	vm = i915_gem_context_get_vm_rcu(ctx);
 	vma = i915_vma_instance(obj, vm, NULL);
-	i915_vm_put(vm);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err_obj;
@@ -452,8 +441,7 @@ static int whitelist_writable_count(struct intel_engine_cs *engine)
 	return count;
 }
 
-static int check_dirty_whitelist(struct i915_gem_context *ctx,
-				 struct intel_engine_cs *engine)
+static int check_dirty_whitelist(struct intel_context *ce)
 {
 	const u32 values[] = {
 		0x00000000,
@@ -481,19 +469,17 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
 		0xffff00ff,
 		0xffffffff,
 	};
-	struct i915_address_space *vm;
+	struct intel_engine_cs *engine = ce->engine;
 	struct i915_vma *scratch;
 	struct i915_vma *batch;
 	int err = 0, i, v;
 	u32 *cs, *results;
 
-	vm = i915_gem_context_get_vm_rcu(ctx);
-	scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1);
-	i915_vm_put(vm);
+	scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
 	if (IS_ERR(scratch))
 		return PTR_ERR(scratch);
 
-	batch = create_batch(ctx);
+	batch = create_batch(ce->vm);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
 		goto out_scratch;
@@ -518,7 +504,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
 
 		srm = MI_STORE_REGISTER_MEM;
 		lrm = MI_LOAD_REGISTER_MEM;
-		if (INTEL_GEN(ctx->i915) >= 8)
+		if (INTEL_GEN(engine->i915) >= 8)
 			lrm++, srm++;
 
 		pr_debug("%s: Writing garbage to %x\n",
@@ -577,7 +563,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
 		i915_gem_object_unpin_map(batch->obj);
 		intel_gt_chipset_flush(engine->gt);
 
-		rq = igt_request_alloc(ctx, engine);
+		rq = intel_context_create_request(ce);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto out_batch;
@@ -696,7 +682,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
 			break;
 	}
 
-	if (igt_flush_test(ctx->i915))
+	if (igt_flush_test(engine->i915))
 		err = -EIO;
 out_batch:
 	i915_vma_unpin_and_release(&batch, 0);
@@ -709,38 +695,31 @@ static int live_dirty_whitelist(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *ctx;
 	enum intel_engine_id id;
-	struct drm_file *file;
-	int err = 0;
 
 	/* Can the user write to the whitelisted registers? */
 
 	if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
 		return 0;
 
-	file = mock_file(gt->i915);
-	if (IS_ERR(file))
-		return PTR_ERR(file);
-
-	ctx = live_context(gt->i915, file);
-	if (IS_ERR(ctx)) {
-		err = PTR_ERR(ctx);
-		goto out_file;
-	}
-
 	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
+		int err;
+
 		if (engine->whitelist.count == 0)
 			continue;
 
-		err = check_dirty_whitelist(ctx, engine);
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce))
+			return PTR_ERR(ce);
+
+		err = check_dirty_whitelist(ce);
+		intel_context_put(ce);
 		if (err)
-			goto out_file;
+			return err;
 	}
 
-out_file:
-	mock_file_free(gt->i915, file);
-	return err;
+	return 0;
 }
 
 static int live_reset_whitelist(void *arg)
@@ -830,12 +809,15 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
 static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
 				       struct intel_engine_cs *engine)
 {
+	struct i915_address_space *vm;
 	struct i915_request *rq;
 	struct i915_vma *batch;
 	int i, err = 0;
 	u32 *cs;
 
-	batch = create_batch(ctx);
+	vm = i915_gem_context_get_vm_rcu(ctx);
+	batch = create_batch(vm);
+	i915_vm_put(vm);
 	if (IS_ERR(batch))
 		return PTR_ERR(batch);
 
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
index 2a77c05..aeb1d1f 100644
--- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
@@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context)
 
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
+	INIT_ACTIVE_FENCE(&timeline->last_request);
 	INIT_LIST_HEAD(&timeline->requests);
 
 	i915_syncmap_init(&timeline->sync);
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
index 689efc6..d2bcc3d 100644
--- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
@@ -7,6 +7,8 @@
 #ifndef __MOCK_TIMELINE__
 #define __MOCK_TIMELINE__
 
+#include <linux/types.h>
+
 struct intel_timeline;
 
 void mock_timeline_init(struct intel_timeline *timeline, u64 context);
diff --git a/drivers/gpu/drm/i915/gt/uc/Makefile b/drivers/gpu/drm/i915/gt/uc/Makefile
deleted file mode 100644
index bec94d434..0000000
--- a/drivers/gpu/drm/i915/gt/uc/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/../..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 3ee4a4e..5d00a3b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -32,18 +32,17 @@
  * just the HuC, but more are expected to land in the future).
  */
 
-static void gen8_guc_raise_irq(struct intel_guc *guc)
+void intel_guc_notify(struct intel_guc *guc)
 {
 	struct intel_gt *gt = guc_to_gt(guc);
 
-	intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
-}
-
-static void gen11_guc_raise_irq(struct intel_guc *guc)
-{
-	struct intel_gt *gt = guc_to_gt(guc);
-
-	intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0);
+	/*
+	 * On Gen11+, the value written to the register is passed as a payload
+	 * to the FW. However, the FW currently treats all values the same way
+	 * (H2G interrupt), so we can just write the value that the HW expects
+	 * on older gens.
+	 */
+	intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
 }
 
 static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
@@ -177,15 +176,13 @@ void intel_guc_init_early(struct intel_guc *guc)
 
 	mutex_init(&guc->send_mutex);
 	spin_lock_init(&guc->irq_lock);
-	guc->send = intel_guc_send_nop;
-	guc->handler = intel_guc_to_host_event_handler_nop;
 	if (INTEL_GEN(i915) >= 11) {
-		guc->notify = gen11_guc_raise_irq;
+		guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
 		guc->interrupts.reset = gen11_reset_guc_interrupts;
 		guc->interrupts.enable = gen11_enable_guc_interrupts;
 		guc->interrupts.disable = gen11_disable_guc_interrupts;
 	} else {
-		guc->notify = gen8_guc_raise_irq;
+		guc->notify_reg = GUC_SEND_INTERRUPT;
 		guc->interrupts.reset = gen9_reset_guc_interrupts;
 		guc->interrupts.enable = gen9_enable_guc_interrupts;
 		guc->interrupts.disable = gen9_disable_guc_interrupts;
@@ -401,18 +398,8 @@ void intel_guc_fini(struct intel_guc *guc)
 	intel_guc_log_destroy(&guc->log);
 	intel_uc_fw_fini(&guc->fw);
 	intel_uc_fw_cleanup_fetch(&guc->fw);
-}
 
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
-		       u32 *response_buf, u32 response_buf_size)
-{
-	WARN(1, "Unexpected send: action=%#x\n", *action);
-	return -ENODEV;
-}
-
-void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
-{
-	WARN(1, "Unexpected event: no suitable handler\n");
+	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_DISABLED);
 }
 
 /*
@@ -704,3 +691,37 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
 	i915_gem_object_put(obj);
 	return vma;
 }
+
+/**
+ * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
+ * @guc:	the guc
+ * @size:	size of area to allocate (both virtual space and memory)
+ * @out_vma:	return variable for the allocated vma pointer
+ * @out_vaddr:	return variable for the obj mapping
+ *
+ * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
+ * object with I915_MAP_WB.
+ *
+ * Return:	0 if successful, a negative errno code otherwise.
+ */
+int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
+				   struct i915_vma **out_vma, void **out_vaddr)
+{
+	struct i915_vma *vma;
+	void *vaddr;
+
+	vma = intel_guc_allocate_vma(guc, size);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		i915_vma_unpin_and_release(&vma, 0);
+		return PTR_ERR(vaddr);
+	}
+
+	*out_vma = vma;
+	*out_vaddr = vaddr;
+
+	return 0;
+}
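
Later hunks convert the ADS and CT setup paths to this wrapper; the matching teardown is a single i915_vma_unpin_and_release() with I915_VMA_RELEASE_MAP, so the CPU mapping, the pin and the reference all go away together. A hypothetical caller, sketched here only for illustration, pairs the two like so:

static int guc_blob_example(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;
	int err;

	/* Allocate the GGTT vma and map it write-back in one call. */
	err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &vma, &vaddr);
	if (err)
		return err;

	memset(vaddr, 0, PAGE_SIZE); /* ... fill in the blob ... */

	/* Drop the mapping together with the pin and the vma reference. */
	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
	return 0;
}
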
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index e640020..910d495 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -20,8 +20,8 @@ struct __guc_ads_blob;
 
 /*
  * Top level structure of GuC. It handles firmware loading and manages client
- * pool and doorbells. intel_guc owns a intel_guc_client to replace the legacy
- * ExecList submission.
+ * pool. intel_guc owns a intel_guc_client to replace the legacy ExecList
+ * submission.
  */
 struct intel_guc {
 	struct intel_uc_fw fw;
@@ -46,13 +46,13 @@ struct intel_guc {
 
 	struct i915_vma *stage_desc_pool;
 	void *stage_desc_pool_vaddr;
-	struct ida stage_ids;
 
-	struct intel_guc_client *execbuf_client;
+	struct i915_vma *workqueue;
+	void *workqueue_vaddr;
+	spinlock_t wq_lock;
 
-	DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
-	/* Cyclic counter mod pagesize	*/
-	u32 db_cacheline;
+	struct i915_vma *proc_desc;
+	void *proc_desc_vaddr;
 
 	/* Control params for fw initialization */
 	u32 params[GUC_CTL_MAX_DWORDS];
@@ -64,44 +64,33 @@ struct intel_guc {
 		enum forcewake_domains fw_domains;
 	} send_regs;
 
+	/* register used to send interrupts to the GuC FW */
+	i915_reg_t notify_reg;
+
 	/* Store msg (e.g. log flush) that we see while CTBs are disabled */
 	u32 mmio_msg;
 
 	/* To serialize the intel_guc_send actions */
 	struct mutex send_mutex;
-
-	/* GuC's FW specific send function */
-	int (*send)(struct intel_guc *guc, const u32 *data, u32 len,
-		    u32 *response_buf, u32 response_buf_size);
-
-	/* GuC's FW specific event handler function */
-	void (*handler)(struct intel_guc *guc);
-
-	/* GuC's FW specific notify function */
-	void (*notify)(struct intel_guc *guc);
 };
 
 static
 inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
 {
-	return guc->send(guc, action, len, NULL, 0);
+	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0);
 }
 
 static inline int
 intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
 			   u32 *response_buf, u32 response_buf_size)
 {
-	return guc->send(guc, action, len, response_buf, response_buf_size);
-}
-
-static inline void intel_guc_notify(struct intel_guc *guc)
-{
-	guc->notify(guc);
+	return intel_guc_ct_send(&guc->ct, action, len,
+				 response_buf, response_buf_size);
 }
 
 static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
 {
-	guc->handler(guc);
+	intel_guc_ct_event_handler(&guc->ct);
 }
 
 /* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
@@ -136,12 +125,9 @@ void intel_guc_init_send_regs(struct intel_guc *guc);
 void intel_guc_write_params(struct intel_guc *guc);
 int intel_guc_init(struct intel_guc *guc);
 void intel_guc_fini(struct intel_guc *guc);
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
-		       u32 *response_buf, u32 response_buf_size);
+void intel_guc_notify(struct intel_guc *guc);
 int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 			u32 *response_buf, u32 response_buf_size);
-void intel_guc_to_host_event_handler(struct intel_guc *guc);
-void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
 int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
 				       const u32 *payload, u32 len);
 int intel_guc_sample_forcewake(struct intel_guc *guc);
@@ -149,6 +135,8 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
 int intel_guc_suspend(struct intel_guc *guc);
 int intel_guc_resume(struct intel_guc *guc);
 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
+				   struct i915_vma **out_vma, void **out_vaddr);
 
 static inline bool intel_guc_is_supported(struct intel_guc *guc)
 {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index ca6674b..1017280 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -93,7 +93,8 @@ static void __guc_ads_init(struct intel_guc *guc)
 		 */
 		blob->ads.golden_context_lrca[engine_class] = 0;
 		blob->ads.eng_state_size[engine_class] =
-			intel_engine_context_size(dev_priv, engine_class) -
+			intel_engine_context_size(guc_to_gt(guc),
+						  engine_class) -
 			skipped_size;
 	}
 
@@ -135,32 +136,19 @@ static void __guc_ads_init(struct intel_guc *guc)
 int intel_guc_ads_create(struct intel_guc *guc)
 {
 	const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob));
-	struct i915_vma *vma;
-	void *blob;
 	int ret;
 
 	GEM_BUG_ON(guc->ads_vma);
 
-	vma = intel_guc_allocate_vma(guc, size);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+	ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma,
+					     (void **)&guc->ads_blob);
 
-	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
-	if (IS_ERR(blob)) {
-		ret = PTR_ERR(blob);
-		goto err_vma;
-	}
-
-	guc->ads_vma = vma;
-	guc->ads_blob = blob;
+	if (ret)
+		return ret;
 
 	__guc_ads_init(guc);
 
 	return 0;
-
-err_vma:
-	i915_vma_unpin_and_release(&guc->ads_vma, 0);
-	return ret;
 }
 
 void intel_guc_ads_destroy(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index b491155..c6f971a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -37,13 +37,10 @@ static void ct_incoming_request_worker_func(struct work_struct *w);
  */
 void intel_guc_ct_init_early(struct intel_guc_ct *ct)
 {
-	/* we're using static channel owners */
-	ct->host_channel.owner = CTB_OWNER_HOST;
-
-	spin_lock_init(&ct->lock);
-	INIT_LIST_HEAD(&ct->pending_requests);
-	INIT_LIST_HEAD(&ct->incoming_requests);
-	INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
+	spin_lock_init(&ct->requests.lock);
+	INIT_LIST_HEAD(&ct->requests.pending);
+	INIT_LIST_HEAD(&ct->requests.incoming);
+	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
 }
 
 static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
@@ -64,14 +61,13 @@ static inline const char *guc_ct_buffer_type_to_str(u32 type)
 }
 
 static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
-				    u32 cmds_addr, u32 size, u32 owner)
+				    u32 cmds_addr, u32 size)
 {
-	CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
-			desc, cmds_addr, size, owner);
+	CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size);
 	memset(desc, 0, sizeof(*desc));
 	desc->addr = cmds_addr;
 	desc->size = size;
-	desc->owner = owner;
+	desc->owner = CTB_OWNER_HOST;
 }
 
 static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
@@ -104,12 +100,11 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc,
 }
 
 static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
-					   u32 owner,
 					   u32 type)
 {
 	u32 action[] = {
 		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
-		owner,
+		CTB_OWNER_HOST,
 		type
 	};
 	int err;
@@ -117,20 +112,27 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
 	/* Can't use generic send(), CT deregistration must go over MMIO */
 	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
 	if (err)
-		DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
-			  guc_ct_buffer_type_to_str(type), owner, err);
+		DRM_ERROR("CT: deregister %s buffer failed; err=%d\n",
+			  guc_ct_buffer_type_to_str(type), err);
 	return err;
 }
 
-static int ctch_init(struct intel_guc *guc,
-		     struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_init - Init buffer-based communication
+ * @ct: pointer to CT struct
+ *
+ * Allocate memory required for buffer-based communication.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_init(struct intel_guc_ct *ct)
 {
-	struct i915_vma *vma;
+	struct intel_guc *guc = ct_to_guc(ct);
 	void *blob;
 	int err;
 	int i;
 
-	GEM_BUG_ON(ctch->vma);
+	GEM_BUG_ON(ct->vma);
 
 	/* We allocate 1 page to hold both descriptors and both buffers.
 	 *       ___________.....................
@@ -154,71 +156,65 @@ static int ctch_init(struct intel_guc *guc,
 	 * other code will need updating as well.
 	 */
 
-	/* allocate vma */
-	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto err_out;
+	err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
+	if (err) {
+		DRM_ERROR("CT: channel allocation failed; err=%d\n", err);
+		return err;
 	}
-	ctch->vma = vma;
 
-	/* map first page */
-	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
-	if (IS_ERR(blob)) {
-		err = PTR_ERR(blob);
-		goto err_vma;
-	}
 	CT_DEBUG_DRIVER("CT: vma base=%#x\n",
-			intel_guc_ggtt_offset(guc, ctch->vma));
+			intel_guc_ggtt_offset(guc, ct->vma));
 
 	/* store pointers to desc and cmds */
-	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
-		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
-		ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
-		ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
+	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
+		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
+		ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
+		ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
 	}
 
 	return 0;
-
-err_vma:
-	i915_vma_unpin_and_release(&ctch->vma, 0);
-err_out:
-	CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
-			ctch->owner, err);
-	return err;
 }
 
-static void ctch_fini(struct intel_guc *guc,
-		      struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_fini - Fini buffer-based communication
+ * @ct: pointer to CT struct
+ *
+ * Deallocate memory required for buffer-based communication.
+ */
+void intel_guc_ct_fini(struct intel_guc_ct *ct)
 {
-	GEM_BUG_ON(ctch->enabled);
+	GEM_BUG_ON(ct->enabled);
 
-	i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
+	i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
 }
 
-static int ctch_enable(struct intel_guc *guc,
-		       struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_enable - Enable buffer based command transport.
+ * @ct: pointer to CT struct
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_enable(struct intel_guc_ct *ct)
 {
+	struct intel_guc *guc = ct_to_guc(ct);
 	u32 base;
 	int err;
 	int i;
 
-	GEM_BUG_ON(!ctch->vma);
-
-	GEM_BUG_ON(ctch->enabled);
+	GEM_BUG_ON(ct->enabled);
 
 	/* vma should already be allocated and mapped */
-	base = intel_guc_ggtt_offset(guc, ctch->vma);
+	GEM_BUG_ON(!ct->vma);
+	base = intel_guc_ggtt_offset(guc, ct->vma);
 
 	/* (re)initialize descriptors
 	 * cmds buffers are in the second half of the blob page
 	 */
-	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
+	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
 		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
-		guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
+		guc_ct_buffer_desc_init(ct->ctbs[i].desc,
 					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
-					PAGE_SIZE/4,
-					ctch->owner);
+					PAGE_SIZE/4);
 	}
 
 	/* register buffers, starting with RECV buffer
@@ -236,38 +232,42 @@ static int ctch_enable(struct intel_guc *guc,
 	if (unlikely(err))
 		goto err_deregister;
 
-	ctch->enabled = true;
+	ct->enabled = true;
 
 	return 0;
 
 err_deregister:
 	guc_action_deregister_ct_buffer(guc,
-					ctch->owner,
 					INTEL_GUC_CT_BUFFER_TYPE_RECV);
 err_out:
-	DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
+	DRM_ERROR("CT: can't open channel; err=%d\n", err);
 	return err;
 }
 
-static void ctch_disable(struct intel_guc *guc,
-			 struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_disable - Disable buffer based command transport.
+ * @ct: pointer to CT struct
+ */
+void intel_guc_ct_disable(struct intel_guc_ct *ct)
 {
-	GEM_BUG_ON(!ctch->enabled);
+	struct intel_guc *guc = ct_to_guc(ct);
 
-	ctch->enabled = false;
+	GEM_BUG_ON(!ct->enabled);
 
-	guc_action_deregister_ct_buffer(guc,
-					ctch->owner,
-					INTEL_GUC_CT_BUFFER_TYPE_SEND);
-	guc_action_deregister_ct_buffer(guc,
-					ctch->owner,
-					INTEL_GUC_CT_BUFFER_TYPE_RECV);
+	ct->enabled = false;
+
+	if (intel_guc_is_running(guc)) {
+		guc_action_deregister_ct_buffer(guc,
+						INTEL_GUC_CT_BUFFER_TYPE_SEND);
+		guc_action_deregister_ct_buffer(guc,
+						INTEL_GUC_CT_BUFFER_TYPE_RECV);
+	}
 }
 
-static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
+static u32 ct_get_next_fence(struct intel_guc_ct *ct)
 {
 	/* For now it's trivial */
-	return ++ctch->next_fence;
+	return ++ct->requests.next_fence;
 }
 
 /**
@@ -440,35 +440,34 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
 	return err;
 }
 
-static int ctch_send(struct intel_guc_ct *ct,
-		     struct intel_guc_ct_channel *ctch,
-		     const u32 *action,
-		     u32 len,
-		     u32 *response_buf,
-		     u32 response_buf_size,
-		     u32 *status)
+static int ct_send(struct intel_guc_ct *ct,
+		   const u32 *action,
+		   u32 len,
+		   u32 *response_buf,
+		   u32 response_buf_size,
+		   u32 *status)
 {
-	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
+	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
 	struct guc_ct_buffer_desc *desc = ctb->desc;
 	struct ct_request request;
 	unsigned long flags;
 	u32 fence;
 	int err;
 
-	GEM_BUG_ON(!ctch->enabled);
+	GEM_BUG_ON(!ct->enabled);
 	GEM_BUG_ON(!len);
 	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
 	GEM_BUG_ON(!response_buf && response_buf_size);
 
-	fence = ctch_get_next_fence(ctch);
+	fence = ct_get_next_fence(ct);
 	request.fence = fence;
 	request.status = 0;
 	request.response_len = response_buf_size;
 	request.response_buf = response_buf;
 
-	spin_lock_irqsave(&ct->lock, flags);
-	list_add_tail(&request.link, &ct->pending_requests);
-	spin_unlock_irqrestore(&ct->lock, flags);
+	spin_lock_irqsave(&ct->requests.lock, flags);
+	list_add_tail(&request.link, &ct->requests.pending);
+	spin_unlock_irqrestore(&ct->requests.lock, flags);
 
 	err = ctb_write(ctb, action, len, fence, !!response_buf);
 	if (unlikely(err))
@@ -501,9 +500,9 @@ static int ctch_send(struct intel_guc_ct *ct,
 	}
 
 unlink:
-	spin_lock_irqsave(&ct->lock, flags);
+	spin_lock_irqsave(&ct->requests.lock, flags);
 	list_del(&request.link);
-	spin_unlock_irqrestore(&ct->lock, flags);
+	spin_unlock_irqrestore(&ct->requests.lock, flags);
 
 	return err;
 }
@@ -511,18 +510,21 @@ static int ctch_send(struct intel_guc_ct *ct,
 /*
  * Command Transport (CT) buffer based GuC send function.
  */
-int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
+int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
 		      u32 *response_buf, u32 response_buf_size)
 {
-	struct intel_guc_ct *ct = &guc->ct;
-	struct intel_guc_ct_channel *ctch = &ct->host_channel;
+	struct intel_guc *guc = ct_to_guc(ct);
 	u32 status = ~0; /* undefined */
 	int ret;
 
+	if (unlikely(!ct->enabled)) {
+		WARN(1, "Unexpected send: action=%#x\n", *action);
+		return -ENODEV;
+	}
+
 	mutex_lock(&guc->send_mutex);
 
-	ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
-			&status);
+	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
 	if (unlikely(ret < 0)) {
 		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
 			  action[0], ret, status);
@@ -653,8 +655,8 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 
 	CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);
 
-	spin_lock(&ct->lock);
-	list_for_each_entry(req, &ct->pending_requests, link) {
+	spin_lock(&ct->requests.lock);
+	list_for_each_entry(req, &ct->requests.pending, link) {
 		if (unlikely(fence != req->fence)) {
 			CT_DEBUG_DRIVER("CT: request %u awaits response\n",
 					req->fence);
@@ -672,7 +674,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 		found = true;
 		break;
 	}
-	spin_unlock(&ct->lock);
+	spin_unlock(&ct->requests.lock);
 
 	if (!found)
 		DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
@@ -710,13 +712,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
 	u32 *payload;
 	bool done;
 
-	spin_lock_irqsave(&ct->lock, flags);
-	request = list_first_entry_or_null(&ct->incoming_requests,
+	spin_lock_irqsave(&ct->requests.lock, flags);
+	request = list_first_entry_or_null(&ct->requests.incoming,
 					   struct ct_incoming_request, link);
 	if (request)
 		list_del(&request->link);
-	done = !!list_empty(&ct->incoming_requests);
-	spin_unlock_irqrestore(&ct->lock, flags);
+	done = !!list_empty(&ct->requests.incoming);
+	spin_unlock_irqrestore(&ct->requests.lock, flags);
 
 	if (!request)
 		return true;
@@ -734,12 +736,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
 
 static void ct_incoming_request_worker_func(struct work_struct *w)
 {
-	struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
+	struct intel_guc_ct *ct =
+		container_of(w, struct intel_guc_ct, requests.worker);
 	bool done;
 
 	done = ct_process_incoming_requests(ct);
 	if (!done)
-		queue_work(system_unbound_wq, &ct->worker);
+		queue_work(system_unbound_wq, &ct->requests.worker);
 }
 
 /**
@@ -777,23 +780,28 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
 	}
 	memcpy(request->msg, msg, 4 * msglen);
 
-	spin_lock_irqsave(&ct->lock, flags);
-	list_add_tail(&request->link, &ct->incoming_requests);
-	spin_unlock_irqrestore(&ct->lock, flags);
+	spin_lock_irqsave(&ct->requests.lock, flags);
+	list_add_tail(&request->link, &ct->requests.incoming);
+	spin_unlock_irqrestore(&ct->requests.lock, flags);
 
-	queue_work(system_unbound_wq, &ct->worker);
+	queue_work(system_unbound_wq, &ct->requests.worker);
 	return 0;
 }
 
-static void ct_process_host_channel(struct intel_guc_ct *ct)
+/*
+ * When we're communicating with the GuC over CT, GuC uses events
+ * to notify us about new messages being posted on the RECV buffer.
+ */
+void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
 {
-	struct intel_guc_ct_channel *ctch = &ct->host_channel;
-	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
+	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
 	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
 	int err = 0;
 
-	if (!ctch->enabled)
+	if (unlikely(!ct->enabled)) {
+		WARN(1, "Unexpected GuC event received while CT disabled!\n");
 		return;
+	}
 
 	do {
 		err = ctb_read(ctb, msg);
@@ -812,86 +820,3 @@ static void ct_process_host_channel(struct intel_guc_ct *ct)
 	}
 }
 
-/*
- * When we're communicating with the GuC over CT, GuC uses events
- * to notify us about new messages being posted on the RECV buffer.
- */
-void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
-{
-	struct intel_guc_ct *ct = &guc->ct;
-
-	ct_process_host_channel(ct);
-}
-
-/**
- * intel_guc_ct_init - Init CT communication
- * @ct: pointer to CT struct
- *
- * Allocate memory required for communication via
- * the CT channel.
- *
- * Return: 0 on success, a negative errno code on failure.
- */
-int intel_guc_ct_init(struct intel_guc_ct *ct)
-{
-	struct intel_guc *guc = ct_to_guc(ct);
-	struct intel_guc_ct_channel *ctch = &ct->host_channel;
-	int err;
-
-	err = ctch_init(guc, ctch);
-	if (unlikely(err)) {
-		DRM_ERROR("CT: can't open channel %d; err=%d\n",
-			  ctch->owner, err);
-		return err;
-	}
-
-	GEM_BUG_ON(!ctch->vma);
-	return 0;
-}
-
-/**
- * intel_guc_ct_fini - Fini CT communication
- * @ct: pointer to CT struct
- *
- * Deallocate memory required for communication via
- * the CT channel.
- */
-void intel_guc_ct_fini(struct intel_guc_ct *ct)
-{
-	struct intel_guc *guc = ct_to_guc(ct);
-	struct intel_guc_ct_channel *ctch = &ct->host_channel;
-
-	ctch_fini(guc, ctch);
-}
-
-/**
- * intel_guc_ct_enable - Enable buffer based command transport.
- * @ct: pointer to CT struct
- *
- * Return: 0 on success, a negative errno code on failure.
- */
-int intel_guc_ct_enable(struct intel_guc_ct *ct)
-{
-	struct intel_guc *guc = ct_to_guc(ct);
-	struct intel_guc_ct_channel *ctch = &ct->host_channel;
-
-	if (ctch->enabled)
-		return 0;
-
-	return ctch_enable(guc, ctch);
-}
-
-/**
- * intel_guc_ct_disable - Disable buffer based command transport.
- * @ct: pointer to CT struct
- */
-void intel_guc_ct_disable(struct intel_guc_ct *ct)
-{
-	struct intel_guc *guc = ct_to_guc(ct);
-	struct intel_guc_ct_channel *ctch = &ct->host_channel;
-
-	if (!ctch->enabled)
-		return;
-
-	ctch_disable(guc, ctch);
-}
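
For reference, a minimal standalone sketch of the single-page CT layout that intel_guc_ct_init() and intel_guc_ct_enable() above rely on, assuming a 4 KiB page: the two guc_ct_buffer_desc entries occupy the first two quarters of the blob and the two command buffers fill the second half. BLOB_SIZE stands in for PAGE_SIZE and ctb_offsets/ctb_layout are illustrative names; this is not driver code.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define BLOB_SIZE	4096u	/* assumed PAGE_SIZE */
#define CTB_SEND	0
#define CTB_RECV	1

struct ctb_offsets {
	size_t desc;	/* offset of the buffer descriptor within the blob */
	size_t cmds;	/* offset of the command buffer within the blob */
};

/* Mirrors the offset arithmetic used when the blob is carved up. */
static struct ctb_offsets ctb_layout(unsigned int i)
{
	struct ctb_offsets o = {
		.desc = BLOB_SIZE / 4 * i,
		.cmds = BLOB_SIZE / 4 * i + BLOB_SIZE / 2,
	};
	return o;
}

int main(void)
{
	for (unsigned int i = CTB_SEND; i <= CTB_RECV; i++) {
		struct ctb_offsets o = ctb_layout(i);

		/* descriptors stay in the first half, buffers in the second */
		assert(o.desc < BLOB_SIZE / 2);
		assert(o.cmds >= BLOB_SIZE / 2 && o.cmds < BLOB_SIZE);
		printf("ctb[%u]: desc=%zu cmds=%zu size=%u\n",
		       i, o.desc, o.cmds, BLOB_SIZE / 4);
	}
	return 0;
}

Compiled as plain C, it prints the same desc/cmds offsets and PAGE_SIZE/4 buffer size that the loops in the functions above compute.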
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 7c24d83..3e7fe23 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -35,44 +35,28 @@ struct intel_guc_ct_buffer {
 	u32 *cmds;
 };
 
-/** Represents pair of command transport buffers.
- *
- * Buffers go in pairs to allow bi-directional communication.
- * To simplify the code we place both of them in the same vma.
- * Buffers from the same pair must share unique owner id.
- *
- * @vma: pointer to the vma with pair of CT buffers
- * @ctbs: buffers for sending(0) and receiving(1) commands
- * @owner: unique identifier
- * @next_fence: fence to be used with next send command
- */
-struct intel_guc_ct_channel {
-	struct i915_vma *vma;
-	struct intel_guc_ct_buffer ctbs[2];
-	u32 owner;
-	u32 next_fence;
-	bool enabled;
-};
 
-/** Holds all command transport channels.
+/** Top-level structure for Command Transport related data
  *
- * @host_channel: main channel used by the host
+ * Includes a pair of CT buffers for bi-directional communication and tracking
+ * for the H2G and G2H requests sent and received through the buffers.
  */
 struct intel_guc_ct {
-	struct intel_guc_ct_channel host_channel;
-	/* other channels are tbd */
+	struct i915_vma *vma;
+	bool enabled;
 
-	/** @lock: protects pending requests list */
-	spinlock_t lock;
+	/* buffers for sending(0) and receiving(1) commands */
+	struct intel_guc_ct_buffer ctbs[2];
 
-	/** @pending_requests: list of requests waiting for response */
-	struct list_head pending_requests;
+	struct {
+		u32 next_fence; /* fence to be used with next request to send */
 
-	/** @incoming_requests: list of incoming requests */
-	struct list_head incoming_requests;
+		spinlock_t lock; /* protects pending requests list */
+		struct list_head pending; /* requests waiting for response */
 
-	/** @worker: worker for handling incoming requests */
-	struct work_struct worker;
+		struct list_head incoming; /* incoming requests */
+		struct work_struct worker; /* handler for incoming requests */
+	} requests;
 };
 
 void intel_guc_ct_init_early(struct intel_guc_ct *ct);
@@ -81,13 +65,13 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct);
 int intel_guc_ct_enable(struct intel_guc_ct *ct);
 void intel_guc_ct_disable(struct intel_guc_ct *ct);
 
-static inline void intel_guc_ct_stop(struct intel_guc_ct *ct)
+static inline bool intel_guc_ct_enabled(struct intel_guc_ct *ct)
 {
-	ct->host_channel.enabled = false;
+	return ct->enabled;
 }
 
-int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
+int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
 		      u32 *response_buf, u32 response_buf_size);
-void intel_guc_to_host_event_handler_ct(struct intel_guc *guc);
+void intel_guc_ct_event_handler(struct intel_guc_ct *ct);
 
 #endif /* _INTEL_GUC_CT_H_ */
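
A sketch, under simplifying assumptions, of the request tracking that the new requests sub-struct above exists for: each outgoing message takes the next fence, is parked on a pending list, and the response handler matches replies back to their requests by that fence. Locking is omitted here (the driver takes requests.lock around the list walks), a singly linked list replaces list_head, and ct_req, send_request and handle_response are illustrative names rather than the driver's API.

#include <stdint.h>
#include <stdio.h>

struct ct_req {
	uint32_t fence;
	uint32_t status;
	struct ct_req *next;
};

struct ct_requests {
	uint32_t next_fence;	/* fence to use with the next request */
	struct ct_req *pending;	/* requests waiting for a response */
};

/* Allocate a fence and park the request on the pending list. */
static uint32_t send_request(struct ct_requests *reqs, struct ct_req *req)
{
	req->fence = ++reqs->next_fence;
	req->status = 0;
	req->next = reqs->pending;
	reqs->pending = req;
	return req->fence;	/* would travel in the message header */
}

/* Match a response against the pending list by fence. */
static struct ct_req *handle_response(struct ct_requests *reqs,
				      uint32_t fence, uint32_t status)
{
	struct ct_req **p;

	for (p = &reqs->pending; *p; p = &(*p)->next) {
		struct ct_req *req = *p;

		if (req->fence != fence)
			continue;
		*p = req->next;		/* unlink the matched request */
		req->status = status;
		return req;
	}
	return NULL;			/* unsolicited response */
}

int main(void)
{
	struct ct_requests reqs = { 0 };
	struct ct_req a, b;
	uint32_t fa = send_request(&reqs, &a);
	uint32_t fb = send_request(&reqs, &b);

	printf("fence %u matched %p\n", fb, (void *)handle_response(&reqs, fb, 0x10));
	printf("fence %u matched %p\n", fa, (void *)handle_response(&reqs, fa, 0x20));
	return 0;
}

The point of the grouping is the same as in the header change above: the fence counter, the lock and both lists travel together, so a single sub-struct owns the whole request lifecycle.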
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 5528224..3a1c47d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -149,7 +149,7 @@ int intel_guc_fw_upload(struct intel_guc *guc)
 	 * Current uCode expects the code to be loaded at 8k; locations below
 	 * this are used for the stack.
 	 */
-	ret = intel_uc_fw_upload(&guc->fw, gt, 0x2000, UOS_MOVE);
+	ret = intel_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
 	if (ret)
 		goto out;
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index a26a85d..a6b733c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -31,7 +31,6 @@
 
 #define GUC_DOORBELL_INVALID		256
 
-#define GUC_DB_SIZE			(PAGE_SIZE)
 #define GUC_WQ_SIZE			(PAGE_SIZE * 2)
 
 /* Work queue item header definitions */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 2498c55..9e42324 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -18,15 +18,6 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 
-enum {
-	GUC_PREEMPT_NONE = 0,
-	GUC_PREEMPT_INPROGRESS,
-	GUC_PREEMPT_FINISHED,
-};
-#define GUC_PREEMPT_BREADCRUMB_DWORDS	0x8
-#define GUC_PREEMPT_BREADCRUMB_BYTES	\
-	(sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)
-
 /**
  * DOC: GuC-based command submission
  *
@@ -36,25 +27,14 @@ enum {
  * code) matches the old submission model and will be updated as part of the
  * upgrade to the new flow.
  *
- * GuC client:
- * A intel_guc_client refers to a submission path through GuC. Currently, there
- * is only one client, which is charged with all submissions to the GuC. This
- * struct is the owner of a doorbell, a process descriptor and a workqueue (all
- * of them inside a single gem object that contains all required pages for these
- * elements).
- *
  * GuC stage descriptor:
  * During initialization, the driver allocates a static pool of 1024 such
- * descriptors, and shares them with the GuC.
- * Currently, there exists a 1:1 mapping between a intel_guc_client and a
- * guc_stage_desc (via the client's stage_id), so effectively only one
- * gets used. This stage descriptor lets the GuC know about the doorbell,
- * workqueue and process descriptor. Theoretically, it also lets the GuC
- * know about our HW contexts (context ID, etc...), but we actually
- * employ a kind of submission where the GuC uses the LRCA sent via the work
- * item instead (the single guc_stage_desc associated to execbuf client
- * contains information about the default kernel context only, but this is
- * essentially unused). This is called a "proxy" submission.
+ * descriptors, and shares them with the GuC. Currently, we only use one
+ * descriptor. This stage descriptor lets the GuC know about the workqueue and
+ * process descriptor. Theoretically, it also lets the GuC know about our HW
+ * contexts (context ID, etc...), but we actually employ a kind of submission
+ * where the GuC uses the LRCA sent via the work item instead. This is called
+ * a "proxy" submission.
  *
  * The Scratch registers:
  * There are 16 MMIO-based registers starting from 0xC180. The kernel driver writes
@@ -63,11 +43,6 @@ enum {
  * Firmware writes a success/fail code back to the action register after
  * processing the request. The kernel driver polls waiting for this update and
  * then proceeds.
- * See intel_guc_send()
- *
- * Doorbells:
- * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
- * mapped into process space.
  *
  * Work Items:
  * There are several types of work items that the host may place into a
@@ -84,213 +59,45 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 	return rb_entry(rb, struct i915_priolist, node);
 }
 
-static inline bool is_high_priority(struct intel_guc_client *client)
+static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
 {
-	return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
-		client->priority == GUC_CLIENT_PRIORITY_HIGH);
+	struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;
+
+	return &base[id];
 }
 
-static int reserve_doorbell(struct intel_guc_client *client)
+static int guc_workqueue_create(struct intel_guc *guc)
 {
-	unsigned long offset;
-	unsigned long end;
-	u16 id;
-
-	GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);
-
-	/*
-	 * The bitmap tracks which doorbell registers are currently in use.
-	 * It is split into two halves; the first half is used for normal
-	 * priority contexts, the second half for high-priority ones.
-	 */
-	offset = 0;
-	end = GUC_NUM_DOORBELLS / 2;
-	if (is_high_priority(client)) {
-		offset = end;
-		end += offset;
-	}
-
-	id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
-	if (id == end)
-		return -ENOSPC;
-
-	__set_bit(id, client->guc->doorbell_bitmap);
-	client->doorbell_id = id;
-	DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
-			 client->stage_id, yesno(is_high_priority(client)),
-			 id);
-	return 0;
+	return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue,
+					      &guc->workqueue_vaddr);
 }
 
-static bool has_doorbell(struct intel_guc_client *client)
+static void guc_workqueue_destroy(struct intel_guc *guc)
 {
-	if (client->doorbell_id == GUC_DOORBELL_INVALID)
-		return false;
-
-	return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
-}
-
-static void unreserve_doorbell(struct intel_guc_client *client)
-{
-	GEM_BUG_ON(!has_doorbell(client));
-
-	__clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
-	client->doorbell_id = GUC_DOORBELL_INVALID;
-}
-
-/*
- * Tell the GuC to allocate or deallocate a specific doorbell
- */
-
-static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
-{
-	u32 action[] = {
-		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
-		stage_id
-	};
-
-	return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
-
-static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
-{
-	u32 action[] = {
-		INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
-		stage_id
-	};
-
-	return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
-
-static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
-{
-	struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;
-
-	return &base[client->stage_id];
-}
-
-/*
- * Initialise, update, or clear doorbell data shared with the GuC
- *
- * These functions modify shared data and so need access to the mapped
- * client object which contains the page being used for the doorbell
- */
-
-static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
-{
-	struct guc_stage_desc *desc;
-
-	/* Update the GuC's idea of the doorbell ID */
-	desc = __get_stage_desc(client);
-	desc->db_id = new_id;
-}
-
-static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
-{
-	return client->vaddr + client->doorbell_offset;
-}
-
-static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
-{
-	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
-
-	GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
-	return intel_uncore_read(uncore, GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
-}
-
-static void __init_doorbell(struct intel_guc_client *client)
-{
-	struct guc_doorbell_info *doorbell;
-
-	doorbell = __get_doorbell(client);
-	doorbell->db_status = GUC_DOORBELL_ENABLED;
-	doorbell->cookie = 0;
-}
-
-static void __fini_doorbell(struct intel_guc_client *client)
-{
-	struct guc_doorbell_info *doorbell;
-	u16 db_id = client->doorbell_id;
-
-	doorbell = __get_doorbell(client);
-	doorbell->db_status = GUC_DOORBELL_DISABLED;
-
-	/* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
-	 * to go to zero after updating db_status before we call the GuC to
-	 * release the doorbell
-	 */
-	if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
-		WARN_ONCE(true, "Doorbell never became invalid after disable\n");
-}
-
-static int create_doorbell(struct intel_guc_client *client)
-{
-	int ret;
-
-	if (WARN_ON(!has_doorbell(client)))
-		return -ENODEV; /* internal setup error, should never happen */
-
-	__update_doorbell_desc(client, client->doorbell_id);
-	__init_doorbell(client);
-
-	ret = __guc_allocate_doorbell(client->guc, client->stage_id);
-	if (ret) {
-		__fini_doorbell(client);
-		__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
-		DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
-				 client->stage_id, ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-static int destroy_doorbell(struct intel_guc_client *client)
-{
-	int ret;
-
-	GEM_BUG_ON(!has_doorbell(client));
-
-	__fini_doorbell(client);
-	ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
-	if (ret)
-		DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
-			  client->stage_id, ret);
-
-	__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
-
-	return ret;
-}
-
-static unsigned long __select_cacheline(struct intel_guc *guc)
-{
-	unsigned long offset;
-
-	/* Doorbell uses a single cache line within a page */
-	offset = offset_in_page(guc->db_cacheline);
-
-	/* Moving to next cache line to reduce contention */
-	guc->db_cacheline += cache_line_size();
-
-	DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
-			 offset, guc->db_cacheline, cache_line_size());
-	return offset;
-}
-
-static inline struct guc_process_desc *
-__get_process_desc(struct intel_guc_client *client)
-{
-	return client->vaddr + client->proc_desc_offset;
+	i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP);
 }
 
 /*
  * Initialise the process descriptor shared with the GuC firmware.
  */
-static void guc_proc_desc_init(struct intel_guc_client *client)
+static int guc_proc_desc_create(struct intel_guc *guc)
+{
+	const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc));
+
+	return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc,
+					      &guc->proc_desc_vaddr);
+}
+
+static void guc_proc_desc_destroy(struct intel_guc *guc)
+{
+	i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP);
+}
+
+static void guc_proc_desc_init(struct intel_guc *guc)
 {
 	struct guc_process_desc *desc;
 
-	desc = memset(__get_process_desc(client), 0, sizeof(*desc));
+	desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc));
 
 	/*
 	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
@@ -301,47 +108,27 @@ static void guc_proc_desc_init(struct intel_guc_client *client)
 	desc->wq_base_addr = 0;
 	desc->db_base_addr = 0;
 
-	desc->stage_id = client->stage_id;
 	desc->wq_size_bytes = GUC_WQ_SIZE;
 	desc->wq_status = WQ_STATUS_ACTIVE;
-	desc->priority = client->priority;
+	desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
 }
 
-static void guc_proc_desc_fini(struct intel_guc_client *client)
+static void guc_proc_desc_fini(struct intel_guc *guc)
 {
-	struct guc_process_desc *desc;
-
-	desc = __get_process_desc(client);
-	memset(desc, 0, sizeof(*desc));
+	memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc));
 }
 
 static int guc_stage_desc_pool_create(struct intel_guc *guc)
 {
-	struct i915_vma *vma;
-	void *vaddr;
+	u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
+			      GUC_MAX_STAGE_DESCRIPTORS);
 
-	vma = intel_guc_allocate_vma(guc,
-				     PAGE_ALIGN(sizeof(struct guc_stage_desc) *
-				     GUC_MAX_STAGE_DESCRIPTORS));
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
-	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		i915_vma_unpin_and_release(&vma, 0);
-		return PTR_ERR(vaddr);
-	}
-
-	guc->stage_desc_pool = vma;
-	guc->stage_desc_pool_vaddr = vaddr;
-	ida_init(&guc->stage_ids);
-
-	return 0;
+	return intel_guc_allocate_and_map_vma(guc, size, &guc->stage_desc_pool,
+					      &guc->stage_desc_pool_vaddr);
 }
 
 static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
 {
-	ida_destroy(&guc->stage_ids);
 	i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
 }
 
@@ -349,63 +136,49 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
  * Initialise/clear the stage descriptor shared with the GuC firmware.
  *
  * This descriptor tells the GuC where (in GGTT space) to find the important
- * data structures relating to this client (doorbell, process descriptor,
- * write queue, etc).
+ * data structures related to work submission (process descriptor, write queue,
+ * etc).
  */
-static void guc_stage_desc_init(struct intel_guc_client *client)
+static void guc_stage_desc_init(struct intel_guc *guc)
 {
-	struct intel_guc *guc = client->guc;
 	struct guc_stage_desc *desc;
-	u32 gfx_addr;
 
-	desc = __get_stage_desc(client);
+	/* we only use 1 stage desc, so hardcode it to 0 */
+	desc = __get_stage_desc(guc, 0);
 	memset(desc, 0, sizeof(*desc));
 
 	desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
 			  GUC_STAGE_DESC_ATTR_KERNEL;
-	if (is_high_priority(client))
-		desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT;
-	desc->stage_id = client->stage_id;
-	desc->priority = client->priority;
-	desc->db_id = client->doorbell_id;
 
-	/*
-	 * The doorbell, process descriptor, and workqueue are all parts
-	 * of the client object, which the GuC will reference via the GGTT
-	 */
-	gfx_addr = intel_guc_ggtt_offset(guc, client->vma);
-	desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
-				client->doorbell_offset;
-	desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
-	desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
-	desc->process_desc = gfx_addr + client->proc_desc_offset;
-	desc->wq_addr = gfx_addr + GUC_DB_SIZE;
+	desc->stage_id = 0;
+	desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
+
+	desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc);
+	desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue);
 	desc->wq_size = GUC_WQ_SIZE;
-
-	desc->desc_private = ptr_to_u64(client);
 }
 
-static void guc_stage_desc_fini(struct intel_guc_client *client)
+static void guc_stage_desc_fini(struct intel_guc *guc)
 {
 	struct guc_stage_desc *desc;
 
-	desc = __get_stage_desc(client);
+	desc = __get_stage_desc(guc, 0);
 	memset(desc, 0, sizeof(*desc));
 }
 
 /* Construct a Work Item and append it to the GuC's Work Queue */
-static void guc_wq_item_append(struct intel_guc_client *client,
+static void guc_wq_item_append(struct intel_guc *guc,
 			       u32 target_engine, u32 context_desc,
 			       u32 ring_tail, u32 fence_id)
 {
 	/* wqi_len is in DWords, and does not include the one-word header */
 	const size_t wqi_size = sizeof(struct guc_wq_item);
 	const u32 wqi_len = wqi_size / sizeof(u32) - 1;
-	struct guc_process_desc *desc = __get_process_desc(client);
+	struct guc_process_desc *desc = guc->proc_desc_vaddr;
 	struct guc_wq_item *wqi;
 	u32 wq_off;
 
-	lockdep_assert_held(&client->wq_lock);
+	lockdep_assert_held(&guc->wq_lock);
 
 	/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
 	 * should not have the case where structure wqi is across page, neither
@@ -425,58 +198,30 @@ static void guc_wq_item_append(struct intel_guc_client *client,
 			      GUC_WQ_SIZE) < wqi_size);
 	GEM_BUG_ON(wq_off & (wqi_size - 1));
 
-	/* WQ starts from the page after doorbell / process_desc */
-	wqi = client->vaddr + wq_off + GUC_DB_SIZE;
+	wqi = guc->workqueue_vaddr + wq_off;
 
-	if (I915_SELFTEST_ONLY(client->use_nop_wqi)) {
-		wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT);
-	} else {
-		/* Now fill in the 4-word work queue item */
-		wqi->header = WQ_TYPE_INORDER |
-			      (wqi_len << WQ_LEN_SHIFT) |
-			      (target_engine << WQ_TARGET_SHIFT) |
-			      WQ_NO_WCFLUSH_WAIT;
-		wqi->context_desc = context_desc;
-		wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
-		GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
-		wqi->fence_id = fence_id;
-	}
+	/* Now fill in the 4-word work queue item */
+	wqi->header = WQ_TYPE_INORDER |
+		      (wqi_len << WQ_LEN_SHIFT) |
+		      (target_engine << WQ_TARGET_SHIFT) |
+		      WQ_NO_WCFLUSH_WAIT;
+	wqi->context_desc = context_desc;
+	wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
+	GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
+	wqi->fence_id = fence_id;
 
 	/* Make the update visible to GuC */
 	WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
 }
 
-static void guc_ring_doorbell(struct intel_guc_client *client)
-{
-	struct guc_doorbell_info *db;
-	u32 cookie;
-
-	lockdep_assert_held(&client->wq_lock);
-
-	/* pointer of current doorbell cacheline */
-	db = __get_doorbell(client);
-
-	/*
-	 * We're not expecting the doorbell cookie to change behind our back,
-	 * we also need to treat 0 as a reserved value.
-	 */
-	cookie = READ_ONCE(db->cookie);
-	WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie);
-
-	/* XXX: doorbell was lost and need to acquire it again */
-	GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
-}
-
 static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
-	struct intel_guc_client *client = guc->execbuf_client;
 	struct intel_engine_cs *engine = rq->engine;
-	u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
+	u32 ctx_desc = lower_32_bits(rq->context->lrc_desc);
 	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
 
-	guc_wq_item_append(client, engine->guc_id, ctx_desc,
+	guc_wq_item_append(guc, engine->guc_id, ctx_desc,
 			   ring_tail, rq->fence.seqno);
-	guc_ring_doorbell(client);
 }
 
 /*
@@ -488,10 +233,9 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
  */
 static void flush_ggtt_writes(struct i915_vma *vma)
 {
-	struct drm_i915_private *i915 = vma->vm->i915;
-
 	if (i915_vma_is_map_and_fenceable(vma))
-		intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS);
+		intel_uncore_posting_read_fw(vma->vm->gt->uncore,
+					     GUC_STATUS);
 }
 
 static void guc_submit(struct intel_engine_cs *engine,
@@ -499,9 +243,8 @@ static void guc_submit(struct intel_engine_cs *engine,
 		       struct i915_request **end)
 {
 	struct intel_guc *guc = &engine->gt->uc.guc;
-	struct intel_guc_client *client = guc->execbuf_client;
 
-	spin_lock(&client->wq_lock);
+	spin_lock(&guc->wq_lock);
 
 	do {
 		struct i915_request *rq = *out++;
@@ -510,7 +253,7 @@ static void guc_submit(struct intel_engine_cs *engine,
 		guc_add_request(guc, rq);
 	} while (out != end);
 
-	spin_unlock(&client->wq_lock);
+	spin_unlock(&guc->wq_lock);
 }
 
 static inline int rq_prio(const struct i915_request *rq)
@@ -529,7 +272,7 @@ static struct i915_request *schedule_in(struct i915_request *rq, int idx)
 	 * required if we generalise the inflight tracking.
 	 */
 
-	intel_gt_pm_get(rq->engine->gt);
+	__intel_gt_pm_get(rq->engine->gt);
 	return i915_request_get(rq);
 }
 
@@ -537,7 +280,7 @@ static void schedule_out(struct i915_request *rq)
 {
 	trace_i915_request_out(rq);
 
-	intel_gt_pm_put(rq->engine->gt);
+	intel_gt_pm_put_async(rq->engine->gt);
 	i915_request_put(rq);
 }
 
@@ -572,7 +315,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
-			if (last && rq->hw_context != last->hw_context) {
+			if (last && rq->context != last->context) {
 				if (port == last_port)
 					goto done;
 
@@ -631,7 +374,7 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 
-	GEM_TRACE("%s\n", engine->name);
+	ENGINE_TRACE(engine, "\n");
 
 	/*
 	 * Prevent request submission to the hardware until we have
@@ -658,7 +401,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
 		memset(execlists->inflight, 0, sizeof(execlists->inflight));
 }
 
-static void guc_reset(struct intel_engine_cs *engine, bool stalled)
+static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request *rq;
@@ -677,20 +420,20 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
 		stalled = false;
 
 	__i915_request_reset(rq, stalled);
-	intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
+	intel_lr_context_reset(engine, rq->context, rq->head, stalled);
 
 out_unlock:
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
-static void guc_cancel_requests(struct intel_engine_cs *engine)
+static void guc_reset_cancel(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request *rq, *rn;
 	struct rb_node *rb;
 	unsigned long flags;
 
-	GEM_TRACE("%s\n", engine->name);
+	ENGINE_TRACE(engine, "\n");
 
 	/*
 	 * Before we call engine->cancel_requests(), we should have exclusive
@@ -751,8 +494,8 @@ static void guc_reset_finish(struct intel_engine_cs *engine)
 		/* And kick in case we missed a new request submission. */
 		tasklet_hi_schedule(&execlists->tasklet);
 
-	GEM_TRACE("%s: depth->%d\n", engine->name,
-		  atomic_read(&execlists->tasklet.count));
+	ENGINE_TRACE(engine, "depth->%d\n",
+		     atomic_read(&execlists->tasklet.count));
 }
 
 /*
@@ -761,213 +504,6 @@ static void guc_reset_finish(struct intel_engine_cs *engine)
  * path of guc_submit() above.
  */
 
-/* Check that a doorbell register is in the expected state */
-static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
-{
-	bool valid;
-
-	GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
-
-	valid = __doorbell_valid(guc, db_id);
-
-	if (test_bit(db_id, guc->doorbell_bitmap) == valid)
-		return true;
-
-	DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
-			 db_id, yesno(valid));
-
-	return false;
-}
-
-static bool guc_verify_doorbells(struct intel_guc *guc)
-{
-	bool doorbells_ok = true;
-	u16 db_id;
-
-	for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
-		if (!doorbell_ok(guc, db_id))
-			doorbells_ok = false;
-
-	return doorbells_ok;
-}
-
-/**
- * guc_client_alloc() - Allocate an intel_guc_client
- * @guc:	the intel_guc structure
- * @priority:	four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW
- *		The kernel client to replace ExecList submission is created with
- *		NORMAL priority. Priority of a client for scheduler can be HIGH,
- *		while a preemption context can use CRITICAL.
- *
- * Return:	An intel_guc_client object if success, else NULL.
- */
-static struct intel_guc_client *
-guc_client_alloc(struct intel_guc *guc, u32 priority)
-{
-	struct intel_guc_client *client;
-	struct i915_vma *vma;
-	void *vaddr;
-	int ret;
-
-	client = kzalloc(sizeof(*client), GFP_KERNEL);
-	if (!client)
-		return ERR_PTR(-ENOMEM);
-
-	client->guc = guc;
-	client->priority = priority;
-	client->doorbell_id = GUC_DOORBELL_INVALID;
-	spin_lock_init(&client->wq_lock);
-
-	ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
-			     GFP_KERNEL);
-	if (ret < 0)
-		goto err_client;
-
-	client->stage_id = ret;
-
-	/* The first page is doorbell/proc_desc. Two followed pages are wq. */
-	vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
-	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
-		goto err_id;
-	}
-
-	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
-	client->vma = vma;
-
-	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		ret = PTR_ERR(vaddr);
-		goto err_vma;
-	}
-	client->vaddr = vaddr;
-
-	ret = reserve_doorbell(client);
-	if (ret)
-		goto err_vaddr;
-
-	client->doorbell_offset = __select_cacheline(guc);
-
-	/*
-	 * Since the doorbell only requires a single cacheline, we can save
-	 * space by putting the application process descriptor in the same
-	 * page. Use the half of the page that doesn't include the doorbell.
-	 */
-	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
-		client->proc_desc_offset = 0;
-	else
-		client->proc_desc_offset = (GUC_DB_SIZE / 2);
-
-	DRM_DEBUG_DRIVER("new priority %u client %p: stage_id %u\n",
-			 priority, client, client->stage_id);
-	DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
-			 client->doorbell_id, client->doorbell_offset);
-
-	return client;
-
-err_vaddr:
-	i915_gem_object_unpin_map(client->vma->obj);
-err_vma:
-	i915_vma_unpin_and_release(&client->vma, 0);
-err_id:
-	ida_simple_remove(&guc->stage_ids, client->stage_id);
-err_client:
-	kfree(client);
-	return ERR_PTR(ret);
-}
-
-static void guc_client_free(struct intel_guc_client *client)
-{
-	unreserve_doorbell(client);
-	i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
-	ida_simple_remove(&client->guc->stage_ids, client->stage_id);
-	kfree(client);
-}
-
-static inline bool ctx_save_restore_disabled(struct intel_context *ce)
-{
-	u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];
-
-#define SR_DISABLED \
-	_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
-			   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)
-
-	return (sr & SR_DISABLED) == SR_DISABLED;
-
-#undef SR_DISABLED
-}
-
-static int guc_clients_create(struct intel_guc *guc)
-{
-	struct intel_guc_client *client;
-
-	GEM_BUG_ON(guc->execbuf_client);
-
-	client = guc_client_alloc(guc, GUC_CLIENT_PRIORITY_KMD_NORMAL);
-	if (IS_ERR(client)) {
-		DRM_ERROR("Failed to create GuC client for submission!\n");
-		return PTR_ERR(client);
-	}
-	guc->execbuf_client = client;
-
-	return 0;
-}
-
-static void guc_clients_destroy(struct intel_guc *guc)
-{
-	struct intel_guc_client *client;
-
-	client = fetch_and_zero(&guc->execbuf_client);
-	if (client)
-		guc_client_free(client);
-}
-
-static int __guc_client_enable(struct intel_guc_client *client)
-{
-	int ret;
-
-	guc_proc_desc_init(client);
-	guc_stage_desc_init(client);
-
-	ret = create_doorbell(client);
-	if (ret)
-		goto fail;
-
-	return 0;
-
-fail:
-	guc_stage_desc_fini(client);
-	guc_proc_desc_fini(client);
-	return ret;
-}
-
-static void __guc_client_disable(struct intel_guc_client *client)
-{
-	/*
-	 * By the time we're here, GuC may have already been reset. if that is
-	 * the case, instead of trying (in vain) to communicate with it, let's
-	 * just cleanup the doorbell HW and our internal state.
-	 */
-	if (intel_guc_is_running(client->guc))
-		destroy_doorbell(client);
-	else
-		__fini_doorbell(client);
-
-	guc_stage_desc_fini(client);
-	guc_proc_desc_fini(client);
-}
-
-static int guc_clients_enable(struct intel_guc *guc)
-{
-	return __guc_client_enable(guc->execbuf_client);
-}
-
-static void guc_clients_disable(struct intel_guc *guc)
-{
-	if (guc->execbuf_client)
-		__guc_client_disable(guc->execbuf_client);
-}
-
 /*
  * Set up the memory resources to be shared with the GuC (via the GGTT)
  * at firmware loading time.
@@ -988,13 +524,20 @@ int intel_guc_submission_init(struct intel_guc *guc)
 	 */
 	GEM_BUG_ON(!guc->stage_desc_pool);
 
-	WARN_ON(!guc_verify_doorbells(guc));
-	ret = guc_clients_create(guc);
+	ret = guc_workqueue_create(guc);
 	if (ret)
 		goto err_pool;
 
+	ret = guc_proc_desc_create(guc);
+	if (ret)
+		goto err_workqueue;
+
+	spin_lock_init(&guc->wq_lock);
+
 	return 0;
 
+err_workqueue:
+	guc_workqueue_destroy(guc);
 err_pool:
 	guc_stage_desc_pool_destroy(guc);
 	return ret;
@@ -1002,83 +545,37 @@ int intel_guc_submission_init(struct intel_guc *guc)
 
 void intel_guc_submission_fini(struct intel_guc *guc)
 {
-	guc_clients_destroy(guc);
-	WARN_ON(!guc_verify_doorbells(guc));
-
-	if (guc->stage_desc_pool)
+	if (guc->stage_desc_pool) {
+		guc_proc_desc_destroy(guc);
+		guc_workqueue_destroy(guc);
 		guc_stage_desc_pool_destroy(guc);
+	}
 }
 
 static void guc_interrupts_capture(struct intel_gt *gt)
 {
-	struct intel_rps *rps = &gt->rps;
 	struct intel_uncore *uncore = gt->uncore;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int irqs;
+	u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
+	u32 dmask = irqs << 16 | irqs;
 
-	/* tell all command streamers to forward interrupts (but not vblank)
-	 * to GuC
-	 */
-	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
-	for_each_engine(engine, gt, id)
-		ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
+	GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);
 
-	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
-	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-	/* These three registers have the same bit definitions */
-	intel_uncore_write(uncore, GUC_BCS_RCS_IER, ~irqs);
-	intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, ~irqs);
-	intel_uncore_write(uncore, GUC_WD_VECS_IER, ~irqs);
-
-	/*
-	 * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
-	 * (unmasked) PM interrupts to the GuC. All other bits of this
-	 * register *disable* generation of a specific interrupt.
-	 *
-	 * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
-	 * writing to the PM interrupt mask register, i.e. interrupts
-	 * that must not be disabled.
-	 *
-	 * If the GuC is handling these interrupts, then we must not let
-	 * the PM code disable ANY interrupt that the GuC is expecting.
-	 * So for each ENABLED (0) bit in this register, we must SET the
-	 * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
-	 * GuC needs ARAT expired interrupt unmasked hence it is set in
-	 * pm_intrmsk_mbz.
-	 *
-	 * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
-	 * result in the register bit being left SET!
-	 */
-	rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
-	rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+	/* Don't handle the ctx switch interrupt in GuC submission mode */
+	intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask, 0);
+	intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask, 0);
 }
 
 static void guc_interrupts_release(struct intel_gt *gt)
 {
-	struct intel_rps *rps = &gt->rps;
 	struct intel_uncore *uncore = gt->uncore;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int irqs;
+	u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
+	u32 dmask = irqs << 16 | irqs;
 
-	/*
-	 * tell all command streamers NOT to forward interrupts or vblank
-	 * to GuC.
-	 */
-	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
-	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
-	for_each_engine(engine, gt, id)
-		ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
+	GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);
 
-	/* route all GT interrupts to the host */
-	intel_uncore_write(uncore, GUC_BCS_RCS_IER, 0);
-	intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, 0);
-	intel_uncore_write(uncore, GUC_WD_VECS_IER, 0);
-
-	rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
-	rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+	/* Handle ctx switch interrupts again */
+	intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0, dmask);
+	intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
 }
 
 static void guc_set_default_submission(struct intel_engine_cs *engine)
@@ -1102,11 +599,10 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
 	engine->park = engine->unpark = NULL;
 
 	engine->reset.prepare = guc_reset_prepare;
-	engine->reset.reset = guc_reset;
+	engine->reset.rewind = guc_reset_rewind;
+	engine->reset.cancel = guc_reset_cancel;
 	engine->reset.finish = guc_reset_finish;
 
-	engine->cancel_requests = guc_cancel_requests;
-
 	engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
 	engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
 
@@ -1119,16 +615,11 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
 	GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
 }
 
-int intel_guc_submission_enable(struct intel_guc *guc)
+void intel_guc_submission_enable(struct intel_guc *guc)
 {
 	struct intel_gt *gt = guc_to_gt(guc);
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	int err;
-
-	err = i915_inject_probe_error(gt->i915, -ENXIO);
-	if (err)
-		return err;
 
 	/*
 	 * We're using GuC work items for submitting work through GuC. Since
@@ -1143,11 +634,8 @@ int intel_guc_submission_enable(struct intel_guc *guc)
 		     sizeof(struct guc_wq_item) *
 		     I915_NUM_ENGINES > GUC_WQ_SIZE);
 
-	GEM_BUG_ON(!guc->execbuf_client);
-
-	err = guc_clients_enable(guc);
-	if (err)
-		return err;
+	guc_proc_desc_init(guc);
+	guc_stage_desc_init(guc);
 
 	/* Take over from manual control of ELSP (execlists) */
 	guc_interrupts_capture(gt);
@@ -1156,8 +644,6 @@ int intel_guc_submission_enable(struct intel_guc *guc)
 		engine->set_default_submission = guc_set_default_submission;
 		engine->set_default_submission(engine);
 	}
-
-	return 0;
 }
 
 void intel_guc_submission_disable(struct intel_guc *guc)
@@ -1166,8 +652,12 @@ void intel_guc_submission_disable(struct intel_guc *guc)
 
 	GEM_BUG_ON(gt->awake); /* GT should be parked first */
 
+	/* Note: By the time we're here, GuC may have already been reset */
+
 	guc_interrupts_release(gt);
-	guc_clients_disable(guc);
+
+	guc_stage_desc_fini(guc);
+	guc_proc_desc_fini(guc);
 }
 
 static bool __guc_submission_support(struct intel_guc *guc)
@@ -1186,6 +676,7 @@ void intel_guc_submission_init_early(struct intel_guc *guc)
 	guc->submission_supported = __guc_submission_support(guc);
 }
 
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftest_guc.c"
-#endif
+bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
+{
+	return engine->set_default_submission == guc_set_default_submission;
+}
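
A small sketch of the producer side of the work queue that guc_wq_item_append() above fills, assuming a 16-byte item and an 8 KiB queue so the numbers line up with the 4-DWord items and two-page GUC_WQ_SIZE mentioned in the code. struct wq, wq_append and the header encoding are simplified stand-ins rather than the GuC ABI, and the one-byte gap follows the usual CIRC_SPACE() convention so a full queue never looks empty.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WQ_SIZE		8192u		/* assumed GUC_WQ_SIZE: two 4 KiB pages */

struct wq_item {			/* stand-in for the 4-DWord work item */
	uint32_t header;
	uint32_t context_desc;
	uint32_t submit_element_info;
	uint32_t fence_id;
};

struct wq {
	uint32_t head;			/* advanced by the consumer (firmware) */
	uint32_t tail;			/* advanced by the producer (driver) */
	uint8_t buf[WQ_SIZE];
};

/* Append one fixed-size item and publish the new tail. */
static int wq_append(struct wq *wq, const struct wq_item *item)
{
	const uint32_t size = sizeof(*item);
	uint32_t off = wq->tail;
	uint32_t space = (wq->head - off - 1) & (WQ_SIZE - 1);

	if (space < size)		/* keep a gap so full != empty */
		return -1;

	assert(!(off & (size - 1)));	/* items never straddle the wrap */
	memcpy(wq->buf + off, item, size);

	wq->tail = (off + size) & (WQ_SIZE - 1);	/* publish the new tail */
	return 0;
}

int main(void)
{
	static struct wq wq;
	struct wq_item item = { .header = 0x1, .fence_id = 42 };

	while (!wq_append(&wq, &item))
		;
	printf("queue full at tail=%u head=%u\n", wq.tail, wq.head);
	return 0;
}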
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 54d7168..e402a29 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -6,62 +6,18 @@
 #ifndef _INTEL_GUC_SUBMISSION_H_
 #define _INTEL_GUC_SUBMISSION_H_
 
-#include <linux/spinlock.h>
+#include <linux/types.h>
 
-#include "gt/intel_engine_types.h"
-
-#include "i915_gem.h"
-#include "i915_selftest.h"
-
-struct drm_i915_private;
-
-/*
- * This structure primarily describes the GEM object shared with the GuC.
- * The specs sometimes refer to this object as a "GuC context", but we use
- * the term "client" to avoid confusion with hardware contexts. This
- * GEM object is held for the entire lifetime of our interaction with
- * the GuC, being allocated before the GuC is loaded with its firmware.
- * Because there's no way to update the address used by the GuC after
- * initialisation, the shared object must stay pinned into the GGTT as
- * long as the GuC is in use. We also keep the first page (only) mapped
- * into kernel address space, as it includes shared data that must be
- * updated on every request submission.
- *
- * The single GEM object described here is actually made up of several
- * separate areas, as far as the GuC is concerned. The first page (kept
- * kmap'd) includes the "process descriptor" which holds sequence data for
- * the doorbell, and one cacheline which actually *is* the doorbell; a
- * write to this will "ring the doorbell" (i.e. send an interrupt to the
- * GuC). The subsequent  pages of the client object constitute the work
- * queue (a circular array of work items), again described in the process
- * descriptor. Work queue pages are mapped momentarily as required.
- */
-struct intel_guc_client {
-	struct i915_vma *vma;
-	void *vaddr;
-	struct intel_guc *guc;
-
-	/* bitmap of (host) engine ids */
-	u32 priority;
-	u32 stage_id;
-	u32 proc_desc_offset;
-
-	u16 doorbell_id;
-	unsigned long doorbell_offset;
-
-	/* Protects GuC client's WQ access */
-	spinlock_t wq_lock;
-
-	/* For testing purposes, use nop WQ items instead of real ones */
-	I915_SELFTEST_DECLARE(bool use_nop_wqi);
-};
+struct intel_guc;
+struct intel_engine_cs;
 
 void intel_guc_submission_init_early(struct intel_guc *guc);
 int intel_guc_submission_init(struct intel_guc *guc);
-int intel_guc_submission_enable(struct intel_guc *guc);
+void intel_guc_submission_enable(struct intel_guc *guc);
 void intel_guc_submission_disable(struct intel_guc *guc);
 void intel_guc_submission_fini(struct intel_guc *guc);
 int intel_guc_preempt_work_create(struct intel_guc *guc);
 void intel_guc_preempt_work_destroy(struct intel_guc *guc);
+bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index d654340..eee193b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -39,5 +39,5 @@ void intel_huc_fw_init_early(struct intel_huc *huc)
 int intel_huc_fw_upload(struct intel_huc *huc)
 {
 	/* HW doesn't look at destination address for HuC, so set it to 0 */
-	return intel_uc_fw_upload(&huc->fw, huc_to_gt(huc), 0, HUC_UKERNEL);
+	return intel_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
 }
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 629b193..64934a8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -12,6 +12,9 @@
 
 #include "i915_drv.h"
 
+static const struct intel_uc_ops uc_ops_off;
+static const struct intel_uc_ops uc_ops_on;
+
 /* Reset GuC providing us with fresh state for both GuC and HuC.
  */
 static int __intel_uc_reset_hw(struct intel_uc *uc)
@@ -89,6 +92,11 @@ void intel_uc_init_early(struct intel_uc *uc)
 	intel_huc_init_early(&uc->huc);
 
 	__confirm_options(uc);
+
+	if (intel_uc_uses_guc(uc))
+		uc->ops = &uc_ops_on;
+	else
+		uc->ops = &uc_ops_off;
 }
 
 void intel_uc_driver_late_release(struct intel_uc *uc)
@@ -123,6 +131,11 @@ static void __uc_free_load_err_log(struct intel_uc *uc)
 		i915_gem_object_put(log);
 }
 
+static inline bool guc_communication_enabled(struct intel_guc *guc)
+{
+	return intel_guc_ct_enabled(&guc->ct);
+}
+
 /*
  * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
  * register using the same bits used in the CT message payload. Since our
@@ -158,7 +171,7 @@ static void guc_handle_mmio_msg(struct intel_guc *guc)
 	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
 
 	/* we need communication to be enabled to reply to GuC */
-	GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop);
+	GEM_BUG_ON(!guc_communication_enabled(guc));
 
 	if (!guc->mmio_msg)
 		return;
@@ -185,11 +198,6 @@ static void guc_disable_interrupts(struct intel_guc *guc)
 	guc->interrupts.disable(guc);
 }
 
-static inline bool guc_communication_enabled(struct intel_guc *guc)
-{
-	return guc->send != intel_guc_send_nop;
-}
-
 static int guc_enable_communication(struct intel_guc *guc)
 {
 	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
@@ -205,9 +213,6 @@ static int guc_enable_communication(struct intel_guc *guc)
 	if (ret)
 		return ret;
 
-	guc->send = intel_guc_send_ct;
-	guc->handler = intel_guc_to_host_event_handler_ct;
-
 	/* check for mmio messages received before/during the CT enable */
 	guc_get_mmio_msg(guc);
 	guc_handle_mmio_msg(guc);
@@ -216,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc)
 
 	/* check for CT messages received before we enabled interrupts */
 	spin_lock_irq(&i915->irq_lock);
-	intel_guc_to_host_event_handler_ct(guc);
+	intel_guc_ct_event_handler(&guc->ct);
 	spin_unlock_irq(&i915->irq_lock);
 
 	DRM_INFO("GuC communication enabled\n");
@@ -224,7 +229,7 @@ static int guc_enable_communication(struct intel_guc *guc)
 	return 0;
 }
 
-static void __guc_stop_communication(struct intel_guc *guc)
+static void guc_disable_communication(struct intel_guc *guc)
 {
 	/*
 	 * Events generated during or after CT disable are logged by guc in
@@ -235,23 +240,6 @@ static void __guc_stop_communication(struct intel_guc *guc)
 
 	guc_disable_interrupts(guc);
 
-	guc->send = intel_guc_send_nop;
-	guc->handler = intel_guc_to_host_event_handler_nop;
-}
-
-static void guc_stop_communication(struct intel_guc *guc)
-{
-	intel_guc_ct_stop(&guc->ct);
-
-	__guc_stop_communication(guc);
-
-	DRM_INFO("GuC communication stopped\n");
-}
-
-static void guc_disable_communication(struct intel_guc *guc)
-{
-	__guc_stop_communication(guc);
-
 	intel_guc_ct_disable(&guc->ct);
 
 	/*
@@ -265,41 +253,33 @@ static void guc_disable_communication(struct intel_guc *guc)
 	DRM_INFO("GuC communication disabled\n");
 }
 
-void intel_uc_fetch_firmwares(struct intel_uc *uc)
+static void __uc_fetch_firmwares(struct intel_uc *uc)
 {
-	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
 	int err;
 
-	if (!intel_uc_uses_guc(uc))
-		return;
+	GEM_BUG_ON(!intel_uc_uses_guc(uc));
 
-	err = intel_uc_fw_fetch(&uc->guc.fw, i915);
+	err = intel_uc_fw_fetch(&uc->guc.fw);
 	if (err)
 		return;
 
 	if (intel_uc_uses_huc(uc))
-		intel_uc_fw_fetch(&uc->huc.fw, i915);
+		intel_uc_fw_fetch(&uc->huc.fw);
 }
 
-void intel_uc_cleanup_firmwares(struct intel_uc *uc)
+static void __uc_cleanup_firmwares(struct intel_uc *uc)
 {
-	if (!intel_uc_uses_guc(uc))
-		return;
-
-	if (intel_uc_uses_huc(uc))
-		intel_uc_fw_cleanup_fetch(&uc->huc.fw);
-
+	intel_uc_fw_cleanup_fetch(&uc->huc.fw);
 	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
 }
 
-void intel_uc_init(struct intel_uc *uc)
+static void __uc_init(struct intel_uc *uc)
 {
 	struct intel_guc *guc = &uc->guc;
 	struct intel_huc *huc = &uc->huc;
 	int ret;
 
-	if (!intel_uc_uses_guc(uc))
-		return;
+	GEM_BUG_ON(!intel_uc_uses_guc(uc));
 
 	/* XXX: GuC submission is unavailable for now */
 	GEM_BUG_ON(intel_uc_supports_guc_submission(uc));
@@ -314,17 +294,10 @@ void intel_uc_init(struct intel_uc *uc)
 		intel_huc_init(huc);
 }
 
-void intel_uc_fini(struct intel_uc *uc)
+static void __uc_fini(struct intel_uc *uc)
 {
-	struct intel_guc *guc = &uc->guc;
-
-	if (!intel_uc_uses_guc(uc))
-		return;
-
-	if (intel_uc_uses_huc(uc))
-		intel_huc_fini(&uc->huc);
-
-	intel_guc_fini(guc);
+	intel_huc_fini(&uc->huc);
+	intel_guc_fini(&uc->guc);
 
 	__uc_free_load_err_log(uc);
 }
@@ -342,14 +315,6 @@ static int __uc_sanitize(struct intel_uc *uc)
 	return __intel_uc_reset_hw(uc);
 }
 
-void intel_uc_sanitize(struct intel_uc *uc)
-{
-	if (!intel_uc_supports_guc(uc))
-		return;
-
-	__uc_sanitize(uc);
-}
-
 /* Initialize and verify the uC regs related to uC positioning in WOPCM */
 static int uc_init_wopcm(struct intel_uc *uc)
 {
@@ -413,13 +378,8 @@ static bool uc_is_wopcm_locked(struct intel_uc *uc)
 	       (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
 }
 
-int intel_uc_init_hw(struct intel_uc *uc)
+static int __uc_check_hw(struct intel_uc *uc)
 {
-	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
-	struct intel_guc *guc = &uc->guc;
-	struct intel_huc *huc = &uc->huc;
-	int ret, attempts;
-
 	if (!intel_uc_supports_guc(uc))
 		return 0;
 
@@ -428,11 +388,24 @@ int intel_uc_init_hw(struct intel_uc *uc)
 	 * before on this system after reboot, otherwise we risk GPU hangs.
 	 * To check if GuC was loaded before we look at WOPCM registers.
 	 */
-	if (!intel_uc_uses_guc(uc) && !uc_is_wopcm_locked(uc))
-		return 0;
+	if (uc_is_wopcm_locked(uc))
+		return -EIO;
+
+	return 0;
+}
+
+static int __uc_init_hw(struct intel_uc *uc)
+{
+	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+	struct intel_guc *guc = &uc->guc;
+	struct intel_huc *huc = &uc->huc;
+	int ret, attempts;
+
+	GEM_BUG_ON(!intel_uc_supports_guc(uc));
+	GEM_BUG_ON(!intel_uc_uses_guc(uc));
 
 	if (!intel_uc_fw_is_available(&guc->fw)) {
-		ret = uc_is_wopcm_locked(uc) ||
+		ret = __uc_check_hw(uc) ||
 		      intel_uc_fw_is_overridden(&guc->fw) ||
 		      intel_uc_supports_guc_submission(uc) ?
 		      intel_uc_fw_status_to_error(guc->fw.status) : 0;
@@ -486,11 +459,8 @@ int intel_uc_init_hw(struct intel_uc *uc)
 	if (ret)
 		goto err_communication;
 
-	if (intel_uc_supports_guc_submission(uc)) {
-		ret = intel_guc_submission_enable(guc);
-		if (ret)
-			goto err_communication;
-	}
+	if (intel_uc_supports_guc_submission(uc))
+		intel_guc_submission_enable(guc);
 
 	dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
 		 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
@@ -531,7 +501,7 @@ int intel_uc_init_hw(struct intel_uc *uc)
 	return -EIO;
 }
 
-void intel_uc_fini_hw(struct intel_uc *uc)
+static void __uc_fini_hw(struct intel_uc *uc)
 {
 	struct intel_guc *guc = &uc->guc;
 
@@ -560,7 +530,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc)
 	if (!intel_guc_is_running(guc))
 		return;
 
-	guc_stop_communication(guc);
+	guc_disable_communication(guc);
 	__uc_sanitize(uc);
 }
 
@@ -631,3 +601,20 @@ int intel_uc_runtime_resume(struct intel_uc *uc)
 	 */
 	return __uc_resume(uc, true);
 }
+
+static const struct intel_uc_ops uc_ops_off = {
+	.init_hw = __uc_check_hw,
+};
+
+static const struct intel_uc_ops uc_ops_on = {
+	.sanitize = __uc_sanitize,
+
+	.init_fw = __uc_fetch_firmwares,
+	.fini_fw = __uc_cleanup_firmwares,
+
+	.init = __uc_init,
+	.fini = __uc_fini,
+
+	.init_hw = __uc_init_hw,
+	.fini_hw = __uc_fini_hw,
+};
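
The two ops tables above replace scattered "if (!intel_uc_uses_guc(uc))" early-returns with a single selection made once at init time; uc_ops_off keeps only an init_hw hook (__uc_check_hw) so the disabled path can still sanity-check the hardware. A minimal standalone sketch of that pattern follows; the names (my_uc, my_uc_ops, uses_guc and the hook bodies) are invented for illustration and are not part of the driver:

	#include <stdio.h>
	#include <stdbool.h>

	struct my_uc;

	struct my_uc_ops {
		int  (*init_hw)(struct my_uc *uc);
		void (*fini_hw)(struct my_uc *uc);
	};

	struct my_uc {
		const struct my_uc_ops *ops;
		bool uses_guc;
	};

	static int off_init_hw(struct my_uc *uc)
	{
		puts("uc disabled: sanity check only");
		return 0;
	}

	static int on_init_hw(struct my_uc *uc)
	{
		puts("uc enabled: load firmware");
		return 0;
	}

	static void on_fini_hw(struct my_uc *uc)
	{
		puts("uc enabled: tear down");
	}

	static const struct my_uc_ops uc_ops_off = { .init_hw = off_init_hw };
	static const struct my_uc_ops uc_ops_on  = {
		.init_hw = on_init_hw,
		.fini_hw = on_fini_hw,
	};

	/* Pick the table once instead of checking uses_guc in every entry point. */
	static void my_uc_init_early(struct my_uc *uc, bool uses_guc)
	{
		uc->uses_guc = uses_guc;
		uc->ops = uses_guc ? &uc_ops_on : &uc_ops_off;
	}

	int main(void)
	{
		struct my_uc uc;

		my_uc_init_early(&uc, false);
		uc.ops->init_hw(&uc);

		my_uc_init_early(&uc, true);
		uc.ops->init_hw(&uc);
		uc.ops->fini_hw(&uc);
		return 0;
	}
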
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 527995c..49c9135 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -10,7 +10,20 @@
 #include "intel_huc.h"
 #include "i915_params.h"
 
+struct intel_uc;
+
+struct intel_uc_ops {
+	int (*sanitize)(struct intel_uc *uc);
+	void (*init_fw)(struct intel_uc *uc);
+	void (*fini_fw)(struct intel_uc *uc);
+	void (*init)(struct intel_uc *uc);
+	void (*fini)(struct intel_uc *uc);
+	int (*init_hw)(struct intel_uc *uc);
+	void (*fini_hw)(struct intel_uc *uc);
+};
+
 struct intel_uc {
+	struct intel_uc_ops const *ops;
 	struct intel_guc guc;
 	struct intel_huc huc;
 
@@ -21,13 +34,6 @@ struct intel_uc {
 void intel_uc_init_early(struct intel_uc *uc);
 void intel_uc_driver_late_release(struct intel_uc *uc);
 void intel_uc_init_mmio(struct intel_uc *uc);
-void intel_uc_fetch_firmwares(struct intel_uc *uc);
-void intel_uc_cleanup_firmwares(struct intel_uc *uc);
-void intel_uc_sanitize(struct intel_uc *uc);
-void intel_uc_init(struct intel_uc *uc);
-int intel_uc_init_hw(struct intel_uc *uc);
-void intel_uc_fini_hw(struct intel_uc *uc);
-void intel_uc_fini(struct intel_uc *uc);
 void intel_uc_reset_prepare(struct intel_uc *uc);
 void intel_uc_suspend(struct intel_uc *uc);
 void intel_uc_runtime_suspend(struct intel_uc *uc);
@@ -64,4 +70,20 @@ static inline bool intel_uc_uses_huc(struct intel_uc *uc)
 	return intel_huc_is_enabled(&uc->huc);
 }
 
+#define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
+static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
+{ \
+	if (uc->ops->_OPS) \
+		return uc->ops->_OPS(uc); \
+	return _RET; \
+}
+intel_uc_ops_function(sanitize, sanitize, int, 0);
+intel_uc_ops_function(fetch_firmwares, init_fw, void, );
+intel_uc_ops_function(cleanup_firmwares, fini_fw, void, );
+intel_uc_ops_function(init, init, void, );
+intel_uc_ops_function(fini, fini, void, );
+intel_uc_ops_function(init_hw, init_hw, int, 0);
+intel_uc_ops_function(fini_hw, fini_hw, void, );
+#undef intel_uc_ops_function
+
 #endif
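
intel_uc_ops_function() stamps out the public intel_uc_*() entry points as thin wrappers that dispatch through uc->ops and fall back to a default when a hook is absent. A rough userspace sketch of the same macro shape; the my_uc_* names are invented, and the kernel version additionally covers void hooks by passing an empty default return (relying on the compiler accepting "return <void expr>;"), which this sketch avoids for brevity:

	#include <stdio.h>

	struct my_uc;

	struct my_uc_ops {
		int (*sanitize)(struct my_uc *uc);
		int (*init_hw)(struct my_uc *uc);
	};

	struct my_uc {
		const struct my_uc_ops *ops;
	};

	/*
	 * Stamp out one wrapper per hook: call through the ops table when the
	 * hook is populated, otherwise fall back to a per-hook default.
	 */
	#define my_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
	static inline _TYPE my_uc_##_NAME(struct my_uc *uc) \
	{ \
		if (uc->ops->_OPS) \
			return uc->ops->_OPS(uc); \
		return _RET; \
	}
	my_uc_ops_function(sanitize, sanitize, int, 0)
	my_uc_ops_function(init_hw, init_hw, int, -1)
	#undef my_uc_ops_function

	static int real_init_hw(struct my_uc *uc)
	{
		return 42;
	}

	static const struct my_uc_ops partial_ops = { .init_hw = real_init_hw };

	int main(void)
	{
		struct my_uc uc = { .ops = &partial_ops };

		printf("sanitize -> %d (default)\n", my_uc_sanitize(&uc));
		printf("init_hw  -> %d (from ops)\n", my_uc_init_hw(&uc));
		return 0;
	}
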
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 66a30ab..8ee0a0c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -11,7 +11,6 @@
 #include "intel_uc_fw_abi.h"
 #include "i915_drv.h"
 
-#ifdef CONFIG_DRM_I915_DEBUG_GUC
 static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
 {
 	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
@@ -22,6 +21,7 @@ static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
 	return container_of(uc_fw, struct intel_gt, uc.huc.fw);
 }
 
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
 void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
 			       enum intel_uc_fw_status status)
 {
@@ -219,10 +219,9 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
 				  INTEL_UC_FIRMWARE_NOT_SUPPORTED);
 }
 
-static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw,
-				      struct drm_i915_private *i915,
-				      int e)
+static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
 {
+	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
 	bool user = e == -EINVAL;
 
 	if (i915_inject_probe_error(i915, e)) {
@@ -260,14 +259,14 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw,
 /**
  * intel_uc_fw_fetch - fetch uC firmware
  * @uc_fw: uC firmware
- * @i915: device private
  *
  * Fetch uC firmware into GEM obj.
  *
  * Return: 0 on success, a negative errno code on failure.
  */
-int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
 {
+	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
 	struct device *dev = i915->drm.dev;
 	struct drm_i915_gem_object *obj;
 	const struct firmware *fw = NULL;
@@ -282,8 +281,8 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
 	if (err)
 		return err;
 
-	__force_fw_fetch_failures(uc_fw, i915, -EINVAL);
-	__force_fw_fetch_failures(uc_fw, i915, -ESTALE);
+	__force_fw_fetch_failures(uc_fw, -EINVAL);
+	__force_fw_fetch_failures(uc_fw, -ESTALE);
 
 	err = request_firmware(&fw, uc_fw->path, dev);
 	if (err)
@@ -390,8 +389,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
 	return err;
 }
 
-static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
+static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
 {
+	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
 	struct drm_mm_node *node = &ggtt->uc_fw;
 
 	GEM_BUG_ON(!drm_mm_node_allocated(node));
@@ -401,13 +401,12 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
 	return lower_32_bits(node->start);
 }
 
-static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw,
-				  struct intel_gt *gt)
+static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
 {
 	struct drm_i915_gem_object *obj = uc_fw->obj;
-	struct i915_ggtt *ggtt = gt->ggtt;
+	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
 	struct i915_vma dummy = {
-		.node.start = uc_fw_ggtt_offset(uc_fw, ggtt),
+		.node.start = uc_fw_ggtt_offset(uc_fw),
 		.node.size = obj->base.size,
 		.pages = obj->mm.pages,
 		.vm = &ggtt->vm,
@@ -422,19 +421,18 @@ static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw,
 	ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
 }
 
-static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw,
-				    struct intel_gt *gt)
+static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
 {
 	struct drm_i915_gem_object *obj = uc_fw->obj;
-	struct i915_ggtt *ggtt = gt->ggtt;
-	u64 start = uc_fw_ggtt_offset(uc_fw, ggtt);
+	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
+	u64 start = uc_fw_ggtt_offset(uc_fw);
 
 	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
 }
 
-static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
-		      u32 wopcm_offset, u32 dma_flags)
+static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
 {
+	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
 	struct intel_uncore *uncore = gt->uncore;
 	u64 offset;
 	int ret;
@@ -446,13 +444,13 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
 	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
 	/* Set the source address for the uCode */
-	offset = uc_fw_ggtt_offset(uc_fw, gt->ggtt);
+	offset = uc_fw_ggtt_offset(uc_fw);
 	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
 	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
 	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));
 
 	/* Set the DMA destination */
-	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, wopcm_offset);
+	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
 	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
 
 	/*
@@ -484,17 +482,16 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
 /**
  * intel_uc_fw_upload - load uC firmware using custom loader
  * @uc_fw: uC firmware
- * @gt: the intel_gt structure
- * @wopcm_offset: destination offset in wopcm
+ * @dst_offset: destination offset
  * @dma_flags: flags for dma ctrl
  * @dma_flags: flags for dma ctrl
  *
  * Loads uC firmware and updates internal flags.
  *
  * Return: 0 on success, non-zero on failure.
  */
-int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
-		       u32 wopcm_offset, u32 dma_flags)
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
 {
+	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
 	int err;
 
 	/* make sure the status was cleared the last time we reset the uc */
@@ -508,9 +505,9 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
 		return -ENOEXEC;
 
 	/* Call custom loader */
-	intel_uc_fw_ggtt_bind(uc_fw, gt);
-	err = uc_fw_xfer(uc_fw, gt, wopcm_offset, dma_flags);
-	intel_uc_fw_ggtt_unbind(uc_fw, gt);
+	uc_fw_bind_ggtt(uc_fw);
+	err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
+	uc_fw_unbind_ggtt(uc_fw);
 	if (err)
 		goto fail;
 
@@ -547,10 +544,7 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
 
 void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
 {
-	if (!intel_uc_fw_is_available(uc_fw))
-		return;
-
-	i915_gem_object_unpin_pages(uc_fw->obj);
+	intel_uc_fw_cleanup_fetch(uc_fw);
 }
 
 /**
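
With the explicit i915/gt parameters gone, the firmware helpers recover their owner from the embedded struct via __uc_fw_to_gt(), i.e. a container_of() walk (the real helper also distinguishes the GuC and HuC members by uc_fw->type). A self-contained sketch of that back-pointer pattern, with invented fw/gt types and a simplified container_of() built on standard offsetof():

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified shape of the kernel's container_of(): member pointer -> enclosing object. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct fw {
		const char *path;
	};

	struct gt {
		int id;
		struct fw guc_fw;	/* embedded, like gt->uc.guc.fw */
	};

	/* Recover the enclosing gt from a pointer to its embedded fw. */
	static struct gt *fw_to_gt(struct fw *fw)
	{
		return container_of(fw, struct gt, guc_fw);
	}

	/* Callers pass only the fw; the owner is derived instead of threaded through. */
	static void fw_fetch(struct fw *fw)
	{
		struct gt *gt = fw_to_gt(fw);

		printf("fetching %s for gt%d\n", fw->path, gt->id);
	}

	int main(void)
	{
		struct gt gt0 = { .id = 0, .guc_fw = { .path = "guc.bin" } };

		fw_fetch(&gt0.guc_fw);
		return 0;
	}
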
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 7a0a598..1f30543d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -229,10 +229,9 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
 			    enum intel_uc_fw_type type, bool supported,
 			    enum intel_platform platform, u8 rev);
-int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915);
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw);
 void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
-int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
-		       u32 wopcm_offset, u32 dma_flags);
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags);
 int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
 void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
 size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len);
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
deleted file mode 100644
index d8a8038..0000000
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ /dev/null
@@ -1,299 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2017 Intel Corporation
- */
-
-#include "i915_selftest.h"
-#include "gem/i915_gem_pm.h"
-
-/* max doorbell number + negative test for each client type */
-#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM)
-
-static struct intel_guc_client *clients[ATTEMPTS];
-
-static bool available_dbs(struct intel_guc *guc, u32 priority)
-{
-	unsigned long offset;
-	unsigned long end;
-	u16 id;
-
-	/* first half is used for normal priority, second half for high */
-	offset = 0;
-	end = GUC_NUM_DOORBELLS / 2;
-	if (priority <= GUC_CLIENT_PRIORITY_HIGH) {
-		offset = end;
-		end += offset;
-	}
-
-	id = find_next_zero_bit(guc->doorbell_bitmap, end, offset);
-	if (id < end)
-		return true;
-
-	return false;
-}
-
-static int check_all_doorbells(struct intel_guc *guc)
-{
-	u16 db_id;
-
-	pr_info_once("Max number of doorbells: %d", GUC_NUM_DOORBELLS);
-	for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) {
-		if (!doorbell_ok(guc, db_id)) {
-			pr_err("doorbell %d, not ok\n", db_id);
-			return -EIO;
-		}
-	}
-
-	return 0;
-}
-
-static int ring_doorbell_nop(struct intel_guc_client *client)
-{
-	struct guc_process_desc *desc = __get_process_desc(client);
-	int err;
-
-	client->use_nop_wqi = true;
-
-	spin_lock_irq(&client->wq_lock);
-
-	guc_wq_item_append(client, 0, 0, 0, 0);
-	guc_ring_doorbell(client);
-
-	spin_unlock_irq(&client->wq_lock);
-
-	client->use_nop_wqi = false;
-
-	/* if there are no issues GuC will update the WQ head and keep the
-	 * WQ in active status
-	 */
-	err = wait_for(READ_ONCE(desc->head) == READ_ONCE(desc->tail), 10);
-	if (err) {
-		pr_err("doorbell %u ring failed!\n", client->doorbell_id);
-		return -EIO;
-	}
-
-	if (desc->wq_status != WQ_STATUS_ACTIVE) {
-		pr_err("doorbell %u ring put WQ in bad state (%u)!\n",
-		       client->doorbell_id, desc->wq_status);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-/*
- * Basic client sanity check, handy to validate create_clients.
- */
-static int validate_client(struct intel_guc_client *client, int client_priority)
-{
-	if (client->priority != client_priority ||
-	    client->doorbell_id == GUC_DOORBELL_INVALID)
-		return -EINVAL;
-	else
-		return 0;
-}
-
-static bool client_doorbell_in_sync(struct intel_guc_client *client)
-{
-	return !client || doorbell_ok(client->guc, client->doorbell_id);
-}
-
-/*
- * Check that we're able to synchronize guc_clients with their doorbells
- *
- * We're creating clients and reserving doorbells once, at module load. During
- * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to
- * GuC being reset. In other words - GuC clients are still around, but the
- * status of their doorbells may be incorrect. This is the reason behind
- * validating that the doorbells status expected by the driver matches what the
- * GuC/HW have.
- */
-static int igt_guc_clients(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_guc *guc = &gt->uc.guc;
-	intel_wakeref_t wakeref;
-	int err = 0;
-
-	GEM_BUG_ON(!HAS_GT_UC(gt->i915));
-	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
-
-	err = check_all_doorbells(guc);
-	if (err)
-		goto unlock;
-
-	/*
-	 * Get rid of clients created during driver load because the test will
-	 * recreate them.
-	 */
-	guc_clients_disable(guc);
-	guc_clients_destroy(guc);
-	if (guc->execbuf_client) {
-		pr_err("guc_clients_destroy lied!\n");
-		err = -EINVAL;
-		goto unlock;
-	}
-
-	err = guc_clients_create(guc);
-	if (err) {
-		pr_err("Failed to create clients\n");
-		goto unlock;
-	}
-	GEM_BUG_ON(!guc->execbuf_client);
-
-	err = validate_client(guc->execbuf_client,
-			      GUC_CLIENT_PRIORITY_KMD_NORMAL);
-	if (err) {
-		pr_err("execbug client validation failed\n");
-		goto out;
-	}
-
-	/* the client should now have reserved a doorbell */
-	if (!has_doorbell(guc->execbuf_client)) {
-		pr_err("guc_clients_create didn't reserve doorbells\n");
-		err = -EINVAL;
-		goto out;
-	}
-
-	/* Now enable the clients */
-	guc_clients_enable(guc);
-
-	/* each client should now have received a doorbell */
-	if (!client_doorbell_in_sync(guc->execbuf_client)) {
-		pr_err("failed to initialize the doorbells\n");
-		err = -EINVAL;
-		goto out;
-	}
-
-	/*
-	 * Basic test - an attempt to reallocate a valid doorbell to the
-	 * client it is currently assigned should not cause a failure.
-	 */
-	err = create_doorbell(guc->execbuf_client);
-
-out:
-	/*
-	 * Leave clean state for other test, plus the driver always destroy the
-	 * clients during unload.
-	 */
-	guc_clients_disable(guc);
-	guc_clients_destroy(guc);
-	guc_clients_create(guc);
-	guc_clients_enable(guc);
-unlock:
-	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
-	return err;
-}
-
-/*
- * Create as many clients as number of doorbells. Note that there's already
- * client(s)/doorbell(s) created during driver load, but this test creates
- * its own and do not interact with the existing ones.
- */
-static int igt_guc_doorbells(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_guc *guc = &gt->uc.guc;
-	intel_wakeref_t wakeref;
-	int i, err = 0;
-	u16 db_id;
-
-	GEM_BUG_ON(!HAS_GT_UC(gt->i915));
-	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
-
-	err = check_all_doorbells(guc);
-	if (err)
-		goto unlock;
-
-	for (i = 0; i < ATTEMPTS; i++) {
-		clients[i] = guc_client_alloc(guc, i % GUC_CLIENT_PRIORITY_NUM);
-
-		if (!clients[i]) {
-			pr_err("[%d] No guc client\n", i);
-			err = -EINVAL;
-			goto out;
-		}
-
-		if (IS_ERR(clients[i])) {
-			if (PTR_ERR(clients[i]) != -ENOSPC) {
-				pr_err("[%d] unexpected error\n", i);
-				err = PTR_ERR(clients[i]);
-				goto out;
-			}
-
-			if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) {
-				pr_err("[%d] non-db related alloc fail\n", i);
-				err = -EINVAL;
-				goto out;
-			}
-
-			/* expected, ran out of dbs for this client type */
-			continue;
-		}
-
-		/*
-		 * The check below is only valid because we keep a doorbell
-		 * assigned during the whole life of the client.
-		 */
-		if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) {
-			pr_err("[%d] more clients than doorbells (%d >= %d)\n",
-			       i, clients[i]->stage_id, GUC_NUM_DOORBELLS);
-			err = -EINVAL;
-			goto out;
-		}
-
-		err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM);
-		if (err) {
-			pr_err("[%d] client_alloc sanity check failed!\n", i);
-			err = -EINVAL;
-			goto out;
-		}
-
-		db_id = clients[i]->doorbell_id;
-
-		err = __guc_client_enable(clients[i]);
-		if (err) {
-			pr_err("[%d] Failed to create a doorbell\n", i);
-			goto out;
-		}
-
-		/* doorbell id shouldn't change, we are holding the mutex */
-		if (db_id != clients[i]->doorbell_id) {
-			pr_err("[%d] doorbell id changed (%d != %d)\n",
-			       i, db_id, clients[i]->doorbell_id);
-			err = -EINVAL;
-			goto out;
-		}
-
-		err = check_all_doorbells(guc);
-		if (err)
-			goto out;
-
-		err = ring_doorbell_nop(clients[i]);
-		if (err)
-			goto out;
-	}
-
-out:
-	for (i = 0; i < ATTEMPTS; i++)
-		if (!IS_ERR_OR_NULL(clients[i])) {
-			__guc_client_disable(clients[i]);
-			guc_client_free(clients[i]);
-		}
-unlock:
-	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
-	return err;
-}
-
-int intel_guc_live_selftest(struct drm_i915_private *i915)
-{
-	static const struct i915_subtest tests[] = {
-		SUBTEST(igt_guc_clients),
-		SUBTEST(igt_guc_doorbells),
-	};
-
-	if (!USES_GUC_SUBMISSION(i915))
-		return 0;
-
-	return intel_gt_live_subtests(tests, &i915->gt);
-}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index 2867036..ab25d15 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -38,6 +38,10 @@
 
 #define GVT_CMD_HASH_BITS 7
 
+struct intel_gvt;
+struct intel_shadow_wa_ctx;
+struct intel_vgpu_workload;
+
 void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
 
 int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index a87f33e..b59b340 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -35,6 +35,11 @@
 #ifndef _GVT_DISPLAY_H_
 #define _GVT_DISPLAY_H_
 
+#include <linux/types.h>
+
+struct intel_gvt;
+struct intel_vgpu;
+
 #define SBI_REG_MAX	20
 #define DPCD_SIZE	0x700
 
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
index f6dfc8b..dfe0cbc 100644
--- a/drivers/gpu/drm/i915/gvt/edid.h
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -35,6 +35,10 @@
 #ifndef _GVT_EDID_H_
 #define _GVT_EDID_H_
 
+#include <linux/types.h>
+
+struct intel_vgpu;
+
 #define EDID_SIZE		128
 #define EDID_ADDR		0x50 /* Linux hvm EDID addr */
 
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 5ccc2c6..5c0c1fd 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -35,6 +35,8 @@
 #ifndef _GVT_EXECLIST_H_
 #define _GVT_EXECLIST_H_
 
+#include <linux/types.h>
+
 struct execlist_ctx_descriptor_format {
 	union {
 		u32 ldw;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 60c1550..67b6ede 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -36,6 +36,8 @@
 #ifndef _GVT_FB_DECODER_H_
 #define _GVT_FB_DECODER_H_
 
+#include <linux/types.h>
+
 #define _PLANE_CTL_FORMAT_SHIFT		24
 #define _PLANE_CTL_TILED_SHIFT		10
 #define _PIPE_V_SRCSZ_SHIFT		0
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4b04af5..34cb404 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1282,7 +1282,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 		return -EINVAL;
 	default:
 		GEM_BUG_ON(1);
-	};
+	}
 
 	/* direct shadow */
 	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1043e6d..6d28d72 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2691,7 +2691,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	return 0;
 }
 
-static int init_broadwell_mmio_info(struct intel_gvt *gvt)
+static int init_bdw_mmio_info(struct intel_gvt *gvt)
 {
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	int ret;
@@ -3380,20 +3380,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		goto err;
 
 	if (IS_BROADWELL(dev_priv)) {
-		ret = init_broadwell_mmio_info(gvt);
+		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
 	} else if (IS_SKYLAKE(dev_priv)
 		|| IS_KABYLAKE(dev_priv)
 		|| IS_COFFEELAKE(dev_priv)) {
-		ret = init_broadwell_mmio_info(gvt);
+		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
 		ret = init_skl_mmio_info(gvt);
 		if (ret)
 			goto err;
 	} else if (IS_BROXTON(dev_priv)) {
-		ret = init_broadwell_mmio_info(gvt);
+		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
 		ret = init_skl_mmio_info(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index b19a3b1..b17c4a1 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -33,6 +33,10 @@
 #ifndef _GVT_HYPERCALL_H_
 #define _GVT_HYPERCALL_H_
 
+#include <linux/types.h>
+
+struct device;
+
 enum hypervisor_type {
 	INTEL_GVT_HYPERVISOR_XEN = 0,
 	INTEL_GVT_HYPERVISOR_KVM,
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
index 5313fb1..fcd6638 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.h
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -32,6 +32,8 @@
 #ifndef _GVT_INTERRUPT_H_
 #define _GVT_INTERRUPT_H_
 
+#include <linux/types.h>
+
 enum intel_gvt_event_type {
 	RCS_MI_USER_INTERRUPT = 0,
 	RCS_DEBUG,
@@ -135,6 +137,7 @@ enum intel_gvt_event_type {
 
 struct intel_gvt_irq;
 struct intel_gvt;
+struct intel_vgpu;
 
 typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq,
 	enum intel_gvt_event_type event, struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 5874f1c..2e68f4b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -36,6 +36,8 @@
 #ifndef _GVT_MMIO_H_
 #define _GVT_MMIO_H_
 
+#include <linux/types.h>
+
 struct intel_gvt;
 struct intel_vgpu;
 
diff --git a/drivers/gpu/drm/i915/gvt/page_track.h b/drivers/gpu/drm/i915/gvt/page_track.h
index fa607a7..f6eb713 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.h
+++ b/drivers/gpu/drm/i915/gvt/page_track.h
@@ -25,6 +25,9 @@
 #ifndef _GVT_PAGE_TRACK_H_
 #define _GVT_PAGE_TRACK_H_
 
+#include <linux/types.h>
+
+struct intel_vgpu;
 struct intel_vgpu_page_track;
 
 typedef int (*gvt_page_track_handler_t)(
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
index 7b59e3e..3dacdad 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.h
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -34,6 +34,9 @@
 #ifndef __GVT_SCHED_POLICY__
 #define __GVT_SCHED_POLICY__
 
+struct intel_gvt;
+struct intel_vgpu;
+
 struct intel_gvt_sched_policy_ops {
 	int (*init)(struct intel_gvt *gvt);
 	void (*clean)(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 5b2a7d0..685d1e0 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -35,12 +35,12 @@
 
 #include <linux/kthread.h>
 
-#include "gem/i915_gem_context.h"
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_context.h"
 #include "gt/intel_ring.h"
 
 #include "i915_drv.h"
+#include "i915_gem_gtt.h"
 #include "gvt.h"
 
 #define RING_CTX_OFF(x) \
@@ -59,7 +59,7 @@ static void set_context_pdp_root_pointer(
 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 {
 	struct drm_i915_gem_object *ctx_obj =
-		workload->req->hw_context->state->obj;
+		workload->req->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 
@@ -130,7 +130,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	struct intel_gvt *gvt = vgpu->gvt;
 	int ring_id = workload->ring_id;
 	struct drm_i915_gem_object *ctx_obj =
-		workload->req->hw_context->state->obj;
+		workload->req->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *dst;
@@ -205,9 +205,9 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	return 0;
 }
 
-static inline bool is_gvt_request(struct i915_request *req)
+static inline bool is_gvt_request(struct i915_request *rq)
 {
-	return i915_gem_context_force_single_submission(req->gem_context);
+	return intel_context_force_single_submission(rq->context);
 }
 
 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -307,7 +307,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	u32 *cs;
 	int err;
 
-	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
+	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
 
 	/*
@@ -363,11 +363,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 }
 
 static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
-					  struct i915_gem_context *ctx)
+					  struct intel_context *ce)
 {
 	struct intel_vgpu_mm *mm = workload->shadow_mm;
-	struct i915_ppgtt *ppgtt =
-		i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
+	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
 	int i = 0;
 
 	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@@ -380,8 +379,6 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
 			px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
 		}
 	}
-
-	i915_vm_put(&ppgtt->vm);
 }
 
 static int
@@ -529,7 +526,7 @@ static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
 	struct i915_request *rq = workload->req;
 	struct execlist_ring_context *shadow_ring_context =
-		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
+		(struct execlist_ring_context *)rq->context->lrc_reg_state;
 
 	shadow_ring_context->bb_per_ctx_ptr.val =
 		(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -628,7 +625,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 
 	update_shadow_pdps(workload);
 
-	set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+	set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
 
 	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
 	if (ret) {
@@ -787,7 +784,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	struct i915_request *rq = workload->req;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
+	struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *src;
@@ -1223,18 +1220,14 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *ctx;
 	struct i915_ppgtt *ppgtt;
 	enum intel_engine_id i;
 	int ret;
 
-	ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
+	ppgtt = i915_ppgtt_create(&i915->gt);
+	if (IS_ERR(ppgtt))
+		return PTR_ERR(ppgtt);
 
-	i915_gem_context_set_force_single_submission(ctx);
-
-	ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
 	i915_context_ppgtt_root_save(s, ppgtt);
 
 	for_each_engine(engine, i915, i) {
@@ -1243,12 +1236,16 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		INIT_LIST_HEAD(&s->workload_q_head[i]);
 		s->shadow[i] = ERR_PTR(-EINVAL);
 
-		ce = intel_context_create(ctx, engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			ret = PTR_ERR(ce);
 			goto out_shadow_ctx;
 		}
 
+		i915_vm_put(ce->vm);
+		ce->vm = i915_vm_get(&ppgtt->vm);
+		intel_context_set_single_submission(ce);
+
 		if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
 			const unsigned int ring_size = 512 * SZ_4K;
 
@@ -1281,7 +1278,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
 
 	i915_vm_put(&ppgtt->vm);
-	i915_gem_context_put(ctx);
 	return 0;
 
 out_shadow_ctx:
@@ -1294,7 +1290,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		intel_context_put(s->shadow[i]);
 	}
 	i915_vm_put(&ppgtt->vm);
-	i915_gem_context_put(ctx);
 	return ret;
 }
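
The setup path now builds a ppgtt directly and points each engine context at it with i915_vm_put(ce->vm); ce->vm = i915_vm_get(&ppgtt->vm); -- a plain swap of a refcounted address-space pointer (putting first is safe here only because the caller still holds its own reference on the new ppgtt). A toy sketch of such a swap, taking the new reference before dropping the old as the generally safe ordering; all names below are illustrative:

	#include <stdio.h>

	struct vm {
		const char *name;
		int refcount;
	};

	static struct vm *vm_get(struct vm *vm)
	{
		vm->refcount++;
		return vm;
	}

	static void vm_put(struct vm *vm)
	{
		if (--vm->refcount == 0)
			printf("freeing %s\n", vm->name);
	}

	struct context {
		struct vm *vm;
	};

	/*
	 * Move the context onto a different address space: take a reference on
	 * the new vm before dropping the old one, so the context always owns
	 * exactly one reference.
	 */
	static void context_set_vm(struct context *ce, struct vm *vm)
	{
		struct vm *old = ce->vm;

		ce->vm = vm_get(vm);
		vm_put(old);
	}

	int main(void)
	{
		struct vm default_vm = { "default vm", 1 };
		struct vm shadow_ppgtt = { "shadow ppgtt", 1 };
		struct context ce = { .vm = vm_get(&default_vm) };

		context_set_vm(&ce, &shadow_ppgtt);

		vm_put(ce.vm);		/* context teardown */
		vm_put(&shadow_ppgtt);	/* creator's reference */
		vm_put(&default_vm);
		return 0;
	}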
 
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index a19e7d8..f3da5c0 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -6,6 +6,7 @@
 
 #include <linux/debugobjects.h>
 
+#include "gt/intel_context.h"
 #include "gt/intel_engine_pm.h"
 #include "gt/intel_ring.h"
 
@@ -91,10 +92,9 @@ static void debug_active_init(struct i915_active *ref)
 
 static void debug_active_activate(struct i915_active *ref)
 {
-	spin_lock_irq(&ref->tree_lock);
+	lockdep_assert_held(&ref->tree_lock);
 	if (!atomic_read(&ref->count)) /* before the first inc */
 		debug_object_activate(ref, &active_debug_desc);
-	spin_unlock_irq(&ref->tree_lock);
 }
 
 static void debug_active_deactivate(struct i915_active *ref)
@@ -186,18 +186,33 @@ active_retire(struct i915_active *ref)
 	__active_retire(ref);
 }
 
+static inline struct dma_fence **
+__active_fence_slot(struct i915_active_fence *active)
+{
+	return (struct dma_fence ** __force)&active->fence;
+}
+
+static inline bool
+active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct i915_active_fence *active =
+		container_of(cb, typeof(*active), cb);
+
+	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
+}
+
 static void
 node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
-	i915_active_fence_cb(fence, cb);
-	active_retire(container_of(cb, struct active_node, base.cb)->ref);
+	if (active_fence_cb(fence, cb))
+		active_retire(container_of(cb, struct active_node, base.cb)->ref);
 }
 
 static void
 excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
-	i915_active_fence_cb(fence, cb);
-	active_retire(container_of(cb, struct i915_active, excl.cb));
+	if (active_fence_cb(fence, cb))
+		active_retire(container_of(cb, struct i915_active, excl.cb));
 }
 
 static struct i915_active_fence *
@@ -244,7 +259,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
 	}
 
 	node = prealloc;
-	__i915_active_fence_init(&node->base, &tl->mutex, NULL, node_retire);
+	__i915_active_fence_init(&node->base, NULL, node_retire);
 	node->ref = ref;
 	node->timeline = idx;
 
@@ -262,7 +277,8 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
 void __i915_active_init(struct i915_active *ref,
 			int (*active)(struct i915_active *ref),
 			void (*retire)(struct i915_active *ref),
-			struct lock_class_key *key)
+			struct lock_class_key *mkey,
+			struct lock_class_key *wkey)
 {
 	unsigned long bits;
 
@@ -280,9 +296,12 @@ void __i915_active_init(struct i915_active *ref,
 
 	init_llist_head(&ref->preallocated_barriers);
 	atomic_set(&ref->count, 0);
-	__mutex_init(&ref->mutex, "i915_active", key);
-	__i915_active_fence_init(&ref->excl, &ref->mutex, NULL, excl_retire);
+	__mutex_init(&ref->mutex, "i915_active", mkey);
+	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
 	INIT_WORK(&ref->work, active_work);
+#if IS_ENABLED(CONFIG_LOCKDEP)
+	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
+#endif
 }
 
 static bool ____active_del_barrier(struct i915_active *ref,
@@ -376,15 +395,8 @@ void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
 	/* We expect the caller to manage the exclusive timeline ordering */
 	GEM_BUG_ON(i915_active_is_idle(ref));
 
-	/*
-	 * As we don't know which mutex the caller is using, we told a small
-	 * lie to the debug code that it is using the i915_active.mutex;
-	 * and now we must stick to that lie.
-	 */
-	mutex_acquire(&ref->mutex.dep_map, 0, 0, _THIS_IP_);
 	if (!__i915_active_fence_set(&ref->excl, f))
 		atomic_inc(&ref->count);
-	mutex_release(&ref->mutex.dep_map, _THIS_IP_);
 }
 
 bool i915_active_acquire_if_busy(struct i915_active *ref)
@@ -407,8 +419,10 @@ int i915_active_acquire(struct i915_active *ref)
 	if (!atomic_read(&ref->count) && ref->active)
 		err = ref->active(ref);
 	if (!err) {
+		spin_lock_irq(&ref->tree_lock); /* vs __active_retire() */
 		debug_active_activate(ref);
 		atomic_inc(&ref->count);
+		spin_unlock_irq(&ref->tree_lock);
 	}
 
 	mutex_unlock(&ref->mutex);
@@ -461,6 +475,7 @@ int i915_active_wait(struct i915_active *ref)
 	if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
 		return -EINTR;
 
+	flush_work(&ref->work);
 	return 0;
 }
 
@@ -590,12 +605,15 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 					    struct intel_engine_cs *engine)
 {
 	intel_engine_mask_t tmp, mask = engine->mask;
+	struct llist_node *pos = NULL, *next;
 	struct intel_gt *gt = engine->gt;
-	struct llist_node *pos, *next;
 	int err;
 
 	GEM_BUG_ON(i915_active_is_idle(ref));
-	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+
+	/* Wait until the previous preallocation is completed */
+	while (!llist_empty(&ref->preallocated_barriers))
+		cond_resched();
 
 	/*
 	 * Preallocate a node for each physical engine supporting the target
@@ -615,10 +633,6 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 				goto unwind;
 			}
 
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
-			node->base.lock =
-				&engine->kernel_context->timeline->mutex;
-#endif
 			RCU_INIT_POINTER(node->base.fence, NULL);
 			node->base.cb.func = node_retire;
 			node->timeline = idx;
@@ -639,18 +653,27 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 			node->base.cb.node.prev = (void *)engine;
 			atomic_inc(&ref->count);
 		}
+		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
 
 		GEM_BUG_ON(barrier_to_engine(node) != engine);
-		llist_add(barrier_to_ll(node), &ref->preallocated_barriers);
+		next = barrier_to_ll(node);
+		next->next = pos;
+		if (!pos)
+			pos = next;
 		intel_engine_pm_get(engine);
 	}
 
+	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+	llist_add_batch(next, pos, &ref->preallocated_barriers);
+
 	return 0;
 
 unwind:
-	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
+	while (pos) {
 		struct active_node *node = barrier_from_ll(pos);
 
+		pos = pos->next;
+
 		atomic_dec(&ref->count);
 		intel_engine_pm_put(barrier_to_engine(node));
 
@@ -702,12 +725,18 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 	}
 }
 
+static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
+{
+	return __active_fence_slot(&barrier_from_ll(node)->base);
+}
+
 void i915_request_add_active_barriers(struct i915_request *rq)
 {
 	struct intel_engine_cs *engine = rq->engine;
 	struct llist_node *node, *next;
 	unsigned long flags;
 
+	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
 	GEM_BUG_ON(intel_engine_is_virtual(engine));
 	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);
 
@@ -721,19 +750,13 @@ void i915_request_add_active_barriers(struct i915_request *rq)
 	 */
 	spin_lock_irqsave(&rq->lock, flags);
 	llist_for_each_safe(node, next, node) {
-		RCU_INIT_POINTER(barrier_from_ll(node)->base.fence, &rq->fence);
-		smp_wmb(); /* serialise with reuse_idle_barrier */
+		/* serialise with reuse_idle_barrier */
+		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
 		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
 	}
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
 
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
-#define active_is_held(active) lockdep_is_held((active)->lock)
-#else
-#define active_is_held(active) true
-#endif
-
 /*
  * __i915_active_fence_set: Update the last active fence along its timeline
  * @active: the active tracker
@@ -744,7 +767,7 @@ void i915_request_add_active_barriers(struct i915_request *rq)
  * fence onto this one. Returns the previous fence (if not already completed),
  * which the caller must ensure is executed before the new fence. To ensure
  * that the order of fences within the timeline of the i915_active_fence is
- * maintained, it must be locked by the caller.
+ * understood, it should be locked by the caller.
  */
 struct dma_fence *
 __i915_active_fence_set(struct i915_active_fence *active,
@@ -753,34 +776,41 @@ __i915_active_fence_set(struct i915_active_fence *active,
 	struct dma_fence *prev;
 	unsigned long flags;
 
-	/* NB: must be serialised by an outer timeline mutex (active->lock) */
-	spin_lock_irqsave(fence->lock, flags);
+	if (fence == rcu_access_pointer(active->fence))
+		return fence;
+
 	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
 
-	prev = rcu_dereference_protected(active->fence, active_is_held(active));
+	/*
+	 * Consider that we have two threads arriving (A and B), with
+	 * C already resident as the active->fence.
+	 *
+	 * A does the xchg first, and so it sees C or NULL depending
+	 * on the timing of the interrupt handler. If it is NULL, the
+	 * previous fence must have been signaled and we know that
+	 * we are first on the timeline. If it is still present,
+	 * we acquire the lock on that fence and serialise with the interrupt
+	 * handler, in the process removing it from any future interrupt
+	 * callback. A will then wait on C before executing (if present).
+	 *
+	 * As B is second, it sees A as the previous fence and so waits for
+	 * it to complete its transition and takes over the occupancy for
+	 * itself -- remembering that it needs to wait on A before executing.
+	 *
+	 * Note the strong ordering of the timeline also provides consistent
+	 * nesting rules for the fence->lock; the inner lock is always the
+	 * older lock.
+	 */
+	spin_lock_irqsave(fence->lock, flags);
+	prev = xchg(__active_fence_slot(active), fence);
 	if (prev) {
 		GEM_BUG_ON(prev == fence);
 		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
 		__list_del_entry(&active->cb.node);
 		spin_unlock(prev->lock); /* serialise with prev->cb_list */
-
-		/*
-		 * active->fence is reset by the callback from inside
-		 * interrupt context. We need to serialise our list
-		 * manipulation with the fence->lock to prevent the prev
-		 * being lost inside an interrupt (it can't be replaced as
-		 * no other caller is allowed to enter __i915_active_fence_set
-		 * as we hold the timeline lock). After serialising with
-		 * the callback, we need to double check which ran first,
-		 * our list_del() [decoupling prev from the callback] or
-		 * the callback...
-		 */
-		prev = rcu_access_pointer(active->fence);
 	}
-
-	rcu_assign_pointer(active->fence, fence);
+	GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
 	list_add_tail(&active->cb.node, &fence->cb_list);
-
 	spin_unlock_irqrestore(fence->lock, flags);
 
 	return prev;
@@ -792,10 +822,6 @@ int i915_active_fence_set(struct i915_active_fence *active,
 	struct dma_fence *fence;
 	int err = 0;
 
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
-	lockdep_assert_held(active->lock);
-#endif
-
 	/* Must maintain timeline ordering wrt previous active requests */
 	rcu_read_lock();
 	fence = __i915_active_fence_set(active, &rq->fence);
@@ -812,7 +838,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
 
 void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
-	i915_active_fence_cb(fence, cb);
+	active_fence_cb(fence, cb);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
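
The core of this i915_active change is that the active fence slot is now updated lock-free: __i915_active_fence_set() installs the new fence with xchg() and the retirement callback clears the slot with cmpxchg() only if it still holds the fence that signalled. A userspace sketch of that idea using C11 atomics; it deliberately omits the fence spinlock and callback-list handling the kernel code still performs, and the names are invented:

	#include <stdatomic.h>
	#include <stdio.h>

	struct fence { int seqno; };

	/* One "active fence" slot, updated without a dedicated mutex. */
	static _Atomic(struct fence *) slot;

	/*
	 * Retirement callback: clear the slot only if it still points at the
	 * fence that just signalled.  If a newer fence has already been
	 * installed, leave it alone (mirrors active_fence_cb()).
	 */
	static void on_signal(struct fence *f)
	{
		struct fence *expected = f;

		if (atomic_compare_exchange_strong(&slot, &expected, NULL))
			printf("seqno %d retired the slot\n", f->seqno);
		else
			printf("seqno %d already superseded\n", f->seqno);
	}

	/*
	 * Install a new fence and return the previous occupant, if any; the
	 * caller must order itself after that previous fence (mirrors
	 * __i915_active_fence_set()).
	 */
	static struct fence *set_active(struct fence *f)
	{
		return atomic_exchange(&slot, f);
	}

	int main(void)
	{
		struct fence a = { 1 }, b = { 2 };
		struct fence *prev;

		prev = set_active(&a);
		printf("installing 1, previous %d\n", prev ? prev->seqno : 0);

		prev = set_active(&b);	/* B arrives before A signals */
		printf("installing 2, previous %d\n", prev ? prev->seqno : 0);

		on_signal(&a);		/* A finds it was superseded */
		on_signal(&b);		/* B clears the slot */
		return 0;
	}
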
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 4485935..b571f67 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -61,19 +61,15 @@ void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);
  */
 static inline void
 __i915_active_fence_init(struct i915_active_fence *active,
-			 struct mutex *lock,
 			 void *fence,
 			 dma_fence_func_t fn)
 {
 	RCU_INIT_POINTER(active->fence, fence);
 	active->cb.func = fn ?: i915_active_noop;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
-	active->lock = lock;
-#endif
 }
 
-#define INIT_ACTIVE_FENCE(A, LOCK) \
-	__i915_active_fence_init((A), (LOCK), NULL, NULL)
+#define INIT_ACTIVE_FENCE(A) \
+	__i915_active_fence_init((A), NULL, NULL)
 
 struct dma_fence *
 __i915_active_fence_set(struct i915_active_fence *active,
@@ -127,15 +123,6 @@ i915_active_fence_isset(const struct i915_active_fence *active)
 	return rcu_access_pointer(active->fence);
 }
 
-static inline void
-i915_active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
-	struct i915_active_fence *active =
-		container_of(cb, typeof(*active), cb);
-
-	RCU_INIT_POINTER(active->fence, NULL);
-}
-
 /*
  * GPU activity tracking
  *
@@ -165,11 +152,15 @@ i915_active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 void __i915_active_init(struct i915_active *ref,
 			int (*active)(struct i915_active *ref),
 			void (*retire)(struct i915_active *ref),
-			struct lock_class_key *key);
+			struct lock_class_key *mkey,
+			struct lock_class_key *wkey);
+
+/* Specialise each class of i915_active to avoid impossible lockdep cycles. */
 #define i915_active_init(ref, active, retire) do {		\
-	static struct lock_class_key __key;				\
+	static struct lock_class_key __mkey;				\
+	static struct lock_class_key __wkey;				\
 									\
-	__i915_active_init(ref, active, retire, &__key);		\
+	__i915_active_init(ref, active, retire, &__mkey, &__wkey);	\
 } while (0)
 
 int i915_active_ref(struct i915_active *ref,
@@ -215,5 +206,6 @@ void i915_active_acquire_barrier(struct i915_active *ref);
 void i915_request_add_active_barriers(struct i915_request *rq);
 
 void i915_active_print(struct i915_active *ref, struct drm_printer *m);
+void i915_active_unlock_wait(struct i915_active *ref);
 
 #endif /* _I915_ACTIVE_H_ */
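
i915_active_init() now declares two static lock_class_key objects per expansion site (__mkey for the mutex, __wkey for the worker) so lockdep treats each class of i915_active as distinct. The mechanism is simply that a macro containing static locals creates a separate object per call site; a small sketch of that trick, with invented tracker/class_key types standing in for the lockdep machinery:

	#include <stdio.h>

	struct class_key { char dummy; };

	struct tracker {
		struct class_key *mkey;
		struct class_key *wkey;
	};

	static void __tracker_init(struct tracker *t, struct class_key *mkey,
				   struct class_key *wkey)
	{
		t->mkey = mkey;
		t->wkey = wkey;
	}

	/*
	 * Every textual use of tracker_init() declares its own pair of static
	 * keys, so trackers initialised at different call sites carry
	 * different key addresses even though they run identical code.
	 */
	#define tracker_init(t) do {				\
		static struct class_key __mkey, __wkey;		\
		__tracker_init((t), &__mkey, &__wkey);		\
	} while (0)

	int main(void)
	{
		struct tracker a, b, loop[2];
		int i;

		tracker_init(&a);		/* call site #1 */
		tracker_init(&b);		/* call site #2 */
		for (i = 0; i < 2; i++)
			tracker_init(&loop[i]);	/* call site #3, hit twice */

		printf("a vs b: %s keys\n",
		       a.mkey == b.mkey ? "same" : "different");
		printf("loop[0] vs loop[1]: %s keys\n",
		       loop[0].mkey == loop[1].mkey ? "same" : "different");
		return 0;
	}
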
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 96aed0e..6360c3e 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -20,21 +20,6 @@
 struct i915_active_fence {
 	struct dma_fence __rcu *fence;
 	struct dma_fence_cb cb;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
-	/*
-	 * Incorporeal!
-	 *
-	 * Updates to the i915_active_request must be serialised under a lock
-	 * to ensure that the timeline is ordered. Normally, this is the
-	 * timeline->mutex, but another mutex may be used so long as it is
-	 * done so consistently.
-	 *
-	 * For lockdep tracking of the above, we store the lock we intend
-	 * to always use for updates of this i915_active_request during
-	 * construction and assert that is held on every update.
-	 */
-	struct mutex *lock;
-#endif
 };
 
 struct active_node;
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index e9d4200..66883af 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -262,8 +262,10 @@ void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
 {
 	struct i915_buddy_block *block, *on;
 
-	list_for_each_entry_safe(block, on, objects, link)
+	list_for_each_entry_safe(block, on, objects, link) {
 		i915_buddy_free(mm, block);
+		cond_resched();
+	}
 	INIT_LIST_HEAD(objects);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index f24096e..a0e437a 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -235,7 +235,7 @@ static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
 	/*
 	 * MI_BATCH_BUFFER_START requires some special handling. It's not
 	 * really a 'skip' action but it doesn't seem like it's worth adding
-	 * a new action. See i915_parse_cmds().
+	 * a new action. See intel_engine_cmd_parser().
 	 */
 	CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
 };
@@ -731,7 +731,7 @@ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
 			return 0xFF;
 	}
 
-	DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
+	DRM_DEBUG("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
 	return 0;
 }
 
@@ -754,7 +754,7 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
 			return 0xFF;
 	}
 
-	DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
+	DRM_DEBUG("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
 	return 0;
 }
 
@@ -767,7 +767,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
 	else if (client == INSTR_BC_CLIENT)
 		return 0xFF;
 
-	DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+	DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
 	return 0;
 }
 
@@ -778,7 +778,7 @@ static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
 	if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
 		return 0xFF;
 
-	DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+	DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
 	return 0;
 }
 
@@ -1127,79 +1127,71 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
 /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
 static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 		       struct drm_i915_gem_object *src_obj,
-		       u32 batch_start_offset,
-		       u32 batch_len,
-		       bool *needs_clflush_after)
+		       u32 offset, u32 length)
 {
-	unsigned int src_needs_clflush;
-	unsigned int dst_needs_clflush;
+	bool needs_clflush;
 	void *dst, *src;
 	int ret;
 
-	ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
-	if (ret)
-		return ERR_PTR(ret);
-
 	dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
-	i915_gem_object_finish_access(dst_obj);
 	if (IS_ERR(dst))
 		return dst;
 
-	ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
+	ret = i915_gem_object_pin_pages(src_obj);
 	if (ret) {
 		i915_gem_object_unpin_map(dst_obj);
 		return ERR_PTR(ret);
 	}
 
+	needs_clflush =
+		!(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
+
 	src = ERR_PTR(-ENODEV);
-	if (src_needs_clflush &&
-	    i915_can_memcpy_from_wc(NULL, batch_start_offset, 0)) {
+	if (needs_clflush && i915_has_memcpy_from_wc()) {
 		src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
 		if (!IS_ERR(src)) {
-			i915_memcpy_from_wc(dst,
-					    src + batch_start_offset,
-					    ALIGN(batch_len, 16));
+			i915_unaligned_memcpy_from_wc(dst,
+						      src + offset,
+						      length);
 			i915_gem_object_unpin_map(src_obj);
 		}
 	}
 	if (IS_ERR(src)) {
 		void *ptr;
-		int offset, n;
+		int x, n;
 
-		offset = offset_in_page(batch_start_offset);
-
-		/* We can avoid clflushing partial cachelines before the write
+		/*
+		 * We can avoid clflushing partial cachelines before the write
 		 * if we only every write full cache-lines. Since we know that
 		 * both the source and destination are in multiples of
 		 * PAGE_SIZE, we can simply round up to the next cacheline.
 		 * We don't care about copying too much here as we only
 		 * validate up to the end of the batch.
 		 */
-		if (dst_needs_clflush & CLFLUSH_BEFORE)
-			batch_len = roundup(batch_len,
-					    boot_cpu_data.x86_clflush_size);
+		if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+			length = round_up(length,
+					  boot_cpu_data.x86_clflush_size);
 
 		ptr = dst;
-		for (n = batch_start_offset >> PAGE_SHIFT; batch_len; n++) {
-			int len = min_t(int, batch_len, PAGE_SIZE - offset);
+		x = offset_in_page(offset);
+		for (n = offset >> PAGE_SHIFT; length; n++) {
+			int len = min_t(int, length, PAGE_SIZE - x);
 
 			src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
-			if (src_needs_clflush)
-				drm_clflush_virt_range(src + offset, len);
-			memcpy(ptr, src + offset, len);
+			if (needs_clflush)
+				drm_clflush_virt_range(src + x, len);
+			memcpy(ptr, src + x, len);
 			kunmap_atomic(src);
 
 			ptr += len;
-			batch_len -= len;
-			offset = 0;
+			length -= len;
+			x = 0;
 		}
 	}
 
-	i915_gem_object_finish_access(src_obj);
+	i915_gem_object_unpin_pages(src_obj);
 
 	/* dst_obj is returned with vmap pinned */
-	*needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
-
 	return dst;
 }
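
The kmap fallback in copy_batch() walks the source page by page, applying the in-page offset only on the first iteration and clamping each copy to the end of the current page. A standalone sketch of that loop shape over a flat buffer standing in for the page array; PAGE_SIZE is shrunk and get_page()/copy_paged() are invented names so the example is easy to trace:

	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE 16	/* tiny pages to keep the example readable */

	static const unsigned char *get_page(const unsigned char *backing, unsigned int n)
	{
		return backing + n * PAGE_SIZE;	/* stand-in for kmap of page n */
	}

	/*
	 * Copy 'length' bytes starting 'offset' bytes into a paged source: only
	 * the first iteration uses an in-page offset, every later page starts
	 * at 0 (the same shape as the kmap_atomic loop in copy_batch()).
	 */
	static void copy_paged(void *dst, const unsigned char *backing,
			       unsigned int offset, unsigned int length)
	{
		unsigned char *ptr = dst;
		unsigned int x = offset % PAGE_SIZE;

		for (unsigned int n = offset / PAGE_SIZE; length; n++) {
			unsigned int len = length < PAGE_SIZE - x ? length : PAGE_SIZE - x;

			memcpy(ptr, get_page(backing, n) + x, len);
			ptr += len;
			length -= len;
			x = 0;
		}
	}

	int main(void)
	{
		unsigned char src[4 * PAGE_SIZE];
		unsigned char dst[24] = {0};

		for (unsigned int i = 0; i < sizeof(src); i++)
			src[i] = (unsigned char)i;

		copy_paged(dst, src, 10, sizeof(dst));	/* spans pages 0..2 */
		printf("dst[0]=%d dst[23]=%d\n", dst[0], dst[23]);
		return 0;
	}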
 
@@ -1211,7 +1203,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 		return true;
 
 	if (desc->flags & CMD_DESC_REJECT) {
-		DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+		DRM_DEBUG("CMD: Rejected command: 0x%08X\n", *cmd);
 		return false;
 	}
 
@@ -1231,8 +1223,8 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 				find_reg(engine, reg_addr);
 
 			if (!reg) {
-				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
-						 reg_addr, *cmd, engine->name);
+				DRM_DEBUG("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
+					  reg_addr, *cmd, engine->name);
 				return false;
 			}
 
@@ -1242,22 +1234,22 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 			 */
 			if (reg->mask) {
 				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
-					DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
-							 reg_addr);
+					DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n",
+						  reg_addr);
 					return false;
 				}
 
 				if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
-					DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
-							 reg_addr);
+					DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n",
+						  reg_addr);
 					return false;
 				}
 
 				if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
 				    (offset + 2 > length ||
 				     (cmd[offset + 1] & reg->mask) != reg->value)) {
-					DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
-							 reg_addr);
+					DRM_DEBUG("CMD: Rejected LRI to masked register 0x%08X\n",
+						  reg_addr);
 					return false;
 				}
 			}
@@ -1284,8 +1276,8 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 			}
 
 			if (desc->bits[i].offset >= length) {
-				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
-						 *cmd, engine->name);
+				DRM_DEBUG("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
+					  *cmd, engine->name);
 				return false;
 			}
 
@@ -1293,11 +1285,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 				desc->bits[i].mask;
 
 			if (dword != desc->bits[i].expected) {
-				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (%s)\n",
-						 *cmd,
-						 desc->bits[i].mask,
-						 desc->bits[i].expected,
-						 dword, engine->name);
+				DRM_DEBUG("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (%s)\n",
+					  *cmd,
+					  desc->bits[i].mask,
+					  desc->bits[i].expected,
+					  dword, engine->name);
 				return false;
 			}
 		}
@@ -1306,17 +1298,17 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 	return true;
 }
 
-static int check_bbstart(const struct i915_gem_context *ctx,
-			 u32 *cmd, u32 offset, u32 length,
-			 u32 batch_len,
-			 u64 batch_start,
-			 u64 shadow_batch_start)
+static int check_bbstart(u32 *cmd, u32 offset, u32 length,
+			 u32 batch_length,
+			 u64 batch_addr,
+			 u64 shadow_addr,
+			 const unsigned long *jump_whitelist)
 {
 	u64 jump_offset, jump_target;
 	u32 target_cmd_offset, target_cmd_index;
 
 	/* For igt compatibility on older platforms */
-	if (CMDPARSER_USES_GGTT(ctx->i915)) {
+	if (!jump_whitelist) {
 		DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
 		return -EACCES;
 	}
@@ -1327,14 +1319,14 @@ static int check_bbstart(const struct i915_gem_context *ctx,
 		return -EINVAL;
 	}
 
-	jump_target = *(u64*)(cmd+1);
-	jump_offset = jump_target - batch_start;
+	jump_target = *(u64 *)(cmd + 1);
+	jump_offset = jump_target - batch_addr;
 
 	/*
 	 * Any underflow of jump_target is guaranteed to be outside the range
 	 * of a u32, so >= test catches both too large and too small
 	 */
-	if (jump_offset >= batch_len) {
+	if (jump_offset >= batch_length) {
 		DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
 			  jump_target);
 		return -EINVAL;
@@ -1342,20 +1334,20 @@ static int check_bbstart(const struct i915_gem_context *ctx,
 
 	/*
 	 * This cannot overflow a u32 because we already checked jump_offset
-	 * is within the BB, and the batch_len is a u32
+	 * is within the BB, and the batch_length is a u32
 	 */
 	target_cmd_offset = lower_32_bits(jump_offset);
 	target_cmd_index = target_cmd_offset / sizeof(u32);
 
-	*(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
+	*(u64 *)(cmd + 1) = shadow_addr + target_cmd_offset;
 
 	if (target_cmd_index == offset)
 		return 0;
 
-	if (ctx->jump_whitelist_cmds <= target_cmd_index) {
-		DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
-		return -EINVAL;
-	} else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
+	if (IS_ERR(jump_whitelist))
+		return PTR_ERR(jump_whitelist);
+
+	if (!test_bit(target_cmd_index, jump_whitelist)) {
 		DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
 			  jump_target);
 		return -EINVAL;
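
check_bbstart() now validates MI_BATCH_BUFFER_START targets against a per-parse bitmap: the jump must land inside the batch (the unsigned >= test also catches underflow) and on a dword offset previously recorded as a command boundary. A rough standalone sketch of those checks with a hand-rolled bitmap; alloc_whitelist/mark_ok/check_jump are invented names, and the shadow-address rewriting the real code performs is omitted:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define BITS_PER_LONG (8 * sizeof(unsigned long))

	/* one bit per 32-bit command dword in the batch */
	static unsigned long *alloc_whitelist(uint32_t batch_length)
	{
		size_t bits = batch_length / sizeof(uint32_t);
		size_t words = (bits + BITS_PER_LONG - 1) / BITS_PER_LONG;

		return calloc(words, sizeof(unsigned long));
	}

	static void mark_ok(unsigned long *wl, uint32_t index)
	{
		wl[index / BITS_PER_LONG] |= 1UL << (index % BITS_PER_LONG);
	}

	static bool is_ok(const unsigned long *wl, uint32_t index)
	{
		return wl[index / BITS_PER_LONG] >> (index % BITS_PER_LONG) & 1;
	}

	/*
	 * Validate a BB_START-style jump: the target must land inside the batch
	 * and on a dword offset that was already parsed as a command boundary.
	 */
	static int check_jump(uint64_t jump_target, uint64_t batch_addr,
			      uint32_t batch_length, const unsigned long *wl)
	{
		uint64_t jump_offset = jump_target - batch_addr;

		if (jump_offset >= batch_length)	/* catches underflow too */
			return -1;
		if (!is_ok(wl, (uint32_t)jump_offset / sizeof(uint32_t)))
			return -1;
		return 0;
	}

	int main(void)
	{
		const uint64_t batch_addr = 0x1000;
		const uint32_t batch_length = 64;
		unsigned long *wl = alloc_whitelist(batch_length);

		if (!wl)
			return 1;

		mark_ok(wl, 4);	/* pretend dword 4 was a parsed command boundary */

		printf("jump to +16: %s\n",
		       check_jump(batch_addr + 16, batch_addr, batch_length, wl) ? "rejected" : "ok");
		printf("jump to +20: %s\n",
		       check_jump(batch_addr + 20, batch_addr, batch_length, wl) ? "rejected" : "ok");

		free(wl);
		return 0;
	}
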
@@ -1364,54 +1356,40 @@ static int check_bbstart(const struct i915_gem_context *ctx,
 	return 0;
 }
 
-static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
+static unsigned long *alloc_whitelist(u32 batch_length)
 {
-	const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
-	const u32 exact_size = BITS_TO_LONGS(batch_cmds);
-	u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
-	unsigned long *next_whitelist;
+	unsigned long *jmp;
 
-	if (CMDPARSER_USES_GGTT(ctx->i915))
-		return;
+	/*
+	 * We expect batch_length to be less than 256KiB for known users,
+	 * i.e. we need at most an 8KiB bitmap allocation which should be
+	 * reasonably cheap due to kmalloc caches.
+	 */
 
-	if (batch_cmds <= ctx->jump_whitelist_cmds) {
-		bitmap_zero(ctx->jump_whitelist, batch_cmds);
-		return;
-	}
+	/* Prefer to report transient allocation failure rather than hit oom */
+	jmp = bitmap_zalloc(DIV_ROUND_UP(batch_length, sizeof(u32)),
+			    GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+	if (!jmp)
+		return ERR_PTR(-ENOMEM);
 
-again:
-	next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
-	if (next_whitelist) {
-		kfree(ctx->jump_whitelist);
-		ctx->jump_whitelist = next_whitelist;
-		ctx->jump_whitelist_cmds =
-			next_size * BITS_PER_BYTE * sizeof(long);
-		return;
-	}
-
-	if (next_size > exact_size) {
-		next_size = exact_size;
-		goto again;
-	}
-
-	DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
-	bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds);
-
-	return;
+	return jmp;
 }
 
 #define LENGTH_BIAS 2
 
+static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
+{
+	return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
+}
+
 /**
- * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
- * @ctx: the context in which the batch is to execute
+ * intel_engine_cmd_parser() - parse a batch buffer for privilege violations
  * @engine: the engine on which the batch is to execute
- * @batch_obj: the batch buffer in question
- * @batch_start: Canonical base address of batch
- * @batch_start_offset: byte offset in the batch at which execution starts
- * @batch_len: length of the commands in batch_obj
- * @shadow_batch_obj: copy of the batch buffer in question
- * @shadow_batch_start: Canonical base address of shadow_batch_obj
+ * @batch: the batch buffer in question
+ * @batch_offset: byte offset in the batch at which execution starts
+ * @batch_length: length of the commands in batch
+ * @shadow: validated copy of the batch buffer in question
+ * @trampoline: whether to emit a conditional trampoline at the end of the batch
  *
  * Parses the specified batch buffer looking for privilege violations as
  * described in the overview.
@@ -1419,38 +1397,46 @@ static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
  * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
  * if the batch appears legal but should use hardware parsing
  */
-
-int intel_engine_cmd_parser(struct i915_gem_context *ctx,
-			    struct intel_engine_cs *engine,
-			    struct drm_i915_gem_object *batch_obj,
-			    u64 batch_start,
-			    u32 batch_start_offset,
-			    u32 batch_len,
-			    struct drm_i915_gem_object *shadow_batch_obj,
-			    u64 shadow_batch_start)
+int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+			    struct i915_vma *batch,
+			    u32 batch_offset,
+			    u32 batch_length,
+			    struct i915_vma *shadow,
+			    bool trampoline)
 {
 	u32 *cmd, *batch_end, offset = 0;
 	struct drm_i915_cmd_descriptor default_desc = noop_desc;
 	const struct drm_i915_cmd_descriptor *desc = &default_desc;
-	bool needs_clflush_after = false;
+	unsigned long *jump_whitelist;
+	u64 batch_addr, shadow_addr;
 	int ret = 0;
 
-	cmd = copy_batch(shadow_batch_obj, batch_obj,
-			 batch_start_offset, batch_len,
-			 &needs_clflush_after);
+	GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd)));
+	GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd)));
+	GEM_BUG_ON(range_overflows_t(u64, batch_offset, batch_length,
+				     batch->size));
+	GEM_BUG_ON(!batch_length);
+
+	cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length);
 	if (IS_ERR(cmd)) {
-		DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
+		DRM_DEBUG("CMD: Failed to copy batch\n");
 		return PTR_ERR(cmd);
 	}
 
-	init_whitelist(ctx, batch_len);
+	jump_whitelist = NULL;
+	if (!trampoline)
+		/* Defer failure until attempted use */
+		jump_whitelist = alloc_whitelist(batch_length);
+
+	shadow_addr = gen8_canonical_addr(shadow->node.start);
+	batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
 
 	/*
 	 * We use the batch length as size because the shadow object is as
 	 * large or larger and copy_batch() will write MI_NOPs to the extra
 	 * space. Parsing should be faster in some cases this way.
 	 */
-	batch_end = cmd + (batch_len / sizeof(*batch_end));
+	batch_end = cmd + batch_length / sizeof(*batch_end);
 	do {
 		u32 length;
 
@@ -1459,61 +1445,99 @@ int intel_engine_cmd_parser(struct i915_gem_context *ctx,
 
 		desc = find_cmd(engine, *cmd, desc, &default_desc);
 		if (!desc) {
-			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
-					 *cmd);
+			DRM_DEBUG("CMD: Unrecognized command: 0x%08X\n", *cmd);
 			ret = -EINVAL;
-			goto err;
+			break;
 		}
 
 		if (desc->flags & CMD_DESC_FIXED)
 			length = desc->length.fixed;
 		else
-			length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
+			length = (*cmd & desc->length.mask) + LENGTH_BIAS;
 
 		if ((batch_end - cmd) < length) {
-			DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
-					 *cmd,
-					 length,
-					 batch_end - cmd);
+			DRM_DEBUG("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
+				  *cmd,
+				  length,
+				  batch_end - cmd);
 			ret = -EINVAL;
-			goto err;
+			break;
 		}
 
 		if (!check_cmd(engine, desc, cmd, length)) {
 			ret = -EACCES;
-			goto err;
-		}
-
-		if (desc->cmd.value == MI_BATCH_BUFFER_START) {
-			ret = check_bbstart(ctx, cmd, offset, length,
-					    batch_len, batch_start,
-					    shadow_batch_start);
-
-			if (ret)
-				goto err;
 			break;
 		}
 
-		if (ctx->jump_whitelist_cmds > offset)
-			set_bit(offset, ctx->jump_whitelist);
+		if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+			ret = check_bbstart(cmd, offset, length, batch_length,
+					    batch_addr, shadow_addr,
+					    jump_whitelist);
+			break;
+		}
+
+		if (!IS_ERR_OR_NULL(jump_whitelist))
+			__set_bit(offset, jump_whitelist);
 
 		cmd += length;
 		offset += length;
 		if (cmd >= batch_end) {
-			DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
+			DRM_DEBUG("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
 			ret = -EINVAL;
-			goto err;
+			break;
 		}
 	} while (1);
 
-	if (needs_clflush_after) {
-		void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
+	if (trampoline) {
+		/*
+		 * With the trampoline, the shadow is executed twice.
+		 *
+		 *   1 - starting at offset 0, in privileged mode
+		 *   2 - starting at offset batch_length, as non-privileged
+		 *
+		 * Only if the batch is valid and safe to execute, do we
+		 * allow the first privileged execution to proceed. If not,
+		 * we terminate the first batch and use the second batchbuffer
+		 * entry to chain to the original unsafe non-privileged batch,
+		 * leaving it to the HW to validate.
+		 */
+		*batch_end = MI_BATCH_BUFFER_END;
+
+		if (ret) {
+			/* Batch unsafe to execute with privileges, cancel! */
+			cmd = page_mask_bits(shadow->obj->mm.mapping);
+			*cmd = MI_BATCH_BUFFER_END;
+
+			/* If batch is unsafe but valid, jump to the original */
+			if (ret == -EACCES) {
+				unsigned int flags;
+
+				flags = MI_BATCH_NON_SECURE_I965;
+				if (IS_HASWELL(engine->i915))
+					flags = MI_BATCH_NON_SECURE_HSW;
+
+				GEM_BUG_ON(!IS_GEN_RANGE(engine->i915, 6, 7));
+				__gen6_emit_bb_start(batch_end,
+						     batch_addr,
+						     flags);
+
+				ret = 0; /* allow execution */
+			}
+		}
+
+		if (shadow_needs_clflush(shadow->obj))
+			drm_clflush_virt_range(batch_end, 8);
+	}
+
+	if (shadow_needs_clflush(shadow->obj)) {
+		void *ptr = page_mask_bits(shadow->obj->mm.mapping);
 
 		drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
 	}
 
-err:
-	i915_gem_object_unpin_map(shadow_batch_obj);
+	if (!IS_ERR_OR_NULL(jump_whitelist))
+		kfree(jump_whitelist);
+	i915_gem_object_unpin_map(shadow->obj);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8016484..d5a9b8a 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -61,24 +61,14 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
 
 static int i915_capabilities(struct seq_file *m, void *data)
 {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	const struct intel_device_info *info = INTEL_INFO(dev_priv);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
 	struct drm_printer p = drm_seq_file_printer(m);
-	const char *msg;
 
-	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
-	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
-	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
+	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
 
-	msg = "n/a";
-#ifdef CONFIG_INTEL_IOMMU
-	msg = enableddisabled(intel_iommu_gfx_mapped);
-#endif
-	seq_printf(m, "iommu: %s\n", msg);
-
-	intel_device_info_dump_flags(info, &p);
-	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
-	intel_driver_caps_print(&dev_priv->caps, &p);
+	intel_device_info_print_static(INTEL_INFO(i915), &p);
+	intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
+	intel_driver_caps_print(&i915->caps, &p);
 
 	kernel_param_lock(THIS_MODULE);
 	i915_params_dump(&i915_modparams, &p);
@@ -331,16 +321,15 @@ static void print_context_stats(struct seq_file *m,
 
 		for_each_gem_engine(ce,
 				    i915_gem_context_lock_engines(ctx), it) {
-			intel_context_lock_pinned(ce);
-			if (intel_context_is_pinned(ce)) {
+			if (intel_context_pin_if_active(ce)) {
 				rcu_read_lock();
 				if (ce->state)
 					per_file_stats(0,
 						       ce->state->obj, &kstats);
 				per_file_stats(0, ce->ring->vma->obj, &kstats);
 				rcu_read_unlock();
+				intel_context_unpin(ce);
 			}
-			intel_context_unlock_pinned(ce);
 		}
 		i915_gem_context_unlock_engines(ctx);
 
@@ -377,12 +366,16 @@ static void print_context_stats(struct seq_file *m,
 static int i915_gem_object_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct intel_memory_region *mr;
+	enum intel_region_id id;
 
 	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
 		   i915->mm.shrink_count,
 		   atomic_read(&i915->mm.free_count),
 		   i915->mm.shrink_memory);
-
+	for_each_memory_region(mr, i915, id)
+		seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
+			   mr->name, &mr->total, &mr->avail);
 	seq_putc(m, '\n');
 
 	print_context_stats(m, i915);
@@ -692,7 +685,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
 			      size_t count, loff_t *pos)
 {
-	struct i915_gpu_state *error;
+	struct i915_gpu_coredump *error;
 	ssize_t ret;
 	void *buf;
 
@@ -705,7 +698,7 @@ static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
 	if (!buf)
 		return -ENOMEM;
 
-	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
+	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
 	if (ret <= 0)
 		goto out;
 
@@ -721,19 +714,19 @@ static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
 
 static int gpu_state_release(struct inode *inode, struct file *file)
 {
-	i915_gpu_state_put(file->private_data);
+	i915_gpu_coredump_put(file->private_data);
 	return 0;
 }
 
 static int i915_gpu_info_open(struct inode *inode, struct file *file)
 {
 	struct drm_i915_private *i915 = inode->i_private;
-	struct i915_gpu_state *gpu;
+	struct i915_gpu_coredump *gpu;
 	intel_wakeref_t wakeref;
 
 	gpu = NULL;
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-		gpu = i915_capture_gpu_state(i915);
+		gpu = i915_gpu_coredump(i915);
 	if (IS_ERR(gpu))
 		return PTR_ERR(gpu);
 
@@ -755,7 +748,7 @@ i915_error_state_write(struct file *filp,
 		       size_t cnt,
 		       loff_t *ppos)
 {
-	struct i915_gpu_state *error = filp->private_data;
+	struct i915_gpu_coredump *error = filp->private_data;
 
 	if (!error)
 		return 0;
@@ -768,7 +761,7 @@ i915_error_state_write(struct file *filp,
 
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
-	struct i915_gpu_state *error;
+	struct i915_gpu_coredump *error;
 
 	error = i915_first_error_state(inode->i_private);
 	if (IS_ERR(error))
@@ -891,7 +884,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
-		cagf = intel_gpu_freq(rps, intel_get_cagf(rps, rpstat));
+		cagf = intel_rps_read_actual_frequency(rps);
 
 		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
@@ -1011,7 +1004,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 	return ret;
 }
 
-static int ironlake_drpc_info(struct seq_file *m)
+static int ilk_drpc_info(struct seq_file *m)
 {
 	struct drm_i915_private *i915 = node_to_i915(m->private);
 	struct intel_uncore *uncore = &i915->uncore;
@@ -1219,7 +1212,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 		else if (INTEL_GEN(dev_priv) >= 6)
 			err = gen6_drpc_info(m);
 		else
-			err = ironlake_drpc_info(m);
+			err = ilk_drpc_info(m);
 	}
 
 	return err;
@@ -1519,15 +1512,14 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 		for_each_gem_engine(ce,
 				    i915_gem_context_lock_engines(ctx), it) {
-			intel_context_lock_pinned(ce);
-			if (intel_context_is_pinned(ce)) {
+			if (intel_context_pin_if_active(ce)) {
 				seq_printf(m, "%s: ", ce->engine->name);
 				if (ce->state)
 					describe_obj(m, ce->state->obj);
 				describe_ctx_ring(m, ce->ring);
 				seq_putc(m, '\n');
+				intel_context_unpin(ce);
 			}
-			intel_context_unlock_pinned(ce);
 		}
 		i915_gem_context_unlock_engines(ctx);
 
@@ -1633,21 +1625,6 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_rps *rps = &dev_priv->gt.rps;
-	u32 act_freq = rps->cur_freq;
-	intel_wakeref_t wakeref;
-
-	with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
-		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-			vlv_punit_get(dev_priv);
-			act_freq = vlv_punit_read(dev_priv,
-						  PUNIT_REG_GPU_FREQ_STS);
-			vlv_punit_put(dev_priv);
-			act_freq = (act_freq >> 8) & 0xff;
-		} else {
-			act_freq = intel_get_cagf(rps,
-						  I915_READ(GEN6_RPSTAT1));
-		}
-	}
 
 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
 	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
@@ -1656,7 +1633,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
 	seq_printf(m, "Frequency requested %d, actual %d\n",
 		   intel_gpu_freq(rps, rps->cur_freq),
-		   intel_gpu_freq(rps, act_freq));
+		   intel_rps_read_actual_frequency(rps));
 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
 		   intel_gpu_freq(rps, rps->min_freq),
 		   intel_gpu_freq(rps, rps->min_freq_softlimit),
@@ -1802,30 +1779,12 @@ static void i915_guc_log_info(struct seq_file *m,
 static int i915_guc_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
-	struct intel_guc_client *client = guc->execbuf_client;
 
 	if (!USES_GUC(dev_priv))
 		return -ENODEV;
 
 	i915_guc_log_info(m, dev_priv);
 
-	if (!USES_GUC_SUBMISSION(dev_priv))
-		return 0;
-
-	GEM_BUG_ON(!guc->execbuf_client);
-
-	seq_printf(m, "\nDoorbell map:\n");
-	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
-	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
-
-	seq_printf(m, "\nGuC execbuf client @ %p:\n", client);
-	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
-		   client->priority,
-		   client->stage_id,
-		   client->proc_desc_offset);
-	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
-		   client->doorbell_id, client->doorbell_offset);
 	/* Add more as required ... */
 
 	return 0;
@@ -2020,7 +1979,7 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
 	struct drm_connector *connector = m->private;
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct intel_dp *intel_dp =
-		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+		enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
 	int ret;
 
 	if (!CAN_PSR(dev_priv)) {
@@ -2367,7 +2326,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
 }
 
 static void intel_seq_print_mode(struct seq_file *m, int tabs,
-				 struct drm_display_mode *mode)
+				 const struct drm_display_mode *mode)
 {
 	int i;
 
@@ -2378,59 +2337,35 @@ static void intel_seq_print_mode(struct seq_file *m, int tabs,
 }
 
 static void intel_encoder_info(struct seq_file *m,
-			       struct intel_crtc *intel_crtc,
-			       struct intel_encoder *intel_encoder)
+			       struct intel_crtc *crtc,
+			       struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_crtc *crtc = &intel_crtc->base;
-	struct intel_connector *intel_connector;
-	struct drm_encoder *encoder;
+	struct drm_connector_list_iter conn_iter;
+	struct drm_connector *connector;
 
-	encoder = &intel_encoder->base;
-	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
-		   encoder->base.id, encoder->name);
-	for_each_connector_on_encoder(dev, encoder, intel_connector) {
-		struct drm_connector *connector = &intel_connector->base;
-		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
-			   connector->base.id,
-			   connector->name,
-			   drm_get_connector_status_name(connector->status));
-		if (connector->status == connector_status_connected) {
-			struct drm_display_mode *mode = &crtc->mode;
-			seq_printf(m, ", mode:\n");
-			intel_seq_print_mode(m, 2, mode);
-		} else {
-			seq_putc(m, '\n');
-		}
+	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
+		   encoder->base.base.id, encoder->base.name);
+
+	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		const struct drm_connector_state *conn_state =
+			connector->state;
+
+		if (conn_state->best_encoder != &encoder->base)
+			continue;
+
+		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
+			   connector->base.id, connector->name);
 	}
-}
-
-static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_crtc *crtc = &intel_crtc->base;
-	struct intel_encoder *intel_encoder;
-	struct drm_plane_state *plane_state = crtc->primary->state;
-	struct drm_framebuffer *fb = plane_state->fb;
-
-	if (fb)
-		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
-			   fb->base.id, plane_state->src_x >> 16,
-			   plane_state->src_y >> 16, fb->width, fb->height);
-	else
-		seq_puts(m, "\tprimary plane disabled\n");
-	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
-		intel_encoder_info(m, intel_crtc, intel_encoder);
+	drm_connector_list_iter_end(&conn_iter);
 }
 
 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
 {
-	struct drm_display_mode *mode = panel->fixed_mode;
+	const struct drm_display_mode *mode = panel->fixed_mode;
 
-	seq_printf(m, "\tfixed mode:\n");
-	intel_seq_print_mode(m, 2, mode);
+	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 }
 
 static void intel_hdcp_info(struct seq_file *m,
@@ -2456,7 +2391,7 @@ static void intel_dp_info(struct seq_file *m,
 			  struct intel_connector *intel_connector)
 {
 	struct intel_encoder *intel_encoder = intel_connector->encoder;
-	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
 
 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
@@ -2476,7 +2411,7 @@ static void intel_dp_mst_info(struct seq_file *m,
 {
 	struct intel_encoder *intel_encoder = intel_connector->encoder;
 	struct intel_dp_mst_encoder *intel_mst =
-		enc_to_mst(&intel_encoder->base);
+		enc_to_mst(intel_encoder);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
@@ -2489,7 +2424,7 @@ static void intel_hdmi_info(struct seq_file *m,
 			    struct intel_connector *intel_connector)
 {
 	struct intel_encoder *intel_encoder = intel_connector->encoder;
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
 
 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
 	if (intel_connector->hdcp.shim) {
@@ -2508,10 +2443,12 @@ static void intel_connector_info(struct seq_file *m,
 				 struct drm_connector *connector)
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
-	struct intel_encoder *intel_encoder = intel_connector->encoder;
-	struct drm_display_mode *mode;
+	const struct drm_connector_state *conn_state = connector->state;
+	struct intel_encoder *encoder =
+		to_intel_encoder(conn_state->best_encoder);
+	const struct drm_display_mode *mode;
 
-	seq_printf(m, "connector %d: type %s, status: %s\n",
+	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
 		   connector->base.id, connector->name,
 		   drm_get_connector_status_name(connector->status));
 
@@ -2525,24 +2462,24 @@ static void intel_connector_info(struct seq_file *m,
 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
 
-	if (!intel_encoder)
+	if (!encoder)
 		return;
 
 	switch (connector->connector_type) {
 	case DRM_MODE_CONNECTOR_DisplayPort:
 	case DRM_MODE_CONNECTOR_eDP:
-		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
+		if (encoder->type == INTEL_OUTPUT_DP_MST)
 			intel_dp_mst_info(m, intel_connector);
 		else
 			intel_dp_info(m, intel_connector);
 		break;
 	case DRM_MODE_CONNECTOR_LVDS:
-		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+		if (encoder->type == INTEL_OUTPUT_LVDS)
 			intel_lvds_info(m, intel_connector);
 		break;
 	case DRM_MODE_CONNECTOR_HDMIA:
-		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
-		    intel_encoder->type == INTEL_OUTPUT_DDI)
+		if (encoder->type == INTEL_OUTPUT_HDMI ||
+		    encoder->type == INTEL_OUTPUT_DDI)
 			intel_hdmi_info(m, intel_connector);
 		break;
 	default:
@@ -2589,70 +2526,88 @@ static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
 		 rotation);
 }
 
-static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
+{
+	const struct intel_plane_state *plane_state =
+		to_intel_plane_state(plane->base.state);
+	const struct drm_framebuffer *fb = plane_state->uapi.fb;
+	struct drm_format_name_buf format_name;
+	struct drm_rect src, dst;
+	char rot_str[48];
+
+	src = drm_plane_state_src(&plane_state->uapi);
+	dst = drm_plane_state_dest(&plane_state->uapi);
+
+	if (fb)
+		drm_get_format_name(fb->format->format, &format_name);
+
+	plane_rotation(rot_str, sizeof(rot_str),
+		       plane_state->uapi.rotation);
+
+	seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
+		   fb ? fb->width : 0, fb ? fb->height : 0,
+		   DRM_RECT_FP_ARG(&src),
+		   DRM_RECT_ARG(&dst),
+		   rot_str);
+}
+
+static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
+{
+	const struct intel_plane_state *plane_state =
+		to_intel_plane_state(plane->base.state);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	struct drm_format_name_buf format_name;
+	char rot_str[48];
+
+	if (!fb)
+		return;
+
+	drm_get_format_name(fb->format->format, &format_name);
+
+	plane_rotation(rot_str, sizeof(rot_str),
+		       plane_state->hw.rotation);
+
+	seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+		   fb->base.id, format_name.str,
+		   fb->width, fb->height,
+		   yesno(plane_state->uapi.visible),
+		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
+		   DRM_RECT_ARG(&plane_state->uapi.dst),
+		   rot_str);
+}
+
+static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_plane *intel_plane;
+	struct intel_plane *plane;
 
-	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-		struct drm_plane_state *state;
-		struct drm_plane *plane = &intel_plane->base;
-		struct drm_format_name_buf format_name;
-		char rot_str[48];
-
-		if (!plane->state) {
-			seq_puts(m, "plane->state is NULL!\n");
-			continue;
-		}
-
-		state = plane->state;
-
-		if (state->fb) {
-			drm_get_format_name(state->fb->format->format,
-					    &format_name);
-		} else {
-			sprintf(format_name.str, "N/A");
-		}
-
-		plane_rotation(rot_str, sizeof(rot_str), state->rotation);
-
-		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
-			   plane->base.id,
-			   plane_type(intel_plane->base.type),
-			   state->crtc_x, state->crtc_y,
-			   state->crtc_w, state->crtc_h,
-			   (state->src_x >> 16),
-			   ((state->src_x & 0xffff) * 15625) >> 10,
-			   (state->src_y >> 16),
-			   ((state->src_y & 0xffff) * 15625) >> 10,
-			   (state->src_w >> 16),
-			   ((state->src_w & 0xffff) * 15625) >> 10,
-			   (state->src_h >> 16),
-			   ((state->src_h & 0xffff) * 15625) >> 10,
-			   format_name.str,
-			   rot_str);
+	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
+			   plane->base.base.id, plane->base.name,
+			   plane_type(plane->base.type));
+		intel_plane_uapi_info(m, plane);
+		intel_plane_hw_info(m, plane);
 	}
 }
 
-static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
 {
-	struct intel_crtc_state *pipe_config;
-	int num_scalers = intel_crtc->num_scalers;
+	const struct intel_crtc_state *crtc_state =
+		to_intel_crtc_state(crtc->base.state);
+	int num_scalers = crtc->num_scalers;
 	int i;
 
-	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
-
 	/* Not all platforms have a scaler */
 	if (num_scalers) {
 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
 			   num_scalers,
-			   pipe_config->scaler_state.scaler_users,
-			   pipe_config->scaler_state.scaler_id);
+			   crtc_state->scaler_state.scaler_users,
+			   crtc_state->scaler_state.scaler_id);
 
 		for (i = 0; i < num_scalers; i++) {
-			struct intel_scaler *sc =
-					&pipe_config->scaler_state.scalers[i];
+			const struct intel_scaler *sc =
+				&crtc_state->scaler_state.scalers[i];
 
 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
 				   i, yesno(sc->in_use), sc->mode);
@@ -2663,6 +2618,44 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 	}
 }
 
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	const struct intel_crtc_state *crtc_state =
+		to_intel_crtc_state(crtc->base.state);
+	struct intel_encoder *encoder;
+
+	seq_printf(m, "[CRTC:%d:%s]:\n",
+		   crtc->base.base.id, crtc->base.name);
+
+	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
+		   yesno(crtc_state->uapi.enable),
+		   yesno(crtc_state->uapi.active),
+		   DRM_MODE_ARG(&crtc_state->uapi.mode));
+
+	if (crtc_state->hw.enable) {
+		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
+			   yesno(crtc_state->hw.active),
+			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
+
+		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
+			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
+
+		intel_scaler_info(m, crtc);
+	}
+
+	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
+				    crtc_state->uapi.encoder_mask)
+		intel_encoder_info(m, crtc, encoder);
+
+	intel_plane_info(m, crtc);
+
+	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
+		   yesno(!crtc->cpu_fifo_underrun_disabled),
+		   yesno(!crtc->pch_fifo_underrun_disabled));
+}
+
 static int i915_display_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2674,52 +2667,22 @@ static int i915_display_info(struct seq_file *m, void *unused)
 
 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 
+	drm_modeset_lock_all(dev);
+
 	seq_printf(m, "CRTC info\n");
 	seq_printf(m, "---------\n");
-	for_each_intel_crtc(dev, crtc) {
-		struct intel_crtc_state *pipe_config;
-
-		drm_modeset_lock(&crtc->base.mutex, NULL);
-		pipe_config = to_intel_crtc_state(crtc->base.state);
-
-		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
-			   crtc->base.base.id, pipe_name(crtc->pipe),
-			   yesno(pipe_config->base.active),
-			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
-			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
-
-		if (pipe_config->base.active) {
-			struct intel_plane *cursor =
-				to_intel_plane(crtc->base.cursor);
-
-			intel_crtc_info(m, crtc);
-
-			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
-				   yesno(cursor->base.state->visible),
-				   cursor->base.state->crtc_x,
-				   cursor->base.state->crtc_y,
-				   cursor->base.state->crtc_w,
-				   cursor->base.state->crtc_h,
-				   cursor->cursor.base);
-			intel_scaler_info(m, crtc);
-			intel_plane_info(m, crtc);
-		}
-
-		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
-			   yesno(!crtc->cpu_fifo_underrun_disabled),
-			   yesno(!crtc->pch_fifo_underrun_disabled));
-		drm_modeset_unlock(&crtc->base.mutex);
-	}
+	for_each_intel_crtc(dev, crtc)
+		intel_crtc_info(m, crtc);
 
 	seq_printf(m, "\n");
 	seq_printf(m, "Connector info\n");
 	seq_printf(m, "--------------\n");
-	mutex_lock(&dev->mode_config.mutex);
 	drm_connector_list_iter_begin(dev, &conn_iter);
 	drm_for_each_connector_iter(connector, &conn_iter)
 		intel_connector_info(m, connector);
 	drm_connector_list_iter_end(&conn_iter);
-	mutex_unlock(&dev->mode_config.mutex);
+
+	drm_modeset_unlock_all(dev);
 
 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 
@@ -2755,7 +2718,7 @@ static int i915_rcs_topology(struct seq_file *m, void *unused)
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
+	intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
 
 	return 0;
 }
@@ -3051,11 +3014,11 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
 
-		intel_encoder = intel_attached_encoder(connector);
+		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
 			continue;
 
-		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+		intel_dig_port = enc_to_dig_port(intel_encoder);
 		if (!intel_dig_port->dp.can_mst)
 			continue;
 
@@ -3105,7 +3068,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
 			continue;
 
 		if (encoder && connector->status == connector_status_connected) {
-			intel_dp = enc_to_intel_dp(&encoder->base);
+			intel_dp = enc_to_intel_dp(encoder);
 			status = kstrtoint(input_buffer, 10, &val);
 			if (status < 0)
 				break;
@@ -3114,9 +3077,9 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
 			 * testing code, only accept an actual value of 1 here
 			 */
 			if (val == 1)
-				intel_dp->compliance.test_active = 1;
+				intel_dp->compliance.test_active = true;
 			else
-				intel_dp->compliance.test_active = 0;
+				intel_dp->compliance.test_active = false;
 		}
 	}
 	drm_connector_list_iter_end(&conn_iter);
@@ -3149,7 +3112,7 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
 			continue;
 
 		if (encoder && connector->status == connector_status_connected) {
-			intel_dp = enc_to_intel_dp(&encoder->base);
+			intel_dp = enc_to_intel_dp(encoder);
 			if (intel_dp->compliance.test_active)
 				seq_puts(m, "1");
 			else
@@ -3199,7 +3162,7 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
 			continue;
 
 		if (encoder && connector->status == connector_status_connected) {
-			intel_dp = enc_to_intel_dp(&encoder->base);
+			intel_dp = enc_to_intel_dp(encoder);
 			if (intel_dp->compliance.test_type ==
 			    DP_TEST_LINK_EDID_READ)
 				seq_printf(m, "%lx",
@@ -3243,7 +3206,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
 			continue;
 
 		if (encoder && connector->status == connector_status_connected) {
-			intel_dp = enc_to_intel_dp(&encoder->base);
+			intel_dp = enc_to_intel_dp(encoder);
 			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
 		} else
 			seq_puts(m, "0");
@@ -3854,8 +3817,8 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
 #undef SS_MAX
 }
 
-static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
-					 struct sseu_dev_info *sseu)
+static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
+				   struct sseu_dev_info *sseu)
 {
 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
@@ -3940,7 +3903,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
 		if (IS_CHERRYVIEW(dev_priv))
 			cherryview_sseu_device_status(dev_priv, &sseu);
 		else if (IS_BROADWELL(dev_priv))
-			broadwell_sseu_device_status(dev_priv, &sseu);
+			bdw_sseu_device_status(dev_priv, &sseu);
 		else if (IS_GEN(dev_priv, 9))
 			gen9_sseu_device_status(dev_priv, &sseu);
 		else if (INTEL_GEN(dev_priv) >= 10)
@@ -4161,11 +4124,11 @@ static int i915_drrs_ctl_set(void *data, u64 val)
 
 		crtc_state = to_intel_crtc_state(crtc->base.state);
 
-		if (!crtc_state->base.active ||
+		if (!crtc_state->hw.active ||
 		    !crtc_state->has_drrs)
 			goto out;
 
-		commit = crtc_state->base.commit;
+		commit = crtc_state->uapi.commit;
 		if (commit) {
 			ret = wait_for_completion_interruptible(&commit->hw_done);
 			if (ret)
@@ -4177,18 +4140,18 @@ static int i915_drrs_ctl_set(void *data, u64 val)
 			struct intel_encoder *encoder;
 			struct intel_dp *intel_dp;
 
-			if (!(crtc_state->base.connector_mask &
+			if (!(crtc_state->uapi.connector_mask &
 			      drm_connector_mask(connector)))
 				continue;
 
-			encoder = intel_attached_encoder(connector);
+			encoder = intel_attached_encoder(to_intel_connector(connector));
 			if (encoder->type != INTEL_OUTPUT_EDP)
 				continue;
 
 			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
 						val ? "en" : "dis", val);
 
-			intel_dp = enc_to_intel_dp(&encoder->base);
+			intel_dp = enc_to_intel_dp(encoder);
 			if (val)
 				intel_edp_drrs_enable(intel_dp,
 						      crtc_state);
@@ -4236,14 +4199,14 @@ i915_fifo_underrun_reset_write(struct file *filp,
 			return ret;
 
 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
-		commit = crtc_state->base.commit;
+		commit = crtc_state->uapi.commit;
 		if (commit) {
 			ret = wait_for_completion_interruptible(&commit->hw_done);
 			if (!ret)
 				ret = wait_for_completion_interruptible(&commit->flip_done);
 		}
 
-		if (!ret && crtc_state->base.active) {
+		if (!ret && crtc_state->hw.active) {
 			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
 				      pipe_name(intel_crtc->pipe));
 
@@ -4392,7 +4355,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
 {
 	struct drm_connector *connector = m->private;
 	struct intel_dp *intel_dp =
-		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+		enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
 	u8 buf[16];
 	ssize_t err;
 	int i;
@@ -4427,7 +4390,7 @@ static int i915_panel_show(struct seq_file *m, void *data)
 {
 	struct drm_connector *connector = m->private;
 	struct intel_dp *intel_dp =
-		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+		enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
 
 	if (connector->status != connector_status_connected)
 		return -ENODEV;
@@ -4505,7 +4468,7 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
 		} else if (ret) {
 			break;
 		}
-		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+		intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
 		crtc_state = to_intel_crtc_state(crtc->state);
 		seq_printf(m, "DSC_Enabled: %s\n",
 			   yesno(crtc_state->dsc.compression_enable));
@@ -4532,8 +4495,8 @@ static ssize_t i915_dsc_fec_support_write(struct file *file,
 	int ret;
 	struct drm_connector *connector =
 		((struct seq_file *)file->private_data)->private;
-	struct intel_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 	if (len == 0)
 		return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3c512c5..f7385ab 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -61,6 +61,7 @@
 
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_mman.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_rc6.h"
@@ -468,6 +469,12 @@ static void vlv_free_s0ix_state(struct drm_i915_private *i915)
 	i915->vlv_s0ix_state = NULL;
 }
 
+static void sanitize_gpu(struct drm_i915_private *i915)
+{
+	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
+		__intel_gt_reset(&i915->gt, ALL_ENGINES);
+}
+
 /**
  * i915_driver_early_probe - setup state not requiring device access
  * @dev_priv: device private
@@ -601,6 +608,9 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
 	if (ret)
 		goto err_uncore;
 
+	/* As early as possible, scrub existing GPU state before clobbering */
+	sanitize_gpu(dev_priv);
+
 	return 0;
 
 err_uncore:
@@ -618,7 +628,6 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
  */
 static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
 {
-	intel_engines_cleanup(&dev_priv->gt);
 	intel_teardown_mchbar(dev_priv);
 	intel_uncore_fini_mmio(&dev_priv->uncore);
 	pci_dev_put(dev_priv->bridge_dev);
@@ -1052,7 +1061,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
 	 */
 	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
 
-	if (INTEL_GEN(dev_priv) < 9)
+	if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv))
 		return;
 
 	if (IS_GEN9_LP(dev_priv))
@@ -1385,7 +1394,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 
 static void i915_welcome_messages(struct drm_i915_private *dev_priv)
 {
-	if (drm_debug & DRM_UT_DRIVER) {
+	if (drm_debug_enabled(DRM_UT_DRIVER)) {
 		struct drm_printer p = drm_debug_printer("i915 device info:");
 
 		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
@@ -1396,8 +1405,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
 					     INTEL_INFO(dev_priv)->platform),
 			   INTEL_GEN(dev_priv));
 
-		intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
-		intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
+		intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
+		intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
 	}
 
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -1817,9 +1826,7 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 
-	intel_rc6_ctx_wa_resume(&dev_priv->gt.rc6);
-
-	intel_gt_sanitize(&dev_priv->gt, true);
+	sanitize_gpu(dev_priv);
 
 	ret = i915_ggtt_enable_hw(dev_priv);
 	if (ret)
@@ -2662,18 +2669,12 @@ const struct dev_pm_ops i915_pm_ops = {
 	.runtime_resume = intel_runtime_resume,
 };
 
-static const struct vm_operations_struct i915_gem_vm_ops = {
-	.fault = i915_gem_fault,
-	.open = drm_gem_vm_open,
-	.close = drm_gem_vm_close,
-};
-
 static const struct file_operations i915_driver_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
 	.release = drm_release,
 	.unlocked_ioctl = drm_ioctl,
-	.mmap = drm_gem_mmap,
+	.mmap = i915_gem_mmap,
 	.poll = drm_poll,
 	.read = drm_read,
 	.compat_ioctl = i915_compat_ioctl,
@@ -2720,7 +2721,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
@@ -2762,7 +2763,6 @@ static struct drm_driver driver = {
 
 	.gem_close_object = i915_gem_close_object,
 	.gem_free_object_unlocked = i915_gem_free_object,
-	.gem_vm_ops = &i915_gem_vm_ops,
 
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -2773,7 +2773,8 @@ static struct drm_driver driver = {
 	.get_scanout_position = i915_get_crtc_scanoutpos,
 
 	.dumb_create = i915_gem_dumb_create,
-	.dumb_map_offset = i915_gem_mmap_gtt,
+	.dumb_map_offset = i915_gem_dumb_mmap_offset,
+
 	.ioctls = i915_ioctls,
 	.num_ioctls = ARRAY_SIZE(i915_ioctls),
 	.fops = &i915_driver_fops,
@@ -2784,7 +2785,3 @@ static struct drm_driver driver = {
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 };
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_drm.c"
-#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 21aa08f..077af22 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -46,6 +46,7 @@
 #include <linux/dma-resv.h>
 #include <linux/shmem_fs.h>
 #include <linux/stackdepot.h>
+#include <linux/xarray.h>
 
 #include <drm/intel-gtt.h>
 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */
@@ -110,8 +111,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20191101"
-#define DRIVER_TIMESTAMP	1572604873
+#define DRIVER_DATE		"20200114"
+#define DRIVER_TIMESTAMP	1579001978
 
 struct drm_i915_gem_object;
 
@@ -201,8 +202,7 @@ struct drm_i915_file_private {
 		struct list_head request_list;
 	} mm;
 
-	struct idr context_idr;
-	struct mutex context_idr_lock; /* guards context_idr */
+	struct xarray context_xa;
 
 	struct idr vm_idr;
 	struct mutex vm_idr_lock; /* guards vm_idr */
@@ -273,11 +273,11 @@ struct drm_i915_display_funcs {
 	int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
 	int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
 	void (*initial_watermarks)(struct intel_atomic_state *state,
-				   struct intel_crtc_state *crtc_state);
+				   struct intel_crtc *crtc);
 	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
-					 struct intel_crtc_state *crtc_state);
+					 struct intel_crtc *crtc);
 	void (*optimize_watermarks)(struct intel_atomic_state *state,
-				    struct intel_crtc_state *crtc_state);
+				    struct intel_crtc *crtc);
 	int (*compute_global_watermarks)(struct intel_atomic_state *state);
 	void (*update_wm)(struct intel_crtc *crtc);
 	int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
@@ -290,10 +290,10 @@ struct drm_i915_display_funcs {
 					 struct intel_initial_plane_config *);
 	int (*crtc_compute_clock)(struct intel_crtc *crtc,
 				  struct intel_crtc_state *crtc_state);
-	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
-			    struct intel_atomic_state *old_state);
-	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
-			     struct intel_atomic_state *old_state);
+	void (*crtc_enable)(struct intel_atomic_state *state,
+			    struct intel_crtc *crtc);
+	void (*crtc_disable)(struct intel_atomic_state *state,
+			     struct intel_crtc *crtc);
 	void (*commit_modeset_enables)(struct intel_atomic_state *state);
 	void (*commit_modeset_disables)(struct intel_atomic_state *state);
 	void (*audio_codec_enable)(struct intel_encoder *encoder,
@@ -366,7 +366,6 @@ struct intel_fbc {
 	unsigned threshold;
 	unsigned int possible_framebuffer_bits;
 	unsigned int busy_bits;
-	unsigned int visible_pipes_mask;
 	struct intel_crtc *crtc;
 
 	struct drm_mm_node compressed_fb;
@@ -374,8 +373,8 @@ struct intel_fbc {
 
 	bool false_color;
 
-	bool enabled;
 	bool active;
+	bool activated;
 	bool flip_pending;
 
 	bool underrun_detected;
@@ -387,9 +386,6 @@ struct intel_fbc {
 	 * these problems.
 	 */
 	struct intel_fbc_state_cache {
-		struct i915_vma *vma;
-		unsigned long flags;
-
 		struct {
 			unsigned int mode_flags;
 			u32 hsw_bdw_pixel_rate;
@@ -418,6 +414,8 @@ struct intel_fbc {
 			const struct drm_format_info *format;
 			unsigned int stride;
 		} fb;
+		u16 gen9_wa_cfb_stride;
+		s8 fence_id;
 	} state_cache;
 
 	/*
@@ -428,9 +426,6 @@ struct intel_fbc {
 	 * are supposed to read from it in order to program the registers.
 	 */
 	struct intel_fbc_reg_params {
-		struct i915_vma *vma;
-		unsigned long flags;
-
 		struct {
 			enum pipe pipe;
 			enum i9xx_plane_id i9xx_plane;
@@ -443,7 +438,9 @@ struct intel_fbc {
 		} fb;
 
 		int cfb_size;
-		unsigned int gen9_wa_cfb_stride;
+		u16 gen9_wa_cfb_stride;
+		s8 fence_id;
+		bool plane_visible;
 	} params;
 
 	const char *no_fbc_reason;
@@ -508,6 +505,7 @@ struct i915_psr {
 	bool dc3co_enabled;
 	u32 dc3co_exit_delay;
 	struct delayed_work idle_work;
+	bool initially_probed;
 };
 
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -621,19 +619,18 @@ struct i915_gem_mm {
 
 #define I915_ENGINE_WEDGED_TIMEOUT  (60 * HZ)  /* Reset but no recovery? */
 
+/* Number of SAGV/QGV points, BSpec precisely defines this */
+#define I915_NUM_QGV_POINTS 8
+
 struct ddi_vbt_port_info {
 	/* Non-NULL if port present. */
 	const struct child_device_config *child;
 
 	int max_tmds_clock;
 
-	/*
-	 * This is an index in the HDMI/DVI DDI buffer translation table.
-	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
-	 * populate this field.
-	 */
-#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
+	/* This is an index in the HDMI/DVI DDI buffer translation table. */
 	u8 hdmi_level_shift;
+	u8 hdmi_level_shift_set:1;
 
 	u8 supports_dvi:1;
 	u8 supports_hdmi:1;
@@ -724,8 +721,7 @@ struct intel_vbt_data {
 
 	int crt_ddc_pin;
 
-	int child_dev_num;
-	struct child_device_config *child_dev;
+	struct list_head display_devices;
 
 	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
 	struct sdvo_device_mapping sdvo_mappings[2];
@@ -891,6 +887,10 @@ struct intel_cdclk_state {
 	u8 voltage_level;
 };
 
+struct i915_selftest_stash {
+	atomic_t counter;
+};
+
 struct drm_i915_private {
 	struct drm_device drm;
 
@@ -956,9 +956,6 @@ struct drm_i915_private {
 
 	struct pci_dev *bridge_dev;
 
-	/* Context used internally to idle the GPU and setup initial state */
-	struct i915_gem_context *kernel_context;
-
 	struct intel_engine_cs *engine[I915_NUM_ENGINES];
 	struct rb_root uabi_engines;
 
@@ -1233,7 +1230,8 @@ struct drm_i915_private {
 	} dram_info;
 
 	struct intel_bw_info {
-		unsigned int deratedbw[3]; /* for each QGV point */
+		/* for each QGV point */
+		unsigned int deratedbw[I915_NUM_QGV_POINTS];
 		u8 num_qgv_points;
 		u8 num_planes;
 	} max_bw[6];
@@ -1248,8 +1246,6 @@ struct drm_i915_private {
 	struct intel_gt gt;
 
 	struct {
-		struct notifier_block pm_notifier;
-
 		struct i915_gem_contexts {
 			spinlock_t lock; /* locks list */
 			struct list_head list;
@@ -1257,6 +1253,16 @@ struct drm_i915_private {
 			struct llist_head free_list;
 			struct work_struct free_work;
 		} contexts;
+
+		/*
+		 * We replace the local file with a global mapping, as the
+		 * backing storage for the mmap is on the device and not
+		 * on the struct file, and we do not want to prolong the
+		 * lifetime of the local fd. To minimise the number of
+		 * anonymous inodes we create, we use a global singleton to
+		 * share the global mapping.
+		 */
+		struct file *mmap_singleton;
 	} gem;
 
 	u8 pch_ssc_use;
@@ -1286,6 +1292,8 @@ struct drm_i915_private {
 	/* Mutex to protect the above hdcp component related values. */
 	struct mutex hdcp_comp_mutex;
 
+	I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
+
 	/*
 	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
 	 * will be rejected. Instead look for a better place.
@@ -1844,14 +1852,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 			   unsigned long flags);
 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
-
-struct i915_vma * __must_check
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm,
-		    const struct i915_ggtt_view *view,
-		    u64 size,
-		    u64 alignment,
-		    u64 flags);
+#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
 
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
@@ -1864,9 +1865,6 @@ i915_mutex_lock_interruptible(struct drm_device *dev)
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
-		      u32 handle, u64 *offset);
-int i915_gem_mmap_gtt_version(void);
 
 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
 
@@ -1876,7 +1874,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
 }
 
 static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
-					  struct intel_engine_cs *engine)
+					  const struct intel_engine_cs *engine)
 {
 	return atomic_read(&error->reset_engine_count[engine->uabi_class]);
 }
@@ -1889,7 +1887,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv);
 void i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
-vm_fault_t i915_gem_fault(struct vm_fault *vmf);
 
 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
@@ -1905,7 +1902,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
 static inline struct i915_gem_context *
 __i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
 {
-	return idr_find(&file_priv->context_idr, id);
+	return xa_load(&file_priv->context_xa, id);
 }
 
 static inline struct i915_gem_context *
@@ -1958,14 +1955,13 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-int intel_engine_cmd_parser(struct i915_gem_context *cxt,
-			    struct intel_engine_cs *engine,
-			    struct drm_i915_gem_object *batch_obj,
-			    u64 user_batch_start,
-			    u32 batch_start_offset,
-			    u32 batch_len,
-			    struct drm_i915_gem_object *shadow_batch_obj,
-			    u64 shadow_batch_start);
+int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+			    struct i915_vma *batch,
+			    u32 batch_offset,
+			    u32 batch_length,
+			    struct i915_vma *shadow,
+			    bool trampoline);
+#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
 
 /* intel_device_info.c */
 static inline struct intel_device_info *
@@ -2032,6 +2028,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 int remap_io_mapping(struct vm_area_struct *vma,
 		     unsigned long addr, unsigned long pfn, unsigned long size,
 		     struct io_mapping *iomap);
+int remap_io_sg(struct vm_area_struct *vma,
+		unsigned long addr, unsigned long size,
+		struct scatterlist *sgl, resource_size_t iobase);
 
 static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 905890e3..94f993e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -44,20 +44,14 @@
 #include "gem/i915_gem_clflush.h"
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_ioctls.h"
-#include "gem/i915_gem_pm.h"
-#include "gt/intel_context.h"
+#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_region.h"
 #include "gt/intel_engine_user.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
-#include "gt/intel_gt_requests.h"
-#include "gt/intel_mocs.h"
-#include "gt/intel_reset.h"
-#include "gt/intel_renderstate.h"
-#include "gt/intel_rps.h"
 #include "gt/intel_workarounds.h"
 
 #include "i915_drv.h"
-#include "i915_scatterlist.h"
 #include "i915_trace.h"
 #include "i915_vgpu.h"
 
@@ -119,33 +113,65 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 			   unsigned long flags)
 {
-	struct i915_vma *vma;
+	struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
 	LIST_HEAD(still_in_list);
-	int ret = 0;
+	intel_wakeref_t wakeref;
+	struct i915_vma *vma;
+	int ret;
 
+	if (!atomic_read(&obj->bind_count))
+		return 0;
+
+	/*
+	 * As some machines use ACPI to handle runtime-resume callbacks, and
+	 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
+	 * as they are required by the shrinker. Ergo, we wake the device up
+	 * first just in case.
+	 */
+	wakeref = intel_runtime_pm_get(rpm);
+
+try_again:
+	ret = 0;
 	spin_lock(&obj->vma.lock);
 	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
 						       struct i915_vma,
 						       obj_link))) {
 		struct i915_address_space *vm = vma->vm;
 
-		ret = -EBUSY;
+		list_move_tail(&vma->obj_link, &still_in_list);
+		if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
+			continue;
+
+		ret = -EAGAIN;
 		if (!i915_vm_tryopen(vm))
 			break;
 
-		list_move_tail(&vma->obj_link, &still_in_list);
+		/* Prevent vma being freed by i915_vma_parked as we unbind */
+		vma = __i915_vma_get(vma);
 		spin_unlock(&obj->vma.lock);
 
-		if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
-		    !i915_vma_is_active(vma))
-			ret = i915_vma_unbind(vma);
+		if (vma) {
+			ret = -EBUSY;
+			if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
+			    !i915_vma_is_active(vma))
+				ret = i915_vma_unbind(vma);
+
+			__i915_vma_put(vma);
+		}
 
 		i915_vm_close(vm);
 		spin_lock(&obj->vma.lock);
 	}
-	list_splice(&still_in_list, &obj->vma.list);
+	list_splice_init(&still_in_list, &obj->vma.list);
 	spin_unlock(&obj->vma.lock);
 
+	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
+		rcu_barrier(); /* flush the i915_vm_release() */
+		goto try_again;
+	}
+
+	intel_runtime_pm_put(rpm, wakeref);
+
 	return ret;
 }
 
@@ -175,7 +201,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 
 static int
 i915_gem_create(struct drm_file *file,
-		struct drm_i915_private *dev_priv,
+		struct intel_memory_region *mr,
 		u64 *size_p,
 		u32 *handle_p)
 {
@@ -184,12 +210,16 @@ i915_gem_create(struct drm_file *file,
 	u64 size;
 	int ret;
 
-	size = round_up(*size_p, PAGE_SIZE);
+	GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
+	size = round_up(*size_p, mr->min_page_size);
 	if (size == 0)
 		return -EINVAL;
 
+	/* For most of the ABI (e.g. mmap) we think in system pages */
+	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
 	/* Allocate the new object */
-	obj = i915_gem_object_create_shmem(dev_priv, size);
+	obj = i915_gem_object_create_region(mr, size, 0);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -209,6 +239,7 @@ i915_gem_dumb_create(struct drm_file *file,
 		     struct drm_device *dev,
 		     struct drm_mode_create_dumb *args)
 {
+	enum intel_memory_type mem_type;
 	int cpp = DIV_ROUND_UP(args->bpp, 8);
 	u32 format;
 
@@ -235,7 +266,14 @@ i915_gem_dumb_create(struct drm_file *file,
 		args->pitch = ALIGN(args->pitch, 4096);
 
 	args->size = args->pitch * args->height;
-	return i915_gem_create(file, to_i915(dev),
+
+	mem_type = INTEL_MEMORY_SYSTEM;
+	if (HAS_LMEM(to_i915(dev)))
+		mem_type = INTEL_MEMORY_LOCAL;
+
+	return i915_gem_create(file,
+			       intel_memory_region_by_type(to_i915(dev),
+							   mem_type),
 			       &args->size, &args->handle);
 }
 
@@ -249,12 +287,14 @@ int
 i915_gem_create_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_create *args = data;
 
-	i915_gem_flush_free_objects(dev_priv);
+	i915_gem_flush_free_objects(i915);
 
-	return i915_gem_create(file, dev_priv,
+	return i915_gem_create(file,
+			       intel_memory_region_by_type(i915,
+							   INTEL_MEMORY_SYSTEM),
 			       &args->size, &args->handle);
 }
 
@@ -631,11 +671,12 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		user_data += page_length;
 		offset += page_length;
 	}
+
+	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 
 	i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
-	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 	if (drm_mm_node_allocated(&node)) {
 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
 		remove_mappable_node(ggtt, &node);
@@ -855,7 +896,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
 
 	list_for_each_entry_safe(obj, on,
 				 &i915->ggtt.userfault_list, userfault_link)
-		__i915_gem_object_release_mmap(obj);
+		__i915_gem_object_release_mmap_gtt(obj);
 
 	/*
 	 * The fence will be lost when the device powers down. If any were
@@ -892,22 +933,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 u64 alignment,
 			 u64 flags)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_address_space *vm = &dev_priv->ggtt.vm;
-
-	return i915_gem_object_pin(obj, vm, view, size, alignment,
-				   flags | PIN_GLOBAL);
-}
-
-struct i915_vma *
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm,
-		    const struct i915_ggtt_view *view,
-		    u64 size,
-		    u64 alignment,
-		    u64 flags)
-{
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct i915_vma *vma;
 	int ret;
 
@@ -916,17 +943,19 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 
 	if (flags & PIN_MAPPABLE &&
 	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
-		/* If the required space is larger than the available
+		/*
+		 * If the required space is larger than the available
 		 * aperture, we will not able to find a slot for the
 		 * object and unbinding the object now will be in
 		 * vain. Worse, doing so may cause us to ping-pong
 		 * the object in and out of the Global GTT and
 		 * waste a lot of cycles under the mutex.
 		 */
-		if (obj->base.size > dev_priv->ggtt.mappable_end)
+		if (obj->base.size > ggtt->mappable_end)
 			return ERR_PTR(-E2BIG);
 
-		/* If NONBLOCK is set the caller is optimistically
+		/*
+		 * If NONBLOCK is set the caller is optimistically
 		 * trying to cache the full object within the mappable
 		 * aperture, and *must* have a fallback in place for
 		 * situations where we cannot bind the object. We
@@ -942,11 +971,11 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		 * we could try to minimise harm to others.
 		 */
 		if (flags & PIN_NONBLOCK &&
-		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
+		    obj->base.size > ggtt->mappable_end / 2)
 			return ERR_PTR(-ENOSPC);
 	}
 
-	vma = i915_vma_instance(obj, vm, view);
+	vma = i915_vma_instance(obj, &ggtt->vm, view);
 	if (IS_ERR(vma))
 		return vma;
 
@@ -956,7 +985,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				return ERR_PTR(-ENOSPC);
 
 			if (flags & PIN_MAPPABLE &&
-			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
+			    vma->fence_size > ggtt->mappable_end / 2)
 				return ERR_PTR(-ENOSPC);
 		}
 
@@ -966,14 +995,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
-		mutex_lock(&vma->vm->mutex);
+		mutex_lock(&ggtt->vm.mutex);
 		ret = i915_vma_revoke_fence(vma);
-		mutex_unlock(&vma->vm->mutex);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (ret)
 			return ERR_PTR(ret);
 	}
 
-	ret = i915_vma_pin(vma, size, alignment, flags);
+	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -1054,172 +1083,6 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	return err;
 }
 
-static int __intel_context_flush_retire(struct intel_context *ce)
-{
-	struct intel_timeline *tl;
-
-	tl = intel_context_timeline_lock(ce);
-	if (IS_ERR(tl))
-		return PTR_ERR(tl);
-
-	intel_context_timeline_unlock(tl);
-	return 0;
-}
-
-static int __intel_engines_record_defaults(struct intel_gt *gt)
-{
-	struct i915_request *requests[I915_NUM_ENGINES] = {};
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int err = 0;
-
-	/*
-	 * As we reset the gpu during very early sanitisation, the current
-	 * register state on the GPU should reflect its defaults values.
-	 * We load a context onto the hw (with restore-inhibit), then switch
-	 * over to a second context to save that default register state. We
-	 * can then prime every new context with that state so they all start
-	 * from the same default HW values.
-	 */
-
-	for_each_engine(engine, gt, id) {
-		struct intel_context *ce;
-		struct i915_request *rq;
-
-		/* We must be able to switch to something! */
-		GEM_BUG_ON(!engine->kernel_context);
-		engine->serial++; /* force the kernel context switch */
-
-		ce = intel_context_create(engine->kernel_context->gem_context,
-					  engine);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			goto out;
-		}
-
-		rq = intel_context_create_request(ce);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			intel_context_put(ce);
-			goto out;
-		}
-
-		err = intel_engine_emit_ctx_wa(rq);
-		if (err)
-			goto err_rq;
-
-		err = intel_renderstate_emit(rq);
-		if (err)
-			goto err_rq;
-
-err_rq:
-		requests[id] = i915_request_get(rq);
-		i915_request_add(rq);
-		if (err)
-			goto out;
-	}
-
-	/* Flush the default context image to memory, and enable powersaving. */
-	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
-		err = -EIO;
-		goto out;
-	}
-
-	for (id = 0; id < ARRAY_SIZE(requests); id++) {
-		struct i915_request *rq;
-		struct i915_vma *state;
-		void *vaddr;
-
-		rq = requests[id];
-		if (!rq)
-			continue;
-
-		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT,
-				     &rq->hw_context->flags));
-		state = rq->hw_context->state;
-		if (!state)
-			continue;
-
-		/* Serialise with retirement on another CPU */
-		err = __intel_context_flush_retire(rq->hw_context);
-		if (err)
-			goto out;
-
-		/* We want to be able to unbind the state from the GGTT */
-		GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
-
-		/*
-		 * As we will hold a reference to the logical state, it will
-		 * not be torn down with the context, and importantly the
-		 * object will hold onto its vma (making it possible for a
-		 * stray GTT write to corrupt our defaults). Unmap the vma
-		 * from the GTT to prevent such accidents and reclaim the
-		 * space.
-		 */
-		err = i915_vma_unbind(state);
-		if (err)
-			goto out;
-
-		i915_gem_object_lock(state->obj);
-		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
-		i915_gem_object_unlock(state->obj);
-		if (err)
-			goto out;
-
-		i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
-
-		/* Check we can acquire the image of the context state */
-		vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
-		if (IS_ERR(vaddr)) {
-			err = PTR_ERR(vaddr);
-			goto out;
-		}
-
-		rq->engine->default_state = i915_gem_object_get(state->obj);
-		i915_gem_object_unpin_map(state->obj);
-	}
-
-out:
-	/*
-	 * If we have to abandon now, we expect the engines to be idle
-	 * and ready to be torn-down. The quickest way we can accomplish
-	 * this is by declaring ourselves wedged.
-	 */
-	if (err)
-		intel_gt_set_wedged(gt);
-
-	for (id = 0; id < ARRAY_SIZE(requests); id++) {
-		struct intel_context *ce;
-		struct i915_request *rq;
-
-		rq = requests[id];
-		if (!rq)
-			continue;
-
-		ce = rq->hw_context;
-		i915_request_put(rq);
-		intel_context_put(ce);
-	}
-	return err;
-}
-
-static int intel_engines_verify_workarounds(struct intel_gt *gt)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int err = 0;
-
-	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
-		return 0;
-
-	for_each_engine(engine, gt, id) {
-		if (intel_engine_verify_workarounds(engine, "load"))
-			err = -EIO;
-	}
-
-	return err;
-}
-
 int i915_gem_init(struct drm_i915_private *dev_priv)
 {
 	int ret;
@@ -1229,8 +1092,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		mkwrite_device_info(dev_priv)->page_sizes =
 			I915_GTT_PAGE_SIZE_4K;
 
-	intel_timelines_init(dev_priv);
-
 	ret = i915_gem_init_userptr(dev_priv);
 	if (ret)
 		return ret;
@@ -1238,51 +1099,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	intel_uc_fetch_firmwares(&dev_priv->gt.uc);
 	intel_wopcm_init(&dev_priv->wopcm);
 
-	/* This is just a security blanket to placate dragons.
-	 * On some systems, we very sporadically observe that the first TLBs
-	 * used by the CS may be stale, despite us poking the TLB reset. If
-	 * we hold the forcewake during initialisation these problems
-	 * just magically go away.
-	 */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
 	ret = i915_init_ggtt(dev_priv);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_unlock;
 	}
 
-	intel_gt_init(&dev_priv->gt);
-
-	ret = intel_engines_setup(&dev_priv->gt);
-	if (ret) {
-		GEM_BUG_ON(ret == -EIO);
-		goto err_unlock;
-	}
-
-	ret = i915_gem_init_contexts(dev_priv);
-	if (ret) {
-		GEM_BUG_ON(ret == -EIO);
-		goto err_scratch;
-	}
-
-	ret = intel_engines_init(&dev_priv->gt);
-	if (ret) {
-		GEM_BUG_ON(ret == -EIO);
-		goto err_context;
-	}
-
-	intel_uc_init(&dev_priv->gt.uc);
-
-	ret = intel_gt_init_hw(&dev_priv->gt);
-	if (ret)
-		goto err_uc_init;
-
-	/* Only when the HW is re-initialised, can we replay the requests */
-	ret = intel_gt_resume(&dev_priv->gt);
-	if (ret)
-		goto err_init_hw;
-
 	/*
 	 * Despite its name intel_init_clock_gating applies both display
 	 * clock gating workarounds; GT mmio workarounds and the occasional
@@ -1294,23 +1116,9 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 */
 	intel_init_clock_gating(dev_priv);
 
-	ret = intel_engines_verify_workarounds(&dev_priv->gt);
+	ret = intel_gt_init(&dev_priv->gt);
 	if (ret)
-		goto err_gt;
-
-	ret = __intel_engines_record_defaults(&dev_priv->gt);
-	if (ret)
-		goto err_gt;
-
-	ret = i915_inject_probe_error(dev_priv, -ENODEV);
-	if (ret)
-		goto err_gt;
-
-	ret = i915_inject_probe_error(dev_priv, -EIO);
-	if (ret)
-		goto err_gt;
-
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+		goto err_unlock;
 
 	return 0;
 
@@ -1320,31 +1128,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 * HW as irrevisibly wedged, but keep enough state around that the
 	 * driver doesn't explode during runtime.
 	 */
-err_gt:
-	intel_gt_set_wedged_on_init(&dev_priv->gt);
-	i915_gem_suspend(dev_priv);
-	i915_gem_suspend_late(dev_priv);
-
-	i915_gem_drain_workqueue(dev_priv);
-err_init_hw:
-	intel_uc_fini_hw(&dev_priv->gt.uc);
-err_uc_init:
-	if (ret != -EIO) {
-		intel_uc_fini(&dev_priv->gt.uc);
-		intel_engines_cleanup(&dev_priv->gt);
-	}
-err_context:
-	if (ret != -EIO)
-		i915_gem_driver_release__contexts(dev_priv);
-err_scratch:
-	intel_gt_driver_release(&dev_priv->gt);
 err_unlock:
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	i915_gem_drain_workqueue(dev_priv);
 
 	if (ret != -EIO) {
 		intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
 		i915_gem_cleanup_userptr(dev_priv);
-		intel_timelines_fini(dev_priv);
 	}
 
 	if (ret == -EIO) {
@@ -1388,27 +1177,24 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
 
 	i915_gem_suspend_late(dev_priv);
 	intel_gt_driver_remove(&dev_priv->gt);
+	dev_priv->uabi_engines = RB_ROOT;
 
 	/* Flush any outstanding unpin_work. */
 	i915_gem_drain_workqueue(dev_priv);
 
-	intel_uc_fini_hw(&dev_priv->gt.uc);
-	intel_uc_fini(&dev_priv->gt.uc);
-
 	i915_gem_drain_freed_objects(dev_priv);
 }
 
 void i915_gem_driver_release(struct drm_i915_private *dev_priv)
 {
-	intel_engines_cleanup(&dev_priv->gt);
 	i915_gem_driver_release__contexts(dev_priv);
+
 	intel_gt_driver_release(&dev_priv->gt);
 
 	intel_wa_list_free(&dev_priv->gt_wa_list);
 
 	intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
 	i915_gem_cleanup_userptr(dev_priv);
-	intel_timelines_fini(dev_priv);
 
 	i915_gem_drain_freed_objects(dev_priv);
 
@@ -1430,6 +1216,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
 void i915_gem_init_early(struct drm_i915_private *dev_priv)
 {
 	i915_gem_init__mm(dev_priv);
+	i915_gem_init__contexts(dev_priv);
 
 	spin_lock_init(&dev_priv->fb_tracking.lock);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index f6f9675..1753c84 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -30,15 +30,18 @@
 
 #include <drm/drm_drv.h>
 
+#include "i915_utils.h"
+
 struct drm_i915_private;
 
 #ifdef CONFIG_DRM_I915_DEBUG_GEM
 
-#define GEM_SHOW_DEBUG() (drm_debug & DRM_UT_DRIVER)
+#define GEM_SHOW_DEBUG() drm_debug_enabled(DRM_UT_DRIVER)
 
 #define GEM_BUG_ON(condition) do { if (unlikely((condition))) {	\
 		GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
 			      __func__, __LINE__, __stringify(condition)); \
+		GEM_TRACE_DUMP(); \
 		BUG(); \
 		} \
 	} while(0)
@@ -68,9 +71,10 @@ struct drm_i915_private;
 	pr_err(__VA_ARGS__);						\
 	trace_printk(__VA_ARGS__);					\
 } while (0)
-#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
+#define GEM_TRACE_DUMP() \
+	do { ftrace_dump(DUMP_ALL); add_taint_for_CI(TAINT_WARN); } while (0)
 #define GEM_TRACE_DUMP_ON(expr) \
-	do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
+	do { if (expr) GEM_TRACE_DUMP(); } while (0)
 #else
 #define GEM_TRACE(...) do { } while (0)
 #define GEM_TRACE_ERR(...) do { } while (0)
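The macro being extended here follows the usual do { } while (0) debug-wrapper pattern: dump the ftrace buffer and taint the kernel (so CI can spot the failure) before BUG()ing. A generic sketch of that pattern, with illustrative names rather than the i915 definitions:

	/* MY_TRACE_DUMP()/MY_BUG_ON() are illustrative, not the i915 macros. */
	#define MY_TRACE_DUMP() do {					\
			ftrace_dump(DUMP_ALL);				\
			add_taint(TAINT_WARN, LOCKDEP_STILL_OK);	\
		} while (0)

	#define MY_BUG_ON(cond) do {					\
			if (unlikely(cond)) {				\
				pr_err("%s:%d: %s\n",			\
				       __func__, __LINE__, #cond);	\
				MY_TRACE_DUMP();			\
				BUG();					\
			}						\
		} while (0)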
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 7e62c31..0697bed 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -359,9 +359,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
  */
 int i915_gem_evict_vm(struct i915_address_space *vm)
 {
-	struct list_head eviction_list;
-	struct i915_vma *vma, *next;
-	int ret;
+	int ret = 0;
 
 	lockdep_assert_held(&vm->mutex);
 	trace_i915_gem_evict_vm(vm);
@@ -377,21 +375,30 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 			return ret;
 	}
 
-	INIT_LIST_HEAD(&eviction_list);
-	list_for_each_entry(vma, &vm->bound_list, vm_link) {
-		if (i915_vma_is_pinned(vma))
-			continue;
+	do {
+		struct i915_vma *vma, *vn;
+		LIST_HEAD(eviction_list);
 
-		__i915_vma_pin(vma);
-		list_add(&vma->evict_link, &eviction_list);
-	}
+		list_for_each_entry(vma, &vm->bound_list, vm_link) {
+			if (i915_vma_is_pinned(vma))
+				continue;
 
-	ret = 0;
-	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
-		__i915_vma_unpin(vma);
-		if (ret == 0)
-			ret = __i915_vma_unbind(vma);
-	}
+			__i915_vma_pin(vma);
+			list_add(&vma->evict_link, &eviction_list);
+		}
+		if (list_empty(&eviction_list))
+			break;
+
+		ret = 0;
+		list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
+			__i915_vma_unpin(vma);
+			if (ret == 0)
+				ret = __i915_vma_unbind(vma);
+			if (ret != -EINTR) /* "Get me out of here!" */
+				ret = 0;
+		}
+	} while (ret == 0);
+
 	return ret;
 }
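The rewritten i915_gem_evict_vm() above drains the bound list in passes (collect unpinned vmas under the lock, unbind them, and loop until a pass finds nothing), since unbinding may surface new candidates. A stripped-down sketch of that shape, using hypothetical collect_unpinned()/detach_batch() helpers:

	/* collect_unpinned() and detach_batch() are illustrative stand-ins. */
	static int drain_in_passes(struct list_head *bound)
	{
		int err = 0;

		do {
			LIST_HEAD(batch);

			collect_unpinned(bound, &batch);  /* gather under the lock */
			if (list_empty(&batch))
				break;			  /* nothing left to evict */

			err = detach_batch(&batch);	  /* may drop/retake the lock */
		} while (err == 0);

		return err;
	}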
 
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 71efccf..d9c34a2 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -412,6 +412,9 @@ int i915_vma_pin_fence(struct i915_vma *vma)
 {
 	int err;
 
+	if (!vma->fence && !i915_gem_object_is_tiled(vma->obj))
+		return 0;
+
 	/*
 	 * Note that we revoke fences on runtime suspend. Therefore the user
 	 * must keep the device awake whilst using the fence.
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d6ce57d..e039eb5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1,26 +1,7 @@
+// SPDX-License-Identifier: MIT
 /*
  * Copyright © 2010 Daniel Vetter
- * Copyright © 2011-2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2020 Intel Corporation
  */
 
 #include <linux/slab.h> /* fault-inject.h is not standalone! */
@@ -45,2098 +26,6 @@
 #include "i915_trace.h"
 #include "i915_vgpu.h"
 
-#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
-
-#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
-#define DBG(...) trace_printk(__VA_ARGS__)
-#else
-#define DBG(...)
-#endif
-
-/**
- * DOC: Global GTT views
- *
- * Background and previous state
- *
- * Historically objects could exists (be bound) in global GTT space only as
- * singular instances with a view representing all of the object's backing pages
- * in a linear fashion. This view will be called a normal view.
- *
- * To support multiple views of the same object, where the number of mapped
- * pages is not equal to the backing store, or where the layout of the pages
- * is not linear, concept of a GGTT view was added.
- *
- * One example of an alternative view is a stereo display driven by a single
- * image. In this case we would have a framebuffer looking like this
- * (2x2 pages):
- *
- *    12
- *    34
- *
- * Above would represent a normal GGTT view as normally mapped for GPU or CPU
- * rendering. In contrast, fed to the display engine would be an alternative
- * view which could look something like this:
- *
- *   1212
- *   3434
- *
- * In this example both the size and layout of pages in the alternative view is
- * different from the normal view.
- *
- * Implementation and usage
- *
- * GGTT views are implemented using VMAs and are distinguished via enum
- * i915_ggtt_view_type and struct i915_ggtt_view.
- *
- * A new flavour of core GEM functions which work with GGTT bound objects were
- * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
- * renaming  in large amounts of code. They take the struct i915_ggtt_view
- * parameter encapsulating all metadata required to implement a view.
- *
- * As a helper for callers which are only interested in the normal view,
- * globally const i915_ggtt_view_normal singleton instance exists. All old core
- * GEM API functions, the ones not taking the view parameter, are operating on,
- * or with the normal GGTT view.
- *
- * Code wanting to add or use a new GGTT view needs to:
- *
- * 1. Add a new enum with a suitable name.
- * 2. Extend the metadata in the i915_ggtt_view structure if required.
- * 3. Add support to i915_get_vma_pages().
- *
- * New views are required to build a scatter-gather table from within the
- * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
- * exists for the lifetime of an VMA.
- *
- * Core API is designed to have copy semantics which means that passed in
- * struct i915_ggtt_view does not need to be persistent (left around after
- * calling the core API functions).
- *
- */
-
-#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
-
-static int
-i915_get_ggtt_vma_pages(struct i915_vma *vma);
-
-static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
-{
-	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
-
-	/*
-	 * Note that as an uncached mmio write, this will flush the
-	 * WCB of the writes into the GGTT before it triggers the invalidate.
-	 */
-	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-}
-
-static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
-{
-	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
-	struct drm_i915_private *i915 = ggtt->vm.i915;
-
-	gen6_ggtt_invalidate(ggtt);
-
-	if (INTEL_GEN(i915) >= 12)
-		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
-				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
-	else
-		intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-}
-
-static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
-{
-	intel_gtt_chipset_flush();
-}
-
-static int ppgtt_bind_vma(struct i915_vma *vma,
-			  enum i915_cache_level cache_level,
-			  u32 flags)
-{
-	u32 pte_flags;
-	int err;
-
-	if (flags & I915_VMA_ALLOC) {
-		err = vma->vm->allocate_va_range(vma->vm,
-						 vma->node.start, vma->size);
-		if (err)
-			return err;
-
-		set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
-	}
-
-	/* Applicable to VLV, and gen8+ */
-	pte_flags = 0;
-	if (i915_gem_object_is_readonly(vma->obj))
-		pte_flags |= PTE_READ_ONLY;
-
-	GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
-	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-	wmb();
-
-	return 0;
-}
-
-static void ppgtt_unbind_vma(struct i915_vma *vma)
-{
-	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
-		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-}
-
-static int ppgtt_set_pages(struct i915_vma *vma)
-{
-	GEM_BUG_ON(vma->pages);
-
-	vma->pages = vma->obj->mm.pages;
-
-	vma->page_sizes = vma->obj->mm.page_sizes;
-
-	return 0;
-}
-
-static void clear_pages(struct i915_vma *vma)
-{
-	GEM_BUG_ON(!vma->pages);
-
-	if (vma->pages != vma->obj->mm.pages) {
-		sg_free_table(vma->pages);
-		kfree(vma->pages);
-	}
-	vma->pages = NULL;
-
-	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
-}
-
-static u64 gen8_pte_encode(dma_addr_t addr,
-			   enum i915_cache_level level,
-			   u32 flags)
-{
-	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
-
-	if (unlikely(flags & PTE_READ_ONLY))
-		pte &= ~_PAGE_RW;
-
-	switch (level) {
-	case I915_CACHE_NONE:
-		pte |= PPAT_UNCACHED;
-		break;
-	case I915_CACHE_WT:
-		pte |= PPAT_DISPLAY_ELLC;
-		break;
-	default:
-		pte |= PPAT_CACHED;
-		break;
-	}
-
-	return pte;
-}
-
-static u64 gen8_pde_encode(const dma_addr_t addr,
-			   const enum i915_cache_level level)
-{
-	u64 pde = _PAGE_PRESENT | _PAGE_RW;
-	pde |= addr;
-	if (level != I915_CACHE_NONE)
-		pde |= PPAT_CACHED_PDE;
-	else
-		pde |= PPAT_UNCACHED;
-	return pde;
-}
-
-static u64 snb_pte_encode(dma_addr_t addr,
-			  enum i915_cache_level level,
-			  u32 flags)
-{
-	gen6_pte_t pte = GEN6_PTE_VALID;
-	pte |= GEN6_PTE_ADDR_ENCODE(addr);
-
-	switch (level) {
-	case I915_CACHE_L3_LLC:
-	case I915_CACHE_LLC:
-		pte |= GEN6_PTE_CACHE_LLC;
-		break;
-	case I915_CACHE_NONE:
-		pte |= GEN6_PTE_UNCACHED;
-		break;
-	default:
-		MISSING_CASE(level);
-	}
-
-	return pte;
-}
-
-static u64 ivb_pte_encode(dma_addr_t addr,
-			  enum i915_cache_level level,
-			  u32 flags)
-{
-	gen6_pte_t pte = GEN6_PTE_VALID;
-	pte |= GEN6_PTE_ADDR_ENCODE(addr);
-
-	switch (level) {
-	case I915_CACHE_L3_LLC:
-		pte |= GEN7_PTE_CACHE_L3_LLC;
-		break;
-	case I915_CACHE_LLC:
-		pte |= GEN6_PTE_CACHE_LLC;
-		break;
-	case I915_CACHE_NONE:
-		pte |= GEN6_PTE_UNCACHED;
-		break;
-	default:
-		MISSING_CASE(level);
-	}
-
-	return pte;
-}
-
-static u64 byt_pte_encode(dma_addr_t addr,
-			  enum i915_cache_level level,
-			  u32 flags)
-{
-	gen6_pte_t pte = GEN6_PTE_VALID;
-	pte |= GEN6_PTE_ADDR_ENCODE(addr);
-
-	if (!(flags & PTE_READ_ONLY))
-		pte |= BYT_PTE_WRITEABLE;
-
-	if (level != I915_CACHE_NONE)
-		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
-
-	return pte;
-}
-
-static u64 hsw_pte_encode(dma_addr_t addr,
-			  enum i915_cache_level level,
-			  u32 flags)
-{
-	gen6_pte_t pte = GEN6_PTE_VALID;
-	pte |= HSW_PTE_ADDR_ENCODE(addr);
-
-	if (level != I915_CACHE_NONE)
-		pte |= HSW_WB_LLC_AGE3;
-
-	return pte;
-}
-
-static u64 iris_pte_encode(dma_addr_t addr,
-			   enum i915_cache_level level,
-			   u32 flags)
-{
-	gen6_pte_t pte = GEN6_PTE_VALID;
-	pte |= HSW_PTE_ADDR_ENCODE(addr);
-
-	switch (level) {
-	case I915_CACHE_NONE:
-		break;
-	case I915_CACHE_WT:
-		pte |= HSW_WT_ELLC_LLC_AGE3;
-		break;
-	default:
-		pte |= HSW_WB_ELLC_LLC_AGE3;
-		break;
-	}
-
-	return pte;
-}
-
-static void stash_init(struct pagestash *stash)
-{
-	pagevec_init(&stash->pvec);
-	spin_lock_init(&stash->lock);
-}
-
-static struct page *stash_pop_page(struct pagestash *stash)
-{
-	struct page *page = NULL;
-
-	spin_lock(&stash->lock);
-	if (likely(stash->pvec.nr))
-		page = stash->pvec.pages[--stash->pvec.nr];
-	spin_unlock(&stash->lock);
-
-	return page;
-}
-
-static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
-{
-	unsigned int nr;
-
-	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
-
-	nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
-	memcpy(stash->pvec.pages + stash->pvec.nr,
-	       pvec->pages + pvec->nr - nr,
-	       sizeof(pvec->pages[0]) * nr);
-	stash->pvec.nr += nr;
-
-	spin_unlock(&stash->lock);
-
-	pvec->nr -= nr;
-}
-
-static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
-{
-	struct pagevec stack;
-	struct page *page;
-
-	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
-		i915_gem_shrink_all(vm->i915);
-
-	page = stash_pop_page(&vm->free_pages);
-	if (page)
-		return page;
-
-	if (!vm->pt_kmap_wc)
-		return alloc_page(gfp);
-
-	/* Look in our global stash of WC pages... */
-	page = stash_pop_page(&vm->i915->mm.wc_stash);
-	if (page)
-		return page;
-
-	/*
-	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
-	 *
-	 * We have to be careful as page allocation may trigger the shrinker
-	 * (via direct reclaim) which will fill up the WC stash underneath us.
-	 * So we add our WB pages into a temporary pvec on the stack and merge
-	 * them into the WC stash after all the allocations are complete.
-	 */
-	pagevec_init(&stack);
-	do {
-		struct page *page;
-
-		page = alloc_page(gfp);
-		if (unlikely(!page))
-			break;
-
-		stack.pages[stack.nr++] = page;
-	} while (pagevec_space(&stack));
-
-	if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
-		page = stack.pages[--stack.nr];
-
-		/* Merge spare WC pages to the global stash */
-		if (stack.nr)
-			stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
-
-		/* Push any surplus WC pages onto the local VM stash */
-		if (stack.nr)
-			stash_push_pagevec(&vm->free_pages, &stack);
-	}
-
-	/* Return unwanted leftovers */
-	if (unlikely(stack.nr)) {
-		WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
-		__pagevec_release(&stack);
-	}
-
-	return page;
-}
-
-static void vm_free_pages_release(struct i915_address_space *vm,
-				  bool immediate)
-{
-	struct pagevec *pvec = &vm->free_pages.pvec;
-	struct pagevec stack;
-
-	lockdep_assert_held(&vm->free_pages.lock);
-	GEM_BUG_ON(!pagevec_count(pvec));
-
-	if (vm->pt_kmap_wc) {
-		/*
-		 * When we use WC, first fill up the global stash and then
-		 * only if full immediately free the overflow.
-		 */
-		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
-
-		/*
-		 * As we have made some room in the VM's free_pages,
-		 * we can wait for it to fill again. Unless we are
-		 * inside i915_address_space_fini() and must
-		 * immediately release the pages!
-		 */
-		if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
-			return;
-
-		/*
-		 * We have to drop the lock to allow ourselves to sleep,
-		 * so take a copy of the pvec and clear the stash for
-		 * others to use it as we sleep.
-		 */
-		stack = *pvec;
-		pagevec_reinit(pvec);
-		spin_unlock(&vm->free_pages.lock);
-
-		pvec = &stack;
-		set_pages_array_wb(pvec->pages, pvec->nr);
-
-		spin_lock(&vm->free_pages.lock);
-	}
-
-	__pagevec_release(pvec);
-}
-
-static void vm_free_page(struct i915_address_space *vm, struct page *page)
-{
-	/*
-	 * On !llc, we need to change the pages back to WB. We only do so
-	 * in bulk, so we rarely need to change the page attributes here,
-	 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
-	 * To make detection of the possible sleep more likely, use an
-	 * unconditional might_sleep() for everybody.
-	 */
-	might_sleep();
-	spin_lock(&vm->free_pages.lock);
-	while (!pagevec_space(&vm->free_pages.pvec))
-		vm_free_pages_release(vm, false);
-	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
-	pagevec_add(&vm->free_pages.pvec, page);
-	spin_unlock(&vm->free_pages.lock);
-}
-
-static void i915_address_space_fini(struct i915_address_space *vm)
-{
-	spin_lock(&vm->free_pages.lock);
-	if (pagevec_count(&vm->free_pages.pvec))
-		vm_free_pages_release(vm, true);
-	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
-	spin_unlock(&vm->free_pages.lock);
-
-	drm_mm_takedown(&vm->mm);
-
-	mutex_destroy(&vm->mutex);
-}
-
-void __i915_vm_close(struct i915_address_space *vm)
-{
-	struct i915_vma *vma, *vn;
-
-	mutex_lock(&vm->mutex);
-	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
-		struct drm_i915_gem_object *obj = vma->obj;
-
-		/* Keep the obj (and hence the vma) alive as _we_ destroy it */
-		if (!kref_get_unless_zero(&obj->base.refcount))
-			continue;
-
-		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
-		WARN_ON(__i915_vma_unbind(vma));
-		i915_vma_destroy(vma);
-
-		i915_gem_object_put(obj);
-	}
-	GEM_BUG_ON(!list_empty(&vm->bound_list));
-	mutex_unlock(&vm->mutex);
-}
-
-static void __i915_vm_release(struct work_struct *work)
-{
-	struct i915_address_space *vm =
-		container_of(work, struct i915_address_space, rcu.work);
-
-	vm->cleanup(vm);
-	i915_address_space_fini(vm);
-
-	kfree(vm);
-}
-
-void i915_vm_release(struct kref *kref)
-{
-	struct i915_address_space *vm =
-		container_of(kref, struct i915_address_space, ref);
-
-	GEM_BUG_ON(i915_is_ggtt(vm));
-	trace_i915_ppgtt_release(vm);
-
-	queue_rcu_work(vm->i915->wq, &vm->rcu);
-}
-
-static void i915_address_space_init(struct i915_address_space *vm, int subclass)
-{
-	kref_init(&vm->ref);
-	INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
-	atomic_set(&vm->open, 1);
-
-	/*
-	 * The vm->mutex must be reclaim safe (for use in the shrinker).
-	 * Do a dummy acquire now under fs_reclaim so that any allocation
-	 * attempt holding the lock is immediately reported by lockdep.
-	 */
-	mutex_init(&vm->mutex);
-	lockdep_set_subclass(&vm->mutex, subclass);
-	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
-
-	GEM_BUG_ON(!vm->total);
-	drm_mm_init(&vm->mm, 0, vm->total);
-	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
-
-	stash_init(&vm->free_pages);
-
-	INIT_LIST_HEAD(&vm->bound_list);
-}
-
-static int __setup_page_dma(struct i915_address_space *vm,
-			    struct i915_page_dma *p,
-			    gfp_t gfp)
-{
-	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
-	if (unlikely(!p->page))
-		return -ENOMEM;
-
-	p->daddr = dma_map_page_attrs(vm->dma,
-				      p->page, 0, PAGE_SIZE,
-				      PCI_DMA_BIDIRECTIONAL,
-				      DMA_ATTR_SKIP_CPU_SYNC |
-				      DMA_ATTR_NO_WARN);
-	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
-		vm_free_page(vm, p->page);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static int setup_page_dma(struct i915_address_space *vm,
-			  struct i915_page_dma *p)
-{
-	return __setup_page_dma(vm, p, __GFP_HIGHMEM);
-}
-
-static void cleanup_page_dma(struct i915_address_space *vm,
-			     struct i915_page_dma *p)
-{
-	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	vm_free_page(vm, p->page);
-}
-
-#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
-
-static void
-fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
-{
-	kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
-}
-
-#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
-#define fill32_px(px, v) do {						\
-	u64 v__ = lower_32_bits(v);					\
-	fill_px((px), v__ << 32 | v__);					\
-} while (0)
-
-static int
-setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
-{
-	unsigned long size;
-
-	/*
-	 * In order to utilize 64K pages for an object with a size < 2M, we will
-	 * need to support a 64K scratch page, given that every 16th entry for a
-	 * page-table operating in 64K mode must point to a properly aligned 64K
-	 * region, including any PTEs which happen to point to scratch.
-	 *
-	 * This is only relevant for the 48b PPGTT where we support
-	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
-	 * scratch (read-only) between all vm, we create one 64k scratch page
-	 * for all.
-	 */
-	size = I915_GTT_PAGE_SIZE_4K;
-	if (i915_vm_is_4lvl(vm) &&
-	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
-		size = I915_GTT_PAGE_SIZE_64K;
-		gfp |= __GFP_NOWARN;
-	}
-	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
-
-	do {
-		unsigned int order = get_order(size);
-		struct page *page;
-		dma_addr_t addr;
-
-		page = alloc_pages(gfp, order);
-		if (unlikely(!page))
-			goto skip;
-
-		addr = dma_map_page_attrs(vm->dma,
-					  page, 0, size,
-					  PCI_DMA_BIDIRECTIONAL,
-					  DMA_ATTR_SKIP_CPU_SYNC |
-					  DMA_ATTR_NO_WARN);
-		if (unlikely(dma_mapping_error(vm->dma, addr)))
-			goto free_page;
-
-		if (unlikely(!IS_ALIGNED(addr, size)))
-			goto unmap_page;
-
-		vm->scratch[0].base.page = page;
-		vm->scratch[0].base.daddr = addr;
-		vm->scratch_order = order;
-		return 0;
-
-unmap_page:
-		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
-free_page:
-		__free_pages(page, order);
-skip:
-		if (size == I915_GTT_PAGE_SIZE_4K)
-			return -ENOMEM;
-
-		size = I915_GTT_PAGE_SIZE_4K;
-		gfp &= ~__GFP_NOWARN;
-	} while (1);
-}
-
-static void cleanup_scratch_page(struct i915_address_space *vm)
-{
-	struct i915_page_dma *p = px_base(&vm->scratch[0]);
-	unsigned int order = vm->scratch_order;
-
-	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
-		       PCI_DMA_BIDIRECTIONAL);
-	__free_pages(p->page, order);
-}
-
-static void free_scratch(struct i915_address_space *vm)
-{
-	int i;
-
-	if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
-		return;
-
-	for (i = 1; i <= vm->top; i++) {
-		if (!px_dma(&vm->scratch[i]))
-			break;
-		cleanup_page_dma(vm, px_base(&vm->scratch[i]));
-	}
-
-	cleanup_scratch_page(vm);
-}
-
-static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
-{
-	struct i915_page_table *pt;
-
-	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
-	if (unlikely(!pt))
-		return ERR_PTR(-ENOMEM);
-
-	if (unlikely(setup_page_dma(vm, &pt->base))) {
-		kfree(pt);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	atomic_set(&pt->used, 0);
-	return pt;
-}
-
-static struct i915_page_directory *__alloc_pd(size_t sz)
-{
-	struct i915_page_directory *pd;
-
-	pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
-	if (unlikely(!pd))
-		return NULL;
-
-	spin_lock_init(&pd->lock);
-	return pd;
-}
-
-static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
-{
-	struct i915_page_directory *pd;
-
-	pd = __alloc_pd(sizeof(*pd));
-	if (unlikely(!pd))
-		return ERR_PTR(-ENOMEM);
-
-	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
-		kfree(pd);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	return pd;
-}
-
-static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
-{
-	cleanup_page_dma(vm, pd);
-	kfree(pd);
-}
-
-#define free_px(vm, px) free_pd(vm, px_base(px))
-
-static inline void
-write_dma_entry(struct i915_page_dma * const pdma,
-		const unsigned short idx,
-		const u64 encoded_entry)
-{
-	u64 * const vaddr = kmap_atomic(pdma->page);
-
-	vaddr[idx] = encoded_entry;
-	kunmap_atomic(vaddr);
-}
-
-static inline void
-__set_pd_entry(struct i915_page_directory * const pd,
-	       const unsigned short idx,
-	       struct i915_page_dma * const to,
-	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
-{
-	/* Each thread pre-pins the pd, and we may have a thread per pde. */
-	GEM_BUG_ON(atomic_read(px_used(pd)) > 2 * ARRAY_SIZE(pd->entry));
-
-	atomic_inc(px_used(pd));
-	pd->entry[idx] = to;
-	write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
-}
-
-#define set_pd_entry(pd, idx, to) \
-	__set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)
-
-static inline void
-clear_pd_entry(struct i915_page_directory * const pd,
-	       const unsigned short idx,
-	       const struct i915_page_scratch * const scratch)
-{
-	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
-
-	write_dma_entry(px_base(pd), idx, scratch->encode);
-	pd->entry[idx] = NULL;
-	atomic_dec(px_used(pd));
-}
-
-static bool
-release_pd_entry(struct i915_page_directory * const pd,
-		 const unsigned short idx,
-		 struct i915_page_table * const pt,
-		 const struct i915_page_scratch * const scratch)
-{
-	bool free = false;
-
-	if (atomic_add_unless(&pt->used, -1, 1))
-		return false;
-
-	spin_lock(&pd->lock);
-	if (atomic_dec_and_test(&pt->used)) {
-		clear_pd_entry(pd, idx, scratch);
-		free = true;
-	}
-	spin_unlock(&pd->lock);
-
-	return free;
-}
-
-static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
-{
-	struct drm_i915_private *dev_priv = ppgtt->vm.i915;
-	enum vgt_g2v_type msg;
-	int i;
-
-	if (create)
-		atomic_inc(px_used(ppgtt->pd)); /* never remove */
-	else
-		atomic_dec(px_used(ppgtt->pd));
-
-	mutex_lock(&dev_priv->vgpu.lock);
-
-	if (i915_vm_is_4lvl(&ppgtt->vm)) {
-		const u64 daddr = px_dma(ppgtt->pd);
-
-		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
-		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
-
-		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
-				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
-	} else {
-		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
-			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
-
-			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
-			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
-		}
-
-		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
-				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
-	}
-
-	/* g2v_notify atomically (via hv trap) consumes the message packet. */
-	I915_WRITE(vgtif_reg(g2v_notify), msg);
-
-	mutex_unlock(&dev_priv->vgpu.lock);
-}
-
-/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
-#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
-#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
-#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
-#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
-#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
-#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
-#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
-
-static inline unsigned int
-gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
-{
-	const int shift = gen8_pd_shift(lvl);
-	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
-
-	GEM_BUG_ON(start >= end);
-	end += ~mask >> gen8_pd_shift(1);
-
-	*idx = i915_pde_index(start, shift);
-	if ((start ^ end) & mask)
-		return GEN8_PDES - *idx;
-	else
-		return i915_pde_index(end, shift) - *idx;
-}
-
-static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
-{
-	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
-
-	GEM_BUG_ON(start >= end);
-	return (start ^ end) & mask && (start & ~mask) == 0;
-}
-
-static inline unsigned int gen8_pt_count(u64 start, u64 end)
-{
-	GEM_BUG_ON(start >= end);
-	if ((start ^ end) >> gen8_pd_shift(1))
-		return GEN8_PDES - (start & (GEN8_PDES - 1));
-	else
-		return end - start;
-}
-
-static inline unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
-{
-	unsigned int shift = __gen8_pte_shift(vm->top);
-	return (vm->total + (1ull << shift) - 1) >> shift;
-}
-
-static inline struct i915_page_directory *
-gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
-{
-	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
-
-	if (vm->top == 2)
-		return ppgtt->pd;
-	else
-		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
-}
-
-static inline struct i915_page_directory *
-gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
-{
-	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
-}
-
-static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
-				 struct i915_page_directory *pd,
-				 int count, int lvl)
-{
-	if (lvl) {
-		void **pde = pd->entry;
-
-		do {
-			if (!*pde)
-				continue;
-
-			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
-		} while (pde++, --count);
-	}
-
-	free_px(vm, pd);
-}
-
-static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
-{
-	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-
-	if (intel_vgpu_active(vm->i915))
-		gen8_ppgtt_notify_vgt(ppgtt, false);
-
-	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
-	free_scratch(vm);
-}
-
-static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
-			      struct i915_page_directory * const pd,
-			      u64 start, const u64 end, int lvl)
-{
-	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
-	unsigned int idx, len;
-
-	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
-
-	len = gen8_pd_range(start, end, lvl--, &idx);
-	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
-	    __func__, vm, lvl + 1, start, end,
-	    idx, len, atomic_read(px_used(pd)));
-	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
-
-	do {
-		struct i915_page_table *pt = pd->entry[idx];
-
-		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
-		    gen8_pd_contains(start, end, lvl)) {
-			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
-			    __func__, vm, lvl + 1, idx, start, end);
-			clear_pd_entry(pd, idx, scratch);
-			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
-			start += (u64)I915_PDES << gen8_pd_shift(lvl);
-			continue;
-		}
-
-		if (lvl) {
-			start = __gen8_ppgtt_clear(vm, as_pd(pt),
-						   start, end, lvl);
-		} else {
-			unsigned int count;
-			u64 *vaddr;
-
-			count = gen8_pt_count(start, end);
-			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
-			    __func__, vm, lvl, start, end,
-			    gen8_pd_index(start, 0), count,
-			    atomic_read(&pt->used));
-			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
-
-			vaddr = kmap_atomic_px(pt);
-			memset64(vaddr + gen8_pd_index(start, 0),
-				 vm->scratch[0].encode,
-				 count);
-			kunmap_atomic(vaddr);
-
-			atomic_sub(count, &pt->used);
-			start += count;
-		}
-
-		if (release_pd_entry(pd, idx, pt, scratch))
-			free_px(vm, pt);
-	} while (idx++, --len);
-
-	return start;
-}
-
-static void gen8_ppgtt_clear(struct i915_address_space *vm,
-			     u64 start, u64 length)
-{
-	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
-	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
-	GEM_BUG_ON(range_overflows(start, length, vm->total));
-
-	start >>= GEN8_PTE_SHIFT;
-	length >>= GEN8_PTE_SHIFT;
-	GEM_BUG_ON(length == 0);
-
-	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
-			   start, start + length, vm->top);
-}
-
-static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
-			      struct i915_page_directory * const pd,
-			      u64 * const start, const u64 end, int lvl)
-{
-	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
-	struct i915_page_table *alloc = NULL;
-	unsigned int idx, len;
-	int ret = 0;
-
-	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
-
-	len = gen8_pd_range(*start, end, lvl--, &idx);
-	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
-	    __func__, vm, lvl + 1, *start, end,
-	    idx, len, atomic_read(px_used(pd)));
-	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
-
-	spin_lock(&pd->lock);
-	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
-	do {
-		struct i915_page_table *pt = pd->entry[idx];
-
-		if (!pt) {
-			spin_unlock(&pd->lock);
-
-			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
-			    __func__, vm, lvl + 1, idx);
-
-			pt = fetch_and_zero(&alloc);
-			if (lvl) {
-				if (!pt) {
-					pt = &alloc_pd(vm)->pt;
-					if (IS_ERR(pt)) {
-						ret = PTR_ERR(pt);
-						goto out;
-					}
-				}
-
-				fill_px(pt, vm->scratch[lvl].encode);
-			} else {
-				if (!pt) {
-					pt = alloc_pt(vm);
-					if (IS_ERR(pt)) {
-						ret = PTR_ERR(pt);
-						goto out;
-					}
-				}
-
-				if (intel_vgpu_active(vm->i915) ||
-				    gen8_pt_count(*start, end) < I915_PDES)
-					fill_px(pt, vm->scratch[lvl].encode);
-			}
-
-			spin_lock(&pd->lock);
-			if (likely(!pd->entry[idx]))
-				set_pd_entry(pd, idx, pt);
-			else
-				alloc = pt, pt = pd->entry[idx];
-		}
-
-		if (lvl) {
-			atomic_inc(&pt->used);
-			spin_unlock(&pd->lock);
-
-			ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
-						 start, end, lvl);
-			if (unlikely(ret)) {
-				if (release_pd_entry(pd, idx, pt, scratch))
-					free_px(vm, pt);
-				goto out;
-			}
-
-			spin_lock(&pd->lock);
-			atomic_dec(&pt->used);
-			GEM_BUG_ON(!atomic_read(&pt->used));
-		} else {
-			unsigned int count = gen8_pt_count(*start, end);
-
-			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
-			    __func__, vm, lvl, *start, end,
-			    gen8_pd_index(*start, 0), count,
-			    atomic_read(&pt->used));
-
-			atomic_add(count, &pt->used);
-			/* All other pdes may be simultaneously removed */
-			GEM_BUG_ON(atomic_read(&pt->used) > 2 * I915_PDES);
-			*start += count;
-		}
-	} while (idx++, --len);
-	spin_unlock(&pd->lock);
-out:
-	if (alloc)
-		free_px(vm, alloc);
-	return ret;
-}
-
-static int gen8_ppgtt_alloc(struct i915_address_space *vm,
-			    u64 start, u64 length)
-{
-	u64 from;
-	int err;
-
-	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
-	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
-	GEM_BUG_ON(range_overflows(start, length, vm->total));
-
-	start >>= GEN8_PTE_SHIFT;
-	length >>= GEN8_PTE_SHIFT;
-	GEM_BUG_ON(length == 0);
-	from = start;
-
-	err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
-				 &start, start + length, vm->top);
-	if (unlikely(err && from != start))
-		__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
-				   from, start, vm->top);
-
-	return err;
-}
-
-static inline struct sgt_dma {
-	struct scatterlist *sg;
-	dma_addr_t dma, max;
-} sgt_dma(struct i915_vma *vma) {
-	struct scatterlist *sg = vma->pages->sgl;
-	dma_addr_t addr = sg_dma_address(sg);
-	return (struct sgt_dma) { sg, addr, addr + sg->length };
-}
-
-static __always_inline u64
-gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
-		      struct i915_page_directory *pdp,
-		      struct sgt_dma *iter,
-		      u64 idx,
-		      enum i915_cache_level cache_level,
-		      u32 flags)
-{
-	struct i915_page_directory *pd;
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
-	gen8_pte_t *vaddr;
-
-	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
-	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
-	do {
-		GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
-		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
-
-		iter->dma += I915_GTT_PAGE_SIZE;
-		if (iter->dma >= iter->max) {
-			iter->sg = __sg_next(iter->sg);
-			if (!iter->sg) {
-				idx = 0;
-				break;
-			}
-
-			iter->dma = sg_dma_address(iter->sg);
-			iter->max = iter->dma + iter->sg->length;
-		}
-
-		if (gen8_pd_index(++idx, 0) == 0) {
-			if (gen8_pd_index(idx, 1) == 0) {
-				/* Limited by sg length for 3lvl */
-				if (gen8_pd_index(idx, 2) == 0)
-					break;
-
-				pd = pdp->entry[gen8_pd_index(idx, 2)];
-			}
-
-			kunmap_atomic(vaddr);
-			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
-		}
-	} while (1);
-	kunmap_atomic(vaddr);
-
-	return idx;
-}
-
-static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
-				   struct sgt_dma *iter,
-				   enum i915_cache_level cache_level,
-				   u32 flags)
-{
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
-	u64 start = vma->node.start;
-	dma_addr_t rem = iter->sg->length;
-
-	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
-
-	do {
-		struct i915_page_directory * const pdp =
-			gen8_pdp_for_page_address(vma->vm, start);
-		struct i915_page_directory * const pd =
-			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
-		gen8_pte_t encode = pte_encode;
-		unsigned int maybe_64K = -1;
-		unsigned int page_size;
-		gen8_pte_t *vaddr;
-		u16 index;
-
-		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
-		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
-		    rem >= I915_GTT_PAGE_SIZE_2M &&
-		    !__gen8_pte_index(start, 0)) {
-			index = __gen8_pte_index(start, 1);
-			encode |= GEN8_PDE_PS_2M;
-			page_size = I915_GTT_PAGE_SIZE_2M;
-
-			vaddr = kmap_atomic_px(pd);
-		} else {
-			struct i915_page_table *pt =
-				i915_pt_entry(pd, __gen8_pte_index(start, 1));
-
-			index = __gen8_pte_index(start, 0);
-			page_size = I915_GTT_PAGE_SIZE;
-
-			if (!index &&
-			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
-			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
-			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
-			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
-				maybe_64K = __gen8_pte_index(start, 1);
-
-			vaddr = kmap_atomic_px(pt);
-		}
-
-		do {
-			GEM_BUG_ON(iter->sg->length < page_size);
-			vaddr[index++] = encode | iter->dma;
-
-			start += page_size;
-			iter->dma += page_size;
-			rem -= page_size;
-			if (iter->dma >= iter->max) {
-				iter->sg = __sg_next(iter->sg);
-				if (!iter->sg)
-					break;
-
-				rem = iter->sg->length;
-				iter->dma = sg_dma_address(iter->sg);
-				iter->max = iter->dma + rem;
-
-				if (maybe_64K != -1 && index < I915_PDES &&
-				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
-				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
-				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
-					maybe_64K = -1;
-
-				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
-					break;
-			}
-		} while (rem >= page_size && index < I915_PDES);
-
-		kunmap_atomic(vaddr);
-
-		/*
-		 * Is it safe to mark the 2M block as 64K? -- Either we have
-		 * filled whole page-table with 64K entries, or filled part of
-		 * it and have reached the end of the sg table and we have
-		 * enough padding.
-		 */
-		if (maybe_64K != -1 &&
-		    (index == I915_PDES ||
-		     (i915_vm_has_scratch_64K(vma->vm) &&
-		      !iter->sg && IS_ALIGNED(vma->node.start +
-					      vma->node.size,
-					      I915_GTT_PAGE_SIZE_2M)))) {
-			vaddr = kmap_atomic_px(pd);
-			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
-			kunmap_atomic(vaddr);
-			page_size = I915_GTT_PAGE_SIZE_64K;
-
-			/*
-			 * We write all 4K page entries, even when using 64K
-			 * pages. In order to verify that the HW isn't cheating
-			 * by using the 4K PTE instead of the 64K PTE, we want
-			 * to remove all the surplus entries. If the HW skipped
-			 * the 64K PTE, it will read/write into the scratch page
-			 * instead - which we detect as missing results during
-			 * selftests.
-			 */
-			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
-				u16 i;
-
-				encode = vma->vm->scratch[0].encode;
-				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
-
-				for (i = 1; i < index; i += 16)
-					memset64(vaddr + i, encode, 15);
-
-				kunmap_atomic(vaddr);
-			}
-		}
-
-		vma->page_sizes.gtt |= page_size;
-	} while (iter->sg);
-}
-
-static void gen8_ppgtt_insert(struct i915_address_space *vm,
-			      struct i915_vma *vma,
-			      enum i915_cache_level cache_level,
-			      u32 flags)
-{
-	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
-	struct sgt_dma iter = sgt_dma(vma);
-
-	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
-		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
-	} else  {
-		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
-
-		do {
-			struct i915_page_directory * const pdp =
-				gen8_pdp_for_page_index(vm, idx);
-
-			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
-						    cache_level, flags);
-		} while (idx);
-
-		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-	}
-}
-
-static int gen8_init_scratch(struct i915_address_space *vm)
-{
-	int ret;
-	int i;
-
-	/*
-	 * If everybody agrees to not to write into the scratch page,
-	 * we can reuse it for all vm, keeping contexts and processes separate.
-	 */
-	if (vm->has_read_only &&
-	    vm->i915->kernel_context &&
-	    vm->i915->kernel_context->vm) {
-		struct i915_address_space *clone =
-			rcu_dereference_protected(vm->i915->kernel_context->vm,
-						  true); /* static */
-
-		GEM_BUG_ON(!clone->has_read_only);
-
-		vm->scratch_order = clone->scratch_order;
-		memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
-		px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
-		return 0;
-	}
-
-	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
-	if (ret)
-		return ret;
-
-	vm->scratch[0].encode =
-		gen8_pte_encode(px_dma(&vm->scratch[0]),
-				I915_CACHE_LLC, vm->has_read_only);
-
-	for (i = 1; i <= vm->top; i++) {
-		if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
-			goto free_scratch;
-
-		fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
-		vm->scratch[i].encode =
-			gen8_pde_encode(px_dma(&vm->scratch[i]),
-					I915_CACHE_LLC);
-	}
-
-	return 0;
-
-free_scratch:
-	free_scratch(vm);
-	return -ENOMEM;
-}
-
-static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
-{
-	struct i915_address_space *vm = &ppgtt->vm;
-	struct i915_page_directory *pd = ppgtt->pd;
-	unsigned int idx;
-
-	GEM_BUG_ON(vm->top != 2);
-	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
-
-	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
-		struct i915_page_directory *pde;
-
-		pde = alloc_pd(vm);
-		if (IS_ERR(pde))
-			return PTR_ERR(pde);
-
-		fill_px(pde, vm->scratch[1].encode);
-		set_pd_entry(pd, idx, pde);
-		atomic_inc(px_used(pde)); /* keep pinned */
-	}
-	wmb();
-
-	return 0;
-}
-
-static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
-{
-	struct drm_i915_private *i915 = gt->i915;
-
-	ppgtt->vm.gt = gt;
-	ppgtt->vm.i915 = i915;
-	ppgtt->vm.dma = &i915->drm.pdev->dev;
-	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
-
-	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
-
-	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
-	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
-	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
-	ppgtt->vm.vma_ops.clear_pages = clear_pages;
-}
-
-static struct i915_page_directory *
-gen8_alloc_top_pd(struct i915_address_space *vm)
-{
-	const unsigned int count = gen8_pd_top_count(vm);
-	struct i915_page_directory *pd;
-
-	GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
-
-	pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
-	if (unlikely(!pd))
-		return ERR_PTR(-ENOMEM);
-
-	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
-		kfree(pd);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
-	atomic_inc(px_used(pd)); /* mark as pinned */
-	return pd;
-}
-
-/*
- * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
- * with a net effect resembling a 2-level page table in normal x86 terms. Each
- * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
- * space.
- *
- */
-static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
-{
-	struct i915_ppgtt *ppgtt;
-	int err;
-
-	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-	if (!ppgtt)
-		return ERR_PTR(-ENOMEM);
-
-	ppgtt_init(ppgtt, &i915->gt);
-	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
-
-	/*
-	 * From bdw, there is hw support for read-only pages in the PPGTT.
-	 *
-	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
-	 * for now.
-	 *
-	 * Gen12 has inherited the same read-only fault issue from gen11.
-	 */
-	ppgtt->vm.has_read_only = !IS_GEN_RANGE(i915, 11, 12);
-
-	/* There are only few exceptions for gen >=6. chv and bxt.
-	 * And we are not sure about the latter so play safe for now.
-	 */
-	if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
-		ppgtt->vm.pt_kmap_wc = true;
-
-	err = gen8_init_scratch(&ppgtt->vm);
-	if (err)
-		goto err_free;
-
-	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
-	if (IS_ERR(ppgtt->pd)) {
-		err = PTR_ERR(ppgtt->pd);
-		goto err_free_scratch;
-	}
-
-	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
-		err = gen8_preallocate_top_level_pdp(ppgtt);
-		if (err)
-			goto err_free_pd;
-	}
-
-	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
-	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
-	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
-	ppgtt->vm.clear_range = gen8_ppgtt_clear;
-
-	if (intel_vgpu_active(i915))
-		gen8_ppgtt_notify_vgt(ppgtt, true);
-
-	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
-
-	return ppgtt;
-
-err_free_pd:
-	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
-			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
-err_free_scratch:
-	free_scratch(&ppgtt->vm);
-err_free:
-	kfree(ppgtt);
-	return ERR_PTR(err);
-}
-
-/* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
-				  const unsigned int pde,
-				  const struct i915_page_table *pt)
-{
-	/* Caller needs to make sure the write completes if necessary */
-	iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
-		  ppgtt->pd_addr + pde);
-}
-
-static void gen7_ppgtt_enable(struct intel_gt *gt)
-{
-	struct drm_i915_private *i915 = gt->i915;
-	struct intel_uncore *uncore = gt->uncore;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	u32 ecochk;
-
-	intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
-
-	ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
-	if (IS_HASWELL(i915)) {
-		ecochk |= ECOCHK_PPGTT_WB_HSW;
-	} else {
-		ecochk |= ECOCHK_PPGTT_LLC_IVB;
-		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
-	}
-	intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
-
-	for_each_engine(engine, gt, id) {
-		/* GFX_MODE is per-ring on gen7+ */
-		ENGINE_WRITE(engine,
-			     RING_MODE_GEN7,
-			     _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-	}
-}
-
-static void gen6_ppgtt_enable(struct intel_gt *gt)
-{
-	struct intel_uncore *uncore = gt->uncore;
-
-	intel_uncore_rmw(uncore,
-			 GAC_ECO_BITS,
-			 0,
-			 ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
-
-	intel_uncore_rmw(uncore,
-			 GAB_CTL,
-			 0,
-			 GAB_CTL_CONT_AFTER_PAGEFAULT);
-
-	intel_uncore_rmw(uncore,
-			 GAM_ECOCHK,
-			 0,
-			 ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
-
-	if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
-		intel_uncore_write(uncore,
-				   GFX_MODE,
-				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-}
-
-/* PPGTT support for Sandybridge/Gen6 and later */
-static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
-				   u64 start, u64 length)
-{
-	struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
-	const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
-	const gen6_pte_t scratch_pte = vm->scratch[0].encode;
-	unsigned int pde = first_entry / GEN6_PTES;
-	unsigned int pte = first_entry % GEN6_PTES;
-	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
-
-	while (num_entries) {
-		struct i915_page_table * const pt =
-			i915_pt_entry(ppgtt->base.pd, pde++);
-		const unsigned int count = min(num_entries, GEN6_PTES - pte);
-		gen6_pte_t *vaddr;
-
-		GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
-
-		num_entries -= count;
-
-		GEM_BUG_ON(count > atomic_read(&pt->used));
-		if (!atomic_sub_return(count, &pt->used))
-			ppgtt->scan_for_unused_pt = true;
-
-		/*
-		 * Note that the hw doesn't support removing PDEs on the fly
-		 * (they are cached inside the context with no means to
-		 * invalidate the cache), so we can only reset the PTE
-		 * entries back to scratch.
-		 */
-
-		vaddr = kmap_atomic_px(pt);
-		memset32(vaddr + pte, scratch_pte, count);
-		kunmap_atomic(vaddr);
-
-		pte = 0;
-	}
-}
-
-static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
-				      struct i915_vma *vma,
-				      enum i915_cache_level cache_level,
-				      u32 flags)
-{
-	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-	struct i915_page_directory * const pd = ppgtt->pd;
-	unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
-	unsigned act_pt = first_entry / GEN6_PTES;
-	unsigned act_pte = first_entry % GEN6_PTES;
-	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
-	struct sgt_dma iter = sgt_dma(vma);
-	gen6_pte_t *vaddr;
-
-	GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
-
-	vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
-	do {
-		GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
-		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
-
-		iter.dma += I915_GTT_PAGE_SIZE;
-		if (iter.dma == iter.max) {
-			iter.sg = __sg_next(iter.sg);
-			if (!iter.sg)
-				break;
-
-			iter.dma = sg_dma_address(iter.sg);
-			iter.max = iter.dma + iter.sg->length;
-		}
-
-		if (++act_pte == GEN6_PTES) {
-			kunmap_atomic(vaddr);
-			vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
-			act_pte = 0;
-		}
-	} while (1);
-	kunmap_atomic(vaddr);
-
-	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-}
-
-static int gen6_alloc_va_range(struct i915_address_space *vm,
-			       u64 start, u64 length)
-{
-	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
-	struct i915_page_directory * const pd = ppgtt->base.pd;
-	struct i915_page_table *pt, *alloc = NULL;
-	intel_wakeref_t wakeref;
-	u64 from = start;
-	unsigned int pde;
-	bool flush = false;
-	int ret = 0;
-
-	wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
-
-	spin_lock(&pd->lock);
-	gen6_for_each_pde(pt, pd, start, length, pde) {
-		const unsigned int count = gen6_pte_count(start, length);
-
-		if (px_base(pt) == px_base(&vm->scratch[1])) {
-			spin_unlock(&pd->lock);
-
-			pt = fetch_and_zero(&alloc);
-			if (!pt)
-				pt = alloc_pt(vm);
-			if (IS_ERR(pt)) {
-				ret = PTR_ERR(pt);
-				goto unwind_out;
-			}
-
-			fill32_px(pt, vm->scratch[0].encode);
-
-			spin_lock(&pd->lock);
-			if (pd->entry[pde] == &vm->scratch[1]) {
-				pd->entry[pde] = pt;
-				if (i915_vma_is_bound(ppgtt->vma,
-						      I915_VMA_GLOBAL_BIND)) {
-					gen6_write_pde(ppgtt, pde, pt);
-					flush = true;
-				}
-			} else {
-				alloc = pt;
-				pt = pd->entry[pde];
-			}
-		}
-
-		atomic_add(count, &pt->used);
-	}
-	spin_unlock(&pd->lock);
-
-	if (flush)
-		gen6_ggtt_invalidate(vm->gt->ggtt);
-
-	goto out;
-
-unwind_out:
-	gen6_ppgtt_clear_range(vm, from, start - from);
-out:
-	if (alloc)
-		free_px(vm, alloc);
-	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
-	return ret;
-}
-
-static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
-{
-	struct i915_address_space * const vm = &ppgtt->base.vm;
-	struct i915_page_directory * const pd = ppgtt->base.pd;
-	int ret;
-
-	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
-	if (ret)
-		return ret;
-
-	vm->scratch[0].encode =
-		vm->pte_encode(px_dma(&vm->scratch[0]),
-			       I915_CACHE_NONE, PTE_READ_ONLY);
-
-	if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
-		cleanup_scratch_page(vm);
-		return -ENOMEM;
-	}
-
-	fill32_px(&vm->scratch[1], vm->scratch[0].encode);
-	memset_p(pd->entry, &vm->scratch[1], I915_PDES);
-
-	return 0;
-}
-
-static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
-{
-	struct i915_page_directory * const pd = ppgtt->base.pd;
-	struct i915_page_dma * const scratch =
-		px_base(&ppgtt->base.vm.scratch[1]);
-	struct i915_page_table *pt;
-	u32 pde;
-
-	gen6_for_all_pdes(pt, pd, pde)
-		if (px_base(pt) != scratch)
-			free_px(&ppgtt->base.vm, pt);
-}
-
-static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
-{
-	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
-
-	i915_vma_destroy(ppgtt->vma);
-
-	gen6_ppgtt_free_pd(ppgtt);
-	free_scratch(vm);
-
-	mutex_destroy(&ppgtt->pin_mutex);
-	kfree(ppgtt->base.pd);
-}
-
-static int pd_vma_set_pages(struct i915_vma *vma)
-{
-	vma->pages = ERR_PTR(-ENODEV);
-	return 0;
-}
-
-static void pd_vma_clear_pages(struct i915_vma *vma)
-{
-	GEM_BUG_ON(!vma->pages);
-
-	vma->pages = NULL;
-}
-
-static int pd_vma_bind(struct i915_vma *vma,
-		       enum i915_cache_level cache_level,
-		       u32 unused)
-{
-	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
-	struct gen6_ppgtt *ppgtt = vma->private;
-	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
-	struct i915_page_table *pt;
-	unsigned int pde;
-
-	px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
-	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
-
-	gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
-		gen6_write_pde(ppgtt, pde, pt);
-
-	gen6_ggtt_invalidate(ggtt);
-
-	return 0;
-}
-
-static void pd_vma_unbind(struct i915_vma *vma)
-{
-	struct gen6_ppgtt *ppgtt = vma->private;
-	struct i915_page_directory * const pd = ppgtt->base.pd;
-	struct i915_page_dma * const scratch =
-		px_base(&ppgtt->base.vm.scratch[1]);
-	struct i915_page_table *pt;
-	unsigned int pde;
-
-	if (!ppgtt->scan_for_unused_pt)
-		return;
-
-	/* Free all no longer used page tables */
-	gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
-		if (px_base(pt) == scratch || atomic_read(&pt->used))
-			continue;
-
-		free_px(&ppgtt->base.vm, pt);
-		pd->entry[pde] = scratch;
-	}
-
-	ppgtt->scan_for_unused_pt = false;
-}
-
-static const struct i915_vma_ops pd_vma_ops = {
-	.set_pages = pd_vma_set_pages,
-	.clear_pages = pd_vma_clear_pages,
-	.bind_vma = pd_vma_bind,
-	.unbind_vma = pd_vma_unbind,
-};
-
-static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
-{
-	struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
-	struct i915_vma *vma;
-
-	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
-	GEM_BUG_ON(size > ggtt->vm.total);
-
-	vma = i915_vma_alloc();
-	if (!vma)
-		return ERR_PTR(-ENOMEM);
-
-	i915_active_init(&vma->active, NULL, NULL);
-
-	mutex_init(&vma->pages_mutex);
-	vma->vm = i915_vm_get(&ggtt->vm);
-	vma->ops = &pd_vma_ops;
-	vma->private = ppgtt;
-
-	vma->size = size;
-	vma->fence_size = size;
-	atomic_set(&vma->flags, I915_VMA_GGTT);
-	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
-
-	INIT_LIST_HEAD(&vma->obj_link);
-	INIT_LIST_HEAD(&vma->closed_link);
-
-	return vma;
-}
-
-int gen6_ppgtt_pin(struct i915_ppgtt *base)
-{
-	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
-	int err = 0;
-
-	GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));
-
-	/*
-	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
-	 * which will be pinned into every active context.
-	 * (When vma->pin_count becomes atomic, I expect we will naturally
-	 * need a larger, unpacked, type and kill this redundancy.)
-	 */
-	if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
-		return 0;
-
-	if (mutex_lock_interruptible(&ppgtt->pin_mutex))
-		return -EINTR;
-
-	/*
-	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
-	 * allocator works in address space sizes, so it's multiplied by page
-	 * size. We allocate at the top of the GTT to avoid fragmentation.
-	 */
-	if (!atomic_read(&ppgtt->pin_count)) {
-		err = i915_vma_pin(ppgtt->vma,
-				   0, GEN6_PD_ALIGN,
-				   PIN_GLOBAL | PIN_HIGH);
-	}
-	if (!err)
-		atomic_inc(&ppgtt->pin_count);
-	mutex_unlock(&ppgtt->pin_mutex);
-
-	return err;
-}
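
As an illustrative aside (not part of this patch): gen6_ppgtt_pin() above uses a common two-level pattern, a lock-free fast path that only bumps an already non-zero pin count (atomic_add_unless) and a mutex-serialized slow path for the first pin. Below is a hedged user-space sketch of the same idea using C11 atomics and pthreads rather than the kernel primitives; struct pinned and do_first_pin are hypothetical names, not the driver's API.

#include <pthread.h>
#include <stdatomic.h>

/* Hypothetical types for the sketch only. */
struct pinned {
	atomic_int pin_count;
	pthread_mutex_t lock;
	int (*do_first_pin)(struct pinned *p);	/* expensive one-time setup */
};

static int pin(struct pinned *p)
{
	int err = 0;
	int cur = atomic_load(&p->pin_count);

	/* Fast path: only succeeds while the count is already non-zero. */
	while (cur > 0)
		if (atomic_compare_exchange_weak(&p->pin_count, &cur, cur + 1))
			return 0;

	/* Slow path: serialize the first pin behind a mutex. */
	pthread_mutex_lock(&p->lock);
	if (atomic_load(&p->pin_count) == 0)
		err = p->do_first_pin(p);
	if (!err)
		atomic_fetch_add(&p->pin_count, 1);
	pthread_mutex_unlock(&p->lock);

	return err;
}
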
-
-void gen6_ppgtt_unpin(struct i915_ppgtt *base)
-{
-	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
-
-	GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
-	if (atomic_dec_and_test(&ppgtt->pin_count))
-		i915_vma_unpin(ppgtt->vma);
-}
-
-void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
-{
-	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
-
-	if (!atomic_read(&ppgtt->pin_count))
-		return;
-
-	i915_vma_unpin(ppgtt->vma);
-	atomic_set(&ppgtt->pin_count, 0);
-}
-
-static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
-{
-	struct i915_ggtt * const ggtt = &i915->ggtt;
-	struct gen6_ppgtt *ppgtt;
-	int err;
-
-	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-	if (!ppgtt)
-		return ERR_PTR(-ENOMEM);
-
-	mutex_init(&ppgtt->pin_mutex);
-
-	ppgtt_init(&ppgtt->base, &i915->gt);
-	ppgtt->base.vm.top = 1;
-
-	ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
-	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
-	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
-	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
-	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
-
-	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
-
-	ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
-	if (!ppgtt->base.pd) {
-		err = -ENOMEM;
-		goto err_free;
-	}
-
-	err = gen6_ppgtt_init_scratch(ppgtt);
-	if (err)
-		goto err_pd;
-
-	ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
-	if (IS_ERR(ppgtt->vma)) {
-		err = PTR_ERR(ppgtt->vma);
-		goto err_scratch;
-	}
-
-	return &ppgtt->base;
-
-err_scratch:
-	free_scratch(&ppgtt->base.vm);
-err_pd:
-	kfree(ppgtt->base.pd);
-err_free:
-	kfree(ppgtt);
-	return ERR_PTR(err);
-}
-
-static void gtt_write_workarounds(struct intel_gt *gt)
-{
-	struct drm_i915_private *i915 = gt->i915;
-	struct intel_uncore *uncore = gt->uncore;
-
-	/* This function is for GTT-related workarounds. It is called on driver
-	 * load and after a GPU reset, so you can place workarounds here even
-	 * if they get overwritten by a GPU reset.
-	 */
-	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
-	if (IS_BROADWELL(i915))
-		intel_uncore_write(uncore,
-				   GEN8_L3_LRA_1_GPGPU,
-				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
-	else if (IS_CHERRYVIEW(i915))
-		intel_uncore_write(uncore,
-				   GEN8_L3_LRA_1_GPGPU,
-				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
-	else if (IS_GEN9_LP(i915))
-		intel_uncore_write(uncore,
-				   GEN8_L3_LRA_1_GPGPU,
-				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
-	else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
-		intel_uncore_write(uncore,
-				   GEN8_L3_LRA_1_GPGPU,
-				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
-
-	/*
-	 * To support 64K PTEs we need to first enable the use of the
-	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
-	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
-	 * shouldn't be needed after GEN10.
-	 *
-	 * 64K pages were first introduced from BDW+, although technically they
-	 * only *work* from gen9+. For pre-BDW we instead have the option for
-	 * 32K pages, but we don't currently have any support for it in our
-	 * driver.
-	 */
-	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
-	    INTEL_GEN(i915) <= 10)
-		intel_uncore_rmw(uncore,
-				 GEN8_GAMW_ECO_DEV_RW_IA,
-				 0,
-				 GAMW_ECO_ENABLE_64K_IPS_FIELD);
-
-	if (IS_GEN_RANGE(i915, 8, 11)) {
-		bool can_use_gtt_cache = true;
-
-		/*
-		 * According to the BSpec if we use 2M/1G pages then we also
-		 * need to disable the GTT cache. At least on BDW we can see
-		 * visual corruption when using 2M pages without disabling the
-		 * GTT cache.
-		 */
-		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
-			can_use_gtt_cache = false;
-
-		/* WaGttCachingOffByDefault */
-		intel_uncore_write(uncore,
-				   HSW_GTT_CACHE_EN,
-				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
-		WARN_ON_ONCE(can_use_gtt_cache &&
-			     intel_uncore_read(uncore,
-					       HSW_GTT_CACHE_EN) == 0);
-	}
-}
-
-int i915_ppgtt_init_hw(struct intel_gt *gt)
-{
-	struct drm_i915_private *i915 = gt->i915;
-
-	gtt_write_workarounds(gt);
-
-	if (IS_GEN(i915, 6))
-		gen6_ppgtt_enable(gt);
-	else if (IS_GEN(i915, 7))
-		gen7_ppgtt_enable(gt);
-
-	return 0;
-}
-
-static struct i915_ppgtt *
-__ppgtt_create(struct drm_i915_private *i915)
-{
-	if (INTEL_GEN(i915) < 8)
-		return gen6_ppgtt_create(i915);
-	else
-		return gen8_ppgtt_create(i915);
-}
-
-struct i915_ppgtt *
-i915_ppgtt_create(struct drm_i915_private *i915)
-{
-	struct i915_ppgtt *ppgtt;
-
-	ppgtt = __ppgtt_create(i915);
-	if (IS_ERR(ppgtt))
-		return ppgtt;
-
-	trace_i915_ppgtt_create(&ppgtt->vm);
-
-	return ppgtt;
-}
-
-/* Certain Gen5 chipsets require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static bool needs_idle_maps(struct drm_i915_private *dev_priv)
-{
-	/* Query intel_iommu to see if we need the workaround. Presumably that
-	 * was loaded first.
-	 */
-	return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
-}
-
-static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
-{
-	struct drm_i915_private *i915 = ggtt->vm.i915;
-
-	/* Don't bother messing with faults pre GEN6 as we have little
-	 * documentation supporting that it's a good idea.
-	 */
-	if (INTEL_GEN(i915) < 6)
-		return;
-
-	intel_gt_check_and_clear_faults(ggtt->vm.gt);
-
-	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
-	ggtt->invalidate(ggtt);
-}
-
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
-{
-	ggtt_suspend_mappings(&i915->ggtt);
-}
-
 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 			       struct sg_table *pages)
 {
@@ -2163,368 +52,6 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 	return -ENOSPC;
 }
 
-static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
-{
-	writeq(pte, addr);
-}
-
-static void gen8_ggtt_insert_page(struct i915_address_space *vm,
-				  dma_addr_t addr,
-				  u64 offset,
-				  enum i915_cache_level level,
-				  u32 unused)
-{
-	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	gen8_pte_t __iomem *pte =
-		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
-	gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
-
-	ggtt->invalidate(ggtt);
-}
-
-static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
-				     struct i915_vma *vma,
-				     enum i915_cache_level level,
-				     u32 flags)
-{
-	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	struct sgt_iter sgt_iter;
-	gen8_pte_t __iomem *gtt_entries;
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
-	dma_addr_t addr;
-
-	/*
-	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
-	 * not to allow the user to override access to a read only page.
-	 */
-
-	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
-	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
-	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
-		gen8_set_pte(gtt_entries++, pte_encode | addr);
-
-	/*
-	 * We want to flush the TLBs only after we're certain all the PTE
-	 * updates have finished.
-	 */
-	ggtt->invalidate(ggtt);
-}
-
-static void gen6_ggtt_insert_page(struct i915_address_space *vm,
-				  dma_addr_t addr,
-				  u64 offset,
-				  enum i915_cache_level level,
-				  u32 flags)
-{
-	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	gen6_pte_t __iomem *pte =
-		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
-	iowrite32(vm->pte_encode(addr, level, flags), pte);
-
-	ggtt->invalidate(ggtt);
-}
-
-/*
- * Binds an object into the global gtt with the specified cache level. The object
- * will be accessible to the GPU via commands whose operands reference offsets
- * within the global GTT as well as accessible by the GPU through the GMADR
- * mapped BAR (dev_priv->mm.gtt->gtt).
- */
-static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
-				     struct i915_vma *vma,
-				     enum i915_cache_level level,
-				     u32 flags)
-{
-	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
-	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
-	struct sgt_iter iter;
-	dma_addr_t addr;
-	for_each_sgt_daddr(addr, iter, vma->pages)
-		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
-
-	/*
-	 * We want to flush the TLBs only after we're certain all the PTE
-	 * updates have finished.
-	 */
-	ggtt->invalidate(ggtt);
-}
-
-static void nop_clear_range(struct i915_address_space *vm,
-			    u64 start, u64 length)
-{
-}
-
-static void gen8_ggtt_clear_range(struct i915_address_space *vm,
-				  u64 start, u64 length)
-{
-	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
-	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
-	const gen8_pte_t scratch_pte = vm->scratch[0].encode;
-	gen8_pte_t __iomem *gtt_base =
-		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
-	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
-	int i;
-
-	if (WARN(num_entries > max_entries,
-		 "First entry = %d; Num entries = %d (max=%d)\n",
-		 first_entry, num_entries, max_entries))
-		num_entries = max_entries;
-
-	for (i = 0; i < num_entries; i++)
-		gen8_set_pte(&gtt_base[i], scratch_pte);
-}
-
-static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
-{
-	struct drm_i915_private *dev_priv = vm->i915;
-
-	/*
-	 * Make sure the internal GAM fifo has been cleared of all GTT
-	 * writes before exiting stop_machine(). This guarantees that
-	 * any aperture accesses waiting to start in another process
-	 * cannot back up behind the GTT writes causing a hang.
-	 * The register can be any arbitrary GAM register.
-	 */
-	POSTING_READ(GFX_FLSH_CNTL_GEN6);
-}
-
-struct insert_page {
-	struct i915_address_space *vm;
-	dma_addr_t addr;
-	u64 offset;
-	enum i915_cache_level level;
-};
-
-static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
-{
-	struct insert_page *arg = _arg;
-
-	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
-	bxt_vtd_ggtt_wa(arg->vm);
-
-	return 0;
-}
-
-static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
-					  dma_addr_t addr,
-					  u64 offset,
-					  enum i915_cache_level level,
-					  u32 unused)
-{
-	struct insert_page arg = { vm, addr, offset, level };
-
-	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
-}
-
-struct insert_entries {
-	struct i915_address_space *vm;
-	struct i915_vma *vma;
-	enum i915_cache_level level;
-	u32 flags;
-};
-
-static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
-{
-	struct insert_entries *arg = _arg;
-
-	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
-	bxt_vtd_ggtt_wa(arg->vm);
-
-	return 0;
-}
-
-static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
-					     struct i915_vma *vma,
-					     enum i915_cache_level level,
-					     u32 flags)
-{
-	struct insert_entries arg = { vm, vma, level, flags };
-
-	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
-}
-
-struct clear_range {
-	struct i915_address_space *vm;
-	u64 start;
-	u64 length;
-};
-
-static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
-{
-	struct clear_range *arg = _arg;
-
-	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
-	bxt_vtd_ggtt_wa(arg->vm);
-
-	return 0;
-}
-
-static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
-					  u64 start,
-					  u64 length)
-{
-	struct clear_range arg = { vm, start, length };
-
-	stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
-}
-
-static void gen6_ggtt_clear_range(struct i915_address_space *vm,
-				  u64 start, u64 length)
-{
-	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
-	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
-	gen6_pte_t scratch_pte, __iomem *gtt_base =
-		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
-	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
-	int i;
-
-	if (WARN(num_entries > max_entries,
-		 "First entry = %d; Num entries = %d (max=%d)\n",
-		 first_entry, num_entries, max_entries))
-		num_entries = max_entries;
-
-	scratch_pte = vm->scratch[0].encode;
-	for (i = 0; i < num_entries; i++)
-		iowrite32(scratch_pte, &gtt_base[i]);
-}
-
-static void i915_ggtt_insert_page(struct i915_address_space *vm,
-				  dma_addr_t addr,
-				  u64 offset,
-				  enum i915_cache_level cache_level,
-				  u32 unused)
-{
-	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
-		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
-	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-}
-
-static void i915_ggtt_insert_entries(struct i915_address_space *vm,
-				     struct i915_vma *vma,
-				     enum i915_cache_level cache_level,
-				     u32 unused)
-{
-	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
-		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
-	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
-				    flags);
-}
-
-static void i915_ggtt_clear_range(struct i915_address_space *vm,
-				  u64 start, u64 length)
-{
-	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
-}
-
-static int ggtt_bind_vma(struct i915_vma *vma,
-			 enum i915_cache_level cache_level,
-			 u32 flags)
-{
-	struct drm_i915_private *i915 = vma->vm->i915;
-	struct drm_i915_gem_object *obj = vma->obj;
-	intel_wakeref_t wakeref;
-	u32 pte_flags;
-
-	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
-	pte_flags = 0;
-	if (i915_gem_object_is_readonly(obj))
-		pte_flags |= PTE_READ_ONLY;
-
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-
-	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-
-	/*
-	 * Without aliasing PPGTT there's no difference between
-	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
-	 * upgrade to both bound if we bind either to avoid double-binding.
-	 */
-	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
-
-	return 0;
-}
-
-static void ggtt_unbind_vma(struct i915_vma *vma)
-{
-	struct drm_i915_private *i915 = vma->vm->i915;
-	intel_wakeref_t wakeref;
-
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-}
-
-static int aliasing_gtt_bind_vma(struct i915_vma *vma,
-				 enum i915_cache_level cache_level,
-				 u32 flags)
-{
-	struct drm_i915_private *i915 = vma->vm->i915;
-	u32 pte_flags;
-	int ret;
-
-	/* Currently applicable only to VLV */
-	pte_flags = 0;
-	if (i915_gem_object_is_readonly(vma->obj))
-		pte_flags |= PTE_READ_ONLY;
-
-	if (flags & I915_VMA_LOCAL_BIND) {
-		struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
-
-		if (flags & I915_VMA_ALLOC) {
-			ret = alias->vm.allocate_va_range(&alias->vm,
-							  vma->node.start,
-							  vma->size);
-			if (ret)
-				return ret;
-
-			set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
-		}
-
-		GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
-				     __i915_vma_flags(vma)));
-		alias->vm.insert_entries(&alias->vm, vma,
-					 cache_level, pte_flags);
-	}
-
-	if (flags & I915_VMA_GLOBAL_BIND) {
-		intel_wakeref_t wakeref;
-
-		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-			vma->vm->insert_entries(vma->vm, vma,
-						cache_level, pte_flags);
-		}
-	}
-
-	return 0;
-}
-
-static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
-{
-	struct drm_i915_private *i915 = vma->vm->i915;
-
-	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
-		struct i915_address_space *vm = vma->vm;
-		intel_wakeref_t wakeref;
-
-		with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-			vm->clear_range(vm, vma->node.start, vma->size);
-	}
-
-	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
-		struct i915_address_space *vm =
-			&i915_vm_to_ggtt(vma->vm)->alias->vm;
-
-		vm->clear_range(vm, vma->node.start, vma->size);
-	}
-}
-
 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 			       struct sg_table *pages)
 {
@@ -2545,1067 +72,6 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
 }
 
-static int ggtt_set_pages(struct i915_vma *vma)
-{
-	int ret;
-
-	GEM_BUG_ON(vma->pages);
-
-	ret = i915_get_ggtt_vma_pages(vma);
-	if (ret)
-		return ret;
-
-	vma->page_sizes = vma->obj->mm.page_sizes;
-
-	return 0;
-}
-
-static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
-				   unsigned long color,
-				   u64 *start,
-				   u64 *end)
-{
-	if (i915_node_color_differs(node, color))
-		*start += I915_GTT_PAGE_SIZE;
-
-	/* Also leave a space between the unallocated reserved node after the
-	 * GTT and any objects within the GTT, i.e. we use the color adjustment
-	 * to insert a guard page to prevent prefetches crossing over the
-	 * GTT boundary.
-	 */
-	node = list_next_entry(node, node_list);
-	if (node->color != color)
-		*end -= I915_GTT_PAGE_SIZE;
-}
-
-static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
-{
-	struct i915_ppgtt *ppgtt;
-	int err;
-
-	ppgtt = i915_ppgtt_create(ggtt->vm.i915);
-	if (IS_ERR(ppgtt))
-		return PTR_ERR(ppgtt);
-
-	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
-		err = -ENODEV;
-		goto err_ppgtt;
-	}
-
-	/*
-	 * Note we only pre-allocate as far as the end of the global
-	 * GTT. On 48b / 4-level page-tables, the difference is very,
-	 * very significant! We have to preallocate as GVT/vgpu does
-	 * not like the page directory disappearing.
-	 */
-	err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
-	if (err)
-		goto err_ppgtt;
-
-	ggtt->alias = ppgtt;
-	ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
-
-	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
-	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
-
-	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
-	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
-
-	return 0;
-
-err_ppgtt:
-	i915_vm_put(&ppgtt->vm);
-	return err;
-}
-
-static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
-{
-	struct i915_ppgtt *ppgtt;
-
-	ppgtt = fetch_and_zero(&ggtt->alias);
-	if (!ppgtt)
-		return;
-
-	i915_vm_put(&ppgtt->vm);
-
-	ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
-	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-}
-
-static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
-{
-	u64 size;
-	int ret;
-
-	if (!USES_GUC(ggtt->vm.i915))
-		return 0;
-
-	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
-	size = ggtt->vm.total - GUC_GGTT_TOP;
-
-	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
-				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
-				   PIN_NOEVICT);
-	if (ret)
-		DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
-
-	return ret;
-}
-
-static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
-{
-	if (drm_mm_node_allocated(&ggtt->uc_fw))
-		drm_mm_remove_node(&ggtt->uc_fw);
-}
-
-static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
-{
-	ggtt_release_guc_top(ggtt);
-	if (drm_mm_node_allocated(&ggtt->error_capture))
-		drm_mm_remove_node(&ggtt->error_capture);
-}
-
-static int init_ggtt(struct i915_ggtt *ggtt)
-{
-	/* Let GEM manage all of the aperture.
-	 *
-	 * However, leave one page at the end still bound to the scratch page.
-	 * There are a number of places where the hardware apparently prefetches
-	 * past the end of the object, and we've seen multiple hangs with the
-	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
-	 * aperture.  One page should be enough to keep any prefetching inside
-	 * of the aperture.
-	 */
-	unsigned long hole_start, hole_end;
-	struct drm_mm_node *entry;
-	int ret;
-
-	/*
-	 * GuC requires all resources that we're sharing with it to be placed in
-	 * non-WOPCM memory. If GuC is not present or not in use we still need a
-	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
-	 * why.
-	 */
-	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
-			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
-
-	ret = intel_vgt_balloon(ggtt);
-	if (ret)
-		return ret;
-
-	if (ggtt->mappable_end) {
-		/* Reserve a mappable slot for our lockless error capture */
-		ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
-						  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
-						  0, ggtt->mappable_end,
-						  DRM_MM_INSERT_LOW);
-		if (ret)
-			return ret;
-	}
-
-	/*
-	 * The upper portion of the GuC address space has a sizeable hole
-	 * (several MB) that is inaccessible to GuC. Reserve this range within
-	 * the GGTT as it can comfortably hold GuC/HuC firmware images.
-	 */
-	ret = ggtt_reserve_guc_top(ggtt);
-	if (ret)
-		goto err;
-
-	/* Clear any non-preallocated blocks */
-	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
-		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
-			      hole_start, hole_end);
-		ggtt->vm.clear_range(&ggtt->vm, hole_start,
-				     hole_end - hole_start);
-	}
-
-	/* And finally clear the reserved guard page */
-	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
-
-	return 0;
-
-err:
-	cleanup_init_ggtt(ggtt);
-	return ret;
-}
-
-int i915_init_ggtt(struct drm_i915_private *i915)
-{
-	int ret;
-
-	ret = init_ggtt(&i915->ggtt);
-	if (ret)
-		return ret;
-
-	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
-		ret = init_aliasing_ppgtt(&i915->ggtt);
-		if (ret)
-			cleanup_init_ggtt(&i915->ggtt);
-	}
-
-	return 0;
-}
-
-static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
-{
-	struct i915_vma *vma, *vn;
-
-	atomic_set(&ggtt->vm.open, 0);
-
-	rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
-	flush_workqueue(ggtt->vm.i915->wq);
-
-	mutex_lock(&ggtt->vm.mutex);
-
-	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
-		WARN_ON(__i915_vma_unbind(vma));
-
-	if (drm_mm_node_allocated(&ggtt->error_capture))
-		drm_mm_remove_node(&ggtt->error_capture);
-
-	ggtt_release_guc_top(ggtt);
-	intel_vgt_deballoon(ggtt);
-
-	ggtt->vm.cleanup(&ggtt->vm);
-
-	mutex_unlock(&ggtt->vm.mutex);
-	i915_address_space_fini(&ggtt->vm);
-
-	arch_phys_wc_del(ggtt->mtrr);
-
-	if (ggtt->iomap.size)
-		io_mapping_fini(&ggtt->iomap);
-}
-
-/**
- * i915_ggtt_driver_release - Clean up GGTT hardware initialization
- * @i915: i915 device
- */
-void i915_ggtt_driver_release(struct drm_i915_private *i915)
-{
-	struct pagevec *pvec;
-
-	fini_aliasing_ppgtt(&i915->ggtt);
-
-	ggtt_cleanup_hw(&i915->ggtt);
-
-	pvec = &i915->mm.wc_stash.pvec;
-	if (pvec->nr) {
-		set_pages_array_wb(pvec->pages, pvec->nr);
-		__pagevec_release(pvec);
-	}
-}
-
-static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
-{
-	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
-	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
-	return snb_gmch_ctl << 20;
-}
-
-static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
-{
-	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
-	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
-	if (bdw_gmch_ctl)
-		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
-
-#ifdef CONFIG_X86_32
-	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
-	if (bdw_gmch_ctl > 4)
-		bdw_gmch_ctl = 4;
-#endif
-
-	return bdw_gmch_ctl << 20;
-}
-
-static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
-{
-	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
-	gmch_ctrl &= SNB_GMCH_GGMS_MASK;
-
-	if (gmch_ctrl)
-		return 1 << (20 + gmch_ctrl);
-
-	return 0;
-}
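
As an illustrative aside (not part of this patch): the gen8 decode above turns the GGMS field value n into 1 << n MiB of PTE space, and with 8-byte PTEs and 4 KiB pages each MiB of PTEs maps 512 MiB of address space, which is what the later ggtt->vm.total computation relies on. A standalone sketch of that arithmetic follows; gen8_gtt_size_bytes() is a hypothetical helper name used only here.

#include <stdio.h>
#include <stdint.h>

/* gen8: GGMS field value n selects 1 << n MiB of PTE space (0 = none). */
static uint64_t gen8_gtt_size_bytes(unsigned int ggms)
{
	return ggms ? (1ull << ggms) << 20 : 0;
}

int main(void)
{
	uint64_t size = gen8_gtt_size_bytes(3);		 /* 8 MiB of PTEs */
	uint64_t total = size / sizeof(uint64_t) * 4096; /* 8-byte PTEs, 4 KiB pages */

	printf("%llu MiB of PTEs map %llu GiB of GGTT\n",
	       (unsigned long long)(size >> 20),
	       (unsigned long long)(total >> 30));	/* prints 8 and 4 */
	return 0;
}
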
-
-static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
-{
-	struct drm_i915_private *dev_priv = ggtt->vm.i915;
-	struct pci_dev *pdev = dev_priv->drm.pdev;
-	phys_addr_t phys_addr;
-	int ret;
-
-	/* For modern GENs the PTEs and register space are split in the BAR */
-	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
-
-	/*
-	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
-	 * will be dropped. For WC mappings in general we have 64 byte burst
-	 * writes when the WC buffer is flushed, so we can't use it, but have to
-	 * resort to an uncached mapping. The WC issue is easily caught by the
-	 * readback check when writing GTT PTE entries.
-	 */
-	if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
-		ggtt->gsm = ioremap(phys_addr, size);
-	else
-		ggtt->gsm = ioremap_wc(phys_addr, size);
-	if (!ggtt->gsm) {
-		DRM_ERROR("Failed to map the ggtt page table\n");
-		return -ENOMEM;
-	}
-
-	ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
-	if (ret) {
-		DRM_ERROR("Scratch setup failed\n");
-		/* iounmap will also get called at remove, but meh */
-		iounmap(ggtt->gsm);
-		return ret;
-	}
-
-	ggtt->vm.scratch[0].encode =
-		ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
-				    I915_CACHE_NONE, 0);
-
-	return 0;
-}
-
-static void tgl_setup_private_ppat(struct intel_uncore *uncore)
-{
-	/* TGL doesn't support LLC or AGE settings */
-	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
-	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
-	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
-	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
-	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
-	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
-	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
-	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
-}
-
-static void cnl_setup_private_ppat(struct intel_uncore *uncore)
-{
-	intel_uncore_write(uncore,
-			   GEN10_PAT_INDEX(0),
-			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
-	intel_uncore_write(uncore,
-			   GEN10_PAT_INDEX(1),
-			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
-	intel_uncore_write(uncore,
-			   GEN10_PAT_INDEX(2),
-			   GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
-	intel_uncore_write(uncore,
-			   GEN10_PAT_INDEX(3),
-			   GEN8_PPAT_UC);
-	intel_uncore_write(uncore,
-			   GEN10_PAT_INDEX(4),
-			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
-	intel_uncore_write(uncore,
-			   GEN10_PAT_INDEX(5),
-			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
-	intel_uncore_write(uncore,
-			   GEN10_PAT_INDEX(6),
-			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
-	intel_uncore_write(uncore,
-			   GEN10_PAT_INDEX(7),
-			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
-}
-
-/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
- * bits. When using advanced contexts each context stores its own PAT, but
- * writing this data shouldn't be harmful even in those cases. */
-static void bdw_setup_private_ppat(struct intel_uncore *uncore)
-{
-	u64 pat;
-
-	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
-	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
-	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |	/* for scanout with eLLC */
-	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
-	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
-	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
-	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
-	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
-
-	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
-	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
-}
-
-static void chv_setup_private_ppat(struct intel_uncore *uncore)
-{
-	u64 pat;
-
-	/*
-	 * Map WB on BDW to snooped on CHV.
-	 *
-	 * Only the snoop bit has meaning for CHV, the rest is
-	 * ignored.
-	 *
-	 * The hardware will never snoop for certain types of accesses:
-	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
-	 * - PPGTT page tables
-	 * - some other special cycles
-	 *
-	 * As with BDW, we also need to consider the following for GT accesses:
-	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
-	 * so RTL will always use the value corresponding to
-	 * pat_sel = 000".
-	 * Which means we must set the snoop bit in PAT entry 0
-	 * in order to keep the global status page working.
-	 */
-
-	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
-	      GEN8_PPAT(1, 0) |
-	      GEN8_PPAT(2, 0) |
-	      GEN8_PPAT(3, 0) |
-	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
-	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
-	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
-	      GEN8_PPAT(7, CHV_PPAT_SNOOP);
-
-	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
-	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
-}
-
-static void gen6_gmch_remove(struct i915_address_space *vm)
-{
-	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-
-	iounmap(ggtt->gsm);
-	cleanup_scratch_page(vm);
-}
-
-static void setup_private_pat(struct intel_uncore *uncore)
-{
-	struct drm_i915_private *i915 = uncore->i915;
-
-	GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
-	if (INTEL_GEN(i915) >= 12)
-		tgl_setup_private_ppat(uncore);
-	else if (INTEL_GEN(i915) >= 10)
-		cnl_setup_private_ppat(uncore);
-	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
-		chv_setup_private_ppat(uncore);
-	else
-		bdw_setup_private_ppat(uncore);
-}
-
-static struct resource pci_resource(struct pci_dev *pdev, int bar)
-{
-	return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
-					       pci_resource_len(pdev, bar));
-}
-
-static int gen8_gmch_probe(struct i915_ggtt *ggtt)
-{
-	struct drm_i915_private *dev_priv = ggtt->vm.i915;
-	struct pci_dev *pdev = dev_priv->drm.pdev;
-	unsigned int size;
-	u16 snb_gmch_ctl;
-	int err;
-
-	/* TODO: We're not aware of mappable constraints on gen8 yet */
-	if (!IS_DGFX(dev_priv)) {
-		ggtt->gmadr = pci_resource(pdev, 2);
-		ggtt->mappable_end = resource_size(&ggtt->gmadr);
-	}
-
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
-	if (!err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
-	if (err)
-		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
-
-	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-	if (IS_CHERRYVIEW(dev_priv))
-		size = chv_get_total_gtt_size(snb_gmch_ctl);
-	else
-		size = gen8_get_total_gtt_size(snb_gmch_ctl);
-
-	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
-	ggtt->vm.cleanup = gen6_gmch_remove;
-	ggtt->vm.insert_page = gen8_ggtt_insert_page;
-	ggtt->vm.clear_range = nop_clear_range;
-	if (intel_scanout_needs_vtd_wa(dev_priv))
-		ggtt->vm.clear_range = gen8_ggtt_clear_range;
-
-	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
-
-	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
-	if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
-	    IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
-		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
-		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
-		if (ggtt->vm.clear_range != nop_clear_range)
-			ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
-	}
-
-	ggtt->invalidate = gen6_ggtt_invalidate;
-
-	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
-	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
-	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
-	ggtt->vm.vma_ops.clear_pages = clear_pages;
-
-	ggtt->vm.pte_encode = gen8_pte_encode;
-
-	setup_private_pat(ggtt->vm.gt->uncore);
-
-	return ggtt_probe_common(ggtt, size);
-}
-
-static int gen6_gmch_probe(struct i915_ggtt *ggtt)
-{
-	struct drm_i915_private *dev_priv = ggtt->vm.i915;
-	struct pci_dev *pdev = dev_priv->drm.pdev;
-	unsigned int size;
-	u16 snb_gmch_ctl;
-	int err;
-
-	ggtt->gmadr =
-		(struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
-						 pci_resource_len(pdev, 2));
-	ggtt->mappable_end = resource_size(&ggtt->gmadr);
-
-	/* 64/512MB is the current min/max we actually know of, but this is just
-	 * a coarse sanity check.
-	 */
-	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
-		DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
-		return -ENXIO;
-	}
-
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
-	if (!err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
-	if (err)
-		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
-	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-
-	size = gen6_get_total_gtt_size(snb_gmch_ctl);
-	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
-
-	ggtt->vm.clear_range = nop_clear_range;
-	if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
-		ggtt->vm.clear_range = gen6_ggtt_clear_range;
-	ggtt->vm.insert_page = gen6_ggtt_insert_page;
-	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
-	ggtt->vm.cleanup = gen6_gmch_remove;
-
-	ggtt->invalidate = gen6_ggtt_invalidate;
-
-	if (HAS_EDRAM(dev_priv))
-		ggtt->vm.pte_encode = iris_pte_encode;
-	else if (IS_HASWELL(dev_priv))
-		ggtt->vm.pte_encode = hsw_pte_encode;
-	else if (IS_VALLEYVIEW(dev_priv))
-		ggtt->vm.pte_encode = byt_pte_encode;
-	else if (INTEL_GEN(dev_priv) >= 7)
-		ggtt->vm.pte_encode = ivb_pte_encode;
-	else
-		ggtt->vm.pte_encode = snb_pte_encode;
-
-	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
-	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
-	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
-	ggtt->vm.vma_ops.clear_pages = clear_pages;
-
-	return ggtt_probe_common(ggtt, size);
-}
-
-static void i915_gmch_remove(struct i915_address_space *vm)
-{
-	intel_gmch_remove();
-}
-
-static int i915_gmch_probe(struct i915_ggtt *ggtt)
-{
-	struct drm_i915_private *dev_priv = ggtt->vm.i915;
-	phys_addr_t gmadr_base;
-	int ret;
-
-	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
-	if (!ret) {
-		DRM_ERROR("failed to set up gmch\n");
-		return -EIO;
-	}
-
-	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
-
-	ggtt->gmadr =
-		(struct resource) DEFINE_RES_MEM(gmadr_base,
-						 ggtt->mappable_end);
-
-	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
-	ggtt->vm.insert_page = i915_ggtt_insert_page;
-	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
-	ggtt->vm.clear_range = i915_ggtt_clear_range;
-	ggtt->vm.cleanup = i915_gmch_remove;
-
-	ggtt->invalidate = gmch_ggtt_invalidate;
-
-	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
-	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
-	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
-	ggtt->vm.vma_ops.clear_pages = clear_pages;
-
-	if (unlikely(ggtt->do_idle_maps))
-		dev_notice(dev_priv->drm.dev,
-			   "Applying Ironlake quirks for intel_iommu\n");
-
-	return 0;
-}
-
-static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
-{
-	struct drm_i915_private *i915 = gt->i915;
-	int ret;
-
-	ggtt->vm.gt = gt;
-	ggtt->vm.i915 = i915;
-	ggtt->vm.dma = &i915->drm.pdev->dev;
-
-	if (INTEL_GEN(i915) <= 5)
-		ret = i915_gmch_probe(ggtt);
-	else if (INTEL_GEN(i915) < 8)
-		ret = gen6_gmch_probe(ggtt);
-	else
-		ret = gen8_gmch_probe(ggtt);
-	if (ret)
-		return ret;
-
-	if ((ggtt->vm.total - 1) >> 32) {
-		DRM_ERROR("We never expected a Global GTT with more than 32bits"
-			  " of address space! Found %lldM!\n",
-			  ggtt->vm.total >> 20);
-		ggtt->vm.total = 1ULL << 32;
-		ggtt->mappable_end =
-			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
-	}
-
-	if (ggtt->mappable_end > ggtt->vm.total) {
-		DRM_ERROR("mappable aperture extends past end of GGTT,"
-			  " aperture=%pa, total=%llx\n",
-			  &ggtt->mappable_end, ggtt->vm.total);
-		ggtt->mappable_end = ggtt->vm.total;
-	}
-
-	/* GMADR is the PCI mmio aperture into the global GTT. */
-	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
-	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
-	DRM_DEBUG_DRIVER("DSM size = %lluM\n",
-			 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
-
-	return 0;
-}
-
-/**
- * i915_ggtt_probe_hw - Probe GGTT hardware location
- * @i915: i915 device
- */
-int i915_ggtt_probe_hw(struct drm_i915_private *i915)
-{
-	int ret;
-
-	ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
-	if (ret)
-		return ret;
-
-	if (intel_vtd_active())
-		dev_info(i915->drm.dev, "VT-d active for gfx access\n");
-
-	return 0;
-}
-
-static int ggtt_init_hw(struct i915_ggtt *ggtt)
-{
-	struct drm_i915_private *i915 = ggtt->vm.i915;
-
-	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
-
-	ggtt->vm.is_ggtt = true;
-
-	/* Only VLV supports read-only GGTT mappings */
-	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
-
-	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
-		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
-
-	if (ggtt->mappable_end) {
-		if (!io_mapping_init_wc(&ggtt->iomap,
-					ggtt->gmadr.start,
-					ggtt->mappable_end)) {
-			ggtt->vm.cleanup(&ggtt->vm);
-			return -EIO;
-		}
-
-		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
-					      ggtt->mappable_end);
-	}
-
-	i915_ggtt_init_fences(ggtt);
-
-	return 0;
-}
-
-/**
- * i915_ggtt_init_hw - Initialize GGTT hardware
- * @dev_priv: i915 device
- */
-int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
-{
-	int ret;
-
-	stash_init(&dev_priv->mm.wc_stash);
-
-	/* Note that we use page colouring to enforce a guard page at the
-	 * end of the address space. This is required as the CS may prefetch
-	 * beyond the end of the batch buffer, across the page boundary,
-	 * and beyond the end of the GTT if we do not provide a guard.
-	 */
-	ret = ggtt_init_hw(&dev_priv->ggtt);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
-{
-	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
-		return -EIO;
-
-	return 0;
-}
-
-void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
-{
-	GEM_BUG_ON(ggtt->invalidate != gen6_ggtt_invalidate);
-
-	ggtt->invalidate = guc_ggtt_invalidate;
-
-	ggtt->invalidate(ggtt);
-}
-
-void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
-{
-	/* XXX Temporary pardon for error unload */
-	if (ggtt->invalidate == gen6_ggtt_invalidate)
-		return;
-
-	/* We should only be called after i915_ggtt_enable_guc() */
-	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
-
-	ggtt->invalidate = gen6_ggtt_invalidate;
-
-	ggtt->invalidate(ggtt);
-}
-
-static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
-{
-	struct i915_vma *vma;
-	bool flush = false;
-	int open;
-
-	intel_gt_check_and_clear_faults(ggtt->vm.gt);
-
-	mutex_lock(&ggtt->vm.mutex);
-
-	/* First fill our portion of the GTT with scratch pages */
-	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
-	/* Skip rewriting PTE on VMA unbind. */
-	open = atomic_xchg(&ggtt->vm.open, 0);
-
-	/* clflush objects bound into the GGTT and rebind them. */
-	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
-		struct drm_i915_gem_object *obj = vma->obj;
-
-		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
-			continue;
-
-		clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
-		WARN_ON(i915_vma_bind(vma,
-				      obj ? obj->cache_level : 0,
-				      PIN_GLOBAL, NULL));
-		if (obj) { /* only used during resume => exclusive access */
-			flush |= fetch_and_zero(&obj->write_domain);
-			obj->read_domains |= I915_GEM_DOMAIN_GTT;
-		}
-	}
-
-	atomic_set(&ggtt->vm.open, open);
-	ggtt->invalidate(ggtt);
-
-	mutex_unlock(&ggtt->vm.mutex);
-
-	if (flush)
-		wbinvd_on_all_cpus();
-}
-
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
-{
-	struct i915_ggtt *ggtt = &i915->ggtt;
-
-	ggtt_restore_mappings(ggtt);
-
-	if (INTEL_GEN(i915) >= 8)
-		setup_private_pat(ggtt->vm.gt->uncore);
-}
-
-static struct scatterlist *
-rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
-	     unsigned int width, unsigned int height,
-	     unsigned int stride,
-	     struct sg_table *st, struct scatterlist *sg)
-{
-	unsigned int column, row;
-	unsigned int src_idx;
-
-	for (column = 0; column < width; column++) {
-		src_idx = stride * (height - 1) + column + offset;
-		for (row = 0; row < height; row++) {
-			st->nents++;
-			/* We don't need the pages, but need to initialize
-			 * the entries so the sg list can be happily traversed.
-			 * The only thing we need is DMA addresses.
-			 */
-			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
-			sg_dma_address(sg) =
-				i915_gem_object_get_dma_address(obj, src_idx);
-			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
-			sg = sg_next(sg);
-			src_idx -= stride;
-		}
-	}
-
-	return sg;
-}
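
As an illustrative aside (not part of this patch): rotate_pages() above walks the source pages column by column, starting at the bottom row (src_idx = stride * (height - 1) + column + offset) and stepping back one stride per row. The standalone sketch below reproduces just that index walk with made-up 2x3 dimensions to show the emitted order.

#include <stdio.h>

int main(void)
{
	const unsigned int width = 2, height = 3, stride = 4, offset = 0;
	unsigned int column, row, src_idx;

	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			printf("dst(col=%u,row=%u) <- src page %u\n",
			       column, row, src_idx);
			src_idx -= stride;	/* step up one source row */
		}
	}
	return 0;	/* prints src pages 8, 4, 0, then 9, 5, 1 */
}
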
-
-static noinline struct sg_table *
-intel_rotate_pages(struct intel_rotation_info *rot_info,
-		   struct drm_i915_gem_object *obj)
-{
-	unsigned int size = intel_rotation_info_size(rot_info);
-	struct sg_table *st;
-	struct scatterlist *sg;
-	int ret = -ENOMEM;
-	int i;
-
-	/* Allocate target SG list. */
-	st = kmalloc(sizeof(*st), GFP_KERNEL);
-	if (!st)
-		goto err_st_alloc;
-
-	ret = sg_alloc_table(st, size, GFP_KERNEL);
-	if (ret)
-		goto err_sg_alloc;
-
-	st->nents = 0;
-	sg = st->sgl;
-
-	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
-		sg = rotate_pages(obj, rot_info->plane[i].offset,
-				  rot_info->plane[i].width, rot_info->plane[i].height,
-				  rot_info->plane[i].stride, st, sg);
-	}
-
-	return st;
-
-err_sg_alloc:
-	kfree(st);
-err_st_alloc:
-
-	DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
-			 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
-
-	return ERR_PTR(ret);
-}
-
-static struct scatterlist *
-remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
-	    unsigned int width, unsigned int height,
-	    unsigned int stride,
-	    struct sg_table *st, struct scatterlist *sg)
-{
-	unsigned int row;
-
-	for (row = 0; row < height; row++) {
-		unsigned int left = width * I915_GTT_PAGE_SIZE;
-
-		while (left) {
-			dma_addr_t addr;
-			unsigned int length;
-
-			/* We don't need the pages, but need to initialize
-			 * the entries so the sg list can be happily traversed.
-			 * The only thing we need is DMA addresses.
-			 */
-
-			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
-
-			length = min(left, length);
-
-			st->nents++;
-
-			sg_set_page(sg, NULL, length, 0);
-			sg_dma_address(sg) = addr;
-			sg_dma_len(sg) = length;
-			sg = sg_next(sg);
-
-			offset += length / I915_GTT_PAGE_SIZE;
-			left -= length;
-		}
-
-		offset += stride - width;
-	}
-
-	return sg;
-}
-
-static noinline struct sg_table *
-intel_remap_pages(struct intel_remapped_info *rem_info,
-		  struct drm_i915_gem_object *obj)
-{
-	unsigned int size = intel_remapped_info_size(rem_info);
-	struct sg_table *st;
-	struct scatterlist *sg;
-	int ret = -ENOMEM;
-	int i;
-
-	/* Allocate target SG list. */
-	st = kmalloc(sizeof(*st), GFP_KERNEL);
-	if (!st)
-		goto err_st_alloc;
-
-	ret = sg_alloc_table(st, size, GFP_KERNEL);
-	if (ret)
-		goto err_sg_alloc;
-
-	st->nents = 0;
-	sg = st->sgl;
-
-	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
-		sg = remap_pages(obj, rem_info->plane[i].offset,
-				 rem_info->plane[i].width, rem_info->plane[i].height,
-				 rem_info->plane[i].stride, st, sg);
-	}
-
-	i915_sg_trim(st);
-
-	return st;
-
-err_sg_alloc:
-	kfree(st);
-err_st_alloc:
-
-	DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
-			 obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
-
-	return ERR_PTR(ret);
-}
-
-static noinline struct sg_table *
-intel_partial_pages(const struct i915_ggtt_view *view,
-		    struct drm_i915_gem_object *obj)
-{
-	struct sg_table *st;
-	struct scatterlist *sg, *iter;
-	unsigned int count = view->partial.size;
-	unsigned int offset;
-	int ret = -ENOMEM;
-
-	st = kmalloc(sizeof(*st), GFP_KERNEL);
-	if (!st)
-		goto err_st_alloc;
-
-	ret = sg_alloc_table(st, count, GFP_KERNEL);
-	if (ret)
-		goto err_sg_alloc;
-
-	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
-	GEM_BUG_ON(!iter);
-
-	sg = st->sgl;
-	st->nents = 0;
-	do {
-		unsigned int len;
-
-		len = min(iter->length - (offset << PAGE_SHIFT),
-			  count << PAGE_SHIFT);
-		sg_set_page(sg, NULL, len, 0);
-		sg_dma_address(sg) =
-			sg_dma_address(iter) + (offset << PAGE_SHIFT);
-		sg_dma_len(sg) = len;
-
-		st->nents++;
-		count -= len >> PAGE_SHIFT;
-		if (count == 0) {
-			sg_mark_end(sg);
-			i915_sg_trim(st); /* Drop any unused tail entries. */
-
-			return st;
-		}
-
-		sg = __sg_next(sg);
-		iter = __sg_next(iter);
-		offset = 0;
-	} while (1);
-
-err_sg_alloc:
-	kfree(st);
-err_st_alloc:
-	return ERR_PTR(ret);
-}
-
-static int
-i915_get_ggtt_vma_pages(struct i915_vma *vma)
-{
-	int ret;
-
-	/* The vma->pages are only valid within the lifespan of the borrowed
-	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
-	 * must be the vma->pages. A simple rule is that vma->pages must only
-	 * be accessed when the obj->mm.pages are pinned.
-	 */
-	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
-
-	switch (vma->ggtt_view.type) {
-	default:
-		GEM_BUG_ON(vma->ggtt_view.type);
-		/* fall through */
-	case I915_GGTT_VIEW_NORMAL:
-		vma->pages = vma->obj->mm.pages;
-		return 0;
-
-	case I915_GGTT_VIEW_ROTATED:
-		vma->pages =
-			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
-		break;
-
-	case I915_GGTT_VIEW_REMAPPED:
-		vma->pages =
-			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
-		break;
-
-	case I915_GGTT_VIEW_PARTIAL:
-		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
-		break;
-	}
-
-	ret = 0;
-	if (IS_ERR(vma->pages)) {
-		ret = PTR_ERR(vma->pages);
-		vma->pages = NULL;
-		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
-			  vma->ggtt_view.type, ret);
-	}
-	return ret;
-}
-
 /**
  * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
  * @vm: the &struct i915_address_space
@@ -3827,6 +293,5 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_gtt.c"
 #include "selftests/i915_gem_gtt.c"
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 402283c..f6226df 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -1,638 +1,21 @@
+/* SPDX-License-Identifier: MIT */
 /*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Please try to maintain the following order within this file unless it makes
- * sense to do otherwise. From top to bottom:
- * 1. typedefs
- * 2. #defines, and macros
- * 3. structure definitions
- * 4. function prototypes
- *
- * Within each section, please try to order by generation in ascending order,
- * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
+ * Copyright © 2020 Intel Corporation
  */
 
 #ifndef __I915_GEM_GTT_H__
 #define __I915_GEM_GTT_H__
 
 #include <linux/io-mapping.h>
-#include <linux/kref.h>
-#include <linux/mm.h>
-#include <linux/pagevec.h>
-#include <linux/workqueue.h>
+#include <linux/types.h>
 
 #include <drm/drm_mm.h>
 
-#include "gt/intel_reset.h"
-#include "i915_gem_fence_reg.h"
-#include "i915_request.h"
+#include "gt/intel_gtt.h"
 #include "i915_scatterlist.h"
-#include "i915_selftest.h"
-#include "gt/intel_timeline.h"
 
-#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
-#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
-#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)
-
-#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
-#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
-
-#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
-
-#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
-
-#define I915_FENCE_REG_NONE -1
-#define I915_MAX_NUM_FENCES 32
-/* 32 fences + sign bit for FENCE_REG_NONE */
-#define I915_MAX_NUM_FENCE_BITS 6
-
-struct drm_i915_file_private;
 struct drm_i915_gem_object;
-struct i915_vma;
-struct intel_gt;
-
-typedef u32 gen6_pte_t;
-typedef u64 gen8_pte_t;
-
-#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
-
-/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
-#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
-#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
-#define GEN6_PTE_CACHE_LLC		(2 << 1)
-#define GEN6_PTE_UNCACHED		(1 << 1)
-#define GEN6_PTE_VALID			(1 << 0)
-
-#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
-#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
-#define I915_PDES			512
-#define I915_PDE_MASK			(I915_PDES - 1)
-#define NUM_PTE(pde_shift)     (1 << (pde_shift - PAGE_SHIFT))
-
-#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
-#define GEN6_PD_SIZE		        (I915_PDES * PAGE_SIZE)
-#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
-#define GEN6_PDE_SHIFT			22
-#define GEN6_PDE_VALID			(1 << 0)
-
-#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)
-
-#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
-#define BYT_PTE_WRITEABLE		(1 << 1)
-
-/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
- * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
- */
-#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
-					 (((bits) & 0x8) << (11 - 3)))
-#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
-#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
-#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
-#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
-#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
-#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
-#define HSW_PTE_UNCACHED		(0)
-#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
-#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
-
-/*
- * GEN8 32b style address is defined as a 3 level page table:
- * 31:30 | 29:21 | 20:12 |  11:0
- * PDPE  |  PDE  |  PTE  | offset
- * The difference as compared to normal x86 3 level page table is the PDPEs are
- * programmed via register.
- *
- * GEN8 48b style address is defined as a 4 level page table:
- * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
- * PML4E | PDPE  |  PDE  |  PTE  | offset
- */
-#define GEN8_3LVL_PDPES			4
-
-#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
-#define PPAT_CACHED_PDE			0 /* WB LLC */
-#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
-#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */
-
-#define CHV_PPAT_SNOOP			(1<<6)
-#define GEN8_PPAT_AGE(x)		((x)<<4)
-#define GEN8_PPAT_LLCeLLC		(3<<2)
-#define GEN8_PPAT_LLCELLC		(2<<2)
-#define GEN8_PPAT_LLC			(1<<2)
-#define GEN8_PPAT_WB			(3<<0)
-#define GEN8_PPAT_WT			(2<<0)
-#define GEN8_PPAT_WC			(1<<0)
-#define GEN8_PPAT_UC			(0<<0)
-#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
-#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))
-
-#define GEN8_PDE_IPS_64K BIT(11)
-#define GEN8_PDE_PS_2M   BIT(7)
-
-#define for_each_sgt_daddr(__dp, __iter, __sgt) \
-	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
-
-struct intel_remapped_plane_info {
-	/* in gtt pages */
-	unsigned int width, height, stride, offset;
-} __packed;
-
-struct intel_remapped_info {
-	struct intel_remapped_plane_info plane[2];
-	unsigned int unused_mbz;
-} __packed;
-
-struct intel_rotation_info {
-	struct intel_remapped_plane_info plane[2];
-} __packed;
-
-struct intel_partial_info {
-	u64 offset;
-	unsigned int size;
-} __packed;
-
-enum i915_ggtt_view_type {
-	I915_GGTT_VIEW_NORMAL = 0,
-	I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
-	I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
-	I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
-};
-
-static inline void assert_i915_gem_gtt_types(void)
-{
-	BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
-	BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
-	BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int));
-
-	/* Check that rotation/remapped shares offsets for simplicity */
-	BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
-		     offsetof(struct intel_rotation_info, plane[0]));
-	BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
-		     offsetofend(struct intel_rotation_info, plane[1]));
-
-	/* As we encode the size of each branch inside the union into its type,
-	 * we have to be careful that each branch has a unique size.
-	 */
-	switch ((enum i915_ggtt_view_type)0) {
-	case I915_GGTT_VIEW_NORMAL:
-	case I915_GGTT_VIEW_PARTIAL:
-	case I915_GGTT_VIEW_ROTATED:
-	case I915_GGTT_VIEW_REMAPPED:
-		/* gcc complains if these are identical cases */
-		break;
-	}
-}
-
-struct i915_ggtt_view {
-	enum i915_ggtt_view_type type;
-	union {
-		/* Members need to contain no holes/padding */
-		struct intel_partial_info partial;
-		struct intel_rotation_info rotated;
-		struct intel_remapped_info remapped;
-	};
-};
-
-enum i915_cache_level;
-
-struct i915_vma;
-
-struct i915_page_dma {
-	struct page *page;
-	union {
-		dma_addr_t daddr;
-
-		/* For gen6/gen7 only. This is the offset in the GGTT
-		 * where the page directory entries for PPGTT begin
-		 */
-		u32 ggtt_offset;
-	};
-};
-
-struct i915_page_scratch {
-	struct i915_page_dma base;
-	u64 encode;
-};
-
-struct i915_page_table {
-	struct i915_page_dma base;
-	atomic_t used;
-};
-
-struct i915_page_directory {
-	struct i915_page_table pt;
-	spinlock_t lock;
-	void *entry[512];
-};
-
-#define __px_choose_expr(x, type, expr, other) \
-	__builtin_choose_expr( \
-	__builtin_types_compatible_p(typeof(x), type) || \
-	__builtin_types_compatible_p(typeof(x), const type), \
-	({ type __x = (type)(x); expr; }), \
-	other)
-
-#define px_base(px) \
-	__px_choose_expr(px, struct i915_page_dma *, __x, \
-	__px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
-	__px_choose_expr(px, struct i915_page_table *, &__x->base, \
-	__px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
-	(void)0))))
-#define px_dma(px) (px_base(px)->daddr)
-
-#define px_pt(px) \
-	__px_choose_expr(px, struct i915_page_table *, __x, \
-	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
-	(void)0))
-#define px_used(px) (&px_pt(px)->used)
-
-struct i915_vma_ops {
-	/* Map an object into an address space with the given cache flags. */
-	int (*bind_vma)(struct i915_vma *vma,
-			enum i915_cache_level cache_level,
-			u32 flags);
-	/*
-	 * Unmap an object from an address space. This usually consists of
-	 * setting the valid PTE entries to a reserved scratch page.
-	 */
-	void (*unbind_vma)(struct i915_vma *vma);
-
-	int (*set_pages)(struct i915_vma *vma);
-	void (*clear_pages)(struct i915_vma *vma);
-};
-
-struct pagestash {
-	spinlock_t lock;
-	struct pagevec pvec;
-};
-
-struct i915_address_space {
-	struct kref ref;
-	struct rcu_work rcu;
-
-	struct drm_mm mm;
-	struct intel_gt *gt;
-	struct drm_i915_private *i915;
-	struct device *dma;
-	/* Every address space belongs to a struct file - except for the global
-	 * GTT that is owned by the driver (and so @file is set to NULL). In
-	 * principle, no information should leak from one context to another
-	 * (or between files/processes etc) unless explicitly shared by the
-	 * owner. Tracking the owner is important in order to free up per-file
-	 * objects along with the file, to aide resource tracking, and to
-	 * assign blame.
-	 */
-	struct drm_i915_file_private *file;
-	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
-	u64 reserved;		/* size addr space reserved */
-
-	unsigned int bind_async_flags;
-
-	/*
-	 * Each active user context has its own address space (in full-ppgtt).
-	 * Since the vm may be shared between multiple contexts, we count how
-	 * many contexts keep us "open". Once open hits zero, we are closed
-	 * and do not allow any new attachments, and proceed to shutdown our
-	 * vma and page directories.
-	 */
-	atomic_t open;
-
-	struct mutex mutex; /* protects vma and our lists */
-#define VM_CLASS_GGTT 0
-#define VM_CLASS_PPGTT 1
-
-	struct i915_page_scratch scratch[4];
-	unsigned int scratch_order;
-	unsigned int top;
-
-	/**
-	 * List of vma currently bound.
-	 */
-	struct list_head bound_list;
-
-	struct pagestash free_pages;
-
-	/* Global GTT */
-	bool is_ggtt:1;
-
-	/* Some systems require uncached updates of the page directories */
-	bool pt_kmap_wc:1;
-
-	/* Some systems support read-only mappings for GGTT and/or PPGTT */
-	bool has_read_only:1;
-
-	u64 (*pte_encode)(dma_addr_t addr,
-			  enum i915_cache_level level,
-			  u32 flags); /* Create a valid PTE */
-#define PTE_READ_ONLY	(1<<0)
-
-	int (*allocate_va_range)(struct i915_address_space *vm,
-				 u64 start, u64 length);
-	void (*clear_range)(struct i915_address_space *vm,
-			    u64 start, u64 length);
-	void (*insert_page)(struct i915_address_space *vm,
-			    dma_addr_t addr,
-			    u64 offset,
-			    enum i915_cache_level cache_level,
-			    u32 flags);
-	void (*insert_entries)(struct i915_address_space *vm,
-			       struct i915_vma *vma,
-			       enum i915_cache_level cache_level,
-			       u32 flags);
-	void (*cleanup)(struct i915_address_space *vm);
-
-	struct i915_vma_ops vma_ops;
-
-	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
-	I915_SELFTEST_DECLARE(bool scrub_64K);
-};
-
-#define i915_is_ggtt(vm) ((vm)->is_ggtt)
-
-static inline bool
-i915_vm_is_4lvl(const struct i915_address_space *vm)
-{
-	return (vm->total - 1) >> 32;
-}
-
-static inline bool
-i915_vm_has_scratch_64K(struct i915_address_space *vm)
-{
-	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
-}
-
-static inline bool
-i915_vm_has_cache_coloring(struct i915_address_space *vm)
-{
-	return i915_is_ggtt(vm) && vm->mm.color_adjust;
-}
-
-/* The Graphics Translation Table is the way in which GEN hardware translates a
- * Graphics Virtual Address into a Physical Address. In addition to the normal
- * collateral associated with any va->pa translations GEN hardware also has a
- * portion of the GTT which can be mapped by the CPU and remain both coherent
- * and correct (in cases like swizzling). That region is referred to as GMADR in
- * the spec.
- */
-struct i915_ggtt {
-	struct i915_address_space vm;
-
-	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
-	struct resource gmadr;          /* GMADR resource */
-	resource_size_t mappable_end;	/* End offset that we can CPU map */
-
-	/** "Graphics Stolen Memory" holds the global PTEs */
-	void __iomem *gsm;
-	void (*invalidate)(struct i915_ggtt *ggtt);
-
-	/** PPGTT used for aliasing the PPGTT with the GTT */
-	struct i915_ppgtt *alias;
-
-	bool do_idle_maps;
-
-	int mtrr;
-
-	/** Bit 6 swizzling required for X tiling */
-	u32 bit_6_swizzle_x;
-	/** Bit 6 swizzling required for Y tiling */
-	u32 bit_6_swizzle_y;
-
-	u32 pin_bias;
-
-	unsigned int num_fences;
-	struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
-	struct list_head fence_list;
-
-	/** List of all objects in gtt_space, currently mmaped by userspace.
-	 * All objects within this list must also be on bound_list.
-	 */
-	struct list_head userfault_list;
-
-	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
-	struct intel_wakeref_auto userfault_wakeref;
-
-	struct drm_mm_node error_capture;
-	struct drm_mm_node uc_fw;
-};
-
-struct i915_ppgtt {
-	struct i915_address_space vm;
-
-	struct i915_page_directory *pd;
-};
-
-struct gen6_ppgtt {
-	struct i915_ppgtt base;
-
-	struct i915_vma *vma;
-	gen6_pte_t __iomem *pd_addr;
-
-	atomic_t pin_count;
-	struct mutex pin_mutex;
-
-	bool scan_for_unused_pt;
-};
-
-#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
-
-static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
-{
-	BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
-	return __to_gen6_ppgtt(base);
-}
-
-/*
- * gen6_for_each_pde() iterates over every pde from start until start+length.
- * If start and start+length are not perfectly divisible, the macro will round
- * down and up as needed. Start=0 and length=2G effectively iterates over
- * every PDE in the system. The macro modifies ALL its parameters except 'pd',
- * so each of the other parameters should preferably be a simple variable, or
- * at most an lvalue with no side-effects!
- */
-#define gen6_for_each_pde(pt, pd, start, length, iter)			\
-	for (iter = gen6_pde_index(start);				\
-	     length > 0 && iter < I915_PDES &&				\
-		     (pt = i915_pt_entry(pd, iter), true);		\
-	     ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT);		\
-		    temp = min(temp - start, length);			\
-		    start += temp, length -= temp; }), ++iter)
-
-#define gen6_for_all_pdes(pt, pd, iter)					\
-	for (iter = 0;							\
-	     iter < I915_PDES &&					\
-		     (pt = i915_pt_entry(pd, iter), true);		\
-	     ++iter)
-
-static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
-{
-	const u32 mask = NUM_PTE(pde_shift) - 1;
-
-	return (address >> PAGE_SHIFT) & mask;
-}
-
-/* Helper to counts the number of PTEs within the given length. This count
- * does not cross a page table boundary, so the max value would be
- * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
-*/
-static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
-{
-	const u64 mask = ~((1ULL << pde_shift) - 1);
-	u64 end;
-
-	GEM_BUG_ON(length == 0);
-	GEM_BUG_ON(offset_in_page(addr | length));
-
-	end = addr + length;
-
-	if ((addr & mask) != (end & mask))
-		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
-
-	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
-}
-
-static inline u32 i915_pde_index(u64 addr, u32 shift)
-{
-	return (addr >> shift) & I915_PDE_MASK;
-}
-
-static inline u32 gen6_pte_index(u32 addr)
-{
-	return i915_pte_index(addr, GEN6_PDE_SHIFT);
-}
-
-static inline u32 gen6_pte_count(u32 addr, u32 length)
-{
-	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
-}
-
-static inline u32 gen6_pde_index(u32 addr)
-{
-	return i915_pde_index(addr, GEN6_PDE_SHIFT);
-}
-
-static inline struct i915_page_table *
-i915_pt_entry(const struct i915_page_directory * const pd,
-	      const unsigned short n)
-{
-	return pd->entry[n];
-}
-
-static inline struct i915_page_directory *
-i915_pd_entry(const struct i915_page_directory * const pdp,
-	      const unsigned short n)
-{
-	return pdp->entry[n];
-}
-
-static inline dma_addr_t
-i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
-{
-	struct i915_page_dma *pt = ppgtt->pd->entry[n];
-
-	return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
-}
-
-static inline struct i915_ggtt *
-i915_vm_to_ggtt(struct i915_address_space *vm)
-{
-	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
-	GEM_BUG_ON(!i915_is_ggtt(vm));
-	return container_of(vm, struct i915_ggtt, vm);
-}
-
-static inline struct i915_ppgtt *
-i915_vm_to_ppgtt(struct i915_address_space *vm)
-{
-	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
-	GEM_BUG_ON(i915_is_ggtt(vm));
-	return container_of(vm, struct i915_ppgtt, vm);
-}
-
-int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
-int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
-int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
-void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
-void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
-int i915_init_ggtt(struct drm_i915_private *dev_priv);
-void i915_ggtt_driver_release(struct drm_i915_private *dev_priv);
-
-static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
-{
-	return ggtt->mappable_end > 0;
-}
-
-int i915_ppgtt_init_hw(struct intel_gt *gt);
-
-struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
-
-static inline struct i915_address_space *
-i915_vm_get(struct i915_address_space *vm)
-{
-	kref_get(&vm->ref);
-	return vm;
-}
-
-void i915_vm_release(struct kref *kref);
-
-static inline void i915_vm_put(struct i915_address_space *vm)
-{
-	kref_put(&vm->ref, i915_vm_release);
-}
-
-static inline struct i915_address_space *
-i915_vm_open(struct i915_address_space *vm)
-{
-	GEM_BUG_ON(!atomic_read(&vm->open));
-	atomic_inc(&vm->open);
-	return i915_vm_get(vm);
-}
-
-static inline bool
-i915_vm_tryopen(struct i915_address_space *vm)
-{
-	if (atomic_add_unless(&vm->open, 1, 0))
-		return i915_vm_get(vm);
-
-	return false;
-}
-
-void __i915_vm_close(struct i915_address_space *vm);
-
-static inline void
-i915_vm_close(struct i915_address_space *vm)
-{
-	GEM_BUG_ON(!atomic_read(&vm->open));
-	if (atomic_dec_and_test(&vm->open))
-		__i915_vm_close(vm);
-
-	i915_vm_put(vm);
-}
-
-int gen6_ppgtt_pin(struct i915_ppgtt *base);
-void gen6_ppgtt_unpin(struct i915_ppgtt *base);
-void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
-
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
+struct i915_address_space;
 
 int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 					    struct sg_table *pages);
@@ -663,6 +46,6 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 #define PIN_GLOBAL		BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
 #define PIN_USER		BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
 
-#define PIN_OFFSET_MASK		(-I915_GTT_PAGE_SIZE)
+#define PIN_OFFSET_MASK		I915_GTT_PAGE_MASK
 
 #endif
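The i915_pte_index()/i915_pte_count() helpers removed above (the header now pulls its definitions in via gt/intel_gtt.h) are pure arithmetic, so their behaviour is easy to check in isolation. The following standalone sketch assumes the gen6 layout (4 KiB pages and a 22-bit PDE shift, i.e. 1024 PTEs per PDE); pte_index() and pte_count() are hypothetical stand-ins, not kernel symbols.

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define GEN6_PDE_SHIFT	22
#define NUM_PTE(pde_shift)	(1u << ((pde_shift) - PAGE_SHIFT))

static uint32_t pte_index(uint64_t addr, unsigned int pde_shift)
{
	return (addr >> PAGE_SHIFT) & (NUM_PTE(pde_shift) - 1);
}

/* PTEs covered by [addr, addr + length) without crossing a PDE boundary. */
static uint32_t pte_count(uint64_t addr, uint64_t length, unsigned int pde_shift)
{
	const uint64_t mask = ~((1ull << pde_shift) - 1);
	const uint64_t end = addr + length;

	if ((addr & mask) != (end & mask))	/* range spills into the next PDE */
		return NUM_PTE(pde_shift) - pte_index(addr, pde_shift);

	return pte_index(end, pde_shift) - pte_index(addr, pde_shift);
}

int main(void)
{
	/* 64 KiB from offset 0 stays inside one PDE: 16 PTEs. */
	assert(pte_count(0, 64 * 1024, GEN6_PDE_SHIFT) == 16);

	/* 8 MiB from offset 16 KiB crosses a PDE: clipped to 1024 - 4 PTEs. */
	assert(pte_count(16 * 1024, 8 * 1024 * 1024, GEN6_PDE_SHIFT) == 1020);
	return 0;
}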
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index cf8a8c3..54fce81d 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -2,6 +2,7 @@
  * SPDX-License-Identifier: MIT
  */
 
+#include "gem/i915_gem_mman.h"
 #include "gt/intel_engine_user.h"
 
 #include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index be127cd..3aa2136 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -20,7 +20,10 @@ static LIST_HEAD(globals);
 static atomic_t active;
 static atomic_t epoch;
 static struct park_work {
-	struct rcu_work work;
+	struct delayed_work work;
+	struct rcu_head rcu;
+	unsigned long flags;
+#define PENDING 0
 	int epoch;
 } park;
 
@@ -37,11 +40,33 @@ static void i915_globals_shrink(void)
 		global->shrink();
 }
 
+static void __i915_globals_grace(struct rcu_head *rcu)
+{
+	/* Ratelimit parking as shrinking is quite slow */
+	schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
+}
+
+static void __i915_globals_queue_rcu(void)
+{
+	park.epoch = atomic_inc_return(&epoch);
+	if (!atomic_read(&active)) {
+		init_rcu_head(&park.rcu);
+		call_rcu(&park.rcu, __i915_globals_grace);
+	}
+}
+
 static void __i915_globals_park(struct work_struct *work)
 {
+	destroy_rcu_head(&park.rcu);
+
 	/* Confirm nothing woke up in the last grace period */
-	if (park.epoch == atomic_read(&epoch))
-		i915_globals_shrink();
+	if (park.epoch != atomic_read(&epoch)) {
+		__i915_globals_queue_rcu();
+		return;
+	}
+
+	clear_bit(PENDING, &park.flags);
+	i915_globals_shrink();
 }
 
 void __init i915_global_register(struct i915_global *global)
@@ -85,7 +110,7 @@ int __init i915_globals_init(void)
 		}
 	}
 
-	INIT_RCU_WORK(&park.work, __i915_globals_park);
+	INIT_DELAYED_WORK(&park.work, __i915_globals_park);
 	return 0;
 }
 
@@ -103,8 +128,9 @@ void i915_globals_park(void)
 	if (!atomic_dec_and_test(&active))
 		return;
 
-	park.epoch = atomic_inc_return(&epoch);
-	queue_rcu_work(system_wq, &park.work);
+	/* Queue cleanup after the next RCU grace period has freed slabs */
+	if (!test_and_set_bit(PENDING, &park.flags))
+		__i915_globals_queue_rcu();
 }
 
 void i915_globals_unpark(void)
@@ -113,12 +139,21 @@ void i915_globals_unpark(void)
 	atomic_inc(&active);
 }
 
+static void __exit __i915_globals_flush(void)
+{
+	atomic_inc(&active); /* skip shrinking */
+
+	rcu_barrier(); /* wait for the work to be queued */
+	flush_delayed_work(&park.work);
+
+	atomic_dec(&active);
+}
+
 void __exit i915_globals_exit(void)
 {
-	/* Flush any residual park_work */
-	atomic_inc(&epoch);
-	flush_rcu_work(&park.work);
+	GEM_BUG_ON(atomic_read(&active));
 
+	__i915_globals_flush();
 	__i915_globals_cleanup();
 
 	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
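The reworked parking path above chains two mechanisms: call_rcu() defers the cleanup until after the next RCU grace period, and the callback then rate-limits the actual shrinking through a delayed_work, with a PENDING bit ensuring only one cleanup is in flight. A minimal, hypothetical module sketch of that pattern (standard kernel APIs only, none of the i915 specifics) might look like:

// SPDX-License-Identifier: MIT
/* Hypothetical sketch of the grace-period + delayed-work parking pattern. */
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static struct {
	struct delayed_work work;
	struct rcu_head rcu;
	unsigned long flags;
#define PENDING 0
} park;

static void park_work(struct work_struct *work)
{
	clear_bit(PENDING, &park.flags);
	pr_info("parked: shrink caches here\n");
}

static void park_grace(struct rcu_head *rcu)
{
	/* Grace period over; rate-limit the expensive work by ~2 seconds. */
	schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
}

static int __init park_init(void)
{
	INIT_DELAYED_WORK(&park.work, park_work);
	if (!test_and_set_bit(PENDING, &park.flags))
		call_rcu(&park.rcu, park_grace);
	return 0;
}

static void __exit park_exit(void)
{
	rcu_barrier();			/* wait for park_grace() to queue the work */
	flush_delayed_work(&park.work);
}

module_init(park_init);
module_exit(park_exit);
MODULE_LICENSE("GPL");

The rcu_barrier() before flush_delayed_work() mirrors __i915_globals_flush() above: without the barrier, the flush could run before the grace-period callback has queued anything.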
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 3c85cb0e..4c1836f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -41,6 +41,7 @@
 
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt_pm.h"
 
 #include "i915_drv.h"
 #include "i915_gpu_error.h"
@@ -232,14 +233,13 @@ static void pool_free(struct pagevec *pv, void *addr)
 
 #ifdef CONFIG_DRM_I915_COMPRESS_ERROR
 
-struct compress {
+struct i915_vma_compress {
 	struct pagevec pool;
 	struct z_stream_s zstream;
 	void *tmp;
-	bool wc;
 };
 
-static bool compress_init(struct compress *c)
+static bool compress_init(struct i915_vma_compress *c)
 {
 	struct z_stream_s *zstream = &c->zstream;
 
@@ -261,7 +261,7 @@ static bool compress_init(struct compress *c)
 	return true;
 }
 
-static bool compress_start(struct compress *c)
+static bool compress_start(struct i915_vma_compress *c)
 {
 	struct z_stream_s *zstream = &c->zstream;
 	void *workspace = zstream->workspace;
@@ -272,8 +272,8 @@ static bool compress_start(struct compress *c)
 	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
 }
 
-static void *compress_next_page(struct compress *c,
-				struct drm_i915_error_object *dst)
+static void *compress_next_page(struct i915_vma_compress *c,
+				struct i915_vma_coredump *dst)
 {
 	void *page;
 
@@ -287,14 +287,15 @@ static void *compress_next_page(struct compress *c,
 	return dst->pages[dst->page_count++] = page;
 }
 
-static int compress_page(struct compress *c,
+static int compress_page(struct i915_vma_compress *c,
 			 void *src,
-			 struct drm_i915_error_object *dst)
+			 struct i915_vma_coredump *dst,
+			 bool wc)
 {
 	struct z_stream_s *zstream = &c->zstream;
 
 	zstream->next_in = src;
-	if (c->wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
 		zstream->next_in = c->tmp;
 	zstream->avail_in = PAGE_SIZE;
 
@@ -318,8 +319,8 @@ static int compress_page(struct compress *c,
 	return 0;
 }
 
-static int compress_flush(struct compress *c,
-			  struct drm_i915_error_object *dst)
+static int compress_flush(struct i915_vma_compress *c,
+			  struct i915_vma_coredump *dst)
 {
 	struct z_stream_s *zstream = &c->zstream;
 
@@ -347,12 +348,12 @@ static int compress_flush(struct compress *c,
 	return 0;
 }
 
-static void compress_finish(struct compress *c)
+static void compress_finish(struct i915_vma_compress *c)
 {
 	zlib_deflateEnd(&c->zstream);
 }
 
-static void compress_fini(struct compress *c)
+static void compress_fini(struct i915_vma_compress *c)
 {
 	kfree(c->zstream.workspace);
 	if (c->tmp)
@@ -367,24 +368,24 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
 
 #else
 
-struct compress {
+struct i915_vma_compress {
 	struct pagevec pool;
-	bool wc;
 };
 
-static bool compress_init(struct compress *c)
+static bool compress_init(struct i915_vma_compress *c)
 {
 	return pool_init(&c->pool, ALLOW_FAIL) == 0;
 }
 
-static bool compress_start(struct compress *c)
+static bool compress_start(struct i915_vma_compress *c)
 {
 	return true;
 }
 
-static int compress_page(struct compress *c,
+static int compress_page(struct i915_vma_compress *c,
 			 void *src,
-			 struct drm_i915_error_object *dst)
+			 struct i915_vma_coredump *dst,
+			 bool wc)
 {
 	void *ptr;
 
@@ -392,24 +393,24 @@ static int compress_page(struct compress *c,
 	if (!ptr)
 		return -ENOMEM;
 
-	if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
+	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
 		memcpy(ptr, src, PAGE_SIZE);
 	dst->pages[dst->page_count++] = ptr;
 
 	return 0;
 }
 
-static int compress_flush(struct compress *c,
-			  struct drm_i915_error_object *dst)
+static int compress_flush(struct i915_vma_compress *c,
+			  struct i915_vma_coredump *dst)
 {
 	return 0;
 }
 
-static void compress_finish(struct compress *c)
+static void compress_finish(struct i915_vma_compress *c)
 {
 }
 
-static void compress_fini(struct compress *c)
+static void compress_fini(struct i915_vma_compress *c)
 {
 	pool_fini(&c->pool);
 }
@@ -422,7 +423,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
 #endif
 
 static void error_print_instdone(struct drm_i915_error_state_buf *m,
-				 const struct drm_i915_error_engine *ee)
+				 const struct intel_engine_coredump *ee)
 {
 	const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu;
 	int slice;
@@ -453,40 +454,56 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
 
 static void error_print_request(struct drm_i915_error_state_buf *m,
 				const char *prefix,
-				const struct drm_i915_error_request *erq,
-				const unsigned long epoch)
+				const struct i915_request_coredump *erq)
 {
 	if (!erq->seqno)
 		return;
 
-	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
+	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, start %08x, head %08x, tail %08x\n",
 		   prefix, erq->pid, erq->context, erq->seqno,
 		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
 			    &erq->flags) ? "!" : "",
 		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
 			    &erq->flags) ? "+" : "",
 		   erq->sched_attr.priority,
-		   jiffies_to_msecs(erq->jiffies - epoch),
 		   erq->start, erq->head, erq->tail);
 }
 
 static void error_print_context(struct drm_i915_error_state_buf *m,
 				const char *header,
-				const struct drm_i915_error_context *ctx)
+				const struct i915_gem_context_coredump *ctx)
 {
 	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n",
 		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
 		   ctx->guilty, ctx->active);
 }
 
-static void error_print_engine(struct drm_i915_error_state_buf *m,
-			       const struct drm_i915_error_engine *ee,
-			       const unsigned long epoch)
+static struct i915_vma_coredump *
+__find_vma(struct i915_vma_coredump *vma, const char *name)
 {
+	while (vma) {
+		if (strcmp(vma->name, name) == 0)
+			return vma;
+		vma = vma->next;
+	}
+
+	return NULL;
+}
+
+static struct i915_vma_coredump *
+find_batch(const struct intel_engine_coredump *ee)
+{
+	return __find_vma(ee->vma, "batch");
+}
+
+static void error_print_engine(struct drm_i915_error_state_buf *m,
+			       const struct intel_engine_coredump *ee)
+{
+	struct i915_vma_coredump *batch;
 	int n;
 
 	err_printf(m, "%s command stream:\n", ee->engine->name);
-	err_printf(m, "  IDLE?: %s\n", yesno(ee->idle));
+	err_printf(m, "  CCID:  0x%08x\n", ee->ccid);
 	err_printf(m, "  START: 0x%08x\n", ee->start);
 	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
 	err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
@@ -501,9 +518,10 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 
 	error_print_instdone(m, ee);
 
-	if (ee->batchbuffer) {
-		u64 start = ee->batchbuffer->gtt_offset;
-		u64 end = start + ee->batchbuffer->gtt_size;
+	batch = find_batch(ee);
+	if (batch) {
+		u64 start = batch->gtt_offset;
+		u64 end = start + batch->gtt_size;
 
 		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
 			   upper_32_bits(start), lower_32_bits(start),
@@ -535,13 +553,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 				   ee->vm_info.pp_dir_base);
 		}
 	}
-	err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
-	err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
 	err_printf(m, "  engine reset count: %u\n", ee->reset_count);
 
 	for (n = 0; n < ee->num_ports; n++) {
 		err_printf(m, "  ELSP[%d]:", n);
-		error_print_request(m, " ", &ee->execlist[n], epoch);
+		error_print_request(m, " ", &ee->execlist[n]);
 	}
 
 	error_print_context(m, "  Active context: ", &ee->context);
@@ -556,38 +572,35 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
 	va_end(args);
 }
 
-static void print_error_obj(struct drm_i915_error_state_buf *m,
+static void print_error_vma(struct drm_i915_error_state_buf *m,
 			    const struct intel_engine_cs *engine,
-			    const char *name,
-			    const struct drm_i915_error_object *obj)
+			    const struct i915_vma_coredump *vma)
 {
 	char out[ASCII85_BUFSZ];
 	int page;
 
-	if (!obj)
+	if (!vma)
 		return;
 
-	if (name) {
-		err_printf(m, "%s --- %s = 0x%08x %08x\n",
-			   engine ? engine->name : "global", name,
-			   upper_32_bits(obj->gtt_offset),
-			   lower_32_bits(obj->gtt_offset));
-	}
+	err_printf(m, "%s --- %s = 0x%08x %08x\n",
+		   engine ? engine->name : "global", vma->name,
+		   upper_32_bits(vma->gtt_offset),
+		   lower_32_bits(vma->gtt_offset));
 
-	if (obj->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
-		err_printf(m, "gtt_page_sizes = 0x%08x\n", obj->gtt_page_sizes);
+	if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
+		err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);
 
 	err_compression_marker(m);
-	for (page = 0; page < obj->page_count; page++) {
+	for (page = 0; page < vma->page_count; page++) {
 		int i, len;
 
 		len = PAGE_SIZE;
-		if (page == obj->page_count - 1)
-			len -= obj->unused;
+		if (page == vma->page_count - 1)
+			len -= vma->unused;
 		len = ascii85_encode_len(len);
 
 		for (i = 0; i < len; i++)
-			err_puts(m, ascii85_encode(obj->pages[page][i], out));
+			err_puts(m, ascii85_encode(vma->pages[page][i], out));
 	}
 	err_puts(m, "\n");
 }
@@ -599,9 +612,10 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
 {
 	struct drm_printer p = i915_error_printer(m);
 
-	intel_device_info_dump_flags(info, &p);
+	intel_device_info_print_static(info, &p);
+	intel_device_info_print_runtime(runtime, &p);
+	intel_device_info_print_topology(&runtime->sseu, &p);
 	intel_driver_caps_print(caps, &p);
-	intel_device_info_dump_topology(&runtime->sseu, &p);
 }
 
 static void err_print_params(struct drm_i915_error_state_buf *m,
@@ -625,18 +639,13 @@ static void err_print_pciid(struct drm_i915_error_state_buf *m,
 }
 
 static void err_print_uc(struct drm_i915_error_state_buf *m,
-			 const struct i915_error_uc *error_uc)
+			 const struct intel_uc_coredump *error_uc)
 {
 	struct drm_printer p = i915_error_printer(m);
-	const struct i915_gpu_state *error =
-		container_of(error_uc, typeof(*error), uc);
-
-	if (!error->device_info.has_gt_uc)
-		return;
 
 	intel_uc_fw_dump(&error_uc->guc_fw, &p);
 	intel_uc_fw_dump(&error_uc->huc_fw, &p);
-	print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
+	print_error_vma(m, NULL, error_uc->guc_log);
 }
 
 static void err_free_sgl(struct scatterlist *sgl)
@@ -656,12 +665,69 @@ static void err_free_sgl(struct scatterlist *sgl)
 	}
 }
 
-static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
-			       struct i915_gpu_state *error)
+static void err_print_gt(struct drm_i915_error_state_buf *m,
+			 struct intel_gt_coredump *gt)
 {
-	const struct drm_i915_error_engine *ee;
+	const struct intel_engine_coredump *ee;
+	int i;
+
+	err_printf(m, "GT awake: %s\n", yesno(gt->awake));
+	err_printf(m, "EIR: 0x%08x\n", gt->eir);
+	err_printf(m, "IER: 0x%08x\n", gt->ier);
+	for (i = 0; i < gt->ngtier; i++)
+		err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
+	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
+	err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
+	err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
+
+	for (i = 0; i < gt->nfence; i++)
+		err_printf(m, "  fence[%d] = %08llx\n", i, gt->fence[i]);
+
+	if (IS_GEN_RANGE(m->i915, 6, 11)) {
+		err_printf(m, "ERROR: 0x%08x\n", gt->error);
+		err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
+	}
+
+	if (INTEL_GEN(m->i915) >= 8)
+		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
+			   gt->fault_data1, gt->fault_data0);
+
+	if (IS_GEN(m->i915, 7))
+		err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);
+
+	if (IS_GEN_RANGE(m->i915, 8, 11))
+		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);
+
+	if (IS_GEN(m->i915, 12))
+		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);
+
+	if (INTEL_GEN(m->i915) >= 12) {
+		int i;
+
+		for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
+			err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
+				   gt->sfc_done[i]);
+
+		err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
+	}
+
+	for (ee = gt->engine; ee; ee = ee->next) {
+		const struct i915_vma_coredump *vma;
+
+		error_print_engine(m, ee);
+		for (vma = ee->vma; vma; vma = vma->next)
+			print_error_vma(m, ee->engine, vma);
+	}
+
+	if (gt->uc)
+		err_print_uc(m, gt->uc);
+}
+
+static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
+			       struct i915_gpu_coredump *error)
+{
+	const struct intel_engine_coredump *ee;
 	struct timespec64 ts;
-	int i, j;
 
 	if (*error->error_msg)
 		err_printf(m, "%s\n", error->error_msg);
@@ -681,7 +747,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
 	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
 		   error->capture, jiffies_to_msecs(jiffies - error->capture));
 
-	for (ee = error->engine; ee; ee = ee->next)
+	for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
 		err_printf(m, "Active process (on ring %s): %s [%d]\n",
 			   ee->engine->name,
 			   ee->context.comm,
@@ -707,90 +773,11 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
 			   CSR_VERSION_MINOR(csr->version));
 	}
 
-	err_printf(m, "GT awake: %s\n", yesno(error->awake));
 	err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
 	err_printf(m, "PM suspended: %s\n", yesno(error->suspended));
-	err_printf(m, "EIR: 0x%08x\n", error->eir);
-	err_printf(m, "IER: 0x%08x\n", error->ier);
-	for (i = 0; i < error->ngtier; i++)
-		err_printf(m, "GTIER[%d]: 0x%08x\n", i, error->gtier[i]);
-	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
-	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
-	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
-	err_printf(m, "CCID: 0x%08x\n", error->ccid);
 
-	for (i = 0; i < error->nfence; i++)
-		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
-
-	if (IS_GEN_RANGE(m->i915, 6, 11)) {
-		err_printf(m, "ERROR: 0x%08x\n", error->error);
-		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
-	}
-
-	if (INTEL_GEN(m->i915) >= 8)
-		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
-			   error->fault_data1, error->fault_data0);
-
-	if (IS_GEN(m->i915, 7))
-		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
-
-	if (IS_GEN_RANGE(m->i915, 8, 11))
-		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", error->gtt_cache);
-
-	if (IS_GEN(m->i915, 12))
-		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", error->aux_err);
-
-	if (INTEL_GEN(m->i915) >= 12) {
-		int i;
-
-		for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
-			err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
-				   error->sfc_done[i]);
-
-		err_printf(m, "  GAM_DONE: 0x%08x\n", error->gam_done);
-	}
-
-	for (ee = error->engine; ee; ee = ee->next)
-		error_print_engine(m, ee, error->capture);
-
-	for (ee = error->engine; ee; ee = ee->next) {
-		const struct drm_i915_error_object *obj;
-
-		obj = ee->batchbuffer;
-		if (obj) {
-			err_puts(m, ee->engine->name);
-			if (ee->context.pid)
-				err_printf(m, " (submitted by %s [%d])",
-					   ee->context.comm,
-					   ee->context.pid);
-			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
-				   upper_32_bits(obj->gtt_offset),
-				   lower_32_bits(obj->gtt_offset));
-			print_error_obj(m, ee->engine, NULL, obj);
-		}
-
-		for (j = 0; j < ee->user_bo_count; j++)
-			print_error_obj(m, ee->engine, "user", ee->user_bo[j]);
-
-		if (ee->num_requests) {
-			err_printf(m, "%s --- %d requests\n",
-				   ee->engine->name,
-				   ee->num_requests);
-			for (j = 0; j < ee->num_requests; j++)
-				error_print_request(m, " ",
-						    &ee->requests[j],
-						    error->capture);
-		}
-
-		print_error_obj(m, ee->engine, "ringbuffer", ee->ringbuffer);
-		print_error_obj(m, ee->engine, "HW Status", ee->hws_page);
-		print_error_obj(m, ee->engine, "HW context", ee->ctx);
-		print_error_obj(m, ee->engine, "WA context", ee->wa_ctx);
-		print_error_obj(m, ee->engine,
-				"WA batchbuffer", ee->wa_batchbuffer);
-		print_error_obj(m, ee->engine,
-				"NULL context", ee->default_state);
-	}
+	if (error->gt)
+		err_print_gt(m, error->gt);
 
 	if (error->overlay)
 		intel_overlay_print_error_state(m, error->overlay);
@@ -801,10 +788,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
 	err_print_capabilities(m, &error->device_info, &error->runtime_info,
 			       &error->driver_caps);
 	err_print_params(m, &error->params);
-	err_print_uc(m, &error->uc);
 }
 
-static int err_print_to_sgl(struct i915_gpu_state *error)
+static int err_print_to_sgl(struct i915_gpu_coredump *error)
 {
 	struct drm_i915_error_state_buf m;
 
@@ -841,8 +827,8 @@ static int err_print_to_sgl(struct i915_gpu_state *error)
 	return 0;
 }
 
-ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
-				      char *buf, loff_t off, size_t rem)
+ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
+					 char *buf, loff_t off, size_t rem)
 {
 	struct scatterlist *sg;
 	size_t count;
@@ -905,85 +891,88 @@ ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
 	return count;
 }
 
-static void i915_error_object_free(struct drm_i915_error_object *obj)
+static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
 {
-	int page;
+	while (vma) {
+		struct i915_vma_coredump *next = vma->next;
+		int page;
 
-	if (obj == NULL)
-		return;
+		for (page = 0; page < vma->page_count; page++)
+			free_page((unsigned long)vma->pages[page]);
 
-	for (page = 0; page < obj->page_count; page++)
-		free_page((unsigned long)obj->pages[page]);
-
-	kfree(obj);
+		kfree(vma);
+		vma = next;
+	}
 }
 
-
-static void cleanup_params(struct i915_gpu_state *error)
+static void cleanup_params(struct i915_gpu_coredump *error)
 {
 	i915_params_free(&error->params);
 }
 
-static void cleanup_uc_state(struct i915_gpu_state *error)
+static void cleanup_uc(struct intel_uc_coredump *uc)
 {
-	struct i915_error_uc *error_uc = &error->uc;
+	kfree(uc->guc_fw.path);
+	kfree(uc->huc_fw.path);
+	i915_vma_coredump_free(uc->guc_log);
 
-	kfree(error_uc->guc_fw.path);
-	kfree(error_uc->huc_fw.path);
-	i915_error_object_free(error_uc->guc_log);
+	kfree(uc);
 }
 
-void __i915_gpu_state_free(struct kref *error_ref)
+static void cleanup_gt(struct intel_gt_coredump *gt)
 {
-	struct i915_gpu_state *error =
-		container_of(error_ref, typeof(*error), ref);
-	long i;
+	while (gt->engine) {
+		struct intel_engine_coredump *ee = gt->engine;
 
-	while (error->engine) {
-		struct drm_i915_error_engine *ee = error->engine;
+		gt->engine = ee->next;
 
-		error->engine = ee->next;
-
-		for (i = 0; i < ee->user_bo_count; i++)
-			i915_error_object_free(ee->user_bo[i]);
-		kfree(ee->user_bo);
-
-		i915_error_object_free(ee->batchbuffer);
-		i915_error_object_free(ee->wa_batchbuffer);
-		i915_error_object_free(ee->ringbuffer);
-		i915_error_object_free(ee->hws_page);
-		i915_error_object_free(ee->ctx);
-		i915_error_object_free(ee->wa_ctx);
-
-		kfree(ee->requests);
+		i915_vma_coredump_free(ee->vma);
 		kfree(ee);
 	}
 
+	if (gt->uc)
+		cleanup_uc(gt->uc);
+
+	kfree(gt);
+}
+
+void __i915_gpu_coredump_free(struct kref *error_ref)
+{
+	struct i915_gpu_coredump *error =
+		container_of(error_ref, typeof(*error), ref);
+
+	while (error->gt) {
+		struct intel_gt_coredump *gt = error->gt;
+
+		error->gt = gt->next;
+		cleanup_gt(gt);
+	}
+
 	kfree(error->overlay);
 	kfree(error->display);
 
 	cleanup_params(error);
-	cleanup_uc_state(error);
 
 	err_free_sgl(error->sgl);
 	kfree(error);
 }
 
-static struct drm_i915_error_object *
-i915_error_object_create(struct drm_i915_private *i915,
-			 struct i915_vma *vma,
-			 struct compress *compress)
+static struct i915_vma_coredump *
+i915_vma_coredump_create(const struct intel_gt *gt,
+			 const struct i915_vma *vma,
+			 const char *name,
+			 struct i915_vma_compress *compress)
 {
-	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct i915_ggtt *ggtt = gt->ggtt;
 	const u64 slot = ggtt->error_capture.start;
-	struct drm_i915_error_object *dst;
+	struct i915_vma_coredump *dst;
 	unsigned long num_pages;
 	struct sgt_iter iter;
 	int ret;
 
 	might_sleep();
 
-	if (!vma || !vma->pages)
+	if (!vma || !vma->pages || !compress)
 		return NULL;
 
 	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
@@ -997,6 +986,9 @@ i915_error_object_create(struct drm_i915_private *i915,
 		return NULL;
 	}
 
+	strcpy(dst->name, name);
+	dst->next = NULL;
+
 	dst->gtt_offset = vma->node.start;
 	dst->gtt_size = vma->node.size;
 	dst->gtt_page_sizes = vma->page_sizes.gtt;
@@ -1004,9 +996,6 @@ i915_error_object_create(struct drm_i915_private *i915,
 	dst->page_count = 0;
 	dst->unused = 0;
 
-	compress->wc = i915_gem_object_is_lmem(vma->obj) ||
-		       drm_mm_node_allocated(&ggtt->error_capture);
-
 	ret = -EINVAL;
 	if (drm_mm_node_allocated(&ggtt->error_capture)) {
 		void __iomem *s;
@@ -1015,9 +1004,12 @@ i915_error_object_create(struct drm_i915_private *i915,
 		for_each_sgt_daddr(dma, iter, vma->pages) {
 			ggtt->vm.insert_page(&ggtt->vm, dma, slot,
 					     I915_CACHE_NONE, 0);
+			mb();
 
 			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
-			ret = compress_page(compress, (void  __force *)s, dst);
+			ret = compress_page(compress,
+					    (void  __force *)s, dst,
+					    true);
 			io_mapping_unmap(s);
 			if (ret)
 				break;
@@ -1030,7 +1022,9 @@ i915_error_object_create(struct drm_i915_private *i915,
 			void __iomem *s;
 
 			s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
-			ret = compress_page(compress, (void __force *)s, dst);
+			ret = compress_page(compress,
+					    (void __force *)s, dst,
+					    true);
 			io_mapping_unmap(s);
 			if (ret)
 				break;
@@ -1044,8 +1038,8 @@ i915_error_object_create(struct drm_i915_private *i915,
 			drm_clflush_pages(&page, 1);
 
 			s = kmap(page);
-			ret = compress_page(compress, s, dst);
-			kunmap(s);
+			ret = compress_page(compress, s, dst, false);
+			kunmap(page);
 
 			drm_clflush_pages(&page, 1);
 
@@ -1065,77 +1059,56 @@ i915_error_object_create(struct drm_i915_private *i915,
 	return dst;
 }
 
-/*
- * Generate a semi-unique error code. The code is not meant to have meaning, The
- * code's only purpose is to try to prevent false duplicated bug reports by
- * grossly estimating a GPU error state.
- *
- * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
- * the hang if we could strip the GTT offset information from it.
- *
- * It's only a small step better than a random number in its current form.
- */
-static u32 i915_error_generate_code(struct i915_gpu_state *error)
+static void gt_record_fences(struct intel_gt_coredump *gt)
 {
-	const struct drm_i915_error_engine *ee = error->engine;
-
-	/*
-	 * IPEHR would be an ideal way to detect errors, as it's the gross
-	 * measure of "the command that hung." However, has some very common
-	 * synchronization commands which almost always appear in the case
-	 * strictly a client bug. Use instdone to differentiate those some.
-	 */
-	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
-}
-
-static void gem_record_fences(struct i915_gpu_state *error)
-{
-	struct drm_i915_private *dev_priv = error->i915;
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct i915_ggtt *ggtt = gt->_gt->ggtt;
+	struct intel_uncore *uncore = gt->_gt->uncore;
 	int i;
 
-	if (INTEL_GEN(dev_priv) >= 6) {
-		for (i = 0; i < dev_priv->ggtt.num_fences; i++)
-			error->fence[i] =
+	if (INTEL_GEN(uncore->i915) >= 6) {
+		for (i = 0; i < ggtt->num_fences; i++)
+			gt->fence[i] =
 				intel_uncore_read64(uncore,
 						    FENCE_REG_GEN6_LO(i));
-	} else if (INTEL_GEN(dev_priv) >= 4) {
-		for (i = 0; i < dev_priv->ggtt.num_fences; i++)
-			error->fence[i] =
+	} else if (INTEL_GEN(uncore->i915) >= 4) {
+		for (i = 0; i < ggtt->num_fences; i++)
+			gt->fence[i] =
 				intel_uncore_read64(uncore,
 						    FENCE_REG_965_LO(i));
 	} else {
-		for (i = 0; i < dev_priv->ggtt.num_fences; i++)
-			error->fence[i] =
+		for (i = 0; i < ggtt->num_fences; i++)
+			gt->fence[i] =
 				intel_uncore_read(uncore, FENCE_REG(i));
 	}
-	error->nfence = i;
+	gt->nfence = i;
 }
 
-static void error_record_engine_registers(struct i915_gpu_state *error,
-					  struct intel_engine_cs *engine,
-					  struct drm_i915_error_engine *ee)
+static void engine_record_registers(struct intel_engine_coredump *ee)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
+	const struct intel_engine_cs *engine = ee->engine;
+	struct drm_i915_private *i915 = engine->i915;
 
-	if (INTEL_GEN(dev_priv) >= 6) {
+	if (INTEL_GEN(i915) >= 6) {
 		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
 
-		if (INTEL_GEN(dev_priv) >= 12)
-			ee->fault_reg = I915_READ(GEN12_RING_FAULT_REG);
-		else if (INTEL_GEN(dev_priv) >= 8)
-			ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
+		if (INTEL_GEN(i915) >= 12)
+			ee->fault_reg = intel_uncore_read(engine->uncore,
+							  GEN12_RING_FAULT_REG);
+		else if (INTEL_GEN(i915) >= 8)
+			ee->fault_reg = intel_uncore_read(engine->uncore,
+							  GEN8_RING_FAULT_REG);
 		else
 			ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
 	}
 
-	if (INTEL_GEN(dev_priv) >= 4) {
+	if (INTEL_GEN(i915) >= 4) {
 		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
 		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
 		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
 		ee->instps = ENGINE_READ(engine, RING_INSTPS);
 		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
-		if (INTEL_GEN(dev_priv) >= 8) {
+		ee->ccid = ENGINE_READ(engine, CCID);
+		if (INTEL_GEN(i915) >= 8) {
 			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
 			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
 		}
@@ -1154,13 +1127,13 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 	ee->head = ENGINE_READ(engine, RING_HEAD);
 	ee->tail = ENGINE_READ(engine, RING_TAIL);
 	ee->ctl = ENGINE_READ(engine, RING_CTL);
-	if (INTEL_GEN(dev_priv) > 2)
+	if (INTEL_GEN(i915) > 2)
 		ee->mode = ENGINE_READ(engine, RING_MI_MODE);
 
-	if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
+	if (!HWS_NEEDS_PHYSICAL(i915)) {
 		i915_reg_t mmio;
 
-		if (IS_GEN(dev_priv, 7)) {
+		if (IS_GEN(i915, 7)) {
 			switch (engine->id) {
 			default:
 				MISSING_CASE(engine->id);
@@ -1185,110 +1158,63 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 			mmio = RING_HWS_PGA(engine->mmio_base);
 		}
 
-		ee->hws = I915_READ(mmio);
+		ee->hws = intel_uncore_read(engine->uncore, mmio);
 	}
 
-	ee->idle = intel_engine_is_idle(engine);
-	ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
-						  engine);
+	ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);
 
-	if (HAS_PPGTT(dev_priv)) {
+	if (HAS_PPGTT(i915)) {
 		int i;
 
 		ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);
 
-		if (IS_GEN(dev_priv, 6)) {
+		if (IS_GEN(i915, 6)) {
 			ee->vm_info.pp_dir_base =
 				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
-		} else if (IS_GEN(dev_priv, 7)) {
+		} else if (IS_GEN(i915, 7)) {
 			ee->vm_info.pp_dir_base =
 				ENGINE_READ(engine, RING_PP_DIR_BASE);
-		} else if (INTEL_GEN(dev_priv) >= 8) {
+		} else if (INTEL_GEN(i915) >= 8) {
 			u32 base = engine->mmio_base;
 
 			for (i = 0; i < 4; i++) {
 				ee->vm_info.pdp[i] =
-					I915_READ(GEN8_RING_PDP_UDW(base, i));
+					intel_uncore_read(engine->uncore,
+							  GEN8_RING_PDP_UDW(base, i));
 				ee->vm_info.pdp[i] <<= 32;
 				ee->vm_info.pdp[i] |=
-					I915_READ(GEN8_RING_PDP_LDW(base, i));
+					intel_uncore_read(engine->uncore,
+							  GEN8_RING_PDP_LDW(base, i));
 			}
 		}
 	}
 }
 
 static void record_request(const struct i915_request *request,
-			   struct drm_i915_error_request *erq)
+			   struct i915_request_coredump *erq)
 {
-	const struct i915_gem_context *ctx = request->gem_context;
+	const struct i915_gem_context *ctx;
 
 	erq->flags = request->fence.flags;
 	erq->context = request->fence.context;
 	erq->seqno = request->fence.seqno;
 	erq->sched_attr = request->sched.attr;
-	erq->jiffies = request->emitted_jiffies;
 	erq->start = i915_ggtt_offset(request->ring->vma);
 	erq->head = request->head;
 	erq->tail = request->tail;
 
+	erq->pid = 0;
 	rcu_read_lock();
-	erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
+	ctx = rcu_dereference(request->context->gem_context);
+	if (ctx)
+		erq->pid = pid_nr(ctx->pid);
 	rcu_read_unlock();
 }
 
-static void engine_record_requests(struct intel_engine_cs *engine,
-				   struct i915_request *first,
-				   struct drm_i915_error_engine *ee)
+static void engine_record_execlists(struct intel_engine_coredump *ee)
 {
-	struct i915_request *request;
-	int count;
-
-	count = 0;
-	request = first;
-	list_for_each_entry_from(request, &engine->active.requests, sched.link)
-		count++;
-	if (!count)
-		return;
-
-	ee->requests = kcalloc(count, sizeof(*ee->requests), ATOMIC_MAYFAIL);
-	if (!ee->requests)
-		return;
-
-	ee->num_requests = count;
-
-	count = 0;
-	request = first;
-	list_for_each_entry_from(request,
-				 &engine->active.requests, sched.link) {
-		if (count >= ee->num_requests) {
-			/*
-			 * If the ring request list was changed in
-			 * between the point where the error request
-			 * list was created and dimensioned and this
-			 * point then just exit early to avoid crashes.
-			 *
-			 * We don't need to communicate that the
-			 * request list changed state during error
-			 * state capture and that the error state is
-			 * slightly incorrect as a consequence since we
-			 * are typically only interested in the request
-			 * list state at the point of error state
-			 * capture, not in any changes happening during
-			 * the capture.
-			 */
-			break;
-		}
-
-		record_request(request, &ee->requests[count++]);
-	}
-	ee->num_requests = count;
-}
-
-static void error_record_engine_execlists(const struct intel_engine_cs *engine,
-					  struct drm_i915_error_engine *ee)
-{
-	const struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct i915_request * const *port = execlists->active;
+	const struct intel_engine_execlists * const el = &ee->engine->execlists;
+	struct i915_request * const *port = el->active;
 	unsigned int n = 0;
 
 	while (*port)
@@ -1297,47 +1223,57 @@ static void error_record_engine_execlists(const struct intel_engine_cs *engine,
 	ee->num_ports = n;
 }
 
-static bool record_context(struct drm_i915_error_context *e,
+static bool record_context(struct i915_gem_context_coredump *e,
 			   const struct i915_request *rq)
 {
-	const struct i915_gem_context *ctx = rq->gem_context;
+	struct i915_gem_context *ctx;
+	struct task_struct *task;
+	bool capture;
 
-	if (ctx->pid) {
-		struct task_struct *task;
+	rcu_read_lock();
+	ctx = rcu_dereference(rq->context->gem_context);
+	if (ctx && !kref_get_unless_zero(&ctx->ref))
+		ctx = NULL;
+	rcu_read_unlock();
+	if (!ctx)
+		return false;
 
-		rcu_read_lock();
-		task = pid_task(ctx->pid, PIDTYPE_PID);
-		if (task) {
-			strcpy(e->comm, task->comm);
-			e->pid = task->pid;
-		}
-		rcu_read_unlock();
+	rcu_read_lock();
+	task = pid_task(ctx->pid, PIDTYPE_PID);
+	if (task) {
+		strcpy(e->comm, task->comm);
+		e->pid = task->pid;
 	}
+	rcu_read_unlock();
 
 	e->sched_attr = ctx->sched;
 	e->guilty = atomic_read(&ctx->guilty_count);
 	e->active = atomic_read(&ctx->active_count);
 
-	return i915_gem_context_no_error_capture(ctx);
+	capture = i915_gem_context_no_error_capture(ctx);
+
+	i915_gem_context_put(ctx);
+	return capture;
 }
 
-struct capture_vma {
-	struct capture_vma *next;
-	void **slot;
+struct intel_engine_capture_vma {
+	struct intel_engine_capture_vma *next;
+	struct i915_vma *vma;
+	char name[16];
 };
 
-static struct capture_vma *
-capture_vma(struct capture_vma *next,
+static struct intel_engine_capture_vma *
+capture_vma(struct intel_engine_capture_vma *next,
 	    struct i915_vma *vma,
-	    struct drm_i915_error_object **out)
+	    const char *name,
+	    gfp_t gfp)
 {
-	struct capture_vma *c;
+	struct intel_engine_capture_vma *c;
 
-	*out = NULL;
 	if (!vma)
 		return next;
 
-	c = kmalloc(sizeof(*c), ATOMIC_MAYFAIL);
+	c = kmalloc(sizeof(*c), gfp);
 	if (!c)
 		return next;
 
@@ -1346,54 +1282,31 @@ capture_vma(struct capture_vma *next,
 		return next;
 	}
 
-	c->slot = (void **)out;
-	*c->slot = i915_vma_get(vma);
+	strcpy(c->name, name);
+	c->vma = i915_vma_get(vma);
 
 	c->next = next;
 	return c;
 }
 
-static struct capture_vma *
-request_record_user_bo(struct i915_request *request,
-		       struct drm_i915_error_engine *ee,
-		       struct capture_vma *capture)
+static struct intel_engine_capture_vma *
+capture_user(struct intel_engine_capture_vma *capture,
+	     const struct i915_request *rq,
+	     gfp_t gfp)
 {
 	struct i915_capture_list *c;
-	struct drm_i915_error_object **bo;
-	long count, max;
 
-	max = 0;
-	for (c = request->capture_list; c; c = c->next)
-		max++;
-	if (!max)
-		return capture;
-
-	bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL);
-	if (!bo) {
-		/* If we can't capture everything, try to capture something. */
-		max = min_t(long, max, PAGE_SIZE / sizeof(*bo));
-		bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL);
-	}
-	if (!bo)
-		return capture;
-
-	count = 0;
-	for (c = request->capture_list; c; c = c->next) {
-		capture = capture_vma(capture, c->vma, &bo[count]);
-		if (++count == max)
-			break;
-	}
-
-	ee->user_bo = bo;
-	ee->user_bo_count = count;
+	for (c = rq->capture_list; c; c = c->next)
+		capture = capture_vma(capture, c->vma, "user", gfp);
 
 	return capture;
 }
 
-static struct drm_i915_error_object *
-capture_object(struct drm_i915_private *dev_priv,
+static struct i915_vma_coredump *
+capture_object(const struct intel_gt *gt,
 	       struct drm_i915_gem_object *obj,
-	       struct compress *compress)
+	       const char *name,
+	       struct i915_vma_compress *compress)
 {
 	if (obj && i915_gem_object_has_pages(obj)) {
 		struct i915_vma fake = {
@@ -1403,127 +1316,175 @@ capture_object(struct drm_i915_private *dev_priv,
 			.obj = obj,
 		};
 
-		return i915_error_object_create(dev_priv, &fake, compress);
+		return i915_vma_coredump_create(gt, &fake, name, compress);
 	} else {
 		return NULL;
 	}
 }
 
-static void
-gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
+static void add_vma(struct intel_engine_coredump *ee,
+		    struct i915_vma_coredump *vma)
 {
-	struct drm_i915_private *i915 = error->i915;
-	struct intel_engine_cs *engine;
-	struct drm_i915_error_engine *ee;
+	if (vma) {
+		vma->next = ee->vma;
+		ee->vma = vma;
+	}
+}
 
-	ee = kzalloc(sizeof(*ee), GFP_KERNEL);
+struct intel_engine_coredump *
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
+{
+	struct intel_engine_coredump *ee;
+
+	ee = kzalloc(sizeof(*ee), gfp);
 	if (!ee)
-		return;
+		return NULL;
 
-	for_each_uabi_engine(engine, i915) {
-		struct capture_vma *capture = NULL;
-		struct i915_request *request;
-		unsigned long flags;
+	ee->engine = engine;
+
+	engine_record_registers(ee);
+	engine_record_execlists(ee);
+
+	return ee;
+}
+
+struct intel_engine_capture_vma *
+intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
+				  struct i915_request *rq,
+				  gfp_t gfp)
+{
+	struct intel_engine_capture_vma *vma = NULL;
+
+	ee->simulated |= record_context(&ee->context, rq);
+	if (ee->simulated)
+		return NULL;
+
+	/*
+	 * We need to copy these to an anonymous buffer
+	 * as the simplest method to avoid being overwritten
+	 * by userspace.
+	 */
+	vma = capture_vma(vma, rq->batch, "batch", gfp);
+	vma = capture_user(vma, rq, gfp);
+	vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
+	vma = capture_vma(vma, rq->context->state, "HW context", gfp);
+
+	ee->rq_head = rq->head;
+	ee->rq_post = rq->postfix;
+	ee->rq_tail = rq->tail;
+
+	return vma;
+}
+
+void
+intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
+			      struct intel_engine_capture_vma *capture,
+			      struct i915_vma_compress *compress)
+{
+	const struct intel_engine_cs *engine = ee->engine;
+
+	while (capture) {
+		struct intel_engine_capture_vma *this = capture;
+		struct i915_vma *vma = this->vma;
+
+		add_vma(ee,
+			i915_vma_coredump_create(engine->gt,
+						 vma, this->name,
+						 compress));
+
+		i915_active_release(&vma->active);
+		i915_vma_put(vma);
+
+		capture = this->next;
+		kfree(this);
+	}
+
+	add_vma(ee,
+		i915_vma_coredump_create(engine->gt,
+					 engine->status_page.vma,
+					 "HW Status",
+					 compress));
+
+	add_vma(ee,
+		i915_vma_coredump_create(engine->gt,
+					 engine->wa_ctx.vma,
+					 "WA context",
+					 compress));
+
+	add_vma(ee,
+		capture_object(engine->gt,
+			       engine->default_state,
+			       "NULL context",
+			       compress));
+}
+
+static struct intel_engine_coredump *
+capture_engine(struct intel_engine_cs *engine,
+	       struct i915_vma_compress *compress)
+{
+	struct intel_engine_capture_vma *capture = NULL;
+	struct intel_engine_coredump *ee;
+	struct i915_request *rq;
+	unsigned long flags;
+
+	ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
+	if (!ee)
+		return NULL;
+
+	spin_lock_irqsave(&engine->active.lock, flags);
+	rq = intel_engine_find_active_request(engine);
+	if (rq)
+		capture = intel_engine_coredump_add_request(ee, rq,
+							    ATOMIC_MAYFAIL);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
+	if (!capture) {
+		kfree(ee);
+		return NULL;
+	}
+
+	intel_engine_coredump_add_vma(ee, capture, compress);
+
+	return ee;
+}
+
+static void
+gt_record_engines(struct intel_gt_coredump *gt,
+		  struct i915_vma_compress *compress)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, gt->_gt, id) {
+		struct intel_engine_coredump *ee;
 
 		/* Refill our page pool before entering atomic section */
 		pool_refill(&compress->pool, ALLOW_FAIL);
 
-		spin_lock_irqsave(&engine->active.lock, flags);
-		request = intel_engine_find_active_request(engine);
-		if (!request) {
-			spin_unlock_irqrestore(&engine->active.lock, flags);
+		ee = capture_engine(engine, compress);
+		if (!ee)
+			continue;
+
+		gt->simulated |= ee->simulated;
+		if (ee->simulated) {
+			kfree(ee);
 			continue;
 		}
 
-		error->simulated |= record_context(&ee->context, request);
-
-		/*
-		 * We need to copy these to an anonymous buffer
-		 * as the simplest method to avoid being overwritten
-		 * by userspace.
-		 */
-		capture = capture_vma(capture,
-				      request->batch,
-				      &ee->batchbuffer);
-
-		if (HAS_BROKEN_CS_TLB(i915))
-			capture = capture_vma(capture,
-					      engine->gt->scratch,
-					      &ee->wa_batchbuffer);
-
-		capture = request_record_user_bo(request, ee, capture);
-
-		capture = capture_vma(capture,
-				      request->hw_context->state,
-				      &ee->ctx);
-
-		capture = capture_vma(capture,
-				      request->ring->vma,
-				      &ee->ringbuffer);
-
-		ee->cpu_ring_head = request->ring->head;
-		ee->cpu_ring_tail = request->ring->tail;
-
-		ee->rq_head = request->head;
-		ee->rq_post = request->postfix;
-		ee->rq_tail = request->tail;
-
-		engine_record_requests(engine, request, ee);
-		spin_unlock_irqrestore(&engine->active.lock, flags);
-
-		error_record_engine_registers(error, engine, ee);
-		error_record_engine_execlists(engine, ee);
-
-		while (capture) {
-			struct capture_vma *this = capture;
-			struct i915_vma *vma = *this->slot;
-
-			*this->slot =
-				i915_error_object_create(i915, vma, compress);
-
-			i915_active_release(&vma->active);
-			i915_vma_put(vma);
-
-			capture = this->next;
-			kfree(this);
-		}
-
-		ee->hws_page =
-			i915_error_object_create(i915,
-						 engine->status_page.vma,
-						 compress);
-
-		ee->wa_ctx =
-			i915_error_object_create(i915,
-						 engine->wa_ctx.vma,
-						 compress);
-
-		ee->default_state =
-			capture_object(i915, engine->default_state, compress);
-
-		ee->engine = engine;
-
-		ee->next = error->engine;
-		error->engine = ee;
-
-		ee = kzalloc(sizeof(*ee), GFP_KERNEL);
-		if (!ee)
-			return;
+		ee->next = gt->engine;
+		gt->engine = ee;
 	}
-
-	kfree(ee);
 }
 
-static void
-capture_uc_state(struct i915_gpu_state *error, struct compress *compress)
+static struct intel_uc_coredump *
+gt_record_uc(struct intel_gt_coredump *gt,
+	     struct i915_vma_compress *compress)
 {
-	struct drm_i915_private *i915 = error->i915;
-	struct i915_error_uc *error_uc = &error->uc;
-	struct intel_uc *uc = &i915->gt.uc;
+	const struct intel_uc *uc = &gt->_gt->uc;
+	struct intel_uc_coredump *error_uc;
 
-	/* Capturing uC state won't be useful if there is no GuC */
-	if (!error->device_info.has_gt_uc)
-		return;
+	error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
+	if (!error_uc)
+		return NULL;
 
 	memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
 	memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
@@ -1534,19 +1495,42 @@ capture_uc_state(struct i915_gpu_state *error, struct compress *compress)
 	 */
 	error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
 	error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
-	error_uc->guc_log = i915_error_object_create(i915,
-						     uc->guc.log.vma,
-						     compress);
+	error_uc->guc_log =
+		i915_vma_coredump_create(gt->_gt,
+					 uc->guc.log.vma, "GuC log buffer",
+					 compress);
+
+	return error_uc;
+}
+
+static void gt_capture_prepare(struct intel_gt_coredump *gt)
+{
+	struct i915_ggtt *ggtt = gt->_gt->ggtt;
+
+	mutex_lock(&ggtt->error_mutex);
+}
+
+static void gt_capture_finish(struct intel_gt_coredump *gt)
+{
+	struct i915_ggtt *ggtt = gt->_gt->ggtt;
+
+	if (drm_mm_node_allocated(&ggtt->error_capture))
+		ggtt->vm.clear_range(&ggtt->vm,
+				     ggtt->error_capture.start,
+				     PAGE_SIZE);
+
+	mutex_unlock(&ggtt->error_mutex);
 }
 
 /* Capture all registers which don't fit into another category. */
-static void capture_reg_state(struct i915_gpu_state *error)
+static void gt_record_regs(struct intel_gt_coredump *gt)
 {
-	struct drm_i915_private *i915 = error->i915;
-	struct intel_uncore *uncore = &i915->uncore;
+	struct intel_uncore *uncore = gt->_gt->uncore;
+	struct drm_i915_private *i915 = uncore->i915;
 	int i;
 
-	/* General organization
+	/*
+	 * General organization
 	 * 1. Registers specific to a single generation
 	 * 2. Registers which belong to multiple generations
 	 * 3. Feature specific registers.
@@ -1556,138 +1540,162 @@ static void capture_reg_state(struct i915_gpu_state *error)
 
 	/* 1: Registers specific to a single generation */
 	if (IS_VALLEYVIEW(i915)) {
-		error->gtier[0] = intel_uncore_read(uncore, GTIER);
-		error->ier = intel_uncore_read(uncore, VLV_IER);
-		error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
+		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
+		gt->ier = intel_uncore_read(uncore, VLV_IER);
+		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
 	}
 
 	if (IS_GEN(i915, 7))
-		error->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
+		gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
 
 	if (INTEL_GEN(i915) >= 12) {
-		error->fault_data0 = intel_uncore_read(uncore,
-						       GEN12_FAULT_TLB_DATA0);
-		error->fault_data1 = intel_uncore_read(uncore,
-						       GEN12_FAULT_TLB_DATA1);
+		gt->fault_data0 = intel_uncore_read(uncore,
+						    GEN12_FAULT_TLB_DATA0);
+		gt->fault_data1 = intel_uncore_read(uncore,
+						    GEN12_FAULT_TLB_DATA1);
 	} else if (INTEL_GEN(i915) >= 8) {
-		error->fault_data0 = intel_uncore_read(uncore,
-						       GEN8_FAULT_TLB_DATA0);
-		error->fault_data1 = intel_uncore_read(uncore,
-						       GEN8_FAULT_TLB_DATA1);
+		gt->fault_data0 = intel_uncore_read(uncore,
+						    GEN8_FAULT_TLB_DATA0);
+		gt->fault_data1 = intel_uncore_read(uncore,
+						    GEN8_FAULT_TLB_DATA1);
 	}
 
 	if (IS_GEN(i915, 6)) {
-		error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
-		error->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
-		error->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
+		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
+		gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
+		gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
 	}
 
 	/* 2: Registers which belong to multiple generations */
 	if (INTEL_GEN(i915) >= 7)
-		error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
+		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
 
 	if (INTEL_GEN(i915) >= 6) {
-		error->derrmr = intel_uncore_read(uncore, DERRMR);
+		gt->derrmr = intel_uncore_read(uncore, DERRMR);
 		if (INTEL_GEN(i915) < 12) {
-			error->error = intel_uncore_read(uncore, ERROR_GEN6);
-			error->done_reg = intel_uncore_read(uncore, DONE_REG);
+			gt->error = intel_uncore_read(uncore, ERROR_GEN6);
+			gt->done_reg = intel_uncore_read(uncore, DONE_REG);
 		}
 	}
 
-	if (INTEL_GEN(i915) >= 5)
-		error->ccid = intel_uncore_read(uncore, CCID(RENDER_RING_BASE));
-
 	/* 3: Feature specific registers */
 	if (IS_GEN_RANGE(i915, 6, 7)) {
-		error->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
-		error->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
+		gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+		gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
 	}
 
 	if (IS_GEN_RANGE(i915, 8, 11))
-		error->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
+		gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
 
 	if (IS_GEN(i915, 12))
-		error->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
+		gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
 
 	if (INTEL_GEN(i915) >= 12) {
 		for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
-			error->sfc_done[i] =
+			gt->sfc_done[i] =
 				intel_uncore_read(uncore, GEN12_SFC_DONE(i));
 		}
 
-		error->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
+		gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
 	}
 
 	/* 4: Everything else */
 	if (INTEL_GEN(i915) >= 11) {
-		error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
-		error->gtier[0] =
+		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
+		gt->gtier[0] =
 			intel_uncore_read(uncore,
 					  GEN11_RENDER_COPY_INTR_ENABLE);
-		error->gtier[1] =
+		gt->gtier[1] =
 			intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
-		error->gtier[2] =
+		gt->gtier[2] =
 			intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
-		error->gtier[3] =
+		gt->gtier[3] =
 			intel_uncore_read(uncore,
 					  GEN11_GPM_WGBOXPERF_INTR_ENABLE);
-		error->gtier[4] =
+		gt->gtier[4] =
 			intel_uncore_read(uncore,
 					  GEN11_CRYPTO_RSVD_INTR_ENABLE);
-		error->gtier[5] =
+		gt->gtier[5] =
 			intel_uncore_read(uncore,
 					  GEN11_GUNIT_CSME_INTR_ENABLE);
-		error->ngtier = 6;
+		gt->ngtier = 6;
 	} else if (INTEL_GEN(i915) >= 8) {
-		error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
+		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
 		for (i = 0; i < 4; i++)
-			error->gtier[i] = intel_uncore_read(uncore,
-							    GEN8_GT_IER(i));
-		error->ngtier = 4;
+			gt->gtier[i] =
+				intel_uncore_read(uncore, GEN8_GT_IER(i));
+		gt->ngtier = 4;
 	} else if (HAS_PCH_SPLIT(i915)) {
-		error->ier = intel_uncore_read(uncore, DEIER);
-		error->gtier[0] = intel_uncore_read(uncore, GTIER);
-		error->ngtier = 1;
+		gt->ier = intel_uncore_read(uncore, DEIER);
+		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
+		gt->ngtier = 1;
 	} else if (IS_GEN(i915, 2)) {
-		error->ier = intel_uncore_read16(uncore, GEN2_IER);
+		gt->ier = intel_uncore_read16(uncore, GEN2_IER);
 	} else if (!IS_VALLEYVIEW(i915)) {
-		error->ier = intel_uncore_read(uncore, GEN2_IER);
+		gt->ier = intel_uncore_read(uncore, GEN2_IER);
 	}
-	error->eir = intel_uncore_read(uncore, EIR);
-	error->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
+	gt->eir = intel_uncore_read(uncore, EIR);
+	gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
 }
 
-static const char *
-error_msg(struct i915_gpu_state *error,
-	  intel_engine_mask_t engines, const char *msg)
+/*
+ * Generate a semi-unique error code. The code is not meant to have meaning; its
+ * only purpose is to try to prevent false duplicated bug reports by
+ * grossly estimating a GPU error state.
+ *
+ * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
+ * the hang if we could strip the GTT offset information from it.
+ *
+ * It's only a small step better than a random number in its current form.
+ */
+static u32 generate_ecode(const struct intel_engine_coredump *ee)
 {
+	/*
+	 * IPEHR would be an ideal way to detect errors, as it's the gross
+	 * measure of "the command that hung." However, it has some very common
+	 * synchronization commands which almost always appear in the case of
+	 * strictly a client bug. Use instdone to differentiate those somewhat.
+	 */
+	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
+}
+
+static const char *error_msg(struct i915_gpu_coredump *error)
+{
+	struct intel_engine_coredump *first = NULL;
+	struct intel_gt_coredump *gt;
+	intel_engine_mask_t engines;
 	int len;
 
+	engines = 0;
+	for (gt = error->gt; gt; gt = gt->next) {
+		struct intel_engine_coredump *cs;
+
+		if (gt->engine && !first)
+			first = gt->engine;
+
+		for (cs = gt->engine; cs; cs = cs->next)
+			engines |= cs->engine->mask;
+	}
+
 	len = scnprintf(error->error_msg, sizeof(error->error_msg),
-			"GPU HANG: ecode %d:%x:0x%08x",
+			"GPU HANG: ecode %d:%x:%08x",
 			INTEL_GEN(error->i915), engines,
-			i915_error_generate_code(error));
-	if (error->engine) {
+			generate_ecode(first));
+	if (first) {
 		/* Just show the first executing process, more is confusing */
 		len += scnprintf(error->error_msg + len,
 				 sizeof(error->error_msg) - len,
 				 ", in %s [%d]",
-				 error->engine->context.comm,
-				 error->engine->context.pid);
+				 first->context.comm, first->context.pid);
 	}
-	if (msg)
-		len += scnprintf(error->error_msg + len,
-				 sizeof(error->error_msg) - len,
-				 ", %s", msg);
 
 	return error->error_msg;
 }
 
-static void capture_gen_state(struct i915_gpu_state *error)
+static void capture_gen(struct i915_gpu_coredump *error)
 {
 	struct drm_i915_private *i915 = error->i915;
 
-	error->awake = i915->gt.awake;
 	error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
 	error->suspended = i915->runtime_pm.suspended;
 
@@ -1698,6 +1706,7 @@ static void capture_gen_state(struct i915_gpu_state *error)
 	error->reset_count = i915_reset_count(&i915->gpu_error);
 	error->suspend_count = i915->suspend_count;
 
+	i915_params_copy(&error->params, &i915_modparams);
 	memcpy(&error->device_info,
 	       INTEL_INFO(i915),
 	       sizeof(error->device_info));
@@ -1707,46 +1716,17 @@ static void capture_gen_state(struct i915_gpu_state *error)
 	error->driver_caps = i915->caps;
 }
 
-static void capture_params(struct i915_gpu_state *error)
+struct i915_gpu_coredump *
+i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
 {
-	i915_params_copy(&error->params, &i915_modparams);
-}
+	struct i915_gpu_coredump *error;
 
-static void capture_finish(struct i915_gpu_state *error)
-{
-	struct i915_ggtt *ggtt = &error->i915->ggtt;
+	if (!i915_modparams.error_capture)
+		return NULL;
 
-	if (drm_mm_node_allocated(&ggtt->error_capture)) {
-		const u64 slot = ggtt->error_capture.start;
-
-		ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
-	}
-}
-
-#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
-
-struct i915_gpu_state *
-i915_capture_gpu_state(struct drm_i915_private *i915)
-{
-	struct i915_gpu_state *error;
-	struct compress compress;
-
-	/* Check if GPU capture has been disabled */
-	error = READ_ONCE(i915->gpu_error.first_error);
-	if (IS_ERR(error))
-		return error;
-
-	error = kzalloc(sizeof(*error), ALLOW_FAIL);
-	if (!error) {
-		i915_disable_error_state(i915, -ENOMEM);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	if (!compress_init(&compress)) {
-		kfree(error);
-		i915_disable_error_state(i915, -ENOMEM);
-		return ERR_PTR(-ENOMEM);
-	}
+	error = kzalloc(sizeof(*error), gfp);
+	if (!error)
+		return NULL;
 
 	kref_init(&error->ref);
 	error->i915 = i915;
@@ -1756,66 +1736,118 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
 	error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
 	error->capture = jiffies;
 
-	capture_params(error);
-	capture_gen_state(error);
-	capture_uc_state(error, &compress);
-	capture_reg_state(error);
-	gem_record_fences(error);
-	gem_record_rings(error, &compress);
-
-	error->overlay = intel_overlay_capture_error_state(i915);
-	error->display = intel_display_capture_error_state(i915);
-
-	capture_finish(error);
-	compress_fini(&compress);
+	capture_gen(error);
 
 	return error;
 }
 
-/**
- * i915_capture_error_state - capture an error record for later analysis
- * @i915: i915 device
- * @engine_mask: the mask of engines triggering the hang
- * @msg: a message to insert into the error capture header
- *
- * Should be called when an error is detected (either a hang or an error
- * interrupt) to capture error state from the time of the error.  Fills
- * out a structure which becomes available in debugfs for user level tools
- * to pick up.
- */
-void i915_capture_error_state(struct drm_i915_private *i915,
-			      intel_engine_mask_t engine_mask,
-			      const char *msg)
+#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
+
+struct intel_gt_coredump *
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
 {
-	static bool warned;
-	struct i915_gpu_state *error;
-	unsigned long flags;
+	struct intel_gt_coredump *gc;
 
-	if (!i915_modparams.error_capture)
+	gc = kzalloc(sizeof(*gc), gfp);
+	if (!gc)
+		return NULL;
+
+	gc->_gt = gt;
+	gc->awake = intel_gt_pm_is_awake(gt);
+
+	gt_record_regs(gc);
+	gt_record_fences(gc);
+
+	return gc;
+}
+
+struct i915_vma_compress *
+i915_vma_capture_prepare(struct intel_gt_coredump *gt)
+{
+	struct i915_vma_compress *compress;
+
+	compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
+	if (!compress)
+		return NULL;
+
+	if (!compress_init(compress)) {
+		kfree(compress);
+		return NULL;
+	}
+
+	gt_capture_prepare(gt);
+
+	return compress;
+}
+
+void i915_vma_capture_finish(struct intel_gt_coredump *gt,
+			     struct i915_vma_compress *compress)
+{
+	if (!compress)
 		return;
 
-	if (READ_ONCE(i915->gpu_error.first_error))
-		return;
+	gt_capture_finish(gt);
 
-	error = i915_capture_gpu_state(i915);
+	compress_fini(compress);
+	kfree(compress);
+}
+
+struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915)
+{
+	struct i915_gpu_coredump *error;
+
+	/* Check if GPU capture has been disabled */
+	error = READ_ONCE(i915->gpu_error.first_error);
 	if (IS_ERR(error))
-		return;
+		return error;
 
-	dev_info(i915->drm.dev, "%s\n", error_msg(error, engine_mask, msg));
+	error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
+	if (!error)
+		return ERR_PTR(-ENOMEM);
 
-	if (!error->simulated) {
-		spin_lock_irqsave(&i915->gpu_error.lock, flags);
-		if (!i915->gpu_error.first_error) {
-			i915->gpu_error.first_error = error;
-			error = NULL;
+	error->gt = intel_gt_coredump_alloc(&i915->gt, ALLOW_FAIL);
+	if (error->gt) {
+		struct i915_vma_compress *compress;
+
+		compress = i915_vma_capture_prepare(error->gt);
+		if (!compress) {
+			kfree(error->gt);
+			kfree(error);
+			return ERR_PTR(-ENOMEM);
 		}
-		spin_unlock_irqrestore(&i915->gpu_error.lock, flags);
+
+		gt_record_engines(error->gt, compress);
+
+		if (INTEL_INFO(i915)->has_gt_uc)
+			error->gt->uc = gt_record_uc(error->gt, compress);
+
+		i915_vma_capture_finish(error->gt, compress);
+
+		error->simulated |= error->gt->simulated;
 	}
 
-	if (error) {
-		__i915_gpu_state_free(&error->ref);
+	error->overlay = intel_overlay_capture_error_state(i915);
+	error->display = intel_display_capture_error_state(i915);
+
+	return error;
+}
+
+void i915_error_state_store(struct i915_gpu_coredump *error)
+{
+	struct drm_i915_private *i915;
+	static bool warned;
+
+	if (IS_ERR_OR_NULL(error))
 		return;
-	}
+
+	i915 = error->i915;
+	dev_info(i915->drm.dev, "%s\n", error_msg(error));
+
+	if (error->simulated ||
+	    cmpxchg(&i915->gpu_error.first_error, NULL, error))
+		return;
+
+	i915_gpu_coredump_get(error);
 
 	if (!xchg(&warned, true) &&
 	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
@@ -1828,15 +1860,38 @@ void i915_capture_error_state(struct drm_i915_private *i915,
 	}
 }
 
-struct i915_gpu_state *
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @i915: i915 device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error.  Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+void i915_capture_error_state(struct drm_i915_private *i915)
+{
+	struct i915_gpu_coredump *error;
+
+	error = i915_gpu_coredump(i915);
+	if (IS_ERR(error)) {
+		cmpxchg(&i915->gpu_error.first_error, NULL, error);
+		return;
+	}
+
+	i915_error_state_store(error);
+	i915_gpu_coredump_put(error);
+}
+
+struct i915_gpu_coredump *
 i915_first_error_state(struct drm_i915_private *i915)
 {
-	struct i915_gpu_state *error;
+	struct i915_gpu_coredump *error;
 
 	spin_lock_irq(&i915->gpu_error.lock);
 	error = i915->gpu_error.first_error;
 	if (!IS_ERR_OR_NULL(error))
-		i915_gpu_state_get(error);
+		i915_gpu_coredump_get(error);
 	spin_unlock_irq(&i915->gpu_error.lock);
 
 	return error;
@@ -1844,7 +1899,7 @@ i915_first_error_state(struct drm_i915_private *i915)
 
 void i915_reset_error_state(struct drm_i915_private *i915)
 {
-	struct i915_gpu_state *error;
+	struct i915_gpu_coredump *error;
 
 	spin_lock_irq(&i915->gpu_error.lock);
 	error = i915->gpu_error.first_error;
@@ -1853,7 +1908,7 @@ void i915_reset_error_state(struct drm_i915_private *i915)
 	spin_unlock_irq(&i915->gpu_error.lock);
 
 	if (!IS_ERR_OR_NULL(error))
-		i915_gpu_state_put(error);
+		i915_gpu_coredump_put(error);
 }
 
 void i915_disable_error_state(struct drm_i915_private *i915, int err)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 5d2c337..9109004 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -25,43 +25,100 @@
 #include "i915_scheduler.h"
 
 struct drm_i915_private;
+struct i915_vma_compress;
+struct intel_engine_capture_vma;
 struct intel_overlay_error_state;
 struct intel_display_error_state;
 
-struct i915_gpu_state {
-	struct kref ref;
-	ktime_t time;
-	ktime_t boottime;
-	ktime_t uptime;
-	unsigned long capture;
+struct i915_vma_coredump {
+	struct i915_vma_coredump *next;
 
-	struct drm_i915_private *i915;
+	char name[20];
 
-	char error_msg[128];
+	u64 gtt_offset;
+	u64 gtt_size;
+	u32 gtt_page_sizes;
+
+	int num_pages;
+	int page_count;
+	int unused;
+	u32 *pages[0];
+};
+
+struct i915_request_coredump {
+	unsigned long flags;
+	pid_t pid;
+	u32 context;
+	u32 seqno;
+	u32 start;
+	u32 head;
+	u32 tail;
+	struct i915_sched_attr sched_attr;
+};
+
+struct intel_engine_coredump {
+	const struct intel_engine_cs *engine;
+
 	bool simulated;
-	bool awake;
-	bool wakelock;
-	bool suspended;
-	int iommu;
 	u32 reset_count;
-	u32 suspend_count;
-	struct intel_device_info device_info;
-	struct intel_runtime_info runtime_info;
-	struct intel_driver_caps driver_caps;
-	struct i915_params params;
 
-	struct i915_error_uc {
-		struct intel_uc_fw guc_fw;
-		struct intel_uc_fw huc_fw;
-		struct drm_i915_error_object *guc_log;
-	} uc;
+	/* position of active request inside the ring */
+	u32 rq_head, rq_post, rq_tail;
+
+	/* Register state */
+	u32 ccid;
+	u32 start;
+	u32 tail;
+	u32 head;
+	u32 ctl;
+	u32 mode;
+	u32 hws;
+	u32 ipeir;
+	u32 ipehr;
+	u32 bbstate;
+	u32 instpm;
+	u32 instps;
+	u64 bbaddr;
+	u64 acthd;
+	u32 fault_reg;
+	u64 faddr;
+	u32 rc_psmi; /* sleep state */
+	struct intel_instdone instdone;
+
+	struct i915_gem_context_coredump {
+		char comm[TASK_COMM_LEN];
+		pid_t pid;
+		int active;
+		int guilty;
+		struct i915_sched_attr sched_attr;
+	} context;
+
+	struct i915_vma_coredump *vma;
+
+	struct i915_request_coredump execlist[EXECLIST_MAX_PORTS];
+	unsigned int num_ports;
+
+	struct {
+		u32 gfx_mode;
+		union {
+			u64 pdp[4];
+			u32 pp_dir_base;
+		};
+	} vm_info;
+
+	struct intel_engine_coredump *next;
+};
+
+struct intel_gt_coredump {
+	const struct intel_gt *_gt;
+	bool awake;
+	bool simulated;
 
 	/* Generic register state */
 	u32 eir;
 	u32 pgtbl_er;
 	u32 ier;
 	u32 gtier[6], ngtier;
-	u32 ccid;
 	u32 derrmr;
 	u32 forcewake;
 	u32 error; /* gen6+ */
@@ -80,91 +137,45 @@ struct i915_gpu_state {
 
 	u32 nfence;
 	u64 fence[I915_MAX_NUM_FENCES];
+
+	struct intel_engine_coredump *engine;
+
+	struct intel_uc_coredump {
+		struct intel_uc_fw guc_fw;
+		struct intel_uc_fw huc_fw;
+		struct i915_vma_coredump *guc_log;
+	} *uc;
+
+	struct intel_gt_coredump *next;
+};
+
+struct i915_gpu_coredump {
+	struct kref ref;
+	ktime_t time;
+	ktime_t boottime;
+	ktime_t uptime;
+	unsigned long capture;
+
+	struct drm_i915_private *i915;
+
+	struct intel_gt_coredump *gt;
+
+	char error_msg[128];
+	bool simulated;
+	bool wakelock;
+	bool suspended;
+	int iommu;
+	u32 reset_count;
+	u32 suspend_count;
+
+	struct intel_device_info device_info;
+	struct intel_runtime_info runtime_info;
+	struct intel_driver_caps driver_caps;
+	struct i915_params params;
+
 	struct intel_overlay_error_state *overlay;
 	struct intel_display_error_state *display;
 
-	struct drm_i915_error_engine {
-		const struct intel_engine_cs *engine;
-
-		/* Software tracked state */
-		bool idle;
-		int num_requests;
-		u32 reset_count;
-
-		/* position of active request inside the ring */
-		u32 rq_head, rq_post, rq_tail;
-
-		/* our own tracking of ring head and tail */
-		u32 cpu_ring_head;
-		u32 cpu_ring_tail;
-
-		/* Register state */
-		u32 start;
-		u32 tail;
-		u32 head;
-		u32 ctl;
-		u32 mode;
-		u32 hws;
-		u32 ipeir;
-		u32 ipehr;
-		u32 bbstate;
-		u32 instpm;
-		u32 instps;
-		u64 bbaddr;
-		u64 acthd;
-		u32 fault_reg;
-		u64 faddr;
-		u32 rc_psmi; /* sleep state */
-		struct intel_instdone instdone;
-
-		struct drm_i915_error_context {
-			char comm[TASK_COMM_LEN];
-			pid_t pid;
-			int active;
-			int guilty;
-			struct i915_sched_attr sched_attr;
-		} context;
-
-		struct drm_i915_error_object {
-			u64 gtt_offset;
-			u64 gtt_size;
-			u32 gtt_page_sizes;
-			int num_pages;
-			int page_count;
-			int unused;
-			u32 *pages[0];
-		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
-
-		struct drm_i915_error_object **user_bo;
-		long user_bo_count;
-
-		struct drm_i915_error_object *wa_ctx;
-		struct drm_i915_error_object *default_state;
-
-		struct drm_i915_error_request {
-			unsigned long flags;
-			long jiffies;
-			pid_t pid;
-			u32 context;
-			u32 seqno;
-			u32 start;
-			u32 head;
-			u32 tail;
-			struct i915_sched_attr sched_attr;
-		} *requests, execlist[EXECLIST_MAX_PORTS];
-		unsigned int num_ports;
-
-		struct {
-			u32 gfx_mode;
-			union {
-				u64 pdp[4];
-				u32 pp_dir_base;
-			};
-		} vm_info;
-
-		struct drm_i915_error_engine *next;
-	} *engine;
-
 	struct scatterlist *sgl, *fit;
 };
 
@@ -172,7 +183,7 @@ struct i915_gpu_error {
 	/* For reset and error_state handling. */
 	spinlock_t lock;
 	/* Protected by the above dev->gpu_error.lock. */
-	struct i915_gpu_state *first_error;
+	struct i915_gpu_coredump *first_error;
 
 	atomic_t pending_fb_pin;
 
@@ -200,41 +211,115 @@ struct drm_i915_error_state_buf {
 __printf(2, 3)
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
 
-struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
-void i915_capture_error_state(struct drm_i915_private *dev_priv,
-			      intel_engine_mask_t engine_mask,
-			      const char *error_msg);
+struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915);
+void i915_capture_error_state(struct drm_i915_private *i915);
 
-static inline struct i915_gpu_state *
-i915_gpu_state_get(struct i915_gpu_state *gpu)
+struct i915_gpu_coredump *
+i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp);
+
+struct intel_gt_coredump *
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp);
+
+struct intel_engine_coredump *
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp);
+
+struct intel_engine_capture_vma *
+intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
+				  struct i915_request *rq,
+				  gfp_t gfp);
+
+void intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
+				   struct intel_engine_capture_vma *capture,
+				   struct i915_vma_compress *compress);
+
+struct i915_vma_compress *
+i915_vma_capture_prepare(struct intel_gt_coredump *gt);
+
+void i915_vma_capture_finish(struct intel_gt_coredump *gt,
+			     struct i915_vma_compress *compress);
+
+void i915_error_state_store(struct i915_gpu_coredump *error);
+
+static inline struct i915_gpu_coredump *
+i915_gpu_coredump_get(struct i915_gpu_coredump *gpu)
 {
 	kref_get(&gpu->ref);
 	return gpu;
 }
 
-ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
-				      char *buf, loff_t offset, size_t count);
+ssize_t
+i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
+				 char *buf, loff_t offset, size_t count);
 
-void __i915_gpu_state_free(struct kref *kref);
-static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
+void __i915_gpu_coredump_free(struct kref *kref);
+static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
 {
 	if (gpu)
-		kref_put(&gpu->ref, __i915_gpu_state_free);
+		kref_put(&gpu->ref, __i915_gpu_coredump_free);
 }
 
-struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
+struct i915_gpu_coredump *i915_first_error_state(struct drm_i915_private *i915);
 void i915_reset_error_state(struct drm_i915_private *i915);
 void i915_disable_error_state(struct drm_i915_private *i915, int err);
 
 #else
 
-static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
-					    u32 engine_mask,
-					    const char *error_msg)
+static inline void i915_capture_error_state(struct drm_i915_private *i915)
 {
 }
 
-static inline struct i915_gpu_state *
+static inline struct i915_gpu_coredump *
+i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
+{
+	return NULL;
+}
+
+static inline struct intel_gt_coredump *
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
+{
+	return NULL;
+}
+
+static inline struct intel_engine_coredump *
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
+{
+	return NULL;
+}
+
+static inline struct intel_engine_capture_vma *
+intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
+				  struct i915_request *rq,
+				  gfp_t gfp)
+{
+	return NULL;
+}
+
+static inline void
+intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
+			      struct intel_engine_capture_vma *capture,
+			      struct i915_vma_compress *compress)
+{
+}
+
+static inline struct i915_vma_compress *
+i915_vma_capture_prepare(struct intel_gt_coredump *gt)
+{
+	return NULL;
+}
+
+static inline void
+i915_vma_capture_finish(struct intel_gt_coredump *gt,
+			struct i915_vma_compress *compress)
+{
+}
+
+static inline void
+i915_error_state_store(struct drm_i915_private *i915,
+		       struct i915_gpu_coredump *error)
+{
+}
+
+static inline struct i915_gpu_coredump *
 i915_first_error_state(struct drm_i915_private *i915)
 {
 	return ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index dae00f7..afc6aad 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -893,7 +893,7 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
 }
 
 /**
- * ivybridge_parity_work - Workqueue called when a parity error interrupt
+ * ivb_parity_work - Workqueue called when a parity error interrupt
  * occurred.
  * @work: workqueue struct
  *
@@ -901,7 +901,7 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
  * this event, userspace should try to remap the bad rows since statistically
  * it is likely the same row is more likely to go bad again.
  */
-static void ivybridge_parity_work(struct work_struct *work)
+static void ivb_parity_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
@@ -2031,7 +2031,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
  * 4 - Process the interrupt(s) that had bits set in the IIRs.
  * 5 - Re-enable Master Interrupt Control.
  */
-static irqreturn_t ironlake_irq_handler(int irq, void *arg)
+static irqreturn_t ilk_irq_handler(int irq, void *arg)
 {
 	struct drm_i915_private *dev_priv = arg;
 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
@@ -2453,6 +2453,25 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
 }
 
+static void
+gen11_display_irq_handler(struct drm_i915_private *i915)
+{
+	void __iomem * const regs = i915->uncore.regs;
+	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
+
+	disable_rpm_wakeref_asserts(&i915->runtime_pm);
+	/*
+	 * GEN11_DISPLAY_INT_CTL has the same format as GEN8_MASTER_IRQ
+	 * for the display related bits.
+	 */
+	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
+	gen8_de_irq_handler(i915, disp_ctl);
+	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
+		      GEN11_DISPLAY_IRQ_ENABLE);
+
+	enable_rpm_wakeref_asserts(&i915->runtime_pm);
+}
+
 static __always_inline irqreturn_t
 __gen11_irq_handler(struct drm_i915_private * const i915,
 		    u32 (*intr_disable)(void __iomem * const regs),
@@ -2476,17 +2495,8 @@ __gen11_irq_handler(struct drm_i915_private * const i915,
 	gen11_gt_irq_handler(gt, master_ctl);
 
 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
-	if (master_ctl & GEN11_DISPLAY_IRQ) {
-		const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
-
-		disable_rpm_wakeref_asserts(&i915->runtime_pm);
-		/*
-		 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
-		 * for the display related bits.
-		 */
-		gen8_de_irq_handler(i915, disp_ctl);
-		enable_rpm_wakeref_asserts(&i915->runtime_pm);
-	}
+	if (master_ctl & GEN11_DISPLAY_IRQ)
+		gen11_display_irq_handler(i915);
 
 	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
 
@@ -2732,7 +2742,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 
 /* drm_dma.h hooks
 */
-static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
+static void ilk_irq_reset(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
 
@@ -2788,15 +2798,11 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
 		ibx_irq_reset(dev_priv);
 }
 
-static void gen11_irq_reset(struct drm_i915_private *dev_priv)
+static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	enum pipe pipe;
 
-	gen11_master_intr_disable(dev_priv->uncore.regs);
-
-	gen11_gt_irq_reset(&dev_priv->gt);
-
 	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
 
 	if (INTEL_GEN(dev_priv) >= 12) {
@@ -2825,13 +2831,24 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
 	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
-	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
-	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
 
 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
 		GEN3_IRQ_RESET(uncore, SDE);
 }
 
+static void gen11_irq_reset(struct drm_i915_private *dev_priv)
+{
+	struct intel_uncore *uncore = &dev_priv->uncore;
+
+	gen11_master_intr_disable(dev_priv->uncore.regs);
+
+	gen11_gt_irq_reset(&dev_priv->gt);
+	gen11_display_irq_reset(dev_priv);
+
+	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
+	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
+}
+
 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
 				     u8 pipe_mask)
 {
@@ -2976,6 +2993,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
 	hotplug_irqs = sde_ddi_mask | sde_tc_mask;
 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);
 
+	I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+
 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
 	icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
@@ -3081,6 +3100,9 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
 	u32 hotplug_irqs, enabled_irqs;
 
+	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+
 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
 
@@ -3203,7 +3225,7 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
 		spt_hpd_detection_setup(dev_priv);
 }
 
-static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
+static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	u32 display_mask, extra_mask;
@@ -3597,7 +3619,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
 
 		if (iir & I915_USER_INTERRUPT)
-			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
 
 		if (iir & I915_MASTER_ERROR_INTERRUPT)
 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3702,7 +3724,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 		I915_WRITE(GEN2_IIR, iir);
 
 		if (iir & I915_USER_INTERRUPT)
-			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
 
 		if (iir & I915_MASTER_ERROR_INTERRUPT)
 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3844,10 +3866,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 		I915_WRITE(GEN2_IIR, iir);
 
 		if (iir & I915_USER_INTERRUPT)
-			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
 
 		if (iir & I915_BSD_USER_INTERRUPT)
-			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
+			intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);
 
 		if (iir & I915_MASTER_ERROR_INTERRUPT)
 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3877,7 +3899,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 
 	intel_hpd_init_work(dev_priv);
 
-	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
 	for (i = 0; i < MAX_L3_SLICES; ++i)
 		dev_priv->l3_parity.remap_info[i] = NULL;
 
@@ -3958,7 +3980,7 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
 		else if (INTEL_GEN(dev_priv) >= 8)
 			return gen8_irq_handler;
 		else
-			return ironlake_irq_handler;
+			return ilk_irq_handler;
 	}
 }
 
@@ -3981,7 +4003,7 @@ static void intel_irq_reset(struct drm_i915_private *dev_priv)
 		else if (INTEL_GEN(dev_priv) >= 8)
 			gen8_irq_reset(dev_priv);
 		else
-			ironlake_irq_reset(dev_priv);
+			ilk_irq_reset(dev_priv);
 	}
 }
 
@@ -4004,7 +4026,7 @@ static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
 		else if (INTEL_GEN(dev_priv) >= 8)
 			gen8_irq_postinstall(dev_priv);
 		else
-			ironlake_irq_postinstall(dev_priv);
+			ilk_irq_postinstall(dev_priv);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 07b04b0..fdd5504 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -27,6 +27,12 @@
 
 #include "i915_memcpy.h"
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+#define CI_BUG_ON(expr) BUG_ON(expr)
+#else
+#define CI_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
+
 static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
 
 #ifdef CONFIG_AS_MOVNTDQA
@@ -34,7 +40,6 @@ static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
 {
 	kernel_fpu_begin();
 
-	len >>= 4;
 	while (len >= 4) {
 		asm("movntdqa   (%0), %%xmm0\n"
 		    "movntdqa 16(%0), %%xmm1\n"
@@ -59,6 +64,38 @@ static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
 
 	kernel_fpu_end();
 }
+
+static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len)
+{
+	kernel_fpu_begin();
+
+	while (len >= 4) {
+		asm("movntdqa   (%0), %%xmm0\n"
+		    "movntdqa 16(%0), %%xmm1\n"
+		    "movntdqa 32(%0), %%xmm2\n"
+		    "movntdqa 48(%0), %%xmm3\n"
+		    "movups %%xmm0,   (%1)\n"
+		    "movups %%xmm1, 16(%1)\n"
+		    "movups %%xmm2, 32(%1)\n"
+		    "movups %%xmm3, 48(%1)\n"
+		    :: "r" (src), "r" (dst) : "memory");
+		src += 64;
+		dst += 64;
+		len -= 4;
+	}
+	while (len--) {
+		asm("movntdqa (%0), %%xmm0\n"
+		    "movups %%xmm0, (%1)\n"
+		    :: "r" (src), "r" (dst) : "memory");
+		src += 16;
+		dst += 16;
+	}
+
+	kernel_fpu_end();
+}
+#else
+static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) {}
+static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) {}
 #endif
 
 /**
@@ -83,17 +120,47 @@ bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
 	if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
 		return false;
 
-#ifdef CONFIG_AS_MOVNTDQA
 	if (static_branch_likely(&has_movntdqa)) {
 		if (likely(len))
-			__memcpy_ntdqa(dst, src, len);
+			__memcpy_ntdqa(dst, src, len >> 4);
 		return true;
 	}
-#endif
 
 	return false;
 }
 
+/**
+ * i915_unaligned_memcpy_from_wc - perform a mostly accelerated read from WC
+ * @dst: destination pointer
+ * @src: source pointer
+ * @len: how many bytes to copy
+ *
+ * Like i915_memcpy_from_wc(), the unaligned variant copies @len bytes from
+ * @src to @dst using non-temporal instructions where available, but
+ * accepts that its arguments may not be aligned, provided they are valid
+ * for the potential 16-byte read past the end.
+ */
+void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len)
+{
+	unsigned long addr;
+
+	CI_BUG_ON(!i915_has_memcpy_from_wc());
+
+	addr = (unsigned long)src;
+	if (!IS_ALIGNED(addr, 16)) {
+		unsigned long x = min(ALIGN(addr, 16) - addr, len);
+
+		memcpy(dst, src, x);
+
+		len -= x;
+		dst += x;
+		src += x;
+	}
+
+	if (likely(len))
+		__memcpy_ntdqu(dst, src, DIV_ROUND_UP(len, 16));
+}
+
 void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
 {
 	/*
diff --git a/drivers/gpu/drm/i915/i915_memcpy.h b/drivers/gpu/drm/i915/i915_memcpy.h
index 970d84b..e36d30e 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.h
+++ b/drivers/gpu/drm/i915/i915_memcpy.h
@@ -11,7 +11,9 @@
 struct drm_i915_private;
 
 void i915_memcpy_init_early(struct drm_i915_private *i915);
+
 bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
+void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len);
 
 /* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
  * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 318562c..b6376b2 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -33,6 +33,9 @@ struct remap_pfn {
 	struct mm_struct *mm;
 	unsigned long pfn;
 	pgprot_t prot;
+
+	struct sgt_iter sgt;
+	resource_size_t iobase;
 };
 
 static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
@@ -46,6 +49,35 @@ static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
 	return 0;
 }
 
+#define use_dma(io) ((io) != -1)
+
+static inline unsigned long sgt_pfn(const struct remap_pfn *r)
+{
+	if (use_dma(r->iobase))
+		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
+	else
+		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
+}
+
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
+{
+	struct remap_pfn *r = data;
+
+	if (GEM_WARN_ON(!r->sgt.pfn))
+		return -EINVAL;
+
+	/* Special PTE are not associated with any struct page */
+	set_pte_at(r->mm, addr, pte,
+		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
+	r->pfn++; /* track insertions in case we need to unwind later */
+
+	r->sgt.curr += PAGE_SIZE;
+	if (r->sgt.curr >= r->sgt.max)
+		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
+
+	return 0;
+}
+
 /**
  * remap_io_mapping - remap an IO mapping to userspace
  * @vma: user vma to map to
@@ -80,3 +112,40 @@ int remap_io_mapping(struct vm_area_struct *vma,
 
 	return 0;
 }
+
+/**
+ * remap_io_sg - remap a scatterlist of IO pages to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @size: size of map area
+ * @sgl: Start sg entry
+ * @iobase: Use the stored dma address offset by this address, or the pfn if -1
+ *
+ *  Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_sg(struct vm_area_struct *vma,
+		unsigned long addr, unsigned long size,
+		struct scatterlist *sgl, resource_size_t iobase)
+{
+	struct remap_pfn r = {
+		.mm = vma->vm_mm,
+		.prot = vma->vm_page_prot,
+		.sgt = __sgt_iter(sgl, use_dma(iobase)),
+		.iobase = iobase,
+	};
+	int err;
+
+	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+	if (!use_dma(iobase))
+		flush_cache_range(vma, addr, size);
+
+	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
+	if (unlikely(err)) {
+		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
+		return err;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 1bb701d..83f0140 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -30,6 +30,7 @@
 #include "display/intel_fbdev.h"
 
 #include "i915_drv.h"
+#include "i915_perf.h"
 #include "i915_globals.h"
 #include "i915_selftest.h"
 
@@ -192,23 +193,23 @@
 	GEN_DEFAULT_PAGE_SIZES, \
 	GEN_DEFAULT_REGIONS
 
-static const struct intel_device_info intel_i830_info = {
+static const struct intel_device_info i830_info = {
 	I830_FEATURES,
 	PLATFORM(INTEL_I830),
 };
 
-static const struct intel_device_info intel_i845g_info = {
+static const struct intel_device_info i845g_info = {
 	I845_FEATURES,
 	PLATFORM(INTEL_I845G),
 };
 
-static const struct intel_device_info intel_i85x_info = {
+static const struct intel_device_info i85x_info = {
 	I830_FEATURES,
 	PLATFORM(INTEL_I85X),
 	.display.has_fbc = 1,
 };
 
-static const struct intel_device_info intel_i865g_info = {
+static const struct intel_device_info i865g_info = {
 	I845_FEATURES,
 	PLATFORM(INTEL_I865G),
 };
@@ -227,7 +228,7 @@ static const struct intel_device_info intel_i865g_info = {
 	GEN_DEFAULT_PAGE_SIZES, \
 	GEN_DEFAULT_REGIONS
 
-static const struct intel_device_info intel_i915g_info = {
+static const struct intel_device_info i915g_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_I915G),
 	.has_coherent_ggtt = false,
@@ -238,7 +239,7 @@ static const struct intel_device_info intel_i915g_info = {
 	.unfenced_needs_alignment = 1,
 };
 
-static const struct intel_device_info intel_i915gm_info = {
+static const struct intel_device_info i915gm_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_I915GM),
 	.is_mobile = 1,
@@ -251,7 +252,7 @@ static const struct intel_device_info intel_i915gm_info = {
 	.unfenced_needs_alignment = 1,
 };
 
-static const struct intel_device_info intel_i945g_info = {
+static const struct intel_device_info i945g_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_I945G),
 	.display.has_hotplug = 1,
@@ -262,7 +263,7 @@ static const struct intel_device_info intel_i945g_info = {
 	.unfenced_needs_alignment = 1,
 };
 
-static const struct intel_device_info intel_i945gm_info = {
+static const struct intel_device_info i945gm_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_I945GM),
 	.is_mobile = 1,
@@ -276,21 +277,21 @@ static const struct intel_device_info intel_i945gm_info = {
 	.unfenced_needs_alignment = 1,
 };
 
-static const struct intel_device_info intel_g33_info = {
+static const struct intel_device_info g33_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_G33),
 	.display.has_hotplug = 1,
 	.display.has_overlay = 1,
 };
 
-static const struct intel_device_info intel_pineview_g_info = {
+static const struct intel_device_info pnv_g_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_PINEVIEW),
 	.display.has_hotplug = 1,
 	.display.has_overlay = 1,
 };
 
-static const struct intel_device_info intel_pineview_m_info = {
+static const struct intel_device_info pnv_m_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_PINEVIEW),
 	.is_mobile = 1,
@@ -313,7 +314,7 @@ static const struct intel_device_info intel_pineview_m_info = {
 	GEN_DEFAULT_PAGE_SIZES, \
 	GEN_DEFAULT_REGIONS
 
-static const struct intel_device_info intel_i965g_info = {
+static const struct intel_device_info i965g_info = {
 	GEN4_FEATURES,
 	PLATFORM(INTEL_I965G),
 	.display.has_overlay = 1,
@@ -321,7 +322,7 @@ static const struct intel_device_info intel_i965g_info = {
 	.has_snoop = false,
 };
 
-static const struct intel_device_info intel_i965gm_info = {
+static const struct intel_device_info i965gm_info = {
 	GEN4_FEATURES,
 	PLATFORM(INTEL_I965GM),
 	.is_mobile = 1,
@@ -332,14 +333,14 @@ static const struct intel_device_info intel_i965gm_info = {
 	.has_snoop = false,
 };
 
-static const struct intel_device_info intel_g45_info = {
+static const struct intel_device_info g45_info = {
 	GEN4_FEATURES,
 	PLATFORM(INTEL_G45),
 	.engine_mask = BIT(RCS0) | BIT(VCS0),
 	.gpu_reset_clobbers_display = false,
 };
 
-static const struct intel_device_info intel_gm45_info = {
+static const struct intel_device_info gm45_info = {
 	GEN4_FEATURES,
 	PLATFORM(INTEL_GM45),
 	.is_mobile = 1,
@@ -364,12 +365,12 @@ static const struct intel_device_info intel_gm45_info = {
 	GEN_DEFAULT_PAGE_SIZES, \
 	GEN_DEFAULT_REGIONS
 
-static const struct intel_device_info intel_ironlake_d_info = {
+static const struct intel_device_info ilk_d_info = {
 	GEN5_FEATURES,
 	PLATFORM(INTEL_IRONLAKE),
 };
 
-static const struct intel_device_info intel_ironlake_m_info = {
+static const struct intel_device_info ilk_m_info = {
 	GEN5_FEATURES,
 	PLATFORM(INTEL_IRONLAKE),
 	.is_mobile = 1,
@@ -399,12 +400,12 @@ static const struct intel_device_info intel_ironlake_m_info = {
 	GEN6_FEATURES, \
 	PLATFORM(INTEL_SANDYBRIDGE)
 
-static const struct intel_device_info intel_sandybridge_d_gt1_info = {
+static const struct intel_device_info snb_d_gt1_info = {
 	SNB_D_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_sandybridge_d_gt2_info = {
+static const struct intel_device_info snb_d_gt2_info = {
 	SNB_D_PLATFORM,
 	.gt = 2,
 };
@@ -415,12 +416,12 @@ static const struct intel_device_info intel_sandybridge_d_gt2_info = {
 	.is_mobile = 1
 
 
-static const struct intel_device_info intel_sandybridge_m_gt1_info = {
+static const struct intel_device_info snb_m_gt1_info = {
 	SNB_M_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_sandybridge_m_gt2_info = {
+static const struct intel_device_info snb_m_gt2_info = {
 	SNB_M_PLATFORM,
 	.gt = 2,
 };
@@ -436,7 +437,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
 	.has_rc6 = 1, \
 	.has_rc6p = 1, \
 	.has_rps = true, \
-	.ppgtt_type = INTEL_PPGTT_ALIASING, \
+	.ppgtt_type = INTEL_PPGTT_FULL, \
 	.ppgtt_size = 31, \
 	IVB_PIPE_OFFSETS, \
 	IVB_CURSOR_OFFSETS, \
@@ -449,12 +450,12 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
 	PLATFORM(INTEL_IVYBRIDGE), \
 	.has_l3_dpf = 1
 
-static const struct intel_device_info intel_ivybridge_d_gt1_info = {
+static const struct intel_device_info ivb_d_gt1_info = {
 	IVB_D_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_ivybridge_d_gt2_info = {
+static const struct intel_device_info ivb_d_gt2_info = {
 	IVB_D_PLATFORM,
 	.gt = 2,
 };
@@ -465,17 +466,17 @@ static const struct intel_device_info intel_ivybridge_d_gt2_info = {
 	.is_mobile = 1, \
 	.has_l3_dpf = 1
 
-static const struct intel_device_info intel_ivybridge_m_gt1_info = {
+static const struct intel_device_info ivb_m_gt1_info = {
 	IVB_M_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_ivybridge_m_gt2_info = {
+static const struct intel_device_info ivb_m_gt2_info = {
 	IVB_M_PLATFORM,
 	.gt = 2,
 };
 
-static const struct intel_device_info intel_ivybridge_q_info = {
+static const struct intel_device_info ivb_q_info = {
 	GEN7_FEATURES,
 	PLATFORM(INTEL_IVYBRIDGE),
 	.gt = 2,
@@ -483,7 +484,7 @@ static const struct intel_device_info intel_ivybridge_q_info = {
 	.has_l3_dpf = 1,
 };
 
-static const struct intel_device_info intel_valleyview_info = {
+static const struct intel_device_info vlv_info = {
 	PLATFORM(INTEL_VALLEYVIEW),
 	GEN(7),
 	.is_lp = 1,
@@ -493,7 +494,7 @@ static const struct intel_device_info intel_valleyview_info = {
 	.has_rps = true,
 	.display.has_gmch = 1,
 	.display.has_hotplug = 1,
-	.ppgtt_type = INTEL_PPGTT_ALIASING,
+	.ppgtt_type = INTEL_PPGTT_FULL,
 	.ppgtt_size = 31,
 	.has_snoop = true,
 	.has_coherent_ggtt = false,
@@ -522,17 +523,17 @@ static const struct intel_device_info intel_valleyview_info = {
 	PLATFORM(INTEL_HASWELL), \
 	.has_l3_dpf = 1
 
-static const struct intel_device_info intel_haswell_gt1_info = {
+static const struct intel_device_info hsw_gt1_info = {
 	HSW_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_haswell_gt2_info = {
+static const struct intel_device_info hsw_gt2_info = {
 	HSW_PLATFORM,
 	.gt = 2,
 };
 
-static const struct intel_device_info intel_haswell_gt3_info = {
+static const struct intel_device_info hsw_gt3_info = {
 	HSW_PLATFORM,
 	.gt = 3,
 };
@@ -550,17 +551,17 @@ static const struct intel_device_info intel_haswell_gt3_info = {
 	GEN8_FEATURES, \
 	PLATFORM(INTEL_BROADWELL)
 
-static const struct intel_device_info intel_broadwell_gt1_info = {
+static const struct intel_device_info bdw_gt1_info = {
 	BDW_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_broadwell_gt2_info = {
+static const struct intel_device_info bdw_gt2_info = {
 	BDW_PLATFORM,
 	.gt = 2,
 };
 
-static const struct intel_device_info intel_broadwell_rsvd_info = {
+static const struct intel_device_info bdw_rsvd_info = {
 	BDW_PLATFORM,
 	.gt = 3,
 	/* According to the device ID those devices are GT3, they were
@@ -568,14 +569,14 @@ static const struct intel_device_info intel_broadwell_rsvd_info = {
 	 */
 };
 
-static const struct intel_device_info intel_broadwell_gt3_info = {
+static const struct intel_device_info bdw_gt3_info = {
 	BDW_PLATFORM,
 	.gt = 3,
 	.engine_mask =
 		BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
 };
 
-static const struct intel_device_info intel_cherryview_info = {
+static const struct intel_device_info chv_info = {
 	PLATFORM(INTEL_CHERRYVIEW),
 	GEN(8),
 	.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
@@ -620,12 +621,12 @@ static const struct intel_device_info intel_cherryview_info = {
 	GEN9_FEATURES, \
 	PLATFORM(INTEL_SKYLAKE)
 
-static const struct intel_device_info intel_skylake_gt1_info = {
+static const struct intel_device_info skl_gt1_info = {
 	SKL_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_skylake_gt2_info = {
+static const struct intel_device_info skl_gt2_info = {
 	SKL_PLATFORM,
 	.gt = 2,
 };
@@ -636,12 +637,12 @@ static const struct intel_device_info intel_skylake_gt2_info = {
 		BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
 
 
-static const struct intel_device_info intel_skylake_gt3_info = {
+static const struct intel_device_info skl_gt3_info = {
 	SKL_GT3_PLUS_PLATFORM,
 	.gt = 3,
 };
 
-static const struct intel_device_info intel_skylake_gt4_info = {
+static const struct intel_device_info skl_gt4_info = {
 	SKL_GT3_PLUS_PLATFORM,
 	.gt = 4,
 };
@@ -678,13 +679,13 @@ static const struct intel_device_info intel_skylake_gt4_info = {
 	GEN9_DEFAULT_PAGE_SIZES, \
 	GEN_DEFAULT_REGIONS
 
-static const struct intel_device_info intel_broxton_info = {
+static const struct intel_device_info bxt_info = {
 	GEN9_LP_FEATURES,
 	PLATFORM(INTEL_BROXTON),
 	.ddb_size = 512,
 };
 
-static const struct intel_device_info intel_geminilake_info = {
+static const struct intel_device_info glk_info = {
 	GEN9_LP_FEATURES,
 	PLATFORM(INTEL_GEMINILAKE),
 	.ddb_size = 1024,
@@ -695,17 +696,17 @@ static const struct intel_device_info intel_geminilake_info = {
 	GEN9_FEATURES, \
 	PLATFORM(INTEL_KABYLAKE)
 
-static const struct intel_device_info intel_kabylake_gt1_info = {
+static const struct intel_device_info kbl_gt1_info = {
 	KBL_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_kabylake_gt2_info = {
+static const struct intel_device_info kbl_gt2_info = {
 	KBL_PLATFORM,
 	.gt = 2,
 };
 
-static const struct intel_device_info intel_kabylake_gt3_info = {
+static const struct intel_device_info kbl_gt3_info = {
 	KBL_PLATFORM,
 	.gt = 3,
 	.engine_mask =
@@ -716,17 +717,17 @@ static const struct intel_device_info intel_kabylake_gt3_info = {
 	GEN9_FEATURES, \
 	PLATFORM(INTEL_COFFEELAKE)
 
-static const struct intel_device_info intel_coffeelake_gt1_info = {
+static const struct intel_device_info cfl_gt1_info = {
 	CFL_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_coffeelake_gt2_info = {
+static const struct intel_device_info cfl_gt2_info = {
 	CFL_PLATFORM,
 	.gt = 2,
 };
 
-static const struct intel_device_info intel_coffeelake_gt3_info = {
+static const struct intel_device_info cfl_gt3_info = {
 	CFL_PLATFORM,
 	.gt = 3,
 	.engine_mask =
@@ -741,7 +742,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = {
 	.has_coherent_ggtt = false, \
 	GLK_COLORS
 
-static const struct intel_device_info intel_cannonlake_info = {
+static const struct intel_device_info cnl_info = {
 	GEN10_FEATURES,
 	PLATFORM(INTEL_CANNONLAKE),
 	.gt = 2,
@@ -776,14 +777,14 @@ static const struct intel_device_info intel_cannonlake_info = {
 	.has_logical_ring_elsq = 1, \
 	.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
 
-static const struct intel_device_info intel_icelake_11_info = {
+static const struct intel_device_info icl_info = {
 	GEN11_FEATURES,
 	PLATFORM(INTEL_ICELAKE),
 	.engine_mask =
 		BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
 };
 
-static const struct intel_device_info intel_elkhartlake_info = {
+static const struct intel_device_info ehl_info = {
 	GEN11_FEATURES,
 	PLATFORM(INTEL_ELKHARTLAKE),
 	.require_force_probe = 1,
@@ -814,7 +815,7 @@ static const struct intel_device_info intel_elkhartlake_info = {
 	.has_global_mocs = 1, \
 	.display.has_dsb = 1
 
-static const struct intel_device_info intel_tigerlake_12_info = {
+static const struct intel_device_info tgl_info = {
 	GEN12_FEATURES,
 	PLATFORM(INTEL_TIGERLAKE),
 	.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
@@ -839,68 +840,70 @@ static const struct intel_device_info intel_tigerlake_12_info = {
  * PCI ID matches, otherwise we'll use the wrong info struct above.
  */
 static const struct pci_device_id pciidlist[] = {
-	INTEL_I830_IDS(&intel_i830_info),
-	INTEL_I845G_IDS(&intel_i845g_info),
-	INTEL_I85X_IDS(&intel_i85x_info),
-	INTEL_I865G_IDS(&intel_i865g_info),
-	INTEL_I915G_IDS(&intel_i915g_info),
-	INTEL_I915GM_IDS(&intel_i915gm_info),
-	INTEL_I945G_IDS(&intel_i945g_info),
-	INTEL_I945GM_IDS(&intel_i945gm_info),
-	INTEL_I965G_IDS(&intel_i965g_info),
-	INTEL_G33_IDS(&intel_g33_info),
-	INTEL_I965GM_IDS(&intel_i965gm_info),
-	INTEL_GM45_IDS(&intel_gm45_info),
-	INTEL_G45_IDS(&intel_g45_info),
-	INTEL_PINEVIEW_G_IDS(&intel_pineview_g_info),
-	INTEL_PINEVIEW_M_IDS(&intel_pineview_m_info),
-	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
-	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
-	INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info),
-	INTEL_SNB_D_GT2_IDS(&intel_sandybridge_d_gt2_info),
-	INTEL_SNB_M_GT1_IDS(&intel_sandybridge_m_gt1_info),
-	INTEL_SNB_M_GT2_IDS(&intel_sandybridge_m_gt2_info),
-	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
-	INTEL_IVB_M_GT1_IDS(&intel_ivybridge_m_gt1_info),
-	INTEL_IVB_M_GT2_IDS(&intel_ivybridge_m_gt2_info),
-	INTEL_IVB_D_GT1_IDS(&intel_ivybridge_d_gt1_info),
-	INTEL_IVB_D_GT2_IDS(&intel_ivybridge_d_gt2_info),
-	INTEL_HSW_GT1_IDS(&intel_haswell_gt1_info),
-	INTEL_HSW_GT2_IDS(&intel_haswell_gt2_info),
-	INTEL_HSW_GT3_IDS(&intel_haswell_gt3_info),
-	INTEL_VLV_IDS(&intel_valleyview_info),
-	INTEL_BDW_GT1_IDS(&intel_broadwell_gt1_info),
-	INTEL_BDW_GT2_IDS(&intel_broadwell_gt2_info),
-	INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
-	INTEL_BDW_RSVD_IDS(&intel_broadwell_rsvd_info),
-	INTEL_CHV_IDS(&intel_cherryview_info),
-	INTEL_SKL_GT1_IDS(&intel_skylake_gt1_info),
-	INTEL_SKL_GT2_IDS(&intel_skylake_gt2_info),
-	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
-	INTEL_SKL_GT4_IDS(&intel_skylake_gt4_info),
-	INTEL_BXT_IDS(&intel_broxton_info),
-	INTEL_GLK_IDS(&intel_geminilake_info),
-	INTEL_KBL_GT1_IDS(&intel_kabylake_gt1_info),
-	INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
-	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
-	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
-	INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info),
-	INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
-	INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
-	INTEL_CFL_H_GT1_IDS(&intel_coffeelake_gt1_info),
-	INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
-	INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
-	INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
-	INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
-	INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
-	INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
-	INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
-	INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info),
-	INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info),
-	INTEL_CNL_IDS(&intel_cannonlake_info),
-	INTEL_ICL_11_IDS(&intel_icelake_11_info),
-	INTEL_EHL_IDS(&intel_elkhartlake_info),
-	INTEL_TGL_12_IDS(&intel_tigerlake_12_info),
+	INTEL_I830_IDS(&i830_info),
+	INTEL_I845G_IDS(&i845g_info),
+	INTEL_I85X_IDS(&i85x_info),
+	INTEL_I865G_IDS(&i865g_info),
+	INTEL_I915G_IDS(&i915g_info),
+	INTEL_I915GM_IDS(&i915gm_info),
+	INTEL_I945G_IDS(&i945g_info),
+	INTEL_I945GM_IDS(&i945gm_info),
+	INTEL_I965G_IDS(&i965g_info),
+	INTEL_G33_IDS(&g33_info),
+	INTEL_I965GM_IDS(&i965gm_info),
+	INTEL_GM45_IDS(&gm45_info),
+	INTEL_G45_IDS(&g45_info),
+	INTEL_PINEVIEW_G_IDS(&pnv_g_info),
+	INTEL_PINEVIEW_M_IDS(&pnv_m_info),
+	INTEL_IRONLAKE_D_IDS(&ilk_d_info),
+	INTEL_IRONLAKE_M_IDS(&ilk_m_info),
+	INTEL_SNB_D_GT1_IDS(&snb_d_gt1_info),
+	INTEL_SNB_D_GT2_IDS(&snb_d_gt2_info),
+	INTEL_SNB_M_GT1_IDS(&snb_m_gt1_info),
+	INTEL_SNB_M_GT2_IDS(&snb_m_gt2_info),
+	INTEL_IVB_Q_IDS(&ivb_q_info), /* must be first IVB */
+	INTEL_IVB_M_GT1_IDS(&ivb_m_gt1_info),
+	INTEL_IVB_M_GT2_IDS(&ivb_m_gt2_info),
+	INTEL_IVB_D_GT1_IDS(&ivb_d_gt1_info),
+	INTEL_IVB_D_GT2_IDS(&ivb_d_gt2_info),
+	INTEL_HSW_GT1_IDS(&hsw_gt1_info),
+	INTEL_HSW_GT2_IDS(&hsw_gt2_info),
+	INTEL_HSW_GT3_IDS(&hsw_gt3_info),
+	INTEL_VLV_IDS(&vlv_info),
+	INTEL_BDW_GT1_IDS(&bdw_gt1_info),
+	INTEL_BDW_GT2_IDS(&bdw_gt2_info),
+	INTEL_BDW_GT3_IDS(&bdw_gt3_info),
+	INTEL_BDW_RSVD_IDS(&bdw_rsvd_info),
+	INTEL_CHV_IDS(&chv_info),
+	INTEL_SKL_GT1_IDS(&skl_gt1_info),
+	INTEL_SKL_GT2_IDS(&skl_gt2_info),
+	INTEL_SKL_GT3_IDS(&skl_gt3_info),
+	INTEL_SKL_GT4_IDS(&skl_gt4_info),
+	INTEL_BXT_IDS(&bxt_info),
+	INTEL_GLK_IDS(&glk_info),
+	INTEL_KBL_GT1_IDS(&kbl_gt1_info),
+	INTEL_KBL_GT2_IDS(&kbl_gt2_info),
+	INTEL_KBL_GT3_IDS(&kbl_gt3_info),
+	INTEL_KBL_GT4_IDS(&kbl_gt3_info),
+	INTEL_AML_KBL_GT2_IDS(&kbl_gt2_info),
+	INTEL_CFL_S_GT1_IDS(&cfl_gt1_info),
+	INTEL_CFL_S_GT2_IDS(&cfl_gt2_info),
+	INTEL_CFL_H_GT1_IDS(&cfl_gt1_info),
+	INTEL_CFL_H_GT2_IDS(&cfl_gt2_info),
+	INTEL_CFL_U_GT2_IDS(&cfl_gt2_info),
+	INTEL_CFL_U_GT3_IDS(&cfl_gt3_info),
+	INTEL_WHL_U_GT1_IDS(&cfl_gt1_info),
+	INTEL_WHL_U_GT2_IDS(&cfl_gt2_info),
+	INTEL_AML_CFL_GT2_IDS(&cfl_gt2_info),
+	INTEL_WHL_U_GT3_IDS(&cfl_gt3_info),
+	INTEL_CML_GT1_IDS(&cfl_gt1_info),
+	INTEL_CML_GT2_IDS(&cfl_gt2_info),
+	INTEL_CML_U_GT1_IDS(&cfl_gt1_info),
+	INTEL_CML_U_GT2_IDS(&cfl_gt2_info),
+	INTEL_CNL_IDS(&cnl_info),
+	INTEL_ICL_11_IDS(&icl_info),
+	INTEL_EHL_IDS(&ehl_info),
+	INTEL_TGL_12_IDS(&tgl_info),
 	{0, 0, 0}
 };
 MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -1003,6 +1006,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return err > 0 ? -ENOTTY : err;
 	}
 
+	err = i915_perf_selftests(pdev);
+	if (err) {
+		i915_pci_remove(pdev);
+		return err > 0 ? -ENOTTY : err;
+	}
+
 	return 0;
 }
 
@@ -1045,7 +1054,12 @@ static int __init i915_init(void)
 		return 0;
 	}
 
-	return pci_register_driver(&i915_pci_driver);
+	err = pci_register_driver(&i915_pci_driver);
+	if (err)
+		return err;
+
+	i915_perf_sysctl_register();
+	return 0;
 }
 
 static void __exit i915_exit(void)
@@ -1053,6 +1067,7 @@ static void __exit i915_exit(void)
 	if (!i915_pci_driver.driver.owner)
 		return;
 
+	i915_perf_sysctl_unregister();
 	pci_unregister_driver(&i915_pci_driver);
 	i915_globals_exit();
 }
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2ae14bc..0f556d8 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -387,6 +387,8 @@ struct i915_oa_config_bo {
 	struct i915_vma *vma;
 };
 
+static struct ctl_table_header *sysctl_header;
+
 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
 
 void i915_oa_config_release(struct kref *ref)
@@ -1777,6 +1779,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
 	*cs++ = MI_MATH_ADD;
 	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
 
+	*cs++ = MI_ARB_CHECK;
+
 	/*
 	 * Transfer the result into the predicate register to be used for the
 	 * predicated jump.
@@ -1966,7 +1970,9 @@ static int emit_oa_config(struct i915_perf_stream *stream,
 	if (err)
 		goto err_vma_put;
 
+	intel_engine_pm_get(ce->engine);
 	rq = i915_request_create(ce);
+	intel_engine_pm_put(ce->engine);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_vma_unpin;
@@ -2153,9 +2159,7 @@ static int gen8_modify_context(struct intel_context *ce,
 	struct i915_request *rq;
 	int err;
 
-	lockdep_assert_held(&ce->pin_mutex);
-
-	rq = i915_request_create(ce->engine->kernel_context);
+	rq = intel_engine_create_kernel_request(ce->engine);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
@@ -2197,17 +2201,14 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
 		if (ce->engine->class != RENDER_CLASS)
 			continue;
 
-		err = intel_context_lock_pinned(ce);
-		if (err)
-			break;
+		/* Otherwise OA settings will be set upon first use */
+		if (!intel_context_pin_if_active(ce))
+			continue;
 
 		flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
+		err = gen8_modify_context(ce, flex, count);
 
-		/* Otherwise OA settings will be set upon first use */
-		if (intel_context_is_pinned(ce))
-			err = gen8_modify_context(ce, flex, count);
-
-		intel_context_unlock_pinned(ce);
+		intel_context_unpin(ce);
 		if (err)
 			break;
 	}
@@ -2317,9 +2318,6 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream,
 	 */
 	spin_lock(&i915->gem.contexts.lock);
 	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
-		if (ctx == i915->kernel_context)
-			continue;
-
 		if (!kref_get_unless_zero(&ctx->ref))
 			continue;
 
@@ -2455,6 +2453,13 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
 	return emit_oa_config(stream, oa_config, oa_context(stream));
 }
 
+static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
+{
+	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
+			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
+			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+}
+
 static int gen12_enable_metric_set(struct i915_perf_stream *stream)
 {
 	struct intel_uncore *uncore = stream->uncore;
@@ -2468,12 +2473,10 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
 			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
 					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
 			   /*
-			    * If the user didn't require OA reports, instruct the
-			    * hardware not to emit ctx switch reports.
+			    * If the user didn't require OA reports, instruct
+			    * the hardware not to emit ctx switch reports.
 			    */
-			   !(stream->sample_flags & SAMPLE_OA_REPORT) ?
-			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS) :
-			   _MASKED_BIT_DISABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS));
+			   oag_report_ctx_switches(stream));
 
 	intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
 			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
@@ -3101,7 +3104,7 @@ static void i915_perf_enable_locked(struct i915_perf_stream *stream)
 		stream->ops->enable(stream);
 
 	if (stream->hold_preemption)
-		i915_gem_context_set_nopreempt(stream->ctx);
+		intel_context_set_nopreempt(stream->pinned_ctx);
 }
 
 /**
@@ -3127,7 +3130,7 @@ static void i915_perf_disable_locked(struct i915_perf_stream *stream)
 	stream->enabled = false;
 
 	if (stream->hold_preemption)
-		i915_gem_context_clear_nopreempt(stream->ctx);
+		intel_context_clear_nopreempt(stream->pinned_ctx);
 
 	if (stream->ops->disable)
 		stream->ops->disable(stream);
@@ -3981,7 +3984,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
 	struct i915_perf *perf = &to_i915(dev)->perf;
 	struct drm_i915_perf_oa_config *args = data;
 	struct i915_oa_config *oa_config, *tmp;
-	static struct i915_oa_reg *regs;
+	struct i915_oa_reg *regs;
 	int err, id;
 
 	if (!perf->i915) {
@@ -4219,7 +4222,7 @@ static struct ctl_table dev_root[] = {
 };
 
 /**
- * i915_perf_init - initialize i915-perf state on module load
+ * i915_perf_init - initialize i915-perf state on module bind
  * @i915: i915 device instance
  *
  * Initializes i915-perf state without exposing anything to userspace.
@@ -4336,7 +4339,6 @@ void i915_perf_init(struct drm_i915_private *i915)
 
 		oa_sample_rate_hard_limit = 1000 *
 			(RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2);
-		perf->sysctl_header = register_sysctl_table(dev_root);
 
 		mutex_init(&perf->metrics_lock);
 		idr_init(&perf->metrics_idr);
@@ -4372,6 +4374,16 @@ static int destroy_config(int id, void *p, void *data)
 	return 0;
 }
 
+void i915_perf_sysctl_register(void)
+{
+	sysctl_header = register_sysctl_table(dev_root);
+}
+
+void i915_perf_sysctl_unregister(void)
+{
+	unregister_sysctl_table(sysctl_header);
+}
+
 /**
  * i915_perf_fini - Counter part to i915_perf_init()
  * @i915: i915 device instance
@@ -4386,8 +4398,6 @@ void i915_perf_fini(struct drm_i915_private *i915)
 	idr_for_each(&perf->metrics_idr, destroy_config, perf);
 	idr_destroy(&perf->metrics_idr);
 
-	unregister_sysctl_table(perf->sysctl_header);
-
 	memset(&perf->ops, 0, sizeof(perf->ops));
 	perf->i915 = NULL;
 }
diff --git a/drivers/gpu/drm/i915/i915_perf.h b/drivers/gpu/drm/i915/i915_perf.h
index 4ceebce..882fdd0 100644
--- a/drivers/gpu/drm/i915/i915_perf.h
+++ b/drivers/gpu/drm/i915/i915_perf.h
@@ -23,6 +23,8 @@ void i915_perf_fini(struct drm_i915_private *i915);
 void i915_perf_register(struct drm_i915_private *i915);
 void i915_perf_unregister(struct drm_i915_private *i915);
 int i915_perf_ioctl_version(void);
+void i915_perf_sysctl_register(void);
+void i915_perf_sysctl_unregister(void);
 
 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index 74ddc20..45e5814 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -380,7 +380,6 @@ struct i915_perf {
 	struct drm_i915_private *i915;
 
 	struct kobject *metrics_kobj;
-	struct ctl_table_header *sysctl_header;
 
 	/*
 	 * Lock associated with adding/modifying/removing OA configs
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d6d2e6f..28a82c8 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -259,6 +259,16 @@ add_sample(struct i915_pmu_sample *sample, u32 val)
 	sample->cur += val;
 }
 
+static bool exclusive_mmio_access(const struct drm_i915_private *i915)
+{
+	/*
+	 * We have to avoid concurrent mmio cache line access on gen7 or
+	 * risk a machine hang. For a fun history lesson dig out the old
+	 * userspace intel_gpu_top and run it on Ivybridge or Haswell!
+	 */
+	return IS_GEN(i915, 7);
+}
+
 static void
 engines_sample(struct intel_gt *gt, unsigned int period_ns)
 {
@@ -269,8 +279,12 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
 	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
 		return;
 
+	if (!intel_gt_pm_is_awake(gt))
+		return;
+
 	for_each_engine(engine, gt, id) {
 		struct intel_engine_pmu *pmu = &engine->pmu;
+		spinlock_t *mmio_lock;
 		unsigned long flags;
 		bool busy;
 		u32 val;
@@ -278,7 +292,12 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
 		if (!intel_engine_pm_get_if_awake(engine))
 			continue;
 
-		spin_lock_irqsave(&engine->uncore->lock, flags);
+		mmio_lock = NULL;
+		if (exclusive_mmio_access(i915))
+			mmio_lock = &engine->uncore->lock;
+
+		if (unlikely(mmio_lock))
+			spin_lock_irqsave(mmio_lock, flags);
 
 		val = ENGINE_READ_FW(engine, RING_CTL);
 		if (val == 0) /* powerwell off => engine idle */
@@ -309,7 +328,8 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
 			add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
 
 skip:
-		spin_unlock_irqrestore(&engine->uncore->lock, flags);
+		if (unlikely(mmio_lock))
+			spin_unlock_irqrestore(mmio_lock, flags);
 		intel_engine_pm_put_async(engine);
 	}
 }
@@ -320,6 +340,13 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
 	sample->cur += mul_u32_u32(val, mul);
 }
 
+static bool frequency_sampling_enabled(struct i915_pmu *pmu)
+{
+	return pmu->enable &
+	       (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
+		config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY));
+}
+
 static void
 frequency_sample(struct intel_gt *gt, unsigned int period_ns)
 {
@@ -328,19 +355,33 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
 	struct i915_pmu *pmu = &i915->pmu;
 	struct intel_rps *rps = &gt->rps;
 
+	if (!frequency_sampling_enabled(pmu))
+		return;
+
+	/* Report 0/0 (actual/requested) frequency while parked. */
+	if (!intel_gt_pm_get_if_awake(gt))
+		return;
+
 	if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
 		u32 val;
 
-		val = rps->cur_freq;
-		if (intel_gt_pm_get_if_awake(gt)) {
-			val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
-			val = intel_get_cagf(rps, val);
-			intel_gt_pm_put_async(gt);
-		}
+		/*
+		 * We take a quick peek here without using forcewake
+		 * so that we don't perturb the system under observation
+		 * (forcewake => !rc6 => increased power use). We expect
+		 * that if the read fails because it is outside of the
+		 * mmio power well, then it will return 0 -- in which
+		 * case we assume the system is running at the intended
+		 * frequency. Fortunately, the read should rarely fail!
+		 */
+		val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1);
+		if (val)
+			val = intel_rps_get_cagf(rps, val);
+		else
+			val = rps->cur_freq;
 
 		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
-				intel_gpu_freq(rps, val),
-				period_ns / 1000);
+				intel_gpu_freq(rps, val), period_ns / 1000);
 	}
 
 	if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
@@ -348,6 +389,8 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
 				intel_gpu_freq(rps, rps->cur_freq),
 				period_ns / 1000);
 	}
+
+	intel_gt_pm_put_async(gt);
 }
 
 static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 094011b..6cc55c1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2244,26 +2244,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 				    MG_DP_MODE_LN1_ACU_PORT1)
 #define   MG_DP_MODE_CFG_DP_X2_MODE			(1 << 7)
 #define   MG_DP_MODE_CFG_DP_X1_MODE			(1 << 6)
-#define   MG_DP_MODE_CFG_TR2PWR_GATING			(1 << 5)
-#define   MG_DP_MODE_CFG_TRPWR_GATING			(1 << 4)
-#define   MG_DP_MODE_CFG_CLNPWR_GATING			(1 << 3)
-#define   MG_DP_MODE_CFG_DIGPWR_GATING			(1 << 2)
-#define   MG_DP_MODE_CFG_GAONPWR_GATING			(1 << 1)
-
-#define MG_MISC_SUS0_PORT1				0x168814
-#define MG_MISC_SUS0_PORT2				0x169814
-#define MG_MISC_SUS0_PORT3				0x16A814
-#define MG_MISC_SUS0_PORT4				0x16B814
-#define MG_MISC_SUS0(tc_port) \
-	_MMIO(_PORT(tc_port, MG_MISC_SUS0_PORT1, MG_MISC_SUS0_PORT2))
-#define   MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK	(3 << 14)
-#define   MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(x)	((x) << 14)
-#define   MG_MISC_SUS0_CFG_TR2PWR_GATING		(1 << 12)
-#define   MG_MISC_SUS0_CFG_CL2PWR_GATING		(1 << 11)
-#define   MG_MISC_SUS0_CFG_GAONPWR_GATING		(1 << 10)
-#define   MG_MISC_SUS0_CFG_TRPWR_GATING			(1 << 7)
-#define   MG_MISC_SUS0_CFG_CL1PWR_GATING		(1 << 6)
-#define   MG_MISC_SUS0_CFG_DGPWR_GATING			(1 << 5)
 
 /* The spec defines this only for BXT PHY0, but lets assume that this
  * would exist for PHY1 too if it had a second channel.
@@ -5048,14 +5028,20 @@ enum {
 #define   BLM_PCH_POLARITY			(1 << 29)
 #define BLC_PWM_PCH_CTL2	_MMIO(0xc8254)
 
-#define UTIL_PIN_CTL		_MMIO(0x48400)
-#define   UTIL_PIN_ENABLE	(1 << 31)
-
-#define   UTIL_PIN_PIPE(x)     ((x) << 29)
-#define   UTIL_PIN_PIPE_MASK   (3 << 29)
-#define   UTIL_PIN_MODE_PWM    (1 << 24)
-#define   UTIL_PIN_MODE_MASK   (0xf << 24)
-#define   UTIL_PIN_POLARITY    (1 << 22)
+#define UTIL_PIN_CTL			_MMIO(0x48400)
+#define   UTIL_PIN_ENABLE		(1 << 31)
+#define   UTIL_PIN_PIPE_MASK		(3 << 29)
+#define   UTIL_PIN_PIPE(x)		((x) << 29)
+#define   UTIL_PIN_MODE_MASK		(0xf << 24)
+#define   UTIL_PIN_MODE_DATA		(0 << 24)
+#define   UTIL_PIN_MODE_PWM		(1 << 24)
+#define   UTIL_PIN_MODE_VBLANK		(4 << 24)
+#define   UTIL_PIN_MODE_VSYNC		(5 << 24)
+#define   UTIL_PIN_MODE_EYE_LEVEL	(8 << 24)
+#define   UTIL_PIN_OUTPUT_DATA		(1 << 23)
+#define   UTIL_PIN_POLARITY		(1 << 22)
+#define   UTIL_PIN_DIRECTION_INPUT	(1 << 19)
+#define   UTIL_PIN_INPUT_DATA		(1 << 16)
 
 /* BXT backlight register definition. */
 #define _BXT_BLC_PWM_CTL1			0xC8250
@@ -5757,7 +5743,8 @@ enum {
 #define   PIPECONF_DOUBLE_WIDE	(1 << 30)
 #define   I965_PIPECONF_ACTIVE	(1 << 30)
 #define   PIPECONF_DSI_PLL_LOCKED	(1 << 29) /* vlv & pipe A only */
-#define   PIPECONF_FRAME_START_DELAY_MASK (3 << 27)
+#define   PIPECONF_FRAME_START_DELAY_MASK	(3 << 27) /* pre-hsw */
+#define   PIPECONF_FRAME_START_DELAY(x)		((x) << 27) /* pre-hsw: 0-3 */
 #define   PIPECONF_SINGLE_WIDE	0
 #define   PIPECONF_PIPE_UNLOCKED 0
 #define   PIPECONF_PIPE_LOCKED	(1 << 25)
@@ -6365,6 +6352,7 @@ enum {
 #define   DISPPLANE_RGBX101010			(0x8 << 26)
 #define   DISPPLANE_RGBA101010			(0x9 << 26)
 #define   DISPPLANE_BGRX101010			(0xa << 26)
+#define   DISPPLANE_BGRA101010			(0xb << 26)
 #define   DISPPLANE_RGBX161616			(0xc << 26)
 #define   DISPPLANE_RGBX888			(0xe << 26)
 #define   DISPPLANE_RGBA888			(0xf << 26)
@@ -6639,12 +6627,15 @@ enum {
 #define   SP_ENABLE			(1 << 31)
 #define   SP_GAMMA_ENABLE		(1 << 30)
 #define   SP_PIXFORMAT_MASK		(0xf << 26)
-#define   SP_FORMAT_YUV422		(0 << 26)
-#define   SP_FORMAT_BGR565		(5 << 26)
-#define   SP_FORMAT_BGRX8888		(6 << 26)
-#define   SP_FORMAT_BGRA8888		(7 << 26)
-#define   SP_FORMAT_RGBX1010102		(8 << 26)
-#define   SP_FORMAT_RGBA1010102		(9 << 26)
+#define   SP_FORMAT_YUV422		(0x0 << 26)
+#define   SP_FORMAT_8BPP		(0x2 << 26)
+#define   SP_FORMAT_BGR565		(0x5 << 26)
+#define   SP_FORMAT_BGRX8888		(0x6 << 26)
+#define   SP_FORMAT_BGRA8888		(0x7 << 26)
+#define   SP_FORMAT_RGBX1010102		(0x8 << 26)
+#define   SP_FORMAT_RGBA1010102		(0x9 << 26)
+#define   SP_FORMAT_BGRX1010102		(0xa << 26) /* CHV pipe B */
+#define   SP_FORMAT_BGRA1010102		(0xb << 26) /* CHV pipe B */
 #define   SP_FORMAT_RGBX8888		(0xe << 26)
 #define   SP_FORMAT_RGBA8888		(0xf << 26)
 #define   SP_ALPHA_PREMULTIPLY		(1 << 23) /* CHV pipe B */
@@ -6795,6 +6786,7 @@ enum {
 #define   PLANE_CTL_YUV422_VYUY			(3 << 16)
 #define   PLANE_CTL_RENDER_DECOMPRESSION_ENABLE	(1 << 15)
 #define   PLANE_CTL_TRICKLE_FEED_DISABLE	(1 << 14)
+#define   PLANE_CTL_CLEAR_COLOR_DISABLE		(1 << 13) /* TGL+ */
 #define   PLANE_CTL_PLANE_GAMMA_DISABLE		(1 << 13) /* Pre-GLK */
 #define   PLANE_CTL_TILED_MASK			(0x7 << 10)
 #define   PLANE_CTL_TILED_LINEAR		(0 << 10)
@@ -6802,6 +6794,7 @@ enum {
 #define   PLANE_CTL_TILED_Y			(4 << 10)
 #define   PLANE_CTL_TILED_YF			(5 << 10)
 #define   PLANE_CTL_FLIP_HORIZONTAL		(1 << 8)
+#define   PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE	(1 << 4) /* TGL+ */
 #define   PLANE_CTL_ALPHA_MASK			(0x3 << 4) /* Pre-GLK */
 #define   PLANE_CTL_ALPHA_DISABLE		(0 << 4)
 #define   PLANE_CTL_ALPHA_SW_PREMULTIPLY	(2 << 4)
@@ -7512,11 +7505,15 @@ enum {
 #define GEN8_DE_PORT_IMR _MMIO(0x44444)
 #define GEN8_DE_PORT_IIR _MMIO(0x44448)
 #define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define  DSI1_NON_TE			(1 << 31)
+#define  DSI0_NON_TE			(1 << 30)
 #define  ICL_AUX_CHANNEL_E		(1 << 29)
 #define  CNL_AUX_CHANNEL_F		(1 << 28)
 #define  GEN9_AUX_CHANNEL_D		(1 << 27)
 #define  GEN9_AUX_CHANNEL_C		(1 << 26)
 #define  GEN9_AUX_CHANNEL_B		(1 << 25)
+#define  DSI1_TE			(1 << 24)
+#define  DSI0_TE			(1 << 23)
 #define  BXT_DE_PORT_HP_DDIC		(1 << 5)
 #define  BXT_DE_PORT_HP_DDIB		(1 << 4)
 #define  BXT_DE_PORT_HP_DDIA		(1 << 3)
@@ -7731,6 +7728,8 @@ enum {
 					    [TRANSCODER_B] = _CHICKEN_TRANS_B, \
 					    [TRANSCODER_C] = _CHICKEN_TRANS_C, \
 					    [TRANSCODER_D] = _CHICKEN_TRANS_D))
+#define  HSW_FRAME_START_DELAY_MASK	(3 << 27)
+#define  HSW_FRAME_START_DELAY(x)	((x) << 27) /* 0-3 */
 #define  VSC_DATA_SEL_SOFTWARE_CONTROL	(1 << 25) /* GLK and CNL+ */
 #define  DDI_TRAINING_OVERRIDE_ENABLE	(1 << 19)
 #define  DDI_TRAINING_OVERRIDE_VALUE	(1 << 18)
@@ -7754,6 +7753,14 @@ enum {
 #define GEN7_MSG_CTL	_MMIO(0x45010)
 #define  WAIT_FOR_PCH_RESET_ACK		(1 << 1)
 #define  WAIT_FOR_PCH_FLR_ACK		(1 << 0)
+
+#define BW_BUDDY1_CTL			_MMIO(0x45140)
+#define BW_BUDDY2_CTL			_MMIO(0x45150)
+#define   BW_BUDDY_DISABLE		REG_BIT(31)
+
+#define BW_BUDDY1_PAGE_MASK		_MMIO(0x45144)
+#define BW_BUDDY2_PAGE_MASK		_MMIO(0x45154)
+
 #define HSW_NDE_RSTWRN_OPT	_MMIO(0x46408)
 #define  RESET_PCH_HANDSHAKE_ENABLE	(1 << 4)
 
@@ -7911,6 +7918,10 @@ enum {
 #define   PIXEL_ROUNDING_TRUNC_FB_PASSTHRU 	(1 << 15)
 #define   PER_PIXEL_ALPHA_BYPASS_EN		(1 << 7)
 
+#define FF_MODE2			_MMIO(0x6604)
+#define   FF_MODE2_TDS_TIMER_MASK	REG_GENMASK(23, 16)
+#define   FF_MODE2_TDS_TIMER_128	REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4)
+
 /* PCH */
 
 #define PCH_DISPLAY_BASE	0xc0000u
@@ -8099,6 +8110,10 @@ enum {
 
 #define SHOTPLUG_CTL_TC				_MMIO(0xc4034)
 #define   ICP_TC_HPD_ENABLE(tc_port)		(8 << (tc_port) * 4)
+
+#define SHPD_FILTER_CNT				_MMIO(0xc4038)
+#define   SHPD_FILTER_CNT_500_ADJ		0x001D9
+
 /* Icelake DSC Rate Control Range Parameter Registers */
 #define DSCA_RC_RANGE_PARAMETERS_0		_MMIO(0x6B240)
 #define DSCA_RC_RANGE_PARAMETERS_0_UDW		_MMIO(0x6B240 + 4)
@@ -8449,10 +8464,8 @@ enum {
 #define  TRANS_STATE_MASK       (1 << 30)
 #define  TRANS_STATE_DISABLE    (0 << 30)
 #define  TRANS_STATE_ENABLE     (1 << 30)
-#define  TRANS_FSYNC_DELAY_HB1  (0 << 27)
-#define  TRANS_FSYNC_DELAY_HB2  (1 << 27)
-#define  TRANS_FSYNC_DELAY_HB3  (2 << 27)
-#define  TRANS_FSYNC_DELAY_HB4  (3 << 27)
+#define  TRANS_FRAME_START_DELAY_MASK	(3 << 27) /* ibx */
+#define  TRANS_FRAME_START_DELAY(x)	((x) << 27) /* ibx: 0-3 */
 #define  TRANS_INTERLACE_MASK   (7 << 21)
 #define  TRANS_PROGRESSIVE      (0 << 21)
 #define  TRANS_INTERLACED       (3 << 21)
@@ -8473,6 +8486,7 @@ enum {
 #define  TRANS_CHICKEN2_TIMING_OVERRIDE			(1 << 31)
 #define  TRANS_CHICKEN2_FDI_POLARITY_REVERSED		(1 << 29)
 #define  TRANS_CHICKEN2_FRAME_START_DELAY_MASK		(3 << 27)
+#define  TRANS_CHICKEN2_FRAME_START_DELAY(x)		((x) << 27) /* 0-3 */
 #define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER	(1 << 26)
 #define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH	(1 << 25)
 
@@ -9675,6 +9689,7 @@ enum skl_power_gate {
 #define  TRANS_DDI_EDP_INPUT_A_ONOFF	(4 << 12)
 #define  TRANS_DDI_EDP_INPUT_B_ONOFF	(5 << 12)
 #define  TRANS_DDI_EDP_INPUT_C_ONOFF	(6 << 12)
+#define  TRANS_DDI_EDP_INPUT_D_ONOFF	(7 << 12)
 #define  TRANS_DDI_MST_TRANSPORT_SELECT_MASK	REG_GENMASK(11, 10)
 #define  TRANS_DDI_MST_TRANSPORT_SELECT(trans)	\
 	REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
@@ -10780,6 +10795,57 @@ enum skl_power_gate {
 #define  ICL_ESC_CLK_DIV_SHIFT			0
 #define DSI_MAX_ESC_CLK			20000		/* in KHz */
 
+#define _DSI_CMD_FRMCTL_0		0x6b034
+#define _DSI_CMD_FRMCTL_1		0x6b834
+#define DSI_CMD_FRMCTL(port)		_MMIO_PORT(port,	\
+						   _DSI_CMD_FRMCTL_0,\
+						   _DSI_CMD_FRMCTL_1)
+#define   DSI_FRAME_UPDATE_REQUEST		(1 << 31)
+#define   DSI_PERIODIC_FRAME_UPDATE_ENABLE	(1 << 29)
+#define   DSI_NULL_PACKET_ENABLE		(1 << 28)
+#define   DSI_FRAME_IN_PROGRESS			(1 << 0)
+
+#define _DSI_INTR_MASK_REG_0		0x6b070
+#define _DSI_INTR_MASK_REG_1		0x6b870
+#define DSI_INTR_MASK_REG(port)		_MMIO_PORT(port,	\
+						   _DSI_INTR_MASK_REG_0,\
+						   _DSI_INTR_MASK_REG_1)
+
+#define _DSI_INTR_IDENT_REG_0		0x6b074
+#define _DSI_INTR_IDENT_REG_1		0x6b874
+#define DSI_INTR_IDENT_REG(port)	_MMIO_PORT(port,	\
+						   _DSI_INTR_IDENT_REG_0,\
+						   _DSI_INTR_IDENT_REG_1)
+#define   DSI_TE_EVENT				(1 << 31)
+#define   DSI_RX_DATA_OR_BTA_TERMINATED		(1 << 30)
+#define   DSI_TX_DATA				(1 << 29)
+#define   DSI_ULPS_ENTRY_DONE			(1 << 28)
+#define   DSI_NON_TE_TRIGGER_RECEIVED		(1 << 27)
+#define   DSI_HOST_CHKSUM_ERROR			(1 << 26)
+#define   DSI_HOST_MULTI_ECC_ERROR		(1 << 25)
+#define   DSI_HOST_SINGL_ECC_ERROR		(1 << 24)
+#define   DSI_HOST_CONTENTION_DETECTED		(1 << 23)
+#define   DSI_HOST_FALSE_CONTROL_ERROR		(1 << 22)
+#define   DSI_HOST_TIMEOUT_ERROR		(1 << 21)
+#define   DSI_HOST_LOW_POWER_TX_SYNC_ERROR	(1 << 20)
+#define   DSI_HOST_ESCAPE_MODE_ENTRY_ERROR	(1 << 19)
+#define   DSI_FRAME_UPDATE_DONE			(1 << 16)
+#define   DSI_PROTOCOL_VIOLATION_REPORTED	(1 << 15)
+#define   DSI_INVALID_TX_LENGTH			(1 << 13)
+#define   DSI_INVALID_VC			(1 << 12)
+#define   DSI_INVALID_DATA_TYPE			(1 << 11)
+#define   DSI_PERIPHERAL_CHKSUM_ERROR		(1 << 10)
+#define   DSI_PERIPHERAL_MULTI_ECC_ERROR	(1 << 9)
+#define   DSI_PERIPHERAL_SINGLE_ECC_ERROR	(1 << 8)
+#define   DSI_PERIPHERAL_CONTENTION_DETECTED	(1 << 7)
+#define   DSI_PERIPHERAL_FALSE_CTRL_ERROR	(1 << 6)
+#define   DSI_PERIPHERAL_TIMEOUT_ERROR		(1 << 5)
+#define   DSI_PERIPHERAL_LP_TX_SYNC_ERROR	(1 << 4)
+#define   DSI_PERIPHERAL_ESC_MODE_ENTRY_CMD_ERR	(1 << 3)
+#define   DSI_EOT_SYNC_ERROR			(1 << 2)
+#define   DSI_SOT_SYNC_ERROR			(1 << 1)
+#define   DSI_SOT_ERROR				(1 << 0)
+
 /* Gen4+ Timestamp and Pipe Frame time stamp registers */
 #define GEN4_TIMESTAMP		_MMIO(0x2358)
 #define ILK_TIMESTAMP_HI	_MMIO(0x70070)
@@ -11384,6 +11450,7 @@ enum skl_power_gate {
 #define  CMD_MODE_TE_GATE		(0x1 << 28)
 #define  VIDEO_MODE_SYNC_EVENT		(0x2 << 28)
 #define  VIDEO_MODE_SYNC_PULSE		(0x3 << 28)
+#define  TE_SOURCE_GPIO			(1 << 27)
 #define  LINK_READY			(1 << 20)
 #define  PIX_FMT_MASK			(0x3 << 16)
 #define  PIX_FMT_SHIFT			16
@@ -11636,13 +11703,18 @@ enum skl_power_gate {
 /* MOCS (Memory Object Control State) registers */
 #define GEN9_LNCFCMOCS(i)	_MMIO(0xb020 + (i) * 4)	/* L3 Cache Control */
 
-#define GEN9_GFX_MOCS(i)	_MMIO(0xc800 + (i) * 4)	/* Graphics MOCS registers */
-#define GEN9_MFX0_MOCS(i)	_MMIO(0xc900 + (i) * 4)	/* Media 0 MOCS registers */
-#define GEN9_MFX1_MOCS(i)	_MMIO(0xca00 + (i) * 4)	/* Media 1 MOCS registers */
-#define GEN9_VEBOX_MOCS(i)	_MMIO(0xcb00 + (i) * 4)	/* Video MOCS registers */
-#define GEN9_BLT_MOCS(i)	_MMIO(0xcc00 + (i) * 4)	/* Blitter MOCS registers */
-/* Media decoder 2 MOCS registers */
-#define GEN11_MFX2_MOCS(i)	_MMIO(0x10000 + (i) * 4)
+#define __GEN9_RCS0_MOCS0	0xc800
+#define GEN9_GFX_MOCS(i)	_MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
+#define __GEN9_VCS0_MOCS0	0xc900
+#define GEN9_MFX0_MOCS(i)	_MMIO(__GEN9_VCS0_MOCS0 + (i) * 4)
+#define __GEN9_VCS1_MOCS0	0xca00
+#define GEN9_MFX1_MOCS(i)	_MMIO(__GEN9_VCS1_MOCS0 + (i) * 4)
+#define __GEN9_VECS0_MOCS0	0xcb00
+#define GEN9_VEBOX_MOCS(i)	_MMIO(__GEN9_VECS0_MOCS0 + (i) * 4)
+#define __GEN9_BCS0_MOCS0	0xcc00
+#define GEN9_BLT_MOCS(i)	_MMIO(__GEN9_BCS0_MOCS0 + (i) * 4)
+#define __GEN11_VCS2_MOCS0	0x10000
+#define GEN11_MFX2_MOCS(i)	_MMIO(__GEN11_VCS2_MOCS0 + (i) * 4)
 
 #define GEN10_SCRATCH_LNCF2		_MMIO(0xb0a0)
 #define   PMFLUSHDONE_LNICRSDROP	(1 << 20)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 765bec8..be185886 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -57,11 +57,13 @@ static struct i915_global_request {
 
 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
-	return "i915";
+	return dev_name(to_request(fence)->i915->drm.dev);
 }
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
+	const struct i915_gem_context *ctx;
+
 	/*
 	 * The timeline struct (as part of the ppgtt underneath a context)
 	 * may be freed when the request is no longer in use by the GPU.
@@ -74,7 +76,11 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return "signaled";
 
-	return to_request(fence)->gem_context->name ?: "[i915]";
+	ctx = i915_request_gem_context(to_request(fence));
+	if (!ctx)
+		return "[" DRIVER_NAME "]";
+
+	return ctx->name;
 }
 
 static bool i915_fence_signaled(struct dma_fence *fence)
@@ -188,7 +194,7 @@ static void free_capture_list(struct i915_request *request)
 {
 	struct i915_capture_list *capture;
 
-	capture = request->capture_list;
+	capture = fetch_and_zero(&request->capture_list);
 	while (capture) {
 		struct i915_capture_list *next = capture->next;
 
@@ -214,7 +220,7 @@ static void remove_from_engine(struct i915_request *rq)
 		spin_lock(&engine->active.lock);
 		locked = engine;
 	}
-	list_del(&rq->sched.link);
+	list_del_init(&rq->sched.link);
 	spin_unlock_irq(&locked->active.lock);
 }
 
@@ -223,10 +229,7 @@ bool i915_request_retire(struct i915_request *rq)
 	if (!i915_request_completed(rq))
 		return false;
 
-	GEM_TRACE("%s fence %llx:%lld, current %d\n",
-		  rq->engine->name,
-		  rq->fence.context, rq->fence.seqno,
-		  hwsp_seqno(rq));
+	RQ_TRACE(rq, "\n");
 
 	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
 	trace_i915_request_retire(rq);
@@ -272,8 +275,8 @@ bool i915_request_retire(struct i915_request *rq)
 	remove_from_client(rq);
 	list_del(&rq->link);
 
-	intel_context_exit(rq->hw_context);
-	intel_context_unpin(rq->hw_context);
+	intel_context_exit(rq->context);
+	intel_context_unpin(rq->context);
 
 	free_capture_list(rq);
 	i915_sched_node_fini(&rq->sched);
@@ -287,10 +290,7 @@ void i915_request_retire_upto(struct i915_request *rq)
 	struct intel_timeline * const tl = i915_request_timeline(rq);
 	struct i915_request *tmp;
 
-	GEM_TRACE("%s fence %llx:%lld, current %d\n",
-		  rq->engine->name,
-		  rq->fence.context, rq->fence.seqno,
-		  hwsp_seqno(rq));
+	RQ_TRACE(rq, "\n");
 
 	GEM_BUG_ON(!i915_request_completed(rq));
 
@@ -351,10 +351,7 @@ bool __i915_request_submit(struct i915_request *request)
 	struct intel_engine_cs *engine = request->engine;
 	bool result = false;
 
-	GEM_TRACE("%s fence %llx:%lld, current %d\n",
-		  engine->name,
-		  request->fence.context, request->fence.seqno,
-		  hwsp_seqno(request));
+	RQ_TRACE(request, "\n");
 
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->active.lock);
@@ -378,7 +375,7 @@ bool __i915_request_submit(struct i915_request *request)
 	if (i915_request_completed(request))
 		goto xfer;
 
-	if (i915_gem_context_is_banned(request->gem_context))
+	if (intel_context_is_banned(request->context))
 		i915_request_skip(request, -EIO);
 
 	/*
@@ -417,7 +414,7 @@ bool __i915_request_submit(struct i915_request *request)
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
 	    !i915_request_enable_breadcrumb(request))
-		intel_engine_queue_breadcrumbs(engine);
+		intel_engine_signal_breadcrumbs(engine);
 
 	__notify_execute_cb(request);
 
@@ -443,10 +440,7 @@ void __i915_request_unsubmit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 
-	GEM_TRACE("%s fence %llx:%lld, current %d\n",
-		  engine->name,
-		  request->fence.context, request->fence.seqno,
-		  hwsp_seqno(request));
+	RQ_TRACE(request, "\n");
 
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->active.lock);
@@ -588,6 +582,21 @@ request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
 	return kmem_cache_alloc(global.slab_requests, gfp);
 }
 
+static void __i915_request_ctor(void *arg)
+{
+	struct i915_request *rq = arg;
+
+	spin_lock_init(&rq->lock);
+	i915_sched_node_init(&rq->sched);
+	i915_sw_fence_init(&rq->submit, submit_notify);
+	i915_sw_fence_init(&rq->semaphore, semaphore_notify);
+
+	rq->file_priv = NULL;
+	rq->capture_list = NULL;
+
+	INIT_LIST_HEAD(&rq->execute_cb);
+}
+
 struct i915_request *
 __i915_request_create(struct intel_context *ce, gfp_t gfp)
 {
@@ -645,35 +654,31 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 		goto err_free;
 
 	rq->i915 = ce->engine->i915;
-	rq->hw_context = ce;
-	rq->gem_context = ce->gem_context;
+	rq->context = ce;
 	rq->engine = ce->engine;
 	rq->ring = ce->ring;
 	rq->execution_mask = ce->engine->mask;
 
-	rcu_assign_pointer(rq->timeline, tl);
+	RCU_INIT_POINTER(rq->timeline, tl);
+	RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
 	rq->hwsp_seqno = tl->hwsp_seqno;
-	rq->hwsp_cacheline = tl->hwsp_cacheline;
 
 	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
 
-	spin_lock_init(&rq->lock);
 	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
 		       tl->fence_context, seqno);
 
 	/* We bump the ref for the fence chain */
-	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
-	i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
+	i915_sw_fence_reinit(&i915_request_get(rq)->submit);
+	i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
 
-	i915_sched_node_init(&rq->sched);
+	i915_sched_node_reinit(&rq->sched);
 
-	/* No zalloc, must clear what we need by hand */
-	rq->file_priv = NULL;
+	/* No zalloc, everything must be cleared after use */
 	rq->batch = NULL;
-	rq->capture_list = NULL;
-	rq->flags = 0;
-
-	INIT_LIST_HEAD(&rq->execute_cb);
+	GEM_BUG_ON(rq->file_priv);
+	GEM_BUG_ON(rq->capture_list);
+	GEM_BUG_ON(!list_empty(&rq->execute_cb));
 
 	/*
 	 * Reserve space in the ring buffer for all the commands required to
@@ -755,34 +760,37 @@ i915_request_create(struct intel_context *ce)
 static int
 i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
 {
-	struct intel_timeline *tl;
 	struct dma_fence *fence;
 	int err;
 
 	GEM_BUG_ON(i915_request_timeline(rq) ==
 		   rcu_access_pointer(signal->timeline));
 
+	fence = NULL;
 	rcu_read_lock();
-	tl = rcu_dereference(signal->timeline);
-	if (i915_request_started(signal) || !kref_get_unless_zero(&tl->kref))
-		tl = NULL;
-	rcu_read_unlock();
-	if (!tl) /* already started or maybe even completed */
-		return 0;
+	spin_lock_irq(&signal->lock);
+	if (!i915_request_started(signal) &&
+	    !list_is_first(&signal->link,
+			   &rcu_dereference(signal->timeline)->requests)) {
+		struct i915_request *prev = list_prev_entry(signal, link);
 
-	fence = ERR_PTR(-EBUSY);
-	if (mutex_trylock(&tl->mutex)) {
-		fence = NULL;
-		if (!i915_request_started(signal) &&
-		    !list_is_first(&signal->link, &tl->requests)) {
-			signal = list_prev_entry(signal, link);
-			fence = dma_fence_get(&signal->fence);
+		/*
+		 * Peek at the request before us in the timeline. That
+		 * request will only be valid before it is retired, so
+		 * after acquiring a reference to it, confirm that it is
+		 * still part of the signaler's timeline.
+		 */
+		if (i915_request_get_rcu(prev)) {
+			if (list_next_entry(prev, link) == signal)
+				fence = &prev->fence;
+			else
+				i915_request_put(prev);
 		}
-		mutex_unlock(&tl->mutex);
 	}
-	intel_timeline_put(tl);
-	if (IS_ERR_OR_NULL(fence))
-		return PTR_ERR_OR_ZERO(fence);
+	spin_unlock_irq(&signal->lock);
+	rcu_read_unlock();
+	if (!fence)
+		return 0;
 
 	err = 0;
 	if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
@@ -908,18 +916,16 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 			return ret;
 	}
 
-	if (to->engine == from->engine) {
+	if (to->engine == from->engine)
 		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
 						       &from->submit,
 						       I915_FENCE_GFP);
-	} else if (intel_engine_has_semaphores(to->engine) &&
-		   to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
+	else if (intel_context_use_semaphores(to->context))
 		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
-	} else {
+	else
 		ret = i915_sw_fence_await_dma_fence(&to->submit,
 						    &from->fence, 0,
 						    I915_FENCE_GFP);
-	}
 	if (ret < 0)
 		return ret;
 
@@ -959,8 +965,10 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 
 	do {
 		fence = *child++;
-		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+			i915_sw_fence_set_error_once(&rq->submit, fence->error);
 			continue;
+		}
 
 		/*
 		 * Requests on the same timeline are explicitly ordered, along
@@ -1067,8 +1075,10 @@ i915_request_await_execution(struct i915_request *rq,
 
 	do {
 		fence = *child++;
-		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+			i915_sw_fence_set_error_once(&rq->submit, fence->error);
 			continue;
+		}
 
 		/*
 		 * We don't squash repeated fence dependencies here as we
@@ -1244,8 +1254,7 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
 	struct intel_ring *ring = rq->ring;
 	u32 *cs;
 
-	GEM_TRACE("%s fence %llx:%lld\n",
-		  engine->name, rq->fence.context, rq->fence.seqno);
+	RQ_TRACE(rq, "\n");
 
 	/*
 	 * To ensure that this call will not fail, space for its emissions
@@ -1291,8 +1300,8 @@ void __i915_request_queue(struct i915_request *rq,
 
 void i915_request_add(struct i915_request *rq)
 {
-	struct i915_sched_attr attr = rq->gem_context->sched;
 	struct intel_timeline * const tl = i915_request_timeline(rq);
+	struct i915_sched_attr attr = {};
 	struct i915_request *prev;
 
 	lockdep_assert_held(&tl->mutex);
@@ -1302,6 +1311,9 @@ void i915_request_add(struct i915_request *rq)
 
 	prev = __i915_request_commit(rq);
 
+	if (rcu_access_pointer(rq->context->gem_context))
+		attr = i915_request_gem_context(rq)->sched;
+
 	/*
 	 * Boost actual workloads past semaphores!
 	 *
@@ -1597,10 +1609,14 @@ static struct i915_global_request global = { {
 
 int __init i915_global_request_init(void)
 {
-	global.slab_requests = KMEM_CACHE(i915_request,
-					  SLAB_HWCACHE_ALIGN |
-					  SLAB_RECLAIM_ACCOUNT |
-					  SLAB_TYPESAFE_BY_RCU);
+	global.slab_requests =
+		kmem_cache_create("i915_request",
+				  sizeof(struct i915_request),
+				  __alignof__(struct i915_request),
+				  SLAB_HWCACHE_ALIGN |
+				  SLAB_RECLAIM_ACCOUNT |
+				  SLAB_TYPESAFE_BY_RCU,
+				  __i915_request_ctor);
 	if (!global.slab_requests)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 96991d6..0314336 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -28,8 +28,10 @@
 #include <linux/dma-fence.h>
 #include <linux/lockdep.h>
 
+#include "gem/i915_gem_context_types.h"
 #include "gt/intel_context_types.h"
 #include "gt/intel_engine_types.h"
+#include "gt/intel_timeline_types.h"
 
 #include "i915_gem.h"
 #include "i915_scheduler.h"
@@ -41,14 +43,19 @@
 struct drm_file;
 struct drm_i915_gem_object;
 struct i915_request;
-struct intel_timeline;
-struct intel_timeline_cacheline;
 
 struct i915_capture_list {
 	struct i915_capture_list *next;
 	struct i915_vma *vma;
 };
 
+#define RQ_TRACE(rq, fmt, ...) do {					\
+	const struct i915_request *rq__ = (rq);				\
+	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
+		     rq__->fence.context, rq__->fence.seqno,		\
+		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
+} while (0)
+
 enum {
 	/*
 	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
@@ -70,6 +77,38 @@ enum {
 	 * a request is on the various signal_list.
 	 */
 	I915_FENCE_FLAG_SIGNAL,
+
+	/*
+	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
+	 *
+	 * The execution of some requests should not be interrupted. This is
+	 * a sensitive operation as it makes the request super important,
+	 * blocking other higher priority work. Abuse of this flag will
+	 * lead to quality of service issues.
+	 */
+	I915_FENCE_FLAG_NOPREEMPT,
+
+	/*
+	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
+	 *
+	 * A high priority sentinel request may be submitted to clear the
+	 * submission queue. As it will be the only request in-flight, upon
+	 * execution all other active requests will have been preempted and
+	 * unsubmitted. This preemptive pulse is used to re-evaluate the
+	 * in-flight requests, particularly in cases where an active context
+	 * is banned and those active requests need to be cancelled.
+	 */
+	I915_FENCE_FLAG_SENTINEL,
+
+	/*
+	 * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
+	 *
+	 * Some requests are more important than others! In particular, a
+	 * request that the user is waiting on is typically required for
+	 * interactive latency, for which we want to minimise by upclocking
+	 * the GPU. Here we track such boost requests on a per-request basis.
+	 */
+	I915_FENCE_FLAG_BOOST,
 };
 
 /**
@@ -109,9 +148,8 @@ struct i915_request {
 	 * i915_request_free() will then decrement the refcount on the
 	 * context.
 	 */
-	struct i915_gem_context *gem_context;
 	struct intel_engine_cs *engine;
-	struct intel_context *hw_context;
+	struct intel_context *context;
 	struct intel_ring *ring;
 	struct intel_timeline __rcu *timeline;
 	struct list_head signal_link;
@@ -144,6 +182,10 @@ struct i915_request {
 	union {
 		wait_queue_entry_t submitq;
 		struct i915_sw_dma_fence_cb dmaq;
+		struct i915_request_duration_cb {
+			struct dma_fence_cb cb;
+			ktime_t emitted;
+		} duration;
 	};
 	struct list_head execute_cb;
 	struct i915_sw_fence semaphore;
@@ -176,7 +218,7 @@ struct i915_request {
 	 * inside the timeline's HWSP vma, but it is only valid while this
 	 * request has not completed and guarded by the timeline mutex.
 	 */
-	struct intel_timeline_cacheline *hwsp_cacheline;
+	struct intel_timeline_cacheline __rcu *hwsp_cacheline;
 
 	/** Position in the ring of the start of the request */
 	u32 head;
@@ -215,11 +257,6 @@ struct i915_request {
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
 
-	unsigned long flags;
-#define I915_REQUEST_WAITBOOST	BIT(0)
-#define I915_REQUEST_NOPREEMPT	BIT(1)
-#define I915_REQUEST_SENTINEL	BIT(2)
-
 	/** timeline->request entry for this request */
 	struct list_head link;
 
@@ -432,18 +469,18 @@ static inline void i915_request_mark_complete(struct i915_request *rq)
 
 static inline bool i915_request_has_waitboost(const struct i915_request *rq)
 {
-	return rq->flags & I915_REQUEST_WAITBOOST;
+	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
 }
 
 static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
 {
 	/* Preemption should only be disabled very rarely */
-	return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
+	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
 }
 
 static inline bool i915_request_has_sentinel(const struct i915_request *rq)
 {
-	return unlikely(rq->flags & I915_REQUEST_SENTINEL);
+	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
 }
 
 static inline struct intel_timeline *
@@ -454,6 +491,13 @@ i915_request_timeline(struct i915_request *rq)
 					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
 }
 
+static inline struct i915_gem_context *
+i915_request_gem_context(struct i915_request *rq)
+{
+	/* Valid only while the request is being constructed (or retired). */
+	return rcu_dereference_protected(rq->context->gem_context, true);
+}
+
 static inline struct intel_timeline *
 i915_request_active_timeline(struct i915_request *rq)
 {
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 247a967..bf87c70b 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -213,7 +213,7 @@ static void kick_submission(struct intel_engine_cs *engine,
 	 * If we are already the currently executing context, don't
 	 * bother evaluating if we should preempt ourselves.
 	 */
-	if (inflight->hw_context == rq->hw_context)
+	if (inflight->context == rq->context)
 		goto unlock;
 
 	engine->execlists.queue_priority_hint = prio;
@@ -387,9 +387,19 @@ void i915_sched_node_init(struct i915_sched_node *node)
 	INIT_LIST_HEAD(&node->signalers_list);
 	INIT_LIST_HEAD(&node->waiters_list);
 	INIT_LIST_HEAD(&node->link);
+
+	i915_sched_node_reinit(node);
+}
+
+void i915_sched_node_reinit(struct i915_sched_node *node)
+{
 	node->attr.priority = I915_PRIORITY_INVALID;
 	node->semaphores = 0;
 	node->flags = 0;
+
+	GEM_BUG_ON(!list_empty(&node->signalers_list));
+	GEM_BUG_ON(!list_empty(&node->waiters_list));
+	GEM_BUG_ON(!list_empty(&node->link));
 }
 
 static struct i915_dependency *
@@ -480,6 +490,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
 			i915_dependency_free(dep);
 	}
+	INIT_LIST_HEAD(&node->signalers_list);
 
 	/* Remove ourselves from everyone who depends upon us */
 	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
@@ -490,6 +501,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
 			i915_dependency_free(dep);
 	}
+	INIT_LIST_HEAD(&node->waiters_list);
 
 	spin_unlock_irq(&schedule_lock);
 }
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 07d243ac..d1dc4ef 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -26,6 +26,7 @@
 					 sched.link)
 
 void i915_sched_node_init(struct i915_sched_node *node);
+void i915_sched_node_reinit(struct i915_sched_node *node);
 
 bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
 				      struct i915_sched_node *signal,
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 4d88205..98bcb6f 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -36,6 +36,7 @@ struct i915_selftest {
 	char *filter;
 	int mock;
 	int live;
+	int perf;
 };
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
@@ -45,6 +46,7 @@ extern struct i915_selftest i915_selftest;
 
 int i915_mock_selftests(void);
 int i915_live_selftests(struct pci_dev *pdev);
+int i915_perf_selftests(struct pci_dev *pdev);
 
 /* We extract the function declarations from i915_mock_selftests.h and
  * i915_live_selftests.h Add your unit test declarations there!
@@ -61,6 +63,7 @@ int i915_live_selftests(struct pci_dev *pdev);
 #undef selftest
 #define selftest(name, func) int func(struct drm_i915_private *i915);
 #include "selftests/i915_live_selftests.h"
+#include "selftests/i915_perf_selftests.h"
 #undef selftest
 
 struct i915_subtest {
@@ -109,6 +112,7 @@ int __i915_subtests(const char *caller,
 
 static inline int i915_mock_selftests(void) { return 0; }
 static inline int i915_live_selftests(struct pci_dev *pdev) { return 0; }
+static inline int i915_perf_selftests(struct pci_dev *pdev) { return 0; }
 
 #define I915_SELFTEST_DECLARE(x)
 #define I915_SELFTEST_ONLY(x) 0
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 6a88db2..51ba97d 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -12,6 +12,12 @@
 #include "i915_sw_fence.h"
 #include "i915_selftest.h"
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+#define I915_SW_FENCE_BUG_ON(expr) BUG_ON(expr)
+#else
+#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
+
 #define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
 
 static DEFINE_SPINLOCK(i915_sw_fence_lock);
@@ -218,13 +224,21 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
 {
 	BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK);
 
+	__init_waitqueue_head(&fence->wait, name, key);
+	fence->flags = (unsigned long)fn;
+
+	i915_sw_fence_reinit(fence);
+}
+
+void i915_sw_fence_reinit(struct i915_sw_fence *fence)
+{
 	debug_fence_init(fence);
 
-	__init_waitqueue_head(&fence->wait, name, key);
 	atomic_set(&fence->pending, 1);
 	fence->error = 0;
 
-	fence->flags = (unsigned long)fn;
+	I915_SW_FENCE_BUG_ON(!fence->flags);
+	I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head));
 }
 
 void i915_sw_fence_commit(struct i915_sw_fence *fence)
@@ -414,8 +428,10 @@ static void dma_i915_sw_fence_wake_timer(struct dma_fence *dma,
 	struct i915_sw_fence *fence;
 
 	fence = xchg(&cb->base.fence, NULL);
-	if (fence)
+	if (fence) {
+		i915_sw_fence_set_error_once(fence, dma->error);
 		i915_sw_fence_complete(fence);
+	}
 
 	irq_work_queue(&cb->work);
 }
@@ -443,8 +459,10 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 	debug_fence_assert(fence);
 	might_sleep_if(gfpflags_allow_blocking(gfp));
 
-	if (dma_fence_is_signaled(dma))
+	if (dma_fence_is_signaled(dma)) {
+		i915_sw_fence_set_error_once(fence, dma->error);
 		return 0;
+	}
 
 	cb = kmalloc(timeout ?
 		     sizeof(struct i915_sw_dma_fence_cb_timer) :
@@ -454,7 +472,12 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 		if (!gfpflags_allow_blocking(gfp))
 			return -ENOMEM;
 
-		return dma_fence_wait(dma, false);
+		ret = dma_fence_wait(dma, false);
+		if (ret)
+			return ret;
+
+		i915_sw_fence_set_error_once(fence, dma->error);
+		return 0;
 	}
 
 	cb->fence = fence;
@@ -504,8 +527,10 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 
 	debug_fence_assert(fence);
 
-	if (dma_fence_is_signaled(dma))
+	if (dma_fence_is_signaled(dma)) {
+		i915_sw_fence_set_error_once(fence, dma->error);
 		return 0;
+	}
 
 	cb->fence = fence;
 	i915_sw_fence_await(fence);
@@ -539,8 +564,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 		struct dma_fence **shared;
 		unsigned int count, i;
 
-		ret = dma_resv_get_fences_rcu(resv,
-							&excl, &count, &shared);
+		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index ab7d58b..19e806c 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -54,6 +54,8 @@ do {								\
 	__i915_sw_fence_init((fence), (fn), NULL, NULL)
 #endif
 
+void i915_sw_fence_reinit(struct i915_sw_fence *fence);
+
 #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
 void i915_sw_fence_fini(struct i915_sw_fence *fence);
 #else
@@ -110,7 +112,8 @@ static inline void i915_sw_fence_wait(struct i915_sw_fence *fence)
 static inline void
 i915_sw_fence_set_error_once(struct i915_sw_fence *fence, int error)
 {
-	cmpxchg(&fence->error, 0, error);
+	if (unlikely(error))
+		cmpxchg(&fence->error, 0, error);
 }
 
 #endif /* _I915_SW_FENCE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
index 8538ee7..997b299 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -6,6 +6,13 @@
 
 #include "i915_sw_fence_work.h"
 
+static void fence_complete(struct dma_fence_work *f)
+{
+	if (f->ops->release)
+		f->ops->release(f);
+	dma_fence_signal(&f->dma);
+}
+
 static void fence_work(struct work_struct *work)
 {
 	struct dma_fence_work *f = container_of(work, typeof(*f), work);
@@ -14,7 +21,8 @@ static void fence_work(struct work_struct *work)
 	err = f->ops->work(f);
 	if (err)
 		dma_fence_set_error(&f->dma, err);
-	dma_fence_signal(&f->dma);
+
+	fence_complete(f);
 	dma_fence_put(&f->dma);
 }
 
@@ -32,7 +40,7 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 			dma_fence_get(&f->dma);
 			queue_work(system_unbound_wq, &f->work);
 		} else {
-			dma_fence_signal(&f->dma);
+			fence_complete(f);
 		}
 		break;
 
@@ -60,9 +68,6 @@ static void fence_release(struct dma_fence *fence)
 {
 	struct dma_fence_work *f = container_of(fence, typeof(*f), dma);
 
-	if (f->ops->release)
-		f->ops->release(f);
-
 	i915_sw_fence_fini(&f->chain);
 
 	BUILD_BUG_ON(offsetof(typeof(*f), dma));
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 6547690..0cef313 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -259,33 +259,18 @@ static const struct bin_attribute dpf_attrs_1 = {
 static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt.rps;
-	intel_wakeref_t wakeref;
-	u32 freq;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &i915->gt.rps;
 
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		vlv_punit_get(dev_priv);
-		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-		vlv_punit_put(dev_priv);
-
-		freq = (freq >> 8) & 0xff;
-	} else {
-		freq = intel_get_cagf(rps, I915_READ(GEN6_RPSTAT1));
-	}
-
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(rps, freq));
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			intel_rps_read_actual_frequency(rps));
 }
 
 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt.rps;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &i915->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(rps, rps->cur_freq));
@@ -293,8 +278,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 
 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt.rps;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &i915->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(rps, rps->boost_freq));
@@ -513,15 +498,15 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 
 	struct device *kdev = kobj_to_dev(kobj);
 	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
-	struct i915_gpu_state *gpu;
+	struct i915_gpu_coredump *gpu;
 	ssize_t ret;
 
 	gpu = i915_first_error_state(i915);
 	if (IS_ERR(gpu)) {
 		ret = PTR_ERR(gpu);
 	} else if (gpu) {
-		ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
-		i915_gpu_state_put(gpu);
+		ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
+		i915_gpu_coredump_put(gpu);
 	} else {
 		const char *str = "No error state collected\n";
 		size_t len = strlen(str);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 7ef7a1e..233a97a 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -341,7 +341,7 @@ TRACE_EVENT(intel_disable_plane,
 
 /* pipe updates */
 
-TRACE_EVENT(i915_pipe_update_start,
+TRACE_EVENT(intel_pipe_update_start,
 	    TP_PROTO(struct intel_crtc *crtc),
 	    TP_ARGS(crtc),
 
@@ -366,7 +366,7 @@ TRACE_EVENT(i915_pipe_update_start,
 		       __entry->scanline, __entry->min, __entry->max)
 );
 
-TRACE_EVENT(i915_pipe_update_vblank_evaded,
+TRACE_EVENT(intel_pipe_update_vblank_evaded,
 	    TP_PROTO(struct intel_crtc *crtc),
 	    TP_ARGS(crtc),
 
@@ -391,7 +391,7 @@ TRACE_EVENT(i915_pipe_update_vblank_evaded,
 		       __entry->scanline, __entry->min, __entry->max)
 );
 
-TRACE_EVENT(i915_pipe_update_end,
+TRACE_EVENT(intel_pipe_update_end,
 	    TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
 	    TP_ARGS(crtc, frame, scanline_end),
 
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 0348c6d..c47261a 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -23,7 +23,7 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 	struct va_format vaf;
 	va_list args;
 
-	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
+	if (is_debug && !drm_debug_enabled(DRM_UT_DRIVER))
 		return;
 
 	va_start(args, fmt);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 04139ba..b0ade76 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -69,7 +69,7 @@ bool i915_error_injected(void);
 
 #else
 
-#define i915_inject_probe_error(_i915, _err) 0
+#define i915_inject_probe_error(i915, e) ({ BUILD_BUG_ON_INVALID(i915); 0; })
 #define i915_error_injected() false
 
 #endif
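
The i915_inject_probe_error() stub above now expands to a statement expression that passes its argument through BUILD_BUG_ON_INVALID(). A user-space sketch of the same idea, under the assumption that a sizeof()-based helper is a fair stand-in for the kernel macro; check_expr_only(), inject_error() and real_inject_error() are made-up names. The point is that the disabled macro still syntax- and type-checks its argument without generating any code:

#include <stdio.h>

/* The operand of sizeof() is never evaluated at run time, but it must
 * still be a valid expression, so typos in disabled debug macros are
 * caught at compile time. */
#define check_expr_only(expr) ((void)sizeof(!!(expr)))

#ifdef DEBUG
#define inject_error(dev, err) real_inject_error((dev), (err))
#else
/* Statement expression: checks 'dev' is a valid expression, yields 0. */
#define inject_error(dev, err) ({ check_expr_only(dev); 0; })
#endif

struct device { int id; };

int main(void)
{
	struct device dev = { 1 };

	if (inject_error(&dev, -12))	/* always 0 without DEBUG */
		return 1;
	printf("no injected error\n");
	return 0;
}
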
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 01c8222..17d7c52 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -28,7 +28,9 @@
 #include "display/intel_frontbuffer.h"
 
 #include "gt/intel_engine.h"
+#include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
 
 #include "i915_drv.h"
 #include "i915_globals.h"
@@ -112,6 +114,7 @@ vma_create(struct drm_i915_gem_object *obj,
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	kref_init(&vma->ref);
 	mutex_init(&vma->pages_mutex);
 	vma->vm = i915_vm_get(vm);
 	vma->ops = &vm->vma_ops;
@@ -290,6 +293,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
 struct i915_vma_work {
 	struct dma_fence_work base;
 	struct i915_vma *vma;
+	struct drm_i915_gem_object *pinned;
 	enum i915_cache_level cache_level;
 	unsigned int flags;
 };
@@ -304,15 +308,21 @@ static int __vma_bind(struct dma_fence_work *work)
 	if (err)
 		atomic_or(I915_VMA_ERROR, &vma->flags);
 
-	if (vma->obj)
-		__i915_gem_object_unpin_pages(vma->obj);
-
 	return err;
 }
 
+static void __vma_release(struct dma_fence_work *work)
+{
+	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
+
+	if (vw->pinned)
+		__i915_gem_object_unpin_pages(vw->pinned);
+}
+
 static const struct dma_fence_work_ops bind_ops = {
 	.name = "bind",
 	.work = __vma_bind,
+	.release = __vma_release,
 };
 
 struct i915_vma_work *i915_vma_work(void)
@@ -393,8 +403,10 @@ int i915_vma_bind(struct i915_vma *vma,
 		i915_active_set_exclusive(&vma->active, &work->base.dma);
 		work->base.dma.error = 0; /* enable the queue_work() */
 
-		if (vma->obj)
+		if (vma->obj) {
 			__i915_gem_object_pin_pages(vma->obj);
+			work->pinned = vma->obj;
+		}
 	} else {
 		GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
 		ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
@@ -411,8 +423,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 	void __iomem *ptr;
 	int err;
 
-	/* Access through the GTT requires the device to be awake. */
-	assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
 	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
 		err = -ENODEV;
 		goto err;
@@ -444,6 +454,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 		goto err_unpin;
 
 	i915_vma_set_ggtt_write(vma);
+
+	/* NB Access through the GTT requires the device to be awake. */
 	return ptr;
 
 err_unpin:
@@ -846,6 +858,7 @@ static void vma_unbind_pages(struct i915_vma *vma)
 int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
 	struct i915_vma_work *work = NULL;
+	intel_wakeref_t wakeref = 0;
 	unsigned int bound;
 	int err;
 
@@ -871,6 +884,9 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		}
 	}
 
+	if (flags & PIN_GLOBAL)
+		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+
 	/* No more allocations allowed once we hold vm->mutex */
 	err = mutex_lock_interruptible(&vma->vm->mutex);
 	if (err)
@@ -934,11 +950,45 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 err_fence:
 	if (work)
 		dma_fence_work_commit(&work->base);
+	if (wakeref)
+		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
 err_pages:
 	vma_put_pages(vma);
 	return err;
 }
 
+static void flush_idle_contexts(struct intel_gt *gt)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, gt, id)
+		intel_engine_flush_barriers(engine);
+
+	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+}
+
+int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
+{
+	struct i915_address_space *vm = vma->vm;
+	int err;
+
+	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+
+	do {
+		err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
+		if (err != -ENOSPC)
+			return err;
+
+		/* Unlike i915_vma_pin, we don't take no for an answer! */
+		flush_idle_contexts(vm->gt);
+		if (mutex_lock_interruptible(&vm->mutex) == 0) {
+			i915_gem_evict_vm(vm);
+			mutex_unlock(&vm->mutex);
+		}
+	} while (1);
+}
+
 void i915_vma_close(struct i915_vma *vma)
 {
 	struct intel_gt *gt = vma->vm->gt;
@@ -978,8 +1028,10 @@ void i915_vma_reopen(struct i915_vma *vma)
 		__i915_vma_remove_closed(vma);
 }
 
-void i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_release(struct kref *ref)
 {
+	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
+
 	if (drm_mm_node_allocated(&vma->node)) {
 		mutex_lock(&vma->vm->mutex);
 		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
@@ -1019,7 +1071,9 @@ void i915_vma_parked(struct intel_gt *gt)
 		if (!kref_get_unless_zero(&obj->base.refcount))
 			continue;
 
-		if (!i915_vm_tryopen(vm)) {
+		if (i915_vm_tryopen(vm)) {
+			list_del_init(&vma->closed_link);
+		} else {
 			i915_gem_object_put(obj);
 			obj = NULL;
 		}
@@ -1027,7 +1081,7 @@ void i915_vma_parked(struct intel_gt *gt)
 		spin_unlock_irq(&gt->closed_lock);
 
 		if (obj) {
-			i915_vma_destroy(vma);
+			__i915_vma_put(vma);
 			i915_gem_object_put(obj);
 		}
 
@@ -1054,17 +1108,16 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
 
 void i915_vma_revoke_mmap(struct i915_vma *vma)
 {
-	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+	struct drm_vma_offset_node *node;
 	u64 vma_offset;
 
-	lockdep_assert_held(&vma->vm->mutex);
-
 	if (!i915_vma_has_userfault(vma))
 		return;
 
 	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
 	GEM_BUG_ON(!vma->obj->userfault_count);
 
+	node = &vma->mmo->vma_node;
 	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
 	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
 			    drm_vma_node_offset_addr(node) + vma_offset,
@@ -1152,7 +1205,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
 	GEM_BUG_ON(i915_vma_is_active(vma));
 	if (i915_vma_is_pinned(vma)) {
 		vma_print_allocator(vma, "is pinned");
-		return -EBUSY;
+		return -EAGAIN;
 	}
 
 	GEM_BUG_ON(i915_vma_is_active(vma));
@@ -1192,15 +1245,23 @@ int __i915_vma_unbind(struct i915_vma *vma)
 	i915_vma_detach(vma);
 	vma_unbind_pages(vma);
 
-	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */
+	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
 	return 0;
 }
 
 int i915_vma_unbind(struct i915_vma *vma)
 {
 	struct i915_address_space *vm = vma->vm;
+	intel_wakeref_t wakeref = 0;
 	int err;
 
+	if (!drm_mm_node_allocated(&vma->node))
+		return 0;
+
+	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+		/* XXX not always required: nop_clear_range */
+		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
 	err = mutex_lock_interruptible(&vm->mutex);
 	if (err)
 		return err;
@@ -1208,6 +1269,9 @@ int i915_vma_unbind(struct i915_vma *vma)
 	err = __i915_vma_unbind(vma);
 	mutex_unlock(&vm->mutex);
 
+	if (wakeref)
+		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+
 	return err;
 }
 
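
The new i915_ggtt_pin() above loops until the pin either succeeds or fails with something other than -ENOSPC, reclaiming space in between attempts. A minimal sketch of that retry shape, with a fake allocator standing in for the GGTT (try_pin(), reclaim_space() and the single-slot 'space' counter are all illustrative):

#include <errno.h>
#include <stdio.h>

static int space = 0;

static int try_pin(void)
{
	return space > 0 ? (space--, 0) : -ENOSPC;
}

static void reclaim_space(void)
{
	space = 1;	/* stand-in for flushing barriers and evicting */
}

static int pin_with_retry(void)
{
	int err;

	do {
		err = try_pin();
		if (err != -ENOSPC)
			return err;	/* success (0) or a hard error */
		reclaim_space();
	} while (1);
}

int main(void)
{
	printf("pin_with_retry: %d\n", pin_with_retry());
	return 0;
}
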
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4659328..02b31a6 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -30,139 +30,14 @@
 
 #include <drm/drm_mm.h>
 
+#include "gem/i915_gem_object.h"
+
 #include "i915_gem_gtt.h"
 #include "i915_gem_fence_reg.h"
-#include "gem/i915_gem_object.h"
 
 #include "i915_active.h"
 #include "i915_request.h"
-
-enum i915_cache_level;
-
-/**
- * DOC: Virtual Memory Address
- *
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
-	struct drm_mm_node node;
-	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm;
-	const struct i915_vma_ops *ops;
-	struct i915_fence_reg *fence;
-	struct dma_resv *resv; /** Alias of obj->resv */
-	struct sg_table *pages;
-	void __iomem *iomap;
-	void *private; /* owned by creator */
-	u64 size;
-	u64 display_alignment;
-	struct i915_page_sizes page_sizes;
-
-	u32 fence_size;
-	u32 fence_alignment;
-
-	/**
-	 * Count of the number of times this vma has been opened by different
-	 * handles (but same file) for execbuf, i.e. the number of aliases
-	 * that exist in the ctx->handle_vmas LUT for this vma.
-	 */
-	atomic_t open_count;
-	atomic_t flags;
-	/**
-	 * How many users have pinned this object in GTT space.
-	 *
-	 * This is a tightly bound, fairly small number of users, so we
-	 * stuff inside the flags field so that we can both check for overflow
-	 * and detect a no-op i915_vma_pin() in a single check, while also
-	 * pinning the vma.
-	 *
-	 * The worst case display setup would have the same vma pinned for
-	 * use on each plane on each crtc, while also building the next atomic
-	 * state and holding a pin for the length of the cleanup queue. In the
-	 * future, the flip queue may be increased from 1.
-	 * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84
-	 *
-	 * For GEM, the number of concurrent users for pwrite/pread is
-	 * unbounded. For execbuffer, it is currently one but will in future
-	 * be extended to allow multiple clients to pin vma concurrently.
-	 *
-	 * We also use suballocated pages, with each suballocation claiming
-	 * its own pin on the shared vma. At present, this is limited to
-	 * exclusive cachelines of a single page, so a maximum of 64 possible
-	 * users.
-	 */
-#define I915_VMA_PIN_MASK 0x3ff
-#define I915_VMA_OVERFLOW 0x200
-
-	/** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND_BIT 10
-#define I915_VMA_LOCAL_BIND_BIT  11
-
-#define I915_VMA_GLOBAL_BIND	((int)BIT(I915_VMA_GLOBAL_BIND_BIT))
-#define I915_VMA_LOCAL_BIND	((int)BIT(I915_VMA_LOCAL_BIND_BIT))
-
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
-
-#define I915_VMA_ALLOC_BIT	12
-#define I915_VMA_ALLOC		((int)BIT(I915_VMA_ALLOC_BIT))
-
-#define I915_VMA_ERROR_BIT	13
-#define I915_VMA_ERROR		((int)BIT(I915_VMA_ERROR_BIT))
-
-#define I915_VMA_GGTT_BIT	14
-#define I915_VMA_CAN_FENCE_BIT	15
-#define I915_VMA_USERFAULT_BIT	16
-#define I915_VMA_GGTT_WRITE_BIT	17
-
-#define I915_VMA_GGTT		((int)BIT(I915_VMA_GGTT_BIT))
-#define I915_VMA_CAN_FENCE	((int)BIT(I915_VMA_CAN_FENCE_BIT))
-#define I915_VMA_USERFAULT	((int)BIT(I915_VMA_USERFAULT_BIT))
-#define I915_VMA_GGTT_WRITE	((int)BIT(I915_VMA_GGTT_WRITE_BIT))
-
-	struct i915_active active;
-
-#define I915_VMA_PAGES_BIAS 24
-#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1)
-	atomic_t pages_count; /* number of active binds to the pages */
-	struct mutex pages_mutex; /* protect acquire/release of backing pages */
-
-	/**
-	 * Support different GGTT views into the same object.
-	 * This means there can be multiple VMA mappings per object and per VM.
-	 * i915_ggtt_view_type is used to distinguish between those entries.
-	 * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also
-	 * assumed in GEM functions which take no ggtt view parameter.
-	 */
-	struct i915_ggtt_view ggtt_view;
-
-	/** This object's place on the active/inactive lists */
-	struct list_head vm_link;
-
-	struct list_head obj_link; /* Link in the object's VMA list */
-	struct rb_node obj_node;
-	struct hlist_node obj_hash;
-
-	/** This vma's place in the execbuf reservation list */
-	struct list_head exec_link;
-	struct list_head reloc_link;
-
-	/** This vma's place in the eviction list */
-	struct list_head evict_link;
-
-	struct list_head closed_link;
-
-	/**
-	 * Used for performing relocations during execbuffer insertion.
-	 */
-	unsigned int *exec_flags;
-	struct hlist_node exec_node;
-	u32 exec_handle;
-};
+#include "i915_vma_types.h"
 
 struct i915_vma *
 i915_vma_instance(struct drm_i915_gem_object *obj,
@@ -333,7 +208,20 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
 void i915_vma_unlink_ctx(struct i915_vma *vma);
 void i915_vma_close(struct i915_vma *vma);
 void i915_vma_reopen(struct i915_vma *vma);
-void i915_vma_destroy(struct i915_vma *vma);
+
+static inline struct i915_vma *__i915_vma_get(struct i915_vma *vma)
+{
+	if (kref_get_unless_zero(&vma->ref))
+		return vma;
+
+	return NULL;
+}
+
+void i915_vma_release(struct kref *ref);
+static inline void __i915_vma_put(struct i915_vma *vma)
+{
+	kref_put(&vma->ref, i915_vma_release);
+}
 
 #define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
 
@@ -349,6 +237,7 @@ static inline void i915_vma_unlock(struct i915_vma *vma)
 
 int __must_check
 i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
+int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags);
 
 static inline int i915_vma_pin_count(const struct i915_vma *vma)
 {
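
__i915_vma_get() and __i915_vma_put() above follow the usual kref pattern: a lookup may only take a reference while the count is still non-zero, and the final put runs a release callback. A hedged user-space sketch of that pattern using GCC atomics instead of struct kref; the obj, obj_get_unless_zero and obj_put names are illustrative only:

#include <stdio.h>

struct obj {
	int refcount;
};

/* Take a reference only if the object is not already being released. */
static struct obj *obj_get_unless_zero(struct obj *o)
{
	int old;

	do {
		old = __atomic_load_n(&o->refcount, __ATOMIC_RELAXED);
		if (old == 0)
			return NULL;	/* too late, release in progress */
	} while (!__atomic_compare_exchange_n(&o->refcount, &old, old + 1,
					      0, __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
	return o;
}

static void obj_put(struct obj *o, void (*release)(struct obj *))
{
	if (__atomic_sub_fetch(&o->refcount, 1, __ATOMIC_RELEASE) == 0)
		release(o);	/* last reference dropped */
}

static void obj_release(struct obj *o)
{
	printf("releasing object\n");
}

int main(void)
{
	struct obj o = { .refcount = 1 };

	if (obj_get_unless_zero(&o))	/* lookup: count 1 -> 2 */
		obj_put(&o, obj_release);
	obj_put(&o, obj_release);	/* final put runs the release */
	return 0;
}
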
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
new file mode 100644
index 0000000..e0942ef
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_VMA_TYPES_H__
+#define __I915_VMA_TYPES_H__
+
+#include <linux/rbtree.h>
+
+#include <drm/drm_mm.h>
+
+#include "gem/i915_gem_object_types.h"
+
+enum i915_cache_level;
+
+/**
+ * DOC: Global GTT views
+ *
+ * Background and previous state
+ *
+ * Historically objects could exist (be bound) in global GTT space only as
+ * singular instances with a view representing all of the object's backing pages
+ * in a linear fashion. This view will be called a normal view.
+ *
+ * To support multiple views of the same object, where the number of mapped
+ * pages is not equal to the backing store, or where the layout of the pages
+ * is not linear, the concept of a GGTT view was added.
+ *
+ * One example of an alternative view is a stereo display driven by a single
+ * image. In this case we would have a framebuffer looking like this
+ * (2x2 pages):
+ *
+ *    12
+ *    34
+ *
+ * Above would represent a normal GGTT view as normally mapped for GPU or CPU
+ * rendering. In contrast, fed to the display engine would be an alternative
+ * view which could look something like this:
+ *
+ *   1212
+ *   3434
+ *
+ * In this example both the size and layout of pages in the alternative view are
+ * different from the normal view.
+ *
+ * Implementation and usage
+ *
+ * GGTT views are implemented using VMAs and are distinguished via enum
+ * i915_ggtt_view_type and struct i915_ggtt_view.
+ *
+ * A new flavour of core GEM functions which work with GGTT bound objects was
+ * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
+ * renaming in large amounts of code. They take the struct i915_ggtt_view
+ * parameter encapsulating all metadata required to implement a view.
+ *
+ * As a helper for callers which are only interested in the normal view,
+ * a globally const i915_ggtt_view_normal singleton instance exists. All old core
+ * GEM API functions, the ones not taking the view parameter, operate on, or
+ * with, the normal GGTT view.
+ *
+ * Code wanting to add or use a new GGTT view needs to:
+ *
+ * 1. Add a new enum with a suitable name.
+ * 2. Extend the metadata in the i915_ggtt_view structure if required.
+ * 3. Add support to i915_get_vma_pages().
+ *
+ * New views are required to build a scatter-gather table from within the
+ * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
+ * exists for the lifetime of a VMA.
+ *
+ * Core API is designed to have copy semantics which means that passed in
+ * struct i915_ggtt_view does not need to be persistent (left around after
+ * calling the core API functions).
+ *
+ */
+
+struct intel_remapped_plane_info {
+	/* in gtt pages */
+	unsigned int width, height, stride, offset;
+} __packed;
+
+struct intel_remapped_info {
+	struct intel_remapped_plane_info plane[2];
+	unsigned int unused_mbz;
+} __packed;
+
+struct intel_rotation_info {
+	struct intel_remapped_plane_info plane[2];
+} __packed;
+
+struct intel_partial_info {
+	u64 offset;
+	unsigned int size;
+} __packed;
+
+enum i915_ggtt_view_type {
+	I915_GGTT_VIEW_NORMAL = 0,
+	I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
+	I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
+	I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
+};
+
+static inline void assert_i915_gem_gtt_types(void)
+{
+	BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
+	BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
+	BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int));
+
+	/* Check that rotation/remapped shares offsets for simplicity */
+	BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
+		     offsetof(struct intel_rotation_info, plane[0]));
+	BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
+		     offsetofend(struct intel_rotation_info, plane[1]));
+
+	/* As we encode the size of each branch inside the union into its type,
+	 * we have to be careful that each branch has a unique size.
+	 */
+	switch ((enum i915_ggtt_view_type)0) {
+	case I915_GGTT_VIEW_NORMAL:
+	case I915_GGTT_VIEW_PARTIAL:
+	case I915_GGTT_VIEW_ROTATED:
+	case I915_GGTT_VIEW_REMAPPED:
+		/* gcc complains if these are identical cases */
+		break;
+	}
+}
+
+struct i915_ggtt_view {
+	enum i915_ggtt_view_type type;
+	union {
+		/* Members need to contain no holes/padding */
+		struct intel_partial_info partial;
+		struct intel_rotation_info rotated;
+		struct intel_remapped_info remapped;
+	};
+};
+
+/**
+ * DOC: Virtual Memory Address
+ *
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+	struct drm_mm_node node;
+
+	struct i915_address_space *vm;
+	const struct i915_vma_ops *ops;
+
+	struct drm_i915_gem_object *obj;
+	struct dma_resv *resv; /** Alias of obj->resv */
+
+	struct sg_table *pages;
+	void __iomem *iomap;
+	void *private; /* owned by creator */
+
+	struct i915_fence_reg *fence;
+
+	u64 size;
+	u64 display_alignment;
+	struct i915_page_sizes page_sizes;
+
+	/* mmap-offset associated with fencing for this vma */
+	struct i915_mmap_offset	*mmo;
+
+	u32 fence_size;
+	u32 fence_alignment;
+
+	/**
+	 * Count of the number of times this vma has been opened by different
+	 * handles (but same file) for execbuf, i.e. the number of aliases
+	 * that exist in the ctx->handle_vmas LUT for this vma.
+	 */
+	struct kref ref;
+	atomic_t open_count;
+	atomic_t flags;
+	/**
+	 * How many users have pinned this object in GTT space.
+	 *
+	 * This is a tightly bound, fairly small number of users, so we
+	 * stuff inside the flags field so that we can both check for overflow
+	 * and detect a no-op i915_vma_pin() in a single check, while also
+	 * pinning the vma.
+	 *
+	 * The worst case display setup would have the same vma pinned for
+	 * use on each plane on each crtc, while also building the next atomic
+	 * state and holding a pin for the length of the cleanup queue. In the
+	 * future, the flip queue may be increased from 1.
+	 * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84
+	 *
+	 * For GEM, the number of concurrent users for pwrite/pread is
+	 * unbounded. For execbuffer, it is currently one but will in future
+	 * be extended to allow multiple clients to pin vma concurrently.
+	 *
+	 * We also use suballocated pages, with each suballocation claiming
+	 * its own pin on the shared vma. At present, this is limited to
+	 * exclusive cachelines of a single page, so a maximum of 64 possible
+	 * users.
+	 */
+#define I915_VMA_PIN_MASK 0x3ff
+#define I915_VMA_OVERFLOW 0x200
+
+	/** Flags and address space this VMA is bound to */
+#define I915_VMA_GLOBAL_BIND_BIT 10
+#define I915_VMA_LOCAL_BIND_BIT  11
+
+#define I915_VMA_GLOBAL_BIND	((int)BIT(I915_VMA_GLOBAL_BIND_BIT))
+#define I915_VMA_LOCAL_BIND	((int)BIT(I915_VMA_LOCAL_BIND_BIT))
+
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
+
+#define I915_VMA_ALLOC_BIT	12
+#define I915_VMA_ALLOC		((int)BIT(I915_VMA_ALLOC_BIT))
+
+#define I915_VMA_ERROR_BIT	13
+#define I915_VMA_ERROR		((int)BIT(I915_VMA_ERROR_BIT))
+
+#define I915_VMA_GGTT_BIT	14
+#define I915_VMA_CAN_FENCE_BIT	15
+#define I915_VMA_USERFAULT_BIT	16
+#define I915_VMA_GGTT_WRITE_BIT	17
+
+#define I915_VMA_GGTT		((int)BIT(I915_VMA_GGTT_BIT))
+#define I915_VMA_CAN_FENCE	((int)BIT(I915_VMA_CAN_FENCE_BIT))
+#define I915_VMA_USERFAULT	((int)BIT(I915_VMA_USERFAULT_BIT))
+#define I915_VMA_GGTT_WRITE	((int)BIT(I915_VMA_GGTT_WRITE_BIT))
+
+	struct i915_active active;
+
+#define I915_VMA_PAGES_BIAS 24
+#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1)
+	atomic_t pages_count; /* number of active binds to the pages */
+	struct mutex pages_mutex; /* protect acquire/release of backing pages */
+
+	/**
+	 * Support different GGTT views into the same object.
+	 * This means there can be multiple VMA mappings per object and per VM.
+	 * i915_ggtt_view_type is used to distinguish between those entries.
+	 * The default one of zero (I915_GGTT_VIEW_NORMAL) is also
+	 * assumed in GEM functions which take no ggtt view parameter.
+	 */
+	struct i915_ggtt_view ggtt_view;
+
+	/** This object's place on the active/inactive lists */
+	struct list_head vm_link;
+
+	struct list_head obj_link; /* Link in the object's VMA list */
+	struct rb_node obj_node;
+	struct hlist_node obj_hash;
+
+	/** This vma's place in the execbuf reservation list */
+	struct list_head exec_link;
+	struct list_head reloc_link;
+
+	/** This vma's place in the eviction list */
+	struct list_head evict_link;
+
+	struct list_head closed_link;
+
+	/**
+	 * Used for performing relocations during execbuffer insertion.
+	 */
+	unsigned int *exec_flags;
+	struct hlist_node exec_node;
+	u32 exec_handle;
+};
+
+#endif
+
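
The new i915_vma_types.h keeps the existing trick of defining each i915_ggtt_view_type value as the sizeof() of its union member, so the type doubles as the number of valid metadata bytes (see assert_i915_gem_gtt_types() above). A simplified stand-alone sketch of why that is convenient, with made-up struct and view names:

#include <stdio.h>
#include <string.h>

struct partial { unsigned long long offset; unsigned int size; } __attribute__((packed));
struct rotated { unsigned int plane[8]; } __attribute__((packed));

enum view_type {
	VIEW_NORMAL  = 0,
	VIEW_PARTIAL = sizeof(struct partial),	/* 12 */
	VIEW_ROTATED = sizeof(struct rotated),	/* 32 */
};

struct view {
	enum view_type type;
	union {
		struct partial partial;
		struct rotated rotated;
	};
};

static int view_equal(const struct view *a, const struct view *b)
{
	if (a->type != b->type)
		return 0;
	/* the type is also the size of the valid union payload */
	return memcmp(&a->partial, &b->partial, a->type) == 0;
}

int main(void)
{
	struct view a = { .type = VIEW_PARTIAL, .partial = { 4096, 16 } };
	struct view b = a;

	printf("equal: %d\n", view_equal(&a, &b));
	return 0;
}

Comparing or copying exactly view->type bytes then works for every view without a per-type switch, which is why each branch of the union must have a unique size.
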
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index a5b5713..6670a07 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -73,9 +73,30 @@ const char *intel_platform_name(enum intel_platform platform)
 	return platform_names[platform];
 }
 
-void intel_device_info_dump_flags(const struct intel_device_info *info,
-				  struct drm_printer *p)
+static const char *iommu_name(void)
 {
+	const char *msg = "n/a";
+
+#ifdef CONFIG_INTEL_IOMMU
+	msg = enableddisabled(intel_iommu_gfx_mapped);
+#endif
+
+	return msg;
+}
+
+void intel_device_info_print_static(const struct intel_device_info *info,
+				    struct drm_printer *p)
+{
+	drm_printf(p, "engines: %x\n", info->engine_mask);
+	drm_printf(p, "gen: %d\n", info->gen);
+	drm_printf(p, "gt: %d\n", info->gt);
+	drm_printf(p, "iommu: %s\n", iommu_name());
+	drm_printf(p, "memory-regions: %x\n", info->memory_regions);
+	drm_printf(p, "page-sizes: %x\n", info->page_sizes);
+	drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
+	drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
+	drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
+
 #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
 	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
 #undef PRINT_FLAG
@@ -106,8 +127,8 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
 	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
 }
 
-void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
-				    struct drm_printer *p)
+void intel_device_info_print_runtime(const struct intel_runtime_info *info,
+				     struct drm_printer *p)
 {
 	sseu_dump(&info->sseu, p);
 
@@ -148,8 +169,8 @@ static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
 	}
 }
 
-void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
-				     struct drm_printer *p)
+void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
+				      struct drm_printer *p)
 {
 	int s, ss;
 
@@ -498,7 +519,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 	}
 }
 
-static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
+static void bdw_sseu_info_init(struct drm_i915_private *dev_priv)
 {
 	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
 	int s, ss;
@@ -579,7 +600,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 	sseu->has_eu_pg = 0;
 }
 
-static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
+static void hsw_sseu_info_init(struct drm_i915_private *dev_priv)
 {
 	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
 	u32 fuse1;
@@ -808,6 +829,8 @@ static const u16 subplatform_ult_ids[] = {
 	INTEL_WHL_U_GT1_IDS(0),
 	INTEL_WHL_U_GT2_IDS(0),
 	INTEL_WHL_U_GT3_IDS(0),
+	INTEL_CML_U_GT1_IDS(0),
+	INTEL_CML_U_GT2_IDS(0),
 };
 
 static const u16 subplatform_ulx_ids[] = {
@@ -998,11 +1021,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
 
 	/* Initialize slice/subslice/EU info */
 	if (IS_HASWELL(dev_priv))
-		haswell_sseu_info_init(dev_priv);
+		hsw_sseu_info_init(dev_priv);
 	else if (IS_CHERRYVIEW(dev_priv))
 		cherryview_sseu_info_init(dev_priv);
 	else if (IS_BROADWELL(dev_priv))
-		broadwell_sseu_info_init(dev_priv);
+		bdw_sseu_info_init(dev_priv);
 	else if (IS_GEN(dev_priv, 9))
 		gen9_sseu_info_init(dev_priv);
 	else if (IS_GEN(dev_priv, 10))
@@ -1070,7 +1093,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 		 * hooked up to an SFC (Scaler & Format Converter) unit.
 		 * In TGL each VDBOX has access to an SFC.
 		 */
-		if (IS_TIGERLAKE(dev_priv) || logical_vdbox++ % 2 == 0)
+		if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0)
 			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
 	}
 	DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 4bdf8a6..2725cb7 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -230,12 +230,13 @@ const char *intel_platform_name(enum intel_platform platform);
 
 void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
 void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
-void intel_device_info_dump_flags(const struct intel_device_info *info,
-				  struct drm_printer *p);
-void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
+
+void intel_device_info_print_static(const struct intel_device_info *info,
 				    struct drm_printer *p);
-void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
+void intel_device_info_print_runtime(const struct intel_runtime_info *info,
 				     struct drm_printer *p);
+void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
+				      struct drm_printer *p);
 
 void intel_device_info_init_mmio(struct drm_i915_private *dev_priv);
 
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index baaeaec..d0d038b 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -16,6 +16,20 @@ const u32 intel_region_map[] = {
 	[INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
 };
 
+struct intel_memory_region *
+intel_memory_region_by_type(struct drm_i915_private *i915,
+			    enum intel_memory_type mem_type)
+{
+	struct intel_memory_region *mr;
+	int id;
+
+	for_each_memory_region(mr, i915, id)
+		if (mr->type == mem_type)
+			return mr;
+
+	return NULL;
+}
+
 static u64
 intel_memory_region_free_pages(struct intel_memory_region *mem,
 			       struct list_head *blocks)
@@ -37,7 +51,7 @@ __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
 				      struct list_head *blocks)
 {
 	mutex_lock(&mem->mm_lock);
-	intel_memory_region_free_pages(mem, blocks);
+	mem->avail += intel_memory_region_free_pages(mem, blocks);
 	mutex_unlock(&mem->mm_lock);
 }
 
@@ -73,6 +87,9 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
 		min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
 	}
 
+	if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+		return -E2BIG;
+
 	n_pages = size >> ilog2(mem->mm.chunk_size);
 
 	mutex_lock(&mem->mm_lock);
@@ -103,6 +120,7 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
 			break;
 	} while (1);
 
+	mem->avail -= size;
 	mutex_unlock(&mem->mm_lock);
 	return 0;
 
@@ -161,6 +179,8 @@ intel_memory_region_create(struct drm_i915_private *i915,
 	mem->io_start = io_start;
 	mem->min_page_size = min_page_size;
 	mem->ops = ops;
+	mem->total = size;
+	mem->avail = mem->total;
 
 	mutex_init(&mem->objects.lock);
 	INIT_LIST_HEAD(&mem->objects.list);
@@ -182,6 +202,16 @@ intel_memory_region_create(struct drm_i915_private *i915,
 	return ERR_PTR(err);
 }
 
+void intel_memory_region_set_name(struct intel_memory_region *mem,
+				  const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
+	va_end(ap);
+}
+
 static void __intel_memory_region_destroy(struct kref *kref)
 {
 	struct intel_memory_region *mem =
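
intel_memory_region_set_name() above is a printf-style setter built on va_list/vsnprintf, declared __printf(2, 3) in the header so the compiler checks format arguments at every call site. A minimal user-space sketch of the same shape; struct region and region_set_name() are illustrative names, not the driver's:

#include <stdarg.h>
#include <stdio.h>

struct region { char name[8]; };

/* format(printf, 2, 3) is the user-space spelling of the kernel's __printf(2, 3) */
__attribute__((format(printf, 2, 3)))
static void region_set_name(struct region *r, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(r->name, sizeof(r->name), fmt, ap);	/* truncates safely */
	va_end(ap);
}

int main(void)
{
	struct region r;

	region_set_name(&r, "%s%u", "lmem", 0);
	printf("region name: %s\n", r.name);
	return 0;
}
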
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 2387220..232490d 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -47,6 +47,10 @@ enum intel_region_id {
 #define I915_ALLOC_MIN_PAGE_SIZE  BIT(0)
 #define I915_ALLOC_CONTIGUOUS     BIT(1)
 
+#define for_each_memory_region(mr, i915, id) \
+	for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \
+		for_each_if((mr) = (i915)->mm.regions[id])
+
 /**
  * Memory regions encoded as type | instance
  */
@@ -82,10 +86,13 @@ struct intel_memory_region {
 
 	resource_size_t io_start;
 	resource_size_t min_page_size;
+	resource_size_t total;
+	resource_size_t avail;
 
 	unsigned int type;
 	unsigned int instance;
 	unsigned int id;
+	char name[8];
 
 	dma_addr_t remap_addr;
 
@@ -125,5 +132,12 @@ void intel_memory_region_put(struct intel_memory_region *mem);
 
 int intel_memory_regions_hw_probe(struct drm_i915_private *i915);
 void intel_memory_regions_driver_release(struct drm_i915_private *i915);
+struct intel_memory_region *
+intel_memory_region_by_type(struct drm_i915_private *i915,
+			    enum intel_memory_type mem_type);
+
+__printf(2, 3) void
+intel_memory_region_set_name(struct intel_memory_region *mem,
+			     const char *fmt, ...);
 
 #endif
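
The for_each_memory_region() macro added above walks a fixed array of region pointers and runs the body only for populated slots. A hedged sketch of that iterator shape with made-up names; the real macro wraps the assignment in for_each_if() to avoid dangling-else surprises, which this simplified version does not:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define for_each_region(r, regions, id) \
	for ((id) = 0; (id) < (int)ARRAY_SIZE(regions); (id)++) \
		if (((r) = (regions)[(id)]) != NULL)

struct region { const char *name; };

int main(void)
{
	struct region lmem = { "lmem" }, smem = { "smem" };
	struct region *regions[4] = { &smem, NULL, &lmem, NULL };
	struct region *r;
	int id;

	for_each_region(r, regions, id)
		printf("region %d: %s\n", id, r->name);	/* skips NULL slots */
	return 0;
}
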
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 8fd92b9..4ed60e1 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -12,89 +12,91 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
 {
 	switch (id) {
 	case INTEL_PCH_IBX_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+		drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
 		WARN_ON(!IS_GEN(dev_priv, 5));
 		return PCH_IBX;
 	case INTEL_PCH_CPT_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+		drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
 		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
 		return PCH_CPT;
 	case INTEL_PCH_PPT_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+		drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
 		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
 		/* PantherPoint is CPT compatible */
 		return PCH_CPT;
 	case INTEL_PCH_LPT_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+		drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
 		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
 		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
 		return PCH_LPT;
 	case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+		drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
 		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
 		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
 		return PCH_LPT;
 	case INTEL_PCH_WPT_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+		drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
 		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
 		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
 		/* WildcatPoint is LPT compatible */
 		return PCH_LPT;
 	case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+		drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
 		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
 		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
 		/* WildcatPoint is LPT compatible */
 		return PCH_LPT;
 	case INTEL_PCH_SPT_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+		drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
 		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
 		return PCH_SPT;
 	case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+		drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
 		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
 			!IS_COFFEELAKE(dev_priv));
 		return PCH_SPT;
 	case INTEL_PCH_KBP_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
+		drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
 		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
 			!IS_COFFEELAKE(dev_priv));
 		/* KBP is SPT compatible */
 		return PCH_SPT;
 	case INTEL_PCH_CNP_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
+		drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
 		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
 		return PCH_CNP;
 	case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Found Cannon Lake LP PCH (CNP-LP)\n");
 		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
 		return PCH_CNP;
 	case INTEL_PCH_CMP_DEVICE_ID_TYPE:
 	case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
+		drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
 		WARN_ON(!IS_COFFEELAKE(dev_priv));
 		/* CometPoint is CNP Compatible */
 		return PCH_CNP;
 	case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found Comet Lake V PCH (CMP-V)\n");
+		drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
 		WARN_ON(!IS_COFFEELAKE(dev_priv));
 		/* Comet Lake V PCH is based on KBP, which is SPT compatible */
 		return PCH_SPT;
 	case INTEL_PCH_ICP_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found Ice Lake PCH\n");
+		drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
 		WARN_ON(!IS_ICELAKE(dev_priv));
 		return PCH_ICP;
 	case INTEL_PCH_MCC_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
+		drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
 		WARN_ON(!IS_ELKHARTLAKE(dev_priv));
 		return PCH_MCC;
 	case INTEL_PCH_TGP_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n");
+	case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
+		drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
 		WARN_ON(!IS_TIGERLAKE(dev_priv));
 		return PCH_TGP;
 	case INTEL_PCH_JSP_DEVICE_ID_TYPE:
 	case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
-		DRM_DEBUG_KMS("Found Jasper Lake PCH\n");
+		drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
 		WARN_ON(!IS_ELKHARTLAKE(dev_priv));
 		return PCH_JSP;
 	default:
@@ -144,9 +146,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
 		id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
 
 	if (id)
-		DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
+		drm_dbg_kms(&dev_priv->drm, "Assuming PCH ID %04x\n", id);
 	else
-		DRM_DEBUG_KMS("Assuming no PCH\n");
+		drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n");
 
 	return id;
 }
@@ -200,13 +202,14 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
 	 * display.
 	 */
 	if (pch && !HAS_DISPLAY(dev_priv)) {
-		DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Display disabled, reverting to NOP PCH\n");
 		dev_priv->pch_type = PCH_NOP;
 		dev_priv->pch_id = 0;
 	}
 
 	if (!pch)
-		DRM_DEBUG_KMS("No PCH found.\n");
+		drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
 
 	pci_dev_put(pch);
 }
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index d26c25d..3053d1c 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -47,6 +47,7 @@ enum intel_pch {
 #define INTEL_PCH_ICP_DEVICE_ID_TYPE		0x3480
 #define INTEL_PCH_MCC_DEVICE_ID_TYPE		0x4B00
 #define INTEL_PCH_TGP_DEVICE_ID_TYPE		0xA080
+#define INTEL_PCH_TGP2_DEVICE_ID_TYPE		0x4380
 #define INTEL_PCH_JSP_DEVICE_ID_TYPE		0x4D80
 #define INTEL_PCH_JSP2_DEVICE_ID_TYPE		0x3880
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 86379ed..bd2d30e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -140,7 +140,7 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
 
 }
 
-static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
+static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
 {
 	u32 tmp;
 
@@ -178,7 +178,7 @@ static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
 	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
 }
 
-static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
+static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
 {
 	u16 ddrpll, csipll;
 
@@ -199,8 +199,8 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
 		dev_priv->mem_freq = 1600;
 		break;
 	default:
-		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
-				 ddrpll & 0xff);
+		drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
+			ddrpll & 0xff);
 		dev_priv->mem_freq = 0;
 		break;
 	}
@@ -228,8 +228,8 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
 		dev_priv->fsb_freq = 6400;
 		break;
 	default:
-		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
-				 csipll & 0x3ff);
+		drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
+			csipll & 0x3ff);
 		dev_priv->fsb_freq = 0;
 		break;
 	}
@@ -314,7 +314,8 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
 
 	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
 		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
-		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
+		drm_err(&dev_priv->drm,
+			"timed out waiting for Punit DDR DVFS request\n");
 
 	vlv_punit_put(dev_priv);
 }
@@ -383,9 +384,9 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
 
 	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
 
-	DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
-		      enableddisabled(enable),
-		      enableddisabled(was_enabled));
+	drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
+		    enableddisabled(enable),
+		    enableddisabled(was_enabled));
 
 	return was_enabled;
 }
@@ -463,7 +464,7 @@ static const int pessimal_latency_ns = 5000;
 
 static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
 	enum pipe pipe = crtc->pipe;
@@ -510,8 +511,8 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
 	if (i9xx_plane == PLANE_B)
 		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
 
-	DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
-		      dsparb, plane_name(i9xx_plane), size);
+	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+		    dsparb, plane_name(i9xx_plane), size);
 
 	return size;
 }
@@ -527,8 +528,8 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
 		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
 	size >>= 1; /* Convert to cachelines */
 
-	DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
-		      dsparb, plane_name(i9xx_plane), size);
+	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+		    dsparb, plane_name(i9xx_plane), size);
 
 	return size;
 }
@@ -542,41 +543,45 @@ static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
 	size = dsparb & 0x7f;
 	size >>= 2; /* Convert to cachelines */
 
-	DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
-		      dsparb, plane_name(i9xx_plane), size);
+	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+		    dsparb, plane_name(i9xx_plane), size);
 
 	return size;
 }
 
 /* Pineview has different values for various configs */
-static const struct intel_watermark_params pineview_display_wm = {
+static const struct intel_watermark_params pnv_display_wm = {
 	.fifo_size = PINEVIEW_DISPLAY_FIFO,
 	.max_wm = PINEVIEW_MAX_WM,
 	.default_wm = PINEVIEW_DFT_WM,
 	.guard_size = PINEVIEW_GUARD_WM,
 	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
-static const struct intel_watermark_params pineview_display_hplloff_wm = {
+
+static const struct intel_watermark_params pnv_display_hplloff_wm = {
 	.fifo_size = PINEVIEW_DISPLAY_FIFO,
 	.max_wm = PINEVIEW_MAX_WM,
 	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
 	.guard_size = PINEVIEW_GUARD_WM,
 	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
-static const struct intel_watermark_params pineview_cursor_wm = {
+
+static const struct intel_watermark_params pnv_cursor_wm = {
 	.fifo_size = PINEVIEW_CURSOR_FIFO,
 	.max_wm = PINEVIEW_CURSOR_MAX_WM,
 	.default_wm = PINEVIEW_CURSOR_DFT_WM,
 	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
 	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
-static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
+
+static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
 	.fifo_size = PINEVIEW_CURSOR_FIFO,
 	.max_wm = PINEVIEW_CURSOR_MAX_WM,
 	.default_wm = PINEVIEW_CURSOR_DFT_WM,
 	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
 	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
+
 static const struct intel_watermark_params i965_cursor_wm_info = {
 	.fifo_size = I965_CURSOR_FIFO,
 	.max_wm = I965_CURSOR_MAX_WM,
@@ -584,6 +589,7 @@ static const struct intel_watermark_params i965_cursor_wm_info = {
 	.guard_size = 2,
 	.cacheline_size = I915_FIFO_LINE_SIZE,
 };
+
 static const struct intel_watermark_params i945_wm_info = {
 	.fifo_size = I945_FIFO_SIZE,
 	.max_wm = I915_MAX_WM,
@@ -591,6 +597,7 @@ static const struct intel_watermark_params i945_wm_info = {
 	.guard_size = 2,
 	.cacheline_size = I915_FIFO_LINE_SIZE,
 };
+
 static const struct intel_watermark_params i915_wm_info = {
 	.fifo_size = I915_FIFO_SIZE,
 	.max_wm = I915_MAX_WM,
@@ -598,6 +605,7 @@ static const struct intel_watermark_params i915_wm_info = {
 	.guard_size = 2,
 	.cacheline_size = I915_FIFO_LINE_SIZE,
 };
+
 static const struct intel_watermark_params i830_a_wm_info = {
 	.fifo_size = I855GM_FIFO_SIZE,
 	.max_wm = I915_MAX_WM,
@@ -605,6 +613,7 @@ static const struct intel_watermark_params i830_a_wm_info = {
 	.guard_size = 2,
 	.cacheline_size = I830_FIFO_LINE_SIZE,
 };
+
 static const struct intel_watermark_params i830_bc_wm_info = {
 	.fifo_size = I855GM_FIFO_SIZE,
 	.max_wm = I915_MAX_WM/2,
@@ -612,6 +621,7 @@ static const struct intel_watermark_params i830_bc_wm_info = {
 	.guard_size = 2,
 	.cacheline_size = I830_FIFO_LINE_SIZE,
 };
+
 static const struct intel_watermark_params i845_wm_info = {
 	.fifo_size = I830_FIFO_SIZE,
 	.max_wm = I915_MAX_WM,
@@ -794,10 +804,10 @@ static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
 static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
 				   const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 
 	/* FIXME check the 'enable' instead */
-	if (!crtc_state->base.active)
+	if (!crtc_state->hw.active)
 		return false;
 
 	/*
@@ -809,9 +819,28 @@ static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
 	 * around this problem with the watermark code.
 	 */
 	if (plane->id == PLANE_CURSOR)
-		return plane_state->base.fb != NULL;
+		return plane_state->hw.fb != NULL;
 	else
-		return plane_state->base.visible;
+		return plane_state->uapi.visible;
+}
+
+static bool intel_crtc_active(struct intel_crtc *crtc)
+{
+	/* Be paranoid as we can arrive here with only partial
+	 * state retrieved from the hardware during setup.
+	 *
+	 * We can ditch the adjusted_mode.crtc_clock check as soon
+	 * as Haswell has gained clock readout/fastboot support.
+	 *
+	 * We can ditch the crtc->primary->state->fb check as soon as we can
+	 * properly reconstruct framebuffers.
+	 *
+	 * FIXME: The intel_crtc->active here should be switched to
+	 * crtc->state->active once we have proper CRTC states wired up
+	 * for atomic.
+	 */
+	return crtc->active && crtc->base.primary->state->fb &&
+		crtc->config->hw.adjusted_mode.crtc_clock;
 }
 
 static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
@@ -829,7 +858,7 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
 	return enabled;
 }
 
-static void pineview_update_wm(struct intel_crtc *unused_crtc)
+static void pnv_update_wm(struct intel_crtc *unused_crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
 	struct intel_crtc *crtc;
@@ -842,7 +871,8 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
 					 dev_priv->fsb_freq,
 					 dev_priv->mem_freq);
 	if (!latency) {
-		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Unknown FSB/MEM found, disable CxSR\n");
 		intel_set_memory_cxsr(dev_priv, false);
 		return;
 	}
@@ -850,25 +880,25 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
 	crtc = single_enabled_crtc(dev_priv);
 	if (crtc) {
 		const struct drm_display_mode *adjusted_mode =
-			&crtc->config->base.adjusted_mode;
+			&crtc->config->hw.adjusted_mode;
 		const struct drm_framebuffer *fb =
 			crtc->base.primary->state->fb;
 		int cpp = fb->format->cpp[0];
 		int clock = adjusted_mode->crtc_clock;
 
 		/* Display SR */
-		wm = intel_calculate_wm(clock, &pineview_display_wm,
-					pineview_display_wm.fifo_size,
+		wm = intel_calculate_wm(clock, &pnv_display_wm,
+					pnv_display_wm.fifo_size,
 					cpp, latency->display_sr);
 		reg = I915_READ(DSPFW1);
 		reg &= ~DSPFW_SR_MASK;
 		reg |= FW_WM(wm, SR);
 		I915_WRITE(DSPFW1, reg);
-		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
 
 		/* cursor SR */
-		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
-					pineview_display_wm.fifo_size,
+		wm = intel_calculate_wm(clock, &pnv_cursor_wm,
+					pnv_display_wm.fifo_size,
 					4, latency->cursor_sr);
 		reg = I915_READ(DSPFW3);
 		reg &= ~DSPFW_CURSOR_SR_MASK;
@@ -876,8 +906,8 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
 		I915_WRITE(DSPFW3, reg);
 
 		/* Display HPLL off SR */
-		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
-					pineview_display_hplloff_wm.fifo_size,
+		wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
+					pnv_display_hplloff_wm.fifo_size,
 					cpp, latency->display_hpll_disable);
 		reg = I915_READ(DSPFW3);
 		reg &= ~DSPFW_HPLL_SR_MASK;
@@ -885,14 +915,14 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
 		I915_WRITE(DSPFW3, reg);
 
 		/* cursor HPLL off SR */
-		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
-					pineview_display_hplloff_wm.fifo_size,
+		wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
+					pnv_display_hplloff_wm.fifo_size,
 					4, latency->cursor_hpll_disable);
 		reg = I915_READ(DSPFW3);
 		reg &= ~DSPFW_HPLL_CURSOR_MASK;
 		reg |= FW_WM(wm, HPLL_CURSOR);
 		I915_WRITE(DSPFW3, reg);
-		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
 
 		intel_set_memory_cxsr(dev_priv, true);
 	} else {
@@ -1083,10 +1113,10 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
 			  const struct intel_plane_state *plane_state,
 			  int level)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 	unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
 	unsigned int clock, htotal, cpp, width, wm;
 
@@ -1096,7 +1126,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
 	if (!intel_wm_plane_visible(crtc_state, plane_state))
 		return 0;
 
-	cpp = plane_state->base.fb->format->cpp[0];
+	cpp = plane_state->hw.fb->format->cpp[0];
 
 	/*
 	 * Not 100% sure which way ELK should go here as the
@@ -1116,7 +1146,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
 	clock = adjusted_mode->crtc_clock;
 	htotal = adjusted_mode->crtc_htotal;
 
-	width = drm_rect_width(&plane_state->base.dst);
+	width = drm_rect_width(&plane_state->uapi.dst);
 
 	if (plane->id == PLANE_CURSOR) {
 		wm = intel_wm_method2(clock, htotal, width, cpp, latency);
@@ -1143,7 +1173,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
 static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
 				 int level, enum plane_id plane_id, u16 value)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	bool dirty = false;
 
 	for (; level < intel_wm_num_levels(dev_priv); level++) {
@@ -1159,7 +1189,7 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
 static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
 			       int level, u16 value)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	bool dirty = false;
 
 	/* NORMAL level doesn't have an FBC watermark */
@@ -1182,7 +1212,8 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
 static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
 				     const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
 	enum plane_id plane_id = plane->id;
 	bool dirty = false;
@@ -1235,16 +1266,18 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
 
  out:
 	if (dirty) {
-		DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
-			      plane->base.name,
-			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
-			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
-			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
+		drm_dbg_kms(&dev_priv->drm,
+			    "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
+			    plane->base.name,
+			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
+			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
+			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
 
 		if (plane_id == PLANE_PRIMARY)
-			DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n",
-				      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
-				      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
+			drm_dbg_kms(&dev_priv->drm,
+				    "FBC watermarks: SR=%d, HPLL=%d\n",
+				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
+				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
 	}
 
 	return dirty;
@@ -1261,7 +1294,7 @@ static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
 static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
 				     int level)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 
 	if (level > dev_priv->wm.max_level)
 		return false;
@@ -1299,9 +1332,9 @@ static void g4x_invalidate_wms(struct intel_crtc *crtc,
 
 static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct intel_atomic_state *state =
-		to_intel_atomic_state(crtc_state->base.state);
+		to_intel_atomic_state(crtc_state->uapi.state);
 	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
 	int num_active_planes = hweight8(crtc_state->active_planes &
 					 ~BIT(PLANE_CURSOR));
@@ -1316,8 +1349,8 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 	for_each_oldnew_intel_plane_in_state(state, plane,
 					     old_plane_state,
 					     new_plane_state, i) {
-		if (new_plane_state->base.crtc != &crtc->base &&
-		    old_plane_state->base.crtc != &crtc->base)
+		if (new_plane_state->hw.crtc != &crtc->base &&
+		    old_plane_state->hw.crtc != &crtc->base)
 			continue;
 
 		if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
@@ -1388,17 +1421,17 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 
 static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
 	const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
 	struct intel_atomic_state *intel_state =
-		to_intel_atomic_state(new_crtc_state->base.state);
+		to_intel_atomic_state(new_crtc_state->uapi.state);
 	const struct intel_crtc_state *old_crtc_state =
 		intel_atomic_get_old_crtc_state(intel_state, crtc);
 	const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
 	enum plane_id plane_id;
 
-	if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
+	if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
 		*intermediate = *optimal;
 
 		intermediate->cxsr = false;
@@ -1528,10 +1561,11 @@ static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
 }
 
 static void g4x_initial_watermarks(struct intel_atomic_state *state,
-				   struct intel_crtc_state *crtc_state)
+				   struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 
 	mutex_lock(&dev_priv->wm.wm_mutex);
 	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
@@ -1540,10 +1574,11 @@ static void g4x_initial_watermarks(struct intel_atomic_state *state,
 }
 
 static void g4x_optimize_watermarks(struct intel_atomic_state *state,
-				    struct intel_crtc_state *crtc_state)
+				    struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 
 	if (!crtc_state->wm.need_postvbl_update)
 		return;
@@ -1589,10 +1624,10 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
 				const struct intel_plane_state *plane_state,
 				int level)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 	unsigned int clock, htotal, cpp, width, wm;
 
 	if (dev_priv->wm.pri_latency[level] == 0)
@@ -1601,7 +1636,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
 	if (!intel_wm_plane_visible(crtc_state, plane_state))
 		return 0;
 
-	cpp = plane_state->base.fb->format->cpp[0];
+	cpp = plane_state->hw.fb->format->cpp[0];
 	clock = adjusted_mode->crtc_clock;
 	htotal = adjusted_mode->crtc_htotal;
 	width = crtc_state->pipe_src_w;
@@ -1630,7 +1665,7 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
 
 static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	const struct g4x_pipe_wm *raw =
 		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
 	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
@@ -1742,7 +1777,7 @@ static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
 static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
 				 int level, enum plane_id plane_id, u16 value)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	int num_levels = intel_wm_num_levels(dev_priv);
 	bool dirty = false;
 
@@ -1759,7 +1794,8 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
 static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
 				     const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	enum plane_id plane_id = plane->id;
 	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
 	int level;
@@ -1787,11 +1823,12 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
 
 out:
 	if (dirty)
-		DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
-			      plane->base.name,
-			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
-			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
-			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
+		drm_dbg_kms(&dev_priv->drm,
+			    "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
+			    plane->base.name,
+			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
+			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
+			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
 
 	return dirty;
 }
@@ -1817,16 +1854,16 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
 
 static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_atomic_state *state =
-		to_intel_atomic_state(crtc_state->base.state);
+		to_intel_atomic_state(crtc_state->uapi.state);
 	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
 	const struct vlv_fifo_state *fifo_state =
 		&crtc_state->wm.vlv.fifo_state;
 	int num_active_planes = hweight8(crtc_state->active_planes &
 					 ~BIT(PLANE_CURSOR));
-	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
+	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
 	const struct intel_plane_state *old_plane_state;
 	const struct intel_plane_state *new_plane_state;
 	struct intel_plane *plane;
@@ -1837,8 +1874,8 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 	for_each_oldnew_intel_plane_in_state(state, plane,
 					     old_plane_state,
 					     new_plane_state, i) {
-		if (new_plane_state->base.crtc != &crtc->base &&
-		    old_plane_state->base.crtc != &crtc->base)
+		if (new_plane_state->hw.crtc != &crtc->base &&
+		    old_plane_state->hw.crtc != &crtc->base)
 			continue;
 
 		if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
@@ -1923,11 +1960,12 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
 
 static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
-				   struct intel_crtc_state *crtc_state)
+				   struct intel_crtc *crtc)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_uncore *uncore = &dev_priv->uncore;
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 	const struct vlv_fifo_state *fifo_state =
 		&crtc_state->wm.vlv.fifo_state;
 	int sprite0_start, sprite1_start, fifo_size;
@@ -2021,17 +2059,17 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
 
 static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
 	const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
 	struct intel_atomic_state *intel_state =
-		to_intel_atomic_state(new_crtc_state->base.state);
+		to_intel_atomic_state(new_crtc_state->uapi.state);
 	const struct intel_crtc_state *old_crtc_state =
 		intel_atomic_get_old_crtc_state(intel_state, crtc);
 	const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
 	int level;
 
-	if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
+	if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
 		*intermediate = *optimal;
 
 		intermediate->cxsr = false;
@@ -2147,10 +2185,11 @@ static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
 }
 
 static void vlv_initial_watermarks(struct intel_atomic_state *state,
-				   struct intel_crtc_state *crtc_state)
+				   struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 
 	mutex_lock(&dev_priv->wm.wm_mutex);
 	crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
@@ -2159,10 +2198,11 @@ static void vlv_initial_watermarks(struct intel_atomic_state *state,
 }
 
 static void vlv_optimize_watermarks(struct intel_atomic_state *state,
-				    struct intel_crtc_state *crtc_state)
+				    struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 
 	if (!crtc_state->wm.need_postvbl_update)
 		return;
@@ -2187,7 +2227,7 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 12000;
 		const struct drm_display_mode *adjusted_mode =
-			&crtc->config->base.adjusted_mode;
+			&crtc->config->hw.adjusted_mode;
 		const struct drm_framebuffer *fb =
 			crtc->base.primary->state->fb;
 		int clock = adjusted_mode->crtc_clock;
@@ -2203,8 +2243,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
 		if (srwm < 0)
 			srwm = 1;
 		srwm &= 0x1ff;
-		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
-			      entries, srwm);
+		drm_dbg_kms(&dev_priv->drm,
+			    "self-refresh entries: %d, wm: %d\n",
+			    entries, srwm);
 
 		entries = intel_wm_method2(clock, htotal,
 					   crtc->base.cursor->state->crtc_w, 4,
@@ -2217,8 +2258,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
 		if (cursor_sr > i965_cursor_wm_info.max_wm)
 			cursor_sr = i965_cursor_wm_info.max_wm;
 
-		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
-			      "cursor %d\n", srwm, cursor_sr);
+		drm_dbg_kms(&dev_priv->drm,
+			    "self-refresh watermark: display plane %d "
+			    "cursor %d\n", srwm, cursor_sr);
 
 		cxsr_enabled = true;
 	} else {
@@ -2227,8 +2269,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
 		intel_set_memory_cxsr(dev_priv, false);
 	}
 
-	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
-		      srwm);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+		    srwm);
 
 	/* 965 has limitations... */
 	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
@@ -2268,7 +2311,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
 	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
 	if (intel_crtc_active(crtc)) {
 		const struct drm_display_mode *adjusted_mode =
-			&crtc->config->base.adjusted_mode;
+			&crtc->config->hw.adjusted_mode;
 		const struct drm_framebuffer *fb =
 			crtc->base.primary->state->fb;
 		int cpp;
@@ -2295,7 +2338,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
 	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
 	if (intel_crtc_active(crtc)) {
 		const struct drm_display_mode *adjusted_mode =
-			&crtc->config->base.adjusted_mode;
+			&crtc->config->hw.adjusted_mode;
 		const struct drm_framebuffer *fb =
 			crtc->base.primary->state->fb;
 		int cpp;
@@ -2318,7 +2361,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
 			planeb_wm = wm_info->max_wm;
 	}
 
-	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+	drm_dbg_kms(&dev_priv->drm,
+		    "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
 	if (IS_I915GM(dev_priv) && enabled) {
 		struct drm_i915_gem_object *obj;
@@ -2343,7 +2387,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 6000;
 		const struct drm_display_mode *adjusted_mode =
-			&enabled->config->base.adjusted_mode;
+			&enabled->config->hw.adjusted_mode;
 		const struct drm_framebuffer *fb =
 			enabled->base.primary->state->fb;
 		int clock = adjusted_mode->crtc_clock;
@@ -2360,7 +2404,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
 		entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
 					   sr_latency_ns / 100);
 		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
-		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+		drm_dbg_kms(&dev_priv->drm,
+			    "self-refresh entries: %d\n", entries);
 		srwm = wm_info->fifo_size - entries;
 		if (srwm < 0)
 			srwm = 1;
@@ -2372,8 +2417,9 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
 			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
 	}
 
-	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
-		      planea_wm, planeb_wm, cwm, srwm);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+		     planea_wm, planeb_wm, cwm, srwm);
 
 	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
 	fwater_hi = (cwm & 0x1f);
@@ -2401,7 +2447,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
 	if (crtc == NULL)
 		return;
 
-	adjusted_mode = &crtc->config->base.adjusted_mode;
+	adjusted_mode = &crtc->config->hw.adjusted_mode;
 	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 				       &i845_wm_info,
 				       dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
@@ -2409,7 +2455,8 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
 	fwater_lo |= (3<<8) | planea_wm;
 
-	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Setting FIFO watermarks - A: %d\n", planea_wm);
 
 	I915_WRITE(FW_BLC, fwater_lo);
 }
@@ -2483,7 +2530,7 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
 	if (!intel_wm_plane_visible(crtc_state, plane_state))
 		return 0;
 
-	cpp = plane_state->base.fb->format->cpp[0];
+	cpp = plane_state->hw.fb->format->cpp[0];
 
 	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
 
@@ -2491,8 +2538,8 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
 		return method1;
 
 	method2 = ilk_wm_method2(crtc_state->pixel_rate,
-				 crtc_state->base.adjusted_mode.crtc_htotal,
-				 drm_rect_width(&plane_state->base.dst),
+				 crtc_state->hw.adjusted_mode.crtc_htotal,
+				 drm_rect_width(&plane_state->uapi.dst),
 				 cpp, mem_value);
 
 	return min(method1, method2);
@@ -2515,12 +2562,12 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
 	if (!intel_wm_plane_visible(crtc_state, plane_state))
 		return 0;
 
-	cpp = plane_state->base.fb->format->cpp[0];
+	cpp = plane_state->hw.fb->format->cpp[0];
 
 	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
 	method2 = ilk_wm_method2(crtc_state->pixel_rate,
-				 crtc_state->base.adjusted_mode.crtc_htotal,
-				 drm_rect_width(&plane_state->base.dst),
+				 crtc_state->hw.adjusted_mode.crtc_htotal,
+				 drm_rect_width(&plane_state->uapi.dst),
 				 cpp, mem_value);
 	return min(method1, method2);
 }
@@ -2541,11 +2588,11 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
 	if (!intel_wm_plane_visible(crtc_state, plane_state))
 		return 0;
 
-	cpp = plane_state->base.fb->format->cpp[0];
+	cpp = plane_state->hw.fb->format->cpp[0];
 
 	return ilk_wm_method2(crtc_state->pixel_rate,
-			      crtc_state->base.adjusted_mode.crtc_htotal,
-			      drm_rect_width(&plane_state->base.dst),
+			      crtc_state->hw.adjusted_mode.crtc_htotal,
+			      drm_rect_width(&plane_state->uapi.dst),
 			      cpp, mem_value);
 }
 
@@ -2559,9 +2606,10 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
 	if (!intel_wm_plane_visible(crtc_state, plane_state))
 		return 0;
 
-	cpp = plane_state->base.fb->format->cpp[0];
+	cpp = plane_state->hw.fb->format->cpp[0];
 
-	return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->base.dst), cpp);
+	return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst),
+			  cpp);
 }
 
 static unsigned int
@@ -2766,12 +2814,12 @@ static u32
 hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
 {
 	const struct intel_atomic_state *intel_state =
-		to_intel_atomic_state(crtc_state->base.state);
+		to_intel_atomic_state(crtc_state->uapi.state);
 	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
+		&crtc_state->hw.adjusted_mode;
 	u32 linetime, ips_linetime;
 
-	if (!crtc_state->base.active)
+	if (!crtc_state->hw.active)
 		return 0;
 	if (WARN_ON(adjusted_mode->crtc_clock == 0))
 		return 0;
@@ -2807,7 +2855,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 					     &val, NULL);
 
 		if (ret) {
-			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+			drm_err(&dev_priv->drm,
+				"SKL Mailbox read error = %d\n", ret);
 			return;
 		}
 
@@ -2825,7 +2874,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 					     GEN9_PCODE_READ_MEM_LATENCY,
 					     &val, NULL);
 		if (ret) {
-			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+			drm_err(&dev_priv->drm,
+				"SKL Mailbox read error = %d\n", ret);
 			return;
 		}
 
@@ -2943,8 +2993,9 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
 		unsigned int latency = wm[level];
 
 		if (latency == 0) {
-			DRM_DEBUG_KMS("%s WM%d latency not provided\n",
-				      name, level);
+			drm_dbg_kms(&dev_priv->drm,
+				    "%s WM%d latency not provided\n",
+				    name, level);
 			continue;
 		}
 
@@ -2957,9 +3008,9 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
 		else if (level > 0)
 			latency *= 5;
 
-		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
-			      name, level, wm[level],
-			      latency / 10, latency % 10);
+		drm_dbg_kms(&dev_priv->drm,
+			    "%s WM%d latency %u (%u.%u usec)\n", name, level,
+			    wm[level], latency / 10, latency % 10);
 	}
 }
 
@@ -2993,7 +3044,8 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
 	if (!changed)
 		return;
 
-	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
+	drm_dbg_kms(&dev_priv->drm,
+		    "WM latency values increased to avoid potential underruns\n");
 	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
 	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
 	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
@@ -3021,7 +3073,8 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
 	dev_priv->wm.spr_latency[3] = 0;
 	dev_priv->wm.cur_latency[3] = 0;
 
-	DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
+	drm_dbg_kms(&dev_priv->drm,
+		    "LP3 watermarks disabled due to potential for lost interrupts\n");
 	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
 	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
 	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
@@ -3071,7 +3124,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
 
 	/* At least LP0 must be valid */
 	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
-		DRM_DEBUG_KMS("LP0 watermark invalid\n");
+		drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
 		return false;
 	}
 
@@ -3081,11 +3134,9 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
 /* Compute new watermarks for the pipe */
 static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 {
-	struct drm_atomic_state *state = crtc_state->base.state;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct intel_pipe_wm *pipe_wm;
-	struct drm_device *dev = state->dev;
-	const struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *plane;
 	const struct intel_plane_state *plane_state;
 	const struct intel_plane_state *pristate = NULL;
@@ -3105,12 +3156,12 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 			curstate = plane_state;
 	}
 
-	pipe_wm->pipe_enabled = crtc_state->base.active;
+	pipe_wm->pipe_enabled = crtc_state->hw.active;
 	if (sprstate) {
-		pipe_wm->sprites_enabled = sprstate->base.visible;
-		pipe_wm->sprites_scaled = sprstate->base.visible &&
-			(drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
-			 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
+		pipe_wm->sprites_enabled = sprstate->uapi.visible;
+		pipe_wm->sprites_scaled = sprstate->uapi.visible &&
+			(drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
+			 drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
 	}
 
 	usable_level = max_level;
@@ -3162,11 +3213,11 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
  */
 static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(newstate->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
 	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
 	struct intel_atomic_state *intel_state =
-		to_intel_atomic_state(newstate->base.state);
+		to_intel_atomic_state(newstate->uapi.state);
 	const struct intel_crtc_state *oldstate =
 		intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
 	const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
@@ -3178,7 +3229,7 @@ static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
 	 * and after the vblank.
 	 */
 	*a = newstate->wm.ilk.optimal;
-	if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
+	if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) ||
 	    intel_state->skip_intermediate_wm)
 		return 0;
 
@@ -3588,10 +3639,8 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
 	dev_priv->wm.hw = *results;
 }
 
-bool ilk_disable_lp_wm(struct drm_device *dev)
+bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }
 
@@ -3652,7 +3701,7 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
 			return;
 		}
 
-		DRM_DEBUG_DRIVER("Couldn't read SAGV block time!\n");
+		drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
 	} else if (IS_GEN(dev_priv, 11)) {
 		dev_priv->sagv_block_time_us = 10;
 		return;
@@ -3692,7 +3741,7 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
 	if (dev_priv->sagv_status == I915_SAGV_ENABLED)
 		return 0;
 
-	DRM_DEBUG_KMS("Enabling SAGV\n");
+	drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
 	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
 				      GEN9_SAGV_ENABLE);
 
@@ -3703,11 +3752,11 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
 	 * don't actually have SAGV.
 	 */
 	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
-		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+		drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
 		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
 		return 0;
 	} else if (ret < 0) {
-		DRM_ERROR("Failed to enable SAGV\n");
+		drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
 		return ret;
 	}
 
@@ -3726,7 +3775,7 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
 	if (dev_priv->sagv_status == I915_SAGV_DISABLED)
 		return 0;
 
-	DRM_DEBUG_KMS("Disabling SAGV\n");
+	drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
 	/* bspec says to keep retrying for at least 1 ms */
 	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
 				GEN9_SAGV_DISABLE,
@@ -3737,11 +3786,11 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
 	 * don't actually have SAGV.
 	 */
 	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
-		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+		drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
 		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
 		return 0;
 	} else if (ret < 0) {
-		DRM_ERROR("Failed to disable SAGV (%d)\n", ret);
+		drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
 		return ret;
 	}
 
@@ -3780,7 +3829,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 	crtc_state = to_intel_crtc_state(crtc->base.state);
 
-	if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 		return false;
 
 	for_each_intel_plane_on_crtc(dev, crtc, plane) {
@@ -3830,7 +3879,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 	if (INTEL_GEN(dev_priv) < 11)
 		return ddb_size - 4; /* 4 blocks for bypass path allocation */
 
-	adjusted_mode = &crtc_state->base.adjusted_mode;
+	adjusted_mode = &crtc_state->hw.adjusted_mode;
 	total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
 
 	/*
@@ -3859,16 +3908,16 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 				   struct skl_ddb_entry *alloc, /* out */
 				   int *num_active /* out */)
 {
-	struct drm_atomic_state *state = crtc_state->base.state;
+	struct drm_atomic_state *state = crtc_state->uapi.state;
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct drm_crtc *for_crtc = crtc_state->base.crtc;
+	struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
 	const struct intel_crtc *crtc;
 	u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
 	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
 	u16 ddb_size;
 	u32 i;
 
-	if (WARN_ON(!state) || !crtc_state->base.active) {
+	if (WARN_ON(!state) || !crtc_state->hw.active) {
 		alloc->start = 0;
 		alloc->end = 0;
 		*num_active = hweight8(dev_priv->active_pipes);
@@ -3907,11 +3956,11 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 	 */
 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
 		const struct drm_display_mode *adjusted_mode =
-			&crtc_state->base.adjusted_mode;
+			&crtc_state->hw.adjusted_mode;
 		enum pipe pipe = crtc->pipe;
 		int hdisplay, vdisplay;
 
-		if (!crtc_state->base.enable)
+		if (!crtc_state->hw.enable)
 			continue;
 
 		drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
@@ -3942,7 +3991,7 @@ static unsigned int
 skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
 		      int num_active)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	int level, max_level = ilk_wm_max_level(dev_priv);
 	struct skl_wm_level wm = {};
 	int ret, min_ddb_alloc = 0;
@@ -4082,10 +4131,10 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
 	 *
 	 * n.b., src is 16.16 fixed point, dst is whole integer.
 	 */
-	src_w = drm_rect_width(&plane_state->base.src) >> 16;
-	src_h = drm_rect_height(&plane_state->base.src) >> 16;
-	dst_w = drm_rect_width(&plane_state->base.dst);
-	dst_h = drm_rect_height(&plane_state->base.dst);
+	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+	dst_w = drm_rect_width(&plane_state->uapi.dst);
+	dst_h = drm_rect_height(&plane_state->uapi.dst);
 
 	fp_w_ratio = div_fixed16(src_w, dst_w);
 	fp_h_ratio = div_fixed16(src_h, dst_h);
@@ -4100,21 +4149,21 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
 			     const struct intel_plane_state *plane_state,
 			     int color_plane)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	u32 data_rate;
 	u32 width = 0, height = 0;
 	uint_fixed_16_16_t down_scale_amount;
 	u64 rate;
 
-	if (!plane_state->base.visible)
+	if (!plane_state->uapi.visible)
 		return 0;
 
 	if (plane->id == PLANE_CURSOR)
 		return 0;
 
 	if (color_plane == 1 &&
-	    !drm_format_info_is_yuv_semiplanar(fb->format))
+	    !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
 		return 0;
 
 	/*
@@ -4122,8 +4171,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
 	 * the 90/270 degree plane rotation cases (to match the
 	 * GTT mapping), hence no need to account for rotation here.
 	 */
-	width = drm_rect_width(&plane_state->base.src) >> 16;
-	height = drm_rect_height(&plane_state->base.src) >> 16;
+	width = drm_rect_width(&plane_state->uapi.src) >> 16;
+	height = drm_rect_height(&plane_state->uapi.src) >> 16;
 
 	/* UV plane does 1/2 pixel sub-sampling */
 	if (color_plane == 1) {
@@ -4146,7 +4195,7 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
 				 u64 *plane_data_rate,
 				 u64 *uv_plane_data_rate)
 {
-	struct drm_atomic_state *state = crtc_state->base.state;
+	struct drm_atomic_state *state = crtc_state->uapi.state;
 	struct intel_plane *plane;
 	const struct intel_plane_state *plane_state;
 	u64 total_data_rate = 0;
@@ -4181,7 +4230,7 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
 	const struct intel_plane_state *plane_state;
 	u64 total_data_rate = 0;
 
-	if (WARN_ON(!crtc_state->base.state))
+	if (WARN_ON(!crtc_state->uapi.state))
 		return 0;
 
 	/* Calculate and cache data rate for each plane */
@@ -4225,8 +4274,8 @@ static int
 skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 		      struct skl_ddb_allocation *ddb /* out */)
 {
-	struct drm_atomic_state *state = crtc_state->base.state;
-	struct drm_crtc *crtc = crtc_state->base.crtc;
+	struct drm_atomic_state *state = crtc_state->uapi.state;
+	struct drm_crtc *crtc = crtc_state->uapi.crtc;
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
@@ -4248,7 +4297,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 	if (WARN_ON(!state))
 		return 0;
 
-	if (!crtc_state->base.active) {
+	if (!crtc_state->hw.active) {
 		alloc->start = alloc->end = 0;
 		return 0;
 	}
@@ -4310,9 +4359,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 	}
 
 	if (level < 0) {
-		DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
-		DRM_DEBUG_KMS("minimum required %d/%d\n", blocks,
-			      alloc_size);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Requested display configuration exceeds system DDB limitations");
+		drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
+			    blocks, alloc_size);
 		return -EINVAL;
 	}
 
@@ -4490,7 +4540,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
 	u32 crtc_htotal;
 	uint_fixed_16_16_t linetime_us;
 
-	if (!crtc_state->base.active)
+	if (!crtc_state->hw.active)
 		return u32_to_fixed16(0);
 
 	pixel_rate = crtc_state->pixel_rate;
@@ -4498,7 +4548,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
 	if (WARN_ON(pixel_rate == 0))
 		return u32_to_fixed16(0);
 
-	crtc_htotal = crtc_state->base.adjusted_mode.crtc_htotal;
+	crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
 	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
 
 	return linetime_us;
@@ -4533,13 +4583,15 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
 		      u32 plane_pixel_rate, struct skl_wm_params *wp,
 		      int color_plane)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 interm_pbpl;
 
 	/* only planar format has two planes */
-	if (color_plane == 1 && !drm_format_info_is_yuv_semiplanar(format)) {
-		DRM_DEBUG_KMS("Non planar format have single plane\n");
+	if (color_plane == 1 &&
+	    !intel_format_info_is_yuv_semiplanar(format, modifier)) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "Non planar format have single plane\n");
 		return -EINVAL;
 	}
 
@@ -4550,7 +4602,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
 	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
 	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
 			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
-	wp->is_planar = drm_format_info_is_yuv_semiplanar(format);
+	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
 
 	wp->width = width;
 	if (color_plane == 1 && wp->is_planar)
@@ -4622,7 +4674,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
 			    const struct intel_plane_state *plane_state,
 			    struct skl_wm_params *wp, int color_plane)
 {
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	int width;
 
 	/*
@@ -4630,11 +4682,11 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
 	 * the 90/270 degree plane rotation cases (to match the
 	 * GTT mapping), hence no need to account for rotation here.
 	 */
-	width = drm_rect_width(&plane_state->base.src) >> 16;
+	width = drm_rect_width(&plane_state->uapi.src) >> 16;
 
 	return skl_compute_wm_params(crtc_state, width,
 				     fb->format, fb->modifier,
-				     plane_state->base.rotation,
+				     plane_state->hw.rotation,
 				     skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
 				     wp, color_plane);
 }
@@ -4654,7 +4706,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
 				 const struct skl_wm_level *result_prev,
 				 struct skl_wm_level *result /* out */)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	u32 latency = dev_priv->wm.skl_latency[level];
 	uint_fixed_16_16_t method1, method2;
 	uint_fixed_16_16_t selected_result;
@@ -4680,14 +4732,14 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
 	method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
 				 wp->cpp, latency, wp->dbuf_block_size);
 	method2 = skl_wm_method2(wp->plane_pixel_rate,
-				 crtc_state->base.adjusted_mode.crtc_htotal,
+				 crtc_state->hw.adjusted_mode.crtc_htotal,
 				 latency,
 				 wp->plane_blocks_per_line);
 
 	if (wp->y_tiled) {
 		selected_result = max_fixed16(method2, wp->y_tile_minimum);
 	} else {
-		if ((wp->cpp * crtc_state->base.adjusted_mode.crtc_htotal /
+		if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal /
 		     wp->dbuf_block_size < 1) &&
 		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
 			selected_result = method2;
@@ -4778,7 +4830,7 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
 		      const struct skl_wm_params *wm_params,
 		      struct skl_wm_level *levels)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	int level, max_level = ilk_wm_max_level(dev_priv);
 	struct skl_wm_level *result_prev = &levels[0];
 
@@ -4795,7 +4847,7 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
 static u32
 skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_atomic_state *state = crtc_state->base.state;
+	struct drm_atomic_state *state = crtc_state->uapi.state;
 	struct drm_i915_private *dev_priv = to_i915(state->dev);
 	uint_fixed_16_16_t linetime_us;
 	u32 linetime_wm;
@@ -4814,7 +4866,7 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
 				      const struct skl_wm_params *wp,
 				      struct skl_plane_wm *wm)
 {
-	struct drm_device *dev = crtc_state->base.crtc->dev;
+	struct drm_device *dev = crtc_state->uapi.crtc->dev;
 	const struct drm_i915_private *dev_priv = to_i915(dev);
 	u16 trans_min, trans_y_tile_min;
 	const u16 trans_amount = 10; /* This is configurable amount */
@@ -4912,8 +4964,8 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
 static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
 			      const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	enum plane_id plane_id = plane->id;
 	int ret;
 
@@ -4938,7 +4990,7 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
 static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
 			      const struct intel_plane_state *plane_state)
 {
-	enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
+	enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id;
 	int ret;
 
 	/* Watermarks calculated in master */
@@ -4946,7 +4998,7 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
 		return 0;
 
 	if (plane_state->planar_linked_plane) {
-		const struct drm_framebuffer *fb = plane_state->base.fb;
+		const struct drm_framebuffer *fb = plane_state->hw.fb;
 		enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
 
 		WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
@@ -4974,7 +5026,7 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
 
 static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
 	struct intel_plane *plane;
 	const struct intel_plane_state *plane_state;
@@ -5151,8 +5203,8 @@ static int
 skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
 			    struct intel_crtc_state *new_crtc_state)
 {
-	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_plane *plane;
 
@@ -5218,7 +5270,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
 	struct intel_crtc *crtc;
 	int i;
 
-	if ((drm_debug & DRM_UT_KMS) == 0)
+	if (!drm_debug_enabled(DRM_UT_KMS))
 		return;
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -5238,10 +5290,11 @@ skl_print_wm_changes(struct intel_atomic_state *state)
 			if (skl_ddb_entry_equal(old, new))
 				continue;
 
-			DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
-				      plane->base.base.id, plane->base.name,
-				      old->start, old->end, new->start, new->end,
-				      skl_ddb_entry_size(old), skl_ddb_entry_size(new));
+			drm_dbg_kms(&dev_priv->drm,
+				    "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
+				    plane->base.base.id, plane->base.name,
+				    old->start, old->end, new->start, new->end,
+				    skl_ddb_entry_size(old), skl_ddb_entry_size(new));
 		}
 
 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
@@ -5254,70 +5307,74 @@ skl_print_wm_changes(struct intel_atomic_state *state)
 			if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
 				continue;
 
-			DRM_DEBUG_KMS("[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
-				      " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
-				      plane->base.base.id, plane->base.name,
-				      enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
-				      enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
-				      enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
-				      enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
-				      enast(old_wm->trans_wm.plane_en),
-				      enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
-				      enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
-				      enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
-				      enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
-				      enast(new_wm->trans_wm.plane_en));
+			drm_dbg_kms(&dev_priv->drm,
+				    "[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
+				    " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
+				    plane->base.base.id, plane->base.name,
+				    enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
+				    enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
+				    enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
+				    enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
+				    enast(old_wm->trans_wm.plane_en),
+				    enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
+				    enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
+				    enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
+				    enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
+				    enast(new_wm->trans_wm.plane_en));
 
-			DRM_DEBUG_KMS("[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
+			drm_dbg_kms(&dev_priv->drm,
+				    "[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
 				      " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
-				      plane->base.base.id, plane->base.name,
-				      enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
-				      enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
-				      enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
-				      enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
-				      enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
-				      enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
-				      enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
-				      enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
-				      enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
+				    plane->base.base.id, plane->base.name,
+				    enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
+				    enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
+				    enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
+				    enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
+				    enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
+				    enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
+				    enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
+				    enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
+				    enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
 
-				      enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
-				      enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
-				      enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
-				      enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
-				      enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
-				      enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
-				      enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
-				      enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
-				      enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
+				    enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
+				    enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
+				    enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
+				    enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
+				    enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
+				    enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
+				    enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
+				    enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
+				    enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
 
-			DRM_DEBUG_KMS("[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
-				      " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
-				      plane->base.base.id, plane->base.name,
-				      old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
-				      old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
-				      old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
-				      old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
-				      old_wm->trans_wm.plane_res_b,
-				      new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
-				      new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
-				      new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
-				      new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
-				      new_wm->trans_wm.plane_res_b);
+			drm_dbg_kms(&dev_priv->drm,
+				    "[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+				    plane->base.base.id, plane->base.name,
+				    old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
+				    old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
+				    old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
+				    old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
+				    old_wm->trans_wm.plane_res_b,
+				    new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
+				    new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
+				    new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
+				    new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
+				    new_wm->trans_wm.plane_res_b);
 
-			DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
-				      " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
-				      plane->base.base.id, plane->base.name,
-				      old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
-				      old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
-				      old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
-				      old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
-				      old_wm->trans_wm.min_ddb_alloc,
-				      new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
-				      new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
-				      new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
-				      new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
-				      new_wm->trans_wm.min_ddb_alloc);
+			drm_dbg_kms(&dev_priv->drm,
+				    "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+				    plane->base.base.id, plane->base.name,
+				    old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+				    old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+				    old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+				    old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+				    old_wm->trans_wm.min_ddb_alloc,
+				    new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+				    new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+				    new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+				    new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+				    new_wm->trans_wm.min_ddb_alloc);
 		}
 	}
 }
@@ -5356,7 +5413,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
 		if (ret)
 			return ret;
 
-		state->active_pipe_changes = ~0;
+		state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask;
 
 		/*
 		 * We usually only initialize state->active_pipes if we
@@ -5382,7 +5439,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
 	 * to grab the lock on *all* CRTC's.
 	 */
 	if (state->active_pipe_changes || state->modeset) {
-		state->wm_results.dirty_pipes = ~0;
+		state->wm_results.dirty_pipes = INTEL_INFO(dev_priv)->pipe_mask;
 
 		ret = intel_add_all_pipes(state);
 		if (ret)
@@ -5436,7 +5493,7 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
 		 * power well the hardware state will go out of sync
 		 * with the software state.
 		 */
-		if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
+		if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
 		    skl_plane_wm_equals(dev_priv,
 					&old_crtc_state->wm.skl.optimal.planes[plane_id],
 					&new_crtc_state->wm.skl.optimal.planes[plane_id]))
@@ -5500,11 +5557,12 @@ skl_compute_wm(struct intel_atomic_state *state)
 }
 
 static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
-				      struct intel_crtc_state *crtc_state)
+				      struct intel_crtc *crtc)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
 	enum pipe pipe = crtc->pipe;
 
 	if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
@@ -5514,10 +5572,11 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
 }
 
 static void skl_initial_wm(struct intel_atomic_state *state,
-			   struct intel_crtc_state *crtc_state)
+			   struct intel_crtc *crtc)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 	struct skl_ddb_values *results = &state->wm_results;
 
 	if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
@@ -5525,8 +5584,8 @@ static void skl_initial_wm(struct intel_atomic_state *state,
 
 	mutex_lock(&dev_priv->wm.wm_mutex);
 
-	if (crtc_state->base.active_changed)
-		skl_atomic_update_crtc_wm(state, crtc_state);
+	if (crtc_state->uapi.active_changed)
+		skl_atomic_update_crtc_wm(state, crtc);
 
 	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
@@ -5582,10 +5641,11 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
 }
 
 static void ilk_initial_watermarks(struct intel_atomic_state *state,
-				   struct intel_crtc_state *crtc_state)
+				   struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 
 	mutex_lock(&dev_priv->wm.wm_mutex);
 	crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
@@ -5594,10 +5654,11 @@ static void ilk_initial_watermarks(struct intel_atomic_state *state,
 }
 
 static void ilk_optimize_watermarks(struct intel_atomic_state *state,
-				    struct intel_crtc_state *crtc_state)
+				    struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 
 	if (!crtc_state->wm.need_postvbl_update)
 		return;
@@ -5905,19 +5966,22 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
 		crtc_state->wm.g4x.optimal = *active;
 		crtc_state->wm.g4x.intermediate = *active;
 
-		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
-			      pipe_name(pipe),
-			      wm->pipe[pipe].plane[PLANE_PRIMARY],
-			      wm->pipe[pipe].plane[PLANE_CURSOR],
-			      wm->pipe[pipe].plane[PLANE_SPRITE0]);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
+			    pipe_name(pipe),
+			    wm->pipe[pipe].plane[PLANE_PRIMARY],
+			    wm->pipe[pipe].plane[PLANE_CURSOR],
+			    wm->pipe[pipe].plane[PLANE_SPRITE0]);
 	}
 
-	DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
-		      wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
-	DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
-		      wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
-	DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n",
-		      yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
+	drm_dbg_kms(&dev_priv->drm,
+		    "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
+		    wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
+		    wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
+	drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
+		    yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
 }
 
 void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
@@ -5938,7 +6002,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
 		enum plane_id plane_id = plane->id;
 		int level;
 
-		if (plane_state->base.visible)
+		if (plane_state->uapi.visible)
 			continue;
 
 		for (level = 0; level < 3; level++) {
@@ -6009,8 +6073,9 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
 
 		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
 			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
-			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
-				      "assuming DDR DVFS is disabled\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Punit not acking DDR DVFS request, "
+				    "assuming DDR DVFS is disabled\n");
 			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
 		} else {
 			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
@@ -6061,16 +6126,18 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
 		crtc_state->wm.vlv.optimal = *active;
 		crtc_state->wm.vlv.intermediate = *active;
 
-		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
-			      pipe_name(pipe),
-			      wm->pipe[pipe].plane[PLANE_PRIMARY],
-			      wm->pipe[pipe].plane[PLANE_CURSOR],
-			      wm->pipe[pipe].plane[PLANE_SPRITE0],
-			      wm->pipe[pipe].plane[PLANE_SPRITE1]);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
+			    pipe_name(pipe),
+			    wm->pipe[pipe].plane[PLANE_PRIMARY],
+			    wm->pipe[pipe].plane[PLANE_CURSOR],
+			    wm->pipe[pipe].plane[PLANE_SPRITE0],
+			    wm->pipe[pipe].plane[PLANE_SPRITE1]);
 	}
 
-	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
-		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
+		    wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
 }
 
 void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
@@ -6093,7 +6160,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
 		enum plane_id plane_id = plane->id;
 		int level;
 
-		if (plane_state->base.visible)
+		if (plane_state->uapi.visible)
 			continue;
 
 		for (level = 0; level < wm_state->num_levels; level++) {
@@ -6369,7 +6436,6 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
 		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
 		if (dev_priv->vbt.fdi_rx_polarity_inverted)
 			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
-		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
 		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
 		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
 		I915_WRITE(TRANS_CHICKEN2(pipe), val);
@@ -6387,8 +6453,9 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
 
 	tmp = I915_READ(MCH_SSKPD);
 	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
-		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
-			      tmp);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
+			    tmp);
 }
 
 static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -6583,6 +6650,10 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
 	u32 vd_pg_enable = 0;
 	unsigned int i;
 
+	/* Wa_1408615072:tgl */
+	intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
+			 0, VSUNIT_CLKGATE_DIS_TGL);
+
 	/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
 	for (i = 0; i < I915_MAX_VCS; i++) {
 		if (HAS_ENGINE(dev_priv, _VCS(i)))
@@ -7099,7 +7170,8 @@ void intel_suspend_hw(struct drm_i915_private *dev_priv)
 
 static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
+	drm_dbg_kms(&dev_priv->drm,
+		    "No clock gating settings or workarounds applied.\n");
 }
 
 /**
@@ -7166,9 +7238,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
 {
 	/* For cxsr */
 	if (IS_PINEVIEW(dev_priv))
-		i915_pineview_get_mem_freq(dev_priv);
+		pnv_get_mem_freq(dev_priv);
 	else if (IS_GEN(dev_priv, 5))
-		i915_ironlake_get_mem_freq(dev_priv);
+		ilk_get_mem_freq(dev_priv);
 
 	if (intel_has_sagv(dev_priv))
 		skl_setup_sagv_block_time(dev_priv);
@@ -7194,8 +7266,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
 			dev_priv->display.optimize_watermarks =
 				ilk_optimize_watermarks;
 		} else {
-			DRM_DEBUG_KMS("Failed to read display plane latency. "
-				      "Disable CxSR\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Failed to read display plane latency. "
+				    "Disable CxSR\n");
 		}
 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		vlv_setup_wm_latency(dev_priv);
@@ -7215,7 +7288,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
 					    dev_priv->is_ddr3,
 					    dev_priv->fsb_freq,
 					    dev_priv->mem_freq)) {
-			DRM_INFO("failed to find known CxSR latency "
+			drm_info(&dev_priv->drm,
+				 "failed to find known CxSR latency "
 				 "(found ddr%s fsb freq %d, mem freq %d), "
 				 "disabling CxSR\n",
 				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
@@ -7224,7 +7298,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
 			intel_set_memory_cxsr(dev_priv, false);
 			dev_priv->display.update_wm = NULL;
 		} else
-			dev_priv->display.update_wm = pineview_update_wm;
+			dev_priv->display.update_wm = pnv_update_wm;
 	} else if (IS_GEN(dev_priv, 4)) {
 		dev_priv->display.update_wm = i965_update_wm;
 	} else if (IS_GEN(dev_priv, 3)) {
@@ -7239,7 +7313,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
 		}
 	} else {
-		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
+		drm_err(&dev_priv->drm,
+			"unexpected fall-through in %s\n", __func__);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index b579c72..c06c6a8 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -54,7 +54,7 @@ void skl_write_plane_wm(struct intel_plane *plane,
 			const struct intel_crtc_state *crtc_state);
 void skl_write_cursor_wm(struct intel_plane *plane,
 			 const struct intel_crtc_state *crtc_state);
-bool ilk_disable_lp_wm(struct drm_device *dev);
+bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv);
 void intel_init_ipc(struct drm_i915_private *dev_priv);
 void intel_enable_ipc(struct drm_i915_private *dev_priv);
 
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
index 5831180..14b59b8 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -51,8 +51,10 @@ static int init_fake_lmem_bar(struct intel_memory_region *mem)
 
 static void release_fake_lmem_bar(struct intel_memory_region *mem)
 {
-	if (drm_mm_node_allocated(&mem->fake_mappable))
-		drm_mm_remove_node(&mem->fake_mappable);
+	if (!drm_mm_node_allocated(&mem->fake_mappable))
+		return;
+
+	drm_mm_remove_node(&mem->fake_mappable);
 
 	dma_unmap_resource(&mem->i915->drm.pdev->dev,
 			   mem->remap_addr,
@@ -88,6 +90,8 @@ region_lmem_init(struct intel_memory_region *mem)
 	if (ret)
 		io_mapping_fini(&mem->iomap);
 
+	intel_memory_region_set_name(mem, "local");
+
 	return ret;
 }
 
@@ -121,10 +125,12 @@ intel_setup_fake_lmem(struct drm_i915_private *i915)
 					 io_start,
 					 &intel_region_lmem_ops);
 	if (!IS_ERR(mem)) {
-		DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region);
-		DRM_INFO("Intel graphics fake LMEM IO start: %llx\n",
-			 (u64)mem->io_start);
-		DRM_INFO("Intel graphics fake LMEM size: %llx\n",
+		drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
+			 &mem->region);
+		drm_info(&i915->drm,
+			 "Intel graphics fake LMEM IO start: %llx\n",
+			(u64)mem->io_start);
+		drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n",
 			 (u64)resource_size(&mem->region));
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index e06b35b..cbfb717 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -105,8 +105,8 @@ static int vlv_sideband_rw(struct drm_i915_private *i915,
 	if (intel_wait_for_register(uncore,
 				    VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
 				    5)) {
-		DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
-				 is_read ? "read" : "write");
+		drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n",
+			is_read ? "read" : "write");
 		return -EAGAIN;
 	}
 
@@ -129,8 +129,8 @@ static int vlv_sideband_rw(struct drm_i915_private *i915,
 			*val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
 		err = 0;
 	} else {
-		DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
-				 is_read ? "read" : "write");
+		drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n",
+			is_read ? "read" : "write");
 		err = -ETIMEDOUT;
 	}
 
@@ -283,7 +283,8 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
 	if (intel_wait_for_register_fw(uncore,
 				       SBI_CTL_STAT, SBI_BUSY, 0,
 				       100)) {
-		DRM_ERROR("timeout waiting for SBI to become ready\n");
+		drm_err(&i915->drm,
+			"timeout waiting for SBI to become ready\n");
 		return -EBUSY;
 	}
 
@@ -301,12 +302,13 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
 	if (__intel_wait_for_register_fw(uncore,
 					 SBI_CTL_STAT, SBI_BUSY, 0,
 					 100, 100, &cmd)) {
-		DRM_ERROR("timeout waiting for SBI to complete read\n");
+		drm_err(&i915->drm,
+			"timeout waiting for SBI to complete read\n");
 		return -ETIMEDOUT;
 	}
 
 	if (cmd & SBI_RESPONSE_FAIL) {
-		DRM_ERROR("error during SBI read of reg %x\n", reg);
+		drm_err(&i915->drm, "error during SBI read of reg %x\n", reg);
 		return -ENXIO;
 	}
 
@@ -426,8 +428,9 @@ int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
 	mutex_unlock(&i915->sb_lock);
 
 	if (err) {
-		DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
-				 mbox, __builtin_return_address(0), err);
+		drm_dbg(&i915->drm,
+			"warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
+			mbox, __builtin_return_address(0), err);
 	}
 
 	return err;
@@ -447,8 +450,9 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
 	mutex_unlock(&i915->sb_lock);
 
 	if (err) {
-		DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
-				 val, mbox, __builtin_return_address(0), err);
+		drm_dbg(&i915->drm,
+			"warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
+			val, mbox, __builtin_return_address(0), err);
 	}
 
 	return err;
@@ -519,7 +523,8 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
 	 * requests, and for any quirks of the PCODE firmware that delays
 	 * the request completion.
 	 */
-	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+	drm_dbg_kms(&i915->drm,
+		    "PCODE timeout, retrying with preemption disabled\n");
 	WARN_ON_ONCE(timeout_base_ms > 3);
 	preempt_disable();
 	ret = wait_for_atomic(COND, 50);
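
As an aside on the hunk above: skl_pcode_request() first polls with preemption enabled (and may sleep), and only when that misses the deadline does it retry briefly with preemption disabled, to cover firmware that completes the request just past the timeout. A rough, self-contained sketch of the same two-phase wait, with a hypothetical done() callback standing in for the PCODE completion test:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>
#include <linux/preempt.h>
#include <linux/types.h>

/* Poll @done for up to @budget_ms; busy-wait when @atomic is set. */
static int example_poll(bool (*done)(void *data), void *data,
			unsigned int budget_ms, bool atomic)
{
	ktime_t end = ktime_add_ms(ktime_get(), budget_ms);

	do {
		if (done(data))
			return 0;
		if (atomic)
			udelay(10);
		else
			usleep_range(50, 100);
	} while (ktime_before(ktime_get(), end));

	return -ETIMEDOUT;
}

static int example_wait_with_atomic_retry(bool (*done)(void *data), void *data)
{
	int err;

	err = example_poll(done, data, 150, false);	/* may sleep */
	if (err != -ETIMEDOUT)
		return err;

	/* Last-ditch retry: short, non-preemptible busy-wait. */
	preempt_disable();
	err = example_poll(done, data, 50, true);
	preempt_enable();

	return err;
}
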
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 94a97bf..5f2cf6f 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -359,7 +359,8 @@ static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
 		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
 				    GT_FIFO_NUM_RESERVED_ENTRIES,
 				    GT_FIFO_TIMEOUT_MS)) {
-			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
+			drm_dbg(&uncore->i915->drm,
+				"GT_FIFO timeout, entries: %u\n", n);
 			return;
 		}
 	}
@@ -432,7 +433,7 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore)
 			break;
 
 		if (--retry_count == 0) {
-			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
+			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
 			break;
 		}
 
@@ -490,7 +491,7 @@ gen6_check_for_fifo_debug(struct intel_uncore *uncore)
 	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
 
 	if (unlikely(fifodbg)) {
-		DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg);
+		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
 		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
 	}
 
@@ -562,7 +563,7 @@ void intel_uncore_resume_early(struct intel_uncore *uncore)
 	unsigned int restore_forcewake;
 
 	if (intel_uncore_unclaimed_mmio(uncore))
-		DRM_DEBUG("unclaimed mmio detected on resume, clearing\n");
+		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
 
 	if (!intel_uncore_has_forcewake(uncore))
 		return;
@@ -1595,8 +1596,8 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 		spin_unlock_irq(&uncore->lock);
 
 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
-			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
-			DRM_INFO("when using vblank-synced partial screen updates.\n");
+			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
+			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
 			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
 			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 				       FORCEWAKE, FORCEWAKE_ACK);
@@ -1683,8 +1684,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
 		mmio_size = 2 * 1024 * 1024;
 	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
 	if (uncore->regs == NULL) {
-		DRM_ERROR("failed to map registers\n");
-
+		drm_err(&i915->drm, "failed to map registers\n");
 		return -EIO;
 	}
 
@@ -1807,7 +1807,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
 
 	/* clear out unclaimed reg detection bit */
 	if (intel_uncore_unclaimed_mmio(uncore))
-		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
+		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
 
 	return 0;
 
@@ -2072,9 +2072,10 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
 
 	if (unlikely(check_for_unclaimed_mmio(uncore))) {
 		if (!i915_modparams.mmio_debug) {
-			DRM_DEBUG("Unclaimed register detected, "
-				  "enabling oneshot unclaimed register reporting. "
-				  "Please use i915.mmio_debug=N for more information.\n");
+			drm_dbg(&uncore->i915->drm,
+				"Unclaimed register detected, "
+				"enabling oneshot unclaimed register reporting. "
+				"Please use i915.mmio_debug=N for more information.\n");
 			i915_modparams.mmio_debug++;
 		}
 		uncore->debug->unclaimed_mmio_check--;
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 59aa1b6..8fbf6f4 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -95,16 +95,17 @@ static void __intel_wakeref_put_work(struct work_struct *wrk)
 void __intel_wakeref_init(struct intel_wakeref *wf,
 			  struct intel_runtime_pm *rpm,
 			  const struct intel_wakeref_ops *ops,
-			  struct lock_class_key *key)
+			  struct intel_wakeref_lockclass *key)
 {
 	wf->rpm = rpm;
 	wf->ops = ops;
 
-	__mutex_init(&wf->mutex, "wakeref", key);
+	__mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
 	atomic_set(&wf->count, 0);
 	wf->wakeref = 0;
 
 	INIT_WORK(&wf->work, __intel_wakeref_put_work);
+	lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0);
 }
 
 int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index da6e8fd..7d1e676 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -44,12 +44,17 @@ struct intel_wakeref {
 	struct work_struct work;
 };
 
+struct intel_wakeref_lockclass {
+	struct lock_class_key mutex;
+	struct lock_class_key work;
+};
+
 void __intel_wakeref_init(struct intel_wakeref *wf,
 			  struct intel_runtime_pm *rpm,
 			  const struct intel_wakeref_ops *ops,
-			  struct lock_class_key *key);
+			  struct intel_wakeref_lockclass *key);
 #define intel_wakeref_init(wf, rpm, ops) do {				\
-	static struct lock_class_key __key;				\
+	static struct intel_wakeref_lockclass __key;			\
 									\
 	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
 } while (0)
@@ -59,9 +64,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
 
 /**
  * intel_wakeref_get: Acquire the wakeref
- * @i915: the drm_i915_private device
  * @wf: the wakeref
- * @fn: callback for acquired the wakeref, called only on first acquire.
  *
  * Acquire a hold on the wakeref. The first user to do so, will acquire
  * the runtime pm wakeref and then call the @fn underneath the wakeref
@@ -76,6 +79,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
 static inline int
 intel_wakeref_get(struct intel_wakeref *wf)
 {
+	might_sleep();
 	if (unlikely(!atomic_inc_not_zero(&wf->count)))
 		return __intel_wakeref_get_first(wf);
 
@@ -83,6 +87,22 @@ intel_wakeref_get(struct intel_wakeref *wf)
 }
 
 /**
+ * __intel_wakeref_get: Acquire the wakeref, again
+ * @wf: the wakeref
+ *
+ * Increment the wakeref counter, only valid if it is already held by
+ * the caller.
+ *
+ * See intel_wakeref_get().
+ */
+static inline void
+__intel_wakeref_get(struct intel_wakeref *wf)
+{
+	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
+	atomic_inc(&wf->count);
+}
+
+/**
  * intel_wakeref_get_if_in_use: Acquire the wakeref
  * @wf: the wakeref
  *
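
The wakeref change above splits the single lock_class_key into separate keys for the mutex and for the deferred worker, and intel_wakeref_init() stamps out a fresh static key pair at every call site so lockdep can tell different wakeref users apart. A simplified sketch of that per-call-site lock class pattern (the example_* names are hypothetical, not the driver's definitions):

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct example_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};

struct example_obj {
	struct mutex mutex;
	struct work_struct work;
};

/*
 * Each macro expansion owns its own static keys, so the mutex and the
 * worker of every user become distinct classes in lockdep's eyes.
 * (lockdep_init_map() compiles away when lockdep is disabled.)
 */
#define example_init(obj, fn) do {					\
	static struct example_lockclass __key;				\
									\
	__mutex_init(&(obj)->mutex, "example.mutex", &__key.mutex);	\
	INIT_WORK(&(obj)->work, (fn));					\
	lockdep_init_map(&(obj)->work.lockdep_map,			\
			 "example.work", &__key.work, 0);		\
} while (0)
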
diff --git a/drivers/gpu/drm/i915/oa/Makefile b/drivers/gpu/drm/i915/oa/Makefile
deleted file mode 100644
index df028e2..0000000
--- a/drivers/gpu/drm/i915/oa/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 260b0ee..ef572a0 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -99,7 +99,7 @@ __live_active_setup(struct drm_i915_private *i915)
 	for_each_uabi_engine(engine, i915) {
 		struct i915_request *rq;
 
-		rq = i915_request_create(engine->kernel_context);
+		rq = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			break;
@@ -155,7 +155,11 @@ static int live_active_wait(void *arg)
 
 	i915_active_wait(&active->base);
 	if (!READ_ONCE(active->retired)) {
+		struct drm_printer p = drm_err_printer(__func__);
+
 		pr_err("i915_active not retired after waiting!\n");
+		i915_active_print(&active->base, &p);
+
 		err = -EINVAL;
 	}
 
@@ -184,7 +188,11 @@ static int live_active_retire(void *arg)
 		err = -EIO;
 
 	if (!READ_ONCE(active->retired)) {
+		struct drm_printer p = drm_err_printer(__func__);
+
 		pr_err("i915_active not retired after flushing!\n");
+		i915_active_print(&active->base, &p);
+
 		err = -EINVAL;
 	}
 
@@ -250,3 +258,36 @@ void i915_active_print(struct i915_active *ref, struct drm_printer *m)
 		i915_active_release(ref);
 	}
 }
+
+static void spin_unlock_wait(spinlock_t *lock)
+{
+	spin_lock_irq(lock);
+	spin_unlock_irq(lock);
+}
+
+void i915_active_unlock_wait(struct i915_active *ref)
+{
+	if (i915_active_acquire_if_busy(ref)) {
+		struct active_node *it, *n;
+
+		rcu_read_lock();
+		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+			struct dma_fence *f;
+
+			/* Wait for all active callbacks */
+			f = rcu_dereference(it->base.fence);
+			if (f)
+				spin_unlock_wait(f->lock);
+		}
+		rcu_read_unlock();
+
+		i915_active_release(ref);
+	}
+
+	/* And wait for the retire callback */
+	spin_lock_irq(&ref->tree_lock);
+	spin_unlock_irq(&ref->tree_lock);
+
+	/* ... which may have been on a thread instead */
+	flush_work(&ref->work);
+}
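
The i915_active_unlock_wait() helper added here leans on the classic lock-barrier idiom: taking and immediately dropping a lock does no work of its own, but it cannot complete until any critical section that already held the lock has exited, so the lock/unlock pair acts as a wait. A generic sketch of the idea (illustrative only, assumed names):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

/*
 * Returns once every critical section that held @lock when we were
 * called has finished. New acquisitions made afterwards may still be
 * running: this is a barrier, not mutual exclusion.
 */
static void example_lock_barrier(spinlock_t *lock, struct work_struct *deferred)
{
	spin_lock_irq(lock);
	spin_unlock_irq(lock);

	/* Also drain any work the departing critical section queued. */
	flush_work(deferred);
}
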
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index d83f6bf..78f36fa 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -9,6 +9,7 @@
 #include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
 
 #include "i915_selftest.h"
 
@@ -123,8 +124,6 @@ static void pm_resume(struct drm_i915_private *i915)
 	 * that runtime-pm just works.
 	 */
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		intel_gt_sanitize(&i915->gt, false);
-
 		i915_gem_restore_gtt_mappings(i915);
 		i915_gem_restore_fences(&i915->ggtt);
 
@@ -136,7 +135,7 @@ static int igt_gem_suspend(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx;
-	struct drm_file *file;
+	struct file *file;
 	int err;
 
 	file = mock_file(i915);
@@ -163,7 +162,7 @@ static int igt_gem_suspend(void *arg)
 
 	err = switch_to_context(ctx);
 out:
-	mock_file_free(i915, file);
+	fput(file);
 	return err;
 }
 
@@ -171,7 +170,7 @@ static int igt_gem_hibernate(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx;
-	struct drm_file *file;
+	struct file *file;
 	int err;
 
 	file = mock_file(i915);
@@ -198,7 +197,7 @@ static int igt_gem_hibernate(void *arg)
 
 	err = switch_to_context(ctx);
 out:
-	mock_file_free(i915, file);
+	fput(file);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 42e9481..06ef8851 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -198,8 +198,8 @@ static int igt_overcommit(void *arg)
 	quirk_add(obj, &objects);
 
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
-	if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
-		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
+	if (vma != ERR_PTR(-ENOSPC)) {
+		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR_OR_ZERO(vma));
 		err = -EINVAL;
 		goto cleanup;
 	}
@@ -466,7 +466,7 @@ static int igt_evict_contexts(void *arg)
 	/* Overfill the GGTT with context objects and so try to evict one. */
 	for_each_engine(engine, gt, id) {
 		struct i915_sw_fence fence;
-		struct drm_file *file;
+		struct file *file;
 
 		file = mock_file(i915);
 		if (IS_ERR(file)) {
@@ -515,7 +515,7 @@ static int igt_evict_contexts(void *arg)
 		pr_info("Submitted %lu contexts/requests on %s\n",
 			count, engine->name);
 
-		mock_file_free(i915, file);
+		fput(file);
 		if (err)
 			break;
 	}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 3f7e80f..b342bef 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -34,6 +34,7 @@
 
 #include "mock_drm.h"
 #include "mock_gem_device.h"
+#include "mock_gtt.h"
 #include "igt_flush_test.h"
 
 static void cleanup_freed_objects(struct drm_i915_private *i915)
@@ -151,7 +152,7 @@ static int igt_ppgtt_alloc(void *arg)
 	if (!HAS_PPGTT(dev_priv))
 		return 0;
 
-	ppgtt = __ppgtt_create(dev_priv);
+	ppgtt = i915_ppgtt_create(&dev_priv->gt);
 	if (IS_ERR(ppgtt))
 		return PTR_ERR(ppgtt);
 
@@ -206,16 +207,17 @@ static int igt_ppgtt_alloc(void *arg)
 	return err;
 }
 
-static int lowlevel_hole(struct drm_i915_private *i915,
-			 struct i915_address_space *vm,
+static int lowlevel_hole(struct i915_address_space *vm,
 			 u64 hole_start, u64 hole_end,
 			 unsigned long end_time)
 {
 	I915_RND_STATE(seed_prng);
+	struct i915_vma *mock_vma;
 	unsigned int size;
-	struct i915_vma mock_vma;
 
-	memset(&mock_vma, 0, sizeof(struct i915_vma));
+	mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
+	if (!mock_vma)
+		return -ENOMEM;
 
 	/* Keep creating larger objects until one cannot fit into the hole */
 	for (size = 12; (hole_end - hole_start) >> size; size++) {
@@ -239,8 +241,10 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 			if (order)
 				break;
 		} while (count >>= 1);
-		if (!count)
+		if (!count) {
+			kfree(mock_vma);
 			return -ENOMEM;
+		}
 		GEM_BUG_ON(!order);
 
 		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
@@ -252,7 +256,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 		 * memory. We expect to hit -ENOMEM.
 		 */
 
-		obj = fake_dma_object(i915, BIT_ULL(size));
+		obj = fake_dma_object(vm->i915, BIT_ULL(size));
 		if (IS_ERR(obj)) {
 			kfree(order);
 			break;
@@ -283,12 +287,12 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
 				break;
 
-			mock_vma.pages = obj->mm.pages;
-			mock_vma.node.size = BIT_ULL(size);
-			mock_vma.node.start = addr;
+			mock_vma->pages = obj->mm.pages;
+			mock_vma->node.size = BIT_ULL(size);
+			mock_vma->node.start = addr;
 
-			with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-				vm->insert_entries(vm, &mock_vma,
+			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
+				vm->insert_entries(vm, mock_vma,
 						   I915_CACHE_NONE, 0);
 		}
 		count = n;
@@ -299,7 +303,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 			intel_wakeref_t wakeref;
 
 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
-			with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
 				vm->clear_range(vm, addr, BIT_ULL(size));
 		}
 
@@ -308,9 +312,10 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 
 		kfree(order);
 
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
+	kfree(mock_vma);
 	return 0;
 }
 
@@ -335,8 +340,7 @@ static void close_object_list(struct list_head *objects,
 	}
 }
 
-static int fill_hole(struct drm_i915_private *i915,
-		     struct i915_address_space *vm,
+static int fill_hole(struct i915_address_space *vm,
 		     u64 hole_start, u64 hole_end,
 		     unsigned long end_time)
 {
@@ -369,7 +373,7 @@ static int fill_hole(struct drm_i915_private *i915,
 				{ }
 			}, *p;
 
-			obj = fake_dma_object(i915, full_size);
+			obj = fake_dma_object(vm->i915, full_size);
 			if (IS_ERR(obj))
 				break;
 
@@ -537,7 +541,7 @@ static int fill_hole(struct drm_i915_private *i915,
 		}
 
 		close_object_list(&objects, vm);
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
 	return 0;
@@ -547,8 +551,7 @@ static int fill_hole(struct drm_i915_private *i915,
 	return err;
 }
 
-static int walk_hole(struct drm_i915_private *i915,
-		     struct i915_address_space *vm,
+static int walk_hole(struct i915_address_space *vm,
 		     u64 hole_start, u64 hole_end,
 		     unsigned long end_time)
 {
@@ -570,7 +573,7 @@ static int walk_hole(struct drm_i915_private *i915,
 		u64 addr;
 		int err = 0;
 
-		obj = fake_dma_object(i915, size << PAGE_SHIFT);
+		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
 		if (IS_ERR(obj))
 			break;
 
@@ -625,14 +628,13 @@ static int walk_hole(struct drm_i915_private *i915,
 		if (err)
 			return err;
 
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
 	return 0;
 }
 
-static int pot_hole(struct drm_i915_private *i915,
-		    struct i915_address_space *vm,
+static int pot_hole(struct i915_address_space *vm,
 		    u64 hole_start, u64 hole_end,
 		    unsigned long end_time)
 {
@@ -646,7 +648,7 @@ static int pot_hole(struct drm_i915_private *i915,
 	if (i915_is_ggtt(vm))
 		flags |= PIN_GLOBAL;
 
-	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
+	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -707,8 +709,7 @@ static int pot_hole(struct drm_i915_private *i915,
 	return err;
 }
 
-static int drunk_hole(struct drm_i915_private *i915,
-		      struct i915_address_space *vm,
+static int drunk_hole(struct i915_address_space *vm,
 		      u64 hole_start, u64 hole_end,
 		      unsigned long end_time)
 {
@@ -753,7 +754,7 @@ static int drunk_hole(struct drm_i915_private *i915,
 		 * memory. We expect to hit -ENOMEM.
 		 */
 
-		obj = fake_dma_object(i915, BIT_ULL(size));
+		obj = fake_dma_object(vm->i915, BIT_ULL(size));
 		if (IS_ERR(obj)) {
 			kfree(order);
 			break;
@@ -811,14 +812,13 @@ static int drunk_hole(struct drm_i915_private *i915,
 		if (err)
 			return err;
 
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
 	return 0;
 }
 
-static int __shrink_hole(struct drm_i915_private *i915,
-			 struct i915_address_space *vm,
+static int __shrink_hole(struct i915_address_space *vm,
 			 u64 hole_start, u64 hole_end,
 			 unsigned long end_time)
 {
@@ -835,7 +835,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
 		u64 size = BIT_ULL(order++);
 
 		size = min(size, hole_end - addr);
-		obj = fake_dma_object(i915, size);
+		obj = fake_dma_object(vm->i915, size);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			break;
@@ -889,12 +889,11 @@ static int __shrink_hole(struct drm_i915_private *i915,
 	}
 
 	close_object_list(&objects, vm);
-	cleanup_freed_objects(i915);
+	cleanup_freed_objects(vm->i915);
 	return err;
 }
 
-static int shrink_hole(struct drm_i915_private *i915,
-		       struct i915_address_space *vm,
+static int shrink_hole(struct i915_address_space *vm,
 		       u64 hole_start, u64 hole_end,
 		       unsigned long end_time)
 {
@@ -906,7 +905,7 @@ static int shrink_hole(struct drm_i915_private *i915,
 
 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
 		vm->fault_attr.interval = prime;
-		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
+		err = __shrink_hole(vm, hole_start, hole_end, end_time);
 		if (err)
 			break;
 	}
@@ -916,8 +915,7 @@ static int shrink_hole(struct drm_i915_private *i915,
 	return err;
 }
 
-static int shrink_boom(struct drm_i915_private *i915,
-		       struct i915_address_space *vm,
+static int shrink_boom(struct i915_address_space *vm,
 		       u64 hole_start, u64 hole_end,
 		       unsigned long end_time)
 {
@@ -939,7 +937,7 @@ static int shrink_boom(struct drm_i915_private *i915,
 		unsigned int size = sizes[i];
 		struct i915_vma *vma;
 
-		purge = fake_dma_object(i915, size);
+		purge = fake_dma_object(vm->i915, size);
 		if (IS_ERR(purge))
 			return PTR_ERR(purge);
 
@@ -956,7 +954,7 @@ static int shrink_boom(struct drm_i915_private *i915,
 		/* Should now be ripe for purging */
 		i915_vma_unpin(vma);
 
-		explode = fake_dma_object(i915, size);
+		explode = fake_dma_object(vm->i915, size);
 		if (IS_ERR(explode)) {
 			err = PTR_ERR(explode);
 			goto err_purge;
@@ -982,7 +980,7 @@ static int shrink_boom(struct drm_i915_private *i915,
 		i915_gem_object_put(explode);
 
 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
 	return 0;
@@ -996,14 +994,13 @@ static int shrink_boom(struct drm_i915_private *i915,
 }
 
 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
-			  int (*func)(struct drm_i915_private *i915,
-				      struct i915_address_space *vm,
+			  int (*func)(struct i915_address_space *vm,
 				      u64 hole_start, u64 hole_end,
 				      unsigned long end_time))
 {
-	struct drm_file *file;
 	struct i915_ppgtt *ppgtt;
 	IGT_TIMEOUT(end_time);
+	struct file *file;
 	int err;
 
 	if (!HAS_FULL_PPGTT(dev_priv))
@@ -1013,7 +1010,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	ppgtt = i915_ppgtt_create(dev_priv);
+	ppgtt = i915_ppgtt_create(&dev_priv->gt);
 	if (IS_ERR(ppgtt)) {
 		err = PTR_ERR(ppgtt);
 		goto out_free;
@@ -1021,12 +1018,12 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
 	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
 	GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
 
-	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
+	err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
 
 	i915_vm_put(&ppgtt->vm);
 
 out_free:
-	mock_file_free(dev_priv, file);
+	fput(file);
 	return err;
 }
 
@@ -1077,8 +1074,7 @@ static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
 }
 
 static int exercise_ggtt(struct drm_i915_private *i915,
-			 int (*func)(struct drm_i915_private *i915,
-				     struct i915_address_space *vm,
+			 int (*func)(struct i915_address_space *vm,
 				     u64 hole_start, u64 hole_end,
 				     unsigned long end_time))
 {
@@ -1100,7 +1096,7 @@ static int exercise_ggtt(struct drm_i915_private *i915,
 		if (hole_start >= hole_end)
 			continue;
 
-		err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
+		err = func(&ggtt->vm, hole_start, hole_end, end_time);
 		if (err)
 			break;
 
@@ -1161,11 +1157,13 @@ static int igt_ggtt_page(void *arg)
 		goto out_free;
 
 	memset(&tmp, 0, sizeof(tmp));
+	mutex_lock(&ggtt->vm.mutex);
 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
 					  count * PAGE_SIZE, 0,
 					  I915_COLOR_UNEVICTABLE,
 					  0, ggtt->mappable_end,
 					  DRM_MM_INSERT_LOW);
+	mutex_unlock(&ggtt->vm.mutex);
 	if (err)
 		goto out_unpin;
 
@@ -1217,7 +1215,9 @@ static int igt_ggtt_page(void *arg)
 out_remove:
 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+	mutex_lock(&ggtt->vm.mutex);
 	drm_mm_remove_node(&tmp);
+	mutex_unlock(&ggtt->vm.mutex);
 out_unpin:
 	i915_gem_object_unpin_pages(obj);
 out_free:
@@ -1243,8 +1243,7 @@ static void track_vma_bind(struct i915_vma *vma)
 }
 
 static int exercise_mock(struct drm_i915_private *i915,
-			 int (*func)(struct drm_i915_private *i915,
-				     struct i915_address_space *vm,
+			 int (*func)(struct i915_address_space *vm,
 				     u64 hole_start, u64 hole_end,
 				     unsigned long end_time))
 {
@@ -1259,7 +1258,7 @@ static int exercise_mock(struct drm_i915_private *i915,
 		return -ENOMEM;
 
 	vm = i915_gem_context_get_vm_rcu(ctx);
-	err = func(i915, vm, 0, min(vm->total, limit), end_time);
+	err = func(vm, 0, min(vm->total, limit), end_time);
 	i915_vm_put(vm);
 
 	mock_context_close(ctx);
@@ -1782,9 +1781,9 @@ static int igt_cs_tlb(void *arg)
 	struct i915_address_space *vm;
 	struct i915_gem_context *ctx;
 	struct intel_context *ce;
-	struct drm_file *file;
 	struct i915_vma *vma;
 	I915_RND_STATE(prng);
+	struct file *file;
 	unsigned int i;
 	u32 *result;
 	u32 *batch;
@@ -2022,7 +2021,7 @@ static int igt_cs_tlb(void *arg)
 out_vm:
 	i915_vm_put(vm);
 out_unlock:
-	mock_file_free(i915, file);
+	fput(file);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 4b3cac7..34138c7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -1,5 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
  *
  * The name is used as both an enum and expanded as subtest__name to create
  * a module parameter. It must be unique and legal for a C identifier.
@@ -16,6 +22,7 @@ selftest(gt_engines, intel_engine_live_selftests)
 selftest(gt_timelines, intel_timeline_live_selftests)
 selftest(gt_contexts, intel_context_live_selftests)
 selftest(gt_lrc, intel_lrc_live_selftests)
+selftest(gt_mocs, intel_mocs_live_selftests)
 selftest(gt_pm, intel_gt_pm_live_selftests)
 selftest(gt_heartbeat, intel_heartbeat_live_selftests)
 selftest(requests, i915_request_live_selftests)
@@ -36,5 +43,6 @@ selftest(reset, intel_reset_live_selftests)
 selftest(memory_region, intel_memory_region_live_selftests)
 selftest(hangcheck, intel_hangcheck_live_selftests)
 selftest(execlists, intel_execlists_live_selftests)
-selftest(guc, intel_guc_live_selftest)
 selftest(perf, i915_perf_live_selftests)
+/* Here be dragons: keep last to run last! */
+selftest(late_gt_pm, intel_gt_pm_late_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index aa5a0e7..5b39bab 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -1,5 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
  *
  * The name is used as both an enum and expanded as subtest__name to create
  * a module parameter. It must be unique and legal for a C identifier.
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index aabd07f..d1a1568 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -132,7 +132,7 @@ static int live_noa_delay(void *arg)
 	for (i = 0; i < 4; i++)
 		intel_write_status_page(stream->engine, 0x100 + i, 0);
 
-	rq = i915_request_create(stream->engine->kernel_context);
+	rq = intel_engine_create_kernel_request(stream->engine);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto out;
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
new file mode 100644
index 0000000..5a577a1
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as subtest__name to create
+ * a module parameter. It must be unique and legal for a C identifier.
+ *
+ * The function should be of type int function(void). It may be conditionally
+ * compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
+ *
+ * Tests are executed in order by igt/i915_selftest
+ */
+selftest(engine_cs, intel_engine_cs_perf_selftests)
+selftest(blt, i915_gem_object_blt_perf_selftests)
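
These selftest lists are plain X-macro tables: the header contains nothing but selftest(name, func) entries, and each consumer redefines selftest() before including it to stamp out an enum, a lookup table or module parameters (exactly what i915_selftest.c does further down). A self-contained sketch of the pattern with made-up names:

/* Stand-in for the included list header; entries only. */
#define EXAMPLE_SELFTESTS(x)		\
	x(engine_cs, run_engine_cs)	\
	x(blt, run_blt)

struct example_selftest {
	const char *name;
	int (*run)(void);
};

static int run_engine_cs(void) { return 0; }
static int run_blt(void) { return 0; }

/* First expansion: an enum of test ids. */
#define AS_ENUM(n, f) example_##n,
enum example_selftest_id {
	EXAMPLE_SELFTESTS(AS_ENUM)
	n_example_selftests
};
#undef AS_ENUM

/* Second expansion: a table indexed by those ids. */
#define AS_ENTRY(n, f) [example_##n] = { .name = #n, .run = f },
static const struct example_selftest example_selftests[] = {
	EXAMPLE_SELFTESTS(AS_ENTRY)
};
#undef AS_ENTRY
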
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 8618a4d..f89d9c4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -27,11 +27,13 @@
 #include "gem/i915_gem_pm.h"
 #include "gem/selftests/mock_context.h"
 
+#include "gt/intel_engine_pm.h"
 #include "gt/intel_gt.h"
 
 #include "i915_random.h"
 #include "i915_selftest.h"
 #include "igt_live_test.h"
+#include "igt_spinner.h"
 #include "lib_sw_fence.h"
 
 #include "mock_drm.h"
@@ -540,6 +542,7 @@ static int live_nop_request(void *arg)
 		if (err)
 			return err;
 
+		intel_engine_pm_get(engine);
 		for_each_prime_number_from(prime, 1, 8192) {
 			struct i915_request *request = NULL;
 
@@ -578,6 +581,7 @@ static int live_nop_request(void *arg)
 			if (__igt_timeout(end_time, NULL))
 				break;
 		}
+		intel_engine_pm_put(engine);
 
 		err = igt_live_test_end(&t);
 		if (err)
@@ -692,10 +696,13 @@ static int live_empty_request(void *arg)
 		if (err)
 			goto out_batch;
 
+		intel_engine_pm_get(engine);
+
 		/* Warmup / preload */
 		request = empty_request(engine, batch);
 		if (IS_ERR(request)) {
 			err = PTR_ERR(request);
+			intel_engine_pm_put(engine);
 			goto out_batch;
 		}
 		i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
@@ -708,6 +715,7 @@ static int live_empty_request(void *arg)
 				request = empty_request(engine, batch);
 				if (IS_ERR(request)) {
 					err = PTR_ERR(request);
+					intel_engine_pm_put(engine);
 					goto out_batch;
 				}
 			}
@@ -721,6 +729,7 @@ static int live_empty_request(void *arg)
 				break;
 		}
 		i915_request_put(request);
+		intel_engine_pm_put(engine);
 
 		err = igt_live_test_end(&t);
 		if (err)
@@ -740,10 +749,8 @@ static int live_empty_request(void *arg)
 
 static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 {
-	struct i915_gem_context *ctx = i915->kernel_context;
 	struct drm_i915_gem_object *obj;
 	const int gen = INTEL_GEN(i915);
-	struct i915_address_space *vm;
 	struct i915_vma *vma;
 	u32 *cmd;
 	int err;
@@ -752,9 +759,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	vm = i915_gem_context_get_vm_rcu(ctx);
-	vma = i915_vma_instance(obj, vm, NULL);
-	i915_vm_put(vm);
+	vma = i915_vma_instance(obj, i915->gt.vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err;
@@ -845,7 +850,7 @@ static int live_all_engines(void *arg)
 
 	idx = 0;
 	for_each_uabi_engine(engine, i915) {
-		request[idx] = i915_request_create(engine->kernel_context);
+		request[idx] = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(request[idx])) {
 			err = PTR_ERR(request[idx]);
 			pr_err("%s: Request allocation failed with err=%d\n",
@@ -962,7 +967,7 @@ static int live_sequential_engines(void *arg)
 			goto out_free;
 		}
 
-		request[idx] = i915_request_create(engine->kernel_context);
+		request[idx] = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(request[idx])) {
 			err = PTR_ERR(request[idx]);
 			pr_err("%s: Request allocation failed for %s with err=%d\n",
@@ -1067,15 +1072,18 @@ static int __live_parallel_engine1(void *arg)
 	struct intel_engine_cs *engine = arg;
 	IGT_TIMEOUT(end_time);
 	unsigned long count;
+	int err = 0;
 
 	count = 0;
+	intel_engine_pm_get(engine);
 	do {
 		struct i915_request *rq;
-		int err;
 
 		rq = i915_request_create(engine->kernel_context);
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			break;
+		}
 
 		i915_request_get(rq);
 		i915_request_add(rq);
@@ -1085,13 +1093,14 @@ static int __live_parallel_engine1(void *arg)
 			err = -ETIME;
 		i915_request_put(rq);
 		if (err)
-			return err;
+			break;
 
 		count++;
 	} while (!__igt_timeout(end_time, NULL));
+	intel_engine_pm_put(engine);
 
 	pr_info("%s: %lu request + sync\n", engine->name, count);
-	return 0;
+	return err;
 }
 
 static int __live_parallel_engineN(void *arg)
@@ -1099,21 +1108,100 @@ static int __live_parallel_engineN(void *arg)
 	struct intel_engine_cs *engine = arg;
 	IGT_TIMEOUT(end_time);
 	unsigned long count;
+	int err = 0;
 
 	count = 0;
+	intel_engine_pm_get(engine);
 	do {
 		struct i915_request *rq;
 
 		rq = i915_request_create(engine->kernel_context);
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			break;
+		}
 
 		i915_request_add(rq);
 		count++;
 	} while (!__igt_timeout(end_time, NULL));
+	intel_engine_pm_put(engine);
 
 	pr_info("%s: %lu requests\n", engine->name, count);
-	return 0;
+	return err;
+}
+
+static bool wake_all(struct drm_i915_private *i915)
+{
+	if (atomic_dec_and_test(&i915->selftest.counter)) {
+		wake_up_var(&i915->selftest.counter);
+		return true;
+	}
+
+	return false;
+}
+
+static int wait_for_all(struct drm_i915_private *i915)
+{
+	if (wake_all(i915))
+		return 0;
+
+	if (wait_var_event_timeout(&i915->selftest.counter,
+				   !atomic_read(&i915->selftest.counter),
+				   i915_selftest.timeout_jiffies))
+		return 0;
+
+	return -ETIME;
+}
+
+static int __live_parallel_spin(void *arg)
+{
+	struct intel_engine_cs *engine = arg;
+	struct igt_spinner spin;
+	struct i915_request *rq;
+	int err = 0;
+
+	/*
+	 * Create a spinner running for eternity on each engine. If a second
+	 * spinner is incorrectly placed on the same engine, it will not be
+	 * able to start in time.
+	 */
+
+	if (igt_spinner_init(&spin, engine->gt)) {
+		wake_all(engine->i915);
+		return -ENOMEM;
+	}
+
+	intel_engine_pm_get(engine);
+	rq = igt_spinner_create_request(&spin,
+					engine->kernel_context,
+					MI_NOOP); /* no preemption */
+	intel_engine_pm_put(engine);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		if (err == -ENODEV)
+			err = 0;
+		wake_all(engine->i915);
+		goto out_spin;
+	}
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (igt_wait_for_spinner(&spin, rq)) {
+		/* Occupy this engine for the whole test */
+		err = wait_for_all(engine->i915);
+	} else {
+		pr_err("Failed to start spinner on %s\n", engine->name);
+		err = -EINVAL;
+	}
+	igt_spinner_end(&spin);
+
+	if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0)
+		err = -EIO;
+	i915_request_put(rq);
+
+out_spin:
+	igt_spinner_fini(&spin);
+	return err;
 }
 
 static int live_parallel_engines(void *arg)
@@ -1122,6 +1210,7 @@ static int live_parallel_engines(void *arg)
 	static int (* const func[])(void *arg) = {
 		__live_parallel_engine1,
 		__live_parallel_engineN,
+		__live_parallel_spin,
 		NULL,
 	};
 	const unsigned int nengines = num_uabi_engines(i915);
@@ -1140,13 +1229,17 @@ static int live_parallel_engines(void *arg)
 		return -ENOMEM;
 
 	for (fn = func; !err && *fn; fn++) {
+		char name[KSYM_NAME_LEN];
 		struct igt_live_test t;
 		unsigned int idx;
 
-		err = igt_live_test_begin(&t, i915, __func__, "");
+		snprintf(name, sizeof(name), "%pS", fn);
+		err = igt_live_test_begin(&t, i915, __func__, name);
 		if (err)
 			break;
 
+		atomic_set(&i915->selftest.counter, nengines);
+
 		idx = 0;
 		for_each_uabi_engine(engine, i915) {
 			tsk[idx] = kthread_run(*fn, engine,
@@ -1230,9 +1323,9 @@ static int live_breadcrumbs_smoketest(void *arg)
 	struct task_struct **threads;
 	struct igt_live_test live;
 	intel_wakeref_t wakeref;
-	struct drm_file *file;
 	struct smoketest *smoke;
 	unsigned int n, idx;
+	struct file *file;
 	int ret = 0;
 
 	/*
@@ -1354,7 +1447,7 @@ static int live_breadcrumbs_smoketest(void *arg)
 out_smoke:
 	kfree(smoke);
 out_file:
-	mock_file_free(i915, file);
+	fput(file);
 out_rpm:
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index a6cca4a..d3bf9ee 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -57,6 +57,12 @@ enum {
 #undef selftest
 };
 
+enum {
+#define selftest(name, func) perf_##name,
+#include "i915_perf_selftests.h"
+#undef selftest
+};
+
 struct selftest {
 	bool enabled;
 	const char *name;
@@ -78,6 +84,12 @@ static struct selftest live_selftests[] = {
 };
 #undef selftest
 
+#define selftest(n, f) [perf_##n] = { .name = #n, { .live = f } },
+static struct selftest perf_selftests[] = {
+#include "i915_perf_selftests.h"
+};
+#undef selftest
+
 /* Embed the line number into the parameter name so that we can order tests */
 #define selftest(n, func) selftest_0(n, func, param(n))
 #define param(n) __PASTE(igt__, __PASTE(__LINE__, __mock_##n))
@@ -93,6 +105,13 @@ module_param_named(id, live_selftests[live_##n].enabled, bool, 0400);
 #include "i915_live_selftests.h"
 #undef selftest_0
 #undef param
+
+#define param(n) __PASTE(igt__, __PASTE(__LINE__, __perf_##n))
+#define selftest_0(n, func, id) \
+module_param_named(id, perf_selftests[perf_##n].enabled, bool, 0400);
+#include "i915_perf_selftests.h"
+#undef selftest_0
+#undef param
 #undef selftest
 
 static void set_default_test_all(struct selftest *st, unsigned int count)
@@ -200,6 +219,27 @@ int i915_live_selftests(struct pci_dev *pdev)
 	return 0;
 }
 
+int i915_perf_selftests(struct pci_dev *pdev)
+{
+	int err;
+
+	if (!i915_selftest.perf)
+		return 0;
+
+	err = run_selftests(perf, pdev_to_i915(pdev));
+	if (err) {
+		i915_selftest.perf = err;
+		return err;
+	}
+
+	if (i915_selftest.perf < 0) {
+		i915_selftest.perf = -ENOTTY;
+		return 1;
+	}
+
+	return 0;
+}
+
 static bool apply_subtest_filter(const char *caller, const char *name)
 {
 	char *filter, *sep, *tok;
@@ -365,3 +405,6 @@ MODULE_PARM_DESC(mock_selftests, "Run selftests before loading, using mock hardw
 
 module_param_named_unsafe(live_selftests, i915_selftest.live, int, 0400);
 MODULE_PARM_DESC(live_selftests, "Run selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)");
+
+module_param_named_unsafe(perf_selftests, i915_selftest.perf, int, 0400);
+MODULE_PARM_DESC(perf_selftests, "Run performance orientated selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)");
diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.c b/drivers/gpu/drm/i915/selftests/igt_atomic.c
new file mode 100644
index 0000000..fb506b6
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_atomic.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/preempt.h>
+#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
+
+#include "igt_atomic.h"
+
+static void __preempt_begin(void)
+{
+	preempt_disable();
+}
+
+static void __preempt_end(void)
+{
+	preempt_enable();
+}
+
+static void __softirq_begin(void)
+{
+	local_bh_disable();
+}
+
+static void __softirq_end(void)
+{
+	local_bh_enable();
+}
+
+static void __hardirq_begin(void)
+{
+	local_irq_disable();
+}
+
+static void __hardirq_end(void)
+{
+	local_irq_enable();
+}
+
+const struct igt_atomic_section igt_atomic_phases[] = {
+	{ "preempt", __preempt_begin, __preempt_end },
+	{ "softirq", __softirq_begin, __softirq_end },
+	{ "hardirq", __hardirq_begin, __hardirq_end },
+	{ }
+};
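
The phase table above lets callers run the same operation inside each flavour of atomic context in turn. A hypothetical caller would look roughly like this (the check callback must itself be safe to run with preemption, softirqs or hardirqs disabled):

#include <linux/printk.h>

#include "igt_atomic.h"

static int example_check_all_phases(int (*check)(void *data), void *data)
{
	const struct igt_atomic_section *p;
	int err = 0;

	for (p = igt_atomic_phases; p->name; p++) {
		p->critical_section_begin();
		err = check(data);		/* must not sleep */
		p->critical_section_end();

		if (err) {
			pr_err("check failed in %s context: %d\n",
			       p->name, err);
			break;
		}
	}

	return err;
}
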
diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.h b/drivers/gpu/drm/i915/selftests/igt_atomic.h
index 93ec89f..1991798 100644
--- a/drivers/gpu/drm/i915/selftests/igt_atomic.h
+++ b/drivers/gpu/drm/i915/selftests/igt_atomic.h
@@ -6,51 +6,12 @@
 #ifndef IGT_ATOMIC_H
 #define IGT_ATOMIC_H
 
-#include <linux/preempt.h>
-#include <linux/bottom_half.h>
-#include <linux/irqflags.h>
-
-static void __preempt_begin(void)
-{
-	preempt_disable();
-}
-
-static void __preempt_end(void)
-{
-	preempt_enable();
-}
-
-static void __softirq_begin(void)
-{
-	local_bh_disable();
-}
-
-static void __softirq_end(void)
-{
-	local_bh_enable();
-}
-
-static void __hardirq_begin(void)
-{
-	local_irq_disable();
-}
-
-static void __hardirq_end(void)
-{
-	local_irq_enable();
-}
-
 struct igt_atomic_section {
 	const char *name;
 	void (*critical_section_begin)(void);
 	void (*critical_section_end)(void);
 };
 
-static const struct igt_atomic_section igt_atomic_phases[] = {
-	{ "preempt", __preempt_begin, __preempt_end },
-	{ "softirq", __softirq_begin, __softirq_end },
-	{ "hardirq", __hardirq_begin, __hardirq_end },
-	{ }
-};
+extern const struct igt_atomic_section igt_atomic_phases[];
 
 #endif /* IGT_ATOMIC_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h
index c0e9f99..36ed427 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.h
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h
@@ -7,7 +7,7 @@
 #ifndef IGT_LIVE_TEST_H
 #define IGT_LIVE_TEST_H
 
-#include "../i915_gem.h"
+#include "gt/intel_engine.h" /* for I915_NUM_ENGINES */
 
 struct drm_i915_private;
 
diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.c b/drivers/gpu/drm/i915/selftests/igt_mmap.c
new file mode 100644
index 0000000..583a4ff
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_mmap.c
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_file.h>
+
+#include "i915_drv.h"
+#include "igt_mmap.h"
+
+unsigned long igt_mmap_node(struct drm_i915_private *i915,
+			    struct drm_vma_offset_node *node,
+			    unsigned long addr,
+			    unsigned long prot,
+			    unsigned long flags)
+{
+	struct file *file;
+	int err;
+
+	/* Pretend to open("/dev/dri/card0") */
+	file = mock_drm_getfile(i915->drm.primary, O_RDWR);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	err = drm_vma_node_allow(node, file->private_data);
+	if (err) {
+		addr = err;
+		goto out_file;
+	}
+
+	addr = vm_mmap(file, addr, drm_vma_node_size(node) << PAGE_SHIFT,
+		       prot, flags, drm_vma_node_offset_addr(node));
+
+	drm_vma_node_revoke(node, file->private_data);
+out_file:
+	fput(file);
+	return addr;
+}
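
For orientation, a hypothetical caller of the new helper: given the drm_vma_offset_node an object exposes for its mmap offset (called mmo_node below, an assumed name), the mapping is created and torn down much like a userspace mmap()/munmap() pair:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mman.h>

#include <drm/drm_vma_manager.h>

#include "i915_drv.h"
#include "igt_mmap.h"

static int example_map_and_unmap(struct drm_i915_private *i915,
				 struct drm_vma_offset_node *mmo_node)
{
	unsigned long addr;

	addr = igt_mmap_node(i915, mmo_node, 0,
			     PROT_READ | PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return (int)addr;

	/* ... access the mapping as userspace would ... */

	vm_munmap(addr, drm_vma_node_size(mmo_node) << PAGE_SHIFT);
	return 0;
}
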
diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.h b/drivers/gpu/drm/i915/selftests/igt_mmap.h
new file mode 100644
index 0000000..6e716cb
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_mmap.h
@@ -0,0 +1,19 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef IGT_MMAP_H
+#define IGT_MMAP_H
+
+struct drm_i915_private;
+struct drm_vma_offset_node;
+
+unsigned long igt_mmap_node(struct drm_i915_private *i915,
+			    struct drm_vma_offset_node *node,
+			    unsigned long addr,
+			    unsigned long prot,
+			    unsigned long flags);
+
+#endif /* IGT_MMAP_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index ee8450b..e8a58fe 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -15,8 +15,6 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
 	void *vaddr;
 	int err;
 
-	GEM_BUG_ON(INTEL_GEN(gt->i915) < 8);
-
 	memset(spin, 0, sizeof(*spin));
 	spin->gt = gt;
 
@@ -95,11 +93,15 @@ igt_spinner_create_request(struct igt_spinner *spin,
 	struct intel_engine_cs *engine = ce->engine;
 	struct i915_request *rq = NULL;
 	struct i915_vma *hws, *vma;
+	unsigned int flags;
 	u32 *batch;
 	int err;
 
 	GEM_BUG_ON(spin->gt != ce->vm->gt);
 
+	if (!intel_engine_can_store_dword(ce->engine))
+		return ERR_PTR(-ENODEV);
+
 	vma = i915_vma_instance(spin->obj, ce->vm, NULL);
 	if (IS_ERR(vma))
 		return ERR_CAST(vma);
@@ -132,16 +134,37 @@ igt_spinner_create_request(struct igt_spinner *spin,
 
 	batch = spin->batch;
 
-	*batch++ = MI_STORE_DWORD_IMM_GEN4;
-	*batch++ = lower_32_bits(hws_address(hws, rq));
-	*batch++ = upper_32_bits(hws_address(hws, rq));
+	if (INTEL_GEN(rq->i915) >= 8) {
+		*batch++ = MI_STORE_DWORD_IMM_GEN4;
+		*batch++ = lower_32_bits(hws_address(hws, rq));
+		*batch++ = upper_32_bits(hws_address(hws, rq));
+	} else if (INTEL_GEN(rq->i915) >= 6) {
+		*batch++ = MI_STORE_DWORD_IMM_GEN4;
+		*batch++ = 0;
+		*batch++ = hws_address(hws, rq);
+	} else if (INTEL_GEN(rq->i915) >= 4) {
+		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+		*batch++ = 0;
+		*batch++ = hws_address(hws, rq);
+	} else {
+		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+		*batch++ = hws_address(hws, rq);
+	}
 	*batch++ = rq->fence.seqno;
 
 	*batch++ = arbitration_command;
 
-	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+	if (INTEL_GEN(rq->i915) >= 8)
+		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
+	else if (IS_HASWELL(rq->i915))
+		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
+	else if (INTEL_GEN(rq->i915) >= 6)
+		*batch++ = MI_BATCH_BUFFER_START;
+	else
+		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
 	*batch++ = lower_32_bits(vma->node.start);
 	*batch++ = upper_32_bits(vma->node.start);
+
 	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
 
 	intel_gt_chipset_flush(engine->gt);
@@ -153,7 +176,10 @@ igt_spinner_create_request(struct igt_spinner *spin,
 			goto cancel_rq;
 	}
 
-	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+	flags = 0;
+	if (INTEL_GEN(rq->i915) <= 5)
+		flags |= I915_DISPATCH_SECURE;
+	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
 cancel_rq:
 	if (err) {
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 19e1cca..3ef3620 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -32,7 +32,7 @@ static void close_objects(struct intel_memory_region *mem,
 		if (i915_gem_object_has_pinned_pages(obj))
 			i915_gem_object_unpin_pages(obj);
 		/* No polluting the memory region between tests */
-		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+		__i915_gem_object_put_pages(obj);
 		list_del(&obj->st_link);
 		i915_gem_object_put(obj);
 	}
@@ -122,7 +122,7 @@ igt_object_create(struct intel_memory_region *mem,
 static void igt_object_release(struct drm_i915_gem_object *obj)
 {
 	i915_gem_object_unpin_pages(obj);
-	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+	__i915_gem_object_put_pages(obj);
 	list_del(&obj->st_link);
 	i915_gem_object_put(obj);
 }
@@ -270,36 +270,31 @@ static int igt_gpu_write_dw(struct intel_context *ce,
 
 static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 {
-	unsigned long n;
+	unsigned long n = obj->base.size >> PAGE_SHIFT;
+	u32 *ptr;
 	int err;
 
-	i915_gem_object_lock(obj);
-	err = i915_gem_object_set_to_wc_domain(obj, false);
-	i915_gem_object_unlock(obj);
+	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
 	if (err)
 		return err;
 
-	err = i915_gem_object_pin_pages(obj);
-	if (err)
-		return err;
+	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(ptr))
+		return PTR_ERR(ptr);
 
-	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
-		u32 __iomem *base;
-		u32 read_val;
-
-		base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
-
-		read_val = ioread32(base + dword);
-		io_mapping_unmap_atomic(base);
-		if (read_val != val) {
-			pr_err("n=%lu base[%u]=%u, val=%u\n",
-			       n, dword, read_val, val);
+	ptr += dword;
+	while (n--) {
+		if (*ptr != val) {
+			pr_err("base[%u]=%08x, val=%08x\n",
+			       dword, *ptr, val);
 			err = -EINVAL;
 			break;
 		}
+
+		ptr += PAGE_SIZE / sizeof(*ptr);
 	}
 
-	i915_gem_object_unpin_pages(obj);
+	i915_gem_object_unpin_map(obj);
 	return err;
 }
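The rewritten igt_cpu_check() maps the whole object write-combined and validates a single dword in each page, stepping the pointer one page at a time. A standalone sketch of that stride, assuming an already-mapped buffer (names are illustrative):

	static int check_dword_per_page(u32 *vaddr, unsigned long nr_pages,
					u32 dword, u32 val)
	{
		u32 *ptr = vaddr + dword;		/* dword offset in page 0 */

		while (nr_pages--) {
			if (*ptr != val)
				return -EINVAL;
			ptr += PAGE_SIZE / sizeof(*ptr);	/* same dword, next page */
		}
		return 0;
	}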
 
@@ -404,7 +399,7 @@ static int igt_lmem_write_gpu(void *arg)
 	struct drm_i915_private *i915 = arg;
 	struct drm_i915_gem_object *obj;
 	struct i915_gem_context *ctx;
-	struct drm_file *file;
+	struct file *file;
 	I915_RND_STATE(prng);
 	u32 sz;
 	int err;
@@ -439,7 +434,7 @@ static int igt_lmem_write_gpu(void *arg)
 out_put:
 	i915_gem_object_put(obj);
 out_file:
-	mock_file_free(i915, file);
+	fput(file);
 	return err;
 }
 
@@ -506,7 +501,9 @@ static int igt_lmem_write_cpu(void *arg)
 	}
 
 	/* Put the pages into a known state -- from the gpu for added fun */
+	intel_engine_pm_get(engine);
 	err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
+	intel_engine_pm_put(engine);
 	if (err)
 		goto out_unpin;
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.c b/drivers/gpu/drm/i915/selftests/mock_drm.c
deleted file mode 100644
index 09c7041..0000000
--- a/drivers/gpu/drm/i915/selftests/mock_drm.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "mock_drm.h"
-
-struct drm_file *mock_file(struct drm_i915_private *i915)
-{
-	struct file *filp;
-	struct inode *inode;
-	struct drm_file *file;
-	int err;
-
-	inode = kzalloc(sizeof(*inode), GFP_KERNEL);
-	if (!inode) {
-		err = -ENOMEM;
-		goto err;
-	}
-
-	inode->i_rdev = i915->drm.primary->index;
-
-	filp = kzalloc(sizeof(*filp), GFP_KERNEL);
-	if (!filp) {
-		err = -ENOMEM;
-		goto err_inode;
-	}
-
-	err = drm_open(inode, filp);
-	if (err)
-		goto err_filp;
-
-	file = filp->private_data;
-	memset(&file->filp, POISON_INUSE, sizeof(file->filp));
-	file->authenticated = true;
-
-	kfree(filp);
-	kfree(inode);
-	return file;
-
-err_filp:
-	kfree(filp);
-err_inode:
-	kfree(inode);
-err:
-	return ERR_PTR(err);
-}
-
-void mock_file_free(struct drm_i915_private *i915, struct drm_file *file)
-{
-	struct file filp = { .private_data = file };
-
-	drm_release(NULL, &filp);
-}
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.h b/drivers/gpu/drm/i915/selftests/mock_drm.h
index b39beee..9916b6f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_drm.h
+++ b/drivers/gpu/drm/i915/selftests/mock_drm.h
@@ -25,7 +25,21 @@
 #ifndef __MOCK_DRM_H
 #define __MOCK_DRM_H
 
-struct drm_file *mock_file(struct drm_i915_private *i915);
-void mock_file_free(struct drm_i915_private *i915, struct drm_file *file);
+#include <drm/drm_file.h>
+
+#include "i915_drv.h"
+
+struct drm_file;
+struct file;
+
+static inline struct file *mock_file(struct drm_i915_private *i915)
+{
+	return mock_drm_getfile(i915->drm.primary, O_RDWR);
+}
+
+static inline struct drm_file *to_drm_file(struct file *f)
+{
+	return f->private_data;
+}
 
 #endif /* !__MOCK_DRM_H */
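mock_file() now returns a plain struct file and callers release it with fput(); when the drm_file itself is needed it is fetched from file->private_data via to_drm_file(). A hedged usage sketch (the selftest body is hypothetical):

	static int example_selftest(struct drm_i915_private *i915)
	{
		struct file *file;
		struct drm_file *fpriv;

		file = mock_file(i915);
		if (IS_ERR(file))
			return PTR_ERR(file);

		fpriv = to_drm_file(file);	/* drm_file kept in file->private_data */
		/* ... exercise the mock client through fpriv ... */

		fput(file);			/* replaces mock_file_free() */
		return 0;
	}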
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 27ed3ce..3b89869 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -54,25 +54,19 @@ void mock_device_flush(struct drm_i915_private *i915)
 static void mock_device_release(struct drm_device *dev)
 {
 	struct drm_i915_private *i915 = to_i915(dev);
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 
 	mock_device_flush(i915);
+	intel_gt_driver_remove(&i915->gt);
 
-	i915_gem_drain_workqueue(i915);
-
-	for_each_engine(engine, &i915->gt, id)
-		mock_engine_free(engine);
 	i915_gem_driver_release__contexts(i915);
 
-	intel_timelines_fini(i915);
-
-	drain_workqueue(i915->wq);
+	i915_gem_drain_workqueue(i915);
 	i915_gem_drain_freed_objects(i915);
 
 	mock_fini_ggtt(&i915->ggtt);
 	destroy_workqueue(i915->wq);
 
+	intel_gt_driver_late_release(&i915->gt);
 	intel_memory_regions_driver_release(i915);
 
 	drm_mode_config_cleanup(&i915->drm);
@@ -180,9 +174,8 @@ struct drm_i915_private *mock_gem_device(void)
 
 	mock_init_contexts(i915);
 
-	intel_timelines_init(i915);
-
 	mock_init_ggtt(i915, &i915->ggtt);
+	i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
 
 	mkwrite_device_info(i915)->engine_mask = BIT(0);
 
@@ -190,25 +183,20 @@ struct drm_i915_private *mock_gem_device(void)
 	if (!i915->engine[RCS0])
 		goto err_unlock;
 
-	i915->kernel_context = mock_context(i915, NULL);
-	if (!i915->kernel_context)
-		goto err_engine;
-
 	if (mock_engine_init(i915->engine[RCS0]))
 		goto err_context;
 
+	__clear_bit(I915_WEDGED, &i915->gt.reset.flags);
 	intel_engines_driver_register(i915);
 
 	return i915;
 
 err_context:
-	i915_gem_driver_release__contexts(i915);
-err_engine:
-	mock_engine_free(i915->engine[RCS0]);
+	intel_gt_driver_remove(&i915->gt);
 err_unlock:
-	intel_timelines_fini(i915);
 	destroy_workqueue(i915->wq);
 err_drv:
+	intel_gt_driver_late_release(&i915->gt);
 	intel_memory_regions_driver_release(i915);
 	drm_mode_config_cleanup(&i915->drm);
 	drm_dev_fini(&i915->drm);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 20ac384..edc5e3d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -55,6 +55,11 @@ static void mock_cleanup(struct i915_address_space *vm)
 {
 }
 
+static void mock_clear_range(struct i915_address_space *vm,
+			     u64 start, u64 length)
+{
+}
+
 struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
 {
 	struct i915_ppgtt *ppgtt;
@@ -70,7 +75,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
 
 	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
 
-	ppgtt->vm.clear_range = nop_clear_range;
+	ppgtt->vm.clear_range = mock_clear_range;
 	ppgtt->vm.insert_page = mock_insert_page;
 	ppgtt->vm.insert_entries = mock_insert_entries;
 	ppgtt->vm.cleanup = mock_cleanup;
@@ -107,7 +112,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
 	ggtt->mappable_end = resource_size(&ggtt->gmadr);
 	ggtt->vm.total = 4096 * PAGE_SIZE;
 
-	ggtt->vm.clear_range = nop_clear_range;
+	ggtt->vm.clear_range = mock_clear_range;
 	ggtt->vm.insert_page = mock_insert_page;
 	ggtt->vm.insert_entries = mock_insert_entries;
 	ggtt->vm.cleanup = mock_cleanup;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h
index 3387393..e3f224f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h
@@ -25,6 +25,9 @@
 #ifndef __MOCK_GTT_H
 #define __MOCK_GTT_H
 
+struct drm_i915_private;
+struct i915_ggtt;
+
 void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt);
 void mock_fini_ggtt(struct i915_ggtt *ggtt);
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.h b/drivers/gpu/drm/i915/selftests/mock_region.h
index 2460808..329bf74 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.h
+++ b/drivers/gpu/drm/i915/selftests/mock_region.h
@@ -6,6 +6,11 @@
 #ifndef __MOCK_REGION_H
 #define __MOCK_REGION_H
 
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_memory_region;
+
 struct intel_memory_region *
 mock_region_create(struct drm_i915_private *i915,
 		   resource_size_t start,
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
index 8a2cc55..7acf1ef 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.h
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -25,6 +25,9 @@
 #ifndef __MOCK_UNCORE_H
 #define __MOCK_UNCORE_H
 
+struct drm_i915_private;
+struct intel_uncore;
+
 void mock_uncore_init(struct intel_uncore *uncore,
 		      struct drm_i915_private *i915);
 
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 208069f..8cb2665 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -127,7 +127,7 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
 	struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
 	int num_modes;
 
-	num_modes = drm_panel_get_modes(imx_ldb_ch->panel);
+	num_modes = drm_panel_get_modes(imx_ldb_ch->panel, connector);
 	if (num_modes > 0)
 		return num_modes;
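drm_panel_get_modes() now takes the connector the modes are probed for, so callers pass their own connector instead of relying on the one remembered at drm_panel_attach() time. A sketch of the resulting call shape (the surrounding driver types are hypothetical):

	static int example_connector_get_modes(struct drm_connector *connector)
	{
		struct example_output *out = to_example_output(connector);

		return drm_panel_get_modes(out->panel, connector);
	}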
 
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 35518e5..3dca424 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -50,7 +50,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
 	struct device_node *np = imxpd->dev->of_node;
 	int num_modes;
 
-	num_modes = drm_panel_get_modes(imxpd->panel);
+	num_modes = drm_panel_get_modes(imxpd->panel, connector);
 	if (num_modes > 0)
 		return num_modes;
 
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index ec32e1c..6d47ef7 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -153,6 +153,7 @@ struct ingenic_dma_hwdesc {
 
 struct jz_soc_info {
 	bool needs_dev_clk;
+	unsigned int max_width, max_height;
 };
 
 struct ingenic_drm {
@@ -164,6 +165,7 @@ struct ingenic_drm {
 	struct device *dev;
 	struct regmap *map;
 	struct clk *lcd_clk, *pix_clk;
+	const struct jz_soc_info *soc_info;
 
 	struct ingenic_dma_hwdesc *dma_hwdesc;
 	dma_addr_t dma_hwdesc_phys;
@@ -326,6 +328,10 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
 	if (!drm_atomic_crtc_needs_modeset(state))
 		return 0;
 
+	if (state->mode.hdisplay > priv->soc_info->max_height ||
+	    state->mode.vdisplay > priv->soc_info->max_width)
+		return -EINVAL;
+
 	rate = clk_round_rate(priv->pix_clk,
 			      state->adjusted_mode.clock * 1000);
 	if (rate < 0)
@@ -372,14 +378,18 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
 	struct ingenic_drm *priv = drm_plane_get_priv(plane);
 	struct drm_plane_state *state = plane->state;
 	unsigned int width, height, cpp;
+	dma_addr_t addr;
 
-	width = state->crtc->state->adjusted_mode.hdisplay;
-	height = state->crtc->state->adjusted_mode.vdisplay;
-	cpp = state->fb->format->cpp[plane->index];
+	if (state && state->fb) {
+		addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
+		width = state->src_w >> 16;
+		height = state->src_h >> 16;
+		cpp = state->fb->format->cpp[plane->index];
 
-	priv->dma_hwdesc->addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
-	priv->dma_hwdesc->cmd = width * height * cpp / 4;
-	priv->dma_hwdesc->cmd |= JZ_LCD_CMD_EOF_IRQ;
+		priv->dma_hwdesc->addr = addr;
+		priv->dma_hwdesc->cmd = width * height * cpp / 4;
+		priv->dma_hwdesc->cmd |= JZ_LCD_CMD_EOF_IRQ;
+	}
 }
 
 static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
@@ -617,6 +627,7 @@ static int ingenic_drm_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
+	priv->soc_info = soc_info;
 	priv->dev = dev;
 	drm = &priv->drm;
 	drm->dev_private = priv;
@@ -632,8 +643,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
 	drm_mode_config_init(drm);
 	drm->mode_config.min_width = 0;
 	drm->mode_config.min_height = 0;
-	drm->mode_config.max_width = 800;
-	drm->mode_config.max_height = 600;
+	drm->mode_config.max_width = soc_info->max_width;
+	drm->mode_config.max_height = 4095;
 	drm->mode_config.funcs = &ingenic_drm_mode_config_funcs;
 
 	base = devm_platform_ioremap_resource(pdev, 0);
@@ -810,15 +821,26 @@ static int ingenic_drm_remove(struct platform_device *pdev)
 
 static const struct jz_soc_info jz4740_soc_info = {
 	.needs_dev_clk = true,
+	.max_width = 800,
+	.max_height = 600,
 };
 
 static const struct jz_soc_info jz4725b_soc_info = {
 	.needs_dev_clk = false,
+	.max_width = 800,
+	.max_height = 600,
+};
+
+static const struct jz_soc_info jz4770_soc_info = {
+	.needs_dev_clk = false,
+	.max_width = 1280,
+	.max_height = 720,
 };
 
 static const struct of_device_id ingenic_drm_of_match[] = {
 	{ .compatible = "ingenic,jz4740-lcd", .data = &jz4740_soc_info },
 	{ .compatible = "ingenic,jz4725b-lcd", .data = &jz4725b_soc_info },
+	{ .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info },
 	{ /* sentinel */ },
 };
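The per-SoC limits travel from the OF match table into the driver through the .data pointer: the probe stores them in priv->soc_info and the CRTC atomic check compares the requested mode against them. A minimal sketch of the lookup, assuming the probe fetches the data with of_device_get_match_data() (that call sits outside the hunks shown):

	const struct jz_soc_info *soc_info;

	soc_info = of_device_get_match_data(dev);
	if (!soc_info)
		return -EINVAL;

	priv->soc_info = soc_info;
	drm->mode_config.max_width = soc_info->max_width;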
 
diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig
index 571dc36..d589f09 100644
--- a/drivers/gpu/drm/lima/Kconfig
+++ b/drivers/gpu/drm/lima/Kconfig
@@ -11,4 +11,4 @@
        select DRM_SCHED
        select DRM_GEM_SHMEM_HELPER
        help
-         DRM driver for ARM Mali 400/450 GPUs.
+	 DRM driver for ARM Mali 400/450 GPUs.
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index f522c5f..b561dd0 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -159,9 +159,10 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
 			    struct lima_sched_context *context,
 			    atomic_t *guilty)
 {
-	struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL;
+	struct drm_gpu_scheduler *sched = &pipe->base;
 
-	return drm_sched_entity_init(&context->base, &rq, 1, guilty);
+	return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
+				     &sched, 1, guilty);
 }
 
 void lima_sched_context_fini(struct lima_sched_pipe *pipe,
@@ -255,13 +256,17 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
 	return task->fence;
 }
 
-static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe,
-					 struct lima_sched_task *task)
+static void lima_sched_timedout_job(struct drm_sched_job *job)
 {
+	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+	struct lima_sched_task *task = to_lima_task(job);
+
+	if (!pipe->error)
+		DRM_ERROR("lima job timeout\n");
+
 	drm_sched_stop(&pipe->base, &task->base);
 
-	if (task)
-		drm_sched_increase_karma(&task->base);
+	drm_sched_increase_karma(&task->base);
 
 	pipe->task_error(pipe);
 
@@ -284,16 +289,6 @@ static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe,
 	drm_sched_start(&pipe->base, true);
 }
 
-static void lima_sched_timedout_job(struct drm_sched_job *job)
-{
-	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
-	struct lima_sched_task *task = to_lima_task(job);
-
-	DRM_ERROR("lima job timeout\n");
-
-	lima_sched_handle_error_task(pipe, task);
-}
-
 static void lima_sched_free_job(struct drm_sched_job *job)
 {
 	struct lima_sched_task *task = to_lima_task(job);
@@ -318,15 +313,6 @@ static const struct drm_sched_backend_ops lima_sched_ops = {
 	.free_job = lima_sched_free_job,
 };
 
-static void lima_sched_error_work(struct work_struct *work)
-{
-	struct lima_sched_pipe *pipe =
-		container_of(work, struct lima_sched_pipe, error_work);
-	struct lima_sched_task *task = pipe->current_task;
-
-	lima_sched_handle_error_task(pipe, task);
-}
-
 int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
 {
 	unsigned int timeout = lima_sched_timeout_ms > 0 ?
@@ -335,8 +321,6 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
 	pipe->fence_context = dma_fence_context_alloc(1);
 	spin_lock_init(&pipe->fence_lock);
 
-	INIT_WORK(&pipe->error_work, lima_sched_error_work);
-
 	return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0,
 			      msecs_to_jiffies(timeout), name);
 }
@@ -349,7 +333,7 @@ void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
 void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
 {
 	if (pipe->error)
-		schedule_work(&pipe->error_work);
+		drm_sched_fault(&pipe->base);
 	else {
 		struct lima_sched_task *task = pipe->current_task;
 
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 928af91..1d814fe 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -68,8 +68,6 @@ struct lima_sched_pipe {
 	void (*task_fini)(struct lima_sched_pipe *pipe);
 	void (*task_error)(struct lima_sched_pipe *pipe);
 	void (*task_mmu_error)(struct lima_sched_pipe *pipe);
-
-	struct work_struct error_work;
 };
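With the private error_work gone, lima touches the GPU scheduler in two places: entity creation now takes an explicit priority plus a scheduler list, and hardware faults are reported with drm_sched_fault() so they share the .timedout_job path. A consolidated sketch of the two call sites, mirroring the hunks above:

	struct drm_gpu_scheduler *sched = &pipe->base;
	int err;

	err = drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
				    &sched, 1, guilty);
	if (err)
		return err;

	/* later, from the task-done path */
	if (pipe->error)
		drm_sched_fault(&pipe->base);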
 
 int lima_sched_task_init(struct lima_sched_task *task,
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index 751454a..e59907e 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -498,24 +498,20 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
 	}
 
 	/* Set up channel 0 sync (based on chnl_update_registers()) */
-	if (mcde->te_sync) {
-		/*
-		 * Turn on hardware TE0 synchronization
-		 */
+	if (mcde->video_mode || mcde->te_sync)
 		val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
 			<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
-		val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_TE0
-			<< MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
-	} else {
-		/*
-		 * Set up sync source to software, out sync formatter
-		 * Code mostly from mcde_hw.c chnl_update_registers()
-		 */
+	else
 		val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SOFTWARE
 			<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
+
+	if (mcde->te_sync)
+		val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_TE0
+			<< MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
+	else
 		val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_FORMATTER
 			<< MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
-	}
+
 	writel(val, mcde->regs + sync);
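The sync source is now chosen in two independent steps: hardware vs software source synchronization, then TE0 vs formatter out-sync. Combined with mcde_dsi_attach_to_mcde() setting te_sync = !video_mode (see the mcde_dsi.c hunks below), the effective combinations are:

	video mode panel:    SRC_SYNCH_HARDWARE + OUT_SYNCH_SRC_FORMATTER
	command mode panel:  SRC_SYNCH_HARDWARE + OUT_SYNCH_SRC_TE0 (te_sync)
	neither set:         SRC_SYNCH_SOFTWARE + OUT_SYNCH_SRC_FORMATTER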
 
 	/* Set up pixels per line and lines per frame */
@@ -934,10 +930,17 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
 		val = readl(mcde->regs + MCDE_CRC);
 		val |= MCDE_CRC_SYCEN0;
 		writel(val, mcde->regs + MCDE_CRC);
-
-		drm_crtc_vblank_on(crtc);
 	}
 
+	drm_crtc_vblank_on(crtc);
+
+	if (mcde->video_mode)
+		/*
+		 * Keep FIFO permanently enabled in video mode,
+		 * otherwise MCDE will stop feeding data to the panel.
+		 */
+		mcde_enable_fifo(mcde, MCDE_FIFO_A);
+
 	dev_info(drm->dev, "MCDE display is enabled\n");
 }
 
@@ -946,13 +949,22 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
 	struct drm_crtc *crtc = &pipe->crtc;
 	struct drm_device *drm = crtc->dev;
 	struct mcde *mcde = drm->dev_private;
+	struct drm_pending_vblank_event *event;
 
-	if (mcde->te_sync)
-		drm_crtc_vblank_off(crtc);
+	drm_crtc_vblank_off(crtc);
 
 	/* Disable FIFO A flow */
 	mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
 
+	event = crtc->state->event;
+	if (event) {
+		crtc->state->event = NULL;
+
+		spin_lock_irq(&crtc->dev->event_lock);
+		drm_crtc_send_vblank_event(crtc, event);
+		spin_unlock_irq(&crtc->dev->event_lock);
+	}
+
 	dev_info(drm->dev, "MCDE display is disabled\n");
 }
 
@@ -1048,8 +1060,9 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
 	 */
 	if (fb) {
 		mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
-		/* Send a single frame using software sync */
-		mcde_display_send_one_frame(mcde);
+		if (!mcde->video_mode)
+			/* Send a single frame using software sync */
+			mcde_display_send_one_frame(mcde);
 		dev_info_once(mcde->dev, "sent first display update\n");
 	} else {
 		/*
@@ -1097,6 +1110,8 @@ static struct drm_simple_display_pipe_funcs mcde_display_funcs = {
 	.enable = mcde_display_enable,
 	.disable = mcde_display_disable,
 	.update = mcde_display_update,
+	.enable_vblank = mcde_display_enable_vblank,
+	.disable_vblank = mcde_display_disable_vblank,
 	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
@@ -1123,12 +1138,6 @@ int mcde_display_init(struct drm_device *drm)
 		DRM_FORMAT_YUV422,
 	};
 
-	/* Provide vblank only when we have TE enabled */
-	if (mcde->te_sync) {
-		mcde_display_funcs.enable_vblank = mcde_display_enable_vblank;
-		mcde_display_funcs.disable_vblank = mcde_display_disable_vblank;
-	}
-
 	ret = drm_simple_display_pipe_init(drm, &mcde->pipe,
 					   &mcde_display_funcs,
 					   formats, ARRAY_SIZE(formats),
diff --git a/drivers/gpu/drm/mcde/mcde_drm.h b/drivers/gpu/drm/mcde/mcde_drm.h
index dab4db0..80edd66 100644
--- a/drivers/gpu/drm/mcde/mcde_drm.h
+++ b/drivers/gpu/drm/mcde/mcde_drm.h
@@ -19,6 +19,7 @@ struct mcde {
 	struct mipi_dsi_device *mdsi;
 	s16 stride;
 	bool te_sync;
+	bool video_mode;
 	bool oneshot_mode;
 	unsigned int flow_active;
 	spinlock_t flow_lock; /* Locks the channel flow control */
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 5649887..9008ddc 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -179,18 +179,10 @@ static int mcde_modeset_init(struct drm_device *drm)
 	mode_config->min_height = 1;
 	mode_config->max_height = 1080;
 
-	/*
-	 * Currently we only support vblank handling on the DSI bridge, using
-	 * TE synchronization. If TE sync is not set up, it is still possible
-	 * to push out a single update on demand, but this is hard for DRM to
-	 * exploit.
-	 */
-	if (mcde->te_sync) {
-		ret = drm_vblank_init(drm, 1);
-		if (ret) {
-			dev_err(drm->dev, "failed to init vblank\n");
-			goto out_config;
-		}
+	ret = drm_vblank_init(drm, 1);
+	if (ret) {
+		dev_err(drm->dev, "failed to init vblank\n");
+		goto out_config;
 	}
 
 	ret = mcde_display_init(drm);
@@ -339,8 +331,6 @@ static int mcde_probe(struct platform_device *pdev)
 	drm->dev_private = mcde;
 	platform_set_drvdata(pdev, drm);
 
-	/* Enable use of the TE signal and interrupt */
-	mcde->te_sync = true;
 	/* Enable continuous updates: this is what Linux' framebuffer expects */
 	mcde->oneshot_mode = false;
 	drm->dev_private = mcde;
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index ef4c630..bb6528b 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -39,7 +39,6 @@ struct mcde_dsi {
 	struct device *dev;
 	struct mcde *mcde;
 	struct drm_bridge bridge;
-	struct drm_connector connector;
 	struct drm_panel *panel;
 	struct drm_bridge *bridge_out;
 	struct mipi_dsi_host dsi_host;
@@ -64,11 +63,6 @@ static inline struct mcde_dsi *host_to_mcde_dsi(struct mipi_dsi_host *h)
 	return container_of(h, struct mcde_dsi, dsi_host);
 }
 
-static inline struct mcde_dsi *connector_to_mcde_dsi(struct drm_connector *c)
-{
-	return container_of(c, struct mcde_dsi, connector);
-}
-
 bool mcde_dsi_irq(struct mipi_dsi_device *mdsi)
 {
 	struct mcde_dsi *d;
@@ -124,12 +118,41 @@ bool mcde_dsi_irq(struct mipi_dsi_device *mdsi)
 
 	val = readl(d->regs + DSI_VID_MODE_STS_FLAG);
 	if (val)
-		dev_err(d->dev, "some video mode error status\n");
+		dev_dbg(d->dev, "DSI_VID_MODE_STS_FLAG = %08x\n", val);
+	if (val & DSI_VID_MODE_STS_VSG_RUNNING)
+		dev_dbg(d->dev, "VID mode VSG running\n");
+	if (val & DSI_VID_MODE_STS_ERR_MISSING_DATA)
+		dev_err(d->dev, "VID mode missing data\n");
+	if (val & DSI_VID_MODE_STS_ERR_MISSING_HSYNC)
+		dev_err(d->dev, "VID mode missing HSYNC\n");
+	if (val & DSI_VID_MODE_STS_ERR_MISSING_VSYNC)
+		dev_err(d->dev, "VID mode missing VSYNC\n");
+	if (val & DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH)
+		dev_err(d->dev, "VID mode less bytes than expected between two HSYNC\n");
+	if (val & DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT)
+		dev_err(d->dev, "VID mode less lines than expected between two VSYNC\n");
+	if (val & (DSI_VID_MODE_STS_ERR_BURSTWRITE |
+		   DSI_VID_MODE_STS_ERR_LINEWRITE |
+		   DSI_VID_MODE_STS_ERR_LONGREAD))
+		dev_err(d->dev, "VID mode read/write error\n");
+	if (val & DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH)
+		dev_err(d->dev, "VID mode received packets differ from expected size\n");
+	if (val & DSI_VID_MODE_STS_VSG_RECOVERY)
+		dev_err(d->dev, "VID mode VSG in recovery mode\n");
 	writel(val, d->regs + DSI_VID_MODE_STS_CLR);
 
 	return te_received;
 }
 
+static void mcde_dsi_attach_to_mcde(struct mcde_dsi *d)
+{
+	d->mcde->mdsi = d->mdsi;
+
+	d->mcde->video_mode = !!(d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO);
+	/* Enable use of the TE signal for all command mode panels */
+	d->mcde->te_sync = !d->mcde->video_mode;
+}
+
 static int mcde_dsi_host_attach(struct mipi_dsi_host *host,
 				struct mipi_dsi_device *mdsi)
 {
@@ -148,7 +171,7 @@ static int mcde_dsi_host_attach(struct mipi_dsi_host *host,
 
 	d->mdsi = mdsi;
 	if (d->mcde)
-		d->mcde->mdsi = mdsi;
+		mcde_dsi_attach_to_mcde(d);
 
 	return 0;
 }
@@ -223,25 +246,25 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
 	if (txlen > 0) {
 		val = 0;
 		for (i = 0; i < 4 && i < txlen; i++)
-			val |= tx[i] << (i & 3) * 8;
+			val |= tx[i] << (i * 8);
 	}
 	writel(val, d->regs + DSI_DIRECT_CMD_WRDAT0);
 	if (txlen > 4) {
 		val = 0;
 		for (i = 0; i < 4 && (i + 4) < txlen; i++)
-			val |= tx[i + 4] << (i & 3) * 8;
+			val |= tx[i + 4] << (i * 8);
 		writel(val, d->regs + DSI_DIRECT_CMD_WRDAT1);
 	}
 	if (txlen > 8) {
 		val = 0;
 		for (i = 0; i < 4 && (i + 8) < txlen; i++)
-			val |= tx[i + 8] << (i & 3) * 8;
+			val |= tx[i + 8] << (i * 8);
 		writel(val, d->regs + DSI_DIRECT_CMD_WRDAT2);
 	}
 	if (txlen > 12) {
 		val = 0;
 		for (i = 0; i < 4 && (i + 12) < txlen; i++)
-			val |= tx[i + 12] << (i & 3) * 8;
+			val |= tx[i + 12] << (i * 8);
 		writel(val, d->regs + DSI_DIRECT_CMD_WRDAT3);
 	}
 
@@ -336,7 +359,7 @@ void mcde_dsi_te_request(struct mipi_dsi_device *mdsi)
 	val |= 0 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT;
 	val |= 2 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT;
 	val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN;
-	val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_1 <<
+	val |= MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM <<
 		DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT;
 	writel(val, d->regs + DSI_DIRECT_CMD_MAIN_SETTINGS);
 
@@ -365,13 +388,14 @@ void mcde_dsi_te_request(struct mipi_dsi_device *mdsi)
 static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
 				      const struct drm_display_mode *mode)
 {
-	u8 bpp = mipi_dsi_pixel_format_to_bpp(d->mdsi->format);
+	/* cpp, characters per pixel, number of bytes per pixel */
+	u8 cpp = mipi_dsi_pixel_format_to_bpp(d->mdsi->format) / 8;
+	u64 pclk;
 	u64 bpl;
-	u32 hfp;
-	u32 hbp;
-	u32 hsa;
+	int hfp;
+	int hbp;
+	int hsa;
 	u32 blkline_pck, line_duration;
-	u32 blkeol_pck, blkeol_duration;
 	u32 val;
 
 	val = 0;
@@ -408,11 +432,21 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
 		return;
 	}
 
-	/* TODO: TVG could be enabled here */
+	/* TODO: TVG (test video generator) could be enabled here */
 
-	/* Send blanking packet */
+	/*
+	 * During vertical blanking: go to LP mode
+	 * Like with the EOL setting, if this is not set, the EOL area will be
+	 * filled with NULL or blanking packets in the vblank area.
+	 * FIXME: some Samsung phones and display panels such as s6e63m0 use
+	 * DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_BLANKING here instead,
+	 * figure out how to properly configure that from the panel.
+	 */
 	val |= DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_0;
-	/* Send EOL packet */
+	/*
+	 * During EOL: go to LP mode. If this is not set, the EOL area will be
+	 * filled with NULL or blanking packets.
+	 */
 	val |= DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0;
 	/* Recovery mode 1 */
 	val |= 1 << DSI_VID_MAIN_CTL_RECOVERY_MODE_SHIFT;
@@ -420,13 +454,13 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
 	writel(val, d->regs + DSI_VID_MAIN_CTL);
 
 	/* Vertical frame parameters are pretty straight-forward */
-	val = mode->vdisplay << DSI_VID_VSIZE_VSA_LENGTH_SHIFT;
+	val = mode->vdisplay << DSI_VID_VSIZE_VACT_LENGTH_SHIFT;
 	/* vertical front porch */
 	val |= (mode->vsync_start - mode->vdisplay)
 		<< DSI_VID_VSIZE_VFP_LENGTH_SHIFT;
 	/* vertical sync active */
 	val |= (mode->vsync_end - mode->vsync_start)
-		<< DSI_VID_VSIZE_VACT_LENGTH_SHIFT;
+		<< DSI_VID_VSIZE_VSA_LENGTH_SHIFT;
 	/* vertical back porch */
 	val |= (mode->vtotal - mode->vsync_end)
 		<< DSI_VID_VSIZE_VBP_LENGTH_SHIFT;
@@ -434,36 +468,54 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
 
 	/*
 	 * Horizontal frame parameters:
-	 * horizontal resolution is given in pixels and must be re-calculated
-	 * into bytes since this is what the hardware expects.
+	 * horizontal resolution is given in pixels but must be re-calculated
+	 * into bytes since this is what the hardware expects, these registers
+	 * define the payload size of the packet.
+	 *
+	 * hfp = horizontal front porch in bytes
+	 * hbp = horizontal back porch in bytes
+	 * hsa = horizontal sync active in bytes
 	 *
 	 * 6 + 2 is HFP header + checksum
 	 */
-	hfp = (mode->hsync_start - mode->hdisplay) * bpp - 6 - 2;
+	hfp = (mode->hsync_start - mode->hdisplay) * cpp - 6 - 2;
 	if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
 		/*
+		 * Use sync pulse for sync: explicit HSA time
 		 * 6 is HBP header + checksum
 		 * 4 is RGB header + checksum
 		 */
-		hbp = (mode->htotal - mode->hsync_end) * bpp - 4 - 6;
+		hbp = (mode->htotal - mode->hsync_end) * cpp - 4 - 6;
 		/*
 		 * 6 is HBP header + checksum
 		 * 4 is HSW packet bytes
 		 * 4 is RGB header + checksum
 		 */
-		hsa = (mode->hsync_end - mode->hsync_start) * bpp - 4 - 4 - 6;
+		hsa = (mode->hsync_end - mode->hsync_start) * cpp - 4 - 4 - 6;
 	} else {
 		/*
-		 * HBP includes both back porch and sync
+		 * Use event for sync: HBP includes both back porch and sync
 		 * 6 is HBP header + checksum
 		 * 4 is HSW packet bytes
 		 * 4 is RGB header + checksum
 		 */
-		hbp = (mode->htotal - mode->hsync_start) * bpp - 4 - 4 - 6;
-		/* HSA is not considered in this mode and set to 0 */
+		hbp = (mode->htotal - mode->hsync_start) * cpp - 4 - 4 - 6;
+		/* HSA is not present in this mode and set to 0 */
 		hsa = 0;
 	}
-	dev_dbg(d->dev, "hfp: %u, hbp: %u, hsa: %u\n",
+	if (hfp < 0) {
+		dev_info(d->dev, "hfp negative, set to 0\n");
+		hfp = 0;
+	}
+	if (hbp < 0) {
+		dev_info(d->dev, "hbp negative, set to 0\n");
+		hbp = 0;
+	}
+	if (hsa < 0) {
+		dev_info(d->dev, "hsa negative, set to 0\n");
+		hsa = 0;
+	}
+	dev_dbg(d->dev, "hfp: %u, hbp: %u, hsa: %u bytes\n",
 		hfp, hbp, hsa);
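A worked example of the porch arithmetic above, with illustrative numbers only: a 24bpp panel (cpp = 3) with hdisplay = 480, hsync_start = 496, hsync_end = 498 and htotal = 512, driven without a sync pulse (event mode), gives

	hfp = (496 - 480) * 3 - 6 - 2     = 40 bytes
	hbp = (512 - 496) * 3 - 4 - 4 - 6 = 34 bytes
	hsa = 0 (not present in event mode)

Negative results are clamped to 0 by the checks above.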
 
 	/* Frame parameters: horizontal sync active */
@@ -474,91 +526,185 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
 	val |= hfp << DSI_VID_HSIZE1_HFP_LENGTH_SHIFT;
 	writel(val, d->regs + DSI_VID_HSIZE1);
 
-	/* RGB data length (bytes on one scanline) */
-	val = mode->hdisplay * (bpp / 8);
+	/* RGB data length (visible bytes on one scanline) */
+	val = mode->hdisplay * cpp;
 	writel(val, d->regs + DSI_VID_HSIZE2);
-
-	/* TODO: further adjustments for TVG mode here */
+	dev_dbg(d->dev, "RGB length, visible area on a line: %u bytes\n", val);
 
 	/*
-	 * EOL packet length from bits per line calculations: pixel clock
-	 * is given in kHz, calculate the time between two pixels in
-	 * picoseconds.
+	 * Calculate the time between two pixels in picoseconds using
+	 * the supplied refresh rate and total resolution including
+	 * porches and sync.
 	 */
-	bpl = mode->clock * mode->htotal;
-	bpl *= (d->hs_freq / 8);
-	do_div(bpl, 1000000); /* microseconds */
-	do_div(bpl, 1000000); /* seconds */
+	/* (ps/s) / (pixels/s) = ps/pixels */
+	pclk = DIV_ROUND_UP_ULL(1000000000000,
+				(mode->vrefresh * mode->htotal * mode->vtotal));
+	dev_dbg(d->dev, "picoseconds between two pixels: %llu\n",
+		pclk);
+
+	/*
+	 * How many bytes per line will this update frequency yield?
+	 *
+	 * Calculate the number of picoseconds for one scanline (1), then
+	 * divide by 1000000000000 (2) to get the pixels per second we
+	 * want to output.
+	 *
+	 * Multiply with number of bytes per second at this video display
+	 * frequency (3) to get number of bytes transferred during this
+	 * time. Notice that we use the frequency the display wants,
+	 * not what we actually get from the DSI PLL, which is hs_freq.
+	 *
+	 * These arithmetics are done in a different order to avoid
+	 * overflow.
+	 */
+	bpl = pclk * mode->htotal; /* (1) picoseconds per line */
+	dev_dbg(d->dev, "picoseconds per line: %llu\n", bpl);
+	/* Multiply with bytes per second (3) */
+	bpl *= (d->mdsi->hs_rate / 8);
+	/* Pixels per second (2) */
+	bpl = DIV_ROUND_DOWN_ULL(bpl, 1000000); /* microseconds */
+	bpl = DIV_ROUND_DOWN_ULL(bpl, 1000000); /* seconds */
+	/* parallel transactions in all lanes */
 	bpl *= d->mdsi->lanes;
-	dev_dbg(d->dev, "calculated bytes per line: %llu\n", bpl);
+	dev_dbg(d->dev,
+		"calculated bytes per line: %llu @ %d Hz with HS %lu Hz\n",
+		bpl, mode->vrefresh, d->mdsi->hs_rate);
+
 	/*
 	 * 6 is header + checksum, header = 4 bytes, checksum = 2 bytes
 	 * 4 is short packet for vsync/hsync
 	 */
 	if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
-		/* Fixme: isn't the hsync width in pixels? */
+		/* Set the event packet size to 0 (not used) */
+		writel(0, d->regs + DSI_VID_BLKSIZE1);
+		/*
+		 * FIXME: isn't the hsync width in pixels? The porch and
+		 * sync area size is in pixels here, but this -6
+		 * seems to be for bytes. It looks like this in the vendor
+		 * code though. Is it completely untested?
+		 */
 		blkline_pck = bpl - (mode->hsync_end - mode->hsync_start) - 6;
 		val = blkline_pck << DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_SHIFT;
 		writel(val, d->regs + DSI_VID_BLKSIZE2);
 	} else {
+		/* Set the sync pulse packet size to 0 (not used) */
+		writel(0, d->regs + DSI_VID_BLKSIZE2);
+		/* Specifying payload size in bytes (-4-6 from manual) */
 		blkline_pck = bpl - 4 - 6;
+		if (blkline_pck > 0x1FFF)
+			dev_err(d->dev, "blkline_pck too big %d bytes\n",
+				blkline_pck);
 		val = blkline_pck << DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_SHIFT;
+		val &= DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_MASK;
 		writel(val, d->regs + DSI_VID_BLKSIZE1);
 	}
 
-	line_duration = (blkline_pck + 6) / d->mdsi->lanes;
-	dev_dbg(d->dev, "line duration %u\n", line_duration);
+	/*
+	 * The line duration is used to scale back the frequency from
+	 * the max frequency supported by the HS clock to the desired
+	 * update frequency in vrefresh.
+	 */
+	line_duration = blkline_pck + 6;
+	/*
+	 * The datasheet contains this complex condition for decreasing
+	 * the line duration by 1 under very specific circumstances.
+	 * Here we also imply that LP is used during burst EOL.
+	 */
+	if (d->mdsi->lanes == 2 && (hsa & 0x01) && (hfp & 0x01)
+	    && (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST))
+		line_duration--;
+	line_duration = DIV_ROUND_CLOSEST(line_duration, d->mdsi->lanes);
+	dev_dbg(d->dev, "line duration %u bytes\n", line_duration);
 	val = line_duration << DSI_VID_DPHY_TIME_REG_LINE_DURATION_SHIFT;
 	/*
 	 * This is the time to perform LP->HS on D-PHY
 	 * FIXME: nowhere to get this from: DT property on the DSI?
+	 * The manual says this is "system dependent".
+	 * Values like 48 and 72 are seen in the vendor code.
 	 */
-	val |= 0 << DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_SHIFT;
+	val |= 48 << DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_SHIFT;
 	writel(val, d->regs + DSI_VID_DPHY_TIME);
 
-	/* Calculate block end of line */
-	blkeol_pck = bpl - mode->hdisplay * bpp - 6;
-	blkeol_duration = (blkeol_pck + 6) / d->mdsi->lanes;
-	dev_dbg(d->dev, "blkeol pck: %u, duration: %u\n",
-		 blkeol_pck, blkeol_duration);
-
+	/*
+	 * See the manual figure 657 page 2203 for understanding the impact
+	 * of the different burst mode settings.
+	 */
 	if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
-		/* Set up EOL clock for burst mode */
+		int blkeol_pck, blkeol_duration;
+		/*
+		 * Packet size at EOL for burst mode, this is only used
+		 * if DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0 is NOT set,
+		 * but we instead send NULL or blanking packets at EOL.
+		 * This is given in number of bytes.
+		 *
+		 * See the manual page 2198 for the 13 reg_blkeol_pck bits.
+		 */
+		blkeol_pck = bpl - (mode->htotal * cpp) - 6;
+		if (blkeol_pck < 0) {
+			dev_err(d->dev, "video block does not fit on line!\n");
+			dev_err(d->dev,
+				"calculated bytes per line: %llu @ %d Hz\n",
+				bpl, mode->vrefresh);
+			dev_err(d->dev,
+				"bytes per line (blkline_pck) %u bytes\n",
+				blkline_pck);
+			dev_err(d->dev,
+				"blkeol_pck becomes %d bytes\n", blkeol_pck);
+			return;
+		}
+		dev_dbg(d->dev, "BLKEOL packet: %d bytes\n", blkeol_pck);
+
 		val = readl(d->regs + DSI_VID_BLKSIZE1);
+		val &= ~DSI_VID_BLKSIZE1_BLKEOL_PCK_MASK;
 		val |= blkeol_pck << DSI_VID_BLKSIZE1_BLKEOL_PCK_SHIFT;
 		writel(val, d->regs + DSI_VID_BLKSIZE1);
-		writel(blkeol_pck, d->regs + DSI_VID_VCA_SETTING2);
+		/* Use the same value for exact burst limit */
+		val = blkeol_pck <<
+			DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_SHIFT;
+		val &= DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_MASK;
+		writel(val, d->regs + DSI_VID_VCA_SETTING2);
+		/*
+		 * This BLKEOL duration is claimed to be the duration in clock
+		 * cycles of the BLLP end-of-line (EOL) period for each line if
+		 * DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0 is set.
+		 *
+		 * It is hard to trust the manuals' claim that this is in clock
+		 * cycles as we mimic the behaviour of the vendor code, which
+		 * appears to write a number of bytes that would have been
+		 * transferred on a single lane.
+		 *
+		 * See the manual figure 657 page 2203 and page 2198 for the 13
+		 * reg_blkeol_duration bits.
+		 *
+		 * FIXME: should this also be set up for non-burst mode
+		 * according to figure 565 page 2202?
+		 */
+		blkeol_duration = DIV_ROUND_CLOSEST(blkeol_pck + 6,
+						    d->mdsi->lanes);
+		dev_dbg(d->dev, "BLKEOL duration: %d clock cycles\n",
+			blkeol_duration);
 
-		writel(blkeol_duration, d->regs + DSI_VID_PCK_TIME);
-		writel(blkeol_duration - 6, d->regs + DSI_VID_VCA_SETTING1);
+		val = readl(d->regs + DSI_VID_PCK_TIME);
+		val &= ~DSI_VID_PCK_TIME_BLKEOL_DURATION_MASK;
+		val |= blkeol_duration <<
+			DSI_VID_PCK_TIME_BLKEOL_DURATION_SHIFT;
+		writel(val, d->regs + DSI_VID_PCK_TIME);
+
+		/* Max burst limit, this is given in bytes */
+		val = readl(d->regs + DSI_VID_VCA_SETTING1);
+		val &= ~DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_MASK;
+		val |= (blkeol_pck - 6) <<
+			DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_SHIFT;
+		writel(val, d->regs + DSI_VID_VCA_SETTING1);
 	}
 
 	/* Maximum line limit */
 	val = readl(d->regs + DSI_VID_VCA_SETTING2);
-	val |= blkline_pck <<
-		DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_SHIFT;
+	val &= ~DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_MASK;
+	val |= (blkline_pck - 6) <<
+		DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_SHIFT;
 	writel(val, d->regs + DSI_VID_VCA_SETTING2);
-
-	/* Put IF1 into video mode */
-	val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
-	val |= DSI_MCTL_MAIN_DATA_CTL_IF1_MODE;
-	writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
-
-	/* Disable command mode on IF1 */
-	val = readl(d->regs + DSI_CMD_MODE_CTL);
-	val &= ~DSI_CMD_MODE_CTL_IF1_LP_EN;
-	writel(val, d->regs + DSI_CMD_MODE_CTL);
-
-	/* Enable some error interrupts */
-	val = readl(d->regs + DSI_VID_MODE_STS_CTL);
-	val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC;
-	val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA;
-	writel(val, d->regs + DSI_VID_MODE_STS_CTL);
-
-	/* Enable video mode */
-	val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
-	val |= DSI_MCTL_MAIN_DATA_CTL_VID_EN;
-	writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+	dev_dbg(d->dev, "blkline pck: %d bytes\n", blkline_pck - 6);
 }
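The bytes-per-line budget above boils down to: picoseconds per pixel from the refresh rate and the full frame, times the line length, times the per-lane byte rate, times the number of lanes. A standalone sketch of that arithmetic, in the same order as the hunk to avoid 64-bit overflow (the function itself is illustrative):

	static u64 video_bytes_per_line(u32 vrefresh, u32 htotal, u32 vtotal,
					unsigned long hs_rate_hz, u32 lanes)
	{
		u64 pclk_ps, bpl;

		/* (ps/s) / (pixels/s) = picoseconds per pixel */
		pclk_ps = DIV_ROUND_UP_ULL(1000000000000ULL,
					   (u64)vrefresh * htotal * vtotal);

		bpl = pclk_ps * htotal;			/* picoseconds per scanline */
		bpl *= hs_rate_hz / 8;			/* bytes per second on one lane */
		bpl = DIV_ROUND_DOWN_ULL(bpl, 1000000);	/* ps -> us */
		bpl = DIV_ROUND_DOWN_ULL(bpl, 1000000);	/* us -> s */
		return bpl * lanes;			/* lanes run in parallel */
	}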
 
 static void mcde_dsi_start(struct mcde_dsi *d)
@@ -670,30 +816,25 @@ static void mcde_dsi_start(struct mcde_dsi *d)
 static void mcde_dsi_bridge_enable(struct drm_bridge *bridge)
 {
 	struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
+	u32 val;
+
+	if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+		/* Enable video mode */
+		val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
+		val |= DSI_MCTL_MAIN_DATA_CTL_VID_EN;
+		writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+	}
 
 	dev_info(d->dev, "enable DSI master\n");
 };
 
-static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
-				     const struct drm_display_mode *mode,
-				     const struct drm_display_mode *adj)
+static void mcde_dsi_bridge_pre_enable(struct drm_bridge *bridge)
 {
 	struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
-	unsigned long pixel_clock_hz = mode->clock * 1000;
 	unsigned long hs_freq, lp_freq;
 	u32 val;
 	int ret;
 
-	if (!d->mdsi) {
-		dev_err(d->dev, "no DSI device attached to encoder!\n");
-		return;
-	}
-
-	dev_info(d->dev, "set DSI master to %dx%d %lu Hz %s mode\n",
-		 mode->hdisplay, mode->vdisplay, pixel_clock_hz,
-		 (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ? "VIDEO" : "CMD"
-		);
-
 	/* Copy maximum clock frequencies */
 	if (d->mdsi->lp_rate)
 		lp_freq = d->mdsi->lp_rate;
@@ -732,7 +873,21 @@ static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
 			 d->hs_freq);
 
 	if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
-		mcde_dsi_setup_video_mode(d, mode);
+		/* Put IF1 into video mode */
+		val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
+		val |= DSI_MCTL_MAIN_DATA_CTL_IF1_MODE;
+		writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+
+		/* Disable command mode on IF1 */
+		val = readl(d->regs + DSI_CMD_MODE_CTL);
+		val &= ~DSI_CMD_MODE_CTL_IF1_LP_EN;
+		writel(val, d->regs + DSI_CMD_MODE_CTL);
+
+		/* Enable some error interrupts */
+		val = readl(d->regs + DSI_VID_MODE_STS_CTL);
+		val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC;
+		val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA;
+		writel(val, d->regs + DSI_VID_MODE_STS_CTL);
 	} else {
 		/* Command mode, clear IF1 ID */
 		val = readl(d->regs + DSI_CMD_MODE_CTL);
@@ -746,6 +901,26 @@ static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
 	}
 }
 
+static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
+				     const struct drm_display_mode *mode,
+				     const struct drm_display_mode *adj)
+{
+	struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
+
+	if (!d->mdsi) {
+		dev_err(d->dev, "no DSI device attached to encoder!\n");
+		return;
+	}
+
+	dev_info(d->dev, "set DSI master to %dx%d %u Hz %s mode\n",
+		 mode->hdisplay, mode->vdisplay, mode->clock * 1000,
+		 (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ? "VIDEO" : "CMD"
+		);
+
+	if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO)
+		mcde_dsi_setup_video_mode(d, mode);
+}
+
 static void mcde_dsi_wait_for_command_mode_stop(struct mcde_dsi *d)
 {
 	u32 val;
@@ -811,67 +986,23 @@ static void mcde_dsi_bridge_disable(struct drm_bridge *bridge)
 	clk_disable_unprepare(d->lp_clk);
 }
 
-/*
- * This connector needs no special handling, just use the default
- * helpers for everything. It's pretty dummy.
- */
-static const struct drm_connector_funcs mcde_dsi_connector_funcs = {
-	.reset = drm_atomic_helper_connector_reset,
-	.fill_modes = drm_helper_probe_single_connector_modes,
-	.destroy = drm_connector_cleanup,
-	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int mcde_dsi_get_modes(struct drm_connector *connector)
-{
-	struct mcde_dsi *d = connector_to_mcde_dsi(connector);
-
-	/* Just pass the question to the panel */
-	if (d->panel)
-		return drm_panel_get_modes(d->panel);
-
-	/* TODO: deal with bridges */
-
-	return 0;
-}
-
-static const struct drm_connector_helper_funcs
-mcde_dsi_connector_helper_funcs = {
-	.get_modes = mcde_dsi_get_modes,
-};
-
 static int mcde_dsi_bridge_attach(struct drm_bridge *bridge)
 {
 	struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
 	struct drm_device *drm = bridge->dev;
 	int ret;
 
-	drm_connector_helper_add(&d->connector,
-				 &mcde_dsi_connector_helper_funcs);
-
 	if (!drm_core_check_feature(drm, DRIVER_ATOMIC)) {
 		dev_err(d->dev, "we need atomic updates\n");
 		return -ENOTSUPP;
 	}
 
-	ret = drm_connector_init(drm, &d->connector,
-				 &mcde_dsi_connector_funcs,
-				 DRM_MODE_CONNECTOR_DSI);
-	if (ret) {
-		dev_err(d->dev, "failed to initialize DSI bridge connector\n");
-		return ret;
-	}
-	d->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
-	/* The encoder in the bridge attached to the DSI bridge */
-	drm_connector_attach_encoder(&d->connector, bridge->encoder);
-	/* Then we attach the DSI bridge to the output (panel etc) bridge */
+	/* Attach the DSI bridge to the output (panel etc) bridge */
 	ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge);
 	if (ret) {
 		dev_err(d->dev, "failed to attach the DSI bridge\n");
 		return ret;
 	}
-	d->connector.status = connector_status_connected;
 
 	return 0;
 }
@@ -881,6 +1012,7 @@ static const struct drm_bridge_funcs mcde_dsi_bridge_funcs = {
 	.mode_set = mcde_dsi_bridge_mode_set,
 	.disable = mcde_dsi_bridge_disable,
 	.enable = mcde_dsi_bridge_enable,
+	.pre_enable = mcde_dsi_bridge_pre_enable,
 };
 
 static int mcde_dsi_bind(struct device *dev, struct device *master,
@@ -901,7 +1033,7 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
 	d->mcde = mcde;
 	/* If the display attached before binding, set this up */
 	if (d->mdsi)
-		d->mcde->mdsi = d->mdsi;
+		mcde_dsi_attach_to_mcde(d);
 
 	/* Obtain the clocks */
 	d->hs_clk = devm_clk_get(dev, "hs");
diff --git a/drivers/gpu/drm/mcde/mcde_dsi_regs.h b/drivers/gpu/drm/mcde/mcde_dsi_regs.h
index c925332..16551af 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi_regs.h
+++ b/drivers/gpu/drm/mcde/mcde_dsi_regs.h
@@ -123,17 +123,6 @@
 #define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT BIT(3)
 #define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT 8
 #define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_MASK 0x00003F00
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_TURN_ON_PERIPHERAL 50
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHUT_DOWN_PERIPHERAL 34
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_0 3
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_1 19
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_2 35
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_LONG_WRITE 41
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_0 5
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_1 21
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_LONG_WRITE 57
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_READ 6
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SET_MAX_PKT_SIZE 55
 #define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT 14
 #define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT 16
 #define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN BIT(21)
@@ -239,6 +228,7 @@
 
 #define DSI_VID_PCK_TIME 0x000000A8
 #define DSI_VID_PCK_TIME_BLKEOL_DURATION_SHIFT 0
+#define DSI_VID_PCK_TIME_BLKEOL_DURATION_MASK 0x00000FFF
 
 #define DSI_VID_DPHY_TIME 0x000000AC
 #define DSI_VID_DPHY_TIME_REG_LINE_DURATION_SHIFT 0
@@ -248,6 +238,16 @@
 
 #define DSI_VID_MODE_STS 0x000000BC
 #define DSI_VID_MODE_STS_VSG_RUNNING BIT(0)
+#define DSI_VID_MODE_STS_ERR_MISSING_DATA BIT(1)
+#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC BIT(2)
+#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC BIT(3)
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH BIT(4)
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT BIT(5)
+#define DSI_VID_MODE_STS_ERR_BURSTWRITE BIT(6)
+#define DSI_VID_MODE_STS_ERR_LINEWRITE BIT(7)
+#define DSI_VID_MODE_STS_ERR_LONGREAD BIT(8)
+#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH BIT(9)
+#define DSI_VID_MODE_STS_VSG_RECOVERY BIT(10)
 
 #define DSI_VID_VCA_SETTING1 0x000000C0
 #define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_SHIFT 0
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 8067a4b..b7a82ed 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -7,7 +7,6 @@
 		  mtk_drm_ddp.o \
 		  mtk_drm_ddp_comp.o \
 		  mtk_drm_drv.o \
-		  mtk_drm_fb.o \
 		  mtk_drm_gem.o \
 		  mtk_drm_plane.o \
 		  mtk_dsi.o \
@@ -21,7 +20,7 @@
 mediatek-drm-hdmi-objs := mtk_cec.o \
 			  mtk_hdmi.o \
 			  mtk_hdmi_ddc.o \
-                          mtk_mt2701_hdmi_phy.o \
+			  mtk_mt2701_hdmi_phy.o \
 			  mtk_mt8173_hdmi_phy.o \
 			  mtk_hdmi_phy.o
 
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 59de2a4..6fb0d69 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -9,6 +9,7 @@
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
 
 #include "mtk_drm_crtc.h"
 #include "mtk_drm_ddp_comp.h"
@@ -45,12 +46,12 @@ static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
 
 static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
 			     unsigned int h, unsigned int vrefresh,
-			     unsigned int bpc)
+			     unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
 	struct mtk_disp_color *color = comp_to_color(comp);
 
-	writel(w, comp->regs + DISP_COLOR_WIDTH(color));
-	writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
+	mtk_ddp_write(cmdq_pkt, w, comp, DISP_COLOR_WIDTH(color));
+	mtk_ddp_write(cmdq_pkt, h, comp, DISP_COLOR_HEIGHT(color));
 }
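Component register writes are now funneled through mtk_ddp_write*() wrappers that take an optional cmdq_pkt: with a packet, the write is recorded for the GCE command queue; without one, it degrades to a plain MMIO write. The wrapper itself is defined outside the hunks shown; a plausible sketch, assuming the component carries its GCE subsys id and physical register base (those field names are assumptions):

	void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
			   struct mtk_ddp_comp *comp, unsigned int offset)
	{
	#if IS_REACHABLE(CONFIG_MTK_CMDQ)
		if (cmdq_pkt) {
			/* queue the write for the GCE to replay later */
			cmdq_pkt_write(cmdq_pkt, comp->subsys,
				       comp->regs_pa + offset, value);
			return;
		}
	#endif
		writel(value, comp->regs + offset);
	}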
 
 static void mtk_color_start(struct mtk_ddp_comp *comp)
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 4a55bb6..891d80c 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -11,6 +11,7 @@
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
 
 #include "mtk_drm_crtc.h"
 #include "mtk_drm_ddp_comp.h"
@@ -124,14 +125,15 @@ static void mtk_ovl_stop(struct mtk_ddp_comp *comp)
 
 static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
 			   unsigned int h, unsigned int vrefresh,
-			   unsigned int bpc)
+			   unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
 	if (w != 0 && h != 0)
-		writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE);
-	writel_relaxed(0x0, comp->regs + DISP_REG_OVL_ROI_BGCLR);
+		mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, comp,
+				      DISP_REG_OVL_ROI_SIZE);
+	mtk_ddp_write_relaxed(cmdq_pkt, 0x0, comp, DISP_REG_OVL_ROI_BGCLR);
 
-	writel(0x1, comp->regs + DISP_REG_OVL_RST);
-	writel(0x0, comp->regs + DISP_REG_OVL_RST);
+	mtk_ddp_write(cmdq_pkt, 0x1, comp, DISP_REG_OVL_RST);
+	mtk_ddp_write(cmdq_pkt, 0x0, comp, DISP_REG_OVL_RST);
 }
 
 static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
@@ -175,16 +177,16 @@ static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx,
 	return 0;
 }
 
-static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
+static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx,
+			     struct cmdq_pkt *cmdq_pkt)
 {
-	unsigned int reg;
 	unsigned int gmc_thrshd_l;
 	unsigned int gmc_thrshd_h;
 	unsigned int gmc_value;
 	struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
 
-	writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
-
+	mtk_ddp_write(cmdq_pkt, 0x1, comp,
+		      DISP_REG_OVL_RDMA_CTRL(idx));
 	gmc_thrshd_l = GMC_THRESHOLD_LOW >>
 		      (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
 	gmc_thrshd_h = GMC_THRESHOLD_HIGH >>
@@ -194,22 +196,19 @@ static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
 	else
 		gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
 			    gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
-	writel(gmc_value, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
-
-	reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
-	reg = reg | BIT(idx);
-	writel(reg, comp->regs + DISP_REG_OVL_SRC_CON);
+	mtk_ddp_write(cmdq_pkt, gmc_value,
+		      comp, DISP_REG_OVL_RDMA_GMC(idx));
+	mtk_ddp_write_mask(cmdq_pkt, BIT(idx), comp,
+			   DISP_REG_OVL_SRC_CON, BIT(idx));
 }
 
-static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
+static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx,
+			      struct cmdq_pkt *cmdq_pkt)
 {
-	unsigned int reg;
-
-	reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
-	reg = reg & ~BIT(idx);
-	writel(reg, comp->regs + DISP_REG_OVL_SRC_CON);
-
-	writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
+	mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+			   DISP_REG_OVL_SRC_CON, BIT(idx));
+	mtk_ddp_write(cmdq_pkt, 0, comp,
+		      DISP_REG_OVL_RDMA_CTRL(idx));
 }
 
 static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
@@ -249,7 +248,8 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
 }
 
 static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
-				 struct mtk_plane_state *state)
+				 struct mtk_plane_state *state,
+				 struct cmdq_pkt *cmdq_pkt)
 {
 	struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
 	struct mtk_plane_pending_state *pending = &state->pending;
@@ -260,11 +260,13 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
 	unsigned int src_size = (pending->height << 16) | pending->width;
 	unsigned int con;
 
-	if (!pending->enable)
-		mtk_ovl_layer_off(comp, idx);
+	if (!pending->enable) {
+		mtk_ovl_layer_off(comp, idx, cmdq_pkt);
+		return;
+	}
 
 	con = ovl_fmt_convert(ovl, fmt);
-	if (idx != 0)
+	if (state->base.fb->format->has_alpha)
 		con |= OVL_CON_AEN | OVL_CON_ALPHA;
 
 	if (pending->rotation & DRM_MODE_REFLECT_Y) {
@@ -277,14 +279,18 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
 		addr += pending->pitch - 1;
 	}
 
-	writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx));
-	writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx));
-	writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx));
-	writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx));
-	writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(ovl, idx));
+	mtk_ddp_write_relaxed(cmdq_pkt, con, comp,
+			      DISP_REG_OVL_CON(idx));
+	mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp,
+			      DISP_REG_OVL_PITCH(idx));
+	mtk_ddp_write_relaxed(cmdq_pkt, src_size, comp,
+			      DISP_REG_OVL_SRC_SIZE(idx));
+	mtk_ddp_write_relaxed(cmdq_pkt, offset, comp,
+			      DISP_REG_OVL_OFFSET(idx));
+	mtk_ddp_write_relaxed(cmdq_pkt, addr, comp,
+			      DISP_REG_OVL_ADDR(ovl, idx));
 
-	if (pending->enable)
-		mtk_ovl_layer_on(comp, idx);
+	mtk_ovl_layer_on(comp, idx, cmdq_pkt);
 }
 
 static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp)
@@ -313,8 +319,6 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
 	.disable_vblank = mtk_ovl_disable_vblank,
 	.supported_rotations = mtk_ovl_supported_rotations,
 	.layer_nr = mtk_ovl_layer_nr,
-	.layer_on = mtk_ovl_layer_on,
-	.layer_off = mtk_ovl_layer_off,
 	.layer_check = mtk_ovl_layer_check,
 	.layer_config = mtk_ovl_layer_config,
 	.bgclr_in_on = mtk_ovl_bgclr_in_on,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 405afef..0cb848d 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -9,6 +9,7 @@
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
 
 #include "mtk_drm_crtc.h"
 #include "mtk_drm_ddp_comp.h"
@@ -125,14 +126,16 @@ static void mtk_rdma_stop(struct mtk_ddp_comp *comp)
 
 static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
 			    unsigned int height, unsigned int vrefresh,
-			    unsigned int bpc)
+			    unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
 	unsigned int threshold;
 	unsigned int reg;
 	struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
 
-	rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width);
-	rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height);
+	mtk_ddp_write_mask(cmdq_pkt, width, comp,
+			   DISP_REG_RDMA_SIZE_CON_0, 0xfff);
+	mtk_ddp_write_mask(cmdq_pkt, height, comp,
+			   DISP_REG_RDMA_SIZE_CON_1, 0xfffff);
 
 	/*
 	 * Enable FIFO underflow since DSI and DPI can't be blocked.
@@ -144,7 +147,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
 	reg = RDMA_FIFO_UNDERFLOW_EN |
 	      RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) |
 	      RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
-	writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
+	mtk_ddp_write(cmdq_pkt, reg, comp, DISP_REG_RDMA_FIFO_CON);
 }
 
 static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
@@ -190,7 +193,8 @@ static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
 }
 
 static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
-				  struct mtk_plane_state *state)
+				  struct mtk_plane_state *state,
+				  struct cmdq_pkt *cmdq_pkt)
 {
 	struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
 	struct mtk_plane_pending_state *pending = &state->pending;
@@ -200,24 +204,27 @@ static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
 	unsigned int con;
 
 	con = rdma_fmt_convert(rdma, fmt);
-	writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON);
+	mtk_ddp_write_relaxed(cmdq_pkt, con, comp, DISP_RDMA_MEM_CON);
 
 	if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
-		rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
-				 RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE);
-		rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
-				 RDMA_MATRIX_INT_MTX_SEL,
-				 RDMA_MATRIX_INT_MTX_BT601_to_RGB);
+		mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, comp,
+				   DISP_REG_RDMA_SIZE_CON_0,
+				   RDMA_MATRIX_ENABLE);
+		mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_INT_MTX_BT601_to_RGB,
+				   comp, DISP_REG_RDMA_SIZE_CON_0,
+				   RDMA_MATRIX_INT_MTX_SEL);
 	} else {
-		rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
-				 RDMA_MATRIX_ENABLE, 0);
+		mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+				   DISP_REG_RDMA_SIZE_CON_0,
+				   RDMA_MATRIX_ENABLE);
 	}
+	mtk_ddp_write_relaxed(cmdq_pkt, addr, comp, DISP_RDMA_MEM_START_ADDR);
+	mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp, DISP_RDMA_MEM_SRC_PITCH);
+	mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, comp,
+		      DISP_RDMA_MEM_GMC_SETTING_0);
+	mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, comp,
+			   DISP_REG_RDMA_GLOBAL_CON, RDMA_MODE_MEMORY);
 
-	writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR);
-	writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH);
-	writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0);
-	rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON,
-			 RDMA_MODE_MEMORY, RDMA_MODE_MEMORY);
 }
 
 static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 3305a94..0dfcd178 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -5,6 +5,7 @@
 
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
 
 #include <asm/barrier.h>
 #include <soc/mediatek/smi.h>
@@ -42,11 +43,20 @@ struct mtk_drm_crtc {
 	struct drm_plane		*planes;
 	unsigned int			layer_nr;
 	bool				pending_planes;
+	bool				pending_async_planes;
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	struct cmdq_client		*cmdq_client;
+	u32				cmdq_event;
+#endif
 
 	void __iomem			*config_regs;
 	struct mtk_disp_mutex		*mutex;
 	unsigned int			ddp_comp_nr;
 	struct mtk_ddp_comp		**ddp_comp;
+
+	/* lock for display hardware access */
+	struct mutex			hw_lock;
 };
 
 struct mtk_crtc_state {
@@ -230,6 +240,13 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
 	return NULL;
 }
 
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+static void ddp_cmdq_cb(struct cmdq_cb_data data)
+{
+	cmdq_pkt_destroy(data.data);
+}
+#endif
+
 static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 {
 	struct drm_crtc *crtc = &mtk_crtc->base;
@@ -298,7 +315,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 		if (i == 1)
 			mtk_ddp_comp_bgclr_in_on(comp);
 
-		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc);
+		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
 		mtk_ddp_comp_start(comp);
 	}
 
@@ -313,7 +330,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 		comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
 		if (comp)
 			mtk_ddp_comp_layer_config(comp, local_layer,
-						  plane_state);
+						  plane_state, NULL);
 	}
 
 	return 0;
@@ -328,6 +345,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
 {
 	struct drm_device *drm = mtk_crtc->base.dev;
+	struct drm_crtc *crtc = &mtk_crtc->base;
 	int i;
 
 	DRM_DEBUG_DRIVER("%s\n", __func__);
@@ -353,9 +371,17 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
 	mtk_disp_mutex_unprepare(mtk_crtc->mutex);
 
 	pm_runtime_put(drm->dev);
+
+	if (crtc->state->event && !crtc->state->active) {
+		spin_lock_irq(&crtc->dev->event_lock);
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+		spin_unlock_irq(&crtc->dev->event_lock);
+	}
 }
 
-static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
+static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
+				struct cmdq_pkt *cmdq_handle)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
 	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
@@ -371,7 +397,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
 	if (state->pending_config) {
 		mtk_ddp_comp_config(comp, state->pending_width,
 				    state->pending_height,
-				    state->pending_vrefresh, 0);
+				    state->pending_vrefresh, 0,
+				    cmdq_handle);
 
 		state->pending_config = false;
 	}
@@ -391,11 +418,82 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
 
 			if (comp)
 				mtk_ddp_comp_layer_config(comp, local_layer,
-							  plane_state);
+							  plane_state,
+							  cmdq_handle);
 			plane_state->pending.config = false;
 		}
 		mtk_crtc->pending_planes = false;
 	}
+
+	if (mtk_crtc->pending_async_planes) {
+		for (i = 0; i < mtk_crtc->layer_nr; i++) {
+			struct drm_plane *plane = &mtk_crtc->planes[i];
+			struct mtk_plane_state *plane_state;
+
+			plane_state = to_mtk_plane_state(plane->state);
+
+			if (!plane_state->pending.async_config)
+				continue;
+
+			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
+							  &local_layer);
+
+			if (comp)
+				mtk_ddp_comp_layer_config(comp, local_layer,
+							  plane_state,
+							  cmdq_handle);
+			plane_state->pending.async_config = false;
+		}
+		mtk_crtc->pending_async_planes = false;
+	}
+}
+
+static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	struct cmdq_pkt *cmdq_handle;
+#endif
+	struct drm_crtc *crtc = &mtk_crtc->base;
+	struct mtk_drm_private *priv = crtc->dev->dev_private;
+	unsigned int pending_planes = 0, pending_async_planes = 0;
+	int i;
+
+	mutex_lock(&mtk_crtc->hw_lock);
+	for (i = 0; i < mtk_crtc->layer_nr; i++) {
+		struct drm_plane *plane = &mtk_crtc->planes[i];
+		struct mtk_plane_state *plane_state;
+
+		plane_state = to_mtk_plane_state(plane->state);
+		if (plane_state->pending.dirty) {
+			plane_state->pending.config = true;
+			plane_state->pending.dirty = false;
+			pending_planes |= BIT(i);
+		} else if (plane_state->pending.async_dirty) {
+			plane_state->pending.async_config = true;
+			plane_state->pending.async_dirty = false;
+			pending_async_planes |= BIT(i);
+		}
+	}
+	if (pending_planes)
+		mtk_crtc->pending_planes = true;
+	if (pending_async_planes)
+		mtk_crtc->pending_async_planes = true;
+
+	if (priv->data->shadow_register) {
+		mtk_disp_mutex_acquire(mtk_crtc->mutex);
+		mtk_crtc_ddp_config(crtc, NULL);
+		mtk_disp_mutex_release(mtk_crtc->mutex);
+	}
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	if (mtk_crtc->cmdq_client) {
+		cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
+		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
+		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
+		mtk_crtc_ddp_config(crtc, cmdq_handle);
+		cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
+	}
+#endif
+	mutex_unlock(&mtk_crtc->hw_lock);
 }
 
 int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
@@ -410,6 +508,20 @@ int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
 	return 0;
 }
 
+void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+			       struct drm_plane_state *new_state)
+{
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	const struct drm_plane_helper_funcs *plane_helper_funcs =
+			plane->helper_private;
+
+	if (!mtk_crtc->enabled)
+		return;
+
+	plane_helper_funcs->atomic_update(plane, new_state);
+	mtk_drm_crtc_hw_config(mtk_crtc);
+}
+
 static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
 				       struct drm_crtc_state *old_state)
 {
@@ -457,6 +569,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
 	}
 	mtk_crtc->pending_planes = true;
 
+	mtk_drm_crtc_hw_config(mtk_crtc);
 	/* Wait for planes to be disabled */
 	drm_crtc_wait_one_vblank(crtc);
 
@@ -488,34 +601,16 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
 				      struct drm_crtc_state *old_crtc_state)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-	struct mtk_drm_private *priv = crtc->dev->dev_private;
-	unsigned int pending_planes = 0;
 	int i;
 
 	if (mtk_crtc->event)
 		mtk_crtc->pending_needs_vblank = true;
-	for (i = 0; i < mtk_crtc->layer_nr; i++) {
-		struct drm_plane *plane = &mtk_crtc->planes[i];
-		struct mtk_plane_state *plane_state;
-
-		plane_state = to_mtk_plane_state(plane->state);
-		if (plane_state->pending.dirty) {
-			plane_state->pending.config = true;
-			plane_state->pending.dirty = false;
-			pending_planes |= BIT(i);
-		}
-	}
-	if (pending_planes)
-		mtk_crtc->pending_planes = true;
 	if (crtc->state->color_mgmt_changed)
-		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
 			mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
-
-	if (priv->data->shadow_register) {
-		mtk_disp_mutex_acquire(mtk_crtc->mutex);
-		mtk_crtc_ddp_config(crtc);
-		mtk_disp_mutex_release(mtk_crtc->mutex);
-	}
+			mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
+		}
+	mtk_drm_crtc_hw_config(mtk_crtc);
 }
 
 static const struct drm_crtc_funcs mtk_crtc_funcs = {
@@ -565,8 +660,12 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
 	struct mtk_drm_private *priv = crtc->dev->dev_private;
 
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
+#else
 	if (!priv->data->shadow_register)
-		mtk_crtc_ddp_config(crtc);
+#endif
+		mtk_crtc_ddp_config(crtc, NULL);
 
 	mtk_drm_finish_page_flip(mtk_crtc);
 }
@@ -633,6 +732,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 	int pipe = priv->num_pipes;
 	int ret;
 	int i;
+	bool has_ctm = false;
+	uint gamma_lut_size = 0;
 
 	if (!path)
 		return 0;
@@ -683,6 +784,14 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 		}
 
 		mtk_crtc->ddp_comp[i] = comp;
+
+		if (comp->funcs) {
+			if (comp->funcs->gamma_set)
+				gamma_lut_size = MTK_LUT_SIZE;
+
+			if (comp->funcs->ctm_set)
+				has_ctm = true;
+		}
 	}
 
 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
@@ -703,9 +812,28 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 				NULL, pipe);
 	if (ret < 0)
 		return ret;
-	drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
-	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
-	priv->num_pipes++;
 
+	if (gamma_lut_size)
+		drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
+	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
+	priv->num_pipes++;
+	mutex_init(&mtk_crtc->hw_lock);
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	mtk_crtc->cmdq_client =
+			cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base),
+					 2000);
+	if (IS_ERR(mtk_crtc->cmdq_client)) {
+		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
+			drm_crtc_index(&mtk_crtc->base));
+		mtk_crtc->cmdq_client = NULL;
+	}
+	ret = of_property_read_u32_index(dev->of_node, "mediatek,gce-events",
+					 drm_crtc_index(&mtk_crtc->base),
+					 &mtk_crtc->cmdq_event);
+	if (ret)
+		dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
+			drm_crtc_index(&mtk_crtc->base));
+#endif
 	return 0;
 }
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index 6afe1c1..a2b4677 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -21,5 +21,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 			unsigned int path_len);
 int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
 			     struct mtk_plane_state *state);
+void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+			       struct drm_plane_state *plane_state);
 
 #endif /* MTK_DRM_CRTC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 7f21307..1f5a112 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -12,7 +12,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
-
+#include <linux/soc/mediatek/mtk-cmdq.h>
 #include "mtk_drm_drv.h"
 #include "mtk_drm_plane.h"
 #include "mtk_drm_ddp_comp.h"
@@ -37,7 +37,15 @@
 #define CCORR_EN				BIT(0)
 #define DISP_CCORR_CFG				0x0020
 #define CCORR_RELAY_MODE			BIT(0)
+#define CCORR_ENGINE_EN				BIT(1)
+#define CCORR_GAMMA_OFF				BIT(2)
+#define CCORR_WGAMUT_SRC_CLIP			BIT(3)
 #define DISP_CCORR_SIZE				0x0030
+#define DISP_CCORR_COEF_0			0x0080
+#define DISP_CCORR_COEF_1			0x0084
+#define DISP_CCORR_COEF_2			0x0088
+#define DISP_CCORR_COEF_3			0x008C
+#define DISP_CCORR_COEF_4			0x0090
 
 #define DISP_DITHER_EN				0x0000
 #define DITHER_EN				BIT(0)
@@ -76,36 +84,84 @@
 #define DITHER_ADD_LSHIFT_G(x)			(((x) & 0x7) << 4)
 #define DITHER_ADD_RSHIFT_G(x)			(((x) & 0x7) << 0)
 
+void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+		   struct mtk_ddp_comp *comp, unsigned int offset)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	if (cmdq_pkt)
+		cmdq_pkt_write(cmdq_pkt, comp->subsys,
+			       comp->regs_pa + offset, value);
+	else
+#endif
+		writel(value, comp->regs + offset);
+}
+
+void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+			   struct mtk_ddp_comp *comp,
+			   unsigned int offset)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	if (cmdq_pkt)
+		cmdq_pkt_write(cmdq_pkt, comp->subsys,
+			       comp->regs_pa + offset, value);
+	else
+#endif
+		writel_relaxed(value, comp->regs + offset);
+}
+
+void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt,
+			unsigned int value,
+			struct mtk_ddp_comp *comp,
+			unsigned int offset,
+			unsigned int mask)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	if (cmdq_pkt) {
+		cmdq_pkt_write_mask(cmdq_pkt, comp->subsys,
+				    comp->regs_pa + offset, value, mask);
+	} else {
+#endif
+		u32 tmp = readl(comp->regs + offset);
+
+		tmp = (tmp & ~mask) | (value & mask);
+		writel(tmp, comp->regs + offset);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	}
+#endif
+}
+
 void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
-		    unsigned int CFG)
+		    unsigned int CFG, struct cmdq_pkt *cmdq_pkt)
 {
 	/* If bpc equals 0, the dithering function is not enabled */
 	if (bpc == 0)
 		return;
 
 	if (bpc >= MTK_MIN_BPC) {
-		writel(0, comp->regs + DISP_DITHER_5);
-		writel(0, comp->regs + DISP_DITHER_7);
-		writel(DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
-		       DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
-		       DITHER_NEW_BIT_MODE,
-		       comp->regs + DISP_DITHER_15);
-		writel(DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
-		       DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
-		       DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
-		       DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
-		       comp->regs + DISP_DITHER_16);
-		writel(DISP_DITHERING, comp->regs + CFG);
+		mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_5);
+		mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_7);
+		mtk_ddp_write(cmdq_pkt,
+			      DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
+			      DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
+			      DITHER_NEW_BIT_MODE,
+			      comp, DISP_DITHER_15);
+		mtk_ddp_write(cmdq_pkt,
+			      DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
+			      DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
+			      DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
+			      DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
+			      comp, DISP_DITHER_16);
+		mtk_ddp_write(cmdq_pkt, DISP_DITHERING, comp, CFG);
 	}
 }
 
 static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
 			  unsigned int h, unsigned int vrefresh,
-			  unsigned int bpc)
+			  unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
-	writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
-	writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG);
-	mtk_dither_set(comp, bpc, DISP_OD_CFG);
+	mtk_ddp_write(cmdq_pkt, w << 16 | h, comp, DISP_OD_SIZE);
+	mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, comp, DISP_OD_CFG);
+	mtk_dither_set(comp, bpc, DISP_OD_CFG, cmdq_pkt);
 }
 
 static void mtk_od_start(struct mtk_ddp_comp *comp)
@@ -120,9 +176,9 @@ static void mtk_ufoe_start(struct mtk_ddp_comp *comp)
 
 static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w,
 			   unsigned int h, unsigned int vrefresh,
-			   unsigned int bpc)
+			   unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
-	writel(h << 16 | w, comp->regs + DISP_AAL_SIZE);
+	mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_AAL_SIZE);
 }
 
 static void mtk_aal_start(struct mtk_ddp_comp *comp)
@@ -137,10 +193,10 @@ static void mtk_aal_stop(struct mtk_ddp_comp *comp)
 
 static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w,
 			     unsigned int h, unsigned int vrefresh,
-			     unsigned int bpc)
+			     unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
-	writel(h << 16 | w, comp->regs + DISP_CCORR_SIZE);
-	writel(CCORR_RELAY_MODE, comp->regs + DISP_CCORR_CFG);
+	mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_CCORR_SIZE);
+	mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, comp, DISP_CCORR_CFG);
 }
 
 static void mtk_ccorr_start(struct mtk_ddp_comp *comp)
@@ -153,12 +209,63 @@ static void mtk_ccorr_stop(struct mtk_ddp_comp *comp)
 	writel_relaxed(0x0, comp->regs + DISP_CCORR_EN);
 }
 
+/* Converts a DRM S31.32 value to the HW S1.10 format. */
+static u16 mtk_ctm_s31_32_to_s1_10(u64 in)
+{
+	u16 r;
+
+	/* Sign bit. */
+	r = in & BIT_ULL(63) ? BIT(11) : 0;
+
+	if ((in & GENMASK_ULL(62, 33)) > 0) {
+		/* identity value 0x100000000 -> 0x400, */
+		/* if bigger than this, set it to the max 0x7ff. */
+		r |= GENMASK(10, 0);
+	} else {
+		/* take the 11 most important bits. */
+		r |= (in >> 22) & GENMASK(10, 0);
+	}
+
+	return r;
+}
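+
+/*
+ * Worked example (illustrative): the DRM identity coefficient 1.0 is
+ * 0x100000000 in S31.32; bits 62:33 are all zero, so it is shifted right
+ * by 22 bits, giving 0x400, i.e. 1.0 in S1.10. A coefficient of 2.0
+ * (0x200000000) has bit 33 set and saturates to the maximum magnitude
+ * 0x7ff, and a negative coefficient additionally sets the sign bit
+ * BIT(11).
+ */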
+
+static void mtk_ccorr_ctm_set(struct mtk_ddp_comp *comp,
+			      struct drm_crtc_state *state)
+{
+	struct drm_property_blob *blob = state->ctm;
+	struct drm_color_ctm *ctm;
+	const u64 *input;
+	uint16_t coeffs[9] = { 0 };
+	int i;
+	struct cmdq_pkt *cmdq_pkt = NULL;
+
+	if (!blob)
+		return;
+
+	ctm = (struct drm_color_ctm *)blob->data;
+	input = ctm->matrix;
+
+	for (i = 0; i < ARRAY_SIZE(coeffs); i++)
+		coeffs[i] = mtk_ctm_s31_32_to_s1_10(input[i]);
+
+	mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1],
+		      comp, DISP_CCORR_COEF_0);
+	mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3],
+		      comp, DISP_CCORR_COEF_1);
+	mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5],
+		      comp, DISP_CCORR_COEF_2);
+	mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7],
+		      comp, DISP_CCORR_COEF_3);
+	mtk_ddp_write(cmdq_pkt, coeffs[8] << 16,
+		      comp, DISP_CCORR_COEF_4);
+}
+
 static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w,
 			      unsigned int h, unsigned int vrefresh,
-			      unsigned int bpc)
+			      unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
-	writel(h << 16 | w, comp->regs + DISP_DITHER_SIZE);
-	writel(DITHER_RELAY_MODE, comp->regs + DISP_DITHER_CFG);
+	mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_DITHER_SIZE);
+	mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, comp, DISP_DITHER_CFG);
 }
 
 static void mtk_dither_start(struct mtk_ddp_comp *comp)
@@ -173,10 +280,10 @@ static void mtk_dither_stop(struct mtk_ddp_comp *comp)
 
 static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
 			     unsigned int h, unsigned int vrefresh,
-			     unsigned int bpc)
+			     unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
 {
-	writel(h << 16 | w, comp->regs + DISP_GAMMA_SIZE);
-	mtk_dither_set(comp, bpc, DISP_GAMMA_CFG);
+	mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_GAMMA_SIZE);
+	mtk_dither_set(comp, bpc, DISP_GAMMA_CFG, cmdq_pkt);
 }
 
 static void mtk_gamma_start(struct mtk_ddp_comp *comp)
@@ -223,6 +330,7 @@ static const struct mtk_ddp_comp_funcs ddp_ccorr = {
 	.config = mtk_ccorr_config,
 	.start = mtk_ccorr_start,
 	.stop = mtk_ccorr_stop,
+	.ctm_set = mtk_ccorr_ctm_set,
 };
 
 static const struct mtk_ddp_comp_funcs ddp_dither = {
@@ -326,6 +434,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
 	enum mtk_ddp_comp_type type;
 	struct device_node *larb_node;
 	struct platform_device *larb_pdev;
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	struct resource res;
+	struct cmdq_client_reg cmdq_reg;
+	int ret;
+#endif
 
 	if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
 		return -EINVAL;
@@ -379,6 +492,19 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
 
 	comp->larb_dev = &larb_pdev->dev;
 
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	if (of_address_to_resource(node, 0, &res) != 0) {
+		dev_err(dev, "Missing reg in %s node\n", node->full_name);
+		return -EINVAL;
+	}
+	comp->regs_pa = res.start;
+
+	ret = cmdq_dev_get_client_reg(dev, &cmdq_reg, 0);
+	if (ret)
+		dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+	else
+		comp->subsys = cmdq_reg.subsys;
+#endif
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 2f1e9e7..debe363 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -69,27 +69,29 @@ enum mtk_ddp_comp_id {
 };
 
 struct mtk_ddp_comp;
-
+struct cmdq_pkt;
 struct mtk_ddp_comp_funcs {
 	void (*config)(struct mtk_ddp_comp *comp, unsigned int w,
-		       unsigned int h, unsigned int vrefresh, unsigned int bpc);
+		       unsigned int h, unsigned int vrefresh,
+		       unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
 	void (*start)(struct mtk_ddp_comp *comp);
 	void (*stop)(struct mtk_ddp_comp *comp);
 	void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
 	void (*disable_vblank)(struct mtk_ddp_comp *comp);
 	unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp);
 	unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
-	void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
-	void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
 	int (*layer_check)(struct mtk_ddp_comp *comp,
 			   unsigned int idx,
 			   struct mtk_plane_state *state);
 	void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
-			     struct mtk_plane_state *state);
+			     struct mtk_plane_state *state,
+			     struct cmdq_pkt *cmdq_pkt);
 	void (*gamma_set)(struct mtk_ddp_comp *comp,
 			  struct drm_crtc_state *state);
 	void (*bgclr_in_on)(struct mtk_ddp_comp *comp);
 	void (*bgclr_in_off)(struct mtk_ddp_comp *comp);
+	void (*ctm_set)(struct mtk_ddp_comp *comp,
+			struct drm_crtc_state *state);
 };
 
 struct mtk_ddp_comp {
@@ -99,14 +101,17 @@ struct mtk_ddp_comp {
 	struct device *larb_dev;
 	enum mtk_ddp_comp_id id;
 	const struct mtk_ddp_comp_funcs *funcs;
+	resource_size_t regs_pa;
+	u8 subsys;
 };
 
 static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp,
 				       unsigned int w, unsigned int h,
-				       unsigned int vrefresh, unsigned int bpc)
+				       unsigned int vrefresh, unsigned int bpc,
+				       struct cmdq_pkt *cmdq_pkt)
 {
 	if (comp->funcs && comp->funcs->config)
-		comp->funcs->config(comp, w, h, vrefresh, bpc);
+		comp->funcs->config(comp, w, h, vrefresh, bpc, cmdq_pkt);
 }
 
 static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp)
@@ -151,20 +156,6 @@ static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
 	return 0;
 }
 
-static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
-					 unsigned int idx)
-{
-	if (comp->funcs && comp->funcs->layer_on)
-		comp->funcs->layer_on(comp, idx);
-}
-
-static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp,
-					  unsigned int idx)
-{
-	if (comp->funcs && comp->funcs->layer_off)
-		comp->funcs->layer_off(comp, idx);
-}
-
 static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp,
 					   unsigned int idx,
 					   struct mtk_plane_state *state)
@@ -176,10 +167,11 @@ static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp,
 
 static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp,
 					     unsigned int idx,
-					     struct mtk_plane_state *state)
+					     struct mtk_plane_state *state,
+					     struct cmdq_pkt *cmdq_pkt)
 {
 	if (comp->funcs && comp->funcs->layer_config)
-		comp->funcs->layer_config(comp, idx, state);
+		comp->funcs->layer_config(comp, idx, state, cmdq_pkt);
 }
 
 static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
@@ -201,6 +193,13 @@ static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp)
 		comp->funcs->bgclr_in_off(comp);
 }
 
+static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp,
+				   struct drm_crtc_state *state)
+{
+	if (comp->funcs && comp->funcs->ctm_set)
+		comp->funcs->ctm_set(comp, state);
+}
+
 int mtk_ddp_comp_get_id(struct device_node *node,
 			enum mtk_ddp_comp_type comp_type);
 int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
@@ -209,6 +208,13 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
 int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp);
 void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp);
 void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
-		    unsigned int CFG);
-
+		    unsigned int CFG, struct cmdq_pkt *cmdq_pkt);
+enum mtk_ddp_comp_type mtk_ddp_comp_get_type(enum mtk_ddp_comp_id comp_id);
+void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+		   struct mtk_ddp_comp *comp, unsigned int offset);
+void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+			   struct mtk_ddp_comp *comp, unsigned int offset);
+void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+			struct mtk_ddp_comp *comp, unsigned int offset,
+			unsigned int mask);
 #endif /* MTK_DRM_DDP_COMP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 84d1421..0563c68 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -16,8 +16,10 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
@@ -27,7 +29,6 @@
 #include "mtk_drm_ddp.h"
 #include "mtk_drm_ddp_comp.h"
 #include "mtk_drm_drv.h"
-#include "mtk_drm_fb.h"
 #include "mtk_drm_gem.h"
 
 #define DRIVER_NAME "mediatek"
@@ -36,89 +37,27 @@
 #define DRIVER_MAJOR 1
 #define DRIVER_MINOR 0
 
-static void mtk_atomic_schedule(struct mtk_drm_private *private,
-				struct drm_atomic_state *state)
+static const struct drm_mode_config_helper_funcs mtk_drm_mode_config_helpers = {
+	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
+static struct drm_framebuffer *
+mtk_drm_mode_fb_create(struct drm_device *dev,
+		       struct drm_file *file,
+		       const struct drm_mode_fb_cmd2 *cmd)
 {
-	private->commit.state = state;
-	schedule_work(&private->commit.work);
-}
+	const struct drm_format_info *info = drm_get_format_info(dev, cmd);
 
-static void mtk_atomic_complete(struct mtk_drm_private *private,
-				struct drm_atomic_state *state)
-{
-	struct drm_device *drm = private->drm;
+	if (info->num_planes != 1)
+		return ERR_PTR(-EINVAL);
 
-	drm_atomic_helper_wait_for_fences(drm, state, false);
-
-	/*
-	 * Mediatek drm supports runtime PM, so plane registers cannot be
-	 * written when their crtc is disabled.
-	 *
-	 * The comment for drm_atomic_helper_commit states:
-	 *     For drivers supporting runtime PM the recommended sequence is
-	 *
-	 *     drm_atomic_helper_commit_modeset_disables(dev, state);
-	 *     drm_atomic_helper_commit_modeset_enables(dev, state);
-	 *     drm_atomic_helper_commit_planes(dev, state,
-	 *                                     DRM_PLANE_COMMIT_ACTIVE_ONLY);
-	 *
-	 * See the kerneldoc entries for these three functions for more details.
-	 */
-	drm_atomic_helper_commit_modeset_disables(drm, state);
-	drm_atomic_helper_commit_modeset_enables(drm, state);
-	drm_atomic_helper_commit_planes(drm, state,
-					DRM_PLANE_COMMIT_ACTIVE_ONLY);
-
-	drm_atomic_helper_wait_for_vblanks(drm, state);
-
-	drm_atomic_helper_cleanup_planes(drm, state);
-	drm_atomic_state_put(state);
-}
-
-static void mtk_atomic_work(struct work_struct *work)
-{
-	struct mtk_drm_private *private = container_of(work,
-			struct mtk_drm_private, commit.work);
-
-	mtk_atomic_complete(private, private->commit.state);
-}
-
-static int mtk_atomic_commit(struct drm_device *drm,
-			     struct drm_atomic_state *state,
-			     bool async)
-{
-	struct mtk_drm_private *private = drm->dev_private;
-	int ret;
-
-	ret = drm_atomic_helper_prepare_planes(drm, state);
-	if (ret)
-		return ret;
-
-	mutex_lock(&private->commit.lock);
-	flush_work(&private->commit.work);
-
-	ret = drm_atomic_helper_swap_state(state, true);
-	if (ret) {
-		mutex_unlock(&private->commit.lock);
-		drm_atomic_helper_cleanup_planes(drm, state);
-		return ret;
-	}
-
-	drm_atomic_state_get(state);
-	if (async)
-		mtk_atomic_schedule(private, state);
-	else
-		mtk_atomic_complete(private, state);
-
-	mutex_unlock(&private->commit.lock);
-
-	return 0;
+	return drm_gem_fb_create(dev, file, cmd);
 }
 
 static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = {
 	.fb_create = mtk_drm_mode_fb_create,
 	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = mtk_atomic_commit,
+	.atomic_commit = drm_atomic_helper_commit,
 };
 
 static const enum mtk_ddp_comp_id mt2701_mtk_ddp_main[] = {
@@ -236,6 +175,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	drm->mode_config.max_width = 4096;
 	drm->mode_config.max_height = 4096;
 	drm->mode_config.funcs = &mtk_drm_mode_config_funcs;
+	drm->mode_config.helper_private = &mtk_drm_mode_config_helpers;
 
 	ret = component_bind_all(drm->dev, drm);
 	if (ret)
@@ -495,8 +435,6 @@ static int mtk_drm_probe(struct platform_device *pdev)
 	if (!private)
 		return -ENOMEM;
 
-	mutex_init(&private->commit.lock);
-	INIT_WORK(&private->commit.work, mtk_atomic_work);
 	private->data = of_device_get_match_data(dev);
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index e03fea1..17bc99b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -43,13 +43,6 @@ struct mtk_drm_private {
 	struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
 	struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
 	const struct mtk_mmsys_driver_data *data;
-
-	struct {
-		struct drm_atomic_state *state;
-		struct work_struct work;
-		struct mutex lock;
-	} commit;
-
 	struct drm_atomic_state *suspend_state;
 
 	bool dma_parms_allocated;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
deleted file mode 100644
index 3f230a2..0000000
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ /dev/null
@@ -1,92 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015 MediaTek Inc.
- */
-
-#include <linux/dma-buf.h>
-#include <linux/dma-resv.h>
-
-#include <drm/drm_modeset_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-
-#include "mtk_drm_drv.h"
-#include "mtk_drm_fb.h"
-#include "mtk_drm_gem.h"
-
-static const struct drm_framebuffer_funcs mtk_drm_fb_funcs = {
-	.create_handle = drm_gem_fb_create_handle,
-	.destroy = drm_gem_fb_destroy,
-};
-
-static struct drm_framebuffer *mtk_drm_framebuffer_init(struct drm_device *dev,
-					const struct drm_mode_fb_cmd2 *mode,
-					struct drm_gem_object *obj)
-{
-	const struct drm_format_info *info = drm_get_format_info(dev, mode);
-	struct drm_framebuffer *fb;
-	int ret;
-
-	if (info->num_planes != 1)
-		return ERR_PTR(-EINVAL);
-
-	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
-	if (!fb)
-		return ERR_PTR(-ENOMEM);
-
-	drm_helper_mode_fill_fb_struct(dev, fb, mode);
-
-	fb->obj[0] = obj;
-
-	ret = drm_framebuffer_init(dev, fb, &mtk_drm_fb_funcs);
-	if (ret) {
-		DRM_ERROR("failed to initialize framebuffer\n");
-		kfree(fb);
-		return ERR_PTR(ret);
-	}
-
-	return fb;
-}
-
-struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
-					       struct drm_file *file,
-					       const struct drm_mode_fb_cmd2 *cmd)
-{
-	const struct drm_format_info *info = drm_get_format_info(dev, cmd);
-	struct drm_framebuffer *fb;
-	struct drm_gem_object *gem;
-	unsigned int width = cmd->width;
-	unsigned int height = cmd->height;
-	unsigned int size, bpp;
-	int ret;
-
-	if (info->num_planes != 1)
-		return ERR_PTR(-EINVAL);
-
-	gem = drm_gem_object_lookup(file, cmd->handles[0]);
-	if (!gem)
-		return ERR_PTR(-ENOENT);
-
-	bpp = info->cpp[0];
-	size = (height - 1) * cmd->pitches[0] + width * bpp;
-	size += cmd->offsets[0];
-
-	if (gem->size < size) {
-		ret = -EINVAL;
-		goto unreference;
-	}
-
-	fb = mtk_drm_framebuffer_init(dev, cmd, gem);
-	if (IS_ERR(fb)) {
-		ret = PTR_ERR(fb);
-		goto unreference;
-	}
-
-	return fb;
-
-unreference:
-	drm_gem_object_put_unlocked(gem);
-	return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.h b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
deleted file mode 100644
index eb64d26..0000000
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015 MediaTek Inc.
- */
-
-#ifndef MTK_DRM_FB_H
-#define MTK_DRM_FB_H
-
-struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
-					       struct drm_file *file,
-					       const struct drm_mode_fb_cmd2 *cmd);
-
-#endif /* MTK_DRM_FB_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 3b0cc91..914cc76 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -7,13 +7,13 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_atomic_uapi.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 
 #include "mtk_drm_crtc.h"
 #include "mtk_drm_ddp_comp.h"
 #include "mtk_drm_drv.h"
-#include "mtk_drm_fb.h"
 #include "mtk_drm_gem.h"
 #include "mtk_drm_plane.h"
 
@@ -76,6 +76,50 @@ static void mtk_drm_plane_destroy_state(struct drm_plane *plane,
 	kfree(to_mtk_plane_state(state));
 }
 
+static int mtk_plane_atomic_async_check(struct drm_plane *plane,
+					struct drm_plane_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+
+	if (plane != state->crtc->cursor)
+		return -EINVAL;
+
+	if (!plane->state)
+		return -EINVAL;
+
+	if (!plane->state->fb)
+		return -EINVAL;
+
+	if (state->state)
+		crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+								state->crtc);
+	else /* Special case for asynchronous cursor updates. */
+		crtc_state = state->crtc->state;
+
+	return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
+						   DRM_PLANE_HELPER_NO_SCALING,
+						   DRM_PLANE_HELPER_NO_SCALING,
+						   true, true);
+}
+
+static void mtk_plane_atomic_async_update(struct drm_plane *plane,
+					  struct drm_plane_state *new_state)
+{
+	struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+
+	plane->state->crtc_x = new_state->crtc_x;
+	plane->state->crtc_y = new_state->crtc_y;
+	plane->state->crtc_h = new_state->crtc_h;
+	plane->state->crtc_w = new_state->crtc_w;
+	plane->state->src_x = new_state->src_x;
+	plane->state->src_y = new_state->src_y;
+	plane->state->src_h = new_state->src_h;
+	plane->state->src_w = new_state->src_w;
+	state->pending.async_dirty = true;
+
+	mtk_drm_crtc_async_update(new_state->crtc, plane, new_state);
+}
+
 static const struct drm_plane_funcs mtk_plane_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
@@ -95,7 +139,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
 	if (!fb)
 		return 0;
 
-	if (!state->crtc)
+	if (WARN_ON(!state->crtc))
 		return 0;
 
 	ret = mtk_drm_crtc_plane_check(state->crtc, plane,
@@ -164,6 +208,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
 	.atomic_check = mtk_plane_atomic_check,
 	.atomic_update = mtk_plane_atomic_update,
 	.atomic_disable = mtk_plane_atomic_disable,
+	.atomic_async_update = mtk_plane_atomic_async_update,
+	.atomic_async_check = mtk_plane_atomic_async_check,
 };
 
 int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
index 760885e..d454bec 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
@@ -22,6 +22,8 @@ struct mtk_plane_pending_state {
 	unsigned int			height;
 	unsigned int			rotation;
 	bool				dirty;
+	bool				async_dirty;
+	bool				async_config;
 };
 
 struct mtk_plane_state {
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index d77c9f4..5fa1073 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -830,7 +830,7 @@ static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
 {
 	struct mtk_dsi *dsi = connector_to_dsi(connector);
 
-	return drm_panel_get_modes(dsi->panel);
+	return drm_panel_get_modes(dsi->panel, connector);
 }
 
 static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index c79b1f8..5e4a4db 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1238,17 +1238,19 @@ static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
 				    struct drm_display_mode *mode)
 {
 	struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+	struct drm_bridge *next_bridge;
 
 	dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
 		mode->hdisplay, mode->vdisplay, mode->vrefresh,
 		!!(mode->flags & DRM_MODE_FLAG_INTERLACE), mode->clock * 1000);
 
-	if (hdmi->bridge.next) {
+	next_bridge = drm_bridge_get_next_bridge(&hdmi->bridge);
+	if (next_bridge) {
 		struct drm_display_mode adjusted_mode;
 
 		drm_mode_copy(&adjusted_mode, mode);
-		if (!drm_bridge_mode_fixup(hdmi->bridge.next, mode,
-					   &adjusted_mode))
+		if (!drm_bridge_chain_mode_fixup(next_bridge, mode,
+						 &adjusted_mode))
 			return MODE_BAD;
 	}
 
@@ -1300,9 +1302,10 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
 	struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
 	int ret;
 
-	ret = drm_connector_init(bridge->encoder->dev, &hdmi->conn,
-				 &mtk_hdmi_connector_funcs,
-				 DRM_MODE_CONNECTOR_HDMIA);
+	ret = drm_connector_init_with_ddc(bridge->encoder->dev, &hdmi->conn,
+					  &mtk_hdmi_connector_funcs,
+					  DRM_MODE_CONNECTOR_HDMIA,
+					  hdmi->ddc_adpt);
 	if (ret) {
 		dev_err(hdmi->dev, "Failed to initialize connector: %d\n", ret);
 		return ret;
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index c389e23..28a519c 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
 meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o
+meson-drm-y += meson_rdma.o meson_osd_afbcd.o
 
 obj-$(CONFIG_DRM_MESON) += meson-drm.o
 obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 57ae1c1..e66b627 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -23,7 +23,9 @@
 #include "meson_registers.h"
 #include "meson_venc.h"
 #include "meson_viu.h"
+#include "meson_rdma.h"
 #include "meson_vpp.h"
+#include "meson_osd_afbcd.h"
 
 #define MESON_G12A_VIU_OFFSET	0x5ec0
 
@@ -35,7 +37,11 @@ struct meson_crtc {
 	struct meson_drm *priv;
 	void (*enable_osd1)(struct meson_drm *priv);
 	void (*enable_vd1)(struct meson_drm *priv);
+	void (*enable_osd1_afbc)(struct meson_drm *priv);
+	void (*disable_osd1_afbc)(struct meson_drm *priv);
 	unsigned int viu_offset;
+	bool vsync_forced;
+	bool vsync_disabled;
 };
 #define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
 
@@ -46,6 +52,7 @@ static int meson_crtc_enable_vblank(struct drm_crtc *crtc)
 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
 	struct meson_drm *priv = meson_crtc->priv;
 
+	meson_crtc->vsync_disabled = false;
 	meson_venc_enable_vsync(priv);
 
 	return 0;
@@ -56,7 +63,10 @@ static void meson_crtc_disable_vblank(struct drm_crtc *crtc)
 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
 	struct meson_drm *priv = meson_crtc->priv;
 
-	meson_venc_disable_vsync(priv);
+	if (!meson_crtc->vsync_forced) {
+		meson_crtc->vsync_disabled = true;
+		meson_venc_disable_vsync(priv);
+	}
 }
 
 static const struct drm_crtc_funcs meson_crtc_funcs = {
@@ -236,6 +246,26 @@ static void meson_crtc_enable_osd1(struct meson_drm *priv)
 			    priv->io_base + _REG(VPP_MISC));
 }
 
+static void meson_crtc_g12a_enable_osd1_afbc(struct meson_drm *priv)
+{
+	writel_relaxed(priv->viu.osd1_blk2_cfg4,
+		       priv->io_base + _REG(VIU_OSD1_BLK2_CFG_W4));
+
+	writel_bits_relaxed(OSD_MEM_LINEAR_ADDR, OSD_MEM_LINEAR_ADDR,
+			    priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+
+	writel_relaxed(priv->viu.osd1_blk1_cfg4,
+		       priv->io_base + _REG(VIU_OSD1_BLK1_CFG_W4));
+
+	meson_viu_g12a_enable_osd1_afbc(priv);
+
+	writel_bits_relaxed(OSD_MEM_LINEAR_ADDR, OSD_MEM_LINEAR_ADDR,
+			    priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+
+	writel_bits_relaxed(OSD_MALI_SRC_EN, OSD_MALI_SRC_EN,
+			    priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W0));
+}
+
 static void meson_g12a_crtc_enable_osd1(struct meson_drm *priv)
 {
 	writel_relaxed(priv->viu.osd_blend_din0_scope_h,
@@ -281,6 +311,8 @@ void meson_crtc_irq(struct meson_drm *priv)
 	if (priv->viu.osd1_enabled && priv->viu.osd1_commit) {
 		writel_relaxed(priv->viu.osd1_ctrl_stat,
 				priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+		writel_relaxed(priv->viu.osd1_ctrl_stat2,
+				priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
 		writel_relaxed(priv->viu.osd1_blk0_cfg[0],
 				priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W0));
 		writel_relaxed(priv->viu.osd1_blk0_cfg[1],
@@ -291,6 +323,20 @@ void meson_crtc_irq(struct meson_drm *priv)
 				priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W3));
 		writel_relaxed(priv->viu.osd1_blk0_cfg[4],
 				priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W4));
+
+		if (priv->viu.osd1_afbcd) {
+			if (meson_crtc->enable_osd1_afbc)
+				meson_crtc->enable_osd1_afbc(priv);
+		} else {
+			if (meson_crtc->disable_osd1_afbc)
+				meson_crtc->disable_osd1_afbc(priv);
+			if (priv->afbcd.ops) {
+				priv->afbcd.ops->reset(priv);
+				priv->afbcd.ops->disable(priv);
+			}
+			meson_crtc->vsync_forced = false;
+		}
+
 		writel_relaxed(priv->viu.osd_sc_ctrl0,
 				priv->io_base + _REG(VPP_OSD_SC_CTRL0));
 		writel_relaxed(priv->viu.osd_sc_i_wh_m1,
@@ -312,15 +358,25 @@ void meson_crtc_irq(struct meson_drm *priv)
 		writel_relaxed(priv->viu.osd_sc_v_ctrl0,
 				priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
 
-		meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
-				priv->viu.osd1_addr, priv->viu.osd1_stride,
-				priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
-				MESON_CANVAS_BLKMODE_LINEAR, 0);
+		if (!priv->viu.osd1_afbcd)
+			meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
+					    priv->viu.osd1_addr,
+					    priv->viu.osd1_stride,
+					    priv->viu.osd1_height,
+					    MESON_CANVAS_WRAP_NONE,
+					    MESON_CANVAS_BLKMODE_LINEAR, 0);
 
 		/* Enable OSD1 */
 		if (meson_crtc->enable_osd1)
 			meson_crtc->enable_osd1(priv);
 
+		if (priv->viu.osd1_afbcd) {
+			priv->afbcd.ops->reset(priv);
+			priv->afbcd.ops->setup(priv);
+			priv->afbcd.ops->enable(priv);
+			meson_crtc->vsync_forced = true;
+		}
+
 		priv->viu.osd1_commit = false;
 	}
 
@@ -357,7 +413,7 @@ void meson_crtc_irq(struct meson_drm *priv)
 					    MESON_CANVAS_WRAP_NONE,
 					    MESON_CANVAS_BLKMODE_LINEAR,
 					    MESON_CANVAS_ENDIAN_SWAP64);
-		};
+		}
 
 		writel_relaxed(priv->viu.vd1_if0_gen_reg,
 				priv->io_base + meson_crtc->viu_offset +
@@ -543,6 +599,9 @@ void meson_crtc_irq(struct meson_drm *priv)
 		priv->viu.vd1_commit = false;
 	}
 
+	if (meson_crtc->vsync_disabled)
+		return;
+
 	drm_crtc_handle_vblank(priv->crtc);
 
 	spin_lock_irqsave(&priv->drm->event_lock, flags);
@@ -579,10 +638,20 @@ int meson_crtc_create(struct meson_drm *priv)
 		meson_crtc->enable_osd1 = meson_g12a_crtc_enable_osd1;
 		meson_crtc->enable_vd1 = meson_g12a_crtc_enable_vd1;
 		meson_crtc->viu_offset = MESON_G12A_VIU_OFFSET;
+		meson_crtc->enable_osd1_afbc =
+					meson_crtc_g12a_enable_osd1_afbc;
+		meson_crtc->disable_osd1_afbc =
+					meson_viu_g12a_disable_osd1_afbc;
 		drm_crtc_helper_add(crtc, &meson_g12a_crtc_helper_funcs);
 	} else {
 		meson_crtc->enable_osd1 = meson_crtc_enable_osd1;
 		meson_crtc->enable_vd1 = meson_crtc_enable_vd1;
+		if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM)) {
+			meson_crtc->enable_osd1_afbc =
+					meson_viu_gxm_enable_osd1_afbc;
+			meson_crtc->disable_osd1_afbc =
+					meson_viu_gxm_disable_osd1_afbc;
+		}
 		drm_crtc_helper_add(crtc, &meson_crtc_helper_funcs);
 	}
 
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 397c331..b5f5eb7 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -28,10 +28,12 @@
 #include "meson_drv.h"
 #include "meson_overlay.h"
 #include "meson_plane.h"
+#include "meson_osd_afbcd.h"
 #include "meson_registers.h"
 #include "meson_venc_cvbs.h"
 #include "meson_viu.h"
 #include "meson_vpp.h"
+#include "meson_rdma.h"
 
 #define DRIVER_NAME "meson"
 #define DRIVER_DESC "Amlogic Meson DRM driver"
@@ -184,6 +186,7 @@ static void meson_remove_framebuffers(void)
 static int meson_drv_bind_master(struct device *dev, bool has_components)
 {
 	struct platform_device *pdev = to_platform_device(dev);
+	const struct meson_drm_match_data *match;
 	struct meson_drm *priv;
 	struct drm_device *drm;
 	struct resource *res;
@@ -196,6 +199,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 		return -ENODEV;
 	}
 
+	match = of_device_get_match_data(dev);
+	if (!match)
+		return -ENODEV;
+
 	drm = drm_dev_alloc(&meson_driver, dev);
 	if (IS_ERR(drm))
 		return PTR_ERR(drm);
@@ -208,8 +215,8 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 	drm->dev_private = priv;
 	priv->drm = drm;
 	priv->dev = dev;
-
-	priv->compat = (enum vpu_compatible)of_device_get_match_data(priv->dev);
+	priv->compat = match->compat;
+	priv->afbcd.ops = match->afbcd_ops;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
 	regs = devm_ioremap_resource(dev, res);
@@ -289,6 +296,11 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 	meson_venc_init(priv);
 	meson_vpp_init(priv);
 	meson_viu_init(priv);
+	if (priv->afbcd.ops) {
+		ret = priv->afbcd.ops->init(priv);
+		if (ret)
+			return ret;
+	}
 
 	/* Encoder Initialization */
 
@@ -359,12 +371,16 @@ static void meson_drv_unbind(struct device *dev)
 		meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2);
 	}
 
+	if (priv->afbcd.ops) {
+		priv->afbcd.ops->reset(priv);
+		meson_rdma_free(priv);
+	}
+
 	drm_dev_unregister(drm);
 	drm_irq_uninstall(drm);
 	drm_kms_helper_poll_fini(drm);
 	drm_mode_config_cleanup(drm);
 	drm_dev_put(drm);
-
 }
 
 static const struct component_master_ops meson_drv_master_ops = {
@@ -393,6 +409,8 @@ static int __maybe_unused meson_drv_pm_resume(struct device *dev)
 	meson_venc_init(priv);
 	meson_vpp_init(priv);
 	meson_viu_init(priv);
+	if (priv->afbcd.ops)
+		priv->afbcd.ops->init(priv);
 
 	drm_mode_config_helper_resume(priv->drm);
 
@@ -481,15 +499,33 @@ static int meson_drv_probe(struct platform_device *pdev)
 	return 0;
 };
 
+static struct meson_drm_match_data meson_drm_gxbb_data = {
+	.compat = VPU_COMPATIBLE_GXBB,
+};
+
+static struct meson_drm_match_data meson_drm_gxl_data = {
+	.compat = VPU_COMPATIBLE_GXL,
+};
+
+static struct meson_drm_match_data meson_drm_gxm_data = {
+	.compat = VPU_COMPATIBLE_GXM,
+	.afbcd_ops = &meson_afbcd_gxm_ops,
+};
+
+static struct meson_drm_match_data meson_drm_g12a_data = {
+	.compat = VPU_COMPATIBLE_G12A,
+	.afbcd_ops = &meson_afbcd_g12a_ops,
+};
+
 static const struct of_device_id dt_match[] = {
 	{ .compatible = "amlogic,meson-gxbb-vpu",
-	  .data       = (void *)VPU_COMPATIBLE_GXBB },
+	  .data       = (void *)&meson_drm_gxbb_data },
 	{ .compatible = "amlogic,meson-gxl-vpu",
-	  .data       = (void *)VPU_COMPATIBLE_GXL },
+	  .data       = (void *)&meson_drm_gxl_data },
 	{ .compatible = "amlogic,meson-gxm-vpu",
-	  .data       = (void *)VPU_COMPATIBLE_GXM },
+	  .data       = (void *)&meson_drm_gxm_data },
 	{ .compatible = "amlogic,meson-g12a-vpu",
-	  .data       = (void *)VPU_COMPATIBLE_G12A },
+	  .data       = (void *)&meson_drm_g12a_data },
 	{}
 };
 MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 820d07b..04fdf38 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -16,6 +16,7 @@ struct drm_crtc;
 struct drm_device;
 struct drm_plane;
 struct meson_drm;
+struct meson_afbcd_ops;
 
 enum vpu_compatible {
 	VPU_COMPATIBLE_GXBB = 0,
@@ -24,6 +25,11 @@ enum vpu_compatible {
 	VPU_COMPATIBLE_G12A = 3,
 };
 
+struct meson_drm_match_data {
+	enum vpu_compatible compat;
+	struct meson_afbcd_ops *afbcd_ops;
+};
+
 struct meson_drm {
 	struct device *dev;
 	enum vpu_compatible compat;
@@ -47,11 +53,16 @@ struct meson_drm {
 		bool osd1_enabled;
 		bool osd1_interlace;
 		bool osd1_commit;
+		bool osd1_afbcd;
 		uint32_t osd1_ctrl_stat;
+		uint32_t osd1_ctrl_stat2;
 		uint32_t osd1_blk0_cfg[5];
+		uint32_t osd1_blk1_cfg4;
+		uint32_t osd1_blk2_cfg4;
 		uint32_t osd1_addr;
 		uint32_t osd1_stride;
 		uint32_t osd1_height;
+		uint32_t osd1_width;
 		uint32_t osd_sc_ctrl0;
 		uint32_t osd_sc_i_wh_m1;
 		uint32_t osd_sc_o_h_start_end;
@@ -122,6 +133,18 @@ struct meson_drm {
 		bool venc_repeat;
 		bool hdmi_use_enci;
 	} venc;
+
+	struct {
+		dma_addr_t addr_dma;
+		uint32_t *addr;
+		unsigned int offset;
+	} rdma;
+
+	struct {
+		struct meson_afbcd_ops *ops;
+		u64 modifier;
+		u32 format;
+	} afbcd;
 };
 
 static inline int meson_vpu_is_compatible(struct meson_drm *priv,
diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c
new file mode 100644
index 0000000..f12e027
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_fourcc.h>
+
+#include "meson_drv.h"
+#include "meson_registers.h"
+#include "meson_viu.h"
+#include "meson_rdma.h"
+#include "meson_osd_afbcd.h"
+
+/*
+ * DOC: Driver for the ARM FrameBuffer Compression Decoders
+ *
+ * The Amlogic GXM and G12A SoC families embed an AFBC Decoder,
+ * to decode compressed buffers generated by the ARM Mali GPU.
+ *
+ * For the GXM Family, Amlogic designed their own Decoder, named in
+ * the vendor source as "MESON_AFBC", and a single decoder is available
+ * for the 2 OSD planes.
+ * This decoder is compatible with the AFBC 1.0 specifications and the
+ * Mali T820 GPU capabilities.
+ * It supports:
+ * - basic AFBC buffer for RGB32 only, thus YTR feature is mandatory
+ * - SPARSE layout and SPLIT layout
+ * - only 16x16 superblock
+ *
+ * The decoder reads the data from the SDRAM, decodes and sends the
+ * decoded pixel stream to the OSD1 Plane pixel composer.
+ *
+ * For the G12A Family, Amlogic integrated an ARM AFBC Decoder, named
+ * in the vendor source as "MALI_AFBC", and the decoder can decode up
+ * to 4 surfaces, one for each of the 4 available OSDs.
+ * This decoder is compatible with the AFBC 1.2 specifications for the
+ * Mali G31 and G52 GPUs.
+ * It supports:
+ * - basic AFBC buffer for multiple RGB and YUV pixel formats
+ * - SPARSE layout and SPLIT layout
+ * - 16x16 and 32x8 "wideblk" superblocks
+ * - Tiled header
+ *
+ * The ARM AFBC Decoder is independent from the VPU Pixel Pipeline: it
+ * reads the data from the SDRAM, then decodes it into a private internal
+ * physical buffer from which the OSD1 Plane pixel composer unpacks the
+ * decoded data.
+ */
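+
+/*
+ * Illustrative note (assumption, using the generic drm_fourcc.h AFBC
+ * modifier macros rather than anything defined in this file): a buffer
+ * accepted by the GXM decoder would typically be DRM_FORMAT_XBGR8888
+ * with a modifier such as
+ * DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+ *                         AFBC_FORMAT_MOD_YTR | AFBC_FORMAT_MOD_SPARSE),
+ * since this decoder requires YTR and only handles 16x16 superblocks.
+ */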
+
+/* Amlogic AFBC Decoder for GXM Family */
+
+#define OSD1_AFBCD_RGB32	0x15
+
+static int meson_gxm_afbcd_pixel_fmt(u64 modifier, uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		return OSD1_AFBCD_RGB32;
+	/* TOFIX: support more formats */
+	default:
+		DRM_DEBUG("unsupported afbc format[%08x]\n", format);
+		return -EINVAL;
+	}
+}
+
+static bool meson_gxm_afbcd_supported_fmt(u64 modifier, uint32_t format)
+{
+	if (modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
+		return false;
+
+	if (!(modifier & AFBC_FORMAT_MOD_YTR))
+		return false;
+
+	return meson_gxm_afbcd_pixel_fmt(modifier, format) >= 0;
+}
+
+static int meson_gxm_afbcd_init(struct meson_drm *priv)
+{
+	return 0;
+}
+
+static int meson_gxm_afbcd_reset(struct meson_drm *priv)
+{
+	writel_relaxed(VIU_SW_RESET_OSD1_AFBCD,
+		       priv->io_base + _REG(VIU_SW_RESET));
+	writel_relaxed(0, priv->io_base + _REG(VIU_SW_RESET));
+
+	return 0;
+}
+
+static int meson_gxm_afbcd_enable(struct meson_drm *priv)
+{
+	writel_relaxed(FIELD_PREP(OSD1_AFBCD_ID_FIFO_THRD, 0x40) |
+		       OSD1_AFBCD_DEC_ENABLE,
+		       priv->io_base + _REG(OSD1_AFBCD_ENABLE));
+
+	return 0;
+}
+
+static int meson_gxm_afbcd_disable(struct meson_drm *priv)
+{
+	writel_bits_relaxed(OSD1_AFBCD_DEC_ENABLE, 0,
+			    priv->io_base + _REG(OSD1_AFBCD_ENABLE));
+
+	return 0;
+}
+
+static int meson_gxm_afbcd_setup(struct meson_drm *priv)
+{
+	u32 conv_lbuf_len;
+	u32 mode = FIELD_PREP(OSD1_AFBCD_MIF_URGENT, 3) |
+		   FIELD_PREP(OSD1_AFBCD_HOLD_LINE_NUM, 4) |
+		   FIELD_PREP(OSD1_AFBCD_RGBA_EXCHAN_CTRL, 0x34) |
+		   meson_gxm_afbcd_pixel_fmt(priv->afbcd.modifier,
+					     priv->afbcd.format);
+
+	if (priv->afbcd.modifier & AFBC_FORMAT_MOD_SPARSE)
+		mode |= OSD1_AFBCD_HREG_HALF_BLOCK;
+
+	if (priv->afbcd.modifier & AFBC_FORMAT_MOD_SPLIT)
+		mode |= OSD1_AFBCD_HREG_BLOCK_SPLIT;
+
+	writel_relaxed(mode, priv->io_base + _REG(OSD1_AFBCD_MODE));
+
+	writel_relaxed(FIELD_PREP(OSD1_AFBCD_HREG_VSIZE_IN,
+				  priv->viu.osd1_width) |
+		       FIELD_PREP(OSD1_AFBCD_HREG_HSIZE_IN,
+				  priv->viu.osd1_height),
+		       priv->io_base + _REG(OSD1_AFBCD_SIZE_IN));
+
+	writel_relaxed(priv->viu.osd1_addr >> 4,
+		       priv->io_base + _REG(OSD1_AFBCD_HDR_PTR));
+	writel_relaxed(priv->viu.osd1_addr >> 4,
+		       priv->io_base + _REG(OSD1_AFBCD_FRAME_PTR));
+	/* TOFIX: bits 31:24 are not documented, nor the meaning of 0xe4 */
+	writel_relaxed((0xe4 << 24) | (priv->viu.osd1_addr & 0xffffff),
+		       priv->io_base + _REG(OSD1_AFBCD_CHROMA_PTR));
+
+	if (priv->viu.osd1_width <= 128)
+		conv_lbuf_len = 32;
+	else if (priv->viu.osd1_width <= 256)
+		conv_lbuf_len = 64;
+	else if (priv->viu.osd1_width <= 512)
+		conv_lbuf_len = 128;
+	else if (priv->viu.osd1_width <= 1024)
+		conv_lbuf_len = 256;
+	else if (priv->viu.osd1_width <= 2048)
+		conv_lbuf_len = 512;
+	else
+		conv_lbuf_len = 1024;
+
+	writel_relaxed(conv_lbuf_len,
+		       priv->io_base + _REG(OSD1_AFBCD_CONV_CTRL));
+
+	writel_relaxed(FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_BGN_H, 0) |
+		       FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_END_H,
+				  priv->viu.osd1_width - 1),
+		       priv->io_base + _REG(OSD1_AFBCD_PIXEL_HSCOPE));
+
+	writel_relaxed(FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_BGN_V, 0) |
+		       FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_END_V,
+				  priv->viu.osd1_height - 1),
+		       priv->io_base + _REG(OSD1_AFBCD_PIXEL_VSCOPE));
+
+	return 0;
+}
+
+struct meson_afbcd_ops meson_afbcd_gxm_ops = {
+	.init = meson_gxm_afbcd_init,
+	.reset = meson_gxm_afbcd_reset,
+	.enable = meson_gxm_afbcd_enable,
+	.disable = meson_gxm_afbcd_disable,
+	.setup = meson_gxm_afbcd_setup,
+	.supported_fmt = meson_gxm_afbcd_supported_fmt,
+};
+
+/* ARM AFBC Decoder for G12A Family */
+
+/* Amlogic G12A Mali AFBC Decoder supported formats */
+enum {
+	MAFBC_FMT_RGB565 = 0,
+	MAFBC_FMT_RGBA5551,
+	MAFBC_FMT_RGBA1010102,
+	MAFBC_FMT_YUV420_10B,
+	MAFBC_FMT_RGB888,
+	MAFBC_FMT_RGBA8888,
+	MAFBC_FMT_RGBA4444,
+	MAFBC_FMT_R8,
+	MAFBC_FMT_RG88,
+	MAFBC_FMT_YUV420_8B,
+	MAFBC_FMT_YUV422_8B = 11,
+	MAFBC_FMT_YUV422_10B = 14,
+};
+
+static int meson_g12a_afbcd_pixel_fmt(u64 modifier, uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		/* YTR is forbidden for non XBGR formats */
+		if (modifier & AFBC_FORMAT_MOD_YTR)
+			return -EINVAL;
+	/* fall through */
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		return MAFBC_FMT_RGBA8888;
+	case DRM_FORMAT_RGB888:
+		/* YTR is forbidden for non XBGR formats */
+		if (modifier & AFBC_FORMAT_MOD_YTR)
+			return -EINVAL;
+		return MAFBC_FMT_RGB888;
+	case DRM_FORMAT_RGB565:
+		/* YTR is forbidden for non XBGR formats */
+		if (modifier & AFBC_FORMAT_MOD_YTR)
+			return -EINVAL;
+		return MAFBC_FMT_RGB565;
+	/* TOFIX: support more formats */
+	default:
+		DRM_DEBUG("unsupported afbc format[%08x]\n", format);
+		return -EINVAL;
+	}
+}
+
+static int meson_g12a_afbcd_bpp(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		return 32;
+	case DRM_FORMAT_RGB888:
+		return 24;
+	case DRM_FORMAT_RGB565:
+		return 16;
+	/* TOFIX: support more formats */
+	default:
+		DRM_ERROR("unsupported afbc format[%08x]\n", format);
+		return 0;
+	}
+}
+
+static int meson_g12a_afbcd_fmt_to_blk_mode(u64 modifier, uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		return OSD_MALI_COLOR_MODE_RGBA8888;
+	case DRM_FORMAT_RGB888:
+		return OSD_MALI_COLOR_MODE_RGB888;
+	case DRM_FORMAT_RGB565:
+		return OSD_MALI_COLOR_MODE_RGB565;
+	/* TOFIX: support more formats */
+	default:
+		DRM_DEBUG("unsupported afbc format[%08x]\n", format);
+		return -EINVAL;
+	}
+}
+
+static bool meson_g12a_afbcd_supported_fmt(u64 modifier, uint32_t format)
+{
+	return meson_g12a_afbcd_pixel_fmt(modifier, format) >= 0;
+}
+
+static int meson_g12a_afbcd_init(struct meson_drm *priv)
+{
+	int ret;
+
+	ret = meson_rdma_init(priv);
+	if (ret)
+		return ret;
+
+	meson_rdma_setup(priv);
+
+	/* Handle AFBC Decoder reset manually */
+	writel_bits_relaxed(MALI_AFBCD_MANUAL_RESET, MALI_AFBCD_MANUAL_RESET,
+			    priv->io_base + _REG(MALI_AFBCD_TOP_CTRL));
+
+	return 0;
+}
+
+static int meson_g12a_afbcd_reset(struct meson_drm *priv)
+{
+	meson_rdma_reset(priv);
+
+	meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB |
+			       VIU_SW_RESET_G12A_OSD1_AFBCD,
+			       VIU_SW_RESET);
+	meson_rdma_writel_sync(priv, 0, VIU_SW_RESET);
+
+	return 0;
+}
+
+static int meson_g12a_afbcd_enable(struct meson_drm *priv)
+{
+	meson_rdma_writel_sync(priv, VPU_MAFBC_IRQ_SURFACES_COMPLETED |
+			       VPU_MAFBC_IRQ_CONFIGURATION_SWAPPED |
+			       VPU_MAFBC_IRQ_DECODE_ERROR |
+			       VPU_MAFBC_IRQ_DETILING_ERROR,
+			       VPU_MAFBC_IRQ_MASK);
+
+	meson_rdma_writel_sync(priv, VPU_MAFBC_S0_ENABLE,
+			       VPU_MAFBC_SURFACE_CFG);
+
+	meson_rdma_writel_sync(priv, VPU_MAFBC_DIRECT_SWAP,
+			       VPU_MAFBC_COMMAND);
+
+	/* Enable the RDMA to replay the queued register writes on vsync */
+	meson_rdma_flush(priv);
+
+	return 0;
+}
+
+static int meson_g12a_afbcd_disable(struct meson_drm *priv)
+{
+	writel_bits_relaxed(VPU_MAFBC_S0_ENABLE, 0,
+			    priv->io_base + _REG(VPU_MAFBC_SURFACE_CFG));
+
+	return 0;
+}
+
+static int meson_g12a_afbcd_setup(struct meson_drm *priv)
+{
+	u32 format = meson_g12a_afbcd_pixel_fmt(priv->afbcd.modifier,
+						priv->afbcd.format);
+
+	if (priv->afbcd.modifier & AFBC_FORMAT_MOD_YTR)
+		format |= VPU_MAFBC_YUV_TRANSFORM;
+
+	if (priv->afbcd.modifier & AFBC_FORMAT_MOD_SPLIT)
+		format |= VPU_MAFBC_BLOCK_SPLIT;
+
+	if (priv->afbcd.modifier & AFBC_FORMAT_MOD_TILED)
+		format |= VPU_MAFBC_TILED_HEADER_EN;
+
+	if ((priv->afbcd.modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) ==
+		AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
+		format |= FIELD_PREP(VPU_MAFBC_SUPER_BLOCK_ASPECT, 1);
+
+	meson_rdma_writel_sync(priv, format,
+			       VPU_MAFBC_FORMAT_SPECIFIER_S0);
+
+	meson_rdma_writel_sync(priv, priv->viu.osd1_addr,
+			       VPU_MAFBC_HEADER_BUF_ADDR_LOW_S0);
+	meson_rdma_writel_sync(priv, 0,
+			       VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S0);
+
+	meson_rdma_writel_sync(priv, priv->viu.osd1_width,
+			       VPU_MAFBC_BUFFER_WIDTH_S0);
+	meson_rdma_writel_sync(priv, ALIGN(priv->viu.osd1_height, 32),
+			       VPU_MAFBC_BUFFER_HEIGHT_S0);
+
+	meson_rdma_writel_sync(priv, 0,
+			       VPU_MAFBC_BOUNDING_BOX_X_START_S0);
+	meson_rdma_writel_sync(priv, priv->viu.osd1_width - 1,
+			       VPU_MAFBC_BOUNDING_BOX_X_END_S0);
+	meson_rdma_writel_sync(priv, 0,
+			       VPU_MAFBC_BOUNDING_BOX_Y_START_S0);
+	meson_rdma_writel_sync(priv, priv->viu.osd1_height - 1,
+			       VPU_MAFBC_BOUNDING_BOX_Y_END_S0);
+
+	meson_rdma_writel_sync(priv, MESON_G12A_AFBCD_OUT_ADDR,
+			       VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S0);
+	meson_rdma_writel_sync(priv, 0,
+			       VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S0);
+
+	meson_rdma_writel_sync(priv, priv->viu.osd1_width *
+			       (meson_g12a_afbcd_bpp(priv->afbcd.format) / 8),
+			       VPU_MAFBC_OUTPUT_BUF_STRIDE_S0);
+
+	return 0;
+}
+
+struct meson_afbcd_ops meson_afbcd_g12a_ops = {
+	.init = meson_g12a_afbcd_init,
+	.reset = meson_g12a_afbcd_reset,
+	.enable = meson_g12a_afbcd_enable,
+	.disable = meson_g12a_afbcd_disable,
+	.setup = meson_g12a_afbcd_setup,
+	.fmt_to_blk_mode = meson_g12a_afbcd_fmt_to_blk_mode,
+	.supported_fmt = meson_g12a_afbcd_supported_fmt,
+};
diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.h b/drivers/gpu/drm/meson/meson_osd_afbcd.h
new file mode 100644
index 0000000..5e55233
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_osd_afbcd.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef __MESON_OSD_AFBCD_H
+#define __MESON_OSD_AFBCD_H
+
+#include "meson_drv.h"
+
+/* This is an internal address used to transfer pixels from the AFBC decoder to the VIU */
+#define MESON_G12A_AFBCD_OUT_ADDR	0x1000000
+
+struct meson_afbcd_ops {
+	int (*init)(struct meson_drm *priv);
+	int (*reset)(struct meson_drm *priv);
+	int (*enable)(struct meson_drm *priv);
+	int (*disable)(struct meson_drm *priv);
+	int (*setup)(struct meson_drm *priv);
+	int (*fmt_to_blk_mode)(u64 modifier, uint32_t format);
+	bool (*supported_fmt)(u64 modifier, uint32_t format);
+};
+
+extern struct meson_afbcd_ops meson_afbcd_gxm_ops;
+extern struct meson_afbcd_ops meson_afbcd_g12a_ops;
+
+#endif /* __MESON_OSD_AFBCD_H */
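
Both decoder generations are driven through the same meson_afbcd_ops vtable, so the plane code only has to call init/reset/setup/enable/disable plus the format helpers. Below is a minimal sketch of that dispatch, assuming the ops pointer is selected from the SoC compatible the same way the plane code checks it; the example_afbcd_commit() helper is illustrative only, the real hookup lives in the driver probe/CRTC code outside this hunk.

	static int example_afbcd_commit(struct meson_drm *priv)
	{
		int ret;

		/* Pick the decoder implementation for the detected SoC */
		if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM))
			priv->afbcd.ops = &meson_afbcd_gxm_ops;
		else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
			priv->afbcd.ops = &meson_afbcd_g12a_ops;
		else
			return -ENODEV;

		ret = priv->afbcd.ops->init(priv);
		if (ret)
			return ret;

		/* Per-frame sequence: reset the decoder, program the surface, enable */
		priv->afbcd.ops->reset(priv);
		priv->afbcd.ops->setup(priv);
		priv->afbcd.ops->enable(priv);

		return 0;
	}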
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index ed54322..d5cbc47 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -23,6 +23,7 @@
 #include "meson_plane.h"
 #include "meson_registers.h"
 #include "meson_viu.h"
+#include "meson_osd_afbcd.h"
 
 /* OSD_SCI_WH_M1 */
 #define SCI_WH_M1_W(w)			FIELD_PREP(GENMASK(28, 16), w)
@@ -92,12 +93,38 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
 						   false, true);
 }
 
+#define MESON_MOD_AFBC_VALID_BITS (AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |	\
+				   AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |	\
+				   AFBC_FORMAT_MOD_YTR |		\
+				   AFBC_FORMAT_MOD_SPARSE |		\
+				   AFBC_FORMAT_MOD_SPLIT)
+
 /* Takes a fixed 16.16 number and converts it to integer. */
 static inline int64_t fixed16_to_int(int64_t value)
 {
 	return value >> 16;
 }
 
+static u32 meson_g12a_afbcd_line_stride(struct meson_drm *priv)
+{
+	u32 line_stride = 0;
+
+	switch (priv->afbcd.format) {
+	case DRM_FORMAT_RGB565:
+		line_stride = ((priv->viu.osd1_width << 4) + 127) >> 7;
+		break;
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		line_stride = ((priv->viu.osd1_width << 5) + 127) >> 7;
+		break;
+	}
+
+	return ((line_stride + 1) >> 1) << 1;
+}
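
The helper returns the decoder output stride in 128-bit units: the width is converted to bits, divided by 128 with rounding up, then rounded up to an even count. As a worked example with a hypothetical 1920-pixel-wide ARGB8888 buffer (a value picked for illustration, not from the patch): 1920 << 5 = 61440 bits, (61440 + 127) >> 7 = 480 units of 128 bits, and ((480 + 1) >> 1) << 1 = 480, which is already even.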
+
 static void meson_plane_atomic_update(struct drm_plane *plane,
 				      struct drm_plane_state *old_state)
 {
@@ -126,59 +153,91 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
 	 */
 	spin_lock_irqsave(&priv->drm->event_lock, flags);
 
+	/* Check if AFBC decoder is required for this buffer */
+	if ((meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+	     meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) &&
+	    fb->modifier & DRM_FORMAT_MOD_ARM_AFBC(MESON_MOD_AFBC_VALID_BITS))
+		priv->viu.osd1_afbcd = true;
+	else
+		priv->viu.osd1_afbcd = false;
+
 	/* Enable OSD and BLK0, set max global alpha */
 	priv->viu.osd1_ctrl_stat = OSD_ENABLE |
 				   (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
 				   OSD_BLK0_ENABLE;
 
+	priv->viu.osd1_ctrl_stat2 = readl(priv->io_base +
+					  _REG(VIU_OSD1_CTRL_STAT2));
+
 	canvas_id_osd1 = priv->canvas_id_osd1;
 
 	/* Set up BLK0 to point to the right canvas */
-	priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) |
-				      OSD_ENDIANNESS_LE);
+	priv->viu.osd1_blk0_cfg[0] = canvas_id_osd1 << OSD_CANVAS_SEL;
+
+	if (priv->viu.osd1_afbcd) {
+		if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+			/* This is the internal decoding memory address */
+			priv->viu.osd1_blk1_cfg4 = MESON_G12A_AFBCD_OUT_ADDR;
+			priv->viu.osd1_blk0_cfg[0] |= OSD_ENDIANNESS_BE;
+			priv->viu.osd1_ctrl_stat2 |= OSD_PENDING_STAT_CLEAN;
+			priv->viu.osd1_ctrl_stat |= VIU_OSD1_CFG_SYN_EN;
+		}
+
+		if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM)) {
+			priv->viu.osd1_blk0_cfg[0] |= OSD_ENDIANNESS_LE;
+			priv->viu.osd1_ctrl_stat2 |= OSD_DPATH_MALI_AFBCD;
+		}
+	} else {
+		priv->viu.osd1_blk0_cfg[0] |= OSD_ENDIANNESS_LE;
+
+		if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM))
+			priv->viu.osd1_ctrl_stat2 &= ~OSD_DPATH_MALI_AFBCD;
+	}
 
 	/* On GXBB, Use the old non-HDR RGB2YUV converter */
 	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
 		priv->viu.osd1_blk0_cfg[0] |= OSD_OUTPUT_COLOR_RGB;
 
+	if (priv->viu.osd1_afbcd &&
+	    meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+		priv->viu.osd1_blk0_cfg[0] |= OSD_MALI_SRC_EN |
+			priv->afbcd.ops->fmt_to_blk_mode(fb->modifier,
+							  fb->format->format);
+	} else {
+		switch (fb->format->format) {
+		case DRM_FORMAT_XRGB8888:
+		case DRM_FORMAT_ARGB8888:
+			priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+						OSD_COLOR_MATRIX_32_ARGB;
+			break;
+		case DRM_FORMAT_XBGR8888:
+		case DRM_FORMAT_ABGR8888:
+			priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+						OSD_COLOR_MATRIX_32_ABGR;
+			break;
+		case DRM_FORMAT_RGB888:
+			priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
+						OSD_COLOR_MATRIX_24_RGB;
+			break;
+		case DRM_FORMAT_RGB565:
+			priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 |
+						OSD_COLOR_MATRIX_16_RGB565;
+			break;
+		}
+	}
+
 	switch (fb->format->format) {
 	case DRM_FORMAT_XRGB8888:
-		/* For XRGB, replace the pixel's alpha by 0xFF */
-		writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
-				    priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
-		priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
-					      OSD_COLOR_MATRIX_32_ARGB;
-		break;
 	case DRM_FORMAT_XBGR8888:
 		/* For XRGB, replace the pixel's alpha by 0xFF */
-		writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
-				    priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
-		priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
-					      OSD_COLOR_MATRIX_32_ABGR;
+		priv->viu.osd1_ctrl_stat2 |= OSD_REPLACE_EN;
 		break;
 	case DRM_FORMAT_ARGB8888:
-		/* For ARGB, use the pixel's alpha */
-		writel_bits_relaxed(OSD_REPLACE_EN, 0,
-				    priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
-		priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
-					      OSD_COLOR_MATRIX_32_ARGB;
-		break;
 	case DRM_FORMAT_ABGR8888:
 		/* For ARGB, use the pixel's alpha */
-		writel_bits_relaxed(OSD_REPLACE_EN, 0,
-				    priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
-		priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
-					      OSD_COLOR_MATRIX_32_ABGR;
+		priv->viu.osd1_ctrl_stat2 &= ~OSD_REPLACE_EN;
 		break;
-	case DRM_FORMAT_RGB888:
-		priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
-					      OSD_COLOR_MATRIX_24_RGB;
-		break;
-	case DRM_FORMAT_RGB565:
-		priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 |
-					      OSD_COLOR_MATRIX_16_RGB565;
-		break;
-	};
+	}
 
 	/* Default scaler parameters */
 	vsc_bot_rcv_num = 0;
@@ -305,6 +364,17 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
 	priv->viu.osd1_addr = gem->paddr;
 	priv->viu.osd1_stride = fb->pitches[0];
 	priv->viu.osd1_height = fb->height;
+	priv->viu.osd1_width = fb->width;
+
+	if (priv->viu.osd1_afbcd) {
+		priv->afbcd.modifier = fb->modifier;
+		priv->afbcd.format = fb->format->format;
+
+		/* Calculate decoder write stride */
+		if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+			priv->viu.osd1_blk2_cfg4 =
+				meson_g12a_afbcd_line_stride(priv);
+	}
 
 	if (!meson_plane->enabled) {
 		/* Reset OSD1 before enabling it on GXL+ SoCs */
@@ -326,6 +396,11 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
 	struct meson_plane *meson_plane = to_meson_plane(plane);
 	struct meson_drm *priv = meson_plane->priv;
 
+	if (priv->afbcd.ops) {
+		priv->afbcd.ops->reset(priv);
+		priv->afbcd.ops->disable(priv);
+	}
+
 	/* Disable OSD1 */
 	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
 		writel_bits_relaxed(VIU_OSD1_POSTBLD_SRC_OSD1, 0,
@@ -345,6 +420,42 @@ static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {
 	.prepare_fb	= drm_gem_fb_prepare_fb,
 };
 
+static bool meson_plane_format_mod_supported(struct drm_plane *plane,
+					     u32 format, u64 modifier)
+{
+	struct meson_plane *meson_plane = to_meson_plane(plane);
+	struct meson_drm *priv = meson_plane->priv;
+	int i;
+
+	if (modifier == DRM_FORMAT_MOD_INVALID)
+		return false;
+
+	if (modifier == DRM_FORMAT_MOD_LINEAR)
+		return true;
+
+	if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) &&
+	    !meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+		return false;
+
+	if (modifier & ~DRM_FORMAT_MOD_ARM_AFBC(MESON_MOD_AFBC_VALID_BITS))
+		return false;
+
+	for (i = 0 ; i < plane->modifier_count ; ++i)
+		if (plane->modifiers[i] == modifier)
+			break;
+
+	if (i == plane->modifier_count) {
+		DRM_DEBUG_KMS("Unsupported modifier\n");
+		return false;
+	}
+
+	if (priv->afbcd.ops && priv->afbcd.ops->supported_fmt)
+		return priv->afbcd.ops->supported_fmt(modifier, format);
+
+	DRM_DEBUG_KMS("AFBC Unsupported\n");
+	return false;
+}
+
 static const struct drm_plane_funcs meson_plane_funcs = {
 	.update_plane		= drm_atomic_helper_update_plane,
 	.disable_plane		= drm_atomic_helper_disable_plane,
@@ -352,6 +463,7 @@ static const struct drm_plane_funcs meson_plane_funcs = {
 	.reset			= drm_atomic_helper_plane_reset,
 	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
+	.format_mod_supported   = meson_plane_format_mod_supported,
 };
 
 static const uint32_t supported_drm_formats[] = {
@@ -363,10 +475,60 @@ static const uint32_t supported_drm_formats[] = {
 	DRM_FORMAT_RGB565,
 };
 
+static const uint64_t format_modifiers_afbc_gxm[] = {
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+				AFBC_FORMAT_MOD_SPARSE |
+				AFBC_FORMAT_MOD_YTR),
+	/* SPLIT mandates SPARSE, RGB modes mandate YTR */
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+				AFBC_FORMAT_MOD_YTR |
+				AFBC_FORMAT_MOD_SPARSE |
+				AFBC_FORMAT_MOD_SPLIT),
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID,
+};
+
+static const uint64_t format_modifiers_afbc_g12a[] = {
+	/*
+	 * - TOFIX Support AFBC modifiers for YUV formats (16x16 + TILED)
+	 * - SPLIT is mandatory for performance reasons when in 16x16
+	 *   block size
+	 * - 32x8 block size + SPLIT is mandatory with 4K frame size
+	 *   for performance reasons
+	 */
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+				AFBC_FORMAT_MOD_SPARSE |
+				AFBC_FORMAT_MOD_SPLIT),
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+				AFBC_FORMAT_MOD_YTR |
+				AFBC_FORMAT_MOD_SPARSE |
+				AFBC_FORMAT_MOD_SPLIT),
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+				AFBC_FORMAT_MOD_SPARSE),
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+				AFBC_FORMAT_MOD_YTR |
+				AFBC_FORMAT_MOD_SPARSE),
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+				AFBC_FORMAT_MOD_SPARSE |
+				AFBC_FORMAT_MOD_SPLIT),
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+				AFBC_FORMAT_MOD_YTR |
+				AFBC_FORMAT_MOD_SPARSE |
+				AFBC_FORMAT_MOD_SPLIT),
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID,
+};
+
+static const uint64_t format_modifiers_default[] = {
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID,
+};
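
These tables advertise which AFBC layouts the plane accepts; userspace picks one of them when creating a framebuffer. A rough sketch of that side, assuming the usual libdrm drmModeAddFB2WithModifiers() call (the function and its argument layout are recalled from the libdrm API, not taken from this patch, and example_addfb_afbc() is purely illustrative):

	#include <stdint.h>
	#include <xf86drmMode.h>
	#include <drm_fourcc.h>

	static int example_addfb_afbc(int fd, uint32_t width, uint32_t height,
				      uint32_t bo_handle, uint32_t pitch)
	{
		uint32_t handles[4] = { bo_handle };
		uint32_t pitches[4] = { pitch };
		uint32_t offsets[4] = { 0 };
		/* One of the modifiers listed in the tables above */
		uint64_t modifiers[4] = {
			DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
						AFBC_FORMAT_MOD_YTR |
						AFBC_FORMAT_MOD_SPARSE |
						AFBC_FORMAT_MOD_SPLIT),
		};
		uint32_t fb_id;

		return drmModeAddFB2WithModifiers(fd, width, height,
						  DRM_FORMAT_XBGR8888,
						  handles, pitches, offsets,
						  modifiers, &fb_id,
						  DRM_MODE_FB_MODIFIERS);
	}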
+
 int meson_plane_create(struct meson_drm *priv)
 {
 	struct meson_plane *meson_plane;
 	struct drm_plane *plane;
+	const uint64_t *format_modifiers = format_modifiers_default;
 
 	meson_plane = devm_kzalloc(priv->drm->dev, sizeof(*meson_plane),
 				   GFP_KERNEL);
@@ -376,11 +538,16 @@ int meson_plane_create(struct meson_drm *priv)
 	meson_plane->priv = priv;
 	plane = &meson_plane->base;
 
+	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM))
+		format_modifiers = format_modifiers_afbc_gxm;
+	else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+		format_modifiers = format_modifiers_afbc_g12a;
+
 	drm_universal_plane_init(priv->drm, plane, 0xFF,
 				 &meson_plane_funcs,
 				 supported_drm_formats,
 				 ARRAY_SIZE(supported_drm_formats),
-				 NULL,
+				 format_modifiers,
 				 DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
 
 	drm_plane_helper_add(plane, &meson_plane_helper_funcs);
diff --git a/drivers/gpu/drm/meson/meson_rdma.c b/drivers/gpu/drm/meson/meson_rdma.c
new file mode 100644
index 0000000..1303821
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_rdma.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+
+#include "meson_drv.h"
+#include "meson_registers.h"
+#include "meson_rdma.h"
+
+/*
+ * The VPU embeds a "Register DMA" that can write a sequence of registers
+ * on the VPU AHB bus, either manually or triggered by an internal IRQ
+ * event like VSYNC or a line input counter.
+ * The initial implementation handles a single channel (out of 8), triggered
+ * by the VSYNC irq and does not handle the RDMA irq.
+ */
+
+#define RDMA_DESC_SIZE	(sizeof(uint32_t) * 2)
+
+int meson_rdma_init(struct meson_drm *priv)
+{
+	if (!priv->rdma.addr) {
+		/* Allocate a PAGE buffer */
+		priv->rdma.addr =
+			dma_alloc_coherent(priv->dev, SZ_4K,
+					   &priv->rdma.addr_dma,
+					   GFP_KERNEL);
+		if (!priv->rdma.addr)
+			return -ENOMEM;
+	}
+
+	priv->rdma.offset = 0;
+
+	writel_relaxed(RDMA_CTRL_SW_RESET,
+		       priv->io_base + _REG(RDMA_CTRL));
+	writel_relaxed(RDMA_DEFAULT_CONFIG |
+		       FIELD_PREP(RDMA_CTRL_AHB_WR_BURST, 3) |
+		       FIELD_PREP(RDMA_CTRL_AHB_RD_BURST, 0),
+		       priv->io_base + _REG(RDMA_CTRL));
+
+	return 0;
+}
+
+void meson_rdma_free(struct meson_drm *priv)
+{
+	if (!priv->rdma.addr && !priv->rdma.addr_dma)
+		return;
+
+	meson_rdma_stop(priv);
+
+	dma_free_coherent(priv->dev, SZ_4K,
+			  priv->rdma.addr, priv->rdma.addr_dma);
+
+	priv->rdma.addr = NULL;
+	priv->rdma.addr_dma = (dma_addr_t)0;
+}
+
+void meson_rdma_setup(struct meson_drm *priv)
+{
+	/* Channel 1: Write Flag, No Address Increment */
+	writel_bits_relaxed(RDMA_ACCESS_RW_FLAG_CHAN1 |
+			    RDMA_ACCESS_ADDR_INC_CHAN1,
+			    RDMA_ACCESS_RW_FLAG_CHAN1,
+			    priv->io_base + _REG(RDMA_ACCESS_AUTO));
+}
+
+void meson_rdma_stop(struct meson_drm *priv)
+{
+	writel_bits_relaxed(RDMA_IRQ_CLEAR_CHAN1,
+			    RDMA_IRQ_CLEAR_CHAN1,
+			    priv->io_base + _REG(RDMA_CTRL));
+
+	/* Stop Channel 1 */
+	writel_bits_relaxed(RDMA_ACCESS_TRIGGER_CHAN1,
+			    FIELD_PREP(RDMA_ACCESS_ADDR_INC_CHAN1,
+				       RDMA_ACCESS_TRIGGER_STOP),
+			    priv->io_base + _REG(RDMA_ACCESS_AUTO));
+}
+
+void meson_rdma_reset(struct meson_drm *priv)
+{
+	meson_rdma_stop(priv);
+
+	priv->rdma.offset = 0;
+}
+
+static void meson_rdma_writel(struct meson_drm *priv, uint32_t val,
+			      uint32_t reg)
+{
+	if (priv->rdma.offset >= (SZ_4K / RDMA_DESC_SIZE)) {
+		dev_warn_once(priv->dev, "%s: overflow\n", __func__);
+		return;
+	}
+
+	priv->rdma.addr[priv->rdma.offset++] = reg;
+	priv->rdma.addr[priv->rdma.offset++] = val;
+}
+
+/*
+ * This will add the register to the RDMA buffer and write it to the
+ * hardware at the same time.
+ * When meson_rdma_flush is called, the RDMA will replay the register
+ * writes in order.
+ */
+void meson_rdma_writel_sync(struct meson_drm *priv, uint32_t val, uint32_t reg)
+{
+	meson_rdma_writel(priv, val, reg);
+
+	writel_relaxed(val, priv->io_base + _REG(reg));
+}
+
+void meson_rdma_flush(struct meson_drm *priv)
+{
+	meson_rdma_stop(priv);
+
+	/* Start of Channel 1 register writes buffer */
+	writel(priv->rdma.addr_dma,
+	       priv->io_base + _REG(RDMA_AHB_START_ADDR_1));
+
+	/* Last byte on Channel 1 register writes buffer */
+	writel(priv->rdma.addr_dma + (priv->rdma.offset * RDMA_DESC_SIZE) - 1,
+	       priv->io_base + _REG(RDMA_AHB_END_ADDR_1));
+
+	/* Trigger Channel 1 on VSYNC event */
+	writel_bits_relaxed(RDMA_ACCESS_TRIGGER_CHAN1,
+			    FIELD_PREP(RDMA_ACCESS_TRIGGER_CHAN1,
+				       RDMA_ACCESS_TRIGGER_VSYNC),
+			    priv->io_base + _REG(RDMA_ACCESS_AUTO));
+
+	priv->rdma.offset = 0;
+}
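
A minimal usage sketch of this API, mirroring how the G12A AFBC decoder code above drives it (example_queue_on_vsync() is illustrative and not a function added by the patch):

	static int example_queue_on_vsync(struct meson_drm *priv)
	{
		int ret;

		/* Allocate the descriptor page and reset the RDMA block */
		ret = meson_rdma_init(priv);
		if (ret)
			return ret;

		/* Channel 1: register writes, no address increment */
		meson_rdma_setup(priv);

		/* Each call writes the register now and queues it for replay */
		meson_rdma_writel_sync(priv, VPU_MAFBC_S0_ENABLE, VPU_MAFBC_SURFACE_CFG);
		meson_rdma_writel_sync(priv, VPU_MAFBC_DIRECT_SWAP, VPU_MAFBC_COMMAND);

		/* Arm channel 1 so the queued writes are replayed on the next vsync */
		meson_rdma_flush(priv);

		return 0;
	}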
diff --git a/drivers/gpu/drm/meson/meson_rdma.h b/drivers/gpu/drm/meson/meson_rdma.h
new file mode 100644
index 0000000..3870bff
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_rdma.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef __MESON_RDMA_H
+#define __MESON_RDMA_H
+
+#include "meson_drv.h"
+
+int meson_rdma_init(struct meson_drm *priv);
+void meson_rdma_free(struct meson_drm *priv);
+void meson_rdma_setup(struct meson_drm *priv);
+void meson_rdma_reset(struct meson_drm *priv);
+void meson_rdma_stop(struct meson_drm *priv);
+
+void meson_rdma_writel_sync(struct meson_drm *priv, uint32_t val, uint32_t reg);
+void meson_rdma_flush(struct meson_drm *priv);
+
+#endif /* __MESON_RDMA_H */
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 05fce48..8ea0054 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -138,19 +138,25 @@
 #define VIU_ADDR_START 0x1a00
 #define VIU_ADDR_END 0x1aff
 #define VIU_SW_RESET 0x1a01
+#define		VIU_SW_RESET_OSD1_AFBCD		BIT(31)
+#define		VIU_SW_RESET_G12A_OSD1_AFBCD	BIT(21)
+#define		VIU_SW_RESET_G12A_AFBC_ARB	BIT(19)
 #define		VIU_SW_RESET_OSD1               BIT(0)
 #define VIU_MISC_CTRL0 0x1a06
 #define		VIU_CTRL0_VD1_AFBC_MASK         0x170000
 #define VIU_MISC_CTRL1 0x1a07
+#define		MALI_AFBC_MISC			GENMASK(15, 8)
 #define D2D3_INTF_LENGTH 0x1a08
 #define D2D3_INTF_CTRL0 0x1a09
 #define VIU_OSD1_CTRL_STAT 0x1a10
 #define		VIU_OSD1_OSD_BLK_ENABLE         BIT(0)
+#define		VIU_OSD1_OSD_MEM_MODE_LINEAR	BIT(2)
 #define		VIU_OSD1_POSTBLD_SRC_VD1        (1 << 8)
 #define		VIU_OSD1_POSTBLD_SRC_VD2        (2 << 8)
 #define		VIU_OSD1_POSTBLD_SRC_OSD1       (3 << 8)
 #define		VIU_OSD1_POSTBLD_SRC_OSD2       (4 << 8)
 #define		VIU_OSD1_OSD_ENABLE             BIT(21)
+#define		VIU_OSD1_CFG_SYN_EN             BIT(31)
 #define VIU_OSD1_CTRL_STAT2 0x1a2d
 #define VIU_OSD1_COLOR_ADDR 0x1a11
 #define VIU_OSD1_COLOR 0x1a12
@@ -181,6 +187,16 @@
 #define VIU_OSD1_FIFO_CTRL_STAT 0x1a2b
 #define VIU_OSD1_TEST_RDDATA 0x1a2c
 #define VIU_OSD1_PROT_CTRL 0x1a2e
+#define VIU_OSD1_MALI_UNPACK_CTRL 0x1a2f
+#define		VIU_OSD1_MALI_UNPACK_EN		BIT(31)
+#define		VIU_OSD1_MALI_AFBCD_R_REORDER	GENMASK(15, 12)
+#define		VIU_OSD1_MALI_AFBCD_G_REORDER	GENMASK(11, 8)
+#define		VIU_OSD1_MALI_AFBCD_B_REORDER	GENMASK(7, 4)
+#define		VIU_OSD1_MALI_AFBCD_A_REORDER	GENMASK(3, 0)
+#define		VIU_OSD1_MALI_REORDER_R		1
+#define		VIU_OSD1_MALI_REORDER_G		2
+#define		VIU_OSD1_MALI_REORDER_B		3
+#define		VIU_OSD1_MALI_REORDER_A		4
 #define VIU_OSD2_CTRL_STAT 0x1a30
 #define VIU_OSD2_CTRL_STAT2 0x1a4d
 #define VIU_OSD2_COLOR_ADDR 0x1a31
@@ -1195,11 +1211,59 @@
 #define RDMA_AHB_START_ADDR_7 0x110e
 #define RDMA_AHB_END_ADDR_7 0x110f
 #define RDMA_ACCESS_AUTO 0x1110
+#define		RDMA_ACCESS_TRIGGER_CHAN3	GENMASK(31, 24)
+#define		RDMA_ACCESS_TRIGGER_CHAN2	GENMASK(23, 16)
+#define		RDMA_ACCESS_TRIGGER_CHAN1	GENMASK(15, 8)
+#define		RDMA_ACCESS_TRIGGER_STOP	0
+#define		RDMA_ACCESS_TRIGGER_VSYNC	1
+#define		RDMA_ACCESS_TRIGGER_LINE	32
+#define		RDMA_ACCESS_RW_FLAG_CHAN3	BIT(7)
+#define		RDMA_ACCESS_RW_FLAG_CHAN2	BIT(6)
+#define		RDMA_ACCESS_RW_FLAG_CHAN1	BIT(5)
+#define		RDMA_ACCESS_ADDR_INC_CHAN3	BIT(3)
+#define		RDMA_ACCESS_ADDR_INC_CHAN2	BIT(2)
+#define		RDMA_ACCESS_ADDR_INC_CHAN1	BIT(1)
 #define RDMA_ACCESS_AUTO2 0x1111
+#define		RDMA_ACCESS_RW_FLAG_CHAN7	BIT(7)
+#define		RDMA_ACCESS_RW_FLAG_CHAN6	BIT(6)
+#define		RDMA_ACCESS_RW_FLAG_CHAN5	BIT(5)
+#define		RDMA_ACCESS_RW_FLAG_CHAN4	BIT(4)
+#define		RDMA_ACCESS_ADDR_INC_CHAN7	BIT(3)
+#define		RDMA_ACCESS_ADDR_INC_CHAN6	BIT(2)
+#define		RDMA_ACCESS_ADDR_INC_CHAN5	BIT(1)
+#define		RDMA_ACCESS_ADDR_INC_CHAN4	BIT(0)
 #define RDMA_ACCESS_AUTO3 0x1112
+#define		RDMA_ACCESS_TRIGGER_CHAN7	GENMASK(31, 24)
+#define		RDMA_ACCESS_TRIGGER_CHAN6	GENMASK(23, 16)
+#define		RDMA_ACCESS_TRIGGER_CHAN5	GENMASK(15, 8)
+#define		RDMA_ACCESS_TRIGGER_CHAN4	GENMASK(7, 0)
 #define RDMA_ACCESS_MAN 0x1113
+#define		RDMA_ACCESS_MAN_RW_FLAG		BIT(2)
+#define		RDMA_ACCESS_MAN_ADDR_INC	BIT(1)
+#define		RDMA_ACCESS_MAN_START		BIT(0)
 #define RDMA_CTRL 0x1114
+#define		RDMA_IRQ_CLEAR_CHAN7	BIT(31)
+#define		RDMA_IRQ_CLEAR_CHAN6	BIT(30)
+#define		RDMA_IRQ_CLEAR_CHAN5	BIT(29)
+#define		RDMA_IRQ_CLEAR_CHAN4	BIT(28)
+#define		RDMA_IRQ_CLEAR_CHAN3	BIT(27)
+#define		RDMA_IRQ_CLEAR_CHAN2	BIT(26)
+#define		RDMA_IRQ_CLEAR_CHAN1	BIT(25)
+#define		RDMA_IRQ_CLEAR_CHAN_MAN	BIT(24)
+#define		RDMA_DEFAULT_CONFIG	(BIT(7) | BIT(6))
+#define		RDMA_CTRL_AHB_WR_BURST	GENMASK(5, 4)
+#define		RDMA_CTRL_AHB_RD_BURST	GENMASK(3, 2)
+#define		RDMA_CTRL_SW_RESET	BIT(1)
+#define		RDMA_CTRL_FREE_CLK_EN	BIT(0)
 #define RDMA_STATUS 0x1115
+#define		RDMA_IRQ_STAT_CHAN7	BIT(31)
+#define		RDMA_IRQ_STAT_CHAN6	BIT(30)
+#define		RDMA_IRQ_STAT_CHAN5	BIT(29)
+#define		RDMA_IRQ_STAT_CHAN4	BIT(28)
+#define		RDMA_IRQ_STAT_CHAN3	BIT(27)
+#define		RDMA_IRQ_STAT_CHAN2	BIT(26)
+#define		RDMA_IRQ_STAT_CHAN1	BIT(25)
+#define		RDMA_IRQ_STAT_CHAN_MAN	BIT(24)
 #define RDMA_STATUS2 0x1116
 #define RDMA_STATUS3 0x1117
 #define L_GAMMA_CNTL_PORT 0x1400
@@ -1595,15 +1659,33 @@
 
 /* osd afbcd on gxtvbb */
 #define OSD1_AFBCD_ENABLE 0x31a0
+#define		OSD1_AFBCD_ID_FIFO_THRD			GENMASK(15, 9)
+#define		OSD1_AFBCD_DEC_ENABLE			BIT(8)
+#define		OSD1_AFBCD_FRM_START			BIT(0)
 #define OSD1_AFBCD_MODE 0x31a1
+#define		OSD1_AFBCD_SOFT_RESET			BIT(31)
+#define		OSD1_AFBCD_AXI_REORDER_MODE		BIT(28)
+#define		OSD1_AFBCD_MIF_URGENT			GENMASK(25, 24)
+#define		OSD1_AFBCD_HOLD_LINE_NUM		GENMASK(22, 16)
+#define		OSD1_AFBCD_RGBA_EXCHAN_CTRL		GENMASK(15, 8)
+#define		OSD1_AFBCD_HREG_BLOCK_SPLIT		BIT(6)
+#define		OSD1_AFBCD_HREG_HALF_BLOCK		BIT(5)
+#define		OSD1_AFBCD_HREG_PIXEL_PACKING_FMT	GENMASK(4, 0)
 #define OSD1_AFBCD_SIZE_IN 0x31a2
+#define		OSD1_AFBCD_HREG_VSIZE_IN		GENMASK(31, 16)
+#define		OSD1_AFBCD_HREG_HSIZE_IN		GENMASK(15, 0)
 #define OSD1_AFBCD_HDR_PTR 0x31a3
 #define OSD1_AFBCD_FRAME_PTR 0x31a4
 #define OSD1_AFBCD_CHROMA_PTR 0x31a5
 #define OSD1_AFBCD_CONV_CTRL 0x31a6
+#define		OSD1_AFBCD_CONV_LBUF_LEN		GENMASK(15, 0)
 #define OSD1_AFBCD_STATUS 0x31a8
 #define OSD1_AFBCD_PIXEL_HSCOPE 0x31a9
+#define		OSD1_AFBCD_DEC_PIXEL_BGN_H		GENMASK(31, 16)
+#define		OSD1_AFBCD_DEC_PIXEL_END_H		GENMASK(15, 0)
 #define OSD1_AFBCD_PIXEL_VSCOPE 0x31aa
+#define		OSD1_AFBCD_DEC_PIXEL_BGN_V		GENMASK(31, 16)
+#define		OSD1_AFBCD_DEC_PIXEL_END_V		GENMASK(15, 0)
 
 /* add for gxm and 962e dv core2 */
 #define DOLBY_CORE2A_SWAP_CTRL1	0x3434
@@ -1615,12 +1697,34 @@
 #define VPU_MAFBC_IRQ_CLEAR 0x3a02
 #define VPU_MAFBC_IRQ_MASK 0x3a03
 #define VPU_MAFBC_IRQ_STATUS 0x3a04
+#define		VPU_MAFBC_IRQ_SECURE_ID_ERROR		BIT(5)
+#define		VPU_MAFBC_IRQ_AXI_ERROR			BIT(4)
+#define		VPU_MAFBC_IRQ_DETILING_ERROR		BIT(3)
+#define		VPU_MAFBC_IRQ_DECODE_ERROR		BIT(2)
+#define		VPU_MAFBC_IRQ_CONFIGURATION_SWAPPED	BIT(1)
+#define		VPU_MAFBC_IRQ_SURFACES_COMPLETED	BIT(0)
 #define VPU_MAFBC_COMMAND 0x3a05
+#define		VPU_MAFBC_PENDING_SWAP	BIT(1)
+#define		VPU_MAFBC_DIRECT_SWAP	BIT(0)
 #define VPU_MAFBC_STATUS 0x3a06
+#define		VPU_MAFBC_ERROR		BIT(2)
+#define		VPU_MAFBC_SWAPPING	BIT(1)
+#define		VPU_MAFBC_ACTIVE	BIT(0)
 #define VPU_MAFBC_SURFACE_CFG 0x3a07
+#define		VPU_MAFBC_CONTINUOUS_DECODING_ENABLE	BIT(16)
+#define		VPU_MAFBC_S3_ENABLE			BIT(3)
+#define		VPU_MAFBC_S2_ENABLE			BIT(2)
+#define		VPU_MAFBC_S1_ENABLE			BIT(1)
+#define		VPU_MAFBC_S0_ENABLE			BIT(0)
 #define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S0 0x3a10
 #define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S0 0x3a11
 #define VPU_MAFBC_FORMAT_SPECIFIER_S0 0x3a12
+#define		VPU_MAFBC_PAYLOAD_LIMIT_EN	BIT(19)
+#define		VPU_MAFBC_TILED_HEADER_EN	BIT(18)
+#define		VPU_MAFBC_SUPER_BLOCK_ASPECT	GENMASK(17, 16)
+#define		VPU_MAFBC_BLOCK_SPLIT		BIT(9)
+#define		VPU_MAFBC_YUV_TRANSFORM		BIT(8)
+#define		VPU_MAFBC_PIXEL_FORMAT		GENMASK(3, 0)
 #define VPU_MAFBC_BUFFER_WIDTH_S0 0x3a13
 #define VPU_MAFBC_BUFFER_HEIGHT_S0 0x3a14
 #define VPU_MAFBC_BOUNDING_BOX_X_START_S0 0x3a15
@@ -1631,6 +1735,8 @@
 #define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S0 0x3a1a
 #define VPU_MAFBC_OUTPUT_BUF_STRIDE_S0 0x3a1b
 #define VPU_MAFBC_PREFETCH_CFG_S0 0x3a1c
+#define		VPU_MAFBC_PREFETCH_READ_DIRECTION_Y	BIT(1)
+#define		VPU_MAFBC_PREFETCH_READ_DIRECTION_X	BIT(0)
 
 #define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S1 0x3a30
 #define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S1 0x3a31
@@ -1677,7 +1783,11 @@
 #define DOLBY_PATH_CTRL 0x1a0c
 #define		DOLBY_BYPASS_EN(val)            (val & 0xf)
 #define OSD_PATH_MISC_CTRL 0x1a0e
+#define		OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD	BIT(4)
+#define		OSD_PATH_OSD_AXI_SEL_OSD2_AFBCD	BIT(5)
+#define		OSD_PATH_OSD_AXI_SEL_OSD3_AFBCD	BIT(6)
 #define MALI_AFBCD_TOP_CTRL 0x1a0f
+#define		MALI_AFBCD_MANUAL_RESET		BIT(23)
 
 #define VIU_OSD_BLEND_CTRL 0x39b0
 #define		VIU_OSD_BLEND_REORDER(dest, src)      ((src) << (dest * 4))
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 68cf2c2..304f8ff 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -7,6 +7,9 @@
  */
 
 #include <linux/export.h>
+#include <linux/bitfield.h>
+
+#include <drm/drm_fourcc.h>
 
 #include "meson_drv.h"
 #include "meson_viu.h"
@@ -335,6 +338,79 @@ void meson_viu_osd1_reset(struct meson_drm *priv)
 	meson_viu_load_matrix(priv);
 }
 
+#define OSD1_MALI_ORDER_ABGR				\
+	(FIELD_PREP(VIU_OSD1_MALI_AFBCD_A_REORDER,	\
+		    VIU_OSD1_MALI_REORDER_A) |		\
+	 FIELD_PREP(VIU_OSD1_MALI_AFBCD_B_REORDER,	\
+		    VIU_OSD1_MALI_REORDER_B) |		\
+	 FIELD_PREP(VIU_OSD1_MALI_AFBCD_G_REORDER,	\
+		    VIU_OSD1_MALI_REORDER_G) |		\
+	 FIELD_PREP(VIU_OSD1_MALI_AFBCD_R_REORDER,	\
+		    VIU_OSD1_MALI_REORDER_R))
+
+#define OSD1_MALI_ORDER_ARGB				\
+	(FIELD_PREP(VIU_OSD1_MALI_AFBCD_A_REORDER,	\
+		    VIU_OSD1_MALI_REORDER_A) |		\
+	 FIELD_PREP(VIU_OSD1_MALI_AFBCD_B_REORDER,	\
+		    VIU_OSD1_MALI_REORDER_R) |		\
+	 FIELD_PREP(VIU_OSD1_MALI_AFBCD_G_REORDER,	\
+		    VIU_OSD1_MALI_REORDER_G) |		\
+	 FIELD_PREP(VIU_OSD1_MALI_AFBCD_R_REORDER,	\
+		    VIU_OSD1_MALI_REORDER_B))
+
+void meson_viu_g12a_enable_osd1_afbc(struct meson_drm *priv)
+{
+	u32 afbc_order = OSD1_MALI_ORDER_ARGB;
+
+	/* Enable Mali AFBC Unpack */
+	writel_bits_relaxed(VIU_OSD1_MALI_UNPACK_EN,
+			    VIU_OSD1_MALI_UNPACK_EN,
+			    priv->io_base + _REG(VIU_OSD1_MALI_UNPACK_CTRL));
+
+	switch (priv->afbcd.format) {
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		afbc_order = OSD1_MALI_ORDER_ABGR;
+		break;
+	}
+
+	/* Setup RGBA Reordering */
+	writel_bits_relaxed(VIU_OSD1_MALI_AFBCD_A_REORDER |
+			    VIU_OSD1_MALI_AFBCD_B_REORDER |
+			    VIU_OSD1_MALI_AFBCD_G_REORDER |
+			    VIU_OSD1_MALI_AFBCD_R_REORDER,
+			    afbc_order,
+			    priv->io_base + _REG(VIU_OSD1_MALI_UNPACK_CTRL));
+
+	/* Select AFBCD path for OSD1 */
+	writel_bits_relaxed(OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD,
+			    OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD,
+			    priv->io_base + _REG(OSD_PATH_MISC_CTRL));
+}
+
+void meson_viu_g12a_disable_osd1_afbc(struct meson_drm *priv)
+{
+	/* Disable AFBCD path for OSD1 */
+	writel_bits_relaxed(OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD, 0,
+			    priv->io_base + _REG(OSD_PATH_MISC_CTRL));
+
+	/* Disable AFBCD unpack */
+	writel_bits_relaxed(VIU_OSD1_MALI_UNPACK_EN, 0,
+			    priv->io_base + _REG(VIU_OSD1_MALI_UNPACK_CTRL));
+}
+
+void meson_viu_gxm_enable_osd1_afbc(struct meson_drm *priv)
+{
+	writel_bits_relaxed(MALI_AFBC_MISC, FIELD_PREP(MALI_AFBC_MISC, 0x90),
+			    priv->io_base + _REG(VIU_MISC_CTRL1));
+}
+
+void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv)
+{
+	writel_bits_relaxed(MALI_AFBC_MISC, FIELD_PREP(MALI_AFBC_MISC, 0x00),
+			    priv->io_base + _REG(VIU_MISC_CTRL1));
+}
+
 static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length)
 {
 	uint32_t val = (((length & 0x80) % 24) / 12);
@@ -362,7 +438,7 @@ void meson_viu_init(struct meson_drm *priv)
 
 	/* Initialize OSD1 fifo control register */
 	reg = VIU_OSD_DDR_PRIORITY_URGENT |
-		VIU_OSD_HOLD_FIFO_LINES(4) |
+		VIU_OSD_HOLD_FIFO_LINES(31) |
 		VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */
 		VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */
 		VIU_OSD_FIFO_LIMITS(2);      /* fifo_lim: 2*16=32 */
@@ -420,8 +496,13 @@ void meson_viu_init(struct meson_drm *priv)
 
 		writel_bits_relaxed(DOLBY_BYPASS_EN(0xc), DOLBY_BYPASS_EN(0xc),
 				    priv->io_base + _REG(DOLBY_PATH_CTRL));
+
+		meson_viu_g12a_disable_osd1_afbc(priv);
 	}
 
+	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM))
+		meson_viu_gxm_disable_osd1_afbc(priv);
+
 	priv->viu.osd1_enabled = false;
 	priv->viu.osd1_commit = false;
 	priv->viu.osd1_interlace = false;
diff --git a/drivers/gpu/drm/meson/meson_viu.h b/drivers/gpu/drm/meson/meson_viu.h
index a112e8d..e4a2f24 100644
--- a/drivers/gpu/drm/meson/meson_viu.h
+++ b/drivers/gpu/drm/meson/meson_viu.h
@@ -10,6 +10,8 @@
 #define __MESON_VIU_H
 
 /* OSDx_BLKx_CFG */
+#define OSD_MALI_SRC_EN		BIT(30)
+
 #define OSD_CANVAS_SEL		16
 
 #define OSD_ENDIANNESS_LE	BIT(15)
@@ -33,21 +35,38 @@
 #define OSD_COLOR_MATRIX_16_RGB655	(0x00 << 2)
 #define OSD_COLOR_MATRIX_16_RGB565	(0x04 << 2)
 
+#define OSD_MALI_COLOR_MODE_R8		(0 << 8)
+#define OSD_MALI_COLOR_MODE_YUV422	(1 << 8)
+#define OSD_MALI_COLOR_MODE_RGB565	(2 << 8)
+#define OSD_MALI_COLOR_MODE_RGBA5551	(3 << 8)
+#define OSD_MALI_COLOR_MODE_RGBA4444	(4 << 8)
+#define OSD_MALI_COLOR_MODE_RGBA8888	(5 << 8)
+#define OSD_MALI_COLOR_MODE_RGB888	(7 << 8)
+#define OSD_MALI_COLOR_MODE_YUV422_10B	(8 << 8)
+#define OSD_MALI_COLOR_MODE_RGBA1010102	(9 << 8)
+
 #define OSD_INTERLACE_ENABLED	BIT(1)
 #define OSD_INTERLACE_ODD	BIT(0)
 #define OSD_INTERLACE_EVEN	(0)
 
 /* OSDx_CTRL_STAT */
 #define OSD_ENABLE		BIT(21)
+#define OSD_MEM_LINEAR_ADDR	BIT(2)
 #define OSD_BLK0_ENABLE		BIT(0)
 
 #define OSD_GLOBAL_ALPHA_SHIFT	12
 
 /* OSDx_CTRL_STAT2 */
+#define OSD_DPATH_MALI_AFBCD	BIT(15)
 #define OSD_REPLACE_EN		BIT(14)
 #define OSD_REPLACE_SHIFT	6
+#define OSD_PENDING_STAT_CLEAN	BIT(1)
 
 void meson_viu_osd1_reset(struct meson_drm *priv);
+void meson_viu_g12a_enable_osd1_afbc(struct meson_drm *priv);
+void meson_viu_g12a_disable_osd1_afbc(struct meson_drm *priv);
+void meson_viu_gxm_enable_osd1_afbc(struct meson_drm *priv);
+void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv);
 void meson_viu_init(struct meson_drm *priv);
 
 #endif /* __MESON_VIU_H */
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index d5deecb..66df516 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -32,6 +32,7 @@
 #define __MGA_DRV_H__
 
 #include <linux/irqreturn.h>
+#include <linux/pci.h>
 #include <linux/slab.h>
 
 #include <drm/drm_agpsupport.h>
@@ -40,7 +41,6 @@
 #include <drm/drm_ioctl.h>
 #include <drm/drm_irq.h>
 #include <drm/drm_legacy.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_print.h>
 #include <drm/drm_sarea.h>
 #include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index aed11f4..d60aa4b 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -8,8 +8,8 @@
 	select DRM_TTM_HELPER
 	help
 	 This is a KMS driver for the MGA G200 server chips, it
-         does not support the original MGA G200 or any of the desktop
-         chips. It requires 0.3.0 of the modesetting userspace driver,
-         and a version of mga driver that will fail on KMS enabled
-         devices.
+	 does not support the original MGA G200 or any of the desktop
+	 chips. It requires 0.3.0 of the modesetting userspace driver,
+	 and a version of mga driver that will fail on KMS enabled
+	 devices.
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 79711db..d491edd 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -5,7 +5,7 @@
  * Author: Christopher Harvey <charvey@matrox.com>
  */
 
-#include <drm/drm_pci.h>
+#include <linux/pci.h>
 
 #include "mgag200_drv.h"
 
@@ -208,8 +208,7 @@ int mgag200_cursor_init(struct mga_device *mdev)
 		return -ENOMEM;
 
 	for (i = 0; i < ncursors; ++i) {
-		gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
-					  size, 0, false);
+		gbo = drm_gem_vram_create(dev, size, 0);
 		if (IS_ERR(gbo)) {
 			ret = PTR_ERR(gbo);
 			goto err_drm_gem_vram_put;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index b113876..7a5bad2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -6,13 +6,13 @@
  *          Dave Airlie
  */
 
-#include <linux/module.h>
 #include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <drm/drm_ioctl.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_pciids.h>
 
 #include "mgag200_drv.h"
@@ -27,6 +27,10 @@ int mgag200_modeset = -1;
 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
 module_param_named(modeset, mgag200_modeset, int, 0400);
 
+int mgag200_hw_bug_no_startadd = -1;
+MODULE_PARM_DESC(hw_bug_no_startadd, "HW does not interpret scanout-buffer start address correctly");
+module_param_named(hw_bug_no_startadd, mgag200_hw_bug_no_startadd, int, 0400);
+
 static struct drm_driver driver;
 
 static const struct pci_device_id pciidlist[] = {
@@ -47,22 +51,66 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 
 static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	struct drm_device *dev;
+	int ret;
+
 	drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "mgag200drmfb");
 
-	return drm_get_pci_dev(pdev, ent, &driver);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	dev = drm_dev_alloc(&driver, &pdev->dev);
+	if (IS_ERR(dev)) {
+		ret = PTR_ERR(dev);
+		goto err_pci_disable_device;
+	}
+
+	dev->pdev = pdev;
+	pci_set_drvdata(pdev, dev);
+
+	ret = mgag200_driver_load(dev, ent->driver_data);
+	if (ret)
+		goto err_drm_dev_put;
+
+	ret = drm_dev_register(dev, ent->driver_data);
+	if (ret)
+		goto err_mgag200_driver_unload;
+
+	return 0;
+
+err_mgag200_driver_unload:
+	mgag200_driver_unload(dev);
+err_drm_dev_put:
+	drm_dev_put(dev);
+err_pci_disable_device:
+	pci_disable_device(pdev);
+	return ret;
 }
 
 static void mga_pci_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
-	drm_put_dev(dev);
+	drm_dev_unregister(dev);
+	mgag200_driver_unload(dev);
+	drm_dev_put(dev);
 }
 
 DEFINE_DRM_GEM_FOPS(mgag200_driver_fops);
 
 static bool mgag200_pin_bo_at_0(const struct mga_device *mdev)
 {
+	if (mgag200_hw_bug_no_startadd > 0) {
+		DRM_WARN_ONCE("Option hw_bug_no_startadd is enabled. Please "
+			      "report the output of 'lspci -vvnn' to "
+			      "<dri-devel@lists.freedesktop.org> if this "
+			      "option is required to make mgag200 work "
+			      "correctly on your system.\n");
+		return true;
+	} else if (!mgag200_hw_bug_no_startadd) {
+		return false;
+	}
 	return mdev->flags & MGAG200_FLAG_HW_BUG_NO_STARTADD;
 }
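
The new parameter is a tri-state override of the per-device MGAG200_FLAG_HW_BUG_NO_STARTADD flag: a value greater than 0 forces the workaround on (and prints the warning above asking for an 'lspci -vvnn' report), 0 forces it off, and the default of -1 falls through to the per-device flag. For example, booting with mgag200.hw_bug_no_startadd=1 on the kernel command line (or loading with modprobe mgag200 hw_bug_no_startadd=1) enables it unconditionally.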
 
@@ -86,14 +134,11 @@ int mgag200_driver_dumb_create(struct drm_file *file,
 	if (mgag200_pin_bo_at_0(mdev))
 		pg_align = PFN_UP(mdev->mc.vram_size);
 
-	return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev,
-					     pg_align, false, args);
+	return drm_gem_vram_fill_create_dumb(file, dev, pg_align, 0, args);
 }
 
 static struct drm_driver driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET,
-	.load = mgag200_driver_load,
-	.unload = mgag200_driver_unload,
 	.fops = &mgag200_driver_fops,
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 51d4037..9f463591 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -29,8 +29,7 @@
 #include <linux/export.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/i2c.h>
-
-#include <drm/drm_pci.h>
+#include <linux/pci.h>
 
 #include "mgag200_drv.h"
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index e1bc5b0..e278b6a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -8,9 +8,10 @@
  *          Dave Airlie
  */
 
+#include <linux/pci.h>
+
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_pci.h>
 
 #include "mgag200_drv.h"
 
@@ -118,8 +119,11 @@ static int mgag200_device_init(struct drm_device *dev,
 		return -ENOMEM;
 
 	/* stash G200 SE model number for later use */
-	if (IS_G200_SE(mdev))
+	if (IS_G200_SE(mdev)) {
 		mdev->unique_rev_id = RREG32(0x1e24);
+		DRM_DEBUG("G200 SE unique revision id is 0x%x\n",
+			  mdev->unique_rev_id);
+	}
 
 	ret = mga_vram_init(mdev);
 	if (ret)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 5ec6971..62a8e9c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -9,10 +9,10 @@
  */
 
 #include <linux/delay.h>
+#include <linux/pci.h>
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fourcc.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 99997d7..e896576 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -26,7 +26,7 @@
  * Authors: Dave Airlie <airlied@redhat.com>
  */
 
-#include <drm/drm_pci.h>
+#include <linux/pci.h>
 
 #include "mgag200_drv.h"
 
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 7ad1493..b67f888 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -506,6 +506,14 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 		goto fail;
 	}
 
+	/*
+	 * Set the ICC path to maximum speed for now by multiplying the fastest
+	 * frequency by the bus width (8). We'll want to scale this later on to
+	 * improve battery life.
+	 */
+	icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+	icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+
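+	/*
+	 * As a worked example, assuming a hypothetical fast_rate of 550 MHz
+	 * and the usual Bps_to_icc() conversion to kBps, this requests
+	 * (550000000 / 1000) * 8 = 4400000 kBps, i.e. roughly 4.4 GB/s of
+	 * peak bus bandwidth on the GPU and OCMEM paths.
+	 */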
 	return gpu;
 
 fail:
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index b01388a..253d8d8 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -591,6 +591,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 		goto fail;
 	}
 
+	/*
+	 * Set the ICC path to maximum speed for now by multiplying the fastest
+	 * frequency by the bus width (8). We'll want to scale this later on to
+	 * improve battery life.
+	 */
+	icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+	icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+
 	return gpu;
 
 fail:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index b02e204..7d9e63e 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -753,11 +753,18 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 		gpu->funcs->flush(gpu, gpu->rb[0]);
 		if (!a5xx_idle(gpu, gpu->rb[0]))
 			return -EINVAL;
-	} else {
-		/* Print a warning so if we die, we know why */
+	} else if (ret == -ENODEV) {
+		/*
+		 * This device does not use zap shader (but print a warning
+		 * just in case someone got their dt wrong.. hopefully they
+		 * have a debug UART to realize the error of their ways...
+		 * if you mess this up you are about to crash horribly)
+		 */
 		dev_warn_once(gpu->dev->dev,
 			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
 		gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+	} else {
+		return ret;
 	}
 
 	/* Last step - yield the ringbuffer */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index f44553e..ed78fee 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -16,11 +16,11 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
+- /home/smasetty/playarea/envytools/rnndb/adreno/a6xx.xml     ( 161969 bytes, from 2019-11-29 07:18:16)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
-Copyright (C) 2013-2018 by the following authors:
+Copyright (C) 2013-2019 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -2519,6 +2519,54 @@ static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
 
 #define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2			0x0000311a
 
+#define REG_A6XX_GBIF_SCACHE_CNTL1				0x00003c02
+
+#define REG_A6XX_GBIF_QSB_SIDE0					0x00003c03
+
+#define REG_A6XX_GBIF_QSB_SIDE1					0x00003c04
+
+#define REG_A6XX_GBIF_QSB_SIDE2					0x00003c05
+
+#define REG_A6XX_GBIF_QSB_SIDE3					0x00003c06
+
+#define REG_A6XX_GBIF_HALT					0x00003c45
+
+#define REG_A6XX_GBIF_HALT_ACK					0x00003c46
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_EN				0x00003cc0
+
+#define REG_A6XX_GBIF_PERF_CNT_SEL				0x00003cc2
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_SEL				0x00003cc3
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW0				0x00003cc4
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW1				0x00003cc5
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW2				0x00003cc6
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW3				0x00003cc7
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH0				0x00003cc8
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH1				0x00003cc9
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH2				0x00003cca
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH3				0x00003ccb
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW0				0x00003ccc
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW1				0x00003ccd
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW2				0x00003cce
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH0				0x00003ccf
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH1				0x00003cd0
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH2				0x00003cd1
+
 #define REG_A6XX_RB_WINDOW_OFFSET2				0x000088d4
 #define A6XX_RB_WINDOW_OFFSET2_WINDOW_OFFSET_DISABLE		0x80000000
 #define A6XX_RB_WINDOW_OFFSET2_X__MASK				0x00007fff
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 85f14fe..983afea 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
 
 #include <linux/clk.h>
 #include <linux/interconnect.h>
@@ -149,6 +149,8 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
 		if (freq == gmu->gpu_freqs[perf_index])
 			break;
 
+	gmu->current_perf_index = perf_index;
+
 	__a6xx_gmu_set_freq(gmu, perf_index);
 }
 
@@ -433,6 +435,8 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
 
 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	struct platform_device *pdev = to_platform_device(gmu->dev);
 	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
 	void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
@@ -480,20 +484,34 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
+
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
-	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+	if (adreno_is_a618(adreno_gpu))
+		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090);
+	else
+		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
+
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
+
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
-	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+	if (adreno_is_a618(adreno_gpu))
+		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
+	else
+		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+
+
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
-	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+	if (adreno_is_a618(adreno_gpu))
+		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090);
+	else
+		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
 
 	/* Setup GPU PDC */
@@ -741,8 +759,8 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
 	enable_irq(gmu->hfi_irq);
 
-	/* Set the GPU to the highest power frequency */
-	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+	/* Set the GPU to the current freq */
+	__a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
 
 	/*
 	 * "enable" the GX power domain which won't actually do anything but it
@@ -1166,6 +1184,8 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
 	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
 		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
 
+	gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
+
 	/* Build the list of RPMh votes that we'll send to the GMU */
 	return a6xx_gmu_rpmh_votes_init(gmu);
 }
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 39a26dd..2af91ed 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -63,6 +63,9 @@ struct a6xx_gmu {
 	struct clk_bulk_data *clocks;
 	struct clk *core_clk;
 
+	/* current performance index set externally */
+	int current_perf_index;
+
 	int nr_gpu_freqs;
 	unsigned long gpu_freqs[16];
 	u32 gx_arc_votes[16];
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index dc8ec2c..daf0780 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
 
 
 #include "msm_gem.h"
@@ -378,6 +378,18 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 	int ret;
 
+	/*
+	 * During a previous slumber, GBIF halt is asserted to ensure
+	 * no further transactions can go through the GPU before the
+	 * GPU headswitch is turned off.
+	 *
+	 * This halt is deasserted once the headswitch goes off, but in
+	 * case the headswitch does not go off, clear the GBIF halt here
+	 * to ensure the GPU wake-up doesn't fail because of halted GPU
+	 * transactions.
+	 */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+
 	/* Make sure the GMU keeps the GPU on while we set it up */
 	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
 
@@ -406,12 +418,17 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
 	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
 
-	/* enable hardware clockgating */
-	a6xx_set_hwcg(gpu, true);
+	/*
+	 * enable hardware clockgating
+	 * For now enable clock gating only for a630
+	 */
+	if (adreno_is_a630(adreno_gpu))
+		a6xx_set_hwcg(gpu, true);
 
-	/* VBIF start */
-	gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+	/* VBIF/GBIF start */
 	gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+	if (adreno_is_a630(adreno_gpu))
+		gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
 
 	/* Make all blocks contribute to the GPU BUSY perf counter */
 	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
@@ -537,12 +554,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 		a6xx_flush(gpu, gpu->rb[0]);
 		if (!a6xx_idle(gpu, gpu->rb[0]))
 			return -EINVAL;
-	} else {
-		/* Print a warning so if we die, we know why */
+	} else if (ret == -ENODEV) {
+		/*
+		 * This device does not use zap shader (but print a warning
+		 * just in case someone got their dt wrong.. hopefully they
+		 * have a debug UART to realize the error of their ways...
+		 * if you mess this up you are about to crash horribly)
+		 */
 		dev_warn_once(gpu->dev->dev,
 			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
 		gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
 		ret = 0;
+	} else {
+		return ret;
 	}
 
 out:
@@ -724,6 +748,39 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
 };
 
+#define GBIF_CLIENT_HALT_MASK             BIT(0)
+#define GBIF_ARB_HALT_MASK                BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+	struct msm_gpu *gpu = &adreno_gpu->base;
+
+	if (!a6xx_has_gbif(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+								0xf) == 0xf);
+		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+		return;
+	}
+
+	/* Halt new client requests on GBIF */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+	/* Halt all AXI requests on GBIF */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+	spin_until((gpu_read(gpu,  REG_A6XX_GBIF_HALT_ACK) &
+			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+	/*
+	 * GMU needs DDR access in slumber path. Deassert GBIF halt now
+	 * to allow for GMU to access system memory.
+	 */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
 static int a6xx_pm_resume(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -748,6 +805,16 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 
 	devfreq_suspend_device(gpu->devfreq.devfreq);
 
+	/*
+	 * Make sure the GMU is idle before continuing (because some transitions
+	 * may use VBIF)
+	 */
+	a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu);
+
+	/* Clear the VBIF pipe before shutting down */
+	/* FIXME: This accesses the GPU - do we need to make sure it is on? */
+	a6xx_bus_clear_pending_transactions(adreno_gpu);
+
 	return a6xx_gmu_stop(a6xx_gpu);
 }
 
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 6439955..7239b8b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */
 
 #ifndef __A6XX_GPU_H__
 #define __A6XX_GPU_H__
@@ -42,6 +42,13 @@ struct a6xx_gpu {
 #define A6XX_PROTECT_RDONLY(_reg, _len) \
 	((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
 
+static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
+{
+	if (adreno_is_a630(gpu))
+		return false;
+
+	return true;
+}
 
 int a6xx_gmu_resume(struct a6xx_gpu *gpu);
 int a6xx_gmu_stop(struct a6xx_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 691c1a27..d6023ba 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
 
 #include <linux/ascii85.h>
 #include "msm_gem.h"
@@ -320,6 +320,7 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
 {
 	struct resource *res;
 	void __iomem *cxdbg = NULL;
+	int nr_debugbus_blocks;
 
 	/* Set up the GX debug bus */
 
@@ -374,9 +375,11 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
 		cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
 	}
 
-	a6xx_state->debugbus = state_kcalloc(a6xx_state,
-		ARRAY_SIZE(a6xx_debugbus_blocks),
-		sizeof(*a6xx_state->debugbus));
+	nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) +
+		(a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0);
+
+	a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks,
+			sizeof(*a6xx_state->debugbus));
 
 	if (a6xx_state->debugbus) {
 		int i;
@@ -388,15 +391,31 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
 				&a6xx_state->debugbus[i]);
 
 		a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
+
+		/*
+		 * GBIF has the same debugbus as the other GPU blocks, so fall
+		 * back to the default path if the GPU uses GBIF. GBIF also
+		 * uses exactly the same ID as VBIF.
+		 */
+		if (a6xx_has_gbif(to_adreno_gpu(gpu))) {
+			a6xx_get_debugbus_block(gpu, a6xx_state,
+				&a6xx_gbif_debugbus_block,
+				&a6xx_state->debugbus[i]);
+
+			a6xx_state->nr_debugbus += 1;
+		}
 	}
 
-	a6xx_state->vbif_debugbus =
-		state_kcalloc(a6xx_state, 1,
-			sizeof(*a6xx_state->vbif_debugbus));
+	/* Dump the VBIF debugbus on applicable targets */
+	if (!a6xx_has_gbif(to_adreno_gpu(gpu))) {
+		a6xx_state->vbif_debugbus =
+			state_kcalloc(a6xx_state, 1,
+					sizeof(*a6xx_state->vbif_debugbus));
 
-	if (a6xx_state->vbif_debugbus)
-		a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
-			a6xx_state->vbif_debugbus);
+		if (a6xx_state->vbif_debugbus)
+			a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
+					a6xx_state->vbif_debugbus);
+	}
 
 	if (cxdbg) {
 		a6xx_state->cx_debugbus =
@@ -770,14 +789,16 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
 		&a6xx_state->gmu_registers[1]);
 }
 
+#define A6XX_GBIF_REGLIST_SIZE   1
 static void a6xx_get_registers(struct msm_gpu *gpu,
 		struct a6xx_gpu_state *a6xx_state,
 		struct a6xx_crashdumper *dumper)
 {
 	int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
 		ARRAY_SIZE(a6xx_reglist) +
-		ARRAY_SIZE(a6xx_hlsq_reglist);
+		ARRAY_SIZE(a6xx_hlsq_reglist) + A6XX_GBIF_REGLIST_SIZE;
 	int index = 0;
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 
 	a6xx_state->registers = state_kcalloc(a6xx_state,
 		count, sizeof(*a6xx_state->registers));
@@ -792,6 +813,15 @@ static void a6xx_get_registers(struct msm_gpu *gpu,
 			a6xx_state, &a6xx_ahb_reglist[i],
 			&a6xx_state->registers[index++]);
 
+	if (a6xx_has_gbif(adreno_gpu))
+		a6xx_get_ahb_gpu_registers(gpu,
+				a6xx_state, &a6xx_gbif_reglist,
+				&a6xx_state->registers[index++]);
+	else
+		a6xx_get_ahb_gpu_registers(gpu,
+				a6xx_state, &a6xx_vbif_reglist,
+				&a6xx_state->registers[index++]);
+
 	for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
 		a6xx_get_crashdumper_registers(gpu,
 			a6xx_state, &a6xx_reglist[i],
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 68cccfa..e67c20c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
 
 #ifndef _A6XX_CRASH_DUMP_H_
 #define _A6XX_CRASH_DUMP_H_
@@ -307,11 +307,20 @@ static const u32 a6xx_vbif_registers[] = {
 	0x3410, 0x3410, 0x3800, 0x3801,
 };
 
+static const u32 a6xx_gbif_registers[] = {
+	0x3C00, 0x3C0B, 0x3C40, 0x3C47, 0x3CC0, 0x3CD1, 0xE3A, 0xE3A,
+};
+
 static const struct a6xx_registers a6xx_ahb_reglist[] = {
 	REGS(a6xx_ahb_registers, 0, 0),
-	REGS(a6xx_vbif_registers, 0, 0),
 };
 
+static const struct a6xx_registers a6xx_vbif_reglist =
+			REGS(a6xx_vbif_registers, 0, 0);
+
+static const struct a6xx_registers a6xx_gbif_reglist =
+			REGS(a6xx_gbif_registers, 0, 0);
+
 static const u32 a6xx_gmu_gx_registers[] = {
 	/* GMU GX */
 	0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
@@ -422,6 +431,9 @@ static const struct a6xx_debugbus_block {
 	DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100),
 };
 
+static const struct a6xx_debugbus_block a6xx_gbif_debugbus_block =
+			DEBUGBUS(A6XX_DBGBUS_VBIF, 0x100);
+
 static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
 	DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
 	DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index fbbdf86..cb3a6e5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -167,6 +167,17 @@ static const struct adreno_info gpulist[] = {
 		.init = a5xx_gpu_init,
 		.zapfw = "a540_zap.mdt",
 	}, {
+		.rev = ADRENO_REV(6, 1, 8, ANY_ID),
+		.revn = 618,
+		.name = "A618",
+		.fw = {
+			[ADRENO_FW_SQE] = "a630_sqe.fw",
+			[ADRENO_FW_GMU] = "a630_gmu.bin",
+		},
+		.gmem = SZ_512K,
+		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+		.init = a6xx_gpu_init,
+	}, {
 		.rev = ADRENO_REV(6, 3, 0, ANY_ID),
 		.revn = 630,
 		.name = "A630",
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 0783e4b..7fd2982 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -26,6 +26,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
 {
 	struct device *dev = &gpu->pdev->dev;
 	const struct firmware *fw;
+	const char *signed_fwname = NULL;
 	struct device_node *np, *mem_np;
 	struct resource r;
 	phys_addr_t mem_phys;
@@ -58,8 +59,43 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
 
 	mem_phys = r.start;
 
-	/* Request the MDT file for the firmware */
-	fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+	/*
+	 * Check for a firmware-name property.  This is the new scheme
+	 * to handle firmware that may be signed with device-specific
+	 * keys, allowing us to have a different zap fw path for different
+	 * devices.
+	 *
+	 * If the firmware-name property is found, we bypass the
+	 * adreno_request_fw() mechanism, because we don't need to handle
+	 * the /lib/firmware/qcom/... vs /lib/firmware/... case.
+	 *
+	 * If the firmware-name property is not found, for backwards
+	 * compatibility we fall back to the fwname from the gpulist
+	 * table.
+	 */
+	of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);
+	if (signed_fwname) {
+		fwname = signed_fwname;
+		ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
+		if (ret)
+			fw = ERR_PTR(ret);
+	} else if (fwname) {
+		/* Request the MDT file from the default location: */
+		fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+	} else {
+		/*
+		 * For new targets, we require the firmware-name property,
+		 * if a zap-shader is required, rather than falling back
+		 * to a firmware name specified in gpulist.
+		 *
+		 * Because the firmware is signed with a (potentially)
+		 * device-specific key, having the name come from gpulist
+		 * was a bad idea, and is only provided for backwards
+		 * compatibility for older targets.
+		 */
+		return -ENODEV;
+	}
+
 	if (IS_ERR(fw)) {
 		DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
 		return PTR_ERR(fw);
@@ -95,7 +131,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
 	 * not.  But since we've already gotten through adreno_request_fw()
 	 * we know which of the two cases it is:
 	 */
-	if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
+	if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
 		ret = qcom_mdt_load(dev, fw, fwname, pasid,
 				mem_region, mem_phys, mem_size, NULL);
 	} else {
@@ -146,14 +182,6 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
 		return -EPROBE_DEFER;
 	}
 
-	/* Each GPU has a target specific zap shader firmware name to use */
-	if (!adreno_gpu->info->zapfw) {
-		zap_available = false;
-		DRM_DEV_ERROR(&pdev->dev,
-			"Zap shader firmware file not specified for this target\n");
-		return -ENODEV;
-	}
-
 	return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
 }
 
@@ -826,7 +854,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
 
 	node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
 	if (!node) {
-		DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
+		DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n");
 		return -ENXIO;
 	}
 
@@ -887,10 +915,21 @@ static int adreno_get_pwrlevels(struct device *dev,
 	DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
 
 	/* Check for an interconnect path for the bus */
-	gpu->icc_path = of_icc_get(dev, NULL);
+	gpu->icc_path = of_icc_get(dev, "gfx-mem");
+	if (!gpu->icc_path) {
+		/*
+		 * Keep compatibility with device trees that don't have an
+		 * interconnect-names property.
+		 */
+		gpu->icc_path = of_icc_get(dev, NULL);
+	}
 	if (IS_ERR(gpu->icc_path))
 		gpu->icc_path = NULL;
 
+	gpu->ocmem_icc_path = of_icc_get(dev, "ocmem");
+	if (IS_ERR(gpu->ocmem_icc_path))
+		gpu->ocmem_icc_path = NULL;
+
 	return 0;
 }
 
@@ -977,6 +1016,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 		release_firmware(adreno_gpu->fw[i]);
 
 	icc_put(gpu->icc_path);
+	icc_put(gpu->ocmem_icc_path);
 
 	msm_gpu_cleanup(&adreno_gpu->base);
 }
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index e71a757..9ff4e55 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -3,7 +3,7 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
- * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef __ADRENO_GPU_H__
@@ -227,6 +227,16 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
 	return gpu->revn == 540;
 }
 
+static inline int adreno_is_a618(struct adreno_gpu *gpu)
+{
+	return gpu->revn == 618;
+}
+
+static inline int adreno_is_a630(struct adreno_gpu *gpu)
+{
+	return gpu->revn == 630;
+}
+
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
 const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
 		const char *fwname);
@@ -330,10 +340,7 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
 static inline bool adreno_reg_check(struct adreno_gpu *gpu,
 		enum adreno_regs offset_name)
 {
-	if (offset_name >= REG_ADRENO_REGISTER_MAX ||
-			!gpu->reg_offsets[offset_name]) {
-		BUG();
-	}
+	BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX || !gpu->reg_offsets[offset_name]);
 
 	/*
 	 * REG_SKIP is a special value that tell us that the register in
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index f197dce..bf51341 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -197,10 +197,6 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
 	DPU_DEBUG("%s\n", dpu_crtc->name);
 
 	for (i = 0; i < cstate->num_mixers; i++) {
-		if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
-			DPU_ERROR("invalid lm or ctl assigned to mixer\n");
-			return;
-		}
 		mixer[i].mixer_op_mode = 0;
 		mixer[i].flush_mask = 0;
 		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
@@ -1113,14 +1109,9 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
 
 	for (i = 0; i < cstate->num_mixers; ++i) {
 		m = &cstate->mixers[i];
-		if (!m->hw_lm)
-			seq_printf(s, "\tmixer[%d] has no lm\n", i);
-		else if (!m->lm_ctl)
-			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
-		else
-			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
-				m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
-				out_width, mode->vdisplay);
+		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
+			out_width, mode->vdisplay);
 	}
 
 	seq_puts(s, "\n");
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index f96e142..f8ac3bf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -58,7 +58,7 @@
 
 #define IDLE_SHORT_TIMEOUT	1
 
-#define MAX_VDISPLAY_SPLIT 1080
+#define MAX_HDISPLAY_SPLIT 1080
 
 /* timeout in frames waiting for frame done */
 #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
@@ -233,7 +233,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
 	u32 irq_status;
 	int ret;
 
-	if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
+	if (!wait_info || intr_idx >= INTR_IDX_MAX) {
 		DPU_ERROR("invalid params\n");
 		return -EINVAL;
 	}
@@ -308,7 +308,7 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
 	struct dpu_encoder_irq *irq;
 	int ret = 0;
 
-	if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
+	if (intr_idx >= INTR_IDX_MAX) {
 		DPU_ERROR("invalid params\n");
 		return -EINVAL;
 	}
@@ -363,10 +363,6 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
 	struct dpu_encoder_irq *irq;
 	int ret;
 
-	if (!phys_enc) {
-		DPU_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
 	irq = &phys_enc->irq[intr_idx];
 
 	/* silently skip irqs that weren't registered */
@@ -415,7 +411,7 @@ void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
-		if (phys && phys->ops.get_hw_resources)
+		if (phys->ops.get_hw_resources)
 			phys->ops.get_hw_resources(phys, hw_res);
 	}
 }
@@ -438,7 +434,7 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
-		if (phys && phys->ops.destroy) {
+		if (phys->ops.destroy) {
 			phys->ops.destroy(phys);
 			--dpu_enc->num_phys_encs;
 			dpu_enc->phys_encs[i] = NULL;
@@ -464,7 +460,7 @@ void dpu_encoder_helper_split_config(
 	struct dpu_hw_mdp *hw_mdptop;
 	struct msm_display_info *disp_info;
 
-	if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+	if (!phys_enc->hw_mdptop || !phys_enc->parent) {
 		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
 		return;
 	}
@@ -534,8 +530,23 @@ static struct msm_display_topology dpu_encoder_get_topology(
 		if (dpu_enc->phys_encs[i])
 			intf_count++;
 
-	/* User split topology for width > 1080 */
-	topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1;
+	/* Datapath topology selection
+	 *
+	 * Dual display
+	 * 2 LM, 2 INTF (Split display using 2 interfaces)
+	 *
+	 * Single display
+	 * 1 LM, 1 INTF
+	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
+	 *
+	 */
+	if (intf_count == 2)
+		topology.num_lm = 2;
+	else if (!dpu_kms->catalog->caps->has_3d_merge)
+		topology.num_lm = 1;
+	else
+		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
+
 	topology.num_enc = 0;
 	topology.num_intf = intf_count;
 
@@ -583,10 +594,10 @@ static int dpu_encoder_virt_atomic_check(
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
-		if (phys && phys->ops.atomic_check)
+		if (phys->ops.atomic_check)
 			ret = phys->ops.atomic_check(phys, crtc_state,
 					conn_state);
-		else if (phys && phys->ops.mode_fixup)
+		else if (phys->ops.mode_fixup)
 			if (!phys->ops.mode_fixup(phys, mode, adj_mode))
 				ret = -EINVAL;
 
@@ -682,7 +693,7 @@ static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
-		if (phys && phys->ops.irq_control)
+		if (phys->ops.irq_control)
 			phys->ops.irq_control(phys, enable);
 	}
 
@@ -1032,46 +1043,43 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
-		if (phys) {
-			if (!dpu_enc->hw_pp[i]) {
-				DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
-					     "at idx: %d\n", i);
-				goto error;
-			}
-
-			if (!hw_ctl[i]) {
-				DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
-					     "at idx: %d\n", i);
-				goto error;
-			}
-
-			phys->hw_pp = dpu_enc->hw_pp[i];
-			phys->hw_ctl = hw_ctl[i];
-
-			dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
-					    DPU_HW_BLK_INTF);
-			for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
-				struct dpu_hw_intf *hw_intf;
-
-				if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
-					break;
-
-				hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
-				if (hw_intf->idx == phys->intf_idx)
-					phys->hw_intf = hw_intf;
-			}
-
-			if (!phys->hw_intf) {
-				DPU_ERROR_ENC(dpu_enc,
-					      "no intf block assigned at idx: %d\n",
-					      i);
-				goto error;
-			}
-
-			phys->connector = conn->state->connector;
-			if (phys->ops.mode_set)
-				phys->ops.mode_set(phys, mode, adj_mode);
+		if (!dpu_enc->hw_pp[i]) {
+			DPU_ERROR_ENC(dpu_enc,
+				"no pp block assigned at idx: %d\n", i);
+			goto error;
 		}
+
+		if (!hw_ctl[i]) {
+			DPU_ERROR_ENC(dpu_enc,
+				"no ctl block assigned at idx: %d\n", i);
+			goto error;
+		}
+
+		phys->hw_pp = dpu_enc->hw_pp[i];
+		phys->hw_ctl = hw_ctl[i];
+
+		dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
+				    DPU_HW_BLK_INTF);
+		for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
+			struct dpu_hw_intf *hw_intf;
+
+			if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
+				break;
+
+			hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
+			if (hw_intf->idx == phys->intf_idx)
+				phys->hw_intf = hw_intf;
+		}
+
+		if (!phys->hw_intf) {
+			DPU_ERROR_ENC(dpu_enc,
+				      "no intf block assigned at idx: %d\n", i);
+			goto error;
+		}
+
+		phys->connector = conn->state->connector;
+		if (phys->ops.mode_set)
+			phys->ops.mode_set(phys, mode, adj_mode);
 	}
 
 	dpu_enc->mode_set_complete = true;
@@ -1203,7 +1211,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
-		if (phys && phys->ops.disable)
+		if (phys->ops.disable)
 			phys->ops.disable(phys);
 	}
 
@@ -1216,8 +1224,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
 
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
-		if (dpu_enc->phys_encs[i])
-			dpu_enc->phys_encs[i]->connector = NULL;
+		dpu_enc->phys_encs[i]->connector = NULL;
 	}
 
 	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
@@ -1307,7 +1314,7 @@ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
-		if (phys && phys->ops.control_vblank_irq)
+		if (phys->ops.control_vblank_irq)
 			phys->ops.control_vblank_irq(phys, enable);
 	}
 }
@@ -1419,7 +1426,7 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
 	}
 
 	ctl = phys->hw_ctl;
-	if (!ctl || !ctl->ops.trigger_flush) {
+	if (!ctl->ops.trigger_flush) {
 		DPU_ERROR("missing trigger cb\n");
 		return;
 	}
@@ -1463,13 +1470,8 @@ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
 {
 	struct dpu_hw_ctl *ctl;
 
-	if (!phys_enc) {
-		DPU_ERROR("invalid encoder\n");
-		return;
-	}
-
 	ctl = phys_enc->hw_ctl;
-	if (ctl && ctl->ops.trigger_start) {
+	if (ctl->ops.trigger_start) {
 		ctl->ops.trigger_start(ctl);
 		trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
 	}
@@ -1506,14 +1508,10 @@ static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
 	struct dpu_hw_ctl *ctl;
 	int rc;
 
-	if (!phys_enc) {
-		DPU_ERROR("invalid encoder\n");
-		return;
-	}
 	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
 	ctl = phys_enc->hw_ctl;
 
-	if (!ctl || !ctl->ops.reset)
+	if (!ctl->ops.reset)
 		return;
 
 	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
@@ -1550,12 +1548,10 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
-		if (!phys || phys->enable_state == DPU_ENC_DISABLED)
+		if (phys->enable_state == DPU_ENC_DISABLED)
 			continue;
 
 		ctl = phys->hw_ctl;
-		if (!ctl)
-			continue;
 
 		/*
 		 * This is cleared in frame_done worker, which isn't invoked
@@ -1603,17 +1599,15 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		phys = dpu_enc->phys_encs[i];
 
-		if (phys && phys->hw_ctl) {
-			ctl = phys->hw_ctl;
-			if (ctl->ops.clear_pending_flush)
-				ctl->ops.clear_pending_flush(ctl);
+		ctl = phys->hw_ctl;
+		if (ctl->ops.clear_pending_flush)
+			ctl->ops.clear_pending_flush(ctl);
 
-			/* update only for command mode primary ctl */
-			if ((phys == dpu_enc->cur_master) &&
-			   (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
-			    && ctl->ops.trigger_pending)
-				ctl->ops.trigger_pending(ctl);
-		}
+		/* update only for command mode primary ctl */
+		if ((phys == dpu_enc->cur_master) &&
+		   (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+		    && ctl->ops.trigger_pending)
+			ctl->ops.trigger_pending(ctl);
 	}
 }
 
@@ -1773,12 +1767,10 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
 	DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		phys = dpu_enc->phys_encs[i];
-		if (phys) {
-			if (phys->ops.prepare_for_kickoff)
-				phys->ops.prepare_for_kickoff(phys);
-			if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
-				needs_hw_reset = true;
-		}
+		if (phys->ops.prepare_for_kickoff)
+			phys->ops.prepare_for_kickoff(phys);
+		if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
+			needs_hw_reset = true;
 	}
 	DPU_ATRACE_END("enc_prepare_for_kickoff");
 
@@ -1819,7 +1811,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
 	/* allow phys encs to handle any post-kickoff business */
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		phys = dpu_enc->phys_encs[i];
-		if (phys && phys->ops.handle_post_kickoff)
+		if (phys->ops.handle_post_kickoff)
 			phys->ops.handle_post_kickoff(phys);
 	}
 
@@ -1848,7 +1840,7 @@ void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
 
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		phys = dpu_enc->phys_encs[i];
-		if (phys && phys->ops.prepare_commit)
+		if (phys->ops.prepare_commit)
 			phys->ops.prepare_commit(phys);
 	}
 }
@@ -1863,9 +1855,6 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data)
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
-		if (!phys)
-			continue;
-
 		seq_printf(s, "intf:%d    vsync:%8d     underrun:%8d    ",
 				phys->intf_idx - INTF_0,
 				atomic_read(&phys->vsync_cnt),
@@ -1924,8 +1913,7 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 		dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
 
 	for (i = 0; i < dpu_enc->num_phys_encs; i++)
-		if (dpu_enc->phys_encs[i] &&
-				dpu_enc->phys_encs[i]->ops.late_register)
+		if (dpu_enc->phys_encs[i]->ops.late_register)
 			dpu_enc->phys_encs[i]->ops.late_register(
 					dpu_enc->phys_encs[i],
 					dpu_enc->debugfs_root);
@@ -2094,11 +2082,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
 
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
-		if (phys) {
-			atomic_set(&phys->vsync_cnt, 0);
-			atomic_set(&phys->underrun_cnt, 0);
-		}
+		atomic_set(&phys->vsync_cnt, 0);
+		atomic_set(&phys->underrun_cnt, 0);
 	}
 	mutex_unlock(&dpu_enc->enc_lock);
 
@@ -2240,8 +2225,6 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
 
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-		if (!phys)
-			continue;
 
 		switch (event) {
 		case MSM_ENC_COMMIT_DONE:
@@ -2257,7 +2240,7 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
 			DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
 					event);
 			return -EINVAL;
-		};
+		}
 
 		if (fn_wait) {
 			DPU_ATRACE_BEGIN("wait_for_completion_event");
@@ -2274,7 +2257,6 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
 {
 	struct dpu_encoder_virt *dpu_enc = NULL;
-	int i;
 
 	if (!encoder) {
 		DPU_ERROR("invalid encoder\n");
@@ -2285,12 +2267,8 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
 	if (dpu_enc->cur_master)
 		return dpu_enc->cur_master->intf_mode;
 
-	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
-		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
-		if (phys)
-			return phys->intf_mode;
-	}
+	if (dpu_enc->num_phys_encs)
+		return dpu_enc->phys_encs[0]->intf_mode;
 
 	return INTF_MODE_NONE;
 }
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 0479609..39e1e28 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -45,8 +45,7 @@ static bool dpu_encoder_phys_cmd_mode_fixup(
 		const struct drm_display_mode *mode,
 		struct drm_display_mode *adj_mode)
 {
-	if (phys_enc)
-		DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
+	DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
 	return true;
 }
 
@@ -58,11 +57,8 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
 	struct dpu_hw_ctl *ctl;
 	struct dpu_hw_intf_cfg intf_cfg = { 0 };
 
-	if (!phys_enc)
-		return;
-
 	ctl = phys_enc->hw_ctl;
-	if (!ctl || !ctl->ops.setup_intf_cfg)
+	if (!ctl->ops.setup_intf_cfg)
 		return;
 
 	intf_cfg.intf = phys_enc->intf_idx;
@@ -79,7 +75,7 @@ static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
 	int new_cnt;
 	u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
 
-	if (!phys_enc || !phys_enc->hw_pp)
+	if (!phys_enc->hw_pp)
 		return;
 
 	DPU_ATRACE_BEGIN("pp_done_irq");
@@ -106,7 +102,7 @@ static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
 	struct dpu_encoder_phys *phys_enc = arg;
 	struct dpu_encoder_phys_cmd *cmd_enc;
 
-	if (!phys_enc || !phys_enc->hw_pp)
+	if (!phys_enc->hw_pp)
 		return;
 
 	DPU_ATRACE_BEGIN("rd_ptr_irq");
@@ -125,9 +121,6 @@ static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
 {
 	struct dpu_encoder_phys *phys_enc = arg;
 
-	if (!phys_enc || !phys_enc->hw_ctl)
-		return;
-
 	DPU_ATRACE_BEGIN("ctl_start_irq");
 
 	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
@@ -141,9 +134,6 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
 {
 	struct dpu_encoder_phys *phys_enc = arg;
 
-	if (!phys_enc)
-		return;
-
 	if (phys_enc->parent_ops->handle_underrun_virt)
 		phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
 			phys_enc);
@@ -179,7 +169,7 @@ static void dpu_encoder_phys_cmd_mode_set(
 	struct dpu_encoder_phys_cmd *cmd_enc =
 		to_dpu_encoder_phys_cmd(phys_enc);
 
-	if (!phys_enc || !mode || !adj_mode) {
+	if (!mode || !adj_mode) {
 		DPU_ERROR("invalid args\n");
 		return;
 	}
@@ -198,7 +188,7 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
 	u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
 	bool do_log = false;
 
-	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
+	if (!phys_enc->hw_pp)
 		return -EINVAL;
 
 	cmd_enc->pp_timeout_report_cnt++;
@@ -247,11 +237,6 @@ static int _dpu_encoder_phys_cmd_wait_for_idle(
 	struct dpu_encoder_wait_info wait_info;
 	int ret;
 
-	if (!phys_enc) {
-		DPU_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
 	wait_info.wq = &phys_enc->pending_kickoff_wq;
 	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
 	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
@@ -273,7 +258,7 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
 	int ret = 0;
 	int refcount;
 
-	if (!phys_enc || !phys_enc->hw_pp) {
+	if (!phys_enc->hw_pp) {
 		DPU_ERROR("invalid encoder\n");
 		return -EINVAL;
 	}
@@ -314,9 +299,6 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
 static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
 		bool enable)
 {
-	if (!phys_enc)
-		return;
-
 	trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
 			phys_enc->hw_pp->idx - PINGPONG_0,
 			enable, atomic_read(&phys_enc->vblank_refcount));
@@ -351,7 +333,7 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
 	u32 vsync_hz;
 	struct dpu_kms *dpu_kms;
 
-	if (!phys_enc || !phys_enc->hw_pp) {
+	if (!phys_enc->hw_pp) {
 		DPU_ERROR("invalid encoder\n");
 		return;
 	}
@@ -428,8 +410,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config(
 	struct dpu_encoder_phys_cmd *cmd_enc =
 		to_dpu_encoder_phys_cmd(phys_enc);
 
-	if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
-			|| !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+	if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
 		DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
 		return;
 	}
@@ -458,7 +439,7 @@ static void dpu_encoder_phys_cmd_enable_helper(
 	struct dpu_hw_ctl *ctl;
 	u32 flush_mask = 0;
 
-	if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
+	if (!phys_enc->hw_pp) {
 		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
 		return;
 	}
@@ -480,7 +461,7 @@ static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
 	struct dpu_encoder_phys_cmd *cmd_enc =
 		to_dpu_encoder_phys_cmd(phys_enc);
 
-	if (!phys_enc || !phys_enc->hw_pp) {
+	if (!phys_enc->hw_pp) {
 		DPU_ERROR("invalid phys encoder\n");
 		return;
 	}
@@ -499,8 +480,7 @@ static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
 static void _dpu_encoder_phys_cmd_connect_te(
 		struct dpu_encoder_phys *phys_enc, bool enable)
 {
-	if (!phys_enc || !phys_enc->hw_pp ||
-			!phys_enc->hw_pp->ops.connect_external_te)
+	if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
 		return;
 
 	trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
@@ -518,7 +498,7 @@ static int dpu_encoder_phys_cmd_get_line_count(
 {
 	struct dpu_hw_pingpong *hw_pp;
 
-	if (!phys_enc || !phys_enc->hw_pp)
+	if (!phys_enc->hw_pp)
 		return -EINVAL;
 
 	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
@@ -536,7 +516,7 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
 	struct dpu_encoder_phys_cmd *cmd_enc =
 		to_dpu_encoder_phys_cmd(phys_enc);
 
-	if (!phys_enc || !phys_enc->hw_pp) {
+	if (!phys_enc->hw_pp) {
 		DPU_ERROR("invalid encoder\n");
 		return;
 	}
@@ -559,10 +539,6 @@ static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
 	struct dpu_encoder_phys_cmd *cmd_enc =
 		to_dpu_encoder_phys_cmd(phys_enc);
 
-	if (!phys_enc) {
-		DPU_ERROR("invalid encoder\n");
-		return;
-	}
 	kfree(cmd_enc);
 }
 
@@ -580,7 +556,7 @@ static void dpu_encoder_phys_cmd_prepare_for_kickoff(
 			to_dpu_encoder_phys_cmd(phys_enc);
 	int ret;
 
-	if (!phys_enc || !phys_enc->hw_pp) {
+	if (!phys_enc->hw_pp) {
 		DPU_ERROR("invalid encoder\n");
 		return;
 	}
@@ -614,11 +590,6 @@ static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
 	struct dpu_encoder_wait_info wait_info;
 	int ret;
 
-	if (!phys_enc || !phys_enc->hw_ctl) {
-		DPU_ERROR("invalid argument(s)\n");
-		return -EINVAL;
-	}
-
 	wait_info.wq = &phys_enc->pending_kickoff_wq;
 	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
 	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
@@ -639,9 +610,6 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
 {
 	int rc;
 
-	if (!phys_enc)
-		return -EINVAL;
-
 	rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
 	if (rc) {
 		DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
@@ -658,9 +626,6 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done(
 	int rc = 0;
 	struct dpu_encoder_phys_cmd *cmd_enc;
 
-	if (!phys_enc)
-		return -EINVAL;
-
 	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
 
 	/* only required for master controller */
@@ -681,9 +646,6 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
 	struct dpu_encoder_phys_cmd *cmd_enc;
 	struct dpu_encoder_wait_info wait_info;
 
-	if (!phys_enc)
-		return -EINVAL;
-
 	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
 
 	/* only required for master controller */
@@ -715,9 +677,6 @@ static void dpu_encoder_phys_cmd_handle_post_kickoff(
 static void dpu_encoder_phys_cmd_trigger_start(
 		struct dpu_encoder_phys *phys_enc)
 {
-	if (!phys_enc)
-		return;
-
 	dpu_encoder_helper_trigger_start(phys_enc);
 }
 
@@ -816,6 +775,4 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
 	DPU_DEBUG_CMDENC(cmd_enc, "created\n");
 
 	return phys_enc;
-
-	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 3123ef8..c71c18d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -220,8 +220,7 @@ static bool dpu_encoder_phys_vid_mode_fixup(
 		const struct drm_display_mode *mode,
 		struct drm_display_mode *adj_mode)
 {
-	if (phys_enc)
-		DPU_DEBUG_VIDENC(phys_enc, "\n");
+	DPU_DEBUG_VIDENC(phys_enc, "\n");
 
 	/*
 	 * Modifying mode has consequences when the mode comes back to us
@@ -239,7 +238,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
 	unsigned long lock_flags;
 	struct dpu_hw_intf_cfg intf_cfg = { 0 };
 
-	if (!phys_enc || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+	if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
 		DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
 		return;
 	}
@@ -280,6 +279,14 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
 	phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
 			&timing_params, fmt);
 	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+
+	/* Set up which pp blk will connect to this intf */
+	if (phys_enc->hw_intf->ops.bind_pingpong_blk)
+		phys_enc->hw_intf->ops.bind_pingpong_blk(
+				phys_enc->hw_intf,
+				true,
+				phys_enc->hw_pp->idx);
+
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 
 	programmable_fetch_config(phys_enc, &timing_params);
@@ -293,12 +300,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
 	u32 flush_register = 0;
 	int new_cnt = -1, old_cnt = -1;
 
-	if (!phys_enc)
-		return;
-
 	hw_ctl = phys_enc->hw_ctl;
-	if (!hw_ctl)
-		return;
 
 	DPU_ATRACE_BEGIN("vblank_irq");
 
@@ -314,7 +316,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
 	 * so we need to double-check with hw that it accepted the flush bits
 	 */
 	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-	if (hw_ctl && hw_ctl->ops.get_flush_register)
+	if (hw_ctl->ops.get_flush_register)
 		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
 
 	if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
@@ -335,9 +337,6 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
 {
 	struct dpu_encoder_phys *phys_enc = arg;
 
-	if (!phys_enc)
-		return;
-
 	if (phys_enc->parent_ops->handle_underrun_virt)
 		phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
 			phys_enc);
@@ -374,11 +373,6 @@ static void dpu_encoder_phys_vid_mode_set(
 		struct drm_display_mode *mode,
 		struct drm_display_mode *adj_mode)
 {
-	if (!phys_enc) {
-		DPU_ERROR("invalid encoder/kms\n");
-		return;
-	}
-
 	if (adj_mode) {
 		phys_enc->cached_mode = *adj_mode;
 		drm_mode_debug_printmodeline(adj_mode);
@@ -395,11 +389,6 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
 	int ret = 0;
 	int refcount;
 
-	if (!phys_enc) {
-		DPU_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
 	refcount = atomic_read(&phys_enc->vblank_refcount);
 
 	/* Slave encoders don't report vblank */
@@ -435,6 +424,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
 {
 	struct dpu_hw_ctl *ctl;
 	u32 flush_mask = 0;
+	u32 intf_flush_mask = 0;
 
 	ctl = phys_enc->hw_ctl;
 
@@ -459,10 +449,18 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
 	ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->hw_intf->idx);
 	ctl->ops.update_pending_flush(ctl, flush_mask);
 
+	if (ctl->ops.get_bitmask_active_intf)
+		ctl->ops.get_bitmask_active_intf(ctl, &intf_flush_mask,
+			phys_enc->hw_intf->idx);
+
+	if (ctl->ops.update_pending_intf_flush)
+		ctl->ops.update_pending_intf_flush(ctl, intf_flush_mask);
+
 skip_flush:
 	DPU_DEBUG_VIDENC(phys_enc,
-			 "update pending flush ctl %d flush_mask %x\n",
-			 ctl->idx - CTL_0, flush_mask);
+		"update pending flush ctl %d flush_mask 0%x intf_mask 0x%x\n",
+		ctl->idx - CTL_0, flush_mask, intf_flush_mask);
+
 
 	/* ctl_flush & timing engine enable will be triggered by framework */
 	if (phys_enc->enable_state == DPU_ENC_DISABLED)
@@ -471,11 +469,6 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
 
 static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
 {
-	if (!phys_enc) {
-		DPU_ERROR("invalid encoder\n");
-		return;
-	}
-
 	DPU_DEBUG_VIDENC(phys_enc, "\n");
 	kfree(phys_enc);
 }
@@ -493,11 +486,6 @@ static int dpu_encoder_phys_vid_wait_for_vblank(
 	struct dpu_encoder_wait_info wait_info;
 	int ret;
 
-	if (!phys_enc) {
-		pr_err("invalid encoder\n");
-		return -EINVAL;
-	}
-
 	wait_info.wq = &phys_enc->pending_kickoff_wq;
 	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
 	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
@@ -543,13 +531,8 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
 	struct dpu_hw_ctl *ctl;
 	int rc;
 
-	if (!phys_enc) {
-		DPU_ERROR("invalid encoder/parameters\n");
-		return;
-	}
-
 	ctl = phys_enc->hw_ctl;
-	if (!ctl || !ctl->ops.wait_reset_status)
+	if (!ctl->ops.wait_reset_status)
 		return;
 
 	/*
@@ -569,12 +552,12 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
 	unsigned long lock_flags;
 	int ret;
 
-	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev) {
+	if (!phys_enc->parent || !phys_enc->parent->dev) {
 		DPU_ERROR("invalid encoder/device\n");
 		return;
 	}
 
-	if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
+	if (!phys_enc->hw_intf) {
 		DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
 				phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
 		return;
@@ -639,9 +622,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
 {
 	int ret;
 
-	if (!phys_enc)
-		return;
-
 	trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
 			    phys_enc->hw_intf->idx - INTF_0,
 			    enable,
@@ -662,9 +642,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
 static int dpu_encoder_phys_vid_get_line_count(
 		struct dpu_encoder_phys *phys_enc)
 {
-	if (!phys_enc)
-		return -EINVAL;
-
 	if (!dpu_encoder_phys_vid_is_master(phys_enc))
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 24ab624..5286326 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -489,12 +489,28 @@ static const struct dpu_format dpu_format_map_ubwc[] = {
 		true, 4, DPU_FORMAT_FLAG_COMPRESSED,
 		DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
 
+	/* ARGB8888 and ABGR8888 purposely have the same color
+	 * ordering.  The hardware only supports ABGR8888 UBWC
+	 * natively.
+	 */
+	INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+		DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
 	INTERLEAVED_RGB_FMT_TILED(XBGR8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		false, 4, DPU_FORMAT_FLAG_COMPRESSED,
 		DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
 
+	INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+		DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
 	INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
@@ -550,7 +566,9 @@ static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
 {
 	static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
 		{DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+		{DRM_FORMAT_ARGB8888, COLOR_FMT_RGBA8888_UBWC},
 		{DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+		{DRM_FORMAT_XRGB8888, COLOR_FMT_RGBA8888_UBWC},
 		{DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
 		{DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
 		{DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 04c8c44..c567917 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -11,11 +11,17 @@
 #include "dpu_hw_catalog_format.h"
 #include "dpu_kms.h"
 
-#define VIG_SDM845_MASK \
-	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\
+#define VIG_MASK \
+	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
 	BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
 	BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
 
+#define VIG_SDM845_MASK \
+	(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+
+#define VIG_SC7180_MASK \
+	(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED4))
+
 #define DMA_SDM845_MASK \
 	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
 	BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
@@ -27,6 +33,9 @@
 #define MIXER_SDM845_MASK \
 	(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
 
+#define MIXER_SC7180_MASK \
+	(BIT(DPU_DIM_LAYER))
+
 #define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
 
 #define PINGPONG_SDM845_SPLIT_MASK \
@@ -58,9 +67,20 @@ static const struct dpu_caps sdm845_dpu_caps = {
 	.has_src_split = true,
 	.has_dim_layer = true,
 	.has_idle_pc = true,
+	.has_3d_merge = true,
 };
 
-static struct dpu_mdp_cfg sdm845_mdp[] = {
+static const struct dpu_caps sc7180_dpu_caps = {
+	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+	.max_mixer_blendstages = 0x9,
+	.qseed_type = DPU_SSPP_SCALER_QSEED4,
+	.smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+	.ubwc_version = DPU_HW_UBWC_VER_20,
+	.has_dim_layer = true,
+	.has_idle_pc = true,
+};
+
+static const struct dpu_mdp_cfg sdm845_mdp[] = {
 	{
 	.name = "top_0", .id = MDP_TOP,
 	.base = 0x0, .len = 0x45C,
@@ -85,10 +105,27 @@ static struct dpu_mdp_cfg sdm845_mdp[] = {
 	},
 };
 
+static const struct dpu_mdp_cfg sc7180_mdp[] = {
+	{
+	.name = "top_0", .id = MDP_TOP,
+	.base = 0x0, .len = 0x494,
+	.features = 0,
+	.highest_bank_bit = 0x3,
+	.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+		.reg_off = 0x2AC, .bit_off = 0},
+	.clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+		.reg_off = 0x2AC, .bit_off = 8},
+	.clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+		.reg_off = 0x2B4, .bit_off = 8},
+	.clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+		.reg_off = 0x2BC, .bit_off = 8},
+	},
+};
+
 /*************************************************************
  * CTL sub blocks config
  *************************************************************/
-static struct dpu_ctl_cfg sdm845_ctl[] = {
+static const struct dpu_ctl_cfg sdm845_ctl[] = {
 	{
 	.name = "ctl_0", .id = CTL_0,
 	.base = 0x1000, .len = 0xE4,
@@ -116,6 +153,24 @@ static struct dpu_ctl_cfg sdm845_ctl[] = {
 	},
 };
 
+static const struct dpu_ctl_cfg sc7180_ctl[] = {
+	{
+	.name = "ctl_0", .id = CTL_0,
+	.base = 0x1000, .len = 0xE4,
+	.features = BIT(DPU_CTL_ACTIVE_CFG)
+	},
+	{
+	.name = "ctl_1", .id = CTL_1,
+	.base = 0x1200, .len = 0xE4,
+	.features = BIT(DPU_CTL_ACTIVE_CFG)
+	},
+	{
+	.name = "ctl_2", .id = CTL_2,
+	.base = 0x1400, .len = 0xE4,
+	.features = BIT(DPU_CTL_ACTIVE_CFG)
+	},
+};
+
 /*************************************************************
  * SSPP sub blocks config
  *************************************************************/
@@ -128,7 +183,7 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
 	.maxvdeciexp = MAX_VERT_DECIMATION,
 };
 
-#define _VIG_SBLK(num, sdma_pri) \
+#define _VIG_SBLK(num, sdma_pri, qseed_ver) \
 	{ \
 	.common = &sdm845_sspp_common, \
 	.maxdwnscale = MAX_DOWNSCALE_RATIO, \
@@ -137,7 +192,7 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
 	.src_blk = {.name = STRCAT("sspp_src_", num), \
 		.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
 	.scaler_blk = {.name = STRCAT("sspp_scaler", num), \
-		.id = DPU_SSPP_SCALER_QSEED3, \
+		.id = qseed_ver, \
 		.base = 0xa00, .len = 0xa0,}, \
 	.csc_blk = {.name = STRCAT("sspp_csc", num), \
 		.id = DPU_SSPP_CSC_10BIT, \
@@ -162,10 +217,14 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
 	.virt_num_formats = ARRAY_SIZE(plane_formats), \
 	}
 
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
+				_VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
+				_VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 =
+				_VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 =
+				_VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3);
 
 static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
 static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
@@ -184,7 +243,7 @@ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
 	.clk_ctrl = _clkctrl \
 	}
 
-static struct dpu_sspp_cfg sdm845_sspp[] = {
+static const struct dpu_sspp_cfg sdm845_sspp[] = {
 	SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SDM845_MASK,
 		sdm845_vig_sblk_0, 0,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
 	SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SDM845_MASK,
@@ -203,9 +262,26 @@ static struct dpu_sspp_cfg sdm845_sspp[] = {
 		sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
 };
 
+static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
+				_VIG_SBLK("0", 4, DPU_SSPP_SCALER_QSEED4);
+
+static const struct dpu_sspp_cfg sc7180_sspp[] = {
+	SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
+		sc7180_vig_sblk_0, 0,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+	SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000,  DMA_SDM845_MASK,
+		sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+	SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000,  DMA_SDM845_MASK,
+		sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+	SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000,  DMA_CURSOR_SDM845_MASK,
+		sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+};
+
 /*************************************************************
  * MIXER sub blocks config
  *************************************************************/
+
+/* SDM845 */
+
 static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
 	.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
 	.maxblendstages = 11, /* excluding base layer */
@@ -215,23 +291,46 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
 	},
 };
 
-#define LM_BLK(_name, _id, _base, _pp, _lmpair) \
+#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair) \
 	{ \
 	.name = _name, .id = _id, \
 	.base = _base, .len = 0x320, \
-	.features = MIXER_SDM845_MASK, \
-	.sblk = &sdm845_lm_sblk, \
+	.features = _fmask, \
+	.sblk = _sblk, \
 	.pingpong = _pp, \
 	.lm_pair_mask = (1 << _lmpair) \
 	}
 
-static struct dpu_lm_cfg sdm845_lm[] = {
-	LM_BLK("lm_0", LM_0, 0x44000, PINGPONG_0, LM_1),
-	LM_BLK("lm_1", LM_1, 0x45000, PINGPONG_1, LM_0),
-	LM_BLK("lm_2", LM_2, 0x46000, PINGPONG_2, LM_5),
-	LM_BLK("lm_3", LM_3, 0x0, PINGPONG_MAX, 0),
-	LM_BLK("lm_4", LM_4, 0x0, PINGPONG_MAX, 0),
-	LM_BLK("lm_5", LM_5, 0x49000, PINGPONG_3, LM_2),
+static const struct dpu_lm_cfg sdm845_lm[] = {
+	LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
+		&sdm845_lm_sblk, PINGPONG_0, LM_1),
+	LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
+		&sdm845_lm_sblk, PINGPONG_1, LM_0),
+	LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
+		&sdm845_lm_sblk, PINGPONG_2, LM_5),
+	LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK,
+		&sdm845_lm_sblk, PINGPONG_MAX, 0),
+	LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK,
+		&sdm845_lm_sblk, PINGPONG_MAX, 0),
+	LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
+		&sdm845_lm_sblk, PINGPONG_3, LM_2),
+};
+
+/* SC7180 */
+
+static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
+	.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+	.maxblendstages = 7, /* excluding base layer */
+	.blendstage_base = { /* offsets relative to mixer base */
+		0x20, 0x38, 0x50, 0x68, 0x80, 0x98, 0xb0
+	},
+};
+
+static const struct dpu_lm_cfg sc7180_lm[] = {
+	LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
+		&sc7180_lm_sblk, PINGPONG_0, LM_1),
+	LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK,
+		&sc7180_lm_sblk, PINGPONG_1, LM_0),
 };
 
 /*************************************************************
@@ -264,13 +363,18 @@ static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
 	.sblk = &sdm845_pp_sblk \
 	}
 
-static struct dpu_pingpong_cfg sdm845_pp[] = {
+static const struct dpu_pingpong_cfg sdm845_pp[] = {
 	PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
 	PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
 	PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
 	PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
 };
 
+static struct dpu_pingpong_cfg sc7180_pp[] = {
+	PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+	PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+};
+
 /*************************************************************
  * INTF sub blocks config
  *************************************************************/
@@ -278,26 +382,32 @@ static struct dpu_pingpong_cfg sdm845_pp[] = {
 	{\
 	.name = _name, .id = _id, \
 	.base = _base, .len = 0x280, \
+	.features = BIT(DPU_CTL_ACTIVE_CFG), \
 	.type = _type, \
 	.controller_id = _ctrl_id, \
 	.prog_fetch_lines_worst_case = 24 \
 	}
 
-static struct dpu_intf_cfg sdm845_intf[] = {
+static const struct dpu_intf_cfg sdm845_intf[] = {
 	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
 	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
 	INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
 	INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
 };
 
+static const struct dpu_intf_cfg sc7180_intf[] = {
+	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
+	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+};
+
 /*************************************************************
  * VBIF sub blocks config
  *************************************************************/
 /* VBIF QOS remap */
-static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
-static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
 
-static struct dpu_vbif_cfg sdm845_vbif[] = {
+static const struct dpu_vbif_cfg sdm845_vbif[] = {
 	{
 	.name = "vbif_0", .id = VBIF_0,
 	.base = 0, .len = 0x1040,
@@ -316,7 +426,7 @@ static struct dpu_vbif_cfg sdm845_vbif[] = {
 	},
 };
 
-static struct dpu_reg_dma_cfg sdm845_regdma = {
+static const struct dpu_reg_dma_cfg sdm845_regdma = {
 	.base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
 };
 
@@ -325,7 +435,7 @@ static struct dpu_reg_dma_cfg sdm845_regdma = {
  *************************************************************/
 
 /* SSPP QOS LUTs */
-static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+static const struct dpu_qos_lut_entry sdm845_qos_linear[] = {
 	{.fl = 4, .lut = 0x357},
 	{.fl = 5, .lut = 0x3357},
 	{.fl = 6, .lut = 0x23357},
@@ -340,7 +450,11 @@ static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
 	{.fl = 0, .lut = 0x11222222223357}
 };
 
-static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+static const struct dpu_qos_lut_entry sc7180_qos_linear[] = {
+	{.fl = 0, .lut = 0x0011222222335777},
+};
+
+static const struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
 	{.fl = 10, .lut = 0x344556677},
 	{.fl = 11, .lut = 0x3344556677},
 	{.fl = 12, .lut = 0x23344556677},
@@ -349,11 +463,19 @@ static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
 	{.fl = 0, .lut = 0x112233344556677},
 };
 
-static struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+static const struct dpu_qos_lut_entry sc7180_qos_macrotile[] = {
+	{.fl = 0, .lut = 0x0011223344556677},
+};
+
+static const struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
 	{.fl = 0, .lut = 0x0},
 };
 
-static struct dpu_perf_cfg sdm845_perf_data = {
+static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
+	{.fl = 0, .lut = 0x0},
+};
+
+static const struct dpu_perf_cfg sdm845_perf_data = {
 	.max_bw_low = 6800000,
 	.max_bw_high = 6800000,
 	.min_core_ib = 2400000,
@@ -392,6 +514,30 @@ static struct dpu_perf_cfg sdm845_perf_data = {
 	},
 };
 
+static const struct dpu_perf_cfg sc7180_perf_data = {
+	.max_bw_low = 3900000,
+	.max_bw_high = 5500000,
+	.min_core_ib = 2400000,
+	.min_llcc_ib = 800000,
+	.min_dram_ib = 800000,
+	.danger_lut_tbl = {0xff, 0xffff, 0x0},
+	.qos_lut_tbl = {
+		{.nentry = ARRAY_SIZE(sc7180_qos_linear),
+		.entries = sc7180_qos_linear
+		},
+		{.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+		.entries = sc7180_qos_macrotile
+		},
+		{.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+		.entries = sc7180_qos_nrt
+		},
+	},
+	.cdp_cfg = {
+		{.rd_enable = 1, .wr_enable = 1},
+		{.rd_enable = 1, .wr_enable = 0}
+	},
+};
+
 /*************************************************************
  * Hardware catalog init
  *************************************************************/
@@ -421,12 +567,43 @@ static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
 		.reg_dma_count = 1,
 		.dma_cfg = sdm845_regdma,
 		.perf = sdm845_perf_data,
+		.mdss_irqs = 0x3ff,
 	};
 }
 
-static struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+/*
+ * sc7180_cfg_init(): populate sc7180 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+	*dpu_cfg = (struct dpu_mdss_cfg){
+		.caps = &sc7180_dpu_caps,
+		.mdp_count = ARRAY_SIZE(sc7180_mdp),
+		.mdp = sc7180_mdp,
+		.ctl_count = ARRAY_SIZE(sc7180_ctl),
+		.ctl = sc7180_ctl,
+		.sspp_count = ARRAY_SIZE(sc7180_sspp),
+		.sspp = sc7180_sspp,
+		.mixer_count = ARRAY_SIZE(sc7180_lm),
+		.mixer = sc7180_lm,
+		.pingpong_count = ARRAY_SIZE(sc7180_pp),
+		.pingpong = sc7180_pp,
+		.intf_count = ARRAY_SIZE(sc7180_intf),
+		.intf = sc7180_intf,
+		.vbif_count = ARRAY_SIZE(sdm845_vbif),
+		.vbif = sdm845_vbif,
+		.reg_dma_count = 1,
+		.dma_cfg = sdm845_regdma,
+		.perf = sc7180_perf_data,
+		.mdss_irqs = 0x3f,
+	};
+}
+
+static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
 	{ .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
 	{ .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+	{ .hw_rev = DPU_HW_VER_620, .cfg_init = sc7180_cfg_init},
 };
 
 void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index ec76b868..09df7d8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -38,6 +38,7 @@
 #define DPU_HW_VER_401	DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
 #define DPU_HW_VER_410	DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
 #define DPU_HW_VER_500	DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+#define DPU_HW_VER_620	DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */
 
 
 #define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
@@ -45,6 +46,7 @@
 #define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
 #define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
 #define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
+#define IS_SC7180_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_620)
 
 
 #define DPU_HW_BLK_NAME_LEN	16
@@ -92,6 +94,7 @@ enum {
  * @DPU_SSPP_SRC             Src and fetch part of the pipes,
  * @DPU_SSPP_SCALER_QSEED2,  QSEED2 algorithm support
 * @DPU_SSPP_SCALER_QSEED3,  QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_QSEED4,  QSEED4 algorithm support
  * @DPU_SSPP_SCALER_RGB,     RGB Scaler, supported by RGB pipes
 * @DPU_SSPP_CSC,            Support of Color space conversion
  * @DPU_SSPP_CSC_10BIT,      Support of 10-bit Color space conversion
@@ -110,6 +113,7 @@ enum {
 	DPU_SSPP_SRC = 0x1,
 	DPU_SSPP_SCALER_QSEED2,
 	DPU_SSPP_SCALER_QSEED3,
+	DPU_SSPP_SCALER_QSEED4,
 	DPU_SSPP_SCALER_RGB,
 	DPU_SSPP_CSC,
 	DPU_SSPP_CSC_10BIT,
@@ -166,6 +170,7 @@ enum {
  */
 enum {
 	DPU_CTL_SPLIT_DISPLAY = 0x1,
+	DPU_CTL_ACTIVE_CFG,
 	DPU_CTL_MAX
 };
 
@@ -269,7 +274,7 @@ struct dpu_qos_lut_entry {
  */
 struct dpu_qos_lut_tbl {
 	u32 nentry;
-	struct dpu_qos_lut_entry *entries;
+	const struct dpu_qos_lut_entry *entries;
 };
 
 /**
@@ -283,6 +288,7 @@ struct dpu_qos_lut_tbl {
  * @has_src_split      source split feature status
  * @has_dim_layer      dim layer feature status
  * @has_idle_pc        indicate if idle power collapse feature is supported
+ * @has_3d_merge       indicate if 3D merge is supported
  */
 struct dpu_caps {
 	u32 max_mixer_width;
@@ -293,6 +299,7 @@ struct dpu_caps {
 	bool has_src_split;
 	bool has_dim_layer;
 	bool has_idle_pc;
+	bool has_3d_merge;
 };
 
 /**
@@ -320,6 +327,7 @@ struct dpu_sspp_blks_common {
  * @maxupscale:  maxupscale ratio supported
  * @smart_dma_priority: hw priority of rect1 of multirect pipe
  * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @qseed_ver: qseed version
  * @src_blk:
  * @scaler_blk:
  * @csc_blk:
@@ -340,6 +348,7 @@ struct dpu_sspp_sub_blks {
 	u32 maxupscale;
 	u32 smart_dma_priority;
 	u32 max_per_pipe_bw;
+	u32 qseed_ver;
 	struct dpu_src_blk src_blk;
 	struct dpu_scaler_blk scaler_blk;
 	struct dpu_pp_blk csc_blk;
@@ -511,7 +520,7 @@ struct dpu_vbif_dynamic_ot_cfg {
  */
 struct dpu_vbif_dynamic_ot_tbl {
 	u32 count;
-	struct dpu_vbif_dynamic_ot_cfg *cfg;
+	const struct dpu_vbif_dynamic_ot_cfg *cfg;
 };
 
 /**
@@ -521,7 +530,7 @@ struct dpu_vbif_dynamic_ot_tbl {
  */
 struct dpu_vbif_qos_tbl {
 	u32 npriority_lvl;
-	u32 *priority_lvl;
+	const u32 *priority_lvl;
 };
 
 /**
@@ -646,6 +655,7 @@ struct dpu_perf_cfg {
  * @dma_formats        Supported formats for dma pipe
  * @cursor_formats     Supported formats for cursor pipe
  * @vig_formats        Supported formats for vig pipe
+ * @mdss_irqs:         Bitmap with the irqs supported by the target
  */
 struct dpu_mdss_cfg {
 	u32 hwversion;
@@ -653,25 +663,25 @@ struct dpu_mdss_cfg {
 	const struct dpu_caps *caps;
 
 	u32 mdp_count;
-	struct dpu_mdp_cfg *mdp;
+	const struct dpu_mdp_cfg *mdp;
 
 	u32 ctl_count;
-	struct dpu_ctl_cfg *ctl;
+	const struct dpu_ctl_cfg *ctl;
 
 	u32 sspp_count;
-	struct dpu_sspp_cfg *sspp;
+	const struct dpu_sspp_cfg *sspp;
 
 	u32 mixer_count;
-	struct dpu_lm_cfg *mixer;
+	const struct dpu_lm_cfg *mixer;
 
 	u32 pingpong_count;
-	struct dpu_pingpong_cfg *pingpong;
+	const struct dpu_pingpong_cfg *pingpong;
 
 	u32 intf_count;
-	struct dpu_intf_cfg *intf;
+	const struct dpu_intf_cfg *intf;
 
 	u32 vbif_count;
-	struct dpu_vbif_cfg *vbif;
+	const struct dpu_vbif_cfg *vbif;
 
 	u32 reg_dma_count;
 	struct dpu_reg_dma_cfg dma_cfg;
@@ -681,9 +691,11 @@ struct dpu_mdss_cfg {
 	/* Add additional block data structures here */
 
 	struct dpu_perf_cfg perf;
-	struct dpu_format_extended *dma_formats;
-	struct dpu_format_extended *cursor_formats;
-	struct dpu_format_extended *vig_formats;
+	const struct dpu_format_extended *dma_formats;
+	const struct dpu_format_extended *cursor_formats;
+	const struct dpu_format_extended *vig_formats;
+
+	unsigned long mdss_irqs;
 };
 
 struct dpu_mdss_hw_cfg_handler {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
index bb6112c..3766f0f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -6,8 +6,12 @@
 
 static const uint32_t qcom_compressed_supported_formats[] = {
 	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_ARGB8888,
 	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB8888,
 	DRM_FORMAT_BGR565,
+
+	DRM_FORMAT_NV12,
 };
 
 static const uint32_t plane_formats[] = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 179e8d5..831e5f7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -22,14 +22,18 @@
 #define   CTL_PREPARE                   0x0d0
 #define   CTL_SW_RESET                  0x030
 #define   CTL_LAYER_EXTN_OFFSET         0x40
+#define   CTL_INTF_ACTIVE               0x0F4
+#define   CTL_INTF_FLUSH                0x110
+#define   CTL_INTF_MASTER               0x134
 
 #define CTL_MIXER_BORDER_OUT            BIT(24)
 #define CTL_FLUSH_MASK_CTL              BIT(17)
 
 #define DPU_REG_RESET_TIMEOUT_US        2000
+#define  INTF_IDX       31
 
-static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
-		struct dpu_mdss_cfg *m,
+static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
+		const struct dpu_mdss_cfg *m,
 		void __iomem *addr,
 		struct dpu_hw_blk_reg_map *b)
 {
@@ -100,11 +104,27 @@ static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
 	ctx->pending_flush_mask |= flushbits;
 }
 
+static inline void dpu_hw_ctl_update_pending_intf_flush(struct dpu_hw_ctl *ctx,
+		u32 flushbits)
+{
+	ctx->pending_intf_flush_mask |= flushbits;
+}
+
 static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
 {
 	return ctx->pending_flush_mask;
 }
 
+static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
+{
+
+	if (ctx->pending_flush_mask & BIT(INTF_IDX))
+		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
+				ctx->pending_intf_flush_mask);
+
+	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
 static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
 {
 	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
@@ -222,6 +242,36 @@ static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
 	return 0;
 }
 
+static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx,
+		u32 *flushbits, enum dpu_intf intf)
+{
+	switch (intf) {
+	case INTF_0:
+	case INTF_1:
+		*flushbits |= BIT(31);
+		break;
+	default:
+		return 0;
+	}
+	return 0;
+}
+
+static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+		u32 *flushbits, enum dpu_intf intf)
+{
+	switch (intf) {
+	case INTF_0:
+		*flushbits |= BIT(0);
+		break;
+	case INTF_1:
+		*flushbits |= BIT(1);
+		break;
+	default:
+		return 0;
+	}
+	return 0;
+}
+
 static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
 {
 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
@@ -422,6 +472,24 @@ static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
 	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
 }
 
+
+static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+		struct dpu_hw_intf_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 intf_active = 0;
+	u32 mode_sel = 0;
+
+	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
+		mode_sel |= BIT(17);
+
+	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
+	intf_active |= BIT(cfg->intf - INTF_0);
+
+	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
+	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+}
+
 static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
 		struct dpu_hw_intf_cfg *cfg)
 {
@@ -455,31 +523,41 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
 static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
 		unsigned long cap)
 {
+	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
+		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
+		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
+		ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf_v1;
+		ops->get_bitmask_active_intf =
+			dpu_hw_ctl_active_get_bitmask_intf;
+		ops->update_pending_intf_flush =
+			dpu_hw_ctl_update_pending_intf_flush;
+	} else {
+		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+		ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
+	}
 	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
 	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
 	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
-	ops->trigger_flush = dpu_hw_ctl_trigger_flush;
 	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
 	ops->trigger_start = dpu_hw_ctl_trigger_start;
 	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
-	ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
 	ops->reset = dpu_hw_ctl_reset_control;
 	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
 	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
 	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
 	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
 	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
-	ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
 };
 
 static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
 		void __iomem *addr,
-		struct dpu_mdss_cfg *m)
+		const struct dpu_mdss_cfg *m)
 {
 	struct dpu_hw_ctl *c;
-	struct dpu_ctl_cfg *cfg;
+	const struct dpu_ctl_cfg *cfg;
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index d3ae939..09e1263 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -91,6 +91,15 @@ struct dpu_hw_ctl_ops {
 		u32 flushbits);
 
 	/**
+	 * OR in the given flushbits to the cached pending_intf_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 * @flushbits : module flushmask
+	 */
+	void (*update_pending_intf_flush)(struct dpu_hw_ctl *ctx,
+		u32 flushbits);
+
+	/**
 	 * Write the value of the pending_flush_mask to hardware
 	 * @ctx       : ctl path ctx pointer
 	 */
@@ -130,11 +139,24 @@ struct dpu_hw_ctl_ops {
 	uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
 		enum dpu_lm blk);
 
+	/**
+	 * Query the value of the intf flush mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
 	int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
 		u32 *flushbits,
 		enum dpu_intf blk);
 
 	/**
+	 * Query the value of the intf active flush mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	int (*get_bitmask_active_intf)(struct dpu_hw_ctl *ctx,
+		u32 *flushbits, enum dpu_intf blk);
+
+	/**
 	 * Set all blend stages to disabled
 	 * @ctx       : ctl path ctx pointer
 	 */
@@ -159,6 +181,7 @@ struct dpu_hw_ctl_ops {
  * @mixer_count: number of mixers
  * @mixer_hw_caps: mixer hardware capabilities
  * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @pending_intf_flush_mask: pending INTF flush
  * @ops: operation list
  */
 struct dpu_hw_ctl {
@@ -171,6 +194,7 @@ struct dpu_hw_ctl {
 	int mixer_count;
 	const struct dpu_lm_cfg *mixer_hw_caps;
 	u32 pending_flush_mask;
+	u32 pending_intf_flush_mask;
 
 	/* ops */
 	struct dpu_hw_ctl_ops ops;
@@ -195,7 +219,7 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
  */
 struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
 		void __iomem *addr,
-		struct dpu_mdss_cfg *m);
+		const struct dpu_mdss_cfg *m);
 
 /**
  * dpu_hw_ctl_destroy(): Destroys ctl driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 8bfa7d0..d84a84f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -800,8 +800,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
 		start_idx = reg_idx * 32;
 		end_idx = start_idx + 32;
 
-		if (start_idx >= ARRAY_SIZE(dpu_irq_map) ||
-				end_idx > ARRAY_SIZE(dpu_irq_map))
+		if (!test_bit(reg_idx, &intr->irq_mask) ||
+			start_idx >= ARRAY_SIZE(dpu_irq_map))
 			continue;
 
 		/*
@@ -955,8 +955,11 @@ static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
 	if (!intr)
 		return -EINVAL;
 
-	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
-		DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff);
+	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+		if (test_bit(i, &intr->irq_mask))
+			DPU_REG_WRITE(&intr->hw,
+					dpu_intr_set[i].clr_off, 0xffffffff);
+	}
 
 	/* ensure register writes go through */
 	wmb();
@@ -971,8 +974,11 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
 	if (!intr)
 		return -EINVAL;
 
-	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
-		DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000);
+	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+		if (test_bit(i, &intr->irq_mask))
+			DPU_REG_WRITE(&intr->hw,
+					dpu_intr_set[i].en_off, 0x00000000);
+	}
 
 	/* ensure register writes go through */
 	wmb();
@@ -991,6 +997,9 @@ static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
 
 	spin_lock_irqsave(&intr->irq_lock, irq_flags);
 	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+		if (!test_bit(i, &intr->irq_mask))
+			continue;
+
 		/* Read interrupt status */
 		intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
 				dpu_intr_set[i].status_off);
@@ -1115,6 +1124,7 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
 		return ERR_PTR(-ENOMEM);
 	}
 
+	intr->irq_mask = m->mdss_irqs;
 	spin_lock_init(&intr->irq_lock);
 
 	return intr;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 4edcf40..fc9c986 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -187,6 +187,7 @@ struct dpu_hw_intr {
 	u32 *save_irq_status;
 	u32 irq_idx_tbl_size;
 	spinlock_t irq_lock;
+	unsigned long irq_mask;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index dcd87cd..efe9a57 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -56,8 +56,10 @@
 #define   INTF_FRAME_COUNT              0x0AC
 #define   INTF_LINE_COUNT               0x0B0
 
-static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
-		struct dpu_mdss_cfg *m,
+#define   INTF_MUX                      0x25C
+
+static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
+		const struct dpu_mdss_cfg *m,
 		void __iomem *addr,
 		struct dpu_hw_blk_reg_map *b)
 {
@@ -218,6 +220,30 @@ static void dpu_hw_intf_setup_prg_fetch(
 	DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
 }
 
+static void dpu_hw_intf_bind_pingpong_blk(
+		struct dpu_hw_intf *intf,
+		bool enable,
+		const enum dpu_pingpong pp)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 mux_cfg;
+
+	if (!intf)
+		return;
+
+	c = &intf->hw;
+
+	mux_cfg = DPU_REG_READ(c, INTF_MUX);
+	mux_cfg &= ~0xf;
+
+	if (enable)
+		mux_cfg |= (pp - PINGPONG_0) & 0x7;
+	else
+		mux_cfg |= 0xf;
+
+	DPU_REG_WRITE(c, INTF_MUX, mux_cfg);
+}
+
 static void dpu_hw_intf_get_status(
 		struct dpu_hw_intf *intf,
 		struct intf_status *s)
@@ -254,16 +280,18 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
 	ops->get_status = dpu_hw_intf_get_status;
 	ops->enable_timing = dpu_hw_intf_enable_timing_engine;
 	ops->get_line_count = dpu_hw_intf_get_line_count;
+	if (cap & BIT(DPU_CTL_ACTIVE_CFG))
+		ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
 }
 
 static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
 		void __iomem *addr,
-		struct dpu_mdss_cfg *m)
+		const struct dpu_mdss_cfg *m)
 {
 	struct dpu_hw_intf *c;
-	struct dpu_intf_cfg *cfg;
+	const struct dpu_intf_cfg *cfg;
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index b03acc2..8546898 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -52,6 +52,8 @@ struct intf_status {
  * @ enable_timing: enable/disable timing engine
  * @ get_status: returns if timing engine is enabled or not
  * @ get_line_count: reads current vertical line counter
+ * @bind_pingpong_blk: enable/disable the connection with pingpong which will
+ *                     feed pixels to this interface
  */
 struct dpu_hw_intf_ops {
 	void (*setup_timing_gen)(struct dpu_hw_intf *intf,
@@ -68,6 +70,10 @@ struct dpu_hw_intf_ops {
 			struct intf_status *status);
 
 	u32 (*get_line_count)(struct dpu_hw_intf *intf);
+
+	void (*bind_pingpong_blk)(struct dpu_hw_intf *intf,
+			bool enable,
+			const enum dpu_pingpong pp);
 };
 
 struct dpu_hw_intf {
@@ -92,7 +98,7 @@ struct dpu_hw_intf {
  */
 struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
 		void __iomem *addr,
-		struct dpu_mdss_cfg *m);
+		const struct dpu_mdss_cfg *m);
 
 /**
  * dpu_hw_intf_destroy(): Destroys INTF driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 5bc39ba..37becd4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -24,8 +24,8 @@
 #define LM_BLEND0_FG_ALPHA               0x04
 #define LM_BLEND0_BG_ALPHA               0x08
 
-static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
-		struct dpu_mdss_cfg *m,
+static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
+		const struct dpu_mdss_cfg *m,
 		void __iomem *addr,
 		struct dpu_hw_blk_reg_map *b)
 {
@@ -147,12 +147,13 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
 	DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
 }
 
-static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
+static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
 		struct dpu_hw_lm_ops *ops,
 		unsigned long features)
 {
 	ops->setup_mixer_out = dpu_hw_lm_setup_out;
-	if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
+	if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion)
+	    || IS_SC7180_TARGET(m->hwversion))
 		ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
 	else
 		ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
@@ -164,10 +165,10 @@ static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
 		void __iomem *addr,
-		struct dpu_mdss_cfg *m)
+		const struct dpu_mdss_cfg *m)
 {
 	struct dpu_hw_mixer *c;
-	struct dpu_lm_cfg *cfg;
+	const struct dpu_lm_cfg *cfg;
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 147ace3..4a6b2de 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -91,7 +91,7 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
  */
 struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
 		void __iomem *addr,
-		struct dpu_mdss_cfg *m);
+		const struct dpu_mdss_cfg *m);
 
 /**
  * dpu_hw_lm_destroy(): Destroys layer mixer driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 5dbaba9..d110a40 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -28,8 +28,8 @@
 #define PP_FBC_BUDGET_CTL               0x038
 #define PP_FBC_LOSSY_MODE               0x03C
 
-static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
-		struct dpu_mdss_cfg *m,
+static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
+		const struct dpu_mdss_cfg *m,
 		void __iomem *addr,
 		struct dpu_hw_blk_reg_map *b)
 {
@@ -195,10 +195,10 @@ static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
 		void __iomem *addr,
-		struct dpu_mdss_cfg *m)
+		const struct dpu_mdss_cfg *m)
 {
 	struct dpu_hw_pingpong *c;
-	struct dpu_pingpong_cfg *cfg;
+	const struct dpu_pingpong_cfg *cfg;
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 58bdb92..3d6f46b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -106,7 +106,7 @@ struct dpu_hw_pingpong {
  */
 struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
 		void __iomem *addr,
-		struct dpu_mdss_cfg *m);
+		const struct dpu_mdss_cfg *m);
 
 /**
  * dpu_hw_pingpong_destroy - destroys pingpong driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 4f8b813..82c5dbf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -132,6 +132,7 @@
 /* traffic shaper clock in Hz */
 #define TS_CLK			19200000
 
+
 static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
 		int s_id,
 		u32 *idx)
@@ -657,7 +658,8 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
 		test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
 		c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
 
-	if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
+	if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
+			test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
 		c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
 		c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
 	}
@@ -666,7 +668,7 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
 		c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
 }
 
-static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
+static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
 		void __iomem *addr,
 		struct dpu_mdss_cfg *catalog,
 		struct dpu_hw_blk_reg_map *b)
@@ -696,7 +698,7 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
 		bool is_virtual_pipe)
 {
 	struct dpu_hw_pipe *hw_pipe;
-	struct dpu_sspp_cfg *cfg;
+	const struct dpu_sspp_cfg *cfg;
 
 	if (!addr || !catalog)
 		return ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index a3680b4..85b018a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -27,7 +27,8 @@ struct dpu_hw_pipe;
  */
 #define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
 	(1UL << DPU_SSPP_SCALER_QSEED2) | \
-	(1UL << DPU_SSPP_SCALER_QSEED3))
+	 (1UL << DPU_SSPP_SCALER_QSEED3) | \
+	  (1UL << DPU_SSPP_SCALER_QSEED4))
 
 /**
  * Component indices
@@ -373,7 +374,7 @@ struct dpu_hw_pipe {
 	struct dpu_hw_blk base;
 	struct dpu_hw_blk_reg_map hw;
 	struct dpu_mdss_cfg *catalog;
-	struct dpu_mdp_cfg *mdp;
+	const struct dpu_mdp_cfg *mdp;
 
 	/* Pipe */
 	enum dpu_sspp idx;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
index 27fbeb5..078afc5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -93,19 +93,12 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
 			DEV_DBG("%pS->%s: enable '%s'\n",
 				__builtin_return_address(0), __func__,
 				clk_arry[i].clk_name);
-			if (clk_arry[i].clk) {
-				rc = clk_prepare_enable(clk_arry[i].clk);
-				if (rc)
-					DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
-						__builtin_return_address(0),
-						__func__,
-						clk_arry[i].clk_name, rc);
-			} else {
-				DEV_ERR("%pS->%s: '%s' is not available\n",
-					__builtin_return_address(0), __func__,
-					clk_arry[i].clk_name);
-				rc = -EPERM;
-			}
+			rc = clk_prepare_enable(clk_arry[i].clk);
+			if (rc)
+				DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+					__builtin_return_address(0),
+					__func__,
+					clk_arry[i].clk_name, rc);
 
 			if (rc && i) {
 				msm_dss_enable_clk(&clk_arry[i - 1],
@@ -119,12 +112,7 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
 				__builtin_return_address(0), __func__,
 				clk_arry[i].clk_name);
 
-			if (clk_arry[i].clk)
-				clk_disable_unprepare(clk_arry[i].clk);
-			else
-				DEV_ERR("%pS->%s: '%s' is not available\n",
-					__builtin_return_address(0), __func__,
-					clk_arry[i].clk_name);
+			clk_disable_unprepare(clk_arry[i].clk);
 		}
 	}
 
@@ -187,6 +175,7 @@ int msm_dss_parse_clock(struct platform_device *pdev,
 			continue;
 		mp->clk_config[i].rate = rate;
 		mp->clk_config[i].type = DSS_CLK_PCLK;
+		mp->clk_config[i].max_rate = rate;
 	}
 
 	mp->num_clk = num_clk;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 6c92f0f..cb08faf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1059,6 +1059,7 @@ static const struct dev_pm_ops dpu_pm_ops = {
 
 static const struct of_device_id dpu_dt_match[] = {
 	{ .compatible = "qcom,sdm845-dpu", },
+	{ .compatible = "qcom,sc7180-dpu", },
 	{}
 };
 MODULE_DEVICE_TABLE(of, dpu_dt_match);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 58d5acb..3b9c33e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -53,8 +53,13 @@ enum {
 	R_MAX
 };
 
+/*
+ * Default Preload Values
+ */
 #define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
 #define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+#define DPU_QSEED4_DEFAULT_PRELOAD_V 0x2
+#define DPU_QSEED4_DEFAULT_PRELOAD_H 0x4
 
 #define DEFAULT_REFRESH_RATE	60
 
@@ -477,8 +482,16 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
 			scale_cfg->src_width[i] /= chroma_subsmpl_h;
 			scale_cfg->src_height[i] /= chroma_subsmpl_v;
 		}
-		scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
-		scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+
+		if (pdpu->pipe_hw->cap->features &
+			BIT(DPU_SSPP_SCALER_QSEED4)) {
+			scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H;
+			scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V;
+		} else {
+			scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+			scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+		}
+
 		pstate->pixel_ext.num_ext_pxls_top[i] =
 			scale_cfg->src_height[i];
 		pstate->pixel_ext.num_ext_pxls_left[i] =
@@ -738,7 +751,7 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
 	} else {
 		pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
 		pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
-	};
+	}
 
 	DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
 		pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
@@ -858,7 +871,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
 					  pdpu->pipe_sblk->maxupscale << 16,
 					  true, true);
 	if (ret) {
-		DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
+		DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
 		return ret;
 	}
 	if (!state->visible)
@@ -884,13 +897,13 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
 		(!(pdpu->features & DPU_SSPP_SCALER) ||
 		 !(pdpu->features & (BIT(DPU_SSPP_CSC)
 		 | BIT(DPU_SSPP_CSC_10BIT))))) {
-		DPU_ERROR_PLANE(pdpu,
+		DPU_DEBUG_PLANE(pdpu,
 				"plane doesn't have scaler/csc for yuv\n");
 		return -EINVAL;
 
 	/* check src bounds */
 	} else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
-		DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+		DPU_DEBUG_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
 				DRM_RECT_ARG(&src));
 		return -E2BIG;
 
@@ -899,19 +912,19 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
 		   (src.x1 & 0x1 || src.y1 & 0x1 ||
 		    drm_rect_width(&src) & 0x1 ||
 		    drm_rect_height(&src) & 0x1)) {
-		DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+		DPU_DEBUG_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
 				DRM_RECT_ARG(&src));
 		return -EINVAL;
 
 	/* min dst support */
 	} else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
-		DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+		DPU_DEBUG_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
 				DRM_RECT_ARG(&dst));
 		return -EINVAL;
 
 	/* check decimated source width */
 	} else if (drm_rect_width(&src) > max_linewidth) {
-		DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+		DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
 				DRM_RECT_ARG(&src), max_linewidth);
 		return -E2BIG;
 	}
@@ -1337,7 +1350,8 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
 			pdpu->debugfs_root, &pdpu->debugfs_src);
 
 	if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
-			cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) {
+			cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
+			cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) {
 		dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
 				sblk->scaler_blk.base + cfg->base,
 				sblk->scaler_blk.len,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index ddc84127..23f5b14 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -141,11 +141,11 @@ int dpu_rm_destroy(struct dpu_rm *rm)
 
 static int _dpu_rm_hw_blk_create(
 		struct dpu_rm *rm,
-		struct dpu_mdss_cfg *cat,
+		const struct dpu_mdss_cfg *cat,
 		void __iomem *mmio,
 		enum dpu_hw_blk_type type,
 		uint32_t id,
-		void *hw_catalog_info)
+		const void *hw_catalog_info)
 {
 	struct dpu_rm_hw_blk *blk;
 	void *hw;
@@ -215,7 +215,7 @@ int dpu_rm_init(struct dpu_rm *rm,
 
 	/* Interrogate HW catalog and create tracking items for hw blocks */
 	for (i = 0; i < cat->mixer_count; i++) {
-		struct dpu_lm_cfg *lm = &cat->mixer[i];
+		const struct dpu_lm_cfg *lm = &cat->mixer[i];
 
 		if (lm->pingpong == PINGPONG_MAX) {
 			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 991f4c8..93ab36b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -299,7 +299,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
 	entry = debugfs_create_dir("vbif", debugfs_root);
 
 	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
-		struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+		const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
 
 		snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
 
@@ -318,7 +318,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
 			(u32 *)&vbif->default_ot_wr_limit);
 
 		for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
-			struct dpu_vbif_dynamic_ot_cfg *cfg =
+			const struct dpu_vbif_dynamic_ot_cfg *cfg =
 					&vbif->dynamic_ot_rd_tbl.cfg[j];
 
 			snprintf(vbif_name, sizeof(vbif_name),
@@ -332,7 +332,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
 		}
 
 		for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
-			struct dpu_vbif_dynamic_ot_cfg *cfg =
+			const struct dpu_vbif_dynamic_ot_cfg *cfg =
 					&vbif->dynamic_ot_wr_tbl.cfg[j];
 
 			snprintf(vbif_name, sizeof(vbif_name),
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
index 772f075..aaf2f26 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
@@ -121,7 +121,7 @@ static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder)
 	if (mdp4_dsi_encoder->enabled)
 		return;
 
-	 mdp4_crtc_set_config(encoder->crtc,
+	mdp4_crtc_set_config(encoder->crtc,
 			MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
 			MDP4_DMA_CONFIG_DEFLKR_EN |
 			MDP4_DMA_CONFIG_DITHER_EN |
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index 9262ed2..c7df71e 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -53,7 +53,7 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
 	if (panel) {
 		drm_panel_attach(panel, connector);
 
-		ret = panel->funcs->get_modes(panel);
+		ret = drm_panel_get_modes(panel, connector);
 
 		drm_panel_detach(panel);
 	}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 1f48f64..e3c4c25 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -902,7 +902,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
 				major, minor);
 		ret = -ENXIO;
 		goto fail;
-	};
+	}
 
 	/* only after mdp5_cfg global pointer's init can we access the hw */
 	for (i = 0; i < num_handlers; i++) {
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index eff1a4c..4de771d 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -178,6 +178,8 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
 int msm_dsi_host_init(struct msm_dsi *msm_dsi);
 int msm_dsi_runtime_suspend(struct device *dev);
 int msm_dsi_runtime_resume(struct device *dev);
+int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host);
 int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
 int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
 void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 86ad3fd..813d69d 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -153,6 +153,10 @@ static const char * const dsi_sdm845_bus_clk_names[] = {
 	"iface", "bus",
 };
 
+static const char * const dsi_sc7180_bus_clk_names[] = {
+	"iface", "bus",
+};
+
 static const struct msm_dsi_config sdm845_dsi_cfg = {
 	.io_offset = DSI_6G_REG_SHIFT,
 	.reg_cfg = {
@@ -167,7 +171,22 @@ static const struct msm_dsi_config sdm845_dsi_cfg = {
 	.num_dsi = 2,
 };
 
+static const struct msm_dsi_config sc7180_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vdda", 21800, 4 },	/* 1.2 V */
+		},
+	},
+	.bus_clk_names = dsi_sc7180_bus_clk_names,
+	.num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names),
+	.io_start = { 0xae94000 },
+	.num_dsi = 1,
+};
+
 static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+	.link_clk_set_rate = dsi_link_clk_set_rate_v2,
 	.link_clk_enable = dsi_link_clk_enable_v2,
 	.link_clk_disable = dsi_link_clk_disable_v2,
 	.clk_init_ver = dsi_clk_init_v2,
@@ -179,6 +198,7 @@ static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
 };
 
 static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+	.link_clk_set_rate = dsi_link_clk_set_rate_6g,
 	.link_clk_enable = dsi_link_clk_enable_6g,
 	.link_clk_disable = dsi_link_clk_disable_6g,
 	.clk_init_ver = NULL,
@@ -190,6 +210,7 @@ static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
 };
 
 static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+	.link_clk_set_rate = dsi_link_clk_set_rate_6g,
 	.link_clk_enable = dsi_link_clk_enable_6g,
 	.link_clk_disable = dsi_link_clk_disable_6g,
 	.clk_init_ver = dsi_clk_init_6g_v2,
@@ -223,6 +244,9 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
 		&msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
 	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
 		&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
+		&sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+
 };
 
 const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 50a37ce..217e24a 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -20,6 +20,7 @@
 #define MSM_DSI_6G_VER_MINOR_V1_4_2	0x10040002
 #define MSM_DSI_6G_VER_MINOR_V2_2_0	0x20000000
 #define MSM_DSI_6G_VER_MINOR_V2_2_1	0x20020001
+#define MSM_DSI_6G_VER_MINOR_V2_4_1	0x20040001
 
 #define MSM_DSI_V2_VER_MINOR_8064	0x0
 
@@ -35,6 +36,7 @@ struct msm_dsi_config {
 };
 
 struct msm_dsi_host_cfg_ops {
+	int (*link_clk_set_rate)(struct msm_dsi_host *msm_host);
 	int (*link_clk_enable)(struct msm_dsi_host *msm_host);
 	void (*link_clk_disable)(struct msm_dsi_host *msm_host);
 	int (*clk_init_ver)(struct msm_dsi_host *msm_host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 458cec8..11ae5b8 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -505,7 +505,7 @@ int msm_dsi_runtime_resume(struct device *dev)
 	return dsi_bus_clk_enable(msm_host);
 }
 
-int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
 {
 	int ret;
 
@@ -515,13 +515,13 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
 	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
 	if (ret) {
 		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
-		goto error;
+		return ret;
 	}
 
 	ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
 	if (ret) {
 		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
-		goto error;
+		return ret;
 	}
 
 	if (msm_host->byte_intf_clk) {
@@ -530,10 +530,18 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
 		if (ret) {
 			pr_err("%s: Failed to set rate byte intf clk, %d\n",
 			       __func__, ret);
-			goto error;
+			return ret;
 		}
 	}
 
+	return 0;
+}
+
+
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+{
+	int ret;
+
 	ret = clk_prepare_enable(msm_host->esc_clk);
 	if (ret) {
 		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
@@ -573,7 +581,7 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
 	return ret;
 }
 
-int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
 {
 	int ret;
 
@@ -584,27 +592,34 @@ int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
 	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
 	if (ret) {
 		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
-		goto error;
+		return ret;
 	}
 
 	ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
 	if (ret) {
 		pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
-		goto error;
+		return ret;
 	}
 
 	ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
 	if (ret) {
 		pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
-		goto error;
+		return ret;
 	}
 
 	ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
 	if (ret) {
 		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
-		goto error;
+		return ret;
 	}
 
+	return 0;
+}
+
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+{
+	int ret;
+
 	ret = clk_prepare_enable(msm_host->byte_clk);
 	if (ret) {
 		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
@@ -818,7 +833,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
 	u32 flags = msm_host->mode_flags;
 	enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
-	u32 data = 0;
+	u32 data = 0, lane_ctrl = 0;
 
 	if (!enable) {
 		dsi_write(msm_host, REG_DSI_CTRL, 0);
@@ -906,9 +921,11 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
 	dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
 		  DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
 
-	if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+	if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) {
+		lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL);
 		dsi_write(msm_host, REG_DSI_LANE_CTRL,
-			DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
+			lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
+	}
 
 	data |= DSI_CTRL_ENABLE;
 
@@ -1996,6 +2013,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
 	 * mdp clock need to be enabled to receive dsi interrupt
 	 */
 	pm_runtime_get_sync(&msm_host->pdev->dev);
+	cfg_hnd->ops->link_clk_set_rate(msm_host);
 	cfg_hnd->ops->link_clk_enable(msm_host);
 
 	/* TODO: vote for bus bandwidth */
@@ -2344,7 +2362,9 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
 	}
 
 	pm_runtime_get_sync(&msm_host->pdev->dev);
-	ret = cfg_hnd->ops->link_clk_enable(msm_host);
+	ret = cfg_hnd->ops->link_clk_set_rate(msm_host);
+	if (!ret)
+		ret = cfg_hnd->ops->link_clk_enable(msm_host);
 	if (ret) {
 		pr_err("%s: failed to enable link clocks. ret=%d\n",
 		       __func__, ret);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 271aa7b..104115d 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -329,7 +329,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
 	 * attached to the drm_panel.
 	 */
 	drm_panel_attach(panel, connector);
-	num = drm_panel_get_modes(panel);
+	num = drm_panel_get_modes(panel, connector);
 	if (!num)
 		return 0;
 
@@ -432,20 +432,8 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
 		}
 	}
 
-	if (panel) {
-		ret = drm_panel_enable(panel);
-		if (ret) {
-			pr_err("%s: enable panel %d failed, %d\n", __func__, id,
-									ret);
-			goto panel_en_fail;
-		}
-	}
-
 	return;
 
-panel_en_fail:
-	if (is_dual_dsi && msm_dsi1)
-		msm_dsi_host_disable(msm_dsi1->host);
 host1_en_fail:
 	msm_dsi_host_disable(host);
 host_en_fail:
@@ -464,12 +452,51 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
 
 static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
 {
-	DBG("");
+	int id = dsi_mgr_bridge_get_id(bridge);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_panel *panel = msm_dsi->panel;
+	bool is_dual_dsi = IS_DUAL_DSI();
+	int ret;
+
+	DBG("id=%d", id);
+	if (!msm_dsi_device_connected(msm_dsi))
+		return;
+
+	/* Do nothing with the host if it is slave-DSI in case of dual DSI */
+	if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+		return;
+
+	if (panel) {
+		ret = drm_panel_enable(panel);
+		if (ret) {
+			pr_err("%s: enable panel %d failed, %d\n", __func__, id,
+									ret);
+		}
+	}
 }
 
 static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
 {
-	DBG("");
+	int id = dsi_mgr_bridge_get_id(bridge);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_panel *panel = msm_dsi->panel;
+	bool is_dual_dsi = IS_DUAL_DSI();
+	int ret;
+
+	DBG("id=%d", id);
+	if (!msm_dsi_device_connected(msm_dsi))
+		return;
+
+	/* Do nothing with the host if it is slave-DSI in case of dual DSI */
+	if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+		return;
+
+	if (panel) {
+		ret = drm_panel_disable(panel);
+		if (ret)
+			pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
+									ret);
+	}
 }
 
 static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
@@ -495,13 +522,6 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
 	if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
 		goto disable_phy;
 
-	if (panel) {
-		ret = drm_panel_disable(panel);
-		if (ret)
-			pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
-									ret);
-	}
-
 	ret = msm_dsi_host_disable(host);
 	if (ret)
 		pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 8f6100d..1c89454 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -751,9 +751,9 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
 	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
 
 	hw = clk_hw_register_mux(dev, clk_name,
-				 (const char *[]){
+				 ((const char *[]){
 				 parent, parent2, parent3, parent4
-				 }, 4, 0, pll_10nm->phy_cmn_mmio +
+				 }), 4, 0, pll_10nm->phy_cmn_mmio +
 				 REG_DSI_10nm_PHY_CMN_CLK_CFG1,
 				 0, 2, 0, NULL);
 	if (IS_ERR(hw)) {
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 8c99e01..6dffd7f 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -554,9 +554,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
 	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
 	snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
 	clks[num++] = clk_register_mux(dev, clk_name,
-			(const char *[]){
+			((const char *[]){
 				parent1, parent2
-			}, 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
+			}), 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
 			REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
 
 	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
index 2950bba..b65b5cc 100644
--- a/drivers/gpu/drm/msm/edp/edp_bridge.c
+++ b/drivers/gpu/drm/msm/edp/edp_bridge.c
@@ -55,8 +55,14 @@ static void edp_bridge_mode_set(struct drm_bridge *bridge,
 	DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if ((connector->encoder != NULL) &&
-			(connector->encoder->bridge == bridge)) {
+		struct drm_encoder *encoder = connector->encoder;
+		struct drm_bridge *first_bridge;
+
+		if (!connector->encoder)
+			continue;
+
+		first_bridge = drm_bridge_chain_get_first_bridge(encoder);
+		if (bridge == first_bridge) {
 			msm_edp_ctrl_timing_cfg(edp->ctrl,
 				adjusted_mode, &connector->display_info);
 			break;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 839822d..58707a1 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -101,7 +101,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
 
 				gpiod_set_value_cansleep(gpio.gpiod, value);
 			}
-		};
+		}
 
 		DBG("gpio off");
 	}
@@ -433,8 +433,10 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
 
 	connector = &hdmi_connector->base;
 
-	drm_connector_init(hdmi->dev, connector, &hdmi_connector_funcs,
-			DRM_MODE_CONNECTOR_HDMIA);
+	drm_connector_init_with_ddc(hdmi->dev, connector,
+				    &hdmi_connector_funcs,
+				    DRM_MODE_CONNECTOR_HDMIA,
+				    hdmi->i2c);
 	drm_connector_helper_add(connector, &msm_hdmi_connector_helper_funcs);
 
 	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index ac678ac..c26219c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1192,7 +1192,8 @@ static int add_display_components(struct device *dev,
 	 * the interfaces to our components list.
 	 */
 	if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
-	    of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
+	    of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss") ||
+	    of_device_is_compatible(dev->of_node, "qcom,sc7180-mdss")) {
 		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
 		if (ret) {
 			DRM_DEV_ERROR(dev, "failed to populate children devices\n");
@@ -1317,6 +1318,7 @@ static const struct of_device_id dt_match[] = {
 	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
 	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
 	{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
+	{ .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU },
 	{}
 };
 MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 71547e7..740bf7c 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -454,8 +454,7 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
 		remaining_jiffies = 0;
 	} else {
 		ktime_t rem = ktime_sub(*timeout, now);
-		struct timespec ts = ktime_to_timespec(rem);
-		remaining_jiffies = timespec_to_jiffies(&ts);
+		remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
 	}
 
 	return remaining_jiffies;
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index cff198b..db48867 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -26,7 +26,7 @@ struct msm_fbdev {
 	struct drm_framebuffer *fb;
 };
 
-static struct fb_ops msm_fb_ops = {
+static const struct fb_ops msm_fb_ops = {
 	.owner = THIS_MODULE,
 	DRM_FB_HELPER_DEFAULT_OPS,
 
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index be5327a..385d496 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -54,7 +54,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 
 	INIT_LIST_HEAD(&submit->node);
 	INIT_LIST_HEAD(&submit->bo_list);
-	ww_acquire_init(&submit->ticket, &reservation_ww_class);
 
 	return submit;
 }
@@ -158,7 +157,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
 		msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
 
 	if (submit->bos[i].flags & BO_LOCKED)
-		ww_mutex_unlock(&msm_obj->base.resv->lock);
+		dma_resv_unlock(msm_obj->base.resv);
 
 	if (backoff && !(submit->bos[i].flags & BO_VALID))
 		submit->bos[i].iova = 0;
@@ -181,8 +180,8 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
 		contended = i;
 
 		if (!(submit->bos[i].flags & BO_LOCKED)) {
-			ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock,
-					&submit->ticket);
+			ret = dma_resv_lock_interruptible(msm_obj->base.resv,
+							  &submit->ticket);
 			if (ret)
 				goto fail;
 			submit->bos[i].flags |= BO_LOCKED;
@@ -203,8 +202,8 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
 	if (ret == -EDEADLK) {
 		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
 		/* we lost out in a seqno race, lock and retry.. */
-		ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock,
-				&submit->ticket);
+		ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv,
+						       &submit->ticket);
 		if (!ret) {
 			submit->bos[contended].flags |= BO_LOCKED;
 			slow_locked = contended;
@@ -390,8 +389,6 @@ static void submit_cleanup(struct msm_gem_submit *submit)
 		list_del_init(&msm_obj->submit_entry);
 		drm_gem_object_put(&msm_obj->base);
 	}
-
-	ww_acquire_fini(&submit->ticket);
 }
 
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
@@ -408,6 +405,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	struct msm_ringbuffer *ring;
 	int out_fence_fd = -1;
 	struct pid *pid = get_pid(task_pid(current));
+	bool has_ww_ticket = false;
 	unsigned i;
 	int ret, submitid;
 	if (!gpu)
@@ -489,6 +487,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
+	/* copy_*_user while holding a ww ticket upsets lockdep */
+	ww_acquire_init(&submit->ticket, &reservation_ww_class);
+	has_ww_ticket = true;
 	ret = submit_lock_objects(submit);
 	if (ret)
 		goto out;
@@ -588,6 +589,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 
 out:
 	submit_cleanup(submit);
+	if (has_ww_ticket)
+		ww_acquire_fini(&submit->ticket);
 	if (ret)
 		msm_gem_submit_free(submit);
 out_unlock:
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index ab8f0f9c..be5bc2e8 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -111,8 +111,15 @@ struct msm_gpu {
 	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
 	uint32_t fast_rate;
 
+	/* The gfx-mem interconnect path that's used by all GPU types. */
 	struct icc_path *icc_path;
 
+	/*
+	 * Second interconnect path for some A3xx and all A4xx GPUs to the
+	 * On Chip MEMory (OCMEM).
+	 */
+	struct icc_path *ocmem_icc_path;
+
 	/* Hang and Inactivity Detection:
 	 */
 #define DRM_MSM_INACTIVE_PERIOD   66 /* in ms (roughly four frames) */
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index 4eb9474..9eca160 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -31,7 +31,7 @@ static int mxsfb_panel_get_modes(struct drm_connector *connector)
 			drm_connector_to_mxsfb_drm_private(connector);
 
 	if (mxsfb->panel)
-		return drm_panel_get_modes(mxsfb->panel);
+		return drm_panel_get_modes(mxsfb->panel, connector);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 3558df0..d6e4ae1 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -2,7 +2,8 @@
 config DRM_NOUVEAU
 	tristate "Nouveau (NVIDIA) cards"
 	depends on DRM && PCI && MMU
-        select FW_LOADER
+	select IOMMU_API
+	select FW_LOADER
 	select DRM_KMS_HELPER
 	select DRM_TTM
 	select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
@@ -16,6 +17,7 @@
 	select INPUT if ACPI && X86
 	select THERMAL if ACPI && X86
 	select ACPI_VIDEO if ACPI && X86
+	select SND_HDA_COMPONENT if SND_HDA_CORE
 	help
 	  Choose this option for open-source NVIDIA support.
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 3624955..9d4a2d9 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -53,8 +53,8 @@ struct nv_sim_state {
 static void
 nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
 {
-	int pagemiss, cas, width, bpp;
-	int nvclks, mclks, pclks, crtpagemiss;
+	int pagemiss, cas, bpp;
+	int nvclks, mclks, crtpagemiss;
 	int found, mclk_extra, mclk_loop, cbs, m1, p1;
 	int mclk_freq, pclk_freq, nvclk_freq;
 	int us_m, us_n, us_p, crtc_drain_rate;
@@ -65,11 +65,9 @@ nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
 	nvclk_freq = arb->nvclk_khz;
 	pagemiss = arb->mem_page_miss;
 	cas = arb->mem_latency;
-	width = arb->memory_width >> 6;
 	bpp = arb->bpp;
 	cbs = 128;
 
-	pclks = 2;
 	nvclks = 10;
 	mclks = 13 + cas;
 	mclk_extra = 3;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 03466f0..3a9489e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -644,16 +644,13 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
 	int i;
 
 	if (nouveau_tv_norm) {
-		for (i = 0; i < num_tv_norms; i++) {
-			if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) {
-				tv_enc->tv_norm = i;
-				break;
-			}
-		}
-
-		if (i == num_tv_norms)
+		i = match_string(nv17_tv_norm_names, num_tv_norms,
+				 nouveau_tv_norm);
+		if (i < 0)
 			NV_WARN(drm, "Invalid TV norm setting \"%s\"\n",
 				nouveau_tv_norm);
+		else
+			tv_enc->tv_norm = i;
 	}
 
 	drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index 5f2de77..224a34c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -75,12 +75,16 @@ base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 	}
 }
 
-static void
-base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+static bool
+base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
 {
-	asyw->xlut.i.mode = 7;
+	if (size != 256 && size != 1024)
+		return false;
+
+	asyw->xlut.i.mode = size == 1024 ? 4 : 7;
 	asyw->xlut.i.enable = 2;
 	asyw->xlut.i.load = head907d_olut_load;
+	return true;
 }
 
 static inline u32
@@ -160,6 +164,7 @@ base907c = {
 	.csc_set = base907c_csc_set,
 	.csc_clr = base907c_csc_clr,
 	.olut_core = true,
+	.ilut_size = 1024,
 	.xlut_set = base907c_xlut_set,
 	.xlut_clr = base907c_xlut_clr,
 	.image_set = base907c_image_set,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index df8336b..ff94f3f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -6,6 +6,7 @@
 struct nv50_core {
 	const struct nv50_core_func *func;
 	struct nv50_dmac chan;
+	bool assign_windows;
 };
 
 int nv50_core_new(struct nouveau_drm *, struct nv50_core **);
@@ -18,6 +19,10 @@ struct nv50_core_func {
 			      struct nvif_device *);
 	void (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
 
+	struct {
+		void (*owner)(struct nv50_core *);
+	} wndw;
+
 	const struct nv50_head_func *head;
 	const struct nv50_outp_func {
 		void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
@@ -48,6 +53,7 @@ int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
 int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
 int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
 void corec37d_update(struct nv50_core *, u32 *, bool);
+void corec37d_wndw_owner(struct nv50_core *);
 extern const struct nv50_outp_func sorc37d;
 
 int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 40d9b65..3b36dc8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -25,6 +25,20 @@
 #include <nouveau_bo.h>
 
 void
+corec37d_wndw_owner(struct nv50_core *core)
+{
+	const u32 windows = 8; /*XXX*/
+	u32 *push, i;
+	if ((push = evo_wait(&core->chan, 2 * windows))) {
+		for (i = 0; i < windows; i++) {
+			evo_mthd(push, 0x1000 + (i * 0x080), 1);
+			evo_data(push, i >> 1);
+		}
+		evo_kick(push, &core->chan);
+	}
+}
+
+void
 corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
 {
 	u32 *push;
@@ -76,20 +90,18 @@ corec37d_init(struct nv50_core *core)
 {
 	const u32 windows = 8; /*XXX*/
 	u32 *push, i;
-	if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) {
+	if ((push = evo_wait(&core->chan, 2 + 5 * windows))) {
 		evo_mthd(push, 0x0208, 1);
 		evo_data(push, core->chan.sync.handle);
 		for (i = 0; i < windows; i++) {
-			evo_mthd(push, 0x1000 + (i * 0x080), 3);
-			evo_data(push, i >> 1);
+			evo_mthd(push, 0x1004 + (i * 0x080), 2);
 			evo_data(push, 0x0000001f);
 			evo_data(push, 0x00000000);
 			evo_mthd(push, 0x1010 + (i * 0x080), 1);
 			evo_data(push, 0x00127fff);
 		}
-		evo_mthd(push, 0x0200, 1);
-		evo_data(push, 0x00000001);
 		evo_kick(push, &core->chan);
+		core->assign_windows = true;
 	}
 }
 
@@ -99,6 +111,7 @@ corec37d = {
 	.ntfy_init = corec37d_ntfy_init,
 	.ntfy_wait_done = corec37d_ntfy_wait_done,
 	.update = corec37d_update,
+	.wndw.owner = corec37d_wndw_owner,
 	.head = &headc37d,
 	.sor = &sorc37d,
 };
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index b606d68..147adcd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -27,20 +27,18 @@ corec57d_init(struct nv50_core *core)
 {
 	const u32 windows = 8; /*XXX*/
 	u32 *push, i;
-	if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) {
+	if ((push = evo_wait(&core->chan, 2 + 5 * windows))) {
 		evo_mthd(push, 0x0208, 1);
 		evo_data(push, core->chan.sync.handle);
 		for (i = 0; i < windows; i++) {
-			evo_mthd(push, 0x1000 + (i * 0x080), 3);
-			evo_data(push, i >> 1);
+			evo_mthd(push, 0x1004 + (i * 0x080), 2);
 			evo_data(push, 0x0000000f);
 			evo_data(push, 0x00000000);
 			evo_mthd(push, 0x1010 + (i * 0x080), 1);
 			evo_data(push, 0x00117fff);
 		}
-		evo_mthd(push, 0x0200, 1);
-		evo_data(push, 0x00000001);
 		evo_kick(push, &core->chan);
+		core->assign_windows = true;
 	}
 }
 
@@ -50,6 +48,7 @@ corec57d = {
 	.ntfy_init = corec37d_ntfy_init,
 	.ntfy_wait_done = corec37d_ntfy_wait_done,
 	.update = corec37d_update,
+	.wndw.owner = corec37d_wndw_owner,
 	.head = &headc57d,
 	.sor = &sorc37d,
 };
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 63425e2..a3dc2ba 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -29,6 +29,7 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/hdmi.h>
+#include <linux/component.h>
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_dp_helper.h>
@@ -476,12 +477,113 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
 	return 0;
 }
 
+/*
+ * audio component binding for ELD notification
+ */
+static void
+nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port)
+{
+	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+						 port, -1);
+}
+
+static int
+nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
+			     bool *enabled, unsigned char *buf, int max_bytes)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(kdev);
+	struct nouveau_drm *drm = nouveau_drm(drm_dev);
+	struct drm_encoder *encoder;
+	struct nouveau_encoder *nv_encoder;
+	struct nouveau_connector *nv_connector;
+	struct nouveau_crtc *nv_crtc;
+	int ret = 0;
+
+	*enabled = false;
+	drm_for_each_encoder(encoder, drm->dev) {
+		nv_encoder = nouveau_encoder(encoder);
+		nv_connector = nouveau_encoder_connector_get(nv_encoder);
+		nv_crtc = nouveau_crtc(encoder->crtc);
+		if (!nv_connector || !nv_crtc || nv_crtc->index != port)
+			continue;
+		*enabled = drm_detect_monitor_audio(nv_connector->edid);
+		if (*enabled) {
+			ret = drm_eld_size(nv_connector->base.eld);
+			memcpy(buf, nv_connector->base.eld,
+			       min(max_bytes, ret));
+		}
+		break;
+	}
+	return ret;
+}
+
+static const struct drm_audio_component_ops nv50_audio_component_ops = {
+	.get_eld = nv50_audio_component_get_eld,
+};
+
+static int
+nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev,
+			  void *data)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(kdev);
+	struct nouveau_drm *drm = nouveau_drm(drm_dev);
+	struct drm_audio_component *acomp = data;
+
+	if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
+		return -ENOMEM;
+
+	drm_modeset_lock_all(drm_dev);
+	acomp->ops = &nv50_audio_component_ops;
+	acomp->dev = kdev;
+	drm->audio.component = acomp;
+	drm_modeset_unlock_all(drm_dev);
+	return 0;
+}
+
+static void
+nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev,
+			    void *data)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(kdev);
+	struct nouveau_drm *drm = nouveau_drm(drm_dev);
+	struct drm_audio_component *acomp = data;
+
+	drm_modeset_lock_all(drm_dev);
+	drm->audio.component = NULL;
+	acomp->ops = NULL;
+	acomp->dev = NULL;
+	drm_modeset_unlock_all(drm_dev);
+}
+
+static const struct component_ops nv50_audio_component_bind_ops = {
+	.bind   = nv50_audio_component_bind,
+	.unbind = nv50_audio_component_unbind,
+};
+
+static void
+nv50_audio_component_init(struct nouveau_drm *drm)
+{
+	if (!component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
+		drm->audio.component_registered = true;
+}
+
+static void
+nv50_audio_component_fini(struct nouveau_drm *drm)
+{
+	if (drm->audio.component_registered) {
+		component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
+		drm->audio.component_registered = false;
+	}
+}
+
 /******************************************************************************
  * Audio
  *****************************************************************************/
 static void
 nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
 {
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nv50_disp *disp = nv50_disp(encoder->dev);
 	struct {
@@ -496,11 +598,14 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
 	};
 
 	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
+
+	nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
 }
 
 static void
 nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
 {
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nouveau_connector *nv_connector;
@@ -527,6 +632,8 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
 
 	nvif_mthd(&disp->disp->object, 0, &args,
 		  sizeof(args.base) + drm_eld_size(args.data));
+
+	nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
 }
 
 /******************************************************************************
@@ -660,7 +767,6 @@ struct nv50_mstm {
 	struct nouveau_encoder *outp;
 
 	struct drm_dp_mst_topology_mgr mgr;
-	struct nv50_msto *msto[4];
 
 	bool modified;
 	bool disabled;
@@ -726,7 +832,6 @@ nv50_msto_cleanup(struct nv50_msto *msto)
 	drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
 
 	msto->mstc = NULL;
-	msto->head = NULL;
 	msto->disabled = false;
 }
 
@@ -806,11 +911,11 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
 		 * topology
 		 */
 		asyh->or.bpc = min(connector->display_info.bpc, 8U);
-		asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3);
+		asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3, false);
 	}
 
 	slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
-					      asyh->dp.pbn);
+					      asyh->dp.pbn, 0);
 	if (slots < 0)
 		return slots;
 
@@ -872,7 +977,6 @@ nv50_msto_enable(struct drm_encoder *encoder)
 	mstm->outp->update(mstm->outp, head->base.index, armh, proto,
 			   nv50_dp_bpc_to_depth(armh->or.bpc));
 
-	msto->head = head;
 	msto->mstc = mstc;
 	mstm->modified = true;
 }
@@ -913,45 +1017,40 @@ nv50_msto = {
 	.destroy = nv50_msto_destroy,
 };
 
-static int
-nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
-	      struct nv50_msto **pmsto)
+static struct nv50_msto *
+nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id)
 {
 	struct nv50_msto *msto;
 	int ret;
 
-	if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
-		return -ENOMEM;
+	msto = kzalloc(sizeof(*msto), GFP_KERNEL);
+	if (!msto)
+		return ERR_PTR(-ENOMEM);
 
 	ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
-			       DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
+			       DRM_MODE_ENCODER_DPMST, "mst-%d", id);
 	if (ret) {
-		kfree(*pmsto);
-		*pmsto = NULL;
-		return ret;
+		kfree(msto);
+		return ERR_PTR(ret);
 	}
 
 	drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
-	msto->encoder.possible_crtcs = heads;
-	return 0;
+	msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base);
+	msto->head = head;
+	return msto;
 }
 
 static struct drm_encoder *
 nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
 			      struct drm_connector_state *connector_state)
 {
-	struct nv50_head *head = nv50_head(connector_state->crtc);
 	struct nv50_mstc *mstc = nv50_mstc(connector);
+	struct drm_crtc *crtc = connector_state->crtc;
 
-	return &mstc->mstm->msto[head->base.index]->encoder;
-}
+	if (!(mstc->mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
+		return NULL;
 
-static struct drm_encoder *
-nv50_mstc_best_encoder(struct drm_connector *connector)
-{
-	struct nv50_mstc *mstc = nv50_mstc(connector);
-
-	return &mstc->mstm->msto[0]->encoder;
+	return &nv50_head(crtc)->msto->encoder;
 }
 
 static enum drm_mode_status
@@ -1038,7 +1137,6 @@ static const struct drm_connector_helper_funcs
 nv50_mstc_help = {
 	.get_modes = nv50_mstc_get_modes,
 	.mode_valid = nv50_mstc_mode_valid,
-	.best_encoder = nv50_mstc_best_encoder,
 	.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
 	.atomic_check = nv50_mstc_atomic_check,
 	.detect_ctx = nv50_mstc_detect,
@@ -1071,8 +1169,9 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
 	      const char *path, struct nv50_mstc **pmstc)
 {
 	struct drm_device *dev = mstm->outp->base.base.dev;
+	struct drm_crtc *crtc;
 	struct nv50_mstc *mstc;
-	int ret, i;
+	int ret;
 
 	if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
 		return -ENOMEM;
@@ -1092,8 +1191,13 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
 	mstc->connector.funcs->reset(&mstc->connector);
 	nouveau_conn_attach_properties(&mstc->connector);
 
-	for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
-		drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
+	drm_for_each_crtc(crtc, dev) {
+		if (!(mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
+			continue;
+
+		drm_connector_attach_encoder(&mstc->connector,
+					     &nv50_head(crtc)->msto->encoder);
+	}
 
 	drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
 	drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
@@ -1367,7 +1471,7 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
 	const int max_payloads = hweight8(outp->dcb->heads);
 	struct drm_device *dev = outp->base.base.dev;
 	struct nv50_mstm *mstm;
-	int ret, i;
+	int ret;
 	u8 dpcd;
 
 	/* This is a workaround for some monitors not functioning
@@ -1390,13 +1494,6 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
 	if (ret)
 		return ret;
 
-	for (i = 0; i < max_payloads; i++) {
-		ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
-				    i, &mstm->msto[i]);
-		if (ret)
-			return ret;
-	}
-
 	return 0;
 }
 
@@ -1569,17 +1666,24 @@ nv50_sor_func = {
 	.destroy = nv50_sor_destroy,
 };
 
+static bool nv50_has_mst(struct nouveau_drm *drm)
+{
+	struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
+	u32 data;
+	u8 ver, hdr, cnt, len;
+
+	data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
+	return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);
+}
+
 static int
 nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
-	struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
 	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
-	u8 ver, hdr, cnt, len;
-	u32 data;
 	int type, ret;
 
 	switch (dcbe->type) {
@@ -1624,10 +1728,9 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 		}
 
 		if (nv_connector->type != DCB_CONNECTOR_eDP &&
-		    (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
-		    ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
-			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
-					    nv_connector->base.base.id,
+		    nv50_has_mst(drm)) {
+			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux,
+					    16, nv_connector->base.base.id,
 					    &nv_encoder->dp.mstm);
 			if (ret)
 				return ret;
@@ -1673,7 +1776,6 @@ nv50_pior_enable(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-	struct nouveau_connector *nv_connector;
 	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
 	struct nv50_core *core = nv50_disp(encoder->dev)->core;
 	u8 owner = 1 << nv_crtc->index;
@@ -1681,7 +1783,6 @@ nv50_pior_enable(struct drm_encoder *encoder)
 
 	nv50_outp_acquire(nv_encoder);
 
-	nv_connector = nouveau_encoder_connector_get(nv_encoder);
 	switch (asyh->or.bpc) {
 	case 10: asyh->or.depth = 0x6; break;
 	case  8: asyh->or.depth = 0x5; break;
@@ -1832,6 +1933,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv50_disp *disp = nv50_disp(dev);
 	struct nv50_atom *atom = nv50_atom(state);
+	struct nv50_core *core = disp->core;
 	struct nv50_outp_atom *outp, *outt;
 	u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
 	int i;
@@ -1950,6 +2052,21 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 		}
 	}
 
+	/* Update window->head assignment.
+	 *
+	 * This has to happen in an update that's not interlocked with
+	 * any window channels to avoid hitting HW error checks.
+	 *
+	 *TODO: Proper handling of window ownership (Turing apparently
+	 *      supports non-fixed mappings).
+	 */
+	if (core->assign_windows) {
+		core->func->wndw.owner(core);
+		core->func->update(core, interlock, false);
+		core->assign_windows = false;
+		interlock[NV50_DISP_INTERLOCK_CORE] = 0;
+	}
+
 	/* Update plane(s). */
 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
 		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
@@ -2302,6 +2419,8 @@ nv50_display_destroy(struct drm_device *dev)
 {
 	struct nv50_disp *disp = nv50_disp(dev);
 
+	nv50_audio_component_fini(nouveau_drm(dev));
+
 	nv50_core_del(&disp->core);
 
 	nouveau_bo_unmap(disp->sync);
@@ -2323,6 +2442,7 @@ nv50_display_create(struct drm_device *dev)
 	struct nv50_disp *disp;
 	struct dcb_output *dcbe;
 	int crtcs, ret, i;
+	bool has_mst = nv50_has_mst(drm);
 
 	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
 	if (!disp)
@@ -2371,11 +2491,37 @@ nv50_display_create(struct drm_device *dev)
 		crtcs = 0x3;
 
 	for (i = 0; i < fls(crtcs); i++) {
+		struct nv50_head *head;
+
 		if (!(crtcs & (1 << i)))
 			continue;
-		ret = nv50_head_create(dev, i);
-		if (ret)
+
+		head = nv50_head_create(dev, i);
+		if (IS_ERR(head)) {
+			ret = PTR_ERR(head);
 			goto out;
+		}
+
+		if (has_mst) {
+			head->msto = nv50_msto_new(dev, head, i);
+			if (IS_ERR(head->msto)) {
+				ret = PTR_ERR(head->msto);
+				head->msto = NULL;
+				goto out;
+			}
+
+			/*
+			 * FIXME: This is a hack to workaround the following
+			 * issues:
+			 *
+			 * https://gitlab.gnome.org/GNOME/mutter/issues/759
+			 * https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277
+			 *
+			 * Once these issues are closed, this should be
+			 * removed
+			 */
+			head->msto->encoder.possible_crtcs = crtcs;
+		}
 	}
 
 	/* create encoder/connector objects based on VBIOS DCB table */
@@ -2423,6 +2569,8 @@ nv50_display_create(struct drm_device *dev)
 	/* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
 	dev->vblank_disable_immediate = true;
 
+	nv50_audio_component_init(drm);
+
 out:
 	if (ret)
 		nv50_display_destroy(dev);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 7c41b05..d54fe00 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -4,6 +4,8 @@
 
 #include "nouveau_display.h"
 
+struct nv50_msto;
+
 struct nv50_disp {
 	struct nvif_disp *disp;
 	struct nv50_core *core;
@@ -78,14 +80,14 @@ void evo_kick(u32 *, struct nv50_dmac *);
 
 #define evo_mthd(p, m, s) do {						\
 	const u32 _m = (m), _s = (s);					\
-	if (drm_debug & DRM_UT_KMS)					\
+	if (drm_debug_enabled(DRM_UT_KMS))				\
 		pr_err("%04x %d %s\n", _m, _s, __func__);		\
 	*((p)++) = ((_s << 18) | _m);					\
 } while(0)
 
 #define evo_data(p, d) do {						\
 	const u32 _d = (d);						\
-	if (drm_debug & DRM_UT_KMS)					\
+	if (drm_debug_enabled(DRM_UT_KMS))				\
 		pr_err("\t%08x\n", _d);					\
 	*((p)++) = _d;							\
 } while(0)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index c9692df..d9d6460 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -213,6 +213,7 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
 {
 	struct nv50_disp *disp = nv50_disp(head->base.base.dev);
 	struct drm_property_blob *olut = asyh->state.gamma_lut;
+	int size;
 
 	/* Determine whether core output LUT should be enabled. */
 	if (olut) {
@@ -229,14 +230,23 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
 		}
 	}
 
-	if (!olut && !head->func->olut_identity) {
-		asyh->olut.handle = 0;
-		return 0;
+	if (!olut) {
+		if (!head->func->olut_identity) {
+			asyh->olut.handle = 0;
+			return 0;
+		}
+		size = 0;
+	} else {
+		size = drm_color_lut_size(olut);
 	}
 
+	if (!head->func->olut(head, asyh, size)) {
+		DRM_DEBUG_KMS("Invalid olut\n");
+		return -EINVAL;
+	}
 	asyh->olut.handle = disp->core->chan.vram.handle;
 	asyh->olut.buffer = !asyh->olut.buffer;
-	head->func->olut(head, asyh);
+
 	return 0;
 }
 
@@ -473,7 +483,7 @@ nv50_head_func = {
 	.atomic_destroy_state = nv50_head_atomic_destroy_state,
 };
 
-int
+struct nv50_head *
 nv50_head_create(struct drm_device *dev, int index)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
@@ -485,7 +495,7 @@ nv50_head_create(struct drm_device *dev, int index)
 
 	head = kzalloc(sizeof(*head), GFP_KERNEL);
 	if (!head)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	head->func = disp->core->func->head;
 	head->base.index = index;
@@ -503,27 +513,26 @@ nv50_head_create(struct drm_device *dev, int index)
 		ret = nv50_curs_new(drm, head->base.index, &curs);
 	if (ret) {
 		kfree(head);
-		return ret;
+		return ERR_PTR(ret);
 	}
 
 	crtc = &head->base.base;
 	drm_crtc_init_with_planes(dev, crtc, &base->plane, &curs->plane,
 				  &nv50_head_func, "head-%d", head->base.index);
 	drm_crtc_helper_add(crtc, &nv50_head_help);
+	/* Keep the legacy gamma size at 256 to avoid compatibility issues */
 	drm_mode_crtc_set_gamma_size(crtc, 256);
-	if (disp->disp->object.oclass >= GF110_DISP)
-		drm_crtc_enable_color_mgmt(crtc, 256, true, 256);
-	else
-		drm_crtc_enable_color_mgmt(crtc, 0, false, 256);
+	drm_crtc_enable_color_mgmt(crtc, base->func->ilut_size,
+				   disp->disp->object.oclass >= GF110_DISP,
+				   head->func->olut_size);
 
 	if (head->func->olut_set) {
 		ret = nv50_lut_init(disp, &drm->client.mmu, &head->olut);
-		if (ret)
-			goto out;
+		if (ret) {
+			nv50_head_destroy(crtc);
+			return ERR_PTR(ret);
+		}
 	}
 
-out:
-	if (ret)
-		nv50_head_destroy(crtc);
-	return ret;
+	return head;
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index d1c002f..c32b27c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -11,17 +11,19 @@ struct nv50_head {
 	const struct nv50_head_func *func;
 	struct nouveau_crtc base;
 	struct nv50_lut olut;
+	struct nv50_msto *msto;
 };
 
-int nv50_head_create(struct drm_device *, int index);
+struct nv50_head *nv50_head_create(struct drm_device *, int index);
 void nv50_head_flush_set(struct nv50_head *, struct nv50_head_atom *);
 void nv50_head_flush_clr(struct nv50_head *, struct nv50_head_atom *, bool y);
 
 struct nv50_head_func {
 	void (*view)(struct nv50_head *, struct nv50_head_atom *);
 	void (*mode)(struct nv50_head *, struct nv50_head_atom *);
-	void (*olut)(struct nv50_head *, struct nv50_head_atom *);
+	bool (*olut)(struct nv50_head *, struct nv50_head_atom *, int);
 	bool olut_identity;
+	int  olut_size;
 	void (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
 	void (*olut_clr)(struct nv50_head *);
 	void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
@@ -43,7 +45,7 @@ struct nv50_head_func {
 extern const struct nv50_head_func head507d;
 void head507d_view(struct nv50_head *, struct nv50_head_atom *);
 void head507d_mode(struct nv50_head *, struct nv50_head_atom *);
-void head507d_olut(struct nv50_head *, struct nv50_head_atom *);
+bool head507d_olut(struct nv50_head *, struct nv50_head_atom *, int);
 void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *);
 void head507d_core_clr(struct nv50_head *);
 int head507d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
@@ -60,7 +62,7 @@ extern const struct nv50_head_func head827d;
 extern const struct nv50_head_func head907d;
 void head907d_view(struct nv50_head *, struct nv50_head_atom *);
 void head907d_mode(struct nv50_head *, struct nv50_head_atom *);
-void head907d_olut(struct nv50_head *, struct nv50_head_atom *);
+bool head907d_olut(struct nv50_head *, struct nv50_head_atom *, int);
 void head907d_olut_set(struct nv50_head *, struct nv50_head_atom *);
 void head907d_olut_clr(struct nv50_head *);
 void head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 7561be5..66ccf36 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -271,15 +271,19 @@ head507d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
 	writew(readw(mem - 4), mem + 4);
 }
 
-void
-head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+bool
+head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
 {
+	if (size != 256)
+		return false;
+
 	if (asyh->base.cpp == 1)
 		asyh->olut.mode = 0;
 	else
 		asyh->olut.mode = 1;
 
 	asyh->olut.load = head507d_olut_load;
+	return true;
 }
 
 void
@@ -328,6 +332,7 @@ head507d = {
 	.view = head507d_view,
 	.mode = head507d_mode,
 	.olut = head507d_olut,
+	.olut_size = 256,
 	.olut_set = head507d_olut_set,
 	.olut_clr = head507d_olut_clr,
 	.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
index af5e7bd..1187711 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head827d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
@@ -108,6 +108,7 @@ head827d = {
 	.view = head507d_view,
 	.mode = head507d_mode,
 	.olut = head507d_olut,
+	.olut_size = 256,
 	.olut_set = head827d_olut_set,
 	.olut_clr = head827d_olut_clr,
 	.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index c2d09dd..3002ec2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -230,11 +230,15 @@ head907d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
 	writew(readw(mem - 4), mem + 4);
 }
 
-void
-head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+bool
+head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
 {
-	asyh->olut.mode = 7;
+	if (size != 256 && size != 1024)
+		return false;
+
+	asyh->olut.mode = size == 1024 ? 4 : 7;
 	asyh->olut.load = head907d_olut_load;
+	return true;
 }
 
 void
@@ -285,6 +289,7 @@ head907d = {
 	.view = head907d_view,
 	.mode = head907d_mode,
 	.olut = head907d_olut,
+	.olut_size = 1024,
 	.olut_set = head907d_olut_set,
 	.olut_clr = head907d_olut_clr,
 	.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index 303df84..76958ce 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -83,6 +83,7 @@ head917d = {
 	.view = head907d_view,
 	.mode = head907d_mode,
 	.olut = head907d_olut,
+	.olut_size = 1024,
 	.olut_set = head907d_olut_set,
 	.olut_clr = head907d_olut_clr,
 	.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index ef6a99d..00011ce 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -148,14 +148,18 @@ headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 }
 
-static void
-headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+static bool
+headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
 {
+	if (size != 256 && size != 1024)
+		return false;
+
 	asyh->olut.mode = 2;
-	asyh->olut.size = 0;
+	asyh->olut.size = size == 1024 ? 2 : 0;
 	asyh->olut.range = 0;
 	asyh->olut.output_mode = 1;
 	asyh->olut.load = head907d_olut_load;
+	return true;
 }
 
 static void
@@ -201,6 +205,7 @@ headc37d = {
 	.view = headc37d_view,
 	.mode = headc37d_mode,
 	.olut = headc37d_olut,
+	.olut_size = 1024,
 	.olut_set = headc37d_olut_set,
 	.olut_clr = headc37d_olut_clr,
 	.curs_layout = head917d_curs_layout,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index 32a7f9e..938d910 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -151,17 +151,20 @@ headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
 	writew(readw(mem - 4), mem + 4);
 }
 
-void
-headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+bool
+headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
 {
+	if (size != 0 && size != 256 && size != 1024)
+		return false;
+
 	asyh->olut.mode = 2; /* DIRECT10 */
 	asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */;
 	asyh->olut.output_mode = 1; /* INTERPOLATE_ENABLE. */
-	if (asyh->state.gamma_lut &&
-	    asyh->state.gamma_lut->length / sizeof(struct drm_color_lut) == 256)
+	if (size == 256)
 		asyh->olut.load = headc57d_olut_load_8;
 	else
 		asyh->olut.load = headc57d_olut_load;
+	return true;
 }
 
 static void
@@ -194,6 +197,7 @@ headc57d = {
 	.mode = headc57d_mode,
 	.olut = headc57d_olut,
 	.olut_identity = true,
+	.olut_size = 1024,
 	.olut_set = headc57d_olut_set,
 	.olut_clr = headc57d_olut_clr,
 	.curs_layout = head917d_curs_layout,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.c b/drivers/gpu/drm/nouveau/dispnv50/lut.c
index 994def4..4e95ca5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/lut.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.c
@@ -49,7 +49,7 @@ nv50_lut_load(struct nv50_lut *lut, int buffer, struct drm_property_blob *blob,
 			kvfree(in);
 		}
 	} else {
-		load(in, blob->length / sizeof(*in), mem);
+		load(in, drm_color_lut_size(blob), mem);
 	}
 
 	return addr;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 5193b62..8903152 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -318,7 +318,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
 	return wndw->func->acquire(wndw, asyw, asyh);
 }
 
-static void
+static int
 nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
 			   struct nv50_wndw_atom *armw,
 			   struct nv50_wndw_atom *asyw,
@@ -340,7 +340,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
 		 */
 		if (!(ilut = asyh->state.gamma_lut)) {
 			asyw->visible = false;
-			return;
+			return 0;
 		}
 
 		if (wndw->func->ilut)
@@ -359,7 +359,10 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
 	/* Recalculate LUT state. */
 	memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
 	if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
-		wndw->func->ilut(wndw, asyw);
+		if (!wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut))) {
+			DRM_DEBUG_KMS("Invalid ilut\n");
+			return -EINVAL;
+		}
 		asyw->xlut.handle = wndw->wndw.vram.handle;
 		asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
 		asyw->set.xlut = true;
@@ -384,6 +387,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
 
 	/* Can't do an immediate flip while changing the LUT. */
 	asyh->state.async_flip = false;
+	return 0;
 }
 
 static int
@@ -424,8 +428,11 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
 	    (!armw->visible ||
 	     asyh->state.color_mgmt_changed ||
 	     asyw->state.fb->format->format !=
-	     armw->state.fb->format->format))
-		nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);
+	     armw->state.fb->format->format)) {
+		ret = nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);
+		if (ret)
+			return ret;
+	}
 
 	/* Calculate new window state. */
 	if (asyw->visible) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index c63bd3bd..caf3974 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -64,12 +64,13 @@ struct nv50_wndw_func {
 	void (*ntfy_clr)(struct nv50_wndw *);
 	int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
 			       struct nvif_device *);
-	void (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+	bool (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *, int);
 	void (*csc)(struct nv50_wndw *, struct nv50_wndw_atom *,
 		    const struct drm_color_ctm *);
 	void (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
 	void (*csc_clr)(struct nv50_wndw *);
 	bool ilut_identity;
+	int  ilut_size;
 	bool olut_core;
 	void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
 	void (*xlut_clr)(struct nv50_wndw *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index 0f94021..b92dc34 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -71,14 +71,18 @@ wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 	}
 }
 
-static void
-wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+static bool
+wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
 {
+	if (size != 256 && size != 1024)
+		return false;
+
 	asyw->xlut.i.mode = 2;
-	asyw->xlut.i.size = 0;
+	asyw->xlut.i.size = size == 1024 ? 2 : 0;
 	asyw->xlut.i.range = 0;
 	asyw->xlut.i.output_mode = 1;
 	asyw->xlut.i.load = head907d_olut_load;
+	return true;
 }
 
 void
@@ -261,6 +265,7 @@ wndwc37e = {
 	.ntfy_reset = corec37d_ntfy_init,
 	.ntfy_wait_begun = base507c_ntfy_wait_begun,
 	.ilut = wndwc37e_ilut,
+	.ilut_size = 1024,
 	.xlut_set = wndwc37e_ilut_set,
 	.xlut_clr = wndwc37e_ilut_clr,
 	.csc = base907c_csc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index a311c79..35c9c52 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -156,19 +156,21 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
 	writew(readw(mem - 4), mem + 4);
 }
 
-static void
-wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+static bool
+wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
 {
-	u16 size = asyw->ilut->length / sizeof(struct drm_color_lut);
+	if (size = size ? size : 1024, size != 256 && size != 1024)
+		return false;
+
 	if (size == 256) {
 		asyw->xlut.i.mode = 1; /* DIRECT8. */
 	} else {
 		asyw->xlut.i.mode = 2; /* DIRECT10. */
-		size = 1024;
 	}
 	asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */;
 	asyw->xlut.i.output_mode = 0; /* INTERPOLATE_DISABLE. */
 	asyw->xlut.i.load = wndwc57e_ilut_load;
+	return true;
 }
 
 static const struct nv50_wndw_func
@@ -183,6 +185,7 @@ wndwc57e = {
 	.ntfy_wait_begun = base507c_ntfy_wait_begun,
 	.ilut = wndwc57e_ilut,
 	.ilut_identity = true,
+	.ilut_size = 1024,
 	.xlut_set = wndwc57e_ilut_set,
 	.xlut_clr = wndwc57e_ilut_clr,
 	.csc = base907c_csc,
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/acr.h b/drivers/gpu/drm/nouveau/include/nvfw/acr.h
new file mode 100644
index 0000000..e65d6a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/acr.h
@@ -0,0 +1,152 @@
+#ifndef __NVFW_ACR_H__
+#define __NVFW_ACR_H__
+
+struct wpr_header {
+#define WPR_HEADER_V0_FALCON_ID_INVALID                              0xffffffff
+	u32 falcon_id;
+	u32 lsb_offset;
+	u32 bootstrap_owner;
+	u32 lazy_bootstrap;
+#define WPR_HEADER_V0_STATUS_NONE                                             0
+#define WPR_HEADER_V0_STATUS_COPY                                             1
+#define WPR_HEADER_V0_STATUS_VALIDATION_CODE_FAILED                           2
+#define WPR_HEADER_V0_STATUS_VALIDATION_DATA_FAILED                           3
+#define WPR_HEADER_V0_STATUS_VALIDATION_DONE                                  4
+#define WPR_HEADER_V0_STATUS_VALIDATION_SKIPPED                               5
+#define WPR_HEADER_V0_STATUS_BOOTSTRAP_READY                                  6
+	u32 status;
+};
+
+void wpr_header_dump(struct nvkm_subdev *, const struct wpr_header *);
+
+struct wpr_header_v1 {
+#define WPR_HEADER_V1_FALCON_ID_INVALID                              0xffffffff
+	u32 falcon_id;
+	u32 lsb_offset;
+	u32 bootstrap_owner;
+	u32 lazy_bootstrap;
+	u32 bin_version;
+#define WPR_HEADER_V1_STATUS_NONE                                             0
+#define WPR_HEADER_V1_STATUS_COPY                                             1
+#define WPR_HEADER_V1_STATUS_VALIDATION_CODE_FAILED                           2
+#define WPR_HEADER_V1_STATUS_VALIDATION_DATA_FAILED                           3
+#define WPR_HEADER_V1_STATUS_VALIDATION_DONE                                  4
+#define WPR_HEADER_V1_STATUS_VALIDATION_SKIPPED                               5
+#define WPR_HEADER_V1_STATUS_BOOTSTRAP_READY                                  6
+#define WPR_HEADER_V1_STATUS_REVOCATION_CHECK_FAILED                          7
+	u32 status;
+};
+
+void wpr_header_v1_dump(struct nvkm_subdev *, const struct wpr_header_v1 *);
+
+struct lsf_signature {
+	u8 prd_keys[2][16];
+	u8 dbg_keys[2][16];
+	u32 b_prd_present;
+	u32 b_dbg_present;
+	u32 falcon_id;
+};
+
+struct lsf_signature_v1 {
+	u8 prd_keys[2][16];
+	u8 dbg_keys[2][16];
+	u32 b_prd_present;
+	u32 b_dbg_present;
+	u32 falcon_id;
+	u32 supports_versioning;
+	u32 version;
+	u32 depmap_count;
+	u8 depmap[11/*LSF_LSB_DEPMAP_SIZE*/ * 2 * 4];
+	u8 kdf[16];
+};
+
+struct lsb_header_tail {
+	u32 ucode_off;
+	u32 ucode_size;
+	u32 data_size;
+	u32 bl_code_size;
+	u32 bl_imem_off;
+	u32 bl_data_off;
+	u32 bl_data_size;
+	u32 app_code_off;
+	u32 app_code_size;
+	u32 app_data_off;
+	u32 app_data_size;
+	u32 flags;
+};
+
+struct lsb_header {
+	struct lsf_signature signature;
+	struct lsb_header_tail tail;
+};
+
+void lsb_header_dump(struct nvkm_subdev *, struct lsb_header *);
+
+struct lsb_header_v1 {
+	struct lsf_signature_v1 signature;
+	struct lsb_header_tail tail;
+};
+
+void lsb_header_v1_dump(struct nvkm_subdev *, struct lsb_header_v1 *);
+
+struct flcn_acr_desc {
+	union {
+		u8 reserved_dmem[0x200];
+		u32 signatures[4];
+	} ucode_reserved_space;
+	u32 wpr_region_id;
+	u32 wpr_offset;
+	u32 mmu_mem_range;
+	struct {
+		u32 no_regions;
+		struct {
+			u32 start_addr;
+			u32 end_addr;
+			u32 region_id;
+			u32 read_mask;
+			u32 write_mask;
+			u32 client_mask;
+		} region_props[2];
+	} regions;
+	u32 ucode_blob_size;
+	u64 ucode_blob_base __aligned(8);
+	struct {
+		u32 vpr_enabled;
+		u32 vpr_start;
+		u32 vpr_end;
+		u32 hdcp_policies;
+	} vpr_desc;
+};
+
+void flcn_acr_desc_dump(struct nvkm_subdev *, struct flcn_acr_desc *);
+
+struct flcn_acr_desc_v1 {
+	u8 reserved_dmem[0x200];
+	u32 signatures[4];
+	u32 wpr_region_id;
+	u32 wpr_offset;
+	u32 mmu_memory_range;
+	struct {
+		u32 no_regions;
+		struct {
+			u32 start_addr;
+			u32 end_addr;
+			u32 region_id;
+			u32 read_mask;
+			u32 write_mask;
+			u32 client_mask;
+			u32 shadow_mem_start_addr;
+		} region_props[2];
+	} regions;
+	u32 ucode_blob_size;
+	u64 ucode_blob_base __aligned(8);
+	struct {
+		u32 vpr_enabled;
+		u32 vpr_start;
+		u32 vpr_end;
+		u32 hdcp_policies;
+	} vpr_desc;
+};
+
+void flcn_acr_desc_v1_dump(struct nvkm_subdev *, struct flcn_acr_desc_v1 *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/flcn.h b/drivers/gpu/drm/nouveau/include/nvfw/flcn.h
new file mode 100644
index 0000000..e090f34
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/flcn.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_FLCN_H__
+#define __NVFW_FLCN_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct loader_config {
+	u32 dma_idx;
+	u32 code_dma_base;
+	u32 code_size_total;
+	u32 code_size_to_load;
+	u32 code_entry_point;
+	u32 data_dma_base;
+	u32 data_size;
+	u32 overlay_dma_base;
+	u32 argc;
+	u32 argv;
+	u32 code_dma_base1;
+	u32 data_dma_base1;
+	u32 overlay_dma_base1;
+};
+
+void
+loader_config_dump(struct nvkm_subdev *, const struct loader_config *);
+
+struct loader_config_v1 {
+	u32 reserved;
+	u32 dma_idx;
+	u64 code_dma_base;
+	u32 code_size_total;
+	u32 code_size_to_load;
+	u32 code_entry_point;
+	u64 data_dma_base;
+	u32 data_size;
+	u64 overlay_dma_base;
+	u32 argc;
+	u32 argv;
+} __packed;
+
+void
+loader_config_v1_dump(struct nvkm_subdev *, const struct loader_config_v1 *);
+
+struct flcn_bl_dmem_desc {
+	u32 reserved[4];
+	u32 signature[4];
+	u32 ctx_dma;
+	u32 code_dma_base;
+	u32 non_sec_code_off;
+	u32 non_sec_code_size;
+	u32 sec_code_off;
+	u32 sec_code_size;
+	u32 code_entry_point;
+	u32 data_dma_base;
+	u32 data_size;
+	u32 code_dma_base1;
+	u32 data_dma_base1;
+};
+
+void
+flcn_bl_dmem_desc_dump(struct nvkm_subdev *, const struct flcn_bl_dmem_desc *);
+
+struct flcn_bl_dmem_desc_v1 {
+	u32 reserved[4];
+	u32 signature[4];
+	u32 ctx_dma;
+	u64 code_dma_base;
+	u32 non_sec_code_off;
+	u32 non_sec_code_size;
+	u32 sec_code_off;
+	u32 sec_code_size;
+	u32 code_entry_point;
+	u64 data_dma_base;
+	u32 data_size;
+} __packed;
+
+void flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *,
+			       const struct flcn_bl_dmem_desc_v1 *);
+
+struct flcn_bl_dmem_desc_v2 {
+	u32 reserved[4];
+	u32 signature[4];
+	u32 ctx_dma;
+	u64 code_dma_base;
+	u32 non_sec_code_off;
+	u32 non_sec_code_size;
+	u32 sec_code_off;
+	u32 sec_code_size;
+	u32 code_entry_point;
+	u64 data_dma_base;
+	u32 data_size;
+	u32 argc;
+	u32 argv;
+} __packed;
+
+void flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *,
+			       const struct flcn_bl_dmem_desc_v2 *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/fw.h b/drivers/gpu/drm/nouveau/include/nvfw/fw.h
new file mode 100644
index 0000000..a7cf118
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/fw.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_FW_H__
+#define __NVFW_FW_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct nvfw_bin_hdr {
+	u32 bin_magic;
+	u32 bin_ver;
+	u32 bin_size;
+	u32 header_offset;
+	u32 data_offset;
+	u32 data_size;
+};
+
+const struct nvfw_bin_hdr *nvfw_bin_hdr(struct nvkm_subdev *, const void *);
+
+struct nvfw_bl_desc {
+	u32 start_tag;
+	u32 dmem_load_off;
+	u32 code_off;
+	u32 code_size;
+	u32 data_off;
+	u32 data_size;
+};
+
+const struct nvfw_bl_desc *nvfw_bl_desc(struct nvkm_subdev *, const void *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/hs.h b/drivers/gpu/drm/nouveau/include/nvfw/hs.h
new file mode 100644
index 0000000..64d0d32
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/hs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_HS_H__
+#define __NVFW_HS_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct nvfw_hs_header {
+	u32 sig_dbg_offset;
+	u32 sig_dbg_size;
+	u32 sig_prod_offset;
+	u32 sig_prod_size;
+	u32 patch_loc;
+	u32 patch_sig;
+	u32 hdr_offset;
+	u32 hdr_size;
+};
+
+const struct nvfw_hs_header *nvfw_hs_header(struct nvkm_subdev *, const void *);
+
+struct nvfw_hs_load_header {
+	u32 non_sec_code_off;
+	u32 non_sec_code_size;
+	u32 data_dma_base;
+	u32 data_size;
+	u32 num_apps;
+	u32 apps[0];
+};
+
+const struct nvfw_hs_load_header *
+nvfw_hs_load_header(struct nvkm_subdev *, const void *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/ls.h b/drivers/gpu/drm/nouveau/include/nvfw/ls.h
new file mode 100644
index 0000000..f63692a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/ls.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_LS_H__
+#define __NVFW_LS_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct nvfw_ls_desc_head {
+	u32 descriptor_size;
+	u32 image_size;
+	u32 tools_version;
+	u32 app_version;
+	char date[64];
+	u32 bootloader_start_offset;
+	u32 bootloader_size;
+	u32 bootloader_imem_offset;
+	u32 bootloader_entry_point;
+	u32 app_start_offset;
+	u32 app_size;
+	u32 app_imem_offset;
+	u32 app_imem_entry;
+	u32 app_dmem_offset;
+	u32 app_resident_code_offset;
+	u32 app_resident_code_size;
+	u32 app_resident_data_offset;
+	u32 app_resident_data_size;
+};
+
+struct nvfw_ls_desc {
+	struct nvfw_ls_desc_head head;
+	u32 nb_overlays;
+	struct {
+		u32 start;
+		u32 size;
+	} load_ovl[64];
+	u32 compressed;
+};
+
+const struct nvfw_ls_desc *nvfw_ls_desc(struct nvkm_subdev *, const void *);
+
+struct nvfw_ls_desc_v1 {
+	struct nvfw_ls_desc_head head;
+	u32 nb_imem_overlays;
+	u32 nb_dmem_overlays;
+	struct {
+		u32 start;
+		u32 size;
+	} load_ovl[64];
+	u32 compressed;
+};
+
+const struct nvfw_ls_desc_v1 *
+nvfw_ls_desc_v1(struct nvkm_subdev *, const void *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/pmu.h b/drivers/gpu/drm/nouveau/include/nvfw/pmu.h
new file mode 100644
index 0000000..452ed7d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/pmu.h
@@ -0,0 +1,98 @@
+#ifndef __NVFW_PMU_H__
+#define __NVFW_PMU_H__
+
+struct nv_pmu_args {
+	u32 reserved;
+	u32 freq_hz;
+	u32 trace_size;
+	u32 trace_dma_base;
+	u16 trace_dma_base1;
+	u8 trace_dma_offset;
+	u32 trace_dma_idx;
+	bool secure_mode;
+	bool raise_priv_sec;
+	struct {
+		u32 dma_base;
+		u16 dma_base1;
+		u8 dma_offset;
+		u16 fb_size;
+		u8 dma_idx;
+	} gc6_ctx;
+	u8 pad;
+};
+
+#define NV_PMU_UNIT_INIT                                                   0x07
+#define NV_PMU_UNIT_ACR                                                    0x0a
+
+struct nv_pmu_init_msg {
+	struct nv_falcon_msg hdr;
+#define NV_PMU_INIT_MSG_INIT                                               0x00
+	u8 msg_type;
+
+	u8 pad;
+	u16 os_debug_entry_point;
+
+	struct {
+		u16 size;
+		u16 offset;
+		u8 index;
+		u8 pad;
+	} queue_info[5];
+
+	u16 sw_managed_area_offset;
+	u16 sw_managed_area_size;
+};
+
+struct nv_pmu_acr_cmd {
+	struct nv_falcon_cmd hdr;
+#define NV_PMU_ACR_CMD_INIT_WPR_REGION                                     0x00
+#define NV_PMU_ACR_CMD_BOOTSTRAP_FALCON                                    0x01
+#define NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS                          0x03
+	u8 cmd_type;
+};
+
+struct nv_pmu_acr_msg {
+	struct nv_falcon_cmd hdr;
+	u8 msg_type;
+};
+
+struct nv_pmu_acr_init_wpr_region_cmd {
+	struct nv_pmu_acr_cmd cmd;
+	u32 region_id;
+	u32 wpr_offset;
+};
+
+struct nv_pmu_acr_init_wpr_region_msg {
+	struct nv_pmu_acr_msg msg;
+	u32 error_code;
+};
+
+struct nv_pmu_acr_bootstrap_falcon_cmd {
+	struct nv_pmu_acr_cmd cmd;
+#define NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES                  0x00000000
+#define NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_NO                   0x00000001
+	u32 flags;
+	u32 falcon_id;
+};
+
+struct nv_pmu_acr_bootstrap_falcon_msg {
+	struct nv_pmu_acr_msg msg;
+	u32 falcon_id;
+};
+
+struct nv_pmu_acr_bootstrap_multiple_falcons_cmd {
+	struct nv_pmu_acr_cmd cmd;
+#define NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES        0x00000000
+#define NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_NO         0x00000001
+	u32 flags;
+	u32 falcon_mask;
+	u32 use_va_mask;
+	u32 wpr_lo;
+	u32 wpr_hi;
+};
+
+struct nv_pmu_acr_bootstrap_multiple_falcons_msg {
+	struct nv_pmu_acr_msg msg;
+	u32 falcon_mask;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/sec2.h b/drivers/gpu/drm/nouveau/include/nvfw/sec2.h
new file mode 100644
index 0000000..0349655
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/sec2.h
@@ -0,0 +1,60 @@
+#ifndef __NVFW_SEC2_H__
+#define __NVFW_SEC2_H__
+
+struct nv_sec2_args {
+	u32 freq_hz;
+	u32 falc_trace_size;
+	u32 falc_trace_dma_base;
+	u32 falc_trace_dma_idx;
+	bool secure_mode;
+};
+
+#define NV_SEC2_UNIT_INIT                                                  0x01
+#define NV_SEC2_UNIT_ACR                                                   0x08
+
+struct nv_sec2_init_msg {
+	struct nv_falcon_msg hdr;
+#define NV_SEC2_INIT_MSG_INIT                                              0x00
+	u8 msg_type;
+
+	u8 num_queues;
+	u16 os_debug_entry_point;
+
+	struct {
+		u32 offset;
+		u16 size;
+		u8 index;
+#define NV_SEC2_INIT_MSG_QUEUE_ID_CMDQ                                     0x00
+#define NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ                                     0x01
+		u8 id;
+	} queue_info[2];
+
+	u32 sw_managed_area_offset;
+	u16 sw_managed_area_size;
+};
+
+struct nv_sec2_acr_cmd {
+	struct nv_falcon_cmd hdr;
+#define NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON                                   0x00
+	u8 cmd_type;
+};
+
+struct nv_sec2_acr_msg {
+	struct nv_falcon_cmd hdr;
+	u8 msg_type;
+};
+
+struct nv_sec2_acr_bootstrap_falcon_cmd {
+	struct nv_sec2_acr_cmd cmd;
+#define NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES                 0x00000000
+#define NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_NO                  0x00000001
+	u32 flags;
+	u32 falcon_id;
+};
+
+struct nv_sec2_acr_bootstrap_falcon_msg {
+	struct nv_sec2_acr_msg msg;
+	u32 error_code;
+	u32 falcon_id;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index f704ae6..3065974 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -166,6 +166,8 @@
 
 #define VOLTA_A                                       /* cl9097.h */ 0x0000c397
 
+#define TURING_A                                      /* cl9097.h */ 0x0000c597
+
 #define NV74_BSP                                                     0x000074b0
 
 #define GT212_MSVLD                                                  0x000085b1
@@ -207,6 +209,7 @@
 #define PASCAL_COMPUTE_A                                             0x0000c0c0
 #define PASCAL_COMPUTE_B                                             0x0000c1c0
 #define VOLTA_COMPUTE_A                                              0x0000c3c0
+#define TURING_COMPUTE_A                                             0x0000c5c0
 
 #define NV74_CIPHER                                                  0x000074c1
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0008.h b/drivers/gpu/drm/nouveau/include/nvif/if0008.h
index 8450127..c21d09f 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0008.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0008.h
@@ -35,7 +35,7 @@ struct nvif_mmu_type_v0 {
 
 struct nvif_mmu_kind_v0 {
 	__u8  version;
-	__u8  pad01[1];
+	__u8  kind_inv;
 	__u16 count;
 	__u8  data[];
 };
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mmu.h b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
index 747ecf6..cec1e88 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
@@ -7,6 +7,7 @@ struct nvif_mmu {
 	u8  dmabits;
 	u8  heap_nr;
 	u8  type_nr;
+	u8  kind_inv;
 	u16 kind_nr;
 	s32 mem;
 
@@ -36,9 +37,8 @@ void nvif_mmu_fini(struct nvif_mmu *);
 static inline bool
 nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
 {
-	const u8 invalid = mmu->kind_nr - 1;
 	if (kind) {
-		if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid)
+		if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv)
 			return false;
 	}
 	return true;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 6d55cd0..5c007ce 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -23,13 +23,13 @@ enum nvkm_devidx {
 	NVKM_SUBDEV_MMU,
 	NVKM_SUBDEV_BAR,
 	NVKM_SUBDEV_FAULT,
+	NVKM_SUBDEV_ACR,
 	NVKM_SUBDEV_PMU,
 	NVKM_SUBDEV_VOLT,
 	NVKM_SUBDEV_ICCSENSE,
 	NVKM_SUBDEV_THERM,
 	NVKM_SUBDEV_CLK,
 	NVKM_SUBDEV_GSP,
-	NVKM_SUBDEV_SECBOOT,
 
 	NVKM_ENGINE_BSP,
 
@@ -129,6 +129,7 @@ struct nvkm_device {
 		struct notifier_block nb;
 	} acpi;
 
+	struct nvkm_acr *acr;
 	struct nvkm_bar *bar;
 	struct nvkm_bios *bios;
 	struct nvkm_bus *bus;
@@ -149,7 +150,6 @@ struct nvkm_device {
 	struct nvkm_subdev *mxm;
 	struct nvkm_pci *pci;
 	struct nvkm_pmu *pmu;
-	struct nvkm_secboot *secboot;
 	struct nvkm_therm *therm;
 	struct nvkm_timer *timer;
 	struct nvkm_top *top;
@@ -169,7 +169,7 @@ struct nvkm_device {
 	struct nvkm_engine *mspdec;
 	struct nvkm_engine *msppp;
 	struct nvkm_engine *msvld;
-	struct nvkm_engine *nvenc[3];
+	struct nvkm_nvenc *nvenc[3];
 	struct nvkm_nvdec *nvdec[3];
 	struct nvkm_pm *pm;
 	struct nvkm_engine *sec;
@@ -202,6 +202,7 @@ struct nvkm_device_quirk {
 struct nvkm_device_chip {
 	const char *name;
 
+	int (*acr     )(struct nvkm_device *, int idx, struct nvkm_acr **);
 	int (*bar     )(struct nvkm_device *, int idx, struct nvkm_bar **);
 	int (*bios    )(struct nvkm_device *, int idx, struct nvkm_bios **);
 	int (*bus     )(struct nvkm_device *, int idx, struct nvkm_bus **);
@@ -222,7 +223,6 @@ struct nvkm_device_chip {
 	int (*mxm     )(struct nvkm_device *, int idx, struct nvkm_subdev **);
 	int (*pci     )(struct nvkm_device *, int idx, struct nvkm_pci **);
 	int (*pmu     )(struct nvkm_device *, int idx, struct nvkm_pmu **);
-	int (*secboot )(struct nvkm_device *, int idx, struct nvkm_secboot **);
 	int (*therm   )(struct nvkm_device *, int idx, struct nvkm_therm **);
 	int (*timer   )(struct nvkm_device *, int idx, struct nvkm_timer **);
 	int (*top     )(struct nvkm_device *, int idx, struct nvkm_top **);
@@ -242,7 +242,7 @@ struct nvkm_device_chip {
 	int (*mspdec  )(struct nvkm_device *, int idx, struct nvkm_engine **);
 	int (*msppp   )(struct nvkm_device *, int idx, struct nvkm_engine **);
 	int (*msvld   )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_nvenc **);
 	int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **);
 	int (*pm      )(struct nvkm_device *, int idx, struct nvkm_pm **);
 	int (*sec     )(struct nvkm_device *, int idx, struct nvkm_engine **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
new file mode 100644
index 0000000..daa8e4b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
@@ -0,0 +1,77 @@
+#ifndef __NVKM_FALCON_H__
+#define __NVKM_FALCON_H__
+#include <engine/falcon.h>
+
+int nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *owner,
+		     const char *name, u32 addr, struct nvkm_falcon *);
+void nvkm_falcon_dtor(struct nvkm_falcon *);
+
+void nvkm_falcon_v1_load_imem(struct nvkm_falcon *,
+			      void *, u32, u32, u16, u8, bool);
+void nvkm_falcon_v1_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8);
+void nvkm_falcon_v1_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *);
+void nvkm_falcon_v1_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
+int nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *, u32);
+int nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *, u32);
+void nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *, u32 start_addr);
+void nvkm_falcon_v1_start(struct nvkm_falcon *);
+int nvkm_falcon_v1_enable(struct nvkm_falcon *);
+void nvkm_falcon_v1_disable(struct nvkm_falcon *);
+
+void gp102_sec2_flcn_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
+int gp102_sec2_flcn_enable(struct nvkm_falcon *);
+
+#define FLCN_PRINTK(t,f,fmt,a...) do {                                         \
+	if (nvkm_subdev_name[(f)->owner->index] != (f)->name)                  \
+		nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a);           \
+	else                                                                   \
+		nvkm_##t((f)->owner, fmt"\n", ##a);                            \
+} while(0)
+#define FLCN_DBG(f,fmt,a...) FLCN_PRINTK(debug, (f), fmt, ##a)
+#define FLCN_ERR(f,fmt,a...) FLCN_PRINTK(error, (f), fmt, ##a)
+
+/**
+ * struct nv_falcon_msg - header for all messages
+ *
+ * @unit_id:	id of firmware process that sent the message
+ * @size:	total size of message
+ * @ctrl_flags:	control flags
+ * @seq_id:	used to match a message from its corresponding command
+ */
+struct nv_falcon_msg {
+	u8 unit_id;
+	u8 size;
+	u8 ctrl_flags;
+	u8 seq_id;
+};
+
+#define nv_falcon_cmd nv_falcon_msg
+#define NV_FALCON_CMD_UNIT_ID_REWIND                                       0x00
+
+struct nvkm_falcon_qmgr;
+int nvkm_falcon_qmgr_new(struct nvkm_falcon *, struct nvkm_falcon_qmgr **);
+void nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **);
+
+typedef int
+(*nvkm_falcon_qmgr_callback)(void *priv, struct nv_falcon_msg *);
+
+struct nvkm_falcon_cmdq;
+int nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *, const char *name,
+			 struct nvkm_falcon_cmdq **);
+void nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **);
+void nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *,
+			   u32 index, u32 offset, u32 size);
+void nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *);
+int nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *, struct nv_falcon_cmd *,
+			  nvkm_falcon_qmgr_callback, void *priv,
+			  unsigned long timeout_jiffies);
+
+struct nvkm_falcon_msgq;
+int nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *, const char *name,
+			 struct nvkm_falcon_msgq **);
+void nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **);
+void nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *,
+			   u32 index, u32 offset, u32 size);
+int nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *, void *, u32 size);
+void nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
index 383370c..d14b7fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -1,12 +1,55 @@
 /* SPDX-License-Identifier: MIT */
 #ifndef __NVKM_FIRMWARE_H__
 #define __NVKM_FIRMWARE_H__
+#include <core/option.h>
 #include <core/subdev.h>
 
-int nvkm_firmware_get_version(const struct nvkm_subdev *, const char *fwname,
-			      int min_version, int max_version,
-			      const struct firmware **);
-int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname,
+int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname, int ver,
 		      const struct firmware **);
 void nvkm_firmware_put(const struct firmware *);
+
+int nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *path,
+			    const char *name, int ver, struct nvkm_blob *);
+int nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *path,
+			    const char *name, int ver,
+			    const struct firmware **);
+
+#define nvkm_firmware_load(s,l,o,p...) ({                                      \
+	struct nvkm_subdev *_s = (s);                                          \
+	const char *_opts = (o);                                               \
+	char _option[32];                                                      \
+	typeof(l[0]) *_list = (l), *_next, *_fwif = NULL;                      \
+	int _ver, _fwv, _ret = 0;                                              \
+                                                                               \
+	snprintf(_option, sizeof(_option), "Nv%sFw", _opts);                   \
+	_ver = nvkm_longopt(_s->device->cfgopt, _option, -2);                  \
+	if (_ver >= -1) {                                                      \
+		for (_next = _list; !_fwif && _next->load; _next++) {          \
+			if (_next->version == _ver)                            \
+				_fwif = _next;                                 \
+		}                                                              \
+		_ret = _fwif ? 0 : -EINVAL;                                    \
+	}                                                                      \
+                                                                               \
+	if (_ret == 0) {                                                       \
+		snprintf(_option, sizeof(_option), "Nv%sFwVer", _opts);        \
+		_fwv = _fwif ? _fwif->version : -1;                            \
+		_ver = nvkm_longopt(_s->device->cfgopt, _option, _fwv);        \
+		for (_next = _fwif ? _fwif : _list; _next->load; _next++) {    \
+			_fwv = (_ver >= 0) ? _ver : _next->version;            \
+			_ret = _next->load(p, _fwv, _next);                    \
+			if (_ret == 0 || _ver >= 0) {                          \
+				_fwif = _next;                                 \
+				break;                                         \
+			}                                                      \
+		}                                                              \
+	}                                                                      \
+                                                                               \
+	if (_ret) {                                                            \
+		nvkm_error(_s, "failed to load firmware\n");                   \
+		_fwif = ERR_PTR(_ret);                                         \
+	}                                                                      \
+	                                                                       \
+	_fwif;                                                                 \
+})
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index b23bf61..74d3f1a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -84,6 +84,22 @@ void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *,
 	nvkm_wo32((o), __a + 4, upper_32_bits(__d));                           \
 } while(0)
 
+#define nvkm_robj(o,a,p,s) do {                                                \
+	u32 _addr = (a), _size = (s) >> 2, *_data = (void *)(p);               \
+	while (_size--) {                                                      \
+		*(_data++) = nvkm_ro32((o), _addr);                            \
+		_addr += 4;                                                    \
+	}                                                                      \
+} while(0)
+
+#define nvkm_wobj(o,a,p,s) do {                                                \
+	u32 _addr = (a), _size = (s) >> 2, *_data = (void *)(p);               \
+	while (_size--) {                                                      \
+		nvkm_wo32((o), _addr, *(_data++));                             \
+		_addr += 4;                                                    \
+	}                                                                      \
+} while(0)
+
 #define nvkm_fill(t,s,o,a,d,c) do {                                            \
 	u64 _a = (a), _c = (c), _d = (d), _o = _a >> s, _s = _c << s;          \
 	u##t __iomem *_m = nvkm_kmap(o);                                       \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h b/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
deleted file mode 100644
index bf3e532..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_CORE_MSGQUEUE_H
-#define __NVKM_CORE_MSGQUEUE_H
-#include <subdev/secboot.h>
-struct nvkm_msgqueue;
-
-/* Hopefully we will never have firmware arguments larger than that... */
-#define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100
-
-int nvkm_msgqueue_new(u32, struct nvkm_falcon *, const struct nvkm_secboot *,
-		      struct nvkm_msgqueue **);
-void nvkm_msgqueue_del(struct nvkm_msgqueue **);
-void nvkm_msgqueue_recv(struct nvkm_msgqueue *);
-int nvkm_msgqueue_reinit(struct nvkm_msgqueue *);
-
-/* useful if we run a NVIDIA-signed firmware */
-void nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *, void *);
-
-/* interface to ACR unit running on falcon (NVIDIA signed firmware) */
-int nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *, unsigned long);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
index 029a416..d7ba320 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -21,4 +21,17 @@
 	iowrite32_native(lower_32_bits(_v), &_p[0]);			       \
 	iowrite32_native(upper_32_bits(_v), &_p[1]);			       \
 } while(0)
+
+struct nvkm_blob {
+	void *data;
+	u32 size;
+};
+
+static inline void
+nvkm_blob_dtor(struct nvkm_blob *blob)
+{
+	kfree(blob->data);
+	blob->data = NULL;
+	blob->size = 0;
+}
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 23b582d..27c1f86 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_FALCON_H__
-#define __NVKM_FALCON_H__
+#ifndef __NVKM_FLCNEN_H__
+#define __NVKM_FLCNEN_H__
 #define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
 #include <core/engine.h>
 struct nvkm_fifo_chan;
@@ -23,12 +23,13 @@ struct nvkm_falcon {
 
 	struct mutex mutex;
 	struct mutex dmem_mutex;
+	bool oneinit;
+
 	const struct nvkm_subdev *user;
 
 	u8 version;
 	u8 secret;
 	bool debug;
-	bool has_emem;
 
 	struct nvkm_memory *core;
 	bool external;
@@ -76,9 +77,14 @@ struct nvkm_falcon_func {
 	} data;
 	void (*init)(struct nvkm_falcon *);
 	void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+
+	u32 debug;
+	u32 fbif;
+
 	void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool);
 	void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8);
 	void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *);
+	u32 emem_addr;
 	void (*bind_context)(struct nvkm_falcon *, struct nvkm_memory *);
 	int (*wait_for_halt)(struct nvkm_falcon *, u32);
 	int (*clear_interrupt)(struct nvkm_falcon *, u32);
@@ -86,6 +92,13 @@ struct nvkm_falcon_func {
 	void (*start)(struct nvkm_falcon *);
 	int (*enable)(struct nvkm_falcon *falcon);
 	void (*disable)(struct nvkm_falcon *falcon);
+	int (*reset)(struct nvkm_falcon *);
+
+	struct {
+		u32 head;
+		u32 tail;
+		u32 stride;
+	} cmdq, msgq;
 
 	struct nvkm_sclass sclass[];
 };
@@ -122,5 +135,4 @@ int nvkm_falcon_clear_interrupt(struct nvkm_falcon *, u32);
 int nvkm_falcon_enable(struct nvkm_falcon *);
 void nvkm_falcon_disable(struct nvkm_falcon *);
 int nvkm_falcon_reset(struct nvkm_falcon *);
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 2cde36f..1530c81 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -50,6 +50,8 @@ int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gp104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gv100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int tu102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 7c7d7f0..1b3183e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -3,13 +3,13 @@
 #define __NVKM_NVDEC_H__
 #define nvkm_nvdec(p) container_of((p), struct nvkm_nvdec, engine)
 #include <core/engine.h>
+#include <core/falcon.h>
 
 struct nvkm_nvdec {
+	const struct nvkm_nvdec_func *func;
 	struct nvkm_engine engine;
-	u32 addr;
-
-	struct nvkm_falcon *falcon;
+	struct nvkm_falcon falcon;
 };
 
-int gp102_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **);
+int gm107_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index 2162404..33e6ba8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -1,5 +1,15 @@
 /* SPDX-License-Identifier: MIT */
 #ifndef __NVKM_NVENC_H__
 #define __NVKM_NVENC_H__
+#define nvkm_nvenc(p) container_of((p), struct nvkm_nvenc, engine)
 #include <core/engine.h>
+#include <core/falcon.h>
+
+struct nvkm_nvenc {
+	const struct nvkm_nvenc_func *func;
+	struct nvkm_engine engine;
+	struct nvkm_falcon falcon;
+};
+
+int gm107_nvenc_new(struct nvkm_device *, int, struct nvkm_nvenc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
index 33078f8..34dc765 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
@@ -1,17 +1,24 @@
 /* SPDX-License-Identifier: MIT */
 #ifndef __NVKM_SEC2_H__
 #define __NVKM_SEC2_H__
+#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine)
 #include <core/engine.h>
+#include <core/falcon.h>
 
 struct nvkm_sec2 {
+	const struct nvkm_sec2_func *func;
 	struct nvkm_engine engine;
-	u32 addr;
+	struct nvkm_falcon falcon;
 
-	struct nvkm_falcon *falcon;
-	struct nvkm_msgqueue *queue;
+	struct nvkm_falcon_qmgr *qmgr;
+	struct nvkm_falcon_cmdq *cmdq;
+	struct nvkm_falcon_msgq *msgq;
+
 	struct work_struct work;
+	bool initmsg_received;
 };
 
 int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
+int gp108_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
 int tu102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
new file mode 100644
index 0000000..5d9c3a9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_ACR_H__
+#define __NVKM_ACR_H__
+#define nvkm_acr(p) container_of((p), struct nvkm_acr, subdev)
+#include <core/subdev.h>
+#include <core/falcon.h>
+
+enum nvkm_acr_lsf_id {
+	NVKM_ACR_LSF_PMU = 0,
+	NVKM_ACR_LSF_GSPLITE = 1,
+	NVKM_ACR_LSF_FECS = 2,
+	NVKM_ACR_LSF_GPCCS = 3,
+	NVKM_ACR_LSF_NVDEC = 4,
+	NVKM_ACR_LSF_SEC2 = 7,
+	NVKM_ACR_LSF_MINION = 10,
+	NVKM_ACR_LSF_NUM
+};
+
+static inline const char *
+nvkm_acr_lsf_id(enum nvkm_acr_lsf_id id)
+{
+	switch (id) {
+	case NVKM_ACR_LSF_PMU    : return "pmu";
+	case NVKM_ACR_LSF_GSPLITE: return "gsplite";
+	case NVKM_ACR_LSF_FECS   : return "fecs";
+	case NVKM_ACR_LSF_GPCCS  : return "gpccs";
+	case NVKM_ACR_LSF_NVDEC  : return "nvdec";
+	case NVKM_ACR_LSF_SEC2   : return "sec2";
+	case NVKM_ACR_LSF_MINION : return "minion";
+	default:
+		return "unknown";
+	}
+}
+
+struct nvkm_acr {
+	const struct nvkm_acr_func *func;
+	struct nvkm_subdev subdev;
+
+	struct list_head hsfw, hsf;
+	struct list_head lsfw, lsf;
+
+	struct nvkm_memory *wpr;
+	u64 wpr_start;
+	u64 wpr_end;
+	u64 shadow_start;
+
+	struct nvkm_memory *inst;
+	struct nvkm_vmm *vmm;
+
+	bool done;
+
+	const struct firmware *wpr_fw;
+	bool wpr_comp;
+	u64 wpr_prev;
+};
+
+bool nvkm_acr_managed_falcon(struct nvkm_device *, enum nvkm_acr_lsf_id);
+int nvkm_acr_bootstrap_falcons(struct nvkm_device *, unsigned long mask);
+
+int gm200_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gm20b_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gp102_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gp108_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gp10b_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int tu102_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+
+struct nvkm_acr_lsfw {
+	const struct nvkm_acr_lsf_func *func;
+	struct nvkm_falcon *falcon;
+	enum nvkm_acr_lsf_id id;
+
+	struct list_head head;
+
+	struct nvkm_blob img;
+
+	const struct firmware *sig;
+
+	u32 bootloader_size;
+	u32 bootloader_imem_offset;
+
+	u32 app_size;
+	u32 app_start_offset;
+	u32 app_imem_entry;
+	u32 app_resident_code_offset;
+	u32 app_resident_code_size;
+	u32 app_resident_data_offset;
+	u32 app_resident_data_size;
+
+	u32 ucode_size;
+	u32 data_size;
+
+	struct {
+		u32 lsb;
+		u32 img;
+		u32 bld;
+	} offset;
+	u32 bl_data_size;
+};
+
+struct nvkm_acr_lsf_func {
+/* These (currently) map directly to LSB header flags. */
+#define NVKM_ACR_LSF_LOAD_CODE_AT_0                                  0x00000001
+#define NVKM_ACR_LSF_DMACTL_REQ_CTX                                  0x00000004
+#define NVKM_ACR_LSF_FORCE_PRIV_LOAD                                 0x00000008
+	u32 flags;
+	u32 bld_size;
+	void (*bld_write)(struct nvkm_acr *, u32 bld, struct nvkm_acr_lsfw *);
+	void (*bld_patch)(struct nvkm_acr *, u32 bld, s64 adjust);
+	int (*boot)(struct nvkm_falcon *);
+	int (*bootstrap_falcon)(struct nvkm_falcon *, enum nvkm_acr_lsf_id);
+	int (*bootstrap_multiple_falcons)(struct nvkm_falcon *, u32 mask);
+};
+
+int
+nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *, struct nvkm_falcon *,
+				  enum nvkm_acr_lsf_id, const char *path,
+				  int ver, const struct nvkm_acr_lsf_func *);
+int
+nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *, struct nvkm_falcon *,
+				     enum nvkm_acr_lsf_id, const char *path,
+				     int ver, const struct nvkm_acr_lsf_func *);
+int
+nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *, struct nvkm_falcon *,
+				    enum nvkm_acr_lsf_id, const char *path,
+				    int ver, const struct nvkm_acr_lsf_func *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index 97322f9..a513c16 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -31,6 +31,7 @@ struct nvkm_fault_data {
 };
 
 int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
+int gp10b_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
 int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
 int tu102_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 239ad22..34b56b1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -33,6 +33,8 @@ struct nvkm_fb {
 	const struct nvkm_fb_func *func;
 	struct nvkm_subdev subdev;
 
+	struct nvkm_blob vpr_scrubber;
+
 	struct nvkm_ram *ram;
 	struct nvkm_mm tags;
 
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 4c672a5..06db676 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -2,12 +2,11 @@
 #define __NVKM_GSP_H__
 #define nvkm_gsp(p) container_of((p), struct nvkm_gsp, subdev)
 #include <core/subdev.h>
+#include <core/falcon.h>
 
 struct nvkm_gsp {
 	struct nvkm_subdev subdev;
-	u32 addr;
-
-	struct nvkm_falcon *falcon;
+	struct nvkm_falcon falcon;
 };
 
 int gv100_gsp_new(struct nvkm_device *, int, struct nvkm_gsp **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index 644d527..d76f60d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -40,4 +40,5 @@ int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 int gp102_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gp10b_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 4752006..da55308 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -2,13 +2,20 @@
 #ifndef __NVKM_PMU_H__
 #define __NVKM_PMU_H__
 #include <core/subdev.h>
-#include <engine/falcon.h>
+#include <core/falcon.h>
 
 struct nvkm_pmu {
 	const struct nvkm_pmu_func *func;
 	struct nvkm_subdev subdev;
-	struct nvkm_falcon *falcon;
-	struct nvkm_msgqueue *queue;
+	struct nvkm_falcon falcon;
+
+	struct nvkm_falcon_qmgr *qmgr;
+	struct nvkm_falcon_cmdq *hpq;
+	struct nvkm_falcon_cmdq *lpq;
+	struct nvkm_falcon_msgq *msgq;
+	bool initmsg_received;
+
+	struct completion wpr_ready;
 
 	struct {
 		u32 base;
@@ -43,6 +50,7 @@ int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gp10b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 
 /* interface to MEMX process running on PMU */
 struct nvkm_memx;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index f8015e0..1b62ccc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1162,7 +1162,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 void
 nouveau_bo_move_init(struct nouveau_drm *drm)
 {
-	static const struct {
+	static const struct _method_table {
 		const char *name;
 		int engine;
 		s32 oclass;
@@ -1192,7 +1192,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
 		{},
 		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
-	}, *mthd = _methods;
+	};
+	const struct _method_table *mthd = _methods;
 	const char *name = "CPU";
 	int ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 282fd90..d9381a0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -55,6 +55,8 @@ nouveau_channel_killed(struct nvif_notify *ntfy)
 	struct nouveau_cli *cli = (void *)chan->user.client;
 	NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
 	atomic_set(&chan->killed, 1);
+	if (chan->fence)
+		nouveau_fence_context_kill(chan->fence, -ENODEV);
 	return NVIF_NOTIFY_DROP;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index fa14399..0ad5d87 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -635,10 +635,10 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
 	unsigned long c, i;
 	int ret = -ENOMEM;
 
-	args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL);
+	args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
 	if (!args.src)
 		goto out;
-	args.dst = kcalloc(max, sizeof(args.dst), GFP_KERNEL);
+	args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
 	if (!args.dst)
 		goto out_free_src;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 2cd8384..b65ae81 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -715,7 +715,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 void
 nouveau_drm_device_remove(struct drm_device *dev)
 {
-	struct pci_dev *pdev = dev->pdev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_client *client;
 	struct nvkm_device *device;
@@ -727,7 +726,6 @@ nouveau_drm_device_remove(struct drm_device *dev)
 	device = nvkm_device_find(client->device);
 
 	nouveau_drm_device_fini(dev);
-	pci_disable_device(pdev);
 	drm_dev_put(dev);
 	nvkm_device_del(&device);
 }
@@ -738,6 +736,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
 	nouveau_drm_device_remove(dev);
+	pci_disable_device(pdev);
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 70f34ca..c2c332f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -58,6 +58,8 @@
 #include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_page_alloc.h>
 
+#include <drm/drm_audio_component.h>
+
 #include "uapi/drm/nouveau_drm.h"
 
 struct nouveau_channel;
@@ -211,6 +213,11 @@ struct nouveau_drm {
 	struct nouveau_svm *svm;
 
 	struct nouveau_dmem *dmem;
+
+	struct {
+		struct drm_audio_component *component;
+		bool component_registered;
+	} audio;
 };
 
 static inline struct nouveau_drm *
@@ -248,11 +255,11 @@ void nouveau_drm_device_remove(struct drm_device *dev);
 #define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
 
 #define NV_DEBUG(drm,f,a...) do {                                              \
-	if (unlikely(drm_debug & DRM_UT_DRIVER))                               \
+	if (drm_debug_enabled(DRM_UT_DRIVER))                                  \
 		NV_PRINTK(info, &(drm)->client, f, ##a);                       \
 } while(0)
 #define NV_ATOMIC(drm,f,a...) do {                                             \
-	if (unlikely(drm_debug & DRM_UT_ATOMIC))                               \
+	if (drm_debug_enabled(DRM_UT_ATOMIC))                                  \
 		NV_PRINTK(info, &(drm)->client, f, ##a);                       \
 } while(0)
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index f439f0a..0c5cdda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -203,7 +203,7 @@ nouveau_fbcon_release(struct fb_info *info, int user)
 	return 0;
 }
 
-static struct fb_ops nouveau_fbcon_ops = {
+static const struct fb_ops nouveau_fbcon_ops = {
 	.owner = THIS_MODULE,
 	DRM_FB_HELPER_DEFAULT_OPS,
 	.fb_open = nouveau_fbcon_open,
@@ -214,7 +214,7 @@ static struct fb_ops nouveau_fbcon_ops = {
 	.fb_sync = nouveau_fbcon_sync,
 };
 
-static struct fb_ops nouveau_fbcon_sw_ops = {
+static const struct fb_ops nouveau_fbcon_sw_ops = {
 	.owner = THIS_MODULE,
 	DRM_FB_HELPER_DEFAULT_OPS,
 	.fb_open = nouveau_fbcon_open,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 9118df0..666f209 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -87,7 +87,7 @@ nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
 }
 
 void
-nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
+nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
 {
 	struct nouveau_fence *fence;
 
@@ -95,11 +95,19 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 	while (!list_empty(&fctx->pending)) {
 		fence = list_entry(fctx->pending.next, typeof(*fence), head);
 
+		if (error)
+			dma_fence_set_error(&fence->base, error);
+
 		if (nouveau_fence_signal(fence))
 			nvif_notify_put(&fctx->notify);
 	}
 	spin_unlock_irq(&fctx->lock);
+}
 
+void
+nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
+{
+	nouveau_fence_context_kill(fctx, 0);
 	nvif_notify_fini(&fctx->notify);
 	fctx->dead = 1;
 
@@ -156,7 +164,7 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
 
 		fence = list_entry(fctx->pending.next, typeof(*fence), head);
 		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
-		if (nouveau_fence_update(fence->channel, fctx))
+		if (nouveau_fence_update(chan, fctx))
 			ret = NVIF_NOTIFY_DROP;
 	}
 	spin_unlock_irqrestore(&fctx->lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index c9e24ba..4887caa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -63,6 +63,7 @@ struct nouveau_fence_priv {
 void nouveau_fence_context_new(struct nouveau_channel *, struct nouveau_fence_chan *);
 void nouveau_fence_context_del(struct nouveau_fence_chan *);
 void nouveau_fence_context_free(struct nouveau_fence_chan *);
+void nouveau_fence_context_kill(struct nouveau_fence_chan *, int error);
 
 int nv04_fence_create(struct nouveau_drm *);
 int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 1324c19..f5ece1f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -484,12 +484,9 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 
 static int
 validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
-	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
-	      uint64_t user_pbbo_ptr)
+	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
 {
 	struct nouveau_drm *drm = chan->drm;
-	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
-				(void __force __user *)(uintptr_t)user_pbbo_ptr;
 	struct nouveau_bo *nvbo;
 	int ret, relocs = 0;
 
@@ -533,10 +530,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 			b->presumed.offset = nvbo->bo.offset;
 			b->presumed.valid = 0;
 			relocs++;
-
-			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
-					     &b->presumed, sizeof(b->presumed)))
-				return -EFAULT;
 		}
 	}
 
@@ -547,8 +540,8 @@ static int
 nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 			     struct drm_file *file_priv,
 			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
-			     uint64_t user_buffers, int nr_buffers,
-			     struct validate_op *op, int *apply_relocs)
+			     int nr_buffers,
+			     struct validate_op *op, bool *apply_relocs)
 {
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	int ret;
@@ -565,7 +558,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 		return ret;
 	}
 
-	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
+	ret = validate_list(chan, cli, &op->list, pbbo);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
 			NV_PRINTK(err, cli, "validating bo list\n");
@@ -605,16 +598,12 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 static int
 nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 				struct drm_nouveau_gem_pushbuf *req,
+				struct drm_nouveau_gem_pushbuf_reloc *reloc,
 				struct drm_nouveau_gem_pushbuf_bo *bo)
 {
-	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
 	int ret = 0;
 	unsigned i;
 
-	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
-	if (IS_ERR(reloc))
-		return PTR_ERR(reloc);
-
 	for (i = 0; i < req->nr_relocs; i++) {
 		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
 		struct drm_nouveau_gem_pushbuf_bo *b;
@@ -693,11 +682,13 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_nouveau_gem_pushbuf *req = data;
 	struct drm_nouveau_gem_pushbuf_push *push;
+	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
 	struct drm_nouveau_gem_pushbuf_bo *bo;
 	struct nouveau_channel *chan = NULL;
 	struct validate_op op;
 	struct nouveau_fence *fence = NULL;
-	int i, j, ret = 0, do_reloc = 0;
+	int i, j, ret = 0;
+	bool do_reloc = false, sync = false;
 
 	if (unlikely(!abi16))
 		return -ENOMEM;
@@ -711,6 +702,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 
 	if (!chan)
 		return nouveau_abi16_put(abi16, -ENOENT);
+	if (unlikely(atomic_read(&chan->killed)))
+		return nouveau_abi16_put(abi16, -ENODEV);
+
+	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;
 
 	req->vram_available = drm->gem.vram_available;
 	req->gart_available = drm->gem.gart_available;
@@ -755,7 +750,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	}
 
 	/* Validate buffer list */
-	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
+revalidate:
+	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
 					   req->nr_buffers, &op, &do_reloc);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
@@ -765,7 +761,18 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 
 	/* Apply any relocations that are required */
 	if (do_reloc) {
-		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
+		if (!reloc) {
+			validate_fini(&op, chan, NULL, bo);
+			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
+			if (IS_ERR(reloc)) {
+				ret = PTR_ERR(reloc);
+				goto out_prevalid;
+			}
+
+			goto revalidate;
+		}
+
+		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
 		if (ret) {
 			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
 			goto out;
@@ -847,10 +854,33 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	if (sync) {
+		if (!(ret = nouveau_fence_wait(fence, false, false))) {
+			if ((ret = dma_fence_get_status(&fence->base)) == 1)
+				ret = 0;
+		}
+	}
+
 out:
 	validate_fini(&op, chan, fence, bo);
 	nouveau_fence_unref(&fence);
 
+	if (do_reloc) {
+		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
+			u64_to_user_ptr(req->buffers);
+
+		for (i = 0; i < req->nr_buffers; i++) {
+			if (bo[i].presumed.valid)
+				continue;
+
+			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
+					 sizeof(bo[i].presumed))) {
+				ret = -EFAULT;
+				break;
+			}
+		}
+		u_free(reloc);
+	}
 out_prevalid:
 	u_free(bo);
 	u_free(push);
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index d445c6f3..1c3104d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -741,7 +741,7 @@ nouveau_hwmon_init(struct drm_device *dev)
 			special_groups[i++] = &pwm_fan_sensor_group;
 	}
 
-	special_groups[i] = 0;
+	special_groups[i] = NULL;
 	hwmon_dev = hwmon_device_register_with_info(dev->dev, "nouveau", dev,
 							&nouveau_chip_info,
 							special_groups);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 77a0c6a..7ca0a24 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -63,14 +63,12 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
-	struct nouveau_mem *mem;
 	int ret;
 
 	if (drm->client.device.info.ram_size == 0)
 		return -ENOMEM;
 
 	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
-	mem = nouveau_mem(reg);
 	if (ret)
 		return ret;
 
@@ -103,11 +101,9 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
-	struct nouveau_mem *mem;
 	int ret;
 
 	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
-	mem = nouveau_mem(reg);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index 7706118..b28c7dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -69,8 +69,8 @@ nouveau_vma_del(struct nouveau_vma **pvma)
 		}
 		list_del(&vma->head);
 		kfree(*pvma);
-		*pvma = NULL;
 	}
+	*pvma = NULL;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c
index 5641bda..47efc40 100644
--- a/drivers/gpu/drm/nouveau/nvif/mmu.c
+++ b/drivers/gpu/drm/nouveau/nvif/mmu.c
@@ -121,6 +121,7 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
 				       kind, argc);
 		if (ret == 0)
 			memcpy(mmu->kind, kind->data, kind->count);
+		mmu->kind_inv = kind->kind_inv;
 		kfree(kind);
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/Kbuild
index b53de9b..db3ade1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/Kbuild
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: MIT
 include $(src)/nvkm/core/Kbuild
+include $(src)/nvkm/nvfw/Kbuild
 include $(src)/nvkm/falcon/Kbuild
 include $(src)/nvkm/subdev/Kbuild
 include $(src)/nvkm/engine/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 092acde..8b25367 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -22,6 +22,40 @@
 #include <core/device.h>
 #include <core/firmware.h>
 
+int
+nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *base,
+			const char *name, int ver, const struct firmware **pfw)
+{
+	char path[64];
+	int ret;
+
+	snprintf(path, sizeof(path), "%s%s", base, name);
+	ret = nvkm_firmware_get(subdev, path, ver, pfw);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+int
+nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *base,
+			const char *name, int ver, struct nvkm_blob *blob)
+{
+	const struct firmware *fw;
+	int ret;
+
+	ret = nvkm_firmware_load_name(subdev, base, name, ver, &fw);
+	if (ret == 0) {
+		blob->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+		blob->size = fw->size;
+		nvkm_firmware_put(fw);
+		if (!blob->data)
+			return -ENOMEM;
+	}
+
+	return ret;
+}
+
 /**
  * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
  * @subdev	subdevice that will use that firmware
@@ -32,9 +66,8 @@
  * Firmware files released by NVIDIA will always follow this format.
  */
 int
-nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname,
-			  int min_version, int max_version,
-			  const struct firmware **fw)
+nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, int ver,
+		  const struct firmware **fw)
 {
 	struct nvkm_device *device = subdev->device;
 	char f[64];
@@ -50,31 +83,21 @@ nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname,
 		cname[i] = tolower(cname[i]);
 	}
 
-	for (i = max_version; i >= min_version; i--) {
-		if (i != 0)
-			snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, i);
-		else
-			snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
+	if (ver != 0)
+		snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, ver);
+	else
+		snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
 
-		if (!firmware_request_nowarn(fw, f, device->dev)) {
-			nvkm_debug(subdev, "firmware \"%s\" loaded\n", f);
-			return i;
-		}
-
-		nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
+	if (!firmware_request_nowarn(fw, f, device->dev)) {
+		nvkm_debug(subdev, "firmware \"%s\" loaded - %zu byte(s)\n",
+			   f, (*fw)->size);
+		return 0;
 	}
 
-	nvkm_error(subdev, "failed to load firmware \"%s\"", fwname);
+	nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
 	return -ENOENT;
 }
 
-int
-nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname,
-		  const struct firmware **fw)
-{
-	return nvkm_firmware_get_version(subdev, fwname, 0, 0, fw);
-}
-
 /**
  * nvkm_firmware_put - release firmware loaded with nvkm_firmware_get
  */
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index e85a08e..4cc1862 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -91,8 +91,8 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
 	}
 
 	refcount_set(&tags->refcount, 1);
+	*ptags = memory->tags = tags;
 	mutex_unlock(&fb->subdev.mutex);
-	*ptags = tags;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 245990d..79a8f9d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -30,6 +30,7 @@ static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
 
 const char *
 nvkm_subdev_name[NVKM_SUBDEV_NR] = {
+	[NVKM_SUBDEV_ACR     ] = "acr",
 	[NVKM_SUBDEV_BAR     ] = "bar",
 	[NVKM_SUBDEV_VBIOS   ] = "bios",
 	[NVKM_SUBDEV_BUS     ] = "bus",
@@ -50,7 +51,6 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
 	[NVKM_SUBDEV_MXM     ] = "mxm",
 	[NVKM_SUBDEV_PCI     ] = "pci",
 	[NVKM_SUBDEV_PMU     ] = "pmu",
-	[NVKM_SUBDEV_SECBOOT ] = "secboot",
 	[NVKM_SUBDEV_THERM   ] = "therm",
 	[NVKM_SUBDEV_TIMER   ] = "tmr",
 	[NVKM_SUBDEV_TOP     ] = "top",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index c3c7159..c7d700916 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1987,6 +1987,8 @@ nv117_chipset = {
 	.dma = gf119_dma_new,
 	.fifo = gm107_fifo_new,
 	.gr = gm107_gr_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
 	.sw = gf100_sw_new,
 };
 
@@ -2027,6 +2029,7 @@ nv118_chipset = {
 static const struct nvkm_device_chip
 nv120_chipset = {
 	.name = "GM200",
+	.acr = gm200_acr_new,
 	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
@@ -2045,7 +2048,6 @@ nv120_chipset = {
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
 	.therm = gm200_therm_new,
-	.secboot = gm200_secboot_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
 	.volt = gk104_volt_new,
@@ -2056,12 +2058,16 @@ nv120_chipset = {
 	.dma = gf119_dma_new,
 	.fifo = gm200_fifo_new,
 	.gr = gm200_gr_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
+	.nvenc[1] = gm107_nvenc_new,
 	.sw = gf100_sw_new,
 };
 
 static const struct nvkm_device_chip
 nv124_chipset = {
 	.name = "GM204",
+	.acr = gm200_acr_new,
 	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
@@ -2080,7 +2086,6 @@ nv124_chipset = {
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
 	.therm = gm200_therm_new,
-	.secboot = gm200_secboot_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
 	.volt = gk104_volt_new,
@@ -2091,12 +2096,16 @@ nv124_chipset = {
 	.dma = gf119_dma_new,
 	.fifo = gm200_fifo_new,
 	.gr = gm200_gr_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
+	.nvenc[1] = gm107_nvenc_new,
 	.sw = gf100_sw_new,
 };
 
 static const struct nvkm_device_chip
 nv126_chipset = {
 	.name = "GM206",
+	.acr = gm200_acr_new,
 	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
@@ -2115,7 +2124,6 @@ nv126_chipset = {
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
 	.therm = gm200_therm_new,
-	.secboot = gm200_secboot_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
 	.volt = gk104_volt_new,
@@ -2126,12 +2134,15 @@ nv126_chipset = {
 	.dma = gf119_dma_new,
 	.fifo = gm200_fifo_new,
 	.gr = gm200_gr_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
 	.sw = gf100_sw_new,
 };
 
 static const struct nvkm_device_chip
 nv12b_chipset = {
 	.name = "GM20B",
+	.acr = gm20b_acr_new,
 	.bar = gm20b_bar_new,
 	.bus = gf100_bus_new,
 	.clk = gm20b_clk_new,
@@ -2143,7 +2154,6 @@ nv12b_chipset = {
 	.mc = gk20a_mc_new,
 	.mmu = gm20b_mmu_new,
 	.pmu = gm20b_pmu_new,
-	.secboot = gm20b_secboot_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
 	.ce[2] = gm200_ce_new,
@@ -2157,6 +2167,7 @@ nv12b_chipset = {
 static const struct nvkm_device_chip
 nv130_chipset = {
 	.name = "GP100",
+	.acr = gm200_acr_new,
 	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
@@ -2172,7 +2183,6 @@ nv130_chipset = {
 	.mc = gp100_mc_new,
 	.mmu = gp100_mmu_new,
 	.therm = gp100_therm_new,
-	.secboot = gm200_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp100_pmu_new,
 	.timer = gk20a_timer_new,
@@ -2187,12 +2197,17 @@ nv130_chipset = {
 	.disp = gp100_disp_new,
 	.fifo = gp100_fifo_new,
 	.gr = gp100_gr_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
+	.nvenc[1] = gm107_nvenc_new,
+	.nvenc[2] = gm107_nvenc_new,
 	.sw = gf100_sw_new,
 };
 
 static const struct nvkm_device_chip
 nv132_chipset = {
 	.name = "GP102",
+	.acr = gp102_acr_new,
 	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
@@ -2208,7 +2223,6 @@ nv132_chipset = {
 	.mc = gp100_mc_new,
 	.mmu = gp100_mmu_new,
 	.therm = gp100_therm_new,
-	.secboot = gp102_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.timer = gk20a_timer_new,
@@ -2221,7 +2235,9 @@ nv132_chipset = {
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
 	.gr = gp102_gr_new,
-	.nvdec[0] = gp102_nvdec_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
+	.nvenc[1] = gm107_nvenc_new,
 	.sec2 = gp102_sec2_new,
 	.sw = gf100_sw_new,
 };
@@ -2229,6 +2245,7 @@ nv132_chipset = {
 static const struct nvkm_device_chip
 nv134_chipset = {
 	.name = "GP104",
+	.acr = gp102_acr_new,
 	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
@@ -2244,7 +2261,6 @@ nv134_chipset = {
 	.mc = gp100_mc_new,
 	.mmu = gp100_mmu_new,
 	.therm = gp100_therm_new,
-	.secboot = gp102_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.timer = gk20a_timer_new,
@@ -2257,7 +2273,9 @@ nv134_chipset = {
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
 	.gr = gp104_gr_new,
-	.nvdec[0] = gp102_nvdec_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
+	.nvenc[1] = gm107_nvenc_new,
 	.sec2 = gp102_sec2_new,
 	.sw = gf100_sw_new,
 };
@@ -2265,6 +2283,7 @@ nv134_chipset = {
 static const struct nvkm_device_chip
 nv136_chipset = {
 	.name = "GP106",
+	.acr = gp102_acr_new,
 	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
@@ -2280,7 +2299,6 @@ nv136_chipset = {
 	.mc = gp100_mc_new,
 	.mmu = gp100_mmu_new,
 	.therm = gp100_therm_new,
-	.secboot = gp102_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.timer = gk20a_timer_new,
@@ -2293,7 +2311,8 @@ nv136_chipset = {
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
 	.gr = gp104_gr_new,
-	.nvdec[0] = gp102_nvdec_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
 	.sec2 = gp102_sec2_new,
 	.sw = gf100_sw_new,
 };
@@ -2301,6 +2320,7 @@ nv136_chipset = {
 static const struct nvkm_device_chip
 nv137_chipset = {
 	.name = "GP107",
+	.acr = gp102_acr_new,
 	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
@@ -2316,7 +2336,6 @@ nv137_chipset = {
 	.mc = gp100_mc_new,
 	.mmu = gp100_mmu_new,
 	.therm = gp100_therm_new,
-	.secboot = gp102_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.timer = gk20a_timer_new,
@@ -2329,7 +2348,9 @@ nv137_chipset = {
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
 	.gr = gp107_gr_new,
-	.nvdec[0] = gp102_nvdec_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
+	.nvenc[1] = gm107_nvenc_new,
 	.sec2 = gp102_sec2_new,
 	.sw = gf100_sw_new,
 };
@@ -2337,6 +2358,7 @@ nv137_chipset = {
 static const struct nvkm_device_chip
 nv138_chipset = {
 	.name = "GP108",
+	.acr = gp108_acr_new,
 	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
@@ -2352,7 +2374,6 @@ nv138_chipset = {
 	.mc = gp100_mc_new,
 	.mmu = gp100_mmu_new,
 	.therm = gp100_therm_new,
-	.secboot = gp108_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.timer = gk20a_timer_new,
@@ -2364,30 +2385,30 @@ nv138_chipset = {
 	.disp = gp102_disp_new,
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
-	.gr = gp107_gr_new,
-	.nvdec[0] = gp102_nvdec_new,
-	.sec2 = gp102_sec2_new,
+	.gr = gp108_gr_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.sec2 = gp108_sec2_new,
 	.sw = gf100_sw_new,
 };
 
 static const struct nvkm_device_chip
 nv13b_chipset = {
 	.name = "GP10B",
+	.acr = gp10b_acr_new,
 	.bar = gm20b_bar_new,
 	.bus = gf100_bus_new,
-	.fault = gp100_fault_new,
+	.fault = gp10b_fault_new,
 	.fb = gp10b_fb_new,
 	.fuse = gm107_fuse_new,
 	.ibus = gp10b_ibus_new,
 	.imem = gk20a_instmem_new,
-	.ltc = gp102_ltc_new,
+	.ltc = gp10b_ltc_new,
 	.mc = gp10b_mc_new,
 	.mmu = gp10b_mmu_new,
-	.secboot = gp10b_secboot_new,
-	.pmu = gm20b_pmu_new,
+	.pmu = gp10b_pmu_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
-	.ce[2] = gp102_ce_new,
+	.ce[0] = gp100_ce_new,
 	.dma = gf119_dma_new,
 	.fifo = gp10b_fifo_new,
 	.gr = gp10b_gr_new,
@@ -2397,6 +2418,7 @@ nv13b_chipset = {
 static const struct nvkm_device_chip
 nv140_chipset = {
 	.name = "GV100",
+	.acr = gp108_acr_new,
 	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
@@ -2414,7 +2436,6 @@ nv140_chipset = {
 	.mmu = gv100_mmu_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
-	.secboot = gp108_secboot_new,
 	.therm = gp100_therm_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
@@ -2431,13 +2452,17 @@ nv140_chipset = {
 	.dma = gv100_dma_new,
 	.fifo = gv100_fifo_new,
 	.gr = gv100_gr_new,
-	.nvdec[0] = gp102_nvdec_new,
-	.sec2 = gp102_sec2_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
+	.nvenc[1] = gm107_nvenc_new,
+	.nvenc[2] = gm107_nvenc_new,
+	.sec2 = gp108_sec2_new,
 };
 
 static const struct nvkm_device_chip
 nv162_chipset = {
 	.name = "TU102",
+	.acr = tu102_acr_new,
 	.bar = tu102_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
@@ -2466,13 +2491,16 @@ nv162_chipset = {
 	.disp = tu102_disp_new,
 	.dma = gv100_dma_new,
 	.fifo = tu102_fifo_new,
-	.nvdec[0] = gp102_nvdec_new,
+	.gr = tu102_gr_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
 	.sec2 = tu102_sec2_new,
 };
 
 static const struct nvkm_device_chip
 nv164_chipset = {
 	.name = "TU104",
+	.acr = tu102_acr_new,
 	.bar = tu102_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
@@ -2501,13 +2529,17 @@ nv164_chipset = {
 	.disp = tu102_disp_new,
 	.dma = gv100_dma_new,
 	.fifo = tu102_fifo_new,
-	.nvdec[0] = gp102_nvdec_new,
+	.gr = tu102_gr_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvdec[1] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
 	.sec2 = tu102_sec2_new,
 };
 
 static const struct nvkm_device_chip
 nv166_chipset = {
 	.name = "TU106",
+	.acr = tu102_acr_new,
 	.bar = tu102_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
@@ -2536,7 +2568,11 @@ nv166_chipset = {
 	.disp = tu102_disp_new,
 	.dma = gv100_dma_new,
 	.fifo = tu102_fifo_new,
-	.nvdec[0] = gp102_nvdec_new,
+	.gr = tu102_gr_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvdec[1] = gm107_nvdec_new,
+	.nvdec[2] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
 	.sec2 = tu102_sec2_new,
 };
 
@@ -2571,7 +2607,8 @@ nv167_chipset = {
 	.disp = tu102_disp_new,
 	.dma = gv100_dma_new,
 	.fifo = tu102_fifo_new,
-	.nvdec[0] = gp102_nvdec_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
 	.sec2 = tu102_sec2_new,
 };
 
@@ -2606,7 +2643,8 @@ nv168_chipset = {
 	.disp = tu102_disp_new,
 	.dma = gv100_dma_new,
 	.fifo = tu102_fifo_new,
-	.nvdec[0] = gp102_nvdec_new,
+	.nvdec[0] = gm107_nvdec_new,
+	.nvenc[0] = gm107_nvenc_new,
 	.sec2 = tu102_sec2_new,
 };
 
@@ -2638,6 +2676,7 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
 
 	switch (index) {
 #define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
+	_(ACR     , device->acr     , &device->acr->subdev);
 	_(BAR     , device->bar     , &device->bar->subdev);
 	_(VBIOS   , device->bios    , &device->bios->subdev);
 	_(BUS     , device->bus     , &device->bus->subdev);
@@ -2658,7 +2697,6 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
 	_(MXM     , device->mxm     ,  device->mxm);
 	_(PCI     , device->pci     , &device->pci->subdev);
 	_(PMU     , device->pmu     , &device->pmu->subdev);
-	_(SECBOOT , device->secboot , &device->secboot->subdev);
 	_(THERM   , device->therm   , &device->therm->subdev);
 	_(TIMER   , device->timer   , &device->timer->subdev);
 	_(TOP     , device->top     , &device->top->subdev);
@@ -2703,9 +2741,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
 	_(MSPDEC , device->mspdec  ,  device->mspdec);
 	_(MSPPP  , device->msppp   ,  device->msppp);
 	_(MSVLD  , device->msvld   ,  device->msvld);
-	_(NVENC0 , device->nvenc[0],  device->nvenc[0]);
-	_(NVENC1 , device->nvenc[1],  device->nvenc[1]);
-	_(NVENC2 , device->nvenc[2],  device->nvenc[2]);
+	_(NVENC0 , device->nvenc[0], &device->nvenc[0]->engine);
+	_(NVENC1 , device->nvenc[1], &device->nvenc[1]->engine);
+	_(NVENC2 , device->nvenc[2], &device->nvenc[2]->engine);
 	_(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine);
 	_(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine);
 	_(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine);
@@ -3144,6 +3182,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 	}                                                                      \
 	break
 		switch (i) {
+		_(NVKM_SUBDEV_ACR     ,      acr);
 		_(NVKM_SUBDEV_BAR     ,      bar);
 		_(NVKM_SUBDEV_VBIOS   ,     bios);
 		_(NVKM_SUBDEV_BUS     ,      bus);
@@ -3164,7 +3203,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 		_(NVKM_SUBDEV_MXM     ,      mxm);
 		_(NVKM_SUBDEV_PCI     ,      pci);
 		_(NVKM_SUBDEV_PMU     ,      pmu);
-		_(NVKM_SUBDEV_SECBOOT ,  secboot);
 		_(NVKM_SUBDEV_THERM   ,    therm);
 		_(NVKM_SUBDEV_TIMER   ,    timer);
 		_(NVKM_SUBDEV_TOP     ,      top);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index d8be2f7..54eab5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -3,6 +3,7 @@
 #define __NVKM_DEVICE_PRIV_H__
 #include <core/device.h>
 
+#include <subdev/acr.h>
 #include <subdev/bar.h>
 #include <subdev/bios.h>
 #include <subdev/bus.h>
@@ -27,7 +28,6 @@
 #include <subdev/timer.h>
 #include <subdev/top.h>
 #include <subdev/volt.h>
-#include <subdev/secboot.h>
 
 #include <engine/bsp.h>
 #include <engine/ce.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 0e372a1..d0d52c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -52,18 +52,18 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
 	clk_set_rate(tdev->clk_pwr, 204000000);
 	udelay(10);
 
-	reset_control_assert(tdev->rst);
-	udelay(10);
-
 	if (!tdev->pdev->dev.pm_domain) {
+		reset_control_assert(tdev->rst);
+		udelay(10);
+
 		ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
 		if (ret)
 			goto err_clamp;
 		udelay(10);
-	}
 
-	reset_control_deassert(tdev->rst);
-	udelay(10);
+		reset_control_deassert(tdev->rst);
+		udelay(10);
+	}
 
 	return 0;
 
@@ -279,6 +279,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
 		      struct nvkm_device **pdevice)
 {
 	struct nvkm_device_tegra *tdev;
+	unsigned long rate;
 	int ret;
 
 	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
@@ -307,6 +308,17 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
 		goto free;
 	}
 
+	rate = clk_get_rate(tdev->clk);
+	if (rate == 0) {
+		ret = clk_set_rate(tdev->clk, ULONG_MAX);
+		if (ret < 0)
+			goto free;
+
+		rate = clk_get_rate(tdev->clk);
+
+		dev_dbg(&pdev->dev, "GPU clock set to %lu\n", rate);
+	}
+
 	if (func->require_ref_clk)
 		tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
 	if (IS_ERR(tdev->clk_ref)) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index bcf32d9..50e3539 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -74,6 +74,8 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
 
 	if (debug > subdev->debug)
 		return;
+	if (!mthd)
+		return;
 
 	for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
 		u32 base = chan->head * mthd->addr;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 818d21b..3800aeb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -365,7 +365,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
 	 * and it's better to have a failed modeset than that.
 	 */
 	for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
-		if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) {
+		if (cfg->nr <= outp_nr && cfg->bw <= outp_bw) {
 			/* Try to respect sink limits too when selecting
 			 * lowest link configuration.
 			 */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index 892be6c..c103252 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -101,15 +101,26 @@ gv100_disp_exception(struct nv50_disp *disp, int chid)
 	u32 stat = nvkm_rd32(device, 0x611020 + (chid * 12));
 	u32 type = (stat & 0x00007000) >> 12;
 	u32 mthd = (stat & 0x00000fff) << 2;
-	u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
-	u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
 	const struct nvkm_enum *reason =
 		nvkm_enum_find(nv50_disp_intr_error_type, type);
 
-	nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x "
-			   "data %08x code %08x\n",
-		   chid, stat, type, reason ? reason->name : "",
-		   mthd, data, code);
+	/*TODO: Suspect 33->41 are for WRBK channel exceptions, but we
+	 *      don't support those currently.
+	 *
+	 *      CORE+WIN CHIDs map directly to the FE_EXCEPT() slots.
+	 */
+	if (chid <= 32) {
+		u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
+		u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
+		nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
+				   "mthd %04x data %08x code %08x\n",
+			   chid, stat, type, reason ? reason->name : "",
+			   mthd, data, code);
+	} else {
+		nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
+				   "mthd %04x\n",
+			   chid, stat, type, reason ? reason->name : "", mthd);
+	}
 
 	if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
 		switch (mthd) {
@@ -144,6 +155,12 @@ gv100_disp_intr_ctrl_disp(struct nv50_disp *disp)
 	if (stat & 0x00000008)
 		stat &= ~0x00000008;
 
+	if (stat & 0x00000080) {
+		u32 error = nvkm_mask(device, 0x611848, 0x00000000, 0x00000000);
+		nvkm_warn(subdev, "error %08x\n", error);
+		stat &= ~0x00000080;
+	}
+
 	if (stat & 0x00000100) {
 		unsigned long wndws = nvkm_rd32(device, 0x611858);
 		unsigned long other = nvkm_rd32(device, 0x61185c);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 73724a8..558c86f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -36,8 +36,10 @@
 nvkm-y += nvkm/engine/gr/gp102.o
 nvkm-y += nvkm/engine/gr/gp104.o
 nvkm-y += nvkm/engine/gr/gp107.o
+nvkm-y += nvkm/engine/gr/gp108.o
 nvkm-y += nvkm/engine/gr/gp10b.o
 nvkm-y += nvkm/engine/gr/gv100.o
+nvkm-y += nvkm/engine/gr/tu102.o
 
 nvkm-y += nvkm/engine/gr/ctxnv40.o
 nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -60,3 +62,4 @@
 nvkm-y += nvkm/engine/gr/ctxgp104.o
 nvkm-y += nvkm/engine/gr/ctxgp107.o
 nvkm-y += nvkm/engine/gr/ctxgv100.o
+nvkm-y += nvkm/engine/gr/ctxtu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 85f2d1e..2979157 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1324,10 +1324,8 @@ gf100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
 void
 gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
 {
-	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *func = gr->func->grctx;
-	int gpc, sm, i, j;
-	u32 data;
+	int sm;
 
 	for (sm = 0; sm < gr->sm_nr; sm++) {
 		func->sm_id(gr, gr->sm[sm].gpc, gr->sm[sm].tpc, sm);
@@ -1335,12 +1333,9 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
 			func->tpc_nr(gr, gr->sm[sm].gpc);
 	}
 
-	for (gpc = 0, i = 0; i < 4; i++) {
-		for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++)
-			data |= gr->tpc_nr[gpc] << (j * 4);
-		nvkm_wr32(device, 0x406028 + (i * 4), data);
-		nvkm_wr32(device, 0x405870 + (i * 4), data);
-	}
+	gf100_gr_init_num_tpc_per_gpc(gr, false, true);
+	if (!func->skip_pd_num_tpc_per_gpc)
+		gf100_gr_init_num_tpc_per_gpc(gr, true, false);
 
 	if (func->r4060a8)
 		func->r4060a8(gr);
@@ -1374,7 +1369,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	nvkm_mc_unk260(device, 0);
 
-	if (!gr->fuc_sw_ctx) {
+	if (!gr->sw_ctx) {
 		gf100_gr_mmio(gr, grctx->hub);
 		gf100_gr_mmio(gr, grctx->gpc_0);
 		gf100_gr_mmio(gr, grctx->zcull);
@@ -1382,7 +1377,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 		gf100_gr_mmio(gr, grctx->tpc);
 		gf100_gr_mmio(gr, grctx->ppc);
 	} else {
-		gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+		gf100_gr_mmio(gr, gr->sw_ctx);
 	}
 
 	gf100_gr_wait_idle(gr);
@@ -1401,8 +1396,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	gf100_gr_wait_idle(gr);
 
 	if (grctx->r400088) grctx->r400088(gr, false);
-	if (gr->fuc_bundle)
-		gf100_gr_icmd(gr, gr->fuc_bundle);
+	if (gr->bundle)
+		gf100_gr_icmd(gr, gr->bundle);
 	else
 		gf100_gr_icmd(gr, grctx->icmd);
 	if (grctx->sw_veid_bundle_init)
@@ -1411,8 +1406,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	nvkm_wr32(device, 0x404154, idle_timeout);
 
-	if (gr->fuc_method)
-		gf100_gr_mthd(gr, gr->fuc_method);
+	if (gr->method)
+		gf100_gr_mthd(gr, gr->method);
 	else
 		gf100_gr_mthd(gr, grctx->mthd);
 	nvkm_mc_unk260(device, 1);
@@ -1431,6 +1426,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 		grctx->r419a3c(gr);
 	if (grctx->r408840)
 		grctx->r408840(gr);
+	if (grctx->r419c0c)
+		grctx->r419c0c(gr);
 }
 
 #define CB_RESERVED 0x80000
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 478b472..32bbddc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -57,6 +57,7 @@ struct gf100_grctx_func {
 	/* floorsweeping */
 	void (*sm_id)(struct gf100_gr *, int gpc, int tpc, int sm);
 	void (*tpc_nr)(struct gf100_gr *, int gpc);
+	bool skip_pd_num_tpc_per_gpc;
 	void (*r4060a8)(struct gf100_gr *);
 	void (*rop_mapping)(struct gf100_gr *);
 	void (*alpha_beta_tables)(struct gf100_gr *);
@@ -76,6 +77,7 @@ struct gf100_grctx_func {
 	void (*r418e94)(struct gf100_gr *);
 	void (*r419a3c)(struct gf100_gr *);
 	void (*r408840)(struct gf100_gr *);
+	void (*r419c0c)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -153,6 +155,14 @@ extern const struct gf100_grctx_func gp107_grctx;
 
 extern const struct gf100_grctx_func gv100_grctx;
 
+extern const struct gf100_grctx_func tu102_grctx;
+void gv100_grctx_unkn88c(struct gf100_gr *, bool);
+void gv100_grctx_generate_unkn(struct gf100_gr *);
+extern const struct gf100_gr_init gv100_grctx_init_sw_veid_bundle_init_0[];
+void gv100_grctx_generate_attrib(struct gf100_grctx *);
+void gv100_grctx_generate_rop_mapping(struct gf100_gr *);
+void gv100_grctx_generate_r400088(struct gf100_gr *, bool);
+
 /* context init value lists */
 
 extern const struct gf100_gr_pack gf100_grctx_pack_icmd[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index 896d473..c0d36bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -32,7 +32,7 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	u32 idle_timeout;
 	int i;
 
-	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+	gf100_gr_mmio(gr, gr->sw_ctx);
 
 	gf100_gr_wait_idle(gr);
 
@@ -56,10 +56,10 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_wait_idle(gr);
 
-	gf100_gr_mthd(gr, gr->fuc_method);
+	gf100_gr_mthd(gr, gr->method);
 	gf100_gr_wait_idle(gr);
 
-	gf100_gr_icmd(gr, gr->fuc_bundle);
+	gf100_gr_icmd(gr, gr->bundle);
 	grctx->pagepool(info);
 	grctx->bundle(info);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
index a1d9e11..6b92f8a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -29,7 +29,7 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	u32 idle_timeout;
 	int i, tmp;
 
-	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+	gf100_gr_mmio(gr, gr->sw_ctx);
 
 	gf100_gr_wait_idle(gr);
 
@@ -59,10 +59,10 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_wait_idle(gr);
 
-	gf100_gr_mthd(gr, gr->fuc_method);
+	gf100_gr_mthd(gr, gr->method);
 	gf100_gr_wait_idle(gr);
 
-	gf100_gr_icmd(gr, gr->fuc_bundle);
+	gf100_gr_icmd(gr, gr->bundle);
 	grctx->pagepool(info);
 	grctx->bundle(info);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
index 0990765..39553d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
@@ -25,7 +25,7 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-static const struct gf100_gr_init
+const struct gf100_gr_init
 gv100_grctx_init_sw_veid_bundle_init_0[] = {
 	{ 0x00001000, 64, 0x00100000, 0x00000008 },
 	{ 0x00000941, 64, 0x00100000, 0x00000000 },
@@ -58,7 +58,7 @@ gv100_grctx_pack_sw_veid_bundle_init[] = {
 	{}
 };
 
-static void
+void
 gv100_grctx_generate_attrib(struct gf100_grctx *info)
 {
 	struct gf100_gr *gr = info->gr;
@@ -67,14 +67,14 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info)
 	const u32 attrib = grctx->attrib_nr;
 	const u32   gfxp = grctx->gfxp_nr;
 	const int s = 12;
-	const int max_batches = 0xffff;
 	u32 size = grctx->alpha_nr_max * gr->tpc_total;
 	u32 ao = 0;
 	u32 bo = ao + size;
 	int gpc, ppc, b, n = 0;
 
-	size += grctx->gfxp_nr * gr->tpc_total;
-	size = ((size * 0x20) + 128) & ~127;
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++)
+		size += grctx->gfxp_nr * gr->ppc_nr[gpc] * gr->ppc_tpc_max;
+	size = ((size * 0x20) + 127) & ~127;
 	b = mmio_vram(info, size, (1 << s), false);
 
 	mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -84,13 +84,12 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info)
 	mmio_wr32(info, 0x419e04, 0x80000000 | size >> 7);
 	mmio_wr32(info, 0x405830, attrib);
 	mmio_wr32(info, 0x40585c, alpha);
-	mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
 
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
 		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
 			const u32 as =  alpha * gr->ppc_tpc_nr[gpc][ppc];
-			const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
-			const u32 gs =   gfxp * gr->ppc_tpc_nr[gpc][ppc];
+			const u32 bs = attrib * gr->ppc_tpc_max;
+			const u32 gs =   gfxp * gr->ppc_tpc_max;
 			const u32 u = 0x418ea0 + (n * 0x04);
 			const u32 o = PPC_UNIT(gpc, ppc, 0);
 			if (!(gr->ppc_mask[gpc] & (1 << ppc)))
@@ -110,7 +109,7 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info)
 	mmio_wr32(info, 0x41befc, 0x00000100);
 }
 
-static void
+void
 gv100_grctx_generate_rop_mapping(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -147,7 +146,7 @@ gv100_grctx_generate_rop_mapping(struct gf100_gr *gr)
 				     gr->screen_tile_row_offset);
 }
 
-static void
+void
 gv100_grctx_generate_r400088(struct gf100_gr *gr, bool on)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -163,7 +162,7 @@ gv100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
 	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
 }
 
-static void
+void
 gv100_grctx_generate_unkn(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -174,7 +173,7 @@ gv100_grctx_generate_unkn(struct gf100_gr *gr)
 	nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
 }
 
-static void
+void
 gv100_grctx_unkn88c(struct gf100_gr *gr, bool on)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c
new file mode 100644
index 0000000..2299ca0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ctxgf100.h"
+
+static void
+tu102_grctx_generate_r419c0c(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x419c0c, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x40584c, 0x00000008, 0x00000000);
+	nvkm_mask(device, 0x400080, 0x00000000, 0x00000000);
+}
+
+static void
+tu102_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x608), sm);
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
+}
+
+static const struct gf100_gr_init
+tu102_grctx_init_unknown_bundle_init_0[] = {
+	{ 0x00001000,  1, 0x00000001, 0x00000004 },
+	{ 0x00002020, 64, 0x00000001, 0x00000000 },
+	{ 0x0001e100,  1, 0x00000001, 0x00000001 },
+	{}
+};
+
+static const struct gf100_gr_pack
+tu102_grctx_pack_sw_veid_bundle_init[] = {
+	{ gv100_grctx_init_sw_veid_bundle_init_0 },
+	{ tu102_grctx_init_unknown_bundle_init_0 },
+	{}
+};
+
+static void
+tu102_grctx_generate_attrib(struct gf100_grctx *info)
+{
+	const u64 size = 0x80000; /*XXX: educated guess */
+	const int s = 8;
+	const int b = mmio_vram(info, size, (1 << s), true);
+
+	gv100_grctx_generate_attrib(info);
+
+	mmio_refn(info, 0x408070, 0x00000000, s, b);
+	mmio_wr32(info, 0x408074, size >> s); /*XXX: guess */
+	mmio_refn(info, 0x419034, 0x00000000, s, b);
+	mmio_wr32(info, 0x408078, 0x00000000);
+}
+
+const struct gf100_grctx_func
+tu102_grctx = {
+	.unkn88c = gv100_grctx_unkn88c,
+	.main = gf100_grctx_generate_main,
+	.unkn = gv100_grctx_generate_unkn,
+	.sw_veid_bundle_init = tu102_grctx_pack_sw_veid_bundle_init,
+	.bundle = gm107_grctx_generate_bundle,
+	.bundle_size = 0x3000,
+	.bundle_min_gpm_fifo_depth = 0x180,
+	.bundle_token_limit = 0xa80,
+	.pagepool = gp100_grctx_generate_pagepool,
+	.pagepool_size = 0x20000,
+	.attrib = tu102_grctx_generate_attrib,
+	.attrib_nr_max = 0x800,
+	.attrib_nr = 0x700,
+	.alpha_nr_max = 0xc00,
+	.alpha_nr = 0x800,
+	.gfxp_nr = 0xfa8,
+	.sm_id = tu102_grctx_generate_sm_id,
+	.skip_pd_num_tpc_per_gpc = true,
+	.rop_mapping = gv100_grctx_generate_rop_mapping,
+	.r406500 = gm200_grctx_generate_r406500,
+	.r400088 = gv100_grctx_generate_r400088,
+	.r419c0c = tu102_grctx_generate_r419c0c,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index c24f35a..ae2d5b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -441,7 +441,7 @@ static uint32_t gk208_grhub_code[] = {
 	0x020014fe,
 	0x12004002,
 	0xbd0002f6,
-	0x05c94104,
+	0x05ca4104,
 	0xbd0010fe,
 	0x07004024,
 	0xbd0002f6,
@@ -460,423 +460,423 @@ static uint32_t gk208_grhub_code[] = {
 	0x01039204,
 	0x03090080,
 	0xbd0003f6,
-	0x87044204,
-	0xf6040040,
-	0x04bd0002,
-	0x00400402,
-	0x0002f603,
-	0x31f404bd,
-	0x96048e10,
-	0x00657e40,
-	0xc7feb200,
-	0x01b590f1,
-	0x1ff4f003,
-	0x01020fb5,
-	0x041fbb01,
-	0x800112b6,
-	0xf6010300,
-	0x04bd0001,
-	0x01040080,
-	0xbd0001f6,
-	0x01004104,
-	0xac7e020f,
-	0xbb7e0006,
-	0x100f0006,
-	0x0006fd7e,
-	0x98000e98,
-	0x207e010f,
-	0x14950001,
-	0xc0008008,
-	0x0004f601,
-	0x008004bd,
-	0x04f601c1,
-	0xb704bd00,
-	0xbb130030,
-	0xf5b6001f,
-	0xd3008002,
-	0x000ff601,
-	0x15b604bd,
-	0x0110b608,
-	0xb20814b6,
-	0x02687e1f,
-	0x001fbb00,
-	0x84020398,
-/* 0x041f: init_gpc */
-	0xb8502000,
-	0x0008044e,
-	0x8f7e1fb2,
-	0x4eb80000,
-	0xbd00010c,
-	0x008f7ef4,
-	0x044eb800,
-	0x8f7e0001,
-	0x4eb80000,
-	0x0f000100,
-	0x008f7e02,
-	0x004eb800,
-/* 0x044e: init_gpc_wait */
-	0x657e0008,
-	0xffc80000,
-	0xf90bf41f,
-	0x08044eb8,
-	0x00657e00,
-	0x001fbb00,
-	0x800040b7,
-	0xf40132b6,
-	0x000fb41b,
-	0x0006fd7e,
-	0xac7e000f,
-	0x00800006,
-	0x01f60201,
-	0xbd04bd00,
-	0x1f19f014,
-	0x02300080,
-	0xbd0001f6,
-/* 0x0491: wait */
-	0x0028f404,
-/* 0x0497: main */
-	0x0d0031f4,
-	0x00377e10,
-	0xf401f400,
-	0x4001e4b1,
-	0x00c71bf5,
-	0x99f094bd,
-	0x37008004,
-	0x0009f602,
-	0x008104bd,
-	0x11cf02c0,
-	0xc1008200,
-	0x0022cf02,
-	0xf41f13c8,
-	0x23c8770b,
-	0x550bf41f,
-	0x12b220f9,
-	0x99f094bd,
-	0x37008007,
-	0x0009f602,
-	0x32f404bd,
-	0x0231f401,
-	0x0008807e,
-	0x99f094bd,
-	0x17008007,
-	0x0009f602,
-	0x20fc04bd,
-	0x99f094bd,
-	0x37008006,
-	0x0009f602,
-	0x31f404bd,
-	0x08807e01,
-	0xf094bd00,
-	0x00800699,
-	0x09f60217,
-	0xf404bd00,
-/* 0x0522: chsw_prev_no_next */
-	0x20f92f0e,
-	0x32f412b2,
-	0x0232f401,
-	0x0008807e,
-	0x008020fc,
-	0x02f602c0,
-	0xf404bd00,
-/* 0x053e: chsw_no_prev */
-	0x23c8130e,
-	0x0d0bf41f,
-	0xf40131f4,
-	0x807e0232,
-/* 0x054e: chsw_done */
-	0x01020008,
-	0x02c30080,
+	0x87048204,
+	0x04004000,
 	0xbd0002f6,
-	0xf094bd04,
-	0x00800499,
-	0x09f60217,
-	0xf504bd00,
-/* 0x056b: main_not_ctx_switch */
-	0xb0ff300e,
-	0x1bf401e4,
-	0x7ef2b20c,
-	0xf4000820,
-/* 0x057a: main_not_ctx_chan */
-	0xe4b0400e,
-	0x2c1bf402,
-	0x99f094bd,
-	0x37008007,
-	0x0009f602,
-	0x32f404bd,
-	0x0232f401,
-	0x0008807e,
-	0x99f094bd,
-	0x17008007,
-	0x0009f602,
-	0x0ef404bd,
-/* 0x05a9: main_not_ctx_save */
-	0x10ef9411,
-	0x7e01f5f0,
-	0xf50002f8,
-/* 0x05b7: main_done */
-	0xbdfee40e,
-	0x1f29f024,
-	0x02300080,
-	0xbd0002f6,
-	0xd20ef504,
-/* 0x05c9: ih */
-	0xf900f9fe,
-	0x0188fe80,
-	0x90f980f9,
-	0xb0f9a0f9,
-	0xe0f9d0f9,
-	0x04bdf0f9,
-	0xcf02004a,
-	0xabc400aa,
-	0x230bf404,
-	0x004e100d,
-	0x00eecf1a,
-	0xcf19004f,
-	0x047e00ff,
-	0xb0b70000,
-	0x010e0400,
-	0xf61d0040,
-	0x04bd000e,
-/* 0x060c: ih_no_fifo */
-	0x0100abe4,
-	0x0d0c0bf4,
-	0x40014e10,
-	0x0000047e,
-/* 0x061c: ih_no_ctxsw */
-	0x0400abe4,
-	0x8e560bf4,
-	0x7e400708,
-	0xb2000065,
-	0x040080ff,
-	0x000ff602,
-	0x048e04bd,
-	0x657e4007,
-	0xffb20000,
-	0x02030080,
-	0xbd000ff6,
-	0x50fec704,
-	0x8f02ee94,
-	0xbb400700,
-	0x657e00ef,
-	0x00800000,
-	0x0ff60202,
-	0x0f04bd00,
-	0x02f87e03,
-	0x01004b00,
-	0x448ebfb2,
-	0x8f7e4001,
-/* 0x0676: ih_no_fwmthd */
-	0x044b0000,
-	0xffb0bd05,
-	0x0bf4b4ab,
-	0x0700800c,
-	0x000bf603,
-/* 0x068a: ih_no_other */
-	0x004004bd,
-	0x000af601,
-	0xf0fc04bd,
-	0xd0fce0fc,
-	0xa0fcb0fc,
-	0x80fc90fc,
-	0xfc0088fe,
-	0xf400fc80,
-	0x01f80032,
-/* 0x06ac: ctx_4170s */
-	0xb210f5f0,
-	0x41708eff,
-	0x008f7e40,
-/* 0x06bb: ctx_4170w */
-	0x8e00f800,
-	0x7e404170,
-	0xb2000065,
-	0x10f4f0ff,
-	0xf8f31bf4,
-/* 0x06cd: ctx_redswitch */
-	0x02004e00,
-	0xf040e5f0,
-	0xe5f020e5,
-	0x85008010,
-	0x000ef601,
-	0x080f04bd,
-/* 0x06e4: ctx_redswitch_delay */
-	0xf401f2b6,
-	0xe5f1fd1b,
-	0xe5f10400,
-	0x00800100,
-	0x0ef60185,
-	0xf804bd00,
-/* 0x06fd: ctx_86c */
-	0x23008000,
-	0x000ff602,
-	0xffb204bd,
-	0x408a148e,
-	0x00008f7e,
-	0x8c8effb2,
-	0x8f7e41a8,
-	0x00f80000,
-/* 0x071c: ctx_mem */
-	0x02840080,
-	0xbd000ff6,
-/* 0x0725: ctx_mem_wait */
-	0x84008f04,
-	0x00ffcf02,
-	0xf405fffd,
-	0x00f8f61b,
-/* 0x0734: ctx_load */
-	0x99f094bd,
-	0x37008005,
-	0x0009f602,
-	0x0c0a04bd,
-	0x0000b87e,
-	0x0080f4bd,
-	0x0ff60289,
+	0x40040204,
+	0x02f60300,
+	0xf404bd00,
+	0x048e1031,
+	0x657e4096,
+	0xfeb20000,
+	0xb590f1c7,
+	0xf4f00301,
+	0x020fb51f,
+	0x1fbb0101,
+	0x0112b604,
+	0x01030080,
+	0xbd0001f6,
+	0x04008004,
+	0x0001f601,
+	0x004104bd,
+	0x7e020f01,
+	0x7e0006ad,
+	0x0f0006bc,
+	0x06fe7e10,
+	0x000e9800,
+	0x7e010f98,
+	0x95000120,
+	0x00800814,
+	0x04f601c0,
 	0x8004bd00,
-	0xf602c100,
-	0x04bd0002,
-	0x02830080,
-	0xbd0002f6,
-	0x7e070f04,
-	0x8000071c,
+	0xf601c100,
+	0x04bd0004,
+	0x130030b7,
+	0xb6001fbb,
+	0x008002f5,
+	0x0ff601d3,
+	0xb604bd00,
+	0x10b60815,
+	0x0814b601,
+	0x687e1fb2,
+	0x1fbb0002,
+	0x02039800,
+	0x50200084,
+/* 0x0420: init_gpc */
+	0x08044eb8,
+	0x7e1fb200,
+	0xb800008f,
+	0x00010c4e,
+	0x8f7ef4bd,
+	0x4eb80000,
+	0x7e000104,
+	0xb800008f,
+	0x0001004e,
+	0x8f7e020f,
+	0x4eb80000,
+/* 0x044f: init_gpc_wait */
+	0x7e000800,
+	0xc8000065,
+	0x0bf41fff,
+	0x044eb8f9,
+	0x657e0008,
+	0x1fbb0000,
+	0x0040b700,
+	0x0132b680,
+	0x0fb41bf4,
+	0x06fe7e00,
+	0x7e000f00,
+	0x800006ad,
+	0xf6020100,
+	0x04bd0001,
+	0x19f014bd,
+	0x3000801f,
+	0x0001f602,
+/* 0x0492: wait */
+	0x28f404bd,
+	0x0031f400,
+/* 0x0498: main */
+	0x377e100d,
+	0x01f40000,
+	0x01e4b1f4,
+	0xc71bf540,
+	0xf094bd00,
+	0x00800499,
+	0x09f60237,
+	0x8104bd00,
+	0xcf02c000,
+	0x00820011,
+	0x22cf02c1,
+	0x1f13c800,
+	0xc8770bf4,
+	0x0bf41f23,
+	0xb220f955,
+	0xf094bd12,
+	0x00800799,
+	0x09f60237,
+	0xf404bd00,
+	0x31f40132,
+	0x08817e02,
+	0xf094bd00,
+	0x00800799,
+	0x09f60217,
+	0xfc04bd00,
+	0xf094bd20,
+	0x00800699,
+	0x09f60237,
+	0xf404bd00,
+	0x817e0131,
+	0x94bd0008,
+	0x800699f0,
+	0xf6021700,
+	0x04bd0009,
+/* 0x0523: chsw_prev_no_next */
+	0xf92f0ef4,
+	0xf412b220,
+	0x32f40132,
+	0x08817e02,
+	0x8020fc00,
 	0xf602c000,
 	0x04bd0002,
-	0xf0000bfe,
-	0x24b61f2a,
-	0x0220b604,
-	0x99f094bd,
-	0x37008008,
-	0x0009f602,
-	0x008004bd,
-	0x02f60281,
-	0xd204bd00,
-	0x80000000,
-	0x800225f0,
-	0xf6028800,
-	0x04bd0002,
-	0x00421001,
-	0x0223f002,
-	0xf80512fa,
-	0xf094bd03,
-	0x00800899,
+/* 0x053f: chsw_no_prev */
+	0xc8130ef4,
+	0x0bf41f23,
+	0x0131f40d,
+	0x7e0232f4,
+/* 0x054f: chsw_done */
+	0x02000881,
+	0xc3008001,
+	0x0002f602,
+	0x94bd04bd,
+	0x800499f0,
+	0xf6021700,
+	0x04bd0009,
+	0xff300ef5,
+/* 0x056c: main_not_ctx_switch */
+	0xf401e4b0,
+	0xf2b20c1b,
+	0x0008217e,
+/* 0x057b: main_not_ctx_chan */
+	0xb0400ef4,
+	0x1bf402e4,
+	0xf094bd2c,
+	0x00800799,
+	0x09f60237,
+	0xf404bd00,
+	0x32f40132,
+	0x08817e02,
+	0xf094bd00,
+	0x00800799,
 	0x09f60217,
-	0x9804bd00,
-	0x14b68101,
-	0x80029818,
-	0xfd0825b6,
-	0x01b50512,
-	0xf094bd16,
-	0x00800999,
+	0xf404bd00,
+/* 0x05aa: main_not_ctx_save */
+	0xef94110e,
+	0x01f5f010,
+	0x0002f87e,
+	0xfee40ef5,
+/* 0x05b8: main_done */
+	0x29f024bd,
+	0x3000801f,
+	0x0002f602,
+	0x0ef504bd,
+/* 0x05ca: ih */
+	0x00f9fed2,
+	0x88fe80f9,
+	0xf980f901,
+	0xf9a0f990,
+	0xf9d0f9b0,
+	0xbdf0f9e0,
+	0x02004a04,
+	0xc400aacf,
+	0x0bf404ab,
+	0x4e100d23,
+	0xeecf1a00,
+	0x19004f00,
+	0x7e00ffcf,
+	0xb7000004,
+	0x0e0400b0,
+	0x1d004001,
+	0xbd000ef6,
+/* 0x060d: ih_no_fifo */
+	0x00abe404,
+	0x0c0bf401,
+	0x014e100d,
+	0x00047e40,
+/* 0x061d: ih_no_ctxsw */
+	0x00abe400,
+	0x560bf404,
+	0x4007088e,
+	0x0000657e,
+	0x0080ffb2,
+	0x0ff60204,
+	0x8e04bd00,
+	0x7e400704,
+	0xb2000065,
+	0x030080ff,
+	0x000ff602,
+	0xfec704bd,
+	0x02ee9450,
+	0x4007008f,
+	0x7e00efbb,
+	0x80000065,
+	0xf6020200,
+	0x04bd000f,
+	0xf87e030f,
+	0x004b0002,
+	0x8ebfb201,
+	0x7e400144,
+/* 0x0677: ih_no_fwmthd */
+	0x4b00008f,
+	0xb0bd0504,
+	0xf4b4abff,
+	0x00800c0b,
+	0x0bf60307,
+/* 0x068b: ih_no_other */
+	0x4004bd00,
+	0x0af60100,
+	0xfc04bd00,
+	0xfce0fcf0,
+	0xfcb0fcd0,
+	0xfc90fca0,
+	0x0088fe80,
+	0x00fc80fc,
+	0xf80032f4,
+/* 0x06ad: ctx_4170s */
+	0x10f5f001,
+	0x708effb2,
+	0x8f7e4041,
+	0x00f80000,
+/* 0x06bc: ctx_4170w */
+	0x4041708e,
+	0x0000657e,
+	0xf4f0ffb2,
+	0xf31bf410,
+/* 0x06ce: ctx_redswitch */
+	0x004e00f8,
+	0x40e5f002,
+	0xf020e5f0,
+	0x008010e5,
+	0x0ef60185,
+	0x0f04bd00,
+/* 0x06e5: ctx_redswitch_delay */
+	0x01f2b608,
+	0xf1fd1bf4,
+	0xf10400e5,
+	0x800100e5,
+	0xf6018500,
+	0x04bd000e,
+/* 0x06fe: ctx_86c */
+	0x008000f8,
+	0x0ff60223,
+	0xb204bd00,
+	0x8a148eff,
+	0x008f7e40,
+	0x8effb200,
+	0x7e41a88c,
+	0xf800008f,
+/* 0x071d: ctx_mem */
+	0x84008000,
+	0x000ff602,
+/* 0x0726: ctx_mem_wait */
+	0x008f04bd,
+	0xffcf0284,
+	0x05fffd00,
+	0xf8f61bf4,
+/* 0x0735: ctx_load */
+	0xf094bd00,
+	0x00800599,
+	0x09f60237,
+	0x0a04bd00,
+	0x00b87e0c,
+	0x80f4bd00,
+	0xf6028900,
+	0x04bd000f,
+	0x02c10080,
+	0xbd0002f6,
+	0x83008004,
+	0x0002f602,
+	0x070f04bd,
+	0x00071d7e,
+	0x02c00080,
+	0xbd0002f6,
+	0x000bfe04,
+	0xb61f2af0,
+	0x20b60424,
+	0xf094bd02,
+	0x00800899,
 	0x09f60237,
 	0x8004bd00,
 	0xf6028100,
-	0x04bd0001,
-	0x00800102,
-	0x02f60288,
-	0x4104bd00,
-	0x13f00100,
-	0x0501fa06,
+	0x04bd0002,
+	0x000000d2,
+	0x0225f080,
+	0x02880080,
+	0xbd0002f6,
+	0x42100104,
+	0x23f00200,
+	0x0512fa02,
 	0x94bd03f8,
-	0x800999f0,
+	0x800899f0,
 	0xf6021700,
 	0x04bd0009,
-	0x99f094bd,
-	0x17008005,
-	0x0009f602,
-	0x00f804bd,
-/* 0x0820: ctx_chan */
-	0x0007347e,
-	0xb87e0c0a,
-	0x050f0000,
-	0x00071c7e,
-/* 0x0832: ctx_mmio_exec */
-	0x039800f8,
-	0x81008041,
-	0x0003f602,
-	0x34bd04bd,
-/* 0x0840: ctx_mmio_loop */
-	0xf4ff34c4,
-	0x00450e1b,
-	0x0653f002,
-	0xf80535fa,
-/* 0x0851: ctx_mmio_pull */
-	0x804e9803,
-	0x7e814f98,
-	0xb600008f,
-	0x12b60830,
-	0xdf1bf401,
-/* 0x0864: ctx_mmio_done */
-	0x80160398,
-	0xf6028100,
-	0x04bd0003,
-	0x414000b5,
-	0x13f00100,
-	0x0601fa06,
-	0x00f803f8,
-/* 0x0880: ctx_xfer */
-	0x0080040e,
-	0x0ef60302,
-/* 0x088b: ctx_xfer_idle */
-	0x8e04bd00,
-	0xcf030000,
-	0xe4f100ee,
-	0x1bf42000,
-	0x0611f4f5,
-/* 0x089f: ctx_xfer_pre */
-	0x0f0c02f4,
-	0x06fd7e10,
-	0x1b11f400,
-/* 0x08a8: ctx_xfer_pre_load */
-	0xac7e020f,
-	0xbb7e0006,
-	0xcd7e0006,
-	0xf4bd0006,
-	0x0006ac7e,
-	0x0007347e,
-/* 0x08c0: ctx_xfer_exec */
-	0xbd160198,
-	0x05008024,
-	0x0002f601,
-	0x1fb204bd,
-	0x41a5008e,
-	0x00008f7e,
-	0xf001fcf0,
-	0x24b6022c,
-	0x05f2fd01,
-	0x048effb2,
-	0x8f7e41a5,
-	0x167e0000,
-	0x24bd0002,
-	0x0247fc80,
-	0xbd0002f6,
-	0x012cf004,
-	0x800320b6,
-	0xf6024afc,
+	0xb6810198,
+	0x02981814,
+	0x0825b680,
+	0xb50512fd,
+	0x94bd1601,
+	0x800999f0,
+	0xf6023700,
+	0x04bd0009,
+	0x02810080,
+	0xbd0001f6,
+	0x80010204,
+	0xf6028800,
 	0x04bd0002,
-	0xf001acf0,
-	0x000b06a5,
-	0x98000c98,
-	0x000e010d,
-	0x00013d7e,
-	0xec7e080a,
-	0x0a7e0000,
-	0x01f40002,
-	0x7e0c0a12,
+	0xf0010041,
+	0x01fa0613,
+	0xbd03f805,
+	0x0999f094,
+	0x02170080,
+	0xbd0009f6,
+	0xf094bd04,
+	0x00800599,
+	0x09f60217,
+	0xf804bd00,
+/* 0x0821: ctx_chan */
+	0x07357e00,
+	0x7e0c0a00,
 	0x0f0000b8,
-	0x071c7e05,
-	0x2d02f400,
-/* 0x093c: ctx_xfer_post */
-	0xac7e020f,
-	0xf4bd0006,
-	0x0006fd7e,
-	0x0002277e,
-	0x0006bb7e,
-	0xac7ef4bd,
+	0x071d7e05,
+/* 0x0833: ctx_mmio_exec */
+	0x9800f800,
+	0x00804103,
+	0x03f60281,
+	0xbd04bd00,
+/* 0x0841: ctx_mmio_loop */
+	0xff34c434,
+	0x450e1bf4,
+	0x53f00200,
+	0x0535fa06,
+/* 0x0852: ctx_mmio_pull */
+	0x4e9803f8,
+	0x814f9880,
+	0x00008f7e,
+	0xb60830b6,
+	0x1bf40112,
+/* 0x0865: ctx_mmio_done */
+	0x160398df,
+	0x02810080,
+	0xbd0003f6,
+	0x4000b504,
+	0xf0010041,
+	0x01fa0613,
+	0xf803f806,
+/* 0x0881: ctx_xfer */
+	0x80040e00,
+	0xf6030200,
+	0x04bd000e,
+/* 0x088c: ctx_xfer_idle */
+	0x0300008e,
+	0xf100eecf,
+	0xf42000e4,
+	0x11f4f51b,
+	0x0c02f406,
+/* 0x08a0: ctx_xfer_pre */
+	0xfe7e100f,
 	0x11f40006,
-	0x40019810,
-	0xf40511fd,
-	0x327e070b,
-/* 0x0966: ctx_xfer_no_post_mmio */
-/* 0x0966: ctx_xfer_done */
-	0x00f80008,
+/* 0x08a9: ctx_xfer_pre_load */
+	0x7e020f1b,
+	0x7e0006ad,
+	0x7e0006bc,
+	0xbd0006ce,
+	0x06ad7ef4,
+	0x07357e00,
+/* 0x08c1: ctx_xfer_exec */
+	0x16019800,
+	0x008024bd,
+	0x02f60105,
+	0xb204bd00,
+	0xa5008e1f,
+	0x008f7e41,
+	0x01fcf000,
+	0xb6022cf0,
+	0xf2fd0124,
+	0x8effb205,
+	0x7e41a504,
+	0x7e00008f,
+	0xbd000216,
+	0x47fc8024,
+	0x0002f602,
+	0x2cf004bd,
+	0x0320b601,
+	0x024afc80,
+	0xbd0002f6,
+	0x01acf004,
+	0x0b06a5f0,
+	0x000c9800,
+	0x0e010d98,
+	0x013d7e00,
+	0x7e080a00,
+	0x7e0000ec,
+	0xf400020a,
+	0x0c0a1201,
+	0x0000b87e,
+	0x1d7e050f,
+	0x02f40007,
+/* 0x093d: ctx_xfer_post */
+	0x7e020f2d,
+	0xbd0006ad,
+	0x06fe7ef4,
+	0x02277e00,
+	0x06bc7e00,
+	0x7ef4bd00,
+	0xf40006ad,
+	0x01981011,
+	0x0511fd40,
+	0x7e070bf4,
+/* 0x0967: ctx_xfer_no_post_mmio */
+/* 0x0967: ctx_xfer_done */
+	0xf8000833,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index 649a442..449dae7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -441,7 +441,7 @@ static uint32_t gm107_grhub_code[] = {
 	0x020014fe,
 	0x12004002,
 	0xbd0002f6,
-	0x05c94104,
+	0x05ca4104,
 	0xbd0010fe,
 	0x07004024,
 	0xbd0002f6,
@@ -460,423 +460,423 @@ static uint32_t gm107_grhub_code[] = {
 	0x01039204,
 	0x03090080,
 	0xbd0003f6,
-	0x87044204,
-	0xf6040040,
-	0x04bd0002,
-	0x00400402,
-	0x0002f603,
-	0x31f404bd,
-	0x96048e10,
-	0x00657e40,
-	0xc7feb200,
-	0x01b590f1,
-	0x1ff4f003,
-	0x01020fb5,
-	0x041fbb01,
-	0x800112b6,
-	0xf6010300,
-	0x04bd0001,
-	0x01040080,
-	0xbd0001f6,
-	0x01004104,
-	0xac7e020f,
-	0xbb7e0006,
-	0x100f0006,
-	0x0006fd7e,
-	0x98000e98,
-	0x207e010f,
-	0x14950001,
-	0xc0008008,
-	0x0004f601,
-	0x008004bd,
-	0x04f601c1,
-	0xb704bd00,
-	0xbb130030,
-	0xf5b6001f,
-	0xd3008002,
-	0x000ff601,
-	0x15b604bd,
-	0x0110b608,
-	0xb20814b6,
-	0x02687e1f,
-	0x001fbb00,
-	0x84020398,
-/* 0x041f: init_gpc */
-	0xb8502000,
-	0x0008044e,
-	0x8f7e1fb2,
-	0x4eb80000,
-	0xbd00010c,
-	0x008f7ef4,
-	0x044eb800,
-	0x8f7e0001,
-	0x4eb80000,
-	0x0f000100,
-	0x008f7e02,
-	0x004eb800,
-/* 0x044e: init_gpc_wait */
-	0x657e0008,
-	0xffc80000,
-	0xf90bf41f,
-	0x08044eb8,
-	0x00657e00,
-	0x001fbb00,
-	0x800040b7,
-	0xf40132b6,
-	0x000fb41b,
-	0x0006fd7e,
-	0xac7e000f,
-	0x00800006,
-	0x01f60201,
-	0xbd04bd00,
-	0x1f19f014,
-	0x02300080,
-	0xbd0001f6,
-/* 0x0491: wait */
-	0x0028f404,
-/* 0x0497: main */
-	0x0d0031f4,
-	0x00377e10,
-	0xf401f400,
-	0x4001e4b1,
-	0x00c71bf5,
-	0x99f094bd,
-	0x37008004,
-	0x0009f602,
-	0x008104bd,
-	0x11cf02c0,
-	0xc1008200,
-	0x0022cf02,
-	0xf41f13c8,
-	0x23c8770b,
-	0x550bf41f,
-	0x12b220f9,
-	0x99f094bd,
-	0x37008007,
-	0x0009f602,
-	0x32f404bd,
-	0x0231f401,
-	0x0008807e,
-	0x99f094bd,
-	0x17008007,
-	0x0009f602,
-	0x20fc04bd,
-	0x99f094bd,
-	0x37008006,
-	0x0009f602,
-	0x31f404bd,
-	0x08807e01,
-	0xf094bd00,
-	0x00800699,
-	0x09f60217,
-	0xf404bd00,
-/* 0x0522: chsw_prev_no_next */
-	0x20f92f0e,
-	0x32f412b2,
-	0x0232f401,
-	0x0008807e,
-	0x008020fc,
-	0x02f602c0,
-	0xf404bd00,
-/* 0x053e: chsw_no_prev */
-	0x23c8130e,
-	0x0d0bf41f,
-	0xf40131f4,
-	0x807e0232,
-/* 0x054e: chsw_done */
-	0x01020008,
-	0x02c30080,
+	0x87048204,
+	0x04004000,
 	0xbd0002f6,
-	0xf094bd04,
-	0x00800499,
-	0x09f60217,
-	0xf504bd00,
-/* 0x056b: main_not_ctx_switch */
-	0xb0ff300e,
-	0x1bf401e4,
-	0x7ef2b20c,
-	0xf4000820,
-/* 0x057a: main_not_ctx_chan */
-	0xe4b0400e,
-	0x2c1bf402,
-	0x99f094bd,
-	0x37008007,
-	0x0009f602,
-	0x32f404bd,
-	0x0232f401,
-	0x0008807e,
-	0x99f094bd,
-	0x17008007,
-	0x0009f602,
-	0x0ef404bd,
-/* 0x05a9: main_not_ctx_save */
-	0x10ef9411,
-	0x7e01f5f0,
-	0xf50002f8,
-/* 0x05b7: main_done */
-	0xbdfee40e,
-	0x1f29f024,
-	0x02300080,
-	0xbd0002f6,
-	0xd20ef504,
-/* 0x05c9: ih */
-	0xf900f9fe,
-	0x0188fe80,
-	0x90f980f9,
-	0xb0f9a0f9,
-	0xe0f9d0f9,
-	0x04bdf0f9,
-	0xcf02004a,
-	0xabc400aa,
-	0x230bf404,
-	0x004e100d,
-	0x00eecf1a,
-	0xcf19004f,
-	0x047e00ff,
-	0xb0b70000,
-	0x010e0400,
-	0xf61d0040,
-	0x04bd000e,
-/* 0x060c: ih_no_fifo */
-	0x0100abe4,
-	0x0d0c0bf4,
-	0x40014e10,
-	0x0000047e,
-/* 0x061c: ih_no_ctxsw */
-	0x0400abe4,
-	0x8e560bf4,
-	0x7e400708,
-	0xb2000065,
-	0x040080ff,
-	0x000ff602,
-	0x048e04bd,
-	0x657e4007,
-	0xffb20000,
-	0x02030080,
-	0xbd000ff6,
-	0x50fec704,
-	0x8f02ee94,
-	0xbb400700,
-	0x657e00ef,
-	0x00800000,
-	0x0ff60202,
-	0x0f04bd00,
-	0x02f87e03,
-	0x01004b00,
-	0x448ebfb2,
-	0x8f7e4001,
-/* 0x0676: ih_no_fwmthd */
-	0x044b0000,
-	0xffb0bd05,
-	0x0bf4b4ab,
-	0x0700800c,
-	0x000bf603,
-/* 0x068a: ih_no_other */
-	0x004004bd,
-	0x000af601,
-	0xf0fc04bd,
-	0xd0fce0fc,
-	0xa0fcb0fc,
-	0x80fc90fc,
-	0xfc0088fe,
-	0xf400fc80,
-	0x01f80032,
-/* 0x06ac: ctx_4170s */
-	0xb210f5f0,
-	0x41708eff,
-	0x008f7e40,
-/* 0x06bb: ctx_4170w */
-	0x8e00f800,
-	0x7e404170,
-	0xb2000065,
-	0x10f4f0ff,
-	0xf8f31bf4,
-/* 0x06cd: ctx_redswitch */
-	0x02004e00,
-	0xf040e5f0,
-	0xe5f020e5,
-	0x85008010,
-	0x000ef601,
-	0x080f04bd,
-/* 0x06e4: ctx_redswitch_delay */
-	0xf401f2b6,
-	0xe5f1fd1b,
-	0xe5f10400,
-	0x00800100,
-	0x0ef60185,
-	0xf804bd00,
-/* 0x06fd: ctx_86c */
-	0x23008000,
-	0x000ff602,
-	0xffb204bd,
-	0x408a148e,
-	0x00008f7e,
-	0x8c8effb2,
-	0x8f7e41a8,
-	0x00f80000,
-/* 0x071c: ctx_mem */
-	0x02840080,
-	0xbd000ff6,
-/* 0x0725: ctx_mem_wait */
-	0x84008f04,
-	0x00ffcf02,
-	0xf405fffd,
-	0x00f8f61b,
-/* 0x0734: ctx_load */
-	0x99f094bd,
-	0x37008005,
-	0x0009f602,
-	0x0c0a04bd,
-	0x0000b87e,
-	0x0080f4bd,
-	0x0ff60289,
+	0x40040204,
+	0x02f60300,
+	0xf404bd00,
+	0x048e1031,
+	0x657e4096,
+	0xfeb20000,
+	0xb590f1c7,
+	0xf4f00301,
+	0x020fb51f,
+	0x1fbb0101,
+	0x0112b604,
+	0x01030080,
+	0xbd0001f6,
+	0x04008004,
+	0x0001f601,
+	0x004104bd,
+	0x7e020f01,
+	0x7e0006ad,
+	0x0f0006bc,
+	0x06fe7e10,
+	0x000e9800,
+	0x7e010f98,
+	0x95000120,
+	0x00800814,
+	0x04f601c0,
 	0x8004bd00,
-	0xf602c100,
-	0x04bd0002,
-	0x02830080,
-	0xbd0002f6,
-	0x7e070f04,
-	0x8000071c,
+	0xf601c100,
+	0x04bd0004,
+	0x130030b7,
+	0xb6001fbb,
+	0x008002f5,
+	0x0ff601d3,
+	0xb604bd00,
+	0x10b60815,
+	0x0814b601,
+	0x687e1fb2,
+	0x1fbb0002,
+	0x02039800,
+	0x50200084,
+/* 0x0420: init_gpc */
+	0x08044eb8,
+	0x7e1fb200,
+	0xb800008f,
+	0x00010c4e,
+	0x8f7ef4bd,
+	0x4eb80000,
+	0x7e000104,
+	0xb800008f,
+	0x0001004e,
+	0x8f7e020f,
+	0x4eb80000,
+/* 0x044f: init_gpc_wait */
+	0x7e000800,
+	0xc8000065,
+	0x0bf41fff,
+	0x044eb8f9,
+	0x657e0008,
+	0x1fbb0000,
+	0x0040b700,
+	0x0132b680,
+	0x0fb41bf4,
+	0x06fe7e00,
+	0x7e000f00,
+	0x800006ad,
+	0xf6020100,
+	0x04bd0001,
+	0x19f014bd,
+	0x3000801f,
+	0x0001f602,
+/* 0x0492: wait */
+	0x28f404bd,
+	0x0031f400,
+/* 0x0498: main */
+	0x377e100d,
+	0x01f40000,
+	0x01e4b1f4,
+	0xc71bf540,
+	0xf094bd00,
+	0x00800499,
+	0x09f60237,
+	0x8104bd00,
+	0xcf02c000,
+	0x00820011,
+	0x22cf02c1,
+	0x1f13c800,
+	0xc8770bf4,
+	0x0bf41f23,
+	0xb220f955,
+	0xf094bd12,
+	0x00800799,
+	0x09f60237,
+	0xf404bd00,
+	0x31f40132,
+	0x08817e02,
+	0xf094bd00,
+	0x00800799,
+	0x09f60217,
+	0xfc04bd00,
+	0xf094bd20,
+	0x00800699,
+	0x09f60237,
+	0xf404bd00,
+	0x817e0131,
+	0x94bd0008,
+	0x800699f0,
+	0xf6021700,
+	0x04bd0009,
+/* 0x0523: chsw_prev_no_next */
+	0xf92f0ef4,
+	0xf412b220,
+	0x32f40132,
+	0x08817e02,
+	0x8020fc00,
 	0xf602c000,
 	0x04bd0002,
-	0xf0000bfe,
-	0x24b61f2a,
-	0x0220b604,
-	0x99f094bd,
-	0x37008008,
-	0x0009f602,
-	0x008004bd,
-	0x02f60281,
-	0xd204bd00,
-	0x80000000,
-	0x800225f0,
-	0xf6028800,
-	0x04bd0002,
-	0x00421001,
-	0x0223f002,
-	0xf80512fa,
-	0xf094bd03,
-	0x00800899,
+/* 0x053f: chsw_no_prev */
+	0xc8130ef4,
+	0x0bf41f23,
+	0x0131f40d,
+	0x7e0232f4,
+/* 0x054f: chsw_done */
+	0x02000881,
+	0xc3008001,
+	0x0002f602,
+	0x94bd04bd,
+	0x800499f0,
+	0xf6021700,
+	0x04bd0009,
+	0xff300ef5,
+/* 0x056c: main_not_ctx_switch */
+	0xf401e4b0,
+	0xf2b20c1b,
+	0x0008217e,
+/* 0x057b: main_not_ctx_chan */
+	0xb0400ef4,
+	0x1bf402e4,
+	0xf094bd2c,
+	0x00800799,
+	0x09f60237,
+	0xf404bd00,
+	0x32f40132,
+	0x08817e02,
+	0xf094bd00,
+	0x00800799,
 	0x09f60217,
-	0x9804bd00,
-	0x14b68101,
-	0x80029818,
-	0xfd0825b6,
-	0x01b50512,
-	0xf094bd16,
-	0x00800999,
+	0xf404bd00,
+/* 0x05aa: main_not_ctx_save */
+	0xef94110e,
+	0x01f5f010,
+	0x0002f87e,
+	0xfee40ef5,
+/* 0x05b8: main_done */
+	0x29f024bd,
+	0x3000801f,
+	0x0002f602,
+	0x0ef504bd,
+/* 0x05ca: ih */
+	0x00f9fed2,
+	0x88fe80f9,
+	0xf980f901,
+	0xf9a0f990,
+	0xf9d0f9b0,
+	0xbdf0f9e0,
+	0x02004a04,
+	0xc400aacf,
+	0x0bf404ab,
+	0x4e100d23,
+	0xeecf1a00,
+	0x19004f00,
+	0x7e00ffcf,
+	0xb7000004,
+	0x0e0400b0,
+	0x1d004001,
+	0xbd000ef6,
+/* 0x060d: ih_no_fifo */
+	0x00abe404,
+	0x0c0bf401,
+	0x014e100d,
+	0x00047e40,
+/* 0x061d: ih_no_ctxsw */
+	0x00abe400,
+	0x560bf404,
+	0x4007088e,
+	0x0000657e,
+	0x0080ffb2,
+	0x0ff60204,
+	0x8e04bd00,
+	0x7e400704,
+	0xb2000065,
+	0x030080ff,
+	0x000ff602,
+	0xfec704bd,
+	0x02ee9450,
+	0x4007008f,
+	0x7e00efbb,
+	0x80000065,
+	0xf6020200,
+	0x04bd000f,
+	0xf87e030f,
+	0x004b0002,
+	0x8ebfb201,
+	0x7e400144,
+/* 0x0677: ih_no_fwmthd */
+	0x4b00008f,
+	0xb0bd0504,
+	0xf4b4abff,
+	0x00800c0b,
+	0x0bf60307,
+/* 0x068b: ih_no_other */
+	0x4004bd00,
+	0x0af60100,
+	0xfc04bd00,
+	0xfce0fcf0,
+	0xfcb0fcd0,
+	0xfc90fca0,
+	0x0088fe80,
+	0x00fc80fc,
+	0xf80032f4,
+/* 0x06ad: ctx_4170s */
+	0x10f5f001,
+	0x708effb2,
+	0x8f7e4041,
+	0x00f80000,
+/* 0x06bc: ctx_4170w */
+	0x4041708e,
+	0x0000657e,
+	0xf4f0ffb2,
+	0xf31bf410,
+/* 0x06ce: ctx_redswitch */
+	0x004e00f8,
+	0x40e5f002,
+	0xf020e5f0,
+	0x008010e5,
+	0x0ef60185,
+	0x0f04bd00,
+/* 0x06e5: ctx_redswitch_delay */
+	0x01f2b608,
+	0xf1fd1bf4,
+	0xf10400e5,
+	0x800100e5,
+	0xf6018500,
+	0x04bd000e,
+/* 0x06fe: ctx_86c */
+	0x008000f8,
+	0x0ff60223,
+	0xb204bd00,
+	0x8a148eff,
+	0x008f7e40,
+	0x8effb200,
+	0x7e41a88c,
+	0xf800008f,
+/* 0x071d: ctx_mem */
+	0x84008000,
+	0x000ff602,
+/* 0x0726: ctx_mem_wait */
+	0x008f04bd,
+	0xffcf0284,
+	0x05fffd00,
+	0xf8f61bf4,
+/* 0x0735: ctx_load */
+	0xf094bd00,
+	0x00800599,
+	0x09f60237,
+	0x0a04bd00,
+	0x00b87e0c,
+	0x80f4bd00,
+	0xf6028900,
+	0x04bd000f,
+	0x02c10080,
+	0xbd0002f6,
+	0x83008004,
+	0x0002f602,
+	0x070f04bd,
+	0x00071d7e,
+	0x02c00080,
+	0xbd0002f6,
+	0x000bfe04,
+	0xb61f2af0,
+	0x20b60424,
+	0xf094bd02,
+	0x00800899,
 	0x09f60237,
 	0x8004bd00,
 	0xf6028100,
-	0x04bd0001,
-	0x00800102,
-	0x02f60288,
-	0x4104bd00,
-	0x13f00100,
-	0x0501fa06,
+	0x04bd0002,
+	0x000000d2,
+	0x0225f080,
+	0x02880080,
+	0xbd0002f6,
+	0x42100104,
+	0x23f00200,
+	0x0512fa02,
 	0x94bd03f8,
-	0x800999f0,
+	0x800899f0,
 	0xf6021700,
 	0x04bd0009,
-	0x99f094bd,
-	0x17008005,
-	0x0009f602,
-	0x00f804bd,
-/* 0x0820: ctx_chan */
-	0x0007347e,
-	0xb87e0c0a,
-	0x050f0000,
-	0x00071c7e,
-/* 0x0832: ctx_mmio_exec */
-	0x039800f8,
-	0x81008041,
-	0x0003f602,
-	0x34bd04bd,
-/* 0x0840: ctx_mmio_loop */
-	0xf4ff34c4,
-	0x00450e1b,
-	0x0653f002,
-	0xf80535fa,
-/* 0x0851: ctx_mmio_pull */
-	0x804e9803,
-	0x7e814f98,
-	0xb600008f,
-	0x12b60830,
-	0xdf1bf401,
-/* 0x0864: ctx_mmio_done */
-	0x80160398,
-	0xf6028100,
-	0x04bd0003,
-	0x414000b5,
-	0x13f00100,
-	0x0601fa06,
-	0x00f803f8,
-/* 0x0880: ctx_xfer */
-	0x0080040e,
-	0x0ef60302,
-/* 0x088b: ctx_xfer_idle */
-	0x8e04bd00,
-	0xcf030000,
-	0xe4f100ee,
-	0x1bf42000,
-	0x0611f4f5,
-/* 0x089f: ctx_xfer_pre */
-	0x0f0c02f4,
-	0x06fd7e10,
-	0x1b11f400,
-/* 0x08a8: ctx_xfer_pre_load */
-	0xac7e020f,
-	0xbb7e0006,
-	0xcd7e0006,
-	0xf4bd0006,
-	0x0006ac7e,
-	0x0007347e,
-/* 0x08c0: ctx_xfer_exec */
-	0xbd160198,
-	0x05008024,
-	0x0002f601,
-	0x1fb204bd,
-	0x41a5008e,
-	0x00008f7e,
-	0xf001fcf0,
-	0x24b6022c,
-	0x05f2fd01,
-	0x048effb2,
-	0x8f7e41a5,
-	0x167e0000,
-	0x24bd0002,
-	0x0247fc80,
-	0xbd0002f6,
-	0x012cf004,
-	0x800320b6,
-	0xf6024afc,
+	0xb6810198,
+	0x02981814,
+	0x0825b680,
+	0xb50512fd,
+	0x94bd1601,
+	0x800999f0,
+	0xf6023700,
+	0x04bd0009,
+	0x02810080,
+	0xbd0001f6,
+	0x80010204,
+	0xf6028800,
 	0x04bd0002,
-	0xf001acf0,
-	0x000b06a5,
-	0x98000c98,
-	0x000e010d,
-	0x00013d7e,
-	0xec7e080a,
-	0x0a7e0000,
-	0x01f40002,
-	0x7e0c0a12,
+	0xf0010041,
+	0x01fa0613,
+	0xbd03f805,
+	0x0999f094,
+	0x02170080,
+	0xbd0009f6,
+	0xf094bd04,
+	0x00800599,
+	0x09f60217,
+	0xf804bd00,
+/* 0x0821: ctx_chan */
+	0x07357e00,
+	0x7e0c0a00,
 	0x0f0000b8,
-	0x071c7e05,
-	0x2d02f400,
-/* 0x093c: ctx_xfer_post */
-	0xac7e020f,
-	0xf4bd0006,
-	0x0006fd7e,
-	0x0002277e,
-	0x0006bb7e,
-	0xac7ef4bd,
+	0x071d7e05,
+/* 0x0833: ctx_mmio_exec */
+	0x9800f800,
+	0x00804103,
+	0x03f60281,
+	0xbd04bd00,
+/* 0x0841: ctx_mmio_loop */
+	0xff34c434,
+	0x450e1bf4,
+	0x53f00200,
+	0x0535fa06,
+/* 0x0852: ctx_mmio_pull */
+	0x4e9803f8,
+	0x814f9880,
+	0x00008f7e,
+	0xb60830b6,
+	0x1bf40112,
+/* 0x0865: ctx_mmio_done */
+	0x160398df,
+	0x02810080,
+	0xbd0003f6,
+	0x4000b504,
+	0xf0010041,
+	0x01fa0613,
+	0xf803f806,
+/* 0x0881: ctx_xfer */
+	0x80040e00,
+	0xf6030200,
+	0x04bd000e,
+/* 0x088c: ctx_xfer_idle */
+	0x0300008e,
+	0xf100eecf,
+	0xf42000e4,
+	0x11f4f51b,
+	0x0c02f406,
+/* 0x08a0: ctx_xfer_pre */
+	0xfe7e100f,
 	0x11f40006,
-	0x40019810,
-	0xf40511fd,
-	0x327e070b,
-/* 0x0966: ctx_xfer_no_post_mmio */
-/* 0x0966: ctx_xfer_done */
-	0x00f80008,
+/* 0x08a9: ctx_xfer_pre_load */
+	0x7e020f1b,
+	0x7e0006ad,
+	0x7e0006bc,
+	0xbd0006ce,
+	0x06ad7ef4,
+	0x07357e00,
+/* 0x08c1: ctx_xfer_exec */
+	0x16019800,
+	0x008024bd,
+	0x02f60105,
+	0xb204bd00,
+	0xa5008e1f,
+	0x008f7e41,
+	0x01fcf000,
+	0xb6022cf0,
+	0xf2fd0124,
+	0x8effb205,
+	0x7e41a504,
+	0x7e00008f,
+	0xbd000216,
+	0x47fc8024,
+	0x0002f602,
+	0x2cf004bd,
+	0x0320b601,
+	0x024afc80,
+	0xbd0002f6,
+	0x01acf004,
+	0x0b06a5f0,
+	0x000c9800,
+	0x0e010d98,
+	0x013d7e00,
+	0x7e080a00,
+	0x7e0000ec,
+	0xf400020a,
+	0x0c0a1201,
+	0x0000b87e,
+	0x1d7e050f,
+	0x02f40007,
+/* 0x093d: ctx_xfer_post */
+	0x7e020f2d,
+	0xbd0006ad,
+	0x06fe7ef4,
+	0x02277e00,
+	0x06bc7e00,
+	0x7ef4bd00,
+	0xf40006ad,
+	0x01981011,
+	0x0511fd40,
+	0x7e070bf4,
+/* 0x0967: ctx_xfer_no_post_mmio */
+/* 0x0967: ctx_xfer_done */
+	0xf8000833,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index c578deb..dd8f85b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -26,9 +26,9 @@
 #include "fuc/os.h"
 
 #include <core/client.h>
-#include <core/option.h>
 #include <core/firmware.h>
-#include <subdev/secboot.h>
+#include <core/option.h>
+#include <subdev/acr.h>
 #include <subdev/fb.h>
 #include <subdev/mc.h>
 #include <subdev/pmu.h>
@@ -1636,7 +1636,7 @@ gf100_gr_intr(struct nvkm_gr *base)
 
 static void
 gf100_gr_init_fw(struct nvkm_falcon *falcon,
-		 struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
+		 struct nvkm_blob *code, struct nvkm_blob *data)
 {
 	nvkm_falcon_load_dmem(falcon, data->data, 0x0, data->size, 0);
 	nvkm_falcon_load_imem(falcon, code->data, 0x0, code->size, 0, 0, false);
@@ -1690,26 +1690,30 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
 {
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_secboot *sb = device->secboot;
-	u32 secboot_mask = 0;
+	u32 lsf_mask = 0;
 	int ret;
 
 	/* load fuc microcode */
 	nvkm_mc_unk260(device, 0);
 
 	/* securely-managed falcons must be reset using secure boot */
-	if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
-		secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS);
-	else
-		gf100_gr_init_fw(gr->fecs.falcon, &gr->fuc409c, &gr->fuc409d);
 
-	if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
-		secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS);
-	else
-		gf100_gr_init_fw(gr->gpccs.falcon, &gr->fuc41ac, &gr->fuc41ad);
+	if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_FECS)) {
+		gf100_gr_init_fw(&gr->fecs.falcon, &gr->fecs.inst,
+						   &gr->fecs.data);
+	} else {
+		lsf_mask |= BIT(NVKM_ACR_LSF_FECS);
+	}
 
-	if (secboot_mask != 0) {
-		int ret = nvkm_secboot_reset(sb, secboot_mask);
+	if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_GPCCS)) {
+		gf100_gr_init_fw(&gr->gpccs.falcon, &gr->gpccs.inst,
+						    &gr->gpccs.data);
+	} else {
+		lsf_mask |= BIT(NVKM_ACR_LSF_GPCCS);
+	}
+
+	if (lsf_mask) {
+		ret = nvkm_acr_bootstrap_falcons(device, lsf_mask);
 		if (ret)
 			return ret;
 	}
@@ -1721,8 +1725,8 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x41a10c, 0x00000000);
 	nvkm_wr32(device, 0x40910c, 0x00000000);
 
-	nvkm_falcon_start(gr->gpccs.falcon);
-	nvkm_falcon_start(gr->fecs.falcon);
+	nvkm_falcon_start(&gr->gpccs.falcon);
+	nvkm_falcon_start(&gr->fecs.falcon);
 
 	if (nvkm_msec(device, 2000,
 		if (nvkm_rd32(device, 0x409800) & 0x00000001)
@@ -1784,18 +1788,18 @@ gf100_gr_init_ctxctl_int(struct gf100_gr *gr)
 
 	/* load HUB microcode */
 	nvkm_mc_unk260(device, 0);
-	nvkm_falcon_load_dmem(gr->fecs.falcon,
+	nvkm_falcon_load_dmem(&gr->fecs.falcon,
 			      gr->func->fecs.ucode->data.data, 0x0,
 			      gr->func->fecs.ucode->data.size, 0);
-	nvkm_falcon_load_imem(gr->fecs.falcon,
+	nvkm_falcon_load_imem(&gr->fecs.falcon,
 			      gr->func->fecs.ucode->code.data, 0x0,
 			      gr->func->fecs.ucode->code.size, 0, 0, false);
 
 	/* load GPC microcode */
-	nvkm_falcon_load_dmem(gr->gpccs.falcon,
+	nvkm_falcon_load_dmem(&gr->gpccs.falcon,
 			      gr->func->gpccs.ucode->data.data, 0x0,
 			      gr->func->gpccs.ucode->data.size, 0);
-	nvkm_falcon_load_imem(gr->gpccs.falcon,
+	nvkm_falcon_load_imem(&gr->gpccs.falcon,
 			      gr->func->gpccs.ucode->code.data, 0x0,
 			      gr->func->gpccs.ucode->code.size, 0, 0, false);
 	nvkm_mc_unk260(device, 1);
@@ -1941,17 +1945,6 @@ gf100_gr_oneinit(struct nvkm_gr *base)
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	int i, j;
-	int ret;
-
-	ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs.falcon);
-	if (ret)
-		return ret;
-
-	mutex_init(&gr->fecs.mutex);
-
-	ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs.falcon);
-	if (ret)
-		return ret;
 
 	nvkm_pmu_pgob(device->pmu, false);
 
@@ -1992,11 +1985,11 @@ gf100_gr_init_(struct nvkm_gr *base)
 
 	nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
 
-	ret = nvkm_falcon_get(gr->fecs.falcon, subdev);
+	ret = nvkm_falcon_get(&gr->fecs.falcon, subdev);
 	if (ret)
 		return ret;
 
-	ret = nvkm_falcon_get(gr->gpccs.falcon, subdev);
+	ret = nvkm_falcon_get(&gr->gpccs.falcon, subdev);
 	if (ret)
 		return ret;
 
@@ -2004,49 +1997,34 @@ gf100_gr_init_(struct nvkm_gr *base)
 }
 
 static int
-gf100_gr_fini_(struct nvkm_gr *base, bool suspend)
+gf100_gr_fini(struct nvkm_gr *base, bool suspend)
 {
 	struct gf100_gr *gr = gf100_gr(base);
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-	nvkm_falcon_put(gr->gpccs.falcon, subdev);
-	nvkm_falcon_put(gr->fecs.falcon, subdev);
+	nvkm_falcon_put(&gr->gpccs.falcon, subdev);
+	nvkm_falcon_put(&gr->fecs.falcon, subdev);
 	return 0;
 }
 
-void
-gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
-{
-	kfree(fuc->data);
-	fuc->data = NULL;
-}
-
-static void
-gf100_gr_dtor_init(struct gf100_gr_pack *pack)
-{
-	vfree(pack);
-}
-
 void *
 gf100_gr_dtor(struct nvkm_gr *base)
 {
 	struct gf100_gr *gr = gf100_gr(base);
 
-	if (gr->func->dtor)
-		gr->func->dtor(gr);
 	kfree(gr->data);
 
-	nvkm_falcon_del(&gr->gpccs.falcon);
-	nvkm_falcon_del(&gr->fecs.falcon);
+	nvkm_falcon_dtor(&gr->gpccs.falcon);
+	nvkm_falcon_dtor(&gr->fecs.falcon);
 
-	gf100_gr_dtor_fw(&gr->fuc409c);
-	gf100_gr_dtor_fw(&gr->fuc409d);
-	gf100_gr_dtor_fw(&gr->fuc41ac);
-	gf100_gr_dtor_fw(&gr->fuc41ad);
+	nvkm_blob_dtor(&gr->fecs.inst);
+	nvkm_blob_dtor(&gr->fecs.data);
+	nvkm_blob_dtor(&gr->gpccs.inst);
+	nvkm_blob_dtor(&gr->gpccs.data);
 
-	gf100_gr_dtor_init(gr->fuc_bundle);
-	gf100_gr_dtor_init(gr->fuc_method);
-	gf100_gr_dtor_init(gr->fuc_sw_ctx);
-	gf100_gr_dtor_init(gr->fuc_sw_nonctx);
+	vfree(gr->bundle);
+	vfree(gr->method);
+	vfree(gr->sw_ctx);
+	vfree(gr->sw_nonctx);
 
 	return gr;
 }
@@ -2056,7 +2034,7 @@ gf100_gr_ = {
 	.dtor = gf100_gr_dtor,
 	.oneinit = gf100_gr_oneinit,
 	.init = gf100_gr_init_,
-	.fini = gf100_gr_fini_,
+	.fini = gf100_gr_fini,
 	.intr = gf100_gr_intr,
 	.units = gf100_gr_units,
 	.chan_new = gf100_gr_chan_new,
@@ -2067,87 +2045,24 @@ gf100_gr_ = {
 	.ctxsw.inst = gf100_gr_ctxsw_inst,
 };
 
-int
-gf100_gr_ctor_fw_legacy(struct gf100_gr *gr, const char *fwname,
-			struct gf100_gr_fuc *fuc, int ret)
-{
-	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-	struct nvkm_device *device = subdev->device;
-	const struct firmware *fw;
-	char f[32];
-
-	/* see if this firmware has a legacy path */
-	if (!strcmp(fwname, "fecs_inst"))
-		fwname = "fuc409c";
-	else if (!strcmp(fwname, "fecs_data"))
-		fwname = "fuc409d";
-	else if (!strcmp(fwname, "gpccs_inst"))
-		fwname = "fuc41ac";
-	else if (!strcmp(fwname, "gpccs_data"))
-		fwname = "fuc41ad";
-	else {
-		/* nope, let's just return the error we got */
-		nvkm_error(subdev, "failed to load %s\n", fwname);
-		return ret;
-	}
-
-	/* yes, try to load from the legacy path */
-	nvkm_debug(subdev, "%s: falling back to legacy path\n", fwname);
-
-	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
-	ret = request_firmware(&fw, f, device->dev);
-	if (ret) {
-		snprintf(f, sizeof(f), "nouveau/%s", fwname);
-		ret = request_firmware(&fw, f, device->dev);
-		if (ret) {
-			nvkm_error(subdev, "failed to load %s\n", fwname);
-			return ret;
-		}
-	}
-
-	fuc->size = fw->size;
-	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
-	release_firmware(fw);
-	return (fuc->data != NULL) ? 0 : -ENOMEM;
-}
+static const struct nvkm_falcon_func
+gf100_gr_flcn = {
+	.fbif = 0x600,
+	.load_imem = nvkm_falcon_v1_load_imem,
+	.load_dmem = nvkm_falcon_v1_load_dmem,
+	.read_dmem = nvkm_falcon_v1_read_dmem,
+	.bind_context = nvkm_falcon_v1_bind_context,
+	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+	.set_start_addr = nvkm_falcon_v1_set_start_addr,
+	.start = nvkm_falcon_v1_start,
+	.enable = nvkm_falcon_v1_enable,
+	.disable = nvkm_falcon_v1_disable,
+};
 
 int
-gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
-		 struct gf100_gr_fuc *fuc)
-{
-	const struct firmware *fw;
-	int ret;
-
-	ret = nvkm_firmware_get(&gr->base.engine.subdev, fwname, &fw);
-	if (ret) {
-		ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
-		if (ret)
-			return -ENODEV;
-		return 0;
-	}
-
-	fuc->size = fw->size;
-	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
-	nvkm_firmware_put(fw);
-	return (fuc->data != NULL) ? 0 : -ENOMEM;
-}
-
-int
-gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
-	      int index, struct gf100_gr *gr)
-{
-	gr->func = func;
-	gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
-				    func->fecs.ucode == NULL);
-
-	return nvkm_gr_ctor(&gf100_gr_, device, index,
-			    gr->firmware || func->fecs.ucode != NULL,
-			    &gr->base);
-}
-
-int
-gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_gr **pgr)
+gf100_gr_new_(const struct gf100_gr_fwif *fwif,
+	      struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
 	struct gf100_gr *gr;
 	int ret;
@@ -2156,22 +2071,49 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
 		return -ENOMEM;
 	*pgr = &gr->base;
 
-	ret = gf100_gr_ctor(func, device, index, gr);
+	ret = nvkm_gr_ctor(&gf100_gr_, device, index, true, &gr->base);
 	if (ret)
 		return ret;
 
-	if (gr->firmware) {
-		if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
-		    gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
-		    gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
-		    gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
-			return -ENODEV;
-	}
+	fwif = nvkm_firmware_load(&gr->base.engine.subdev, fwif, "Gr", gr);
+	if (IS_ERR(fwif))
+		return -ENODEV;
+
+	gr->func = fwif->func;
+
+	ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev,
+			       "fecs", 0x409000, &gr->fecs.falcon);
+	if (ret)
+		return ret;
+
+	mutex_init(&gr->fecs.mutex);
+
+	ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev,
+			       "gpccs", 0x41a000, &gr->gpccs.falcon);
+	if (ret)
+		return ret;
 
 	return 0;
 }
 
 void
+gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *gr, bool pd, bool ds)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int gpc, i, j;
+	u32 data;
+
+	for (gpc = 0, i = 0; i < 4; i++) {
+		for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++)
+			data |= gr->tpc_nr[gpc] << (j * 4);
+		if (pd)
+			nvkm_wr32(device, 0x406028 + (i * 4), data);
+		if (ds)
+			nvkm_wr32(device, 0x405870 + (i * 4), data);
+	}
+}
+
+void
 gf100_gr_init_400054(struct gf100_gr *gr)
 {
 	nvkm_wr32(gr->base.engine.subdev.device, 0x400054, 0x34ce3464);
@@ -2295,8 +2237,8 @@ gf100_gr_init(struct gf100_gr *gr)
 
 	gr->func->init_gpc_mmu(gr);
 
-	if (gr->fuc_sw_nonctx)
-		gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+	if (gr->sw_nonctx)
+		gf100_gr_mmio(gr, gr->sw_nonctx);
 	else
 		gf100_gr_mmio(gr, gr->func->mmio);
 
@@ -2320,6 +2262,8 @@ gf100_gr_init(struct gf100_gr *gr)
 		gr->func->init_bios_2(gr);
 	if (gr->func->init_swdx_pes_mask)
 		gr->func->init_swdx_pes_mask(gr);
+	if (gr->func->init_fs)
+		gr->func->init_fs(gr);
 
 	nvkm_wr32(device, 0x400500, 0x00010001);
 
@@ -2338,8 +2282,8 @@ gf100_gr_init(struct gf100_gr *gr)
 	if (gr->func->init_40601c)
 		gr->func->init_40601c(gr);
 
-	nvkm_wr32(device, 0x404490, 0xc0000000);
 	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
 
 	if (gr->func->init_sked_hww_esr)
 		gr->func->init_sked_hww_esr(gr);
@@ -2454,7 +2398,66 @@ gf100_gr = {
 };
 
 int
+gf100_gr_nofw(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+	gr->firmware = false;
+	return 0;
+}
+
+static int
+gf100_gr_load_fw(struct gf100_gr *gr, const char *name,
+		 struct nvkm_blob *blob)
+{
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const struct firmware *fw;
+	char f[32];
+	int ret;
+
+	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, name);
+	ret = request_firmware(&fw, f, device->dev);
+	if (ret) {
+		snprintf(f, sizeof(f), "nouveau/%s", name);
+		ret = request_firmware(&fw, f, device->dev);
+		if (ret) {
+			nvkm_error(subdev, "failed to load %s\n", name);
+			return ret;
+		}
+	}
+
+	blob->size = fw->size;
+	blob->data = kmemdup(fw->data, blob->size, GFP_KERNEL);
+	release_firmware(fw);
+	return (blob->data != NULL) ? 0 : -ENOMEM;
+}
+
+int
+gf100_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	if (!nvkm_boolopt(device->cfgopt, "NvGrUseFW", false))
+		return -EINVAL;
+
+	if (gf100_gr_load_fw(gr, "fuc409c", &gr->fecs.inst) ||
+	    gf100_gr_load_fw(gr, "fuc409d", &gr->fecs.data) ||
+	    gf100_gr_load_fw(gr, "fuc41ac", &gr->gpccs.inst) ||
+	    gf100_gr_load_fw(gr, "fuc41ad", &gr->gpccs.data))
+		return -ENOENT;
+
+	gr->firmware = true;
+	return 0;
+}
+
+static const struct gf100_gr_fwif
+gf100_gr_fwif[] = {
+	{ -1, gf100_gr_load, &gf100_gr },
+	{ -1, gf100_gr_nofw, &gf100_gr },
+	{}
+};
+
+int
 gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(&gf100_gr, device, index, pgr);
+	return gf100_gr_new_(gf100_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index fafdd0b..88bcb57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -31,6 +31,8 @@
 #include <subdev/mmu.h>
 #include <engine/falcon.h>
 
+struct nvkm_acr_lsfw;
+
 #define GPC_MAX 32
 #define TPC_MAX_PER_GPC 8
 #define TPC_MAX (GPC_MAX * TPC_MAX_PER_GPC)
@@ -55,11 +57,6 @@ struct gf100_gr_mmio {
 	int buffer;
 };
 
-struct gf100_gr_fuc {
-	u32 *data;
-	u32  size;
-};
-
 struct gf100_gr_zbc_color {
 	u32 format;
 	u32 ds[4];
@@ -83,29 +80,30 @@ struct gf100_gr {
 	struct nvkm_gr base;
 
 	struct {
-		struct nvkm_falcon *falcon;
+		struct nvkm_falcon falcon;
+		struct nvkm_blob inst;
+		struct nvkm_blob data;
+
 		struct mutex mutex;
 		u32 disable;
 	} fecs;
 
 	struct {
-		struct nvkm_falcon *falcon;
+		struct nvkm_falcon falcon;
+		struct nvkm_blob inst;
+		struct nvkm_blob data;
 	} gpccs;
 
-	struct gf100_gr_fuc fuc409c;
-	struct gf100_gr_fuc fuc409d;
-	struct gf100_gr_fuc fuc41ac;
-	struct gf100_gr_fuc fuc41ad;
 	bool firmware;
 
 	/*
 	 * Used if the register packs are loaded from NVIDIA fw instead of
 	 * using hardcoded arrays. To be allocated with vzalloc().
 	 */
-	struct gf100_gr_pack *fuc_sw_nonctx;
-	struct gf100_gr_pack *fuc_sw_ctx;
-	struct gf100_gr_pack *fuc_bundle;
-	struct gf100_gr_pack *fuc_method;
+	struct gf100_gr_pack *sw_nonctx;
+	struct gf100_gr_pack *sw_ctx;
+	struct gf100_gr_pack *bundle;
+	struct gf100_gr_pack *method;
 
 	struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT];
 	struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
@@ -140,12 +138,6 @@ struct gf100_gr {
 	u32 size_pm;
 };
 
-int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
-		  int, struct gf100_gr *);
-int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
-		  int, struct nvkm_gr **);
-void *gf100_gr_dtor(struct nvkm_gr *);
-
 int gf100_gr_fecs_bind_pointer(struct gf100_gr *, u32 inst);
 
 struct gf100_gr_func_zbc {
@@ -157,7 +149,6 @@ struct gf100_gr_func_zbc {
 };
 
 struct gf100_gr_func {
-	void (*dtor)(struct gf100_gr *);
 	void (*oneinit_tiles)(struct gf100_gr *);
 	void (*oneinit_sm_id)(struct gf100_gr *);
 	int (*init)(struct gf100_gr *);
@@ -171,6 +162,7 @@ struct gf100_gr_func {
 	void (*init_rop_active_fbps)(struct gf100_gr *);
 	void (*init_bios_2)(struct gf100_gr *);
 	void (*init_swdx_pes_mask)(struct gf100_gr *);
+	void (*init_fs)(struct gf100_gr *);
 	void (*init_fecs_exceptions)(struct gf100_gr *);
 	void (*init_ds_hww_esr_2)(struct gf100_gr *);
 	void (*init_40601c)(struct gf100_gr *);
@@ -217,6 +209,7 @@ void gf100_gr_init_419eb4(struct gf100_gr *);
 void gf100_gr_init_tex_hww_esr(struct gf100_gr *, int, int);
 void gf100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
 void gf100_gr_init_400054(struct gf100_gr *);
+void gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *, bool, bool);
 extern const struct gf100_gr_func_zbc gf100_gr_zbc;
 
 void gf117_gr_init_zcull(struct gf100_gr *);
@@ -245,10 +238,18 @@ void gp100_gr_init_fecs_exceptions(struct gf100_gr *);
 void gp100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
 void gp100_gr_zbc_clear_color(struct gf100_gr *, int);
 void gp100_gr_zbc_clear_depth(struct gf100_gr *, int);
+extern const struct gf100_gr_func_zbc gp100_gr_zbc;
 
 void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
 extern const struct gf100_gr_func_zbc gp102_gr_zbc;
 
+extern const struct gf100_gr_func gp107_gr;
+
+void gv100_gr_init_419bd8(struct gf100_gr *);
+void gv100_gr_init_504430(struct gf100_gr *, int, int);
+void gv100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
+void gv100_gr_trap_mp(struct gf100_gr *, int, int);
+
 #define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
 #include <core/object.h>
 
@@ -269,9 +270,6 @@ struct gf100_gr_chan {
 
 void gf100_gr_ctxctl_debug(struct gf100_gr *);
 
-void gf100_gr_dtor_fw(struct gf100_gr_fuc *);
-int  gf100_gr_ctor_fw(struct gf100_gr *, const char *,
-		      struct gf100_gr_fuc *);
 u64  gf100_gr_units(struct nvkm_gr *);
 void gf100_gr_zbc_init(struct gf100_gr *);
 
@@ -294,8 +292,8 @@ struct gf100_gr_pack {
 		  for (init = pack->init; init && init->count; init++)
 
 struct gf100_gr_ucode {
-	struct gf100_gr_fuc code;
-	struct gf100_gr_fuc data;
+	struct nvkm_blob code;
+	struct nvkm_blob data;
 };
 
 extern struct gf100_gr_ucode gf100_gr_fecs_ucode;
@@ -310,17 +308,6 @@ void gf100_gr_icmd(struct gf100_gr *, const struct gf100_gr_pack *);
 void gf100_gr_mthd(struct gf100_gr *, const struct gf100_gr_pack *);
 int  gf100_gr_init_ctxctl(struct gf100_gr *);
 
-/* external bundles loading functions */
-int gk20a_gr_av_to_init(struct gf100_gr *, const char *,
-			struct gf100_gr_pack **);
-int gk20a_gr_aiv_to_init(struct gf100_gr *, const char *,
-			 struct gf100_gr_pack **);
-int gk20a_gr_av_to_method(struct gf100_gr *, const char *,
-			  struct gf100_gr_pack **);
-
-int gm200_gr_new_(const struct gf100_gr_func *, struct nvkm_device *, int,
-		  struct nvkm_gr **);
-
 /* register init value lists */
 
 extern const struct gf100_gr_init gf100_gr_init_main_0[];
@@ -403,4 +390,31 @@ extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
 void gm107_gr_init_bios(struct gf100_gr *);
 
 void gm200_gr_init_gpc_mmu(struct gf100_gr *);
+
+struct gf100_gr_fwif {
+	int version;
+	int (*load)(struct gf100_gr *, int ver, const struct gf100_gr_fwif *);
+	const struct gf100_gr_func *func;
+	const struct nvkm_acr_lsf_func *fecs;
+	const struct nvkm_acr_lsf_func *gpccs;
+};
+
+int gf100_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *);
+int gf100_gr_nofw(struct gf100_gr *, int, const struct gf100_gr_fwif *);
+
+int gk20a_gr_load_sw(struct gf100_gr *, const char *path, int ver);
+
+int gm200_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *);
+extern const struct nvkm_acr_lsf_func gm200_gr_gpccs_acr;
+extern const struct nvkm_acr_lsf_func gm200_gr_fecs_acr;
+
+extern const struct nvkm_acr_lsf_func gm20b_gr_fecs_acr;
+void gm20b_gr_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *);
+void gm20b_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
+
+extern const struct nvkm_acr_lsf_func gp108_gr_gpccs_acr;
+extern const struct nvkm_acr_lsf_func gp108_gr_fecs_acr;
+
+int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, int,
+		  struct nvkm_gr **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 42c2fd9..0536fe8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -144,8 +144,15 @@ gf104_gr = {
 	}
 };
 
+static const struct gf100_gr_fwif
+gf104_gr_fwif[] = {
+	{ -1, gf100_gr_load, &gf104_gr },
+	{ -1, gf100_gr_nofw, &gf104_gr },
+	{}
+};
+
 int
 gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(&gf104_gr, device, index, pgr);
+	return gf100_gr_new_(gf104_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 4731a46..14284b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -143,8 +143,15 @@ gf108_gr = {
 	}
 };
 
+const struct gf100_gr_fwif
+gf108_gr_fwif[] = {
+	{ -1, gf100_gr_load, &gf108_gr },
+	{ -1, gf100_gr_nofw, &gf108_gr },
+	{}
+};
+
 int
 gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(&gf108_gr, device, index, pgr);
+	return gf100_gr_new_(gf108_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index cdf759c..2807525 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -119,8 +119,15 @@ gf110_gr = {
 	}
 };
 
+static const struct gf100_gr_fwif
+gf110_gr_fwif[] = {
+	{ -1, gf100_gr_load, &gf110_gr },
+	{ -1, gf100_gr_nofw, &gf110_gr },
+	{}
+};
+
 int
 gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(&gf110_gr, device, index, pgr);
+	return gf100_gr_new_(gf110_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index a4158f8..235c3fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -184,8 +184,15 @@ gf117_gr = {
 	}
 };
 
+static const struct gf100_gr_fwif
+gf117_gr_fwif[] = {
+	{ -1, gf100_gr_load, &gf117_gr },
+	{ -1, gf100_gr_nofw, &gf117_gr },
+	{}
+};
+
 int
 gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(&gf117_gr, device, index, pgr);
+	return gf100_gr_new_(gf117_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 4197844..7eac385 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -210,8 +210,15 @@ gf119_gr = {
 	}
 };
 
+static const struct gf100_gr_fwif
+gf119_gr_fwif[] = {
+	{ -1, gf100_gr_load, &gf119_gr },
+	{ -1, gf100_gr_nofw, &gf119_gr },
+	{}
+};
+
 int
 gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(&gf119_gr, device, index, pgr);
+	return gf100_gr_new_(gf119_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 477fee3..89f51d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -489,8 +489,15 @@ gk104_gr = {
 	}
 };
 
+static const struct gf100_gr_fwif
+gk104_gr_fwif[] = {
+	{ -1, gf100_gr_load, &gk104_gr },
+	{ -1, gf100_gr_nofw, &gk104_gr },
+	{}
+};
+
 int
 gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(&gk104_gr, device, index, pgr);
+	return gf100_gr_new_(gk104_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 7cd628c..735f05e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -385,8 +385,15 @@ gk110_gr = {
 	}
 };
 
+static const struct gf100_gr_fwif
+gk110_gr_fwif[] = {
+	{ -1, gf100_gr_load, &gk110_gr },
+	{ -1, gf100_gr_nofw, &gk110_gr },
+	{}
+};
+
 int
 gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(&gk110_gr, device, index, pgr);
+	return gf100_gr_new_(gk110_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index a38faa2..adc971b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -136,8 +136,15 @@ gk110b_gr = {
 	}
 };
 
+static const struct gf100_gr_fwif
+gk110b_gr_fwif[] = {
+	{ -1, gf100_gr_load, &gk110b_gr },
+	{ -1, gf100_gr_nofw, &gk110b_gr },
+	{}
+};
+
 int
 gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(&gk110b_gr, device, index, pgr);
+	return gf100_gr_new_(gk110b_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 5845666..aa0eff6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -194,8 +194,15 @@ gk208_gr = {
 	}
 };
 
+static const struct gf100_gr_fwif
+gk208_gr_fwif[] = {
+	{ -1, gf100_gr_load, &gk208_gr },
+	{ -1, gf100_gr_nofw, &gk208_gr },
+	{}
+};
+
 int
 gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(&gk208_gr, device, index, pgr);
+	return gf100_gr_new_(gk208_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 500cb08..4209b24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -22,6 +22,7 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <core/firmware.h>
 #include <subdev/timer.h>
 
 #include <nvif/class.h>
@@ -33,21 +34,22 @@ struct gk20a_fw_av
 };
 
 int
-gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
-		    struct gf100_gr_pack **ppack)
+gk20a_gr_av_to_init(struct gf100_gr *gr, const char *path, const char *name,
+		    int ver, struct gf100_gr_pack **ppack)
 {
-	struct gf100_gr_fuc fuc;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_blob blob;
 	struct gf100_gr_init *init;
 	struct gf100_gr_pack *pack;
 	int nent;
 	int ret;
 	int i;
 
-	ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+	ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
 	if (ret)
 		return ret;
 
-	nent = (fuc.size / sizeof(struct gk20a_fw_av));
+	nent = (blob.size / sizeof(struct gk20a_fw_av));
 	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
 	if (!pack) {
 		ret = -ENOMEM;
@@ -59,7 +61,7 @@ gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
 
 	for (i = 0; i < nent; i++) {
 		struct gf100_gr_init *ent = &init[i];
-		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
+		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i];
 
 		ent->addr = av->addr;
 		ent->data = av->data;
@@ -70,7 +72,7 @@ gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
 	*ppack = pack;
 
 end:
-	gf100_gr_dtor_fw(&fuc);
+	nvkm_blob_dtor(&blob);
 	return ret;
 }
 
@@ -82,21 +84,22 @@ struct gk20a_fw_aiv
 };
 
 int
-gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
-		     struct gf100_gr_pack **ppack)
+gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *path, const char *name,
+		     int ver, struct gf100_gr_pack **ppack)
 {
-	struct gf100_gr_fuc fuc;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_blob blob;
 	struct gf100_gr_init *init;
 	struct gf100_gr_pack *pack;
 	int nent;
 	int ret;
 	int i;
 
-	ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+	ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
 	if (ret)
 		return ret;
 
-	nent = (fuc.size / sizeof(struct gk20a_fw_aiv));
+	nent = (blob.size / sizeof(struct gk20a_fw_aiv));
 	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
 	if (!pack) {
 		ret = -ENOMEM;
@@ -108,7 +111,7 @@ gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
 
 	for (i = 0; i < nent; i++) {
 		struct gf100_gr_init *ent = &init[i];
-		struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc.data)[i];
+		struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)blob.data)[i];
 
 		ent->addr = av->addr;
 		ent->data = av->data;
@@ -119,15 +122,16 @@ gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
 	*ppack = pack;
 
 end:
-	gf100_gr_dtor_fw(&fuc);
+	nvkm_blob_dtor(&blob);
 	return ret;
 }
 
 int
-gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
-		      struct gf100_gr_pack **ppack)
+gk20a_gr_av_to_method(struct gf100_gr *gr, const char *path, const char *name,
+		      int ver, struct gf100_gr_pack **ppack)
 {
-	struct gf100_gr_fuc fuc;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_blob blob;
 	struct gf100_gr_init *init;
 	struct gf100_gr_pack *pack;
 	/* We don't suppose we will initialize more than 16 classes here... */
@@ -137,29 +141,30 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
 	int ret;
 	int i;
 
-	ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+	ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
 	if (ret)
 		return ret;
 
-	nent = (fuc.size / sizeof(struct gk20a_fw_av));
+	nent = (blob.size / sizeof(struct gk20a_fw_av));
 
-	pack = vzalloc((sizeof(*pack) * max_classes) +
-		       (sizeof(*init) * (nent + 1)));
+	pack = vzalloc((sizeof(*pack) * (max_classes + 1)) +
+		       (sizeof(*init) * (nent + max_classes + 1)));
 	if (!pack) {
 		ret = -ENOMEM;
 		goto end;
 	}
 
-	init = (void *)(pack + max_classes);
+	init = (void *)(pack + max_classes + 1);
 
-	for (i = 0; i < nent; i++) {
-		struct gf100_gr_init *ent = &init[i];
-		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
+	for (i = 0; i < nent; i++, init++) {
+		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i];
 		u32 class = av->addr & 0xffff;
 		u32 addr = (av->addr & 0xffff0000) >> 14;
 
 		if (prevclass != class) {
-			pack[classidx].init = ent;
+			if (prevclass) /* Add terminator to the method list. */
+				init++;
+			pack[classidx].init = init;
 			pack[classidx].type = class;
 			prevclass = class;
 			if (++classidx >= max_classes) {
@@ -169,16 +174,16 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
 			}
 		}
 
-		ent->addr = addr;
-		ent->data = av->data;
-		ent->count = 1;
-		ent->pitch = 1;
+		init->addr = addr;
+		init->data = av->data;
+		init->count = 1;
+		init->pitch = 1;
 	}
 
 	*ppack = pack;
 
 end:
-	gf100_gr_dtor_fw(&fuc);
+	nvkm_blob_dtor(&blob);
 	return ret;
 }
 
@@ -224,7 +229,7 @@ gk20a_gr_init(struct gf100_gr *gr)
 	/* Clear SCC RAM */
 	nvkm_wr32(device, 0x40802c, 0x1);
 
-	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+	gf100_gr_mmio(gr, gr->sw_nonctx);
 
 	ret = gk20a_gr_wait_mem_scrubbing(gr);
 	if (ret)
@@ -303,40 +308,45 @@ gk20a_gr = {
 };
 
 int
-gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk20a_gr_load_sw(struct gf100_gr *gr, const char *path, int ver)
 {
-	struct gf100_gr *gr;
-	int ret;
-
-	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
-		return -ENOMEM;
-	*pgr = &gr->base;
-
-	ret = gf100_gr_ctor(&gk20a_gr, device, index, gr);
-	if (ret)
-		return ret;
-
-	if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
-	    gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
-	    gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
-	    gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
-		return -ENODEV;
-
-	ret = gk20a_gr_av_to_init(gr, "sw_nonctx", &gr->fuc_sw_nonctx);
-	if (ret)
-		return ret;
-
-	ret = gk20a_gr_aiv_to_init(gr, "sw_ctx", &gr->fuc_sw_ctx);
-	if (ret)
-		return ret;
-
-	ret = gk20a_gr_av_to_init(gr, "sw_bundle_init", &gr->fuc_bundle);
-	if (ret)
-		return ret;
-
-	ret = gk20a_gr_av_to_method(gr, "sw_method_init", &gr->fuc_method);
-	if (ret)
-		return ret;
+	if (gk20a_gr_av_to_init(gr, path, "sw_nonctx", ver, &gr->sw_nonctx) ||
+	    gk20a_gr_aiv_to_init(gr, path, "sw_ctx", ver, &gr->sw_ctx) ||
+	    gk20a_gr_av_to_init(gr, path, "sw_bundle_init", ver, &gr->bundle) ||
+	    gk20a_gr_av_to_method(gr, path, "sw_method_init", ver, &gr->method))
+		return -ENOENT;
 
 	return 0;
 }
+
+static int
+gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+
+	if (nvkm_firmware_load_blob(subdev, "", "fecs_inst", ver,
+				    &gr->fecs.inst) ||
+	    nvkm_firmware_load_blob(subdev, "", "fecs_data", ver,
+				    &gr->fecs.data) ||
+	    nvkm_firmware_load_blob(subdev, "", "gpccs_inst", ver,
+				    &gr->gpccs.inst) ||
+	    nvkm_firmware_load_blob(subdev, "", "gpccs_data", ver,
+				    &gr->gpccs.data))
+		return -ENOENT;
+
+	gr->firmware = true;
+
+	return gk20a_gr_load_sw(gr, "", ver);
+}
+
+static const struct gf100_gr_fwif
+gk20a_gr_fwif[] = {
+	{ -1, gk20a_gr_load, &gk20a_gr },
+	{}
+};
+
+int
+gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(gk20a_gr_fwif, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 92e31d3..09bb78b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -429,8 +429,15 @@ gm107_gr = {
 	}
 };
 
+static const struct gf100_gr_fwif
+gm107_gr_fwif[] = {
+	{ -1, gf100_gr_load, &gm107_gr },
+	{ -1, gf100_gr_nofw, &gm107_gr },
+	{}
+};
+
 int
 gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(&gm107_gr, device, index, pgr);
+	return gf100_gr_new_(gm107_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index eff3066..3d67cfb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -24,14 +24,64 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <core/firmware.h>
+#include <subdev/acr.h>
 #include <subdev/secboot.h>
 
+#include <nvfw/flcn.h>
+
 #include <nvif/class.h>
 
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+static void
+gm200_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+	struct flcn_bl_dmem_desc_v1 hdr;
+	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+	hdr.code_dma_base = hdr.code_dma_base + adjust;
+	hdr.data_dma_base = hdr.data_dma_base + adjust;
+	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+	flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hdr);
+}
+
+static void
+gm200_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+		       struct nvkm_acr_lsfw *lsfw)
+{
+	const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+	const u64 code = base + lsfw->app_resident_code_offset;
+	const u64 data = base + lsfw->app_resident_data_offset;
+	const struct flcn_bl_dmem_desc_v1 hdr = {
+		.ctx_dma = FALCON_DMAIDX_UCODE,
+		.code_dma_base = code,
+		.non_sec_code_off = lsfw->app_resident_code_offset,
+		.non_sec_code_size = lsfw->app_resident_code_size,
+		.code_entry_point = lsfw->app_imem_entry,
+		.data_dma_base = data,
+		.data_size = lsfw->app_resident_data_size,
+	};
+
+	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gm200_gr_gpccs_acr = {
+	.flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
+	.bld_size = sizeof(struct flcn_bl_dmem_desc_v1),
+	.bld_write = gm200_gr_acr_bld_write,
+	.bld_patch = gm200_gr_acr_bld_patch,
+};
+
+const struct nvkm_acr_lsf_func
+gm200_gr_fecs_acr = {
+	.bld_size = sizeof(struct flcn_bl_dmem_desc_v1),
+	.bld_write = gm200_gr_acr_bld_write,
+	.bld_patch = gm200_gr_acr_bld_patch,
+};
+
 int
 gm200_gr_rops(struct gf100_gr *gr)
 {
@@ -124,44 +174,6 @@ gm200_gr_oneinit_tiles(struct gf100_gr *gr)
 	}
 }
 
-int
-gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_gr **pgr)
-{
-	struct gf100_gr *gr;
-	int ret;
-
-	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
-		return -ENOMEM;
-	*pgr = &gr->base;
-
-	ret = gf100_gr_ctor(func, device, index, gr);
-	if (ret)
-		return ret;
-
-	/* Load firmwares for non-secure falcons */
-	if (!nvkm_secboot_is_managed(device->secboot,
-				     NVKM_SECBOOT_FALCON_FECS)) {
-		if ((ret = gf100_gr_ctor_fw(gr, "gr/fecs_inst", &gr->fuc409c)) ||
-		    (ret = gf100_gr_ctor_fw(gr, "gr/fecs_data", &gr->fuc409d)))
-			return ret;
-	}
-	if (!nvkm_secboot_is_managed(device->secboot,
-				     NVKM_SECBOOT_FALCON_GPCCS)) {
-		if ((ret = gf100_gr_ctor_fw(gr, "gr/gpccs_inst", &gr->fuc41ac)) ||
-		    (ret = gf100_gr_ctor_fw(gr, "gr/gpccs_data", &gr->fuc41ad)))
-			return ret;
-	}
-
-	if ((ret = gk20a_gr_av_to_init(gr, "gr/sw_nonctx", &gr->fuc_sw_nonctx)) ||
-	    (ret = gk20a_gr_aiv_to_init(gr, "gr/sw_ctx", &gr->fuc_sw_ctx)) ||
-	    (ret = gk20a_gr_av_to_init(gr, "gr/sw_bundle_init", &gr->fuc_bundle)) ||
-	    (ret = gk20a_gr_av_to_method(gr, "gr/sw_method_init", &gr->fuc_method)))
-		return ret;
-
-	return 0;
-}
-
 static const struct gf100_gr_func
 gm200_gr = {
 	.oneinit_tiles = gm200_gr_oneinit_tiles,
@@ -198,7 +210,77 @@ gm200_gr = {
 };
 
 int
+gm200_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+	int ret;
+
+	ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev,
+						  &gr->fecs.falcon,
+						  NVKM_ACR_LSF_FECS,
+						  "gr/fecs_", ver, fwif->fecs);
+	if (ret)
+		return ret;
+
+	ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev,
+						  &gr->gpccs.falcon,
+						  NVKM_ACR_LSF_GPCCS,
+						  "gr/gpccs_", ver,
+						  fwif->gpccs);
+	if (ret)
+		return ret;
+
+	gr->firmware = true;
+
+	return gk20a_gr_load_sw(gr, "gr/", ver);
+}
+
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gm200_gr_fwif[] = {
+	{ 0, gm200_gr_load, &gm200_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+	{}
+};
+
+int
 gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gm200_gr_new_(&gm200_gr, device, index, pgr);
+	return gf100_gr_new_(gm200_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index a667770..09d8c5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -22,10 +22,61 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <core/firmware.h>
+#include <subdev/acr.h>
 #include <subdev/timer.h>
 
+#include <nvfw/flcn.h>
+
 #include <nvif/class.h>
 
+void
+gm20b_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+	struct flcn_bl_dmem_desc hdr;
+	u64 addr;
+
+	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+	addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8);
+	hdr.code_dma_base  = lower_32_bits((addr + adjust) >> 8);
+	hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+	addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8);
+	hdr.data_dma_base  = lower_32_bits((addr + adjust) >> 8);
+	hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+
+	flcn_bl_dmem_desc_dump(&acr->subdev, &hdr);
+}
+
+void
+gm20b_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+		       struct nvkm_acr_lsfw *lsfw)
+{
+	const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+	const u64 code = (base + lsfw->app_resident_code_offset) >> 8;
+	const u64 data = (base + lsfw->app_resident_data_offset) >> 8;
+	const struct flcn_bl_dmem_desc hdr = {
+		.ctx_dma = FALCON_DMAIDX_UCODE,
+		.code_dma_base = lower_32_bits(code),
+		.non_sec_code_off = lsfw->app_resident_code_offset,
+		.non_sec_code_size = lsfw->app_resident_code_size,
+		.code_entry_point = lsfw->app_imem_entry,
+		.data_dma_base = lower_32_bits(data),
+		.data_size = lsfw->app_resident_data_size,
+		.code_dma_base1 = upper_32_bits(code),
+		.data_dma_base1 = upper_32_bits(data),
+	};
+
+	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gm20b_gr_fecs_acr = {
+	.bld_size = sizeof(struct flcn_bl_dmem_desc),
+	.bld_write = gm20b_gr_acr_bld_write,
+	.bld_patch = gm20b_gr_acr_bld_patch,
+};
+
 static void
 gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
 {
@@ -33,7 +84,7 @@ gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
 	u32 val;
 
 	/* Bypass MMU check for non-secure boot */
-	if (!device->secboot) {
+	if (!device->acr) {
 		nvkm_wr32(device, 0x100ce4, 0xffffffff);
 
 		if (nvkm_rd32(device, 0x100ce4) != 0xffffffff)
@@ -85,8 +136,51 @@ gm20b_gr = {
 	}
 };
 
+static int
+gm20b_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	int ret;
+
+	ret = nvkm_acr_lsfw_load_bl_inst_data_sig(subdev, &gr->fecs.falcon,
+						  NVKM_ACR_LSF_FECS,
+						  "gr/fecs_", ver, fwif->fecs);
+	if (ret)
+		return ret;
+
+	if (nvkm_firmware_load_blob(subdev, "gr/", "gpccs_inst", ver,
+				    &gr->gpccs.inst) ||
+	    nvkm_firmware_load_blob(subdev, "gr/", "gpccs_data", ver,
+				    &gr->gpccs.data))
+		return -ENOENT;
+
+	gr->firmware = true;
+
+	return gk20a_gr_load_sw(gr, "gr/", ver);
+}
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
+#endif
+
+static const struct gf100_gr_fwif
+gm20b_gr_fwif[] = {
+	{ 0, gm20b_gr_load, &gm20b_gr, &gm20b_gr_fecs_acr },
+	{}
+};
+
 int
 gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gm200_gr_new_(&gm20b_gr, device, index, pgr);
+	return gf100_gr_new_(gm20b_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 9d0521c..33c8634 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -62,7 +62,7 @@ gp100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
 			  gr->zbc_depth[zbc].format << ((znum % 4) * 7));
 }
 
-static const struct gf100_gr_func_zbc
+const struct gf100_gr_func_zbc
 gp100_gr_zbc = {
 	.clear_color = gp100_gr_zbc_clear_color,
 	.clear_depth = gp100_gr_zbc_clear_depth,
@@ -135,8 +135,27 @@ gp100_gr = {
 	}
 };
 
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp100_gr_fwif[] = {
+	{ 0, gm200_gr_load, &gp100_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+	{}
+};
+
 int
 gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gm200_gr_new_(&gp100_gr, device, index, pgr);
+	return gf100_gr_new_(gp100_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 37f7d73..7baf67f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -131,8 +131,27 @@ gp102_gr = {
 	}
 };
 
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp102_gr_fwif[] = {
+	{ 0, gm200_gr_load, &gp102_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+	{}
+};
+
 int
 gp102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gm200_gr_new_(&gp102_gr, device, index, pgr);
+	return gf100_gr_new_(gp102_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
index 4573c91..d9b8ef8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -59,8 +59,40 @@ gp104_gr = {
 	}
 };
 
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp104_gr_fwif[] = {
+	{ 0, gm200_gr_load, &gp104_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+	{}
+};
+
 int
 gp104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gm200_gr_new_(&gp104_gr, device, index, pgr);
+	return gf100_gr_new_(gp104_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 812aba9..2b1ad55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -26,7 +26,7 @@
 
 #include <nvif/class.h>
 
-static const struct gf100_gr_func
+const struct gf100_gr_func
 gp107_gr = {
 	.oneinit_tiles = gm200_gr_oneinit_tiles,
 	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
@@ -61,8 +61,27 @@ gp107_gr = {
 	}
 };
 
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp107_gr_fwif[] = {
+	{ 0, gm200_gr_load, &gp107_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+	{}
+};
+
 int
 gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gm200_gr_new_(&gp107_gr, device, index, pgr);
+	return gf100_gr_new_(gp107_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
new file mode 100644
index 0000000..113e4c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <subdev/acr.h>
+
+#include <nvfw/flcn.h>
+
+static void
+gp108_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+	struct flcn_bl_dmem_desc_v2 hdr;
+	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+	hdr.code_dma_base = hdr.code_dma_base + adjust;
+	hdr.data_dma_base = hdr.data_dma_base + adjust;
+	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+	flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr);
+}
+
+static void
+gp108_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+		       struct nvkm_acr_lsfw *lsfw)
+{
+	const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+	const u64 code = base + lsfw->app_resident_code_offset;
+	const u64 data = base + lsfw->app_resident_data_offset;
+	const struct flcn_bl_dmem_desc_v2 hdr = {
+		.ctx_dma = FALCON_DMAIDX_UCODE,
+		.code_dma_base = code,
+		.non_sec_code_off = lsfw->app_resident_code_offset,
+		.non_sec_code_size = lsfw->app_resident_code_size,
+		.code_entry_point = lsfw->app_imem_entry,
+		.data_dma_base = data,
+		.data_size = lsfw->app_resident_data_size,
+	};
+
+	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gp108_gr_gpccs_acr = {
+	.flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
+	.bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
+	.bld_write = gp108_gr_acr_bld_write,
+	.bld_patch = gp108_gr_acr_bld_patch,
+};
+
+const struct nvkm_acr_lsf_func
+gp108_gr_fecs_acr = {
+	.bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
+	.bld_write = gp108_gr_acr_bld_write,
+	.bld_patch = gp108_gr_acr_bld_patch,
+};
+
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp108_gr_fwif[] = {
+	{ 0, gm200_gr_load, &gp107_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+	{}
+};
+
+int
+gp108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(gp108_gr_fwif, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 303dced..eaf913e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -23,8 +23,20 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <subdev/acr.h>
+
 #include <nvif/class.h>
 
+#include <nvfw/flcn.h>
+
+static const struct nvkm_acr_lsf_func
+gp10b_gr_gpccs_acr = {
+	.flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
+	.bld_size = sizeof(struct flcn_bl_dmem_desc),
+	.bld_write = gm20b_gr_acr_bld_write,
+	.bld_patch = gm20b_gr_acr_bld_patch,
+};
+
 static const struct gf100_gr_func
 gp10b_gr = {
 	.oneinit_tiles = gm200_gr_oneinit_tiles,
@@ -48,8 +60,8 @@ gp10b_gr = {
 	.gpc_nr = 1,
 	.tpc_nr = 2,
 	.ppc_nr = 1,
-	.grctx = &gp102_grctx,
-	.zbc = &gp102_gr_zbc,
+	.grctx = &gp100_grctx,
+	.zbc = &gp100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
@@ -59,8 +71,29 @@ gp10b_gr = {
 	}
 };
 
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
+#endif
+
+static const struct gf100_gr_fwif
+gp10b_gr_fwif[] = {
+	{ 0, gm200_gr_load, &gp10b_gr, &gm20b_gr_fecs_acr, &gp10b_gr_gpccs_acr },
+	{}
+};
+
 int
 gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gm200_gr_new_(&gp10b_gr, device, index, pgr);
+	return gf100_gr_new_(gp10b_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
index 3b33277..70639d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
@@ -45,7 +45,7 @@ gv100_gr_trap_sm(struct gf100_gr *gr, int gpc, int tpc, int sm)
 	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734 + sm * 0x80), gerr);
 }
 
-static void
+void
 gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
 {
 	gv100_gr_trap_sm(gr, gpc, tpc, 0);
@@ -59,7 +59,7 @@ gv100_gr_init_4188a4(struct gf100_gr *gr)
 	nvkm_mask(device, 0x4188a4, 0x03000000, 0x03000000);
 }
 
-static void
+void
 gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -71,14 +71,14 @@ gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
 	}
 }
 
-static void
+void
 gv100_gr_init_504430(struct gf100_gr *gr, int gpc, int tpc)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0x403f0000);
 }
 
-static void
+void
 gv100_gr_init_419bd8(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -120,8 +120,27 @@ gv100_gr = {
 	}
 };
 
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gv100_gr_fwif[] = {
+	{ 0, gm200_gr_load, &gv100_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+	{}
+};
+
 int
 gv100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gm200_gr_new_(&gv100_gr, device, index, pgr);
+	return gf100_gr_new_(gv100_gr_fwif, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
new file mode 100644
index 0000000..454668b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+static void
+tu102_gr_init_fecs_exceptions(struct gf100_gr *gr)
+{
+	nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, 0x006f0002);
+}
+
+static void
+tu102_gr_init_fs(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int sm;
+
+	gp100_grctx_generate_smid_config(gr);
+	gk104_grctx_generate_gpc_tpc_nr(gr);
+
+	for (sm = 0; sm < gr->sm_nr; sm++) {
+		nvkm_wr32(device, GPC_UNIT(gr->sm[sm].gpc, 0x0c10 +
+					   gr->sm[sm].tpc * 4), sm);
+	}
+
+	gm200_grctx_generate_dist_skip_table(gr);
+	gf100_gr_init_num_tpc_per_gpc(gr, true, true);
+}
+
+static void
+tu102_gr_init_zcull(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	const u8 tile_nr = ALIGN(gr->tpc_total, 64);
+	u8 bank[GPC_MAX] = {}, gpc, i, j;
+	u32 data;
+
+	for (i = 0; i < tile_nr; i += 8) {
+		for (data = 0, j = 0; j < 8 && i + j < gr->tpc_total; j++) {
+			data |= bank[gr->tile[i + j]] << (j * 4);
+			bank[gr->tile[i + j]]++;
+		}
+		nvkm_wr32(device, GPC_BCAST(0x0980 + ((i / 8) * 4)), data);
+	}
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+							 gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	}
+
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+}
+
+static void
+tu102_gr_init_gpc_mmu(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf8001fff);
+	nvkm_wr32(device, 0x418890, 0x00000000);
+	nvkm_wr32(device, 0x418894, 0x00000000);
+
+	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
+	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
+	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
+}
+
+static const struct gf100_gr_func
+tu102_gr = {
+	.oneinit_tiles = gm200_gr_oneinit_tiles,
+	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
+	.init = gf100_gr_init,
+	.init_419bd8 = gv100_gr_init_419bd8,
+	.init_gpc_mmu = tu102_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+	.init_zcull = tu102_gr_init_zcull,
+	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
+	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+	.init_fs = tu102_gr_init_fs,
+	.init_fecs_exceptions = tu102_gr_init_fecs_exceptions,
+	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
+	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.init_504430 = gv100_gr_init_504430,
+	.init_shader_exceptions = gv100_gr_init_shader_exceptions,
+	.trap_mp = gv100_gr_trap_mp,
+	.rops = gm200_gr_rops,
+	.gpc_nr = 6,
+	.tpc_nr = 5,
+	.ppc_nr = 3,
+	.grctx = &tu102_grctx,
+	.zbc = &gp102_gr_zbc,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, TURING_A, &gf100_fermi },
+		{ -1, -1, TURING_COMPUTE_A },
+		{}
+	}
+};
+
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+tu102_gr_fwif[] = {
+	{ 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+	{}
+};
+
+int
+tu102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(tu102_gr_fwif, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
index cdf6318..9a0fd98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -1,3 +1,3 @@
 # SPDX-License-Identifier: MIT
 nvkm-y += nvkm/engine/nvdec/base.o
-nvkm-y += nvkm/engine/nvdec/gp102.o
+nvkm-y += nvkm/engine/nvdec/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
index 4a63581..9b23c1b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
@@ -20,48 +20,42 @@
  * DEALINGS IN THE SOFTWARE.
  */
 #include "priv.h"
-
-#include <subdev/top.h>
-#include <engine/falcon.h>
-
-static int
-nvkm_nvdec_oneinit(struct nvkm_engine *engine)
-{
-	struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
-	struct nvkm_subdev *subdev = &nvdec->engine.subdev;
-
-	nvdec->addr = nvkm_top_addr(subdev->device, subdev->index);
-	if (!nvdec->addr)
-		return -EINVAL;
-
-	/*XXX: fix naming of this when adding support for multiple-NVDEC */
-	return nvkm_falcon_v1_new(subdev, "NVDEC", nvdec->addr,
-				  &nvdec->falcon);
-}
+#include <core/firmware.h>
 
 static void *
 nvkm_nvdec_dtor(struct nvkm_engine *engine)
 {
 	struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
-	nvkm_falcon_del(&nvdec->falcon);
+	nvkm_falcon_dtor(&nvdec->falcon);
 	return nvdec;
 }
 
 static const struct nvkm_engine_func
 nvkm_nvdec = {
 	.dtor = nvkm_nvdec_dtor,
-	.oneinit = nvkm_nvdec_oneinit,
 };
 
 int
-nvkm_nvdec_new_(struct nvkm_device *device, int index,
-		struct nvkm_nvdec **pnvdec)
+nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
+		int index, struct nvkm_nvdec **pnvdec)
 {
 	struct nvkm_nvdec *nvdec;
+	int ret;
 
 	if (!(nvdec = *pnvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL)))
 		return -ENOMEM;
 
-	return nvkm_engine_ctor(&nvkm_nvdec, device, index, true,
-				&nvdec->engine);
+	ret = nvkm_engine_ctor(&nvkm_nvdec, device, index, true,
+			       &nvdec->engine);
+	if (ret)
+		return ret;
+
+	fwif = nvkm_firmware_load(&nvdec->engine.subdev, fwif, "Nvdec", nvdec);
+	if (IS_ERR(fwif))
+		return -ENODEV;
+
+	nvdec->func = fwif->func;
+
+	return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev,
+				nvkm_subdev_name[index], 0, &nvdec->falcon);
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
new file mode 100644
index 0000000..0ab27ab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static const struct nvkm_falcon_func
+gm107_nvdec_flcn = {
+	.debug = 0xd00,
+	.fbif = 0x600,
+	.load_imem = nvkm_falcon_v1_load_imem,
+	.load_dmem = nvkm_falcon_v1_load_dmem,
+	.read_dmem = nvkm_falcon_v1_read_dmem,
+	.bind_context = nvkm_falcon_v1_bind_context,
+	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+	.set_start_addr = nvkm_falcon_v1_set_start_addr,
+	.start = nvkm_falcon_v1_start,
+	.enable = nvkm_falcon_v1_enable,
+	.disable = nvkm_falcon_v1_disable,
+};
+
+static const struct nvkm_nvdec_func
+gm107_nvdec = {
+	.flcn = &gm107_nvdec_flcn,
+};
+
+static int
+gm107_nvdec_nofw(struct nvkm_nvdec *nvdec, int ver,
+		 const struct nvkm_nvdec_fwif *fwif)
+{
+	return 0;
+}
+
+static const struct nvkm_nvdec_fwif
+gm107_nvdec_fwif[] = {
+	{ -1, gm107_nvdec_nofw, &gm107_nvdec },
+	{}
+};
+
+int
+gm107_nvdec_new(struct nvkm_device *device, int index,
+		struct nvkm_nvdec **pnvdec)
+{
+	return nvkm_nvdec_new_(gm107_nvdec_fwif, device, index, pnvdec);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c
deleted file mode 100644
index fde6328..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "priv.h"
-
-int
-gp102_nvdec_new(struct nvkm_device *device, int index,
-		struct nvkm_nvdec **pnvdec)
-{
-	return nvkm_nvdec_new_(device, index, pnvdec);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index 57bfa3a..e14da8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -3,5 +3,17 @@
 #define __NVKM_NVDEC_PRIV_H__
 #include <engine/nvdec.h>
 
-int nvkm_nvdec_new_(struct nvkm_device *, int, struct nvkm_nvdec **);
+struct nvkm_nvdec_func {
+	const struct nvkm_falcon_func *flcn;
+};
+
+struct nvkm_nvdec_fwif {
+	int version;
+	int (*load)(struct nvkm_nvdec *, int ver,
+		    const struct nvkm_nvdec_fwif *);
+	const struct nvkm_nvdec_func *func;
+};
+
+int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif,
+		    struct nvkm_device *, int, struct nvkm_nvdec **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
index f316de8..75bf443 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: MIT
-#nvkm-y += nvkm/engine/nvenc/base.o
+nvkm-y += nvkm/engine/nvenc/base.o
+nvkm-y += nvkm/engine/nvenc/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
new file mode 100644
index 0000000..484100e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include "priv.h"
+#include <core/firmware.h>
+
+static void *
+nvkm_nvenc_dtor(struct nvkm_engine *engine)
+{
+	struct nvkm_nvenc *nvenc = nvkm_nvenc(engine);
+	nvkm_falcon_dtor(&nvenc->falcon);
+	return nvenc;
+}
+
+static const struct nvkm_engine_func
+nvkm_nvenc = {
+	.dtor = nvkm_nvenc_dtor,
+};
+
+int
+nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
+		int index, struct nvkm_nvenc **pnvenc)
+{
+	struct nvkm_nvenc *nvenc;
+	int ret;
+
+	if (!(nvenc = *pnvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL)))
+		return -ENOMEM;
+
+	ret = nvkm_engine_ctor(&nvkm_nvenc, device, index, true,
+			       &nvenc->engine);
+	if (ret)
+		return ret;
+
+	fwif = nvkm_firmware_load(&nvenc->engine.subdev, fwif, "Nvenc", nvenc);
+	if (IS_ERR(fwif))
+		return -ENODEV;
+
+	nvenc->func = fwif->func;
+
+	return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev,
+				nvkm_subdev_name[index], 0, &nvenc->falcon);
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
new file mode 100644
index 0000000..d249c8f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+static const struct nvkm_falcon_func
+gm107_nvenc_flcn = {
+	.fbif = 0x800,
+	.load_imem = nvkm_falcon_v1_load_imem,
+	.load_dmem = nvkm_falcon_v1_load_dmem,
+	.read_dmem = nvkm_falcon_v1_read_dmem,
+	.bind_context = nvkm_falcon_v1_bind_context,
+	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+	.set_start_addr = nvkm_falcon_v1_set_start_addr,
+	.start = nvkm_falcon_v1_start,
+	.enable = nvkm_falcon_v1_enable,
+	.disable = nvkm_falcon_v1_disable,
+};
+
+static const struct nvkm_nvenc_func
+gm107_nvenc = {
+	.flcn = &gm107_nvenc_flcn,
+};
+
+static int
+gm107_nvenc_nofw(struct nvkm_nvenc *nvenc, int ver,
+		 const struct nvkm_nvenc_fwif *fwif)
+{
+	return 0;
+}
+
+static const struct nvkm_nvenc_fwif
+gm107_nvenc_fwif[] = {
+	{ -1, gm107_nvenc_nofw, &gm107_nvenc },
+	{}
+};
+
+int
+gm107_nvenc_new(struct nvkm_device *device, int index,
+		struct nvkm_nvenc **pnvenc)
+{
+	return nvkm_nvenc_new_(gm107_nvenc_fwif, device, index, pnvenc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
new file mode 100644
index 0000000..100fa5e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_NVENC_PRIV_H__
+#define __NVKM_NVENC_PRIV_H__
+#include <engine/nvenc.h>
+
+struct nvkm_nvenc_func {
+	const struct nvkm_falcon_func *flcn;
+};
+
+struct nvkm_nvenc_fwif {
+	int version;
+	int (*load)(struct nvkm_nvenc *, int ver,
+		    const struct nvkm_nvenc_fwif *);
+	const struct nvkm_nvenc_func *func;
+};
+
+int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *,
+		    int, struct nvkm_nvenc **pnvenc);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
index 97c4696..63cd2be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: MIT
 nvkm-y += nvkm/engine/sec2/base.o
 nvkm-y += nvkm/engine/sec2/gp102.o
+nvkm-y += nvkm/engine/sec2/gp108.o
 nvkm-y += nvkm/engine/sec2/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
index 1b49e5b..41318aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
@@ -21,97 +21,98 @@
  */
 #include "priv.h"
 
-#include <core/msgqueue.h>
+#include <core/firmware.h>
 #include <subdev/top.h>
-#include <engine/falcon.h>
-
-static void *
-nvkm_sec2_dtor(struct nvkm_engine *engine)
-{
-	struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
-	nvkm_msgqueue_del(&sec2->queue);
-	nvkm_falcon_del(&sec2->falcon);
-	return sec2;
-}
-
-static void
-nvkm_sec2_intr(struct nvkm_engine *engine)
-{
-	struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
-	struct nvkm_subdev *subdev = &engine->subdev;
-	struct nvkm_device *device = subdev->device;
-	u32 disp = nvkm_rd32(device, sec2->addr + 0x01c);
-	u32 intr = nvkm_rd32(device, sec2->addr + 0x008) & disp & ~(disp >> 16);
-
-	if (intr & 0x00000040) {
-		schedule_work(&sec2->work);
-		nvkm_wr32(device, sec2->addr + 0x004, 0x00000040);
-		intr &= ~0x00000040;
-	}
-
-	if (intr) {
-		nvkm_error(subdev, "unhandled intr %08x\n", intr);
-		nvkm_wr32(device, sec2->addr + 0x004, intr);
-
-	}
-}
 
 static void
 nvkm_sec2_recv(struct work_struct *work)
 {
 	struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work);
 
-	if (!sec2->queue) {
-		nvkm_warn(&sec2->engine.subdev,
-			  "recv function called while no firmware set!\n");
-		return;
+	if (!sec2->initmsg_received) {
+		int ret = sec2->func->initmsg(sec2);
+		if (ret) {
+			nvkm_error(&sec2->engine.subdev,
+				   "error parsing init message: %d\n", ret);
+			return;
+		}
+
+		sec2->initmsg_received = true;
 	}
 
-	nvkm_msgqueue_recv(sec2->queue);
+	nvkm_falcon_msgq_recv(sec2->msgq);
 }
 
-
-static int
-nvkm_sec2_oneinit(struct nvkm_engine *engine)
+static void
+nvkm_sec2_intr(struct nvkm_engine *engine)
 {
 	struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
-	struct nvkm_subdev *subdev = &sec2->engine.subdev;
-
-	if (!sec2->addr) {
-		sec2->addr = nvkm_top_addr(subdev->device, subdev->index);
-		if (WARN_ON(!sec2->addr))
-			return -EINVAL;
-	}
-
-	return nvkm_falcon_v1_new(subdev, "SEC2", sec2->addr, &sec2->falcon);
+	sec2->func->intr(sec2);
 }
 
 static int
 nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend)
 {
 	struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+
 	flush_work(&sec2->work);
+
+	if (suspend) {
+		nvkm_falcon_cmdq_fini(sec2->cmdq);
+		sec2->initmsg_received = false;
+	}
+
 	return 0;
 }
 
+static void *
+nvkm_sec2_dtor(struct nvkm_engine *engine)
+{
+	struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+	nvkm_falcon_msgq_del(&sec2->msgq);
+	nvkm_falcon_cmdq_del(&sec2->cmdq);
+	nvkm_falcon_qmgr_del(&sec2->qmgr);
+	nvkm_falcon_dtor(&sec2->falcon);
+	return sec2;
+}
+
 static const struct nvkm_engine_func
 nvkm_sec2 = {
 	.dtor = nvkm_sec2_dtor,
-	.oneinit = nvkm_sec2_oneinit,
 	.fini = nvkm_sec2_fini,
 	.intr = nvkm_sec2_intr,
 };
 
 int
-nvkm_sec2_new_(struct nvkm_device *device, int index, u32 addr,
-	       struct nvkm_sec2 **psec2)
+nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device,
+	       int index, u32 addr, struct nvkm_sec2 **psec2)
 {
 	struct nvkm_sec2 *sec2;
+	int ret;
 
 	if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
 		return -ENOMEM;
-	sec2->addr = addr;
-	INIT_WORK(&sec2->work, nvkm_sec2_recv);
 
-	return nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
+	ret = nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
+	if (ret)
+		return ret;
+
+	fwif = nvkm_firmware_load(&sec2->engine.subdev, fwif, "Sec2", sec2);
+	if (IS_ERR(fwif))
+		return PTR_ERR(fwif);
+
+	sec2->func = fwif->func;
+
+	ret = nvkm_falcon_ctor(sec2->func->flcn, &sec2->engine.subdev,
+			       nvkm_subdev_name[index], addr, &sec2->falcon);
+	if (ret)
+		return ret;
+
+	if ((ret = nvkm_falcon_qmgr_new(&sec2->falcon, &sec2->qmgr)) ||
+	    (ret = nvkm_falcon_cmdq_new(sec2->qmgr, "cmdq", &sec2->cmdq)) ||
+	    (ret = nvkm_falcon_msgq_new(sec2->qmgr, "msgq", &sec2->msgq)))
+		return ret;
+
+	INIT_WORK(&sec2->work, nvkm_sec2_recv);
+	return 0;
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
index 858cf27..368f2a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
@@ -19,12 +19,316 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-
 #include "priv.h"
 
-int
-gp102_sec2_new(struct nvkm_device *device, int index,
-	       struct nvkm_sec2 **psec2)
+#include <core/memory.h>
+#include <subdev/acr.h>
+#include <subdev/timer.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/sec2.h>
+
+static int
+gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nv_falcon_msg *hdr)
 {
-	return nvkm_sec2_new_(device, index, 0, psec2);
+	struct nv_sec2_acr_bootstrap_falcon_msg *msg =
+		container_of(hdr, typeof(*msg), msg.hdr);
+	struct nvkm_subdev *subdev = priv;
+	const char *name = nvkm_acr_lsf_id(msg->falcon_id);
+
+	if (msg->error_code) {
+		nvkm_error(subdev, "ACR_BOOTSTRAP_FALCON failed for "
+				   "falcon %d [%s]: %08x\n",
+			   msg->falcon_id, name, msg->error_code);
+		return -EINVAL;
+	}
+
+	nvkm_debug(subdev, "%s booted\n", name);
+	return 0;
+}
+
+static int
+gp102_sec2_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
+			        enum nvkm_acr_lsf_id id)
+{
+	struct nvkm_sec2 *sec2 = container_of(falcon, typeof(*sec2), falcon);
+	struct nv_sec2_acr_bootstrap_falcon_cmd cmd = {
+		.cmd.hdr.unit_id = sec2->func->unit_acr,
+		.cmd.hdr.size = sizeof(cmd),
+		.cmd.cmd_type = NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON,
+		.flags = NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
+		.falcon_id = id,
+	};
+
+	return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr,
+				     gp102_sec2_acr_bootstrap_falcon_callback,
+				     &sec2->engine.subdev,
+				     msecs_to_jiffies(1000));
+}
+
+static int
+gp102_sec2_acr_boot(struct nvkm_falcon *falcon)
+{
+	struct nv_sec2_args args = {};
+	nvkm_falcon_load_dmem(falcon, &args,
+			      falcon->func->emem_addr, sizeof(args), 0);
+	nvkm_falcon_start(falcon);
+	return 0;
+}
+
+static void
+gp102_sec2_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+	struct loader_config_v1 hdr;
+	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+	hdr.code_dma_base = hdr.code_dma_base + adjust;
+	hdr.data_dma_base = hdr.data_dma_base + adjust;
+	hdr.overlay_dma_base = hdr.overlay_dma_base + adjust;
+	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+	loader_config_v1_dump(&acr->subdev, &hdr);
+}
+
+static void
+gp102_sec2_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+			 struct nvkm_acr_lsfw *lsfw)
+{
+	const struct loader_config_v1 hdr = {
+		.dma_idx = FALCON_SEC2_DMAIDX_UCODE,
+		.code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
+		.code_size_total = lsfw->app_size,
+		.code_size_to_load = lsfw->app_resident_code_size,
+		.code_entry_point = lsfw->app_imem_entry,
+		.data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
+				 lsfw->app_resident_data_offset,
+		.data_size = lsfw->app_resident_data_size,
+		.overlay_dma_base = lsfw->offset.img + lsfw->app_start_offset,
+		.argc = 1,
+		.argv = lsfw->falcon->func->emem_addr,
+	};
+
+	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+static const struct nvkm_acr_lsf_func
+gp102_sec2_acr_0 = {
+	.bld_size = sizeof(struct loader_config_v1),
+	.bld_write = gp102_sec2_acr_bld_write,
+	.bld_patch = gp102_sec2_acr_bld_patch,
+	.boot = gp102_sec2_acr_boot,
+	.bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
+};
+
+int
+gp102_sec2_initmsg(struct nvkm_sec2 *sec2)
+{
+	struct nv_sec2_init_msg msg;
+	int ret, i;
+
+	ret = nvkm_falcon_msgq_recv_initmsg(sec2->msgq, &msg, sizeof(msg));
+	if (ret)
+		return ret;
+
+	if (msg.hdr.unit_id != NV_SEC2_UNIT_INIT ||
+	    msg.msg_type != NV_SEC2_INIT_MSG_INIT)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(msg.queue_info); i++) {
+		if (msg.queue_info[i].id == NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ) {
+			nvkm_falcon_msgq_init(sec2->msgq,
+					      msg.queue_info[i].index,
+					      msg.queue_info[i].offset,
+					      msg.queue_info[i].size);
+		} else {
+			nvkm_falcon_cmdq_init(sec2->cmdq,
+					      msg.queue_info[i].index,
+					      msg.queue_info[i].offset,
+					      msg.queue_info[i].size);
+		}
+	}
+
+	return 0;
+}
+
+void
+gp102_sec2_intr(struct nvkm_sec2 *sec2)
+{
+	struct nvkm_subdev *subdev = &sec2->engine.subdev;
+	struct nvkm_falcon *falcon = &sec2->falcon;
+	u32 disp = nvkm_falcon_rd32(falcon, 0x01c);
+	u32 intr = nvkm_falcon_rd32(falcon, 0x008) & disp & ~(disp >> 16);
+
+	if (intr & 0x00000040) {
+		schedule_work(&sec2->work);
+		nvkm_falcon_wr32(falcon, 0x004, 0x00000040);
+		intr &= ~0x00000040;
+	}
+
+	if (intr) {
+		nvkm_error(subdev, "unhandled intr %08x\n", intr);
+		nvkm_falcon_wr32(falcon, 0x004, intr);
+	}
+}
+
+int
+gp102_sec2_flcn_enable(struct nvkm_falcon *falcon)
+{
+	nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000001);
+	udelay(10);
+	nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000000);
+	return nvkm_falcon_v1_enable(falcon);
+}
+
+void
+gp102_sec2_flcn_bind_context(struct nvkm_falcon *falcon,
+			     struct nvkm_memory *ctx)
+{
+	struct nvkm_device *device = falcon->owner->device;
+
+	nvkm_falcon_v1_bind_context(falcon, ctx);
+	if (!ctx)
+		return;
+
+	/* Not sure if this is a WAR for a HW issue, or some additional
+	 * programming sequence that's needed to properly complete the
+	 * context switch we trigger above.
+	 *
+	 * Fixes unreliability of booting the SEC2 RTOS on Quadro P620,
+	 * particularly when resuming from suspend.
+	 *
+	 * Also removes the need for an odd workaround where we needed
+	 * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before
+	 * the SEC2 RTOS would begin executing.
+	 */
+	nvkm_msec(device, 10,
+		u32 irqstat = nvkm_falcon_rd32(falcon, 0x008);
+		u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
+		if ((irqstat & 0x00000008) &&
+		    (flcn0dc & 0x00007000) == 0x00005000)
+			break;
+	);
+
+	nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
+	nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);
+
+	nvkm_msec(device, 10,
+		u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
+		if ((flcn0dc & 0x00007000) == 0x00000000)
+			break;
+	);
+}
+
+static const struct nvkm_falcon_func
+gp102_sec2_flcn = {
+	.debug = 0x408,
+	.fbif = 0x600,
+	.load_imem = nvkm_falcon_v1_load_imem,
+	.load_dmem = nvkm_falcon_v1_load_dmem,
+	.read_dmem = nvkm_falcon_v1_read_dmem,
+	.emem_addr = 0x01000000,
+	.bind_context = gp102_sec2_flcn_bind_context,
+	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+	.set_start_addr = nvkm_falcon_v1_set_start_addr,
+	.start = nvkm_falcon_v1_start,
+	.enable = gp102_sec2_flcn_enable,
+	.disable = nvkm_falcon_v1_disable,
+	.cmdq = { 0xa00, 0xa04, 8 },
+	.msgq = { 0xa30, 0xa34, 8 },
+};
+
+const struct nvkm_sec2_func
+gp102_sec2 = {
+	.flcn = &gp102_sec2_flcn,
+	.unit_acr = NV_SEC2_UNIT_ACR,
+	.intr = gp102_sec2_intr,
+	.initmsg = gp102_sec2_initmsg,
+};
+
+MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
+
+static void
+gp102_sec2_acr_bld_patch_1(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+	struct flcn_bl_dmem_desc_v2 hdr;
+	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+	hdr.code_dma_base = hdr.code_dma_base + adjust;
+	hdr.data_dma_base = hdr.data_dma_base + adjust;
+	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+	flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr);
+}
+
+static void
+gp102_sec2_acr_bld_write_1(struct nvkm_acr *acr, u32 bld,
+			   struct nvkm_acr_lsfw *lsfw)
+{
+	const struct flcn_bl_dmem_desc_v2 hdr = {
+		.ctx_dma = FALCON_SEC2_DMAIDX_UCODE,
+		.code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
+		.non_sec_code_off = lsfw->app_resident_code_offset,
+		.non_sec_code_size = lsfw->app_resident_code_size,
+		.code_entry_point = lsfw->app_imem_entry,
+		.data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
+				 lsfw->app_resident_data_offset,
+		.data_size = lsfw->app_resident_data_size,
+		.argc = 1,
+		.argv = lsfw->falcon->func->emem_addr,
+	};
+
+	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gp102_sec2_acr_1 = {
+	.bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
+	.bld_write = gp102_sec2_acr_bld_write_1,
+	.bld_patch = gp102_sec2_acr_bld_patch_1,
+	.boot = gp102_sec2_acr_boot,
+	.bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
+};
+
+int
+gp102_sec2_load(struct nvkm_sec2 *sec2, int ver,
+		const struct nvkm_sec2_fwif *fwif)
+{
+	return nvkm_acr_lsfw_load_sig_image_desc_v1(&sec2->engine.subdev,
+						    &sec2->falcon,
+						    NVKM_ACR_LSF_SEC2, "sec2/",
+						    ver, fwif->acr);
+}
+
+MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
+
+static const struct nvkm_sec2_fwif
+gp102_sec2_fwif[] = {
+	{ 1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
+	{ 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 },
+	{}
+};
+
+int
+gp102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
+{
+	return nvkm_sec2_new_(gp102_sec2_fwif, device, index, 0, psec2);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
new file mode 100644
index 0000000..232a9d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include <subdev/acr.h>
+
+MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
+
+static const struct nvkm_sec2_fwif
+gp108_sec2_fwif[] = {
+	{ 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
+	{}
+};
+
+int
+gp108_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
+{
+	return nvkm_sec2_new_(gp108_sec2_fwif, device, index, 0, psec2);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
index b331b00..bb88117 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -3,7 +3,27 @@
 #define __NVKM_SEC2_PRIV_H__
 #include <engine/sec2.h>
 
-#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine)
+struct nvkm_sec2_func {
+	const struct nvkm_falcon_func *flcn;
+	u8 unit_acr;
+	void (*intr)(struct nvkm_sec2 *);
+	int (*initmsg)(struct nvkm_sec2 *);
+};
 
-int nvkm_sec2_new_(struct nvkm_device *, int, u32 addr, struct nvkm_sec2 **);
+void gp102_sec2_intr(struct nvkm_sec2 *);
+int gp102_sec2_initmsg(struct nvkm_sec2 *);
+
+struct nvkm_sec2_fwif {
+	int version;
+	int (*load)(struct nvkm_sec2 *, int ver, const struct nvkm_sec2_fwif *);
+	const struct nvkm_sec2_func *func;
+	const struct nvkm_acr_lsf_func *acr;
+};
+
+int gp102_sec2_load(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *);
+extern const struct nvkm_sec2_func gp102_sec2;
+extern const struct nvkm_acr_lsf_func gp102_sec2_acr_1;
+
+int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *,
+		   int, u32 addr, struct nvkm_sec2 **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
index d655576..b6ebd95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
@@ -19,15 +19,54 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  */
-
 #include "priv.h"
+#include <subdev/acr.h>
+
+static const struct nvkm_falcon_func
+tu102_sec2_flcn = {
+	.debug = 0x408,
+	.fbif = 0x600,
+	.load_imem = nvkm_falcon_v1_load_imem,
+	.load_dmem = nvkm_falcon_v1_load_dmem,
+	.read_dmem = nvkm_falcon_v1_read_dmem,
+	.emem_addr = 0x01000000,
+	.bind_context = gp102_sec2_flcn_bind_context,
+	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+	.set_start_addr = nvkm_falcon_v1_set_start_addr,
+	.start = nvkm_falcon_v1_start,
+	.enable = nvkm_falcon_v1_enable,
+	.disable = nvkm_falcon_v1_disable,
+	.cmdq = { 0xc00, 0xc04, 8 },
+	.msgq = { 0xc80, 0xc84, 8 },
+};
+
+static const struct nvkm_sec2_func
+tu102_sec2 = {
+	.flcn = &tu102_sec2_flcn,
+	.unit_acr = 0x07,
+	.intr = gp102_sec2_intr,
+	.initmsg = gp102_sec2_initmsg,
+};
+
+static int
+tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
+		const struct nvkm_sec2_fwif *fwif)
+{
+	return 0;
+}
+
+static const struct nvkm_sec2_fwif
+tu102_sec2_fwif[] = {
+	{  0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 },
+	{ -1, tu102_sec2_nofw, &tu102_sec2 }
+};
 
 int
-tu102_sec2_new(struct nvkm_device *device, int index,
-	       struct nvkm_sec2 **psec2)
+tu102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
 {
 	/* TOP info wasn't updated on Turing to reflect the PRI
 	 * address change for some reason.  We override it here.
 	 */
-	return nvkm_sec2_new_(device, index, 0x840000, psec2);
+	return nvkm_sec2_new_(tu102_sec2_fwif, device, index, 0x840000, psec2);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
index b5665ad..d79d783 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: MIT
 nvkm-y += nvkm/falcon/base.o
+nvkm-y += nvkm/falcon/cmdq.o
+nvkm-y += nvkm/falcon/msgq.o
+nvkm-y += nvkm/falcon/qmgr.o
 nvkm-y += nvkm/falcon/v1.o
-nvkm-y += nvkm/falcon/msgqueue.o
-nvkm-y += nvkm/falcon/msgqueue_0137c63d.o
-nvkm-y += nvkm/falcon/msgqueue_0148cdec.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 366c87d..c6a3448 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -22,6 +22,7 @@
 #include "priv.h"
 
 #include <subdev/mc.h>
+#include <subdev/top.h>
 
 void
 nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
@@ -134,6 +135,37 @@ nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
 	return falcon->func->clear_interrupt(falcon, mask);
 }
 
+static int
+nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
+{
+	const struct nvkm_falcon_func *func = falcon->func;
+	const struct nvkm_subdev *subdev = falcon->owner;
+	u32 reg;
+
+	if (!falcon->addr) {
+		falcon->addr = nvkm_top_addr(subdev->device, subdev->index);
+		if (WARN_ON(!falcon->addr))
+			return -ENODEV;
+	}
+
+	reg = nvkm_falcon_rd32(falcon, 0x12c);
+	falcon->version = reg & 0xf;
+	falcon->secret = (reg >> 4) & 0x3;
+	falcon->code.ports = (reg >> 8) & 0xf;
+	falcon->data.ports = (reg >> 12) & 0xf;
+
+	reg = nvkm_falcon_rd32(falcon, 0x108);
+	falcon->code.limit = (reg & 0x1ff) << 8;
+	falcon->data.limit = (reg & 0x3fe00) >> 1;
+
+	if (func->debug) {
+		u32 val = nvkm_falcon_rd32(falcon, func->debug);
+		falcon->debug = (val >> 20) & 0x1;
+	}
+
+	return 0;
+}
+
 void
 nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
 {
@@ -151,6 +183,8 @@ nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
 int
 nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
 {
+	int ret = 0;
+
 	mutex_lock(&falcon->mutex);
 	if (falcon->user) {
 		nvkm_error(user, "%s falcon already acquired by %s!\n",
@@ -160,70 +194,37 @@ nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
 	}
 
 	nvkm_debug(user, "acquired %s falcon\n", falcon->name);
+	if (!falcon->oneinit)
+		ret = nvkm_falcon_oneinit(falcon);
 	falcon->user = user;
 	mutex_unlock(&falcon->mutex);
-	return 0;
+	return ret;
 }
 
 void
+nvkm_falcon_dtor(struct nvkm_falcon *falcon)
+{
+}
+
+int
 nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
 		 struct nvkm_subdev *subdev, const char *name, u32 addr,
 		 struct nvkm_falcon *falcon)
 {
-	u32 debug_reg;
-	u32 reg;
-
 	falcon->func = func;
 	falcon->owner = subdev;
 	falcon->name = name;
 	falcon->addr = addr;
 	mutex_init(&falcon->mutex);
 	mutex_init(&falcon->dmem_mutex);
-
-	reg = nvkm_falcon_rd32(falcon, 0x12c);
-	falcon->version = reg & 0xf;
-	falcon->secret = (reg >> 4) & 0x3;
-	falcon->code.ports = (reg >> 8) & 0xf;
-	falcon->data.ports = (reg >> 12) & 0xf;
-
-	reg = nvkm_falcon_rd32(falcon, 0x108);
-	falcon->code.limit = (reg & 0x1ff) << 8;
-	falcon->data.limit = (reg & 0x3fe00) >> 1;
-
-	switch (subdev->index) {
-	case NVKM_ENGINE_GR:
-		debug_reg = 0x0;
-		break;
-	case NVKM_SUBDEV_PMU:
-		debug_reg = 0xc08;
-		break;
-	case NVKM_ENGINE_NVDEC0:
-		debug_reg = 0xd00;
-		break;
-	case NVKM_ENGINE_SEC2:
-		debug_reg = 0x408;
-		falcon->has_emem = true;
-		break;
-	case NVKM_SUBDEV_GSP:
-		debug_reg = 0x0; /*XXX*/
-		break;
-	default:
-		nvkm_warn(subdev, "unsupported falcon %s!\n",
-			  nvkm_subdev_name[subdev->index]);
-		debug_reg = 0;
-		break;
-	}
-
-	if (debug_reg) {
-		u32 val = nvkm_falcon_rd32(falcon, debug_reg);
-		falcon->debug = (val >> 20) & 0x1;
-	}
+	return 0;
 }
 
 void
 nvkm_falcon_del(struct nvkm_falcon **pfalcon)
 {
 	if (*pfalcon) {
+		nvkm_falcon_dtor(*pfalcon);
 		kfree(*pfalcon);
 		*pfalcon = NULL;
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
new file mode 100644
index 0000000..40e3f3f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+static bool
+nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind)
+{
+	u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);
+	u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);
+	u32 free;
+
+	size = ALIGN(size, QUEUE_ALIGNMENT);
+
+	if (head >= tail) {
+		free = cmdq->offset + cmdq->size - head;
+		free -= HDR_SIZE;
+
+		if (size > free) {
+			*rewind = true;
+			head = cmdq->offset;
+		}
+	}
+
+	if (head < tail)
+		free = tail - head - 1;
+
+	return size <= free;
+}
+
+static void
+nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
+{
+	struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
+	nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0);
+	cmdq->position += ALIGN(size, QUEUE_ALIGNMENT);
+}
+
+static void
+nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
+{
+	struct nv_falcon_cmd cmd;
+
+	cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND;
+	cmd.size = sizeof(cmd);
+	nvkm_falcon_cmdq_push(cmdq, &cmd, cmd.size);
+
+	cmdq->position = cmdq->offset;
+}
+
+static int
+nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq *cmdq, u32 size)
+{
+	struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
+	bool rewind = false;
+
+	mutex_lock(&cmdq->mutex);
+
+	if (!nvkm_falcon_cmdq_has_room(cmdq, size, &rewind)) {
+		FLCNQ_DBG(cmdq, "queue full");
+		mutex_unlock(&cmdq->mutex);
+		return -EAGAIN;
+	}
+
+	cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg);
+
+	if (rewind)
+		nvkm_falcon_cmdq_rewind(cmdq);
+
+	return 0;
+}
+
+static void
+nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq)
+{
+	nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position);
+	mutex_unlock(&cmdq->mutex);
+}
+
+static int
+nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd)
+{
+	static unsigned timeout = 2000;
+	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+	int ret = -EAGAIN;
+
+	while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
+		ret = nvkm_falcon_cmdq_open(cmdq, cmd->size);
+	if (ret) {
+		FLCNQ_ERR(cmdq, "timeout waiting for queue space");
+		return ret;
+	}
+
+	nvkm_falcon_cmdq_push(cmdq, cmd, cmd->size);
+	nvkm_falcon_cmdq_close(cmdq);
+	return ret;
+}
+
+/* specifies that we want to know the command status in the answer message */
+#define CMD_FLAGS_STATUS BIT(0)
+/* specifies that we want an interrupt when the answer message is queued */
+#define CMD_FLAGS_INTR BIT(1)
+
+int
+nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd,
+		      nvkm_falcon_qmgr_callback cb, void *priv,
+		      unsigned long timeout)
+{
+	struct nvkm_falcon_qmgr_seq *seq;
+	int ret;
+
+	if (!wait_for_completion_timeout(&cmdq->ready,
+					 msecs_to_jiffies(1000))) {
+		FLCNQ_ERR(cmdq, "timeout waiting for queue ready");
+		return -ETIMEDOUT;
+	}
+
+	seq = nvkm_falcon_qmgr_seq_acquire(cmdq->qmgr);
+	if (IS_ERR(seq))
+		return PTR_ERR(seq);
+
+	cmd->seq_id = seq->id;
+	cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
+
+	seq->state = SEQ_STATE_USED;
+	seq->async = !timeout;
+	seq->callback = cb;
+	seq->priv = priv;
+
+	ret = nvkm_falcon_cmdq_write(cmdq, cmd);
+	if (ret) {
+		seq->state = SEQ_STATE_PENDING;
+		nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
+		return ret;
+	}
+
+	if (!seq->async) {
+		if (!wait_for_completion_timeout(&seq->done, timeout)) {
+			FLCNQ_ERR(cmdq, "timeout waiting for reply");
+			return -ETIMEDOUT;
+		}
+		ret = seq->result;
+		nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
+	}
+
+	return ret;
+}
+
+void
+nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *cmdq)
+{
+	reinit_completion(&cmdq->ready);
+}
+
+void
+nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *cmdq,
+		      u32 index, u32 offset, u32 size)
+{
+	const struct nvkm_falcon_func *func = cmdq->qmgr->falcon->func;
+
+	cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride;
+	cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride;
+	cmdq->offset = offset;
+	cmdq->size = size;
+	complete_all(&cmdq->ready);
+
+	FLCNQ_DBG(cmdq, "initialised @ index %d offset 0x%08x size 0x%08x",
+		  index, cmdq->offset, cmdq->size);
+}
+
+void
+nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **pcmdq)
+{
+	struct nvkm_falcon_cmdq *cmdq = *pcmdq;
+	if (cmdq) {
+		kfree(*pcmdq);
+		*pcmdq = NULL;
+	}
+}
+
+int
+nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
+		     struct nvkm_falcon_cmdq **pcmdq)
+{
+	struct nvkm_falcon_cmdq *cmdq = *pcmdq;
+
+	if (!(cmdq = *pcmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL)))
+		return -ENOMEM;
+
+	cmdq->qmgr = qmgr;
+	cmdq->name = name;
+	mutex_init(&cmdq->mutex);
+	init_completion(&cmdq->ready);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
new file mode 100644
index 0000000..cbfe09a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+static void
+nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq)
+{
+	mutex_lock(&msgq->mutex);
+	msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
+}
+
+static void
+nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit)
+{
+	struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+
+	if (commit)
+		nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position);
+
+	mutex_unlock(&msgq->mutex);
+}
+
+static bool
+nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq)
+{
+	u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg);
+	u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
+	return head == tail;
+}
+
+static int
+nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
+{
+	struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+	u32 head, tail, available;
+
+	head = nvkm_falcon_rd32(falcon, msgq->head_reg);
+	/* has the buffer looped? */
+	if (head < msgq->position)
+		msgq->position = msgq->offset;
+
+	tail = msgq->position;
+
+	available = head - tail;
+	if (size > available) {
+		FLCNQ_ERR(msgq, "requested %d bytes, but only %d available",
+			  size, available);
+		return -EINVAL;
+	}
+
+	nvkm_falcon_read_dmem(falcon, tail, size, 0, data);
+	msgq->position += ALIGN(size, QUEUE_ALIGNMENT);
+	return 0;
+}
+
+static int
+nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
+{
+	int ret = 0;
+
+	nvkm_falcon_msgq_open(msgq);
+
+	if (nvkm_falcon_msgq_empty(msgq))
+		goto close;
+
+	ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE);
+	if (ret) {
+		FLCNQ_ERR(msgq, "failed to read message header");
+		goto close;
+	}
+
+	if (hdr->size > MSG_BUF_SIZE) {
+		FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size);
+		ret = -ENOSPC;
+		goto close;
+	}
+
+	if (hdr->size > HDR_SIZE) {
+		u32 read_size = hdr->size - HDR_SIZE;
+
+		ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size);
+		if (ret) {
+			FLCNQ_ERR(msgq, "failed to read message data");
+			goto close;
+		}
+	}
+
+	ret = 1;
+close:
+	nvkm_falcon_msgq_close(msgq, (ret >= 0));
+	return ret;
+}
+
+static int
+nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
+{
+	struct nvkm_falcon_qmgr_seq *seq;
+
+	seq = &msgq->qmgr->seq.id[hdr->seq_id];
+	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
+		FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id);
+		return -EINVAL;
+	}
+
+	if (seq->state == SEQ_STATE_USED) {
+		if (seq->callback)
+			seq->result = seq->callback(seq->priv, hdr);
+	}
+
+	if (seq->async) {
+		nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
+		return 0;
+	}
+
+	complete_all(&seq->done);
+	return 0;
+}
+
+void
+nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
+{
+	/*
+	 * We are invoked from a worker thread, so normally we have plenty of
+	 * stack space to work with.
+	 */
+	u8 msg_buffer[MSG_BUF_SIZE];
+	struct nv_falcon_msg *hdr = (void *)msg_buffer;
+
+	while (nvkm_falcon_msgq_read(msgq, hdr) > 0)
+		nvkm_falcon_msgq_exec(msgq, hdr);
+}
+
+int
+nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq,
+			      void *data, u32 size)
+{
+	struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+	struct nv_falcon_msg *hdr = data;
+	int ret;
+
+	msgq->head_reg = falcon->func->msgq.head;
+	msgq->tail_reg = falcon->func->msgq.tail;
+	msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail);
+
+	nvkm_falcon_msgq_open(msgq);
+	ret = nvkm_falcon_msgq_pop(msgq, data, size);
+	if (ret == 0 && hdr->size != size) {
+		FLCN_ERR(falcon, "unexpected init message size %d vs %d",
+			 hdr->size, size);
+		ret = -EINVAL;
+	}
+	nvkm_falcon_msgq_close(msgq, ret == 0);
+	return ret;
+}
+
+void
+nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq,
+		      u32 index, u32 offset, u32 size)
+{
+	const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func;
+
+	msgq->head_reg = func->msgq.head + index * func->msgq.stride;
+	msgq->tail_reg = func->msgq.tail + index * func->msgq.stride;
+	msgq->offset = offset;
+
+	FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x",
+		  index, msgq->offset, size);
+}
+
+void
+nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq)
+{
+	struct nvkm_falcon_msgq *msgq = *pmsgq;
+	if (msgq) {
+		kfree(*pmsgq);
+		*pmsgq = NULL;
+	}
+}
+
+int
+nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
+		     struct nvkm_falcon_msgq **pmsgq)
+{
+	struct nvkm_falcon_msgq *msgq = *pmsgq;
+
+	if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL)))
+		return -ENOMEM;
+
+	msgq->qmgr = qmgr;
+	msgq->name = name;
+	mutex_init(&msgq->mutex);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
deleted file mode 100644
index a8bee1e..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ /dev/null
@@ -1,577 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "msgqueue.h"
-#include <engine/falcon.h>
-
-#include <subdev/secboot.h>
-
-
-#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr)
-#define QUEUE_ALIGNMENT 4
-/* max size of the messages we can receive */
-#define MSG_BUF_SIZE 128
-
-static int
-msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-
-	mutex_lock(&queue->mutex);
-
-	queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
-	return 0;
-}
-
-static void
-msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
-		bool commit)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-
-	if (commit)
-		nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);
-
-	mutex_unlock(&queue->mutex);
-}
-
-static bool
-msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-	u32 head, tail;
-
-	head = nvkm_falcon_rd32(falcon, queue->head_reg);
-	tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
-	return head == tail;
-}
-
-static int
-msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
-	      void *data, u32 size)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	u32 head, tail, available;
-
-	head = nvkm_falcon_rd32(falcon, queue->head_reg);
-	/* has the buffer looped? */
-	if (head < queue->position)
-		queue->position = queue->offset;
-
-	tail = queue->position;
-
-	available = head - tail;
-
-	if (available == 0) {
-		nvkm_warn(subdev, "no message data available\n");
-		return 0;
-	}
-
-	if (size > available) {
-		nvkm_warn(subdev, "message data smaller than read request\n");
-		size = available;
-	}
-
-	nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data);
-	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
-
-	return size;
-}
-
-static int
-msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
-	       struct nvkm_msgqueue_hdr *hdr)
-{
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	int err;
-
-	err = msg_queue_open(priv, queue);
-	if (err) {
-		nvkm_error(subdev, "fail to open queue %d\n", queue->index);
-		return err;
-	}
-
-	if (msg_queue_empty(priv, queue)) {
-		err = 0;
-		goto close;
-	}
-
-	err = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
-	if (err >= 0 && err != HDR_SIZE)
-		err = -EINVAL;
-	if (err < 0) {
-		nvkm_error(subdev, "failed to read message header: %d\n", err);
-		goto close;
-	}
-
-	if (hdr->size > MSG_BUF_SIZE) {
-		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
-		err = -ENOSPC;
-		goto close;
-	}
-
-	if (hdr->size > HDR_SIZE) {
-		u32 read_size = hdr->size - HDR_SIZE;
-
-		err = msg_queue_pop(priv, queue, (hdr + 1), read_size);
-		if (err >= 0 && err != read_size)
-			err = -EINVAL;
-		if (err < 0) {
-			nvkm_error(subdev, "failed to read message: %d\n", err);
-			goto close;
-		}
-	}
-
-close:
-	msg_queue_close(priv, queue, (err >= 0));
-
-	return err;
-}
-
-static bool
-cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
-		   u32 size, bool *rewind)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-	u32 head, tail, free;
-
-	size = ALIGN(size, QUEUE_ALIGNMENT);
-
-	head = nvkm_falcon_rd32(falcon, queue->head_reg);
-	tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
-	if (head >= tail) {
-		free = queue->offset + queue->size - head;
-		free -= HDR_SIZE;
-
-		if (size > free) {
-			*rewind = true;
-			head = queue->offset;
-		}
-	}
-
-	if (head < tail)
-		free = tail - head - 1;
-
-	return size <= free;
-}
-
-static int
-cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
-	       void *data, u32 size)
-{
-	nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0);
-	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
-
-	return 0;
-}
-
-/* REWIND unit is always 0x00 */
-#define MSGQUEUE_UNIT_REWIND 0x00
-
-static void
-cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	struct nvkm_msgqueue_hdr cmd;
-	int err;
-
-	cmd.unit_id = MSGQUEUE_UNIT_REWIND;
-	cmd.size = sizeof(cmd);
-	err = cmd_queue_push(priv, queue, &cmd, cmd.size);
-	if (err)
-		nvkm_error(subdev, "queue %d rewind failed\n", queue->index);
-	else
-		nvkm_error(subdev, "queue %d rewinded\n", queue->index);
-
-	queue->position = queue->offset;
-}
-
-static int
-cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
-	       u32 size)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	bool rewind = false;
-
-	mutex_lock(&queue->mutex);
-
-	if (!cmd_queue_has_room(priv, queue, size, &rewind)) {
-		nvkm_error(subdev, "queue full\n");
-		mutex_unlock(&queue->mutex);
-		return -EAGAIN;
-	}
-
-	queue->position = nvkm_falcon_rd32(falcon, queue->head_reg);
-
-	if (rewind)
-		cmd_queue_rewind(priv, queue);
-
-	return 0;
-}
-
-static void
-cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
-		bool commit)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-
-	if (commit)
-		nvkm_falcon_wr32(falcon, queue->head_reg, queue->position);
-
-	mutex_unlock(&queue->mutex);
-}
-
-static int
-cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
-	  struct nvkm_msgqueue_queue *queue)
-{
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	static unsigned timeout = 2000;
-	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
-	int ret = -EAGAIN;
-	bool commit = true;
-
-	while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
-		ret = cmd_queue_open(priv, queue, cmd->size);
-	if (ret) {
-		nvkm_error(subdev, "pmu_queue_open_write failed\n");
-		return ret;
-	}
-
-	ret = cmd_queue_push(priv, queue, cmd, cmd->size);
-	if (ret) {
-		nvkm_error(subdev, "pmu_queue_push failed\n");
-		commit = false;
-	}
-
-	cmd_queue_close(priv, queue, commit);
-
-	return ret;
-}
-
-static struct nvkm_msgqueue_seq *
-msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
-{
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	struct nvkm_msgqueue_seq *seq;
-	u32 index;
-
-	mutex_lock(&priv->seq_lock);
-
-	index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);
-
-	if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
-		nvkm_error(subdev, "no free sequence available\n");
-		mutex_unlock(&priv->seq_lock);
-		return ERR_PTR(-EAGAIN);
-	}
-
-	set_bit(index, priv->seq_tbl);
-
-	mutex_unlock(&priv->seq_lock);
-
-	seq = &priv->seq[index];
-	seq->state = SEQ_STATE_PENDING;
-
-	return seq;
-}
-
-static void
-msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
-{
-	/* no need to acquire seq_lock since clear_bit is atomic */
-	seq->state = SEQ_STATE_FREE;
-	seq->callback = NULL;
-	seq->completion = NULL;
-	clear_bit(seq->id, priv->seq_tbl);
-}
-
-/* specifies that we want to know the command status in the answer message */
-#define CMD_FLAGS_STATUS BIT(0)
-/* specifies that we want an interrupt when the answer message is queued */
-#define CMD_FLAGS_INTR BIT(1)
-
-int
-nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
-		   struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb,
-		   struct completion *completion, bool wait_init)
-{
-	struct nvkm_msgqueue_seq *seq;
-	struct nvkm_msgqueue_queue *queue;
-	int ret;
-
-	if (wait_init && !wait_for_completion_timeout(&priv->init_done,
-					 msecs_to_jiffies(1000)))
-		return -ETIMEDOUT;
-
-	queue = priv->func->cmd_queue(priv, prio);
-	if (IS_ERR(queue))
-		return PTR_ERR(queue);
-
-	seq = msgqueue_seq_acquire(priv);
-	if (IS_ERR(seq))
-		return PTR_ERR(seq);
-
-	cmd->seq_id = seq->id;
-	cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
-
-	seq->callback = cb;
-	seq->state = SEQ_STATE_USED;
-	seq->completion = completion;
-
-	ret = cmd_write(priv, cmd, queue);
-	if (ret) {
-		seq->state = SEQ_STATE_PENDING;
-		msgqueue_seq_release(priv, seq);
-	}
-
-	return ret;
-}
-
-static int
-msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
-{
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	struct nvkm_msgqueue_seq *seq;
-
-	seq = &priv->seq[hdr->seq_id];
-	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
-		nvkm_error(subdev, "msg for unknown sequence %d", seq->id);
-		return -EINVAL;
-	}
-
-	if (seq->state == SEQ_STATE_USED) {
-		if (seq->callback)
-			seq->callback(priv, hdr);
-	}
-
-	if (seq->completion)
-		complete(seq->completion);
-
-	msgqueue_seq_release(priv, seq);
-
-	return 0;
-}
-
-static int
-msgqueue_handle_init_msg(struct nvkm_msgqueue *priv,
-			 struct nvkm_msgqueue_hdr *hdr)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-	const struct nvkm_subdev *subdev = falcon->owner;
-	u32 tail;
-	u32 tail_reg;
-	int ret;
-
-	/*
-	 * Of course the message queue registers vary depending on the falcon
-	 * used...
-	 */
-	switch (falcon->owner->index) {
-	case NVKM_SUBDEV_PMU:
-		tail_reg = 0x4cc;
-		break;
-	case NVKM_ENGINE_SEC2:
-		tail_reg = 0xa34;
-		break;
-	default:
-		nvkm_error(subdev, "falcon %s unsupported for msgqueue!\n",
-			   nvkm_subdev_name[falcon->owner->index]);
-		return -EINVAL;
-	}
-
-	/*
-	 * Read the message - queues are not initialized yet so we cannot rely
-	 * on msg_queue_read()
-	 */
-	tail = nvkm_falcon_rd32(falcon, tail_reg);
-	nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr);
-
-	if (hdr->size > MSG_BUF_SIZE) {
-		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
-		return -ENOSPC;
-	}
-
-	nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0,
-			      (hdr + 1));
-
-	tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
-	nvkm_falcon_wr32(falcon, tail_reg, tail);
-
-	ret = priv->func->init_func->init_callback(priv, hdr);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-void
-nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
-			   struct nvkm_msgqueue_queue *queue)
-{
-	/*
-	 * We are invoked from a worker thread, so normally we have plenty of
-	 * stack space to work with.
-	 */
-	u8 msg_buffer[MSG_BUF_SIZE];
-	struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer;
-	int ret;
-
-	/* the first message we receive must be the init message */
-	if ((!priv->init_msg_received)) {
-		ret = msgqueue_handle_init_msg(priv, hdr);
-		if (!ret)
-			priv->init_msg_received = true;
-	} else {
-		while (msg_queue_read(priv, queue, hdr) > 0)
-			msgqueue_msg_handle(priv, hdr);
-	}
-}
-
-void
-nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
-{
-	if (!queue || !queue->func || !queue->func->init_func)
-		return;
-
-	queue->func->init_func->gen_cmdline(queue, buf);
-}
-
-int
-nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue,
-			       unsigned long falcon_mask)
-{
-	unsigned long falcon;
-
-	if (!queue || !queue->func->acr_func)
-		return -ENODEV;
-
-	/* Does the firmware support booting multiple falcons? */
-	if (queue->func->acr_func->boot_multiple_falcons)
-		return queue->func->acr_func->boot_multiple_falcons(queue,
-								   falcon_mask);
-
-	/* Else boot all requested falcons individually */
-	if (!queue->func->acr_func->boot_falcon)
-		return -ENODEV;
-
-	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
-		int ret = queue->func->acr_func->boot_falcon(queue, falcon);
-
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-int
-nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
-		  const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue)
-{
-	const struct nvkm_subdev *subdev = falcon->owner;
-	int ret = -EINVAL;
-
-	switch (version) {
-	case 0x0137c63d:
-		ret = msgqueue_0137c63d_new(falcon, sb, queue);
-		break;
-	case 0x0137bca5:
-		ret = msgqueue_0137bca5_new(falcon, sb, queue);
-		break;
-	case 0x0148cdec:
-	case 0x015ccf3e:
-	case 0x0167d263:
-		ret = msgqueue_0148cdec_new(falcon, sb, queue);
-		break;
-	default:
-		nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
-			   version);
-		break;
-	}
-
-	if (ret == 0) {
-		nvkm_debug(subdev, "firmware version: 0x%08x\n", version);
-		(*queue)->fw_version = version;
-	}
-
-	return ret;
-}
-
-void
-nvkm_msgqueue_del(struct nvkm_msgqueue **queue)
-{
-	if (*queue) {
-		(*queue)->func->dtor(*queue);
-		*queue = NULL;
-	}
-}
-
-void
-nvkm_msgqueue_recv(struct nvkm_msgqueue *queue)
-{
-	if (!queue->func || !queue->func->recv) {
-		const struct nvkm_subdev *subdev = queue->falcon->owner;
-
-		nvkm_warn(subdev, "missing msgqueue recv function\n");
-		return;
-	}
-
-	queue->func->recv(queue);
-}
-
-int
-nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue)
-{
-	/* firmware not set yet... */
-	if (!queue)
-		return 0;
-
-	queue->init_msg_received = false;
-	reinit_completion(&queue->init_done);
-
-	return 0;
-}
-
-void
-nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func,
-		   struct nvkm_falcon *falcon,
-		   struct nvkm_msgqueue *queue)
-{
-	int i;
-
-	queue->func = func;
-	queue->falcon = falcon;
-	mutex_init(&queue->seq_lock);
-	for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
-		queue->seq[i].id = i;
-
-	init_completion(&queue->init_done);
-
-
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
deleted file mode 100644
index 13b54f8..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NVKM_CORE_FALCON_MSGQUEUE_H
-#define __NVKM_CORE_FALCON_MSGQUEUE_H
-
-#include <core/msgqueue.h>
-
-/*
- * The struct nvkm_msgqueue (named so for lack of better candidate) manages
- * a firmware (typically, NVIDIA signed firmware) running under a given falcon.
- *
- * Such firmwares expect to receive commands (through one or several command
- * queues) and will reply to such command by sending messages (using one
- * message queue).
- *
- * Each firmware can support one or several units - ACR for managing secure
- * falcons, PMU for power management, etc. A unit can be seen as a class to
- * which command can be sent.
- *
- * One usage example would be to send a command to the SEC falcon to ask it to
- * reset a secure falcon. The SEC falcon will receive the command, process it,
- * and send a message to signal success or failure. Only when the corresponding
- * message is received can the requester assume the request has been processed.
- *
- * Since we expect many variations between the firmwares NVIDIA will release
- * across GPU generations, this library is built in a very modular way. Message
- * formats and queue details (such as the number of queues used) are left to
- * specializations of struct nvkm_msgqueue, while the functions in msgqueue.c
- * take care of posting commands and processing messages in a fashion that is
- * universal.
- *
- */
-
-enum msgqueue_msg_priority {
-	MSGQUEUE_MSG_PRIORITY_HIGH,
-	MSGQUEUE_MSG_PRIORITY_LOW,
-};
-
-/**
- * struct nvkm_msgqueue_hdr - header for all commands/messages
- * @unit_id:	id of the firmware unit receiving the command/sending the message
- * @size:	total size of command/message
- * @ctrl_flags:	type of command/message
- * @seq_id:	used to match a message from its corresponding command
- */
-struct nvkm_msgqueue_hdr {
-	u8 unit_id;
-	u8 size;
-	u8 ctrl_flags;
-	u8 seq_id;
-};
-
-/**
- * struct nvkm_msgqueue_msg - base message.
- *
- * This is just a header and a message (or command) type. Useful when
- * building command-specific structures.
- */
-struct nvkm_msgqueue_msg {
-	struct nvkm_msgqueue_hdr hdr;
-	u8 msg_type;
-};
-
-struct nvkm_msgqueue;
-typedef void
-(*nvkm_msgqueue_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *);
-
-/**
- * struct nvkm_msgqueue_init_func - msgqueue functions related to initialization
- *
- * @gen_cmdline:	build the commandline into a pre-allocated buffer
- * @init_callback:	called to process the init message
- */
-struct nvkm_msgqueue_init_func {
-	void (*gen_cmdline)(struct nvkm_msgqueue *, void *);
-	int (*init_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *);
-};
-
-/**
- * struct nvkm_msgqueue_acr_func - msgqueue functions related to ACR
- *
- * @boot_falcon:	build and send the command to reset a given falcon
- * @boot_multiple_falcons: build and send the command to reset several falcons
- */
-struct nvkm_msgqueue_acr_func {
-	int (*boot_falcon)(struct nvkm_msgqueue *, enum nvkm_secboot_falcon);
-	int (*boot_multiple_falcons)(struct nvkm_msgqueue *, unsigned long);
-};
-
-struct nvkm_msgqueue_func {
-	const struct nvkm_msgqueue_init_func *init_func;
-	const struct nvkm_msgqueue_acr_func *acr_func;
-	void (*dtor)(struct nvkm_msgqueue *);
-	struct nvkm_msgqueue_queue *(*cmd_queue)(struct nvkm_msgqueue *,
-						 enum msgqueue_msg_priority);
-	void (*recv)(struct nvkm_msgqueue *queue);
-};
-
-/**
- * struct nvkm_msgqueue_queue - information about a command or message queue
- *
- * The number of queues is firmware-dependent. All queues must have their
- * information filled by the init message handler.
- *
- * @mutex:	to be acquired when the queue is being used
- * @index:	physical queue index
- * @offset:	DMEM offset where this queue begins
- * @size:	size allocated to this queue in DMEM (in bytes)
- * @position:	current write position
- * @head_reg:	address of the HEAD register for this queue
- * @tail_reg:	address of the TAIL register for this queue
- */
-struct nvkm_msgqueue_queue {
-	struct mutex mutex;
-	u32 index;
-	u32 offset;
-	u32 size;
-	u32 position;
-
-	u32 head_reg;
-	u32 tail_reg;
-};
-
-/**
- * struct nvkm_msgqueue_seq - keep track of ongoing commands
- *
- * Every time a command is sent, a sequence is assigned to it so the
- * corresponding message can be matched. Upon receiving the message, a callback
- * can be called and/or a completion signaled.
- *
- * @id:		sequence ID
- * @state:	current state
- * @callback:	callback to call upon receiving matching message
- * @completion:	completion to signal after callback is called
- */
-struct nvkm_msgqueue_seq {
-	u16 id;
-	enum {
-		SEQ_STATE_FREE = 0,
-		SEQ_STATE_PENDING,
-		SEQ_STATE_USED,
-		SEQ_STATE_CANCELLED
-	} state;
-	nvkm_msgqueue_callback callback;
-	struct completion *completion;
-};
-
-/*
- * We can have an arbitrary number of sequences, but realistically we will
- * probably not use that much simultaneously.
- */
-#define NVKM_MSGQUEUE_NUM_SEQUENCES 16
-
-/**
- * struct nvkm_msgqueue - manage a command/message based FW on a falcon
- *
- * @falcon:	falcon to be managed
- * @func:	implementation of the firmware to use
- * @init_msg_received:	whether the init message has already been received
- * @init_done:	whether all init is complete and commands can be processed
- * @seq_lock:	protects seq and seq_tbl
- * @seq:	sequences to match commands and messages
- * @seq_tbl:	bitmap of sequences currently in use
- */
-struct nvkm_msgqueue {
-	struct nvkm_falcon *falcon;
-	const struct nvkm_msgqueue_func *func;
-	u32 fw_version;
-	bool init_msg_received;
-	struct completion init_done;
-
-	struct mutex seq_lock;
-	struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES];
-	unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)];
-};
-
-void nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *, struct nvkm_falcon *,
-			struct nvkm_msgqueue *);
-int nvkm_msgqueue_post(struct nvkm_msgqueue *, enum msgqueue_msg_priority,
-		       struct nvkm_msgqueue_hdr *, nvkm_msgqueue_callback,
-		       struct completion *, bool);
-void nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *,
-				struct nvkm_msgqueue_queue *);
-
-int msgqueue_0137c63d_new(struct nvkm_falcon *, const struct nvkm_secboot *,
-			  struct nvkm_msgqueue **);
-int msgqueue_0137bca5_new(struct nvkm_falcon *, const struct nvkm_secboot *,
-			  struct nvkm_msgqueue **);
-int msgqueue_0148cdec_new(struct nvkm_falcon *, const struct nvkm_secboot *,
-			  struct nvkm_msgqueue **);
-
-#endif
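
The comment block in this removed header describes the protocol the msgqueue layer implemented: a command is posted to one of the firmware's command queues, and the firmware's reply on the message queue is matched back to the command through a sequence ID, optionally running a callback and signalling a completion. A minimal caller sketch against the prototypes above, mirroring the deleted acr_boot_falcon() pattern, would look roughly like the following (the unit and command IDs are hypothetical, for illustration only):

static void
example_callback(struct nvkm_msgqueue *queue, struct nvkm_msgqueue_hdr *hdr)
{
	/* a reply matched our sequence; decode the message here */
}

static int
example_post_and_wait(struct nvkm_msgqueue *queue)
{
	DECLARE_COMPLETION_ONSTACK(completed);
	struct {
		struct nvkm_msgqueue_hdr hdr;
		u8 cmd_type;
	} cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.hdr.unit_id = 0x0a;		/* hypothetical unit ID */
	cmd.hdr.size = sizeof(cmd);
	cmd.cmd_type = 0x00;		/* hypothetical command type */

	/* final bool follows the deleted acr_boot_falcon() usage */
	nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
			   example_callback, &completed, true);

	if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;
	return 0;
}
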
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
deleted file mode 100644
index fec0273..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
+++ /dev/null
@@ -1,436 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "msgqueue.h"
-#include <engine/falcon.h>
-#include <subdev/secboot.h>
-
-/* Queues identifiers */
-enum {
-	/* High Priority Command Queue for Host -> PMU communication */
-	MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ = 0,
-	/* Low Priority Command Queue for Host -> PMU communication */
-	MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ = 1,
-	/* Message queue for PMU -> Host communication */
-	MSGQUEUE_0137C63D_MESSAGE_QUEUE = 4,
-	MSGQUEUE_0137C63D_NUM_QUEUES = 5,
-};
-
-struct msgqueue_0137c63d {
-	struct nvkm_msgqueue base;
-
-	struct nvkm_msgqueue_queue queue[MSGQUEUE_0137C63D_NUM_QUEUES];
-};
-#define msgqueue_0137c63d(q) \
-	container_of(q, struct msgqueue_0137c63d, base)
-
-struct msgqueue_0137bca5 {
-	struct msgqueue_0137c63d base;
-
-	u64 wpr_addr;
-};
-#define msgqueue_0137bca5(q) \
-	container_of(container_of(q, struct msgqueue_0137c63d, base), \
-		     struct msgqueue_0137bca5, base);
-
-static struct nvkm_msgqueue_queue *
-msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue,
-			    enum msgqueue_msg_priority priority)
-{
-	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
-	const struct nvkm_subdev *subdev = priv->base.falcon->owner;
-
-	switch (priority) {
-	case MSGQUEUE_MSG_PRIORITY_HIGH:
-		return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ];
-	case MSGQUEUE_MSG_PRIORITY_LOW:
-		return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ];
-	default:
-		nvkm_error(subdev, "invalid command queue!\n");
-		return ERR_PTR(-EINVAL);
-	}
-}
-
-static void
-msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue *queue)
-{
-	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
-	struct nvkm_msgqueue_queue *q_queue =
-		&priv->queue[MSGQUEUE_0137C63D_MESSAGE_QUEUE];
-
-	nvkm_msgqueue_process_msgs(&priv->base, q_queue);
-}
-
-/* Init unit */
-#define MSGQUEUE_0137C63D_UNIT_INIT 0x07
-
-enum {
-	INIT_MSG_INIT = 0x0,
-};
-
-static void
-init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
-{
-	struct {
-		u32 reserved;
-		u32 freq_hz;
-		u32 trace_size;
-		u32 trace_dma_base;
-		u16 trace_dma_base1;
-		u8 trace_dma_offset;
-		u32 trace_dma_idx;
-		bool secure_mode;
-		bool raise_priv_sec;
-		struct {
-			u32 dma_base;
-			u16 dma_base1;
-			u8 dma_offset;
-			u16 fb_size;
-			u8 dma_idx;
-		} gc6_ctx;
-		u8 pad;
-	} *args = buf;
-
-	args->secure_mode = 1;
-}
-
-/* forward declaration */
-static int acr_init_wpr(struct nvkm_msgqueue *queue);
-
-static int
-init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
-{
-	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(_queue);
-	struct {
-		struct nvkm_msgqueue_msg base;
-
-		u8 pad;
-		u16 os_debug_entry_point;
-
-		struct {
-			u16 size;
-			u16 offset;
-			u8 index;
-			u8 pad;
-		} queue_info[MSGQUEUE_0137C63D_NUM_QUEUES];
-
-		u16 sw_managed_area_offset;
-		u16 sw_managed_area_size;
-	} *init = (void *)hdr;
-	const struct nvkm_subdev *subdev = _queue->falcon->owner;
-	int i;
-
-	if (init->base.hdr.unit_id != MSGQUEUE_0137C63D_UNIT_INIT) {
-		nvkm_error(subdev, "expected message from init unit\n");
-		return -EINVAL;
-	}
-
-	if (init->base.msg_type != INIT_MSG_INIT) {
-		nvkm_error(subdev, "expected PMU init msg\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < MSGQUEUE_0137C63D_NUM_QUEUES; i++) {
-		struct nvkm_msgqueue_queue *queue = &priv->queue[i];
-
-		mutex_init(&queue->mutex);
-
-		queue->index = init->queue_info[i].index;
-		queue->offset = init->queue_info[i].offset;
-		queue->size = init->queue_info[i].size;
-
-		if (i != MSGQUEUE_0137C63D_MESSAGE_QUEUE) {
-			queue->head_reg = 0x4a0 + (queue->index * 4);
-			queue->tail_reg = 0x4b0 + (queue->index * 4);
-		} else {
-			queue->head_reg = 0x4c8;
-			queue->tail_reg = 0x4cc;
-		}
-
-		nvkm_debug(subdev,
-			   "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
-			   i, queue->index, queue->offset, queue->size);
-	}
-
-	/* Complete initialization by initializing WPR region */
-	return acr_init_wpr(&priv->base);
-}
-
-static const struct nvkm_msgqueue_init_func
-msgqueue_0137c63d_init_func = {
-	.gen_cmdline = init_gen_cmdline,
-	.init_callback = init_callback,
-};
-
-
-
-/* ACR unit */
-#define MSGQUEUE_0137C63D_UNIT_ACR 0x0a
-
-enum {
-	ACR_CMD_INIT_WPR_REGION = 0x00,
-	ACR_CMD_BOOTSTRAP_FALCON = 0x01,
-	ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03,
-};
-
-static void
-acr_init_wpr_callback(struct nvkm_msgqueue *queue,
-		      struct nvkm_msgqueue_hdr *hdr)
-{
-	struct {
-		struct nvkm_msgqueue_msg base;
-		u32 error_code;
-	} *msg = (void *)hdr;
-	const struct nvkm_subdev *subdev = queue->falcon->owner;
-
-	if (msg->error_code) {
-		nvkm_error(subdev, "ACR WPR init failure: %d\n",
-			   msg->error_code);
-		return;
-	}
-
-	nvkm_debug(subdev, "ACR WPR init complete\n");
-	complete_all(&queue->init_done);
-}
-
-static int
-acr_init_wpr(struct nvkm_msgqueue *queue)
-{
-	/*
-	 * region_id:	region ID in WPR region
-	 * wpr_offset:	offset in WPR region
-	 */
-	struct {
-		struct nvkm_msgqueue_hdr hdr;
-		u8 cmd_type;
-		u32 region_id;
-		u32 wpr_offset;
-	} cmd;
-	memset(&cmd, 0, sizeof(cmd));
-
-	cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
-	cmd.hdr.size = sizeof(cmd);
-	cmd.cmd_type = ACR_CMD_INIT_WPR_REGION;
-	cmd.region_id = 0x01;
-	cmd.wpr_offset = 0x00;
-
-	nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
-			   acr_init_wpr_callback, NULL, false);
-
-	return 0;
-}
-
-
-static void
-acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
-			 struct nvkm_msgqueue_hdr *hdr)
-{
-	struct acr_bootstrap_falcon_msg {
-		struct nvkm_msgqueue_msg base;
-
-		u32 falcon_id;
-	} *msg = (void *)hdr;
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	u32 falcon_id = msg->falcon_id;
-
-	if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
-		nvkm_error(subdev, "in bootstrap falcon callback:\n");
-		nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
-		return;
-	}
-	nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
-}
-
-enum {
-	ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
-	ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
-};
-
-static int
-acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
-{
-	DECLARE_COMPLETION_ONSTACK(completed);
-	/*
-	 * flags      - Flag specifying RESET or no RESET.
-	 * falcon id  - Falcon id specifying falcon to bootstrap.
-	 */
-	struct {
-		struct nvkm_msgqueue_hdr hdr;
-		u8 cmd_type;
-		u32 flags;
-		u32 falcon_id;
-	} cmd;
-
-	memset(&cmd, 0, sizeof(cmd));
-
-	cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
-	cmd.hdr.size = sizeof(cmd);
-	cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
-	cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
-	cmd.falcon_id = falcon;
-	nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
-			acr_boot_falcon_callback, &completed, true);
-
-	if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
-		return -ETIMEDOUT;
-
-	return 0;
-}
-
-static void
-acr_boot_multiple_falcons_callback(struct nvkm_msgqueue *priv,
-				   struct nvkm_msgqueue_hdr *hdr)
-{
-	struct acr_bootstrap_falcon_msg {
-		struct nvkm_msgqueue_msg base;
-
-		u32 falcon_mask;
-	} *msg = (void *)hdr;
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	unsigned long falcon_mask = msg->falcon_mask;
-	u32 falcon_id, falcon_treated = 0;
-
-	for_each_set_bit(falcon_id, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
-		nvkm_debug(subdev, "%s booted\n",
-			   nvkm_secboot_falcon_name[falcon_id]);
-		falcon_treated |= BIT(falcon_id);
-	}
-
-	if (falcon_treated != msg->falcon_mask) {
-		nvkm_error(subdev, "in bootstrap falcon callback:\n");
-		nvkm_error(subdev, "invalid falcon mask 0x%x\n",
-			   msg->falcon_mask);
-		return;
-	}
-}
-
-static int
-acr_boot_multiple_falcons(struct nvkm_msgqueue *priv, unsigned long falcon_mask)
-{
-	DECLARE_COMPLETION_ONSTACK(completed);
-	/*
-	 * flags      - Flag specifying RESET or no RESET.
-	 * falcon id  - Falcon id specifying falcon to bootstrap.
-	 */
-	struct {
-		struct nvkm_msgqueue_hdr hdr;
-		u8 cmd_type;
-		u32 flags;
-		u32 falcon_mask;
-		u32 use_va_mask;
-		u32 wpr_lo;
-		u32 wpr_hi;
-	} cmd;
-	struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv);
-
-	memset(&cmd, 0, sizeof(cmd));
-
-	cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
-	cmd.hdr.size = sizeof(cmd);
-	cmd.cmd_type = ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS;
-	cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
-	cmd.falcon_mask = falcon_mask;
-	cmd.wpr_lo = lower_32_bits(queue->wpr_addr);
-	cmd.wpr_hi = upper_32_bits(queue->wpr_addr);
-	nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
-			acr_boot_multiple_falcons_callback, &completed, true);
-
-	if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
-		return -ETIMEDOUT;
-
-	return 0;
-}
-
-static const struct nvkm_msgqueue_acr_func
-msgqueue_0137c63d_acr_func = {
-	.boot_falcon = acr_boot_falcon,
-};
-
-static const struct nvkm_msgqueue_acr_func
-msgqueue_0137bca5_acr_func = {
-	.boot_falcon = acr_boot_falcon,
-	.boot_multiple_falcons = acr_boot_multiple_falcons,
-};
-
-static void
-msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
-{
-	kfree(msgqueue_0137c63d(queue));
-}
-
-static const struct nvkm_msgqueue_func
-msgqueue_0137c63d_func = {
-	.init_func = &msgqueue_0137c63d_init_func,
-	.acr_func = &msgqueue_0137c63d_acr_func,
-	.cmd_queue = msgqueue_0137c63d_cmd_queue,
-	.recv = msgqueue_0137c63d_process_msgs,
-	.dtor = msgqueue_0137c63d_dtor,
-};
-
-int
-msgqueue_0137c63d_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
-		      struct nvkm_msgqueue **queue)
-{
-	struct msgqueue_0137c63d *ret;
-
-	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
-	if (!ret)
-		return -ENOMEM;
-
-	*queue = &ret->base;
-
-	nvkm_msgqueue_ctor(&msgqueue_0137c63d_func, falcon, &ret->base);
-
-	return 0;
-}
-
-static const struct nvkm_msgqueue_func
-msgqueue_0137bca5_func = {
-	.init_func = &msgqueue_0137c63d_init_func,
-	.acr_func = &msgqueue_0137bca5_acr_func,
-	.cmd_queue = msgqueue_0137c63d_cmd_queue,
-	.recv = msgqueue_0137c63d_process_msgs,
-	.dtor = msgqueue_0137c63d_dtor,
-};
-
-int
-msgqueue_0137bca5_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
-		      struct nvkm_msgqueue **queue)
-{
-	struct msgqueue_0137bca5 *ret;
-
-	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
-	if (!ret)
-		return -ENOMEM;
-
-	*queue = &ret->base.base;
-
-	/*
-	 * FIXME this must be set to the address of a *GPU* mapping within the
-	 * ACR address space!
-	 */
-	/* ret->wpr_addr = sb->wpr_addr; */
-
-	nvkm_msgqueue_ctor(&msgqueue_0137bca5_func, falcon, &ret->base.base);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
deleted file mode 100644
index 9424803b..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "msgqueue.h"
-#include <engine/falcon.h>
-#include <subdev/secboot.h>
-
-/*
- * This firmware runs on the SEC falcon. It only has one command and one
- * message queue, and uses a different command line and init message.
- */
-
-enum {
-	MSGQUEUE_0148CDEC_COMMAND_QUEUE = 0,
-	MSGQUEUE_0148CDEC_MESSAGE_QUEUE = 1,
-	MSGQUEUE_0148CDEC_NUM_QUEUES,
-};
-
-struct msgqueue_0148cdec {
-	struct nvkm_msgqueue base;
-
-	struct nvkm_msgqueue_queue queue[MSGQUEUE_0148CDEC_NUM_QUEUES];
-};
-#define msgqueue_0148cdec(q) \
-	container_of(q, struct msgqueue_0148cdec, base)
-
-static struct nvkm_msgqueue_queue *
-msgqueue_0148cdec_cmd_queue(struct nvkm_msgqueue *queue,
-			    enum msgqueue_msg_priority priority)
-{
-	struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
-
-	return &priv->queue[MSGQUEUE_0148CDEC_COMMAND_QUEUE];
-}
-
-static void
-msgqueue_0148cdec_process_msgs(struct nvkm_msgqueue *queue)
-{
-	struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
-	struct nvkm_msgqueue_queue *q_queue =
-		&priv->queue[MSGQUEUE_0148CDEC_MESSAGE_QUEUE];
-
-	nvkm_msgqueue_process_msgs(&priv->base, q_queue);
-}
-
-
-/* Init unit */
-#define MSGQUEUE_0148CDEC_UNIT_INIT 0x01
-
-enum {
-	INIT_MSG_INIT = 0x0,
-};
-
-static void
-init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
-{
-	struct {
-		u32 freq_hz;
-		u32 falc_trace_size;
-		u32 falc_trace_dma_base;
-		u32 falc_trace_dma_idx;
-		bool secure_mode;
-	} *args = buf;
-
-	args->secure_mode = false;
-}
-
-static int
-init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
-{
-	struct msgqueue_0148cdec *priv = msgqueue_0148cdec(_queue);
-	struct {
-		struct nvkm_msgqueue_msg base;
-
-		u8 num_queues;
-		u16 os_debug_entry_point;
-
-		struct {
-			u32 offset;
-			u16 size;
-			u8 index;
-			u8 id;
-		} queue_info[MSGQUEUE_0148CDEC_NUM_QUEUES];
-
-		u16 sw_managed_area_offset;
-		u16 sw_managed_area_size;
-	} *init = (void *)hdr;
-	const struct nvkm_subdev *subdev = _queue->falcon->owner;
-	int i;
-
-	if (init->base.hdr.unit_id != MSGQUEUE_0148CDEC_UNIT_INIT) {
-		nvkm_error(subdev, "expected message from init unit\n");
-		return -EINVAL;
-	}
-
-	if (init->base.msg_type != INIT_MSG_INIT) {
-		nvkm_error(subdev, "expected SEC init msg\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < MSGQUEUE_0148CDEC_NUM_QUEUES; i++) {
-		u8 id = init->queue_info[i].id;
-		struct nvkm_msgqueue_queue *queue = &priv->queue[id];
-
-		mutex_init(&queue->mutex);
-
-		queue->index = init->queue_info[i].index;
-		queue->offset = init->queue_info[i].offset;
-		queue->size = init->queue_info[i].size;
-
-		if (id == MSGQUEUE_0148CDEC_MESSAGE_QUEUE) {
-			queue->head_reg = 0xa30 + (queue->index * 8);
-			queue->tail_reg = 0xa34 + (queue->index * 8);
-		} else {
-			queue->head_reg = 0xa00 + (queue->index * 8);
-			queue->tail_reg = 0xa04 + (queue->index * 8);
-		}
-
-		nvkm_debug(subdev,
-			   "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
-			   id, queue->index, queue->offset, queue->size);
-	}
-
-	complete_all(&_queue->init_done);
-
-	return 0;
-}
-
-static const struct nvkm_msgqueue_init_func
-msgqueue_0148cdec_init_func = {
-	.gen_cmdline = init_gen_cmdline,
-	.init_callback = init_callback,
-};
-
-
-
-/* ACR unit */
-#define MSGQUEUE_0148CDEC_UNIT_ACR 0x08
-
-enum {
-	ACR_CMD_BOOTSTRAP_FALCON = 0x00,
-};
-
-static void
-acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
-			 struct nvkm_msgqueue_hdr *hdr)
-{
-	struct acr_bootstrap_falcon_msg {
-		struct nvkm_msgqueue_msg base;
-
-		u32 error_code;
-		u32 falcon_id;
-	} *msg = (void *)hdr;
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	u32 falcon_id = msg->falcon_id;
-
-	if (msg->error_code) {
-		nvkm_error(subdev, "in bootstrap falcon callback:\n");
-		nvkm_error(subdev, "unexpected error code 0x%x\n",
-			   msg->error_code);
-		return;
-	}
-
-	if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
-		nvkm_error(subdev, "in bootstrap falcon callback:\n");
-		nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
-		return;
-	}
-
-	nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
-}
-
-enum {
-	ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
-	ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
-};
-
-static int
-acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
-{
-	DECLARE_COMPLETION_ONSTACK(completed);
-	/*
-	 * flags      - Flag specifying RESET or no RESET.
-	 * falcon id  - Falcon id specifying falcon to bootstrap.
-	 */
-	struct {
-		struct nvkm_msgqueue_hdr hdr;
-		u8 cmd_type;
-		u32 flags;
-		u32 falcon_id;
-	} cmd;
-
-	memset(&cmd, 0, sizeof(cmd));
-
-	cmd.hdr.unit_id = MSGQUEUE_0148CDEC_UNIT_ACR;
-	cmd.hdr.size = sizeof(cmd);
-	cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
-	cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
-	cmd.falcon_id = falcon;
-	nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
-			   acr_boot_falcon_callback, &completed, true);
-
-	if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
-		return -ETIMEDOUT;
-
-	return 0;
-}
-
-const struct nvkm_msgqueue_acr_func
-msgqueue_0148cdec_acr_func = {
-	.boot_falcon = acr_boot_falcon,
-};
-
-static void
-msgqueue_0148cdec_dtor(struct nvkm_msgqueue *queue)
-{
-	kfree(msgqueue_0148cdec(queue));
-}
-
-const struct nvkm_msgqueue_func
-msgqueue_0148cdec_func = {
-	.init_func = &msgqueue_0148cdec_init_func,
-	.acr_func = &msgqueue_0148cdec_acr_func,
-	.cmd_queue = msgqueue_0148cdec_cmd_queue,
-	.recv = msgqueue_0148cdec_process_msgs,
-	.dtor = msgqueue_0148cdec_dtor,
-};
-
-int
-msgqueue_0148cdec_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
-		      struct nvkm_msgqueue **queue)
-{
-	struct msgqueue_0148cdec *ret;
-
-	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
-	if (!ret)
-		return -ENOMEM;
-
-	*queue = &ret->base;
-
-	nvkm_msgqueue_ctor(&msgqueue_0148cdec_func, falcon, &ret->base);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
index 900fe1d..4661887 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
@@ -1,9 +1,5 @@
 /* SPDX-License-Identifier: MIT */
 #ifndef __NVKM_FALCON_PRIV_H__
 #define __NVKM_FALCON_PRIV_H__
-#include <engine/falcon.h>
-
-void
-nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *,
-		 const char *, u32, struct nvkm_falcon *);
+#include <core/falcon.h>
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
new file mode 100644
index 0000000..a453de3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr)
+{
+	const struct nvkm_subdev *subdev = qmgr->falcon->owner;
+	struct nvkm_falcon_qmgr_seq *seq;
+	u32 index;
+
+	mutex_lock(&qmgr->seq.mutex);
+	index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM);
+	if (index >= NVKM_FALCON_QMGR_SEQ_NUM) {
+		nvkm_error(subdev, "no free sequence available\n");
+		mutex_unlock(&qmgr->seq.mutex);
+		return ERR_PTR(-EAGAIN);
+	}
+
+	set_bit(index, qmgr->seq.tbl);
+	mutex_unlock(&qmgr->seq.mutex);
+
+	seq = &qmgr->seq.id[index];
+	seq->state = SEQ_STATE_PENDING;
+	return seq;
+}
+
+void
+nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr,
+			     struct nvkm_falcon_qmgr_seq *seq)
+{
+	/* no need to acquire seq.mutex since clear_bit is atomic */
+	seq->state = SEQ_STATE_FREE;
+	seq->callback = NULL;
+	reinit_completion(&seq->done);
+	clear_bit(seq->id, qmgr->seq.tbl);
+}
+
+void
+nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **pqmgr)
+{
+	struct nvkm_falcon_qmgr *qmgr = *pqmgr;
+	if (qmgr) {
+		kfree(*pqmgr);
+		*pqmgr = NULL;
+	}
+}
+
+int
+nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
+		     struct nvkm_falcon_qmgr **pqmgr)
+{
+	struct nvkm_falcon_qmgr *qmgr;
+	int i;
+
+	if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL)))
+		return -ENOMEM;
+
+	qmgr->falcon = falcon;
+	mutex_init(&qmgr->seq.mutex);
+	for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) {
+		qmgr->seq.id[i].id = i;
+		init_completion(&qmgr->seq.id[i].done);
+	}
+
+	return 0;
+}
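
The new qmgr carries over the old per-msgqueue seq[]/seq_tbl idea as a standalone object: a mutex-guarded bitmap hands out sequence slots, and each slot now owns its own completion. A sketch of the acquire/use/release lifecycle a command path would follow (the actual command posting is omitted) might be:

static int
example_seq_roundtrip(struct nvkm_falcon_qmgr *qmgr)
{
	struct nvkm_falcon_qmgr_seq *seq;

	seq = nvkm_falcon_qmgr_seq_acquire(qmgr);
	if (IS_ERR(seq))
		return PTR_ERR(seq);

	/* fill a command header with seq->id, post it, then wait on
	 * seq->done until the matching message has been handled */

	nvkm_falcon_qmgr_seq_release(qmgr, seq);
	return 0;
}
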
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
new file mode 100644
index 0000000..a45cd70
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_FALCON_QMGR_H__
+#define __NVKM_FALCON_QMGR_H__
+#include <core/falcon.h>
+
+#define HDR_SIZE sizeof(struct nv_falcon_msg)
+#define QUEUE_ALIGNMENT 4
+/* max size of the messages we can receive */
+#define MSG_BUF_SIZE 128
+
+/**
+ * struct nvkm_falcon_qmgr_seq - keep track of ongoing commands
+ *
+ * Every time a command is sent, a sequence is assigned to it so the
+ * corresponding message can be matched. Upon receiving the message, a callback
+ * can be called and/or a completion signaled.
+ *
+ * @id:		sequence ID
+ * @state:	current state
+ * @callback:	callback to call upon receiving matching message
+ * @done:	completion to signal after callback is called
+ */
+struct nvkm_falcon_qmgr_seq {
+	u16 id;
+	enum {
+		SEQ_STATE_FREE = 0,
+		SEQ_STATE_PENDING,
+		SEQ_STATE_USED,
+		SEQ_STATE_CANCELLED
+	} state;
+	bool async;
+	nvkm_falcon_qmgr_callback callback;
+	void *priv;
+	struct completion done;
+	int result;
+};
+
+/*
+ * We can have an arbitrary number of sequences, but realistically we will
+ * probably not use that much simultaneously.
+ */
+#define NVKM_FALCON_QMGR_SEQ_NUM 16
+
+struct nvkm_falcon_qmgr {
+	struct nvkm_falcon *falcon;
+
+	struct {
+		struct mutex mutex;
+		struct nvkm_falcon_qmgr_seq id[NVKM_FALCON_QMGR_SEQ_NUM];
+		unsigned long tbl[BITS_TO_LONGS(NVKM_FALCON_QMGR_SEQ_NUM)];
+	} seq;
+};
+
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *);
+void nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *,
+				  struct nvkm_falcon_qmgr_seq *);
+
+struct nvkm_falcon_cmdq {
+	struct nvkm_falcon_qmgr *qmgr;
+	const char *name;
+	struct mutex mutex;
+	struct completion ready;
+
+	u32 head_reg;
+	u32 tail_reg;
+	u32 offset;
+	u32 size;
+
+	u32 position;
+};
+
+struct nvkm_falcon_msgq {
+	struct nvkm_falcon_qmgr *qmgr;
+	const char *name;
+	struct mutex mutex;
+
+	u32 head_reg;
+	u32 tail_reg;
+	u32 offset;
+
+	u32 position;
+};
+
+#define FLCNQ_PRINTK(t,q,f,a...)                                               \
+	FLCN_PRINTK(t, (q)->qmgr->falcon, "%s: "f, (q)->name, ##a)
+#define FLCNQ_DBG(q,f,a...) FLCNQ_PRINTK(debug, (q), f, ##a)
+#define FLCNQ_ERR(q,f,a...) FLCNQ_PRINTK(error, (q), f, ##a)
+#endif
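
FLCNQ_PRINTK delegates to FLCN_PRINTK (declared alongside the falcon core headers, not shown in this patch) and prefixes every message with the queue's name, so both queue structures above only need their qmgr and name fields populated for logging to work. A typical use, assuming cmdq is an initialised struct nvkm_falcon_cmdq pointer and head/tail are register snapshots, would be roughly:

	FLCNQ_DBG(cmdq, "head 0x%08x tail 0x%08x", head, tail);
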
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
index 6d978fe..1ff9b9c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -25,7 +25,7 @@
 #include <core/memory.h>
 #include <subdev/timer.h>
 
-static void
+void
 nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
 			 u32 size, u16 tag, u8 port, bool secure)
 {
@@ -89,18 +89,17 @@ nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
 	}
 }
 
-static const u32 EMEM_START_ADDR = 0x1000000;
-
-static void
+void
 nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
-		      u32 size, u8 port)
+			 u32 size, u8 port)
 {
+	const struct nvkm_falcon_func *func = falcon->func;
 	u8 rem = size % 4;
 	int i;
 
-	if (start >= EMEM_START_ADDR && falcon->has_emem)
+	if (func->emem_addr && start >= func->emem_addr)
 		return nvkm_falcon_v1_load_emem(falcon, data,
-						start - EMEM_START_ADDR, size,
+						start - func->emem_addr, size,
 						port);
 
 	size -= rem;
@@ -148,15 +147,16 @@ nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
 	}
 }
 
-static void
+void
 nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
 			 u8 port, void *data)
 {
+	const struct nvkm_falcon_func *func = falcon->func;
 	u8 rem = size % 4;
 	int i;
 
-	if (start >= EMEM_START_ADDR && falcon->has_emem)
-		return nvkm_falcon_v1_read_emem(falcon, start - EMEM_START_ADDR,
+	if (func->emem_addr && start >= func->emem_addr)
+		return nvkm_falcon_v1_read_emem(falcon, start - func->emem_addr,
 						size, port, data);
 
 	size -= rem;
@@ -179,12 +179,11 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
 	}
 }
 
-static void
+void
 nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
 {
-	struct nvkm_device *device = falcon->owner->device;
+	const u32 fbif = falcon->func->fbif;
 	u32 inst_loc;
-	u32 fbif;
 
 	/* disable instance block binding */
 	if (ctx == NULL) {
@@ -192,20 +191,6 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
 		return;
 	}
 
-	switch (falcon->owner->index) {
-	case NVKM_ENGINE_NVENC0:
-	case NVKM_ENGINE_NVENC1:
-	case NVKM_ENGINE_NVENC2:
-		fbif = 0x800;
-		break;
-	case NVKM_SUBDEV_PMU:
-		fbif = 0xe00;
-		break;
-	default:
-		fbif = 0x600;
-		break;
-	}
-
 	nvkm_falcon_wr32(falcon, 0x10c, 0x1);
 
 	/* setup apertures - virtual */
@@ -234,50 +219,15 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
 
 	nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
 	nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
-
-	/* Not sure if this is a WAR for a HW issue, or some additional
-	 * programming sequence that's needed to properly complete the
-	 * context switch we trigger above.
-	 *
-	 * Fixes unreliability of booting the SEC2 RTOS on Quadro P620,
-	 * particularly when resuming from suspend.
-	 *
-	 * Also removes the need for an odd workaround where we needed
-	 * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before
-	 * the SEC2 RTOS would begin executing.
-	 */
-	switch (falcon->owner->index) {
-	case NVKM_SUBDEV_GSP:
-	case NVKM_ENGINE_SEC2:
-		nvkm_msec(device, 10,
-			u32 irqstat = nvkm_falcon_rd32(falcon, 0x008);
-			u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
-			if ((irqstat & 0x00000008) &&
-			    (flcn0dc & 0x00007000) == 0x00005000)
-				break;
-		);
-
-		nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
-		nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);
-
-		nvkm_msec(device, 10,
-			u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
-			if ((flcn0dc & 0x00007000) == 0x00000000)
-				break;
-		);
-		break;
-	default:
-		break;
-	}
 }
 
-static void
+void
 nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
 {
 	nvkm_falcon_wr32(falcon, 0x104, start_addr);
 }
 
-static void
+void
 nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
 {
 	u32 reg = nvkm_falcon_rd32(falcon, 0x100);
@@ -288,7 +238,7 @@ nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
 		nvkm_falcon_wr32(falcon, 0x100, 0x2);
 }
 
-static int
+int
 nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
 {
 	struct nvkm_device *device = falcon->owner->device;
@@ -301,7 +251,7 @@ nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
 	return 0;
 }
 
-static int
+int
 nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
 {
 	struct nvkm_device *device = falcon->owner->device;
@@ -330,7 +280,7 @@ falcon_v1_wait_idle(struct nvkm_falcon *falcon)
 	return 0;
 }
 
-static int
+int
 nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
 {
 	struct nvkm_device *device = falcon->owner->device;
@@ -352,7 +302,7 @@ nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
 	return 0;
 }
 
-static void
+void
 nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
 {
 	/* disable IRQs and wait for any previous code to complete */
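
With these changes the v1 helpers stop hard-coding the EMEM window and the FBIF base: both are read from the per-falcon nvkm_falcon_func, so the DMEM load/read paths redirect to EMEM whenever func->emem_addr is set and the offset reaches it. A hypothetical function-table fragment, showing only the two members this file now consumes (values taken from the constants the old code used), might look like:

static const struct nvkm_falcon_func
example_flcn_func = {
	/* load/read/start hooks omitted */
	.fbif = 0x600,			/* old default FBIF base */
	.emem_addr = 0x1000000,		/* old EMEM_START_ADDR */
};
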
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild b/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild
new file mode 100644
index 0000000..41d75f9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/nvfw/fw.o
+nvkm-y += nvkm/nvfw/hs.o
+nvkm-y += nvkm/nvfw/ls.o
+
+nvkm-y += nvkm/nvfw/acr.o
+nvkm-y += nvkm/nvfw/flcn.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
new file mode 100644
index 0000000..0d063b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/acr.h>
+
+void
+wpr_header_dump(struct nvkm_subdev *subdev, const struct wpr_header *hdr)
+{
+	nvkm_debug(subdev, "wprHeader\n");
+	nvkm_debug(subdev, "\tfalconID      : %d\n", hdr->falcon_id);
+	nvkm_debug(subdev, "\tlsbOffset     : 0x%x\n", hdr->lsb_offset);
+	nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner);
+	nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap);
+	nvkm_debug(subdev, "\tstatus        : %d\n", hdr->status);
+}
+
+void
+wpr_header_v1_dump(struct nvkm_subdev *subdev, const struct wpr_header_v1 *hdr)
+{
+	nvkm_debug(subdev, "wprHeader\n");
+	nvkm_debug(subdev, "\tfalconID      : %d\n", hdr->falcon_id);
+	nvkm_debug(subdev, "\tlsbOffset     : 0x%x\n", hdr->lsb_offset);
+	nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner);
+	nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap);
+	nvkm_debug(subdev, "\tbinVersion    : %d\n", hdr->bin_version);
+	nvkm_debug(subdev, "\tstatus        : %d\n", hdr->status);
+}
+
+void
+lsb_header_tail_dump(struct nvkm_subdev *subdev,
+			struct lsb_header_tail *hdr)
+{
+	nvkm_debug(subdev, "lsbHeader\n");
+	nvkm_debug(subdev, "\tucodeOff      : 0x%x\n", hdr->ucode_off);
+	nvkm_debug(subdev, "\tucodeSize     : 0x%x\n", hdr->ucode_size);
+	nvkm_debug(subdev, "\tdataSize      : 0x%x\n", hdr->data_size);
+	nvkm_debug(subdev, "\tblCodeSize    : 0x%x\n", hdr->bl_code_size);
+	nvkm_debug(subdev, "\tblImemOff     : 0x%x\n", hdr->bl_imem_off);
+	nvkm_debug(subdev, "\tblDataOff     : 0x%x\n", hdr->bl_data_off);
+	nvkm_debug(subdev, "\tblDataSize    : 0x%x\n", hdr->bl_data_size);
+	nvkm_debug(subdev, "\tappCodeOff    : 0x%x\n", hdr->app_code_off);
+	nvkm_debug(subdev, "\tappCodeSize   : 0x%x\n", hdr->app_code_size);
+	nvkm_debug(subdev, "\tappDataOff    : 0x%x\n", hdr->app_data_off);
+	nvkm_debug(subdev, "\tappDataSize   : 0x%x\n", hdr->app_data_size);
+	nvkm_debug(subdev, "\tflags         : 0x%x\n", hdr->flags);
+}
+
+void
+lsb_header_dump(struct nvkm_subdev *subdev, struct lsb_header *hdr)
+{
+	lsb_header_tail_dump(subdev, &hdr->tail);
+}
+
+void
+lsb_header_v1_dump(struct nvkm_subdev *subdev, struct lsb_header_v1 *hdr)
+{
+	lsb_header_tail_dump(subdev, &hdr->tail);
+}
+
+void
+flcn_acr_desc_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc *hdr)
+{
+	int i;
+
+	nvkm_debug(subdev, "acrDesc\n");
+	nvkm_debug(subdev, "\twprRegionId  : %d\n", hdr->wpr_region_id);
+	nvkm_debug(subdev, "\twprOffset    : 0x%x\n", hdr->wpr_offset);
+	nvkm_debug(subdev, "\tmmuMemRange  : 0x%x\n",
+		   hdr->mmu_mem_range);
+	nvkm_debug(subdev, "\tnoRegions    : %d\n",
+		   hdr->regions.no_regions);
+
+	for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) {
+		nvkm_debug(subdev, "\tregion[%d]    :\n", i);
+		nvkm_debug(subdev, "\t  startAddr  : 0x%x\n",
+			   hdr->regions.region_props[i].start_addr);
+		nvkm_debug(subdev, "\t  endAddr    : 0x%x\n",
+			   hdr->regions.region_props[i].end_addr);
+		nvkm_debug(subdev, "\t  regionId   : %d\n",
+			   hdr->regions.region_props[i].region_id);
+		nvkm_debug(subdev, "\t  readMask   : 0x%x\n",
+			   hdr->regions.region_props[i].read_mask);
+		nvkm_debug(subdev, "\t  writeMask  : 0x%x\n",
+			   hdr->regions.region_props[i].write_mask);
+		nvkm_debug(subdev, "\t  clientMask : 0x%x\n",
+			   hdr->regions.region_props[i].client_mask);
+	}
+
+	nvkm_debug(subdev, "\tucodeBlobSize: %d\n",
+		   hdr->ucode_blob_size);
+	nvkm_debug(subdev, "\tucodeBlobBase: 0x%llx\n",
+		   hdr->ucode_blob_base);
+	nvkm_debug(subdev, "\tvprEnabled   : %d\n",
+		   hdr->vpr_desc.vpr_enabled);
+	nvkm_debug(subdev, "\tvprStart     : 0x%x\n",
+		   hdr->vpr_desc.vpr_start);
+	nvkm_debug(subdev, "\tvprEnd       : 0x%x\n",
+		   hdr->vpr_desc.vpr_end);
+	nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n",
+		   hdr->vpr_desc.hdcp_policies);
+}
+
+void
+flcn_acr_desc_v1_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc_v1 *hdr)
+{
+	int i;
+
+	nvkm_debug(subdev, "acrDesc\n");
+	nvkm_debug(subdev, "\twprRegionId         : %d\n", hdr->wpr_region_id);
+	nvkm_debug(subdev, "\twprOffset           : 0x%x\n", hdr->wpr_offset);
+	nvkm_debug(subdev, "\tmmuMemoryRange      : 0x%x\n",
+		   hdr->mmu_memory_range);
+	nvkm_debug(subdev, "\tnoRegions           : %d\n",
+		   hdr->regions.no_regions);
+
+	for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) {
+		nvkm_debug(subdev, "\tregion[%d]           :\n", i);
+		nvkm_debug(subdev, "\t  startAddr         : 0x%x\n",
+			   hdr->regions.region_props[i].start_addr);
+		nvkm_debug(subdev, "\t  endAddr           : 0x%x\n",
+			   hdr->regions.region_props[i].end_addr);
+		nvkm_debug(subdev, "\t  regionId          : %d\n",
+			   hdr->regions.region_props[i].region_id);
+		nvkm_debug(subdev, "\t  readMask          : 0x%x\n",
+			   hdr->regions.region_props[i].read_mask);
+		nvkm_debug(subdev, "\t  writeMask         : 0x%x\n",
+			   hdr->regions.region_props[i].write_mask);
+		nvkm_debug(subdev, "\t  clientMask        : 0x%x\n",
+			   hdr->regions.region_props[i].client_mask);
+		nvkm_debug(subdev, "\t  shadowMemStartAddr: 0x%x\n",
+			   hdr->regions.region_props[i].shadow_mem_start_addr);
+	}
+
+	nvkm_debug(subdev, "\tucodeBlobSize       : %d\n",
+		   hdr->ucode_blob_size);
+	nvkm_debug(subdev, "\tucodeBlobBase       : 0x%llx\n",
+		   hdr->ucode_blob_base);
+	nvkm_debug(subdev, "\tvprEnabled          : %d\n",
+		   hdr->vpr_desc.vpr_enabled);
+	nvkm_debug(subdev, "\tvprStart            : 0x%x\n",
+		   hdr->vpr_desc.vpr_start);
+	nvkm_debug(subdev, "\tvprEnd              : 0x%x\n",
+		   hdr->vpr_desc.vpr_end);
+	nvkm_debug(subdev, "\thdcpPolicies        : 0x%x\n",
+		   hdr->vpr_desc.hdcp_policies);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c
new file mode 100644
index 0000000..00ec764
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/flcn.h>
+
+void
+loader_config_dump(struct nvkm_subdev *subdev, const struct loader_config *hdr)
+{
+	nvkm_debug(subdev, "loaderConfig\n");
+	nvkm_debug(subdev, "\tdmaIdx        : %d\n", hdr->dma_idx);
+	nvkm_debug(subdev, "\tcodeDmaBase   : 0x%x\n", hdr->code_dma_base);
+	nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total);
+	nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load);
+	nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+	nvkm_debug(subdev, "\tdataDmaBase   : 0x%x\n", hdr->data_dma_base);
+	nvkm_debug(subdev, "\tdataSize      : 0x%x\n", hdr->data_size);
+	nvkm_debug(subdev, "\toverlayDmaBase: 0x%x\n", hdr->overlay_dma_base);
+	nvkm_debug(subdev, "\targc          : 0x%08x\n", hdr->argc);
+	nvkm_debug(subdev, "\targv          : 0x%08x\n", hdr->argv);
+	nvkm_debug(subdev, "\tcodeDmaBase1  : 0x%x\n", hdr->code_dma_base1);
+	nvkm_debug(subdev, "\tdataDmaBase1  : 0x%x\n", hdr->data_dma_base1);
+	nvkm_debug(subdev, "\tovlyDmaBase1  : 0x%x\n", hdr->overlay_dma_base1);
+}
+
+void
+loader_config_v1_dump(struct nvkm_subdev *subdev,
+		      const struct loader_config_v1 *hdr)
+{
+	nvkm_debug(subdev, "loaderConfig\n");
+	nvkm_debug(subdev, "\treserved      : 0x%08x\n", hdr->reserved);
+	nvkm_debug(subdev, "\tdmaIdx        : %d\n", hdr->dma_idx);
+	nvkm_debug(subdev, "\tcodeDmaBase   : 0x%llx\n", hdr->code_dma_base);
+	nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total);
+	nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load);
+	nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+	nvkm_debug(subdev, "\tdataDmaBase   : 0x%llx\n", hdr->data_dma_base);
+	nvkm_debug(subdev, "\tdataSize      : 0x%x\n", hdr->data_size);
+	nvkm_debug(subdev, "\toverlayDmaBase: 0x%llx\n", hdr->overlay_dma_base);
+	nvkm_debug(subdev, "\targc          : 0x%08x\n", hdr->argc);
+	nvkm_debug(subdev, "\targv          : 0x%08x\n", hdr->argv);
+}
+
+void
+flcn_bl_dmem_desc_dump(struct nvkm_subdev *subdev,
+		       const struct flcn_bl_dmem_desc *hdr)
+{
+	nvkm_debug(subdev, "flcnBlDmemDesc\n");
+	nvkm_debug(subdev, "\treserved      : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		   hdr->reserved[0], hdr->reserved[1], hdr->reserved[2],
+		   hdr->reserved[3]);
+	nvkm_debug(subdev, "\tsignature     : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		   hdr->signature[0], hdr->signature[1], hdr->signature[2],
+		   hdr->signature[3]);
+	nvkm_debug(subdev, "\tctxDma        : %d\n", hdr->ctx_dma);
+	nvkm_debug(subdev, "\tcodeDmaBase   : 0x%x\n", hdr->code_dma_base);
+	nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off);
+	nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size);
+	nvkm_debug(subdev, "\tsecCodeOff    : 0x%x\n", hdr->sec_code_off);
+	nvkm_debug(subdev, "\tsecCodeSize   : 0x%x\n", hdr->sec_code_size);
+	nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+	nvkm_debug(subdev, "\tdataDmaBase   : 0x%x\n", hdr->data_dma_base);
+	nvkm_debug(subdev, "\tdataSize      : 0x%x\n", hdr->data_size);
+	nvkm_debug(subdev, "\tcodeDmaBase1  : 0x%x\n", hdr->code_dma_base1);
+	nvkm_debug(subdev, "\tdataDmaBase1  : 0x%x\n", hdr->data_dma_base1);
+}
+
+void
+flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *subdev,
+			  const struct flcn_bl_dmem_desc_v1 *hdr)
+{
+	nvkm_debug(subdev, "flcnBlDmemDesc\n");
+	nvkm_debug(subdev, "\treserved      : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		   hdr->reserved[0], hdr->reserved[1], hdr->reserved[2],
+		   hdr->reserved[3]);
+	nvkm_debug(subdev, "\tsignature     : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		   hdr->signature[0], hdr->signature[1], hdr->signature[2],
+		   hdr->signature[3]);
+	nvkm_debug(subdev, "\tctxDma        : %d\n", hdr->ctx_dma);
+	nvkm_debug(subdev, "\tcodeDmaBase   : 0x%llx\n", hdr->code_dma_base);
+	nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off);
+	nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size);
+	nvkm_debug(subdev, "\tsecCodeOff    : 0x%x\n", hdr->sec_code_off);
+	nvkm_debug(subdev, "\tsecCodeSize   : 0x%x\n", hdr->sec_code_size);
+	nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+	nvkm_debug(subdev, "\tdataDmaBase   : 0x%llx\n", hdr->data_dma_base);
+	nvkm_debug(subdev, "\tdataSize      : 0x%x\n", hdr->data_size);
+}
+
+void
+flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *subdev,
+			  const struct flcn_bl_dmem_desc_v2 *hdr)
+{
+	flcn_bl_dmem_desc_v1_dump(subdev, (void *)hdr);
+	nvkm_debug(subdev, "\targc          : 0x%08x\n", hdr->argc);
+	nvkm_debug(subdev, "\targv          : 0x%08x\n", hdr->argv);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c
new file mode 100644
index 0000000..746803b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/fw.h>
+
+const struct nvfw_bin_hdr *
+nvfw_bin_hdr(struct nvkm_subdev *subdev, const void *data)
+{
+	const struct nvfw_bin_hdr *hdr = data;
+	nvkm_debug(subdev, "binHdr:\n");
+	nvkm_debug(subdev, "\tbinMagic         : 0x%08x\n", hdr->bin_magic);
+	nvkm_debug(subdev, "\tbinVer           : %d\n", hdr->bin_ver);
+	nvkm_debug(subdev, "\tbinSize          : %d\n", hdr->bin_size);
+	nvkm_debug(subdev, "\theaderOffset     : 0x%x\n", hdr->header_offset);
+	nvkm_debug(subdev, "\tdataOffset       : 0x%x\n", hdr->data_offset);
+	nvkm_debug(subdev, "\tdataSize         : 0x%x\n", hdr->data_size);
+	return hdr;
+}
+
+const struct nvfw_bl_desc *
+nvfw_bl_desc(struct nvkm_subdev *subdev, const void *data)
+{
+	const struct nvfw_bl_desc *hdr = data;
+	nvkm_debug(subdev, "blDesc\n");
+	nvkm_debug(subdev, "\tstartTag         : 0x%x\n", hdr->start_tag);
+	nvkm_debug(subdev, "\tdmemLoadOff      : 0x%x\n", hdr->dmem_load_off);
+	nvkm_debug(subdev, "\tcodeOff          : 0x%x\n", hdr->code_off);
+	nvkm_debug(subdev, "\tcodeSize         : 0x%x\n", hdr->code_size);
+	nvkm_debug(subdev, "\tdataOff          : 0x%x\n", hdr->data_off);
+	nvkm_debug(subdev, "\tdataSize         : 0x%x\n", hdr->data_size);
+	return hdr;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c
new file mode 100644
index 0000000..04ed77c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/hs.h>
+
+const struct nvfw_hs_header *
+nvfw_hs_header(struct nvkm_subdev *subdev, const void *data)
+{
+	const struct nvfw_hs_header *hdr = data;
+	nvkm_debug(subdev, "hsHeader:\n");
+	nvkm_debug(subdev, "\tsigDbgOffset     : 0x%x\n", hdr->sig_dbg_offset);
+	nvkm_debug(subdev, "\tsigDbgSize       : 0x%x\n", hdr->sig_dbg_size);
+	nvkm_debug(subdev, "\tsigProdOffset    : 0x%x\n", hdr->sig_prod_offset);
+	nvkm_debug(subdev, "\tsigProdSize      : 0x%x\n", hdr->sig_prod_size);
+	nvkm_debug(subdev, "\tpatchLoc         : 0x%x\n", hdr->patch_loc);
+	nvkm_debug(subdev, "\tpatchSig         : 0x%x\n", hdr->patch_sig);
+	nvkm_debug(subdev, "\thdrOffset        : 0x%x\n", hdr->hdr_offset);
+	nvkm_debug(subdev, "\thdrSize          : 0x%x\n", hdr->hdr_size);
+	return hdr;
+}
+
+const struct nvfw_hs_load_header *
+nvfw_hs_load_header(struct nvkm_subdev *subdev, const void *data)
+{
+	const struct nvfw_hs_load_header *hdr = data;
+	int i;
+
+	nvkm_debug(subdev, "hsLoadHeader:\n");
+	nvkm_debug(subdev, "\tnonSecCodeOff    : 0x%x\n",
+			   hdr->non_sec_code_off);
+	nvkm_debug(subdev, "\tnonSecCodeSize   : 0x%x\n",
+			   hdr->non_sec_code_size);
+	nvkm_debug(subdev, "\tdataDmaBase      : 0x%x\n", hdr->data_dma_base);
+	nvkm_debug(subdev, "\tdataSize         : 0x%x\n", hdr->data_size);
+	nvkm_debug(subdev, "\tnumApps          : 0x%x\n", hdr->num_apps);
+	for (i = 0; i < hdr->num_apps; i++) {
+		nvkm_debug(subdev,
+			   "\tApp[%d]           : offset 0x%x size 0x%x\n", i,
+			   hdr->apps[(i * 2) + 0], hdr->apps[(i * 2) + 1]);
+	}
+
+	return hdr;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c
new file mode 100644
index 0000000..b847f28
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/ls.h>
+
+static void
+nvfw_ls_desc_head(struct nvkm_subdev *subdev,
+		  const struct nvfw_ls_desc_head *hdr)
+{
+	char *date;
+
+	nvkm_debug(subdev, "lsUcodeImgDesc:\n");
+	nvkm_debug(subdev, "\tdescriptorSize       : %d\n",
+			   hdr->descriptor_size);
+	nvkm_debug(subdev, "\timageSize            : %d\n", hdr->image_size);
+	nvkm_debug(subdev, "\ttoolsVersion         : 0x%x\n",
+			   hdr->tools_version);
+	nvkm_debug(subdev, "\tappVersion           : 0x%x\n", hdr->app_version);
+
+	date = kstrndup(hdr->date, sizeof(hdr->date), GFP_KERNEL);
+	nvkm_debug(subdev, "\tdate                 : %s\n", date);
+	kfree(date);
+
+	nvkm_debug(subdev, "\tbootloaderStartOffset: 0x%x\n",
+			   hdr->bootloader_start_offset);
+	nvkm_debug(subdev, "\tbootloaderSize       : 0x%x\n",
+			   hdr->bootloader_size);
+	nvkm_debug(subdev, "\tbootloaderImemOffset : 0x%x\n",
+			   hdr->bootloader_imem_offset);
+	nvkm_debug(subdev, "\tbootloaderEntryPoint : 0x%x\n",
+			   hdr->bootloader_entry_point);
+
+	nvkm_debug(subdev, "\tappStartOffset       : 0x%x\n",
+			   hdr->app_start_offset);
+	nvkm_debug(subdev, "\tappSize              : 0x%x\n", hdr->app_size);
+	nvkm_debug(subdev, "\tappImemOffset        : 0x%x\n",
+			   hdr->app_imem_offset);
+	nvkm_debug(subdev, "\tappImemEntry         : 0x%x\n",
+			   hdr->app_imem_entry);
+	nvkm_debug(subdev, "\tappDmemOffset        : 0x%x\n",
+			   hdr->app_dmem_offset);
+	nvkm_debug(subdev, "\tappResidentCodeOffset: 0x%x\n",
+			   hdr->app_resident_code_offset);
+	nvkm_debug(subdev, "\tappResidentCodeSize  : 0x%x\n",
+			   hdr->app_resident_code_size);
+	nvkm_debug(subdev, "\tappResidentDataOffset: 0x%x\n",
+			   hdr->app_resident_data_offset);
+	nvkm_debug(subdev, "\tappResidentDataSize  : 0x%x\n",
+			   hdr->app_resident_data_size);
+}
+
+const struct nvfw_ls_desc *
+nvfw_ls_desc(struct nvkm_subdev *subdev, const void *data)
+{
+	const struct nvfw_ls_desc *hdr = data;
+	int i;
+
+	nvfw_ls_desc_head(subdev, &hdr->head);
+
+	nvkm_debug(subdev, "\tnbOverlays           : %d\n", hdr->nb_overlays);
+	for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) {
+		nvkm_debug(subdev, "\tloadOvl[%d]          : 0x%x %d\n", i,
+			   hdr->load_ovl[i].start, hdr->load_ovl[i].size);
+	}
+	nvkm_debug(subdev, "\tcompressed           : %d\n", hdr->compressed);
+
+	return hdr;
+}
+
+const struct nvfw_ls_desc_v1 *
+nvfw_ls_desc_v1(struct nvkm_subdev *subdev, const void *data)
+{
+	const struct nvfw_ls_desc_v1 *hdr = data;
+	int i;
+
+	nvfw_ls_desc_head(subdev, &hdr->head);
+
+	nvkm_debug(subdev, "\tnbImemOverlays       : %d\n",
+			   hdr->nb_imem_overlays);
+	nvkm_debug(subdev, "\tnbDmemOverlays       : %d\n",
+			   hdr->nb_dmem_overlays);
+	for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) {
+		nvkm_debug(subdev, "\tloadOvl[%2d]          : 0x%x %d\n", i,
+			   hdr->load_ovl[i].start, hdr->load_ovl[i].size);
+	}
+	nvkm_debug(subdev, "\tcompressed           : %d\n", hdr->compressed);
+
+	return hdr;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index 4e136f3..fb4fff1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: MIT
+include $(src)/nvkm/subdev/acr/Kbuild
 include $(src)/nvkm/subdev/bar/Kbuild
 include $(src)/nvkm/subdev/bios/Kbuild
 include $(src)/nvkm/subdev/bus/Kbuild
@@ -19,7 +20,6 @@
 include $(src)/nvkm/subdev/mxm/Kbuild
 include $(src)/nvkm/subdev/pci/Kbuild
 include $(src)/nvkm/subdev/pmu/Kbuild
-include $(src)/nvkm/subdev/secboot/Kbuild
 include $(src)/nvkm/subdev/therm/Kbuild
 include $(src)/nvkm/subdev/timer/Kbuild
 include $(src)/nvkm/subdev/top/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild
new file mode 100644
index 0000000..5b9f64a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/acr/base.o
+nvkm-y += nvkm/subdev/acr/hsfw.o
+nvkm-y += nvkm/subdev/acr/lsfw.o
+nvkm-y += nvkm/subdev/acr/gm200.o
+nvkm-y += nvkm/subdev/acr/gm20b.o
+nvkm-y += nvkm/subdev/acr/gp102.o
+nvkm-y += nvkm/subdev/acr/gp108.o
+nvkm-y += nvkm/subdev/acr/gp10b.o
+nvkm-y += nvkm/subdev/acr/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
new file mode 100644
index 0000000..8eb2a93
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mmu.h>
+
+static struct nvkm_acr_hsf *
+nvkm_acr_hsf_find(struct nvkm_acr *acr, const char *name)
+{
+	struct nvkm_acr_hsf *hsf;
+	list_for_each_entry(hsf, &acr->hsf, head) {
+		if (!strcmp(hsf->name, name))
+			return hsf;
+	}
+	return NULL;
+}
+
+int
+nvkm_acr_hsf_boot(struct nvkm_acr *acr, const char *name)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+	struct nvkm_acr_hsf *hsf;
+	int ret;
+
+	hsf = nvkm_acr_hsf_find(acr, name);
+	if (!hsf)
+		return -EINVAL;
+
+	nvkm_debug(subdev, "executing %s binary\n", hsf->name);
+	ret = nvkm_falcon_get(hsf->falcon, subdev);
+	if (ret)
+		return ret;
+
+	ret = hsf->func->boot(acr, hsf);
+	nvkm_falcon_put(hsf->falcon, subdev);
+	if (ret) {
+		nvkm_error(subdev, "%s binary failed\n", hsf->name);
+		return ret;
+	}
+
+	nvkm_debug(subdev, "%s binary completed successfully\n", hsf->name);
+	return 0;
+}
+
+static void
+nvkm_acr_unload(struct nvkm_acr *acr)
+{
+	if (acr->done) {
+		nvkm_acr_hsf_boot(acr, "unload");
+		acr->done = false;
+	}
+}
+
+static int
+nvkm_acr_load(struct nvkm_acr *acr)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+	struct nvkm_acr_lsf *lsf;
+	u64 start, limit;
+	int ret;
+
+	if (list_empty(&acr->lsf)) {
+		nvkm_debug(subdev, "No LSF(s) present.\n");
+		return 0;
+	}
+
+	ret = acr->func->init(acr);
+	if (ret)
+		return ret;
+
+	acr->func->wpr_check(acr, &start, &limit);
+
+	if (start != acr->wpr_start || limit != acr->wpr_end) {
+		nvkm_error(subdev, "WPR not configured as expected: "
+				   "%016llx-%016llx vs %016llx-%016llx\n",
+			   acr->wpr_start, acr->wpr_end, start, limit);
+		return -EIO;
+	}
+
+	acr->done = true;
+
+	list_for_each_entry(lsf, &acr->lsf, head) {
+		if (lsf->func->boot) {
+			ret = lsf->func->boot(lsf->falcon);
+			if (ret)
+				break;
+		}
+	}
+
+	return ret;
+}
+
+static int
+nvkm_acr_reload(struct nvkm_acr *acr)
+{
+	nvkm_acr_unload(acr);
+	return nvkm_acr_load(acr);
+}
+
+static struct nvkm_acr_lsf *
+nvkm_acr_falcon(struct nvkm_device *device)
+{
+	struct nvkm_acr *acr = device->acr;
+	struct nvkm_acr_lsf *lsf;
+
+	if (acr) {
+		list_for_each_entry(lsf, &acr->lsf, head) {
+			if (lsf->func->bootstrap_falcon)
+				return lsf;
+		}
+	}
+
+	return NULL;
+}
+
+int
+nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask)
+{
+	struct nvkm_acr_lsf *acrflcn = nvkm_acr_falcon(device);
+	struct nvkm_acr *acr = device->acr;
+	unsigned long id;
+
+	if (!acrflcn) {
+		int ret = nvkm_acr_reload(acr);
+		if (ret)
+			return ret;
+
+		return acr->done ? 0 : -EINVAL;
+	}
+
+	if (acrflcn->func->bootstrap_multiple_falcons) {
+		return acrflcn->func->
+			bootstrap_multiple_falcons(acrflcn->falcon, mask);
+	}
+
+	for_each_set_bit(id, &mask, NVKM_ACR_LSF_NUM) {
+		int ret = acrflcn->func->bootstrap_falcon(acrflcn->falcon, id);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+bool
+nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id)
+{
+	struct nvkm_acr *acr = device->acr;
+	struct nvkm_acr_lsf *lsf;
+
+	if (acr) {
+		list_for_each_entry(lsf, &acr->lsf, head) {
+			if (lsf->id == id)
+				return true;
+		}
+	}
+
+	return false;
+}
+
+static int
+nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	nvkm_acr_unload(nvkm_acr(subdev));
+	return 0;
+}
+
+static int
+nvkm_acr_init(struct nvkm_subdev *subdev)
+{
+	if (!nvkm_acr_falcon(subdev->device))
+		return 0;
+
+	return nvkm_acr_load(nvkm_acr(subdev));
+}
+
+static void
+nvkm_acr_cleanup(struct nvkm_acr *acr)
+{
+	nvkm_acr_lsfw_del_all(acr);
+	nvkm_acr_hsfw_del_all(acr);
+	nvkm_firmware_put(acr->wpr_fw);
+	acr->wpr_fw = NULL;
+}
+
+static int
+nvkm_acr_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_acr *acr = nvkm_acr(subdev);
+	struct nvkm_acr_hsfw *hsfw;
+	struct nvkm_acr_lsfw *lsfw, *lsft;
+	struct nvkm_acr_lsf *lsf;
+	u32 wpr_size = 0;
+	int ret, i;
+
+	if (list_empty(&acr->hsfw)) {
+		nvkm_debug(subdev, "No HSFW(s)\n");
+		nvkm_acr_cleanup(acr);
+		return 0;
+	}
+
+	/* Determine layout/size of WPR image up-front, as we need to know
+	 * it to allocate memory before we begin constructing it.
+	 */
+	list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
+		/* Cull unknown falcons that are present in WPR image. */
+		if (acr->wpr_fw) {
+			if (!lsfw->func) {
+				nvkm_acr_lsfw_del(lsfw);
+				continue;
+			}
+
+			wpr_size = acr->wpr_fw->size;
+		}
+
+		/* Ensure we've fetched falcon configuration. */
+		ret = nvkm_falcon_get(lsfw->falcon, subdev);
+		if (ret)
+			return ret;
+
+		nvkm_falcon_put(lsfw->falcon, subdev);
+
+		if (!(lsf = kmalloc(sizeof(*lsf), GFP_KERNEL)))
+			return -ENOMEM;
+		lsf->func = lsfw->func;
+		lsf->falcon = lsfw->falcon;
+		lsf->id = lsfw->id;
+		list_add_tail(&lsf->head, &acr->lsf);
+	}
+
+	if (!acr->wpr_fw || acr->wpr_comp)
+		wpr_size = acr->func->wpr_layout(acr);
+
+	/* Allocate/Locate WPR + fill ucode blob pointer.
+	 *
+	 *  dGPU: allocate WPR + shadow blob
+	 * Tegra: locate WPR with regs, ensure size is sufficient,
+	 *        allocate ucode blob.
+	 */
+	ret = acr->func->wpr_alloc(acr, wpr_size);
+	if (ret)
+		return ret;
+
+	nvkm_debug(subdev, "WPR region is from 0x%llx-0x%llx (shadow 0x%llx)\n",
+		   acr->wpr_start, acr->wpr_end, acr->shadow_start);
+
+	/* Write WPR to ucode blob. */
+	nvkm_kmap(acr->wpr);
+	if (acr->wpr_fw && !acr->wpr_comp)
+		nvkm_wobj(acr->wpr, 0, acr->wpr_fw->data, acr->wpr_fw->size);
+
+	if (!acr->wpr_fw || acr->wpr_comp)
+		acr->func->wpr_build(acr, nvkm_acr_falcon(device));
+	acr->func->wpr_patch(acr, (s64)acr->wpr_start - acr->wpr_prev);
+
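+	/* When "NvAcrWprCompare" is set, the image we've just built is
+	 * compared word-by-word against the released WPR firmware, and
+	 * init is then failed on purpose - this path is purely a debugging
+	 * aid for validating our own WPR generator.
+	 */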
+	if (acr->wpr_fw && acr->wpr_comp) {
+		nvkm_kmap(acr->wpr);
+		for (i = 0; i < acr->wpr_fw->size; i += 4) {
+			u32 us = nvkm_ro32(acr->wpr, i);
+			u32 fw = ((u32 *)acr->wpr_fw->data)[i/4];
+			if (fw != us) {
+				nvkm_warn(subdev, "%08x: %08x %08x\n",
+					  i, us, fw);
+			}
+		}
+		return -EINVAL;
+	}
+	nvkm_done(acr->wpr);
+
+	/* Allocate instance block for ACR-related stuff. */
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
+			      &acr->inst);
+	if (ret)
+		return ret;
+
+	ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "acr", &acr->vmm);
+	if (ret)
+		return ret;
+
+	acr->vmm->debug = acr->subdev.debug;
+
+	ret = nvkm_vmm_join(acr->vmm, acr->inst);
+	if (ret)
+		return ret;
+
+	/* Load HS firmware blobs into ACR VMM. */
+	list_for_each_entry(hsfw, &acr->hsfw, head) {
+		nvkm_debug(subdev, "loading %s fw\n", hsfw->name);
+		ret = hsfw->func->load(acr, hsfw);
+		if (ret)
+			return ret;
+	}
+
+	/* Kill temporary data. */
+	nvkm_acr_cleanup(acr);
+	return 0;
+}
+
+static void *
+nvkm_acr_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_acr *acr = nvkm_acr(subdev);
+	struct nvkm_acr_hsf *hsf, *hst;
+	struct nvkm_acr_lsf *lsf, *lst;
+
+	list_for_each_entry_safe(hsf, hst, &acr->hsf, head) {
+		nvkm_vmm_put(acr->vmm, &hsf->vma);
+		nvkm_memory_unref(&hsf->ucode);
+		kfree(hsf->imem);
+		list_del(&hsf->head);
+		kfree(hsf);
+	}
+
+	nvkm_vmm_part(acr->vmm, acr->inst);
+	nvkm_vmm_unref(&acr->vmm);
+	nvkm_memory_unref(&acr->inst);
+
+	nvkm_memory_unref(&acr->wpr);
+
+	list_for_each_entry_safe(lsf, lst, &acr->lsf, head) {
+		list_del(&lsf->head);
+		kfree(lsf);
+	}
+
+	nvkm_acr_cleanup(acr);
+	return acr;
+}
+
+static const struct nvkm_subdev_func
+nvkm_acr = {
+	.dtor = nvkm_acr_dtor,
+	.oneinit = nvkm_acr_oneinit,
+	.init = nvkm_acr_init,
+	.fini = nvkm_acr_fini,
+};
+
+static int
+nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+	struct nvkm_device *device = subdev->device;
+	int ret;
+
+	ret = nvkm_firmware_get(subdev, "acr/wpr", ver, &acr->wpr_fw);
+	if (ret < 0)
+		return ret;
+
+	/* Pre-add LSFs in the order they appear in the FW WPR image so that
+	 * we're able to do a binary comparison with our own generator.
+	 */
+	ret = acr->func->wpr_parse(acr);
+	if (ret)
+		return ret;
+
+	acr->wpr_comp = nvkm_boolopt(device->cfgopt, "NvAcrWprCompare", false);
+	acr->wpr_prev = nvkm_longopt(device->cfgopt, "NvAcrWprPrevAddr", 0);
+	return 0;
+}
+
+int
+nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device,
+	      int index, struct nvkm_acr **pacr)
+{
+	struct nvkm_acr *acr;
+	long wprfw;
+
+	if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_acr, device, index, &acr->subdev);
+	INIT_LIST_HEAD(&acr->hsfw);
+	INIT_LIST_HEAD(&acr->lsfw);
+	INIT_LIST_HEAD(&acr->hsf);
+	INIT_LIST_HEAD(&acr->lsf);
+
+	fwif = nvkm_firmware_load(&acr->subdev, fwif, "Acr", acr);
+	if (IS_ERR(fwif))
+		return PTR_ERR(fwif);
+
+	acr->func = fwif->func;
+
+	wprfw = nvkm_longopt(device->cfgopt, "NvAcrWpr", -1);
+	if (wprfw >= 0) {
+		int ret = nvkm_acr_ctor_wpr(acr, wprfw);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
new file mode 100644
index 0000000..9a63940
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/falcon.h>
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mc.h>
+#include <subdev/mmu.h>
+#include <subdev/pmu.h>
+#include <subdev/timer.h>
+
+#include <nvfw/acr.h>
+#include <nvfw/flcn.h>
+
+int
+gm200_acr_init(struct nvkm_acr *acr)
+{
+	return nvkm_acr_hsf_boot(acr, "load");
+}
+
+void
+gm200_acr_wpr_check(struct nvkm_acr *acr, u64 *start, u64 *limit)
+{
+	struct nvkm_device *device = acr->subdev.device;
+
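+	/* 0x100cd4 appears to act as an index/data register: writing 2 or 3
+	 * selects the WPR lower or upper bound, which reads back in units of
+	 * 256 bytes (hence the << 8).  The extra 0x20000 presumably accounts
+	 * for the granularity of the reported limit.
+	 */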
+	nvkm_wr32(device, 0x100cd4, 2);
+	*start = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
+	nvkm_wr32(device, 0x100cd4, 3);
+	*limit = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
+	*limit = *limit + 0x20000;
+}
+
+void
+gm200_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+	struct wpr_header hdr;
+	struct lsb_header lsb;
+	struct nvkm_acr_lsfw *lsfw;
+	u32 offset = 0;
+
+	do {
+		nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
+		wpr_header_dump(subdev, &hdr);
+
+		list_for_each_entry(lsfw, &acr->lsfw, head) {
+			if (lsfw->id != hdr.falcon_id)
+				continue;
+
+			nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb));
+			lsb_header_dump(subdev, &lsb);
+
+			lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust);
+			break;
+		}
+		offset += sizeof(hdr);
+	} while (hdr.falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID);
+}
+
+void
+gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *lsfw,
+			     struct lsb_header_tail *hdr)
+{
+	hdr->ucode_off = lsfw->offset.img;
+	hdr->ucode_size = lsfw->ucode_size;
+	hdr->data_size = lsfw->data_size;
+	hdr->bl_code_size = lsfw->bootloader_size;
+	hdr->bl_imem_off = lsfw->bootloader_imem_offset;
+	hdr->bl_data_off = lsfw->offset.bld;
+	hdr->bl_data_size = lsfw->bl_data_size;
+	hdr->app_code_off = lsfw->app_start_offset +
+			   lsfw->app_resident_code_offset;
+	hdr->app_code_size = lsfw->app_resident_code_size;
+	hdr->app_data_off = lsfw->app_start_offset +
+			   lsfw->app_resident_data_offset;
+	hdr->app_data_size = lsfw->app_resident_data_size;
+	hdr->flags = lsfw->func->flags;
+}
+
+static int
+gm200_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
+{
+	struct lsb_header hdr;
+
+	if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature)))
+		return -EINVAL;
+
+	memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size);
+	gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail);
+
+	nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr));
+	return 0;
+}
+
+int
+gm200_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
+{
+	struct nvkm_acr_lsfw *lsfw;
+	u32 offset = 0;
+	int ret;
+
+	/* Fill per-LSF structures. */
+	list_for_each_entry(lsfw, &acr->lsfw, head) {
+		struct wpr_header hdr = {
+			.falcon_id = lsfw->id,
+			.lsb_offset = lsfw->offset.lsb,
+			.bootstrap_owner = NVKM_ACR_LSF_PMU,
+			.lazy_bootstrap = rtos && lsfw->id != rtos->id,
+			.status = WPR_HEADER_V0_STATUS_COPY,
+		};
+
+		/* Write WPR header. */
+		nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
+		offset += sizeof(hdr);
+
+		/* Write LSB header. */
+		ret = gm200_acr_wpr_build_lsb(acr, lsfw);
+		if (ret)
+			return ret;
+
+		/* Write ucode image. */
+		nvkm_wobj(acr->wpr, lsfw->offset.img,
+				    lsfw->img.data,
+				    lsfw->img.size);
+
+		/* Write bootloader data. */
+		lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
+	}
+
+	/* Finalise WPR. */
+	nvkm_wo32(acr->wpr, offset, WPR_HEADER_V0_FALCON_ID_INVALID);
+	return 0;
+}
+
+static int
+gm200_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
+{
+	int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST,
+				  ALIGN(wpr_size, 0x40000), 0x40000, true,
+				  &acr->wpr);
+	if (ret)
+		return ret;
+
+	acr->wpr_start = nvkm_memory_addr(acr->wpr);
+	acr->wpr_end = acr->wpr_start + nvkm_memory_size(acr->wpr);
+	return 0;
+}
+
+u32
+gm200_acr_wpr_layout(struct nvkm_acr *acr)
+{
+	struct nvkm_acr_lsfw *lsfw;
+	u32 wpr = 0;
+
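+	/* WPR layout: a fixed-size array of WPR headers, followed for each
+	 * LSF by a 256-byte aligned LSB header, a 4KiB-aligned ucode image,
+	 * and a 256-byte aligned bootloader-data blob.
+	 */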
+	wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header);
+
+	list_for_each_entry(lsfw, &acr->lsfw, head) {
+		wpr  = ALIGN(wpr, 256);
+		lsfw->offset.lsb = wpr;
+		wpr += sizeof(struct lsb_header);
+
+		wpr  = ALIGN(wpr, 4096);
+		lsfw->offset.img = wpr;
+		wpr += lsfw->img.size;
+
+		wpr  = ALIGN(wpr, 256);
+		lsfw->offset.bld = wpr;
+		lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
+		wpr += lsfw->bl_data_size;
+	}
+
+	return wpr;
+}
+
+int
+gm200_acr_wpr_parse(struct nvkm_acr *acr)
+{
+	const struct wpr_header *hdr = (void *)acr->wpr_fw->data;
+
+	while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
+		wpr_header_dump(&acr->subdev, hdr);
+		if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void
+gm200_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+	struct flcn_bl_dmem_desc_v1 hsdesc = {
+		.ctx_dma = FALCON_DMAIDX_VIRT,
+		.code_dma_base = hsf->vma->addr,
+		.non_sec_code_off = hsf->non_sec_addr,
+		.non_sec_code_size = hsf->non_sec_size,
+		.sec_code_off = hsf->sec_addr,
+		.sec_code_size = hsf->sec_size,
+		.code_entry_point = 0,
+		.data_dma_base = hsf->vma->addr + hsf->data_addr,
+		.data_size = hsf->data_size,
+	};
+
+	flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hsdesc);
+
+	nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
+}
+
+int
+gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf,
+		    u32 intr_clear, u32 mbox0_ok)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_falcon *falcon = hsf->falcon;
+	u32 mbox0, mbox1;
+	int ret;
+
+	/* Reset falcon. */
+	nvkm_falcon_reset(falcon);
+	nvkm_falcon_bind_context(falcon, acr->inst);
+
+	/* Load bootloader into IMEM. */
+	nvkm_falcon_load_imem(falcon, hsf->imem,
+				      falcon->code.limit - hsf->imem_size,
+				      hsf->imem_size,
+				      hsf->imem_tag,
+				      0, false);
+
+	/* Load bootloader data into DMEM. */
+	hsf->func->bld(acr, hsf);
+
+	/* Boot the falcon. */
+	nvkm_mc_intr_mask(device, falcon->owner->index, false);
+
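+	/* Seed mailbox 0 with a poison value; the HS ucode is expected to
+	 * overwrite it with its completion status, which is checked against
+	 * mbox0_ok once the falcon halts.
+	 */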
+	nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
+	nvkm_falcon_set_start_addr(falcon, hsf->imem_tag << 8);
+	nvkm_falcon_start(falcon);
+	ret = nvkm_falcon_wait_for_halt(falcon, 100);
+	if (ret)
+		return ret;
+
+	/* Check for successful completion. */
+	mbox0 = nvkm_falcon_rd32(falcon, 0x040);
+	mbox1 = nvkm_falcon_rd32(falcon, 0x044);
+	nvkm_debug(subdev, "mailbox %08x %08x\n", mbox0, mbox1);
+	if (mbox0 && mbox0 != mbox0_ok)
+		return -EIO;
+
+	nvkm_falcon_clear_interrupt(falcon, intr_clear);
+	nvkm_mc_intr_mask(device, falcon->owner->index, true);
+	return ret;
+}
+
+int
+gm200_acr_hsfw_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw,
+		    struct nvkm_falcon *falcon)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+	struct nvkm_acr_hsf *hsf;
+	int ret;
+
+	/* Patch the appropriate signature (production/debug) into the FW
+	 * image, as determined by the mode the falcon is in.
+	 */
+	ret = nvkm_falcon_get(falcon, subdev);
+	if (ret)
+		return ret;
+
+	if (hsfw->sig.patch_loc) {
+		if (!falcon->debug) {
+			nvkm_debug(subdev, "patching production signature\n");
+			memcpy(hsfw->image + hsfw->sig.patch_loc,
+			       hsfw->sig.prod.data,
+			       hsfw->sig.prod.size);
+		} else {
+			nvkm_debug(subdev, "patching debug signature\n");
+			memcpy(hsfw->image + hsfw->sig.patch_loc,
+			       hsfw->sig.dbg.data,
+			       hsfw->sig.dbg.size);
+		}
+	}
+
+	nvkm_falcon_put(falcon, subdev);
+
+	if (!(hsf = kzalloc(sizeof(*hsf), GFP_KERNEL)))
+		return -ENOMEM;
+	hsf->func = hsfw->func;
+	hsf->name = hsfw->name;
+	list_add_tail(&hsf->head, &acr->hsf);
+
+	hsf->imem_size = hsfw->imem_size;
+	hsf->imem_tag = hsfw->imem_tag;
+	hsf->imem = kmemdup(hsfw->imem, hsfw->imem_size, GFP_KERNEL);
+	if (!hsf->imem)
+		return -ENOMEM;
+
+	hsf->non_sec_addr = hsfw->non_sec_addr;
+	hsf->non_sec_size = hsfw->non_sec_size;
+	hsf->sec_addr = hsfw->sec_addr;
+	hsf->sec_size = hsfw->sec_size;
+	hsf->data_addr = hsfw->data_addr;
+	hsf->data_size = hsfw->data_size;
+
+	/* Make the FW image accessible to the HS bootloader. */
+	ret = nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST,
+			      hsfw->image_size, 0x1000, false, &hsf->ucode);
+	if (ret)
+		return ret;
+
+	nvkm_kmap(hsf->ucode);
+	nvkm_wobj(hsf->ucode, 0, hsfw->image, hsfw->image_size);
+	nvkm_done(hsf->ucode);
+
+	ret = nvkm_vmm_get(acr->vmm, 12, nvkm_memory_size(hsf->ucode),
+			   &hsf->vma);
+	if (ret)
+		return ret;
+
+	ret = nvkm_memory_map(hsf->ucode, 0, acr->vmm, hsf->vma, NULL, 0);
+	if (ret)
+		return ret;
+
+	hsf->falcon = falcon;
+	return 0;
+}
+
+int
+gm200_acr_unload_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+	return gm200_acr_hsfw_boot(acr, hsf, 0, 0x1d);
+}
+
+int
+gm200_acr_unload_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+	return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
+}
+
+const struct nvkm_acr_hsf_func
+gm200_acr_unload_0 = {
+	.load = gm200_acr_unload_load,
+	.boot = gm200_acr_unload_boot,
+	.bld = gm200_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gm200_acr_unload_fwif[] = {
+	{ 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 },
+	{}
+};
+
+int
+gm200_acr_load_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+	return gm200_acr_hsfw_boot(acr, hsf, 0x10, 0);
+}
+
+static int
+gm200_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+	struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr];
+
+	desc->wpr_region_id = 1;
+	desc->regions.no_regions = 2;
+	desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;
+	desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;
+	desc->regions.region_props[0].region_id = 1;
+	desc->regions.region_props[0].read_mask = 0xf;
+	desc->regions.region_props[0].write_mask = 0xc;
+	desc->regions.region_props[0].client_mask = 0x2;
+	flcn_acr_desc_dump(&acr->subdev, desc);
+
+	return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
+}
+
+static const struct nvkm_acr_hsf_func
+gm200_acr_load_0 = {
+	.load = gm200_acr_load_load,
+	.boot = gm200_acr_load_boot,
+	.bld = gm200_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gm200_acr_load_fwif[] = {
+	{ 0, nvkm_acr_hsfw_load, &gm200_acr_load_0 },
+	{}
+};
+
+static const struct nvkm_acr_func
+gm200_acr = {
+	.load = gm200_acr_load_fwif,
+	.unload = gm200_acr_unload_fwif,
+	.wpr_parse = gm200_acr_wpr_parse,
+	.wpr_layout = gm200_acr_wpr_layout,
+	.wpr_alloc = gm200_acr_wpr_alloc,
+	.wpr_build = gm200_acr_wpr_build,
+	.wpr_patch = gm200_acr_wpr_patch,
+	.wpr_check = gm200_acr_wpr_check,
+	.init = gm200_acr_init,
+};
+
+static int
+gm200_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+	const struct nvkm_acr_hsf_fwif *hsfwif;
+
+	hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
+				    acr, "acr/bl", "acr/ucode_load", "load");
+	if (IS_ERR(hsfwif))
+		return PTR_ERR(hsfwif);
+
+	hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
+				    acr, "acr/bl", "acr/ucode_unload",
+				    "unload");
+	if (IS_ERR(hsfwif))
+		return PTR_ERR(hsfwif);
+
+	return 0;
+}
+
+static const struct nvkm_acr_fwif
+gm200_acr_fwif[] = {
+	{ 0, gm200_acr_load, &gm200_acr },
+	{}
+};
+
+int
+gm200_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+	return nvkm_acr_new_(gm200_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
new file mode 100644
index 0000000..034a6ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <subdev/pmu.h>
+
+#include <nvfw/acr.h>
+#include <nvfw/flcn.h>
+
+int
+gm20b_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+
+	acr->func->wpr_check(acr, &acr->wpr_start, &acr->wpr_end);
+
+	if ((acr->wpr_end - acr->wpr_start) < wpr_size) {
+		nvkm_error(subdev, "WPR image too big for WPR!\n");
+		return -ENOSPC;
+	}
+
+	return nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST,
+			       wpr_size, 0, true, &acr->wpr);
+}
+
+static void
+gm20b_acr_load_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
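+	/* Note: unlike gm200_acr_hsfw_bld(), DMA addresses in this descriptor
+	 * are shifted down by 8 bits, i.e. presumably expressed in 256-byte
+	 * units.
+	 */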
+	struct flcn_bl_dmem_desc hsdesc = {
+		.ctx_dma = FALCON_DMAIDX_VIRT,
+		.code_dma_base = hsf->vma->addr >> 8,
+		.non_sec_code_off = hsf->non_sec_addr,
+		.non_sec_code_size = hsf->non_sec_size,
+		.sec_code_off = hsf->sec_addr,
+		.sec_code_size = hsf->sec_size,
+		.code_entry_point = 0,
+		.data_dma_base = (hsf->vma->addr + hsf->data_addr) >> 8,
+		.data_size = hsf->data_size,
+	};
+
+	flcn_bl_dmem_desc_dump(&acr->subdev, &hsdesc);
+
+	nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
+}
+
+static int
+gm20b_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+	struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr];
+
+	desc->ucode_blob_base = nvkm_memory_addr(acr->wpr);
+	desc->ucode_blob_size = nvkm_memory_size(acr->wpr);
+	flcn_acr_desc_dump(&acr->subdev, desc);
+
+	return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
+}
+
+const struct nvkm_acr_hsf_func
+gm20b_acr_load_0 = {
+	.load = gm20b_acr_load_load,
+	.boot = gm200_acr_load_boot,
+	.bld = gm20b_acr_load_bld,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
+#endif
+
+static const struct nvkm_acr_hsf_fwif
+gm20b_acr_load_fwif[] = {
+	{ 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 },
+	{}
+};
+
+static const struct nvkm_acr_func
+gm20b_acr = {
+	.load = gm20b_acr_load_fwif,
+	.wpr_parse = gm200_acr_wpr_parse,
+	.wpr_layout = gm200_acr_wpr_layout,
+	.wpr_alloc = gm20b_acr_wpr_alloc,
+	.wpr_build = gm200_acr_wpr_build,
+	.wpr_patch = gm200_acr_wpr_patch,
+	.wpr_check = gm200_acr_wpr_check,
+	.init = gm200_acr_init,
+};
+
+int
+gm20b_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+	const struct nvkm_acr_hsf_fwif *hsfwif;
+
+	hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
+				    acr, "acr/bl", "acr/ucode_load", "load");
+	if (IS_ERR(hsfwif))
+		return PTR_ERR(hsfwif);
+
+	return 0;
+}
+
+static const struct nvkm_acr_fwif
+gm20b_acr_fwif[] = {
+	{ 0, gm20b_acr_load, &gm20b_acr },
+	{}
+};
+
+int
+gm20b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+	return nvkm_acr_new_(gm20b_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
new file mode 100644
index 0000000..49e11c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <engine/sec2.h>
+
+#include <nvfw/acr.h>
+#include <nvfw/flcn.h>
+
+void
+gp102_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
+{
+	struct wpr_header_v1 hdr;
+	struct lsb_header_v1 lsb;
+	struct nvkm_acr_lsfw *lsfw;
+	u32 offset = 0;
+
+	do {
+		nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
+		wpr_header_v1_dump(&acr->subdev, &hdr);
+
+		list_for_each_entry(lsfw, &acr->lsfw, head) {
+			if (lsfw->id != hdr.falcon_id)
+				continue;
+
+			nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb));
+			lsb_header_v1_dump(&acr->subdev, &lsb);
+
+			lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust);
+			break;
+		}
+
+		offset += sizeof(hdr);
+	} while (hdr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID);
+}
+
+int
+gp102_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
+{
+	struct lsb_header_v1 hdr;
+
+	if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature)))
+		return -EINVAL;
+
+	memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size);
+	gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail);
+
+	nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr));
+	return 0;
+}
+
+int
+gp102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
+{
+	struct nvkm_acr_lsfw *lsfw;
+	u32 offset = 0;
+	int ret;
+
+	/* Fill per-LSF structures. */
+	list_for_each_entry(lsfw, &acr->lsfw, head) {
+		struct lsf_signature_v1 *sig = (void *)lsfw->sig->data;
+		struct wpr_header_v1 hdr = {
+			.falcon_id = lsfw->id,
+			.lsb_offset = lsfw->offset.lsb,
+			.bootstrap_owner = NVKM_ACR_LSF_SEC2,
+			.lazy_bootstrap = rtos && lsfw->id != rtos->id,
+			.bin_version = sig->version,
+			.status = WPR_HEADER_V1_STATUS_COPY,
+		};
+
+		/* Write WPR header. */
+		nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
+		offset += sizeof(hdr);
+
+		/* Write LSB header. */
+		ret = gp102_acr_wpr_build_lsb(acr, lsfw);
+		if (ret)
+			return ret;
+
+		/* Write ucode image. */
+		nvkm_wobj(acr->wpr, lsfw->offset.img,
+				    lsfw->img.data,
+				    lsfw->img.size);
+
+		/* Write bootloader data. */
+		lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
+	}
+
+	/* Finalise WPR. */
+	nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID);
+	return 0;
+}
+
+int
+gp102_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
+{
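+	/* Allocate twice the computed WPR size: the first half serves as the
+	 * shadow copy, the second as the WPR region proper (see the start/end
+	 * calculations below).
+	 */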
+	int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST,
+				  ALIGN(wpr_size, 0x40000) << 1, 0x40000, true,
+				  &acr->wpr);
+	if (ret)
+		return ret;
+
+	acr->shadow_start = nvkm_memory_addr(acr->wpr);
+	acr->wpr_start = acr->shadow_start + (nvkm_memory_size(acr->wpr) >> 1);
+	acr->wpr_end = acr->wpr_start + (nvkm_memory_size(acr->wpr) >> 1);
+	return 0;
+}
+
+u32
+gp102_acr_wpr_layout(struct nvkm_acr *acr)
+{
+	struct nvkm_acr_lsfw *lsfw;
+	u32 wpr = 0;
+
+	wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header_v1);
+	wpr  = ALIGN(wpr, 256);
+
+	wpr += 0x100; /* Shared sub-WPR headers. */
+
+	list_for_each_entry(lsfw, &acr->lsfw, head) {
+		wpr  = ALIGN(wpr, 256);
+		lsfw->offset.lsb = wpr;
+		wpr += sizeof(struct lsb_header_v1);
+
+		wpr  = ALIGN(wpr, 4096);
+		lsfw->offset.img = wpr;
+		wpr += lsfw->img.size;
+
+		wpr  = ALIGN(wpr, 256);
+		lsfw->offset.bld = wpr;
+		lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
+		wpr += lsfw->bl_data_size;
+	}
+
+	return wpr;
+}
+
+int
+gp102_acr_wpr_parse(struct nvkm_acr *acr)
+{
+	const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data;
+
+	while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
+		wpr_header_v1_dump(&acr->subdev, hdr);
+		if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp102_acr_unload_fwif[] = {
+	{ 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 },
+	{}
+};
+
+int
+gp102_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+	struct flcn_acr_desc_v1 *desc = (void *)&hsfw->image[hsfw->data_addr];
+
+	desc->wpr_region_id = 1;
+	desc->regions.no_regions = 2;
+	desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;
+	desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;
+	desc->regions.region_props[0].region_id = 1;
+	desc->regions.region_props[0].read_mask = 0xf;
+	desc->regions.region_props[0].write_mask = 0xc;
+	desc->regions.region_props[0].client_mask = 0x2;
+	desc->regions.region_props[0].shadow_mem_start_addr =
+		acr->shadow_start >> 8;
+	flcn_acr_desc_v1_dump(&acr->subdev, desc);
+
+	return gm200_acr_hsfw_load(acr, hsfw,
+				  &acr->subdev.device->sec2->falcon);
+}
+
+static const struct nvkm_acr_hsf_func
+gp102_acr_load_0 = {
+	.load = gp102_acr_load_load,
+	.boot = gm200_acr_load_boot,
+	.bld = gm200_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp102_acr_load_fwif[] = {
+	{ 0, nvkm_acr_hsfw_load, &gp102_acr_load_0 },
+	{}
+};
+
+static const struct nvkm_acr_func
+gp102_acr = {
+	.load = gp102_acr_load_fwif,
+	.unload = gp102_acr_unload_fwif,
+	.wpr_parse = gp102_acr_wpr_parse,
+	.wpr_layout = gp102_acr_wpr_layout,
+	.wpr_alloc = gp102_acr_wpr_alloc,
+	.wpr_build = gp102_acr_wpr_build,
+	.wpr_patch = gp102_acr_wpr_patch,
+	.wpr_check = gm200_acr_wpr_check,
+	.init = gm200_acr_init,
+};
+
+int
+gp102_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+	const struct nvkm_acr_hsf_fwif *hsfwif;
+
+	hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
+				    acr, "acr/bl", "acr/ucode_load", "load");
+	if (IS_ERR(hsfwif))
+		return PTR_ERR(hsfwif);
+
+	hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
+				    acr, "acr/unload_bl", "acr/ucode_unload",
+				    "unload");
+	if (IS_ERR(hsfwif))
+		return PTR_ERR(hsfwif);
+
+	return 0;
+}
+
+static const struct nvkm_acr_fwif
+gp102_acr_fwif[] = {
+	{ 0, gp102_acr_load, &gp102_acr },
+	{}
+};
+
+int
+gp102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+	return nvkm_acr_new_(gp102_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
new file mode 100644
index 0000000..f10dc91
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/mmu.h>
+
+#include <nvfw/flcn.h>
+
+void
+gp108_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+	struct flcn_bl_dmem_desc_v2 hsdesc = {
+		.ctx_dma = FALCON_DMAIDX_VIRT,
+		.code_dma_base = hsf->vma->addr,
+		.non_sec_code_off = hsf->non_sec_addr,
+		.non_sec_code_size = hsf->non_sec_size,
+		.sec_code_off = hsf->sec_addr,
+		.sec_code_size = hsf->sec_size,
+		.code_entry_point = 0,
+		.data_dma_base = hsf->vma->addr + hsf->data_addr,
+		.data_size = hsf->data_size,
+		.argc = 0,
+		.argv = 0,
+	};
+
+	flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hsdesc);
+
+	nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
+}
+
+const struct nvkm_acr_hsf_func
+gp108_acr_unload_0 = {
+	.load = gm200_acr_unload_load,
+	.boot = gm200_acr_unload_boot,
+	.bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp108_acr_unload_fwif[] = {
+	{ 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 },
+	{}
+};
+
+static const struct nvkm_acr_hsf_func
+gp108_acr_load_0 = {
+	.load = gp102_acr_load_load,
+	.boot = gm200_acr_load_boot,
+	.bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp108_acr_load_fwif[] = {
+	{ 0, nvkm_acr_hsfw_load, &gp108_acr_load_0 },
+	{}
+};
+
+static const struct nvkm_acr_func
+gp108_acr = {
+	.load = gp108_acr_load_fwif,
+	.unload = gp108_acr_unload_fwif,
+	.wpr_parse = gp102_acr_wpr_parse,
+	.wpr_layout = gp102_acr_wpr_layout,
+	.wpr_alloc = gp102_acr_wpr_alloc,
+	.wpr_build = gp102_acr_wpr_build,
+	.wpr_patch = gp102_acr_wpr_patch,
+	.wpr_check = gm200_acr_wpr_check,
+	.init = gm200_acr_init,
+};
+
+static const struct nvkm_acr_fwif
+gp108_acr_fwif[] = {
+	{ 0, gp102_acr_load, &gp108_acr },
+	{}
+};
+
+int
+gp108_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+	return nvkm_acr_new_(gp108_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
new file mode 100644
index 0000000..39de642
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
+#endif
+
+static const struct nvkm_acr_hsf_fwif
+gp10b_acr_load_fwif[] = {
+	{ 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 },
+	{}
+};
+
+static const struct nvkm_acr_func
+gp10b_acr = {
+	.load = gp10b_acr_load_fwif,
+	.wpr_parse = gm200_acr_wpr_parse,
+	.wpr_layout = gm200_acr_wpr_layout,
+	.wpr_alloc = gm20b_acr_wpr_alloc,
+	.wpr_build = gm200_acr_wpr_build,
+	.wpr_patch = gm200_acr_wpr_patch,
+	.wpr_check = gm200_acr_wpr_check,
+	.init = gm200_acr_init,
+};
+
+static const struct nvkm_acr_fwif
+gp10b_acr_fwif[] = {
+	{ 0, gm20b_acr_load, &gp10b_acr },
+	{}
+};
+
+int
+gp10b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+	return nvkm_acr_new_(gp10b_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
new file mode 100644
index 0000000..aecce2d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+
+static void
+nvkm_acr_hsfw_del(struct nvkm_acr_hsfw *hsfw)
+{
+	list_del(&hsfw->head);
+	kfree(hsfw->imem);
+	kfree(hsfw->image);
+	kfree(hsfw->sig.prod.data);
+	kfree(hsfw->sig.dbg.data);
+	kfree(hsfw);
+}
+
+void
+nvkm_acr_hsfw_del_all(struct nvkm_acr *acr)
+{
+	struct nvkm_acr_hsfw *hsfw, *hsft;
+	list_for_each_entry_safe(hsfw, hsft, &acr->hsfw, head) {
+		nvkm_acr_hsfw_del(hsfw);
+	}
+}
+
+static int
+nvkm_acr_hsfw_load_image(struct nvkm_acr *acr, const char *name, int ver,
+			 struct nvkm_acr_hsfw *hsfw)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+	const struct firmware *fw;
+	const struct nvfw_bin_hdr *hdr;
+	const struct nvfw_hs_header *fwhdr;
+	const struct nvfw_hs_load_header *lhdr;
+	u32 loc, sig;
+	int ret;
+
+	ret = nvkm_firmware_get(subdev, name, ver, &fw);
+	if (ret < 0)
+		return ret;
+
+	hdr = nvfw_bin_hdr(subdev, fw->data);
+	fwhdr = nvfw_hs_header(subdev, fw->data + hdr->header_offset);
+
+	/* Earlier FW releases by NVIDIA for Nouveau's use aren't in NVIDIA's
+	 * standard format, and don't have the indirection seen in the 0x10de
+	 * case.
+	 */
+	switch (hdr->bin_magic) {
+	case 0x000010de:
+		loc = *(u32 *)(fw->data + fwhdr->patch_loc);
+		sig = *(u32 *)(fw->data + fwhdr->patch_sig);
+		break;
+	case 0x3b1d14f0:
+		loc = fwhdr->patch_loc;
+		sig = fwhdr->patch_sig;
+		break;
+	default:
+		ret = -EINVAL;
+		goto done;
+	}
+
+	lhdr = nvfw_hs_load_header(subdev, fw->data + fwhdr->hdr_offset);
+
+	if (!(hsfw->image = kmalloc(hdr->data_size, GFP_KERNEL))) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	memcpy(hsfw->image, fw->data + hdr->data_offset, hdr->data_size);
+	hsfw->image_size = hdr->data_size;
+	hsfw->non_sec_addr = lhdr->non_sec_code_off;
+	hsfw->non_sec_size = lhdr->non_sec_code_size;
+	hsfw->sec_addr = lhdr->apps[0];
+	hsfw->sec_size = lhdr->apps[lhdr->num_apps];
+	hsfw->data_addr = lhdr->data_dma_base;
+	hsfw->data_size = lhdr->data_size;
+
+	hsfw->sig.prod.size = fwhdr->sig_prod_size;
+	hsfw->sig.prod.data = kmalloc(hsfw->sig.prod.size, GFP_KERNEL);
+	if (!hsfw->sig.prod.data) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	memcpy(hsfw->sig.prod.data, fw->data + fwhdr->sig_prod_offset + sig,
+	       hsfw->sig.prod.size);
+
+	hsfw->sig.dbg.size = fwhdr->sig_dbg_size;
+	hsfw->sig.dbg.data = kmalloc(hsfw->sig.dbg.size, GFP_KERNEL);
+	if (!hsfw->sig.dbg.data) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	memcpy(hsfw->sig.dbg.data, fw->data + fwhdr->sig_dbg_offset + sig,
+	       hsfw->sig.dbg.size);
+
+	hsfw->sig.patch_loc = loc;
+done:
+	nvkm_firmware_put(fw);
+	return ret;
+}
+
+static int
+nvkm_acr_hsfw_load_bl(struct nvkm_acr *acr, const char *name, int ver,
+		      struct nvkm_acr_hsfw *hsfw)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+	const struct nvfw_bin_hdr *hdr;
+	const struct nvfw_bl_desc *desc;
+	const struct firmware *fw;
+	u8 *data;
+	int ret;
+
+	ret = nvkm_firmware_get(subdev, name, ver, &fw);
+	if (ret)
+		return ret;
+
+	hdr = nvfw_bin_hdr(subdev, fw->data);
+	desc = nvfw_bl_desc(subdev, fw->data + hdr->header_offset);
+	data = (void *)fw->data + hdr->data_offset;
+
+	hsfw->imem_size = desc->code_size;
+	hsfw->imem_tag = desc->start_tag;
+	hsfw->imem = kmalloc(desc->code_size, GFP_KERNEL);
+	if (!hsfw->imem) {
+		nvkm_firmware_put(fw);
+		return -ENOMEM;
+	}
+	memcpy(hsfw->imem, data + desc->code_off, desc->code_size);
+
+	nvkm_firmware_put(fw);
+	return 0;
+}
+
+int
+nvkm_acr_hsfw_load(struct nvkm_acr *acr, const char *bl, const char *fw,
+		   const char *name, int version,
+		   const struct nvkm_acr_hsf_fwif *fwif)
+{
+	struct nvkm_acr_hsfw *hsfw;
+	int ret;
+
+	if (!(hsfw = kzalloc(sizeof(*hsfw), GFP_KERNEL)))
+		return -ENOMEM;
+
+	hsfw->func = fwif->func;
+	hsfw->name = name;
+	list_add_tail(&hsfw->head, &acr->hsfw);
+
+	ret = nvkm_acr_hsfw_load_bl(acr, bl, version, hsfw);
+	if (ret)
+		goto done;
+
+	ret = nvkm_acr_hsfw_load_image(acr, fw, version, hsfw);
+done:
+	if (ret)
+		nvkm_acr_hsfw_del(hsfw);
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
new file mode 100644
index 0000000..07d1830
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include <core/falcon.h>
+#include <core/firmware.h>
+#include <nvfw/fw.h>
+#include <nvfw/ls.h>
+
+void
+nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *lsfw)
+{
+	nvkm_blob_dtor(&lsfw->img);
+	nvkm_firmware_put(lsfw->sig);
+	list_del(&lsfw->head);
+	kfree(lsfw);
+}
+
+void
+nvkm_acr_lsfw_del_all(struct nvkm_acr *acr)
+{
+	struct nvkm_acr_lsfw *lsfw, *lsft;
+	list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
+		nvkm_acr_lsfw_del(lsfw);
+	}
+}
+
+static struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_get(struct nvkm_acr *acr, enum nvkm_acr_lsf_id id)
+{
+	struct nvkm_acr_lsfw *lsfw;
+	list_for_each_entry(lsfw, &acr->lsfw, head) {
+		if (lsfw->id == id)
+			return lsfw;
+	}
+	return NULL;
+}
+
+struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *func, struct nvkm_acr *acr,
+		 struct nvkm_falcon *falcon, enum nvkm_acr_lsf_id id)
+{
+	struct nvkm_acr_lsfw *lsfw;
+
+	if (!acr)
+		return ERR_PTR(-ENOSYS);
+
+	lsfw = nvkm_acr_lsfw_get(acr, id);
+	if (lsfw && lsfw->func) {
+		nvkm_error(&acr->subdev, "LSFW %d redefined\n", id);
+		return ERR_PTR(-EEXIST);
+	}
+
+	if (!lsfw) {
+		if (!(lsfw = kzalloc(sizeof(*lsfw), GFP_KERNEL)))
+			return ERR_PTR(-ENOMEM);
+
+		lsfw->id = id;
+		list_add_tail(&lsfw->head, &acr->lsfw);
+	}
+
+	lsfw->func = func;
+	lsfw->falcon = falcon;
+	return lsfw;
+}
+
+static struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_load_sig_image_desc_(struct nvkm_subdev *subdev,
+				   struct nvkm_falcon *falcon,
+				   enum nvkm_acr_lsf_id id,
+				   const char *path, int ver,
+				   const struct nvkm_acr_lsf_func *func,
+				   const struct firmware **pdesc)
+{
+	struct nvkm_acr *acr = subdev->device->acr;
+	struct nvkm_acr_lsfw *lsfw;
+	int ret;
+
+	if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id))))
+		return lsfw;
+
+	ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig);
+	if (ret)
+		goto done;
+
+	ret = nvkm_firmware_load_blob(subdev, path, "image", ver, &lsfw->img);
+	if (ret)
+		goto done;
+
+	ret = nvkm_firmware_load_name(subdev, path, "desc", ver, pdesc);
+done:
+	if (ret) {
+		nvkm_acr_lsfw_del(lsfw);
+		return ERR_PTR(ret);
+	}
+
+	return lsfw;
+}
+
+static void
+nvkm_acr_lsfw_from_desc(const struct nvfw_ls_desc_head *desc,
+			struct nvkm_acr_lsfw *lsfw)
+{
+	lsfw->bootloader_size = ALIGN(desc->bootloader_size, 256);
+	lsfw->bootloader_imem_offset = desc->bootloader_imem_offset;
+
+	lsfw->app_size = ALIGN(desc->app_size, 256);
+	lsfw->app_start_offset = desc->app_start_offset;
+	lsfw->app_imem_entry = desc->app_imem_entry;
+	lsfw->app_resident_code_offset = desc->app_resident_code_offset;
+	lsfw->app_resident_code_size = desc->app_resident_code_size;
+	lsfw->app_resident_data_offset = desc->app_resident_data_offset;
+	lsfw->app_resident_data_size = desc->app_resident_data_size;
+
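+	/* ucode_size covers the bootloader plus everything up to the app's
+	 * resident data; data_size is whatever remains of the image.
+	 */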
+	lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) +
+			   lsfw->bootloader_size;
+	lsfw->data_size = lsfw->app_size + lsfw->bootloader_size -
+			  lsfw->ucode_size;
+}
+
+int
+nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *subdev,
+				  struct nvkm_falcon *falcon,
+				  enum nvkm_acr_lsf_id id,
+				  const char *path, int ver,
+				  const struct nvkm_acr_lsf_func *func)
+{
+	const struct firmware *fw;
+	struct nvkm_acr_lsfw *lsfw;
+
+	lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver,
+						  func, &fw);
+	if (IS_ERR(lsfw))
+		return PTR_ERR(lsfw);
+
+	nvkm_acr_lsfw_from_desc(&nvfw_ls_desc(subdev, fw->data)->head, lsfw);
+	nvkm_firmware_put(fw);
+	return 0;
+}
+
+int
+nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *subdev,
+				     struct nvkm_falcon *falcon,
+				     enum nvkm_acr_lsf_id id,
+				     const char *path, int ver,
+				     const struct nvkm_acr_lsf_func *func)
+{
+	const struct firmware *fw;
+	struct nvkm_acr_lsfw *lsfw;
+
+	lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver,
+						  func, &fw);
+	if (IS_ERR(lsfw))
+		return PTR_ERR(lsfw);
+
+	nvkm_acr_lsfw_from_desc(&nvfw_ls_desc_v1(subdev, fw->data)->head, lsfw);
+	nvkm_firmware_put(fw);
+	return 0;
+}
+
+int
+nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *subdev,
+				    struct nvkm_falcon *falcon,
+				    enum nvkm_acr_lsf_id id,
+				    const char *path, int ver,
+				    const struct nvkm_acr_lsf_func *func)
+{
+	struct nvkm_acr *acr = subdev->device->acr;
+	struct nvkm_acr_lsfw *lsfw;
+	const struct firmware *bl = NULL, *inst = NULL, *data = NULL;
+	const struct nvfw_bin_hdr *hdr;
+	const struct nvfw_bl_desc *desc;
+	u32 *bldata;
+	int ret;
+
+	if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id))))
+		return PTR_ERR(lsfw);
+
+	ret = nvkm_firmware_load_name(subdev, path, "bl", ver, &bl);
+	if (ret)
+		goto done;
+
+	hdr = nvfw_bin_hdr(subdev, bl->data);
+	desc = nvfw_bl_desc(subdev, bl->data + hdr->header_offset);
+	bldata = (void *)(bl->data + hdr->data_offset);
+
+	ret = nvkm_firmware_load_name(subdev, path, "inst", ver, &inst);
+	if (ret)
+		goto done;
+
+	ret = nvkm_firmware_load_name(subdev, path, "data", ver, &data);
+	if (ret)
+		goto done;
+
+	ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig);
+	if (ret)
+		goto done;
+
+	lsfw->bootloader_size = ALIGN(desc->code_size, 256);
+	lsfw->bootloader_imem_offset = desc->start_tag << 8;
+
+	lsfw->app_start_offset = lsfw->bootloader_size;
+	lsfw->app_imem_entry = 0;
+	lsfw->app_resident_code_offset = 0;
+	lsfw->app_resident_code_size = ALIGN(inst->size, 256);
+	lsfw->app_resident_data_offset = lsfw->app_resident_code_size;
+	lsfw->app_resident_data_size = ALIGN(data->size, 256);
+	lsfw->app_size = lsfw->app_resident_code_size +
+			 lsfw->app_resident_data_size;
+
+	lsfw->img.size = lsfw->bootloader_size + lsfw->app_size;
+	if (!(lsfw->img.data = kzalloc(lsfw->img.size, GFP_KERNEL))) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	memcpy(lsfw->img.data, bldata, lsfw->bootloader_size);
+	memcpy(lsfw->img.data + lsfw->app_start_offset +
+	       lsfw->app_resident_code_offset, inst->data, inst->size);
+	memcpy(lsfw->img.data + lsfw->app_start_offset +
+	       lsfw->app_resident_data_offset, data->data, data->size);
+
+	lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) +
+			   lsfw->bootloader_size;
+	lsfw->data_size = lsfw->app_size + lsfw->bootloader_size -
+			  lsfw->ucode_size;
+
+done:
+	if (ret)
+		nvkm_acr_lsfw_del(lsfw);
+	nvkm_firmware_put(data);
+	nvkm_firmware_put(inst);
+	nvkm_firmware_put(bl);
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
new file mode 100644
index 0000000..d8ba728
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
@@ -0,0 +1,151 @@
+#ifndef __NVKM_ACR_PRIV_H__
+#define __NVKM_ACR_PRIV_H__
+#include <subdev/acr.h>
+struct lsb_header_tail;
+
+struct nvkm_acr_fwif {
+	int version;
+	int (*load)(struct nvkm_acr *, int version,
+		    const struct nvkm_acr_fwif *);
+	const struct nvkm_acr_func *func;
+};
+
+int gm20b_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
+int gp102_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
+
+struct nvkm_acr_lsf;
+struct nvkm_acr_func {
+	const struct nvkm_acr_hsf_fwif *load;
+	const struct nvkm_acr_hsf_fwif *ahesasc;
+	const struct nvkm_acr_hsf_fwif *asb;
+	const struct nvkm_acr_hsf_fwif *unload;
+	int (*wpr_parse)(struct nvkm_acr *);
+	u32 (*wpr_layout)(struct nvkm_acr *);
+	int (*wpr_alloc)(struct nvkm_acr *, u32 wpr_size);
+	int (*wpr_build)(struct nvkm_acr *, struct nvkm_acr_lsf *rtos);
+	void (*wpr_patch)(struct nvkm_acr *, s64 adjust);
+	void (*wpr_check)(struct nvkm_acr *, u64 *start, u64 *limit);
+	int (*init)(struct nvkm_acr *);
+	void (*fini)(struct nvkm_acr *);
+};
+
+int gm200_acr_wpr_parse(struct nvkm_acr *);
+u32 gm200_acr_wpr_layout(struct nvkm_acr *);
+int gm200_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *);
+void gm200_acr_wpr_patch(struct nvkm_acr *, s64);
+void gm200_acr_wpr_check(struct nvkm_acr *, u64 *, u64 *);
+void gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *,
+				  struct lsb_header_tail *);
+int gm200_acr_init(struct nvkm_acr *);
+
+int gm20b_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size);
+
+int gp102_acr_wpr_parse(struct nvkm_acr *);
+u32 gp102_acr_wpr_layout(struct nvkm_acr *);
+int gp102_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size);
+int gp102_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *);
+int gp102_acr_wpr_build_lsb(struct nvkm_acr *, struct nvkm_acr_lsfw *);
+void gp102_acr_wpr_patch(struct nvkm_acr *, s64);
+
+struct nvkm_acr_hsfw {
+	const struct nvkm_acr_hsf_func *func;
+	const char *name;
+	struct list_head head;
+
+	u32 imem_size;
+	u32 imem_tag;
+	u32 *imem;
+
+	u8 *image;
+	u32 image_size;
+	u32 non_sec_addr;
+	u32 non_sec_size;
+	u32 sec_addr;
+	u32 sec_size;
+	u32 data_addr;
+	u32 data_size;
+
+	struct {
+		struct {
+			void *data;
+			u32 size;
+		} prod, dbg;
+		u32 patch_loc;
+	} sig;
+};
+
+struct nvkm_acr_hsf_fwif {
+	int version;
+	int (*load)(struct nvkm_acr *, const char *bl, const char *fw,
+		    const char *name, int version,
+		    const struct nvkm_acr_hsf_fwif *);
+	const struct nvkm_acr_hsf_func *func;
+};
+
+int nvkm_acr_hsfw_load(struct nvkm_acr *, const char *, const char *,
+		       const char *, int, const struct nvkm_acr_hsf_fwif *);
+void nvkm_acr_hsfw_del_all(struct nvkm_acr *);
+
+struct nvkm_acr_hsf {
+	const struct nvkm_acr_hsf_func *func;
+	const char *name;
+	struct list_head head;
+
+	u32 imem_size;
+	u32 imem_tag;
+	u32 *imem;
+
+	u32 non_sec_addr;
+	u32 non_sec_size;
+	u32 sec_addr;
+	u32 sec_size;
+	u32 data_addr;
+	u32 data_size;
+
+	struct nvkm_memory *ucode;
+	struct nvkm_vma *vma;
+	struct nvkm_falcon *falcon;
+};
+
+struct nvkm_acr_hsf_func {
+	int (*load)(struct nvkm_acr *, struct nvkm_acr_hsfw *);
+	int (*boot)(struct nvkm_acr *, struct nvkm_acr_hsf *);
+	void (*bld)(struct nvkm_acr *, struct nvkm_acr_hsf *);
+};
+
+int gm200_acr_hsfw_load(struct nvkm_acr *, struct nvkm_acr_hsfw *,
+			struct nvkm_falcon *);
+int gm200_acr_hsfw_boot(struct nvkm_acr *, struct nvkm_acr_hsf *,
+			u32 clear_intr, u32 mbox0_ok);
+
+int gm200_acr_load_boot(struct nvkm_acr *, struct nvkm_acr_hsf *);
+
+extern const struct nvkm_acr_hsf_func gm200_acr_unload_0;
+int gm200_acr_unload_load(struct nvkm_acr *, struct nvkm_acr_hsfw *);
+int gm200_acr_unload_boot(struct nvkm_acr *, struct nvkm_acr_hsf *);
+void gm200_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *);
+
+extern const struct nvkm_acr_hsf_func gm20b_acr_load_0;
+
+int gp102_acr_load_load(struct nvkm_acr *, struct nvkm_acr_hsfw *);
+
+extern const struct nvkm_acr_hsf_func gp108_acr_unload_0;
+void gp108_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *);
+
+int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, int,
+		  struct nvkm_acr **);
+int nvkm_acr_hsf_boot(struct nvkm_acr *, const char *name);
+
+struct nvkm_acr_lsf {
+	const struct nvkm_acr_lsf_func *func;
+	struct nvkm_falcon *falcon;
+	enum nvkm_acr_lsf_id id;
+	struct list_head head;
+};
+
+struct nvkm_acr_lsfw *nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *,
+					struct nvkm_acr *, struct nvkm_falcon *,
+					enum nvkm_acr_lsf_id);
+void nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *);
+void nvkm_acr_lsfw_del_all(struct nvkm_acr *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
new file mode 100644
index 0000000..7f4b89d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/gsp.h>
+#include <subdev/pmu.h>
+#include <engine/sec2.h>
+
+#include <nvfw/acr.h>
+
+static int
+tu102_acr_init(struct nvkm_acr *acr)
+{
+	int ret = nvkm_acr_hsf_boot(acr, "AHESASC");
+	if (ret)
+		return ret;
+
+	return nvkm_acr_hsf_boot(acr, "ASB");
+}
+
+static int
+tu102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
+{
+	struct nvkm_acr_lsfw *lsfw;
+	u32 offset = 0;
+	int ret;
+
+	/*XXX: shared sub-WPR headers, fill terminator for now. */
+	nvkm_wo32(acr->wpr, 0x200, 0xffffffff);
+
+	/* Fill per-LSF structures. */
+	list_for_each_entry(lsfw, &acr->lsfw, head) {
+		struct lsf_signature_v1 *sig = (void *)lsfw->sig->data;
+		struct wpr_header_v1 hdr = {
+			.falcon_id = lsfw->id,
+			.lsb_offset = lsfw->offset.lsb,
+			.bootstrap_owner = NVKM_ACR_LSF_GSPLITE,
+			.lazy_bootstrap = 1,
+			.bin_version = sig->version,
+			.status = WPR_HEADER_V1_STATUS_COPY,
+		};
+
+		/* Write WPR header. */
+		nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
+		offset += sizeof(hdr);
+
+		/* Write LSB header. */
+		ret = gp102_acr_wpr_build_lsb(acr, lsfw);
+		if (ret)
+			return ret;
+
+		/* Write ucode image. */
+		nvkm_wobj(acr->wpr, lsfw->offset.img,
+				    lsfw->img.data,
+				    lsfw->img.size);
+
+		/* Write bootloader data. */
+		lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
+	}
+
+	/* Finalise WPR. */
+	nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID);
+	return 0;
+}
+
+static int
+tu102_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+	return gm200_acr_hsfw_boot(acr, hsf, 0, 0);
+}
+
+static int
+tu102_acr_hsfw_nofw(struct nvkm_acr *acr, const char *bl, const char *fw,
+		    const char *name, int version,
+		    const struct nvkm_acr_hsf_fwif *fwif)
+{
+	return 0;
+}
+
+MODULE_FIRMWARE("nvidia/tu102/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/tu104/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/tu106/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+tu102_acr_unload_fwif[] = {
+	{  0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 },
+	{ -1, tu102_acr_hsfw_nofw },
+	{}
+};
+
+static int
+tu102_acr_asb_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+	return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->gsp->falcon);
+}
+
+static const struct nvkm_acr_hsf_func
+tu102_acr_asb_0 = {
+	.load = tu102_acr_asb_load,
+	.boot = tu102_acr_hsfw_boot,
+	.bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/tu102/acr/ucode_asb.bin");
+MODULE_FIRMWARE("nvidia/tu104/acr/ucode_asb.bin");
+MODULE_FIRMWARE("nvidia/tu106/acr/ucode_asb.bin");
+
+static const struct nvkm_acr_hsf_fwif
+tu102_acr_asb_fwif[] = {
+	{  0, nvkm_acr_hsfw_load, &tu102_acr_asb_0 },
+	{ -1, tu102_acr_hsfw_nofw },
+	{}
+};
+
+static const struct nvkm_acr_hsf_func
+tu102_acr_ahesasc_0 = {
+	.load = gp102_acr_load_load,
+	.boot = tu102_acr_hsfw_boot,
+	.bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/tu102/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/acr/ucode_ahesasc.bin");
+
+MODULE_FIRMWARE("nvidia/tu104/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/acr/ucode_ahesasc.bin");
+
+MODULE_FIRMWARE("nvidia/tu106/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/acr/ucode_ahesasc.bin");
+
+static const struct nvkm_acr_hsf_fwif
+tu102_acr_ahesasc_fwif[] = {
+	{  0, nvkm_acr_hsfw_load, &tu102_acr_ahesasc_0 },
+	{ -1, tu102_acr_hsfw_nofw },
+	{}
+};
+
+static const struct nvkm_acr_func
+tu102_acr = {
+	.ahesasc = tu102_acr_ahesasc_fwif,
+	.asb = tu102_acr_asb_fwif,
+	.unload = tu102_acr_unload_fwif,
+	.wpr_parse = gp102_acr_wpr_parse,
+	.wpr_layout = gp102_acr_wpr_layout,
+	.wpr_alloc = gp102_acr_wpr_alloc,
+	.wpr_patch = gp102_acr_wpr_patch,
+	.wpr_build = tu102_acr_wpr_build,
+	.wpr_check = gm200_acr_wpr_check,
+	.init = tu102_acr_init,
+};
+
+static int
+tu102_acr_load(struct nvkm_acr *acr, int version,
+	       const struct nvkm_acr_fwif *fwif)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+	const struct nvkm_acr_hsf_fwif *hsfwif;
+
+	hsfwif = nvkm_firmware_load(subdev, fwif->func->ahesasc, "AcrAHESASC",
+				    acr, "acr/bl", "acr/ucode_ahesasc",
+				    "AHESASC");
+	if (IS_ERR(hsfwif))
+		return PTR_ERR(hsfwif);
+
+	hsfwif = nvkm_firmware_load(subdev, fwif->func->asb, "AcrASB",
+				    acr, "acr/bl", "acr/ucode_asb", "ASB");
+	if (IS_ERR(hsfwif))
+		return PTR_ERR(hsfwif);
+
+	hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
+				    acr, "acr/unload_bl", "acr/ucode_unload",
+				    "unload");
+	if (IS_ERR(hsfwif))
+		return PTR_ERR(hsfwif);
+
+	return 0;
+}
+
+static const struct nvkm_acr_fwif
+tu102_acr_fwif[] = {
+	{  0, tu102_acr_load, &tu102_acr },
+	{}
+};
+
+int
+tu102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+	return nvkm_acr_new_(tu102_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
index 53b9d63..d65ec71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
@@ -2,5 +2,6 @@
 nvkm-y += nvkm/subdev/fault/base.o
 nvkm-y += nvkm/subdev/fault/user.o
 nvkm-y += nvkm/subdev/fault/gp100.o
+nvkm-y += nvkm/subdev/fault/gp10b.o
 nvkm-y += nvkm/subdev/fault/gv100.o
 nvkm-y += nvkm/subdev/fault/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index ca25156..f6dca97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -108,7 +108,7 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
 		return ret;
 
 	/* Pin fault buffer in BAR2. */
-	buffer->addr = nvkm_memory_bar2(buffer->mem);
+	buffer->addr = fault->func->buffer.pin(buffer);
 	if (buffer->addr == ~0ULL)
 		return -EFAULT;
 
@@ -146,6 +146,7 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev)
 	struct nvkm_fault *fault = nvkm_fault(subdev);
 	int i;
 
+	nvkm_notify_fini(&fault->nrpfb);
 	nvkm_event_fini(&fault->event);
 
 	for (i = 0; i < fault->buffer_nr; i++) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
index 4f3c4e0..f6b189c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
@@ -21,25 +21,26 @@
  */
 #include "priv.h"
 
+#include <core/memory.h>
 #include <subdev/mc.h>
 
 #include <nvif/class.h>
 
-static void
+void
 gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
 {
 	struct nvkm_device *device = buffer->fault->subdev.device;
 	nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable);
 }
 
-static void
+void
 gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
 {
 	struct nvkm_device *device = buffer->fault->subdev.device;
 	nvkm_mask(device, 0x002a70, 0x00000001, 0x00000000);
 }
 
-static void
+void
 gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
 {
 	struct nvkm_device *device = buffer->fault->subdev.device;
@@ -48,7 +49,12 @@ gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
 	nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001);
 }
 
-static void
+u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *buffer)
+{
+	return nvkm_memory_bar2(buffer->mem);
+}
+
+void
 gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
 {
 	buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
@@ -56,7 +62,7 @@ gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
 	buffer->put = 0x002a80;
 }
 
-static void
+void
 gp100_fault_intr(struct nvkm_fault *fault)
 {
 	nvkm_event_send(&fault->event, 1, 0, NULL, 0);
@@ -68,6 +74,7 @@ gp100_fault = {
 	.buffer.nr = 1,
 	.buffer.entry_size = 32,
 	.buffer.info = gp100_fault_buffer_info,
+	.buffer.pin = gp100_fault_buffer_pin,
 	.buffer.init = gp100_fault_buffer_init,
 	.buffer.fini = gp100_fault_buffer_fini,
 	.buffer.intr = gp100_fault_buffer_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
new file mode 100644
index 0000000..9e66d1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2019 NVIDIA Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+#include <core/memory.h>
+
+#include <nvif/class.h>
+
+u64
+gp10b_fault_buffer_pin(struct nvkm_fault_buffer *buffer)
+{
+	return nvkm_memory_addr(buffer->mem);
+}
+
+static const struct nvkm_fault_func
+gp10b_fault = {
+	.intr = gp100_fault_intr,
+	.buffer.nr = 1,
+	.buffer.entry_size = 32,
+	.buffer.info = gp100_fault_buffer_info,
+	.buffer.pin = gp10b_fault_buffer_pin,
+	.buffer.init = gp100_fault_buffer_init,
+	.buffer.fini = gp100_fault_buffer_fini,
+	.buffer.intr = gp100_fault_buffer_intr,
+	.user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 },
+};
+
+int
+gp10b_fault_new(struct nvkm_device *device, int index,
+		struct nvkm_fault **pfault)
+{
+	return nvkm_fault_new_(&gp10b_fault, device, index, pfault);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 6747f09..2707be4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -214,6 +214,7 @@ gv100_fault = {
 	.buffer.nr = 2,
 	.buffer.entry_size = 32,
 	.buffer.info = gv100_fault_buffer_info,
+	.buffer.pin = gp100_fault_buffer_pin,
 	.buffer.init = gv100_fault_buffer_init,
 	.buffer.fini = gv100_fault_buffer_fini,
 	.buffer.intr = gv100_fault_buffer_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index 975e66a..f6f1dd7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -30,6 +30,7 @@ struct nvkm_fault_func {
 		int nr;
 		u32 entry_size;
 		void (*info)(struct nvkm_fault_buffer *);
+		u64 (*pin)(struct nvkm_fault_buffer *);
 		void (*init)(struct nvkm_fault_buffer *);
 		void (*fini)(struct nvkm_fault_buffer *);
 		void (*intr)(struct nvkm_fault_buffer *, bool enable);
@@ -40,6 +41,15 @@ struct nvkm_fault_func {
 	} user;
 };
 
+void gp100_fault_buffer_intr(struct nvkm_fault_buffer *, bool enable);
+void gp100_fault_buffer_fini(struct nvkm_fault_buffer *);
+void gp100_fault_buffer_init(struct nvkm_fault_buffer *);
+u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *);
+void gp100_fault_buffer_info(struct nvkm_fault_buffer *);
+void gp100_fault_intr(struct nvkm_fault *);
+
+u64 gp10b_fault_buffer_pin(struct nvkm_fault_buffer *);
+
 int gv100_fault_oneinit(struct nvkm_fault *);
 
 int nvkm_ufault_new(struct nvkm_device *, const struct nvkm_oclass *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
index fa1dfe5..45a6a68 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
@@ -154,6 +154,7 @@ tu102_fault = {
 	.buffer.nr = 2,
 	.buffer.entry_size = 32,
 	.buffer.info = tu102_fault_buffer_info,
+	.buffer.pin = gp100_fault_buffer_pin,
 	.buffer.init = tu102_fault_buffer_init,
 	.buffer.fini = tu102_fault_buffer_fini,
 	.buffer.intr = tu102_fault_buffer_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index b2bb5a3..5940e0d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -126,6 +126,34 @@ nvkm_fb_oneinit(struct nvkm_subdev *subdev)
 }
 
 static int
+nvkm_fb_init_scrub_vpr(struct nvkm_fb *fb)
+{
+	struct nvkm_subdev *subdev = &fb->subdev;
+	int ret;
+
+	nvkm_debug(subdev, "VPR locked, running scrubber binary\n");
+
+	if (!fb->vpr_scrubber.size) {
+		nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n");
+		return 0;
+	}
+
+	ret = fb->func->vpr.scrub(fb);
+	if (ret) {
+		nvkm_error(subdev, "VPR scrubber binary failed\n");
+		return ret;
+	}
+
+	if (fb->func->vpr.scrub_required(fb)) {
+		nvkm_error(subdev, "VPR still locked after scrub!\n");
+		return -EIO;
+	}
+
+	nvkm_debug(subdev, "VPR scrubber binary successful\n");
+	return 0;
+}
+
+static int
 nvkm_fb_init(struct nvkm_subdev *subdev)
 {
 	struct nvkm_fb *fb = nvkm_fb(subdev);
@@ -154,6 +182,14 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
 
 	if (fb->func->init_unkn)
 		fb->func->init_unkn(fb);
+
+	if (fb->func->vpr.scrub_required &&
+	    fb->func->vpr.scrub_required(fb)) {
+		ret = nvkm_fb_init_scrub_vpr(fb);
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -172,6 +208,8 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
 	nvkm_mm_fini(&fb->tags);
 	nvkm_ram_del(&fb->ram);
 
+	nvkm_blob_dtor(&fb->vpr_scrubber);
+
 	if (fb->func->dtor)
 		return fb->func->dtor(fb);
 	return fb;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index b4d74e8..fc8c93a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -24,7 +24,81 @@
 #include "gf100.h"
 #include "ram.h"
 
+#include <core/firmware.h>
 #include <core/memory.h>
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+#include <engine/nvdec.h>
+
+int
+gp102_fb_vpr_scrub(struct nvkm_fb *fb)
+{
+	struct nvkm_subdev *subdev = &fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_falcon *falcon = &device->nvdec[0]->falcon;
+	struct nvkm_blob *blob = &fb->vpr_scrubber;
+	const struct nvfw_bin_hdr *hsbin_hdr;
+	const struct nvfw_hs_header *fw_hdr;
+	const struct nvfw_hs_load_header *lhdr;
+	void *scrub_data;
+	u32 patch_loc, patch_sig;
+	int ret;
+
+	nvkm_falcon_get(falcon, subdev);
+
+	hsbin_hdr = nvfw_bin_hdr(subdev, blob->data);
+	fw_hdr = nvfw_hs_header(subdev, blob->data + hsbin_hdr->header_offset);
+	lhdr = nvfw_hs_load_header(subdev, blob->data + fw_hdr->hdr_offset);
+	scrub_data = blob->data + hsbin_hdr->data_offset;
+
+	patch_loc = *(u32 *)(blob->data + fw_hdr->patch_loc);
+	patch_sig = *(u32 *)(blob->data + fw_hdr->patch_sig);
+	if (falcon->debug) {
+		memcpy(scrub_data + patch_loc,
+		       blob->data + fw_hdr->sig_dbg_offset + patch_sig,
+		       fw_hdr->sig_dbg_size);
+	} else {
+		memcpy(scrub_data + patch_loc,
+		       blob->data + fw_hdr->sig_prod_offset + patch_sig,
+		       fw_hdr->sig_prod_size);
+	}
+
+	nvkm_falcon_reset(falcon);
+	nvkm_falcon_bind_context(falcon, NULL);
+
+	nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off,
+			      lhdr->non_sec_code_size,
+			      lhdr->non_sec_code_off >> 8, 0, false);
+	nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0],
+			      ALIGN(lhdr->apps[0], 0x100),
+			      lhdr->apps[1],
+			      lhdr->apps[0] >> 8, 0, true);
+	nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0,
+			      lhdr->data_size, 0);
+
+	nvkm_falcon_set_start_addr(falcon, 0x0);
+	nvkm_falcon_start(falcon);
+
+	ret = nvkm_falcon_wait_for_halt(falcon, 500);
+	if (ret < 0) {
+		ret = -ETIMEDOUT;
+		goto end;
+	}
+
+	/* put nvdec in clean state - without reset it will remain in HS mode */
+	nvkm_falcon_reset(falcon);
+end:
+	nvkm_falcon_put(falcon, subdev);
+	return ret;
+}
+
+bool
+gp102_fb_vpr_scrub_required(struct nvkm_fb *fb)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100cd0, 0x2);
+	return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0;
+}
 
 static const struct nvkm_fb_func
 gp102_fb = {
@@ -33,11 +107,32 @@ gp102_fb = {
 	.init = gp100_fb_init,
 	.init_remapper = gp100_fb_init_remapper,
 	.init_page = gm200_fb_init_page,
+	.vpr.scrub_required = gp102_fb_vpr_scrub_required,
+	.vpr.scrub = gp102_fb_vpr_scrub,
 	.ram_new = gp100_ram_new,
 };
 
 int
+gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_fb **pfb)
+{
+	int ret = gf100_fb_new_(func, device, index, pfb);
+	if (ret)
+		return ret;
+
+	nvkm_firmware_load_blob(&(*pfb)->subdev, "nvdec/scrubber", "", 0,
+				&(*pfb)->vpr_scrubber);
+	return 0;
+}
+
+int
 gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
 {
-	return gf100_fb_new_(&gp102_fb, device, index, pfb);
+	return gp102_fb_new_(&gp102_fb, device, index, pfb);
 }
+
+MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 3c5e02e..389bad3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -35,6 +35,8 @@ gv100_fb = {
 	.init = gp100_fb_init,
 	.init_page = gv100_fb_init_page,
 	.init_unkn = gp100_fb_init_unkn,
+	.vpr.scrub_required = gp102_fb_vpr_scrub_required,
+	.vpr.scrub = gp102_fb_vpr_scrub,
 	.ram_new = gp100_ram_new,
 	.default_bigpage = 16,
 };
@@ -42,5 +44,10 @@ gv100_fb = {
 int
 gv100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
 {
-	return gf100_fb_new_(&gv100_fb, device, index, pfb);
+	return gp102_fb_new_(&gv100_fb, device, index, pfb);
 }
+
+MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index c4e9f55..5be9c56 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -17,6 +17,11 @@ struct nvkm_fb_func {
 	void (*intr)(struct nvkm_fb *);
 
 	struct {
+		bool (*scrub_required)(struct nvkm_fb *);
+		int (*scrub)(struct nvkm_fb *);
+	} vpr;
+
+	struct {
 		int regions;
 		void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
 			     u32 pitch, u32 flags, struct nvkm_fb_tile *);
@@ -72,4 +77,9 @@ int gm200_fb_init_page(struct nvkm_fb *);
 
 void gp100_fb_init_remapper(struct nvkm_fb *);
 void gp100_fb_init_unkn(struct nvkm_fb *);
+
+int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int,
+		  struct nvkm_fb **);
+bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
+int gp102_fb_vpr_scrub(struct nvkm_fb *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index ac87a3b..ba43fe1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -655,7 +655,7 @@ gf100_ram_new_(const struct nvkm_ram_func *func,
 
 static const struct nvkm_ram_func
 gf100_ram = {
-	.upper = 0x0200000000,
+	.upper = 0x0200000000ULL,
 	.probe_fbp = gf100_ram_probe_fbp,
 	.probe_fbp_amount = gf100_ram_probe_fbp_amount,
 	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
index 70a06e3..d97fa43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
@@ -43,7 +43,7 @@ gf108_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
 
 static const struct nvkm_ram_func
 gf108_ram = {
-	.upper = 0x0200000000,
+	.upper = 0x0200000000ULL,
 	.probe_fbp = gf100_ram_probe_fbp,
 	.probe_fbp_amount = gf108_ram_probe_fbp_amount,
 	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 456aed1..d350d92 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -1698,7 +1698,7 @@ gk104_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 
 static const struct nvkm_ram_func
 gk104_ram = {
-	.upper = 0x0200000000,
+	.upper = 0x0200000000ULL,
 	.probe_fbp = gf100_ram_probe_fbp,
 	.probe_fbp_amount = gf108_ram_probe_fbp_amount,
 	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index 27c68e3..be91da8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -33,7 +33,7 @@ gm107_ram_probe_fbp(const struct nvkm_ram_func *func,
 
 static const struct nvkm_ram_func
 gm107_ram = {
-	.upper = 0x1000000000,
+	.upper = 0x1000000000ULL,
 	.probe_fbp = gm107_ram_probe_fbp,
 	.probe_fbp_amount = gf108_ram_probe_fbp_amount,
 	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
index 6b0cac1..8f91ea9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
@@ -48,7 +48,7 @@ gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
 
 static const struct nvkm_ram_func
 gm200_ram = {
-	.upper = 0x1000000000,
+	.upper = 0x1000000000ULL,
 	.probe_fbp = gm107_ram_probe_fbp,
 	.probe_fbp_amount = gm200_ram_probe_fbp_amount,
 	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
index adb62a6..378f6fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -79,7 +79,7 @@ gp100_ram_probe_fbpa(struct nvkm_device *device, int fbpa)
 
 static const struct nvkm_ram_func
 gp100_ram = {
-	.upper = 0x1000000000,
+	.upper = 0x1000000000ULL,
 	.probe_fbp = gm107_ram_probe_fbp,
 	.probe_fbp_amount = gm200_ram_probe_fbp_amount,
 	.probe_fbpa_amount = gp100_ram_probe_fbpa,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
index e7c4f06..67cc3b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/gsp/base.o
 nvkm-y += nvkm/subdev/gsp/gv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
new file mode 100644
index 0000000..5a32df0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include <core/falcon.h>
+#include <core/firmware.h>
+#include <subdev/acr.h>
+#include <subdev/top.h>
+
+static void *
+nvkm_gsp_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+	nvkm_falcon_dtor(&gsp->falcon);
+	return gsp;
+}
+
+static const struct nvkm_subdev_func
+nvkm_gsp = {
+	.dtor = nvkm_gsp_dtor,
+};
+
+int
+nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
+	      int index, struct nvkm_gsp **pgsp)
+{
+	struct nvkm_gsp *gsp;
+
+	if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_gsp, device, index, &gsp->subdev);
+
+	fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp);
+	if (IS_ERR(fwif))
+		return PTR_ERR(fwif);
+
+	return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev,
+				nvkm_subdev_name[gsp->subdev.index], 0,
+				&gsp->falcon);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
index dccfaf1..2114f9b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
@@ -19,44 +19,37 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  */
-#include <subdev/gsp.h>
-#include <subdev/top.h>
-#include <engine/falcon.h>
+#include "priv.h"
+
+static const struct nvkm_falcon_func
+gv100_gsp_flcn = {
+	.fbif = 0x600,
+	.load_imem = nvkm_falcon_v1_load_imem,
+	.load_dmem = nvkm_falcon_v1_load_dmem,
+	.read_dmem = nvkm_falcon_v1_read_dmem,
+	.bind_context = gp102_sec2_flcn_bind_context,
+	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+	.set_start_addr = nvkm_falcon_v1_set_start_addr,
+	.start = nvkm_falcon_v1_start,
+	.enable = gp102_sec2_flcn_enable,
+	.disable = nvkm_falcon_v1_disable,
+};
 
 static int
-gv100_gsp_oneinit(struct nvkm_subdev *subdev)
+gv100_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
 {
-	struct nvkm_gsp *gsp = nvkm_gsp(subdev);
-
-	gsp->addr = nvkm_top_addr(subdev->device, subdev->index);
-	if (!gsp->addr)
-		return -EINVAL;
-
-	return nvkm_falcon_v1_new(subdev, "GSP", gsp->addr, &gsp->falcon);
+	return 0;
 }
 
-static void *
-gv100_gsp_dtor(struct nvkm_subdev *subdev)
-{
-	struct nvkm_gsp *gsp = nvkm_gsp(subdev);
-	nvkm_falcon_del(&gsp->falcon);
-	return gsp;
-}
-
-static const struct nvkm_subdev_func
-gv100_gsp = {
-	.dtor = gv100_gsp_dtor,
-	.oneinit = gv100_gsp_oneinit,
+struct nvkm_gsp_fwif
+gv100_gsp[] = {
+	{ -1, gv100_gsp_nofw, &gv100_gsp_flcn },
+	{}
 };
 
 int
 gv100_gsp_new(struct nvkm_device *device, int index, struct nvkm_gsp **pgsp)
 {
-	struct nvkm_gsp *gsp;
-
-	if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL)))
-		return -ENOMEM;
-
-	nvkm_subdev_ctor(&gv100_gsp, device, index, &gsp->subdev);
-	return 0;
+	return nvkm_gsp_new_(gv100_gsp, device, index, pgsp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
new file mode 100644
index 0000000..92820fb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_GSP_PRIV_H__
+#define __NVKM_GSP_PRIV_H__
+#include <subdev/gsp.h>
+enum nvkm_acr_lsf_id;
+
+struct nvkm_gsp_fwif {
+	int version;
+	int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *);
+	const struct nvkm_falcon_func *flcn;
+};
+
+int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, int,
+		  struct nvkm_gsp **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
index 2b6d36e..728d750 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
@@ -6,3 +6,4 @@
 nvkm-y += nvkm/subdev/ltc/gm200.o
 nvkm-y += nvkm/subdev/ltc/gp100.o
 nvkm-y += nvkm/subdev/ltc/gp102.o
+nvkm-y += nvkm/subdev/ltc/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
new file mode 100644
index 0000000..c0063c7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2019 NVIDIA Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Thierry Reding
+ */
+
+#include "priv.h"
+
+static void
+gp10b_ltc_init(struct nvkm_ltc *ltc)
+{
+	struct nvkm_device *device = ltc->subdev.device;
+	struct iommu_fwspec *spec;
+
+	nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
+	nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
+	nvkm_wr32(device, 0x100800, ltc->ltc_nr);
+
+	spec = dev_iommu_fwspec_get(device->dev);
+	if (spec) {
+		u32 sid = spec->ids[0] & 0xffff;
+
+		/* stream ID */
+		nvkm_wr32(device, 0x160000, sid << 2);
+	}
+}
+
+static const struct nvkm_ltc_func
+gp10b_ltc = {
+	.oneinit = gp100_ltc_oneinit,
+	.init = gp10b_ltc_init,
+	.intr = gp100_ltc_intr,
+	.cbc_clear = gm107_ltc_cbc_clear,
+	.cbc_wait = gm107_ltc_cbc_wait,
+	.zbc = 16,
+	.zbc_clear_color = gm107_ltc_zbc_clear_color,
+	.zbc_clear_depth = gm107_ltc_zbc_clear_depth,
+	.zbc_clear_stencil = gp102_ltc_zbc_clear_stencil,
+	.invalidate = gf100_ltc_invalidate,
+	.flush = gf100_ltc_flush,
+};
+
+int
+gp10b_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+	return nvkm_ltc_new_(&gp10b_ltc, device, index, pltc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 2fcf18e..eca5a71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -46,4 +46,6 @@ void gm107_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
 int gp100_ltc_oneinit(struct nvkm_ltc *);
 void gp100_ltc_init(struct nvkm_ltc *);
 void gp100_ltc_intr(struct nvkm_ltc *);
+
+void gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *, int, const u32);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 2d07524..2cd5ec8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -30,7 +30,7 @@
  * The value 0xff represents an invalid storage type.
  */
 const u8 *
-gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
+gf100_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
 {
 	static const u8
 	kind[256] = {
@@ -69,6 +69,7 @@ gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
 	};
 
 	*count = ARRAY_SIZE(kind);
+	*invalid = 0xff;
 	return kind;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
index dbf644e..83990c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
@@ -27,7 +27,7 @@
 #include <nvif/class.h>
 
 const u8 *
-gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
+gm200_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
 {
 	static const u8
 	kind[256] = {
@@ -65,6 +65,7 @@ gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
 		0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
 	};
 	*count = ARRAY_SIZE(kind);
+	*invalid = 0xff;
 	return kind;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index db3dfbb..c0083dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -27,7 +27,7 @@
 #include <nvif/class.h>
 
 const u8 *
-nv50_mmu_kind(struct nvkm_mmu *base, int *count)
+nv50_mmu_kind(struct nvkm_mmu *base, int *count, u8 *invalid)
 {
 	/* 0x01: no bank swizzle
 	 * 0x02: bank swizzled
@@ -57,6 +57,7 @@ nv50_mmu_kind(struct nvkm_mmu *base, int *count)
 		0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x7f, 0x7f
 	};
 	*count = ARRAY_SIZE(kind);
+	*invalid = 0x7f;
 	return kind;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index 07f2fcd..479b023 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -35,17 +35,17 @@ struct nvkm_mmu_func {
 		u32 pd_offset;
 	} vmm;
 
-	const u8 *(*kind)(struct nvkm_mmu *, int *count);
+	const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid);
 	bool kind_sys;
 };
 
 extern const struct nvkm_mmu_func nv04_mmu;
 
-const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count);
+const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
 
-const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count);
+const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
 
-const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *);
+const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *);
 
 struct nvkm_mmu_pt {
 	union {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index c0db0ce..b21e82e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -1,5 +1,6 @@
 /*
  * Copyright 2018 Red Hat Inc.
+ * Copyright 2019 NVIDIA Corporation.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,13 +27,26 @@
 
 #include <nvif/class.h>
 
+const u8 *
+tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
+{
+	static const u8
+	kind[16] = {
+		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00 */
+		0x06, 0x06, 0x02, 0x01, 0x03, 0x04, 0x05, 0x07,
+	};
+	*count = ARRAY_SIZE(kind);
+	*invalid = 0x07;
+	return kind;
+}
+
 static const struct nvkm_mmu_func
 tu102_mmu = {
 	.dma_bits = 47,
 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
 	.mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
 	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GP100}, tu102_vmm_new },
-	.kind = gm200_mmu_kind,
+	.kind = tu102_mmu_kind,
 	.kind_sys = true,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
index 353f10f..0e4b894 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
@@ -111,15 +111,17 @@ nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc)
 	} *args = argv;
 	const u8 *kind = NULL;
 	int ret = -ENOSYS, count = 0;
+	u8 kind_inv = 0;
 
 	if (mmu->func->kind)
-		kind = mmu->func->kind(mmu, &count);
+		kind = mmu->func->kind(mmu, &count, &kind_inv);
 
 	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
 		if (argc != args->v0.count * sizeof(*args->v0.data))
 			return -EINVAL;
 		if (args->v0.count > count)
 			return -EINVAL;
+		args->v0.kind_inv = kind_inv;
 		memcpy(args->v0.data, kind, args->v0.count);
 	} else
 		return ret;
@@ -157,9 +159,10 @@ nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
 	struct nvkm_mmu *mmu = device->mmu;
 	struct nvkm_ummu *ummu;
 	int ret = -ENOSYS, kinds = 0;
+	u8 unused = 0;
 
 	if (mmu->func->kind)
-		mmu->func->kind(mmu, &kinds);
+		mmu->func->kind(mmu, &kinds, &unused);
 
 	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
 		args->v0.dmabits = mmu->dma_bits;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
index ab6424f..6a2d9eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -247,7 +247,7 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
 	} *args = argv;
 	struct nvkm_device *device = vmm->mmu->subdev.device;
 	struct nvkm_memory *memory = map->memory;
-	u8  kind, priv, ro, vol;
+	u8  kind, kind_inv, priv, ro, vol;
 	int kindn, aper, ret = -ENOSYS;
 	const u8 *kindm;
 
@@ -274,8 +274,8 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
 	if (WARN_ON(aper < 0))
 		return aper;
 
-	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
-	if (kind >= kindn || kindm[kind] == 0xff) {
+	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+	if (kind >= kindn || kindm[kind] == kind_inv) {
 		VMM_DEBUG(vmm, "kind %02x", kind);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index b4f5197..d862875 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -320,7 +320,7 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
 	} *args = argv;
 	struct nvkm_device *device = vmm->mmu->subdev.device;
 	struct nvkm_memory *memory = map->memory;
-	u8  kind, priv, ro, vol;
+	u8  kind, kind_inv, priv, ro, vol;
 	int kindn, aper, ret = -ENOSYS;
 	const u8 *kindm;
 
@@ -347,8 +347,8 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
 	if (WARN_ON(aper < 0))
 		return aper;
 
-	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
-	if (kind >= kindn || kindm[kind] == 0xff) {
+	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+	if (kind >= kindn || kindm[kind] == kind_inv) {
 		VMM_DEBUG(vmm, "kind %02x", kind);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index c98afe3..2d89e27 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -235,7 +235,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
 	struct nvkm_device *device = vmm->mmu->subdev.device;
 	struct nvkm_ram *ram = device->fb->ram;
 	struct nvkm_memory *memory = map->memory;
-	u8  aper, kind, comp, priv, ro;
+	u8  aper, kind, kind_inv, comp, priv, ro;
 	int kindn, ret = -ENOSYS;
 	const u8 *kindm;
 
@@ -278,8 +278,8 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
 		return -EINVAL;
 	}
 
-	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
-	if (kind >= kindn || kindm[kind] == 0x7f) {
+	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+	if (kind >= kindn || kindm[kind] == kind_inv) {
 		VMM_DEBUG(vmm, "kind %02x", kind);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index e37b6e4..a76c2a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -12,3 +12,4 @@
 nvkm-y += nvkm/subdev/pmu/gm20b.o
 nvkm-y += nvkm/subdev/pmu/gp100.o
 nvkm-y += nvkm/subdev/pmu/gp102.o
+nvkm-y += nvkm/subdev/pmu/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index ea2e117..a0fe607 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -23,7 +23,7 @@
  */
 #include "priv.h"
 
-#include <core/msgqueue.h>
+#include <core/firmware.h>
 #include <subdev/timer.h>
 
 bool
@@ -85,6 +85,12 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
 		pmu->func->fini(pmu);
 
 	flush_work(&pmu->recv.work);
+
+	reinit_completion(&pmu->wpr_ready);
+
+	nvkm_falcon_cmdq_fini(pmu->lpq);
+	nvkm_falcon_cmdq_fini(pmu->hpq);
+	pmu->initmsg_received = false;
 	return 0;
 }
 
@@ -133,19 +139,15 @@ nvkm_pmu_init(struct nvkm_subdev *subdev)
 	return ret;
 }
 
-static int
-nvkm_pmu_oneinit(struct nvkm_subdev *subdev)
-{
-	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
-	return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
-}
-
 static void *
 nvkm_pmu_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
-	nvkm_msgqueue_del(&pmu->queue);
-	nvkm_falcon_del(&pmu->falcon);
+	nvkm_falcon_msgq_del(&pmu->msgq);
+	nvkm_falcon_cmdq_del(&pmu->lpq);
+	nvkm_falcon_cmdq_del(&pmu->hpq);
+	nvkm_falcon_qmgr_del(&pmu->qmgr);
+	nvkm_falcon_dtor(&pmu->falcon);
 	return nvkm_pmu(subdev);
 }
 
@@ -153,29 +155,50 @@ static const struct nvkm_subdev_func
 nvkm_pmu = {
 	.dtor = nvkm_pmu_dtor,
 	.preinit = nvkm_pmu_preinit,
-	.oneinit = nvkm_pmu_oneinit,
 	.init = nvkm_pmu_init,
 	.fini = nvkm_pmu_fini,
 	.intr = nvkm_pmu_intr,
 };
 
 int
-nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
 	      int index, struct nvkm_pmu *pmu)
 {
+	int ret;
+
 	nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
-	pmu->func = func;
+
 	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
 	init_waitqueue_head(&pmu->recv.wait);
+
+	fwif = nvkm_firmware_load(&pmu->subdev, fwif, "Pmu", pmu);
+	if (IS_ERR(fwif))
+		return PTR_ERR(fwif);
+
+	pmu->func = fwif->func;
+
+	ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev,
+			       nvkm_subdev_name[pmu->subdev.index], 0x10a000,
+			       &pmu->falcon);
+	if (ret)
+		return ret;
+
+	if ((ret = nvkm_falcon_qmgr_new(&pmu->falcon, &pmu->qmgr)) ||
+	    (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "hpq", &pmu->hpq)) ||
+	    (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "lpq", &pmu->lpq)) ||
+	    (ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq)))
+		return ret;
+
+	init_completion(&pmu->wpr_ready);
 	return 0;
 }
 
 int
-nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
 	      int index, struct nvkm_pmu **ppmu)
 {
 	struct nvkm_pmu *pmu;
 	if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
 		return -ENOMEM;
-	return nvkm_pmu_ctor(func, device, index, *ppmu);
+	return nvkm_pmu_ctor(fwif, device, index, *ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 0b45865..3ecb3d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -42,6 +42,7 @@ gf100_pmu_enabled(struct nvkm_pmu *pmu)
 
 static const struct nvkm_pmu_func
 gf100_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.code.data = gf100_pmu_code,
 	.code.size = sizeof(gf100_pmu_code),
 	.data.data = gf100_pmu_data,
@@ -56,7 +57,19 @@ gf100_pmu = {
 };
 
 int
+gf100_pmu_nofw(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
+{
+	return 0;
+}
+
+static const struct nvkm_pmu_fwif
+gf100_pmu_fwif[] = {
+	{ -1, gf100_pmu_nofw, &gf100_pmu },
+	{}
+};
+
+int
 gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(&gf100_pmu, device, index, ppmu);
+	return nvkm_pmu_new_(gf100_pmu_fwif, device, index, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index 3dfa79d..8dd0271 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -26,6 +26,7 @@
 
 static const struct nvkm_pmu_func
 gf119_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.code.data = gf119_pmu_code,
 	.code.size = sizeof(gf119_pmu_code),
 	.data.data = gf119_pmu_data,
@@ -39,8 +40,14 @@ gf119_pmu = {
 	.recv = gt215_pmu_recv,
 };
 
+static const struct nvkm_pmu_fwif
+gf119_pmu_fwif[] = {
+	{ -1, gf100_pmu_nofw, &gf119_pmu },
+	{}
+};
+
 int
 gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(&gf119_pmu, device, index, ppmu);
+	return nvkm_pmu_new_(gf119_pmu_fwif, device, index, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 8f7ec10..8b70cc1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -105,6 +105,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 
 static const struct nvkm_pmu_func
 gk104_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.code.data = gk104_pmu_code,
 	.code.size = sizeof(gk104_pmu_code),
 	.data.data = gk104_pmu_data,
@@ -119,8 +120,14 @@ gk104_pmu = {
 	.pgob = gk104_pmu_pgob,
 };
 
+static const struct nvkm_pmu_fwif
+gk104_pmu_fwif[] = {
+	{ -1, gf100_pmu_nofw, &gk104_pmu },
+	{}
+};
+
 int
 gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(&gk104_pmu, device, index, ppmu);
+	return nvkm_pmu_new_(gk104_pmu_fwif, device, index, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index 345741d..0081f21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -84,6 +84,7 @@ gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 
 static const struct nvkm_pmu_func
 gk110_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.code.data = gk110_pmu_code,
 	.code.size = sizeof(gk110_pmu_code),
 	.data.data = gk110_pmu_data,
@@ -98,8 +99,14 @@ gk110_pmu = {
 	.pgob = gk110_pmu_pgob,
 };
 
+static const struct nvkm_pmu_fwif
+gk110_pmu_fwif[] = {
+	{ -1, gf100_pmu_nofw, &gk110_pmu },
+	{}
+};
+
 int
 gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(&gk110_pmu, device, index, ppmu);
+	return nvkm_pmu_new_(gk110_pmu_fwif, device, index, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index e4acf78..b227c70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -26,6 +26,7 @@
 
 static const struct nvkm_pmu_func
 gk208_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.code.data = gk208_pmu_code,
 	.code.size = sizeof(gk208_pmu_code),
 	.data.data = gk208_pmu_data,
@@ -40,8 +41,14 @@ gk208_pmu = {
 	.pgob = gk110_pmu_pgob,
 };
 
+static const struct nvkm_pmu_fwif
+gk208_pmu_fwif[] = {
+	{ -1, gf100_pmu_nofw, &gk208_pmu },
+	{}
+};
+
 int
 gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(&gk208_pmu, device, index, ppmu);
+	return nvkm_pmu_new_(gk208_pmu_fwif, device, index, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 05e8185..26c1adf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -95,7 +95,7 @@ static void
 gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
 			      struct gk20a_pmu_dvfs_dev_status *status)
 {
-	struct nvkm_falcon *falcon = pmu->base.falcon;
+	struct nvkm_falcon *falcon = &pmu->base.falcon;
 
 	status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10));
 	status->total= nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10));
@@ -104,7 +104,7 @@ gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
 static void
 gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
 {
-	struct nvkm_falcon *falcon = pmu->base.falcon;
+	struct nvkm_falcon *falcon = &pmu->base.falcon;
 
 	nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000);
 	nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000);
@@ -160,7 +160,7 @@ gk20a_pmu_fini(struct nvkm_pmu *pmu)
 	struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
 	nvkm_timer_alarm(pmu->subdev.device->timer, 0, &gpmu->alarm);
 
-	nvkm_falcon_put(pmu->falcon, &pmu->subdev);
+	nvkm_falcon_put(&pmu->falcon, &pmu->subdev);
 }
 
 static int
@@ -169,7 +169,7 @@ gk20a_pmu_init(struct nvkm_pmu *pmu)
 	struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
 	struct nvkm_subdev *subdev = &pmu->subdev;
 	struct nvkm_device *device = pmu->subdev.device;
-	struct nvkm_falcon *falcon = pmu->falcon;
+	struct nvkm_falcon *falcon = &pmu->falcon;
 	int ret;
 
 	ret = nvkm_falcon_get(falcon, subdev);
@@ -196,25 +196,34 @@ gk20a_dvfs_data= {
 
 static const struct nvkm_pmu_func
 gk20a_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.enabled = gf100_pmu_enabled,
 	.init = gk20a_pmu_init,
 	.fini = gk20a_pmu_fini,
 	.reset = gf100_pmu_reset,
 };
 
+static const struct nvkm_pmu_fwif
+gk20a_pmu_fwif[] = {
+	{ -1, gf100_pmu_nofw, &gk20a_pmu },
+	{}
+};
+
 int
 gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
 	struct gk20a_pmu *pmu;
+	int ret;
 
 	if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
 		return -ENOMEM;
 	*ppmu = &pmu->base;
 
-	nvkm_pmu_ctor(&gk20a_pmu, device, index, &pmu->base);
+	ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, index, &pmu->base);
+	if (ret)
+		return ret;
 
 	pmu->data = &gk20a_dvfs_data;
 	nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
-
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
index 459df1e..5afb55e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -28,6 +28,7 @@
 
 static const struct nvkm_pmu_func
 gm107_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.code.data = gm107_pmu_code,
 	.code.size = sizeof(gm107_pmu_code),
 	.data.data = gm107_pmu_data,
@@ -41,8 +42,14 @@ gm107_pmu = {
 	.recv = gt215_pmu_recv,
 };
 
+static const struct nvkm_pmu_fwif
+gm107_pmu_fwif[] = {
+	{ -1, gf100_pmu_nofw, &gm107_pmu },
+	{}
+};
+
 int
 gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(&gm107_pmu, device, index, ppmu);
+	return nvkm_pmu_new_(gm107_pmu_fwif, device, index, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 31c8431..8257103 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -19,38 +19,224 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-
-#include <engine/falcon.h>
-#include <core/msgqueue.h>
 #include "priv.h"
 
-static void
-gm20b_pmu_recv(struct nvkm_pmu *pmu)
+#include <core/memory.h>
+#include <subdev/acr.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/pmu.h>
+
+static int
+gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nv_falcon_msg *hdr)
 {
-	if (!pmu->queue) {
-		nvkm_warn(&pmu->subdev,
-			  "recv function called while no firmware set!\n");
-		return;
+	struct nv_pmu_acr_bootstrap_falcon_msg *msg =
+		container_of(hdr, typeof(*msg), msg.hdr);
+	return msg->falcon_id;
+}
+
+int
+gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
+			       enum nvkm_acr_lsf_id id)
+{
+	struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
+	struct nv_pmu_acr_bootstrap_falcon_cmd cmd = {
+		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
+		.cmd.hdr.size = sizeof(cmd),
+		.cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON,
+		.flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
+		.falcon_id = id,
+	};
+	int ret;
+
+	ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
+				    gm20b_pmu_acr_bootstrap_falcon_cb,
+				    &pmu->subdev, msecs_to_jiffies(1000));
+	if (ret >= 0) {
+		if (ret != cmd.falcon_id)
+			ret = -EIO;
+		else
+			ret = 0;
 	}
 
-	nvkm_msgqueue_recv(pmu->queue);
+	return ret;
+}
+
+int
+gm20b_pmu_acr_boot(struct nvkm_falcon *falcon)
+{
+	struct nv_pmu_args args = { .secure_mode = true };
+	const u32 addr_args = falcon->data.limit - sizeof(struct nv_pmu_args);
+	nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
+	nvkm_falcon_start(falcon);
+	return 0;
+}
+
+void
+gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+	struct loader_config hdr;
+	u64 addr;
+
+	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+	addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8);
+	hdr.code_dma_base  = lower_32_bits((addr + adjust) >> 8);
+	hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+	addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8);
+	hdr.data_dma_base  = lower_32_bits((addr + adjust) >> 8);
+	hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+	addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8);
+	hdr.overlay_dma_base  = lower_32_bits((addr + adjust) << 8);
+	hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) << 8);
+	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+
+	loader_config_dump(&acr->subdev, &hdr);
+}
+
+void
+gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+			struct nvkm_acr_lsfw *lsfw)
+{
+	const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+	const u64 code = (base + lsfw->app_resident_code_offset) >> 8;
+	const u64 data = (base + lsfw->app_resident_data_offset) >> 8;
+	const struct loader_config hdr = {
+		.dma_idx = FALCON_DMAIDX_UCODE,
+		.code_dma_base = lower_32_bits(code),
+		.code_size_total = lsfw->app_size,
+		.code_size_to_load = lsfw->app_resident_code_size,
+		.code_entry_point = lsfw->app_imem_entry,
+		.data_dma_base = lower_32_bits(data),
+		.data_size = lsfw->app_resident_data_size,
+		.overlay_dma_base = lower_32_bits(code),
+		.argc = 1,
+		.argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args),
+		.code_dma_base1 = upper_32_bits(code),
+		.data_dma_base1 = upper_32_bits(data),
+		.overlay_dma_base1 = upper_32_bits(code),
+	};
+
+	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+static const struct nvkm_acr_lsf_func
+gm20b_pmu_acr = {
+	.flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
+	.bld_size = sizeof(struct loader_config),
+	.bld_write = gm20b_pmu_acr_bld_write,
+	.bld_patch = gm20b_pmu_acr_bld_patch,
+	.boot = gm20b_pmu_acr_boot,
+	.bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
+};
+
+static int
+gm20b_pmu_acr_init_wpr_callback(void *priv, struct nv_falcon_msg *hdr)
+{
+	struct nv_pmu_acr_init_wpr_region_msg *msg =
+		container_of(hdr, typeof(*msg), msg.hdr);
+	struct nvkm_pmu *pmu = priv;
+	struct nvkm_subdev *subdev = &pmu->subdev;
+
+	if (msg->error_code) {
+		nvkm_error(subdev, "ACR WPR init failure: %d\n",
+			   msg->error_code);
+		return -EINVAL;
+	}
+
+	nvkm_debug(subdev, "ACR WPR init complete\n");
+	complete_all(&pmu->wpr_ready);
+	return 0;
+}
+
+static int
+gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu)
+{
+	struct nv_pmu_acr_init_wpr_region_cmd cmd = {
+		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
+		.cmd.hdr.size = sizeof(cmd),
+		.cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION,
+		.region_id = 1,
+		.wpr_offset = 0,
+	};
+
+	return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
+				     gm20b_pmu_acr_init_wpr_callback, pmu, 0);
+}
+
+int
+gm20b_pmu_initmsg(struct nvkm_pmu *pmu)
+{
+	struct nv_pmu_init_msg msg;
+	int ret;
+
+	ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg));
+	if (ret)
+		return ret;
+
+	if (msg.hdr.unit_id != NV_PMU_UNIT_INIT ||
+	    msg.msg_type != NV_PMU_INIT_MSG_INIT)
+		return -EINVAL;
+
+	nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index,
+					msg.queue_info[0].offset,
+					msg.queue_info[0].size);
+	nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index,
+					msg.queue_info[1].offset,
+					msg.queue_info[1].size);
+	nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index,
+					 msg.queue_info[4].offset,
+					 msg.queue_info[4].size);
+	return gm20b_pmu_acr_init_wpr(pmu);
+}
+
+void
+gm20b_pmu_recv(struct nvkm_pmu *pmu)
+{
+	if (!pmu->initmsg_received) {
+		int ret = pmu->func->initmsg(pmu);
+		if (ret) {
+			nvkm_error(&pmu->subdev,
+				   "error parsing init message: %d\n", ret);
+			return;
+		}
+
+		pmu->initmsg_received = true;
+	}
+
+	nvkm_falcon_msgq_recv(pmu->msgq);
 }
 
 static const struct nvkm_pmu_func
 gm20b_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.enabled = gf100_pmu_enabled,
 	.intr = gt215_pmu_intr,
 	.recv = gm20b_pmu_recv,
+	.initmsg = gm20b_pmu_initmsg,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
+MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
+MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
+#endif
+
+int
+gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
+{
+	return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon,
+						 NVKM_ACR_LSF_PMU, "pmu/",
+						 ver, fwif->acr);
+}
+
+static const struct nvkm_pmu_fwif
+gm20b_pmu_fwif[] = {
+	{ 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
+	{}
 };
 
 int
 gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
-	int ret;
-
-	ret = nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu);
-	if (ret)
-		return ret;
-
-	return 0;
+	return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
index e210cd6..09e05db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
@@ -25,12 +25,19 @@
 
 static const struct nvkm_pmu_func
 gp100_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.enabled = gf100_pmu_enabled,
 	.reset = gf100_pmu_reset,
 };
 
+static const struct nvkm_pmu_fwif
+gp100_pmu_fwif[] = {
+	{ -1, gf100_pmu_nofw, &gp100_pmu },
+	{}
+};
+
 int
 gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(&gp100_pmu, device, index, ppmu);
+	return nvkm_pmu_new_(gp100_pmu_fwif, device, index, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index 98c7a2a..262b8a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -39,12 +39,19 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu)
 
 static const struct nvkm_pmu_func
 gp102_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.enabled = gp102_pmu_enabled,
 	.reset = gp102_pmu_reset,
 };
 
+static const struct nvkm_pmu_fwif
+gp102_pmu_fwif[] = {
+	{ -1, gf100_pmu_nofw, &gp102_pmu },
+	{}
+};
+
 int
 gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(&gp102_pmu, device, index, ppmu);
+	return nvkm_pmu_new_(gp102_pmu_fwif, device, index, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
new file mode 100644
index 0000000..5b81c73
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/acr.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/pmu.h>
+
+static int
+gp10b_pmu_acr_bootstrap_multiple_falcons_cb(void *priv,
+					    struct nv_falcon_msg *hdr)
+{
+	struct nv_pmu_acr_bootstrap_multiple_falcons_msg *msg =
+		container_of(hdr, typeof(*msg), msg.hdr);
+	return msg->falcon_mask;
+}
+static int
+gp10b_pmu_acr_bootstrap_multiple_falcons(struct nvkm_falcon *falcon, u32 mask)
+{
+	struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
+	struct nv_pmu_acr_bootstrap_multiple_falcons_cmd cmd = {
+		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
+		.cmd.hdr.size = sizeof(cmd),
+		.cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS,
+		.flags = NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES,
+		.falcon_mask = mask,
+		.wpr_lo = 0, /*XXX*/
+		.wpr_hi = 0, /*XXX*/
+	};
+	int ret;
+
+	ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
+				    gp10b_pmu_acr_bootstrap_multiple_falcons_cb,
+				    &pmu->subdev, msecs_to_jiffies(1000));
+	if (ret >= 0) {
+		if (ret != cmd.falcon_mask)
+			ret = -EIO;
+		else
+			ret = 0;
+	}
+
+	return ret;
+}
+
+static const struct nvkm_acr_lsf_func
+gp10b_pmu_acr = {
+	.flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
+	.bld_size = sizeof(struct loader_config),
+	.bld_write = gm20b_pmu_acr_bld_write,
+	.bld_patch = gm20b_pmu_acr_bld_patch,
+	.boot = gm20b_pmu_acr_boot,
+	.bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
+	.bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
+};
+
+static const struct nvkm_pmu_func
+gp10b_pmu = {
+	.flcn = &gt215_pmu_flcn,
+	.enabled = gf100_pmu_enabled,
+	.intr = gt215_pmu_intr,
+	.recv = gm20b_pmu_recv,
+	.initmsg = gm20b_pmu_initmsg,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
+MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
+MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
+#endif
+
+static const struct nvkm_pmu_fwif
+gp10b_pmu_fwif[] = {
+	{ 0, gm20b_pmu_load, &gp10b_pmu, &gp10b_pmu_acr },
+	{}
+};
+
+int
+gp10b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(gp10b_pmu_fwif, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index e04216d..88b9099 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -241,8 +241,27 @@ gt215_pmu_init(struct nvkm_pmu *pmu)
 	return 0;
 }
 
+const struct nvkm_falcon_func
+gt215_pmu_flcn = {
+	.debug = 0xc08,
+	.fbif = 0xe00,
+	.load_imem = nvkm_falcon_v1_load_imem,
+	.load_dmem = nvkm_falcon_v1_load_dmem,
+	.read_dmem = nvkm_falcon_v1_read_dmem,
+	.bind_context = nvkm_falcon_v1_bind_context,
+	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+	.set_start_addr = nvkm_falcon_v1_set_start_addr,
+	.start = nvkm_falcon_v1_start,
+	.enable = nvkm_falcon_v1_enable,
+	.disable = nvkm_falcon_v1_disable,
+	.cmdq = { 0x4a0, 0x4b0, 4 },
+	.msgq = { 0x4c8, 0x4cc, 0 },
+};
+
 static const struct nvkm_pmu_func
 gt215_pmu = {
+	.flcn = &gt215_pmu_flcn,
 	.code.data = gt215_pmu_code,
 	.code.size = sizeof(gt215_pmu_code),
 	.data.data = gt215_pmu_data,
@@ -256,8 +275,14 @@ gt215_pmu = {
 	.recv = gt215_pmu_recv,
 };
 
+static const struct nvkm_pmu_fwif
+gt215_pmu_fwif[] = {
+	{ -1, gf100_pmu_nofw, &gt215_pmu },
+	{}
+};
+
 int
 gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(&gt215_pmu, device, index, ppmu);
+	return nvkm_pmu_new_(gt215_pmu_fwif, device, index, ppmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 26d73f9..f470859 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -4,13 +4,12 @@
 #define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev)
 #include <subdev/pmu.h>
 #include <subdev/pmu/fuc/os.h>
-
-int nvkm_pmu_ctor(const struct nvkm_pmu_func *, struct nvkm_device *,
-		  int index, struct nvkm_pmu *);
-int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
-		  int index, struct nvkm_pmu **);
+enum nvkm_acr_lsf_id;
+struct nvkm_acr_lsfw;
 
 struct nvkm_pmu_func {
+	const struct nvkm_falcon_func *flcn;
+
 	struct {
 		u32 *data;
 		u32  size;
@@ -29,9 +28,11 @@ struct nvkm_pmu_func {
 	int (*send)(struct nvkm_pmu *, u32 reply[2], u32 process,
 		    u32 message, u32 data0, u32 data1);
 	void (*recv)(struct nvkm_pmu *);
+	int (*initmsg)(struct nvkm_pmu *);
 	void (*pgob)(struct nvkm_pmu *, bool);
 };
 
+extern const struct nvkm_falcon_func gt215_pmu_flcn;
 int gt215_pmu_init(struct nvkm_pmu *);
 void gt215_pmu_fini(struct nvkm_pmu *);
 void gt215_pmu_intr(struct nvkm_pmu *);
@@ -42,4 +43,26 @@ bool gf100_pmu_enabled(struct nvkm_pmu *);
 void gf100_pmu_reset(struct nvkm_pmu *);
 
 void gk110_pmu_pgob(struct nvkm_pmu *, bool);
+
+void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64);
+void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *);
+int gm20b_pmu_acr_boot(struct nvkm_falcon *);
+int gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *, enum nvkm_acr_lsf_id);
+void gm20b_pmu_recv(struct nvkm_pmu *);
+int gm20b_pmu_initmsg(struct nvkm_pmu *);
+
+struct nvkm_pmu_fwif {
+	int version;
+	int (*load)(struct nvkm_pmu *, int ver, const struct nvkm_pmu_fwif *);
+	const struct nvkm_pmu_func *func;
+	const struct nvkm_acr_lsf_func *acr;
+};
+
+int gf100_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
+int gm20b_pmu_load(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
+
+int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *,
+		  int index, struct nvkm_pmu *);
+int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *,
+		  int index, struct nvkm_pmu **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
deleted file mode 100644
index f3dee26..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/subdev/secboot/base.o
-nvkm-y += nvkm/subdev/secboot/hs_ucode.o
-nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o
-nvkm-y += nvkm/subdev/secboot/ls_ucode_msgqueue.o
-nvkm-y += nvkm/subdev/secboot/acr.o
-nvkm-y += nvkm/subdev/secboot/acr_r352.o
-nvkm-y += nvkm/subdev/secboot/acr_r361.o
-nvkm-y += nvkm/subdev/secboot/acr_r364.o
-nvkm-y += nvkm/subdev/secboot/acr_r367.o
-nvkm-y += nvkm/subdev/secboot/acr_r370.o
-nvkm-y += nvkm/subdev/secboot/acr_r375.o
-nvkm-y += nvkm/subdev/secboot/gm200.o
-nvkm-y += nvkm/subdev/secboot/gm20b.o
-nvkm-y += nvkm/subdev/secboot/gp102.o
-nvkm-y += nvkm/subdev/secboot/gp108.o
-nvkm-y += nvkm/subdev/secboot/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
deleted file mode 100644
index dc80985..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-
-#include <core/firmware.h>
-
-/**
- * Convenience function to duplicate a firmware file in memory and check that
- * it has the required minimum size.
- */
-void *
-nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name,
-		       size_t min_size)
-{
-	const struct firmware *fw;
-	void *blob;
-	int ret;
-
-	ret = nvkm_firmware_get(subdev, name, &fw);
-	if (ret)
-		return ERR_PTR(ret);
-	if (fw->size < min_size) {
-		nvkm_error(subdev, "%s is smaller than expected size %zu\n",
-			   name, min_size);
-		nvkm_firmware_put(fw);
-		return ERR_PTR(-EINVAL);
-	}
-	blob = kmemdup(fw->data, fw->size, GFP_KERNEL);
-	nvkm_firmware_put(fw);
-	if (!blob)
-		return ERR_PTR(-ENOMEM);
-
-	return blob;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
deleted file mode 100644
index 73a2ac8..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef __NVKM_SECBOOT_ACR_H__
-#define __NVKM_SECBOOT_ACR_H__
-
-#include "priv.h"
-
-struct nvkm_acr;
-
-/**
- * struct nvkm_acr_func - properties and functions specific to an ACR
- *
- * @load: make the ACR ready to run on the given secboot device
- * @reset: reset the specified falcon
- * @start: start the specified falcon (assumed to have been reset)
- */
-struct nvkm_acr_func {
-	void (*dtor)(struct nvkm_acr *);
-	int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *);
-	int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool);
-	int (*load)(struct nvkm_acr *, struct nvkm_falcon *,
-		    struct nvkm_gpuobj *, u64);
-	int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, unsigned long);
-};
-
-/**
- * struct nvkm_acr - instance of an ACR
- *
- * @boot_falcon: ID of the falcon that will perform secure boot
- * @managed_falcons: bitfield of falcons managed by this ACR
- * @optional_falcons: bitfield of falcons we can live without
- */
-struct nvkm_acr {
-	const struct nvkm_acr_func *func;
-	const struct nvkm_subdev *subdev;
-
-	enum nvkm_secboot_falcon boot_falcon;
-	unsigned long managed_falcons;
-	unsigned long optional_falcons;
-};
-
-void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t);
-
-struct nvkm_acr *acr_r352_new(unsigned long);
-struct nvkm_acr *acr_r361_new(unsigned long);
-struct nvkm_acr *acr_r364_new(unsigned long);
-struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long);
-struct nvkm_acr *acr_r370_new(enum nvkm_secboot_falcon, unsigned long);
-struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
deleted file mode 100644
index 7af971d..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ /dev/null
@@ -1,1241 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r352.h"
-#include "hs_ucode.h"
-
-#include <core/gpuobj.h>
-#include <core/firmware.h>
-#include <engine/falcon.h>
-#include <subdev/pmu.h>
-#include <core/msgqueue.h>
-#include <engine/sec2.h>
-
-/**
- * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor
- * @signature:		16B signature for secure code. 0s if no secure code
- * @ctx_dma:		DMA context to be used by BL while loading code/data
- * @code_dma_base:	256B-aligned Physical FB Address where code is located
- *			(falcon's $xcbase register)
- * @non_sec_code_off:	offset from code_dma_base where the non-secure code is
- *                      located. The offset must be multiple of 256 to help perf
- * @non_sec_code_size:	the size of the nonSecure code part.
- * @sec_code_off:	offset from code_dma_base where the secure code is
- *                      located. The offset must be multiple of 256 to help perf
- * @sec_code_size:	offset from code_dma_base where the secure code is
- *                      located. The offset must be multiple of 256 to help perf
- * @code_entry_point:	code entry point which will be invoked by BL after
- *                      code is loaded.
- * @data_dma_base:	256B aligned Physical FB Address where data is located.
- *			(falcon's $xdbase register)
- * @data_size:		size of data block. Should be multiple of 256B
- *
- * Structure used by the bootloader to load the rest of the code. This has
- * to be filled by host and copied into DMEM at offset provided in the
- * hsflcn_bl_desc.bl_desc_dmem_load_off.
- */
-struct acr_r352_flcn_bl_desc {
-	u32 reserved[4];
-	u32 signature[4];
-	u32 ctx_dma;
-	u32 code_dma_base;
-	u32 non_sec_code_off;
-	u32 non_sec_code_size;
-	u32 sec_code_off;
-	u32 sec_code_size;
-	u32 code_entry_point;
-	u32 data_dma_base;
-	u32 data_size;
-	u32 code_dma_base1;
-	u32 data_dma_base1;
-};
-
-/**
- * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image
- */
-static void
-acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
-			       const struct ls_ucode_img *img, u64 wpr_addr,
-			       void *_desc)
-{
-	struct acr_r352_flcn_bl_desc *desc = _desc;
-	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
-	u64 base, addr_code, addr_data;
-
-	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
-	addr_code = (base + pdesc->app_resident_code_offset) >> 8;
-	addr_data = (base + pdesc->app_resident_data_offset) >> 8;
-
-	desc->ctx_dma = FALCON_DMAIDX_UCODE;
-	desc->code_dma_base = lower_32_bits(addr_code);
-	desc->code_dma_base1 = upper_32_bits(addr_code);
-	desc->non_sec_code_off = pdesc->app_resident_code_offset;
-	desc->non_sec_code_size = pdesc->app_resident_code_size;
-	desc->code_entry_point = pdesc->app_imem_entry;
-	desc->data_dma_base = lower_32_bits(addr_data);
-	desc->data_dma_base1 = upper_32_bits(addr_data);
-	desc->data_size = pdesc->app_resident_data_size;
-}
-
-
-/**
- * struct hsflcn_acr_desc - data section of the HS firmware
- *
- * This header is to be copied at the beginning of DMEM by the HS bootloader.
- *
- * @signature:		signature of ACR ucode
- * @wpr_region_id:	region ID holding the WPR header and its details
- * @wpr_offset:		offset from the WPR region holding the wpr header
- * @regions:		region descriptors
- * @nonwpr_ucode_blob_size:	size of LS blob
- * @nonwpr_ucode_blob_start:	FB location of LS blob is
- */
-struct hsflcn_acr_desc {
-	union {
-		u8 reserved_dmem[0x200];
-		u32 signatures[4];
-	} ucode_reserved_space;
-	u32 wpr_region_id;
-	u32 wpr_offset;
-	u32 mmu_mem_range;
-#define FLCN_ACR_MAX_REGIONS 2
-	struct {
-		u32 no_regions;
-		struct {
-			u32 start_addr;
-			u32 end_addr;
-			u32 region_id;
-			u32 read_mask;
-			u32 write_mask;
-			u32 client_mask;
-		} region_props[FLCN_ACR_MAX_REGIONS];
-	} regions;
-	u32 ucode_blob_size;
-	u64 ucode_blob_base __aligned(8);
-	struct {
-		u32 vpr_enabled;
-		u32 vpr_start;
-		u32 vpr_end;
-		u32 hdcp_policies;
-	} vpr_desc;
-};
-
-
-/*
- * Low-secure blob creation
- */
-
-/**
- * struct acr_r352_lsf_lsb_header - LS firmware header
- * @signature:		signature to verify the firmware against
- * @ucode_off:		offset of the ucode blob in the WPR region. The ucode
- *                      blob contains the bootloader, code and data of the
- *                      LS falcon
- * @ucode_size:		size of the ucode blob, including bootloader
- * @data_size:		size of the ucode blob data
- * @bl_code_size:	size of the bootloader code
- * @bl_imem_off:	offset in imem of the bootloader
- * @bl_data_off:	offset of the bootloader data in WPR region
- * @bl_data_size:	size of the bootloader data
- * @app_code_off:	offset of the app code relative to ucode_off
- * @app_code_size:	size of the app code
- * @app_data_off:	offset of the app data relative to ucode_off
- * @app_data_size:	size of the app data
- * @flags:		flags for the secure bootloader
- *
- * This structure is written into the WPR region for each managed falcon. Each
- * instance is referenced by the lsb_offset member of the corresponding
- * lsf_wpr_header.
- */
-struct acr_r352_lsf_lsb_header {
-	/**
-	 * LS falcon signatures
-	 * @prd_keys:		signature to use in production mode
-	 * @dgb_keys:		signature to use in debug mode
-	 * @b_prd_present:	whether the production key is present
-	 * @b_dgb_present:	whether the debug key is present
-	 * @falcon_id:		ID of the falcon the ucode applies to
-	 */
-	struct {
-		u8 prd_keys[2][16];
-		u8 dbg_keys[2][16];
-		u32 b_prd_present;
-		u32 b_dbg_present;
-		u32 falcon_id;
-	} signature;
-	u32 ucode_off;
-	u32 ucode_size;
-	u32 data_size;
-	u32 bl_code_size;
-	u32 bl_imem_off;
-	u32 bl_data_off;
-	u32 bl_data_size;
-	u32 app_code_off;
-	u32 app_code_size;
-	u32 app_data_off;
-	u32 app_data_size;
-	u32 flags;
-};
-
-/**
- * struct acr_r352_lsf_wpr_header - LS blob WPR Header
- * @falcon_id:		LS falcon ID
- * @lsb_offset:		offset of the lsb_lsf_header in the WPR region
- * @bootstrap_owner:	secure falcon reponsible for bootstrapping the LS falcon
- * @lazy_bootstrap:	skip bootstrapping by ACR
- * @status:		bootstrapping status
- *
- * An array of these is written at the beginning of the WPR region, one for
- * each managed falcon. The array is terminated by an instance which falcon_id
- * is LSF_FALCON_ID_INVALID.
- */
-struct acr_r352_lsf_wpr_header {
-	u32 falcon_id;
-	u32 lsb_offset;
-	u32 bootstrap_owner;
-	u32 lazy_bootstrap;
-	u32 status;
-#define LSF_IMAGE_STATUS_NONE				0
-#define LSF_IMAGE_STATUS_COPY				1
-#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED		2
-#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED		3
-#define LSF_IMAGE_STATUS_VALIDATION_DONE		4
-#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED		5
-#define LSF_IMAGE_STATUS_BOOTSTRAP_READY		6
-};
-
-/**
- * struct ls_ucode_img_r352 - ucode image augmented with r352 headers
- */
-struct ls_ucode_img_r352 {
-	struct ls_ucode_img base;
-
-	const struct acr_r352_lsf_func *func;
-
-	struct acr_r352_lsf_wpr_header wpr_header;
-	struct acr_r352_lsf_lsb_header lsb_header;
-};
-#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)
-
-/**
- * ls_ucode_img_load() - create a lsf_ucode_img and load it
- */
-struct ls_ucode_img *
-acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
-			   const struct nvkm_secboot *sb,
-			   enum nvkm_secboot_falcon falcon_id)
-{
-	const struct nvkm_subdev *subdev = acr->base.subdev;
-	const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
-	struct ls_ucode_img_r352 *img;
-	int ret;
-
-	img = kzalloc(sizeof(*img), GFP_KERNEL);
-	if (!img)
-		return ERR_PTR(-ENOMEM);
-
-	img->base.falcon_id = falcon_id;
-
-	ret = func->load(sb, func->version_max, &img->base);
-	if (ret < 0) {
-		kfree(img->base.ucode_data);
-		kfree(img->base.sig);
-		kfree(img);
-		return ERR_PTR(ret);
-	}
-
-	img->func = func->version[ret];
-
-	/* Check that the signature size matches our expectations... */
-	if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
-		nvkm_error(subdev, "invalid signature size for %s falcon!\n",
-			   nvkm_secboot_falcon_name[falcon_id]);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* Copy signature to the right place */
-	memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
-
-	/* not needed? the signature should already have the right value */
-	img->lsb_header.signature.falcon_id = falcon_id;
-
-	return &img->base;
-}
-
-#define LSF_LSB_HEADER_ALIGN 256
-#define LSF_BL_DATA_ALIGN 256
-#define LSF_BL_DATA_SIZE_ALIGN 256
-#define LSF_BL_CODE_SIZE_ALIGN 256
-#define LSF_UCODE_DATA_ALIGN 4096
-
-/**
- * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image
- * @acr:	ACR to use
- * @img:	image to generate for
- * @offset:	offset in the WPR region where this image starts
- *
- * Allocate space in the WPR area from offset and write the WPR and LSB headers
- * accordingly.
- *
- * Return: offset at the end of this image.
- */
-static u32
-acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
-			     struct ls_ucode_img_r352 *img, u32 offset)
-{
-	struct ls_ucode_img *_img = &img->base;
-	struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
-	struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
-	struct ls_ucode_img_desc *desc = &_img->ucode_desc;
-	const struct acr_r352_lsf_func *func = img->func;
-
-	/* Fill WPR header */
-	whdr->falcon_id = _img->falcon_id;
-	whdr->bootstrap_owner = acr->base.boot_falcon;
-	whdr->status = LSF_IMAGE_STATUS_COPY;
-
-	/* Skip bootstrapping falcons started by someone else than ACR */
-	if (acr->lazy_bootstrap & BIT(_img->falcon_id))
-		whdr->lazy_bootstrap = 1;
-
-	/* Align, save off, and include an LSB header size */
-	offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
-	whdr->lsb_offset = offset;
-	offset += sizeof(*lhdr);
-
-	/*
-	 * Align, save off, and include the original (static) ucode
-	 * image size
-	 */
-	offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
-	_img->ucode_off = lhdr->ucode_off = offset;
-	offset += _img->ucode_size;
-
-	/*
-	 * For falcons that use a boot loader (BL), we append a loader
-	 * desc structure on the end of the ucode image and consider
-	 * this the boot loader data. The host will then copy the loader
-	 * desc args to this space within the WPR region (before locking
-	 * down) and the HS bin will then copy them to DMEM 0 for the
-	 * loader.
-	 */
-	lhdr->bl_code_size = ALIGN(desc->bootloader_size,
-				   LSF_BL_CODE_SIZE_ALIGN);
-	lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
-				 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
-	lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
-				lhdr->bl_code_size - lhdr->ucode_size;
-	/*
-	 * Though the BL is located at 0th offset of the image, the VA
-	 * is different to make sure that it doesn't collide the actual
-	 * OS VA range
-	 */
-	lhdr->bl_imem_off = desc->bootloader_imem_offset;
-	lhdr->app_code_off = desc->app_start_offset +
-			     desc->app_resident_code_offset;
-	lhdr->app_code_size = desc->app_resident_code_size;
-	lhdr->app_data_off = desc->app_start_offset +
-			     desc->app_resident_data_offset;
-	lhdr->app_data_size = desc->app_resident_data_size;
-
-	lhdr->flags = func->lhdr_flags;
-	if (_img->falcon_id == acr->base.boot_falcon)
-		lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
-
-	/* Align and save off BL descriptor size */
-	lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
-
-	/*
-	 * Align, save off, and include the additional BL data
-	 */
-	offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
-	lhdr->bl_data_off = offset;
-	offset += lhdr->bl_data_size;
-
-	return offset;
-}
-
-/**
- * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images
- */
-int
-acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
-{
-	struct ls_ucode_img_r352 *img;
-	struct list_head *l;
-	u32 count = 0;
-	u32 offset;
-
-	/* Count the number of images to manage */
-	list_for_each(l, imgs)
-		count++;
-
-	/*
-	 * Start with an array of WPR headers at the base of the WPR.
-	 * The expectation here is that the secure falcon will do a single DMA
-	 * read of this array and cache it internally so it's ok to pack these.
-	 * Also, we add 1 to the falcon count to indicate the end of the array.
-	 */
-	offset = sizeof(img->wpr_header) * (count + 1);
-
-	/*
-	 * Walk the managed falcons, accounting for the LSB structs
-	 * as well as the ucode images.
-	 */
-	list_for_each_entry(img, imgs, base.node) {
-		offset = acr_r352_ls_img_fill_headers(acr, img, offset);
-	}
-
-	return offset;
-}
-
-/**
- * acr_r352_ls_write_wpr - write the WPR blob contents
- */
-int
-acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
-		      struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
-{
-	struct ls_ucode_img *_img;
-	u32 pos = 0;
-	u32 max_desc_size = 0;
-	u8 *gdesc;
-
-	/* Figure out how large we need gdesc to be. */
-	list_for_each_entry(_img, imgs, node) {
-		struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
-		const struct acr_r352_lsf_func *ls_func = img->func;
-
-		max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
-	}
-
-	gdesc = kmalloc(max_desc_size, GFP_KERNEL);
-	if (!gdesc)
-		return -ENOMEM;
-
-	nvkm_kmap(wpr_blob);
-
-	list_for_each_entry(_img, imgs, node) {
-		struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
-		const struct acr_r352_lsf_func *ls_func = img->func;
-
-		nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
-				      sizeof(img->wpr_header));
-
-		nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
-				     &img->lsb_header, sizeof(img->lsb_header));
-
-		/* Generate and write BL descriptor */
-		memset(gdesc, 0, ls_func->bl_desc_size);
-		ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
-
-		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
-				      gdesc, ls_func->bl_desc_size);
-
-		/* Copy ucode */
-		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
-				      _img->ucode_data, _img->ucode_size);
-
-		pos += sizeof(img->wpr_header);
-	}
-
-	nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
-
-	nvkm_done(wpr_blob);
-
-	kfree(gdesc);
-
-	return 0;
-}
-
-/* Both size and address of WPR need to be 256K-aligned */
-#define WPR_ALIGNMENT	0x40000
-/**
- * acr_r352_prepare_ls_blob() - prepare the LS blob
- *
- * For each securely managed falcon, load the FW, signatures and bootloaders and
- * prepare a ucode blob. Then, compute the offsets in the WPR region for each
- * blob, and finally write the headers and ucode blobs into a GPU object that
- * will be copied into the WPR region by the HS firmware.
- */
-static int
-acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
-	const struct nvkm_subdev *subdev = acr->base.subdev;
-	struct list_head imgs;
-	struct ls_ucode_img *img, *t;
-	unsigned long managed_falcons = acr->base.managed_falcons;
-	u64 wpr_addr = sb->wpr_addr;
-	u32 wpr_size = sb->wpr_size;
-	int managed_count = 0;
-	u32 image_wpr_size, ls_blob_size;
-	int falcon_id;
-	int ret;
-
-	INIT_LIST_HEAD(&imgs);
-
-	/* Load all LS blobs */
-	for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
-		struct ls_ucode_img *img;
-
-		img = acr->func->ls_ucode_img_load(acr, sb, falcon_id);
-		if (IS_ERR(img)) {
-			if (acr->base.optional_falcons & BIT(falcon_id)) {
-				managed_falcons &= ~BIT(falcon_id);
-				nvkm_info(subdev, "skipping %s falcon...\n",
-					  nvkm_secboot_falcon_name[falcon_id]);
-				continue;
-			}
-			ret = PTR_ERR(img);
-			goto cleanup;
-		}
-
-		list_add_tail(&img->node, &imgs);
-		managed_count++;
-	}
-
-	/* Commit the actual list of falcons we will manage from now on */
-	acr->base.managed_falcons = managed_falcons;
-
-	/*
-	 * If the boot falcon has a firmare, let it manage the bootstrap of other
-	 * falcons.
-	 */
-	if (acr->func->ls_func[acr->base.boot_falcon] &&
-	    (managed_falcons & BIT(acr->base.boot_falcon))) {
-		for_each_set_bit(falcon_id, &managed_falcons,
-				 NVKM_SECBOOT_FALCON_END) {
-			if (falcon_id == acr->base.boot_falcon)
-				continue;
-
-			acr->lazy_bootstrap |= BIT(falcon_id);
-		}
-	}
-
-	/*
-	 * Fill the WPR and LSF headers with the right offsets and compute
-	 * required WPR size
-	 */
-	image_wpr_size = acr->func->ls_fill_headers(acr, &imgs);
-	image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);
-
-	ls_blob_size = image_wpr_size;
-
-	/*
-	 * If we need a shadow area, allocate twice the size and use the
-	 * upper half as WPR
-	 */
-	if (wpr_size == 0 && acr->func->shadow_blob)
-		ls_blob_size *= 2;
-
-	/* Allocate GPU object that will contain the WPR region */
-	ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT,
-			      false, NULL, &acr->ls_blob);
-	if (ret)
-		goto cleanup;
-
-	nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n",
-		    managed_count, image_wpr_size);
-
-	/* If WPR address and size are not fixed, set them to fit the LS blob */
-	if (wpr_size == 0) {
-		wpr_addr = acr->ls_blob->addr;
-		if (acr->func->shadow_blob)
-			wpr_addr += acr->ls_blob->size / 2;
-
-		wpr_size = image_wpr_size;
-	/*
-	 * But if the WPR region is set by the bootloader, it is illegal for
-	 * the HS blob to be larger than this region.
-	 */
-	} else if (image_wpr_size > wpr_size) {
-		nvkm_error(subdev, "WPR region too small for FW blob!\n");
-		nvkm_error(subdev, "required: %dB\n", image_wpr_size);
-		nvkm_error(subdev, "available: %dB\n", wpr_size);
-		ret = -ENOSPC;
-		goto cleanup;
-	}
-
-	/* Write LS blob */
-	ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr);
-	if (ret)
-		nvkm_gpuobj_del(&acr->ls_blob);
-
-cleanup:
-	list_for_each_entry_safe(img, t, &imgs, node) {
-		kfree(img->ucode_data);
-		kfree(img->sig);
-		kfree(img);
-	}
-
-	return ret;
-}
-
-
-
-
-void
-acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
-		       void *_desc)
-{
-	struct hsflcn_acr_desc *desc = _desc;
-	struct nvkm_gpuobj *ls_blob = acr->ls_blob;
-
-	/* WPR region information if WPR is not fixed */
-	if (sb->wpr_size == 0) {
-		u64 wpr_start = ls_blob->addr;
-		u64 wpr_end = wpr_start + ls_blob->size;
-
-		desc->wpr_region_id = 1;
-		desc->regions.no_regions = 2;
-		desc->regions.region_props[0].start_addr = wpr_start >> 8;
-		desc->regions.region_props[0].end_addr = wpr_end >> 8;
-		desc->regions.region_props[0].region_id = 1;
-		desc->regions.region_props[0].read_mask = 0xf;
-		desc->regions.region_props[0].write_mask = 0xc;
-		desc->regions.region_props[0].client_mask = 0x2;
-	} else {
-		desc->ucode_blob_base = ls_blob->addr;
-		desc->ucode_blob_size = ls_blob->size;
-	}
-}
-
-static void
-acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
-			     u64 offset)
-{
-	struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc;
-	u64 addr_code, addr_data;
-
-	addr_code = offset >> 8;
-	addr_data = (offset + hdr->data_dma_base) >> 8;
-
-	bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
-	bl_desc->code_dma_base = lower_32_bits(addr_code);
-	bl_desc->non_sec_code_off = hdr->non_sec_code_off;
-	bl_desc->non_sec_code_size = hdr->non_sec_code_size;
-	bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
-	bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
-	bl_desc->code_entry_point = 0;
-	bl_desc->data_dma_base = lower_32_bits(addr_data);
-	bl_desc->data_size = hdr->data_size;
-}
-
-/**
- * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor
- *
- * @sb secure boot instance to prepare for
- * @fw name of the HS firmware to load
- * @blob pointer to gpuobj that will be allocated to receive the HS FW payload
- * @bl_desc pointer to the BL descriptor to write for this firmware
- * @patch whether we should patch the HS descriptor (only for HS loaders)
- */
-static int
-acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
-			 const char *fw, struct nvkm_gpuobj **blob,
-			 struct hsf_load_header *load_header, bool patch)
-{
-	struct nvkm_subdev *subdev = &sb->subdev;
-	void *acr_image;
-	struct fw_bin_header *hsbin_hdr;
-	struct hsf_fw_header *fw_hdr;
-	struct hsf_load_header *load_hdr;
-	void *acr_data;
-	int ret;
-
-	acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw);
-	if (IS_ERR(acr_image))
-		return PTR_ERR(acr_image);
-
-	hsbin_hdr = acr_image;
-	fw_hdr = acr_image + hsbin_hdr->header_offset;
-	load_hdr = acr_image + fw_hdr->hdr_offset;
-	acr_data = acr_image + hsbin_hdr->data_offset;
-
-	/* Patch descriptor with WPR information? */
-	if (patch) {
-		struct hsflcn_acr_desc *desc;
-
-		desc = acr_data + load_hdr->data_dma_base;
-		acr->func->fixup_hs_desc(acr, sb, desc);
-	}
-
-	if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
-		nvkm_error(subdev, "more apps (%d) than supported (%d)!",
-			   load_hdr->num_apps, ACR_R352_MAX_APPS);
-		ret = -EINVAL;
-		goto cleanup;
-	}
-	memcpy(load_header, load_hdr, sizeof(*load_header) +
-			  (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps));
-
-	/* Create ACR blob and copy HS data to it */
-	ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
-			      0x1000, false, NULL, blob);
-	if (ret)
-		goto cleanup;
-
-	nvkm_kmap(*blob);
-	nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
-	nvkm_done(*blob);
-
-cleanup:
-	kfree(acr_image);
-
-	return ret;
-}
-
-/**
- * acr_r352_load_blobs - load blobs common to all ACR V1 versions.
- *
- * This includes the LS blob, HS ucode loading blob, and HS bootloader.
- *
- * The HS ucode unload blob is only used on dGPU if the WPR region is variable.
- */
-int
-acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
-	struct nvkm_subdev *subdev = &sb->subdev;
-	int ret;
-
-	/* Firmware already loaded? */
-	if (acr->firmware_ok)
-		return 0;
-
-	/* Load and prepare the managed falcon's firmwares */
-	ret = acr_r352_prepare_ls_blob(acr, sb);
-	if (ret)
-		return ret;
-
-	/* Load the HS firmware that will load the LS firmwares */
-	if (!acr->load_blob) {
-		ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load",
-					       &acr->load_blob,
-					       &acr->load_bl_header, true);
-		if (ret)
-			return ret;
-	}
-
-	/* If the ACR region is dynamically programmed, we need an unload FW */
-	if (sb->wpr_size == 0) {
-		ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload",
-					       &acr->unload_blob,
-					       &acr->unload_bl_header, false);
-		if (ret)
-			return ret;
-	}
-
-	/* Load the HS firmware bootloader */
-	if (!acr->hsbl_blob) {
-		acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
-		if (IS_ERR(acr->hsbl_blob)) {
-			ret = PTR_ERR(acr->hsbl_blob);
-			acr->hsbl_blob = NULL;
-			return ret;
-		}
-
-		if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) {
-			acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev,
-							    "acr/unload_bl", 0);
-			if (IS_ERR(acr->hsbl_unload_blob)) {
-				ret = PTR_ERR(acr->hsbl_unload_blob);
-				acr->hsbl_unload_blob = NULL;
-				return ret;
-			}
-		} else {
-			acr->hsbl_unload_blob = acr->hsbl_blob;
-		}
-	}
-
-	acr->firmware_ok = true;
-	nvkm_debug(&sb->subdev, "LS blob successfully created\n");
-
-	return 0;
-}
-
-/**
- * acr_r352_load() - prepare HS falcon to run the specified blob, mapped.
- *
- * Returns the start address to use, or a negative error value.
- */
-static int
-acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
-	      struct nvkm_gpuobj *blob, u64 offset)
-{
-	struct acr_r352 *acr = acr_r352(_acr);
-	const u32 bl_desc_size = acr->func->hs_bl_desc_size;
-	const struct hsf_load_header *load_hdr;
-	struct fw_bin_header *bl_hdr;
-	struct fw_bl_desc *hsbl_desc;
-	void *bl, *blob_data, *hsbl_code, *hsbl_data;
-	u32 code_size;
-	u8 *bl_desc;
-
-	bl_desc = kzalloc(bl_desc_size, GFP_KERNEL);
-	if (!bl_desc)
-		return -ENOMEM;
-
-	/* Find the bootloader descriptor for our blob and copy it */
-	if (blob == acr->load_blob) {
-		load_hdr = &acr->load_bl_header;
-		bl = acr->hsbl_blob;
-	} else if (blob == acr->unload_blob) {
-		load_hdr = &acr->unload_bl_header;
-		bl = acr->hsbl_unload_blob;
-	} else {
-		nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
-		kfree(bl_desc);
-		return -EINVAL;
-	}
-
-	bl_hdr = bl;
-	hsbl_desc = bl + bl_hdr->header_offset;
-	blob_data = bl + bl_hdr->data_offset;
-	hsbl_code = blob_data + hsbl_desc->code_off;
-	hsbl_data = blob_data + hsbl_desc->data_off;
-	code_size = ALIGN(hsbl_desc->code_size, 256);
-
-	/*
-	 * Copy HS bootloader data
-	 */
-	nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0);
-
-	/* Copy HS bootloader code to end of IMEM */
-	nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size,
-			      code_size, hsbl_desc->start_tag, 0, false);
-
-	/* Generate the BL header */
-	acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);
-
-	/*
-	 * Copy HS BL header where the HS descriptor expects it to be
-	 */
-	nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
-			      bl_desc_size, 0);
-
-	kfree(bl_desc);
-	return hsbl_desc->start_tag << 8;
-}
-
-static int
-acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
-	struct nvkm_subdev *subdev = &sb->subdev;
-	int i;
-
-	/* Run the unload blob to unprotect the WPR region */
-	if (acr->unload_blob && sb->wpr_set) {
-		int ret;
-
-		nvkm_debug(subdev, "running HS unload blob\n");
-		ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon);
-		if (ret < 0)
-			return ret;
-		/*
-		 * Unload blob will return this error code - it is not an error
-		 * and the expected behavior on RM as well
-		 */
-		if (ret && ret != 0x1d) {
-			nvkm_error(subdev, "HS unload failed, ret 0x%08x\n", ret);
-			return -EINVAL;
-		}
-		nvkm_debug(subdev, "HS unload blob completed\n");
-	}
-
-	for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
-		acr->falcon_state[i] = NON_SECURE;
-
-	sb->wpr_set = false;
-
-	return 0;
-}
-
-/**
- * Check if the WPR region has been indeed set by the ACR firmware, and
- * matches where it should be.
- */
-static bool
-acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb)
-{
-	const struct nvkm_subdev *subdev = &sb->subdev;
-	const struct nvkm_device *device = subdev->device;
-	u64 wpr_lo, wpr_hi;
-	u64 wpr_range_lo, wpr_range_hi;
-
-	nvkm_wr32(device, 0x100cd4, 0x2);
-	wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff);
-	wpr_lo <<= 8;
-	nvkm_wr32(device, 0x100cd4, 0x3);
-	wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff);
-	wpr_hi <<= 8;
-
-	if (sb->wpr_size != 0) {
-		wpr_range_lo = sb->wpr_addr;
-		wpr_range_hi = wpr_range_lo + sb->wpr_size;
-	} else {
-		wpr_range_lo = acr->ls_blob->addr;
-		wpr_range_hi = wpr_range_lo + acr->ls_blob->size;
-	}
-
-	return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi &&
-		wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi);
-}
-
-static int
-acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
-	const struct nvkm_subdev *subdev = &sb->subdev;
-	unsigned long managed_falcons = acr->base.managed_falcons;
-	int falcon_id;
-	int ret;
-
-	if (sb->wpr_set)
-		return 0;
-
-	/* Make sure all blobs are ready */
-	ret = acr_r352_load_blobs(acr, sb);
-	if (ret)
-		return ret;
-
-	nvkm_debug(subdev, "running HS load blob\n");
-	ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon);
-	/* clear halt interrupt */
-	nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
-	sb->wpr_set = acr_r352_wpr_is_set(acr, sb);
-	if (ret < 0) {
-		return ret;
-	} else if (ret > 0) {
-		nvkm_error(subdev, "HS load failed, ret 0x%08x\n", ret);
-		return -EINVAL;
-	}
-	nvkm_debug(subdev, "HS load blob completed\n");
-	/* WPR must be set at this point */
-	if (!sb->wpr_set) {
-		nvkm_error(subdev, "ACR blob completed but WPR not set!\n");
-		return -EINVAL;
-	}
-
-	/* Run LS firmwares post_run hooks */
-	for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
-		const struct acr_r352_ls_func *func =
-						  acr->func->ls_func[falcon_id];
-
-		if (func->post_run) {
-			ret = func->post_run(&acr->base, sb);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return 0;
-}
-
-/**
- * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded
- *
- * Reset is done by re-executing secure boot from scratch, with lazy bootstrap
- * disabled. This has the effect of making all managed falcons ready-to-run.
- */
-static int
-acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
-		     unsigned long falcon_mask)
-{
-	int falcon;
-	int ret;
-
-	/*
-	 * Perform secure boot each time we are called on FECS. Since only FECS
-	 * and GPCCS are managed and started together, this ought to be safe.
-	 */
-	if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS)))
-		goto end;
-
-	ret = acr_r352_shutdown(acr, sb);
-	if (ret)
-		return ret;
-
-	ret = acr_r352_bootstrap(acr, sb);
-	if (ret)
-		return ret;
-
-end:
-	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
-		acr->falcon_state[falcon] = RESET;
-	}
-	return 0;
-}
-
-/*
- * acr_r352_reset() - execute secure boot from the prepared state
- *
- * Load the HS bootloader and ask the falcon to run it. This will in turn
- * load the HS firmware and run it, so once the falcon stops all the managed
- * falcons should have their LS firmware loaded and be ready to run.
- */
-static int
-acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
-	       unsigned long falcon_mask)
-{
-	struct acr_r352 *acr = acr_r352(_acr);
-	struct nvkm_msgqueue *queue;
-	int falcon;
-	bool wpr_already_set = sb->wpr_set;
-	int ret;
-
-	/* Make sure secure boot is performed */
-	ret = acr_r352_bootstrap(acr, sb);
-	if (ret)
-		return ret;
-
-	/* No PMU interface? */
-	if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
-		/* Redo secure boot entirely if it was already done */
-		if (wpr_already_set)
-			return acr_r352_reset_nopmu(acr, sb, falcon_mask);
-		/* Else return the result of the initial invokation */
-		else
-			return ret;
-	}
-
-	switch (_acr->boot_falcon) {
-	case NVKM_SECBOOT_FALCON_PMU:
-		queue = sb->subdev.device->pmu->queue;
-		break;
-	case NVKM_SECBOOT_FALCON_SEC2:
-		queue = sb->subdev.device->sec2->queue;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* Otherwise just ask the LS firmware to reset the falcon */
-	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END)
-		nvkm_debug(&sb->subdev, "resetting %s falcon\n",
-			   nvkm_secboot_falcon_name[falcon]);
-	ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask);
-	if (ret) {
-		nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret);
-		return ret;
-	}
-	nvkm_debug(&sb->subdev, "falcon reset done\n");
-
-	return 0;
-}
-
-static int
-acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend)
-{
-	struct acr_r352 *acr = acr_r352(_acr);
-
-	return acr_r352_shutdown(acr, sb);
-}
-
-static void
-acr_r352_dtor(struct nvkm_acr *_acr)
-{
-	struct acr_r352 *acr = acr_r352(_acr);
-
-	nvkm_gpuobj_del(&acr->unload_blob);
-
-	if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU)
-		kfree(acr->hsbl_unload_blob);
-	kfree(acr->hsbl_blob);
-	nvkm_gpuobj_del(&acr->load_blob);
-	nvkm_gpuobj_del(&acr->ls_blob);
-
-	kfree(acr);
-}
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_fecs_func_0 = {
-	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r352_ls_fecs_func = {
-	.load = acr_ls_ucode_load_fecs,
-	.version_max = 0,
-	.version = {
-		&acr_r352_ls_fecs_func_0,
-	}
-};
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_gpccs_func_0 = {
-	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
-	/* GPCCS will be loaded using PRI */
-	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-static const struct acr_r352_ls_func
-acr_r352_ls_gpccs_func = {
-	.load = acr_ls_ucode_load_gpccs,
-	.version_max = 0,
-	.version = {
-		&acr_r352_ls_gpccs_func_0,
-	}
-};
-
-
-
-/**
- * struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor
- * @dma_idx:		DMA context to be used by BL while loading code/data
- * @code_dma_base:	256B-aligned Physical FB Address where code is located
- * @code_size_total:	total size of the code part in the ucode
- * @code_size_to_load:	size of the code part to load in PMU IMEM.
- * @code_entry_point:	entry point in the code.
- * @data_dma_base:	Physical FB address where data part of ucode is located
- * @data_size:		Total size of the data portion.
- * @overlay_dma_base:	Physical FB address for resident code present in ucode
- * @argc:		Total number of args
- * @argv:		offset where args are copied into PMU's DMEM.
- *
- * Structure used by the PMU bootloader to load the rest of the code
- */
-struct acr_r352_pmu_bl_desc {
-	u32 dma_idx;
-	u32 code_dma_base;
-	u32 code_size_total;
-	u32 code_size_to_load;
-	u32 code_entry_point;
-	u32 data_dma_base;
-	u32 data_size;
-	u32 overlay_dma_base;
-	u32 argc;
-	u32 argv;
-	u16 code_dma_base1;
-	u16 data_dma_base1;
-	u16 overlay_dma_base1;
-};
-
-/**
- * acr_r352_generate_pmu_bl_desc() - populate a DMEM BL descriptor for PMU LS image
- *
- */
-static void
-acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
-			      const struct ls_ucode_img *img, u64 wpr_addr,
-			      void *_desc)
-{
-	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
-	const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
-	struct acr_r352_pmu_bl_desc *desc = _desc;
-	u64 base;
-	u64 addr_code;
-	u64 addr_data;
-	u32 addr_args;
-
-	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
-	addr_code = (base + pdesc->app_resident_code_offset) >> 8;
-	addr_data = (base + pdesc->app_resident_data_offset) >> 8;
-	addr_args = pmu->falcon->data.limit;
-	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
-	desc->dma_idx = FALCON_DMAIDX_UCODE;
-	desc->code_dma_base = lower_32_bits(addr_code);
-	desc->code_dma_base1 = upper_32_bits(addr_code);
-	desc->code_size_total = pdesc->app_size;
-	desc->code_size_to_load = pdesc->app_resident_code_size;
-	desc->code_entry_point = pdesc->app_imem_entry;
-	desc->data_dma_base = lower_32_bits(addr_data);
-	desc->data_dma_base1 = upper_32_bits(addr_data);
-	desc->data_size = pdesc->app_resident_data_size;
-	desc->overlay_dma_base = lower_32_bits(addr_code);
-	desc->overlay_dma_base1 = upper_32_bits(addr_code);
-	desc->argc = 1;
-	desc->argv = addr_args;
-}
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_pmu_func_0 = {
-	.generate_bl_desc = acr_r352_generate_pmu_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
-};
-
-static const struct acr_r352_ls_func
-acr_r352_ls_pmu_func = {
-	.load = acr_ls_ucode_load_pmu,
-	.post_run = acr_ls_pmu_post_run,
-	.version_max = 0,
-	.version = {
-		&acr_r352_ls_pmu_func_0,
-	}
-};
-
-const struct acr_r352_func
-acr_r352_func = {
-	.fixup_hs_desc = acr_r352_fixup_hs_desc,
-	.generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
-	.hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
-	.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
-	.ls_fill_headers = acr_r352_ls_fill_headers,
-	.ls_write_wpr = acr_r352_ls_write_wpr,
-	.ls_func = {
-		[NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
-		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
-		[NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func,
-	},
-};
-
-static const struct nvkm_acr_func
-acr_r352_base_func = {
-	.dtor = acr_r352_dtor,
-	.fini = acr_r352_fini,
-	.load = acr_r352_load,
-	.reset = acr_r352_reset,
-};
-
-struct nvkm_acr *
-acr_r352_new_(const struct acr_r352_func *func,
-	      enum nvkm_secboot_falcon boot_falcon,
-	      unsigned long managed_falcons)
-{
-	struct acr_r352 *acr;
-	int i;
-
-	/* Check that all requested falcons are supported */
-	for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
-		if (!func->ls_func[i])
-			return ERR_PTR(-ENOTSUPP);
-	}
-
-	acr = kzalloc(sizeof(*acr), GFP_KERNEL);
-	if (!acr)
-		return ERR_PTR(-ENOMEM);
-
-	acr->base.boot_falcon = boot_falcon;
-	acr->base.managed_falcons = managed_falcons;
-	acr->base.func = &acr_r352_base_func;
-	acr->func = func;
-
-	return &acr->base;
-}
-
-struct nvkm_acr *
-acr_r352_new(unsigned long managed_falcons)
-{
-	return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU,
-			     managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
deleted file mode 100644
index e516cab..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef __NVKM_SECBOOT_ACR_R352_H__
-#define __NVKM_SECBOOT_ACR_R352_H__
-
-#include "acr.h"
-#include "ls_ucode.h"
-#include "hs_ucode.h"
-
-struct ls_ucode_img;
-
-#define ACR_R352_MAX_APPS 8
-
-#define LSF_FLAG_LOAD_CODE_AT_0		1
-#define LSF_FLAG_DMACTL_REQ_CTX		4
-#define LSF_FLAG_FORCE_PRIV_LOAD	8
-
-static inline u32
-hsf_load_header_app_off(const struct hsf_load_header *hdr, u32 app)
-{
-	return hdr->apps[app];
-}
-
-static inline u32
-hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
-{
-	return hdr->apps[hdr->num_apps + app];
-}
-
-/**
- * struct acr_r352_lsf_func - manages a specific LS firmware version
- *
- * @generate_bl_desc: function called on a block of bl_desc_size to generate the
- *		      proper bootloader descriptor for this LS firmware
- * @bl_desc_size: size of the bootloader descriptor
- * @lhdr_flags: LS flags
- */
-struct acr_r352_lsf_func {
-	void (*generate_bl_desc)(const struct nvkm_acr *,
-				 const struct ls_ucode_img *, u64, void *);
-	u32 bl_desc_size;
-	u32 lhdr_flags;
-};
-
-/**
- * struct acr_r352_ls_func - manages a single LS falcon
- *
- * @load: load the external firmware into a ls_ucode_img
- * @post_run: hook called right after the ACR is executed
- */
-struct acr_r352_ls_func {
-	int (*load)(const struct nvkm_secboot *, int maxver,
-		    struct ls_ucode_img *);
-	int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
-	int version_max;
-	const struct acr_r352_lsf_func *version[];
-};
-
-struct acr_r352;
-
-/**
- * struct acr_r352_func - manages nuances between ACR versions
- *
- * @generate_hs_bl_desc: function called on a block of bl_desc_size to generate
- *			 the proper HS bootloader descriptor
- * @hs_bl_desc_size: size of the HS bootloader descriptor
- */
-struct acr_r352_func {
-	void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *,
-				    u64);
-	void (*fixup_hs_desc)(struct acr_r352 *, struct nvkm_secboot *, void *);
-	u32 hs_bl_desc_size;
-	bool shadow_blob;
-
-	struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *,
-						  const struct nvkm_secboot *,
-						  enum nvkm_secboot_falcon);
-	int (*ls_fill_headers)(struct acr_r352 *, struct list_head *);
-	int (*ls_write_wpr)(struct acr_r352 *, struct list_head *,
-			    struct nvkm_gpuobj *, u64);
-
-	const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END];
-};
-
-/**
- * struct acr_r352 - ACR data for driver release 352 (and beyond)
- */
-struct acr_r352 {
-	struct nvkm_acr base;
-	const struct acr_r352_func *func;
-
-	/*
-	 * HS FW - lock WPR region (dGPU only) and load LS FWs.
-	 * On Tegra the HS FW copies the LS blob into the fixed WPR instead.
-	 */
-	struct nvkm_gpuobj *load_blob;
-	struct {
-		struct hsf_load_header load_bl_header;
-		u32 __load_apps[ACR_R352_MAX_APPS * 2];
-	};
-
-	/* HS FW - unlock WPR region (dGPU only) */
-	struct nvkm_gpuobj *unload_blob;
-	struct {
-		struct hsf_load_header unload_bl_header;
-		u32 __unload_apps[ACR_R352_MAX_APPS * 2];
-	};
-
-	/* HS bootloader */
-	void *hsbl_blob;
-
-	/* HS bootloader for unload blob, if using a different falcon */
-	void *hsbl_unload_blob;
-
-	/* LS FWs, to be loaded by the HS ACR */
-	struct nvkm_gpuobj *ls_blob;
-
-	/* Firmware already loaded? */
-	bool firmware_ok;
-
-	/* Falcons to lazy-bootstrap */
-	u32 lazy_bootstrap;
-
-	/* To keep track of the state of all managed falcons */
-	enum {
-		/* In non-secure state, no firmware loaded, no privileges */
-		NON_SECURE = 0,
-		/* In low-secure mode and ready to be started */
-		RESET,
-		/* In low-secure mode and running */
-		RUNNING,
-	} falcon_state[NVKM_SECBOOT_FALCON_END];
-};
-#define acr_r352(acr) container_of(acr, struct acr_r352, base)
-
-struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *,
-			       enum nvkm_secboot_falcon, unsigned long);
-
-struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *,
-						const struct nvkm_secboot *,
-						enum nvkm_secboot_falcon);
-int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *);
-int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *,
-			  struct nvkm_gpuobj *, u64);
-
-void acr_r352_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
deleted file mode 100644
index f6b2d20..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r361.h"
-
-#include <engine/falcon.h>
-#include <core/msgqueue.h>
-#include <subdev/pmu.h>
-#include <engine/sec2.h>
-
-static void
-acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
-			       const struct ls_ucode_img *img, u64 wpr_addr,
-			       void *_desc)
-{
-	struct acr_r361_flcn_bl_desc *desc = _desc;
-	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
-	u64 base, addr_code, addr_data;
-
-	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
-	addr_code = base + pdesc->app_resident_code_offset;
-	addr_data = base + pdesc->app_resident_data_offset;
-
-	desc->ctx_dma = FALCON_DMAIDX_UCODE;
-	desc->code_dma_base = u64_to_flcn64(addr_code);
-	desc->non_sec_code_off = pdesc->app_resident_code_offset;
-	desc->non_sec_code_size = pdesc->app_resident_code_size;
-	desc->code_entry_point = pdesc->app_imem_entry;
-	desc->data_dma_base = u64_to_flcn64(addr_data);
-	desc->data_size = pdesc->app_resident_data_size;
-}
-
-void
-acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
-			    u64 offset)
-{
-	struct acr_r361_flcn_bl_desc *bl_desc = _bl_desc;
-
-	bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
-	bl_desc->code_dma_base = u64_to_flcn64(offset);
-	bl_desc->non_sec_code_off = hdr->non_sec_code_off;
-	bl_desc->non_sec_code_size = hdr->non_sec_code_size;
-	bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
-	bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
-	bl_desc->code_entry_point = 0;
-	bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
-	bl_desc->data_size = hdr->data_size;
-}
-
-static const struct acr_r352_lsf_func
-acr_r361_ls_fecs_func_0 = {
-	.generate_bl_desc = acr_r361_generate_flcn_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r361_ls_fecs_func = {
-	.load = acr_ls_ucode_load_fecs,
-	.version_max = 0,
-	.version = {
-		&acr_r361_ls_fecs_func_0,
-	}
-};
-
-static const struct acr_r352_lsf_func
-acr_r361_ls_gpccs_func_0 = {
-	.generate_bl_desc = acr_r361_generate_flcn_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
-	/* GPCCS will be loaded using PRI */
-	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-const struct acr_r352_ls_func
-acr_r361_ls_gpccs_func = {
-	.load = acr_ls_ucode_load_gpccs,
-	.version_max = 0,
-	.version = {
-		&acr_r361_ls_gpccs_func_0,
-	}
-};
-
-struct acr_r361_pmu_bl_desc {
-	u32 reserved;
-	u32 dma_idx;
-	struct flcn_u64 code_dma_base;
-	u32 total_code_size;
-	u32 code_size_to_load;
-	u32 code_entry_point;
-	struct flcn_u64 data_dma_base;
-	u32 data_size;
-	struct flcn_u64 overlay_dma_base;
-	u32 argc;
-	u32 argv;
-};
-
-static void
-acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
-			      const struct ls_ucode_img *img, u64 wpr_addr,
-			      void *_desc)
-{
-	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
-	const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
-	struct acr_r361_pmu_bl_desc *desc = _desc;
-	u64 base, addr_code, addr_data;
-	u32 addr_args;
-
-	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
-	addr_code = base + pdesc->app_resident_code_offset;
-	addr_data = base + pdesc->app_resident_data_offset;
-	addr_args = pmu->falcon->data.limit;
-	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
-	desc->dma_idx = FALCON_DMAIDX_UCODE;
-	desc->code_dma_base = u64_to_flcn64(addr_code);
-	desc->total_code_size = pdesc->app_size;
-	desc->code_size_to_load = pdesc->app_resident_code_size;
-	desc->code_entry_point = pdesc->app_imem_entry;
-	desc->data_dma_base = u64_to_flcn64(addr_data);
-	desc->data_size = pdesc->app_resident_data_size;
-	desc->overlay_dma_base = u64_to_flcn64(addr_code);
-	desc->argc = 1;
-	desc->argv = addr_args;
-}
-
-static const struct acr_r352_lsf_func
-acr_r361_ls_pmu_func_0 = {
-	.generate_bl_desc = acr_r361_generate_pmu_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r361_ls_pmu_func = {
-	.load = acr_ls_ucode_load_pmu,
-	.post_run = acr_ls_pmu_post_run,
-	.version_max = 0,
-	.version = {
-		&acr_r361_ls_pmu_func_0,
-	}
-};
-
-static void
-acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
-			       const struct ls_ucode_img *img, u64 wpr_addr,
-			       void *_desc)
-{
-	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
-	const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
-	struct acr_r361_pmu_bl_desc *desc = _desc;
-	u64 base, addr_code, addr_data;
-	u32 addr_args;
-
-	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
-	/* For some reason we should not add app_resident_code_offset here */
-	addr_code = base;
-	addr_data = base + pdesc->app_resident_data_offset;
-	addr_args = sec->falcon->data.limit;
-	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
-	desc->dma_idx = FALCON_SEC2_DMAIDX_UCODE;
-	desc->code_dma_base = u64_to_flcn64(addr_code);
-	desc->total_code_size = pdesc->app_size;
-	desc->code_size_to_load = pdesc->app_resident_code_size;
-	desc->code_entry_point = pdesc->app_imem_entry;
-	desc->data_dma_base = u64_to_flcn64(addr_data);
-	desc->data_size = pdesc->app_resident_data_size;
-	desc->overlay_dma_base = u64_to_flcn64(addr_code);
-	desc->argc = 1;
-	/* args are stored at the beginning of EMEM */
-	desc->argv = 0x01000000;
-}
-
-const struct acr_r352_lsf_func
-acr_r361_ls_sec2_func_0 = {
-	.generate_bl_desc = acr_r361_generate_sec2_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
-};
-
-static const struct acr_r352_ls_func
-acr_r361_ls_sec2_func = {
-	.load = acr_ls_ucode_load_sec2,
-	.post_run = acr_ls_sec2_post_run,
-	.version_max = 0,
-	.version = {
-		&acr_r361_ls_sec2_func_0,
-	}
-};
-
-
-const struct acr_r352_func
-acr_r361_func = {
-	.fixup_hs_desc = acr_r352_fixup_hs_desc,
-	.generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
-	.hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
-	.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
-	.ls_fill_headers = acr_r352_ls_fill_headers,
-	.ls_write_wpr = acr_r352_ls_write_wpr,
-	.ls_func = {
-		[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
-		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
-		[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
-		[NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
-	},
-};
-
-struct nvkm_acr *
-acr_r361_new(unsigned long managed_falcons)
-{
-	return acr_r352_new_(&acr_r361_func, NVKM_SECBOOT_FALCON_PMU,
-			     managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
deleted file mode 100644
index 38dec93..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_ACR_R361_H__
-#define __NVKM_SECBOOT_ACR_R361_H__
-
-#include "acr_r352.h"
-
-/**
- * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor
- * @signature:		16B signature for secure code. 0s if no secure code
- * @ctx_dma:		DMA context to be used by BL while loading code/data
- * @code_dma_base:	256B-aligned Physical FB Address where code is located
- *			(falcon's $xcbase register)
- * @non_sec_code_off:	offset from code_dma_base where the non-secure code is
- *                      located. The offset must be a multiple of 256 to help perf
- * @non_sec_code_size:	size of the non-secure code part.
- * @sec_code_off:	offset from code_dma_base where the secure code is
- *                      located. The offset must be a multiple of 256 to help perf
- * @sec_code_size:	size of the secure code part.
- * @code_entry_point:	code entry point which will be invoked by BL after
- *                      code is loaded.
- * @data_dma_base:	256B aligned Physical FB Address where data is located.
- *			(falcon's $xdbase register)
- * @data_size:		size of the data block. Should be a multiple of 256B
- *
- * Structure used by the bootloader to load the rest of the code. This has
- * to be filled by host and copied into DMEM at offset provided in the
- * hsflcn_bl_desc.bl_desc_dmem_load_off.
- */
-struct acr_r361_flcn_bl_desc {
-	u32 reserved[4];
-	u32 signature[4];
-	u32 ctx_dma;
-	struct flcn_u64 code_dma_base;
-	u32 non_sec_code_off;
-	u32 non_sec_code_size;
-	u32 sec_code_off;
-	u32 sec_code_size;
-	u32 code_entry_point;
-	struct flcn_u64 data_dma_base;
-	u32 data_size;
-};
-
-void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
-
-extern const struct acr_r352_ls_func acr_r361_ls_fecs_func;
-extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func;
-extern const struct acr_r352_ls_func acr_r361_ls_pmu_func;
-extern const struct acr_r352_lsf_func acr_r361_ls_sec2_func_0;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
deleted file mode 100644
index 30cf041..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r361.h"
-
-#include <core/gpuobj.h>
-
-/*
- * r364 ACR: hsflcn_desc structure has changed to introduce the shadow_mem
- * parameter.
- */
-
-struct acr_r364_hsflcn_desc {
-	union {
-		u8 reserved_dmem[0x200];
-		u32 signatures[4];
-	} ucode_reserved_space;
-	u32 wpr_region_id;
-	u32 wpr_offset;
-	u32 mmu_memory_range;
-	struct {
-		u32 no_regions;
-		struct {
-			u32 start_addr;
-			u32 end_addr;
-			u32 region_id;
-			u32 read_mask;
-			u32 write_mask;
-			u32 client_mask;
-			u32 shadow_mem_start_addr;
-		} region_props[2];
-	} regions;
-	u32 ucode_blob_size;
-	u64 ucode_blob_base __aligned(8);
-	struct {
-		u32 vpr_enabled;
-		u32 vpr_start;
-		u32 vpr_end;
-		u32 hdcp_policies;
-	} vpr_desc;
-};
-
-static void
-acr_r364_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
-		       void *_desc)
-{
-	struct acr_r364_hsflcn_desc *desc = _desc;
-	struct nvkm_gpuobj *ls_blob = acr->ls_blob;
-
-	/* WPR region information if WPR is not fixed */
-	if (sb->wpr_size == 0) {
-		u64 wpr_start = ls_blob->addr;
-		u64 wpr_end = ls_blob->addr + ls_blob->size;
-
-		if (acr->func->shadow_blob)
-			wpr_start += ls_blob->size / 2;
-
-		desc->wpr_region_id = 1;
-		desc->regions.no_regions = 2;
-		desc->regions.region_props[0].start_addr = wpr_start >> 8;
-		desc->regions.region_props[0].end_addr = wpr_end >> 8;
-		desc->regions.region_props[0].region_id = 1;
-		desc->regions.region_props[0].read_mask = 0xf;
-		desc->regions.region_props[0].write_mask = 0xc;
-		desc->regions.region_props[0].client_mask = 0x2;
-		if (acr->func->shadow_blob)
-			desc->regions.region_props[0].shadow_mem_start_addr =
-							     ls_blob->addr >> 8;
-		else
-			desc->regions.region_props[0].shadow_mem_start_addr = 0;
-	} else {
-		desc->ucode_blob_base = ls_blob->addr;
-		desc->ucode_blob_size = ls_blob->size;
-	}
-}
-
-const struct acr_r352_func
-acr_r364_func = {
-	.fixup_hs_desc = acr_r364_fixup_hs_desc,
-	.generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
-	.hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
-	.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
-	.ls_fill_headers = acr_r352_ls_fill_headers,
-	.ls_write_wpr = acr_r352_ls_write_wpr,
-	.ls_func = {
-		[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
-		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
-		[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
-	},
-};
-
-
-struct nvkm_acr *
-acr_r364_new(unsigned long managed_falcons)
-{
-	return acr_r352_new_(&acr_r364_func, NVKM_SECBOOT_FALCON_PMU,
-			     managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
deleted file mode 100644
index 472ced29..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
+++ /dev/null
@@ -1,418 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r367.h"
-#include "acr_r361.h"
-#include "acr_r370.h"
-
-#include <core/gpuobj.h>
-
-/*
- * r367 ACR: new LS signature format requires a rewrite of LS firmware and
- * blob creation functions. Also the hsflcn_desc layout has changed slightly.
- */
-
-#define LSF_LSB_DEPMAP_SIZE 11
-
-/**
- * struct acr_r367_lsf_lsb_header - LS firmware header
- *
- * See also struct acr_r352_lsf_lsb_header for documentation.
- */
-struct acr_r367_lsf_lsb_header {
-	/**
-	 * LS falcon signatures
-	 * @prd_keys:		signature to use in production mode
-	 * @dbg_keys:		signature to use in debug mode
-	 * @b_prd_present:	whether the production key is present
-	 * @b_dbg_present:	whether the debug key is present
-	 * @falcon_id:		ID of the falcon the ucode applies to
-	 */
-	struct {
-		u8 prd_keys[2][16];
-		u8 dbg_keys[2][16];
-		u32 b_prd_present;
-		u32 b_dbg_present;
-		u32 falcon_id;
-		u32 supports_versioning;
-		u32 version;
-		u32 depmap_count;
-		u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4];
-		u8 kdf[16];
-	} signature;
-	u32 ucode_off;
-	u32 ucode_size;
-	u32 data_size;
-	u32 bl_code_size;
-	u32 bl_imem_off;
-	u32 bl_data_off;
-	u32 bl_data_size;
-	u32 app_code_off;
-	u32 app_code_size;
-	u32 app_data_off;
-	u32 app_data_size;
-	u32 flags;
-};
-
-/**
- * struct acr_r367_lsf_wpr_header - LS blob WPR Header
- *
- * See also struct acr_r352_lsf_wpr_header for documentation.
- */
-struct acr_r367_lsf_wpr_header {
-	u32 falcon_id;
-	u32 lsb_offset;
-	u32 bootstrap_owner;
-	u32 lazy_bootstrap;
-	u32 bin_version;
-	u32 status;
-#define LSF_IMAGE_STATUS_NONE				0
-#define LSF_IMAGE_STATUS_COPY				1
-#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED		2
-#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED		3
-#define LSF_IMAGE_STATUS_VALIDATION_DONE		4
-#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED		5
-#define LSF_IMAGE_STATUS_BOOTSTRAP_READY		6
-#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED		7
-};
-
-/**
- * struct ls_ucode_img_r367 - ucode image augmented with r367 headers
- */
-struct ls_ucode_img_r367 {
-	struct ls_ucode_img base;
-
-	const struct acr_r352_lsf_func *func;
-
-	struct acr_r367_lsf_wpr_header wpr_header;
-	struct acr_r367_lsf_lsb_header lsb_header;
-};
-#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base)
-
-struct ls_ucode_img *
-acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
-			   const struct nvkm_secboot *sb,
-			   enum nvkm_secboot_falcon falcon_id)
-{
-	const struct nvkm_subdev *subdev = acr->base.subdev;
-	const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
-	struct ls_ucode_img_r367 *img;
-	int ret;
-
-	img = kzalloc(sizeof(*img), GFP_KERNEL);
-	if (!img)
-		return ERR_PTR(-ENOMEM);
-
-	img->base.falcon_id = falcon_id;
-
-	ret = func->load(sb, func->version_max, &img->base);
-	if (ret < 0) {
-		kfree(img->base.ucode_data);
-		kfree(img->base.sig);
-		kfree(img);
-		return ERR_PTR(ret);
-	}
-
-	img->func = func->version[ret];
-
-	/* Check that the signature size matches our expectations... */
-	if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
-		nvkm_error(subdev, "invalid signature size for %s falcon!\n",
-			   nvkm_secboot_falcon_name[falcon_id]);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* Copy signature to the right place */
-	memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
-
-	/* not needed? the signature should already have the right value */
-	img->lsb_header.signature.falcon_id = falcon_id;
-
-	return &img->base;
-}
-
-#define LSF_LSB_HEADER_ALIGN 256
-#define LSF_BL_DATA_ALIGN 256
-#define LSF_BL_DATA_SIZE_ALIGN 256
-#define LSF_BL_CODE_SIZE_ALIGN 256
-#define LSF_UCODE_DATA_ALIGN 4096
-
-static u32
-acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
-			     struct ls_ucode_img_r367 *img, u32 offset)
-{
-	struct ls_ucode_img *_img = &img->base;
-	struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
-	struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
-	struct ls_ucode_img_desc *desc = &_img->ucode_desc;
-	const struct acr_r352_lsf_func *func = img->func;
-
-	/* Fill WPR header */
-	whdr->falcon_id = _img->falcon_id;
-	whdr->bootstrap_owner = acr->base.boot_falcon;
-	whdr->bin_version = lhdr->signature.version;
-	whdr->status = LSF_IMAGE_STATUS_COPY;
-
-	/* Skip bootstrapping falcons started by someone other than the ACR */
-	if (acr->lazy_bootstrap & BIT(_img->falcon_id))
-		whdr->lazy_bootstrap = 1;
-
-	/* Align, save off, and include an LSB header size */
-	offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
-	whdr->lsb_offset = offset;
-	offset += sizeof(*lhdr);
-
-	/*
-	 * Align, save off, and include the original (static) ucode
-	 * image size
-	 */
-	offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
-	_img->ucode_off = lhdr->ucode_off = offset;
-	offset += _img->ucode_size;
-
-	/*
-	 * For falcons that use a boot loader (BL), we append a loader
-	 * desc structure on the end of the ucode image and consider
-	 * this the boot loader data. The host will then copy the loader
-	 * desc args to this space within the WPR region (before locking
-	 * down) and the HS bin will then copy them to DMEM 0 for the
-	 * loader.
-	 */
-	lhdr->bl_code_size = ALIGN(desc->bootloader_size,
-				   LSF_BL_CODE_SIZE_ALIGN);
-	lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
-				 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
-	lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
-				lhdr->bl_code_size - lhdr->ucode_size;
-	/*
-	 * Though the BL is located at offset 0 of the image, the VA
-	 * is different to make sure that it doesn't collide with the
-	 * actual OS VA range.
-	 */
-	lhdr->bl_imem_off = desc->bootloader_imem_offset;
-	lhdr->app_code_off = desc->app_start_offset +
-			     desc->app_resident_code_offset;
-	lhdr->app_code_size = desc->app_resident_code_size;
-	lhdr->app_data_off = desc->app_start_offset +
-			     desc->app_resident_data_offset;
-	lhdr->app_data_size = desc->app_resident_data_size;
-
-	lhdr->flags = func->lhdr_flags;
-	if (_img->falcon_id == acr->base.boot_falcon)
-		lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
-
-	/* Align and save off BL descriptor size */
-	lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
-
-	/*
-	 * Align, save off, and include the additional BL data
-	 */
-	offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
-	lhdr->bl_data_off = offset;
-	offset += lhdr->bl_data_size;
-
-	return offset;
-}
-
-int
-acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
-{
-	struct ls_ucode_img_r367 *img;
-	struct list_head *l;
-	u32 count = 0;
-	u32 offset;
-
-	/* Count the number of images to manage */
-	list_for_each(l, imgs)
-		count++;
-
-	/*
-	 * Start with an array of WPR headers at the base of the WPR.
-	 * The expectation here is that the secure falcon will do a single DMA
-	 * read of this array and cache it internally so it's ok to pack these.
-	 * Also, we add 1 to the falcon count to indicate the end of the array.
-	 */
-	offset = sizeof(img->wpr_header) * (count + 1);
-
-	/*
-	 * Walk the managed falcons, accounting for the LSB structs
-	 * as well as the ucode images.
-	 */
-	list_for_each_entry(img, imgs, base.node) {
-		offset = acr_r367_ls_img_fill_headers(acr, img, offset);
-	}
-
-	return offset;
-}
-
-int
-acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
-		      struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
-{
-	struct ls_ucode_img *_img;
-	u32 pos = 0;
-	u32 max_desc_size = 0;
-	u8 *gdesc;
-
-	list_for_each_entry(_img, imgs, node) {
-		struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
-		const struct acr_r352_lsf_func *ls_func = img->func;
-
-		max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
-	}
-
-	gdesc = kmalloc(max_desc_size, GFP_KERNEL);
-	if (!gdesc)
-		return -ENOMEM;
-
-	nvkm_kmap(wpr_blob);
-
-	list_for_each_entry(_img, imgs, node) {
-		struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
-		const struct acr_r352_lsf_func *ls_func = img->func;
-
-		nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
-				      sizeof(img->wpr_header));
-
-		nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
-				     &img->lsb_header, sizeof(img->lsb_header));
-
-		/* Generate and write BL descriptor */
-		memset(gdesc, 0, ls_func->bl_desc_size);
-		ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
-
-		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
-				      gdesc, ls_func->bl_desc_size);
-
-		/* Copy ucode */
-		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
-				      _img->ucode_data, _img->ucode_size);
-
-		pos += sizeof(img->wpr_header);
-	}
-
-	nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
-
-	nvkm_done(wpr_blob);
-
-	kfree(gdesc);
-
-	return 0;
-}
-
-struct acr_r367_hsflcn_desc {
-	u8 reserved_dmem[0x200];
-	u32 signatures[4];
-	u32 wpr_region_id;
-	u32 wpr_offset;
-	u32 mmu_memory_range;
-#define FLCN_ACR_MAX_REGIONS 2
-	struct {
-		u32 no_regions;
-		struct {
-			u32 start_addr;
-			u32 end_addr;
-			u32 region_id;
-			u32 read_mask;
-			u32 write_mask;
-			u32 client_mask;
-			u32 shadow_mem_start_addr;
-		} region_props[FLCN_ACR_MAX_REGIONS];
-	} regions;
-	u32 ucode_blob_size;
-	u64 ucode_blob_base __aligned(8);
-	struct {
-		u32 vpr_enabled;
-		u32 vpr_start;
-		u32 vpr_end;
-		u32 hdcp_policies;
-	} vpr_desc;
-};
-
-void
-acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
-		       void *_desc)
-{
-	struct acr_r367_hsflcn_desc *desc = _desc;
-	struct nvkm_gpuobj *ls_blob = acr->ls_blob;
-
-	/* WPR region information if WPR is not fixed */
-	if (sb->wpr_size == 0) {
-		u64 wpr_start = ls_blob->addr;
-		u64 wpr_end = ls_blob->addr + ls_blob->size;
-
-		if (acr->func->shadow_blob)
-			wpr_start += ls_blob->size / 2;
-
-		desc->wpr_region_id = 1;
-		desc->regions.no_regions = 2;
-		desc->regions.region_props[0].start_addr = wpr_start >> 8;
-		desc->regions.region_props[0].end_addr = wpr_end >> 8;
-		desc->regions.region_props[0].region_id = 1;
-		desc->regions.region_props[0].read_mask = 0xf;
-		desc->regions.region_props[0].write_mask = 0xc;
-		desc->regions.region_props[0].client_mask = 0x2;
-		if (acr->func->shadow_blob)
-			desc->regions.region_props[0].shadow_mem_start_addr =
-							     ls_blob->addr >> 8;
-		else
-			desc->regions.region_props[0].shadow_mem_start_addr = 0;
-	} else {
-		desc->ucode_blob_base = ls_blob->addr;
-		desc->ucode_blob_size = ls_blob->size;
-	}
-}
-
-static const struct acr_r352_ls_func
-acr_r367_ls_sec2_func = {
-	.load = acr_ls_ucode_load_sec2,
-	.post_run = acr_ls_sec2_post_run,
-	.version_max = 1,
-	.version = {
-		&acr_r361_ls_sec2_func_0,
-		&acr_r370_ls_sec2_func_0,
-	}
-};
-
-const struct acr_r352_func
-acr_r367_func = {
-	.fixup_hs_desc = acr_r367_fixup_hs_desc,
-	.generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
-	.hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
-	.shadow_blob = true,
-	.ls_ucode_img_load = acr_r367_ls_ucode_img_load,
-	.ls_fill_headers = acr_r367_ls_fill_headers,
-	.ls_write_wpr = acr_r367_ls_write_wpr,
-	.ls_func = {
-		[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
-		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
-		[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
-		[NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func,
-	},
-};
-
-struct nvkm_acr *
-acr_r367_new(enum nvkm_secboot_falcon boot_falcon,
-	     unsigned long managed_falcons)
-{
-	return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
deleted file mode 100644
index 8bdfb3e..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_ACR_R367_H__
-#define __NVKM_SECBOOT_ACR_R367_H__
-
-#include "acr_r352.h"
-
-void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
-
-struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *,
-						const struct nvkm_secboot *,
-						enum nvkm_secboot_falcon);
-int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *);
-int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *,
-			  struct nvkm_gpuobj *, u64);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
deleted file mode 100644
index e821d0f..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r370.h"
-#include "acr_r367.h"
-
-#include <core/msgqueue.h>
-#include <engine/falcon.h>
-#include <engine/sec2.h>
-
-static void
-acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
-			       const struct ls_ucode_img *img, u64 wpr_addr,
-			       void *_desc)
-{
-	struct acr_r370_flcn_bl_desc *desc = _desc;
-	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
-	u64 base, addr_code, addr_data;
-
-	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
-	addr_code = base + pdesc->app_resident_code_offset;
-	addr_data = base + pdesc->app_resident_data_offset;
-
-	desc->ctx_dma = FALCON_DMAIDX_UCODE;
-	desc->code_dma_base = u64_to_flcn64(addr_code);
-	desc->non_sec_code_off = pdesc->app_resident_code_offset;
-	desc->non_sec_code_size = pdesc->app_resident_code_size;
-	desc->code_entry_point = pdesc->app_imem_entry;
-	desc->data_dma_base = u64_to_flcn64(addr_data);
-	desc->data_size = pdesc->app_resident_data_size;
-}
-
-static const struct acr_r352_lsf_func
-acr_r370_ls_fecs_func_0 = {
-	.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r370_ls_fecs_func = {
-	.load = acr_ls_ucode_load_fecs,
-	.version_max = 0,
-	.version = {
-		&acr_r370_ls_fecs_func_0,
-	}
-};
-
-static const struct acr_r352_lsf_func
-acr_r370_ls_gpccs_func_0 = {
-	.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-	/* GPCCS will be loaded using PRI */
-	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-const struct acr_r352_ls_func
-acr_r370_ls_gpccs_func = {
-	.load = acr_ls_ucode_load_gpccs,
-	.version_max = 0,
-	.version = {
-		&acr_r370_ls_gpccs_func_0,
-	}
-};
-
-static void
-acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
-			       const struct ls_ucode_img *img, u64 wpr_addr,
-			       void *_desc)
-{
-	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
-	const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
-	struct acr_r370_flcn_bl_desc *desc = _desc;
-	u64 base, addr_code, addr_data;
-	u32 addr_args;
-
-	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
-	/* For some reason we should not add app_resident_code_offset here */
-	addr_code = base;
-	addr_data = base + pdesc->app_resident_data_offset;
-	addr_args = sec->falcon->data.limit;
-	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
-	desc->ctx_dma = FALCON_SEC2_DMAIDX_UCODE;
-	desc->code_dma_base = u64_to_flcn64(addr_code);
-	desc->non_sec_code_off = pdesc->app_resident_code_offset;
-	desc->non_sec_code_size = pdesc->app_resident_code_size;
-	desc->code_entry_point = pdesc->app_imem_entry;
-	desc->data_dma_base = u64_to_flcn64(addr_data);
-	desc->data_size = pdesc->app_resident_data_size;
-	desc->argc = 1;
-	/* args are stored at the beginning of EMEM */
-	desc->argv = 0x01000000;
-}
-
-const struct acr_r352_lsf_func
-acr_r370_ls_sec2_func_0 = {
-	.generate_bl_desc = acr_r370_generate_sec2_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r370_ls_sec2_func = {
-	.load = acr_ls_ucode_load_sec2,
-	.post_run = acr_ls_sec2_post_run,
-	.version_max = 0,
-	.version = {
-		&acr_r370_ls_sec2_func_0,
-	}
-};
-
-void
-acr_r370_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
-			     u64 offset)
-{
-	struct acr_r370_flcn_bl_desc *bl_desc = _bl_desc;
-
-	bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
-	bl_desc->non_sec_code_off = hdr->non_sec_code_off;
-	bl_desc->non_sec_code_size = hdr->non_sec_code_size;
-	bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
-	bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
-	bl_desc->code_entry_point = 0;
-	bl_desc->code_dma_base = u64_to_flcn64(offset);
-	bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
-	bl_desc->data_size = hdr->data_size;
-}
-
-const struct acr_r352_func
-acr_r370_func = {
-	.fixup_hs_desc = acr_r367_fixup_hs_desc,
-	.generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
-	.hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-	.shadow_blob = true,
-	.ls_ucode_img_load = acr_r367_ls_ucode_img_load,
-	.ls_fill_headers = acr_r367_ls_fill_headers,
-	.ls_write_wpr = acr_r367_ls_write_wpr,
-	.ls_func = {
-		[NVKM_SECBOOT_FALCON_SEC2] = &acr_r370_ls_sec2_func,
-		[NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
-		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
-	},
-};
-
-struct nvkm_acr *
-acr_r370_new(enum nvkm_secboot_falcon boot_falcon,
-	     unsigned long managed_falcons)
-{
-	return acr_r352_new_(&acr_r370_func, boot_falcon, managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
deleted file mode 100644
index 2efed6f..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_ACR_R370_H__
-#define __NVKM_SECBOOT_ACR_R370_H__
-
-#include "priv.h"
-struct hsf_load_header;
-
-/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
-struct acr_r370_flcn_bl_desc {
-	u32 reserved[4];
-	u32 signature[4];
-	u32 ctx_dma;
-	struct flcn_u64 code_dma_base;
-	u32 non_sec_code_off;
-	u32 non_sec_code_size;
-	u32 sec_code_off;
-	u32 sec_code_size;
-	u32 code_entry_point;
-	struct flcn_u64 data_dma_base;
-	u32 data_size;
-	u32 argc;
-	u32 argv;
-};
-
-void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
-extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
-extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
-extern const struct acr_r352_lsf_func acr_r370_ls_sec2_func_0;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
deleted file mode 100644
index 8f06477..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r370.h"
-#include "acr_r367.h"
-
-#include <core/msgqueue.h>
-#include <subdev/pmu.h>
-
-static void
-acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
-			      const struct ls_ucode_img *img, u64 wpr_addr,
-			      void *_desc)
-{
-	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
-	const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
-	struct acr_r370_flcn_bl_desc *desc = _desc;
-	u64 base, addr_code, addr_data;
-	u32 addr_args;
-
-	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
-	addr_code = base + pdesc->app_resident_code_offset;
-	addr_data = base + pdesc->app_resident_data_offset;
-	addr_args = pmu->falcon->data.limit;
-	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
-	desc->ctx_dma = FALCON_DMAIDX_UCODE;
-	desc->code_dma_base = u64_to_flcn64(addr_code);
-	desc->non_sec_code_off = pdesc->app_resident_code_offset;
-	desc->non_sec_code_size = pdesc->app_resident_code_size;
-	desc->code_entry_point = pdesc->app_imem_entry;
-	desc->data_dma_base = u64_to_flcn64(addr_data);
-	desc->data_size = pdesc->app_resident_data_size;
-	desc->argc = 1;
-	desc->argv = addr_args;
-}
-
-static const struct acr_r352_lsf_func
-acr_r375_ls_pmu_func_0 = {
-	.generate_bl_desc = acr_r375_generate_pmu_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r375_ls_pmu_func = {
-	.load = acr_ls_ucode_load_pmu,
-	.post_run = acr_ls_pmu_post_run,
-	.version_max = 0,
-	.version = {
-		&acr_r375_ls_pmu_func_0,
-	}
-};
-
-const struct acr_r352_func
-acr_r375_func = {
-	.fixup_hs_desc = acr_r367_fixup_hs_desc,
-	.generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
-	.hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-	.shadow_blob = true,
-	.ls_ucode_img_load = acr_r367_ls_ucode_img_load,
-	.ls_fill_headers = acr_r367_ls_fill_headers,
-	.ls_write_wpr = acr_r367_ls_write_wpr,
-	.ls_func = {
-		[NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
-		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
-		[NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func,
-	},
-};
-
-struct nvkm_acr *
-acr_r375_new(enum nvkm_secboot_falcon boot_falcon,
-	     unsigned long managed_falcons)
-{
-	return acr_r352_new_(&acr_r375_func, boot_falcon, managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
deleted file mode 100644
index ee29c6c..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * Secure boot is the process by which NVIDIA-signed firmware is loaded into
- * some of the falcons of a GPU. For production devices this is the only way
- * for the firmware to access useful (but sensitive) registers.
- *
- * A Falcon microprocessor supporting advanced security modes can run in one of
- * three modes:
- *
- * - Non-secure (NS). In this mode, functionality is similar to Falcon
- *   architectures before security modes were introduced (pre-Maxwell), but
- *   capability is restricted. In particular, certain registers may be
- *   inaccessible for reads and/or writes, and physical memory access may be
- *   disabled (on certain Falcon instances). This is the only possible mode that
- *   can be used if you don't have microcode cryptographically signed by NVIDIA.
- *
- * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's
- *   not possible to read or write any Falcon internal state or Falcon registers
- *   from outside the Falcon (for example, from the host system). The only way
- *   to enable this mode is by loading microcode that has been signed by NVIDIA.
- *   (The loading process involves tagging the IMEM block as secure, writing the
- *   signature into a Falcon register, and starting execution. The hardware will
- *   validate the signature, and if valid, grant HS privileges.)
- *
- * - Light Secure (LS). In this mode, the microprocessor has more privileges
- *   than NS but fewer than HS. Some of the microprocessor state is visible to
- *   host software to ease debugging. The only way to enable this mode is by HS
- *   microcode enabling LS mode. Some privileges available to HS mode are not
- *   available here. LS mode is introduced in GM20x.
- *
- * Secure boot consists of temporarily switching an HS-capable falcon (typically
- * PMU) into HS mode in order to validate the LS firmwares of managed falcons,
- * load them, and switch managed falcons into LS mode. Once secure boot
- * completes, no falcon remains in HS mode.
- *
- * Secure boot requires a write-protected memory region (WPR) which can only be
- * written by the secure falcon. On dGPU, the driver sets up the WPR region in
- * video memory. On Tegra, it is set up by the bootloader and its location and
- * size written into memory controller registers.
- *
- * The secure boot process takes place as follows:
- *
- * 1) A LS blob is constructed that contains all the LS firmwares we want to
- *    load, along with their signatures and bootloaders.
- *
- * 2) A HS blob (also called ACR) is created that contains the signed HS
- *    firmware in charge of loading the LS firmwares into their respective
- *    falcons.
- *
- * 3) The HS blob is loaded (via its own bootloader) and executed on the
- *    HS-capable falcon. It authenticates itself, switches the secure falcon to
- *    HS mode and sets up the WPR region around the LS blob (dGPU) or copies the
- *    LS blob into the WPR region (Tegra).
- *
- * 4) The LS blob is now secure from all external tampering. The HS falcon
- *    checks the signatures of the LS firmwares and, if valid, switches the
- *    managed falcons to LS mode and makes them ready to run the LS firmware.
- *
- * 5) The managed falcons remain in LS mode and can be started.
- *
- */
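As a rough illustration of the five-step flow described in the comment above, here is a minimal stand-alone sketch. Every type and helper name in it (struct blob, build_ls_blob(), build_hs_blob(), run_hs_blob()) is a hypothetical stand-in, not an NVKM API.

```c
#include <stdbool.h>
#include <stdio.h>

struct blob { const char *name; bool verified; };

/* Step 1: gather the LS firmwares, signatures and bootloaders. */
static struct blob build_ls_blob(void)
{
	return (struct blob){ .name = "LS blob", .verified = false };
}

/* Step 2: the ACR, i.e. the NVIDIA-signed HS firmware. */
static struct blob build_hs_blob(void)
{
	return (struct blob){ .name = "HS blob (ACR)", .verified = true };
}

/*
 * Steps 3-4: run the ACR on the HS-capable falcon; it protects the LS blob
 * behind the WPR region and checks the LS signatures.
 */
static bool run_hs_blob(const struct blob *hs, struct blob *ls)
{
	printf("running %s to verify %s\n", hs->name, ls->name);
	ls->verified = true;
	return true;
}

int main(void)
{
	struct blob ls = build_ls_blob();
	struct blob hs = build_hs_blob();

	if (!run_hs_blob(&hs, &ls))
		return 1;

	/* Step 5: managed falcons are now in LS mode and can be started. */
	printf("%s verified: %s\n", ls.name, ls.verified ? "yes" : "no");
	return 0;
}
```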
-
-#include "priv.h"
-#include "acr.h"
-
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/pmu.h>
-#include <engine/sec2.h>
-
-const char *
-nvkm_secboot_falcon_name[] = {
-	[NVKM_SECBOOT_FALCON_PMU] = "PMU",
-	[NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>",
-	[NVKM_SECBOOT_FALCON_FECS] = "FECS",
-	[NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS",
-	[NVKM_SECBOOT_FALCON_SEC2] = "SEC2",
-	[NVKM_SECBOOT_FALCON_END] = "<invalid>",
-};
-/**
- * nvkm_secboot_reset() - reset the falcons selected by falcon_mask
- */
-int
-nvkm_secboot_reset(struct nvkm_secboot *sb, unsigned long falcon_mask)
-{
-	/* Unmanaged falcon? */
-	if ((falcon_mask | sb->acr->managed_falcons) != sb->acr->managed_falcons) {
-		nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n");
-		return -EINVAL;
-	}
-
-	return sb->acr->func->reset(sb->acr, sb, falcon_mask);
-}
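The guard above is a bitmask subset test: a falcon_mask may be reset only if OR-ing it into the managed set leaves the set unchanged. A tiny stand-alone sketch of the idiom (the bit positions are arbitrary examples, not the real NVKM_SECBOOT_FALCON_* values):

```c
#include <assert.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned long managed = BIT(1) | BIT(3);	/* arbitrary example bits */
	unsigned long subset  = BIT(3);			/* managed: reset allowed */
	unsigned long other   = BIT(3) | BIT(5);	/* contains an unmanaged bit */

	/* same test as in nvkm_secboot_reset() */
	assert((subset | managed) == managed);
	assert((other  | managed) != managed);
	return 0;
}
```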
-
-/**
- * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed
- */
-bool
-nvkm_secboot_is_managed(struct nvkm_secboot *sb, enum nvkm_secboot_falcon fid)
-{
-	if (!sb)
-		return false;
-
-	return sb->acr->managed_falcons & BIT(fid);
-}
-
-static int
-nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
-{
-	struct nvkm_secboot *sb = nvkm_secboot(subdev);
-	int ret = 0;
-
-	switch (sb->acr->boot_falcon) {
-	case NVKM_SECBOOT_FALCON_PMU:
-		sb->halt_falcon = sb->boot_falcon = subdev->device->pmu->falcon;
-		break;
-	case NVKM_SECBOOT_FALCON_SEC2:
-		/* we must keep SEC2 alive forever since ACR will run on it */
-		nvkm_engine_ref(&subdev->device->sec2->engine);
-		sb->boot_falcon = subdev->device->sec2->falcon;
-		sb->halt_falcon = subdev->device->pmu->falcon;
-		break;
-	default:
-		nvkm_error(subdev, "Unmanaged boot falcon %s!\n",
-			   nvkm_secboot_falcon_name[sb->acr->boot_falcon]);
-		return -EINVAL;
-	}
-	nvkm_debug(subdev, "using %s falcon for ACR\n", sb->boot_falcon->name);
-
-	/* Call chip-specific init function */
-	if (sb->func->oneinit)
-		ret = sb->func->oneinit(sb);
-	if (ret) {
-		nvkm_error(subdev, "Secure Boot initialization failed: %d\n",
-			   ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-static int
-nvkm_secboot_fini(struct nvkm_subdev *subdev, bool suspend)
-{
-	struct nvkm_secboot *sb = nvkm_secboot(subdev);
-	int ret = 0;
-
-	if (sb->func->fini)
-		ret = sb->func->fini(sb, suspend);
-
-	return ret;
-}
-
-static void *
-nvkm_secboot_dtor(struct nvkm_subdev *subdev)
-{
-	struct nvkm_secboot *sb = nvkm_secboot(subdev);
-	void *ret = NULL;
-
-	if (sb->func->dtor)
-		ret = sb->func->dtor(sb);
-
-	return ret;
-}
-
-static const struct nvkm_subdev_func
-nvkm_secboot = {
-	.oneinit = nvkm_secboot_oneinit,
-	.fini = nvkm_secboot_fini,
-	.dtor = nvkm_secboot_dtor,
-};
-
-int
-nvkm_secboot_ctor(const struct nvkm_secboot_func *func, struct nvkm_acr *acr,
-		  struct nvkm_device *device, int index,
-		  struct nvkm_secboot *sb)
-{
-	unsigned long fid;
-
-	nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev);
-	sb->func = func;
-	sb->acr = acr;
-	acr->subdev = &sb->subdev;
-
-	nvkm_debug(&sb->subdev, "securely managed falcons:\n");
-	for_each_set_bit(fid, &sb->acr->managed_falcons,
-			 NVKM_SECBOOT_FALCON_END)
-		nvkm_debug(&sb->subdev, "- %s\n",
-			   nvkm_secboot_falcon_name[fid]);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
deleted file mode 100644
index 5e91b3f..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-
-#include "acr.h"
-#include "gm200.h"
-
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-#include <engine/falcon.h>
-#include <subdev/mc.h>
-
-/**
- * gm200_secboot_run_blob() - run the given high-secure blob
- *
- */
-int
-gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
-		       struct nvkm_falcon *falcon)
-{
-	struct gm200_secboot *gsb = gm200_secboot(sb);
-	struct nvkm_subdev *subdev = &gsb->base.subdev;
-	struct nvkm_vma *vma = NULL;
-	u32 start_address;
-	int ret;
-
-	ret = nvkm_falcon_get(falcon, subdev);
-	if (ret)
-		return ret;
-
-	/* Map the HS firmware so the HS bootloader can see it */
-	ret = nvkm_vmm_get(gsb->vmm, 12, blob->size, &vma);
-	if (ret) {
-		nvkm_falcon_put(falcon, subdev);
-		return ret;
-	}
-
-	ret = nvkm_memory_map(blob, 0, gsb->vmm, vma, NULL, 0);
-	if (ret)
-		goto end;
-
-	/* Reset and set the falcon up */
-	ret = nvkm_falcon_reset(falcon);
-	if (ret)
-		goto end;
-	nvkm_falcon_bind_context(falcon, gsb->inst);
-
-	/* Load the HS bootloader into the falcon's IMEM/DMEM */
-	ret = sb->acr->func->load(sb->acr, falcon, blob, vma->addr);
-	if (ret < 0)
-		goto end;
-
-	start_address = ret;
-
-	/* Disable interrupts as we will poll for the HALT bit */
-	nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false);
-
-	/* Set default error value in mailbox register */
-	nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
-
-	/* Start the HS bootloader */
-	nvkm_falcon_set_start_addr(falcon, start_address);
-	nvkm_falcon_start(falcon);
-	ret = nvkm_falcon_wait_for_halt(falcon, 100);
-	if (ret)
-		goto end;
-
-	/*
-	 * The mailbox register contains the (positive) error code - return this
-	 * to the caller
-	 */
-	ret = nvkm_falcon_rd32(falcon, 0x040);
-
-end:
-	/* Reenable interrupts */
-	nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true);
-
-	/* We don't need the ACR firmware anymore */
-	nvkm_vmm_put(gsb->vmm, &vma);
-	nvkm_falcon_put(falcon, subdev);
-
-	return ret;
-}
-
-int
-gm200_secboot_oneinit(struct nvkm_secboot *sb)
-{
-	struct gm200_secboot *gsb = gm200_secboot(sb);
-	struct nvkm_device *device = sb->subdev.device;
-	int ret;
-
-	/* Allocate instance block and VM */
-	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
-			      &gsb->inst);
-	if (ret)
-		return ret;
-
-	ret = nvkm_vmm_new(device, 0, 600 * 1024, NULL, 0, NULL, "acr",
-			   &gsb->vmm);
-	if (ret)
-		return ret;
-
-	atomic_inc(&gsb->vmm->engref[NVKM_SUBDEV_PMU]);
-	gsb->vmm->debug = gsb->base.subdev.debug;
-
-	ret = nvkm_vmm_join(gsb->vmm, gsb->inst);
-	if (ret)
-		return ret;
-
-	if (sb->acr->func->oneinit) {
-		ret = sb->acr->func->oneinit(sb->acr, sb);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-int
-gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend)
-{
-	int ret = 0;
-
-	if (sb->acr->func->fini)
-		ret = sb->acr->func->fini(sb->acr, sb, suspend);
-
-	return ret;
-}
-
-void *
-gm200_secboot_dtor(struct nvkm_secboot *sb)
-{
-	struct gm200_secboot *gsb = gm200_secboot(sb);
-
-	sb->acr->func->dtor(sb->acr);
-
-	nvkm_vmm_part(gsb->vmm, gsb->inst);
-	nvkm_vmm_unref(&gsb->vmm);
-	nvkm_memory_unref(&gsb->inst);
-
-	return gsb;
-}
-
-
-static const struct nvkm_secboot_func
-gm200_secboot = {
-	.dtor = gm200_secboot_dtor,
-	.oneinit = gm200_secboot_oneinit,
-	.fini = gm200_secboot_fini,
-	.run_blob = gm200_secboot_run_blob,
-};
-
-int
-gm200_secboot_new(struct nvkm_device *device, int index,
-		  struct nvkm_secboot **psb)
-{
-	int ret;
-	struct gm200_secboot *gsb;
-	struct nvkm_acr *acr;
-
-	acr = acr_r361_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
-			   BIT(NVKM_SECBOOT_FALCON_GPCCS));
-	if (IS_ERR(acr))
-		return PTR_ERR(acr);
-
-	gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
-	if (!gsb) {
-		*psb = NULL;
-		return -ENOMEM;
-	}
-	*psb = &gsb->base;
-
-	ret = nvkm_secboot_ctor(&gm200_secboot, acr, device, index, &gsb->base);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-
-MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin");
-
-MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin");
-
-MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
-
-MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
deleted file mode 100644
index 62c5e16..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_GM200_H__
-#define __NVKM_SECBOOT_GM200_H__
-
-#include "priv.h"
-
-struct gm200_secboot {
-	struct nvkm_secboot base;
-
-	/* Instance block & address space used for HS FW execution */
-	struct nvkm_memory *inst;
-	struct nvkm_vmm *vmm;
-};
-#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
-
-int gm200_secboot_oneinit(struct nvkm_secboot *);
-int gm200_secboot_fini(struct nvkm_secboot *, bool);
-void *gm200_secboot_dtor(struct nvkm_secboot *);
-int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *,
-			   struct nvkm_falcon *);
-
-/* Tegra-only */
-int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *, u32);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
deleted file mode 100644
index df8b919..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-#include "gm200.h"
-
-#define TEGRA210_MC_BASE			0x70019000
-
-#ifdef CONFIG_ARCH_TEGRA
-#define MC_SECURITY_CARVEOUT2_CFG0		0xc58
-#define MC_SECURITY_CARVEOUT2_BOM_0		0xc5c
-#define MC_SECURITY_CARVEOUT2_BOM_HI_0		0xc60
-#define MC_SECURITY_CARVEOUT2_SIZE_128K		0xc64
-#define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED	(1 << 1)
-/**
- * gm20b_secboot_tegra_read_wpr() - read the WPR registers on Tegra
- *
- * On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region
- * is reserved from system memory by the bootloader and irreversibly locked.
- * This function reads the address and size of the pre-configured WPR region.
- */
-int
-gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
-{
-	struct nvkm_secboot *sb = &gsb->base;
-	void __iomem *mc;
-	u32 cfg;
-
-	mc = ioremap(mc_base, 0xd00);
-	if (!mc) {
-		nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n");
-		return -ENOMEM;
-	}
-	sb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) |
-	      ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32);
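-	/* the SIZE_128K register counts 128 KiB units, hence the shift by 17 */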
-	sb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K)
-		<< 17;
-	cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0);
-	iounmap(mc);
-
-	/* Check that WPR settings are valid */
-	if (sb->wpr_size == 0) {
-		nvkm_error(&sb->subdev, "WPR region is empty\n");
-		return -EINVAL;
-	}
-
-	if (!(cfg & TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED)) {
-		nvkm_error(&sb->subdev, "WPR region not locked\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-#else
-int
-gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
-{
-	nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n");
-	return -EINVAL;
-}
-#endif
-
-static int
-gm20b_secboot_oneinit(struct nvkm_secboot *sb)
-{
-	struct gm200_secboot *gsb = gm200_secboot(sb);
-	int ret;
-
-	ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA210_MC_BASE);
-	if (ret)
-		return ret;
-
-	return gm200_secboot_oneinit(sb);
-}
-
-static const struct nvkm_secboot_func
-gm20b_secboot = {
-	.dtor = gm200_secboot_dtor,
-	.oneinit = gm20b_secboot_oneinit,
-	.fini = gm200_secboot_fini,
-	.run_blob = gm200_secboot_run_blob,
-};
-
-int
-gm20b_secboot_new(struct nvkm_device *device, int index,
-		  struct nvkm_secboot **psb)
-{
-	int ret;
-	struct gm200_secboot *gsb;
-	struct nvkm_acr *acr;
-
-	acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
-			   BIT(NVKM_SECBOOT_FALCON_PMU));
-	if (IS_ERR(acr))
-		return PTR_ERR(acr);
-	/* Support the initial GM20B firmware release without PMU */
-	acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU);
-
-	gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
-	if (!gsb) {
-		*psb = NULL;
-		return -ENOMEM;
-	}
-	*psb = &gsb->base;
-
-	ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
-MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
-MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
-MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
deleted file mode 100644
index 4695f1c..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-#include "gm200.h"
-
-#include "ls_ucode.h"
-#include "hs_ucode.h"
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <engine/falcon.h>
-#include <engine/nvdec.h>
-
-static bool
-gp102_secboot_scrub_required(struct nvkm_secboot *sb)
-{
-	struct nvkm_subdev *subdev = &sb->subdev;
-	struct nvkm_device *device = subdev->device;
-	u32 reg;
-
-	nvkm_wr32(device, 0x100cd0, 0x2);
-	reg = nvkm_rd32(device, 0x100cd0);
-
-	return (reg & BIT(4));
-}
-
-static int
-gp102_run_secure_scrub(struct nvkm_secboot *sb)
-{
-	struct nvkm_subdev *subdev = &sb->subdev;
-	struct nvkm_device *device = subdev->device;
-	struct nvkm_engine *engine;
-	struct nvkm_falcon *falcon;
-	void *scrub_image;
-	struct fw_bin_header *hsbin_hdr;
-	struct hsf_fw_header *fw_hdr;
-	struct hsf_load_header *lhdr;
-	void *scrub_data;
-	int ret;
-
-	nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n");
-
-	engine = nvkm_engine_ref(&device->nvdec[0]->engine);
-	if (IS_ERR(engine))
-		return PTR_ERR(engine);
-	falcon = device->nvdec[0]->falcon;
-
-	nvkm_falcon_get(falcon, &sb->subdev);
-
-	scrub_image = hs_ucode_load_blob(subdev, falcon, "nvdec/scrubber");
-	if (IS_ERR(scrub_image))
-		return PTR_ERR(scrub_image);
-
-	nvkm_falcon_reset(falcon);
-	nvkm_falcon_bind_context(falcon, NULL);
-
-	hsbin_hdr = scrub_image;
-	fw_hdr = scrub_image + hsbin_hdr->header_offset;
-	lhdr = scrub_image + fw_hdr->hdr_offset;
-	scrub_data = scrub_image + hsbin_hdr->data_offset;
-
-	nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off,
-			      lhdr->non_sec_code_size,
-			      lhdr->non_sec_code_off >> 8, 0, false);
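-	/* apps[0]/apps[1]: code offset and size of the (single) secure app,
-	 * as described by struct hsf_load_header */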
-	nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0],
-			      ALIGN(lhdr->apps[0], 0x100),
-			      lhdr->apps[1],
-			      lhdr->apps[0] >> 8, 0, true);
-	nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0,
-			      lhdr->data_size, 0);
-
-	kfree(scrub_image);
-
-	nvkm_falcon_set_start_addr(falcon, 0x0);
-	nvkm_falcon_start(falcon);
-
-	ret = nvkm_falcon_wait_for_halt(falcon, 500);
-	if (ret < 0) {
-		nvkm_error(subdev, "failed to run VPR scrubber binary!\n");
-		ret = -ETIMEDOUT;
-		goto end;
-	}
-
-	/* put nvdec in clean state - without reset it will remain in HS mode */
-	nvkm_falcon_reset(falcon);
-
-	if (gp102_secboot_scrub_required(sb)) {
-		nvkm_error(subdev, "VPR scrubber binary failed!\n");
-		ret = -EINVAL;
-		goto end;
-	}
-
-	nvkm_debug(subdev, "VPR scrub successfully completed\n");
-
-end:
-	nvkm_falcon_put(falcon, &sb->subdev);
-	nvkm_engine_unref(&engine);
-	return ret;
-}
-
-static int
-gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
-		       struct nvkm_falcon *falcon)
-{
-	int ret;
-
-	/* make sure the VPR region is unlocked */
-	if (gp102_secboot_scrub_required(sb)) {
-		ret = gp102_run_secure_scrub(sb);
-		if (ret)
-			return ret;
-	}
-
-	return gm200_secboot_run_blob(sb, blob, falcon);
-}
-
-const struct nvkm_secboot_func
-gp102_secboot = {
-	.dtor = gm200_secboot_dtor,
-	.oneinit = gm200_secboot_oneinit,
-	.fini = gm200_secboot_fini,
-	.run_blob = gp102_secboot_run_blob,
-};
-
-int
-gp102_secboot_new(struct nvkm_device *device, int index,
-		  struct nvkm_secboot **psb)
-{
-	int ret;
-	struct gm200_secboot *gsb;
-	struct nvkm_acr *acr;
-
-	acr = acr_r367_new(NVKM_SECBOOT_FALCON_SEC2,
-			   BIT(NVKM_SECBOOT_FALCON_FECS) |
-			   BIT(NVKM_SECBOOT_FALCON_GPCCS) |
-			   BIT(NVKM_SECBOOT_FALCON_SEC2));
-	if (IS_ERR(acr))
-		return PTR_ERR(acr);
-
-	gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
-	if (!gsb) {
-		*psb = NULL;
-		return -ENOMEM;
-	}
-	*psb = &gsb->base;
-
-	ret = nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
deleted file mode 100644
index 737a8d5..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright 2017 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "gm200.h"
-#include "acr.h"
-
-int
-gp108_secboot_new(struct nvkm_device *device, int index,
-		  struct nvkm_secboot **psb)
-{
-	struct gm200_secboot *gsb;
-	struct nvkm_acr *acr;
-
-	acr = acr_r370_new(NVKM_SECBOOT_FALCON_SEC2,
-			   BIT(NVKM_SECBOOT_FALCON_FECS) |
-			   BIT(NVKM_SECBOOT_FALCON_GPCCS) |
-			   BIT(NVKM_SECBOOT_FALCON_SEC2));
-	if (IS_ERR(acr))
-		return PTR_ERR(acr);
-
-	if (!(gsb = kzalloc(sizeof(*gsb), GFP_KERNEL))) {
-		acr->func->dtor(acr);
-		return -ENOMEM;
-	}
-	*psb = &gsb->base;
-
-	return nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
-}
-
-MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
-
-MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
deleted file mode 100644
index 28ca29d..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-#include "gm200.h"
-
-#define TEGRA186_MC_BASE			0x02c10000
-
-static int
-gp10b_secboot_oneinit(struct nvkm_secboot *sb)
-{
-	struct gm200_secboot *gsb = gm200_secboot(sb);
-	int ret;
-
-	ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA186_MC_BASE);
-	if (ret)
-		return ret;
-
-	return gm200_secboot_oneinit(sb);
-}
-
-static const struct nvkm_secboot_func
-gp10b_secboot = {
-	.dtor = gm200_secboot_dtor,
-	.oneinit = gp10b_secboot_oneinit,
-	.fini = gm200_secboot_fini,
-	.run_blob = gm200_secboot_run_blob,
-};
-
-int
-gp10b_secboot_new(struct nvkm_device *device, int index,
-		  struct nvkm_secboot **psb)
-{
-	int ret;
-	struct gm200_secboot *gsb;
-	struct nvkm_acr *acr;
-
-	acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
-			   BIT(NVKM_SECBOOT_FALCON_GPCCS) |
-			   BIT(NVKM_SECBOOT_FALCON_PMU));
-	if (IS_ERR(acr))
-		return PTR_ERR(acr);
-
-	gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
-	if (!gsb) {
-		*psb = NULL;
-		return -ENOMEM;
-	}
-	*psb = &gsb->base;
-
-	ret = nvkm_secboot_ctor(&gp10b_secboot, acr, device, index, &gsb->base);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
-MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
-MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
-MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c
deleted file mode 100644
index 6b33182..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "hs_ucode.h"
-#include "ls_ucode.h"
-#include "acr.h"
-
-#include <engine/falcon.h>
-
-/**
- * hs_ucode_patch_signature() - patch HS blob with correct signature for
- * specified falcon.
- */
-static void
-hs_ucode_patch_signature(const struct nvkm_falcon *falcon, void *acr_image,
-			 bool new_format)
-{
-	struct fw_bin_header *hsbin_hdr = acr_image;
-	struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
-	void *hs_data = acr_image + hsbin_hdr->data_offset;
-	void *sig;
-	u32 sig_size;
-	u32 patch_loc, patch_sig;
-
-	/*
-	 * I had the brilliant idea to "improve" the binary format by
-	 * removing this useless indirection. However, to make NVIDIA files
-	 * directly compatible, let's support both formats.
-	 */
-	if (new_format) {
-		patch_loc = fw_hdr->patch_loc;
-		patch_sig = fw_hdr->patch_sig;
-	} else {
-		patch_loc = *(u32 *)(acr_image + fw_hdr->patch_loc);
-		patch_sig = *(u32 *)(acr_image + fw_hdr->patch_sig);
-	}
-
-	/* Falcon in debug or production mode? */
-	if (falcon->debug) {
-		sig = acr_image + fw_hdr->sig_dbg_offset;
-		sig_size = fw_hdr->sig_dbg_size;
-	} else {
-		sig = acr_image + fw_hdr->sig_prod_offset;
-		sig_size = fw_hdr->sig_prod_size;
-	}
-
-	/* Patch signature */
-	memcpy(hs_data + patch_loc, sig + patch_sig, sig_size);
-}
-
-void *
-hs_ucode_load_blob(struct nvkm_subdev *subdev, const struct nvkm_falcon *falcon,
-		   const char *fw)
-{
-	void *acr_image;
-	bool new_format;
-
-	acr_image = nvkm_acr_load_firmware(subdev, fw, 0);
-	if (IS_ERR(acr_image))
-		return acr_image;
-
-	/* detect the format to determine how the signature should be patched */
-	switch (((u32 *)acr_image)[0]) {
-	case 0x3b1d14f0:
-		new_format = true;
-		break;
-	case 0x000010de:
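-		/* bare NVIDIA PCI vendor ID (0x10de): legacy format */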
-		new_format = false;
-		break;
-	default:
-		nvkm_error(subdev, "unknown header for HS blob %s\n", fw);
-		return ERR_PTR(-EINVAL);
-	}
-
-	hs_ucode_patch_signature(falcon, acr_image, new_format);
-
-	return acr_image;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h
deleted file mode 100644
index d8cfc6f..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_HS_UCODE_H__
-#define __NVKM_SECBOOT_HS_UCODE_H__
-
-#include <core/os.h>
-#include <core/subdev.h>
-
-struct nvkm_falcon;
-
-/**
- * struct hsf_fw_header - HS firmware descriptor
- * @sig_dbg_offset:	offset of the debug signature
- * @sig_dbg_size:	size of the debug signature
- * @sig_prod_offset:	offset of the production signature
- * @sig_prod_size:	size of the production signature
- * @patch_loc:		offset of the offset (sic) of where the signature is
- * @patch_sig:		offset of the offset (sic) to add to sig_*_offset
- * @hdr_offset:		offset of the load header (see struct hs_load_header)
- * @hdr_size:		size of above header
- *
- * This structure is embedded in the HS firmware image at
- * hs_bin_hdr.header_offset.
- */
-struct hsf_fw_header {
-	u32 sig_dbg_offset;
-	u32 sig_dbg_size;
-	u32 sig_prod_offset;
-	u32 sig_prod_size;
-	u32 patch_loc;
-	u32 patch_sig;
-	u32 hdr_offset;
-	u32 hdr_size;
-};
-
-/**
- * struct hsf_load_header - HS firmware load header
- */
-struct hsf_load_header {
-	u32 non_sec_code_off;
-	u32 non_sec_code_size;
-	u32 data_dma_base;
-	u32 data_size;
-	u32 num_apps;
-	/*
-	 * Organized as follows:
-	 * - app0_code_off
-	 * - app1_code_off
-	 * - ...
-	 * - appn_code_off
-	 * - app0_code_size
-	 * - app1_code_size
-	 * - ...
-	 */
-	u32 apps[0];
-};
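How apps[] is indexed is only implied by the layout comment above: presumably num_apps code offsets followed by num_apps code sizes, which matches the apps[0]/apps[1] usage for a single app in gp102_run_secure_scrub(). Under that assumption, illustrative accessors (not part of the original header) could look like:

```c
/* Assumes the offsets-then-sizes layout sketched in the comment above. */
static inline u32 hsf_app_code_off(const struct hsf_load_header *hdr, u32 i)
{
	return hdr->apps[i];			/* code offset of app i */
}

static inline u32 hsf_app_code_size(const struct hsf_load_header *hdr, u32 i)
{
	return hdr->apps[hdr->num_apps + i];	/* code size of app i */
}
```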
-
-void *hs_ucode_load_blob(struct nvkm_subdev *, const struct nvkm_falcon *,
-			 const char *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
deleted file mode 100644
index d43f906..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_LS_UCODE_H__
-#define __NVKM_SECBOOT_LS_UCODE_H__
-
-#include <core/os.h>
-#include <core/subdev.h>
-#include <subdev/secboot.h>
-
-struct nvkm_acr;
-
-/**
- * struct ls_ucode_img_desc - descriptor of firmware image
- * @descriptor_size:		size of this descriptor
- * @image_size:			size of the whole image
- * @bootloader_start_offset:	start offset of the bootloader in ucode image
- * @bootloader_size:		size of the bootloader
- * @bootloader_imem_offset:	start offset of the bootloader in IMEM
- * @bootloader_entry_point:	entry point of the bootloader in IMEM
- * @app_start_offset:		start offset of the LS firmware
- * @app_size:			size of the LS firmware's code and data
- * @app_imem_offset:		offset of the app in IMEM
- * @app_imem_entry:		entry point of the app in IMEM
- * @app_dmem_offset:		offset of the data in DMEM
- * @app_resident_code_offset:	offset of app code from app_start_offset
- * @app_resident_code_size:	size of the code
- * @app_resident_data_offset:	offset of data from app_start_offset
- * @app_resident_data_size:	size of data
- *
- * A firmware image contains the code, data, and bootloader of a given LS
- * falcon in a single blob. This structure describes where everything is.
- *
- * This can be generated from a (bootloader, code, data) set if they have
- * been loaded separately, or come directly from a file.
- */
-struct ls_ucode_img_desc {
-	u32 descriptor_size;
-	u32 image_size;
-	u32 tools_version;
-	u32 app_version;
-	char date[64];
-	u32 bootloader_start_offset;
-	u32 bootloader_size;
-	u32 bootloader_imem_offset;
-	u32 bootloader_entry_point;
-	u32 app_start_offset;
-	u32 app_size;
-	u32 app_imem_offset;
-	u32 app_imem_entry;
-	u32 app_dmem_offset;
-	u32 app_resident_code_offset;
-	u32 app_resident_code_size;
-	u32 app_resident_data_offset;
-	u32 app_resident_data_size;
-	u32 nb_overlays;
-	struct { u32 start; u32 size; } load_ovl[64];
-	u32 compressed;
-};
-
-/**
- * struct ls_ucode_img - temporary storage for loaded LS firmwares
- * @node:		to link within lsf_ucode_mgr
- * @falcon_id:		ID of the falcon this LS firmware is for
- * @ucode_desc:		loaded or generated map of ucode_data
- * @ucode_data:		firmware payload (code and data)
- * @ucode_size:		size in bytes of data in ucode_data
- * @ucode_off:		offset of the ucode in ucode_data
- * @sig:		signature for this firmware
- * @sig_size:		size of the signature in bytes
- *
- * Preparing the WPR LS blob requires information about all the LS firmwares
- * (size, etc) to be known. This structure contains all the data of one LS
- * firmware.
- */
-struct ls_ucode_img {
-	struct list_head node;
-	enum nvkm_secboot_falcon falcon_id;
-
-	struct ls_ucode_img_desc ucode_desc;
-	u8 *ucode_data;
-	u32 ucode_size;
-	u32 ucode_off;
-
-	u8 *sig;
-	u32 sig_size;
-};
-
-/**
- * struct fw_bin_header - header of firmware files
- * @bin_magic:		always 0x3b1d14f0
- * @bin_ver:		version of the bin format
- * @bin_size:		entire image size including this header
- * @header_offset:	offset of the firmware/bootloader header in the file
- * @data_offset:	offset of the firmware/bootloader payload in the file
- * @data_size:		size of the payload
- *
- * This header is located at the beginning of the HS firmware and HS bootloader
- * files, to describe where the headers and data can be found.
- */
-struct fw_bin_header {
-	u32 bin_magic;
-	u32 bin_ver;
-	u32 bin_size;
-	u32 header_offset;
-	u32 data_offset;
-	u32 data_size;
-};
-
-/**
- * struct fw_bl_desc - firmware bootloader descriptor
- * @start_tag:		starting tag of bootloader
- * @dmem_load_off:	DMEM offset of flcn_bl_dmem_desc
- * @code_off:		offset of code section
- * @code_size:		size of code section
- * @data_off:		offset of data section
- * @data_size:		size of data section
- *
- * This structure is embedded in bootloader firmware files to describe the
- * IMEM and DMEM layout expected by the bootloader.
- */
-struct fw_bl_desc {
-	u32 start_tag;
-	u32 dmem_load_off;
-	u32 code_off;
-	u32 code_size;
-	u32 data_off;
-	u32 data_size;
-};
-
-int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, int,
-			   struct ls_ucode_img *);
-int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, int,
-			    struct ls_ucode_img *);
-int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, int,
-			  struct ls_ucode_img *);
-int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
-int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, int,
-			   struct ls_ucode_img *);
-int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
deleted file mode 100644
index 821d3b2..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-
-#include "ls_ucode.h"
-#include "acr.h"
-
-#include <core/firmware.h>
-
-#define BL_DESC_BLK_SIZE 256
-/**
- * ls_ucode_img_build() - build a ucode image and descriptor from bl, code, data
- *
- * @bl:		bootloader image, including 16-bytes descriptor
- * @code:	LS firmware code segment
- * @data:	LS firmware data segment
- * @desc:	ucode descriptor to be written
- *
- * Return: allocated ucode image with corresponding descriptor information.
- *         @desc is also updated with the right offsets within the returned image.
- */
-static void *
-ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
-		   const struct firmware *data, struct ls_ucode_img_desc *desc)
-{
-	struct fw_bin_header *bin_hdr = (void *)bl->data;
-	struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset;
-	void *bl_data = (void *)bl->data + bin_hdr->data_offset;
-	u32 pos = 0;
-	void *image;
-
-	desc->bootloader_start_offset = pos;
-	desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32));
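-	/* start_tag is apparently in 256-byte IMEM blocks, hence the "* 256" */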
-	desc->bootloader_imem_offset = bl_desc->start_tag * 256;
-	desc->bootloader_entry_point = bl_desc->start_tag * 256;
-
-	pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE);
-	desc->app_start_offset = pos;
-	desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) +
-			 ALIGN(data->size, BL_DESC_BLK_SIZE);
-	desc->app_imem_offset = 0;
-	desc->app_imem_entry = 0;
-	desc->app_dmem_offset = 0;
-	desc->app_resident_code_offset = 0;
-	desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE);
-
-	pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE);
-	desc->app_resident_data_offset = pos - desc->app_start_offset;
-	desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE);
-
-	desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) +
-			   desc->app_size;
-
-	image = kzalloc(desc->image_size, GFP_KERNEL);
-	if (!image)
-		return ERR_PTR(-ENOMEM);
-
-	memcpy(image + desc->bootloader_start_offset, bl_data,
-	       bl_desc->code_size);
-	memcpy(image + desc->app_start_offset, code->data, code->size);
-	memcpy(image + desc->app_start_offset + desc->app_resident_data_offset,
-	       data->data, data->size);
-
-	return image;
-}
-
-/**
- * ls_ucode_img_load_gr() - load and prepare a LS GR ucode image
- *
- * Load the LS microcode, bootloader and signature and pack them into a single
- * blob. Also generate the corresponding ucode descriptor.
- */
-static int
-ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, int maxver,
-		     struct ls_ucode_img *img, const char *falcon_name)
-{
-	const struct firmware *bl, *code, *data, *sig;
-	char f[64];
-	int ret;
-
-	snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
-	ret = nvkm_firmware_get(subdev, f, &bl);
-	if (ret)
-		goto error;
-
-	snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
-	ret = nvkm_firmware_get(subdev, f, &code);
-	if (ret)
-		goto free_bl;
-
-	snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
-	ret = nvkm_firmware_get(subdev, f, &data);
-	if (ret)
-		goto free_inst;
-
-	snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
-	ret = nvkm_firmware_get(subdev, f, &sig);
-	if (ret)
-		goto free_data;
-
-	img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
-	if (!img->sig) {
-		ret = -ENOMEM;
-		goto free_sig;
-	}
-	img->sig_size = sig->size;
-
-	img->ucode_data = ls_ucode_img_build(bl, code, data,
-					     &img->ucode_desc);
-	if (IS_ERR(img->ucode_data)) {
-		kfree(img->sig);
-		ret = PTR_ERR(img->ucode_data);
-		goto free_sig;
-	}
-	img->ucode_size = img->ucode_desc.image_size;
-
-free_sig:
-	nvkm_firmware_put(sig);
-free_data:
-	nvkm_firmware_put(data);
-free_inst:
-	nvkm_firmware_put(code);
-free_bl:
-	nvkm_firmware_put(bl);
-error:
-	return ret;
-}
-
-int
-acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, int maxver,
-		       struct ls_ucode_img *img)
-{
-	return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "fecs");
-}
-
-int
-acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, int maxver,
-			struct ls_ucode_img *img)
-{
-	return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "gpccs");
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
deleted file mode 100644
index a84a999..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-
-#include "ls_ucode.h"
-#include "acr.h"
-
-#include <core/firmware.h>
-#include <core/msgqueue.h>
-#include <subdev/pmu.h>
-#include <engine/sec2.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-
-/**
- * acr_ls_ucode_load_msgqueue() - load and prepare a ucode image for a msgqueue fw
- *
- * Load the LS microcode, desc and signature and pack them into a single
- * blob.
- */
-static int
-acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
-			   int maxver, struct ls_ucode_img *img)
-{
-	const struct firmware *image, *desc, *sig;
-	char f[64];
-	int ver, ret;
-
-	snprintf(f, sizeof(f), "%s/image", name);
-	ver = nvkm_firmware_get_version(subdev, f, 0, maxver, &image);
-	if (ver < 0)
-		return ver;
-	img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL);
-	nvkm_firmware_put(image);
-	if (!img->ucode_data)
-		return -ENOMEM;
-
-	snprintf(f, sizeof(f), "%s/desc", name);
-	ret = nvkm_firmware_get_version(subdev, f, ver, ver, &desc);
-	if (ret < 0)
-		return ret;
-	memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc));
-	img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256);
-	nvkm_firmware_put(desc);
-
-	snprintf(f, sizeof(f), "%s/sig", name);
-	ret = nvkm_firmware_get_version(subdev, f, ver, ver, &sig);
-	if (ret < 0)
-		return ret;
-	img->sig_size = sig->size;
-	img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
-	nvkm_firmware_put(sig);
-	if (!img->sig)
-		return -ENOMEM;
-
-	return ver;
-}
-
-static int
-acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
-			 struct nvkm_falcon *falcon, u32 addr_args)
-{
-	struct nvkm_device *device = falcon->owner->device;
-	u8 buf[NVKM_MSGQUEUE_CMDLINE_SIZE];
-
-	memset(buf, 0, sizeof(buf));
-	nvkm_msgqueue_write_cmdline(queue, buf);
-	nvkm_falcon_load_dmem(falcon, buf, addr_args, sizeof(buf), 0);
-	/* rearm the queue so it will wait for the init message */
-	nvkm_msgqueue_reinit(queue);
-
-	/* Enable interrupts */
-	nvkm_falcon_wr32(falcon, 0x10, 0xff);
-	nvkm_mc_intr_mask(device, falcon->owner->index, true);
-
-	/* Start LS firmware on boot falcon */
-	nvkm_falcon_start(falcon);
-
-	return 0;
-}
-
-int
-acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver,
-		      struct ls_ucode_img *img)
-{
-	struct nvkm_pmu *pmu = sb->subdev.device->pmu;
-	int ret;
-
-	ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", maxver, img);
-	if (ret)
-		return ret;
-
-	/* Allocate the PMU queue corresponding to the FW version */
-	ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon,
-				sb, &pmu->queue);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-int
-acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
-{
-	struct nvkm_device *device = sb->subdev.device;
-	struct nvkm_pmu *pmu = device->pmu;
-	u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
-	int ret;
-
-	ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
-	if (ret)
-		return ret;
-
-	nvkm_debug(&sb->subdev, "%s started\n",
-		   nvkm_secboot_falcon_name[acr->boot_falcon]);
-
-	return 0;
-}
-
-int
-acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, int maxver,
-		       struct ls_ucode_img *img)
-{
-	struct nvkm_sec2 *sec = sb->subdev.device->sec2;
-	int ver, ret;
-
-	ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", maxver, img);
-	if (ver < 0)
-		return ver;
-
-	/* Allocate the PMU queue corresponding to the FW version */
-	ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
-				sb, &sec->queue);
-	if (ret)
-		return ret;
-
-	return ver;
-}
-
-int
-acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
-{
-	const struct nvkm_subdev *subdev = &sb->subdev;
-	struct nvkm_device *device = subdev->device;
-	struct nvkm_sec2 *sec = device->sec2;
-	/* on SEC arguments are always at the beginning of EMEM */
-	const u32 addr_args = 0x01000000;
-	int ret;
-
-	ret = acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
-	if (ret)
-		return ret;
-
-	nvkm_debug(&sb->subdev, "%s started\n",
-		   nvkm_secboot_falcon_name[acr->boot_falcon]);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
deleted file mode 100644
index 959a7b2..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_PRIV_H__
-#define __NVKM_SECBOOT_PRIV_H__
-
-#include <subdev/secboot.h>
-#include <subdev/mmu.h>
-struct nvkm_gpuobj;
-
-struct nvkm_secboot_func {
-	int (*oneinit)(struct nvkm_secboot *);
-	int (*fini)(struct nvkm_secboot *, bool suspend);
-	void *(*dtor)(struct nvkm_secboot *);
-	int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *,
-			struct nvkm_falcon *);
-};
-
-int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
-		      struct nvkm_device *, int, struct nvkm_secboot *);
-int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
-int nvkm_secboot_falcon_run(struct nvkm_secboot *);
-
-extern const struct nvkm_secboot_func gp102_secboot;
-
-struct flcn_u64 {
-	u32 lo;
-	u32 hi;
-};
-
-static inline u64 flcn64_to_u64(const struct flcn_u64 f)
-{
-	return ((u64)f.hi) << 32 | f.lo;
-}
-
-static inline struct flcn_u64 u64_to_flcn64(u64 u)
-{
-	struct flcn_u64 ret;
-
-	ret.hi = upper_32_bits(u);
-	ret.lo = lower_32_bits(u);
-
-	return ret;
-}
-
-#endif
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index 240dda1..b562a8c 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -8,18 +8,18 @@
 	  through a GPIO.
 
 config DRM_OMAP_ENCODER_TPD12S015
-        tristate "TPD12S015 HDMI ESD protection and level shifter"
+	tristate "TPD12S015 HDMI ESD protection and level shifter"
 	help
 	  Driver for TPD12S015, which offers HDMI ESD protection and level
 	  shifting.
 
 config DRM_OMAP_CONNECTOR_HDMI
-        tristate "HDMI Connector"
+	tristate "HDMI Connector"
 	help
 	  Driver for a generic HDMI connector.
 
 config DRM_OMAP_CONNECTOR_ANALOG_TV
-        tristate "Analog TV Connector"
+	tristate "Analog TV Connector"
 	help
 	  Driver for a generic analog TV connector.
 
diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig
index 956f23e..72ae79c0 100644
--- a/drivers/gpu/drm/omapdrm/dss/Kconfig
+++ b/drivers/gpu/drm/omapdrm/dss/Kconfig
@@ -6,12 +6,12 @@
 	tristate
 
 menuconfig OMAP2_DSS
-        tristate "OMAP2+ Display Subsystem support"
+	tristate "OMAP2+ Display Subsystem support"
 	select OMAP_DSS_BASE
 	select VIDEOMODE_HELPERS
 	select OMAP2_DSS_INIT
 	select HDMI
-        help
+	help
 	  OMAP2+ Display Subsystem support.
 
 if OMAP2_DSS
@@ -52,7 +52,7 @@
 
 config OMAP2_DSS_VENC
 	bool "VENC support"
-        default y
+	default y
 	help
 	  OMAP Video Encoder support for S-Video and composite TV-out.
 
@@ -61,7 +61,7 @@
 
 config OMAP4_DSS_HDMI
 	bool "HDMI support for OMAP4"
-        default y
+	default y
 	select OMAP2_DSS_HDMI_COMMON
 	help
 	  HDMI support for OMAP4 based SoCs.
@@ -85,7 +85,7 @@
 
 config OMAP2_DSS_SDI
 	bool "SDI support"
-        default n
+	default n
 	help
 	  SDI (Serial Display Interface) support.
 
@@ -94,7 +94,7 @@
 
 config OMAP2_DSS_DSI
 	bool "DSI support"
-        default n
+	default n
 	help
 	  MIPI DSI (Display Serial Interface) support.
 
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 413dbdd..dbb90f2 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -393,8 +393,7 @@ static void dispc_get_reg_field(struct dispc_device *dispc,
 				enum dispc_feat_reg_field id,
 				u8 *start, u8 *end)
 {
-	if (id >= dispc->feat->num_reg_fields)
-		BUG();
+	BUG_ON(id >= dispc->feat->num_reg_fields);
 
 	*start = dispc->feat->reg_fields[id].start;
 	*end = dispc->feat->reg_fields[id].end;
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 5b8799c..94cded3 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -229,7 +229,8 @@ static int omap_connector_get_modes(struct drm_connector *connector)
 	 * operation to the panel API.
 	 */
 	if (omap_connector->output->panel)
-		return drm_panel_get_modes(omap_connector->output->panel);
+		return drm_panel_get_modes(omap_connector->output->panel,
+					   connector);
 
 	/*
 	 * We can't retrieve modes, which can happen for instance for a DVI or
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index b3e22c8..d2750f6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -217,8 +217,8 @@ static int omap_display_id(struct omap_dss_device *output)
 	} else if (output->bridge) {
 		struct drm_bridge *bridge = output->bridge;
 
-		while (bridge->next)
-			bridge = bridge->next;
+		while (drm_bridge_get_next_bridge(bridge))
+			bridge = drm_bridge_get_next_bridge(bridge);
 
 		node = bridge->of_node;
 	} else if (output->panel) {
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 24bbe9f..4f2165a 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -126,7 +126,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
 	for (dssdev = output; dssdev; dssdev = dssdev->next)
 		omap_encoder_update_videomode_flags(&vm, dssdev->bus_flags);
 
-	for (bridge = output->bridge; bridge; bridge = bridge->next) {
+	for (bridge = output->bridge; bridge;
+	     bridge = drm_bridge_get_next_bridge(bridge)) {
 		if (!bridge->timings)
 			continue;
 
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 58f5394..b06e5cb 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -70,7 +70,7 @@ static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
 	return drm_fb_helper_pan_display(var, fbi);
 }
 
-static struct fb_ops omap_fb_ops = {
+static const struct fb_ops omap_fb_ops = {
 	.owner = THIS_MODULE,
 
 	.fb_check_var	= drm_fb_helper_check_var,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 7344bb6..b319fe7 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -85,25 +85,6 @@ static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
 	return 0;
 }
 
-static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
-		unsigned long page_num)
-{
-	struct drm_gem_object *obj = buffer->priv;
-	struct page **pages;
-	omap_gem_get_pages(obj, &pages, false);
-	omap_gem_cpu_sync_page(obj, page_num);
-	return kmap(pages[page_num]);
-}
-
-static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
-		unsigned long page_num, void *addr)
-{
-	struct drm_gem_object *obj = buffer->priv;
-	struct page **pages;
-	omap_gem_get_pages(obj, &pages, false);
-	kunmap(pages[page_num]);
-}
-
 static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
 		struct vm_area_struct *vma)
 {
@@ -123,8 +104,6 @@ static const struct dma_buf_ops omap_dmabuf_ops = {
 	.release = drm_gem_dmabuf_release,
 	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
 	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
-	.map = omap_gem_dmabuf_kmap,
-	.unmap = omap_gem_dmabuf_kunmap,
 	.mmap = omap_gem_dmabuf_mmap,
 };
 
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index f152bc4..ae44ac2 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -18,6 +18,17 @@
 	  reference designs. The panel is detected using special registers
 	  in the Versatile family syscon registers.
 
+config DRM_PANEL_BOE_HIMAX8279D
+	tristate "Boe Himax8279d panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for Boe Himax8279d
+	  TFT-LCD modules. The panel has a 1200x1920 resolution and uses
+	  24 bit RGB per pixel. It provides a MIPI DSI interface to
+	  the host and has a built-in LED backlight.
+
 config DRM_PANEL_LVDS
 	tristate "Generic LVDS panel driver"
 	depends on OF
@@ -98,6 +109,17 @@
 	  24 bit RGB per pixel. It provides a MIPI DSI interface to
 	  the host and has a built-in LED backlight.
 
+config DRM_PANEL_LEADTEK_LTK500HD1829
+	tristate "Leadtek LTK500HD1829 panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for Leadtek
+	  LTK500HD1829 TFT-LCD modules, handled by the
+	  panel-leadtek-ltk500hd1829 driver. The panel provides a MIPI
+	  DSI interface to the host and has a built-in LED backlight.
+
 config DRM_PANEL_SAMSUNG_LD9040
 	tristate "Samsung LD9040 RGB/SPI panel"
 	depends on OF && SPI
@@ -316,6 +338,17 @@
 	  Say Y here if you want to enable support for the Sitronix
 	  ST7789V controller for 240x320 LCD panels
 
+config DRM_PANEL_SONY_ACX424AKP
+	tristate "Sony ACX424AKP DSI command mode panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	select VIDEOMODE_HELPERS
+	help
+	  Say Y here if you want to enable the Sony ACX424 display
+	  panel. This panel supports DSI in both command and video
+	  mode.
+
 config DRM_PANEL_SONY_ACX565AKM
 	tristate "Sony ACX565AKM panel"
 	depends on GPIOLIB && OF && SPI
@@ -355,4 +388,14 @@
 	help
 	  Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
 	  Video Mode panel
+
+config DRM_PANEL_XINPENG_XPP055C272
+	tristate "Xinpeng XPP055C272 panel driver"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for the Xinpeng
+	  XPP055C272 controller for 720x1280 LCD panels with MIPI/RGB/SPI
+	  system interfaces.
 endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index b6cd39f..7c4d3c5 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
+obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
 obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
 obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
@@ -8,6 +9,7 @@
 obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
 obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
 obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
+obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
 obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
 obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
@@ -33,8 +35,10 @@
 obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_SONY_ACX424AKP) += panel-sony-acx424akp.o
 obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
 obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
 obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
 obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
 obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
+obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index a0574dc..41444a7 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -260,9 +260,9 @@ static int versatile_panel_enable(struct drm_panel *panel)
 	return 0;
 }
 
-static int versatile_panel_get_modes(struct drm_panel *panel)
+static int versatile_panel_get_modes(struct drm_panel *panel,
+				     struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct versatile_panel *vpanel = to_versatile_panel(panel);
 	struct drm_display_mode *mode;
 
@@ -270,7 +270,7 @@ static int versatile_panel_get_modes(struct drm_panel *panel)
 	connector->display_info.height_mm = vpanel->panel_type->height_mm;
 	connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
 
-	mode = drm_mode_duplicate(panel->drm, &vpanel->panel_type->mode);
+	mode = drm_mode_duplicate(connector->dev, &vpanel->panel_type->mode);
 	drm_mode_set_name(mode);
 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
 
diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
new file mode 100644
index 0000000..74d58ee
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
@@ -0,0 +1,978 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Huaqin Telecom Technology Co., Ltd
+ *
+ * Author: Jerry Han <jerry.han.hq@gmail.com>
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <video/mipi_display.h>
+
+struct panel_cmd {
+	char cmd;
+	char data;
+};
+
+struct panel_desc {
+	const struct drm_display_mode *display_mode;
+	unsigned int bpc;
+	unsigned int width_mm;
+	unsigned int height_mm;
+
+	unsigned long mode_flags;
+	enum mipi_dsi_pixel_format format;
+	unsigned int lanes;
+	const struct panel_cmd *on_cmds;
+	unsigned int on_cmds_num;
+};
+
+struct panel_info {
+	struct drm_panel base;
+	struct mipi_dsi_device *link;
+	const struct panel_desc *desc;
+
+	struct gpio_desc *enable_gpio;
+	struct gpio_desc *pp33_gpio;
+	struct gpio_desc *pp18_gpio;
+
+	bool prepared;
+	bool enabled;
+};
+
+static inline struct panel_info *to_panel_info(struct drm_panel *panel)
+{
+	return container_of(panel, struct panel_info, base);
+}
+
+static void disable_gpios(struct panel_info *pinfo)
+{
+	gpiod_set_value(pinfo->enable_gpio, 0);
+	gpiod_set_value(pinfo->pp33_gpio, 0);
+	gpiod_set_value(pinfo->pp18_gpio, 0);
+}
+
+static int send_mipi_cmds(struct drm_panel *panel, const struct panel_cmd *cmds)
+{
+	struct panel_info *pinfo = to_panel_info(panel);
+	unsigned int i = 0;
+	int err;
+
+	for (i = 0; i < pinfo->desc->on_cmds_num; i++) {
+		err = mipi_dsi_dcs_write_buffer(pinfo->link, &cmds[i],
+						sizeof(struct panel_cmd));
+
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
+static int boe_panel_disable(struct drm_panel *panel)
+{
+	struct panel_info *pinfo = to_panel_info(panel);
+	int err;
+
+	if (!pinfo->enabled)
+		return 0;
+
+	err = mipi_dsi_dcs_set_display_off(pinfo->link);
+	if (err < 0) {
+		DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+			      err);
+		return err;
+	}
+
+	pinfo->enabled = false;
+
+	return 0;
+}
+
+static int boe_panel_unprepare(struct drm_panel *panel)
+{
+	struct panel_info *pinfo = to_panel_info(panel);
+	int err;
+
+	if (!pinfo->prepared)
+		return 0;
+
+	err = mipi_dsi_dcs_set_display_off(pinfo->link);
+	if (err < 0)
+		DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+			      err);
+
+	err = mipi_dsi_dcs_enter_sleep_mode(pinfo->link);
+	if (err < 0)
+		DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+			      err);
+
+	/* sleep_mode_delay: 1ms - 2ms */
+	usleep_range(1000, 2000);
+
+	disable_gpios(pinfo);
+
+	pinfo->prepared = false;
+
+	return 0;
+}
+
+static int boe_panel_prepare(struct drm_panel *panel)
+{
+	struct panel_info *pinfo = to_panel_info(panel);
+	int err;
+
+	if (pinfo->prepared)
+		return 0;
+
+	gpiod_set_value(pinfo->pp18_gpio, 1);
+	/* T1: 5ms - 6ms */
+	usleep_range(5000, 6000);
+	gpiod_set_value(pinfo->pp33_gpio, 1);
+
+	/* reset sequence */
+	/* T2: 14ms - 15ms */
+	usleep_range(14000, 15000);
+	gpiod_set_value(pinfo->enable_gpio, 1);
+
+	/* T3: 1ms - 2ms */
+	usleep_range(1000, 2000);
+	gpiod_set_value(pinfo->enable_gpio, 0);
+
+	/* T4: 1ms - 2ms */
+	usleep_range(1000, 2000);
+	gpiod_set_value(pinfo->enable_gpio, 1);
+
+	/* T5: 5ms - 6ms */
+	usleep_range(5000, 6000);
+
+	/* send init code */
+	err = send_mipi_cmds(panel, pinfo->desc->on_cmds);
+	if (err < 0) {
+		DRM_DEV_ERROR(panel->dev, "failed to send DCS Init Code: %d\n",
+			      err);
+		goto poweroff;
+	}
+
+	err = mipi_dsi_dcs_exit_sleep_mode(pinfo->link);
+	if (err < 0) {
+		DRM_DEV_ERROR(panel->dev, "failed to exit sleep mode: %d\n",
+			      err);
+		goto poweroff;
+	}
+
+	/* T6: 120ms - 121ms */
+	usleep_range(120000, 121000);
+
+	err = mipi_dsi_dcs_set_display_on(pinfo->link);
+	if (err < 0) {
+		DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
+			      err);
+		goto poweroff;
+	}
+
+	/* T7: 20ms - 21ms */
+	usleep_range(20000, 21000);
+
+	pinfo->prepared = true;
+
+	return 0;
+
+poweroff:
+	disable_gpios(pinfo);
+	return err;
+}
+
+static int boe_panel_enable(struct drm_panel *panel)
+{
+	struct panel_info *pinfo = to_panel_info(panel);
+	int ret;
+
+	if (pinfo->enabled)
+		return 0;
+
+	usleep_range(120000, 121000);
+
+	ret = mipi_dsi_dcs_set_display_on(pinfo->link);
+	if (ret < 0) {
+		DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
+			      ret);
+		return ret;
+	}
+
+	pinfo->enabled = true;
+
+	return 0;
+}
+
+static int boe_panel_get_modes(struct drm_panel *panel,
+			       struct drm_connector *connector)
+{
+	struct panel_info *pinfo = to_panel_info(panel);
+	const struct drm_display_mode *m = pinfo->desc->display_mode;
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(connector->dev, m);
+	if (!mode) {
+		DRM_DEV_ERROR(pinfo->base.dev, "failed to add mode %ux%u@%u\n",
+			      m->hdisplay, m->vdisplay, m->vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	drm_mode_probed_add(connector, mode);
+
+	connector->display_info.width_mm = pinfo->desc->width_mm;
+	connector->display_info.height_mm = pinfo->desc->height_mm;
+	connector->display_info.bpc = pinfo->desc->bpc;
+
+	return 1;
+}
+
+static const struct drm_panel_funcs panel_funcs = {
+	.disable = boe_panel_disable,
+	.unprepare = boe_panel_unprepare,
+	.prepare = boe_panel_prepare,
+	.enable = boe_panel_enable,
+	.get_modes = boe_panel_get_modes,
+};
+
+static const struct drm_display_mode default_display_mode = {
+	.clock = 159420,
+	.hdisplay = 1200,
+	.hsync_start = 1200 + 80,
+	.hsync_end = 1200 + 80 + 60,
+	.htotal = 1200 + 80 + 60 + 24,
+	.vdisplay = 1920,
+	.vsync_start = 1920 + 10,
+	.vsync_end = 1920 + 10 + 14,
+	.vtotal = 1920 + 10 + 14 + 4,
+	.vrefresh = 60,
+};
+
+/* 8 inch */
+static const struct panel_cmd boe_himax8279d8p_on_cmds[] = {
+	{ 0xB0, 0x05 },
+	{ 0xB1, 0xE5 },
+	{ 0xB3, 0x52 },
+	{ 0xC0, 0x00 },
+	{ 0xC2, 0x57 },
+	{ 0xD9, 0x85 },
+	{ 0xB0, 0x01 },
+	{ 0xC8, 0x00 },
+	{ 0xC9, 0x00 },
+	{ 0xCC, 0x26 },
+	{ 0xCD, 0x26 },
+	{ 0xDC, 0x00 },
+	{ 0xDD, 0x00 },
+	{ 0xE0, 0x26 },
+	{ 0xE1, 0x26 },
+	{ 0xB0, 0x03 },
+	{ 0xC3, 0x2A },
+	{ 0xE7, 0x2A },
+	{ 0xC5, 0x2A },
+	{ 0xDE, 0x2A },
+	{ 0xBC, 0x02 },
+	{ 0xCB, 0x02 },
+	{ 0xB0, 0x00 },
+	{ 0xB6, 0x03 },
+	{ 0xBA, 0x8B },
+	{ 0xBF, 0x15 },
+	{ 0xC0, 0x18 },
+	{ 0xC2, 0x14 },
+	{ 0xC3, 0x02 },
+	{ 0xC4, 0x14 },
+	{ 0xC5, 0x02 },
+	{ 0xCC, 0x0A },
+	{ 0xB0, 0x06 },
+	{ 0xC0, 0xA5 },
+	{ 0xD5, 0x20 },
+	{ 0xC0, 0x00 },
+	{ 0xB0, 0x02 },
+	{ 0xC0, 0x00 },
+	{ 0xC1, 0x02 },
+	{ 0xC2, 0x06 },
+	{ 0xC3, 0x16 },
+	{ 0xC4, 0x0E },
+	{ 0xC5, 0x18 },
+	{ 0xC6, 0x26 },
+	{ 0xC7, 0x32 },
+	{ 0xC8, 0x3F },
+	{ 0xC9, 0x3F },
+	{ 0xCA, 0x3F },
+	{ 0xCB, 0x3F },
+	{ 0xCC, 0x3D },
+	{ 0xCD, 0x2F },
+	{ 0xCE, 0x2F },
+	{ 0xCF, 0x2F },
+	{ 0xD0, 0x07 },
+	{ 0xD2, 0x00 },
+	{ 0xD3, 0x02 },
+	{ 0xD4, 0x06 },
+	{ 0xD5, 0x12 },
+	{ 0xD6, 0x0A },
+	{ 0xD7, 0x14 },
+	{ 0xD8, 0x22 },
+	{ 0xD9, 0x2E },
+	{ 0xDA, 0x3D },
+	{ 0xDB, 0x3F },
+	{ 0xDC, 0x3F },
+	{ 0xDD, 0x3F },
+	{ 0xDE, 0x3D },
+	{ 0xDF, 0x2F },
+	{ 0xE0, 0x2F },
+	{ 0xE1, 0x2F },
+	{ 0xE2, 0x07 },
+	{ 0xB0, 0x07 },
+	{ 0xB1, 0x18 },
+	{ 0xB2, 0x19 },
+	{ 0xB3, 0x2E },
+	{ 0xB4, 0x52 },
+	{ 0xB5, 0x72 },
+	{ 0xB6, 0x8C },
+	{ 0xB7, 0xBD },
+	{ 0xB8, 0xEB },
+	{ 0xB9, 0x47 },
+	{ 0xBA, 0x96 },
+	{ 0xBB, 0x1E },
+	{ 0xBC, 0x90 },
+	{ 0xBD, 0x93 },
+	{ 0xBE, 0xFA },
+	{ 0xBF, 0x56 },
+	{ 0xC0, 0x8C },
+	{ 0xC1, 0xB7 },
+	{ 0xC2, 0xCC },
+	{ 0xC3, 0xDF },
+	{ 0xC4, 0xE8 },
+	{ 0xC5, 0xF0 },
+	{ 0xC6, 0xF8 },
+	{ 0xC7, 0xFA },
+	{ 0xC8, 0xFC },
+	{ 0xC9, 0x00 },
+	{ 0xCA, 0x00 },
+	{ 0xCB, 0x5A },
+	{ 0xCC, 0xAF },
+	{ 0xCD, 0xFF },
+	{ 0xCE, 0xFF },
+	{ 0xB0, 0x08 },
+	{ 0xB1, 0x04 },
+	{ 0xB2, 0x15 },
+	{ 0xB3, 0x2D },
+	{ 0xB4, 0x51 },
+	{ 0xB5, 0x72 },
+	{ 0xB6, 0x8D },
+	{ 0xB7, 0xBE },
+	{ 0xB8, 0xED },
+	{ 0xB9, 0x4A },
+	{ 0xBA, 0x9A },
+	{ 0xBB, 0x23 },
+	{ 0xBC, 0x95 },
+	{ 0xBD, 0x98 },
+	{ 0xBE, 0xFF },
+	{ 0xBF, 0x59 },
+	{ 0xC0, 0x8E },
+	{ 0xC1, 0xB9 },
+	{ 0xC2, 0xCD },
+	{ 0xC3, 0xDF },
+	{ 0xC4, 0xE8 },
+	{ 0xC5, 0xF0 },
+	{ 0xC6, 0xF8 },
+	{ 0xC7, 0xFA },
+	{ 0xC8, 0xFC },
+	{ 0xC9, 0x00 },
+	{ 0xCA, 0x00 },
+	{ 0xCB, 0x5A },
+	{ 0xCC, 0xAF },
+	{ 0xCD, 0xFF },
+	{ 0xCE, 0xFF },
+	{ 0xB0, 0x09 },
+	{ 0xB1, 0x04 },
+	{ 0xB2, 0x2C },
+	{ 0xB3, 0x36 },
+	{ 0xB4, 0x53 },
+	{ 0xB5, 0x73 },
+	{ 0xB6, 0x8E },
+	{ 0xB7, 0xC0 },
+	{ 0xB8, 0xEF },
+	{ 0xB9, 0x4C },
+	{ 0xBA, 0x9D },
+	{ 0xBB, 0x25 },
+	{ 0xBC, 0x96 },
+	{ 0xBD, 0x9A },
+	{ 0xBE, 0x01 },
+	{ 0xBF, 0x59 },
+	{ 0xC0, 0x8E },
+	{ 0xC1, 0xB9 },
+	{ 0xC2, 0xCD },
+	{ 0xC3, 0xDF },
+	{ 0xC4, 0xE8 },
+	{ 0xC5, 0xF0 },
+	{ 0xC6, 0xF8 },
+	{ 0xC7, 0xFA },
+	{ 0xC8, 0xFC },
+	{ 0xC9, 0x00 },
+	{ 0xCA, 0x00 },
+	{ 0xCB, 0x5A },
+	{ 0xCC, 0xBF },
+	{ 0xCD, 0xFF },
+	{ 0xCE, 0xFF },
+	{ 0xB0, 0x0A },
+	{ 0xB1, 0x18 },
+	{ 0xB2, 0x19 },
+	{ 0xB3, 0x2E },
+	{ 0xB4, 0x52 },
+	{ 0xB5, 0x72 },
+	{ 0xB6, 0x8C },
+	{ 0xB7, 0xBD },
+	{ 0xB8, 0xEB },
+	{ 0xB9, 0x47 },
+	{ 0xBA, 0x96 },
+	{ 0xBB, 0x1E },
+	{ 0xBC, 0x90 },
+	{ 0xBD, 0x93 },
+	{ 0xBE, 0xFA },
+	{ 0xBF, 0x56 },
+	{ 0xC0, 0x8C },
+	{ 0xC1, 0xB7 },
+	{ 0xC2, 0xCC },
+	{ 0xC3, 0xDF },
+	{ 0xC4, 0xE8 },
+	{ 0xC5, 0xF0 },
+	{ 0xC6, 0xF8 },
+	{ 0xC7, 0xFA },
+	{ 0xC8, 0xFC },
+	{ 0xC9, 0x00 },
+	{ 0xCA, 0x00 },
+	{ 0xCB, 0x5A },
+	{ 0xCC, 0xAF },
+	{ 0xCD, 0xFF },
+	{ 0xCE, 0xFF },
+	{ 0xB0, 0x0B },
+	{ 0xB1, 0x04 },
+	{ 0xB2, 0x15 },
+	{ 0xB3, 0x2D },
+	{ 0xB4, 0x51 },
+	{ 0xB5, 0x72 },
+	{ 0xB6, 0x8D },
+	{ 0xB7, 0xBE },
+	{ 0xB8, 0xED },
+	{ 0xB9, 0x4A },
+	{ 0xBA, 0x9A },
+	{ 0xBB, 0x23 },
+	{ 0xBC, 0x95 },
+	{ 0xBD, 0x98 },
+	{ 0xBE, 0xFF },
+	{ 0xBF, 0x59 },
+	{ 0xC0, 0x8E },
+	{ 0xC1, 0xB9 },
+	{ 0xC2, 0xCD },
+	{ 0xC3, 0xDF },
+	{ 0xC4, 0xE8 },
+	{ 0xC5, 0xF0 },
+	{ 0xC6, 0xF8 },
+	{ 0xC7, 0xFA },
+	{ 0xC8, 0xFC },
+	{ 0xC9, 0x00 },
+	{ 0xCA, 0x00 },
+	{ 0xCB, 0x5A },
+	{ 0xCC, 0xAF },
+	{ 0xCD, 0xFF },
+	{ 0xCE, 0xFF },
+	{ 0xB0, 0x0C },
+	{ 0xB1, 0x04 },
+	{ 0xB2, 0x2C },
+	{ 0xB3, 0x36 },
+	{ 0xB4, 0x53 },
+	{ 0xB5, 0x73 },
+	{ 0xB6, 0x8E },
+	{ 0xB7, 0xC0 },
+	{ 0xB8, 0xEF },
+	{ 0xB9, 0x4C },
+	{ 0xBA, 0x9D },
+	{ 0xBB, 0x25 },
+	{ 0xBC, 0x96 },
+	{ 0xBD, 0x9A },
+	{ 0xBE, 0x01 },
+	{ 0xBF, 0x59 },
+	{ 0xC0, 0x8E },
+	{ 0xC1, 0xB9 },
+	{ 0xC2, 0xCD },
+	{ 0xC3, 0xDF },
+	{ 0xC4, 0xE8 },
+	{ 0xC5, 0xF0 },
+	{ 0xC6, 0xF8 },
+	{ 0xC7, 0xFA },
+	{ 0xC8, 0xFC },
+	{ 0xC9, 0x00 },
+	{ 0xCA, 0x00 },
+	{ 0xCB, 0x5A },
+	{ 0xCC, 0xBF },
+	{ 0xCD, 0xFF },
+	{ 0xCE, 0xFF },
+	{ 0xB0, 0x04 },
+	{ 0xB5, 0x02 },
+	{ 0xB6, 0x01 },
+};
+
+static const struct panel_desc boe_himax8279d8p_panel_desc = {
+	.display_mode = &default_display_mode,
+	.bpc = 8,
+	.width_mm = 107,
+	.height_mm = 172,
+	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+			MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
+	.format = MIPI_DSI_FMT_RGB888,
+	.lanes = 4,
+	.on_cmds = boe_himax8279d8p_on_cmds,
+	.on_cmds_num = 260,
+};
+
+/* 10 inch */
+static const struct panel_cmd boe_himax8279d10p_on_cmds[] = {
+	{ 0xB0, 0x05 },
+	{ 0xB1, 0xE5 },
+	{ 0xB3, 0x52 },
+	{ 0xB0, 0x00 },
+	{ 0xB6, 0x03 },
+	{ 0xBA, 0x8B },
+	{ 0xBF, 0x1A },
+	{ 0xC0, 0x0F },
+	{ 0xC2, 0x0C },
+	{ 0xC3, 0x02 },
+	{ 0xC4, 0x0C },
+	{ 0xC5, 0x02 },
+	{ 0xB0, 0x01 },
+	{ 0xE0, 0x26 },
+	{ 0xE1, 0x26 },
+	{ 0xDC, 0x00 },
+	{ 0xDD, 0x00 },
+	{ 0xCC, 0x26 },
+	{ 0xCD, 0x26 },
+	{ 0xC8, 0x00 },
+	{ 0xC9, 0x00 },
+	{ 0xD2, 0x03 },
+	{ 0xD3, 0x03 },
+	{ 0xE6, 0x04 },
+	{ 0xE7, 0x04 },
+	{ 0xC4, 0x09 },
+	{ 0xC5, 0x09 },
+	{ 0xD8, 0x0A },
+	{ 0xD9, 0x0A },
+	{ 0xC2, 0x0B },
+	{ 0xC3, 0x0B },
+	{ 0xD6, 0x0C },
+	{ 0xD7, 0x0C },
+	{ 0xC0, 0x05 },
+	{ 0xC1, 0x05 },
+	{ 0xD4, 0x06 },
+	{ 0xD5, 0x06 },
+	{ 0xCA, 0x07 },
+	{ 0xCB, 0x07 },
+	{ 0xDE, 0x08 },
+	{ 0xDF, 0x08 },
+	{ 0xB0, 0x02 },
+	{ 0xC0, 0x00 },
+	{ 0xC1, 0x0D },
+	{ 0xC2, 0x17 },
+	{ 0xC3, 0x26 },
+	{ 0xC4, 0x31 },
+	{ 0xC5, 0x1C },
+	{ 0xC6, 0x2C },
+	{ 0xC7, 0x33 },
+	{ 0xC8, 0x31 },
+	{ 0xC9, 0x37 },
+	{ 0xCA, 0x37 },
+	{ 0xCB, 0x37 },
+	{ 0xCC, 0x39 },
+	{ 0xCD, 0x2E },
+	{ 0xCE, 0x2F },
+	{ 0xCF, 0x2F },
+	{ 0xD0, 0x07 },
+	{ 0xD2, 0x00 },
+	{ 0xD3, 0x0D },
+	{ 0xD4, 0x17 },
+	{ 0xD5, 0x26 },
+	{ 0xD6, 0x31 },
+	{ 0xD7, 0x3F },
+	{ 0xD8, 0x3F },
+	{ 0xD9, 0x3F },
+	{ 0xDA, 0x3F },
+	{ 0xDB, 0x37 },
+	{ 0xDC, 0x37 },
+	{ 0xDD, 0x37 },
+	{ 0xDE, 0x39 },
+	{ 0xDF, 0x2E },
+	{ 0xE0, 0x2F },
+	{ 0xE1, 0x2F },
+	{ 0xE2, 0x07 },
+	{ 0xB0, 0x03 },
+	{ 0xC8, 0x0B },
+	{ 0xC9, 0x07 },
+	{ 0xC3, 0x00 },
+	{ 0xE7, 0x00 },
+	{ 0xC5, 0x2A },
+	{ 0xDE, 0x2A },
+	{ 0xCA, 0x43 },
+	{ 0xC9, 0x07 },
+	{ 0xE4, 0xC0 },
+	{ 0xE5, 0x0D },
+	{ 0xCB, 0x01 },
+	{ 0xBC, 0x01 },
+	{ 0xB0, 0x06 },
+	{ 0xB8, 0xA5 },
+	{ 0xC0, 0xA5 },
+	{ 0xC7, 0x0F },
+	{ 0xD5, 0x32 },
+	{ 0xB8, 0x00 },
+	{ 0xC0, 0x00 },
+	{ 0xBC, 0x00 },
+	{ 0xB0, 0x07 },
+	{ 0xB1, 0x00 },
+	{ 0xB2, 0x05 },
+	{ 0xB3, 0x10 },
+	{ 0xB4, 0x22 },
+	{ 0xB5, 0x36 },
+	{ 0xB6, 0x4A },
+	{ 0xB7, 0x6C },
+	{ 0xB8, 0x9A },
+	{ 0xB9, 0xD7 },
+	{ 0xBA, 0x17 },
+	{ 0xBB, 0x92 },
+	{ 0xBC, 0x15 },
+	{ 0xBD, 0x18 },
+	{ 0xBE, 0x8C },
+	{ 0xBF, 0x00 },
+	{ 0xC0, 0x3A },
+	{ 0xC1, 0x72 },
+	{ 0xC2, 0x8C },
+	{ 0xC3, 0xA5 },
+	{ 0xC4, 0xB1 },
+	{ 0xC5, 0xBE },
+	{ 0xC6, 0xCA },
+	{ 0xC7, 0xD1 },
+	{ 0xC8, 0xD4 },
+	{ 0xC9, 0x00 },
+	{ 0xCA, 0x00 },
+	{ 0xCB, 0x16 },
+	{ 0xCC, 0xAF },
+	{ 0xCD, 0xFF },
+	{ 0xCE, 0xFF },
+	{ 0xB0, 0x08 },
+	{ 0xB1, 0x04 },
+	{ 0xB2, 0x05 },
+	{ 0xB3, 0x11 },
+	{ 0xB4, 0x24 },
+	{ 0xB5, 0x39 },
+	{ 0xB6, 0x4E },
+	{ 0xB7, 0x72 },
+	{ 0xB8, 0xA3 },
+	{ 0xB9, 0xE1 },
+	{ 0xBA, 0x25 },
+	{ 0xBB, 0xA8 },
+	{ 0xBC, 0x2E },
+	{ 0xBD, 0x32 },
+	{ 0xBE, 0xAD },
+	{ 0xBF, 0x28 },
+	{ 0xC0, 0x63 },
+	{ 0xC1, 0x9B },
+	{ 0xC2, 0xB5 },
+	{ 0xC3, 0xCF },
+	{ 0xC4, 0xDB },
+	{ 0xC5, 0xE8 },
+	{ 0xC6, 0xF5 },
+	{ 0xC7, 0xFA },
+	{ 0xC8, 0xFC },
+	{ 0xC9, 0x00 },
+	{ 0xCA, 0x00 },
+	{ 0xCB, 0x16 },
+	{ 0xCC, 0xAF },
+	{ 0xCD, 0xFF },
+	{ 0xCE, 0xFF },
+	{ 0xB0, 0x09 },
+	{ 0xB1, 0x04 },
+	{ 0xB2, 0x04 },
+	{ 0xB3, 0x0F },
+	{ 0xB4, 0x22 },
+	{ 0xB5, 0x37 },
+	{ 0xB6, 0x4D },
+	{ 0xB7, 0x71 },
+	{ 0xB8, 0xA2 },
+	{ 0xB9, 0xE1 },
+	{ 0xBA, 0x26 },
+	{ 0xBB, 0xA9 },
+	{ 0xBC, 0x2F },
+	{ 0xBD, 0x33 },
+	{ 0xBE, 0xAC },
+	{ 0xBF, 0x24 },
+	{ 0xC0, 0x5D },
+	{ 0xC1, 0x94 },
+	{ 0xC2, 0xAC },
+	{ 0xC3, 0xC5 },
+	{ 0xC4, 0xD1 },
+	{ 0xC5, 0xDC },
+	{ 0xC6, 0xE8 },
+	{ 0xC7, 0xED },
+	{ 0xC8, 0xF0 },
+	{ 0xC9, 0x00 },
+	{ 0xCA, 0x00 },
+	{ 0xCB, 0x16 },
+	{ 0xCC, 0xAF },
+	{ 0xCD, 0xFF },
+	{ 0xCE, 0xFF },
+	{ 0xB0, 0x0A },
+	{ 0xB1, 0x00 },
+	{ 0xB2, 0x05 },
+	{ 0xB3, 0x10 },
+	{ 0xB4, 0x22 },
+	{ 0xB5, 0x36 },
+	{ 0xB6, 0x4A },
+	{ 0xB7, 0x6C },
+	{ 0xB8, 0x9A },
+	{ 0xB9, 0xD7 },
+	{ 0xBA, 0x17 },
+	{ 0xBB, 0x92 },
+	{ 0xBC, 0x15 },
+	{ 0xBD, 0x18 },
+	{ 0xBE, 0x8C },
+	{ 0xBF, 0x00 },
+	{ 0xC0, 0x3A },
+	{ 0xC1, 0x72 },
+	{ 0xC2, 0x8C },
+	{ 0xC3, 0xA5 },
+	{ 0xC4, 0xB1 },
+	{ 0xC5, 0xBE },
+	{ 0xC6, 0xCA },
+	{ 0xC7, 0xD1 },
+	{ 0xC8, 0xD4 },
+	{ 0xC9, 0x00 },
+	{ 0xCA, 0x00 },
+	{ 0xCB, 0x16 },
+	{ 0xCC, 0xAF },
+	{ 0xCD, 0xFF },
+	{ 0xCE, 0xFF },
+	{ 0xB0, 0x0B },
+	{ 0xB1, 0x04 },
+	{ 0xB2, 0x05 },
+	{ 0xB3, 0x11 },
+	{ 0xB4, 0x24 },
+	{ 0xB5, 0x39 },
+	{ 0xB6, 0x4E },
+	{ 0xB7, 0x72 },
+	{ 0xB8, 0xA3 },
+	{ 0xB9, 0xE1 },
+	{ 0xBA, 0x25 },
+	{ 0xBB, 0xA8 },
+	{ 0xBC, 0x2E },
+	{ 0xBD, 0x32 },
+	{ 0xBE, 0xAD },
+	{ 0xBF, 0x28 },
+	{ 0xC0, 0x63 },
+	{ 0xC1, 0x9B },
+	{ 0xC2, 0xB5 },
+	{ 0xC3, 0xCF },
+	{ 0xC4, 0xDB },
+	{ 0xC5, 0xE8 },
+	{ 0xC6, 0xF5 },
+	{ 0xC7, 0xFA },
+	{ 0xC8, 0xFC },
+	{ 0xC9, 0x00 },
+	{ 0xCA, 0x00 },
+	{ 0xCB, 0x16 },
+	{ 0xCC, 0xAF },
+	{ 0xCD, 0xFF },
+	{ 0xCE, 0xFF },
+	{ 0xB0, 0x0C },
+	{ 0xB1, 0x04 },
+	{ 0xB2, 0x04 },
+	{ 0xB3, 0x0F },
+	{ 0xB4, 0x22 },
+	{ 0xB5, 0x37 },
+	{ 0xB6, 0x4D },
+	{ 0xB7, 0x71 },
+	{ 0xB8, 0xA2 },
+	{ 0xB9, 0xE1 },
+	{ 0xBA, 0x26 },
+	{ 0xBB, 0xA9 },
+	{ 0xBC, 0x2F },
+	{ 0xBD, 0x33 },
+	{ 0xBE, 0xAC },
+	{ 0xBF, 0x24 },
+	{ 0xC0, 0x5D },
+	{ 0xC1, 0x94 },
+	{ 0xC2, 0xAC },
+	{ 0xC3, 0xC5 },
+	{ 0xC4, 0xD1 },
+	{ 0xC5, 0xDC },
+	{ 0xC6, 0xE8 },
+	{ 0xC7, 0xED },
+	{ 0xC8, 0xF0 },
+	{ 0xC9, 0x00 },
+	{ 0xCA, 0x00 },
+	{ 0xCB, 0x16 },
+	{ 0xCC, 0xAF },
+	{ 0xCD, 0xFF },
+	{ 0xCE, 0xFF },
+};
+
+static const struct panel_desc boe_himax8279d10p_panel_desc = {
+	.display_mode = &default_display_mode,
+	.bpc = 8,
+	.width_mm = 135,
+	.height_mm = 216,
+	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+			MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
+	.format = MIPI_DSI_FMT_RGB888,
+	.lanes = 4,
+	.on_cmds = boe_himax8279d10p_on_cmds,
+	.on_cmds_num = 283,
+};
+
+static const struct of_device_id panel_of_match[] = {
+	{
+		.compatible = "boe,himax8279d8p",
+		.data = &boe_himax8279d8p_panel_desc,
+	},
+	{
+		.compatible = "boe,himax8279d10p",
+		.data = &boe_himax8279d10p_panel_desc,
+	},
+	{
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(of, panel_of_match);
+
+static int panel_add(struct panel_info *pinfo)
+{
+	struct device *dev = &pinfo->link->dev;
+	int ret;
+
+	pinfo->pp18_gpio = devm_gpiod_get(dev, "pp18", GPIOD_OUT_HIGH);
+	if (IS_ERR(pinfo->pp18_gpio)) {
+		ret = PTR_ERR(pinfo->pp18_gpio);
+		if (ret != -EPROBE_DEFER)
+			DRM_DEV_ERROR(dev, "failed to get pp18 gpio: %d\n",
+				      ret);
+		return ret;
+	}
+
+	pinfo->pp33_gpio = devm_gpiod_get(dev, "pp33", GPIOD_OUT_HIGH);
+	if (IS_ERR(pinfo->pp33_gpio)) {
+		ret = PTR_ERR(pinfo->pp33_gpio);
+		if (ret != -EPROBE_DEFER)
+			DRM_DEV_ERROR(dev, "failed to get pp33 gpio: %d\n",
+				      ret);
+		return ret;
+	}
+
+	pinfo->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
+	if (IS_ERR(pinfo->enable_gpio)) {
+		ret = PTR_ERR(pinfo->enable_gpio);
+		if (ret != -EPROBE_DEFER)
+			DRM_DEV_ERROR(dev, "failed to get enable gpio: %d\n",
+				      ret);
+		return ret;
+	}
+
+	drm_panel_init(&pinfo->base, dev, &panel_funcs,
+		       DRM_MODE_CONNECTOR_DSI);
+
+	ret = drm_panel_of_backlight(&pinfo->base);
+	if (ret)
+		return ret;
+
+	return drm_panel_add(&pinfo->base);
+}
+
+static int panel_probe(struct mipi_dsi_device *dsi)
+{
+	struct panel_info *pinfo;
+	const struct panel_desc *desc;
+	int err;
+
+	pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL);
+	if (!pinfo)
+		return -ENOMEM;
+
+	desc = of_device_get_match_data(&dsi->dev);
+	dsi->mode_flags = desc->mode_flags;
+	dsi->format = desc->format;
+	dsi->lanes = desc->lanes;
+	pinfo->desc = desc;
+
+	pinfo->link = dsi;
+	mipi_dsi_set_drvdata(dsi, pinfo);
+
+	err = panel_add(pinfo);
+	if (err < 0)
+		return err;
+
+	err = mipi_dsi_attach(dsi);
+	if (err < 0)
+		drm_panel_remove(&pinfo->base);
+
+	return err;
+}
+
+static int panel_remove(struct mipi_dsi_device *dsi)
+{
+	struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi);
+	int err;
+
+	err = boe_panel_disable(&pinfo->base);
+	if (err < 0)
+		DRM_DEV_ERROR(&dsi->dev, "failed to disable panel: %d\n",
+			      err);
+
+	err = boe_panel_unprepare(&pinfo->base);
+	if (err < 0)
+		DRM_DEV_ERROR(&dsi->dev, "failed to unprepare panel: %d\n",
+			      err);
+
+	err = mipi_dsi_detach(dsi);
+	if (err < 0)
+		DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
+			      err);
+
+	drm_panel_remove(&pinfo->base);
+
+	return 0;
+}
+
+static void panel_shutdown(struct mipi_dsi_device *dsi)
+{
+	struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi);
+
+	boe_panel_disable(&pinfo->base);
+	boe_panel_unprepare(&pinfo->base);
+}
+
+static struct mipi_dsi_driver panel_driver = {
+	.driver = {
+		.name = "panel-boe-himax8279d",
+		.of_match_table = panel_of_match,
+	},
+	.probe = panel_probe,
+	.remove = panel_remove,
+	.shutdown = panel_shutdown,
+};
+module_mipi_dsi_driver(panel_driver);
+
+MODULE_AUTHOR("Jerry Han <jerry.han.hq@gmail.com>");
+MODULE_DESCRIPTION("Boe Himax8279d driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index 98f184b..95b789a 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -9,7 +9,6 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 
-#include <linux/backlight.h>
 #include <linux/gpio/consumer.h>
 #include <linux/delay.h>
 #include <linux/module.h>
@@ -22,7 +21,6 @@ struct feiyang {
 	struct drm_panel	panel;
 	struct mipi_dsi_device	*dsi;
 
-	struct backlight_device	*backlight;
 	struct regulator	*dvdd;
 	struct regulator	*avdd;
 	struct gpio_desc	*reset;
@@ -102,7 +100,6 @@ static int feiyang_enable(struct drm_panel *panel)
 	msleep(200);
 
 	mipi_dsi_dcs_set_display_on(ctx->dsi);
-	backlight_enable(ctx->backlight);
 
 	return 0;
 }
@@ -111,7 +108,6 @@ static int feiyang_disable(struct drm_panel *panel)
 {
 	struct feiyang *ctx = panel_to_feiyang(panel);
 
-	backlight_disable(ctx->backlight);
 	return mipi_dsi_dcs_set_display_off(ctx->dsi);
 }
 
@@ -162,13 +158,13 @@ static const struct drm_display_mode feiyang_default_mode = {
 	.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
 };
 
-static int feiyang_get_modes(struct drm_panel *panel)
+static int feiyang_get_modes(struct drm_panel *panel,
+			     struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct feiyang *ctx = panel_to_feiyang(panel);
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &feiyang_default_mode);
+	mode = drm_mode_duplicate(connector->dev, &feiyang_default_mode);
 	if (!mode) {
 		DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
 			      feiyang_default_mode.hdisplay,
@@ -225,9 +221,9 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
 		return PTR_ERR(ctx->reset);
 	}
 
-	ctx->backlight = devm_of_find_backlight(&dsi->dev);
-	if (IS_ERR(ctx->backlight))
-		return PTR_ERR(ctx->backlight);
+	ret = drm_panel_of_backlight(&ctx->panel);
+	if (ret)
+		return ret;
 
 	ret = drm_panel_add(&ctx->panel);
 	if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 24955be..f394d53 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -641,10 +641,11 @@ static const struct drm_display_mode itu_r_bt_656_720_mode = {
 	.flags = 0,
 };
 
-static int ili9322_get_modes(struct drm_panel *panel)
+static int ili9322_get_modes(struct drm_panel *panel,
+			     struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct ili9322 *ili = panel_to_ili9322(panel);
+	struct drm_device *drm = connector->dev;
 	struct drm_display_mode *mode;
 	struct drm_display_info *info;
 
@@ -663,26 +664,26 @@ static int ili9322_get_modes(struct drm_panel *panel)
 
 	switch (ili->input) {
 	case ILI9322_INPUT_SRGB_DUMMY_320X240:
-		mode = drm_mode_duplicate(panel->drm, &srgb_320x240_mode);
+		mode = drm_mode_duplicate(drm, &srgb_320x240_mode);
 		break;
 	case ILI9322_INPUT_SRGB_DUMMY_360X240:
-		mode = drm_mode_duplicate(panel->drm, &srgb_360x240_mode);
+		mode = drm_mode_duplicate(drm, &srgb_360x240_mode);
 		break;
 	case ILI9322_INPUT_PRGB_THROUGH:
 	case ILI9322_INPUT_PRGB_ALIGNED:
-		mode = drm_mode_duplicate(panel->drm, &prgb_320x240_mode);
+		mode = drm_mode_duplicate(drm, &prgb_320x240_mode);
 		break;
 	case ILI9322_INPUT_YUV_640X320_YCBCR:
-		mode = drm_mode_duplicate(panel->drm, &yuv_640x320_mode);
+		mode = drm_mode_duplicate(drm, &yuv_640x320_mode);
 		break;
 	case ILI9322_INPUT_YUV_720X360_YCBCR:
-		mode = drm_mode_duplicate(panel->drm, &yuv_720x360_mode);
+		mode = drm_mode_duplicate(drm, &yuv_720x360_mode);
 		break;
 	case ILI9322_INPUT_ITU_R_BT656_720X360_YCBCR:
-		mode = drm_mode_duplicate(panel->drm, &itu_r_bt_656_720_mode);
+		mode = drm_mode_duplicate(drm, &itu_r_bt_656_720_mode);
 		break;
 	case ILI9322_INPUT_ITU_R_BT656_640X320_YCBCR:
-		mode = drm_mode_duplicate(panel->drm, &itu_r_bt_656_640_mode);
+		mode = drm_mode_duplicate(drm, &itu_r_bt_656_640_mode);
 		break;
 	default:
 		mode = NULL;
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index e8789e4..f54077c 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -3,7 +3,6 @@
  * Copyright (C) 2017-2018, Bootlin
  */
 
-#include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -25,7 +24,6 @@ struct ili9881c {
 	struct drm_panel	panel;
 	struct mipi_dsi_device	*dsi;
 
-	struct backlight_device *backlight;
 	struct regulator	*power;
 	struct gpio_desc	*reset;
 };
@@ -348,7 +346,6 @@ static int ili9881c_enable(struct drm_panel *panel)
 	msleep(120);
 
 	mipi_dsi_dcs_set_display_on(ctx->dsi);
-	backlight_enable(ctx->backlight);
 
 	return 0;
 }
@@ -357,7 +354,6 @@ static int ili9881c_disable(struct drm_panel *panel)
 {
 	struct ili9881c *ctx = panel_to_ili9881c(panel);
 
-	backlight_disable(ctx->backlight);
 	return mipi_dsi_dcs_set_display_off(ctx->dsi);
 }
 
@@ -387,13 +383,13 @@ static const struct drm_display_mode bananapi_default_mode = {
 	.vtotal		= 1280 + 10 + 10 + 20,
 };
 
-static int ili9881c_get_modes(struct drm_panel *panel)
+static int ili9881c_get_modes(struct drm_panel *panel,
+			      struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct ili9881c *ctx = panel_to_ili9881c(panel);
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &bananapi_default_mode);
+	mode = drm_mode_duplicate(connector->dev, &bananapi_default_mode);
 	if (!mode) {
 		dev_err(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
 			bananapi_default_mode.hdisplay,
@@ -407,8 +403,8 @@ static int ili9881c_get_modes(struct drm_panel *panel)
 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
 	drm_mode_probed_add(connector, mode);
 
-	panel->connector->display_info.width_mm = 62;
-	panel->connector->display_info.height_mm = 110;
+	connector->display_info.width_mm = 62;
+	connector->display_info.height_mm = 110;
 
 	return 1;
 }
@@ -423,7 +419,6 @@ static const struct drm_panel_funcs ili9881c_funcs = {
 
 static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
 {
-	struct device_node *np;
 	struct ili9881c *ctx;
 	int ret;
 
@@ -448,14 +443,9 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
 		return PTR_ERR(ctx->reset);
 	}
 
-	np = of_parse_phandle(dsi->dev.of_node, "backlight", 0);
-	if (np) {
-		ctx->backlight = of_find_backlight_by_node(np);
-		of_node_put(np);
-
-		if (!ctx->backlight)
-			return -EPROBE_DEFER;
-	}
+	ret = drm_panel_of_backlight(&ctx->panel);
+	if (ret)
+		return ret;
 
 	ret = drm_panel_add(&ctx->panel);
 	if (ret < 0)
@@ -475,9 +465,6 @@ static int ili9881c_dsi_remove(struct mipi_dsi_device *dsi)
 	mipi_dsi_detach(dsi);
 	drm_panel_remove(&ctx->panel);
 
-	if (ctx->backlight)
-		put_device(&ctx->backlight->dev);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index 83df1ac..7419f1f0 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -3,7 +3,6 @@
  * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
  */
 
-#include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
@@ -52,7 +51,6 @@ struct innolux_panel {
 	struct mipi_dsi_device *link;
 	const struct panel_desc *desc;
 
-	struct backlight_device *backlight;
 	struct regulator_bulk_data *supplies;
 	struct gpio_desc *enable_gpio;
 
@@ -72,8 +70,6 @@ static int innolux_panel_disable(struct drm_panel *panel)
 	if (!innolux->enabled)
 		return 0;
 
-	backlight_disable(innolux->backlight);
-
 	innolux->enabled = false;
 
 	return 0;
@@ -204,18 +200,10 @@ static int innolux_panel_prepare(struct drm_panel *panel)
 static int innolux_panel_enable(struct drm_panel *panel)
 {
 	struct innolux_panel *innolux = to_innolux_panel(panel);
-	int ret;
 
 	if (innolux->enabled)
 		return 0;
 
-	ret = backlight_enable(innolux->backlight);
-	if (ret) {
-		DRM_DEV_ERROR(panel->drm->dev,
-			      "Failed to enable backlight %d\n", ret);
-		return ret;
-	}
-
 	innolux->enabled = true;
 
 	return 0;
@@ -403,28 +391,27 @@ static const struct panel_desc innolux_p097pfg_panel_desc = {
 	.sleep_mode_delay = 100, /* T15 */
 };
 
-static int innolux_panel_get_modes(struct drm_panel *panel)
+static int innolux_panel_get_modes(struct drm_panel *panel,
+				   struct drm_connector *connector)
 {
 	struct innolux_panel *innolux = to_innolux_panel(panel);
 	const struct drm_display_mode *m = innolux->desc->mode;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, m);
+	mode = drm_mode_duplicate(connector->dev, m);
 	if (!mode) {
-		DRM_DEV_ERROR(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+		DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n",
 			      m->hdisplay, m->vdisplay, m->vrefresh);
 		return -ENOMEM;
 	}
 
 	drm_mode_set_name(mode);
 
-	drm_mode_probed_add(panel->connector, mode);
+	drm_mode_probed_add(connector, mode);
 
-	panel->connector->display_info.width_mm =
-			innolux->desc->size.width;
-	panel->connector->display_info.height_mm =
-			innolux->desc->size.height;
-	panel->connector->display_info.bpc = innolux->desc->bpc;
+	connector->display_info.width_mm = innolux->desc->size.width;
+	connector->display_info.height_mm = innolux->desc->size.height;
+	connector->display_info.bpc = innolux->desc->bpc;
 
 	return 1;
 }
@@ -483,13 +470,13 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
 		innolux->enable_gpio = NULL;
 	}
 
-	innolux->backlight = devm_of_find_backlight(dev);
-	if (IS_ERR(innolux->backlight))
-		return PTR_ERR(innolux->backlight);
-
 	drm_panel_init(&innolux->base, dev, &innolux_panel_funcs,
 		       DRM_MODE_CONNECTOR_DSI);
 
+	err = drm_panel_of_backlight(&innolux->base);
+	if (err)
+		return err;
+
 	err = drm_panel_add(&innolux->base);
 	if (err < 0)
 		return err;
@@ -527,12 +514,12 @@ static int innolux_panel_remove(struct mipi_dsi_device *dsi)
 	struct innolux_panel *innolux = mipi_dsi_get_drvdata(dsi);
 	int err;
 
-	err = innolux_panel_unprepare(&innolux->base);
+	err = drm_panel_unprepare(&innolux->base);
 	if (err < 0)
 		DRM_DEV_ERROR(&dsi->dev, "failed to unprepare panel: %d\n",
 			      err);
 
-	err = innolux_panel_disable(&innolux->base);
+	err = drm_panel_disable(&innolux->base);
 	if (err < 0)
 		DRM_DEV_ERROR(&dsi->dev, "failed to disable panel: %d\n", err);
 
@@ -550,8 +537,8 @@ static void innolux_panel_shutdown(struct mipi_dsi_device *dsi)
 {
 	struct innolux_panel *innolux = mipi_dsi_get_drvdata(dsi);
 
-	innolux_panel_unprepare(&innolux->base);
-	innolux_panel_disable(&innolux->base);
+	drm_panel_unprepare(&innolux->base);
+	drm_panel_disable(&innolux->base);
 }
 
 static struct mipi_dsi_driver innolux_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 56364a9..4bfd8c8 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -300,13 +300,14 @@ static const struct drm_display_mode default_mode = {
 		.flags = 0,
 };
 
-static int jdi_panel_get_modes(struct drm_panel *panel)
+static int jdi_panel_get_modes(struct drm_panel *panel,
+			       struct drm_connector *connector)
 {
 	struct drm_display_mode *mode;
 	struct jdi_panel *jdi = to_jdi_panel(panel);
 	struct device *dev = &jdi->dsi->dev;
 
-	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
 	if (!mode) {
 		dev_err(dev, "failed to add mode %ux%ux@%u\n",
 			default_mode.hdisplay, default_mode.vdisplay,
@@ -316,10 +317,10 @@ static int jdi_panel_get_modes(struct drm_panel *panel)
 
 	drm_mode_set_name(mode);
 
-	drm_mode_probed_add(panel->connector, mode);
+	drm_mode_probed_add(connector, mode);
 
-	panel->connector->display_info.width_mm = 95;
-	panel->connector->display_info.height_mm = 151;
+	connector->display_info.width_mm = 95;
+	connector->display_info.height_mm = 151;
 
 	return 1;
 }
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index 45f9655..bac1a2a 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -3,7 +3,6 @@
  * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
  */
 
-#include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
@@ -23,7 +22,6 @@ struct kingdisplay_panel {
 	struct drm_panel base;
 	struct mipi_dsi_device *link;
 
-	struct backlight_device *backlight;
 	struct regulator *supply;
 	struct gpio_desc *enable_gpio;
 
@@ -191,8 +189,6 @@ static int kingdisplay_panel_disable(struct drm_panel *panel)
 	if (!kingdisplay->enabled)
 		return 0;
 
-	backlight_disable(kingdisplay->backlight);
-
 	err = mipi_dsi_dcs_set_display_off(kingdisplay->link);
 	if (err < 0)
 		DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
@@ -303,18 +299,10 @@ static int kingdisplay_panel_prepare(struct drm_panel *panel)
 static int kingdisplay_panel_enable(struct drm_panel *panel)
 {
 	struct kingdisplay_panel *kingdisplay = to_kingdisplay_panel(panel);
-	int ret;
 
 	if (kingdisplay->enabled)
 		return 0;
 
-	ret = backlight_enable(kingdisplay->backlight);
-	if (ret) {
-		DRM_DEV_ERROR(panel->drm->dev,
-			      "Failed to enable backlight %d\n", ret);
-		return ret;
-	}
-
 	kingdisplay->enabled = true;
 
 	return 0;
@@ -333,13 +321,14 @@ static const struct drm_display_mode default_mode = {
 	.vrefresh = 60,
 };
 
-static int kingdisplay_panel_get_modes(struct drm_panel *panel)
+static int kingdisplay_panel_get_modes(struct drm_panel *panel,
+				       struct drm_connector *connector)
 {
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
 	if (!mode) {
-		DRM_DEV_ERROR(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+		DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n",
 			      default_mode.hdisplay, default_mode.vdisplay,
 			      default_mode.vrefresh);
 		return -ENOMEM;
@@ -347,11 +336,11 @@ static int kingdisplay_panel_get_modes(struct drm_panel *panel)
 
 	drm_mode_set_name(mode);
 
-	drm_mode_probed_add(panel->connector, mode);
+	drm_mode_probed_add(connector, mode);
 
-	panel->connector->display_info.width_mm = 147;
-	panel->connector->display_info.height_mm = 196;
-	panel->connector->display_info.bpc = 8;
+	connector->display_info.width_mm = 147;
+	connector->display_info.height_mm = 196;
+	connector->display_info.bpc = 8;
 
 	return 1;
 }
@@ -387,13 +376,13 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay)
 		kingdisplay->enable_gpio = NULL;
 	}
 
-	kingdisplay->backlight = devm_of_find_backlight(dev);
-	if (IS_ERR(kingdisplay->backlight))
-		return PTR_ERR(kingdisplay->backlight);
-
 	drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev,
 		       &kingdisplay_panel_funcs, DRM_MODE_CONNECTOR_DSI);
 
+	err = drm_panel_of_backlight(&kingdisplay->base);
+	if (err)
+		return err;
+
 	return drm_panel_add(&kingdisplay->base);
 }
 
@@ -431,12 +420,12 @@ static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi)
 	struct kingdisplay_panel *kingdisplay = mipi_dsi_get_drvdata(dsi);
 	int err;
 
-	err = kingdisplay_panel_unprepare(&kingdisplay->base);
+	err = drm_panel_unprepare(&kingdisplay->base);
 	if (err < 0)
 		DRM_DEV_ERROR(&dsi->dev, "failed to unprepare panel: %d\n",
 			      err);
 
-	err = kingdisplay_panel_disable(&kingdisplay->base);
+	err = drm_panel_disable(&kingdisplay->base);
 	if (err < 0)
 		DRM_DEV_ERROR(&dsi->dev, "failed to disable panel: %d\n", err);
 
@@ -454,8 +443,8 @@ static void kingdisplay_panel_shutdown(struct mipi_dsi_device *dsi)
 {
 	struct kingdisplay_panel *kingdisplay = mipi_dsi_get_drvdata(dsi);
 
-	kingdisplay_panel_unprepare(&kingdisplay->base);
-	kingdisplay_panel_disable(&kingdisplay->base);
+	drm_panel_unprepare(&kingdisplay->base);
+	drm_panel_disable(&kingdisplay->base);
 }
 
 static struct mipi_dsi_driver kingdisplay_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
new file mode 100644
index 0000000..76ecf2d
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2019 Theobroma Systems Design und Consulting GmbH
+ *
+ * based on panel-kingdisplay-kd097d04.c
+ * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct ltk500hd1829 {
+	struct device *dev;
+	struct drm_panel panel;
+	struct gpio_desc *reset_gpio;
+	struct regulator *vcc;
+	struct regulator *iovcc;
+	bool prepared;
+};
+
+struct ltk500hd1829_cmd {
+	char cmd;
+	char data;
+};
+
+/*
+ * There is no description in the Reference Manual about these commands.
+ * We received them from the vendor, so just use them as is.
+ */
+static const struct ltk500hd1829_cmd init_code[] = {
+	{ 0xE0, 0x00 },
+	{ 0xE1, 0x93 },
+	{ 0xE2, 0x65 },
+	{ 0xE3, 0xF8 },
+	{ 0x80, 0x03 },
+	{ 0xE0, 0x04 },
+	{ 0x2D, 0x03 },
+	{ 0xE0, 0x01 },
+	{ 0x00, 0x00 },
+	{ 0x01, 0xB6 },
+	{ 0x03, 0x00 },
+	{ 0x04, 0xC5 },
+	{ 0x17, 0x00 },
+	{ 0x18, 0xBF },
+	{ 0x19, 0x01 },
+	{ 0x1A, 0x00 },
+	{ 0x1B, 0xBF },
+	{ 0x1C, 0x01 },
+	{ 0x1F, 0x7C },
+	{ 0x20, 0x26 },
+	{ 0x21, 0x26 },
+	{ 0x22, 0x4E },
+	{ 0x37, 0x09 },
+	{ 0x38, 0x04 },
+	{ 0x39, 0x08 },
+	{ 0x3A, 0x1F },
+	{ 0x3B, 0x1F },
+	{ 0x3C, 0x78 },
+	{ 0x3D, 0xFF },
+	{ 0x3E, 0xFF },
+	{ 0x3F, 0x00 },
+	{ 0x40, 0x04 },
+	{ 0x41, 0xA0 },
+	{ 0x43, 0x0F },
+	{ 0x44, 0x0A },
+	{ 0x45, 0x24 },
+	{ 0x55, 0x01 },
+	{ 0x56, 0x01 },
+	{ 0x57, 0xA5 },
+	{ 0x58, 0x0A },
+	{ 0x59, 0x4A },
+	{ 0x5A, 0x38 },
+	{ 0x5B, 0x10 },
+	{ 0x5C, 0x19 },
+	{ 0x5D, 0x7C },
+	{ 0x5E, 0x64 },
+	{ 0x5F, 0x54 },
+	{ 0x60, 0x48 },
+	{ 0x61, 0x44 },
+	{ 0x62, 0x35 },
+	{ 0x63, 0x3A },
+	{ 0x64, 0x24 },
+	{ 0x65, 0x3B },
+	{ 0x66, 0x39 },
+	{ 0x67, 0x37 },
+	{ 0x68, 0x56 },
+	{ 0x69, 0x41 },
+	{ 0x6A, 0x47 },
+	{ 0x6B, 0x2F },
+	{ 0x6C, 0x23 },
+	{ 0x6D, 0x13 },
+	{ 0x6E, 0x02 },
+	{ 0x6F, 0x08 },
+	{ 0x70, 0x7C },
+	{ 0x71, 0x64 },
+	{ 0x72, 0x54 },
+	{ 0x73, 0x48 },
+	{ 0x74, 0x44 },
+	{ 0x75, 0x35 },
+	{ 0x76, 0x3A },
+	{ 0x77, 0x22 },
+	{ 0x78, 0x3B },
+	{ 0x79, 0x39 },
+	{ 0x7A, 0x38 },
+	{ 0x7B, 0x52 },
+	{ 0x7C, 0x41 },
+	{ 0x7D, 0x47 },
+	{ 0x7E, 0x2F },
+	{ 0x7F, 0x23 },
+	{ 0x80, 0x13 },
+	{ 0x81, 0x02 },
+	{ 0x82, 0x08 },
+	{ 0xE0, 0x02 },
+	{ 0x00, 0x57 },
+	{ 0x01, 0x77 },
+	{ 0x02, 0x44 },
+	{ 0x03, 0x46 },
+	{ 0x04, 0x48 },
+	{ 0x05, 0x4A },
+	{ 0x06, 0x4C },
+	{ 0x07, 0x4E },
+	{ 0x08, 0x50 },
+	{ 0x09, 0x55 },
+	{ 0x0A, 0x52 },
+	{ 0x0B, 0x55 },
+	{ 0x0C, 0x55 },
+	{ 0x0D, 0x55 },
+	{ 0x0E, 0x55 },
+	{ 0x0F, 0x55 },
+	{ 0x10, 0x55 },
+	{ 0x11, 0x55 },
+	{ 0x12, 0x55 },
+	{ 0x13, 0x40 },
+	{ 0x14, 0x55 },
+	{ 0x15, 0x55 },
+	{ 0x16, 0x57 },
+	{ 0x17, 0x77 },
+	{ 0x18, 0x45 },
+	{ 0x19, 0x47 },
+	{ 0x1A, 0x49 },
+	{ 0x1B, 0x4B },
+	{ 0x1C, 0x4D },
+	{ 0x1D, 0x4F },
+	{ 0x1E, 0x51 },
+	{ 0x1F, 0x55 },
+	{ 0x20, 0x53 },
+	{ 0x21, 0x55 },
+	{ 0x22, 0x55 },
+	{ 0x23, 0x55 },
+	{ 0x24, 0x55 },
+	{ 0x25, 0x55 },
+	{ 0x26, 0x55 },
+	{ 0x27, 0x55 },
+	{ 0x28, 0x55 },
+	{ 0x29, 0x41 },
+	{ 0x2A, 0x55 },
+	{ 0x2B, 0x55 },
+	{ 0x2C, 0x57 },
+	{ 0x2D, 0x77 },
+	{ 0x2E, 0x4F },
+	{ 0x2F, 0x4D },
+	{ 0x30, 0x4B },
+	{ 0x31, 0x49 },
+	{ 0x32, 0x47 },
+	{ 0x33, 0x45 },
+	{ 0x34, 0x41 },
+	{ 0x35, 0x55 },
+	{ 0x36, 0x53 },
+	{ 0x37, 0x55 },
+	{ 0x38, 0x55 },
+	{ 0x39, 0x55 },
+	{ 0x3A, 0x55 },
+	{ 0x3B, 0x55 },
+	{ 0x3C, 0x55 },
+	{ 0x3D, 0x55 },
+	{ 0x3E, 0x55 },
+	{ 0x3F, 0x51 },
+	{ 0x40, 0x55 },
+	{ 0x41, 0x55 },
+	{ 0x42, 0x57 },
+	{ 0x43, 0x77 },
+	{ 0x44, 0x4E },
+	{ 0x45, 0x4C },
+	{ 0x46, 0x4A },
+	{ 0x47, 0x48 },
+	{ 0x48, 0x46 },
+	{ 0x49, 0x44 },
+	{ 0x4A, 0x40 },
+	{ 0x4B, 0x55 },
+	{ 0x4C, 0x52 },
+	{ 0x4D, 0x55 },
+	{ 0x4E, 0x55 },
+	{ 0x4F, 0x55 },
+	{ 0x50, 0x55 },
+	{ 0x51, 0x55 },
+	{ 0x52, 0x55 },
+	{ 0x53, 0x55 },
+	{ 0x54, 0x55 },
+	{ 0x55, 0x50 },
+	{ 0x56, 0x55 },
+	{ 0x57, 0x55 },
+	{ 0x58, 0x40 },
+	{ 0x59, 0x00 },
+	{ 0x5A, 0x00 },
+	{ 0x5B, 0x10 },
+	{ 0x5C, 0x09 },
+	{ 0x5D, 0x30 },
+	{ 0x5E, 0x01 },
+	{ 0x5F, 0x02 },
+	{ 0x60, 0x30 },
+	{ 0x61, 0x03 },
+	{ 0x62, 0x04 },
+	{ 0x63, 0x06 },
+	{ 0x64, 0x6A },
+	{ 0x65, 0x75 },
+	{ 0x66, 0x0F },
+	{ 0x67, 0xB3 },
+	{ 0x68, 0x0B },
+	{ 0x69, 0x06 },
+	{ 0x6A, 0x6A },
+	{ 0x6B, 0x10 },
+	{ 0x6C, 0x00 },
+	{ 0x6D, 0x04 },
+	{ 0x6E, 0x04 },
+	{ 0x6F, 0x88 },
+	{ 0x70, 0x00 },
+	{ 0x71, 0x00 },
+	{ 0x72, 0x06 },
+	{ 0x73, 0x7B },
+	{ 0x74, 0x00 },
+	{ 0x75, 0xBC },
+	{ 0x76, 0x00 },
+	{ 0x77, 0x05 },
+	{ 0x78, 0x2E },
+	{ 0x79, 0x00 },
+	{ 0x7A, 0x00 },
+	{ 0x7B, 0x00 },
+	{ 0x7C, 0x00 },
+	{ 0x7D, 0x03 },
+	{ 0x7E, 0x7B },
+	{ 0xE0, 0x04 },
+	{ 0x09, 0x10 },
+	{ 0x2B, 0x2B },
+	{ 0x2E, 0x44 },
+	{ 0xE0, 0x00 },
+	{ 0xE6, 0x02 },
+	{ 0xE7, 0x02 },
+	{ 0x35, 0x00 },
+};
+
+static inline
+struct ltk500hd1829 *panel_to_ltk500hd1829(struct drm_panel *panel)
+{
+	return container_of(panel, struct ltk500hd1829, panel);
+}
+
+static int ltk500hd1829_unprepare(struct drm_panel *panel)
+{
+	struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+	int ret;
+
+	if (!ctx->prepared)
+		return 0;
+
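+	/* Power down: display off, enter sleep, then drop the supplies. */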
+	ret = mipi_dsi_dcs_set_display_off(dsi);
+	if (ret < 0)
+		DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+			      ret);
+
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+	if (ret < 0) {
+		DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+			      ret);
+	}
+
+	/* 120ms to enter sleep mode */
+	msleep(120);
+
+	regulator_disable(ctx->iovcc);
+	regulator_disable(ctx->vcc);
+
+	ctx->prepared = false;
+
+	return 0;
+}
+
+static int ltk500hd1829_prepare(struct drm_panel *panel)
+{
+	struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+	unsigned int i;
+	int ret;
+
+	if (ctx->prepared)
+		return 0;
+
+	ret = regulator_enable(ctx->vcc);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev,
+			      "Failed to enable vcc supply: %d\n", ret);
+		return ret;
+	}
+	ret = regulator_enable(ctx->iovcc);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev,
+			      "Failed to enable iovcc supply: %d\n", ret);
+		goto disable_vcc;
+	}
+
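+	/* Pulse the reset line and wait tRT before sending the init sequence. */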
+	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+	/* tRW: 10us */
+	usleep_range(10, 20);
+	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+	/* tRT: >= 5ms */
+	usleep_range(5000, 6000);
+
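+	/* Each vendor init command is sent as a two-byte generic DSI write. */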
+	for (i = 0; i < ARRAY_SIZE(init_code); i++) {
+		ret = mipi_dsi_generic_write(dsi, &init_code[i],
+					     sizeof(struct ltk500hd1829_cmd));
+		if (ret < 0) {
+			DRM_DEV_ERROR(panel->dev,
+				      "failed to write init cmds: %d\n", ret);
+			goto disable_iovcc;
+		}
+	}
+
+	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+	if (ret < 0) {
+		DRM_DEV_ERROR(panel->dev, "failed to exit sleep mode: %d\n",
+			      ret);
+		goto disable_iovcc;
+	}
+
+	/* 120ms to exit sleep mode */
+	msleep(120);
+
+	ret = mipi_dsi_dcs_set_display_on(dsi);
+	if (ret < 0) {
+		DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
+			      ret);
+		goto disable_iovcc;
+	}
+
+	ctx->prepared = true;
+
+	return 0;
+
+disable_iovcc:
+	regulator_disable(ctx->iovcc);
+disable_vcc:
+	regulator_disable(ctx->vcc);
+	return ret;
+}
+
+static const struct drm_display_mode default_mode = {
+	.hdisplay	= 720,
+	.hsync_start	= 720 + 50,
+	.hsync_end	= 720 + 50 + 50,
+	.htotal		= 720 + 50 + 50 + 50,
+	.vdisplay	= 1280,
+	.vsync_start	= 1280 + 30,
+	.vsync_end	= 1280 + 30 + 4,
+	.vtotal		= 1280 + 30 + 4 + 12,
+	.vrefresh	= 60,
+	.clock		= 41600,
+	.width_mm	= 62,
+	.height_mm	= 110,
+};
+
+static int ltk500hd1829_get_modes(struct drm_panel *panel,
+				  struct drm_connector *connector)
+{
+	struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel);
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
+	if (!mode) {
+		DRM_DEV_ERROR(ctx->dev, "failed to add mode %ux%u@%u\n",
+			      default_mode.hdisplay, default_mode.vdisplay,
+			      default_mode.vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
+	drm_mode_probed_add(connector, mode);
+
+	return 1;
+}
+
+static const struct drm_panel_funcs ltk500hd1829_funcs = {
+	.unprepare = ltk500hd1829_unprepare,
+	.prepare = ltk500hd1829_prepare,
+	.get_modes = ltk500hd1829_get_modes,
+};
+
+static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
+{
+	struct ltk500hd1829 *ctx;
+	struct device *dev = &dsi->dev;
+	int ret;
+
+	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(ctx->reset_gpio)) {
+		DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+		return PTR_ERR(ctx->reset_gpio);
+	}
+
+	ctx->vcc = devm_regulator_get(dev, "vcc");
+	if (IS_ERR(ctx->vcc)) {
+		ret = PTR_ERR(ctx->vcc);
+		if (ret != -EPROBE_DEFER)
+			DRM_DEV_ERROR(dev,
+				      "Failed to request vcc regulator: %d\n",
+				      ret);
+		return ret;
+	}
+
+	ctx->iovcc = devm_regulator_get(dev, "iovcc");
+	if (IS_ERR(ctx->iovcc)) {
+		ret = PTR_ERR(ctx->iovcc);
+		if (ret != -EPROBE_DEFER)
+			DRM_DEV_ERROR(dev,
+				      "Failed to request iovcc regulator: %d\n",
+				      ret);
+		return ret;
+	}
+
+	mipi_dsi_set_drvdata(dsi, ctx);
+
+	ctx->dev = dev;
+
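+	/* Four DSI lanes, burst video mode, 24-bit RGB888. */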
+	dsi->lanes = 4;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+	drm_panel_init(&ctx->panel, &dsi->dev, &ltk500hd1829_funcs,
+		       DRM_MODE_CONNECTOR_DSI);
+
+	ret = drm_panel_of_backlight(&ctx->panel);
+	if (ret)
+		return ret;
+
+	drm_panel_add(&ctx->panel);
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret < 0) {
+		DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+		drm_panel_remove(&ctx->panel);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ltk500hd1829_shutdown(struct mipi_dsi_device *dsi)
+{
+	struct ltk500hd1829 *ctx = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	ret = drm_panel_unprepare(&ctx->panel);
+	if (ret < 0)
+		DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+			      ret);
+
+	ret = drm_panel_disable(&ctx->panel);
+	if (ret < 0)
+		DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+			      ret);
+}
+
+static int ltk500hd1829_remove(struct mipi_dsi_device *dsi)
+{
+	struct ltk500hd1829 *ctx = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	ltk500hd1829_shutdown(dsi);
+
+	ret = mipi_dsi_detach(dsi);
+	if (ret < 0)
+		DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
+			      ret);
+
+	drm_panel_remove(&ctx->panel);
+
+	return 0;
+}
+
+static const struct of_device_id ltk500hd1829_of_match[] = {
+	{ .compatible = "leadtek,ltk500hd1829", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ltk500hd1829_of_match);
+
+static struct mipi_dsi_driver ltk500hd1829_driver = {
+	.driver = {
+		.name = "panel-leadtek-ltk500hd1829",
+		.of_match_table = ltk500hd1829_of_match,
+	},
+	.probe = ltk500hd1829_probe,
+	.remove = ltk500hd1829_remove,
+	.shutdown = ltk500hd1829_shutdown,
+};
+module_mipi_dsi_driver(ltk500hd1829_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("Leadtek LTK500HD1829 panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index 7a1385e..e90efea 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -141,12 +141,12 @@ static const struct drm_display_mode lb035q02_mode = {
 	.height_mm = 53,
 };
 
-static int lb035q02_get_modes(struct drm_panel *panel)
+static int lb035q02_get_modes(struct drm_panel *panel,
+			      struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &lb035q02_mode);
+	mode = drm_mode_duplicate(connector->dev, &lb035q02_mode);
 	if (!mode)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index db4865a..b262b53 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -42,7 +42,7 @@ static int lg4573_spi_write_u16(struct lg4573 *ctx, u16 data)
 	struct spi_transfer xfer = {
 		.len = 2,
 	};
-	u16 temp = cpu_to_be16(data);
+	__be16 temp = cpu_to_be16(data);
 	struct spi_message msg;
 
 	dev_dbg(ctx->panel.dev, "writing data: %x\n", data);
@@ -209,14 +209,14 @@ static const struct drm_display_mode default_mode = {
 	.vrefresh = 60,
 };
 
-static int lg4573_get_modes(struct drm_panel *panel)
+static int lg4573_get_modes(struct drm_panel *panel,
+			    struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
 	if (!mode) {
-		dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+		dev_err(panel->dev, "failed to add mode %ux%ux@%u\n",
 			default_mode.hdisplay, default_mode.vdisplay,
 			default_mode.vrefresh);
 		return -ENOMEM;
@@ -227,8 +227,8 @@ static int lg4573_get_modes(struct drm_panel *panel)
 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
 	drm_mode_probed_add(connector, mode);
 
-	panel->connector->display_info.width_mm = 61;
-	panel->connector->display_info.height_mm = 103;
+	connector->display_info.width_mm = 61;
+	connector->display_info.height_mm = 103;
 
 	return 1;
 }
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 2405f26..5ce3f4a 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -8,7 +8,6 @@
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  */
 
-#include <linux/backlight.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/of_platform.h>
@@ -34,7 +33,6 @@ struct panel_lvds {
 	unsigned int bus_format;
 	bool data_mirror;
 
-	struct backlight_device *backlight;
 	struct regulator *supply;
 
 	struct gpio_desc *enable_gpio;
@@ -46,19 +44,6 @@ static inline struct panel_lvds *to_panel_lvds(struct drm_panel *panel)
 	return container_of(panel, struct panel_lvds, panel);
 }
 
-static int panel_lvds_disable(struct drm_panel *panel)
-{
-	struct panel_lvds *lvds = to_panel_lvds(panel);
-
-	if (lvds->backlight) {
-		lvds->backlight->props.power = FB_BLANK_POWERDOWN;
-		lvds->backlight->props.state |= BL_CORE_FBBLANK;
-		backlight_update_status(lvds->backlight);
-	}
-
-	return 0;
-}
-
 static int panel_lvds_unprepare(struct drm_panel *panel)
 {
 	struct panel_lvds *lvds = to_panel_lvds(panel);
@@ -93,26 +78,13 @@ static int panel_lvds_prepare(struct drm_panel *panel)
 	return 0;
 }
 
-static int panel_lvds_enable(struct drm_panel *panel)
+static int panel_lvds_get_modes(struct drm_panel *panel,
+				struct drm_connector *connector)
 {
 	struct panel_lvds *lvds = to_panel_lvds(panel);
-
-	if (lvds->backlight) {
-		lvds->backlight->props.state &= ~BL_CORE_FBBLANK;
-		lvds->backlight->props.power = FB_BLANK_UNBLANK;
-		backlight_update_status(lvds->backlight);
-	}
-
-	return 0;
-}
-
-static int panel_lvds_get_modes(struct drm_panel *panel)
-{
-	struct panel_lvds *lvds = to_panel_lvds(panel);
-	struct drm_connector *connector = lvds->panel.connector;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_create(lvds->panel.drm);
+	mode = drm_mode_create(connector->dev);
 	if (!mode)
 		return 0;
 
@@ -132,10 +104,8 @@ static int panel_lvds_get_modes(struct drm_panel *panel)
 }
 
 static const struct drm_panel_funcs panel_lvds_funcs = {
-	.disable = panel_lvds_disable,
 	.unprepare = panel_lvds_unprepare,
 	.prepare = panel_lvds_prepare,
-	.enable = panel_lvds_enable,
 	.get_modes = panel_lvds_get_modes,
 };
 
@@ -242,10 +212,6 @@ static int panel_lvds_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	lvds->backlight = devm_of_find_backlight(lvds->dev);
-	if (IS_ERR(lvds->backlight))
-		return PTR_ERR(lvds->backlight);
-
 	/*
 	 * TODO: Handle all power supplies specified in the DT node in a generic
 	 * way for panels that don't care about power supply ordering. LVDS
@@ -257,6 +223,10 @@ static int panel_lvds_probe(struct platform_device *pdev)
 	drm_panel_init(&lvds->panel, lvds->dev, &panel_lvds_funcs,
 		       DRM_MODE_CONNECTOR_LVDS);
 
+	ret = drm_panel_of_backlight(&lvds->panel);
+	if (ret)
+		return ret;
+
 	ret = drm_panel_add(&lvds->panel);
 	if (ret < 0)
 		return ret;
@@ -271,7 +241,7 @@ static int panel_lvds_remove(struct platform_device *pdev)
 
 	drm_panel_remove(&lvds->panel);
 
-	panel_lvds_disable(&lvds->panel);
+	drm_panel_disable(&lvds->panel);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index fd59353..c4f83f6 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -123,12 +123,12 @@ static const struct drm_display_mode nl8048_mode = {
 	.height_mm = 53,
 };
 
-static int nl8048_get_modes(struct drm_panel *panel)
+static int nl8048_get_modes(struct drm_panel *panel,
+			    struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &nl8048_mode);
+	mode = drm_mode_duplicate(connector->dev, &nl8048_mode);
 	if (!mode)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index 60ccedc..a470810f 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -206,14 +206,14 @@ static int nt39016_disable(struct drm_panel *drm_panel)
 	return 0;
 }
 
-static int nt39016_get_modes(struct drm_panel *drm_panel)
+static int nt39016_get_modes(struct drm_panel *drm_panel,
+			     struct drm_connector *connector)
 {
 	struct nt39016 *panel = to_nt39016(drm_panel);
 	const struct nt39016_panel_info *panel_info = panel->panel_info;
-	struct drm_connector *connector = drm_panel->connector;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(drm_panel->drm, &panel_info->display_mode);
+	mode = drm_mode_duplicate(connector->dev, &panel_info->display_mode);
 	if (!mode)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index f2a72ee..09deb99 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -6,7 +6,6 @@
  *   Author: Stefan Mavrodiev <stefan@olimex.com>
  */
 
-#include <linux/backlight.h>
 #include <linux/crc32.h>
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
@@ -68,7 +67,6 @@ struct lcd_olinuxino {
 	bool prepared;
 	bool enabled;
 
-	struct backlight_device *backlight;
 	struct regulator *supply;
 	struct gpio_desc *enable_gpio;
 
@@ -87,8 +85,6 @@ static int lcd_olinuxino_disable(struct drm_panel *panel)
 	if (!lcd->enabled)
 		return 0;
 
-	backlight_disable(lcd->backlight);
-
 	lcd->enabled = false;
 
 	return 0;
@@ -134,19 +130,16 @@ static int lcd_olinuxino_enable(struct drm_panel *panel)
 	if (lcd->enabled)
 		return 0;
 
-	backlight_enable(lcd->backlight);
-
 	lcd->enabled = true;
 
 	return 0;
 }
 
-static int lcd_olinuxino_get_modes(struct drm_panel *panel)
+static int lcd_olinuxino_get_modes(struct drm_panel *panel,
+				   struct drm_connector *connector)
 {
 	struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
-	struct drm_connector *connector = lcd->panel.connector;
 	struct lcd_olinuxino_info *lcd_info = &lcd->eeprom.info;
-	struct drm_device *drm = lcd->panel.drm;
 	struct lcd_olinuxino_mode *lcd_mode;
 	struct drm_display_mode *mode;
 	u32 i, num = 0;
@@ -155,13 +148,13 @@ static int lcd_olinuxino_get_modes(struct drm_panel *panel)
 		lcd_mode = (struct lcd_olinuxino_mode *)
 			   &lcd->eeprom.reserved[i * sizeof(*lcd_mode)];
 
-		mode = drm_mode_create(drm);
+		mode = drm_mode_create(connector->dev);
 		if (!mode) {
-			dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+			dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
 				lcd_mode->hactive,
 				lcd_mode->vactive,
 				lcd_mode->refresh);
-				continue;
+			continue;
 		}
 
 		mode->clock = lcd_mode->pixelclock;
@@ -284,13 +277,13 @@ static int lcd_olinuxino_probe(struct i2c_client *client,
 	if (IS_ERR(lcd->enable_gpio))
 		return PTR_ERR(lcd->enable_gpio);
 
-	lcd->backlight = devm_of_find_backlight(dev);
-	if (IS_ERR(lcd->backlight))
-		return PTR_ERR(lcd->backlight);
-
 	drm_panel_init(&lcd->panel, dev, &lcd_olinuxino_funcs,
 		       DRM_MODE_CONNECTOR_DPI);
 
+	ret = drm_panel_of_backlight(&lcd->panel);
+	if (ret)
+		return ret;
+
 	return drm_panel_add(&lcd->panel);
 }
 
@@ -300,8 +293,8 @@ static int lcd_olinuxino_remove(struct i2c_client *client)
 
 	drm_panel_remove(&panel->panel);
 
-	lcd_olinuxino_disable(&panel->panel);
-	lcd_olinuxino_unprepare(&panel->panel);
+	drm_panel_disable(&panel->panel);
+	drm_panel_unprepare(&panel->panel);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index bf1f928..bb0c992 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -349,11 +349,12 @@ static int otm8009a_enable(struct drm_panel *panel)
 	return 0;
 }
 
-static int otm8009a_get_modes(struct drm_panel *panel)
+static int otm8009a_get_modes(struct drm_panel *panel,
+			      struct drm_connector *connector)
 {
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
 	if (!mode) {
 		DRM_ERROR("failed to add mode %ux%ux@%u\n",
 			  default_mode.hdisplay, default_mode.vdisplay,
@@ -364,10 +365,10 @@ static int otm8009a_get_modes(struct drm_panel *panel)
 	drm_mode_set_name(mode);
 
 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-	drm_mode_probed_add(panel->connector, mode);
+	drm_mode_probed_add(connector, mode);
 
-	panel->connector->display_info.width_mm = mode->width_mm;
-	panel->connector->display_info.height_mm = mode->height_mm;
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
 
 	return 1;
 }
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
index 2b40913..3a0229d 100644
--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -4,7 +4,6 @@
  *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
  */
 
-#include <linux/backlight.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/regulator/consumer.h>
@@ -20,7 +19,6 @@ struct osd101t2587_panel {
 	struct drm_panel base;
 	struct mipi_dsi_device *dsi;
 
-	struct backlight_device *backlight;
 	struct regulator *supply;
 
 	bool prepared;
@@ -42,8 +40,6 @@ static int osd101t2587_panel_disable(struct drm_panel *panel)
 	if (!osd101t2587->enabled)
 		return 0;
 
-	backlight_disable(osd101t2587->backlight);
-
 	ret = mipi_dsi_shutdown_peripheral(osd101t2587->dsi);
 
 	osd101t2587->enabled = false;
@@ -91,8 +87,6 @@ static int osd101t2587_panel_enable(struct drm_panel *panel)
 	if (ret)
 		return ret;
 
-	backlight_enable(osd101t2587->backlight);
-
 	osd101t2587->enabled = true;
 
 	return ret;
@@ -112,14 +106,15 @@ static const struct drm_display_mode default_mode_osd101t2587 = {
 	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
 };
 
-static int osd101t2587_panel_get_modes(struct drm_panel *panel)
+static int osd101t2587_panel_get_modes(struct drm_panel *panel,
+				       struct drm_connector *connector)
 {
 	struct osd101t2587_panel *osd101t2587 = ti_osd_panel(panel);
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, osd101t2587->default_mode);
+	mode = drm_mode_duplicate(connector->dev, osd101t2587->default_mode);
 	if (!mode) {
-		dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
 			osd101t2587->default_mode->hdisplay,
 			osd101t2587->default_mode->vdisplay,
 			osd101t2587->default_mode->vrefresh);
@@ -128,10 +123,10 @@ static int osd101t2587_panel_get_modes(struct drm_panel *panel)
 
 	drm_mode_set_name(mode);
 
-	drm_mode_probed_add(panel->connector, mode);
+	drm_mode_probed_add(connector, mode);
 
-	panel->connector->display_info.width_mm = 217;
-	panel->connector->display_info.height_mm = 136;
+	connector->display_info.width_mm = 217;
+	connector->display_info.height_mm = 136;
 
 	return 1;
 }
@@ -157,18 +152,19 @@ MODULE_DEVICE_TABLE(of, osd101t2587_of_match);
 static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587)
 {
 	struct device *dev = &osd101t2587->dsi->dev;
+	int ret;
 
 	osd101t2587->supply = devm_regulator_get(dev, "power");
 	if (IS_ERR(osd101t2587->supply))
 		return PTR_ERR(osd101t2587->supply);
 
-	osd101t2587->backlight = devm_of_find_backlight(dev);
-	if (IS_ERR(osd101t2587->backlight))
-		return PTR_ERR(osd101t2587->backlight);
-
 	drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev,
 		       &osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI);
 
+	ret = drm_panel_of_backlight(&osd101t2587->base);
+	if (ret)
+		return ret;
+
 	return drm_panel_add(&osd101t2587->base);
 }
 
@@ -214,12 +210,11 @@ static int osd101t2587_panel_remove(struct mipi_dsi_device *dsi)
 	struct osd101t2587_panel *osd101t2587 = mipi_dsi_get_drvdata(dsi);
 	int ret;
 
-	ret = osd101t2587_panel_disable(&osd101t2587->base);
+	ret = drm_panel_disable(&osd101t2587->base);
 	if (ret < 0)
 		dev_warn(&dsi->dev, "failed to disable panel: %d\n", ret);
 
-	osd101t2587_panel_unprepare(&osd101t2587->base);
-
+	drm_panel_unprepare(&osd101t2587->base);
 	drm_panel_remove(&osd101t2587->base);
 
 	ret = mipi_dsi_detach(dsi);
@@ -233,8 +228,8 @@ static void osd101t2587_panel_shutdown(struct mipi_dsi_device *dsi)
 {
 	struct osd101t2587_panel *osd101t2587 = mipi_dsi_get_drvdata(dsi);
 
-	osd101t2587_panel_disable(&osd101t2587->base);
-	osd101t2587_panel_unprepare(&osd101t2587->base);
+	drm_panel_disable(&osd101t2587->base);
+	drm_panel_unprepare(&osd101t2587->base);
 }
 
 static struct mipi_dsi_driver osd101t2587_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 6646050..6969345 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -7,7 +7,6 @@
  * Based on AUO panel driver by Rob Clark <robdclark@gmail.com>
  */
 
-#include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -31,7 +30,6 @@ struct wuxga_nt_panel {
 	struct drm_panel base;
 	struct mipi_dsi_device *dsi;
 
-	struct backlight_device *backlight;
 	struct regulator *supply;
 
 	bool prepared;
@@ -62,12 +60,6 @@ static int wuxga_nt_panel_disable(struct drm_panel *panel)
 
 	mipi_ret = mipi_dsi_shutdown_peripheral(wuxga_nt->dsi);
 
-	if (wuxga_nt->backlight) {
-		wuxga_nt->backlight->props.power = FB_BLANK_POWERDOWN;
-		wuxga_nt->backlight->props.state |= BL_CORE_FBBLANK;
-		bl_ret = backlight_update_status(wuxga_nt->backlight);
-	}
-
 	wuxga_nt->enabled = false;
 
 	return mipi_ret ? mipi_ret : bl_ret;
@@ -142,12 +134,6 @@ static int wuxga_nt_panel_enable(struct drm_panel *panel)
 	if (wuxga_nt->enabled)
 		return 0;
 
-	if (wuxga_nt->backlight) {
-		wuxga_nt->backlight->props.power = FB_BLANK_UNBLANK;
-		wuxga_nt->backlight->props.state &= ~BL_CORE_FBBLANK;
-		backlight_update_status(wuxga_nt->backlight);
-	}
-
 	wuxga_nt->enabled = true;
 
 	return 0;
@@ -166,24 +152,25 @@ static const struct drm_display_mode default_mode = {
 	.vrefresh = 60,
 };
 
-static int wuxga_nt_panel_get_modes(struct drm_panel *panel)
+static int wuxga_nt_panel_get_modes(struct drm_panel *panel,
+				    struct drm_connector *connector)
 {
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
 	if (!mode) {
-		dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
-				default_mode.hdisplay, default_mode.vdisplay,
-				default_mode.vrefresh);
+		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+			default_mode.hdisplay, default_mode.vdisplay,
+			default_mode.vrefresh);
 		return -ENOMEM;
 	}
 
 	drm_mode_set_name(mode);
 
-	drm_mode_probed_add(panel->connector, mode);
+	drm_mode_probed_add(connector, mode);
 
-	panel->connector->display_info.width_mm = 217;
-	panel->connector->display_info.height_mm = 136;
+	connector->display_info.width_mm = 217;
+	connector->display_info.height_mm = 136;
 
 	return 1;
 }
@@ -205,7 +192,6 @@ MODULE_DEVICE_TABLE(of, wuxga_nt_of_match);
 static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
 {
 	struct device *dev = &wuxga_nt->dsi->dev;
-	struct device_node *np;
 	int ret;
 
 	wuxga_nt->mode = &default_mode;
@@ -214,38 +200,20 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
 	if (IS_ERR(wuxga_nt->supply))
 		return PTR_ERR(wuxga_nt->supply);
 
-	np = of_parse_phandle(dev->of_node, "backlight", 0);
-	if (np) {
-		wuxga_nt->backlight = of_find_backlight_by_node(np);
-		of_node_put(np);
-
-		if (!wuxga_nt->backlight)
-			return -EPROBE_DEFER;
-	}
-
 	drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev,
 		       &wuxga_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
 
-	ret = drm_panel_add(&wuxga_nt->base);
-	if (ret < 0)
-		goto put_backlight;
+	ret = drm_panel_of_backlight(&wuxga_nt->base);
+	if (ret)
+		return ret;
 
-	return 0;
-
-put_backlight:
-	if (wuxga_nt->backlight)
-		put_device(&wuxga_nt->backlight->dev);
-
-	return ret;
+	return drm_panel_add(&wuxga_nt->base);
 }
 
 static void wuxga_nt_panel_del(struct wuxga_nt_panel *wuxga_nt)
 {
 	if (wuxga_nt->base.dev)
 		drm_panel_remove(&wuxga_nt->base);
-
-	if (wuxga_nt->backlight)
-		put_device(&wuxga_nt->backlight->dev);
 }
 
 static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi)
@@ -280,7 +248,7 @@ static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
 	struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
 	int ret;
 
-	ret = wuxga_nt_panel_disable(&wuxga_nt->base);
+	ret = drm_panel_disable(&wuxga_nt->base);
 	if (ret < 0)
 		dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
 
@@ -297,7 +265,7 @@ static void wuxga_nt_panel_shutdown(struct mipi_dsi_device *dsi)
 {
 	struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
 
-	wuxga_nt_panel_disable(&wuxga_nt->base);
+	drm_panel_disable(&wuxga_nt->base);
 }
 
 static struct mipi_dsi_driver wuxga_nt_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 09824e9..8f078b7 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -44,8 +44,6 @@
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -311,10 +309,9 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
 	return 0;
 }
 
-static int rpi_touchscreen_get_modes(struct drm_panel *panel)
+static int rpi_touchscreen_get_modes(struct drm_panel *panel,
+				     struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
-	struct drm_device *drm = panel->drm;
 	unsigned int i, num = 0;
 	static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
 
@@ -322,9 +319,9 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel)
 		const struct drm_display_mode *m = &rpi_touchscreen_modes[i];
 		struct drm_display_mode *mode;
 
-		mode = drm_mode_duplicate(drm, m);
+		mode = drm_mode_duplicate(connector->dev, m);
 		if (!mode) {
-			dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+			dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
 				m->hdisplay, m->vdisplay, m->vrefresh);
 			continue;
 		}
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index fd67fc6..313637d 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -436,12 +436,12 @@ static int rad_panel_disable(struct drm_panel *panel)
 	return 0;
 }
 
-static int rad_panel_get_modes(struct drm_panel *panel)
+static int rad_panel_get_modes(struct drm_panel *panel,
+			       struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
 	if (!mode) {
 		DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n",
 			      default_mode.hdisplay, default_mode.vdisplay,
@@ -451,7 +451,7 @@ static int rad_panel_get_modes(struct drm_panel *panel)
 
 	drm_mode_set_name(mode);
 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-	drm_mode_probed_add(panel->connector, mode);
+	drm_mode_probed_add(connector, mode);
 
 	connector->display_info.width_mm = mode->width_mm;
 	connector->display_info.height_mm = mode->height_mm;
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index 994e855..e898294 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -6,9 +6,9 @@
  *          Yannick Fertre <yannick.fertre@st.com>
  */
 
-#include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/regulator/consumer.h>
 
@@ -78,7 +78,6 @@ struct rm68200 {
 	struct drm_panel panel;
 	struct gpio_desc *reset_gpio;
 	struct regulator *supply;
-	struct backlight_device *backlight;
 	bool prepared;
 	bool enabled;
 };
@@ -242,8 +241,6 @@ static int rm68200_disable(struct drm_panel *panel)
 	if (!ctx->enabled)
 		return 0;
 
-	backlight_disable(ctx->backlight);
-
 	ctx->enabled = false;
 
 	return 0;
@@ -328,18 +325,17 @@ static int rm68200_enable(struct drm_panel *panel)
 	if (ctx->enabled)
 		return 0;
 
-	backlight_enable(ctx->backlight);
-
 	ctx->enabled = true;
 
 	return 0;
 }
 
-static int rm68200_get_modes(struct drm_panel *panel)
+static int rm68200_get_modes(struct drm_panel *panel,
+			     struct drm_connector *connector)
 {
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
 	if (!mode) {
 		DRM_ERROR("failed to add mode %ux%ux@%u\n",
 			  default_mode.hdisplay, default_mode.vdisplay,
@@ -350,10 +346,10 @@ static int rm68200_get_modes(struct drm_panel *panel)
 	drm_mode_set_name(mode);
 
 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-	drm_mode_probed_add(panel->connector, mode);
+	drm_mode_probed_add(connector, mode);
 
-	panel->connector->display_info.width_mm = mode->width_mm;
-	panel->connector->display_info.height_mm = mode->height_mm;
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
 
 	return 1;
 }
@@ -391,10 +387,6 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
 		return ret;
 	}
 
-	ctx->backlight = devm_of_find_backlight(dev);
-	if (IS_ERR(ctx->backlight))
-		return PTR_ERR(ctx->backlight);
-
 	mipi_dsi_set_drvdata(dsi, ctx);
 
 	ctx->dev = dev;
@@ -407,6 +399,10 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
 	drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
 		       DRM_MODE_CONNECTOR_DSI);
 
+	ret = drm_panel_of_backlight(&ctx->panel);
+	if (ret)
+		return ret;
+
 	drm_panel_add(&ctx->panel);
 
 	ret = mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
index 31234b7..38ff742 100644
--- a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
+++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
@@ -5,20 +5,22 @@
  * Copyright (C) Purism SPC 2019
  */
 
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_modes.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
-#include <linux/backlight.h>
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/media-bus-format.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/regulator/consumer.h>
+
 #include <video/display_timing.h>
 #include <video/mipi_display.h>
 
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
 #define DRV_NAME "panel-rocktech-jh057n00900"
 
 /* Manufacturer specific Commands send via DSI */
@@ -47,7 +49,6 @@ struct jh057n {
 	struct device *dev;
 	struct drm_panel panel;
 	struct gpio_desc *reset_gpio;
-	struct backlight_device *backlight;
 	struct regulator *vcc;
 	struct regulator *iovcc;
 	bool prepared;
@@ -152,7 +153,7 @@ static int jh057n_enable(struct drm_panel *panel)
 		return ret;
 	}
 
-	return backlight_enable(ctx->backlight);
+	return 0;
 }
 
 static int jh057n_disable(struct drm_panel *panel)
@@ -160,7 +161,6 @@ static int jh057n_disable(struct drm_panel *panel)
 	struct jh057n *ctx = panel_to_jh057n(panel);
 	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
 
-	backlight_disable(ctx->backlight);
 	return mipi_dsi_dcs_set_display_off(dsi);
 }
 
@@ -230,12 +230,13 @@ static const struct drm_display_mode default_mode = {
 	.height_mm   = 130,
 };
 
-static int jh057n_get_modes(struct drm_panel *panel)
+static int jh057n_get_modes(struct drm_panel *panel,
+			    struct drm_connector *connector)
 {
 	struct jh057n *ctx = panel_to_jh057n(panel);
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
 	if (!mode) {
 		DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
 			      default_mode.hdisplay, default_mode.vdisplay,
@@ -246,9 +247,9 @@ static int jh057n_get_modes(struct drm_panel *panel)
 	drm_mode_set_name(mode);
 
 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-	panel->connector->display_info.width_mm = mode->width_mm;
-	panel->connector->display_info.height_mm = mode->height_mm;
-	drm_mode_probed_add(panel->connector, mode);
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
+	drm_mode_probed_add(connector, mode);
 
 	return 1;
 }
@@ -320,10 +321,6 @@ static int jh057n_probe(struct mipi_dsi_device *dsi)
 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
 		MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
 
-	ctx->backlight = devm_of_find_backlight(dev);
-	if (IS_ERR(ctx->backlight))
-		return PTR_ERR(ctx->backlight);
-
 	ctx->vcc = devm_regulator_get(dev, "vcc");
 	if (IS_ERR(ctx->vcc)) {
 		ret = PTR_ERR(ctx->vcc);
@@ -346,6 +343,10 @@ static int jh057n_probe(struct mipi_dsi_device *dsi)
 	drm_panel_init(&ctx->panel, dev, &jh057n_drm_funcs,
 		       DRM_MODE_CONNECTOR_DSI);
 
+	ret = drm_panel_of_backlight(&ctx->panel);
+	if (ret)
+		return ret;
+
 	drm_panel_add(&ctx->panel);
 
 	ret = mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index 170a5cd..ef18559 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -7,7 +7,6 @@
  * This file based on panel-ilitek-ili9881c.c
  */
 
-#include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -29,7 +28,6 @@
 struct rb070d30_panel {
 	struct drm_panel panel;
 	struct mipi_dsi_device *dsi;
-	struct backlight_device *backlight;
 	struct regulator *supply;
 
 	struct {
@@ -84,22 +82,13 @@ static int rb070d30_panel_enable(struct drm_panel *panel)
 	if (ret)
 		return ret;
 
-	ret = backlight_enable(ctx->backlight);
-	if (ret)
-		goto out;
-
 	return 0;
-
-out:
-	mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
-	return ret;
 }
 
 static int rb070d30_panel_disable(struct drm_panel *panel)
 {
 	struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
 
-	backlight_disable(ctx->backlight);
 	return mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
 }
 
@@ -120,14 +109,14 @@ static const struct drm_display_mode default_mode = {
 	.height_mm	= 85,
 };
 
-static int rb070d30_panel_get_modes(struct drm_panel *panel)
+static int rb070d30_panel_get_modes(struct drm_panel *panel,
+				    struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
 	struct drm_display_mode *mode;
 	static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
 
-	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
 	if (!mode) {
 		DRM_DEV_ERROR(&ctx->dsi->dev,
 			      "Failed to add mode " DRM_MODE_FMT "\n",
@@ -140,9 +129,9 @@ static int rb070d30_panel_get_modes(struct drm_panel *panel)
 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
 	drm_mode_probed_add(connector, mode);
 
-	panel->connector->display_info.bpc = 8;
-	panel->connector->display_info.width_mm = mode->width_mm;
-	panel->connector->display_info.height_mm = mode->height_mm;
+	connector->display_info.bpc = 8;
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
 	drm_display_info_set_bus_formats(&connector->display_info,
 					 &bus_format, 1);
 
@@ -208,11 +197,9 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
 		return PTR_ERR(ctx->gpios.shlr);
 	}
 
-	ctx->backlight = devm_of_find_backlight(&dsi->dev);
-	if (IS_ERR(ctx->backlight)) {
-		DRM_DEV_ERROR(&dsi->dev, "Couldn't get our backlight\n");
-		return PTR_ERR(ctx->backlight);
-	}
+	ret = drm_panel_of_backlight(&ctx->panel);
+	if (ret)
+		return ret;
 
 	ret = drm_panel_add(&ctx->panel);
 	if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 250809b..3c52f15 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -261,9 +261,9 @@ static int ld9040_enable(struct drm_panel *panel)
 	return 0;
 }
 
-static int ld9040_get_modes(struct drm_panel *panel)
+static int ld9040_get_modes(struct drm_panel *panel,
+			    struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct ld9040 *ctx = panel_to_ld9040(panel);
 	struct drm_display_mode *mode;
 
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index e3a0397..2150043 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -143,12 +143,12 @@ static int s6d16d0_disable(struct drm_panel *panel)
 	return 0;
 }
 
-static int s6d16d0_get_modes(struct drm_panel *panel)
+static int s6d16d0_get_modes(struct drm_panel *panel,
+			     struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &samsung_s6d16d0_mode);
+	mode = drm_mode_duplicate(connector->dev, &samsung_s6d16d0_mode);
 	if (!mode) {
 		DRM_ERROR("bad mode or failed to add mode\n");
 		return -EINVAL;
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index 938ab72..36ebd5a 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -645,13 +645,13 @@ static const struct s6e3ha2_panel_desc samsung_s6e3hf2 = {
 	.type = HF2_TYPE,
 };
 
-static int s6e3ha2_get_modes(struct drm_panel *panel)
+static int s6e3ha2_get_modes(struct drm_panel *panel,
+			     struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, ctx->desc->mode);
+	mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
 	if (!mode) {
 		DRM_ERROR("failed to add mode %ux%ux@%u\n",
 			ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index a60635e..a3570e0 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -400,12 +400,12 @@ static int s6e63j0x03_enable(struct drm_panel *panel)
 	return 0;
 }
 
-static int s6e63j0x03_get_modes(struct drm_panel *panel)
+static int s6e63j0x03_get_modes(struct drm_panel *panel,
+				struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
 	if (!mode) {
 		DRM_ERROR("failed to add mode %ux%ux@%u\n",
 			default_mode.hdisplay, default_mode.vdisplay,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index ba01af0..a5f76eb 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -362,12 +362,12 @@ static int s6e63m0_enable(struct drm_panel *panel)
 	return 0;
 }
 
-static int s6e63m0_get_modes(struct drm_panel *panel)
+static int s6e63m0_get_modes(struct drm_panel *panel,
+			     struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
 	if (!mode) {
 		DRM_ERROR("failed to add mode %ux%ux@%u\n",
 			  default_mode.hdisplay, default_mode.vdisplay,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index dbced650..8a028d2 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -920,9 +920,9 @@ static int s6e8aa0_enable(struct drm_panel *panel)
 	return 0;
 }
 
-static int s6e8aa0_get_modes(struct drm_panel *panel)
+static int s6e8aa0_get_modes(struct drm_panel *panel,
+			     struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
 	struct drm_display_mode *mode;
 
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index b3619ba..40fcbbb 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -6,7 +6,6 @@
  * Based on Panel Simple driver by Thierry Reding <treding@nvidia.com>
  */
 
-#include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -46,7 +45,6 @@ struct seiko_panel {
 	bool prepared;
 	bool enabled;
 	const struct seiko_panel_desc *desc;
-	struct backlight_device *backlight;
 	struct regulator *dvdd;
 	struct regulator *avdd;
 };
@@ -56,10 +54,9 @@ static inline struct seiko_panel *to_seiko_panel(struct drm_panel *panel)
 	return container_of(panel, struct seiko_panel, base);
 }
 
-static int seiko_panel_get_fixed_modes(struct seiko_panel *panel)
+static int seiko_panel_get_fixed_modes(struct seiko_panel *panel,
+				       struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->base.connector;
-	struct drm_device *drm = panel->base.drm;
 	struct drm_display_mode *mode;
 	unsigned int i, num = 0;
 
@@ -71,9 +68,9 @@ static int seiko_panel_get_fixed_modes(struct seiko_panel *panel)
 		struct videomode vm;
 
 		videomode_from_timing(dt, &vm);
-		mode = drm_mode_create(drm);
+		mode = drm_mode_create(connector->dev);
 		if (!mode) {
-			dev_err(drm->dev, "failed to add mode %ux%u\n",
+			dev_err(panel->base.dev, "failed to add mode %ux%u\n",
 				dt->hactive.typ, dt->vactive.typ);
 			continue;
 		}
@@ -92,9 +89,9 @@ static int seiko_panel_get_fixed_modes(struct seiko_panel *panel)
 	for (i = 0; i < panel->desc->num_modes; i++) {
 		const struct drm_display_mode *m = &panel->desc->modes[i];
 
-		mode = drm_mode_duplicate(drm, m);
+		mode = drm_mode_duplicate(connector->dev, m);
 		if (!mode) {
-			dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+			dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n",
 				m->hdisplay, m->vdisplay, m->vrefresh);
 			continue;
 		}
@@ -128,12 +125,6 @@ static int seiko_panel_disable(struct drm_panel *panel)
 	if (!p->enabled)
 		return 0;
 
-	if (p->backlight) {
-		p->backlight->props.power = FB_BLANK_POWERDOWN;
-		p->backlight->props.state |= BL_CORE_FBBLANK;
-		backlight_update_status(p->backlight);
-	}
-
 	p->enabled = false;
 
 	return 0;
@@ -197,23 +188,18 @@ static int seiko_panel_enable(struct drm_panel *panel)
 	if (p->enabled)
 		return 0;
 
-	if (p->backlight) {
-		p->backlight->props.state &= ~BL_CORE_FBBLANK;
-		p->backlight->props.power = FB_BLANK_UNBLANK;
-		backlight_update_status(p->backlight);
-	}
-
 	p->enabled = true;
 
 	return 0;
 }
 
-static int seiko_panel_get_modes(struct drm_panel *panel)
+static int seiko_panel_get_modes(struct drm_panel *panel,
+				 struct drm_connector *connector)
 {
 	struct seiko_panel *p = to_seiko_panel(panel);
 
 	/* add hard-coded panel modes */
-	return seiko_panel_get_fixed_modes(p);
+	return seiko_panel_get_fixed_modes(p, connector);
 }
 
 static int seiko_panel_get_timings(struct drm_panel *panel,
@@ -245,7 +231,6 @@ static const struct drm_panel_funcs seiko_panel_funcs = {
 static int seiko_panel_probe(struct device *dev,
 					const struct seiko_panel_desc *desc)
 {
-	struct device_node *backlight;
 	struct seiko_panel *panel;
 	int err;
 
@@ -265,18 +250,13 @@ static int seiko_panel_probe(struct device *dev,
 	if (IS_ERR(panel->avdd))
 		return PTR_ERR(panel->avdd);
 
-	backlight = of_parse_phandle(dev->of_node, "backlight", 0);
-	if (backlight) {
-		panel->backlight = of_find_backlight_by_node(backlight);
-		of_node_put(backlight);
-
-		if (!panel->backlight)
-			return -EPROBE_DEFER;
-	}
-
 	drm_panel_init(&panel->base, dev, &seiko_panel_funcs,
 		       DRM_MODE_CONNECTOR_DPI);
 
+	err = drm_panel_of_backlight(&panel->base);
+	if (err)
+		return err;
+
 	err = drm_panel_add(&panel->base);
 	if (err < 0)
 		return err;
@@ -291,11 +271,7 @@ static int seiko_panel_remove(struct platform_device *pdev)
 	struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
 
 	drm_panel_remove(&panel->base);
-
-	seiko_panel_disable(&panel->base);
-
-	if (panel->backlight)
-		put_device(&panel->backlight->dev);
+	drm_panel_disable(&panel->base);
 
 	return 0;
 }
@@ -304,7 +280,7 @@ static void seiko_panel_shutdown(struct platform_device *pdev)
 {
 	struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
 
-	seiko_panel_disable(&panel->base);
+	drm_panel_disable(&panel->base);
 }
 
 static const struct display_timing seiko_43wvf1g_timing = {
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index 5e136c3..b5d1977 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -3,7 +3,6 @@
  * Copyright (C) 2014 NVIDIA Corporation
  */
 
-#include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
@@ -23,7 +22,6 @@ struct sharp_panel {
 	struct mipi_dsi_device *link1;
 	struct mipi_dsi_device *link2;
 
-	struct backlight_device *backlight;
 	struct regulator *supply;
 
 	bool prepared;
@@ -94,8 +92,6 @@ static int sharp_panel_disable(struct drm_panel *panel)
 	if (!sharp->enabled)
 		return 0;
 
-	backlight_disable(sharp->backlight);
-
 	sharp->enabled = false;
 
 	return 0;
@@ -258,8 +254,6 @@ static int sharp_panel_enable(struct drm_panel *panel)
 	if (sharp->enabled)
 		return 0;
 
-	backlight_enable(sharp->backlight);
-
 	sharp->enabled = true;
 
 	return 0;
@@ -278,13 +272,14 @@ static const struct drm_display_mode default_mode = {
 	.vrefresh = 60,
 };
 
-static int sharp_panel_get_modes(struct drm_panel *panel)
+static int sharp_panel_get_modes(struct drm_panel *panel,
+				 struct drm_connector *connector)
 {
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
 	if (!mode) {
-		dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
 			default_mode.hdisplay, default_mode.vdisplay,
 			default_mode.vrefresh);
 		return -ENOMEM;
@@ -292,10 +287,10 @@ static int sharp_panel_get_modes(struct drm_panel *panel)
 
 	drm_mode_set_name(mode);
 
-	drm_mode_probed_add(panel->connector, mode);
+	drm_mode_probed_add(connector, mode);
 
-	panel->connector->display_info.width_mm = 217;
-	panel->connector->display_info.height_mm = 136;
+	connector->display_info.width_mm = 217;
+	connector->display_info.height_mm = 136;
 
 	return 1;
 }
@@ -316,7 +311,7 @@ MODULE_DEVICE_TABLE(of, sharp_of_match);
 
 static int sharp_panel_add(struct sharp_panel *sharp)
 {
-	struct device *dev = &sharp->link1->dev;
+	int ret;
 
 	sharp->mode = &default_mode;
 
@@ -324,14 +319,13 @@ static int sharp_panel_add(struct sharp_panel *sharp)
 	if (IS_ERR(sharp->supply))
 		return PTR_ERR(sharp->supply);
 
-	sharp->backlight = devm_of_find_backlight(dev);
-
-	if (IS_ERR(sharp->backlight))
-		return PTR_ERR(sharp->backlight);
-
 	drm_panel_init(&sharp->base, &sharp->link1->dev, &sharp_panel_funcs,
 		       DRM_MODE_CONNECTOR_DSI);
 
+	ret = drm_panel_of_backlight(&sharp->base);
+	if (ret)
+		return ret;
+
 	return drm_panel_add(&sharp->base);
 }
 
@@ -407,7 +401,7 @@ static int sharp_panel_remove(struct mipi_dsi_device *dsi)
 		return 0;
 	}
 
-	err = sharp_panel_disable(&sharp->base);
+	err = drm_panel_disable(&sharp->base);
 	if (err < 0)
 		dev_err(&dsi->dev, "failed to disable panel: %d\n", err);
 
@@ -428,7 +422,7 @@ static void sharp_panel_shutdown(struct mipi_dsi_device *dsi)
 	if (!sharp)
 		return;
 
-	sharp_panel_disable(&sharp->base);
+	drm_panel_disable(&sharp->base);
 }
 
 static struct mipi_dsi_driver sharp_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index eeab799..1cf3f02 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -100,12 +100,12 @@ static const struct drm_display_mode ls037v7dw01_mode = {
 	.height_mm = 75,
 };
 
-static int ls037v7dw01_get_modes(struct drm_panel *panel)
+static int ls037v7dw01_get_modes(struct drm_panel *panel,
+				 struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &ls037v7dw01_mode);
+	mode = drm_mode_duplicate(connector->dev, &ls037v7dw01_mode);
 	if (!mode)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index b963ba4..ce586c6 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -7,7 +7,6 @@
  * Based on AUO panel driver by Rob Clark <robdclark@gmail.com>
  */
 
-#include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
@@ -25,7 +24,6 @@ struct sharp_nt_panel {
 	struct drm_panel base;
 	struct mipi_dsi_device *dsi;
 
-	struct backlight_device *backlight;
 	struct regulator *supply;
 	struct gpio_desc *reset_gpio;
 
@@ -107,8 +105,6 @@ static int sharp_nt_panel_disable(struct drm_panel *panel)
 	if (!sharp_nt->enabled)
 		return 0;
 
-	backlight_disable(sharp_nt->backlight);
-
 	sharp_nt->enabled = false;
 
 	return 0;
@@ -190,8 +186,6 @@ static int sharp_nt_panel_enable(struct drm_panel *panel)
 	if (sharp_nt->enabled)
 		return 0;
 
-	backlight_enable(sharp_nt->backlight);
-
 	sharp_nt->enabled = true;
 
 	return 0;
@@ -210,24 +204,25 @@ static const struct drm_display_mode default_mode = {
 	.vrefresh = 60,
 };
 
-static int sharp_nt_panel_get_modes(struct drm_panel *panel)
+static int sharp_nt_panel_get_modes(struct drm_panel *panel,
+				    struct drm_connector *connector)
 {
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
 	if (!mode) {
-		dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
-				default_mode.hdisplay, default_mode.vdisplay,
-				default_mode.vrefresh);
+		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+			default_mode.hdisplay, default_mode.vdisplay,
+			default_mode.vrefresh);
 		return -ENOMEM;
 	}
 
 	drm_mode_set_name(mode);
 
-	drm_mode_probed_add(panel->connector, mode);
+	drm_mode_probed_add(connector, mode);
 
-	panel->connector->display_info.width_mm = 54;
-	panel->connector->display_info.height_mm = 95;
+	connector->display_info.width_mm = 54;
+	connector->display_info.height_mm = 95;
 
 	return 1;
 }
@@ -243,6 +238,7 @@ static const struct drm_panel_funcs sharp_nt_panel_funcs = {
 static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
 {
 	struct device *dev = &sharp_nt->dsi->dev;
+	int ret;
 
 	sharp_nt->mode = &default_mode;
 
@@ -259,14 +255,13 @@ static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
 		gpiod_set_value(sharp_nt->reset_gpio, 0);
 	}
 
-	sharp_nt->backlight = devm_of_find_backlight(dev);
-
-	if (IS_ERR(sharp_nt->backlight))
-		return PTR_ERR(sharp_nt->backlight);
-
 	drm_panel_init(&sharp_nt->base, &sharp_nt->dsi->dev,
 		       &sharp_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
 
+	ret = drm_panel_of_backlight(&sharp_nt->base);
+	if (ret)
+		return ret;
+
 	return drm_panel_add(&sharp_nt->base);
 }
 
@@ -308,7 +303,7 @@ static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
 	struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
 	int ret;
 
-	ret = sharp_nt_panel_disable(&sharp_nt->base);
+	ret = drm_panel_disable(&sharp_nt->base);
 	if (ret < 0)
 		dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
 
@@ -325,7 +320,7 @@ static void sharp_nt_panel_shutdown(struct mipi_dsi_device *dsi)
 {
 	struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
 
-	sharp_nt_panel_disable(&sharp_nt->base);
+	drm_panel_disable(&sharp_nt->base);
 }
 
 static const struct of_device_id sharp_nt_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 5d48768..e14c14a 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -21,7 +21,6 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
@@ -105,7 +104,6 @@ struct panel_simple {
 
 	const struct panel_desc *desc;
 
-	struct backlight_device *backlight;
 	struct regulator *supply;
 	struct i2c_adapter *ddc;
 
@@ -119,10 +117,9 @@ static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
 	return container_of(panel, struct panel_simple, base);
 }
 
-static unsigned int panel_simple_get_timings_modes(struct panel_simple *panel)
+static unsigned int panel_simple_get_timings_modes(struct panel_simple *panel,
+						   struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->base.connector;
-	struct drm_device *drm = panel->base.drm;
 	struct drm_display_mode *mode;
 	unsigned int i, num = 0;
 
@@ -131,9 +128,9 @@ static unsigned int panel_simple_get_timings_modes(struct panel_simple *panel)
 		struct videomode vm;
 
 		videomode_from_timing(dt, &vm);
-		mode = drm_mode_create(drm);
+		mode = drm_mode_create(connector->dev);
 		if (!mode) {
-			dev_err(drm->dev, "failed to add mode %ux%u\n",
+			dev_err(panel->base.dev, "failed to add mode %ux%u\n",
 				dt->hactive.typ, dt->vactive.typ);
 			continue;
 		}
@@ -152,19 +149,18 @@ static unsigned int panel_simple_get_timings_modes(struct panel_simple *panel)
 	return num;
 }
 
-static unsigned int panel_simple_get_display_modes(struct panel_simple *panel)
+static unsigned int panel_simple_get_display_modes(struct panel_simple *panel,
+						   struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->base.connector;
-	struct drm_device *drm = panel->base.drm;
 	struct drm_display_mode *mode;
 	unsigned int i, num = 0;
 
 	for (i = 0; i < panel->desc->num_modes; i++) {
 		const struct drm_display_mode *m = &panel->desc->modes[i];
 
-		mode = drm_mode_duplicate(drm, m);
+		mode = drm_mode_duplicate(connector->dev, m);
 		if (!mode) {
-			dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+			dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n",
 				m->hdisplay, m->vdisplay, m->vrefresh);
 			continue;
 		}
@@ -183,10 +179,9 @@ static unsigned int panel_simple_get_display_modes(struct panel_simple *panel)
 	return num;
 }
 
-static int panel_simple_get_non_edid_modes(struct panel_simple *panel)
+static int panel_simple_get_non_edid_modes(struct panel_simple *panel,
+					   struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->base.connector;
-	struct drm_device *drm = panel->base.drm;
 	struct drm_display_mode *mode;
 	bool has_override = panel->override_mode.type;
 	unsigned int num = 0;
@@ -195,18 +190,19 @@ static int panel_simple_get_non_edid_modes(struct panel_simple *panel)
 		return 0;
 
 	if (has_override) {
-		mode = drm_mode_duplicate(drm, &panel->override_mode);
+		mode = drm_mode_duplicate(connector->dev,
+					  &panel->override_mode);
 		if (mode) {
 			drm_mode_probed_add(connector, mode);
 			num = 1;
 		} else {
-			dev_err(drm->dev, "failed to add override mode\n");
+			dev_err(panel->base.dev, "failed to add override mode\n");
 		}
 	}
 
 	/* Only add timings if override was not there or failed to validate */
 	if (num == 0 && panel->desc->num_timings)
-		num = panel_simple_get_timings_modes(panel);
+		num = panel_simple_get_timings_modes(panel, connector);
 
 	/*
 	 * Only add fixed modes if timings/override added no mode.
@@ -216,7 +212,7 @@ static int panel_simple_get_non_edid_modes(struct panel_simple *panel)
 	 */
 	WARN_ON(panel->desc->num_timings && panel->desc->num_modes);
 	if (num == 0)
-		num = panel_simple_get_display_modes(panel);
+		num = panel_simple_get_display_modes(panel, connector);
 
 	connector->display_info.bpc = panel->desc->bpc;
 	connector->display_info.width_mm = panel->desc->size.width;
@@ -236,12 +232,6 @@ static int panel_simple_disable(struct drm_panel *panel)
 	if (!p->enabled)
 		return 0;
 
-	if (p->backlight) {
-		p->backlight->props.power = FB_BLANK_POWERDOWN;
-		p->backlight->props.state |= BL_CORE_FBBLANK;
-		backlight_update_status(p->backlight);
-	}
-
 	if (p->desc->delay.disable)
 		msleep(p->desc->delay.disable);
 
@@ -307,34 +297,30 @@ static int panel_simple_enable(struct drm_panel *panel)
 	if (p->desc->delay.enable)
 		msleep(p->desc->delay.enable);
 
-	if (p->backlight) {
-		p->backlight->props.state &= ~BL_CORE_FBBLANK;
-		p->backlight->props.power = FB_BLANK_UNBLANK;
-		backlight_update_status(p->backlight);
-	}
-
 	p->enabled = true;
 
 	return 0;
 }
 
-static int panel_simple_get_modes(struct drm_panel *panel)
+static int panel_simple_get_modes(struct drm_panel *panel,
+				  struct drm_connector *connector)
 {
 	struct panel_simple *p = to_panel_simple(panel);
 	int num = 0;
 
 	/* probe EDID if a DDC bus is available */
 	if (p->ddc) {
-		struct edid *edid = drm_get_edid(panel->connector, p->ddc);
-		drm_connector_update_edid_property(panel->connector, edid);
+		struct edid *edid = drm_get_edid(connector, p->ddc);
+
+		drm_connector_update_edid_property(connector, edid);
 		if (edid) {
-			num += drm_add_edid_modes(panel->connector, edid);
+			num += drm_add_edid_modes(connector, edid);
 			kfree(edid);
 		}
 	}
 
 	/* add hard-coded panel modes */
-	num += panel_simple_get_non_edid_modes(p);
+	num += panel_simple_get_non_edid_modes(p, connector);
 
 	return num;
 }
@@ -414,9 +400,9 @@ static void panel_simple_parse_panel_timing_node(struct device *dev,
 
 static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
 {
-	struct device_node *backlight, *ddc;
 	struct panel_simple *panel;
 	struct display_timing dt;
+	struct device_node *ddc;
 	int err;
 
 	panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
@@ -442,24 +428,13 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
 		return err;
 	}
 
-	backlight = of_parse_phandle(dev->of_node, "backlight", 0);
-	if (backlight) {
-		panel->backlight = of_find_backlight_by_node(backlight);
-		of_node_put(backlight);
-
-		if (!panel->backlight)
-			return -EPROBE_DEFER;
-	}
-
 	ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
 	if (ddc) {
 		panel->ddc = of_find_i2c_adapter_by_node(ddc);
 		of_node_put(ddc);
 
-		if (!panel->ddc) {
-			err = -EPROBE_DEFER;
-			goto free_backlight;
-		}
+		if (!panel->ddc)
+			return -EPROBE_DEFER;
 	}
 
 	if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
@@ -468,6 +443,10 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
 	drm_panel_init(&panel->base, dev, &panel_simple_funcs,
 		       desc->connector_type);
 
+	err = drm_panel_of_backlight(&panel->base);
+	if (err)
+		goto free_ddc;
+
 	err = drm_panel_add(&panel->base);
 	if (err < 0)
 		goto free_ddc;
@@ -479,9 +458,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
 free_ddc:
 	if (panel->ddc)
 		put_device(&panel->ddc->dev);
-free_backlight:
-	if (panel->backlight)
-		put_device(&panel->backlight->dev);
 
 	return err;
 }
@@ -491,16 +467,12 @@ static int panel_simple_remove(struct device *dev)
 	struct panel_simple *panel = dev_get_drvdata(dev);
 
 	drm_panel_remove(&panel->base);
-
-	panel_simple_disable(&panel->base);
-	panel_simple_unprepare(&panel->base);
+	drm_panel_disable(&panel->base);
+	drm_panel_unprepare(&panel->base);
 
 	if (panel->ddc)
 		put_device(&panel->ddc->dev);
 
-	if (panel->backlight)
-		put_device(&panel->backlight->dev);
-
 	return 0;
 }
 
@@ -508,8 +480,8 @@ static void panel_simple_shutdown(struct device *dev)
 {
 	struct panel_simple *panel = dev_get_drvdata(dev);
 
-	panel_simple_disable(&panel->base);
-	panel_simple_unprepare(&panel->base);
+	drm_panel_disable(&panel->base);
+	drm_panel_unprepare(&panel->base);
 }
 
 static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {
@@ -657,6 +629,35 @@ static const struct panel_desc auo_b101xtn01 = {
 	},
 };
 
+static const struct drm_display_mode auo_b116xak01_mode = {
+	.clock = 69300,
+	.hdisplay = 1366,
+	.hsync_start = 1366 + 48,
+	.hsync_end = 1366 + 48 + 32,
+	.htotal = 1366 + 48 + 32 + 10,
+	.vdisplay = 768,
+	.vsync_start = 768 + 4,
+	.vsync_end = 768 + 4 + 6,
+	.vtotal = 768 + 4 + 6 + 15,
+	.vrefresh = 60,
+	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xak01 = {
+	.modes = &auo_b116xak01_mode,
+	.num_modes = 1,
+	.bpc = 6,
+	.size = {
+		.width = 256,
+		.height = 144,
+	},
+	.delay = {
+		.hpd_absent_delay = 200,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+	.connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
 static const struct drm_display_mode auo_b116xw03_mode = {
 	.clock = 70589,
 	.hdisplay = 1366,
@@ -1036,6 +1037,38 @@ static const struct panel_desc boe_nv101wxmn51 = {
 	},
 };
 
+static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
+	{
+		.clock = 148500,
+		.hdisplay = 1920,
+		.hsync_start = 1920 + 48,
+		.hsync_end = 1920 + 48 + 32,
+		.htotal = 2200,
+		.vdisplay = 1080,
+		.vsync_start = 1080 + 3,
+		.vsync_end = 1080 + 3 + 5,
+		.vtotal = 1125,
+		.vrefresh = 60,
+	},
+};
+
+static const struct panel_desc boe_nv140fhmn49 = {
+	.modes = boe_nv140fhmn49_modes,
+	.num_modes = ARRAY_SIZE(boe_nv140fhmn49_modes),
+	.bpc = 6,
+	.size = {
+		.width = 309,
+		.height = 174,
+	},
+	.delay = {
+		.prepare = 210,
+		.enable = 50,
+		.unprepare = 160,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+	.connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
 static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
 	.clock = 9000,
 	.hdisplay = 480,
@@ -2061,6 +2094,40 @@ static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
 	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
 };
 
+static const struct drm_display_mode logicpd_type_28_mode = {
+	.clock = 9000,
+	.hdisplay = 480,
+	.hsync_start = 480 + 3,
+	.hsync_end = 480 + 3 + 42,
+	.htotal = 480 + 3 + 42 + 2,
+
+	.vdisplay = 272,
+	.vsync_start = 272 + 2,
+	.vsync_end = 272 + 2 + 11,
+	.vtotal = 272 + 2 + 11 + 3,
+	.vrefresh = 60,
+	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc logicpd_type_28 = {
+	.modes = &logicpd_type_28_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 105,
+		.height = 67,
+	},
+	.delay = {
+		.prepare = 200,
+		.enable = 200,
+		.unprepare = 200,
+		.disable = 200,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+		     DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
+};
+
 static const struct panel_desc mitsubishi_aa070mc01 = {
 	.modes = &mitsubishi_aa070mc01_mode,
 	.num_modes = 1,
@@ -2547,6 +2614,30 @@ static const struct panel_desc samsung_ltn140at29_301 = {
 	},
 };
 
+static const struct display_timing satoz_sat050at40h12r2_timing = {
+	.pixelclock = {33300000, 33300000, 50000000},
+	.hactive = {800, 800, 800},
+	.hfront_porch = {16, 210, 354},
+	.hback_porch = {46, 46, 46},
+	.hsync_len = {1, 1, 40},
+	.vactive = {480, 480, 480},
+	.vfront_porch = {7, 22, 147},
+	.vback_porch = {23, 23, 23},
+	.vsync_len = {1, 1, 20},
+};
+
+static const struct panel_desc satoz_sat050at40h12r2 = {
+	.timings = &satoz_sat050at40h12r2_timing,
+	.num_timings = 1,
+	.bpc = 8,
+	.size = {
+		.width = 108,
+		.height = 65,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct drm_display_mode sharp_ld_d5116z01b_mode = {
 	.clock = 168480,
 	.hdisplay = 1920,
@@ -3120,6 +3211,9 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "auo,b101xtn01",
 		.data = &auo_b101xtn01,
 	}, {
+		.compatible = "auo,b116xa01",
+		.data = &auo_b116xak01,
+	}, {
 		.compatible = "auo,b116xw03",
 		.data = &auo_b116xw03,
 	}, {
@@ -3162,6 +3256,9 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "boe,nv101wxmn51",
 		.data = &boe_nv101wxmn51,
 	}, {
+		.compatible = "boe,nv140fhmn49",
+		.data = &boe_nv140fhmn49,
+	}, {
 		.compatible = "cdtech,s043wq26h-ct7",
 		.data = &cdtech_s043wq26h_ct7,
 	}, {
@@ -3288,6 +3385,9 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "lg,lp129qe",
 		.data = &lg_lp129qe,
 	}, {
+		.compatible = "logicpd,type28",
+		.data = &logicpd_type_28,
+	}, {
 		.compatible = "mitsubishi,aa070mc01-ca1",
 		.data = &mitsubishi_aa070mc01,
 	}, {
@@ -3348,6 +3448,9 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "samsung,ltn140at29-301",
 		.data = &samsung_ltn140at29_301,
 	}, {
+		.compatible = "satoz,sat050at40h12r2",
+		.data = &satoz_sat050at40h12r2,
+	}, {
 		.compatible = "sharp,ld-d5116z01b",
 		.data = &sharp_ld_d5116z01b,
 	}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index ee3f23f..4b4f255 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -9,7 +9,6 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 
-#include <linux/backlight.h>
 #include <linux/gpio/consumer.h>
 #include <linux/delay.h>
 #include <linux/module.h>
@@ -103,7 +102,6 @@ struct st7701 {
 	struct mipi_dsi_device *dsi;
 	const struct st7701_panel_desc *desc;
 
-	struct backlight_device *backlight;
 	struct regulator_bulk_data *supplies;
 	struct gpio_desc *reset;
 	unsigned int sleep_delay;
@@ -223,7 +221,6 @@ static int st7701_enable(struct drm_panel *panel)
 	struct st7701 *st7701 = panel_to_st7701(panel);
 
 	ST7701_DSI(st7701, MIPI_DCS_SET_DISPLAY_ON, 0x00);
-	backlight_enable(st7701->backlight);
 
 	return 0;
 }
@@ -232,7 +229,6 @@ static int st7701_disable(struct drm_panel *panel)
 {
 	struct st7701 *st7701 = panel_to_st7701(panel);
 
-	backlight_disable(st7701->backlight);
 	ST7701_DSI(st7701, MIPI_DCS_SET_DISPLAY_OFF, 0x00);
 
 	return 0;
@@ -264,13 +260,14 @@ static int st7701_unprepare(struct drm_panel *panel)
 	return 0;
 }
 
-static int st7701_get_modes(struct drm_panel *panel)
+static int st7701_get_modes(struct drm_panel *panel,
+			    struct drm_connector *connector)
 {
 	struct st7701 *st7701 = panel_to_st7701(panel);
 	const struct drm_display_mode *desc_mode = st7701->desc->mode;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, desc_mode);
+	mode = drm_mode_duplicate(connector->dev, desc_mode);
 	if (!mode) {
 		DRM_DEV_ERROR(&st7701->dsi->dev,
 			      "failed to add mode %ux%ux@%u\n",
@@ -280,10 +277,10 @@ static int st7701_get_modes(struct drm_panel *panel)
 	}
 
 	drm_mode_set_name(mode);
-	drm_mode_probed_add(panel->connector, mode);
+	drm_mode_probed_add(connector, mode);
 
-	panel->connector->display_info.width_mm = desc_mode->width_mm;
-	panel->connector->display_info.height_mm = desc_mode->height_mm;
+	connector->display_info.width_mm = desc_mode->width_mm;
+	connector->display_info.height_mm = desc_mode->height_mm;
 
 	return 1;
 }
@@ -365,10 +362,6 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
 		return PTR_ERR(st7701->reset);
 	}
 
-	st7701->backlight = devm_of_find_backlight(&dsi->dev);
-	if (IS_ERR(st7701->backlight))
-		return PTR_ERR(st7701->backlight);
-
 	drm_panel_init(&st7701->panel, &dsi->dev, &st7701_funcs,
 		       DRM_MODE_CONNECTOR_DSI);
 
@@ -383,6 +376,10 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
 	 */
 	st7701->sleep_delay = 120 + desc->panel_sleep_delay;
 
+	ret = drm_panel_of_backlight(&st7701->panel);
+	if (ret)
+		return ret;
+
 	ret = drm_panel_add(&st7701->panel);
 	if (ret < 0)
 		return ret;
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 108a85b..cc02c54 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -3,7 +3,6 @@
  * Copyright (C) 2017 Free Electrons
  */
 
-#include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
@@ -116,7 +115,6 @@ struct st7789v {
 	struct drm_panel panel;
 	struct spi_device *spi;
 	struct gpio_desc *reset;
-	struct backlight_device *backlight;
 	struct regulator *power;
 };
 
@@ -170,14 +168,14 @@ static const struct drm_display_mode default_mode = {
 	.vrefresh = 60,
 };
 
-static int st7789v_get_modes(struct drm_panel *panel)
+static int st7789v_get_modes(struct drm_panel *panel,
+			     struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
 	if (!mode) {
-		dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
 			default_mode.hdisplay, default_mode.vdisplay,
 			default_mode.vrefresh);
 		return -ENOMEM;
@@ -188,8 +186,8 @@ static int st7789v_get_modes(struct drm_panel *panel)
 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
 	drm_mode_probed_add(connector, mode);
 
-	panel->connector->display_info.width_mm = 61;
-	panel->connector->display_info.height_mm = 103;
+	connector->display_info.width_mm = 61;
+	connector->display_info.height_mm = 103;
 
 	return 1;
 }
@@ -323,12 +321,6 @@ static int st7789v_enable(struct drm_panel *panel)
 {
 	struct st7789v *ctx = panel_to_st7789v(panel);
 
-	if (ctx->backlight) {
-		ctx->backlight->props.state &= ~BL_CORE_FBBLANK;
-		ctx->backlight->props.power = FB_BLANK_UNBLANK;
-		backlight_update_status(ctx->backlight);
-	}
-
 	return st7789v_write_command(ctx, MIPI_DCS_SET_DISPLAY_ON);
 }
 
@@ -339,12 +331,6 @@ static int st7789v_disable(struct drm_panel *panel)
 
 	ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_SET_DISPLAY_OFF));
 
-	if (ctx->backlight) {
-		ctx->backlight->props.power = FB_BLANK_POWERDOWN;
-		ctx->backlight->props.state |= BL_CORE_FBBLANK;
-		backlight_update_status(ctx->backlight);
-	}
-
 	return 0;
 }
 
@@ -370,7 +356,6 @@ static const struct drm_panel_funcs st7789v_drm_funcs = {
 
 static int st7789v_probe(struct spi_device *spi)
 {
-	struct device_node *backlight;
 	struct st7789v *ctx;
 	int ret;
 
@@ -394,26 +379,15 @@ static int st7789v_probe(struct spi_device *spi)
 		return PTR_ERR(ctx->reset);
 	}
 
-	backlight = of_parse_phandle(spi->dev.of_node, "backlight", 0);
-	if (backlight) {
-		ctx->backlight = of_find_backlight_by_node(backlight);
-		of_node_put(backlight);
-
-		if (!ctx->backlight)
-			return -EPROBE_DEFER;
-	}
+	ret = drm_panel_of_backlight(&ctx->panel);
+	if (ret)
+		return ret;
 
 	ret = drm_panel_add(&ctx->panel);
 	if (ret < 0)
-		goto err_free_backlight;
+		return ret;
 
 	return 0;
-
-err_free_backlight:
-	if (ctx->backlight)
-		put_device(&ctx->backlight->dev);
-
-	return ret;
 }
 
 static int st7789v_remove(struct spi_device *spi)
@@ -422,9 +396,6 @@ static int st7789v_remove(struct spi_device *spi)
 
 	drm_panel_remove(&ctx->panel);
 
-	if (ctx->backlight)
-		put_device(&ctx->backlight->dev);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
new file mode 100644
index 0000000..de0abf7
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MIPI-DSI Sony ACX424AKP panel driver. This is a 480x864
+ * AMOLED panel with a command-only DSI interface.
+ *
+ * Copyright (C) Linaro Ltd. 2019
+ * Author: Linus Walleij
+ * Based on code and know-how from Marcus Lorentzon
+ * Copyright (C) ST-Ericsson SA 2010
+ */
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define ACX424_DCS_READ_ID1		0xDA
+#define ACX424_DCS_READ_ID2		0xDB
+#define ACX424_DCS_READ_ID3		0xDC
+#define ACX424_DCS_SET_MDDI		0xAE
+
+/*
+ * Sony seems to use vendor ID 0x81
+ */
+#define DISPLAY_SONY_ACX424AKP_ID1	0x811b
+#define DISPLAY_SONY_ACX424AKP_ID2	0x811a
+/*
+ * The third ID looks like a bug, vendor IDs begin at 0x80
+ * and panel 00 ... seems like default values.
+ */
+#define DISPLAY_SONY_ACX424AKP_ID3	0x8000
+
+struct acx424akp {
+	struct drm_panel panel;
+	struct device *dev;
+	struct backlight_device *bl;
+	struct regulator *supply;
+	struct gpio_desc *reset_gpio;
+	bool video_mode;
+};
+
+static const struct drm_display_mode sony_acx424akp_vid_mode = {
+	.clock = 330000,
+	.hdisplay = 480,
+	.hsync_start = 480 + 15,
+	.hsync_end = 480 + 15 + 0,
+	.htotal = 480 + 15 + 0 + 15,
+	.vdisplay = 864,
+	.vsync_start = 864 + 14,
+	.vsync_end = 864 + 14 + 1,
+	.vtotal = 864 + 14 + 1 + 11,
+	.vrefresh = 60,
+	.width_mm = 48,
+	.height_mm = 84,
+	.flags = DRM_MODE_FLAG_PVSYNC,
+};
+
+/*
+ * The timings are not very helpful as the display is used in
+ * command mode using the maximum HS frequency.
+ */
+static const struct drm_display_mode sony_acx424akp_cmd_mode = {
+	.clock = 420160,
+	.hdisplay = 480,
+	.hsync_start = 480 + 154,
+	.hsync_end = 480 + 154 + 16,
+	.htotal = 480 + 154 + 16 + 32,
+	.vdisplay = 864,
+	.vsync_start = 864 + 1,
+	.vsync_end = 864 + 1 + 1,
+	.vtotal = 864 + 1 + 1 + 1,
+	/*
+	 * Some desired refresh rate; experiments at the maximum "pixel"
+	 * clock speed (HS clock 420 MHz) yield around 117 Hz.
+	 */
+	.vrefresh = 60,
+	.width_mm = 48,
+	.height_mm = 84,
+};
+
+static inline struct acx424akp *panel_to_acx424akp(struct drm_panel *panel)
+{
+	return container_of(panel, struct acx424akp, panel);
+}
+
+#define FOSC			20 /* 20 MHz */
+#define SCALE_FACTOR_NS_DIV_MHZ	1000
+
+static int acx424akp_set_brightness(struct backlight_device *bl)
+{
+	struct acx424akp *acx = bl_get_data(bl);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
+	int period_ns = 1023;
+	int duty_ns = bl->props.brightness;
+	u8 pwm_ratio;
+	u8 pwm_div;
+	u8 par;
+	int ret;
+
+	/* Calculate the PWM duty cycle in n/256's */
+	pwm_ratio = max(((duty_ns * 256) / period_ns) - 1, 1);
+	pwm_div = max(1,
+		      ((FOSC * period_ns) / 256) /
+		      SCALE_FACTOR_NS_DIV_MHZ);
+
+	/* Set up PWM dutycycle ONE byte (differs from the standard) */
+	DRM_DEV_DEBUG(acx->dev, "calculated duty cycle %02x\n", pwm_ratio);
+	ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+				 &pwm_ratio, 1);
+	if (ret < 0) {
+		DRM_DEV_ERROR(acx->dev,
+			      "failed to set display PWM ratio (%d)\n",
+			      ret);
+		return ret;
+	}
+
+	/*
+	 * Sequence to write PWMDIV:
+	 *	address		data
+	 *	0xF3		0xAA   CMD2 Unlock
+	 *	0x00		0x01   Enter CMD2 page 0
+	 *	0X7D		0x01   No reload MTP of CMD2 P1
+	 *	0x22		PWMDIV
+	 *	0x7F		0xAA   CMD2 page 1 lock
+	 */
+	par = 0xaa;
+	ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1);
+	if (ret < 0) {
+		DRM_DEV_ERROR(acx->dev,
+			      "failed to unlock CMD 2 (%d)\n",
+			      ret);
+		return ret;
+	}
+	par = 0x01;
+	ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1);
+	if (ret < 0) {
+		DRM_DEV_ERROR(acx->dev,
+			      "failed to enter page 1 (%d)\n",
+			      ret);
+		return ret;
+	}
+	par = 0x01;
+	ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1);
+	if (ret < 0) {
+		DRM_DEV_ERROR(acx->dev,
+			      "failed to disable MTP reload (%d)\n",
+			      ret);
+		return ret;
+	}
+	ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1);
+	if (ret < 0) {
+		DRM_DEV_ERROR(acx->dev,
+			      "failed to set PWM divisor (%d)\n",
+			      ret);
+		return ret;
+	}
+	par = 0xaa;
+	ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1);
+	if (ret < 0) {
+		DRM_DEV_ERROR(acx->dev,
+			      "failed to lock CMD 2 (%d)\n",
+			      ret);
+		return ret;
+	}
+
+	/* Enable backlight */
+	par = 0x24;
+	ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+				 &par, 1);
+	if (ret < 0) {
+		DRM_DEV_ERROR(acx->dev,
+			      "failed to enable display backlight (%d)\n",
+			      ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct backlight_ops acx424akp_bl_ops = {
+	.update_status = acx424akp_set_brightness,
+};
+
+static int acx424akp_read_id(struct acx424akp *acx)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
+	u8 vendor, version, panel;
+	u16 val;
+	int ret;
+
+	ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID1, &vendor, 1);
+	if (ret < 0) {
+		DRM_DEV_ERROR(acx->dev, "could not vendor ID byte\n");
+		return ret;
+	}
+	ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID2, &version, 1);
+	if (ret < 0) {
+		DRM_DEV_ERROR(acx->dev, "could not read device version byte\n");
+		return ret;
+	}
+	ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID3, &panel, 1);
+	if (ret < 0) {
+		DRM_DEV_ERROR(acx->dev, "could not read panel ID byte\n");
+		return ret;
+	}
+
+	if (vendor == 0x00) {
+		DRM_DEV_ERROR(acx->dev, "device vendor ID is zero\n");
+		return -ENODEV;
+	}
+
+	val = (vendor << 8) | panel;
+	switch (val) {
+	case DISPLAY_SONY_ACX424AKP_ID1:
+	case DISPLAY_SONY_ACX424AKP_ID2:
+	case DISPLAY_SONY_ACX424AKP_ID3:
+		DRM_DEV_INFO(acx->dev,
+			     "MTP vendor: %02x, version: %02x, panel: %02x\n",
+			     vendor, version, panel);
+		break;
+	default:
+		DRM_DEV_INFO(acx->dev,
+			     "unknown vendor: %02x, version: %02x, panel: %02x\n",
+			     vendor, version, panel);
+		break;
+	}
+
+	return 0;
+}
+
+static int acx424akp_power_on(struct acx424akp *acx)
+{
+	int ret;
+
+	ret = regulator_enable(acx->supply);
+	if (ret) {
+		DRM_DEV_ERROR(acx->dev, "failed to enable supply (%d)\n", ret);
+		return ret;
+	}
+
+	/* Assert RESET */
+	gpiod_set_value_cansleep(acx->reset_gpio, 1);
+	udelay(20);
+	/* De-assert RESET */
+	gpiod_set_value_cansleep(acx->reset_gpio, 0);
+	usleep_range(11000, 20000);
+
+	return 0;
+}
+
+static void acx424akp_power_off(struct acx424akp *acx)
+{
+	/* Assert RESET */
+	gpiod_set_value_cansleep(acx->reset_gpio, 1);
+	usleep_range(11000, 20000);
+
+	regulator_disable(acx->supply);
+}
+
+static int acx424akp_prepare(struct drm_panel *panel)
+{
+	struct acx424akp *acx = panel_to_acx424akp(panel);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
+	const u8 mddi = 3;
+	int ret;
+
+	ret = acx424akp_power_on(acx);
+	if (ret)
+		return ret;
+
+	ret = acx424akp_read_id(acx);
+	if (ret) {
+		DRM_DEV_ERROR(acx->dev, "failed to read panel ID (%d)\n", ret);
+		goto err_power_off;
+	}
+
+	/* Enable tearing mode: send TE (tearing effect) at VBLANK */
+	ret = mipi_dsi_dcs_set_tear_on(dsi,
+				       MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+	if (ret) {
+		DRM_DEV_ERROR(acx->dev, "failed to enable vblank TE (%d)\n",
+			      ret);
+		goto err_power_off;
+	}
+
+	/*
+	 * Set MDDI
+	 *
+	 * This presumably deactivates the Qualcomm MDDI interface and
+	 * selects DSI, similar code is found in other drivers such as the
+	 * Sharp LS043T1LE01 which makes us suspect that this panel may be
+	 * using a Novatek NT35565 or similar display driver chip that shares
+	 * this command. Due to the lack of documentation we cannot know for
+	 * sure.
+	 */
+	ret = mipi_dsi_dcs_write(dsi, ACX424_DCS_SET_MDDI,
+				 &mddi, sizeof(mddi));
+	if (ret < 0) {
+		DRM_DEV_ERROR(acx->dev, "failed to set MDDI (%d)\n", ret);
+		goto err_power_off;
+	}
+
+	/* Exit sleep mode */
+	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+	if (ret) {
+		DRM_DEV_ERROR(acx->dev, "failed to exit sleep mode (%d)\n",
+			      ret);
+		goto err_power_off;
+	}
+	msleep(140);
+
+	ret = mipi_dsi_dcs_set_display_on(dsi);
+	if (ret) {
+		DRM_DEV_ERROR(acx->dev, "failed to turn display on (%d)\n",
+			      ret);
+		goto err_power_off;
+	}
+	if (acx->video_mode) {
+		/* In video mode turn peripheral on */
+		ret = mipi_dsi_turn_on_peripheral(dsi);
+		if (ret) {
+			dev_err(acx->dev, "failed to turn on peripheral\n");
+			goto err_power_off;
+		}
+	}
+
+	acx->bl->props.power = FB_BLANK_NORMAL;
+
+	return 0;
+
+err_power_off:
+	acx424akp_power_off(acx);
+	return ret;
+}
+
+static int acx424akp_unprepare(struct drm_panel *panel)
+{
+	struct acx424akp *acx = panel_to_acx424akp(panel);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
+	u8 par;
+	int ret;
+
+	/* Disable backlight */
+	par = 0x00;
+	ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+				 &par, 1);
+	if (ret) {
+		DRM_DEV_ERROR(acx->dev,
+			      "failed to disable display backlight (%d)\n",
+			      ret);
+		return ret;
+	}
+
+	ret = mipi_dsi_dcs_set_display_off(dsi);
+	if (ret) {
+		DRM_DEV_ERROR(acx->dev, "failed to turn display off (%d)\n",
+			      ret);
+		return ret;
+	}
+
+	/* Enter sleep mode */
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+	if (ret) {
+		DRM_DEV_ERROR(acx->dev, "failed to enter sleep mode (%d)\n",
+			      ret);
+		return ret;
+	}
+	msleep(85);
+
+	acx424akp_power_off(acx);
+	acx->bl->props.power = FB_BLANK_POWERDOWN;
+
+	return 0;
+}
+
+static int acx424akp_enable(struct drm_panel *panel)
+{
+	struct acx424akp *acx = panel_to_acx424akp(panel);
+
+	/*
+	 * The backlight is on as long as the display is on,
+	 * so there is no need to call backlight_enable() here.
+	 */
+	acx->bl->props.power = FB_BLANK_UNBLANK;
+
+	return 0;
+}
+
+static int acx424akp_disable(struct drm_panel *panel)
+{
+	struct acx424akp *acx = panel_to_acx424akp(panel);
+
+	/*
+	 * The backlight is on as long as the display is on,
+	 * so there is no need to call backlight_disable() here.
+	 */
+	acx->bl->props.power = FB_BLANK_NORMAL;
+
+	return 0;
+}
+
+static int acx424akp_get_modes(struct drm_panel *panel,
+			       struct drm_connector *connector)
+{
+	struct acx424akp *acx = panel_to_acx424akp(panel);
+	struct drm_display_mode *mode;
+
+	if (acx->video_mode)
+		mode = drm_mode_duplicate(connector->dev,
+					  &sony_acx424akp_vid_mode);
+	else
+		mode = drm_mode_duplicate(connector->dev,
+					  &sony_acx424akp_cmd_mode);
+	if (!mode) {
+		DRM_ERROR("bad mode or failed to add mode\n");
+		return -EINVAL;
+	}
+	drm_mode_set_name(mode);
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
+
+	drm_mode_probed_add(connector, mode);
+
+	return 1; /* Number of modes */
+}
+
+static const struct drm_panel_funcs acx424akp_drm_funcs = {
+	.disable = acx424akp_disable,
+	.unprepare = acx424akp_unprepare,
+	.prepare = acx424akp_prepare,
+	.enable = acx424akp_enable,
+	.get_modes = acx424akp_get_modes,
+};
+
+static int acx424akp_probe(struct mipi_dsi_device *dsi)
+{
+	struct device *dev = &dsi->dev;
+	struct acx424akp *acx;
+	int ret;
+
+	acx = devm_kzalloc(dev, sizeof(struct acx424akp), GFP_KERNEL);
+	if (!acx)
+		return -ENOMEM;
+	acx->video_mode = of_property_read_bool(dev->of_node,
+						"enforce-video-mode");
+
+	mipi_dsi_set_drvdata(dsi, acx);
+	acx->dev = dev;
+
+	dsi->lanes = 2;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	/*
+	 * FIXME: these come from the ST-Ericsson vendor driver for the
+	 * HREF520 and seem to reflect limitations in the PLLs on that
+	 * platform; if you have the datasheet, please cross-check the
+	 * actual max rates.
+	 */
+	dsi->lp_rate = 19200000;
+	dsi->hs_rate = 420160000;
+
+	if (acx->video_mode)
+		/* Burst mode using event for sync */
+		dsi->mode_flags =
+			MIPI_DSI_MODE_VIDEO |
+			MIPI_DSI_MODE_VIDEO_BURST;
+	else
+		dsi->mode_flags =
+			MIPI_DSI_CLOCK_NON_CONTINUOUS |
+			MIPI_DSI_MODE_EOT_PACKET;
+
+	acx->supply = devm_regulator_get(dev, "vddi");
+	if (IS_ERR(acx->supply))
+		return PTR_ERR(acx->supply);
+
+	/* This asserts RESET by default */
+	acx->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+						  GPIOD_OUT_HIGH);
+	if (IS_ERR(acx->reset_gpio)) {
+		ret = PTR_ERR(acx->reset_gpio);
+		if (ret != -EPROBE_DEFER)
+			DRM_DEV_ERROR(dev, "failed to request GPIO (%d)\n",
+				      ret);
+		return ret;
+	}
+
+	drm_panel_init(&acx->panel, dev, &acx424akp_drm_funcs,
+		       DRM_MODE_CONNECTOR_DSI);
+
+	acx->bl = devm_backlight_device_register(dev, "acx424akp", dev, acx,
+						 &acx424akp_bl_ops, NULL);
+	if (IS_ERR(acx->bl)) {
+		DRM_DEV_ERROR(dev, "failed to register backlight device\n");
+		return PTR_ERR(acx->bl);
+	}
+	acx->bl->props.max_brightness = 1023;
+	acx->bl->props.brightness = 512;
+	acx->bl->props.power = FB_BLANK_POWERDOWN;
+
+	ret = drm_panel_add(&acx->panel);
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret < 0) {
+		drm_panel_remove(&acx->panel);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int acx424akp_remove(struct mipi_dsi_device *dsi)
+{
+	struct acx424akp *acx = mipi_dsi_get_drvdata(dsi);
+
+	mipi_dsi_detach(dsi);
+	drm_panel_remove(&acx->panel);
+
+	return 0;
+}
+
+static const struct of_device_id acx424akp_of_match[] = {
+	{ .compatible = "sony,acx424akp" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, acx424akp_of_match);
+
+static struct mipi_dsi_driver acx424akp_driver = {
+	.probe = acx424akp_probe,
+	.remove = acx424akp_remove,
+	.driver = {
+		.name = "panel-sony-acx424akp",
+		.of_match_table = acx424akp_of_match,
+	},
+};
+module_mipi_dsi_driver(acx424akp_driver);
+
+MODULE_AUTHOR("Linus Wallei <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("MIPI-DSI Sony acx424akp Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index d6387d8..5c4b6f6 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -521,12 +521,12 @@ static const struct drm_display_mode acx565akm_mode = {
 	.height_mm = 46,
 };
 
-static int acx565akm_get_modes(struct drm_panel *panel)
+static int acx565akm_get_modes(struct drm_panel *panel,
+			       struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &acx565akm_mode);
+	mode = drm_mode_duplicate(connector->dev, &acx565akm_mode);
 	if (!mode)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index c44d6a6..cf29405 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -17,7 +17,6 @@
  * H. Nikolaus Schaller <hns@goldelico.com>
  */
 
-#include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/spi/spi.h>
@@ -83,7 +82,6 @@ struct td028ttec1_panel {
 	struct drm_panel panel;
 
 	struct spi_device *spi;
-	struct backlight_device *backlight;
 };
 
 #define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
@@ -243,8 +241,6 @@ static int td028ttec1_enable(struct drm_panel *panel)
 	if (ret)
 		return ret;
 
-	backlight_enable(lcd->backlight);
-
 	return 0;
 }
 
@@ -252,8 +248,6 @@ static int td028ttec1_disable(struct drm_panel *panel)
 {
 	struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
 
-	backlight_disable(lcd->backlight);
-
 	jbt_ret_write_0(lcd, JBT_REG_DISPLAY_OFF, NULL);
 
 	return 0;
@@ -287,12 +281,12 @@ static const struct drm_display_mode td028ttec1_mode = {
 	.height_mm = 58,
 };
 
-static int td028ttec1_get_modes(struct drm_panel *panel)
+static int td028ttec1_get_modes(struct drm_panel *panel,
+				struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &td028ttec1_mode);
+	mode = drm_mode_duplicate(connector->dev, &td028ttec1_mode);
 	if (!mode)
 		return -ENOMEM;
 
@@ -334,10 +328,6 @@ static int td028ttec1_probe(struct spi_device *spi)
 	spi_set_drvdata(spi, lcd);
 	lcd->spi = spi;
 
-	lcd->backlight = devm_of_find_backlight(&spi->dev);
-	if (IS_ERR(lcd->backlight))
-		return PTR_ERR(lcd->backlight);
-
 	spi->mode = SPI_MODE_3;
 	spi->bits_per_word = 9;
 
@@ -350,6 +340,10 @@ static int td028ttec1_probe(struct spi_device *spi)
 	drm_panel_init(&lcd->panel, &lcd->spi->dev, &td028ttec1_funcs,
 		       DRM_MODE_CONNECTOR_DPI);
 
+	ret = drm_panel_of_backlight(&lcd->panel);
+	if (ret)
+		return ret;
+
 	return drm_panel_add(&lcd->panel);
 }
 
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index 621b65f..75f1f1f 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -346,12 +346,12 @@ static const struct drm_display_mode td043mtea1_mode = {
 	.height_mm = 56,
 };
 
-static int td043mtea1_get_modes(struct drm_panel *panel)
+static int td043mtea1_get_modes(struct drm_panel *panel,
+				struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct drm_display_mode *mode;
 
-	mode = drm_mode_duplicate(panel->drm, &td043mtea1_mode);
+	mode = drm_mode_duplicate(connector->dev, &td043mtea1_mode);
 	if (!mode)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index 1a5418a..8472d01 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -14,13 +14,13 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_print.h>
 
-#include <linux/backlight.h>
 #include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
 
@@ -77,10 +77,6 @@ struct tpg110 {
 	 */
 	struct drm_panel panel;
 	/**
-	 * @backlight: backlight for this panel
-	 */
-	struct backlight_device *backlight;
-	/**
 	 * @panel_type: the panel mode as detected
 	 */
 	const struct tpg110_panel_mode *panel_mode;
@@ -356,8 +352,6 @@ static int tpg110_disable(struct drm_panel *panel)
 	val &= ~TPG110_CTRL2_PM;
 	tpg110_write_reg(tpg, TPG110_CTRL2_PM, val);
 
-	backlight_disable(tpg->backlight);
-
 	return 0;
 }
 
@@ -366,8 +360,6 @@ static int tpg110_enable(struct drm_panel *panel)
 	struct tpg110 *tpg = to_tpg110(panel);
 	u8 val;
 
-	backlight_enable(tpg->backlight);
-
 	/* Take chip out of standby */
 	val = tpg110_read_reg(tpg, TPG110_CTRL2_PM);
 	val |= TPG110_CTRL2_PM;
@@ -384,9 +376,9 @@ static int tpg110_enable(struct drm_panel *panel)
  * presents the mode that is configured for the system under use,
  * and which is detected by reading the registers of the display.
  */
-static int tpg110_get_modes(struct drm_panel *panel)
+static int tpg110_get_modes(struct drm_panel *panel,
+			    struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct tpg110 *tpg = to_tpg110(panel);
 	struct drm_display_mode *mode;
 
@@ -394,7 +386,7 @@ static int tpg110_get_modes(struct drm_panel *panel)
 	connector->display_info.height_mm = tpg->height;
 	connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
 
-	mode = drm_mode_duplicate(panel->drm, &tpg->panel_mode->mode);
+	mode = drm_mode_duplicate(connector->dev, &tpg->panel_mode->mode);
 	drm_mode_set_name(mode);
 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
 
@@ -432,11 +424,6 @@ static int tpg110_probe(struct spi_device *spi)
 	if (ret)
 		DRM_DEV_ERROR(dev, "no panel height specified\n");
 
-	/* Look for some optional backlight */
-	tpg->backlight = devm_of_find_backlight(dev);
-	if (IS_ERR(tpg->backlight))
-		return PTR_ERR(tpg->backlight);
-
 	/* This asserts the GRESTB signal, putting the display into reset */
 	tpg->grestb = devm_gpiod_get(dev, "grestb", GPIOD_OUT_HIGH);
 	if (IS_ERR(tpg->grestb)) {
@@ -459,6 +446,11 @@ static int tpg110_probe(struct spi_device *spi)
 
 	drm_panel_init(&tpg->panel, dev, &tpg110_drm_funcs,
 		       DRM_MODE_CONNECTOR_DPI);
+
+	ret = drm_panel_of_backlight(&tpg->panel);
+	if (ret)
+		return ret;
+
 	spi_set_drvdata(spi, tpg);
 
 	return drm_panel_add(&tpg->panel);
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index 0feea24..012ca62 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -454,9 +454,9 @@ static int truly_nt35597_enable(struct drm_panel *panel)
 	return 0;
 }
 
-static int truly_nt35597_get_modes(struct drm_panel *panel)
+static int truly_nt35597_get_modes(struct drm_panel *panel,
+				   struct drm_connector *connector)
 {
-	struct drm_connector *connector = panel->connector;
 	struct truly_nt35597 *ctx = panel_to_ctx(panel);
 	struct drm_display_mode *mode;
 	const struct nt35597_config *config;
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
new file mode 100644
index 0000000..1645ace
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xinpeng xpp055c272 5.5" MIPI-DSI panel driver
+ * Copyright (C) 2019 Theobroma Systems Design und Consulting GmbH
+ *
+ * based on
+ *
+ * Rockteck jh057n00900 5.5" MIPI-DSI panel driver
+ * Copyright (C) Purism SPC 2019
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+/* Manufacturer-specific commands sent via DSI */
+#define XPP055C272_CMD_ALL_PIXEL_OFF	0x22
+#define XPP055C272_CMD_ALL_PIXEL_ON	0x23
+#define XPP055C272_CMD_SETDISP		0xb2
+#define XPP055C272_CMD_SETRGBIF		0xb3
+#define XPP055C272_CMD_SETCYC		0xb4
+#define XPP055C272_CMD_SETBGP		0xb5
+#define XPP055C272_CMD_SETVCOM		0xb6
+#define XPP055C272_CMD_SETOTP		0xb7
+#define XPP055C272_CMD_SETPOWER_EXT	0xb8
+#define XPP055C272_CMD_SETEXTC		0xb9
+#define XPP055C272_CMD_SETMIPI		0xbA
+#define XPP055C272_CMD_SETVDC		0xbc
+#define XPP055C272_CMD_SETPCR		0xbf
+#define XPP055C272_CMD_SETSCR		0xc0
+#define XPP055C272_CMD_SETPOWER		0xc1
+#define XPP055C272_CMD_SETECO		0xc6
+#define XPP055C272_CMD_SETPANEL		0xcc
+#define XPP055C272_CMD_SETGAMMA		0xe0
+#define XPP055C272_CMD_SETEQ		0xe3
+#define XPP055C272_CMD_SETGIP1		0xe9
+#define XPP055C272_CMD_SETGIP2		0xea
+
+struct xpp055c272 {
+	struct device *dev;
+	struct drm_panel panel;
+	struct gpio_desc *reset_gpio;
+	struct regulator *vci;
+	struct regulator *iovcc;
+	bool prepared;
+};
+
+static inline struct xpp055c272 *panel_to_xpp055c272(struct drm_panel *panel)
+{
+	return container_of(panel, struct xpp055c272, panel);
+}
+
+#define dsi_generic_write_seq(dsi, cmd, seq...) do {			\
+		static const u8 d[] = { seq };				\
+		int ret;						\
+		ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d));	\
+		if (ret < 0)						\
+			return ret;					\
+	} while (0)
+
+static int xpp055c272_init_sequence(struct xpp055c272 *ctx)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+	struct device *dev = ctx->dev;
+
+	/*
+	 * Init sequence was supplied by the panel vendor without much
+	 * documentation.
+	 */
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETEXTC, 0xf1, 0x12, 0x83);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETMIPI,
+			      0x33, 0x81, 0x05, 0xf9, 0x0e, 0x0e, 0x00, 0x00,
+			      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x25,
+			      0x00, 0x91, 0x0a, 0x00, 0x00, 0x02, 0x4f, 0x01,
+			      0x00, 0x00, 0x37);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPOWER_EXT, 0x25);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPCR, 0x02, 0x11, 0x00);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETRGBIF,
+			      0x0c, 0x10, 0x0a, 0x50, 0x03, 0xff, 0x00, 0x00,
+			      0x00, 0x00);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETSCR,
+			      0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
+			      0x00);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETVDC, 0x46);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPANEL, 0x0b);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETCYC, 0x80);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETDISP, 0xc8, 0x12, 0x30);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETEQ,
+			      0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
+			      0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPOWER,
+			      0x53, 0x00, 0x1e, 0x1e, 0x77, 0xe1, 0xcc, 0xdd,
+			      0x67, 0x77, 0x33, 0x33);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETECO, 0x00, 0x00, 0xff,
+			      0xff, 0x01, 0xff);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETBGP, 0x09, 0x09);
+	msleep(20);
+
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETVCOM, 0x87, 0x95);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGIP1,
+			      0xc2, 0x10, 0x05, 0x05, 0x10, 0x05, 0xa0, 0x12,
+			      0x31, 0x23, 0x3f, 0x81, 0x0a, 0xa0, 0x37, 0x18,
+			      0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x80,
+			      0x01, 0x00, 0x00, 0x00, 0x48, 0xf8, 0x86, 0x42,
+			      0x08, 0x88, 0x88, 0x80, 0x88, 0x88, 0x88, 0x58,
+			      0xf8, 0x87, 0x53, 0x18, 0x88, 0x88, 0x81, 0x88,
+			      0x88, 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+			      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGIP2,
+			      0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+			      0x00, 0x00, 0x00, 0x00, 0x1f, 0x88, 0x81, 0x35,
+			      0x78, 0x88, 0x88, 0x85, 0x88, 0x88, 0x88, 0x0f,
+			      0x88, 0x80, 0x24, 0x68, 0x88, 0x88, 0x84, 0x88,
+			      0x88, 0x88, 0x23, 0x10, 0x00, 0x00, 0x1c, 0x00,
+			      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x05,
+			      0xa0, 0x00, 0x00, 0x00, 0x00);
+	dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGAMMA,
+			      0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38, 0x36,
+			      0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13, 0x11,
+			      0x18, 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38,
+			      0x36, 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13,
+			      0x11, 0x18);
+
+	msleep(60);
+
+	DRM_DEV_DEBUG_DRIVER(dev, "Panel init sequence done\n");
+	return 0;
+}
+
+static int xpp055c272_unprepare(struct drm_panel *panel)
+{
+	struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+	int ret;
+
+	if (!ctx->prepared)
+		return 0;
+
+	ret = mipi_dsi_dcs_set_display_off(dsi);
+	if (ret < 0)
+		DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
+			      ret);
+
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
+			      ret);
+		return ret;
+	}
+
+	regulator_disable(ctx->iovcc);
+	regulator_disable(ctx->vci);
+
+	ctx->prepared = false;
+
+	return 0;
+}
+
+static int xpp055c272_prepare(struct drm_panel *panel)
+{
+	struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+	int ret;
+
+	if (ctx->prepared)
+		return 0;
+
+	DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+	ret = regulator_enable(ctx->vci);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev,
+			      "Failed to enable vci supply: %d\n", ret);
+		return ret;
+	}
+	ret = regulator_enable(ctx->iovcc);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev,
+			      "Failed to enable iovcc supply: %d\n", ret);
+		goto disable_vci;
+	}
+
+	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+	/* T6: 10us */
+	usleep_range(10, 20);
+	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+	/* T8: 20ms */
+	msleep(20);
+
+	ret = xpp055c272_init_sequence(ctx);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+			      ret);
+		goto disable_iovcc;
+	}
+
+	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+		goto disable_iovcc;
+	}
+
+	/* T9: 120ms */
+	msleep(120);
+
+	ret = mipi_dsi_dcs_set_display_on(dsi);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+		goto disable_iovcc;
+	}
+
+	msleep(50);
+
+	ctx->prepared = true;
+
+	return 0;
+
+disable_iovcc:
+	regulator_disable(ctx->iovcc);
+disable_vci:
+	regulator_disable(ctx->vci);
+	return ret;
+}
+
+static const struct drm_display_mode default_mode = {
+	.hdisplay	= 720,
+	.hsync_start	= 720 + 40,
+	.hsync_end	= 720 + 40 + 10,
+	.htotal		= 720 + 40 + 10 + 40,
+	.vdisplay	= 1280,
+	.vsync_start	= 1280 + 22,
+	.vsync_end	= 1280 + 22 + 4,
+	.vtotal		= 1280 + 22 + 4 + 11,
+	.vrefresh	= 60,
+	.clock		= 64000,
+	.width_mm	= 68,
+	.height_mm	= 121,
+};
+
+static int xpp055c272_get_modes(struct drm_panel *panel,
+				struct drm_connector *connector)
+{
+	struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
+	if (!mode) {
+		DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
+			      default_mode.hdisplay, default_mode.vdisplay,
+			      default_mode.vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
+	drm_mode_probed_add(connector, mode);
+
+	return 1;
+}
+
+static const struct drm_panel_funcs xpp055c272_funcs = {
+	.unprepare	= xpp055c272_unprepare,
+	.prepare	= xpp055c272_prepare,
+	.get_modes	= xpp055c272_get_modes,
+};
+
+static int xpp055c272_probe(struct mipi_dsi_device *dsi)
+{
+	struct device *dev = &dsi->dev;
+	struct xpp055c272 *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(ctx->reset_gpio)) {
+		DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+		return PTR_ERR(ctx->reset_gpio);
+	}
+
+	ctx->vci = devm_regulator_get(dev, "vci");
+	if (IS_ERR(ctx->vci)) {
+		ret = PTR_ERR(ctx->vci);
+		if (ret != -EPROBE_DEFER)
+			DRM_DEV_ERROR(dev,
+				      "Failed to request vci regulator: %d\n",
+				      ret);
+		return ret;
+	}
+
+	ctx->iovcc = devm_regulator_get(dev, "iovcc");
+	if (IS_ERR(ctx->iovcc)) {
+		ret = PTR_ERR(ctx->iovcc);
+		if (ret != -EPROBE_DEFER)
+			DRM_DEV_ERROR(dev,
+				      "Failed to request iovcc regulator: %d\n",
+				      ret);
+		return ret;
+	}
+
+	mipi_dsi_set_drvdata(dsi, ctx);
+
+	ctx->dev = dev;
+
+	dsi->lanes = 4;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+	drm_panel_init(&ctx->panel, &dsi->dev, &xpp055c272_funcs,
+		       DRM_MODE_CONNECTOR_DSI);
+
+	ret = drm_panel_of_backlight(&ctx->panel);
+	if (ret)
+		return ret;
+
+	drm_panel_add(&ctx->panel);
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret < 0) {
+		DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+		drm_panel_remove(&ctx->panel);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void xpp055c272_shutdown(struct mipi_dsi_device *dsi)
+{
+	struct xpp055c272 *ctx = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	ret = drm_panel_unprepare(&ctx->panel);
+	if (ret < 0)
+		DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+			      ret);
+
+	ret = drm_panel_disable(&ctx->panel);
+	if (ret < 0)
+		DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+			      ret);
+}
+
+static int xpp055c272_remove(struct mipi_dsi_device *dsi)
+{
+	struct xpp055c272 *ctx = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	xpp055c272_shutdown(dsi);
+
+	ret = mipi_dsi_detach(dsi);
+	if (ret < 0)
+		DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+			      ret);
+
+	drm_panel_remove(&ctx->panel);
+
+	return 0;
+}
+
+static const struct of_device_id xpp055c272_of_match[] = {
+	{ .compatible = "xinpeng,xpp055c272" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, xpp055c272_of_match);
+
+static struct mipi_dsi_driver xpp055c272_driver = {
+	.driver = {
+		.name = "panel-xinpeng-xpp055c272",
+		.of_match_table = xpp055c272_of_match,
+	},
+	.probe	= xpp055c272_probe,
+	.remove = xpp055c272_remove,
+	.shutdown = xpp055c272_shutdown,
+};
+module_mipi_dsi_driver(xpp055c272_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("DRM driver for Xinpeng xpp055c272 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 536ba93..4139870 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright 2019 Collabora ltd. */
 #include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
 #include <linux/clk.h>
@@ -74,8 +75,11 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
 	int ret;
 	struct dev_pm_opp *opp;
 	unsigned long cur_freq;
+	struct device *dev = &pfdev->pdev->dev;
+	struct devfreq *devfreq;
+	struct thermal_cooling_device *cooling;
 
-	ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
+	ret = dev_pm_opp_of_add_table(dev);
 	if (ret == -ENODEV) /* Optional, continue without devfreq */
 		return 0;
 	else if (ret)
@@ -85,29 +89,35 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
 
 	cur_freq = clk_get_rate(pfdev->clock);
 
-	opp = devfreq_recommended_opp(&pfdev->pdev->dev, &cur_freq, 0);
+	opp = devfreq_recommended_opp(dev, &cur_freq, 0);
 	if (IS_ERR(opp))
 		return PTR_ERR(opp);
 
 	panfrost_devfreq_profile.initial_freq = cur_freq;
 	dev_pm_opp_put(opp);
 
-	pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
-			&panfrost_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
-			NULL);
-	if (IS_ERR(pfdev->devfreq.devfreq)) {
-		DRM_DEV_ERROR(&pfdev->pdev->dev, "Couldn't initialize GPU devfreq\n");
-		ret = PTR_ERR(pfdev->devfreq.devfreq);
-		pfdev->devfreq.devfreq = NULL;
-		dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
-		return ret;
+	devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile,
+					  DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
+	if (IS_ERR(devfreq)) {
+		DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
+		dev_pm_opp_of_remove_table(dev);
+		return PTR_ERR(devfreq);
 	}
+	pfdev->devfreq.devfreq = devfreq;
+
+	cooling = of_devfreq_cooling_register(dev->of_node, devfreq);
+	if (IS_ERR(cooling))
+		DRM_DEV_INFO(dev, "Failed to register cooling device\n");
+	else
+		pfdev->devfreq.cooling = cooling;
 
 	return 0;
 }
 
 void panfrost_devfreq_fini(struct panfrost_device *pfdev)
 {
+	if (pfdev->devfreq.cooling)
+		devfreq_cooling_unregister(pfdev->devfreq.cooling);
 	dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
 }
 
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 88b431a..6da59f4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -526,15 +526,11 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
 	kfree(panfrost_priv);
 }
 
-/* DRM_AUTH is required on SUBMIT for now, while all clients share a single
- * address space.  Note that render nodes would be able to submit jobs that
- * could access BOs from clients authenticated with the master node.
- */
 static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
 #define PANFROST_IOCTL(n, func, flags) \
 	DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)
 
-	PANFROST_IOCTL(SUBMIT,		submit,		DRM_RENDER_ALLOW | DRM_AUTH),
+	PANFROST_IOCTL(SUBMIT,		submit,		DRM_RENDER_ALLOW),
 	PANFROST_IOCTL(WAIT_BO,		wait_bo,	DRM_RENDER_ALLOW),
 	PANFROST_IOCTL(CREATE_BO,	create_bo,	DRM_RENDER_ALLOW),
 	PANFROST_IOCTL(MMAP_BO,		mmap_bo,	DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index e364ee0..7c36ec6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -553,12 +553,14 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
 {
 	struct panfrost_device *pfdev = panfrost_priv->pfdev;
 	struct panfrost_job_slot *js = pfdev->js;
-	struct drm_sched_rq *rq;
+	struct drm_gpu_scheduler *sched;
 	int ret, i;
 
 	for (i = 0; i < NUM_JOB_SLOTS; i++) {
-		rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
+		sched = &js->queue[i].sched;
+		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
+					    DRM_SCHED_PRIORITY_NORMAL, &sched,
+					    1, NULL);
 		if (WARN_ON(ret))
 			return ret;
 	}
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 63dfcda..aa8aa8d 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -166,7 +166,7 @@ static int pl111_modeset_init(struct drm_device *dev)
 	priv->bridge = bridge;
 	if (panel) {
 		priv->panel = panel;
-		priv->connector = panel->connector;
+		priv->connector = drm_panel_bridge_connector(bridge);
 	}
 
 	ret = pl111_display_init(dev);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 611cbe7..bfc1631 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -184,7 +184,7 @@ int qxl_device_init(struct qxl_device *qdev,
 
 	if (!qxl_check_device(qdev)) {
 		r = -ENODEV;
-		goto surface_mapping_free;
+		goto rom_unmap;
 	}
 
 	r = qxl_bo_init(qdev);
diff --git a/drivers/gpu/drm/r128/Makefile b/drivers/gpu/drm/r128/Makefile
index ae8a1860..c07a069 100644
--- a/drivers/gpu/drm/r128/Makefile
+++ b/drivers/gpu/drm/r128/Makefile
@@ -3,7 +3,7 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-r128-y   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
+r128-y   := r128_drv.o r128_cce.o r128_state.o r128_irq.o ati_pcigart.o
 
 r128-$(CONFIG_COMPAT)   += r128_ioc32.o
 
diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c
new file mode 100644
index 0000000..9b4072f
--- /dev/null
+++ b/drivers/gpu/drm/r128/ati_pcigart.c
@@ -0,0 +1,209 @@
+/**
+ * \file ati_pcigart.c
+ * ATI PCI GART support
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/export.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_print.h>
+
+#include "ati_pcigart.h"
+
+# define ATI_PCIGART_PAGE_SIZE		4096	/**< PCI GART page size */
+
+static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
+				       struct drm_ati_pcigart_info *gart_info)
+{
+	gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
+						PAGE_SIZE);
+	if (gart_info->table_handle == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void drm_ati_free_pcigart_table(struct drm_device *dev,
+				       struct drm_ati_pcigart_info *gart_info)
+{
+	drm_pci_free(dev, gart_info->table_handle);
+	gart_info->table_handle = NULL;
+}
+
+int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+	struct drm_sg_mem *entry = dev->sg;
+	unsigned long pages;
+	int i;
+	int max_pages;
+
+	/* we need to support large memory configurations */
+	if (!entry) {
+		DRM_ERROR("no scatter/gather memory!\n");
+		return 0;
+	}
+
+	if (gart_info->bus_addr) {
+
+		max_pages = (gart_info->table_size / sizeof(u32));
+		pages = (entry->pages <= max_pages)
+		  ? entry->pages : max_pages;
+
+		for (i = 0; i < pages; i++) {
+			if (!entry->busaddr[i])
+				break;
+			pci_unmap_page(dev->pdev, entry->busaddr[i],
+					 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		}
+
+		if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+			gart_info->bus_addr = 0;
+	}
+
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
+	    gart_info->table_handle) {
+		drm_ati_free_pcigart_table(dev, gart_info);
+	}
+
+	return 1;
+}
+
+int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+	struct drm_local_map *map = &gart_info->mapping;
+	struct drm_sg_mem *entry = dev->sg;
+	void *address = NULL;
+	unsigned long pages;
+	u32 *pci_gart = NULL, page_base, gart_idx;
+	dma_addr_t bus_address = 0;
+	int i, j, ret = -ENOMEM;
+	int max_ati_pages, max_real_pages;
+
+	if (!entry) {
+		DRM_ERROR("no scatter/gather memory!\n");
+		goto done;
+	}
+
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+		DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
+
+		if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
+			DRM_ERROR("fail to set dma mask to 0x%Lx\n",
+				  (unsigned long long)gart_info->table_mask);
+			ret = -EFAULT;
+			goto done;
+		}
+
+		ret = drm_ati_alloc_pcigart_table(dev, gart_info);
+		if (ret) {
+			DRM_ERROR("cannot allocate PCI GART page!\n");
+			goto done;
+		}
+
+		pci_gart = gart_info->table_handle->vaddr;
+		address = gart_info->table_handle->vaddr;
+		bus_address = gart_info->table_handle->busaddr;
+	} else {
+		address = gart_info->addr;
+		bus_address = gart_info->bus_addr;
+		DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
+			  (unsigned long long)bus_address,
+			  (unsigned long)address);
+	}
+
+
+	max_ati_pages = (gart_info->table_size / sizeof(u32));
+	max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
+	pages = (entry->pages <= max_real_pages)
+	    ? entry->pages : max_real_pages;
+
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+		memset(pci_gart, 0, max_ati_pages * sizeof(u32));
+	} else {
+		memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32));
+	}
+
+	gart_idx = 0;
+	for (i = 0; i < pages; i++) {
+		/* we need to support large memory configurations */
+		entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
+						 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
+			DRM_ERROR("unable to map PCIGART pages!\n");
+			drm_ati_pcigart_cleanup(dev, gart_info);
+			address = NULL;
+			bus_address = 0;
+			ret = -ENOMEM;
+			goto done;
+		}
+		page_base = (u32) entry->busaddr[i];
+
+		for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+			u32 offset;
+			u32 val;
+
+			switch(gart_info->gart_reg_if) {
+			case DRM_ATI_GART_IGP:
+				val = page_base | 0xc;
+				break;
+			case DRM_ATI_GART_PCIE:
+				val = (page_base >> 8) | 0xc;
+				break;
+			default:
+			case DRM_ATI_GART_PCI:
+				val = page_base;
+				break;
+			}
+			if (gart_info->gart_table_location ==
+			    DRM_ATI_GART_MAIN) {
+				pci_gart[gart_idx] = cpu_to_le32(val);
+			} else {
+				offset = gart_idx * sizeof(u32);
+				writel(val, (void __iomem *)map->handle + offset);
+			}
+			gart_idx++;
+			page_base += ATI_PCIGART_PAGE_SIZE;
+		}
+	}
+	ret = 0;
+
+#if defined(__i386__) || defined(__x86_64__)
+	wbinvd();
+#else
+	mb();
+#endif
+
+      done:
+	gart_info->addr = address;
+	gart_info->bus_addr = bus_address;
+	return ret;
+}
diff --git a/include/drm/ati_pcigart.h b/drivers/gpu/drm/r128/ati_pcigart.h
similarity index 100%
rename from include/drm/ati_pcigart.h
rename to drivers/gpu/drm/r128/ati_pcigart.h
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index fd74f74..b7a5f16 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -30,10 +30,10 @@
  */
 
 #include <linux/module.h>
+#include <linux/pci.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_pciids.h>
 #include <drm/drm_vblank.h>
 #include <drm/r128_drm.h>
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index ba8c30e..8b25612 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -39,11 +39,12 @@
 #include <linux/io.h>
 #include <linux/irqreturn.h>
 
-#include <drm/ati_pcigart.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_legacy.h>
 #include <drm/r128_drm.h>
 
+#include "ati_pcigart.h"
+
 /* General customization:
  */
 #define DRIVER_AUTHOR		"Gareth Hughes, VA Linux Systems Inc."
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 364b895..1bf06c9 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -25,6 +25,7 @@
 #ifndef ATOM_H
 #define ATOM_H
 
+#include <linux/mutex.h>
 #include <linux/types.h>
 
 #define ATOM_BIOS_MAGIC		0xAA55
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index da2c9e2..be58369 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -244,9 +244,8 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
-	if (ASIC_IS_DCE8(rdev)) {
+	if (ASIC_IS_DCE8(rdev))
 		WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
-	}
 }
 
 static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 6f38375..15b00a3 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -412,7 +412,6 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-	struct radeon_connector_atom_dig *dig_connector;
 	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
 	u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
 	u8 tmp;
@@ -423,8 +422,6 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
 	if (!radeon_connector->con_priv)
 		return panel_mode;
 
-	dig_connector = radeon_connector->con_priv;
-
 	if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
 		/* DP bridge chips */
 		if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
@@ -816,9 +813,8 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
 	dp_info.use_dpencoder = true;
 	index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
 	if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
-		if (crev > 1) {
+		if (crev > 1)
 			dp_info.use_dpencoder = false;
-		}
 	}
 
 	dp_info.enc_id = 0;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index cc8f32a..cc5ee1b 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -26,10 +26,10 @@
 
 #include <linux/backlight.h>
 #include <linux/dmi.h>
+#include <linux/pci.h>
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "atom.h"
@@ -1885,11 +1885,10 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
 			if (ASIC_IS_AVIVO(rdev))
 				args.v1.ucCRTC = radeon_crtc->crtc_id;
 			else {
-				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
+				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1)
 					args.v1.ucCRTC = radeon_crtc->crtc_id;
-				} else {
+				else
 					args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
-				}
 			}
 			switch (radeon_encoder->encoder_id) {
 			case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
@@ -2234,9 +2233,9 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
 		DRM_ERROR("Got encoder index incorrect - returning 0\n");
 		return 0;
 	}
-	if (rdev->mode_info.active_encoders & (1 << enc_idx)) {
+	if (rdev->mode_info.active_encoders & (1 << enc_idx))
 		DRM_ERROR("chosen encoder in use %d\n", enc_idx);
-	}
+
 	rdev->mode_info.active_encoders |= (1 << enc_idx);
 	return enc_idx;
 }
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index a570ce4..ab4d210 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -68,11 +68,6 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
 			memcpy(&out, &buf[1], num);
 		args.lpI2CDataOut = cpu_to_le16(out);
 	} else {
-		if (num > ATOM_MAX_HW_I2C_READ) {
-			DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
-			r = -EINVAL;
-			goto done;
-		}
 		args.ucRegIndex = 0;
 		args.lpI2CDataOut = 0;
 	}
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index ce37de0..d1d8aaf 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -22,10 +22,9 @@
  * Authors: Alex Deucher
  */
 
+#include <linux/pci.h>
 #include <linux/seq_file.h>
 
-#include <drm/drm_pci.h>
-
 #include "atom.h"
 #include "btc_dpm.h"
 #include "btcd.h"
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index c6fd123..a9257be 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -22,10 +22,9 @@
  */
 
 #include <linux/firmware.h>
+#include <linux/pci.h>
 #include <linux/seq_file.h>
 
-#include <drm/drm_pci.h>
-
 #include "atom.h"
 #include "ci_dpm.h"
 #include "cikd.h"
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 40a7e70..5c42877 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -23,10 +23,10 @@
  */
 
 #include <linux/firmware.h>
-#include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
 
-#include <drm/drm_pci.h>
 #include <drm/drm_vblank.h>
 
 #include "atom.h"
@@ -8137,7 +8137,7 @@ static void cik_uvd_init(struct radeon_device *rdev)
 		 * there. So it is pointless to try to go through that code
 		 * hence why we disable uvd here.
 		 */
-		rdev->has_uvd = 0;
+		rdev->has_uvd = false;
 		return;
 	}
 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -8209,7 +8209,7 @@ static void cik_vce_init(struct radeon_device *rdev)
 		 * there. So it is pointless to try to go through that code
 		 * hence why we disable vce here.
 		 */
-		rdev->has_vce = 0;
+		rdev->has_vce = false;
 		return;
 	}
 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 35b9dc6..68403e7 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -333,7 +333,7 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
 	u32 me_cntl, reg_offset;
 	int i;
 
-	if (enable == false) {
+	if (!enable) {
 		cik_sdma_gfx_stop(rdev);
 		cik_sdma_rlc_stop(rdev);
 	}
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 32ed60f..35b177d 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -22,7 +22,7 @@
  * Authors: Alex Deucher
  */
 
-#include <drm/drm_pci.h>
+#include <linux/pci.h>
 
 #include "atom.h"
 #include "cypress_dpm.h"
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1d978a3..14d90dc 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -23,9 +23,9 @@
  */
 
 #include <linux/firmware.h>
+#include <linux/pci.h>
 #include <linux/slab.h>
 
-#include <drm/drm_pci.h>
 #include <drm/drm_vblank.h>
 #include <drm/radeon_drm.h>
 
@@ -4945,7 +4945,7 @@ static void evergreen_uvd_init(struct radeon_device *rdev)
 		 * there. So it is pointless to try to go through that code
 		 * hence why we disable uvd here.
 		 */
-		rdev->has_uvd = 0;
+		rdev->has_uvd = false;
 		return;
 	}
 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 0d8d30b..5e6086e 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -21,10 +21,9 @@
  *
  */
 
+#include <linux/pci.h>
 #include <linux/seq_file.h>
 
-#include <drm/drm_pci.h>
-
 #include "cikd.h"
 #include "kv_dpm.h"
 #include "r600_dpm.h"
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 410f626..02feb08 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -23,10 +23,10 @@
  */
 
 #include <linux/firmware.h>
-#include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
 
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "atom.h"
@@ -2017,7 +2017,7 @@ static void cayman_uvd_init(struct radeon_device *rdev)
 		 * there. So it is pointless to try to go through that code
 		 * hence why we disable uvd here.
 		 */
-		rdev->has_uvd = 0;
+		rdev->has_uvd = false;
 		return;
 	}
 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -2085,7 +2085,7 @@ static void cayman_vce_init(struct radeon_device *rdev)
 		 * there. So it is pointless to try to go through that code
 		 * hence why we disable vce here.
 		 */
-		rdev->has_vce = 0;
+		rdev->has_vce = false;
 		return;
 	}
 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index d9e62ca..b57c37d 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -22,10 +22,9 @@
  */
 
 #include <linux/math64.h>
+#include <linux/pci.h>
 #include <linux/seq_file.h>
 
-#include <drm/drm_pci.h>
-
 #include "atom.h"
 #include "ni_dpm.h"
 #include "nid.h"
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 110fb38..24c8db6 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -26,16 +26,16 @@
  *          Jerome Glisse
  */
 
-#include <linux/seq_file.h>
-#include <linux/slab.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
 
 #include <drm/drm_debugfs.h>
 #include <drm/drm_device.h>
 #include <drm/drm_file.h>
 #include <drm/drm_fourcc.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_vblank.h>
 #include <drm/radeon_drm.h>
 
@@ -1823,9 +1823,9 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 	case RADEON_PP_TXFORMAT_2:
 		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
 		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
-			track->textures[i].use_pitch = 1;
+			track->textures[i].use_pitch = true;
 		} else {
-			track->textures[i].use_pitch = 0;
+			track->textures[i].use_pitch = false;
 			track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
 			track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
 		}
@@ -2387,12 +2387,12 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
 		else
 			track->num_texture = 6;
 		track->maxy = 2048;
-		track->separate_cube = 1;
+		track->separate_cube = true;
 	} else {
 		track->num_cb = 4;
 		track->num_texture = 16;
 		track->maxy = 4096;
-		track->separate_cube = 0;
+		track->separate_cube = false;
 		track->aaresolve = false;
 		track->aa.robj = NULL;
 	}
@@ -2815,7 +2815,7 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
 	uint32_t temp;
 
 	temp = RREG32(RADEON_CONFIG_CNTL);
-	if (state == false) {
+	if (!state) {
 		temp &= ~RADEON_CFG_VGA_RAM_EN;
 		temp |= RADEON_CFG_VGA_IO_DIS;
 	} else {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 44856e3..3b7ead5 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -26,6 +26,7 @@
  *          Jerome Glisse
  */
 
+#include <linux/pci.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 
@@ -34,7 +35,6 @@
 #include <drm/drm_debugfs.h>
 #include <drm/drm_device.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "r100_track.h"
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 83282ee..1d4c04e 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -26,13 +26,13 @@
  *          Jerome Glisse
  */
 
+#include <linux/pci.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 
 #include <drm/drm_debugfs.h>
 #include <drm/drm_device.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 
 #include "atom.h"
 #include "r100d.h"
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 033bc46..d9a33ca 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -26,14 +26,14 @@
  *          Jerome Glisse
  */
 
-#include <linux/slab.h>
-#include <linux/seq_file.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
 
 #include <drm/drm_debugfs.h>
 #include <drm/drm_device.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_vblank.h>
 #include <drm/radeon_drm.h>
 
@@ -3053,7 +3053,7 @@ static void r600_uvd_init(struct radeon_device *rdev)
 		 * there. So it is pointless to try to go through that code
 		 * hence why we disable uvd here.
 		 */
-		rdev->has_uvd = 0;
+		rdev->has_uvd = false;
 		return;
 	}
 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -3191,7 +3191,7 @@ void r600_vga_set_state(struct radeon_device *rdev, bool state)
 	uint32_t temp;
 
 	temp = RREG32(CONFIG_CNTL);
-	if (state == false) {
+	if (!state) {
 		temp &= ~(1<<0);
 		temp |= (1<<1);
 	} else {
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index d6c28a5..49e8266 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -350,7 +350,7 @@ static void r600_cs_track_init(struct r600_cs_track *track)
 static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 {
 	struct r600_cs_track *track = p->track;
-	u32 slice_tile_max, size, tmp;
+	u32 slice_tile_max, tmp;
 	u32 height, height_align, pitch, pitch_align, depth_align;
 	u64 base_offset, base_align;
 	struct array_mode_checker array_check;
@@ -360,7 +360,6 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 	/* When resolve is used, the second colorbuffer has always 1 sample. */
 	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
 
-	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
 	format = G_0280A0_FORMAT(track->cb_color_info[i]);
 	if (!r600_fmt_is_valid_color(format)) {
 		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
@@ -517,7 +516,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
 {
 	struct r600_cs_track *track = p->track;
-	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
+	u32 nviews, bpe, ntiles, slice_tile_max, tmp;
 	u32 height_align, pitch_align, depth_align;
 	u32 pitch = 8192;
 	u32 height = 8192;
@@ -564,7 +563,6 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
 		}
 		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
 	} else {
-		size = radeon_bo_size(track->db_bo);
 		/* pitch in pixels */
 		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
 		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
@@ -2342,7 +2340,6 @@ int r600_cs_parse(struct radeon_cs_parser *p)
 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
 			   struct radeon_bo_list **cs_reloc)
 {
-	struct radeon_cs_chunk *relocs_chunk;
 	unsigned idx;
 
 	*cs_reloc = NULL;
@@ -2350,7 +2347,6 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
 		DRM_ERROR("No relocation chunk !\n");
 		return -EINVAL;
 	}
-	relocs_chunk = p->chunk_relocs;
 	idx = p->dma_reloc_idx;
 	if (idx >= p->nrelocs) {
 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 4de16f3..0aca7bd 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -25,9 +25,10 @@
  *    Jerome Glisse <glisse@freedesktop.org>
  */
 
+#include <linux/pci.h>
+
 #include <drm/drm_agpsupport.h>
 #include <drm/drm_device.h>
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index dc3c222..495700d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -27,10 +27,10 @@
  */
 
 #include <linux/console.h>
+#include <linux/pci.h>
 #include <linux/vgaarb.h>
 
 #include <drm/drm_crtc_helper.h>
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 226a7bf..848ef68 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -24,8 +24,9 @@
  *          Alex Deucher
  */
 
+#include <linux/pci.h>
+
 #include <drm/drm_device.h>
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "radeon.h"
@@ -569,7 +570,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 		path_size += le16_to_cpu(path->usSize);
 
 		if (device_support & le16_to_cpu(path->usDeviceTag)) {
-			uint8_t con_obj_id, con_obj_num, con_obj_type;
+			uint8_t con_obj_id, con_obj_num;
 
 			con_obj_id =
 			    (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
@@ -577,9 +578,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 			con_obj_num =
 			    (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
 			    >> ENUM_ID_SHIFT;
-			con_obj_type =
-			    (le16_to_cpu(path->usConnObjectId) &
-			     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
 			/* TODO CV support */
 			if (le16_to_cpu(path->usDeviceTag) ==
@@ -647,15 +645,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 			router.ddc_valid = false;
 			router.cd_valid = false;
 			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
-				uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
-
-				grph_obj_id =
-				    (le16_to_cpu(path->usGraphicObjIds[j]) &
-				     OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
-				grph_obj_num =
-				    (le16_to_cpu(path->usGraphicObjIds[j]) &
-				     ENUM_ID_MASK) >> ENUM_ID_SHIFT;
-				grph_obj_type =
+				uint8_t grph_obj_type =
 				    (le16_to_cpu(path->usGraphicObjIds[j]) &
 				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 72db2b4..8c63ccb 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -288,7 +288,7 @@ static void radeon_audio_interface_init(struct radeon_device *rdev)
 	} else {
 		rdev->audio.funcs = &r600_funcs;
 		rdev->audio.hdmi_funcs = &r600_hdmi_funcs;
-		rdev->audio.dp_funcs = 0;
+		rdev->audio.dp_funcs = NULL;
 	}
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 4d1490f..c42f73f 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -26,11 +26,11 @@
  *          Jerome Glisse
  */
 
-#include <linux/slab.h>
 #include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
 
 #include <drm/drm_device.h>
-#include <drm/drm_pci.h>
 
 #include "atom.h"
 #include "radeon.h"
@@ -664,17 +664,17 @@ bool radeon_get_bios(struct radeon_device *rdev)
 	uint16_t tmp;
 
 	r = radeon_atrm_get_bios(rdev);
-	if (r == false)
+	if (!r)
 		r = radeon_acpi_vfct_bios(rdev);
-	if (r == false)
+	if (!r)
 		r = igp_read_bios_from_vram(rdev);
-	if (r == false)
+	if (!r)
 		r = radeon_read_bios(rdev);
-	if (r == false)
+	if (!r)
 		r = radeon_read_disabled_bios(rdev);
-	if (r == false)
+	if (!r)
 		r = radeon_read_platform_bios(rdev);
-	if (r == false || rdev->bios == NULL) {
+	if (!r || rdev->bios == NULL) {
 		DRM_ERROR("Unable to locate a BIOS ROM\n");
 		rdev->bios = NULL;
 		return false;
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 9057b32..c594ca6 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -26,8 +26,9 @@
  *          Jerome Glisse
  */
 
+#include <linux/pci.h>
+
 #include <drm/drm_device.h>
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index c18ae15..c3e49c9 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -25,8 +25,9 @@
  *          Alex Deucher
  */
 
+#include <linux/pci.h>
+
 #include <drm/drm_device.h>
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "radeon.h"
@@ -2638,7 +2639,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
 {
 	struct drm_device *dev = rdev->ddev;
 	u16 offset, misc, misc2 = 0;
-	u8 rev, blocks, tmp;
+	u8 rev, tmp;
 	int state_index = 0;
 	struct radeon_i2c_bus_rec i2c_bus;
 
@@ -2731,7 +2732,6 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
 		offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
 		if (offset) {
 			rev = RBIOS8(offset);
-			blocks = RBIOS8(offset + 0x2);
 			/* power mode 0 tends to be the only valid one */
 			rdev->pm.power_state[state_index].num_clock_modes = 1;
 			rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index c07427d..fe12d9d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -440,7 +440,7 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
 				if (radeon_conflict->use_digital)
 					continue;
 
-				if (priority == true) {
+				if (priority) {
 					DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n",
 						      conflict->name);
 					DRM_DEBUG_KMS("in favor of %s\n",
@@ -700,9 +700,9 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
 			else
 				ret = radeon_legacy_get_tmds_info_from_combios(radeon_encoder, tmds);
 		}
-		if (val == 1 || ret == false) {
+		if (val == 1 || !ret)
 			radeon_legacy_get_tmds_info_from_table(radeon_encoder, tmds);
-		}
+
 		radeon_property_change_mode(&radeon_encoder->base);
 	}
 
@@ -1861,6 +1861,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 	struct radeon_connector_atom_dig *radeon_dig_connector;
 	struct drm_encoder *encoder;
 	struct radeon_encoder *radeon_encoder;
+	struct i2c_adapter *ddc = NULL;
 	uint32_t subpixel_order = SubPixelNone;
 	bool shared_ddc = false;
 	bool is_dp_bridge = false;
@@ -1938,17 +1939,21 @@ radeon_add_atom_connector(struct drm_device *dev,
 		radeon_connector->con_priv = radeon_dig_connector;
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
-			if (radeon_connector->ddc_bus)
+			if (radeon_connector->ddc_bus) {
 				has_aux = true;
-			else
+				ddc = &radeon_connector->ddc_bus->adapter;
+			} else {
 				DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
 		}
 		switch (connector_type) {
 		case DRM_MODE_CONNECTOR_VGA:
 		case DRM_MODE_CONNECTOR_DVIA:
 		default:
-			drm_connector_init(dev, &radeon_connector->base,
-					   &radeon_dp_connector_funcs, connector_type);
+			drm_connector_init_with_ddc(dev, &radeon_connector->base,
+						    &radeon_dp_connector_funcs,
+						    connector_type,
+						    ddc);
 			drm_connector_helper_add(&radeon_connector->base,
 						 &radeon_dp_connector_helper_funcs);
 			connector->interlace_allowed = true;
@@ -1970,8 +1975,10 @@ radeon_add_atom_connector(struct drm_device *dev,
 		case DRM_MODE_CONNECTOR_HDMIA:
 		case DRM_MODE_CONNECTOR_HDMIB:
 		case DRM_MODE_CONNECTOR_DisplayPort:
-			drm_connector_init(dev, &radeon_connector->base,
-					   &radeon_dp_connector_funcs, connector_type);
+			drm_connector_init_with_ddc(dev, &radeon_connector->base,
+						    &radeon_dp_connector_funcs,
+						    connector_type,
+						    ddc);
 			drm_connector_helper_add(&radeon_connector->base,
 						 &radeon_dp_connector_helper_funcs);
 			drm_object_attach_property(&radeon_connector->base.base,
@@ -2018,8 +2025,10 @@ radeon_add_atom_connector(struct drm_device *dev,
 			break;
 		case DRM_MODE_CONNECTOR_LVDS:
 		case DRM_MODE_CONNECTOR_eDP:
-			drm_connector_init(dev, &radeon_connector->base,
-					   &radeon_lvds_bridge_connector_funcs, connector_type);
+			drm_connector_init_with_ddc(dev, &radeon_connector->base,
+						    &radeon_lvds_bridge_connector_funcs,
+						    connector_type,
+						    ddc);
 			drm_connector_helper_add(&radeon_connector->base,
 						 &radeon_dp_connector_helper_funcs);
 			drm_object_attach_property(&radeon_connector->base.base,
@@ -2033,13 +2042,18 @@ radeon_add_atom_connector(struct drm_device *dev,
 	} else {
 		switch (connector_type) {
 		case DRM_MODE_CONNECTOR_VGA:
-			drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-			drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
 			if (i2c_bus->valid) {
 				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 				if (!radeon_connector->ddc_bus)
 					DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				else
+					ddc = &radeon_connector->ddc_bus->adapter;
 			}
+			drm_connector_init_with_ddc(dev, &radeon_connector->base,
+						    &radeon_vga_connector_funcs,
+						    connector_type,
+						    ddc);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
 			radeon_connector->dac_load_detect = true;
 			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.load_detect_property,
@@ -2058,13 +2072,18 @@ radeon_add_atom_connector(struct drm_device *dev,
 			connector->doublescan_allowed = true;
 			break;
 		case DRM_MODE_CONNECTOR_DVIA:
-			drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-			drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
 			if (i2c_bus->valid) {
 				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 				if (!radeon_connector->ddc_bus)
 					DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				else
+					ddc = &radeon_connector->ddc_bus->adapter;
 			}
+			drm_connector_init_with_ddc(dev, &radeon_connector->base,
+						    &radeon_vga_connector_funcs,
+						    connector_type,
+						    ddc);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
 			radeon_connector->dac_load_detect = true;
 			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.load_detect_property,
@@ -2089,13 +2108,18 @@ radeon_add_atom_connector(struct drm_device *dev,
 				goto failed;
 			radeon_dig_connector->igp_lane_info = igp_lane_info;
 			radeon_connector->con_priv = radeon_dig_connector;
-			drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-			drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
 			if (i2c_bus->valid) {
 				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 				if (!radeon_connector->ddc_bus)
 					DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				else
+					ddc = &radeon_connector->ddc_bus->adapter;
 			}
+			drm_connector_init_with_ddc(dev, &radeon_connector->base,
+						    &radeon_dvi_connector_funcs,
+						    connector_type,
+						    ddc);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
 			subpixel_order = SubPixelHorizontalRGB;
 			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.coherent_mode_property,
@@ -2146,13 +2170,18 @@ radeon_add_atom_connector(struct drm_device *dev,
 				goto failed;
 			radeon_dig_connector->igp_lane_info = igp_lane_info;
 			radeon_connector->con_priv = radeon_dig_connector;
-			drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-			drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
 			if (i2c_bus->valid) {
 				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 				if (!radeon_connector->ddc_bus)
 					DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				else
+					ddc = &radeon_connector->ddc_bus->adapter;
 			}
+			drm_connector_init_with_ddc(dev, &radeon_connector->base,
+						    &radeon_dvi_connector_funcs,
+						    connector_type,
+						    ddc);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
 			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.coherent_mode_property,
 						      1);
@@ -2196,15 +2225,20 @@ radeon_add_atom_connector(struct drm_device *dev,
 				goto failed;
 			radeon_dig_connector->igp_lane_info = igp_lane_info;
 			radeon_connector->con_priv = radeon_dig_connector;
-			drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
-			drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 			if (i2c_bus->valid) {
 				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
-				if (radeon_connector->ddc_bus)
+				if (radeon_connector->ddc_bus) {
 					has_aux = true;
-				else
+					ddc = &radeon_connector->ddc_bus->adapter;
+				} else {
 					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				}
 			}
+			drm_connector_init_with_ddc(dev, &radeon_connector->base,
+						    &radeon_dp_connector_funcs,
+						    connector_type,
+						    ddc);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 			subpixel_order = SubPixelHorizontalRGB;
 			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.coherent_mode_property,
@@ -2246,15 +2280,20 @@ radeon_add_atom_connector(struct drm_device *dev,
 				goto failed;
 			radeon_dig_connector->igp_lane_info = igp_lane_info;
 			radeon_connector->con_priv = radeon_dig_connector;
-			drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
-			drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 			if (i2c_bus->valid) {
 				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
-				if (radeon_connector->ddc_bus)
+				if (radeon_connector->ddc_bus) {
 					has_aux = true;
-				else
+					ddc = &radeon_connector->ddc_bus->adapter;
+				} else {
 					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				}
 			}
+			drm_connector_init_with_ddc(dev, &radeon_connector->base,
+						    &radeon_edp_connector_funcs,
+						    connector_type,
+						    ddc);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 			drm_object_attach_property(&radeon_connector->base.base,
 						      dev->mode_config.scaling_mode_property,
 						      DRM_MODE_SCALE_FULLSCREEN);
@@ -2265,7 +2304,10 @@ radeon_add_atom_connector(struct drm_device *dev,
 		case DRM_MODE_CONNECTOR_SVIDEO:
 		case DRM_MODE_CONNECTOR_Composite:
 		case DRM_MODE_CONNECTOR_9PinDIN:
-			drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+			drm_connector_init_with_ddc(dev, &radeon_connector->base,
+						    &radeon_tv_connector_funcs,
+						    connector_type,
+						    ddc);
 			drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
 			radeon_connector->dac_load_detect = true;
 			drm_object_attach_property(&radeon_connector->base.base,
@@ -2285,13 +2327,18 @@ radeon_add_atom_connector(struct drm_device *dev,
 				goto failed;
 			radeon_dig_connector->igp_lane_info = igp_lane_info;
 			radeon_connector->con_priv = radeon_dig_connector;
-			drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
-			drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
 			if (i2c_bus->valid) {
 				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 				if (!radeon_connector->ddc_bus)
 					DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				else
+					ddc = &radeon_connector->ddc_bus->adapter;
 			}
+			drm_connector_init_with_ddc(dev, &radeon_connector->base,
+						    &radeon_lvds_connector_funcs,
+						    connector_type,
+						    ddc);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
 			drm_object_attach_property(&radeon_connector->base.base,
 						      dev->mode_config.scaling_mode_property,
 						      DRM_MODE_SCALE_FULLSCREEN);
@@ -2335,6 +2382,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_connector *connector;
 	struct radeon_connector *radeon_connector;
+	struct i2c_adapter *ddc = NULL;
 	uint32_t subpixel_order = SubPixelNone;
 
 	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -2369,13 +2417,18 @@ radeon_add_legacy_connector(struct drm_device *dev,
 
 	switch (connector_type) {
 	case DRM_MODE_CONNECTOR_VGA:
-		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
 				DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			else
+				ddc = &radeon_connector->ddc_bus->adapter;
 		}
+		drm_connector_init_with_ddc(dev, &radeon_connector->base,
+					    &radeon_vga_connector_funcs,
+					    connector_type,
+					    ddc);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
 		radeon_connector->dac_load_detect = true;
 		drm_object_attach_property(&radeon_connector->base.base,
 					      rdev->mode_info.load_detect_property,
@@ -2386,13 +2439,18 @@ radeon_add_legacy_connector(struct drm_device *dev,
 		connector->doublescan_allowed = true;
 		break;
 	case DRM_MODE_CONNECTOR_DVIA:
-		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
 				DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			else
+				ddc = &radeon_connector->ddc_bus->adapter;
 		}
+		drm_connector_init_with_ddc(dev, &radeon_connector->base,
+					    &radeon_vga_connector_funcs,
+					    connector_type,
+					    ddc);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
 		radeon_connector->dac_load_detect = true;
 		drm_object_attach_property(&radeon_connector->base.base,
 					      rdev->mode_info.load_detect_property,
@@ -2404,13 +2462,18 @@ radeon_add_legacy_connector(struct drm_device *dev,
 		break;
 	case DRM_MODE_CONNECTOR_DVII:
 	case DRM_MODE_CONNECTOR_DVID:
-		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-		drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
 				DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			else
+				ddc = &radeon_connector->ddc_bus->adapter;
 		}
+		drm_connector_init_with_ddc(dev, &radeon_connector->base,
+					    &radeon_dvi_connector_funcs,
+					    connector_type,
+					    ddc);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
 		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
 			radeon_connector->dac_load_detect = true;
 			drm_object_attach_property(&radeon_connector->base.base,
@@ -2427,7 +2490,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
 	case DRM_MODE_CONNECTOR_SVIDEO:
 	case DRM_MODE_CONNECTOR_Composite:
 	case DRM_MODE_CONNECTOR_9PinDIN:
-		drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+		drm_connector_init_with_ddc(dev, &radeon_connector->base,
+					    &radeon_tv_connector_funcs,
+					    connector_type,
+					    ddc);
 		drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
 		radeon_connector->dac_load_detect = true;
 		/* RS400,RC410,RS480 chipset seems to report a lot
@@ -2449,13 +2515,18 @@ radeon_add_legacy_connector(struct drm_device *dev,
 		connector->doublescan_allowed = false;
 		break;
 	case DRM_MODE_CONNECTOR_LVDS:
-		drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
-		drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
 				DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			else
+				ddc = &radeon_connector->ddc_bus->adapter;
 		}
+		drm_connector_init_with_ddc(dev, &radeon_connector->base,
+					    &radeon_lvds_connector_funcs,
+					    connector_type,
+					    ddc);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
 		drm_object_attach_property(&radeon_connector->base.base,
 					      dev->mode_config.scaling_mode_property,
 					      DRM_MODE_SCALE_FULLSCREEN);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 7b54606..0d0ab8e0 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -26,11 +26,11 @@
  */
 
 #include <linux/list_sort.h>
+#include <linux/pci.h>
 #include <linux/uaccess.h>
 
 #include <drm/drm_device.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5d017f0..a522e09 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -28,6 +28,7 @@
 
 #include <linux/console.h>
 #include <linux/efi.h>
+#include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/vga_switcheroo.h>
@@ -38,7 +39,6 @@
 #include <drm/drm_debugfs.h>
 #include <drm/drm_device.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/radeon_drm.h>
 
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index e81b01f..d07c7db 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -24,6 +24,7 @@
  *          Alex Deucher
  */
 
+#include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <linux/gcd.h>
 
@@ -36,7 +37,6 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
@@ -127,6 +127,8 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
 
 	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
 
+	msleep(10);
+
 	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
 	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
 		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
@@ -672,7 +674,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_crtc *radeon_crtc;
-	int i;
 
 	radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
 	if (radeon_crtc == NULL)
@@ -701,12 +702,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
 	radeon_crtc->mode_set.num_connectors = 0;
 #endif
 
-	for (i = 0; i < 256; i++) {
-		radeon_crtc->lut_r[i] = i << 2;
-		radeon_crtc->lut_g[i] = i << 2;
-		radeon_crtc->lut_b[i] = i << 2;
-	}
-
 	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
 		radeon_atombios_init_crtc(dev, radeon_crtc);
 	else
@@ -847,11 +842,11 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
 	if (rdev->bios) {
 		if (rdev->is_atom_bios) {
 			ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
-			if (ret == false)
+			if (!ret)
 				ret = radeon_get_atom_connector_info_from_object_table(dev);
 		} else {
 			ret = radeon_get_legacy_connector_info_from_bios(dev);
-			if (ret == false)
+			if (!ret)
 				ret = radeon_get_legacy_connector_info_from_table(dev);
 		}
 	} else {
@@ -1687,7 +1682,6 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct radeon_encoder *radeon_encoder;
 	struct drm_connector *connector;
-	struct radeon_connector *radeon_connector;
 	bool first = true;
 	u32 src_v = 1, dst_v = 1;
 	u32 src_h = 1, dst_h = 1;
@@ -1700,7 +1694,6 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 			continue;
 		radeon_encoder = to_radeon_encoder(encoder);
 		connector = radeon_get_connector_for_encoder(encoder);
-		radeon_connector = to_radeon_connector(connector);
 
 		if (first) {
 			/* set scaling */
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index ee28f5b..28eef92 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -518,7 +518,7 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
 
 	mst_enc = radeon_encoder->enc_priv;
 
-	mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+	mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp, false);
 
 	mst_enc->primary->active_device = mst_enc->primary->devices & mst_enc->connector->devices;
 	DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index a0c9908..ced022f 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -24,9 +24,10 @@
  *          Alex Deucher
  */
 
+#include <linux/pci.h>
+
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_device.h>
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 2c564f4..ec0b7d6 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -25,6 +25,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/vga_switcheroo.h>
@@ -33,7 +34,6 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "radeon.h"
@@ -73,7 +73,7 @@ radeonfb_release(struct fb_info *info, int user)
 	return 0;
 }
 
-static struct fb_ops radeonfb_ops = {
+static const struct fb_ops radeonfb_ops = {
 	.owner = THIS_MODULE,
 	DRM_FB_HELPER_DEFAULT_OPS,
 	.fb_open = radeonfb_open,
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index d4d3778..f178ba3 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -26,9 +26,9 @@
  *          Jerome Glisse
  */
 
+#include <linux/pci.h>
 #include <linux/vmalloc.h>
 
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 #ifdef CONFIG_X86
 #include <asm/set_memory.h>
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 67298a0..068c3e5 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -26,10 +26,11 @@
  *          Jerome Glisse
  */
 
+#include <linux/pci.h>
+
 #include <drm/drm_debugfs.h>
 #include <drm/drm_device.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index d465a3d..545e31e 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -25,10 +25,10 @@
  */
 
 #include <linux/export.h>
+#include <linux/pci.h>
 
 #include <drm/drm_device.h>
 #include <drm/drm_edid.h>
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index d961363..b86bc88 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -26,12 +26,12 @@
  *          Jerome Glisse
  */
 
+#include <linux/pci.h>
 #include <linux/pm_runtime.h>
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_device.h>
 #include <drm/drm_irq.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
 #include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index e85c554..d24f23a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -26,6 +26,7 @@
  *          Jerome Glisse
  */
 
+#include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
@@ -34,7 +35,6 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_file.h>
 #include <drm/drm_ioctl.h>
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index ef100b7..44d060f 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -25,11 +25,11 @@
  */
 
 #include <linux/backlight.h>
+#include <linux/pci.h>
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_device.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_util.h>
 #include <drm/radeon_drm.h>
 
@@ -1712,7 +1712,7 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon
 	else
 		ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
 
-	if (ret == false)
+	if (!ret)
 		radeon_legacy_get_tmds_info_from_table(encoder, tmds);
 
 	return tmds;
@@ -1735,7 +1735,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra
 
 	ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
 
-	if (ret == false)
+	if (!ret)
 		radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
 
 	return tmds;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index f132eec..d9df7f3 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -537,7 +537,7 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
 	uint32_t tv_master_cntl, tv_rgb_cntl, tv_dac_cntl;
 	uint32_t tv_modulator_cntl1, tv_modulator_cntl2;
 	uint32_t tv_vscaler_cntl1, tv_vscaler_cntl2;
-	uint32_t tv_pll_cntl, tv_pll_cntl1, tv_ftotal;
+	uint32_t tv_pll_cntl, tv_ftotal;
 	uint32_t tv_y_fall_cntl, tv_y_rise_cntl, tv_y_saw_tooth_cntl;
 	uint32_t m, n, p;
 	const uint16_t *hor_timing;
@@ -709,12 +709,6 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
 		(((n >> 9) & RADEON_TV_N0HI_MASK) << RADEON_TV_N0HI_SHIFT) |
 		((p & RADEON_TV_P_MASK) << RADEON_TV_P_SHIFT);
 
-	tv_pll_cntl1 = (((4 & RADEON_TVPCP_MASK) << RADEON_TVPCP_SHIFT) |
-			((4 & RADEON_TVPVG_MASK) << RADEON_TVPVG_SHIFT) |
-			((1 & RADEON_TVPDC_MASK) << RADEON_TVPDC_SHIFT) |
-			RADEON_TVCLK_SRC_SEL_TVPLL |
-			RADEON_TVPLL_TEST_DIS);
-
 	tv_dac->tv.tv_uv_adr = 0xc8;
 
 	if (tv_dac->tv_std == TV_STD_NTSC ||
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index fd470d6..9656517 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -327,7 +327,6 @@ enum radeon_flip_status {
 struct radeon_crtc {
 	struct drm_crtc base;
 	int crtc_id;
-	u16 lut_r[256], lut_g[256], lut_b[256];
 	bool enabled;
 	bool can_tile;
 	bool cursor_out_of_bounds;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 5d10e11..8c5d6fd 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -23,10 +23,10 @@
 
 #include <linux/hwmon-sysfs.h>
 #include <linux/hwmon.h>
+#include <linux/pci.h>
 #include <linux/power_supply.h>
 
 #include <drm/drm_debugfs.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_vblank.h>
 
 #include "atom.h"
@@ -1789,7 +1789,7 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish
 	u32 stat_crtc = 0;
 	bool in_vbl = radeon_pm_in_vbl(rdev);
 
-	if (in_vbl == false)
+	if (!in_vbl)
 		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
 			 finish ? "exit" : "entry");
 	return in_vbl;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 15bf8a2..3b92311 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -32,6 +32,7 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/pagemap.h>
+#include <linux/pci.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
@@ -41,7 +42,6 @@
 #include <drm/drm_debugfs.h>
 #include <drm/drm_device.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_prime.h>
 #include <drm/radeon_drm.h>
 #include <drm/ttm/ttm_bo_api.h>
@@ -881,9 +881,6 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
 	man->size = size >> PAGE_SHIFT;
 }
 
-static struct vm_operations_struct radeon_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops = NULL;
-
 static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo;
@@ -891,34 +888,36 @@ static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
 	vm_fault_t ret;
 
 	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
-	if (bo == NULL) {
+	if (bo == NULL)
 		return VM_FAULT_NOPAGE;
-	}
+
 	rdev = radeon_get_rdev(bo->bdev);
 	down_read(&rdev->pm.mclk_lock);
-	ret = ttm_vm_ops->fault(vmf);
+	ret = ttm_bo_vm_fault(vmf);
 	up_read(&rdev->pm.mclk_lock);
 	return ret;
 }
 
+static struct vm_operations_struct radeon_ttm_vm_ops = {
+	.fault = radeon_ttm_fault,
+	.open = ttm_bo_vm_open,
+	.close = ttm_bo_vm_close,
+	.access = ttm_bo_vm_access
+};
+
 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	int r;
 	struct drm_file *file_priv = filp->private_data;
 	struct radeon_device *rdev = file_priv->minor->dev->dev_private;
 
-	if (rdev == NULL) {
+	if (rdev == NULL)
 		return -EINVAL;
-	}
+
 	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
-	if (unlikely(r != 0)) {
+	if (unlikely(r != 0))
 		return r;
-	}
-	if (unlikely(ttm_vm_ops == NULL)) {
-		ttm_vm_ops = vma->vm_ops;
-		radeon_ttm_vm_ops = *ttm_vm_ops;
-		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
-	}
+
 	vma->vm_ops = &radeon_ttm_vm_ops;
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 59db54a..5e80064 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -388,9 +388,9 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
 		ib.ptr[i] = cpu_to_le32(0x0);
 
 	r = radeon_ib_schedule(rdev, &ib, NULL, false);
-	if (r) {
+	if (r)
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
-	}
+
 
 	if (fence)
 		*fence = radeon_fence_ref(ib.fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index e0ad547..f60fae0 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -296,9 +296,9 @@ struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
 	struct radeon_bo_va *bo_va;
 
 	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		if (bo_va->vm == vm) {
+		if (bo_va->vm == vm)
 			return bo_va;
-		}
+
 	}
 	return NULL;
 }
@@ -323,9 +323,9 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
 	struct radeon_bo_va *bo_va;
 
 	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
-	if (bo_va == NULL) {
+	if (bo_va == NULL)
 		return NULL;
-	}
+
 	bo_va->vm = vm;
 	bo_va->bo = bo;
 	bo_va->it.start = 0;
@@ -947,9 +947,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 
 	if (mem) {
 		addr = (u64)mem->start << PAGE_SHIFT;
-		if (mem->mem_type != TTM_PL_SYSTEM) {
+		if (mem->mem_type != TTM_PL_SYSTEM)
 			bo_va->flags |= RADEON_VM_PAGE_VALID;
-		}
+
 		if (mem->mem_type == TTM_PL_TT) {
 			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
 			if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
@@ -1233,9 +1233,9 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	struct radeon_bo_va *bo_va, *tmp;
 	int i, r;
 
-	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
+	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
 		dev_err(rdev->dev, "still active bo inside vm\n");
-	}
+
 	rbtree_postorder_for_each_entry_safe(bo_va, tmp,
 					     &vm->va.rb_root, it.rb) {
 		interval_tree_remove(&bo_va->it, &vm->va);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 2f8ff08..c88b490 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -37,9 +37,9 @@
  */
 
 #include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pci.h>
 
 #include <drm/drm_device.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_vblank.h>
 
 #include "atom.h"
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 267d8a9..c296f94 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -26,7 +26,7 @@
  *          Jerome Glisse
  */
 
-#include <drm/drm_pci.h>
+#include <linux/pci.h>
 
 #include "atom.h"
 #include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 72dbf32..1739007 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -22,10 +22,9 @@
  * Authors: Alex Deucher
  */
 
+#include <linux/pci.h>
 #include <linux/seq_file.h>
 
-#include <drm/drm_pci.h>
-
 #include "atom.h"
 #include "r600_dpm.h"
 #include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 7a6fc66..21f653a 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -27,10 +27,10 @@
  */
 
 #include <linux/firmware.h>
+#include <linux/pci.h>
 #include <linux/slab.h>
 
 #include <drm/drm_device.h>
-#include <drm/drm_pci.h>
 #include <drm/radeon_drm.h>
 
 #include "atom.h"
@@ -1703,7 +1703,7 @@ static void rv770_uvd_init(struct radeon_device *rdev)
 		 * there. So it is pointless to try to go through that code
 		 * hence why we disable uvd here.
 		 */
-		rdev->has_uvd = 0;
+		rdev->has_uvd = false;
 		return;
 	}
 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d7eea75..93dcab5 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -23,10 +23,10 @@
  */
 
 #include <linux/firmware.h>
-#include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
 
-#include <drm/drm_pci.h>
 #include <drm/drm_vblank.h>
 #include <drm/radeon_drm.h>
 
@@ -6472,7 +6472,7 @@ static void si_uvd_init(struct radeon_device *rdev)
 		 * there. So it is pointless to try to go through that code
 		 * hence why we disable uvd here.
 		 */
-		rdev->has_uvd = 0;
+		rdev->has_uvd = false;
 		return;
 	}
 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -6539,7 +6539,7 @@ static void si_vce_init(struct radeon_device *rdev)
 		 * there. So it is pointless to try to go through that code
 		 * hence why we disable vce here.
 		 */
-		rdev->has_vce = 0;
+		rdev->has_vce = false;
 		return;
 	}
 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index a0b382a..05e8b4d 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -22,10 +22,9 @@
  */
 
 #include <linux/math64.h>
+#include <linux/pci.h>
 #include <linux/seq_file.h>
 
-#include <drm/drm_pci.h>
-
 #include "atom.h"
 #include "r600_dpm.h"
 #include "radeon.h"
@@ -3640,14 +3639,13 @@ static int si_notify_smc_display_change(struct radeon_device *rdev,
 
 static void si_program_response_times(struct radeon_device *rdev)
 {
-	u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
+	u32 voltage_response_time, acpi_delay_time, vbi_time_out;
 	u32 vddc_dly, acpi_dly, vbi_dly;
 	u32 reference_clock;
 
 	si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
 
 	voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
-	backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
 
 	if (voltage_response_time == 0)
 		voltage_response_time = 1000;
@@ -5900,7 +5898,7 @@ static int si_patch_single_dependency_table_based_on_leakage(struct radeon_devic
 
 static int si_patch_dependency_tables_based_on_leakage(struct radeon_device *rdev)
 {
-	int ret = 0;
+	int ret;
 
 	ret = si_patch_single_dependency_table_based_on_leakage(rdev,
 								&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 65302f9..4d93b84a 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -21,10 +21,9 @@
  *
  */
 
+#include <linux/pci.h>
 #include <linux/seq_file.h>
 
-#include <drm/drm_pci.h>
-
 #include "r600_dpm.h"
 #include "radeon.h"
 #include "radeon_asic.h"
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 1529849..0919f1f 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -4,6 +4,7 @@
 	depends on DRM && OF
 	depends on ARM || ARM64
 	depends on ARCH_RENESAS || COMPILE_TEST
+	imply DRM_RCAR_CMM
 	imply DRM_RCAR_LVDS
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
@@ -13,6 +14,13 @@
 	  Choose this option if you have an R-Car chipset.
 	  If M is selected the module will be called rcar-du-drm.
 
+config DRM_RCAR_CMM
+	tristate "R-Car DU Color Management Module (CMM) Support"
+	depends on DRM && OF
+	depends on DRM_RCAR_DU
+	help
+	  Enable support for R-Car Color Management Module (CMM).
+
 config DRM_RCAR_DW_HDMI
 	tristate "R-Car DU Gen3 HDMI Encoder Support"
 	depends on DRM && OF
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 6c2ed9c..4d1187c 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -15,6 +15,7 @@
 rcar-du-drm-$(CONFIG_DRM_RCAR_VSP)	+= rcar_du_vsp.o
 rcar-du-drm-$(CONFIG_DRM_RCAR_WRITEBACK) += rcar_du_writeback.o
 
+obj-$(CONFIG_DRM_RCAR_CMM)		+= rcar_cmm.o
 obj-$(CONFIG_DRM_RCAR_DU)		+= rcar-du-drm.o
 obj-$(CONFIG_DRM_RCAR_DW_HDMI)		+= rcar_dw_hdmi.o
 obj-$(CONFIG_DRM_RCAR_LVDS)		+= rcar_lvds.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.c b/drivers/gpu/drm/rcar-du/rcar_cmm.c
new file mode 100644
index 0000000..c578095
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_cmm.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * rcar_cmm.c -- R-Car Display Unit Color Management Module
+ *
+ * Copyright (C) 2019 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_color_mgmt.h>
+
+#include "rcar_cmm.h"
+
+#define CM2_LUT_CTRL		0x0000
+#define CM2_LUT_CTRL_LUT_EN	BIT(0)
+#define CM2_LUT_TBL_BASE	0x0600
+#define CM2_LUT_TBL(__i)	(CM2_LUT_TBL_BASE + (__i) * 4)
+
+struct rcar_cmm {
+	void __iomem *base;
+
+	/*
+	 * @lut:		1D-LUT state
+	 * @lut.enabled:	1D-LUT enabled flag
+	 */
+	struct {
+		bool enabled;
+	} lut;
+};
+
+static inline int rcar_cmm_read(struct rcar_cmm *rcmm, u32 reg)
+{
+	return ioread32(rcmm->base + reg);
+}
+
+static inline void rcar_cmm_write(struct rcar_cmm *rcmm, u32 reg, u32 data)
+{
+	iowrite32(data, rcmm->base + reg);
+}
+
+/*
+ * rcar_cmm_lut_write() - Scale the DRM LUT table entries to hardware precision
+ *			  and write to the CMM registers
+ * @rcmm: Pointer to the CMM device
+ * @drm_lut: Pointer to the DRM LUT table
+ */
+static void rcar_cmm_lut_write(struct rcar_cmm *rcmm,
+			       const struct drm_color_lut *drm_lut)
+{
+	unsigned int i;
+
+	for (i = 0; i < CM2_LUT_SIZE; ++i) {
+		u32 entry = drm_color_lut_extract(drm_lut[i].red, 8) << 16
+			  | drm_color_lut_extract(drm_lut[i].green, 8) << 8
+			  | drm_color_lut_extract(drm_lut[i].blue, 8);
+
+		rcar_cmm_write(rcmm, CM2_LUT_TBL(i), entry);
+	}
+}
+
+/*
+ * rcar_cmm_setup() - Configure the CMM unit
+ * @pdev: The platform device associated with the CMM instance
+ * @config: The CMM unit configuration
+ *
+ * Configure the CMM unit with the given configuration. Currently enabling,
+ * disabling and programming of the 1-D LUT unit is supported.
+ *
+ * As rcar_cmm_setup() accesses the CMM registers the unit should be powered
+ * and its functional clock enabled. To guarantee this, before any call to
+ * this function is made, the CMM unit has to be enabled by calling
+ * rcar_cmm_enable() first.
+ *
+ * TODO: Add support for LUT double buffer operations to avoid updating the
+ * LUT table entries while a frame is being displayed.
+ */
+int rcar_cmm_setup(struct platform_device *pdev,
+		   const struct rcar_cmm_config *config)
+{
+	struct rcar_cmm *rcmm = platform_get_drvdata(pdev);
+
+	/* Disable LUT if no table is provided. */
+	if (!config->lut.table) {
+		if (rcmm->lut.enabled) {
+			rcar_cmm_write(rcmm, CM2_LUT_CTRL, 0);
+			rcmm->lut.enabled = false;
+		}
+
+		return 0;
+	}
+
+	/* Enable LUT and program the new gamma table values. */
+	if (!rcmm->lut.enabled) {
+		rcar_cmm_write(rcmm, CM2_LUT_CTRL, CM2_LUT_CTRL_LUT_EN);
+		rcmm->lut.enabled = true;
+	}
+
+	rcar_cmm_lut_write(rcmm, config->lut.table);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_cmm_setup);
+
+/*
+ * rcar_cmm_enable() - Enable the CMM unit
+ * @pdev: The platform device associated with the CMM instance
+ *
+ * When the output of the corresponding DU channel is routed to the CMM unit,
+ * the unit shall be enabled before the DU channel is started, and remain
+ * enabled until the channel is stopped. The CMM unit shall be disabled with
+ * rcar_cmm_disable().
+ *
+ * Calls to rcar_cmm_enable() and rcar_cmm_disable() are not reference-counted.
+ * It is an error to attempt to enable an already enabled CMM unit, or to
+ * attempt to disable a disabled unit.
+ */
+int rcar_cmm_enable(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_cmm_enable);
+
+/*
+ * rcar_cmm_disable() - Disable the CMM unit
+ * @pdev: The platform device associated with the CMM instance
+ *
+ * See rcar_cmm_enable() for usage information.
+ *
+ * Disabling the CMM unit disables all the internal processing blocks. The CMM
+ * state shall thus be restored with rcar_cmm_setup() when re-enabling the CMM
+ * unit after the next rcar_cmm_enable() call.
+ */
+void rcar_cmm_disable(struct platform_device *pdev)
+{
+	struct rcar_cmm *rcmm = platform_get_drvdata(pdev);
+
+	rcar_cmm_write(rcmm, CM2_LUT_CTRL, 0);
+	rcmm->lut.enabled = false;
+
+	pm_runtime_put(&pdev->dev);
+}
+EXPORT_SYMBOL_GPL(rcar_cmm_disable);
+
+/*
+ * rcar_cmm_init() - Initialize the CMM unit
+ * @pdev: The platform device associated with the CMM instance
+ *
+ * Return: 0 on success, -EPROBE_DEFER if the CMM is not available yet,
+ *         -ENODEV if the DRM_RCAR_CMM config option is disabled
+ */
+int rcar_cmm_init(struct platform_device *pdev)
+{
+	struct rcar_cmm *rcmm = platform_get_drvdata(pdev);
+
+	if (!rcmm)
+		return -EPROBE_DEFER;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_cmm_init);
+
+static int rcar_cmm_probe(struct platform_device *pdev)
+{
+	struct rcar_cmm *rcmm;
+
+	rcmm = devm_kzalloc(&pdev->dev, sizeof(*rcmm), GFP_KERNEL);
+	if (!rcmm)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, rcmm);
+
+	rcmm->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(rcmm->base))
+		return PTR_ERR(rcmm->base);
+
+	pm_runtime_enable(&pdev->dev);
+
+	return 0;
+}
+
+static int rcar_cmm_remove(struct platform_device *pdev)
+{
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+static const struct of_device_id rcar_cmm_of_table[] = {
+	{ .compatible = "renesas,rcar-gen3-cmm", },
+	{ .compatible = "renesas,rcar-gen2-cmm", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, rcar_cmm_of_table);
+
+static struct platform_driver rcar_cmm_platform_driver = {
+	.probe		= rcar_cmm_probe,
+	.remove		= rcar_cmm_remove,
+	.driver		= {
+		.name	= "rcar-cmm",
+		.of_match_table = rcar_cmm_of_table,
+	},
+};
+
+module_platform_driver(rcar_cmm_platform_driver);
+
+MODULE_AUTHOR("Jacopo Mondi <jacopo+renesas@jmondi.org>");
+MODULE_DESCRIPTION("Renesas R-Car CMM Driver");
+MODULE_LICENSE("GPL v2");
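A minimal usage sketch of the interface added above (illustrative only, not patch content; it assumes rcrtc->cmm holds the CMM platform device and gamma_lut points to CM2_LUT_SIZE drm_color_lut entries, with error handling elided), mirroring the call sequence the DU CRTC code uses further below:

	struct rcar_cmm_config config = {
		.lut.table = gamma_lut,		/* NULL disables the 1D-LUT */
	};

	rcar_cmm_enable(rcrtc->cmm);		/* power the unit before starting the DU channel */
	rcar_cmm_setup(rcrtc->cmm, &config);	/* program (or bypass) the 1D-LUT */
	/* ... scanout ... */
	rcar_cmm_disable(rcrtc->cmm);		/* LUT state is lost; set it up again after the next enable */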
diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.h b/drivers/gpu/drm/rcar-du/rcar_cmm.h
new file mode 100644
index 0000000..b5f7ec6
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_cmm.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * rcar_cmm.h -- R-Car Display Unit Color Management Module
+ *
+ * Copyright (C) 2019 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ */
+
+#ifndef __RCAR_CMM_H__
+#define __RCAR_CMM_H__
+
+#define CM2_LUT_SIZE		256
+
+struct drm_color_lut;
+struct platform_device;
+
+/**
+ * struct rcar_cmm_config - CMM configuration
+ *
+ * @lut:	1D-LUT configuration
+ * @lut.table:	1D-LUT table entries. Disable LUT operations when NULL
+ */
+struct rcar_cmm_config {
+	struct {
+		struct drm_color_lut *table;
+	} lut;
+};
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_CMM)
+int rcar_cmm_init(struct platform_device *pdev);
+
+int rcar_cmm_enable(struct platform_device *pdev);
+void rcar_cmm_disable(struct platform_device *pdev);
+
+int rcar_cmm_setup(struct platform_device *pdev,
+		   const struct rcar_cmm_config *config);
+#else
+static inline int rcar_cmm_init(struct platform_device *pdev)
+{
+	return -ENODEV;
+}
+
+static inline int rcar_cmm_enable(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static inline void rcar_cmm_disable(struct platform_device *pdev)
+{
+}
+
+static inline int rcar_cmm_setup(struct platform_device *pdev,
+				 const struct rcar_cmm_config *config)
+{
+	return 0;
+}
+#endif /* IS_ENABLED(CONFIG_DRM_RCAR_CMM) */
+
+#endif /* __RCAR_CMM_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 2da46e3..d73e88d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -14,6 +14,7 @@
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_device.h>
 #include <drm/drm_fb_cma_helper.h>
@@ -21,6 +22,7 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_vblank.h>
 
+#include "rcar_cmm.h"
 #include "rcar_du_crtc.h"
 #include "rcar_du_drv.h"
 #include "rcar_du_encoder.h"
@@ -475,6 +477,45 @@ static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
 }
 
 /* -----------------------------------------------------------------------------
+ * Color Management Module (CMM)
+ */
+
+static int rcar_du_cmm_check(struct drm_crtc *crtc,
+			     struct drm_crtc_state *state)
+{
+	struct drm_property_blob *drm_lut = state->gamma_lut;
+	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+	struct device *dev = rcrtc->dev->dev;
+
+	if (!drm_lut)
+		return 0;
+
+	/* We only accept fully populated LUT tables. */
+	if (drm_color_lut_size(drm_lut) != CM2_LUT_SIZE) {
+		dev_err(dev, "invalid gamma lut size: %zu bytes\n",
+			drm_lut->length);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void rcar_du_cmm_setup(struct drm_crtc *crtc)
+{
+	struct drm_property_blob *drm_lut = crtc->state->gamma_lut;
+	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+	struct rcar_cmm_config cmm_config = {};
+
+	if (!rcrtc->cmm)
+		return;
+
+	if (drm_lut)
+		cmm_config.lut.table = (struct drm_color_lut *)drm_lut->data;
+
+	rcar_cmm_setup(rcrtc->cmm, &cmm_config);
+}
+
+/* -----------------------------------------------------------------------------
  * Start/Stop and Suspend/Resume
  */
 
@@ -619,6 +660,9 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
 	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
 		rcar_du_vsp_disable(rcrtc);
 
+	if (rcrtc->cmm)
+		rcar_cmm_disable(rcrtc->cmm);
+
 	/*
 	 * Select switch sync mode. This stops display operation and configures
 	 * the HSYNC and VSYNC signals as inputs.
@@ -642,6 +686,11 @@ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
 {
 	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state);
 	struct drm_encoder *encoder;
+	int ret;
+
+	ret = rcar_du_cmm_check(crtc, state);
+	if (ret)
+		return ret;
 
 	/* Store the routes from the CRTC output to the DU outputs. */
 	rstate->outputs = 0;
@@ -667,6 +716,8 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
 	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
 	struct rcar_du_device *rcdu = rcrtc->dev;
 
+	if (rcrtc->cmm)
+		rcar_cmm_enable(rcrtc->cmm);
 	rcar_du_crtc_get(rcrtc);
 
 	/*
@@ -680,12 +731,20 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
 			rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
 		const struct drm_display_mode *mode =
 			&crtc->state->adjusted_mode;
+		struct drm_bridge *bridge;
 
-		rcar_lvds_clk_enable(encoder->base.bridge,
-				     mode->clock * 1000);
+		bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
+		rcar_lvds_clk_enable(bridge, mode->clock * 1000);
 	}
 
 	rcar_du_crtc_start(rcrtc);
+
+	/*
+	 * TODO: The chip manual indicates that CMM tables should be written
+	 * after the DU channel has been activated. Investigate the impact
+	 * of this restriction on the first displayed frame.
+	 */
+	rcar_du_cmm_setup(crtc);
 }
 
 static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -702,12 +761,14 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
 	    rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
 		struct rcar_du_encoder *encoder =
 			rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
+		struct drm_bridge *bridge;
 
 		/*
 		 * Disable the LVDS clock output, see
 		 * rcar_du_crtc_atomic_enable().
 		 */
-		rcar_lvds_clk_disable(encoder->base.bridge);
+		bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
+		rcar_lvds_clk_disable(bridge);
 	}
 
 	spin_lock_irq(&crtc->dev->event_lock);
@@ -739,6 +800,10 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
 	 */
 	rcar_du_crtc_get(rcrtc);
 
+	/* If the active state changed, we let .atomic_enable handle CMM. */
+	if (crtc->state->color_mgmt_changed && !crtc->state->active_changed)
+		rcar_du_cmm_setup(crtc);
+
 	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
 		rcar_du_vsp_atomic_begin(rcrtc);
 }
@@ -1075,6 +1140,7 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = {
 	.set_crc_source = rcar_du_crtc_set_crc_source,
 	.verify_crc_source = rcar_du_crtc_verify_crc_source,
 	.get_crc_sources = rcar_du_crtc_get_crc_sources,
+	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 };
 
 /* -----------------------------------------------------------------------------
@@ -1194,6 +1260,15 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
 	if (ret < 0)
 		return ret;
 
+	/* CMM might be disabled for this CRTC. */
+	if (rcdu->cmms[swindex]) {
+		rcrtc->cmm = rcdu->cmms[swindex];
+		rgrp->cmms_mask |= BIT(hwindex % 2);
+
+		drm_mode_crtc_set_gamma_size(crtc, CM2_LUT_SIZE);
+		drm_crtc_enable_color_mgmt(crtc, 0, false, CM2_LUT_SIZE);
+	}
+
 	drm_crtc_helper_add(crtc, &crtc_helper_funcs);
 
 	/* Start with vertical blanking interrupt reporting disabled. */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 3b7fc66..5f2940c42 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -39,6 +39,7 @@ struct rcar_du_vsp;
  * @vblank_wait: wait queue used to signal vertical blanking
  * @vblank_count: number of vertical blanking interrupts to wait for
  * @group: CRTC group this CRTC belongs to
+ * @cmm: CMM associated with this CRTC
  * @vsp: VSP feeding video to this CRTC
  * @vsp_pipe: index of the VSP pipeline feeding video to this CRTC
  * @writeback: the writeback connector
@@ -64,6 +65,7 @@ struct rcar_du_crtc {
 	unsigned int vblank_count;
 
 	struct rcar_du_group *group;
+	struct platform_device *cmm;
 	struct rcar_du_vsp *vsp;
 	unsigned int vsp_pipe;
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index f266c17..654e2dd 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -399,7 +399,10 @@ static const struct rcar_du_device_info rcar_du_r8a77970_info = {
 		  | RCAR_DU_FEATURE_TVM_SYNC,
 	.channels_mask = BIT(0),
 	.routes = {
-		/* R8A77970 has one RGB output and one LVDS output. */
+		/*
+		 * R8A77970 and R8A77980 have one RGB output and one LVDS
+		 * output.
+		 */
 		[RCAR_DU_OUTPUT_DPAD0] = {
 			.possible_crtcs = BIT(0),
 			.port = 0,
@@ -457,6 +460,7 @@ static const struct of_device_id rcar_du_of_table[] = {
 	{ .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info },
 	{ .compatible = "renesas,du-r8a77965", .data = &rcar_du_r8a77965_info },
 	{ .compatible = "renesas,du-r8a77970", .data = &rcar_du_r8a77970_info },
+	{ .compatible = "renesas,du-r8a77980", .data = &rcar_du_r8a77970_info },
 	{ .compatible = "renesas,du-r8a77990", .data = &rcar_du_r8a7799x_info },
 	{ .compatible = "renesas,du-r8a77995", .data = &rcar_du_r8a7799x_info },
 	{ }
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 1327cd0..61504c5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/wait.h>
 
+#include "rcar_cmm.h"
 #include "rcar_du_crtc.h"
 #include "rcar_du_group.h"
 #include "rcar_du_vsp.h"
@@ -85,6 +86,7 @@ struct rcar_du_device {
 	struct rcar_du_encoder *encoders[RCAR_DU_OUTPUT_MAX];
 
 	struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
+	struct platform_device *cmms[RCAR_DU_MAX_CRTCS];
 	struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
 
 	struct {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 9eee479..88a783c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -135,6 +135,7 @@ static void rcar_du_group_setup_didsr(struct rcar_du_group *rgrp)
 static void rcar_du_group_setup(struct rcar_du_group *rgrp)
 {
 	struct rcar_du_device *rcdu = rgrp->dev;
+	u32 defr7 = DEFR7_CODE;
 
 	/* Enable extended features */
 	rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
@@ -147,6 +148,15 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
 
 	rcar_du_group_setup_pins(rgrp);
 
+	/*
+	 * TODO: Handle routing of the DU output to CMM dynamically, as we
+	 * should bypass CMM completely when no color management feature is
+	 * used.
+	 */
+	defr7 |= (rgrp->cmms_mask & BIT(1) ? DEFR7_CMME1 : 0) |
+		 (rgrp->cmms_mask & BIT(0) ? DEFR7_CMME0 : 0);
+	rcar_du_group_write(rgrp, DEFR7, defr7);
+
 	if (rcdu->info->gen >= 2) {
 		rcar_du_group_setup_defr8(rgrp);
 		rcar_du_group_setup_didsr(rgrp);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
index 87950c1..e990660 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -22,6 +22,7 @@ struct rcar_du_device;
  * @mmio_offset: registers offset in the device memory map
  * @index: group index
  * @channels_mask: bitmask of populated DU channels in this group
+ * @cmms_mask: bitmask of available CMMs in this group
  * @num_crtcs: number of CRTCs in this group (1 or 2)
  * @use_count: number of users of the group (rcar_du_group_(get|put))
  * @used_crtcs: number of CRTCs currently in use
@@ -37,6 +38,7 @@ struct rcar_du_group {
 	unsigned int index;
 
 	unsigned int channels_mask;
+	unsigned int cmms_mask;
 	unsigned int num_crtcs;
 	unsigned int use_count;
 	unsigned int used_crtcs;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 0d59f39..fcfd916 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -17,7 +17,9 @@
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
 
+#include <linux/device.h>
 #include <linux/of_graph.h>
+#include <linux/of_platform.h>
 #include <linux/wait.h>
 
 #include "rcar_du_crtc.h"
@@ -542,6 +544,7 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu)
 static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
 {
 	const struct device_node *np = rcdu->dev->of_node;
+	const char *vsps_prop_name = "renesas,vsps";
 	struct of_phandle_args args;
 	struct {
 		struct device_node *np;
@@ -557,15 +560,21 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
 	 * entry contains a pointer to the VSP DT node and a bitmask of the
 	 * connected DU CRTCs.
 	 */
-	cells = of_property_count_u32_elems(np, "vsps") / rcdu->num_crtcs - 1;
+	ret = of_property_count_u32_elems(np, vsps_prop_name);
+	if (ret < 0) {
+		/* Backward compatibility with old DTBs. */
+		vsps_prop_name = "vsps";
+		ret = of_property_count_u32_elems(np, vsps_prop_name);
+	}
+	cells = ret / rcdu->num_crtcs - 1;
 	if (cells > 1)
 		return -EINVAL;
 
 	for (i = 0; i < rcdu->num_crtcs; ++i) {
 		unsigned int j;
 
-		ret = of_parse_phandle_with_fixed_args(np, "vsps", cells, i,
-						       &args);
+		ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name,
+						       cells, i, &args);
 		if (ret < 0)
 			goto error;
 
@@ -587,8 +596,8 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
 
 		/*
 		 * Store the VSP pointer and pipe index in the CRTC. If the
-		 * second cell of the 'vsps' specifier isn't present, default
-		 * to 0 to remain compatible with older DT bindings.
+		 * second cell of the 'renesas,vsps' specifier isn't present,
+		 * default to 0 to remain compatible with older DT bindings.
 		 */
 		rcdu->crtcs[i].vsp = &rcdu->vsps[j];
 		rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0;
@@ -618,6 +627,75 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
 	return ret;
 }
 
+static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
+{
+	const struct device_node *np = rcdu->dev->of_node;
+	unsigned int i;
+	int cells;
+
+	cells = of_property_count_u32_elems(np, "renesas,cmms");
+	if (cells == -EINVAL)
+		return 0;
+
+	if (cells > rcdu->num_crtcs) {
+		dev_err(rcdu->dev,
+			"Invalid number of entries in 'renesas,cmms'\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < cells; ++i) {
+		struct platform_device *pdev;
+		struct device_link *link;
+		struct device_node *cmm;
+		int ret;
+
+		cmm = of_parse_phandle(np, "renesas,cmms", i);
+		if (IS_ERR(cmm)) {
+			dev_err(rcdu->dev,
+				"Failed to parse 'renesas,cmms' property\n");
+			return PTR_ERR(cmm);
+		}
+
+		if (!of_device_is_available(cmm)) {
+			/* It's fine to have a phandle to a non-enabled CMM. */
+			of_node_put(cmm);
+			continue;
+		}
+
+		pdev = of_find_device_by_node(cmm);
+		if (IS_ERR(pdev)) {
+			dev_err(rcdu->dev, "No device found for CMM%u\n", i);
+			of_node_put(cmm);
+			return PTR_ERR(pdev);
+		}
+
+		of_node_put(cmm);
+
+		/*
+		 * -ENODEV is used to report that the CMM config option is
+		 * disabled: return 0 and let the DU continue probing.
+		 */
+		ret = rcar_cmm_init(pdev);
+		if (ret)
+			return ret == -ENODEV ? 0 : ret;
+
+		/*
+		 * Enforce suspend/resume ordering by making the CMM a provider
+		 * of the DU: CMM is suspended after and resumed before the DU.
+		 */
+		link = device_link_add(rcdu->dev, &pdev->dev, DL_FLAG_STATELESS);
+		if (!link) {
+			dev_err(rcdu->dev,
+				"Failed to create device link to CMM%u\n", i);
+			return -EINVAL;
+		}
+
+		rcdu->cmms[i] = pdev;
+	}
+
+	return 0;
+}
+
 int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 {
 	static const unsigned int mmio_offsets[] = {
@@ -708,6 +786,11 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 			return ret;
 	}
 
+	/* Initialize the Color Management Modules. */
+	ret = rcar_du_cmm_init(rcdu);
+	if (ret)
+		return ret;
+
 	/* Create the CRTCs. */
 	for (swindex = 0, hwindex = 0; swindex < rcdu->num_crtcs; ++hwindex) {
 		struct rcar_du_group *rgrp;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index bc87f08..fb996494 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -197,6 +197,11 @@
 #define DEFR6_MLOS1		(1 << 2)
 #define DEFR6_DEFAULT		(DEFR6_CODE | DEFR6_TCNE1)
 
+#define DEFR7			0x000ec
+#define DEFR7_CODE		(0x7779 << 16)
+#define DEFR7_CMME1		BIT(6)
+#define DEFR7_CMME0		BIT(4)
+
 /* -----------------------------------------------------------------------------
  * R8A7790-only Control Registers
  */
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 8c6c172..8ffa4fb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -21,6 +21,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
+#include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
 
@@ -36,6 +37,12 @@ enum rcar_lvds_mode {
 	RCAR_LVDS_MODE_VESA = 4,
 };
 
+enum rcar_lvds_link_type {
+	RCAR_LVDS_SINGLE_LINK = 0,
+	RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS = 1,
+	RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS = 2,
+};
+
 #define RCAR_LVDS_QUIRK_LANES		BIT(0)	/* LVDS lanes 1 and 3 inverted */
 #define RCAR_LVDS_QUIRK_GEN3_LVEN	BIT(1)	/* LVEN bit needs to be set on R8A77970/R8A7799x */
 #define RCAR_LVDS_QUIRK_PWD		BIT(2)	/* PWD bit available (all of Gen3 but E3) */
@@ -65,11 +72,8 @@ struct rcar_lvds {
 		struct clk *dotclkin[2];	/* External DU clocks */
 	} clocks;
 
-	struct drm_display_mode display_mode;
-	enum rcar_lvds_mode mode;
-
 	struct drm_bridge *companion;
-	bool dual_link;
+	enum rcar_lvds_link_type link_type;
 };
 
 #define bridge_to_rcar_lvds(b) \
@@ -91,7 +95,7 @@ static int rcar_lvds_connector_get_modes(struct drm_connector *connector)
 {
 	struct rcar_lvds *lvds = connector_to_rcar_lvds(connector);
 
-	return drm_panel_get_modes(lvds->panel);
+	return drm_panel_get_modes(lvds->panel, connector);
 }
 
 static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
@@ -402,10 +406,53 @@ EXPORT_SYMBOL_GPL(rcar_lvds_clk_disable);
  * Bridge
  */
 
-static void rcar_lvds_enable(struct drm_bridge *bridge)
+static enum rcar_lvds_mode rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds,
+					const struct drm_connector *connector)
+{
+	const struct drm_display_info *info;
+	enum rcar_lvds_mode mode;
+
+	/*
+	 * There is no API yet to retrieve LVDS mode from a bridge, only panels
+	 * are supported.
+	 */
+	if (!lvds->panel)
+		return RCAR_LVDS_MODE_JEIDA;
+
+	info = &connector->display_info;
+	if (!info->num_bus_formats || !info->bus_formats) {
+		dev_warn(lvds->dev,
+			 "no LVDS bus format reported, using JEIDA\n");
+		return RCAR_LVDS_MODE_JEIDA;
+	}
+
+	switch (info->bus_formats[0]) {
+	case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+	case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+		mode = RCAR_LVDS_MODE_JEIDA;
+		break;
+	case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+		mode = RCAR_LVDS_MODE_VESA;
+		break;
+	default:
+		dev_warn(lvds->dev,
+			 "unsupported LVDS bus format 0x%04x, using JEIDA\n",
+			 info->bus_formats[0]);
+		return RCAR_LVDS_MODE_JEIDA;
+	}
+
+	if (info->bus_flags & DRM_BUS_FLAG_DATA_LSB_TO_MSB)
+		mode |= RCAR_LVDS_MODE_MIRROR;
+
+	return mode;
+}
+
+static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
+				      struct drm_atomic_state *state,
+				      struct drm_crtc *crtc,
+				      struct drm_connector *connector)
 {
 	struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
-	const struct drm_display_mode *mode = &lvds->display_mode;
 	u32 lvdhcr;
 	u32 lvdcr0;
 	int ret;
@@ -415,8 +462,9 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
 		return;
 
 	/* Enable the companion LVDS encoder in dual-link mode. */
-	if (lvds->dual_link && lvds->companion)
-		lvds->companion->funcs->enable(lvds->companion);
+	if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
+		__rcar_lvds_atomic_enable(lvds->companion, state, crtc,
+					  connector);
 
 	/*
 	 * Hardcode the channels and control signals routing for now.
@@ -440,30 +488,51 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
 	rcar_lvds_write(lvds, LVDCHCR, lvdhcr);
 
 	if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK) {
-		/*
-		 * Configure vertical stripe based on the mode of operation of
-		 * the connected device.
-		 */
-		rcar_lvds_write(lvds, LVDSTRIPE,
-				lvds->dual_link ? LVDSTRIPE_ST_ON : 0);
+		u32 lvdstripe = 0;
+
+		if (lvds->link_type != RCAR_LVDS_SINGLE_LINK) {
+			/*
+			 * By default we generate even pixels from the primary
+			 * encoder and odd pixels from the companion encoder.
+			 * Swap pixels around if the sink requires odd pixels
+			 * from the primary encoder and even pixels from the
+			 * companion encoder.
+			 */
+			bool swap_pixels = lvds->link_type ==
+				RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
+
+			/*
+			 * Configure vertical stripe since we are dealing with
+			 * an LVDS dual-link connection.
+			 *
+			 * ST_SWAP is reserved for the companion encoder, only
+			 * set it in the primary encoder.
+			 */
+			lvdstripe = LVDSTRIPE_ST_ON
+				  | (lvds->companion && swap_pixels ?
+				     LVDSTRIPE_ST_SWAP : 0);
+		}
+		rcar_lvds_write(lvds, LVDSTRIPE, lvdstripe);
 	}
 
 	/*
 	 * PLL clock configuration on all instances but the companion in
 	 * dual-link mode.
 	 */
-	if (!lvds->dual_link || lvds->companion)
+	if (lvds->link_type == RCAR_LVDS_SINGLE_LINK || lvds->companion) {
+		const struct drm_crtc_state *crtc_state =
+			drm_atomic_get_new_crtc_state(state, crtc);
+		const struct drm_display_mode *mode =
+			&crtc_state->adjusted_mode;
+
 		lvds->info->pll_setup(lvds, mode->clock * 1000);
+	}
 
 	/* Set the LVDS mode and select the input. */
-	lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT;
+	lvdcr0 = rcar_lvds_get_lvds_mode(lvds, connector) << LVDCR0_LVMD_SHIFT;
 
 	if (lvds->bridge.encoder) {
-		/*
-		 * FIXME: We should really retrieve the CRTC through the state,
-		 * but how do we get a state pointer?
-		 */
-		if (drm_crtc_index(lvds->bridge.encoder->crtc) == 2)
+		if (drm_crtc_index(crtc) == 2)
 			lvdcr0 |= LVDCR0_DUSEL;
 	}
 
@@ -520,7 +589,21 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
 	}
 }
 
-static void rcar_lvds_disable(struct drm_bridge *bridge)
+static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
+				    struct drm_atomic_state *state)
+{
+	struct drm_connector *connector;
+	struct drm_crtc *crtc;
+
+	connector = drm_atomic_get_new_connector_for_encoder(state,
+							     bridge->encoder);
+	crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+
+	__rcar_lvds_atomic_enable(bridge, state, crtc, connector);
+}
+
+static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
+				     struct drm_atomic_state *state)
 {
 	struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
 
@@ -534,8 +617,8 @@ static void rcar_lvds_disable(struct drm_bridge *bridge)
 	rcar_lvds_write(lvds, LVDPLLCR, 0);
 
 	/* Disable the companion LVDS encoder in dual-link mode. */
-	if (lvds->dual_link && lvds->companion)
-		lvds->companion->funcs->disable(lvds->companion);
+	if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
+		lvds->companion->funcs->atomic_disable(lvds->companion, state);
 
 	clk_disable_unprepare(lvds->clocks.mod);
 }
@@ -558,54 +641,6 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
 	return true;
 }
 
-static void rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds)
-{
-	struct drm_display_info *info = &lvds->connector.display_info;
-	enum rcar_lvds_mode mode;
-
-	/*
-	 * There is no API yet to retrieve LVDS mode from a bridge, only panels
-	 * are supported.
-	 */
-	if (!lvds->panel)
-		return;
-
-	if (!info->num_bus_formats || !info->bus_formats) {
-		dev_err(lvds->dev, "no LVDS bus format reported\n");
-		return;
-	}
-
-	switch (info->bus_formats[0]) {
-	case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
-	case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
-		mode = RCAR_LVDS_MODE_JEIDA;
-		break;
-	case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
-		mode = RCAR_LVDS_MODE_VESA;
-		break;
-	default:
-		dev_err(lvds->dev, "unsupported LVDS bus format 0x%04x\n",
-			info->bus_formats[0]);
-		return;
-	}
-
-	if (info->bus_flags & DRM_BUS_FLAG_DATA_LSB_TO_MSB)
-		mode |= RCAR_LVDS_MODE_MIRROR;
-
-	lvds->mode = mode;
-}
-
-static void rcar_lvds_mode_set(struct drm_bridge *bridge,
-			       const struct drm_display_mode *mode,
-			       const struct drm_display_mode *adjusted_mode)
-{
-	struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
-
-	lvds->display_mode = *adjusted_mode;
-
-	rcar_lvds_get_lvds_mode(lvds);
-}
-
 static int rcar_lvds_attach(struct drm_bridge *bridge)
 {
 	struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -647,17 +682,16 @@ static void rcar_lvds_detach(struct drm_bridge *bridge)
 static const struct drm_bridge_funcs rcar_lvds_bridge_ops = {
 	.attach = rcar_lvds_attach,
 	.detach = rcar_lvds_detach,
-	.enable = rcar_lvds_enable,
-	.disable = rcar_lvds_disable,
+	.atomic_enable = rcar_lvds_atomic_enable,
+	.atomic_disable = rcar_lvds_atomic_disable,
 	.mode_fixup = rcar_lvds_mode_fixup,
-	.mode_set = rcar_lvds_mode_set,
 };
 
 bool rcar_lvds_dual_link(struct drm_bridge *bridge)
 {
 	struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
 
-	return lvds->dual_link;
+	return lvds->link_type != RCAR_LVDS_SINGLE_LINK;
 }
 EXPORT_SYMBOL_GPL(rcar_lvds_dual_link);
 
@@ -669,7 +703,10 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
 {
 	const struct of_device_id *match;
 	struct device_node *companion;
+	struct device_node *port0, *port1;
+	struct rcar_lvds *companion_lvds;
 	struct device *dev = lvds->dev;
+	int dual_link;
 	int ret = 0;
 
 	/* Locate the companion LVDS encoder for dual-link operation, if any. */
@@ -688,13 +725,68 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
 		goto done;
 	}
 
+	/*
+	 * We need to work out if the sink is expecting us to function in
+	 * dual-link mode. We do this by looking at the DT port nodes we are
+	 * connected to: if they are marked as expecting even pixels and
+	 * odd pixels, then we need to enable vertical stripe output.
+	 */
+	port0 = of_graph_get_port_by_id(dev->of_node, 1);
+	port1 = of_graph_get_port_by_id(companion, 1);
+	dual_link = drm_of_lvds_get_dual_link_pixel_order(port0, port1);
+	of_node_put(port0);
+	of_node_put(port1);
+
+	switch (dual_link) {
+	case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS:
+		lvds->link_type = RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
+		break;
+	case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS:
+		lvds->link_type = RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS;
+		break;
+	default:
+		/*
+		 * Early dual-link bridge specific implementations populate the
+		 * timings field of drm_bridge. If the flag is set, we assume
+		 * that we are expected to generate even pixels from the primary
+		 * encoder, and odd pixels from the companion encoder.
+		 */
+		if (lvds->next_bridge && lvds->next_bridge->timings &&
+		    lvds->next_bridge->timings->dual_link)
+			lvds->link_type = RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS;
+		else
+			lvds->link_type = RCAR_LVDS_SINGLE_LINK;
+	}
+
+	if (lvds->link_type == RCAR_LVDS_SINGLE_LINK) {
+		dev_dbg(dev, "Single-link configuration detected\n");
+		goto done;
+	}
+
 	lvds->companion = of_drm_find_bridge(companion);
 	if (!lvds->companion) {
 		ret = -EPROBE_DEFER;
 		goto done;
 	}
 
-	dev_dbg(dev, "Found companion encoder %pOF\n", companion);
+	dev_dbg(dev,
+		"Dual-link configuration detected (companion encoder %pOF)\n",
+		companion);
+
+	if (lvds->link_type == RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS)
+		dev_dbg(dev, "Data swapping required\n");
+
+	/*
+	 * FIXME: We should not be messing with the companion encoder private
+	 * data from the primary encoder, we should rather let the companion
+	 * encoder work things out on its own. However, the companion encoder
+	 * doesn't hold a reference to the primary encoder, and
+	 * drm_of_lvds_get_dual_link_pixel_order needs to be given references
+	 * to the output ports of both encoders, therefore leave it like this
+	 * for the time being.
+	 */
+	companion_lvds = bridge_to_rcar_lvds(lvds->companion);
+	companion_lvds->link_type = lvds->link_type;
 
 done:
 	of_node_put(companion);
@@ -704,79 +796,17 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
 
 static int rcar_lvds_parse_dt(struct rcar_lvds *lvds)
 {
-	struct device_node *local_output = NULL;
-	struct device_node *remote_input = NULL;
-	struct device_node *remote = NULL;
-	struct device_node *node;
-	bool is_bridge = false;
-	int ret = 0;
+	int ret;
 
-	local_output = of_graph_get_endpoint_by_regs(lvds->dev->of_node, 1, 0);
-	if (!local_output) {
-		dev_dbg(lvds->dev, "unconnected port@1\n");
-		ret = -ENODEV;
+	ret = drm_of_find_panel_or_bridge(lvds->dev->of_node, 1, 0,
+					  &lvds->panel, &lvds->next_bridge);
+	if (ret)
 		goto done;
-	}
 
-	/*
-	 * Locate the connected entity and infer its type from the number of
-	 * endpoints.
-	 */
-	remote = of_graph_get_remote_port_parent(local_output);
-	if (!remote) {
-		dev_dbg(lvds->dev, "unconnected endpoint %pOF\n", local_output);
-		ret = -ENODEV;
-		goto done;
-	}
-
-	if (!of_device_is_available(remote)) {
-		dev_dbg(lvds->dev, "connected entity %pOF is disabled\n",
-			remote);
-		ret = -ENODEV;
-		goto done;
-	}
-
-	remote_input = of_graph_get_remote_endpoint(local_output);
-
-	for_each_endpoint_of_node(remote, node) {
-		if (node != remote_input) {
-			/*
-			 * We've found one endpoint other than the input, this
-			 * must be a bridge.
-			 */
-			is_bridge = true;
-			of_node_put(node);
-			break;
-		}
-	}
-
-	if (is_bridge) {
-		lvds->next_bridge = of_drm_find_bridge(remote);
-		if (!lvds->next_bridge) {
-			ret = -EPROBE_DEFER;
-			goto done;
-		}
-
-		if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK)
-			lvds->dual_link = lvds->next_bridge->timings
-					? lvds->next_bridge->timings->dual_link
-					: false;
-	} else {
-		lvds->panel = of_drm_find_panel(remote);
-		if (IS_ERR(lvds->panel)) {
-			ret = PTR_ERR(lvds->panel);
-			goto done;
-		}
-	}
-
-	if (lvds->dual_link)
+	if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK)
 		ret = rcar_lvds_parse_dt_companion(lvds);
 
 done:
-	of_node_put(local_output);
-	of_node_put(remote_input);
-	of_node_put(remote);
-
 	/*
 	 * On D3/E3 the LVDS encoder provides a clock to the DU, which can be
 	 * used for the DPAD output even when the LVDS output is not connected.
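For reference, the LVDS mode is now derived at enable time from the connector's reported bus format instead of being cached in a .mode_set callback: MEDIA_BUS_FMT_RGB888_1X7X4_SPWG selects VESA mode, while the JEIDA formats (or a missing report) fall back to JEIDA. A hedged sketch of the panel-side report this relies on (hypothetical panel driver; drm_display_info_set_bus_formats() is the standard DRM helper for this, to the best of my knowledge):

	static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;

	/* In the panel's .get_modes(): advertise the LVDS data mapping. */
	drm_display_info_set_bus_formats(&connector->display_info,
					 &bus_format, 1);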
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 6f4222f..310aa15 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -28,17 +28,17 @@
 	  on RK3288 or RK3399 based SoC, you should select this option.
 
 config ROCKCHIP_CDN_DP
-        bool "Rockchip cdn DP"
+	bool "Rockchip cdn DP"
 	depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
-        help
+	help
 	  This selects support for Rockchip SoC specific extensions
 	  for the cdn DP driver. If you want to enable Dp on
 	  RK3399 based SoC, you should select this
 	  option.
 
 config ROCKCHIP_DW_HDMI
-        bool "Rockchip specific extensions for Synopsys DW HDMI"
-        help
+	bool "Rockchip specific extensions for Synopsys DW HDMI"
+	help
 	  This selects support for Rockchip SoC specific extensions
 	  for the Synopsys DesignWare HDMI driver. If you want to
 	  enable HDMI on RK3288 or RK3399 based SoC, you should select
@@ -46,6 +46,7 @@
 
 config ROCKCHIP_DW_MIPI_DSI
 	bool "Rockchip specific extensions for Synopsys DW MIPI DSI"
+	select GENERIC_PHY_MIPI_DPHY
 	help
 	  This selects support for Rockchip SoC specific extensions
 	  for the Synopsys DesignWare HDMI driver. If you want to
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index bc073ec..6e1270e 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -12,6 +12,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/phy/phy.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 
@@ -139,6 +140,12 @@
 #define DW_MIPI_NEEDS_PHY_CFG_CLK	BIT(0)
 #define DW_MIPI_NEEDS_GRF_CLK		BIT(1)
 
+#define PX30_GRF_PD_VO_CON1		0x0438
+#define PX30_DSI_FORCETXSTOPMODE	(0xf << 7)
+#define PX30_DSI_FORCERXMODE		BIT(6)
+#define PX30_DSI_TURNDISABLE		BIT(5)
+#define PX30_DSI_LCDC_SEL		BIT(0)
+
 #define RK3288_GRF_SOC_CON6		0x025c
 #define RK3288_DSI0_LCDC_SEL		BIT(6)
 #define RK3288_DSI1_LCDC_SEL		BIT(9)
@@ -223,6 +230,10 @@ struct dw_mipi_dsi_rockchip {
 	bool is_slave;
 	struct dw_mipi_dsi_rockchip *slave;
 
+	/* optional external dphy */
+	struct phy *phy;
+	union phy_configure_opts phy_opts;
+
 	unsigned int lane_mbps; /* per lane */
 	u16 input_div;
 	u16 feedback_div;
@@ -359,6 +370,9 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
 	struct dw_mipi_dsi_rockchip *dsi = priv_data;
 	int ret, i, vco;
 
+	if (dsi->phy)
+		return 0;
+
 	/*
 	 * Get vco from frequency(lane_mbps)
 	 * vco	frequency table
@@ -467,6 +481,28 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
 	return ret;
 }
 
+static void dw_mipi_dsi_phy_power_on(void *priv_data)
+{
+	struct dw_mipi_dsi_rockchip *dsi = priv_data;
+	int ret;
+
+	ret = phy_set_mode(dsi->phy, PHY_MODE_MIPI_DPHY);
+	if (ret) {
+		DRM_DEV_ERROR(dsi->dev, "failed to set phy mode: %d\n", ret);
+		return;
+	}
+
+	phy_configure(dsi->phy, &dsi->phy_opts);
+	phy_power_on(dsi->phy);
+}
+
+static void dw_mipi_dsi_phy_power_off(void *priv_data)
+{
+	struct dw_mipi_dsi_rockchip *dsi = priv_data;
+
+	phy_power_off(dsi->phy);
+}
+
 static int
 dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
 			  unsigned long mode_flags, u32 lanes, u32 format,
@@ -504,6 +540,17 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
 				      "DPHY clock frequency is out of range\n");
 	}
 
+	/* for an external phy only the mipi_dphy_config is necessary */
+	if (dsi->phy) {
+		phy_mipi_dphy_get_default_config(mode->clock * 1000 * 10 / 8,
+						 bpp, lanes,
+						 &dsi->phy_opts.mipi_dphy);
+		dsi->lane_mbps = target_mbps;
+		*lane_mbps = dsi->lane_mbps;
+
+		return 0;
+	}
+
 	fin = clk_get_rate(dsi->pllref_clk);
 	fout = target_mbps * USEC_PER_SEC;
 
@@ -559,9 +606,89 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
 	return 0;
 }
 
+struct hstt {
+	unsigned int maxfreq;
+	struct dw_mipi_dsi_dphy_timing timing;
+};
+
+#define HSTT(_maxfreq, _c_lp2hs, _c_hs2lp, _d_lp2hs, _d_hs2lp)	\
+{					\
+	.maxfreq = _maxfreq,		\
+	.timing = {			\
+		.clk_lp2hs = _c_lp2hs,	\
+		.clk_hs2lp = _c_hs2lp,	\
+		.data_lp2hs = _d_lp2hs,	\
+		.data_hs2lp = _d_hs2lp,	\
+	}				\
+}
+
+/* Table A-3 High-Speed Transition Times */
+struct hstt hstt_table[] = {
+	HSTT(  90,  32, 20,  26, 13),
+	HSTT( 100,  35, 23,  28, 14),
+	HSTT( 110,  32, 22,  26, 13),
+	HSTT( 130,  31, 20,  27, 13),
+	HSTT( 140,  33, 22,  26, 14),
+	HSTT( 150,  33, 21,  26, 14),
+	HSTT( 170,  32, 20,  27, 13),
+	HSTT( 180,  36, 23,  30, 15),
+	HSTT( 200,  40, 22,  33, 15),
+	HSTT( 220,  40, 22,  33, 15),
+	HSTT( 240,  44, 24,  36, 16),
+	HSTT( 250,  48, 24,  38, 17),
+	HSTT( 270,  48, 24,  38, 17),
+	HSTT( 300,  50, 27,  41, 18),
+	HSTT( 330,  56, 28,  45, 18),
+	HSTT( 360,  59, 28,  48, 19),
+	HSTT( 400,  61, 30,  50, 20),
+	HSTT( 450,  67, 31,  55, 21),
+	HSTT( 500,  73, 31,  59, 22),
+	HSTT( 550,  79, 36,  63, 24),
+	HSTT( 600,  83, 37,  68, 25),
+	HSTT( 650,  90, 38,  73, 27),
+	HSTT( 700,  95, 40,  77, 28),
+	HSTT( 750, 102, 40,  84, 28),
+	HSTT( 800, 106, 42,  87, 30),
+	HSTT( 850, 113, 44,  93, 31),
+	HSTT( 900, 118, 47,  98, 32),
+	HSTT( 950, 124, 47, 102, 34),
+	HSTT(1000, 130, 49, 107, 35),
+	HSTT(1050, 135, 51, 111, 37),
+	HSTT(1100, 139, 51, 114, 38),
+	HSTT(1150, 146, 54, 120, 40),
+	HSTT(1200, 153, 57, 125, 41),
+	HSTT(1250, 158, 58, 130, 42),
+	HSTT(1300, 163, 58, 135, 44),
+	HSTT(1350, 168, 60, 140, 45),
+	HSTT(1400, 172, 64, 144, 47),
+	HSTT(1450, 176, 65, 148, 48),
+	HSTT(1500, 181, 66, 153, 50)
+};
+
+static int
+dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
+			   struct dw_mipi_dsi_dphy_timing *timing)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hstt_table); i++)
+		if (lane_mbps < hstt_table[i].maxfreq)
+			break;
+
+	if (i == ARRAY_SIZE(hstt_table))
+		i--;
+
+	*timing = hstt_table[i].timing;
+
+	return 0;
+}
+
 static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_rockchip_phy_ops = {
 	.init = dw_mipi_dsi_phy_init,
+	.power_on = dw_mipi_dsi_phy_power_on,
+	.power_off = dw_mipi_dsi_phy_power_off,
 	.get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
+	.get_timing = dw_mipi_dsi_phy_get_timing,
 };
 
 static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
@@ -916,16 +1043,33 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
 	}
 
 	if (!dsi->cdata) {
-		dev_err(dev, "no dsi-config for %s node\n", np->name);
+		DRM_DEV_ERROR(dev, "no dsi-config for %s node\n", np->name);
 		return -EINVAL;
 	}
 
+	/* try to get a possible external dphy */
+	dsi->phy = devm_phy_optional_get(dev, "dphy");
+	if (IS_ERR(dsi->phy)) {
+		ret = PTR_ERR(dsi->phy);
+		DRM_DEV_ERROR(dev, "failed to get mipi dphy: %d\n", ret);
+		return ret;
+	}
+
 	dsi->pllref_clk = devm_clk_get(dev, "ref");
 	if (IS_ERR(dsi->pllref_clk)) {
-		ret = PTR_ERR(dsi->pllref_clk);
-		DRM_DEV_ERROR(dev,
-			      "Unable to get pll reference clock: %d\n", ret);
-		return ret;
+		if (dsi->phy) {
+			/*
+			 * if external phy is present, pll will be
+			 * generated there.
+			 */
+			dsi->pllref_clk = NULL;
+		} else {
+			ret = PTR_ERR(dsi->pllref_clk);
+			DRM_DEV_ERROR(dev,
+				      "Unable to get pll reference clock: %d\n",
+				      ret);
+			return ret;
+		}
 	}
 
 	if (dsi->cdata->flags & DW_MIPI_NEEDS_PHY_CFG_CLK) {
@@ -989,6 +1133,24 @@ static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct rockchip_dw_dsi_chip_data px30_chip_data[] = {
+	{
+		.reg = 0xff450000,
+		.lcdsel_grf_reg = PX30_GRF_PD_VO_CON1,
+		.lcdsel_big = HIWORD_UPDATE(0, PX30_DSI_LCDC_SEL),
+		.lcdsel_lit = HIWORD_UPDATE(PX30_DSI_LCDC_SEL,
+					    PX30_DSI_LCDC_SEL),
+
+		.lanecfg1_grf_reg = PX30_GRF_PD_VO_CON1,
+		.lanecfg1 = HIWORD_UPDATE(0, PX30_DSI_TURNDISABLE |
+					     PX30_DSI_FORCERXMODE |
+					     PX30_DSI_FORCETXSTOPMODE),
+
+		.max_data_lanes = 4,
+	},
+	{ /* sentinel */ }
+};
+
 static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = {
 	{
 		.reg = 0xff960000,
@@ -1057,6 +1219,9 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
 
 static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = {
 	{
+	 .compatible = "rockchip,px30-mipi-dsi",
+	 .data = &px30_chip_data,
+	}, {
 	 .compatible = "rockchip,rk3288-mipi-dsi",
 	 .data = &rk3288_chip_data,
 	}, {
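As a worked example of the get_timing lookup added above (values taken from hstt_table): assuming a negotiated rate of lane_mbps = 960, the loop stops at the first row whose maxfreq exceeds it, HSTT(1000, 130, 49, 107, 35), so clk_lp2hs = 130, clk_hs2lp = 49, data_lp2hs = 107 and data_hs2lp = 35. Rates above the last row (1500 Mbps) reuse that final entry.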
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index ed344a7..e5864e8 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -624,8 +624,10 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
 
 	drm_connector_helper_add(&hdmi->connector,
 				 &inno_hdmi_connector_helper_funcs);
-	drm_connector_init(drm, &hdmi->connector, &inno_hdmi_connector_funcs,
-			   DRM_MODE_CONNECTOR_HDMIA);
+	drm_connector_init_with_ddc(drm, &hdmi->connector,
+				    &inno_hdmi_connector_funcs,
+				    DRM_MODE_CONNECTOR_HDMIA,
+				    hdmi->ddc);
 
 	drm_connector_attach_encoder(&hdmi->connector, encoder);
 
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index cdb401f..fe203d3 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -564,9 +564,10 @@ rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
 
 	drm_connector_helper_add(&hdmi->connector,
 				 &rk3066_hdmi_connector_helper_funcs);
-	drm_connector_init(drm, &hdmi->connector,
-			   &rk3066_hdmi_connector_funcs,
-			   DRM_MODE_CONNECTOR_HDMIA);
+	drm_connector_init_with_ddc(drm, &hdmi->connector,
+				    &rk3066_hdmi_connector_funcs,
+				    DRM_MODE_CONNECTOR_HDMIA,
+				    hdmi->ddc);
 
 	drm_connector_attach_encoder(&hdmi->connector, encoder);
 
@@ -640,6 +641,9 @@ static int rk3066_hdmi_i2c_write(struct rk3066_hdmi *hdmi, struct i2c_msg *msgs)
 	if (msgs->addr == DDC_ADDR)
 		hdmi->i2c->ddc_addr = msgs->buf[0];
 
+	/* Set edid fifo first address. */
+	hdmi_writeb(hdmi, HDMI_EDID_FIFO_ADDR, 0x00);
+
 	/* Set edid word address 0x00/0x80. */
 	hdmi_writeb(hdmi, HDMI_EDID_WORD_ADDR, hdmi->i2c->ddc_addr);
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index ca01234..221e72e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -53,64 +53,12 @@ rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cm
 	return fb;
 }
 
-static struct drm_framebuffer *
-rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
-			const struct drm_mode_fb_cmd2 *mode_cmd)
-{
-	const struct drm_format_info *info = drm_get_format_info(dev,
-								 mode_cmd);
-	struct drm_framebuffer *fb;
-	struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
-	struct drm_gem_object *obj;
-	int num_planes = min_t(int, info->num_planes, ROCKCHIP_MAX_FB_BUFFER);
-	int ret;
-	int i;
-
-	for (i = 0; i < num_planes; i++) {
-		unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
-		unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
-		unsigned int min_size;
-
-		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
-		if (!obj) {
-			DRM_DEV_ERROR(dev->dev,
-				      "Failed to lookup GEM object\n");
-			ret = -ENXIO;
-			goto err_gem_object_unreference;
-		}
-
-		min_size = (height - 1) * mode_cmd->pitches[i] +
-			mode_cmd->offsets[i] +
-			width * info->cpp[i];
-
-		if (obj->size < min_size) {
-			drm_gem_object_put_unlocked(obj);
-			ret = -EINVAL;
-			goto err_gem_object_unreference;
-		}
-		objs[i] = obj;
-	}
-
-	fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
-	if (IS_ERR(fb)) {
-		ret = PTR_ERR(fb);
-		goto err_gem_object_unreference;
-	}
-
-	return fb;
-
-err_gem_object_unreference:
-	for (i--; i >= 0; i--)
-		drm_gem_object_put_unlocked(objs[i]);
-	return ERR_PTR(ret);
-}
-
 static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
 	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 };
 
 static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
-	.fb_create = rockchip_user_fb_create,
+	.fb_create = drm_gem_fb_create_with_dirty,
 	.output_poll_changed = drm_fb_helper_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 02be6c5..521fe42 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -27,7 +27,7 @@ static int rockchip_fbdev_mmap(struct fb_info *info,
 	return rockchip_gem_mmap_buf(private->fbdev_bo, vma);
 }
 
-static struct fb_ops rockchip_drm_fbdev_ops = {
+static const struct fb_ops rockchip_drm_fbdev_ops = {
 	.owner		= THIS_MODULE,
 	DRM_FB_HELPER_DEFAULT_OPS,
 	.fb_mmap	= rockchip_fbdev_mmap,
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 8a4c9af..f25a367 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -10,6 +10,7 @@
 #include <linux/component.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of_graph.h>
+#include <linux/phy/phy.h>
 #include <linux/pinctrl/devinfo.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
@@ -31,6 +32,8 @@
 #define DISPLAY_OUTPUT_LVDS		1
 #define DISPLAY_OUTPUT_DUAL_LVDS	2
 
+struct rockchip_lvds;
+
 #define connector_to_lvds(c) \
 		container_of(c, struct rockchip_lvds, connector)
 
@@ -39,16 +42,12 @@
 
 /**
  * rockchip_lvds_soc_data - rockchip lvds Soc private data
- * @ch1_offset: lvds channel 1 registe offset
- * grf_soc_con6: general registe offset for LVDS contrl
- * grf_soc_con7: general registe offset for LVDS contrl
- * has_vop_sel: to indicate whether need to choose from different VOP.
+ * @probe: LVDS platform probe function
+ * @helper_funcs: LVDS connector helper functions
  */
 struct rockchip_lvds_soc_data {
-	u32 ch1_offset;
-	int grf_soc_con6;
-	int grf_soc_con7;
-	bool has_vop_sel;
+	int (*probe)(struct platform_device *pdev, struct rockchip_lvds *lvds);
+	const struct drm_encoder_helper_funcs *helper_funcs;
 };
 
 struct rockchip_lvds {
@@ -56,6 +55,7 @@ struct rockchip_lvds {
 	void __iomem *regs;
 	struct regmap *grf;
 	struct clk *pclk;
+	struct phy *dphy;
 	const struct rockchip_lvds_soc_data *soc_data;
 	int output; /* rgb lvds or dual lvds output */
 	int format; /* vesa or jeida format */
@@ -67,15 +67,16 @@ struct rockchip_lvds {
 	struct dev_pin_info *pins;
 };
 
-static inline void lvds_writel(struct rockchip_lvds *lvds, u32 offset, u32 val)
+static inline void rk3288_writel(struct rockchip_lvds *lvds, u32 offset,
+				 u32 val)
 {
 	writel_relaxed(val, lvds->regs + offset);
 	if (lvds->output == DISPLAY_OUTPUT_LVDS)
 		return;
-	writel_relaxed(val, lvds->regs + offset + lvds->soc_data->ch1_offset);
+	writel_relaxed(val, lvds->regs + offset + RK3288_LVDS_CH1_OFFSET);
 }
 
-static inline int lvds_name_to_format(const char *s)
+static inline int rockchip_lvds_name_to_format(const char *s)
 {
 	if (strncmp(s, "jeida-18", 8) == 0)
 		return LVDS_JEIDA_18;
@@ -87,7 +88,7 @@ static inline int lvds_name_to_format(const char *s)
 	return -EINVAL;
 }
 
-static inline int lvds_name_to_output(const char *s)
+static inline int rockchip_lvds_name_to_output(const char *s)
 {
 	if (strncmp(s, "rgb", 3) == 0)
 		return DISPLAY_OUTPUT_RGB;
@@ -99,7 +100,41 @@ static inline int lvds_name_to_output(const char *s)
 	return -EINVAL;
 }
 
-static int rockchip_lvds_poweron(struct rockchip_lvds *lvds)
+static const struct drm_connector_funcs rockchip_lvds_connector_funcs = {
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = drm_connector_cleanup,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int rockchip_lvds_connector_get_modes(struct drm_connector *connector)
+{
+	struct rockchip_lvds *lvds = connector_to_lvds(connector);
+	struct drm_panel *panel = lvds->panel;
+
+	return drm_panel_get_modes(panel, connector);
+}
+
+static const
+struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = {
+	.get_modes = rockchip_lvds_connector_get_modes,
+};
+
+static int
+rockchip_lvds_encoder_atomic_check(struct drm_encoder *encoder,
+				   struct drm_crtc_state *crtc_state,
+				   struct drm_connector_state *conn_state)
+{
+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+	s->output_mode = ROCKCHIP_OUT_MODE_P888;
+	s->output_type = DRM_MODE_CONNECTOR_LVDS;
+
+	return 0;
+}
+
+static int rk3288_lvds_poweron(struct rockchip_lvds *lvds)
 {
 	int ret;
 	u32 val;
@@ -121,66 +156,73 @@ static int rockchip_lvds_poweron(struct rockchip_lvds *lvds)
 	if (lvds->output == DISPLAY_OUTPUT_RGB) {
 		val |= RK3288_LVDS_CH0_REG0_TTL_EN |
 			RK3288_LVDS_CH0_REG0_LANECK_EN;
-		lvds_writel(lvds, RK3288_LVDS_CH0_REG0, val);
-		lvds_writel(lvds, RK3288_LVDS_CH0_REG2,
-			    RK3288_LVDS_PLL_FBDIV_REG2(0x46));
-		lvds_writel(lvds, RK3288_LVDS_CH0_REG4,
-			    RK3288_LVDS_CH0_REG4_LANECK_TTL_MODE |
-			    RK3288_LVDS_CH0_REG4_LANE4_TTL_MODE |
-			    RK3288_LVDS_CH0_REG4_LANE3_TTL_MODE |
-			    RK3288_LVDS_CH0_REG4_LANE2_TTL_MODE |
-			    RK3288_LVDS_CH0_REG4_LANE1_TTL_MODE |
-			    RK3288_LVDS_CH0_REG4_LANE0_TTL_MODE);
-		lvds_writel(lvds, RK3288_LVDS_CH0_REG5,
-			    RK3288_LVDS_CH0_REG5_LANECK_TTL_DATA |
-			    RK3288_LVDS_CH0_REG5_LANE4_TTL_DATA |
-			    RK3288_LVDS_CH0_REG5_LANE3_TTL_DATA |
-			    RK3288_LVDS_CH0_REG5_LANE2_TTL_DATA |
-			    RK3288_LVDS_CH0_REG5_LANE1_TTL_DATA |
-			    RK3288_LVDS_CH0_REG5_LANE0_TTL_DATA);
+		rk3288_writel(lvds, RK3288_LVDS_CH0_REG0, val);
+		rk3288_writel(lvds, RK3288_LVDS_CH0_REG2,
+			      RK3288_LVDS_PLL_FBDIV_REG2(0x46));
+		rk3288_writel(lvds, RK3288_LVDS_CH0_REG4,
+			      RK3288_LVDS_CH0_REG4_LANECK_TTL_MODE |
+			      RK3288_LVDS_CH0_REG4_LANE4_TTL_MODE |
+			      RK3288_LVDS_CH0_REG4_LANE3_TTL_MODE |
+			      RK3288_LVDS_CH0_REG4_LANE2_TTL_MODE |
+			      RK3288_LVDS_CH0_REG4_LANE1_TTL_MODE |
+			      RK3288_LVDS_CH0_REG4_LANE0_TTL_MODE);
+		rk3288_writel(lvds, RK3288_LVDS_CH0_REG5,
+			      RK3288_LVDS_CH0_REG5_LANECK_TTL_DATA |
+			      RK3288_LVDS_CH0_REG5_LANE4_TTL_DATA |
+			      RK3288_LVDS_CH0_REG5_LANE3_TTL_DATA |
+			      RK3288_LVDS_CH0_REG5_LANE2_TTL_DATA |
+			      RK3288_LVDS_CH0_REG5_LANE1_TTL_DATA |
+			      RK3288_LVDS_CH0_REG5_LANE0_TTL_DATA);
 	} else {
 		val |= RK3288_LVDS_CH0_REG0_LVDS_EN |
 			    RK3288_LVDS_CH0_REG0_LANECK_EN;
-		lvds_writel(lvds, RK3288_LVDS_CH0_REG0, val);
-		lvds_writel(lvds, RK3288_LVDS_CH0_REG1,
-			    RK3288_LVDS_CH0_REG1_LANECK_BIAS |
-			    RK3288_LVDS_CH0_REG1_LANE4_BIAS |
-			    RK3288_LVDS_CH0_REG1_LANE3_BIAS |
-			    RK3288_LVDS_CH0_REG1_LANE2_BIAS |
-			    RK3288_LVDS_CH0_REG1_LANE1_BIAS |
-			    RK3288_LVDS_CH0_REG1_LANE0_BIAS);
-		lvds_writel(lvds, RK3288_LVDS_CH0_REG2,
-			    RK3288_LVDS_CH0_REG2_RESERVE_ON |
-			    RK3288_LVDS_CH0_REG2_LANECK_LVDS_MODE |
-			    RK3288_LVDS_CH0_REG2_LANE4_LVDS_MODE |
-			    RK3288_LVDS_CH0_REG2_LANE3_LVDS_MODE |
-			    RK3288_LVDS_CH0_REG2_LANE2_LVDS_MODE |
-			    RK3288_LVDS_CH0_REG2_LANE1_LVDS_MODE |
-			    RK3288_LVDS_CH0_REG2_LANE0_LVDS_MODE |
-			    RK3288_LVDS_PLL_FBDIV_REG2(0x46));
-		lvds_writel(lvds, RK3288_LVDS_CH0_REG4, 0x00);
-		lvds_writel(lvds, RK3288_LVDS_CH0_REG5, 0x00);
+		rk3288_writel(lvds, RK3288_LVDS_CH0_REG0, val);
+		rk3288_writel(lvds, RK3288_LVDS_CH0_REG1,
+			      RK3288_LVDS_CH0_REG1_LANECK_BIAS |
+			      RK3288_LVDS_CH0_REG1_LANE4_BIAS |
+			      RK3288_LVDS_CH0_REG1_LANE3_BIAS |
+			      RK3288_LVDS_CH0_REG1_LANE2_BIAS |
+			      RK3288_LVDS_CH0_REG1_LANE1_BIAS |
+			      RK3288_LVDS_CH0_REG1_LANE0_BIAS);
+		rk3288_writel(lvds, RK3288_LVDS_CH0_REG2,
+			      RK3288_LVDS_CH0_REG2_RESERVE_ON |
+			      RK3288_LVDS_CH0_REG2_LANECK_LVDS_MODE |
+			      RK3288_LVDS_CH0_REG2_LANE4_LVDS_MODE |
+			      RK3288_LVDS_CH0_REG2_LANE3_LVDS_MODE |
+			      RK3288_LVDS_CH0_REG2_LANE2_LVDS_MODE |
+			      RK3288_LVDS_CH0_REG2_LANE1_LVDS_MODE |
+			      RK3288_LVDS_CH0_REG2_LANE0_LVDS_MODE |
+			      RK3288_LVDS_PLL_FBDIV_REG2(0x46));
+		rk3288_writel(lvds, RK3288_LVDS_CH0_REG4, 0x00);
+		rk3288_writel(lvds, RK3288_LVDS_CH0_REG5, 0x00);
 	}
-	lvds_writel(lvds, RK3288_LVDS_CH0_REG3, RK3288_LVDS_PLL_FBDIV_REG3(0x46));
-	lvds_writel(lvds, RK3288_LVDS_CH0_REGD, RK3288_LVDS_PLL_PREDIV_REGD(0x0a));
-	lvds_writel(lvds, RK3288_LVDS_CH0_REG20, RK3288_LVDS_CH0_REG20_LSB);
+	rk3288_writel(lvds, RK3288_LVDS_CH0_REG3,
+		      RK3288_LVDS_PLL_FBDIV_REG3(0x46));
+	rk3288_writel(lvds, RK3288_LVDS_CH0_REGD,
+		      RK3288_LVDS_PLL_PREDIV_REGD(0x0a));
+	rk3288_writel(lvds, RK3288_LVDS_CH0_REG20,
+		      RK3288_LVDS_CH0_REG20_LSB);
 
-	lvds_writel(lvds, RK3288_LVDS_CFG_REGC, RK3288_LVDS_CFG_REGC_PLL_ENABLE);
-	lvds_writel(lvds, RK3288_LVDS_CFG_REG21, RK3288_LVDS_CFG_REG21_TX_ENABLE);
+	rk3288_writel(lvds, RK3288_LVDS_CFG_REGC,
+		      RK3288_LVDS_CFG_REGC_PLL_ENABLE);
+	rk3288_writel(lvds, RK3288_LVDS_CFG_REG21,
+		      RK3288_LVDS_CFG_REG21_TX_ENABLE);
 
 	return 0;
 }
 
-static void rockchip_lvds_poweroff(struct rockchip_lvds *lvds)
+static void rk3288_lvds_poweroff(struct rockchip_lvds *lvds)
 {
 	int ret;
 	u32 val;
 
-	lvds_writel(lvds, RK3288_LVDS_CFG_REG21, RK3288_LVDS_CFG_REG21_TX_ENABLE);
-	lvds_writel(lvds, RK3288_LVDS_CFG_REGC, RK3288_LVDS_CFG_REGC_PLL_ENABLE);
+	rk3288_writel(lvds, RK3288_LVDS_CFG_REG21,
+		      RK3288_LVDS_CFG_REG21_TX_ENABLE);
+	rk3288_writel(lvds, RK3288_LVDS_CFG_REGC,
+		      RK3288_LVDS_CFG_REGC_PLL_ENABLE);
 	val = LVDS_DUAL | LVDS_TTL_EN | LVDS_CH0_EN | LVDS_CH1_EN | LVDS_PWRDN;
 	val |= val << 16;
-	ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con7, val);
+	ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON7, val);
 	if (ret != 0)
 		DRM_DEV_ERROR(lvds->dev, "Could not write to GRF: %d\n", ret);
 
@@ -188,29 +230,8 @@ static void rockchip_lvds_poweroff(struct rockchip_lvds *lvds)
 	clk_disable(lvds->pclk);
 }
 
-static const struct drm_connector_funcs rockchip_lvds_connector_funcs = {
-	.fill_modes = drm_helper_probe_single_connector_modes,
-	.destroy = drm_connector_cleanup,
-	.reset = drm_atomic_helper_connector_reset,
-	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int rockchip_lvds_connector_get_modes(struct drm_connector *connector)
-{
-	struct rockchip_lvds *lvds = connector_to_lvds(connector);
-	struct drm_panel *panel = lvds->panel;
-
-	return drm_panel_get_modes(panel);
-}
-
-static const
-struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = {
-	.get_modes = rockchip_lvds_connector_get_modes,
-};
-
-static void rockchip_lvds_grf_config(struct drm_encoder *encoder,
-				     struct drm_display_mode *mode)
+static int rk3288_lvds_grf_config(struct drm_encoder *encoder,
+				  struct drm_display_mode *mode)
 {
 	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
 	u8 pin_hsync = (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 1 : 0;
@@ -234,22 +255,19 @@ static void rockchip_lvds_grf_config(struct drm_encoder *encoder,
 
 	val |= (pin_dclk << 8) | (pin_hsync << 9);
 	val |= (0xffff << 16);
-	ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con7, val);
-	if (ret != 0) {
+	ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON7, val);
+	if (ret)
 		DRM_DEV_ERROR(lvds->dev, "Could not write to GRF: %d\n", ret);
-		return;
-	}
+
+	return ret;
 }
 
-static int rockchip_lvds_set_vop_source(struct rockchip_lvds *lvds,
-					struct drm_encoder *encoder)
+static int rk3288_lvds_set_vop_source(struct rockchip_lvds *lvds,
+				      struct drm_encoder *encoder)
 {
 	u32 val;
 	int ret;
 
-	if (!lvds->soc_data->has_vop_sel)
-		return 0;
-
 	ret = drm_of_encoder_active_endpoint_id(lvds->dev->of_node, encoder);
 	if (ret < 0)
 		return ret;
@@ -258,56 +276,162 @@ static int rockchip_lvds_set_vop_source(struct rockchip_lvds *lvds,
 	if (ret)
 		val |= RK3288_LVDS_SOC_CON6_SEL_VOP_LIT;
 
-	ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con6, val);
+	ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON6, val);
 	if (ret < 0)
 		return ret;
 
 	return 0;
 }
 
-static int
-rockchip_lvds_encoder_atomic_check(struct drm_encoder *encoder,
-				   struct drm_crtc_state *crtc_state,
-				   struct drm_connector_state *conn_state)
-{
-	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
-
-	s->output_mode = ROCKCHIP_OUT_MODE_P888;
-	s->output_type = DRM_MODE_CONNECTOR_LVDS;
-
-	return 0;
-}
-
-static void rockchip_lvds_encoder_enable(struct drm_encoder *encoder)
+static void rk3288_lvds_encoder_enable(struct drm_encoder *encoder)
 {
 	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
 	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
 	int ret;
 
 	drm_panel_prepare(lvds->panel);
-	ret = rockchip_lvds_poweron(lvds);
+
+	ret = rk3288_lvds_poweron(lvds);
 	if (ret < 0) {
-		DRM_DEV_ERROR(lvds->dev, "failed to power on lvds: %d\n", ret);
+		DRM_DEV_ERROR(lvds->dev, "failed to power on LVDS: %d\n", ret);
 		drm_panel_unprepare(lvds->panel);
+		return;
 	}
-	rockchip_lvds_grf_config(encoder, mode);
-	rockchip_lvds_set_vop_source(lvds, encoder);
+
+	ret = rk3288_lvds_grf_config(encoder, mode);
+	if (ret) {
+		DRM_DEV_ERROR(lvds->dev, "failed to configure LVDS: %d\n", ret);
+		drm_panel_unprepare(lvds->panel);
+		return;
+	}
+
+	ret = rk3288_lvds_set_vop_source(lvds, encoder);
+	if (ret) {
+		DRM_DEV_ERROR(lvds->dev, "failed to set VOP source: %d\n", ret);
+		drm_panel_unprepare(lvds->panel);
+		return;
+	}
+
 	drm_panel_enable(lvds->panel);
 }
 
-static void rockchip_lvds_encoder_disable(struct drm_encoder *encoder)
+static void rk3288_lvds_encoder_disable(struct drm_encoder *encoder)
 {
 	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
 
 	drm_panel_disable(lvds->panel);
-	rockchip_lvds_poweroff(lvds);
+	rk3288_lvds_poweroff(lvds);
+	drm_panel_unprepare(lvds->panel);
+}
+
+static int px30_lvds_poweron(struct rockchip_lvds *lvds)
+{
+	int ret;
+
+	ret = pm_runtime_get_sync(lvds->dev);
+	if (ret < 0) {
+		DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
+		return ret;
+	}
+
+	/* Enable LVDS mode */
+	return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+				  PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1),
+				  PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1));
+}
+
+static void px30_lvds_poweroff(struct rockchip_lvds *lvds)
+{
+	regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+			   PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1),
+			   PX30_LVDS_MODE_EN(0) | PX30_LVDS_P2S_EN(0));
+
+	pm_runtime_put(lvds->dev);
+}
+
+static int px30_lvds_grf_config(struct drm_encoder *encoder,
+				struct drm_display_mode *mode)
+{
+	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+
+	if (lvds->output != DISPLAY_OUTPUT_LVDS) {
+		DRM_DEV_ERROR(lvds->dev, "Unsupported display output %d\n",
+			      lvds->output);
+		return -EINVAL;
+	}
+
+	/* Set format */
+	return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+				  PX30_LVDS_FORMAT(lvds->format),
+				  PX30_LVDS_FORMAT(lvds->format));
+}
+
+static int px30_lvds_set_vop_source(struct rockchip_lvds *lvds,
+				    struct drm_encoder *encoder)
+{
+	int vop;
+
+	vop = drm_of_encoder_active_endpoint_id(lvds->dev->of_node, encoder);
+	if (vop < 0)
+		return vop;
+
+	return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+				  PX30_LVDS_VOP_SEL(1),
+				  PX30_LVDS_VOP_SEL(vop));
+}
+
+static void px30_lvds_encoder_enable(struct drm_encoder *encoder)
+{
+	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+	int ret;
+
+	drm_panel_prepare(lvds->panel);
+
+	ret = px30_lvds_poweron(lvds);
+	if (ret) {
+		DRM_DEV_ERROR(lvds->dev, "failed to power on LVDS: %d\n", ret);
+		drm_panel_unprepare(lvds->panel);
+		return;
+	}
+
+	ret = px30_lvds_grf_config(encoder, mode);
+	if (ret) {
+		DRM_DEV_ERROR(lvds->dev, "failed to configure LVDS: %d\n", ret);
+		drm_panel_unprepare(lvds->panel);
+		return;
+	}
+
+	ret = px30_lvds_set_vop_source(lvds, encoder);
+	if (ret) {
+		DRM_DEV_ERROR(lvds->dev, "failed to set VOP source: %d\n", ret);
+		drm_panel_unprepare(lvds->panel);
+		return;
+	}
+
+	drm_panel_enable(lvds->panel);
+}
+
+static void px30_lvds_encoder_disable(struct drm_encoder *encoder)
+{
+	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+
+	drm_panel_disable(lvds->panel);
+	px30_lvds_poweroff(lvds);
 	drm_panel_unprepare(lvds->panel);
 }
 
 static const
-struct drm_encoder_helper_funcs rockchip_lvds_encoder_helper_funcs = {
-	.enable = rockchip_lvds_encoder_enable,
-	.disable = rockchip_lvds_encoder_disable,
+struct drm_encoder_helper_funcs rk3288_lvds_encoder_helper_funcs = {
+	.enable = rk3288_lvds_encoder_enable,
+	.disable = rk3288_lvds_encoder_disable,
+	.atomic_check = rockchip_lvds_encoder_atomic_check,
+};
+
+static const
+struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
+	.enable = px30_lvds_encoder_enable,
+	.disable = px30_lvds_encoder_disable,
 	.atomic_check = rockchip_lvds_encoder_atomic_check,
 };
 
@@ -315,11 +439,88 @@ static const struct drm_encoder_funcs rockchip_lvds_encoder_funcs = {
 	.destroy = drm_encoder_cleanup,
 };
 
+static int rk3288_lvds_probe(struct platform_device *pdev,
+			     struct rockchip_lvds *lvds)
+{
+	struct resource *res;
+	int ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	lvds->regs = devm_ioremap_resource(lvds->dev, res);
+	if (IS_ERR(lvds->regs))
+		return PTR_ERR(lvds->regs);
+
+	lvds->pclk = devm_clk_get(lvds->dev, "pclk_lvds");
+	if (IS_ERR(lvds->pclk)) {
+		DRM_DEV_ERROR(lvds->dev, "could not get pclk_lvds\n");
+		return PTR_ERR(lvds->pclk);
+	}
+
+	lvds->pins = devm_kzalloc(lvds->dev, sizeof(*lvds->pins),
+				  GFP_KERNEL);
+	if (!lvds->pins)
+		return -ENOMEM;
+
+	lvds->pins->p = devm_pinctrl_get(lvds->dev);
+	if (IS_ERR(lvds->pins->p)) {
+		DRM_DEV_ERROR(lvds->dev, "no pinctrl handle\n");
+		devm_kfree(lvds->dev, lvds->pins);
+		lvds->pins = NULL;
+	} else {
+		lvds->pins->default_state =
+			pinctrl_lookup_state(lvds->pins->p, "lcdc");
+		if (IS_ERR(lvds->pins->default_state)) {
+			DRM_DEV_ERROR(lvds->dev, "no default pinctrl state\n");
+			devm_kfree(lvds->dev, lvds->pins);
+			lvds->pins = NULL;
+		}
+	}
+
+	ret = clk_prepare(lvds->pclk);
+	if (ret < 0) {
+		DRM_DEV_ERROR(lvds->dev, "failed to prepare pclk_lvds\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int px30_lvds_probe(struct platform_device *pdev,
+			   struct rockchip_lvds *lvds)
+{
+	int ret;
+
+	/* MSB */
+	ret = regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+				  PX30_LVDS_MSBSEL(1),
+				  PX30_LVDS_MSBSEL(1));
+	if (ret)
+		return ret;
+
+	/* PHY */
+	lvds->dphy = devm_phy_get(&pdev->dev, "dphy");
+	if (IS_ERR(lvds->dphy))
+		return PTR_ERR(lvds->dphy);
+
+	ret = phy_init(lvds->dphy);
+	if (ret)
+		return ret;
+
+	ret = phy_set_mode(lvds->dphy, PHY_MODE_LVDS);
+	if (ret)
+		return ret;
+
+	return phy_power_on(lvds->dphy);
+}
+
 static const struct rockchip_lvds_soc_data rk3288_lvds_data = {
-	.ch1_offset = 0x100,
-	.grf_soc_con6 = 0x025c,
-	.grf_soc_con7 = 0x0260,
-	.has_vop_sel = true,
+	.probe = rk3288_lvds_probe,
+	.helper_funcs = &rk3288_lvds_encoder_helper_funcs,
+};
+
+static const struct rockchip_lvds_soc_data px30_lvds_data = {
+	.probe = px30_lvds_probe,
+	.helper_funcs = &px30_lvds_encoder_helper_funcs,
 };
 
 static const struct of_device_id rockchip_lvds_dt_ids[] = {
@@ -327,6 +528,10 @@ static const struct of_device_id rockchip_lvds_dt_ids[] = {
 		.compatible = "rockchip,rk3288-lvds",
 		.data = &rk3288_lvds_data
 	},
+	{
+		.compatible = "rockchip,px30-lvds",
+		.data = &px30_lvds_data
+	},
 	{}
 };
 MODULE_DEVICE_TABLE(of, rockchip_lvds_dt_ids);
@@ -378,7 +583,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
 		/* default set it as output rgb */
 		lvds->output = DISPLAY_OUTPUT_RGB;
 	else
-		lvds->output = lvds_name_to_output(name);
+		lvds->output = rockchip_lvds_name_to_output(name);
 
 	if (lvds->output < 0) {
 		DRM_DEV_ERROR(dev, "invalid output type [%s]\n", name);
@@ -390,7 +595,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
 		/* default set it as format vesa 18 */
 		lvds->format = LVDS_VESA_18;
 	else
-		lvds->format = lvds_name_to_format(name);
+		lvds->format = rockchip_lvds_name_to_format(name);
 
 	if (lvds->format < 0) {
 		DRM_DEV_ERROR(dev, "invalid data-mapping format [%s]\n", name);
@@ -410,7 +615,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
 		goto err_put_remote;
 	}
 
-	drm_encoder_helper_add(encoder, &rockchip_lvds_encoder_helper_funcs);
+	drm_encoder_helper_add(encoder, lvds->soc_data->helper_funcs);
 
 	if (lvds->panel) {
 		connector = &lvds->connector;
@@ -471,8 +676,10 @@ static void rockchip_lvds_unbind(struct device *dev, struct device *master,
 				void *data)
 {
 	struct rockchip_lvds *lvds = dev_get_drvdata(dev);
+	const struct drm_encoder_helper_funcs *encoder_funcs;
 
-	rockchip_lvds_encoder_disable(&lvds->encoder);
+	encoder_funcs = lvds->soc_data->helper_funcs;
+	encoder_funcs->disable(&lvds->encoder);
 	if (lvds->panel)
 		drm_panel_detach(lvds->panel);
 	pm_runtime_disable(dev);
@@ -490,7 +697,6 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct rockchip_lvds *lvds;
 	const struct of_device_id *match;
-	struct resource *res;
 	int ret;
 
 	if (!dev->of_node)
@@ -506,37 +712,6 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
 		return -ENODEV;
 	lvds->soc_data = match->data;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	lvds->regs = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(lvds->regs))
-		return PTR_ERR(lvds->regs);
-
-	lvds->pclk = devm_clk_get(&pdev->dev, "pclk_lvds");
-	if (IS_ERR(lvds->pclk)) {
-		DRM_DEV_ERROR(dev, "could not get pclk_lvds\n");
-		return PTR_ERR(lvds->pclk);
-	}
-
-	lvds->pins = devm_kzalloc(lvds->dev, sizeof(*lvds->pins),
-				  GFP_KERNEL);
-	if (!lvds->pins)
-		return -ENOMEM;
-
-	lvds->pins->p = devm_pinctrl_get(lvds->dev);
-	if (IS_ERR(lvds->pins->p)) {
-		DRM_DEV_ERROR(dev, "no pinctrl handle\n");
-		devm_kfree(lvds->dev, lvds->pins);
-		lvds->pins = NULL;
-	} else {
-		lvds->pins->default_state =
-			pinctrl_lookup_state(lvds->pins->p, "lcdc");
-		if (IS_ERR(lvds->pins->default_state)) {
-			DRM_DEV_ERROR(dev, "no default pinctrl state\n");
-			devm_kfree(lvds->dev, lvds->pins);
-			lvds->pins = NULL;
-		}
-	}
-
 	lvds->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
 						    "rockchip,grf");
 	if (IS_ERR(lvds->grf)) {
@@ -544,13 +719,14 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
 		return PTR_ERR(lvds->grf);
 	}
 
-	dev_set_drvdata(dev, lvds);
-
-	ret = clk_prepare(lvds->pclk);
-	if (ret < 0) {
-		DRM_DEV_ERROR(dev, "failed to prepare pclk_lvds\n");
+	ret = lvds->soc_data->probe(pdev, lvds);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "Platform initialization failed\n");
 		return ret;
 	}
+
+	dev_set_drvdata(dev, lvds);
+
 	ret = component_add(&pdev->dev, &rockchip_lvds_component_ops);
 	if (ret < 0) {
 		DRM_DEV_ERROR(dev, "failed to add component\n");
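
With this refactor everything SoC-specific lives in struct rockchip_lvds_soc_data (a probe hook plus the encoder helper funcs) and is selected through the matched of_device_id's .data pointer, so the common bind path stays SoC-agnostic. A minimal stand-alone sketch of that dispatch pattern (plain C; names and structs are made up for illustration):

#include <stdio.h>
#include <string.h>

/* Stand-ins for the per-SoC data selected via of_device_id.data. */
struct soc_data {
	const char *name;
	int (*probe)(void);
};

static int rk3288_probe(void) { puts("rk3288: map regs, get pclk"); return 0; }
static int px30_probe(void)   { puts("px30: set MSB, get dphy");    return 0; }

struct dt_id {
	const char *compatible;
	const struct soc_data *data;
};

static const struct soc_data rk3288_data = { "rk3288", rk3288_probe };
static const struct soc_data px30_data   = { "px30",   px30_probe };

static const struct dt_id ids[] = {
	{ "rockchip,rk3288-lvds", &rk3288_data },
	{ "rockchip,px30-lvds",   &px30_data },
	{ NULL, NULL },
};

int main(void)
{
	const char *compat = "rockchip,px30-lvds"; /* would come from the DT node */

	for (const struct dt_id *id = ids; id->compatible; id++)
		if (!strcmp(id->compatible, compat))
			return id->data->probe(); /* SoC-specific init */
	return -1;
}
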
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.h b/drivers/gpu/drm/rockchip/rockchip_lvds.h
index 029bad8..4ce967d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.h
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.h
@@ -70,7 +70,10 @@
 #define RK3288_LVDS_CFG_REG21			0x84
 #define RK3288_LVDS_CFG_REG21_TX_ENABLE		0x92
 #define RK3288_LVDS_CFG_REG21_TX_DISABLE	0x00
-#define RK3288_LVDS_CH1_OFFSET                 0x100
+#define RK3288_LVDS_CH1_OFFSET			0x100
+
+#define RK3288_LVDS_GRF_SOC_CON6		0x025C
+#define RK3288_LVDS_GRF_SOC_CON7		0x0260
 
 /* fbdiv value is split over 2 registers, with bit8 in reg2 */
 #define RK3288_LVDS_PLL_FBDIV_REG2(_fbd) \
@@ -103,4 +106,18 @@
 #define LVDS_VESA_18				2
 #define LVDS_JEIDA_18				3
 
+#define HIWORD_UPDATE(v, h, l)  ((GENMASK(h, l) << 16) | ((v) << (l)))
+
+#define PX30_LVDS_GRF_PD_VO_CON0		0x434
+#define   PX30_LVDS_TIE_CLKS(val)		HIWORD_UPDATE(val,  8,  8)
+#define   PX30_LVDS_INVERT_CLKS(val)		HIWORD_UPDATE(val,  9,  9)
+#define   PX30_LVDS_INVERT_DCLK(val)		HIWORD_UPDATE(val,  5,  5)
+
+#define PX30_LVDS_GRF_PD_VO_CON1		0x438
+#define   PX30_LVDS_FORMAT(val)			HIWORD_UPDATE(val, 14, 13)
+#define   PX30_LVDS_MODE_EN(val)		HIWORD_UPDATE(val, 12, 12)
+#define   PX30_LVDS_MSBSEL(val)			HIWORD_UPDATE(val, 11, 11)
+#define   PX30_LVDS_P2S_EN(val)			HIWORD_UPDATE(val,  6,  6)
+#define   PX30_LVDS_VOP_SEL(val)		HIWORD_UPDATE(val,  1,  1)
+
 #endif /* _ROCKCHIP_LVDS_ */
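
The new HIWORD_UPDATE() macro and the PX30_LVDS_* helpers above encode the Rockchip GRF convention that the upper 16 bits of a register write act as a per-bit write-enable mask for the lower 16 bits. A small stand-alone sketch of the bit math (userspace C; GENMASK_U32 is a hand-rolled stand-in for the kernel's GENMASK):

#include <stdint.h>
#include <stdio.h>

/* Same idea as GENMASK(h, l) from <linux/bits.h>, spelled out by hand. */
#define GENMASK_U32(h, l) \
	((0xffffffffu >> (31 - (h))) & ~((1u << (l)) - 1u))

/* Mirrors the driver's HIWORD_UPDATE(v, h, l):
 * high half-word = write-enable mask, low half-word = new field value. */
#define HIWORD_UPDATE(v, h, l) \
	((GENMASK_U32(h, l) << 16) | ((uint32_t)(v) << (l)))

int main(void)
{
	/* PX30_LVDS_FORMAT(val) updates bits 14:13 only. */
	uint32_t w = HIWORD_UPDATE(2, 14, 13);

	/* Bits 14:13 are enabled for writing (0x6000 in the high word) and
	 * the value 2 lands at bit 13 (0x4000 in the low word). */
	printf("reg write = 0x%08x\n", (unsigned int)w); /* 0x60004000 */
	return 0;
}
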
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 2966fcf..799bd11 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -24,10 +24,10 @@
  */
 
 #include <linux/module.h>
+#include <linux/pci.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_pciids.h>
 
 #include "savage_drv.h"
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 461a7a8..63bccd2 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -38,47 +38,41 @@
  * submit to HW ring.
  *
  * @entity: scheduler entity to init
- * @rq_list: the list of run queue on which jobs from this
+ * @priority: priority of the entity
+ * @sched_list: the list of drm scheds on which jobs from this
  *           entity can be submitted
- * @num_rq_list: number of run queue in rq_list
+ * @num_sched_list: number of drm sched in sched_list
  * @guilty: atomic_t set to 1 when a job on this queue
  *          is found to be guilty causing a timeout
  *
- * Note: the rq_list should have atleast one element to schedule
+ * Note: the sched_list should have at least one element to schedule
  *       the entity
  *
  * Returns 0 on success or a negative error code on failure.
  */
 int drm_sched_entity_init(struct drm_sched_entity *entity,
-			  struct drm_sched_rq **rq_list,
-			  unsigned int num_rq_list,
+			  enum drm_sched_priority priority,
+			  struct drm_gpu_scheduler **sched_list,
+			  unsigned int num_sched_list,
 			  atomic_t *guilty)
 {
-	int i;
-
-	if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
+	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
 		return -EINVAL;
 
 	memset(entity, 0, sizeof(struct drm_sched_entity));
 	INIT_LIST_HEAD(&entity->list);
 	entity->rq = NULL;
 	entity->guilty = guilty;
-	entity->num_rq_list = num_rq_list;
-	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
-				GFP_KERNEL);
-	if (!entity->rq_list)
-		return -ENOMEM;
+	entity->num_sched_list = num_sched_list;
+	entity->priority = priority;
+	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
+	entity->last_scheduled = NULL;
+
+	if (num_sched_list)
+		entity->rq = &sched_list[0]->sched_rq[entity->priority];
 
 	init_completion(&entity->entity_idle);
 
-	for (i = 0; i < num_rq_list; ++i)
-		entity->rq_list[i] = rq_list[i];
-
-	if (num_rq_list)
-		entity->rq = rq_list[0];
-
-	entity->last_scheduled = NULL;
-
 	spin_lock_init(&entity->rq_lock);
 	spsc_queue_init(&entity->job_queue);
 
@@ -136,21 +130,21 @@ static struct drm_sched_rq *
 drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
 {
 	struct drm_sched_rq *rq = NULL;
-	unsigned int min_jobs = UINT_MAX, num_jobs;
+	unsigned int min_score = UINT_MAX, num_score;
 	int i;
 
-	for (i = 0; i < entity->num_rq_list; ++i) {
-		struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
+	for (i = 0; i < entity->num_sched_list; ++i) {
+		struct drm_gpu_scheduler *sched = entity->sched_list[i];
 
-		if (!entity->rq_list[i]->sched->ready) {
+		if (!entity->sched_list[i]->ready) {
 			DRM_WARN("sched%s is not ready, skipping", sched->name);
 			continue;
 		}
 
-		num_jobs = atomic_read(&sched->num_jobs);
-		if (num_jobs < min_jobs) {
-			min_jobs = num_jobs;
-			rq = entity->rq_list[i];
+		num_score = atomic_read(&sched->score);
+		if (num_score < min_score) {
+			min_score = num_score;
+			rq = &entity->sched_list[i]->sched_rq[entity->priority];
 		}
 	}
 
@@ -308,7 +302,6 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 
 	dma_fence_put(entity->last_scheduled);
 	entity->last_scheduled = NULL;
-	kfree(entity->rq_list);
 }
 EXPORT_SYMBOL(drm_sched_entity_fini);
 
@@ -354,15 +347,6 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
 }
 
 /**
- * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority
- */
-static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
-					     enum drm_sched_priority priority)
-{
-	*rq = &(*rq)->sched->sched_rq[priority];
-}
-
-/**
  * drm_sched_entity_set_priority - Sets priority of the entity
  *
  * @entity: scheduler entity
@@ -373,19 +357,8 @@ static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
 void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
 				   enum drm_sched_priority priority)
 {
-	unsigned int i;
-
 	spin_lock(&entity->rq_lock);
-
-	for (i = 0; i < entity->num_rq_list; ++i)
-		drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
-
-	if (entity->rq) {
-		drm_sched_rq_remove_entity(entity->rq, entity);
-		drm_sched_entity_set_rq_priority(&entity->rq, priority);
-		drm_sched_rq_add_entity(entity->rq, entity);
-	}
-
+	entity->priority = priority;
 	spin_unlock(&entity->rq_lock);
 }
 EXPORT_SYMBOL(drm_sched_entity_set_priority);
@@ -490,20 +463,20 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 	struct dma_fence *fence;
 	struct drm_sched_rq *rq;
 
-	if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
+	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
 		return;
 
 	fence = READ_ONCE(entity->last_scheduled);
 	if (fence && !dma_fence_is_signaled(fence))
 		return;
 
-	rq = drm_sched_entity_get_free_sched(entity);
-	if (rq == entity->rq)
-		return;
-
 	spin_lock(&entity->rq_lock);
-	drm_sched_rq_remove_entity(entity->rq, entity);
-	entity->rq = rq;
+	rq = drm_sched_entity_get_free_sched(entity);
+	if (rq != entity->rq) {
+		drm_sched_rq_remove_entity(entity->rq, entity);
+		entity->rq = rq;
+	}
+
 	spin_unlock(&entity->rq_lock);
 }
 
@@ -525,7 +498,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 	bool first;
 
 	trace_drm_sched_job(sched_job, entity);
-	atomic_inc(&entity->rq->sched->num_jobs);
+	atomic_inc(&entity->rq->sched->score);
 	WRITE_ONCE(entity->last_user, current->group_leader);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 3c57e84..71ce621 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -92,6 +92,7 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
 	if (!list_empty(&entity->list))
 		return;
 	spin_lock(&rq->lock);
+	atomic_inc(&rq->sched->score);
 	list_add_tail(&entity->list, &rq->entities);
 	spin_unlock(&rq->lock);
 }
@@ -110,6 +111,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 	if (list_empty(&entity->list))
 		return;
 	spin_lock(&rq->lock);
+	atomic_dec(&rq->sched->score);
 	list_del_init(&entity->list);
 	if (rq->current_entity == entity)
 		rq->current_entity = NULL;
@@ -287,10 +289,21 @@ static void drm_sched_job_timedout(struct work_struct *work)
 	unsigned long flags;
 
 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+
+	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
+	spin_lock_irqsave(&sched->job_list_lock, flags);
 	job = list_first_entry_or_null(&sched->ring_mirror_list,
 				       struct drm_sched_job, node);
 
 	if (job) {
+		/*
+		 * Remove the bad job so it cannot be freed by concurrent
+		 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
+		 * is parked at which point it's safe.
+		 */
+		list_del_init(&job->node);
+		spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
 		job->sched->ops->timedout_job(job);
 
 		/*
@@ -301,6 +314,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
 			job->sched->ops->free_job(job);
 			sched->free_guilty = false;
 		}
+	} else {
+		spin_unlock_irqrestore(&sched->job_list_lock, flags);
 	}
 
 	spin_lock_irqsave(&sched->job_list_lock, flags);
@@ -373,6 +388,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 	kthread_park(sched->thread);
 
 	/*
+	 * Reinsert back the bad job here - now it's safe as
+	 * drm_sched_get_cleanup_job cannot race against us and release the
+	 * bad job at this point - we parked (waited for) any in progress
+	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
+	 * now until the scheduler thread is unparked.
+	 */
+	if (bad && bad->sched == sched)
+		/*
+		 * Add at the head of the queue to reflect it was the earliest
+		 * job extracted.
+		 */
+		list_add(&bad->node, &sched->ring_mirror_list);
+
+	/*
 	 * Iterate the job list from later to  earlier one and either deactive
 	 * their HW callbacks or remove them from mirror list if they already
 	 * signaled.
@@ -628,7 +657,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 	struct drm_gpu_scheduler *sched = s_fence->sched;
 
 	atomic_dec(&sched->hw_rq_count);
-	atomic_dec(&sched->num_jobs);
+	atomic_dec(&sched->score);
 
 	trace_drm_sched_process_job(s_fence);
 
@@ -803,7 +832,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
-	atomic_set(&sched->num_jobs, 0);
+	atomic_set(&sched->score, 0);
 	atomic64_set(&sched->job_id_count, 0);
 
 	/* Each scheduler will run on a seperate kernel thread */
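
After this rework drm_sched_entity carries a list of candidate schedulers and a priority, and drm_sched_entity_get_free_sched() picks the run queue (at the entity's priority) of whichever ready scheduler currently has the lowest score; sched->score replaces num_jobs and is also bumped when entities are added to or removed from a run queue. A simplified, stand-alone sketch of that selection loop (plain C with made-up stand-in types, not the kernel structs):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for drm_gpu_scheduler. */
struct fake_sched {
	const char *name;
	bool ready;
	unsigned int score; /* jobs in flight + queued entities */
};

/* Pick the ready scheduler with the smallest score, as
 * drm_sched_entity_get_free_sched() does after this change. */
static struct fake_sched *pick_sched(struct fake_sched **list, unsigned int n)
{
	unsigned int min_score = UINT_MAX;
	struct fake_sched *best = NULL;

	for (unsigned int i = 0; i < n; i++) {
		if (!list[i]->ready)
			continue; /* skip schedulers that are not ready */
		if (list[i]->score < min_score) {
			min_score = list[i]->score;
			best = list[i];
		}
	}
	return best;
}

int main(void)
{
	struct fake_sched a = { "ring0", true, 7 };
	struct fake_sched b = { "ring1", true, 2 };
	struct fake_sched c = { "ring2", false, 0 }; /* not ready, skipped */
	struct fake_sched *list[] = { &a, &b, &c };

	printf("selected: %s\n", pick_sched(list, 3)->name); /* ring1 */
	return 0;
}
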
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
index d213734..0856e4b 100644
--- a/drivers/gpu/drm/selftests/Makefile
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \
                       test-drm_format.o test-drm_framebuffer.o \
-		      test-drm_damage_helper.o test-drm_dp_mst_helper.o
+		      test-drm_damage_helper.o test-drm_dp_mst_helper.o \
+		      test-drm_rect.o
 
 obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o test-drm_cmdline_parser.o
diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
index 6d61a0e..ceac7af 100644
--- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
@@ -60,3 +60,8 @@ cmdline_test(drm_cmdline_test_vmirror)
 cmdline_test(drm_cmdline_test_margin_options)
 cmdline_test(drm_cmdline_test_multiple_options)
 cmdline_test(drm_cmdline_test_invalid_option)
+cmdline_test(drm_cmdline_test_bpp_extra_and_option)
+cmdline_test(drm_cmdline_test_extra_and_option)
+cmdline_test(drm_cmdline_test_freestanding_options)
+cmdline_test(drm_cmdline_test_freestanding_force_e_and_options)
+cmdline_test(drm_cmdline_test_panel_orientation)
diff --git a/drivers/gpu/drm/selftests/drm_modeset_selftests.h b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
index 1898de0..782e285 100644
--- a/drivers/gpu/drm/selftests/drm_modeset_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
@@ -6,6 +6,10 @@
  *
  * Tests are executed in order by igt/drm_selftests_helper
  */
+selftest(drm_rect_clip_scaled_div_by_zero, igt_drm_rect_clip_scaled_div_by_zero)
+selftest(drm_rect_clip_scaled_not_clipped, igt_drm_rect_clip_scaled_not_clipped)
+selftest(drm_rect_clip_scaled_clipped, igt_drm_rect_clip_scaled_clipped)
+selftest(drm_rect_clip_scaled_signed_vs_unsigned, igt_drm_rect_clip_scaled_signed_vs_unsigned)
 selftest(check_plane_state, igt_check_plane_state)
 selftest(check_drm_format_block_width, igt_check_drm_format_block_width)
 selftest(check_drm_format_block_height, igt_check_drm_format_block_height)
diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
index 013de9d..520f3e6 100644
--- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
+++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
@@ -992,6 +992,128 @@ static int drm_cmdline_test_invalid_option(void *ignored)
 	return 0;
 }
 
+static int drm_cmdline_test_bpp_extra_and_option(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24e,rotate=180",
+							   &no_connector,
+							   &mode));
+	FAIL_ON(!mode.specified);
+	FAIL_ON(mode.xres != 720);
+	FAIL_ON(mode.yres != 480);
+	FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
+
+	FAIL_ON(mode.refresh_specified);
+
+	FAIL_ON(!mode.bpp_specified);
+	FAIL_ON(mode.bpp != 24);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_ON);
+
+	return 0;
+}
+
+static int drm_cmdline_test_extra_and_option(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480e,rotate=180",
+							   &no_connector,
+							   &mode));
+	FAIL_ON(!mode.specified);
+	FAIL_ON(mode.xres != 720);
+	FAIL_ON(mode.yres != 480);
+	FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
+
+	FAIL_ON(mode.refresh_specified);
+	FAIL_ON(mode.bpp_specified);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_ON);
+
+	return 0;
+}
+
+static int drm_cmdline_test_freestanding_options(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("margin_right=14,margin_left=24,margin_bottom=36,margin_top=42",
+							   &no_connector,
+							   &mode));
+	FAIL_ON(mode.specified);
+	FAIL_ON(mode.refresh_specified);
+	FAIL_ON(mode.bpp_specified);
+
+	FAIL_ON(mode.tv_margins.right != 14);
+	FAIL_ON(mode.tv_margins.left != 24);
+	FAIL_ON(mode.tv_margins.bottom != 36);
+	FAIL_ON(mode.tv_margins.top != 42);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
+
+	return 0;
+}
+
+static int drm_cmdline_test_freestanding_force_e_and_options(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("e,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42",
+							   &no_connector,
+							   &mode));
+	FAIL_ON(mode.specified);
+	FAIL_ON(mode.refresh_specified);
+	FAIL_ON(mode.bpp_specified);
+
+	FAIL_ON(mode.tv_margins.right != 14);
+	FAIL_ON(mode.tv_margins.left != 24);
+	FAIL_ON(mode.tv_margins.bottom != 36);
+	FAIL_ON(mode.tv_margins.top != 42);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_ON);
+
+	return 0;
+}
+
+static int drm_cmdline_test_panel_orientation(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("panel_orientation=upside_down",
+							   &no_connector,
+							   &mode));
+	FAIL_ON(mode.specified);
+	FAIL_ON(mode.refresh_specified);
+	FAIL_ON(mode.bpp_specified);
+
+	FAIL_ON(mode.panel_orientation != DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
+
+	return 0;
+}
+
 #include "drm_selftest.c"
 
 static int __init test_drm_cmdline_init(void)
diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
index af2b2de..bd990d17 100644
--- a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
@@ -18,15 +18,19 @@ int igt_dp_mst_calc_pbn_mode(void *ignored)
 		int rate;
 		int bpp;
 		int expected;
+		bool dsc;
 	} test_params[] = {
-		{ 154000, 30, 689 },
-		{ 234000, 30, 1047 },
-		{ 297000, 24, 1063 },
+		{ 154000, 30, 689, false },
+		{ 234000, 30, 1047, false },
+		{ 297000, 24, 1063, false },
+		{ 332880, 24, 50, true },
+		{ 324540, 24, 49, true },
 	};
 
 	for (i = 0; i < ARRAY_SIZE(test_params); i++) {
 		pbn = drm_dp_calc_pbn_mode(test_params[i].rate,
-					   test_params[i].bpp);
+					   test_params[i].bpp,
+					   test_params[i].dsc);
 		FAIL(pbn != test_params[i].expected,
 		     "Expected PBN %d for clock %d bpp %d, got %d\n",
 		     test_params[i].expected, test_params[i].rate,
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.h b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
index 0fcb8bb..cfb51d8 100644
--- a/drivers/gpu/drm/selftests/test-drm_modeset_common.h
+++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
@@ -3,6 +3,9 @@
 #ifndef __TEST_DRM_MODESET_COMMON_H__
 #define __TEST_DRM_MODESET_COMMON_H__
 
+#include <linux/errno.h>
+#include <linux/printk.h>
+
 #define FAIL(test, msg, ...) \
 	do { \
 		if (test) { \
@@ -13,6 +16,10 @@
 
 #define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n")
 
+int igt_drm_rect_clip_scaled_div_by_zero(void *ignored);
+int igt_drm_rect_clip_scaled_not_clipped(void *ignored);
+int igt_drm_rect_clip_scaled_clipped(void *ignored);
+int igt_drm_rect_clip_scaled_signed_vs_unsigned(void *ignored);
 int igt_check_plane_state(void *ignored);
 int igt_check_drm_format_block_width(void *ignored);
 int igt_check_drm_format_block_height(void *ignored);
diff --git a/drivers/gpu/drm/selftests/test-drm_rect.c b/drivers/gpu/drm/selftests/test-drm_rect.c
new file mode 100644
index 0000000..3a5ff38
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_rect.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_rect functions
+ */
+
+#define pr_fmt(fmt) "drm_rect: " fmt
+
+#include <linux/limits.h>
+
+#include <drm/drm_rect.h>
+
+#include "test-drm_modeset_common.h"
+
+int igt_drm_rect_clip_scaled_div_by_zero(void *ignored)
+{
+	struct drm_rect src, dst, clip;
+	bool visible;
+
+	/*
+	 * Make sure we don't divide by zero when dst
+	 * width/height is zero and dst and clip do not intersect.
+	 */
+	drm_rect_init(&src, 0, 0, 0, 0);
+	drm_rect_init(&dst, 0, 0, 0, 0);
+	drm_rect_init(&clip, 1, 1, 1, 1);
+	visible = drm_rect_clip_scaled(&src, &dst, &clip);
+	FAIL(visible, "Destination should not be visible\n");
+	FAIL(drm_rect_visible(&src), "Source should not be visible\n");
+
+	drm_rect_init(&src, 0, 0, 0, 0);
+	drm_rect_init(&dst, 3, 3, 0, 0);
+	drm_rect_init(&clip, 1, 1, 1, 1);
+	visible = drm_rect_clip_scaled(&src, &dst, &clip);
+	FAIL(visible, "Destination should not be visible\n");
+	FAIL(drm_rect_visible(&src), "Source should not be visible\n");
+
+	return 0;
+}
+
+int igt_drm_rect_clip_scaled_not_clipped(void *ignored)
+{
+	struct drm_rect src, dst, clip;
+	bool visible;
+
+	/* 1:1 scaling */
+	drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
+	drm_rect_init(&dst, 0, 0, 1, 1);
+	drm_rect_init(&clip, 0, 0, 1, 1);
+
+	visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+	FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
+	     src.y1 != 0 || src.y2 != 1 << 16,
+	     "Source badly clipped\n");
+	FAIL(dst.x1 != 0 || dst.x2 != 1 ||
+	     dst.y1 != 0 || dst.y2 != 1,
+	     "Destination badly clipped\n");
+	FAIL(!visible, "Destination should be visible\n");
+	FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+	/* 2:1 scaling */
+	drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+	drm_rect_init(&dst, 0, 0, 1, 1);
+	drm_rect_init(&clip, 0, 0, 1, 1);
+
+	visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+	FAIL(src.x1 != 0 || src.x2 != 2 << 16 ||
+	     src.y1 != 0 || src.y2 != 2 << 16,
+	     "Source badly clipped\n");
+	FAIL(dst.x1 != 0 || dst.x2 != 1 ||
+	     dst.y1 != 0 || dst.y2 != 1,
+	     "Destination badly clipped\n");
+	FAIL(!visible, "Destination should be visible\n");
+	FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+	/* 1:2 scaling */
+	drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
+	drm_rect_init(&dst, 0, 0, 2, 2);
+	drm_rect_init(&clip, 0, 0, 2, 2);
+
+	visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+	FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
+	     src.y1 != 0 || src.y2 != 1 << 16,
+	     "Source badly clipped\n");
+	FAIL(dst.x1 != 0 || dst.x2 != 2 ||
+	     dst.y1 != 0 || dst.y2 != 2,
+	     "Destination badly clipped\n");
+	FAIL(!visible, "Destination should be visible\n");
+	FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+	return 0;
+}
+
+int igt_drm_rect_clip_scaled_clipped(void *ignored)
+{
+	struct drm_rect src, dst, clip;
+	bool visible;
+
+	/* 1:1 scaling top/left clip */
+	drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+	drm_rect_init(&dst, 0, 0, 2, 2);
+	drm_rect_init(&clip, 0, 0, 1, 1);
+
+	visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+	FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
+	     src.y1 != 0 || src.y2 != 1 << 16,
+	     "Source badly clipped\n");
+	FAIL(dst.x1 != 0 || dst.x2 != 1 ||
+	     dst.y1 != 0 || dst.y2 != 1,
+	     "Destination badly clipped\n");
+	FAIL(!visible, "Destination should be visible\n");
+	FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+	/* 1:1 scaling bottom/right clip */
+	drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+	drm_rect_init(&dst, 0, 0, 2, 2);
+	drm_rect_init(&clip, 1, 1, 1, 1);
+
+	visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+	FAIL(src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
+	     src.y1 != 1 << 16 || src.y2 != 2 << 16,
+	     "Source badly clipped\n");
+	FAIL(dst.x1 != 1 || dst.x2 != 2 ||
+	     dst.y1 != 1 || dst.y2 != 2,
+	     "Destination badly clipped\n");
+	FAIL(!visible, "Destination should be visible\n");
+	FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+	/* 2:1 scaling top/left clip */
+	drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
+	drm_rect_init(&dst, 0, 0, 2, 2);
+	drm_rect_init(&clip, 0, 0, 1, 1);
+
+	visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+	FAIL(src.x1 != 0 || src.x2 != 2 << 16 ||
+	     src.y1 != 0 || src.y2 != 2 << 16,
+	     "Source badly clipped\n");
+	FAIL(dst.x1 != 0 || dst.x2 != 1 ||
+	     dst.y1 != 0 || dst.y2 != 1,
+	     "Destination badly clipped\n");
+	FAIL(!visible, "Destination should be visible\n");
+	FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+	/* 2:1 scaling bottom/right clip */
+	drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
+	drm_rect_init(&dst, 0, 0, 2, 2);
+	drm_rect_init(&clip, 1, 1, 1, 1);
+
+	visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+	FAIL(src.x1 != 2 << 16 || src.x2 != 4 << 16 ||
+	     src.y1 != 2 << 16 || src.y2 != 4 << 16,
+	     "Source badly clipped\n");
+	FAIL(dst.x1 != 1 || dst.x2 != 2 ||
+	     dst.y1 != 1 || dst.y2 != 2,
+	     "Destination badly clipped\n");
+	FAIL(!visible, "Destination should be visible\n");
+	FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+	/* 1:2 scaling top/left clip */
+	drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+	drm_rect_init(&dst, 0, 0, 4, 4);
+	drm_rect_init(&clip, 0, 0, 2, 2);
+
+	visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+	FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
+	     src.y1 != 0 || src.y2 != 1 << 16,
+	     "Source badly clipped\n");
+	FAIL(dst.x1 != 0 || dst.x2 != 2 ||
+	     dst.y1 != 0 || dst.y2 != 2,
+	     "Destination badly clipped\n");
+	FAIL(!visible, "Destination should be visible\n");
+	FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+	/* 1:2 scaling bottom/right clip */
+	drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+	drm_rect_init(&dst, 0, 0, 4, 4);
+	drm_rect_init(&clip, 2, 2, 2, 2);
+
+	visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+	FAIL(src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
+	     src.y1 != 1 << 16 || src.y2 != 2 << 16,
+	     "Source badly clipped\n");
+	FAIL(dst.x1 != 2 || dst.x2 != 4 ||
+	     dst.y1 != 2 || dst.y2 != 4,
+	     "Destination badly clipped\n");
+	FAIL(!visible, "Destination should be visible\n");
+	FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+	return 0;
+}
+
+int igt_drm_rect_clip_scaled_signed_vs_unsigned(void *ignored)
+{
+	struct drm_rect src, dst, clip;
+	bool visible;
+
+	/*
+	 * 'clip.x2 - dst.x1 >= dst width' could result in a negative
+	 * src rectangle width which is no longer expected by the
+	 * code as it's using unsigned types. This could lead to
+	 * the clipped source rectangle appearing visible when it
+	 * should have been fully clipped. Make sure both rectangles
+	 * end up invisible.
+	 */
+	drm_rect_init(&src, 0, 0, INT_MAX, INT_MAX);
+	drm_rect_init(&dst, 0, 0, 2, 2);
+	drm_rect_init(&clip, 3, 3, 1, 1);
+
+	visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+	FAIL(visible, "Destination should not be visible\n");
+	FAIL(drm_rect_visible(&src), "Source should not be visible\n");
+
+	return 0;
+}
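
These self-tests exercise drm_rect_clip_scaled(), which clips the destination rectangle against the clip rectangle and shrinks the 16.16 fixed-point source rectangle by the same proportion. A small arithmetic sketch of that proportional clipping for the "2:1 scaling bottom/right clip" case above (illustrative only; the real helper derives the scale via drm_rect_calc_hscale() and also guards against zero-sized destinations and overflow):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* src is 4x4 in 16.16 units, dst is 2x2 pixels, clip keeps dst pixel (1,1). */
	int64_t src_w = 4 << 16, dst_w = 2;
	int64_t hscale = src_w / dst_w;   /* 16.16 scale factor: 2.0 src pixels per dst pixel */

	int64_t clip_x1 = 1, clip_x2 = 2; /* keep the rightmost dst pixel */
	int64_t dst_x1 = 0;

	/* Source edges move by (clipped dst pixels) * scale, staying in 16.16. */
	int64_t src_x1 = (clip_x1 - dst_x1) * hscale; /* 2 << 16 */
	int64_t src_x2 = (clip_x2 - dst_x1) * hscale; /* 4 << 16 */

	printf("src.x1 = %lld px, src.x2 = %lld px\n",
	       (long long)(src_x1 >> 16), (long long)(src_x2 >> 16)); /* 2, 4 */
	return 0;
}
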
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index ee38012..2c54b33 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -26,10 +26,10 @@
  */
 
 #include <linux/module.h>
+#include <linux/pci.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_pciids.h>
 #include <drm/sis_drm.h>
 
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 68261c7..b2778ec 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -339,7 +339,7 @@ static int sti_dvo_connector_get_modes(struct drm_connector *connector)
 	struct sti_dvo *dvo = dvo_connector->dvo;
 
 	if (dvo->panel)
-		return drm_panel_get_modes(dvo->panel);
+		return drm_panel_get_modes(dvo->panel, connector);
 
 	return 0;
 }
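
drm_panel_get_modes() now takes the connector the modes should be attached to, so connector ->get_modes hooks like the one above simply forward both arguments. A minimal kernel-style sketch of such a hook (struct example_encoder and its fields are hypothetical, for illustration only):

#include <drm/drm_connector.h>
#include <drm/drm_panel.h>

/* Hypothetical private structure embedding the connector. */
struct example_encoder {
	struct drm_connector connector;
	struct drm_panel *panel;
};

static int example_connector_get_modes(struct drm_connector *connector)
{
	struct example_encoder *enc =
		container_of(connector, struct example_encoder, connector);

	/* The panel adds its modes directly to the connector passed in. */
	return drm_panel_get_modes(enc->panel, connector);
}
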
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 514efef..4b16563 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -309,11 +309,24 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
 	return 0;
 }
 
+static int
+dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
+			   struct dw_mipi_dsi_dphy_timing *timing)
+{
+	timing->clk_hs2lp = 0x40;
+	timing->clk_lp2hs = 0x40;
+	timing->data_hs2lp = 0x40;
+	timing->data_lp2hs = 0x40;
+
+	return 0;
+}
+
 static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_stm_phy_ops = {
 	.init = dw_mipi_dsi_phy_init,
 	.power_on = dw_mipi_dsi_phy_power_on,
 	.power_off = dw_mipi_dsi_phy_power_off,
 	.get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
+	.get_timing = dw_mipi_dsi_phy_get_timing,
 };
 
 static struct dw_mipi_dsi_plat_data dw_mipi_dsi_stm_plat_data = {
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 5b512989..c2815e8 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -437,9 +437,6 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
 	/* Commit shadow registers = update planes at next vblank */
 	reg_set(ldev->regs, LTDC_SRCR, SRCR_VBR);
 
-	/* Enable LTDC */
-	reg_set(ldev->regs, LTDC_GCR, GCR_LTDCEN);
-
 	drm_crtc_vblank_on(crtc);
 }
 
@@ -453,9 +450,6 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
 
 	drm_crtc_vblank_off(crtc);
 
-	/* disable LTDC */
-	reg_clear(ldev->regs, LTDC_GCR, GCR_LTDCEN);
-
 	/* disable IRQ */
 	reg_clear(ldev->regs, LTDC_IER, IER_RRIE | IER_FUIE | IER_TERRIE);
 
@@ -1044,9 +1038,13 @@ static const struct drm_encoder_funcs ltdc_encoder_funcs = {
 static void ltdc_encoder_disable(struct drm_encoder *encoder)
 {
 	struct drm_device *ddev = encoder->dev;
+	struct ltdc_device *ldev = ddev->dev_private;
 
 	DRM_DEBUG_DRIVER("\n");
 
+	/* Disable LTDC */
+	reg_clear(ldev->regs, LTDC_GCR, GCR_LTDCEN);
+
 	/* Set to sleep state the pinctrl whatever type of encoder */
 	pinctrl_pm_select_sleep_state(ddev->dev);
 }
@@ -1054,6 +1052,19 @@ static void ltdc_encoder_disable(struct drm_encoder *encoder)
 static void ltdc_encoder_enable(struct drm_encoder *encoder)
 {
 	struct drm_device *ddev = encoder->dev;
+	struct ltdc_device *ldev = ddev->dev_private;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	/* Enable LTDC */
+	reg_set(ldev->regs, LTDC_GCR, GCR_LTDCEN);
+}
+
+static void ltdc_encoder_mode_set(struct drm_encoder *encoder,
+				  struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *ddev = encoder->dev;
 
 	DRM_DEBUG_DRIVER("\n");
 
@@ -1069,6 +1080,7 @@ static void ltdc_encoder_enable(struct drm_encoder *encoder)
 static const struct drm_encoder_helper_funcs ltdc_encoder_helper_funcs = {
 	.disable = ltdc_encoder_disable,
 	.enable = ltdc_encoder_enable,
+	.mode_set = ltdc_encoder_mode_set,
 };
 
 static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 37e90e4..5755f04 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -17,18 +17,18 @@
 if DRM_SUN4I
 
 config DRM_SUN4I_HDMI
-       tristate "Allwinner A10 HDMI Controller Support"
-       default DRM_SUN4I
-       help
+	tristate "Allwinner A10 HDMI Controller Support"
+	default DRM_SUN4I
+	help
 	  Choose this option if you have an Allwinner SoC with an HDMI
 	  controller.
 
 config DRM_SUN4I_HDMI_CEC
-       bool "Allwinner A10 HDMI CEC Support"
-       depends on DRM_SUN4I_HDMI
-       select CEC_CORE
-       select CEC_PIN
-       help
+	bool "Allwinner A10 HDMI CEC Support"
+	depends on DRM_SUN4I_HDMI
+	select CEC_CORE
+	select CEC_PIN
+	help
 	  Choose this option if you have an Allwinner SoC with an HDMI
 	  controller and want to use CEC.
 
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 4e29f4f..072ea11 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -856,6 +856,13 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
 		ret = PTR_ERR(backend->mod_clk);
 		goto err_disable_bus_clk;
 	}
+
+	ret = clk_set_rate_exclusive(backend->mod_clk, 300000000);
+	if (ret) {
+		dev_err(dev, "Couldn't set the module clock frequency\n");
+		goto err_disable_bus_clk;
+	}
+
 	clk_prepare_enable(backend->mod_clk);
 
 	backend->ram_clk = devm_clk_get(dev, "ram");
@@ -932,6 +939,7 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
 err_disable_ram_clk:
 	clk_disable_unprepare(backend->ram_clk);
 err_disable_mod_clk:
+	clk_rate_exclusive_put(backend->mod_clk);
 	clk_disable_unprepare(backend->mod_clk);
 err_disable_bus_clk:
 	clk_disable_unprepare(backend->bus_clk);
@@ -952,6 +960,7 @@ static void sun4i_backend_unbind(struct device *dev, struct device *master,
 		sun4i_backend_free_sat(dev);
 
 	clk_disable_unprepare(backend->ram_clk);
+	clk_rate_exclusive_put(backend->mod_clk);
 	clk_disable_unprepare(backend->mod_clk);
 	clk_disable_unprepare(backend->bus_clk);
 	reset_control_assert(backend->reset);
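
The backend now pins its module clock at 300 MHz with clk_set_rate_exclusive(), and the unwind paths gain the matching clk_rate_exclusive_put(). A short kernel-style sketch of that pairing (illustrative only; the function and error handling are simplified stand-ins, not the driver's actual bind path):

#include <linux/clk.h>
#include <linux/device.h>

static int example_pin_mod_clk(struct device *dev, struct clk *mod_clk)
{
	int ret;

	/* Pin the rate so no other consumer can change it behind our back. */
	ret = clk_set_rate_exclusive(mod_clk, 300000000);
	if (ret) {
		dev_err(dev, "Couldn't set the module clock frequency\n");
		return ret;
	}

	ret = clk_prepare_enable(mod_clk);
	if (ret) {
		/* Every successful clk_set_rate_exclusive() needs a matching put. */
		clk_rate_exclusive_put(mod_clk);
		return ret;
	}

	return 0;
}
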
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index a5757b1..5ae67d5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -346,6 +346,27 @@ static int sun4i_drv_add_endpoints(struct device *dev,
 	return count;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int sun4i_drv_drm_sys_suspend(struct device *dev)
+{
+	struct drm_device *drm = dev_get_drvdata(dev);
+
+	return drm_mode_config_helper_suspend(drm);
+}
+
+static int sun4i_drv_drm_sys_resume(struct device *dev)
+{
+	struct drm_device *drm = dev_get_drvdata(dev);
+
+	return drm_mode_config_helper_resume(drm);
+}
+#endif
+
+static const struct dev_pm_ops sun4i_drv_drm_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(sun4i_drv_drm_sys_suspend,
+				sun4i_drv_drm_sys_resume)
+};
+
 static int sun4i_drv_probe(struct platform_device *pdev)
 {
 	struct component_match *match = NULL;
@@ -418,6 +439,7 @@ static struct platform_driver sun4i_drv_platform_driver = {
 	.driver		= {
 		.name		= "sun4i-drm",
 		.of_match_table	= sun4i_drv_of_table,
+		.pm = &sun4i_drv_drm_pm_ops,
 	},
 };
 module_platform_driver(sun4i_drv_platform_driver);
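
The new sleep hooks lean on drm_mode_config_helper_suspend()/drm_mode_config_helper_resume(), which assume the platform drvdata is the struct drm_device, as this driver sets up. A rough sketch of what the helper pair does internally, ignoring fbdev handling (an assumption about the helpers, not code from this patch):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>

static int example_sys_suspend(struct drm_device *drm)
{
	struct drm_atomic_state *state;

	drm_kms_helper_poll_disable(drm);	/* stop HPD polling while asleep */

	state = drm_atomic_helper_suspend(drm);	/* disable all outputs, keep the state */
	if (IS_ERR(state)) {
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(state);
	}

	drm->mode_config.suspend_state = state;	/* where the helper stashes it */
	return 0;
}

static int example_sys_resume(struct drm_device *drm)
{
	int ret;

	ret = drm_atomic_helper_resume(drm, drm->mode_config.suspend_state);
	drm->mode_config.suspend_state = NULL;
	drm_kms_helper_poll_enable(drm);

	return ret;
}
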
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index c04f4ba..acfbfd4 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -250,11 +250,11 @@ struct drm_plane **sun4i_layers_init(struct drm_device *drm,
 			dev_err(drm->dev, "Couldn't initialize %s plane\n",
 				i ? "overlay" : "primary");
 			return ERR_CAST(layer);
-		};
+		}
 
 		layer->id = i;
 		planes[i] = &layer->plane;
-	};
+	}
 
 	return planes;
 }
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 25ab2ef..65b7a87 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -43,7 +43,7 @@ static int sun4i_lvds_get_modes(struct drm_connector *connector)
 	struct sun4i_lvds *lvds =
 		drm_connector_to_sun4i_lvds(connector);
 
-	return drm_panel_get_modes(lvds->panel);
+	return drm_panel_get_modes(lvds->panel, connector);
 }
 
 static struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = {
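
This is the first of several identical conversions in this section (sun4i_rgb.c, sun6i_mipi_dsi.c and tegra/output.c follow the same pattern): drm_panel_get_modes() now takes the connector the modes should be attached to. A hedged sketch of a connector .get_modes callback using the two-argument form; the struct and its fields are placeholders:

#include <drm/drm_connector.h>
#include <drm/drm_panel.h>

struct example_output {
	struct drm_connector connector;
	struct drm_panel *panel;
};

static int example_connector_get_modes(struct drm_connector *connector)
{
	struct example_output *output =
		container_of(connector, struct example_output, connector);

	/* The panel adds its modes to @connector and returns how many it added. */
	return drm_panel_get_modes(output->panel, connector);
}
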
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index e74b9ed..b27f16a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -47,7 +47,7 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
 	struct sun4i_rgb *rgb =
 		drm_connector_to_sun4i_rgb(connector);
 
-	return drm_panel_get_modes(rgb->panel);
+	return drm_panel_get_modes(rgb->panel, connector);
 }
 
 /*
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index f7ab722..4fbe9a6 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -56,6 +56,13 @@ static int sun6i_drc_bind(struct device *dev, struct device *master,
 		ret = PTR_ERR(drc->mod_clk);
 		goto err_disable_bus_clk;
 	}
+
+	ret = clk_set_rate_exclusive(drc->mod_clk, 300000000);
+	if (ret) {
+		dev_err(dev, "Couldn't set the module clock frequency\n");
+		goto err_disable_bus_clk;
+	}
+
 	clk_prepare_enable(drc->mod_clk);
 
 	return 0;
@@ -72,6 +79,7 @@ static void sun6i_drc_unbind(struct device *dev, struct device *master,
 {
 	struct sun6i_drc *drc = dev_get_drvdata(dev);
 
+	clk_rate_exclusive_put(drc->mod_clk);
 	clk_disable_unprepare(drc->mod_clk);
 	clk_disable_unprepare(drc->bus_clk);
 	reset_control_assert(drc->reset);
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index c958ca9..a75fcb1 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -795,7 +795,7 @@ static int sun6i_dsi_get_modes(struct drm_connector *connector)
 {
 	struct sun6i_dsi *dsi = connector_to_sun6i_dsi(connector);
 
-	return drm_panel_get_modes(dsi->panel);
+	return drm_panel_get_modes(dsi->panel, connector);
 }
 
 static struct drm_connector_helper_funcs sun6i_dsi_connector_helper_funcs = {
@@ -1081,6 +1081,7 @@ static const struct component_ops sun6i_dsi_ops = {
 static int sun6i_dsi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
+	const char *bus_clk_name = NULL;
 	struct sun6i_dsi *dsi;
 	struct resource *res;
 	void __iomem *base;
@@ -1094,6 +1095,10 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
 	dsi->host.ops = &sun6i_dsi_host_ops;
 	dsi->host.dev = dev;
 
+	if (of_device_is_compatible(dev->of_node,
+				    "allwinner,sun6i-a31-mipi-dsi"))
+		bus_clk_name = "bus";
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(base)) {
@@ -1107,23 +1112,36 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
 		return PTR_ERR(dsi->regulator);
 	}
 
-	dsi->regs = devm_regmap_init_mmio_clk(dev, "bus", base,
-					      &sun6i_dsi_regmap_config);
-	if (IS_ERR(dsi->regs)) {
-		dev_err(dev, "Couldn't create the DSI encoder regmap\n");
-		return PTR_ERR(dsi->regs);
-	}
-
 	dsi->reset = devm_reset_control_get_shared(dev, NULL);
 	if (IS_ERR(dsi->reset)) {
 		dev_err(dev, "Couldn't get our reset line\n");
 		return PTR_ERR(dsi->reset);
 	}
 
-	dsi->mod_clk = devm_clk_get(dev, "mod");
-	if (IS_ERR(dsi->mod_clk)) {
-		dev_err(dev, "Couldn't get the DSI mod clock\n");
-		return PTR_ERR(dsi->mod_clk);
+	dsi->regs = devm_regmap_init_mmio(dev, base, &sun6i_dsi_regmap_config);
+	if (IS_ERR(dsi->regs)) {
+		dev_err(dev, "Couldn't init regmap\n");
+		return PTR_ERR(dsi->regs);
+	}
+
+	dsi->bus_clk = devm_clk_get(dev, bus_clk_name);
+	if (IS_ERR(dsi->bus_clk)) {
+		dev_err(dev, "Couldn't get the DSI bus clock\n");
+		return PTR_ERR(dsi->bus_clk);
+	}
+
+	ret = regmap_mmio_attach_clk(dsi->regs, dsi->bus_clk);
+	if (ret)
+		return ret;
+
+	if (of_device_is_compatible(dev->of_node,
+				    "allwinner,sun6i-a31-mipi-dsi")) {
+		dsi->mod_clk = devm_clk_get(dev, "mod");
+		if (IS_ERR(dsi->mod_clk)) {
+			dev_err(dev, "Couldn't get the DSI mod clock\n");
+			ret = PTR_ERR(dsi->mod_clk);
+			goto err_attach_clk;
+		}
 	}
 
 	/*
@@ -1161,6 +1179,9 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
 	pm_runtime_disable(dev);
 err_unprotect_clk:
 	clk_rate_exclusive_put(dsi->mod_clk);
+err_attach_clk:
+	if (!IS_ERR(dsi->bus_clk))
+		regmap_mmio_detach_clk(dsi->regs);
 	return ret;
 }
 
@@ -1174,6 +1195,9 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
 	pm_runtime_disable(dev);
 	clk_rate_exclusive_put(dsi->mod_clk);
 
+	if (!IS_ERR(dsi->bus_clk))
+		regmap_mmio_detach_clk(dsi->regs);
+
 	return 0;
 }
 
@@ -1232,6 +1256,7 @@ static const struct dev_pm_ops sun6i_dsi_pm_ops = {
 
 static const struct of_device_id sun6i_dsi_of_table[] = {
 	{ .compatible = "allwinner,sun6i-a31-mipi-dsi" },
+	{ .compatible = "allwinner,sun50i-a64-mipi-dsi" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun6i_dsi_of_table);
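
The probe rework above replaces devm_regmap_init_mmio_clk(), which looks the clock up by name, with creating the regmap first and attaching an explicitly requested clock, so the A31 and A64 variants can use different clock names (NULL meaning a single unnamed clock). A minimal sketch of that pattern with placeholder names, not the driver's exact code:

#include <linux/clk.h>
#include <linux/regmap.h>

static int example_init_regmap(struct device *dev, void __iomem *base,
			       const struct regmap_config *cfg,
			       const char *clk_name, struct regmap **out)
{
	struct regmap *map;
	struct clk *bus_clk;
	int ret;

	map = devm_regmap_init_mmio(dev, base, cfg);	/* regmap without clock handling */
	if (IS_ERR(map))
		return PTR_ERR(map);

	bus_clk = devm_clk_get(dev, clk_name);		/* clk_name may be NULL */
	if (IS_ERR(bus_clk))
		return PTR_ERR(bus_clk);

	/* regmap-mmio now enables this clock around register accesses. */
	ret = regmap_mmio_attach_clk(map, bus_clk);
	if (ret)
		return ret;

	*out = map;
	return 0;
}

The matching teardown, as in the remove and error paths above, is regmap_mmio_detach_clk(map).
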
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 8b803eb..7c24f8f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -286,10 +286,10 @@ static struct drm_plane **sun8i_layers_init(struct drm_device *drm,
 			dev_err(drm->dev,
 				"Couldn't initialize overlay plane\n");
 			return ERR_CAST(layer);
-		};
+		}
 
 		planes[i] = &layer->plane;
-	};
+	}
 
 	for (i = 0; i < mixer->cfg->ui_num; i++) {
 		struct sun8i_ui_layer *layer;
@@ -299,10 +299,10 @@ static struct drm_plane **sun8i_layers_init(struct drm_device *drm,
 			dev_err(drm->dev, "Couldn't initialize %s plane\n",
 				i ? "overlay" : "primary");
 			return ERR_CAST(layer);
-		};
+		}
 
 		planes[mixer->cfg->vi_num + i] = &layer->plane;
-	};
+	}
 
 	return planes;
 }
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index c243af1..ab699bf 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -31,12 +31,12 @@
  */
 
 #include <linux/module.h>
+#include <linux/pci.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_legacy.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_pciids.h>
 
 #include "tdfx_drv.h"
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 714af05..7c70fd3 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1727,6 +1727,7 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
 {
 	struct tegra_dc *dc = to_tegra_dc(crtc);
 	u32 value;
+	int err;
 
 	if (!tegra_dc_idle(dc)) {
 		tegra_dc_stop(dc);
@@ -1773,7 +1774,9 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
 
 	spin_unlock_irq(&crtc->dev->event_lock);
 
-	pm_runtime_put_sync(dc->dev);
+	err = host1x_client_suspend(&dc->client);
+	if (err < 0)
+		dev_err(dc->dev, "failed to suspend: %d\n", err);
 }
 
 static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -1783,8 +1786,13 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
 	struct tegra_dc_state *state = to_dc_state(crtc->state);
 	struct tegra_dc *dc = to_tegra_dc(crtc);
 	u32 value;
+	int err;
 
-	pm_runtime_get_sync(dc->dev);
+	err = host1x_client_resume(&dc->client);
+	if (err < 0) {
+		dev_err(dc->dev, "failed to resume: %d\n", err);
+		return;
+	}
 
 	/* initialize display controller */
 	if (dc->syncpt) {
@@ -1996,7 +2004,7 @@ static bool tegra_dc_has_window_groups(struct tegra_dc *dc)
 
 static int tegra_dc_init(struct host1x_client *client)
 {
-	struct drm_device *drm = dev_get_drvdata(client->parent);
+	struct drm_device *drm = dev_get_drvdata(client->host);
 	unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
 	struct tegra_dc *dc = host1x_client_to_dc(client);
 	struct tegra_drm *tegra = drm->dev_private;
@@ -2012,6 +2020,15 @@ static int tegra_dc_init(struct host1x_client *client)
 	if (!tegra_dc_has_window_groups(dc))
 		return 0;
 
+	/*
+	 * Set the display hub as the host1x client parent for the display
+	 * controller. This is needed for the runtime reference counting that
+	 * ensures the display hub is always powered when any of the display
+	 * controllers are.
+	 */
+	if (dc->soc->has_nvdisplay)
+		client->parent = &tegra->hub->client;
+
 	dc->syncpt = host1x_syncpt_request(client, flags);
 	if (!dc->syncpt)
 		dev_warn(dc->dev, "failed to allocate syncpoint\n");
@@ -2077,9 +2094,9 @@ static int tegra_dc_init(struct host1x_client *client)
 
 	/*
 	 * Inherit the DMA parameters (such as maximum segment size) from the
-	 * parent device.
+	 * parent host1x device.
 	 */
-	client->dev->dma_parms = client->parent->dma_parms;
+	client->dev->dma_parms = client->host->dma_parms;
 
 	return 0;
 
@@ -2121,9 +2138,74 @@ static int tegra_dc_exit(struct host1x_client *client)
 	return 0;
 }
 
+static int tegra_dc_runtime_suspend(struct host1x_client *client)
+{
+	struct tegra_dc *dc = host1x_client_to_dc(client);
+	struct device *dev = client->dev;
+	int err;
+
+	err = reset_control_assert(dc->rst);
+	if (err < 0) {
+		dev_err(dev, "failed to assert reset: %d\n", err);
+		return err;
+	}
+
+	if (dc->soc->has_powergate)
+		tegra_powergate_power_off(dc->powergate);
+
+	clk_disable_unprepare(dc->clk);
+	pm_runtime_put_sync(dev);
+
+	return 0;
+}
+
+static int tegra_dc_runtime_resume(struct host1x_client *client)
+{
+	struct tegra_dc *dc = host1x_client_to_dc(client);
+	struct device *dev = client->dev;
+	int err;
+
+	err = pm_runtime_get_sync(dev);
+	if (err < 0) {
+		dev_err(dev, "failed to get runtime PM: %d\n", err);
+		return err;
+	}
+
+	if (dc->soc->has_powergate) {
+		err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
+							dc->rst);
+		if (err < 0) {
+			dev_err(dev, "failed to power partition: %d\n", err);
+			goto put_rpm;
+		}
+	} else {
+		err = clk_prepare_enable(dc->clk);
+		if (err < 0) {
+			dev_err(dev, "failed to enable clock: %d\n", err);
+			goto put_rpm;
+		}
+
+		err = reset_control_deassert(dc->rst);
+		if (err < 0) {
+			dev_err(dev, "failed to deassert reset: %d\n", err);
+			goto disable_clk;
+		}
+	}
+
+	return 0;
+
+disable_clk:
+	clk_disable_unprepare(dc->clk);
+put_rpm:
+	pm_runtime_put_sync(dev);
+	return err;
+}
+
 static const struct host1x_client_ops dc_client_ops = {
 	.init = tegra_dc_init,
 	.exit = tegra_dc_exit,
+	.suspend = tegra_dc_runtime_suspend,
+	.resume = tegra_dc_runtime_resume,
 };
 
 static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
@@ -2535,65 +2617,10 @@ static int tegra_dc_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int tegra_dc_suspend(struct device *dev)
-{
-	struct tegra_dc *dc = dev_get_drvdata(dev);
-	int err;
-
-	err = reset_control_assert(dc->rst);
-	if (err < 0) {
-		dev_err(dev, "failed to assert reset: %d\n", err);
-		return err;
-	}
-
-	if (dc->soc->has_powergate)
-		tegra_powergate_power_off(dc->powergate);
-
-	clk_disable_unprepare(dc->clk);
-
-	return 0;
-}
-
-static int tegra_dc_resume(struct device *dev)
-{
-	struct tegra_dc *dc = dev_get_drvdata(dev);
-	int err;
-
-	if (dc->soc->has_powergate) {
-		err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
-							dc->rst);
-		if (err < 0) {
-			dev_err(dev, "failed to power partition: %d\n", err);
-			return err;
-		}
-	} else {
-		err = clk_prepare_enable(dc->clk);
-		if (err < 0) {
-			dev_err(dev, "failed to enable clock: %d\n", err);
-			return err;
-		}
-
-		err = reset_control_deassert(dc->rst);
-		if (err < 0) {
-			dev_err(dev, "failed to deassert reset: %d\n", err);
-			return err;
-		}
-	}
-
-	return 0;
-}
-#endif
-
-static const struct dev_pm_ops tegra_dc_pm_ops = {
-	SET_RUNTIME_PM_OPS(tegra_dc_suspend, tegra_dc_resume, NULL)
-};
-
 struct platform_driver tegra_dc_driver = {
 	.driver = {
 		.name = "tegra-dc",
 		.of_match_table = tegra_dc_of_match,
-		.pm = &tegra_dc_pm_ops,
 	},
 	.probe = tegra_dc_probe,
 	.remove = tegra_dc_remove,
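
The pattern visible here recurs through the Tegra changes below (dsi.c, hdmi.c, hub.c, sor.c): runtime PM moves from dev_pm_ops on the platform device into the .suspend/.resume callbacks of host1x_client_ops, and paths that used pm_runtime_get_sync()/pm_runtime_put() on the device now call host1x_client_resume()/host1x_client_suspend() on the client, so that parent clients such as the display hub stay powered while any child is active. A hedged sketch of the calling side; the struct is a placeholder and the API is as used in this series:

#include <linux/device.h>
#include <linux/host1x.h>

struct example_dc {
	struct host1x_client client;
	struct device *dev;
};

static void example_enable(struct example_dc *dc)
{
	int err;

	/* Resumes parent clients first (e.g. the display hub), then this one. */
	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	/* ... program the hardware ... */
}

static void example_disable(struct example_dc *dc)
{
	/* ... stop the hardware ... */

	/* Drops this client; parents are suspended once no child needs them. */
	host1x_client_suspend(&dc->client);
}
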
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 622cdf1..7dfb50f 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -588,7 +588,7 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
 	/* make sure pads are powered down when not in use */
 	tegra_dpaux_pad_power_down(dpaux);
 
-	pm_runtime_put(&pdev->dev);
+	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
 	drm_dp_aux_unregister(&dpaux->aux);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index f455ce71..bd26802 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -905,7 +905,7 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
 int host1x_client_iommu_attach(struct host1x_client *client)
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
-	struct drm_device *drm = dev_get_drvdata(client->parent);
+	struct drm_device *drm = dev_get_drvdata(client->host);
 	struct tegra_drm *tegra = drm->dev_private;
 	struct iommu_group *group = NULL;
 	int err;
@@ -941,7 +941,7 @@ int host1x_client_iommu_attach(struct host1x_client *client)
 
 void host1x_client_iommu_detach(struct host1x_client *client)
 {
-	struct drm_device *drm = dev_get_drvdata(client->parent);
+	struct drm_device *drm = dev_get_drvdata(client->host);
 	struct tegra_drm *tegra = drm->dev_private;
 	struct iommu_domain *domain;
 
@@ -1037,23 +1037,9 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
 	free_pages((unsigned long)virt, get_order(size));
 }
 
-static int host1x_drm_probe(struct host1x_device *dev)
+static bool host1x_drm_wants_iommu(struct host1x_device *dev)
 {
-	struct drm_driver *driver = &tegra_drm_driver;
 	struct iommu_domain *domain;
-	struct tegra_drm *tegra;
-	struct drm_device *drm;
-	int err;
-
-	drm = drm_dev_alloc(driver, &dev->dev);
-	if (IS_ERR(drm))
-		return PTR_ERR(drm);
-
-	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
-	if (!tegra) {
-		err = -ENOMEM;
-		goto put;
-	}
 
 	/*
 	 * If the Tegra DRM clients are backed by an IOMMU, push buffers are
@@ -1082,9 +1068,38 @@ static int host1x_drm_probe(struct host1x_device *dev)
 	 * up the device tree appropriately. This is considered a problem
 	 * of integration, so care must be taken for the DT to be consistent.
 	 */
-	domain = iommu_get_domain_for_dev(drm->dev->parent);
+	domain = iommu_get_domain_for_dev(dev->dev.parent);
 
-	if (domain && iommu_present(&platform_bus_type)) {
+	/*
+	 * Tegra20 and Tegra30 don't support addressing memory beyond the
+	 * 32-bit boundary, so the regular GATHER opcodes will always be
+	 * sufficient and whether or not the host1x is attached to an IOMMU
+	 * doesn't matter.
+	 */
+	if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32))
+		return true;
+
+	return domain != NULL;
+}
+
+static int host1x_drm_probe(struct host1x_device *dev)
+{
+	struct drm_driver *driver = &tegra_drm_driver;
+	struct tegra_drm *tegra;
+	struct drm_device *drm;
+	int err;
+
+	drm = drm_dev_alloc(driver, &dev->dev);
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
+
+	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+	if (!tegra) {
+		err = -ENOMEM;
+		goto put;
+	}
+
+	if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) {
 		tegra->domain = iommu_domain_alloc(&platform_bus_type);
 		if (!tegra->domain) {
 			err = -ENOMEM;
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index d941553..ed99b67 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -144,6 +144,8 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
 void tegra_output_exit(struct tegra_output *output);
 void tegra_output_find_possible_crtcs(struct tegra_output *output,
 				      struct drm_device *drm);
+int tegra_output_suspend(struct tegra_output *output);
+int tegra_output_resume(struct tegra_output *output);
 
 int tegra_output_connector_get_modes(struct drm_connector *connector);
 enum drm_connector_status
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index a5d47e3..88b9d64 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -840,7 +840,9 @@ static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
 		dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n",
 			err);
 
-	pm_runtime_put(dsi->dev);
+	err = host1x_client_suspend(&dsi->client);
+	if (err < 0)
+		dev_err(dsi->dev, "failed to suspend: %d\n", err);
 }
 
 static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
@@ -882,11 +884,15 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
 	tegra_dsi_unprepare(dsi);
 }
 
-static void tegra_dsi_prepare(struct tegra_dsi *dsi)
+static int tegra_dsi_prepare(struct tegra_dsi *dsi)
 {
 	int err;
 
-	pm_runtime_get_sync(dsi->dev);
+	err = host1x_client_resume(&dsi->client);
+	if (err < 0) {
+		dev_err(dsi->dev, "failed to resume: %d\n", err);
+		return err;
+	}
 
 	err = tegra_mipi_enable(dsi->mipi);
 	if (err < 0)
@@ -899,6 +905,8 @@ static void tegra_dsi_prepare(struct tegra_dsi *dsi)
 
 	if (dsi->slave)
 		tegra_dsi_prepare(dsi->slave);
+
+	return 0;
 }
 
 static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
@@ -909,8 +917,13 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
 	struct tegra_dsi *dsi = to_dsi(output);
 	struct tegra_dsi_state *state;
 	u32 value;
+	int err;
 
-	tegra_dsi_prepare(dsi);
+	err = tegra_dsi_prepare(dsi);
+	if (err < 0) {
+		dev_err(dsi->dev, "failed to prepare: %d\n", err);
+		return;
+	}
 
 	state = tegra_dsi_get_state(dsi);
 
@@ -1030,7 +1043,7 @@ static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
 
 static int tegra_dsi_init(struct host1x_client *client)
 {
-	struct drm_device *drm = dev_get_drvdata(client->parent);
+	struct drm_device *drm = dev_get_drvdata(client->host);
 	struct tegra_dsi *dsi = host1x_client_to_dsi(client);
 	int err;
 
@@ -1075,9 +1088,89 @@ static int tegra_dsi_exit(struct host1x_client *client)
 	return 0;
 }
 
+static int tegra_dsi_runtime_suspend(struct host1x_client *client)
+{
+	struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+	struct device *dev = client->dev;
+	int err;
+
+	if (dsi->rst) {
+		err = reset_control_assert(dsi->rst);
+		if (err < 0) {
+			dev_err(dev, "failed to assert reset: %d\n", err);
+			return err;
+		}
+	}
+
+	usleep_range(1000, 2000);
+
+	clk_disable_unprepare(dsi->clk_lp);
+	clk_disable_unprepare(dsi->clk);
+
+	regulator_disable(dsi->vdd);
+	pm_runtime_put_sync(dev);
+
+	return 0;
+}
+
+static int tegra_dsi_runtime_resume(struct host1x_client *client)
+{
+	struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+	struct device *dev = client->dev;
+	int err;
+
+	err = pm_runtime_get_sync(dev);
+	if (err < 0) {
+		dev_err(dev, "failed to get runtime PM: %d\n", err);
+		return err;
+	}
+
+	err = regulator_enable(dsi->vdd);
+	if (err < 0) {
+		dev_err(dev, "failed to enable VDD supply: %d\n", err);
+		goto put_rpm;
+	}
+
+	err = clk_prepare_enable(dsi->clk);
+	if (err < 0) {
+		dev_err(dev, "cannot enable DSI clock: %d\n", err);
+		goto disable_vdd;
+	}
+
+	err = clk_prepare_enable(dsi->clk_lp);
+	if (err < 0) {
+		dev_err(dev, "cannot enable low-power clock: %d\n", err);
+		goto disable_clk;
+	}
+
+	usleep_range(1000, 2000);
+
+	if (dsi->rst) {
+		err = reset_control_deassert(dsi->rst);
+		if (err < 0) {
+			dev_err(dev, "cannot deassert reset: %d\n", err);
+			goto disable_clk_lp;
+		}
+	}
+
+	return 0;
+
+disable_clk_lp:
+	clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+	clk_disable_unprepare(dsi->clk);
+disable_vdd:
+	regulator_disable(dsi->vdd);
+put_rpm:
+	pm_runtime_put_sync(dev);
+	return err;
+}
+
 static const struct host1x_client_ops dsi_client_ops = {
 	.init = tegra_dsi_init,
 	.exit = tegra_dsi_exit,
+	.suspend = tegra_dsi_runtime_suspend,
+	.resume = tegra_dsi_runtime_resume,
 };
 
 static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
@@ -1596,79 +1689,6 @@ static int tegra_dsi_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int tegra_dsi_suspend(struct device *dev)
-{
-	struct tegra_dsi *dsi = dev_get_drvdata(dev);
-	int err;
-
-	if (dsi->rst) {
-		err = reset_control_assert(dsi->rst);
-		if (err < 0) {
-			dev_err(dev, "failed to assert reset: %d\n", err);
-			return err;
-		}
-	}
-
-	usleep_range(1000, 2000);
-
-	clk_disable_unprepare(dsi->clk_lp);
-	clk_disable_unprepare(dsi->clk);
-
-	regulator_disable(dsi->vdd);
-
-	return 0;
-}
-
-static int tegra_dsi_resume(struct device *dev)
-{
-	struct tegra_dsi *dsi = dev_get_drvdata(dev);
-	int err;
-
-	err = regulator_enable(dsi->vdd);
-	if (err < 0) {
-		dev_err(dsi->dev, "failed to enable VDD supply: %d\n", err);
-		return err;
-	}
-
-	err = clk_prepare_enable(dsi->clk);
-	if (err < 0) {
-		dev_err(dev, "cannot enable DSI clock: %d\n", err);
-		goto disable_vdd;
-	}
-
-	err = clk_prepare_enable(dsi->clk_lp);
-	if (err < 0) {
-		dev_err(dev, "cannot enable low-power clock: %d\n", err);
-		goto disable_clk;
-	}
-
-	usleep_range(1000, 2000);
-
-	if (dsi->rst) {
-		err = reset_control_deassert(dsi->rst);
-		if (err < 0) {
-			dev_err(dev, "cannot assert reset: %d\n", err);
-			goto disable_clk_lp;
-		}
-	}
-
-	return 0;
-
-disable_clk_lp:
-	clk_disable_unprepare(dsi->clk_lp);
-disable_clk:
-	clk_disable_unprepare(dsi->clk);
-disable_vdd:
-	regulator_disable(dsi->vdd);
-	return err;
-}
-#endif
-
-static const struct dev_pm_ops tegra_dsi_pm_ops = {
-	SET_RUNTIME_PM_OPS(tegra_dsi_suspend, tegra_dsi_resume, NULL)
-};
-
 static const struct of_device_id tegra_dsi_of_match[] = {
 	{ .compatible = "nvidia,tegra210-dsi", },
 	{ .compatible = "nvidia,tegra132-dsi", },
@@ -1682,7 +1702,6 @@ struct platform_driver tegra_dsi_driver = {
 	.driver = {
 		.name = "tegra-dsi",
 		.of_match_table = tegra_dsi_of_match,
-		.pm = &tegra_dsi_pm_ops,
 	},
 	.probe = tegra_dsi_probe,
 	.remove = tegra_dsi_remove,
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 7cea89f..84f0e01 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -192,7 +192,7 @@ static int tegra_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	return __tegra_gem_mmap(&bo->gem, vma);
 }
 
-static struct fb_ops tegra_fb_ops = {
+static const struct fb_ops tegra_fb_ops = {
 	.owner = THIS_MODULE,
 	DRM_FB_HELPER_DEFAULT_OPS,
 	.fb_fillrect = drm_fb_helper_sys_fillrect,
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index bc15b43..6237681 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -60,8 +60,16 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
 	/*
 	 * If we've manually mapped the buffer object through the IOMMU, make
 	 * sure to return the IOVA address of our mapping.
+	 *
+	 * Similarly, for buffers that have been allocated by the DMA API the
+	 * physical address can be used for devices that are not attached to
+	 * an IOMMU. For these devices, callers must pass a valid pointer via
+	 * the @phys argument.
+	 *
+	 * Imported buffers were also already mapped at import time, so the
+	 * existing mapping can be reused.
 	 */
-	if (phys && obj->mm) {
+	if (phys) {
 		*phys = obj->iova;
 		return NULL;
 	}
@@ -146,32 +154,6 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
 		vunmap(addr);
 }
 
-static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
-{
-	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-
-	if (obj->vaddr)
-		return obj->vaddr + page * PAGE_SIZE;
-	else if (obj->gem.import_attach)
-		return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
-	else
-		return vmap(obj->pages + page, 1, VM_MAP,
-			    pgprot_writecombine(PAGE_KERNEL));
-}
-
-static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
-			    void *addr)
-{
-	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-
-	if (obj->vaddr)
-		return;
-	else if (obj->gem.import_attach)
-		dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
-	else
-		vunmap(addr);
-}
-
 static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
@@ -188,8 +170,6 @@ static const struct host1x_bo_ops tegra_bo_ops = {
 	.unpin = tegra_bo_unpin,
 	.mmap = tegra_bo_mmap,
 	.munmap = tegra_bo_munmap,
-	.kmap = tegra_bo_kmap,
-	.kunmap = tegra_bo_kunmap,
 };
 
 static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
@@ -649,16 +629,6 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
 	return 0;
 }
 
-static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
-{
-	return NULL;
-}
-
-static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
-				   void *addr)
-{
-}
-
 static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
 {
 	struct drm_gem_object *gem = buf->priv;
@@ -689,8 +659,6 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
 	.release = tegra_gem_prime_release,
 	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
 	.end_cpu_access = tegra_gem_prime_end_cpu_access,
-	.map = tegra_gem_prime_kmap,
-	.unmap = tegra_gem_prime_kunmap,
 	.mmap = tegra_gem_prime_mmap,
 	.vmap = tegra_gem_prime_vmap,
 	.vunmap = tegra_gem_prime_vunmap,
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 1fc4e56..48363f7 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -34,7 +34,7 @@ static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
 static int gr2d_init(struct host1x_client *client)
 {
 	struct tegra_drm_client *drm = host1x_to_drm_client(client);
-	struct drm_device *dev = dev_get_drvdata(client->parent);
+	struct drm_device *dev = dev_get_drvdata(client->host);
 	unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
 	struct gr2d *gr2d = to_gr2d(drm);
 	int err;
@@ -76,7 +76,7 @@ static int gr2d_init(struct host1x_client *client)
 static int gr2d_exit(struct host1x_client *client)
 {
 	struct tegra_drm_client *drm = host1x_to_drm_client(client);
-	struct drm_device *dev = dev_get_drvdata(client->parent);
+	struct drm_device *dev = dev_get_drvdata(client->host);
 	struct tegra_drm *tegra = dev->dev_private;
 	struct gr2d *gr2d = to_gr2d(drm);
 	int err;
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 24fae0f6..c0a528be 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -43,7 +43,7 @@ static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
 static int gr3d_init(struct host1x_client *client)
 {
 	struct tegra_drm_client *drm = host1x_to_drm_client(client);
-	struct drm_device *dev = dev_get_drvdata(client->parent);
+	struct drm_device *dev = dev_get_drvdata(client->host);
 	unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
 	struct gr3d *gr3d = to_gr3d(drm);
 	int err;
@@ -85,7 +85,7 @@ static int gr3d_init(struct host1x_client *client)
 static int gr3d_exit(struct host1x_client *client)
 {
 	struct tegra_drm_client *drm = host1x_to_drm_client(client);
-	struct drm_device *dev = dev_get_drvdata(client->parent);
+	struct drm_device *dev = dev_get_drvdata(client->host);
 	struct gr3d *gr3d = to_gr3d(drm);
 	int err;
 
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 50269ff..6f11762 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1146,6 +1146,7 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
 	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
 	struct tegra_hdmi *hdmi = to_hdmi(output);
 	u32 value;
+	int err;
 
 	/*
 	 * The following accesses registers of the display controller, so make
@@ -1171,7 +1172,9 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
 	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE);
 	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK);
 
-	pm_runtime_put(hdmi->dev);
+	err = host1x_client_suspend(&hdmi->client);
+	if (err < 0)
+		dev_err(hdmi->dev, "failed to suspend: %d\n", err);
 }
 
 static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
@@ -1186,7 +1189,11 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
 	u32 value;
 	int err;
 
-	pm_runtime_get_sync(hdmi->dev);
+	err = host1x_client_resume(&hdmi->client);
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to resume: %d\n", err);
+		return;
+	}
 
 	/*
 	 * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
@@ -1424,15 +1431,16 @@ static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
 
 static int tegra_hdmi_init(struct host1x_client *client)
 {
-	struct drm_device *drm = dev_get_drvdata(client->parent);
 	struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+	struct drm_device *drm = dev_get_drvdata(client->host);
 	int err;
 
 	hdmi->output.dev = client->dev;
 
-	drm_connector_init(drm, &hdmi->output.connector,
-			   &tegra_hdmi_connector_funcs,
-			   DRM_MODE_CONNECTOR_HDMIA);
+	drm_connector_init_with_ddc(drm, &hdmi->output.connector,
+				    &tegra_hdmi_connector_funcs,
+				    DRM_MODE_CONNECTOR_HDMIA,
+				    hdmi->output.ddc);
 	drm_connector_helper_add(&hdmi->output.connector,
 				 &tegra_hdmi_connector_helper_funcs);
 	hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
@@ -1489,9 +1497,66 @@ static int tegra_hdmi_exit(struct host1x_client *client)
 	return 0;
 }
 
+static int tegra_hdmi_runtime_suspend(struct host1x_client *client)
+{
+	struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+	struct device *dev = client->dev;
+	int err;
+
+	err = reset_control_assert(hdmi->rst);
+	if (err < 0) {
+		dev_err(dev, "failed to assert reset: %d\n", err);
+		return err;
+	}
+
+	usleep_range(1000, 2000);
+
+	clk_disable_unprepare(hdmi->clk);
+	pm_runtime_put_sync(dev);
+
+	return 0;
+}
+
+static int tegra_hdmi_runtime_resume(struct host1x_client *client)
+{
+	struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+	struct device *dev = client->dev;
+	int err;
+
+	err = pm_runtime_get_sync(dev);
+	if (err < 0) {
+		dev_err(dev, "failed to get runtime PM: %d\n", err);
+		return err;
+	}
+
+	err = clk_prepare_enable(hdmi->clk);
+	if (err < 0) {
+		dev_err(dev, "failed to enable clock: %d\n", err);
+		goto put_rpm;
+	}
+
+	usleep_range(1000, 2000);
+
+	err = reset_control_deassert(hdmi->rst);
+	if (err < 0) {
+		dev_err(dev, "failed to deassert reset: %d\n", err);
+		goto disable_clk;
+	}
+
+	return 0;
+
+disable_clk:
+	clk_disable_unprepare(hdmi->clk);
+put_rpm:
+	pm_runtime_put_sync(dev);
+	return err;
+}
+
 static const struct host1x_client_ops hdmi_client_ops = {
 	.init = tegra_hdmi_init,
 	.exit = tegra_hdmi_exit,
+	.suspend = tegra_hdmi_runtime_suspend,
+	.resume = tegra_hdmi_runtime_resume,
 };
 
 static const struct tegra_hdmi_config tegra20_hdmi_config = {
@@ -1699,58 +1764,10 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int tegra_hdmi_suspend(struct device *dev)
-{
-	struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
-	int err;
-
-	err = reset_control_assert(hdmi->rst);
-	if (err < 0) {
-		dev_err(dev, "failed to assert reset: %d\n", err);
-		return err;
-	}
-
-	usleep_range(1000, 2000);
-
-	clk_disable_unprepare(hdmi->clk);
-
-	return 0;
-}
-
-static int tegra_hdmi_resume(struct device *dev)
-{
-	struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
-	int err;
-
-	err = clk_prepare_enable(hdmi->clk);
-	if (err < 0) {
-		dev_err(dev, "failed to enable clock: %d\n", err);
-		return err;
-	}
-
-	usleep_range(1000, 2000);
-
-	err = reset_control_deassert(hdmi->rst);
-	if (err < 0) {
-		dev_err(dev, "failed to deassert reset: %d\n", err);
-		clk_disable_unprepare(hdmi->clk);
-		return err;
-	}
-
-	return 0;
-}
-#endif
-
-static const struct dev_pm_ops tegra_hdmi_pm_ops = {
-	SET_RUNTIME_PM_OPS(tegra_hdmi_suspend, tegra_hdmi_resume, NULL)
-};
-
 struct platform_driver tegra_hdmi_driver = {
 	.driver = {
 		.name = "tegra-hdmi",
 		.of_match_table = tegra_hdmi_of_match,
-		.pm = &tegra_hdmi_pm_ops,
 	},
 	.probe = tegra_hdmi_probe,
 	.remove = tegra_hdmi_remove,
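
The connector setup above (and the matching change in sor.c below) switches to drm_connector_init_with_ddc(), which records the DDC i2c adapter on the connector so the association is visible to userspace. A minimal hedged sketch; example_output and example_connector_funcs are placeholders:

#include <drm/drm_connector.h>

static int example_init_connector(struct drm_device *drm,
				  struct example_output *output)
{
	/* output->ddc is a struct i2c_adapter *; NULL is allowed when no DDC bus exists. */
	return drm_connector_init_with_ddc(drm, &output->connector,
					   &example_connector_funcs,
					   DRM_MODE_CONNECTOR_HDMIA,
					   output->ddc);
}
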
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 47d985a..8183e61 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -95,17 +95,25 @@ static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
 
 static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
 {
+	int err = 0;
+
 	mutex_lock(&wgrp->lock);
 
 	if (wgrp->usecount == 0) {
-		pm_runtime_get_sync(wgrp->parent);
+		err = host1x_client_resume(wgrp->parent);
+		if (err < 0) {
+			dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
+			goto unlock;
+		}
+
 		reset_control_deassert(wgrp->rst);
 	}
 
 	wgrp->usecount++;
-	mutex_unlock(&wgrp->lock);
 
-	return 0;
+unlock:
+	mutex_unlock(&wgrp->lock);
+	return err;
 }
 
 static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
@@ -121,7 +129,7 @@ static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
 			       wgrp->index);
 		}
 
-		pm_runtime_put(wgrp->parent);
+		host1x_client_suspend(wgrp->parent);
 	}
 
 	wgrp->usecount--;
@@ -379,6 +387,7 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
 	struct tegra_plane *p = to_tegra_plane(plane);
 	struct tegra_dc *dc;
 	u32 value;
+	int err;
 
 	/* rien ne va plus */
 	if (!old_state || !old_state->crtc)
@@ -386,6 +395,12 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
 
 	dc = to_tegra_dc(old_state->crtc);
 
+	err = host1x_client_resume(&dc->client);
+	if (err < 0) {
+		dev_err(dc->dev, "failed to resume: %d\n", err);
+		return;
+	}
+
 	/*
 	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
 	 * on planes that are already disabled. Make sure we fallback to the
@@ -394,15 +409,13 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
 	if (WARN_ON(p->dc == NULL))
 		p->dc = dc;
 
-	pm_runtime_get_sync(dc->dev);
-
 	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
 	value &= ~WIN_ENABLE;
 	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
 
 	tegra_dc_remove_shared_plane(dc, p);
 
-	pm_runtime_put(dc->dev);
+	host1x_client_suspend(&dc->client);
 }
 
 static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
@@ -415,6 +428,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
 	struct tegra_plane *p = to_tegra_plane(plane);
 	dma_addr_t base;
 	u32 value;
+	int err;
 
 	/* rien ne va plus */
 	if (!plane->state->crtc || !plane->state->fb)
@@ -425,7 +439,11 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
 		return;
 	}
 
-	pm_runtime_get_sync(dc->dev);
+	err = host1x_client_resume(&dc->client);
+	if (err < 0) {
+		dev_err(dc->dev, "failed to resume: %d\n", err);
+		return;
+	}
 
 	tegra_dc_assign_shared_plane(dc, p);
 
@@ -515,7 +533,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
 	value &= ~CONTROL_CSC_ENABLE;
 	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
 
-	pm_runtime_put(dc->dev);
+	host1x_client_suspend(&dc->client);
 }
 
 static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
@@ -551,7 +569,7 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
 	plane->base.index = index;
 
 	plane->wgrp = &hub->wgrps[wgrp];
-	plane->wgrp->parent = dc->dev;
+	plane->wgrp->parent = &dc->client;
 
 	p = &plane->base.base;
 
@@ -656,8 +674,13 @@ int tegra_display_hub_atomic_check(struct drm_device *drm,
 static void tegra_display_hub_update(struct tegra_dc *dc)
 {
 	u32 value;
+	int err;
 
-	pm_runtime_get_sync(dc->dev);
+	err = host1x_client_resume(&dc->client);
+	if (err < 0) {
+		dev_err(dc->dev, "failed to resume: %d\n", err);
+		return;
+	}
 
 	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
 	value &= ~LATENCY_EVENT;
@@ -672,7 +695,7 @@ static void tegra_display_hub_update(struct tegra_dc *dc)
 	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
 	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 
-	pm_runtime_put(dc->dev);
+	host1x_client_suspend(&dc->client);
 }
 
 void tegra_display_hub_atomic_commit(struct drm_device *drm,
@@ -705,7 +728,7 @@ void tegra_display_hub_atomic_commit(struct drm_device *drm,
 static int tegra_display_hub_init(struct host1x_client *client)
 {
 	struct tegra_display_hub *hub = to_tegra_display_hub(client);
-	struct drm_device *drm = dev_get_drvdata(client->parent);
+	struct drm_device *drm = dev_get_drvdata(client->host);
 	struct tegra_drm *tegra = drm->dev_private;
 	struct tegra_display_hub_state *state;
 
@@ -723,7 +746,7 @@ static int tegra_display_hub_init(struct host1x_client *client)
 
 static int tegra_display_hub_exit(struct host1x_client *client)
 {
-	struct drm_device *drm = dev_get_drvdata(client->parent);
+	struct drm_device *drm = dev_get_drvdata(client->host);
 	struct tegra_drm *tegra = drm->dev_private;
 
 	drm_atomic_private_obj_fini(&tegra->hub->base);
@@ -732,9 +755,85 @@ static int tegra_display_hub_exit(struct host1x_client *client)
 	return 0;
 }
 
+static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
+{
+	struct tegra_display_hub *hub = to_tegra_display_hub(client);
+	struct device *dev = client->dev;
+	unsigned int i = hub->num_heads;
+	int err;
+
+	err = reset_control_assert(hub->rst);
+	if (err < 0)
+		return err;
+
+	while (i--)
+		clk_disable_unprepare(hub->clk_heads[i]);
+
+	clk_disable_unprepare(hub->clk_hub);
+	clk_disable_unprepare(hub->clk_dsc);
+	clk_disable_unprepare(hub->clk_disp);
+
+	pm_runtime_put_sync(dev);
+
+	return 0;
+}
+
+static int tegra_display_hub_runtime_resume(struct host1x_client *client)
+{
+	struct tegra_display_hub *hub = to_tegra_display_hub(client);
+	struct device *dev = client->dev;
+	unsigned int i;
+	int err;
+
+	err = pm_runtime_get_sync(dev);
+	if (err < 0) {
+		dev_err(dev, "failed to get runtime PM: %d\n", err);
+		return err;
+	}
+
+	err = clk_prepare_enable(hub->clk_disp);
+	if (err < 0)
+		goto put_rpm;
+
+	err = clk_prepare_enable(hub->clk_dsc);
+	if (err < 0)
+		goto disable_disp;
+
+	err = clk_prepare_enable(hub->clk_hub);
+	if (err < 0)
+		goto disable_dsc;
+
+	for (i = 0; i < hub->num_heads; i++) {
+		err = clk_prepare_enable(hub->clk_heads[i]);
+		if (err < 0)
+			goto disable_heads;
+	}
+
+	err = reset_control_deassert(hub->rst);
+	if (err < 0)
+		goto disable_heads;
+
+	return 0;
+
+disable_heads:
+	while (i--)
+		clk_disable_unprepare(hub->clk_heads[i]);
+
+	clk_disable_unprepare(hub->clk_hub);
+disable_dsc:
+	clk_disable_unprepare(hub->clk_dsc);
+disable_disp:
+	clk_disable_unprepare(hub->clk_disp);
+put_rpm:
+	pm_runtime_put_sync(dev);
+	return err;
+}
+
 static const struct host1x_client_ops tegra_display_hub_ops = {
 	.init = tegra_display_hub_init,
 	.exit = tegra_display_hub_exit,
+	.suspend = tegra_display_hub_runtime_suspend,
+	.resume = tegra_display_hub_runtime_resume,
 };
 
 static int tegra_display_hub_probe(struct platform_device *pdev)
@@ -851,6 +950,7 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
 static int tegra_display_hub_remove(struct platform_device *pdev)
 {
 	struct tegra_display_hub *hub = platform_get_drvdata(pdev);
+	unsigned int i;
 	int err;
 
 	err = host1x_client_unregister(&hub->client);
@@ -859,78 +959,17 @@ static int tegra_display_hub_remove(struct platform_device *pdev)
 			err);
 	}
 
+	for (i = 0; i < hub->soc->num_wgrps; i++) {
+		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
+
+		mutex_destroy(&wgrp->lock);
+	}
+
 	pm_runtime_disable(&pdev->dev);
 
 	return err;
 }
 
-static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
-{
-	struct tegra_display_hub *hub = dev_get_drvdata(dev);
-	unsigned int i = hub->num_heads;
-	int err;
-
-	err = reset_control_assert(hub->rst);
-	if (err < 0)
-		return err;
-
-	while (i--)
-		clk_disable_unprepare(hub->clk_heads[i]);
-
-	clk_disable_unprepare(hub->clk_hub);
-	clk_disable_unprepare(hub->clk_dsc);
-	clk_disable_unprepare(hub->clk_disp);
-
-	return 0;
-}
-
-static int __maybe_unused tegra_display_hub_resume(struct device *dev)
-{
-	struct tegra_display_hub *hub = dev_get_drvdata(dev);
-	unsigned int i;
-	int err;
-
-	err = clk_prepare_enable(hub->clk_disp);
-	if (err < 0)
-		return err;
-
-	err = clk_prepare_enable(hub->clk_dsc);
-	if (err < 0)
-		goto disable_disp;
-
-	err = clk_prepare_enable(hub->clk_hub);
-	if (err < 0)
-		goto disable_dsc;
-
-	for (i = 0; i < hub->num_heads; i++) {
-		err = clk_prepare_enable(hub->clk_heads[i]);
-		if (err < 0)
-			goto disable_heads;
-	}
-
-	err = reset_control_deassert(hub->rst);
-	if (err < 0)
-		goto disable_heads;
-
-	return 0;
-
-disable_heads:
-	while (i--)
-		clk_disable_unprepare(hub->clk_heads[i]);
-
-	clk_disable_unprepare(hub->clk_hub);
-disable_dsc:
-	clk_disable_unprepare(hub->clk_dsc);
-disable_disp:
-	clk_disable_unprepare(hub->clk_disp);
-	return err;
-}
-
-static const struct dev_pm_ops tegra_display_hub_pm_ops = {
-	SET_RUNTIME_PM_OPS(tegra_display_hub_suspend,
-			   tegra_display_hub_resume, NULL)
-};
-
 static const struct tegra_display_hub_soc tegra186_display_hub = {
 	.num_wgrps = 6,
 	.supports_dsc = true,
@@ -958,7 +997,6 @@ struct platform_driver tegra_display_hub_driver = {
 	.driver = {
 		.name = "tegra-display-hub",
 		.of_match_table = tegra_display_hub_of_match,
-		.pm = &tegra_display_hub_pm_ops,
 	},
 	.probe = tegra_display_hub_probe,
 	.remove = tegra_display_hub_remove,
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 767a60d..3efa1be 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -17,7 +17,7 @@ struct tegra_windowgroup {
 	struct mutex lock;
 
 	unsigned int index;
-	struct device *parent;
+	struct host1x_client *parent;
 	struct reset_control *rst;
 };
 
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 3437373..a264259 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -23,7 +23,7 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
 	 * ignore any other means of obtaining a mode.
 	 */
 	if (output->panel) {
-		err = output->panel->funcs->get_modes(output->panel);
+		err = drm_panel_get_modes(output->panel, connector);
 		if (err > 0)
 			return err;
 	}
@@ -250,3 +250,19 @@ void tegra_output_find_possible_crtcs(struct tegra_output *output,
 
 	output->encoder.possible_crtcs = mask;
 }
+
+int tegra_output_suspend(struct tegra_output *output)
+{
+	if (output->hpd_irq)
+		disable_irq(output->hpd_irq);
+
+	return 0;
+}
+
+int tegra_output_resume(struct tegra_output *output)
+{
+	if (output->hpd_irq)
+		enable_irq(output->hpd_irq);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index cadcdd9..9ccfb56 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -3,6 +3,8 @@
  * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
  */
 
+#include <linux/iommu.h>
+
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fourcc.h>
@@ -107,21 +109,27 @@ const struct drm_plane_funcs tegra_plane_funcs = {
 
 static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
 {
+	struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev);
 	unsigned int i;
 	int err;
 
 	for (i = 0; i < state->base.fb->format->num_planes; i++) {
 		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+		dma_addr_t phys_addr, *phys;
+		struct sg_table *sgt;
 
-		if (!dc->client.group) {
-			struct sg_table *sgt;
+		if (!domain || dc->client.group)
+			phys = &phys_addr;
+		else
+			phys = NULL;
 
-			sgt = host1x_bo_pin(dc->dev, &bo->base, NULL);
-			if (IS_ERR(sgt)) {
-				err = PTR_ERR(sgt);
-				goto unpin;
-			}
+		sgt = host1x_bo_pin(dc->dev, &bo->base, phys);
+		if (IS_ERR(sgt)) {
+			err = PTR_ERR(sgt);
+			goto unpin;
+		}
 
+		if (sgt) {
 			err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
 					 DMA_TO_DEVICE);
 			if (err == 0) {
@@ -143,7 +151,7 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
 			state->iova[i] = sg_dma_address(sgt->sgl);
 			state->sgt[i] = sgt;
 		} else {
-			state->iova[i] = bo->iova;
+			state->iova[i] = phys_addr;
 		}
 	}
 
@@ -156,9 +164,11 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
 		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
 		struct sg_table *sgt = state->sgt[i];
 
-		dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
-		host1x_bo_unpin(dc->dev, &bo->base, sgt);
+		if (sgt)
+			dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
+				     DMA_TO_DEVICE);
 
+		host1x_bo_unpin(dc->dev, &bo->base, sgt);
 		state->iova[i] = DMA_MAPPING_ERROR;
 		state->sgt[i] = NULL;
 	}
@@ -172,17 +182,13 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
 
 	for (i = 0; i < state->base.fb->format->num_planes; i++) {
 		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+		struct sg_table *sgt = state->sgt[i];
 
-		if (!dc->client.group) {
-			struct sg_table *sgt = state->sgt[i];
+		if (sgt)
+			dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
+				     DMA_TO_DEVICE);
 
-			if (sgt) {
-				dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
-					     DMA_TO_DEVICE);
-				host1x_bo_unpin(dc->dev, &bo->base, sgt);
-			}
-		}
-
+		host1x_bo_unpin(dc->dev, &bo->base, sgt);
 		state->iova[i] = DMA_MAPPING_ERROR;
 		state->sgt[i] = NULL;
 	}
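
The tegra_dc_pin()/tegra_dc_unpin() rework above keys off whether the display controller actually needs an SG table: when there is no IOMMU domain, or when the buffer is already mapped through the explicitly managed IOMMU group, a pointer is passed as the @phys argument and host1x_bo_pin() hands back a usable address directly; otherwise it returns an SG table for the caller to DMA-map. A hedged sketch of that caller-side convention; names other than the host1x calls are placeholders:

#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/scatterlist.h>

static int example_pin(struct device *dev, struct host1x_bo *bo,
		       bool want_direct_address, dma_addr_t *iova,
		       struct sg_table **out_sgt)
{
	dma_addr_t phys_addr;
	dma_addr_t *phys = want_direct_address ? &phys_addr : NULL;
	struct sg_table *sgt;

	sgt = host1x_bo_pin(dev, bo, phys);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	if (sgt) {
		/* IOMMU/DMA-API path: map the SG table and use the mapped address. */
		if (!dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE)) {
			host1x_bo_unpin(dev, bo, sgt);
			return -ENOMEM;
		}
		*iova = sg_dma_address(sgt->sgl);
	} else {
		/* Direct path: the pinned address came back through *phys. */
		*iova = phys_addr;
	}

	*out_sgt = sgt;
	return 0;
}
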
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index a68d3b3..81226a4 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2255,7 +2255,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
 	if (err < 0)
 		dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
 
-	pm_runtime_put(sor->dev);
+	host1x_client_suspend(&sor->client);
 }
 
 static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
@@ -2276,7 +2276,11 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
 	mode = &encoder->crtc->state->adjusted_mode;
 	pclk = mode->clock * 1000;
 
-	pm_runtime_get_sync(sor->dev);
+	err = host1x_client_resume(&sor->client);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to resume: %d\n", err);
+		return;
+	}
 
 	/* switch to safe parent clock */
 	err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
@@ -2722,7 +2726,7 @@ static void tegra_sor_dp_disable(struct drm_encoder *encoder)
 	if (output->panel)
 		drm_panel_unprepare(output->panel);
 
-	pm_runtime_put(sor->dev);
+	host1x_client_suspend(&sor->client);
 }
 
 static void tegra_sor_dp_enable(struct drm_encoder *encoder)
@@ -2742,7 +2746,11 @@ static void tegra_sor_dp_enable(struct drm_encoder *encoder)
 	mode = &encoder->crtc->state->adjusted_mode;
 	info = &output->connector.display_info;
 
-	pm_runtime_get_sync(sor->dev);
+	err = host1x_client_resume(&sor->client);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to resume: %d\n", err);
+		return;
+	}
 
 	/* switch to safe parent clock */
 	err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
@@ -3053,7 +3061,7 @@ static const struct tegra_sor_ops tegra_sor_dp_ops = {
 
 static int tegra_sor_init(struct host1x_client *client)
 {
-	struct drm_device *drm = dev_get_drvdata(client->parent);
+	struct drm_device *drm = dev_get_drvdata(client->host);
 	const struct drm_encoder_helper_funcs *helpers = NULL;
 	struct tegra_sor *sor = host1x_client_to_sor(client);
 	int connector = DRM_MODE_CONNECTOR_Unknown;
@@ -3086,9 +3094,10 @@ static int tegra_sor_init(struct host1x_client *client)
 
 	sor->output.dev = sor->dev;
 
-	drm_connector_init(drm, &sor->output.connector,
-			   &tegra_sor_connector_funcs,
-			   connector);
+	drm_connector_init_with_ddc(drm, &sor->output.connector,
+				    &tegra_sor_connector_funcs,
+				    connector,
+				    sor->output.ddc);
 	drm_connector_helper_add(&sor->output.connector,
 				 &tegra_sor_connector_helper_funcs);
 	sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
@@ -3189,9 +3198,80 @@ static int tegra_sor_exit(struct host1x_client *client)
 	return 0;
 }
 
+static int tegra_sor_runtime_suspend(struct host1x_client *client)
+{
+	struct tegra_sor *sor = host1x_client_to_sor(client);
+	struct device *dev = client->dev;
+	int err;
+
+	if (sor->rst) {
+		err = reset_control_assert(sor->rst);
+		if (err < 0) {
+			dev_err(dev, "failed to assert reset: %d\n", err);
+			return err;
+		}
+
+		reset_control_release(sor->rst);
+	}
+
+	usleep_range(1000, 2000);
+
+	clk_disable_unprepare(sor->clk);
+	pm_runtime_put_sync(dev);
+
+	return 0;
+}
+
+static int tegra_sor_runtime_resume(struct host1x_client *client)
+{
+	struct tegra_sor *sor = host1x_client_to_sor(client);
+	struct device *dev = client->dev;
+	int err;
+
+	err = pm_runtime_get_sync(dev);
+	if (err < 0) {
+		dev_err(dev, "failed to get runtime PM: %d\n", err);
+		return err;
+	}
+
+	err = clk_prepare_enable(sor->clk);
+	if (err < 0) {
+		dev_err(dev, "failed to enable clock: %d\n", err);
+		goto put_rpm;
+	}
+
+	usleep_range(1000, 2000);
+
+	if (sor->rst) {
+		err = reset_control_acquire(sor->rst);
+		if (err < 0) {
+			dev_err(dev, "failed to acquire reset: %d\n", err);
+			goto disable_clk;
+		}
+
+		err = reset_control_deassert(sor->rst);
+		if (err < 0) {
+			dev_err(dev, "failed to deassert reset: %d\n", err);
+			goto release_reset;
+		}
+	}
+
+	return 0;
+
+release_reset:
+	reset_control_release(sor->rst);
+disable_clk:
+	clk_disable_unprepare(sor->clk);
+put_rpm:
+	pm_runtime_put_sync(dev);
+	return err;
+}
+
 static const struct host1x_client_ops sor_client_ops = {
 	.init = tegra_sor_init,
 	.exit = tegra_sor_exit,
+	.suspend = tegra_sor_runtime_suspend,
+	.resume = tegra_sor_runtime_resume,
 };
 
 static const u8 tegra124_sor_xbar_cfg[5] = {
@@ -3835,37 +3915,6 @@ static int tegra_sor_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, sor);
 	pm_runtime_enable(&pdev->dev);
 
-	/*
-	 * On Tegra210 and earlier, provide our own implementation for the
-	 * pad output clock.
-	 */
-	if (!sor->clk_pad) {
-		char *name;
-
-		err = pm_runtime_get_sync(&pdev->dev);
-		if (err < 0) {
-			dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
-				err);
-			goto remove;
-		}
-
-		name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", sor->index);
-		if (!name) {
-			err = -ENOMEM;
-			goto remove;
-		}
-
-		sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
-		pm_runtime_put(&pdev->dev);
-	}
-
-	if (IS_ERR(sor->clk_pad)) {
-		err = PTR_ERR(sor->clk_pad);
-		dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n",
-			err);
-		goto remove;
-	}
-
 	INIT_LIST_HEAD(&sor->client.list);
 	sor->client.ops = &sor_client_ops;
 	sor->client.dev = &pdev->dev;
@@ -3874,11 +3923,46 @@ static int tegra_sor_probe(struct platform_device *pdev)
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
 			err);
-		goto remove;
+		goto rpm_disable;
+	}
+
+	/*
+	 * On Tegra210 and earlier, provide our own implementation for the
+	 * pad output clock.
+	 */
+	if (!sor->clk_pad) {
+		char *name;
+
+		name = devm_kasprintf(sor->dev, GFP_KERNEL, "sor%u_pad_clkout",
+				      sor->index);
+		if (!name) {
+			err = -ENOMEM;
+			goto unregister;
+		}
+
+		err = host1x_client_resume(&sor->client);
+		if (err < 0) {
+			dev_err(sor->dev, "failed to resume: %d\n", err);
+			goto unregister;
+		}
+
+		sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
+		host1x_client_suspend(&sor->client);
+	}
+
+	if (IS_ERR(sor->clk_pad)) {
+		err = PTR_ERR(sor->clk_pad);
+		dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
+			err);
+		goto unregister;
 	}
 
 	return 0;
 
+unregister:
+	host1x_client_unregister(&sor->client);
+rpm_disable:
+	pm_runtime_disable(&pdev->dev);
 remove:
 	if (sor->ops && sor->ops->remove)
 		sor->ops->remove(sor);
@@ -3892,8 +3976,6 @@ static int tegra_sor_remove(struct platform_device *pdev)
 	struct tegra_sor *sor = platform_get_drvdata(pdev);
 	int err;
 
-	pm_runtime_disable(&pdev->dev);
-
 	err = host1x_client_unregister(&sor->client);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -3901,6 +3983,8 @@ static int tegra_sor_remove(struct platform_device *pdev)
 		return err;
 	}
 
+	pm_runtime_disable(&pdev->dev);
+
 	if (sor->ops && sor->ops->remove) {
 		err = sor->ops->remove(sor);
 		if (err < 0)
@@ -3912,76 +3996,29 @@ static int tegra_sor_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static int tegra_sor_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_sor_suspend(struct device *dev)
 {
 	struct tegra_sor *sor = dev_get_drvdata(dev);
 	int err;
 
-	if (sor->rst) {
-		err = reset_control_assert(sor->rst);
-		if (err < 0) {
-			dev_err(dev, "failed to assert reset: %d\n", err);
-			return err;
-		}
-
-		reset_control_release(sor->rst);
-	}
-
-	usleep_range(1000, 2000);
-
-	clk_disable_unprepare(sor->clk);
-
-	return 0;
-}
-
-static int tegra_sor_runtime_resume(struct device *dev)
-{
-	struct tegra_sor *sor = dev_get_drvdata(dev);
-	int err;
-
-	err = clk_prepare_enable(sor->clk);
+	err = tegra_output_suspend(&sor->output);
 	if (err < 0) {
-		dev_err(dev, "failed to enable clock: %d\n", err);
+		dev_err(dev, "failed to suspend output: %d\n", err);
 		return err;
 	}
 
-	usleep_range(1000, 2000);
-
-	if (sor->rst) {
-		err = reset_control_acquire(sor->rst);
-		if (err < 0) {
-			dev_err(dev, "failed to acquire reset: %d\n", err);
-			clk_disable_unprepare(sor->clk);
-			return err;
-		}
-
-		err = reset_control_deassert(sor->rst);
-		if (err < 0) {
-			dev_err(dev, "failed to deassert reset: %d\n", err);
-			reset_control_release(sor->rst);
-			clk_disable_unprepare(sor->clk);
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-static int tegra_sor_suspend(struct device *dev)
-{
-	struct tegra_sor *sor = dev_get_drvdata(dev);
-	int err;
-
 	if (sor->hdmi_supply) {
 		err = regulator_disable(sor->hdmi_supply);
-		if (err < 0)
+		if (err < 0) {
+			tegra_output_resume(&sor->output);
 			return err;
+		}
 	}
 
 	return 0;
 }
 
-static int tegra_sor_resume(struct device *dev)
+static int __maybe_unused tegra_sor_resume(struct device *dev)
 {
 	struct tegra_sor *sor = dev_get_drvdata(dev);
 	int err;
@@ -3992,12 +4029,20 @@ static int tegra_sor_resume(struct device *dev)
 			return err;
 	}
 
+	err = tegra_output_resume(&sor->output);
+	if (err < 0) {
+		dev_err(dev, "failed to resume output: %d\n", err);
+
+		if (sor->hdmi_supply)
+			regulator_disable(sor->hdmi_supply);
+
+		return err;
+	}
+
 	return 0;
 }
 
 static const struct dev_pm_ops tegra_sor_pm_ops = {
-	SET_RUNTIME_PM_OPS(tegra_sor_runtime_suspend, tegra_sor_runtime_resume,
-			   NULL)
 	SET_SYSTEM_SLEEP_PM_OPS(tegra_sor_suspend, tegra_sor_resume)
 };
 
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 3526c28..ade56b8 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -161,7 +161,7 @@ static int vic_boot(struct vic *vic)
 static int vic_init(struct host1x_client *client)
 {
 	struct tegra_drm_client *drm = host1x_to_drm_client(client);
-	struct drm_device *dev = dev_get_drvdata(client->parent);
+	struct drm_device *dev = dev_get_drvdata(client->host);
 	struct tegra_drm *tegra = dev->dev_private;
 	struct vic *vic = to_vic(drm);
 	int err;
@@ -190,9 +190,9 @@ static int vic_init(struct host1x_client *client)
 
 	/*
 	 * Inherit the DMA parameters (such as maximum segment size) from the
-	 * parent device.
+	 * parent host1x device.
 	 */
-	client->dev->dma_parms = client->parent->dma_parms;
+	client->dev->dma_parms = client->host->dma_parms;
 
 	return 0;
 
@@ -209,7 +209,7 @@ static int vic_init(struct host1x_client *client)
 static int vic_exit(struct host1x_client *client)
 {
 	struct tegra_drm_client *drm = host1x_to_drm_client(client);
-	struct drm_device *dev = dev_get_drvdata(client->parent);
+	struct drm_device *dev = dev_get_drvdata(client->host);
 	struct tegra_drm *tegra = dev->dev_private;
 	struct vic *vic = to_vic(drm);
 	int err;
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index 87f9480..662bf3a 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -6,7 +6,6 @@
 tilcdc-y := \
 	tilcdc_plane.o \
 	tilcdc_crtc.o \
-	tilcdc_tfp410.o \
 	tilcdc_panel.o \
 	tilcdc_external.o \
 	tilcdc_drv.o
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index a361236..0791a02 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -30,7 +30,6 @@
 #include "tilcdc_external.h"
 #include "tilcdc_panel.h"
 #include "tilcdc_regs.h"
-#include "tilcdc_tfp410.h"
 
 static LIST_HEAD(module_list);
 
@@ -64,12 +63,6 @@ void tilcdc_module_cleanup(struct tilcdc_module *mod)
 
 static struct of_device_id tilcdc_of_match[];
 
-static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
-		struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
-{
-	return drm_gem_fb_create(dev, file_priv, mode_cmd);
-}
-
 static int tilcdc_atomic_check(struct drm_device *dev,
 			       struct drm_atomic_state *state)
 {
@@ -140,7 +133,7 @@ static int tilcdc_commit(struct drm_device *dev,
 }
 
 static const struct drm_mode_config_funcs mode_config_funcs = {
-	.fb_create = tilcdc_fb_create,
+	.fb_create = drm_gem_fb_create,
 	.atomic_check = tilcdc_atomic_check,
 	.atomic_commit = tilcdc_commit,
 };
@@ -649,7 +642,6 @@ static struct platform_driver tilcdc_platform_driver = {
 static int __init tilcdc_drm_init(void)
 {
 	DBG("init");
-	tilcdc_tfp410_init();
 	tilcdc_panel_init();
 	return platform_driver_register(&tilcdc_platform_driver);
 }
@@ -659,7 +651,6 @@ static void __exit tilcdc_drm_fini(void)
 	DBG("fini");
 	platform_driver_unregister(&tilcdc_platform_driver);
 	tilcdc_panel_fini();
-	tilcdc_tfp410_fini();
 }
 
 module_init(tilcdc_drm_init);
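Aside on the tilcdc_drv.c hunk above: a .fb_create callback that merely forwarded its arguments to drm_gem_fb_create() is dropped and the mode-config funcs point at the helper directly. A small illustrative sketch; the atomic helpers shown are generic stand-ins, not taken from this driver:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>

static const struct drm_mode_config_funcs example_mode_config_funcs = {
	.fb_create	= drm_gem_fb_create,		/* no wrapper needed */
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};
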
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
deleted file mode 100644
index 530edb3..0000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ /dev/null
@@ -1,379 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Texas Instruments
- * Author: Rob Clark <robdclark@gmail.com>
- */
-
-#include <linux/gpio.h>
-#include <linux/mod_devicetable.h>
-#include <linux/of_gpio.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/platform_device.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_encoder.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_probe_helper.h>
-
-#include "tilcdc_drv.h"
-#include "tilcdc_tfp410.h"
-
-struct tfp410_module {
-	struct tilcdc_module base;
-	struct i2c_adapter *i2c;
-	int gpio;
-};
-#define to_tfp410_module(x) container_of(x, struct tfp410_module, base)
-
-
-static const struct tilcdc_panel_info dvi_info = {
-		.ac_bias                = 255,
-		.ac_bias_intrpt         = 0,
-		.dma_burst_sz           = 16,
-		.bpp                    = 16,
-		.fdd                    = 0x80,
-		.tft_alt_mode           = 0,
-		.sync_edge              = 0,
-		.sync_ctrl              = 1,
-		.raster_order           = 0,
-};
-
-/*
- * Encoder:
- */
-
-struct tfp410_encoder {
-	struct drm_encoder base;
-	struct tfp410_module *mod;
-	int dpms;
-};
-#define to_tfp410_encoder(x) container_of(x, struct tfp410_encoder, base)
-
-static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-	struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
-
-	if (tfp410_encoder->dpms == mode)
-		return;
-
-	if (mode == DRM_MODE_DPMS_ON) {
-		DBG("Power on");
-		gpio_direction_output(tfp410_encoder->mod->gpio, 1);
-	} else {
-		DBG("Power off");
-		gpio_direction_output(tfp410_encoder->mod->gpio, 0);
-	}
-
-	tfp410_encoder->dpms = mode;
-}
-
-static void tfp410_encoder_prepare(struct drm_encoder *encoder)
-{
-	tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void tfp410_encoder_commit(struct drm_encoder *encoder)
-{
-	tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
-static void tfp410_encoder_mode_set(struct drm_encoder *encoder,
-		struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	/* nothing needed */
-}
-
-static const struct drm_encoder_funcs tfp410_encoder_funcs = {
-		.destroy        = drm_encoder_cleanup,
-};
-
-static const struct drm_encoder_helper_funcs tfp410_encoder_helper_funcs = {
-		.dpms           = tfp410_encoder_dpms,
-		.prepare        = tfp410_encoder_prepare,
-		.commit         = tfp410_encoder_commit,
-		.mode_set       = tfp410_encoder_mode_set,
-};
-
-static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev,
-		struct tfp410_module *mod)
-{
-	struct tfp410_encoder *tfp410_encoder;
-	struct drm_encoder *encoder;
-	int ret;
-
-	tfp410_encoder = devm_kzalloc(dev->dev, sizeof(*tfp410_encoder),
-				      GFP_KERNEL);
-	if (!tfp410_encoder)
-		return NULL;
-
-	tfp410_encoder->dpms = DRM_MODE_DPMS_OFF;
-	tfp410_encoder->mod = mod;
-
-	encoder = &tfp410_encoder->base;
-	encoder->possible_crtcs = 1;
-
-	ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs,
-			DRM_MODE_ENCODER_TMDS, NULL);
-	if (ret < 0)
-		goto fail;
-
-	drm_encoder_helper_add(encoder, &tfp410_encoder_helper_funcs);
-
-	return encoder;
-
-fail:
-	drm_encoder_cleanup(encoder);
-	return NULL;
-}
-
-/*
- * Connector:
- */
-
-struct tfp410_connector {
-	struct drm_connector base;
-
-	struct drm_encoder *encoder;  /* our connected encoder */
-	struct tfp410_module *mod;
-};
-#define to_tfp410_connector(x) container_of(x, struct tfp410_connector, base)
-
-
-static void tfp410_connector_destroy(struct drm_connector *connector)
-{
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-}
-
-static enum drm_connector_status tfp410_connector_detect(
-		struct drm_connector *connector,
-		bool force)
-{
-	struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
-
-	if (drm_probe_ddc(tfp410_connector->mod->i2c))
-		return connector_status_connected;
-
-	return connector_status_unknown;
-}
-
-static int tfp410_connector_get_modes(struct drm_connector *connector)
-{
-	struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
-	struct edid *edid;
-	int ret = 0;
-
-	edid = drm_get_edid(connector, tfp410_connector->mod->i2c);
-
-	drm_connector_update_edid_property(connector, edid);
-
-	if (edid) {
-		ret = drm_add_edid_modes(connector, edid);
-		kfree(edid);
-	}
-
-	return ret;
-}
-
-static struct drm_encoder *tfp410_connector_best_encoder(
-		struct drm_connector *connector)
-{
-	struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
-	return tfp410_connector->encoder;
-}
-
-static const struct drm_connector_funcs tfp410_connector_funcs = {
-	.destroy            = tfp410_connector_destroy,
-	.detect             = tfp410_connector_detect,
-	.fill_modes         = drm_helper_probe_single_connector_modes,
-	.reset              = drm_atomic_helper_connector_reset,
-	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = {
-	.get_modes          = tfp410_connector_get_modes,
-	.best_encoder       = tfp410_connector_best_encoder,
-};
-
-static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
-		struct tfp410_module *mod, struct drm_encoder *encoder)
-{
-	struct tfp410_connector *tfp410_connector;
-	struct drm_connector *connector;
-	int ret;
-
-	tfp410_connector = devm_kzalloc(dev->dev, sizeof(*tfp410_connector),
-					GFP_KERNEL);
-	if (!tfp410_connector)
-		return NULL;
-
-	tfp410_connector->encoder = encoder;
-	tfp410_connector->mod = mod;
-
-	connector = &tfp410_connector->base;
-
-	drm_connector_init(dev, connector, &tfp410_connector_funcs,
-			DRM_MODE_CONNECTOR_DVID);
-	drm_connector_helper_add(connector, &tfp410_connector_helper_funcs);
-
-	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
-			DRM_CONNECTOR_POLL_DISCONNECT;
-
-	connector->interlace_allowed = 0;
-	connector->doublescan_allowed = 0;
-
-	ret = drm_connector_attach_encoder(connector, encoder);
-	if (ret)
-		goto fail;
-
-	return connector;
-
-fail:
-	tfp410_connector_destroy(connector);
-	return NULL;
-}
-
-/*
- * Module:
- */
-
-static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
-{
-	struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
-	struct tilcdc_drm_private *priv = dev->dev_private;
-	struct drm_encoder *encoder;
-	struct drm_connector *connector;
-
-	encoder = tfp410_encoder_create(dev, tfp410_mod);
-	if (!encoder)
-		return -ENOMEM;
-
-	connector = tfp410_connector_create(dev, tfp410_mod, encoder);
-	if (!connector)
-		return -ENOMEM;
-
-	priv->encoders[priv->num_encoders++] = encoder;
-	priv->connectors[priv->num_connectors++] = connector;
-
-	tilcdc_crtc_set_panel_info(priv->crtc, &dvi_info);
-	return 0;
-}
-
-static const struct tilcdc_module_ops tfp410_module_ops = {
-		.modeset_init = tfp410_modeset_init,
-};
-
-/*
- * Device:
- */
-
-static int tfp410_probe(struct platform_device *pdev)
-{
-	struct device_node *node = pdev->dev.of_node;
-	struct device_node *i2c_node;
-	struct tfp410_module *tfp410_mod;
-	struct tilcdc_module *mod;
-	struct pinctrl *pinctrl;
-	uint32_t i2c_phandle;
-	int ret = -EINVAL;
-
-	/* bail out early if no DT data: */
-	if (!node) {
-		dev_err(&pdev->dev, "device-tree data is missing\n");
-		return -ENXIO;
-	}
-
-	tfp410_mod = devm_kzalloc(&pdev->dev, sizeof(*tfp410_mod), GFP_KERNEL);
-	if (!tfp410_mod)
-		return -ENOMEM;
-
-	mod = &tfp410_mod->base;
-	pdev->dev.platform_data = mod;
-
-	tilcdc_module_init(mod, "tfp410", &tfp410_module_ops);
-
-	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
-	if (IS_ERR(pinctrl))
-		dev_warn(&pdev->dev, "pins are not configured\n");
-
-	if (of_property_read_u32(node, "i2c", &i2c_phandle)) {
-		dev_err(&pdev->dev, "could not get i2c bus phandle\n");
-		goto fail;
-	}
-
-	i2c_node = of_find_node_by_phandle(i2c_phandle);
-	if (!i2c_node) {
-		dev_err(&pdev->dev, "could not get i2c bus node\n");
-		goto fail;
-	}
-
-	tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
-	if (!tfp410_mod->i2c) {
-		dev_err(&pdev->dev, "could not get i2c\n");
-		of_node_put(i2c_node);
-		goto fail;
-	}
-
-	of_node_put(i2c_node);
-
-	tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio",
-			0, NULL);
-	if (tfp410_mod->gpio < 0) {
-		dev_warn(&pdev->dev, "No power down GPIO\n");
-	} else {
-		ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
-		if (ret) {
-			dev_err(&pdev->dev, "could not get DVI_PDn gpio\n");
-			goto fail_adapter;
-		}
-	}
-
-	return 0;
-
-fail_adapter:
-	i2c_put_adapter(tfp410_mod->i2c);
-
-fail:
-	tilcdc_module_cleanup(mod);
-	return ret;
-}
-
-static int tfp410_remove(struct platform_device *pdev)
-{
-	struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
-	struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
-
-	i2c_put_adapter(tfp410_mod->i2c);
-	gpio_free(tfp410_mod->gpio);
-
-	tilcdc_module_cleanup(mod);
-
-	return 0;
-}
-
-static const struct of_device_id tfp410_of_match[] = {
-		{ .compatible = "ti,tilcdc,tfp410", },
-		{ },
-};
-
-struct platform_driver tfp410_driver = {
-	.probe = tfp410_probe,
-	.remove = tfp410_remove,
-	.driver = {
-		.owner = THIS_MODULE,
-		.name = "tfp410",
-		.of_match_table = tfp410_of_match,
-	},
-};
-
-int __init tilcdc_tfp410_init(void)
-{
-	return platform_driver_register(&tfp410_driver);
-}
-
-void __exit tilcdc_tfp410_fini(void)
-{
-	platform_driver_unregister(&tfp410_driver);
-}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
deleted file mode 100644
index f9aaf69..0000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 Texas Instruments
- * Author: Rob Clark <robdclark@gmail.com>
- */
-
-#ifndef __TILCDC_TFP410_H__
-#define __TILCDC_TFP410_H__
-
-/* sub-module for tfp410 dvi adaptor */
-
-int tilcdc_tfp410_init(void);
-void tilcdc_tfp410_fini(void);
-
-#endif /* __TILCDC_TFP410_H__ */
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 3cc21a1..060cc75 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -240,7 +240,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
 
 	mipi_dbi_command(dbi, ST7586_SET_DISP_DUTY, 0x7f);
 	mipi_dbi_command(dbi, ST7586_SET_PART_DISP, 0xa0);
-	mipi_dbi_command(dbi, MIPI_DCS_SET_PARTIAL_AREA, 0x00, 0x00, 0x00, 0x77);
+	mipi_dbi_command(dbi, MIPI_DCS_SET_PARTIAL_ROWS, 0x00, 0x00, 0x00, 0x77);
 	mipi_dbi_command(dbi, MIPI_DCS_EXIT_INVERT_MODE);
 
 	msleep(100);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d91b04..5df596f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -161,7 +161,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	dma_fence_put(bo->moving);
 	if (!ttm_bo_uses_embedded_gem_object(bo))
 		dma_resv_fini(&bo->base._resv);
-	mutex_destroy(&bo->wu_mutex);
 	bo->destroy(bo);
 	ttm_mem_global_free(&ttm_mem_glob, acc_size);
 }
@@ -1299,7 +1298,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
 	INIT_LIST_HEAD(&bo->io_reserve_lru);
-	mutex_init(&bo->wu_mutex);
 	bo->bdev = bdev;
 	bo->type = type;
 	bo->num_pages = num_pages;
@@ -1903,37 +1901,3 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 	while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
 }
 EXPORT_SYMBOL(ttm_bo_swapout_all);
-
-/**
- * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
- * unreserved
- *
- * @bo: Pointer to buffer
- */
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
-{
-	int ret;
-
-	/*
-	 * In the absense of a wait_unlocked API,
-	 * Use the bo::wu_mutex to avoid triggering livelocks due to
-	 * concurrent use of this function. Note that this use of
-	 * bo::wu_mutex can go away if we change locking order to
-	 * mmap_sem -> bo::reserve.
-	 */
-	ret = mutex_lock_interruptible(&bo->wu_mutex);
-	if (unlikely(ret != 0))
-		return -ERESTARTSYS;
-	if (!dma_resv_is_locked(bo->base.resv))
-		goto out_unlock;
-	ret = dma_resv_lock_interruptible(bo->base.resv, NULL);
-	if (ret == -EINTR)
-		ret = -ERESTARTSYS;
-	if (unlikely(ret != 0))
-		goto out_unlock;
-	dma_resv_unlock(bo->base.resv);
-
-out_unlock:
-	mutex_unlock(&bo->wu_mutex);
-	return ret;
-}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 97fd1da..49ed5577 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -504,7 +504,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->base.lru);
 	INIT_LIST_HEAD(&fbo->base.swap);
 	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
-	mutex_init(&fbo->base.wu_mutex);
 	fbo->base.moving = NULL;
 	drm_vma_node_reset(&fbo->base.base.vma_node);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 11863fb..389128b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -139,19 +139,17 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
 				ttm_bo_get(bo);
 				up_read(&vmf->vma->vm_mm->mmap_sem);
-				(void) ttm_bo_wait_unreserved(bo);
+				if (!dma_resv_lock_interruptible(bo->base.resv,
+								 NULL))
+					dma_resv_unlock(bo->base.resv);
 				ttm_bo_put(bo);
 			}
 
 			return VM_FAULT_RETRY;
 		}
 
-		/*
-		 * If we'd want to change locking order to
-		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
-		 * instead of retrying the fault...
-		 */
-		return VM_FAULT_NOPAGE;
+		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
+			return VM_FAULT_NOPAGE;
 	}
 
 	return 0;
@@ -181,7 +179,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 				    pgoff_t num_prefault)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct vm_area_struct cvma = *vma;
 	struct ttm_buffer_object *bo = vma->vm_private_data;
 	struct ttm_bo_device *bdev = bo->bdev;
 	unsigned long page_offset;
@@ -252,7 +249,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		goto out_io_unlock;
 	}
 
-	cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
+	prot = ttm_io_prot(bo->mem.placement, prot);
 	if (!bo->mem.bus.is_iomem) {
 		struct ttm_operation_ctx ctx = {
 			.interruptible = false,
@@ -268,7 +265,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		}
 	} else {
 		/* Iomem should not be marked encrypted */
-		cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
+		prot = pgprot_decrypted(prot);
 	}
 
 	/*
@@ -291,11 +288,20 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 			pfn = page_to_pfn(page);
 		}
 
+		/*
+		 * Note that the value of @prot at this point may differ from
+		 * the value of @vma->vm_page_prot in the caching- and
+		 * encryption bits. This is because the exact location of the
+		 * data may not be known at mmap() time and may also change
+		 * at arbitrary times while the data is mmap'ed.
+		 * See vmf_insert_mixed_prot() for a discussion.
+		 */
 		if (vma->vm_flags & VM_MIXEDMAP)
-			ret = vmf_insert_mixed(&cvma, address,
-					__pfn_to_pfn_t(pfn, PFN_DEV));
+			ret = vmf_insert_mixed_prot(vma, address,
+						    __pfn_to_pfn_t(pfn, PFN_DEV),
+						    prot);
 		else
-			ret = vmf_insert_pfn(&cvma, address, pfn);
+			ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
 
 		/* Never error on prefaulted PTEs */
 		if (unlikely((ret & VM_FAULT_ERROR))) {
@@ -316,7 +322,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 }
 EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
 
-static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	pgprot_t prot;
@@ -327,7 +333,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 	if (ret)
 		return ret;
 
-	prot = vm_get_page_prot(vma->vm_flags);
+	prot = vma->vm_page_prot;
 	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 		return ret;
@@ -336,6 +342,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_vm_fault);
 
 void ttm_bo_vm_open(struct vm_area_struct *vma)
 {
@@ -395,8 +402,8 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
 	return len;
 }
 
-static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
-			    void *buf, int len, int write)
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+		     void *buf, int len, int write)
 {
 	unsigned long offset = (addr) - vma->vm_start;
 	struct ttm_buffer_object *bo = vma->vm_private_data;
@@ -432,6 +439,7 @@ static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
 
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_vm_access);
 
 static const struct vm_operations_struct ttm_bo_vm_ops = {
 	.fault = ttm_bo_vm_fault,
@@ -520,13 +528,6 @@ EXPORT_SYMBOL(ttm_bo_mmap);
 int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 {
 	ttm_bo_get(bo);
-
-	/*
-	 * FIXME: &drm_gem_object_funcs.mmap is called with the fake offset
-	 * removed. Add it back here until the rest of TTM works without it.
-	 */
-	vma->vm_pgoff += drm_vma_node_start(&bo->base.vma_node);
-
 	ttm_bo_mmap_vma_setup(bo, vma);
 	return 0;
 }
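Aside on the ttm_bo_vm.c changes above: instead of patching a stack copy of the VMA (the old cvma), the fault path now computes the final page protection locally and hands it to vmf_insert_mixed_prot()/vmf_insert_pfn_prot(). A condensed sketch of that pattern, using only helpers that appear in the hunks; the wrapper function itself is illustrative:

static vm_fault_t example_insert_pfn(struct vm_fault *vmf, unsigned long pfn,
				     uint32_t placement, bool is_iomem)
{
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t prot = vma->vm_page_prot;

	prot = ttm_io_prot(placement, prot);	/* fix up caching bits */
	if (is_iomem)
		prot = pgprot_decrypted(prot);	/* iomem must not be encrypted */

	if (vma->vm_flags & VM_MIXEDMAP)
		return vmf_insert_mixed_prot(vma, vmf->address,
					     __pfn_to_pfn_t(pfn, PFN_DEV), prot);

	return vmf_insert_pfn_prot(vma, vmf->address, pfn, prot);
}
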
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e0e9b4f..2ec448e 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -223,8 +223,9 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
 	ttm->func->destroy(ttm);
 }
 
-void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
-			uint32_t page_flags)
+static void ttm_tt_init_fields(struct ttm_tt *ttm,
+			       struct ttm_buffer_object *bo,
+			       uint32_t page_flags)
 {
 	ttm->bdev = bo->bdev;
 	ttm->num_pages = bo->num_pages;
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 954b09c..00ba9e5 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -110,7 +110,7 @@ static int tve200_modeset_init(struct drm_device *dev)
 	}
 
 	priv->panel = panel;
-	priv->connector = panel->connector;
+	priv->connector = drm_panel_bridge_connector(bridge);
 	priv->bridge = bridge;
 
 	dev_info(dev->dev, "attached to panel %s\n",
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index b4d179b..1f497d8 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -2,10 +2,10 @@
 config DRM_UDL
 	tristate "DisplayLink"
 	depends on DRM
-	depends on USB_SUPPORT
+	depends on USB
 	depends on USB_ARCH_HAS_HCD
-	select USB
+	select DRM_GEM_SHMEM_HELPER
 	select DRM_KMS_HELPER
 	help
 	  This is a KMS driver for the USB displaylink video adapters.
-          Say M/Y to add support for these devices via drm/kms interfaces.
+	  Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
index e5bb6f7..b50179b 100644
--- a/drivers/gpu/drm/udl/Makefile
+++ b/drivers/gpu/drm/udl/Makefile
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o udl_dmabuf.o
+udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_main.o udl_transfer.o udl_gem.o
 
 obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index b4ae3e8..e9671d3 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -7,6 +7,7 @@
  * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
  */
 
+#include <drm/drm_atomic_state_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_probe_helper.h>
 
@@ -90,13 +91,6 @@ udl_detect(struct drm_connector *connector, bool force)
 	return connector_status_connected;
 }
 
-static int udl_connector_set_property(struct drm_connector *connector,
-				      struct drm_property *property,
-				      uint64_t val)
-{
-	return 0;
-}
-
 static void udl_connector_destroy(struct drm_connector *connector)
 {
 	struct udl_drm_connector *udl_connector =
@@ -104,7 +98,6 @@ static void udl_connector_destroy(struct drm_connector *connector)
 					struct udl_drm_connector,
 					connector);
 
-	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(udl_connector->edid);
 	kfree(connector);
@@ -117,30 +110,30 @@ static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
 
 static const struct drm_connector_funcs udl_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
+	.reset = drm_atomic_helper_connector_reset,
 	.detect = udl_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = udl_connector_destroy,
-	.set_property = udl_connector_set_property,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state   = drm_atomic_helper_connector_destroy_state,
 };
 
-int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
+struct drm_connector *udl_connector_init(struct drm_device *dev)
 {
 	struct udl_drm_connector *udl_connector;
 	struct drm_connector *connector;
 
 	udl_connector = kzalloc(sizeof(struct udl_drm_connector), GFP_KERNEL);
 	if (!udl_connector)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	connector = &udl_connector->connector;
 	drm_connector_init(dev, connector, &udl_connector_funcs,
 			   DRM_MODE_CONNECTOR_DVII);
 	drm_connector_helper_add(connector, &udl_connector_helper_funcs);
 
-	drm_connector_register(connector);
-	drm_connector_attach_encoder(connector, encoder);
 	connector->polled = DRM_CONNECTOR_POLL_HPD |
 		DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
 
-	return 0;
+	return connector;
 }
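Aside on the udl_connector.c rework above: udl_connector_init() now returns the connector (or an ERR_PTR) instead of registering and attaching it itself, and the connector funcs gain the standard atomic-state helpers. A minimal sketch of how a caller consumes the new return convention; the ret variable and error label are hypothetical:

struct drm_connector *connector;

connector = udl_connector_init(dev);
if (IS_ERR(connector)) {
	ret = PTR_ERR(connector);
	goto err_cleanup;	/* hypothetical unwind path in the caller */
}
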
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
deleted file mode 100644
index 3108e9a..0000000
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ /dev/null
@@ -1,255 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * udl_dmabuf.c
- *
- * Copyright (c) 2014 The Chromium OS Authors
- */
-
-#include <linux/shmem_fs.h>
-#include <linux/dma-buf.h>
-
-#include <drm/drm_prime.h>
-
-#include "udl_drv.h"
-
-struct udl_drm_dmabuf_attachment {
-	struct sg_table sgt;
-	enum dma_data_direction dir;
-	bool is_mapped;
-};
-
-static int udl_attach_dma_buf(struct dma_buf *dmabuf,
-			      struct dma_buf_attachment *attach)
-{
-	struct udl_drm_dmabuf_attachment *udl_attach;
-
-	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
-			attach->dmabuf->size);
-
-	udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
-	if (!udl_attach)
-		return -ENOMEM;
-
-	udl_attach->dir = DMA_NONE;
-	attach->priv = udl_attach;
-
-	return 0;
-}
-
-static void udl_detach_dma_buf(struct dma_buf *dmabuf,
-			       struct dma_buf_attachment *attach)
-{
-	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
-	struct sg_table *sgt;
-
-	if (!udl_attach)
-		return;
-
-	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
-			attach->dmabuf->size);
-
-	sgt = &udl_attach->sgt;
-
-	if (udl_attach->dir != DMA_NONE)
-		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
-				udl_attach->dir);
-
-	sg_free_table(sgt);
-	kfree(udl_attach);
-	attach->priv = NULL;
-}
-
-static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
-					enum dma_data_direction dir)
-{
-	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
-	struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
-	struct drm_device *dev = obj->base.dev;
-	struct udl_device *udl = dev->dev_private;
-	struct scatterlist *rd, *wr;
-	struct sg_table *sgt = NULL;
-	unsigned int i;
-	int page_count;
-	int nents, ret;
-
-	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
-			attach->dmabuf->size, dir);
-
-	/* just return current sgt if already requested. */
-	if (udl_attach->dir == dir && udl_attach->is_mapped)
-		return &udl_attach->sgt;
-
-	if (!obj->pages) {
-		ret = udl_gem_get_pages(obj);
-		if (ret) {
-			DRM_ERROR("failed to map pages.\n");
-			return ERR_PTR(ret);
-		}
-	}
-
-	page_count = obj->base.size / PAGE_SIZE;
-	obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
-	if (IS_ERR(obj->sg)) {
-		DRM_ERROR("failed to allocate sgt.\n");
-		return ERR_CAST(obj->sg);
-	}
-
-	sgt = &udl_attach->sgt;
-
-	ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
-	if (ret) {
-		DRM_ERROR("failed to alloc sgt.\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	mutex_lock(&udl->gem_lock);
-
-	rd = obj->sg->sgl;
-	wr = sgt->sgl;
-	for (i = 0; i < sgt->orig_nents; ++i) {
-		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
-		rd = sg_next(rd);
-		wr = sg_next(wr);
-	}
-
-	if (dir != DMA_NONE) {
-		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
-		if (!nents) {
-			DRM_ERROR("failed to map sgl with iommu.\n");
-			sg_free_table(sgt);
-			sgt = ERR_PTR(-EIO);
-			goto err_unlock;
-		}
-	}
-
-	udl_attach->is_mapped = true;
-	udl_attach->dir = dir;
-	attach->priv = udl_attach;
-
-err_unlock:
-	mutex_unlock(&udl->gem_lock);
-	return sgt;
-}
-
-static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
-			      struct sg_table *sgt,
-			      enum dma_data_direction dir)
-{
-	/* Nothing to do. */
-	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
-			attach->dmabuf->size, dir);
-}
-
-static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
-	/* TODO */
-
-	return NULL;
-}
-
-static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
-			      unsigned long page_num, void *addr)
-{
-	/* TODO */
-}
-
-static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
-			   struct vm_area_struct *vma)
-{
-	/* TODO */
-
-	return -EINVAL;
-}
-
-static const struct dma_buf_ops udl_dmabuf_ops = {
-	.attach			= udl_attach_dma_buf,
-	.detach			= udl_detach_dma_buf,
-	.map_dma_buf		= udl_map_dma_buf,
-	.unmap_dma_buf		= udl_unmap_dma_buf,
-	.map			= udl_dmabuf_kmap,
-	.unmap			= udl_dmabuf_kunmap,
-	.mmap			= udl_dmabuf_mmap,
-	.release		= drm_gem_dmabuf_release,
-};
-
-struct dma_buf *udl_gem_prime_export(struct drm_gem_object *obj, int flags)
-{
-	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-	exp_info.ops = &udl_dmabuf_ops;
-	exp_info.size = obj->size;
-	exp_info.flags = flags;
-	exp_info.priv = obj;
-
-	return drm_gem_dmabuf_export(obj->dev, &exp_info);
-}
-
-static int udl_prime_create(struct drm_device *dev,
-			    size_t size,
-			    struct sg_table *sg,
-			    struct udl_gem_object **obj_p)
-{
-	struct udl_gem_object *obj;
-	int npages;
-
-	npages = size / PAGE_SIZE;
-
-	*obj_p = NULL;
-	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
-	if (!obj)
-		return -ENOMEM;
-
-	obj->sg = sg;
-	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-	if (obj->pages == NULL) {
-		DRM_ERROR("obj pages is NULL %d\n", npages);
-		return -ENOMEM;
-	}
-
-	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
-
-	*obj_p = obj;
-	return 0;
-}
-
-struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
-				struct dma_buf *dma_buf)
-{
-	struct dma_buf_attachment *attach;
-	struct sg_table *sg;
-	struct udl_gem_object *uobj;
-	int ret;
-
-	/* need to attach */
-	get_device(dev->dev);
-	attach = dma_buf_attach(dma_buf, dev->dev);
-	if (IS_ERR(attach)) {
-		put_device(dev->dev);
-		return ERR_CAST(attach);
-	}
-
-	get_dma_buf(dma_buf);
-
-	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(sg)) {
-		ret = PTR_ERR(sg);
-		goto fail_detach;
-	}
-
-	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
-	if (ret)
-		goto fail_unmap;
-
-	uobj->base.import_attach = attach;
-	uobj->flags = UDL_BO_WC;
-
-	return &uobj->base;
-
-fail_unmap:
-	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
-	dma_buf_detach(dma_buf, attach);
-	dma_buf_put(dma_buf);
-	put_device(dev->dev);
-	return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 8426669..e6c1cd7 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -7,7 +7,9 @@
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_file.h>
+#include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_print.h>
@@ -19,36 +21,17 @@ static int udl_usb_suspend(struct usb_interface *interface,
 {
 	struct drm_device *dev = usb_get_intfdata(interface);
 
-	drm_kms_helper_poll_disable(dev);
-	return 0;
+	return drm_mode_config_helper_suspend(dev);
 }
 
 static int udl_usb_resume(struct usb_interface *interface)
 {
 	struct drm_device *dev = usb_get_intfdata(interface);
 
-	drm_kms_helper_poll_enable(dev);
-	udl_modeset_restore(dev);
-	return 0;
+	return drm_mode_config_helper_resume(dev);
 }
 
-static const struct vm_operations_struct udl_gem_vm_ops = {
-	.fault = udl_gem_fault,
-	.open = drm_gem_vm_open,
-	.close = drm_gem_vm_close,
-};
-
-static const struct file_operations udl_driver_fops = {
-	.owner = THIS_MODULE,
-	.open = drm_open,
-	.mmap = udl_drm_gem_mmap,
-	.poll = drm_poll,
-	.read = drm_read,
-	.unlocked_ioctl	= drm_ioctl,
-	.release = drm_release,
-	.compat_ioctl = drm_compat_ioctl,
-	.llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(udl_driver_fops);
 
 static void udl_driver_release(struct drm_device *dev)
 {
@@ -59,21 +42,14 @@ static void udl_driver_release(struct drm_device *dev)
 }
 
 static struct drm_driver driver = {
-	.driver_features = DRIVER_MODESET | DRIVER_GEM,
+	.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
 	.release = udl_driver_release,
 
 	/* gem hooks */
-	.gem_free_object_unlocked = udl_gem_free_object,
-	.gem_vm_ops = &udl_gem_vm_ops,
+	.gem_create_object = udl_driver_gem_create_object,
 
-	.dumb_create = udl_dumb_create,
-	.dumb_map_offset = udl_gem_mmap,
 	.fops = &udl_driver_fops,
-
-	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-	.gem_prime_export = udl_gem_prime_export,
-	.gem_prime_import = udl_gem_prime_import,
+	DRM_GEM_SHMEM_DRIVER_OPS,
 
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
@@ -129,8 +105,14 @@ static int udl_usb_probe(struct usb_interface *interface,
 
 	DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
 
+	r = drm_fbdev_generic_setup(&udl->drm, 0);
+	if (r)
+		goto err_drm_dev_unregister;
+
 	return 0;
 
+err_drm_dev_unregister:
+	drm_dev_unregister(&udl->drm);
 err_free:
 	drm_dev_put(&udl->drm);
 	return r;
@@ -141,7 +123,6 @@ static void udl_usb_disconnect(struct usb_interface *interface)
 	struct drm_device *dev = usb_get_intfdata(interface);
 
 	drm_kms_helper_poll_disable(dev);
-	udl_fbdev_unplug(dev);
 	udl_drop_usb(dev);
 	drm_dev_unplug(dev);
 	drm_dev_put(dev);
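Aside on the udl_drv.c conversion above: the driver drops its bespoke GEM, PRIME and fbdev code in favour of the shared SHMEM GEM helpers, the stock DEFINE_DRM_GEM_FOPS file operations and generic fbdev emulation (drm_fbdev_generic_setup() in probe). A skeletal sketch of that wiring, with placeholder identification strings:

#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>

DEFINE_DRM_GEM_FOPS(example_fops);

static struct drm_driver example_driver = {
	.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
	.fops		 = &example_fops,
	DRM_GEM_SHMEM_DRIVER_OPS,	/* dumb-buffer + PRIME plumbing */
	.name		 = "example",
	.desc		 = "Example shmem-backed KMS driver",
};
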
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 12a970f..e67227c 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -17,8 +17,8 @@
 #include <drm/drm_device.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_simple_kms_helper.h>
 
-struct drm_encoder;
 struct drm_mode_create_dumb;
 
 #define DRIVER_NAME		"udl"
@@ -29,9 +29,6 @@ struct drm_mode_create_dumb;
 #define DRIVER_MINOR		0
 #define DRIVER_PATCHLEVEL	1
 
-#define UDL_BO_CACHEABLE		(1 << 0)
-#define UDL_BO_WC		(1 << 1)
-
 struct udl_device;
 
 struct urb_node {
@@ -50,57 +47,29 @@ struct urb_list {
 	size_t size;
 };
 
-struct udl_fbdev;
-
 struct udl_device {
 	struct drm_device drm;
 	struct device *dev;
 	struct usb_device *udev;
-	struct drm_crtc *crtc;
+
+	struct drm_simple_display_pipe display_pipe;
 
 	struct mutex gem_lock;
 
 	int sku_pixel_limit;
 
 	struct urb_list urbs;
-	atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
 
-	struct udl_fbdev *fbdev;
 	char mode_buf[1024];
 	uint32_t mode_buf_len;
-	atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
-	atomic_t bytes_identical; /* saved effort with backbuffer comparison */
-	atomic_t bytes_sent; /* to usb, after compression including overhead */
-	atomic_t cpu_kcycles_used; /* transpired during pixel processing */
 };
 
 #define to_udl(x) container_of(x, struct udl_device, drm)
 
-struct udl_gem_object {
-	struct drm_gem_object base;
-	struct page **pages;
-	void *vmapping;
-	struct sg_table *sg;
-	unsigned int flags;
-};
-
-#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
-
-struct udl_framebuffer {
-	struct drm_framebuffer base;
-	struct udl_gem_object *obj;
-	bool active_16; /* active on the 16-bit channel */
-};
-
-#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
-
 /* modeset */
 int udl_modeset_init(struct drm_device *dev);
-void udl_modeset_restore(struct drm_device *dev);
 void udl_modeset_cleanup(struct drm_device *dev);
-int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder);
-
-struct drm_encoder *udl_encoder_init(struct drm_device *dev);
+struct drm_connector *udl_connector_init(struct drm_device *dev);
 
 struct urb *udl_get_urb(struct drm_device *dev);
 
@@ -110,41 +79,12 @@ void udl_urb_completion(struct urb *urb);
 int udl_init(struct udl_device *udl);
 void udl_fini(struct drm_device *dev);
 
-int udl_fbdev_init(struct drm_device *dev);
-void udl_fbdev_cleanup(struct drm_device *dev);
-void udl_fbdev_unplug(struct drm_device *dev);
-struct drm_framebuffer *
-udl_fb_user_fb_create(struct drm_device *dev,
-		      struct drm_file *file,
-		      const struct drm_mode_fb_cmd2 *mode_cmd);
-
 int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
 		     const char *front, char **urb_buf_ptr,
-		     u32 byte_offset, u32 device_byte_offset, u32 byte_width,
-		     int *ident_ptr, int *sent_ptr);
+		     u32 byte_offset, u32 device_byte_offset, u32 byte_width);
 
-int udl_dumb_create(struct drm_file *file_priv,
-		    struct drm_device *dev,
-		    struct drm_mode_create_dumb *args);
-int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
-		 uint32_t handle, uint64_t *offset);
-
-void udl_gem_free_object(struct drm_gem_object *gem_obj);
-struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
-					    size_t size);
-struct dma_buf *udl_gem_prime_export(struct drm_gem_object *obj, int flags);
-struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
-				struct dma_buf *dma_buf);
-
-int udl_gem_get_pages(struct udl_gem_object *obj);
-void udl_gem_put_pages(struct udl_gem_object *obj);
-int udl_gem_vmap(struct udl_gem_object *obj);
-void udl_gem_vunmap(struct udl_gem_object *obj);
-int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-vm_fault_t udl_gem_fault(struct vm_fault *vmf);
-
-int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
-		      int width, int height);
+struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
+						    size_t size);
 
 int udl_drop_usb(struct drm_device *dev);
 
@@ -158,4 +98,13 @@ int udl_drop_usb(struct drm_device *dev);
 #define CMD_WRITE_COPY16 "\xAF\x6A" /**< 16 bit copy command. */
 #define CMD_WRITE_RLX16  "\xAF\x6B" /**< 16 bit extended run length command. */
 
+/* On/Off for driving the DisplayLink framebuffer to the display */
+#define UDL_REG_BLANK_MODE		0x1f
+
+#define UDL_BLANK_MODE_ON		0x00 /* hsync and vsync on, visible */
+#define UDL_BLANK_MODE_BLANKED		0x01 /* hsync and vsync on, blanked */
+#define UDL_BLANK_MODE_VSYNC_OFF	0x03 /* vsync off, blanked */
+#define UDL_BLANK_MODE_HSYNC_OFF	0x05 /* hsync off, blanked */
+#define UDL_BLANK_MODE_POWERDOWN	0x07 /* powered off; requires modeset */
+
 #endif
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
deleted file mode 100644
index 203f041..0000000
--- a/drivers/gpu/drm/udl/udl_encoder.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Red Hat
- * based in parts on udlfb.c:
- * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
- * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
- * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
- */
-
-#include <drm/drm_encoder.h>
-#include <drm/drm_modeset_helper_vtables.h>
-
-#include "udl_drv.h"
-
-/* dummy encoder */
-static void udl_enc_destroy(struct drm_encoder *encoder)
-{
-	drm_encoder_cleanup(encoder);
-	kfree(encoder);
-}
-
-static void udl_encoder_disable(struct drm_encoder *encoder)
-{
-}
-
-static void udl_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void udl_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void udl_encoder_mode_set(struct drm_encoder *encoder,
-				 struct drm_display_mode *mode,
-				 struct drm_display_mode *adjusted_mode)
-{
-}
-
-static void
-udl_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
-static const struct drm_encoder_helper_funcs udl_helper_funcs = {
-	.dpms = udl_encoder_dpms,
-	.prepare = udl_encoder_prepare,
-	.mode_set = udl_encoder_mode_set,
-	.commit = udl_encoder_commit,
-	.disable = udl_encoder_disable,
-};
-
-static const struct drm_encoder_funcs udl_enc_funcs = {
-	.destroy = udl_enc_destroy,
-};
-
-struct drm_encoder *udl_encoder_init(struct drm_device *dev)
-{
-	struct drm_encoder *encoder;
-
-	encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
-	if (!encoder)
-		return NULL;
-
-	drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS,
-			 NULL);
-	drm_encoder_helper_add(encoder, &udl_helper_funcs);
-	encoder->possible_crtcs = 1;
-	return encoder;
-}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
deleted file mode 100644
index ef3504d..0000000
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ /dev/null
@@ -1,527 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Red Hat
- *
- * based in parts on udlfb.c:
- * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
- * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
- * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
- */
-
-#include <linux/moduleparam.h>
-#include <linux/dma-buf.h>
-
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_modeset_helper.h>
-
-#include "udl_drv.h"
-
-#define DL_DEFIO_WRITE_DELAY    (HZ/20) /* fb_deferred_io.delay in jiffies */
-
-static int fb_defio = 0;  /* Optionally enable experimental fb_defio mmap support */
-static int fb_bpp = 16;
-
-module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
-module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
-
-struct udl_fbdev {
-	struct drm_fb_helper helper; /* must be first */
-	struct udl_framebuffer ufb;
-	int fb_count;
-};
-
-#define DL_ALIGN_UP(x, a) ALIGN(x, a)
-#define DL_ALIGN_DOWN(x, a) ALIGN_DOWN(x, a)
-
-/** Read the red component (0..255) of a 32 bpp colour. */
-#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)
-
-/** Read the green component (0..255) of a 32 bpp colour. */
-#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)
-
-/** Read the blue component (0..255) of a 32 bpp colour. */
-#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)
-
-/** Return red/green component of a 16 bpp colour number. */
-#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)
-
-/** Return green/blue component of a 16 bpp colour number. */
-#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)
-
-/** Return 8 bpp colour number from red, green and blue components. */
-#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)
-
-#if 0
-static uint8_t rgb8(uint32_t col)
-{
-	uint8_t red = DLO_RGB_GETRED(col);
-	uint8_t grn = DLO_RGB_GETGRN(col);
-	uint8_t blu = DLO_RGB_GETBLU(col);
-
-	return DLO_RGB8(red, grn, blu);
-}
-
-static uint16_t rgb16(uint32_t col)
-{
-	uint8_t red = DLO_RGB_GETRED(col);
-	uint8_t grn = DLO_RGB_GETGRN(col);
-	uint8_t blu = DLO_RGB_GETBLU(col);
-
-	return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
-}
-#endif
-
-int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
-		      int width, int height)
-{
-	struct drm_device *dev = fb->base.dev;
-	struct udl_device *udl = to_udl(dev);
-	int i, ret;
-	char *cmd;
-	cycles_t start_cycles, end_cycles;
-	int bytes_sent = 0;
-	int bytes_identical = 0;
-	struct urb *urb;
-	int aligned_x;
-	int log_bpp;
-
-	BUG_ON(!is_power_of_2(fb->base.format->cpp[0]));
-	log_bpp = __ffs(fb->base.format->cpp[0]);
-
-	if (!fb->active_16)
-		return 0;
-
-	if (!fb->obj->vmapping) {
-		ret = udl_gem_vmap(fb->obj);
-		if (ret == -ENOMEM) {
-			DRM_ERROR("failed to vmap fb\n");
-			return 0;
-		}
-		if (!fb->obj->vmapping) {
-			DRM_ERROR("failed to vmapping\n");
-			return 0;
-		}
-	}
-
-	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
-	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
-	x = aligned_x;
-
-	if ((width <= 0) ||
-	    (x + width > fb->base.width) ||
-	    (y + height > fb->base.height))
-		return -EINVAL;
-
-	start_cycles = get_cycles();
-
-	urb = udl_get_urb(dev);
-	if (!urb)
-		return 0;
-	cmd = urb->transfer_buffer;
-
-	for (i = y; i < y + height ; i++) {
-		const int line_offset = fb->base.pitches[0] * i;
-		const int byte_offset = line_offset + (x << log_bpp);
-		const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
-		if (udl_render_hline(dev, log_bpp, &urb,
-				     (char *) fb->obj->vmapping,
-				     &cmd, byte_offset, dev_byte_offset,
-				     width << log_bpp,
-				     &bytes_identical, &bytes_sent))
-			goto error;
-	}
-
-	if (cmd > (char *) urb->transfer_buffer) {
-		/* Send partial buffer remaining before exiting */
-		int len;
-		if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
-			*cmd++ = 0xAF;
-		len = cmd - (char *) urb->transfer_buffer;
-		ret = udl_submit_urb(dev, urb, len);
-		bytes_sent += len;
-	} else
-		udl_urb_completion(urb);
-
-error:
-	atomic_add(bytes_sent, &udl->bytes_sent);
-	atomic_add(bytes_identical, &udl->bytes_identical);
-	atomic_add((width * height) << log_bpp, &udl->bytes_rendered);
-	end_cycles = get_cycles();
-	atomic_add(((unsigned int) ((end_cycles - start_cycles)
-		    >> 10)), /* Kcycles */
-		   &udl->cpu_kcycles_used);
-
-	return 0;
-}
-
-static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
-	unsigned long start = vma->vm_start;
-	unsigned long size = vma->vm_end - vma->vm_start;
-	unsigned long offset;
-	unsigned long page, pos;
-
-	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
-		return -EINVAL;
-
-	offset = vma->vm_pgoff << PAGE_SHIFT;
-
-	if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
-		return -EINVAL;
-
-	pos = (unsigned long)info->fix.smem_start + offset;
-
-	pr_debug("mmap() framebuffer addr:%lu size:%lu\n",
-		  pos, size);
-
-	/* We don't want the framebuffer to be mapped encrypted */
-	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
-
-	while (size > 0) {
-		page = vmalloc_to_pfn((void *)pos);
-		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
-			return -EAGAIN;
-
-		start += PAGE_SIZE;
-		pos += PAGE_SIZE;
-		if (size > PAGE_SIZE)
-			size -= PAGE_SIZE;
-		else
-			size = 0;
-	}
-
-	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
-	return 0;
-}
-
-/*
- * It's common for several clients to have framebuffer open simultaneously.
- * e.g. both fbcon and X. Makes things interesting.
- * Assumes caller is holding info->lock (for open and release at least)
- */
-static int udl_fb_open(struct fb_info *info, int user)
-{
-	struct udl_fbdev *ufbdev = info->par;
-	struct drm_device *dev = ufbdev->ufb.base.dev;
-	struct udl_device *udl = to_udl(dev);
-
-	/* If the USB device is gone, we don't accept new opens */
-	if (drm_dev_is_unplugged(&udl->drm))
-		return -ENODEV;
-
-	ufbdev->fb_count++;
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-	if (fb_defio && (info->fbdefio == NULL)) {
-		/* enable defio at last moment if not disabled by client */
-
-		struct fb_deferred_io *fbdefio;
-
-		fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
-
-		if (fbdefio) {
-			fbdefio->delay = DL_DEFIO_WRITE_DELAY;
-			fbdefio->deferred_io = drm_fb_helper_deferred_io;
-		}
-
-		info->fbdefio = fbdefio;
-		fb_deferred_io_init(info);
-	}
-#endif
-
-	pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d\n",
-		  info->node, user, info, ufbdev->fb_count);
-
-	return 0;
-}
-
-
-/*
- * Assumes caller is holding info->lock mutex (for open and release at least)
- */
-static int udl_fb_release(struct fb_info *info, int user)
-{
-	struct udl_fbdev *ufbdev = info->par;
-
-	ufbdev->fb_count--;
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-	if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
-		fb_deferred_io_cleanup(info);
-		kfree(info->fbdefio);
-		info->fbdefio = NULL;
-		info->fbops->fb_mmap = udl_fb_mmap;
-	}
-#endif
-
-	pr_debug("released /dev/fb%d user=%d count=%d\n",
-		info->node, user, ufbdev->fb_count);
-
-	return 0;
-}
-
-static struct fb_ops udlfb_ops = {
-	.owner = THIS_MODULE,
-	DRM_FB_HELPER_DEFAULT_OPS,
-	.fb_fillrect = drm_fb_helper_sys_fillrect,
-	.fb_copyarea = drm_fb_helper_sys_copyarea,
-	.fb_imageblit = drm_fb_helper_sys_imageblit,
-	.fb_mmap = udl_fb_mmap,
-	.fb_open = udl_fb_open,
-	.fb_release = udl_fb_release,
-};
-
-static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
-				      struct drm_file *file,
-				      unsigned flags, unsigned color,
-				      struct drm_clip_rect *clips,
-				      unsigned num_clips)
-{
-	struct udl_framebuffer *ufb = to_udl_fb(fb);
-	int i;
-	int ret = 0;
-
-	drm_modeset_lock_all(fb->dev);
-
-	if (!ufb->active_16)
-		goto unlock;
-
-	if (ufb->obj->base.import_attach) {
-		ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
-					       DMA_FROM_DEVICE);
-		if (ret)
-			goto unlock;
-	}
-
-	for (i = 0; i < num_clips; i++) {
-		ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
-				  clips[i].x2 - clips[i].x1,
-				  clips[i].y2 - clips[i].y1);
-		if (ret)
-			break;
-	}
-
-	if (ufb->obj->base.import_attach) {
-		ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
-					     DMA_FROM_DEVICE);
-	}
-
- unlock:
-	drm_modeset_unlock_all(fb->dev);
-
-	return ret;
-}
-
-static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-	struct udl_framebuffer *ufb = to_udl_fb(fb);
-
-	if (ufb->obj)
-		drm_gem_object_put_unlocked(&ufb->obj->base);
-
-	drm_framebuffer_cleanup(fb);
-	kfree(ufb);
-}
-
-static const struct drm_framebuffer_funcs udlfb_funcs = {
-	.destroy = udl_user_framebuffer_destroy,
-	.dirty = udl_user_framebuffer_dirty,
-};
-
-
-static int
-udl_framebuffer_init(struct drm_device *dev,
-		     struct udl_framebuffer *ufb,
-		     const struct drm_mode_fb_cmd2 *mode_cmd,
-		     struct udl_gem_object *obj)
-{
-	int ret;
-
-	ufb->obj = obj;
-	drm_helper_mode_fill_fb_struct(dev, &ufb->base, mode_cmd);
-	ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
-	return ret;
-}
-
-
-static int udlfb_create(struct drm_fb_helper *helper,
-			struct drm_fb_helper_surface_size *sizes)
-{
-	struct udl_fbdev *ufbdev =
-		container_of(helper, struct udl_fbdev, helper);
-	struct drm_device *dev = ufbdev->helper.dev;
-	struct fb_info *info;
-	struct drm_framebuffer *fb;
-	struct drm_mode_fb_cmd2 mode_cmd;
-	struct udl_gem_object *obj;
-	uint32_t size;
-	int ret = 0;
-
-	if (sizes->surface_bpp == 24)
-		sizes->surface_bpp = 32;
-
-	mode_cmd.width = sizes->surface_width;
-	mode_cmd.height = sizes->surface_height;
-	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
-
-	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
-							  sizes->surface_depth);
-
-	size = mode_cmd.pitches[0] * mode_cmd.height;
-	size = ALIGN(size, PAGE_SIZE);
-
-	obj = udl_gem_alloc_object(dev, size);
-	if (!obj)
-		goto out;
-
-	ret = udl_gem_vmap(obj);
-	if (ret) {
-		DRM_ERROR("failed to vmap fb\n");
-		goto out_gfree;
-	}
-
-	info = drm_fb_helper_alloc_fbi(helper);
-	if (IS_ERR(info)) {
-		ret = PTR_ERR(info);
-		goto out_gfree;
-	}
-
-	ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
-	if (ret)
-		goto out_gfree;
-
-	fb = &ufbdev->ufb.base;
-
-	ufbdev->helper.fb = fb;
-
-	info->screen_base = ufbdev->ufb.obj->vmapping;
-	info->fix.smem_len = size;
-	info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
-
-	info->fbops = &udlfb_ops;
-	drm_fb_helper_fill_info(info, &ufbdev->helper, sizes);
-
-	DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
-		      fb->width, fb->height,
-		      ufbdev->ufb.obj->vmapping);
-
-	return ret;
-out_gfree:
-	drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
-out:
-	return ret;
-}
-
-static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
-	.fb_probe = udlfb_create,
-};
-
-static void udl_fbdev_destroy(struct drm_device *dev,
-			      struct udl_fbdev *ufbdev)
-{
-	drm_fb_helper_unregister_fbi(&ufbdev->helper);
-	drm_fb_helper_fini(&ufbdev->helper);
-	if (ufbdev->ufb.obj) {
-		drm_framebuffer_unregister_private(&ufbdev->ufb.base);
-		drm_framebuffer_cleanup(&ufbdev->ufb.base);
-		drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
-	}
-}
-
-int udl_fbdev_init(struct drm_device *dev)
-{
-	struct udl_device *udl = to_udl(dev);
-	int bpp_sel = fb_bpp;
-	struct udl_fbdev *ufbdev;
-	int ret;
-
-	ufbdev = kzalloc(sizeof(struct udl_fbdev), GFP_KERNEL);
-	if (!ufbdev)
-		return -ENOMEM;
-
-	udl->fbdev = ufbdev;
-
-	drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs);
-
-	ret = drm_fb_helper_init(dev, &ufbdev->helper, 1);
-	if (ret)
-		goto free;
-
-	ret = drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
-	if (ret)
-		goto fini;
-
-	/* disable all the possible outputs/crtcs before entering KMS mode */
-	drm_helper_disable_unused_functions(dev);
-
-	ret = drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
-	if (ret)
-		goto fini;
-
-	return 0;
-
-fini:
-	drm_fb_helper_fini(&ufbdev->helper);
-free:
-	kfree(ufbdev);
-	return ret;
-}
-
-void udl_fbdev_cleanup(struct drm_device *dev)
-{
-	struct udl_device *udl = to_udl(dev);
-	if (!udl->fbdev)
-		return;
-
-	udl_fbdev_destroy(dev, udl->fbdev);
-	kfree(udl->fbdev);
-	udl->fbdev = NULL;
-}
-
-void udl_fbdev_unplug(struct drm_device *dev)
-{
-	struct udl_device *udl = to_udl(dev);
-	struct udl_fbdev *ufbdev;
-	if (!udl->fbdev)
-		return;
-
-	ufbdev = udl->fbdev;
-	drm_fb_helper_unlink_fbi(&ufbdev->helper);
-}
-
-struct drm_framebuffer *
-udl_fb_user_fb_create(struct drm_device *dev,
-		   struct drm_file *file,
-		   const struct drm_mode_fb_cmd2 *mode_cmd)
-{
-	struct drm_gem_object *obj;
-	struct udl_framebuffer *ufb;
-	int ret;
-	uint32_t size;
-
-	obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
-	if (obj == NULL)
-		return ERR_PTR(-ENOENT);
-
-	size = mode_cmd->pitches[0] * mode_cmd->height;
-	size = ALIGN(size, PAGE_SIZE);
-
-	if (size > obj->size) {
-		DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n", size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
-	if (ufb == NULL)
-		return ERR_PTR(-ENOMEM);
-
-	ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
-	if (ret) {
-		kfree(ufb);
-		return ERR_PTR(-EINVAL);
-	}
-	return &ufb->base;
-}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index b23a5c2..b6e26f9 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -6,226 +6,101 @@
 #include <linux/dma-buf.h>
 #include <linux/vmalloc.h>
 
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_mode.h>
 #include <drm/drm_prime.h>
 
 #include "udl_drv.h"
 
-struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
-					    size_t size)
-{
-	struct udl_gem_object *obj;
+/*
+ * GEM object funcs
+ */
 
-	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-	if (obj == NULL)
-		return NULL;
-
-	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
-		kfree(obj);
-		return NULL;
-	}
-
-	obj->flags = UDL_BO_CACHEABLE;
-	return obj;
-}
-
-static int
-udl_gem_create(struct drm_file *file,
-	       struct drm_device *dev,
-	       uint64_t size,
-	       uint32_t *handle_p)
-{
-	struct udl_gem_object *obj;
-	int ret;
-	u32 handle;
-
-	size = roundup(size, PAGE_SIZE);
-
-	obj = udl_gem_alloc_object(dev, size);
-	if (obj == NULL)
-		return -ENOMEM;
-
-	ret = drm_gem_handle_create(file, &obj->base, &handle);
-	if (ret) {
-		drm_gem_object_release(&obj->base);
-		kfree(obj);
-		return ret;
-	}
-
-	drm_gem_object_put_unlocked(&obj->base);
-	*handle_p = handle;
-	return 0;
-}
-
-static void update_vm_cache_attr(struct udl_gem_object *obj,
-				 struct vm_area_struct *vma)
-{
-	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
-
-	/* non-cacheable as default. */
-	if (obj->flags & UDL_BO_CACHEABLE) {
-		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-	} else if (obj->flags & UDL_BO_WC) {
-		vma->vm_page_prot =
-			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-	} else {
-		vma->vm_page_prot =
-			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
-	}
-}
-
-int udl_dumb_create(struct drm_file *file,
-		    struct drm_device *dev,
-		    struct drm_mode_create_dumb *args)
-{
-	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
-	args->size = args->pitch * args->height;
-	return udl_gem_create(file, dev,
-			      args->size, &args->handle);
-}
-
-int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int udl_gem_object_mmap(struct drm_gem_object *obj,
+			       struct vm_area_struct *vma)
 {
 	int ret;
 
-	ret = drm_gem_mmap(filp, vma);
+	ret = drm_gem_shmem_mmap(obj, vma);
 	if (ret)
 		return ret;
 
-	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_flags |= VM_MIXEDMAP;
-
-	update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);
-
-	return ret;
-}
-
-vm_fault_t udl_gem_fault(struct vm_fault *vmf)
-{
-	struct vm_area_struct *vma = vmf->vma;
-	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
-	struct page *page;
-	unsigned int page_offset;
-
-	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-
-	if (!obj->pages)
-		return VM_FAULT_SIGBUS;
-
-	page = obj->pages[page_offset];
-	return vmf_insert_page(vma, vmf->address, page);
-}
-
-int udl_gem_get_pages(struct udl_gem_object *obj)
-{
-	struct page **pages;
-
-	if (obj->pages)
-		return 0;
-
-	pages = drm_gem_get_pages(&obj->base);
-	if (IS_ERR(pages))
-		return PTR_ERR(pages);
-
-	obj->pages = pages;
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	if (obj->import_attach)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 
 	return 0;
 }
 
-void udl_gem_put_pages(struct udl_gem_object *obj)
+static void *udl_gem_object_vmap(struct drm_gem_object *obj)
 {
-	if (obj->base.import_attach) {
-		kvfree(obj->pages);
-		obj->pages = NULL;
-		return;
-	}
-
-	drm_gem_put_pages(&obj->base, obj->pages, false, false);
-	obj->pages = NULL;
-}
-
-int udl_gem_vmap(struct udl_gem_object *obj)
-{
-	int page_count = obj->base.size / PAGE_SIZE;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 	int ret;
 
-	if (obj->base.import_attach) {
-		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
-		if (!obj->vmapping)
-			return -ENOMEM;
-		return 0;
-	}
-		
-	ret = udl_gem_get_pages(obj);
+	ret = mutex_lock_interruptible(&shmem->vmap_lock);
 	if (ret)
-		return ret;
+		return ERR_PTR(ret);
 
-	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
-	if (!obj->vmapping)
-		return -ENOMEM;
-	return 0;
-}
-
-void udl_gem_vunmap(struct udl_gem_object *obj)
-{
-	if (obj->base.import_attach) {
-		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
-		return;
-	}
-
-	vunmap(obj->vmapping);
-
-	udl_gem_put_pages(obj);
-}
-
-void udl_gem_free_object(struct drm_gem_object *gem_obj)
-{
-	struct udl_gem_object *obj = to_udl_bo(gem_obj);
-
-	if (obj->vmapping)
-		udl_gem_vunmap(obj);
-
-	if (gem_obj->import_attach) {
-		drm_prime_gem_destroy(gem_obj, obj->sg);
-		put_device(gem_obj->dev->dev);
-	}
-
-	if (obj->pages)
-		udl_gem_put_pages(obj);
-
-	drm_gem_free_mmap_offset(gem_obj);
-}
-
-/* the dumb interface doesn't work with the GEM straight MMAP
-   interface, it expects to do MMAP on the drm fd, like normal */
-int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
-		 uint32_t handle, uint64_t *offset)
-{
-	struct udl_gem_object *gobj;
-	struct drm_gem_object *obj;
-	struct udl_device *udl = to_udl(dev);
-	int ret = 0;
-
-	mutex_lock(&udl->gem_lock);
-	obj = drm_gem_object_lookup(file, handle);
-	if (obj == NULL) {
-		ret = -ENOENT;
-		goto unlock;
-	}
-	gobj = to_udl_bo(obj);
-
-	ret = udl_gem_get_pages(gobj);
-	if (ret)
-		goto out;
-	ret = drm_gem_create_mmap_offset(obj);
-	if (ret)
+	if (shmem->vmap_use_count++ > 0)
 		goto out;
 
-	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
+	ret = drm_gem_shmem_get_pages(shmem);
+	if (ret)
+		goto err_zero_use;
+
+	if (obj->import_attach)
+		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+	else
+		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
+				    VM_MAP, PAGE_KERNEL);
+
+	if (!shmem->vaddr) {
+		DRM_DEBUG_KMS("Failed to vmap pages\n");
+		ret = -ENOMEM;
+		goto err_put_pages;
+	}
 
 out:
-	drm_gem_object_put_unlocked(&gobj->base);
-unlock:
-	mutex_unlock(&udl->gem_lock);
-	return ret;
+	mutex_unlock(&shmem->vmap_lock);
+	return shmem->vaddr;
+
+err_put_pages:
+	drm_gem_shmem_put_pages(shmem);
+err_zero_use:
+	shmem->vmap_use_count = 0;
+	mutex_unlock(&shmem->vmap_lock);
+	return ERR_PTR(ret);
+}
+
+static const struct drm_gem_object_funcs udl_gem_object_funcs = {
+	.free = drm_gem_shmem_free_object,
+	.print_info = drm_gem_shmem_print_info,
+	.pin = drm_gem_shmem_pin,
+	.unpin = drm_gem_shmem_unpin,
+	.get_sg_table = drm_gem_shmem_get_sg_table,
+	.vmap = udl_gem_object_vmap,
+	.vunmap = drm_gem_shmem_vunmap,
+	.mmap = udl_gem_object_mmap,
+};
+
+/*
+ * Helpers for struct drm_driver
+ */
+
+struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
+						    size_t size)
+{
+	struct drm_gem_shmem_object *shmem;
+	struct drm_gem_object *obj;
+
+	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+	if (!shmem)
+		return NULL;
+
+	obj = &shmem->base;
+	obj->funcs = &udl_gem_object_funcs;
+
+	return obj;
 }
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 4e854e0..5387189 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -140,7 +140,6 @@ void udl_urb_completion(struct urb *urb)
 		    urb->status == -ESHUTDOWN)) {
 			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
 				__func__, urb->status);
-			atomic_set(&udl->lost_pixels, 1);
 		}
 	}
 
@@ -271,7 +270,6 @@ struct urb *udl_get_urb(struct drm_device *dev)
 	/* Wait for an in-flight buffer to complete and get re-queued */
 	ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
 	if (ret) {
-		atomic_set(&udl->lost_pixels, 1);
 		DRM_INFO("wait for urb interrupted: %x available: %d\n",
 		       ret, udl->urbs.available);
 		goto error;
@@ -304,7 +302,6 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
 	ret = usb_submit_urb(urb, GFP_ATOMIC);
 	if (ret) {
 		udl_urb_completion(urb); /* because no one else will */
-		atomic_set(&udl->lost_pixels, 1);
 		DRM_ERROR("usb_submit_urb error %x\n", ret);
 	}
 	return ret;
@@ -338,10 +335,6 @@ int udl_init(struct udl_device *udl)
 	if (ret)
 		goto err;
 
-	ret = udl_fbdev_init(dev);
-	if (ret)
-		goto err;
-
 	drm_kms_helper_poll_init(dev);
 
 	return 0;
@@ -367,6 +360,4 @@ void udl_fini(struct drm_device *dev)
 
 	if (udl->urbs.count)
 		udl_free_urb_list(dev);
-
-	udl_fbdev_cleanup(dev);
 }
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index bc1ab60..22af179 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -9,12 +9,21 @@
 
  */
 
+#include <linux/dma-buf.h>
+
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_vblank.h>
 
 #include "udl_drv.h"
 
+#define UDL_COLOR_DEPTH_16BPP	0
+
 /*
  * All DisplayLink bulk operations start with 0xAF, followed by specific code
  * All operations are written to buffers which then later get sent to device
@@ -38,31 +47,9 @@ static char *udl_vidreg_unlock(char *buf)
 	return udl_set_register(buf, 0xFF, 0xFF);
 }
 
-/*
- * On/Off for driving the DisplayLink framebuffer to the display
- *  0x00 H and V sync on
- *  0x01 H and V sync off (screen blank but powered)
- *  0x07 DPMS powerdown (requires modeset to come back)
- */
-static char *udl_set_blank(char *buf, int dpms_mode)
+static char *udl_set_blank_mode(char *buf, u8 mode)
 {
-	u8 reg;
-	switch (dpms_mode) {
-	case DRM_MODE_DPMS_OFF:
-		reg = 0x07;
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-		reg = 0x05;
-		break;
-	case DRM_MODE_DPMS_SUSPEND:
-		reg = 0x01;
-		break;
-	case DRM_MODE_DPMS_ON:
-		reg = 0x00;
-		break;
-	}
-
-	return udl_set_register(buf, 0x1f, reg);
+	return udl_set_register(buf, UDL_REG_BLANK_MODE, mode);
 }
 
 static char *udl_set_color_depth(char *buf, u8 selection)
@@ -233,6 +220,11 @@ static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
 	char *buf;
 	int retval;
 
+	if (udl->mode_buf_len == 0) {
+		DRM_ERROR("No mode set\n");
+		return -EINVAL;
+	}
+
 	urb = udl_get_urb(dev);
 	if (!urb)
 		return -ENOMEM;
@@ -245,80 +237,152 @@ static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
 	return retval;
 }
 
-
-static void udl_crtc_dpms(struct drm_crtc *crtc, int mode)
+static long udl_log_cpp(unsigned int cpp)
 {
-	struct drm_device *dev = crtc->dev;
-	struct udl_device *udl = dev->dev_private;
-	int retval;
+	if (WARN_ON(!is_power_of_2(cpp)))
+		return -EINVAL;
+	return __ffs(cpp);
+}
 
-	if (mode == DRM_MODE_DPMS_OFF) {
-		char *buf;
-		struct urb *urb;
-		urb = udl_get_urb(dev);
-		if (!urb)
-			return;
+static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y,
+				   int width, int height)
+{
+	int x1, x2;
 
-		buf = (char *)urb->transfer_buffer;
-		buf = udl_vidreg_lock(buf);
-		buf = udl_set_blank(buf, mode);
-		buf = udl_vidreg_unlock(buf);
+	if (WARN_ON_ONCE(x < 0) ||
+	    WARN_ON_ONCE(y < 0) ||
+	    WARN_ON_ONCE(width < 0) ||
+	    WARN_ON_ONCE(height < 0))
+		return -EINVAL;
 
-		buf = udl_dummy_render(buf);
-		retval = udl_submit_urb(dev, urb, buf - (char *)
-					urb->transfer_buffer);
-	} else {
-		if (udl->mode_buf_len == 0) {
-			DRM_ERROR("Trying to enable DPMS with no mode\n");
-			return;
-		}
-		udl_crtc_write_mode_to_hw(crtc);
+	x1 = ALIGN_DOWN(x, sizeof(unsigned long));
+	x2 = ALIGN(width + (x - x1), sizeof(unsigned long)) + x1;
+
+	clip->x1 = x1;
+	clip->y1 = y;
+	clip->x2 = x2;
+	clip->y2 = y + height;
+
+	return 0;
+}
+
+int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
+		      int width, int height)
+{
+	struct drm_device *dev = fb->dev;
+	struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
+	int i, ret, tmp_ret;
+	char *cmd;
+	struct urb *urb;
+	struct drm_rect clip;
+	int log_bpp;
+	void *vaddr;
+
+	ret = udl_log_cpp(fb->format->cpp[0]);
+	if (ret < 0)
+		return ret;
+	log_bpp = ret;
+
+	ret = udl_aligned_damage_clip(&clip, x, y, width, height);
+	if (ret)
+		return ret;
+	else if ((clip.x2 > fb->width) || (clip.y2 > fb->height))
+		return -EINVAL;
+
+	if (import_attach) {
+		ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
+					       DMA_FROM_DEVICE);
+		if (ret)
+			return ret;
 	}
 
+	vaddr = drm_gem_shmem_vmap(fb->obj[0]);
+	if (IS_ERR(vaddr)) {
+		DRM_ERROR("failed to vmap fb\n");
+		goto out_dma_buf_end_cpu_access;
+	}
+
+	urb = udl_get_urb(dev);
+	if (!urb)
+		goto out_drm_gem_shmem_vunmap;
+	cmd = urb->transfer_buffer;
+
+	for (i = clip.y1; i < clip.y2; i++) {
+		const int line_offset = fb->pitches[0] * i;
+		const int byte_offset = line_offset + (clip.x1 << log_bpp);
+		const int dev_byte_offset = (fb->width * i + clip.x1) << log_bpp;
+		const int byte_width = (clip.x2 - clip.x1) << log_bpp;
+		ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr,
+				       &cmd, byte_offset, dev_byte_offset,
+				       byte_width);
+		if (ret)
+			goto out_drm_gem_shmem_vunmap;
+	}
+
+	if (cmd > (char *)urb->transfer_buffer) {
+		/* Send partial buffer remaining before exiting */
+		int len;
+		if (cmd < (char *)urb->transfer_buffer + urb->transfer_buffer_length)
+			*cmd++ = 0xAF;
+		len = cmd - (char *)urb->transfer_buffer;
+		ret = udl_submit_urb(dev, urb, len);
+	} else {
+		udl_urb_completion(urb);
+	}
+
+	ret = 0;
+
+out_drm_gem_shmem_vunmap:
+	drm_gem_shmem_vunmap(fb->obj[0], vaddr);
+out_dma_buf_end_cpu_access:
+	if (import_attach) {
+		tmp_ret = dma_buf_end_cpu_access(import_attach->dmabuf,
+						 DMA_FROM_DEVICE);
+		if (tmp_ret && !ret)
+			ret = tmp_ret; /* only update ret if not set yet */
+	}
+
+	return ret;
 }
 
-#if 0
-static int
-udl_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-			   int x, int y, enum mode_set_atomic state)
+/*
+ * Simple display pipeline
+ */
+
+static const uint32_t udl_simple_display_pipe_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+};
+
+static enum drm_mode_status
+udl_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+				   const struct drm_display_mode *mode)
 {
-	return 0;
+	return MODE_OK;
 }
 
-static int
-udl_pipe_set_base(struct drm_crtc *crtc, int x, int y,
-		    struct drm_framebuffer *old_fb)
+static void
+udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
+			       struct drm_crtc_state *crtc_state,
+			       struct drm_plane_state *plane_state)
 {
-	return 0;
-}
-#endif
-
-static int udl_crtc_mode_set(struct drm_crtc *crtc,
-			       struct drm_display_mode *mode,
-			       struct drm_display_mode *adjusted_mode,
-			       int x, int y,
-			       struct drm_framebuffer *old_fb)
-
-{
+	struct drm_crtc *crtc = &pipe->crtc;
 	struct drm_device *dev = crtc->dev;
-	struct udl_framebuffer *ufb = to_udl_fb(crtc->primary->fb);
+	struct drm_framebuffer *fb = plane_state->fb;
 	struct udl_device *udl = dev->dev_private;
+	struct drm_display_mode *mode = &crtc_state->mode;
 	char *buf;
 	char *wrptr;
-	int color_depth = 0;
+	int color_depth = UDL_COLOR_DEPTH_16BPP;
 
-	udl->crtc = crtc;
+	crtc_state->no_vblank = true;
 
 	buf = (char *)udl->mode_buf;
 
-	/* for now we just clip 24 -> 16 - if we fix that fix this */
-	/*if  (crtc->fb->bits_per_pixel != 16)
-	  color_depth = 1; */
-
 	/* This first section has to do with setting the base address on the
-	* controller * associated with the display. There are 2 base
-	* pointers, currently, we only * use the 16 bpp segment.
-	*/
+	 * controller associated with the display. There are 2 base
+	 * pointers, currently, we only use the 16 bpp segment.
+	 */
 	wrptr = udl_vidreg_lock(buf);
 	wrptr = udl_set_color_depth(wrptr, color_depth);
 	/* set base for 16bpp segment to 0 */
@@ -326,108 +390,95 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
 	/* set base for 8bpp segment to end of fb */
 	wrptr = udl_set_base8bpp(wrptr, 2 * mode->vdisplay * mode->hdisplay);
 
-	wrptr = udl_set_vid_cmds(wrptr, adjusted_mode);
-	wrptr = udl_set_blank(wrptr, DRM_MODE_DPMS_ON);
+	wrptr = udl_set_vid_cmds(wrptr, mode);
+	wrptr = udl_set_blank_mode(wrptr, UDL_BLANK_MODE_ON);
 	wrptr = udl_vidreg_unlock(wrptr);
 
 	wrptr = udl_dummy_render(wrptr);
 
-	if (old_fb) {
-		struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
-		uold_fb->active_16 = false;
-	}
-	ufb->active_16 = true;
 	udl->mode_buf_len = wrptr - buf;
 
-	/* damage all of it */
-	udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
-	return 0;
+	udl_handle_damage(fb, 0, 0, fb->width, fb->height);
+
+	if (!crtc_state->mode_changed)
+		return;
+
+	/* enable display */
+	udl_crtc_write_mode_to_hw(crtc);
 }
 
-
-static void udl_crtc_disable(struct drm_crtc *crtc)
+static void
+udl_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
 {
-	udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void udl_crtc_destroy(struct drm_crtc *crtc)
-{
-	drm_crtc_cleanup(crtc);
-	kfree(crtc);
-}
-
-static int udl_crtc_page_flip(struct drm_crtc *crtc,
-			      struct drm_framebuffer *fb,
-			      struct drm_pending_vblank_event *event,
-			      uint32_t page_flip_flags,
-			      struct drm_modeset_acquire_ctx *ctx)
-{
-	struct udl_framebuffer *ufb = to_udl_fb(fb);
+	struct drm_crtc *crtc = &pipe->crtc;
 	struct drm_device *dev = crtc->dev;
+	struct urb *urb;
+	char *buf;
 
-	struct drm_framebuffer *old_fb = crtc->primary->fb;
-	if (old_fb) {
-		struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
-		uold_fb->active_16 = false;
-	}
-	ufb->active_16 = true;
+	urb = udl_get_urb(dev);
+	if (!urb)
+		return;
 
-	udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
+	buf = (char *)urb->transfer_buffer;
+	buf = udl_vidreg_lock(buf);
+	buf = udl_set_blank_mode(buf, UDL_BLANK_MODE_POWERDOWN);
+	buf = udl_vidreg_unlock(buf);
+	buf = udl_dummy_render(buf);
 
-	spin_lock_irq(&dev->event_lock);
-	if (event)
-		drm_crtc_send_vblank_event(crtc, event);
-	spin_unlock_irq(&dev->event_lock);
-	crtc->primary->fb = fb;
+	udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
+}
 
+static int
+udl_simple_display_pipe_check(struct drm_simple_display_pipe *pipe,
+			      struct drm_plane_state *plane_state,
+			      struct drm_crtc_state *crtc_state)
+{
 	return 0;
 }
 
-static void udl_crtc_prepare(struct drm_crtc *crtc)
+static void
+udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
+			       struct drm_plane_state *old_plane_state)
 {
+	struct drm_plane_state *state = pipe->plane.state;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_rect rect;
+
+	if (!fb)
+		return;
+
+	if (drm_atomic_helper_damage_merged(old_plane_state, state, &rect))
+		udl_handle_damage(fb, rect.x1, rect.y1, rect.x2 - rect.x1,
+				  rect.y2 - rect.y1);
 }
 
-static void udl_crtc_commit(struct drm_crtc *crtc)
-{
-	udl_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-static const struct drm_crtc_helper_funcs udl_helper_funcs = {
-	.dpms = udl_crtc_dpms,
-	.mode_set = udl_crtc_mode_set,
-	.prepare = udl_crtc_prepare,
-	.commit = udl_crtc_commit,
-	.disable = udl_crtc_disable,
+static const
+struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
+	.mode_valid = udl_simple_display_pipe_mode_valid,
+	.enable = udl_simple_display_pipe_enable,
+	.disable = udl_simple_display_pipe_disable,
+	.check = udl_simple_display_pipe_check,
+	.update = udl_simple_display_pipe_update,
+	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
-static const struct drm_crtc_funcs udl_crtc_funcs = {
-	.set_config = drm_crtc_helper_set_config,
-	.destroy = udl_crtc_destroy,
-	.page_flip = udl_crtc_page_flip,
-};
-
-static int udl_crtc_init(struct drm_device *dev)
-{
-	struct drm_crtc *crtc;
-
-	crtc = kzalloc(sizeof(struct drm_crtc) + sizeof(struct drm_connector *), GFP_KERNEL);
-	if (crtc == NULL)
-		return -ENOMEM;
-
-	drm_crtc_init(dev, crtc, &udl_crtc_funcs);
-	drm_crtc_helper_add(crtc, &udl_helper_funcs);
-
-	return 0;
-}
+/*
+ * Modesetting
+ */
 
 static const struct drm_mode_config_funcs udl_mode_funcs = {
-	.fb_create = udl_fb_user_fb_create,
-	.output_poll_changed = NULL,
+	.fb_create = drm_gem_fb_create_with_dirty,
+	.atomic_check  = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
 };
 
 int udl_modeset_init(struct drm_device *dev)
 {
-	struct drm_encoder *encoder;
+	size_t format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
+	struct udl_device *udl = dev->dev_private;
+	struct drm_connector *connector;
+	int ret;
+
 	drm_mode_config_init(dev);
 
 	dev->mode_config.min_width = 640;
@@ -437,29 +488,32 @@ int udl_modeset_init(struct drm_device *dev)
 	dev->mode_config.max_height = 2048;
 
 	dev->mode_config.prefer_shadow = 0;
-	dev->mode_config.preferred_depth = 24;
+	dev->mode_config.preferred_depth = 16;
 
 	dev->mode_config.funcs = &udl_mode_funcs;
 
-	udl_crtc_init(dev);
+	connector = udl_connector_init(dev);
+	if (IS_ERR(connector)) {
+		ret = PTR_ERR(connector);
+		goto err_drm_mode_config_cleanup;
+	}
 
-	encoder = udl_encoder_init(dev);
+	format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
 
-	udl_connector_init(dev, encoder);
+	ret = drm_simple_display_pipe_init(dev, &udl->display_pipe,
+					   &udl_simple_display_pipe_funcs,
+					   udl_simple_display_pipe_formats,
+					   format_count, NULL, connector);
+	if (ret)
+		goto err_drm_mode_config_cleanup;
+
+	drm_mode_config_reset(dev);
 
 	return 0;
-}
 
-void udl_modeset_restore(struct drm_device *dev)
-{
-	struct udl_device *udl = dev->dev_private;
-	struct udl_framebuffer *ufb;
-
-	if (!udl->crtc || !udl->crtc->primary->fb)
-		return;
-	udl_crtc_commit(udl->crtc);
-	ufb = to_udl_fb(udl->crtc->primary->fb);
-	udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
+err_drm_mode_config_cleanup:
+	drm_mode_config_cleanup(dev);
+	return ret;
 }
 
 void udl_modeset_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 1973a4c..9719276 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -212,8 +212,7 @@ static void udl_compress_hline16(
 int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
 		     const char *front, char **urb_buf_ptr,
 		     u32 byte_offset, u32 device_byte_offset,
-		     u32 byte_width,
-		     int *ident_ptr, int *sent_ptr)
+		     u32 byte_width)
 {
 	const u8 *line_start, *line_end, *next_pixel;
 	u32 base16 = 0 + (device_byte_offset >> log_bpp) * 2;
@@ -235,12 +234,12 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
 
 		if (cmd >= cmd_end) {
 			int len = cmd - (u8 *) urb->transfer_buffer;
-			if (udl_submit_urb(dev, urb, len))
-				return 1; /* lost pixels is set */
-			*sent_ptr += len;
+			int ret = udl_submit_urb(dev, urb, len);
+			if (ret)
+				return ret;
 			urb = udl_get_urb(dev);
 			if (!urb)
-				return 1; /* lost_pixels is set */
+				return -EAGAIN;
 			*urb_ptr = urb;
 			cmd = urb->transfer_buffer;
 			cmd_end = &cmd[urb->transfer_buffer_length];
@@ -251,4 +250,3 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
 
 	return 0;
 }
-
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 1a07462..eaa8e96 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -140,7 +140,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_file_priv *v3d_priv;
-	struct drm_sched_rq *rq;
+	struct drm_gpu_scheduler *sched;
 	int i;
 
 	v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -150,8 +150,10 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 	v3d_priv->v3d = v3d;
 
 	for (i = 0; i < V3D_MAX_QUEUES; i++) {
-		rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
+		sched = &v3d->queue[i].sched;
+		drm_sched_entity_init(&v3d_priv->sched_entity[i],
+				      DRM_SCHED_PRIORITY_NORMAL, &sched,
+				      1, NULL);
 	}
 
 	file->driver_priv = v3d_priv;
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 7c2317e..118e8a4 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -22,9 +22,9 @@
 	  our display setup.
 
 config DRM_VC4_HDMI_CEC
-       bool "Broadcom VC4 HDMI CEC Support"
-       depends on DRM_VC4
-       select CEC_CORE
-       help
+	bool "Broadcom VC4 HDMI CEC Support"
+	depends on DRM_VC4
+	select CEC_CORE
+	help
 	  Choose this option if you have a Broadcom VC4 GPU
 	  and want to use CEC.
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index c9ba83e..fd8a2eb 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -499,6 +499,7 @@ struct vc4_dsi {
 	struct mipi_dsi_host dsi_host;
 	struct drm_encoder *encoder;
 	struct drm_bridge *bridge;
+	struct list_head bridge_chain;
 
 	void __iomem *regs;
 
@@ -752,10 +753,19 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
 	struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
 	struct vc4_dsi *dsi = vc4_encoder->dsi;
 	struct device *dev = &dsi->pdev->dev;
+	struct drm_bridge *iter;
 
-	drm_bridge_disable(dsi->bridge);
+	list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
+		if (iter->funcs->disable)
+			iter->funcs->disable(iter);
+	}
+
 	vc4_dsi_ulps(dsi, true);
-	drm_bridge_post_disable(dsi->bridge);
+
+	list_for_each_entry_from(iter, &dsi->bridge_chain, chain_node) {
+		if (iter->funcs->post_disable)
+			iter->funcs->post_disable(iter);
+	}
 
 	clk_disable_unprepare(dsi->pll_phy_clock);
 	clk_disable_unprepare(dsi->escape_clock);
@@ -823,6 +833,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
 	struct vc4_dsi *dsi = vc4_encoder->dsi;
 	struct device *dev = &dsi->pdev->dev;
 	bool debug_dump_regs = false;
+	struct drm_bridge *iter;
 	unsigned long hs_clock;
 	u32 ui_ns;
 	/* Minimum LP state duration in escape clock cycles. */
@@ -1055,7 +1066,10 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
 
 	vc4_dsi_ulps(dsi, false);
 
-	drm_bridge_pre_enable(dsi->bridge);
+	list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
+		if (iter->funcs->pre_enable)
+			iter->funcs->pre_enable(iter);
+	}
 
 	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
 		DSI_PORT_WRITE(DISP0_CTRL,
@@ -1072,7 +1086,10 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
 			       DSI_DISP0_ENABLE);
 	}
 
-	drm_bridge_enable(dsi->bridge);
+	list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
+		if (iter->funcs->enable)
+			iter->funcs->enable(iter);
+	}
 
 	if (debug_dump_regs) {
 		struct drm_printer p = drm_info_printer(&dsi->pdev->dev);
@@ -1460,6 +1477,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 				       GFP_KERNEL);
 	if (!vc4_dsi_encoder)
 		return -ENOMEM;
+
+	INIT_LIST_HEAD(&dsi->bridge_chain);
 	vc4_dsi_encoder->base.type = VC4_ENCODER_TYPE_DSI1;
 	vc4_dsi_encoder->dsi = dsi;
 	dsi->encoder = &vc4_dsi_encoder->base.base;
@@ -1610,7 +1629,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 	 * from our driver, since we need to sequence them within the
 	 * encoder's enable/disable paths.
 	 */
-	dsi->encoder->bridge = NULL;
+	list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain);
 
 	if (dsi->port == 0)
 		vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset);
@@ -1632,6 +1651,11 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
 	if (dsi->bridge)
 		pm_runtime_disable(dev);
 
+	/*
+	 * Restore the bridge_chain so the bridge detach procedure can happen
+	 * normally.
+	 */
+	list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
 	vc4_dsi_encoder_destroy(dsi->encoder);
 
 	if (dsi->port == 1)
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 7a06cb6..e1cfc3c 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -568,7 +568,7 @@ vc4_unlock_bo_reservations(struct drm_device *dev,
 	for (i = 0; i < exec->bo_count; i++) {
 		struct drm_gem_object *bo = &exec->bo[i]->base;
 
-		ww_mutex_unlock(&bo->resv->lock);
+		dma_resv_unlock(bo->resv);
 	}
 
 	ww_acquire_fini(acquire_ctx);
@@ -595,8 +595,7 @@ vc4_lock_bo_reservations(struct drm_device *dev,
 retry:
 	if (contended_lock != -1) {
 		bo = &exec->bo[contended_lock]->base;
-		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
-						       acquire_ctx);
+		ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
 		if (ret) {
 			ww_acquire_done(acquire_ctx);
 			return ret;
@@ -609,19 +608,19 @@ vc4_lock_bo_reservations(struct drm_device *dev,
 
 		bo = &exec->bo[i]->base;
 
-		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
+		ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
 		if (ret) {
 			int j;
 
 			for (j = 0; j < i; j++) {
 				bo = &exec->bo[j]->base;
-				ww_mutex_unlock(&bo->resv->lock);
+				dma_resv_unlock(bo->resv);
 			}
 
 			if (contended_lock != -1 && contended_lock >= i) {
 				bo = &exec->bo[contended_lock]->base;
 
-				ww_mutex_unlock(&bo->resv->lock);
+				dma_resv_unlock(bo->resv);
 			}
 
 			if (ret == -EDEADLK) {
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 1c62c6c..cea18dc 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -267,7 +267,8 @@ static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs =
 };
 
 static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
-						     struct drm_encoder *encoder)
+						     struct drm_encoder *encoder,
+						     struct i2c_adapter *ddc)
 {
 	struct drm_connector *connector;
 	struct vc4_hdmi_connector *hdmi_connector;
@@ -281,8 +282,10 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
 
 	hdmi_connector->encoder = encoder;
 
-	drm_connector_init(dev, connector, &vc4_hdmi_connector_funcs,
-			   DRM_MODE_CONNECTOR_HDMIA);
+	drm_connector_init_with_ddc(dev, connector,
+				    &vc4_hdmi_connector_funcs,
+				    DRM_MODE_CONNECTOR_HDMIA,
+				    ddc);
 	drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
 
 	/* Create and attach TV margin props to this connector. */
@@ -1395,7 +1398,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 			 DRM_MODE_ENCODER_TMDS, NULL);
 	drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs);
 
-	hdmi->connector = vc4_hdmi_connector_init(drm, hdmi->encoder);
+	hdmi->connector =
+		vc4_hdmi_connector_init(drm, hdmi->encoder, hdmi->ddc);
 	if (IS_ERR(hdmi->connector)) {
 		ret = PTR_ERR(hdmi->connector);
 		goto err_destroy_encoder;
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 3db000a..551fa31 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -35,11 +35,11 @@
  */
 
 #include <linux/pagemap.h>
+#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 #include <drm/drm_device.h>
-#include <drm/drm_pci.h>
 #include <drm/via_drm.h>
 
 #include "via_dmablit.h"
@@ -188,8 +188,8 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
 		kfree(vsg->desc_pages);
 		/* fall through */
 	case dr_via_pages_locked:
-		put_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
-					  (vsg->direction == DMA_FROM_DEVICE));
+		unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
+					   (vsg->direction == DMA_FROM_DEVICE));
 		/* fall through */
 	case dr_via_pages_alloc:
 		vfree(vsg->pages);
@@ -239,7 +239,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
 	vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
 	if (NULL == vsg->pages)
 		return -ENOMEM;
-	ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
+	ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
 			vsg->num_pages,
 			vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
 			vsg->pages);
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 666a16d..5da3808 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -23,10 +23,10 @@
  */
 
 #include <linux/module.h>
+#include <linux/pci.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_pciids.h>
 #include <drm/via_drm.h>
 
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 431c150..255c506 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -22,8 +22,9 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/pci.h>
+
 #include <drm/drm_device.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_vblank.h>
 #include <drm/via_drm.h>
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index e622485..0966208 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -43,6 +43,9 @@
 #define XRES_MAX  8192
 #define YRES_MAX  8192
 
+#define drm_connector_to_virtio_gpu_output(x) \
+	container_of(x, struct virtio_gpu_output, conn)
+
 static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
 	.set_config             = drm_atomic_helper_set_config,
 	.destroy                = drm_crtc_cleanup,
@@ -59,7 +62,7 @@ static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
 	.dirty = drm_atomic_helper_dirtyfb,
 };
 
-int
+static int
 virtio_gpu_framebuffer_init(struct drm_device *dev,
 			    struct virtio_gpu_framebuffer *vgfb,
 			    const struct drm_mode_fb_cmd2 *mode_cmd,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 8dee698..8cf27af 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -137,7 +137,7 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
 
 	drm_dev_unregister(dev);
 	virtio_gpu_deinit(dev);
-	drm_put_dev(dev);
+	drm_dev_put(dev);
 }
 
 static void virtio_gpu_config_changed(struct virtio_device *vdev)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 0b56ba0..7e69c06 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -38,6 +38,7 @@
 #include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/virtgpu_drm.h>
 
 #define DRIVER_NAME "virtio_gpu"
 #define DRIVER_DESC "virtio GPU"
@@ -102,8 +103,6 @@ struct virtio_gpu_fence {
 	struct virtio_gpu_fence_driver *drv;
 	struct list_head node;
 };
-#define to_virtio_fence(x) \
-	container_of(x, struct virtio_gpu_fence, f)
 
 struct virtio_gpu_vbuffer {
 	char *buf;
@@ -134,10 +133,6 @@ struct virtio_gpu_output {
 };
 #define drm_crtc_to_virtio_gpu_output(x) \
 	container_of(x, struct virtio_gpu_output, crtc)
-#define drm_connector_to_virtio_gpu_output(x) \
-	container_of(x, struct virtio_gpu_output, conn)
-#define drm_encoder_to_virtio_gpu_output(x) \
-	container_of(x, struct virtio_gpu_output, enc)
 
 struct virtio_gpu_framebuffer {
 	struct drm_framebuffer base;
@@ -182,6 +177,9 @@ struct virtio_gpu_device {
 	struct kmem_cache *vbufs;
 	bool vqs_ready;
 
+	bool disable_notify;
+	bool pending_notify;
+
 	struct ida	resource_ida;
 
 	wait_queue_head_t resp_wq;
@@ -312,13 +310,13 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
-					  struct virtio_gpu_box *box,
+					  struct drm_virtgpu_3d_box *box,
 					  struct virtio_gpu_object_array *objs,
 					  struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
-					struct virtio_gpu_box *box,
+					struct drm_virtgpu_3d_box *box,
 					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence);
 void
@@ -334,11 +332,10 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
 void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
 void virtio_gpu_dequeue_fence_func(struct work_struct *work);
 
+void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev);
+void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev);
+
 /* virtio_gpu_display.c */
-int virtio_gpu_framebuffer_init(struct drm_device *dev,
-				struct virtio_gpu_framebuffer *vgfb,
-				const struct drm_mode_fb_cmd2 *mode_cmd,
-				struct drm_gem_object *obj);
 void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
 void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
 
@@ -349,7 +346,6 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
 					int index);
 
 /* virtio_gpu_fence.c */
-bool virtio_fence_signaled(struct dma_fence *f);
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(
 	struct virtio_gpu_device *vgdev);
 void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
@@ -365,18 +361,12 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object_params *params,
 			     struct virtio_gpu_object **bo_ptr,
 			     struct virtio_gpu_fence *fence);
-
 /* virtgpu_prime.c */
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
 	struct drm_device *dev, struct dma_buf_attachment *attach,
 	struct sg_table *sgt);
 
-static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
-{
-	return drm_vma_node_offset_addr(&bo->base.base.vma_node);
-}
-
-/* virgl debufs */
+/* virgl debugfs */
 int virtio_gpu_debugfs_init(struct drm_minor *minor);
 
 #endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index a4b9881..5b2a414 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -27,6 +27,9 @@
 
 #include "virtgpu_drv.h"
 
+#define to_virtio_fence(x) \
+	container_of(x, struct virtio_gpu_fence, f)
+
 static const char *virtio_get_driver_name(struct dma_fence *f)
 {
 	return "virtio_gpu";
@@ -37,7 +40,7 @@ static const char *virtio_get_timeline_name(struct dma_fence *f)
 	return "controlq";
 }
 
-bool virtio_fence_signaled(struct dma_fence *f)
+static bool virtio_fence_signaled(struct dma_fence *f)
 {
 	struct virtio_gpu_fence *fence = to_virtio_fence(f);
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 4c1f579..0a2b622 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -96,14 +96,12 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
 			      uint32_t handle, uint64_t *offset_p)
 {
 	struct drm_gem_object *gobj;
-	struct virtio_gpu_object *obj;
 
 	BUG_ON(!offset_p);
 	gobj = drm_gem_object_lookup(file_priv, handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	obj = gem_to_virtio_gpu_obj(gobj);
-	*offset_p = virtio_gpu_object_mmap_offset(obj);
+	*offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
 	drm_gem_object_put_unlocked(gobj);
 	return 0;
 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 9af1ec6..205ec4a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -33,17 +33,6 @@
 
 #include "virtgpu_drv.h"
 
-static void convert_to_hw_box(struct virtio_gpu_box *dst,
-			      const struct drm_virtgpu_3d_box *src)
-{
-	dst->x = cpu_to_le32(src->x);
-	dst->y = cpu_to_le32(src->y);
-	dst->z = cpu_to_le32(src->z);
-	dst->w = cpu_to_le32(src->w);
-	dst->h = cpu_to_le32(src->h);
-	dst->d = cpu_to_le32(src->d);
-}
-
 static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv)
 {
@@ -304,7 +293,6 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	struct virtio_gpu_fence *fence;
 	int ret;
 	u32 offset = args->offset;
-	struct virtio_gpu_box box;
 
 	if (vgdev->has_virgl_3d == false)
 		return -ENOSYS;
@@ -317,8 +305,6 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	if (ret != 0)
 		goto err_put_free;
 
-	convert_to_hw_box(&box, &args->box);
-
 	fence = virtio_gpu_fence_alloc(vgdev);
 	if (!fence) {
 		ret = -ENOMEM;
@@ -326,7 +312,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	}
 	virtio_gpu_cmd_transfer_from_host_3d
 		(vgdev, vfpriv->ctx_id, offset, args->level,
-		 &box, objs, fence);
+		 &args->box, objs, fence);
 	dma_fence_put(&fence->f);
 	return 0;
 
@@ -345,7 +331,6 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 	struct drm_virtgpu_3d_transfer_to_host *args = data;
 	struct virtio_gpu_object_array *objs;
 	struct virtio_gpu_fence *fence;
-	struct virtio_gpu_box box;
 	int ret;
 	u32 offset = args->offset;
 
@@ -353,11 +338,10 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 	if (objs == NULL)
 		return -ENOENT;
 
-	convert_to_hw_box(&box, &args->box);
 	if (!vgdev->has_virgl_3d) {
 		virtio_gpu_cmd_transfer_to_host_2d
 			(vgdev, offset,
-			 box.w, box.h, box.x, box.y,
+			 args->box.w, args->box.h, args->box.x, args->box.y,
 			 objs, NULL);
 	} else {
 		ret = virtio_gpu_array_lock_resv(objs);
@@ -372,7 +356,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 		virtio_gpu_cmd_transfer_to_host_3d
 			(vgdev,
 			 vfpriv ? vfpriv->ctx_id : 0, offset,
-			 args->level, &box, objs, fence);
+			 args->level, &args->box, objs, fence);
 		dma_fence_put(&fence->f);
 	}
 	return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 1635a9f..d1c3f5f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -24,6 +24,7 @@
  */
 
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_plane_helper.h>
 
@@ -88,7 +89,7 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
 	struct drm_crtc_state *crtc_state;
 	int ret;
 
-	if (!state->fb || !state->crtc)
+	if (!state->fb || WARN_ON(!state->crtc))
 		return 0;
 
 	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
@@ -102,15 +103,37 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
 	return ret;
 }
 
+static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
+				      struct drm_plane_state *state,
+				      struct drm_rect *rect)
+{
+	struct virtio_gpu_object *bo =
+		gem_to_virtio_gpu_obj(state->fb->obj[0]);
+	struct virtio_gpu_object_array *objs;
+	uint32_t w = rect->x2 - rect->x1;
+	uint32_t h = rect->y2 - rect->y1;
+	uint32_t x = rect->x1;
+	uint32_t y = rect->y1;
+	uint32_t off = x * state->fb->format->cpp[0] +
+		y * state->fb->pitches[0];
+
+	objs = virtio_gpu_array_alloc(1);
+	if (!objs)
+		return;
+	virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
+					   objs, NULL);
+}
+
 static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 					    struct drm_plane_state *old_state)
 {
 	struct drm_device *dev = plane->dev;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_output *output = NULL;
-	struct virtio_gpu_framebuffer *vgfb;
 	struct virtio_gpu_object *bo;
-	uint32_t handle;
+	struct drm_rect rect;
 
 	if (plane->state->crtc)
 		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
@@ -119,47 +142,52 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 	if (WARN_ON(!output))
 		return;
 
-	if (plane->state->fb && output->enabled) {
-		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
-		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
-		handle = bo->hw_res_handle;
-		if (bo->dumb) {
-			struct virtio_gpu_object_array *objs;
-
-			objs = virtio_gpu_array_alloc(1);
-			if (!objs)
-				return;
-			virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
-			virtio_gpu_cmd_transfer_to_host_2d
-				(vgdev, 0,
-				 plane->state->src_w >> 16,
-				 plane->state->src_h >> 16,
-				 plane->state->src_x >> 16,
-				 plane->state->src_y >> 16,
-				 objs, NULL);
-		}
-	} else {
-		handle = 0;
+	if (!plane->state->fb || !output->enabled) {
+		DRM_DEBUG("nofb\n");
+		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
+					   plane->state->src_w >> 16,
+					   plane->state->src_h >> 16,
+					   0, 0);
+		return;
 	}
 
-	DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
-		  plane->state->crtc_w, plane->state->crtc_h,
-		  plane->state->crtc_x, plane->state->crtc_y,
-		  plane->state->src_w >> 16,
-		  plane->state->src_h >> 16,
-		  plane->state->src_x >> 16,
-		  plane->state->src_y >> 16);
-	virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
-				   plane->state->src_w >> 16,
-				   plane->state->src_h >> 16,
-				   plane->state->src_x >> 16,
-				   plane->state->src_y >> 16);
-	if (handle)
-		virtio_gpu_cmd_resource_flush(vgdev, handle,
-					      plane->state->src_x >> 16,
-					      plane->state->src_y >> 16,
-					      plane->state->src_w >> 16,
-					      plane->state->src_h >> 16);
+	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
+		return;
+
+	virtio_gpu_disable_notify(vgdev);
+
+	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+	if (bo->dumb)
+		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);
+
+	if (plane->state->fb != old_state->fb ||
+	    plane->state->src_w != old_state->src_w ||
+	    plane->state->src_h != old_state->src_h ||
+	    plane->state->src_x != old_state->src_x ||
+	    plane->state->src_y != old_state->src_y) {
+		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
+			  bo->hw_res_handle,
+			  plane->state->crtc_w, plane->state->crtc_h,
+			  plane->state->crtc_x, plane->state->crtc_y,
+			  plane->state->src_w >> 16,
+			  plane->state->src_h >> 16,
+			  plane->state->src_x >> 16,
+			  plane->state->src_y >> 16);
+		virtio_gpu_cmd_set_scanout(vgdev, output->index,
+					   bo->hw_res_handle,
+					   plane->state->src_w >> 16,
+					   plane->state->src_h >> 16,
+					   plane->state->src_x >> 16,
+					   plane->state->src_y >> 16);
+	}
+
+	virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
+				      rect.x1,
+				      rect.y1,
+				      rect.x2 - rect.x1,
+				      rect.y2 - rect.y1);
+
+	virtio_gpu_enable_notify(vgdev);
 }
 
 static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 74ad3bc..5914e79 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -40,6 +40,17 @@
 			       + MAX_INLINE_CMD_SIZE		 \
 			       + MAX_INLINE_RESP_SIZE)
 
+static void convert_to_hw_box(struct virtio_gpu_box *dst,
+			      const struct drm_virtgpu_3d_box *src)
+{
+	dst->x = cpu_to_le32(src->x);
+	dst->y = cpu_to_le32(src->y);
+	dst->z = cpu_to_le32(src->z);
+	dst->w = cpu_to_le32(src->w);
+	dst->h = cpu_to_le32(src->h);
+	dst->d = cpu_to_le32(src->d);
+}
+
 void virtio_gpu_ctrl_ack(struct virtqueue *vq)
 {
 	struct drm_device *dev = vq->vdev->priv;
@@ -393,8 +404,12 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 	}
 	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
 	spin_unlock(&vgdev->ctrlq.qlock);
-	if (notify)
-		virtqueue_notify(vgdev->ctrlq.vq);
+	if (notify) {
+		if (vgdev->disable_notify)
+			vgdev->pending_notify = true;
+		else
+			virtqueue_notify(vgdev->ctrlq.vq);
+	}
 
 	if (sgt) {
 		sg_free_table(sgt);
@@ -402,6 +417,21 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 	}
 }
 
+void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
+{
+	vgdev->disable_notify = true;
+}
+
+void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
+{
+	vgdev->disable_notify = false;
+
+	if (!vgdev->pending_notify)
+		return;
+	vgdev->pending_notify = false;
+	virtqueue_notify(vgdev->ctrlq.vq);
+}
+
 static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 					 struct virtio_gpu_vbuffer *vbuf)
 {
@@ -965,7 +995,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
-					struct virtio_gpu_box *box,
+					struct drm_virtgpu_3d_box *box,
 					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence)
 {
@@ -987,7 +1017,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
-	cmd_p->box = *box;
+	convert_to_hw_box(&cmd_p->box, box);
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
 
@@ -997,7 +1027,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
-					  struct virtio_gpu_box *box,
+					  struct drm_virtgpu_3d_box *box,
 					  struct virtio_gpu_object_array *objs,
 					  struct virtio_gpu_fence *fence)
 {
@@ -1013,7 +1043,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
-	cmd_p->box = *box;
+	convert_to_hw_box(&cmd_p->box, box);
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
 
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index d558569..4af2f19 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -43,18 +43,18 @@ static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
 }
 
 /**
- * blend - belnd value at vaddr_src with value at vaddr_dst
+ * blend - blend value at vaddr_src with value at vaddr_dst
  * @vaddr_dst: destination address
  * @vaddr_src: source address
  * @dest_composer: destination framebuffer's metadata
  * @src_composer: source framebuffer's metadata
  *
  * Blend value at vaddr_src with value at vaddr_dst.
- * Currently, this function write value at vaddr_src on value
+ * Currently, this function writes the value of vaddr_src on the value
  * at vaddr_dst using buffer's metadata to locate the new values
- * from vaddr_src and their distenation at vaddr_dst.
+ * from vaddr_src and their destination at vaddr_dst.
  *
- * Todo: Use the alpha value to blend vaddr_src with vaddr_dst
+ * TODO: Use the alpha value to blend vaddr_src with vaddr_dst
  *	 instead of overwriting it.
  */
 static void blend(void *vaddr_dst, void *vaddr_src,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index d1fe144..25bd751 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -3,10 +3,10 @@
 /**
  * DOC: vkms (Virtual Kernel Modesetting)
  *
- * vkms is a software-only model of a kms driver that is useful for testing,
- * or for running X (or similar) on headless machines and be able to still
- * use the GPU. vkms aims to enable a virtual display without the need for
- * a hardware display capability.
+ * VKMS is a software-only model of a KMS driver that is useful for testing
+ * and for running X (or similar) on headless machines. VKMS aims to enable
+ * a virtual display with no need of a hardware display capability, releasing
+ * the GPU in DRM API tests.
  */
 
 #include <linux/module.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 4ac55fc..44d858c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -209,8 +209,10 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
 
 	cres->hash.key = user_key | (res_type << 24);
 	ret = drm_ht_insert_item(&man->resources, &cres->hash);
-	if (unlikely(ret != 0))
+	if (unlikely(ret != 0)) {
+		kfree(cres);
 		goto out_invalid_key;
+	}
 
 	cres->state = VMW_CMDBUF_RES_ADD;
 	cres->res = vmw_resource_reference(res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e962048..827458f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -28,10 +28,10 @@
 #include <linux/console.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/pci.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drm_ioctl.h>
-#include <drm/drm_pci.h>
 #include <drm/drm_sysfs.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_module.h>
@@ -150,6 +150,9 @@
 #define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT				\
 	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,		\
 		union drm_vmw_gb_surface_reference_ext_arg)
+#define DRM_IOCTL_VMW_MSG						\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG,			\
+		struct drm_vmw_msg_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -165,9 +168,9 @@
 
 static const struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
@@ -182,16 +185,16 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_MASTER),
 
 	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH |
+		      DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
 		      DRM_RENDER_ALLOW),
@@ -201,9 +204,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 
 	/* these allow direct access to the framebuffers mark as master only */
 	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
@@ -221,28 +224,31 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
 		      vmw_shader_define_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
 		      vmw_shader_destroy_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
 		      vmw_gb_surface_define_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
 		      vmw_gb_surface_reference_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_SYNCCPU,
 		      vmw_user_bo_synccpu_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
 		      vmw_extended_context_define_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
 		      vmw_gb_surface_define_ext_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
 		      vmw_gb_surface_reference_ext_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_MSG,
+		      vmw_msg_ioctl,
+		      DRM_RENDER_ALLOW),
 };
 
 static const struct pci_device_id vmw_pci_id_list[] = {
@@ -1211,8 +1217,10 @@ static void vmw_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
+	drm_dev_unregister(dev);
+	vmw_driver_unload(dev);
+	drm_dev_put(dev);
 	pci_disable_device(pdev);
-	drm_put_dev(dev);
 }
 
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
@@ -1391,8 +1399,6 @@ static const struct file_operations vmwgfx_driver_fops = {
 static struct drm_driver driver = {
 	.driver_features =
 	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
-	.load = vmw_driver_load,
-	.unload = vmw_driver_unload,
 	.get_vblank_counter = vmw_get_vblank_counter,
 	.enable_vblank = vmw_enable_vblank,
 	.disable_vblank = vmw_disable_vblank,
@@ -1431,7 +1437,39 @@ static struct pci_driver vmw_pci_driver = {
 
 static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	return drm_get_pci_dev(pdev, ent, &driver);
+	struct drm_device *dev;
+	int ret;
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	dev = drm_dev_alloc(&driver, &pdev->dev);
+	if (IS_ERR(dev)) {
+		ret = PTR_ERR(dev);
+		goto err_pci_disable_device;
+	}
+
+	dev->pdev = pdev;
+	pci_set_drvdata(pdev, dev);
+
+	ret = vmw_driver_load(dev, ent->driver_data);
+	if (ret)
+		goto err_drm_dev_put;
+
+	ret = drm_dev_register(dev, ent->driver_data);
+	if (ret)
+		goto err_vmw_driver_unload;
+
+	return 0;
+
+err_vmw_driver_unload:
+	vmw_driver_unload(dev);
+err_drm_dev_put:
+	drm_dev_put(dev);
+err_pci_disable_device:
+	pci_disable_device(pdev);
+	return ret;
 }
 
 static int __init vmwgfx_init(void)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index a31e726..86b6939 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -56,9 +56,9 @@
 
 
 #define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20190328"
+#define VMWGFX_DRIVER_DATE "20200114"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 16
+#define VMWGFX_DRIVER_MINOR 17
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
@@ -1403,6 +1403,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 int vmw_host_get_guestinfo(const char *guest_info_param,
 			   char *buffer, size_t *length);
 int vmw_host_log(const char *log);
+int vmw_msg_ioctl(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv);
 
 /* VMW logging */
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 934ad7c..73489a4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2377,9 +2377,12 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
 {
 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
 		container_of(header, typeof(*cmd), header);
+	struct vmw_resource *ret;
 
-	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
-					   cmd->body.renderTargetViewId));
+	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
+				  cmd->body.renderTargetViewId);
+
+	return PTR_ERR_OR_ZERO(ret);
 }
 
 /**
@@ -2396,9 +2399,12 @@ static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
 {
 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
 		container_of(header, typeof(*cmd), header);
+	struct vmw_resource *ret;
 
-	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
-					   cmd->body.depthStencilViewId));
+	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
+				  cmd->body.depthStencilViewId);
+
+	return PTR_ERR_OR_ZERO(ret);
 }
 
 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
@@ -2741,9 +2747,12 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
 {
 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
 		container_of(header, typeof(*cmd), header);
+	struct vmw_resource *ret;
 
-	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
-					   cmd->body.shaderResourceViewId));
+	ret = vmw_view_id_val_add(sw_context, vmw_view_sr,
+				  cmd->body.shaderResourceViewId);
+
+	return PTR_ERR_OR_ZERO(ret);
 }
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index ea29953..c59806d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -624,7 +624,7 @@ static int vmw_fb_set_par(struct fb_info *info)
 }
 
 
-static struct fb_ops vmw_fb_ops = {
+static const struct fb_ops vmw_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = vmw_fb_check_var,
 	.fb_set_par = vmw_fb_set_par,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index b6c5e4c..e9f448a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -28,6 +28,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/mem_encrypt.h>
 
 #include <asm/hypervisor.h>
 
@@ -56,6 +57,8 @@
 
 #define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)
 
+#define MAX_USER_MSG_LENGTH	PAGE_SIZE
+
 static u32 vmw_msg_enabled = 1;
 
 enum rpc_msg_type {
@@ -148,7 +151,8 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
 	unsigned long si, di, eax, ebx, ecx, edx;
 	unsigned long msg_len = strlen(msg);
 
-	if (hb) {
+	/* HB port can't access encrypted memory. */
+	if (hb && !mem_encrypt_active()) {
 		unsigned long bp = channel->cookie_high;
 
 		si = (uintptr_t) msg;
@@ -202,7 +206,8 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
 {
 	unsigned long si, di, eax, ebx, ecx, edx;
 
-	if (hb) {
+	/* HB port can't access encrypted memory */
+	if (hb && !mem_encrypt_active()) {
 		unsigned long bp = channel->cookie_low;
 
 		si = channel->cookie_high;
@@ -514,3 +519,84 @@ int vmw_host_log(const char *log)
 
 	return -EINVAL;
 }
+
+
+/**
+ * vmw_msg_ioctl: Sends and receives a message to/from the host from/to user-space
+ *
+ * Sends a message from user-space to the host.
+ * Can also receive a result from the host and return that to user-space.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ */
+int vmw_msg_ioctl(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct drm_vmw_msg_arg *arg =
+		(struct drm_vmw_msg_arg *) data;
+	struct rpc_channel channel;
+	char *msg;
+	int length;
+
+	msg = kmalloc(MAX_USER_MSG_LENGTH, GFP_KERNEL);
+	if (!msg) {
+		DRM_ERROR("Cannot allocate memory for log message.\n");
+		return -ENOMEM;
+	}
+
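+	/* The message must be NUL-terminated within MAX_USER_MSG_LENGTH bytes. */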
+	length = strncpy_from_user(msg, (void __user *)((unsigned long)arg->send),
+				   MAX_USER_MSG_LENGTH);
+	if (length < 0 || length >= MAX_USER_MSG_LENGTH) {
+		DRM_ERROR("Userspace message access failure.\n");
+		kfree(msg);
+		return -EINVAL;
+	}
+
+	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) {
+		DRM_ERROR("Failed to open channel.\n");
+		goto out_open;
+	}
+
+	if (vmw_send_msg(&channel, msg)) {
+		DRM_ERROR("Failed to send message to host.\n");
+		goto out_msg;
+	}
+
+	if (!arg->send_only) {
+		char *reply = NULL;
+		size_t reply_len = 0;
+
+		if (vmw_recv_msg(&channel, (void *) &reply, &reply_len)) {
+			DRM_ERROR("Failed to receive message from host.\n");
+			goto out_msg;
+		}
+		if (reply && reply_len > 0) {
+			if (copy_to_user((void __user *)((unsigned long)arg->receive),
+							 reply, reply_len)) {
+				DRM_ERROR("Failed to copy message to userspace.\n");
+				kfree(reply);
+				goto out_msg;
+			}
+			arg->receive_len = (__u32)reply_len;
+		}
+		kfree(reply);
+	}
+
+	vmw_close_channel(&channel);
+	kfree(msg);
+
+	return 0;
+
+out_msg:
+	vmw_close_channel(&channel);
+out_open:
+	kfree(msg);
+
+	return -EINVAL;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index e420675..d9552a1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -62,45 +62,12 @@ static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
 {
 }
 
-static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
-{
-	return NULL;
-}
-
-static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
-{
-}
-
-static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
-		unsigned long page_num)
-{
-	return NULL;
-}
-
-static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
-		unsigned long page_num, void *addr)
-{
-
-}
-
-static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
-				 struct vm_area_struct *vma)
-{
-	WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
-	return -ENOSYS;
-}
-
 const struct dma_buf_ops vmw_prime_dmabuf_ops =  {
 	.attach = vmw_prime_map_attach,
 	.detach = vmw_prime_map_detach,
 	.map_dma_buf = vmw_prime_map_dma_buf,
 	.unmap_dma_buf = vmw_prime_unmap_dma_buf,
 	.release = NULL,
-	.map = vmw_prime_dmabuf_kmap,
-	.unmap = vmw_prime_dmabuf_kunmap,
-	.mmap = vmw_prime_dmabuf_mmap,
-	.vmap = vmw_prime_dmabuf_vmap,
-	.vunmap = vmw_prime_dmabuf_vunmap,
 };
 
 int vmw_prime_fd_to_handle(struct drm_device *dev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 32b9131..3ce630a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -934,16 +934,12 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 	uint32_t handle;
 	struct ttm_base_object *base;
 	int ret;
-	bool require_exist = false;
 
 	if (handle_type == DRM_VMW_HANDLE_PRIME) {
 		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
 		if (unlikely(ret != 0))
 			return ret;
 	} else {
-		if (unlikely(drm_is_render_client(file_priv)))
-			require_exist = true;
-
 		handle = u_handle;
 	}
 
@@ -960,9 +956,18 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 	}
 
 	if (handle_type != DRM_VMW_HANDLE_PRIME) {
+		bool require_exist = false;
+
 		user_srf = container_of(base, struct vmw_user_surface,
 					prime.base);
 
+		/* Error out if we are unauthenticated primary */
+		if (drm_is_primary_client(file_priv) &&
+		    !file_priv->authenticated) {
+			ret = -EACCES;
+			goto out_bad_resource;
+		}
+
 		/*
 		 * Make sure the surface creator has the same
 		 * authenticating master, or is already registered with us.
@@ -971,6 +976,9 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 		    user_srf->master != file_priv->master)
 			require_exist = true;
 
+		if (unlikely(drm_is_render_client(file_priv)))
+			require_exist = true;
+
 		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
 					 require_exist);
 		if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index ce28875..aa7e50f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -45,6 +45,10 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
 
 	vma->vm_ops = &vmw_vm_ops;
 
+	/* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
+	if ((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE)
+		vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index ff506bc..4f34c52 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -63,14 +63,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
 	if (IS_ERR_OR_NULL(fb))
 		return fb;
 
-	gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
-	if (!gem_obj) {
-		DRM_ERROR("Failed to lookup GEM object\n");
-		ret = -ENOENT;
-		goto fail;
-	}
-
-	drm_gem_object_put_unlocked(gem_obj);
+	gem_obj = fb->obj[0];
 
 	ret = xen_drm_front_fb_attach(drm_info->front_info,
 				      xen_drm_front_dbuf_to_cookie(gem_obj),
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
index a50f5a1..b98a142 100644
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -319,8 +319,10 @@ static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
 
 	hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
 
-	drm_connector_init(drm, &hdmi->connector, &zx_hdmi_connector_funcs,
-			   DRM_MODE_CONNECTOR_HDMIA);
+	drm_connector_init_with_ddc(drm, &hdmi->connector,
+				    &zx_hdmi_connector_funcs,
+				    DRM_MODE_CONNECTOR_HDMIA,
+				    &hdmi->ddc->adap);
 	drm_connector_helper_add(&hdmi->connector,
 				 &zx_hdmi_connector_helper_funcs);
 
diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c
index 9b67e41..c4fa3bba 100644
--- a/drivers/gpu/drm/zte/zx_vga.c
+++ b/drivers/gpu/drm/zte/zx_vga.c
@@ -165,8 +165,10 @@ static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
 
 	vga->connector.polled = DRM_CONNECTOR_POLL_HPD;
 
-	ret = drm_connector_init(drm, connector, &zx_vga_connector_funcs,
-				 DRM_MODE_CONNECTOR_VGA);
+	ret = drm_connector_init_with_ddc(drm, connector,
+					  &zx_vga_connector_funcs,
+					  DRM_MODE_CONNECTOR_VGA,
+					  &vga->ddc->adap);
 	if (ret) {
 		DRM_DEV_ERROR(dev, "failed to init connector: %d\n", ret);
 		goto clean_encoder;
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 2c8559f..6a995db 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -120,7 +120,7 @@ static void host1x_subdev_register(struct host1x_device *device,
 	mutex_lock(&device->clients_lock);
 	list_move_tail(&client->list, &device->clients);
 	list_move_tail(&subdev->list, &device->active);
-	client->parent = &device->dev;
+	client->host = &device->dev;
 	subdev->client = client;
 	mutex_unlock(&device->clients_lock);
 	mutex_unlock(&device->subdevs_lock);
@@ -156,7 +156,7 @@ static void __host1x_subdev_unregister(struct host1x_device *device,
 	 */
 	mutex_lock(&device->clients_lock);
 	subdev->client = NULL;
-	client->parent = NULL;
+	client->host = NULL;
 	list_move_tail(&subdev->list, &device->subdevs);
 	/*
 	 * XXX: Perhaps don't do this here, but rather explicitly remove it
@@ -710,6 +710,10 @@ int host1x_client_register(struct host1x_client *client)
 	struct host1x *host1x;
 	int err;
 
+	INIT_LIST_HEAD(&client->list);
+	mutex_init(&client->lock);
+	client->usecount = 0;
+
 	mutex_lock(&devices_lock);
 
 	list_for_each_entry(host1x, &devices, list) {
@@ -768,3 +772,74 @@ int host1x_client_unregister(struct host1x_client *client)
 	return 0;
 }
 EXPORT_SYMBOL(host1x_client_unregister);
+
+int host1x_client_suspend(struct host1x_client *client)
+{
+	int err = 0;
+
+	mutex_lock(&client->lock);
+
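+	/* Only suspend the hardware when the last user goes away. */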
+	if (client->usecount == 1) {
+		if (client->ops && client->ops->suspend) {
+			err = client->ops->suspend(client);
+			if (err < 0)
+				goto unlock;
+		}
+	}
+
+	client->usecount--;
+	dev_dbg(client->dev, "use count: %u\n", client->usecount);
+
+	if (client->parent) {
+		err = host1x_client_suspend(client->parent);
+		if (err < 0)
+			goto resume;
+	}
+
+	goto unlock;
+
+resume:
+	if (client->usecount == 0)
+		if (client->ops && client->ops->resume)
+			client->ops->resume(client);
+
+	client->usecount++;
+unlock:
+	mutex_unlock(&client->lock);
+	return err;
+}
+EXPORT_SYMBOL(host1x_client_suspend);
+
+int host1x_client_resume(struct host1x_client *client)
+{
+	int err = 0;
+
+	mutex_lock(&client->lock);
+
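+	/* Make sure the parent client is resumed before this one. */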
+	if (client->parent) {
+		err = host1x_client_resume(client->parent);
+		if (err < 0)
+			goto unlock;
+	}
+
+	if (client->usecount == 0) {
+		if (client->ops && client->ops->resume) {
+			err = client->ops->resume(client);
+			if (err < 0)
+				goto suspend;
+		}
+	}
+
+	client->usecount++;
+	dev_dbg(client->dev, "use count: %u\n", client->usecount);
+
+	goto unlock;
+
+suspend:
+	if (client->parent)
+		host1x_client_suspend(client->parent);
+unlock:
+	mutex_unlock(&client->lock);
+	return err;
+}
+EXPORT_SYMBOL(host1x_client_resume);
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index a738ea5..388bcc2 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -339,10 +339,8 @@ static int host1x_probe(struct platform_device *pdev)
 	}
 
 	syncpt_irq = platform_get_irq(pdev, 0);
-	if (syncpt_irq < 0) {
-		dev_err(&pdev->dev, "failed to get IRQ: %d\n", syncpt_irq);
+	if (syncpt_irq < 0)
 		return syncpt_irq;
-	}
 
 	mutex_init(&host->devices_lock);
 	INIT_LIST_HEAD(&host->devices);
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 25ca54d..a10643a 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -8,6 +8,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/host1x.h>
+#include <linux/iommu.h>
 #include <linux/kref.h>
 #include <linux/module.h>
 #include <linux/scatterlist.h>
@@ -101,9 +102,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 {
 	struct host1x_client *client = job->client;
 	struct device *dev = client->dev;
+	struct iommu_domain *domain;
 	unsigned int i;
 	int err;
 
+	domain = iommu_get_domain_for_dev(dev);
 	job->num_unpins = 0;
 
 	for (i = 0; i < job->num_relocs; i++) {
@@ -117,7 +120,19 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 			goto unpin;
 		}
 
-		if (client->group)
+		/*
+		 * If the client device is not attached to an IOMMU, the
+		 * physical address of the buffer object can be used.
+		 *
+		 * Similarly, when an IOMMU domain is shared between all
+		 * host1x clients, the IOVA is already available, so no
+		 * need to map the buffer object again.
+		 *
+		 * XXX Note that this isn't always safe to do because it
+		 * relies on an assumption that no cache maintenance is
+		 * needed on the buffer objects.
+		 */
+		if (!domain || client->group)
 			phys = &phys_addr;
 		else
 			phys = NULL;
@@ -176,6 +191,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 		dma_addr_t phys_addr;
 		unsigned long shift;
 		struct iova *alloc;
+		dma_addr_t *phys;
 		unsigned int j;
 
 		g->bo = host1x_bo_get(g->bo);
@@ -184,7 +200,17 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 			goto unpin;
 		}
 
-		sgt = host1x_bo_pin(host->dev, g->bo, NULL);
+		/*
+		 * If the host1x is not attached to an IOMMU, there is no need
+		 * to map the buffer object for the host1x, since the physical
+		 * address can simply be used.
+		 */
+		if (!iommu_get_domain_for_dev(host->dev))
+			phys = &phys_addr;
+		else
+			phys = NULL;
+
+		sgt = host1x_bo_pin(host->dev, g->bo, phys);
 		if (IS_ERR(sgt)) {
 			err = PTR_ERR(sgt);
 			goto unpin;
@@ -214,7 +240,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 
 			job->unpins[job->num_unpins].size = gather_size;
 			phys_addr = iova_dma_addr(&host->iova, alloc);
-		} else {
+		} else if (sgt) {
 			err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
 					 DMA_TO_DEVICE);
 			if (!err) {
@@ -222,6 +248,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 				goto unpin;
 			}
 
+			job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
 			job->unpins[job->num_unpins].dev = host->dev;
 			phys_addr = sg_dma_address(sgt->sgl);
 		}
@@ -229,7 +256,6 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 		job->addr_phys[job->num_unpins] = phys_addr;
 		job->gather_addr_phys[i] = phys_addr;
 
-		job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
 		job->unpins[job->num_unpins].bo = g->bo;
 		job->unpins[job->num_unpins].sgt = sgt;
 		job->num_unpins++;
@@ -244,8 +270,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 
 static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
 {
-	u32 last_page = ~0;
-	void *cmdbuf_page_addr = NULL;
+	void *cmdbuf_addr = NULL;
 	struct host1x_bo *cmdbuf = g->bo;
 	unsigned int i;
 
@@ -267,28 +292,22 @@ static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
 			goto patch_reloc;
 		}
 
-		if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
-			if (cmdbuf_page_addr)
-				host1x_bo_kunmap(cmdbuf, last_page,
-						 cmdbuf_page_addr);
+		if (!cmdbuf_addr) {
+			cmdbuf_addr = host1x_bo_mmap(cmdbuf);
 
-			cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
-					reloc->cmdbuf.offset >> PAGE_SHIFT);
-			last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;
-
-			if (unlikely(!cmdbuf_page_addr)) {
+			if (unlikely(!cmdbuf_addr)) {
 				pr_err("Could not map cmdbuf for relocation\n");
 				return -ENOMEM;
 			}
 		}
 
-		target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);
+		target = cmdbuf_addr + reloc->cmdbuf.offset;
 patch_reloc:
 		*target = reloc_addr;
 	}
 
-	if (cmdbuf_page_addr)
-		host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);
+	if (cmdbuf_addr)
+		host1x_bo_munmap(cmdbuf, cmdbuf_addr);
 
 	return 0;
 }
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index dd1cd01..fce7892 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -421,7 +421,7 @@ int host1x_syncpt_init(struct host1x *host)
 struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
 					    unsigned long flags)
 {
-	struct host1x *host = dev_get_drvdata(client->parent->parent);
+	struct host1x *host = dev_get_drvdata(client->host->parent);
 
 	return host1x_syncpt_alloc(host, client, flags);
 }
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index c8c770b..1ad4c4e 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -28,6 +28,6 @@
 	help
 	  Many laptops released in 2008/9/10 have two GPUs with a multiplexer
 	  to switch between them. This adds support for dynamic switching when
-          X isn't running and delayed switching until the next logoff. This
+	  X isn't running and delayed switching until the next logoff. This
 	  feature is called hybrid graphics, ATI PowerXpress, and Nvidia
 	  HybridPower.
diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index e162a66..a549c42 100644
--- a/drivers/hid/hid-picolcd_fb.c
+++ b/drivers/hid/hid-picolcd_fb.c
@@ -417,8 +417,7 @@ static int picolcd_set_par(struct fb_info *info)
 	return 0;
 }
 
-/* Note this can't be const because of struct fb_info definition */
-static struct fb_ops picolcdfb_ops = {
+static const struct fb_ops picolcdfb_ops = {
 	.owner        = THIS_MODULE,
 	.fb_destroy   = picolcd_fb_destroy,
 	.fb_read      = fb_sys_read,
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 8eb1675..0370364 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -1351,6 +1351,8 @@ channel_message_table[CHANNELMSG_COUNT] = {
 	{ CHANNELMSG_19,			0, NULL },
 	{ CHANNELMSG_20,			0, NULL },
 	{ CHANNELMSG_TL_CONNECT_REQUEST,	0, NULL },
+	{ CHANNELMSG_22,			0, NULL },
+	{ CHANNELMSG_TL_CONNECT_RESULT,		0, NULL },
 };
 
 /*
@@ -1362,25 +1364,16 @@ void vmbus_onmessage(void *context)
 {
 	struct hv_message *msg = context;
 	struct vmbus_channel_message_header *hdr;
-	int size;
 
 	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
-	size = msg->header.payload_size;
 
 	trace_vmbus_on_message(hdr);
 
-	if (hdr->msgtype >= CHANNELMSG_COUNT) {
-		pr_err("Received invalid channel message type %d size %d\n",
-			   hdr->msgtype, size);
-		print_hex_dump_bytes("", DUMP_PREFIX_NONE,
-				     (unsigned char *)msg->u.payload, size);
-		return;
-	}
-
-	if (channel_message_table[hdr->msgtype].message_handler)
-		channel_message_table[hdr->msgtype].message_handler(hdr);
-	else
-		pr_err("Unhandled channel message type %d\n", hdr->msgtype);
+	/*
+	 * vmbus_on_msg_dpc() makes sure the hdr->msgtype here cannot go
+	 * out of bounds and the message_handler pointer cannot be NULL.
+	 */
+	channel_message_table[hdr->msgtype].message_handler(hdr);
 }
 
 /*
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b155d00..a02ce43 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1217,10 +1217,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
 	unsigned int i, j;
 	struct page *pg;
 
-	if (num_pages < alloc_unit)
-		return 0;
-
-	for (i = 0; (i * alloc_unit) < num_pages; i++) {
+	for (i = 0; i < num_pages / alloc_unit; i++) {
 		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
 			HV_HYP_PAGE_SIZE)
 			return i * alloc_unit;
@@ -1258,7 +1255,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
 
 	}
 
-	return num_pages;
+	return i * alloc_unit;
 }
 
 static void balloon_up(struct work_struct *dummy)
@@ -1273,9 +1270,6 @@ static void balloon_up(struct work_struct *dummy)
 	long avail_pages;
 	unsigned long floor;
 
-	/* The host balloons pages in 2M granularity. */
-	WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);
-
 	/*
 	 * We will attempt 2M allocations. However, if we fail to
 	 * allocate 2M chunks, we will go back to PAGE_SIZE allocations.
@@ -1285,14 +1279,13 @@ static void balloon_up(struct work_struct *dummy)
 	avail_pages = si_mem_available();
 	floor = compute_balloon_floor();
 
-	/* Refuse to balloon below the floor, keep the 2M granularity. */
+	/* Refuse to balloon below the floor. */
 	if (avail_pages < num_pages || avail_pages - num_pages < floor) {
 		pr_warn("Balloon request will be partially fulfilled. %s\n",
 			avail_pages < num_pages ? "Not enough memory." :
 			"Balloon floor reached.");
 
 		num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
-		num_pages -= num_pages % PAGES_IN_2M;
 	}
 
 	while (!done) {
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 08fa4a5..bb9ba3f 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -346,9 +346,61 @@ int hv_fcopy_init(struct hv_util_service *srv)
 	return 0;
 }
 
+static void hv_fcopy_cancel_work(void)
+{
+	cancel_delayed_work_sync(&fcopy_timeout_work);
+	cancel_work_sync(&fcopy_send_work);
+}
+
+int hv_fcopy_pre_suspend(void)
+{
+	struct vmbus_channel *channel = fcopy_transaction.recv_channel;
+	struct hv_fcopy_hdr *fcopy_msg;
+
+	/*
+	 * Fake a CANCEL_FCOPY message for the user space daemon in case the
+	 * daemon is in the middle of copying some file. It doesn't matter if
+	 * there is already a message pending to be delivered to the user
+	 * space since we force fcopy_transaction.state to be HVUTIL_READY, so
+	 * the user space daemon's write() will fail with EINVAL (see
+	 * fcopy_on_msg()), and the daemon will reset the device by closing
+	 * and re-opening it.
+	 */
+	fcopy_msg = kzalloc(sizeof(*fcopy_msg), GFP_KERNEL);
+	if (!fcopy_msg)
+		return -ENOMEM;
+
+	tasklet_disable(&channel->callback_event);
+
+	fcopy_msg->operation = CANCEL_FCOPY;
+
+	hv_fcopy_cancel_work();
+
+	/* We don't care about the return value. */
+	hvutil_transport_send(hvt, fcopy_msg, sizeof(*fcopy_msg), NULL);
+
+	kfree(fcopy_msg);
+
+	fcopy_transaction.state = HVUTIL_READY;
+
+	/* tasklet_enable() will be called in hv_fcopy_pre_resume(). */
+	return 0;
+}
+
+int hv_fcopy_pre_resume(void)
+{
+	struct vmbus_channel *channel = fcopy_transaction.recv_channel;
+
+	tasklet_enable(&channel->callback_event);
+
+	return 0;
+}
+
 void hv_fcopy_deinit(void)
 {
 	fcopy_transaction.state = HVUTIL_DEVICE_DYING;
-	cancel_delayed_work_sync(&fcopy_timeout_work);
+
+	hv_fcopy_cancel_work();
+
 	hvutil_transport_destroy(hvt);
 }
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index ae7c028..e74b144 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -758,11 +758,50 @@ hv_kvp_init(struct hv_util_service *srv)
 	return 0;
 }
 
-void hv_kvp_deinit(void)
+static void hv_kvp_cancel_work(void)
 {
-	kvp_transaction.state = HVUTIL_DEVICE_DYING;
 	cancel_delayed_work_sync(&kvp_host_handshake_work);
 	cancel_delayed_work_sync(&kvp_timeout_work);
 	cancel_work_sync(&kvp_sendkey_work);
+}
+
+int hv_kvp_pre_suspend(void)
+{
+	struct vmbus_channel *channel = kvp_transaction.recv_channel;
+
+	tasklet_disable(&channel->callback_event);
+
+	/*
+	 * If there is a pending transaction, it's unnecessary to tell the host
+	 * that the transaction will fail, because that is implied when
+	 * util_suspend() calls vmbus_close() later.
+	 */
+	hv_kvp_cancel_work();
+
+	/*
+	 * Force the state to READY to handle the ICMSGTYPE_NEGOTIATE message
+	 * later. The user space daemon may get out of sync and its write()
+	 * may fail with EINVAL: this doesn't matter since the daemon will
+	 * reset the device by closing and re-opening it.
+	 */
+	kvp_transaction.state = HVUTIL_READY;
+	return 0;
+}
+
+int hv_kvp_pre_resume(void)
+{
+	struct vmbus_channel *channel = kvp_transaction.recv_channel;
+
+	tasklet_enable(&channel->callback_event);
+
+	return 0;
+}
+
+void hv_kvp_deinit(void)
+{
+	kvp_transaction.state = HVUTIL_DEVICE_DYING;
+
+	hv_kvp_cancel_work();
+
 	hvutil_transport_destroy(hvt);
 }
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 03b6454..1c75b38 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -379,10 +379,61 @@ hv_vss_init(struct hv_util_service *srv)
 	return 0;
 }
 
+static void hv_vss_cancel_work(void)
+{
+	cancel_delayed_work_sync(&vss_timeout_work);
+	cancel_work_sync(&vss_handle_request_work);
+}
+
+int hv_vss_pre_suspend(void)
+{
+	struct vmbus_channel *channel = vss_transaction.recv_channel;
+	struct hv_vss_msg *vss_msg;
+
+	/*
+	 * Fake a THAW message for the user space daemon in case the daemon
+	 * has frozen the file systems. It doesn't matter if there is already
+	 * a message pending to be delivered to the user space since we force
+	 * vss_transaction.state to be HVUTIL_READY, so the user space daemon's
+	 * write() will fail with EINVAL (see vss_on_msg()), and the daemon
+	 * will reset the device by closing and re-opening it.
+	 */
+	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
+	if (!vss_msg)
+		return -ENOMEM;
+
+	tasklet_disable(&channel->callback_event);
+
+	vss_msg->vss_hdr.operation = VSS_OP_THAW;
+
+	/* Cancel any possible pending work. */
+	hv_vss_cancel_work();
+
+	/* We don't care about the return value. */
+	hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
+
+	kfree(vss_msg);
+
+	vss_transaction.state = HVUTIL_READY;
+
+	/* tasklet_enable() will be called in hv_vss_pre_resume(). */
+	return 0;
+}
+
+int hv_vss_pre_resume(void)
+{
+	struct vmbus_channel *channel = vss_transaction.recv_channel;
+
+	tasklet_enable(&channel->callback_event);
+
+	return 0;
+}
+
 void hv_vss_deinit(void)
 {
 	vss_transaction.state = HVUTIL_DEVICE_DYING;
-	cancel_delayed_work_sync(&vss_timeout_work);
-	cancel_work_sync(&vss_handle_request_work);
+
+	hv_vss_cancel_work();
+
 	hvutil_transport_destroy(hvt);
 }
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 296f909..92ee0fe 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -24,6 +24,10 @@
 
 #define SD_MAJOR	3
 #define SD_MINOR	0
+#define SD_MINOR_1	1
+#define SD_MINOR_2	2
+#define SD_VERSION_3_1	(SD_MAJOR << 16 | SD_MINOR_1)
+#define SD_VERSION_3_2	(SD_MAJOR << 16 | SD_MINOR_2)
 #define SD_VERSION	(SD_MAJOR << 16 | SD_MINOR)
 
 #define SD_MAJOR_1	1
@@ -50,8 +54,10 @@ static int sd_srv_version;
 static int ts_srv_version;
 static int hb_srv_version;
 
-#define SD_VER_COUNT 2
+#define SD_VER_COUNT 4
 static const int sd_versions[] = {
+	SD_VERSION_3_2,
+	SD_VERSION_3_1,
 	SD_VERSION,
 	SD_VERSION_1
 };
@@ -75,18 +81,56 @@ static const int fw_versions[] = {
 	UTIL_WS2K8_FW_VERSION
 };
 
+/*
+ * Send the "hibernate" udev event in a thread context.
+ */
+struct hibernate_work_context {
+	struct work_struct work;
+	struct hv_device *dev;
+};
+
+static struct hibernate_work_context hibernate_context;
+static bool hibernation_supported;
+
+static void send_hibernate_uevent(struct work_struct *work)
+{
+	char *uevent_env[2] = { "EVENT=hibernate", NULL };
+	struct hibernate_work_context *ctx;
+
+	ctx = container_of(work, struct hibernate_work_context, work);
+
+	kobject_uevent_env(&ctx->dev->device.kobj, KOBJ_CHANGE, uevent_env);
+
+	pr_info("Sent hibernation uevent\n");
+}
+
+static int hv_shutdown_init(struct hv_util_service *srv)
+{
+	struct vmbus_channel *channel = srv->channel;
+
+	INIT_WORK(&hibernate_context.work, send_hibernate_uevent);
+	hibernate_context.dev = channel->device_obj;
+
+	hibernation_supported = hv_is_hibernation_supported();
+
+	return 0;
+}
+
 static void shutdown_onchannelcallback(void *context);
 static struct hv_util_service util_shutdown = {
 	.util_cb = shutdown_onchannelcallback,
+	.util_init = hv_shutdown_init,
 };
 
 static int hv_timesync_init(struct hv_util_service *srv);
+static int hv_timesync_pre_suspend(void);
 static void hv_timesync_deinit(void);
 
 static void timesync_onchannelcallback(void *context);
 static struct hv_util_service util_timesynch = {
 	.util_cb = timesync_onchannelcallback,
 	.util_init = hv_timesync_init,
+	.util_pre_suspend = hv_timesync_pre_suspend,
 	.util_deinit = hv_timesync_deinit,
 };
 
@@ -98,18 +142,24 @@ static struct hv_util_service util_heartbeat = {
 static struct hv_util_service util_kvp = {
 	.util_cb = hv_kvp_onchannelcallback,
 	.util_init = hv_kvp_init,
+	.util_pre_suspend = hv_kvp_pre_suspend,
+	.util_pre_resume = hv_kvp_pre_resume,
 	.util_deinit = hv_kvp_deinit,
 };
 
 static struct hv_util_service util_vss = {
 	.util_cb = hv_vss_onchannelcallback,
 	.util_init = hv_vss_init,
+	.util_pre_suspend = hv_vss_pre_suspend,
+	.util_pre_resume = hv_vss_pre_resume,
 	.util_deinit = hv_vss_deinit,
 };
 
 static struct hv_util_service util_fcopy = {
 	.util_cb = hv_fcopy_onchannelcallback,
 	.util_init = hv_fcopy_init,
+	.util_pre_suspend = hv_fcopy_pre_suspend,
+	.util_pre_resume = hv_fcopy_pre_resume,
 	.util_deinit = hv_fcopy_deinit,
 };
 
@@ -118,17 +168,27 @@ static void perform_shutdown(struct work_struct *dummy)
 	orderly_poweroff(true);
 }
 
+static void perform_restart(struct work_struct *dummy)
+{
+	orderly_reboot();
+}
+
 /*
  * Perform the shutdown operation in a thread context.
  */
 static DECLARE_WORK(shutdown_work, perform_shutdown);
 
+/*
+ * Perform the restart operation in a thread context.
+ */
+static DECLARE_WORK(restart_work, perform_restart);
+
 static void shutdown_onchannelcallback(void *context)
 {
 	struct vmbus_channel *channel = context;
+	struct work_struct *work = NULL;
 	u32 recvlen;
 	u64 requestid;
-	bool execute_shutdown = false;
 	u8  *shut_txf_buf = util_shutdown.recv_buffer;
 
 	struct shutdown_msg_data *shutdown_msg;
@@ -157,19 +217,37 @@ static void shutdown_onchannelcallback(void *context)
 					sizeof(struct vmbuspipe_hdr) +
 					sizeof(struct icmsg_hdr)];
 
+			/*
+			 * shutdown_msg->flags can be 0 (shut down), 2 (reboot),
+			 * or 4 (hibernate). Bit 0 may additionally be set, which
+			 * means performing the request by force. Linux always
+			 * tries to perform the request by force.
+			 */
 			switch (shutdown_msg->flags) {
 			case 0:
 			case 1:
 				icmsghdrp->status = HV_S_OK;
-				execute_shutdown = true;
-
+				work = &shutdown_work;
 				pr_info("Shutdown request received -"
 					    " graceful shutdown initiated\n");
 				break;
+			case 2:
+			case 3:
+				icmsghdrp->status = HV_S_OK;
+				work = &restart_work;
+				pr_info("Restart request received -"
+					    " graceful restart initiated\n");
+				break;
+			case 4:
+			case 5:
+				pr_info("Hibernation request received\n");
+				icmsghdrp->status = hibernation_supported ?
+					HV_S_OK : HV_E_FAIL;
+				if (hibernation_supported)
+					work = &hibernate_context.work;
+				break;
 			default:
 				icmsghdrp->status = HV_E_FAIL;
-				execute_shutdown = false;
-
 				pr_info("Shutdown request received -"
 					    " Invalid request\n");
 				break;
@@ -184,8 +262,8 @@ static void shutdown_onchannelcallback(void *context)
 				       VM_PKT_DATA_INBAND, 0);
 	}
 
-	if (execute_shutdown == true)
-		schedule_work(&shutdown_work);
+	if (work)
+		schedule_work(work);
 }
 
 /*
@@ -441,6 +519,44 @@ static int util_remove(struct hv_device *dev)
 	return 0;
 }
 
+/*
+ * When we're in util_suspend(), all the userspace processes have been frozen
+ * (refer to hibernate() -> freeze_processes()). The userspace is thawed only
+ * after the whole resume procedure, including util_resume(), finishes.
+ */
+static int util_suspend(struct hv_device *dev)
+{
+	struct hv_util_service *srv = hv_get_drvdata(dev);
+	int ret = 0;
+
+	if (srv->util_pre_suspend) {
+		ret = srv->util_pre_suspend();
+		if (ret)
+			return ret;
+	}
+
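+	/* Close the channel; util_resume() will re-open it. */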
+	vmbus_close(dev->channel);
+
+	return 0;
+}
+
+static int util_resume(struct hv_device *dev)
+{
+	struct hv_util_service *srv = hv_get_drvdata(dev);
+	int ret = 0;
+
+	if (srv->util_pre_resume) {
+		ret = srv->util_pre_resume();
+		if (ret)
+			return ret;
+	}
+
+	ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
+			 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+			 dev->channel);
+	return ret;
+}
+
 static const struct hv_vmbus_device_id id_table[] = {
 	/* Shutdown guid */
 	{ HV_SHUTDOWN_GUID,
@@ -477,6 +593,8 @@ static  struct hv_driver util_drv = {
 	.id_table = id_table,
 	.probe =  util_probe,
 	.remove =  util_remove,
+	.suspend = util_suspend,
+	.resume =  util_resume,
 	.driver = {
 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 	},
@@ -546,11 +664,23 @@ static int hv_timesync_init(struct hv_util_service *srv)
 	return 0;
 }
 
+static void hv_timesync_cancel_work(void)
+{
+	cancel_work_sync(&adj_time_work);
+}
+
+static int hv_timesync_pre_suspend(void)
+{
+	hv_timesync_cancel_work();
+	return 0;
+}
+
 static void hv_timesync_deinit(void)
 {
 	if (hv_ptp_clock)
 		ptp_clock_unregister(hv_ptp_clock);
-	cancel_work_sync(&adj_time_work);
+
+	hv_timesync_cancel_work();
 }
 
 static int __init init_hyperv_utils(void)
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 20edcfd..f5fa3b3 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -352,14 +352,20 @@ void vmbus_on_msg_dpc(unsigned long data);
 
 int hv_kvp_init(struct hv_util_service *srv);
 void hv_kvp_deinit(void);
+int hv_kvp_pre_suspend(void);
+int hv_kvp_pre_resume(void);
 void hv_kvp_onchannelcallback(void *context);
 
 int hv_vss_init(struct hv_util_service *srv);
 void hv_vss_deinit(void);
+int hv_vss_pre_suspend(void);
+int hv_vss_pre_resume(void);
 void hv_vss_onchannelcallback(void *context);
 
 int hv_fcopy_init(struct hv_util_service *srv);
 void hv_fcopy_deinit(void);
+int hv_fcopy_pre_suspend(void);
+int hv_fcopy_pre_resume(void);
 void hv_fcopy_onchannelcallback(void *context);
 void vmbus_initiate_unload(bool crash);
 
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 4ef5a66..029378c 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1033,6 +1033,10 @@ void vmbus_on_msg_dpc(unsigned long data)
 	}
 
 	entry = &channel_message_table[hdr->msgtype];
+
+	if (!entry->message_handler)
+		goto msg_handled;
+
 	if (entry->handler_type	== VMHT_BLOCKING) {
 		ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
 		if (ctx == NULL)
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 17583bf8..d4c8300 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -595,19 +595,18 @@ static int i8k_open_fs(struct inode *inode, struct file *file)
 	return single_open(file, i8k_proc_show, NULL);
 }
 
-static const struct file_operations i8k_fops = {
-	.owner		= THIS_MODULE,
-	.open		= i8k_open_fs,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.unlocked_ioctl	= i8k_ioctl,
+static const struct proc_ops i8k_proc_ops = {
+	.proc_open	= i8k_open_fs,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_ioctl	= i8k_ioctl,
 };
 
 static void __init i8k_init_procfs(void)
 {
 	/* Register the proc entry */
-	proc_create("i8k", 0, NULL, &i8k_fops);
+	proc_create("i8k", 0, NULL, &i8k_proc_ops);
 }
 
 static void __exit i8k_exit_procfs(void)
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 8a7732c..286d3cf 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -259,7 +259,7 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
 }
 
 static const struct scmi_device_id scmi_id_table[] = {
-	{ SCMI_PROTOCOL_SENSOR },
+	{ SCMI_PROTOCOL_SENSOR, "hwmon" },
 	{ },
 };
 MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index 14e1a53..3b05560 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -76,7 +76,6 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
 	struct device_node *node = pdev->dev.of_node;
 	struct hwspinlock_device *bank;
 	struct hwspinlock *hwlock;
-	struct resource *res;
 	void __iomem *io_base;
 	int num_locks, i, ret;
 	/* Only a single hwspinlock block device is supported */
@@ -85,13 +84,9 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
 	if (!node)
 		return -ENODEV;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
-	io_base = ioremap(res->start, resource_size(res));
-	if (!io_base)
-		return -ENOMEM;
+	io_base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(io_base))
+		return PTR_ERR(io_base);
 
 	/*
 	 * make sure the module is enabled and clocked before reading
@@ -101,7 +96,7 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
 	ret = pm_runtime_get_sync(&pdev->dev);
 	if (ret < 0) {
 		pm_runtime_put_noidle(&pdev->dev);
-		goto iounmap_base;
+		goto runtime_err;
 	}
 
 	/* Determine number of locks */
@@ -114,20 +109,21 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
 	 */
 	ret = pm_runtime_put(&pdev->dev);
 	if (ret < 0)
-		goto iounmap_base;
+		goto runtime_err;
 
 	/* one of the four lsb's must be set, and nothing else */
 	if (hweight_long(i & 0xf) != 1 || i > 8) {
 		ret = -EINVAL;
-		goto iounmap_base;
+		goto runtime_err;
 	}
 
 	num_locks = i * 32; /* actual number of locks in this device */
 
-	bank = kzalloc(struct_size(bank, lock, num_locks), GFP_KERNEL);
+	bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, num_locks),
+			    GFP_KERNEL);
 	if (!bank) {
 		ret = -ENOMEM;
-		goto iounmap_base;
+		goto runtime_err;
 	}
 
 	platform_set_drvdata(pdev, bank);
@@ -138,25 +134,21 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
 	ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
 						base_id, num_locks);
 	if (ret)
-		goto reg_fail;
+		goto runtime_err;
 
 	dev_dbg(&pdev->dev, "Registered %d locks with HwSpinlock core\n",
 		num_locks);
 
 	return 0;
 
-reg_fail:
-	kfree(bank);
-iounmap_base:
+runtime_err:
 	pm_runtime_disable(&pdev->dev);
-	iounmap(io_base);
 	return ret;
 }
 
 static int omap_hwspinlock_remove(struct platform_device *pdev)
 {
 	struct hwspinlock_device *bank = platform_get_drvdata(pdev);
-	void __iomem *io_base = bank->lock[0].priv - LOCK_BASE_OFFSET;
 	int ret;
 
 	ret = hwspin_lock_unregister(bank);
@@ -166,8 +158,6 @@ static int omap_hwspinlock_remove(struct platform_device *pdev)
 	}
 
 	pm_runtime_disable(&pdev->dev);
-	iounmap(io_base);
-	kfree(bank);
 
 	return 0;
 }
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index 6da7447..f0da544 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -12,7 +12,6 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 
 #include "hwspinlock_internal.h"
@@ -122,35 +121,12 @@ static int qcom_hwspinlock_probe(struct platform_device *pdev)
 							     regmap, field);
 	}
 
-	pm_runtime_enable(&pdev->dev);
-
-	ret = hwspin_lock_register(bank, &pdev->dev, &qcom_hwspinlock_ops,
-				   0, QCOM_MUTEX_NUM_LOCKS);
-	if (ret)
-		pm_runtime_disable(&pdev->dev);
-
-	return ret;
-}
-
-static int qcom_hwspinlock_remove(struct platform_device *pdev)
-{
-	struct hwspinlock_device *bank = platform_get_drvdata(pdev);
-	int ret;
-
-	ret = hwspin_lock_unregister(bank);
-	if (ret) {
-		dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
-		return ret;
-	}
-
-	pm_runtime_disable(&pdev->dev);
-
-	return 0;
+	return devm_hwspin_lock_register(&pdev->dev, bank, &qcom_hwspinlock_ops,
+					 0, QCOM_MUTEX_NUM_LOCKS);
 }
 
 static struct platform_driver qcom_hwspinlock_driver = {
 	.probe		= qcom_hwspinlock_probe,
-	.remove		= qcom_hwspinlock_remove,
 	.driver		= {
 		.name	= "qcom_hwspinlock",
 		.of_match_table = qcom_hwspinlock_of_match,
diff --git a/drivers/hwspinlock/sirf_hwspinlock.c b/drivers/hwspinlock/sirf_hwspinlock.c
index 1f625cd..823d3c4 100644
--- a/drivers/hwspinlock/sirf_hwspinlock.c
+++ b/drivers/hwspinlock/sirf_hwspinlock.c
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/io.h>
-#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/hwspinlock.h>
@@ -56,7 +55,7 @@ static int sirf_hwspinlock_probe(struct platform_device *pdev)
 {
 	struct sirf_hwspinlock *hwspin;
 	struct hwspinlock *hwlock;
-	int idx, ret;
+	int idx;
 
 	if (!pdev->dev.of_node)
 		return -ENODEV;
@@ -69,9 +68,9 @@ static int sirf_hwspinlock_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	/* retrieve io base */
-	hwspin->io_base = of_iomap(pdev->dev.of_node, 0);
-	if (!hwspin->io_base)
-		return -ENOMEM;
+	hwspin->io_base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(hwspin->io_base))
+		return PTR_ERR(hwspin->io_base);
 
 	for (idx = 0; idx < HW_SPINLOCK_NUMBER; idx++) {
 		hwlock = &hwspin->bank.lock[idx];
@@ -80,39 +79,9 @@ static int sirf_hwspinlock_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, hwspin);
 
-	pm_runtime_enable(&pdev->dev);
-
-	ret = hwspin_lock_register(&hwspin->bank, &pdev->dev,
-				   &sirf_hwspinlock_ops, 0,
-				   HW_SPINLOCK_NUMBER);
-	if (ret)
-		goto reg_failed;
-
-	return 0;
-
-reg_failed:
-	pm_runtime_disable(&pdev->dev);
-	iounmap(hwspin->io_base);
-
-	return ret;
-}
-
-static int sirf_hwspinlock_remove(struct platform_device *pdev)
-{
-	struct sirf_hwspinlock *hwspin = platform_get_drvdata(pdev);
-	int ret;
-
-	ret = hwspin_lock_unregister(&hwspin->bank);
-	if (ret) {
-		dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
-		return ret;
-	}
-
-	pm_runtime_disable(&pdev->dev);
-
-	iounmap(hwspin->io_base);
-
-	return 0;
+	return devm_hwspin_lock_register(&pdev->dev, &hwspin->bank,
+					 &sirf_hwspinlock_ops, 0,
+					 HW_SPINLOCK_NUMBER);
 }
 
 static const struct of_device_id sirf_hwpinlock_ids[] = {
@@ -123,7 +92,6 @@ MODULE_DEVICE_TABLE(of, sirf_hwpinlock_ids);
 
 static struct platform_driver sirf_hwspinlock_driver = {
 	.probe = sirf_hwspinlock_probe,
-	.remove = sirf_hwspinlock_remove,
 	.driver = {
 		.name = "atlas7_hwspinlock",
 		.of_match_table = of_match_ptr(sirf_hwpinlock_ids),
diff --git a/drivers/hwspinlock/stm32_hwspinlock.c b/drivers/hwspinlock/stm32_hwspinlock.c
index c8eacf4..3ad0ce0 100644
--- a/drivers/hwspinlock/stm32_hwspinlock.c
+++ b/drivers/hwspinlock/stm32_hwspinlock.c
@@ -58,12 +58,10 @@ static int stm32_hwspinlock_probe(struct platform_device *pdev)
 {
 	struct stm32_hwspinlock *hw;
 	void __iomem *io_base;
-	struct resource *res;
 	size_t array_size;
 	int i, ret;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	io_base = devm_ioremap_resource(&pdev->dev, res);
+	io_base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(io_base))
 		return PTR_ERR(io_base);
 
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6a0aa76..2ddca08 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -367,7 +367,8 @@
 
 config I2C_ALTERA
 	tristate "Altera Soft IP I2C"
-	depends on (ARCH_SOCFPGA || NIOS2) && OF
+	depends on ARCH_SOCFPGA || NIOS2 || COMPILE_TEST
+	depends on OF
 	help
 	  If you say yes to this option, support will be included for the
 	  Altera Soft IP I2C interfaces on SoCFPGA and Nios2 architectures.
@@ -387,7 +388,7 @@
 
 config I2C_AT91
 	tristate "Atmel AT91 I2C Two-Wire interface (TWI)"
-	depends on ARCH_AT91
+	depends on ARCH_AT91 || COMPILE_TEST
 	help
 	  This supports the use of the I2C interface on Atmel AT91
 	  processors.
@@ -440,7 +441,8 @@
 
 config I2C_BCM2835
 	tristate "Broadcom BCM2835 I2C controller"
-	depends on ARCH_BCM2835 || ARCH_BRCMSTB
+	depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
+	depends on COMMON_CLK
 	help
 	  If you say yes to this option, support will be included for the
 	  BCM2835 I2C controller.
@@ -463,8 +465,8 @@
 
 config I2C_BCM_KONA
 	tristate "BCM Kona I2C adapter"
-	depends on ARCH_BCM_MOBILE
-	default y
+	depends on ARCH_BCM_MOBILE || COMPILE_TEST
+	default y if ARCH_BCM_MOBILE
 	help
 	  If you say yes to this option, support will be included for the
 	  I2C interface on the Broadcom Kona family of processors.
@@ -511,7 +513,7 @@
 
 config I2C_DAVINCI
 	tristate "DaVinci I2C driver"
-	depends on ARCH_DAVINCI || ARCH_KEYSTONE
+	depends on ARCH_DAVINCI || ARCH_KEYSTONE || COMPILE_TEST
 	help
 	  Support for TI DaVinci I2C controller driver.
 
@@ -572,7 +574,7 @@
 
 config I2C_DIGICOLOR
 	tristate "Conexant Digicolor I2C driver"
-	depends on ARCH_DIGICOLOR
+	depends on ARCH_DIGICOLOR || COMPILE_TEST
 	help
 	  Support for Conexant Digicolor SoCs (CX92755) I2C controller driver.
 
@@ -610,11 +612,12 @@
 	  I2C interface on the Renesas Electronics EM/EV family of processors.
 
 config I2C_EXYNOS5
-	tristate "Exynos5 high-speed I2C driver"
-	depends on ARCH_EXYNOS && OF
-	default y
+	tristate "Exynos high-speed I2C driver"
+	depends on OF
+	depends on ARCH_EXYNOS || COMPILE_TEST
+	default y if ARCH_EXYNOS
 	help
-	  High-speed I2C controller on Exynos5 based Samsung SoCs.
+	  High-speed I2C controller on Exynos5 and newer Samsung SoCs.
 
 config I2C_GPIO
 	tristate "GPIO-based bitbanging I2C"
@@ -634,7 +637,7 @@
 
 config I2C_HIGHLANDER
 	tristate "Highlander FPGA SMBus interface"
-	depends on SH_HIGHLANDER
+	depends on SH_HIGHLANDER || COMPILE_TEST
 	help
 	  If you say yes to this option, support will be included for
 	  the SMBus interface located in the FPGA on various Highlander
@@ -686,7 +689,7 @@
 
 config I2C_IOP3XX
 	tristate "Intel IOPx3xx and IXP4xx on-chip I2C interface"
-	depends on ARCH_IOP32X || ARCH_IXP4XX
+	depends on ARCH_IOP32X || ARCH_IXP4XX || COMPILE_TEST
 	help
 	  Say Y here if you want to use the IIC bus controller on
 	  the Intel IOPx3xx I/O Processors or IXP4xx Network Processors.
@@ -726,6 +729,7 @@
 config I2C_MESON
 	tristate "Amlogic Meson I2C controller"
 	depends on ARCH_MESON || COMPILE_TEST
+	depends on COMMON_CLK
 	help
 	  If you say yes to this option, support will be included for the
 	  I2C interface on the Amlogic Meson family of SoCs.
@@ -759,7 +763,7 @@
 
 config I2C_MV64XXX
 	tristate "Marvell mv64xxx I2C Controller"
-	depends on MV64X60 || PLAT_ORION || ARCH_SUNXI || ARCH_MVEBU
+	depends on MV64X60 || PLAT_ORION || ARCH_SUNXI || ARCH_MVEBU || COMPILE_TEST
 	help
 	  If you say yes to this option, support will be included for the
 	  built-in I2C interface on the Marvell 64xxx line of host bridges.
@@ -770,7 +774,7 @@
 
 config I2C_MXS
 	tristate "Freescale i.MX28 I2C interface"
-	depends on SOC_IMX28
+	depends on SOC_IMX28 || COMPILE_TEST
 	select STMP_DEVICE
 	help
 	  Say Y here if you want to use the I2C bus controller on
@@ -799,7 +803,7 @@
 
 config I2C_OMAP
 	tristate "OMAP I2C adapter"
-	depends on ARCH_OMAP || ARCH_K3
+	depends on ARCH_OMAP || ARCH_K3 || COMPILE_TEST
 	default y if MACH_OMAP_H3 || MACH_OMAP_OSK
 	help
 	  If you say yes to this option, support will be included for the
@@ -833,7 +837,7 @@
 
 config I2C_PMCMSP
 	tristate "PMC MSP I2C TWI Controller"
-	depends on PMC_MSP
+	depends on PMC_MSP || COMPILE_TEST
 	help
 	  This driver supports the PMC TWI controller on MSP devices.
 
@@ -842,7 +846,7 @@
 
 config I2C_PNX
 	tristate "I2C bus support for Philips PNX and NXP LPC targets"
-	depends on ARCH_LPC32XX
+	depends on ARCH_LPC32XX || COMPILE_TEST
 	help
 	  This driver supports the Philips IP3204 I2C IP block master and/or
 	  slave controller
@@ -863,7 +867,7 @@
 
 config I2C_PXA
 	tristate "Intel PXA2XX I2C adapter"
-	depends on ARCH_PXA || ARCH_MMP || ARCH_MVEBU || (X86_32 && PCI && OF)
+	depends on ARCH_PXA || ARCH_MMP || ARCH_MVEBU || (X86_32 && PCI && OF) || COMPILE_TEST
 	help
 	  If you have devices in the PXA I2C bus, say yes to this option.
 	  This driver can also be built as a module.  If so, the module
@@ -932,11 +936,11 @@
 	  respective Kconfig file.
 
 config I2C_S3C2410
-	tristate "S3C2410 I2C Driver"
-	depends on HAVE_S3C2410_I2C
+	tristate "S3C/Exynos I2C Driver"
+	depends on HAVE_S3C2410_I2C || COMPILE_TEST
 	help
 	  Say Y here to include support for I2C controller in the
-	  Samsung SoCs.
+	  Samsung SoCs (S3C, S5Pv210, Exynos).
 
 config I2C_SH7760
 	tristate "Renesas SH7760 I2C Controller"
@@ -971,7 +975,7 @@
 
 config I2C_SIRF
 	tristate "CSR SiRFprimaII I2C interface"
-	depends on ARCH_SIRF
+	depends on ARCH_SIRF || COMPILE_TEST
 	help
 	  If you say yes to this option, support will be included for the
 	  CSR SiRFprimaII I2C interface.
@@ -981,14 +985,14 @@
 
 config I2C_SPRD
 	tristate "Spreadtrum I2C interface"
-	depends on I2C=y && ARCH_SPRD
+	depends on I2C=y && (ARCH_SPRD || COMPILE_TEST)
 	help
 	  If you say yes to this option, support will be included for the
 	  Spreadtrum I2C interface.
 
 config I2C_ST
 	tristate "STMicroelectronics SSC I2C support"
-	depends on ARCH_STI
+	depends on ARCH_STI || COMPILE_TEST
 	help
 	  Enable this option to add support for STMicroelectronics SoCs
 	  hardware SSC (Synchronous Serial Controller) as an I2C controller.
@@ -1019,7 +1023,7 @@
 
 config I2C_STU300
 	tristate "ST Microelectronics DDC I2C interface"
-	depends on MACH_U300
+	depends on MACH_U300 || COMPILE_TEST
 	default y if MACH_U300
 	help
 	  If you say yes to this option, support will be included for the
@@ -1055,15 +1059,16 @@
 
 config I2C_TEGRA
 	tristate "NVIDIA Tegra internal I2C controller"
-	depends on ARCH_TEGRA
+	depends on ARCH_TEGRA || (COMPILE_TEST && (ARC || ARM || ARM64 || M68K || RISCV || SUPERH || SPARC))
+	# COMPILE_TEST needs architectures with readsX()/writesX() primitives
 	help
 	  If you say yes to this option, support will be included for the
 	  I2C controller embedded in NVIDIA Tegra SOCs
 
 config I2C_TEGRA_BPMP
 	tristate "NVIDIA Tegra BPMP I2C controller"
-	depends on TEGRA_BPMP
-	default y
+	depends on TEGRA_BPMP || COMPILE_TEST
+	default y if TEGRA_BPMP
 	help
 	  If you say yes to this option, support will be included for the I2C
 	  controller embedded in NVIDIA Tegra SoCs accessed via the BPMP.
@@ -1101,7 +1106,7 @@
 
 config I2C_WMT
 	tristate "Wondermedia WM8xxx SoC I2C bus support"
-	depends on ARCH_VT8500
+	depends on ARCH_VT8500 || COMPILE_TEST
 	help
 	  Say yes if you want to support the I2C bus on Wondermedia 8xxx-series
 	  SoCs.
@@ -1142,7 +1147,7 @@
 
 config I2C_XLR
 	tristate "Netlogic XLR and Sigma Designs I2C support"
-	depends on CPU_XLR || ARCH_TANGO
+	depends on CPU_XLR || ARCH_TANGO || COMPILE_TEST
 	help
 	  This driver enables support for the on-chip I2C interface of
 	  the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs.
@@ -1202,46 +1207,12 @@
 	  This supports parallel port I2C adapters such as the ones made by
 	  Philips or Velleman, Analog Devices evaluation boards, and more.
 	  Basically any adapter using the parallel port as an I2C bus with
-	  no extra chipset is supported by this driver, or could be.
-
-	  This driver is a replacement for (and was inspired by) an older
-	  driver named i2c-philips-par.  The new driver supports more devices,
-	  and makes it easier to add support for new devices.
-
-	  An adapter type parameter is now mandatory.  Please read the file
-	  Documentation/i2c/busses/i2c-parport.rst for details.
-
-	  Another driver exists, named i2c-parport-light, which doesn't depend
-	  on the parport driver.  This is meant for embedded systems. Don't say
-	  Y here if you intend to say Y or M there.
+	  no extra chipset is supported by this driver, or could be. Please
+	  read the file Documentation/i2c/busses/i2c-parport.rst for details.
 
 	  This support is also available as a module.  If so, the module
 	  will be called i2c-parport.
 
-config I2C_PARPORT_LIGHT
-	tristate "Parallel port adapter (light)"
-	select I2C_ALGOBIT
-	select I2C_SMBUS
-	help
-	  This supports parallel port I2C adapters such as the ones made by
-	  Philips or Velleman, Analog Devices evaluation boards, and more.
-	  Basically any adapter using the parallel port as an I2C bus with
-	  no extra chipset is supported by this driver, or could be.
-
-	  This driver is a light version of i2c-parport.  It doesn't depend
-	  on the parport driver, and uses direct I/O access instead.  This
-	  might be preferred on embedded systems where wasting memory for
-	  the clean but heavy parport handling is not an option.  The
-	  drawback is a reduced portability and the impossibility to
-	  daisy-chain other parallel port devices.
-
-	  Don't say Y here if you said Y or M to i2c-parport.  Saying M to
-	  both is possible but both modules should not be loaded at the same
-	  time.
-
-	  This support is also available as a module.  If so, the module
-	  will be called i2c-parport-light.
-
 config I2C_ROBOTFUZZ_OSIF
 	tristate "RobotFuzz Open Source InterFace USB adapter"
 	depends on USB
@@ -1328,7 +1299,7 @@
 
 config I2C_MLXCPLD
 	tristate "Mellanox I2C driver"
-	depends on X86_64
+	depends on X86_64 || COMPILE_TEST
 	help
 	  This exposes the Mellanox platform I2C busses to the linux I2C layer
 	  for X86 based systems.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 3ab8aeb..25d6088 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -128,7 +128,6 @@
 obj-$(CONFIG_I2C_DIOLAN_U2C)	+= i2c-diolan-u2c.o
 obj-$(CONFIG_I2C_DLN2)		+= i2c-dln2.o
 obj-$(CONFIG_I2C_PARPORT)	+= i2c-parport.o
-obj-$(CONFIG_I2C_PARPORT_LIGHT)	+= i2c-parport-light.o
 obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF)	+= i2c-robotfuzz-osif.o
 obj-$(CONFIG_I2C_TAOS_EVM)	+= i2c-taos-evm.o
 obj-$(CONFIG_I2C_TINY_USB)	+= i2c-tiny-usb.o
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
index 5137e62..3da1a8a 100644
--- a/drivers/i2c/busses/i2c-at91-core.c
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -66,55 +66,26 @@ static struct at91_twi_pdata at91rm9200_config = {
 	.clk_max_div = 5,
 	.clk_offset = 3,
 	.has_unre_flag = true,
-	.has_alt_cmd = false,
-	.has_hold_field = false,
-	.has_dig_filtr = false,
-	.has_adv_dig_filtr = false,
-	.has_ana_filtr = false,
 };
 
 static struct at91_twi_pdata at91sam9261_config = {
 	.clk_max_div = 5,
 	.clk_offset = 4,
-	.has_unre_flag = false,
-	.has_alt_cmd = false,
-	.has_hold_field = false,
-	.has_dig_filtr = false,
-	.has_adv_dig_filtr = false,
-	.has_ana_filtr = false,
 };
 
 static struct at91_twi_pdata at91sam9260_config = {
 	.clk_max_div = 7,
 	.clk_offset = 4,
-	.has_unre_flag = false,
-	.has_alt_cmd = false,
-	.has_hold_field = false,
-	.has_dig_filtr = false,
-	.has_adv_dig_filtr = false,
-	.has_ana_filtr = false,
 };
 
 static struct at91_twi_pdata at91sam9g20_config = {
 	.clk_max_div = 7,
 	.clk_offset = 4,
-	.has_unre_flag = false,
-	.has_alt_cmd = false,
-	.has_hold_field = false,
-	.has_dig_filtr = false,
-	.has_adv_dig_filtr = false,
-	.has_ana_filtr = false,
 };
 
 static struct at91_twi_pdata at91sam9g10_config = {
 	.clk_max_div = 7,
 	.clk_offset = 4,
-	.has_unre_flag = false,
-	.has_alt_cmd = false,
-	.has_hold_field = false,
-	.has_dig_filtr = false,
-	.has_adv_dig_filtr = false,
-	.has_ana_filtr = false,
 };
 
 static const struct platform_device_id at91_twi_devtypes[] = {
@@ -142,23 +113,13 @@ static const struct platform_device_id at91_twi_devtypes[] = {
 static struct at91_twi_pdata at91sam9x5_config = {
 	.clk_max_div = 7,
 	.clk_offset = 4,
-	.has_unre_flag = false,
-	.has_alt_cmd = false,
-	.has_hold_field = false,
-	.has_dig_filtr = false,
-	.has_adv_dig_filtr = false,
-	.has_ana_filtr = false,
 };
 
 static struct at91_twi_pdata sama5d4_config = {
 	.clk_max_div = 7,
 	.clk_offset = 4,
-	.has_unre_flag = false,
-	.has_alt_cmd = false,
 	.has_hold_field = true,
 	.has_dig_filtr = true,
-	.has_adv_dig_filtr = false,
-	.has_ana_filtr = false,
 };
 
 static struct at91_twi_pdata sama5d2_config = {
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 9d71ce1..1105aee 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -208,6 +208,7 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
 
 	isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
 	cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
+	id->err_status = 0;
 
 	/* Handling nack and arbitration lost interrupt */
 	if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) {
@@ -241,10 +242,17 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
 			    !id->bus_hold_flag)
 				cdns_i2c_clear_bus_hold(id);
 
-			*(id->p_recv_buf)++ =
-				cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
-			id->recv_count--;
-			id->curr_recv_count--;
+			if (id->recv_count > 0) {
+				*(id->p_recv_buf)++ =
+					cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
+				id->recv_count--;
+				id->curr_recv_count--;
+			} else {
+				dev_err(id->adap.dev.parent,
+					"xfer_size reg rollover. xfer aborted!\n");
+				id->err_status |= CDNS_I2C_IXR_TO;
+				break;
+			}
 
 			if (cdns_is_holdquirk(id, hold_quirk))
 				break;
@@ -342,7 +350,7 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
 	}
 
 	/* Update the status for errors */
-	id->err_status = isr_status & CDNS_I2C_IXR_ERR_INTR_MASK;
+	id->err_status |= isr_status & CDNS_I2C_IXR_ERR_INTR_MASK;
 	if (id->err_status)
 		status = IRQ_HANDLED;
 
@@ -500,7 +508,7 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap)
 	cdns_i2c_writereg(regval, CDNS_I2C_CR_OFFSET);
 	/* Update the transfercount register to zero */
 	cdns_i2c_writereg(0, CDNS_I2C_XFER_SIZE_OFFSET);
-	/* Clear the interupt status register */
+	/* Clear the interrupt status register */
 	regval = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
 	cdns_i2c_writereg(regval, CDNS_I2C_ISR_OFFSET);
 	/* Clear the status register */
@@ -921,17 +929,18 @@ static int cdns_i2c_probe(struct platform_device *pdev)
 
 	id->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(id->clk)) {
-		dev_err(&pdev->dev, "input clock not found.\n");
+		if (PTR_ERR(id->clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "input clock not found.\n");
 		return PTR_ERR(id->clk);
 	}
 	ret = clk_prepare_enable(id->clk);
 	if (ret)
 		dev_err(&pdev->dev, "Unable to enable clock.\n");
 
-	pm_runtime_enable(id->dev);
 	pm_runtime_set_autosuspend_delay(id->dev, CNDS_I2C_PM_TIMEOUT);
 	pm_runtime_use_autosuspend(id->dev);
 	pm_runtime_set_active(id->dev);
+	pm_runtime_enable(id->dev);
 
 	id->clk_rate_change_nb.notifier_call = cdns_i2c_clk_notifier_cb;
 	if (clk_notifier_register(id->clk, &id->clk_rate_change_nb))
@@ -980,8 +989,8 @@ static int cdns_i2c_probe(struct platform_device *pdev)
 
 err_clk_dis:
 	clk_disable_unprepare(id->clk);
-	pm_runtime_set_suspended(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
 	return ret;
 }
 
@@ -997,10 +1006,13 @@ static int cdns_i2c_remove(struct platform_device *pdev)
 {
 	struct cdns_i2c *id = platform_get_drvdata(pdev);
 
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+
 	i2c_del_adapter(&id->adap);
 	clk_notifier_unregister(id->clk, &id->clk_rate_change_nb);
 	clk_disable_unprepare(id->clk);
-	pm_runtime_disable(&pdev->dev);
 
 	return 0;
 }
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index b8fde61..35e55fe 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -388,9 +388,9 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
 	 */
 	if (acpi_dev_present("INT33FE", NULL, -1)) {
 		board_info.irq = adap->client_irq;
-		adap->client = i2c_new_device(&adap->adapter, &board_info);
-		if (!adap->client) {
-			ret = -ENOMEM;
+		adap->client = i2c_new_client_device(&adap->adapter, &board_info);
+		if (IS_ERR(adap->client)) {
+			ret = PTR_ERR(adap->client);
 			goto del_adapter;
 		}
 	}
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index 958161c..790ea3f 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -273,6 +273,7 @@ static int ec_i2c_probe(struct platform_device *pdev)
 	bus->adap.dev.parent = &pdev->dev;
 	bus->adap.dev.of_node = pdev->dev.of_node;
 	bus->adap.retries = I2C_MAX_RETRIES;
+	ACPI_COMPANION_SET(&bus->adap.dev, ACPI_COMPANION(&pdev->dev));
 
 	err = i2c_add_adapter(&bus->adap);
 	if (err)
@@ -298,7 +299,7 @@ static const struct of_device_id cros_ec_i2c_of_match[] = {
 MODULE_DEVICE_TABLE(of, cros_ec_i2c_of_match);
 
 static const struct acpi_device_id cros_ec_i2c_tunnel_acpi_id[] = {
-	{ "GOOG001A", 0 },
+	{ "GOOG0012", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, cros_ec_i2c_tunnel_acpi_id);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 16dd338..3b7d58c 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -130,6 +130,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
 	{ "APMC0D0F", 0 },
 	{ "HISI02A1", 0 },
 	{ "HISI02A2", 0 },
+	{ "HISI02A3", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index abfe309..803dad7 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -322,7 +322,7 @@ static int highlander_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
 		tmp |= (SMMR_MODE0 | SMMR_MODE1);
 		break;
 	default:
-		dev_err(dev->dev, "unsupported xfer size %d\n", dev->buf_len);
+		dev_err(dev->dev, "unsupported xfer size %zu\n", dev->buf_len);
 		return -EINVAL;
 	}
 
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f5e69fe..ca4f096 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -68,6 +68,7 @@
  * Elkhart Lake (PCH)		0x4b23	32	hard	yes	yes	yes
  * Tiger Lake-LP (PCH)		0xa0a3	32	hard	yes	yes	yes
  * Jasper Lake (SOC)		0x4da3	32	hard	yes	yes	yes
+ * Comet Lake-V (PCH)		0xa3a3	32	hard	yes	yes	yes
  *
  * Features supported by this driver:
  * Software PEC				no
@@ -244,6 +245,7 @@
 #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS	0xa223
 #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS	0xa2a3
 #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS		0xa323
+#define PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS		0xa3a3
 
 struct i801_mux_config {
 	char *gpio_chip;
@@ -1074,6 +1076,7 @@ static const struct pci_device_id i801_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
@@ -1142,7 +1145,7 @@ static void dmi_check_onboard_device(u8 type, const char *name,
 		memset(&info, 0, sizeof(struct i2c_board_info));
 		info.addr = dmi_devices[i].i2c_addr;
 		strlcpy(info.type, dmi_devices[i].i2c_type, I2C_NAME_SIZE);
-		i2c_new_device(adap, &info);
+		i2c_new_client_device(adap, &info);
 		break;
 	}
 }
@@ -1296,7 +1299,7 @@ static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
 	memset(&info, 0, sizeof(struct i2c_board_info));
 	info.addr = dell_lis3lv02d_devices[i].i2c_addr;
 	strlcpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
-	i2c_new_device(&priv->adapter, &info);
+	i2c_new_client_device(&priv->adapter, &info);
 }
 
 /* Register optional slaves */
@@ -1312,7 +1315,7 @@ static void i801_probe_optional_slaves(struct i801_priv *priv)
 		memset(&info, 0, sizeof(struct i2c_board_info));
 		info.addr = apanel_addr;
 		strlcpy(info.type, "fujitsu_apanel", I2C_NAME_SIZE);
-		i2c_new_device(&priv->adapter, &info);
+		i2c_new_client_device(&priv->adapter, &info);
 	}
 
 	if (dmi_name_in_vendors("FUJITSU"))
@@ -1742,6 +1745,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
 	case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
 	case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
+	case PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS:
 		priv->features |= FEATURE_BLOCK_PROC;
 		priv->features |= FEATURE_I2C_BLOCK_READ;
 		priv->features |= FEATURE_IRQ;
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 25dcd73..16a67a6 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -4,6 +4,7 @@
  *
  * Copyright (C) 2006 - 2009 Ingenic Semiconductor Inc.
  * Copyright (C) 2015 Imagination Technologies
+ * Copyright (C) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
  */
 
 #include <linux/bitops.h>
@@ -17,6 +18,7 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -55,6 +57,7 @@
 #define JZ4780_I2C_ACKGC	0x98
 #define JZ4780_I2C_ENSTA	0x9C
 #define JZ4780_I2C_SDAHD	0xD0
+#define X1000_I2C_SDAHD		0x7C
 
 #define JZ4780_I2C_CTRL_STPHLD		BIT(7)
 #define JZ4780_I2C_CTRL_SLVDIS		BIT(6)
@@ -73,6 +76,8 @@
 #define JZ4780_I2C_STA_TFNF		BIT(1)
 #define JZ4780_I2C_STA_ACT		BIT(0)
 
+#define X1000_I2C_DC_STOP		BIT(9)
+
 static const char * const jz4780_i2c_abrt_src[] = {
 	"ABRT_7B_ADDR_NOACK",
 	"ABRT_10ADDR1_NOACK",
@@ -130,18 +135,33 @@ static const char * const jz4780_i2c_abrt_src[] = {
 #define JZ4780_I2CFLCNT_ADJUST(n)	(((n) - 1) < 8 ? 8 : ((n) - 1))
 
 #define JZ4780_I2C_FIFO_LEN	16
-#define TX_LEVEL		3
-#define RX_LEVEL		(JZ4780_I2C_FIFO_LEN - TX_LEVEL - 1)
+
+#define X1000_I2C_FIFO_LEN	64
 
 #define JZ4780_I2C_TIMEOUT	300
 
 #define BUFSIZE 200
 
+enum ingenic_i2c_version {
+	ID_JZ4780,
+	ID_X1000,
+};
+
+/* ingenic_i2c_config: SoC-specific config data. */
+struct ingenic_i2c_config {
+	enum ingenic_i2c_version version;
+
+	int fifosize;
+	int tx_level;
+	int rx_level;
+};
+
 struct jz4780_i2c {
 	void __iomem		*iomem;
 	int			 irq;
 	struct clk		*clk;
 	struct i2c_adapter	 adap;
+	const struct ingenic_i2c_config *cdata;
 
 	/* lock to protect rbuf and wbuf between xfer_rd/wr and irq handler */
 	spinlock_t		lock;
@@ -340,11 +360,18 @@ static int jz4780_i2c_set_speed(struct jz4780_i2c *i2c)
 
 	if (hold_time >= 0) {
 		/*i2c hold time enable */
-		hold_time |= JZ4780_I2C_SDAHD_HDENB;
-		jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, hold_time);
+		if (i2c->cdata->version >= ID_X1000) {
+			jz4780_i2c_writew(i2c, X1000_I2C_SDAHD, hold_time);
+		} else {
+			hold_time |= JZ4780_I2C_SDAHD_HDENB;
+			jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, hold_time);
+		}
 	} else {
 		/* disable hold time */
-		jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, 0);
+		if (i2c->cdata->version >= ID_X1000)
+			jz4780_i2c_writew(i2c, X1000_I2C_SDAHD, 0);
+		else
+			jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, 0);
 	}
 
 	return 0;
@@ -359,9 +386,11 @@ static int jz4780_i2c_cleanup(struct jz4780_i2c *i2c)
 	spin_lock_irqsave(&i2c->lock, flags);
 
 	/* can send stop now if need */
-	tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
-	tmp &= ~JZ4780_I2C_CTRL_STPHLD;
-	jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+	if (i2c->cdata->version < ID_X1000) {
+		tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
+		tmp &= ~JZ4780_I2C_CTRL_STPHLD;
+		jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+	}
 
 	/* disable all interrupts first */
 	jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0);
@@ -399,11 +428,19 @@ static int jz4780_i2c_prepare(struct jz4780_i2c *i2c)
 	return jz4780_i2c_enable(i2c);
 }
 
-static void jz4780_i2c_send_rcmd(struct jz4780_i2c *i2c, int cmd_count)
+static void jz4780_i2c_send_rcmd(struct jz4780_i2c *i2c,
+								 int cmd_count,
+								 int cmd_left)
 {
 	int i;
 
-	for (i = 0; i < cmd_count; i++)
+	for (i = 0; i < cmd_count - 1; i++)
+		jz4780_i2c_writew(i2c, JZ4780_I2C_DC, JZ4780_I2C_DC_READ);
+
+	if ((cmd_left == 0) && (i2c->cdata->version >= ID_X1000))
+		jz4780_i2c_writew(i2c, JZ4780_I2C_DC,
+				JZ4780_I2C_DC_READ | X1000_I2C_DC_STOP);
+	else
 		jz4780_i2c_writew(i2c, JZ4780_I2C_DC, JZ4780_I2C_DC_READ);
 }
 
@@ -458,37 +495,44 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
 
 		rd_left = i2c->rd_total_len - i2c->rd_data_xfered;
 
-		if (rd_left <= JZ4780_I2C_FIFO_LEN)
+		if (rd_left <= i2c->cdata->fifosize)
 			jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, rd_left - 1);
 	}
 
 	if (intst & JZ4780_I2C_INTST_TXEMP) {
 		if (i2c->is_write == 0) {
 			int cmd_left = i2c->rd_total_len - i2c->rd_cmd_xfered;
-			int max_send = (JZ4780_I2C_FIFO_LEN - 1)
+			int max_send = (i2c->cdata->fifosize - 1)
 					 - (i2c->rd_cmd_xfered
 					 - i2c->rd_data_xfered);
 			int cmd_to_send = min(cmd_left, max_send);
 
 			if (i2c->rd_cmd_xfered != 0)
 				cmd_to_send = min(cmd_to_send,
-						  JZ4780_I2C_FIFO_LEN
-						  - TX_LEVEL - 1);
+						  i2c->cdata->fifosize
+						  - i2c->cdata->tx_level - 1);
 
 			if (cmd_to_send) {
-				jz4780_i2c_send_rcmd(i2c, cmd_to_send);
 				i2c->rd_cmd_xfered += cmd_to_send;
+				cmd_left = i2c->rd_total_len -
+						i2c->rd_cmd_xfered;
+				jz4780_i2c_send_rcmd(i2c,
+						cmd_to_send, cmd_left);
+
 			}
 
-			cmd_left = i2c->rd_total_len - i2c->rd_cmd_xfered;
 			if (cmd_left == 0) {
 				intmsk = jz4780_i2c_readw(i2c, JZ4780_I2C_INTM);
 				intmsk &= ~JZ4780_I2C_INTM_MTXEMP;
 				jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, intmsk);
 
-				tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
-				tmp &= ~JZ4780_I2C_CTRL_STPHLD;
-				jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+				if (i2c->cdata->version < ID_X1000) {
+					tmp = jz4780_i2c_readw(i2c,
+							JZ4780_I2C_CTRL);
+					tmp &= ~JZ4780_I2C_CTRL_STPHLD;
+					jz4780_i2c_writew(i2c,
+							JZ4780_I2C_CTRL, tmp);
+				}
 			}
 		} else {
 			unsigned short data;
@@ -497,23 +541,26 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
 			i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
 
 			while ((i2c_sta & JZ4780_I2C_STA_TFNF) &&
-			       (i2c->wt_len > 0)) {
+					(i2c->wt_len > 0)) {
 				i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
 				data = *i2c->wbuf;
 				data &= ~JZ4780_I2C_DC_READ;
-				jz4780_i2c_writew(i2c, JZ4780_I2C_DC,
-						  data);
+				if ((!i2c->stop_hold) && (i2c->cdata->version >=
+						ID_X1000))
+					data |= X1000_I2C_DC_STOP;
+				jz4780_i2c_writew(i2c, JZ4780_I2C_DC, data);
 				i2c->wbuf++;
 				i2c->wt_len--;
 			}
 
 			if (i2c->wt_len == 0) {
-				if (!i2c->stop_hold) {
+				if ((!i2c->stop_hold) && (i2c->cdata->version <
+						ID_X1000)) {
 					tmp = jz4780_i2c_readw(i2c,
-							       JZ4780_I2C_CTRL);
+							JZ4780_I2C_CTRL);
 					tmp &= ~JZ4780_I2C_CTRL_STPHLD;
-					jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL,
-							  tmp);
+					jz4780_i2c_writew(i2c,
+							JZ4780_I2C_CTRL, tmp);
 				}
 
 				jz4780_i2c_trans_done(i2c);
@@ -567,20 +614,22 @@ static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c,
 	i2c->rd_data_xfered = 0;
 	i2c->rd_cmd_xfered = 0;
 
-	if (len <= JZ4780_I2C_FIFO_LEN)
+	if (len <= i2c->cdata->fifosize)
 		jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, len - 1);
 	else
-		jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, RX_LEVEL);
+		jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, i2c->cdata->rx_level);
 
-	jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, TX_LEVEL);
+	jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, i2c->cdata->tx_level);
 
 	jz4780_i2c_writew(i2c, JZ4780_I2C_INTM,
 			  JZ4780_I2C_INTM_MRXFL | JZ4780_I2C_INTM_MTXEMP
 			  | JZ4780_I2C_INTM_MTXABT | JZ4780_I2C_INTM_MRXOF);
 
-	tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
-	tmp |= JZ4780_I2C_CTRL_STPHLD;
-	jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+	if (i2c->cdata->version < ID_X1000) {
+		tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
+		tmp |= JZ4780_I2C_CTRL_STPHLD;
+		jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+	}
 
 	spin_unlock_irqrestore(&i2c->lock, flags);
 
@@ -626,14 +675,16 @@ static inline int jz4780_i2c_xfer_write(struct jz4780_i2c *i2c,
 	i2c->wbuf = buf;
 	i2c->wt_len = len;
 
-	jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, TX_LEVEL);
+	jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, i2c->cdata->tx_level);
 
 	jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, JZ4780_I2C_INTM_MTXEMP
 					| JZ4780_I2C_INTM_MTXABT);
 
-	tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
-	tmp |= JZ4780_I2C_CTRL_STPHLD;
-	jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+	if (i2c->cdata->version < ID_X1000) {
+		tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
+		tmp |= JZ4780_I2C_CTRL_STPHLD;
+		jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+	}
 
 	spin_unlock_irqrestore(&i2c->lock, flags);
 
@@ -716,8 +767,25 @@ static const struct i2c_algorithm jz4780_i2c_algorithm = {
 	.functionality	= jz4780_i2c_functionality,
 };
 
+static const struct ingenic_i2c_config jz4780_i2c_config = {
+	.version = ID_JZ4780,
+
+	.fifosize = JZ4780_I2C_FIFO_LEN,
+	.tx_level = JZ4780_I2C_FIFO_LEN / 2,
+	.rx_level = JZ4780_I2C_FIFO_LEN / 2 - 1,
+};
+
+static const struct ingenic_i2c_config x1000_i2c_config = {
+	.version = ID_X1000,
+
+	.fifosize = X1000_I2C_FIFO_LEN,
+	.tx_level = X1000_I2C_FIFO_LEN / 2,
+	.rx_level = X1000_I2C_FIFO_LEN / 2 - 1,
+};
+
 static const struct of_device_id jz4780_i2c_of_matches[] = {
-	{ .compatible = "ingenic,jz4780-i2c", },
+	{ .compatible = "ingenic,jz4780-i2c", .data = &jz4780_i2c_config },
+	{ .compatible = "ingenic,x1000-i2c", .data = &x1000_i2c_config },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, jz4780_i2c_of_matches);
@@ -734,6 +802,12 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
 	if (!i2c)
 		return -ENOMEM;
 
+	i2c->cdata = device_get_match_data(&pdev->dev);
+	if (!i2c->cdata) {
+		dev_err(&pdev->dev, "Error: No device match found\n");
+		return -ENODEV;
+	}
+
 	i2c->adap.owner		= THIS_MODULE;
 	i2c->adap.algo		= &jz4780_i2c_algorithm;
 	i2c->adap.algo_data	= i2c;
@@ -777,9 +851,11 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
 
 	dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed);
 
-	tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
-	tmp &= ~JZ4780_I2C_CTRL_STPHLD;
-	jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+	if (i2c->cdata->version < ID_X1000) {
+		tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
+		tmp &= ~JZ4780_I2C_CTRL_STPHLD;
+		jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+	}
 
 	jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
 
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 1e2647f..06b3bed 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -10,6 +10,7 @@
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -213,6 +214,30 @@ static void meson_i2c_prepare_xfer(struct meson_i2c *i2c)
 	writel(i2c->tokens[1], i2c->regs + REG_TOK_LIST1);
 }
 
+static void meson_i2c_transfer_complete(struct meson_i2c *i2c, u32 ctrl)
+{
+	if (ctrl & REG_CTRL_ERROR) {
+		/*
+		 * The bit is set when the IGNORE_NAK bit is cleared
+		 * and the device didn't respond. In this case, the
+		 * I2C controller automatically generates a STOP
+		 * condition.
+		 */
+		dev_dbg(i2c->dev, "error bit set\n");
+		i2c->error = -ENXIO;
+		i2c->state = STATE_IDLE;
+	} else {
+		if (i2c->state == STATE_READ && i2c->count)
+			meson_i2c_get_data(i2c, i2c->msg->buf + i2c->pos,
+					   i2c->count);
+
+		i2c->pos += i2c->count;
+
+		if (i2c->pos >= i2c->msg->len)
+			i2c->state = STATE_IDLE;
+	}
+}
+
 static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
 {
 	struct meson_i2c *i2c = dev_id;
@@ -232,27 +257,9 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
 		return IRQ_NONE;
 	}
 
-	if (ctrl & REG_CTRL_ERROR) {
-		/*
-		 * The bit is set when the IGNORE_NAK bit is cleared
-		 * and the device didn't respond. In this case, the
-		 * I2C controller automatically generates a STOP
-		 * condition.
-		 */
-		dev_dbg(i2c->dev, "error bit set\n");
-		i2c->error = -ENXIO;
-		i2c->state = STATE_IDLE;
-		complete(&i2c->done);
-		goto out;
-	}
+	meson_i2c_transfer_complete(i2c, ctrl);
 
-	if (i2c->state == STATE_READ && i2c->count)
-		meson_i2c_get_data(i2c, i2c->msg->buf + i2c->pos, i2c->count);
-
-	i2c->pos += i2c->count;
-
-	if (i2c->pos >= i2c->msg->len) {
-		i2c->state = STATE_IDLE;
+	if (i2c->state == STATE_IDLE) {
 		complete(&i2c->done);
 		goto out;
 	}
@@ -279,10 +286,11 @@ static void meson_i2c_do_start(struct meson_i2c *i2c, struct i2c_msg *msg)
 }
 
 static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg,
-			      int last)
+			      int last, bool atomic)
 {
 	unsigned long time_left, flags;
 	int ret = 0;
+	u32 ctrl;
 
 	i2c->msg = msg;
 	i2c->last = last;
@@ -300,13 +308,24 @@ static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg,
 
 	i2c->state = (msg->flags & I2C_M_RD) ? STATE_READ : STATE_WRITE;
 	meson_i2c_prepare_xfer(i2c);
-	reinit_completion(&i2c->done);
+
+	if (!atomic)
+		reinit_completion(&i2c->done);
 
 	/* Start the transfer */
 	meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, REG_CTRL_START);
 
-	time_left = msecs_to_jiffies(I2C_TIMEOUT_MS);
-	time_left = wait_for_completion_timeout(&i2c->done, time_left);
+	if (atomic) {
+		ret = readl_poll_timeout_atomic(i2c->regs + REG_CTRL, ctrl,
+						!(ctrl & REG_CTRL_STATUS),
+						10, I2C_TIMEOUT_MS * 1000);
+	} else {
+		time_left = msecs_to_jiffies(I2C_TIMEOUT_MS);
+		time_left = wait_for_completion_timeout(&i2c->done, time_left);
+
+		if (!time_left)
+			ret = -ETIMEDOUT;
+	}
 
 	/*
 	 * Protect access to i2c struct and registers from interrupt
@@ -315,13 +334,14 @@ static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg,
 	 */
 	spin_lock_irqsave(&i2c->lock, flags);
 
+	if (atomic && !ret)
+		meson_i2c_transfer_complete(i2c, ctrl);
+
 	/* Abort any active operation */
 	meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, 0);
 
-	if (!time_left) {
+	if (ret)
 		i2c->state = STATE_IDLE;
-		ret = -ETIMEDOUT;
-	}
 
 	if (i2c->error)
 		ret = i2c->error;
@@ -331,8 +351,8 @@ static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg,
 	return ret;
 }
 
-static int meson_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
-			  int num)
+static int meson_i2c_xfer_messages(struct i2c_adapter *adap,
+				   struct i2c_msg *msgs, int num, bool atomic)
 {
 	struct meson_i2c *i2c = adap->algo_data;
 	int i, ret = 0;
@@ -340,7 +360,7 @@ static int meson_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
 	clk_enable(i2c->clk);
 
 	for (i = 0; i < num; i++) {
-		ret = meson_i2c_xfer_msg(i2c, msgs + i, i == num - 1);
+		ret = meson_i2c_xfer_msg(i2c, msgs + i, i == num - 1, atomic);
 		if (ret)
 			break;
 	}
@@ -350,14 +370,27 @@ static int meson_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
 	return ret ?: i;
 }
 
+static int meson_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+			  int num)
+{
+	return meson_i2c_xfer_messages(adap, msgs, num, false);
+}
+
+static int meson_i2c_xfer_atomic(struct i2c_adapter *adap,
+				 struct i2c_msg *msgs, int num)
+{
+	return meson_i2c_xfer_messages(adap, msgs, num, true);
+}
+
 static u32 meson_i2c_func(struct i2c_adapter *adap)
 {
 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
 static const struct i2c_algorithm meson_i2c_algorithm = {
-	.master_xfer	= meson_i2c_xfer,
-	.functionality	= meson_i2c_func,
+	.master_xfer = meson_i2c_xfer,
+	.master_xfer_atomic = meson_i2c_xfer_atomic,
+	.functionality = meson_i2c_func,
 };
 
 static int meson_i2c_probe(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index a5a95ea..febb7c7 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -901,14 +901,13 @@ mv64xxx_i2c_probe(struct platform_device *pd)
 
 	/* Not all platforms have clocks */
 	drv_data->clk = devm_clk_get(&pd->dev, NULL);
-	if (IS_ERR(drv_data->clk) && PTR_ERR(drv_data->clk) == -EPROBE_DEFER)
+	if (PTR_ERR(drv_data->clk) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 	if (!IS_ERR(drv_data->clk))
 		clk_prepare_enable(drv_data->clk);
 
 	drv_data->reg_clk = devm_clk_get(&pd->dev, "reg");
-	if (IS_ERR(drv_data->reg_clk) &&
-	    PTR_ERR(drv_data->reg_clk) == -EPROBE_DEFER)
+	if (PTR_ERR(drv_data->reg_clk) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 	if (!IS_ERR(drv_data->reg_clk))
 		clk_prepare_enable(drv_data->reg_clk);
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index 5a1235f..62e18b4 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -280,9 +280,9 @@ static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
 	i2cd->gpu_ccgx_ucsi->addr = 0x8;
 	i2cd->gpu_ccgx_ucsi->irq = irq;
 	i2cd->gpu_ccgx_ucsi->properties = ccgx_props;
-	i2cd->ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
-	if (!i2cd->ccgx_client)
-		return -ENODEV;
+	i2cd->ccgx_client = i2c_new_client_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
+	if (IS_ERR(i2cd->ccgx_client))
+		return PTR_ERR(i2cd->ccgx_client);
 
 	return 0;
 }
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index ca8b3ec..f5fc75b 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -731,7 +731,7 @@ static int ocores_i2c_probe(struct platform_device *pdev)
 	/* add in known devices to the bus */
 	if (pdata) {
 		for (i = 0; i < pdata->num_devices; i++)
-			i2c_new_device(&i2c->adap, pdata->devices + i);
+			i2c_new_client_device(&i2c->adap, pdata->devices + i);
 	}
 
 	return 0;
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
deleted file mode 100644
index 00f6aaf..0000000
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ /dev/null
@@ -1,267 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* ------------------------------------------------------------------------ *
- * i2c-parport-light.c I2C bus over parallel port                           *
- * ------------------------------------------------------------------------ *
-   Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de>
-
-   Based on older i2c-velleman.c driver
-   Copyright (C) 1995-2000 Simon G. Vogl
-   With some changes from:
-   Frodo Looijaard <frodol@dds.nl>
-   Kyösti Mälkki <kmalkki@cc.hut.fi>
-
- * ------------------------------------------------------------------------ */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/ioport.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <linux/i2c-smbus.h>
-#include <linux/io.h>
-#include "i2c-parport.h"
-
-#define DEFAULT_BASE 0x378
-#define DRVNAME "i2c-parport-light"
-
-static struct platform_device *pdev;
-
-static u16 base;
-module_param_hw(base, ushort, ioport, 0);
-MODULE_PARM_DESC(base, "Base I/O address");
-
-static int irq;
-module_param_hw(irq, int, irq, 0);
-MODULE_PARM_DESC(irq, "IRQ (optional)");
-
-/* ----- Low-level parallel port access ----------------------------------- */
-
-static inline void port_write(unsigned char p, unsigned char d)
-{
-	outb(d, base+p);
-}
-
-static inline unsigned char port_read(unsigned char p)
-{
-	return inb(base+p);
-}
-
-/* ----- Unified line operation functions --------------------------------- */
-
-static inline void line_set(int state, const struct lineop *op)
-{
-	u8 oldval = port_read(op->port);
-
-	/* Touch only the bit(s) needed */
-	if ((op->inverted && !state) || (!op->inverted && state))
-		port_write(op->port, oldval | op->val);
-	else
-		port_write(op->port, oldval & ~op->val);
-}
-
-static inline int line_get(const struct lineop *op)
-{
-	u8 oldval = port_read(op->port);
-
-	return ((op->inverted && (oldval & op->val) != op->val)
-	    || (!op->inverted && (oldval & op->val) == op->val));
-}
-
-/* ----- I2C algorithm call-back functions and structures ----------------- */
-
-static void parport_setscl(void *data, int state)
-{
-	line_set(state, &adapter_parm[type].setscl);
-}
-
-static void parport_setsda(void *data, int state)
-{
-	line_set(state, &adapter_parm[type].setsda);
-}
-
-static int parport_getscl(void *data)
-{
-	return line_get(&adapter_parm[type].getscl);
-}
-
-static int parport_getsda(void *data)
-{
-	return line_get(&adapter_parm[type].getsda);
-}
-
-/* Encapsulate the functions above in the correct structure
-   Note that getscl will be set to NULL by the attaching code for adapters
-   that cannot read SCL back */
-static struct i2c_algo_bit_data parport_algo_data = {
-	.setsda		= parport_setsda,
-	.setscl		= parport_setscl,
-	.getsda		= parport_getsda,
-	.getscl		= parport_getscl,
-	.udelay		= 50,
-	.timeout	= HZ,
-};
-
-/* ----- Driver registration ---------------------------------------------- */
-
-static struct i2c_adapter parport_adapter = {
-	.owner		= THIS_MODULE,
-	.class		= I2C_CLASS_HWMON,
-	.algo_data	= &parport_algo_data,
-	.name		= "Parallel port adapter (light)",
-};
-
-/* SMBus alert support */
-static struct i2c_smbus_alert_setup alert_data = {
-};
-static struct i2c_client *ara;
-static struct lineop parport_ctrl_irq = {
-	.val		= (1 << 4),
-	.port		= PORT_CTRL,
-};
-
-static int i2c_parport_probe(struct platform_device *pdev)
-{
-	int err;
-
-	/* Reset hardware to a sane state (SCL and SDA high) */
-	parport_setsda(NULL, 1);
-	parport_setscl(NULL, 1);
-	/* Other init if needed (power on...) */
-	if (adapter_parm[type].init.val) {
-		line_set(1, &adapter_parm[type].init);
-		/* Give powered devices some time to settle */
-		msleep(100);
-	}
-
-	parport_adapter.dev.parent = &pdev->dev;
-	err = i2c_bit_add_bus(&parport_adapter);
-	if (err) {
-		dev_err(&pdev->dev, "Unable to register with I2C\n");
-		return err;
-	}
-
-	/* Setup SMBus alert if supported */
-	if (adapter_parm[type].smbus_alert && irq) {
-		alert_data.irq = irq;
-		ara = i2c_setup_smbus_alert(&parport_adapter, &alert_data);
-		if (ara)
-			line_set(1, &parport_ctrl_irq);
-		else
-			dev_warn(&pdev->dev, "Failed to register ARA client\n");
-	}
-
-	return 0;
-}
-
-static int i2c_parport_remove(struct platform_device *pdev)
-{
-	if (ara) {
-		line_set(0, &parport_ctrl_irq);
-		i2c_unregister_device(ara);
-		ara = NULL;
-	}
-	i2c_del_adapter(&parport_adapter);
-
-	/* Un-init if needed (power off...) */
-	if (adapter_parm[type].init.val)
-		line_set(0, &adapter_parm[type].init);
-
-	return 0;
-}
-
-static struct platform_driver i2c_parport_driver = {
-	.driver = {
-		.name	= DRVNAME,
-	},
-	.probe		= i2c_parport_probe,
-	.remove		= i2c_parport_remove,
-};
-
-static int __init i2c_parport_device_add(u16 address)
-{
-	int err;
-
-	pdev = platform_device_alloc(DRVNAME, -1);
-	if (!pdev) {
-		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
-		goto exit;
-	}
-
-	err = platform_device_add(pdev);
-	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
-		goto exit_device_put;
-	}
-
-	return 0;
-
-exit_device_put:
-	platform_device_put(pdev);
-exit:
-	return err;
-}
-
-static int __init i2c_parport_init(void)
-{
-	int err;
-
-	if (type < 0) {
-		printk(KERN_ERR DRVNAME ": adapter type unspecified\n");
-		return -ENODEV;
-	}
-
-	if (type >= ARRAY_SIZE(adapter_parm)) {
-		printk(KERN_ERR DRVNAME ": invalid type (%d)\n", type);
-		return -ENODEV;
-	}
-
-	if (base == 0) {
-		pr_info(DRVNAME ": using default base 0x%x\n", DEFAULT_BASE);
-		base = DEFAULT_BASE;
-	}
-
-	if (!request_region(base, 3, DRVNAME))
-		return -EBUSY;
-
-	if (irq != 0)
-		pr_info(DRVNAME ": using irq %d\n", irq);
-
-	if (!adapter_parm[type].getscl.val)
-		parport_algo_data.getscl = NULL;
-
-	/* Sets global pdev as a side effect */
-	err = i2c_parport_device_add(base);
-	if (err)
-		goto exit_release;
-
-	err = platform_driver_register(&i2c_parport_driver);
-	if (err)
-		goto exit_device;
-
-	return 0;
-
-exit_device:
-	platform_device_unregister(pdev);
-exit_release:
-	release_region(base, 3);
-	return err;
-}
-
-static void __exit i2c_parport_exit(void)
-{
-	platform_driver_unregister(&i2c_parport_driver);
-	platform_device_unregister(pdev);
-	release_region(base, 3);
-}
-
-MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
-MODULE_DESCRIPTION("I2C bus over parallel port (light)");
-MODULE_LICENSE("GPL");
-
-module_init(i2c_parport_init);
-module_exit(i2c_parport_exit);
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index e8ed882..81eb441 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -25,7 +25,90 @@
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
-#include "i2c-parport.h"
+
+#define PORT_DATA	0
+#define PORT_STAT	1
+#define PORT_CTRL	2
+
+struct lineop {
+	u8 val;
+	u8 port;
+	u8 inverted;
+};
+
+struct adapter_parm {
+	struct lineop setsda;
+	struct lineop setscl;
+	struct lineop getsda;
+	struct lineop getscl;
+	struct lineop init;
+	unsigned int smbus_alert:1;
+};
+
+static const struct adapter_parm adapter_parm[] = {
+	/* type 0: Philips adapter */
+	{
+		.setsda	= { 0x80, PORT_DATA, 1 },
+		.setscl	= { 0x08, PORT_CTRL, 0 },
+		.getsda	= { 0x80, PORT_STAT, 0 },
+		.getscl	= { 0x08, PORT_STAT, 0 },
+	},
+	/* type 1: home brew teletext adapter */
+	{
+		.setsda	= { 0x02, PORT_DATA, 0 },
+		.setscl	= { 0x01, PORT_DATA, 0 },
+		.getsda	= { 0x80, PORT_STAT, 1 },
+	},
+	/* type 2: Velleman K8000 adapter */
+	{
+		.setsda	= { 0x02, PORT_CTRL, 1 },
+		.setscl	= { 0x08, PORT_CTRL, 1 },
+		.getsda	= { 0x10, PORT_STAT, 0 },
+	},
+	/* type 3: ELV adapter */
+	{
+		.setsda	= { 0x02, PORT_DATA, 1 },
+		.setscl	= { 0x01, PORT_DATA, 1 },
+		.getsda	= { 0x40, PORT_STAT, 1 },
+		.getscl	= { 0x08, PORT_STAT, 1 },
+	},
+	/* type 4: ADM1032 evaluation board */
+	{
+		.setsda	= { 0x02, PORT_DATA, 1 },
+		.setscl	= { 0x01, PORT_DATA, 1 },
+		.getsda	= { 0x10, PORT_STAT, 1 },
+		.init	= { 0xf0, PORT_DATA, 0 },
+		.smbus_alert = 1,
+	},
+	/* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */
+	{
+		.setsda	= { 0x02, PORT_DATA, 1 },
+		.setscl	= { 0x01, PORT_DATA, 1 },
+		.getsda	= { 0x10, PORT_STAT, 1 },
+	},
+	/* type 6: Barco LPT->DVI (K5800236) adapter */
+	{
+		.setsda	= { 0x02, PORT_DATA, 1 },
+		.setscl	= { 0x01, PORT_DATA, 1 },
+		.getsda	= { 0x20, PORT_STAT, 0 },
+		.getscl	= { 0x40, PORT_STAT, 0 },
+		.init	= { 0xfc, PORT_DATA, 0 },
+	},
+	/* type 7: One For All JP1 parallel port adapter */
+	{
+		.setsda	= { 0x01, PORT_DATA, 0 },
+		.setscl	= { 0x02, PORT_DATA, 0 },
+		.getsda	= { 0x80, PORT_STAT, 1 },
+		.init	= { 0x04, PORT_DATA, 1 },
+	},
+	/* type 8: VCT-jig */
+	{
+		.setsda	= { 0x04, PORT_DATA, 1 },
+		.setscl	= { 0x01, PORT_DATA, 1 },
+		.getsda	= { 0x40, PORT_STAT, 0 },
+		.getscl	= { 0x80, PORT_STAT, 1 },
+	},
+};
 
 /* ----- Device list ------------------------------------------------------ */
 
@@ -40,9 +123,30 @@ struct i2c_par {
 
 static LIST_HEAD(adapter_list);
 static DEFINE_MUTEX(adapter_list_lock);
+
 #define MAX_DEVICE 4
 static int parport[MAX_DEVICE] = {0, -1, -1, -1};
+module_param_array(parport, int, NULL, 0);
+MODULE_PARM_DESC(parport,
+		 "List of parallel ports to bind to, by index.\n"
+		 " At most " __stringify(MAX_DEVICE) " devices are supported.\n"
+		 " Default is one device connected to parport0.\n"
+);
 
+static int type = -1;
+module_param(type, int, 0);
+MODULE_PARM_DESC(type,
+	"Type of adapter:\n"
+	" 0 = Philips adapter\n"
+	" 1 = home brew teletext adapter\n"
+	" 2 = Velleman K8000 adapter\n"
+	" 3 = ELV adapter\n"
+	" 4 = ADM1032 evaluation board\n"
+	" 5 = ADM1025, ADM1030 and ADM1031 evaluation boards\n"
+	" 6 = Barco LPT->DVI (K5800236) adapter\n"
+	" 7 = One For All JP1 parallel port adapter\n"
+	" 8 = VCT-jig\n"
+);
 
 /* ----- Low-level parallel port access ----------------------------------- */
 
@@ -311,12 +415,5 @@ MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("I2C bus over parallel port");
 MODULE_LICENSE("GPL");
 
-module_param_array(parport, int, NULL, 0);
-MODULE_PARM_DESC(parport,
-		 "List of parallel ports to bind to, by index.\n"
-		 " Atmost " __stringify(MAX_DEVICE) " devices are supported.\n"
-		 " Default is one device connected to parport0.\n"
-);
-
 module_init(i2c_parport_init);
 module_exit(i2c_parport_exit);
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
deleted file mode 100644
index 3b32d92..0000000
--- a/drivers/i2c/busses/i2c-parport.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* ------------------------------------------------------------------------ *
- * i2c-parport.h I2C bus over parallel port                                 *
- * ------------------------------------------------------------------------ *
-   Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de>
-
- * ------------------------------------------------------------------------ */
-
-#define PORT_DATA	0
-#define PORT_STAT	1
-#define PORT_CTRL	2
-
-struct lineop {
-	u8 val;
-	u8 port;
-	u8 inverted;
-};
-
-struct adapter_parm {
-	struct lineop setsda;
-	struct lineop setscl;
-	struct lineop getsda;
-	struct lineop getscl;
-	struct lineop init;
-	unsigned int smbus_alert:1;
-};
-
-static const struct adapter_parm adapter_parm[] = {
-	/* type 0: Philips adapter */
-	{
-		.setsda	= { 0x80, PORT_DATA, 1 },
-		.setscl	= { 0x08, PORT_CTRL, 0 },
-		.getsda	= { 0x80, PORT_STAT, 0 },
-		.getscl	= { 0x08, PORT_STAT, 0 },
-	},
-	/* type 1: home brew teletext adapter */
-	{
-		.setsda	= { 0x02, PORT_DATA, 0 },
-		.setscl	= { 0x01, PORT_DATA, 0 },
-		.getsda	= { 0x80, PORT_STAT, 1 },
-	},
-	/* type 2: Velleman K8000 adapter */
-	{
-		.setsda	= { 0x02, PORT_CTRL, 1 },
-		.setscl	= { 0x08, PORT_CTRL, 1 },
-		.getsda	= { 0x10, PORT_STAT, 0 },
-	},
-	/* type 3: ELV adapter */
-	{
-		.setsda	= { 0x02, PORT_DATA, 1 },
-		.setscl	= { 0x01, PORT_DATA, 1 },
-		.getsda	= { 0x40, PORT_STAT, 1 },
-		.getscl	= { 0x08, PORT_STAT, 1 },
-	},
-	/* type 4: ADM1032 evaluation board */
-	{
-		.setsda	= { 0x02, PORT_DATA, 1 },
-		.setscl	= { 0x01, PORT_DATA, 1 },
-		.getsda	= { 0x10, PORT_STAT, 1 },
-		.init	= { 0xf0, PORT_DATA, 0 },
-		.smbus_alert = 1,
-	},
-	/* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */
-	{
-		.setsda	= { 0x02, PORT_DATA, 1 },
-		.setscl	= { 0x01, PORT_DATA, 1 },
-		.getsda	= { 0x10, PORT_STAT, 1 },
-	},
-	/* type 6: Barco LPT->DVI (K5800236) adapter */
-	{
-		.setsda	= { 0x02, PORT_DATA, 1 },
-		.setscl	= { 0x01, PORT_DATA, 1 },
-		.getsda	= { 0x20, PORT_STAT, 0 },
-		.getscl	= { 0x40, PORT_STAT, 0 },
-		.init	= { 0xfc, PORT_DATA, 0 },
-	},
-	/* type 7: One For All JP1 parallel port adapter */
-	{
-		.setsda	= { 0x01, PORT_DATA, 0 },
-		.setscl	= { 0x02, PORT_DATA, 0 },
-		.getsda	= { 0x80, PORT_STAT, 1 },
-		.init	= { 0x04, PORT_DATA, 1 },
-	},
-	/* type 8: VCT-jig */
-	{
-		.setsda	= { 0x04, PORT_DATA, 1 },
-		.setscl	= { 0x01, PORT_DATA, 1 },
-		.getsda	= { 0x40, PORT_STAT, 0 },
-		.getscl	= { 0x80, PORT_STAT, 1 },
-	},
-};
-
-static int type = -1;
-module_param(type, int, 0);
-MODULE_PARM_DESC(type,
-	"Type of adapter:\n"
-	" 0 = Philips adapter\n"
-	" 1 = home brew teletext adapter\n"
-	" 2 = Velleman K8000 adapter\n"
-	" 3 = ELV adapter\n"
-	" 4 = ADM1032 evaluation board\n"
-	" 5 = ADM1025, ADM1030 and ADM1031 evaluation boards\n"
-	" 6 = Barco LPT->DVI (K5800236) adapter\n"
-	" 7 = One For All JP1 parallel port adapter\n"
-	" 8 = VCT-jig\n"
-);
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 4fde74e..5d89c7c 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -274,8 +274,8 @@ static int pmcmsptwi_probe(struct platform_device *pldev)
 	if (!request_mem_region(res->start, resource_size(res),
 				pldev->name)) {
 		dev_err(&pldev->dev,
-			"Unable to get memory/io address region 0x%08x\n",
-			res->start);
+			"Unable to get memory/io address region %pap\n",
+			&res->start);
 		rc = -EBUSY;
 		goto ret_err;
 	}
@@ -285,7 +285,7 @@ static int pmcmsptwi_probe(struct platform_device *pldev)
 						resource_size(res));
 	if (!pmcmsptwi_data.iobase) {
 		dev_err(&pldev->dev,
-			"Unable to ioremap address 0x%08x\n", res->start);
+			"Unable to ioremap address %pap\n", &res->start);
 		rc = -EIO;
 		goto ret_unreserve;
 	}
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 6e0e546..686c06f 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -734,8 +734,8 @@ static int i2c_pnx_probe(struct platform_device *pdev)
 	if (ret < 0)
 		goto out_clock;
 
-	dev_dbg(&pdev->dev, "%s: Master at %#8x, irq %d.\n",
-		alg_data->adapter.name, res->start, alg_data->irq);
+	dev_dbg(&pdev->dev, "%s: Master at %pap, irq %d.\n",
+		alg_data->adapter.name, &res->start, alg_data->irq);
 
 	return 0;
 
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 504f5bf0..973e533 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -240,8 +240,8 @@ static void i2c_powermac_create_one(struct i2c_adapter *adap,
 
 	strncpy(info.type, type, sizeof(info.type));
 	info.addr = addr;
-	newdev = i2c_new_device(adap, &info);
-	if (!newdev)
+	newdev = i2c_new_client_device(adap, &info);
+	if (IS_ERR(newdev))
 		dev_err(&adap->dev,
 			"i2c-powermac: Failure to register missing %s\n",
 			type);
@@ -359,8 +359,8 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap,
 		info.irq = irq_of_parse_and_map(node, 0);
 		info.of_node = of_node_get(node);
 
-		newdev = i2c_new_device(adap, &info);
-		if (!newdev) {
+		newdev = i2c_new_client_device(adap, &info);
+		if (IS_ERR(newdev)) {
 			dev_err(&adap->dev, "i2c-powermac: Failure to register"
 				" %pOF\n", node);
 			of_node_put(node);
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index b2634af..5c3e8ac 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -169,6 +169,24 @@
 #define STM32F7_AUTOSUSPEND_DELAY		(HZ / 100)
 
 /**
+ * struct stm32f7_i2c_regs - i2c f7 registers backup
+ * @cr1: Control register 1
+ * @cr2: Control register 2
+ * @oar1: Own address 1 register
+ * @oar2: Own address 2 register
+ * @pecr: PEC register
+ * @tmgr: Timing register
+ */
+struct stm32f7_i2c_regs {
+	u32 cr1;
+	u32 cr2;
+	u32 oar1;
+	u32 oar2;
+	u32 pecr;
+	u32 tmgr;
+};
+
+/**
  * struct stm32f7_i2c_spec - private i2c specification timing
  * @rate: I2C bus speed (Hz)
  * @rate_min: 80% of I2C bus speed (Hz)
@@ -276,6 +294,7 @@ struct stm32f7_i2c_msg {
  * @timing: I2C computed timings
  * @slave: list of slave devices registered on the I2C bus
  * @slave_running: slave device currently used
+ * @backup_regs: backup of i2c controller registers (for suspend/resume)
  * @slave_dir: transfer direction for the current slave device
  * @master_mode: boolean to know in which mode the I2C is running (master or
  * slave)
@@ -298,6 +317,7 @@ struct stm32f7_i2c_dev {
 	struct stm32f7_i2c_timings timing;
 	struct i2c_client *slave[STM32F7_I2C_MAX_SLAVE];
 	struct i2c_client *slave_running;
+	struct stm32f7_i2c_regs backup_regs;
 	u32 slave_dir;
 	bool master_mode;
 	struct stm32_i2c_dma *dma;
@@ -2027,8 +2047,7 @@ static int stm32f7_i2c_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int stm32f7_i2c_runtime_suspend(struct device *dev)
+static int __maybe_unused stm32f7_i2c_runtime_suspend(struct device *dev)
 {
 	struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
 
@@ -2038,7 +2057,7 @@ static int stm32f7_i2c_runtime_suspend(struct device *dev)
 	return 0;
 }
 
-static int stm32f7_i2c_runtime_resume(struct device *dev)
+static int __maybe_unused stm32f7_i2c_runtime_resume(struct device *dev)
 {
 	struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
 	int ret;
@@ -2053,11 +2072,101 @@ static int stm32f7_i2c_runtime_resume(struct device *dev)
 
 	return 0;
 }
-#endif
+
+static int __maybe_unused
+stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
+{
+	int ret;
+	struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
+
+	ret = pm_runtime_get_sync(i2c_dev->dev);
+	if (ret < 0)
+		return ret;
+
+	backup_regs->cr1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR1);
+	backup_regs->cr2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR2);
+	backup_regs->oar1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR1);
+	backup_regs->oar2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR2);
+	backup_regs->pecr = readl_relaxed(i2c_dev->base + STM32F7_I2C_PECR);
+	backup_regs->tmgr = readl_relaxed(i2c_dev->base + STM32F7_I2C_TIMINGR);
+
+	pm_runtime_put_sync(i2c_dev->dev);
+
+	return ret;
+}
+
+static int __maybe_unused
+stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
+{
+	u32 cr1;
+	int ret;
+	struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
+
+	ret = pm_runtime_get_sync(i2c_dev->dev);
+	if (ret < 0)
+		return ret;
+
+	cr1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR1);
+	if (cr1 & STM32F7_I2C_CR1_PE)
+		stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
+				     STM32F7_I2C_CR1_PE);
+
+	writel_relaxed(backup_regs->tmgr, i2c_dev->base + STM32F7_I2C_TIMINGR);
+	writel_relaxed(backup_regs->cr1 & ~STM32F7_I2C_CR1_PE,
+		       i2c_dev->base + STM32F7_I2C_CR1);
+	if (backup_regs->cr1 & STM32F7_I2C_CR1_PE)
+		stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
+				     STM32F7_I2C_CR1_PE);
+	writel_relaxed(backup_regs->cr2, i2c_dev->base + STM32F7_I2C_CR2);
+	writel_relaxed(backup_regs->oar1, i2c_dev->base + STM32F7_I2C_OAR1);
+	writel_relaxed(backup_regs->oar2, i2c_dev->base + STM32F7_I2C_OAR2);
+	writel_relaxed(backup_regs->pecr, i2c_dev->base + STM32F7_I2C_PECR);
+
+	pm_runtime_put_sync(i2c_dev->dev);
+
+	return ret;
+}
+
+static int __maybe_unused stm32f7_i2c_suspend(struct device *dev)
+{
+	struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+	int ret;
+
+	i2c_mark_adapter_suspended(&i2c_dev->adap);
+	ret = stm32f7_i2c_regs_backup(i2c_dev);
+	if (ret < 0) {
+		i2c_mark_adapter_resumed(&i2c_dev->adap);
+		return ret;
+	}
+
+	pinctrl_pm_select_sleep_state(dev);
+	pm_runtime_force_suspend(dev);
+
+	return 0;
+}
+
+static int __maybe_unused stm32f7_i2c_resume(struct device *dev)
+{
+	struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = pm_runtime_force_resume(dev);
+	if (ret < 0)
+		return ret;
+	pinctrl_pm_select_default_state(dev);
+
+	ret = stm32f7_i2c_regs_restore(i2c_dev);
+	if (ret < 0)
+		return ret;
+	i2c_mark_adapter_resumed(&i2c_dev->adap);
+
+	return 0;
+}
 
 static const struct dev_pm_ops stm32f7_i2c_pm_ops = {
 	SET_RUNTIME_PM_OPS(stm32f7_i2c_runtime_suspend,
 			   stm32f7_i2c_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(stm32f7_i2c_suspend, stm32f7_i2c_resume)
 };
 
 static const struct of_device_id stm32f7_i2c_match[] = {
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 8c3e2d4..42e0a53 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -444,7 +444,7 @@ static int stu300_wait_while_busy(struct stu300_dev *dev)
 		       "Attempt: %d\n", i+1);
 
 		dev_err(&dev->pdev->dev, "base address = "
-			"0x%08x, reinit hardware\n", (u32) dev->virtbase);
+			"0x%p, reinit hardware\n", dev->virtbase);
 
 		(void) stu300_init_hw(dev);
 	}
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index 39762f0..8602679 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -553,7 +553,7 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
 				 &i2c->pclkrate);
 
 	i2c->pclk = devm_clk_get(&pdev->dev, "pclk");
-	if (IS_ERR(i2c->pclk) && PTR_ERR(i2c->pclk) == -EPROBE_DEFER)
+	if (PTR_ERR(i2c->pclk) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 	if (!IS_ERR_OR_NULL(i2c->pclk)) {
 		dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index 0bff3f3..b4050f5 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -49,10 +49,10 @@ static struct i2c_client *taos_instantiate_device(struct i2c_adapter *adapter)
 	if (!strncmp(adapter->name, "TAOS TSL2550 EVM", 16)) {
 		dev_info(&adapter->dev, "Instantiating device %s at 0x%02x\n",
 			tsl2550_info.type, tsl2550_info.addr);
-		return i2c_new_device(adapter, &tsl2550_info);
+		return i2c_new_client_device(adapter, &tsl2550_info);
 	}
 
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 
 static int taos_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 61339c6..cbc2ad4 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -16,7 +16,9 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
+#include <linux/irq.h>
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/pinctrl/consumer.h>
@@ -129,11 +131,12 @@
 #define I2C_PACKET_HEADER_SIZE			12
 
 /*
- * Upto I2C_PIO_MODE_MAX_LEN bytes, controller will use PIO mode,
- * above this, controller will use DMA to fill FIFO.
- * MAX PIO len is 20 bytes excluding packet header.
+ * The I2C controller uses PIO mode for transfers of up to 32 bytes in order
+ * to avoid DMA overhead; above that, the external APB DMA controller is used.
+ * Note that the actual maximum PIO payload is 20 bytes, because the 32 bytes
+ * include I2C_PACKET_HEADER_SIZE.
  */
-#define I2C_PIO_MODE_MAX_LEN			32
+#define I2C_PIO_MODE_PREFERRED_LEN		32
 
 /*
  * msg_end_type: The bus control which need to be send at end of transfer.
@@ -230,7 +233,6 @@ struct tegra_i2c_hw_feature {
  * @base_phys: physical base address of the I2C controller
  * @cont_id: I2C controller ID, used for packet header
  * @irq: IRQ number of transfer complete interrupt
- * @irq_disabled: used to track whether or not the interrupt is enabled
  * @is_dvc: identifies the DVC I2C controller, has a different register layout
  * @msg_complete: transfer completion notifier
  * @msg_err: error code for completed message
@@ -240,7 +242,6 @@ struct tegra_i2c_hw_feature {
  * @bus_clk_rate: current I2C bus clock rate
  * @clk_divisor_non_hs_mode: clock divider for non-high-speed modes
  * @is_multimaster_mode: track if I2C controller is in multi-master mode
- * @xfer_lock: lock to serialize transfer submission and processing
  * @tx_dma_chan: DMA transmit channel
  * @rx_dma_chan: DMA receive channel
  * @dma_phys: handle to DMA resources
@@ -248,6 +249,7 @@ struct tegra_i2c_hw_feature {
  * @dma_buf_size: DMA buffer size
  * @is_curr_dma_xfer: indicates active DMA transfer
  * @dma_complete: DMA completion notifier
+ * @is_curr_atomic_xfer: indicates active atomic transfer
  */
 struct tegra_i2c_dev {
 	struct device *dev;
@@ -260,7 +262,6 @@ struct tegra_i2c_dev {
 	phys_addr_t base_phys;
 	int cont_id;
 	int irq;
-	bool irq_disabled;
 	int is_dvc;
 	struct completion msg_complete;
 	int msg_err;
@@ -270,8 +271,6 @@ struct tegra_i2c_dev {
 	u32 bus_clk_rate;
 	u16 clk_divisor_non_hs_mode;
 	bool is_multimaster_mode;
-	/* xfer_lock: lock to serialize transfer submission and processing */
-	spinlock_t xfer_lock;
 	struct dma_chan *tx_dma_chan;
 	struct dma_chan *rx_dma_chan;
 	dma_addr_t dma_phys;
@@ -279,17 +278,18 @@ struct tegra_i2c_dev {
 	unsigned int dma_buf_size;
 	bool is_curr_dma_xfer;
 	struct completion dma_complete;
+	bool is_curr_atomic_xfer;
 };
 
 static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
 		       unsigned long reg)
 {
-	writel(val, i2c_dev->base + reg);
+	writel_relaxed(val, i2c_dev->base + reg);
 }
 
 static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
 {
-	return readl(i2c_dev->base + reg);
+	return readl_relaxed(i2c_dev->base + reg);
 }
 
 /*
@@ -307,16 +307,16 @@ static unsigned long tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev,
 static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
 		       unsigned long reg)
 {
-	writel(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+	writel_relaxed(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
 
 	/* Read back register to make sure that register writes completed */
 	if (reg != I2C_TX_FIFO)
-		readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+		readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
 }
 
 static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
 {
-	return readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+	return readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
 }
 
 static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
@@ -687,13 +687,15 @@ static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev)
 		reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_CONFIG_LOAD);
 		addr = i2c_dev->base + reg_offset;
 		i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD);
-		if (in_interrupt())
-			err = readl_poll_timeout_atomic(addr, val, val == 0,
-							1000,
-							I2C_CONFIG_LOAD_TIMEOUT);
+
+		if (i2c_dev->is_curr_atomic_xfer)
+			err = readl_relaxed_poll_timeout_atomic(
+						addr, val, val == 0, 1000,
+						I2C_CONFIG_LOAD_TIMEOUT);
 		else
-			err = readl_poll_timeout(addr, val, val == 0, 1000,
-						 I2C_CONFIG_LOAD_TIMEOUT);
+			err = readl_relaxed_poll_timeout(
+						addr, val, val == 0, 1000,
+						I2C_CONFIG_LOAD_TIMEOUT);
 
 		if (err) {
 			dev_warn(i2c_dev->dev,
@@ -790,11 +792,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
 	if (err)
 		return err;
 
-	if (i2c_dev->irq_disabled) {
-		i2c_dev->irq_disabled = false;
-		enable_irq(i2c_dev->irq);
-	}
-
 	return 0;
 }
 
@@ -825,18 +822,12 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
 
 	status = i2c_readl(i2c_dev, I2C_INT_STATUS);
 
-	spin_lock(&i2c_dev->xfer_lock);
 	if (status == 0) {
 		dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n",
 			 i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS),
 			 i2c_readl(i2c_dev, I2C_STATUS),
 			 i2c_readl(i2c_dev, I2C_CNFG));
 		i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT;
-
-		if (!i2c_dev->irq_disabled) {
-			disable_irq_nosync(i2c_dev->irq);
-			i2c_dev->irq_disabled = true;
-		}
 		goto err;
 	}
 
@@ -925,7 +916,6 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
 
 	complete(&i2c_dev->msg_complete);
 done:
-	spin_unlock(&i2c_dev->xfer_lock);
 	return IRQ_HANDLED;
 }
 
@@ -999,6 +989,64 @@ static void tegra_i2c_config_fifo_trig(struct tegra_i2c_dev *i2c_dev,
 	i2c_writel(i2c_dev, val, reg);
 }
 
+static unsigned long
+tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev,
+				  struct completion *complete,
+				  unsigned int timeout_ms)
+{
+	ktime_t ktime = ktime_get();
+	ktime_t ktimeout = ktime_add_ms(ktime, timeout_ms);
+
+	do {
+		u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS);
+
+		if (status) {
+			tegra_i2c_isr(i2c_dev->irq, i2c_dev);
+
+			if (completion_done(complete)) {
+				s64 delta = ktime_ms_delta(ktimeout, ktime);
+
+				return msecs_to_jiffies(delta) ?: 1;
+			}
+		}
+
+		ktime = ktime_get();
+
+	} while (ktime_before(ktime, ktimeout));
+
+	return 0;
+}
+
+static unsigned long
+tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev,
+				  struct completion *complete,
+				  unsigned int timeout_ms)
+{
+	unsigned long ret;
+
+	if (i2c_dev->is_curr_atomic_xfer) {
+		ret = tegra_i2c_poll_completion_timeout(i2c_dev, complete,
+							timeout_ms);
+	} else {
+		enable_irq(i2c_dev->irq);
+		ret = wait_for_completion_timeout(complete,
+						  msecs_to_jiffies(timeout_ms));
+		disable_irq(i2c_dev->irq);
+
+		/*
+		 * There is a chance that completion may happen after IRQ
+		 * synchronization, which is done by disable_irq().
+		 */
+		if (ret == 0 && completion_done(complete)) {
+			dev_warn(i2c_dev->dev,
+				 "completion done after timeout\n");
+			ret = 1;
+		}
+	}
+
+	return ret;
+}
+
 static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap)
 {
 	struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
@@ -1020,8 +1068,8 @@ static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap)
 	i2c_writel(i2c_dev, reg, I2C_BUS_CLEAR_CNFG);
 	tegra_i2c_unmask_irq(i2c_dev, I2C_INT_BUS_CLR_DONE);
 
-	time_left = wait_for_completion_timeout(&i2c_dev->msg_complete,
-						msecs_to_jiffies(50));
+	time_left = tegra_i2c_wait_completion_timeout(
+			i2c_dev, &i2c_dev->msg_complete, 50);
 	if (time_left == 0) {
 		dev_err(i2c_dev->dev, "timed out for bus clear\n");
 		return -ETIMEDOUT;
@@ -1044,7 +1092,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 	u32 packet_header;
 	u32 int_mask;
 	unsigned long time_left;
-	unsigned long flags;
 	size_t xfer_size;
 	u32 *buffer = NULL;
 	int err = 0;
@@ -1065,8 +1112,9 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 		xfer_size = msg->len + I2C_PACKET_HEADER_SIZE;
 
 	xfer_size = ALIGN(xfer_size, BYTES_PER_FIFO_WORD);
-	i2c_dev->is_curr_dma_xfer = (xfer_size > I2C_PIO_MODE_MAX_LEN) &&
-				    i2c_dev->dma_buf;
+	i2c_dev->is_curr_dma_xfer = (xfer_size > I2C_PIO_MODE_PREFERRED_LEN) &&
+				    i2c_dev->dma_buf &&
+				    !i2c_dev->is_curr_atomic_xfer;
 	tegra_i2c_config_fifo_trig(i2c_dev, xfer_size);
 	dma = i2c_dev->is_curr_dma_xfer;
 	/*
@@ -1075,7 +1123,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 	 */
 	xfer_time += DIV_ROUND_CLOSEST(((xfer_size * 9) + 2) * MSEC_PER_SEC,
 					i2c_dev->bus_clk_rate);
-	spin_lock_irqsave(&i2c_dev->xfer_lock, flags);
 
 	int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
 	tegra_i2c_unmask_irq(i2c_dev, int_mask);
@@ -1090,7 +1137,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 				dev_err(i2c_dev->dev,
 					"starting RX DMA failed, err %d\n",
 					err);
-				goto unlock;
+				return err;
 			}
 
 		} else {
@@ -1149,7 +1196,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 				dev_err(i2c_dev->dev,
 					"starting TX DMA failed, err %d\n",
 					err);
-				goto unlock;
+				return err;
 			}
 		} else {
 			tegra_i2c_fill_tx_fifo(i2c_dev);
@@ -1169,20 +1216,16 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 	dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n",
 		i2c_readl(i2c_dev, I2C_INT_MASK));
 
-unlock:
-	spin_unlock_irqrestore(&i2c_dev->xfer_lock, flags);
-
 	if (dma) {
-		if (err)
-			return err;
+		time_left = tegra_i2c_wait_completion_timeout(
+				i2c_dev, &i2c_dev->dma_complete, xfer_time);
 
-		time_left = wait_for_completion_timeout(&i2c_dev->dma_complete,
-							msecs_to_jiffies(xfer_time));
-		if (time_left == 0) {
+		dmaengine_terminate_sync(i2c_dev->msg_read ?
+					 i2c_dev->rx_dma_chan :
+					 i2c_dev->tx_dma_chan);
+
+		if (!time_left && !completion_done(&i2c_dev->dma_complete)) {
 			dev_err(i2c_dev->dev, "DMA transfer timeout\n");
-			dmaengine_terminate_sync(i2c_dev->msg_read ?
-						 i2c_dev->rx_dma_chan :
-						 i2c_dev->tx_dma_chan);
 			tegra_i2c_init(i2c_dev, true);
 			return -ETIMEDOUT;
 		}
@@ -1195,20 +1238,15 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 			memcpy(i2c_dev->msg_buf, i2c_dev->dma_buf,
 			       msg->len);
 		}
-
-		if (i2c_dev->msg_err != I2C_ERR_NONE)
-			dmaengine_synchronize(i2c_dev->msg_read ?
-					      i2c_dev->rx_dma_chan :
-					      i2c_dev->tx_dma_chan);
 	}
 
-	time_left = wait_for_completion_timeout(&i2c_dev->msg_complete,
-						msecs_to_jiffies(xfer_time));
+	time_left = tegra_i2c_wait_completion_timeout(
+			i2c_dev, &i2c_dev->msg_complete, xfer_time);
+
 	tegra_i2c_mask_irq(i2c_dev, int_mask);
 
 	if (time_left == 0) {
 		dev_err(i2c_dev->dev, "i2c transfer timed out\n");
-
 		tegra_i2c_init(i2c_dev, true);
 		return -ETIMEDOUT;
 	}
@@ -1270,6 +1308,19 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 	return ret ?: i;
 }
 
+static int tegra_i2c_xfer_atomic(struct i2c_adapter *adap,
+				 struct i2c_msg msgs[], int num)
+{
+	struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+	int ret;
+
+	i2c_dev->is_curr_atomic_xfer = true;
+	ret = tegra_i2c_xfer(adap, msgs, num);
+	i2c_dev->is_curr_atomic_xfer = false;
+
+	return ret;
+}
+
 static u32 tegra_i2c_func(struct i2c_adapter *adap)
 {
 	struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
@@ -1297,8 +1348,9 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
 }
 
 static const struct i2c_algorithm tegra_i2c_algo = {
-	.master_xfer	= tegra_i2c_xfer,
-	.functionality	= tegra_i2c_func,
+	.master_xfer		= tegra_i2c_xfer,
+	.master_xfer_atomic	= tegra_i2c_xfer_atomic,
+	.functionality		= tegra_i2c_func,
 };
 
 /* payload size is only 12 bit */
@@ -1568,7 +1620,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
 				I2C_PACKET_HEADER_SIZE;
 	init_completion(&i2c_dev->msg_complete);
 	init_completion(&i2c_dev->dma_complete);
-	spin_lock_init(&i2c_dev->xfer_lock);
 
 	if (!i2c_dev->hw->has_single_clk_source) {
 		fast_clk = devm_clk_get(&pdev->dev, "fast-clk");
@@ -1607,6 +1658,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
 		goto unprepare_fast_clk;
 	}
 
+	pm_runtime_irq_safe(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 	if (!pm_runtime_enabled(&pdev->dev)) {
 		ret = tegra_i2c_runtime_resume(&pdev->dev);
@@ -1644,6 +1696,8 @@ static int tegra_i2c_probe(struct platform_device *pdev)
 		goto release_dma;
 	}
 
+	irq_set_status_flags(i2c_dev->irq, IRQ_NOAUTOEN);
+
 	ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
 			       tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev);
 	if (ret) {
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 43e3603..7279ca0 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -84,7 +84,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
 				     pmsg->buf, pmsg->len) != pmsg->len) {
 				dev_err(&adapter->dev,
 					"failure reading data\n");
-				ret = -EREMOTEIO;
+				ret = -EIO;
 				goto out;
 			}
 		} else {
@@ -94,7 +94,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
 				      pmsg->buf, pmsg->len) != pmsg->len) {
 				dev_err(&adapter->dev,
 					"failure writing data\n");
-				ret = -EREMOTEIO;
+				ret = -EIO;
 				goto out;
 			}
 		}
@@ -102,13 +102,13 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
 		/* read status */
 		if (usb_read(adapter, CMD_GET_STATUS, 0, 0, pstatus, 1) != 1) {
 			dev_err(&adapter->dev, "failure reading status\n");
-			ret = -EREMOTEIO;
+			ret = -EIO;
 			goto out;
 		}
 
 		dev_dbg(&adapter->dev, "  status = %d\n", *pstatus);
 		if (*pstatus == STATUS_ADDRESS_NAK) {
-			ret = -EREMOTEIO;
+			ret = -ENXIO;
 			goto out;
 		}
 	}
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index d8d49f1..90c1c36 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -156,6 +156,8 @@ struct xiic_i2c {
 #define XIIC_RESET_MASK             0xAUL
 
 #define XIIC_PM_TIMEOUT		1000	/* ms */
+/* timeout waiting for the controller to respond */
+#define XIIC_I2C_TIMEOUT	(msecs_to_jiffies(1000))
 /*
  * The following constant is used for the device global interrupt enable
  * register, to enable all interrupts for the device, this is the only bit
@@ -166,7 +168,7 @@ struct xiic_i2c {
 #define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos)
 #define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos)
 
-static void xiic_start_xfer(struct xiic_i2c *i2c);
+static int xiic_start_xfer(struct xiic_i2c *i2c);
 static void __xiic_start_xfer(struct xiic_i2c *i2c);
 
 /*
@@ -247,17 +249,29 @@ static inline void xiic_irq_clr_en(struct xiic_i2c *i2c, u32 mask)
 	xiic_irq_en(i2c, mask);
 }
 
-static void xiic_clear_rx_fifo(struct xiic_i2c *i2c)
+static int xiic_clear_rx_fifo(struct xiic_i2c *i2c)
 {
 	u8 sr;
+	unsigned long timeout;
+
+	timeout = jiffies + XIIC_I2C_TIMEOUT;
 	for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
 		!(sr & XIIC_SR_RX_FIFO_EMPTY_MASK);
-		sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET))
+		sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET)) {
 		xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
+		if (time_after(jiffies, timeout)) {
+			dev_err(i2c->dev, "Failed to clear rx fifo\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
 }
 
-static void xiic_reinit(struct xiic_i2c *i2c)
+static int xiic_reinit(struct xiic_i2c *i2c)
 {
+	int ret;
+
 	xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);
 
 	/* Set receive Fifo depth to maximum (zero based). */
@@ -270,12 +284,16 @@ static void xiic_reinit(struct xiic_i2c *i2c)
 	xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK);
 
 	/* make sure RX fifo is empty */
-	xiic_clear_rx_fifo(i2c);
+	ret = xiic_clear_rx_fifo(i2c);
+	if (ret)
+		return ret;
 
 	/* Enable interrupts */
 	xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
 
 	xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK);
+
+	return 0;
 }
 
 static void xiic_deinit(struct xiic_i2c *i2c)
@@ -655,12 +673,18 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
 
 }
 
-static void xiic_start_xfer(struct xiic_i2c *i2c)
+static int xiic_start_xfer(struct xiic_i2c *i2c)
 {
+	int ret;
 	mutex_lock(&i2c->lock);
-	xiic_reinit(i2c);
-	__xiic_start_xfer(i2c);
+
+	ret = xiic_reinit(i2c);
+	if (!ret)
+		__xiic_start_xfer(i2c);
+
 	mutex_unlock(&i2c->lock);
+
+	return ret;
 }
 
 static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
@@ -682,7 +706,11 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 	i2c->tx_msg = msgs;
 	i2c->nmsgs = num;
 
-	xiic_start_xfer(i2c);
+	err = xiic_start_xfer(i2c);
+	if (err < 0) {
+		dev_err(adap->dev.parent, "Error xiic_start_xfer\n");
+		goto out;
+	}
 
 	if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
 		(i2c->state == STATE_DONE), HZ)) {
@@ -760,7 +788,8 @@ static int xiic_i2c_probe(struct platform_device *pdev)
 
 	i2c->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(i2c->clk)) {
-		dev_err(&pdev->dev, "input clock not found.\n");
+		if (PTR_ERR(i2c->clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "input clock not found.\n");
 		return PTR_ERR(i2c->clk);
 	}
 	ret = clk_prepare_enable(i2c->clk);
@@ -769,10 +798,10 @@ static int xiic_i2c_probe(struct platform_device *pdev)
 		return ret;
 	}
 	i2c->dev = &pdev->dev;
-	pm_runtime_enable(i2c->dev);
 	pm_runtime_set_autosuspend_delay(i2c->dev, XIIC_PM_TIMEOUT);
 	pm_runtime_use_autosuspend(i2c->dev);
 	pm_runtime_set_active(i2c->dev);
+	pm_runtime_enable(i2c->dev);
 	ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr,
 					xiic_process, IRQF_ONESHOT,
 					pdev->name, i2c);
@@ -794,7 +823,11 @@ static int xiic_i2c_probe(struct platform_device *pdev)
 	if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK))
 		i2c->endianness = BIG;
 
-	xiic_reinit(i2c);
+	ret = xiic_reinit(i2c);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Cannot xiic_reinit\n");
+		goto err_clk_dis;
+	}
 
 	/* add i2c adapter to i2c tree */
 	ret = i2c_add_adapter(&i2c->adap);
@@ -806,7 +839,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
 	if (pdata) {
 		/* add in known devices to the bus */
 		for (i = 0; i < pdata->num_devices; i++)
-			i2c_new_device(&i2c->adap, pdata->devices + i);
+			i2c_new_client_device(&i2c->adap, pdata->devices + i);
 	}
 
 	return 0;
@@ -826,14 +859,16 @@ static int xiic_i2c_remove(struct platform_device *pdev)
 	/* remove adapter & data */
 	i2c_del_adapter(&i2c->adap);
 
-	ret = clk_prepare_enable(i2c->clk);
-	if (ret) {
-		dev_err(&pdev->dev, "Unable to enable clock.\n");
+	ret = pm_runtime_get_sync(i2c->dev);
+	if (ret < 0)
 		return ret;
-	}
+
 	xiic_deinit(i2c);
+	pm_runtime_put_sync(i2c->dev);
 	clk_disable_unprepare(i2c->clk);
 	pm_runtime_disable(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
 
 	return 0;
 }
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 62a1c92..8f3dbc9 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -225,7 +225,7 @@ static void i2c_acpi_register_device(struct i2c_adapter *adapter,
 	adev->power.flags.ignore_parent = true;
 	acpi_device_set_enumerated(adev);
 
-	if (!i2c_new_device(adapter, info)) {
+	if (IS_ERR(i2c_new_client_device(adapter, info))) {
 		adev->power.flags.ignore_parent = false;
 		dev_err(&adapter->dev,
 			"failed to add I2C device %s from ACPI\n",
@@ -451,7 +451,8 @@ struct notifier_block i2c_acpi_notifier = {
  * resources, in that case this function can be used to create an i2c-client
  * for other I2cSerialBus resources in the Current Resource Settings table.
  *
- * Also see i2c_new_device, which this function calls to create the i2c-client.
+ * Also see i2c_new_client_device, which this function calls to create the
+ * i2c-client.
  *
  * Returns a pointer to the new i2c-client, or error pointer in case of failure.
  * Specifically, -EPROBE_DEFER is returned if the adapter is not found.
@@ -461,7 +462,6 @@ struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
 {
 	struct i2c_acpi_lookup lookup;
 	struct i2c_adapter *adapter;
-	struct i2c_client *client;
 	struct acpi_device *adev;
 	LIST_HEAD(resource_list);
 	int ret;
@@ -489,11 +489,7 @@ struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
 	if (!adapter)
 		return ERR_PTR(-EPROBE_DEFER);
 
-	client = i2c_new_device(adapter, info);
-	if (!client)
-		return ERR_PTR(-ENODEV);
-
-	return client;
+	return i2c_new_client_device(adapter, info);
 }
 EXPORT_SYMBOL_GPL(i2c_acpi_new_device);
 
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 35b2097..cefad08 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -456,15 +456,15 @@ static void i2c_client_dev_release(struct device *dev)
 }
 
 static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
+name_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%s\n", dev->type == &i2c_client_type ?
 		       to_i2c_client(dev)->name : to_i2c_adapter(dev)->name);
 }
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
 
 static ssize_t
-show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
+modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	int len;
@@ -479,7 +479,7 @@ show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
 
 	return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
 }
-static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
+static DEVICE_ATTR_RO(modalias);
 
 static struct attribute *i2c_dev_attrs[] = {
 	&dev_attr_name.attr,
@@ -831,8 +831,8 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
 
 
 /**
- * i2c_unregister_device - reverse effect of i2c_new_device()
- * @client: value returned from i2c_new_device()
+ * i2c_unregister_device - reverse effect of i2c_new_*_device()
+ * @client: value returned from i2c_new_*_device()
  * Context: can sleep
  */
 void i2c_unregister_device(struct i2c_client *client)
@@ -1023,8 +1023,8 @@ EXPORT_SYMBOL_GPL(i2c_adapter_depth);
  * the user to provide incorrect parameters.
  */
 static ssize_t
-i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
-		     const char *buf, size_t count)
+new_device_store(struct device *dev, struct device_attribute *attr,
+		 const char *buf, size_t count)
 {
 	struct i2c_adapter *adap = to_i2c_adapter(dev);
 	struct i2c_board_info info;
@@ -1079,7 +1079,7 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
 
 	return count;
 }
-static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
+static DEVICE_ATTR_WO(new_device);
 
 /*
  * And of course let the users delete the devices they instantiated, if
@@ -1091,8 +1091,8 @@ static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
  * the user to delete the wrong device.
  */
 static ssize_t
-i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
-			const char *buf, size_t count)
+delete_device_store(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
 {
 	struct i2c_adapter *adap = to_i2c_adapter(dev);
 	struct i2c_client *client, *next;
@@ -1135,7 +1135,7 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
 	return res;
 }
 static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
-				   i2c_sysfs_delete_device);
+				  delete_device_store);
 
 static struct attribute *i2c_adapter_attrs[] = {
 	&dev_attr_name.attr,
@@ -1178,9 +1178,8 @@ static void i2c_scan_static_board_info(struct i2c_adapter *adapter)
 
 	down_read(&__i2c_board_lock);
 	list_for_each_entry(devinfo, &__i2c_board_list, list) {
-		if (devinfo->busnum == adapter->nr
-				&& !i2c_new_device(adapter,
-						&devinfo->board_info))
+		if (devinfo->busnum == adapter->nr &&
+		    IS_ERR(i2c_new_client_device(adapter, &devinfo->board_info)))
 			dev_err(&adapter->dev,
 				"Can't create device at 0x%02x\n",
 				devinfo->board_info.addr);
@@ -2167,8 +2166,8 @@ static int i2c_detect_address(struct i2c_client *temp_client,
 
 		dev_dbg(&adapter->dev, "Creating %s at 0x%02x\n",
 			info.type, info.addr);
-		client = i2c_new_device(adapter, &info);
-		if (client)
+		client = i2c_new_client_device(adapter, &info);
+		if (!IS_ERR(client))
 			list_add_tail(&client->detected, &driver->clients);
 		else
 			dev_err(&adapter->dev, "Failed creating %s at 0x%02x\n",
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index e4d296b..6787c1f 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -75,11 +75,10 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
 	if (ret)
 		return ERR_PTR(ret);
 
-	client = i2c_new_device(adap, &info);
-	if (!client) {
+	client = i2c_new_client_device(adap, &info);
+	if (IS_ERR(client))
 		dev_err(&adap->dev, "of_i2c: Failure registering %pOF\n", node);
-		return ERR_PTR(-EINVAL);
-	}
+
 	return client;
 }
 
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 50e1fb4..6daec8d 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -16,6 +16,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/i2c.h>
@@ -42,20 +43,20 @@
 #define PCA9541_CONTROL		0x01
 #define PCA9541_ISTAT		0x02
 
-#define PCA9541_CTL_MYBUS	(1 << 0)
-#define PCA9541_CTL_NMYBUS	(1 << 1)
-#define PCA9541_CTL_BUSON	(1 << 2)
-#define PCA9541_CTL_NBUSON	(1 << 3)
-#define PCA9541_CTL_BUSINIT	(1 << 4)
-#define PCA9541_CTL_TESTON	(1 << 6)
-#define PCA9541_CTL_NTESTON	(1 << 7)
+#define PCA9541_CTL_MYBUS	BIT(0)
+#define PCA9541_CTL_NMYBUS	BIT(1)
+#define PCA9541_CTL_BUSON	BIT(2)
+#define PCA9541_CTL_NBUSON	BIT(3)
+#define PCA9541_CTL_BUSINIT	BIT(4)
+#define PCA9541_CTL_TESTON	BIT(6)
+#define PCA9541_CTL_NTESTON	BIT(7)
 
-#define PCA9541_ISTAT_INTIN	(1 << 0)
-#define PCA9541_ISTAT_BUSINIT	(1 << 1)
-#define PCA9541_ISTAT_BUSOK	(1 << 2)
-#define PCA9541_ISTAT_BUSLOST	(1 << 3)
-#define PCA9541_ISTAT_MYTEST	(1 << 6)
-#define PCA9541_ISTAT_NMYTEST	(1 << 7)
+#define PCA9541_ISTAT_INTIN	BIT(0)
+#define PCA9541_ISTAT_BUSINIT	BIT(1)
+#define PCA9541_ISTAT_BUSOK	BIT(2)
+#define PCA9541_ISTAT_BUSLOST	BIT(3)
+#define PCA9541_ISTAT_MYTEST	BIT(6)
+#define PCA9541_ISTAT_NMYTEST	BIT(7)
 
 #define BUSON		(PCA9541_CTL_BUSON | PCA9541_CTL_NBUSON)
 #define MYBUS		(PCA9541_CTL_MYBUS | PCA9541_CTL_NMYBUS)
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 923aa3a..a0d926a 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -86,7 +86,7 @@ struct pca954x {
 
 	u8 last_chan;		/* last register value */
 	/* MUX_IDLE_AS_IS, MUX_IDLE_DISCONNECT or >= 0 for channel */
-	s8 idle_state;
+	s32 idle_state;
 
 	struct i2c_client *client;
 
@@ -229,20 +229,23 @@ static int pca954x_reg_write(struct i2c_adapter *adap,
 				I2C_SMBUS_BYTE, &dummy);
 }
 
+static u8 pca954x_regval(struct pca954x *data, u8 chan)
+{
+	/* We make switches look like muxes, not sure how to be smarter. */
+	if (data->chip->muxtype == pca954x_ismux)
+		return chan | data->chip->enable;
+	else
+		return 1 << chan;
+}
+
 static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
 {
 	struct pca954x *data = i2c_mux_priv(muxc);
 	struct i2c_client *client = data->client;
-	const struct chip_desc *chip = data->chip;
 	u8 regval;
 	int ret = 0;
 
-	/* we make switches look like muxes, not sure how to be smarter */
-	if (chip->muxtype == pca954x_ismux)
-		regval = chan | chip->enable;
-	else
-		regval = 1 << chan;
-
+	regval = pca954x_regval(data, chan);
 	/* Only select the channel if its different from the last channel */
 	if (data->last_chan != regval) {
 		ret = pca954x_reg_write(muxc->parent, client, regval);
@@ -256,7 +259,7 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
 {
 	struct pca954x *data = i2c_mux_priv(muxc);
 	struct i2c_client *client = data->client;
-	s8 idle_state;
+	s32 idle_state;
 
 	idle_state = READ_ONCE(data->idle_state);
 	if (idle_state >= 0)
@@ -402,6 +405,22 @@ static void pca954x_cleanup(struct i2c_mux_core *muxc)
 	i2c_mux_del_adapters(muxc);
 }
 
+static int pca954x_init(struct i2c_client *client, struct pca954x *data)
+{
+	int ret;
+
+	if (data->idle_state >= 0)
+		data->last_chan = pca954x_regval(data, data->idle_state);
+	else
+		data->last_chan = 0; /* Disconnect multiplexer */
+
+	ret = i2c_smbus_write_byte(client, data->last_chan);
+	if (ret < 0)
+		data->last_chan = 0;
+
+	return ret;
+}
+
 /*
  * I2C init/probing/exit functions
  */
@@ -411,7 +430,6 @@ static int pca954x_probe(struct i2c_client *client,
 	struct i2c_adapter *adap = client->adapter;
 	struct device *dev = &client->dev;
 	struct device_node *np = dev->of_node;
-	bool idle_disconnect_dt;
 	struct gpio_desc *gpio;
 	struct i2c_mux_core *muxc;
 	struct pca954x *data;
@@ -462,23 +480,24 @@ static int pca954x_probe(struct i2c_client *client,
 		}
 	}
 
-	/* Write the mux register at addr to verify
+	data->idle_state = MUX_IDLE_AS_IS;
+	if (of_property_read_u32(np, "idle-state", &data->idle_state)) {
+		if (np && of_property_read_bool(np, "i2c-mux-idle-disconnect"))
+			data->idle_state = MUX_IDLE_DISCONNECT;
+	}
+
+	/*
+	 * Write the mux register at addr to verify
 	 * that the mux is in fact present. This also
-	 * initializes the mux to disconnected state.
+	 * initializes the mux to a channel
+	 * or disconnected state.
 	 */
-	if (i2c_smbus_write_byte(client, 0) < 0) {
+	ret = pca954x_init(client, data);
+	if (ret < 0) {
 		dev_warn(dev, "probe failed\n");
 		return -ENODEV;
 	}
 
-	data->last_chan = 0;		   /* force the first selection */
-	data->idle_state = MUX_IDLE_AS_IS;
-
-	idle_disconnect_dt = np &&
-		of_property_read_bool(np, "i2c-mux-idle-disconnect");
-	if (idle_disconnect_dt)
-		data->idle_state = MUX_IDLE_DISCONNECT;
-
 	ret = pca954x_irq_setup(muxc);
 	if (ret)
 		goto fail_cleanup;
@@ -530,9 +549,13 @@ static int pca954x_resume(struct device *dev)
 	struct i2c_client *client = to_i2c_client(dev);
 	struct i2c_mux_core *muxc = i2c_get_clientdata(client);
 	struct pca954x *data = i2c_mux_priv(muxc);
+	int ret;
 
-	data->last_chan = 0;
-	return i2c_smbus_write_byte(client, 0);
+	ret = pca954x_init(client, data);
+	if (ret < 0)
+		dev_err(&client->dev, "failed to verify mux presence\n");
+
+	return ret;
 }
 #endif
 
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index cac02db..d4f4409 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -3,8 +3,6 @@
 # link order is important here
 #
 
-ccflags-y				:= -Idrivers/ide
-
 ide-core-y += ide.o ide-ioctls.o ide-io.o ide-iops.o ide-lib.o ide-probe.o \
 	      ide-taskfile.o ide-pm.o ide-park.o ide-sysfs.o ide-devsets.o \
 	      ide-io-std.o ide-eh.o
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index a1898e1..943bf94 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -66,6 +66,9 @@ static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
 	struct ide_timing t;
 	u8 arttim = 0;
 
+	if (drive->dn >= ARRAY_SIZE(drwtim_regs))
+		return;
+
 	ide_timing_compute(drive, mode, &t, T, 0);
 
 	/*
diff --git a/drivers/ide/ht6560b.c b/drivers/ide/ht6560b.c
index 0dae65a..743bc36 100644
--- a/drivers/ide/ht6560b.c
+++ b/drivers/ide/ht6560b.c
@@ -310,7 +310,7 @@ static void __init ht6560b_init_dev(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	/* Setting default configurations for drives. */
-	int t = (HT_CONFIG_DEFAULT << 8) | HT_TIMING_DEFAULT;
+	unsigned long t = (HT_CONFIG_DEFAULT << 8) | HT_TIMING_DEFAULT;
 
 	if (hwif->channel)
 		t |= (HT_SECONDARY_IF << 8);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 9d11793..dcf8b51 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -25,6 +25,7 @@
 
 #define IDECD_VERSION "5.00"
 
+#include <linux/compat.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -1710,6 +1711,41 @@ static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
 	return ret;
 }
 
+static int idecd_locked_compat_ioctl(struct block_device *bdev, fmode_t mode,
+			unsigned int cmd, unsigned long arg)
+{
+	struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info);
+	void __user *argp = compat_ptr(arg);
+	int err;
+
+	switch (cmd) {
+	case CDROMSETSPINDOWN:
+		return idecd_set_spindown(&info->devinfo, (unsigned long)argp);
+	case CDROMGETSPINDOWN:
+		return idecd_get_spindown(&info->devinfo, (unsigned long)argp);
+	default:
+		break;
+	}
+
+	err = generic_ide_ioctl(info->drive, bdev, cmd, arg);
+	if (err == -EINVAL)
+		err = cdrom_ioctl(&info->devinfo, bdev, mode, cmd,
+				  (unsigned long)argp);
+
+	return err;
+}
+
+static int idecd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+			     unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	mutex_lock(&ide_cd_mutex);
+	ret = idecd_locked_compat_ioctl(bdev, mode, cmd, arg);
+	mutex_unlock(&ide_cd_mutex);
+
+	return ret;
+}
 
 static unsigned int idecd_check_events(struct gendisk *disk,
 				       unsigned int clearing)
@@ -1732,6 +1768,8 @@ static const struct block_device_operations idecd_ops = {
 	.open			= idecd_open,
 	.release		= idecd_release,
 	.ioctl			= idecd_ioctl,
+	.compat_ioctl		= IS_ENABLED(CONFIG_COMPAT) ?
+				  idecd_compat_ioctl : NULL,
 	.check_events		= idecd_check_events,
 	.revalidate_disk	= idecd_revalidate_disk
 };
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 197912a..1d3407d 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -794,4 +794,5 @@ const struct ide_disk_ops ide_ata_disk_ops = {
 	.set_doorlock		= ide_disk_set_doorlock,
 	.do_request		= ide_do_rw_disk,
 	.ioctl			= ide_disk_ioctl,
+	.compat_ioctl		= ide_disk_ioctl,
 };
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 1ea2f9e..1fe1f9d 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -19,6 +19,7 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/compat.h>
 #include <linux/delay.h>
 #include <linux/timer.h>
 #include <linux/mm.h>
@@ -546,4 +547,7 @@ const struct ide_disk_ops ide_atapi_disk_ops = {
 	.set_doorlock	= ide_set_media_lock,
 	.do_request	= ide_floppy_do_request,
 	.ioctl		= ide_floppy_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= ide_floppy_compat_ioctl,
+#endif
 };
diff --git a/drivers/ide/ide-floppy.h b/drivers/ide/ide-floppy.h
index 13c9b4b..8505a5f 100644
--- a/drivers/ide/ide-floppy.h
+++ b/drivers/ide/ide-floppy.h
@@ -26,6 +26,8 @@ void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *);
 /* ide-floppy_ioctl.c */
 int ide_floppy_ioctl(ide_drive_t *, struct block_device *, fmode_t,
 		     unsigned int, unsigned long);
+int ide_floppy_compat_ioctl(ide_drive_t *, struct block_device *, fmode_t,
+			    unsigned int, unsigned long);
 
 #ifdef CONFIG_IDE_PROC_FS
 /* ide-floppy_proc.c */
diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
index 40a2ebe..39a790a 100644
--- a/drivers/ide/ide-floppy_ioctl.c
+++ b/drivers/ide/ide-floppy_ioctl.c
@@ -5,6 +5,7 @@
 
 #include <linux/kernel.h>
 #include <linux/ide.h>
+#include <linux/compat.h>
 #include <linux/cdrom.h>
 #include <linux/mutex.h>
 
@@ -302,3 +303,37 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
 	mutex_unlock(&ide_floppy_ioctl_mutex);
 	return err;
 }
+
+#ifdef CONFIG_COMPAT
+int ide_floppy_compat_ioctl(ide_drive_t *drive, struct block_device *bdev,
+			    fmode_t mode, unsigned int cmd, unsigned long arg)
+{
+	struct ide_atapi_pc pc;
+	void __user *argp = compat_ptr(arg);
+	int err;
+
+	mutex_lock(&ide_floppy_ioctl_mutex);
+	if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) {
+		err = ide_floppy_lockdoor(drive, &pc, arg, cmd);
+		goto out;
+	}
+
+	err = ide_floppy_format_ioctl(drive, &pc, mode, cmd, argp);
+	if (err != -ENOTTY)
+		goto out;
+
+	/*
+	 * skip SCSI_IOCTL_SEND_COMMAND (deprecated)
+	 * and CDROM_SEND_PACKET (legacy) ioctls
+	 */
+	if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
+		err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
+
+	if (err == -ENOTTY)
+		err = generic_ide_ioctl(drive, bdev, cmd, arg);
+
+out:
+	mutex_unlock(&ide_floppy_ioctl_mutex);
+	return err;
+}
+#endif
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index dba9ad5..1bb99b5 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -341,11 +341,28 @@ static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,
 	return drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg);
 }
 
+#ifdef CONFIG_COMPAT
+static int ide_gd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+			       unsigned int cmd, unsigned long arg)
+{
+	struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
+	ide_drive_t *drive = idkp->drive;
+
+	if (!drive->disk_ops->compat_ioctl)
+		return -ENOIOCTLCMD;
+
+	return drive->disk_ops->compat_ioctl(drive, bdev, mode, cmd, arg);
+}
+#endif
+
 static const struct block_device_operations ide_gd_ops = {
 	.owner			= THIS_MODULE,
 	.open			= ide_gd_unlocked_open,
 	.release		= ide_gd_release,
 	.ioctl			= ide_gd_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= ide_gd_compat_ioctl,
+#endif
 	.getgeo			= ide_gd_getgeo,
 	.check_events		= ide_gd_check_events,
 	.unlock_native_capacity	= ide_gd_unlock_native_capacity,
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index d48c170..0949109 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -3,11 +3,20 @@
  * IDE ioctls handling.
  */
 
+#include <linux/compat.h>
 #include <linux/export.h>
 #include <linux/hdreg.h>
 #include <linux/ide.h>
 #include <linux/slab.h>
 
+static int put_user_long(long val, unsigned long arg)
+{
+	if (in_compat_syscall())
+		return put_user(val, (compat_long_t __user *)compat_ptr(arg));
+
+	return put_user(val, (long __user *)arg);
+}
+
 static const struct ide_ioctl_devset ide_ioctl_settings[] = {
 { HDIO_GET_32BIT,	 HDIO_SET_32BIT,	&ide_devset_io_32bit  },
 { HDIO_GET_KEEPSETTINGS, HDIO_SET_KEEPSETTINGS,	&ide_devset_keepsettings },
@@ -37,7 +46,7 @@ int ide_setting_ioctl(ide_drive_t *drive, struct block_device *bdev,
 	mutex_lock(&ide_setting_mtx);
 	err = ds->get(drive);
 	mutex_unlock(&ide_setting_mtx);
-	return err >= 0 ? put_user(err, (long __user *)arg) : err;
+	return err >= 0 ? put_user_long(err, arg) : err;
 
 set_val:
 	if (bdev != bdev->bd_contains)
@@ -56,7 +65,7 @@ int ide_setting_ioctl(ide_drive_t *drive, struct block_device *bdev,
 EXPORT_SYMBOL_GPL(ide_setting_ioctl);
 
 static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
-				  unsigned long arg)
+				  void __user *argp)
 {
 	u16 *id = NULL;
 	int size = (cmd == HDIO_GET_IDENTITY) ? (ATA_ID_WORDS * 2) : 142;
@@ -77,7 +86,7 @@ static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
 	memcpy(id, drive->id, size);
 	ata_id_to_hd_driveid(id);
 
-	if (copy_to_user((void __user *)arg, id, size))
+	if (copy_to_user(argp, id, size))
 		rc = -EFAULT;
 
 	kfree(id);
@@ -87,10 +96,10 @@ static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
 
 static int ide_get_nice_ioctl(ide_drive_t *drive, unsigned long arg)
 {
-	return put_user((!!(drive->dev_flags & IDE_DFLAG_DSC_OVERLAP)
+	return put_user_long((!!(drive->dev_flags & IDE_DFLAG_DSC_OVERLAP)
 			 << IDE_NICE_DSC_OVERLAP) |
 			(!!(drive->dev_flags & IDE_DFLAG_NICE1)
-			 << IDE_NICE_1), (long __user *)arg);
+			 << IDE_NICE_1), arg);
 }
 
 static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg)
@@ -115,7 +124,7 @@ static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg)
 	return 0;
 }
 
-static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
+static int ide_cmd_ioctl(ide_drive_t *drive, void __user *argp)
 {
 	u8 *buf = NULL;
 	int bufsize = 0, err = 0;
@@ -123,7 +132,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
 	struct ide_cmd cmd;
 	struct ide_taskfile *tf = &cmd.tf;
 
-	if (NULL == (void *) arg) {
+	if (NULL == argp) {
 		struct request *rq;
 
 		rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
@@ -135,7 +144,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
 		return err;
 	}
 
-	if (copy_from_user(args, (void __user *)arg, 4))
+	if (copy_from_user(args, argp, 4))
 		return -EFAULT;
 
 	memset(&cmd, 0, sizeof(cmd));
@@ -181,19 +190,18 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
 	args[1] = tf->error;
 	args[2] = tf->nsect;
 abort:
-	if (copy_to_user((void __user *)arg, &args, 4))
+	if (copy_to_user(argp, &args, 4))
 		err = -EFAULT;
 	if (buf) {
-		if (copy_to_user((void __user *)(arg + 4), buf, bufsize))
+		if (copy_to_user((argp + 4), buf, bufsize))
 			err = -EFAULT;
 		kfree(buf);
 	}
 	return err;
 }
 
-static int ide_task_ioctl(ide_drive_t *drive, unsigned long arg)
+static int ide_task_ioctl(ide_drive_t *drive, void __user *p)
 {
-	void __user *p = (void __user *)arg;
 	int err = 0;
 	u8 args[7];
 	struct ide_cmd cmd;
@@ -237,6 +245,10 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
 		      unsigned int cmd, unsigned long arg)
 {
 	int err;
+	void __user *argp = (void __user *)arg;
+
+	if (in_compat_syscall())
+		argp = compat_ptr(arg);
 
 	err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_ioctl_settings);
 	if (err != -EOPNOTSUPP)
@@ -247,7 +259,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
 	case HDIO_GET_IDENTITY:
 		if (bdev != bdev->bd_contains)
 			return -EINVAL;
-		return ide_get_identity_ioctl(drive, cmd, arg);
+		return ide_get_identity_ioctl(drive, cmd, argp);
 	case HDIO_GET_NICE:
 		return ide_get_nice_ioctl(drive, arg);
 	case HDIO_SET_NICE:
@@ -258,6 +270,9 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
 	case HDIO_DRIVE_TASKFILE:
 		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
 			return -EACCES;
+		/* missing compat handler for HDIO_DRIVE_TASKFILE */
+		if (in_compat_syscall())
+			return -ENOTTY;
 		if (drive->media == ide_disk)
 			return ide_taskfile_ioctl(drive, arg);
 		return -ENOMSG;
@@ -265,11 +280,11 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
 	case HDIO_DRIVE_CMD:
 		if (!capable(CAP_SYS_RAWIO))
 			return -EACCES;
-		return ide_cmd_ioctl(drive, arg);
+		return ide_cmd_ioctl(drive, argp);
 	case HDIO_DRIVE_TASK:
 		if (!capable(CAP_SYS_RAWIO))
 			return -EACCES;
-		return ide_task_ioctl(drive, arg);
+		return ide_task_ioctl(drive, argp);
 	case HDIO_DRIVE_RESET:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EACCES;
@@ -277,7 +292,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
 	case HDIO_GET_BUSSTATE:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EACCES;
-		if (put_user(BUSSTATE_ON, (long __user *)arg))
+		if (put_user_long(BUSSTATE_ON, arg))
 			return -EFAULT;
 		return 0;
 	case HDIO_SET_BUSSTATE:
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index d1445d7..f2be127 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -530,7 +530,6 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
 		 */
 		if (stat == 0xff)
 			return -ENODEV;
-		touch_softlockup_watchdog();
 		touch_nmi_watchdog();
 	}
 	return -EBUSY;
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 0363d73..15c17f3 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -206,7 +206,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
 ide_devset_rw(current_speed, xfer_rate);
 ide_devset_rw_field(init_speed, init_speed);
 ide_devset_rw_flag(nice1, IDE_DFLAG_NICE1);
-ide_devset_rw_field(number, dn);
+ide_devset_ro_field(number, dn);
 
 static const struct ide_proc_devset ide_generic_settings[] = {
 	IDE_PROC_DEVSET(current_speed, 0, 70),
@@ -381,13 +381,12 @@ static ssize_t ide_settings_proc_write(struct file *file, const char __user *buf
 	return -EINVAL;
 }
 
-static const struct file_operations ide_settings_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= ide_settings_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= ide_settings_proc_write,
+static const struct proc_ops ide_settings_proc_ops = {
+	.proc_open	= ide_settings_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= ide_settings_proc_write,
 };
 
 int ide_capacity_proc_show(struct seq_file *m, void *v)
@@ -546,7 +545,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
 		if (drive->proc) {
 			ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
 			proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
-					drive->proc, &ide_settings_proc_fops,
+					drive->proc, &ide_settings_proc_ops,
 					drive);
 		}
 		sprintf(name, "ide%d/%s", (drive->name[2]-'a')/2, drive->name);
@@ -615,7 +614,7 @@ static int ide_drivers_show(struct seq_file *s, void *p)
 	return 0;
 }
 
-DEFINE_SHOW_ATTRIBUTE(ide_drivers);
+DEFINE_PROC_SHOW_ATTRIBUTE(ide_drivers);
 
 void proc_ide_create(void)
 {
@@ -624,7 +623,7 @@ void proc_ide_create(void)
 	if (!proc_ide_root)
 		return;
 
-	proc_create("drivers", 0, proc_ide_root, &ide_drivers_fops);
+	proc_create("drivers", 0, proc_ide_root, &ide_drivers_proc_ops);
 }
 
 void proc_ide_destroy(void)
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 3e748269..6f26634 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1945,11 +1945,22 @@ static int idetape_ioctl(struct block_device *bdev, fmode_t mode,
 	return err;
 }
 
+static int idetape_compat_ioctl(struct block_device *bdev, fmode_t mode,
+				unsigned int cmd, unsigned long arg)
+{
+	if (cmd == 0x0340 || cmd == 0x350)
+		arg = (unsigned long)compat_ptr(arg);
+
+	return idetape_ioctl(bdev, mode, cmd, arg);
+}
+
 static const struct block_device_operations idetape_block_ops = {
 	.owner		= THIS_MODULE,
 	.open		= idetape_open,
 	.release	= idetape_release,
 	.ioctl		= idetape_ioctl,
+	.compat_ioctl	= IS_ENABLED(CONFIG_COMPAT) ?
+				idetape_compat_ioctl : NULL,
 };
 
 static int ide_tape_probe(ide_drive_t *drive)
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index b5647e3..ea0b064 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1019,7 +1019,6 @@ static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw)
 	struct device_node *np = pmif->node;
 	const int *bidp;
 	struct ide_host *host;
-	ide_hwif_t *hwif;
 	struct ide_hw *hws[] = { hw };
 	struct ide_port_info d = pmac_port_info;
 	int rc;
@@ -1075,7 +1074,7 @@ static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw)
 		rc = -ENOMEM;
 		goto bail;
 	}
-	hwif = pmif->hwif = host->ports[0];
+	pmif->hwif = host->ports[0];
 
 	if (on_media_bay(pmif)) {
 		/* Fixup bus ID for media bay */
diff --git a/drivers/ide/qd65xx.c b/drivers/ide/qd65xx.c
index 6ce318e..ab79b62 100644
--- a/drivers/ide/qd65xx.c
+++ b/drivers/ide/qd65xx.c
@@ -299,7 +299,7 @@ static void __init qd6500_init_dev(ide_drive_t *drive)
 static void __init qd6580_init_dev(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	u16 t1, t2;
+	unsigned long t1, t2;
 	u8 base = (hwif->config_data & 0xff00) >> 8;
 	u8 config = QD_CONFIG(hwif);
 
diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
index ac6fc3f..458e72e 100644
--- a/drivers/ide/serverworks.c
+++ b/drivers/ide/serverworks.c
@@ -115,6 +115,9 @@ static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
 	struct pci_dev *dev = to_pci_dev(hwif->dev);
 	const u8 pio = drive->pio_mode - XFER_PIO_0;
 
+	if (drive->dn >= ARRAY_SIZE(drive_pci))
+		return;
+
 	pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]);
 
 	if (svwks_csb_check(dev)) {
@@ -141,6 +144,9 @@ static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
 
 	u8 ultra_enable	 = 0, ultra_timing = 0, dma_timing = 0;
 
+	if (drive->dn >= ARRAY_SIZE(drive_pci2))
+		return;
+
 	pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
 	pci_read_config_byte(dev, 0x54, &ultra_enable);
 
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
index 57eea5a..c4b20f3 100644
--- a/drivers/ide/siimage.c
+++ b/drivers/ide/siimage.c
@@ -648,8 +648,7 @@ static void sil_quirkproc(ide_drive_t *drive)
 
 static void init_iops_siimage(ide_hwif_t *hwif)
 {
-	struct pci_dev *dev = to_pci_dev(hwif->dev);
-	struct ide_host *host = pci_get_drvdata(dev);
+	struct ide_host *host = dev_get_drvdata(hwif->dev);
 
 	hwif->hwif_data = NULL;
 
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index d5e871f..b1bbf80 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -549,7 +549,7 @@ static int __init tx4939ide_probe(struct platform_device *pdev)
 		return -ENODEV;
 
 	if (!devm_request_mem_region(&pdev->dev, res->start,
-				     resource_size(res), "tx4938ide"))
+				     resource_size(res), MODNAME))
 		return -EBUSY;
 	mapbase = (unsigned long)devm_ioremap(&pdev->dev, res->start,
 					      resource_size(res));
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index 977cb00..63a3aca 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -175,8 +175,7 @@ static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
 static void via_set_drive(ide_hwif_t *hwif, ide_drive_t *drive)
 {
 	ide_drive_t *peer = ide_get_pair_dev(drive);
-	struct pci_dev *dev = to_pci_dev(hwif->dev);
-	struct ide_host *host = pci_get_drvdata(dev);
+	struct ide_host *host = dev_get_drvdata(hwif->dev);
 	struct via82cxxx_dev *vdev = host->host_priv;
 	struct ide_timing t, p;
 	unsigned int T, UT;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 7833e65..d556066 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -63,6 +63,7 @@ static struct cpuidle_driver intel_idle_driver = {
 };
 /* intel_idle.max_cstate=0 disables driver */
 static int max_cstate = CPUIDLE_STATE_MAX - 1;
+static unsigned int disabled_states_mask;
 
 static unsigned int mwait_substates;
 
@@ -1131,6 +1132,10 @@ static bool no_acpi __read_mostly;
 module_param(no_acpi, bool, 0444);
 MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list");
 
+static bool force_use_acpi __read_mostly; /* No effect if no_acpi is set. */
+module_param_named(use_acpi, force_use_acpi, bool, 0444);
+MODULE_PARM_DESC(use_acpi, "Use ACPI _CST for building the idle states list");
+
 static struct acpi_processor_power acpi_state_table __initdata;
 
 /**
@@ -1230,6 +1235,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
 		if (cx->type > ACPI_STATE_C2)
 			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
 
+		if (disabled_states_mask & BIT(cstate))
+			state->flags |= CPUIDLE_FLAG_OFF;
+
 		state->enter = intel_idle;
 		state->enter_s2idle = intel_idle_s2idle;
 	}
@@ -1258,6 +1266,8 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint)
 	return true;
 }
 #else /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+#define force_use_acpi	(false)
+
 static inline bool intel_idle_acpi_cst_extract(void) { return false; }
 static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
 static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
@@ -1460,8 +1470,10 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
 		/* Structure copy. */
 		drv->states[drv->state_count] = cpuidle_state_table[cstate];
 
-		if (icpu->use_acpi && intel_idle_off_by_default(mwait_hint) &&
-		    !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE))
+		if ((disabled_states_mask & BIT(drv->state_count)) ||
+		    ((icpu->use_acpi || force_use_acpi) &&
+		     intel_idle_off_by_default(mwait_hint) &&
+		     !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)))
 			drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF;
 
 		drv->state_count++;
@@ -1480,6 +1492,10 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
 static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
 {
 	cpuidle_poll_state_init(drv);
+
+	if (disabled_states_mask & BIT(0))
+		drv->states[0].flags |= CPUIDLE_FLAG_OFF;
+
 	drv->state_count = 1;
 
 	if (icpu)
@@ -1607,7 +1623,7 @@ static int __init intel_idle_init(void)
 	icpu = (const struct idle_cpu *)id->driver_data;
 	if (icpu) {
 		cpuidle_state_table = icpu->state_table;
-		if (icpu->use_acpi)
+		if (icpu->use_acpi || force_use_acpi)
 			intel_idle_acpi_cst_extract();
 	} else if (!intel_idle_acpi_cst_extract()) {
 		return -ENODEV;
@@ -1660,3 +1676,11 @@ device_initcall(intel_idle_init);
  * is the easiest way (currently) to continue doing that.
  */
 module_param(max_cstate, int, 0444);
+/*
+ * The positions of the bits that are set in this number are the indices of the
+ * idle states to be disabled by default (as reflected by the names of the
+ * corresponding idle state directories in sysfs, "state0", "state1" ...
+ * "state<i>" ..., where <i> is the index of the given state).
+ */
+module_param_named(states_off, disabled_states_mask, uint, 0444);
+MODULE_PARM_DESC(states_off, "Mask of disabled idle states");
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index d4ef35a..5d91a6d 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -89,13 +89,13 @@
 	  module will be called adxl372_i2c.
 
 config BMA180
-	tristate "Bosch BMA180/BMA250 3-Axis Accelerometer Driver"
+	tristate "Bosch BMA180/BMA25x 3-Axis Accelerometer Driver"
 	depends on I2C
 	select IIO_BUFFER
 	select IIO_TRIGGERED_BUFFER
 	help
 	  Say Y here if you want to build a driver for the Bosch BMA180 or
-	  BMA250 triaxial acceleration sensor.
+	  BMA25x triaxial acceleration sensor.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called bma180.
@@ -112,6 +112,22 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called bma220_spi.
 
+config BMA400
+	tristate "Bosch BMA400 3-Axis Accelerometer Driver"
+	select REGMAP
+	select BMA400_I2C if I2C
+	help
+	  Say Y here if you want to build a driver for the Bosch BMA400
+	  triaxial acceleration sensor.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called bma400_core and you will also get
+	  bma400_i2c if I2C is enabled.
+
+config BMA400_I2C
+	tristate
+	depends on BMA400
+
 config BMC150_ACCEL
 	tristate "Bosch BMC150 Accelerometer Driver"
 	select IIO_BUFFER
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 56bd021..3a051cf 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -14,6 +14,8 @@
 obj-$(CONFIG_ADXL372_SPI) += adxl372_spi.o
 obj-$(CONFIG_BMA180) += bma180.o
 obj-$(CONFIG_BMA220) += bma220_spi.o
+obj-$(CONFIG_BMA400) += bma400_core.o
+obj-$(CONFIG_BMA400_I2C) += bma400_i2c.o
 obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
 obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
 obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
index c4810c7..0f0f27a 100644
--- a/drivers/iio/accel/adis16201.c
+++ b/drivers/iio/accel/adis16201.c
@@ -233,6 +233,12 @@ static const char * const adis16201_status_error_msgs[] = {
 	[ADIS16201_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
 };
 
+static const struct adis_timeout adis16201_timeouts = {
+	.reset_ms = ADIS16201_STARTUP_DELAY_MS,
+	.sw_reset_ms = ADIS16201_STARTUP_DELAY_MS,
+	.self_test_ms = ADIS16201_STARTUP_DELAY_MS,
+};
+
 static const struct adis_data adis16201_data = {
 	.read_delay = 20,
 	.msc_ctrl_reg = ADIS16201_MSC_CTRL_REG,
@@ -241,7 +247,7 @@ static const struct adis_data adis16201_data = {
 
 	.self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN,
 	.self_test_no_autoclear = true,
-	.startup_delay = ADIS16201_STARTUP_DELAY_MS,
+	.timeouts = &adis16201_timeouts,
 
 	.status_error_msgs = adis16201_status_error_msgs,
 	.status_error_mask = BIT(ADIS16201_DIAG_STAT_SPI_FAIL_BIT) |
diff --git a/drivers/iio/accel/adis16209.c b/drivers/iio/accel/adis16209.c
index 98d77af..c6dbd24 100644
--- a/drivers/iio/accel/adis16209.c
+++ b/drivers/iio/accel/adis16209.c
@@ -243,6 +243,12 @@ static const char * const adis16209_status_error_msgs[] = {
 	[ADIS16209_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
 };
 
+static const struct adis_timeout adis16209_timeouts = {
+	.reset_ms = ADIS16209_STARTUP_DELAY_MS,
+	.self_test_ms = ADIS16209_STARTUP_DELAY_MS,
+	.sw_reset_ms = ADIS16209_STARTUP_DELAY_MS,
+};
+
 static const struct adis_data adis16209_data = {
 	.read_delay = 30,
 	.msc_ctrl_reg = ADIS16209_MSC_CTRL_REG,
@@ -251,7 +257,7 @@ static const struct adis_data adis16209_data = {
 
 	.self_test_mask = ADIS16209_MSC_CTRL_SELF_TEST_EN,
 	.self_test_no_autoclear = true,
-	.startup_delay = ADIS16209_STARTUP_DELAY_MS,
+	.timeouts = &adis16209_timeouts,
 
 	.status_error_msgs = adis16209_status_error_msgs,
 	.status_error_mask = BIT(ADIS16209_STAT_SELFTEST_FAIL_BIT) |
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 1574e46..fcd91d5 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -9,6 +9,7 @@
  * SPI is not supported by driver
  * BMA180: 7-bit I2C slave address 0x40 or 0x41
  * BMA250: 7-bit I2C slave address 0x18 or 0x19
+ * BMA254: 7-bit I2C slave address 0x18 or 0x19
  */
 
 #include <linux/module.h>
@@ -18,6 +19,7 @@
 #include <linux/of_device.h>
 #include <linux/of.h>
 #include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/iio/iio.h>
@@ -33,17 +35,20 @@
 enum chip_ids {
 	BMA180,
 	BMA250,
+	BMA254,
 };
 
 struct bma180_data;
 
 struct bma180_part_info {
+	u8 chip_id;
 	const struct iio_chan_spec *channels;
 	unsigned int num_channels;
 	const int *scale_table;
 	unsigned int num_scales;
 	const int *bw_table;
 	unsigned int num_bw;
+	int center_temp;
 
 	u8 int_reset_reg, int_reset_mask;
 	u8 sleep_reg, sleep_mask;
@@ -51,6 +56,7 @@ struct bma180_part_info {
 	u8 scale_reg, scale_mask;
 	u8 power_reg, power_mask, lowpower_val;
 	u8 int_enable_reg, int_enable_mask;
+	u8 int_map_reg, int_enable_dataready_int1_mask;
 	u8 softreset_reg;
 
 	int (*chip_config)(struct bma180_data *data);
@@ -89,6 +95,8 @@ struct bma180_part_info {
 #define BMA180_RESET_VAL	0xb6
 
 #define BMA180_ID_REG_VAL	0x03
+#define BMA250_ID_REG_VAL	0x03
+#define BMA254_ID_REG_VAL	0xfa /* 250 decimal */
 
 /* Chip power modes */
 #define BMA180_LOW_POWER	0x03
@@ -109,7 +117,26 @@ struct bma180_part_info {
 #define BMA250_INT1_DATA_MASK	BIT(0)
 #define BMA250_INT_RESET_MASK	BIT(7) /* Reset pending interrupts */
 
+#define BMA254_RANGE_REG	0x0f
+#define BMA254_BW_REG		0x10
+#define BMA254_POWER_REG	0x11
+#define BMA254_RESET_REG	0x14
+#define BMA254_INT_ENABLE_REG	0x17
+#define BMA254_INT_MAP_REG	0x1a
+#define BMA254_INT_RESET_REG	0x21
+
+#define BMA254_RANGE_MASK	GENMASK(3, 0) /* Range of accel values */
+#define BMA254_BW_MASK		GENMASK(4, 0) /* Accel bandwidth */
+#define BMA254_SUSPEND_MASK	BIT(7) /* chip will sleep */
+#define BMA254_LOWPOWER_MASK	BIT(6)
+#define BMA254_DATA_INTEN_MASK	BIT(4)
+#define BMA254_INT2_DATA_MASK	BIT(7)
+#define BMA254_INT1_DATA_MASK	BIT(0)
+#define BMA254_INT_RESET_MASK	BIT(7) /* Reset pending interrupts */
+
 struct bma180_data {
+	struct regulator *vdd_supply;
+	struct regulator *vddio_supply;
 	struct i2c_client *client;
 	struct iio_trigger *trig;
 	const struct bma180_part_info *part_info;
@@ -132,8 +159,8 @@ enum bma180_chan {
 static int bma180_bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
 static int bma180_scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
 
-static int bma250_bw_table[] = { 8, 16, 31, 63, 125, 250 }; /* Hz */
-static int bma250_scale_table[] = { 0, 0, 0, 38344, 0, 76590, 0, 0, 153180, 0,
+static int bma25x_bw_table[] = { 8, 16, 31, 63, 125, 250 }; /* Hz */
+static int bma25x_scale_table[] = { 0, 0, 0, 38344, 0, 76590, 0, 0, 153180, 0,
 	0, 0, 306458 };
 
 static int bma180_get_data_reg(struct bma180_data *data, enum bma180_chan chan)
@@ -307,8 +334,11 @@ static int bma180_chip_init(struct bma180_data *data)
 
 	if (ret < 0)
 		return ret;
-	if (ret != BMA180_ID_REG_VAL)
+	if (ret != data->part_info->chip_id) {
+		dev_err(&data->client->dev, "wrong chip ID %d expected %d\n",
+			ret, data->part_info->chip_id);
 		return -ENODEV;
+	}
 
 	ret = bma180_soft_reset(data);
 	if (ret)
@@ -355,7 +385,7 @@ static int bma180_chip_config(struct bma180_data *data)
 	return ret;
 }
 
-static int bma250_chip_config(struct bma180_data *data)
+static int bma25x_chip_config(struct bma180_data *data)
 {
 	int ret = bma180_chip_init(data);
 
@@ -367,8 +397,12 @@ static int bma250_chip_config(struct bma180_data *data)
 	ret = bma180_set_scale(data, 38344); /* 2 G */
 	if (ret)
 		goto err;
-	ret = bma180_set_bits(data, BMA250_INT_MAP_REG,
-		BMA250_INT1_DATA_MASK, 1);
+	/*
+	 * This enables dataready interrupt on the INT1 pin
+	 * FIXME: support using the INT2 pin
+	 */
+	ret = bma180_set_bits(data, data->part_info->int_map_reg,
+		data->part_info->int_enable_dataready_int1_mask, 1);
 	if (ret)
 		goto err;
 
@@ -394,7 +428,7 @@ static void bma180_chip_disable(struct bma180_data *data)
 	dev_err(&data->client->dev, "failed to disable the chip\n");
 }
 
-static void bma250_chip_disable(struct bma180_data *data)
+static void bma25x_chip_disable(struct bma180_data *data)
 {
 	if (bma180_set_new_data_intr_state(data, false))
 		goto err;
@@ -497,7 +531,7 @@ static int bma180_read_raw(struct iio_dev *indio_dev,
 			return -EINVAL;
 		}
 	case IIO_CHAN_INFO_OFFSET:
-		*val = 48; /* 0 LSB @ 24 degree C */
+		*val = data->part_info->center_temp;
 		return IIO_VAL_INT;
 	default:
 		return -EINVAL;
@@ -627,34 +661,96 @@ static const struct iio_chan_spec bma250_channels[] = {
 	IIO_CHAN_SOFT_TIMESTAMP(4),
 };
 
+static const struct iio_chan_spec bma254_channels[] = {
+	BMA180_ACC_CHANNEL(X, 12),
+	BMA180_ACC_CHANNEL(Y, 12),
+	BMA180_ACC_CHANNEL(Z, 12),
+	BMA180_TEMP_CHANNEL,
+	IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
 static const struct bma180_part_info bma180_part_info[] = {
 	[BMA180] = {
-		bma180_channels, ARRAY_SIZE(bma180_channels),
-		bma180_scale_table, ARRAY_SIZE(bma180_scale_table),
-		bma180_bw_table, ARRAY_SIZE(bma180_bw_table),
-		BMA180_CTRL_REG0, BMA180_RESET_INT,
-		BMA180_CTRL_REG0, BMA180_SLEEP,
-		BMA180_BW_TCS, BMA180_BW,
-		BMA180_OFFSET_LSB1, BMA180_RANGE,
-		BMA180_TCO_Z, BMA180_MODE_CONFIG, BMA180_LOW_POWER,
-		BMA180_CTRL_REG3, BMA180_NEW_DATA_INT,
-		BMA180_RESET,
-		bma180_chip_config,
-		bma180_chip_disable,
+		.chip_id = BMA180_ID_REG_VAL,
+		.channels = bma180_channels,
+		.num_channels = ARRAY_SIZE(bma180_channels),
+		.scale_table = bma180_scale_table,
+		.num_scales = ARRAY_SIZE(bma180_scale_table),
+		.bw_table = bma180_bw_table,
+		.num_bw = ARRAY_SIZE(bma180_bw_table),
+		.center_temp = 48, /* 0 LSB @ 24 degree C */
+		.int_reset_reg = BMA180_CTRL_REG0,
+		.int_reset_mask = BMA180_RESET_INT,
+		.sleep_reg = BMA180_CTRL_REG0,
+		.sleep_mask = BMA180_SLEEP,
+		.bw_reg = BMA180_BW_TCS,
+		.bw_mask = BMA180_BW,
+		.scale_reg = BMA180_OFFSET_LSB1,
+		.scale_mask = BMA180_RANGE,
+		.power_reg = BMA180_TCO_Z,
+		.power_mask = BMA180_MODE_CONFIG,
+		.lowpower_val = BMA180_LOW_POWER,
+		.int_enable_reg = BMA180_CTRL_REG3,
+		.int_enable_mask = BMA180_NEW_DATA_INT,
+		.softreset_reg = BMA180_RESET,
+		.chip_config = bma180_chip_config,
+		.chip_disable = bma180_chip_disable,
 	},
 	[BMA250] = {
-		bma250_channels, ARRAY_SIZE(bma250_channels),
-		bma250_scale_table, ARRAY_SIZE(bma250_scale_table),
-		bma250_bw_table, ARRAY_SIZE(bma250_bw_table),
-		BMA250_INT_RESET_REG, BMA250_INT_RESET_MASK,
-		BMA250_POWER_REG, BMA250_SUSPEND_MASK,
-		BMA250_BW_REG, BMA250_BW_MASK,
-		BMA250_RANGE_REG, BMA250_RANGE_MASK,
-		BMA250_POWER_REG, BMA250_LOWPOWER_MASK, 1,
-		BMA250_INT_ENABLE_REG, BMA250_DATA_INTEN_MASK,
-		BMA250_RESET_REG,
-		bma250_chip_config,
-		bma250_chip_disable,
+		.chip_id = BMA250_ID_REG_VAL,
+		.channels = bma250_channels,
+		.num_channels = ARRAY_SIZE(bma250_channels),
+		.scale_table = bma25x_scale_table,
+		.num_scales = ARRAY_SIZE(bma25x_scale_table),
+		.bw_table = bma25x_bw_table,
+		.num_bw = ARRAY_SIZE(bma25x_bw_table),
+		.center_temp = 48, /* 0 LSB @ 24 degree C */
+		.int_reset_reg = BMA250_INT_RESET_REG,
+		.int_reset_mask = BMA250_INT_RESET_MASK,
+		.sleep_reg = BMA250_POWER_REG,
+		.sleep_mask = BMA250_SUSPEND_MASK,
+		.bw_reg = BMA250_BW_REG,
+		.bw_mask = BMA250_BW_MASK,
+		.scale_reg = BMA250_RANGE_REG,
+		.scale_mask = BMA250_RANGE_MASK,
+		.power_reg = BMA250_POWER_REG,
+		.power_mask = BMA250_LOWPOWER_MASK,
+		.lowpower_val = 1,
+		.int_enable_reg = BMA250_INT_ENABLE_REG,
+		.int_enable_mask = BMA250_DATA_INTEN_MASK,
+		.int_map_reg = BMA250_INT_MAP_REG,
+		.int_enable_dataready_int1_mask = BMA250_INT1_DATA_MASK,
+		.softreset_reg = BMA250_RESET_REG,
+		.chip_config = bma25x_chip_config,
+		.chip_disable = bma25x_chip_disable,
+	},
+	[BMA254] = {
+		.chip_id = BMA254_ID_REG_VAL,
+		.channels = bma254_channels,
+		.num_channels = ARRAY_SIZE(bma254_channels),
+		.scale_table = bma25x_scale_table,
+		.num_scales = ARRAY_SIZE(bma25x_scale_table),
+		.bw_table = bma25x_bw_table,
+		.num_bw = ARRAY_SIZE(bma25x_bw_table),
+		.center_temp = 46, /* 0 LSB @ 23 degree C */
+		.int_reset_reg = BMA254_INT_RESET_REG,
+		.int_reset_mask = BMA254_INT_RESET_MASK,
+		.sleep_reg = BMA254_POWER_REG,
+		.sleep_mask = BMA254_SUSPEND_MASK,
+		.bw_reg = BMA254_BW_REG,
+		.bw_mask = BMA254_BW_MASK,
+		.scale_reg = BMA254_RANGE_REG,
+		.scale_mask = BMA254_RANGE_MASK,
+		.power_reg = BMA254_POWER_REG,
+		.power_mask = BMA254_LOWPOWER_MASK,
+		.lowpower_val = 1,
+		.int_enable_reg = BMA254_INT_ENABLE_REG,
+		.int_enable_mask = BMA254_DATA_INTEN_MASK,
+		.int_map_reg = BMA254_INT_MAP_REG,
+		.int_enable_dataready_int1_mask = BMA254_INT1_DATA_MASK,
+		.softreset_reg = BMA254_RESET_REG,
+		.chip_config = bma25x_chip_config,
+		.chip_disable = bma25x_chip_disable,
 	},
 };
 
@@ -712,12 +808,13 @@ static const struct iio_trigger_ops bma180_trigger_ops = {
 static int bma180_probe(struct i2c_client *client,
 		const struct i2c_device_id *id)
 {
+	struct device *dev = &client->dev;
 	struct bma180_data *data;
 	struct iio_dev *indio_dev;
 	enum chip_ids chip;
 	int ret;
 
-	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
 	if (!indio_dev)
 		return -ENOMEM;
 
@@ -725,22 +822,56 @@ static int bma180_probe(struct i2c_client *client,
 	i2c_set_clientdata(client, indio_dev);
 	data->client = client;
 	if (client->dev.of_node)
-		chip = (enum chip_ids)of_device_get_match_data(&client->dev);
+		chip = (enum chip_ids)of_device_get_match_data(dev);
 	else
 		chip = id->driver_data;
 	data->part_info = &bma180_part_info[chip];
 
-	ret = iio_read_mount_matrix(&client->dev, "mount-matrix",
+	ret = iio_read_mount_matrix(dev, "mount-matrix",
 				&data->orientation);
 	if (ret)
 		return ret;
 
+	data->vdd_supply = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(data->vdd_supply)) {
+		if (PTR_ERR(data->vdd_supply) != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get vdd regulator %d\n",
+				(int)PTR_ERR(data->vdd_supply));
+		return PTR_ERR(data->vdd_supply);
+	}
+	data->vddio_supply = devm_regulator_get(dev, "vddio");
+	if (IS_ERR(data->vddio_supply)) {
+		if (PTR_ERR(data->vddio_supply) != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get vddio regulator %d\n",
+				(int)PTR_ERR(data->vddio_supply));
+		return PTR_ERR(data->vddio_supply);
+	}
+	/* Typical voltage is 2.4 V; these are the min and max bounds */
+	ret = regulator_set_voltage(data->vdd_supply, 1620000, 3600000);
+	if (ret)
+		return ret;
+	ret = regulator_set_voltage(data->vddio_supply, 1200000, 3600000);
+	if (ret)
+		return ret;
+	ret = regulator_enable(data->vdd_supply);
+	if (ret) {
+		dev_err(dev, "Failed to enable vdd regulator: %d\n", ret);
+		return ret;
+	}
+	ret = regulator_enable(data->vddio_supply);
+	if (ret) {
+		dev_err(dev, "Failed to enable vddio regulator: %d\n", ret);
+		goto err_disable_vdd;
+	}
+	/* Wait to make sure we started up properly (3 ms at least) */
+	usleep_range(3000, 5000);
+
 	ret = data->part_info->chip_config(data);
 	if (ret < 0)
 		goto err_chip_disable;
 
 	mutex_init(&data->mutex);
-	indio_dev->dev.parent = &client->dev;
+	indio_dev->dev.parent = dev;
 	indio_dev->channels = data->part_info->channels;
 	indio_dev->num_channels = data->part_info->num_channels;
 	indio_dev->name = id->name;
@@ -755,15 +886,15 @@ static int bma180_probe(struct i2c_client *client,
 			goto err_chip_disable;
 		}
 
-		ret = devm_request_irq(&client->dev, client->irq,
+		ret = devm_request_irq(dev, client->irq,
 			iio_trigger_generic_data_rdy_poll, IRQF_TRIGGER_RISING,
 			"bma180_event", data->trig);
 		if (ret) {
-			dev_err(&client->dev, "unable to request IRQ\n");
+			dev_err(dev, "unable to request IRQ\n");
 			goto err_trigger_free;
 		}
 
-		data->trig->dev.parent = &client->dev;
+		data->trig->dev.parent = dev;
 		data->trig->ops = &bma180_trigger_ops;
 		iio_trigger_set_drvdata(data->trig, indio_dev);
 		indio_dev->trig = iio_trigger_get(data->trig);
@@ -776,13 +907,13 @@ static int bma180_probe(struct i2c_client *client,
 	ret = iio_triggered_buffer_setup(indio_dev, NULL,
 			bma180_trigger_handler, NULL);
 	if (ret < 0) {
-		dev_err(&client->dev, "unable to setup iio triggered buffer\n");
+		dev_err(dev, "unable to setup iio triggered buffer\n");
 		goto err_trigger_unregister;
 	}
 
 	ret = iio_device_register(indio_dev);
 	if (ret < 0) {
-		dev_err(&client->dev, "unable to register iio device\n");
+		dev_err(dev, "unable to register iio device\n");
 		goto err_buffer_cleanup;
 	}
 
@@ -797,6 +928,9 @@ static int bma180_probe(struct i2c_client *client,
 	iio_trigger_free(data->trig);
 err_chip_disable:
 	data->part_info->chip_disable(data);
+	regulator_disable(data->vddio_supply);
+err_disable_vdd:
+	regulator_disable(data->vdd_supply);
 
 	return ret;
 }
@@ -816,6 +950,8 @@ static int bma180_remove(struct i2c_client *client)
 	mutex_lock(&data->mutex);
 	data->part_info->chip_disable(data);
 	mutex_unlock(&data->mutex);
+	regulator_disable(data->vddio_supply);
+	regulator_disable(data->vdd_supply);
 
 	return 0;
 }
@@ -856,6 +992,7 @@ static SIMPLE_DEV_PM_OPS(bma180_pm_ops, bma180_suspend, bma180_resume);
 static const struct i2c_device_id bma180_ids[] = {
 	{ "bma180", BMA180 },
 	{ "bma250", BMA250 },
+	{ "bma254", BMA254 },
 	{ }
 };
 
@@ -870,6 +1007,10 @@ static const struct of_device_id bma180_of_match[] = {
 		.compatible = "bosch,bma250",
 		.data = (void *)BMA250
 	},
+	{
+		.compatible = "bosch,bma254",
+		.data = (void *)BMA254
+	},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, bma180_of_match);
@@ -889,5 +1030,5 @@ module_i2c_driver(bma180_driver);
 
 MODULE_AUTHOR("Kravchenko Oleksandr <x0199363@ti.com>");
 MODULE_AUTHOR("Texas Instruments, Inc.");
-MODULE_DESCRIPTION("Bosch BMA180/BMA250 triaxial acceleration sensor");
+MODULE_DESCRIPTION("Bosch BMA180/BMA25x triaxial acceleration sensor");
 MODULE_LICENSE("GPL");
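As a reading aid only, and not part of the patch above: the bma180 hunks switch the part_info tables to designated initializers and teach probe()/remove() to manage the vdd/vddio supplies. A minimal sketch of that get, enable, and unwind pattern follows; the function and variable names are illustrative, not taken from the driver.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Sketch: acquire both supplies, enable them in order, unwind on failure. */
static int example_enable_supplies(struct device *dev,
				   struct regulator **vdd,
				   struct regulator **vddio)
{
	int ret;

	*vdd = devm_regulator_get(dev, "vdd");
	if (IS_ERR(*vdd))
		return PTR_ERR(*vdd);

	*vddio = devm_regulator_get(dev, "vddio");
	if (IS_ERR(*vddio))
		return PTR_ERR(*vddio);

	ret = regulator_enable(*vdd);
	if (ret)
		return ret;

	ret = regulator_enable(*vddio);
	if (ret)
		regulator_disable(*vdd);	/* undo the first enable */

	return ret;
}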
diff --git a/drivers/iio/accel/bma400.h b/drivers/iio/accel/bma400.h
new file mode 100644
index 0000000..5ad10db
--- /dev/null
+++ b/drivers/iio/accel/bma400.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Register constants and other forward declarations needed by the bma400
+ * sources.
+ *
+ * Copyright 2019 Dan Robertson <dan@dlrobertson.com>
+ */
+
+#ifndef _BMA400_H_
+#define _BMA400_H_
+
+#include <linux/bits.h>
+#include <linux/regmap.h>
+
+/*
+ * Read-Only Registers
+ */
+
+/* Status and ID registers */
+#define BMA400_CHIP_ID_REG          0x00
+#define BMA400_ERR_REG              0x02
+#define BMA400_STATUS_REG           0x03
+
+/* Acceleration registers */
+#define BMA400_X_AXIS_LSB_REG       0x04
+#define BMA400_X_AXIS_MSB_REG       0x05
+#define BMA400_Y_AXIS_LSB_REG       0x06
+#define BMA400_Y_AXIS_MSB_REG       0x07
+#define BMA400_Z_AXIS_LSB_REG       0x08
+#define BMA400_Z_AXIS_MSB_REG       0x09
+
+/* Sensor time registers */
+#define BMA400_SENSOR_TIME0         0x0a
+#define BMA400_SENSOR_TIME1         0x0b
+#define BMA400_SENSOR_TIME2         0x0c
+
+/* Event and interrupt registers */
+#define BMA400_EVENT_REG            0x0d
+#define BMA400_INT_STAT0_REG        0x0e
+#define BMA400_INT_STAT1_REG        0x0f
+#define BMA400_INT_STAT2_REG        0x10
+
+/* Temperature register */
+#define BMA400_TEMP_DATA_REG        0x11
+
+/* FIFO length and data registers */
+#define BMA400_FIFO_LENGTH0_REG     0x12
+#define BMA400_FIFO_LENGTH1_REG     0x13
+#define BMA400_FIFO_DATA_REG        0x14
+
+/* Step count registers */
+#define BMA400_STEP_CNT0_REG        0x15
+#define BMA400_STEP_CNT1_REG        0x16
+#define BMA400_STEP_CNT3_REG        0x17
+#define BMA400_STEP_STAT_REG        0x18
+
+/*
+ * Read-write configuration registers
+ */
+#define BMA400_ACC_CONFIG0_REG      0x19
+#define BMA400_ACC_CONFIG1_REG      0x1a
+#define BMA400_ACC_CONFIG2_REG      0x1b
+#define BMA400_CMD_REG              0x7e
+
+/* Chip ID of BMA 400 devices found in the chip ID register. */
+#define BMA400_ID_REG_VAL           0x90
+
+#define BMA400_LP_OSR_SHIFT         5
+#define BMA400_NP_OSR_SHIFT         4
+#define BMA400_SCALE_SHIFT          6
+
+#define BMA400_TWO_BITS_MASK        GENMASK(1, 0)
+#define BMA400_LP_OSR_MASK          GENMASK(6, 5)
+#define BMA400_NP_OSR_MASK          GENMASK(5, 4)
+#define BMA400_ACC_ODR_MASK         GENMASK(3, 0)
+#define BMA400_ACC_SCALE_MASK       GENMASK(7, 6)
+
+#define BMA400_ACC_ODR_MIN_RAW      0x05
+#define BMA400_ACC_ODR_LP_RAW       0x06
+#define BMA400_ACC_ODR_MAX_RAW      0x0b
+
+#define BMA400_ACC_ODR_MAX_HZ       800
+#define BMA400_ACC_ODR_MIN_WHOLE_HZ 25
+#define BMA400_ACC_ODR_MIN_HZ       12
+
+#define BMA400_SCALE_MIN            38357
+#define BMA400_SCALE_MAX            306864
+
+#define BMA400_NUM_REGULATORS       2
+#define BMA400_VDD_REGULATOR        0
+#define BMA400_VDDIO_REGULATOR      1
+
+extern const struct regmap_config bma400_regmap_config;
+
+int bma400_probe(struct device *dev, struct regmap *regmap, const char *name);
+
+int bma400_remove(struct device *dev);
+
+#endif
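Side note, not part of the patch: the mask constants above are contiguous GENMASK() fields, so the open-coded shift-and-mask that bma400_core.c performs on ACC_CONFIG1 could equivalently be written with FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>. A hypothetical sketch:

#include <linux/bitfield.h>

/* Equivalent, hypothetical helpers for the normal-mode OSR field. */
static inline unsigned int bma400_np_osr_get(unsigned int acc_config1)
{
	return FIELD_GET(BMA400_NP_OSR_MASK, acc_config1);
}

static inline unsigned int bma400_np_osr_set(unsigned int acc_config1,
					     unsigned int osr)
{
	return (acc_config1 & ~BMA400_NP_OSR_MASK) |
	       FIELD_PREP(BMA400_NP_OSR_MASK, osr);
}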
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
new file mode 100644
index 0000000..cc77f89
--- /dev/null
+++ b/drivers/iio/accel/bma400_core.c
@@ -0,0 +1,853 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Core IIO driver for Bosch BMA400 triaxial acceleration sensor.
+ *
+ * Copyright 2019 Dan Robertson <dan@dlrobertson.com>
+ *
+ * TODO:
+ *  - Support for power management
+ *  - Support events and interrupts
+ *  - Create channel for step count
+ *  - Create channel for sensor time
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include "bma400.h"
+
+/*
+ * The G-range selection may be one of 2g, 4g, 8g, or 16g. The scale may
+ * be selected with the acc_range bits of the ACC_CONFIG1 register.
+ * NB: This buffer is populated in the device init.
+ */
+static int bma400_scales[8];
+
+/*
+ * See the ACC_CONFIG1 section of the datasheet.
+ * NB: This buffer is populated in the device init.
+ */
+static int bma400_sample_freqs[14];
+
+static const int bma400_osr_range[] = { 0, 1, 3 };
+
+/* See the ACC_CONFIG0 section of the datasheet */
+enum bma400_power_mode {
+	POWER_MODE_SLEEP   = 0x00,
+	POWER_MODE_LOW     = 0x01,
+	POWER_MODE_NORMAL  = 0x02,
+	POWER_MODE_INVALID = 0x03,
+};
+
+struct bma400_sample_freq {
+	int hz;
+	int uhz;
+};
+
+struct bma400_data {
+	struct device *dev;
+	struct regmap *regmap;
+	struct regulator_bulk_data regulators[BMA400_NUM_REGULATORS];
+	struct mutex mutex; /* data register lock */
+	struct iio_mount_matrix orientation;
+	enum bma400_power_mode power_mode;
+	struct bma400_sample_freq sample_freq;
+	int oversampling_ratio;
+	int scale;
+};
+
+static bool bma400_is_writable_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case BMA400_CHIP_ID_REG:
+	case BMA400_ERR_REG:
+	case BMA400_STATUS_REG:
+	case BMA400_X_AXIS_LSB_REG:
+	case BMA400_X_AXIS_MSB_REG:
+	case BMA400_Y_AXIS_LSB_REG:
+	case BMA400_Y_AXIS_MSB_REG:
+	case BMA400_Z_AXIS_LSB_REG:
+	case BMA400_Z_AXIS_MSB_REG:
+	case BMA400_SENSOR_TIME0:
+	case BMA400_SENSOR_TIME1:
+	case BMA400_SENSOR_TIME2:
+	case BMA400_EVENT_REG:
+	case BMA400_INT_STAT0_REG:
+	case BMA400_INT_STAT1_REG:
+	case BMA400_INT_STAT2_REG:
+	case BMA400_TEMP_DATA_REG:
+	case BMA400_FIFO_LENGTH0_REG:
+	case BMA400_FIFO_LENGTH1_REG:
+	case BMA400_FIFO_DATA_REG:
+	case BMA400_STEP_CNT0_REG:
+	case BMA400_STEP_CNT1_REG:
+	case BMA400_STEP_CNT3_REG:
+	case BMA400_STEP_STAT_REG:
+		return false;
+	default:
+		return true;
+	}
+}
+
+static bool bma400_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case BMA400_ERR_REG:
+	case BMA400_STATUS_REG:
+	case BMA400_X_AXIS_LSB_REG:
+	case BMA400_X_AXIS_MSB_REG:
+	case BMA400_Y_AXIS_LSB_REG:
+	case BMA400_Y_AXIS_MSB_REG:
+	case BMA400_Z_AXIS_LSB_REG:
+	case BMA400_Z_AXIS_MSB_REG:
+	case BMA400_SENSOR_TIME0:
+	case BMA400_SENSOR_TIME1:
+	case BMA400_SENSOR_TIME2:
+	case BMA400_EVENT_REG:
+	case BMA400_INT_STAT0_REG:
+	case BMA400_INT_STAT1_REG:
+	case BMA400_INT_STAT2_REG:
+	case BMA400_TEMP_DATA_REG:
+	case BMA400_FIFO_LENGTH0_REG:
+	case BMA400_FIFO_LENGTH1_REG:
+	case BMA400_FIFO_DATA_REG:
+	case BMA400_STEP_CNT0_REG:
+	case BMA400_STEP_CNT1_REG:
+	case BMA400_STEP_CNT3_REG:
+	case BMA400_STEP_STAT_REG:
+		return true;
+	default:
+		return false;
+	}
+}
+
+const struct regmap_config bma400_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = BMA400_CMD_REG,
+	.cache_type = REGCACHE_RBTREE,
+	.writeable_reg = bma400_is_writable_reg,
+	.volatile_reg = bma400_is_volatile_reg,
+};
+EXPORT_SYMBOL(bma400_regmap_config);
+
+static const struct iio_mount_matrix *
+bma400_accel_get_mount_matrix(const struct iio_dev *indio_dev,
+			      const struct iio_chan_spec *chan)
+{
+	struct bma400_data *data = iio_priv(indio_dev);
+
+	return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info bma400_ext_info[] = {
+	IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bma400_accel_get_mount_matrix),
+	{ }
+};
+
+#define BMA400_ACC_CHANNEL(_axis) { \
+	.type = IIO_ACCEL, \
+	.modified = 1, \
+	.channel2 = IIO_MOD_##_axis, \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+		BIT(IIO_CHAN_INFO_SCALE) | \
+		BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+	.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+		BIT(IIO_CHAN_INFO_SCALE) | \
+		BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+	.ext_info = bma400_ext_info, \
+}
+
+static const struct iio_chan_spec bma400_channels[] = {
+	BMA400_ACC_CHANNEL(X),
+	BMA400_ACC_CHANNEL(Y),
+	BMA400_ACC_CHANNEL(Z),
+	{
+		.type = IIO_TEMP,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+	},
+};
+
+static int bma400_get_temp_reg(struct bma400_data *data, int *val, int *val2)
+{
+	unsigned int raw_temp;
+	int host_temp;
+	int ret;
+
+	if (data->power_mode == POWER_MODE_SLEEP)
+		return -EBUSY;
+
+	ret = regmap_read(data->regmap, BMA400_TEMP_DATA_REG, &raw_temp);
+	if (ret)
+		return ret;
+
+	host_temp = sign_extend32(raw_temp, 7);
+	/*
+	 * The formula for the TEMP_DATA register in the datasheet
+	 * is: x * 0.5 + 23
+	 */
+	*val = (host_temp >> 1) + 23;
+	*val2 = (host_temp & 0x1) * 500000;
+	return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int bma400_get_accel_reg(struct bma400_data *data,
+				const struct iio_chan_spec *chan,
+				int *val)
+{
+	__le16 raw_accel;
+	int lsb_reg;
+	int ret;
+
+	if (data->power_mode == POWER_MODE_SLEEP)
+		return -EBUSY;
+
+	switch (chan->channel2) {
+	case IIO_MOD_X:
+		lsb_reg = BMA400_X_AXIS_LSB_REG;
+		break;
+	case IIO_MOD_Y:
+		lsb_reg = BMA400_Y_AXIS_LSB_REG;
+		break;
+	case IIO_MOD_Z:
+		lsb_reg = BMA400_Z_AXIS_LSB_REG;
+		break;
+	default:
+		dev_err(data->dev, "invalid axis channel modifier\n");
+		return -EINVAL;
+	}
+
+	/* bulk read two registers, with the base being the LSB register */
+	ret = regmap_bulk_read(data->regmap, lsb_reg, &raw_accel,
+			       sizeof(raw_accel));
+	if (ret)
+		return ret;
+
+	*val = sign_extend32(le16_to_cpu(raw_accel), 11);
+	return IIO_VAL_INT;
+}
+
+static void bma400_output_data_rate_from_raw(int raw, unsigned int *val,
+					     unsigned int *val2)
+{
+	*val = BMA400_ACC_ODR_MAX_HZ >> (BMA400_ACC_ODR_MAX_RAW - raw);
+	if (raw > BMA400_ACC_ODR_MIN_RAW)
+		*val2 = 0;
+	else
+		*val2 = 500000;
+}
+
+static int bma400_get_accel_output_data_rate(struct bma400_data *data)
+{
+	unsigned int val;
+	unsigned int odr;
+	int ret;
+
+	switch (data->power_mode) {
+	case POWER_MODE_LOW:
+		/*
+		 * Runs at a fixed rate in low-power mode. See section 4.3
+		 * in the datasheet.
+		 */
+		bma400_output_data_rate_from_raw(BMA400_ACC_ODR_LP_RAW,
+						 &data->sample_freq.hz,
+						 &data->sample_freq.uhz);
+		return 0;
+	case POWER_MODE_NORMAL:
+		/*
+		 * In normal mode the ODR can be found in the ACC_CONFIG1
+		 * register.
+		 */
+		ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &val);
+		if (ret)
+			goto error;
+
+		odr = val & BMA400_ACC_ODR_MASK;
+		if (odr < BMA400_ACC_ODR_MIN_RAW ||
+		    odr > BMA400_ACC_ODR_MAX_RAW) {
+			ret = -EINVAL;
+			goto error;
+		}
+
+		bma400_output_data_rate_from_raw(odr, &data->sample_freq.hz,
+						 &data->sample_freq.uhz);
+		return 0;
+	case POWER_MODE_SLEEP:
+		data->sample_freq.hz = 0;
+		data->sample_freq.uhz = 0;
+		return 0;
+	default:
+		ret = 0;
+		goto error;
+	}
+error:
+	data->sample_freq.hz = -1;
+	data->sample_freq.uhz = -1;
+	return ret;
+}
+
+static int bma400_set_accel_output_data_rate(struct bma400_data *data,
+					     int hz, int uhz)
+{
+	unsigned int idx;
+	unsigned int odr;
+	unsigned int val;
+	int ret;
+
+	if (hz >= BMA400_ACC_ODR_MIN_WHOLE_HZ) {
+		if (uhz || hz > BMA400_ACC_ODR_MAX_HZ)
+			return -EINVAL;
+
+		/* Note this works because MIN_WHOLE_HZ is odd */
+		idx = __ffs(hz);
+
+		if (hz >> idx != BMA400_ACC_ODR_MIN_WHOLE_HZ)
+			return -EINVAL;
+
+		idx += BMA400_ACC_ODR_MIN_RAW + 1;
+	} else if (hz == BMA400_ACC_ODR_MIN_HZ && uhz == 500000) {
+		idx = BMA400_ACC_ODR_MIN_RAW;
+	} else {
+		return -EINVAL;
+	}
+
+	ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &val);
+	if (ret)
+		return ret;
+
+	/* preserve the range and normal mode osr */
+	odr = (~BMA400_ACC_ODR_MASK & val) | idx;
+
+	ret = regmap_write(data->regmap, BMA400_ACC_CONFIG1_REG, odr);
+	if (ret)
+		return ret;
+
+	bma400_output_data_rate_from_raw(idx, &data->sample_freq.hz,
+					 &data->sample_freq.uhz);
+	return 0;
+}
+
+static int bma400_get_accel_oversampling_ratio(struct bma400_data *data)
+{
+	unsigned int val;
+	unsigned int osr;
+	int ret;
+
+	/*
+	 * The oversampling ratio is stored in a different register
+	 * based on the power-mode. In normal mode the OSR is stored
+	 * in ACC_CONFIG1. In low-power mode it is stored in
+	 * ACC_CONFIG0.
+	 */
+	switch (data->power_mode) {
+	case POWER_MODE_LOW:
+		ret = regmap_read(data->regmap, BMA400_ACC_CONFIG0_REG, &val);
+		if (ret) {
+			data->oversampling_ratio = -1;
+			return ret;
+		}
+
+		osr = (val & BMA400_LP_OSR_MASK) >> BMA400_LP_OSR_SHIFT;
+
+		data->oversampling_ratio = osr;
+		return 0;
+	case POWER_MODE_NORMAL:
+		ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &val);
+		if (ret) {
+			data->oversampling_ratio = -1;
+			return ret;
+		}
+
+		osr = (val & BMA400_NP_OSR_MASK) >> BMA400_NP_OSR_SHIFT;
+
+		data->oversampling_ratio = osr;
+		return 0;
+	case POWER_MODE_SLEEP:
+		data->oversampling_ratio = 0;
+		return 0;
+	default:
+		data->oversampling_ratio = -1;
+		return -EINVAL;
+	}
+}
+
+static int bma400_set_accel_oversampling_ratio(struct bma400_data *data,
+					       int val)
+{
+	unsigned int acc_config;
+	int ret;
+
+	if (val & ~BMA400_TWO_BITS_MASK)
+		return -EINVAL;
+
+	/*
+	 * The oversampling ratio is stored in a different register
+	 * based on the power-mode.
+	 */
+	switch (data->power_mode) {
+	case POWER_MODE_LOW:
+		ret = regmap_read(data->regmap, BMA400_ACC_CONFIG0_REG,
+				  &acc_config);
+		if (ret)
+			return ret;
+
+		ret = regmap_write(data->regmap, BMA400_ACC_CONFIG0_REG,
+				   (acc_config & ~BMA400_LP_OSR_MASK) |
+				   (val << BMA400_LP_OSR_SHIFT));
+		if (ret) {
+			dev_err(data->dev, "Failed to write out OSR\n");
+			return ret;
+		}
+
+		data->oversampling_ratio = val;
+		return 0;
+	case POWER_MODE_NORMAL:
+		ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG,
+				  &acc_config);
+		if (ret)
+			return ret;
+
+		ret = regmap_write(data->regmap, BMA400_ACC_CONFIG1_REG,
+				   (acc_config & ~BMA400_NP_OSR_MASK) |
+				   (val << BMA400_NP_OSR_SHIFT));
+		if (ret) {
+			dev_err(data->dev, "Failed to write out OSR\n");
+			return ret;
+		}
+
+		data->oversampling_ratio = val;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+	return ret;
+}
+
+static int bma400_accel_scale_to_raw(struct bma400_data *data,
+				     unsigned int val)
+{
+	int raw;
+
+	if (val == 0)
+		return -EINVAL;
+
+	/* Note this works because BMA400_SCALE_MIN is odd */
+	raw = __ffs(val);
+
+	if (val >> raw != BMA400_SCALE_MIN)
+		return -EINVAL;
+
+	return raw;
+}
+
+static int bma400_get_accel_scale(struct bma400_data *data)
+{
+	unsigned int raw_scale;
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &val);
+	if (ret)
+		return ret;
+
+	raw_scale = (val & BMA400_ACC_SCALE_MASK) >> BMA400_SCALE_SHIFT;
+	if (raw_scale > BMA400_TWO_BITS_MASK)
+		return -EINVAL;
+
+	data->scale = BMA400_SCALE_MIN << raw_scale;
+
+	return 0;
+}
+
+static int bma400_set_accel_scale(struct bma400_data *data, unsigned int val)
+{
+	unsigned int acc_config;
+	int raw;
+	int ret;
+
+	ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &acc_config);
+	if (ret)
+		return ret;
+
+	raw = bma400_accel_scale_to_raw(data, val);
+	if (raw < 0)
+		return raw;
+
+	ret = regmap_write(data->regmap, BMA400_ACC_CONFIG1_REG,
+			   (acc_config & ~BMA400_ACC_SCALE_MASK) |
+			   (raw << BMA400_SCALE_SHIFT));
+	if (ret)
+		return ret;
+
+	data->scale = val;
+	return 0;
+}
+
+static int bma400_get_power_mode(struct bma400_data *data)
+{
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(data->regmap, BMA400_STATUS_REG, &val);
+	if (ret) {
+		dev_err(data->dev, "Failed to read status register\n");
+		return ret;
+	}
+
+	data->power_mode = (val >> 1) & BMA400_TWO_BITS_MASK;
+	return 0;
+}
+
+static int bma400_set_power_mode(struct bma400_data *data,
+				 enum bma400_power_mode mode)
+{
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(data->regmap, BMA400_ACC_CONFIG0_REG, &val);
+	if (ret)
+		return ret;
+
+	if (data->power_mode == mode)
+		return 0;
+
+	if (mode == POWER_MODE_INVALID)
+		return -EINVAL;
+
+	/* Preserve the low-power oversample ratio etc */
+	ret = regmap_write(data->regmap, BMA400_ACC_CONFIG0_REG,
+			   mode | (val & ~BMA400_TWO_BITS_MASK));
+	if (ret) {
+		dev_err(data->dev, "Failed to write to power-mode\n");
+		return ret;
+	}
+
+	data->power_mode = mode;
+
+	/*
+	 * Update our cached osr and odr based on the new
+	 * power-mode.
+	 */
+	bma400_get_accel_output_data_rate(data);
+	bma400_get_accel_oversampling_ratio(data);
+	return 0;
+}
+
+static void bma400_init_tables(void)
+{
+	int raw;
+	int i;
+
+	for (i = 0; i + 1 < ARRAY_SIZE(bma400_sample_freqs); i += 2) {
+		raw = (i / 2) + 5;
+		bma400_output_data_rate_from_raw(raw, &bma400_sample_freqs[i],
+						 &bma400_sample_freqs[i + 1]);
+	}
+
+	for (i = 0; i + 1 < ARRAY_SIZE(bma400_scales); i += 2) {
+		raw = i / 2;
+		bma400_scales[i] = 0;
+		bma400_scales[i + 1] = BMA400_SCALE_MIN << raw;
+	}
+}
+
+static int bma400_init(struct bma400_data *data)
+{
+	unsigned int val;
+	int ret;
+
+	/* Try to read chip_id register. It must return 0x90. */
+	ret = regmap_read(data->regmap, BMA400_CHIP_ID_REG, &val);
+	if (ret) {
+		dev_err(data->dev, "Failed to read chip id register\n");
+		goto out;
+	}
+
+	if (val != BMA400_ID_REG_VAL) {
+		dev_err(data->dev, "Chip ID mismatch\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	data->regulators[BMA400_VDD_REGULATOR].supply = "vdd";
+	data->regulators[BMA400_VDDIO_REGULATOR].supply = "vddio";
+	ret = devm_regulator_bulk_get(data->dev,
+				      ARRAY_SIZE(data->regulators),
+				      data->regulators);
+	if (ret) {
+		if (ret != -EPROBE_DEFER)
+			dev_err(data->dev,
+				"Failed to get regulators: %d\n",
+				ret);
+
+		goto out;
+	}
+	ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
+				    data->regulators);
+	if (ret) {
+		dev_err(data->dev, "Failed to enable regulators: %d\n",
+			ret);
+		goto out;
+	}
+
+	ret = bma400_get_power_mode(data);
+	if (ret) {
+		dev_err(data->dev, "Failed to get the initial power-mode\n");
+		goto err_reg_disable;
+	}
+
+	if (data->power_mode != POWER_MODE_NORMAL) {
+		ret = bma400_set_power_mode(data, POWER_MODE_NORMAL);
+		if (ret) {
+			dev_err(data->dev, "Failed to wake up the device\n");
+			goto err_reg_disable;
+		}
+		/*
+		 * TODO: The datasheet waits 1500us here in the example, but
+		 * lists 2/ODR as the wakeup time.
+		 */
+		usleep_range(1500, 2000);
+	}
+
+	bma400_init_tables();
+
+	ret = bma400_get_accel_output_data_rate(data);
+	if (ret)
+		goto err_reg_disable;
+
+	ret = bma400_get_accel_oversampling_ratio(data);
+	if (ret)
+		goto err_reg_disable;
+
+	ret = bma400_get_accel_scale(data);
+	if (ret)
+		goto err_reg_disable;
+
+	/*
+	 * Once the interrupt engine is supported we might use the
+	 * data_src_reg, but for now ensure this is set to the
+	 * variable ODR filter selectable by the sample frequency
+	 * channel.
+	 */
+	return regmap_write(data->regmap, BMA400_ACC_CONFIG2_REG, 0x00);
+
+err_reg_disable:
+	regulator_bulk_disable(ARRAY_SIZE(data->regulators),
+			       data->regulators);
+out:
+	return ret;
+}
+
+static int bma400_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan, int *val,
+			   int *val2, long mask)
+{
+	struct bma400_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_PROCESSED:
+		mutex_lock(&data->mutex);
+		ret = bma400_get_temp_reg(data, val, val2);
+		mutex_unlock(&data->mutex);
+		return ret;
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&data->mutex);
+		ret = bma400_get_accel_reg(data, chan, val);
+		mutex_unlock(&data->mutex);
+		return ret;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		switch (chan->type) {
+		case IIO_ACCEL:
+			if (data->sample_freq.hz < 0)
+				return -EINVAL;
+
+			*val = data->sample_freq.hz;
+			*val2 = data->sample_freq.uhz;
+			return IIO_VAL_INT_PLUS_MICRO;
+		case IIO_TEMP:
+			/*
+			 * Runs at a fixed sampling frequency. See Section 4.4
+			 * of the datasheet.
+			 */
+			*val = 6;
+			*val2 = 250000;
+			return IIO_VAL_INT_PLUS_MICRO;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_SCALE:
+		*val = 0;
+		*val2 = data->scale;
+		return IIO_VAL_INT_PLUS_MICRO;
+	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+		/*
+		 * TODO: We could avoid this logic and the -EINVAL return here if
+		 * we set both the low-power and normal mode OSR registers when
+		 * we configure the device.
+		 */
+		if (data->oversampling_ratio < 0)
+			return -EINVAL;
+
+		*val = data->oversampling_ratio;
+		return IIO_VAL_INT;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int bma400_read_avail(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     const int **vals, int *type, int *length,
+			     long mask)
+{
+	switch (mask) {
+	case IIO_CHAN_INFO_SCALE:
+		*type = IIO_VAL_INT_PLUS_MICRO;
+		*vals = bma400_scales;
+		*length = ARRAY_SIZE(bma400_scales);
+		return IIO_AVAIL_LIST;
+	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+		*type = IIO_VAL_INT;
+		*vals = bma400_osr_range;
+		*length = ARRAY_SIZE(bma400_osr_range);
+		return IIO_AVAIL_RANGE;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		*type = IIO_VAL_INT_PLUS_MICRO;
+		*vals = bma400_sample_freqs;
+		*length = ARRAY_SIZE(bma400_sample_freqs);
+		return IIO_AVAIL_LIST;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int bma400_write_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan, int val, int val2,
+			    long mask)
+{
+	struct bma400_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		/*
+		 * The sample frequency is read-only for the temperature
+		 * register and a fixed value in low-power mode.
+		 */
+		if (chan->type != IIO_ACCEL)
+			return -EINVAL;
+
+		mutex_lock(&data->mutex);
+		ret = bma400_set_accel_output_data_rate(data, val, val2);
+		mutex_unlock(&data->mutex);
+		return ret;
+	case IIO_CHAN_INFO_SCALE:
+		if (val != 0 ||
+		    val2 < BMA400_SCALE_MIN || val2 > BMA400_SCALE_MAX)
+			return -EINVAL;
+
+		mutex_lock(&data->mutex);
+		ret = bma400_set_accel_scale(data, val2);
+		mutex_unlock(&data->mutex);
+		return ret;
+	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+		mutex_lock(&data->mutex);
+		ret = bma400_set_accel_oversampling_ratio(data, val);
+		mutex_unlock(&data->mutex);
+		return ret;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int bma400_write_raw_get_fmt(struct iio_dev *indio_dev,
+				    struct iio_chan_spec const *chan,
+				    long mask)
+{
+	switch (mask) {
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		return IIO_VAL_INT_PLUS_MICRO;
+	case IIO_CHAN_INFO_SCALE:
+		return IIO_VAL_INT_PLUS_MICRO;
+	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+		return IIO_VAL_INT;
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct iio_info bma400_info = {
+	.read_raw          = bma400_read_raw,
+	.read_avail        = bma400_read_avail,
+	.write_raw         = bma400_write_raw,
+	.write_raw_get_fmt = bma400_write_raw_get_fmt,
+};
+
+int bma400_probe(struct device *dev, struct regmap *regmap, const char *name)
+{
+	struct iio_dev *indio_dev;
+	struct bma400_data *data;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	data->regmap = regmap;
+	data->dev = dev;
+
+	ret = bma400_init(data);
+	if (ret)
+		return ret;
+
+	ret = iio_read_mount_matrix(dev, "mount-matrix", &data->orientation);
+	if (ret)
+		return ret;
+
+	mutex_init(&data->mutex);
+	indio_dev->dev.parent = dev;
+	indio_dev->name = name;
+	indio_dev->info = &bma400_info;
+	indio_dev->channels = bma400_channels;
+	indio_dev->num_channels = ARRAY_SIZE(bma400_channels);
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	dev_set_drvdata(dev, indio_dev);
+
+	return iio_device_register(indio_dev);
+}
+EXPORT_SYMBOL(bma400_probe);
+
+int bma400_remove(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct bma400_data *data = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&data->mutex);
+	ret = bma400_set_power_mode(data, POWER_MODE_SLEEP);
+	mutex_unlock(&data->mutex);
+
+	regulator_bulk_disable(ARRAY_SIZE(data->regulators),
+			       data->regulators);
+
+	iio_device_unregister(indio_dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(bma400_remove);
+
+MODULE_AUTHOR("Dan Robertson <dan@dlrobertson.com>");
+MODULE_DESCRIPTION("Bosch BMA400 triaxial acceleration sensor core");
+MODULE_LICENSE("GPL");
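Purely as a worked example (user-space arithmetic, not driver code): the core above reports the acceleration scale as BMA400_SCALE_MIN << acc_range via IIO_VAL_INT_PLUS_MICRO, and decodes TEMP_DATA as 0.5 degC per LSB centred on 23 degC. A small standalone check of both conversions, with the constant taken from bma400.h:

#include <stdint.h>
#include <stdio.h>

#define BMA400_SCALE_MIN 38357	/* micro-part reported for acc_range = 0 */

int main(void)
{
	int range;

	/* acc_range 0..3 selects 2g/4g/8g/16g; the reported scale doubles */
	for (range = 0; range < 4; range++)
		printf("acc_range %d -> scale 0.%06d\n",
		       range, BMA400_SCALE_MIN << range);

	/* TEMP_DATA: signed 8-bit, 0.5 degC per LSB, 0 LSB == 23 degC */
	int8_t temp_reg = -1;
	printf("temp = %.1f degC\n", 23.0 + temp_reg * 0.5);	/* 22.5 */
	return 0;
}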
diff --git a/drivers/iio/accel/bma400_i2c.c b/drivers/iio/accel/bma400_i2c.c
new file mode 100644
index 0000000..9dcb7cc
--- /dev/null
+++ b/drivers/iio/accel/bma400_i2c.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * I2C IIO driver for Bosch BMA400 triaxial acceleration sensor.
+ *
+ * Copyright 2019 Dan Robertson <dan@dlrobertson.com>
+ *
+ * I2C address is either 0x14 or 0x15 depending on SDO
+ */
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "bma400.h"
+
+static int bma400_i2c_probe(struct i2c_client *client,
+			    const struct i2c_device_id *id)
+{
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_i2c(client, &bma400_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&client->dev, "failed to create regmap\n");
+		return PTR_ERR(regmap);
+	}
+
+	return bma400_probe(&client->dev, regmap, id->name);
+}
+
+static int bma400_i2c_remove(struct i2c_client *client)
+{
+	return bma400_remove(&client->dev);
+}
+
+static const struct i2c_device_id bma400_i2c_ids[] = {
+	{ "bma400", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, bma400_i2c_ids);
+
+static const struct of_device_id bma400_of_i2c_match[] = {
+	{ .compatible = "bosch,bma400" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, bma400_of_i2c_match);
+
+static struct i2c_driver bma400_i2c_driver = {
+	.driver = {
+		.name = "bma400",
+		.of_match_table = bma400_of_i2c_match,
+	},
+	.probe    = bma400_i2c_probe,
+	.remove   = bma400_i2c_remove,
+	.id_table = bma400_i2c_ids,
+};
+
+module_i2c_driver(bma400_i2c_driver);
+
+MODULE_AUTHOR("Dan Robertson <dan@dlrobertson.com>");
+MODULE_DESCRIPTION("Bosch BMA400 triaxial acceleration sensor (I2C)");
+MODULE_LICENSE("GPL");
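For illustration only, since no such file exists in this series: the reason bma400_probe() takes a (dev, regmap, name) triple is that another bus front end can be equally small. A hypothetical SPI shim is sketched below; a real one would additionally have to handle the BMA400's SPI read protocol, which this naive regmap setup does not.

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/err.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>

#include "bma400.h"

static int bma400_spi_probe(struct spi_device *spi)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_spi(spi, &bma400_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return bma400_probe(&spi->dev, regmap, "bma400");
}

static int bma400_spi_remove(struct spi_device *spi)
{
	return bma400_remove(&spi->dev);
}

static struct spi_driver bma400_spi_driver = {
	.driver = {
		.name = "bma400",
	},
	.probe = bma400_spi_probe,
	.remove = bma400_spi_remove,
};
module_spi_driver(bma400_spi_driver);

MODULE_DESCRIPTION("Hypothetical BMA400 SPI front end (illustration only)");
MODULE_LICENSE("GPL");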
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index 65f85fa..68e847c 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -18,7 +18,6 @@
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
 #include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/platform_data/cros_ec_commands.h>
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index fee535d..c9924a6 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -130,6 +130,7 @@ struct kxcjk1013_data {
 	struct i2c_client *client;
 	struct iio_trigger *dready_trig;
 	struct iio_trigger *motion_trig;
+	struct iio_mount_matrix orientation;
 	struct mutex mutex;
 	s16 buffer[8];
 	u8 odr_bits;
@@ -983,6 +984,20 @@ static const struct iio_event_spec kxcjk1013_event = {
 				 BIT(IIO_EV_INFO_PERIOD)
 };
 
+static const struct iio_mount_matrix *
+kxcjk1013_get_mount_matrix(const struct iio_dev *indio_dev,
+			   const struct iio_chan_spec *chan)
+{
+	struct kxcjk1013_data *data = iio_priv(indio_dev);
+
+	return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info kxcjk1013_ext_info[] = {
+	IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, kxcjk1013_get_mount_matrix),
+	{ }
+};
+
 #define KXCJK1013_CHANNEL(_axis) {					\
 	.type = IIO_ACCEL,						\
 	.modified = 1,							\
@@ -999,6 +1014,7 @@ static const struct iio_event_spec kxcjk1013_event = {
 		.endianness = IIO_LE,					\
 	},								\
 	.event_spec = &kxcjk1013_event,				\
+	.ext_info = kxcjk1013_ext_info,					\
 	.num_event_specs = 1						\
 }
 
@@ -1267,11 +1283,18 @@ static int kxcjk1013_probe(struct i2c_client *client,
 	data->client = client;
 
 	pdata = dev_get_platdata(&client->dev);
-	if (pdata)
+	if (pdata) {
 		data->active_high_intr = pdata->active_high_intr;
-	else
+		data->orientation = pdata->orientation;
+	} else {
 		data->active_high_intr = true; /* default polarity */
 
+		ret = iio_read_mount_matrix(&client->dev, "mount-matrix",
+					    &data->orientation);
+		if (ret)
+			return ret;
+	}
+
 	if (id) {
 		data->chipset = (enum kx_chipset)(id->driver_data);
 		name = id->name;
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index af09943..5b13e29 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -64,7 +64,7 @@ enum st_accel_type {
 * struct st_sensors_platform_data - default accel platform data
 * @drdy_int_pin: default accel DRDY is available on INT1 pin.
 */
-static const struct st_sensors_platform_data default_accel_pdata = {
+static __maybe_unused const struct st_sensors_platform_data default_accel_pdata = {
 	.drdy_int_pin = 1,
 };
 
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 50fa0fc..633955d 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -18,7 +18,6 @@
 #include <linux/iio/common/st_sensors_i2c.h>
 #include "st_accel.h"
 
-#ifdef CONFIG_OF
 static const struct of_device_id st_accel_of_match[] = {
 	{
 		/* An older compatible */
@@ -108,9 +107,6 @@ static const struct of_device_id st_accel_of_match[] = {
 	{},
 };
 MODULE_DEVICE_TABLE(of, st_accel_of_match);
-#else
-#define st_accel_of_match NULL
-#endif
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id st_accel_acpi_match[] = {
@@ -119,8 +115,6 @@ static const struct acpi_device_id st_accel_acpi_match[] = {
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, st_accel_acpi_match);
-#else
-#define st_accel_acpi_match NULL
 #endif
 
 static const struct i2c_device_id st_accel_id_table[] = {
@@ -195,7 +189,7 @@ static int st_accel_i2c_remove(struct i2c_client *client)
 static struct i2c_driver st_accel_driver = {
 	.driver = {
 		.name = "st-accel-i2c",
-		.of_match_table = of_match_ptr(st_accel_of_match),
+		.of_match_table = st_accel_of_match,
 		.acpi_match_table = ACPI_PTR(st_accel_acpi_match),
 	},
 	.probe_new = st_accel_i2c_probe,
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 8af7027..568ff1b 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -17,7 +17,6 @@
 #include <linux/iio/common/st_sensors_spi.h>
 #include "st_accel.h"
 
-#ifdef CONFIG_OF
 /*
  * For new single-chip sensors use <device_name> as compatible string.
  * For old single-chip devices keep <device_name>-accel to maintain
@@ -96,9 +95,6 @@ static const struct of_device_id st_accel_of_match[] = {
 	{}
 };
 MODULE_DEVICE_TABLE(of, st_accel_of_match);
-#else
-#define st_accel_of_match	NULL
-#endif
 
 static int st_accel_spi_probe(struct spi_device *spi)
 {
@@ -107,8 +103,7 @@ static int st_accel_spi_probe(struct spi_device *spi)
 	struct iio_dev *indio_dev;
 	int err;
 
-	st_sensors_of_name_probe(&spi->dev, st_accel_of_match,
-				 spi->modalias, sizeof(spi->modalias));
+	st_sensors_dev_name_probe(&spi->dev, spi->modalias, sizeof(spi->modalias));
 
 	settings = st_accel_get_settings(spi->modalias);
 	if (!settings) {
@@ -166,7 +161,7 @@ MODULE_DEVICE_TABLE(spi, st_accel_id_table);
 static struct spi_driver st_accel_driver = {
 	.driver = {
 		.name = "st-accel-spi",
-		.of_match_table = of_match_ptr(st_accel_of_match),
+		.of_match_table = st_accel_of_match,
 	},
 	.probe = st_accel_spi_probe,
 	.remove = st_accel_spi_remove,
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 5d8540b..82e3308 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -21,6 +21,13 @@
 	select IIO_BUFFER
 	select IIO_TRIGGERED_BUFFER
 
+config AD7091R5
+	tristate "Analog Devices AD7091R5 ADC Driver"
+	depends on I2C
+	select REGMAP_I2C
+	help
+	  Say yes here to build support for Analog Devices AD7091R-5 ADC.
+
 config AD7124
 	tristate "Analog Devices AD7124 and similar sigma-delta ADCs driver"
 	depends on SPI_MASTER
@@ -523,6 +530,16 @@
 	  To compile this driver as a module, choose M here: the module will be
 	  called ltc2485.
 
+config LTC2496
+	tristate "Linear Technology LTC2496 ADC driver"
+	depends on SPI
+	help
+	  Say yes here to build support for Linear Technology LTC2496
+	  16-Bit 8-/16-Channel Delta Sigma ADC.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called ltc2496.
+
 config LTC2497
 	tristate "Linear Technology LTC2497 ADC driver"
 	depends on I2C
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index a1f1fbe..9192289 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -6,6 +6,7 @@
 # When adding new entries keep the list in alphabetical order
 obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
 obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
+obj-$(CONFIG_AD7091R5) += ad7091r5.o ad7091r-base.o
 obj-$(CONFIG_AD7124) += ad7124.o
 obj-$(CONFIG_AD7266) += ad7266.o
 obj-$(CONFIG_AD7291) += ad7291.o
@@ -50,7 +51,8 @@
 obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
 obj-$(CONFIG_LTC2471) += ltc2471.o
 obj-$(CONFIG_LTC2485) += ltc2485.o
-obj-$(CONFIG_LTC2497) += ltc2497.o
+obj-$(CONFIG_LTC2496) += ltc2496.o ltc2497-core.o
+obj-$(CONFIG_LTC2497) += ltc2497.o ltc2497-core.o
 obj-$(CONFIG_MAX1027) += max1027.o
 obj-$(CONFIG_MAX11100) += max11100.o
 obj-$(CONFIG_MAX1118) += max1118.o
diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
new file mode 100644
index 0000000..33c4035
--- /dev/null
+++ b/drivers/iio/adc/ad7091r-base.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AD7091RX Analog to Digital converter driver
+ *
+ * Copyright 2014-2019 Analog Devices Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include "ad7091r-base.h"
+
+#define AD7091R_REG_RESULT  0
+#define AD7091R_REG_CHANNEL 1
+#define AD7091R_REG_CONF    2
+#define AD7091R_REG_ALERT   3
+#define AD7091R_REG_CH_LOW_LIMIT(ch) ((ch) * 3 + 4)
+#define AD7091R_REG_CH_HIGH_LIMIT(ch) ((ch) * 3 + 5)
+#define AD7091R_REG_CH_HYSTERESIS(ch) ((ch) * 3 + 6)
+
+/* AD7091R_REG_RESULT */
+#define AD7091R_REG_RESULT_CH_ID(x)	    (((x) >> 13) & 0x3)
+#define AD7091R_REG_RESULT_CONV_RESULT(x)   ((x) & 0xfff)
+
+/* AD7091R_REG_CONF */
+#define AD7091R_REG_CONF_AUTO   BIT(8)
+#define AD7091R_REG_CONF_CMD    BIT(10)
+
+#define AD7091R_REG_CONF_MODE_MASK  \
+	(AD7091R_REG_CONF_AUTO | AD7091R_REG_CONF_CMD)
+
+enum ad7091r_mode {
+	AD7091R_MODE_SAMPLE,
+	AD7091R_MODE_COMMAND,
+	AD7091R_MODE_AUTOCYCLE,
+};
+
+struct ad7091r_state {
+	struct device *dev;
+	struct regmap *map;
+	struct regulator *vref;
+	const struct ad7091r_chip_info *chip_info;
+	enum ad7091r_mode mode;
+	struct mutex lock; /* lock to prevent concurrent reads */
+};
+
+static int ad7091r_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode)
+{
+	int ret, conf;
+
+	switch (mode) {
+	case AD7091R_MODE_SAMPLE:
+		conf = 0;
+		break;
+	case AD7091R_MODE_COMMAND:
+		conf = AD7091R_REG_CONF_CMD;
+		break;
+	case AD7091R_MODE_AUTOCYCLE:
+		conf = AD7091R_REG_CONF_AUTO;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = regmap_update_bits(st->map, AD7091R_REG_CONF,
+				 AD7091R_REG_CONF_MODE_MASK, conf);
+	if (ret)
+		return ret;
+
+	st->mode = mode;
+
+	return 0;
+}
+
+static int ad7091r_set_channel(struct ad7091r_state *st, unsigned int channel)
+{
+	unsigned int dummy;
+	int ret;
+
+	/* AD7091R_REG_CHANNEL specifies which channels are to be converted */
+	ret = regmap_write(st->map, AD7091R_REG_CHANNEL,
+			BIT(channel) | (BIT(channel) << 8));
+	if (ret)
+		return ret;
+
+	/*
+	 * There is a latency of one conversion before the channel conversion
+	 * sequence is updated
+	 */
+	return regmap_read(st->map, AD7091R_REG_RESULT, &dummy);
+}
+
+static int ad7091r_read_one(struct iio_dev *iio_dev,
+		unsigned int channel, unsigned int *read_val)
+{
+	struct ad7091r_state *st = iio_priv(iio_dev);
+	unsigned int val;
+	int ret;
+
+	ret = ad7091r_set_channel(st, channel);
+	if (ret)
+		return ret;
+
+	ret = regmap_read(st->map, AD7091R_REG_RESULT, &val);
+	if (ret)
+		return ret;
+
+	if (AD7091R_REG_RESULT_CH_ID(val) != channel)
+		return -EIO;
+
+	*read_val = AD7091R_REG_RESULT_CONV_RESULT(val);
+
+	return 0;
+}
+
+static int ad7091r_read_raw(struct iio_dev *iio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val, int *val2, long m)
+{
+	struct ad7091r_state *st = iio_priv(iio_dev);
+	unsigned int read_val;
+	int ret;
+
+	mutex_lock(&st->lock);
+
+	switch (m) {
+	case IIO_CHAN_INFO_RAW:
+		if (st->mode != AD7091R_MODE_COMMAND) {
+			ret = -EBUSY;
+			goto unlock;
+		}
+
+		ret = ad7091r_read_one(iio_dev, chan->channel, &read_val);
+		if (ret)
+			goto unlock;
+
+		*val = read_val;
+		ret = IIO_VAL_INT;
+		break;
+
+	case IIO_CHAN_INFO_SCALE:
+		if (st->vref) {
+			ret = regulator_get_voltage(st->vref);
+			if (ret < 0)
+				goto unlock;
+
+			*val = ret / 1000;
+		} else {
+			*val = st->chip_info->vref_mV;
+		}
+
+		*val2 = chan->scan_type.realbits;
+		ret = IIO_VAL_FRACTIONAL_LOG2;
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+unlock:
+	mutex_unlock(&st->lock);
+	return ret;
+}
+
+static const struct iio_info ad7091r_info = {
+	.read_raw = ad7091r_read_raw,
+};
+
+static irqreturn_t ad7091r_event_handler(int irq, void *private)
+{
+	struct ad7091r_state *st = (struct ad7091r_state *) private;
+	struct iio_dev *iio_dev = dev_get_drvdata(st->dev);
+	unsigned int i, read_val;
+	int ret;
+	s64 timestamp = iio_get_time_ns(iio_dev);
+
+	ret = regmap_read(st->map, AD7091R_REG_ALERT, &read_val);
+	if (ret)
+		return IRQ_HANDLED;
+
+	for (i = 0; i < st->chip_info->num_channels; i++) {
+		if (read_val & BIT(i * 2))
+			iio_push_event(iio_dev,
+					IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
+						IIO_EV_TYPE_THRESH,
+						IIO_EV_DIR_RISING), timestamp);
+		if (read_val & BIT(i * 2 + 1))
+			iio_push_event(iio_dev,
+					IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
+						IIO_EV_TYPE_THRESH,
+						IIO_EV_DIR_FALLING), timestamp);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void ad7091r_remove(void *data)
+{
+	struct ad7091r_state *st = data;
+
+	regulator_disable(st->vref);
+}
+
+int ad7091r_probe(struct device *dev, const char *name,
+		const struct ad7091r_chip_info *chip_info,
+		struct regmap *map, int irq)
+{
+	struct iio_dev *iio_dev;
+	struct ad7091r_state *st;
+	int ret;
+
+	iio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+	if (!iio_dev)
+		return -ENOMEM;
+
+	st = iio_priv(iio_dev);
+	st->dev = dev;
+	st->chip_info = chip_info;
+	st->map = map;
+
+	iio_dev->dev.parent = dev;
+	iio_dev->name = name;
+	iio_dev->info = &ad7091r_info;
+	iio_dev->modes = INDIO_DIRECT_MODE;
+
+	iio_dev->num_channels = chip_info->num_channels;
+	iio_dev->channels = chip_info->channels;
+
+	if (irq) {
+		ret = devm_request_threaded_irq(dev, irq, NULL,
+				ad7091r_event_handler,
+				IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, st);
+		if (ret)
+			return ret;
+	}
+
+	st->vref = devm_regulator_get_optional(dev, "vref");
+	if (IS_ERR(st->vref)) {
+		if (PTR_ERR(st->vref) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		st->vref = NULL;
+	} else {
+		ret = regulator_enable(st->vref);
+		if (ret)
+			return ret;
+		ret = devm_add_action_or_reset(dev, ad7091r_remove, st);
+		if (ret)
+			return ret;
+	}
+
+	/* Use command mode by default to convert only the desired channels */
+	ret = ad7091r_set_mode(st, AD7091R_MODE_COMMAND);
+	if (ret)
+		return ret;
+
+	return devm_iio_device_register(dev, iio_dev);
+}
+EXPORT_SYMBOL_GPL(ad7091r_probe);
+
+static bool ad7091r_writeable_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case AD7091R_REG_RESULT:
+	case AD7091R_REG_ALERT:
+		return false;
+	default:
+		return true;
+	}
+}
+
+static bool ad7091r_volatile_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case AD7091R_REG_RESULT:
+	case AD7091R_REG_ALERT:
+		return true;
+	default:
+		return false;
+	}
+}
+
+const struct regmap_config ad7091r_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 16,
+	.writeable_reg = ad7091r_writeable_reg,
+	.volatile_reg = ad7091r_volatile_reg,
+};
+EXPORT_SYMBOL_GPL(ad7091r_regmap_config);
+
+MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7091Rx multi-channel converters");
+MODULE_LICENSE("GPL v2");
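Worked example only (user-space arithmetic, not part of the patch): with IIO_VAL_FRACTIONAL_LOG2 the scale returned above is vref_mV / 2^realbits, i.e. millivolts per LSB; 2500 mV is the internal-reference value from the chip_info tables.

#include <stdio.h>

int main(void)
{
	const unsigned int vref_mv = 2500;	/* internal reference */
	const unsigned int realbits = 12;
	unsigned int raw = 0x800;		/* mid-scale conversion result */
	double scale_mv = (double)vref_mv / (1u << realbits);

	/* ~0.610352 mV/LSB; 0x800 decodes to ~1250.000 mV */
	printf("scale = %.6f mV/LSB, raw 0x%03x = %.3f mV\n",
	       scale_mv, raw, raw * scale_mv);
	return 0;
}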
diff --git a/drivers/iio/adc/ad7091r-base.h b/drivers/iio/adc/ad7091r-base.h
new file mode 100644
index 0000000..509748a
--- /dev/null
+++ b/drivers/iio/adc/ad7091r-base.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AD7091RX Analog to Digital converter driver
+ *
+ * Copyright 2014-2019 Analog Devices Inc.
+ */
+
+#ifndef __DRIVERS_IIO_ADC_AD7091R_BASE_H__
+#define __DRIVERS_IIO_ADC_AD7091R_BASE_H__
+
+struct device;
+struct ad7091r_state;
+
+struct ad7091r_chip_info {
+	unsigned int num_channels;
+	const struct iio_chan_spec *channels;
+	unsigned int vref_mV;
+};
+
+extern const struct regmap_config ad7091r_regmap_config;
+
+int ad7091r_probe(struct device *dev, const char *name,
+		const struct ad7091r_chip_info *chip_info,
+		struct regmap *map, int irq);
+
+#endif /* __DRIVERS_IIO_ADC_AD7091R_BASE_H__ */
diff --git a/drivers/iio/adc/ad7091r5.c b/drivers/iio/adc/ad7091r5.c
new file mode 100644
index 0000000..9665679
--- /dev/null
+++ b/drivers/iio/adc/ad7091r5.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AD7091R5 Analog to Digital converter driver
+ *
+ * Copyright 2014-2019 Analog Devices Inc.
+ */
+
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "ad7091r-base.h"
+
+static const struct iio_event_spec ad7091r5_events[] = {
+	{
+		.type = IIO_EV_TYPE_THRESH,
+		.dir = IIO_EV_DIR_RISING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+				 BIT(IIO_EV_INFO_ENABLE),
+	},
+	{
+		.type = IIO_EV_TYPE_THRESH,
+		.dir = IIO_EV_DIR_FALLING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+				 BIT(IIO_EV_INFO_ENABLE),
+	},
+	{
+		.type = IIO_EV_TYPE_THRESH,
+		.dir = IIO_EV_DIR_EITHER,
+		.mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
+	},
+};
+
+#define AD7091R_CHANNEL(idx, bits, ev, num_ev) { \
+	.type = IIO_VOLTAGE, \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+	.indexed = 1, \
+	.channel = idx, \
+	.event_spec = ev, \
+	.num_event_specs = num_ev, \
+	.scan_type.storagebits = 16, \
+	.scan_type.realbits = bits, \
+}
+static const struct iio_chan_spec ad7091r5_channels_irq[] = {
+	AD7091R_CHANNEL(0, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+	AD7091R_CHANNEL(1, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+	AD7091R_CHANNEL(2, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+	AD7091R_CHANNEL(3, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+};
+
+static const struct iio_chan_spec ad7091r5_channels_noirq[] = {
+	AD7091R_CHANNEL(0, 12, NULL, 0),
+	AD7091R_CHANNEL(1, 12, NULL, 0),
+	AD7091R_CHANNEL(2, 12, NULL, 0),
+	AD7091R_CHANNEL(3, 12, NULL, 0),
+};
+
+static const struct ad7091r_chip_info ad7091r5_chip_info_irq = {
+	.channels = ad7091r5_channels_irq,
+	.num_channels = ARRAY_SIZE(ad7091r5_channels_irq),
+	.vref_mV = 2500,
+};
+
+static const struct ad7091r_chip_info ad7091r5_chip_info_noirq = {
+	.channels = ad7091r5_channels_noirq,
+	.num_channels = ARRAY_SIZE(ad7091r5_channels_noirq),
+	.vref_mV = 2500,
+};
+
+static int ad7091r5_i2c_probe(struct i2c_client *i2c,
+		const struct i2c_device_id *id)
+{
+	const struct ad7091r_chip_info *chip_info;
+	struct regmap *map = devm_regmap_init_i2c(i2c, &ad7091r_regmap_config);
+
+	if (IS_ERR(map))
+		return PTR_ERR(map);
+
+	if (i2c->irq)
+		chip_info = &ad7091r5_chip_info_irq;
+	else
+		chip_info = &ad7091r5_chip_info_noirq;
+
+	return ad7091r_probe(&i2c->dev, id->name, chip_info, map, i2c->irq);
+}
+
+static const struct of_device_id ad7091r5_dt_ids[] = {
+	{ .compatible = "adi,ad7091r5" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ad7091r5_dt_ids);
+
+static const struct i2c_device_id ad7091r5_i2c_ids[] = {
+	{"ad7091r5", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, ad7091r5_i2c_ids);
+
+static struct i2c_driver ad7091r5_driver = {
+	.driver = {
+		.name = "ad7091r5",
+		.of_match_table = ad7091r5_dt_ids,
+	},
+	.probe = ad7091r5_i2c_probe,
+	.id_table = ad7091r5_i2c_ids,
+};
+module_i2c_driver(ad7091r5_driver);
+
+MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7091R5 multi-channel ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 306bf15..d9915dc 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -9,6 +9,7 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/err.h>
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/regulator/consumer.h>
@@ -224,6 +225,7 @@ static const struct ad_sigma_delta_info ad7124_sigma_delta_info = {
 	.addr_shift = 0,
 	.read_mask = BIT(6),
 	.data_reg = AD7124_DATA,
+	.irq_flags = IRQF_TRIGGER_FALLING,
 };
 
 static int ad7124_set_channel_odr(struct ad7124_state *st,
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index c31b8ea..c8524f0 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -11,7 +11,7 @@
 #include <linux/spi/spi.h>
 #include <linux/regulator/consumer.h>
 #include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 
 #include <linux/interrupt.h>
@@ -34,7 +34,7 @@ struct ad7266_state {
 	enum ad7266_range	range;
 	enum ad7266_mode	mode;
 	bool			fixed_addr;
-	struct gpio		gpios[3];
+	struct gpio_desc	*gpios[3];
 
 	/*
 	 * DMA (thus cache coherency maintenance) requires the
@@ -117,7 +117,7 @@ static void ad7266_select_input(struct ad7266_state *st, unsigned int nr)
 	}
 
 	for (i = 0; i < 3; ++i)
-		gpio_set_value(st->gpios[i].gpio, (bool)(nr & BIT(i)));
+		gpiod_set_value(st->gpios[i], (bool)(nr & BIT(i)));
 }
 
 static int ad7266_update_scan_mode(struct iio_dev *indio_dev,
@@ -376,7 +376,7 @@ static void ad7266_init_channels(struct iio_dev *indio_dev)
 }
 
 static const char * const ad7266_gpio_labels[] = {
-	"AD0", "AD1", "AD2",
+	"ad0", "ad1", "ad2",
 };
 
 static int ad7266_probe(struct spi_device *spi)
@@ -419,14 +419,14 @@ static int ad7266_probe(struct spi_device *spi)
 
 		if (!st->fixed_addr) {
 			for (i = 0; i < ARRAY_SIZE(st->gpios); ++i) {
-				st->gpios[i].gpio = pdata->addr_gpios[i];
-				st->gpios[i].flags = GPIOF_OUT_INIT_LOW;
-				st->gpios[i].label = ad7266_gpio_labels[i];
+				st->gpios[i] = devm_gpiod_get(&spi->dev,
+						      ad7266_gpio_labels[i],
+						      GPIOD_OUT_LOW);
+				if (IS_ERR(st->gpios[i])) {
+					ret = PTR_ERR(st->gpios[i]);
+					goto error_disable_reg;
+				}
 			}
-			ret = gpio_request_array(st->gpios,
-				ARRAY_SIZE(st->gpios));
-			if (ret)
-				goto error_disable_reg;
 		}
 	} else {
 		st->fixed_addr = true;
@@ -465,7 +465,7 @@ static int ad7266_probe(struct spi_device *spi)
 	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
 		&ad7266_trigger_handler, &iio_triggered_buffer_setup_ops);
 	if (ret)
-		goto error_free_gpios;
+		goto error_disable_reg;
 
 	ret = iio_device_register(indio_dev);
 	if (ret)
@@ -475,9 +475,6 @@ static int ad7266_probe(struct spi_device *spi)
 
 error_buffer_cleanup:
 	iio_triggered_buffer_cleanup(indio_dev);
-error_free_gpios:
-	if (!st->fixed_addr)
-		gpio_free_array(st->gpios, ARRAY_SIZE(st->gpios));
 error_disable_reg:
 	if (!IS_ERR(st->reg))
 		regulator_disable(st->reg);
@@ -492,8 +489,6 @@ static int ad7266_remove(struct spi_device *spi)
 
 	iio_device_unregister(indio_dev);
 	iio_triggered_buffer_cleanup(indio_dev);
-	if (!st->fixed_addr)
-		gpio_free_array(st->gpios, ARRAY_SIZE(st->gpios));
 	if (!IS_ERR(st->reg))
 		regulator_disable(st->reg);
 
diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c
index 217a5a5..291c1a8 100644
--- a/drivers/iio/adc/ad7780.c
+++ b/drivers/iio/adc/ad7780.c
@@ -203,6 +203,7 @@ static const struct ad_sigma_delta_info ad7780_sigma_delta_info = {
 	.set_mode = ad7780_set_mode,
 	.postprocess_sample = ad7780_postprocess_sample,
 	.has_registers = false,
+	.irq_flags = IRQF_TRIGGER_LOW,
 };
 
 #define AD7780_CHANNEL(bits, wordsize) \
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index 54025ea..abb2393 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -205,6 +205,7 @@ static const struct ad_sigma_delta_info ad7791_sigma_delta_info = {
 	.has_registers = true,
 	.addr_shift = 4,
 	.read_mask = BIT(3),
+	.irq_flags = IRQF_TRIGGER_LOW,
 };
 
 static int ad7791_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index bbc41ec..b747db9 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -206,6 +206,7 @@ static const struct ad_sigma_delta_info ad7793_sigma_delta_info = {
 	.has_registers = true,
 	.addr_shift = 3,
 	.read_mask = BIT(6),
+	.irq_flags = IRQF_TRIGGER_LOW,
 };
 
 static const struct ad_sd_calib_data ad7793_calib_arr[6] = {
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 6223043..c6a3428 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -43,11 +43,17 @@ enum ad7887_channels {
 /**
  * struct ad7887_chip_info - chip specifc information
  * @int_vref_mv:	the internal reference voltage
- * @channel:		channel specification
+ * @channels:		channels specification
+ * @num_channels:	number of channels
+ * @dual_channels:	channels specification in dual mode
+ * @num_dual_channels:	number of channels in dual mode
  */
 struct ad7887_chip_info {
 	u16				int_vref_mv;
-	struct iio_chan_spec		channel[3];
+	const struct iio_chan_spec	*channels;
+	unsigned int			num_channels;
+	const struct iio_chan_spec	*dual_channels;
+	unsigned int			num_dual_channels;
 };
 
 struct ad7887_state {
@@ -183,45 +189,43 @@ static int ad7887_read_raw(struct iio_dev *indio_dev,
 	return -EINVAL;
 }
 
+#define AD7887_CHANNEL(x) { \
+	.type = IIO_VOLTAGE, \
+	.indexed = 1, \
+	.channel = (x), \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+	.address = (x), \
+	.scan_index = (x), \
+	.scan_type = { \
+		.sign = 'u', \
+		.realbits = 12, \
+		.storagebits = 16, \
+		.shift = 0, \
+		.endianness = IIO_BE, \
+	}, \
+}
+
+static const struct iio_chan_spec ad7887_channels[] = {
+	AD7887_CHANNEL(0),
+	IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct iio_chan_spec ad7887_dual_channels[] = {
+	AD7887_CHANNEL(0),
+	AD7887_CHANNEL(1),
+	IIO_CHAN_SOFT_TIMESTAMP(2),
+};
 
 static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
 	/*
 	 * More devices added in future
 	 */
 	[ID_AD7887] = {
-		.channel[0] = {
-			.type = IIO_VOLTAGE,
-			.indexed = 1,
-			.channel = 1,
-			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-			.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
-			.address = 1,
-			.scan_index = 1,
-			.scan_type = {
-				.sign = 'u',
-				.realbits = 12,
-				.storagebits = 16,
-				.shift = 0,
-				.endianness = IIO_BE,
-			},
-		},
-		.channel[1] = {
-			.type = IIO_VOLTAGE,
-			.indexed = 1,
-			.channel = 0,
-			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-			.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
-			.address = 0,
-			.scan_index = 0,
-			.scan_type = {
-				.sign = 'u',
-				.realbits = 12,
-				.storagebits = 16,
-				.shift = 0,
-				.endianness = IIO_BE,
-			},
-		},
-		.channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
+		.channels = ad7887_channels,
+		.num_channels = ARRAY_SIZE(ad7887_channels),
+		.dual_channels = ad7887_dual_channels,
+		.num_dual_channels = ARRAY_SIZE(ad7887_dual_channels),
 		.int_vref_mv = 2500,
 	},
 };
@@ -306,11 +310,11 @@ static int ad7887_probe(struct spi_device *spi)
 		spi_message_init(&st->msg[AD7887_CH1]);
 		spi_message_add_tail(&st->xfer[3], &st->msg[AD7887_CH1]);
 
-		indio_dev->channels = st->chip_info->channel;
-		indio_dev->num_channels = 3;
+		indio_dev->channels = st->chip_info->dual_channels;
+		indio_dev->num_channels = st->chip_info->num_dual_channels;
 	} else {
-		indio_dev->channels = &st->chip_info->channel[1];
-		indio_dev->num_channels = 2;
+		indio_dev->channels = st->chip_info->channels;
+		indio_dev->num_channels = st->chip_info->num_channels;
 	}
 
 	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
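The ad7887 change above replaces the open-coded three-entry channel array with shared static tables whose lengths travel with them via ARRAY_SIZE(). A minimal stand-alone C sketch of that pairing (the dual_mode flag and chan_spec struct below are hypothetical, not the driver's types):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct chan_spec { int index; };

static const struct chan_spec single_channels[] = { { 0 } };
static const struct chan_spec dual_channels[]   = { { 0 }, { 1 } };

int main(void)
{
	int dual_mode = 1;	/* hypothetical stand-in for the dual-channel setup */
	const struct chan_spec *channels;
	unsigned int num_channels;

	/* the table and its count always come from the same array */
	if (dual_mode) {
		channels = dual_channels;
		num_channels = ARRAY_SIZE(dual_channels);
	} else {
		channels = single_channels;
		num_channels = ARRAY_SIZE(single_channels);
	}

	printf("%u channels, first index %d\n", num_channels, channels[0].index);
	return 0;
}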
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 3212eb4..1d124c8 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * AD7904/AD7914/AD7923/AD7924 SPI ADC driver
+ * AD7904/AD7914/AD7923/AD7924/AD7908/AD7918/AD7928 SPI ADC driver
  *
  * Copyright 2011 Analog Devices Inc (from AD7923 Driver)
  * Copyright 2012 CS Systemes d'Information
@@ -29,15 +29,10 @@
 #define AD7923_PM_MODE_AS	(1)		/* auto shutdown */
 #define AD7923_PM_MODE_FS	(2)		/* full shutdown */
 #define AD7923_PM_MODE_OPS	(3)		/* normal operation */
-#define AD7923_CHANNEL_0	(0)		/* analog input 0 */
-#define AD7923_CHANNEL_1	(1)		/* analog input 1 */
-#define AD7923_CHANNEL_2	(2)		/* analog input 2 */
-#define AD7923_CHANNEL_3	(3)		/* analog input 3 */
 #define AD7923_SEQUENCE_OFF	(0)		/* no sequence function */
 #define AD7923_SEQUENCE_PROTECT	(2)		/* no interrupt write cycle */
 #define AD7923_SEQUENCE_ON	(3)		/* continuous sequence */
 
-#define AD7923_MAX_CHAN		4
 
 #define AD7923_PM_MODE_WRITE(mode)	((mode) << 4)	 /* write mode */
 #define AD7923_CHANNEL_WRITE(channel)	((channel) << 6) /* write channel */
@@ -78,6 +73,9 @@ enum ad7923_id {
 	AD7904,
 	AD7914,
 	AD7924,
+	AD7908,
+	AD7918,
+	AD7928
 };
 
 #define AD7923_V_CHAN(index, bits)					\
@@ -106,9 +104,25 @@ const struct iio_chan_spec name ## _channels[] = { \
 	IIO_CHAN_SOFT_TIMESTAMP(4), \
 }
 
+#define DECLARE_AD7908_CHANNELS(name, bits) \
+const struct iio_chan_spec name ## _channels[] = { \
+	AD7923_V_CHAN(0, bits), \
+	AD7923_V_CHAN(1, bits), \
+	AD7923_V_CHAN(2, bits), \
+	AD7923_V_CHAN(3, bits), \
+	AD7923_V_CHAN(4, bits), \
+	AD7923_V_CHAN(5, bits), \
+	AD7923_V_CHAN(6, bits), \
+	AD7923_V_CHAN(7, bits), \
+	IIO_CHAN_SOFT_TIMESTAMP(8), \
+}
+
 static DECLARE_AD7923_CHANNELS(ad7904, 8);
 static DECLARE_AD7923_CHANNELS(ad7914, 10);
 static DECLARE_AD7923_CHANNELS(ad7924, 12);
+static DECLARE_AD7908_CHANNELS(ad7908, 8);
+static DECLARE_AD7908_CHANNELS(ad7918, 10);
+static DECLARE_AD7908_CHANNELS(ad7928, 12);
 
 static const struct ad7923_chip_info ad7923_chip_info[] = {
 	[AD7904] = {
@@ -123,6 +137,18 @@ static const struct ad7923_chip_info ad7923_chip_info[] = {
 		.channels = ad7924_channels,
 		.num_channels = ARRAY_SIZE(ad7924_channels),
 	},
+	[AD7908] = {
+		.channels = ad7908_channels,
+		.num_channels = ARRAY_SIZE(ad7908_channels),
+	},
+	[AD7918] = {
+		.channels = ad7918_channels,
+		.num_channels = ARRAY_SIZE(ad7918_channels),
+	},
+	[AD7928] = {
+		.channels = ad7928_channels,
+		.num_channels = ARRAY_SIZE(ad7928_channels),
+	},
 };
 
 /**
@@ -135,7 +161,11 @@ static int ad7923_update_scan_mode(struct iio_dev *indio_dev,
 	int i, cmd, len;
 
 	len = 0;
-	for_each_set_bit(i, active_scan_mask, AD7923_MAX_CHAN) {
+	/*
+	 * For this driver the last channel is always the software timestamp so
+	 * skip that one.
+	 */
+	for_each_set_bit(i, active_scan_mask, indio_dev->num_channels - 1) {
 		cmd = AD7923_WRITE_CR | AD7923_CHANNEL_WRITE(i) |
 			AD7923_SEQUENCE_WRITE(AD7923_SEQUENCE_OFF) |
 			st->settings;
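The comment above is the key to the new loop bound: the software timestamp is always the last channel, so only num_channels - 1 bits of the scan mask describe hardware inputs. A small user-space sketch of the same bookkeeping (the mask value and channel count are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long active_scan_mask = 0x1a5;	/* hypothetical: bits 0,2,5,7,8 set */
	unsigned int num_channels = 9;		/* 8 inputs + 1 soft timestamp */
	unsigned int i;

	/* mirrors for_each_set_bit(i, mask, num_channels - 1): bit 8
	 * (the timestamp) is never programmed into the sequencer */
	for (i = 0; i < num_channels - 1; i++)
		if (active_scan_mask & (1UL << i))
			printf("enable hardware channel %u\n", i);

	return 0;
}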
@@ -188,7 +218,7 @@ static irqreturn_t ad7923_trigger_handler(int irq, void *p)
 	return IRQ_HANDLED;
 }
 
-static int ad7923_scan_direct(struct ad7923_state *st, unsigned ch)
+static int ad7923_scan_direct(struct ad7923_state *st, unsigned int ch)
 {
 	int ret, cmd;
 
@@ -348,13 +378,29 @@ static const struct spi_device_id ad7923_id[] = {
 	{"ad7914", AD7914},
 	{"ad7923", AD7924},
 	{"ad7924", AD7924},
+	{"ad7908", AD7908},
+	{"ad7918", AD7918},
+	{"ad7928", AD7928},
 	{}
 };
 MODULE_DEVICE_TABLE(spi, ad7923_id);
 
+static const struct of_device_id ad7923_of_match[] = {
+	{ .compatible = "adi,ad7904", },
+	{ .compatible = "adi,ad7914", },
+	{ .compatible = "adi,ad7923", },
+	{ .compatible = "adi,ad7924", },
+	{ .compatible = "adi,ad7908", },
+	{ .compatible = "adi,ad7918", },
+	{ .compatible = "adi,ad7928", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ad7923_of_match);
+
 static struct spi_driver ad7923_driver = {
 	.driver = {
 		.name	= "ad7923",
+		.of_match_table = ad7923_of_match,
 	},
 	.probe		= ad7923_probe,
 	.remove		= ad7923_remove,
@@ -364,5 +410,5 @@ module_spi_driver(ad7923_driver);
 
 MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
 MODULE_AUTHOR("Patrick Vasseur <patrick.vasseur@c-s.fr>");
-MODULE_DESCRIPTION("Analog Devices AD7904/AD7914/AD7923/AD7924 ADC");
+MODULE_DESCRIPTION("Analog Devices AD7923 and similar ADC");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index f658012..ef013af 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -167,6 +167,21 @@ static int ad799x_read_config(struct ad799x_state *st)
 	}
 }
 
+static int ad799x_update_config(struct ad799x_state *st, u16 config)
+{
+	int ret;
+
+	ret = ad799x_write_config(st, config);
+	if (ret < 0)
+		return ret;
+	ret = ad799x_read_config(st);
+	if (ret < 0)
+		return ret;
+	st->config = ret;
+
+	return 0;
+}
+
 /**
  * ad799x_trigger_handler() bh of trigger launched polling to ring buffer
  *
@@ -808,13 +823,9 @@ static int ad799x_probe(struct i2c_client *client,
 	indio_dev->channels = st->chip_config->channel;
 	indio_dev->num_channels = chip_info->num_channels;
 
-	ret = ad799x_write_config(st, st->chip_config->default_config);
-	if (ret < 0)
+	ret = ad799x_update_config(st, st->chip_config->default_config);
+	if (ret)
 		goto error_disable_vref;
-	ret = ad799x_read_config(st);
-	if (ret < 0)
-		goto error_disable_vref;
-	st->config = ret;
 
 	ret = iio_triggered_buffer_setup(indio_dev, NULL,
 		&ad799x_trigger_handler, NULL);
@@ -864,6 +875,48 @@ static int ad799x_remove(struct i2c_client *client)
 	return 0;
 }
 
+static int __maybe_unused ad799x_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct ad799x_state *st = iio_priv(indio_dev);
+
+	regulator_disable(st->vref);
+	regulator_disable(st->reg);
+
+	return 0;
+}
+
+static int __maybe_unused ad799x_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct ad799x_state *st = iio_priv(indio_dev);
+	int ret;
+
+	ret = regulator_enable(st->reg);
+	if (ret) {
+		dev_err(dev, "Unable to enable vcc regulator\n");
+		return ret;
+	}
+	ret = regulator_enable(st->vref);
+	if (ret) {
+		regulator_disable(st->reg);
+		dev_err(dev, "Unable to enable vref regulator\n");
+		return ret;
+	}
+
+	/* resync config */
+	ret = ad799x_update_config(st, st->config);
+	if (ret) {
+		regulator_disable(st->vref);
+		regulator_disable(st->reg);
+		return ret;
+	}
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ad799x_pm_ops, ad799x_suspend, ad799x_resume);
+
 static const struct i2c_device_id ad799x_id[] = {
 	{ "ad7991", ad7991 },
 	{ "ad7995", ad7995 },
@@ -881,6 +934,7 @@ MODULE_DEVICE_TABLE(i2c, ad799x_id);
 static struct i2c_driver ad799x_driver = {
 	.driver = {
 		.name = "ad799x",
+		.pm = &ad799x_pm_ops,
 	},
 	.probe = ad799x_probe,
 	.remove = ad799x_remove,
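The ad799x suspend/resume support added above follows the stock kernel pattern: mark the callbacks __maybe_unused and let SIMPLE_DEV_PM_OPS wire them up only when CONFIG_PM_SLEEP is enabled. A bare-bones sketch of that shape (the foo_* names are placeholders, not part of the patch):

#include <linux/pm.h>
#include <linux/device.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* power down supplies, quiesce the device */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* re-enable supplies and resync any cached configuration */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

/* hooked up from the driver structure:
 *	.driver = { .name = "foo", .pm = &foo_pm_ops },
 */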
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 8ba9048..8115b6d 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -500,7 +500,7 @@ static int ad_sd_probe_trigger(struct iio_dev *indio_dev)
 
 	ret = request_irq(sigma_delta->spi->irq,
 			  ad_sd_data_rdy_trig_poll,
-			  IRQF_TRIGGER_LOW,
+			  sigma_delta->info->irq_flags,
 			  indio_dev->name,
 			  sigma_delta);
 	if (ret)
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index e1850f3..a5c7771 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1444,10 +1444,10 @@ static void at91_adc_dma_init(struct platform_device *pdev)
 	if (st->dma_st.dma_chan)
 		return;
 
-	st->dma_st.dma_chan = dma_request_slave_channel(&pdev->dev, "rx");
-
-	if (!st->dma_st.dma_chan)  {
+	st->dma_st.dma_chan = dma_request_chan(&pdev->dev, "rx");
+	if (IS_ERR(st->dma_st.dma_chan))  {
 		dev_info(&pdev->dev, "can't get DMA channel\n");
+		st->dma_st.dma_chan = NULL;
 		goto dma_exit;
 	}
 
diff --git a/drivers/iio/adc/ltc2496.c b/drivers/iio/adc/ltc2496.c
new file mode 100644
index 0000000..88a3015
--- /dev/null
+++ b/drivers/iio/adc/ltc2496.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ltc2496.c - Driver for Analog Devices/Linear Technology LTC2496 ADC
+ *
+ * Based on ltc2497.c which has
+ * Copyright (C) 2017 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ *
+ * Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/2496fc.pdf
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "ltc2497.h"
+
+struct ltc2496_driverdata {
+	/* this must be the first member */
+	struct ltc2497core_driverdata common_ddata;
+	struct spi_device *spi;
+
+	/*
+	 * DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache lines.
+	 */
+	unsigned char rxbuf[3] ____cacheline_aligned;
+	unsigned char txbuf[3];
+};
+
+static int ltc2496_result_and_measure(struct ltc2497core_driverdata *ddata,
+				      u8 address, int *val)
+{
+	struct ltc2496_driverdata *st =
+		container_of(ddata, struct ltc2496_driverdata, common_ddata);
+	struct spi_transfer t = {
+		.tx_buf = st->txbuf,
+		.rx_buf = st->rxbuf,
+		.len = sizeof(st->txbuf),
+	};
+	int ret;
+
+	st->txbuf[0] = LTC2497_ENABLE | address;
+
+	ret = spi_sync_transfer(st->spi, &t, 1);
+	if (ret < 0)  {
+		dev_err(&st->spi->dev, "spi_sync_transfer failed: %pe\n",
+			ERR_PTR(ret));
+		return ret;
+	}
+
+	if (val)
+		*val = ((st->rxbuf[0] & 0x3f) << 12 |
+			st->rxbuf[1] << 4 | st->rxbuf[2] >> 4) -
+			(1 << 17);
+
+	return 0;
+}
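The arithmetic in ltc2496_result_and_measure() packs 6 + 8 + 4 bits from the three SPI bytes into an 18-bit word and then subtracts 1 << 17 to go from offset binary to a signed value. A user-space check of that decoding, with byte values chosen only to hit mid-scale and full-scale:

#include <stdio.h>

static int ltc2496_decode(const unsigned char rx[3])
{
	int raw = ((rx[0] & 0x3f) << 12) | (rx[1] << 4) | (rx[2] >> 4);

	return raw - (1 << 17);	/* offset binary -> signed, mid-scale reads 0 */
}

int main(void)
{
	unsigned char mid[3]  = { 0x20, 0x00, 0x00 };	/* 0x20000 -> 0 */
	unsigned char full[3] = { 0x3f, 0xff, 0xf0 };	/* 0x3ffff -> 131071 */

	printf("%d %d\n", ltc2496_decode(mid), ltc2496_decode(full));
	return 0;
}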
+
+static int ltc2496_probe(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev;
+	struct ltc2496_driverdata *st;
+	struct device *dev = &spi->dev;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+	spi_set_drvdata(spi, indio_dev);
+	st->spi = spi;
+	st->common_ddata.result_and_measure = ltc2496_result_and_measure;
+
+	return ltc2497core_probe(dev, indio_dev);
+}
+
+static int ltc2496_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+	ltc2497core_remove(indio_dev);
+
+	return 0;
+}
+
+static const struct of_device_id ltc2496_of_match[] = {
+	{ .compatible = "lltc,ltc2496", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ltc2496_of_match);
+
+static struct spi_driver ltc2496_driver = {
+	.driver = {
+		.name = "ltc2496",
+		.of_match_table = of_match_ptr(ltc2496_of_match),
+	},
+	.probe = ltc2496_probe,
+	.remove = ltc2496_remove,
+};
+module_spi_driver(ltc2496_driver);
+
+MODULE_AUTHOR("Uwe Kleine-König <u.kleine-könig@pengutronix.de>");
+MODULE_DESCRIPTION("Linear Technology LTC2496 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ltc2497-core.c b/drivers/iio/adc/ltc2497-core.c
new file mode 100644
index 0000000..f5f7039
--- /dev/null
+++ b/drivers/iio/adc/ltc2497-core.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ltc2497-core.c - Common code for Analog Devices/Linear Technology
+ * LTC2496 and LTC2497 ADCs
+ *
+ * Copyright (C) 2017 Analog Devices Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include "ltc2497.h"
+
+#define LTC2497_SGL			BIT(4)
+#define LTC2497_DIFF			0
+#define LTC2497_SIGN			BIT(3)
+
+static int ltc2497core_wait_conv(struct ltc2497core_driverdata *ddata)
+{
+	s64 time_elapsed;
+
+	time_elapsed = ktime_ms_delta(ktime_get(), ddata->time_prev);
+
+	if (time_elapsed < LTC2497_CONVERSION_TIME_MS) {
+		/* delay if conversion time not passed
+		 * since last read or write
+		 */
+		if (msleep_interruptible(
+		    LTC2497_CONVERSION_TIME_MS - time_elapsed))
+			return -ERESTARTSYS;
+
+		return 0;
+	}
+
+	if (time_elapsed - LTC2497_CONVERSION_TIME_MS <= 0) {
+		/* We're in automatic mode -
+		 * so the last reading is still not outdated
+		 */
+		return 0;
+	}
+
+	return 1;
+}
+
+static int ltc2497core_read(struct ltc2497core_driverdata *ddata, u8 address, int *val)
+{
+	int ret;
+
+	ret = ltc2497core_wait_conv(ddata);
+	if (ret < 0)
+		return ret;
+
+	if (ret || ddata->addr_prev != address) {
+		ret = ddata->result_and_measure(ddata, address, NULL);
+		if (ret < 0)
+			return ret;
+		ddata->addr_prev = address;
+
+		if (msleep_interruptible(LTC2497_CONVERSION_TIME_MS))
+			return -ERESTARTSYS;
+	}
+
+	ret = ddata->result_and_measure(ddata, address, val);
+	if (ret < 0)
+		return ret;
+
+	ddata->time_prev = ktime_get();
+
+	return ret;
+}
+
+static int ltc2497core_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int *val, int *val2, long mask)
+{
+	struct ltc2497core_driverdata *ddata = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&indio_dev->mlock);
+		ret = ltc2497core_read(ddata, chan->address, val);
+		mutex_unlock(&indio_dev->mlock);
+		if (ret < 0)
+			return ret;
+
+		return IIO_VAL_INT;
+
+	case IIO_CHAN_INFO_SCALE:
+		ret = regulator_get_voltage(ddata->ref);
+		if (ret < 0)
+			return ret;
+
+		*val = ret / 1000;
+		*val2 = 17;
+
+		return IIO_VAL_FRACTIONAL_LOG2;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+#define LTC2497_CHAN(_chan, _addr, _ds_name) { \
+	.type = IIO_VOLTAGE, \
+	.indexed = 1, \
+	.channel = (_chan), \
+	.address = (_addr | (_chan / 2) | ((_chan & 1) ? LTC2497_SIGN : 0)), \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+	.datasheet_name = (_ds_name), \
+}
+
+#define LTC2497_CHAN_DIFF(_chan, _addr) { \
+	.type = IIO_VOLTAGE, \
+	.indexed = 1, \
+	.channel = (_chan) * 2 + ((_addr) & LTC2497_SIGN ? 1 : 0), \
+	.channel2 = (_chan) * 2 + ((_addr) & LTC2497_SIGN ? 0 : 1),\
+	.address = (_addr | _chan), \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+	.differential = 1, \
+}
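The LTC2497_CHAN() macro above encodes the single-ended mux selection into the address byte: bit 4 selects single-ended mode, the low bits carry the channel pair, and bit 3 picks the odd input of the pair. A quick sketch of the resulting values (the constants are copied from the defines above; the helper name is made up):

#include <stdio.h>

#define LTC2497_SGL	0x10	/* BIT(4) */
#define LTC2497_SIGN	0x08	/* BIT(3) */

static unsigned int ltc2497_se_address(unsigned int chan)
{
	return LTC2497_SGL | (chan / 2) | ((chan & 1) ? LTC2497_SIGN : 0);
}

int main(void)
{
	/* CH0 -> 0x10, CH1 -> 0x18, CH5 -> 0x1a, CH15 -> 0x1f */
	printf("0x%02x 0x%02x 0x%02x 0x%02x\n",
	       ltc2497_se_address(0), ltc2497_se_address(1),
	       ltc2497_se_address(5), ltc2497_se_address(15));
	return 0;
}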
+
+static const struct iio_chan_spec ltc2497core_channel[] = {
+	LTC2497_CHAN(0, LTC2497_SGL, "CH0"),
+	LTC2497_CHAN(1, LTC2497_SGL, "CH1"),
+	LTC2497_CHAN(2, LTC2497_SGL, "CH2"),
+	LTC2497_CHAN(3, LTC2497_SGL, "CH3"),
+	LTC2497_CHAN(4, LTC2497_SGL, "CH4"),
+	LTC2497_CHAN(5, LTC2497_SGL, "CH5"),
+	LTC2497_CHAN(6, LTC2497_SGL, "CH6"),
+	LTC2497_CHAN(7, LTC2497_SGL, "CH7"),
+	LTC2497_CHAN(8, LTC2497_SGL, "CH8"),
+	LTC2497_CHAN(9, LTC2497_SGL, "CH9"),
+	LTC2497_CHAN(10, LTC2497_SGL, "CH10"),
+	LTC2497_CHAN(11, LTC2497_SGL, "CH11"),
+	LTC2497_CHAN(12, LTC2497_SGL, "CH12"),
+	LTC2497_CHAN(13, LTC2497_SGL, "CH13"),
+	LTC2497_CHAN(14, LTC2497_SGL, "CH14"),
+	LTC2497_CHAN(15, LTC2497_SGL, "CH15"),
+	LTC2497_CHAN_DIFF(0, LTC2497_DIFF),
+	LTC2497_CHAN_DIFF(1, LTC2497_DIFF),
+	LTC2497_CHAN_DIFF(2, LTC2497_DIFF),
+	LTC2497_CHAN_DIFF(3, LTC2497_DIFF),
+	LTC2497_CHAN_DIFF(4, LTC2497_DIFF),
+	LTC2497_CHAN_DIFF(5, LTC2497_DIFF),
+	LTC2497_CHAN_DIFF(6, LTC2497_DIFF),
+	LTC2497_CHAN_DIFF(7, LTC2497_DIFF),
+	LTC2497_CHAN_DIFF(0, LTC2497_DIFF | LTC2497_SIGN),
+	LTC2497_CHAN_DIFF(1, LTC2497_DIFF | LTC2497_SIGN),
+	LTC2497_CHAN_DIFF(2, LTC2497_DIFF | LTC2497_SIGN),
+	LTC2497_CHAN_DIFF(3, LTC2497_DIFF | LTC2497_SIGN),
+	LTC2497_CHAN_DIFF(4, LTC2497_DIFF | LTC2497_SIGN),
+	LTC2497_CHAN_DIFF(5, LTC2497_DIFF | LTC2497_SIGN),
+	LTC2497_CHAN_DIFF(6, LTC2497_DIFF | LTC2497_SIGN),
+	LTC2497_CHAN_DIFF(7, LTC2497_DIFF | LTC2497_SIGN),
+};
+
+static const struct iio_info ltc2497core_info = {
+	.read_raw = ltc2497core_read_raw,
+};
+
+int ltc2497core_probe(struct device *dev, struct iio_dev *indio_dev)
+{
+	struct ltc2497core_driverdata *ddata = iio_priv(indio_dev);
+	int ret;
+
+	indio_dev->dev.parent = dev;
+	indio_dev->name = dev_name(dev);
+	indio_dev->info = &ltc2497core_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = ltc2497core_channel;
+	indio_dev->num_channels = ARRAY_SIZE(ltc2497core_channel);
+
+	ret = ddata->result_and_measure(ddata, LTC2497_CONFIG_DEFAULT, NULL);
+	if (ret < 0)
+		return ret;
+
+	ddata->ref = devm_regulator_get(dev, "vref");
+	if (IS_ERR(ddata->ref)) {
+		if (PTR_ERR(ddata->ref) != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get vref regulator: %pe\n",
+				ddata->ref);
+
+		return PTR_ERR(ddata->ref);
+	}
+
+	ret = regulator_enable(ddata->ref);
+	if (ret < 0) {
+		dev_err(dev, "Failed to enable vref regulator: %pe\n",
+			ERR_PTR(ret));
+		return ret;
+	}
+
+	if (dev->platform_data) {
+		struct iio_map *plat_data;
+
+		plat_data = (struct iio_map *)dev->platform_data;
+
+		ret = iio_map_array_register(indio_dev, plat_data);
+		if (ret) {
+			dev_err(&indio_dev->dev, "iio map err: %d\n", ret);
+			goto err_regulator_disable;
+		}
+	}
+
+	ddata->addr_prev = LTC2497_CONFIG_DEFAULT;
+	ddata->time_prev = ktime_get();
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0)
+		goto err_array_unregister;
+
+	return 0;
+
+err_array_unregister:
+	iio_map_array_unregister(indio_dev);
+
+err_regulator_disable:
+	regulator_disable(ddata->ref);
+
+	return ret;
+}
+EXPORT_SYMBOL_NS(ltc2497core_probe, LTC2497);
+
+void ltc2497core_remove(struct iio_dev *indio_dev)
+{
+	struct ltc2497core_driverdata *ddata = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+
+	iio_map_array_unregister(indio_dev);
+
+	regulator_disable(ddata->ref);
+}
+EXPORT_SYMBOL_NS(ltc2497core_remove, LTC2497);
+
+MODULE_DESCRIPTION("common code for LTC2496/LTC2497 drivers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index 4704060..5db63d7 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -7,27 +7,18 @@
  * Datasheet: http://cds.linear.com/docs/en/datasheet/2497fd.pdf
  */
 
-#include <linux/delay.h>
 #include <linux/i2c.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/driver.h>
-#include <linux/iio/sysfs.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/regulator/consumer.h>
 
-#define LTC2497_ENABLE			0xA0
-#define LTC2497_SGL			BIT(4)
-#define LTC2497_DIFF			0
-#define LTC2497_SIGN			BIT(3)
-#define LTC2497_CONFIG_DEFAULT		LTC2497_ENABLE
-#define LTC2497_CONVERSION_TIME_MS	150ULL
+#include "ltc2497.h"
 
-struct ltc2497_st {
+struct ltc2497_driverdata {
+	/* this must be the first member */
+	struct ltc2497core_driverdata common_ddata;
 	struct i2c_client *client;
-	struct regulator *ref;
-	ktime_t	time_prev;
-	u8 addr_prev;
 	/*
 	 * DMA (thus cache coherency maintenance) requires the
 	 * transfer buffers to live in their own cache lines.
@@ -35,232 +26,59 @@ struct ltc2497_st {
 	__be32 buf ____cacheline_aligned;
 };
 
-static int ltc2497_wait_conv(struct ltc2497_st *st)
+static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
+				      u8 address, int *val)
 {
-	s64 time_elapsed;
-
-	time_elapsed = ktime_ms_delta(ktime_get(), st->time_prev);
-
-	if (time_elapsed < LTC2497_CONVERSION_TIME_MS) {
-		/* delay if conversion time not passed
-		 * since last read or write
-		 */
-		if (msleep_interruptible(
-		    LTC2497_CONVERSION_TIME_MS - time_elapsed))
-			return -ERESTARTSYS;
-
-		return 0;
-	}
-
-	if (time_elapsed - LTC2497_CONVERSION_TIME_MS <= 0) {
-		/* We're in automatic mode -
-		 * so the last reading is stil not outdated
-		 */
-		return 0;
-	}
-
-	return 1;
-}
-
-static int ltc2497_read(struct ltc2497_st *st, u8 address, int *val)
-{
-	struct i2c_client *client = st->client;
+	struct ltc2497_driverdata *st =
+		container_of(ddata, struct ltc2497_driverdata, common_ddata);
 	int ret;
 
-	ret = ltc2497_wait_conv(st);
-	if (ret < 0)
-		return ret;
-
-	if (ret || st->addr_prev != address) {
-		ret = i2c_smbus_write_byte(st->client,
-					   LTC2497_ENABLE | address);
-		if (ret < 0)
+	if (val) {
+		ret = i2c_master_recv(st->client, (char *)&st->buf, 3);
+		if (ret < 0) {
+			dev_err(&st->client->dev, "i2c_master_recv failed\n");
 			return ret;
-		st->addr_prev = address;
-		if (msleep_interruptible(LTC2497_CONVERSION_TIME_MS))
-			return -ERESTARTSYS;
-	}
-	ret = i2c_master_recv(client, (char *)&st->buf, 3);
-	if (ret < 0)  {
-		dev_err(&client->dev, "i2c_master_recv failed\n");
-		return ret;
-	}
-	st->time_prev = ktime_get();
+		}
 
-	/* convert and shift the result,
-	 * and finally convert from offset binary to signed integer
-	 */
-	*val = (be32_to_cpu(st->buf) >> 14) - (1 << 17);
+		*val = (be32_to_cpu(st->buf) >> 14) - (1 << 17);
+	}
 
+	ret = i2c_smbus_write_byte(st->client,
+				   LTC2497_ENABLE | address);
+	if (ret)
+		dev_err(&st->client->dev, "i2c transfer failed: %pe\n",
+			ERR_PTR(ret));
 	return ret;
 }
 
-static int ltc2497_read_raw(struct iio_dev *indio_dev,
-			    struct iio_chan_spec const *chan,
-			    int *val, int *val2, long mask)
-{
-	struct ltc2497_st *st = iio_priv(indio_dev);
-	int ret;
-
-	switch (mask) {
-	case IIO_CHAN_INFO_RAW:
-		mutex_lock(&indio_dev->mlock);
-		ret = ltc2497_read(st, chan->address, val);
-		mutex_unlock(&indio_dev->mlock);
-		if (ret < 0)
-			return ret;
-
-		return IIO_VAL_INT;
-
-	case IIO_CHAN_INFO_SCALE:
-		ret = regulator_get_voltage(st->ref);
-		if (ret < 0)
-			return ret;
-
-		*val = ret / 1000;
-		*val2 = 17;
-
-		return IIO_VAL_FRACTIONAL_LOG2;
-
-	default:
-		return -EINVAL;
-	}
-}
-
-#define LTC2497_CHAN(_chan, _addr, _ds_name) { \
-	.type = IIO_VOLTAGE, \
-	.indexed = 1, \
-	.channel = (_chan), \
-	.address = (_addr | (_chan / 2) | ((_chan & 1) ? LTC2497_SIGN : 0)), \
-	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
-	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
-	.datasheet_name = (_ds_name), \
-}
-
-#define LTC2497_CHAN_DIFF(_chan, _addr) { \
-	.type = IIO_VOLTAGE, \
-	.indexed = 1, \
-	.channel = (_chan) * 2 + ((_addr) & LTC2497_SIGN ? 1 : 0), \
-	.channel2 = (_chan) * 2 + ((_addr) & LTC2497_SIGN ? 0 : 1),\
-	.address = (_addr | _chan), \
-	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
-	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
-	.differential = 1, \
-}
-
-static const struct iio_chan_spec ltc2497_channel[] = {
-	LTC2497_CHAN(0, LTC2497_SGL, "CH0"),
-	LTC2497_CHAN(1, LTC2497_SGL, "CH1"),
-	LTC2497_CHAN(2, LTC2497_SGL, "CH2"),
-	LTC2497_CHAN(3, LTC2497_SGL, "CH3"),
-	LTC2497_CHAN(4, LTC2497_SGL, "CH4"),
-	LTC2497_CHAN(5, LTC2497_SGL, "CH5"),
-	LTC2497_CHAN(6, LTC2497_SGL, "CH6"),
-	LTC2497_CHAN(7, LTC2497_SGL, "CH7"),
-	LTC2497_CHAN(8, LTC2497_SGL, "CH8"),
-	LTC2497_CHAN(9, LTC2497_SGL, "CH9"),
-	LTC2497_CHAN(10, LTC2497_SGL, "CH10"),
-	LTC2497_CHAN(11, LTC2497_SGL, "CH11"),
-	LTC2497_CHAN(12, LTC2497_SGL, "CH12"),
-	LTC2497_CHAN(13, LTC2497_SGL, "CH13"),
-	LTC2497_CHAN(14, LTC2497_SGL, "CH14"),
-	LTC2497_CHAN(15, LTC2497_SGL, "CH15"),
-	LTC2497_CHAN_DIFF(0, LTC2497_DIFF),
-	LTC2497_CHAN_DIFF(1, LTC2497_DIFF),
-	LTC2497_CHAN_DIFF(2, LTC2497_DIFF),
-	LTC2497_CHAN_DIFF(3, LTC2497_DIFF),
-	LTC2497_CHAN_DIFF(4, LTC2497_DIFF),
-	LTC2497_CHAN_DIFF(5, LTC2497_DIFF),
-	LTC2497_CHAN_DIFF(6, LTC2497_DIFF),
-	LTC2497_CHAN_DIFF(7, LTC2497_DIFF),
-	LTC2497_CHAN_DIFF(0, LTC2497_DIFF | LTC2497_SIGN),
-	LTC2497_CHAN_DIFF(1, LTC2497_DIFF | LTC2497_SIGN),
-	LTC2497_CHAN_DIFF(2, LTC2497_DIFF | LTC2497_SIGN),
-	LTC2497_CHAN_DIFF(3, LTC2497_DIFF | LTC2497_SIGN),
-	LTC2497_CHAN_DIFF(4, LTC2497_DIFF | LTC2497_SIGN),
-	LTC2497_CHAN_DIFF(5, LTC2497_DIFF | LTC2497_SIGN),
-	LTC2497_CHAN_DIFF(6, LTC2497_DIFF | LTC2497_SIGN),
-	LTC2497_CHAN_DIFF(7, LTC2497_DIFF | LTC2497_SIGN),
-};
-
-static const struct iio_info ltc2497_info = {
-	.read_raw = ltc2497_read_raw,
-};
-
 static int ltc2497_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
 	struct iio_dev *indio_dev;
-	struct ltc2497_st *st;
-	struct iio_map *plat_data;
-	int ret;
+	struct ltc2497_driverdata *st;
+	struct device *dev = &client->dev;
 
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
 				     I2C_FUNC_SMBUS_WRITE_BYTE))
 		return -EOPNOTSUPP;
 
-	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
 	if (!indio_dev)
 		return -ENOMEM;
 
 	st = iio_priv(indio_dev);
 	i2c_set_clientdata(client, indio_dev);
 	st->client = client;
+	st->common_ddata.result_and_measure = ltc2497_result_and_measure;
 
-	indio_dev->dev.parent = &client->dev;
-	indio_dev->name = id->name;
-	indio_dev->info = &ltc2497_info;
-	indio_dev->modes = INDIO_DIRECT_MODE;
-	indio_dev->channels = ltc2497_channel;
-	indio_dev->num_channels = ARRAY_SIZE(ltc2497_channel);
-
-	st->ref = devm_regulator_get(&client->dev, "vref");
-	if (IS_ERR(st->ref))
-		return PTR_ERR(st->ref);
-
-	ret = regulator_enable(st->ref);
-	if (ret < 0)
-		return ret;
-
-	if (client->dev.platform_data) {
-		plat_data = ((struct iio_map *)client->dev.platform_data);
-		ret = iio_map_array_register(indio_dev, plat_data);
-		if (ret) {
-			dev_err(&indio_dev->dev, "iio map err: %d\n", ret);
-			goto err_regulator_disable;
-		}
-	}
-
-	ret = i2c_smbus_write_byte(st->client, LTC2497_CONFIG_DEFAULT);
-	if (ret < 0)
-		goto err_array_unregister;
-
-	st->addr_prev = LTC2497_CONFIG_DEFAULT;
-	st->time_prev = ktime_get();
-
-	ret = iio_device_register(indio_dev);
-	if (ret < 0)
-		goto err_array_unregister;
-
-	return 0;
-
-err_array_unregister:
-	iio_map_array_unregister(indio_dev);
-
-err_regulator_disable:
-	regulator_disable(st->ref);
-
-	return ret;
+	return ltc2497core_probe(dev, indio_dev);
 }
 
 static int ltc2497_remove(struct i2c_client *client)
 {
 	struct iio_dev *indio_dev = i2c_get_clientdata(client);
-	struct ltc2497_st *st = iio_priv(indio_dev);
 
-	iio_map_array_unregister(indio_dev);
-	iio_device_unregister(indio_dev);
-	regulator_disable(st->ref);
+	ltc2497core_remove(indio_dev);
 
 	return 0;
 }
diff --git a/drivers/iio/adc/ltc2497.h b/drivers/iio/adc/ltc2497.h
new file mode 100644
index 0000000..d0b42dd
--- /dev/null
+++ b/drivers/iio/adc/ltc2497.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#define LTC2497_ENABLE			0xA0
+#define LTC2497_CONFIG_DEFAULT		LTC2497_ENABLE
+#define LTC2497_CONVERSION_TIME_MS	150ULL
+
+struct ltc2497core_driverdata {
+	struct regulator *ref;
+	ktime_t	time_prev;
+	u8 addr_prev;
+	int (*result_and_measure)(struct ltc2497core_driverdata *ddata,
+				  u8 address, int *val);
+};
+
+int ltc2497core_probe(struct device *dev, struct iio_dev *indio_dev);
+void ltc2497core_remove(struct iio_dev *indio_dev);
+
+MODULE_IMPORT_NS(LTC2497);
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index e480529..04d5ff7 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -115,22 +115,17 @@ enum max9611_conf_ids {
  *		      where data shall be read from
  */
 static const unsigned int max9611_mux_conf[][2] = {
-	/* CONF_SENSE_1x */
-	{ MAX9611_MUX_SENSE_1x, MAX9611_REG_CSA_DATA },
-	/* CONF_SENSE_4x */
-	{ MAX9611_MUX_SENSE_4x, MAX9611_REG_CSA_DATA },
-	/* CONF_SENSE_8x */
-	{ MAX9611_MUX_SENSE_8x, MAX9611_REG_CSA_DATA },
-	/* CONF_IN_VOLT */
-	{ MAX9611_INPUT_VOLT, MAX9611_REG_RS_DATA },
-	/* CONF_TEMP */
-	{ MAX9611_MUX_TEMP, MAX9611_REG_TEMP_DATA },
+	[CONF_SENSE_1x]	= { MAX9611_MUX_SENSE_1x, MAX9611_REG_CSA_DATA },
+	[CONF_SENSE_4x]	= { MAX9611_MUX_SENSE_4x, MAX9611_REG_CSA_DATA },
+	[CONF_SENSE_8x]	= { MAX9611_MUX_SENSE_8x, MAX9611_REG_CSA_DATA },
+	[CONF_IN_VOLT]	= { MAX9611_INPUT_VOLT, MAX9611_REG_RS_DATA },
+	[CONF_TEMP]	= { MAX9611_MUX_TEMP, MAX9611_REG_TEMP_DATA },
 };
 
 enum max9611_csa_gain {
-	CSA_GAIN_1x,
-	CSA_GAIN_4x,
-	CSA_GAIN_8x,
+	CSA_GAIN_1x = CONF_SENSE_1x,
+	CSA_GAIN_4x = CONF_SENSE_4x,
+	CSA_GAIN_8x = CONF_SENSE_8x,
 };
 
 enum max9611_csa_gain_params {
@@ -148,18 +143,9 @@ enum max9611_csa_gain_params {
  * value; use this structure to retrieve the correct LSB and offset values.
  */
 static const unsigned int max9611_gain_conf[][2] = {
-	{ /* [0] CSA_GAIN_1x */
-		MAX9611_CSA_1X_LSB_nV,
-		MAX9611_CSA_1X_OFFS_RAW,
-	},
-	{ /* [1] CSA_GAIN_4x */
-		MAX9611_CSA_4X_LSB_nV,
-		MAX9611_CSA_4X_OFFS_RAW,
-	},
-	{ /* [2] CSA_GAIN_8x */
-		MAX9611_CSA_8X_LSB_nV,
-		MAX9611_CSA_8X_OFFS_RAW,
-	},
+	[CSA_GAIN_1x] = { MAX9611_CSA_1X_LSB_nV, MAX9611_CSA_1X_OFFS_RAW, },
+	[CSA_GAIN_4x] = { MAX9611_CSA_4X_LSB_nV, MAX9611_CSA_4X_OFFS_RAW, },
+	[CSA_GAIN_8x] = { MAX9611_CSA_8X_LSB_nV, MAX9611_CSA_8X_OFFS_RAW, },
 };
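The max9611 tables above switch to designated initializers so each row is tied to its enum index instead of relying on comment-documented ordering. The pattern in isolation (the values below are invented, purely for illustration):

#include <stdio.h>

enum gain_sel { GAIN_1x, GAIN_4x, GAIN_8x };

static const unsigned int lsb_nv[] = {
	[GAIN_1x] = 107500,	/* hypothetical LSB sizes in nanovolts */
	[GAIN_4x] = 26880,
	[GAIN_8x] = 13440,
};

int main(void)
{
	/* reordering the enum cannot silently desynchronize the table */
	printf("%u nV/LSB at 4x gain\n", lsb_nv[GAIN_4x]);
	return 0;
}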
 
 enum max9611_chan_addrs {
diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c
index dcd7fb5..2bb78d1 100644
--- a/drivers/iio/adc/qcom-vadc-common.c
+++ b/drivers/iio/adc/qcom-vadc-common.c
@@ -6,6 +6,7 @@
 #include <linux/log2.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/units.h>
 
 #include "qcom-vadc-common.h"
 
@@ -236,8 +237,7 @@ static int qcom_vadc_scale_die_temp(const struct vadc_linear_graph *calib_graph,
 		voltage = 0;
 	}
 
-	voltage -= KELVINMIL_CELSIUSMIL;
-	*result_mdec = voltage;
+	*result_mdec = milli_kelvin_to_millicelsius(voltage);
 
 	return 0;
 }
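The qcom-vadc change swaps the local KELVINMIL_CELSIUSMIL constant for the helper from <linux/units.h>; assuming that helper is the usual millikelvin-to-millidegree-Celsius offset of 273150, the arithmetic is simply:

#include <stdio.h>

/* stand-alone sketch of milli_kelvin_to_millicelsius(); the 273150
 * offset is the assumption being illustrated here */
static long mk_to_mc(long millikelvin)
{
	return millikelvin - 273150;
}

int main(void)
{
	printf("%ld\n", mk_to_mc(310150));	/* 310150 mK -> 37000 millidegrees C */
	return 0;
}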
@@ -325,7 +325,7 @@ static int qcom_vadc_scale_hw_calib_die_temp(
 {
 	*result_mdec = qcom_vadc_scale_code_voltage_factor(adc_code,
 				prescale, data, 2);
-	*result_mdec -= KELVINMIL_CELSIUSMIL;
+	*result_mdec = milli_kelvin_to_millicelsius(*result_mdec);
 
 	return 0;
 }
diff --git a/drivers/iio/adc/qcom-vadc-common.h b/drivers/iio/adc/qcom-vadc-common.h
index bbb1fa0..e074902a 100644
--- a/drivers/iio/adc/qcom-vadc-common.h
+++ b/drivers/iio/adc/qcom-vadc-common.h
@@ -38,7 +38,6 @@
 #define VADC_AVG_SAMPLES_MAX			512
 #define ADC5_AVG_SAMPLES_MAX			16
 
-#define KELVINMIL_CELSIUSMIL			273150
 #define PMIC5_CHG_TEMP_SCALE_FACTOR		377500
 #define PMIC5_SMB_TEMP_CONSTANT			419400
 #define PMIC5_SMB_TEMP_SCALE_FACTOR		356
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 6537f4f..2df88d2 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -280,21 +280,21 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
 static const struct stm32_adc_common_regs stm32f4_adc_common_regs = {
 	.csr = STM32F4_ADC_CSR,
 	.ccr = STM32F4_ADC_CCR,
-	.eoc1_msk = STM32F4_EOC1,
-	.eoc2_msk = STM32F4_EOC2,
-	.eoc3_msk = STM32F4_EOC3,
+	.eoc1_msk = STM32F4_EOC1 | STM32F4_OVR1,
+	.eoc2_msk = STM32F4_EOC2 | STM32F4_OVR2,
+	.eoc3_msk = STM32F4_EOC3 | STM32F4_OVR3,
 	.ier = STM32F4_ADC_CR1,
-	.eocie_msk = STM32F4_EOCIE,
+	.eocie_msk = STM32F4_EOCIE | STM32F4_OVRIE,
 };
 
 /* STM32H7 common registers definitions */
 static const struct stm32_adc_common_regs stm32h7_adc_common_regs = {
 	.csr = STM32H7_ADC_CSR,
 	.ccr = STM32H7_ADC_CCR,
-	.eoc1_msk = STM32H7_EOC_MST,
-	.eoc2_msk = STM32H7_EOC_SLV,
+	.eoc1_msk = STM32H7_EOC_MST | STM32H7_OVR_MST,
+	.eoc2_msk = STM32H7_EOC_SLV | STM32H7_OVR_SLV,
 	.ier = STM32H7_ADC_IER,
-	.eocie_msk = STM32H7_EOCIE,
+	.eocie_msk = STM32H7_EOCIE | STM32H7_OVRIE,
 };
 
 static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = {
@@ -688,7 +688,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
 	priv->vref = devm_regulator_get(&pdev->dev, "vref");
 	if (IS_ERR(priv->vref)) {
 		ret = PTR_ERR(priv->vref);
-		dev_err(&pdev->dev, "vref get failed, %d\n", ret);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "vref get failed, %d\n", ret);
 		return ret;
 	}
 
@@ -696,7 +697,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->aclk)) {
 		ret = PTR_ERR(priv->aclk);
 		if (ret != -ENOENT) {
-			dev_err(&pdev->dev, "Can't get 'adc' clock\n");
+			if (ret != -EPROBE_DEFER)
+				dev_err(&pdev->dev, "Can't get 'adc' clock\n");
 			return ret;
 		}
 		priv->aclk = NULL;
@@ -706,7 +708,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->bclk)) {
 		ret = PTR_ERR(priv->bclk);
 		if (ret != -ENOENT) {
-			dev_err(&pdev->dev, "Can't get 'bus' clock\n");
+			if (ret != -EPROBE_DEFER)
+				dev_err(&pdev->dev, "Can't get 'bus' clock\n");
 			return ret;
 		}
 		priv->bclk = NULL;
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
index 2579d51..2322809 100644
--- a/drivers/iio/adc/stm32-adc-core.h
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -51,10 +51,12 @@
 #define STM32F4_ADC_CCR			(STM32_ADCX_COMN_OFFSET + 0x04)
 
 /* STM32F4_ADC_SR - bit fields */
+#define STM32F4_OVR			BIT(5)
 #define STM32F4_STRT			BIT(4)
 #define STM32F4_EOC			BIT(1)
 
 /* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_OVRIE			BIT(26)
 #define STM32F4_RES_SHIFT		24
 #define STM32F4_RES_MASK		GENMASK(25, 24)
 #define STM32F4_SCAN			BIT(8)
@@ -72,8 +74,11 @@
 #define STM32F4_ADON			BIT(0)
 
 /* STM32F4_ADC_CSR - bit fields */
+#define STM32F4_OVR3			BIT(21)
 #define STM32F4_EOC3			BIT(17)
+#define STM32F4_OVR2			BIT(13)
 #define STM32F4_EOC2			BIT(9)
+#define STM32F4_OVR1			BIT(5)
 #define STM32F4_EOC1			BIT(1)
 
 /* STM32F4_ADC_CCR - bit fields */
@@ -103,10 +108,12 @@
 
 /* STM32H7_ADC_ISR - bit fields */
 #define STM32MP1_VREGREADY		BIT(12)
+#define STM32H7_OVR			BIT(4)
 #define STM32H7_EOC			BIT(2)
 #define STM32H7_ADRDY			BIT(0)
 
 /* STM32H7_ADC_IER - bit fields */
+#define STM32H7_OVRIE			STM32H7_OVR
 #define STM32H7_EOCIE			STM32H7_EOC
 
 /* STM32H7_ADC_CR - bit fields */
@@ -155,7 +162,9 @@ enum stm32h7_adc_dmngt {
 #define STM32H7_LINCALFACT_MASK		GENMASK(29, 0)
 
 /* STM32H7_ADC_CSR - bit fields */
+#define STM32H7_OVR_SLV			BIT(20)
 #define STM32H7_EOC_SLV			BIT(18)
+#define STM32H7_OVR_MST			BIT(4)
 #define STM32H7_EOC_MST			BIT(2)
 
 /* STM32H7_ADC_CCR - bit fields */
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 3b291d7..80c3f96 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -117,7 +117,9 @@ struct stm32_adc_regs {
  * struct stm32_adc_regspec - stm32 registers definition
  * @dr:			data register offset
  * @ier_eoc:		interrupt enable register & eocie bitfield
+ * @ier_ovr:		interrupt enable register & overrun bitfield
  * @isr_eoc:		interrupt status register & eoc bitfield
+ * @isr_ovr:		interrupt status register & overrun bitfield
  * @sqr:		reference to sequence registers array
  * @exten:		trigger control register & bitfield
  * @extsel:		trigger selection register & bitfield
@@ -128,7 +130,9 @@ struct stm32_adc_regs {
 struct stm32_adc_regspec {
 	const u32 dr;
 	const struct stm32_adc_regs ier_eoc;
+	const struct stm32_adc_regs ier_ovr;
 	const struct stm32_adc_regs isr_eoc;
+	const struct stm32_adc_regs isr_ovr;
 	const struct stm32_adc_regs *sqr;
 	const struct stm32_adc_regs exten;
 	const struct stm32_adc_regs extsel;
@@ -337,7 +341,9 @@ static const unsigned int stm32f4_adc_smp_cycles[STM32_ADC_MAX_SMP + 1] = {
 static const struct stm32_adc_regspec stm32f4_adc_regspec = {
 	.dr = STM32F4_ADC_DR,
 	.ier_eoc = { STM32F4_ADC_CR1, STM32F4_EOCIE },
+	.ier_ovr = { STM32F4_ADC_CR1, STM32F4_OVRIE },
 	.isr_eoc = { STM32F4_ADC_SR, STM32F4_EOC },
+	.isr_ovr = { STM32F4_ADC_SR, STM32F4_OVR },
 	.sqr = stm32f4_sq,
 	.exten = { STM32F4_ADC_CR2, STM32F4_EXTEN_MASK, STM32F4_EXTEN_SHIFT },
 	.extsel = { STM32F4_ADC_CR2, STM32F4_EXTSEL_MASK,
@@ -429,7 +435,9 @@ static const unsigned int stm32h7_adc_smp_cycles[STM32_ADC_MAX_SMP + 1] = {
 static const struct stm32_adc_regspec stm32h7_adc_regspec = {
 	.dr = STM32H7_ADC_DR,
 	.ier_eoc = { STM32H7_ADC_IER, STM32H7_EOCIE },
+	.ier_ovr = { STM32H7_ADC_IER, STM32H7_OVRIE },
 	.isr_eoc = { STM32H7_ADC_ISR, STM32H7_EOC },
+	.isr_ovr = { STM32H7_ADC_ISR, STM32H7_OVR },
 	.sqr = stm32h7_sq,
 	.exten = { STM32H7_ADC_CFGR, STM32H7_EXTEN_MASK, STM32H7_EXTEN_SHIFT },
 	.extsel = { STM32H7_ADC_CFGR, STM32H7_EXTSEL_MASK,
@@ -506,6 +514,18 @@ static void stm32_adc_conv_irq_disable(struct stm32_adc *adc)
 			   adc->cfg->regs->ier_eoc.mask);
 }
 
+static void stm32_adc_ovr_irq_enable(struct stm32_adc *adc)
+{
+	stm32_adc_set_bits(adc, adc->cfg->regs->ier_ovr.reg,
+			   adc->cfg->regs->ier_ovr.mask);
+}
+
+static void stm32_adc_ovr_irq_disable(struct stm32_adc *adc)
+{
+	stm32_adc_clr_bits(adc, adc->cfg->regs->ier_ovr.reg,
+			   adc->cfg->regs->ier_ovr.mask);
+}
+
 static void stm32_adc_set_res(struct stm32_adc *adc)
 {
 	const struct stm32_adc_regs *res = &adc->cfg->regs->res;
@@ -1205,6 +1225,19 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
 	}
 }
 
+static irqreturn_t stm32_adc_threaded_isr(int irq, void *data)
+{
+	struct stm32_adc *adc = data;
+	struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+	const struct stm32_adc_regspec *regs = adc->cfg->regs;
+	u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
+
+	if (status & regs->isr_ovr.mask)
+		dev_err(&indio_dev->dev, "Overrun, stopping: restart needed\n");
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t stm32_adc_isr(int irq, void *data)
 {
 	struct stm32_adc *adc = data;
@@ -1212,6 +1245,19 @@ static irqreturn_t stm32_adc_isr(int irq, void *data)
 	const struct stm32_adc_regspec *regs = adc->cfg->regs;
 	u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
 
+	if (status & regs->isr_ovr.mask) {
+		/*
+		 * Overrun occurred on regular conversions: data for wrong
+		 * channel may be read. Unconditionally disable interrupts
+		 * to stop processing data and print error message.
+		 * Restarting the capture can be done by disabling, then
+		 * re-enabling it (e.g. write 0, then 1 to buffer/enable).
+		 */
+		stm32_adc_ovr_irq_disable(adc);
+		stm32_adc_conv_irq_disable(adc);
+		return IRQ_WAKE_THREAD;
+	}
+
 	if (status & regs->isr_eoc.mask) {
 		/* Reading DR also clears EOC status flag */
 		adc->buffer[adc->bufi] = stm32_adc_readw(adc, regs->dr);
@@ -1441,6 +1487,8 @@ static int __stm32_adc_buffer_postenable(struct iio_dev *indio_dev)
 	/* Reset adc buffer index */
 	adc->bufi = 0;
 
+	stm32_adc_ovr_irq_enable(adc);
+
 	if (!adc->dma_chan)
 		stm32_adc_conv_irq_enable(adc);
 
@@ -1481,6 +1529,8 @@ static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev)
 	if (!adc->dma_chan)
 		stm32_adc_conv_irq_disable(adc);
 
+	stm32_adc_ovr_irq_disable(adc);
+
 	if (adc->dma_chan)
 		dmaengine_terminate_sync(adc->dma_chan);
 
@@ -1746,9 +1796,21 @@ static int stm32_adc_dma_request(struct iio_dev *indio_dev)
 	struct dma_slave_config config;
 	int ret;
 
-	adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
-	if (!adc->dma_chan)
+	adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
+	if (IS_ERR(adc->dma_chan)) {
+		ret = PTR_ERR(adc->dma_chan);
+		if (ret != -ENODEV) {
+			if (ret != -EPROBE_DEFER)
+				dev_err(&indio_dev->dev,
+					"DMA channel request failed with %d\n",
+					ret);
+			return ret;
+		}
+
+		/* DMA is optional: fall back to IRQ mode */
+		adc->dma_chan = NULL;
 		return 0;
+	}
 
 	adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
 					 STM32_DMA_BUFFER_SIZE,
@@ -1818,8 +1880,9 @@ static int stm32_adc_probe(struct platform_device *pdev)
 	if (adc->irq < 0)
 		return adc->irq;
 
-	ret = devm_request_irq(&pdev->dev, adc->irq, stm32_adc_isr,
-			       0, pdev->name, adc);
+	ret = devm_request_threaded_irq(&pdev->dev, adc->irq, stm32_adc_isr,
+					stm32_adc_threaded_isr,
+					0, pdev->name, adc);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to request IRQ\n");
 		return ret;
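The stm32-adc conversion to devm_request_threaded_irq() above splits the work the usual way: the hard handler only inspects status and defers, while the threaded handler may log or do lengthier work. A skeletal sketch of that split (the foo_* names are placeholders):

#include <linux/interrupt.h>
#include <linux/printk.h>

static irqreturn_t foo_hardirq(int irq, void *data)
{
	/* read/ack status registers here; on a condition needing more work
	 * than hard-IRQ context allows, wake the thread */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_threadfn(int irq, void *data)
{
	pr_err("foo: overrun, capture must be restarted\n");
	return IRQ_HANDLED;
}

/* in probe():
 *	ret = devm_request_threaded_irq(dev, irq, foo_hardirq, foo_threadfn,
 *					0, dev_name(dev), priv);
 */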
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index e493242..2aad2cd 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -1204,6 +1204,8 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
 
 	stm32_dfsdm_stop_conv(adc);
 
+	stm32_dfsdm_process_data(adc, res);
+
 stop_dfsdm:
 	stm32_dfsdm_stop_dfsdm(adc->dfsdm);
 
@@ -1219,14 +1221,32 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
 	unsigned int spi_freq;
 	int ret = -EINVAL;
 
+	switch (ch->src) {
+	case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL:
+		spi_freq = adc->dfsdm->spi_master_freq;
+		break;
+	case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING:
+	case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING:
+		spi_freq = adc->dfsdm->spi_master_freq / 2;
+		break;
+	default:
+		spi_freq = adc->spi_freq;
+	}
+
 	switch (mask) {
 	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
 		ret = iio_device_claim_direct_mode(indio_dev);
 		if (ret)
 			return ret;
+
 		ret = stm32_dfsdm_compute_all_osrs(indio_dev, val);
-		if (!ret)
+		if (!ret) {
+			dev_dbg(&indio_dev->dev,
+				"Sampling rate changed from (%u) to (%u)\n",
+				adc->sample_freq, spi_freq / val);
 			adc->oversamp = val;
+			adc->sample_freq = spi_freq / val;
+		}
 		iio_device_release_direct_mode(indio_dev);
 		return ret;
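The dfsdm change above records sample_freq = spi_freq / oversampling whenever the oversampling ratio is rewritten. Worked through with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned int spi_freq = 6144000;	/* hypothetical serial clock, Hz */
	unsigned int oversamp = 64;		/* requested oversampling ratio */

	printf("sample rate: %u Hz\n", spi_freq / oversamp);	/* 96000 */
	return 0;
}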
 
@@ -1238,18 +1258,6 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
 		if (ret)
 			return ret;
 
-		switch (ch->src) {
-		case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL:
-			spi_freq = adc->dfsdm->spi_master_freq;
-			break;
-		case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING:
-		case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING:
-			spi_freq = adc->dfsdm->spi_master_freq / 2;
-			break;
-		default:
-			spi_freq = adc->spi_freq;
-		}
-
 		ret = dfsdm_adc_set_samp_freq(indio_dev, val, spi_freq);
 		iio_device_release_direct_mode(indio_dev);
 		return ret;
@@ -1383,9 +1391,13 @@ static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
 {
 	struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
 
-	adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
-	if (!adc->dma_chan)
-		return -EINVAL;
+	adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
+	if (IS_ERR(adc->dma_chan)) {
+		int ret = PTR_ERR(adc->dma_chan);
+
+		adc->dma_chan = NULL;
+		return ret;
+	}
 
 	adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
 					 DFSDM_DMA_BUFFER_SIZE,
@@ -1509,7 +1521,16 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
 	init_completion(&adc->completion);
 
 	/* Optionally request DMA */
-	if (stm32_dfsdm_dma_request(indio_dev)) {
+	ret = stm32_dfsdm_dma_request(indio_dev);
+	if (ret) {
+		if (ret != -ENODEV) {
+			if (ret != -EPROBE_DEFER)
+				dev_err(&indio_dev->dev,
+					"DMA channel request failed with %d\n",
+					ret);
+			return ret;
+		}
+
 		dev_dbg(&indio_dev->dev, "No DMA support\n");
 		return 0;
 	}
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index a550b13..5ea4f45 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -12,17 +12,15 @@
  */
 
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/i2c.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
 #include <linux/pm_runtime.h>
 #include <linux/mutex.h>
 #include <linux/delay.h>
 
-#include <linux/platform_data/ads1015.h>
-
 #include <linux/iio/iio.h>
 #include <linux/iio/types.h>
 #include <linux/iio/sysfs.h>
@@ -33,6 +31,8 @@
 
 #define ADS1015_DRV_NAME "ads1015"
 
+#define ADS1015_CHANNELS 8
+
 #define ADS1015_CONV_REG	0x00
 #define ADS1015_CFG_REG		0x01
 #define ADS1015_LO_THRESH_REG	0x02
@@ -77,6 +77,7 @@
 #define ADS1015_DEFAULT_CHAN		0
 
 enum chip_ids {
+	ADSXXXX = 0,
 	ADS1015,
 	ADS1115,
 };
@@ -219,6 +220,12 @@ static const struct iio_event_spec ads1015_events[] = {
 	.datasheet_name = "AIN"#_chan"-AIN"#_chan2,		\
 }
 
+struct ads1015_channel_data {
+	bool enabled;
+	unsigned int pga;
+	unsigned int data_rate;
+};
+
 struct ads1015_thresh_data {
 	unsigned int comp_queue;
 	int high_thresh;
@@ -837,65 +844,58 @@ static const struct iio_info ads1115_info = {
 	.attrs          = &ads1115_attribute_group,
 };
 
-#ifdef CONFIG_OF
-static int ads1015_get_channels_config_of(struct i2c_client *client)
+static int ads1015_client_get_channels_config(struct i2c_client *client)
 {
 	struct iio_dev *indio_dev = i2c_get_clientdata(client);
 	struct ads1015_data *data = iio_priv(indio_dev);
-	struct device_node *node;
+	struct device *dev = &client->dev;
+	struct fwnode_handle *node;
+	int i = -1;
 
-	if (!client->dev.of_node ||
-	    !of_get_next_child(client->dev.of_node, NULL))
-		return -EINVAL;
-
-	for_each_child_of_node(client->dev.of_node, node) {
+	device_for_each_child_node(dev, node) {
 		u32 pval;
 		unsigned int channel;
 		unsigned int pga = ADS1015_DEFAULT_PGA;
 		unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE;
 
-		if (of_property_read_u32(node, "reg", &pval)) {
-			dev_err(&client->dev, "invalid reg on %pOF\n",
-				node);
+		if (fwnode_property_read_u32(node, "reg", &pval)) {
+			dev_err(dev, "invalid reg on %pfw\n", node);
 			continue;
 		}
 
 		channel = pval;
 		if (channel >= ADS1015_CHANNELS) {
-			dev_err(&client->dev,
-				"invalid channel index %d on %pOF\n",
+			dev_err(dev, "invalid channel index %d on %pfw\n",
 				channel, node);
 			continue;
 		}
 
-		if (!of_property_read_u32(node, "ti,gain", &pval)) {
+		if (!fwnode_property_read_u32(node, "ti,gain", &pval)) {
 			pga = pval;
 			if (pga > 6) {
-				dev_err(&client->dev, "invalid gain on %pOF\n",
-					node);
-				of_node_put(node);
+				dev_err(dev, "invalid gain on %pfw\n", node);
+				fwnode_handle_put(node);
 				return -EINVAL;
 			}
 		}
 
-		if (!of_property_read_u32(node, "ti,datarate", &pval)) {
+		if (!fwnode_property_read_u32(node, "ti,datarate", &pval)) {
 			data_rate = pval;
 			if (data_rate > 7) {
-				dev_err(&client->dev,
-					"invalid data_rate on %pOF\n",
-					node);
-				of_node_put(node);
+				dev_err(dev, "invalid data_rate on %pfw\n", node);
+				fwnode_handle_put(node);
 				return -EINVAL;
 			}
 		}
 
 		data->channel_data[channel].pga = pga;
 		data->channel_data[channel].data_rate = data_rate;
+
+		i++;
 	}
 
-	return 0;
+	return i < 0 ? -EINVAL : 0;
 }
-#endif
 
 static void ads1015_get_channels_config(struct i2c_client *client)
 {
@@ -903,19 +903,10 @@ static void ads1015_get_channels_config(struct i2c_client *client)
 
 	struct iio_dev *indio_dev = i2c_get_clientdata(client);
 	struct ads1015_data *data = iio_priv(indio_dev);
-	struct ads1015_platform_data *pdata = dev_get_platdata(&client->dev);
 
-	/* prefer platform data */
-	if (pdata) {
-		memcpy(data->channel_data, pdata->channel_data,
-		       sizeof(data->channel_data));
+	if (!ads1015_client_get_channels_config(client))
 		return;
-	}
 
-#ifdef CONFIG_OF
-	if (!ads1015_get_channels_config_of(client))
-		return;
-#endif
 	/* fallback on default configuration */
 	for (k = 0; k < ADS1015_CHANNELS; ++k) {
 		data->channel_data[k].pga = ADS1015_DEFAULT_PGA;
@@ -953,9 +944,8 @@ static int ads1015_probe(struct i2c_client *client,
 	indio_dev->name = ADS1015_DRV_NAME;
 	indio_dev->modes = INDIO_DIRECT_MODE;
 
-	if (client->dev.of_node)
-		chip = (enum chip_ids)of_device_get_match_data(&client->dev);
-	else
+	chip = (enum chip_ids)device_get_match_data(&client->dev);
+	if (chip == ADSXXXX)
 		chip = id->driver_data;
 	switch (chip) {
 	case ADS1015:
@@ -970,6 +960,9 @@ static int ads1015_probe(struct i2c_client *client,
 		indio_dev->info = &ads1115_info;
 		data->data_rate = (unsigned int *) &ads1115_data_rate;
 		break;
+	default:
+		dev_err(&client->dev, "Unknown chip %d\n", chip);
+		return -EINVAL;
 	}
 
 	data->event_channel = ADS1015_CHANNELS;
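The ti-ads1015 conversion in this file drops the CONFIG_OF-only of_* parsing in favour of the fwnode API, which covers both device tree and ACPI described children. A condensed sketch of that iteration pattern (property names follow the existing binding; foo_parse_channels() is invented here):

#include <linux/device.h>
#include <linux/property.h>

static int foo_parse_channels(struct device *dev)
{
	struct fwnode_handle *child;
	u32 reg, gain;

	device_for_each_child_node(dev, child) {
		if (fwnode_property_read_u32(child, "reg", &reg))
			continue;	/* skip nodes without a channel index */

		if (!fwnode_property_read_u32(child, "ti,gain", &gain) &&
		    gain > 6) {
			/* the iterator holds a reference; drop it on early exit */
			fwnode_handle_put(child);
			return -EINVAL;
		}
	}

	return 0;
}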
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 2e66e4d..f9edc120 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -602,7 +602,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
 
 	st->reg = devm_regulator_get(&spi->dev, "vref");
 	if (IS_ERR(st->reg)) {
-		dev_err(&spi->dev, "Failed get get regulator \"vref\"\n");
+		dev_err(&spi->dev, "Failed to get regulator \"vref\"\n");
 		ret = PTR_ERR(st->reg);
 		goto error_destroy_mutex;
 	}
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 90cf6e5..a74bd9c 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -476,7 +476,7 @@ static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
  * @n: Number of bytes to read
  * @user_buffer: Userspace buffer to copy the data to
  *
- * Should be used as the read_first_n callback for iio_buffer_access_ops
+ * Should be used as the read callback for iio_buffer_access_ops
  * struct for DMA buffers.
  */
 int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index bea4a75..b129693 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -10,8 +10,10 @@
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
 #include <linux/err.h>
+#include <linux/module.h>
 
 #include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
 #include <linux/iio/buffer.h>
 #include <linux/iio/buffer_impl.h>
 #include <linux/iio/buffer-dma.h>
@@ -107,7 +109,7 @@ static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
 }
 
 static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
-	.read_first_n = iio_dma_buffer_read,
+	.read = iio_dma_buffer_read,
 	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 	.set_length = iio_dma_buffer_set_length,
 	.request_update = iio_dma_buffer_request_update,
@@ -125,6 +127,24 @@ static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
 	.abort = iio_dmaengine_buffer_abort,
 };
 
+static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct dmaengine_buffer *dmaengine_buffer =
+		iio_buffer_to_dmaengine_buffer(indio_dev->buffer);
+
+	return sprintf(buf, "%u\n", dmaengine_buffer->align);
+}
+
+static IIO_DEVICE_ATTR(length_align_bytes, 0444,
+		       iio_dmaengine_buffer_get_length_align, NULL, 0);
+
+static const struct attribute *iio_dmaengine_buffer_attrs[] = {
+	&iio_dev_attr_length_align_bytes.dev_attr.attr,
+	NULL,
+};
+
 /**
  * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
  * @dev: Parent device for the buffer
@@ -150,7 +170,7 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 	if (!dmaengine_buffer)
 		return ERR_PTR(-ENOMEM);
 
-	chan = dma_request_slave_channel_reason(dev, channel);
+	chan = dma_request_chan(dev, channel);
 	if (IS_ERR(chan)) {
 		ret = PTR_ERR(chan);
 		goto err_free;
@@ -178,6 +198,8 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 
 	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
 		&iio_dmaengine_default_ops);
+	iio_buffer_set_attrs(&dmaengine_buffer->queue.buffer,
+		iio_dmaengine_buffer_attrs);
 
 	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
 
@@ -206,3 +228,7 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
 	iio_buffer_put(buffer);
 }
 EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("DMA buffer for the IIO framework");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index e78fc08..3150f8a 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -98,8 +98,7 @@ static int iio_store_to_kfifo(struct iio_buffer *r,
 	return 0;
 }
 
-static int iio_read_first_n_kfifo(struct iio_buffer *r,
-			   size_t n, char __user *buf)
+static int iio_read_kfifo(struct iio_buffer *r, size_t n, char __user *buf)
 {
 	int ret, copied;
 	struct iio_kfifo *kf = iio_to_kfifo(r);
@@ -141,7 +140,7 @@ static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
 
 static const struct iio_buffer_access_funcs kfifo_access_funcs = {
 	.store_to = &iio_store_to_kfifo,
-	.read_first_n = &iio_read_first_n_kfifo,
+	.read = &iio_read_kfifo,
 	.data_available = iio_kfifo_buf_data_available,
 	.request_update = &iio_request_update_kfifo,
 	.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index f97270b..33d3a59 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -4,7 +4,7 @@
 #
 
 # When adding new entries keep the list in alphabetical order
-obj-$(CONFIG_ATLAS_PH_SENSOR)	+= atlas-ph-sensor.o
+obj-$(CONFIG_ATLAS_PH_SENSOR)	+= atlas-sensor.o
 obj-$(CONFIG_BME680) += bme680_core.o
 obj-$(CONFIG_BME680_I2C) += bme680_i2c.o
 obj-$(CONFIG_BME680_SPI) += bme680_spi.o
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
deleted file mode 100644
index 6c175eb..0000000
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ /dev/null
@@ -1,685 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * atlas-ph-sensor.c - Support for Atlas Scientific OEM pH-SM sensor
- *
- * Copyright (C) 2015-2018 Matt Ranostay
- * Author: Matt Ranostay <matt.ranostay@konsulko.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/err.h>
-#include <linux/irq.h>
-#include <linux/irq_work.h>
-#include <linux/gpio.h>
-#include <linux/i2c.h>
-#include <linux/of_device.h>
-#include <linux/regmap.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/trigger.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
-#include <linux/pm_runtime.h>
-
-#define ATLAS_REGMAP_NAME	"atlas_ph_regmap"
-#define ATLAS_DRV_NAME		"atlas_ph"
-
-#define ATLAS_REG_DEV_TYPE		0x00
-#define ATLAS_REG_DEV_VERSION		0x01
-
-#define ATLAS_REG_INT_CONTROL		0x04
-#define ATLAS_REG_INT_CONTROL_EN	BIT(3)
-
-#define ATLAS_REG_PWR_CONTROL		0x06
-
-#define ATLAS_REG_PH_CALIB_STATUS	0x0d
-#define ATLAS_REG_PH_CALIB_STATUS_MASK	0x07
-#define ATLAS_REG_PH_CALIB_STATUS_LOW	BIT(0)
-#define ATLAS_REG_PH_CALIB_STATUS_MID	BIT(1)
-#define ATLAS_REG_PH_CALIB_STATUS_HIGH	BIT(2)
-
-#define ATLAS_REG_EC_CALIB_STATUS		0x0f
-#define ATLAS_REG_EC_CALIB_STATUS_MASK		0x0f
-#define ATLAS_REG_EC_CALIB_STATUS_DRY		BIT(0)
-#define ATLAS_REG_EC_CALIB_STATUS_SINGLE	BIT(1)
-#define ATLAS_REG_EC_CALIB_STATUS_LOW		BIT(2)
-#define ATLAS_REG_EC_CALIB_STATUS_HIGH		BIT(3)
-
-#define ATLAS_REG_PH_TEMP_DATA		0x0e
-#define ATLAS_REG_PH_DATA		0x16
-
-#define ATLAS_REG_EC_PROBE		0x08
-#define ATLAS_REG_EC_TEMP_DATA		0x10
-#define ATLAS_REG_EC_DATA		0x18
-#define ATLAS_REG_TDS_DATA		0x1c
-#define ATLAS_REG_PSS_DATA		0x20
-
-#define ATLAS_REG_ORP_CALIB_STATUS	0x0d
-#define ATLAS_REG_ORP_DATA		0x0e
-
-#define ATLAS_PH_INT_TIME_IN_MS		450
-#define ATLAS_EC_INT_TIME_IN_MS		650
-#define ATLAS_ORP_INT_TIME_IN_MS	450
-
-enum {
-	ATLAS_PH_SM,
-	ATLAS_EC_SM,
-	ATLAS_ORP_SM,
-};
-
-struct atlas_data {
-	struct i2c_client *client;
-	struct iio_trigger *trig;
-	struct atlas_device *chip;
-	struct regmap *regmap;
-	struct irq_work work;
-
-	__be32 buffer[6]; /* 96-bit data + 32-bit pad + 64-bit timestamp */
-};
-
-static const struct regmap_config atlas_regmap_config = {
-	.name = ATLAS_REGMAP_NAME,
-	.reg_bits = 8,
-	.val_bits = 8,
-};
-
-static const struct iio_chan_spec atlas_ph_channels[] = {
-	{
-		.type = IIO_PH,
-		.address = ATLAS_REG_PH_DATA,
-		.info_mask_separate =
-			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
-		.scan_index = 0,
-		.scan_type = {
-			.sign = 'u',
-			.realbits = 32,
-			.storagebits = 32,
-			.endianness = IIO_BE,
-		},
-	},
-	IIO_CHAN_SOFT_TIMESTAMP(1),
-	{
-		.type = IIO_TEMP,
-		.address = ATLAS_REG_PH_TEMP_DATA,
-		.info_mask_separate =
-			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
-		.output = 1,
-		.scan_index = -1
-	},
-};
-
-#define ATLAS_EC_CHANNEL(_idx, _addr) \
-	{\
-		.type = IIO_CONCENTRATION, \
-		.indexed = 1, \
-		.channel = _idx, \
-		.address = _addr, \
-		.info_mask_separate = \
-			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), \
-		.scan_index = _idx + 1, \
-		.scan_type = { \
-			.sign = 'u', \
-			.realbits = 32, \
-			.storagebits = 32, \
-			.endianness = IIO_BE, \
-		}, \
-	}
-
-static const struct iio_chan_spec atlas_ec_channels[] = {
-	{
-		.type = IIO_ELECTRICALCONDUCTIVITY,
-		.address = ATLAS_REG_EC_DATA,
-		.info_mask_separate =
-			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
-		.scan_index = 0,
-		.scan_type = {
-			.sign = 'u',
-			.realbits = 32,
-			.storagebits = 32,
-			.endianness = IIO_BE,
-		},
-	},
-	ATLAS_EC_CHANNEL(0, ATLAS_REG_TDS_DATA),
-	ATLAS_EC_CHANNEL(1, ATLAS_REG_PSS_DATA),
-	IIO_CHAN_SOFT_TIMESTAMP(3),
-	{
-		.type = IIO_TEMP,
-		.address = ATLAS_REG_EC_TEMP_DATA,
-		.info_mask_separate =
-			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
-		.output = 1,
-		.scan_index = -1
-	},
-};
-
-static const struct iio_chan_spec atlas_orp_channels[] = {
-	{
-		.type = IIO_VOLTAGE,
-		.address = ATLAS_REG_ORP_DATA,
-		.info_mask_separate =
-			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
-		.scan_index = 0,
-		.scan_type = {
-			.sign = 's',
-			.realbits = 32,
-			.storagebits = 32,
-			.endianness = IIO_BE,
-		},
-	},
-	IIO_CHAN_SOFT_TIMESTAMP(1),
-};
-
-static int atlas_check_ph_calibration(struct atlas_data *data)
-{
-	struct device *dev = &data->client->dev;
-	int ret;
-	unsigned int val;
-
-	ret = regmap_read(data->regmap, ATLAS_REG_PH_CALIB_STATUS, &val);
-	if (ret)
-		return ret;
-
-	if (!(val & ATLAS_REG_PH_CALIB_STATUS_MASK)) {
-		dev_warn(dev, "device has not been calibrated\n");
-		return 0;
-	}
-
-	if (!(val & ATLAS_REG_PH_CALIB_STATUS_LOW))
-		dev_warn(dev, "device missing low point calibration\n");
-
-	if (!(val & ATLAS_REG_PH_CALIB_STATUS_MID))
-		dev_warn(dev, "device missing mid point calibration\n");
-
-	if (!(val & ATLAS_REG_PH_CALIB_STATUS_HIGH))
-		dev_warn(dev, "device missing high point calibration\n");
-
-	return 0;
-}
-
-static int atlas_check_ec_calibration(struct atlas_data *data)
-{
-	struct device *dev = &data->client->dev;
-	int ret;
-	unsigned int val;
-	__be16	rval;
-
-	ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2);
-	if (ret)
-		return ret;
-
-	val = be16_to_cpu(rval);
-	dev_info(dev, "probe set to K = %d.%.2d", val / 100, val % 100);
-
-	ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);
-	if (ret)
-		return ret;
-
-	if (!(val & ATLAS_REG_EC_CALIB_STATUS_MASK)) {
-		dev_warn(dev, "device has not been calibrated\n");
-		return 0;
-	}
-
-	if (!(val & ATLAS_REG_EC_CALIB_STATUS_DRY))
-		dev_warn(dev, "device missing dry point calibration\n");
-
-	if (val & ATLAS_REG_EC_CALIB_STATUS_SINGLE) {
-		dev_warn(dev, "device using single point calibration\n");
-	} else {
-		if (!(val & ATLAS_REG_EC_CALIB_STATUS_LOW))
-			dev_warn(dev, "device missing low point calibration\n");
-
-		if (!(val & ATLAS_REG_EC_CALIB_STATUS_HIGH))
-			dev_warn(dev, "device missing high point calibration\n");
-	}
-
-	return 0;
-}
-
-static int atlas_check_orp_calibration(struct atlas_data *data)
-{
-	struct device *dev = &data->client->dev;
-	int ret;
-	unsigned int val;
-
-	ret = regmap_read(data->regmap, ATLAS_REG_ORP_CALIB_STATUS, &val);
-	if (ret)
-		return ret;
-
-	if (!val)
-		dev_warn(dev, "device has not been calibrated\n");
-
-	return 0;
-};
-
-struct atlas_device {
-	const struct iio_chan_spec *channels;
-	int num_channels;
-	int data_reg;
-
-	int (*calibration)(struct atlas_data *data);
-	int delay;
-};
-
-static struct atlas_device atlas_devices[] = {
-	[ATLAS_PH_SM] = {
-				.channels = atlas_ph_channels,
-				.num_channels = 3,
-				.data_reg = ATLAS_REG_PH_DATA,
-				.calibration = &atlas_check_ph_calibration,
-				.delay = ATLAS_PH_INT_TIME_IN_MS,
-	},
-	[ATLAS_EC_SM] = {
-				.channels = atlas_ec_channels,
-				.num_channels = 5,
-				.data_reg = ATLAS_REG_EC_DATA,
-				.calibration = &atlas_check_ec_calibration,
-				.delay = ATLAS_EC_INT_TIME_IN_MS,
-	},
-	[ATLAS_ORP_SM] = {
-				.channels = atlas_orp_channels,
-				.num_channels = 2,
-				.data_reg = ATLAS_REG_ORP_DATA,
-				.calibration = &atlas_check_orp_calibration,
-				.delay = ATLAS_ORP_INT_TIME_IN_MS,
-	},
-};
-
-static int atlas_set_powermode(struct atlas_data *data, int on)
-{
-	return regmap_write(data->regmap, ATLAS_REG_PWR_CONTROL, on);
-}
-
-static int atlas_set_interrupt(struct atlas_data *data, bool state)
-{
-	return regmap_update_bits(data->regmap, ATLAS_REG_INT_CONTROL,
-				  ATLAS_REG_INT_CONTROL_EN,
-				  state ? ATLAS_REG_INT_CONTROL_EN : 0);
-}
-
-static int atlas_buffer_postenable(struct iio_dev *indio_dev)
-{
-	struct atlas_data *data = iio_priv(indio_dev);
-	int ret;
-
-	ret = iio_triggered_buffer_postenable(indio_dev);
-	if (ret)
-		return ret;
-
-	ret = pm_runtime_get_sync(&data->client->dev);
-	if (ret < 0) {
-		pm_runtime_put_noidle(&data->client->dev);
-		return ret;
-	}
-
-	return atlas_set_interrupt(data, true);
-}
-
-static int atlas_buffer_predisable(struct iio_dev *indio_dev)
-{
-	struct atlas_data *data = iio_priv(indio_dev);
-	int ret;
-
-	ret = atlas_set_interrupt(data, false);
-	if (ret)
-		return ret;
-
-	pm_runtime_mark_last_busy(&data->client->dev);
-	ret = pm_runtime_put_autosuspend(&data->client->dev);
-	if (ret)
-		return ret;
-
-	return iio_triggered_buffer_predisable(indio_dev);
-}
-
-static const struct iio_trigger_ops atlas_interrupt_trigger_ops = {
-};
-
-static const struct iio_buffer_setup_ops atlas_buffer_setup_ops = {
-	.postenable = atlas_buffer_postenable,
-	.predisable = atlas_buffer_predisable,
-};
-
-static void atlas_work_handler(struct irq_work *work)
-{
-	struct atlas_data *data = container_of(work, struct atlas_data, work);
-
-	iio_trigger_poll(data->trig);
-}
-
-static irqreturn_t atlas_trigger_handler(int irq, void *private)
-{
-	struct iio_poll_func *pf = private;
-	struct iio_dev *indio_dev = pf->indio_dev;
-	struct atlas_data *data = iio_priv(indio_dev);
-	int ret;
-
-	ret = regmap_bulk_read(data->regmap, data->chip->data_reg,
-			      (u8 *) &data->buffer,
-			      sizeof(__be32) * (data->chip->num_channels - 2));
-
-	if (!ret)
-		iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
-				iio_get_time_ns(indio_dev));
-
-	iio_trigger_notify_done(indio_dev->trig);
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t atlas_interrupt_handler(int irq, void *private)
-{
-	struct iio_dev *indio_dev = private;
-	struct atlas_data *data = iio_priv(indio_dev);
-
-	irq_work_queue(&data->work);
-
-	return IRQ_HANDLED;
-}
-
-static int atlas_read_measurement(struct atlas_data *data, int reg, __be32 *val)
-{
-	struct device *dev = &data->client->dev;
-	int suspended = pm_runtime_suspended(dev);
-	int ret;
-
-	ret = pm_runtime_get_sync(dev);
-	if (ret < 0) {
-		pm_runtime_put_noidle(dev);
-		return ret;
-	}
-
-	if (suspended)
-		msleep(data->chip->delay);
-
-	ret = regmap_bulk_read(data->regmap, reg, (u8 *) val, sizeof(*val));
-
-	pm_runtime_mark_last_busy(dev);
-	pm_runtime_put_autosuspend(dev);
-
-	return ret;
-}
-
-static int atlas_read_raw(struct iio_dev *indio_dev,
-			  struct iio_chan_spec const *chan,
-			  int *val, int *val2, long mask)
-{
-	struct atlas_data *data = iio_priv(indio_dev);
-
-	switch (mask) {
-	case IIO_CHAN_INFO_RAW: {
-		int ret;
-		__be32 reg;
-
-		switch (chan->type) {
-		case IIO_TEMP:
-			ret = regmap_bulk_read(data->regmap, chan->address,
-					      (u8 *) &reg, sizeof(reg));
-			break;
-		case IIO_PH:
-		case IIO_CONCENTRATION:
-		case IIO_ELECTRICALCONDUCTIVITY:
-		case IIO_VOLTAGE:
-			ret = iio_device_claim_direct_mode(indio_dev);
-			if (ret)
-				return ret;
-
-			ret = atlas_read_measurement(data, chan->address, &reg);
-
-			iio_device_release_direct_mode(indio_dev);
-			break;
-		default:
-			ret = -EINVAL;
-		}
-
-		if (!ret) {
-			*val = be32_to_cpu(reg);
-			ret = IIO_VAL_INT;
-		}
-		return ret;
-	}
-	case IIO_CHAN_INFO_SCALE:
-		switch (chan->type) {
-		case IIO_TEMP:
-			*val = 10;
-			return IIO_VAL_INT;
-		case IIO_PH:
-			*val = 1; /* 0.001 */
-			*val2 = 1000;
-			break;
-		case IIO_ELECTRICALCONDUCTIVITY:
-			*val = 1; /* 0.00001 */
-			*val2 = 100000;
-			break;
-		case IIO_CONCENTRATION:
-			*val = 0; /* 0.000000001 */
-			*val2 = 1000;
-			return IIO_VAL_INT_PLUS_NANO;
-		case IIO_VOLTAGE:
-			*val = 1; /* 0.1 */
-			*val2 = 10;
-			break;
-		default:
-			return -EINVAL;
-		}
-		return IIO_VAL_FRACTIONAL;
-	}
-
-	return -EINVAL;
-}
-
-static int atlas_write_raw(struct iio_dev *indio_dev,
-			   struct iio_chan_spec const *chan,
-			   int val, int val2, long mask)
-{
-	struct atlas_data *data = iio_priv(indio_dev);
-	__be32 reg = cpu_to_be32(val / 10);
-
-	if (val2 != 0 || val < 0 || val > 20000)
-		return -EINVAL;
-
-	if (mask != IIO_CHAN_INFO_RAW || chan->type != IIO_TEMP)
-		return -EINVAL;
-
-	return regmap_bulk_write(data->regmap, chan->address,
-				 &reg, sizeof(reg));
-}
-
-static const struct iio_info atlas_info = {
-	.read_raw = atlas_read_raw,
-	.write_raw = atlas_write_raw,
-};
-
-static const struct i2c_device_id atlas_id[] = {
-	{ "atlas-ph-sm", ATLAS_PH_SM},
-	{ "atlas-ec-sm", ATLAS_EC_SM},
-	{ "atlas-orp-sm", ATLAS_ORP_SM},
-	{}
-};
-MODULE_DEVICE_TABLE(i2c, atlas_id);
-
-static const struct of_device_id atlas_dt_ids[] = {
-	{ .compatible = "atlas,ph-sm", .data = (void *)ATLAS_PH_SM, },
-	{ .compatible = "atlas,ec-sm", .data = (void *)ATLAS_EC_SM, },
-	{ .compatible = "atlas,orp-sm", .data = (void *)ATLAS_ORP_SM, },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, atlas_dt_ids);
-
-static int atlas_probe(struct i2c_client *client,
-		       const struct i2c_device_id *id)
-{
-	struct atlas_data *data;
-	struct atlas_device *chip;
-	const struct of_device_id *of_id;
-	struct iio_trigger *trig;
-	struct iio_dev *indio_dev;
-	int ret;
-
-	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
-	if (!indio_dev)
-		return -ENOMEM;
-
-	of_id = of_match_device(atlas_dt_ids, &client->dev);
-	if (!of_id)
-		chip = &atlas_devices[id->driver_data];
-	else
-		chip = &atlas_devices[(unsigned long)of_id->data];
-
-	indio_dev->info = &atlas_info;
-	indio_dev->name = ATLAS_DRV_NAME;
-	indio_dev->channels = chip->channels;
-	indio_dev->num_channels = chip->num_channels;
-	indio_dev->modes = INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE;
-	indio_dev->dev.parent = &client->dev;
-
-	trig = devm_iio_trigger_alloc(&client->dev, "%s-dev%d",
-				      indio_dev->name, indio_dev->id);
-
-	if (!trig)
-		return -ENOMEM;
-
-	data = iio_priv(indio_dev);
-	data->client = client;
-	data->trig = trig;
-	data->chip = chip;
-	trig->dev.parent = indio_dev->dev.parent;
-	trig->ops = &atlas_interrupt_trigger_ops;
-	iio_trigger_set_drvdata(trig, indio_dev);
-
-	i2c_set_clientdata(client, indio_dev);
-
-	data->regmap = devm_regmap_init_i2c(client, &atlas_regmap_config);
-	if (IS_ERR(data->regmap)) {
-		dev_err(&client->dev, "regmap initialization failed\n");
-		return PTR_ERR(data->regmap);
-	}
-
-	ret = pm_runtime_set_active(&client->dev);
-	if (ret)
-		return ret;
-
-	if (client->irq <= 0) {
-		dev_err(&client->dev, "no valid irq defined\n");
-		return -EINVAL;
-	}
-
-	ret = chip->calibration(data);
-	if (ret)
-		return ret;
-
-	ret = iio_trigger_register(trig);
-	if (ret) {
-		dev_err(&client->dev, "failed to register trigger\n");
-		return ret;
-	}
-
-	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
-		&atlas_trigger_handler, &atlas_buffer_setup_ops);
-	if (ret) {
-		dev_err(&client->dev, "cannot setup iio trigger\n");
-		goto unregister_trigger;
-	}
-
-	init_irq_work(&data->work, atlas_work_handler);
-
-	/* interrupt pin toggles on new conversion */
-	ret = devm_request_threaded_irq(&client->dev, client->irq,
-					NULL, atlas_interrupt_handler,
-					IRQF_TRIGGER_RISING |
-					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-					"atlas_irq",
-					indio_dev);
-	if (ret) {
-		dev_err(&client->dev, "request irq (%d) failed\n", client->irq);
-		goto unregister_buffer;
-	}
-
-	ret = atlas_set_powermode(data, 1);
-	if (ret) {
-		dev_err(&client->dev, "cannot power device on");
-		goto unregister_buffer;
-	}
-
-	pm_runtime_enable(&client->dev);
-	pm_runtime_set_autosuspend_delay(&client->dev, 2500);
-	pm_runtime_use_autosuspend(&client->dev);
-
-	ret = iio_device_register(indio_dev);
-	if (ret) {
-		dev_err(&client->dev, "unable to register device\n");
-		goto unregister_pm;
-	}
-
-	return 0;
-
-unregister_pm:
-	pm_runtime_disable(&client->dev);
-	atlas_set_powermode(data, 0);
-
-unregister_buffer:
-	iio_triggered_buffer_cleanup(indio_dev);
-
-unregister_trigger:
-	iio_trigger_unregister(data->trig);
-
-	return ret;
-}
-
-static int atlas_remove(struct i2c_client *client)
-{
-	struct iio_dev *indio_dev = i2c_get_clientdata(client);
-	struct atlas_data *data = iio_priv(indio_dev);
-
-	iio_device_unregister(indio_dev);
-	iio_triggered_buffer_cleanup(indio_dev);
-	iio_trigger_unregister(data->trig);
-
-	pm_runtime_disable(&client->dev);
-	pm_runtime_set_suspended(&client->dev);
-	pm_runtime_put_noidle(&client->dev);
-
-	return atlas_set_powermode(data, 0);
-}
-
-#ifdef CONFIG_PM
-static int atlas_runtime_suspend(struct device *dev)
-{
-	struct atlas_data *data =
-		     iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
-
-	return atlas_set_powermode(data, 0);
-}
-
-static int atlas_runtime_resume(struct device *dev)
-{
-	struct atlas_data *data =
-		     iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
-
-	return atlas_set_powermode(data, 1);
-}
-#endif
-
-static const struct dev_pm_ops atlas_pm_ops = {
-	SET_RUNTIME_PM_OPS(atlas_runtime_suspend,
-			   atlas_runtime_resume, NULL)
-};
-
-static struct i2c_driver atlas_driver = {
-	.driver = {
-		.name	= ATLAS_DRV_NAME,
-		.of_match_table	= of_match_ptr(atlas_dt_ids),
-		.pm	= &atlas_pm_ops,
-	},
-	.probe		= atlas_probe,
-	.remove		= atlas_remove,
-	.id_table	= atlas_id,
-};
-module_i2c_driver(atlas_driver);
-
-MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
-MODULE_DESCRIPTION("Atlas Scientific pH-SM sensor");
-MODULE_LICENSE("GPL");
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
new file mode 100644
index 0000000..2f0a6fe
--- /dev/null
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -0,0 +1,695 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * atlas-sensor.c - Support for Atlas Scientific OEM SM sensors
+ *
+ * Copyright (C) 2015-2019 Konsulko Group
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/irq_work.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/pm_runtime.h>
+
+#define ATLAS_REGMAP_NAME	"atlas_regmap"
+#define ATLAS_DRV_NAME		"atlas"
+
+#define ATLAS_REG_DEV_TYPE		0x00
+#define ATLAS_REG_DEV_VERSION		0x01
+
+#define ATLAS_REG_INT_CONTROL		0x04
+#define ATLAS_REG_INT_CONTROL_EN	BIT(3)
+
+#define ATLAS_REG_PWR_CONTROL		0x06
+
+#define ATLAS_REG_PH_CALIB_STATUS	0x0d
+#define ATLAS_REG_PH_CALIB_STATUS_MASK	0x07
+#define ATLAS_REG_PH_CALIB_STATUS_LOW	BIT(0)
+#define ATLAS_REG_PH_CALIB_STATUS_MID	BIT(1)
+#define ATLAS_REG_PH_CALIB_STATUS_HIGH	BIT(2)
+
+#define ATLAS_REG_EC_CALIB_STATUS		0x0f
+#define ATLAS_REG_EC_CALIB_STATUS_MASK		0x0f
+#define ATLAS_REG_EC_CALIB_STATUS_DRY		BIT(0)
+#define ATLAS_REG_EC_CALIB_STATUS_SINGLE	BIT(1)
+#define ATLAS_REG_EC_CALIB_STATUS_LOW		BIT(2)
+#define ATLAS_REG_EC_CALIB_STATUS_HIGH		BIT(3)
+
+#define ATLAS_REG_PH_TEMP_DATA		0x0e
+#define ATLAS_REG_PH_DATA		0x16
+
+#define ATLAS_REG_EC_PROBE		0x08
+#define ATLAS_REG_EC_TEMP_DATA		0x10
+#define ATLAS_REG_EC_DATA		0x18
+#define ATLAS_REG_TDS_DATA		0x1c
+#define ATLAS_REG_PSS_DATA		0x20
+
+#define ATLAS_REG_ORP_CALIB_STATUS	0x0d
+#define ATLAS_REG_ORP_DATA		0x0e
+
+#define ATLAS_PH_INT_TIME_IN_MS		450
+#define ATLAS_EC_INT_TIME_IN_MS		650
+#define ATLAS_ORP_INT_TIME_IN_MS	450
+
+enum {
+	ATLAS_PH_SM,
+	ATLAS_EC_SM,
+	ATLAS_ORP_SM,
+};
+
+struct atlas_data {
+	struct i2c_client *client;
+	struct iio_trigger *trig;
+	struct atlas_device *chip;
+	struct regmap *regmap;
+	struct irq_work work;
+
+	__be32 buffer[6]; /* 96-bit data + 32-bit pad + 64-bit timestamp */
+};
+
+static const struct regmap_config atlas_regmap_config = {
+	.name = ATLAS_REGMAP_NAME,
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
+static int atlas_buffer_num_channels(const struct iio_chan_spec *spec)
+{
+	int idx = 0;
+
+	for (; spec->type != IIO_TIMESTAMP; spec++)
+		idx++;
+
+	return idx;
+};
+
+static const struct iio_chan_spec atlas_ph_channels[] = {
+	{
+		.type = IIO_PH,
+		.address = ATLAS_REG_PH_DATA,
+		.info_mask_separate =
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+		.scan_index = 0,
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 32,
+			.storagebits = 32,
+			.endianness = IIO_BE,
+		},
+	},
+	IIO_CHAN_SOFT_TIMESTAMP(1),
+	{
+		.type = IIO_TEMP,
+		.address = ATLAS_REG_PH_TEMP_DATA,
+		.info_mask_separate =
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+		.output = 1,
+		.scan_index = -1
+	},
+};
+
+#define ATLAS_EC_CHANNEL(_idx, _addr) \
+	{\
+		.type = IIO_CONCENTRATION, \
+		.indexed = 1, \
+		.channel = _idx, \
+		.address = _addr, \
+		.info_mask_separate = \
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), \
+		.scan_index = _idx + 1, \
+		.scan_type = { \
+			.sign = 'u', \
+			.realbits = 32, \
+			.storagebits = 32, \
+			.endianness = IIO_BE, \
+		}, \
+	}
+
+static const struct iio_chan_spec atlas_ec_channels[] = {
+	{
+		.type = IIO_ELECTRICALCONDUCTIVITY,
+		.address = ATLAS_REG_EC_DATA,
+		.info_mask_separate =
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+		.scan_index = 0,
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 32,
+			.storagebits = 32,
+			.endianness = IIO_BE,
+		},
+	},
+	ATLAS_EC_CHANNEL(0, ATLAS_REG_TDS_DATA),
+	ATLAS_EC_CHANNEL(1, ATLAS_REG_PSS_DATA),
+	IIO_CHAN_SOFT_TIMESTAMP(3),
+	{
+		.type = IIO_TEMP,
+		.address = ATLAS_REG_EC_TEMP_DATA,
+		.info_mask_separate =
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+		.output = 1,
+		.scan_index = -1
+	},
+};
+
+static const struct iio_chan_spec atlas_orp_channels[] = {
+	{
+		.type = IIO_VOLTAGE,
+		.address = ATLAS_REG_ORP_DATA,
+		.info_mask_separate =
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+		.scan_index = 0,
+		.scan_type = {
+			.sign = 's',
+			.realbits = 32,
+			.storagebits = 32,
+			.endianness = IIO_BE,
+		},
+	},
+	IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static int atlas_check_ph_calibration(struct atlas_data *data)
+{
+	struct device *dev = &data->client->dev;
+	int ret;
+	unsigned int val;
+
+	ret = regmap_read(data->regmap, ATLAS_REG_PH_CALIB_STATUS, &val);
+	if (ret)
+		return ret;
+
+	if (!(val & ATLAS_REG_PH_CALIB_STATUS_MASK)) {
+		dev_warn(dev, "device has not been calibrated\n");
+		return 0;
+	}
+
+	if (!(val & ATLAS_REG_PH_CALIB_STATUS_LOW))
+		dev_warn(dev, "device missing low point calibration\n");
+
+	if (!(val & ATLAS_REG_PH_CALIB_STATUS_MID))
+		dev_warn(dev, "device missing mid point calibration\n");
+
+	if (!(val & ATLAS_REG_PH_CALIB_STATUS_HIGH))
+		dev_warn(dev, "device missing high point calibration\n");
+
+	return 0;
+}
+
+static int atlas_check_ec_calibration(struct atlas_data *data)
+{
+	struct device *dev = &data->client->dev;
+	int ret;
+	unsigned int val;
+	__be16	rval;
+
+	ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2);
+	if (ret)
+		return ret;
+
+	val = be16_to_cpu(rval);
+	dev_info(dev, "probe set to K = %d.%.2d", val / 100, val % 100);
+
+	ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);
+	if (ret)
+		return ret;
+
+	if (!(val & ATLAS_REG_EC_CALIB_STATUS_MASK)) {
+		dev_warn(dev, "device has not been calibrated\n");
+		return 0;
+	}
+
+	if (!(val & ATLAS_REG_EC_CALIB_STATUS_DRY))
+		dev_warn(dev, "device missing dry point calibration\n");
+
+	if (val & ATLAS_REG_EC_CALIB_STATUS_SINGLE) {
+		dev_warn(dev, "device using single point calibration\n");
+	} else {
+		if (!(val & ATLAS_REG_EC_CALIB_STATUS_LOW))
+			dev_warn(dev, "device missing low point calibration\n");
+
+		if (!(val & ATLAS_REG_EC_CALIB_STATUS_HIGH))
+			dev_warn(dev, "device missing high point calibration\n");
+	}
+
+	return 0;
+}
+
+static int atlas_check_orp_calibration(struct atlas_data *data)
+{
+	struct device *dev = &data->client->dev;
+	int ret;
+	unsigned int val;
+
+	ret = regmap_read(data->regmap, ATLAS_REG_ORP_CALIB_STATUS, &val);
+	if (ret)
+		return ret;
+
+	if (!val)
+		dev_warn(dev, "device has not been calibrated\n");
+
+	return 0;
+};
+
+struct atlas_device {
+	const struct iio_chan_spec *channels;
+	int num_channels;
+	int data_reg;
+
+	int (*calibration)(struct atlas_data *data);
+	int delay;
+};
+
+static struct atlas_device atlas_devices[] = {
+	[ATLAS_PH_SM] = {
+				.channels = atlas_ph_channels,
+				.num_channels = 3,
+				.data_reg = ATLAS_REG_PH_DATA,
+				.calibration = &atlas_check_ph_calibration,
+				.delay = ATLAS_PH_INT_TIME_IN_MS,
+	},
+	[ATLAS_EC_SM] = {
+				.channels = atlas_ec_channels,
+				.num_channels = 5,
+				.data_reg = ATLAS_REG_EC_DATA,
+				.calibration = &atlas_check_ec_calibration,
+				.delay = ATLAS_EC_INT_TIME_IN_MS,
+	},
+	[ATLAS_ORP_SM] = {
+				.channels = atlas_orp_channels,
+				.num_channels = 2,
+				.data_reg = ATLAS_REG_ORP_DATA,
+				.calibration = &atlas_check_orp_calibration,
+				.delay = ATLAS_ORP_INT_TIME_IN_MS,
+	},
+};
+
+static int atlas_set_powermode(struct atlas_data *data, int on)
+{
+	return regmap_write(data->regmap, ATLAS_REG_PWR_CONTROL, on);
+}
+
+static int atlas_set_interrupt(struct atlas_data *data, bool state)
+{
+	return regmap_update_bits(data->regmap, ATLAS_REG_INT_CONTROL,
+				  ATLAS_REG_INT_CONTROL_EN,
+				  state ? ATLAS_REG_INT_CONTROL_EN : 0);
+}
+
+static int atlas_buffer_postenable(struct iio_dev *indio_dev)
+{
+	struct atlas_data *data = iio_priv(indio_dev);
+	int ret;
+
+	ret = iio_triggered_buffer_postenable(indio_dev);
+	if (ret)
+		return ret;
+
+	ret = pm_runtime_get_sync(&data->client->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(&data->client->dev);
+		return ret;
+	}
+
+	return atlas_set_interrupt(data, true);
+}
+
+static int atlas_buffer_predisable(struct iio_dev *indio_dev)
+{
+	struct atlas_data *data = iio_priv(indio_dev);
+	int ret;
+
+	ret = atlas_set_interrupt(data, false);
+	if (ret)
+		return ret;
+
+	pm_runtime_mark_last_busy(&data->client->dev);
+	ret = pm_runtime_put_autosuspend(&data->client->dev);
+	if (ret)
+		return ret;
+
+	return iio_triggered_buffer_predisable(indio_dev);
+}
+
+static const struct iio_trigger_ops atlas_interrupt_trigger_ops = {
+};
+
+static const struct iio_buffer_setup_ops atlas_buffer_setup_ops = {
+	.postenable = atlas_buffer_postenable,
+	.predisable = atlas_buffer_predisable,
+};
+
+static void atlas_work_handler(struct irq_work *work)
+{
+	struct atlas_data *data = container_of(work, struct atlas_data, work);
+
+	iio_trigger_poll(data->trig);
+}
+
+static irqreturn_t atlas_trigger_handler(int irq, void *private)
+{
+	struct iio_poll_func *pf = private;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct atlas_data *data = iio_priv(indio_dev);
+	int channels = atlas_buffer_num_channels(data->chip->channels);
+	int ret;
+
+	ret = regmap_bulk_read(data->regmap, data->chip->data_reg,
+			      (u8 *) &data->buffer,
+			      sizeof(__be32) * channels);
+
+	if (!ret)
+		iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+				iio_get_time_ns(indio_dev));
+
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t atlas_interrupt_handler(int irq, void *private)
+{
+	struct iio_dev *indio_dev = private;
+	struct atlas_data *data = iio_priv(indio_dev);
+
+	irq_work_queue(&data->work);
+
+	return IRQ_HANDLED;
+}
+
+static int atlas_read_measurement(struct atlas_data *data, int reg, __be32 *val)
+{
+	struct device *dev = &data->client->dev;
+	int suspended = pm_runtime_suspended(dev);
+	int ret;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(dev);
+		return ret;
+	}
+
+	if (suspended)
+		msleep(data->chip->delay);
+
+	ret = regmap_bulk_read(data->regmap, reg, (u8 *) val, sizeof(*val));
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+
+	return ret;
+}
+
+static int atlas_read_raw(struct iio_dev *indio_dev,
+			  struct iio_chan_spec const *chan,
+			  int *val, int *val2, long mask)
+{
+	struct atlas_data *data = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW: {
+		int ret;
+		__be32 reg;
+
+		switch (chan->type) {
+		case IIO_TEMP:
+			ret = regmap_bulk_read(data->regmap, chan->address,
+					      (u8 *) &reg, sizeof(reg));
+			break;
+		case IIO_PH:
+		case IIO_CONCENTRATION:
+		case IIO_ELECTRICALCONDUCTIVITY:
+		case IIO_VOLTAGE:
+			ret = iio_device_claim_direct_mode(indio_dev);
+			if (ret)
+				return ret;
+
+			ret = atlas_read_measurement(data, chan->address, &reg);
+
+			iio_device_release_direct_mode(indio_dev);
+			break;
+		default:
+			ret = -EINVAL;
+		}
+
+		if (!ret) {
+			*val = be32_to_cpu(reg);
+			ret = IIO_VAL_INT;
+		}
+		return ret;
+	}
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_TEMP:
+			*val = 10;
+			return IIO_VAL_INT;
+		case IIO_PH:
+			*val = 1; /* 0.001 */
+			*val2 = 1000;
+			break;
+		case IIO_ELECTRICALCONDUCTIVITY:
+			*val = 1; /* 0.00001 */
+			*val2 = 100000;
+			break;
+		case IIO_CONCENTRATION:
+			*val = 0; /* 0.000000001 */
+			*val2 = 1000;
+			return IIO_VAL_INT_PLUS_NANO;
+		case IIO_VOLTAGE:
+			*val = 1; /* 0.1 */
+			*val2 = 10;
+			break;
+		default:
+			return -EINVAL;
+		}
+		return IIO_VAL_FRACTIONAL;
+	}
+
+	return -EINVAL;
+}
+
+static int atlas_write_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int val, int val2, long mask)
+{
+	struct atlas_data *data = iio_priv(indio_dev);
+	__be32 reg = cpu_to_be32(val / 10);
+
+	if (val2 != 0 || val < 0 || val > 20000)
+		return -EINVAL;
+
+	if (mask != IIO_CHAN_INFO_RAW || chan->type != IIO_TEMP)
+		return -EINVAL;
+
+	return regmap_bulk_write(data->regmap, chan->address,
+				 &reg, sizeof(reg));
+}
+
+static const struct iio_info atlas_info = {
+	.read_raw = atlas_read_raw,
+	.write_raw = atlas_write_raw,
+};
+
+static const struct i2c_device_id atlas_id[] = {
+	{ "atlas-ph-sm", ATLAS_PH_SM},
+	{ "atlas-ec-sm", ATLAS_EC_SM},
+	{ "atlas-orp-sm", ATLAS_ORP_SM},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, atlas_id);
+
+static const struct of_device_id atlas_dt_ids[] = {
+	{ .compatible = "atlas,ph-sm", .data = (void *)ATLAS_PH_SM, },
+	{ .compatible = "atlas,ec-sm", .data = (void *)ATLAS_EC_SM, },
+	{ .compatible = "atlas,orp-sm", .data = (void *)ATLAS_ORP_SM, },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, atlas_dt_ids);
+
+static int atlas_probe(struct i2c_client *client,
+		       const struct i2c_device_id *id)
+{
+	struct atlas_data *data;
+	struct atlas_device *chip;
+	const struct of_device_id *of_id;
+	struct iio_trigger *trig;
+	struct iio_dev *indio_dev;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	of_id = of_match_device(atlas_dt_ids, &client->dev);
+	if (!of_id)
+		chip = &atlas_devices[id->driver_data];
+	else
+		chip = &atlas_devices[(unsigned long)of_id->data];
+
+	indio_dev->info = &atlas_info;
+	indio_dev->name = ATLAS_DRV_NAME;
+	indio_dev->channels = chip->channels;
+	indio_dev->num_channels = chip->num_channels;
+	indio_dev->modes = INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE;
+	indio_dev->dev.parent = &client->dev;
+
+	trig = devm_iio_trigger_alloc(&client->dev, "%s-dev%d",
+				      indio_dev->name, indio_dev->id);
+
+	if (!trig)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	data->client = client;
+	data->trig = trig;
+	data->chip = chip;
+	trig->dev.parent = indio_dev->dev.parent;
+	trig->ops = &atlas_interrupt_trigger_ops;
+	iio_trigger_set_drvdata(trig, indio_dev);
+
+	i2c_set_clientdata(client, indio_dev);
+
+	data->regmap = devm_regmap_init_i2c(client, &atlas_regmap_config);
+	if (IS_ERR(data->regmap)) {
+		dev_err(&client->dev, "regmap initialization failed\n");
+		return PTR_ERR(data->regmap);
+	}
+
+	ret = pm_runtime_set_active(&client->dev);
+	if (ret)
+		return ret;
+
+	if (client->irq <= 0) {
+		dev_err(&client->dev, "no valid irq defined\n");
+		return -EINVAL;
+	}
+
+	ret = chip->calibration(data);
+	if (ret)
+		return ret;
+
+	ret = iio_trigger_register(trig);
+	if (ret) {
+		dev_err(&client->dev, "failed to register trigger\n");
+		return ret;
+	}
+
+	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+		&atlas_trigger_handler, &atlas_buffer_setup_ops);
+	if (ret) {
+		dev_err(&client->dev, "cannot setup iio trigger\n");
+		goto unregister_trigger;
+	}
+
+	init_irq_work(&data->work, atlas_work_handler);
+
+	/* interrupt pin toggles on new conversion */
+	ret = devm_request_threaded_irq(&client->dev, client->irq,
+					NULL, atlas_interrupt_handler,
+					IRQF_TRIGGER_RISING |
+					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+					"atlas_irq",
+					indio_dev);
+	if (ret) {
+		dev_err(&client->dev, "request irq (%d) failed\n", client->irq);
+		goto unregister_buffer;
+	}
+
+	ret = atlas_set_powermode(data, 1);
+	if (ret) {
+		dev_err(&client->dev, "cannot power device on");
+		goto unregister_buffer;
+	}
+
+	pm_runtime_enable(&client->dev);
+	pm_runtime_set_autosuspend_delay(&client->dev, 2500);
+	pm_runtime_use_autosuspend(&client->dev);
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(&client->dev, "unable to register device\n");
+		goto unregister_pm;
+	}
+
+	return 0;
+
+unregister_pm:
+	pm_runtime_disable(&client->dev);
+	atlas_set_powermode(data, 0);
+
+unregister_buffer:
+	iio_triggered_buffer_cleanup(indio_dev);
+
+unregister_trigger:
+	iio_trigger_unregister(data->trig);
+
+	return ret;
+}
+
+static int atlas_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct atlas_data *data = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+	iio_trigger_unregister(data->trig);
+
+	pm_runtime_disable(&client->dev);
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_put_noidle(&client->dev);
+
+	return atlas_set_powermode(data, 0);
+}
+
+#ifdef CONFIG_PM
+static int atlas_runtime_suspend(struct device *dev)
+{
+	struct atlas_data *data =
+		     iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+	return atlas_set_powermode(data, 0);
+}
+
+static int atlas_runtime_resume(struct device *dev)
+{
+	struct atlas_data *data =
+		     iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+	return atlas_set_powermode(data, 1);
+}
+#endif
+
+static const struct dev_pm_ops atlas_pm_ops = {
+	SET_RUNTIME_PM_OPS(atlas_runtime_suspend,
+			   atlas_runtime_resume, NULL)
+};
+
+static struct i2c_driver atlas_driver = {
+	.driver = {
+		.name	= ATLAS_DRV_NAME,
+		.of_match_table	= of_match_ptr(atlas_dt_ids),
+		.pm	= &atlas_pm_ops,
+	},
+	.probe		= atlas_probe,
+	.remove		= atlas_remove,
+	.id_table	= atlas_id,
+};
+module_i2c_driver(atlas_driver);
+
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
+MODULE_DESCRIPTION("Atlas Scientific SM sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index 7dce044..576e45f 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -16,7 +16,6 @@
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
 #include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index 81a7f69..d3a3626 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -13,7 +13,6 @@
 #include <linux/iio/kfifo_buf.h>
 #include <linux/iio/trigger_consumer.h>
 #include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/platform_data/cros_ec_commands.h>
diff --git a/drivers/iio/common/ssp_sensors/ssp.h b/drivers/iio/common/ssp_sensors/ssp.h
index 0a381bb..abb8327 100644
--- a/drivers/iio/common/ssp_sensors/ssp.h
+++ b/drivers/iio/common/ssp_sensors/ssp.h
@@ -7,7 +7,7 @@
 #define __SSP_SENSORHUB_H__
 
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/iio/common/ssp_sensors.h>
 #include <linux/iio/iio.h>
 #include <linux/spi/spi.h>
@@ -168,9 +168,9 @@ struct ssp_sensorhub_info {
  * @fw_dl_state:	firmware download state
  * @comm_lock:		lock protecting the handshake
  * @pending_lock:	lock protecting pending list and completion
- * @mcu_reset_gpio:	mcu reset line
- * @ap_mcu_gpio:	ap to mcu gpio line
- * @mcu_ap_gpio:	mcu to ap gpio line
+ * @mcu_reset_gpiod:	mcu reset line
+ * @ap_mcu_gpiod:	ap to mcu gpio line
+ * @mcu_ap_gpiod:	mcu to ap gpio line
  * @pending_list:	pending list for messages queued to be sent/read
  * @sensor_devs:	registered IIO devices table
  * @enable_refcount:	enable reference count for wdt (watchdog timer)
@@ -212,9 +212,9 @@ struct ssp_data {
 	struct mutex comm_lock;
 	struct mutex pending_lock;
 
-	int mcu_reset_gpio;
-	int ap_mcu_gpio;
-	int mcu_ap_gpio;
+	struct gpio_desc *mcu_reset_gpiod;
+	struct gpio_desc *ap_mcu_gpiod;
+	struct gpio_desc *mcu_ap_gpiod;
 
 	struct list_head pending_list;
 
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index 9c70553..a94dbcf 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -9,7 +9,6 @@
 #include <linux/mfd/core.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/of_platform.h>
 #include "ssp.h"
 
@@ -61,9 +60,9 @@ static const struct mfd_cell sensorhub_sensor_devs[] = {
 
 static void ssp_toggle_mcu_reset_gpio(struct ssp_data *data)
 {
-	gpio_set_value(data->mcu_reset_gpio, 0);
+	gpiod_set_value(data->mcu_reset_gpiod, 0);
 	usleep_range(1000, 1200);
-	gpio_set_value(data->mcu_reset_gpio, 1);
+	gpiod_set_value(data->mcu_reset_gpiod, 1);
 	msleep(50);
 }
 
@@ -441,7 +440,6 @@ MODULE_DEVICE_TABLE(of, ssp_of_match);
 
 static struct ssp_data *ssp_parse_dt(struct device *dev)
 {
-	int ret;
 	struct ssp_data *data;
 	struct device_node *node = dev->of_node;
 	const struct of_device_id *match;
@@ -450,26 +448,17 @@ static struct ssp_data *ssp_parse_dt(struct device *dev)
 	if (!data)
 		return NULL;
 
-	data->mcu_ap_gpio = of_get_named_gpio(node, "mcu-ap-gpios", 0);
-	if (data->mcu_ap_gpio < 0)
+	data->mcu_ap_gpiod = devm_gpiod_get(dev, "mcu-ap", GPIOD_IN);
+	if (IS_ERR(data->mcu_ap_gpiod))
 		return NULL;
 
-	data->ap_mcu_gpio = of_get_named_gpio(node, "ap-mcu-gpios", 0);
-	if (data->ap_mcu_gpio < 0)
+	data->ap_mcu_gpiod = devm_gpiod_get(dev, "ap-mcu", GPIOD_OUT_HIGH);
+	if (IS_ERR(data->ap_mcu_gpiod))
 		return NULL;
 
-	data->mcu_reset_gpio = of_get_named_gpio(node, "mcu-reset-gpios", 0);
-	if (data->mcu_reset_gpio < 0)
-		return NULL;
-
-	ret = devm_gpio_request_one(dev, data->ap_mcu_gpio, GPIOF_OUT_INIT_HIGH,
-				    "ap-mcu-gpios");
-	if (ret)
-		return NULL;
-
-	ret = devm_gpio_request_one(dev, data->mcu_reset_gpio,
-				    GPIOF_OUT_INIT_HIGH, "mcu-reset-gpios");
-	if (ret)
+	data->mcu_reset_gpiod = devm_gpiod_get(dev, "mcu-reset",
+					       GPIOD_OUT_HIGH);
+	if (IS_ERR(data->mcu_reset_gpiod))
 		return NULL;
 
 	match = of_match_node(ssp_of_match, node);
diff --git a/drivers/iio/common/ssp_sensors/ssp_spi.c b/drivers/iio/common/ssp_sensors/ssp_spi.c
index 7db3d58..4864c38 100644
--- a/drivers/iio/common/ssp_sensors/ssp_spi.c
+++ b/drivers/iio/common/ssp_sensors/ssp_spi.c
@@ -155,9 +155,9 @@ static int ssp_check_lines(struct ssp_data *data, bool state)
 {
 	int delay_cnt = 0;
 
-	gpio_set_value_cansleep(data->ap_mcu_gpio, state);
+	gpiod_set_value_cansleep(data->ap_mcu_gpiod, state);
 
-	while (gpio_get_value_cansleep(data->mcu_ap_gpio) != state) {
+	while (gpiod_get_value_cansleep(data->mcu_ap_gpiod) != state) {
 		usleep_range(3000, 3500);
 
 		if (data->shut_down || delay_cnt++ > 500) {
@@ -165,7 +165,7 @@ static int ssp_check_lines(struct ssp_data *data, bool state)
 				__func__, state);
 
 			if (!state)
-				gpio_set_value_cansleep(data->ap_mcu_gpio, 1);
+				gpiod_set_value_cansleep(data->ap_mcu_gpiod, 1);
 
 			return -ETIMEDOUT;
 		}
@@ -197,7 +197,7 @@ static int ssp_do_transfer(struct ssp_data *data, struct ssp_msg *msg,
 
 	status = spi_write(data->spi, msg->buffer, SSP_HEADER_SIZE);
 	if (status < 0) {
-		gpio_set_value_cansleep(data->ap_mcu_gpio, 1);
+		gpiod_set_value_cansleep(data->ap_mcu_gpiod, 1);
 		dev_err(SSP_DEV, "%s spi_write fail\n", __func__);
 		goto _error_locked;
 	}
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 4a3064f..e051edb 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -12,9 +12,8 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/iio/iio.h>
+#include <linux/property.h>
 #include <linux/regulator/consumer.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/regmap.h>
 #include <asm/unaligned.h>
 #include <linux/iio/common/st_sensors.h>
@@ -319,63 +318,49 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
 	return 0;
 }
 
-#ifdef CONFIG_OF
-static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev,
+static struct st_sensors_platform_data *st_sensors_dev_probe(struct device *dev,
 		struct st_sensors_platform_data *defdata)
 {
 	struct st_sensors_platform_data *pdata;
-	struct device_node *np = dev->of_node;
 	u32 val;
 
-	if (!np)
+	if (!dev_fwnode(dev))
 		return NULL;
 
 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
-	if (!of_property_read_u32(np, "st,drdy-int-pin", &val) && (val <= 2))
+	if (!device_property_read_u32(dev, "st,drdy-int-pin", &val) && (val <= 2))
 		pdata->drdy_int_pin = (u8) val;
 	else
 		pdata->drdy_int_pin = defdata ? defdata->drdy_int_pin : 0;
 
-	pdata->open_drain = of_property_read_bool(np, "drive-open-drain");
+	pdata->open_drain = device_property_read_bool(dev, "drive-open-drain");
 
 	return pdata;
 }
 
 /**
- * st_sensors_of_name_probe() - device tree probe for ST sensor name
+ * st_sensors_dev_name_probe() - device probe for ST sensor name
  * @dev: driver model representation of the device.
- * @match: the OF match table for the device, containing compatible strings
- *	but also a .data field with the corresponding internal kernel name
- *	used by this sensor.
  * @name: device name buffer reference.
  * @len: device name buffer length.
  *
- * In effect this function matches a compatible string to an internal kernel
+ * In effect this function matches an ID to an internal kernel
  * name for a certain sensor device, so that the rest of the autodetection can
  * rely on that name from this point on. I2C/SPI devices will be renamed
  * to match the internal kernel convention.
  */
-void st_sensors_of_name_probe(struct device *dev,
-			      const struct of_device_id *match,
-			      char *name, int len)
+void st_sensors_dev_name_probe(struct device *dev, char *name, int len)
 {
-	const struct of_device_id *of_id;
+	const void *match;
 
-	of_id = of_match_device(match, dev);
-	if (!of_id || !of_id->data)
+	match = device_get_match_data(dev);
+	if (!match)
 		return;
 
-	/* The name from the OF match takes precedence if present */
-	strlcpy(name, of_id->data, len);
+	/* The name from the match takes precedence if present */
+	strlcpy(name, match, len);
 }
-EXPORT_SYMBOL(st_sensors_of_name_probe);
-#else
-static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev,
-		struct st_sensors_platform_data *defdata)
-{
-	return NULL;
-}
-#endif
+EXPORT_SYMBOL(st_sensors_dev_name_probe);
 
 int st_sensors_init_sensor(struct iio_dev *indio_dev,
 					struct st_sensors_platform_data *pdata)
@@ -385,7 +370,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
 	int err = 0;
 
 	/* If OF/DT pdata exists, it will take precedence of anything else */
-	of_pdata = st_sensors_of_probe(indio_dev->dev.parent, pdata);
+	of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
 	if (of_pdata)
 		pdata = of_pdata;
 
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
index aa89d54..286830f 100644
--- a/drivers/iio/common/st_sensors/st_sensors_i2c.c
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -11,8 +11,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/iio/iio.h>
-#include <linux/of_device.h>
-#include <linux/acpi.h>
 #include <linux/regmap.h>
 
 #include <linux/iio/common/st_sensors_i2c.h>
@@ -68,25 +66,6 @@ int st_sensors_i2c_configure(struct iio_dev *indio_dev,
 }
 EXPORT_SYMBOL(st_sensors_i2c_configure);
 
-#ifdef CONFIG_ACPI
-int st_sensors_match_acpi_device(struct device *dev)
-{
-	const struct acpi_device_id *acpi_id;
-	kernel_ulong_t driver_data = 0;
-
-	if (ACPI_HANDLE(dev)) {
-		acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
-		if (!acpi_id) {
-			dev_err(dev, "No driver data\n");
-			return -EINVAL;
-		}
-		driver_data = acpi_id->driver_data;
-	}
-	return driver_data;
-}
-EXPORT_SYMBOL(st_sensors_match_acpi_device);
-#endif
-
 MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
 MODULE_DESCRIPTION("STMicroelectronics ST-sensors i2c driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c
index 2262f81..1275fb0 100644
--- a/drivers/iio/common/st_sensors/st_sensors_spi.c
+++ b/drivers/iio/common/st_sensors/st_sensors_spi.c
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/iio/iio.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
 
 #include <linux/iio/common/st_sensors_spi.h>
@@ -37,14 +38,15 @@ static const struct regmap_config st_sensors_spi_regmap_multiread_bit_config = {
  */
 static bool st_sensors_is_spi_3_wire(struct spi_device *spi)
 {
-	struct device_node *np = spi->dev.of_node;
 	struct st_sensors_platform_data *pdata;
+	struct device *dev = &spi->dev;
 
-	pdata = (struct st_sensors_platform_data *)spi->dev.platform_data;
-	if ((np && of_property_read_bool(np, "spi-3wire")) ||
-	    (pdata && pdata->spi_3wire)) {
+	if (device_property_read_bool(dev, "spi-3wire"))
 		return true;
-	}
+
+	pdata = (struct st_sensors_platform_data *)dev->platform_data;
+	if (pdata && pdata->spi_3wire)
+		return true;
 
 	return false;
 }
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 4a2efa0..e817537 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -19,6 +19,9 @@
 
 /**
  * st_sensors_new_samples_available() - check if more samples came in
+ * @indio_dev: IIO device reference.
+ * @sdata: Sensor data.
+ *
  * returns:
  * 0 - no new samples available
  * 1 - new samples available
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 2d897e6..e211011 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -15,7 +15,6 @@
 #include <linux/regulator/consumer.h>
 #include <linux/gpio/consumer.h>
 #include <linux/gpio/driver.h>
-#include <linux/gpio.h>
 #include <linux/property.h>
 
 #include <dt-bindings/iio/adi,ad5592r.h>
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index 14bbac6..15af8a1 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -12,7 +12,6 @@
 #include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/regulator/consumer.h>
-#include <linux/of.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
@@ -202,7 +201,6 @@ static int ad7303_probe(struct spi_device *spi)
 	const struct spi_device_id *id = spi_get_device_id(spi);
 	struct iio_dev *indio_dev;
 	struct ad7303_state *st;
-	bool ext_ref;
 	int ret;
 
 	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -224,24 +222,15 @@ static int ad7303_probe(struct spi_device *spi)
 	if (ret)
 		return ret;
 
-	if (spi->dev.of_node) {
-		ext_ref = of_property_read_bool(spi->dev.of_node,
-				"REF-supply");
-	} else {
-		struct ad7303_platform_data *pdata = spi->dev.platform_data;
-		if (pdata && pdata->use_external_ref)
-			ext_ref = true;
-		else
-		    ext_ref = false;
+	st->vref_reg = devm_regulator_get_optional(&spi->dev, "REF");
+	if (IS_ERR(st->vref_reg)) {
+		ret = PTR_ERR(st->vref_reg);
+		if (ret != -ENODEV)
+			goto err_disable_vdd_reg;
+		st->vref_reg = NULL;
 	}
 
-	if (ext_ref) {
-		st->vref_reg = devm_regulator_get(&spi->dev, "REF");
-		if (IS_ERR(st->vref_reg)) {
-			ret = PTR_ERR(st->vref_reg);
-			goto err_disable_vdd_reg;
-		}
-
+	if (st->vref_reg) {
 		ret = regulator_enable(st->vref_reg);
 		if (ret)
 			goto err_disable_vdd_reg;
diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c
index 9e6b4cd..7e5809b 100644
--- a/drivers/iio/dac/stm32-dac-core.c
+++ b/drivers/iio/dac/stm32-dac-core.c
@@ -20,13 +20,11 @@
 /**
  * struct stm32_dac_priv - stm32 DAC core private data
  * @pclk:		peripheral clock common for all DACs
- * @rst:		peripheral reset control
  * @vref:		regulator reference
  * @common:		Common data for all DAC instances
  */
 struct stm32_dac_priv {
 	struct clk *pclk;
-	struct reset_control *rst;
 	struct regulator *vref;
 	struct stm32_dac_common common;
 };
@@ -94,6 +92,7 @@ static int stm32_dac_probe(struct platform_device *pdev)
 	struct regmap *regmap;
 	struct resource *res;
 	void __iomem *mmio;
+	struct reset_control *rst;
 	int ret;
 
 	if (!dev->of_node)
@@ -148,11 +147,19 @@ static int stm32_dac_probe(struct platform_device *pdev)
 	priv->common.vref_mv = ret / 1000;
 	dev_dbg(dev, "vref+=%dmV\n", priv->common.vref_mv);
 
-	priv->rst = devm_reset_control_get_exclusive(dev, NULL);
-	if (!IS_ERR(priv->rst)) {
-		reset_control_assert(priv->rst);
+	rst = devm_reset_control_get_optional_exclusive(dev, NULL);
+	if (rst) {
+		if (IS_ERR(rst)) {
+			ret = PTR_ERR(rst);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev, "reset get failed, %d\n", ret);
+
+			goto err_hw_stop;
+		}
+
+		reset_control_assert(rst);
 		udelay(2);
-		reset_control_deassert(priv->rst);
+		reset_control_deassert(rst);
 	}
 
 	if (cfg && cfg->has_hfsel) {
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index ae0ca09..1c2dc9b 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -14,11 +14,10 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/gcd.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <asm/div64.h>
 #include <linux/clk.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
@@ -34,6 +33,7 @@ enum {
 struct adf4350_state {
 	struct spi_device		*spi;
 	struct regulator		*reg;
+	struct gpio_desc		*lock_detect_gpiod;
 	struct adf4350_platform_data	*pdata;
 	struct clk			*clk;
 	unsigned long			clkin;
@@ -61,7 +61,6 @@ static struct adf4350_platform_data default_pdata = {
 	.r3_user_settings = ADF4350_REG3_12BIT_CLKDIV_MODE(0),
 	.r4_user_settings = ADF4350_REG4_OUTPUT_PWR(3) |
 			    ADF4350_REG4_MUTE_TILL_LOCK_EN,
-	.gpio_lock_detect = -1,
 };
 
 static int adf4350_sync_config(struct adf4350_state *st)
@@ -317,8 +316,8 @@ static ssize_t adf4350_read(struct iio_dev *indio_dev,
 			(u64)st->fpfd;
 		do_div(val, st->r1_mod * (1 << st->r4_rf_div_sel));
 		/* PLL unlocked? return error */
-		if (gpio_is_valid(st->pdata->gpio_lock_detect))
-			if (!gpio_get_value(st->pdata->gpio_lock_detect)) {
+		if (st->lock_detect_gpiod)
+			if (!gpiod_get_value(st->lock_detect_gpiod)) {
 				dev_dbg(&st->spi->dev, "PLL un-locked\n");
 				ret = -EBUSY;
 			}
@@ -381,7 +380,6 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
 	struct device_node *np = dev->of_node;
 	struct adf4350_platform_data *pdata;
 	unsigned int tmp;
-	int ret;
 
 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
@@ -401,12 +399,6 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
 	of_property_read_u32(np, "adi,reference-div-factor", &tmp);
 	pdata->ref_div_factor = tmp;
 
-	ret = of_get_gpio(np, 0);
-	if (ret < 0)
-		pdata->gpio_lock_detect = -1;
-	else
-		pdata->gpio_lock_detect = ret;
-
 	pdata->ref_doubler_en = of_property_read_bool(np,
 			"adi,reference-doubler-enable");
 	pdata->ref_div2_en = of_property_read_bool(np,
@@ -561,16 +553,10 @@ static int adf4350_probe(struct spi_device *spi)
 
 	memset(st->regs_hw, 0xFF, sizeof(st->regs_hw));
 
-	if (gpio_is_valid(pdata->gpio_lock_detect)) {
-		ret = devm_gpio_request(&spi->dev, pdata->gpio_lock_detect,
-					indio_dev->name);
-		if (ret) {
-			dev_err(&spi->dev, "fail to request lock detect GPIO-%d",
-				pdata->gpio_lock_detect);
-			goto error_disable_reg;
-		}
-		gpio_direction_input(pdata->gpio_lock_detect);
-	}
+	st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
+							GPIOD_IN);
+	if (IS_ERR(st->lock_detect_gpiod))
+		return PTR_ERR(st->lock_detect_gpiod);
 
 	if (pdata->power_up_frequency) {
 		ret = adf4350_set_freq(st, pdata->power_up_frequency);
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 95e6f96d..7eaf777 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -75,26 +75,26 @@
 	select REGMAP_SPI
 
 config FXAS21002C
-       tristate "NXP FXAS21002C Gyro Sensor"
-       select IIO_BUFFER
-       select IIO_TRIGGERED_BUFFER
-       select FXAS21002C_I2C if (I2C)
-       select FXAS21002C_SPI if (SPI)
-       depends on (I2C || SPI_MASTER)
-       help
-         Say yes here to build support for NXP FXAS21002C Tri-axis Gyro
-         Sensor driver connected via I2C or SPI.
+	tristate "NXP FXAS21002C Gyro Sensor"
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	select FXAS21002C_I2C if (I2C)
+	select FXAS21002C_SPI if (SPI)
+	depends on (I2C || SPI_MASTER)
+	help
+	  Say yes here to build support for NXP FXAS21002C Tri-axis Gyro
+	  Sensor driver connected via I2C or SPI.
 
-         This driver can also be built as a module.  If so, the module
-         will be called fxas21002c_i2c or fxas21002c_spi.
+	  This driver can also be built as a module.  If so, the module
+	  will be called fxas21002c_i2c or fxas21002c_spi.
 
 config FXAS21002C_I2C
-       tristate
-       select REGMAP_I2C
+	tristate
+	select REGMAP_I2C
 
 config FXAS21002C_SPI
-       tristate
-       select REGMAP_SPI
+	tristate
+	select REGMAP_SPI
 
 config HID_SENSOR_GYRO_3D
 	depends on HID_SENSOR_HUB
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index d637d52..d5e03a40 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -59,6 +59,7 @@
 struct adis16136_chip_info {
 	unsigned int precision;
 	unsigned int fullscale;
+	const struct adis_timeout *timeouts;
 };
 
 struct adis16136 {
@@ -185,12 +186,12 @@ static int adis16136_set_freq(struct adis16136 *adis16136, unsigned int freq)
 	return adis_write_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, t);
 }
 
-static int adis16136_get_freq(struct adis16136 *adis16136, unsigned int *freq)
+static int __adis16136_get_freq(struct adis16136 *adis16136, unsigned int *freq)
 {
 	uint16_t t;
 	int ret;
 
-	ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, &t);
+	ret = __adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, &t);
 	if (ret)
 		return ret;
 
@@ -224,10 +225,13 @@ static ssize_t adis16136_read_frequency(struct device *dev,
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct adis16136 *adis16136 = iio_priv(indio_dev);
+	struct mutex *slock = &adis16136->adis.state_lock;
 	unsigned int freq;
 	int ret;
 
-	ret = adis16136_get_freq(adis16136, &freq);
+	mutex_lock(slock);
+	ret = __adis16136_get_freq(adis16136, &freq);
+	mutex_unlock(slock);
 	if (ret)
 		return ret;
 
@@ -252,42 +256,50 @@ static const unsigned adis16136_3db_divisors[] = {
 static int adis16136_set_filter(struct iio_dev *indio_dev, int val)
 {
 	struct adis16136 *adis16136 = iio_priv(indio_dev);
+	struct mutex *slock = &adis16136->adis.state_lock;
 	unsigned int freq;
 	int i, ret;
 
-	ret = adis16136_get_freq(adis16136, &freq);
+	mutex_lock(slock);
+	ret = __adis16136_get_freq(adis16136, &freq);
 	if (ret)
-		return ret;
+		goto out_unlock;
 
 	for (i = ARRAY_SIZE(adis16136_3db_divisors) - 1; i >= 1; i--) {
 		if (freq / adis16136_3db_divisors[i] >= val)
 			break;
 	}
 
-	return adis_write_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, i);
+	ret = __adis_write_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, i);
+out_unlock:
+	mutex_unlock(slock);
+
+	return ret;
 }
 
 static int adis16136_get_filter(struct iio_dev *indio_dev, int *val)
 {
 	struct adis16136 *adis16136 = iio_priv(indio_dev);
+	struct mutex *slock = &adis16136->adis.state_lock;
 	unsigned int freq;
 	uint16_t val16;
 	int ret;
 
-	mutex_lock(&indio_dev->mlock);
+	mutex_lock(slock);
 
-	ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, &val16);
+	ret = __adis_read_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT,
+				 &val16);
 	if (ret)
 		goto err_unlock;
 
-	ret = adis16136_get_freq(adis16136, &freq);
+	ret = __adis16136_get_freq(adis16136, &freq);
 	if (ret)
 		goto err_unlock;
 
 	*val = freq / adis16136_3db_divisors[val16 & 0x07];
 
 err_unlock:
-	mutex_unlock(&indio_dev->mlock);
+	mutex_unlock(slock);
 
 	return ret ? ret : IIO_VAL_INT;
 }
@@ -460,7 +472,6 @@ static const struct adis_data adis16136_data = {
 	.msc_ctrl_reg = ADIS16136_REG_MSC_CTRL,
 
 	.self_test_mask = ADIS16136_MSC_CTRL_SELF_TEST,
-	.startup_delay = 80,
 
 	.read_delay = 10,
 	.write_delay = 10,
@@ -479,30 +490,63 @@ enum adis16136_id {
 	ID_ADIS16137,
 };
 
+static const struct adis_timeout adis16133_timeouts = {
+	.reset_ms = 75,
+	.sw_reset_ms = 75,
+	.self_test_ms = 50,
+};
+
+static const struct adis_timeout adis16136_timeouts = {
+	.reset_ms = 128,
+	.sw_reset_ms = 75,
+	.self_test_ms = 245,
+};
+
 static const struct adis16136_chip_info adis16136_chip_info[] = {
 	[ID_ADIS16133] = {
 		.precision = IIO_DEGREE_TO_RAD(1200),
 		.fullscale = 24000,
+		.timeouts = &adis16133_timeouts,
 	},
 	[ID_ADIS16135] = {
 		.precision = IIO_DEGREE_TO_RAD(300),
 		.fullscale = 24000,
+		.timeouts = &adis16133_timeouts,
 	},
 	[ID_ADIS16136] = {
 		.precision = IIO_DEGREE_TO_RAD(450),
 		.fullscale = 24623,
+		.timeouts = &adis16136_timeouts,
 	},
 	[ID_ADIS16137] = {
 		.precision = IIO_DEGREE_TO_RAD(1000),
 		.fullscale = 24609,
+		.timeouts = &adis16136_timeouts,
 	},
 };
 
+static struct adis_data *adis16136_adis_data_alloc(struct adis16136 *st,
+						   struct device *dev)
+{
+	struct adis_data *data;
+
+	data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(data, &adis16136_data, sizeof(*data));
+
+	data->timeouts = st->chip_info->timeouts;
+
+	return data;
+}
+
 static int adis16136_probe(struct spi_device *spi)
 {
 	const struct spi_device_id *id = spi_get_device_id(spi);
 	struct adis16136 *adis16136;
 	struct iio_dev *indio_dev;
+	const struct adis_data *adis16136_data;
 	int ret;
 
 	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adis16136));
@@ -521,7 +565,11 @@ static int adis16136_probe(struct spi_device *spi)
 	indio_dev->info = &adis16136_info;
 	indio_dev->modes = INDIO_DIRECT_MODE;
 
-	ret = adis_init(&adis16136->adis, indio_dev, spi, &adis16136_data);
+	adis16136_data = adis16136_adis_data_alloc(adis16136, &spi->dev);
+	if (IS_ERR(adis16136_data))
+		return PTR_ERR(adis16136_data);
+
+	ret = adis_init(&adis16136->adis, indio_dev, spi, adis16136_data);
 	if (ret)
 		return ret;
 
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
index 207a0ce..be09b3e 100644
--- a/drivers/iio/gyro/adis16260.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -293,7 +293,7 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
 		addr = adis16260_addresses[chan->scan_index][1];
 		return adis_write_reg_16(adis, addr, val);
 	case IIO_CHAN_INFO_SAMP_FREQ:
-		mutex_lock(&indio_dev->mlock);
+		mutex_lock(&adis->state_lock);
 		if (spi_get_device_id(adis->spi)->driver_data)
 			t = 256 / val;
 		else
@@ -308,9 +308,9 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
 			adis->spi->max_speed_hz = ADIS16260_SPI_SLOW;
 		else
 			adis->spi->max_speed_hz = ADIS16260_SPI_FAST;
-		ret = adis_write_reg_8(adis, ADIS16260_SMPL_PRD, t);
+		ret = __adis_write_reg_8(adis, ADIS16260_SMPL_PRD, t);
 
-		mutex_unlock(&indio_dev->mlock);
+		mutex_unlock(&adis->state_lock);
 		return ret;
 	}
 	return -EINVAL;
@@ -332,6 +332,12 @@ static const char * const adis1620_status_error_msgs[] = {
 	[ADIS16260_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 4.75",
 };
 
+static const struct adis_timeout adis16260_timeouts = {
+	.reset_ms = ADIS16260_STARTUP_DELAY,
+	.sw_reset_ms = ADIS16260_STARTUP_DELAY,
+	.self_test_ms = ADIS16260_STARTUP_DELAY,
+};
+
 static const struct adis_data adis16260_data = {
 	.write_delay = 30,
 	.read_delay = 30,
@@ -340,7 +346,7 @@ static const struct adis_data adis16260_data = {
 	.diag_stat_reg = ADIS16260_DIAG_STAT,
 
 	.self_test_mask = ADIS16260_MSC_CTRL_MEM_TEST,
-	.startup_delay = ADIS16260_STARTUP_DELAY,
+	.timeouts = &adis16260_timeouts,
 
 	.status_error_msgs = adis1620_status_error_msgs,
 	.status_error_mask = BIT(ADIS16260_DIAG_STAT_FLASH_CHK_BIT) |
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index 981ae22..b3afa55 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -15,7 +15,6 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/i2c.h>
-#include <linux/gpio.h>
 #include <linux/slab.h>
 #include <linux/stat.h>
 #include <linux/module.h>
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index 592f6b3..fd9171c 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -28,7 +28,7 @@
  * struct st_sensors_platform_data - gyro platform data
  * @drdy_int_pin: DRDY on gyros is available only on INT2 pin.
  */
-static const struct st_sensors_platform_data gyro_pdata = {
+static __maybe_unused const struct st_sensors_platform_data gyro_pdata = {
 	.drdy_int_pin = 2,
 };
 
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 57be68b..26c50b2 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -138,7 +138,6 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
 			[2] = LSM330DLC_GYRO_DEV_NAME,
 			[3] = L3G4IS_GYRO_DEV_NAME,
 			[4] = LSM330_GYRO_DEV_NAME,
-			[5] = LSM9DS0_GYRO_DEV_NAME,
 		},
 		.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
 		.odr = {
@@ -209,6 +208,80 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
 		.bootime = 2,
 	},
 	{
+		.wai = 0xd4,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+		.sensors_supported = {
+			[0] = LSM9DS0_GYRO_DEV_NAME,
+		},
+		.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
+		.odr = {
+			.addr = 0x20,
+			.mask = GENMASK(7, 6),
+			.odr_avl = {
+				{ .hz = 95, .value = 0x00, },
+				{ .hz = 190, .value = 0x01, },
+				{ .hz = 380, .value = 0x02, },
+				{ .hz = 760, .value = 0x03, },
+			},
+		},
+		.pw = {
+			.addr = 0x20,
+			.mask = BIT(3),
+			.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+		},
+		.enable_axis = {
+			.addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+			.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+		},
+		.fs = {
+			.addr = 0x23,
+			.mask = GENMASK(5, 4),
+			.fs_avl = {
+				[0] = {
+					.num = ST_GYRO_FS_AVL_245DPS,
+					.value = 0x00,
+					.gain = IIO_DEGREE_TO_RAD(8750),
+				},
+				[1] = {
+					.num = ST_GYRO_FS_AVL_500DPS,
+					.value = 0x01,
+					.gain = IIO_DEGREE_TO_RAD(17500),
+				},
+				[2] = {
+					.num = ST_GYRO_FS_AVL_2000DPS,
+					.value = 0x02,
+					.gain = IIO_DEGREE_TO_RAD(70000),
+				},
+			},
+		},
+		.bdu = {
+			.addr = 0x23,
+			.mask = BIT(7),
+		},
+		.drdy_irq = {
+			.int2 = {
+				.addr = 0x22,
+				.mask = BIT(3),
+			},
+			/*
+			 * The sensor has IHL (active low) and open
+			 * drain settings, but only for INT1 and not
+			 * for the DRDY line on INT2.
+			 */
+			.stat_drdy = {
+				.addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+				.mask = GENMASK(2, 0),
+			},
+		},
+		.sim = {
+			.addr = 0x23,
+			.value = BIT(0),
+		},
+		.multi_read_bit = true,
+		.bootime = 2,
+	},
+	{
 		.wai = 0xd7,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 05a1a08..8190966 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -17,7 +17,6 @@
 #include <linux/iio/common/st_sensors_i2c.h>
 #include "st_gyro.h"
 
-#ifdef CONFIG_OF
 static const struct of_device_id st_gyro_of_match[] = {
 	{
 		.compatible = "st,l3g4200d-gyro",
@@ -58,9 +57,6 @@ static const struct of_device_id st_gyro_of_match[] = {
 	{},
 };
 MODULE_DEVICE_TABLE(of, st_gyro_of_match);
-#else
-#define st_gyro_of_match NULL
-#endif
 
 static int st_gyro_i2c_probe(struct i2c_client *client,
 			     const struct i2c_device_id *id)
@@ -70,8 +66,7 @@ static int st_gyro_i2c_probe(struct i2c_client *client,
 	struct iio_dev *indio_dev;
 	int err;
 
-	st_sensors_of_name_probe(&client->dev, st_gyro_of_match,
-				 client->name, sizeof(client->name));
+	st_sensors_dev_name_probe(&client->dev, client->name, sizeof(client->name));
 
 	settings = st_gyro_get_settings(client->name);
 	if (!settings) {
@@ -122,7 +117,7 @@ MODULE_DEVICE_TABLE(i2c, st_gyro_id_table);
 static struct i2c_driver st_gyro_driver = {
 	.driver = {
 		.name = "st-gyro-i2c",
-		.of_match_table = of_match_ptr(st_gyro_of_match),
+		.of_match_table = st_gyro_of_match,
 	},
 	.probe = st_gyro_i2c_probe,
 	.remove = st_gyro_i2c_remove,
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index b5c6242..efb8627 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -17,7 +17,6 @@
 #include <linux/iio/common/st_sensors_spi.h>
 #include "st_gyro.h"
 
-#ifdef CONFIG_OF
 /*
  * For new single-chip sensors use <device_name> as compatible string.
  * For old single-chip devices keep <device_name>-gyro to maintain
@@ -63,9 +62,6 @@ static const struct of_device_id st_gyro_of_match[] = {
 	{},
 };
 MODULE_DEVICE_TABLE(of, st_gyro_of_match);
-#else
-#define st_gyro_of_match	NULL
-#endif
 
 static int st_gyro_spi_probe(struct spi_device *spi)
 {
@@ -74,8 +70,7 @@ static int st_gyro_spi_probe(struct spi_device *spi)
 	struct iio_dev *indio_dev;
 	int err;
 
-	st_sensors_of_name_probe(&spi->dev, st_gyro_of_match,
-				 spi->modalias, sizeof(spi->modalias));
+	st_sensors_dev_name_probe(&spi->dev, spi->modalias, sizeof(spi->modalias));
 
 	settings = st_gyro_get_settings(spi->modalias);
 	if (!settings) {
@@ -126,7 +121,7 @@ MODULE_DEVICE_TABLE(spi, st_gyro_id_table);
 static struct spi_driver st_gyro_driver = {
 	.driver = {
 		.name = "st-gyro-spi",
-		.of_match_table = of_match_ptr(st_gyro_of_match),
+		.of_match_table = st_gyro_of_match,
 	},
 	.probe = st_gyro_spi_probe,
 	.remove = st_gyro_spi_remove,
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index b459600e..d05c6fd 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -174,7 +174,6 @@ static irqreturn_t dht11_handle_irq(int irq, void *data)
 	struct iio_dev *iio = data;
 	struct dht11 *dht11 = iio_priv(iio);
 
-	/* TODO: Consider making the handler safe for IRQ sharing */
 	if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
 		dht11->edges[dht11->num_edges].ts = ktime_get_boottime_ns();
 		dht11->edges[dht11->num_edges++].value =
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
index 4922444..9003671 100644
--- a/drivers/iio/humidity/hts221_core.c
+++ b/drivers/iio/humidity/hts221_core.c
@@ -24,13 +24,6 @@
 #define HTS221_REG_CNTRL1_ADDR		0x20
 #define HTS221_REG_CNTRL2_ADDR		0x21
 
-#define HTS221_REG_AVG_ADDR		0x10
-#define HTS221_REG_H_OUT_L		0x28
-#define HTS221_REG_T_OUT_L		0x2a
-
-#define HTS221_HUMIDITY_AVG_MASK	0x07
-#define HTS221_TEMP_AVG_MASK		0x38
-
 #define HTS221_ODR_MASK			0x03
 #define HTS221_BDU_MASK			BIT(2)
 #define HTS221_ENABLE_MASK		BIT(7)
@@ -66,8 +59,8 @@ static const struct hts221_odr hts221_odr_table[] = {
 
 static const struct hts221_avg hts221_avg_list[] = {
 	{
-		.addr = HTS221_REG_AVG_ADDR,
-		.mask = HTS221_HUMIDITY_AVG_MASK,
+		.addr = 0x10,
+		.mask = 0x07,
 		.avg_avl = {
 			4, /* 0.4 %RH */
 			8, /* 0.3 %RH */
@@ -80,8 +73,8 @@ static const struct hts221_avg hts221_avg_list[] = {
 		},
 	},
 	{
-		.addr = HTS221_REG_AVG_ADDR,
-		.mask = HTS221_TEMP_AVG_MASK,
+		.addr = 0x10,
+		.mask = 0x38,
 		.avg_avl = {
 			2, /* 0.08 degC */
 			4, /* 0.05 degC */
@@ -98,7 +91,7 @@ static const struct hts221_avg hts221_avg_list[] = {
 static const struct iio_chan_spec hts221_channels[] = {
 	{
 		.type = IIO_HUMIDITYRELATIVE,
-		.address = HTS221_REG_H_OUT_L,
+		.address = 0x28,
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
 				      BIT(IIO_CHAN_INFO_OFFSET) |
 				      BIT(IIO_CHAN_INFO_SCALE) |
@@ -114,7 +107,7 @@ static const struct iio_chan_spec hts221_channels[] = {
 	},
 	{
 		.type = IIO_TEMP,
-		.address = HTS221_REG_T_OUT_L,
+		.address = 0x2a,
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
 				      BIT(IIO_CHAN_INFO_OFFSET) |
 				      BIT(IIO_CHAN_INFO_SCALE) |
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 159ea3f..fd9a5f1 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -42,14 +42,14 @@ struct poll_table_struct;
 
 __poll_t iio_buffer_poll(struct file *filp,
 			     struct poll_table_struct *wait);
-ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
-				      size_t n, loff_t *f_ps);
+ssize_t iio_buffer_read_outer(struct file *filp, char __user *buf,
+			      size_t n, loff_t *f_ps);
 
 int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev);
 void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev);
 
 #define iio_buffer_poll_addr (&iio_buffer_poll)
-#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)
+#define iio_buffer_read_outer_addr (&iio_buffer_read_outer)
 
 void iio_disable_all_buffers(struct iio_dev *indio_dev);
 void iio_buffer_wakeup_poll(struct iio_dev *indio_dev);
@@ -57,7 +57,7 @@ void iio_buffer_wakeup_poll(struct iio_dev *indio_dev);
 #else
 
 #define iio_buffer_poll_addr NULL
-#define iio_buffer_read_first_n_outer_addr NULL
+#define iio_buffer_read_outer_addr NULL
 
 static inline int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
 {
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index e14c853..022bb54 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -26,7 +26,14 @@
 #define ADIS_MSC_CTRL_DATA_RDY_DIO2	BIT(0)
 #define ADIS_GLOB_CMD_SW_RESET		BIT(7)
 
-int adis_write_reg(struct adis *adis, unsigned int reg,
+/**
+ * __adis_write_reg() - write N bytes to register (unlocked version)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @value: The value to write to device (up to 4 bytes)
+ * @size: The size of the @value (in bytes)
+ */
+int __adis_write_reg(struct adis *adis, unsigned int reg,
 	unsigned int value, unsigned int size)
 {
 	unsigned int page = reg / ADIS_PAGE_SIZE;
@@ -38,7 +45,8 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
 			.bits_per_word = 8,
 			.len = 2,
 			.cs_change = 1,
-			.delay_usecs = adis->data->write_delay,
+			.delay.value = adis->data->write_delay,
+			.delay.unit = SPI_DELAY_UNIT_USECS,
 			.cs_change_delay.value = adis->data->cs_change_delay,
 			.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
 		}, {
@@ -46,7 +54,8 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
 			.bits_per_word = 8,
 			.len = 2,
 			.cs_change = 1,
-			.delay_usecs = adis->data->write_delay,
+			.delay.value = adis->data->write_delay,
+			.delay.unit = SPI_DELAY_UNIT_USECS,
 			.cs_change_delay.value = adis->data->cs_change_delay,
 			.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
 		}, {
@@ -54,24 +63,25 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
 			.bits_per_word = 8,
 			.len = 2,
 			.cs_change = 1,
-			.delay_usecs = adis->data->write_delay,
+			.delay.value = adis->data->write_delay,
+			.delay.unit = SPI_DELAY_UNIT_USECS,
 			.cs_change_delay.value = adis->data->cs_change_delay,
 			.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
 		}, {
 			.tx_buf = adis->tx + 6,
 			.bits_per_word = 8,
 			.len = 2,
-			.delay_usecs = adis->data->write_delay,
+			.delay.value = adis->data->write_delay,
+			.delay.unit = SPI_DELAY_UNIT_USECS,
 		}, {
 			.tx_buf = adis->tx + 8,
 			.bits_per_word = 8,
 			.len = 2,
-			.delay_usecs = adis->data->write_delay,
+			.delay.value = adis->data->write_delay,
+			.delay.unit = SPI_DELAY_UNIT_USECS,
 		},
 	};
 
-	mutex_lock(&adis->txrx_lock);
-
 	spi_message_init(&msg);
 
 	if (adis->current_page != page) {
@@ -96,8 +106,7 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
 		adis->tx[3] = value & 0xff;
 		break;
 	default:
-		ret = -EINVAL;
-		goto out_unlock;
+		return -EINVAL;
 	}
 
 	xfers[size].cs_change = 0;
@@ -113,20 +122,18 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
 		adis->current_page = page;
 	}
 
-out_unlock:
-	mutex_unlock(&adis->txrx_lock);
-
 	return ret;
 }
-EXPORT_SYMBOL_GPL(adis_write_reg);
+EXPORT_SYMBOL_GPL(__adis_write_reg);
 
 /**
- * adis_read_reg() - read 2 bytes from a 16-bit register
+ * __adis_read_reg() - read N bytes from register (unlocked version)
  * @adis: The adis device
  * @reg: The address of the lower of the two registers
  * @val: The value read back from the device
+ * @size: The size of the @val buffer
  */
-int adis_read_reg(struct adis *adis, unsigned int reg,
+int __adis_read_reg(struct adis *adis, unsigned int reg,
 	unsigned int *val, unsigned int size)
 {
 	unsigned int page = reg / ADIS_PAGE_SIZE;
@@ -138,7 +145,8 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
 			.bits_per_word = 8,
 			.len = 2,
 			.cs_change = 1,
-			.delay_usecs = adis->data->write_delay,
+			.delay.value = adis->data->write_delay,
+			.delay.unit = SPI_DELAY_UNIT_USECS,
 			.cs_change_delay.value = adis->data->cs_change_delay,
 			.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
 		}, {
@@ -146,7 +154,8 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
 			.bits_per_word = 8,
 			.len = 2,
 			.cs_change = 1,
-			.delay_usecs = adis->data->read_delay,
+			.delay.value = adis->data->read_delay,
+			.delay.unit = SPI_DELAY_UNIT_USECS,
 			.cs_change_delay.value = adis->data->cs_change_delay,
 			.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
 		}, {
@@ -155,18 +164,19 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
 			.bits_per_word = 8,
 			.len = 2,
 			.cs_change = 1,
-			.delay_usecs = adis->data->read_delay,
+			.delay.value = adis->data->read_delay,
+			.delay.unit = SPI_DELAY_UNIT_USECS,
 			.cs_change_delay.value = adis->data->cs_change_delay,
 			.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
 		}, {
 			.rx_buf = adis->rx + 2,
 			.bits_per_word = 8,
 			.len = 2,
-			.delay_usecs = adis->data->read_delay,
+			.delay.value = adis->data->read_delay,
+			.delay.unit = SPI_DELAY_UNIT_USECS,
 		},
 	};
 
-	mutex_lock(&adis->txrx_lock);
 	spi_message_init(&msg);
 
 	if (adis->current_page != page) {
@@ -188,15 +198,14 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
 		spi_message_add_tail(&xfers[3], &msg);
 		break;
 	default:
-		ret = -EINVAL;
-		goto out_unlock;
+		return -EINVAL;
 	}
 
 	ret = spi_sync(adis->spi, &msg);
 	if (ret) {
 		dev_err(&adis->spi->dev, "Failed to read register 0x%02X: %d\n",
 				reg, ret);
-		goto out_unlock;
+		return ret;
 	} else {
 		adis->current_page = page;
 	}
@@ -210,12 +219,9 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
 		break;
 	}
 
-out_unlock:
-	mutex_unlock(&adis->txrx_lock);
-
 	return ret;
 }
-EXPORT_SYMBOL_GPL(adis_read_reg);
+EXPORT_SYMBOL_GPL(__adis_read_reg);
 
 #ifdef CONFIG_DEBUG_FS
 
@@ -253,12 +259,16 @@ int adis_enable_irq(struct adis *adis, bool enable)
 	int ret = 0;
 	uint16_t msc;
 
-	if (adis->data->enable_irq)
-		return adis->data->enable_irq(adis, enable);
+	mutex_lock(&adis->state_lock);
 
-	ret = adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc);
+	if (adis->data->enable_irq) {
+		ret = adis->data->enable_irq(adis, enable);
+		goto out_unlock;
+	}
+
+	ret = __adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc);
 	if (ret)
-		goto error_ret;
+		goto out_unlock;
 
 	msc |= ADIS_MSC_CTRL_DATA_RDY_POL_HIGH;
 	msc &= ~ADIS_MSC_CTRL_DATA_RDY_DIO2;
@@ -267,26 +277,27 @@ int adis_enable_irq(struct adis *adis, bool enable)
 	else
 		msc &= ~ADIS_MSC_CTRL_DATA_RDY_EN;
 
-	ret = adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc);
+	ret = __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc);
 
-error_ret:
+out_unlock:
+	mutex_unlock(&adis->state_lock);
 	return ret;
 }
 EXPORT_SYMBOL(adis_enable_irq);
 
 /**
- * adis_check_status() - Check the device for error conditions
+ * __adis_check_status() - Check the device for error conditions (unlocked)
  * @adis: The adis device
  *
  * Returns 0 on success, a negative error code otherwise
  */
-int adis_check_status(struct adis *adis)
+int __adis_check_status(struct adis *adis)
 {
 	uint16_t status;
 	int ret;
 	int i;
 
-	ret = adis_read_reg_16(adis, adis->data->diag_stat_reg, &status);
+	ret = __adis_read_reg_16(adis, adis->data->diag_stat_reg, &status);
 	if (ret)
 		return ret;
 
@@ -304,32 +315,38 @@ int adis_check_status(struct adis *adis)
 
 	return -EIO;
 }
-EXPORT_SYMBOL_GPL(adis_check_status);
+EXPORT_SYMBOL_GPL(__adis_check_status);
 
 /**
- * adis_reset() - Reset the device
+ * __adis_reset() - Reset the device (unlocked version)
  * @adis: The adis device
  *
  * Returns 0 on success, a negative error code otherwise
  */
-int adis_reset(struct adis *adis)
+int __adis_reset(struct adis *adis)
 {
 	int ret;
+	const struct adis_timeout *timeouts = adis->data->timeouts;
 
-	ret = adis_write_reg_8(adis, adis->data->glob_cmd_reg,
+	ret = __adis_write_reg_8(adis, adis->data->glob_cmd_reg,
 			ADIS_GLOB_CMD_SW_RESET);
-	if (ret)
+	if (ret) {
 		dev_err(&adis->spi->dev, "Failed to reset device: %d\n", ret);
+		return ret;
+	}
 
-	return ret;
+	msleep(timeouts->sw_reset_ms);
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(adis_reset);
+EXPORT_SYMBOL_GPL(__adis_reset);
 
 static int adis_self_test(struct adis *adis)
 {
 	int ret;
+	const struct adis_timeout *timeouts = adis->data->timeouts;
 
-	ret = adis_write_reg_16(adis, adis->data->msc_ctrl_reg,
+	ret = __adis_write_reg_16(adis, adis->data->msc_ctrl_reg,
 			adis->data->self_test_mask);
 	if (ret) {
 		dev_err(&adis->spi->dev, "Failed to initiate self test: %d\n",
@@ -337,12 +354,12 @@ static int adis_self_test(struct adis *adis)
 		return ret;
 	}
 
-	msleep(adis->data->startup_delay);
+	msleep(timeouts->self_test_ms);
 
-	ret = adis_check_status(adis);
+	ret = __adis_check_status(adis);
 
 	if (adis->data->self_test_no_autoclear)
-		adis_write_reg_16(adis, adis->data->msc_ctrl_reg, 0x00);
+		__adis_write_reg_16(adis, adis->data->msc_ctrl_reg, 0x00);
 
 	return ret;
 }
@@ -360,19 +377,22 @@ int adis_initial_startup(struct adis *adis)
 {
 	int ret;
 
+	mutex_lock(&adis->state_lock);
+
 	ret = adis_self_test(adis);
 	if (ret) {
 		dev_err(&adis->spi->dev, "Self-test failed, trying reset.\n");
-		adis_reset(adis);
-		msleep(adis->data->startup_delay);
+		__adis_reset(adis);
 		ret = adis_self_test(adis);
 		if (ret) {
 			dev_err(&adis->spi->dev, "Second self-test failed, giving up.\n");
-			return ret;
+			goto out_unlock;
 		}
 	}
 
-	return 0;
+out_unlock:
+	mutex_unlock(&adis->state_lock);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(adis_initial_startup);
 
@@ -398,15 +418,15 @@ int adis_single_conversion(struct iio_dev *indio_dev,
 	unsigned int uval;
 	int ret;
 
-	mutex_lock(&indio_dev->mlock);
+	mutex_lock(&adis->state_lock);
 
-	ret = adis_read_reg(adis, chan->address, &uval,
+	ret = __adis_read_reg(adis, chan->address, &uval,
 			chan->scan_type.storagebits / 8);
 	if (ret)
 		goto err_unlock;
 
 	if (uval & error_mask) {
-		ret = adis_check_status(adis);
+		ret = __adis_check_status(adis);
 		if (ret)
 			goto err_unlock;
 	}
@@ -418,7 +438,7 @@ int adis_single_conversion(struct iio_dev *indio_dev,
 
 	ret = IIO_VAL_INT;
 err_unlock:
-	mutex_unlock(&indio_dev->mlock);
+	mutex_unlock(&adis->state_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(adis_single_conversion);
@@ -438,7 +458,12 @@ EXPORT_SYMBOL_GPL(adis_single_conversion);
 int adis_init(struct adis *adis, struct iio_dev *indio_dev,
 	struct spi_device *spi, const struct adis_data *data)
 {
-	mutex_init(&adis->txrx_lock);
+	if (!data || !data->timeouts) {
+		dev_err(&spi->dev, "No config data or timeouts not defined!\n");
+		return -EINVAL;
+	}
+
+	mutex_init(&adis->state_lock);
 	adis->spi = spi;
 	adis->data = data;
 	iio_device_set_drvdata(indio_dev, adis);
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 44e46dc..cfb1c19 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -156,12 +156,14 @@ struct adis16400_state;
 
 struct adis16400_chip_info {
 	const struct iio_chan_spec *channels;
+	const struct adis_timeout *timeouts;
 	const int num_channels;
 	const long flags;
 	unsigned int gyro_scale_micro;
 	unsigned int accel_scale_micro;
 	int temp_scale_nano;
 	int temp_offset;
+	/* set_freq() & get_freq() need to avoid using ADIS lib's state lock */
 	int (*set_freq)(struct adis16400_state *st, unsigned int freq);
 	int (*get_freq)(struct adis16400_state *st);
 };
@@ -326,7 +328,7 @@ static int adis16334_get_freq(struct adis16400_state *st)
 	int ret;
 	uint16_t t;
 
-	ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
+	ret = __adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
 	if (ret)
 		return ret;
 
@@ -350,7 +352,7 @@ static int adis16334_set_freq(struct adis16400_state *st, unsigned int freq)
 	t <<= ADIS16334_RATE_DIV_SHIFT;
 	t |= ADIS16334_RATE_INT_CLK;
 
-	return adis_write_reg_16(&st->adis, ADIS16400_SMPL_PRD, t);
+	return __adis_write_reg_16(&st->adis, ADIS16400_SMPL_PRD, t);
 }
 
 static int adis16400_get_freq(struct adis16400_state *st)
@@ -358,7 +360,7 @@ static int adis16400_get_freq(struct adis16400_state *st)
 	int sps, ret;
 	uint16_t t;
 
-	ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
+	ret = __adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
 	if (ret)
 		return ret;
 
@@ -390,7 +392,7 @@ static int adis16400_set_freq(struct adis16400_state *st, unsigned int freq)
 	else
 		st->adis.spi->max_speed_hz = ADIS16400_SPI_FAST;
 
-	return adis_write_reg_8(&st->adis, ADIS16400_SMPL_PRD, val);
+	return __adis_write_reg_8(&st->adis, ADIS16400_SMPL_PRD, val);
 }
 
 static const unsigned int adis16400_3db_divisors[] = {
@@ -404,7 +406,7 @@ static const unsigned int adis16400_3db_divisors[] = {
 	[7] = 200, /* Not a valid setting */
 };
 
-static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
+static int __adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
 {
 	struct adis16400_state *st = iio_priv(indio_dev);
 	uint16_t val16;
@@ -415,11 +417,11 @@ static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
 			break;
 	}
 
-	ret = adis_read_reg_16(&st->adis, ADIS16400_SENS_AVG, &val16);
+	ret = __adis_read_reg_16(&st->adis, ADIS16400_SENS_AVG, &val16);
 	if (ret)
 		return ret;
 
-	ret = adis_write_reg_16(&st->adis, ADIS16400_SENS_AVG,
+	ret = __adis_write_reg_16(&st->adis, ADIS16400_SENS_AVG,
 					 (val16 & ~0x07) | i);
 	return ret;
 }
@@ -507,32 +509,31 @@ static int adis16400_write_raw(struct iio_dev *indio_dev,
 	struct iio_chan_spec const *chan, int val, int val2, long info)
 {
 	struct adis16400_state *st = iio_priv(indio_dev);
+	struct mutex *slock = &st->adis.state_lock;
 	int ret, sps;
 
 	switch (info) {
 	case IIO_CHAN_INFO_CALIBBIAS:
-		mutex_lock(&indio_dev->mlock);
 		ret = adis_write_reg_16(&st->adis,
 				adis16400_addresses[chan->scan_index], val);
-		mutex_unlock(&indio_dev->mlock);
 		return ret;
 	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
 		/*
 		 * Need to cache values so we can update if the frequency
 		 * changes.
 		 */
-		mutex_lock(&indio_dev->mlock);
+		mutex_lock(slock);
 		st->filt_int = val;
 		/* Work out update to current value */
 		sps = st->variant->get_freq(st);
 		if (sps < 0) {
-			mutex_unlock(&indio_dev->mlock);
+			mutex_unlock(slock);
 			return sps;
 		}
 
-		ret = adis16400_set_filter(indio_dev, sps,
+		ret = __adis16400_set_filter(indio_dev, sps,
 			val * 1000 + val2 / 1000);
-		mutex_unlock(&indio_dev->mlock);
+		mutex_unlock(slock);
 		return ret;
 	case IIO_CHAN_INFO_SAMP_FREQ:
 		sps = val * 1000 + val2 / 1000;
@@ -540,9 +541,9 @@ static int adis16400_write_raw(struct iio_dev *indio_dev,
 		if (sps <= 0)
 			return -EINVAL;
 
-		mutex_lock(&indio_dev->mlock);
+		mutex_lock(slock);
 		ret = st->variant->set_freq(st, sps);
-		mutex_unlock(&indio_dev->mlock);
+		mutex_unlock(slock);
 		return ret;
 	default:
 		return -EINVAL;
@@ -553,6 +554,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
 	struct iio_chan_spec const *chan, int *val, int *val2, long info)
 {
 	struct adis16400_state *st = iio_priv(indio_dev);
+	struct mutex *slock = &st->adis.state_lock;
 	int16_t val16;
 	int ret;
 
@@ -596,10 +598,8 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
 			return -EINVAL;
 		}
 	case IIO_CHAN_INFO_CALIBBIAS:
-		mutex_lock(&indio_dev->mlock);
 		ret = adis_read_reg_16(&st->adis,
 				adis16400_addresses[chan->scan_index], &val16);
-		mutex_unlock(&indio_dev->mlock);
 		if (ret)
 			return ret;
 		val16 = sign_extend32(val16, 11);
@@ -610,27 +610,27 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
 		*val = st->variant->temp_offset;
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
-		mutex_lock(&indio_dev->mlock);
+		mutex_lock(slock);
 		/* Need both the number of taps and the sampling frequency */
-		ret = adis_read_reg_16(&st->adis,
+		ret = __adis_read_reg_16(&st->adis,
 						ADIS16400_SENS_AVG,
 						&val16);
 		if (ret) {
-			mutex_unlock(&indio_dev->mlock);
+			mutex_unlock(slock);
 			return ret;
 		}
 		ret = st->variant->get_freq(st);
-		if (ret >= 0) {
-			ret /= adis16400_3db_divisors[val16 & 0x07];
-			*val = ret / 1000;
-			*val2 = (ret % 1000) * 1000;
-		}
-		mutex_unlock(&indio_dev->mlock);
+		mutex_unlock(slock);
 		if (ret)
 			return ret;
+		ret /= adis16400_3db_divisors[val16 & 0x07];
+		*val = ret / 1000;
+		*val2 = (ret % 1000) * 1000;
 		return IIO_VAL_INT_PLUS_MICRO;
 	case IIO_CHAN_INFO_SAMP_FREQ:
+		mutex_lock(slock);
 		ret = st->variant->get_freq(st);
+		mutex_unlock(slock);
 		if (ret)
 			return ret;
 		*val = ret / 1000;
@@ -930,6 +930,36 @@ static const struct iio_chan_spec adis16334_channels[] = {
 	IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
 };
 
+static const struct adis_timeout adis16300_timeouts = {
+	.reset_ms = ADIS16400_STARTUP_DELAY,
+	.sw_reset_ms = ADIS16400_STARTUP_DELAY,
+	.self_test_ms = ADIS16400_STARTUP_DELAY,
+};
+
+static const struct adis_timeout adis16362_timeouts = {
+	.reset_ms = 130,
+	.sw_reset_ms = 130,
+	.self_test_ms = 12,
+};
+
+static const struct adis_timeout adis16400_timeouts = {
+	.reset_ms = 170,
+	.sw_reset_ms = 170,
+	.self_test_ms = 12,
+};
+
+static const struct adis_timeout adis16445_timeouts = {
+	.reset_ms = 55,
+	.sw_reset_ms = 55,
+	.self_test_ms = 16,
+};
+
+static const struct adis_timeout adis16448_timeouts = {
+	.reset_ms = 90,
+	.sw_reset_ms = 90,
+	.self_test_ms = 45,
+};
+
 static struct adis16400_chip_info adis16400_chips[] = {
 	[ADIS16300] = {
 		.channels = adis16300_channels,
@@ -942,6 +972,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
 		.temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
 		.set_freq = adis16400_set_freq,
 		.get_freq = adis16400_get_freq,
+		.timeouts = &adis16300_timeouts,
 	},
 	[ADIS16334] = {
 		.channels = adis16334_channels,
@@ -965,6 +996,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
 		.flags = ADIS16400_NO_BURST | ADIS16400_HAS_SLOW_MODE,
 		.set_freq = adis16400_set_freq,
 		.get_freq = adis16400_get_freq,
+		.timeouts = &adis16300_timeouts,
 	},
 	[ADIS16360] = {
 		.channels = adis16350_channels,
@@ -977,6 +1009,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
 		.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
 		.set_freq = adis16400_set_freq,
 		.get_freq = adis16400_get_freq,
+		.timeouts = &adis16300_timeouts,
 	},
 	[ADIS16362] = {
 		.channels = adis16350_channels,
@@ -989,6 +1022,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
 		.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
 		.set_freq = adis16400_set_freq,
 		.get_freq = adis16400_get_freq,
+		.timeouts = &adis16362_timeouts,
 	},
 	[ADIS16364] = {
 		.channels = adis16350_channels,
@@ -1001,6 +1035,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
 		.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
 		.set_freq = adis16400_set_freq,
 		.get_freq = adis16400_get_freq,
+		.timeouts = &adis16362_timeouts,
 	},
 	[ADIS16367] = {
 		.channels = adis16350_channels,
@@ -1013,6 +1048,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
 		.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
 		.set_freq = adis16400_set_freq,
 		.get_freq = adis16400_get_freq,
+		.timeouts = &adis16300_timeouts,
 	},
 	[ADIS16400] = {
 		.channels = adis16400_channels,
@@ -1024,6 +1060,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
 		.temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
 		.set_freq = adis16400_set_freq,
 		.get_freq = adis16400_get_freq,
+		.timeouts = &adis16400_timeouts,
 	},
 	[ADIS16445] = {
 		.channels = adis16445_channels,
@@ -1037,6 +1074,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
 		.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
 		.set_freq = adis16334_set_freq,
 		.get_freq = adis16334_get_freq,
+		.timeouts = &adis16445_timeouts,
 	},
 	[ADIS16448] = {
 		.channels = adis16448_channels,
@@ -1050,6 +1088,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
 		.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
 		.set_freq = adis16334_set_freq,
 		.get_freq = adis16334_get_freq,
+		.timeouts = &adis16448_timeouts,
 	}
 };
 
@@ -1087,7 +1126,6 @@ static const struct adis_data adis16400_data = {
 	.write_delay = 50,
 
 	.self_test_mask = ADIS16400_MSC_CTRL_MEM_TEST,
-	.startup_delay = ADIS16400_STARTUP_DELAY,
 
 	.status_error_msgs = adis16400_status_error_msgs,
 	.status_error_mask = BIT(ADIS16400_DIAG_STAT_ZACCL_FAIL) |
@@ -1121,11 +1159,28 @@ static void adis16400_setup_chan_mask(struct adis16400_state *st)
 	}
 }
 
+static struct adis_data *adis16400_adis_data_alloc(struct adis16400_state *st,
+						   struct device *dev)
+{
+	struct adis_data *data;
+
+	data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(data, &adis16400_data, sizeof(*data));
+
+	data->timeouts = st->variant->timeouts;
+
+	return data;
+}
+
 static int adis16400_probe(struct spi_device *spi)
 {
 	struct adis16400_state *st;
 	struct iio_dev *indio_dev;
 	int ret;
+	const struct adis_data *adis16400_data;
 
 	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
 	if (indio_dev == NULL)
@@ -1152,7 +1207,11 @@ static int adis16400_probe(struct spi_device *spi)
 			st->adis.burst->extra_len = sizeof(u16);
 	}
 
-	ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
+	adis16400_data = adis16400_adis_data_alloc(st, &spi->dev);
+	if (IS_ERR(adis16400_data))
+		return PTR_ERR(adis16400_data);
+
+	ret = adis_init(&st->adis, indio_dev, spi, adis16400_data);
 	if (ret)
 		return ret;
 
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index b558125..9539cfe 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -383,6 +383,12 @@ static const char * const adis16460_status_error_msgs[] = {
 	[ADIS16460_DIAG_STAT_FLASH_UPT] = "Flash update failure",
 };
 
+static const struct adis_timeout adis16460_timeouts = {
+	.reset_ms = 225,
+	.sw_reset_ms = 225,
+	.self_test_ms = 10,
+};
+
 static const struct adis_data adis16460_data = {
 	.diag_stat_reg = ADIS16460_REG_DIAG_STAT,
 	.glob_cmd_reg = ADIS16460_REG_GLOB_CMD,
@@ -398,6 +404,7 @@ static const struct adis_data adis16460_data = {
 		BIT(ADIS16460_DIAG_STAT_SPI_COMM) |
 		BIT(ADIS16460_DIAG_STAT_FLASH_UPT),
 	.enable_irq = adis16460_enable_irq,
+	.timeouts = &adis16460_timeouts,
 };
 
 static int adis16460_probe(struct spi_device *spi)
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 748f8bb..dac87f1 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -138,6 +138,7 @@ struct adis16480_chip_info {
 	unsigned int max_dec_rate;
 	const unsigned int *filter_freqs;
 	bool has_pps_clk_mode;
+	const struct adis_timeout *timeouts;
 };
 
 enum adis16480_int_pin {
@@ -555,6 +556,7 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
 	const struct iio_chan_spec *chan, unsigned int freq)
 {
 	struct adis16480 *st = iio_priv(indio_dev);
+	struct mutex *slock = &st->adis.state_lock;
 	unsigned int enable_mask, offset, reg;
 	unsigned int diff, best_diff;
 	unsigned int i, best_freq;
@@ -565,9 +567,11 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
 	offset = ad16480_filter_data[chan->scan_index][1];
 	enable_mask = BIT(offset + 2);
 
-	ret = adis_read_reg_16(&st->adis, reg, &val);
+	mutex_lock(slock);
+
+	ret = __adis_read_reg_16(&st->adis, reg, &val);
 	if (ret)
-		return ret;
+		goto out_unlock;
 
 	if (freq == 0) {
 		val &= ~enable_mask;
@@ -589,7 +593,11 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
 		val |= enable_mask;
 	}
 
-	return adis_write_reg_16(&st->adis, reg, val);
+	ret = __adis_write_reg_16(&st->adis, reg, val);
+out_unlock:
+	mutex_unlock(slock);
+
+	return ret;
 }
 
 static int adis16480_read_raw(struct iio_dev *indio_dev,
@@ -779,6 +787,7 @@ enum adis16480_variant {
 	ADIS16480,
 	ADIS16485,
 	ADIS16488,
+	ADIS16490,
 	ADIS16495_1,
 	ADIS16495_2,
 	ADIS16495_3,
@@ -787,6 +796,30 @@ enum adis16480_variant {
 	ADIS16497_3,
 };
 
+static const struct adis_timeout adis16485_timeouts = {
+	.reset_ms = 560,
+	.sw_reset_ms = 120,
+	.self_test_ms = 12,
+};
+
+static const struct adis_timeout adis16480_timeouts = {
+	.reset_ms = 560,
+	.sw_reset_ms = 560,
+	.self_test_ms = 12,
+};
+
+static const struct adis_timeout adis16495_timeouts = {
+	.reset_ms = 170,
+	.sw_reset_ms = 130,
+	.self_test_ms = 40,
+};
+
+static const struct adis_timeout adis16495_1_timeouts = {
+	.reset_ms = 250,
+	.sw_reset_ms = 210,
+	.self_test_ms = 20,
+};
+
 static const struct adis16480_chip_info adis16480_chip_info[] = {
 	[ADIS16375] = {
 		.channels = adis16485_channels,
@@ -805,6 +838,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.int_clk = 2460000,
 		.max_dec_rate = 2048,
 		.filter_freqs = adis16480_def_filter_freqs,
+		.timeouts = &adis16485_timeouts,
 	},
 	[ADIS16480] = {
 		.channels = adis16480_channels,
@@ -817,6 +851,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.int_clk = 2460000,
 		.max_dec_rate = 2048,
 		.filter_freqs = adis16480_def_filter_freqs,
+		.timeouts = &adis16480_timeouts,
 	},
 	[ADIS16485] = {
 		.channels = adis16485_channels,
@@ -829,6 +864,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.int_clk = 2460000,
 		.max_dec_rate = 2048,
 		.filter_freqs = adis16480_def_filter_freqs,
+		.timeouts = &adis16485_timeouts,
 	},
 	[ADIS16488] = {
 		.channels = adis16480_channels,
@@ -841,6 +877,21 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.int_clk = 2460000,
 		.max_dec_rate = 2048,
 		.filter_freqs = adis16480_def_filter_freqs,
+		.timeouts = &adis16485_timeouts,
+	},
+	[ADIS16490] = {
+		.channels = adis16485_channels,
+		.num_channels = ARRAY_SIZE(adis16485_channels),
+		.gyro_max_val = 20000 << 16,
+		.gyro_max_scale = IIO_DEGREE_TO_RAD(100),
+		.accel_max_val = IIO_M_S_2_TO_G(16000 << 16),
+		.accel_max_scale = 8,
+		.temp_scale = 14285, /* 14.285 milli degree Celsius */
+		.int_clk = 4250000,
+		.max_dec_rate = 4250,
+		.filter_freqs = adis16495_def_filter_freqs,
+		.has_pps_clk_mode = true,
+		.timeouts = &adis16495_timeouts,
 	},
 	[ADIS16495_1] = {
 		.channels = adis16485_channels,
@@ -854,6 +905,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.max_dec_rate = 4250,
 		.filter_freqs = adis16495_def_filter_freqs,
 		.has_pps_clk_mode = true,
+		.timeouts = &adis16495_1_timeouts,
 	},
 	[ADIS16495_2] = {
 		.channels = adis16485_channels,
@@ -867,6 +919,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.max_dec_rate = 4250,
 		.filter_freqs = adis16495_def_filter_freqs,
 		.has_pps_clk_mode = true,
+		.timeouts = &adis16495_1_timeouts,
 	},
 	[ADIS16495_3] = {
 		.channels = adis16485_channels,
@@ -880,6 +933,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.max_dec_rate = 4250,
 		.filter_freqs = adis16495_def_filter_freqs,
 		.has_pps_clk_mode = true,
+		.timeouts = &adis16495_1_timeouts,
 	},
 	[ADIS16497_1] = {
 		.channels = adis16485_channels,
@@ -893,6 +947,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.max_dec_rate = 4250,
 		.filter_freqs = adis16495_def_filter_freqs,
 		.has_pps_clk_mode = true,
+		.timeouts = &adis16495_1_timeouts,
 	},
 	[ADIS16497_2] = {
 		.channels = adis16485_channels,
@@ -906,6 +961,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.max_dec_rate = 4250,
 		.filter_freqs = adis16495_def_filter_freqs,
 		.has_pps_clk_mode = true,
+		.timeouts = &adis16495_1_timeouts,
 	},
 	[ADIS16497_3] = {
 		.channels = adis16485_channels,
@@ -919,6 +975,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.max_dec_rate = 4250,
 		.filter_freqs = adis16495_def_filter_freqs,
 		.has_pps_clk_mode = true,
+		.timeouts = &adis16495_1_timeouts,
 	},
 };
 
@@ -947,14 +1004,14 @@ static int adis16480_enable_irq(struct adis *adis, bool enable)
 	uint16_t val;
 	int ret;
 
-	ret = adis_read_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, &val);
+	ret = __adis_read_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, &val);
 	if (ret)
 		return ret;
 
 	val &= ~ADIS16480_DRDY_EN_MSK;
 	val |= ADIS16480_DRDY_EN(enable);
 
-	return adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, val);
+	return __adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, val);
 }
 
 static int adis16480_initial_setup(struct iio_dev *indio_dev)
@@ -1188,9 +1245,26 @@ static int adis16480_get_ext_clocks(struct adis16480 *st)
 	return 0;
 }
 
+static struct adis_data *adis16480_adis_data_alloc(struct adis16480 *st,
+						   struct device *dev)
+{
+	struct adis_data *data;
+
+	data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(data, &adis16480_data, sizeof(*data));
+
+	data->timeouts = st->chip_info->timeouts;
+
+	return data;
+}
+
 static int adis16480_probe(struct spi_device *spi)
 {
 	const struct spi_device_id *id = spi_get_device_id(spi);
+	const struct adis_data *adis16480_data;
 	struct iio_dev *indio_dev;
 	struct adis16480 *st;
 	int ret;
@@ -1211,7 +1285,11 @@ static int adis16480_probe(struct spi_device *spi)
 	indio_dev->info = &adis16480_info;
 	indio_dev->modes = INDIO_DIRECT_MODE;
 
-	ret = adis_init(&st->adis, indio_dev, spi, &adis16480_data);
+	adis16480_data = adis16480_adis_data_alloc(st, &spi->dev);
+	if (IS_ERR(adis16480_data))
+		return PTR_ERR(adis16480_data);
+
+	ret = adis_init(&st->adis, indio_dev, spi, adis16480_data);
 	if (ret)
 		return ret;
 
@@ -1278,6 +1356,7 @@ static const struct spi_device_id adis16480_ids[] = {
 	{ "adis16480", ADIS16480 },
 	{ "adis16485", ADIS16485 },
 	{ "adis16488", ADIS16488 },
+	{ "adis16490", ADIS16490 },
 	{ "adis16495-1", ADIS16495_1 },
 	{ "adis16495-2", ADIS16495_2 },
 	{ "adis16495-3", ADIS16495_3 },
@@ -1293,6 +1372,7 @@ static const struct of_device_id adis16480_of_match[] = {
 	{ .compatible = "adi,adis16480" },
 	{ .compatible = "adi,adis16485" },
 	{ .compatible = "adi,adis16488" },
+	{ .compatible = "adi,adis16490" },
 	{ .compatible = "adi,adis16495-1" },
 	{ .compatible = "adi,adis16495-2" },
 	{ .compatible = "adi,adis16495-3" },
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index 4998a89d..3f4dd5c 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -129,7 +129,7 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
 		return -ENOMEM;
 
 	if (adis->data->has_paging) {
-		mutex_lock(&adis->txrx_lock);
+		mutex_lock(&adis->state_lock);
 		if (adis->current_page != 0) {
 			adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
 			adis->tx[1] = 0;
@@ -144,7 +144,7 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
 
 	if (adis->data->has_paging) {
 		adis->current_page = 0;
-		mutex_unlock(&adis->txrx_lock);
+		mutex_unlock(&adis->state_lock);
 	}
 
 	iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer,
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index e4c4c12..017bc0f 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -10,11 +10,12 @@
 
 config INV_MPU6050_I2C
 	tristate "Invensense MPU6050 devices (I2C)"
-	depends on I2C_MUX
+	depends on I2C
+	select I2C_MUX
 	select INV_MPU6050_IIO
 	select REGMAP_I2C
 	help
-	  This driver supports the Invensense MPU6000/6050/6500/6515,
+	  This driver supports the Invensense MPU6050/6500/6515,
 	  MPU9150/9250/9255 and ICM20608/20602 motion tracking devices
 	  over I2C.
 	  This driver can be built as a module. The module will be called
@@ -26,8 +27,8 @@
 	select INV_MPU6050_IIO
 	select REGMAP_SPI
 	help
-	  This driver supports the Invensense MPU6000/6050/6500/6515,
-	  MPU9150/9250/9255 and ICM20608/20602 motion tracking devices
+	  This driver supports the Invensense MPU6000/6500/6515,
+	  MPU9250/9255 and ICM20608/20602 motion tracking devices
 	  over SPI.
 	  This driver can be built as a module. The module will be called
 	  inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 0686e41..5096fc4 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -104,6 +104,7 @@ static const struct inv_mpu6050_chip_config chip_config_6050 = {
 	.divider = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU6050_INIT_FIFO_RATE),
 	.gyro_fifo_enable = false,
 	.accl_fifo_enable = false,
+	.temp_fifo_enable = false,
 	.magn_fifo_enable = false,
 	.accl_fs = INV_MPU6050_FS_02G,
 	.user_ctrl = 0,
@@ -856,19 +857,27 @@ static const struct iio_chan_spec_ext_info inv_ext_info[] = {
 		.ext_info = inv_ext_info,                             \
 	}
 
+#define INV_MPU6050_TEMP_CHAN(_index)				\
+	{							\
+		.type = IIO_TEMP,				\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW)	\
+				| BIT(IIO_CHAN_INFO_OFFSET)	\
+				| BIT(IIO_CHAN_INFO_SCALE),	\
+		.scan_index = _index,				\
+		.scan_type = {					\
+			.sign = 's',				\
+			.realbits = 16,				\
+			.storagebits = 16,			\
+			.shift = 0,				\
+			.endianness = IIO_BE,			\
+		},						\
+	}
+
 static const struct iio_chan_spec inv_mpu_channels[] = {
 	IIO_CHAN_SOFT_TIMESTAMP(INV_MPU6050_SCAN_TIMESTAMP),
-	/*
-	 * Note that temperature should only be via polled reading only,
-	 * not the final scan elements output.
-	 */
-	{
-		.type = IIO_TEMP,
-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
-				| BIT(IIO_CHAN_INFO_OFFSET)
-				| BIT(IIO_CHAN_INFO_SCALE),
-		.scan_index = -1,
-	},
+
+	INV_MPU6050_TEMP_CHAN(INV_MPU6050_SCAN_TEMP),
+
 	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
 	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
 	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
@@ -878,22 +887,29 @@ static const struct iio_chan_spec inv_mpu_channels[] = {
 	INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
 };
 
+#define INV_MPU6050_SCAN_MASK_3AXIS_ACCEL	\
+	(BIT(INV_MPU6050_SCAN_ACCL_X)		\
+	| BIT(INV_MPU6050_SCAN_ACCL_Y)		\
+	| BIT(INV_MPU6050_SCAN_ACCL_Z))
+
+#define INV_MPU6050_SCAN_MASK_3AXIS_GYRO	\
+	(BIT(INV_MPU6050_SCAN_GYRO_X)		\
+	| BIT(INV_MPU6050_SCAN_GYRO_Y)		\
+	| BIT(INV_MPU6050_SCAN_GYRO_Z))
+
+#define INV_MPU6050_SCAN_MASK_TEMP		(BIT(INV_MPU6050_SCAN_TEMP))
+
 static const unsigned long inv_mpu_scan_masks[] = {
 	/* 3-axis accel */
-	BIT(INV_MPU6050_SCAN_ACCL_X)
-		| BIT(INV_MPU6050_SCAN_ACCL_Y)
-		| BIT(INV_MPU6050_SCAN_ACCL_Z),
+	INV_MPU6050_SCAN_MASK_3AXIS_ACCEL,
+	INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_TEMP,
 	/* 3-axis gyro */
-	BIT(INV_MPU6050_SCAN_GYRO_X)
-		| BIT(INV_MPU6050_SCAN_GYRO_Y)
-		| BIT(INV_MPU6050_SCAN_GYRO_Z),
+	INV_MPU6050_SCAN_MASK_3AXIS_GYRO,
+	INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU6050_SCAN_MASK_TEMP,
 	/* 6-axis accel + gyro */
-	BIT(INV_MPU6050_SCAN_ACCL_X)
-		| BIT(INV_MPU6050_SCAN_ACCL_Y)
-		| BIT(INV_MPU6050_SCAN_ACCL_Z)
-		| BIT(INV_MPU6050_SCAN_GYRO_X)
-		| BIT(INV_MPU6050_SCAN_GYRO_Y)
-		| BIT(INV_MPU6050_SCAN_GYRO_Z),
+	INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO,
+	INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+		| INV_MPU6050_SCAN_MASK_TEMP,
 	0,
 };
 
@@ -915,19 +931,30 @@ static const unsigned long inv_mpu_scan_masks[] = {
 		.ext_info = inv_ext_info,				\
 	}
 
+static const struct iio_chan_spec inv_mpu9150_channels[] = {
+	IIO_CHAN_SOFT_TIMESTAMP(INV_MPU9X50_SCAN_TIMESTAMP),
+
+	INV_MPU6050_TEMP_CHAN(INV_MPU6050_SCAN_TEMP),
+
+	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
+	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
+	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
+
+	INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_X, INV_MPU6050_SCAN_ACCL_X),
+	INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_MPU6050_SCAN_ACCL_Y),
+	INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
+
+	/* Magnetometer resolution is 13 bits */
+	INV_MPU9X50_MAGN_CHAN(IIO_MOD_X, 13, INV_MPU9X50_SCAN_MAGN_X),
+	INV_MPU9X50_MAGN_CHAN(IIO_MOD_Y, 13, INV_MPU9X50_SCAN_MAGN_Y),
+	INV_MPU9X50_MAGN_CHAN(IIO_MOD_Z, 13, INV_MPU9X50_SCAN_MAGN_Z),
+};
+
 static const struct iio_chan_spec inv_mpu9250_channels[] = {
 	IIO_CHAN_SOFT_TIMESTAMP(INV_MPU9X50_SCAN_TIMESTAMP),
-	/*
-	 * Note that temperature should only be via polled reading only,
-	 * not the final scan elements output.
-	 */
-	{
-		.type = IIO_TEMP,
-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
-				| BIT(IIO_CHAN_INFO_OFFSET)
-				| BIT(IIO_CHAN_INFO_SCALE),
-		.scan_index = -1,
-	},
+
+	INV_MPU6050_TEMP_CHAN(INV_MPU6050_SCAN_TEMP),
+
 	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
 	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
 	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
@@ -942,98 +969,50 @@ static const struct iio_chan_spec inv_mpu9250_channels[] = {
 	INV_MPU9X50_MAGN_CHAN(IIO_MOD_Z, 16, INV_MPU9X50_SCAN_MAGN_Z),
 };
 
+#define INV_MPU9X50_SCAN_MASK_3AXIS_MAGN	\
+	(BIT(INV_MPU9X50_SCAN_MAGN_X)		\
+	| BIT(INV_MPU9X50_SCAN_MAGN_Y)		\
+	| BIT(INV_MPU9X50_SCAN_MAGN_Z))
+
 static const unsigned long inv_mpu9x50_scan_masks[] = {
 	/* 3-axis accel */
-	BIT(INV_MPU6050_SCAN_ACCL_X)
-		| BIT(INV_MPU6050_SCAN_ACCL_Y)
-		| BIT(INV_MPU6050_SCAN_ACCL_Z),
+	INV_MPU6050_SCAN_MASK_3AXIS_ACCEL,
+	INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_TEMP,
 	/* 3-axis gyro */
-	BIT(INV_MPU6050_SCAN_GYRO_X)
-		| BIT(INV_MPU6050_SCAN_GYRO_Y)
-		| BIT(INV_MPU6050_SCAN_GYRO_Z),
+	INV_MPU6050_SCAN_MASK_3AXIS_GYRO,
+	INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU6050_SCAN_MASK_TEMP,
 	/* 3-axis magn */
-	BIT(INV_MPU9X50_SCAN_MAGN_X)
-		| BIT(INV_MPU9X50_SCAN_MAGN_Y)
-		| BIT(INV_MPU9X50_SCAN_MAGN_Z),
+	INV_MPU9X50_SCAN_MASK_3AXIS_MAGN,
+	INV_MPU9X50_SCAN_MASK_3AXIS_MAGN | INV_MPU6050_SCAN_MASK_TEMP,
 	/* 6-axis accel + gyro */
-	BIT(INV_MPU6050_SCAN_ACCL_X)
-		| BIT(INV_MPU6050_SCAN_ACCL_Y)
-		| BIT(INV_MPU6050_SCAN_ACCL_Z)
-		| BIT(INV_MPU6050_SCAN_GYRO_X)
-		| BIT(INV_MPU6050_SCAN_GYRO_Y)
-		| BIT(INV_MPU6050_SCAN_GYRO_Z),
+	INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO,
+	INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+		| INV_MPU6050_SCAN_MASK_TEMP,
 	/* 6-axis accel + magn */
-	BIT(INV_MPU6050_SCAN_ACCL_X)
-		| BIT(INV_MPU6050_SCAN_ACCL_Y)
-		| BIT(INV_MPU6050_SCAN_ACCL_Z)
-		| BIT(INV_MPU9X50_SCAN_MAGN_X)
-		| BIT(INV_MPU9X50_SCAN_MAGN_Y)
-		| BIT(INV_MPU9X50_SCAN_MAGN_Z),
+	INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN,
+	INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN
+		| INV_MPU6050_SCAN_MASK_TEMP,
 	/* 6-axis gyro + magn */
-	BIT(INV_MPU6050_SCAN_GYRO_X)
-		| BIT(INV_MPU6050_SCAN_GYRO_Y)
-		| BIT(INV_MPU6050_SCAN_GYRO_Z)
-		| BIT(INV_MPU9X50_SCAN_MAGN_X)
-		| BIT(INV_MPU9X50_SCAN_MAGN_Y)
-		| BIT(INV_MPU9X50_SCAN_MAGN_Z),
+	INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN,
+	INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN
+		| INV_MPU6050_SCAN_MASK_TEMP,
 	/* 9-axis accel + gyro + magn */
-	BIT(INV_MPU6050_SCAN_ACCL_X)
-		| BIT(INV_MPU6050_SCAN_ACCL_Y)
-		| BIT(INV_MPU6050_SCAN_ACCL_Z)
-		| BIT(INV_MPU6050_SCAN_GYRO_X)
-		| BIT(INV_MPU6050_SCAN_GYRO_Y)
-		| BIT(INV_MPU6050_SCAN_GYRO_Z)
-		| BIT(INV_MPU9X50_SCAN_MAGN_X)
-		| BIT(INV_MPU9X50_SCAN_MAGN_Y)
-		| BIT(INV_MPU9X50_SCAN_MAGN_Z),
+	INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+		| INV_MPU9X50_SCAN_MASK_3AXIS_MAGN,
+	INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+		| INV_MPU9X50_SCAN_MASK_3AXIS_MAGN
+		| INV_MPU6050_SCAN_MASK_TEMP,
 	0,
 };
 
-static const struct iio_chan_spec inv_icm20602_channels[] = {
-	IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP),
-	{
-		.type = IIO_TEMP,
-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
-				| BIT(IIO_CHAN_INFO_OFFSET)
-				| BIT(IIO_CHAN_INFO_SCALE),
-		.scan_index = INV_ICM20602_SCAN_TEMP,
-		.scan_type = {
-				.sign = 's',
-				.realbits = 16,
-				.storagebits = 16,
-				.shift = 0,
-				.endianness = IIO_BE,
-			     },
-	},
-
-	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_ICM20602_SCAN_GYRO_X),
-	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_ICM20602_SCAN_GYRO_Y),
-	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_ICM20602_SCAN_GYRO_Z),
-
-	INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_ICM20602_SCAN_ACCL_Y),
-	INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_X, INV_ICM20602_SCAN_ACCL_X),
-	INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_ICM20602_SCAN_ACCL_Z),
-};
-
 static const unsigned long inv_icm20602_scan_masks[] = {
 	/* 3-axis accel + temp (mandatory) */
-	BIT(INV_ICM20602_SCAN_ACCL_X)
-		| BIT(INV_ICM20602_SCAN_ACCL_Y)
-		| BIT(INV_ICM20602_SCAN_ACCL_Z)
-		| BIT(INV_ICM20602_SCAN_TEMP),
+	INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_TEMP,
 	/* 3-axis gyro + temp (mandatory) */
-	BIT(INV_ICM20602_SCAN_GYRO_X)
-		| BIT(INV_ICM20602_SCAN_GYRO_Y)
-		| BIT(INV_ICM20602_SCAN_GYRO_Z)
-		| BIT(INV_ICM20602_SCAN_TEMP),
+	INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU6050_SCAN_MASK_TEMP,
 	/* 6-axis accel + gyro + temp (mandatory) */
-	BIT(INV_ICM20602_SCAN_ACCL_X)
-		| BIT(INV_ICM20602_SCAN_ACCL_Y)
-		| BIT(INV_ICM20602_SCAN_ACCL_Z)
-		| BIT(INV_ICM20602_SCAN_GYRO_X)
-		| BIT(INV_ICM20602_SCAN_GYRO_Y)
-		| BIT(INV_ICM20602_SCAN_GYRO_Z)
-		| BIT(INV_ICM20602_SCAN_TEMP),
+	INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+		| INV_MPU6050_SCAN_MASK_TEMP,
 	0,
 };
 
@@ -1241,7 +1220,7 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
 	irq_type = irqd_get_trigger_type(desc);
 	if (!irq_type)
 		irq_type = IRQF_TRIGGER_RISING;
-	if (irq_type == IRQF_TRIGGER_RISING)
+	if (irq_type & IRQF_TRIGGER_RISING)	/* rising or both-edge */
 		st->irq_mask = INV_MPU6050_ACTIVE_HIGH;
 	else if (irq_type == IRQF_TRIGGER_FALLING)
 		st->irq_mask = INV_MPU6050_ACTIVE_LOW;
@@ -1324,25 +1303,20 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
 		inv_mpu_bus_setup(indio_dev);
 
 	switch (chip_type) {
+	case INV_MPU9150:
+		indio_dev->channels = inv_mpu9150_channels;
+		indio_dev->num_channels = ARRAY_SIZE(inv_mpu9150_channels);
+		indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
+		break;
 	case INV_MPU9250:
 	case INV_MPU9255:
-		/*
-		 * Use magnetometer inside the chip only if there is no i2c
-		 * auxiliary device in use.
-		 */
-		if (!st->magn_disabled) {
-			indio_dev->channels = inv_mpu9250_channels;
-			indio_dev->num_channels = ARRAY_SIZE(inv_mpu9250_channels);
-			indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
-		} else {
-			indio_dev->channels = inv_mpu_channels;
-			indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
-			indio_dev->available_scan_masks = inv_mpu_scan_masks;
-		}
+		indio_dev->channels = inv_mpu9250_channels;
+		indio_dev->num_channels = ARRAY_SIZE(inv_mpu9250_channels);
+		indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
 		break;
 	case INV_ICM20602:
-		indio_dev->channels = inv_icm20602_channels;
-		indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels);
+		indio_dev->channels = inv_mpu_channels;
+		indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
 		indio_dev->available_scan_masks = inv_icm20602_scan_masks;
 		break;
 	default:
@@ -1351,6 +1325,15 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
 		indio_dev->available_scan_masks = inv_mpu_scan_masks;
 		break;
 	}
+	/*
+	 * Use magnetometer inside the chip only if there is no i2c
+	 * auxiliary device in use. Otherwise, fall back to 6-axis only.
+	 */
+	if (st->magn_disabled) {
+		indio_dev->channels = inv_mpu_channels;
+		indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
+		indio_dev->available_scan_masks = inv_mpu_scan_masks;
+	}
 
 	indio_dev->info = &mpu_info;
 	indio_dev->modes = INDIO_BUFFER_TRIGGERED;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 389cc85..f47a28b 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -77,6 +77,7 @@ static bool inv_mpu_i2c_aux_bus(struct device *dev)
 	case INV_ICM20602:
 		/* no i2c auxiliary bus on the chip */
 		return false;
+	case INV_MPU9150:
 	case INV_MPU9250:
 	case INV_MPU9255:
 		if (st->magn_disabled)
@@ -102,6 +103,7 @@ static int inv_mpu_magn_disable(struct iio_dev *indio_dev)
 	struct device_node *mux_node;
 
 	switch (st->chip_type) {
+	case INV_MPU9150:
 	case INV_MPU9250:
 	case INV_MPU9255:
 		mux_node = of_get_child_by_name(dev->of_node, "i2c-gate");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index b096e01..6158fca 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -86,6 +86,7 @@ enum inv_devices {
  *  @accl_fs:		accel full scale range.
  *  @accl_fifo_enable:	enable accel data output
  *  @gyro_fifo_enable:	enable gyro data output
+ *  @temp_fifo_enable:	enable temp data output
  *  @magn_fifo_enable:	enable magn data output
  *  @divider:		chip sample rate divider (sample rate divider - 1)
  */
@@ -95,6 +96,7 @@ struct inv_mpu6050_chip_config {
 	unsigned int accl_fs:2;
 	unsigned int accl_fifo_enable:1;
 	unsigned int gyro_fifo_enable:1;
+	unsigned int temp_fifo_enable:1;
 	unsigned int magn_fifo_enable:1;
 	u8 divider;
 	u8 user_ctrl;
@@ -184,6 +186,7 @@ struct inv_mpu6050_state {
 #define INV_MPU6050_BIT_SLAVE_2             0x04
 #define INV_MPU6050_BIT_ACCEL_OUT           0x08
 #define INV_MPU6050_BITS_GYRO_OUT           0x70
+#define INV_MPU6050_BIT_TEMP_OUT            0x80
 
 #define INV_MPU6050_REG_I2C_MST_CTRL        0x24
 #define INV_MPU6050_BITS_I2C_MST_CLK_400KHZ 0x0D
@@ -268,8 +271,8 @@ struct inv_mpu6050_state {
 /* MPU9X50 9-axis magnetometer */
 #define INV_MPU9X50_BYTES_MAGN               7
 
-/* ICM20602 FIFO samples include temperature readings */
-#define INV_ICM20602_BYTES_PER_TEMP_SENSOR   2
+/* FIFO temperature sample size */
+#define INV_MPU6050_BYTES_PER_TEMP_SENSOR   2
 
 /* mpu6500 registers */
 #define INV_MPU6500_REG_ACCEL_CONFIG_2      0x1D
@@ -298,7 +301,7 @@ struct inv_mpu6050_state {
 #define INV_ICM20608_TEMP_OFFSET	     8170
 #define INV_ICM20608_TEMP_SCALE		     3059976
 
-/* 6 + 6 + 7 (for MPU9x50) = 19 round up to 24 and plus 8 */
+/* 6 + 6 + 2 + 7 (for MPU9x50) = 21, rounded up to 24, plus 8 */
 #define INV_MPU6050_OUTPUT_DATA_SIZE         32
 
 #define INV_MPU6050_REG_INT_PIN_CFG	0x37
@@ -344,6 +347,7 @@ enum inv_mpu6050_scan {
 	INV_MPU6050_SCAN_ACCL_X,
 	INV_MPU6050_SCAN_ACCL_Y,
 	INV_MPU6050_SCAN_ACCL_Z,
+	INV_MPU6050_SCAN_TEMP,
 	INV_MPU6050_SCAN_GYRO_X,
 	INV_MPU6050_SCAN_GYRO_Y,
 	INV_MPU6050_SCAN_GYRO_Z,
@@ -355,18 +359,6 @@ enum inv_mpu6050_scan {
 	INV_MPU9X50_SCAN_TIMESTAMP,
 };
 
-/* scan element definition for ICM20602, which includes temperature */
-enum inv_icm20602_scan {
-	INV_ICM20602_SCAN_ACCL_X,
-	INV_ICM20602_SCAN_ACCL_Y,
-	INV_ICM20602_SCAN_ACCL_Z,
-	INV_ICM20602_SCAN_TEMP,
-	INV_ICM20602_SCAN_GYRO_X,
-	INV_ICM20602_SCAN_GYRO_Y,
-	INV_ICM20602_SCAN_GYRO_Z,
-	INV_ICM20602_SCAN_TIMESTAMP,
-};
-
 enum inv_mpu6050_filter_e {
 	INV_MPU6050_FILTER_256HZ_NOLPF2 = 0,
 	INV_MPU6050_FILTER_188HZ,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
index 02735af..4f19235 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
@@ -12,7 +12,9 @@
 #include "inv_mpu_magn.h"
 
 /*
- * MPU9250 magnetometer is an AKM AK8963 chip on I2C aux bus
+ * MPU9xxx magnetometers are AKM chips on the I2C aux bus
+ * MPU9150 is AK8975
+ * MPU9250 is AK8963
  */
 #define INV_MPU_MAGN_I2C_ADDR		0x0C
 
@@ -33,10 +35,10 @@
 #define INV_MPU_MAGN_BITS_MODE_PWDN	0x00
 #define INV_MPU_MAGN_BITS_MODE_SINGLE	0x01
 #define INV_MPU_MAGN_BITS_MODE_FUSE	0x0F
-#define INV_MPU_MAGN_BIT_OUTPUT_BIT	0x10
+#define INV_MPU9250_MAGN_BIT_OUTPUT_BIT	0x10
 
-#define INV_MPU_MAGN_REG_CNTL2		0x0B
-#define INV_MPU_MAGN_BIT_SRST		0x01
+#define INV_MPU9250_MAGN_REG_CNTL2	0x0B
+#define INV_MPU9250_MAGN_BIT_SRST	0x01
 
 #define INV_MPU_MAGN_REG_ASAX		0x10
 #define INV_MPU_MAGN_REG_ASAY		0x11
@@ -48,6 +50,7 @@
 static bool inv_magn_supported(const struct inv_mpu6050_state *st)
 {
 	switch (st->chip_type) {
+	case INV_MPU9150:
 	case INV_MPU9250:
 	case INV_MPU9255:
 		return true;
@@ -61,6 +64,7 @@ static int inv_magn_init(struct inv_mpu6050_state *st)
 {
 	uint8_t val;
 	uint8_t asa[3];
+	int32_t sensitivity;
 	int ret;
 
 	/* check whoami */
@@ -71,12 +75,19 @@ static int inv_magn_init(struct inv_mpu6050_state *st)
 	if (val != INV_MPU_MAGN_BITS_WIA)
 		return -ENODEV;
 
-	/* reset chip */
-	ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
-				INV_MPU_MAGN_REG_CNTL2,
-				INV_MPU_MAGN_BIT_SRST);
-	if (ret)
-		return ret;
+	/* software reset for MPU925x only */
+	switch (st->chip_type) {
+	case INV_MPU9250:
+	case INV_MPU9255:
+		ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
+					INV_MPU9250_MAGN_REG_CNTL2,
+					INV_MPU9250_MAGN_BIT_SRST);
+		if (ret)
+			return ret;
+		break;
+	default:
+		break;
+	}
 
 	/* read fuse ROM data */
 	ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
@@ -98,22 +109,36 @@ static int inv_magn_init(struct inv_mpu6050_state *st)
 		return ret;
 
 	/*
+	 * Sensor sensitivity
+	 * 1 uT = 0.01 G and the value is in micro units (1e6)
+	 * sensitivity = x uT * 0.01 * 1e6
+	 */
+	switch (st->chip_type) {
+	case INV_MPU9150:
+		/* sensor sensitivity is 0.3 uT */
+		sensitivity = 3000;
+		break;
+	case INV_MPU9250:
+	case INV_MPU9255:
+		/* sensor sensitivity in 16 bits mode: 0.15 uT */
+		sensitivity = 1500;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
 	 * Sensitivity adjustement and scale to Gauss
 	 *
 	 * Hadj = H * (((ASA - 128) * 0.5 / 128) + 1)
 	 * Factor simplification:
 	 * Hadj = H * ((ASA + 128) / 256)
 	 *
-	 * Sensor sentivity
-	 * 0.15 uT in 16 bits mode
-	 * 1 uT = 0.01 G and value is in micron (1e6)
-	 * sensitvity = 0.15 uT * 0.01 * 1e6
-	 *
-	 * raw_to_gauss = Hadj * 1500
+	 * raw_to_gauss = Hadj * sensitivity
 	 */
-	st->magn_raw_to_gauss[0] = (((int32_t)asa[0] + 128) * 1500) / 256;
-	st->magn_raw_to_gauss[1] = (((int32_t)asa[1] + 128) * 1500) / 256;
-	st->magn_raw_to_gauss[2] = (((int32_t)asa[2] + 128) * 1500) / 256;
+	st->magn_raw_to_gauss[0] = (((int32_t)asa[0] + 128) * sensitivity) / 256;
+	st->magn_raw_to_gauss[1] = (((int32_t)asa[1] + 128) * sensitivity) / 256;
+	st->magn_raw_to_gauss[2] = (((int32_t)asa[2] + 128) * sensitivity) / 256;
 
 	return 0;
 }
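(As a quick sanity check of the conversion above, with illustrative values not taken
from the patch: for a fuse-ROM adjustment byte asa = 128 and the MPU9250/9255 16-bit
sensitivity of 1500, raw_to_gauss = ((128 + 128) * 1500) / 256 = 1500, i.e. one raw
LSB corresponds to 0.15 uT = 1.5 mGauss; with the MPU9150 sensitivity of 3000 the
same asa yields 3000, i.e. 0.3 uT = 3 mGauss per LSB.)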
@@ -129,6 +154,7 @@ static int inv_magn_init(struct inv_mpu6050_state *st)
  */
 int inv_mpu_magn_probe(struct inv_mpu6050_state *st)
 {
+	uint8_t val;
 	int ret;
 
 	/* quit if chip is not supported */
@@ -179,10 +205,17 @@ int inv_mpu_magn_probe(struct inv_mpu6050_state *st)
 	if (ret)
 		return ret;
 
-	/* add 16 bits mode */
-	ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_DO(1),
-			   INV_MPU_MAGN_BITS_MODE_SINGLE |
-			   INV_MPU_MAGN_BIT_OUTPUT_BIT);
+	/* add 16 bits mode for MPU925x */
+	val = INV_MPU_MAGN_BITS_MODE_SINGLE;
+	switch (st->chip_type) {
+	case INV_MPU9250:
+	case INV_MPU9255:
+		val |= INV_MPU9250_MAGN_BIT_OUTPUT_BIT;
+		break;
+	default:
+		break;
+	}
+	ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_DO(1), val);
 	if (ret)
 		return ret;
 
@@ -237,6 +270,7 @@ int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st)
 
 	/* fill magnetometer orientation */
 	switch (st->chip_type) {
+	case INV_MPU9150:
 	case INV_MPU9250:
 	case INV_MPU9255:
 		/* x <- y */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 10d16ec..f9fdf43 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -142,6 +142,8 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
 		d |= INV_MPU6050_BITS_GYRO_OUT;
 	if (st->chip_config.accl_fifo_enable)
 		d |= INV_MPU6050_BIT_ACCEL_OUT;
+	if (st->chip_config.temp_fifo_enable)
+		d |= INV_MPU6050_BIT_TEMP_OUT;
 	if (st->chip_config.magn_fifo_enable)
 		d |= INV_MPU6050_BIT_SLAVE_0;
 	result = regmap_write(st->map, st->reg->fifo_en, d);
@@ -183,11 +185,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
 			"failed to ack interrupt\n");
 		goto flush_fifo;
 	}
-	if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) {
-		dev_warn(regmap_get_device(st->map),
-			"spurious interrupt with status 0x%x\n", int_status);
+	if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT))
 		goto end_session;
-	}
 
 	if (!(st->chip_config.accl_fifo_enable |
 		st->chip_config.gyro_fifo_enable |
@@ -200,8 +199,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
 	if (st->chip_config.gyro_fifo_enable)
 		bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR;
 
-	if (st->chip_type == INV_ICM20602)
-		bytes_per_datum += INV_ICM20602_BYTES_PER_TEMP_SENSOR;
+	if (st->chip_config.temp_fifo_enable)
+		bytes_per_datum += INV_MPU6050_BYTES_PER_TEMP_SENSOR;
 
 	if (st->chip_config.magn_fifo_enable)
 		bytes_per_datum += INV_MPU9X50_BYTES_MAGN;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 142692f..ec102d5 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -74,7 +74,6 @@ static int inv_mpu_probe(struct spi_device *spi)
 static const struct spi_device_id inv_mpu_id[] = {
 	{"mpu6000", INV_MPU6000},
 	{"mpu6500", INV_MPU6500},
-	{"mpu9150", INV_MPU9150},
 	{"mpu9250", INV_MPU9250},
 	{"mpu9255", INV_MPU9255},
 	{"icm20608", INV_ICM20608},
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index d7d95192..5199fe7 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -24,6 +24,9 @@ static void inv_scan_query_mpu6050(struct iio_dev *indio_dev)
 			 indio_dev->active_scan_mask) ||
 		test_bit(INV_MPU6050_SCAN_ACCL_Z,
 			 indio_dev->active_scan_mask);
+
+	st->chip_config.temp_fifo_enable =
+		test_bit(INV_MPU6050_SCAN_TEMP, indio_dev->active_scan_mask);
 }
 
 static void inv_scan_query_mpu9x50(struct iio_dev *indio_dev)
@@ -50,6 +53,7 @@ static void inv_scan_query(struct iio_dev *indio_dev)
 	struct inv_mpu6050_state *st = iio_priv(indio_dev);
 
 	switch (st->chip_type) {
+	case INV_MPU9150:
 	case INV_MPU9250:
 	case INV_MPU9255:
 		return inv_scan_query_mpu9x50(indio_dev);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index dc55d7d..9c3486a 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -76,6 +76,7 @@ enum st_lsm6dsx_hw_id {
 		.endianness = IIO_LE,					\
 	},								\
 	.event_spec = &st_lsm6dsx_event,				\
+	.ext_info = st_lsm6dsx_accel_ext_info,				\
 	.num_event_specs = 1,						\
 }
 
@@ -176,21 +177,38 @@ struct st_lsm6dsx_hw_ts_settings {
  * @pullup_en: i2c controller pull-up register info (addr + mask).
  * @aux_sens: aux sensor register info (addr + mask).
  * @wr_once: write_once register info (addr + mask).
+ * @emb_func:  embedded function register info (addr + mask).
+ * @num_ext_dev: max number of slave devices.
  * @shub_out: sensor hub first output register info.
  * @slv0_addr: slave0 address in secondary page.
  * @dw_slv0_addr: slave0 write register address in secondary page.
  * @batch_en: Enable/disable FIFO batching.
+ * @pause: controller pause value.
  */
 struct st_lsm6dsx_shub_settings {
 	struct st_lsm6dsx_reg page_mux;
-	struct st_lsm6dsx_reg master_en;
-	struct st_lsm6dsx_reg pullup_en;
+	struct {
+		bool sec_page;
+		u8 addr;
+		u8 mask;
+	} master_en;
+	struct {
+		bool sec_page;
+		u8 addr;
+		u8 mask;
+	} pullup_en;
 	struct st_lsm6dsx_reg aux_sens;
 	struct st_lsm6dsx_reg wr_once;
-	u8 shub_out;
+	struct st_lsm6dsx_reg emb_func;
+	u8 num_ext_dev;
+	struct {
+		bool sec_page;
+		u8 addr;
+	} shub_out;
 	u8 slv0_addr;
 	u8 dw_slv0_addr;
 	u8 batch_en;
+	u8 pause;
 };
 
 struct st_lsm6dsx_event_settings {
@@ -361,6 +379,7 @@ struct st_lsm6dsx_sensor {
  * @enable_event: enabled event bitmask.
  * @iio_devs: Pointers to acc/gyro iio_dev instances.
  * @settings: Pointer to the specific sensor settings in use.
+ * @orientation: sensor chip orientation relative to main hardware.
  */
 struct st_lsm6dsx_hw {
 	struct device *dev;
@@ -387,16 +406,21 @@ struct st_lsm6dsx_hw {
 	struct iio_dev *iio_devs[ST_LSM6DSX_ID_MAX];
 
 	const struct st_lsm6dsx_settings *settings;
+
+	struct iio_mount_matrix orientation;
 };
 
-static const struct iio_event_spec st_lsm6dsx_event = {
+static __maybe_unused const struct iio_event_spec st_lsm6dsx_event = {
 	.type = IIO_EV_TYPE_THRESH,
 	.dir = IIO_EV_DIR_EITHER,
 	.mask_separate = BIT(IIO_EV_INFO_VALUE) |
 			 BIT(IIO_EV_INFO_ENABLE)
 };
 
-static const unsigned long st_lsm6dsx_available_scan_masks[] = {0x7, 0x0};
+static __maybe_unused const unsigned long st_lsm6dsx_available_scan_masks[] = {
+	0x7, 0x0,
+};
+
 extern const struct dev_pm_ops st_lsm6dsx_pm_ops;
 
 int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
@@ -457,4 +481,19 @@ st_lsm6dsx_write_locked(struct st_lsm6dsx_hw *hw, unsigned int addr,
 	return err;
 }
 
+static inline const struct iio_mount_matrix *
+st_lsm6dsx_get_mount_matrix(const struct iio_dev *iio_dev,
+			    const struct iio_chan_spec *chan)
+{
+	struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+	struct st_lsm6dsx_hw *hw = sensor->hw;
+
+	return &hw->orientation;
+}
+
+static const struct iio_chan_spec_ext_info st_lsm6dsx_accel_ext_info[] = {
+	IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, st_lsm6dsx_get_mount_matrix),
+	{ }
+};
+
 #endif /* ST_LSM6DSX_H */
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index cb536b8..bb89934 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -336,12 +336,13 @@ static inline int st_lsm6dsx_read_block(struct st_lsm6dsx_hw *hw, u8 addr,
  */
 int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
 {
+	struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor, *ext_sensor = NULL;
+	int err, acc_sip, gyro_sip, ts_sip, ext_sip, read_len, offset;
 	u16 fifo_len, pattern_len = hw->sip * ST_LSM6DSX_SAMPLE_SIZE;
 	u16 fifo_diff_mask = hw->settings->fifo_ops.fifo_diff.mask;
-	int err, acc_sip, gyro_sip, ts_sip, read_len, offset;
-	struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor;
 	u8 gyro_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
 	u8 acc_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
+	u8 ext_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
 	bool reset_ts = false;
 	__le16 fifo_status;
 	s64 ts = 0;
@@ -364,6 +365,8 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
 
 	acc_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
 	gyro_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_GYRO]);
+	if (hw->iio_devs[ST_LSM6DSX_ID_EXT0])
+		ext_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_EXT0]);
 
 	for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
 		err = st_lsm6dsx_read_block(hw, ST_LSM6DSX_REG_FIFO_OUTL_ADDR,
@@ -391,12 +394,13 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
 		 * following pattern is repeated every 9 samples:
 		 *   - Gx, Gy, Gz, Ax, Ay, Az, Ts, Gx, Gy, Gz, Ts, Gx, ..
 		 */
+		ext_sip = ext_sensor ? ext_sensor->sip : 0;
 		gyro_sip = gyro_sensor->sip;
 		acc_sip = acc_sensor->sip;
 		ts_sip = hw->ts_sip;
 		offset = 0;
 
-		while (acc_sip > 0 || gyro_sip > 0) {
+		while (acc_sip > 0 || gyro_sip > 0 || ext_sip > 0) {
 			if (gyro_sip > 0) {
 				memcpy(gyro_buff, &hw->buff[offset],
 				       ST_LSM6DSX_SAMPLE_SIZE);
@@ -407,6 +411,11 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
 				       ST_LSM6DSX_SAMPLE_SIZE);
 				offset += ST_LSM6DSX_SAMPLE_SIZE;
 			}
+			if (ext_sip > 0) {
+				memcpy(ext_buff, &hw->buff[offset],
+				       ST_LSM6DSX_SAMPLE_SIZE);
+				offset += ST_LSM6DSX_SAMPLE_SIZE;
+			}
 
 			if (ts_sip-- > 0) {
 				u8 data[ST_LSM6DSX_SAMPLE_SIZE];
@@ -440,6 +449,10 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
 				iio_push_to_buffers_with_timestamp(
 					hw->iio_devs[ST_LSM6DSX_ID_ACC],
 					acc_buff, acc_sensor->ts_ref + ts);
+			if (ext_sip-- > 0)
+				iio_push_to_buffers_with_timestamp(
+					hw->iio_devs[ST_LSM6DSX_ID_EXT0],
+					ext_buff, ext_sensor->ts_ref + ts);
 		}
 	}
 
@@ -638,12 +651,12 @@ int st_lsm6dsx_update_fifo(struct st_lsm6dsx_sensor *sensor, bool enable)
 		err = st_lsm6dsx_sensor_set_enable(sensor, enable);
 		if (err < 0)
 			goto out;
-
-		err = st_lsm6dsx_set_fifo_odr(sensor, enable);
-		if (err < 0)
-			goto out;
 	}
 
+	err = st_lsm6dsx_set_fifo_odr(sensor, enable);
+	if (err < 0)
+		goto out;
+
 	err = st_lsm6dsx_update_decimators(hw);
 	if (err < 0)
 		goto out;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index b921dd9..84d219a 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -54,6 +54,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/pm.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
 #include <linux/bitfield.h>
 
@@ -655,6 +656,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
 				.addr = 0x08,
 				.mask = GENMASK(5, 3),
 			},
+			[ST_LSM6DSX_ID_EXT0] = {
+				.addr = 0x09,
+				.mask = GENMASK(2, 0),
+			},
 		},
 		.fifo_ops = {
 			.update_fifo = st_lsm6dsx_update_fifo,
@@ -687,6 +692,39 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
 				.mask = GENMASK(5, 3),
 			},
 		},
+		.shub_settings = {
+			.page_mux = {
+				.addr = 0x01,
+				.mask = BIT(7),
+			},
+			.master_en = {
+				.addr = 0x1a,
+				.mask = BIT(0),
+			},
+			.pullup_en = {
+				.addr = 0x1a,
+				.mask = BIT(3),
+			},
+			.aux_sens = {
+				.addr = 0x04,
+				.mask = GENMASK(5, 4),
+			},
+			.wr_once = {
+				.addr = 0x07,
+				.mask = BIT(5),
+			},
+			.emb_func = {
+				.addr = 0x19,
+				.mask = BIT(2),
+			},
+			.num_ext_dev = 1,
+			.shub_out = {
+				.addr = 0x2e,
+			},
+			.slv0_addr = 0x02,
+			.dw_slv0_addr = 0x0e,
+			.pause = 0x7,
+		},
 		.event_settings = {
 			.enable_reg = {
 				.addr = 0x58,
@@ -867,10 +905,12 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
 				.mask = BIT(6),
 			},
 			.master_en = {
+				.sec_page = true,
 				.addr = 0x14,
 				.mask = BIT(2),
 			},
 			.pullup_en = {
+				.sec_page = true,
 				.addr = 0x14,
 				.mask = BIT(3),
 			},
@@ -882,7 +922,11 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
 				.addr = 0x14,
 				.mask = BIT(6),
 			},
-			.shub_out = 0x02,
+			.num_ext_dev = 3,
+			.shub_out = {
+				.sec_page = true,
+				.addr = 0x02,
+			},
 			.slv0_addr = 0x15,
 			.dw_slv0_addr = 0x21,
 			.batch_en = BIT(3),
@@ -1241,10 +1285,12 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
 				.mask = BIT(6),
 			},
 			.master_en = {
+				.sec_page = true,
 				.addr = 0x14,
 				.mask = BIT(2),
 			},
 			.pullup_en = {
+				.sec_page = true,
 				.addr = 0x14,
 				.mask = BIT(3),
 			},
@@ -1256,7 +1302,11 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
 				.addr = 0x14,
 				.mask = BIT(6),
 			},
-			.shub_out = 0x02,
+			.num_ext_dev = 3,
+			.shub_out = {
+				.sec_page = true,
+				.addr = 0x02,
+			},
 			.slv0_addr = 0x15,
 			.dw_slv0_addr = 0x21,
 			.batch_en = BIT(3),
@@ -1506,8 +1556,11 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
 	if (err < 0)
 		return err;
 
-	if (!hw->enable_event)
-		st_lsm6dsx_sensor_set_enable(sensor, false);
+	if (!hw->enable_event) {
+		err = st_lsm6dsx_sensor_set_enable(sensor, false);
+		if (err < 0)
+			return err;
+	}
 
 	*val = (s16)le16_to_cpu(data);
 
@@ -1609,11 +1662,11 @@ static int st_lsm6dsx_event_setup(struct st_lsm6dsx_hw *hw, int state)
 }
 
 static int st_lsm6dsx_read_event(struct iio_dev *iio_dev,
-				   const struct iio_chan_spec *chan,
-				   enum iio_event_type type,
-				   enum iio_event_direction dir,
-				   enum iio_event_info info,
-				   int *val, int *val2)
+				 const struct iio_chan_spec *chan,
+				 enum iio_event_type type,
+				 enum iio_event_direction dir,
+				 enum iio_event_info info,
+				 int *val, int *val2)
 {
 	struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
 	struct st_lsm6dsx_hw *hw = sensor->hw;
@@ -1827,14 +1880,14 @@ static const struct iio_info st_lsm6dsx_gyro_info = {
 	.hwfifo_set_watermark = st_lsm6dsx_set_watermark,
 };
 
-static int st_lsm6dsx_of_get_drdy_pin(struct st_lsm6dsx_hw *hw, int *drdy_pin)
+static int st_lsm6dsx_get_drdy_pin(struct st_lsm6dsx_hw *hw, int *drdy_pin)
 {
-	struct device_node *np = hw->dev->of_node;
+	struct device *dev = hw->dev;
 
-	if (!np)
+	if (!dev_fwnode(dev))
 		return -EINVAL;
 
-	return of_property_read_u32(np, "st,drdy-int-pin", drdy_pin);
+	return device_property_read_u32(dev, "st,drdy-int-pin", drdy_pin);
 }
 
 static int
@@ -1843,7 +1896,7 @@ st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw,
 {
 	int err = 0, drdy_pin;
 
-	if (st_lsm6dsx_of_get_drdy_pin(hw, &drdy_pin) < 0) {
+	if (st_lsm6dsx_get_drdy_pin(hw, &drdy_pin) < 0) {
 		struct st_sensors_platform_data *pdata;
 		struct device *dev = hw->dev;
 
@@ -1872,26 +1925,29 @@ st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw,
 static int st_lsm6dsx_init_shub(struct st_lsm6dsx_hw *hw)
 {
 	const struct st_lsm6dsx_shub_settings *hub_settings;
-	struct device_node *np = hw->dev->of_node;
 	struct st_sensors_platform_data *pdata;
+	struct device *dev = hw->dev;
 	unsigned int data;
 	int err = 0;
 
 	hub_settings = &hw->settings->shub_settings;
 
-	pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
-	if ((np && of_property_read_bool(np, "st,pullups")) ||
+	pdata = (struct st_sensors_platform_data *)dev->platform_data;
+	if ((dev_fwnode(dev) && device_property_read_bool(dev, "st,pullups")) ||
 	    (pdata && pdata->pullups)) {
-		err = st_lsm6dsx_set_page(hw, true);
-		if (err < 0)
-			return err;
+		if (hub_settings->pullup_en.sec_page) {
+			err = st_lsm6dsx_set_page(hw, true);
+			if (err < 0)
+				return err;
+		}
 
 		data = ST_LSM6DSX_SHIFT_VAL(1, hub_settings->pullup_en.mask);
 		err = regmap_update_bits(hw->regmap,
 					 hub_settings->pullup_en.addr,
 					 hub_settings->pullup_en.mask, data);
 
-		st_lsm6dsx_set_page(hw, false);
+		if (hub_settings->pullup_en.sec_page)
+			st_lsm6dsx_set_page(hw, false);
 
 		if (err < 0)
 			return err;
@@ -1909,6 +1965,16 @@ static int st_lsm6dsx_init_shub(struct st_lsm6dsx_hw *hw)
 					 hub_settings->aux_sens.mask, data);
 
 		st_lsm6dsx_set_page(hw, false);
+
+		if (err < 0)
+			return err;
+	}
+
+	if (hub_settings->emb_func.addr) {
+		data = ST_LSM6DSX_SHIFT_VAL(1, hub_settings->emb_func.mask);
+		err = regmap_update_bits(hw->regmap,
+					 hub_settings->emb_func.addr,
+					 hub_settings->emb_func.mask, data);
 	}
 
 	return err;
@@ -2158,9 +2224,9 @@ static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
 
 static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw)
 {
-	struct device_node *np = hw->dev->of_node;
 	struct st_sensors_platform_data *pdata;
 	const struct st_lsm6dsx_reg *reg;
+	struct device *dev = hw->dev;
 	unsigned long irq_type;
 	bool irq_active_low;
 	int err;
@@ -2188,8 +2254,8 @@ static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw)
 	if (err < 0)
 		return err;
 
-	pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
-	if ((np && of_property_read_bool(np, "drive-open-drain")) ||
+	pdata = (struct st_sensors_platform_data *)dev->platform_data;
+	if ((dev_fwnode(dev) && device_property_read_bool(dev, "drive-open-drain")) ||
 	    (pdata && pdata->open_drain)) {
 		reg = &hw->settings->irq_config.od;
 		err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
@@ -2219,7 +2285,6 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
 {
 	struct st_sensors_platform_data *pdata = dev->platform_data;
 	const struct st_lsm6dsx_shub_settings *hub_settings;
-	struct device_node *np = dev->of_node;
 	struct st_lsm6dsx_hw *hw;
 	const char *name = NULL;
 	int i, err;
@@ -2273,6 +2338,10 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
 			return err;
 	}
 
+	err = iio_read_mount_matrix(hw->dev, "mount-matrix", &hw->orientation);
+	if (err)
+		return err;
+
 	for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
 		if (!hw->iio_devs[i])
 			continue;
@@ -2282,7 +2351,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
 			return err;
 	}
 
-	if ((np && of_property_read_bool(np, "wakeup-source")) ||
+	if ((dev_fwnode(dev) && device_property_read_bool(dev, "wakeup-source")) ||
 	    (pdata && pdata->wakeup_source))
 		device_init_wakeup(dev, true);
 
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index cd47ec1..0fb3213 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -12,7 +12,6 @@
 #include <linux/module.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
-#include <linux/of.h>
 #include <linux/regmap.h>
 
 #include "st_lsm6dsx.h"
@@ -122,7 +121,7 @@ static struct i2c_driver st_lsm6dsx_driver = {
 	.driver = {
 		.name = "st_lsm6dsx_i2c",
 		.pm = &st_lsm6dsx_pm_ops,
-		.of_match_table = of_match_ptr(st_lsm6dsx_i2c_of_match),
+		.of_match_table = st_lsm6dsx_i2c_of_match,
 	},
 	.probe = st_lsm6dsx_i2c_probe,
 	.id_table = st_lsm6dsx_i2c_id_table,
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index fa5d100..eea5556 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -30,7 +30,6 @@
 
 #include "st_lsm6dsx.h"
 
-#define ST_LSM6DSX_MAX_SLV_NUM			3
 #define ST_LSM6DSX_SLV_ADDR(n, base)		((base) + (n) * 3)
 #define ST_LSM6DSX_SLV_SUB_ADDR(n, base)	((base) + 1 + (n) * 3)
 #define ST_LSM6DSX_SLV_CONFIG(n, base)		((base) + 2 + (n) * 3)
@@ -102,24 +101,31 @@ static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
 }
 
 /**
- * st_lsm6dsx_shub_read_reg - read i2c controller register
+ * st_lsm6dsx_shub_read_output - read i2c controller register
  *
  * Read st_lsm6dsx i2c controller register
  */
-static int st_lsm6dsx_shub_read_reg(struct st_lsm6dsx_hw *hw, u8 addr,
-				    u8 *data, int len)
+static int
+st_lsm6dsx_shub_read_output(struct st_lsm6dsx_hw *hw, u8 *data,
+			    int len)
 {
+	const struct st_lsm6dsx_shub_settings *hub_settings;
 	int err;
 
 	mutex_lock(&hw->page_lock);
 
-	err = st_lsm6dsx_set_page(hw, true);
-	if (err < 0)
-		goto out;
+	hub_settings = &hw->settings->shub_settings;
+	if (hub_settings->shub_out.sec_page) {
+		err = st_lsm6dsx_set_page(hw, true);
+		if (err < 0)
+			goto out;
+	}
 
-	err = regmap_bulk_read(hw->regmap, addr, data, len);
+	err = regmap_bulk_read(hw->regmap, hub_settings->shub_out.addr,
+			       data, len);
 
-	st_lsm6dsx_set_page(hw, false);
+	if (hub_settings->shub_out.sec_page)
+		st_lsm6dsx_set_page(hw, false);
 out:
 	mutex_unlock(&hw->page_lock);
 
@@ -186,15 +192,18 @@ static int st_lsm6dsx_shub_master_enable(struct st_lsm6dsx_sensor *sensor,
 	mutex_lock(&hw->page_lock);
 
 	hub_settings = &hw->settings->shub_settings;
-	err = st_lsm6dsx_set_page(hw, true);
-	if (err < 0)
-		goto out;
+	if (hub_settings->master_en.sec_page) {
+		err = st_lsm6dsx_set_page(hw, true);
+		if (err < 0)
+			goto out;
+	}
 
 	data = ST_LSM6DSX_SHIFT_VAL(enable, hub_settings->master_en.mask);
 	err = regmap_update_bits(hw->regmap, hub_settings->master_en.addr,
 				 hub_settings->master_en.mask, data);
 
-	st_lsm6dsx_set_page(hw, false);
+	if (hub_settings->master_en.sec_page)
+		st_lsm6dsx_set_page(hw, false);
 out:
 	mutex_unlock(&hw->page_lock);
 
@@ -212,16 +221,21 @@ st_lsm6dsx_shub_read(struct st_lsm6dsx_sensor *sensor, u8 addr,
 		     u8 *data, int len)
 {
 	const struct st_lsm6dsx_shub_settings *hub_settings;
+	u8 config[3], slv_addr, slv_config = 0;
 	struct st_lsm6dsx_hw *hw = sensor->hw;
-	u8 config[3], slv_addr;
+	const struct st_lsm6dsx_reg *aux_sens;
 	int err;
 
 	hub_settings = &hw->settings->shub_settings;
 	slv_addr = ST_LSM6DSX_SLV_ADDR(0, hub_settings->slv0_addr);
+	aux_sens = &hw->settings->shub_settings.aux_sens;
+	/* do not overwrite aux_sens */
+	if (slv_addr + 2 == aux_sens->addr)
+		slv_config = ST_LSM6DSX_SHIFT_VAL(3, aux_sens->mask);
 
 	config[0] = (sensor->ext_info.addr << 1) | 1;
 	config[1] = addr;
-	config[2] = len & ST_LS6DSX_READ_OP_MASK;
+	config[2] = (len & ST_LS6DSX_READ_OP_MASK) | slv_config;
 
 	err = st_lsm6dsx_shub_write_reg(hw, slv_addr, config,
 					sizeof(config));
@@ -234,12 +248,14 @@ st_lsm6dsx_shub_read(struct st_lsm6dsx_sensor *sensor, u8 addr,
 
 	st_lsm6dsx_shub_wait_complete(hw);
 
-	err = st_lsm6dsx_shub_read_reg(hw, hub_settings->shub_out, data,
-				       len & ST_LS6DSX_READ_OP_MASK);
+	err = st_lsm6dsx_shub_read_output(hw, data,
+					  len & ST_LS6DSX_READ_OP_MASK);
 
 	st_lsm6dsx_shub_master_enable(sensor, false);
 
-	memset(config, 0, sizeof(config));
+	config[0] = hub_settings->pause;
+	config[1] = 0;
+	config[2] = slv_config;
 	return st_lsm6dsx_shub_write_reg(hw, slv_addr, config,
 					 sizeof(config));
 }
@@ -296,7 +312,8 @@ st_lsm6dsx_shub_write(struct st_lsm6dsx_sensor *sensor, u8 addr,
 		st_lsm6dsx_shub_master_enable(sensor, false);
 	}
 
-	memset(config, 0, sizeof(config));
+	config[0] = hub_settings->pause;
+	config[1] = 0;
 	return st_lsm6dsx_shub_write_reg(hw, slv_addr, config, sizeof(config));
 }
 
@@ -688,14 +705,19 @@ st_lsm6dsx_shub_check_wai(struct st_lsm6dsx_hw *hw, u8 *i2c_addr,
 			  const struct st_lsm6dsx_ext_dev_settings *settings)
 {
 	const struct st_lsm6dsx_shub_settings *hub_settings;
+	u8 config[3], data, slv_addr, slv_config = 0;
+	const struct st_lsm6dsx_reg *aux_sens;
 	struct st_lsm6dsx_sensor *sensor;
-	u8 config[3], data, slv_addr;
 	bool found = false;
 	int i, err;
 
-	hub_settings = &hw->settings->shub_settings;
-	slv_addr = ST_LSM6DSX_SLV_ADDR(0, hub_settings->slv0_addr);
 	sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
+	hub_settings = &hw->settings->shub_settings;
+	aux_sens = &hw->settings->shub_settings.aux_sens;
+	slv_addr = ST_LSM6DSX_SLV_ADDR(0, hub_settings->slv0_addr);
+	/* do not overwrite aux_sens */
+	if (slv_addr + 2 == aux_sens->addr)
+		slv_config = ST_LSM6DSX_SHIFT_VAL(3, aux_sens->mask);
 
 	for (i = 0; i < ARRAY_SIZE(settings->i2c_addr); i++) {
 		if (!settings->i2c_addr[i])
@@ -704,7 +726,7 @@ st_lsm6dsx_shub_check_wai(struct st_lsm6dsx_hw *hw, u8 *i2c_addr,
 		/* read wai slave register */
 		config[0] = (settings->i2c_addr[i] << 1) | 0x1;
 		config[1] = settings->wai.addr;
-		config[2] = 0x1;
+		config[2] = 0x1 | slv_config;
 
 		err = st_lsm6dsx_shub_write_reg(hw, slv_addr, config,
 						sizeof(config));
@@ -717,9 +739,7 @@ st_lsm6dsx_shub_check_wai(struct st_lsm6dsx_hw *hw, u8 *i2c_addr,
 
 		st_lsm6dsx_shub_wait_complete(hw);
 
-		err = st_lsm6dsx_shub_read_reg(hw,
-					       hub_settings->shub_out,
-					       &data, sizeof(data));
+		err = st_lsm6dsx_shub_read_output(hw, &data, sizeof(data));
 
 		st_lsm6dsx_shub_master_enable(sensor, false);
 
@@ -735,7 +755,9 @@ st_lsm6dsx_shub_check_wai(struct st_lsm6dsx_hw *hw, u8 *i2c_addr,
 	}
 
 	/* reset SLV0 channel */
-	memset(config, 0, sizeof(config));
+	config[0] = hub_settings->pause;
+	config[1] = 0;
+	config[2] = slv_config;
 	err = st_lsm6dsx_shub_write_reg(hw, slv_addr, config,
 					sizeof(config));
 	if (err < 0)
@@ -770,7 +792,7 @@ int st_lsm6dsx_shub_probe(struct st_lsm6dsx_hw *hw, const char *name)
 		if (err < 0)
 			return err;
 
-		if (++num_ext_dev >= ST_LSM6DSX_MAX_SLV_NUM)
+		if (++num_ext_dev >= hw->settings->shub_settings.num_ext_dev)
 			break;
 		id++;
 	}
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index 67ff36e..eb1086e 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -12,7 +12,6 @@
 #include <linux/module.h>
 #include <linux/spi/spi.h>
 #include <linux/slab.h>
-#include <linux/of.h>
 #include <linux/regmap.h>
 
 #include "st_lsm6dsx.h"
@@ -122,7 +121,7 @@ static struct spi_driver st_lsm6dsx_driver = {
 	.driver = {
 		.name = "st_lsm6dsx_spi",
 		.pm = &st_lsm6dsx_pm_ops,
-		.of_match_table = of_match_ptr(st_lsm6dsx_spi_of_match),
+		.of_match_table = st_lsm6dsx_spi_of_match,
 	},
 	.probe = st_lsm6dsx_spi_probe,
 	.id_table = st_lsm6dsx_spi_id_table,
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 112225c..4ada559 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -87,7 +87,7 @@ static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
 }
 
 /**
- * iio_buffer_read_first_n_outer() - chrdev read for buffer access
+ * iio_buffer_read_outer() - chrdev read for buffer access
  * @filp:	File structure pointer for the char device
  * @buf:	Destination buffer for iio buffer read
  * @n:		First n bytes to read
@@ -99,8 +99,8 @@ static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
  * Return: negative values corresponding to error codes or ret != 0
  *	   for ending the reading activity
  **/
-ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
-				      size_t n, loff_t *f_ps)
+ssize_t iio_buffer_read_outer(struct file *filp, char __user *buf,
+			      size_t n, loff_t *f_ps)
 {
 	struct iio_dev *indio_dev = filp->private_data;
 	struct iio_buffer *rb = indio_dev->buffer;
@@ -112,7 +112,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 	if (!indio_dev->info)
 		return -ENODEV;
 
-	if (!rb || !rb->access->read_first_n)
+	if (!rb || !rb->access->read)
 		return -EINVAL;
 
 	datum_size = rb->bytes_per_datum;
@@ -147,7 +147,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 			continue;
 		}
 
-		ret = rb->access->read_first_n(rb, n, buf);
+		ret = rb->access->read(rb, n, buf);
 		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
 			ret = -EAGAIN;
 	} while (ret == 0);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index a46cdf2..65ff0d0 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -161,6 +161,7 @@ static const char * const iio_chan_info_postfix[] = {
 	[IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
 	[IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
 	[IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
+	[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
 };
 
 /**
@@ -596,6 +597,8 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
 		}
 		return l;
 	}
+	case IIO_VAL_CHAR:
+		return snprintf(buf, len, "%c", (char)vals[0]);
 	default:
 		return 0;
 	}
@@ -837,7 +840,8 @@ static ssize_t iio_write_channel_info(struct device *dev,
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 	int ret, fract_mult = 100000;
-	int integer, fract;
+	int integer, fract = 0;
+	bool is_char = false;
 
 	/* Assumes decimal - precision based on number of digits */
 	if (!indio_dev->info->write_raw)
@@ -855,13 +859,24 @@ static ssize_t iio_write_channel_info(struct device *dev,
 		case IIO_VAL_INT_PLUS_NANO:
 			fract_mult = 100000000;
 			break;
+		case IIO_VAL_CHAR:
+			is_char = true;
+			break;
 		default:
 			return -EINVAL;
 		}
 
-	ret = iio_str_to_fixpoint(buf, fract_mult, &integer, &fract);
-	if (ret)
-		return ret;
+	if (is_char) {
+		char ch;
+
+		if (sscanf(buf, "%c", &ch) != 1)
+			return -EINVAL;
+		integer = ch;
+	} else {
+		ret = iio_str_to_fixpoint(buf, fract_mult, &integer, &fract);
+		if (ret)
+			return ret;
+	}
 
 	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
 					 integer, fract, this_attr->address);
@@ -1617,7 +1632,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 }
 
 static const struct file_operations iio_buffer_fileops = {
-	.read = iio_buffer_read_first_n_outer_addr,
+	.read = iio_buffer_read_outer_addr,
 	.release = iio_chrdev_release,
 	.open = iio_chrdev_open,
 	.poll = iio_buffer_poll_addr,
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index c5dfb9a..52f86bc 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -15,7 +15,6 @@
 #include <linux/mutex.h>
 #include <linux/err.h>
 #include <linux/irq.h>
-#include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
@@ -24,7 +23,6 @@
 #include <linux/iio/events.h>
 #include <linux/iio/kfifo_buf.h>
 #include <linux/iio/sysfs.h>
-#include <linux/of_gpio.h>
 
 #define APDS9960_REGMAP_NAME	"apds9960_regmap"
 #define APDS9960_DRV_NAME	"apds9960"
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index d85a391..7a838e2 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -14,7 +14,6 @@
 #include <linux/iio/triggered_buffer.h>
 #include <linux/iio/trigger_consumer.h>
 #include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index 6733b52..bc196c2 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -742,7 +742,7 @@ static int lm3533_als_set_resistor(struct lm3533_als *als, u8 val)
 	if (val < LM3533_ALS_RESISTOR_MIN || val > LM3533_ALS_RESISTOR_MAX) {
 		dev_err(&als->pdev->dev, "invalid resistor value\n");
 		return -EINVAL;
-	};
+	}
 
 	ret = lm3533_write(als->lm3533, LM3533_REG_ALS_RESISTOR_SELECT, val);
 	if (ret) {
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 982bba0..0476c2b 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -17,7 +17,6 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/irq.h>
-#include <linux/gpio.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
diff --git a/drivers/iio/light/st_uvis25_i2c.c b/drivers/iio/light/st_uvis25_i2c.c
index dacbac6..4889bbe 100644
--- a/drivers/iio/light/st_uvis25_i2c.c
+++ b/drivers/iio/light/st_uvis25_i2c.c
@@ -9,7 +9,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/acpi.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/regmap.h>
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 893bec5..3c88154 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -16,8 +16,7 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/bitops.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/acpi.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
@@ -29,8 +28,6 @@
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
 
-#include <linux/iio/magnetometer/ak8975.h>
-
 /*
  * Register definitions, as well as various shifts and masks to get at the
  * individual fields of the registers.
@@ -206,11 +203,11 @@ static long ak09912_raw_to_gauss(u16 data)
 
 /* Compatible Asahi Kasei Compass parts */
 enum asahi_compass_chipset {
+	AKXXXX		= 0,
 	AK8975,
 	AK8963,
 	AK09911,
 	AK09912,
-	AK_MAX_TYPE
 };
 
 enum ak_ctrl_reg_addr {
@@ -248,7 +245,7 @@ struct ak_def {
 	u8 data_regs[3];
 };
 
-static const struct ak_def ak_def_array[AK_MAX_TYPE] = {
+static const struct ak_def ak_def_array[] = {
 	{
 		.type = AK8975,
 		.raw_to_gauss = ak8975_raw_to_gauss,
@@ -360,7 +357,7 @@ struct ak8975_data {
 	struct mutex		lock;
 	u8			asa[3];
 	long			raw_to_gauss[3];
-	int			eoc_gpio;
+	struct gpio_desc	*eoc_gpiod;
 	int			eoc_irq;
 	wait_queue_head_t	data_ready_queue;
 	unsigned long		flags;
@@ -498,15 +495,13 @@ static int ak8975_setup_irq(struct ak8975_data *data)
 	if (client->irq)
 		irq = client->irq;
 	else
-		irq = gpio_to_irq(data->eoc_gpio);
+		irq = gpiod_to_irq(data->eoc_gpiod);
 
 	rc = devm_request_irq(&client->dev, irq, ak8975_irq_handler,
 			      IRQF_TRIGGER_RISING | IRQF_ONESHOT,
 			      dev_name(&client->dev), data);
 	if (rc < 0) {
-		dev_err(&client->dev,
-			"irq %d request failed, (gpio %d): %d\n",
-			irq, data->eoc_gpio, rc);
+		dev_err(&client->dev, "irq %d request failed: %d\n", irq, rc);
 		return rc;
 	}
 
@@ -549,7 +544,7 @@ static int ak8975_setup(struct i2c_client *client)
 		return ret;
 	}
 
-	if (data->eoc_gpio > 0 || client->irq > 0) {
+	if (data->eoc_gpiod || client->irq > 0) {
 		ret = ak8975_setup_irq(data);
 		if (ret < 0) {
 			dev_err(&client->dev,
@@ -574,7 +569,7 @@ static int wait_conversion_complete_gpio(struct ak8975_data *data)
 	/* Wait for the conversion to complete. */
 	while (timeout_ms) {
 		msleep(AK8975_CONVERSION_DONE_POLL_TIME);
-		if (gpio_get_value(data->eoc_gpio))
+		if (gpiod_get_value(data->eoc_gpiod))
 			break;
 		timeout_ms -= AK8975_CONVERSION_DONE_POLL_TIME;
 	}
@@ -646,7 +641,7 @@ static int ak8975_start_read_axis(struct ak8975_data *data,
 	/* Wait for the conversion to complete. */
 	if (data->eoc_irq)
 		ret = wait_conversion_complete_interrupt(data);
-	else if (gpio_is_valid(data->eoc_gpio))
+	else if (data->eoc_gpiod)
 		ret = wait_conversion_complete_gpio(data);
 	else
 		ret = wait_conversion_complete_polled(data);
@@ -786,19 +781,6 @@ static const struct acpi_device_id ak_acpi_match[] = {
 MODULE_DEVICE_TABLE(acpi, ak_acpi_match);
 #endif
 
-static const char *ak8975_match_acpi_device(struct device *dev,
-					    enum asahi_compass_chipset *chipset)
-{
-	const struct acpi_device_id *id;
-
-	id = acpi_match_device(dev->driver->acpi_match_table, dev);
-	if (!id)
-		return NULL;
-	*chipset = (int)id->driver_data;
-
-	return dev_name(dev);
-}
-
 static void ak8975_fill_buffer(struct iio_dev *indio_dev)
 {
 	struct ak8975_data *data = iio_priv(indio_dev);
@@ -856,36 +838,23 @@ static int ak8975_probe(struct i2c_client *client,
 {
 	struct ak8975_data *data;
 	struct iio_dev *indio_dev;
-	int eoc_gpio;
+	struct gpio_desc *eoc_gpiod;
+	const void *match;
+	unsigned int i;
 	int err;
+	enum asahi_compass_chipset chipset;
 	const char *name = NULL;
-	enum asahi_compass_chipset chipset = AK_MAX_TYPE;
-	const struct ak8975_platform_data *pdata =
-		dev_get_platdata(&client->dev);
 
-	/* Grab and set up the supplied GPIO. */
-	if (pdata)
-		eoc_gpio = pdata->eoc_gpio;
-	else if (client->dev.of_node)
-		eoc_gpio = of_get_gpio(client->dev.of_node, 0);
-	else
-		eoc_gpio = -1;
-
-	if (eoc_gpio == -EPROBE_DEFER)
-		return -EPROBE_DEFER;
-
-	/* We may not have a GPIO based IRQ to scan, that is fine, we will
-	   poll if so */
-	if (gpio_is_valid(eoc_gpio)) {
-		err = devm_gpio_request_one(&client->dev, eoc_gpio,
-							GPIOF_IN, "ak_8975");
-		if (err < 0) {
-			dev_err(&client->dev,
-				"failed to request GPIO %d, error %d\n",
-							eoc_gpio, err);
-			return err;
-		}
-	}
+	/*
+	 * Grab and set up the supplied GPIO.
+	 * We may not have a GPIO based IRQ to scan, that is fine, we will
+	 * poll if so.
+	 */
+	eoc_gpiod = devm_gpiod_get_optional(&client->dev, NULL, GPIOD_IN);
+	if (IS_ERR(eoc_gpiod))
+		return PTR_ERR(eoc_gpiod);
+	if (eoc_gpiod)
+		gpiod_set_consumer_name(eoc_gpiod, "ak_8975");
 
 	/* Register with IIO */
 	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
@@ -896,35 +865,35 @@ static int ak8975_probe(struct i2c_client *client,
 	i2c_set_clientdata(client, indio_dev);
 
 	data->client = client;
-	data->eoc_gpio = eoc_gpio;
+	data->eoc_gpiod = eoc_gpiod;
 	data->eoc_irq = 0;
 
-	if (!pdata) {
-		err = iio_read_mount_matrix(&client->dev, "mount-matrix",
-					    &data->orientation);
-		if (err)
-			return err;
-	} else
-		data->orientation = pdata->orientation;
+	err = iio_read_mount_matrix(&client->dev, "mount-matrix", &data->orientation);
+	if (err)
+		return err;
 
 	/* id will be NULL when enumerated via ACPI */
-	if (id) {
+	match = device_get_match_data(&client->dev);
+	if (match) {
+		chipset = (enum asahi_compass_chipset)(match);
+		name = dev_name(&client->dev);
+	} else if (id) {
 		chipset = (enum asahi_compass_chipset)(id->driver_data);
 		name = id->name;
-	} else if (ACPI_HANDLE(&client->dev)) {
-		name = ak8975_match_acpi_device(&client->dev, &chipset);
-		if (!name)
-			return -ENODEV;
 	} else
 		return -ENOSYS;
 
-	if (chipset >= AK_MAX_TYPE) {
+	for (i = 0; i < ARRAY_SIZE(ak_def_array); i++)
+		if (ak_def_array[i].type == chipset)
+			break;
+
+	if (i == ARRAY_SIZE(ak_def_array)) {
 		dev_err(&client->dev, "AKM device type unsupported: %d\n",
 			chipset);
 		return -ENODEV;
 	}
 
-	data->def = &ak_def_array[chipset];
+	data->def = &ak_def_array[i];
 
 	/* Fetch the regulators */
 	data->vdd = devm_regulator_get(&client->dev, "vdd");
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index fdba480..c6bb4ce 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -17,7 +17,6 @@
 #include <linux/iio/common/st_sensors_i2c.h>
 #include "st_magn.h"
 
-#ifdef CONFIG_OF
 static const struct of_device_id st_magn_of_match[] = {
 	{
 		.compatible = "st,lsm303dlh-magn",
@@ -50,9 +49,6 @@ static const struct of_device_id st_magn_of_match[] = {
 	{},
 };
 MODULE_DEVICE_TABLE(of, st_magn_of_match);
-#else
-#define st_magn_of_match NULL
-#endif
 
 static int st_magn_i2c_probe(struct i2c_client *client,
 			     const struct i2c_device_id *id)
@@ -62,8 +58,7 @@ static int st_magn_i2c_probe(struct i2c_client *client,
 	struct iio_dev *indio_dev;
 	int err;
 
-	st_sensors_of_name_probe(&client->dev, st_magn_of_match,
-				 client->name, sizeof(client->name));
+	st_sensors_dev_name_probe(&client->dev, client->name, sizeof(client->name));
 
 	settings = st_magn_get_settings(client->name);
 	if (!settings) {
@@ -113,7 +108,7 @@ MODULE_DEVICE_TABLE(i2c, st_magn_id_table);
 static struct i2c_driver st_magn_driver = {
 	.driver = {
 		.name = "st-magn-i2c",
-		.of_match_table = of_match_ptr(st_magn_of_match),
+		.of_match_table = st_magn_of_match,
 	},
 	.probe = st_magn_i2c_probe,
 	.remove = st_magn_i2c_remove,
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index fbf909b..3d08d74 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -17,7 +17,6 @@
 #include <linux/iio/common/st_sensors_spi.h>
 #include "st_magn.h"
 
-#ifdef CONFIG_OF
 /*
  * For new single-chip sensors use <device_name> as compatible string.
  * For old single-chip devices keep <device_name>-magn to maintain
@@ -45,9 +44,6 @@ static const struct of_device_id st_magn_of_match[] = {
 	{}
 };
 MODULE_DEVICE_TABLE(of, st_magn_of_match);
-#else
-#define st_magn_of_match	NULL
-#endif
 
 static int st_magn_spi_probe(struct spi_device *spi)
 {
@@ -56,8 +52,7 @@ static int st_magn_spi_probe(struct spi_device *spi)
 	struct iio_dev *indio_dev;
 	int err;
 
-	st_sensors_of_name_probe(&spi->dev, st_magn_of_match,
-				 spi->modalias, sizeof(spi->modalias));
+	st_sensors_dev_name_probe(&spi->dev, spi->modalias, sizeof(spi->modalias));
 
 	settings = st_magn_get_settings(spi->modalias);
 	if (!settings) {
@@ -104,7 +99,7 @@ MODULE_DEVICE_TABLE(spi, st_magn_id_table);
 static struct spi_driver st_magn_driver = {
 	.driver = {
 		.name = "st-magn-spi",
-		.of_match_table = of_match_ptr(st_magn_of_match),
+		.of_match_table = st_magn_of_match,
 	},
 	.probe = st_magn_spi_probe,
 	.remove = st_magn_spi_remove,
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index ba420e4..9c2d9bf 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -53,6 +53,18 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called cros_ec_baro.
 
+config DLHL60D
+	tristate "All Sensors DLHL60D and DLHL60G low voltage digital pressure sensors"
+	depends on I2C
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	help
+	  Say yes here to build support for the All Sensors DLH series
+	  pressure sensor driver.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called dlhl60d.
+
 config DPS310
 	tristate "Infineon DPS310 pressure and temperature sensor"
 	depends on I2C
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index d8f5ace..5a79192 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -9,6 +9,7 @@
 bmp280-objs := bmp280-core.o bmp280-regmap.o
 obj-$(CONFIG_BMP280_I2C) += bmp280-i2c.o
 obj-$(CONFIG_BMP280_SPI) += bmp280-spi.o
+obj-$(CONFIG_DLHL60D) += dlhl60d.o
 obj-$(CONFIG_DPS310) += dps310.o
 obj-$(CONFIG_IIO_CROS_EC_BARO) += cros_ec_baro.o
 obj-$(CONFIG_HID_SENSOR_PRESS)   += hid-sensor-press.o
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index 3109c8e..8b03ea1 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -1,8 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 #include <linux/module.h>
 #include <linux/i2c.h>
-#include <linux/acpi.h>
-#include <linux/of.h>
 #include <linux/regmap.h>
 
 #include "bmp280.h"
@@ -38,16 +36,6 @@ static int bmp280_i2c_probe(struct i2c_client *client,
 				   client->irq);
 }
 
-static const struct acpi_device_id bmp280_acpi_i2c_match[] = {
-	{"BMP0280", BMP280_CHIP_ID },
-	{"BMP0180", BMP180_CHIP_ID },
-	{"BMP0085", BMP180_CHIP_ID },
-	{"BME0280", BME280_CHIP_ID },
-	{ },
-};
-MODULE_DEVICE_TABLE(acpi, bmp280_acpi_i2c_match);
-
-#ifdef CONFIG_OF
 static const struct of_device_id bmp280_of_i2c_match[] = {
 	{ .compatible = "bosch,bme280", .data = (void *)BME280_CHIP_ID },
 	{ .compatible = "bosch,bmp280", .data = (void *)BMP280_CHIP_ID },
@@ -56,9 +44,6 @@ static const struct of_device_id bmp280_of_i2c_match[] = {
 	{ },
 };
 MODULE_DEVICE_TABLE(of, bmp280_of_i2c_match);
-#else
-#define bmp280_of_i2c_match NULL
-#endif
 
 static const struct i2c_device_id bmp280_i2c_id[] = {
 	{"bmp280", BMP280_CHIP_ID },
@@ -72,8 +57,7 @@ MODULE_DEVICE_TABLE(i2c, bmp280_i2c_id);
 static struct i2c_driver bmp280_i2c_driver = {
 	.driver = {
 		.name	= "bmp280",
-		.acpi_match_table = ACPI_PTR(bmp280_acpi_i2c_match),
-		.of_match_table = of_match_ptr(bmp280_of_i2c_match),
+		.of_match_table = bmp280_of_i2c_match,
 		.pm = &bmp280_dev_pm_ops,
 	},
 	.probe		= bmp280_i2c_probe,
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index 52f53f3..b521beb 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -14,7 +14,6 @@
 #include <linux/iio/triggered_buffer.h>
 #include <linux/iio/trigger_consumer.h>
 #include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/platform_data/cros_ec_commands.h>
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
new file mode 100644
index 0000000..b8c99e7
--- /dev/null
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * All Sensors DLH series low voltage digital pressure sensors
+ *
+ * Copyright (c) 2019 AVL DiTEST GmbH
+ *   Tomislav Denis <tomislav.denis@avl.com>
+ *
+ * Datasheet: http://www.allsensors.com/cad/DS-0355_Rev_B.PDF
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <asm/unaligned.h>
+
+/* Commands */
+#define DLH_START_SINGLE    0xAA
+
+/* Status bits */
+#define DLH_STATUS_OK       0x40
+
+/* DLH  data format */
+#define DLH_NUM_READ_BYTES  7
+#define DLH_NUM_DATA_BYTES  3
+#define DLH_NUM_PR_BITS     24
+#define DLH_NUM_TEMP_BITS   24
+
+/* DLH  timings */
+#define DLH_SINGLE_DUT_MS   5
+
+enum dhl_ids {
+	dlhl60d,
+	dlhl60g,
+};
+
+struct dlh_info {
+	u8 osdig;           /* digital offset factor */
+	unsigned int fss;   /* full scale span (inch H2O) */
+};
+
+struct dlh_state {
+	struct i2c_client *client;
+	struct dlh_info info;
+	bool use_interrupt;
+	struct completion completion;
+	u8 rx_buf[DLH_NUM_READ_BYTES] ____cacheline_aligned;
+};
+
+static struct dlh_info dlh_info_tbl[] = {
+	[dlhl60d] = {
+		.osdig = 2,
+		.fss = 120,
+	},
+	[dlhl60g] = {
+		.osdig = 10,
+		.fss = 60,
+	},
+};
+
+
+static int dlh_cmd_start_single(struct dlh_state *st)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte(st->client, DLH_START_SINGLE);
+	if (ret)
+		dev_err(&st->client->dev,
+			"%s: I2C write byte failed\n", __func__);
+
+	return ret;
+}
+
+static int dlh_cmd_read_data(struct dlh_state *st)
+{
+	int ret;
+
+	ret = i2c_master_recv(st->client, st->rx_buf, DLH_NUM_READ_BYTES);
+	if (ret < 0) {
+		dev_err(&st->client->dev,
+			"%s: I2C read block failed\n", __func__);
+		return ret;
+	}
+
+	if (st->rx_buf[0] != DLH_STATUS_OK) {
+		dev_err(&st->client->dev,
+			"%s: invalid status 0x%02x\n", __func__, st->rx_buf[0]);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int dlh_start_capture_and_read(struct dlh_state *st)
+{
+	int ret;
+
+	if (st->use_interrupt)
+		reinit_completion(&st->completion);
+
+	ret = dlh_cmd_start_single(st);
+	if (ret)
+		return ret;
+
+	if (st->use_interrupt) {
+		ret = wait_for_completion_timeout(&st->completion,
+			msecs_to_jiffies(DLH_SINGLE_DUT_MS));
+		if (!ret) {
+			dev_err(&st->client->dev,
+				"%s: conversion timed out\n", __func__);
+			return -ETIMEDOUT;
+		}
+	} else {
+		mdelay(DLH_SINGLE_DUT_MS);
+	}
+
+	return dlh_cmd_read_data(st);
+}
+
+static int dlh_read_direct(struct dlh_state *st,
+	unsigned int *pressure, unsigned int *temperature)
+{
+	int ret;
+
+	ret = dlh_start_capture_and_read(st);
+	if (ret)
+		return ret;
+
+	*pressure = get_unaligned_be32(&st->rx_buf[1]) >> 8;
+	*temperature = get_unaligned_be32(&st->rx_buf[3]) &
+		GENMASK(DLH_NUM_TEMP_BITS - 1, 0);
+
+	return 0;
+}
+
+static int dlh_read_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *channel, int *value,
+	int *value2, long mask)
+{
+	struct dlh_state *st = iio_priv(indio_dev);
+	unsigned int pressure, temperature;
+	int ret;
+	s64 tmp;
+	s32 rem;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = iio_device_claim_direct_mode(indio_dev);
+		if (ret)
+			return ret;
+
+		ret = dlh_read_direct(st, &pressure, &temperature);
+		iio_device_release_direct_mode(indio_dev);
+		if (ret)
+			return ret;
+
+		switch (channel->type) {
+		case IIO_PRESSURE:
+			*value = pressure;
+			return IIO_VAL_INT;
+
+		case IIO_TEMP:
+			*value = temperature;
+			return IIO_VAL_INT;
+
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_SCALE:
+		switch (channel->type) {
+		case IIO_PRESSURE:
+			tmp = div_s64(125LL * st->info.fss * 24909 * 100,
+				1 << DLH_NUM_PR_BITS);
+			tmp = div_s64_rem(tmp, 1000000000LL, &rem);
+			*value = tmp;
+			*value2 = rem;
+			return IIO_VAL_INT_PLUS_NANO;
+
+		case IIO_TEMP:
+			*value = 125 * 1000;
+			*value2 = DLH_NUM_TEMP_BITS;
+			return IIO_VAL_FRACTIONAL_LOG2;
+
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_OFFSET:
+		switch (channel->type) {
+		case IIO_PRESSURE:
+			*value = -125 * st->info.fss * 24909;
+			*value2 = 100 * st->info.osdig * 100000;
+			return IIO_VAL_FRACTIONAL;
+
+		case IIO_TEMP:
+			*value = -40 * 1000;
+			return IIO_VAL_INT;
+
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return -EINVAL;
+}
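(A rough check of the pressure scale computed above, assuming the 24909 constant
stands for 1 inH2O ~= 249.09 Pa and a transfer function of roughly
P = 1.25 * FSS * out / 2^24, offset handling omitted:
scale = 1.25 * fss * 0.24909 kPa / 2^24 = 125 * fss * 24909 * 100 nano-kPa / 2^24,
so for the DLHL60D with fss = 120 inH2O this gives about 37.36 kPa / 16777216,
i.e. roughly 2.2 mPa per LSB.)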
+
+static const struct iio_info dlh_info = {
+	.read_raw = dlh_read_raw,
+};
+
+static const struct iio_chan_spec dlh_channels[] = {
+	{
+		.type = IIO_PRESSURE,
+		.indexed = 1,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.info_mask_shared_by_type =
+			BIT(IIO_CHAN_INFO_SCALE) |
+			BIT(IIO_CHAN_INFO_OFFSET),
+		.scan_index = 0,
+		.scan_type = {
+			.sign = 'u',
+			.realbits = DLH_NUM_PR_BITS,
+			.storagebits = 32,
+			.shift = 8,
+			.endianness = IIO_BE,
+		},
+	}, {
+		.type = IIO_TEMP,
+		.indexed = 1,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.info_mask_shared_by_type =
+			BIT(IIO_CHAN_INFO_SCALE) |
+			BIT(IIO_CHAN_INFO_OFFSET),
+		.scan_index = 1,
+		.scan_type = {
+			.sign = 'u',
+			.realbits = DLH_NUM_TEMP_BITS,
+			.storagebits = 32,
+			.shift = 8,
+			.endianness = IIO_BE,
+		},
+	}
+};
+
+static irqreturn_t dlh_trigger_handler(int irq, void *private)
+{
+	struct iio_poll_func *pf = private;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct dlh_state *st = iio_priv(indio_dev);
+	int ret;
+	unsigned int chn, i = 0;
+	__be32 tmp_buf[2];
+
+	ret = dlh_start_capture_and_read(st);
+	if (ret)
+		goto out;
+
+	for_each_set_bit(chn, indio_dev->active_scan_mask,
+		indio_dev->masklength) {
+		memcpy(tmp_buf + i,
+			&st->rx_buf[1] + chn * DLH_NUM_DATA_BYTES,
+			DLH_NUM_DATA_BYTES);
+		i++;
+	}
+
+	iio_push_to_buffers(indio_dev, tmp_buf);
+
+out:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t dlh_interrupt(int irq, void *private)
+{
+	struct iio_dev *indio_dev = private;
+	struct dlh_state *st = iio_priv(indio_dev);
+
+	complete(&st->completion);
+
+	return IRQ_HANDLED;
+}
+
+static int dlh_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	struct dlh_state *st;
+	struct iio_dev *indio_dev;
+	int ret;
+
+	if (!i2c_check_functionality(client->adapter,
+		I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE)) {
+		dev_err(&client->dev,
+			"adapter doesn't support required i2c functionality\n");
+		return -EOPNOTSUPP;
+	}
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+	if (!indio_dev) {
+		dev_err(&client->dev, "failed to allocate iio device\n");
+		return -ENOMEM;
+	}
+
+	i2c_set_clientdata(client, indio_dev);
+
+	st = iio_priv(indio_dev);
+	st->info = dlh_info_tbl[id->driver_data];
+	st->client = client;
+	st->use_interrupt = false;
+
+	indio_dev->name = id->name;
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->dev.of_node = client->dev.of_node;
+	indio_dev->info = &dlh_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = dlh_channels;
+	indio_dev->num_channels = ARRAY_SIZE(dlh_channels);
+
+	if (client->irq > 0) {
+		ret = devm_request_threaded_irq(&client->dev, client->irq,
+			dlh_interrupt, NULL,
+			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+			id->name, indio_dev);
+		if (ret) {
+			dev_err(&client->dev, "failed to allocate threaded irq\n");
+			return ret;
+		}
+
+		st->use_interrupt = true;
+		init_completion(&st->completion);
+	}
+
+	ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+		NULL, &dlh_trigger_handler, NULL);
+	if (ret) {
+		dev_err(&client->dev, "failed to setup iio buffer\n");
+		return ret;
+	}
+
+	ret = devm_iio_device_register(&client->dev, indio_dev);
+	if (ret)
+		dev_err(&client->dev, "failed to register iio device\n");
+
+	return ret;
+}
+
+static const struct of_device_id dlh_of_match[] = {
+	{ .compatible = "asc,dlhl60d" },
+	{ .compatible = "asc,dlhl60g" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dlh_of_match);
+
+static const struct i2c_device_id dlh_id[] = {
+	{ "dlhl60d",    dlhl60d },
+	{ "dlhl60g",    dlhl60g },
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, dlh_id);
+
+static struct i2c_driver dlh_driver = {
+	.driver = {
+		.name = "dlhl60d",
+		.of_match_table = dlh_of_match,
+	},
+	.probe = dlh_probe,
+	.id_table = dlh_id,
+};
+module_i2c_driver(dlh_driver);
+
+MODULE_AUTHOR("Tomislav Denis <tomislav.denis@avl.com>");
+MODULE_DESCRIPTION("Driver for All Sensors DLH series pressure sensors");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index c2e47a6..5c746ff 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -37,7 +37,7 @@ enum st_press_type {
  * struct st_sensors_platform_data - default press platform data
  * @drdy_int_pin: default press DRDY is available on INT1 pin.
  */
-static const struct st_sensors_platform_data default_press_pdata = {
+static __maybe_unused const struct st_sensors_platform_data default_press_pdata = {
 	.drdy_int_pin = 1,
 };
 
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 71d2ed6..09c6903 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -10,7 +10,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/acpi.h>
 #include <linux/i2c.h>
 #include <linux/iio/iio.h>
 
@@ -18,7 +17,6 @@
 #include <linux/iio/common/st_sensors_i2c.h>
 #include "st_pressure.h"
 
-#ifdef CONFIG_OF
 static const struct of_device_id st_press_of_match[] = {
 	{
 		.compatible = "st,lps001wp-press",
@@ -51,9 +49,6 @@ static const struct of_device_id st_press_of_match[] = {
 	{},
 };
 MODULE_DEVICE_TABLE(of, st_press_of_match);
-#else
-#define st_press_of_match NULL
-#endif
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id st_press_acpi_match[] = {
@@ -61,8 +56,6 @@ static const struct acpi_device_id st_press_acpi_match[] = {
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, st_press_acpi_match);
-#else
-#define st_press_acpi_match NULL
 #endif
 
 static const struct i2c_device_id st_press_id_table[] = {
@@ -85,18 +78,7 @@ static int st_press_i2c_probe(struct i2c_client *client,
 	struct iio_dev *indio_dev;
 	int ret;
 
-	if (client->dev.of_node) {
-		st_sensors_of_name_probe(&client->dev, st_press_of_match,
-					 client->name, sizeof(client->name));
-	} else if (ACPI_HANDLE(&client->dev)) {
-		ret = st_sensors_match_acpi_device(&client->dev);
-		if ((ret < 0) || (ret >= ST_PRESS_MAX))
-			return -ENODEV;
-
-		strlcpy(client->name, st_press_id_table[ret].name,
-			sizeof(client->name));
-	} else if (!id)
-		return -ENODEV;
+	st_sensors_dev_name_probe(&client->dev, client->name, sizeof(client->name));
 
 	settings = st_press_get_settings(client->name);
 	if (!settings) {
@@ -133,7 +115,7 @@ static int st_press_i2c_remove(struct i2c_client *client)
 static struct i2c_driver st_press_driver = {
 	.driver = {
 		.name = "st-press-i2c",
-		.of_match_table = of_match_ptr(st_press_of_match),
+		.of_match_table = st_press_of_match,
 		.acpi_match_table = ACPI_PTR(st_press_acpi_match),
 	},
 	.probe = st_press_i2c_probe,
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index 7c8b702..b5ee3ec 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -17,7 +17,6 @@
 #include <linux/iio/common/st_sensors_spi.h>
 #include "st_pressure.h"
 
-#ifdef CONFIG_OF
 /*
  * For new single-chip sensors use <device_name> as compatible string.
  * For old single-chip devices keep <device_name>-press to maintain
@@ -55,9 +54,6 @@ static const struct of_device_id st_press_of_match[] = {
 	{},
 };
 MODULE_DEVICE_TABLE(of, st_press_of_match);
-#else
-#define st_press_of_match	NULL
-#endif
 
 static int st_press_spi_probe(struct spi_device *spi)
 {
@@ -66,8 +62,7 @@ static int st_press_spi_probe(struct spi_device *spi)
 	struct iio_dev *indio_dev;
 	int err;
 
-	st_sensors_of_name_probe(&spi->dev, st_press_of_match,
-				 spi->modalias, sizeof(spi->modalias));
+	st_sensors_dev_name_probe(&spi->dev, spi->modalias, sizeof(spi->modalias));
 
 	settings = st_press_get_settings(spi->modalias);
 	if (!settings) {
@@ -116,7 +111,7 @@ MODULE_DEVICE_TABLE(spi, st_press_id_table);
 static struct spi_driver st_press_driver = {
 	.driver = {
 		.name = "st-press-spi",
-		.of_match_table = of_match_ptr(st_press_of_match),
+		.of_match_table = st_press_of_match,
 	},
 	.probe = st_press_spi_probe,
 	.remove = st_press_spi_remove,
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index d536014..37606d4 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -58,6 +58,21 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called mb1232.
 
+config PING
+	tristate "Parallax GPIO bitbanged ranger sensors"
+	depends on GPIOLIB
+	help
+	  Say Y here to build a driver for GPIO bitbanged ranger sensors
+	  with just one GPIO for the trigger and echo. This driver can be
+	  used to measure the distance of objects.
+
+	  Currently supported are:
+	  - Parallax PING))) (ultrasonic)
+	  - Parallax LaserPING (time-of-flight)
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ping.
+
 config RFD77402
 	tristate "RFD77402 ToF sensor"
 	depends on I2C
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index 0bb5f9d..c591b01 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_ISL29501)		+= isl29501.o
 obj-$(CONFIG_LIDAR_LITE_V2)	+= pulsedlight-lidar-lite-v2.o
 obj-$(CONFIG_MB1232)		+= mb1232.o
+obj-$(CONFIG_PING)		+= ping.o
 obj-$(CONFIG_RFD77402)		+= rfd77402.o
 obj-$(CONFIG_SRF04)		+= srf04.o
 obj-$(CONFIG_SRF08)		+= srf08.o
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index b591c63..bac9a43 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -14,7 +14,6 @@
 #include <linux/mutex.h>
 #include <linux/err.h>
 #include <linux/irq.h>
-#include <linux/gpio.h>
 #include <linux/spi/spi.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
@@ -22,8 +21,6 @@
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/buffer.h>
 #include <linux/iio/triggered_buffer.h>
-#include <linux/of_gpio.h>
-
 
 #define AS3935_AFE_GAIN		0x00
 #define AS3935_AFE_MASK		0x3F
diff --git a/drivers/iio/proximity/ping.c b/drivers/iio/proximity/ping.c
new file mode 100644
index 0000000..34aff10
--- /dev/null
+++ b/drivers/iio/proximity/ping.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PING: ultrasonic sensor for distance measuring using only one GPIO
+ *
+ * Copyright (c) 2019 Andreas Klinger <ak@it-klinger.de>
+ *
+ * For details about the devices see:
+ * http://parallax.com/sites/default/files/downloads/28041-LaserPING-2m-Rangefinder-Guide.pdf
+ * http://parallax.com/sites/default/files/downloads/28015-PING-Documentation-v1.6.pdf
+ *
+ * the measurement cycle as a timing diagram looks like:
+ *
+ * GPIO      ___              ________________________
+ * ping:  __/   \____________/                        \________________
+ *          ^   ^            ^                        ^
+ *          |<->|            interrupt                interrupt
+ *         udelay(5)         (ts_rising)              (ts_falling)
+ *                           |<---------------------->|
+ *                           .  pulse time measured   .
+ *                           .  --> one round trip of ultra sonic waves
+ * ultra                     .                        .
+ * sonic            _   _   _.                        .
+ * burst: _________/ \_/ \_/ \_________________________________________
+ *                                                    .
+ * ultra                                              .
+ * sonic                                     _   _   _.
+ * echo:  __________________________________/ \_/ \_/ \________________
+ */
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+struct ping_cfg {
+	unsigned long	trigger_pulse_us;	/* length of trigger pulse */
+	int		laserping_error;	/* support error code in */
+						/*   pulse width of laser */
+						/*   ping sensors */
+	s64		timeout_ns;		/* timeout in ns */
+};
+
+struct ping_data {
+	struct device		*dev;
+	struct gpio_desc	*gpiod_ping;
+	struct mutex		lock;
+	int			irqnr;
+	ktime_t			ts_rising;
+	ktime_t			ts_falling;
+	struct completion	rising;
+	struct completion	falling;
+	const struct ping_cfg	*cfg;
+};
+
+static const struct ping_cfg pa_ping_cfg = {
+	.trigger_pulse_us	= 5,
+	.laserping_error	= 0,
+	.timeout_ns		= 18500000,	/* 3 meters */
+};
+
+static const struct ping_cfg pa_laser_ping_cfg = {
+	.trigger_pulse_us	= 5,
+	.laserping_error	= 1,
+	.timeout_ns		= 15500000,	/* 2 meters plus error codes */
+};
+
+static irqreturn_t ping_handle_irq(int irq, void *dev_id)
+{
+	struct iio_dev *indio_dev = dev_id;
+	struct ping_data *data = iio_priv(indio_dev);
+	ktime_t now = ktime_get();
+
+	if (gpiod_get_value(data->gpiod_ping)) {
+		data->ts_rising = now;
+		complete(&data->rising);
+	} else {
+		data->ts_falling = now;
+		complete(&data->falling);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int ping_read(struct ping_data *data)
+{
+	int ret;
+	ktime_t ktime_dt;
+	s64 dt_ns;
+	u32 time_ns, distance_mm;
+	struct platform_device *pdev = to_platform_device(data->dev);
+	struct iio_dev *indio_dev = iio_priv_to_dev(data);
+
+	/*
+	 * only one read-echo cycle can take place at a time,
+	 * so lock against concurrent read calls
+	 */
+	mutex_lock(&data->lock);
+
+	reinit_completion(&data->rising);
+	reinit_completion(&data->falling);
+
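+	/*
+	 * Send the trigger pulse on the shared GPIO, then switch the same
+	 * line to input so its interrupt can timestamp the rising and
+	 * falling edges of the echo pulse.
+	 */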
+	gpiod_set_value(data->gpiod_ping, 1);
+	udelay(data->cfg->trigger_pulse_us);
+	gpiod_set_value(data->gpiod_ping, 0);
+
+	ret = gpiod_direction_input(data->gpiod_ping);
+	if (ret < 0) {
+		mutex_unlock(&data->lock);
+		return ret;
+	}
+
+	data->irqnr = gpiod_to_irq(data->gpiod_ping);
+	if (data->irqnr < 0) {
+		dev_err(data->dev, "gpiod_to_irq: %d\n", data->irqnr);
+		mutex_unlock(&data->lock);
+		return data->irqnr;
+	}
+
+	ret = request_irq(data->irqnr, ping_handle_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+							pdev->name, indio_dev);
+	if (ret < 0) {
+		dev_err(data->dev, "request_irq: %d\n", ret);
+		mutex_unlock(&data->lock);
+		return ret;
+	}
+
+	/* it should not take more than 20 ms until echo is rising */
+	ret = wait_for_completion_killable_timeout(&data->rising, HZ/50);
+	if (ret < 0)
+		goto err_reset_direction;
+	else if (ret == 0) {
+		ret = -ETIMEDOUT;
+		goto err_reset_direction;
+	}
+
+	/* it cannot take more than 50 ms until echo is falling */
+	ret = wait_for_completion_killable_timeout(&data->falling, HZ/20);
+	if (ret < 0)
+		goto err_reset_direction;
+	else if (ret == 0) {
+		ret = -ETIMEDOUT;
+		goto err_reset_direction;
+	}
+
+	ktime_dt = ktime_sub(data->ts_falling, data->ts_rising);
+
+	free_irq(data->irqnr, indio_dev);
+
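+	/*
+	 * Measurement done: put the shared line back into its idle output
+	 * state, ready for the next trigger pulse.
+	 */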
+	ret = gpiod_direction_output(data->gpiod_ping, GPIOD_OUT_LOW);
+	if (ret < 0) {
+		mutex_unlock(&data->lock);
+		return ret;
+	}
+
+	mutex_unlock(&data->lock);
+
+	dt_ns = ktime_to_ns(ktime_dt);
+	if (dt_ns > data->cfg->timeout_ns) {
+		dev_dbg(data->dev, "distance out of range: dt=%lldns\n",
+								dt_ns);
+		return -EIO;
+	}
+
+	time_ns = dt_ns;
+
+	/*
+	 * read the error code of the LaserPING sensor and give users a
+	 * chance to figure out the error by using dynamic debugging
+	 */
+	if (data->cfg->laserping_error) {
+		if ((time_ns > 12500000) && (time_ns <= 13500000)) {
+			dev_dbg(data->dev, "target too close or too far\n");
+			return -EIO;
+		}
+		if ((time_ns > 13500000) && (time_ns <= 14500000)) {
+			dev_dbg(data->dev, "internal sensor error\n");
+			return -EIO;
+		}
+		if ((time_ns > 14500000) && (time_ns <= 15500000)) {
+			dev_dbg(data->dev, "internal sensor timeout\n");
+			return -EIO;
+		}
+	}
+
+	/*
+	 * the speed of sound as a function of temperature is approximately:
+	 *
+	 * speed = 331.5 + 0.6 * Temp
+	 *   with Temp in °C
+	 *   and speed in m/s
+	 *
+	 * use 343.5 m/s as the speed of sound at 20 °C here, in the absence
+	 * of a temperature measurement
+	 *
+	 * therefore:
+	 *             time     343.5     time * 232
+	 * distance = ------ * ------- = ------------
+	 *             10^6         2        1350800
+	 *   with time in ns
+	 *   and distance in mm (one way)
+	 *
+	 * because we limit the range to 3 meters the multiplication with
+	 * 232 just fits into 32 bit
+	 */
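+	/*
+	 * e.g. at the 3 m timeout of 18500000 ns the intermediate product
+	 * is 18500000 * 232 = 4292000000, which still fits into 32 bit
+	 */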
+	distance_mm = time_ns * 232 / 1350800;
+
+	return distance_mm;
+
+err_reset_direction:
+	free_irq(data->irqnr, indio_dev);
+	mutex_unlock(&data->lock);
+
+	if (gpiod_direction_output(data->gpiod_ping, GPIOD_OUT_LOW))
+		dev_dbg(data->dev, "error in gpiod_direction_output\n");
+	return ret;
+}
+
+static int ping_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *channel, int *val,
+			    int *val2, long info)
+{
+	struct ping_data *data = iio_priv(indio_dev);
+	int ret;
+
+	if (channel->type != IIO_DISTANCE)
+		return -EINVAL;
+
+	switch (info) {
+	case IIO_CHAN_INFO_RAW:
+		ret = ping_read(data);
+		if (ret < 0)
+			return ret;
+		*val = ret;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		/*
+		 * maximum resolution in the datasheet is 1 mm;
+		 * 1 LSB is 1 mm, so the scale of 0.001 reports the
+		 * distance in meters as expected by the IIO ABI
+		 */
+		*val = 0;
+		*val2 = 1000;
+		return IIO_VAL_INT_PLUS_MICRO;
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct iio_info ping_iio_info = {
+	.read_raw		= ping_read_raw,
+};
+
+static const struct iio_chan_spec ping_chan_spec[] = {
+	{
+		.type = IIO_DISTANCE,
+		.info_mask_separate =
+				BIT(IIO_CHAN_INFO_RAW) |
+				BIT(IIO_CHAN_INFO_SCALE),
+	},
+};
+
+static const struct of_device_id of_ping_match[] = {
+	{ .compatible = "parallax,ping", .data = &pa_ping_cfg},
+	{ .compatible = "parallax,laserping", .data = &pa_laser_ping_cfg},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, of_ping_match);
+
+static int ping_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ping_data *data;
+	struct iio_dev *indio_dev;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(struct ping_data));
+	if (!indio_dev) {
+		dev_err(dev, "failed to allocate IIO device\n");
+		return -ENOMEM;
+	}
+
+	data = iio_priv(indio_dev);
+	data->dev = dev;
+	data->cfg = of_device_get_match_data(dev);
+
+	mutex_init(&data->lock);
+	init_completion(&data->rising);
+	init_completion(&data->falling);
+
+	data->gpiod_ping = devm_gpiod_get(dev, "ping", GPIOD_OUT_LOW);
+	if (IS_ERR(data->gpiod_ping)) {
+		dev_err(dev, "failed to get ping-gpios: err=%ld\n",
+						PTR_ERR(data->gpiod_ping));
+		return PTR_ERR(data->gpiod_ping);
+	}
+
+	if (gpiod_cansleep(data->gpiod_ping)) {
+		dev_err(data->dev, "cansleep-GPIOs not supported\n");
+		return -ENODEV;
+	}
+
+	platform_set_drvdata(pdev, indio_dev);
+
+	indio_dev->name = "ping";
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->info = &ping_iio_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = ping_chan_spec;
+	indio_dev->num_channels = ARRAY_SIZE(ping_chan_spec);
+
+	return devm_iio_device_register(dev, indio_dev);
+}
+
+static struct platform_driver ping_driver = {
+	.probe		= ping_probe,
+	.driver		= {
+		.name		= "ping-gpio",
+		.of_match_table	= of_ping_match,
+	},
+};
+
+module_platform_driver(ping_driver);
+
+MODULE_AUTHOR("Andreas Klinger <ak@it-klinger.de>");
+MODULE_DESCRIPTION("PING sensors for distance measuring using one GPIOs");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ping");
diff --git a/drivers/iio/resolver/ad2s1200.c b/drivers/iio/resolver/ad2s1200.c
index 17b8962..a391f46 100644
--- a/drivers/iio/resolver/ad2s1200.c
+++ b/drivers/iio/resolver/ad2s1200.c
@@ -10,7 +10,6 @@
 #include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/device.h>
-#include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c
index 73ed550..b4cb21a 100644
--- a/drivers/iio/temperature/max31856.c
+++ b/drivers/iio/temperature/max31856.c
@@ -6,12 +6,14 @@
  * Copyright (C) 2018-2019 Rockwell Collins
  */
 
+#include <linux/ctype.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/err.h>
 #include <linux/spi/spi.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
+#include <linux/util_macros.h>
 #include <dt-bindings/iio/temperature/thermocouple.h>
 /*
  * The MSB of the register value determines whether the following byte will
@@ -23,6 +25,9 @@
 #define MAX31856_CR0_1SHOT         BIT(6)
 #define MAX31856_CR0_OCFAULT       BIT(4)
 #define MAX31856_CR0_OCFAULT_MASK  GENMASK(5, 4)
+#define MAX31856_CR0_FILTER_50HZ   BIT(0)
+#define MAX31856_AVERAGING_MASK    GENMASK(6, 4)
+#define MAX31856_AVERAGING_SHIFT   4
 #define MAX31856_TC_TYPE_MASK      GENMASK(3, 0)
 #define MAX31856_FAULT_OVUV        BIT(1)
 #define MAX31856_FAULT_OPEN        BIT(0)
@@ -49,7 +54,10 @@ static const struct iio_chan_spec max31856_channels[] = {
 	{	/* Thermocouple Temperature */
 		.type = IIO_TEMP,
 		.info_mask_separate =
-			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) |
+			BIT(IIO_CHAN_INFO_THERMOCOUPLE_TYPE),
+		.info_mask_shared_by_type =
+			BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO)
 	},
 	{	/* Cold Junction Temperature */
 		.type = IIO_TEMP,
@@ -57,12 +65,20 @@ static const struct iio_chan_spec max31856_channels[] = {
 		.modified = 1,
 		.info_mask_separate =
 			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+		.info_mask_shared_by_type =
+			BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO)
 	},
 };
 
 struct max31856_data {
 	struct spi_device *spi;
 	u32 thermocouple_type;
+	bool filter_50hz;
+	int averaging;
+};
+
+static const char max31856_tc_types[] = {
+	'B', 'E', 'J', 'K', 'N', 'R', 'S', 'T'
 };
 
 static int max31856_read(struct max31856_data *data, u8 reg,
@@ -107,6 +123,10 @@ static int max31856_init(struct max31856_data *data)
 
 	reg_cr1_val &= ~MAX31856_TC_TYPE_MASK;
 	reg_cr1_val |= data->thermocouple_type;
+
+	reg_cr1_val &= ~MAX31856_AVERAGING_MASK;
+	reg_cr1_val |= data->averaging << MAX31856_AVERAGING_SHIFT;
+
 	ret = max31856_write(data, MAX31856_CR1_REG, reg_cr1_val);
 	if (ret)
 		return ret;
@@ -123,6 +143,11 @@ static int max31856_init(struct max31856_data *data)
 	reg_cr0_val &= ~MAX31856_CR0_1SHOT;
 	reg_cr0_val |= MAX31856_CR0_AUTOCONVERT;
 
+	if (data->filter_50hz)
+		reg_cr0_val |= MAX31856_CR0_FILTER_50HZ;
+	else
+		reg_cr0_val &= ~MAX31856_CR0_FILTER_50HZ;
+
 	return max31856_write(data, MAX31856_CR0_REG, reg_cr0_val);
 }
 
@@ -210,6 +235,12 @@ static int max31856_read_raw(struct iio_dev *indio_dev,
 			return IIO_VAL_INT_PLUS_MICRO;
 		}
 		break;
+	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+		*val = 1 << data->averaging;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
+		*val = max31856_tc_types[data->thermocouple_type];
+		return IIO_VAL_CHAR;
 	default:
 		ret = -EINVAL;
 		break;
@@ -218,6 +249,62 @@ static int max31856_read_raw(struct iio_dev *indio_dev,
 	return ret;
 }
 
+static int max31856_write_raw_get_fmt(struct iio_dev *indio_dev,
+				      struct iio_chan_spec const *chan,
+				      long mask)
+{
+	switch (mask) {
+	case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
+		return IIO_VAL_CHAR;
+	default:
+		return IIO_VAL_INT;
+	}
+}
+
+static int max31856_write_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int val, int val2, long mask)
+{
+	struct max31856_data *data = iio_priv(indio_dev);
+	int msb;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+		if (val > 16 || val < 1)
+			return -EINVAL;
+		msb = fls(val) - 1;
+		/* Round up to the next power of two if needed */
+		if (BIT(msb) < val)
+			msb++;
+
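+		/* averaging stores log2 of the requested sample count */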
+		data->averaging = msb;
+		max31856_init(data);
+		break;
+	case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
+	{
+		int tc_type = -1;
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(max31856_tc_types); i++) {
+			if (max31856_tc_types[i] == toupper(val)) {
+				tc_type = i;
+				break;
+			}
+		}
+		if (tc_type < 0)
+			return -EINVAL;
+
+		data->thermocouple_type = tc_type;
+		max31856_init(data);
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static ssize_t show_fault(struct device *dev, u8 faultbit, char *buf)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -249,12 +336,54 @@ static ssize_t show_fault_oc(struct device *dev,
 	return show_fault(dev, MAX31856_FAULT_OPEN, buf);
 }
 
+static ssize_t show_filter(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct max31856_data *data = iio_priv(indio_dev);
+
+	return sprintf(buf, "%d\n", data->filter_50hz ? 50 : 60);
+}
+
+static ssize_t set_filter(struct device *dev,
+			  struct device_attribute *attr,
+			  const char *buf,
+			  size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct max31856_data *data = iio_priv(indio_dev);
+	unsigned int freq;
+	int ret;
+
+	ret = kstrtouint(buf, 10, &freq);
+	if (ret)
+		return ret;
+
+	switch (freq) {
+	case 50:
+		data->filter_50hz = true;
+		break;
+	case 60:
+		data->filter_50hz = false;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	max31856_init(data);
+	return len;
+}
+
 static IIO_DEVICE_ATTR(fault_ovuv, 0444, show_fault_ovuv, NULL, 0);
 static IIO_DEVICE_ATTR(fault_oc, 0444, show_fault_oc, NULL, 0);
+static IIO_DEVICE_ATTR(in_temp_filter_notch_center_frequency, 0644,
+		       show_filter, set_filter, 0);
 
 static struct attribute *max31856_attributes[] = {
 	&iio_dev_attr_fault_ovuv.dev_attr.attr,
 	&iio_dev_attr_fault_oc.dev_attr.attr,
+	&iio_dev_attr_in_temp_filter_notch_center_frequency.dev_attr.attr,
 	NULL,
 };
 
@@ -264,6 +393,8 @@ static const struct attribute_group max31856_group = {
 
 static const struct iio_info max31856_info = {
 	.read_raw = max31856_read_raw,
+	.write_raw = max31856_write_raw,
+	.write_raw_get_fmt = max31856_write_raw_get_fmt,
 	.attrs = &max31856_group,
 };
 
@@ -280,6 +411,7 @@ static int max31856_probe(struct spi_device *spi)
 
 	data = iio_priv(indio_dev);
 	data->spi = spi;
+	data->filter_50hz = false;
 
 	spi_set_drvdata(spi, indio_dev);
 
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index d136060..8d21116 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -14,6 +14,7 @@
 #include <linux/of_device.h>
 #include <linux/spi/spi.h>
 #include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
 #include <linux/iio/trigger.h>
 #include <linux/iio/buffer.h>
 #include <linux/iio/triggered_buffer.h>
@@ -24,13 +25,25 @@
 enum {
 	MAX6675,
 	MAX31855,
+	MAX31855K,
+	MAX31855J,
+	MAX31855N,
+	MAX31855S,
+	MAX31855T,
+	MAX31855E,
+	MAX31855R,
+};
+
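+/*
+ * Thermocouple type letter for each entry of the enum above; '?' marks
+ * the generic max31855 whose thermocouple type is not known.
+ */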
+static const char maxim_tc_types[] = {
+	'K', '?', 'K', 'J', 'N', 'S', 'T', 'E', 'R'
 };
 
 static const struct iio_chan_spec max6675_channels[] = {
 	{	/* thermocouple temperature */
 		.type = IIO_TEMP,
 		.info_mask_separate =
-			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) |
+			BIT(IIO_CHAN_INFO_THERMOCOUPLE_TYPE),
 		.scan_index = 0,
 		.scan_type = {
 			.sign = 's',
@@ -48,7 +61,8 @@ static const struct iio_chan_spec max31855_channels[] = {
 		.type = IIO_TEMP,
 		.address = 2,
 		.info_mask_separate =
-			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) |
+			BIT(IIO_CHAN_INFO_THERMOCOUPLE_TYPE),
 		.scan_index = 0,
 		.scan_type = {
 			.sign = 's',
@@ -110,6 +124,7 @@ struct maxim_thermocouple_data {
 	const struct maxim_thermocouple_chip *chip;
 
 	u8 buffer[16] ____cacheline_aligned;
+	char tc_type;
 };
 
 static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
@@ -196,6 +211,10 @@ static int maxim_thermocouple_read_raw(struct iio_dev *indio_dev,
 			ret = IIO_VAL_INT;
 		}
 		break;
+	case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
+		*val = data->tc_type;
+		ret = IIO_VAL_CHAR;
+		break;
 	}
 
 	return ret;
@@ -210,8 +229,9 @@ static int maxim_thermocouple_probe(struct spi_device *spi)
 	const struct spi_device_id *id = spi_get_device_id(spi);
 	struct iio_dev *indio_dev;
 	struct maxim_thermocouple_data *data;
+	const int chip_type = (id->driver_data == MAX6675) ? MAX6675 : MAX31855;
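+	/* all max31855 variants share the generic max31855 register layout */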
 	const struct maxim_thermocouple_chip *chip =
-			&maxim_thermocouple_chips[id->driver_data];
+		&maxim_thermocouple_chips[chip_type];
 	int ret;
 
 	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
@@ -229,6 +249,7 @@ static int maxim_thermocouple_probe(struct spi_device *spi)
 	data = iio_priv(indio_dev);
 	data->spi = spi;
 	data->chip = chip;
+	data->tc_type = maxim_tc_types[id->driver_data];
 
 	ret = devm_iio_triggered_buffer_setup(&spi->dev,
 				indio_dev, NULL,
@@ -236,12 +257,22 @@ static int maxim_thermocouple_probe(struct spi_device *spi)
 	if (ret)
 		return ret;
 
+	if (id->driver_data == MAX31855)
+		dev_warn(&spi->dev, "generic max31855 ID is deprecated\nplease use more specific part type");
+
 	return devm_iio_device_register(&spi->dev, indio_dev);
 }
 
 static const struct spi_device_id maxim_thermocouple_id[] = {
 	{"max6675", MAX6675},
 	{"max31855", MAX31855},
+	{"max31855k", MAX31855K},
+	{"max31855j", MAX31855J},
+	{"max31855n", MAX31855N},
+	{"max31855s", MAX31855S},
+	{"max31855t", MAX31855T},
+	{"max31855e", MAX31855E},
+	{"max31855r", MAX31855R},
 	{},
 };
 MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
@@ -249,6 +280,13 @@ MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
 static const struct of_device_id maxim_thermocouple_of_match[] = {
         { .compatible = "maxim,max6675" },
         { .compatible = "maxim,max31855" },
+	{ .compatible = "maxim,max31855k" },
+	{ .compatible = "maxim,max31855j" },
+	{ .compatible = "maxim,max31855n" },
+	{ .compatible = "maxim,max31855s" },
+	{ .compatible = "maxim,max31855t" },
+	{ .compatible = "maxim,max31855e" },
+	{ .compatible = "maxim,max31855r" },
         { },
 };
 MODULE_DEVICE_TABLE(of, maxim_thermocouple_of_match);
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index a5dfe65..2e0d32a 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -297,9 +297,6 @@ static ssize_t stm32_tt_store_master_mode(struct device *dev,
 			     strlen(master_mode_table[i]))) {
 			regmap_update_bits(priv->regmap, TIM_CR2, mask,
 					   i << shift);
-			/* Make sure that registers are updated */
-			regmap_update_bits(priv->regmap, TIM_EGR,
-					   TIM_EGR_UG, TIM_EGR_UG);
 			return len;
 		}
 	}
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 9a8871e..d1b14887 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -11,7 +11,8 @@
 				device.o fmr_pool.o cache.o netlink.o \
 				roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
 				multicast.o mad.o smi.o agent.o mad_rmpp.o \
-				nldev.o restrack.o counters.o ib_core_uverbs.o
+				nldev.o restrack.o counters.o ib_core_uverbs.o \
+				trace.o
 
 ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
 ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
@@ -20,7 +21,8 @@
 
 iw_cm-y :=			iwcm.o iwpm_util.o iwpm_msg.o
 
-rdma_cm-y :=			cma.o
+CFLAGS_cma_trace.o +=		-I$(src)
+rdma_cm-y :=			cma.o cma_trace.o
 
 rdma_cm-$(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) += cma_configfs.o
 
@@ -33,6 +35,7 @@
 				uverbs_std_types_cq.o \
 				uverbs_std_types_flow_action.o uverbs_std_types_dm.o \
 				uverbs_std_types_mr.o uverbs_std_types_counters.o \
-				uverbs_uapi.o uverbs_std_types_device.o
+				uverbs_uapi.o uverbs_std_types_device.o \
+				uverbs_std_types_async_fd.o
 ib_uverbs-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
 ib_uverbs-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 606fa6d..1753a98 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -139,7 +139,7 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
 	if (ib_nl_is_good_ip_resp(nlh))
 		ib_nl_process_good_ip_rsep(nlh);
 
-	return skb->len;
+	return 0;
 }
 
 static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index d535995..17bfedd 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -51,9 +51,8 @@ struct ib_pkey_cache {
 
 struct ib_update_work {
 	struct work_struct work;
-	struct ib_device  *device;
-	u8                 port_num;
-	bool		   enforce_security;
+	struct ib_event event;
+	bool enforce_security;
 };
 
 union ib_gid zgid;
@@ -130,7 +129,7 @@ static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
 	event.element.port_num	= port;
 	event.event		= IB_EVENT_GID_CHANGE;
 
-	ib_dispatch_event(&event);
+	ib_dispatch_event_clients(&event);
 }
 
 static const char * const gid_type_str[] = {
@@ -1034,7 +1033,7 @@ int ib_get_cached_pkey(struct ib_device *device,
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
 
-	read_lock_irqsave(&device->cache.lock, flags);
+	read_lock_irqsave(&device->cache_lock, flags);
 
 	cache = device->port_data[port_num].cache.pkey;
 
@@ -1043,7 +1042,7 @@ int ib_get_cached_pkey(struct ib_device *device,
 	else
 		*pkey = cache->table[index];
 
-	read_unlock_irqrestore(&device->cache.lock, flags);
+	read_unlock_irqrestore(&device->cache_lock, flags);
 
 	return ret;
 }
@@ -1058,9 +1057,9 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
 
-	read_lock_irqsave(&device->cache.lock, flags);
+	read_lock_irqsave(&device->cache_lock, flags);
 	*sn_pfx = device->port_data[port_num].cache.subnet_prefix;
-	read_unlock_irqrestore(&device->cache.lock, flags);
+	read_unlock_irqrestore(&device->cache_lock, flags);
 
 	return 0;
 }
@@ -1080,7 +1079,7 @@ int ib_find_cached_pkey(struct ib_device *device,
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
 
-	read_lock_irqsave(&device->cache.lock, flags);
+	read_lock_irqsave(&device->cache_lock, flags);
 
 	cache = device->port_data[port_num].cache.pkey;
 
@@ -1101,7 +1100,7 @@ int ib_find_cached_pkey(struct ib_device *device,
 		ret = 0;
 	}
 
-	read_unlock_irqrestore(&device->cache.lock, flags);
+	read_unlock_irqrestore(&device->cache_lock, flags);
 
 	return ret;
 }
@@ -1120,7 +1119,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
 
-	read_lock_irqsave(&device->cache.lock, flags);
+	read_lock_irqsave(&device->cache_lock, flags);
 
 	cache = device->port_data[port_num].cache.pkey;
 
@@ -1133,7 +1132,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
 			break;
 		}
 
-	read_unlock_irqrestore(&device->cache.lock, flags);
+	read_unlock_irqrestore(&device->cache_lock, flags);
 
 	return ret;
 }
@@ -1149,9 +1148,9 @@ int ib_get_cached_lmc(struct ib_device *device,
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
 
-	read_lock_irqsave(&device->cache.lock, flags);
+	read_lock_irqsave(&device->cache_lock, flags);
 	*lmc = device->port_data[port_num].cache.lmc;
-	read_unlock_irqrestore(&device->cache.lock, flags);
+	read_unlock_irqrestore(&device->cache_lock, flags);
 
 	return ret;
 }
@@ -1167,9 +1166,9 @@ int ib_get_cached_port_state(struct ib_device   *device,
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
 
-	read_lock_irqsave(&device->cache.lock, flags);
+	read_lock_irqsave(&device->cache_lock, flags);
 	*port_state = device->port_data[port_num].cache.port_state;
-	read_unlock_irqrestore(&device->cache.lock, flags);
+	read_unlock_irqrestore(&device->cache_lock, flags);
 
 	return ret;
 }
@@ -1381,9 +1380,8 @@ static int config_non_roce_gid_cache(struct ib_device *device,
 	return ret;
 }
 
-static void ib_cache_update(struct ib_device *device,
-			    u8                port,
-			    bool	      enforce_security)
+static int
+ib_cache_update(struct ib_device *device, u8 port, bool enforce_security)
 {
 	struct ib_port_attr       *tprops = NULL;
 	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
@@ -1391,11 +1389,11 @@ static void ib_cache_update(struct ib_device *device,
 	int                        ret;
 
 	if (!rdma_is_port_valid(device, port))
-		return;
+		return -EINVAL;
 
 	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
 	if (!tprops)
-		return;
+		return -ENOMEM;
 
 	ret = ib_query_port(device, port, tprops);
 	if (ret) {
@@ -1413,8 +1411,10 @@ static void ib_cache_update(struct ib_device *device,
 	pkey_cache = kmalloc(struct_size(pkey_cache, table,
 					 tprops->pkey_tbl_len),
 			     GFP_KERNEL);
-	if (!pkey_cache)
+	if (!pkey_cache) {
+		ret = -ENOMEM;
 		goto err;
+	}
 
 	pkey_cache->table_len = tprops->pkey_tbl_len;
 
@@ -1428,7 +1428,7 @@ static void ib_cache_update(struct ib_device *device,
 		}
 	}
 
-	write_lock_irq(&device->cache.lock);
+	write_lock_irq(&device->cache_lock);
 
 	old_pkey_cache = device->port_data[port].cache.pkey;
 
@@ -1437,7 +1437,7 @@ static void ib_cache_update(struct ib_device *device,
 	device->port_data[port].cache.port_state = tprops->state;
 
 	device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
-	write_unlock_irq(&device->cache.lock);
+	write_unlock_irq(&device->cache_lock);
 
 	if (enforce_security)
 		ib_security_cache_change(device,
@@ -1446,57 +1446,91 @@ static void ib_cache_update(struct ib_device *device,
 
 	kfree(old_pkey_cache);
 	kfree(tprops);
-	return;
+	return 0;
 
 err:
 	kfree(pkey_cache);
 	kfree(tprops);
+	return ret;
 }
 
-static void ib_cache_task(struct work_struct *_work)
+static void ib_cache_event_task(struct work_struct *_work)
+{
+	struct ib_update_work *work =
+		container_of(_work, struct ib_update_work, work);
+	int ret;
+
+	/* Before distributing the cache update event, first sync
+	 * the cache.
+	 */
+	ret = ib_cache_update(work->event.device, work->event.element.port_num,
+			      work->enforce_security);
+
+	/* GID events are already notified for individual GID entries by
+	 * dispatch_gid_change_event(). Hence, notify for the rest of the
+	 * events.
+	 */
+	if (!ret && work->event.event != IB_EVENT_GID_CHANGE)
+		ib_dispatch_event_clients(&work->event);
+
+	kfree(work);
+}
+
+static void ib_generic_event_task(struct work_struct *_work)
 {
 	struct ib_update_work *work =
 		container_of(_work, struct ib_update_work, work);
 
-	ib_cache_update(work->device,
-			work->port_num,
-			work->enforce_security);
+	ib_dispatch_event_clients(&work->event);
 	kfree(work);
 }
 
-static void ib_cache_event(struct ib_event_handler *handler,
-			   struct ib_event *event)
+static bool is_cache_update_event(const struct ib_event *event)
+{
+	return (event->event == IB_EVENT_PORT_ERR    ||
+		event->event == IB_EVENT_PORT_ACTIVE ||
+		event->event == IB_EVENT_LID_CHANGE  ||
+		event->event == IB_EVENT_PKEY_CHANGE ||
+		event->event == IB_EVENT_CLIENT_REREGISTER ||
+		event->event == IB_EVENT_GID_CHANGE);
+}
+
+/**
+ * ib_dispatch_event - Dispatch an asynchronous event
+ * @event: Event to dispatch
+ *
+ * Low-level drivers must call ib_dispatch_event() to dispatch the
+ * event to all registered event handlers when an asynchronous event
+ * occurs.
+ */
+void ib_dispatch_event(const struct ib_event *event)
 {
 	struct ib_update_work *work;
 
-	if (event->event == IB_EVENT_PORT_ERR    ||
-	    event->event == IB_EVENT_PORT_ACTIVE ||
-	    event->event == IB_EVENT_LID_CHANGE  ||
-	    event->event == IB_EVENT_PKEY_CHANGE ||
-	    event->event == IB_EVENT_CLIENT_REREGISTER ||
-	    event->event == IB_EVENT_GID_CHANGE) {
-		work = kmalloc(sizeof *work, GFP_ATOMIC);
-		if (work) {
-			INIT_WORK(&work->work, ib_cache_task);
-			work->device   = event->device;
-			work->port_num = event->element.port_num;
-			if (event->event == IB_EVENT_PKEY_CHANGE ||
-			    event->event == IB_EVENT_GID_CHANGE)
-				work->enforce_security = true;
-			else
-				work->enforce_security = false;
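+	/* may be called from atomic context, hence GFP_ATOMIC */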
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return;
 
-			queue_work(ib_wq, &work->work);
-		}
-	}
+	if (is_cache_update_event(event))
+		INIT_WORK(&work->work, ib_cache_event_task);
+	else
+		INIT_WORK(&work->work, ib_generic_event_task);
+
+	work->event = *event;
+	if (event->event == IB_EVENT_PKEY_CHANGE ||
+	    event->event == IB_EVENT_GID_CHANGE)
+		work->enforce_security = true;
+
+	queue_work(ib_wq, &work->work);
 }
+EXPORT_SYMBOL(ib_dispatch_event);
 
 int ib_cache_setup_one(struct ib_device *device)
 {
 	unsigned int p;
 	int err;
 
-	rwlock_init(&device->cache.lock);
+	rwlock_init(&device->cache_lock);
 
 	err = gid_table_setup_one(device);
 	if (err)
@@ -1505,9 +1539,6 @@ int ib_cache_setup_one(struct ib_device *device)
 	rdma_for_each_port (device, p)
 		ib_cache_update(device, p, true);
 
-	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
-			      device, ib_cache_event);
-	ib_register_event_handler(&device->cache.event_handler);
 	return 0;
 }
 
@@ -1529,14 +1560,12 @@ void ib_cache_release_one(struct ib_device *device)
 
 void ib_cache_cleanup_one(struct ib_device *device)
 {
-	/* The cleanup function unregisters the event handler,
-	 * waits for all in-progress workqueue elements and cleans
-	 * up the GID cache. This function should be called after
-	 * the device was removed from the devices list and all
-	 * clients were removed, so the cache exists but is
+	/* The cleanup function waits for all in-progress workqueue
+	 * elements and cleans up the GID cache. This function should be
+	 * called after the device was removed from the devices list and
+	 * all clients were removed, so the cache exists but is
 	 * non-functional and shouldn't be updated anymore.
 	 */
-	ib_unregister_event_handler(&device->cache.event_handler);
 	flush_workqueue(ib_wq);
 	gid_table_cleanup_one(device);
 
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 455b365..68cc1b2 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -241,6 +241,7 @@ struct cm_id_private {
 	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
 	 * Protected by the cm.lock spinlock. */
 	int listen_sharecount;
+	struct rcu_head rcu;
 
 	struct ib_mad_send_buf *msg;
 	struct cm_timewait_info *timewait_info;
@@ -593,28 +594,16 @@ static void cm_free_id(__be32 local_id)
 	xa_erase_irq(&cm.local_id_table, cm_local_id(local_id));
 }
 
-static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
+static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
 {
 	struct cm_id_private *cm_id_priv;
 
+	rcu_read_lock();
 	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
-	if (cm_id_priv) {
-		if (cm_id_priv->id.remote_id == remote_id)
-			refcount_inc(&cm_id_priv->refcount);
-		else
-			cm_id_priv = NULL;
-	}
-
-	return cm_id_priv;
-}
-
-static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
-{
-	struct cm_id_private *cm_id_priv;
-
-	spin_lock_irq(&cm.lock);
-	cm_id_priv = cm_get_id(local_id, remote_id);
-	spin_unlock_irq(&cm.lock);
+	if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
+	    !refcount_inc_not_zero(&cm_id_priv->refcount))
+		cm_id_priv = NULL;
+	rcu_read_unlock();
 
 	return cm_id_priv;
 }
@@ -1089,7 +1078,7 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
 	rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
 	rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
 	kfree(cm_id_priv->private_data);
-	kfree(cm_id_priv);
+	kfree_rcu(cm_id_priv, rcu);
 }
 
 void ib_destroy_cm_id(struct ib_cm_id *cm_id)
@@ -1262,54 +1251,72 @@ static void cm_format_req(struct cm_req_msg *req_msg,
 	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
 			  cm_form_tid(cm_id_priv));
 
-	req_msg->local_comm_id = cm_id_priv->id.local_id;
-	req_msg->service_id = param->service_id;
-	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
-	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
-	cm_req_set_init_depth(req_msg, param->initiator_depth);
-	cm_req_set_remote_resp_timeout(req_msg,
-				       param->remote_cm_response_timeout);
+	IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
+		be32_to_cpu(cm_id_priv->id.local_id));
+	IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
+	IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
+		be64_to_cpu(cm_id_priv->id.device->node_guid));
+	IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
+	IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
+	IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
+		param->remote_cm_response_timeout);
 	cm_req_set_qp_type(req_msg, param->qp_type);
-	cm_req_set_flow_ctrl(req_msg, param->flow_control);
-	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
-	cm_req_set_local_resp_timeout(req_msg,
-				      param->local_cm_response_timeout);
-	req_msg->pkey = param->primary_path->pkey;
-	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
-	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
+	IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
+	IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
+	IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
+		param->local_cm_response_timeout);
+	IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
+		be16_to_cpu(param->primary_path->pkey));
+	IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
+		param->primary_path->mtu);
+	IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);
 
 	if (param->qp_type != IB_QPT_XRC_INI) {
-		cm_req_set_resp_res(req_msg, param->responder_resources);
-		cm_req_set_retry_count(req_msg, param->retry_count);
-		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
-		cm_req_set_srq(req_msg, param->srq);
+		IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
+			param->responder_resources);
+		IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
+		IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
+			param->rnr_retry_count);
+		IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
 	}
 
-	req_msg->primary_local_gid = pri_path->sgid;
-	req_msg->primary_remote_gid = pri_path->dgid;
+	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
+		pri_path->sgid;
+	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
+		pri_path->dgid;
 	if (pri_ext) {
-		req_msg->primary_local_gid.global.interface_id
-			= OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
-		req_msg->primary_remote_gid.global.interface_id
-			= OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
+		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
+			->global.interface_id =
+			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
+		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
+			->global.interface_id =
+			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
 	}
 	if (pri_path->hop_limit <= 1) {
-		req_msg->primary_local_lid = pri_ext ? 0 :
-			htons(ntohl(sa_path_get_slid(pri_path)));
-		req_msg->primary_remote_lid = pri_ext ? 0 :
-			htons(ntohl(sa_path_get_dlid(pri_path)));
+		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+			be16_to_cpu(pri_ext ? 0 :
+					      htons(ntohl(sa_path_get_slid(
+						      pri_path)))));
+		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
+			be16_to_cpu(pri_ext ? 0 :
+					      htons(ntohl(sa_path_get_dlid(
+						      pri_path)))));
 	} else {
 		/* Work-around until there's a way to obtain remote LID info */
-		req_msg->primary_local_lid = IB_LID_PERMISSIVE;
-		req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
+		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+			be16_to_cpu(IB_LID_PERMISSIVE));
+		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
+			be16_to_cpu(IB_LID_PERMISSIVE));
 	}
-	cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
-	cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
-	req_msg->primary_traffic_class = pri_path->traffic_class;
-	req_msg->primary_hop_limit = pri_path->hop_limit;
-	cm_req_set_primary_sl(req_msg, pri_path->sl);
-	cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
-	cm_req_set_primary_local_ack_timeout(req_msg,
+	IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
+		be32_to_cpu(pri_path->flow_label));
+	IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
+	IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
+	IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
+	IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
+	IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
+		(pri_path->hop_limit <= 1));
+	IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
 		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
 			       pri_path->packet_life_time));
 
@@ -1320,38 +1327,55 @@ static void cm_format_req(struct cm_req_msg *req_msg,
 			alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
 						      alt_path->opa.slid);
 
-		req_msg->alt_local_gid = alt_path->sgid;
-		req_msg->alt_remote_gid = alt_path->dgid;
+		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
+			alt_path->sgid;
+		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
+			alt_path->dgid;
 		if (alt_ext) {
-			req_msg->alt_local_gid.global.interface_id
-				= OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
-			req_msg->alt_remote_gid.global.interface_id
-				= OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
+			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
+					req_msg)
+				->global.interface_id =
+				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
+			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
+					req_msg)
+				->global.interface_id =
+				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
 		}
 		if (alt_path->hop_limit <= 1) {
-			req_msg->alt_local_lid = alt_ext ? 0 :
-				htons(ntohl(sa_path_get_slid(alt_path)));
-			req_msg->alt_remote_lid = alt_ext ? 0 :
-				htons(ntohl(sa_path_get_dlid(alt_path)));
+			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
+				be16_to_cpu(
+					alt_ext ? 0 :
+						  htons(ntohl(sa_path_get_slid(
+							  alt_path)))));
+			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
+				be16_to_cpu(
+					alt_ext ? 0 :
+						  htons(ntohl(sa_path_get_dlid(
+							  alt_path)))));
 		} else {
-			req_msg->alt_local_lid = IB_LID_PERMISSIVE;
-			req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
+			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
+				be16_to_cpu(IB_LID_PERMISSIVE));
+			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
+				be16_to_cpu(IB_LID_PERMISSIVE));
 		}
-		cm_req_set_alt_flow_label(req_msg,
-					  alt_path->flow_label);
-		cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
-		req_msg->alt_traffic_class = alt_path->traffic_class;
-		req_msg->alt_hop_limit = alt_path->hop_limit;
-		cm_req_set_alt_sl(req_msg, alt_path->sl);
-		cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
-		cm_req_set_alt_local_ack_timeout(req_msg,
+		IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
+			be32_to_cpu(alt_path->flow_label));
+		IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
+		IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
+			alt_path->traffic_class);
+		IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
+			alt_path->hop_limit);
+		IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
+		IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
+			(alt_path->hop_limit <= 1));
+		IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
 			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
 				       alt_path->packet_life_time));
 	}
 
 	if (param->private_data && param->private_data_len)
-		memcpy(req_msg->private_data, param->private_data,
-		       param->private_data_len);
+		IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
+			    param->private_data_len);
 }
 
 static int cm_validate_req_param(struct ib_cm_req_param *param)
@@ -1443,8 +1467,8 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
 	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
 
-	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
-	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
+	cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
+	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
@@ -1482,14 +1506,16 @@ static int cm_issue_rej(struct cm_port *port,
 	rej_msg = (struct cm_rej_msg *) msg->mad;
 
 	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
-	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
-	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
-	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
-	rej_msg->reason = cpu_to_be16(reason);
+	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
+		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
+	IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
+		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
+	IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
+	IBA_SET(CM_REJ_REASON, rej_msg, reason);
 
 	if (ari && ari_length) {
-		cm_rej_set_reject_info_len(rej_msg, ari_length);
-		memcpy(rej_msg->ari, ari, ari_length);
+		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
+		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
 	}
 
 	ret = ib_post_send_mad(msg, NULL);
@@ -1501,8 +1527,10 @@ static int cm_issue_rej(struct cm_port *port,
 
 static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
 {
-	return ((req_msg->alt_local_lid) ||
-		(ib_is_opa_gid(&req_msg->alt_local_gid)));
+	return ((cpu_to_be16(
+			IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
+		(ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
+					       req_msg))));
 }
 
 static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
@@ -1522,14 +1550,18 @@ static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
 
 	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
 		sa_path_set_dlid(primary_path,
-				 ntohs(req_msg->primary_local_lid));
+				 IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
+					 req_msg));
 		sa_path_set_slid(primary_path,
-				 ntohs(req_msg->primary_remote_lid));
+				 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
+					 req_msg));
 	} else {
-		lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
+		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+			CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
 		sa_path_set_dlid(primary_path, lid);
 
-		lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
+		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+			CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
 		sa_path_set_slid(primary_path, lid);
 	}
 
@@ -1537,13 +1569,19 @@ static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
 		return;
 
 	if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
-		sa_path_set_dlid(alt_path, ntohs(req_msg->alt_local_lid));
-		sa_path_set_slid(alt_path, ntohs(req_msg->alt_remote_lid));
+		sa_path_set_dlid(alt_path,
+				 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
+					 req_msg));
+		sa_path_set_slid(alt_path,
+				 IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
+					 req_msg));
 	} else {
-		lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
+		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
 		sa_path_set_dlid(alt_path, lid);
 
-		lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
+		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
 		sa_path_set_slid(alt_path, lid);
 	}
 }
@@ -1552,44 +1590,58 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
 				     struct sa_path_rec *primary_path,
 				     struct sa_path_rec *alt_path)
 {
-	primary_path->dgid = req_msg->primary_local_gid;
-	primary_path->sgid = req_msg->primary_remote_gid;
-	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
-	primary_path->hop_limit = req_msg->primary_hop_limit;
-	primary_path->traffic_class = req_msg->primary_traffic_class;
+	primary_path->dgid =
+		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
+	primary_path->sgid =
+		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
+	primary_path->flow_label =
+		cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
+	primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
+	primary_path->traffic_class =
+		IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
 	primary_path->reversible = 1;
-	primary_path->pkey = req_msg->pkey;
-	primary_path->sl = cm_req_get_primary_sl(req_msg);
+	primary_path->pkey =
+		cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
+	primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
 	primary_path->mtu_selector = IB_SA_EQ;
-	primary_path->mtu = cm_req_get_path_mtu(req_msg);
+	primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
 	primary_path->rate_selector = IB_SA_EQ;
-	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
+	primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
 	primary_path->packet_life_time_selector = IB_SA_EQ;
 	primary_path->packet_life_time =
-		cm_req_get_primary_local_ack_timeout(req_msg);
+		IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
 	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
-	primary_path->service_id = req_msg->service_id;
+	primary_path->service_id =
+		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
 	if (sa_path_is_roce(primary_path))
 		primary_path->roce.route_resolved = false;
 
 	if (cm_req_has_alt_path(req_msg)) {
-		alt_path->dgid = req_msg->alt_local_gid;
-		alt_path->sgid = req_msg->alt_remote_gid;
-		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
-		alt_path->hop_limit = req_msg->alt_hop_limit;
-		alt_path->traffic_class = req_msg->alt_traffic_class;
+		alt_path->dgid = *IBA_GET_MEM_PTR(
+			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
+		alt_path->sgid = *IBA_GET_MEM_PTR(
+			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
+		alt_path->flow_label = cpu_to_be32(
+			IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
+		alt_path->hop_limit =
+			IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
+		alt_path->traffic_class =
+			IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
 		alt_path->reversible = 1;
-		alt_path->pkey = req_msg->pkey;
-		alt_path->sl = cm_req_get_alt_sl(req_msg);
+		alt_path->pkey =
+			cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
+		alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
 		alt_path->mtu_selector = IB_SA_EQ;
-		alt_path->mtu = cm_req_get_path_mtu(req_msg);
+		alt_path->mtu =
+			IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
 		alt_path->rate_selector = IB_SA_EQ;
-		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
+		alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
 		alt_path->packet_life_time_selector = IB_SA_EQ;
 		alt_path->packet_life_time =
-			cm_req_get_alt_local_ack_timeout(req_msg);
+			IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
 		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
-		alt_path->service_id = req_msg->service_id;
+		alt_path->service_id =
+			cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
 
 		if (sa_path_is_roce(alt_path))
 			alt_path->roce.route_resolved = false;
@@ -1664,23 +1716,25 @@ static void cm_format_req_event(struct cm_work *work,
 	} else {
 		param->alternate_path = NULL;
 	}
-	param->remote_ca_guid = req_msg->local_ca_guid;
-	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
-	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
+	param->remote_ca_guid =
+		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
+	param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
+	param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
 	param->qp_type = cm_req_get_qp_type(req_msg);
-	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
-	param->responder_resources = cm_req_get_init_depth(req_msg);
-	param->initiator_depth = cm_req_get_resp_res(req_msg);
+	param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
+	param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
+	param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
 	param->local_cm_response_timeout =
-					cm_req_get_remote_resp_timeout(req_msg);
-	param->flow_control = cm_req_get_flow_ctrl(req_msg);
+		IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
+	param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
 	param->remote_cm_response_timeout =
-					cm_req_get_local_resp_timeout(req_msg);
-	param->retry_count = cm_req_get_retry_count(req_msg);
-	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
-	param->srq = cm_req_get_srq(req_msg);
+		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
+	param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
+	param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
+	param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
 	param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
-	work->cm_event.private_data = &req_msg->private_data;
+	work->cm_event.private_data =
+		IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
 }
 
 static void cm_process_work(struct cm_id_private *cm_id_priv,
@@ -1714,13 +1768,16 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
 			  const void *private_data, u8 private_data_len)
 {
 	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
-	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
-	mra_msg->local_comm_id = cm_id_priv->id.local_id;
-	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
-	cm_mra_set_service_timeout(mra_msg, service_timeout);
+	IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
+	IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
+		be32_to_cpu(cm_id_priv->id.local_id));
+	IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
+		be32_to_cpu(cm_id_priv->id.remote_id));
+	IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
 
 	if (private_data && private_data_len)
-		memcpy(mra_msg->private_data, private_data, private_data_len);
+		IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
+			    private_data_len);
 }
 
 static void cm_format_rej(struct cm_rej_msg *rej_msg,
@@ -1732,36 +1789,42 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg,
 			  u8 private_data_len)
 {
 	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
-	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
+	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
+		be32_to_cpu(cm_id_priv->id.remote_id));
 
 	switch(cm_id_priv->id.state) {
 	case IB_CM_REQ_RCVD:
-		rej_msg->local_comm_id = 0;
-		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
+		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
+		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
 		break;
 	case IB_CM_MRA_REQ_SENT:
-		rej_msg->local_comm_id = cm_id_priv->id.local_id;
-		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
+		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
+			be32_to_cpu(cm_id_priv->id.local_id));
+		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
 		break;
 	case IB_CM_REP_RCVD:
 	case IB_CM_MRA_REP_SENT:
-		rej_msg->local_comm_id = cm_id_priv->id.local_id;
-		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
+		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
+			be32_to_cpu(cm_id_priv->id.local_id));
+		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
 		break;
 	default:
-		rej_msg->local_comm_id = cm_id_priv->id.local_id;
-		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
+		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
+			be32_to_cpu(cm_id_priv->id.local_id));
+		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
+			CM_MSG_RESPONSE_OTHER);
 		break;
 	}
 
-	rej_msg->reason = cpu_to_be16(reason);
+	IBA_SET(CM_REJ_REASON, rej_msg, reason);
 	if (ari && ari_length) {
-		cm_rej_set_reject_info_len(rej_msg, ari_length);
-		memcpy(rej_msg->ari, ari, ari_length);
+		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
+		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
 	}
 
 	if (private_data && private_data_len)
-		memcpy(rej_msg->private_data, private_data, private_data_len);
+		IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
+			    private_data_len);
 }
 
 static void cm_dup_req_handler(struct cm_work *work,
@@ -1821,7 +1884,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
 	spin_lock_irq(&cm.lock);
 	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
 	if (timewait_info) {
-		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
 					   timewait_info->work.remote_id);
 		spin_unlock_irq(&cm.lock);
 		if (cur_cm_id_priv) {
@@ -1835,7 +1898,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
 	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
 	if (timewait_info) {
 		cm_cleanup_timewait(cm_id_priv->timewait_info);
-		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
 					   timewait_info->work.remote_id);
 
 		spin_unlock_irq(&cm.lock);
@@ -1851,8 +1914,9 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
 	}
 
 	/* Find matching listen request. */
-	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
-					   req_msg->service_id);
+	listen_cm_id_priv = cm_find_listen(
+		cm_id_priv->id.device,
+		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
 	if (!listen_cm_id_priv) {
 		cm_cleanup_timewait(cm_id_priv->timewait_info);
 		spin_unlock_irq(&cm.lock);
@@ -1877,24 +1941,32 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
  */
 static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
 {
-	if (!cm_req_get_primary_subnet_local(req_msg)) {
-		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
-			req_msg->primary_local_lid = ib_lid_be16(wc->slid);
-			cm_req_set_primary_sl(req_msg, wc->sl);
+	if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
+		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
+					req_msg)) == IB_LID_PERMISSIVE) {
+			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+				be16_to_cpu(ib_lid_be16(wc->slid)));
+			IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
 		}
 
-		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
-			req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
+		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
+					req_msg)) == IB_LID_PERMISSIVE)
+			IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
+				wc->dlid_path_bits);
 	}
 
-	if (!cm_req_get_alt_subnet_local(req_msg)) {
-		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
-			req_msg->alt_local_lid = ib_lid_be16(wc->slid);
-			cm_req_set_alt_sl(req_msg, wc->sl);
+	if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
+		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
+					req_msg)) == IB_LID_PERMISSIVE) {
+			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
+				be16_to_cpu(ib_lid_be16(wc->slid)));
+			IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
 		}
 
-		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
-			req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
+		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
+					req_msg)) == IB_LID_PERMISSIVE)
+			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
+				wc->dlid_path_bits);
 	}
 }
 
@@ -1914,7 +1986,8 @@ static int cm_req_handler(struct cm_work *work)
 		return PTR_ERR(cm_id);
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-	cm_id_priv->id.remote_id = req_msg->local_comm_id;
+	cm_id_priv->id.remote_id =
+		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
 	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
 				      work->mad_recv_wc->recv_buf.grh,
 				      &cm_id_priv->av);
@@ -1926,9 +1999,12 @@ static int cm_req_handler(struct cm_work *work)
 		ret = PTR_ERR(cm_id_priv->timewait_info);
 		goto destroy;
 	}
-	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
-	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
-	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
+	cm_id_priv->timewait_info->work.remote_id =
+		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
+	cm_id_priv->timewait_info->remote_ca_guid =
+		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
+	cm_id_priv->timewait_info->remote_qpn =
+		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
 
 	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
 	if (!listen_cm_id_priv) {
@@ -1940,7 +2016,8 @@ static int cm_req_handler(struct cm_work *work)
 
 	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = listen_cm_id_priv->id.context;
-	cm_id_priv->id.service_id = req_msg->service_id;
+	cm_id_priv->id.service_id =
+		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
 	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 
 	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
@@ -1957,10 +2034,11 @@ static int cm_req_handler(struct cm_work *work)
 		work->path[0].rec_type =
 			sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
 	} else {
-		cm_path_set_rec_type(work->port->cm_dev->ib_device,
-				     work->port->port_num,
-				     &work->path[0],
-				     &req_msg->primary_local_gid);
+		cm_path_set_rec_type(
+			work->port->cm_dev->ib_device, work->port->port_num,
+			&work->path[0],
+			IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
+					req_msg));
 	}
 	if (cm_req_has_alt_path(req_msg))
 		work->path[1].rec_type = work->path[0].rec_type;
@@ -2000,16 +2078,19 @@ static int cm_req_handler(struct cm_work *work)
 	}
 	cm_id_priv->tid = req_msg->hdr.tid;
 	cm_id_priv->timeout_ms = cm_convert_to_ms(
-					cm_req_get_local_resp_timeout(req_msg));
-	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
-	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
-	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
-	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
-	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
-	cm_id_priv->pkey = req_msg->pkey;
-	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
-	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
+		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
+	cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
+	cm_id_priv->remote_qpn =
+		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
+	cm_id_priv->initiator_depth =
+		IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
+	cm_id_priv->responder_resources =
+		IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
+	cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
+	cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
+	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
+	cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
+	cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
 	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
 
 	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
@@ -2032,29 +2113,35 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
 			  struct ib_cm_rep_param *param)
 {
 	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
-	rep_msg->local_comm_id = cm_id_priv->id.local_id;
-	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
-	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
-	rep_msg->resp_resources = param->responder_resources;
-	cm_rep_set_target_ack_delay(rep_msg,
-				    cm_id_priv->av.port->cm_dev->ack_delay);
-	cm_rep_set_failover(rep_msg, param->failover_accepted);
-	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
-	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
+	IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
+		be32_to_cpu(cm_id_priv->id.local_id));
+	IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
+		be32_to_cpu(cm_id_priv->id.remote_id));
+	IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
+	IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
+		param->responder_resources);
+	IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
+		cm_id_priv->av.port->cm_dev->ack_delay);
+	IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
+	IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
+	IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
+		be64_to_cpu(cm_id_priv->id.device->node_guid));
 
 	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
-		rep_msg->initiator_depth = param->initiator_depth;
-		cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
-		cm_rep_set_srq(rep_msg, param->srq);
-		cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
+		IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
+			param->initiator_depth);
+		IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
+			param->flow_control);
+		IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
+		IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
 	} else {
-		cm_rep_set_srq(rep_msg, 1);
-		cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
+		IBA_SET(CM_REP_SRQ, rep_msg, 1);
+		IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
 	}
 
 	if (param->private_data && param->private_data_len)
-		memcpy(rep_msg->private_data, param->private_data,
-		       param->private_data_len);
+		IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
+			    param->private_data_len);
 }
 
 int ib_send_cm_rep(struct ib_cm_id *cm_id,
@@ -2100,7 +2187,7 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
 	cm_id_priv->msg = msg;
 	cm_id_priv->initiator_depth = param->initiator_depth;
 	cm_id_priv->responder_resources = param->responder_resources;
-	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
+	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
 	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
 
 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -2114,11 +2201,14 @@ static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
 			  u8 private_data_len)
 {
 	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
-	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
-	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
+	IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
+		be32_to_cpu(cm_id_priv->id.local_id));
+	IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
+		be32_to_cpu(cm_id_priv->id.remote_id));
 
 	if (private_data && private_data_len)
-		memcpy(rtu_msg->private_data, private_data, private_data_len);
+		IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
+			    private_data_len);
 }
 
 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
@@ -2181,18 +2271,20 @@ static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
 
 	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
 	param = &work->cm_event.param.rep_rcvd;
-	param->remote_ca_guid = rep_msg->local_ca_guid;
-	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
+	param->remote_ca_guid =
+		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
+	param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
 	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
-	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
-	param->responder_resources = rep_msg->initiator_depth;
-	param->initiator_depth = rep_msg->resp_resources;
-	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
-	param->failover_accepted = cm_rep_get_failover(rep_msg);
-	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
-	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
-	param->srq = cm_rep_get_srq(rep_msg);
-	work->cm_event.private_data = &rep_msg->private_data;
+	param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
+	param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
+	param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
+	param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
+	param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
+	param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
+	param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
+	param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
+	work->cm_event.private_data =
+		IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
 }
 
 static void cm_dup_rep_handler(struct cm_work *work)
@@ -2203,8 +2295,9 @@ static void cm_dup_rep_handler(struct cm_work *work)
 	int ret;
 
 	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
-	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
-				   rep_msg->local_comm_id);
+	cm_id_priv = cm_acquire_id(
+		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
+		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
 	if (!cm_id_priv)
 		return;
 
@@ -2248,11 +2341,12 @@ static int cm_rep_handler(struct cm_work *work)
 	struct cm_timewait_info *timewait_info;
 
 	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
-	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
+	cm_id_priv = cm_acquire_id(
+		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
 	if (!cm_id_priv) {
 		cm_dup_rep_handler(work);
 		pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
-			 be32_to_cpu(rep_msg->remote_comm_id));
+			 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
 		return -EINVAL;
 	}
 
@@ -2266,15 +2360,18 @@ static int cm_rep_handler(struct cm_work *work)
 	default:
 		spin_unlock_irq(&cm_id_priv->lock);
 		ret = -EINVAL;
-		pr_debug("%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
-			 __func__, cm_id_priv->id.state,
-			 be32_to_cpu(rep_msg->local_comm_id),
-			 be32_to_cpu(rep_msg->remote_comm_id));
+		pr_debug(
+			"%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
+			__func__, cm_id_priv->id.state,
+			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
+			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
 		goto error;
 	}
 
-	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
-	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
+	cm_id_priv->timewait_info->work.remote_id =
+		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
+	cm_id_priv->timewait_info->remote_ca_guid =
+		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
 	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
 
 	spin_lock(&cm.lock);
@@ -2284,7 +2381,7 @@ static int cm_rep_handler(struct cm_work *work)
 		spin_unlock_irq(&cm_id_priv->lock);
 		ret = -EINVAL;
 		pr_debug("%s: Failed to insert remote id %d\n", __func__,
-			 be32_to_cpu(rep_msg->remote_comm_id));
+			 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
 		goto error;
 	}
 	/* Check for a stale connection. */
@@ -2293,7 +2390,7 @@ static int cm_rep_handler(struct cm_work *work)
 		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
 			 &cm.remote_id_table);
 		cm_id_priv->timewait_info->inserted_remote_id = 0;
-		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
 					   timewait_info->work.remote_id);
 
 		spin_unlock(&cm.lock);
@@ -2302,9 +2399,10 @@ static int cm_rep_handler(struct cm_work *work)
 			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
 			     NULL, 0);
 		ret = -EINVAL;
-		pr_debug("%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
-			 __func__, be32_to_cpu(rep_msg->local_comm_id),
-			 be32_to_cpu(rep_msg->remote_comm_id));
+		pr_debug(
+			"%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
+			__func__, IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
+			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
 
 		if (cur_cm_id_priv) {
 			cm_id = &cur_cm_id_priv->id;
@@ -2317,13 +2415,17 @@ static int cm_rep_handler(struct cm_work *work)
 	spin_unlock(&cm.lock);
 
 	cm_id_priv->id.state = IB_CM_REP_RCVD;
-	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
+	cm_id_priv->id.remote_id =
+		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
 	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
-	cm_id_priv->initiator_depth = rep_msg->resp_resources;
-	cm_id_priv->responder_resources = rep_msg->initiator_depth;
-	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
-	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
-	cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
+	cm_id_priv->initiator_depth =
+		IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
+	cm_id_priv->responder_resources =
+		IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
+	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
+	cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
+	cm_id_priv->target_ack_delay =
+		IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
 	cm_id_priv->av.timeout =
 			cm_ack_timeout(cm_id_priv->target_ack_delay,
 				       cm_id_priv->av.timeout - 1);
@@ -2389,12 +2491,14 @@ static int cm_rtu_handler(struct cm_work *work)
 	int ret;
 
 	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
-	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
-				   rtu_msg->local_comm_id);
+	cm_id_priv = cm_acquire_id(
+		cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
+		cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
 	if (!cm_id_priv)
 		return -EINVAL;
 
-	work->cm_event.private_data = &rtu_msg->private_data;
+	work->cm_event.private_data =
+		IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);
 
 	spin_lock_irq(&cm_id_priv->lock);
 	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
@@ -2429,12 +2533,16 @@ static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
 {
 	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
 			  cm_form_tid(cm_id_priv));
-	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
-	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
-	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
+	IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
+		be32_to_cpu(cm_id_priv->id.local_id));
+	IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
+		be32_to_cpu(cm_id_priv->id.remote_id));
+	IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
+		be32_to_cpu(cm_id_priv->remote_qpn));
 
 	if (private_data && private_data_len)
-		memcpy(dreq_msg->private_data, private_data, private_data_len);
+		IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
+			    private_data_len);
 }
 
 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
@@ -2494,11 +2602,14 @@ static void cm_format_drep(struct cm_drep_msg *drep_msg,
 			  u8 private_data_len)
 {
 	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
-	drep_msg->local_comm_id = cm_id_priv->id.local_id;
-	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
+	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
+		be32_to_cpu(cm_id_priv->id.local_id));
+	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
+		be32_to_cpu(cm_id_priv->id.remote_id));
 
 	if (private_data && private_data_len)
-		memcpy(drep_msg->private_data, private_data, private_data_len);
+		IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
+			    private_data_len);
 }
 
 int ib_send_cm_drep(struct ib_cm_id *cm_id,
@@ -2566,8 +2677,10 @@ static int cm_issue_drep(struct cm_port *port,
 	drep_msg = (struct cm_drep_msg *) msg->mad;
 
 	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
-	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
-	drep_msg->local_comm_id = dreq_msg->remote_comm_id;
+	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
+		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
+	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
+		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
 
 	ret = ib_post_send_mad(msg, NULL);
 	if (ret)
@@ -2584,22 +2697,26 @@ static int cm_dreq_handler(struct cm_work *work)
 	int ret;
 
 	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
-	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
-				   dreq_msg->local_comm_id);
+	cm_id_priv = cm_acquire_id(
+		cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
+		cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
 	if (!cm_id_priv) {
 		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
 				counter[CM_DREQ_COUNTER]);
 		cm_issue_drep(work->port, work->mad_recv_wc);
-		pr_debug("%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
-			 __func__, be32_to_cpu(dreq_msg->local_comm_id),
-			 be32_to_cpu(dreq_msg->remote_comm_id));
+		pr_debug(
+			"%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
+			__func__, IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
+			IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
 		return -EINVAL;
 	}
 
-	work->cm_event.private_data = &dreq_msg->private_data;
+	work->cm_event.private_data =
+		IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);
 
 	spin_lock_irq(&cm_id_priv->lock);
-	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
+	if (cm_id_priv->local_qpn !=
+	    cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
 		goto unlock;
 
 	switch (cm_id_priv->id.state) {
@@ -2665,12 +2782,14 @@ static int cm_drep_handler(struct cm_work *work)
 	int ret;
 
 	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
-	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
-				   drep_msg->local_comm_id);
+	cm_id_priv = cm_acquire_id(
+		cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
+		cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
 	if (!cm_id_priv)
 		return -EINVAL;
 
-	work->cm_event.private_data = &drep_msg->private_data;
+	work->cm_event.private_data =
+		IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);
 
 	spin_lock_irq(&cm_id_priv->lock);
 	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
@@ -2766,10 +2885,11 @@ static void cm_format_rej_event(struct cm_work *work)
 
 	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
 	param = &work->cm_event.param.rej_rcvd;
-	param->ari = rej_msg->ari;
-	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
-	param->reason = __be16_to_cpu(rej_msg->reason);
-	work->cm_event.private_data = &rej_msg->private_data;
+	param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
+	param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
+	param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
+	work->cm_event.private_data =
+		IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
 }
 
 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
@@ -2778,29 +2898,29 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
 	struct cm_id_private *cm_id_priv;
 	__be32 remote_id;
 
-	remote_id = rej_msg->local_comm_id;
+	remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
 
-	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
+	if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
 		spin_lock_irq(&cm.lock);
-		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
-						  remote_id);
+		timewait_info = cm_find_remote_id(
+			*((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
+			remote_id);
 		if (!timewait_info) {
 			spin_unlock_irq(&cm.lock);
 			return NULL;
 		}
-		cm_id_priv = xa_load(&cm.local_id_table,
-				cm_local_id(timewait_info->work.local_id));
-		if (cm_id_priv) {
-			if (cm_id_priv->id.remote_id == remote_id)
-				refcount_inc(&cm_id_priv->refcount);
-			else
-				cm_id_priv = NULL;
-		}
+		cm_id_priv =
+			cm_acquire_id(timewait_info->work.local_id, remote_id);
 		spin_unlock_irq(&cm.lock);
-	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
-		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
+	} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
+		   CM_MSG_RESPONSE_REQ)
+		cm_id_priv = cm_acquire_id(
+			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
+			0);
 	else
-		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
+		cm_id_priv = cm_acquire_id(
+			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
+			remote_id);
 
 	return cm_id_priv;
 }
@@ -2828,7 +2948,7 @@ static int cm_rej_handler(struct cm_work *work)
 		/* fall through */
 	case IB_CM_REQ_RCVD:
 	case IB_CM_MRA_REQ_SENT:
-		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
+		if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
 			cm_enter_timewait(cm_id_priv);
 		else
 			cm_reset_to_idle(cm_id_priv);
@@ -2958,13 +3078,16 @@ EXPORT_SYMBOL(ib_send_cm_mra);
 
 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
 {
-	switch (cm_mra_get_msg_mraed(mra_msg)) {
+	switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
 	case CM_MSG_RESPONSE_REQ:
-		return cm_acquire_id(mra_msg->remote_comm_id, 0);
+		return cm_acquire_id(
+			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
+			0);
 	case CM_MSG_RESPONSE_REP:
 	case CM_MSG_RESPONSE_OTHER:
-		return cm_acquire_id(mra_msg->remote_comm_id,
-				     mra_msg->local_comm_id);
+		return cm_acquire_id(
+			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
+			cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
 	default:
 		return NULL;
 	}
@@ -2981,30 +3104,34 @@ static int cm_mra_handler(struct cm_work *work)
 	if (!cm_id_priv)
 		return -EINVAL;
 
-	work->cm_event.private_data = &mra_msg->private_data;
+	work->cm_event.private_data =
+		IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
 	work->cm_event.param.mra_rcvd.service_timeout =
-					cm_mra_get_service_timeout(mra_msg);
-	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
+		IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
+	timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
 		  cm_convert_to_ms(cm_id_priv->av.timeout);
 
 	spin_lock_irq(&cm_id_priv->lock);
 	switch (cm_id_priv->id.state) {
 	case IB_CM_REQ_SENT:
-		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
+		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
+			    CM_MSG_RESPONSE_REQ ||
 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
 				  cm_id_priv->msg, timeout))
 			goto out;
 		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
 		break;
 	case IB_CM_REP_SENT:
-		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
+		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
+			    CM_MSG_RESPONSE_REP ||
 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
 				  cm_id_priv->msg, timeout))
 			goto out;
 		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
 		break;
 	case IB_CM_ESTABLISHED:
-		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
+		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
+			    CM_MSG_RESPONSE_OTHER ||
 		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
 				  cm_id_priv->msg, timeout)) {
@@ -3046,117 +3173,23 @@ static int cm_mra_handler(struct cm_work *work)
 	return -EINVAL;
 }
 
-static void cm_format_lap(struct cm_lap_msg *lap_msg,
-			  struct cm_id_private *cm_id_priv,
-			  struct sa_path_rec *alternate_path,
-			  const void *private_data,
-			  u8 private_data_len)
-{
-	bool alt_ext = false;
-
-	if (alternate_path->rec_type == SA_PATH_REC_TYPE_OPA)
-		alt_ext = opa_is_extended_lid(alternate_path->opa.dlid,
-					      alternate_path->opa.slid);
-	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
-			  cm_form_tid(cm_id_priv));
-	lap_msg->local_comm_id = cm_id_priv->id.local_id;
-	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
-	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
-	/* todo: need remote CM response timeout */
-	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
-	lap_msg->alt_local_lid =
-		htons(ntohl(sa_path_get_slid(alternate_path)));
-	lap_msg->alt_remote_lid =
-		htons(ntohl(sa_path_get_dlid(alternate_path)));
-	lap_msg->alt_local_gid = alternate_path->sgid;
-	lap_msg->alt_remote_gid = alternate_path->dgid;
-	if (alt_ext) {
-		lap_msg->alt_local_gid.global.interface_id
-			= OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.slid));
-		lap_msg->alt_remote_gid.global.interface_id
-			= OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.dlid));
-	}
-	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
-	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
-	lap_msg->alt_hop_limit = alternate_path->hop_limit;
-	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
-	cm_lap_set_sl(lap_msg, alternate_path->sl);
-	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
-	cm_lap_set_local_ack_timeout(lap_msg,
-		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
-			       alternate_path->packet_life_time));
-
-	if (private_data && private_data_len)
-		memcpy(lap_msg->private_data, private_data, private_data_len);
-}
-
-int ib_send_cm_lap(struct ib_cm_id *cm_id,
-		   struct sa_path_rec *alternate_path,
-		   const void *private_data,
-		   u8 private_data_len)
-{
-	struct cm_id_private *cm_id_priv;
-	struct ib_mad_send_buf *msg;
-	unsigned long flags;
-	int ret;
-
-	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
-		return -EINVAL;
-
-	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
-	if (cm_id->state != IB_CM_ESTABLISHED ||
-	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
-	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	ret = cm_init_av_by_path(alternate_path, NULL, &cm_id_priv->alt_av,
-				 cm_id_priv);
-	if (ret)
-		goto out;
-	cm_id_priv->alt_av.timeout =
-			cm_ack_timeout(cm_id_priv->target_ack_delay,
-				       cm_id_priv->alt_av.timeout - 1);
-
-	ret = cm_alloc_msg(cm_id_priv, &msg);
-	if (ret)
-		goto out;
-
-	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
-		      alternate_path, private_data, private_data_len);
-	msg->timeout_ms = cm_id_priv->timeout_ms;
-	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
-
-	ret = ib_post_send_mad(msg, NULL);
-	if (ret) {
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-		cm_free_msg(msg);
-		return ret;
-	}
-
-	cm_id->lap_state = IB_CM_LAP_SENT;
-	cm_id_priv->msg = msg;
-
-out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-	return ret;
-}
-EXPORT_SYMBOL(ib_send_cm_lap);
-
 static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
 					struct sa_path_rec *path)
 {
 	u32 lid;
 
 	if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
-		sa_path_set_dlid(path, ntohs(lap_msg->alt_local_lid));
-		sa_path_set_slid(path, ntohs(lap_msg->alt_remote_lid));
+		sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
+					       lap_msg));
+		sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
+					       lap_msg));
 	} else {
-		lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid);
+		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+			CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
 		sa_path_set_dlid(path, lid);
 
-		lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid);
+		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+			CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
 		sa_path_set_slid(path, lid);
 	}
 }
@@ -3165,20 +3198,23 @@ static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
 				    struct sa_path_rec *path,
 				    struct cm_lap_msg *lap_msg)
 {
-	path->dgid = lap_msg->alt_local_gid;
-	path->sgid = lap_msg->alt_remote_gid;
-	path->flow_label = cm_lap_get_flow_label(lap_msg);
-	path->hop_limit = lap_msg->alt_hop_limit;
-	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
+	path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
+	path->sgid =
+		*IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
+	path->flow_label =
+		cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
+	path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
+	path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
 	path->reversible = 1;
 	path->pkey = cm_id_priv->pkey;
-	path->sl = cm_lap_get_sl(lap_msg);
+	path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
 	path->mtu_selector = IB_SA_EQ;
 	path->mtu = cm_id_priv->path_mtu;
 	path->rate_selector = IB_SA_EQ;
-	path->rate = cm_lap_get_packet_rate(lap_msg);
+	path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
 	path->packet_life_time_selector = IB_SA_EQ;
-	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
+	path->packet_life_time =
+		IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
 	path->packet_life_time -= (path->packet_life_time > 0);
 	cm_format_path_lid_from_lap(lap_msg, path);
 }
@@ -3200,20 +3236,22 @@ static int cm_lap_handler(struct cm_work *work)
 
 	/* todo: verify LAP request and send reject APR if invalid. */
 	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
-	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
-				   lap_msg->local_comm_id);
+	cm_id_priv = cm_acquire_id(
+		cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
+		cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
 	if (!cm_id_priv)
 		return -EINVAL;
 
 	param = &work->cm_event.param.lap_rcvd;
 	memset(&work->path[0], 0, sizeof(work->path[1]));
 	cm_path_set_rec_type(work->port->cm_dev->ib_device,
-			     work->port->port_num,
-			     &work->path[0],
-			     &lap_msg->alt_local_gid);
+			     work->port->port_num, &work->path[0],
+			     IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
+					     lap_msg));
 	param->alternate_path = &work->path[0];
 	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
-	work->cm_event.private_data = &lap_msg->private_data;
+	work->cm_event.private_data =
+		IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);
 
 	spin_lock_irq(&cm_id_priv->lock);
 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
@@ -3278,72 +3316,6 @@ deref:	cm_deref_id(cm_id_priv);
 	return -EINVAL;
 }
 
-static void cm_format_apr(struct cm_apr_msg *apr_msg,
-			  struct cm_id_private *cm_id_priv,
-			  enum ib_cm_apr_status status,
-			  void *info,
-			  u8 info_length,
-			  const void *private_data,
-			  u8 private_data_len)
-{
-	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
-	apr_msg->local_comm_id = cm_id_priv->id.local_id;
-	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
-	apr_msg->ap_status = (u8) status;
-
-	if (info && info_length) {
-		apr_msg->info_length = info_length;
-		memcpy(apr_msg->info, info, info_length);
-	}
-
-	if (private_data && private_data_len)
-		memcpy(apr_msg->private_data, private_data, private_data_len);
-}
-
-int ib_send_cm_apr(struct ib_cm_id *cm_id,
-		   enum ib_cm_apr_status status,
-		   void *info,
-		   u8 info_length,
-		   const void *private_data,
-		   u8 private_data_len)
-{
-	struct cm_id_private *cm_id_priv;
-	struct ib_mad_send_buf *msg;
-	unsigned long flags;
-	int ret;
-
-	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
-	    (info && info_length > IB_CM_APR_INFO_LENGTH))
-		return -EINVAL;
-
-	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
-	if (cm_id->state != IB_CM_ESTABLISHED ||
-	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
-	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	ret = cm_alloc_msg(cm_id_priv, &msg);
-	if (ret)
-		goto out;
-
-	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
-		      info, info_length, private_data, private_data_len);
-	ret = ib_post_send_mad(msg, NULL);
-	if (ret) {
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-		cm_free_msg(msg);
-		return ret;
-	}
-
-	cm_id->lap_state = IB_CM_LAP_IDLE;
-out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-	return ret;
-}
-EXPORT_SYMBOL(ib_send_cm_apr);
-
 static int cm_apr_handler(struct cm_work *work)
 {
 	struct cm_id_private *cm_id_priv;
@@ -3358,15 +3330,20 @@ static int cm_apr_handler(struct cm_work *work)
 		return -EINVAL;
 
 	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
-	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
-				   apr_msg->local_comm_id);
+	cm_id_priv = cm_acquire_id(
+		cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
+		cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
 	if (!cm_id_priv)
 		return -EINVAL; /* Unmatched reply. */
 
-	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
-	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
-	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
-	work->cm_event.private_data = &apr_msg->private_data;
+	work->cm_event.param.apr_rcvd.ap_status =
+		IBA_GET(CM_APR_AR_STATUS, apr_msg);
+	work->cm_event.param.apr_rcvd.apr_info =
+		IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
+	work->cm_event.param.apr_rcvd.info_len =
+		IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
+	work->cm_event.private_data =
+		IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);
 
 	spin_lock_irq(&cm_id_priv->lock);
 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
@@ -3438,13 +3415,16 @@ static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
 {
 	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
 			  cm_form_tid(cm_id_priv));
-	sidr_req_msg->request_id = cm_id_priv->id.local_id;
-	sidr_req_msg->pkey = param->path->pkey;
-	sidr_req_msg->service_id = param->service_id;
+	IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
+		be32_to_cpu(cm_id_priv->id.local_id));
+	IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
+		be16_to_cpu(param->path->pkey));
+	IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
+		be64_to_cpu(param->service_id));
 
 	if (param->private_data && param->private_data_len)
-		memcpy(sidr_req_msg->private_data, param->private_data,
-		       param->private_data_len);
+		IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
+			    param->private_data, param->private_data_len);
 }
 
 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
@@ -3508,13 +3488,15 @@ static void cm_format_sidr_req_event(struct cm_work *work,
 	sidr_req_msg = (struct cm_sidr_req_msg *)
 				work->mad_recv_wc->recv_buf.mad;
 	param = &work->cm_event.param.sidr_req_rcvd;
-	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
+	param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
 	param->listen_id = listen_id;
-	param->service_id = sidr_req_msg->service_id;
+	param->service_id =
+		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
 	param->bth_pkey = cm_get_bth_pkey(work);
 	param->port = work->port->port_num;
 	param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
-	work->cm_event.private_data = &sidr_req_msg->private_data;
+	work->cm_event.private_data =
+		IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
 }
 
 static int cm_sidr_req_handler(struct cm_work *work)
@@ -3542,7 +3524,8 @@ static int cm_sidr_req_handler(struct cm_work *work)
 	if (ret)
 		goto out;
 
-	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
+	cm_id_priv->id.remote_id =
+		cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
 	cm_id_priv->tid = sidr_req_msg->hdr.tid;
 	atomic_inc(&cm_id_priv->work_count);
 
@@ -3555,8 +3538,9 @@ static int cm_sidr_req_handler(struct cm_work *work)
 		goto out; /* Duplicate message. */
 	}
 	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
-	cur_cm_id_priv = cm_find_listen(cm_id->device,
-					sidr_req_msg->service_id);
+	cur_cm_id_priv = cm_find_listen(
+		cm_id->device,
+		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg)));
 	if (!cur_cm_id_priv) {
 		spin_unlock_irq(&cm.lock);
 		cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
@@ -3568,7 +3552,8 @@ static int cm_sidr_req_handler(struct cm_work *work)
 
 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = cur_cm_id_priv->id.context;
-	cm_id_priv->id.service_id = sidr_req_msg->service_id;
+	cm_id_priv->id.service_id =
+		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
 	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 
 	cm_format_sidr_req_event(work, cm_id_priv, &cur_cm_id_priv->id);
@@ -3586,18 +3571,21 @@ static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
 {
 	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
 			  cm_id_priv->tid);
-	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
-	sidr_rep_msg->status = param->status;
-	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
-	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
-	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
+	IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
+		be32_to_cpu(cm_id_priv->id.remote_id));
+	IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
+	IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
+	IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
+		be64_to_cpu(cm_id_priv->id.service_id));
+	IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
 
 	if (param->info && param->info_length)
-		memcpy(sidr_rep_msg->info, param->info, param->info_length);
+		IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
+			    param->info, param->info_length);
 
 	if (param->private_data && param->private_data_len)
-		memcpy(sidr_rep_msg->private_data, param->private_data,
-		       param->private_data_len);
+		IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
+			    param->private_data, param->private_data_len);
 }
 
 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
@@ -3657,13 +3645,16 @@ static void cm_format_sidr_rep_event(struct cm_work *work,
 	sidr_rep_msg = (struct cm_sidr_rep_msg *)
 				work->mad_recv_wc->recv_buf.mad;
 	param = &work->cm_event.param.sidr_rep_rcvd;
-	param->status = sidr_rep_msg->status;
-	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
-	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
-	param->info = &sidr_rep_msg->info;
-	param->info_len = sidr_rep_msg->info_length;
+	param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
+	param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
+	param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
+	param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
+				      sidr_rep_msg);
+	param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
+				  sidr_rep_msg);
 	param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
-	work->cm_event.private_data = &sidr_rep_msg->private_data;
+	work->cm_event.private_data =
+		IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
 }
 
 static int cm_sidr_rep_handler(struct cm_work *work)
@@ -3673,7 +3664,8 @@ static int cm_sidr_rep_handler(struct cm_work *work)
 
 	sidr_rep_msg = (struct cm_sidr_rep_msg *)
 				work->mad_recv_wc->recv_buf.mad;
-	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
+	cm_id_priv = cm_acquire_id(
+		cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
 	if (!cm_id_priv)
 		return -EINVAL; /* Unmatched reply. */
 
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 92d7260..0cc4065 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -8,6 +8,7 @@
 #ifndef CM_MSGS_H
 #define CM_MSGS_H
 
+#include <rdma/ibta_vol1_c12.h>
 #include <rdma/ib_mad.h>
 #include <rdma/ib_cm.h>
 
@@ -18,120 +19,14 @@
 
 #define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */
 
-struct cm_req_msg {
-	struct ib_mad_hdr hdr;
-
-	__be32 local_comm_id;
-	__be32 rsvd4;
-	__be64 service_id;
-	__be64 local_ca_guid;
-	__be32 rsvd24;
-	__be32 local_qkey;
-	/* local QPN:24, responder resources:8 */
-	__be32 offset32;
-	/* local EECN:24, initiator depth:8 */
-	__be32 offset36;
-	/*
-	 * remote EECN:24, remote CM response timeout:5,
-	 * transport service type:2, end-to-end flow control:1
-	 */
-	__be32 offset40;
-	/* starting PSN:24, local CM response timeout:5, retry count:3 */
-	__be32 offset44;
-	__be16 pkey;
-	/* path MTU:4, RDC exists:1, RNR retry count:3. */
-	u8 offset50;
-	/* max CM Retries:4, SRQ:1, extended transport type:3 */
-	u8 offset51;
-
-	__be16 primary_local_lid;
-	__be16 primary_remote_lid;
-	union ib_gid primary_local_gid;
-	union ib_gid primary_remote_gid;
-	/* flow label:20, rsvd:6, packet rate:6 */
-	__be32 primary_offset88;
-	u8 primary_traffic_class;
-	u8 primary_hop_limit;
-	/* SL:4, subnet local:1, rsvd:3 */
-	u8 primary_offset94;
-	/* local ACK timeout:5, rsvd:3 */
-	u8 primary_offset95;
-
-	__be16 alt_local_lid;
-	__be16 alt_remote_lid;
-	union ib_gid alt_local_gid;
-	union ib_gid alt_remote_gid;
-	/* flow label:20, rsvd:6, packet rate:6 */
-	__be32 alt_offset132;
-	u8 alt_traffic_class;
-	u8 alt_hop_limit;
-	/* SL:4, subnet local:1, rsvd:3 */
-	u8 alt_offset138;
-	/* local ACK timeout:5, rsvd:3 */
-	u8 alt_offset139;
-
-	u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
-
-} __packed;
-
-static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
-{
-	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
-}
-
-static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
-{
-	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
-					 (be32_to_cpu(req_msg->offset32) &
-					  0x000000FF));
-}
-
-static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
-{
-	return (u8) be32_to_cpu(req_msg->offset32);
-}
-
-static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
-{
-	req_msg->offset32 = cpu_to_be32(resp_res |
-					(be32_to_cpu(req_msg->offset32) &
-					 0xFFFFFF00));
-}
-
-static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
-{
-	return (u8) be32_to_cpu(req_msg->offset36);
-}
-
-static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
-					 u8 init_depth)
-{
-	req_msg->offset36 = cpu_to_be32(init_depth |
-					(be32_to_cpu(req_msg->offset36) &
-					 0xFFFFFF00));
-}
-
-static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
-{
-	return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
-}
-
-static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
-						  u8 resp_timeout)
-{
-	req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
-					 (be32_to_cpu(req_msg->offset40) &
-					  0xFFFFFF07));
-}
-
 static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
 {
-	u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
+	u8 transport_type = IBA_GET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg);
 	switch(transport_type) {
 	case 0: return IB_QPT_RC;
 	case 1: return IB_QPT_UC;
 	case 3:
-		switch (req_msg->offset51 & 0x7) {
+		switch (IBA_GET(CM_REQ_EXTENDED_TRANSPORT_TYPE, req_msg)) {
 		case 1: return IB_QPT_XRC_TGT;
 		default: return 0;
 		}
@@ -144,240 +39,17 @@ static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
 {
 	switch(qp_type) {
 	case IB_QPT_UC:
-		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
-						  req_msg->offset40) &
-						   0xFFFFFFF9) | 0x2);
+		IBA_SET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg, 1);
 		break;
 	case IB_QPT_XRC_INI:
-		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
-						 req_msg->offset40) &
-						   0xFFFFFFF9) | 0x6);
-		req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
+		IBA_SET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg, 3);
+		IBA_SET(CM_REQ_EXTENDED_TRANSPORT_TYPE, req_msg, 1);
 		break;
 	default:
-		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
-						 req_msg->offset40) &
-						  0xFFFFFFF9);
+		IBA_SET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg, 0);
 	}
 }
 
-static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
-{
-	return be32_to_cpu(req_msg->offset40) & 0x1;
-}
-
-static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
-					u8 flow_ctrl)
-{
-	req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
-					 (be32_to_cpu(req_msg->offset40) &
-					  0xFFFFFFFE));
-}
-
-static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
-{
-	return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
-}
-
-static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
-					   __be32 starting_psn)
-{
-	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
-			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
-}
-
-static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
-{
-	return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
-}
-
-static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
-						 u8 resp_timeout)
-{
-	req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
-			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
-}
-
-static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
-{
-	return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
-}
-
-static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
-					  u8 retry_count)
-{
-	req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
-			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
-}
-
-static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
-{
-	return req_msg->offset50 >> 4;
-}
-
-static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
-{
-	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
-}
-
-static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
-{
-	return req_msg->offset50 & 0x7;
-}
-
-static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
-					      u8 rnr_retry_count)
-{
-	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
-				  (rnr_retry_count & 0x7));
-}
-
-static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
-{
-	return req_msg->offset51 >> 4;
-}
-
-static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
-					     u8 retries)
-{
-	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
-}
-
-static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
-{
-	return (req_msg->offset51 & 0x8) >> 3;
-}
-
-static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
-{
-	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
-				  ((srq & 0x1) << 3));
-}
-
-static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
-{
-	return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
-}
-
-static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
-						 __be32 flow_label)
-{
-	req_msg->primary_offset88 = cpu_to_be32(
-				    (be32_to_cpu(req_msg->primary_offset88) &
-				     0x00000FFF) |
-				     (be32_to_cpu(flow_label) << 12));
-}
-
-static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
-{
-	return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
-}
-
-static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
-						  u8 rate)
-{
-	req_msg->primary_offset88 = cpu_to_be32(
-				    (be32_to_cpu(req_msg->primary_offset88) &
-				     0xFFFFFFC0) | (rate & 0x3F));
-}
-
-static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
-{
-	return (u8) (req_msg->primary_offset94 >> 4);
-}
-
-static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
-{
-	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
-					  (sl << 4));
-}
-
-static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
-{
-	return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
-}
-
-static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
-						   u8 subnet_local)
-{
-	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
-					  ((subnet_local & 0x1) << 3));
-}
-
-static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
-{
-	return (u8) (req_msg->primary_offset95 >> 3);
-}
-
-static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
-							u8 local_ack_timeout)
-{
-	req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
-					  (local_ack_timeout << 3));
-}
-
-static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
-{
-	return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
-}
-
-static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
-					     __be32 flow_label)
-{
-	req_msg->alt_offset132 = cpu_to_be32(
-				 (be32_to_cpu(req_msg->alt_offset132) &
-				  0x00000FFF) |
-				  (be32_to_cpu(flow_label) << 12));
-}
-
-static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
-{
-	return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
-}
-
-static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
-					      u8 rate)
-{
-	req_msg->alt_offset132 = cpu_to_be32(
-				 (be32_to_cpu(req_msg->alt_offset132) &
-				  0xFFFFFFC0) | (rate & 0x3F));
-}
-
-static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
-{
-	return (u8) (req_msg->alt_offset138 >> 4);
-}
-
-static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
-{
-	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
-				       (sl << 4));
-}
-
-static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
-{
-	return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
-}
-
-static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
-					       u8 subnet_local)
-{
-	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
-				       ((subnet_local & 0x1) << 3));
-}
-
-static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
-{
-	return (u8) (req_msg->alt_offset139 >> 3);
-}
-
-static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
-						    u8 local_ack_timeout)
-{
-	req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
-				       (local_ack_timeout << 3));
-}
-
 /* Message REJected or MRAed */
 enum cm_msg_response {
 	CM_MSG_RESPONSE_REQ = 0x0,
@@ -385,419 +57,12 @@ enum cm_msg_response {
 	CM_MSG_RESPONSE_OTHER = 0x2
 };
 
- struct cm_mra_msg {
-	struct ib_mad_hdr hdr;
-
-	__be32 local_comm_id;
-	__be32 remote_comm_id;
-	/* message MRAed:2, rsvd:6 */
-	u8 offset8;
-	/* service timeout:5, rsvd:3 */
-	u8 offset9;
-
-	u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];
-
-} __packed;
-
-static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
-{
-	return (u8) (mra_msg->offset8 >> 6);
-}
-
-static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
-{
-	mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
-}
-
-static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
-{
-	return (u8) (mra_msg->offset9 >> 3);
-}
-
-static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
-					      u8 service_timeout)
-{
-	mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
-				 (service_timeout << 3));
-}
-
-struct cm_rej_msg {
-	struct ib_mad_hdr hdr;
-
-	__be32 local_comm_id;
-	__be32 remote_comm_id;
-	/* message REJected:2, rsvd:6 */
-	u8 offset8;
-	/* reject info length:7, rsvd:1. */
-	u8 offset9;
-	__be16 reason;
-	u8 ari[IB_CM_REJ_ARI_LENGTH];
-
-	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
-
-} __packed;
-
-static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
-{
-	return (u8) (rej_msg->offset8 >> 6);
-}
-
-static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
-{
-	rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
-}
-
-static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
-{
-	return (u8) (rej_msg->offset9 >> 1);
-}
-
-static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
-					      u8 len)
-{
-	rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
-}
-
-struct cm_rep_msg {
-	struct ib_mad_hdr hdr;
-
-	__be32 local_comm_id;
-	__be32 remote_comm_id;
-	__be32 local_qkey;
-	/* local QPN:24, rsvd:8 */
-	__be32 offset12;
-	/* local EECN:24, rsvd:8 */
-	__be32 offset16;
-	/* starting PSN:24 rsvd:8 */
-	__be32 offset20;
-	u8 resp_resources;
-	u8 initiator_depth;
-	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
-	u8 offset26;
-	/* RNR retry count:3, SRQ:1, rsvd:5 */
-	u8 offset27;
-	__be64 local_ca_guid;
-
-	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
-
-} __packed;
-
-static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
-{
-	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
-}
-
-static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
-{
-	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
-			    (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
-}
-
-static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
-{
-	return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
-}
-
-static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
-{
-	rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
-			    (be32_to_cpu(rep_msg->offset16) & 0x000000FF));
-}
-
 static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
 {
 	return (qp_type == IB_QPT_XRC_INI) ?
-		cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
-}
-
-static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
-{
-	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
-}
-
-static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
-					   __be32 starting_psn)
-{
-	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
-			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
-}
-
-static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
-{
-	return (u8) (rep_msg->offset26 >> 3);
-}
-
-static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
-					       u8 target_ack_delay)
-{
-	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
-				  (target_ack_delay << 3));
-}
-
-static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
-{
-	return (u8) ((rep_msg->offset26 & 0x06) >> 1);
-}
-
-static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
-{
-	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
-				  ((failover & 0x3) << 1));
-}
-
-static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
-{
-	return (u8) (rep_msg->offset26 & 0x01);
-}
-
-static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
-					    u8 flow_ctrl)
-{
-	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
-				  (flow_ctrl & 0x1));
-}
-
-static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
-{
-	return (u8) (rep_msg->offset27 >> 5);
-}
-
-static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
-					      u8 rnr_retry_count)
-{
-	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
-				  (rnr_retry_count << 5));
-}
-
-static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
-{
-	return (u8) ((rep_msg->offset27 >> 4) & 0x1);
-}
-
-static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
-{
-	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
-				  ((srq & 0x1) << 4));
-}
-
-struct cm_rtu_msg {
-	struct ib_mad_hdr hdr;
-
-	__be32 local_comm_id;
-	__be32 remote_comm_id;
-
-	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
-
-} __packed;
-
-struct cm_dreq_msg {
-	struct ib_mad_hdr hdr;
-
-	__be32 local_comm_id;
-	__be32 remote_comm_id;
-	/* remote QPN/EECN:24, rsvd:8 */
-	__be32 offset8;
-
-	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
-
-} __packed;
-
-static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
-{
-	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
-}
-
-static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
-{
-	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
-			    (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
-}
-
-struct cm_drep_msg {
-	struct ib_mad_hdr hdr;
-
-	__be32 local_comm_id;
-	__be32 remote_comm_id;
-
-	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
-
-} __packed;
-
-struct cm_lap_msg {
-	struct ib_mad_hdr hdr;
-
-	__be32 local_comm_id;
-	__be32 remote_comm_id;
-
-	__be32 rsvd8;
-	/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
-	__be32 offset12;
-	__be32 rsvd16;
-
-	__be16 alt_local_lid;
-	__be16 alt_remote_lid;
-	union ib_gid alt_local_gid;
-	union ib_gid alt_remote_gid;
-	/* flow label:20, rsvd:4, traffic class:8 */
-	__be32 offset56;
-	u8 alt_hop_limit;
-	/* rsvd:2, packet rate:6 */
-	u8 offset61;
-	/* SL:4, subnet local:1, rsvd:3 */
-	u8 offset62;
-	/* local ACK timeout:5, rsvd:3 */
-	u8 offset63;
-
-	u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
-} __packed;
-
-static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
-{
-	return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
-}
-
-static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
-{
-	lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
-					 (be32_to_cpu(lap_msg->offset12) &
-					  0x000000FF));
-}
-
-static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
-{
-	return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
-}
-
-static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
-						  u8 resp_timeout)
-{
-	lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
-					 (be32_to_cpu(lap_msg->offset12) &
-					  0xFFFFFF07));
-}
-
-static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
-{
-	return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
-}
-
-static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
-					 __be32 flow_label)
-{
-	lap_msg->offset56 = cpu_to_be32(
-				 (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
-				 (be32_to_cpu(flow_label) << 12));
-}
-
-static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
-{
-	return (u8) be32_to_cpu(lap_msg->offset56);
-}
-
-static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
-					    u8 traffic_class)
-{
-	lap_msg->offset56 = cpu_to_be32(traffic_class |
-					 (be32_to_cpu(lap_msg->offset56) &
-					  0xFFFFFF00));
-}
-
-static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
-{
-	return lap_msg->offset61 & 0x3F;
-}
-
-static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
-					  u8 packet_rate)
-{
-	lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
-}
-
-static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
-{
-	return lap_msg->offset62 >> 4;
-}
-
-static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
-{
-	lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
-}
-
-static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
-{
-	return (lap_msg->offset62 >> 3) & 0x1;
-}
-
-static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
-					   u8 subnet_local)
-{
-	lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
-			     (lap_msg->offset61 & 0xF7);
-}
-static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
-{
-	return lap_msg->offset63 >> 3;
-}
-
-static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
-						u8 local_ack_timeout)
-{
-	lap_msg->offset63 = (local_ack_timeout << 3) |
-			    (lap_msg->offset63 & 0x07);
-}
-
-struct cm_apr_msg {
-	struct ib_mad_hdr hdr;
-
-	__be32 local_comm_id;
-	__be32 remote_comm_id;
-
-	u8 info_length;
-	u8 ap_status;
-	__be16 rsvd;
-	u8 info[IB_CM_APR_INFO_LENGTH];
-
-	u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
-} __packed;
-
-struct cm_sidr_req_msg {
-	struct ib_mad_hdr hdr;
-
-	__be32 request_id;
-	__be16 pkey;
-	__be16 rsvd;
-	__be64 service_id;
-
-	u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
-} __packed;
-
-struct cm_sidr_rep_msg {
-	struct ib_mad_hdr hdr;
-
-	__be32 request_id;
-	u8 status;
-	u8 info_length;
-	__be16 rsvd;
-	/* QPN:24, rsvd:8 */
-	__be32 offset8;
-	__be64 service_id;
-	__be32 qkey;
-	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
-
-	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
-} __packed;
-
-static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
-{
-	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
-}
-
-static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
-				       __be32 qpn)
-{
-	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
-					(be32_to_cpu(sidr_rep_msg->offset8) &
-					 0x000000FF));
+		       cpu_to_be32(IBA_GET(CM_REP_LOCAL_EE_CONTEXT_NUMBER,
+					   rep_msg)) :
+		       cpu_to_be32(IBA_GET(CM_REP_LOCAL_QPN, rep_msg));
 }
 
 #endif /* CM_MSGS_H */
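
The helpers deleted above all follow the same hand-rolled pattern: byte-swap the on-wire word where needed, then shift and mask the bit range named in the neighbouring comment. The surviving cm_rep_get_qpn() shows the replacement style, where IBA_GET() names the field instead of open-coding its offset. A minimal userspace sketch of the arithmetic being deleted, for illustration only (ntohl()/htonl() stand in for be32_to_cpu()/cpu_to_be32(), and the function names are hypothetical):

    #include <stdint.h>
    #include <arpa/inet.h>

    /* Extract the 24-bit local QPN stored in the top bits of the big-endian
     * offset12 word, as the removed cm_rep_get_local_qpn() did by hand.
     */
    static inline uint32_t example_get_local_qpn(uint32_t be_offset12)
    {
        return ntohl(be_offset12) >> 8;        /* local QPN:24, rsvd:8 */
    }

    /* Updating a sub-field keeps the untouched bits with a mask, as in the
     * removed cm_rep_set_local_qpn(); qpn is in host byte order here.
     */
    static inline uint32_t example_set_local_qpn(uint32_t be_offset12,
                                                 uint32_t qpn)
    {
        return htonl((qpn << 8) | (ntohl(be_offset12) & 0x000000FF));
    }
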
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 43a6f07..72f0321 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -36,6 +36,7 @@
 
 #include "core_priv.h"
 #include "cma_priv.h"
+#include "cma_trace.h"
 
 MODULE_AUTHOR("Sean Hefty");
 MODULE_DESCRIPTION("Generic RDMA CM Agent");
@@ -877,6 +878,7 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
 	id_priv->id.route.addr.dev_addr.net = get_net(net);
 	id_priv->seq_num &= 0x00ffffff;
 
+	trace_cm_id_create(id_priv);
 	return &id_priv->id;
 }
 EXPORT_SYMBOL(__rdma_create_id);
@@ -928,27 +930,34 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id->device != pd->device)
-		return -EINVAL;
+	if (id->device != pd->device) {
+		ret = -EINVAL;
+		goto out_err;
+	}
 
 	qp_init_attr->port_num = id->port_num;
 	qp = ib_create_qp(pd, qp_init_attr);
-	if (IS_ERR(qp))
-		return PTR_ERR(qp);
+	if (IS_ERR(qp)) {
+		ret = PTR_ERR(qp);
+		goto out_err;
+	}
 
 	if (id->qp_type == IB_QPT_UD)
 		ret = cma_init_ud_qp(id_priv, qp);
 	else
 		ret = cma_init_conn_qp(id_priv, qp);
 	if (ret)
-		goto err;
+		goto out_destroy;
 
 	id->qp = qp;
 	id_priv->qp_num = qp->qp_num;
 	id_priv->srq = (qp->srq != NULL);
+	trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
 	return 0;
-err:
+out_destroy:
 	ib_destroy_qp(qp);
+out_err:
+	trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_create_qp);
@@ -958,6 +967,7 @@ void rdma_destroy_qp(struct rdma_cm_id *id)
 	struct rdma_id_private *id_priv;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
+	trace_cm_qp_destroy(id_priv);
 	mutex_lock(&id_priv->qp_mutex);
 	ib_destroy_qp(id_priv->id.qp);
 	id_priv->id.qp = NULL;
@@ -1811,6 +1821,7 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 	enum rdma_cm_state state;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
+	trace_cm_id_destroy(id_priv);
 	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
 	cma_cancel_operation(id_priv, state);
 
@@ -1863,6 +1874,7 @@ static int cma_rep_recv(struct rdma_id_private *id_priv)
 	if (ret)
 		goto reject;
 
+	trace_cm_send_rtu(id_priv);
 	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
 	if (ret)
 		goto reject;
@@ -1871,6 +1883,7 @@ static int cma_rep_recv(struct rdma_id_private *id_priv)
 reject:
 	pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
 	cma_modify_qp_err(id_priv);
+	trace_cm_send_rej(id_priv);
 	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
 		       NULL, 0, NULL, 0);
 	return ret;
@@ -1890,6 +1903,17 @@ static void cma_set_rep_event_data(struct rdma_cm_event *event,
 	event->param.conn.qp_num = rep_data->remote_qpn;
 }
 
+static int cma_cm_event_handler(struct rdma_id_private *id_priv,
+				struct rdma_cm_event *event)
+{
+	int ret;
+
+	trace_cm_event_handler(id_priv, event);
+	ret = id_priv->id.event_handler(&id_priv->id, event);
+	trace_cm_event_done(id_priv, event, ret);
+	return ret;
+}
+
 static int cma_ib_handler(struct ib_cm_id *cm_id,
 			  const struct ib_cm_event *ib_event)
 {
@@ -1912,8 +1936,10 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
 		break;
 	case IB_CM_REP_RECEIVED:
 		if (cma_comp(id_priv, RDMA_CM_CONNECT) &&
-		    (id_priv->id.qp_type != IB_QPT_UD))
+		    (id_priv->id.qp_type != IB_QPT_UD)) {
+			trace_cm_send_mra(id_priv);
 			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+		}
 		if (id_priv->id.qp) {
 			event.status = cma_rep_recv(id_priv);
 			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
@@ -1958,7 +1984,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
 		goto out;
 	}
 
-	ret = id_priv->id.event_handler(&id_priv->id, &event);
+	ret = cma_cm_event_handler(id_priv, &event);
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
@@ -2119,6 +2145,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
 	if (IS_ERR(listen_id))
 		return PTR_ERR(listen_id);
 
+	trace_cm_req_handler(listen_id, ib_event->event);
 	if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
 		ret = -EINVAL;
 		goto net_dev_put;
@@ -2161,7 +2188,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
 	 * until we're done accessing it.
 	 */
 	atomic_inc(&conn_id->refcount);
-	ret = conn_id->id.event_handler(&conn_id->id, &event);
+	ret = cma_cm_event_handler(conn_id, &event);
 	if (ret)
 		goto err3;
 	/*
@@ -2170,8 +2197,10 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
 	 */
 	mutex_lock(&lock);
 	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
-	    (conn_id->id.qp_type != IB_QPT_UD))
+	    (conn_id->id.qp_type != IB_QPT_UD)) {
+		trace_cm_send_mra(cm_id->context);
 		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+	}
 	mutex_unlock(&lock);
 	mutex_unlock(&conn_id->handler_mutex);
 	mutex_unlock(&listen_id->handler_mutex);
@@ -2286,7 +2315,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	event.status = iw_event->status;
 	event.param.conn.private_data = iw_event->private_data;
 	event.param.conn.private_data_len = iw_event->private_data_len;
-	ret = id_priv->id.event_handler(&id_priv->id, &event);
+	ret = cma_cm_event_handler(id_priv, &event);
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.iw = NULL;
@@ -2363,7 +2392,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	 * until we're done accessing it.
 	 */
 	atomic_inc(&conn_id->refcount);
-	ret = conn_id->id.event_handler(&conn_id->id, &event);
+	ret = cma_cm_event_handler(conn_id, &event);
 	if (ret) {
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
@@ -2435,6 +2464,7 @@ static int cma_listen_handler(struct rdma_cm_id *id,
 
 	id->context = id_priv->id.context;
 	id->event_handler = id_priv->id.event_handler;
+	trace_cm_event_handler(id_priv, event);
 	return id_priv->id.event_handler(id, event);
 }
 
@@ -2611,7 +2641,7 @@ static void cma_work_handler(struct work_struct *_work)
 	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
 		goto out;
 
-	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
+	if (cma_cm_event_handler(id_priv, &work->event)) {
 		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		destroy = 1;
 	}
@@ -2634,7 +2664,7 @@ static void cma_ndev_work_handler(struct work_struct *_work)
 	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
 		goto out;
 
-	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
+	if (cma_cm_event_handler(id_priv, &work->event)) {
 		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		destroy = 1;
 	}
@@ -3089,7 +3119,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 	} else
 		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
 
-	if (id_priv->id.event_handler(&id_priv->id, &event)) {
+	if (cma_cm_event_handler(id_priv, &event)) {
 		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
@@ -3118,6 +3148,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
 	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
 
+	atomic_inc(&id_priv->refcount);
 	cma_init_resolve_addr_work(work, id_priv);
 	queue_work(cma_wq, &work->work);
 	return 0;
@@ -3144,6 +3175,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
 	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
 		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
 
+	atomic_inc(&id_priv->refcount);
 	cma_init_resolve_addr_work(work, id_priv);
 	queue_work(cma_wq, &work->work);
 	return 0;
@@ -3736,7 +3768,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 		goto out;
 	}
 
-	ret = id_priv->id.event_handler(&id_priv->id, &event);
+	ret = cma_cm_event_handler(id_priv, &event);
 
 	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
 	if (ret) {
@@ -3800,6 +3832,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
 
+	trace_cm_send_sidr_req(id_priv);
 	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
 	if (ret) {
 		ib_destroy_cm_id(id_priv->cm_id.ib);
@@ -3873,6 +3906,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
 	req.srq = id_priv->srq ? 1 : 0;
 
+	trace_cm_send_req(id_priv);
 	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
 out:
 	if (ret && !IS_ERR(id)) {
@@ -3986,6 +4020,7 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
 	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
 	rep.srq = id_priv->srq ? 1 : 0;
 
+	trace_cm_send_rep(id_priv);
 	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
 out:
 	return ret;
@@ -4035,6 +4070,7 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
 	rep.private_data = private_data;
 	rep.private_data_len = private_data_len;
 
+	trace_cm_send_sidr_rep(id_priv);
 	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
 }
 
@@ -4120,13 +4156,15 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 		return -EINVAL;
 
 	if (rdma_cap_ib_cm(id->device, id->port_num)) {
-		if (id->qp_type == IB_QPT_UD)
+		if (id->qp_type == IB_QPT_UD) {
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
 						private_data, private_data_len);
-		else
+		} else {
+			trace_cm_send_rej(id_priv);
 			ret = ib_send_cm_rej(id_priv->cm_id.ib,
 					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
 					     0, private_data, private_data_len);
+		}
 	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
 		ret = iw_cm_reject(id_priv->cm_id.iw,
 				   private_data, private_data_len);
@@ -4151,8 +4189,13 @@ int rdma_disconnect(struct rdma_cm_id *id)
 		if (ret)
 			goto out;
 		/* Initiate or respond to a disconnect. */
-		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
-			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
+		trace_cm_disconnect(id_priv);
+		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) {
+			if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0))
+				trace_cm_sent_drep(id_priv);
+		} else {
+			trace_cm_sent_dreq(id_priv);
+		}
 	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
 		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
 	} else
@@ -4218,7 +4261,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	} else
 		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
 
-	ret = id_priv->id.event_handler(&id_priv->id, &event);
+	ret = cma_cm_event_handler(id_priv, &event);
 
 	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
 	if (ret) {
@@ -4623,6 +4666,7 @@ static void cma_add_one(struct ib_device *device)
 		cma_listen_on_dev(id_priv, cma_dev);
 	mutex_unlock(&lock);
 
+	trace_cm_add_one(device);
 	return;
 
 free_gid_type:
@@ -4653,7 +4697,7 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 		goto out;
 
 	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
-	ret = id_priv->id.event_handler(&id_priv->id, &event);
+	ret = cma_cm_event_handler(id_priv, &event);
 out:
 	mutex_unlock(&id_priv->handler_mutex);
 	return ret;
@@ -4691,6 +4735,8 @@ static void cma_remove_one(struct ib_device *device, void *client_data)
 {
 	struct cma_device *cma_dev = client_data;
 
+	trace_cm_remove_one(device);
+
 	if (!cma_dev)
 		return;
 
diff --git a/drivers/infiniband/core/cma_trace.c b/drivers/infiniband/core/cma_trace.c
new file mode 100644
index 0000000..b314a28
--- /dev/null
+++ b/drivers/infiniband/core/cma_trace.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Trace points for the RDMA Connection Manager.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#define CREATE_TRACE_POINTS
+
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_cm.h>
+#include "cma_priv.h"
+
+#include "cma_trace.h"
diff --git a/drivers/infiniband/core/cma_trace.h b/drivers/infiniband/core/cma_trace.h
new file mode 100644
index 0000000..81e36bf1
--- /dev/null
+++ b/drivers/infiniband/core/cma_trace.h
@@ -0,0 +1,391 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Trace point definitions for the RDMA Connection Manager.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rdma_cma
+
+#if !defined(_TRACE_RDMA_CMA_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#define _TRACE_RDMA_CMA_H
+
+#include <linux/tracepoint.h>
+#include <trace/events/rdma.h>
+
+/*
+ * enum ib_cm_event_type, from include/rdma/ib_cm.h
+ */
+#define IB_CM_EVENT_LIST			\
+	ib_cm_event(REQ_ERROR)			\
+	ib_cm_event(REQ_RECEIVED)		\
+	ib_cm_event(REP_ERROR)			\
+	ib_cm_event(REP_RECEIVED)		\
+	ib_cm_event(RTU_RECEIVED)		\
+	ib_cm_event(USER_ESTABLISHED)		\
+	ib_cm_event(DREQ_ERROR)			\
+	ib_cm_event(DREQ_RECEIVED)		\
+	ib_cm_event(DREP_RECEIVED)		\
+	ib_cm_event(TIMEWAIT_EXIT)		\
+	ib_cm_event(MRA_RECEIVED)		\
+	ib_cm_event(REJ_RECEIVED)		\
+	ib_cm_event(LAP_ERROR)			\
+	ib_cm_event(LAP_RECEIVED)		\
+	ib_cm_event(APR_RECEIVED)		\
+	ib_cm_event(SIDR_REQ_ERROR)		\
+	ib_cm_event(SIDR_REQ_RECEIVED)		\
+	ib_cm_event_end(SIDR_REP_RECEIVED)
+
+#undef ib_cm_event
+#undef ib_cm_event_end
+
+#define ib_cm_event(x)		TRACE_DEFINE_ENUM(IB_CM_##x);
+#define ib_cm_event_end(x)	TRACE_DEFINE_ENUM(IB_CM_##x);
+
+IB_CM_EVENT_LIST
+
+#undef ib_cm_event
+#undef ib_cm_event_end
+
+#define ib_cm_event(x)		{ IB_CM_##x, #x },
+#define ib_cm_event_end(x)	{ IB_CM_##x, #x }
+
+#define rdma_show_ib_cm_event(x) \
+		__print_symbolic(x, IB_CM_EVENT_LIST)
+
+
+DECLARE_EVENT_CLASS(cma_fsm_class,
+	TP_PROTO(
+		const struct rdma_id_private *id_priv
+	),
+
+	TP_ARGS(id_priv),
+
+	TP_STRUCT__entry(
+		__field(u32, cm_id)
+		__field(u32, tos)
+		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+	),
+
+	TP_fast_assign(
+		__entry->cm_id = id_priv->res.id;
+		__entry->tos = id_priv->tos;
+		memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+		       sizeof(struct sockaddr_in6));
+		memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+		       sizeof(struct sockaddr_in6));
+	),
+
+	TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u",
+		__entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos
+	)
+);
+
+#define DEFINE_CMA_FSM_EVENT(name)						\
+		DEFINE_EVENT(cma_fsm_class, cm_##name,				\
+				TP_PROTO(					\
+					const struct rdma_id_private *id_priv	\
+				),						\
+				TP_ARGS(id_priv))
+
+DEFINE_CMA_FSM_EVENT(send_rtu);
+DEFINE_CMA_FSM_EVENT(send_rej);
+DEFINE_CMA_FSM_EVENT(send_mra);
+DEFINE_CMA_FSM_EVENT(send_sidr_req);
+DEFINE_CMA_FSM_EVENT(send_sidr_rep);
+DEFINE_CMA_FSM_EVENT(disconnect);
+DEFINE_CMA_FSM_EVENT(sent_drep);
+DEFINE_CMA_FSM_EVENT(sent_dreq);
+DEFINE_CMA_FSM_EVENT(id_destroy);
+
+TRACE_EVENT(cm_id_create,
+	TP_PROTO(
+		const struct rdma_id_private *id_priv
+	),
+
+	TP_ARGS(id_priv),
+
+	TP_STRUCT__entry(
+		__field(u32, cm_id)
+	),
+
+	TP_fast_assign(
+		__entry->cm_id = id_priv->res.id;
+	),
+
+	TP_printk("cm.id=%u",
+		__entry->cm_id
+	)
+);
+
+DECLARE_EVENT_CLASS(cma_qp_class,
+	TP_PROTO(
+		const struct rdma_id_private *id_priv
+	),
+
+	TP_ARGS(id_priv),
+
+	TP_STRUCT__entry(
+		__field(u32, cm_id)
+		__field(u32, tos)
+		__field(u32, qp_num)
+		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+	),
+
+	TP_fast_assign(
+		__entry->cm_id = id_priv->res.id;
+		__entry->tos = id_priv->tos;
+		__entry->qp_num = id_priv->qp_num;
+		memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+		       sizeof(struct sockaddr_in6));
+		memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+		       sizeof(struct sockaddr_in6));
+	),
+
+	TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u qp_num=%u",
+		__entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos,
+		__entry->qp_num
+	)
+);
+
+#define DEFINE_CMA_QP_EVENT(name)						\
+		DEFINE_EVENT(cma_qp_class, cm_##name,				\
+				TP_PROTO(					\
+					const struct rdma_id_private *id_priv	\
+				),						\
+				TP_ARGS(id_priv))
+
+DEFINE_CMA_QP_EVENT(send_req);
+DEFINE_CMA_QP_EVENT(send_rep);
+DEFINE_CMA_QP_EVENT(qp_destroy);
+
+/*
+ * enum ib_qp_type, from include/rdma/ib_verbs.h
+ */
+#define IB_QP_TYPE_LIST				\
+	ib_qp_type(SMI)				\
+	ib_qp_type(GSI)				\
+	ib_qp_type(RC)				\
+	ib_qp_type(UC)				\
+	ib_qp_type(UD)				\
+	ib_qp_type(RAW_IPV6)			\
+	ib_qp_type(RAW_ETHERTYPE)		\
+	ib_qp_type(RAW_PACKET)			\
+	ib_qp_type(XRC_INI)			\
+	ib_qp_type_end(XRC_TGT)
+
+#undef ib_qp_type
+#undef ib_qp_type_end
+
+#define ib_qp_type(x)		TRACE_DEFINE_ENUM(IB_QPT_##x);
+#define ib_qp_type_end(x)	TRACE_DEFINE_ENUM(IB_QPT_##x);
+
+IB_QP_TYPE_LIST
+
+#undef ib_qp_type
+#undef ib_qp_type_end
+
+#define ib_qp_type(x)		{ IB_QPT_##x, #x },
+#define ib_qp_type_end(x)	{ IB_QPT_##x, #x }
+
+#define rdma_show_qp_type(x) \
+		__print_symbolic(x, IB_QP_TYPE_LIST)
+
+
+TRACE_EVENT(cm_qp_create,
+	TP_PROTO(
+		const struct rdma_id_private *id_priv,
+		const struct ib_pd *pd,
+		const struct ib_qp_init_attr *qp_init_attr,
+		int rc
+	),
+
+	TP_ARGS(id_priv, pd, qp_init_attr, rc),
+
+	TP_STRUCT__entry(
+		__field(u32, cm_id)
+		__field(u32, pd_id)
+		__field(u32, tos)
+		__field(u32, qp_num)
+		__field(u32, send_wr)
+		__field(u32, recv_wr)
+		__field(int, rc)
+		__field(unsigned long, qp_type)
+		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+	),
+
+	TP_fast_assign(
+		__entry->cm_id = id_priv->res.id;
+		__entry->pd_id = pd->res.id;
+		__entry->tos = id_priv->tos;
+		__entry->send_wr = qp_init_attr->cap.max_send_wr;
+		__entry->recv_wr = qp_init_attr->cap.max_recv_wr;
+		__entry->rc = rc;
+		if (!rc) {
+			__entry->qp_num = id_priv->qp_num;
+			__entry->qp_type = id_priv->id.qp_type;
+		} else {
+			__entry->qp_num = 0;
+			__entry->qp_type = 0;
+		}
+		memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+		       sizeof(struct sockaddr_in6));
+		memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+		       sizeof(struct sockaddr_in6));
+	),
+
+	TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u pd.id=%u qp_type=%s"
+		" send_wr=%u recv_wr=%u qp_num=%u rc=%d",
+		__entry->cm_id, __entry->srcaddr, __entry->dstaddr,
+		__entry->tos, __entry->pd_id,
+		rdma_show_qp_type(__entry->qp_type), __entry->send_wr,
+		__entry->recv_wr, __entry->qp_num, __entry->rc
+	)
+);
+
+TRACE_EVENT(cm_req_handler,
+	TP_PROTO(
+		const struct rdma_id_private *id_priv,
+		int event
+	),
+
+	TP_ARGS(id_priv, event),
+
+	TP_STRUCT__entry(
+		__field(u32, cm_id)
+		__field(u32, tos)
+		__field(unsigned long, event)
+		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+	),
+
+	TP_fast_assign(
+		__entry->cm_id = id_priv->res.id;
+		__entry->tos = id_priv->tos;
+		__entry->event = event;
+		memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+		       sizeof(struct sockaddr_in6));
+		memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+		       sizeof(struct sockaddr_in6));
+	),
+
+	TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u %s (%lu)",
+		__entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos,
+		rdma_show_ib_cm_event(__entry->event), __entry->event
+	)
+);
+
+TRACE_EVENT(cm_event_handler,
+	TP_PROTO(
+		const struct rdma_id_private *id_priv,
+		const struct rdma_cm_event *event
+	),
+
+	TP_ARGS(id_priv, event),
+
+	TP_STRUCT__entry(
+		__field(u32, cm_id)
+		__field(u32, tos)
+		__field(unsigned long, event)
+		__field(int, status)
+		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+	),
+
+	TP_fast_assign(
+		__entry->cm_id = id_priv->res.id;
+		__entry->tos = id_priv->tos;
+		__entry->event = event->event;
+		__entry->status = event->status;
+		memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+		       sizeof(struct sockaddr_in6));
+		memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+		       sizeof(struct sockaddr_in6));
+	),
+
+	TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u %s (%lu/%d)",
+		__entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos,
+		rdma_show_cm_event(__entry->event), __entry->event,
+		__entry->status
+	)
+);
+
+TRACE_EVENT(cm_event_done,
+	TP_PROTO(
+		const struct rdma_id_private *id_priv,
+		const struct rdma_cm_event *event,
+		int result
+	),
+
+	TP_ARGS(id_priv, event, result),
+
+	TP_STRUCT__entry(
+		__field(u32, cm_id)
+		__field(u32, tos)
+		__field(unsigned long, event)
+		__field(int, result)
+		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+	),
+
+	TP_fast_assign(
+		__entry->cm_id = id_priv->res.id;
+		__entry->tos = id_priv->tos;
+		__entry->event = event->event;
+		__entry->result = result;
+		memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+		       sizeof(struct sockaddr_in6));
+		memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+		       sizeof(struct sockaddr_in6));
+	),
+
+	TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u %s consumer returns %d",
+		__entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos,
+		rdma_show_cm_event(__entry->event), __entry->result
+	)
+);
+
+DECLARE_EVENT_CLASS(cma_client_class,
+	TP_PROTO(
+		const struct ib_device *device
+	),
+
+	TP_ARGS(device),
+
+	TP_STRUCT__entry(
+		__string(name, device->name)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, device->name);
+	),
+
+	TP_printk("device name=%s",
+		__get_str(name)
+	)
+);
+
+#define DEFINE_CMA_CLIENT_EVENT(name)						\
+		DEFINE_EVENT(cma_client_class, cm_##name,			\
+				TP_PROTO(					\
+					const struct ib_device *device		\
+				),						\
+				TP_ARGS(device))
+
+DEFINE_CMA_CLIENT_EVENT(add_one);
+DEFINE_CMA_CLIENT_EVENT(remove_one);
+
+#endif /* _TRACE_RDMA_CMA_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE cma_trace
+
+#include <trace/define_trace.h>
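
The DEFINE_CMA_FSM_EVENT() and DEFINE_CMA_QP_EVENT() wrappers above expand to DEFINE_EVENT() instances of the shared classes, so every name they list becomes a trace_cm_<name>() call that cma.c invokes with nothing more than the rdma_id_private pointer, as the hunks above do for trace_cm_send_req() and friends. A hypothetical sketch of what one more event would take; the name send_example is illustrative and not part of this patch:

    /* cma_trace.h: one extra line beside the existing FSM-style events */
    DEFINE_CMA_FSM_EVENT(send_example);

    /* cma.c: emitted at whatever point in the state machine is of interest */
    trace_cm_send_example(id_priv);
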
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 3645e09..b1457b3 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -149,6 +149,7 @@ unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
 int ib_cache_setup_one(struct ib_device *device);
 void ib_cache_cleanup_one(struct ib_device *device);
 void ib_cache_release_one(struct ib_device *device);
+void ib_dispatch_event_clients(struct ib_event *event);
 
 #ifdef CONFIG_CGROUP_RDMA
 void ib_device_register_rdmacg(struct ib_device *device);
@@ -320,7 +321,7 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
 					  struct ib_pd *pd,
 					  struct ib_qp_init_attr *attr,
 					  struct ib_udata *udata,
-					  struct ib_uobject *uobj)
+					  struct ib_uqp_object *uobj)
 {
 	enum ib_qp_type qp_type = attr->qp_type;
 	struct ib_qp *qp;
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index bbfded6..4f25b24 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -7,6 +7,8 @@
 #include <linux/slab.h>
 #include <rdma/ib_verbs.h>
 
+#include <trace/events/rdma_core.h>
+
 /* # of WCs to poll for with a single call to ib_poll_cq */
 #define IB_POLL_BATCH			16
 #define IB_POLL_BATCH_DIRECT		8
@@ -41,6 +43,7 @@ static void ib_cq_rdma_dim_work(struct work_struct *w)
 
 	dim->state = DIM_START_MEASURE;
 
+	trace_cq_modify(cq, comps, usec);
 	cq->device->ops.modify_cq(cq, comps, usec);
 }
 
@@ -65,18 +68,29 @@ static void rdma_dim_init(struct ib_cq *cq)
 	INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
 }
 
+static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
+{
+	int rc;
+
+	rc = ib_poll_cq(cq, num_entries, wc);
+	trace_cq_poll(cq, num_entries, rc);
+	return rc;
+}
+
 static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
 			   int batch)
 {
 	int i, n, completed = 0;
 
+	trace_cq_process(cq);
+
 	/*
 	 * budget might be (-1) if the caller does not
 	 * want to bound this call, thus we need unsigned
 	 * minimum here.
 	 */
-	while ((n = ib_poll_cq(cq, min_t(u32, batch,
-					 budget - completed), wcs)) > 0) {
+	while ((n = __poll_cq(cq, min_t(u32, batch,
+					budget - completed), wcs)) > 0) {
 		for (i = 0; i < n; i++) {
 			struct ib_wc *wc = &wcs[i];
 
@@ -131,8 +145,10 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
 	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
 	if (completed < budget) {
 		irq_poll_complete(&cq->iop);
-		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
+		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) {
+			trace_cq_reschedule(cq);
 			irq_poll_sched(&cq->iop);
+		}
 	}
 
 	if (dim)
@@ -143,6 +159,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
 
 static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
 {
+	trace_cq_schedule(cq);
 	irq_poll_sched(&cq->iop);
 }
 
@@ -162,6 +179,7 @@ static void ib_cq_poll_work(struct work_struct *work)
 
 static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
 {
+	trace_cq_schedule(cq);
 	queue_work(cq->comp_wq, &cq->work);
 }
 
@@ -239,6 +257,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
 		goto out_destroy_cq;
 	}
 
+	trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx);
 	return cq;
 
 out_destroy_cq:
@@ -248,6 +267,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
 	kfree(cq->wc);
 out_free_cq:
 	kfree(cq);
+	trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(__ib_alloc_cq_user);
@@ -304,6 +324,7 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
 		WARN_ON_ONCE(1);
 	}
 
+	trace_cq_free(cq);
 	rdma_restrack_del(&cq->res);
 	cq->device->ops.destroy_cq(cq, udata);
 	if (cq->dim)
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 84dd74f..f6c2552 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -587,7 +587,8 @@ struct ib_device *_ib_alloc_device(size_t size)
 	rdma_init_coredev(&device->coredev, device, &init_net);
 
 	INIT_LIST_HEAD(&device->event_handler_list);
-	spin_lock_init(&device->event_handler_lock);
+	spin_lock_init(&device->qp_open_list_lock);
+	init_rwsem(&device->event_handler_rwsem);
 	mutex_init(&device->unregistration_lock);
 	/*
 	 * client_data needs to be alloc because we don't want our mark to be
@@ -1931,17 +1932,15 @@ EXPORT_SYMBOL(ib_set_client_data);
  *
  * ib_register_event_handler() registers an event handler that will be
  * called back when asynchronous IB events occur (as defined in
- * chapter 11 of the InfiniBand Architecture Specification).  This
- * callback may occur in interrupt context.
+ * chapter 11 of the InfiniBand Architecture Specification). This
+ * callback occurs in workqueue context.
  */
 void ib_register_event_handler(struct ib_event_handler *event_handler)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
+	down_write(&event_handler->device->event_handler_rwsem);
 	list_add_tail(&event_handler->list,
 		      &event_handler->device->event_handler_list);
-	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
+	up_write(&event_handler->device->event_handler_rwsem);
 }
 EXPORT_SYMBOL(ib_register_event_handler);
 
@@ -1954,35 +1953,23 @@ EXPORT_SYMBOL(ib_register_event_handler);
  */
 void ib_unregister_event_handler(struct ib_event_handler *event_handler)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
+	down_write(&event_handler->device->event_handler_rwsem);
 	list_del(&event_handler->list);
-	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
+	up_write(&event_handler->device->event_handler_rwsem);
 }
 EXPORT_SYMBOL(ib_unregister_event_handler);
 
-/**
- * ib_dispatch_event - Dispatch an asynchronous event
- * @event:Event to dispatch
- *
- * Low-level drivers must call ib_dispatch_event() to dispatch the
- * event to all registered event handlers when an asynchronous event
- * occurs.
- */
-void ib_dispatch_event(struct ib_event *event)
+void ib_dispatch_event_clients(struct ib_event *event)
 {
-	unsigned long flags;
 	struct ib_event_handler *handler;
 
-	spin_lock_irqsave(&event->device->event_handler_lock, flags);
+	down_read(&event->device->event_handler_rwsem);
 
 	list_for_each_entry(handler, &event->device->event_handler_list, list)
 		handler->handler(handler, event);
 
-	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
+	up_read(&event->device->event_handler_rwsem);
 }
-EXPORT_SYMBOL(ib_dispatch_event);
 
 static int iw_query_port(struct ib_device *device,
 			   u8 port_num,
@@ -1990,7 +1977,6 @@ static int iw_query_port(struct ib_device *device,
 {
 	struct in_device *inetdev;
 	struct net_device *netdev;
-	int err;
 
 	memset(port_attr, 0, sizeof(*port_attr));
 
@@ -2021,11 +2007,7 @@ static int iw_query_port(struct ib_device *device,
 	}
 
 	dev_put(netdev);
-	err = device->ops.query_port(device, port_num, port_attr);
-	if (err)
-		return err;
-
-	return 0;
+	return device->ops.query_port(device, port_num, port_attr);
 }
 
 static int __ib_query_port(struct ib_device *device,
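
The event-handler change earlier in this file swaps the list lock from an IRQ-safe spinlock to an rw_semaphore: registration and unregistration take the write side, while ib_dispatch_event_clients() walks the list under the read side, which is what lets the updated kerneldoc promise workqueue (sleepable) context for the callbacks. A small userspace analogue of that reader/writer split, with all names hypothetical:

    #include <pthread.h>

    #define MAX_HANDLERS 16

    typedef void (*event_fn)(int event);

    static pthread_rwlock_t handler_lock = PTHREAD_RWLOCK_INITIALIZER;
    static event_fn handlers[MAX_HANDLERS];
    static int nr_handlers;

    /* Mirrors ib_register_event_handler(): mutation takes the write side. */
    static void register_handler(event_fn fn)
    {
        pthread_rwlock_wrlock(&handler_lock);
        if (nr_handlers < MAX_HANDLERS)
            handlers[nr_handlers++] = fn;
        pthread_rwlock_unlock(&handler_lock);
    }

    /* Mirrors ib_dispatch_event_clients(): the walk only needs the read
     * side, so handlers may block and several events can be delivered
     * concurrently.
     */
    static void dispatch_event(int event)
    {
        pthread_rwlock_rdlock(&handler_lock);
        for (int i = 0; i < nr_handlers; i++)
            handlers[i](event);
        pthread_rwlock_unlock(&handler_lock);
    }

As in the kernel version, the read side still excludes unregistration, so a handler cannot be torn down while it is running.
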
diff --git a/drivers/infiniband/core/ib_core_uverbs.c b/drivers/infiniband/core/ib_core_uverbs.c
index b7cb598..b51bd70 100644
--- a/drivers/infiniband/core/ib_core_uverbs.c
+++ b/drivers/infiniband/core/ib_core_uverbs.c
@@ -232,7 +232,9 @@ void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
 	if (!entry)
 		return;
 
+	xa_lock(&entry->ucontext->mmap_xa);
 	entry->driver_removed = true;
+	xa_unlock(&entry->ucontext->mmap_xa);
 	kref_put(&entry->ref, rdma_user_mmap_entry_free);
 }
 EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index cbf6041..37b433aa 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -41,6 +41,7 @@
 #include "core_priv.h"
 #include "cma_priv.h"
 #include "restrack.h"
+#include "uverbs.h"
 
 typedef int (*res_fill_func_t)(struct sk_buff*, bool,
 			       struct rdma_restrack_entry*, uint32_t);
@@ -599,7 +600,7 @@ static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
 		goto err;
 	if (!rdma_is_kernel_res(res) &&
 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
-			cq->uobject->context->res.id))
+			cq->uobject->uevent.uobject.context->res.id))
 		goto err;
 
 	if (fill_res_name_pid(msg, res))
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 6c72773..5128cb1 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -42,26 +42,21 @@
 #include "core_priv.h"
 #include "rdma_core.h"
 
-void uverbs_uobject_get(struct ib_uobject *uobject)
-{
-	kref_get(&uobject->ref);
-}
-
 static void uverbs_uobject_free(struct kref *ref)
 {
-	struct ib_uobject *uobj =
-		container_of(ref, struct ib_uobject, ref);
-
-	if (uobj->uapi_object->type_class->needs_kfree_rcu)
-		kfree_rcu(uobj, rcu);
-	else
-		kfree(uobj);
+	kfree_rcu(container_of(ref, struct ib_uobject, ref), rcu);
 }
 
+/*
+ * uverbs_uobject_put() is called to indicate that we no longer need this
+ * uobject. When the reference count drops to zero, the uobject is freed.
+ * For example, this is used when attaching a completion channel to a CQ.
+ */
 void uverbs_uobject_put(struct ib_uobject *uobject)
 {
 	kref_put(&uobject->ref, uverbs_uobject_free);
 }
+EXPORT_SYMBOL(uverbs_uobject_put);
 
 static int uverbs_try_lock_object(struct ib_uobject *uobj,
 				  enum rdma_lookup_mode mode)
@@ -135,7 +130,11 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
 	lockdep_assert_held(&ufile->hw_destroy_rwsem);
 	assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
 
-	if (uobj->object) {
+	if (reason == RDMA_REMOVE_ABORT) {
+		WARN_ON(!list_empty(&uobj->list));
+		WARN_ON(!uobj->context);
+		uobj->uapi_object->type_class->alloc_abort(uobj);
+	} else if (uobj->object) {
 		ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
 								attrs);
 		if (ret) {
@@ -151,12 +150,6 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
 		uobj->object = NULL;
 	}
 
-	if (reason == RDMA_REMOVE_ABORT) {
-		WARN_ON(!list_empty(&uobj->list));
-		WARN_ON(!uobj->context);
-		uobj->uapi_object->type_class->alloc_abort(uobj);
-	}
-
 	uobj->context = NULL;
 
 	/*
@@ -263,15 +256,20 @@ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
 }
 
 /* alloc_uobj must be undone by uverbs_destroy_uobject() */
-static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile,
+static struct ib_uobject *alloc_uobj(struct uverbs_attr_bundle *attrs,
 				     const struct uverbs_api_object *obj)
 {
+	struct ib_uverbs_file *ufile = attrs->ufile;
 	struct ib_uobject *uobj;
-	struct ib_ucontext *ucontext;
 
-	ucontext = ib_uverbs_get_ucontext_file(ufile);
-	if (IS_ERR(ucontext))
-		return ERR_CAST(ucontext);
+	if (!attrs->context) {
+		struct ib_ucontext *ucontext =
+			ib_uverbs_get_ucontext_file(ufile);
+
+		if (IS_ERR(ucontext))
+			return ERR_CAST(ucontext);
+		attrs->context = ucontext;
+	}
 
 	uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL);
 	if (!uobj)
@@ -281,7 +279,7 @@ static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile,
 	 * The object is added to the list in the commit stage.
 	 */
 	uobj->ufile = ufile;
-	uobj->context = ucontext;
+	uobj->context = attrs->context;
 	INIT_LIST_HEAD(&uobj->list);
 	uobj->uapi_object = obj;
 	/*
@@ -358,9 +356,9 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj,
 
 	uobject = f->private_data;
 	/*
-	 * fget(id) ensures we are not currently running uverbs_close_fd,
-	 * and the caller is expected to ensure that uverbs_close_fd is never
-	 * done while a call top lookup is possible.
+	 * fget(id) ensures we are not currently running
+	 * uverbs_uobject_fd_release(), and the caller is expected to ensure
+	 * that release is never done while a call to lookup is possible.
 	 */
 	if (f->f_op != fd_type->fops) {
 		fput(f);
@@ -424,12 +422,12 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
 
 static struct ib_uobject *
 alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
-			struct ib_uverbs_file *ufile)
+			struct uverbs_attr_bundle *attrs)
 {
 	int ret;
 	struct ib_uobject *uobj;
 
-	uobj = alloc_uobj(ufile, obj);
+	uobj = alloc_uobj(attrs, obj);
 	if (IS_ERR(uobj))
 		return uobj;
 
@@ -445,7 +443,7 @@ alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
 	return uobj;
 
 remove:
-	xa_erase(&ufile->idr, uobj->id);
+	xa_erase(&attrs->ufile->idr, uobj->id);
 uobj_put:
 	uverbs_uobject_put(uobj);
 	return ERR_PTR(ret);
@@ -453,31 +451,48 @@ alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
 
 static struct ib_uobject *
 alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
-		       struct ib_uverbs_file *ufile)
+		       struct uverbs_attr_bundle *attrs)
 {
+	const struct uverbs_obj_fd_type *fd_type =
+		container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
 	int new_fd;
 	struct ib_uobject *uobj;
+	struct file *filp;
+
+	if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release))
+		return ERR_PTR(-EINVAL);
 
 	new_fd = get_unused_fd_flags(O_CLOEXEC);
 	if (new_fd < 0)
 		return ERR_PTR(new_fd);
 
-	uobj = alloc_uobj(ufile, obj);
-	if (IS_ERR(uobj)) {
-		put_unused_fd(new_fd);
-		return uobj;
+	uobj = alloc_uobj(attrs, obj);
+	if (IS_ERR(uobj))
+		goto err_fd;
+
+	/* Note that uverbs_uobject_fd_release() is called during abort */
+	filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
+				  fd_type->flags);
+	if (IS_ERR(filp)) {
+		uobj = ERR_CAST(filp);
+		goto err_uobj;
 	}
+	uobj->object = filp;
 
 	uobj->id = new_fd;
-	uobj->ufile = ufile;
+	return uobj;
 
+err_uobj:
+	uverbs_uobject_put(uobj);
+err_fd:
+	put_unused_fd(new_fd);
 	return uobj;
 }
 
 struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
-					    struct ib_uverbs_file *ufile,
 					    struct uverbs_attr_bundle *attrs)
 {
+	struct ib_uverbs_file *ufile = attrs->ufile;
 	struct ib_uobject *ret;
 
 	if (IS_ERR(obj))
@@ -491,13 +506,11 @@ struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
 	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
 		return ERR_PTR(-EIO);
 
-	ret = obj->type_class->alloc_begin(obj, ufile);
+	ret = obj->type_class->alloc_begin(obj, attrs);
 	if (IS_ERR(ret)) {
 		up_read(&ufile->hw_destroy_rwsem);
 		return ret;
 	}
-	if (attrs)
-		attrs->context = ret->context;
 	return ret;
 }
 
@@ -544,6 +557,9 @@ static void remove_handle_idr_uobject(struct ib_uobject *uobj)
 
 static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
 {
+	struct file *filp = uobj->object;
+
+	fput(filp);
 	put_unused_fd(uobj->id);
 }
 
@@ -553,7 +569,7 @@ static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
 {
 	const struct uverbs_obj_fd_type *fd_type = container_of(
 		uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
-	int ret = fd_type->context_closed(uobj, why);
+	int ret = fd_type->destroy_object(uobj, why);
 
 	if (ib_is_destroy_retryable(ret, why, uobj))
 		return ret;
@@ -565,7 +581,7 @@ static void remove_handle_fd_uobject(struct ib_uobject *uobj)
 {
 }
 
-static int alloc_commit_idr_uobject(struct ib_uobject *uobj)
+static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
 {
 	struct ib_uverbs_file *ufile = uobj->ufile;
 	void *old;
@@ -579,33 +595,14 @@ static int alloc_commit_idr_uobject(struct ib_uobject *uobj)
 	 */
 	old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL);
 	WARN_ON(old != NULL);
-
-	return 0;
 }
 
-static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
+static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
 {
-	const struct uverbs_obj_fd_type *fd_type = container_of(
-		uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
 	int fd = uobj->id;
-	struct file *filp;
+	struct file *filp = uobj->object;
 
-	/*
-	 * The kref for uobj is moved into filp->private data and put in
-	 * uverbs_close_fd(). Once alloc_commit() succeeds uverbs_close_fd()
-	 * must be guaranteed to be called from the provided fops release
-	 * callback.
-	 */
-	filp = anon_inode_getfile(fd_type->name,
-				  fd_type->fops,
-				  uobj,
-				  fd_type->flags);
-	if (IS_ERR(filp))
-		return PTR_ERR(filp);
-
-	uobj->object = filp;
-
-	/* Matching put will be done in uverbs_close_fd() */
+	/* Matching put will be done in uverbs_uobject_fd_release() */
 	kref_get(&uobj->ufile->ref);
 
 	/* This shouldn't be used anymore. Use the file object instead */
@@ -613,11 +610,10 @@ static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
 
 	/*
 	 * NOTE: Once we install the file we lose ownership of our kref on
-	 * uobj. It will be put by uverbs_close_fd()
+	 * uobj. It will be put by uverbs_uobject_fd_release()
 	 */
+	filp->private_data = uobj;
 	fd_install(fd, filp);
-
-	return 0;
 }
 
 /*
@@ -625,19 +621,13 @@ static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
  * caller can no longer assume uobj is valid. If this function fails it
  * destroys the uobject, including the attached HW object.
  */
-int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj,
-					   struct uverbs_attr_bundle *attrs)
+void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
+			       struct uverbs_attr_bundle *attrs)
 {
 	struct ib_uverbs_file *ufile = attrs->ufile;
-	int ret;
 
 	/* alloc_commit consumes the uobj kref */
-	ret = uobj->uapi_object->type_class->alloc_commit(uobj);
-	if (ret) {
-		uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
-		up_read(&ufile->hw_destroy_rwsem);
-		return ret;
-	}
+	uobj->uapi_object->type_class->alloc_commit(uobj);
 
 	/* kref is held so long as the uobj is on the uobj list. */
 	uverbs_uobject_get(uobj);
@@ -650,8 +640,6 @@ int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj,
 
 	/* Matches the down_read in rdma_alloc_begin_uobject */
 	up_read(&ufile->hw_destroy_rwsem);
-
-	return 0;
 }
 
 /*
@@ -663,7 +651,6 @@ void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
 {
 	struct ib_uverbs_file *ufile = uobj->ufile;
 
-	uobj->object = NULL;
 	uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
 
 	/* Matches the down_read in rdma_alloc_begin_uobject */
@@ -681,7 +668,10 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj,
 	struct file *filp = uobj->object;
 
 	WARN_ON(mode != UVERBS_LOOKUP_READ);
-	/* This indirectly calls uverbs_close_fd and free the object */
+	/*
+	 * This indirectly calls uverbs_uobject_fd_release() and free the
+	 * object
+	 */
 	fput(filp);
 }
 
@@ -744,33 +734,32 @@ const struct uverbs_obj_type_class uverbs_idr_class = {
 	.lookup_put = lookup_put_idr_uobject,
 	.destroy_hw = destroy_hw_idr_uobject,
 	.remove_handle = remove_handle_idr_uobject,
-	/*
-	 * When we destroy an object, we first just lock it for WRITE and
-	 * actually DESTROY it in the finalize stage. So, the problematic
-	 * scenario is when we just started the finalize stage of the
-	 * destruction (nothing was executed yet). Now, the other thread
-	 * fetched the object for READ access, but it didn't lock it yet.
-	 * The DESTROY thread continues and starts destroying the object.
-	 * When the other thread continue - without the RCU, it would
-	 * access freed memory. However, the rcu_read_lock delays the free
-	 * until the rcu_read_lock of the READ operation quits. Since the
-	 * exclusive lock of the object is still taken by the DESTROY flow, the
-	 * READ operation will get -EBUSY and it'll just bail out.
-	 */
-	.needs_kfree_rcu = true,
 };
 EXPORT_SYMBOL(uverbs_idr_class);
 
-void uverbs_close_fd(struct file *f)
+/*
+ * Users of UVERBS_TYPE_ALLOC_FD should set this function as the struct
+ * file_operations release method.
+ */
+int uverbs_uobject_fd_release(struct inode *inode, struct file *filp)
 {
-	struct ib_uobject *uobj = f->private_data;
-	struct ib_uverbs_file *ufile = uobj->ufile;
-	struct uverbs_attr_bundle attrs = {
-		.context = uobj->context,
-		.ufile = ufile,
-	};
+	struct ib_uverbs_file *ufile;
+	struct ib_uobject *uobj;
+
+	/*
+	 * This can only happen if the fput came from alloc_abort_fd_uobject()
+	 */
+	if (!filp->private_data)
+		return 0;
+	uobj = filp->private_data;
+	ufile = uobj->ufile;
 
 	if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
+		struct uverbs_attr_bundle attrs = {
+			.context = uobj->context,
+			.ufile = ufile,
+		};
+
 		/*
 		 * lookup_get_fd_uobject holds the kref on the struct file any
 		 * time a FD uobj is locked, which prevents this release
@@ -782,13 +771,14 @@ void uverbs_close_fd(struct file *f)
 		up_read(&ufile->hw_destroy_rwsem);
 	}
 
-	/* Matches the get in alloc_begin_fd_uobject */
+	/* Matches the get in alloc_commit_fd_uobject() */
 	kref_put(&ufile->ref, ib_uverbs_release_file);
 
 	/* Pairs with filp->private_data in alloc_begin_fd_uobject */
 	uverbs_uobject_put(uobj);
+	return 0;
 }
-EXPORT_SYMBOL(uverbs_close_fd);
+EXPORT_SYMBOL(uverbs_uobject_fd_release);
 
 /*
  * Drop the ucontext off the ufile and completely disconnect it from the
@@ -855,9 +845,7 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
 }
 
 /*
- * Destroy the uncontext and every uobject associated with it. If called with
- * reason != RDMA_REMOVE_CLOSE this will not return until the destruction has
- * been completed and ufile->ucontext is NULL.
+ * Destroy the ucontext and every uobject associated with it.
  *
  * This is internally locked and can be called in parallel from multiple
  * contexts.
@@ -865,22 +853,6 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
 void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
 			     enum rdma_remove_reason reason)
 {
-	if (reason == RDMA_REMOVE_CLOSE) {
-		/*
-		 * During destruction we might trigger something that
-		 * synchronously calls release on any file descriptor. For
-		 * this reason all paths that come from file_operations
-		 * release must use try_lock. They can progress knowing that
-		 * there is an ongoing uverbs_destroy_ufile_hw that will clean
-		 * up the driver resources.
-		 */
-		if (!mutex_trylock(&ufile->ucontext_lock))
-			return;
-
-	} else {
-		mutex_lock(&ufile->ucontext_lock);
-	}
-
 	down_write(&ufile->hw_destroy_rwsem);
 
 	/*
@@ -909,7 +881,6 @@ void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
 
 done:
 	up_write(&ufile->hw_destroy_rwsem);
-	mutex_unlock(&ufile->ucontext_lock);
 }
 
 const struct uverbs_obj_type_class uverbs_fd_class = {
@@ -920,7 +891,6 @@ const struct uverbs_obj_type_class uverbs_fd_class = {
 	.lookup_put = lookup_put_fd_uobject,
 	.destroy_hw = destroy_hw_fd_uobject,
 	.remove_handle = remove_handle_fd_uobject,
-	.needs_kfree_rcu = false,
 };
 EXPORT_SYMBOL(uverbs_fd_class);
 
@@ -943,19 +913,17 @@ uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
 		return rdma_lookup_get_uobject(obj, attrs->ufile, id,
 					       UVERBS_LOOKUP_WRITE, attrs);
 	case UVERBS_ACCESS_NEW:
-		return rdma_alloc_begin_uobject(obj, attrs->ufile, attrs);
+		return rdma_alloc_begin_uobject(obj, attrs);
 	default:
 		WARN_ON(true);
 		return ERR_PTR(-EOPNOTSUPP);
 	}
 }
 
-int uverbs_finalize_object(struct ib_uobject *uobj,
-			   enum uverbs_obj_access access, bool commit,
-			   struct uverbs_attr_bundle *attrs)
+void uverbs_finalize_object(struct ib_uobject *uobj,
+			    enum uverbs_obj_access access, bool commit,
+			    struct uverbs_attr_bundle *attrs)
 {
-	int ret = 0;
-
 	/*
 	 * refcounts should be handled at the object level and not at the
 	 * uobject level. Refcounts of the objects themselves are done in
@@ -975,14 +943,11 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
 		break;
 	case UVERBS_ACCESS_NEW:
 		if (commit)
-			ret = rdma_alloc_commit_uobject(uobj, attrs);
+			rdma_alloc_commit_uobject(uobj, attrs);
 		else
 			rdma_alloc_abort_uobject(uobj, attrs);
 		break;
 	default:
 		WARN_ON(true);
-		ret = -EOPNOTSUPP;
 	}
-
-	return ret;
 }
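
The rdma_core.c rework above also moves anon_inode_getfile() from the commit path into alloc_begin_fd_uobject(), so everything that can fail happens before the handler runs: the commit path keeps only steps that cannot fail (kref_get(), fd_install()), hence the int-to-void conversions, and abort only has to fput() the never-installed file and give back the reserved descriptor number. A rough userspace sketch of that begin/commit/abort shape, fallible work up front and an infallible publish step, with all names hypothetical:

    #include <fcntl.h>
    #include <unistd.h>

    struct pending_file {
        int fd;                 /* created in "begin", not yet published */
    };

    /* Begin: everything that can fail happens here. */
    static int pending_file_begin(struct pending_file *p, const char *path)
    {
        p->fd = open(path, O_RDONLY);
        return (p->fd < 0) ? -1 : 0;
    }

    /* Commit: just publish the descriptor; nothing here can fail. */
    static int pending_file_commit(struct pending_file *p)
    {
        return p->fd;
    }

    /* Abort: nothing was published, so teardown is a plain close(). */
    static void pending_file_abort(struct pending_file *p)
    {
        close(p->fd);
    }
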
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index e63fbda..33978e0 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -51,29 +51,6 @@ void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
 int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs);
 
 /*
- * uverbs_uobject_get is called in order to increase the reference count on
- * an uobject. This is useful when a handler wants to keep the uobject's memory
- * alive, regardless if this uobject is still alive in the context's objects
- * repository. Objects are put via uverbs_uobject_put.
- */
-void uverbs_uobject_get(struct ib_uobject *uobject);
-
-/*
- * In order to indicate we no longer needs this uobject, uverbs_uobject_put
- * is called. When the reference count is decreased, the uobject is freed.
- * For example, this is used when attaching a completion channel to a CQ.
- */
-void uverbs_uobject_put(struct ib_uobject *uobject);
-
-/* Indicate this fd is no longer used by this consumer, but its memory isn't
- * necessarily released yet. When the last reference is put, we release the
- * memory. After this call is executed, calling uverbs_uobject_get isn't
- * allowed.
- * This must be called from the release file_operations of the file!
- */
-void uverbs_close_fd(struct file *f);
-
-/*
  * Get an ib_uobject that corresponds to the given id from ufile, assuming
  * the object is from the given type. Lock it to the required access when
  * applicable.
@@ -86,24 +63,9 @@ struct ib_uobject *
 uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
 			     s64 id, struct uverbs_attr_bundle *attrs);
 
-/*
- * Note that certain finalize stages could return a status:
- *   (a) alloc_commit could return a failure if the object is committed at the
- *       same time when the context is destroyed.
- *   (b) remove_commit could fail if the object wasn't destroyed successfully.
- * Since multiple objects could be finalized in one transaction, it is very NOT
- * recommended to have several finalize actions which have side effects.
- * For example, it's NOT recommended to have a certain action which has both
- * a commit action and a destroy action or two destroy objects in the same
- * action. The rule of thumb is to have one destroy or commit action with
- * multiple lookups.
- * The first non zero return value of finalize_object is returned from this
- * function. For example, this could happen when we couldn't destroy an
- * object.
- */
-int uverbs_finalize_object(struct ib_uobject *uobj,
-			   enum uverbs_obj_access access, bool commit,
-			   struct uverbs_attr_bundle *attrs);
+void uverbs_finalize_object(struct ib_uobject *uobj,
+			    enum uverbs_obj_access access, bool commit,
+			    struct uverbs_attr_bundle *attrs);
 
 int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
 
@@ -189,6 +151,7 @@ void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
 			      unsigned int num_attrs);
 void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile);
 
+extern const struct uapi_definition uverbs_def_obj_async_fd[];
 extern const struct uapi_definition uverbs_def_obj_counters[];
 extern const struct uapi_definition uverbs_def_obj_cq[];
 extern const struct uapi_definition uverbs_def_obj_device[];
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8917125..30d4c12 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1068,7 +1068,7 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
 	}
 
 settimeout_out:
-	return skb->len;
+	return 0;
 }
 
 static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
@@ -1139,7 +1139,7 @@ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
 	}
 
 resp_out:
-	return skb->len;
+	return 0;
 }
 
 static void free_sm_ah(struct kref *kref)
diff --git a/drivers/infiniband/core/trace.c b/drivers/infiniband/core/trace.c
new file mode 100644
index 0000000..6c3514b
--- /dev/null
+++ b/drivers/infiniband/core/trace.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Trace points for core RDMA functions.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#define CREATE_TRACE_POINTS
+
+#include <rdma/ib_verbs.h>
+
+#include <trace/events/rdma_core.h>
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 146f98f..06b6125 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -54,7 +54,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 
 	for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
 		page = sg_page_iter_page(&sg_iter);
-		put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
+		unpin_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
 	}
 
 	sg_free_table(&umem->sg_head);
@@ -166,10 +166,13 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
 		 * for any address.
 		 */
 		mask |= (sg_dma_address(sg) + pgoff) ^ va;
-		if (i && i != (umem->nmap - 1))
-			/* restrict by length as well for interior SGEs */
-			mask |= sg_dma_len(sg);
 		va += sg_dma_len(sg) - pgoff;
+		/* Except for the last entry, the ending iova alignment sets
+		 * the maximum possible page size as the low bits of the iova
+		 * must be zero when starting the next chunk.
+		 */
+		if (i != (umem->nmap - 1))
+			mask |= va;
 		pgoff = 0;
 	}
 	best_pg_bit = rdma_find_pg_bit(mask, pgsz_bitmap);
@@ -257,16 +260,13 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 	sg = umem->sg_head.sgl;
 
 	while (npages) {
-		down_read(&mm->mmap_sem);
-		ret = get_user_pages(cur_base,
-				     min_t(unsigned long, npages,
-					   PAGE_SIZE / sizeof (struct page *)),
-				     gup_flags | FOLL_LONGTERM,
-				     page_list, NULL);
-		if (ret < 0) {
-			up_read(&mm->mmap_sem);
+		ret = pin_user_pages_fast(cur_base,
+					  min_t(unsigned long, npages,
+						PAGE_SIZE /
+						sizeof(struct page *)),
+					  gup_flags | FOLL_LONGTERM, page_list);
+		if (ret < 0)
 			goto umem_release;
-		}
 
 		cur_base += ret * PAGE_SIZE;
 		npages   -= ret;
@@ -274,8 +274,6 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 		sg = ib_umem_add_sg_table(sg, page_list, ret,
 			dma_get_max_seg_size(device->dma_device),
 			&umem->sg_nents);
-
-		up_read(&mm->mmap_sem);
 	}
 
 	sg_mark_end(sg);
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index dac3fd2..b8c657b 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -227,21 +227,10 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
 	umem_odp->notifier.ops = ops;
 
 	umem_odp->page_shift = PAGE_SHIFT;
-	if (access & IB_ACCESS_HUGETLB) {
-		struct vm_area_struct *vma;
-		struct hstate *h;
-
-		down_read(&mm->mmap_sem);
-		vma = find_vma(mm, ib_umem_start(umem_odp));
-		if (!vma || !is_vm_hugetlb_page(vma)) {
-			up_read(&mm->mmap_sem);
-			ret = -EINVAL;
-			goto err_free;
-		}
-		h = hstate_vma(vma);
-		umem_odp->page_shift = huge_page_shift(h);
-		up_read(&mm->mmap_sem);
-	}
+#ifdef CONFIG_HUGETLB_PAGE
+	if (access & IB_ACCESS_HUGETLB)
+		umem_odp->page_shift = HPAGE_SHIFT;
+#endif
 
 	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
 	ret = ib_init_umem_odp(umem_odp, ops);
@@ -251,7 +240,6 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
 
 err_put_pid:
 	put_pid(umem_odp->tgid);
-err_free:
 	kfree(umem_odp);
 	return ERR_PTR(ret);
 }
@@ -293,9 +281,8 @@ EXPORT_SYMBOL(ib_umem_odp_release);
  * The function returns -EFAULT if the DMA mapping operation fails. It returns
  * -EAGAIN if a concurrent invalidation prevents us from updating the page.
  *
- * The page is released via put_user_page even if the operation failed. For
- * on-demand pinning, the page is released whenever it isn't stored in the
- * umem.
+ * The page is released via put_page even if the operation failed. For on-demand
+ * pinning, the page is released whenever it isn't stored in the umem.
  */
 static int ib_umem_odp_map_dma_single_page(
 		struct ib_umem_odp *umem_odp,
@@ -348,7 +335,7 @@ static int ib_umem_odp_map_dma_single_page(
 	}
 
 out:
-	put_user_page(page);
+	put_page(page);
 	return ret;
 }
 
@@ -425,7 +412,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
 
 	while (bcnt > 0) {
 		const size_t gup_num_pages = min_t(size_t,
-				(bcnt + BIT(page_shift) - 1) >> page_shift,
+				ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
 				PAGE_SIZE / sizeof(struct page *));
 
 		down_read(&owning_mm->mmap_sem);
@@ -458,7 +445,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
 					ret = -EFAULT;
 					break;
 				}
-				put_user_page(local_page_list[j]);
+				put_page(local_page_list[j]);
 				continue;
 			}
 
@@ -485,8 +472,8 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
 			 * ib_umem_odp_map_dma_single_page().
 			 */
 			if (npages - (j + 1) > 0)
-				put_user_pages(&local_page_list[j+1],
-					       npages - (j + 1));
+				release_pages(&local_page_list[j+1],
+					      npages - (j + 1));
 			break;
 		}
 	}
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 63f7f7d..7df7198 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -111,7 +111,6 @@ struct ib_uverbs_device {
 	struct srcu_struct			disassociate_srcu;
 	struct mutex				lists_mutex; /* protect lists */
 	struct list_head			uverbs_file_list;
-	struct list_head			uverbs_events_file_list;
 	struct uverbs_api			*uapi;
 };
 
@@ -124,10 +123,9 @@ struct ib_uverbs_event_queue {
 };
 
 struct ib_uverbs_async_event_file {
+	struct ib_uobject			uobj;
 	struct ib_uverbs_event_queue		ev_queue;
-	struct ib_uverbs_file		       *uverbs_file;
-	struct kref				ref;
-	struct list_head			list;
+	struct ib_event_handler			event_handler;
 };
 
 struct ib_uverbs_completion_event_file {
@@ -144,8 +142,7 @@ struct ib_uverbs_file {
 	 * ucontext_lock held
 	 */
 	struct ib_ucontext		       *ucontext;
-	struct ib_event_handler			event_handler;
-	struct ib_uverbs_async_event_file       *async_file;
+	struct ib_uverbs_async_event_file      *async_file;
 	struct list_head			list;
 
 	/*
@@ -183,6 +180,7 @@ struct ib_uverbs_mcast_entry {
 
 struct ib_uevent_object {
 	struct ib_uobject	uobject;
+	/* List member for ib_uverbs_async_event_file list */
 	struct list_head	event_list;
 	u32			events_reported;
 };
@@ -210,25 +208,24 @@ struct ib_uwq_object {
 };
 
 struct ib_ucq_object {
-	struct ib_uobject	uobject;
+	struct ib_uevent_object uevent;
 	struct list_head	comp_list;
-	struct list_head	async_list;
 	u32			comp_events_reported;
-	u32			async_events_reported;
 };
 
 extern const struct file_operations uverbs_event_fops;
+extern const struct file_operations uverbs_async_event_fops;
 void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue);
-struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
-					      struct ib_device *ib_dev);
-void ib_uverbs_free_async_event_file(struct ib_uverbs_file *uverbs_file);
+void ib_uverbs_init_async_event_file(struct ib_uverbs_async_event_file *ev_file);
+void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue);
 void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res);
 
-void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
-			   struct ib_uverbs_completion_event_file *ev_file,
+int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs);
+int ib_init_ucontext(struct uverbs_attr_bundle *attrs);
+
+void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
 			   struct ib_ucq_object *uobj);
-void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
-			      struct ib_uevent_object *uobj);
+void ib_uverbs_release_uevent(struct ib_uevent_object *uobj);
 void ib_uverbs_release_file(struct kref *ref);
 
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
@@ -236,8 +233,6 @@ void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
-void ib_uverbs_event_handler(struct ib_event_handler *handler,
-			     struct ib_event *event);
 int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
 			   enum rdma_remove_reason why,
 			   struct uverbs_attr_bundle *attrs);
@@ -276,23 +271,6 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
 					  size_t kern_filter_sz,
 					  union ib_flow_spec *ib_spec);
 
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_DEVICE);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_PD);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_MR);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_COMP_CHANNEL);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_CQ);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_QP);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_AH);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_MW);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_SRQ);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_FLOW);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_WQ);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_XRCD);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_DM);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS);
-
 /*
  * ib_uverbs_query_port_resp.port_cap_flags started out as just a copy of the
  * PortInfo CapabilityMask, but was extended with unique bits.
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 06ed32c..c8693f5 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -203,82 +203,55 @@ _ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
 #define ib_uverbs_lookup_comp_file(_fd, _ufile)                                \
 	_ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile)
 
-static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
+int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs)
 {
-	struct ib_uverbs_file *file = attrs->ufile;
-	struct ib_uverbs_get_context      cmd;
-	struct ib_uverbs_get_context_resp resp;
-	struct ib_ucontext		 *ucontext;
-	struct file			 *filp;
-	struct ib_rdmacg_object		 cg_obj;
+	struct ib_uverbs_file *ufile = attrs->ufile;
+	struct ib_ucontext *ucontext;
 	struct ib_device *ib_dev;
+
+	ib_dev = srcu_dereference(ufile->device->ib_dev,
+				  &ufile->device->disassociate_srcu);
+	if (!ib_dev)
+		return -EIO;
+
+	ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
+	if (!ucontext)
+		return -ENOMEM;
+
+	ucontext->res.type = RDMA_RESTRACK_CTX;
+	ucontext->device = ib_dev;
+	ucontext->ufile = ufile;
+	xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
+	attrs->context = ucontext;
+	return 0;
+}
+
+int ib_init_ucontext(struct uverbs_attr_bundle *attrs)
+{
+	struct ib_ucontext *ucontext = attrs->context;
+	struct ib_uverbs_file *file = attrs->ufile;
 	int ret;
 
-	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
-	if (ret)
-		return ret;
-
+	if (!down_read_trylock(&file->hw_destroy_rwsem))
+		return -EIO;
 	mutex_lock(&file->ucontext_lock);
-	ib_dev = srcu_dereference(file->device->ib_dev,
-				  &file->device->disassociate_srcu);
-	if (!ib_dev) {
-		ret = -EIO;
-		goto err;
-	}
-
 	if (file->ucontext) {
 		ret = -EINVAL;
 		goto err;
 	}
 
-	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
+	ret = ib_rdmacg_try_charge(&ucontext->cg_obj, ucontext->device,
+				   RDMACG_RESOURCE_HCA_HANDLE);
 	if (ret)
 		goto err;
 
-	ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
-	if (!ucontext) {
-		ret = -ENOMEM;
-		goto err_alloc;
-	}
-
-	attrs->context = ucontext;
-
-	ucontext->res.type = RDMA_RESTRACK_CTX;
-	ucontext->device = ib_dev;
-	ucontext->cg_obj = cg_obj;
-	/* ufile is required when some objects are released */
-	ucontext->ufile = file;
-
-	ucontext->closing = false;
-	ucontext->cleanup_retryable = false;
-
-	xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
-
-	ret = get_unused_fd_flags(O_CLOEXEC);
-	if (ret < 0)
-		goto err_free;
-	resp.async_fd = ret;
-
-	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
-	if (IS_ERR(filp)) {
-		ret = PTR_ERR(filp);
-		goto err_fd;
-	}
-
-	resp.num_comp_vectors = file->device->num_comp_vectors;
-
-	ret = uverbs_response(attrs, &resp, sizeof(resp));
+	ret = ucontext->device->ops.alloc_ucontext(ucontext,
+						   &attrs->driver_udata);
 	if (ret)
-		goto err_file;
-
-	ret = ib_dev->ops.alloc_ucontext(ucontext, &attrs->driver_udata);
-	if (ret)
-		goto err_file;
+		goto err_uncharge;
 
 	rdma_restrack_uadd(&ucontext->res);
 
-	fd_install(resp.async_fd, filp);
-
 	/*
 	 * Make sure that ib_uverbs_get_ucontext() sees the pointer update
 	 * only after all writes to setup the ucontext have completed
@@ -286,24 +259,62 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
 	smp_store_release(&file->ucontext, ucontext);
 
 	mutex_unlock(&file->ucontext_lock);
-
+	up_read(&file->hw_destroy_rwsem);
 	return 0;
 
-err_file:
-	ib_uverbs_free_async_event_file(file);
-	fput(filp);
-
-err_fd:
-	put_unused_fd(resp.async_fd);
-
-err_free:
-	kfree(ucontext);
-
-err_alloc:
-	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
-
+err_uncharge:
+	ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device,
+			   RDMACG_RESOURCE_HCA_HANDLE);
 err:
 	mutex_unlock(&file->ucontext_lock);
+	up_read(&file->hw_destroy_rwsem);
+	return ret;
+}
+
+static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
+{
+	struct ib_uverbs_get_context_resp resp;
+	struct ib_uverbs_get_context cmd;
+	struct ib_device *ib_dev;
+	struct ib_uobject *uobj;
+	int ret;
+
+	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+	if (ret)
+		return ret;
+
+	ret = ib_alloc_ucontext(attrs);
+	if (ret)
+		return ret;
+
+	uobj = uobj_alloc(UVERBS_OBJECT_ASYNC_EVENT, attrs, &ib_dev);
+	if (IS_ERR(uobj)) {
+		ret = PTR_ERR(uobj);
+		goto err_ucontext;
+	}
+
+	resp = (struct ib_uverbs_get_context_resp){
+		.num_comp_vectors = attrs->ufile->device->num_comp_vectors,
+		.async_fd = uobj->id,
+	};
+	ret = uverbs_response(attrs, &resp, sizeof(resp));
+	if (ret)
+		goto err_uobj;
+
+	ret = ib_init_ucontext(attrs);
+	if (ret)
+		goto err_uobj;
+
+	ib_uverbs_init_async_event_file(
+		container_of(uobj, struct ib_uverbs_async_event_file, uobj));
+	rdma_alloc_commit_uobject(uobj, attrs);
+	return 0;
+
+err_uobj:
+	rdma_alloc_abort_uobject(uobj, attrs);
+err_ucontext:
+	kfree(attrs->context);
+	attrs->context = NULL;
 	return ret;
 }
 
@@ -446,7 +457,8 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
 	if (ret)
 		goto err_copy;
 
-	return uobj_alloc_commit(uobj, attrs);
+	rdma_alloc_commit_uobject(uobj, attrs);
+	return 0;
 
 err_copy:
 	ib_dealloc_pd_user(pd, uverbs_get_cleared_udata(attrs));
@@ -642,7 +654,8 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
 
 	mutex_unlock(&ibudev->xrcd_tree_mutex);
 
-	return uobj_alloc_commit(&obj->uobject, attrs);
+	rdma_alloc_commit_uobject(&obj->uobject, attrs);
+	return 0;
 
 err_copy:
 	if (inode) {
@@ -774,7 +787,8 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
 
 	uobj_put_obj_read(pd);
 
-	return uobj_alloc_commit(uobj, attrs);
+	rdma_alloc_commit_uobject(uobj, attrs);
+	return 0;
 
 err_copy:
 	ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));
@@ -928,7 +942,8 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
 		goto err_copy;
 
 	uobj_put_obj_read(pd);
-	return uobj_alloc_commit(uobj, attrs);
+	rdma_alloc_commit_uobject(uobj, attrs);
+	return 0;
 
 err_copy:
 	uverbs_dealloc_mw(mw);
@@ -980,7 +995,8 @@ static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs)
 		return ret;
 	}
 
-	return uobj_alloc_commit(uobj, attrs);
+	rdma_alloc_commit_uobject(uobj, attrs);
+	return 0;
 }
 
 static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
@@ -1010,11 +1026,9 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
 		}
 	}
 
-	obj->uobject.user_handle = cmd->user_handle;
-	obj->comp_events_reported  = 0;
-	obj->async_events_reported = 0;
+	obj->uevent.uobject.user_handle = cmd->user_handle;
 	INIT_LIST_HEAD(&obj->comp_list);
-	INIT_LIST_HEAD(&obj->async_list);
+	INIT_LIST_HEAD(&obj->uevent.event_list);
 
 	attr.cqe = cmd->cqe;
 	attr.comp_vector = cmd->comp_vector;
@@ -1026,7 +1040,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
 		goto err_file;
 	}
 	cq->device        = ib_dev;
-	cq->uobject       = &obj->uobject;
+	cq->uobject       = obj;
 	cq->comp_handler  = ib_uverbs_comp_handler;
 	cq->event_handler = ib_uverbs_cq_event_handler;
 	cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
@@ -1036,9 +1050,9 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
 	if (ret)
 		goto err_free;
 
-	obj->uobject.object = cq;
+	obj->uevent.uobject.object = cq;
 	memset(&resp, 0, sizeof resp);
-	resp.base.cq_handle = obj->uobject.id;
+	resp.base.cq_handle = obj->uevent.uobject.id;
 	resp.base.cqe       = cq->cqe;
 	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
 
@@ -1049,9 +1063,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
 	if (ret)
 		goto err_cb;
 
-	ret = uobj_alloc_commit(&obj->uobject, attrs);
-	if (ret)
-		return ERR_PTR(ret);
+	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
 	return obj;
 
 err_cb:
@@ -1061,10 +1073,10 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
 	kfree(cq);
 err_file:
 	if (ev_file)
-		ib_uverbs_release_ucq(attrs->ufile, ev_file, obj);
+		ib_uverbs_release_ucq(ev_file, obj);
 
 err:
-	uobj_alloc_abort(&obj->uobject, attrs);
+	uobj_alloc_abort(&obj->uevent.uobject, attrs);
 
 	return ERR_PTR(ret);
 }
@@ -1133,7 +1145,8 @@ static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
 
 	ret = uverbs_response(attrs, &resp, sizeof(resp));
 out:
-	uobj_put_obj_read(cq);
+	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 
 	return ret;
 }
@@ -1216,7 +1229,8 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
 		ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);
 
 out_put:
-	uobj_put_obj_read(cq);
+	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 	return ret;
 }
 
@@ -1237,8 +1251,8 @@ static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
 	ib_req_notify_cq(cq, cmd.solicited_only ?
 			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
 
-	uobj_put_obj_read(cq);
-
+	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 	return 0;
 }
 
@@ -1258,10 +1272,10 @@ static int ib_uverbs_destroy_cq(struct uverbs_attr_bundle *attrs)
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	obj = container_of(uobj, struct ib_ucq_object, uobject);
+	obj = container_of(uobj, struct ib_ucq_object, uevent.uobject);
 	memset(&resp, 0, sizeof(resp));
 	resp.comp_events_reported  = obj->comp_events_reported;
-	resp.async_events_reported = obj->async_events_reported;
+	resp.async_events_reported = obj->uevent.events_reported;
 
 	uobj_put_destroy(uobj);
 
@@ -1375,7 +1389,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
 	}
 
 	attr.event_handler = ib_uverbs_qp_event_handler;
-	attr.qp_context    = attrs->ufile;
 	attr.send_cq       = scq;
 	attr.recv_cq       = rcq;
 	attr.srq           = srq;
@@ -1391,7 +1404,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
 	attr.cap.max_recv_sge    = cmd->max_recv_sge;
 	attr.cap.max_inline_data = cmd->max_inline_data;
 
-	obj->uevent.events_reported     = 0;
 	INIT_LIST_HEAD(&obj->uevent.event_list);
 	INIT_LIST_HEAD(&obj->mcast_list);
 
@@ -1421,7 +1433,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
 		qp = ib_create_qp(pd, &attr);
 	else
 		qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
-				   &obj->uevent.uobject);
+				   obj);
 
 	if (IS_ERR(qp)) {
 		ret = PTR_ERR(qp);
@@ -1439,7 +1451,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
 		qp->srq		  = attr.srq;
 		qp->rwq_ind_tbl	  = ind_tbl;
 		qp->event_handler = attr.event_handler;
-		qp->qp_context	  = attr.qp_context;
 		qp->qp_type	  = attr.qp_type;
 		atomic_set(&qp->usecnt, 0);
 		atomic_inc(&pd->usecnt);
@@ -1454,7 +1465,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
 			atomic_inc(&ind_tbl->usecnt);
 	} else {
 		/* It is done in _ib_create_qp for other QP types */
-		qp->uobject = &obj->uevent.uobject;
+		qp->uobject = obj;
 	}
 
 	obj->uevent.uobject.object = qp;
@@ -1483,15 +1494,19 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
 	if (pd)
 		uobj_put_obj_read(pd);
 	if (scq)
-		uobj_put_obj_read(scq);
+		rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
+					UVERBS_LOOKUP_READ);
 	if (rcq && rcq != scq)
-		uobj_put_obj_read(rcq);
+		rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
+					UVERBS_LOOKUP_READ);
 	if (srq)
-		uobj_put_obj_read(srq);
+		rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+					UVERBS_LOOKUP_READ);
 	if (ind_tbl)
 		uobj_put_obj_read(ind_tbl);
 
-	return uobj_alloc_commit(&obj->uevent.uobject, attrs);
+	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
+	return 0;
 err_cb:
 	ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
 
@@ -1501,11 +1516,14 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
 	if (pd)
 		uobj_put_obj_read(pd);
 	if (scq)
-		uobj_put_obj_read(scq);
+		rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
+					UVERBS_LOOKUP_READ);
 	if (rcq && rcq != scq)
-		uobj_put_obj_read(rcq);
+		rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
+					UVERBS_LOOKUP_READ);
 	if (srq)
-		uobj_put_obj_read(srq);
+		rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+					UVERBS_LOOKUP_READ);
 	if (ind_tbl)
 		uobj_put_obj_read(ind_tbl);
 
@@ -1567,7 +1585,7 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
 	struct ib_xrcd		       *xrcd;
 	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
 	struct ib_qp                   *qp;
-	struct ib_qp_open_attr          attr;
+	struct ib_qp_open_attr          attr = {};
 	int ret;
 	struct ib_device *ib_dev;
 
@@ -1593,11 +1611,9 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
 	}
 
 	attr.event_handler = ib_uverbs_qp_event_handler;
-	attr.qp_context    = attrs->ufile;
 	attr.qp_num        = cmd.qpn;
 	attr.qp_type       = cmd.qp_type;
 
-	obj->uevent.events_reported = 0;
 	INIT_LIST_HEAD(&obj->uevent.event_list);
 	INIT_LIST_HEAD(&obj->mcast_list);
 
@@ -1620,10 +1636,11 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
 
 	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
 	atomic_inc(&obj->uxrcd->refcnt);
-	qp->uobject = &obj->uevent.uobject;
+	qp->uobject = obj;
 	uobj_put_read(xrcd_uobj);
 
-	return uobj_alloc_commit(&obj->uevent.uobject, attrs);
+	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
+	return 0;
 
 err_destroy:
 	ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
@@ -1684,7 +1701,8 @@ static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
 
 	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
 
-	uobj_put_obj_read(qp);
+	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 
 	if (ret)
 		goto out;
@@ -1921,7 +1939,8 @@ static int modify_qp(struct uverbs_attr_bundle *attrs,
 				      &attrs->driver_udata);
 
 release_qp:
-	uobj_put_obj_read(qp);
+	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 out:
 	kfree(attr);
 
@@ -2185,7 +2204,8 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
 		ret = ret2;
 
 out_put:
-	uobj_put_obj_read(qp);
+	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 
 	while (wr) {
 		if (is_ud && ud_wr(wr)->ah)
@@ -2327,7 +2347,8 @@ static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
 	resp.bad_wr = 0;
 	ret = qp->device->ops.post_recv(qp->real_qp, wr, &bad_wr);
 
-	uobj_put_obj_read(qp);
+	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 	if (ret) {
 		for (next = wr; next; next = next->next) {
 			++resp.bad_wr;
@@ -2377,7 +2398,8 @@ static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
 	resp.bad_wr = 0;
 	ret = srq->device->ops.post_srq_recv(srq, wr, &bad_wr);
 
-	uobj_put_obj_read(srq);
+	rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 
 	if (ret)
 		for (next = wr; next; next = next->next) {
@@ -2465,7 +2487,8 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
 		goto err_copy;
 
 	uobj_put_obj_read(pd);
-	return uobj_alloc_commit(uobj, attrs);
+	rdma_alloc_commit_uobject(uobj, attrs);
+	return 0;
 
 err_copy:
 	rdma_destroy_ah_user(ah, RDMA_DESTROY_AH_SLEEPABLE,
@@ -2507,7 +2530,7 @@ static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
 	if (!qp)
 		return -EINVAL;
 
-	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+	obj = qp->uobject;
 
 	mutex_lock(&obj->mcast_lock);
 	list_for_each_entry(mcast, &obj->mcast_list, list)
@@ -2534,7 +2557,8 @@ static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
 
 out_put:
 	mutex_unlock(&obj->mcast_lock);
-	uobj_put_obj_read(qp);
+	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 
 	return ret;
 }
@@ -2556,7 +2580,7 @@ static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
 	if (!qp)
 		return -EINVAL;
 
-	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+	obj = qp->uobject;
 	mutex_lock(&obj->mcast_lock);
 
 	list_for_each_entry(mcast, &obj->mcast_list, list)
@@ -2577,7 +2601,8 @@ static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
 
 out_put:
 	mutex_unlock(&obj->mcast_lock);
-	uobj_put_obj_read(qp);
+	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 	return ret;
 }
 
@@ -2943,7 +2968,6 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
 	wq_init_attr.wq_type = cmd.wq_type;
 	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
 	wq_init_attr.create_flags = cmd.create_flags;
-	obj->uevent.events_reported = 0;
 	INIT_LIST_HEAD(&obj->uevent.event_list);
 
 	wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
@@ -2952,7 +2976,7 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
 		goto err_put_cq;
 	}
 
-	wq->uobject = &obj->uevent.uobject;
+	wq->uobject = obj;
 	obj->uevent.uobject.object = wq;
 	wq->wq_type = wq_init_attr.wq_type;
 	wq->cq = cq;
@@ -2962,7 +2986,7 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
 	atomic_set(&wq->usecnt, 0);
 	atomic_inc(&pd->usecnt);
 	atomic_inc(&cq->usecnt);
-	wq->uobject = &obj->uevent.uobject;
+	wq->uobject = obj;
 	obj->uevent.uobject.object = wq;
 
 	memset(&resp, 0, sizeof(resp));
@@ -2976,13 +3000,16 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
 		goto err_copy;
 
 	uobj_put_obj_read(pd);
-	uobj_put_obj_read(cq);
-	return uobj_alloc_commit(&obj->uevent.uobject, attrs);
+	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
+	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
+	return 0;
 
 err_copy:
 	ib_destroy_wq(wq, uverbs_get_cleared_udata(attrs));
 err_put_cq:
-	uobj_put_obj_read(cq);
+	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 err_put_pd:
 	uobj_put_obj_read(pd);
 err_uobj:
@@ -3048,7 +3075,8 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
 	}
 	ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
 					&attrs->driver_udata);
-	uobj_put_obj_read(wq);
+	rdma_lookup_put_uobject(&wq->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 	return ret;
 }
 
@@ -3149,9 +3177,11 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
 	kfree(wqs_handles);
 
 	for (j = 0; j < num_read_wqs; j++)
-		uobj_put_obj_read(wqs[j]);
+		rdma_lookup_put_uobject(&wqs[j]->uobject->uevent.uobject,
+					UVERBS_LOOKUP_READ);
 
-	return uobj_alloc_commit(uobj, attrs);
+	rdma_alloc_commit_uobject(uobj, attrs);
+	return 0;
 
 err_copy:
 	ib_destroy_rwq_ind_table(rwq_ind_tbl);
@@ -3159,7 +3189,8 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
 	uobj_alloc_abort(uobj, attrs);
 put_wqs:
 	for (j = 0; j < num_read_wqs; j++)
-		uobj_put_obj_read(wqs[j]);
+		rdma_lookup_put_uobject(&wqs[j]->uobject->uevent.uobject,
+					UVERBS_LOOKUP_READ);
 err_free:
 	kfree(wqs_handles);
 	kfree(wqs);
@@ -3325,11 +3356,13 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
 	if (err)
 		goto err_copy;
 
-	uobj_put_obj_read(qp);
+	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 	kfree(flow_attr);
 	if (cmd.flow_attr.num_of_specs)
 		kfree(kern_flow_attr);
-	return uobj_alloc_commit(uobj, attrs);
+	rdma_alloc_commit_uobject(uobj, attrs);
+	return 0;
 err_copy:
 	if (!qp->device->ops.destroy_flow(flow_id))
 		atomic_dec(&qp->usecnt);
@@ -3338,7 +3371,8 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
 err_free_flow_attr:
 	kfree(flow_attr);
 err_put:
-	uobj_put_obj_read(qp);
+	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 err_uobj:
 	uobj_alloc_abort(uobj, attrs);
 err_free_attr:
@@ -3423,7 +3457,6 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
 	attr.attr.max_sge   = cmd->max_sge;
 	attr.attr.srq_limit = cmd->srq_limit;
 
-	obj->uevent.events_reported = 0;
 	INIT_LIST_HEAD(&obj->uevent.event_list);
 
 	srq = rdma_zalloc_drv_obj(ib_dev, ib_srq);
@@ -3435,7 +3468,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
 	srq->device        = pd->device;
 	srq->pd            = pd;
 	srq->srq_type	   = cmd->srq_type;
-	srq->uobject       = &obj->uevent.uobject;
+	srq->uobject       = obj;
 	srq->event_handler = attr.event_handler;
 	srq->srq_context   = attr.srq_context;
 
@@ -3474,10 +3507,12 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
 		uobj_put_read(xrcd_uobj);
 
 	if (ib_srq_has_cq(cmd->srq_type))
-		uobj_put_obj_read(attr.ext.cq);
+		rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
+					UVERBS_LOOKUP_READ);
 
 	uobj_put_obj_read(pd);
-	return uobj_alloc_commit(&obj->uevent.uobject, attrs);
+	rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
+	return 0;
 
 err_copy:
 	ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
@@ -3490,7 +3525,8 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
 
 err_put_cq:
 	if (ib_srq_has_cq(cmd->srq_type))
-		uobj_put_obj_read(attr.ext.cq);
+		rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
+					UVERBS_LOOKUP_READ);
 
 err_put_xrcd:
 	if (cmd->srq_type == IB_SRQT_XRC) {
@@ -3558,7 +3594,8 @@ static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
 	ret = srq->device->ops.modify_srq(srq, &attr, cmd.attr_mask,
 					  &attrs->driver_udata);
 
-	uobj_put_obj_read(srq);
+	rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 
 	return ret;
 }
@@ -3581,7 +3618,8 @@ static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
 
 	ret = ib_query_srq(srq, &attr);
 
-	uobj_put_obj_read(srq);
+	rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 
 	if (ret)
 		return ret;
@@ -3706,8 +3744,8 @@ static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
 
 	ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
 
-	uobj_put_obj_read(cq);
-
+	rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+				UVERBS_LOOKUP_READ);
 	return ret;
 }
 
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 269938f..538affb 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -220,24 +220,17 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
 	return ret;
 }
 
-static int uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
-				  struct uverbs_objs_arr_attr *attr,
-				  bool commit, struct uverbs_attr_bundle *attrs)
+static void uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
+				   struct uverbs_objs_arr_attr *attr,
+				   bool commit,
+				   struct uverbs_attr_bundle *attrs)
 {
 	const struct uverbs_attr_spec *spec = &attr_uapi->spec;
-	int current_ret;
-	int ret = 0;
 	size_t i;
 
-	for (i = 0; i != attr->len; i++) {
-		current_ret = uverbs_finalize_object(attr->uobjects[i],
-						     spec->u2.objs_arr.access,
-						     commit, attrs);
-		if (!ret)
-			ret = current_ret;
-	}
-
-	return ret;
+	for (i = 0; i != attr->len; i++)
+		uverbs_finalize_object(attr->uobjects[i],
+				       spec->u2.objs_arr.access, commit, attrs);
 }
 
 static int uverbs_process_attr(struct bundle_priv *pbundle,
@@ -495,26 +488,22 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
 	return ret;
 }
 
-static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
+static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
 {
 	unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len;
 	struct bundle_alloc_head *memblock;
 	unsigned int i;
-	int ret = 0;
 
 	/* fast path for simple uobjects */
 	i = -1;
 	while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
 				  i + 1)) < key_bitmap_len) {
 		struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
-		int current_ret;
 
-		current_ret = uverbs_finalize_object(
+		uverbs_finalize_object(
 			attr->obj_attr.uobject,
 			attr->obj_attr.attr_elm->spec.u.obj.access, commit,
 			&pbundle->bundle);
-		if (!ret)
-			ret = current_ret;
 	}
 
 	i = -1;
@@ -523,7 +512,6 @@ static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
 		struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
 		const struct uverbs_api_attr *attr_uapi;
 		void __rcu **slot;
-		int current_ret;
 
 		slot = uapi_get_attr_for_method(
 			pbundle,
@@ -534,11 +522,8 @@ static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
 		attr_uapi = rcu_dereference_protected(*slot, true);
 
 		if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
-			current_ret = uverbs_free_idrs_array(
-				attr_uapi, &attr->objs_arr_attr, commit,
-				&pbundle->bundle);
-			if (!ret)
-				ret = current_ret;
+			uverbs_free_idrs_array(attr_uapi, &attr->objs_arr_attr,
+					       commit, &pbundle->bundle);
 		}
 	}
 
@@ -548,8 +533,6 @@ static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
 		memblock = memblock->next;
 		kvfree(tmp);
 	}
-
-	return ret;
 }
 
 static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
@@ -562,7 +545,6 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
 	struct bundle_priv *pbundle;
 	struct bundle_priv onstack;
 	void __rcu **slot;
-	int destroy_ret;
 	int ret;
 
 	if (unlikely(hdr->driver_id != uapi->driver_id))
@@ -610,10 +592,7 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
 	memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize));
 
 	ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
-	destroy_ret = bundle_destroy(pbundle, ret == 0);
-	if (unlikely(destroy_ret && !ret))
-		return destroy_ret;
-
+	bundle_destroy(pbundle, ret == 0);
 	return ret;
 }
 
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 970d8e3..2d4083b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -125,17 +125,8 @@ static void ib_uverbs_release_dev(struct device *device)
 	kfree(dev);
 }
 
-static void ib_uverbs_release_async_event_file(struct kref *ref)
-{
-	struct ib_uverbs_async_event_file *file =
-		container_of(ref, struct ib_uverbs_async_event_file, ref);
-
-	kfree(file);
-}
-
-void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
-			  struct ib_uverbs_completion_event_file *ev_file,
-			  struct ib_ucq_object *uobj)
+void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
+			   struct ib_ucq_object *uobj)
 {
 	struct ib_uverbs_event *evt, *tmp;
 
@@ -150,25 +141,24 @@ void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
 		uverbs_uobject_put(&ev_file->uobj);
 	}
 
-	spin_lock_irq(&file->async_file->ev_queue.lock);
-	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
-		list_del(&evt->list);
-		kfree(evt);
-	}
-	spin_unlock_irq(&file->async_file->ev_queue.lock);
+	ib_uverbs_release_uevent(&uobj->uevent);
 }
 
-void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
-			      struct ib_uevent_object *uobj)
+void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
 {
+	struct ib_uverbs_async_event_file *async_file =
+		READ_ONCE(uobj->uobject.ufile->async_file);
 	struct ib_uverbs_event *evt, *tmp;
 
-	spin_lock_irq(&file->async_file->ev_queue.lock);
+	if (!async_file)
+		return;
+
+	spin_lock_irq(&async_file->ev_queue.lock);
 	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
 		list_del(&evt->list);
 		kfree(evt);
 	}
-	spin_unlock_irq(&file->async_file->ev_queue.lock);
+	spin_unlock_irq(&async_file->ev_queue.lock);
 }
 
 void ib_uverbs_detach_umcast(struct ib_qp *qp,
@@ -208,8 +198,7 @@ void ib_uverbs_release_file(struct kref *ref)
 		ib_uverbs_comp_dev(file->device);
 
 	if (file->async_file)
-		kref_put(&file->async_file->ref,
-			 ib_uverbs_release_async_event_file);
+		uverbs_uobject_put(&file->async_file->uobj);
 	put_device(&file->device->dev);
 
 	if (file->disassociate_page)
@@ -220,7 +209,6 @@ void ib_uverbs_release_file(struct kref *ref)
 }
 
 static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
-				    struct ib_uverbs_file *uverbs_file,
 				    struct file *filp, char __user *buf,
 				    size_t count, loff_t *pos,
 				    size_t eventsz)
@@ -238,19 +226,16 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
 
 		if (wait_event_interruptible(ev_queue->poll_wait,
 					     (!list_empty(&ev_queue->event_list) ||
-			/* The barriers built into wait_event_interruptible()
-			 * and wake_up() guarentee this will see the null set
-			 * without using RCU
-			 */
-					     !uverbs_file->device->ib_dev)))
+					      ev_queue->is_closed)))
 			return -ERESTARTSYS;
 
-		/* If device was disassociated and no event exists set an error */
-		if (list_empty(&ev_queue->event_list) &&
-		    !uverbs_file->device->ib_dev)
-			return -EIO;
-
 		spin_lock_irq(&ev_queue->lock);
+
+		/* If device was disassociated and no event exists set an error */
+		if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
+			spin_unlock_irq(&ev_queue->lock);
+			return -EIO;
+		}
 	}
 
 	event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
@@ -285,8 +270,7 @@ static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
 {
 	struct ib_uverbs_async_event_file *file = filp->private_data;
 
-	return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
-				    buf, count, pos,
+	return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos,
 				    sizeof(struct ib_uverbs_async_event_desc));
 }
 
@@ -296,9 +280,8 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
 	struct ib_uverbs_completion_event_file *comp_ev_file =
 		filp->private_data;
 
-	return ib_uverbs_event_read(&comp_ev_file->ev_queue,
-				    comp_ev_file->uobj.ufile, filp,
-				    buf, count, pos,
+	return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count,
+				    pos,
 				    sizeof(struct ib_uverbs_comp_event_desc));
 }
 
@@ -321,7 +304,9 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
 static __poll_t ib_uverbs_async_event_poll(struct file *filp,
 					       struct poll_table_struct *wait)
 {
-	return ib_uverbs_event_poll(filp->private_data, filp, wait);
+	struct ib_uverbs_async_event_file *file = filp->private_data;
+
+	return ib_uverbs_event_poll(&file->ev_queue, filp, wait);
 }
 
 static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
@@ -335,9 +320,9 @@ static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
 
 static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
 {
-	struct ib_uverbs_event_queue *ev_queue = filp->private_data;
+	struct ib_uverbs_async_event_file *file = filp->private_data;
 
-	return fasync_helper(fd, filp, on, &ev_queue->async_queue);
+	return fasync_helper(fd, filp, on, &file->ev_queue.async_queue);
 }
 
 static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
@@ -348,70 +333,20 @@ static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
 	return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
 }
 
-static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
-{
-	struct ib_uverbs_async_event_file *file = filp->private_data;
-	struct ib_uverbs_file *uverbs_file = file->uverbs_file;
-	struct ib_uverbs_event *entry, *tmp;
-	int closed_already = 0;
-
-	mutex_lock(&uverbs_file->device->lists_mutex);
-	spin_lock_irq(&file->ev_queue.lock);
-	closed_already = file->ev_queue.is_closed;
-	file->ev_queue.is_closed = 1;
-	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
-		if (entry->counter)
-			list_del(&entry->obj_list);
-		kfree(entry);
-	}
-	spin_unlock_irq(&file->ev_queue.lock);
-	if (!closed_already) {
-		list_del(&file->list);
-		ib_unregister_event_handler(&uverbs_file->event_handler);
-	}
-	mutex_unlock(&uverbs_file->device->lists_mutex);
-
-	kref_put(&uverbs_file->ref, ib_uverbs_release_file);
-	kref_put(&file->ref, ib_uverbs_release_async_event_file);
-
-	return 0;
-}
-
-static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
-{
-	struct ib_uobject *uobj = filp->private_data;
-	struct ib_uverbs_completion_event_file *file = container_of(
-		uobj, struct ib_uverbs_completion_event_file, uobj);
-	struct ib_uverbs_event *entry, *tmp;
-
-	spin_lock_irq(&file->ev_queue.lock);
-	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
-		if (entry->counter)
-			list_del(&entry->obj_list);
-		kfree(entry);
-	}
-	file->ev_queue.is_closed = 1;
-	spin_unlock_irq(&file->ev_queue.lock);
-
-	uverbs_close_fd(filp);
-
-	return 0;
-}
-
 const struct file_operations uverbs_event_fops = {
 	.owner	 = THIS_MODULE,
 	.read	 = ib_uverbs_comp_event_read,
 	.poll    = ib_uverbs_comp_event_poll,
-	.release = ib_uverbs_comp_event_close,
+	.release = uverbs_uobject_fd_release,
 	.fasync  = ib_uverbs_comp_event_fasync,
 	.llseek	 = no_llseek,
 };
 
-static const struct file_operations uverbs_async_event_fops = {
+const struct file_operations uverbs_async_event_fops = {
 	.owner	 = THIS_MODULE,
 	.read	 = ib_uverbs_async_event_read,
 	.poll    = ib_uverbs_async_event_poll,
-	.release = ib_uverbs_async_event_close,
+	.release = uverbs_uobject_fd_release,
 	.fasync  = ib_uverbs_async_event_fasync,
 	.llseek	 = no_llseek,
 };
@@ -438,9 +373,9 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
 		return;
 	}
 
-	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
+	uobj = cq->uobject;
 
-	entry->desc.comp.cq_handle = cq->uobject->user_handle;
+	entry->desc.comp.cq_handle = cq->uobject->uevent.uobject.user_handle;
 	entry->counter		   = &uobj->comp_events_reported;
 
 	list_add_tail(&entry->list, &ev_queue->event_list);
@@ -451,102 +386,82 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
 	kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
 }
 
-static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
-				    __u64 element, __u64 event,
-				    struct list_head *obj_list,
-				    u32 *counter)
+static void
+ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
+			__u64 element, __u64 event, struct list_head *obj_list,
+			u32 *counter)
 {
 	struct ib_uverbs_event *entry;
 	unsigned long flags;
 
-	spin_lock_irqsave(&file->async_file->ev_queue.lock, flags);
-	if (file->async_file->ev_queue.is_closed) {
-		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
+	if (!async_file)
+		return;
+
+	spin_lock_irqsave(&async_file->ev_queue.lock, flags);
+	if (async_file->ev_queue.is_closed) {
+		spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
 		return;
 	}
 
 	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 	if (!entry) {
-		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
+		spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
 		return;
 	}
 
-	entry->desc.async.element    = element;
+	entry->desc.async.element = element;
 	entry->desc.async.event_type = event;
-	entry->desc.async.reserved   = 0;
-	entry->counter               = counter;
+	entry->desc.async.reserved = 0;
+	entry->counter = counter;
 
-	list_add_tail(&entry->list, &file->async_file->ev_queue.event_list);
+	list_add_tail(&entry->list, &async_file->ev_queue.event_list);
 	if (obj_list)
 		list_add_tail(&entry->obj_list, obj_list);
-	spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
+	spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
 
-	wake_up_interruptible(&file->async_file->ev_queue.poll_wait);
-	kill_fasync(&file->async_file->ev_queue.async_queue, SIGIO, POLL_IN);
+	wake_up_interruptible(&async_file->ev_queue.poll_wait);
+	kill_fasync(&async_file->ev_queue.async_queue, SIGIO, POLL_IN);
+}
+
+static void uverbs_uobj_event(struct ib_uevent_object *eobj,
+			      struct ib_event *event)
+{
+	ib_uverbs_async_handler(READ_ONCE(eobj->uobject.ufile->async_file),
+				eobj->uobject.user_handle, event->event,
+				&eobj->event_list, &eobj->events_reported);
 }
 
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
 {
-	struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
-						  struct ib_ucq_object, uobject);
-
-	ib_uverbs_async_handler(uobj->uobject.ufile, uobj->uobject.user_handle,
-				event->event, &uobj->async_list,
-				&uobj->async_events_reported);
+	uverbs_uobj_event(&event->element.cq->uobject->uevent, event);
 }
 
 void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
 {
-	struct ib_uevent_object *uobj;
-
 	/* for XRC target qp's, check that qp is live */
 	if (!event->element.qp->uobject)
 		return;
 
-	uobj = container_of(event->element.qp->uobject,
-			    struct ib_uevent_object, uobject);
-
-	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
-				event->event, &uobj->event_list,
-				&uobj->events_reported);
+	uverbs_uobj_event(&event->element.qp->uobject->uevent, event);
 }
 
 void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
 {
-	struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
-						  struct ib_uevent_object, uobject);
-
-	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
-				event->event, &uobj->event_list,
-				&uobj->events_reported);
+	uverbs_uobj_event(&event->element.wq->uobject->uevent, event);
 }
 
 void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
 {
-	struct ib_uevent_object *uobj;
-
-	uobj = container_of(event->element.srq->uobject,
-			    struct ib_uevent_object, uobject);
-
-	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
-				event->event, &uobj->event_list,
-				&uobj->events_reported);
+	uverbs_uobj_event(&event->element.srq->uobject->uevent, event);
 }
 
-void ib_uverbs_event_handler(struct ib_event_handler *handler,
-			     struct ib_event *event)
+static void ib_uverbs_event_handler(struct ib_event_handler *handler,
+				    struct ib_event *event)
 {
-	struct ib_uverbs_file *file =
-		container_of(handler, struct ib_uverbs_file, event_handler);
-
-	ib_uverbs_async_handler(file, event->element.port_num, event->event,
-				NULL, NULL);
-}
-
-void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
-{
-	kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file);
-	file->async_file = NULL;
+	ib_uverbs_async_handler(
+		container_of(handler, struct ib_uverbs_async_event_file,
+			     event_handler),
+		event->element.port_num, event->event, NULL, NULL);
 }
 
 void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
@@ -558,45 +473,26 @@ void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
 	ev_queue->async_queue = NULL;
 }
 
-struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
-					      struct ib_device	*ib_dev)
+void ib_uverbs_init_async_event_file(
+	struct ib_uverbs_async_event_file *async_file)
 {
-	struct ib_uverbs_async_event_file *ev_file;
-	struct file *filp;
+	struct ib_uverbs_file *uverbs_file = async_file->uobj.ufile;
+	struct ib_device *ib_dev = async_file->uobj.context->device;
 
-	ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
-	if (!ev_file)
-		return ERR_PTR(-ENOMEM);
+	ib_uverbs_init_event_queue(&async_file->ev_queue);
 
-	ib_uverbs_init_event_queue(&ev_file->ev_queue);
-	ev_file->uverbs_file = uverbs_file;
-	kref_get(&ev_file->uverbs_file->ref);
-	kref_init(&ev_file->ref);
-	filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops,
-				  ev_file, O_RDONLY);
-	if (IS_ERR(filp))
-		goto err_put_refs;
+	/* The first async_event_file becomes the default one for the file. */
+	mutex_lock(&uverbs_file->ucontext_lock);
+	if (!uverbs_file->async_file) {
+		/* Pairs with the put in ib_uverbs_release_file */
+		uverbs_uobject_get(&async_file->uobj);
+		smp_store_release(&uverbs_file->async_file, async_file);
+	}
+	mutex_unlock(&uverbs_file->ucontext_lock);
 
-	mutex_lock(&uverbs_file->device->lists_mutex);
-	list_add_tail(&ev_file->list,
-		      &uverbs_file->device->uverbs_events_file_list);
-	mutex_unlock(&uverbs_file->device->lists_mutex);
-
-	WARN_ON(uverbs_file->async_file);
-	uverbs_file->async_file = ev_file;
-	kref_get(&uverbs_file->async_file->ref);
-	INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
-			      ib_dev,
+	INIT_IB_EVENT_HANDLER(&async_file->event_handler, ib_dev,
 			      ib_uverbs_event_handler);
-	ib_register_event_handler(&uverbs_file->event_handler);
-	/* At that point async file stuff was fully set */
-
-	return filp;
-
-err_put_refs:
-	kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
-	kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
-	return filp;
+	ib_register_event_handler(&async_file->event_handler);
 }
 
 static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
@@ -1225,7 +1121,6 @@ static void ib_uverbs_add_one(struct ib_device *device)
 	mutex_init(&uverbs_dev->xrcd_tree_mutex);
 	mutex_init(&uverbs_dev->lists_mutex);
 	INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
-	INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
 	rcu_assign_pointer(uverbs_dev->ib_dev, device);
 	uverbs_dev->num_comp_vectors = device->num_comp_vectors;
 
@@ -1270,14 +1165,9 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
 					struct ib_device *ib_dev)
 {
 	struct ib_uverbs_file *file;
-	struct ib_uverbs_async_event_file *event_file;
-	struct ib_event event;
 
 	/* Pending running commands to terminate */
 	uverbs_disassociate_api_pre(uverbs_dev);
-	event.event = IB_EVENT_DEVICE_FATAL;
-	event.element.port_num = 0;
-	event.device = ib_dev;
 
 	mutex_lock(&uverbs_dev->lists_mutex);
 	while (!list_empty(&uverbs_dev->uverbs_file_list)) {
@@ -1293,31 +1183,14 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
 		 */
 		mutex_unlock(&uverbs_dev->lists_mutex);
 
-		ib_uverbs_event_handler(&file->event_handler, &event);
+		ib_uverbs_async_handler(READ_ONCE(file->async_file), 0,
+					IB_EVENT_DEVICE_FATAL, NULL, NULL);
+
 		uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
 		kref_put(&file->ref, ib_uverbs_release_file);
 
 		mutex_lock(&uverbs_dev->lists_mutex);
 	}
-
-	while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
-		event_file = list_first_entry(&uverbs_dev->
-					      uverbs_events_file_list,
-					      struct ib_uverbs_async_event_file,
-					      list);
-		spin_lock_irq(&event_file->ev_queue.lock);
-		event_file->ev_queue.is_closed = 1;
-		spin_unlock_irq(&event_file->ev_queue.lock);
-
-		list_del(&event_file->list);
-		ib_unregister_event_handler(
-			&event_file->uverbs_file->event_handler);
-		event_file->uverbs_file->event_handler.device =
-			NULL;
-
-		wake_up_interruptible(&event_file->ev_queue.poll_wait);
-		kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN);
-	}
 	mutex_unlock(&uverbs_dev->lists_mutex);
 
 	uverbs_disassociate_api(uverbs_dev->uapi);
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 35b2e2c..994d874 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -105,7 +105,7 @@ static int uverbs_free_qp(struct ib_uobject *uobject,
 	if (uqp->uxrcd)
 		atomic_dec(&uqp->uxrcd->refcnt);
 
-	ib_uverbs_release_uevent(attrs->ufile, &uqp->uevent);
+	ib_uverbs_release_uevent(&uqp->uevent);
 	return ret;
 }
 
@@ -138,7 +138,7 @@ static int uverbs_free_wq(struct ib_uobject *uobject,
 	if (ib_is_destroy_retryable(ret, why, uobject))
 		return ret;
 
-	ib_uverbs_release_uevent(attrs->ufile, &uwq->uevent);
+	ib_uverbs_release_uevent(&uwq->uevent);
 	return ret;
 }
 
@@ -163,7 +163,7 @@ static int uverbs_free_srq(struct ib_uobject *uobject,
 		atomic_dec(&us->uxrcd->refcnt);
 	}
 
-	ib_uverbs_release_uevent(attrs->ufile, uevent);
+	ib_uverbs_release_uevent(uevent);
 	return ret;
 }
 
@@ -202,24 +202,40 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
 	return 0;
 }
 
-static int uverbs_hot_unplug_completion_event_file(struct ib_uobject *uobj,
-						   enum rdma_remove_reason why)
+void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue)
 {
-	struct ib_uverbs_completion_event_file *comp_event_file =
-		container_of(uobj, struct ib_uverbs_completion_event_file,
-			     uobj);
-	struct ib_uverbs_event_queue *event_queue = &comp_event_file->ev_queue;
+	struct ib_uverbs_event *entry, *tmp;
 
 	spin_lock_irq(&event_queue->lock);
+	/*
+	 * The user must ensure that no new items are added to the event_list
+	 * once is_closed is set.
+	 */
 	event_queue->is_closed = 1;
 	spin_unlock_irq(&event_queue->lock);
+	wake_up_interruptible(&event_queue->poll_wait);
+	kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
 
-	if (why == RDMA_REMOVE_DRIVER_REMOVE) {
-		wake_up_interruptible(&event_queue->poll_wait);
-		kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
+	spin_lock_irq(&event_queue->lock);
+	list_for_each_entry_safe(entry, tmp, &event_queue->event_list, list) {
+		if (entry->counter)
+			list_del(&entry->obj_list);
+		kfree(entry);
 	}
+	spin_unlock_irq(&event_queue->lock);
+}
+
+static int
+uverbs_completion_event_file_destroy_uobj(struct ib_uobject *uobj,
+					  enum rdma_remove_reason why)
+{
+	struct ib_uverbs_completion_event_file *file =
+		container_of(uobj, struct ib_uverbs_completion_event_file,
+			     uobj);
+
+	ib_uverbs_free_event_queue(&file->ev_queue);
 	return 0;
-};
+}
 
 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
 {
@@ -230,7 +246,7 @@ EXPORT_SYMBOL(uverbs_destroy_def_handler);
 DECLARE_UVERBS_NAMED_OBJECT(
 	UVERBS_OBJECT_COMP_CHANNEL,
 	UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_completion_event_file),
-			     uverbs_hot_unplug_completion_event_file,
+			     uverbs_completion_event_file_destroy_uobj,
 			     &uverbs_event_fops,
 			     "[infinibandevent]",
 			     O_RDONLY));
diff --git a/drivers/infiniband/core/uverbs_std_types_async_fd.c b/drivers/infiniband/core/uverbs_std_types_async_fd.c
new file mode 100644
index 0000000..82ec080
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_async_fd.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc.  All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static int UVERBS_HANDLER(UVERBS_METHOD_ASYNC_EVENT_ALLOC)(
+	struct uverbs_attr_bundle *attrs)
+{
+	struct ib_uobject *uobj =
+		uverbs_attr_get_uobject(attrs, UVERBS_METHOD_ASYNC_EVENT_ALLOC);
+
+	ib_uverbs_init_async_event_file(
+		container_of(uobj, struct ib_uverbs_async_event_file, uobj));
+	return 0;
+}
+
+static int uverbs_async_event_destroy_uobj(struct ib_uobject *uobj,
+					   enum rdma_remove_reason why)
+{
+	struct ib_uverbs_async_event_file *event_file =
+		container_of(uobj, struct ib_uverbs_async_event_file, uobj);
+
+	ib_unregister_event_handler(&event_file->event_handler);
+	ib_uverbs_free_event_queue(&event_file->ev_queue);
+	return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+	UVERBS_METHOD_ASYNC_EVENT_ALLOC,
+	UVERBS_ATTR_FD(UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE,
+		       UVERBS_OBJECT_ASYNC_EVENT,
+		       UVERBS_ACCESS_NEW,
+		       UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+	UVERBS_OBJECT_ASYNC_EVENT,
+	UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_async_event_file),
+			     uverbs_async_event_destroy_uobj,
+			     &uverbs_async_event_fops,
+			     "[infinibandevent]",
+			     O_RDONLY),
+	&UVERBS_METHOD(UVERBS_METHOD_ASYNC_EVENT_ALLOC));
+
+const struct uapi_definition uverbs_def_obj_async_fd[] = {
+	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_ASYNC_EVENT),
+	{}
+};
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index e39fe6a..da4110a 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -41,7 +41,7 @@ static int uverbs_free_cq(struct ib_uobject *uobject,
 	struct ib_cq *cq = uobject->object;
 	struct ib_uverbs_event_queue *ev_queue = cq->cq_context;
 	struct ib_ucq_object *ucq =
-		container_of(uobject, struct ib_ucq_object, uobject);
+		container_of(uobject, struct ib_ucq_object, uevent.uobject);
 	int ret;
 
 	ret = ib_destroy_cq_user(cq, &attrs->driver_udata);
@@ -49,7 +49,6 @@ static int uverbs_free_cq(struct ib_uobject *uobject,
 		return ret;
 
 	ib_uverbs_release_ucq(
-		attrs->ufile,
 		ev_queue ? container_of(ev_queue,
 					struct ib_uverbs_completion_event_file,
 					ev_queue) :
@@ -63,7 +62,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
 {
 	struct ib_ucq_object *obj = container_of(
 		uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE),
-		typeof(*obj), uobject);
+		typeof(*obj), uevent.uobject);
 	struct ib_device *ib_dev = attrs->context->device;
 	int ret;
 	u64 user_handle;
@@ -106,10 +105,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
 		goto err_event_file;
 	}
 
-	obj->comp_events_reported  = 0;
-	obj->async_events_reported = 0;
 	INIT_LIST_HEAD(&obj->comp_list);
-	INIT_LIST_HEAD(&obj->async_list);
+	INIT_LIST_HEAD(&obj->uevent.event_list);
 
 	cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
 	if (!cq) {
@@ -118,7 +115,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
 	}
 
 	cq->device        = ib_dev;
-	cq->uobject       = &obj->uobject;
+	cq->uobject       = obj;
 	cq->comp_handler  = ib_uverbs_comp_handler;
 	cq->event_handler = ib_uverbs_cq_event_handler;
 	cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
@@ -129,8 +126,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
 	if (ret)
 		goto err_free;
 
-	obj->uobject.object = cq;
-	obj->uobject.user_handle = user_handle;
+	obj->uevent.uobject.object = cq;
+	obj->uevent.uobject.user_handle = user_handle;
 	rdma_restrack_uadd(&cq->res);
 
 	ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
@@ -182,10 +179,10 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(
 	struct ib_uobject *uobj =
 		uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_CQ_HANDLE);
 	struct ib_ucq_object *obj =
-		container_of(uobj, struct ib_ucq_object, uobject);
+		container_of(uobj, struct ib_ucq_object, uevent.uobject);
 	struct ib_uverbs_destroy_cq_resp resp = {
 		.comp_events_reported = obj->comp_events_reported,
-		.async_events_reported = obj->async_events_reported
+		.async_events_reported = obj->uevent.events_reported
 	};
 
 	return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_CQ_RESP, &resp,
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index 2a3f2f0..ae4a59d 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -200,6 +200,43 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(
 					     &resp, sizeof(resp));
 }
 
+static int UVERBS_HANDLER(UVERBS_METHOD_GET_CONTEXT)(
+	struct uverbs_attr_bundle *attrs)
+{
+	u32 num_comp = attrs->ufile->device->num_comp_vectors;
+	u64 core_support = IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS;
+	int ret;
+
+	ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
+			     &num_comp, sizeof(num_comp));
+	if (IS_UVERBS_COPY_ERR(ret))
+		return ret;
+
+	ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
+			     &core_support, sizeof(core_support));
+	if (IS_UVERBS_COPY_ERR(ret))
+		return ret;
+
+	ret = ib_alloc_ucontext(attrs);
+	if (ret)
+		return ret;
+	ret = ib_init_ucontext(attrs);
+	if (ret) {
+		kfree(attrs->context);
+		attrs->context = NULL;
+		return ret;
+	}
+	return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+	UVERBS_METHOD_GET_CONTEXT,
+	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
+			    UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
+	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
+			    UVERBS_ATTR_TYPE(u64), UA_OPTIONAL),
+	UVERBS_ATTR_UHW());
+
 DECLARE_UVERBS_NAMED_METHOD(
 	UVERBS_METHOD_INFO_HANDLES,
 	/* Also includes any device specific object ids */
@@ -220,6 +257,7 @@ DECLARE_UVERBS_NAMED_METHOD(
 		UA_MANDATORY));
 
 DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE,
+			      &UVERBS_METHOD(UVERBS_METHOD_GET_CONTEXT),
 			      &UVERBS_METHOD(UVERBS_METHOD_INVOKE_WRITE),
 			      &UVERBS_METHOD(UVERBS_METHOD_INFO_HANDLES),
 			      &UVERBS_METHOD(UVERBS_METHOD_QUERY_PORT));
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index 00c54788..3f121ac 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -195,9 +195,9 @@ static int uapi_merge_obj_tree(struct uverbs_api *uapi,
 		 * disassociation, and the FD types require the driver to use
 		 * struct file_operations.owner to prevent the driver module
 		 * code from unloading while the file is open. This provides
-		 * enough safety that uverbs_close_fd() will continue to work.
-		 * Drivers using FD are responsible to handle disassociation of
-		 * the device on their own.
+		 * enough safety that uverbs_uobject_fd_release() will
+		 * continue to work.  Drivers using FD are responsible for
+		 * handling disassociation of the device on their own.
 		 */
 		if (WARN_ON(is_driver &&
 			    obj->type_attrs->type_class != &uverbs_idr_class &&
@@ -626,6 +626,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi)
 }
 
 static const struct uapi_definition uverbs_core_api[] = {
+	UAPI_DEF_CHAIN(uverbs_def_obj_async_fd),
 	UAPI_DEF_CHAIN(uverbs_def_obj_counters),
 	UAPI_DEF_CHAIN(uverbs_def_obj_cq),
 	UAPI_DEF_CHAIN(uverbs_def_obj_device),
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index d33bdc9..3ebae3b 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -52,6 +52,7 @@
 #include <rdma/rw.h>
 
 #include "core_priv.h"
+#include <trace/events/rdma_core.h>
 
 static int ib_resolve_eth_dmac(struct ib_device *device,
 			       struct rdma_ah_attr *ah_attr);
@@ -1053,11 +1056,11 @@ static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
 	struct ib_qp *qp = context;
 	unsigned long flags;
 
-	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
+	spin_lock_irqsave(&qp->device->qp_open_list_lock, flags);
 	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
 		if (event->element.qp->event_handler)
 			event->element.qp->event_handler(event, event->element.qp->qp_context);
-	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
+	spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
 }
 
 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
@@ -1094,9 +1097,9 @@ static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
 	qp->qp_num = real_qp->qp_num;
 	qp->qp_type = real_qp->qp_type;
 
-	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
+	spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
 	list_add(&qp->open_list, &real_qp->open_list);
-	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
+	spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
 
 	return qp;
 }
@@ -1824,9 +1827,9 @@ int ib_close_qp(struct ib_qp *qp)
 	if (real_qp == qp)
 		return -EINVAL;
 
-	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
+	spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
 	list_del(&qp->open_list);
-	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
+	spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
 
 	atomic_dec(&real_qp->usecnt);
 	if (qp->qp_sec)
@@ -2038,6 +2041,7 @@ int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
 	struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
 	int ret;
 
+	trace_mr_dereg(mr);
 	rdma_restrack_del(&mr->res);
 	ret = mr->device->ops.dereg_mr(mr, udata);
 	if (!ret) {
@@ -2069,11 +2073,16 @@ struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
 {
 	struct ib_mr *mr;
 
-	if (!pd->device->ops.alloc_mr)
-		return ERR_PTR(-EOPNOTSUPP);
+	if (!pd->device->ops.alloc_mr) {
+		mr = ERR_PTR(-EOPNOTSUPP);
+		goto out;
+	}
 
-	if (WARN_ON_ONCE(mr_type == IB_MR_TYPE_INTEGRITY))
-		return ERR_PTR(-EINVAL);
+	if (mr_type == IB_MR_TYPE_INTEGRITY) {
+		WARN_ON_ONCE(1);
+		mr = ERR_PTR(-EINVAL);
+		goto out;
+	}
 
 	mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg, udata);
 	if (!IS_ERR(mr)) {
@@ -2089,6 +2098,8 @@ struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
 		mr->sig_attrs = NULL;
 	}
 
+out:
+	trace_mr_alloc(pd, mr_type, max_num_sg, mr);
 	return mr;
 }
 EXPORT_SYMBOL(ib_alloc_mr_user);
@@ -2113,21 +2124,27 @@ struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
 	struct ib_sig_attrs *sig_attrs;
 
 	if (!pd->device->ops.alloc_mr_integrity ||
-	    !pd->device->ops.map_mr_sg_pi)
-		return ERR_PTR(-EOPNOTSUPP);
+	    !pd->device->ops.map_mr_sg_pi) {
+		mr = ERR_PTR(-EOPNOTSUPP);
+		goto out;
+	}
 
-	if (!max_num_meta_sg)
-		return ERR_PTR(-EINVAL);
+	if (!max_num_meta_sg) {
+		mr = ERR_PTR(-EINVAL);
+		goto out;
+	}
 
 	sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL);
-	if (!sig_attrs)
-		return ERR_PTR(-ENOMEM);
+	if (!sig_attrs) {
+		mr = ERR_PTR(-ENOMEM);
+		goto out;
+	}
 
 	mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
 						max_num_meta_sg);
 	if (IS_ERR(mr)) {
 		kfree(sig_attrs);
-		return mr;
+		goto out;
 	}
 
 	mr->device = pd->device;
@@ -2141,6 +2158,8 @@ struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
 	mr->type = IB_MR_TYPE_INTEGRITY;
 	mr->sig_attrs = sig_attrs;
 
+out:
+	trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr);
 	return mr;
 }
 EXPORT_SYMBOL(ib_alloc_mr_integrity);
@@ -2785,6 +2804,7 @@ void ib_drain_sq(struct ib_qp *qp)
 		qp->device->ops.drain_sq(qp);
 	else
 		__ib_drain_sq(qp);
+	trace_cq_drain_complete(qp->send_cq);
 }
 EXPORT_SYMBOL(ib_drain_sq);
 
@@ -2813,6 +2833,7 @@ void ib_drain_rq(struct ib_qp *qp)
 		qp->device->ops.drain_rq(qp);
 	else
 		__ib_drain_rq(qp);
+	trace_cq_drain_complete(qp->recv_cq);
 }
 EXPORT_SYMBOL(ib_drain_rq);
 
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index e7e8a0f..793c972 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -677,7 +677,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
 
 	bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);
 
-	ibdev->num_comp_vectors	= 1;
+	ibdev->num_comp_vectors	= rdev->num_msix - 1;
 	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
 	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;
 
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index e96bcb1..74b787a 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -160,10 +160,16 @@ struct efa_admin_create_qp_resp {
 	/* Common Admin Queue completion descriptor */
 	struct efa_admin_acq_common_desc acq_common_desc;
 
-	/* Opaque handle to be used for consequent operations on the QP */
+	/*
+	 * Opaque handle to be used for subsequent admin operations on the
+	 * QP
+	 */
 	u32 qp_handle;
 
-	/* QP number in the given EFA virtual device */
+	/*
+	 * QP number in the given EFA virtual device. Least-significant bits
+	 *    (as needed according to max_qp) carry unique QP ID
+	 */
 	u16 qp_num;
 
 	/* MBZ */
@@ -286,6 +292,7 @@ struct efa_admin_create_ah_cmd {
 	/* PD number */
 	u16 pd;
 
+	/* MBZ */
 	u16 reserved;
 };
 
@@ -296,6 +303,7 @@ struct efa_admin_create_ah_resp {
 	/* Target interface address handle (opaque) */
 	u16 ah;
 
+	/* MBZ */
 	u16 reserved;
 };
 
@@ -372,6 +380,7 @@ struct efa_admin_reg_mr_cmd {
 	 */
 	u8 permissions;
 
+	/* MBZ */
 	u16 reserved16_w5;
 
 	/* number of pages in PBL (redundant, could be calculated) */
@@ -419,20 +428,20 @@ struct efa_admin_create_cq_cmd {
 	struct efa_admin_aq_common_desc aq_common_desc;
 
 	/*
-	 * 4:0 : reserved5
+	 * 4:0 : reserved5 - MBZ
 	 * 5 : interrupt_mode_enabled - if set, cq operates
 	 *    in interrupt mode (i.e. CQ events and MSI-X are
 	 *    generated), otherwise - polling
 	 * 6 : virt - If set, ring base address is virtual
 	 *    (IOVA returned by MR registration)
-	 * 7 : reserved6
+	 * 7 : reserved6 - MBZ
 	 */
 	u8 cq_caps_1;
 
 	/*
 	 * 4:0 : cq_entry_size_words - size of CQ entry in
 	 *    32-bit words, valid values: 4, 8.
-	 * 7:5 : reserved7
+	 * 7:5 : reserved7 - MBZ
 	 */
 	u8 cq_caps_2;
 
@@ -478,6 +487,7 @@ struct efa_admin_destroy_cq_cmd {
 
 	u16 cq_idx;
 
+	/* MBZ */
 	u16 reserved1;
 };
 
@@ -530,7 +540,7 @@ struct efa_admin_get_set_feature_common_desc {
 	/*
 	 * 1:0 : select - 0x1 - current value; 0x3 - default
 	 *    value
-	 * 7:3 : reserved3
+	 * 7:3 : reserved3 - MBZ
 	 */
 	u8 flags;
 
@@ -557,10 +567,10 @@ struct efa_admin_feature_device_attr_desc {
 	/* Bar used for SQ and RQ doorbells */
 	u16 db_bar;
 
-	/* Indicates how many bits are used physical address access */
+	/* Indicates how many bits are used on physical address access */
 	u8 phys_addr_width;
 
-	/* Indicates how many bits are used virtual address access */
+	/* Indicates how many bits are used on virtual address access */
 	u8 virt_addr_width;
 
 	/*
@@ -578,27 +588,28 @@ struct efa_admin_feature_queue_attr_desc {
 	/* The maximum number of queue pairs supported */
 	u32 max_qp;
 
+	/* Maximum number of WQEs per Send Queue */
 	u32 max_sq_depth;
 
-	/* max send wr used in inline-buf */
+	/* Maximum size of data that can be sent inline in a Send WQE */
 	u32 inline_buf_size;
 
+	/* Maximum number of buffer descriptors per Recv Queue */
 	u32 max_rq_depth;
 
 	/* The maximum number of completion queues supported per VF */
 	u32 max_cq;
 
+	/* Maximum number of CQEs per Completion Queue */
 	u32 max_cq_depth;
 
 	/* Number of sub-CQs to be created for each CQ */
 	u16 sub_cqs_per_cq;
 
+	/* MBZ */
 	u16 reserved;
 
-	/*
-	 * Maximum number of SGEs (buffs) allowed for a single send work
-	 *    queue element (WQE)
-	 */
+	/* Maximum number of SGEs (buffers) allowed for a single send WQE */
 	u16 max_wr_send_sges;
 
 	/* Maximum number of SGEs allowed for a single recv WQE */
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 4822f5f..ec55458 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -387,8 +387,7 @@ static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
 	return efa_com_destroy_qp(&dev->edev, &params);
 }
 
-static void efa_qp_user_mmap_entries_remove(struct efa_ucontext *uctx,
-					    struct efa_qp *qp)
+static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
 {
 	rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
 	rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
@@ -398,8 +397,6 @@ static void efa_qp_user_mmap_entries_remove(struct efa_ucontext *uctx,
 
 int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
-	struct efa_ucontext *ucontext = rdma_udata_to_drv_context(udata,
-		struct efa_ucontext, ibucontext);
 	struct efa_dev *dev = to_edev(ibqp->pd->device);
 	struct efa_qp *qp = to_eqp(ibqp);
 	int err;
@@ -418,7 +415,7 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 				 DMA_TO_DEVICE);
 	}
 
-	efa_qp_user_mmap_entries_remove(ucontext, qp);
+	efa_qp_user_mmap_entries_remove(qp);
 	kfree(qp);
 	return 0;
 }
@@ -510,7 +507,7 @@ static int qp_mmap_entries_setup(struct efa_qp *qp,
 	return 0;
 
 err_remove_mmap:
-	efa_qp_user_mmap_entries_remove(ucontext, qp);
+	efa_qp_user_mmap_entries_remove(qp);
 
 	return -ENOMEM;
 }
@@ -719,7 +716,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
 	return &qp->ibqp;
 
 err_remove_mmap_entries:
-	efa_qp_user_mmap_entries_remove(ucontext, qp);
+	efa_qp_user_mmap_entries_remove(qp);
 err_destroy_qp:
 	efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
 err_free_mapped:
@@ -1370,6 +1367,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
 		IB_ACCESS_LOCAL_WRITE |
 		(is_rdma_read_cap(dev) ? IB_ACCESS_REMOTE_READ : 0);
 
+	access_flags &= ~IB_ACCESS_OPTIONAL;
 	if (access_flags & ~supp_access_flags) {
 		ibdev_dbg(&dev->ibdev,
 			  "Unsupported access flags[%#x], supported[%#x]\n",
@@ -1608,13 +1606,12 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
 		err = -EINVAL;
 	}
 
-	if (err) {
+	if (err)
 		ibdev_dbg(
 			&dev->ibdev,
 			"Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
 			entry->address, rdma_entry->npages * PAGE_SIZE,
 			entry->mmap_flag, err);
-	}
 
 	rdma_user_mmap_entry_put(rdma_entry);
 	return err;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 9b1fb84..e0b1238 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1685,6 +1685,14 @@ static u64 access_sw_pio_drain(const struct cntr_entry *entry,
 	return dd->verbs_dev.n_piodrain;
 }
 
+static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
+				   void *context, int vl, int mode, u64 data)
+{
+	struct hfi1_devdata *dd = context;
+
+	return dd->ctx0_seq_drop;
+}
+
 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
 			      void *context, int vl, int mode, u64 data)
 {
@@ -4106,6 +4114,7 @@ def_access_ibp_counter(rc_crwaits);
 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
 [C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
+[C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH),
 [C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
 [C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
@@ -4249,6 +4258,8 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
 			    access_sw_cpu_intr),
 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
 			    access_sw_cpu_rcv_limit),
+[C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
+			    access_sw_ctx0_seq_drop),
 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
 			    access_sw_vtx_wait),
 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
@@ -6862,7 +6873,7 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
 		}
 		rcvmask = HFI1_RCVCTRL_CTXT_ENB;
 		/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
-		rcvmask |= rcd->rcvhdrtail_kvaddr ?
+		rcvmask |= hfi1_rcvhdrtail_kvaddr(rcd) ?
 			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
 		hfi1_rcvctrl(dd, rcvmask, rcd);
 		hfi1_rcd_put(rcd);
@@ -8394,20 +8405,62 @@ void force_recv_intr(struct hfi1_ctxtdata *rcd)
 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
 {
 	u32 tail;
-	int present;
 
-	if (!rcd->rcvhdrtail_kvaddr)
-		present = (rcd->seq_cnt ==
-				rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
-	else /* is RDMA rtail */
-		present = (rcd->head != get_rcvhdrtail(rcd));
-
-	if (present)
+	if (hfi1_packet_present(rcd))
 		return 1;
 
 	/* fall back to a CSR read, correct independent of DMA_RTAIL */
 	tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
-	return rcd->head != tail;
+	return hfi1_rcd_head(rcd) != tail;
+}
+
+/**
+ * Common code for receive context interrupt handlers.
+ * Update traces, increment the kernel IRQ counter and
+ * set up ASPM when needed.
+ */
+static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
+{
+	struct hfi1_devdata *dd = rcd->dd;
+
+	trace_hfi1_receive_interrupt(dd, rcd);
+	this_cpu_inc(*dd->int_counter);
+	aspm_ctx_disable(rcd);
+}
+
+/**
+ * __hfi1_rcd_eoi_intr() - Make HW issue a receive interrupt
+ * when there are packets present in the queue. When calling
+ * with interrupts enabled, use hfi1_rcd_eoi_intr() instead.
+ *
+ * @rcd: valid receive context
+ */
+static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
+{
+	clear_recv_intr(rcd);
+	if (check_packet_present(rcd))
+		force_recv_intr(rcd);
+}
+
+/**
+ * hfi1_rcd_eoi_intr() - End of Interrupt processing action
+ *
+ * @rcd: Ptr to hfi1_ctxtdata of receive context
+ *
+ *  Hold IRQs so we can safely clear the interrupt and
+ *  recheck for a packet that may have arrived after the previous
+ *  check and the interrupt clear.  If a packet arrived, force another
+ *  interrupt. This routine can be called at the end of receive packet
+ *  processing in interrupt service routines, the interrupt service
+ *  thread, and softirqs.
+ */
+static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__hfi1_rcd_eoi_intr(rcd);
+	local_irq_restore(flags);
 }
 
 /*
@@ -8421,13 +8474,9 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
 irqreturn_t receive_context_interrupt(int irq, void *data)
 {
 	struct hfi1_ctxtdata *rcd = data;
-	struct hfi1_devdata *dd = rcd->dd;
 	int disposition;
-	int present;
 
-	trace_hfi1_receive_interrupt(dd, rcd);
-	this_cpu_inc(*dd->int_counter);
-	aspm_ctx_disable(rcd);
+	receive_interrupt_common(rcd);
 
 	/* receive interrupt remains blocked while processing packets */
 	disposition = rcd->do_interrupt(rcd, 0);
@@ -8440,17 +8489,7 @@ irqreturn_t receive_context_interrupt(int irq, void *data)
 	if (disposition == RCV_PKT_LIMIT)
 		return IRQ_WAKE_THREAD;
 
-	/*
-	 * The packet processor detected no more packets.  Clear the receive
-	 * interrupt and recheck for a packet packet that may have arrived
-	 * after the previous check and interrupt clear.  If a packet arrived,
-	 * force another interrupt.
-	 */
-	clear_recv_intr(rcd);
-	present = check_packet_present(rcd);
-	if (present)
-		force_recv_intr(rcd);
-
+	__hfi1_rcd_eoi_intr(rcd);
 	return IRQ_HANDLED;
 }
 
@@ -8461,24 +8500,11 @@ irqreturn_t receive_context_interrupt(int irq, void *data)
 irqreturn_t receive_context_thread(int irq, void *data)
 {
 	struct hfi1_ctxtdata *rcd = data;
-	int present;
 
 	/* receive interrupt is still blocked from the IRQ handler */
 	(void)rcd->do_interrupt(rcd, 1);
 
-	/*
-	 * The packet processor will only return if it detected no more
-	 * packets.  Hold IRQs here so we can safely clear the interrupt and
-	 * recheck for a packet that may have arrived after the previous
-	 * check and the interrupt clear.  If a packet arrived, force another
-	 * interrupt.
-	 */
-	local_irq_disable();
-	clear_recv_intr(rcd);
-	present = check_packet_present(rcd);
-	if (present)
-		force_recv_intr(rcd);
-	local_irq_enable();
+	hfi1_rcd_eoi_intr(rcd);
 
 	return IRQ_HANDLED;
 }
@@ -10049,7 +10075,7 @@ u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
 	 * the first kernel context would have been allocated by now so
 	 * we are guaranteed a valid value.
 	 */
-	return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
+	return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
 }
 
 /*
@@ -10094,7 +10120,7 @@ static void set_send_length(struct hfi1_pportdata *ppd)
 		thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
 			    sc_mtu_to_threshold(dd->vld[i].sc,
 						dd->vld[i].mtu,
-						dd->rcd[0]->rcvhdrqentsize));
+						get_hdrqentsize(dd->rcd[0])));
 		for (j = 0; j < INIT_SC_PER_VL; j++)
 			sc_set_cr_threshold(
 					pio_select_send_context_vl(dd, j, i),
@@ -11821,7 +11847,7 @@ u32 hdrqempty(struct hfi1_ctxtdata *rcd)
 	head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
 		& RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
 
-	if (rcd->rcvhdrtail_kvaddr)
+	if (hfi1_rcvhdrtail_kvaddr(rcd))
 		tail = get_rcvhdrtail(rcd);
 	else
 		tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
@@ -11865,6 +11891,84 @@ static u32 encoded_size(u32 size)
 	return 0x1;	/* if invalid, go with the minimum size */
 }
 
+/**
+ * encode_rcv_header_entry_size - return chip-specific encoding for size
+ * @size: size in dwords
+ *
+ * Convert a receive header entry size to the encoding used in the CSR.
+ *
+ * Return zero if the given size is invalid, otherwise the encoding.
+ */
+u8 encode_rcv_header_entry_size(u8 size)
+{
+	/* there are only 3 valid receive header entry sizes */
+	if (size == 2)
+		return 1;
+	if (size == 16)
+		return 2;
+	if (size == 32)
+		return 4;
+	return 0; /* invalid */
+}
+
+/**
+ * hfi1_validate_rcvhdrcnt - validate hdrcnt
+ * @dd: the device data
+ * @thecnt: the header count
+ */
+int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
+{
+	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
+		dd_dev_err(dd, "Receive header queue count too small\n");
+		return -EINVAL;
+	}
+
+	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
+		dd_dev_err(dd,
+			   "Receive header queue count cannot be greater than %u\n",
+			   HFI1_MAX_HDRQ_EGRBUF_CNT);
+		return -EINVAL;
+	}
+
+	if (thecnt % HDRQ_INCREMENT) {
+		dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
+			   thecnt, HDRQ_INCREMENT);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * set_hdrq_regs - set header queue registers for context
+ * @dd: the device data
+ * @ctxt: the context
+ * @entsize: the dword entry size
+ * @hdrcnt: the number of header entries
+ */
+void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt)
+{
+	u64 reg;
+
+	reg = (((u64)hdrcnt >> HDRQ_SIZE_SHIFT) & RCV_HDR_CNT_CNT_MASK) <<
+	      RCV_HDR_CNT_CNT_SHIFT;
+	write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg);
+	reg = ((u64)encode_rcv_header_entry_size(entsize) &
+	       RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) <<
+	      RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
+	write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg);
+	reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) <<
+	      RCV_HDR_SIZE_HDR_SIZE_SHIFT;
+	write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg);
+
+	/*
+	 * Program dummy tail address for every receive context
+	 * before enabling any receive context
+	 */
+	write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
+			dd->rcvhdrtail_dummy_dma);
+}
+
 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
 		  struct hfi1_ctxtdata *rcd)
 {
@@ -11886,13 +11990,13 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
 		/* reset the tail and hdr addresses, and sequence count */
 		write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
 				rcd->rcvhdrq_dma);
-		if (rcd->rcvhdrtail_kvaddr)
+		if (hfi1_rcvhdrtail_kvaddr(rcd))
 			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
 					rcd->rcvhdrqtailaddr_dma);
-		rcd->seq_cnt = 1;
+		hfi1_set_seq_cnt(rcd, 1);
 
 		/* reset the cached receive header queue head value */
-		rcd->head = 0;
+		hfi1_set_rcd_head(rcd, 0);
 
 		/*
 		 * Zero the receive header queue so we don't get false
@@ -11972,7 +12076,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
 			      IS_RCVAVAIL_START + rcd->ctxt, false);
 		rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
 	}
-	if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
+	if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && hfi1_rcvhdrtail_kvaddr(rcd))
 		rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
 	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
 		/* See comment on RcvCtxtCtrl.TailUpd above */
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 4ca5ac8..7255092 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -358,6 +358,8 @@
 #define MAX_EAGER_BUFFER       (256 * 1024)
 #define MAX_EAGER_BUFFER_TOTAL (64 * (1 << 20)) /* max per ctxt 64MB */
 #define MAX_EXPECTED_BUFFER    (2048 * 1024)
+#define HFI1_MIN_HDRQ_EGRBUF_CNT 32
+#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
 
 /*
  * Receive expected base and count and eager base and count increment -
@@ -699,6 +701,10 @@ static inline u32 chip_rcv_array_count(struct hfi1_devdata *dd)
 	return read_csr(dd, RCV_ARRAY_CNT);
 }
 
+u8 encode_rcv_header_entry_size(u8 size);
+int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt);
+void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt);
+
 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
 	       u32 dw_len);
 
@@ -859,6 +865,7 @@ static inline int idx_from_vl(int vl)
 enum {
 	C_RCV_OVF = 0,
 	C_RX_LEN_ERR,
+	C_RX_SHORT_ERR,
 	C_RX_ICRC_ERR,
 	C_RX_EBP,
 	C_RX_TID_FULL,
@@ -926,6 +933,7 @@ enum {
 	C_DC_PG_STS_TX_MBE_CNT,
 	C_SW_CPU_INTR,
 	C_SW_CPU_RCV_LIM,
+	C_SW_CTX0_SEQ_DROP,
 	C_SW_VTX_WAIT,
 	C_SW_PIO_WAIT,
 	C_SW_PIO_DRAIN,
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index ab3589d..fb3ec9b 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -381,6 +381,7 @@
 #define DC_LCB_STS_LINK_TRANSFER_ACTIVE (DC_LCB_CSRS + 0x000000000468)
 #define DC_LCB_STS_ROUND_TRIP_LTP_CNT (DC_LCB_CSRS + 0x0000000004B0)
 #define RCV_LENGTH_ERR_CNT 0
+#define RCV_SHORT_ERR_CNT 2
 #define RCV_ICRC_ERR_CNT 6
 #define RCV_EBP_CNT 9
 #define RCV_BUF_OVFL_CNT 10
diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index d47da7b..40a1ff0 100644
--- a/drivers/infiniband/hw/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -323,6 +323,9 @@ struct diag_pkt {
 /* RHF receive type error - bypass packet errors */
 #define RHF_RTE_BYPASS_NO_ERR		0x0
 
+/* MAX RcvSEQ */
+#define RHF_MAX_SEQ 13
+
 /* IB - LRH header constants */
 #define HFI1_LRH_GRH 0x0003      /* 1. word of IB LRH - next header: GRH */
 #define HFI1_LRH_BTH 0x0002      /* 1. word of IB LRH - next header: BTH */
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index d268bf9..4633a0c 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -379,7 +379,7 @@ static void *_rcds_seq_next(struct seq_file *s, void *v, loff_t *pos)
 	struct hfi1_devdata *dd = dd_from_dev(ibd);
 
 	++*pos;
-	if (!dd->rcd || *pos >= dd->n_krcv_queues)
+	if (!dd->rcd || *pos >= dd->num_rcv_contexts)
 		return NULL;
 	return pos;
 }
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 01aa1f1..049d15b 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -411,14 +411,14 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
 static inline void init_packet(struct hfi1_ctxtdata *rcd,
 			       struct hfi1_packet *packet)
 {
-	packet->rsize = rcd->rcvhdrqentsize; /* words */
-	packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
+	packet->rsize = get_hdrqentsize(rcd); /* words */
+	packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */
 	packet->rcd = rcd;
 	packet->updegr = 0;
 	packet->etail = -1;
 	packet->rhf_addr = get_rhf_addr(rcd);
 	packet->rhf = rhf_to_cpu(packet->rhf_addr);
-	packet->rhqoff = rcd->head;
+	packet->rhqoff = hfi1_rcd_head(rcd);
 	packet->numpkt = 0;
 }
 
@@ -551,22 +551,22 @@ static inline void init_ps_mdata(struct ps_mdata *mdata,
 	mdata->maxcnt = packet->maxcnt;
 	mdata->ps_head = packet->rhqoff;
 
-	if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
+	if (get_dma_rtail_setting(rcd)) {
 		mdata->ps_tail = get_rcvhdrtail(rcd);
 		if (rcd->ctxt == HFI1_CTRL_CTXT)
-			mdata->ps_seq = rcd->seq_cnt;
+			mdata->ps_seq = hfi1_seq_cnt(rcd);
 		else
 			mdata->ps_seq = 0; /* not used with DMA_RTAIL */
 	} else {
 		mdata->ps_tail = 0; /* used only with DMA_RTAIL*/
-		mdata->ps_seq = rcd->seq_cnt;
+		mdata->ps_seq = hfi1_seq_cnt(rcd);
 	}
 }
 
 static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
 			  struct hfi1_ctxtdata *rcd)
 {
-	if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
+	if (get_dma_rtail_setting(rcd))
 		return mdata->ps_head == mdata->ps_tail;
 	return mdata->ps_seq != rhf_rcv_seq(rhf);
 }
@@ -592,11 +592,9 @@ static inline void update_ps_mdata(struct ps_mdata *mdata,
 		mdata->ps_head = 0;
 
 	/* Control context must do seq counting */
-	if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
-	    (rcd->ctxt == HFI1_CTRL_CTXT)) {
-		if (++mdata->ps_seq > 13)
-			mdata->ps_seq = 1;
-	}
+	if (!get_dma_rtail_setting(rcd) ||
+	    rcd->ctxt == HFI1_CTRL_CTXT)
+		mdata->ps_seq = hfi1_seq_incr_wrap(mdata->ps_seq);
 }
 
 /*
@@ -734,6 +732,7 @@ static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
 {
 	int ret;
 
+	packet->rcd->dd->ctx0_seq_drop++;
 	/* Set up for the next packet */
 	packet->rhqoff += packet->rsize;
 	if (packet->rhqoff >= packet->maxcnt)
@@ -769,7 +768,7 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
 		 * The +2 is the size of the RHF.
 		 */
 		prefetch_range(packet->ebuf,
-			       packet->tlen - ((packet->rcd->rcvhdrqentsize -
+			       packet->tlen - ((get_hdrqentsize(packet->rcd) -
 					       (rhf_hdrq_offset(packet->rhf)
 						+ 2)) * 4));
 	}
@@ -823,7 +822,7 @@ static inline void finish_packet(struct hfi1_packet *packet)
 	 * The only thing we need to do is a final update and call for an
 	 * interrupt
 	 */
-	update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
+	update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr,
 		       packet->etail, rcv_intr_dynamic, packet->numpkt);
 }
 
@@ -832,13 +831,11 @@ static inline void finish_packet(struct hfi1_packet *packet)
  */
 int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
 {
-	u32 seq;
 	int last = RCV_PKT_OK;
 	struct hfi1_packet packet;
 
 	init_packet(rcd, &packet);
-	seq = rhf_rcv_seq(packet.rhf);
-	if (seq != rcd->seq_cnt) {
+	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
 		last = RCV_PKT_DONE;
 		goto bail;
 	}
@@ -847,15 +844,12 @@ int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
 
 	while (last == RCV_PKT_OK) {
 		last = process_rcv_packet(&packet, thread);
-		seq = rhf_rcv_seq(packet.rhf);
-		if (++rcd->seq_cnt > 13)
-			rcd->seq_cnt = 1;
-		if (seq != rcd->seq_cnt)
+		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
 			last = RCV_PKT_DONE;
 		process_rcv_update(last, &packet);
 	}
 	process_rcv_qp_work(&packet);
-	rcd->head = packet.rhqoff;
+	hfi1_set_rcd_head(rcd, packet.rhqoff);
 bail:
 	finish_packet(&packet);
 	return last;
@@ -884,15 +878,14 @@ int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
 		process_rcv_update(last, &packet);
 	}
 	process_rcv_qp_work(&packet);
-	rcd->head = packet.rhqoff;
+	hfi1_set_rcd_head(rcd, packet.rhqoff);
 bail:
 	finish_packet(&packet);
 	return last;
 }
 
-static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
+static void set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 {
-	struct hfi1_ctxtdata *rcd;
 	u16 i;
 
 	/*
@@ -900,50 +893,17 @@ static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
 	 * interrupt handler only for that context. Otherwise, switch
 	 * interrupt handler for all statically allocated kernel contexts.
 	 */
-	if (ctxt >= dd->first_dyn_alloc_ctxt) {
-		rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
-		if (rcd) {
-			rcd->do_interrupt =
-				&handle_receive_interrupt_nodma_rtail;
-			hfi1_rcd_put(rcd);
-		}
-		return;
-	}
-
-	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
-		rcd = hfi1_rcd_get_by_index(dd, i);
-		if (rcd)
-			rcd->do_interrupt =
-				&handle_receive_interrupt_nodma_rtail;
+	if (rcd->ctxt >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic) {
+		hfi1_rcd_get(rcd);
+		hfi1_set_fast(rcd);
 		hfi1_rcd_put(rcd);
-	}
-}
-
-static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
-{
-	struct hfi1_ctxtdata *rcd;
-	u16 i;
-
-	/*
-	 * For dynamically allocated kernel contexts (like vnic) switch
-	 * interrupt handler only for that context. Otherwise, switch
-	 * interrupt handler for all statically allocated kernel contexts.
-	 */
-	if (ctxt >= dd->first_dyn_alloc_ctxt) {
-		rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
-		if (rcd) {
-			rcd->do_interrupt =
-				&handle_receive_interrupt_dma_rtail;
-			hfi1_rcd_put(rcd);
-		}
 		return;
 	}
 
-	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
+	for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
 		rcd = hfi1_rcd_get_by_index(dd, i);
-		if (rcd)
-			rcd->do_interrupt =
-				&handle_receive_interrupt_dma_rtail;
+		if (rcd && (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic))
+			hfi1_set_fast(rcd);
 		hfi1_rcd_put(rcd);
 	}
 }
@@ -959,17 +919,14 @@ void set_all_slowpath(struct hfi1_devdata *dd)
 		if (!rcd)
 			continue;
 		if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
-			rcd->do_interrupt = &handle_receive_interrupt;
+			rcd->do_interrupt = rcd->slow_handler;
 
 		hfi1_rcd_put(rcd);
 	}
 }
 
-static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
-				      struct hfi1_packet *packet,
-				      struct hfi1_devdata *dd)
+static bool __set_armed_to_active(struct hfi1_packet *packet)
 {
-	struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
 	u8 etype = rhf_rcv_type(packet->rhf);
 	u8 sc = SC15_PACKET;
 
@@ -984,19 +941,34 @@ static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
 		sc = hfi1_16B_get_sc(hdr);
 	}
 	if (sc != SC15_PACKET) {
-		int hwstate = driver_lstate(rcd->ppd);
+		int hwstate = driver_lstate(packet->rcd->ppd);
+		struct work_struct *lsaw =
+				&packet->rcd->ppd->linkstate_active_work;
 
 		if (hwstate != IB_PORT_ACTIVE) {
-			dd_dev_info(dd,
+			dd_dev_info(packet->rcd->dd,
 				    "Unexpected link state %s\n",
 				    opa_lstate_name(hwstate));
-			return 0;
+			return false;
 		}
 
-		queue_work(rcd->ppd->link_wq, lsaw);
-		return 1;
+		queue_work(packet->rcd->ppd->link_wq, lsaw);
+		return true;
 	}
-	return 0;
+	return false;
+}
+
+/**
+ * set_armed_to_active - the fast path for armed to active
+ * @packet: the packet structure
+ *
+ * Return true if packet processing needs to bail.
+ */
+static bool set_armed_to_active(struct hfi1_packet *packet)
+{
+	if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED))
+		return false;
+	return __set_armed_to_active(packet);
 }
 
 /*
@@ -1019,10 +991,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
 
 	init_packet(rcd, &packet);
 
-	if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
-		u32 seq = rhf_rcv_seq(packet.rhf);
-
-		if (seq != rcd->seq_cnt) {
+	if (!get_dma_rtail_setting(rcd)) {
+		if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
 			last = RCV_PKT_DONE;
 			goto bail;
 		}
@@ -1039,22 +1009,15 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
 		 * Control context can potentially receive an invalid
 		 * rhf. Drop such packets.
 		 */
-		if (rcd->ctxt == HFI1_CTRL_CTXT) {
-			u32 seq = rhf_rcv_seq(packet.rhf);
-
-			if (seq != rcd->seq_cnt)
+		if (rcd->ctxt == HFI1_CTRL_CTXT)
+			if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
 				skip_pkt = 1;
-		}
 	}
 
 	prescan_rxq(rcd, &packet);
 
 	while (last == RCV_PKT_OK) {
-		if (unlikely(dd->do_drop &&
-			     atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
-			     DROP_PACKET_ON)) {
-			dd->do_drop = 0;
-
+		if (hfi1_need_drop(dd)) {
 			/* On to the next packet */
 			packet.rhqoff += packet.rsize;
 			packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
@@ -1066,26 +1029,14 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
 			last = skip_rcv_packet(&packet, thread);
 			skip_pkt = 0;
 		} else {
-			/* Auto activate link on non-SC15 packet receive */
-			if (unlikely(rcd->ppd->host_link_state ==
-				     HLS_UP_ARMED) &&
-			    set_armed_to_active(rcd, &packet, dd))
+			if (set_armed_to_active(&packet))
 				goto bail;
 			last = process_rcv_packet(&packet, thread);
 		}
 
-		if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
-			u32 seq = rhf_rcv_seq(packet.rhf);
-
-			if (++rcd->seq_cnt > 13)
-				rcd->seq_cnt = 1;
-			if (seq != rcd->seq_cnt)
+		if (!get_dma_rtail_setting(rcd)) {
+			if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
 				last = RCV_PKT_DONE;
-			if (needset) {
-				dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
-				set_nodma_rtail(dd, rcd->ctxt);
-				needset = 0;
-			}
 		} else {
 			if (packet.rhqoff == hdrqtail)
 				last = RCV_PKT_DONE;
@@ -1094,27 +1045,24 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
 			 * rhf. Drop such packets.
 			 */
 			if (rcd->ctxt == HFI1_CTRL_CTXT) {
-				u32 seq = rhf_rcv_seq(packet.rhf);
+				bool lseq;
 
-				if (++rcd->seq_cnt > 13)
-					rcd->seq_cnt = 1;
-				if (!last && (seq != rcd->seq_cnt))
+				lseq = hfi1_seq_incr(rcd,
+						     rhf_rcv_seq(packet.rhf));
+				if (!last && lseq)
 					skip_pkt = 1;
 			}
-
-			if (needset) {
-				dd_dev_info(dd,
-					    "Switching to DMA_RTAIL\n");
-				set_dma_rtail(dd, rcd->ctxt);
-				needset = 0;
-			}
 		}
 
+		if (needset) {
+			needset = false;
+			set_all_fastpath(dd, rcd);
+		}
 		process_rcv_update(last, &packet);
 	}
 
 	process_rcv_qp_work(&packet);
-	rcd->head = packet.rhqoff;
+	hfi1_set_rcd_head(rcd, packet.rhqoff);
 
 bail:
 	/*
@@ -1606,23 +1554,22 @@ void handle_eflags(struct hfi1_packet *packet)
  * The following functions are called by the interrupt handler. They are type
  * specific handlers for each packet type.
  */
-static int process_receive_ib(struct hfi1_packet *packet)
+static void process_receive_ib(struct hfi1_packet *packet)
 {
 	if (hfi1_setup_9B_packet(packet))
-		return RHF_RCV_CONTINUE;
+		return;
 
 	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
-		return RHF_RCV_CONTINUE;
+		return;
 
 	trace_hfi1_rcvhdr(packet);
 
 	if (unlikely(rhf_err_flags(packet->rhf))) {
 		handle_eflags(packet);
-		return RHF_RCV_CONTINUE;
+		return;
 	}
 
 	hfi1_ib_rcv(packet);
-	return RHF_RCV_CONTINUE;
 }
 
 static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
@@ -1638,23 +1585,23 @@ static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
 	return false;
 }
 
-static int process_receive_bypass(struct hfi1_packet *packet)
+static void process_receive_bypass(struct hfi1_packet *packet)
 {
 	struct hfi1_devdata *dd = packet->rcd->dd;
 
 	if (hfi1_is_vnic_packet(packet)) {
 		hfi1_vnic_bypass_rcv(packet);
-		return RHF_RCV_CONTINUE;
+		return;
 	}
 
 	if (hfi1_setup_bypass_packet(packet))
-		return RHF_RCV_CONTINUE;
+		return;
 
 	trace_hfi1_rcvhdr(packet);
 
 	if (unlikely(rhf_err_flags(packet->rhf))) {
 		handle_eflags(packet);
-		return RHF_RCV_CONTINUE;
+		return;
 	}
 
 	if (hfi1_16B_get_l2(packet->hdr) == 0x2) {
@@ -1677,17 +1624,16 @@ static int process_receive_bypass(struct hfi1_packet *packet)
 				(OPA_EI_STATUS_SMASK | BAD_L2_ERR);
 		}
 	}
-	return RHF_RCV_CONTINUE;
 }
 
-static int process_receive_error(struct hfi1_packet *packet)
+static void process_receive_error(struct hfi1_packet *packet)
 {
 	/* KHdrHCRCErr -- KDETH packet with a bad HCRC */
 	if (unlikely(
 		 hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
 		 (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
 		  packet->rhf & RHF_DC_ERR)))
-		return RHF_RCV_CONTINUE;
+		return;
 
 	hfi1_setup_ib_header(packet);
 	handle_eflags(packet);
@@ -1695,32 +1641,29 @@ static int process_receive_error(struct hfi1_packet *packet)
 	if (unlikely(rhf_err_flags(packet->rhf)))
 		dd_dev_err(packet->rcd->dd,
 			   "Unhandled error packet received. Dropping.\n");
-
-	return RHF_RCV_CONTINUE;
 }
 
-static int kdeth_process_expected(struct hfi1_packet *packet)
+static void kdeth_process_expected(struct hfi1_packet *packet)
 {
 	hfi1_setup_9B_packet(packet);
 	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
-		return RHF_RCV_CONTINUE;
+		return;
 
 	if (unlikely(rhf_err_flags(packet->rhf))) {
 		struct hfi1_ctxtdata *rcd = packet->rcd;
 
 		if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
-			return RHF_RCV_CONTINUE;
+			return;
 	}
 
 	hfi1_kdeth_expected_rcv(packet);
-	return RHF_RCV_CONTINUE;
 }
 
-static int kdeth_process_eager(struct hfi1_packet *packet)
+static void kdeth_process_eager(struct hfi1_packet *packet)
 {
 	hfi1_setup_9B_packet(packet);
 	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
-		return RHF_RCV_CONTINUE;
+		return;
 
 	trace_hfi1_rcvhdr(packet);
 	if (unlikely(rhf_err_flags(packet->rhf))) {
@@ -1728,37 +1671,41 @@ static int kdeth_process_eager(struct hfi1_packet *packet)
 
 		show_eflags_errs(packet);
 		if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
-			return RHF_RCV_CONTINUE;
+			return;
 	}
 
 	hfi1_kdeth_eager_rcv(packet);
-	return RHF_RCV_CONTINUE;
 }
 
-static int process_receive_invalid(struct hfi1_packet *packet)
+static void process_receive_invalid(struct hfi1_packet *packet)
 {
 	dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
 		   rhf_rcv_type(packet->rhf));
-	return RHF_RCV_CONTINUE;
 }
 
+#define HFI1_RCVHDR_DUMP_MAX	5
+
 void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
 {
 	struct hfi1_packet packet;
 	struct ps_mdata mdata;
+	int i;
 
-	seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s head %llu tail %llu\n",
-		   rcd->ctxt, rcd->rcvhdrq_cnt, rcd->rcvhdrqentsize,
-		   HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
+	seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s ctrl 0x%08llx status 0x%08llx, head %llu tail %llu  sw head %u\n",
+		   rcd->ctxt, get_hdrq_cnt(rcd), get_hdrqentsize(rcd),
+		   get_dma_rtail_setting(rcd) ?
 		   "dma_rtail" : "nodma_rtail",
+		   read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_CTRL),
+		   read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_STATUS),
 		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) &
 		   RCV_HDR_HEAD_HEAD_MASK,
-		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL));
+		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL),
+		   rcd->head);
 
 	init_packet(rcd, &packet);
 	init_ps_mdata(&mdata, &packet);
 
-	while (1) {
+	for (i = 0; i < HFI1_RCVHDR_DUMP_MAX; i++) {
 		__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
 					 rcd->rhf_offset;
 		struct ib_header *hdr;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 7c5e3fb..bef6946 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -505,12 +505,12 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
 			ret = -EINVAL;
 			goto done;
 		}
-		if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) {
+		if ((flags & VM_WRITE) || !hfi1_rcvhdrtail_kvaddr(uctxt)) {
 			ret = -EPERM;
 			goto done;
 		}
 		memlen = PAGE_SIZE;
-		memvirt = (void *)uctxt->rcvhdrtail_kvaddr;
+		memvirt = (void *)hfi1_rcvhdrtail_kvaddr(uctxt);
 		flags &= ~VM_MAYWRITE;
 		break;
 	case SUBCTXT_UREGS:
@@ -1090,7 +1090,7 @@ static void user_init(struct hfi1_ctxtdata *uctxt)
 	 * don't have to wait to be sure the DMA update has happened
 	 * (chip resets head/tail to 0 on transition to enable).
 	 */
-	if (uctxt->rcvhdrtail_kvaddr)
+	if (hfi1_rcvhdrtail_kvaddr(uctxt))
 		clear_rcvhdrtail(uctxt);
 
 	/* Setup J_KEY before enabling the context */
@@ -1154,8 +1154,8 @@ static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
 	cinfo.send_ctxt = uctxt->sc->hw_context;
 
 	cinfo.egrtids = uctxt->egrbufs.alloced;
-	cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
-	cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
+	cinfo.rcvhdrq_cnt = get_hdrq_cnt(uctxt);
+	cinfo.rcvhdrq_entsize = get_hdrqentsize(uctxt) << 2;
 	cinfo.sdma_ring_size = fd->cq->nentries;
 	cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
 
@@ -1543,7 +1543,7 @@ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
 		 * always resets it's tail register back to 0 on a
 		 * transition from disabled to enabled.
 		 */
-		if (uctxt->rcvhdrtail_kvaddr)
+		if (hfi1_rcvhdrtail_kvaddr(uctxt))
 			clear_rcvhdrtail(uctxt);
 		rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
 	} else {
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index fc10d65..6365e8f 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -197,7 +197,9 @@ struct exp_tid_set {
 	u32 count;
 };
 
-typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
+struct hfi1_ctxtdata;
+typedef int (*intr_handler)(struct hfi1_ctxtdata *rcd, int data);
+typedef void (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
 
 struct tid_queue {
 	struct list_head queue_head;
@@ -226,7 +228,11 @@ struct hfi1_ctxtdata {
 	 * be valid. Worst case is we process an extra interrupt and up to 64
 	 * packets with the wrong interrupt handler.
 	 */
-	int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);
+	intr_handler do_interrupt;
+	/** fast handler after autoactive */
+	intr_handler fast_handler;
+	/** slow handler */
+	intr_handler slow_handler;
 	/* verbs rx_stats per rcd */
 	struct hfi1_opcode_stats_perctx *opstats;
 	/* clear interrupt mask */
@@ -1153,6 +1159,8 @@ struct hfi1_devdata {
 
 	char *boardname; /* human readable board info */
 
+	u64 ctx0_seq_drop;
+
 	/* reset value */
 	u64 z_int_counter;
 	u64 z_rcv_limit;
@@ -1310,7 +1318,7 @@ struct hfi1_devdata {
 	struct err_info_constraint err_info_xmit_constraint;
 
 	atomic_t drop_packet;
-	u8 do_drop;
+	bool do_drop;
 	u8 err_info_uncorrectable;
 	u8 err_info_fmconfig;
 
@@ -1507,12 +1515,148 @@ void hfi1_make_ud_req_16B(struct rvt_qp *qp,
 #define RCV_PKT_LIMIT   0x1 /* stop, hit limit, start thread */
 #define RCV_PKT_DONE    0x2 /* stop, no more packets detected */
 
+/**
+ * hfi1_rcd_head - return the rcd head
+ * @rcd: the context
+ */
+static inline u32 hfi1_rcd_head(struct hfi1_ctxtdata *rcd)
+{
+	return rcd->head;
+}
+
+/**
+ * hfi1_set_rcd_head - set the rcd head
+ * @rcd: the context
+ * @head: the new head
+ */
+static inline void hfi1_set_rcd_head(struct hfi1_ctxtdata *rcd, u32 head)
+{
+	rcd->head = head;
+}
+
 /* calculate the current RHF address */
 static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
 {
 	return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset;
 }
 
+/* return DMA_RTAIL configuration */
+static inline bool get_dma_rtail_setting(struct hfi1_ctxtdata *rcd)
+{
+	return !!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL);
+}
+
+/**
+ * hfi1_seq_incr_wrap - wrapping increment for sequence
+ * @seq: the current sequence number
+ *
+ * Returns: the incremented seq
+ */
+static inline u8 hfi1_seq_incr_wrap(u8 seq)
+{
+	if (++seq > RHF_MAX_SEQ)
+		seq = 1;
+	return seq;
+}
+
+/**
+ * hfi1_seq_cnt - return seq_cnt member
+ * @rcd: the receive context
+ *
+ * Return seq_cnt member
+ */
+static inline u8 hfi1_seq_cnt(struct hfi1_ctxtdata *rcd)
+{
+	return rcd->seq_cnt;
+}
+
+/**
+ * hfi1_set_seq_cnt - set seq_cnt member
+ * @rcd: the receive context
+ * @cnt: the new sequence count
+ *
+ * Set the seq_cnt member.
+ */
+static inline void hfi1_set_seq_cnt(struct hfi1_ctxtdata *rcd, u8 cnt)
+{
+	rcd->seq_cnt = cnt;
+}
+
+/**
+ * last_rcv_seq - detect the end of the packet sequence
+ * @rcd: the receive context
+ * @seq: the RHF sequence number
+ *
+ * Return true if @seq does not match the expected sequence count.
+ */
+static inline bool last_rcv_seq(struct hfi1_ctxtdata *rcd, u32 seq)
+{
+	return seq != rcd->seq_cnt;
+}
+
+/**
+ * hfi1_seq_incr - increment context sequence number
+ * @rcd: the receive context
+ * @seq: the current sequence number
+ *
+ * Returns: true if this was the last packet
+ */
+static inline bool hfi1_seq_incr(struct hfi1_ctxtdata *rcd, u32 seq)
+{
+	rcd->seq_cnt = hfi1_seq_incr_wrap(rcd->seq_cnt);
+	return last_rcv_seq(rcd, seq);
+}
+
+/**
+ * get_hdrqentsize - return hdrq entry size
+ * @rcd: the receive context
+ */
+static inline u8 get_hdrqentsize(struct hfi1_ctxtdata *rcd)
+{
+	return rcd->rcvhdrqentsize;
+}
+
+/**
+ * get_hdrq_cnt - return hdrq count
+ * @rcd: the receive context
+ */
+static inline u16 get_hdrq_cnt(struct hfi1_ctxtdata *rcd)
+{
+	return rcd->rcvhdrq_cnt;
+}
+
+/**
+ * hfi1_is_slowpath - check if this context is slow path
+ * @rcd: the receive context
+ */
+static inline bool hfi1_is_slowpath(struct hfi1_ctxtdata *rcd)
+{
+	return rcd->do_interrupt == rcd->slow_handler;
+}
+
+/**
+ * hfi1_is_fastpath - check if this context is fast path
+ * @rcd: the receive context
+ */
+static inline bool hfi1_is_fastpath(struct hfi1_ctxtdata *rcd)
+{
+	if (rcd->ctxt == HFI1_CTRL_CTXT)
+		return false;
+
+	return rcd->do_interrupt == rcd->fast_handler;
+}
+
+/**
+ * hfi1_set_fast - change to the fast handler
+ * @rcd: the receive context
+ */
+static inline void hfi1_set_fast(struct hfi1_ctxtdata *rcd)
+{
+	if (unlikely(!rcd))
+		return;
+	if (unlikely(!hfi1_is_fastpath(rcd)))
+		rcd->do_interrupt = rcd->fast_handler;
+}
+
 int hfi1_reset_device(int);
 
 void receive_interrupt_work(struct work_struct *work);
@@ -2015,9 +2159,21 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
 void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
 			     size_t npages, bool dirty);
 
+/**
+ * hfi1_rcvhdrtail_kvaddr - return tail kvaddr
+ * @rcd: the receive context
+ */
+static inline __le64 *hfi1_rcvhdrtail_kvaddr(const struct hfi1_ctxtdata *rcd)
+{
+	return (__le64 *)rcd->rcvhdrtail_kvaddr;
+}
+
 static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
 {
-	*((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL;
+	u64 *kv = (u64 *)hfi1_rcvhdrtail_kvaddr(rcd);
+
+	if (kv)
+		*kv = 0ULL;
 }
 
 static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
@@ -2026,7 +2182,17 @@ static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
 	 * volatile because it's a DMA target from the chip, routine is
 	 * inlined, and don't want register caching or reordering.
 	 */
-	return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
+	return (u32)le64_to_cpu(*hfi1_rcvhdrtail_kvaddr(rcd));
+}
+
+static inline bool hfi1_packet_present(struct hfi1_ctxtdata *rcd)
+{
+	if (likely(!rcd->rcvhdrtail_kvaddr)) {
+		u32 seq = rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd)));
+
+		return !last_rcv_seq(rcd, seq);
+	}
+	return hfi1_rcd_head(rcd) != get_rcvhdrtail(rcd);
 }
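
A minimal sketch, assuming a hypothetical process_one_packet() helper, of how the new head accessors and hfi1_packet_present() could drive a receive poll loop; this is illustrative only and not part of the patch:

static void example_rcv_poll(struct hfi1_ctxtdata *rcd)
{
	/* poll until neither the RHF sequence nor the DMA'd tail shows work */
	while (hfi1_packet_present(rcd)) {
		u32 head = process_one_packet(rcd, hfi1_rcd_head(rcd));

		hfi1_set_rcd_head(rcd, head);	/* publish the advanced head */
	}
}
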
 
 /*
@@ -2298,6 +2464,25 @@ static inline bool is_integrated(struct hfi1_devdata *dd)
 	return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
 }
 
+/**
+ * hfi1_need_drop - detect need for drop
+ * @dd: the device
+ *
+ * In some cases, the first packet needs to be dropped.
+ *
+ * Return true if the current packet needs to be dropped and false otherwise.
+ */
+static inline bool hfi1_need_drop(struct hfi1_devdata *dd)
+{
+	if (unlikely(dd->do_drop &&
+		     atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
+		     DROP_PACKET_ON)) {
+		dd->do_drop = false;
+		return true;
+	}
+	return false;
+}
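
A rough sketch of the intended call pattern, assuming hypothetical example_handle_one_packet() and process_packet() helpers: hfi1_need_drop() is checked once per packet early in the receive path, and because the atomic_xchg() swaps drop_packet and do_drop is cleared, only the first caller after init sees true:

static void example_handle_one_packet(struct hfi1_devdata *dd,
				      struct hfi1_packet *packet)
{
	/* A-step silicon: the very first received packet is discarded */
	if (unlikely(hfi1_need_drop(dd)))
		return;

	process_packet(packet);		/* placeholder for normal handling */
}
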
+
 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
 
 #define DD_DEV_ENTRY(dd)       __string(dev, dev_name(&(dd)->pcidev->dev))
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 26b792b..e3acda7 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -78,8 +78,6 @@
  */
 #define HFI1_MIN_USER_CTXT_BUFCNT 7
 
-#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
-#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
 #define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
 #define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
 
@@ -122,8 +120,6 @@ unsigned int user_credit_return_threshold = 33;	/* default is 33% */
 module_param(user_credit_return_threshold, uint, S_IRUGO);
 MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");
 
-static inline u64 encode_rcv_header_entry_size(u16 size);
-
 DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
 
 static int hfi1_create_kctxt(struct hfi1_devdata *dd,
@@ -154,7 +150,12 @@ static int hfi1_create_kctxt(struct hfi1_devdata *dd,
 	/* Control context must use DMA_RTAIL */
 	if (rcd->ctxt == HFI1_CTRL_CTXT)
 		rcd->flags |= HFI1_CAP_DMA_RTAIL;
-	rcd->seq_cnt = 1;
+	rcd->fast_handler = get_dma_rtail_setting(rcd) ?
+				handle_receive_interrupt_dma_rtail :
+				handle_receive_interrupt_nodma_rtail;
+	rcd->slow_handler = handle_receive_interrupt;
+
+	hfi1_set_seq_cnt(rcd, 1);
 
 	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
 	if (!rcd->sc) {
@@ -511,23 +512,6 @@ void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
 }
 
 /*
- * Convert a receive header entry size that to the encoding used in the CSR.
- *
- * Return a zero if the given size is invalid.
- */
-static inline u64 encode_rcv_header_entry_size(u16 size)
-{
-	/* there are only 3 valid receive header entry sizes */
-	if (size == 2)
-		return 1;
-	if (size == 16)
-		return 2;
-	else if (size == 32)
-		return 4;
-	return 0; /* invalid */
-}
-
-/*
  * Select the largest ccti value over all SLs to determine the intra-
  * packet gap for the link.
  *
@@ -892,10 +876,10 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
 
 	if (is_ax(dd)) {
 		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
-		dd->do_drop = 1;
+		dd->do_drop = true;
 	} else {
 		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
-		dd->do_drop = 0;
+		dd->do_drop = false;
 	}
 
 	/* make sure the link is not "up" */
@@ -1149,9 +1133,9 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 		dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
 				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
 		rcd->rcvhdrq = NULL;
-		if (rcd->rcvhdrtail_kvaddr) {
+		if (hfi1_rcvhdrtail_kvaddr(rcd)) {
 			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
-					  (void *)rcd->rcvhdrtail_kvaddr,
+					  (void *)hfi1_rcvhdrtail_kvaddr(rcd),
 					  rcd->rcvhdrqtailaddr_dma);
 			rcd->rcvhdrtail_kvaddr = NULL;
 		}
@@ -1611,29 +1595,6 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
 	hfi1_free_devdata(dd);
 }
 
-static int init_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
-{
-	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
-		dd_dev_err(dd, "Receive header queue count too small\n");
-		return -EINVAL;
-	}
-
-	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
-		dd_dev_err(dd,
-			   "Receive header queue count cannot be greater than %u\n",
-			   HFI1_MAX_HDRQ_EGRBUF_CNT);
-		return -EINVAL;
-	}
-
-	if (thecnt % HDRQ_INCREMENT) {
-		dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
-			   thecnt, HDRQ_INCREMENT);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	int ret = 0, j, pidx, initfail;
@@ -1661,7 +1622,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	/* Validate some global module parameters */
-	ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt);
+	ret = hfi1_validate_rcvhdrcnt(dd, rcvhdrcnt);
 	if (ret)
 		goto bail;
 
@@ -1842,7 +1803,6 @@ static void shutdown_one(struct pci_dev *pdev)
 int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 {
 	unsigned amt;
-	u64 reg;
 
 	if (!rcd->rcvhdrq) {
 		gfp_t gfp_flags;
@@ -1874,30 +1834,9 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 				goto bail_free;
 		}
 	}
-	/*
-	 * These values are per-context:
-	 *	RcvHdrCnt
-	 *	RcvHdrEntSize
-	 *	RcvHdrSize
-	 */
-	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
-			& RCV_HDR_CNT_CNT_MASK)
-		<< RCV_HDR_CNT_CNT_SHIFT;
-	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
-	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
-			& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
-		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
-	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
-	reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK)
-		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
-	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
 
-	/*
-	 * Program dummy tail address for every receive context
-	 * before enabling any receive context
-	 */
-	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
-			dd->rcvhdrtail_dummy_dma);
+	set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize,
+		      rcd->rcvhdrq_cnt);
 
 	return 0;
 
diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c
index d920b16..db82db4 100644
--- a/drivers/infiniband/hw/hfi1/msix.c
+++ b/drivers/infiniband/hw/hfi1/msix.c
@@ -115,13 +115,11 @@ int msix_initialize(struct hfi1_devdata *dd)
  */
 static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
 			    irq_handler_t handler, irq_handler_t thread,
-			    u32 idx, enum irq_type type)
+			    enum irq_type type, const char *name)
 {
 	unsigned long nr;
 	int irq;
 	int ret;
-	const char *err_info;
-	char name[MAX_NAME_SIZE];
 	struct hfi1_msix_entry *me;
 
 	/* Allocate an MSIx vector */
@@ -135,43 +133,15 @@ static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
 	if (nr == dd->msix_info.max_requested)
 		return -ENOSPC;
 
-	/* Specific verification and determine the name */
-	switch (type) {
-	case IRQ_GENERAL:
-		/* general interrupt must be MSIx vector 0 */
-		if (nr) {
-			spin_lock(&dd->msix_info.msix_lock);
-			__clear_bit(nr, dd->msix_info.in_use_msix);
-			spin_unlock(&dd->msix_info.msix_lock);
-			dd_dev_err(dd, "Invalid index %lu for GENERAL IRQ\n",
-				   nr);
-			return -EINVAL;
-		}
-		snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
-		err_info = "general";
-		break;
-	case IRQ_SDMA:
-		snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d",
-			 dd->unit, idx);
-		err_info = "sdma";
-		break;
-	case IRQ_RCVCTXT:
-		snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d",
-			 dd->unit, idx);
-		err_info = "receive context";
-		break;
-	case IRQ_OTHER:
-	default:
+	if (type < IRQ_SDMA || type >= IRQ_OTHER)
 		return -EINVAL;
-	}
-	name[sizeof(name) - 1] = 0;
 
 	irq = pci_irq_vector(dd->pcidev, nr);
 	ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
 	if (ret) {
 		dd_dev_err(dd,
-			   "%s: request for IRQ %d failed, MSIx %d, err %d\n",
-			   err_info, irq, idx, ret);
+			   "%s: request for IRQ %d failed, MSIx %lu, err %d\n",
+			   name, irq, nr, ret);
 		spin_lock(&dd->msix_info.msix_lock);
 		__clear_bit(nr, dd->msix_info.in_use_msix);
 		spin_unlock(&dd->msix_info.msix_lock);
@@ -195,17 +165,13 @@ static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
 	return nr;
 }
 
-/**
- * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
- * @rcd: valid rcd context
- *
- */
-int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
+static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd,
+				       irq_handler_t handler,
+				       irq_handler_t thread,
+				       const char *name)
 {
-	int nr;
-
-	nr = msix_request_irq(rcd->dd, rcd, receive_context_interrupt,
-			      receive_context_thread, rcd->ctxt, IRQ_RCVCTXT);
+	int nr = msix_request_irq(rcd->dd, rcd, handler, thread,
+				  IRQ_RCVCTXT, name);
 	if (nr < 0)
 		return nr;
 
@@ -222,6 +188,22 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
 }
 
 /**
+ * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
+ * @rcd: valid rcd context
+ *
+ */
+int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
+{
+	char name[MAX_NAME_SIZE];
+
+	snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d",
+		 rcd->dd->unit, rcd->ctxt);
+
+	return msix_request_rcd_irq_common(rcd, receive_context_interrupt,
+					   receive_context_thread, name);
+}
+
+/**
  * msix_request_smda_ira() - Helper for getting SDMA IRQ resources
  * @sde: valid sdma engine
  *
@@ -229,9 +211,12 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
 int msix_request_sdma_irq(struct sdma_engine *sde)
 {
 	int nr;
+	char name[MAX_NAME_SIZE];
 
+	snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d",
+		 sde->dd->unit, sde->this_idx);
 	nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL,
-			      sde->this_idx, IRQ_SDMA);
+			      IRQ_SDMA, name);
 	if (nr < 0)
 		return nr;
 	sde->msix_intr = nr;
@@ -241,6 +226,32 @@ int msix_request_sdma_irq(struct sdma_engine *sde)
 }
 
 /**
+ * msix_request_general_irq() - Helper for getting general IRQ
+ * resources
+ * @dd: valid device data
+ */
+int msix_request_general_irq(struct hfi1_devdata *dd)
+{
+	int nr;
+	char name[MAX_NAME_SIZE];
+
+	snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
+	nr = msix_request_irq(dd, dd, general_interrupt, NULL, IRQ_GENERAL,
+			      name);
+	if (nr < 0)
+		return nr;
+
+	/* general interrupt must be MSIx vector 0 */
+	if (nr) {
+		msix_free_irq(dd, (u8)nr);
+		dd_dev_err(dd, "Invalid index %d for GENERAL IRQ\n", nr);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
  * enable_sdma_src() - Helper to enable SDMA IRQ srcs
  * @dd: valid devdata structure
  * @i: index of SDMA engine
@@ -265,10 +276,9 @@ static void enable_sdma_srcs(struct hfi1_devdata *dd, int i)
 int msix_request_irqs(struct hfi1_devdata *dd)
 {
 	int i;
-	int ret;
+	int ret = msix_request_general_irq(dd);
 
-	ret = msix_request_irq(dd, dd, general_interrupt, NULL, 0, IRQ_GENERAL);
-	if (ret < 0)
+	if (ret)
 		return ret;
 
 	for (i = 0; i < dd->num_sdma; i++) {
diff --git a/drivers/infiniband/hw/hfi1/msix.h b/drivers/infiniband/hw/hfi1/msix.h
index a514881..1a02ab7 100644
--- a/drivers/infiniband/hw/hfi1/msix.h
+++ b/drivers/infiniband/hw/hfi1/msix.h
@@ -54,6 +54,7 @@
 int msix_initialize(struct hfi1_devdata *dd);
 int msix_request_irqs(struct hfi1_devdata *dd);
 void msix_clean_up_interrupts(struct hfi1_devdata *dd);
+int msix_request_general_irq(struct hfi1_devdata *dd);
 int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd);
 int msix_request_sdma_irq(struct sdma_engine *sde);
 void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 1a3c647..f1734e5 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2599,7 +2599,7 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
 	 * to be sent before sending this one.
 	 */
 	e = NULL;
-	old_req = 1;
+	old_req = true;
 	ibp->rvp.n_rc_dupreq++;
 
 	spin_lock_irqsave(&qp->s_lock, flags);
diff --git a/drivers/infiniband/hw/hfi1/trace_ctxts.h b/drivers/infiniband/hw/hfi1/trace_ctxts.h
index e00c8a7..b5fc5c6 100644
--- a/drivers/infiniband/hw/hfi1/trace_ctxts.h
+++ b/drivers/infiniband/hw/hfi1/trace_ctxts.h
@@ -80,7 +80,7 @@ TRACE_EVENT(hfi1_uctxtdata,
 			   __entry->credits = uctxt->sc->credits;
 			   __entry->hw_free = le64_to_cpu(*uctxt->sc->hw_free);
 			   __entry->piobase = uctxt->sc->base_addr;
-			   __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
+			   __entry->rcvhdrq_cnt = get_hdrq_cnt(uctxt);
 			   __entry->rcvhdrq_dma = uctxt->rcvhdrq_dma;
 			   __entry->eager_cnt = uctxt->egrbufs.alloced;
 			   __entry->rcvegr_dma = uctxt->egrbufs.rcvtids[0].dma;
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
index 3cec960..168079e 100644
--- a/drivers/infiniband/hw/hfi1/trace_rx.h
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -106,19 +106,8 @@ TRACE_EVENT(hfi1_receive_interrupt,
 			     ),
 	    TP_fast_assign(DD_DEV_ASSIGN(dd);
 			__entry->ctxt = rcd->ctxt;
-			if (rcd->do_interrupt ==
-			    &handle_receive_interrupt) {
-				__entry->slow_path = 1;
-				__entry->dma_rtail = 0xFF;
-			} else if (rcd->do_interrupt ==
-					&handle_receive_interrupt_dma_rtail){
-				__entry->dma_rtail = 1;
-				__entry->slow_path = 0;
-			} else if (rcd->do_interrupt ==
-					&handle_receive_interrupt_nodma_rtail) {
-				__entry->dma_rtail = 0;
-				__entry->slow_path = 0;
-			}
+			__entry->slow_path = hfi1_is_slowpath(rcd);
+			__entry->dma_rtail = get_dma_rtail_setting(rcd);
 			),
 	    TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d",
 		      __get_str(dev),
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 469acb9..3b50500 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -106,7 +106,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
 	int ret;
 	unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
 
-	ret = get_user_pages_fast(vaddr, npages, gup_flags, pages);
+	ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
 	if (ret < 0)
 		return ret;
 
@@ -118,7 +118,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
 void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
 			     size_t npages, bool dirty)
 {
-	put_user_pages_dirty_lock(p, npages, dirty);
+	unpin_user_pages_dirty_lock(p, npages, dirty);
 
 	if (mm) { /* during close after signal, mm can be NULL */
 		atomic64_sub(npages, &mm->pinned_vm);
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index b49e60e..6b14581 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -78,7 +78,7 @@ static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
 	if (ret)
 		goto done;
 
-	if (uctxt->rcvhdrtail_kvaddr)
+	if (hfi1_rcvhdrtail_kvaddr(uctxt))
 		clear_rcvhdrtail(uctxt);
 
 	rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index a2d1e53..5ffe4c9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -370,6 +370,8 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 	hr_cq->buf.size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
 	hr_cq->buf.page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
 	spin_lock_init(&hr_cq->lock);
+	INIT_LIST_HEAD(&hr_cq->sq_list);
+	INIT_LIST_HEAD(&hr_cq->rq_list);
 
 	if (udata) {
 		ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 5617434..a7c4ff9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -45,8 +45,6 @@
 
 #define HNS_ROCE_MAX_MSG_LEN			0x80000000
 
-#define HNS_ROCE_ALIGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
-
 #define HNS_ROCE_IB_MIN_SQ_STRIDE		6
 
 #define HNS_ROCE_BA_SIZE			(32 * 4096)
@@ -107,11 +105,6 @@
 #define NODE_DESC_SIZE				64
 #define DB_REG_OFFSET				0x1000
 
-#define SERV_TYPE_RC				0
-#define SERV_TYPE_RD				1
-#define SERV_TYPE_UC				2
-#define SERV_TYPE_UD				3
-
 /* Configure to HW for PAGE_SIZE larger than 4KB */
 #define PG_SHIFT_OFFSET				(PAGE_SHIFT - 12)
 
@@ -131,6 +124,13 @@
 #define EQ_DEPTH_COEFF				2
 
 enum {
+	SERV_TYPE_RC,
+	SERV_TYPE_UC,
+	SERV_TYPE_RD,
+	SERV_TYPE_UD,
+};
+
+enum {
 	HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
 	HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
 };
@@ -423,7 +423,7 @@ struct hns_roce_mr_table {
 struct hns_roce_wq {
 	u64		*wrid;     /* Work request ID */
 	spinlock_t	lock;
-	int		wqe_cnt;  /* WQE num */
+	u32		wqe_cnt;  /* WQE num */
 	int		max_gs;
 	int		offset;
 	int		wqe_shift;	/* WQE size */
@@ -498,6 +498,10 @@ struct hns_roce_cq {
 	u32				vector;
 	atomic_t			refcount;
 	struct completion		free;
+	struct list_head		sq_list; /* all qps on this send cq */
+	struct list_head		rq_list; /* all qps on this recv cq */
+	int				is_armed; /* cq is armed */
+	struct list_head		node; /* all armed cqs are on a list */
 };
 
 struct hns_roce_idx_que {
@@ -647,7 +651,6 @@ struct hns_roce_qp {
 	u8			sdb_en;
 	u32			doorbell_qpn;
 	u32			sq_signal_bits;
-	u32			sq_next_wqe;
 	struct hns_roce_wq	sq;
 
 	struct ib_umem		*umem;
@@ -682,6 +685,9 @@ struct hns_roce_qp {
 	u32			next_sge;
 
 	struct hns_roce_rinl_buf rq_inl_buf;
+	struct list_head	node;		/* all qps are on a list */
+	struct list_head	rq_node;	/* all recv qps are on a list */
+	struct list_head	sq_node;	/* all send qps are on a list */
 };
 
 struct hns_roce_ib_iboe {
@@ -794,10 +800,8 @@ struct hns_roce_caps {
 	int             reserved_qps;
 	int		num_qpc_timer;
 	int		num_cqc_timer;
-	u32		max_srq_sg;
 	int		num_srqs;
 	u32		max_wqes;
-	u32		max_srqs;
 	u32		max_srq_wrs;
 	u32		max_srq_sges;
 	u32		max_sq_desc_sz;
@@ -811,7 +815,6 @@ struct hns_roce_caps {
 	u32		min_wqes;
 	int		reserved_cqs;
 	int		reserved_srqs;
-	u32		max_srqwqes;
 	int		num_aeq_vectors;
 	int		num_comp_vectors;
 	int		num_other_vectors;
@@ -895,6 +898,12 @@ struct hns_roce_caps {
 	u32		tpq_buf_pg_sz;
 	u32		chunk_sz;	/* chunk size in non multihop mode */
 	u64		flags;
+	u16		default_ceq_max_cnt;
+	u16		default_ceq_period;
+	u16		default_aeq_max_cnt;
+	u16		default_aeq_period;
+	u16		default_aeq_arm_st;
+	u16		default_ceq_arm_st;
 };
 
 struct hns_roce_work {
@@ -911,6 +920,12 @@ struct hns_roce_dfx_hw {
 			      int *buffer);
 };
 
+enum hns_roce_device_state {
+	HNS_ROCE_DEVICE_STATE_INITED,
+	HNS_ROCE_DEVICE_STATE_RST_DOWN,
+	HNS_ROCE_DEVICE_STATE_UNINIT,
+};
+
 struct hns_roce_hw {
 	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
 	int (*cmq_init)(struct hns_roce_dev *hr_dev);
@@ -993,6 +1008,9 @@ struct hns_roce_dev {
 	bool			dis_db;
 	unsigned long		reset_cnt;
 	struct hns_roce_ib_iboe iboe;
+	enum hns_roce_device_state state;
+	struct list_head	qp_list; /* list of all qps on this dev */
+	spinlock_t		qp_list_lock; /* protect qp_list */
 
 	struct list_head        pgdir_list;
 	struct mutex            pgdir_mutex;
@@ -1133,7 +1151,6 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 
 int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
-int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
@@ -1257,6 +1274,7 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
 void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
 int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
+void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
 int hns_roce_init(struct hns_roce_dev *hr_dev);
 void hns_roce_exit(struct hns_roce_dev *hr_dev);
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 2a2b211..c6e6658 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -74,8 +74,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
 	unsigned long flags = 0;
 	void *wqe = NULL;
 	__le32 doorbell[2];
+	u32 wqe_idx = 0;
 	int nreq = 0;
-	u32 ind = 0;
 	int ret = 0;
 	u8 *smac;
 	int loopback;
@@ -88,7 +88,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
 	}
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
-	ind = qp->sq_next_wqe;
+
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
 		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
 			ret = -ENOMEM;
@@ -96,6 +96,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
 			goto out;
 		}
 
+		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
+
 		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
 			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
 				wr->num_sge, qp->sq.max_gs);
@@ -104,9 +106,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
 			goto out;
 		}
 
-		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
-		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
-								      wr->wr_id;
+		wqe = get_send_wqe(qp, wqe_idx);
+		qp->sq.wrid[wqe_idx] = wr->wr_id;
 
 		/* Corresponding to the RC and RD type wqe process separately */
 		if (ibqp->qp_type == IB_QPT_GSI) {
@@ -210,7 +211,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
 				       cpu_to_le32((wr->sg_list[1].addr) >> 32);
 			ud_sq_wqe->l_key1 =
 				       cpu_to_le32(wr->sg_list[1].lkey);
-			ind++;
 		} else if (ibqp->qp_type == IB_QPT_RC) {
 			u32 tmp_len = 0;
 
@@ -308,7 +308,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
 				ctrl->flag |= cpu_to_le32(wr->num_sge <<
 					      HNS_ROCE_WQE_SGE_NUM_BIT);
 			}
-			ind++;
 		}
 	}
 
@@ -336,7 +335,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
 		doorbell[1] = sq_db.u32_8;
 
 		hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
-		qp->sq_next_wqe = ind;
 	}
 
 	spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -348,12 +346,6 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
 				 const struct ib_recv_wr *wr,
 				 const struct ib_recv_wr **bad_wr)
 {
-	int ret = 0;
-	int nreq = 0;
-	int ind = 0;
-	int i = 0;
-	u32 reg_val;
-	unsigned long flags = 0;
 	struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
 	struct hns_roce_wqe_data_seg *scat = NULL;
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -361,9 +353,14 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
 	struct device *dev = &hr_dev->pdev->dev;
 	struct hns_roce_rq_db rq_db;
 	__le32 doorbell[2] = {0};
+	unsigned long flags = 0;
+	unsigned int wqe_idx;
+	int ret = 0;
+	int nreq = 0;
+	int i = 0;
+	u32 reg_val;
 
 	spin_lock_irqsave(&hr_qp->rq.lock, flags);
-	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
 		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
@@ -373,6 +370,8 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
 			goto out;
 		}
 
+		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
+
 		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
 			dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
 				wr->num_sge, hr_qp->rq.max_gs);
@@ -381,7 +380,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
 			goto out;
 		}
 
-		ctrl = get_recv_wqe(hr_qp, ind);
+		ctrl = get_recv_wqe(hr_qp, wqe_idx);
 
 		roce_set_field(ctrl->rwqe_byte_12,
 			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
@@ -393,9 +392,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
 		for (i = 0; i < wr->num_sge; i++)
 			set_data_seg(scat + i, wr->sg_list + i);
 
-		hr_qp->rq.wrid[ind] = wr->wr_id;
-
-		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
+		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
 	}
 
 out:
@@ -2701,7 +2698,6 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 		hr_qp->rq.tail = 0;
 		hr_qp->sq.head = 0;
 		hr_qp->sq.tail = 0;
-		hr_qp->sq_next_wqe = 0;
 	}
 
 	kfree(context);
@@ -3315,7 +3311,6 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 		hr_qp->rq.tail = 0;
 		hr_qp->sq.head = 0;
 		hr_qp->sq.tail = 0;
-		hr_qp->sq_next_wqe = 0;
 	}
 out:
 	kfree(context);
@@ -3614,14 +3609,18 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 	if (ret)
 		return ret;
 
-	send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
-	recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
+	send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
+	recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
 
 	hns_roce_lock_cqs(send_cq, recv_cq);
 	if (!udata) {
-		__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
-				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
-		if (send_cq != recv_cq)
+		if (recv_cq)
+			__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn,
+					       (hr_qp->ibqp.srq ?
+						to_hr_srq(hr_qp->ibqp.srq) :
+						NULL));
+
+		if (send_cq && send_cq != recv_cq)
 			__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
 	}
 	hns_roce_unlock_cqs(send_cq, recv_cq);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index cb8071a..12c4cd8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -63,20 +63,15 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
 	struct hns_roce_mr *mr = to_hr_mr(wr->mr);
 
 	/* use ib_access_flags */
-	roce_set_bit(rc_sq_wqe->byte_4,
-		     V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
+	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
 		     wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
-	roce_set_bit(rc_sq_wqe->byte_4,
-		     V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
+	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
 		     wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
-	roce_set_bit(rc_sq_wqe->byte_4,
-		     V2_RC_FRMR_WQE_BYTE_4_RR_S,
+	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RR_S,
 		     wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
-	roce_set_bit(rc_sq_wqe->byte_4,
-		     V2_RC_FRMR_WQE_BYTE_4_RW_S,
+	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RW_S,
 		     wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
-	roce_set_bit(rc_sq_wqe->byte_4,
-		     V2_RC_FRMR_WQE_BYTE_4_LW_S,
+	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_LW_S,
 		     wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
 
 	/* Data structure reuse may lead to confusion */
@@ -110,7 +105,7 @@ static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
 }
 
 static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
-			   unsigned int *sge_ind)
+			   unsigned int *sge_ind, int valid_num_sge)
 {
 	struct hns_roce_v2_wqe_data_seg *dseg;
 	struct ib_sge *sg;
@@ -123,7 +118,7 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
 
 	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
 		num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
-	extend_sge_num = wr->num_sge - num_in_wqe;
+	extend_sge_num = valid_num_sge - num_in_wqe;
 	sg = wr->sg_list + num_in_wqe;
 	shift = qp->hr_buf.page_shift;
 
@@ -159,14 +154,16 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
 static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
 			     void *wqe, unsigned int *sge_ind,
+			     int valid_num_sge,
 			     const struct ib_send_wr **bad_wr)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
 	struct hns_roce_v2_wqe_data_seg *dseg = wqe;
 	struct hns_roce_qp *qp = to_hr_qp(ibqp);
+	int j = 0;
 	int i;
 
-	if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
+	if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
 		if (le32_to_cpu(rc_sq_wqe->msg_len) >
 		    hr_dev->caps.max_sq_inline) {
 			*bad_wr = wr;
@@ -190,7 +187,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
 			     1);
 	} else {
-		if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
+		if (valid_num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
 			for (i = 0; i < wr->num_sge; i++) {
 				if (likely(wr->sg_list[i].length)) {
 					set_data_seg_v2(dseg, wr->sg_list + i);
@@ -203,19 +200,21 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 				     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
 				     (*sge_ind) & (qp->sge.sge_cnt - 1));
 
-			for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
+			for (i = 0; i < wr->num_sge &&
+			     j < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
 				if (likely(wr->sg_list[i].length)) {
 					set_data_seg_v2(dseg, wr->sg_list + i);
 					dseg++;
+					j++;
 				}
 			}
 
-			set_extend_sge(qp, wr, sge_ind);
+			set_extend_sge(qp, wr, sge_ind, valid_num_sge);
 		}
 
 		roce_set_field(rc_sq_wqe->byte_16,
 			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
-			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
+			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
 	}
 
 	return 0;
@@ -226,6 +225,30 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 				 int attr_mask, enum ib_qp_state cur_state,
 				 enum ib_qp_state new_state);
 
+static int check_send_valid(struct hns_roce_dev *hr_dev,
+			    struct hns_roce_qp *hr_qp)
+{
+	struct ib_qp *ibqp = &hr_qp->ibqp;
+	struct device *dev = hr_dev->dev;
+
+	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
+		     ibqp->qp_type != IB_QPT_GSI &&
+		     ibqp->qp_type != IB_QPT_UD)) {
+		dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
+		return -EOPNOTSUPP;
+	} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
+		   hr_qp->state == IB_QPS_INIT ||
+		   hr_qp->state == IB_QPS_RTR)) {
+		dev_err(dev, "Post WQE fail, QP state %d!\n", hr_qp->state);
+		return -EINVAL;
+	} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
+		dev_err(dev, "Post WQE fail, dev state %d!\n", hr_dev->state);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 				 const struct ib_send_wr *wr,
 				 const struct ib_send_wr **bad_wr)
@@ -239,38 +262,31 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 	struct device *dev = hr_dev->dev;
 	struct hns_roce_v2_db sq_db;
 	struct ib_qp_attr attr;
-	unsigned int sge_ind;
 	unsigned int owner_bit;
+	unsigned int sge_idx;
+	unsigned int wqe_idx;
 	unsigned long flags;
-	unsigned int ind;
+	int valid_num_sge;
 	void *wqe = NULL;
 	bool loopback;
 	int attr_mask;
 	u32 tmp_len;
-	int ret = 0;
 	u32 hr_op;
 	u8 *smac;
 	int nreq;
+	int ret;
 	int i;
 
-	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
-		     ibqp->qp_type != IB_QPT_GSI &&
-		     ibqp->qp_type != IB_QPT_UD)) {
-		dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
-		*bad_wr = wr;
-		return -EOPNOTSUPP;
-	}
-
-	if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
-		     qp->state == IB_QPS_RTR)) {
-		dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
-		*bad_wr = wr;
-		return -EINVAL;
-	}
-
 	spin_lock_irqsave(&qp->sq.lock, flags);
-	ind = qp->sq_next_wqe;
-	sge_ind = qp->next_sge;
+
+	ret = check_send_valid(hr_dev, qp);
+	if (ret) {
+		*bad_wr = wr;
+		nreq = 0;
+		goto out;
+	}
+
+	sge_idx = qp->next_sge;
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
 		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
@@ -279,6 +295,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 			goto out;
 		}
 
+		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
+
 		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
 			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
 				wr->num_sge, qp->sq.max_gs);
@@ -287,14 +305,20 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 			goto out;
 		}
 
-		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
-		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
-								      wr->wr_id;
-
+		wqe = get_send_wqe(qp, wqe_idx);
+		qp->sq.wrid[wqe_idx] = wr->wr_id;
 		owner_bit =
 		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+		valid_num_sge = 0;
 		tmp_len = 0;
 
+		for (i = 0; i < wr->num_sge; i++) {
+			if (likely(wr->sg_list[i].length)) {
+				tmp_len += wr->sg_list[i].length;
+				valid_num_sge++;
+			}
+		}
+
 		/* Corresponding to the QP type, wqe process separately */
 		if (ibqp->qp_type == IB_QPT_GSI) {
 			ud_sq_wqe = wqe;
@@ -330,9 +354,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 				       V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
 				       HNS_ROCE_V2_WQE_OP_SEND);
 
-			for (i = 0; i < wr->num_sge; i++)
-				tmp_len += wr->sg_list[i].length;
-
 			ud_sq_wqe->msg_len =
 			 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
 
@@ -368,12 +389,12 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 			roce_set_field(ud_sq_wqe->byte_16,
 				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
 				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
-				       wr->num_sge);
+				       valid_num_sge);
 
 			roce_set_field(ud_sq_wqe->byte_20,
 				     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
 				     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
-				     sge_ind & (qp->sge.sge_cnt - 1));
+				     sge_idx & (qp->sge.sge_cnt - 1));
 
 			roce_set_field(ud_sq_wqe->byte_24,
 				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
@@ -423,13 +444,10 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
 			       GID_LEN_V2);
 
-			set_extend_sge(qp, wr, &sge_ind);
-			ind++;
+			set_extend_sge(qp, wr, &sge_idx, valid_num_sge);
 		} else if (ibqp->qp_type == IB_QPT_RC) {
 			rc_sq_wqe = wqe;
 			memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
-			for (i = 0; i < wr->num_sge; i++)
-				tmp_len += wr->sg_list[i].length;
 
 			rc_sq_wqe->msg_len =
 			 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
@@ -550,15 +568,14 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 				roce_set_field(rc_sq_wqe->byte_16,
 					       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
 					       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
-					       wr->num_sge);
+					       valid_num_sge);
 			} else if (wr->opcode != IB_WR_REG_MR) {
 				ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
-							wqe, &sge_ind, bad_wr);
+							wqe, &sge_idx,
+							valid_num_sge, bad_wr);
 				if (ret)
 					goto out;
 			}
-
-			ind++;
 		} else {
 			dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
 			spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -588,8 +605,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 
 		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
 
-		qp->sq_next_wqe = ind;
-		qp->next_sge = sge_ind;
+		qp->next_sge = sge_idx;
 
 		if (qp->state == IB_QPS_ERR) {
 			attr_mask = IB_QP_STATE;
@@ -610,6 +626,17 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 	return ret;
 }
 
+static int check_recv_valid(struct hns_roce_dev *hr_dev,
+			    struct hns_roce_qp *hr_qp)
+{
+	if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
+		return -EIO;
+	else if (hr_qp->state == IB_QPS_RESET)
+		return -EINVAL;
+
+	return 0;
+}
+
 static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
 				 const struct ib_recv_wr *wr,
 				 const struct ib_recv_wr **bad_wr)
@@ -623,18 +650,18 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
 	unsigned long flags;
 	void *wqe = NULL;
 	int attr_mask;
-	int ret = 0;
+	u32 wqe_idx;
 	int nreq;
-	int ind;
+	int ret;
 	int i;
 
 	spin_lock_irqsave(&hr_qp->rq.lock, flags);
-	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
 
-	if (hr_qp->state == IB_QPS_RESET) {
-		spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
+	ret = check_recv_valid(hr_dev, hr_qp);
+	if (ret) {
 		*bad_wr = wr;
-		return -EINVAL;
+		nreq = 0;
+		goto out;
 	}
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
@@ -645,6 +672,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
 			goto out;
 		}
 
+		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
+
 		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
 			dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
 				wr->num_sge, hr_qp->rq.max_gs);
@@ -653,7 +682,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
 			goto out;
 		}
 
-		wqe = get_recv_wqe(hr_qp, ind);
+		wqe = get_recv_wqe(hr_qp, wqe_idx);
 		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
 		for (i = 0; i < wr->num_sge; i++) {
 			if (!wr->sg_list[i].length)
@@ -669,8 +698,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
 
 		/* rq support inline data */
 		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
-			sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
-			hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
+			sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
+			hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
 							       (u32)wr->num_sge;
 			for (i = 0; i < wr->num_sge; i++) {
 				sge_list[i].addr =
@@ -679,9 +708,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
 			}
 		}
 
-		hr_qp->rq.wrid[ind] = wr->wr_id;
-
-		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
+		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
 	}
 
 out:
@@ -1254,7 +1281,6 @@ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
 	}
 
 out:
-	dev_err(hr_dev->dev, "Func clear fail.\n");
 	hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
 }
 
@@ -1378,8 +1404,7 @@ static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
 	return 0;
 }
 
-static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
-						  int vf_id)
+static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, int vf_id)
 {
 	struct hns_roce_cmq_desc desc;
 	struct hns_roce_vf_switch *swt;
@@ -1388,13 +1413,12 @@ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
 	swt = (struct hns_roce_vf_switch *)desc.data;
 	hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
 	swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
-	roce_set_field(swt->fun_id,
-			VF_SWITCH_DATA_FUN_ID_VF_ID_M,
-			VF_SWITCH_DATA_FUN_ID_VF_ID_S,
-			vf_id);
+	roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M,
+		       VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id);
 	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
 	if (ret)
 		return ret;
+
 	desc.flag =
 		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
 	desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
@@ -1574,6 +1598,373 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
 	return hns_roce_cmq_send(hr_dev, &desc, 1);
 }
 
+static void set_default_caps(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_caps *caps = &hr_dev->caps;
+
+	caps->num_qps		= HNS_ROCE_V2_MAX_QP_NUM;
+	caps->max_wqes		= HNS_ROCE_V2_MAX_WQE_NUM;
+	caps->num_cqs		= HNS_ROCE_V2_MAX_CQ_NUM;
+	caps->num_srqs		= HNS_ROCE_V2_MAX_SRQ_NUM;
+	caps->min_cqes		= HNS_ROCE_MIN_CQE_NUM;
+	caps->max_cqes		= HNS_ROCE_V2_MAX_CQE_NUM;
+	caps->max_sq_sg		= HNS_ROCE_V2_MAX_SQ_SGE_NUM;
+	caps->max_extend_sg	= HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
+	caps->max_rq_sg		= HNS_ROCE_V2_MAX_RQ_SGE_NUM;
+	caps->max_sq_inline	= HNS_ROCE_V2_MAX_SQ_INLINE;
+	caps->num_uars		= HNS_ROCE_V2_UAR_NUM;
+	caps->phy_num_uars	= HNS_ROCE_V2_PHY_UAR_NUM;
+	caps->num_aeq_vectors	= HNS_ROCE_V2_AEQE_VEC_NUM;
+	caps->num_comp_vectors	= HNS_ROCE_V2_COMP_VEC_NUM;
+	caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
+	caps->num_mtpts		= HNS_ROCE_V2_MAX_MTPT_NUM;
+	caps->num_mtt_segs	= HNS_ROCE_V2_MAX_MTT_SEGS;
+	caps->num_cqe_segs	= HNS_ROCE_V2_MAX_CQE_SEGS;
+	caps->num_srqwqe_segs	= HNS_ROCE_V2_MAX_SRQWQE_SEGS;
+	caps->num_idx_segs	= HNS_ROCE_V2_MAX_IDX_SEGS;
+	caps->num_pds		= HNS_ROCE_V2_MAX_PD_NUM;
+	caps->max_qp_init_rdma	= HNS_ROCE_V2_MAX_QP_INIT_RDMA;
+	caps->max_qp_dest_rdma	= HNS_ROCE_V2_MAX_QP_DEST_RDMA;
+	caps->max_sq_desc_sz	= HNS_ROCE_V2_MAX_SQ_DESC_SZ;
+	caps->max_rq_desc_sz	= HNS_ROCE_V2_MAX_RQ_DESC_SZ;
+	caps->max_srq_desc_sz	= HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
+	caps->qpc_entry_sz	= HNS_ROCE_V2_QPC_ENTRY_SZ;
+	caps->irrl_entry_sz	= HNS_ROCE_V2_IRRL_ENTRY_SZ;
+	caps->trrl_entry_sz	= HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
+	caps->cqc_entry_sz	= HNS_ROCE_V2_CQC_ENTRY_SZ;
+	caps->srqc_entry_sz	= HNS_ROCE_V2_SRQC_ENTRY_SZ;
+	caps->mtpt_entry_sz	= HNS_ROCE_V2_MTPT_ENTRY_SZ;
+	caps->mtt_entry_sz	= HNS_ROCE_V2_MTT_ENTRY_SZ;
+	caps->idx_entry_sz	= HNS_ROCE_V2_IDX_ENTRY_SZ;
+	caps->cq_entry_sz	= HNS_ROCE_V2_CQE_ENTRY_SIZE;
+	caps->page_size_cap	= HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
+	caps->reserved_lkey	= 0;
+	caps->reserved_pds	= 0;
+	caps->reserved_mrws	= 1;
+	caps->reserved_uars	= 0;
+	caps->reserved_cqs	= 0;
+	caps->reserved_srqs	= 0;
+	caps->reserved_qps	= HNS_ROCE_V2_RSV_QPS;
+
+	caps->qpc_ba_pg_sz	= 0;
+	caps->qpc_buf_pg_sz	= 0;
+	caps->qpc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
+	caps->srqc_ba_pg_sz	= 0;
+	caps->srqc_buf_pg_sz	= 0;
+	caps->srqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
+	caps->cqc_ba_pg_sz	= 0;
+	caps->cqc_buf_pg_sz	= 0;
+	caps->cqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
+	caps->mpt_ba_pg_sz	= 0;
+	caps->mpt_buf_pg_sz	= 0;
+	caps->mpt_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
+	caps->mtt_ba_pg_sz	= 0;
+	caps->mtt_buf_pg_sz	= 0;
+	caps->mtt_hop_num	= HNS_ROCE_MTT_HOP_NUM;
+	caps->wqe_sq_hop_num	= HNS_ROCE_SQWQE_HOP_NUM;
+	caps->wqe_sge_hop_num	= HNS_ROCE_EXT_SGE_HOP_NUM;
+	caps->wqe_rq_hop_num	= HNS_ROCE_RQWQE_HOP_NUM;
+	caps->cqe_ba_pg_sz	= HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
+	caps->cqe_buf_pg_sz	= 0;
+	caps->cqe_hop_num	= HNS_ROCE_CQE_HOP_NUM;
+	caps->srqwqe_ba_pg_sz	= 0;
+	caps->srqwqe_buf_pg_sz	= 0;
+	caps->srqwqe_hop_num	= HNS_ROCE_SRQWQE_HOP_NUM;
+	caps->idx_ba_pg_sz	= 0;
+	caps->idx_buf_pg_sz	= 0;
+	caps->idx_hop_num	= HNS_ROCE_IDX_HOP_NUM;
+	caps->chunk_sz		= HNS_ROCE_V2_TABLE_CHUNK_SIZE;
+
+	caps->flags		= HNS_ROCE_CAP_FLAG_REREG_MR |
+				  HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
+				  HNS_ROCE_CAP_FLAG_RQ_INLINE |
+				  HNS_ROCE_CAP_FLAG_RECORD_DB |
+				  HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
+
+	caps->pkey_table_len[0] = 1;
+	caps->gid_table_len[0]	= HNS_ROCE_V2_GID_INDEX_NUM;
+	caps->ceqe_depth	= HNS_ROCE_V2_COMP_EQE_NUM;
+	caps->aeqe_depth	= HNS_ROCE_V2_ASYNC_EQE_NUM;
+	caps->local_ca_ack_delay = 0;
+	caps->max_mtu = IB_MTU_4096;
+
+	caps->max_srq_wrs	= HNS_ROCE_V2_MAX_SRQ_WR;
+	caps->max_srq_sges	= HNS_ROCE_V2_MAX_SRQ_SGE;
+
+	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
+		caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
+			       HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
+			       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
+
+		caps->num_qpc_timer	  = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
+		caps->qpc_timer_entry_sz  = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
+		caps->qpc_timer_ba_pg_sz  = 0;
+		caps->qpc_timer_buf_pg_sz = 0;
+		caps->qpc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
+		caps->num_cqc_timer	  = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
+		caps->cqc_timer_entry_sz  = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
+		caps->cqc_timer_ba_pg_sz  = 0;
+		caps->cqc_timer_buf_pg_sz = 0;
+		caps->cqc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
+
+		caps->sccc_entry_sz	  = HNS_ROCE_V2_SCCC_ENTRY_SZ;
+		caps->sccc_ba_pg_sz	  = 0;
+		caps->sccc_buf_pg_sz	  = 0;
+		caps->sccc_hop_num	  = HNS_ROCE_SCCC_HOP_NUM;
+	}
+}
+
+static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
+		       int *buf_page_size, int *bt_page_size, u32 hem_type)
+{
+	u64 obj_per_chunk;
+	int bt_chunk_size = 1 << PAGE_SHIFT;
+	int buf_chunk_size = 1 << PAGE_SHIFT;
+	int obj_per_chunk_default = buf_chunk_size / obj_size;
+
+	*buf_page_size = 0;
+	*bt_page_size = 0;
+
+	switch (hop_num) {
+	case 3:
+		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
+				(bt_chunk_size / BA_BYTE_LEN) *
+				(bt_chunk_size / BA_BYTE_LEN) *
+				 obj_per_chunk_default;
+		break;
+	case 2:
+		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
+				(bt_chunk_size / BA_BYTE_LEN) *
+				 obj_per_chunk_default;
+		break;
+	case 1:
+		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
+				obj_per_chunk_default;
+		break;
+	case HNS_ROCE_HOP_NUM_0:
+		obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
+		break;
+	default:
+		pr_err("Table %d not support hop_num = %d!\n", hem_type,
+			hop_num);
+		return;
+	}
+
+	if (hem_type >= HEM_TYPE_MTT)
+		*bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
+	else
+		*buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
+}
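
A worked example of the page-size arithmetic above, assuming illustrative values (4 KB chunks, BA_BYTE_LEN of 8, 256-byte entries, a single context BT entry, hop_num of 2) that are not taken from this patch:

/*
 * Hypothetical numbers: bt_chunk_size = buf_chunk_size = 4096,
 * BA_BYTE_LEN = 8, obj_size = 256, ctx_bt_num = 1, hop_num = 2:
 *
 *   obj_per_chunk_default = 4096 / 256              = 16
 *   obj_per_chunk         = 1 * 512 * 512 * 16      = 4194304
 *   *buf_page_size        = ilog2(DIV_ROUND_UP(obj_num, 4194304))
 *
 * so up to ~4.2 million objects resolve to a page-size order of 0,
 * i.e. plain PAGE_SIZE pages are sufficient.
 */
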
+
+static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
+	struct hns_roce_caps *caps = &hr_dev->caps;
+	struct hns_roce_query_pf_caps_a *resp_a;
+	struct hns_roce_query_pf_caps_b *resp_b;
+	struct hns_roce_query_pf_caps_c *resp_c;
+	struct hns_roce_query_pf_caps_d *resp_d;
+	struct hns_roce_query_pf_caps_e *resp_e;
+	int ctx_hop_num;
+	int pbl_hop_num;
+	int ret;
+	int i;
+
+	for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
+		hns_roce_cmq_setup_basic_desc(&desc[i],
+					      HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
+					      true);
+		if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
+			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+		else
+			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+	}
+
+	ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
+	if (ret)
+		return ret;
+
+	resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
+	resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
+	resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
+	resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
+	resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
+
+	caps->local_ca_ack_delay     = resp_a->local_ca_ack_delay;
+	caps->max_sq_sg		     = le16_to_cpu(resp_a->max_sq_sg);
+	caps->max_sq_inline	     = le16_to_cpu(resp_a->max_sq_inline);
+	caps->max_rq_sg		     = le16_to_cpu(resp_a->max_rq_sg);
+	caps->max_extend_sg	     = le32_to_cpu(resp_a->max_extend_sg);
+	caps->num_qpc_timer	     = le16_to_cpu(resp_a->num_qpc_timer);
+	caps->num_cqc_timer	     = le16_to_cpu(resp_a->num_cqc_timer);
+	caps->max_srq_sges	     = le16_to_cpu(resp_a->max_srq_sges);
+	caps->num_aeq_vectors	     = resp_a->num_aeq_vectors;
+	caps->num_other_vectors	     = resp_a->num_other_vectors;
+	caps->max_sq_desc_sz	     = resp_a->max_sq_desc_sz;
+	caps->max_rq_desc_sz	     = resp_a->max_rq_desc_sz;
+	caps->max_srq_desc_sz	     = resp_a->max_srq_desc_sz;
+	caps->cq_entry_sz	     = resp_a->cq_entry_sz;
+
+	caps->mtpt_entry_sz	     = resp_b->mtpt_entry_sz;
+	caps->irrl_entry_sz	     = resp_b->irrl_entry_sz;
+	caps->trrl_entry_sz	     = resp_b->trrl_entry_sz;
+	caps->cqc_entry_sz	     = resp_b->cqc_entry_sz;
+	caps->srqc_entry_sz	     = resp_b->srqc_entry_sz;
+	caps->idx_entry_sz	     = resp_b->idx_entry_sz;
+	caps->sccc_entry_sz	     = resp_b->scc_ctx_entry_sz;
+	caps->max_mtu		     = resp_b->max_mtu;
+	caps->qpc_entry_sz	     = le16_to_cpu(resp_b->qpc_entry_sz);
+	caps->min_cqes		     = resp_b->min_cqes;
+	caps->min_wqes		     = resp_b->min_wqes;
+	caps->page_size_cap	     = le32_to_cpu(resp_b->page_size_cap);
+	caps->pkey_table_len[0]	     = resp_b->pkey_table_len;
+	caps->phy_num_uars	     = resp_b->phy_num_uars;
+	ctx_hop_num		     = resp_b->ctx_hop_num;
+	pbl_hop_num		     = resp_b->pbl_hop_num;
+
+	caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds,
+					    V2_QUERY_PF_CAPS_C_NUM_PDS_M,
+					    V2_QUERY_PF_CAPS_C_NUM_PDS_S);
+	caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
+				     V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
+				     V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
+	caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
+					    V2_QUERY_PF_CAPS_C_NUM_CQS_M,
+					    V2_QUERY_PF_CAPS_C_NUM_CQS_S);
+	caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
+						V2_QUERY_PF_CAPS_C_MAX_GID_M,
+						V2_QUERY_PF_CAPS_C_MAX_GID_S);
+	caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
+					     V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
+					     V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
+	caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws,
+					      V2_QUERY_PF_CAPS_C_NUM_MRWS_M,
+					      V2_QUERY_PF_CAPS_C_NUM_MRWS_S);
+	caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps,
+					    V2_QUERY_PF_CAPS_C_NUM_QPS_M,
+					    V2_QUERY_PF_CAPS_C_NUM_QPS_S);
+	caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps,
+						V2_QUERY_PF_CAPS_C_MAX_ORD_M,
+						V2_QUERY_PF_CAPS_C_MAX_ORD_S);
+	caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
+	caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
+	caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
+					     V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
+					     V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
+	caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
+	caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
+					       V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
+					       V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
+	caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
+						V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
+						V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
+	caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
+					       V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
+					       V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
+	caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
+					    V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M,
+					    V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S);
+	caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
+					    V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M,
+					    V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S);
+	caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds,
+					    V2_QUERY_PF_CAPS_D_RSV_PDS_M,
+					    V2_QUERY_PF_CAPS_D_RSV_PDS_S);
+	caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds,
+					     V2_QUERY_PF_CAPS_D_NUM_UARS_M,
+					     V2_QUERY_PF_CAPS_D_NUM_UARS_S);
+	caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps,
+					    V2_QUERY_PF_CAPS_D_RSV_QPS_M,
+					    V2_QUERY_PF_CAPS_D_RSV_QPS_S);
+	caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps,
+					     V2_QUERY_PF_CAPS_D_RSV_UARS_M,
+					     V2_QUERY_PF_CAPS_D_RSV_UARS_S);
+	caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
+					     V2_QUERY_PF_CAPS_E_RSV_MRWS_M,
+					     V2_QUERY_PF_CAPS_E_RSV_MRWS_S);
+	caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
+					 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M,
+					 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S);
+	caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs,
+					    V2_QUERY_PF_CAPS_E_RSV_CQS_M,
+					    V2_QUERY_PF_CAPS_E_RSV_CQS_S);
+	caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs,
+					     V2_QUERY_PF_CAPS_E_RSV_SRQS_M,
+					     V2_QUERY_PF_CAPS_E_RSV_SRQS_S);
+	caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey,
+					     V2_QUERY_PF_CAPS_E_RSV_LKEYS_M,
+					     V2_QUERY_PF_CAPS_E_RSV_LKEYS_S);
+	caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
+	caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
+	caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
+	caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
+
+	caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
+	caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
+	caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
+	caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
+	caps->mtt_ba_pg_sz = 0;
+	caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
+	caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
+	caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
+
+	caps->qpc_hop_num = ctx_hop_num;
+	caps->srqc_hop_num = ctx_hop_num;
+	caps->cqc_hop_num = ctx_hop_num;
+	caps->mpt_hop_num = ctx_hop_num;
+	caps->mtt_hop_num = pbl_hop_num;
+	caps->cqe_hop_num = pbl_hop_num;
+	caps->srqwqe_hop_num = pbl_hop_num;
+	caps->idx_hop_num = pbl_hop_num;
+	caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
+					  V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M,
+					  V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S);
+	caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
+					  V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M,
+					  V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S);
+	caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
+					  V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
+					  V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);
+
+	calc_pg_sz(caps->num_qps, caps->qpc_entry_sz, caps->qpc_hop_num,
+		   caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
+		   HEM_TYPE_QPC);
+	calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
+		   caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
+		   HEM_TYPE_MTPT);
+	calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
+		   caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
+		   HEM_TYPE_CQC);
+	calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, caps->srqc_hop_num,
+		   caps->srqc_bt_num, &caps->srqc_buf_pg_sz,
+		   &caps->srqc_ba_pg_sz, HEM_TYPE_SRQC);
+
+	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
+		caps->sccc_hop_num = ctx_hop_num;
+		caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
+		caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
+
+		calc_pg_sz(caps->num_qps, caps->sccc_entry_sz,
+			   caps->sccc_hop_num, caps->sccc_bt_num,
+			   &caps->sccc_buf_pg_sz, &caps->sccc_ba_pg_sz,
+			   HEM_TYPE_SCCC);
+		calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
+			   caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
+			   &caps->cqc_timer_buf_pg_sz,
+			   &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
+	}
+
+	calc_pg_sz(caps->num_cqe_segs, caps->mtt_entry_sz, caps->cqe_hop_num,
+		   1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
+	calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
+		   caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
+		   &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
+	calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num,
+		   1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
+
+	return 0;
+}
+
 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_caps *caps = &hr_dev->caps;
@@ -1638,130 +2029,22 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
 	hr_dev->vendor_part_id = hr_dev->pci_dev->device;
 	hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
 
-	caps->num_qps		= HNS_ROCE_V2_MAX_QP_NUM;
-	caps->max_wqes		= HNS_ROCE_V2_MAX_WQE_NUM;
-	caps->num_cqs		= HNS_ROCE_V2_MAX_CQ_NUM;
-	caps->num_srqs		= HNS_ROCE_V2_MAX_SRQ_NUM;
-	caps->min_cqes		= HNS_ROCE_MIN_CQE_NUM;
-	caps->max_cqes		= HNS_ROCE_V2_MAX_CQE_NUM;
-	caps->max_srqwqes	= HNS_ROCE_V2_MAX_SRQWQE_NUM;
-	caps->max_sq_sg		= HNS_ROCE_V2_MAX_SQ_SGE_NUM;
-	caps->max_extend_sg	= HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
-	caps->max_rq_sg		= HNS_ROCE_V2_MAX_RQ_SGE_NUM;
-	caps->max_sq_inline	= HNS_ROCE_V2_MAX_SQ_INLINE;
-	caps->max_srq_sg	= HNS_ROCE_V2_MAX_SRQ_SGE_NUM;
-	caps->num_uars		= HNS_ROCE_V2_UAR_NUM;
-	caps->phy_num_uars	= HNS_ROCE_V2_PHY_UAR_NUM;
-	caps->num_aeq_vectors	= HNS_ROCE_V2_AEQE_VEC_NUM;
-	caps->num_comp_vectors	= HNS_ROCE_V2_COMP_VEC_NUM;
-	caps->num_other_vectors	= HNS_ROCE_V2_ABNORMAL_VEC_NUM;
-	caps->num_mtpts		= HNS_ROCE_V2_MAX_MTPT_NUM;
 	caps->num_mtt_segs	= HNS_ROCE_V2_MAX_MTT_SEGS;
 	caps->num_cqe_segs	= HNS_ROCE_V2_MAX_CQE_SEGS;
 	caps->num_srqwqe_segs	= HNS_ROCE_V2_MAX_SRQWQE_SEGS;
 	caps->num_idx_segs	= HNS_ROCE_V2_MAX_IDX_SEGS;
-	caps->num_pds		= HNS_ROCE_V2_MAX_PD_NUM;
-	caps->max_qp_init_rdma	= HNS_ROCE_V2_MAX_QP_INIT_RDMA;
-	caps->max_qp_dest_rdma	= HNS_ROCE_V2_MAX_QP_DEST_RDMA;
-	caps->max_sq_desc_sz	= HNS_ROCE_V2_MAX_SQ_DESC_SZ;
-	caps->max_rq_desc_sz	= HNS_ROCE_V2_MAX_RQ_DESC_SZ;
-	caps->max_srq_desc_sz	= HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
-	caps->qpc_entry_sz	= HNS_ROCE_V2_QPC_ENTRY_SZ;
-	caps->irrl_entry_sz	= HNS_ROCE_V2_IRRL_ENTRY_SZ;
-	caps->trrl_entry_sz	= HNS_ROCE_V2_TRRL_ENTRY_SZ;
-	caps->cqc_entry_sz	= HNS_ROCE_V2_CQC_ENTRY_SZ;
-	caps->srqc_entry_sz	= HNS_ROCE_V2_SRQC_ENTRY_SZ;
-	caps->mtpt_entry_sz	= HNS_ROCE_V2_MTPT_ENTRY_SZ;
-	caps->mtt_entry_sz	= HNS_ROCE_V2_MTT_ENTRY_SZ;
-	caps->idx_entry_sz	= 4;
-	caps->cq_entry_sz	= HNS_ROCE_V2_CQE_ENTRY_SIZE;
-	caps->page_size_cap	= HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
-	caps->reserved_lkey	= 0;
-	caps->reserved_pds	= 0;
-	caps->reserved_mrws	= 1;
-	caps->reserved_uars	= 0;
-	caps->reserved_cqs	= 0;
-	caps->reserved_srqs	= 0;
-	caps->reserved_qps	= HNS_ROCE_V2_RSV_QPS;
 
-	caps->qpc_ba_pg_sz	= 0;
-	caps->qpc_buf_pg_sz	= 0;
-	caps->qpc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
-	caps->srqc_ba_pg_sz	= 0;
-	caps->srqc_buf_pg_sz	= 0;
-	caps->srqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
-	caps->cqc_ba_pg_sz	= 0;
-	caps->cqc_buf_pg_sz	= 0;
-	caps->cqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
-	caps->mpt_ba_pg_sz	= 0;
-	caps->mpt_buf_pg_sz	= 0;
-	caps->mpt_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
-	caps->pbl_ba_pg_sz	= 2;
+	caps->pbl_ba_pg_sz	= HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
 	caps->pbl_buf_pg_sz	= 0;
 	caps->pbl_hop_num	= HNS_ROCE_PBL_HOP_NUM;
-	caps->mtt_ba_pg_sz	= 0;
-	caps->mtt_buf_pg_sz	= 0;
-	caps->mtt_hop_num	= HNS_ROCE_MTT_HOP_NUM;
-	caps->wqe_sq_hop_num	= 2;
-	caps->wqe_sge_hop_num	= 1;
-	caps->wqe_rq_hop_num	= 2;
-	caps->cqe_ba_pg_sz	= 6;
-	caps->cqe_buf_pg_sz	= 0;
-	caps->cqe_hop_num	= HNS_ROCE_CQE_HOP_NUM;
-	caps->srqwqe_ba_pg_sz	= 0;
-	caps->srqwqe_buf_pg_sz	= 0;
-	caps->srqwqe_hop_num	= HNS_ROCE_SRQWQE_HOP_NUM;
-	caps->idx_ba_pg_sz	= 0;
-	caps->idx_buf_pg_sz	= 0;
-	caps->idx_hop_num	= HNS_ROCE_IDX_HOP_NUM;
 	caps->eqe_ba_pg_sz	= 0;
 	caps->eqe_buf_pg_sz	= 0;
 	caps->eqe_hop_num	= HNS_ROCE_EQE_HOP_NUM;
 	caps->tsq_buf_pg_sz	= 0;
-	caps->chunk_sz		= HNS_ROCE_V2_TABLE_CHUNK_SIZE;
 
-	caps->flags		= HNS_ROCE_CAP_FLAG_REREG_MR |
-				  HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
-				  HNS_ROCE_CAP_FLAG_RQ_INLINE |
-				  HNS_ROCE_CAP_FLAG_RECORD_DB |
-				  HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
-
-	if (hr_dev->pci_dev->revision == 0x21)
-		caps->flags |= HNS_ROCE_CAP_FLAG_MW |
-			       HNS_ROCE_CAP_FLAG_FRMR;
-
-	caps->pkey_table_len[0] = 1;
-	caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
-	caps->ceqe_depth	= HNS_ROCE_V2_COMP_EQE_NUM;
-	caps->aeqe_depth	= HNS_ROCE_V2_ASYNC_EQE_NUM;
-	caps->local_ca_ack_delay = 0;
-	caps->max_mtu = IB_MTU_4096;
-
-	caps->max_srqs		= HNS_ROCE_V2_MAX_SRQ;
-	caps->max_srq_wrs	= HNS_ROCE_V2_MAX_SRQ_WR;
-	caps->max_srq_sges	= HNS_ROCE_V2_MAX_SRQ_SGE;
-
-	if (hr_dev->pci_dev->revision == 0x21) {
-		caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
-			       HNS_ROCE_CAP_FLAG_SRQ |
-			       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
-
-		caps->num_qpc_timer	  = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
-		caps->qpc_timer_entry_sz  = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
-		caps->qpc_timer_ba_pg_sz  = 0;
-		caps->qpc_timer_buf_pg_sz = 0;
-		caps->qpc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
-		caps->num_cqc_timer	  = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
-		caps->cqc_timer_entry_sz  = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
-		caps->cqc_timer_ba_pg_sz  = 0;
-		caps->cqc_timer_buf_pg_sz = 0;
-		caps->cqc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
-
-		caps->sccc_entry_sz	= HNS_ROCE_V2_SCCC_ENTRY_SZ;
-		caps->sccc_ba_pg_sz	= 0;
-		caps->sccc_buf_pg_sz    = 0;
-		caps->sccc_hop_num	= HNS_ROCE_SCCC_HOP_NUM;
-	}
+	ret = hns_roce_query_pf_caps(hr_dev);
+	if (ret)
+		set_default_caps(hr_dev);
 
 	ret = hns_roce_v2_set_bt(hr_dev);
 	if (ret)
@@ -1818,37 +2101,32 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
 			req_a->base_addr_h =
 				cpu_to_le32(link_tbl->table.map >> 32);
 			roce_set_field(req_a->depth_pgsz_init_en,
-				       CFG_LLM_QUE_DEPTH_M,
-				       CFG_LLM_QUE_DEPTH_S,
+				       CFG_LLM_QUE_DEPTH_M, CFG_LLM_QUE_DEPTH_S,
 				       link_tbl->npages);
 			roce_set_field(req_a->depth_pgsz_init_en,
-				       CFG_LLM_QUE_PGSZ_M,
-				       CFG_LLM_QUE_PGSZ_S,
+				       CFG_LLM_QUE_PGSZ_M, CFG_LLM_QUE_PGSZ_S,
 				       link_tbl->pg_sz);
 			req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
 			req_a->head_ba_h_nxtptr =
 				cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
-			roce_set_field(req_a->head_ptr,
-				       CFG_LLM_HEAD_PTR_M,
+			roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M,
 				       CFG_LLM_HEAD_PTR_S, 0);
 		} else {
 			req_b->tail_ba_l =
 				cpu_to_le32(entry[page_num - 1].blk_ba0);
-			roce_set_field(req_b->tail_ba_h,
-				       CFG_LLM_TAIL_BA_H_M,
+			roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
 				       CFG_LLM_TAIL_BA_H_S,
 				       entry[page_num - 1].blk_ba1_nxt_ptr &
-				       HNS_ROCE_LINK_TABLE_BA1_M);
-			roce_set_field(req_b->tail_ptr,
-				       CFG_LLM_TAIL_PTR_M,
+					       HNS_ROCE_LINK_TABLE_BA1_M);
+			roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M,
 				       CFG_LLM_TAIL_PTR_S,
 				       (entry[page_num - 2].blk_ba1_nxt_ptr &
-				       HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
-				       HNS_ROCE_LINK_TABLE_NXT_PTR_S);
+					HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
+					       HNS_ROCE_LINK_TABLE_NXT_PTR_S);
 		}
 	}
-	roce_set_field(req_a->depth_pgsz_init_en,
-		       CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);
+	roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
+		       CFG_LLM_INIT_EN_S, 1);
 
 	return hns_roce_cmq_send(hr_dev, desc, 2);
 }
@@ -2141,11 +2419,9 @@ static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
 
 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
 
-	roce_set_field(sgid_tb->table_idx_rsv,
-		       CFG_SGID_TB_TABLE_IDX_M,
+	roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M,
 		       CFG_SGID_TB_TABLE_IDX_S, gid_index);
-	roce_set_field(sgid_tb->vf_sgid_type_rsv,
-		       CFG_SGID_TB_VF_SGID_TYPE_M,
+	roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
 		       CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
 
 	p = (u32 *)&gid->raw[0];
@@ -2416,11 +2692,10 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
 		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
 		       V2_MPT_BYTE_4_PD_S, mw->pdn);
-	roce_set_field(mpt_entry->byte_4_pd_hop_st,
-		       V2_MPT_BYTE_4_PBL_HOP_NUM_M,
+	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
 		       V2_MPT_BYTE_4_PBL_HOP_NUM_S,
-		       mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ?
-		       0 : mw->pbl_hop_num);
+		       mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
+							       mw->pbl_hop_num);
 	roce_set_field(mpt_entry->byte_4_pd_hop_st,
 		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
 		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
@@ -2561,8 +2836,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
 	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
 		       V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
 	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
-		       V2_CQC_BYTE_4_SHIFT_S,
-		       ilog2(hr_cq->cq_depth));
+		       V2_CQC_BYTE_4_SHIFT_S, ilog2(hr_cq->cq_depth));
 	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
 		       V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
 
@@ -2687,6 +2961,55 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
 	return 0;
 }
 
+static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
+		   int num_entries, struct ib_wc *wc)
+{
+	unsigned int left;
+	int npolled = 0;
+
+	left = wq->head - wq->tail;
+	if (left == 0)
+		return 0;
+
+	left = min_t(unsigned int, (unsigned int)num_entries, left);
+	while (npolled < left) {
+		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+		wc->status = IB_WC_WR_FLUSH_ERR;
+		wc->vendor_err = 0;
+		wc->qp = &hr_qp->ibqp;
+
+		wq->tail++;
+		wc++;
+		npolled++;
+	}
+
+	return npolled;
+}
+
+static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
+				  struct ib_wc *wc)
+{
+	struct hns_roce_qp *hr_qp;
+	int npolled = 0;
+
+	list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
+		npolled += sw_comp(hr_qp, &hr_qp->sq,
+				   num_entries - npolled, wc + npolled);
+		if (npolled >= num_entries)
+			goto out;
+	}
+
+	list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
+		npolled += sw_comp(hr_qp, &hr_qp->rq,
+				   num_entries - npolled, wc + npolled);
+		if (npolled >= num_entries)
+			goto out;
+	}
+
+out:
+	return npolled;
+}
+
 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
 				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
 {
@@ -2967,6 +3290,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
 			       struct ib_wc *wc)
 {
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
 	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
 	struct hns_roce_qp *cur_qp = NULL;
 	unsigned long flags;
@@ -2974,6 +3298,18 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
 
 	spin_lock_irqsave(&hr_cq->lock, flags);
 
+	/*
+	 * When the device starts to reset, the state is RST_DOWN. At this time,
+	 * there may still be some valid CQEs in the hardware that have not been
+	 * polled, so it is not allowed to switch to software polling mode
+	 * immediately. Once the state changes to UNINIT, no CQEs remain in the
+	 * hardware and it is safe to switch to software polling mode.
+	 */
+	if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
+		npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
+		goto out;
+	}
+
 	for (npolled = 0; npolled < num_entries; ++npolled) {
 		if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
 			break;
@@ -2985,6 +3321,7 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
 		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
 	}
 
+out:
 	spin_unlock_irqrestore(&hr_cq->lock, flags);
 
 	return npolled;
@@ -3159,8 +3496,6 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
 }
 
 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
-				 enum ib_qp_state cur_state,
-				 enum ib_qp_state new_state,
 				 struct hns_roce_v2_qp_context *context,
 				 struct hns_roce_qp *hr_qp)
 {
@@ -3210,6 +3545,9 @@ static void set_access_flags(struct hns_roce_qp *hr_qp,
 	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
 		     !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
 	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S,
+		     !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, 0);
 }
 
 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
@@ -3577,6 +3915,12 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
 			     IB_ACCESS_REMOTE_ATOMIC));
 		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
 			     0);
+		roce_set_bit(context->byte_76_srqn_op_en,
+			     V2_QPC_BYTE_76_EXT_ATE_S,
+			     !!(attr->qp_access_flags &
+				IB_ACCESS_REMOTE_ATOMIC));
+		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+			     V2_QPC_BYTE_76_EXT_ATE_S, 0);
 	} else {
 		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
 			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
@@ -3592,6 +3936,11 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
 			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
 		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
 			     0);
+		roce_set_bit(context->byte_76_srqn_op_en,
+			     V2_QPC_BYTE_76_EXT_ATE_S,
+			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
+		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+			     V2_QPC_BYTE_76_EXT_ATE_S, 0);
 	}
 
 	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
@@ -3838,13 +4187,11 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 	/* Configure GID index */
 	port_num = rdma_ah_get_port_num(&attr->ah_attr);
 	roce_set_field(context->byte_20_smac_sgid_idx,
-		       V2_QPC_BYTE_20_SGID_IDX_M,
-		       V2_QPC_BYTE_20_SGID_IDX_S,
+		       V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
 		       hns_get_gid_index(hr_dev, port_num - 1,
 					 grh->sgid_index));
 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
-		       V2_QPC_BYTE_20_SGID_IDX_M,
-		       V2_QPC_BYTE_20_SGID_IDX_S, 0);
+		       V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
 	memcpy(&(context->dmac), dmac, sizeof(u32));
 	roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
 		       V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
@@ -4234,8 +4581,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
 
 		roce_set_field(context->byte_212_lsn,
 			       V2_QPC_BYTE_212_RETRY_CNT_M,
-			       V2_QPC_BYTE_212_RETRY_CNT_S,
-			       attr->retry_cnt);
+			       V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
 		roce_set_field(qpc_mask->byte_212_lsn,
 			       V2_QPC_BYTE_212_RETRY_CNT_M,
 			       V2_QPC_BYTE_212_RETRY_CNT_S, 0);
@@ -4443,7 +4789,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 		       V2_QPC_BYTE_60_QP_ST_S, 0);
 
 	/* SW pass context to HW */
-	ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state, ctx, hr_qp);
+	ret = hns_roce_v2_qp_modify(hr_dev, ctx, hr_qp);
 	if (ret) {
 		dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
 		goto out;
@@ -4464,7 +4810,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 		hr_qp->rq.tail = 0;
 		hr_qp->sq.head = 0;
 		hr_qp->sq.tail = 0;
-		hr_qp->sq_next_wqe = 0;
 		hr_qp->next_sge = 0;
 		if (hr_qp->rq.wqe_cnt)
 			*hr_qp->rdb.db_record = 0;
@@ -4649,6 +4994,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
 {
 	struct hns_roce_cq *send_cq, *recv_cq;
 	struct ib_device *ibdev = &hr_dev->ib_dev;
+	unsigned long flags;
 	int ret = 0;
 
 	if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
@@ -4659,21 +5005,32 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
 			ibdev_err(ibdev, "modify QP to Reset failed.\n");
 	}
 
-	send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
-	recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
+	send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
+	recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
 
+	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
 	hns_roce_lock_cqs(send_cq, recv_cq);
 
+	list_del(&hr_qp->node);
+	list_del(&hr_qp->sq_node);
+	list_del(&hr_qp->rq_node);
+
 	if (!udata) {
-		__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
-				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
-		if (send_cq != recv_cq)
+		if (recv_cq)
+			__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
+					       (hr_qp->ibqp.srq ?
+						to_hr_srq(hr_qp->ibqp.srq) :
+						NULL));
+
+		if (send_cq && send_cq != recv_cq)
 			__hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
+
 	}
 
 	hns_roce_qp_remove(hr_dev, hr_qp);
 
 	hns_roce_unlock_cqs(send_cq, recv_cq);
+	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
 
 	hns_roce_qp_free(hr_dev, hr_qp);
 
@@ -5155,8 +5512,7 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
 		 */
 		dma_rmb();
 
-		cqn = roce_get_field(ceqe->comp,
-				     HNS_ROCE_V2_CEQE_COMP_CQN_M,
+		cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M,
 				     HNS_ROCE_V2_CEQE_COMP_CQN_S);
 
 		hns_roce_cq_completion(hr_dev, cqn);
@@ -5409,126 +5765,98 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
 		eq->eqe_ba = eq->l0_dma;
 
 	/* set eqc state */
-	roce_set_field(eqc->byte_4,
-		       HNS_ROCE_EQC_EQ_ST_M,
-		       HNS_ROCE_EQC_EQ_ST_S,
+	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S,
 		       HNS_ROCE_V2_EQ_STATE_VALID);
 
 	/* set eqe hop num */
-	roce_set_field(eqc->byte_4,
-		       HNS_ROCE_EQC_HOP_NUM_M,
+	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_HOP_NUM_M,
 		       HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
 
 	/* set eqc over_ignore */
-	roce_set_field(eqc->byte_4,
-		       HNS_ROCE_EQC_OVER_IGNORE_M,
+	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_OVER_IGNORE_M,
 		       HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
 
 	/* set eqc coalesce */
-	roce_set_field(eqc->byte_4,
-		       HNS_ROCE_EQC_COALESCE_M,
+	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_COALESCE_M,
 		       HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
 
 	/* set eqc arm_state */
-	roce_set_field(eqc->byte_4,
-		       HNS_ROCE_EQC_ARM_ST_M,
+	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_ARM_ST_M,
 		       HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
 
 	/* set eqn */
-	roce_set_field(eqc->byte_4,
-		       HNS_ROCE_EQC_EQN_M,
-		       HNS_ROCE_EQC_EQN_S, eq->eqn);
+	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQN_M, HNS_ROCE_EQC_EQN_S,
+		       eq->eqn);
 
 	/* set eqe_cnt */
-	roce_set_field(eqc->byte_4,
-		       HNS_ROCE_EQC_EQE_CNT_M,
-		       HNS_ROCE_EQC_EQE_CNT_S,
-		       HNS_ROCE_EQ_INIT_EQE_CNT);
+	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQE_CNT_M,
+		       HNS_ROCE_EQC_EQE_CNT_S, HNS_ROCE_EQ_INIT_EQE_CNT);
 
 	/* set eqe_ba_pg_sz */
-	roce_set_field(eqc->byte_8,
-		       HNS_ROCE_EQC_BA_PG_SZ_M,
+	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
 		       HNS_ROCE_EQC_BA_PG_SZ_S,
 		       eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
 
 	/* set eqe_buf_pg_sz */
-	roce_set_field(eqc->byte_8,
-		       HNS_ROCE_EQC_BUF_PG_SZ_M,
+	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
 		       HNS_ROCE_EQC_BUF_PG_SZ_S,
 		       eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
 
 	/* set eq_producer_idx */
-	roce_set_field(eqc->byte_8,
-		       HNS_ROCE_EQC_PROD_INDX_M,
-		       HNS_ROCE_EQC_PROD_INDX_S,
-		       HNS_ROCE_EQ_INIT_PROD_IDX);
+	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
+		       HNS_ROCE_EQC_PROD_INDX_S, HNS_ROCE_EQ_INIT_PROD_IDX);
 
 	/* set eq_max_cnt */
-	roce_set_field(eqc->byte_12,
-		       HNS_ROCE_EQC_MAX_CNT_M,
+	roce_set_field(eqc->byte_12, HNS_ROCE_EQC_MAX_CNT_M,
 		       HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
 
 	/* set eq_period */
-	roce_set_field(eqc->byte_12,
-		       HNS_ROCE_EQC_PERIOD_M,
+	roce_set_field(eqc->byte_12, HNS_ROCE_EQC_PERIOD_M,
 		       HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
 
 	/* set eqe_report_timer */
-	roce_set_field(eqc->eqe_report_timer,
-		       HNS_ROCE_EQC_REPORT_TIMER_M,
+	roce_set_field(eqc->eqe_report_timer, HNS_ROCE_EQC_REPORT_TIMER_M,
 		       HNS_ROCE_EQC_REPORT_TIMER_S,
 		       HNS_ROCE_EQ_INIT_REPORT_TIMER);
 
 	/* set eqe_ba [34:3] */
-	roce_set_field(eqc->eqe_ba0,
-		       HNS_ROCE_EQC_EQE_BA_L_M,
+	roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
 		       HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
 
 	/* set eqe_ba [64:35] */
-	roce_set_field(eqc->eqe_ba1,
-		       HNS_ROCE_EQC_EQE_BA_H_M,
+	roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
 		       HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
 
 	/* set eq shift */
-	roce_set_field(eqc->byte_28,
-		       HNS_ROCE_EQC_SHIFT_M,
-		       HNS_ROCE_EQC_SHIFT_S, eq->shift);
+	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
+		       eq->shift);
 
 	/* set eq MSI_IDX */
-	roce_set_field(eqc->byte_28,
-		       HNS_ROCE_EQC_MSI_INDX_M,
-		       HNS_ROCE_EQC_MSI_INDX_S,
-		       HNS_ROCE_EQ_INIT_MSI_IDX);
+	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_MSI_INDX_M,
+		       HNS_ROCE_EQC_MSI_INDX_S, HNS_ROCE_EQ_INIT_MSI_IDX);
 
 	/* set cur_eqe_ba [27:12] */
-	roce_set_field(eqc->byte_28,
-		       HNS_ROCE_EQC_CUR_EQE_BA_L_M,
+	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
 		       HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
 
 	/* set cur_eqe_ba [59:28] */
-	roce_set_field(eqc->byte_32,
-		       HNS_ROCE_EQC_CUR_EQE_BA_M_M,
+	roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
 		       HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
 
 	/* set cur_eqe_ba [63:60] */
-	roce_set_field(eqc->byte_36,
-		       HNS_ROCE_EQC_CUR_EQE_BA_H_M,
+	roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
 		       HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
 
 	/* set eq consumer idx */
-	roce_set_field(eqc->byte_36,
-		       HNS_ROCE_EQC_CONS_INDX_M,
-		       HNS_ROCE_EQC_CONS_INDX_S,
-		       HNS_ROCE_EQ_INIT_CONS_IDX);
+	roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
+		       HNS_ROCE_EQC_CONS_INDX_S, HNS_ROCE_EQ_INIT_CONS_IDX);
 
 	/* set nex_eqe_ba[43:12] */
-	roce_set_field(eqc->nxt_eqe_ba0,
-		       HNS_ROCE_EQC_NXT_EQE_BA_L_M,
+	roce_set_field(eqc->nxt_eqe_ba0, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
 		       HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
 
 	/* set nex_eqe_ba[63:44] */
-	roce_set_field(eqc->nxt_eqe_ba1,
-		       HNS_ROCE_EQC_NXT_EQE_BA_H_M,
+	roce_set_field(eqc->nxt_eqe_ba1, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
 		       HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
 }
 
@@ -5828,18 +6156,16 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
 
 	/* irq contains: abnormal + AEQ + CEQ */
 	for (j = 0; j < other_num; j++)
-		snprintf((char *)hr_dev->irq_names[j],
-			 HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", j);
+		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
+			 "hns-abn-%d", j);
 
 	for (j = other_num; j < (other_num + aeq_num); j++)
-		snprintf((char *)hr_dev->irq_names[j],
-			 HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
-			 j - other_num);
+		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
+			 "hns-aeq-%d", j - other_num);
 
 	for (j = (other_num + aeq_num); j < irq_num; j++)
-		snprintf((char *)hr_dev->irq_names[j],
-			 HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
-			 j - other_num - aeq_num);
+		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
+			 "hns-ceq-%d", j - other_num - aeq_num);
 
 	for (j = 0; j < irq_num; j++) {
 		if (j < other_num)
@@ -6448,6 +6774,10 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
 		return;
 
 	handle->priv = NULL;
+
+	hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
+	hns_roce_handle_device_err(hr_dev);
+
 	hns_roce_exit(hr_dev);
 	kfree(hr_dev->priv);
 	ib_dealloc_device(&hr_dev->ib_dev);
@@ -6509,7 +6839,6 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
 static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
 {
 	struct hns_roce_dev *hr_dev;
-	struct ib_event event;
 
 	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
 		set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
@@ -6527,10 +6856,7 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
 	hr_dev->active = false;
 	hr_dev->dis_db = true;
 
-	event.event = IB_EVENT_DEVICE_FATAL;
-	event.device = &hr_dev->ib_dev;
-	event.element.port_num = 1;
-	ib_dispatch_event(&event);
+	hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
 
 	return 0;
 }
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 76a14db..2a117ff 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -81,10 +81,12 @@
 #define HNS_ROCE_V2_QPC_ENTRY_SZ		256
 #define HNS_ROCE_V2_IRRL_ENTRY_SZ		64
 #define HNS_ROCE_V2_TRRL_ENTRY_SZ		48
+#define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ	100
 #define HNS_ROCE_V2_CQC_ENTRY_SZ		64
 #define HNS_ROCE_V2_SRQC_ENTRY_SZ		64
 #define HNS_ROCE_V2_MTPT_ENTRY_SZ		64
 #define HNS_ROCE_V2_MTT_ENTRY_SZ		64
+#define HNS_ROCE_V2_IDX_ENTRY_SZ		4
 #define HNS_ROCE_V2_CQE_ENTRY_SIZE		32
 #define HNS_ROCE_V2_SCCC_ENTRY_SZ		32
 #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ		PAGE_SIZE
@@ -109,7 +111,12 @@
 #define HNS_ROCE_PBL_HOP_NUM			2
 #define HNS_ROCE_EQE_HOP_NUM			2
 #define HNS_ROCE_IDX_HOP_NUM			1
+#define HNS_ROCE_SQWQE_HOP_NUM			2
+#define HNS_ROCE_EXT_SGE_HOP_NUM		1
+#define HNS_ROCE_RQWQE_HOP_NUM			2
 
+#define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K	6
+#define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K		2
 #define HNS_ROCE_V2_GID_INDEX_NUM		256
 
 #define HNS_ROCE_V2_TABLE_CHUNK_SIZE		(1 << 18)
@@ -237,6 +244,7 @@ enum hns_roce_opcode_type {
 	HNS_ROCE_OPC_CFG_EXT_LLM			= 0x8403,
 	HNS_ROCE_OPC_CFG_TMOUT_LLM			= 0x8404,
 	HNS_ROCE_OPC_QUERY_PF_TIMER_RES			= 0x8406,
+	HNS_ROCE_OPC_QUERY_PF_CAPS_NUM                  = 0x8408,
 	HNS_ROCE_OPC_CFG_SGID_TB			= 0x8500,
 	HNS_ROCE_OPC_CFG_SMAC_TB			= 0x8501,
 	HNS_ROCE_OPC_POST_MB				= 0x8504,
@@ -643,7 +651,7 @@ struct hns_roce_v2_qp_context {
 #define	V2_QPC_BYTE_76_ATE_S 27
 
 #define	V2_QPC_BYTE_76_RQIE_S 28
-
+#define	V2_QPC_BYTE_76_EXT_ATE_S 29
 #define	V2_QPC_BYTE_76_RQ_VLAN_EN_S 30
 #define	V2_QPC_BYTE_80_RX_CQN_S 0
 #define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
@@ -1569,6 +1577,155 @@ struct hns_roce_cfg_smac_tb {
 #define CFG_SMAC_TB_VF_SMAC_H_S 0
 #define CFG_SMAC_TB_VF_SMAC_H_M GENMASK(15, 0)
 
+#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 5
+struct hns_roce_query_pf_caps_a {
+	u8 number_ports;
+	u8 local_ca_ack_delay;
+	__le16 max_sq_sg;
+	__le16 max_sq_inline;
+	__le16 max_rq_sg;
+	__le32 max_extend_sg;
+	__le16 num_qpc_timer;
+	__le16 num_cqc_timer;
+	__le16 max_srq_sges;
+	u8 num_aeq_vectors;
+	u8 num_other_vectors;
+	u8 max_sq_desc_sz;
+	u8 max_rq_desc_sz;
+	u8 max_srq_desc_sz;
+	u8 cq_entry_sz;
+};
+
+struct hns_roce_query_pf_caps_b {
+	u8 mtpt_entry_sz;
+	u8 irrl_entry_sz;
+	u8 trrl_entry_sz;
+	u8 cqc_entry_sz;
+	u8 srqc_entry_sz;
+	u8 idx_entry_sz;
+	u8 scc_ctx_entry_sz;
+	u8 max_mtu;
+	__le16 qpc_entry_sz;
+	__le16 qpc_timer_entry_sz;
+	__le16 cqc_timer_entry_sz;
+	u8 min_cqes;
+	u8 min_wqes;
+	__le32 page_size_cap;
+	u8 pkey_table_len;
+	u8 phy_num_uars;
+	u8 ctx_hop_num;
+	u8 pbl_hop_num;
+};
+
+struct hns_roce_query_pf_caps_c {
+	__le32 cap_flags_num_pds;
+	__le32 max_gid_num_cqs;
+	__le32 cq_depth;
+	__le32 num_mrws;
+	__le32 ord_num_qps;
+	__le16 sq_depth;
+	__le16 rq_depth;
+};
+
+#define V2_QUERY_PF_CAPS_C_NUM_PDS_S 0
+#define V2_QUERY_PF_CAPS_C_NUM_PDS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_C_CAP_FLAGS_S 20
+#define V2_QUERY_PF_CAPS_C_CAP_FLAGS_M GENMASK(31, 20)
+
+#define V2_QUERY_PF_CAPS_C_NUM_CQS_S 0
+#define V2_QUERY_PF_CAPS_C_NUM_CQS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_C_MAX_GID_S 20
+#define V2_QUERY_PF_CAPS_C_MAX_GID_M GENMASK(28, 20)
+
+#define V2_QUERY_PF_CAPS_C_CQ_DEPTH_S 0
+#define V2_QUERY_PF_CAPS_C_CQ_DEPTH_M GENMASK(22, 0)
+
+#define V2_QUERY_PF_CAPS_C_NUM_MRWS_S 0
+#define V2_QUERY_PF_CAPS_C_NUM_MRWS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_C_NUM_QPS_S 0
+#define V2_QUERY_PF_CAPS_C_NUM_QPS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_C_MAX_ORD_S 20
+#define V2_QUERY_PF_CAPS_C_MAX_ORD_M GENMASK(27, 20)
+
+struct hns_roce_query_pf_caps_d {
+	__le32 wq_hop_num_max_srqs;
+	__le16 srq_depth;
+	__le16 rsv;
+	__le32 num_ceqs_ceq_depth;
+	__le32 arm_st_aeq_depth;
+	__le32 num_uars_rsv_pds;
+	__le32 rsv_uars_rsv_qps;
+};
+#define V2_QUERY_PF_CAPS_D_NUM_SRQS_S 0
+#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(20, 0)
+
+#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S 20
+#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M GENMASK(21, 20)
+
+#define V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S 22
+#define V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M GENMASK(23, 22)
+
+#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S 24
+#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M GENMASK(25, 24)
+
+#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S 0
+#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M GENMASK(21, 0)
+
+#define V2_QUERY_PF_CAPS_D_NUM_CEQS_S 22
+#define V2_QUERY_PF_CAPS_D_NUM_CEQS_M GENMASK(31, 22)
+
+#define V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S 0
+#define V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M GENMASK(21, 0)
+
+#define V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S 22
+#define V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M GENMASK(23, 22)
+
+#define V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S 24
+#define V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M GENMASK(25, 24)
+
+#define V2_QUERY_PF_CAPS_D_RSV_PDS_S 0
+#define V2_QUERY_PF_CAPS_D_RSV_PDS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_D_NUM_UARS_S 20
+#define V2_QUERY_PF_CAPS_D_NUM_UARS_M GENMASK(27, 20)
+
+#define V2_QUERY_PF_CAPS_D_RSV_QPS_S 0
+#define V2_QUERY_PF_CAPS_D_RSV_QPS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_D_RSV_UARS_S 20
+#define V2_QUERY_PF_CAPS_D_RSV_UARS_M GENMASK(27, 20)
+
+struct hns_roce_query_pf_caps_e {
+	__le32 chunk_size_shift_rsv_mrws;
+	__le32 rsv_cqs;
+	__le32 rsv_srqs;
+	__le32 rsv_lkey;
+	__le16 ceq_max_cnt;
+	__le16 ceq_period;
+	__le16 aeq_max_cnt;
+	__le16 aeq_period;
+};
+
+#define V2_QUERY_PF_CAPS_E_RSV_MRWS_S 0
+#define V2_QUERY_PF_CAPS_E_RSV_MRWS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S 20
+#define V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M GENMASK(31, 20)
+
+#define V2_QUERY_PF_CAPS_E_RSV_CQS_S 0
+#define V2_QUERY_PF_CAPS_E_RSV_CQS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_E_RSV_SRQS_S 0
+#define V2_QUERY_PF_CAPS_E_RSV_SRQS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_S 0
+#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_M GENMASK(19, 0)
+
 struct hns_roce_cmq_desc {
 	__le16 opcode;
 	__le16 flag;
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 854ef6e..d0031d5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -90,7 +90,7 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
 static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
-	struct ib_gid_attr zattr = { };
+	struct ib_gid_attr zattr = {};
 	u8 port = attr->port_num - 1;
 	int ret;
 
@@ -210,7 +210,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
 	props->max_pkeys = 1;
 	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
-		props->max_srq = hr_dev->caps.max_srqs;
+		props->max_srq = hr_dev->caps.num_srqs;
 		props->max_srq_wr = hr_dev->caps.max_srq_wrs;
 		props->max_srq_sge = hr_dev->caps.max_srq_sges;
 	}
@@ -259,11 +259,12 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
 
 	mtu = iboe_get_mtu(net_dev->mtu);
 	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
-	props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
-			IB_PORT_ACTIVE : IB_PORT_DOWN;
-	props->phys_state = (props->state == IB_PORT_ACTIVE) ?
-			     IB_PORT_PHYS_STATE_LINK_UP :
-			     IB_PORT_PHYS_STATE_DISABLED;
+	props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
+			       IB_PORT_ACTIVE :
+			       IB_PORT_DOWN;
+	props->phys_state = props->state == IB_PORT_ACTIVE ?
+				    IB_PORT_PHYS_STATE_LINK_UP :
+				    IB_PORT_PHYS_STATE_DISABLED;
 
 	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
 
@@ -481,13 +482,13 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 
 	ib_dev = &hr_dev->ib_dev;
 
-	ib_dev->node_type		= RDMA_NODE_IB_CA;
-	ib_dev->dev.parent		= dev;
+	ib_dev->node_type = RDMA_NODE_IB_CA;
+	ib_dev->dev.parent = dev;
 
-	ib_dev->phys_port_cnt		= hr_dev->caps.num_ports;
-	ib_dev->local_dma_lkey		= hr_dev->caps.reserved_lkey;
-	ib_dev->num_comp_vectors	= hr_dev->caps.num_comp_vectors;
-	ib_dev->uverbs_cmd_mask		=
+	ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
+	ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
+	ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
+	ib_dev->uverbs_cmd_mask =
 		(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
 		(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
 		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
@@ -503,8 +504,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 		(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
 		(1ULL << IB_USER_VERBS_CMD_DESTROY_QP);
 
-	ib_dev->uverbs_ex_cmd_mask |=
-		(1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
+	ib_dev->uverbs_ex_cmd_mask |= (1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
 
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
 		ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
@@ -589,11 +589,13 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 
 	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
 		ret = hns_roce_init_hem_table(hr_dev,
-				      &hr_dev->mr_table.mtt_cqe_table,
-				      HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz,
-				      hr_dev->caps.num_cqe_segs, 1);
+					      &hr_dev->mr_table.mtt_cqe_table,
+					      HEM_TYPE_CQE,
+					      hr_dev->caps.mtt_entry_sz,
+					      hr_dev->caps.num_cqe_segs, 1);
 		if (ret) {
-			dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
+			dev_err(dev,
+				"Failed to init CQE context memory, aborting.\n");
 			goto err_unmap_cqe;
 		}
 	}
@@ -633,7 +635,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 					      hr_dev->caps.num_qps, 1);
 		if (ret) {
 			dev_err(dev,
-			       "Failed to init trrl_table memory, aborting.\n");
+				"Failed to init trrl_table memory, aborting.\n");
 			goto err_unmap_irrl;
 		}
 	}
@@ -653,7 +655,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 					      hr_dev->caps.num_srqs, 1);
 		if (ret) {
 			dev_err(dev,
-			      "Failed to init SRQ context memory, aborting.\n");
+				"Failed to init SRQ context memory, aborting.\n");
 			goto err_unmap_cq;
 		}
 	}
@@ -692,33 +694,31 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 					      hr_dev->caps.num_qps, 1);
 		if (ret) {
 			dev_err(dev,
-			      "Failed to init SCC context memory, aborting.\n");
+				"Failed to init SCC context memory, aborting.\n");
 			goto err_unmap_idx;
 		}
 	}
 
 	if (hr_dev->caps.qpc_timer_entry_sz) {
-		ret = hns_roce_init_hem_table(hr_dev,
-					      &hr_dev->qpc_timer_table,
+		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
 					      HEM_TYPE_QPC_TIMER,
 					      hr_dev->caps.qpc_timer_entry_sz,
 					      hr_dev->caps.num_qpc_timer, 1);
 		if (ret) {
 			dev_err(dev,
-			      "Failed to init QPC timer memory, aborting.\n");
+				"Failed to init QPC timer memory, aborting.\n");
 			goto err_unmap_ctx;
 		}
 	}
 
 	if (hr_dev->caps.cqc_timer_entry_sz) {
-		ret = hns_roce_init_hem_table(hr_dev,
-					      &hr_dev->cqc_timer_table,
+		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
 					      HEM_TYPE_CQC_TIMER,
 					      hr_dev->caps.cqc_timer_entry_sz,
 					      hr_dev->caps.num_cqc_timer, 1);
 		if (ret) {
 			dev_err(dev,
-			      "Failed to init CQC timer memory, aborting.\n");
+				"Failed to init CQC timer memory, aborting.\n");
 			goto err_unmap_qpc_timer;
 		}
 	}
@@ -727,8 +727,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 
 err_unmap_qpc_timer:
 	if (hr_dev->caps.qpc_timer_entry_sz)
-		hns_roce_cleanup_hem_table(hr_dev,
-					   &hr_dev->qpc_timer_table);
+		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);
 
 err_unmap_ctx:
 	if (hr_dev->caps.sccc_entry_sz)
@@ -863,6 +862,50 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 	return ret;
 }
 
+static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
+{
+	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
+	unsigned long flags;
+
+	spin_lock_irqsave(&hr_cq->lock, flags);
+	if (cq->comp_handler) {
+		if (!hr_cq->is_armed) {
+			hr_cq->is_armed = 1;
+			list_add_tail(&hr_cq->node, cq_list);
+		}
+	}
+	spin_unlock_irqrestore(&hr_cq->lock, flags);
+}
+
+void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_qp *hr_qp;
+	struct hns_roce_cq *hr_cq;
+	struct list_head cq_list;
+	unsigned long flags_qp;
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&cq_list);
+
+	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
+	list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
+		spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
+		if (hr_qp->sq.tail != hr_qp->sq.head)
+			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
+		spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);
+
+		spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
+		if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
+			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
+		spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
+	}
+
+	list_for_each_entry(hr_cq, &cq_list, node)
+		hns_roce_cq_completion(hr_dev, hr_cq->cqn);
+
+	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
+}
+
 int hns_roce_init(struct hns_roce_dev *hr_dev)
 {
 	int ret;
@@ -933,6 +976,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
 		}
 	}
 
+	INIT_LIST_HEAD(&hr_dev->qp_list);
+	spin_lock_init(&hr_dev->qp_list_lock);
+
 	ret = hns_roce_register_device(hr_dev);
 	if (ret)
 		goto error_failed_register_device;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 3ff6105..b9898e7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -1064,8 +1064,8 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
 		if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
 			if (page_addr & ((1 << mtt->page_shift) - 1)) {
 				dev_err(dev,
-					"page_addr 0x%llx is not page_shift %d alignment!\n",
-					page_addr, mtt->page_shift);
+					"page_addr is not aligned to page_shift %d!\n",
+					mtt->page_shift);
 				ret = -EINVAL;
 				goto out;
 			}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index eb2ee6a..3257ad1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -393,40 +393,38 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
 
 	/* Get buf size, SQ and RQ are aligned to page_size */
 	if (hr_dev->caps.max_sq_sg <= 2) {
-		hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
+		hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
 					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
-				   HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
+				   round_up((hr_qp->sq.wqe_cnt <<
 					     hr_qp->sq.wqe_shift), PAGE_SIZE);
 
 		hr_qp->sq.offset = 0;
-		hr_qp->rq.offset = HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
+		hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
 					     hr_qp->sq.wqe_shift), PAGE_SIZE);
 	} else {
 		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
 		hr_qp->sge.sge_cnt = ex_sge_num ?
 		   max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
-		hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
+		hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
 					     hr_qp->rq.wqe_shift), page_size) +
-				   HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
+				   round_up((hr_qp->sge.sge_cnt <<
 					     hr_qp->sge.sge_shift), page_size) +
-				   HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
+				   round_up((hr_qp->sq.wqe_cnt <<
 					     hr_qp->sq.wqe_shift), page_size);
 
 		hr_qp->sq.offset = 0;
 		if (ex_sge_num) {
-			hr_qp->sge.offset = HNS_ROCE_ALIGN_UP(
-							(hr_qp->sq.wqe_cnt <<
-							hr_qp->sq.wqe_shift),
-							page_size);
+			hr_qp->sge.offset = round_up((hr_qp->sq.wqe_cnt <<
+						      hr_qp->sq.wqe_shift),
+						     page_size);
 			hr_qp->rq.offset = hr_qp->sge.offset +
-					HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
-						hr_qp->sge.sge_shift),
-						page_size);
+					   round_up((hr_qp->sge.sge_cnt <<
+						     hr_qp->sge.sge_shift),
+						    page_size);
 		} else {
-			hr_qp->rq.offset = HNS_ROCE_ALIGN_UP(
-							(hr_qp->sq.wqe_cnt <<
-							hr_qp->sq.wqe_shift),
-							page_size);
+			hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
+						     hr_qp->sq.wqe_shift),
+						    page_size);
 		}
 	}
 
@@ -593,20 +591,18 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 	/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
 	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
 	hr_qp->sq.offset = 0;
-	size = HNS_ROCE_ALIGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
-				 page_size);
+	size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);
 
 	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
 		hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
-					(u32)hr_qp->sge.sge_cnt);
+					 (u32)hr_qp->sge.sge_cnt);
 		hr_qp->sge.offset = size;
-		size += HNS_ROCE_ALIGN_UP(hr_qp->sge.sge_cnt <<
-					  hr_qp->sge.sge_shift, page_size);
+		size += round_up(hr_qp->sge.sge_cnt << hr_qp->sge.sge_shift,
+				 page_size);
 	}
 
 	hr_qp->rq.offset = size;
-	size += HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
-				  page_size);
+	size += round_up((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift), page_size);
 	hr_qp->buff_size = size;
 
 	/* Get wr and sge number which send */
@@ -681,6 +677,29 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
 	kfree(hr_qp->rq_inl_buf.wqe_list);
 }
 
+static void add_qp_to_list(struct hns_roce_dev *hr_dev,
+			   struct hns_roce_qp *hr_qp,
+			   struct ib_cq *send_cq, struct ib_cq *recv_cq)
+{
+	struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
+	unsigned long flags;
+
+	hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
+	hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;
+
+	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
+	hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);
+
+	list_add_tail(&hr_qp->node, &hr_dev->qp_list);
+	if (hr_send_cq)
+		list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
+	if (hr_recv_cq)
+		list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);
+
+	hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
+	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
+}
+
 static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 				     struct ib_pd *ib_pd,
 				     struct ib_qp_init_attr *init_attr,
@@ -950,6 +969,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 	}
 
 	hr_qp->event = hns_roce_ib_qp_event;
+
+	add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, init_attr->recv_cq);
+
 	hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);
 
 	return 0;
@@ -1232,7 +1254,16 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
 		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
-	if (send_cq == recv_cq) {
+	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
+		__acquire(&send_cq->lock);
+		__acquire(&recv_cq->lock);
+	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
+		spin_lock_irq(&send_cq->lock);
+		__acquire(&recv_cq->lock);
+	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
+		spin_lock_irq(&recv_cq->lock);
+		__acquire(&send_cq->lock);
+	} else if (send_cq == recv_cq) {
 		spin_lock_irq(&send_cq->lock);
 		__acquire(&recv_cq->lock);
 	} else if (send_cq->cqn < recv_cq->cqn) {
@@ -1248,7 +1279,16 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
 			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
 			 __releases(&recv_cq->lock)
 {
-	if (send_cq == recv_cq) {
+	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
+		__release(&recv_cq->lock);
+		__release(&send_cq->lock);
+	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
+		__release(&recv_cq->lock);
+		spin_unlock(&send_cq->lock);
+	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
+		__release(&send_cq->lock);
+		spin_unlock(&recv_cq->lock);
+	} else if (send_cq == recv_cq) {
 		__release(&recv_cq->lock);
 		spin_unlock_irq(&send_cq->lock);
 	} else if (send_cq->cqn < recv_cq->cqn) {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index d44cf33d..2386143 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1225,6 +1225,8 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
 			const struct in_ifaddr *ifa;
 
 			idev = in_dev_get(dev);
+			if (!idev)
+				continue;
 			in_dev_for_each_ifa_rtnl(ifa, idev) {
 				i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
 					    "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index ecd6cad..b591861 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -186,23 +186,6 @@ static void id_map_ent_timeout(struct work_struct *work)
 	kfree(ent);
 }
 
-static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
-{
-	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
-	struct rb_root *sl_id_map = &sriov->sl_id_map;
-	struct id_map_entry *ent, *found_ent;
-
-	spin_lock(&sriov->id_map_lock);
-	ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
-	if (!ent)
-		goto out;
-	found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
-	if (found_ent && found_ent == ent)
-		rb_erase(&found_ent->node, sl_id_map);
-out:
-	spin_unlock(&sriov->id_map_lock);
-}
-
 static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
 {
 	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
@@ -294,7 +277,7 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
 	spin_lock(&sriov->id_map_lock);
 	spin_lock_irqsave(&sriov->going_down_lock, flags);
 	/*make sure that there is no schedule inside the scheduled work.*/
-	if (!sriov->is_going_down) {
+	if (!sriov->is_going_down && !id->scheduled_delete) {
 		id->scheduled_delete = 1;
 		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 	}
@@ -341,9 +324,6 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
 
 	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
 		schedule_delayed(ibdev, id);
-	else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
-		id_map_find_del(ibdev, pv_cm_id);
-
 	return 0;
 }
 
@@ -382,12 +362,9 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
 		*slave = id->slave_id;
 	set_remote_comm_id(mad, id->sl_cm_id);
 
-	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
+	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
+	    mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
 		schedule_delayed(ibdev, id);
-	else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
-			mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
-		id_map_find_del(ibdev, (int) pv_cm_id);
-	}
 
 	return 0;
 }
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a57033d..f8b936b 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -568,18 +568,13 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
 	wc->vendor_err = cqe->vendor_err_syndrome;
 }
 
-static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
+static int mlx4_ib_ipoib_csum_ok(__be16 status, u8 badfcs_enc, __be16 checksum)
 {
-	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4      |
-				      MLX4_CQE_STATUS_IPV4F     |
-				      MLX4_CQE_STATUS_IPV4OPT   |
-				      MLX4_CQE_STATUS_IPV6      |
-				      MLX4_CQE_STATUS_IPOK)) ==
-		cpu_to_be16(MLX4_CQE_STATUS_IPV4        |
-			    MLX4_CQE_STATUS_IPOK))              &&
-		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP       |
-				      MLX4_CQE_STATUS_TCP))     &&
-		checksum == cpu_to_be16(0xffff);
+	return ((badfcs_enc & MLX4_CQE_STATUS_L4_CSUM) ||
+		((status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+		 (status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
+				       MLX4_CQE_STATUS_UDP)) &&
+		 (checksum == cpu_to_be16(0xffff))));
 }
 
 static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
@@ -855,6 +850,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
 		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
 		wc->wc_flags	  |= mlx4_ib_ipoib_csum_ok(cqe->status,
+					cqe->badfcs_enc,
 					cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
 		if (is_eth) {
 			wc->slid = 0;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 34055cb..2f5d9b1 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -246,6 +246,13 @@ static int mlx4_ib_update_gids(struct gid_entry *gids,
 	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
 }
 
+static void free_gid_entry(struct gid_entry *entry)
+{
+	memset(&entry->gid, 0, sizeof(entry->gid));
+	kfree(entry->ctx);
+	entry->ctx = NULL;
+}
+
 static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
 {
 	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
@@ -313,6 +320,8 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
 				     GFP_ATOMIC);
 		if (!gids) {
 			ret = -ENOMEM;
+			*context = NULL;
+			free_gid_entry(&port_gid_table->gids[free]);
 		} else {
 			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
 				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
@@ -324,6 +333,12 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
 
 	if (!ret && hw_update) {
 		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
+		if (ret) {
+			spin_lock_bh(&iboe->lock);
+			*context = NULL;
+			free_gid_entry(&port_gid_table->gids[free]);
+			spin_unlock_bh(&iboe->lock);
+		}
 		kfree(gids);
 	}
 
@@ -353,10 +368,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
 		if (!ctx->refcount) {
 			unsigned int real_index = ctx->real_index;
 
-			memset(&port_gid_table->gids[real_index].gid, 0,
-			       sizeof(port_gid_table->gids[real_index].gid));
-			kfree(port_gid_table->gids[real_index].ctx);
-			port_gid_table->gids[real_index].ctx = NULL;
+			free_gid_entry(&port_gid_table->gids[real_index]);
 			hw_update = 1;
 		}
 	}
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index cb23cdb..26425dd 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -849,7 +849,7 @@ static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context,
 	 * reused for further WQN allocations.
 	 * The next created WQ will allocate a new range.
 	 */
-		range->dirty = 1;
+		range->dirty = true;
 	}
 
 	mutex_unlock(&context->wqn_ranges_mutex);
@@ -3085,7 +3085,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
 		}
 		if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
 			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
-			is_vlan = 1;
+			is_vlan = true;
 		}
 	}
 	err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh,
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 685b8ed..d7efc9f 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -30,7 +30,7 @@ enum devx_obj_flags {
 struct devx_async_data {
 	struct mlx5_ib_dev *mdev;
 	struct list_head list;
-	struct ib_uobject *fd_uobj;
+	struct devx_async_cmd_event_file *ev_file;
 	struct mlx5_async_work cb_work;
 	u16 cmd_out_len;
 	/* must be last field in this structure */
@@ -72,7 +72,6 @@ struct devx_event_subscription {
 	struct rcu_head	rcu;
 	u64 cookie;
 	struct devx_async_event_file *ev_file;
-	struct file *filp; /* Upon hot unplug we need a direct access to */
 	struct eventfd_ctx *eventfd;
 };
 
@@ -1674,21 +1673,20 @@ static void devx_query_callback(int status, struct mlx5_async_work *context)
 {
 	struct devx_async_data *async_data =
 		container_of(context, struct devx_async_data, cb_work);
-	struct ib_uobject *fd_uobj = async_data->fd_uobj;
-	struct devx_async_cmd_event_file *ev_file;
-	struct devx_async_event_queue *ev_queue;
+	struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
+	struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
 	unsigned long flags;
 
-	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
-			       uobj);
-	ev_queue = &ev_file->ev_queue;
-
+	/*
+	 * Note that if the struct devx_async_cmd_event_file uobj begins to be
+	 * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
+	 * routine returns, ensuring that it always remains valid here.
+	 */
 	spin_lock_irqsave(&ev_queue->lock, flags);
 	list_add_tail(&async_data->list, &ev_queue->event_list);
 	spin_unlock_irqrestore(&ev_queue->lock, flags);
 
 	wake_up_interruptible(&ev_queue->poll_wait);
-	fput(fd_uobj->object);
 }
 
 #define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
@@ -1757,9 +1755,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
 
 	async_data->cmd_out_len = cmd_out_len;
 	async_data->mdev = mdev;
-	async_data->fd_uobj = fd_uobj;
+	async_data->ev_file = ev_file;
 
-	get_file(fd_uobj->object);
 	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
 	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
 		    uverbs_attr_get_len(attrs,
@@ -1769,12 +1766,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
 		    devx_query_callback, &async_data->cb_work);
 
 	if (err)
-		goto cb_err;
+		goto free_async;
 
 	return 0;
 
-cb_err:
-	fput(fd_uobj->object);
 free_async:
 	kvfree(async_data);
 sub_bytes:
@@ -2032,6 +2027,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
 			goto err;
 
 		list_add_tail(&event_sub->event_list, &sub_list);
+		uverbs_uobject_get(&ev_file->uobj);
 		if (use_eventfd) {
 			event_sub->eventfd =
 				eventfd_ctx_fdget(redirect_fd);
@@ -2045,7 +2041,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
 
 		event_sub->cookie = cookie;
 		event_sub->ev_file = ev_file;
-		event_sub->filp = fd_uobj->object;
 		/* May be needed upon cleanup the devx object/subscription */
 		event_sub->xa_key_level1 = key_level1;
 		event_sub->xa_key_level2 = obj_id;
@@ -2099,7 +2094,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
 
 		if (event_sub->eventfd)
 			eventfd_ctx_put(event_sub->eventfd);
-
+		uverbs_uobject_put(&event_sub->ev_file->uobj);
 		kfree(event_sub);
 	}
 
@@ -2329,6 +2324,9 @@ static int deliver_event(struct devx_event_subscription *event_sub,
 			return 0;
 		}
 
+		/* is_destroyed is ignored here because we don't have any memory
+		 * allocation to clean up for the omit_data case
+		 */
 		list_add_tail(&event_sub->event_list, &ev_file->event_list);
 		spin_unlock_irqrestore(&ev_file->lock, flags);
 		wake_up_interruptible(&ev_file->poll_wait);
@@ -2348,7 +2346,10 @@ static int deliver_event(struct devx_event_subscription *event_sub,
 	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));
 
 	spin_lock_irqsave(&ev_file->lock, flags);
-	list_add_tail(&event_data->list, &ev_file->event_list);
+	if (!ev_file->is_destroyed)
+		list_add_tail(&event_data->list, &ev_file->event_list);
+	else
+		kfree(event_data);
 	spin_unlock_irqrestore(&ev_file->lock, flags);
 	wake_up_interruptible(&ev_file->poll_wait);
 
@@ -2361,17 +2362,10 @@ static void dispatch_event_fd(struct list_head *fd_list,
 	struct devx_event_subscription *item;
 
 	list_for_each_entry_rcu(item, fd_list, xa_list) {
-		if (!get_file_rcu(item->filp))
-			continue;
-
-		if (item->eventfd) {
+		if (item->eventfd)
 			eventfd_signal(item->eventfd, 1);
-			fput(item->filp);
-			continue;
-		}
-
-		deliver_event(item, data);
-		fput(item->filp);
+		else
+			deliver_event(item, data);
 	}
 }
 
@@ -2509,23 +2503,6 @@ static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
 	return ret;
 }
 
-static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
-{
-	struct ib_uobject *uobj = filp->private_data;
-	struct devx_async_cmd_event_file *comp_ev_file = container_of(
-		uobj, struct devx_async_cmd_event_file, uobj);
-	struct devx_async_data *entry, *tmp;
-
-	spin_lock_irq(&comp_ev_file->ev_queue.lock);
-	list_for_each_entry_safe(entry, tmp,
-				 &comp_ev_file->ev_queue.event_list, list)
-		kvfree(entry);
-	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
-
-	uverbs_close_fd(filp);
-	return 0;
-}
-
 static __poll_t devx_async_cmd_event_poll(struct file *filp,
 					      struct poll_table_struct *wait)
 {
@@ -2549,7 +2526,7 @@ static const struct file_operations devx_async_cmd_event_fops = {
 	.owner	 = THIS_MODULE,
 	.read	 = devx_async_cmd_event_read,
 	.poll    = devx_async_cmd_event_poll,
-	.release = devx_async_cmd_event_close,
+	.release = uverbs_uobject_fd_release,
 	.llseek	 = no_llseek,
 };
 
@@ -2653,26 +2630,73 @@ static __poll_t devx_async_event_poll(struct file *filp,
 	return pollflags;
 }
 
-static int devx_async_event_close(struct inode *inode, struct file *filp)
+static void devx_free_subscription(struct rcu_head *rcu)
 {
-	struct devx_async_event_file *ev_file = filp->private_data;
+	struct devx_event_subscription *event_sub =
+		container_of(rcu, struct devx_event_subscription, rcu);
+
+	if (event_sub->eventfd)
+		eventfd_ctx_put(event_sub->eventfd);
+	uverbs_uobject_put(&event_sub->ev_file->uobj);
+	kfree(event_sub);
+}
+
+static const struct file_operations devx_async_event_fops = {
+	.owner	 = THIS_MODULE,
+	.read	 = devx_async_event_read,
+	.poll    = devx_async_event_poll,
+	.release = uverbs_uobject_fd_release,
+	.llseek	 = no_llseek,
+};
+
+static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
+					     enum rdma_remove_reason why)
+{
+	struct devx_async_cmd_event_file *comp_ev_file =
+		container_of(uobj, struct devx_async_cmd_event_file,
+			     uobj);
+	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
+	struct devx_async_data *entry, *tmp;
+
+	spin_lock_irq(&ev_queue->lock);
+	ev_queue->is_destroyed = 1;
+	spin_unlock_irq(&ev_queue->lock);
+	wake_up_interruptible(&ev_queue->poll_wait);
+
+	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
+
+	spin_lock_irq(&comp_ev_file->ev_queue.lock);
+	list_for_each_entry_safe(entry, tmp,
+				 &comp_ev_file->ev_queue.event_list, list)
+		kvfree(entry);
+	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
+	return 0;
+};
+
+static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
+					 enum rdma_remove_reason why)
+{
+	struct devx_async_event_file *ev_file =
+		container_of(uobj, struct devx_async_event_file,
+			     uobj);
 	struct devx_event_subscription *event_sub, *event_sub_tmp;
 	struct devx_async_event_data *entry, *tmp;
 	struct mlx5_ib_dev *dev = ev_file->dev;
 
+	spin_lock_irq(&ev_file->lock);
+	ev_file->is_destroyed = 1;
+	spin_unlock_irq(&ev_file->lock);
+	wake_up_interruptible(&ev_file->poll_wait);
+
 	mutex_lock(&dev->devx_event_table.event_xa_lock);
 	/* delete the subscriptions which are related to this FD */
 	list_for_each_entry_safe(event_sub, event_sub_tmp,
 				 &ev_file->subscribed_events_list, file_list) {
 		devx_cleanup_subscription(dev, event_sub);
-		if (event_sub->eventfd)
-			eventfd_ctx_put(event_sub->eventfd);
-
 		list_del_rcu(&event_sub->file_list);
 		/* subscription may not be used by the read API any more */
-		kfree_rcu(event_sub, rcu);
+		call_rcu(&event_sub->rcu, devx_free_subscription);
 	}
-
 	mutex_unlock(&dev->devx_event_table.event_xa_lock);
 
 	/* free the pending events allocation */
@@ -2684,51 +2708,8 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
 		spin_unlock_irq(&ev_file->lock);
 	}
 
-	uverbs_close_fd(filp);
 	put_device(&dev->ib_dev.dev);
 	return 0;
-}
-
-static const struct file_operations devx_async_event_fops = {
-	.owner	 = THIS_MODULE,
-	.read	 = devx_async_event_read,
-	.poll    = devx_async_event_poll,
-	.release = devx_async_event_close,
-	.llseek	 = no_llseek,
-};
-
-static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
-						   enum rdma_remove_reason why)
-{
-	struct devx_async_cmd_event_file *comp_ev_file =
-		container_of(uobj, struct devx_async_cmd_event_file,
-			     uobj);
-	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
-
-	spin_lock_irq(&ev_queue->lock);
-	ev_queue->is_destroyed = 1;
-	spin_unlock_irq(&ev_queue->lock);
-
-	if (why == RDMA_REMOVE_DRIVER_REMOVE)
-		wake_up_interruptible(&ev_queue->poll_wait);
-
-	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
-	return 0;
-};
-
-static int devx_hot_unplug_async_event_file(struct ib_uobject *uobj,
-					    enum rdma_remove_reason why)
-{
-	struct devx_async_event_file *ev_file =
-		container_of(uobj, struct devx_async_event_file,
-			     uobj);
-
-	spin_lock_irq(&ev_file->lock);
-	ev_file->is_destroyed = 1;
-	spin_unlock_irq(&ev_file->lock);
-
-	wake_up_interruptible(&ev_file->poll_wait);
-	return 0;
 };
 
 DECLARE_UVERBS_NAMED_METHOD(
@@ -2913,7 +2894,7 @@ DECLARE_UVERBS_NAMED_METHOD(
 DECLARE_UVERBS_NAMED_OBJECT(
 	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
 	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
-			     devx_hot_unplug_async_cmd_event_file,
+			     devx_async_cmd_event_destroy_uobj,
 			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
 			     O_RDONLY),
 	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
@@ -2931,7 +2912,7 @@ DECLARE_UVERBS_NAMED_METHOD(
 DECLARE_UVERBS_NAMED_OBJECT(
 	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
 	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
-			     devx_hot_unplug_async_event_file,
+			     devx_async_event_destroy_uobj,
 			     &devx_async_event_fops, "[devx_async_event]",
 			     O_RDONLY),
 	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
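
The devx hunks above drop the custom file release handlers and, for subscriptions, replace kfree_rcu() with call_rcu() plus devx_free_subscription(), so the eventfd context and the ev_file uobject reference taken at subscribe time are only put once RCU readers are done. The callback recovers the subscription from its embedded rcu_head via container_of(). A minimal standalone analogue of that embedded-callback-head pattern (userspace C with hypothetical names, not the driver code):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal stand-in for the kernel's container_of() helper. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cb_head {                        /* analogue of struct rcu_head */
            void (*func)(struct cb_head *head);
    };

    struct subscription {                   /* hypothetical enclosing object */
            int cookie;
            struct cb_head head;            /* embedded callback anchor */
    };

    static void free_subscription(struct cb_head *head)
    {
            /* Recover the enclosing object from the embedded member. */
            struct subscription *sub = container_of(head, struct subscription, head);

            printf("freeing subscription with cookie %d\n", sub->cookie);
            free(sub);
    }

    int main(void)
    {
            struct subscription *sub = malloc(sizeof(*sub));

            sub->cookie = 42;
            sub->head.func = free_subscription;
            /* A real RCU implementation would invoke this after a grace period. */
            sub->head.func(&sub->head);
            return 0;
    }

The same container_of() idiom is what lets the callback free the whole object even though only the embedded head is passed to it.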
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index ac4d8d1..1ae6fd9 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -507,8 +507,7 @@ int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
 		ret = ib_post_send(tx_qp, &cur_wr.wr, bad_wr);
 		if (ret) {
 			/* Undo the effect of adding the outstanding wr */
-			gsi->outstanding_pi = (gsi->outstanding_pi - 1) %
-					      gsi->cap.max_send_wr;
+			gsi->outstanding_pi--;
 			goto err;
 		}
 		spin_unlock_irqrestore(&gsi->lock, flags);
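
In the gsi.c hunk above, unwinding a failed post now simply decrements outstanding_pi instead of wrapping it modulo max_send_wr, which reads as treating the producer index as free-running and reducing it to a ring slot only where a slot is actually addressed. A standalone sketch of that free-running-index idiom (an assumption about intent, not the mlx5 code):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8                     /* assumed ring depth */

    /* Free-running producer/consumer indices; reduce to a slot only on access. */
    struct ring {
            uint32_t pi;                    /* monotonically increasing producer index */
            uint32_t ci;                    /* monotonically increasing consumer index */
            int slots[RING_SIZE];
    };

    static int ring_post(struct ring *r, int val)
    {
            if (r->pi - r->ci == RING_SIZE) /* full: difference survives wraparound */
                    return -1;
            r->slots[r->pi % RING_SIZE] = val;
            r->pi++;                        /* never wrapped explicitly */
            return 0;
    }

    static void ring_unpost(struct ring *r)
    {
            r->pi--;                        /* undo the last post, again without wrapping */
    }

    int main(void)
    {
            struct ring r = { 0 };

            assert(ring_post(&r, 1) == 0);
            ring_unpost(&r);
            assert(r.pi == r.ci);           /* ring is empty again */
            printf("pi=%u ci=%u\n", r.pi, r.ci);
            return 0;
    }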
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
index 4f0edd4..b611653 100644
--- a/drivers/infiniband/hw/mlx5/ib_virt.c
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -164,8 +164,10 @@ static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
 	in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
 	in->node_guid = guid;
 	err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
-	if (!err)
+	if (!err) {
 		vfs_ctx[vf].node_guid = guid;
+		vfs_ctx[vf].node_guid_valid = 1;
+	}
 	kfree(in);
 	return err;
 }
@@ -185,8 +187,10 @@ static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
 	in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
 	in->port_guid = guid;
 	err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
-	if (!err)
+	if (!err) {
 		vfs_ctx[vf].port_guid = guid;
+		vfs_ctx[vf].port_guid_valid = 1;
+	}
 	kfree(in);
 	return err;
 }
@@ -208,20 +212,12 @@ int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
 {
 	struct mlx5_ib_dev *dev = to_mdev(device);
 	struct mlx5_core_dev *mdev = dev->mdev;
-	struct mlx5_hca_vport_context *rep;
-	int err;
+	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
 
-	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
-	if (!rep)
-		return -ENOMEM;
+	node_guid->guid =
+		vfs_ctx[vf].node_guid_valid ? vfs_ctx[vf].node_guid : 0;
+	port_guid->guid =
+		vfs_ctx[vf].port_guid_valid ? vfs_ctx[vf].port_guid : 0;
 
-	err = mlx5_query_hca_vport_context(mdev, 1, 1, vf+1, rep);
-	if (err)
-		goto ex;
-
-	port_guid->guid = rep->port_guid;
-	node_guid->guid = rep->node_guid;
-ex:
-	kfree(rep);
-	return err;
+	return 0;
 }
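
The ib_virt.c change above makes mlx5_ib_get_vf_guid() serve GUIDs from the cached vfs_ctx entries, guarded by the new node_guid_valid/port_guid_valid flags, instead of issuing a vport-context query; unset GUIDs read back as 0. A tiny standalone sketch of that cache-with-validity-flag shape (hypothetical names, not the driver structures):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct vf_guid_cache {                  /* hypothetical per-VF cache entry */
            uint64_t node_guid;
            bool node_guid_valid;
    };

    /* Record a GUID only once the (imaginary) device accepted it. */
    static int cache_set_node_guid(struct vf_guid_cache *c, uint64_t guid, int dev_err)
    {
            if (!dev_err) {
                    c->node_guid = guid;
                    c->node_guid_valid = true;
            }
            return dev_err;
    }

    /* Reads never touch the device: unset entries simply report 0. */
    static uint64_t cache_get_node_guid(const struct vf_guid_cache *c)
    {
            return c->node_guid_valid ? c->node_guid : 0;
    }

    int main(void)
    {
            struct vf_guid_cache c = { 0 };

            printf("before: %llu\n", (unsigned long long)cache_get_node_guid(&c));
            cache_set_node_guid(&c, 0x1234, 0);
            printf("after:  0x%llx\n", (unsigned long long)cache_get_node_guid(&c));
            return 0;
    }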
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ab4ec33..e874d68 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2094,6 +2094,7 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
 {
 	struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
 	struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
+	struct mlx5_var_table *var_table = &dev->var_table;
 	struct mlx5_ib_dm *mdm;
 
 	switch (mentry->mmap_flag) {
@@ -2103,6 +2104,12 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
 				       mdm->size);
 		kfree(mdm);
 		break;
+	case MLX5_IB_MMAP_TYPE_VAR:
+		mutex_lock(&var_table->bitmap_lock);
+		clear_bit(mentry->page_idx, var_table->bitmap);
+		mutex_unlock(&var_table->bitmap_lock);
+		kfree(mentry);
+		break;
 	default:
 		WARN_ON(true);
 	}
@@ -2262,7 +2269,10 @@ static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
 
 	mentry = to_mmmap(entry);
 	pfn = (mentry->address >> PAGE_SHIFT);
-	prot = pgprot_writecombine(vma->vm_page_prot);
+	if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR)
+		prot = pgprot_noncached(vma->vm_page_prot);
+	else
+		prot = pgprot_writecombine(vma->vm_page_prot);
 	ret = rdma_user_mmap_io(ucontext, vma, pfn,
 				entry->npages * PAGE_SIZE,
 				prot,
@@ -2271,6 +2281,15 @@ static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
 	return ret;
 }
 
+static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry)
+{
+	u16 cmd = entry->rdma_entry.start_pgoff >> 16;
+	u16 index = entry->rdma_entry.start_pgoff & 0xFFFF;
+
+	return (((index >> 8) << 16) | (cmd << MLX5_IB_MMAP_CMD_SHIFT) |
+		(index & 0xFF)) << PAGE_SHIFT;
+}
+
 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
 {
 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
@@ -5368,6 +5387,14 @@ static const struct mlx5_ib_counter extended_err_cnts[] = {
 	INIT_Q_COUNTER(req_cqe_flush_error),
 };
 
+static const struct mlx5_ib_counter roce_accl_cnts[] = {
+	INIT_Q_COUNTER(roce_adp_retrans),
+	INIT_Q_COUNTER(roce_adp_retrans_to),
+	INIT_Q_COUNTER(roce_slow_restart),
+	INIT_Q_COUNTER(roce_slow_restart_cnps),
+	INIT_Q_COUNTER(roce_slow_restart_trans),
+};
+
 #define INIT_EXT_PPCNT_COUNTER(_name)		\
 	{ .name = #_name, .offset =	\
 	MLX5_BYTE_OFF(ppcnt_reg, \
@@ -5416,6 +5443,9 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
 	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
 		num_counters += ARRAY_SIZE(extended_err_cnts);
 
+	if (MLX5_CAP_GEN(dev->mdev, roce_accl))
+		num_counters += ARRAY_SIZE(roce_accl_cnts);
+
 	cnts->num_q_counters = num_counters;
 
 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
@@ -5476,6 +5506,13 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
 		}
 	}
 
+	if (MLX5_CAP_GEN(dev->mdev, roce_accl)) {
+		for (i = 0; i < ARRAY_SIZE(roce_accl_cnts); i++, j++) {
+			names[j] = roce_accl_cnts[i].name;
+			offsets[j] = roce_accl_cnts[i].offset;
+		}
+	}
+
 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
 		for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
 			names[j] = cong_cnts[i].name;
@@ -6051,6 +6088,145 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
 	mlx5_nic_vport_disable_roce(dev->mdev);
 }
 
+static int var_obj_cleanup(struct ib_uobject *uobject,
+			   enum rdma_remove_reason why,
+			   struct uverbs_attr_bundle *attrs)
+{
+	struct mlx5_user_mmap_entry *obj = uobject->object;
+
+	rdma_user_mmap_entry_remove(&obj->rdma_entry);
+	return 0;
+}
+
+static struct mlx5_user_mmap_entry *
+alloc_var_entry(struct mlx5_ib_ucontext *c)
+{
+	struct mlx5_user_mmap_entry *entry;
+	struct mlx5_var_table *var_table;
+	u32 page_idx;
+	int err;
+
+	var_table = &to_mdev(c->ibucontext.device)->var_table;
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_lock(&var_table->bitmap_lock);
+	page_idx = find_first_zero_bit(var_table->bitmap,
+				       var_table->num_var_hw_entries);
+	if (page_idx >= var_table->num_var_hw_entries) {
+		err = -ENOSPC;
+		mutex_unlock(&var_table->bitmap_lock);
+		goto end;
+	}
+
+	set_bit(page_idx, var_table->bitmap);
+	mutex_unlock(&var_table->bitmap_lock);
+
+	entry->address = var_table->hw_start_addr +
+				(page_idx * var_table->stride_size);
+	entry->page_idx = page_idx;
+	entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR;
+
+	err = rdma_user_mmap_entry_insert_range(
+		&c->ibucontext, &entry->rdma_entry, var_table->stride_size,
+		MLX5_IB_MMAP_OFFSET_START << 16,
+		(MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1);
+	if (err)
+		goto err_insert;
+
+	return entry;
+
+err_insert:
+	mutex_lock(&var_table->bitmap_lock);
+	clear_bit(page_idx, var_table->bitmap);
+	mutex_unlock(&var_table->bitmap_lock);
+end:
+	kfree(entry);
+	return ERR_PTR(err);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
+	struct uverbs_attr_bundle *attrs)
+{
+	struct ib_uobject *uobj = uverbs_attr_get_uobject(
+		attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
+	struct mlx5_ib_ucontext *c;
+	struct mlx5_user_mmap_entry *entry;
+	u64 mmap_offset;
+	u32 length;
+	int err;
+
+	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+	if (IS_ERR(c))
+		return PTR_ERR(c);
+
+	entry = alloc_var_entry(c);
+	if (IS_ERR(entry))
+		return PTR_ERR(entry);
+
+	mmap_offset = mlx5_entry_to_mmap_offset(entry);
+	length = entry->rdma_entry.npages * PAGE_SIZE;
+	uobj->object = entry;
+
+	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
+			     &mmap_offset, sizeof(mmap_offset));
+	if (err)
+		goto err;
+
+	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
+			     &entry->page_idx, sizeof(entry->page_idx));
+	if (err)
+		goto err;
+
+	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
+			     &length, sizeof(length));
+	if (err)
+		goto err;
+
+	return 0;
+
+err:
+	rdma_user_mmap_entry_remove(&entry->rdma_entry);
+	return err;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+	MLX5_IB_METHOD_VAR_OBJ_ALLOC,
+	UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE,
+			MLX5_IB_OBJECT_VAR,
+			UVERBS_ACCESS_NEW,
+			UA_MANDATORY),
+	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
+			   UVERBS_ATTR_TYPE(u32),
+			   UA_MANDATORY),
+	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
+			   UVERBS_ATTR_TYPE(u32),
+			   UA_MANDATORY),
+	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
+			    UVERBS_ATTR_TYPE(u64),
+			    UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+	MLX5_IB_METHOD_VAR_OBJ_DESTROY,
+	UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE,
+			MLX5_IB_OBJECT_VAR,
+			UVERBS_ACCESS_DESTROY,
+			UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR,
+			    UVERBS_TYPE_ALLOC_IDR(var_obj_cleanup),
+			    &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC),
+			    &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY));
+
+static bool var_is_supported(struct ib_device *device)
+{
+	struct mlx5_ib_dev *dev = to_mdev(device);
+
+	return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+			MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q);
+}
+
 ADD_UVERBS_ATTRIBUTES_SIMPLE(
 	mlx5_ib_dm,
 	UVERBS_OBJECT_DM,
@@ -6073,14 +6249,14 @@ ADD_UVERBS_ATTRIBUTES_SIMPLE(
 			     enum mlx5_ib_uapi_flow_action_flags));
 
 static const struct uapi_definition mlx5_ib_defs[] = {
-#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
 	UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
 	UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
-#endif
 
 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
 				&mlx5_ib_flow_action),
 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
+	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
+				UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
 	{}
 };
 
@@ -6352,6 +6528,35 @@ static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
 	.reg_dm_mr = mlx5_ib_reg_dm_mr,
 };
 
+static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_core_dev *mdev = dev->mdev;
+	struct mlx5_var_table *var_table = &dev->var_table;
+	u8 log_doorbell_bar_size;
+	u8 log_doorbell_stride;
+	u64 bar_size;
+
+	log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
+					log_doorbell_bar_size);
+	log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
+					log_doorbell_stride);
+	var_table->hw_start_addr = dev->mdev->bar_addr +
+				MLX5_CAP64_DEV_VDPA_EMULATION(mdev,
+					doorbell_bar_offset);
+	bar_size = (1ULL << log_doorbell_bar_size) * 4096;
+	var_table->stride_size = 1ULL << log_doorbell_stride;
+	var_table->num_var_hw_entries = bar_size / var_table->stride_size;
+	mutex_init(&var_table->bitmap_lock);
+	var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
+					  GFP_KERNEL);
+	return (var_table->bitmap) ? 0 : -ENOMEM;
+}
+
+static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
+{
+	bitmap_free(dev->var_table.bitmap);
+}
+
 static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_core_dev *mdev = dev->mdev;
@@ -6439,6 +6644,13 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 	     MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
 		mutex_init(&dev->lb.mutex);
 
+	if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+			MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
+		err = mlx5_ib_init_var_table(dev);
+		if (err)
+			return err;
+	}
+
 	dev->ib_dev.use_cq_dim = true;
 
 	return 0;
@@ -6742,6 +6954,8 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
 		      const struct mlx5_ib_profile *profile,
 		      int stage)
 {
+	dev->ib_active = false;
+
 	/* Number of stages to cleanup */
 	while (stage) {
 		stage--;
@@ -6787,7 +7001,7 @@ static const struct mlx5_ib_profile pf_profile = {
 		     mlx5_ib_stage_flow_db_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
 		     mlx5_ib_stage_caps_init,
-		     NULL),
+		     mlx5_ib_stage_caps_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
 		     mlx5_ib_stage_non_default_cb,
 		     NULL),
@@ -6844,7 +7058,7 @@ const struct mlx5_ib_profile raw_eth_profile = {
 		     mlx5_ib_stage_flow_db_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
 		     mlx5_ib_stage_caps_init,
-		     NULL),
+		     mlx5_ib_stage_caps_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
 		     mlx5_ib_stage_raw_eth_non_default_cb,
 		     NULL),
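
alloc_var_entry() in the main.c hunks above hands out doorbell pages by scanning var_table->bitmap for the first clear bit under bitmap_lock, and mlx5_ib_mmap_free() clears the bit again when the entry goes away. A standalone sketch of that bitmap index allocator (locking omitted, sizes made up, not the driver code):

    #include <errno.h>
    #include <stdio.h>

    #define NUM_ENTRIES 64                  /* assumed number of hardware entries */

    static unsigned long long bitmap;       /* one bit per entry */

    /* Find the first clear bit, mark it used, and return its index. */
    static int alloc_index(void)
    {
            for (int i = 0; i < NUM_ENTRIES; i++) {
                    if (!(bitmap & (1ULL << i))) {
                            bitmap |= 1ULL << i;
                            return i;
                    }
            }
            return -ENOSPC;                 /* all entries in use */
    }

    static void free_index(int idx)
    {
            bitmap &= ~(1ULL << idx);
    }

    int main(void)
    {
            int a = alloc_index();
            int b = alloc_index();

            printf("allocated %d and %d\n", a, b);
            free_index(a);
            printf("reallocated %d\n", alloc_index());      /* reuses the freed slot */
            return 0;
    }

In the driver the scan is serialized by a mutex; the sketch leaves that out to keep the allocation idiom itself visible.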
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 048f4e9..b90a364 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -101,18 +101,6 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 	*count = i;
 }
 
-static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
-{
-	u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
-
-	if (umem_dma & ODP_READ_ALLOWED_BIT)
-		mtt_entry |= MLX5_IB_MTT_READ;
-	if (umem_dma & ODP_WRITE_ALLOWED_BIT)
-		mtt_entry |= MLX5_IB_MTT_WRITE;
-
-	return mtt_entry;
-}
-
 /*
  * Populate the given array with bus addresses from the umem.
  *
@@ -139,19 +127,6 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 	struct scatterlist *sg;
 	int entry;
 
-	if (umem->is_odp) {
-		WARN_ON(shift != 0);
-		WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
-
-		for (i = 0; i < num_pages; ++i) {
-			dma_addr_t pa =
-				to_ib_umem_odp(umem)->dma_list[offset + i];
-
-			pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
-		}
-		return;
-	}
-
 	i = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
 		len = sg_dma_len(sg) >> PAGE_SHIFT;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 77d495b..d9bffcc 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -72,6 +72,11 @@
 #define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
 
 enum {
+	MLX5_IB_MMAP_OFFSET_START = 9,
+	MLX5_IB_MMAP_OFFSET_END = 255,
+};
+
+enum {
 	MLX5_IB_MMAP_CMD_SHIFT	= 8,
 	MLX5_IB_MMAP_CMD_MASK	= 0xff,
 };
@@ -120,6 +125,7 @@ enum {
 
 enum mlx5_ib_mmap_type {
 	MLX5_IB_MMAP_TYPE_MEMIC = 1,
+	MLX5_IB_MMAP_TYPE_VAR = 2,
 };
 
 #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)                                        \
@@ -563,6 +569,7 @@ struct mlx5_user_mmap_entry {
 	struct rdma_user_mmap_entry rdma_entry;
 	u8 mmap_flag;
 	u64 address;
+	u32 page_idx;
 };
 
 struct mlx5_ib_dm {
@@ -959,6 +966,15 @@ struct mlx5_devx_event_table {
 	struct xarray event_xa;
 };
 
+struct mlx5_var_table {
+	/* serialize updating the bitmap */
+	struct mutex bitmap_lock;
+	unsigned long *bitmap;
+	u64 hw_start_addr;
+	u32 stride_size;
+	u64 num_var_hw_entries;
+};
+
 struct mlx5_ib_dev {
 	struct ib_device		ib_dev;
 	struct mlx5_core_dev		*mdev;
@@ -1013,6 +1029,7 @@ struct mlx5_ib_dev {
 	struct mlx5_srq_table   srq_table;
 	struct mlx5_async_ctx   async_ctx;
 	struct mlx5_devx_event_table devx_event_table;
+	struct mlx5_var_table var_table;
 
 	struct xarray sig_mrs;
 };
@@ -1276,8 +1293,8 @@ void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
 int __init mlx5_ib_odp_init(void);
 void mlx5_ib_odp_cleanup(void);
 void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
-void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
-			   size_t nentries, struct mlx5_ib_mr *mr, int flags);
+void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+			   struct mlx5_ib_mr *mr, int flags);
 
 int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 			       enum ib_uverbs_advise_mr_advice advice,
@@ -1293,9 +1310,8 @@ static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
 static inline int mlx5_ib_odp_init(void) { return 0; }
 static inline void mlx5_ib_odp_cleanup(void)				    {}
 static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
-static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
-					 size_t nentries, struct mlx5_ib_mr *mr,
-					 int flags) {}
+static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+					 struct mlx5_ib_mr *mr, int flags) {}
 
 static inline int
 mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
@@ -1363,14 +1379,14 @@ int mlx5_ib_fill_res_entry(struct sk_buff *msg,
 int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
 			    struct rdma_restrack_entry *res);
 
+extern const struct uapi_definition mlx5_ib_devx_defs[];
+extern const struct uapi_definition mlx5_ib_flow_defs[];
+
 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
 int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
 void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
 void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev);
 void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev);
-const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
-extern const struct uapi_definition mlx5_ib_devx_defs[];
-extern const struct uapi_definition mlx5_ib_flow_defs[];
 struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
 	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
 	struct mlx5_flow_context *flow_context,
@@ -1378,7 +1394,6 @@ struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
 	void *cmd_in, int inlen, int dest_id, int dest_type);
 bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
 bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id);
-int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
 void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
 #else
 static inline int
@@ -1507,7 +1522,7 @@ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
 u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);
 
 static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
-				       bool do_modify_atomic)
+				       bool do_modify_atomic, int access_flags)
 {
 	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
 		return false;
@@ -1517,6 +1532,9 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
 	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
 		return false;
 
+	if (access_flags & IB_ACCESS_RELAXED_ORDERING)
+		return false;
+
 	return true;
 }
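
The new MLX5_IB_MMAP_OFFSET_START/END constants above pair with mlx5_entry_to_mmap_offset() in the main.c hunk, which rebuilds the user-visible offset by splitting the 16-bit entry index around the 8-bit command field: high index bits above bit 16, the command at MLX5_IB_MMAP_CMD_SHIFT, and the low index byte in bits 0-7, all shifted left by PAGE_SHIFT. A standalone round-trip sketch of that packing (PAGE_SHIFT assumed to be 12; the decode side is an illustrative inverse, not the driver's parser):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT      12              /* assumed 4K pages */
    #define CMD_SHIFT       8               /* mirrors MLX5_IB_MMAP_CMD_SHIFT */

    /* Pack a command and a 16-bit entry index the way
     * mlx5_entry_to_mmap_offset() splits them around the command byte. */
    static uint64_t encode_offset(uint16_t cmd, uint16_t index)
    {
            return (uint64_t)(((index >> 8) << 16) | (cmd << CMD_SHIFT) |
                              (index & 0xFF)) << PAGE_SHIFT;
    }

    /* Inverse of the packing above (illustrative only). */
    static void decode_offset(uint64_t offset, uint16_t *cmd, uint16_t *index)
    {
            uint64_t pgoff = offset >> PAGE_SHIFT;

            *cmd = (pgoff >> CMD_SHIFT) & 0xFF;
            *index = ((pgoff >> 16) << 8) | (pgoff & 0xFF);
    }

    int main(void)
    {
            uint16_t cmd, index;
            uint64_t off = encode_offset(9 /* e.g. the offset-start command */, 0x0123);

            decode_offset(off, &cmd, &index);
            assert(cmd == 9 && index == 0x0123);
            printf("offset 0x%llx -> cmd %u index 0x%x\n",
                   (unsigned long long)off, (unsigned)cmd, (unsigned)index);
            return 0;
    }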
 
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 44a0ee6..6fa0a83 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -147,7 +147,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 			break;
 		}
 		mr->order = ent->order;
-		mr->allocated_from_cache = 1;
+		mr->allocated_from_cache = true;
 		mr->dev = dev;
 
 		MLX5_SET(mkc, mkc, free, 1);
@@ -661,12 +661,21 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
 					  struct ib_pd *pd)
 {
+	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+
 	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
 	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
 	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
 	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
 	MLX5_SET(mkc, mkc, lr, 1);
 
+	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+		MLX5_SET(mkc, mkc, relaxed_ordering_write,
+			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
+	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+		MLX5_SET(mkc, mkc, relaxed_ordering_read,
+			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
+
 	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
 	MLX5_SET64(mkc, mkc, start_addr, start_addr);
@@ -867,36 +876,6 @@ static struct mlx5_ib_mr *alloc_mr_from_cache(
 	return mr;
 }
 
-static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
-			       void *xlt, int page_shift, size_t size,
-			       int flags)
-{
-	struct mlx5_ib_dev *dev = mr->dev;
-	struct ib_umem *umem = mr->umem;
-
-	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
-		if (!umr_can_use_indirect_mkey(dev))
-			return -EPERM;
-		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
-		return npages;
-	}
-
-	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
-
-	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
-		__mlx5_ib_populate_pas(dev, umem, page_shift,
-				       idx, npages, xlt,
-				       MLX5_IB_MTT_PRESENT);
-		/* Clear padding after the pages
-		 * brought from the umem.
-		 */
-		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
-		       size - npages * sizeof(struct mlx5_mtt));
-	}
-
-	return npages;
-}
-
 #define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
 			    MLX5_UMR_MTT_ALIGNMENT)
 #define MLX5_SPARE_UMR_CHUNK 0x10000
@@ -920,6 +899,7 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	size_t pages_mapped = 0;
 	size_t pages_to_map = 0;
 	size_t pages_iter = 0;
+	size_t size_to_map = 0;
 	gfp_t gfp;
 	bool use_emergency_page = false;
 
@@ -966,6 +946,15 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 		goto free_xlt;
 	}
 
+	if (mr->umem->is_odp) {
+		if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
+			struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+			size_t max_pages = ib_umem_odp_num_pages(odp) - idx;
+
+			pages_to_map = min_t(size_t, pages_to_map, max_pages);
+		}
+	}
+
 	sg.addr = dma;
 	sg.lkey = dev->umrc.pd->local_dma_lkey;
 
@@ -988,14 +977,22 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	     pages_mapped < pages_to_map && !err;
 	     pages_mapped += pages_iter, idx += pages_iter) {
 		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
+		size_to_map = npages * desc_size;
 		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
-		npages = populate_xlt(mr, idx, npages, xlt,
-				      page_shift, size, flags);
-
+		if (mr->umem->is_odp) {
+			mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+		} else {
+			__mlx5_ib_populate_pas(dev, mr->umem, page_shift, idx,
+					       npages, xlt,
+					       MLX5_IB_MTT_PRESENT);
+			/* Clear padding after the pages
+			 * brought from the umem.
+			 */
+			memset(xlt + size_to_map, 0, size - size_to_map);
+		}
 		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
 
-		sg.length = ALIGN(npages * desc_size,
-				  MLX5_UMR_MTT_ALIGNMENT);
+		sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
 
 		if (pages_mapped + pages_iter >= pages_to_map) {
 			if (flags & MLX5_IB_UPD_XLT_ENABLE)
@@ -1074,6 +1071,12 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 	MLX5_SET(mkc, mkc, free, !populate);
 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+		MLX5_SET(mkc, mkc, relaxed_ordering_write,
+			 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+		MLX5_SET(mkc, mkc, relaxed_ordering_read,
+			 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
 	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
 	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
 	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
@@ -1264,7 +1267,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (err < 0)
 		return ERR_PTR(err);
 
-	use_umr = mlx5_ib_can_use_umr(dev, true);
+	use_umr = mlx5_ib_can_use_umr(dev, true, access_flags);
 
 	if (order <= mr_cache_max_order(dev) && use_umr) {
 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
@@ -1431,7 +1434,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 			goto err;
 	}
 
-	if (!mlx5_ib_can_use_umr(dev, true) ||
+	if (!mlx5_ib_can_use_umr(dev, true, access_flags) ||
 	    (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
 		/*
 		 * UMR can't be used - MKey needs to be replaced.
@@ -1452,7 +1455,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 			goto err;
 		}
 
-		mr->allocated_from_cache = 0;
+		mr->allocated_from_cache = false;
 	} else {
 		/*
 		 * Send a UMR WQE
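
In the mr.c hunks above, mlx5_ib_update_xlt() now computes size_to_map per chunk and, on the non-ODP path, zeroes the staging buffer past the last valid descriptor before the DMA sync, so a short final chunk never carries stale entries from the previous iteration. A standalone sketch of that chunk-then-zero-padding pattern (made-up sizes, not the driver code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DESC_SIZE       8               /* bytes per translation descriptor (assumed) */
    #define CHUNK_BYTES     64              /* staging buffer reused for every chunk */

    static void fill_chunk(uint8_t *buf, size_t npages)
    {
            size_t size_to_map = npages * DESC_SIZE;

            /* Populate only the descriptors valid for this chunk... */
            for (size_t i = 0; i < size_to_map; i++)
                    buf[i] = 0xAB;
            /* ...and clear the padding so stale bytes from the previous
             * iteration are never pushed to the device. */
            memset(buf + size_to_map, 0, CHUNK_BYTES - size_to_map);
    }

    int main(void)
    {
            uint8_t staging[CHUNK_BYTES];

            fill_chunk(staging, 8);         /* full chunk */
            fill_chunk(staging, 3);         /* short final chunk: tail is zeroed */
            printf("byte after last descriptor: 0x%02x\n", staging[3 * DESC_SIZE]);
            return 0;
    }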
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 0afb004..4216814 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -93,8 +93,8 @@ struct mlx5_pagefault {
 
 static u64 mlx5_imr_ksm_entries;
 
-void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
-			   struct mlx5_ib_mr *imr, int flags)
+static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
+			struct mlx5_ib_mr *imr, int flags)
 {
 	struct mlx5_klm *end = pklm + nentries;
 
@@ -144,6 +144,44 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
 	}
 }
 
+static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
+{
+	u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
+
+	if (umem_dma & ODP_READ_ALLOWED_BIT)
+		mtt_entry |= MLX5_IB_MTT_READ;
+	if (umem_dma & ODP_WRITE_ALLOWED_BIT)
+		mtt_entry |= MLX5_IB_MTT_WRITE;
+
+	return mtt_entry;
+}
+
+static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
+			 struct mlx5_ib_mr *mr, int flags)
+{
+	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+	dma_addr_t pa;
+	size_t i;
+
+	if (flags & MLX5_IB_UPD_XLT_ZAP)
+		return;
+
+	for (i = 0; i < nentries; i++) {
+		pa = odp->dma_list[idx + i];
+		pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
+	}
+}
+
+void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+			   struct mlx5_ib_mr *mr, int flags)
+{
+	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
+		populate_klm(xlt, idx, nentries, mr, flags);
+	} else {
+		populate_mtt(xlt, idx, nentries, mr, flags);
+	}
+}
+
 static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
 {
 	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
@@ -342,7 +380,7 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 	memset(caps, 0, sizeof(*caps));
 
 	if (!MLX5_CAP_GEN(dev->mdev, pg) ||
-	    !mlx5_ib_can_use_umr(dev, true))
+	    !mlx5_ib_can_use_umr(dev, true, 0))
 		return;
 
 	caps->general_caps = IB_ODP_SUPPORT;
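
populate_mtt() in the odp.c hunk above converts each dma_list entry with umem_dma_to_mtt(), masking off the software permission bits carried in the low bits of the address and re-expressing them as the device's MTT read/write flags. A standalone sketch of that flag-translation idiom (bit positions invented for illustration, not the real ODP/MTT encodings):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical software-side flag bits stashed in the low bits of an address. */
    #define SW_READ_ALLOWED         (1ULL << 0)
    #define SW_WRITE_ALLOWED        (1ULL << 1)
    #define SW_ADDR_MASK            (~0x3ULL)

    /* Hypothetical device-side flag bits. */
    #define DEV_MTT_READ            (1ULL << 2)
    #define DEV_MTT_WRITE           (1ULL << 3)

    static uint64_t dma_to_mtt(uint64_t dma)
    {
            uint64_t mtt = dma & SW_ADDR_MASK;      /* keep the address, drop sw flags */

            if (dma & SW_READ_ALLOWED)
                    mtt |= DEV_MTT_READ;
            if (dma & SW_WRITE_ALLOWED)
                    mtt |= DEV_MTT_WRITE;
            return mtt;
    }

    int main(void)
    {
            uint64_t entry = 0x1000 | SW_READ_ALLOWED | SW_WRITE_ALLOWED;
            uint64_t mtt = dma_to_mtt(entry);

            assert(mtt == (0x1000 | DEV_MTT_READ | DEV_MTT_WRITE));
            printf("mtt entry: 0x%llx\n", (unsigned long long)mtt);
            return 0;
    }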
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ae7cbd9..a4f8e70 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1918,7 +1918,7 @@ static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
 {
 	enum ib_qp_type qpt = init_attr->qp_type;
 	int scqe_sz;
-	bool allow_scat_cqe = 0;
+	bool allow_scat_cqe = false;
 
 	if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
 		return;
@@ -4870,7 +4870,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 	bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
 	u8 flags = 0;
 
-	if (!mlx5_ib_can_use_umr(dev, atomic)) {
+	if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) {
 		mlx5_ib_warn(to_mdev(qp->ibqp.device),
 			     "Fast update of %s for MR is disabled\n",
 			     (MLX5_CAP_GEN(dev->mdev,
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index edccfd6..78a48ae 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 		goto out;
 	}
 
-	ret = get_user_pages_fast(uaddr & PAGE_MASK, 1,
+	ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1,
 				  FOLL_WRITE | FOLL_LONGTERM, pages);
 	if (ret < 0)
 		goto out;
@@ -482,7 +482,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 
 	ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
 	if (ret < 0) {
-		put_user_page(pages[0]);
+		unpin_user_page(pages[0]);
 		goto out;
 	}
 
@@ -490,7 +490,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 				 mthca_uarc_virt(dev, uar, i));
 	if (ret) {
 		pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
-		put_user_page(sg_page(&db_tab->page[i].mem));
+		unpin_user_page(sg_page(&db_tab->page[i].mem));
 		goto out;
 	}
 
@@ -556,7 +556,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
 		if (db_tab->page[i].uvirt) {
 			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
 			pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
-			put_user_page(sg_page(&db_tab->page[i].mem));
+			unpin_user_page(sg_page(&db_tab->page[i].mem));
 		}
 	}
 
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 920f35e..484b555 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -312,7 +312,18 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 	}
 	ctx->db_mmap_entry = &entry->rdma_entry;
 
-	uresp.dpm_enabled = dev->user_dpm_enabled;
+	if (!dev->user_dpm_enabled)
+		uresp.dpm_flags = 0;
+	else if (rdma_protocol_iwarp(&dev->ibdev, 1))
+		uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
+	else
+		uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
+				  QEDR_DPM_TYPE_ROCE_LEGACY;
+
+	uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
+	uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
+	uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
+
 	uresp.wids_enabled = 1;
 	uresp.wid_count = oparams.wid_count;
 	uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 6bf764e..342e317 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -40,7 +40,7 @@
 static void __qib_release_user_pages(struct page **p, size_t num_pages,
 				     int dirty)
 {
-	put_user_pages_dirty_lock(p, num_pages, dirty);
+	unpin_user_pages_dirty_lock(p, num_pages, dirty);
 }
 
 /**
@@ -108,7 +108,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 
 	down_read(&current->mm->mmap_sem);
 	for (got = 0; got < num_pages; got += ret) {
-		ret = get_user_pages(start_page + got * PAGE_SIZE,
+		ret = pin_user_pages(start_page + got * PAGE_SIZE,
 				     num_pages - got,
 				     FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
 				     p + got, NULL);
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 05190ed..a67599b 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -317,7 +317,7 @@ static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
 		 * the caller can ignore this page.
 		 */
 		if (put) {
-			put_user_page(page);
+			unpin_user_page(page);
 		} else {
 			/* coalesce case */
 			kunmap(page);
@@ -631,7 +631,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
 			kunmap(pkt->addr[i].page);
 
 		if (pkt->addr[i].put_page)
-			put_user_page(pkt->addr[i].page);
+			unpin_user_page(pkt->addr[i].page);
 		else
 			__free_page(pkt->addr[i].page);
 	} else if (pkt->addr[i].kvaddr) {
@@ -670,7 +670,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
 		else
 			j = npages;
 
-		ret = get_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
+		ret = pin_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
 		if (ret != j) {
 			i = 0;
 			j = ret;
@@ -706,7 +706,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
 	/* if error, return all pages not managed by pkt */
 free_pages:
 	while (i < j)
-		put_user_page(pages[i++]);
+		unpin_user_page(pages[i++]);
 
 done:
 	return ret;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 62e6ffa..bd9f944 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -75,7 +75,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
 		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
 			page = sg_page(sg);
 			pa = sg_phys(sg);
-			put_user_pages_dirty_lock(&page, 1, dirty);
+			unpin_user_pages_dirty_lock(&page, 1, dirty);
 			usnic_dbg("pa: %pa\n", &pa);
 		}
 		kfree(chunk);
@@ -141,7 +141,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	ret = 0;
 
 	while (npages) {
-		ret = get_user_pages(cur_base,
+		ret = pin_user_pages(cur_base,
 				     min_t(unsigned long, npages,
 				     PAGE_SIZE / sizeof(struct page *)),
 				     gup_flags | FOLL_LONGTERM,
diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c
index 890d7b7..977906c 100644
--- a/drivers/infiniband/sw/rdmavt/rc.c
+++ b/drivers/infiniband/sw/rdmavt/rc.c
@@ -195,7 +195,14 @@ void rvt_get_credit(struct rvt_qp *qp, u32 aeth)
 }
 EXPORT_SYMBOL(rvt_get_credit);
 
-/* rvt_restart_sge - rewind the sge state for a wqe */
+/**
+ * rvt_restart_sge - rewind the sge state for a wqe
+ * @ss: the sge state pointer
+ * @wqe: the wqe to rewind
+ * @len: the data length from the start of the wqe in bytes
+ *
+ * Returns the remaining data length.
+ */
 u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len)
 {
 	ss->sge = wqe->sg_list[0];
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 353c666..f59616b 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -34,6 +34,8 @@
 #ifndef RXE_PARAM_H
 #define RXE_PARAM_H
 
+#include <uapi/rdma/rdma_user_rxe.h>
+
 static inline enum ib_mtu rxe_mtu_int_to_enum(int mtu)
 {
 	if (mtu < 256)
@@ -64,7 +66,6 @@ enum rxe_device_param {
 	RXE_PAGE_SIZE_CAP		= 0xfffff000,
 	RXE_MAX_QP			= 0x10000,
 	RXE_MAX_QP_WR			= 0x4000,
-	RXE_MAX_INLINE_DATA		= 400,
 	RXE_DEVICE_CAP_FLAGS		= IB_DEVICE_BAD_PKEY_CNTR
 					| IB_DEVICE_BAD_QKEY_CNTR
 					| IB_DEVICE_AUTO_PATH_MIG
@@ -77,6 +78,10 @@ enum rxe_device_param {
 					| IB_DEVICE_MEM_MGT_EXTENSIONS
 					| IB_DEVICE_ALLOW_USER_UNREG,
 	RXE_MAX_SGE			= 32,
+	RXE_MAX_WQE_SIZE		= sizeof(struct rxe_send_wqe) +
+					  sizeof(struct ib_sge) * RXE_MAX_SGE,
+	RXE_MAX_INLINE_DATA		= RXE_MAX_WQE_SIZE -
+					  sizeof(struct rxe_send_wqe),
 	RXE_MAX_SGE_RD			= 32,
 	RXE_MAX_CQ			= 16384,
 	RXE_MAX_LOG_CQE			= 15,
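
The rxe_param.h change above derives RXE_MAX_INLINE_DATA from RXE_MAX_WQE_SIZE minus the WQE header, and the rxe_qp.c hunk below sizes the runtime WQE from whichever is larger, the requested SGE array or the requested inline data, then reports both caps back as what actually fits in that shared region. A standalone sketch of that shared-space sizing (illustrative struct sizes only, not the rxe code):

    #include <stdint.h>
    #include <stdio.h>

    struct sge { uint64_t addr; uint32_t length; uint32_t lkey; };  /* 16 bytes */
    struct send_wqe_hdr { uint64_t wr_id; uint32_t opcode; uint32_t num_sge; };

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            unsigned int req_sge = 4;       /* requested max_send_sge */
            unsigned int req_inline = 100;  /* requested max_inline_data */

            /* SGE array and inline data occupy the same region after the header. */
            unsigned int payload = MAX(req_sge * sizeof(struct sge), req_inline);
            unsigned int max_sge = payload / sizeof(struct sge);
            unsigned int max_inline = payload;
            unsigned int wqe_size = sizeof(struct send_wqe_hdr) + payload;

            printf("wqe_size=%u max_sge=%u max_inline=%u\n",
                   wqe_size, max_sge, max_inline);
            return 0;
    }

Because the two uses share one buffer, asking for more inline data silently raises the usable SGE count and vice versa, which is exactly what the adjusted caps returned to the caller express.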
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index e2c6d1c..ec21f61 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -237,19 +237,17 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 	 */
 	qp->src_port = RXE_ROCE_V2_SPORT +
 		(hash_32_generic(qp_num(qp), 14) & 0x3fff);
-
 	qp->sq.max_wr		= init->cap.max_send_wr;
-	qp->sq.max_sge		= init->cap.max_send_sge;
-	qp->sq.max_inline	= init->cap.max_inline_data;
 
-	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
-			 qp->sq.max_sge * sizeof(struct ib_sge),
-			 sizeof(struct rxe_send_wqe) +
-			 qp->sq.max_inline);
+	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
+	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
+			 init->cap.max_inline_data);
+	qp->sq.max_sge = init->cap.max_send_sge =
+		wqe_size / sizeof(struct ib_sge);
+	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
+	wqe_size += sizeof(struct rxe_send_wqe);
 
-	qp->sq.queue = rxe_queue_init(rxe,
-				      &qp->sq.max_wr,
-				      wqe_size);
+	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
 	if (!qp->sq.queue)
 		return -ENOMEM;
 
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 9583420..92de39c 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -408,7 +408,7 @@ struct rxe_dev {
 	struct list_head	pending_mmaps;
 
 	spinlock_t		mmap_offset_lock; /* guard mmap_offset */
-	int			mmap_offset;
+	u64			mmap_offset;
 
 	atomic64_t		stats_counters[RXE_NUM_OF_COUNTERS];
 
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index b939f489..af5e9f8 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -7,6 +7,7 @@
 #define _SIW_H
 
 #include <rdma/ib_verbs.h>
+#include <rdma/restrack.h>
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <crypto/hash.h>
@@ -209,7 +210,6 @@ struct siw_cq {
 	u32 cq_put;
 	u32 cq_get;
 	u32 num_cqe;
-	bool kernel_verbs;
 	struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
 	u32 id; /* For debugging only */
 };
@@ -254,8 +254,8 @@ struct siw_srq {
 	u32 rq_get;
 	u32 num_rqe; /* max # of wqe's allowed */
 	struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
-	char armed; /* inform user if limit hit */
-	char kernel_verbs; /* '1' if kernel client */
+	bool armed:1; /* inform user if limit hit */
+	bool is_kernel_res:1; /* true if kernel client */
 };
 
 struct siw_qp_attrs {
@@ -418,13 +418,11 @@ struct siw_iwarp_tx {
 };
 
 struct siw_qp {
+	struct ib_qp base_qp;
 	struct siw_device *sdev;
-	struct ib_qp *ib_qp;
 	struct kref ref;
-	u32 qp_num;
 	struct list_head devq;
 	int tx_cpu;
-	bool kernel_verbs;
 	struct siw_qp_attrs attrs;
 
 	struct siw_cep *cep;
@@ -472,11 +470,6 @@ struct siw_qp {
 	struct rcu_head rcu;
 };
 
-struct siw_base_qp {
-	struct ib_qp base_qp;
-	struct siw_qp *qp;
-};
-
 /* helper macros */
 #define rx_qp(rx) container_of(rx, struct siw_qp, rx_stream)
 #define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
@@ -572,14 +565,9 @@ static inline struct siw_ucontext *to_siw_ctx(struct ib_ucontext *base_ctx)
 	return container_of(base_ctx, struct siw_ucontext, base_ucontext);
 }
 
-static inline struct siw_base_qp *to_siw_base_qp(struct ib_qp *base_qp)
-{
-	return container_of(base_qp, struct siw_base_qp, base_qp);
-}
-
 static inline struct siw_qp *to_siw_qp(struct ib_qp *base_qp)
 {
-	return to_siw_base_qp(base_qp)->qp;
+	return container_of(base_qp, struct siw_qp, base_qp);
 }
 
 static inline struct siw_cq *to_siw_cq(struct ib_cq *base_cq)
@@ -624,7 +612,7 @@ static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
 
 static inline u32 qp_id(struct siw_qp *qp)
 {
-	return qp->qp_num;
+	return qp->base_qp.qp_num;
 }
 
 static inline void siw_qp_get(struct siw_qp *qp)
@@ -735,7 +723,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
 		  "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
 
 #define siw_dbg_cep(cep, fmt, ...)                                             \
-	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt,                  \
+	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt,                 \
 		  cep, __func__, ##__VA_ARGS__)
 
 void siw_cq_flush(struct siw_cq *cq);
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 3bccfef4..0c3f058 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -29,7 +29,7 @@
  * MPA_V2_RDMA_NO_RTR, MPA_V2_RDMA_READ_RTR, MPA_V2_RDMA_WRITE_RTR
  */
 static __be16 rtr_type = MPA_V2_RDMA_READ_RTR | MPA_V2_RDMA_WRITE_RTR;
-static const bool relaxed_ird_negotiation = 1;
+static const bool relaxed_ird_negotiation = true;
 
 static void siw_cm_llp_state_change(struct sock *s);
 static void siw_cm_llp_data_ready(struct sock *s);
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index d8db3be..d68e378 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -65,7 +65,7 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
 		 * reaped here, which do not hold a QP reference
 		 * and do not qualify for memory extension verbs.
 		 */
-		if (likely(cq->kernel_verbs)) {
+		if (likely(rdma_is_kernel_res(&cq->base_cq.res))) {
 			if (cqe->flags & SIW_WQE_REM_INVAL) {
 				wc->ex.invalidate_rkey = cqe->inval_stag;
 				wc->wc_flags = IB_WC_WITH_INVALIDATE;
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index c147f06..96ed349 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -244,7 +244,7 @@ static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
 		 * siw_qp_id2obj() increments object reference count
 		 */
 		siw_qp_put(qp);
-		return qp->ib_qp;
+		return &qp->base_qp;
 	}
 	return NULL;
 }
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index e99983f..e2061dc 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -63,7 +63,7 @@ struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index)
 static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
 			   bool dirty)
 {
-	put_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
+	unpin_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
 }
 
 void siw_umem_release(struct siw_umem *umem, bool dirty)
@@ -426,7 +426,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
 		while (nents) {
 			struct page **plist = &umem->page_chunk[i].plist[got];
 
-			rv = get_user_pages(first_page_va, nents,
+			rv = pin_user_pages(first_page_va, nents,
 					    foll_flags | FOLL_LONGTERM,
 					    plist, NULL);
 			if (rv < 0)
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index b431748..875d36d 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -1070,8 +1070,8 @@ int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
 			cqe->imm_data = 0;
 			cqe->bytes = bytes;
 
-			if (cq->kernel_verbs)
-				cqe->base_qp = qp->ib_qp;
+			if (rdma_is_kernel_res(&cq->base_cq.res))
+				cqe->base_qp = &qp->base_qp;
 			else
 				cqe->qp_id = qp_id(qp);
 
@@ -1128,8 +1128,8 @@ int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
 			cqe->imm_data = 0;
 			cqe->bytes = bytes;
 
-			if (cq->kernel_verbs) {
-				cqe->base_qp = qp->ib_qp;
+			if (rdma_is_kernel_res(&cq->base_cq.res)) {
+				cqe->base_qp = &qp->base_qp;
 				if (inval_stag) {
 					cqe_flags |= SIW_WQE_REM_INVAL;
 					cqe->inval_stag = inval_stag;
@@ -1297,13 +1297,12 @@ void siw_rq_flush(struct siw_qp *qp)
 
 int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
 {
-	int rv = xa_alloc(&sdev->qp_xa, &qp->ib_qp->qp_num, qp, xa_limit_32b,
+	int rv = xa_alloc(&sdev->qp_xa, &qp->base_qp.qp_num, qp, xa_limit_32b,
 			  GFP_KERNEL);
 
 	if (!rv) {
 		kref_init(&qp->ref);
 		qp->sdev = sdev;
-		qp->qp_num = qp->ib_qp->qp_num;
 		siw_dbg_qp(qp, "new QP\n");
 	}
 	return rv;
@@ -1312,7 +1311,6 @@ int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
 void siw_free_qp(struct kref *ref)
 {
 	struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
-	struct siw_base_qp *siw_base_qp = to_siw_base_qp(qp->ib_qp);
 	struct siw_device *sdev = qp->sdev;
 	unsigned long flags;
 
@@ -1335,5 +1333,4 @@ void siw_free_qp(struct kref *ref)
 	atomic_dec(&sdev->num_qp);
 	siw_dbg_qp(qp, "free QP\n");
 	kfree_rcu(qp, rcu);
-	kfree(siw_base_qp);
 }
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index c0a8872..9ccce29 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -68,7 +68,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
 			return -EFAULT;
 		}
 		if (srx->mpa_crc_hd) {
-			if (rx_qp(srx)->kernel_verbs) {
+			if (rdma_is_kernel_res(&rx_qp(srx)->base_qp.res)) {
 				crypto_shash_update(srx->mpa_crc_hd,
 					(u8 *)(dest + pg_off), bytes);
 				kunmap_atomic(dest);
@@ -388,7 +388,7 @@ static struct siw_wqe *siw_rqe_get(struct siw_qp *qp)
 				struct siw_rqe *rqe2 = &srq->recvq[off];
 
 				if (!(rqe2->flags & SIW_WQE_VALID)) {
-					srq->armed = 0;
+					srq->armed = false;
 					srq_event = true;
 				}
 			}
@@ -1264,7 +1264,7 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error)
 
 			if (wc_status == SIW_WC_SUCCESS)
 				wc_status = SIW_WC_GENERAL_ERR;
-		} else if (qp->kernel_verbs &&
+		} else if (rdma_is_kernel_res(&qp->base_qp.res) &&
 			   rx_type(wqe) == SIW_OP_READ_LOCAL_INV) {
 			/*
 			 * Handle any STag invalidation request
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 5d97bba..ae92c80 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -817,7 +817,7 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
 			}
 		} else {
 			wqe->bytes = wqe->sqe.sge[0].length;
-			if (!qp->kernel_verbs) {
+			if (!rdma_is_kernel_res(&qp->base_qp.res)) {
 				if (wqe->bytes > SIW_MAX_INLINE) {
 					rv = -EINVAL;
 					goto tx_error;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 5fd6d64..07e3013 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -303,7 +303,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 			    struct ib_udata *udata)
 {
 	struct siw_qp *qp = NULL;
-	struct siw_base_qp *siw_base_qp = NULL;
 	struct ib_device *base_dev = pd->device;
 	struct siw_device *sdev = to_siw_dev(base_dev);
 	struct siw_ucontext *uctx =
@@ -357,26 +356,16 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 		rv = -EINVAL;
 		goto err_out;
 	}
-	siw_base_qp = kzalloc(sizeof(*siw_base_qp), GFP_KERNEL);
-	if (!siw_base_qp) {
-		rv = -ENOMEM;
-		goto err_out;
-	}
 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
 	if (!qp) {
 		rv = -ENOMEM;
 		goto err_out;
 	}
-	siw_base_qp->qp = qp;
-	qp->ib_qp = &siw_base_qp->base_qp;
-
 	init_rwsem(&qp->state_lock);
 	spin_lock_init(&qp->sq_lock);
 	spin_lock_init(&qp->rq_lock);
 	spin_lock_init(&qp->orq_lock);
 
-	qp->kernel_verbs = !udata;
-
 	rv = siw_qp_add(sdev, qp);
 	if (rv)
 		goto err_out;
@@ -389,10 +378,10 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 	num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
 	num_rqe = roundup_pow_of_two(attrs->cap.max_recv_wr);
 
-	if (qp->kernel_verbs)
-		qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
-	else
+	if (udata)
 		qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
+	else
+		qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
 
 	if (qp->sendq == NULL) {
 		siw_dbg(base_dev, "SQ size %d alloc failed\n", num_sqe);
@@ -419,13 +408,14 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 		 */
 		qp->srq = to_siw_srq(attrs->srq);
 		qp->attrs.rq_size = 0;
-		siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
+		siw_dbg(base_dev, "QP [%u]: SRQ attached\n",
+			qp->base_qp.qp_num);
 	} else if (num_rqe) {
-		if (qp->kernel_verbs)
-			qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
-		else
+		if (udata)
 			qp->recvq =
 				vmalloc_user(num_rqe * sizeof(struct siw_rqe));
+		else
+			qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
 
 		if (qp->recvq == NULL) {
 			siw_dbg(base_dev, "RQ size %d alloc failed\n", num_rqe);
@@ -492,13 +482,11 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 	list_add_tail(&qp->devq, &sdev->qp_list);
 	spin_unlock_irqrestore(&sdev->lock, flags);
 
-	return qp->ib_qp;
+	return &qp->base_qp;
 
 err_out_xa:
 	xa_erase(&sdev->qp_xa, qp_id(qp));
 err_out:
-	kfree(siw_base_qp);
-
 	if (qp) {
 		if (uctx) {
 			rdma_user_mmap_entry_remove(qp->sq_entry);
@@ -742,7 +730,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
 	unsigned long flags;
 	int rv = 0;
 
-	if (wr && !qp->kernel_verbs) {
+	if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
 		siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
 		*bad_wr = wr;
 		return -EINVAL;
@@ -939,7 +927,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
 	if (rv <= 0)
 		goto skip_direct_sending;
 
-	if (qp->kernel_verbs) {
+	if (rdma_is_kernel_res(&qp->base_qp.res)) {
 		rv = siw_sq_start(qp);
 	} else {
 		qp->tx_ctx.in_syscall = 1;
@@ -984,8 +972,8 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
 		*bad_wr = wr;
 		return -EOPNOTSUPP; /* what else from errno.h? */
 	}
-	if (!qp->kernel_verbs) {
-		siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
+	if (!rdma_is_kernel_res(&qp->base_qp.res)) {
+		siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
 		*bad_wr = wr;
 		return -EINVAL;
 	}
@@ -1127,14 +1115,13 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
 	cq->base_cq.cqe = size;
 	cq->num_cqe = size;
 
-	if (!udata) {
-		cq->kernel_verbs = 1;
-		cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
-				    sizeof(struct siw_cq_ctrl));
-	} else {
+	if (udata)
 		cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
 					 sizeof(struct siw_cq_ctrl));
-	}
+	else
+		cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
+				    sizeof(struct siw_cq_ctrl));
+
 	if (cq->queue == NULL) {
 		rv = -ENOMEM;
 		goto err_out;
@@ -1589,9 +1576,9 @@ int siw_create_srq(struct ib_srq *base_srq,
 	srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
 	srq->limit = attrs->srq_limit;
 	if (srq->limit)
-		srq->armed = 1;
+		srq->armed = true;
 
-	srq->kernel_verbs = !udata;
+	srq->is_kernel_res = !udata;
 
 	if (udata)
 		srq->recvq =
@@ -1671,9 +1658,9 @@ int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs,
 				rv = -EINVAL;
 				goto out;
 			}
-			srq->armed = 1;
+			srq->armed = true;
 		} else {
-			srq->armed = 0;
+			srq->armed = false;
 		}
 		srq->limit = attrs->srq_limit;
 	}
@@ -1745,7 +1732,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
 	unsigned long flags;
 	int rv = 0;
 
-	if (unlikely(!srq->kernel_verbs)) {
+	if (unlikely(!srq->is_kernel_res)) {
 		siw_dbg_pd(base_srq->pd,
 			   "[SRQ]: no kernel post_recv for mapped srq\n");
 		rv = -EINVAL;
@@ -1797,7 +1784,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
 void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
 {
 	struct ib_event event;
-	struct ib_qp *base_qp = qp->ib_qp;
+	struct ib_qp *base_qp = &qp->base_qp;
 
 	/*
 	 * Do not report asynchronous errors on QP which gets
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 0f74dc6..7a8f24d 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -527,7 +527,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
 		if (unlikely(err))
 			goto err_reg;
 
-		desc->sig_protected = 1;
+		desc->sig_protected = true;
 	}
 
 	return 0;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 1f4a37a..127887c 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1093,7 +1093,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
 	int ret;
 
 	if (desc && desc->sig_protected) {
-		desc->sig_protected = 0;
+		desc->sig_protected = false;
 		ret = ib_check_mr_status(desc->rsc.sig_mr,
 					 IB_MR_CHECK_SIG_STATUS, &mr_status);
 		if (ret) {
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
index e4c9bf2..4480092 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
@@ -358,7 +358,7 @@ struct opa_veswport_summary_counters {
  * @rx_drop_state: received packets in non-forwarding port state
  * @rx_logic: other receive errors
  *
- * All the above are counters of corresponding erorr conditions.
+ * All the above are counters of corresponding error conditions.
  */
 struct opa_veswport_error_counters {
 	__be16  vp_instance;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index b7f7a5f..cd1181c 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2546,7 +2546,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
 	if (lrsp->opcode == SRP_LOGIN_RSP) {
 		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
 		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
-		ch->use_imm_data  = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
+		ch->use_imm_data  = srp_use_imm_data &&
+			(lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
 		ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
 						      ch->use_imm_data,
 						      target->max_it_iu_size);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 23c782e..9855274 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2810,8 +2810,6 @@ static void srpt_queue_response(struct se_cmd *cmd)
 	int resp_len, ret, i;
 	u8 srp_tm_status;
 
-	BUG_ON(!ch);
-
 	state = ioctx->state;
 	switch (state) {
 	case SRPT_STATE_NEW:
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ee6c323..fce43e6 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1216,13 +1216,12 @@ static int input_proc_devices_open(struct inode *inode, struct file *file)
 	return seq_open(file, &input_devices_seq_ops);
 }
 
-static const struct file_operations input_devices_fileops = {
-	.owner		= THIS_MODULE,
-	.open		= input_proc_devices_open,
-	.poll		= input_proc_devices_poll,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
+static const struct proc_ops input_devices_proc_ops = {
+	.proc_open	= input_proc_devices_open,
+	.proc_poll	= input_proc_devices_poll,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
 };
 
 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
@@ -1280,12 +1279,11 @@ static int input_proc_handlers_open(struct inode *inode, struct file *file)
 	return seq_open(file, &input_handlers_seq_ops);
 }
 
-static const struct file_operations input_handlers_fileops = {
-	.owner		= THIS_MODULE,
-	.open		= input_proc_handlers_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
+static const struct proc_ops input_handlers_proc_ops = {
+	.proc_open	= input_proc_handlers_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
 };
 
 static int __init input_proc_init(void)
@@ -1297,12 +1295,12 @@ static int __init input_proc_init(void)
 		return -ENOMEM;
 
 	entry = proc_create("devices", 0, proc_bus_input_dir,
-			    &input_devices_fileops);
+			    &input_devices_proc_ops);
 	if (!entry)
 		goto fail1;
 
 	entry = proc_create("handlers", 0, proc_bus_input_dir,
-			    &input_handlers_fileops);
+			    &input_handlers_proc_ops);
 	if (!entry)
 		goto fail2;
 
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 17c1cca..c8f87df9 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -191,9 +191,10 @@ static ssize_t axp20x_store_attr_shutdown(struct device *dev,
 				 axp20x_pek->info->shutdown_mask, buf, count);
 }
 
-DEVICE_ATTR(startup, 0644, axp20x_show_attr_startup, axp20x_store_attr_startup);
-DEVICE_ATTR(shutdown, 0644, axp20x_show_attr_shutdown,
-	    axp20x_store_attr_shutdown);
+static DEVICE_ATTR(startup, 0644, axp20x_show_attr_startup,
+		   axp20x_store_attr_startup);
+static DEVICE_ATTR(shutdown, 0644, axp20x_show_attr_shutdown,
+		   axp20x_store_attr_shutdown);
 
 static struct attribute *axp20x_attrs[] = {
 	&dev_attr_startup.attr,
@@ -279,8 +280,7 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
 		return error;
 	}
 
-	if (axp20x_pek->axp20x->variant == AXP288_ID)
-		enable_irq_wake(axp20x_pek->irq_dbr);
+	device_init_wakeup(&pdev->dev, true);
 
 	return 0;
 }
@@ -352,6 +352,40 @@ static int axp20x_pek_probe(struct platform_device *pdev)
 	return 0;
 }
 
+static int __maybe_unused axp20x_pek_suspend(struct device *dev)
+{
+	struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
+
+	/*
+	 * As nested threaded IRQs are not automatically disabled during
+	 * suspend, we must explicitly disable non-wakeup IRQs.
+	 */
+	if (device_may_wakeup(dev)) {
+		enable_irq_wake(axp20x_pek->irq_dbf);
+		enable_irq_wake(axp20x_pek->irq_dbr);
+	} else {
+		disable_irq(axp20x_pek->irq_dbf);
+		disable_irq(axp20x_pek->irq_dbr);
+	}
+
+	return 0;
+}
+
+static int __maybe_unused axp20x_pek_resume(struct device *dev)
+{
+	struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev)) {
+		disable_irq_wake(axp20x_pek->irq_dbf);
+		disable_irq_wake(axp20x_pek->irq_dbr);
+	} else {
+		enable_irq(axp20x_pek->irq_dbf);
+		enable_irq(axp20x_pek->irq_dbr);
+	}
+
+	return 0;
+}
+
 static int __maybe_unused axp20x_pek_resume_noirq(struct device *dev)
 {
 	struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
@@ -371,6 +405,7 @@ static int __maybe_unused axp20x_pek_resume_noirq(struct device *dev)
 }
 
 static const struct dev_pm_ops axp20x_pek_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(axp20x_pek_suspend, axp20x_pek_resume)
 #ifdef CONFIG_PM_SLEEP
 	.resume_noirq = axp20x_pek_resume_noirq,
 #endif
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index bbf9ae9..6adea8a 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -412,6 +412,10 @@ struct f11_2d_sensor_queries {
 
 /* Defs for Ctrl0. */
 #define RMI_F11_REPORT_MODE_MASK        0x07
+#define RMI_F11_REPORT_MODE_CONTINUOUS  (0 << 0)
+#define RMI_F11_REPORT_MODE_REDUCED     (1 << 0)
+#define RMI_F11_REPORT_MODE_FS_CHANGE   (2 << 0)
+#define RMI_F11_REPORT_MODE_FP_CHANGE   (3 << 0)
 #define RMI_F11_ABS_POS_FILT            (1 << 3)
 #define RMI_F11_REL_POS_FILT            (1 << 4)
 #define RMI_F11_REL_BALLISTICS          (1 << 5)
@@ -1195,6 +1199,16 @@ static int rmi_f11_initialize(struct rmi_function *fn)
 		ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD] =
 			sensor->axis_align.delta_y_threshold;
 
+	/*
+	 * If distance threshold values are set, switch to reduced reporting
+	 * mode so they actually get used by the controller.
+	 */
+	if (ctrl->ctrl0_11[RMI_F11_DELTA_X_THRESHOLD] ||
+	    ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD]) {
+		ctrl->ctrl0_11[0] &= ~RMI_F11_REPORT_MODE_MASK;
+		ctrl->ctrl0_11[0] |= RMI_F11_REPORT_MODE_REDUCED;
+	}
+
 	if (f11->sens_query.has_dribble) {
 		switch (sensor->dribble) {
 		case RMI_REG_STATE_OFF:
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index f3e18f8..373a164 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -165,6 +165,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called maceps2.
 
+config SERIO_SGI_IOC3
+	tristate "SGI IOC3 PS/2 controller"
+	depends on SGI_MFD_IOC3
+	help
+	  Say Y here if you have an SGI Onyx2, SGI Octane or IOC3 PCI card
+	  and you want to attach and use a keyboard, mouse, or both.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ioc3kbd.
+
 config SERIO_LIBPS2
 	tristate "PS/2 driver library"
 	depends on SERIO_I8042 || SERIO_I8042=n
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index 67950a5cc..6d97bad 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_SERIO_PCIPS2)	+= pcips2.o
 obj-$(CONFIG_SERIO_PS2MULT)	+= ps2mult.o
 obj-$(CONFIG_SERIO_MACEPS2)	+= maceps2.o
+obj-$(CONFIG_SERIO_SGI_IOC3)	+= ioc3kbd.o
 obj-$(CONFIG_SERIO_LIBPS2)	+= libps2.o
 obj-$(CONFIG_SERIO_RAW)		+= serio_raw.o
 obj-$(CONFIG_SERIO_AMS_DELTA)	+= ams_delta_serio.o
diff --git a/drivers/input/serio/apbps2.c b/drivers/input/serio/apbps2.c
index f290d5d..594ac4e 100644
--- a/drivers/input/serio/apbps2.c
+++ b/drivers/input/serio/apbps2.c
@@ -51,7 +51,7 @@ struct apbps2_regs {
 
 struct apbps2_priv {
 	struct serio		*io;
-	struct apbps2_regs	*regs;
+	struct apbps2_regs	__iomem *regs;
 };
 
 static int apbps2_idx;
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index e486a8a..df4e9f6 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -259,6 +259,8 @@ static int hv_kbd_connect_to_vsp(struct hv_device *hv_dev)
 	u32 proto_status;
 	int error;
 
+	reinit_completion(&kbd_dev->wait_event);
+
 	request = &kbd_dev->protocol_req;
 	memset(request, 0, sizeof(struct synth_kbd_protocol_request));
 	request->header.type = __cpu_to_le32(SYNTH_KBD_PROTOCOL_REQUEST);
@@ -380,6 +382,29 @@ static int hv_kbd_remove(struct hv_device *hv_dev)
 	return 0;
 }
 
+static int hv_kbd_suspend(struct hv_device *hv_dev)
+{
+	vmbus_close(hv_dev->channel);
+
+	return 0;
+}
+
+static int hv_kbd_resume(struct hv_device *hv_dev)
+{
+	int ret;
+
+	ret = vmbus_open(hv_dev->channel,
+			 KBD_VSC_SEND_RING_BUFFER_SIZE,
+			 KBD_VSC_RECV_RING_BUFFER_SIZE,
+			 NULL, 0,
+			 hv_kbd_on_channel_callback,
+			 hv_dev);
+	if (ret == 0)
+		ret = hv_kbd_connect_to_vsp(hv_dev);
+
+	return ret;
+}
+
 static const struct hv_vmbus_device_id id_table[] = {
 	/* Keyboard guid */
 	{ HV_KBD_GUID, },
@@ -393,6 +418,8 @@ static struct  hv_driver hv_kbd_drv = {
 	.id_table = id_table,
 	.probe = hv_kbd_probe,
 	.remove = hv_kbd_remove,
+	.suspend = hv_kbd_suspend,
+	.resume = hv_kbd_resume,
 	.driver = {
 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 	},
diff --git a/drivers/input/serio/ioc3kbd.c b/drivers/input/serio/ioc3kbd.c
new file mode 100644
index 0000000..d51bfe9
--- /dev/null
+++ b/drivers/input/serio/ioc3kbd.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SGI IOC3 PS/2 controller driver for linux
+ *
+ * Copyright (C) 2019 Thomas Bogendoerfer <tbogendoerfer@suse.de>
+ *
+ * Based on code Copyright (C) 2005 Stanislaw Skowronek <skylark@unaligned.org>
+ *               Copyright (C) 2009 Johannes Dickgreber <tanzy@gmx.de>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/serio.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/sn/ioc3.h>
+
+struct ioc3kbd_data {
+	struct ioc3_serioregs __iomem *regs;
+	struct serio *kbd, *aux;
+	bool kbd_exists, aux_exists;
+	int irq;
+};
+
+static int ioc3kbd_wait(struct ioc3_serioregs __iomem *regs, u32 mask)
+{
+	unsigned long timeout = 0;
+
+	while ((readl(&regs->km_csr) & mask) && (timeout < 250)) {
+		udelay(50);
+		timeout++;
+	}
+	return (timeout >= 250) ? -ETIMEDOUT : 0;
+}
+
+static int ioc3kbd_write(struct serio *dev, u8 val)
+{
+	struct ioc3kbd_data *d = dev->port_data;
+	int ret;
+
+	ret = ioc3kbd_wait(d->regs, KM_CSR_K_WRT_PEND);
+	if (ret)
+		return ret;
+
+	writel(val, &d->regs->k_wd);
+
+	return 0;
+}
+
+static int ioc3kbd_start(struct serio *dev)
+{
+	struct ioc3kbd_data *d = dev->port_data;
+
+	d->kbd_exists = true;
+	return 0;
+}
+
+static void ioc3kbd_stop(struct serio *dev)
+{
+	struct ioc3kbd_data *d = dev->port_data;
+
+	d->kbd_exists = false;
+}
+
+static int ioc3aux_write(struct serio *dev, u8 val)
+{
+	struct ioc3kbd_data *d = dev->port_data;
+	int ret;
+
+	ret = ioc3kbd_wait(d->regs, KM_CSR_M_WRT_PEND);
+	if (ret)
+		return ret;
+
+	writel(val, &d->regs->m_wd);
+
+	return 0;
+}
+
+static int ioc3aux_start(struct serio *dev)
+{
+	struct ioc3kbd_data *d = dev->port_data;
+
+	d->aux_exists = true;
+	return 0;
+}
+
+static void ioc3aux_stop(struct serio *dev)
+{
+	struct ioc3kbd_data *d = dev->port_data;
+
+	d->aux_exists = false;
+}
+
+static void ioc3kbd_process_data(struct serio *dev, u32 data)
+{
+	if (data & KM_RD_VALID_0)
+		serio_interrupt(dev, (data >> KM_RD_DATA_0_SHIFT) & 0xff, 0);
+	if (data & KM_RD_VALID_1)
+		serio_interrupt(dev, (data >> KM_RD_DATA_1_SHIFT) & 0xff, 0);
+	if (data & KM_RD_VALID_2)
+		serio_interrupt(dev, (data >> KM_RD_DATA_2_SHIFT) & 0xff, 0);
+}
+
+static irqreturn_t ioc3kbd_intr(int irq, void *dev_id)
+{
+	struct ioc3kbd_data *d = dev_id;
+	u32 data_k, data_m;
+
+	data_k = readl(&d->regs->k_rd);
+	if (d->kbd_exists)
+		ioc3kbd_process_data(d->kbd, data_k);
+
+	data_m = readl(&d->regs->m_rd);
+	if (d->aux_exists)
+		ioc3kbd_process_data(d->aux, data_m);
+
+	return IRQ_HANDLED;
+}
+
+static int ioc3kbd_probe(struct platform_device *pdev)
+{
+	struct ioc3_serioregs __iomem *regs;
+	struct device *dev = &pdev->dev;
+	struct ioc3kbd_data *d;
+	struct serio *sk, *sa;
+	int irq, ret;
+
+	regs = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return -ENXIO;
+
+	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	sk = kzalloc(sizeof(*sk), GFP_KERNEL);
+	if (!sk)
+		return -ENOMEM;
+
+	sa = kzalloc(sizeof(*sa), GFP_KERNEL);
+	if (!sa) {
+		kfree(sk);
+		return -ENOMEM;
+	}
+
+	sk->id.type = SERIO_8042;
+	sk->write = ioc3kbd_write;
+	sk->start = ioc3kbd_start;
+	sk->stop = ioc3kbd_stop;
+	snprintf(sk->name, sizeof(sk->name), "IOC3 keyboard %d", pdev->id);
+	snprintf(sk->phys, sizeof(sk->phys), "ioc3/serio%dkbd", pdev->id);
+	sk->port_data = d;
+	sk->dev.parent = dev;
+
+	sa->id.type = SERIO_8042;
+	sa->write = ioc3aux_write;
+	sa->start = ioc3aux_start;
+	sa->stop = ioc3aux_stop;
+	snprintf(sa->name, sizeof(sa->name), "IOC3 auxiliary %d", pdev->id);
+	snprintf(sa->phys, sizeof(sa->phys), "ioc3/serio%daux", pdev->id);
+	sa->port_data = d;
+	sa->dev.parent = dev;
+
+	d->regs = regs;
+	d->kbd = sk;
+	d->aux = sa;
+	d->irq = irq;
+
+	platform_set_drvdata(pdev, d);
+	serio_register_port(d->kbd);
+	serio_register_port(d->aux);
+
+	ret = request_irq(irq, ioc3kbd_intr, IRQF_SHARED, "ioc3-kbd", d);
+	if (ret) {
+		dev_err(dev, "could not request IRQ %d\n", irq);
+		serio_unregister_port(d->kbd);
+		serio_unregister_port(d->aux);
+		return ret;
+	}
+
+	/* enable ports */
+	writel(KM_CSR_K_CLAMP_3 | KM_CSR_M_CLAMP_3, &regs->km_csr);
+
+	return 0;
+}
+
+static int ioc3kbd_remove(struct platform_device *pdev)
+{
+	struct ioc3kbd_data *d = platform_get_drvdata(pdev);
+
+	free_irq(d->irq, d);
+
+	serio_unregister_port(d->kbd);
+	serio_unregister_port(d->aux);
+
+	return 0;
+}
+
+static struct platform_driver ioc3kbd_driver = {
+	.probe          = ioc3kbd_probe,
+	.remove         = ioc3kbd_remove,
+	.driver = {
+		.name = "ioc3-kbd",
+	},
+};
+module_platform_driver(ioc3kbd_driver);
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfer@suse.de>");
+MODULE_DESCRIPTION("SGI IOC3 serio driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 51ddb20..8fd7fc3 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -333,7 +333,8 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
 		req->xfer[1].len = 2;
 
 		/* for 1uF, settle for 800 usec; no cap, 100 usec.  */
-		req->xfer[1].delay_usecs = ts->vref_delay_usecs;
+		req->xfer[1].delay.value = ts->vref_delay_usecs;
+		req->xfer[1].delay.unit = SPI_DELAY_UNIT_USECS;
 		spi_message_add_tail(&req->xfer[1], &req->msg);
 
 		/* Enable reference voltage */
@@ -1018,7 +1019,8 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts,
 	 * have had enough time to stabilize.
 	 */
 	if (pdata->settle_delay_usecs) {
-		x->delay_usecs = pdata->settle_delay_usecs;
+		x->delay.value = pdata->settle_delay_usecs;
+		x->delay.unit = SPI_DELAY_UNIT_USECS;
 
 		x++;
 		x->tx_buf = &packet->read_y;
@@ -1061,7 +1063,8 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts,
 
 	/* ... maybe discard first sample ... */
 	if (pdata->settle_delay_usecs) {
-		x->delay_usecs = pdata->settle_delay_usecs;
+		x->delay.value = pdata->settle_delay_usecs;
+		x->delay.unit = SPI_DELAY_UNIT_USECS;
 
 		x++;
 		x->tx_buf = &packet->read_x;
@@ -1094,7 +1097,8 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts,
 
 		/* ... maybe discard first sample ... */
 		if (pdata->settle_delay_usecs) {
-			x->delay_usecs = pdata->settle_delay_usecs;
+			x->delay.value = pdata->settle_delay_usecs;
+			x->delay.unit = SPI_DELAY_UNIT_USECS;
 
 			x++;
 			x->tx_buf = &packet->read_z1;
@@ -1125,7 +1129,8 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts,
 
 		/* ... maybe discard first sample ... */
 		if (pdata->settle_delay_usecs) {
-			x->delay_usecs = pdata->settle_delay_usecs;
+			x->delay.value = pdata->settle_delay_usecs;
+			x->delay.unit = SPI_DELAY_UNIT_USECS;
 
 			x++;
 			x->tx_buf = &packet->read_z2;
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index d61731c..d258772 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -13,22 +13,23 @@
  *    http://www.glyn.com/Products/Displays
  */
 
-#include <linux/module.h>
-#include <linux/ratelimit.h>
-#include <linux/irq.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/input.h>
-#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
-#include <linux/delay.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/gpio/consumer.h>
 #include <linux/input/mt.h>
 #include <linux/input/touchscreen.h>
-#include <asm/unaligned.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
 #include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/unaligned.h>
 
 #define WORK_REGISTER_THRESHOLD		0x00
 #define WORK_REGISTER_REPORT_RATE	0x08
@@ -1050,6 +1051,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
 {
 	const struct edt_i2c_chip_data *chip_data;
 	struct edt_ft5x06_ts_data *tsdata;
+	u8 buf[2] = { 0xfc, 0x00 };
 	struct input_dev *input;
 	unsigned long irq_flags;
 	int error;
@@ -1140,6 +1142,12 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
 		return error;
 	}
 
+	/*
+	 * Dummy read access. EP0700MLP1 returns bogus data on the first
+	 * register read access and ignores writes.
+	 */
+	edt_ft5x06_ts_readwrite(tsdata->client, 2, buf, 2, buf);
+
 	edt_ft5x06_ts_set_regs(tsdata);
 	edt_ft5x06_ts_get_defaults(&client->dev, tsdata);
 	edt_ft5x06_ts_get_parameters(tsdata);
@@ -1200,7 +1208,6 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
 		return error;
 
 	edt_ft5x06_ts_prepare_debugfs(tsdata, dev_driver_string(&client->dev));
-	device_init_wakeup(&client->dev, 1);
 
 	dev_dbg(&client->dev,
 		"EDT FT5x06 initialized: IRQ %d, WAKE pin %d, Reset pin %d.\n",
@@ -1220,29 +1227,6 @@ static int edt_ft5x06_ts_remove(struct i2c_client *client)
 	return 0;
 }
 
-static int __maybe_unused edt_ft5x06_ts_suspend(struct device *dev)
-{
-	struct i2c_client *client = to_i2c_client(dev);
-
-	if (device_may_wakeup(dev))
-		enable_irq_wake(client->irq);
-
-	return 0;
-}
-
-static int __maybe_unused edt_ft5x06_ts_resume(struct device *dev)
-{
-	struct i2c_client *client = to_i2c_client(dev);
-
-	if (device_may_wakeup(dev))
-		disable_irq_wake(client->irq);
-
-	return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(edt_ft5x06_ts_pm_ops,
-			 edt_ft5x06_ts_suspend, edt_ft5x06_ts_resume);
-
 static const struct edt_i2c_chip_data edt_ft5x06_data = {
 	.max_support_points = 5,
 };
@@ -1281,7 +1265,6 @@ static struct i2c_driver edt_ft5x06_ts_driver = {
 	.driver = {
 		.name = "edt_ft5x06",
 		.of_match_table = edt_ft5x06_of_match,
-		.pm = &edt_ft5x06_ts_pm_ops,
 	},
 	.id_table = edt_ft5x06_ts_id,
 	.probe    = edt_ft5x06_ts_probe,
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index d4ad24e..49117996 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -59,8 +59,10 @@
 #define CMD_HEADER_WRITE	0x54
 #define CMD_HEADER_READ		0x53
 #define CMD_HEADER_6B_READ	0x5B
+#define CMD_HEADER_ROM_READ	0x96
 #define CMD_HEADER_RESP		0x52
 #define CMD_HEADER_6B_RESP	0x9B
+#define CMD_HEADER_ROM_RESP	0x95
 #define CMD_HEADER_HELLO	0x55
 #define CMD_HEADER_REK		0x66
 
@@ -200,6 +202,10 @@ static int elants_i2c_execute_command(struct i2c_client *client,
 		expected_response = CMD_HEADER_6B_RESP;
 		break;
 
+	case CMD_HEADER_ROM_READ:
+		expected_response = CMD_HEADER_ROM_RESP;
+		break;
+
 	default:
 		dev_err(&client->dev, "%s: invalid command %*ph\n",
 			__func__, (int)cmd_size, cmd);
@@ -556,6 +562,8 @@ static int elants_i2c_initialize(struct elants_data *ts)
 
 	/* hw version is available even if device in recovery state */
 	error2 = elants_i2c_query_hw_version(ts);
+	if (!error2)
+		error2 = elants_i2c_query_bc_version(ts);
 	if (!error)
 		error = error2;
 
@@ -564,8 +572,6 @@ static int elants_i2c_initialize(struct elants_data *ts)
 	if (!error)
 		error = elants_i2c_query_test_version(ts);
 	if (!error)
-		error = elants_i2c_query_bc_version(ts);
-	if (!error)
 		error = elants_i2c_query_ts_info(ts);
 
 	if (error)
@@ -613,39 +619,94 @@ static int elants_i2c_fw_write_page(struct i2c_client *client,
 	return error;
 }
 
+static int elants_i2c_validate_remark_id(struct elants_data *ts,
+					 const struct firmware *fw)
+{
+	struct i2c_client *client = ts->client;
+	int error;
+	const u8 cmd[] = { CMD_HEADER_ROM_READ, 0x80, 0x1F, 0x00, 0x00, 0x21 };
+	u8 resp[6] = { 0 };
+	u16 ts_remark_id = 0;
+	u16 fw_remark_id = 0;
+
+	/* Compare TS Remark ID and FW Remark ID */
+	error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+					resp, sizeof(resp));
+	if (error) {
+		dev_err(&client->dev, "failed to query Remark ID: %d\n", error);
+		return error;
+	}
+
+	ts_remark_id = get_unaligned_be16(&resp[3]);
+
+	fw_remark_id = get_unaligned_le16(&fw->data[fw->size - 4]);
+
+	if (fw_remark_id != ts_remark_id) {
+		dev_err(&client->dev,
+			"Remark ID Mismatched: ts_remark_id=0x%04x, fw_remark_id=0x%04x.\n",
+			ts_remark_id, fw_remark_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int elants_i2c_do_update_firmware(struct i2c_client *client,
 					 const struct firmware *fw,
 					 bool force)
 {
+	struct elants_data *ts = i2c_get_clientdata(client);
 	const u8 enter_iap[] = { 0x45, 0x49, 0x41, 0x50 };
 	const u8 enter_iap2[] = { 0x54, 0x00, 0x12, 0x34 };
 	const u8 iap_ack[] = { 0x55, 0xaa, 0x33, 0xcc };
-	const u8 close_idle[] = {0x54, 0x2c, 0x01, 0x01};
+	const u8 close_idle[] = { 0x54, 0x2c, 0x01, 0x01 };
 	u8 buf[HEADER_SIZE];
 	u16 send_id;
 	int page, n_fw_pages;
 	int error;
+	bool check_remark_id = ts->iap_version >= 0x60;
 
 	/* Recovery mode detection! */
 	if (force) {
 		dev_dbg(&client->dev, "Recovery mode procedure\n");
+
+		if (check_remark_id) {
+			error = elants_i2c_validate_remark_id(ts, fw);
+			if (error)
+				return error;
+		}
+
 		error = elants_i2c_send(client, enter_iap2, sizeof(enter_iap2));
+		if (error) {
+			dev_err(&client->dev, "failed to enter IAP mode: %d\n",
+				error);
+			return error;
+		}
 	} else {
 		/* Start IAP Procedure */
 		dev_dbg(&client->dev, "Normal IAP procedure\n");
+
 		/* Close idle mode */
 		error = elants_i2c_send(client, close_idle, sizeof(close_idle));
 		if (error)
 			dev_err(&client->dev, "Failed close idle: %d\n", error);
 		msleep(60);
+
 		elants_i2c_sw_reset(client);
 		msleep(20);
-		error = elants_i2c_send(client, enter_iap, sizeof(enter_iap));
-	}
 
-	if (error) {
-		dev_err(&client->dev, "failed to enter IAP mode: %d\n", error);
-		return error;
+		if (check_remark_id) {
+			error = elants_i2c_validate_remark_id(ts, fw);
+			if (error)
+				return error;
+		}
+
+		error = elants_i2c_send(client, enter_iap, sizeof(enter_iap));
+		if (error) {
+			dev_err(&client->dev, "failed to enter IAP mode: %d\n",
+				error);
+			return error;
+		}
 	}
 
 	msleep(20);
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
index 28f2ab0..725029a 100644
--- a/drivers/interconnect/Makefile
+++ b/drivers/interconnect/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
+CFLAGS_core.o				:= -I$(src)
 icc-core-objs				:= core.o
 
 obj-$(CONFIG_INTERCONNECT)		+= icc-core.o
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index c498796..f277e46 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -19,45 +19,22 @@
 #include <linux/of.h>
 #include <linux/overflow.h>
 
+#include "internal.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
 static DEFINE_IDR(icc_idr);
 static LIST_HEAD(icc_providers);
 static DEFINE_MUTEX(icc_lock);
 static struct dentry *icc_debugfs_dir;
 
-/**
- * struct icc_req - constraints that are attached to each node
- * @req_node: entry in list of requests for the particular @node
- * @node: the interconnect node to which this constraint applies
- * @dev: reference to the device that sets the constraints
- * @tag: path tag (optional)
- * @avg_bw: an integer describing the average bandwidth in kBps
- * @peak_bw: an integer describing the peak bandwidth in kBps
- */
-struct icc_req {
-	struct hlist_node req_node;
-	struct icc_node *node;
-	struct device *dev;
-	u32 tag;
-	u32 avg_bw;
-	u32 peak_bw;
-};
-
-/**
- * struct icc_path - interconnect path structure
- * @num_nodes: number of hops (nodes)
- * @reqs: array of the requests applicable to this path of nodes
- */
-struct icc_path {
-	size_t num_nodes;
-	struct icc_req reqs[];
-};
-
 static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
 {
 	if (!n)
 		return;
 
-	seq_printf(s, "%-30s %12u %12u\n",
+	seq_printf(s, "%-42s %12u %12u\n",
 		   n->name, n->avg_bw, n->peak_bw);
 }
 
@@ -65,8 +42,8 @@ static int icc_summary_show(struct seq_file *s, void *data)
 {
 	struct icc_provider *provider;
 
-	seq_puts(s, " node                                   avg         peak\n");
-	seq_puts(s, "--------------------------------------------------------\n");
+	seq_puts(s, " node                                  tag          avg         peak\n");
+	seq_puts(s, "--------------------------------------------------------------------\n");
 
 	mutex_lock(&icc_lock);
 
@@ -81,8 +58,8 @@ static int icc_summary_show(struct seq_file *s, void *data)
 				if (!r->dev)
 					continue;
 
-				seq_printf(s, "    %-26s %12u %12u\n",
-					   dev_name(r->dev), r->avg_bw,
+				seq_printf(s, "  %-27s %12u %12u %12u\n",
+					   dev_name(r->dev), r->tag, r->avg_bw,
 					   r->peak_bw);
 			}
 		}
@@ -94,6 +71,70 @@ static int icc_summary_show(struct seq_file *s, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(icc_summary);
 
+static void icc_graph_show_link(struct seq_file *s, int level,
+				struct icc_node *n, struct icc_node *m)
+{
+	seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
+		   level == 2 ? "\t\t" : "\t",
+		   n->id, n->name, m->id, m->name);
+}
+
+static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
+{
+	seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
+		   n->id, n->name, n->id, n->name);
+	seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
+	seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
+	seq_puts(s, "\"]\n");
+}
+
+static int icc_graph_show(struct seq_file *s, void *data)
+{
+	struct icc_provider *provider;
+	struct icc_node *n;
+	int cluster_index = 0;
+	int i;
+
+	seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
+	mutex_lock(&icc_lock);
+
+	/* draw providers as cluster subgraphs */
+	cluster_index = 0;
+	list_for_each_entry(provider, &icc_providers, provider_list) {
+		seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
+		if (provider->dev)
+			seq_printf(s, "\t\tlabel = \"%s\"\n",
+				   dev_name(provider->dev));
+
+		/* draw nodes */
+		list_for_each_entry(n, &provider->nodes, node_list)
+			icc_graph_show_node(s, n);
+
+		/* draw internal links */
+		list_for_each_entry(n, &provider->nodes, node_list)
+			for (i = 0; i < n->num_links; ++i)
+				if (n->provider == n->links[i]->provider)
+					icc_graph_show_link(s, 2, n,
+							    n->links[i]);
+
+		seq_puts(s, "\t}\n");
+	}
+
+	/* draw external links */
+	list_for_each_entry(provider, &icc_providers, provider_list)
+		list_for_each_entry(n, &provider->nodes, node_list)
+			for (i = 0; i < n->num_links; ++i)
+				if (n->provider != n->links[i]->provider)
+					icc_graph_show_link(s, 1, n,
+							    n->links[i]);
+
+	mutex_unlock(&icc_lock);
+	seq_puts(s, "}");
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(icc_graph);
+
 static struct icc_node *node_find(const int id)
 {
 	return idr_find(&icc_idr, id);
@@ -244,6 +285,16 @@ static int apply_constraints(struct icc_path *path)
 	return ret;
 }
 
+int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+		      u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+	*agg_avg += avg_bw;
+	*agg_peak = max(*agg_peak, peak_bw);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(icc_std_aggregate);
+
 /* of_icc_xlate_onecell() - Translate function using a single index.
  * @spec: OF phandle args to map into an interconnect node.
  * @data: private data (pointer to struct icc_onecell_data)
@@ -382,9 +433,17 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
 
 	mutex_lock(&icc_lock);
 	path = path_find(dev, src_node, dst_node);
-	if (IS_ERR(path))
-		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
 	mutex_unlock(&icc_lock);
+	if (IS_ERR(path)) {
+		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+		return path;
+	}
+
+	if (name)
+		path->name = kstrdup_const(name, GFP_KERNEL);
+	else
+		path->name = kasprintf(GFP_KERNEL, "%s-%s",
+				       src_node->name, dst_node->name);
 
 	return path;
 }
@@ -436,9 +495,12 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
 	size_t i;
 	int ret;
 
-	if (!path || !path->num_nodes)
+	if (!path)
 		return 0;
 
+	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
+		return -EINVAL;
+
 	mutex_lock(&icc_lock);
 
 	old_avg = path->reqs[0].avg_bw;
@@ -453,6 +515,8 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
 
 		/* aggregate requests for this node */
 		aggregate_requests(node);
+
+		trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
 	}
 
 	ret = apply_constraints(path);
@@ -471,6 +535,8 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
 
 	mutex_unlock(&icc_lock);
 
+	trace_icc_set_bw_end(path, ret);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(icc_set_bw);
@@ -507,9 +573,12 @@ struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
 		goto out;
 
 	path = path_find(dev, src, dst);
-	if (IS_ERR(path))
+	if (IS_ERR(path)) {
 		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+		goto out;
+	}
 
+	path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
 out:
 	mutex_unlock(&icc_lock);
 	return path;
@@ -545,6 +614,7 @@ void icc_put(struct icc_path *path)
 	}
 	mutex_unlock(&icc_lock);
 
+	kfree_const(path->name);
 	kfree(path);
 }
 EXPORT_SYMBOL_GPL(icc_put);
@@ -743,6 +813,28 @@ void icc_node_del(struct icc_node *node)
 EXPORT_SYMBOL_GPL(icc_node_del);
 
 /**
+ * icc_nodes_remove() - remove all previously added nodes from provider
+ * @provider: the interconnect provider we are removing nodes from
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_nodes_remove(struct icc_provider *provider)
+{
+	struct icc_node *n, *tmp;
+
+	if (WARN_ON(IS_ERR_OR_NULL(provider)))
+		return -EINVAL;
+
+	list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
+		icc_node_del(n);
+		icc_node_destroy(n->id);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(icc_nodes_remove);
+
+/**
  * icc_provider_add() - add a new interconnect provider
  * @provider: the interconnect provider that will be added into topology
  *
@@ -802,6 +894,8 @@ static int __init icc_init(void)
 	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
 	debugfs_create_file("interconnect_summary", 0444,
 			    icc_debugfs_dir, NULL, &icc_summary_fops);
+	debugfs_create_file("interconnect_graph", 0444,
+			    icc_debugfs_dir, NULL, &icc_graph_fops);
 	return 0;
 }
 
diff --git a/drivers/interconnect/internal.h b/drivers/interconnect/internal.h
new file mode 100644
index 0000000..bf18cb7
--- /dev/null
+++ b/drivers/interconnect/internal.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework internal structs
+ *
+ * Copyright (c) 2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_INTERNAL_H
+#define __DRIVERS_INTERCONNECT_INTERNAL_H
+
+/**
+ * struct icc_req - constraints that are attached to each node
+ * @req_node: entry in list of requests for the particular @node
+ * @node: the interconnect node to which this constraint applies
+ * @dev: reference to the device that sets the constraints
+ * @tag: path tag (optional)
+ * @avg_bw: an integer describing the average bandwidth in kBps
+ * @peak_bw: an integer describing the peak bandwidth in kBps
+ */
+struct icc_req {
+	struct hlist_node req_node;
+	struct icc_node *node;
+	struct device *dev;
+	u32 tag;
+	u32 avg_bw;
+	u32 peak_bw;
+};
+
+/**
+ * struct icc_path - interconnect path structure
+ * @name: a string name of the path (useful for ftrace)
+ * @num_nodes: number of hops (nodes)
+ * @reqs: array of the requests applicable to this path of nodes
+ */
+struct icc_path {
+	const char *name;
+	size_t num_nodes;
+	struct icc_req reqs[];
+};
+
+#endif
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 2f9304d..76938ec 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -5,6 +5,15 @@
 	help
 	  Support for Qualcomm's Network-on-Chip interconnect hardware.
 
+config INTERCONNECT_QCOM_MSM8916
+	tristate "Qualcomm MSM8916 interconnect driver"
+	depends on INTERCONNECT_QCOM
+	depends on QCOM_SMD_RPM
+	select INTERCONNECT_QCOM_SMD_RPM
+	help
+	  This is a driver for the Qualcomm Network-on-Chip on msm8916-based
+	  platforms.
+
 config INTERCONNECT_QCOM_MSM8974
 	tristate "Qualcomm MSM8974 interconnect driver"
 	depends on INTERCONNECT_QCOM
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 9adf9e3..e827157 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -1,10 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0
 
+qnoc-msm8916-objs			:= msm8916.o
 qnoc-msm8974-objs			:= msm8974.o
 qnoc-qcs404-objs			:= qcs404.o
 qnoc-sdm845-objs			:= sdm845.o
 icc-smd-rpm-objs			:= smd-rpm.o
 
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
 obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
 obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
new file mode 100644
index 0000000..e94f3c5
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8916.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2020 Linaro Ltd
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+
+#include <dt-bindings/interconnect/qcom,msm8916.h>
+
+#include "smd-rpm.h"
+
+#define RPM_BUS_MASTER_REQ      0x73616d62
+#define RPM_BUS_SLAVE_REQ       0x766c7362
+
+enum {
+	MSM8916_BIMC_SNOC_MAS = 1,
+	MSM8916_BIMC_SNOC_SLV,
+	MSM8916_MASTER_AMPSS_M0,
+	MSM8916_MASTER_LPASS,
+	MSM8916_MASTER_BLSP_1,
+	MSM8916_MASTER_DEHR,
+	MSM8916_MASTER_GRAPHICS_3D,
+	MSM8916_MASTER_JPEG,
+	MSM8916_MASTER_MDP_PORT0,
+	MSM8916_MASTER_CRYPTO_CORE0,
+	MSM8916_MASTER_SDCC_1,
+	MSM8916_MASTER_SDCC_2,
+	MSM8916_MASTER_QDSS_BAM,
+	MSM8916_MASTER_QDSS_ETR,
+	MSM8916_MASTER_SNOC_CFG,
+	MSM8916_MASTER_SPDM,
+	MSM8916_MASTER_TCU0,
+	MSM8916_MASTER_TCU1,
+	MSM8916_MASTER_USB_HS,
+	MSM8916_MASTER_VFE,
+	MSM8916_MASTER_VIDEO_P0,
+	MSM8916_SNOC_MM_INT_0,
+	MSM8916_SNOC_MM_INT_1,
+	MSM8916_SNOC_MM_INT_2,
+	MSM8916_SNOC_MM_INT_BIMC,
+	MSM8916_PNOC_INT_0,
+	MSM8916_PNOC_INT_1,
+	MSM8916_PNOC_MAS_0,
+	MSM8916_PNOC_MAS_1,
+	MSM8916_PNOC_SLV_0,
+	MSM8916_PNOC_SLV_1,
+	MSM8916_PNOC_SLV_2,
+	MSM8916_PNOC_SLV_3,
+	MSM8916_PNOC_SLV_4,
+	MSM8916_PNOC_SLV_8,
+	MSM8916_PNOC_SLV_9,
+	MSM8916_PNOC_SNOC_MAS,
+	MSM8916_PNOC_SNOC_SLV,
+	MSM8916_SNOC_QDSS_INT,
+	MSM8916_SLAVE_AMPSS_L2,
+	MSM8916_SLAVE_APSS,
+	MSM8916_SLAVE_LPASS,
+	MSM8916_SLAVE_BIMC_CFG,
+	MSM8916_SLAVE_BLSP_1,
+	MSM8916_SLAVE_BOOT_ROM,
+	MSM8916_SLAVE_CAMERA_CFG,
+	MSM8916_SLAVE_CATS_128,
+	MSM8916_SLAVE_OCMEM_64,
+	MSM8916_SLAVE_CLK_CTL,
+	MSM8916_SLAVE_CRYPTO_0_CFG,
+	MSM8916_SLAVE_DEHR_CFG,
+	MSM8916_SLAVE_DISPLAY_CFG,
+	MSM8916_SLAVE_EBI_CH0,
+	MSM8916_SLAVE_GRAPHICS_3D_CFG,
+	MSM8916_SLAVE_IMEM_CFG,
+	MSM8916_SLAVE_IMEM,
+	MSM8916_SLAVE_MPM,
+	MSM8916_SLAVE_MSG_RAM,
+	MSM8916_SLAVE_MSS,
+	MSM8916_SLAVE_PDM,
+	MSM8916_SLAVE_PMIC_ARB,
+	MSM8916_SLAVE_PNOC_CFG,
+	MSM8916_SLAVE_PRNG,
+	MSM8916_SLAVE_QDSS_CFG,
+	MSM8916_SLAVE_QDSS_STM,
+	MSM8916_SLAVE_RBCPR_CFG,
+	MSM8916_SLAVE_SDCC_1,
+	MSM8916_SLAVE_SDCC_2,
+	MSM8916_SLAVE_SECURITY,
+	MSM8916_SLAVE_SNOC_CFG,
+	MSM8916_SLAVE_SPDM,
+	MSM8916_SLAVE_SRVC_SNOC,
+	MSM8916_SLAVE_TCSR,
+	MSM8916_SLAVE_TLMM,
+	MSM8916_SLAVE_USB_HS,
+	MSM8916_SLAVE_VENUS_CFG,
+	MSM8916_SNOC_BIMC_0_MAS,
+	MSM8916_SNOC_BIMC_0_SLV,
+	MSM8916_SNOC_BIMC_1_MAS,
+	MSM8916_SNOC_BIMC_1_SLV,
+	MSM8916_SNOC_INT_0,
+	MSM8916_SNOC_INT_1,
+	MSM8916_SNOC_INT_BIMC,
+	MSM8916_SNOC_PNOC_MAS,
+	MSM8916_SNOC_PNOC_SLV,
+};
+
+#define to_msm8916_provider(_provider) \
+	container_of(_provider, struct msm8916_icc_provider, provider)
+
+static const struct clk_bulk_data msm8916_bus_clocks[] = {
+	{ .id = "bus" },
+	{ .id = "bus_a" },
+};
+
+/**
+ * struct msm8916_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @bus_clks: the clk_bulk_data table of bus clocks
+ * @num_clks: the total number of clk_bulk_data entries
+ */
+struct msm8916_icc_provider {
+	struct icc_provider provider;
+	struct clk_bulk_data *bus_clks;
+	int num_clks;
+};
+
+#define MSM8916_MAX_LINKS	8
+
+/**
+ * struct msm8916_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @mas_rpm_id:	RPM ID for devices that are bus masters
+ * @slv_rpm_id:	RPM ID for devices that are bus slaves
+ * @rate: current bus clock rate in Hz
+ */
+struct msm8916_icc_node {
+	unsigned char *name;
+	u16 id;
+	u16 links[MSM8916_MAX_LINKS];
+	u16 num_links;
+	u16 buswidth;
+	int mas_rpm_id;
+	int slv_rpm_id;
+	u64 rate;
+};
+
+struct msm8916_icc_desc {
+	struct msm8916_icc_node **nodes;
+	size_t num_nodes;
+};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id,	\
+					...)				\
+		static struct msm8916_icc_node _name = {		\
+		.name = #_name,						\
+		.id = _id,						\
+		.buswidth = _buswidth,					\
+		.mas_rpm_id = _mas_rpm_id,				\
+		.slv_rpm_id = _slv_rpm_id,				\
+		.num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })),	\
+		.links = { __VA_ARGS__ },				\
+	}
+
+DEFINE_QNODE(bimc_snoc_mas, MSM8916_BIMC_SNOC_MAS, 8, -1, -1, MSM8916_BIMC_SNOC_SLV);
+DEFINE_QNODE(bimc_snoc_slv, MSM8916_BIMC_SNOC_SLV, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_1);
+DEFINE_QNODE(mas_apss, MSM8916_MASTER_AMPSS_M0, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_audio, MSM8916_MASTER_LPASS, 4, -1, -1, MSM8916_PNOC_MAS_0);
+DEFINE_QNODE(mas_blsp_1, MSM8916_MASTER_BLSP_1, 4, -1, -1, MSM8916_PNOC_MAS_1);
+DEFINE_QNODE(mas_dehr, MSM8916_MASTER_DEHR, 4, -1, -1, MSM8916_PNOC_MAS_0);
+DEFINE_QNODE(mas_gfx, MSM8916_MASTER_GRAPHICS_3D, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_jpeg, MSM8916_MASTER_JPEG, 16, -1, -1, MSM8916_SNOC_MM_INT_0, MSM8916_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_mdp, MSM8916_MASTER_MDP_PORT0, 16, -1, -1, MSM8916_SNOC_MM_INT_0, MSM8916_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_pcnoc_crypto_0, MSM8916_MASTER_CRYPTO_CORE0, 8, -1, -1, MSM8916_PNOC_INT_1);
+DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8916_MASTER_SDCC_1, 8, -1, -1, MSM8916_PNOC_INT_1);
+DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8916_MASTER_SDCC_2, 8, -1, -1, MSM8916_PNOC_INT_1);
+DEFINE_QNODE(mas_qdss_bam, MSM8916_MASTER_QDSS_BAM, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_qdss_etr, MSM8916_MASTER_QDSS_ETR, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_snoc_cfg, MSM8916_MASTER_SNOC_CFG, 4, 20, -1, MSM8916_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_spdm, MSM8916_MASTER_SPDM, 4, -1, -1, MSM8916_PNOC_MAS_0);
+DEFINE_QNODE(mas_tcu0, MSM8916_MASTER_TCU0, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_tcu1, MSM8916_MASTER_TCU1, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_usb_hs, MSM8916_MASTER_USB_HS, 4, -1, -1, MSM8916_PNOC_MAS_1);
+DEFINE_QNODE(mas_vfe, MSM8916_MASTER_VFE, 16, -1, -1, MSM8916_SNOC_MM_INT_1, MSM8916_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_video, MSM8916_MASTER_VIDEO_P0, 16, -1, -1, MSM8916_SNOC_MM_INT_0, MSM8916_SNOC_MM_INT_2);
+DEFINE_QNODE(mm_int_0, MSM8916_SNOC_MM_INT_0, 16, -1, -1, MSM8916_SNOC_MM_INT_BIMC);
+DEFINE_QNODE(mm_int_1, MSM8916_SNOC_MM_INT_1, 16, -1, -1, MSM8916_SNOC_MM_INT_BIMC);
+DEFINE_QNODE(mm_int_2, MSM8916_SNOC_MM_INT_2, 16, -1, -1, MSM8916_SNOC_INT_0);
+DEFINE_QNODE(mm_int_bimc, MSM8916_SNOC_MM_INT_BIMC, 16, -1, -1, MSM8916_SNOC_BIMC_1_MAS);
+DEFINE_QNODE(pcnoc_int_0, MSM8916_PNOC_INT_0, 8, -1, -1, MSM8916_PNOC_SNOC_MAS, MSM8916_PNOC_SLV_0, MSM8916_PNOC_SLV_1, MSM8916_PNOC_SLV_2, MSM8916_PNOC_SLV_3, MSM8916_PNOC_SLV_4, MSM8916_PNOC_SLV_8, MSM8916_PNOC_SLV_9);
+DEFINE_QNODE(pcnoc_int_1, MSM8916_PNOC_INT_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
+DEFINE_QNODE(pcnoc_m_0, MSM8916_PNOC_MAS_0, 8, -1, -1, MSM8916_PNOC_INT_0);
+DEFINE_QNODE(pcnoc_m_1, MSM8916_PNOC_MAS_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
+DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 8, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS);
+DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 8, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG);
+DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 8, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB);
+DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 8, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG);
+DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 8, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG);
+DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 8, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1);
+DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 8, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG);
+DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC_SLV);
+DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1);
+DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC);
+DEFINE_QNODE(slv_apps_l2, MSM8916_SLAVE_AMPSS_L2, 8, -1, -1, 0);
+DEFINE_QNODE(slv_apss, MSM8916_SLAVE_APSS, 4, -1, 20, 0);
+DEFINE_QNODE(slv_audio, MSM8916_SLAVE_LPASS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_bimc_cfg, MSM8916_SLAVE_BIMC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_blsp_1, MSM8916_SLAVE_BLSP_1, 4, -1, -1, 0);
+DEFINE_QNODE(slv_boot_rom, MSM8916_SLAVE_BOOT_ROM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_camera_cfg, MSM8916_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_cats_0, MSM8916_SLAVE_CATS_128, 16, -1, 106, 0);
+DEFINE_QNODE(slv_cats_1, MSM8916_SLAVE_OCMEM_64, 8, -1, 107, 0);
+DEFINE_QNODE(slv_clk_ctl, MSM8916_SLAVE_CLK_CTL, 4, -1, -1, 0);
+DEFINE_QNODE(slv_crypto_0_cfg, MSM8916_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_dehr_cfg, MSM8916_SLAVE_DEHR_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_display_cfg, MSM8916_SLAVE_DISPLAY_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_ebi_ch0, MSM8916_SLAVE_EBI_CH0, 8, -1, 0, 0);
+DEFINE_QNODE(slv_gfx_cfg, MSM8916_SLAVE_GRAPHICS_3D_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_imem_cfg, MSM8916_SLAVE_IMEM_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_imem, MSM8916_SLAVE_IMEM, 8, -1, 26, 0);
+DEFINE_QNODE(slv_mpm, MSM8916_SLAVE_MPM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_msg_ram, MSM8916_SLAVE_MSG_RAM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_mss, MSM8916_SLAVE_MSS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pdm, MSM8916_SLAVE_PDM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pmic_arb, MSM8916_SLAVE_PMIC_ARB, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pcnoc_cfg, MSM8916_SLAVE_PNOC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_prng, MSM8916_SLAVE_PRNG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_qdss_cfg, MSM8916_SLAVE_QDSS_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_qdss_stm, MSM8916_SLAVE_QDSS_STM, 4, -1, 30, 0);
+DEFINE_QNODE(slv_rbcpr_cfg, MSM8916_SLAVE_RBCPR_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_sdcc_1, MSM8916_SLAVE_SDCC_1, 4, -1, -1, 0);
+DEFINE_QNODE(slv_sdcc_2, MSM8916_SLAVE_SDCC_2, 4, -1, -1, 0);
+DEFINE_QNODE(slv_security, MSM8916_SLAVE_SECURITY, 4, -1, -1, 0);
+DEFINE_QNODE(slv_snoc_cfg, MSM8916_SLAVE_SNOC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_spdm, MSM8916_SLAVE_SPDM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_srvc_snoc, MSM8916_SLAVE_SRVC_SNOC, 8, -1, 29, 0);
+DEFINE_QNODE(slv_tcsr, MSM8916_SLAVE_TCSR, 4, -1, -1, 0);
+DEFINE_QNODE(slv_tlmm, MSM8916_SLAVE_TLMM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_usb_hs, MSM8916_SLAVE_USB_HS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_venus_cfg, MSM8916_SLAVE_VENUS_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(snoc_bimc_0_mas, MSM8916_SNOC_BIMC_0_MAS, 8, 3, -1, MSM8916_SNOC_BIMC_0_SLV);
+DEFINE_QNODE(snoc_bimc_0_slv, MSM8916_SNOC_BIMC_0_SLV, 8, -1, 24, MSM8916_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_bimc_1_mas, MSM8916_SNOC_BIMC_1_MAS, 16, -1, -1, MSM8916_SNOC_BIMC_1_SLV);
+DEFINE_QNODE(snoc_bimc_1_slv, MSM8916_SNOC_BIMC_1_SLV, 8, -1, -1, MSM8916_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_int_0, MSM8916_SNOC_INT_0, 8, 99, 130, MSM8916_SLAVE_QDSS_STM, MSM8916_SLAVE_IMEM, MSM8916_SNOC_PNOC_MAS);
+DEFINE_QNODE(snoc_int_1, MSM8916_SNOC_INT_1, 8, 100, 131, MSM8916_SLAVE_APSS, MSM8916_SLAVE_CATS_128, MSM8916_SLAVE_OCMEM_64);
+DEFINE_QNODE(snoc_int_bimc, MSM8916_SNOC_INT_BIMC, 8, 101, 132, MSM8916_SNOC_BIMC_0_MAS);
+DEFINE_QNODE(snoc_pcnoc_mas, MSM8916_SNOC_PNOC_MAS, 8, -1, -1, MSM8916_SNOC_PNOC_SLV);
+DEFINE_QNODE(snoc_pcnoc_slv, MSM8916_SNOC_PNOC_SLV, 8, -1, -1, MSM8916_PNOC_INT_0);
+
+static struct msm8916_icc_node *msm8916_snoc_nodes[] = {
+	[BIMC_SNOC_SLV] = &bimc_snoc_slv,
+	[MASTER_JPEG] = &mas_jpeg,
+	[MASTER_MDP_PORT0] = &mas_mdp,
+	[MASTER_QDSS_BAM] = &mas_qdss_bam,
+	[MASTER_QDSS_ETR] = &mas_qdss_etr,
+	[MASTER_SNOC_CFG] = &mas_snoc_cfg,
+	[MASTER_VFE] = &mas_vfe,
+	[MASTER_VIDEO_P0] = &mas_video,
+	[SNOC_MM_INT_0] = &mm_int_0,
+	[SNOC_MM_INT_1] = &mm_int_1,
+	[SNOC_MM_INT_2] = &mm_int_2,
+	[SNOC_MM_INT_BIMC] = &mm_int_bimc,
+	[PCNOC_SNOC_SLV] = &pcnoc_snoc_slv,
+	[SLAVE_APSS] = &slv_apss,
+	[SLAVE_CATS_128] = &slv_cats_0,
+	[SLAVE_OCMEM_64] = &slv_cats_1,
+	[SLAVE_IMEM] = &slv_imem,
+	[SLAVE_QDSS_STM] = &slv_qdss_stm,
+	[SLAVE_SRVC_SNOC] = &slv_srvc_snoc,
+	[SNOC_BIMC_0_MAS] = &snoc_bimc_0_mas,
+	[SNOC_BIMC_1_MAS] = &snoc_bimc_1_mas,
+	[SNOC_INT_0] = &snoc_int_0,
+	[SNOC_INT_1] = &snoc_int_1,
+	[SNOC_INT_BIMC] = &snoc_int_bimc,
+	[SNOC_PCNOC_MAS] = &snoc_pcnoc_mas,
+	[SNOC_QDSS_INT] = &qdss_int,
+};
+
+static struct msm8916_icc_desc msm8916_snoc = {
+	.nodes = msm8916_snoc_nodes,
+	.num_nodes = ARRAY_SIZE(msm8916_snoc_nodes),
+};
+
+static struct msm8916_icc_node *msm8916_bimc_nodes[] = {
+	[BIMC_SNOC_MAS] = &bimc_snoc_mas,
+	[MASTER_AMPSS_M0] = &mas_apss,
+	[MASTER_GRAPHICS_3D] = &mas_gfx,
+	[MASTER_TCU0] = &mas_tcu0,
+	[MASTER_TCU1] = &mas_tcu1,
+	[SLAVE_AMPSS_L2] = &slv_apps_l2,
+	[SLAVE_EBI_CH0] = &slv_ebi_ch0,
+	[SNOC_BIMC_0_SLV] = &snoc_bimc_0_slv,
+	[SNOC_BIMC_1_SLV] = &snoc_bimc_1_slv,
+};
+
+static struct msm8916_icc_desc msm8916_bimc = {
+	.nodes = msm8916_bimc_nodes,
+	.num_nodes = ARRAY_SIZE(msm8916_bimc_nodes),
+};
+
+static struct msm8916_icc_node *msm8916_pcnoc_nodes[] = {
+	[MASTER_BLSP_1] = &mas_blsp_1,
+	[MASTER_DEHR] = &mas_dehr,
+	[MASTER_LPASS] = &mas_audio,
+	[MASTER_CRYPTO_CORE0] = &mas_pcnoc_crypto_0,
+	[MASTER_SDCC_1] = &mas_pcnoc_sdcc_1,
+	[MASTER_SDCC_2] = &mas_pcnoc_sdcc_2,
+	[MASTER_SPDM] = &mas_spdm,
+	[MASTER_USB_HS] = &mas_usb_hs,
+	[PCNOC_INT_0] = &pcnoc_int_0,
+	[PCNOC_INT_1] = &pcnoc_int_1,
+	[PCNOC_MAS_0] = &pcnoc_m_0,
+	[PCNOC_MAS_1] = &pcnoc_m_1,
+	[PCNOC_SLV_0] = &pcnoc_s_0,
+	[PCNOC_SLV_1] = &pcnoc_s_1,
+	[PCNOC_SLV_2] = &pcnoc_s_2,
+	[PCNOC_SLV_3] = &pcnoc_s_3,
+	[PCNOC_SLV_4] = &pcnoc_s_4,
+	[PCNOC_SLV_8] = &pcnoc_s_8,
+	[PCNOC_SLV_9] = &pcnoc_s_9,
+	[PCNOC_SNOC_MAS] = &pcnoc_snoc_mas,
+	[SLAVE_BIMC_CFG] = &slv_bimc_cfg,
+	[SLAVE_BLSP_1] = &slv_blsp_1,
+	[SLAVE_BOOT_ROM] = &slv_boot_rom,
+	[SLAVE_CAMERA_CFG] = &slv_camera_cfg,
+	[SLAVE_CLK_CTL] = &slv_clk_ctl,
+	[SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+	[SLAVE_DEHR_CFG] = &slv_dehr_cfg,
+	[SLAVE_DISPLAY_CFG] = &slv_display_cfg,
+	[SLAVE_GRAPHICS_3D_CFG] = &slv_gfx_cfg,
+	[SLAVE_IMEM_CFG] = &slv_imem_cfg,
+	[SLAVE_LPASS] = &slv_audio,
+	[SLAVE_MPM] = &slv_mpm,
+	[SLAVE_MSG_RAM] = &slv_msg_ram,
+	[SLAVE_MSS] = &slv_mss,
+	[SLAVE_PDM] = &slv_pdm,
+	[SLAVE_PMIC_ARB] = &slv_pmic_arb,
+	[SLAVE_PCNOC_CFG] = &slv_pcnoc_cfg,
+	[SLAVE_PRNG] = &slv_prng,
+	[SLAVE_QDSS_CFG] = &slv_qdss_cfg,
+	[SLAVE_RBCPR_CFG] = &slv_rbcpr_cfg,
+	[SLAVE_SDCC_1] = &slv_sdcc_1,
+	[SLAVE_SDCC_2] = &slv_sdcc_2,
+	[SLAVE_SECURITY] = &slv_security,
+	[SLAVE_SNOC_CFG] = &slv_snoc_cfg,
+	[SLAVE_SPDM] = &slv_spdm,
+	[SLAVE_TCSR] = &slv_tcsr,
+	[SLAVE_TLMM] = &slv_tlmm,
+	[SLAVE_USB_HS] = &slv_usb_hs,
+	[SLAVE_VENUS_CFG] = &slv_venus_cfg,
+	[SNOC_PCNOC_SLV] = &snoc_pcnoc_slv,
+};
+
+static struct msm8916_icc_desc msm8916_pcnoc = {
+	.nodes = msm8916_pcnoc_nodes,
+	.num_nodes = ARRAY_SIZE(msm8916_pcnoc_nodes),
+};
+
+static int msm8916_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+	struct msm8916_icc_provider *qp;
+	struct msm8916_icc_node *qn;
+	u64 sum_bw, max_peak_bw, rate;
+	u32 agg_avg = 0, agg_peak = 0;
+	struct icc_provider *provider;
+	struct icc_node *n;
+	int ret, i;
+
+	qn = src->data;
+	provider = src->provider;
+	qp = to_msm8916_provider(provider);
+
+	list_for_each_entry(n, &provider->nodes, node_list)
+		provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+				    &agg_avg, &agg_peak);
+
+	sum_bw = icc_units_to_bps(agg_avg);
+	max_peak_bw = icc_units_to_bps(agg_peak);
+
+	/* send bandwidth request message to the RPM processor */
+	if (qn->mas_rpm_id != -1) {
+		ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+					    RPM_BUS_MASTER_REQ,
+					    qn->mas_rpm_id,
+					    sum_bw);
+		if (ret) {
+			pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
+			       qn->mas_rpm_id, ret);
+			return ret;
+		}
+	}
+
+	if (qn->slv_rpm_id != -1) {
+		ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+					    RPM_BUS_SLAVE_REQ,
+					    qn->slv_rpm_id,
+					    sum_bw);
+		if (ret) {
+			pr_err("qcom_icc_rpm_smd_send slv error %d\n",
+			       ret);
+			return ret;
+		}
+	}
+
+	rate = max(sum_bw, max_peak_bw);
+
+	do_div(rate, qn->buswidth);
+
+	if (qn->rate == rate)
+		return 0;
+
+	for (i = 0; i < qp->num_clks; i++) {
+		ret = clk_set_rate(qp->bus_clks[i].clk, rate);
+		if (ret) {
+			pr_err("%s clk_set_rate error: %d\n",
+			       qp->bus_clks[i].id, ret);
+			return ret;
+		}
+	}
+
+	qn->rate = rate;
+
+	return 0;
+}
+
+static int msm8916_qnoc_probe(struct platform_device *pdev)
+{
+	const struct msm8916_icc_desc *desc;
+	struct msm8916_icc_node **qnodes;
+	struct msm8916_icc_provider *qp;
+	struct device *dev = &pdev->dev;
+	struct icc_onecell_data *data;
+	struct icc_provider *provider;
+	struct icc_node *node;
+	size_t num_nodes, i;
+	int ret;
+
+	/* wait for the RPM proxy */
+	if (!qcom_icc_rpm_smd_available())
+		return -EPROBE_DEFER;
+
+	desc = of_device_get_match_data(dev);
+	if (!desc)
+		return -EINVAL;
+
+	qnodes = desc->nodes;
+	num_nodes = desc->num_nodes;
+
+	qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
+	if (!qp)
+		return -ENOMEM;
+
+	data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+			    GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	qp->bus_clks = devm_kmemdup(dev, msm8916_bus_clocks,
+				    sizeof(msm8916_bus_clocks), GFP_KERNEL);
+	if (!qp->bus_clks)
+		return -ENOMEM;
+
+	qp->num_clks = ARRAY_SIZE(msm8916_bus_clocks);
+	ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+	if (ret)
+		return ret;
+
+	ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
+	if (ret)
+		return ret;
+
+	provider = &qp->provider;
+	INIT_LIST_HEAD(&provider->nodes);
+	provider->dev = dev;
+	provider->set = msm8916_icc_set;
+	provider->aggregate = icc_std_aggregate;
+	provider->xlate = of_icc_xlate_onecell;
+	provider->data = data;
+
+	ret = icc_provider_add(provider);
+	if (ret) {
+		dev_err(dev, "error adding interconnect provider: %d\n", ret);
+		clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+		return ret;
+	}
+
+	for (i = 0; i < num_nodes; i++) {
+		size_t j;
+
+		node = icc_node_create(qnodes[i]->id);
+		if (IS_ERR(node)) {
+			ret = PTR_ERR(node);
+			goto err;
+		}
+
+		node->name = qnodes[i]->name;
+		node->data = qnodes[i];
+		icc_node_add(node, provider);
+
+		for (j = 0; j < qnodes[i]->num_links; j++)
+			icc_link_create(node, qnodes[i]->links[j]);
+
+		data->nodes[i] = node;
+	}
+	data->num_nodes = num_nodes;
+
+	platform_set_drvdata(pdev, qp);
+
+	return 0;
+
+err:
+	icc_nodes_remove(provider);
+	icc_provider_del(provider);
+	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+
+	return ret;
+}
+
+static int msm8916_qnoc_remove(struct platform_device *pdev)
+{
+	struct msm8916_icc_provider *qp = platform_get_drvdata(pdev);
+
+	icc_nodes_remove(&qp->provider);
+	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+	return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id msm8916_noc_of_match[] = {
+	{ .compatible = "qcom,msm8916-bimc", .data = &msm8916_bimc },
+	{ .compatible = "qcom,msm8916-pcnoc", .data = &msm8916_pcnoc },
+	{ .compatible = "qcom,msm8916-snoc", .data = &msm8916_snoc },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, msm8916_noc_of_match);
+
+static struct platform_driver msm8916_noc_driver = {
+	.probe = msm8916_qnoc_probe,
+	.remove = msm8916_qnoc_remove,
+	.driver = {
+		.name = "qnoc-msm8916",
+		.of_match_table = msm8916_noc_of_match,
+	},
+};
+module_platform_driver(msm8916_noc_driver);
+MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm MSM8916 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
index bf8bd1a..3a313e1 100644
--- a/drivers/interconnect/qcom/msm8974.c
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -550,15 +550,6 @@ static struct msm8974_icc_desc msm8974_snoc = {
 	.num_nodes = ARRAY_SIZE(msm8974_snoc_nodes),
 };
 
-static int msm8974_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
-				 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
-{
-	*agg_avg += avg_bw;
-	*agg_peak = max(*agg_peak, peak_bw);
-
-	return 0;
-}
-
 static void msm8974_icc_rpm_smd_send(struct device *dev, int rsc_type,
 				     char *name, int id, u64 val)
 {
@@ -603,8 +594,8 @@ static int msm8974_icc_set(struct icc_node *src, struct icc_node *dst)
 	qp = to_msm8974_icc_provider(provider);
 
 	list_for_each_entry(n, &provider->nodes, node_list)
-		msm8974_icc_aggregate(n, 0, n->avg_bw, n->peak_bw,
-				      &agg_avg, &agg_peak);
+		provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+				    &agg_avg, &agg_peak);
 
 	sum_bw = icc_units_to_bps(agg_avg);
 	max_peak_bw = icc_units_to_bps(agg_peak);
@@ -652,7 +643,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct icc_onecell_data *data;
 	struct icc_provider *provider;
-	struct icc_node *node, *tmp;
+	struct icc_node *node;
 	size_t num_nodes, i;
 	int ret;
 
@@ -694,7 +685,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&provider->nodes);
 	provider->dev = dev;
 	provider->set = msm8974_icc_set;
-	provider->aggregate = msm8974_icc_aggregate;
+	provider->aggregate = icc_std_aggregate;
 	provider->xlate = of_icc_xlate_onecell;
 	provider->data = data;
 
@@ -732,10 +723,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
 	return 0;
 
 err_del_icc:
-	list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) {
-		icc_node_del(node);
-		icc_node_destroy(node->id);
-	}
+	icc_nodes_remove(provider);
 	icc_provider_del(provider);
 
 err_disable_clks:
@@ -747,16 +735,10 @@ static int msm8974_icc_probe(struct platform_device *pdev)
 static int msm8974_icc_remove(struct platform_device *pdev)
 {
 	struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
-	struct icc_provider *provider = &qp->provider;
-	struct icc_node *n, *tmp;
 
-	list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
-		icc_node_del(n);
-		icc_node_destroy(n->id);
-	}
+	icc_nodes_remove(&qp->provider);
 	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
-
-	return icc_provider_del(provider);
+	return icc_provider_del(&qp->provider);
 }
 
 static const struct of_device_id msm8974_noc_of_match[] = {
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index 8e0735a..d4769a5 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -327,15 +327,6 @@ static struct qcom_icc_desc qcs404_snoc = {
 	.num_nodes = ARRAY_SIZE(qcs404_snoc_nodes),
 };
 
-static int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
-			      u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
-{
-	*agg_avg += avg_bw;
-	*agg_peak = max(*agg_peak, peak_bw);
-
-	return 0;
-}
-
 static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
 {
 	struct qcom_icc_provider *qp;
@@ -354,8 +345,8 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
 	qp = to_qcom_provider(provider);
 
 	list_for_each_entry(n, &provider->nodes, node_list)
-		qcom_icc_aggregate(n, 0, n->avg_bw, n->peak_bw,
-				   &agg_avg, &agg_peak);
+		provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+				    &agg_avg, &agg_peak);
 
 	sum_bw = icc_units_to_bps(agg_avg);
 	max_peak_bw = icc_units_to_bps(agg_peak);
@@ -414,7 +405,7 @@ static int qnoc_probe(struct platform_device *pdev)
 	struct icc_provider *provider;
 	struct qcom_icc_node **qnodes;
 	struct qcom_icc_provider *qp;
-	struct icc_node *node, *tmp;
+	struct icc_node *node;
 	size_t num_nodes, i;
 	int ret;
 
@@ -456,7 +447,7 @@ static int qnoc_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&provider->nodes);
 	provider->dev = dev;
 	provider->set = qcom_icc_set;
-	provider->aggregate = qcom_icc_aggregate;
+	provider->aggregate = icc_std_aggregate;
 	provider->xlate = of_icc_xlate_onecell;
 	provider->data = data;
 
@@ -494,10 +485,7 @@ static int qnoc_probe(struct platform_device *pdev)
 
 	return 0;
 err:
-	list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) {
-		icc_node_del(node);
-		icc_node_destroy(node->id);
-	}
+	icc_nodes_remove(provider);
 	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
 	icc_provider_del(provider);
 
@@ -507,16 +495,10 @@ static int qnoc_probe(struct platform_device *pdev)
 static int qnoc_remove(struct platform_device *pdev)
 {
 	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
-	struct icc_provider *provider = &qp->provider;
-	struct icc_node *n, *tmp;
 
-	list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
-		icc_node_del(n);
-		icc_node_destroy(n->id);
-	}
+	icc_nodes_remove(&qp->provider);
 	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
-
-	return icc_provider_del(provider);
+	return icc_provider_del(&qp->provider);
 }
 
 static const struct of_device_id qcs404_noc_of_match[] = {
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index 387267e..f078cf0 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -855,11 +855,7 @@ static int qnoc_probe(struct platform_device *pdev)
 
 	return ret;
 err:
-	list_for_each_entry(node, &provider->nodes, node_list) {
-		icc_node_del(node);
-		icc_node_destroy(node->id);
-	}
-
+	icc_nodes_remove(provider);
 	icc_provider_del(provider);
 	return ret;
 }
@@ -867,15 +863,9 @@ static int qnoc_probe(struct platform_device *pdev)
 static int qnoc_remove(struct platform_device *pdev)
 {
 	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
-	struct icc_provider *provider = &qp->provider;
-	struct icc_node *n, *tmp;
 
-	list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
-		icc_node_del(n);
-		icc_node_destroy(n->id);
-	}
-
-	return icc_provider_del(provider);
+	icc_nodes_remove(&qp->provider);
+	return icc_provider_del(&qp->provider);
 }
 
 static const struct of_device_id qnoc_of_match[] = {
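
All of the provider drivers touched above now call a single icc_nodes_remove(provider) where they previously walked provider->nodes themselves in their probe error paths and remove callbacks. Based on the loops being deleted, the new core helper is assumed to unlink and destroy every node registered with the provider; a sketch under that assumption:

	static void icc_nodes_remove_sketch(struct icc_provider *provider)
	{
		struct icc_node *n, *tmp;

		/* Walk with the _safe variant because each node is freed as we go */
		list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
			icc_node_del(n);		/* unlink from the provider */
			icc_node_destroy(n->id);	/* release the node and its id */
		}
	}
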
diff --git a/drivers/interconnect/trace.h b/drivers/interconnect/trace.h
new file mode 100644
index 0000000..3d668ff
--- /dev/null
+++ b/drivers/interconnect/trace.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework tracepoints
+ * Copyright (c) 2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM interconnect
+
+#if !defined(_TRACE_INTERCONNECT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_INTERCONNECT_H
+
+#include <linux/interconnect.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(icc_set_bw,
+
+	TP_PROTO(struct icc_path *p, struct icc_node *n, int i,
+		 u32 avg_bw, u32 peak_bw),
+
+	TP_ARGS(p, n, i, avg_bw, peak_bw),
+
+	TP_STRUCT__entry(
+		__string(path_name, p->name)
+		__string(dev, dev_name(p->reqs[i].dev))
+		__string(node_name, n->name)
+		__field(u32, avg_bw)
+		__field(u32, peak_bw)
+		__field(u32, node_avg_bw)
+		__field(u32, node_peak_bw)
+	),
+
+	TP_fast_assign(
+		__assign_str(path_name, p->name);
+		__assign_str(dev, dev_name(p->reqs[i].dev));
+		__assign_str(node_name, n->name);
+		__entry->avg_bw = avg_bw;
+		__entry->peak_bw = peak_bw;
+		__entry->node_avg_bw = n->avg_bw;
+		__entry->node_peak_bw = n->peak_bw;
+	),
+
+	TP_printk("path=%s dev=%s node=%s avg_bw=%u peak_bw=%u agg_avg=%u agg_peak=%u",
+		  __get_str(path_name),
+		  __get_str(dev),
+		  __get_str(node_name),
+		  __entry->avg_bw,
+		  __entry->peak_bw,
+		  __entry->node_avg_bw,
+		  __entry->node_peak_bw)
+);
+
+TRACE_EVENT(icc_set_bw_end,
+
+	TP_PROTO(struct icc_path *p, int ret),
+
+	TP_ARGS(p, ret),
+
+	TP_STRUCT__entry(
+		__string(path_name, p->name)
+		__string(dev, dev_name(p->reqs[0].dev))
+		__field(int, ret)
+	),
+
+	TP_fast_assign(
+		__assign_str(path_name, p->name);
+		__assign_str(dev, dev_name(p->reqs[0].dev));
+		__entry->ret = ret;
+	),
+
+	TP_printk("path=%s dev=%s ret=%d",
+		  __get_str(path_name),
+		  __get_str(dev),
+		  __entry->ret)
+);
+
+#endif /* _TRACE_INTERCONNECT_H */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
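
TRACE_EVENT(icc_set_bw) and TRACE_EVENT(icc_set_bw_end) above expand into trace_icc_set_bw() and trace_icc_set_bw_end() with the TP_PROTO signatures shown. A hedged sketch of how the interconnect core might emit them while applying a path's bandwidth votes; apply_constraints(), path->num_nodes and reqs[].node are assumed names here, standing in for whatever the core actually uses:

	static int icc_apply_path_sketch(struct icc_path *path, u32 avg_bw, u32 peak_bw)
	{
		size_t i;
		int ret;

		/* One event per request in the path, before pushing the votes */
		for (i = 0; i < path->num_nodes; i++)
			trace_icc_set_bw(path, path->reqs[i].node, i, avg_bw, peak_bw);

		ret = apply_constraints(path);	/* assumed core helper, not shown */
		trace_icc_set_bw_end(path, ret);

		return ret;
	}
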
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 0b9d78a..d2fade9 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -82,7 +82,7 @@
 config IOMMU_DEFAULT_PASSTHROUGH
 	bool "IOMMU passthrough by default"
 	depends on IOMMU_API
-        help
+	help
 	  Enable passthrough by default, removing the need to pass in
 	  iommu.passthrough=on or iommu=pt through command line. If this
 	  is enabled, you can still disable with iommu.passthrough=off
@@ -91,8 +91,8 @@
 	  If unsure, say N here.
 
 config OF_IOMMU
-       def_bool y
-       depends on OF && IOMMU_API
+	def_bool y
+	depends on OF && IOMMU_API
 
 # IOMMU-agnostic DMA-mapping layer
 config IOMMU_DMA
@@ -214,6 +214,7 @@
 	select PCI_PASID
 	select PCI_PRI
 	select MMU_NOTIFIER
+	select IOASID
 	help
 	  Shared Virtual Memory (SVM) provides a facility for devices
 	  to access DMA resources through process address space by
@@ -248,6 +249,18 @@
 	  workaround will setup a 1:1 mapping for the first
 	  16MiB to make floppy (an ISA device) work.
 
+config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+	bool "Enable Intel IOMMU scalable mode by default"
+	depends on INTEL_IOMMU
+	help
+	  Selecting this option enables scalable mode by default when the
+	  hardware advertises the capability. Scalable mode is defined in
+	  VT-d 3.0. The scalable mode capability can be checked by reading
+	  /sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option
+	  is not selected, scalable mode support can still be enabled by
+	  passing intel_iommu=sm_on on the kernel command line. If unsure,
+	  keep the default value.
+
 config IRQ_REMAP
 	bool "Support for Interrupt Remapping"
 	depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
@@ -356,7 +369,7 @@
 
 # ARM IOMMU support
 config ARM_SMMU
-	bool "ARM Ltd. System MMU (SMMU) Support"
+	tristate "ARM Ltd. System MMU (SMMU) Support"
 	depends on (ARM64 || ARM) && MMU
 	select IOMMU_API
 	select IOMMU_IO_PGTABLE_LPAE
@@ -368,6 +381,18 @@
 	  Say Y here if your SoC includes an IOMMU device implementing
 	  the ARM SMMU architecture.
 
+config ARM_SMMU_LEGACY_DT_BINDINGS
+	bool "Support the legacy \"mmu-masters\" devicetree bindings"
+	depends on ARM_SMMU=y && OF
+	help
+	  Support for the badly designed and deprecated "mmu-masters"
+	  devicetree bindings. This allows some DMA masters to attach
+	  to the SMMU but does not provide any support via the DMA API.
+	  If you're lucky, you might be able to get VFIO up and running.
+
+	  If you say Y here then you'll make me very sad. Instead, say N
+	  and move your firmware to the utopian future that was 2016.
+
 config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
 	bool "Default to disabling bypass on ARM SMMU v1 and v2"
 	depends on ARM_SMMU
@@ -394,7 +419,7 @@
 	  config.
 
 config ARM_SMMU_V3
-	bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
+	tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
 	depends on ARM64
 	select IOMMU_API
 	select IOMMU_IO_PGTABLE_LPAE
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 97814cc..2104fb8 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -14,7 +14,8 @@
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu-mod.o
+arm-smmu-mod-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
 obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index bd25674..aac132b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -230,11 +230,8 @@ static struct pci_dev *setup_aliases(struct device *dev)
 	 */
 	ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
 	if (ivrs_alias != pci_dev_id(pdev) &&
-	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
-		pci_add_dma_alias(pdev, ivrs_alias & 0xff);
-		pci_info(pdev, "Added PCI DMA alias %02x.%d\n",
-			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias));
-	}
+	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
+		pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
 
 	clone_aliases(pdev);
 
@@ -2297,7 +2294,6 @@ int __init amd_iommu_init_api(void)
 int __init amd_iommu_init_dma_ops(void)
 {
 	swiotlb        = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
-	iommu_detected = 1;
 
 	if (amd_iommu_unmap_flush)
 		pr_info("IO/TLB flush on unmap enabled\n");
@@ -2641,15 +2637,6 @@ static void amd_iommu_get_resv_regions(struct device *dev,
 	list_add_tail(&region->list, head);
 }
 
-static void amd_iommu_put_resv_regions(struct device *dev,
-				     struct list_head *head)
-{
-	struct iommu_resv_region *entry, *next;
-
-	list_for_each_entry_safe(entry, next, head, list)
-		kfree(entry);
-}
-
 static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
 					 struct device *dev)
 {
@@ -2688,7 +2675,7 @@ const struct iommu_ops amd_iommu_ops = {
 	.device_group = amd_iommu_device_group,
 	.domain_get_attr = amd_iommu_domain_get_attr,
 	.get_resv_regions = amd_iommu_get_resv_regions,
-	.put_resv_regions = amd_iommu_put_resv_regions,
+	.put_resv_regions = generic_iommu_put_resv_regions,
 	.is_attach_deferred = amd_iommu_is_attach_deferred,
 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 	.flush_iotlb_all = amd_iommu_flush_iotlb_all,
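
The amd_iommu.c hunk above (and a matching one in arm-smmu.c further down) deletes a driver-local put_resv_regions implementation and wires the ops to generic_iommu_put_resv_regions instead. Given that the removed bodies are identical, the shared core helper is presumably the same loop; sketch:

	static void generic_iommu_put_resv_regions_sketch(struct device *dev,
							  struct list_head *head)
	{
		struct iommu_resv_region *entry, *next;

		/* Free every region handed out by the driver's get_resv_regions() */
		list_for_each_entry_safe(entry, next, head, list)
			kfree(entry);
	}
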
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 823cc4e..2759a8d 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -71,6 +71,8 @@
 #define IVHD_FLAG_ISOC_EN_MASK          0x08
 
 #define IVMD_FLAG_EXCL_RANGE            0x08
+#define IVMD_FLAG_IW                    0x04
+#define IVMD_FLAG_IR                    0x02
 #define IVMD_FLAG_UNITY_MAP             0x01
 
 #define ACPI_DEVFLAG_INITPASS           0x01
@@ -147,7 +149,7 @@ bool amd_iommu_dump;
 bool amd_iommu_irq_remap __read_mostly;
 
 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
-static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
 
 static bool amd_iommu_detected;
 static bool __initdata amd_iommu_disabled;
@@ -714,7 +716,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
 	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 
-	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
+	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
 	iommu_feature_enable(iommu, CONTROL_PPR_EN);
 }
 
@@ -1116,21 +1118,17 @@ static int __init add_early_maps(void)
  */
 static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 {
-	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
 	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
 		return;
 
-	if (iommu) {
-		/*
-		 * We only can configure exclusion ranges per IOMMU, not
-		 * per device. But we can enable the exclusion range per
-		 * device. This is done here
-		 */
-		set_dev_entry_bit(devid, DEV_ENTRY_EX);
-		iommu->exclusion_start = m->range_start;
-		iommu->exclusion_length = m->range_length;
-	}
+	/*
+	 * Treat per-device exclusion ranges as r/w unity-mapped regions
+	 * since some buggy BIOSes define multiple exclusion ranges (IVMD
+	 * entries) in the ACPI table, which would otherwise overwrite the
+	 * single per-IOMMU exclusion range (the exclusion_start and
+	 * exclusion_length members).
+	 */
+	m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP);
 }
 
 /*
@@ -1523,8 +1521,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
 		if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
-		if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0))
-			amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
 		break;
 	case 0x11:
 	case 0x40:
@@ -1534,8 +1530,15 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
 		if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
-		if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0))
-			amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
+		/*
+		 * Note: Since iommu_update_intcapxt() leverages
+		 * the IOMMU MMIO access to MSI capability block registers
+		 * for MSI address lo/hi/data, we need to check both
+		 * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
+		 */
+		if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
+		    (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
+			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
 		break;
 	default:
 		return -EINVAL;
@@ -1727,7 +1730,6 @@ static const struct attribute_group *amd_iommu_groups[] = {
 static int __init iommu_init_pci(struct amd_iommu *iommu)
 {
 	int cap_ptr = iommu->cap_ptr;
-	u32 range, misc, low, high;
 	int ret;
 
 	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
@@ -1740,19 +1742,12 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 
 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
 			      &iommu->cap);
-	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
-			      &range);
-	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
-			      &misc);
 
 	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
 		amd_iommu_iotlb_sup = false;
 
 	/* read extended feature bits */
-	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
-	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
-
-	iommu->features = ((u64)high << 32) | low;
+	iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
 
 	if (iommu_feature(iommu, FEATURE_GT)) {
 		int glxval;
@@ -1996,8 +1991,8 @@ static int iommu_init_intcapxt(struct amd_iommu *iommu)
 	struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
 
 	/**
-	 * IntCapXT requires XTSup=1, which can be inferred
-	 * amd_iommu_xt_mode.
+	 * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
+	 * which can be inferred from amd_iommu_xt_mode.
 	 */
 	if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
 		return 0;
@@ -2044,7 +2039,7 @@ static int iommu_init_msi(struct amd_iommu *iommu)
 	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
 
 	if (iommu->ppr_log != NULL)
-		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
 
 	iommu_ga_log_enable(iommu);
 
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index f52f59d..f8d01d6 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -147,8 +147,8 @@
 #define CONTROL_COHERENT_EN     0x0aULL
 #define CONTROL_ISOC_EN         0x0bULL
 #define CONTROL_CMDBUF_EN       0x0cULL
-#define CONTROL_PPFLOG_EN       0x0dULL
-#define CONTROL_PPFINT_EN       0x0eULL
+#define CONTROL_PPRLOG_EN       0x0dULL
+#define CONTROL_PPRINT_EN       0x0eULL
 #define CONTROL_PPR_EN          0x0fULL
 #define CONTROL_GT_EN           0x10ULL
 #define CONTROL_GA_EN           0x11ULL
@@ -377,12 +377,12 @@
 #define IOMMU_CAP_EFR     27
 
 /* IOMMU Feature Reporting Field (for IVHD type 10h */
-#define IOMMU_FEAT_XTSUP_SHIFT	0
 #define IOMMU_FEAT_GASUP_SHIFT	6
 
 /* IOMMU Extended Feature Register (EFR) */
 #define IOMMU_EFR_XTSUP_SHIFT	2
 #define IOMMU_EFR_GASUP_SHIFT	7
+#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT	46
 
 #define MAX_DOMAIN_ID 65536
 
@@ -463,7 +463,6 @@ struct amd_irte_ops;
  * independent of their use.
  */
 struct protection_domain {
-	struct list_head list;  /* for list of all protection domains */
 	struct list_head dev_list; /* List of all devices in this domain */
 	struct iommu_domain domain; /* generic domain handle used by
 				       iommu core code */
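
One detail worth calling out from the amd_iommu_init.c hunks above: reading the extended feature register now uses a single readq() instead of two readl() calls stitched together. On the 64-bit platforms this driver runs on, both forms decode the same register value; a small comparison sketch (read_ext_features_sketch() is illustrative only):

	static u64 read_ext_features_sketch(struct amd_iommu *iommu)
	{
		/* old style: two 32-bit MMIO reads combined by hand */
		u32 low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
		u32 high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
		u64 combined = ((u64)high << 32) | low;

		/* new style: one naturally aligned 64-bit MMIO read */
		u64 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);

		WARN_ON(combined != features);
		return features;
	}
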
diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c
index b2fe72a..74d97a8 100644
--- a/drivers/iommu/arm-smmu-impl.c
+++ b/drivers/iommu/arm-smmu-impl.c
@@ -119,7 +119,7 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu)
 	 * Secure has also cleared SACR.CACHE_LOCK for this to take effect...
 	 */
 	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
-	major = FIELD_GET(ID7_MAJOR, reg);
+	major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg);
 	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
 	if (major >= 2)
 		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index effe72e..aa3ac2a 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -21,8 +21,7 @@
 #include <linux/io-pgtable.h>
 #include <linux/iommu.h>
 #include <linux/iopoll.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
 #include <linux/msi.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -224,9 +223,15 @@
 
 #define STRTAB_STE_0_S1FMT		GENMASK_ULL(5, 4)
 #define STRTAB_STE_0_S1FMT_LINEAR	0
+#define STRTAB_STE_0_S1FMT_64K_L2	2
 #define STRTAB_STE_0_S1CTXPTR_MASK	GENMASK_ULL(51, 6)
 #define STRTAB_STE_0_S1CDMAX		GENMASK_ULL(63, 59)
 
+#define STRTAB_STE_1_S1DSS		GENMASK_ULL(1, 0)
+#define STRTAB_STE_1_S1DSS_TERMINATE	0x0
+#define STRTAB_STE_1_S1DSS_BYPASS	0x1
+#define STRTAB_STE_1_S1DSS_SSID0	0x2
+
 #define STRTAB_STE_1_S1C_CACHE_NC	0UL
 #define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
 #define STRTAB_STE_1_S1C_CACHE_WT	2UL
@@ -251,6 +256,13 @@
 
 #define STRTAB_STE_2_S2VMID		GENMASK_ULL(15, 0)
 #define STRTAB_STE_2_VTCR		GENMASK_ULL(50, 32)
+#define STRTAB_STE_2_VTCR_S2T0SZ	GENMASK_ULL(5, 0)
+#define STRTAB_STE_2_VTCR_S2SL0		GENMASK_ULL(7, 6)
+#define STRTAB_STE_2_VTCR_S2IR0		GENMASK_ULL(9, 8)
+#define STRTAB_STE_2_VTCR_S2OR0		GENMASK_ULL(11, 10)
+#define STRTAB_STE_2_VTCR_S2SH0		GENMASK_ULL(13, 12)
+#define STRTAB_STE_2_VTCR_S2TG		GENMASK_ULL(15, 14)
+#define STRTAB_STE_2_VTCR_S2PS		GENMASK_ULL(18, 16)
 #define STRTAB_STE_2_S2AA64		(1UL << 51)
 #define STRTAB_STE_2_S2ENDI		(1UL << 52)
 #define STRTAB_STE_2_S2PTW		(1UL << 54)
@@ -258,30 +270,34 @@
 
 #define STRTAB_STE_3_S2TTB_MASK		GENMASK_ULL(51, 4)
 
-/* Context descriptor (stage-1 only) */
+/*
+ * Context descriptors.
+ *
+ * Linear: when less than 1024 SSIDs are supported
+ * 2lvl: at most 1024 L1 entries,
+ *       1024 lazy entries per table.
+ */
+#define CTXDESC_SPLIT			10
+#define CTXDESC_L2_ENTRIES		(1 << CTXDESC_SPLIT)
+
+#define CTXDESC_L1_DESC_DWORDS		1
+#define CTXDESC_L1_DESC_V		(1UL << 0)
+#define CTXDESC_L1_DESC_L2PTR_MASK	GENMASK_ULL(51, 12)
+
 #define CTXDESC_CD_DWORDS		8
 #define CTXDESC_CD_0_TCR_T0SZ		GENMASK_ULL(5, 0)
-#define ARM64_TCR_T0SZ			GENMASK_ULL(5, 0)
 #define CTXDESC_CD_0_TCR_TG0		GENMASK_ULL(7, 6)
-#define ARM64_TCR_TG0			GENMASK_ULL(15, 14)
 #define CTXDESC_CD_0_TCR_IRGN0		GENMASK_ULL(9, 8)
-#define ARM64_TCR_IRGN0			GENMASK_ULL(9, 8)
 #define CTXDESC_CD_0_TCR_ORGN0		GENMASK_ULL(11, 10)
-#define ARM64_TCR_ORGN0			GENMASK_ULL(11, 10)
 #define CTXDESC_CD_0_TCR_SH0		GENMASK_ULL(13, 12)
-#define ARM64_TCR_SH0			GENMASK_ULL(13, 12)
 #define CTXDESC_CD_0_TCR_EPD0		(1ULL << 14)
-#define ARM64_TCR_EPD0			(1ULL << 7)
 #define CTXDESC_CD_0_TCR_EPD1		(1ULL << 30)
-#define ARM64_TCR_EPD1			(1ULL << 23)
 
 #define CTXDESC_CD_0_ENDI		(1UL << 15)
 #define CTXDESC_CD_0_V			(1UL << 31)
 
 #define CTXDESC_CD_0_TCR_IPS		GENMASK_ULL(34, 32)
-#define ARM64_TCR_IPS			GENMASK_ULL(34, 32)
 #define CTXDESC_CD_0_TCR_TBI0		(1ULL << 38)
-#define ARM64_TCR_TBI0			(1ULL << 37)
 
 #define CTXDESC_CD_0_AA64		(1UL << 41)
 #define CTXDESC_CD_0_S			(1UL << 44)
@@ -292,9 +308,11 @@
 
 #define CTXDESC_CD_1_TTB0_MASK		GENMASK_ULL(51, 4)
 
-/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
-#define ARM_SMMU_TCR2CD(tcr, fld)	FIELD_PREP(CTXDESC_CD_0_TCR_##fld, \
-					FIELD_GET(ARM64_TCR_##fld, tcr))
+/*
+ * When the SMMU only supports linear context descriptor tables, pick a
+ * reasonable size limit (64kB).
+ */
+#define CTXDESC_LINEAR_CDMAX		ilog2(SZ_64K / (CTXDESC_CD_DWORDS << 3))
 
 /* Command queue */
 #define CMDQ_ENT_SZ_SHIFT		4
@@ -323,6 +341,7 @@
 #define CMDQ_PREFETCH_1_SIZE		GENMASK_ULL(4, 0)
 #define CMDQ_PREFETCH_1_ADDR_MASK	GENMASK_ULL(63, 12)
 
+#define CMDQ_CFGI_0_SSID		GENMASK_ULL(31, 12)
 #define CMDQ_CFGI_0_SID			GENMASK_ULL(63, 32)
 #define CMDQ_CFGI_1_LEAF		(1UL << 0)
 #define CMDQ_CFGI_1_RANGE		GENMASK_ULL(4, 0)
@@ -384,10 +403,6 @@
 #define MSI_IOVA_BASE			0x8000000
 #define MSI_IOVA_LENGTH			0x100000
 
-/*
- * not really modular, but the easiest way to keep compat with existing
- * bootargs behaviour is to continue using module_param_named here.
- */
 static bool disable_bypass = 1;
 module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
@@ -440,8 +455,11 @@ struct arm_smmu_cmdq_ent {
 
 		#define CMDQ_OP_CFGI_STE	0x3
 		#define CMDQ_OP_CFGI_ALL	0x4
+		#define CMDQ_OP_CFGI_CD		0x5
+		#define CMDQ_OP_CFGI_CD_ALL	0x6
 		struct {
 			u32			sid;
+			u32			ssid;
 			union {
 				bool		leaf;
 				u8		span;
@@ -547,16 +565,30 @@ struct arm_smmu_strtab_l1_desc {
 	dma_addr_t			l2ptr_dma;
 };
 
-struct arm_smmu_s1_cfg {
-	__le64				*cdptr;
-	dma_addr_t			cdptr_dma;
+struct arm_smmu_ctx_desc {
+	u16				asid;
+	u64				ttbr;
+	u64				tcr;
+	u64				mair;
+};
 
-	struct arm_smmu_ctx_desc {
-		u16	asid;
-		u64	ttbr;
-		u64	tcr;
-		u64	mair;
-	}				cd;
+struct arm_smmu_l1_ctx_desc {
+	__le64				*l2ptr;
+	dma_addr_t			l2ptr_dma;
+};
+
+struct arm_smmu_ctx_desc_cfg {
+	__le64				*cdtab;
+	dma_addr_t			cdtab_dma;
+	struct arm_smmu_l1_ctx_desc	*l1_desc;
+	unsigned int			num_l1_ents;
+};
+
+struct arm_smmu_s1_cfg {
+	struct arm_smmu_ctx_desc_cfg	cdcfg;
+	struct arm_smmu_ctx_desc	cd;
+	u8				s1fmt;
+	u8				s1cdmax;
 };
 
 struct arm_smmu_s2_cfg {
@@ -638,6 +670,7 @@ struct arm_smmu_master {
 	u32				*sids;
 	unsigned int			num_sids;
 	bool				ats_enabled;
+	unsigned int			ssid_bits;
 };
 
 /* SMMU private data for an IOMMU domain */
@@ -847,15 +880,22 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
 		cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size);
 		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
 		break;
+	case CMDQ_OP_CFGI_CD:
+		cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
+		/* Fallthrough */
 	case CMDQ_OP_CFGI_STE:
 		cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
 		cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
 		break;
+	case CMDQ_OP_CFGI_CD_ALL:
+		cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
+		break;
 	case CMDQ_OP_CFGI_ALL:
 		/* Cover the entire SID range */
 		cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
 		break;
 	case CMDQ_OP_TLBI_NH_VA:
+		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
 		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
 		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
@@ -1443,50 +1483,238 @@ static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
 }
 
 /* Context descriptor manipulation functions */
-static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
+static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
+			     int ssid, bool leaf)
 {
-	u64 val = 0;
+	size_t i;
+	unsigned long flags;
+	struct arm_smmu_master *master;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cmdq_ent cmd = {
+		.opcode	= CMDQ_OP_CFGI_CD,
+		.cfgi	= {
+			.ssid	= ssid,
+			.leaf	= leaf,
+		},
+	};
 
-	/* Repack the TCR. Just care about TTBR0 for now */
-	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
-	val |= ARM_SMMU_TCR2CD(tcr, TG0);
-	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
-	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
-	val |= ARM_SMMU_TCR2CD(tcr, SH0);
-	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
-	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
-	val |= ARM_SMMU_TCR2CD(tcr, IPS);
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+		for (i = 0; i < master->num_sids; i++) {
+			cmd.cfgi.sid = master->sids[i];
+			arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+		}
+	}
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
-	return val;
+	arm_smmu_cmdq_issue_sync(smmu);
 }
 
-static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
-				    struct arm_smmu_s1_cfg *cfg)
+static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
+					struct arm_smmu_l1_ctx_desc *l1_desc)
 {
+	size_t size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
+
+	l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size,
+					     &l1_desc->l2ptr_dma, GFP_KERNEL);
+	if (!l1_desc->l2ptr) {
+		dev_warn(smmu->dev,
+			 "failed to allocate context descriptor table\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void arm_smmu_write_cd_l1_desc(__le64 *dst,
+				      struct arm_smmu_l1_ctx_desc *l1_desc)
+{
+	u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
+		  CTXDESC_L1_DESC_V;
+
+	WRITE_ONCE(*dst, cpu_to_le64(val));
+}
+
+static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
+				   u32 ssid)
+{
+	__le64 *l1ptr;
+	unsigned int idx;
+	struct arm_smmu_l1_ctx_desc *l1_desc;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+
+	if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
+		return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS;
+
+	idx = ssid >> CTXDESC_SPLIT;
+	l1_desc = &cdcfg->l1_desc[idx];
+	if (!l1_desc->l2ptr) {
+		if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
+			return NULL;
+
+		l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
+		arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
+		/* An invalid L1CD can be cached */
+		arm_smmu_sync_cd(smmu_domain, ssid, false);
+	}
+	idx = ssid & (CTXDESC_L2_ENTRIES - 1);
+	return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
+}
+
+static int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain,
+				   int ssid, struct arm_smmu_ctx_desc *cd)
+{
+	/*
+	 * This function handles the following cases:
+	 *
+	 * (1) Install primary CD, for normal DMA traffic (SSID = 0).
+	 * (2) Install a secondary CD, for SID+SSID traffic.
+	 * (3) Update ASID of a CD. Atomically write the first 64 bits of the
+	 *     CD, then invalidate the old entry and mappings.
+	 * (4) Remove a secondary CD.
+	 */
 	u64 val;
+	bool cd_live;
+	__le64 *cdptr;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+	if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
+		return -E2BIG;
+
+	cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid);
+	if (!cdptr)
+		return -ENOMEM;
+
+	val = le64_to_cpu(cdptr[0]);
+	cd_live = !!(val & CTXDESC_CD_0_V);
+
+	if (!cd) { /* (4) */
+		val = 0;
+	} else if (cd_live) { /* (3) */
+		val &= ~CTXDESC_CD_0_ASID;
+		val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
+		/*
+		 * Until CD+TLB invalidation, both ASIDs may be used for tagging
+		 * this substream's traffic
+		 */
+	} else { /* (1) and (2) */
+		cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
+		cdptr[2] = 0;
+		cdptr[3] = cpu_to_le64(cd->mair);
+
+		/*
+		 * STE is live, and the SMMU might read dwords of this CD in any
+		 * order. Ensure that it observes valid values before reading
+		 * V=1.
+		 */
+		arm_smmu_sync_cd(smmu_domain, ssid, true);
+
+		val = cd->tcr |
+#ifdef __BIG_ENDIAN
+			CTXDESC_CD_0_ENDI |
+#endif
+			CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET |
+			CTXDESC_CD_0_AA64 |
+			FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
+			CTXDESC_CD_0_V;
+
+		/* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
+		if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
+			val |= CTXDESC_CD_0_S;
+	}
 
 	/*
-	 * We don't need to issue any invalidation here, as we'll invalidate
-	 * the STE when installing the new entry anyway.
+	 * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3
+	 * "Configuration structures and configuration invalidation completion"
+	 *
+	 *   The size of single-copy atomic reads made by the SMMU is
+	 *   IMPLEMENTATION DEFINED but must be at least 64 bits. Any single
+	 *   field within an aligned 64-bit span of a structure can be altered
+	 *   without first making the structure invalid.
 	 */
-	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
-#ifdef __BIG_ENDIAN
-	      CTXDESC_CD_0_ENDI |
-#endif
-	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET |
-	      CTXDESC_CD_0_AA64 | FIELD_PREP(CTXDESC_CD_0_ASID, cfg->cd.asid) |
-	      CTXDESC_CD_0_V;
+	WRITE_ONCE(cdptr[0], cpu_to_le64(val));
+	arm_smmu_sync_cd(smmu_domain, ssid, true);
+	return 0;
+}
 
-	/* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
-	if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
-		val |= CTXDESC_CD_0_S;
+static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
+{
+	int ret;
+	size_t l1size;
+	size_t max_contexts;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+	struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg;
 
-	cfg->cdptr[0] = cpu_to_le64(val);
+	max_contexts = 1 << cfg->s1cdmax;
 
-	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK;
-	cfg->cdptr[1] = cpu_to_le64(val);
+	if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
+	    max_contexts <= CTXDESC_L2_ENTRIES) {
+		cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
+		cdcfg->num_l1_ents = max_contexts;
 
-	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair);
+		l1size = max_contexts * (CTXDESC_CD_DWORDS << 3);
+	} else {
+		cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
+		cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts,
+						  CTXDESC_L2_ENTRIES);
+
+		cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents,
+					      sizeof(*cdcfg->l1_desc),
+					      GFP_KERNEL);
+		if (!cdcfg->l1_desc)
+			return -ENOMEM;
+
+		l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+	}
+
+	cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma,
+					   GFP_KERNEL);
+	if (!cdcfg->cdtab) {
+		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
+		ret = -ENOMEM;
+		goto err_free_l1;
+	}
+
+	return 0;
+
+err_free_l1:
+	if (cdcfg->l1_desc) {
+		devm_kfree(smmu->dev, cdcfg->l1_desc);
+		cdcfg->l1_desc = NULL;
+	}
+	return ret;
+}
+
+static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
+{
+	int i;
+	size_t size, l1size;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+
+	if (cdcfg->l1_desc) {
+		size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
+
+		for (i = 0; i < cdcfg->num_l1_ents; i++) {
+			if (!cdcfg->l1_desc[i].l2ptr)
+				continue;
+
+			dmam_free_coherent(smmu->dev, size,
+					   cdcfg->l1_desc[i].l2ptr,
+					   cdcfg->l1_desc[i].l2ptr_dma);
+		}
+		devm_kfree(smmu->dev, cdcfg->l1_desc);
+		cdcfg->l1_desc = NULL;
+
+		l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+	} else {
+		l1size = cdcfg->num_l1_ents * (CTXDESC_CD_DWORDS << 3);
+	}
+
+	dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma);
+	cdcfg->cdtab_dma = 0;
+	cdcfg->cdtab = NULL;
 }
 
 /* Stream table manipulation functions */
@@ -1608,6 +1836,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 	if (s1_cfg) {
 		BUG_ON(ste_live);
 		dst[1] = cpu_to_le64(
+			 FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
 			 FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
 			 FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
 			 FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
@@ -1617,8 +1846,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 		   !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
 			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
 
-		val |= (s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
-			FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS);
+		val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+			FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
+			FIELD_PREP(STRTAB_STE_0_S1CDMAX, s1_cfg->s1cdmax) |
+			FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt);
 	}
 
 	if (s2_cfg) {
@@ -1642,7 +1873,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 						 STRTAB_STE_1_EATS_TRANS));
 
 	arm_smmu_sync_ste_for_sid(smmu, sid);
-	dst[0] = cpu_to_le64(val);
+	/* See comment in arm_smmu_write_ctx_desc() */
+	WRITE_ONCE(dst[0], cpu_to_le64(val));
 	arm_smmu_sync_ste_for_sid(smmu, sid);
 
 	/* It's likely that we'll want to use the new STE soon */
@@ -1675,7 +1907,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
 
 	desc->span = STRTAB_SPLIT + 1;
 	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
-					  GFP_KERNEL | __GFP_ZERO);
+					  GFP_KERNEL);
 	if (!desc->l2ptr) {
 		dev_err(smmu->dev,
 			"failed to allocate l2 stream table for SID %u\n",
@@ -2131,12 +2363,8 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
 		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
 
-		if (cfg->cdptr) {
-			dmam_free_coherent(smmu_domain->smmu->dev,
-					   CTXDESC_CD_DWORDS << 3,
-					   cfg->cdptr,
-					   cfg->cdptr_dma);
-
+		if (cfg->cdcfg.cdtab) {
+			arm_smmu_free_cd_tables(smmu_domain);
 			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
 		}
 	} else {
@@ -2149,55 +2377,82 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 }
 
 static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
+				       struct arm_smmu_master *master,
 				       struct io_pgtable_cfg *pgtbl_cfg)
 {
 	int ret;
 	int asid;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+	typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
 
 	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
 	if (asid < 0)
 		return asid;
 
-	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
-					 &cfg->cdptr_dma,
-					 GFP_KERNEL | __GFP_ZERO);
-	if (!cfg->cdptr) {
-		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
-		ret = -ENOMEM;
+	cfg->s1cdmax = master->ssid_bits;
+
+	ret = arm_smmu_alloc_cd_tables(smmu_domain);
+	if (ret)
 		goto out_free_asid;
-	}
 
 	cfg->cd.asid	= (u16)asid;
-	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
-	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+	cfg->cd.tcr	= FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
+			  FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
+			  FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
+			  FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
+			  FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
+			  FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
+			  CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
 	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair;
+
+	/*
+	 * Note that this will end up calling arm_smmu_sync_cd() before
+	 * the master has been added to the devices list for this domain.
+	 * This isn't an issue because the STE hasn't been installed yet.
+	 */
+	ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd);
+	if (ret)
+		goto out_free_cd_tables;
+
 	return 0;
 
+out_free_cd_tables:
+	arm_smmu_free_cd_tables(smmu_domain);
 out_free_asid:
 	arm_smmu_bitmap_free(smmu->asid_map, asid);
 	return ret;
 }
 
 static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
+				       struct arm_smmu_master *master,
 				       struct io_pgtable_cfg *pgtbl_cfg)
 {
 	int vmid;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
+	typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
 
 	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
 	if (vmid < 0)
 		return vmid;
 
+	vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
 	cfg->vmid	= (u16)vmid;
 	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
-	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+	cfg->vtcr	= FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
+			  FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
+			  FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
+			  FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
+			  FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
+			  FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
+			  FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
 	return 0;
 }
 
-static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+static int arm_smmu_domain_finalise(struct iommu_domain *domain,
+				    struct arm_smmu_master *master)
 {
 	int ret;
 	unsigned long ias, oas;
@@ -2205,6 +2460,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
 	struct io_pgtable_cfg pgtbl_cfg;
 	struct io_pgtable_ops *pgtbl_ops;
 	int (*finalise_stage_fn)(struct arm_smmu_domain *,
+				 struct arm_smmu_master *,
 				 struct io_pgtable_cfg *);
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -2259,7 +2515,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
 	domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
 	domain->geometry.force_aperture = true;
 
-	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
+	ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
 	if (ret < 0) {
 		free_io_pgtable_ops(pgtbl_ops);
 		return ret;
@@ -2412,7 +2668,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 
 	if (!smmu_domain->smmu) {
 		smmu_domain->smmu = smmu;
-		ret = arm_smmu_domain_finalise(domain);
+		ret = arm_smmu_domain_finalise(domain, master);
 		if (ret) {
 			smmu_domain->smmu = NULL;
 			goto out_unlock;
@@ -2424,6 +2680,13 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 			dev_name(smmu->dev));
 		ret = -ENXIO;
 		goto out_unlock;
+	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
+		   master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
+		dev_err(dev,
+			"cannot attach to incompatible domain (%u SSID bits != %u)\n",
+			smmu_domain->s1_cfg.s1cdmax, master->ssid_bits);
+		ret = -EINVAL;
+		goto out_unlock;
 	}
 
 	master->domain = smmu_domain;
@@ -2431,9 +2694,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
 		master->ats_enabled = arm_smmu_ats_supported(master);
 
-	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
-		arm_smmu_write_ctx_desc(smmu, &smmu_domain->s1_cfg);
-
 	arm_smmu_install_ste_for_dev(master);
 
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
@@ -2534,51 +2794,66 @@ static int arm_smmu_add_device(struct device *dev)
 
 	if (!fwspec || fwspec->ops != &arm_smmu_ops)
 		return -ENODEV;
-	/*
-	 * We _can_ actually withstand dodgy bus code re-calling add_device()
-	 * without an intervening remove_device()/of_xlate() sequence, but
-	 * we're not going to do so quietly...
-	 */
-	if (WARN_ON_ONCE(fwspec->iommu_priv)) {
-		master = fwspec->iommu_priv;
-		smmu = master->smmu;
-	} else {
-		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
-		if (!smmu)
-			return -ENODEV;
-		master = kzalloc(sizeof(*master), GFP_KERNEL);
-		if (!master)
-			return -ENOMEM;
 
-		master->dev = dev;
-		master->smmu = smmu;
-		master->sids = fwspec->ids;
-		master->num_sids = fwspec->num_ids;
-		fwspec->iommu_priv = master;
-	}
+	if (WARN_ON_ONCE(fwspec->iommu_priv))
+		return -EBUSY;
+
+	smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
+	if (!smmu)
+		return -ENODEV;
+
+	master = kzalloc(sizeof(*master), GFP_KERNEL);
+	if (!master)
+		return -ENOMEM;
+
+	master->dev = dev;
+	master->smmu = smmu;
+	master->sids = fwspec->ids;
+	master->num_sids = fwspec->num_ids;
+	fwspec->iommu_priv = master;
 
 	/* Check the SIDs are in range of the SMMU and our stream table */
 	for (i = 0; i < master->num_sids; i++) {
 		u32 sid = master->sids[i];
 
-		if (!arm_smmu_sid_in_range(smmu, sid))
-			return -ERANGE;
+		if (!arm_smmu_sid_in_range(smmu, sid)) {
+			ret = -ERANGE;
+			goto err_free_master;
+		}
 
 		/* Ensure l2 strtab is initialised */
 		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
 			ret = arm_smmu_init_l2_strtab(smmu, sid);
 			if (ret)
-				return ret;
+				goto err_free_master;
 		}
 	}
 
+	master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits);
+
+	if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
+		master->ssid_bits = min_t(u8, master->ssid_bits,
+					  CTXDESC_LINEAR_CDMAX);
+
+	ret = iommu_device_link(&smmu->iommu, dev);
+	if (ret)
+		goto err_free_master;
+
 	group = iommu_group_get_for_dev(dev);
-	if (!IS_ERR(group)) {
-		iommu_group_put(group);
-		iommu_device_link(&smmu->iommu, dev);
+	if (IS_ERR(group)) {
+		ret = PTR_ERR(group);
+		goto err_unlink;
 	}
 
-	return PTR_ERR_OR_ZERO(group);
+	iommu_group_put(group);
+	return 0;
+
+err_unlink:
+	iommu_device_unlink(&smmu->iommu, dev);
+err_free_master:
+	kfree(master);
+	fwspec->iommu_priv = NULL;
+	return ret;
 }
 
 static void arm_smmu_remove_device(struct device *dev)
@@ -2710,15 +2985,6 @@ static void arm_smmu_get_resv_regions(struct device *dev,
 	iommu_dma_get_resv_regions(dev, head);
 }
 
-static void arm_smmu_put_resv_regions(struct device *dev,
-				      struct list_head *head)
-{
-	struct iommu_resv_region *entry, *next;
-
-	list_for_each_entry_safe(entry, next, head, list)
-		kfree(entry);
-}
-
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -2736,7 +3002,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.domain_set_attr	= arm_smmu_domain_set_attr,
 	.of_xlate		= arm_smmu_of_xlate,
 	.get_resv_regions	= arm_smmu_get_resv_regions,
-	.put_resv_regions	= arm_smmu_put_resv_regions,
+	.put_resv_regions	= generic_iommu_put_resv_regions,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
 
@@ -2883,7 +3149,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 
 	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
 	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
-				     GFP_KERNEL | __GFP_ZERO);
+				     GFP_KERNEL);
 	if (!strtab) {
 		dev_err(smmu->dev,
 			"failed to allocate l1 stream table (%u bytes)\n",
@@ -2910,7 +3176,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
 
 	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
 	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
-				     GFP_KERNEL | __GFP_ZERO);
+				     GFP_KERNEL);
 	if (!strtab) {
 		dev_err(smmu->dev,
 			"failed to allocate linear stream table (%u bytes)\n",
@@ -3570,6 +3836,43 @@ static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
 		return SZ_128K;
 }
 
+static int arm_smmu_set_bus_ops(struct iommu_ops *ops)
+{
+	int err;
+
+#ifdef CONFIG_PCI
+	if (pci_bus_type.iommu_ops != ops) {
+		err = bus_set_iommu(&pci_bus_type, ops);
+		if (err)
+			return err;
+	}
+#endif
+#ifdef CONFIG_ARM_AMBA
+	if (amba_bustype.iommu_ops != ops) {
+		err = bus_set_iommu(&amba_bustype, ops);
+		if (err)
+			goto err_reset_pci_ops;
+	}
+#endif
+	if (platform_bus_type.iommu_ops != ops) {
+		err = bus_set_iommu(&platform_bus_type, ops);
+		if (err)
+			goto err_reset_amba_ops;
+	}
+
+	return 0;
+
+err_reset_amba_ops:
+#ifdef CONFIG_ARM_AMBA
+	bus_set_iommu(&amba_bustype, NULL);
+#endif
+err_reset_pci_ops: __maybe_unused;
+#ifdef CONFIG_PCI
+	bus_set_iommu(&pci_bus_type, NULL);
+#endif
+	return err;
+}
+
 static int arm_smmu_device_probe(struct platform_device *pdev)
 {
 	int irq, ret;
@@ -3599,7 +3902,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 
 	/* Base address */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (resource_size(res) + 1 < arm_smmu_resource_size(smmu)) {
+	if (resource_size(res) < arm_smmu_resource_size(smmu)) {
 		dev_err(dev, "MMIO region too small (%pr)\n", res);
 		return -EINVAL;
 	}
@@ -3660,48 +3963,45 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-#ifdef CONFIG_PCI
-	if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
-		pci_request_acs();
-		ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
-		if (ret)
-			return ret;
-	}
-#endif
-#ifdef CONFIG_ARM_AMBA
-	if (amba_bustype.iommu_ops != &arm_smmu_ops) {
-		ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
-		if (ret)
-			return ret;
-	}
-#endif
-	if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
-		ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
-		if (ret)
-			return ret;
-	}
+	return arm_smmu_set_bus_ops(&arm_smmu_ops);
+}
+
+static int arm_smmu_device_remove(struct platform_device *pdev)
+{
+	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+
+	arm_smmu_set_bus_ops(NULL);
+	iommu_device_unregister(&smmu->iommu);
+	iommu_device_sysfs_remove(&smmu->iommu);
+	arm_smmu_device_disable(smmu);
+
 	return 0;
 }
 
 static void arm_smmu_device_shutdown(struct platform_device *pdev)
 {
-	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
-
-	arm_smmu_device_disable(smmu);
+	arm_smmu_device_remove(pdev);
 }
 
 static const struct of_device_id arm_smmu_of_match[] = {
 	{ .compatible = "arm,smmu-v3", },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
 
 static struct platform_driver arm_smmu_driver = {
 	.driver	= {
-		.name		= "arm-smmu-v3",
-		.of_match_table	= of_match_ptr(arm_smmu_of_match),
-		.suppress_bind_attrs = true,
+		.name			= "arm-smmu-v3",
+		.of_match_table		= arm_smmu_of_match,
+		.suppress_bind_attrs	= true,
 	},
 	.probe	= arm_smmu_device_probe,
+	.remove	= arm_smmu_device_remove,
 	.shutdown = arm_smmu_device_shutdown,
 };
-builtin_platform_driver(arm_smmu_driver);
+module_platform_driver(arm_smmu_driver);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
+MODULE_AUTHOR("Will Deacon <will@kernel.org>");
+MODULE_ALIAS("platform:arm-smmu-v3");
+MODULE_LICENSE("GPL v2");
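
A quick worked example for the two-level context-descriptor lookup introduced in arm_smmu_get_cd_ptr() above. With CTXDESC_SPLIT = 10, each leaf table holds CTXDESC_L2_ENTRIES = 1024 descriptors, so for SSID 0x1234:

	/* L1 index selects the leaf table, which is allocated lazily on first use */
	idx_l1 = 0x1234 >> 10;		/* = 4     */
	/* L2 index selects the CD within that leaf table */
	idx_l2 = 0x1234 & (1024 - 1);	/* = 0x234 */

The descriptor therefore lives at l1_desc[4].l2ptr + 0x234 * CTXDESC_CD_DWORDS, while linear tables (used when s1cdmax is small or 2-level tables are unsupported) skip the split and index cdtab directly by SSID.
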
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 4f1a350..16c4b87 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -27,8 +27,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
@@ -60,10 +59,6 @@
 #define MSI_IOVA_LENGTH			0x100000
 
 static int force_stage;
-/*
- * not really modular, but the easiest way to keep compat with existing
- * bootargs behaviour is to continue using module_param() here.
- */
 module_param(force_stage, int, S_IRUGO);
 MODULE_PARM_DESC(force_stage,
 	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
@@ -131,6 +126,12 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 	return container_of(dom, struct arm_smmu_domain, domain);
 }
 
+static struct platform_driver arm_smmu_driver;
+static struct iommu_ops arm_smmu_ops;
+
+#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
+static int arm_smmu_bus_init(struct iommu_ops *ops);
+
 static struct device_node *dev_get_dev_node(struct device *dev)
 {
 	if (dev_is_pci(dev)) {
@@ -166,9 +167,6 @@ static int __find_legacy_master_phandle(struct device *dev, void *data)
 	return err == -ENOENT ? 0 : err;
 }
 
-static struct platform_driver arm_smmu_driver;
-static struct iommu_ops arm_smmu_ops;
-
 static int arm_smmu_register_legacy_master(struct device *dev,
 					   struct arm_smmu_device **smmu)
 {
@@ -220,6 +218,27 @@ static int arm_smmu_register_legacy_master(struct device *dev,
 	return err;
 }
 
+/*
+ * With the legacy DT binding in play, we have no guarantees about
+ * probe order, but then we're also not doing default domains, so we can
+ * delay setting bus ops until we're sure every possible SMMU is ready,
+ * and that way ensure that no add_device() calls get missed.
+ */
+static int arm_smmu_legacy_bus_init(void)
+{
+	if (using_legacy_binding)
+		return arm_smmu_bus_init(&arm_smmu_ops);
+	return 0;
+}
+device_initcall_sync(arm_smmu_legacy_bus_init);
+#else
+static int arm_smmu_register_legacy_master(struct device *dev,
+					   struct arm_smmu_device **smmu)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */
+
 static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
 {
 	int idx;
@@ -252,7 +271,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
 	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
 		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
 			reg = arm_smmu_readl(smmu, page, status);
-			if (!(reg & sTLBGSTATUS_GSACTIVE))
+			if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
 				return;
 			cpu_relax();
 		}
@@ -459,7 +478,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 	int idx = smmu_domain->cfg.cbndx;
 
 	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
-	if (!(fsr & FSR_FAULT))
+	if (!(fsr & ARM_SMMU_FSR_FAULT))
 		return IRQ_NONE;
 
 	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
@@ -491,7 +510,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 
 	if (__ratelimit(&rs)) {
 		if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
-		    (gfsr & sGFSR_USF))
+		    (gfsr & ARM_SMMU_sGFSR_USF))
 			dev_err(smmu->dev,
 				"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
 				(u16)gfsynr1);
@@ -521,26 +540,28 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
 			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
 		} else {
-			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
-			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
-			cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
+			cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
+			cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
 			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
-				cb->tcr[1] |= TCR2_AS;
+				cb->tcr[1] |= ARM_SMMU_TCR2_AS;
+			else
+				cb->tcr[0] |= ARM_SMMU_TCR_EAE;
 		}
 	} else {
-		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+		cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
 	}
 
 	/* TTBRs */
 	if (stage1) {
 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
-			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
-			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
+			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
+			cb->ttbr[1] = 0;
 		} else {
-			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
-			cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
-			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
-			cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
+			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+			cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID,
+						  cfg->asid);
+			cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
+						 cfg->asid);
 		}
 	} else {
 		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
@@ -576,31 +597,33 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
 	/* CBA2R */
 	if (smmu->version > ARM_SMMU_V1) {
 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
-			reg = CBA2R_VA64;
+			reg = ARM_SMMU_CBA2R_VA64;
 		else
 			reg = 0;
 		/* 16-bit VMIDs live in CBA2R */
 		if (smmu->features & ARM_SMMU_FEAT_VMID16)
-			reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);
+			reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
 
 		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
 	}
 
 	/* CBAR */
-	reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
+	reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
 	if (smmu->version < ARM_SMMU_V2)
-		reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);
+		reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);
 
 	/*
 	 * Use the weakest shareability/memory types, so they are
 	 * overridden by the ttbcr/pte.
 	 */
 	if (stage1) {
-		reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
-			FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
+		reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
+				  ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
+		       FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
+				  ARM_SMMU_CBAR_S1_MEMATTR_WB);
 	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
 		/* 8-bit VMIDs live in CBAR */
-		reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
+		reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
 	}
 	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
 
@@ -632,11 +655,12 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
 	}
 
 	/* SCTLR */
-	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
+	reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
+	      ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
 	if (stage1)
-		reg |= SCTLR_S1_ASIDPNE;
+		reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
 	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
-		reg |= SCTLR_E;
+		reg |= ARM_SMMU_SCTLR_E;
 
 	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
 }
@@ -818,7 +842,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (ret < 0) {
 		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
 			cfg->irptndx, irq);
-		cfg->irptndx = INVALID_IRPTNDX;
+		cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
 	}
 
 	mutex_unlock(&smmu_domain->init_mutex);
@@ -856,7 +880,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	smmu->cbs[cfg->cbndx].cfg = NULL;
 	arm_smmu_write_context_bank(smmu, cfg->cbndx);
 
-	if (cfg->irptndx != INVALID_IRPTNDX) {
+	if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
 		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
 		devm_free_irq(smmu->dev, irq, domain);
 	}
@@ -912,23 +936,24 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
 {
 	struct arm_smmu_smr *smr = smmu->smrs + idx;
-	u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);
+	u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
+		  FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
 
 	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
-		reg |= SMR_VALID;
+		reg |= ARM_SMMU_SMR_VALID;
 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
 }
 
 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
 {
 	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
-	u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
-		  FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
-		  FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);
+	u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
+		  FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
+		  FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
 
 	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
 	    smmu->smrs[idx].valid)
-		reg |= S2CR_EXIDVALID;
+		reg |= ARM_SMMU_S2CR_EXIDVALID;
 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
 }
 
@@ -946,24 +971,37 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
 {
 	u32 smr;
+	int i;
 
 	if (!smmu->smrs)
 		return;
-
+	/*
+	 * If we've had to accommodate firmware memory regions, we may
+	 * have live SMRs by now; tread carefully...
+	 *
+	 * Somewhat perversely, not having a free SMR for this test implies we
+	 * can get away without it anyway, as we'll only be able to 'allocate'
+	 * these SMRs for the ID/mask values we're already trusting to be OK.
+	 */
+	for (i = 0; i < smmu->num_mapping_groups; i++)
+		if (!smmu->smrs[i].valid)
+			goto smr_ok;
+	return;
+smr_ok:
 	/*
 	 * SMR.ID bits may not be preserved if the corresponding MASK
 	 * bits are set, so check each one separately. We can reject
 	 * masters later if they try to claim IDs outside these masks.
 	 */
-	smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
-	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
-	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
-	smmu->streamid_mask = FIELD_GET(SMR_ID, smr);
+	smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
+	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
+	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+	smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
 
-	smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
-	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
-	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
-	smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
+	smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
+	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
+	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+	smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
 }
 
 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
@@ -1032,8 +1070,8 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
 	mutex_lock(&smmu->stream_map_mutex);
 	/* Figure out a viable stream map entry allocation */
 	for_each_cfg_sme(fwspec, i, idx) {
-		u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
-		u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
+		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
+		u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
 
 		if (idx != INVALID_SMENDX) {
 			ret = -EEXIST;
@@ -1277,7 +1315,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
 
 	reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
-	if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
+	if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
+				      5, 50)) {
 		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 		dev_err(dev,
 			"iova to phys timed out on %pad. Falling back to software table walk.\n",
@@ -1287,7 +1326,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 
 	phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
-	if (phys & CB_PAR_F) {
+	if (phys & ARM_SMMU_CB_PAR_F) {
 		dev_err(dev, "translation fault!\n");
 		dev_err(dev, "PAR = 0x%llx\n", phys);
 		return 0;
@@ -1368,8 +1407,8 @@ static int arm_smmu_add_device(struct device *dev)
 
 	ret = -EINVAL;
 	for (i = 0; i < fwspec->num_ids; i++) {
-		u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
-		u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
+		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
+		u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
 
 		if (sid & ~smmu->streamid_mask) {
 			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
@@ -1550,12 +1589,12 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	u32 mask, fwid = 0;
 
 	if (args->args_count > 0)
-		fwid |= FIELD_PREP(SMR_ID, args->args[0]);
+		fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
 
 	if (args->args_count > 1)
-		fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
+		fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
 	else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
-		fwid |= FIELD_PREP(SMR_MASK, mask);
+		fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);
 
 	return iommu_fwspec_add_ids(dev, &fwid, 1);
 }
@@ -1576,15 +1615,6 @@ static void arm_smmu_get_resv_regions(struct device *dev,
 	iommu_dma_get_resv_regions(dev, head);
 }
 
-static void arm_smmu_put_resv_regions(struct device *dev,
-				      struct list_head *head)
-{
-	struct iommu_resv_region *entry, *next;
-
-	list_for_each_entry_safe(entry, next, head, list)
-		kfree(entry);
-}
-
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -1602,7 +1632,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.domain_set_attr	= arm_smmu_domain_set_attr,
 	.of_xlate		= arm_smmu_of_xlate,
 	.get_resv_regions	= arm_smmu_get_resv_regions,
-	.put_resv_regions	= arm_smmu_put_resv_regions,
+	.put_resv_regions	= generic_iommu_put_resv_regions,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
 
@@ -1625,7 +1655,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 	/* Make sure all context banks are disabled and clear CB_FSR  */
 	for (i = 0; i < smmu->num_context_banks; ++i) {
 		arm_smmu_write_context_bank(smmu, i);
-		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
+		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
 	}
 
 	/* Invalidate the TLB, just in case */
@@ -1635,29 +1665,30 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
 
 	/* Enable fault reporting */
-	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
+	reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
+		ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);
 
 	/* Disable TLB broadcasting. */
-	reg |= (sCR0_VMIDPNE | sCR0_PTM);
+	reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);
 
 	/* Enable client access, handling unmatched streams as appropriate */
-	reg &= ~sCR0_CLIENTPD;
+	reg &= ~ARM_SMMU_sCR0_CLIENTPD;
 	if (disable_bypass)
-		reg |= sCR0_USFCFG;
+		reg |= ARM_SMMU_sCR0_USFCFG;
 	else
-		reg &= ~sCR0_USFCFG;
+		reg &= ~ARM_SMMU_sCR0_USFCFG;
 
 	/* Disable forced broadcasting */
-	reg &= ~sCR0_FB;
+	reg &= ~ARM_SMMU_sCR0_FB;
 
 	/* Don't upgrade barriers */
-	reg &= ~(sCR0_BSU);
+	reg &= ~(ARM_SMMU_sCR0_BSU);
 
 	if (smmu->features & ARM_SMMU_FEAT_VMID16)
-		reg |= sCR0_VMID16EN;
+		reg |= ARM_SMMU_sCR0_VMID16EN;
 
 	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
-		reg |= sCR0_EXIDENABLE;
+		reg |= ARM_SMMU_sCR0_EXIDENABLE;
 
 	if (smmu->impl && smmu->impl->reset)
 		smmu->impl->reset(smmu);
@@ -1702,21 +1733,21 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 
 	/* Restrict available stages based on module parameter */
 	if (force_stage == 1)
-		id &= ~(ID0_S2TS | ID0_NTS);
+		id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
 	else if (force_stage == 2)
-		id &= ~(ID0_S1TS | ID0_NTS);
+		id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);
 
-	if (id & ID0_S1TS) {
+	if (id & ARM_SMMU_ID0_S1TS) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
 		dev_notice(smmu->dev, "\tstage 1 translation\n");
 	}
 
-	if (id & ID0_S2TS) {
+	if (id & ARM_SMMU_ID0_S2TS) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
 		dev_notice(smmu->dev, "\tstage 2 translation\n");
 	}
 
-	if (id & ID0_NTS) {
+	if (id & ARM_SMMU_ID0_NTS) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
 		dev_notice(smmu->dev, "\tnested translation\n");
 	}
@@ -1727,8 +1758,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 		return -ENODEV;
 	}
 
-	if ((id & ID0_S1TS) &&
-		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
+	if ((id & ARM_SMMU_ID0_S1TS) &&
+	    ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
 		dev_notice(smmu->dev, "\taddress translation ops\n");
 	}
@@ -1739,7 +1770,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	 * Fortunately, this also opens up a workaround for systems where the
 	 * ID register value has ended up configured incorrectly.
 	 */
-	cttw_reg = !!(id & ID0_CTTW);
+	cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
 	if (cttw_fw || cttw_reg)
 		dev_notice(smmu->dev, "\t%scoherent table walk\n",
 			   cttw_fw ? "" : "non-");
@@ -1748,16 +1779,16 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 			   "\t(IDR0.CTTW overridden by FW configuration)\n");
 
 	/* Max. number of entries we have for stream matching/indexing */
-	if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
+	if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
 		smmu->features |= ARM_SMMU_FEAT_EXIDS;
 		size = 1 << 16;
 	} else {
-		size = 1 << FIELD_GET(ID0_NUMSIDB, id);
+		size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
 	}
 	smmu->streamid_mask = size - 1;
-	if (id & ID0_SMS) {
+	if (id & ARM_SMMU_ID0_SMS) {
 		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
-		size = FIELD_GET(ID0_NUMSMRG, id);
+		size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
 		if (size == 0) {
 			dev_err(smmu->dev,
 				"stream-matching supported, but no SMRs present!\n");
@@ -1785,18 +1816,19 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	mutex_init(&smmu->stream_map_mutex);
 	spin_lock_init(&smmu->global_sync_lock);
 
-	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
+	if (smmu->version < ARM_SMMU_V2 ||
+	    !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
 		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
-		if (!(id & ID0_PTFS_NO_AARCH32S))
+		if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
 	}
 
 	/* ID1 */
 	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
-	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
+	smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
 
 	/* Check for size mismatch of SMMU address space from mapped region */
-	size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
+	size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
 	if (smmu->numpage != 2 * size << smmu->pgshift)
 		dev_warn(smmu->dev,
 			"SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
@@ -1804,8 +1836,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	/* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
 	smmu->numpage = size;
 
-	smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
-	smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
+	smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
+	smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
 	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
 		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
 		return -ENODEV;
@@ -1819,14 +1851,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 
 	/* ID2 */
 	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
-	size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
+	size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
 	smmu->ipa_size = size;
 
 	/* The output mask is also applied for bypass */
-	size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
+	size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
 	smmu->pa_size = size;
 
-	if (id & ID2_VMID16)
+	if (id & ARM_SMMU_ID2_VMID16)
 		smmu->features |= ARM_SMMU_FEAT_VMID16;
 
 	/*
@@ -1843,13 +1875,13 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 		if (smmu->version == ARM_SMMU_V1_64K)
 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
 	} else {
-		size = FIELD_GET(ID2_UBS, id);
+		size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
 		smmu->va_size = arm_smmu_id_size_to_bits(size);
-		if (id & ID2_PTFS_4K)
+		if (id & ARM_SMMU_ID2_PTFS_4K)
 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
-		if (id & ID2_PTFS_16K)
+		if (id & ARM_SMMU_ID2_PTFS_16K)
 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
-		if (id & ID2_PTFS_64K)
+		if (id & ARM_SMMU_ID2_PTFS_64K)
 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
 	}
 
@@ -1911,6 +1943,7 @@ static const struct of_device_id arm_smmu_of_match[] = {
 	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
 
 #ifdef CONFIG_ACPI
 static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
@@ -1997,8 +2030,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
 
 	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
 	if (legacy_binding && !using_generic_binding) {
-		if (!using_legacy_binding)
-			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
+		if (!using_legacy_binding) {
+			pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
+				  IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
+		}
 		using_legacy_binding = true;
 	} else if (!legacy_binding && !using_legacy_binding) {
 		using_generic_binding = true;
@@ -2013,25 +2048,50 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
 	return 0;
 }
 
-static void arm_smmu_bus_init(void)
+static int arm_smmu_bus_init(struct iommu_ops *ops)
 {
+	int err;
+
 	/* Oh, for a proper bus abstraction */
-	if (!iommu_present(&platform_bus_type))
-		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+	if (!iommu_present(&platform_bus_type)) {
+		err = bus_set_iommu(&platform_bus_type, ops);
+		if (err)
+			return err;
+	}
 #ifdef CONFIG_ARM_AMBA
-	if (!iommu_present(&amba_bustype))
-		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+	if (!iommu_present(&amba_bustype)) {
+		err = bus_set_iommu(&amba_bustype, ops);
+		if (err)
+			goto err_reset_platform_ops;
+	}
 #endif
 #ifdef CONFIG_PCI
 	if (!iommu_present(&pci_bus_type)) {
-		pci_request_acs();
-		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+		err = bus_set_iommu(&pci_bus_type, ops);
+		if (err)
+			goto err_reset_amba_ops;
 	}
 #endif
 #ifdef CONFIG_FSL_MC_BUS
-	if (!iommu_present(&fsl_mc_bus_type))
-		bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
+	if (!iommu_present(&fsl_mc_bus_type)) {
+		err = bus_set_iommu(&fsl_mc_bus_type, ops);
+		if (err)
+			goto err_reset_pci_ops;
+	}
 #endif
+	return 0;
+
+err_reset_pci_ops: __maybe_unused;
+#ifdef CONFIG_PCI
+	bus_set_iommu(&pci_bus_type, NULL);
+#endif
+err_reset_amba_ops: __maybe_unused;
+#ifdef CONFIG_ARM_AMBA
+	bus_set_iommu(&amba_bustype, NULL);
+#endif
+err_reset_platform_ops: __maybe_unused;
+	bus_set_iommu(&platform_bus_type, NULL);
+	return err;
 }
 
 static int arm_smmu_device_probe(struct platform_device *pdev)
@@ -2177,38 +2237,28 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	 * ready to handle default domain setup as soon as any SMMU exists.
 	 */
 	if (!using_legacy_binding)
-		arm_smmu_bus_init();
+		return arm_smmu_bus_init(&arm_smmu_ops);
 
 	return 0;
 }
 
-/*
- * With the legacy DT binding in play, though, we have no guarantees about
- * probe order, but then we're also not doing default domains, so we can
- * delay setting bus ops until we're sure every possible SMMU is ready,
- * and that way ensure that no add_device() calls get missed.
- */
-static int arm_smmu_legacy_bus_init(void)
-{
-	if (using_legacy_binding)
-		arm_smmu_bus_init();
-	return 0;
-}
-device_initcall_sync(arm_smmu_legacy_bus_init);
-
-static void arm_smmu_device_shutdown(struct platform_device *pdev)
+static int arm_smmu_device_remove(struct platform_device *pdev)
 {
 	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
 
 	if (!smmu)
-		return;
+		return -ENODEV;
 
 	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
 		dev_err(&pdev->dev, "removing device with active domains!\n");
 
+	arm_smmu_bus_init(NULL);
+	iommu_device_unregister(&smmu->iommu);
+	iommu_device_sysfs_remove(&smmu->iommu);
+
 	arm_smmu_rpm_get(smmu);
 	/* Turn the thing off */
-	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
+	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
 	arm_smmu_rpm_put(smmu);
 
 	if (pm_runtime_enabled(smmu->dev))
@@ -2217,6 +2267,12 @@ static void arm_smmu_device_shutdown(struct platform_device *pdev)
 		clk_bulk_disable(smmu->num_clks, smmu->clks);
 
 	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
+	return 0;
+}
+
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
+{
+	arm_smmu_device_remove(pdev);
 }
 
 static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
@@ -2267,11 +2323,17 @@ static const struct dev_pm_ops arm_smmu_pm_ops = {
 static struct platform_driver arm_smmu_driver = {
 	.driver	= {
 		.name			= "arm-smmu",
-		.of_match_table		= of_match_ptr(arm_smmu_of_match),
+		.of_match_table		= arm_smmu_of_match,
 		.pm			= &arm_smmu_pm_ops,
-		.suppress_bind_attrs	= true,
+		.suppress_bind_attrs    = true,
 	},
 	.probe	= arm_smmu_device_probe,
+	.remove	= arm_smmu_device_remove,
 	.shutdown = arm_smmu_device_shutdown,
 };
-builtin_platform_driver(arm_smmu_driver);
+module_platform_driver(arm_smmu_driver);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
+MODULE_AUTHOR("Will Deacon <will@kernel.org>");
+MODULE_ALIAS("platform:arm-smmu");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h
index 62b9f0c..8d1cd54 100644
--- a/drivers/iommu/arm-smmu.h
+++ b/drivers/iommu/arm-smmu.h
@@ -11,6 +11,7 @@
 #define _ARM_SMMU_H
 
 #include <linux/atomic.h>
+#include <linux/bitfield.h>
 #include <linux/bits.h>
 #include <linux/clk.h>
 #include <linux/device.h>
@@ -23,51 +24,51 @@
 
 /* Configuration registers */
 #define ARM_SMMU_GR0_sCR0		0x0
-#define sCR0_VMID16EN			BIT(31)
-#define sCR0_BSU			GENMASK(15, 14)
-#define sCR0_FB				BIT(13)
-#define sCR0_PTM			BIT(12)
-#define sCR0_VMIDPNE			BIT(11)
-#define sCR0_USFCFG			BIT(10)
-#define sCR0_GCFGFIE			BIT(5)
-#define sCR0_GCFGFRE			BIT(4)
-#define sCR0_EXIDENABLE			BIT(3)
-#define sCR0_GFIE			BIT(2)
-#define sCR0_GFRE			BIT(1)
-#define sCR0_CLIENTPD			BIT(0)
+#define ARM_SMMU_sCR0_VMID16EN		BIT(31)
+#define ARM_SMMU_sCR0_BSU		GENMASK(15, 14)
+#define ARM_SMMU_sCR0_FB		BIT(13)
+#define ARM_SMMU_sCR0_PTM		BIT(12)
+#define ARM_SMMU_sCR0_VMIDPNE		BIT(11)
+#define ARM_SMMU_sCR0_USFCFG		BIT(10)
+#define ARM_SMMU_sCR0_GCFGFIE		BIT(5)
+#define ARM_SMMU_sCR0_GCFGFRE		BIT(4)
+#define ARM_SMMU_sCR0_EXIDENABLE	BIT(3)
+#define ARM_SMMU_sCR0_GFIE		BIT(2)
+#define ARM_SMMU_sCR0_GFRE		BIT(1)
+#define ARM_SMMU_sCR0_CLIENTPD		BIT(0)
 
 /* Auxiliary Configuration register */
 #define ARM_SMMU_GR0_sACR		0x10
 
 /* Identification registers */
 #define ARM_SMMU_GR0_ID0		0x20
-#define ID0_S1TS			BIT(30)
-#define ID0_S2TS			BIT(29)
-#define ID0_NTS				BIT(28)
-#define ID0_SMS				BIT(27)
-#define ID0_ATOSNS			BIT(26)
-#define ID0_PTFS_NO_AARCH32		BIT(25)
-#define ID0_PTFS_NO_AARCH32S		BIT(24)
-#define ID0_NUMIRPT			GENMASK(23, 16)
-#define ID0_CTTW			BIT(14)
-#define ID0_NUMSIDB			GENMASK(12, 9)
-#define ID0_EXIDS			BIT(8)
-#define ID0_NUMSMRG			GENMASK(7, 0)
+#define ARM_SMMU_ID0_S1TS		BIT(30)
+#define ARM_SMMU_ID0_S2TS		BIT(29)
+#define ARM_SMMU_ID0_NTS		BIT(28)
+#define ARM_SMMU_ID0_SMS		BIT(27)
+#define ARM_SMMU_ID0_ATOSNS		BIT(26)
+#define ARM_SMMU_ID0_PTFS_NO_AARCH32	BIT(25)
+#define ARM_SMMU_ID0_PTFS_NO_AARCH32S	BIT(24)
+#define ARM_SMMU_ID0_NUMIRPT		GENMASK(23, 16)
+#define ARM_SMMU_ID0_CTTW		BIT(14)
+#define ARM_SMMU_ID0_NUMSIDB		GENMASK(12, 9)
+#define ARM_SMMU_ID0_EXIDS		BIT(8)
+#define ARM_SMMU_ID0_NUMSMRG		GENMASK(7, 0)
 
 #define ARM_SMMU_GR0_ID1		0x24
-#define ID1_PAGESIZE			BIT(31)
-#define ID1_NUMPAGENDXB			GENMASK(30, 28)
-#define ID1_NUMS2CB			GENMASK(23, 16)
-#define ID1_NUMCB			GENMASK(7, 0)
+#define ARM_SMMU_ID1_PAGESIZE		BIT(31)
+#define ARM_SMMU_ID1_NUMPAGENDXB	GENMASK(30, 28)
+#define ARM_SMMU_ID1_NUMS2CB		GENMASK(23, 16)
+#define ARM_SMMU_ID1_NUMCB		GENMASK(7, 0)
 
 #define ARM_SMMU_GR0_ID2		0x28
-#define ID2_VMID16			BIT(15)
-#define ID2_PTFS_64K			BIT(14)
-#define ID2_PTFS_16K			BIT(13)
-#define ID2_PTFS_4K			BIT(12)
-#define ID2_UBS				GENMASK(11, 8)
-#define ID2_OAS				GENMASK(7, 4)
-#define ID2_IAS				GENMASK(3, 0)
+#define ARM_SMMU_ID2_VMID16		BIT(15)
+#define ARM_SMMU_ID2_PTFS_64K		BIT(14)
+#define ARM_SMMU_ID2_PTFS_16K		BIT(13)
+#define ARM_SMMU_ID2_PTFS_4K		BIT(12)
+#define ARM_SMMU_ID2_UBS		GENMASK(11, 8)
+#define ARM_SMMU_ID2_OAS		GENMASK(7, 4)
+#define ARM_SMMU_ID2_IAS		GENMASK(3, 0)
 
 #define ARM_SMMU_GR0_ID3		0x2c
 #define ARM_SMMU_GR0_ID4		0x30
@@ -75,11 +76,11 @@
 #define ARM_SMMU_GR0_ID6		0x38
 
 #define ARM_SMMU_GR0_ID7		0x3c
-#define ID7_MAJOR			GENMASK(7, 4)
-#define ID7_MINOR			GENMASK(3, 0)
+#define ARM_SMMU_ID7_MAJOR		GENMASK(7, 4)
+#define ARM_SMMU_ID7_MINOR		GENMASK(3, 0)
 
 #define ARM_SMMU_GR0_sGFSR		0x48
-#define sGFSR_USF			BIT(1)
+#define ARM_SMMU_sGFSR_USF		BIT(1)
 
 #define ARM_SMMU_GR0_sGFSYNR0		0x50
 #define ARM_SMMU_GR0_sGFSYNR1		0x54
@@ -92,106 +93,132 @@
 #define ARM_SMMU_GR0_sTLBGSYNC		0x70
 
 #define ARM_SMMU_GR0_sTLBGSTATUS	0x74
-#define sTLBGSTATUS_GSACTIVE		BIT(0)
+#define ARM_SMMU_sTLBGSTATUS_GSACTIVE	BIT(0)
 
 /* Stream mapping registers */
 #define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
-#define SMR_VALID			BIT(31)
-#define SMR_MASK			GENMASK(31, 16)
-#define SMR_ID				GENMASK(15, 0)
+#define ARM_SMMU_SMR_VALID		BIT(31)
+#define ARM_SMMU_SMR_MASK		GENMASK(31, 16)
+#define ARM_SMMU_SMR_ID			GENMASK(15, 0)
 
 #define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
-#define S2CR_PRIVCFG			GENMASK(25, 24)
+#define ARM_SMMU_S2CR_PRIVCFG		GENMASK(25, 24)
 enum arm_smmu_s2cr_privcfg {
 	S2CR_PRIVCFG_DEFAULT,
 	S2CR_PRIVCFG_DIPAN,
 	S2CR_PRIVCFG_UNPRIV,
 	S2CR_PRIVCFG_PRIV,
 };
-#define S2CR_TYPE			GENMASK(17, 16)
+#define ARM_SMMU_S2CR_TYPE		GENMASK(17, 16)
 enum arm_smmu_s2cr_type {
 	S2CR_TYPE_TRANS,
 	S2CR_TYPE_BYPASS,
 	S2CR_TYPE_FAULT,
 };
-#define S2CR_EXIDVALID			BIT(10)
-#define S2CR_CBNDX			GENMASK(7, 0)
+#define ARM_SMMU_S2CR_EXIDVALID		BIT(10)
+#define ARM_SMMU_S2CR_CBNDX		GENMASK(7, 0)
 
 /* Context bank attribute registers */
 #define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
-#define CBAR_IRPTNDX			GENMASK(31, 24)
-#define CBAR_TYPE			GENMASK(17, 16)
+#define ARM_SMMU_CBAR_IRPTNDX		GENMASK(31, 24)
+#define ARM_SMMU_CBAR_TYPE		GENMASK(17, 16)
 enum arm_smmu_cbar_type {
 	CBAR_TYPE_S2_TRANS,
 	CBAR_TYPE_S1_TRANS_S2_BYPASS,
 	CBAR_TYPE_S1_TRANS_S2_FAULT,
 	CBAR_TYPE_S1_TRANS_S2_TRANS,
 };
-#define CBAR_S1_MEMATTR			GENMASK(15, 12)
-#define CBAR_S1_MEMATTR_WB		0xf
-#define CBAR_S1_BPSHCFG			GENMASK(9, 8)
-#define CBAR_S1_BPSHCFG_NSH		3
-#define CBAR_VMID			GENMASK(7, 0)
+#define ARM_SMMU_CBAR_S1_MEMATTR	GENMASK(15, 12)
+#define ARM_SMMU_CBAR_S1_MEMATTR_WB	0xf
+#define ARM_SMMU_CBAR_S1_BPSHCFG	GENMASK(9, 8)
+#define ARM_SMMU_CBAR_S1_BPSHCFG_NSH	3
+#define ARM_SMMU_CBAR_VMID		GENMASK(7, 0)
 
 #define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
 
 #define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
-#define CBA2R_VMID16			GENMASK(31, 16)
-#define CBA2R_VA64			BIT(0)
+#define ARM_SMMU_CBA2R_VMID16		GENMASK(31, 16)
+#define ARM_SMMU_CBA2R_VA64		BIT(0)
 
 #define ARM_SMMU_CB_SCTLR		0x0
-#define SCTLR_S1_ASIDPNE		BIT(12)
-#define SCTLR_CFCFG			BIT(7)
-#define SCTLR_CFIE			BIT(6)
-#define SCTLR_CFRE			BIT(5)
-#define SCTLR_E				BIT(4)
-#define SCTLR_AFE			BIT(2)
-#define SCTLR_TRE			BIT(1)
-#define SCTLR_M				BIT(0)
+#define ARM_SMMU_SCTLR_S1_ASIDPNE	BIT(12)
+#define ARM_SMMU_SCTLR_CFCFG		BIT(7)
+#define ARM_SMMU_SCTLR_CFIE		BIT(6)
+#define ARM_SMMU_SCTLR_CFRE		BIT(5)
+#define ARM_SMMU_SCTLR_E		BIT(4)
+#define ARM_SMMU_SCTLR_AFE		BIT(2)
+#define ARM_SMMU_SCTLR_TRE		BIT(1)
+#define ARM_SMMU_SCTLR_M		BIT(0)
 
 #define ARM_SMMU_CB_ACTLR		0x4
 
 #define ARM_SMMU_CB_RESUME		0x8
-#define RESUME_TERMINATE		BIT(0)
+#define ARM_SMMU_RESUME_TERMINATE	BIT(0)
 
 #define ARM_SMMU_CB_TCR2		0x10
-#define TCR2_SEP			GENMASK(17, 15)
-#define TCR2_SEP_UPSTREAM		0x7
-#define TCR2_AS				BIT(4)
+#define ARM_SMMU_TCR2_SEP		GENMASK(17, 15)
+#define ARM_SMMU_TCR2_SEP_UPSTREAM	0x7
+#define ARM_SMMU_TCR2_AS		BIT(4)
+#define ARM_SMMU_TCR2_PASIZE		GENMASK(3, 0)
 
 #define ARM_SMMU_CB_TTBR0		0x20
 #define ARM_SMMU_CB_TTBR1		0x28
-#define TTBRn_ASID			GENMASK_ULL(63, 48)
+#define ARM_SMMU_TTBRn_ASID		GENMASK_ULL(63, 48)
 
 #define ARM_SMMU_CB_TCR			0x30
+#define ARM_SMMU_TCR_EAE		BIT(31)
+#define ARM_SMMU_TCR_EPD1		BIT(23)
+#define ARM_SMMU_TCR_TG0		GENMASK(15, 14)
+#define ARM_SMMU_TCR_SH0		GENMASK(13, 12)
+#define ARM_SMMU_TCR_ORGN0		GENMASK(11, 10)
+#define ARM_SMMU_TCR_IRGN0		GENMASK(9, 8)
+#define ARM_SMMU_TCR_T0SZ		GENMASK(5, 0)
+
+#define ARM_SMMU_VTCR_RES1		BIT(31)
+#define ARM_SMMU_VTCR_PS		GENMASK(18, 16)
+#define ARM_SMMU_VTCR_TG0		ARM_SMMU_TCR_TG0
+#define ARM_SMMU_VTCR_SH0		ARM_SMMU_TCR_SH0
+#define ARM_SMMU_VTCR_ORGN0		ARM_SMMU_TCR_ORGN0
+#define ARM_SMMU_VTCR_IRGN0		ARM_SMMU_TCR_IRGN0
+#define ARM_SMMU_VTCR_SL0		GENMASK(7, 6)
+#define ARM_SMMU_VTCR_T0SZ		ARM_SMMU_TCR_T0SZ
+
 #define ARM_SMMU_CB_CONTEXTIDR		0x34
 #define ARM_SMMU_CB_S1_MAIR0		0x38
 #define ARM_SMMU_CB_S1_MAIR1		0x3c
 
 #define ARM_SMMU_CB_PAR			0x50
-#define CB_PAR_F			BIT(0)
+#define ARM_SMMU_CB_PAR_F		BIT(0)
 
 #define ARM_SMMU_CB_FSR			0x58
-#define FSR_MULTI			BIT(31)
-#define FSR_SS				BIT(30)
-#define FSR_UUT				BIT(8)
-#define FSR_ASF				BIT(7)
-#define FSR_TLBLKF			BIT(6)
-#define FSR_TLBMCF			BIT(5)
-#define FSR_EF				BIT(4)
-#define FSR_PF				BIT(3)
-#define FSR_AFF				BIT(2)
-#define FSR_TF				BIT(1)
+#define ARM_SMMU_FSR_MULTI		BIT(31)
+#define ARM_SMMU_FSR_SS			BIT(30)
+#define ARM_SMMU_FSR_UUT		BIT(8)
+#define ARM_SMMU_FSR_ASF		BIT(7)
+#define ARM_SMMU_FSR_TLBLKF		BIT(6)
+#define ARM_SMMU_FSR_TLBMCF		BIT(5)
+#define ARM_SMMU_FSR_EF			BIT(4)
+#define ARM_SMMU_FSR_PF			BIT(3)
+#define ARM_SMMU_FSR_AFF		BIT(2)
+#define ARM_SMMU_FSR_TF			BIT(1)
 
-#define FSR_IGN				(FSR_AFF | FSR_ASF | \
-					 FSR_TLBMCF | FSR_TLBLKF)
-#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
-					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
+#define ARM_SMMU_FSR_IGN		(ARM_SMMU_FSR_AFF |		\
+					 ARM_SMMU_FSR_ASF |		\
+					 ARM_SMMU_FSR_TLBMCF |		\
+					 ARM_SMMU_FSR_TLBLKF)
+
+#define ARM_SMMU_FSR_FAULT		(ARM_SMMU_FSR_MULTI |		\
+					 ARM_SMMU_FSR_SS |		\
+					 ARM_SMMU_FSR_UUT |		\
+					 ARM_SMMU_FSR_EF |		\
+					 ARM_SMMU_FSR_PF |		\
+					 ARM_SMMU_FSR_TF |		\
+					 ARM_SMMU_FSR_IGN)
 
 #define ARM_SMMU_CB_FAR			0x60
 
 #define ARM_SMMU_CB_FSYNR0		0x68
-#define FSYNR0_WNR			BIT(4)
+#define ARM_SMMU_FSYNR0_WNR		BIT(4)
 
 #define ARM_SMMU_CB_S1_TLBIVA		0x600
 #define ARM_SMMU_CB_S1_TLBIASID		0x610
@@ -203,7 +230,7 @@ enum arm_smmu_cbar_type {
 #define ARM_SMMU_CB_ATS1PR		0x800
 
 #define ARM_SMMU_CB_ATSR		0x8f0
-#define ATSR_ACTIVE			BIT(0)
+#define ARM_SMMU_ATSR_ACTIVE		BIT(0)
 
 
 /* Maximum number of context banks per SMMU */
@@ -297,7 +324,7 @@ struct arm_smmu_cfg {
 	enum arm_smmu_cbar_type		cbar;
 	enum arm_smmu_context_fmt	fmt;
 };
-#define INVALID_IRPTNDX			0xff
+#define ARM_SMMU_INVALID_IRPTNDX	0xff
 
 enum arm_smmu_domain_stage {
 	ARM_SMMU_DOMAIN_S1 = 0,
@@ -318,6 +345,33 @@ struct arm_smmu_domain {
 	struct iommu_domain		domain;
 };
 
+static inline u32 arm_smmu_lpae_tcr(struct io_pgtable_cfg *cfg)
+{
+	return ARM_SMMU_TCR_EPD1 |
+	       FIELD_PREP(ARM_SMMU_TCR_TG0, cfg->arm_lpae_s1_cfg.tcr.tg) |
+	       FIELD_PREP(ARM_SMMU_TCR_SH0, cfg->arm_lpae_s1_cfg.tcr.sh) |
+	       FIELD_PREP(ARM_SMMU_TCR_ORGN0, cfg->arm_lpae_s1_cfg.tcr.orgn) |
+	       FIELD_PREP(ARM_SMMU_TCR_IRGN0, cfg->arm_lpae_s1_cfg.tcr.irgn) |
+	       FIELD_PREP(ARM_SMMU_TCR_T0SZ, cfg->arm_lpae_s1_cfg.tcr.tsz);
+}
+
+static inline u32 arm_smmu_lpae_tcr2(struct io_pgtable_cfg *cfg)
+{
+	return FIELD_PREP(ARM_SMMU_TCR2_PASIZE, cfg->arm_lpae_s1_cfg.tcr.ips) |
+	       FIELD_PREP(ARM_SMMU_TCR2_SEP, ARM_SMMU_TCR2_SEP_UPSTREAM);
+}
+
+static inline u32 arm_smmu_lpae_vtcr(struct io_pgtable_cfg *cfg)
+{
+	return ARM_SMMU_VTCR_RES1 |
+	       FIELD_PREP(ARM_SMMU_VTCR_PS, cfg->arm_lpae_s2_cfg.vtcr.ps) |
+	       FIELD_PREP(ARM_SMMU_VTCR_TG0, cfg->arm_lpae_s2_cfg.vtcr.tg) |
+	       FIELD_PREP(ARM_SMMU_VTCR_SH0, cfg->arm_lpae_s2_cfg.vtcr.sh) |
+	       FIELD_PREP(ARM_SMMU_VTCR_ORGN0, cfg->arm_lpae_s2_cfg.vtcr.orgn) |
+	       FIELD_PREP(ARM_SMMU_VTCR_IRGN0, cfg->arm_lpae_s2_cfg.vtcr.irgn) |
+	       FIELD_PREP(ARM_SMMU_VTCR_SL0, cfg->arm_lpae_s2_cfg.vtcr.sl) |
+	       FIELD_PREP(ARM_SMMU_VTCR_T0SZ, cfg->arm_lpae_s2_cfg.vtcr.tsz);
+}
 
 /* Implementation details, yay! */
 struct arm_smmu_impl {
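
A note on the register field helpers used throughout the renamed header above: every field is declared with GENMASK()/BIT() so the driver can pack values with FIELD_PREP() and unpack them with FIELD_GET() (hence the new #include <linux/bitfield.h>). A minimal user-space sketch of how the ARM_SMMU_SMR_ID and ARM_SMMU_SMR_MASK fields compose into one 32-bit SMR value, using simplified local stand-ins for the kernel macros rather than the real <linux/bitfield.h> implementations, might look like this:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK()/FIELD_PREP()/FIELD_GET(). */
#define GENMASK(h, l)		((~0u >> (31 - (h))) & ~((1u << (l)) - 1u))
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((uint32_t)(reg) & (mask)) >> __builtin_ctz(mask))

#define ARM_SMMU_SMR_MASK	GENMASK(31, 16)
#define ARM_SMMU_SMR_ID		GENMASK(15, 0)

int main(void)
{
	/* Hypothetical stream ID and mask values, purely for illustration. */
	uint32_t smr = FIELD_PREP(ARM_SMMU_SMR_ID, 0x42) |
		       FIELD_PREP(ARM_SMMU_SMR_MASK, 0x7f00);

	printf("SMR = 0x%08x, id = 0x%x, mask = 0x%x\n", smr,
	       FIELD_GET(ARM_SMMU_SMR_ID, smr),
	       FIELD_GET(ARM_SMMU_SMR_MASK, smr));
	return 0;
}
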
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 3acfa6a..071bb42 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -244,7 +244,7 @@ int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
 		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
 		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
 		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
-		      info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
+		      info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
 			pr_warn("Device scope type does not match for %s\n",
 				pci_name(info->dev));
 			return -EINVAL;
@@ -1354,7 +1354,6 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 	struct qi_desc desc;
 
 	if (mask) {
-		WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
 		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
 		desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
 	} else
@@ -1371,6 +1370,47 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 	qi_submit_sync(&desc, iommu);
 }
 
+/* PASID-based IOTLB invalidation */
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+		     unsigned long npages, bool ih)
+{
+	struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
+
+	/*
+	 * npages == -1 means a PASID-selective invalidation, otherwise,
+	 * a positive value for Page-selective-within-PASID invalidation.
+	 * 0 is not a valid input.
+	 */
+	if (WARN_ON(!npages)) {
+		pr_err("Invalid input npages = %ld\n", npages);
+		return;
+	}
+
+	if (npages == -1) {
+		desc.qw0 = QI_EIOTLB_PASID(pasid) |
+				QI_EIOTLB_DID(did) |
+				QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+				QI_EIOTLB_TYPE;
+		desc.qw1 = 0;
+	} else {
+		int mask = ilog2(__roundup_pow_of_two(npages));
+		unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
+
+		if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+			addr &= ~(align - 1);
+
+		desc.qw0 = QI_EIOTLB_PASID(pasid) |
+				QI_EIOTLB_DID(did) |
+				QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
+				QI_EIOTLB_TYPE;
+		desc.qw1 = QI_EIOTLB_ADDR(addr) |
+				QI_EIOTLB_IH(ih) |
+				QI_EIOTLB_AM(mask);
+	}
+
+	qi_submit_sync(&desc, iommu);
+}
+
 /*
  * Disable Queued Invalidation interface.
  */
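
qi_flush_piotlb() above encodes a page-selective invalidation by rounding npages up to the next power of two and expressing the result as an address-mask order (the QI_EIOTLB_AM field), with the base address aligned down to the region that order covers. A small user-space sketch of that arithmetic, assuming the usual 4KiB VT-d page size and using local stand-ins for the kernel's __roundup_pow_of_two()/ilog2() helpers, might look like this:

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT	12

/* Local stand-in for the kernel's __roundup_pow_of_two(). */
static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* Local stand-in for the kernel's ilog2() on a power of two. */
static int ilog2_ul(unsigned long n)
{
	int order = -1;

	while (n) {
		n >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	unsigned long npages = 9;	/* e.g. a nine-page range */
	int mask = ilog2_ul(roundup_pow_of_two(npages));	/* -> 4, i.e. 16 pages */
	uint64_t align = 1ULL << (VTD_PAGE_SHIFT + mask);	/* 64KiB region */
	uint64_t addr = 0x123456789000ULL;

	/* The invalidation address must be aligned to the covered region. */
	addr &= ~(align - 1);

	printf("npages=%lu mask=%d region=%lluKiB base=0x%llx\n",
	       npages, mask,
	       (unsigned long long)(align >> 10),
	       (unsigned long long)addr);
	return 0;
}
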
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 471f05d..c1257be 100644
--- a/drivers/iommu/intel-iommu-debugfs.c
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -5,6 +5,7 @@
  * Authors: Gayatri Kammela <gayatri.kammela@intel.com>
  *	    Sohil Mehta <sohil.mehta@intel.com>
  *	    Jacob Pan <jacob.jun.pan@linux.intel.com>
+ *	    Lu Baolu <baolu.lu@linux.intel.com>
  */
 
 #include <linux/debugfs.h>
@@ -283,6 +284,77 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
 }
 DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);
 
+static inline unsigned long level_to_directory_size(int level)
+{
+	return BIT_ULL(VTD_PAGE_SHIFT + VTD_STRIDE_SHIFT * (level - 1));
+}
+
+static inline void
+dump_page_info(struct seq_file *m, unsigned long iova, u64 *path)
+{
+	seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\n",
+		   iova >> VTD_PAGE_SHIFT, path[5], path[4],
+		   path[3], path[2], path[1]);
+}
+
+static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
+			       int level, unsigned long start,
+			       u64 *path)
+{
+	int i;
+
+	if (level > 5 || level < 1)
+		return;
+
+	for (i = 0; i < BIT_ULL(VTD_STRIDE_SHIFT);
+			i++, pde++, start += level_to_directory_size(level)) {
+		if (!dma_pte_present(pde))
+			continue;
+
+		path[level] = pde->val;
+		if (dma_pte_superpage(pde) || level == 1)
+			dump_page_info(m, start, path);
+		else
+			pgtable_walk_level(m, phys_to_virt(dma_pte_addr(pde)),
+					   level - 1, start, path);
+		path[level] = 0;
+	}
+}
+
+static int show_device_domain_translation(struct device *dev, void *data)
+{
+	struct dmar_domain *domain = find_domain(dev);
+	struct seq_file *m = data;
+	u64 path[6] = { 0 };
+
+	if (!domain)
+		return 0;
+
+	seq_printf(m, "Device %s with pasid %d @0x%llx\n",
+		   dev_name(dev), domain->default_pasid,
+		   (u64)virt_to_phys(domain->pgd));
+	seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");
+
+	pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
+	seq_putc(m, '\n');
+
+	return 0;
+}
+
+static int domain_translation_struct_show(struct seq_file *m, void *unused)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&device_domain_lock, flags);
+	ret = bus_for_each_dev(&pci_bus_type, NULL, m,
+			       show_device_domain_translation);
+	spin_unlock_irqrestore(&device_domain_lock, flags);
+
+	return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
+
 #ifdef CONFIG_IRQ_REMAP
 static void ir_tbl_remap_entry_show(struct seq_file *m,
 				    struct intel_iommu *iommu)
@@ -396,6 +468,9 @@ void __init intel_iommu_debugfs_init(void)
 			    &iommu_regset_fops);
 	debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
 			    NULL, &dmar_translation_struct_fops);
+	debugfs_create_file("domain_translation_struct", 0444,
+			    intel_iommu_debug, NULL,
+			    &domain_translation_struct_fops);
 #ifdef CONFIG_IRQ_REMAP
 	debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
 			    NULL, &ir_translation_struct_fops);
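
The debugfs page-table walker added above descends one table level at a time and advances the IOVA by the amount of address space a single entry covers at that level, level_to_directory_size(). With the VT-d constants VTD_PAGE_SHIFT = 12 and VTD_STRIDE_SHIFT = 9, a minimal user-space sketch of that per-level progression (not the kernel function itself) might look like this:

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT		12
#define VTD_STRIDE_SHIFT	9

/* Address range covered by one entry at a given page-table level. */
static uint64_t level_to_directory_size(int level)
{
	return 1ULL << (VTD_PAGE_SHIFT + VTD_STRIDE_SHIFT * (level - 1));
}

int main(void)
{
	int level;

	/* Level 1 (PTE) through level 5 (PML5E), as in the walker above. */
	for (level = 1; level <= 5; level++)
		printf("level %d entry covers %llu KiB\n", level,
		       (unsigned long long)(level_to_directory_size(level) >> 10));
	return 0;
}
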
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 932267f..9dc3767 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -307,6 +307,20 @@ static int hw_pass_through = 1;
  */
 #define DOMAIN_FLAG_LOSE_CHILDREN		BIT(1)
 
+/*
+ * When VT-d works in the scalable mode, it allows DMA translation to
+ * happen through either first level or second level page table. This
+ * bit marks that the DMA translation for the domain goes through the
+ * first level page table, otherwise, it goes through the second level.
+ */
+#define DOMAIN_FLAG_USE_FIRST_LEVEL		BIT(2)
+
+/*
+ * Domain represents a virtual machine which demands iommu nested
+ * translation mode support.
+ */
+#define DOMAIN_FLAG_NESTING_MODE		BIT(3)
+
 #define for_each_domain_iommu(idx, domain)			\
 	for (idx = 0; idx < g_num_of_iommus; idx++)		\
 		if (domain->iommu_refcnt[idx])
@@ -355,9 +369,14 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 int dmar_disabled = 0;
 #else
 int dmar_disabled = 1;
-#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
+#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
 
+#ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+int intel_iommu_sm = 1;
+#else
 int intel_iommu_sm;
+#endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
+
 int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
 
@@ -368,7 +387,6 @@ static int intel_iommu_superpage = 1;
 static int iommu_identity_mapping;
 static int intel_no_bounce;
 
-#define IDENTMAP_ALL		1
 #define IDENTMAP_GFX		2
 #define IDENTMAP_AZALIA		4
 
@@ -377,7 +395,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 #define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
-static DEFINE_SPINLOCK(device_domain_lock);
+DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
 #define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) &&	\
@@ -552,6 +570,11 @@ static inline int domain_type_is_si(struct dmar_domain *domain)
 	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
 }
 
+static inline bool domain_use_first_level(struct dmar_domain *domain)
+{
+	return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL;
+}
+
 static inline int domain_pfn_supported(struct dmar_domain *domain,
 				       unsigned long pfn)
 {
@@ -661,11 +684,12 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip)
 	return ret;
 }
 
-static int domain_update_iommu_superpage(struct intel_iommu *skip)
+static int domain_update_iommu_superpage(struct dmar_domain *domain,
+					 struct intel_iommu *skip)
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu;
-	int mask = 0xf;
+	int mask = 0x3;
 
 	if (!intel_iommu_superpage) {
 		return 0;
@@ -675,7 +699,13 @@ static int domain_update_iommu_superpage(struct intel_iommu *skip)
 	rcu_read_lock();
 	for_each_active_iommu(iommu, drhd) {
 		if (iommu != skip) {
-			mask &= cap_super_page_val(iommu->cap);
+			if (domain && domain_use_first_level(domain)) {
+				if (!cap_fl1gp_support(iommu->cap))
+					mask = 0x1;
+			} else {
+				mask &= cap_super_page_val(iommu->cap);
+			}
+
 			if (!mask)
 				break;
 		}
@@ -690,7 +720,7 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
 {
 	domain_update_iommu_coherency(domain);
 	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
-	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
+	domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
 }
 
 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@ -774,13 +804,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
 	if (dev_is_pci(dev)) {
 		struct pci_dev *pf_pdev;
 
-		pdev = to_pci_dev(dev);
-
-#ifdef CONFIG_X86
-		/* VMD child devices currently cannot be handled individually */
-		if (is_vmd(pdev->bus))
-			return NULL;
-#endif
+		pdev = pci_real_dma_dev(to_pci_dev(dev));
 
 		/* VFs aren't listed in scope tables; we need to look up
 		 * the PF instead to find the IOMMU. */
@@ -913,6 +937,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 
 			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
 			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+			if (domain_use_first_level(domain))
+				pteval |= DMA_FL_PTE_XD;
 			if (cmpxchg64(&pte->val, 0ULL, pteval))
 				/* Someone else set it while we were thinking; use theirs. */
 				free_pgtable_page(tmp_page);
@@ -1483,6 +1509,20 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
+static void domain_flush_piotlb(struct intel_iommu *iommu,
+				struct dmar_domain *domain,
+				u64 addr, unsigned long npages, bool ih)
+{
+	u16 did = domain->iommu_did[iommu->seq_id];
+
+	if (domain->default_pasid)
+		qi_flush_piotlb(iommu, did, domain->default_pasid,
+				addr, npages, ih);
+
+	if (!list_empty(&domain->devices))
+		qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
+}
+
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 				  struct dmar_domain *domain,
 				  unsigned long pfn, unsigned int pages,
@@ -1496,18 +1536,23 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 
 	if (ih)
 		ih = 1 << 6;
-	/*
-	 * Fallback to domain selective flush if no PSI support or the size is
-	 * too big.
-	 * PSI requires page size to be 2 ^ x, and the base address is naturally
-	 * aligned to the size
-	 */
-	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
-		iommu->flush.flush_iotlb(iommu, did, 0, 0,
-						DMA_TLB_DSI_FLUSH);
-	else
-		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
-						DMA_TLB_PSI_FLUSH);
+
+	if (domain_use_first_level(domain)) {
+		domain_flush_piotlb(iommu, domain, addr, pages, ih);
+	} else {
+		/*
+		 * Fallback to domain selective flush if no PSI support or
+		 * the size is too big. PSI requires page size to be 2 ^ x,
+		 * and the base address is naturally aligned to the size.
+		 */
+		if (!cap_pgsel_inv(iommu->cap) ||
+		    mask > cap_max_amask_val(iommu->cap))
+			iommu->flush.flush_iotlb(iommu, did, 0, 0,
+							DMA_TLB_DSI_FLUSH);
+		else
+			iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+							DMA_TLB_PSI_FLUSH);
+	}
 
 	/*
 	 * In caching mode, changes of pages from non-present to present require
@@ -1522,8 +1567,11 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
 					struct dmar_domain *domain,
 					unsigned long pfn, unsigned int pages)
 {
-	/* It's a non-present to present mapping. Only flush if caching mode */
-	if (cap_caching_mode(iommu->cap))
+	/*
+	 * It's a non-present to present mapping. Only flush if caching mode
+	 * and second level.
+	 */
+	if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
 		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
@@ -1540,7 +1588,11 @@ static void iommu_flush_iova(struct iova_domain *iovad)
 		struct intel_iommu *iommu = g_iommus[idx];
 		u16 did = domain->iommu_did[iommu->seq_id];
 
-		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+		if (domain_use_first_level(domain))
+			domain_flush_piotlb(iommu, domain, 0, -1, 0);
+		else
+			iommu->flush.flush_iotlb(iommu, did, 0, 0,
+						 DMA_TLB_DSI_FLUSH);
 
 		if (!cap_caching_mode(iommu->cap))
 			iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
@@ -1709,6 +1761,33 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
 #endif
 }
 
+/*
+ * Check and return whether first level is used by default for
+ * DMA translation.
+ */
+static bool first_level_by_default(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+	static int first_level_support = -1;
+
+	if (likely(first_level_support != -1))
+		return first_level_support;
+
+	first_level_support = 1;
+
+	rcu_read_lock();
+	for_each_active_iommu(iommu, drhd) {
+		if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) {
+			first_level_support = 0;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return first_level_support;
+}
+
 static struct dmar_domain *alloc_domain(int flags)
 {
 	struct dmar_domain *domain;
@@ -1720,6 +1799,8 @@ static struct dmar_domain *alloc_domain(int flags)
 	memset(domain, 0, sizeof(*domain));
 	domain->nid = NUMA_NO_NODE;
 	domain->flags = flags;
+	if (first_level_by_default())
+		domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
 	domain->has_iotlb_device = false;
 	INIT_LIST_HEAD(&domain->devices);
 
@@ -1849,14 +1930,16 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
 {
 	int adjust_width, agaw;
 	unsigned long sagaw;
-	int err;
+	int ret;
 
 	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
 
-	err = init_iova_flush_queue(&domain->iovad,
-				    iommu_flush_iova, iova_entry_free);
-	if (err)
-		return err;
+	if (!intel_iommu_strict) {
+		ret = init_iova_flush_queue(&domain->iovad,
+					    iommu_flush_iova, iova_entry_free);
+		if (ret)
+			pr_info("iova flush queue initialization failed\n");
+	}
 
 	domain_reserve_special_ranges(domain);
 
@@ -2229,17 +2312,20 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 	unsigned long sg_res = 0;
 	unsigned int largepage_lvl = 0;
 	unsigned long lvl_pages = 0;
+	u64 attr;
 
 	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
 
 	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
 		return -EINVAL;
 
-	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
+	attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
+	if (domain_use_first_level(domain))
+		attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD;
 
 	if (!sg) {
 		sg_res = nr_pages;
-		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
+		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
 	}
 
 	while (nr_pages > 0) {
@@ -2251,7 +2337,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
 			sg->dma_length = sg->length;
-			pteval = (sg_phys(sg) - pgoff) | prot;
+			pteval = (sg_phys(sg) - pgoff) | attr;
 			phys_pfn = pteval >> VTD_PAGE_SHIFT;
 		}
 
@@ -2420,7 +2506,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
-static struct dmar_domain *find_domain(struct device *dev)
+struct dmar_domain *find_domain(struct device *dev)
 {
 	struct device_domain_info *info;
 
@@ -2428,6 +2514,9 @@ static struct dmar_domain *find_domain(struct device *dev)
 		     dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO))
 		return NULL;
 
+	if (dev_is_pci(dev))
+		dev = &pci_real_dma_dev(to_pci_dev(dev))->dev;
+
 	/* No lock here, assumes no domain exit in normal case */
 	info = dev->archdata.iommu;
 	if (likely(info))
@@ -2463,6 +2552,36 @@ dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
 	return NULL;
 }
 
+static int domain_setup_first_level(struct intel_iommu *iommu,
+				    struct dmar_domain *domain,
+				    struct device *dev,
+				    int pasid)
+{
+	int flags = PASID_FLAG_SUPERVISOR_MODE;
+	struct dma_pte *pgd = domain->pgd;
+	int agaw, level;
+
+	/*
+	 * Skip top levels of page tables for iommu which has
+	 * less agaw than default. Unnecessary for PT mode.
+	 */
+	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+		pgd = phys_to_virt(dma_pte_addr(pgd));
+		if (!dma_pte_present(pgd))
+			return -ENOMEM;
+	}
+
+	level = agaw_to_level(agaw);
+	if (level != 4 && level != 5)
+		return -EINVAL;
+
+	flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
+
+	return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
+					     domain->iommu_did[iommu->seq_id],
+					     flags);
+}
+
 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 						    int bus, int devfn,
 						    struct device *dev,
@@ -2562,6 +2681,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 		if (hw_pass_through && domain_type_is_si(domain))
 			ret = intel_pasid_setup_pass_through(iommu, domain,
 					dev, PASID_RID2PASID);
+		else if (domain_use_first_level(domain))
+			ret = domain_setup_first_level(iommu, domain, dev,
+					PASID_RID2PASID);
 		else
 			ret = intel_pasid_setup_second_level(iommu, domain,
 					dev, PASID_RID2PASID);
@@ -2767,10 +2889,8 @@ static int __init si_domain_init(int hw)
 	}
 
 	/*
-	 * Normally we use DMA domains for devices which have RMRRs. But we
-	 * loose this requirement for graphic and usb devices. Identity map
-	 * the RMRRs for graphic and USB devices so that they could use the
-	 * si_domain.
+	 * Identity map the RMRRs so that devices with RMRRs could also use
+	 * the si_domain.
 	 */
 	for_each_rmrr_units(rmrr) {
 		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
@@ -2778,9 +2898,6 @@ static int __init si_domain_init(int hw)
 			unsigned long long start = rmrr->base_address;
 			unsigned long long end = rmrr->end_address;
 
-			if (device_is_rmrr_locked(dev))
-				continue;
-
 			if (WARN_ON(end < start ||
 				    end >> agaw_to_width(si_domain->agaw)))
 				continue;
@@ -2919,9 +3036,6 @@ static int device_def_domain_type(struct device *dev)
 	if (dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(dev);
 
-		if (device_is_rmrr_locked(dev))
-			return IOMMU_DOMAIN_DMA;
-
 		/*
 		 * Prevent any device marked as untrusted from getting
 		 * placed into the statically identity mapping domain.
@@ -2959,13 +3073,9 @@ static int device_def_domain_type(struct device *dev)
 				return IOMMU_DOMAIN_DMA;
 		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
 			return IOMMU_DOMAIN_DMA;
-	} else {
-		if (device_has_rmrr(dev))
-			return IOMMU_DOMAIN_DMA;
 	}
 
-	return (iommu_identity_mapping & IDENTMAP_ALL) ?
-			IOMMU_DOMAIN_IDENTITY : 0;
+	return 0;
 }
 
 static void intel_iommu_init_qi(struct intel_iommu *iommu)
@@ -3294,10 +3404,7 @@ static int __init init_dmars(void)
 
 		if (!ecap_pass_through(iommu->ecap))
 			hw_pass_through = 0;
-#ifdef CONFIG_INTEL_IOMMU_SVM
-		if (pasid_supported(iommu))
-			intel_svm_init(iommu);
-#endif
+		intel_svm_check(iommu);
 	}
 
 	/*
@@ -3312,9 +3419,6 @@ static int __init init_dmars(void)
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
 	}
 
-	if (iommu_default_passthrough())
-		iommu_identity_mapping |= IDENTMAP_ALL;
-
 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
 	dmar_map_gfx = 0;
 #endif
@@ -3387,8 +3491,21 @@ static unsigned long intel_alloc_iova(struct device *dev,
 {
 	unsigned long iova_pfn;
 
-	/* Restrict dma_mask to the width that the iommu can handle */
-	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+	/*
+	 * Restrict dma_mask to the width that the iommu can handle.
+	 * First-level translation restricts the input-address to a
+	 * canonical address (i.e., address bits 63:N have the same
+	 * value as address bit [N-1], where N is 48-bits with 4-level
+	 * paging and 57-bits with 5-level paging). Hence, skip bit
+	 * [N-1].
+	 */
+	if (domain_use_first_level(domain))
+		dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw - 1),
+				 dma_mask);
+	else
+		dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw),
+				 dma_mask);
+
 	/* Ensure we reserve the whole size-aligned region */
 	nrpages = __roundup_pow_of_two(nrpages);
 
@@ -3406,7 +3523,8 @@ static unsigned long intel_alloc_iova(struct device *dev,
 	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
 				   IOVA_PFN(dma_mask), true);
 	if (unlikely(!iova_pfn)) {
-		dev_err(dev, "Allocating %ld-page iova failed", nrpages);
+		dev_err_once(dev, "Allocating %ld-page iova failed\n",
+			     nrpages);
 		return 0;
 	}
 
@@ -3774,8 +3892,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 		return 0;
 	}
 
-	trace_map_sg(dev, iova_pfn << PAGE_SHIFT,
-		     sg_phys(sglist), size << VTD_PAGE_SHIFT);
+	for_each_sg(sglist, sg, nelems, i)
+		trace_map_sg(dev, i + 1, nelems, sg);
 
 	return nelems;
 }
@@ -3987,6 +4105,9 @@ bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 		sg_dma_len(sg) = sg->length;
 	}
 
+	for_each_sg(sglist, sg, nelems, i)
+		trace_bounce_map_sg(dev, i + 1, nelems, sg);
+
 	return nelems;
 
 out_unmap:
@@ -4315,16 +4436,31 @@ static void __init init_iommu_pm_ops(void)
 static inline void init_iommu_pm_ops(void) {}
 #endif	/* CONFIG_PM */
 
+static int rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
+{
+	if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) ||
+	    !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) ||
+	    rmrr->end_address <= rmrr->base_address ||
+	    arch_rmrr_sanity_check(rmrr))
+		return -EINVAL;
+
+	return 0;
+}
+
 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 {
 	struct acpi_dmar_reserved_memory *rmrr;
 	struct dmar_rmrr_unit *rmrru;
-	int ret;
 
 	rmrr = (struct acpi_dmar_reserved_memory *)header;
-	ret = arch_rmrr_sanity_check(rmrr);
-	if (ret)
-		return ret;
+	if (rmrr_sanity_check(rmrr))
+		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+			   "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
+			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+			   rmrr->base_address, rmrr->end_address,
+			   dmi_get_system_info(DMI_BIOS_VENDOR),
+			   dmi_get_system_info(DMI_BIOS_VERSION),
+			   dmi_get_system_info(DMI_PRODUCT_VERSION));
 
 	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
 	if (!rmrru)
@@ -4470,7 +4606,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
 			iommu->name);
 		return -ENXIO;
 	}
-	sp = domain_update_iommu_superpage(iommu) - 1;
+	sp = domain_update_iommu_superpage(NULL, iommu) - 1;
 	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
 		pr_warn("%s: Doesn't support large page.\n",
 			iommu->name);
@@ -4490,10 +4626,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
 	if (ret)
 		goto out;
 
-#ifdef CONFIG_INTEL_IOMMU_SVM
-	if (pasid_supported(iommu))
-		intel_svm_init(iommu);
-#endif
+	intel_svm_check(iommu);
 
 	if (dmaru->ignored) {
 		/*
@@ -4898,7 +5031,7 @@ static int __init platform_optin_force_iommu(void)
 	 * map for all devices except those marked as being untrusted.
 	 */
 	if (dmar_disabled)
-		iommu_identity_mapping |= IDENTMAP_ALL;
+		iommu_set_default_passthrough(false);
 
 	dmar_disabled = 0;
 	no_iommu = 0;
@@ -5198,6 +5331,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
 	struct dmar_domain *dmar_domain;
 	struct iommu_domain *domain;
+	int ret;
 
 	switch (type) {
 	case IOMMU_DOMAIN_DMA:
@@ -5214,11 +5348,12 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 			return NULL;
 		}
 
-		if (type == IOMMU_DOMAIN_DMA &&
-		    init_iova_flush_queue(&dmar_domain->iovad,
-					  iommu_flush_iova, iova_entry_free)) {
-			pr_warn("iova flush queue initialization failed\n");
-			intel_iommu_strict = 1;
+		if (!intel_iommu_strict && type == IOMMU_DOMAIN_DMA) {
+			ret = init_iova_flush_queue(&dmar_domain->iovad,
+						    iommu_flush_iova,
+						    iova_entry_free);
+			if (ret)
+				pr_info("iova flush queue initialization failed\n");
 		}
 
 		domain_update_iommu_cap(dmar_domain);
@@ -5284,7 +5419,7 @@ static void auxiliary_unlink_device(struct dmar_domain *domain,
 	domain->auxd_refcnt--;
 
 	if (!domain->auxd_refcnt && domain->default_pasid > 0)
-		intel_pasid_free_id(domain->default_pasid);
+		ioasid_free(domain->default_pasid);
 }
 
 static int aux_domain_add_dev(struct dmar_domain *domain,
@@ -5302,10 +5437,11 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
 	if (domain->default_pasid <= 0) {
 		int pasid;
 
-		pasid = intel_pasid_alloc_id(domain, PASID_MIN,
-					     pci_max_pasids(to_pci_dev(dev)),
-					     GFP_KERNEL);
-		if (pasid <= 0) {
+		/* No private data needed for the default pasid */
+		pasid = ioasid_alloc(NULL, PASID_MIN,
+				     pci_max_pasids(to_pci_dev(dev)) - 1,
+				     NULL);
+		if (pasid == INVALID_IOASID) {
 			pr_err("Can't allocate default pasid\n");
 			return -ENODEV;
 		}
@@ -5323,8 +5459,12 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
 		goto attach_failed;
 
 	/* Setup the PASID entry for mediated devices: */
-	ret = intel_pasid_setup_second_level(iommu, domain, dev,
-					     domain->default_pasid);
+	if (domain_use_first_level(domain))
+		ret = domain_setup_first_level(iommu, domain, dev,
+					       domain->default_pasid);
+	else
+		ret = intel_pasid_setup_second_level(iommu, domain, dev,
+						     domain->default_pasid);
 	if (ret)
 		goto table_failed;
 	spin_unlock(&iommu->lock);
@@ -5341,7 +5481,7 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
 	spin_unlock(&iommu->lock);
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 	if (!domain->auxd_refcnt && domain->default_pasid > 0)
-		intel_pasid_free_id(domain->default_pasid);
+		ioasid_free(domain->default_pasid);
 
 	return ret;
 }
@@ -5595,6 +5735,24 @@ static inline bool iommu_pasid_support(void)
 	return ret;
 }
 
+static inline bool nested_mode_support(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+	bool ret = true;
+
+	rcu_read_lock();
+	for_each_active_iommu(iommu, drhd) {
+		if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) {
+			ret = false;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
 static bool intel_iommu_capable(enum iommu_cap cap)
 {
 	if (cap == IOMMU_CAP_CACHE_COHERENCY)
@@ -5752,15 +5910,6 @@ static void intel_iommu_get_resv_regions(struct device *device,
 	list_add_tail(&reg->list, head);
 }
 
-static void intel_iommu_put_resv_regions(struct device *dev,
-					 struct list_head *head)
-{
-	struct iommu_resv_region *entry, *next;
-
-	list_for_each_entry_safe(entry, next, head, list)
-		kfree(entry);
-}
-
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
 {
 	struct device_domain_info *info;
@@ -5987,10 +6136,42 @@ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
 	return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
 }
 
+static int
+intel_iommu_domain_set_attr(struct iommu_domain *domain,
+			    enum iommu_attr attr, void *data)
+{
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+	unsigned long flags;
+	int ret = 0;
+
+	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+		return -EINVAL;
+
+	switch (attr) {
+	case DOMAIN_ATTR_NESTING:
+		spin_lock_irqsave(&device_domain_lock, flags);
+		if (nested_mode_support() &&
+		    list_empty(&dmar_domain->devices)) {
+			dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
+			dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
+		} else {
+			ret = -ENODEV;
+		}
+		spin_unlock_irqrestore(&device_domain_lock, flags);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
 const struct iommu_ops intel_iommu_ops = {
 	.capable		= intel_iommu_capable,
 	.domain_alloc		= intel_iommu_domain_alloc,
 	.domain_free		= intel_iommu_domain_free,
+	.domain_set_attr	= intel_iommu_domain_set_attr,
 	.attach_dev		= intel_iommu_attach_device,
 	.detach_dev		= intel_iommu_detach_device,
 	.aux_attach_dev		= intel_iommu_aux_attach_device,
@@ -6002,7 +6183,7 @@ const struct iommu_ops intel_iommu_ops = {
 	.add_device		= intel_iommu_add_device,
 	.remove_device		= intel_iommu_remove_device,
 	.get_resv_regions	= intel_iommu_get_resv_regions,
-	.put_resv_regions	= intel_iommu_put_resv_regions,
+	.put_resv_regions	= generic_iommu_put_resv_regions,
 	.apply_resv_region	= intel_iommu_apply_resv_region,
 	.device_group		= intel_iommu_device_group,
 	.dev_has_feat		= intel_iommu_dev_has_feat,
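
The new intel_iommu_domain_set_attr() callback only accepts DOMAIN_ATTR_NESTING, and only on an unmanaged domain that has no devices attached yet; anything else returns -EINVAL or -ENODEV. As a rough illustration of how a caller such as VFIO would request nested mode through the generic IOMMU API (the example_ wrapper below is hypothetical, not part of this series):

	#include <linux/iommu.h>

	/*
	 * Hypothetical caller sketch: ask for nested translation while the
	 * domain is still empty, i.e. before any iommu_attach_device().
	 */
	static int example_enable_nesting(struct iommu_domain *domain)
	{
		int nesting = 1;

		/* Generic API; dispatches to ->domain_set_attr() above. */
		return iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING,
					     &nesting);
	}

If the attribute sticks, DOMAIN_FLAG_USE_FIRST_LEVEL is cleared for the domain, so the second-level tables stay available for nesting.
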
diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
index 040a445..22b30f1 100644
--- a/drivers/iommu/intel-pasid.c
+++ b/drivers/iommu/intel-pasid.c
@@ -26,42 +26,6 @@
  */
 static DEFINE_SPINLOCK(pasid_lock);
 u32 intel_pasid_max_id = PASID_MAX;
-static DEFINE_IDR(pasid_idr);
-
-int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp)
-{
-	int ret, min, max;
-
-	min = max_t(int, start, PASID_MIN);
-	max = min_t(int, end, intel_pasid_max_id);
-
-	WARN_ON(in_interrupt());
-	idr_preload(gfp);
-	spin_lock(&pasid_lock);
-	ret = idr_alloc(&pasid_idr, ptr, min, max, GFP_ATOMIC);
-	spin_unlock(&pasid_lock);
-	idr_preload_end();
-
-	return ret;
-}
-
-void intel_pasid_free_id(int pasid)
-{
-	spin_lock(&pasid_lock);
-	idr_remove(&pasid_idr, pasid);
-	spin_unlock(&pasid_lock);
-}
-
-void *intel_pasid_lookup_id(int pasid)
-{
-	void *p;
-
-	spin_lock(&pasid_lock);
-	p = idr_find(&pasid_idr, pasid);
-	spin_unlock(&pasid_lock);
-
-	return p;
-}
 
 /*
  * Per device pasid table management:
@@ -465,6 +429,21 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
 		devtlb_invalidation_with_pasid(iommu, dev, pasid);
 }
 
+static void pasid_flush_caches(struct intel_iommu *iommu,
+				struct pasid_entry *pte,
+				int pasid, u16 did)
+{
+	if (!ecap_coherent(iommu->ecap))
+		clflush_cache_range(pte, sizeof(*pte));
+
+	if (cap_caching_mode(iommu->cap)) {
+		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
+		iotlb_invalidation_with_pasid(iommu, did, pasid);
+	} else {
+		iommu_flush_write_buffer(iommu);
+	}
+}
+
 /*
  * Set up the scalable mode pasid table entry for first only
  * translation type.
@@ -498,10 +477,15 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
 		pasid_set_sre(pte);
 	}
 
-#ifdef CONFIG_X86
-	if (cpu_feature_enabled(X86_FEATURE_LA57))
-		pasid_set_flpm(pte, 1);
-#endif /* CONFIG_X86 */
+	if (flags & PASID_FLAG_FL5LP) {
+		if (cap_5lp_support(iommu->cap)) {
+			pasid_set_flpm(pte, 1);
+		} else {
+			pr_err("No 5-level paging support for first-level\n");
+			pasid_clear_entry(pte);
+			return -EINVAL;
+		}
+	}
 
 	pasid_set_domain_id(pte, did);
 	pasid_set_address_width(pte, iommu->agaw);
@@ -510,16 +494,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
 	/* Setup Present and PASID Granular Transfer Type: */
 	pasid_set_translation_type(pte, 1);
 	pasid_set_present(pte);
-
-	if (!ecap_coherent(iommu->ecap))
-		clflush_cache_range(pte, sizeof(*pte));
-
-	if (cap_caching_mode(iommu->cap)) {
-		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-		iotlb_invalidation_with_pasid(iommu, did, pasid);
-	} else {
-		iommu_flush_write_buffer(iommu);
-	}
+	pasid_flush_caches(iommu, pte, pasid, did);
 
 	return 0;
 }
@@ -583,16 +558,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
 	 */
 	pasid_set_sre(pte);
 	pasid_set_present(pte);
-
-	if (!ecap_coherent(iommu->ecap))
-		clflush_cache_range(pte, sizeof(*pte));
-
-	if (cap_caching_mode(iommu->cap)) {
-		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-		iotlb_invalidation_with_pasid(iommu, did, pasid);
-	} else {
-		iommu_flush_write_buffer(iommu);
-	}
+	pasid_flush_caches(iommu, pte, pasid, did);
 
 	return 0;
 }
@@ -626,16 +592,7 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
 	 */
 	pasid_set_sre(pte);
 	pasid_set_present(pte);
-
-	if (!ecap_coherent(iommu->ecap))
-		clflush_cache_range(pte, sizeof(*pte));
-
-	if (cap_caching_mode(iommu->cap)) {
-		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-		iotlb_invalidation_with_pasid(iommu, did, pasid);
-	} else {
-		iommu_flush_write_buffer(iommu);
-	}
+	pasid_flush_caches(iommu, pte, pasid, did);
 
 	return 0;
 }
diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h
index fc8cd8f..92de6df 100644
--- a/drivers/iommu/intel-pasid.h
+++ b/drivers/iommu/intel-pasid.h
@@ -37,6 +37,12 @@
  */
 #define PASID_FLAG_SUPERVISOR_MODE	BIT(0)
 
+/*
+ * The PASID_FLAG_FL5LP flag indicates that 5-level paging is used for
+ * first-level translation; otherwise, 4-level paging is used.
+ */
+#define PASID_FLAG_FL5LP		BIT(1)
+
 struct pasid_dir_entry {
 	u64 val;
 };
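
With PASID_FLAG_FL5LP the paging depth for first-level translation is chosen by the caller instead of being probed from the CPU inside intel_pasid_setup_first_level(), and the setup call now fails with -EINVAL when 5-level paging is requested but cap_5lp_support() is absent. A minimal sketch of how a caller might compose the flags word, mirroring the intel-svm.c change below (the helper name is hypothetical):

	#include <asm/cpufeature.h>	/* cpu_feature_enabled() */
	#include "intel-pasid.h"

	/* Hypothetical helper: build first-level PASID setup flags. */
	static int example_first_level_flags(bool supervisor)
	{
		int flags = 0;

		if (supervisor)
			flags |= PASID_FLAG_SUPERVISOR_MODE;

		/* Ask for 5-level tables only when the CPU itself uses LA57. */
		if (cpu_feature_enabled(X86_FEATURE_LA57))
			flags |= PASID_FLAG_FL5LP;

		return flags;
	}
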
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index dca88f9..d7f2a53 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -17,25 +17,13 @@
 #include <linux/dmar.h>
 #include <linux/interrupt.h>
 #include <linux/mm_types.h>
+#include <linux/ioasid.h>
 #include <asm/page.h>
 
 #include "intel-pasid.h"
 
 static irqreturn_t prq_event_thread(int irq, void *d);
 
-int intel_svm_init(struct intel_iommu *iommu)
-{
-	if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
-			!cap_fl1gp_support(iommu->cap))
-		return -EINVAL;
-
-	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
-			!cap_5lp_support(iommu->cap))
-		return -EINVAL;
-
-	return 0;
-}
-
 #define PRQ_ORDER 0
 
 int intel_svm_enable_prq(struct intel_iommu *iommu)
@@ -99,6 +87,33 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
 	return 0;
 }
 
+static inline bool intel_svm_capable(struct intel_iommu *iommu)
+{
+	return iommu->flags & VTD_FLAG_SVM_CAPABLE;
+}
+
+void intel_svm_check(struct intel_iommu *iommu)
+{
+	if (!pasid_supported(iommu))
+		return;
+
+	if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
+	    !cap_fl1gp_support(iommu->cap)) {
+		pr_err("%s SVM disabled, incompatible 1GB page capability\n",
+		       iommu->name);
+		return;
+	}
+
+	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
+	    !cap_5lp_support(iommu->cap)) {
+		pr_err("%s SVM disabled, incompatible paging mode\n",
+		       iommu->name);
+		return;
+	}
+
+	iommu->flags |= VTD_FLAG_SVM_CAPABLE;
+}
+
 static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
 				unsigned long address, unsigned long pages, int ih)
 {
@@ -207,6 +222,10 @@ static const struct mmu_notifier_ops intel_mmuops = {
 static DEFINE_MUTEX(pasid_mutex);
 static LIST_HEAD(global_svm_list);
 
+#define for_each_svm_dev(sdev, svm, d)			\
+	list_for_each_entry((sdev), &(svm)->devs, list)	\
+		if ((d) != (sdev)->dev) {} else
+
 int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
 {
 	struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
@@ -220,6 +239,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 	if (!iommu || dmar_disabled)
 		return -EINVAL;
 
+	if (!intel_svm_capable(iommu))
+		return -ENOTSUPP;
+
 	if (dev_is_pci(dev)) {
 		pasid_max = pci_max_pasids(to_pci_dev(dev));
 		if (pasid_max < 0)
@@ -252,15 +274,14 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 				goto out;
 			}
 
-			list_for_each_entry(sdev, &svm->devs, list) {
-				if (dev == sdev->dev) {
-					if (sdev->ops != ops) {
-						ret = -EBUSY;
-						goto out;
-					}
-					sdev->users++;
-					goto success;
+			/* Find the matching device in svm list */
+			for_each_svm_dev(sdev, svm, dev) {
+				if (sdev->ops != ops) {
+					ret = -EBUSY;
+					goto out;
 				}
+				sdev->users++;
+				goto success;
 			}
 
 			break;
@@ -314,16 +335,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 		if (pasid_max > intel_pasid_max_id)
 			pasid_max = intel_pasid_max_id;
 
-		/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
-		ret = intel_pasid_alloc_id(svm,
-					   !!cap_caching_mode(iommu->cap),
-					   pasid_max - 1, GFP_KERNEL);
-		if (ret < 0) {
+		/* Do not use PASID 0, reserved for RID to PASID */
+		svm->pasid = ioasid_alloc(NULL, PASID_MIN,
+					  pasid_max - 1, svm);
+		if (svm->pasid == INVALID_IOASID) {
 			kfree(svm);
 			kfree(sdev);
+			ret = -ENOSPC;
 			goto out;
 		}
-		svm->pasid = ret;
 		svm->notifier.ops = &intel_mmuops;
 		svm->mm = mm;
 		svm->flags = flags;
@@ -333,7 +353,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 		if (mm) {
 			ret = mmu_notifier_register(&svm->notifier, mm);
 			if (ret) {
-				intel_pasid_free_id(svm->pasid);
+				ioasid_free(svm->pasid);
 				kfree(svm);
 				kfree(sdev);
 				goto out;
@@ -344,12 +364,14 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 		ret = intel_pasid_setup_first_level(iommu, dev,
 				mm ? mm->pgd : init_mm.pgd,
 				svm->pasid, FLPT_DEFAULT_DID,
-				mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+				(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
+				(cpu_feature_enabled(X86_FEATURE_LA57) ?
+				 PASID_FLAG_FL5LP : 0));
 		spin_unlock(&iommu->lock);
 		if (ret) {
 			if (mm)
 				mmu_notifier_unregister(&svm->notifier, mm);
-			intel_pasid_free_id(svm->pasid);
+			ioasid_free(svm->pasid);
 			kfree(svm);
 			kfree(sdev);
 			goto out;
@@ -365,7 +387,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 		ret = intel_pasid_setup_first_level(iommu, dev,
 						mm ? mm->pgd : init_mm.pgd,
 						svm->pasid, FLPT_DEFAULT_DID,
-						mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+						(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
+						(cpu_feature_enabled(X86_FEATURE_LA57) ?
+						PASID_FLAG_FL5LP : 0));
 		spin_unlock(&iommu->lock);
 		if (ret) {
 			kfree(sdev);
@@ -397,44 +421,45 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
 	if (!iommu)
 		goto out;
 
-	svm = intel_pasid_lookup_id(pasid);
+	svm = ioasid_find(NULL, pasid, NULL);
 	if (!svm)
 		goto out;
 
-	list_for_each_entry(sdev, &svm->devs, list) {
-		if (dev == sdev->dev) {
-			ret = 0;
-			sdev->users--;
-			if (!sdev->users) {
-				list_del_rcu(&sdev->list);
-				/* Flush the PASID cache and IOTLB for this device.
-				 * Note that we do depend on the hardware *not* using
-				 * the PASID any more. Just as we depend on other
-				 * devices never using PASIDs that they have no right
-				 * to use. We have a *shared* PASID table, because it's
-				 * large and has to be physically contiguous. So it's
-				 * hard to be as defensive as we might like. */
-				intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
-				intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
-				kfree_rcu(sdev, rcu);
+	if (IS_ERR(svm)) {
+		ret = PTR_ERR(svm);
+		goto out;
+	}
 
-				if (list_empty(&svm->devs)) {
-					intel_pasid_free_id(svm->pasid);
-					if (svm->mm)
-						mmu_notifier_unregister(&svm->notifier, svm->mm);
+	for_each_svm_dev(sdev, svm, dev) {
+		ret = 0;
+		sdev->users--;
+		if (!sdev->users) {
+			list_del_rcu(&sdev->list);
+			/* Flush the PASID cache and IOTLB for this device.
+			 * Note that we do depend on the hardware *not* using
+			 * the PASID any more. Just as we depend on other
+			 * devices never using PASIDs that they have no right
+			 * to use. We have a *shared* PASID table, because it's
+			 * large and has to be physically contiguous. So it's
+			 * hard to be as defensive as we might like. */
+			intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
+			intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
+			kfree_rcu(sdev, rcu);
 
-					list_del(&svm->list);
-
-					/* We mandate that no page faults may be outstanding
-					 * for the PASID when intel_svm_unbind_mm() is called.
-					 * If that is not obeyed, subtle errors will happen.
-					 * Let's make them less subtle... */
-					memset(svm, 0x6b, sizeof(*svm));
-					kfree(svm);
-				}
+			if (list_empty(&svm->devs)) {
+				ioasid_free(svm->pasid);
+				if (svm->mm)
+					mmu_notifier_unregister(&svm->notifier, svm->mm);
+				list_del(&svm->list);
+				/* We mandate that no page faults may be outstanding
+				 * for the PASID when intel_svm_unbind_mm() is called.
+				 * If that is not obeyed, subtle errors will happen.
+				 * Let's make them less subtle... */
+				memset(svm, 0x6b, sizeof(*svm));
+				kfree(svm);
 			}
-			break;
 		}
+		break;
 	}
  out:
 	mutex_unlock(&pasid_mutex);
@@ -454,10 +479,14 @@ int intel_svm_is_pasid_valid(struct device *dev, int pasid)
 	if (!iommu)
 		goto out;
 
-	svm = intel_pasid_lookup_id(pasid);
+	svm = ioasid_find(NULL, pasid, NULL);
 	if (!svm)
 		goto out;
 
+	if (IS_ERR(svm)) {
+		ret = PTR_ERR(svm);
+		goto out;
+	}
 	/* init_mm is used in this case */
 	if (!svm->mm)
 		ret = 1;
@@ -564,13 +593,12 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 
 		if (!svm || svm->pasid != req->pasid) {
 			rcu_read_lock();
-			svm = intel_pasid_lookup_id(req->pasid);
+			svm = ioasid_find(NULL, req->pasid, NULL);
 			/* It *can't* go away, because the driver is not permitted
 			 * to unbind the mm while any page faults are outstanding.
 			 * So we only need RCU to protect the internal idr code. */
 			rcu_read_unlock();
-
-			if (!svm) {
+			if (IS_ERR_OR_NULL(svm)) {
 				pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
 				       iommu->name, req->pasid, ((unsigned long long *)req)[0],
 				       ((unsigned long long *)req)[1]);
@@ -654,11 +682,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 			if (req->priv_data_present)
 				memcpy(&resp.qw2, req->priv_data,
 				       sizeof(req->priv_data));
+			resp.qw2 = 0;
+			resp.qw3 = 0;
+			qi_submit_sync(&resp, iommu);
 		}
-		resp.qw2 = 0;
-		resp.qw3 = 0;
-		qi_submit_sync(&resp, iommu);
-
 		head = (head + sizeof(*req)) & PRQ_RING_MASK;
 	}
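
The switch from the private IDR allocator to the shared ioasid allocator changes two conventions that the SVM code now follows: allocation failure is reported as INVALID_IOASID rather than a negative return, and ioasid_find() may hand back an ERR_PTR as well as NULL. A rough, self-contained usage sketch under those assumptions (example_ names are hypothetical):

	#include <linux/ioasid.h>
	#include <linux/err.h>
	#include <linux/errno.h>

	static int example_pasid_cycle(void *private)
	{
		ioasid_t pasid;
		void *entry;

		/* PASID 0 stays reserved for RID-to-PASID translation. */
		pasid = ioasid_alloc(NULL, 1, (1U << 20) - 1, private);
		if (pasid == INVALID_IOASID)
			return -ENOSPC;

		/* Lookup can return NULL (free) or an ERR_PTR from a getter. */
		entry = ioasid_find(NULL, pasid, NULL);
		if (IS_ERR_OR_NULL(entry)) {
			ioasid_free(pasid);
			return entry ? PTR_ERR(entry) : -ENOENT;
		}

		ioasid_free(pasid);
		return 0;
	}
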
 
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 7c3bd2c..4272fe4 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -149,8 +149,6 @@
 #define ARM_V7S_TTBR_IRGN_ATTR(attr)					\
 	((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1))
 
-#define ARM_V7S_TCR_PD1			BIT(5)
-
 #ifdef CONFIG_ZONE_DMA32
 #define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
 #define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
@@ -798,8 +796,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
 	 */
 	cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
 
-	/* TCR: T0SZ=0, disable TTBR1 */
-	cfg->arm_v7s_cfg.tcr = ARM_V7S_TCR_PD1;
+	/* TCR: T0SZ=0, EAE=0 (if applicable) */
+	cfg->arm_v7s_cfg.tcr = 0;
 
 	/*
 	 * TEX remap: the indices used map to the closest equivalent types
@@ -822,15 +820,13 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
 	/* Ensure the empty pgd is visible before any actual TTBR write */
 	wmb();
 
-	/* TTBRs */
-	cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
-				   ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
-				   (cfg->coherent_walk ?
-				   (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
-				    ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
-				   (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
-				    ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
-	cfg->arm_v7s_cfg.ttbr[1] = 0;
+	/* TTBR */
+	cfg->arm_v7s_cfg.ttbr = virt_to_phys(data->pgd) | ARM_V7S_TTBR_S |
+				(cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
+				 ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
+				 ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
+				(ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
+				 ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
 	return &data->iop;
 
 out_free_data:
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index bdf47f7..983b084 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -100,40 +100,29 @@
 #define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
 
 /* Register bits */
-#define ARM_32_LPAE_TCR_EAE		(1 << 31)
-#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)
+#define ARM_LPAE_TCR_TG0_4K		0
+#define ARM_LPAE_TCR_TG0_64K		1
+#define ARM_LPAE_TCR_TG0_16K		2
 
-#define ARM_LPAE_TCR_EPD1		(1 << 23)
+#define ARM_LPAE_TCR_TG1_16K		1
+#define ARM_LPAE_TCR_TG1_4K		2
+#define ARM_LPAE_TCR_TG1_64K		3
 
-#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
-#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
-#define ARM_LPAE_TCR_TG0_16K		(2 << 14)
-
-#define ARM_LPAE_TCR_SH0_SHIFT		12
-#define ARM_LPAE_TCR_SH0_MASK		0x3
 #define ARM_LPAE_TCR_SH_NS		0
 #define ARM_LPAE_TCR_SH_OS		2
 #define ARM_LPAE_TCR_SH_IS		3
 
-#define ARM_LPAE_TCR_ORGN0_SHIFT	10
-#define ARM_LPAE_TCR_IRGN0_SHIFT	8
-#define ARM_LPAE_TCR_RGN_MASK		0x3
 #define ARM_LPAE_TCR_RGN_NC		0
 #define ARM_LPAE_TCR_RGN_WBWA		1
 #define ARM_LPAE_TCR_RGN_WT		2
 #define ARM_LPAE_TCR_RGN_WB		3
 
-#define ARM_LPAE_TCR_SL0_SHIFT		6
-#define ARM_LPAE_TCR_SL0_MASK		0x3
+#define ARM_LPAE_VTCR_SL0_MASK		0x3
 
 #define ARM_LPAE_TCR_T0SZ_SHIFT		0
-#define ARM_LPAE_TCR_SZ_MASK		0xf
 
-#define ARM_LPAE_TCR_PS_SHIFT		16
-#define ARM_LPAE_TCR_PS_MASK		0x7
-
-#define ARM_LPAE_TCR_IPS_SHIFT		32
-#define ARM_LPAE_TCR_IPS_MASK		0x7
+#define ARM_LPAE_VTCR_PS_SHIFT		16
+#define ARM_LPAE_VTCR_PS_MASK		0x7
 
 #define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
 #define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
@@ -293,17 +282,11 @@ static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 {
 	arm_lpae_iopte pte = prot;
 
-	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
-		pte |= ARM_LPAE_PTE_NS;
-
 	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
 		pte |= ARM_LPAE_PTE_TYPE_PAGE;
 	else
 		pte |= ARM_LPAE_PTE_TYPE_BLOCK;
 
-	if (data->iop.fmt != ARM_MALI_LPAE)
-		pte |= ARM_LPAE_PTE_AF;
-	pte |= ARM_LPAE_PTE_SH_IS;
 	pte |= paddr_to_iopte(paddr, data);
 
 	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
@@ -460,9 +443,20 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
 				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 	}
 
+	if (prot & IOMMU_CACHE)
+		pte |= ARM_LPAE_PTE_SH_IS;
+	else
+		pte |= ARM_LPAE_PTE_SH_OS;
+
 	if (prot & IOMMU_NOEXEC)
 		pte |= ARM_LPAE_PTE_XN;
 
+	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+		pte |= ARM_LPAE_PTE_NS;
+
+	if (data->iop.fmt != ARM_MALI_LPAE)
+		pte |= ARM_LPAE_PTE_AF;
+
 	return pte;
 }
 
@@ -474,6 +468,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 	arm_lpae_iopte *ptep = data->pgd;
 	int ret, lvl = data->start_level;
 	arm_lpae_iopte prot;
+	long iaext = (long)iova >> cfg->ias;
 
 	/* If no access, then nothing to do */
 	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
@@ -482,7 +477,9 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
 		return -EINVAL;
 
-	if (WARN_ON(iova >> data->iop.cfg.ias || paddr >> data->iop.cfg.oas))
+	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+		iaext = ~iaext;
+	if (WARN_ON(iaext || paddr >> cfg->oas))
 		return -ERANGE;
 
 	prot = arm_lpae_prot_to_pte(data, iommu_prot);
@@ -648,11 +645,14 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 	arm_lpae_iopte *ptep = data->pgd;
+	long iaext = (long)iova >> cfg->ias;
 
 	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
 		return 0;
 
-	if (WARN_ON(iova >> data->iop.cfg.ias))
+	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+		iaext = ~iaext;
+	if (WARN_ON(iaext))
 		return 0;
 
 	return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
@@ -787,9 +787,12 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 {
 	u64 reg;
 	struct arm_lpae_io_pgtable *data;
+	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
+	bool tg1;
 
 	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
-			    IO_PGTABLE_QUIRK_NON_STRICT))
+			    IO_PGTABLE_QUIRK_NON_STRICT |
+			    IO_PGTABLE_QUIRK_ARM_TTBR1))
 		return NULL;
 
 	data = arm_lpae_alloc_pgtable(cfg);
@@ -798,58 +801,55 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 
 	/* TCR */
 	if (cfg->coherent_walk) {
-		reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
-		      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
-		      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+		tcr->sh = ARM_LPAE_TCR_SH_IS;
+		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
+		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
 	} else {
-		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
-		      (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
-		      (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
+		tcr->sh = ARM_LPAE_TCR_SH_OS;
+		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
+		tcr->orgn = ARM_LPAE_TCR_RGN_NC;
 	}
 
+	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
 	switch (ARM_LPAE_GRANULE(data)) {
 	case SZ_4K:
-		reg |= ARM_LPAE_TCR_TG0_4K;
+		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
 		break;
 	case SZ_16K:
-		reg |= ARM_LPAE_TCR_TG0_16K;
+		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
 		break;
 	case SZ_64K:
-		reg |= ARM_LPAE_TCR_TG0_64K;
+		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
 		break;
 	}
 
 	switch (cfg->oas) {
 	case 32:
-		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
 		break;
 	case 36:
-		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
 		break;
 	case 40:
-		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
 		break;
 	case 42:
-		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
 		break;
 	case 44:
-		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
 		break;
 	case 48:
-		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
 		break;
 	case 52:
-		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
 		break;
 	default:
 		goto out_free_data;
 	}
 
-	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
-
-	/* Disable speculative walks through TTBR1 */
-	reg |= ARM_LPAE_TCR_EPD1;
-	cfg->arm_lpae_s1_cfg.tcr = reg;
+	tcr->tsz = 64ULL - cfg->ias;
 
 	/* MAIRs */
 	reg = (ARM_LPAE_MAIR_ATTR_NC
@@ -872,9 +872,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 	/* Ensure the empty pgd is visible before any actual TTBR write */
 	wmb();
 
-	/* TTBRs */
-	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
-	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+	/* TTBR */
+	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
 	return &data->iop;
 
 out_free_data:
@@ -885,8 +884,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 static struct io_pgtable *
 arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 {
-	u64 reg, sl;
+	u64 sl;
 	struct arm_lpae_io_pgtable *data;
+	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
 
 	/* The NS quirk doesn't apply at stage 2 */
 	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
@@ -911,55 +911,59 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 	}
 
 	/* VTCR */
-	reg = ARM_64_LPAE_S2_TCR_RES1 |
-	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
-	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
-	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+	if (cfg->coherent_walk) {
+		vtcr->sh = ARM_LPAE_TCR_SH_IS;
+		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
+		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
+	} else {
+		vtcr->sh = ARM_LPAE_TCR_SH_OS;
+		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
+		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
+	}
 
 	sl = data->start_level;
 
 	switch (ARM_LPAE_GRANULE(data)) {
 	case SZ_4K:
-		reg |= ARM_LPAE_TCR_TG0_4K;
+		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
 		sl++; /* SL0 format is different for 4K granule size */
 		break;
 	case SZ_16K:
-		reg |= ARM_LPAE_TCR_TG0_16K;
+		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
 		break;
 	case SZ_64K:
-		reg |= ARM_LPAE_TCR_TG0_64K;
+		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
 		break;
 	}
 
 	switch (cfg->oas) {
 	case 32:
-		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
+		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
 		break;
 	case 36:
-		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
+		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
 		break;
 	case 40:
-		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
+		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
 		break;
 	case 42:
-		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
+		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
 		break;
 	case 44:
-		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
+		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
 		break;
 	case 48:
-		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
+		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
 		break;
 	case 52:
-		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
+		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
 		break;
 	default:
 		goto out_free_data;
 	}
 
-	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
-	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
-	cfg->arm_lpae_s2_cfg.vtcr = reg;
+	vtcr->tsz = 64ULL - cfg->ias;
+	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;
 
 	/* Allocate pgd pages */
 	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
@@ -982,35 +986,21 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 static struct io_pgtable *
 arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 {
-	struct io_pgtable *iop;
-
 	if (cfg->ias > 32 || cfg->oas > 40)
 		return NULL;
 
 	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
-	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
-	if (iop) {
-		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
-		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
-	}
-
-	return iop;
+	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
 }
 
 static struct io_pgtable *
 arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 {
-	struct io_pgtable *iop;
-
 	if (cfg->ias > 40 || cfg->oas > 40)
 		return NULL;
 
 	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
-	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
-	if (iop)
-		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;
-
-	return iop;
+	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
 }
 
 static struct io_pgtable *
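
Since io-pgtable no longer hands back a pre-packed TCR value, each consumer assembles the hardware register itself from the named fields (the qcom driver further down does this via arm_smmu_lpae_tcr()). A rough sketch of such a packing helper; the EXAMPLE_TCR_* field positions are placeholders standing in for a driver's real register layout, not values taken from this series:

	#include <linux/bitfield.h>
	#include <linux/io-pgtable.h>

	#define EXAMPLE_TCR_TG0		GENMASK(15, 14)
	#define EXAMPLE_TCR_SH0		GENMASK(13, 12)
	#define EXAMPLE_TCR_ORGN0	GENMASK(11, 10)
	#define EXAMPLE_TCR_IRGN0	GENMASK(9, 8)
	#define EXAMPLE_TCR_T0SZ	GENMASK(5, 0)

	/* Pack the decomposed stage-1 TCR fields into one register value. */
	static u32 example_lpae_tcr(const struct io_pgtable_cfg *cfg)
	{
		return FIELD_PREP(EXAMPLE_TCR_TG0, cfg->arm_lpae_s1_cfg.tcr.tg) |
		       FIELD_PREP(EXAMPLE_TCR_SH0, cfg->arm_lpae_s1_cfg.tcr.sh) |
		       FIELD_PREP(EXAMPLE_TCR_ORGN0, cfg->arm_lpae_s1_cfg.tcr.orgn) |
		       FIELD_PREP(EXAMPLE_TCR_IRGN0, cfg->arm_lpae_s1_cfg.tcr.irgn) |
		       FIELD_PREP(EXAMPLE_TCR_T0SZ, cfg->arm_lpae_s1_cfg.tcr.tsz);
	}
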
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index ced53e5..94394c8 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -63,7 +63,7 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
 	if (!ops)
 		return;
 
-	iop = container_of(ops, struct io_pgtable, ops);
+	iop = io_pgtable_ops_to_pgtable(ops);
 	io_pgtable_tlb_flush_all(iop);
 	io_pgtable_init_table[iop->fmt]->free(iop);
 }
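
io_pgtable_ops_to_pgtable() is simply the container_of() idiom already provided by io-pgtable.h; the one-line change above reuses it instead of open-coding container_of(). For readers unfamiliar with the idiom, a tiny standalone sketch (the types here are illustrative only):

	#include <linux/kernel.h>	/* container_of() */

	struct example_ops { int dummy; };

	struct example_pgtable {
		int			fmt;
		struct example_ops	ops;	/* embedded member */
	};

	/* Recover the enclosing object from a pointer to its member. */
	static struct example_pgtable *
	example_ops_to_pgtable(struct example_ops *ops)
	{
		return container_of(ops, struct example_pgtable, ops);
	}
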
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
index e436ff8..9986921 100644
--- a/drivers/iommu/iommu-sysfs.c
+++ b/drivers/iommu/iommu-sysfs.c
@@ -87,6 +87,7 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
 	put_device(iommu->dev);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(iommu_device_sysfs_add);
 
 void iommu_device_sysfs_remove(struct iommu_device *iommu)
 {
@@ -94,6 +95,8 @@ void iommu_device_sysfs_remove(struct iommu_device *iommu)
 	device_unregister(iommu->dev);
 	iommu->dev = NULL;
 }
+EXPORT_SYMBOL_GPL(iommu_device_sysfs_remove);
+
 /*
  * IOMMU drivers can indicate a device is managed by a given IOMMU using
  * this interface.  A link to the device will be created in the "devices"
@@ -119,6 +122,7 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(iommu_device_link);
 
 void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
 {
@@ -128,3 +132,4 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
 	sysfs_remove_link(&link->kobj, "iommu");
 	sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
 }
+EXPORT_SYMBOL_GPL(iommu_device_unlink);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3ead597..3e35284 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -22,6 +22,7 @@
 #include <linux/bitops.h>
 #include <linux/property.h>
 #include <linux/fsl/mc.h>
+#include <linux/module.h>
 #include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
@@ -141,6 +142,7 @@ int iommu_device_register(struct iommu_device *iommu)
 	spin_unlock(&iommu_device_lock);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(iommu_device_register);
 
 void iommu_device_unregister(struct iommu_device *iommu)
 {
@@ -148,6 +150,7 @@ void iommu_device_unregister(struct iommu_device *iommu)
 	list_del(&iommu->list);
 	spin_unlock(&iommu_device_lock);
 }
+EXPORT_SYMBOL_GPL(iommu_device_unregister);
 
 static struct iommu_param *iommu_get_dev_param(struct device *dev)
 {
@@ -183,10 +186,21 @@ int iommu_probe_device(struct device *dev)
 	if (!iommu_get_dev_param(dev))
 		return -ENOMEM;
 
+	if (!try_module_get(ops->owner)) {
+		ret = -EINVAL;
+		goto err_free_dev_param;
+	}
+
 	ret = ops->add_device(dev);
 	if (ret)
-		iommu_free_dev_param(dev);
+		goto err_module_put;
 
+	return 0;
+
+err_module_put:
+	module_put(ops->owner);
+err_free_dev_param:
+	iommu_free_dev_param(dev);
 	return ret;
 }
 
@@ -197,7 +211,10 @@ void iommu_release_device(struct device *dev)
 	if (dev->iommu_group)
 		ops->remove_device(dev);
 
-	iommu_free_dev_param(dev);
+	if (dev->iommu_param) {
+		module_put(ops->owner);
+		iommu_free_dev_param(dev);
+	}
 }
 
 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
@@ -887,6 +904,7 @@ struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
 	kobject_get(group->devices_kobj);
 	return group;
 }
+EXPORT_SYMBOL_GPL(iommu_group_ref_get);
 
 /**
  * iommu_group_put - Decrement group reference
@@ -1260,6 +1278,7 @@ struct iommu_group *generic_device_group(struct device *dev)
 {
 	return iommu_group_alloc();
 }
+EXPORT_SYMBOL_GPL(generic_device_group);
 
 /*
  * Use standard PCI bus topology, isolation features, and DMA alias quirks
@@ -1327,6 +1346,7 @@ struct iommu_group *pci_device_group(struct device *dev)
 	/* No shared group found, allocate new */
 	return iommu_group_alloc();
 }
+EXPORT_SYMBOL_GPL(pci_device_group);
 
 /* Get the IOMMU group for device on fsl-mc bus */
 struct iommu_group *fsl_mc_device_group(struct device *dev)
@@ -1339,6 +1359,7 @@ struct iommu_group *fsl_mc_device_group(struct device *dev)
 		group = iommu_group_alloc();
 	return group;
 }
+EXPORT_SYMBOL_GPL(fsl_mc_device_group);
 
 /**
  * iommu_group_get_for_dev - Find or create the IOMMU group for a device
@@ -1407,6 +1428,7 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 
 	return group;
 }
+EXPORT_SYMBOL(iommu_group_get_for_dev);
 
 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
 {
@@ -1537,6 +1559,11 @@ int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
 {
 	int err;
 
+	if (ops == NULL) {
+		bus->iommu_ops = NULL;
+		return 0;
+	}
+
 	if (bus->iommu_ops != NULL)
 		return -EBUSY;
 
@@ -2230,6 +2257,25 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list)
 		ops->put_resv_regions(dev, list);
 }
 
+/**
+ * generic_iommu_put_resv_regions - Reserved region driver helper
+ * @dev: device for which to free reserved regions
+ * @list: reserved region list for device
+ *
+ * IOMMU drivers can use this to implement their .put_resv_regions() callback
+ * for simple reservations. Memory allocated for each reserved region will be
+ * freed. If an IOMMU driver allocates additional resources per region, it is
+ * going to have to implement a custom callback.
+ */
+void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
+{
+	struct iommu_resv_region *entry, *next;
+
+	list_for_each_entry_safe(entry, next, list, list)
+		kfree(entry);
+}
+EXPORT_SYMBOL(generic_iommu_put_resv_regions);
+
 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
 						  size_t length, int prot,
 						  enum iommu_resv_type type)
@@ -2247,6 +2293,7 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
 	region->type = type;
 	return region;
 }
+EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
 
 static int
 request_default_domain_for_dev(struct device *dev, unsigned long type)
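
The kernel-doc above spells out when generic_iommu_put_resv_regions() is enough: the driver's ->get_resv_regions() only does plain allocations, so a plain kfree() of each list entry is the complete teardown. A hypothetical driver fragment under that assumption (names and addresses are illustrative):

	#include <linux/iommu.h>
	#include <linux/list.h>
	#include <linux/sizes.h>

	/* Reserve a 64K MSI doorbell window for every device. */
	static void example_get_resv_regions(struct device *dev,
					     struct list_head *head)
	{
		struct iommu_resv_region *region;

		region = iommu_alloc_resv_region(0x08000000, SZ_64K,
						 IOMMU_WRITE, IOMMU_RESV_MSI);
		if (region)
			list_add_tail(&region->list, head);
	}

	static const struct iommu_ops example_iommu_ops = {
		.get_resv_regions	= example_get_resv_regions,
		/* Nothing beyond the allocation itself to undo: */
		.put_resv_regions	= generic_iommu_put_resv_regions,
	};
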
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index c7a914b..0e6a953 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -233,7 +233,7 @@ static DEFINE_MUTEX(iova_cache_mutex);
 
 struct iova *alloc_iova_mem(void)
 {
-	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC);
+	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
 }
 EXPORT_SYMBOL(alloc_iova_mem);
 
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index d02edd2..ecb3f94 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -374,7 +374,7 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
 	u32 tmp;
 
 	/* TTBR0 */
-	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
+	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
 	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
 	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
 
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 93f14bc..94a6df1 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -279,8 +279,8 @@ static void __program_context(void __iomem *base, int ctx,
 	SET_V2PCFG(base, ctx, 0x3);
 
 	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
-	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
-	SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);
+	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
+	SET_TTBR1(base, ctx, 0);
 
 	/* Set prrr and nmrr */
 	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 6fc1f5e..95945f4 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -367,7 +367,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
 	/* Update the pgtable base address register of the M4U HW */
 	if (!data->m4u_dom) {
 		data->m4u_dom = dom;
-		writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
+		writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
 		       data->base + REG_MMU_PT_BASE_ADDR);
 	}
 
@@ -765,7 +765,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
 	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
 	writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
 	if (m4u_dom)
-		writel(m4u_dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
+		writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
 		       base + REG_MMU_PT_BASE_ADDR);
 	return 0;
 }
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 026ad2b..20738aac 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -8,11 +8,12 @@
 #include <linux/export.h>
 #include <linux/iommu.h>
 #include <linux/limits.h>
-#include <linux/pci.h>
+#include <linux/module.h>
 #include <linux/msi.h>
 #include <linux/of.h>
 #include <linux/of_iommu.h>
 #include <linux/of_pci.h>
+#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/fsl/mc.h>
 
@@ -91,16 +92,16 @@ static int of_iommu_xlate(struct device *dev,
 {
 	const struct iommu_ops *ops;
 	struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
-	int err;
+	int ret;
 
 	ops = iommu_ops_from_fwnode(fwnode);
 	if ((ops && !ops->of_xlate) ||
 	    !of_device_is_available(iommu_spec->np))
 		return NO_IOMMU;
 
-	err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
-	if (err)
-		return err;
+	ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
+	if (ret)
+		return ret;
 	/*
 	 * The otherwise-empty fwspec handily serves to indicate the specific
 	 * IOMMU device we're waiting for, which will be useful if we ever get
@@ -109,7 +110,12 @@ static int of_iommu_xlate(struct device *dev,
 	if (!ops)
 		return driver_deferred_probe_check_state(dev);
 
-	return ops->of_xlate(dev, iommu_spec);
+	if (!try_module_get(ops->owner))
+		return -ENODEV;
+
+	ret = ops->of_xlate(dev, iommu_spec);
+	module_put(ops->owner);
+	return ret;
 }
 
 struct of_pci_iommu_alias_info {
@@ -179,6 +185,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
 			.np = master_np,
 		};
 
+		pci_request_acs();
 		err = pci_for_each_dma_alias(to_pci_dev(dev),
 					     of_pci_iommu_init, &info);
 	} else if (dev_is_fsl_mc(dev)) {
@@ -196,8 +203,12 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
 			if (err)
 				break;
 		}
-	}
 
+		fwspec = dev_iommu_fwspec_get(dev);
+		if (!err && fwspec)
+			of_property_read_u32(master_np, "pasid-num-bits",
+					     &fwspec->num_pasid_bits);
+	}
 
 	/*
 	 * Two success conditions can be represented by non-negative err here:
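
of_iommu_xlate() now takes a reference on the IOMMU driver module for the duration of the ->of_xlate() call, the same owner-pinning that iommu_probe_device() gained above for ->add_device(). Isolated from the surrounding code, the pattern looks like this (a sketch; the example_ name is hypothetical):

	#include <linux/iommu.h>
	#include <linux/module.h>
	#include <linux/of.h>

	/* Invoke a driver callback without racing against module unload. */
	static int example_call_of_xlate(const struct iommu_ops *ops,
					 struct device *dev,
					 struct of_phandle_args *spec)
	{
		int ret;

		if (!try_module_get(ops->owner))
			return -ENODEV;

		ret = ops->of_xlate(dev, spec);
		module_put(ops->owner);

		return ret;
	}
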
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 52f3829..39759db 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -201,7 +201,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
 
 	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);
 
-	if (!(fsr & FSR_FAULT))
+	if (!(fsr & ARM_SMMU_FSR_FAULT))
 		return IRQ_NONE;
 
 	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
@@ -215,7 +215,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
 	}
 
 	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
-	iommu_writel(ctx, ARM_SMMU_CB_RESUME, RESUME_TERMINATE);
+	iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);
 
 	return IRQ_HANDLED;
 }
@@ -269,18 +269,15 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 
 		/* TTBRs */
 		iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
-				pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] |
-				FIELD_PREP(TTBRn_ASID, ctx->asid));
-		iommu_writeq(ctx, ARM_SMMU_CB_TTBR1,
-				pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] |
-				FIELD_PREP(TTBRn_ASID, ctx->asid));
+				pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
+				FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
+		iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);
 
 		/* TCR */
 		iommu_writel(ctx, ARM_SMMU_CB_TCR2,
-				(pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) |
-				FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM));
+				arm_smmu_lpae_tcr2(&pgtbl_cfg));
 		iommu_writel(ctx, ARM_SMMU_CB_TCR,
-				pgtbl_cfg.arm_lpae_s1_cfg.tcr);
+			     arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);
 
 		/* MAIRs (stage-1 only) */
 		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
@@ -289,11 +286,13 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 				pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);
 
 		/* SCTLR */
-		reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
-			SCTLR_M | SCTLR_S1_ASIDPNE | SCTLR_CFCFG;
+		reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
+		      ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
+		      ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
+		      ARM_SMMU_SCTLR_CFCFG;
 
 		if (IS_ENABLED(CONFIG_BIG_ENDIAN))
-			reg |= SCTLR_E;
+			reg |= ARM_SMMU_SCTLR_E;
 
 		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
 
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 315c7cc4..cce329d 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -837,14 +837,6 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
 	iommu_dma_get_resv_regions(dev, head);
 }
 
-static void viommu_put_resv_regions(struct device *dev, struct list_head *head)
-{
-	struct iommu_resv_region *entry, *next;
-
-	list_for_each_entry_safe(entry, next, head, list)
-		kfree(entry);
-}
-
 static struct iommu_ops viommu_ops;
 static struct virtio_driver virtio_iommu_drv;
 
@@ -914,7 +906,7 @@ static int viommu_add_device(struct device *dev)
 err_unlink_dev:
 	iommu_device_unlink(&viommu->iommu, dev);
 err_free_dev:
-	viommu_put_resv_regions(dev, &vdev->resv_regions);
+	generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
 	kfree(vdev);
 
 	return ret;
@@ -932,7 +924,7 @@ static void viommu_remove_device(struct device *dev)
 
 	iommu_group_remove_device(dev);
 	iommu_device_unlink(&vdev->viommu->iommu, dev);
-	viommu_put_resv_regions(dev, &vdev->resv_regions);
+	generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
 	kfree(vdev);
 }
 
@@ -961,7 +953,7 @@ static struct iommu_ops viommu_ops = {
 	.remove_device		= viommu_remove_device,
 	.device_group		= viommu_device_group,
 	.get_resv_regions	= viommu_get_resv_regions,
-	.put_resv_regions	= viommu_put_resv_regions,
+	.put_resv_regions	= generic_iommu_put_resv_regions,
 	.of_xlate		= viommu_of_xlate,
 };
 
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 1006c69..6d39773 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -438,7 +438,7 @@
 	help
 	  Say yes here to enable C-SKY SMP interrupt controller driver used
 	  for C-SKY SMP system.
-	  In fact it's not mmio map in hw and it use ld/st to visit the
+	  In fact it is not mmio-mapped in hardware; it uses ld/st instructions to access the
 	  controller's register inside CPU.
 
 config CSKY_APB_INTC
@@ -446,7 +446,7 @@
 	depends on CSKY
 	help
 	  Say yes here to enable C-SKY APB interrupt controller driver used
-	  by C-SKY single core SOC system. It use mmio map apb-bus to visit
+	  by C-SKY single core SOC systems. It uses an mmio-mapped apb-bus to access
 	  the controller's register.
 
 config IMX_IRQSTEER
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index f717586..1ee95f5 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -661,7 +661,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,
 						   struct its_cmd_desc *desc)
 {
 	its_encode_cmd(cmd, GITS_CMD_INVALL);
-	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
+	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
 
 	its_fixup_cmd(cmd);
 
@@ -2376,6 +2376,8 @@ static u64 inherit_vpe_l1_table_from_its(void)
 			continue;
 
 		/* We have a winner! */
+		gic_data_rdist()->vpe_l1_base = its->tables[2].base;
+
 		val  = GICR_VPROPBASER_4_1_VALID;
 		if (baser & GITS_BASER_INDIRECT)
 			val |= GICR_VPROPBASER_4_1_INDIRECT;
@@ -2413,14 +2415,12 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
 
 	for_each_possible_cpu(cpu) {
 		void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
-		u32 tmp;
 
 		if (!base || cpu == smp_processor_id())
 			continue;
 
 		val = gic_read_typer(base + GICR_TYPER);
-		tmp = compute_common_aff(val);
-		if (tmp != aff)
+		if (aff != compute_common_aff(val))
 			continue;
 
 		/*
@@ -2429,9 +2429,10 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
 		 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
 		 * Make sure we don't write the Z bit in that case.
 		 */
-		val = gits_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
+		val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
 		val &= ~GICR_VPROPBASER_4_1_Z;
 
+		gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
 		*mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
 
 		return val;
@@ -2440,6 +2441,72 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
 	return 0;
 }
 
+static bool allocate_vpe_l2_table(int cpu, u32 id)
+{
+	void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
+	u64 val, gpsz, npg;
+	unsigned int psz, esz, idx;
+	struct page *page;
+	__le64 *table;
+
+	if (!gic_rdists->has_rvpeid)
+		return true;
+
+	val  = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
+
+	esz  = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
+	gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
+	npg  = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
+
+	switch (gpsz) {
+	default:
+		WARN_ON(1);
+		/* fall through */
+	case GIC_PAGE_SIZE_4K:
+		psz = SZ_4K;
+		break;
+	case GIC_PAGE_SIZE_16K:
+		psz = SZ_16K;
+		break;
+	case GIC_PAGE_SIZE_64K:
+		psz = SZ_64K;
+		break;
+	}
+
+	/* Don't allow vpe_id that exceeds single, flat table limit */
+	if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
+		return (id < (npg * psz / (esz * SZ_8)));
+
+	/* Compute 1st level table index & check if that exceeds table limit */
+	idx = id >> ilog2(psz / (esz * SZ_8));
+	if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
+		return false;
+
+	table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
+
+	/* Allocate memory for 2nd level table */
+	if (!table[idx]) {
+		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
+		if (!page)
+			return false;
+
+		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
+		if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
+			gic_flush_dcache_to_poc(page_address(page), psz);
+
+		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
+
+		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
+		if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
+			gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
+
+		/* Ensure updated table contents are visible to RD hardware */
+		dsb(sy);
+	}
+
+	return true;
+}
+
 static int allocate_vpe_l1_table(void)
 {
 	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
@@ -2457,8 +2524,8 @@ static int allocate_vpe_l1_table(void)
 	 * effect of making sure no doorbell will be generated and we can
 	 * then safely clear VPROPBASER.Valid.
 	 */
-	if (gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
-		gits_write_vpendbaser(GICR_VPENDBASER_PendingLast,
+	if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
+		gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
 				      vlpi_base + GICR_VPENDBASER);
 
 	/*
@@ -2481,8 +2548,8 @@ static int allocate_vpe_l1_table(void)
 
 	/* First probe the page size */
 	val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
-	gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
-	val = gits_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
+	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+	val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
 	gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
 	esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
 
@@ -2531,7 +2598,7 @@ static int allocate_vpe_l1_table(void)
 		npg = 1;
 	}
 
-	val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg);
+	val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
 
 	/* Right, that's the number of CPU pages we need for L1 */
 	np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
@@ -2542,7 +2609,7 @@ static int allocate_vpe_l1_table(void)
 	if (!page)
 		return -ENOMEM;
 
-	gic_data_rdist()->vpe_l1_page = page;
+	gic_data_rdist()->vpe_l1_base = page_address(page);
 	pa = virt_to_phys(page_address(page));
 	WARN_ON(!IS_ALIGNED(pa, psz));
 
@@ -2553,7 +2620,7 @@ static int allocate_vpe_l1_table(void)
 	val |= GICR_VPROPBASER_4_1_VALID;
 
 out:
-	gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
 	cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
 
 	pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
@@ -2660,14 +2727,14 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
 	bool clean;
 	u64 val;
 
-	val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+	val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
 	val &= ~GICR_VPENDBASER_Valid;
 	val &= ~clr;
 	val |= set;
-	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
 
 	do {
-		val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+		val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
 		clean = !(val & GICR_VPENDBASER_Dirty);
 		if (!clean) {
 			count--;
@@ -2782,7 +2849,7 @@ static void its_cpu_init_lpis(void)
 		val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
 		pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
 			smp_processor_id(), val);
-		gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+		gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
 
 		/*
 		 * Also clear Valid bit of GICR_VPENDBASER, in case some
@@ -2790,7 +2857,6 @@ static void its_cpu_init_lpis(void)
 		 * corrupting memory.
 		 */
 		val = its_clear_vpend_valid(vlpi_base, 0, 0);
-		WARN_ON(val & GICR_VPENDBASER_Dirty);
 	}
 
 	if (allocate_vpe_l1_table()) {
@@ -2954,6 +3020,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
 static bool its_alloc_vpe_table(u32 vpe_id)
 {
 	struct its_node *its;
+	int cpu;
 
 	/*
 	 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
@@ -2976,6 +3043,19 @@ static bool its_alloc_vpe_table(u32 vpe_id)
 			return false;
 	}
 
+	/* Non v4.1? No need to iterate RDs and go back early. */
+	if (!gic_rdists->has_rvpeid)
+		return true;
+
+	/*
+	 * Make sure the L2 tables are allocated for all copies of
+	 * the L1 table on *all* v4.1 RDs.
+	 */
+	for_each_possible_cpu(cpu) {
+		if (!allocate_vpe_l2_table(cpu, vpe_id))
+			return false;
+	}
+
 	return true;
 }
 
@@ -3443,7 +3523,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)
 	val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
 	val |= GICR_VPROPBASER_RaWb;
 	val |= GICR_VPROPBASER_InnerShareable;
-	gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
 
 	val  = virt_to_phys(page_address(vpe->vpt_page)) &
 		GENMASK_ULL(51, 16);
@@ -3461,7 +3541,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)
 	val |= GICR_VPENDBASER_PendingLast;
 	val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
 	val |= GICR_VPENDBASER_Valid;
-	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
 }
 
 static void its_vpe_deschedule(struct its_vpe *vpe)
@@ -3661,7 +3741,7 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe,
 	val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
 	val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
 
-	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
 }
 
 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
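
The capacity checks in the new allocate_vpe_l2_table() come down to simple arithmetic on the GICR_VPROPBASER fields: a level-2 page of psz bytes holds psz / (esz * 8) VPE entries, a flat table of npg pages therefore covers npg * psz / (esz * 8) IDs, and with indirection the level-1 index for a given vpe_id is vpe_id >> ilog2(entries per page), bounded by the npg * psz / 8 eight-byte level-1 descriptors. A self-contained sketch of that arithmetic, with illustrative constants rather than values read back from the redistributor:

	#include <linux/log2.h>
	#include <linux/sizes.h>
	#include <linux/types.h>

	/* Illustrative capacity check for one possible VPROPBASER layout. */
	static bool example_vpe_id_fits(u32 vpe_id, bool indirect)
	{
		unsigned int psz = SZ_64K;	/* GIC_PAGE_SIZE_64K        */
		unsigned int esz = 2;		/* i.e. 16-byte VPE entries */
		unsigned int npg = 2;		/* VPROPBASER.Size + 1      */
		unsigned int entries_per_page = psz / (esz * 8);
		unsigned int idx;

		if (!indirect)	/* single flat table: npg pages of entries */
			return vpe_id < npg * entries_per_page;

		/* Level-1 slot whose 8-byte descriptor covers this vpe_id. */
		idx = vpe_id >> ilog2(entries_per_page);
		return idx < npg * psz / 8;
	}
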
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 286f982..c1f7af9 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1839,6 +1839,7 @@ static struct
 	struct redist_region *redist_regs;
 	u32 nr_redist_regions;
 	bool single_redist;
+	int enabled_rdists;
 	u32 maint_irq;
 	int maint_irq_mode;
 	phys_addr_t vcpu_base;
@@ -1933,8 +1934,10 @@ static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
 	 * If GICC is enabled and has valid gicr base address, then it means
 	 * GICR base is presented via GICC
 	 */
-	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
+	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
+		acpi_data.enabled_rdists++;
 		return 0;
+	}
 
 	/*
 	 * It's perfectly valid firmware can pass disabled GICC entry, driver
@@ -1964,8 +1967,10 @@ static int __init gic_acpi_count_gicr_regions(void)
 
 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
 				      gic_acpi_match_gicc, 0);
-	if (count > 0)
+	if (count > 0) {
 		acpi_data.single_redist = true;
+		count = acpi_data.enabled_rdists;
+	}
 
 	return count;
 }
diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile
index 63baf27..d14334f 100644
--- a/drivers/isdn/Makefile
+++ b/drivers/isdn/Makefile
@@ -3,6 +3,6 @@
 
 # Object files in subdirectories
 
-obj-$(CONFIG_ISDN_CAPI)			+= capi/
+obj-$(CONFIG_BT_CMTP)			+= capi/
 obj-$(CONFIG_MISDN)			+= mISDN/
 obj-$(CONFIG_ISDN)			+= hardware/
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index 573fea5..cc408ad 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
-menuconfig ISDN_CAPI
-	tristate "CAPI 2.0 subsystem"
+config ISDN_CAPI
+	def_bool ISDN && BT
 	help
 	  This provides CAPI (the Common ISDN Application Programming
 	  Interface) Version 2.0, a standard making it easy for programs to
@@ -15,42 +15,18 @@
 	  See CONFIG_BT_CMTP for the last remaining regular driver
 	  in the kernel that uses the CAPI subsystem.
 
-if ISDN_CAPI
-
 config CAPI_TRACE
-	bool "CAPI trace support"
-	default y
+	def_bool BT_CMTP
 	help
 	  If you say Y here, the kernelcapi driver can make verbose traces
 	  of CAPI messages. This feature can be enabled/disabled via IOCTL for
 	  every controller (default disabled).
-	  This will increase the size of the kernelcapi module by 20 KB.
-	  If unsure, say Y.
-
-config ISDN_CAPI_CAPI20
-	tristate "CAPI2.0 /dev/capi20 support"
-	help
-	  This option will provide the CAPI 2.0 interface to userspace
-	  applications via /dev/capi20. Applications should use the
-	  standardized libcapi20 to access this functionality.  You should say
-	  Y/M here.
 
 config ISDN_CAPI_MIDDLEWARE
-	bool "CAPI2.0 Middleware support"
-	depends on ISDN_CAPI_CAPI20 && TTY
+	def_bool BT_CMTP && TTY
 	help
 	  This option will enhance the capabilities of the /dev/capi20
 	  interface.  It will provide a means of moving a data connection,
 	  established via the usual /dev/capi20 interface to a special tty
 	  device.  If you want to use pppd with pppdcapiplugin to dial up to
 	  your ISP, say Y here.
-
-config ISDN_CAPI_CAPIDRV_VERBOSE
-	bool "Verbose reason code reporting"
-	depends on ISDN_CAPI_CAPIDRV
-	help
-	  If you say Y here, the capidrv interface will give verbose reasons
-	  for disconnecting. This will increase the size of the kernel by 7 KB.
-	  If unsure, say N.
-
-endif
diff --git a/drivers/isdn/capi/Makefile b/drivers/isdn/capi/Makefile
index d299f3e..352217e 100644
--- a/drivers/isdn/capi/Makefile
+++ b/drivers/isdn/capi/Makefile
@@ -1,17 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-# Makefile for the CAPI subsystem.
+# Makefile for the CAPI subsystem used by BT_CMTP
 
-# Ordering constraints: kernelcapi.o first
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_ISDN_CAPI)			+= kernelcapi.o
-obj-$(CONFIG_ISDN_CAPI_CAPI20)		+= capi.o 
-obj-$(CONFIG_ISDN_CAPI_CAPIDRV)		+= capidrv.o
-
-# Multipart objects.
-
-kernelcapi-y				:= kcapi.o capiutil.o capilib.o
-kernelcapi-$(CONFIG_PROC_FS)		+= kcapi_proc.o
-
-ccflags-y += -I$(srctree)/$(src)/../include -I$(srctree)/$(src)/../include/uapi
+obj-$(CONFIG_BT_CMTP)			+= kernelcapi.o
+kernelcapi-y				:= kcapi.o capiutil.o capi.o kcapi_proc.o
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 1675da3..85767f5 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -39,7 +39,9 @@
 #include <linux/isdn/capiutil.h>
 #include <linux/isdn/capicmd.h>
 
-MODULE_DESCRIPTION("CAPI4Linux: Userspace /dev/capi20 interface");
+#include "kcapi.h"
+
+MODULE_DESCRIPTION("CAPI4Linux: kernel CAPI layer and /dev/capi20 interface");
 MODULE_AUTHOR("Carsten Paeth");
 MODULE_LICENSE("GPL");
 
@@ -1412,15 +1414,22 @@ static int __init capi_init(void)
 {
 	const char *compileinfo;
 	int major_ret;
+	int ret;
+
+	ret = kcapi_init();
+	if (ret)
+		return ret;
 
 	major_ret = register_chrdev(capi_major, "capi20", &capi_fops);
 	if (major_ret < 0) {
 		printk(KERN_ERR "capi20: unable to get major %d\n", capi_major);
+		kcapi_exit();
 		return major_ret;
 	}
 	capi_class = class_create(THIS_MODULE, "capi");
 	if (IS_ERR(capi_class)) {
 		unregister_chrdev(capi_major, "capi20");
+		kcapi_exit();
 		return PTR_ERR(capi_class);
 	}
 
@@ -1430,6 +1439,7 @@ static int __init capi_init(void)
 		device_destroy(capi_class, MKDEV(capi_major, 0));
 		class_destroy(capi_class);
 		unregister_chrdev(capi_major, "capi20");
+		kcapi_exit();
 		return -ENOMEM;
 	}
 
@@ -1455,6 +1465,8 @@ static void __exit capi_exit(void)
 	unregister_chrdev(capi_major, "capi20");
 
 	capinc_tty_exit();
+
+	kcapi_exit();
 }
 
 module_init(capi_init);
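
Because kcapi is no longer a separate module, capi_init() now owns the kcapi_init()/kcapi_exit() pairing and has to undo it on every failure path, which is why each error branch above grew a kcapi_exit() call. The same unwinding is often written with goto labels; purely as an illustration of the equivalent structure, reusing this file's own symbols and simplified to skip the device_create() step (this is not how the patch itself is written):

	static int __init example_capi_init(void)
	{
		int ret;

		ret = kcapi_init();
		if (ret)
			return ret;

		/* Assumes a fixed capi_major, so 0 means success here. */
		ret = register_chrdev(capi_major, "capi20", &capi_fops);
		if (ret < 0)
			goto out_kcapi;

		capi_class = class_create(THIS_MODULE, "capi");
		if (IS_ERR(capi_class)) {
			ret = PTR_ERR(capi_class);
			goto out_chrdev;
		}

		return 0;

	out_chrdev:
		unregister_chrdev(capi_major, "capi20");
	out_kcapi:
		kcapi_exit();
		return ret;
	}
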
diff --git a/drivers/isdn/capi/capilib.c b/drivers/isdn/capi/capilib.c
deleted file mode 100644
index a39ad37..0000000
--- a/drivers/isdn/capi/capilib.c
+++ /dev/null
@@ -1,202 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/isdn/capilli.h>
-
-#define DBG(format, arg...) do {					\
-		printk(KERN_DEBUG "%s: " format "\n" , __func__ , ## arg); \
-	} while (0)
-
-struct capilib_msgidqueue {
-	struct capilib_msgidqueue *next;
-	u16 msgid;
-};
-
-struct capilib_ncci {
-	struct list_head list;
-	u16 applid;
-	u32 ncci;
-	u32 winsize;
-	int   nmsg;
-	struct capilib_msgidqueue *msgidqueue;
-	struct capilib_msgidqueue *msgidlast;
-	struct capilib_msgidqueue *msgidfree;
-	struct capilib_msgidqueue msgidpool[CAPI_MAXDATAWINDOW];
-};
-
-// ---------------------------------------------------------------------------
-// NCCI Handling
-
-static inline void mq_init(struct capilib_ncci *np)
-{
-	u_int i;
-	np->msgidqueue = NULL;
-	np->msgidlast = NULL;
-	np->nmsg = 0;
-	memset(np->msgidpool, 0, sizeof(np->msgidpool));
-	np->msgidfree = &np->msgidpool[0];
-	for (i = 1; i < np->winsize; i++) {
-		np->msgidpool[i].next = np->msgidfree;
-		np->msgidfree = &np->msgidpool[i];
-	}
-}
-
-static inline int mq_enqueue(struct capilib_ncci *np, u16 msgid)
-{
-	struct capilib_msgidqueue *mq;
-	if ((mq = np->msgidfree) == NULL)
-		return 0;
-	np->msgidfree = mq->next;
-	mq->msgid = msgid;
-	mq->next = NULL;
-	if (np->msgidlast)
-		np->msgidlast->next = mq;
-	np->msgidlast = mq;
-	if (!np->msgidqueue)
-		np->msgidqueue = mq;
-	np->nmsg++;
-	return 1;
-}
-
-static inline int mq_dequeue(struct capilib_ncci *np, u16 msgid)
-{
-	struct capilib_msgidqueue **pp;
-	for (pp = &np->msgidqueue; *pp; pp = &(*pp)->next) {
-		if ((*pp)->msgid == msgid) {
-			struct capilib_msgidqueue *mq = *pp;
-			*pp = mq->next;
-			if (mq == np->msgidlast)
-				np->msgidlast = NULL;
-			mq->next = np->msgidfree;
-			np->msgidfree = mq;
-			np->nmsg--;
-			return 1;
-		}
-	}
-	return 0;
-}
-
-void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize)
-{
-	struct capilib_ncci *np;
-
-	np = kmalloc(sizeof(*np), GFP_ATOMIC);
-	if (!np) {
-		printk(KERN_WARNING "capilib_new_ncci: no memory.\n");
-		return;
-	}
-	if (winsize > CAPI_MAXDATAWINDOW) {
-		printk(KERN_ERR "capi_new_ncci: winsize %d too big\n",
-		       winsize);
-		winsize = CAPI_MAXDATAWINDOW;
-	}
-	np->applid = applid;
-	np->ncci = ncci;
-	np->winsize = winsize;
-	mq_init(np);
-	list_add_tail(&np->list, head);
-	DBG("kcapi: appl %d ncci 0x%x up", applid, ncci);
-}
-
-EXPORT_SYMBOL(capilib_new_ncci);
-
-void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci)
-{
-	struct list_head *l;
-	struct capilib_ncci *np;
-
-	list_for_each(l, head) {
-		np = list_entry(l, struct capilib_ncci, list);
-		if (np->applid != applid)
-			continue;
-		if (np->ncci != ncci)
-			continue;
-		printk(KERN_INFO "kcapi: appl %d ncci 0x%x down\n", applid, ncci);
-		list_del(&np->list);
-		kfree(np);
-		return;
-	}
-	printk(KERN_ERR "capilib_free_ncci: ncci 0x%x not found\n", ncci);
-}
-
-EXPORT_SYMBOL(capilib_free_ncci);
-
-void capilib_release_appl(struct list_head *head, u16 applid)
-{
-	struct list_head *l, *n;
-	struct capilib_ncci *np;
-
-	list_for_each_safe(l, n, head) {
-		np = list_entry(l, struct capilib_ncci, list);
-		if (np->applid != applid)
-			continue;
-		printk(KERN_INFO "kcapi: appl %d ncci 0x%x forced down\n", applid, np->ncci);
-		list_del(&np->list);
-		kfree(np);
-	}
-}
-
-EXPORT_SYMBOL(capilib_release_appl);
-
-void capilib_release(struct list_head *head)
-{
-	struct list_head *l, *n;
-	struct capilib_ncci *np;
-
-	list_for_each_safe(l, n, head) {
-		np = list_entry(l, struct capilib_ncci, list);
-		printk(KERN_INFO "kcapi: appl %d ncci 0x%x forced down\n", np->applid, np->ncci);
-		list_del(&np->list);
-		kfree(np);
-	}
-}
-
-EXPORT_SYMBOL(capilib_release);
-
-u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid)
-{
-	struct list_head *l;
-	struct capilib_ncci *np;
-
-	list_for_each(l, head) {
-		np = list_entry(l, struct capilib_ncci, list);
-		if (np->applid != applid)
-			continue;
-		if (np->ncci != ncci)
-			continue;
-
-		if (mq_enqueue(np, msgid) == 0)
-			return CAPI_SENDQUEUEFULL;
-
-		return CAPI_NOERROR;
-	}
-	printk(KERN_ERR "capilib_data_b3_req: ncci 0x%x not found\n", ncci);
-	return CAPI_NOERROR;
-}
-
-EXPORT_SYMBOL(capilib_data_b3_req);
-
-void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid)
-{
-	struct list_head *l;
-	struct capilib_ncci *np;
-
-	list_for_each(l, head) {
-		np = list_entry(l, struct capilib_ncci, list);
-		if (np->applid != applid)
-			continue;
-		if (np->ncci != ncci)
-			continue;
-
-		if (mq_dequeue(np, msgid) == 0) {
-			printk(KERN_ERR "kcapi: msgid %hu ncci 0x%x not on queue\n",
-			       msgid, ncci);
-		}
-		return;
-	}
-	printk(KERN_ERR "capilib_data_b3_conf: ncci 0x%x not found\n", ncci);
-}
-
-EXPORT_SYMBOL(capilib_data_b3_conf);
diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c
index 9846d82..f26bf3c 100644
--- a/drivers/isdn/capi/capiutil.c
+++ b/drivers/isdn/capi/capiutil.c
@@ -20,6 +20,8 @@
 #include <linux/isdn/capiutil.h>
 #include <linux/slab.h>
 
+#include "kcapi.h"
+
 /* from CAPI2.0 DDK AVM Berlin GmbH */
 
 typedef struct {
@@ -245,190 +247,6 @@ static void jumpcstruct(_cmsg *cmsg)
 		}
 	}
 }
-/*-------------------------------------------------------*/
-static void pars_2_message(_cmsg *cmsg)
-{
-
-	for (; TYP != _CEND; cmsg->p++) {
-		switch (TYP) {
-		case _CBYTE:
-			byteTLcpy(cmsg->m + cmsg->l, OFF);
-			cmsg->l++;
-			break;
-		case _CWORD:
-			wordTLcpy(cmsg->m + cmsg->l, OFF);
-			cmsg->l += 2;
-			break;
-		case _CDWORD:
-			dwordTLcpy(cmsg->m + cmsg->l, OFF);
-			cmsg->l += 4;
-			break;
-		case _CSTRUCT:
-			if (*(u8 **) OFF == NULL) {
-				*(cmsg->m + cmsg->l) = '\0';
-				cmsg->l++;
-			} else if (**(_cstruct *) OFF != 0xff) {
-				structTLcpy(cmsg->m + cmsg->l, *(_cstruct *) OFF, 1 + **(_cstruct *) OFF);
-				cmsg->l += 1 + **(_cstruct *) OFF;
-			} else {
-				_cstruct s = *(_cstruct *) OFF;
-				structTLcpy(cmsg->m + cmsg->l, s, 3 + *(u16 *) (s + 1));
-				cmsg->l += 3 + *(u16 *) (s + 1);
-			}
-			break;
-		case _CMSTRUCT:
-/*----- Metastruktur 0 -----*/
-			if (*(_cmstruct *) OFF == CAPI_DEFAULT) {
-				*(cmsg->m + cmsg->l) = '\0';
-				cmsg->l++;
-				jumpcstruct(cmsg);
-			}
-/*----- Metastruktur wird composed -----*/
-			else {
-				unsigned _l = cmsg->l;
-				unsigned _ls;
-				cmsg->l++;
-				cmsg->p++;
-				pars_2_message(cmsg);
-				_ls = cmsg->l - _l - 1;
-				if (_ls < 255)
-					(cmsg->m + _l)[0] = (u8) _ls;
-				else {
-					structTLcpyovl(cmsg->m + _l + 3, cmsg->m + _l + 1, _ls);
-					(cmsg->m + _l)[0] = 0xff;
-					wordTLcpy(cmsg->m + _l + 1, &_ls);
-				}
-			}
-			break;
-		}
-	}
-}
-
-/**
- * capi_cmsg2message() - assemble CAPI 2.0 message from _cmsg structure
- * @cmsg:	_cmsg structure
- * @msg:	buffer for assembled message
- *
- * Return value: 0 for success
- */
-
-unsigned capi_cmsg2message(_cmsg *cmsg, u8 *msg)
-{
-	cmsg->m = msg;
-	cmsg->l = 8;
-	cmsg->p = 0;
-	cmsg->par = capi_cmd2par(cmsg->Command, cmsg->Subcommand);
-	if (!cmsg->par)
-		return 1;	/* invalid command/subcommand */
-
-	pars_2_message(cmsg);
-
-	wordTLcpy(msg + 0, &cmsg->l);
-	byteTLcpy(cmsg->m + 4, &cmsg->Command);
-	byteTLcpy(cmsg->m + 5, &cmsg->Subcommand);
-	wordTLcpy(cmsg->m + 2, &cmsg->ApplId);
-	wordTLcpy(cmsg->m + 6, &cmsg->Messagenumber);
-
-	return 0;
-}
-
-/*-------------------------------------------------------*/
-static void message_2_pars(_cmsg *cmsg)
-{
-	for (; TYP != _CEND; cmsg->p++) {
-
-		switch (TYP) {
-		case _CBYTE:
-			byteTRcpy(cmsg->m + cmsg->l, OFF);
-			cmsg->l++;
-			break;
-		case _CWORD:
-			wordTRcpy(cmsg->m + cmsg->l, OFF);
-			cmsg->l += 2;
-			break;
-		case _CDWORD:
-			dwordTRcpy(cmsg->m + cmsg->l, OFF);
-			cmsg->l += 4;
-			break;
-		case _CSTRUCT:
-			*(u8 **) OFF = cmsg->m + cmsg->l;
-
-			if (cmsg->m[cmsg->l] != 0xff)
-				cmsg->l += 1 + cmsg->m[cmsg->l];
-			else
-				cmsg->l += 3 + *(u16 *) (cmsg->m + cmsg->l + 1);
-			break;
-		case _CMSTRUCT:
-/*----- Metastruktur 0 -----*/
-			if (cmsg->m[cmsg->l] == '\0') {
-				*(_cmstruct *) OFF = CAPI_DEFAULT;
-				cmsg->l++;
-				jumpcstruct(cmsg);
-			} else {
-				unsigned _l = cmsg->l;
-				*(_cmstruct *) OFF = CAPI_COMPOSE;
-				cmsg->l = (cmsg->m + _l)[0] == 255 ? cmsg->l + 3 : cmsg->l + 1;
-				cmsg->p++;
-				message_2_pars(cmsg);
-			}
-			break;
-		}
-	}
-}
-
-/**
- * capi_message2cmsg() - disassemble CAPI 2.0 message into _cmsg structure
- * @cmsg:	_cmsg structure
- * @msg:	buffer for assembled message
- *
- * Return value: 0 for success
- */
-
-unsigned capi_message2cmsg(_cmsg *cmsg, u8 *msg)
-{
-	memset(cmsg, 0, sizeof(_cmsg));
-	cmsg->m = msg;
-	cmsg->l = 8;
-	cmsg->p = 0;
-	byteTRcpy(cmsg->m + 4, &cmsg->Command);
-	byteTRcpy(cmsg->m + 5, &cmsg->Subcommand);
-	cmsg->par = capi_cmd2par(cmsg->Command, cmsg->Subcommand);
-	if (!cmsg->par)
-		return 1;	/* invalid command/subcommand */
-
-	message_2_pars(cmsg);
-
-	wordTRcpy(msg + 0, &cmsg->l);
-	wordTRcpy(cmsg->m + 2, &cmsg->ApplId);
-	wordTRcpy(cmsg->m + 6, &cmsg->Messagenumber);
-
-	return 0;
-}
-
-/**
- * capi_cmsg_header() - initialize header part of _cmsg structure
- * @cmsg:	_cmsg structure
- * @_ApplId:	ApplID field value
- * @_Command:	Command field value
- * @_Subcommand:	Subcommand field value
- * @_Messagenumber:	Message Number field value
- * @_Controller:	Controller/PLCI/NCCI field value
- *
- * Return value: 0 for success
- */
-
-unsigned capi_cmsg_header(_cmsg *cmsg, u16 _ApplId,
-			  u8 _Command, u8 _Subcommand,
-			  u16 _Messagenumber, u32 _Controller)
-{
-	memset(cmsg, 0, sizeof(_cmsg));
-	cmsg->ApplId = _ApplId;
-	cmsg->Command = _Command;
-	cmsg->Subcommand = _Subcommand;
-	cmsg->Messagenumber = _Messagenumber;
-	cmsg->adr.adrController = _Controller;
-	return 0;
-}
 
 /*-------------------------------------------------------*/
 
@@ -561,8 +379,6 @@ static char *pnames[] =
 	/*2f */ "Useruserdata"
 };
 
-
-
 #include <stdarg.h>
 
 /*-------------------------------------------------------*/
@@ -800,37 +616,6 @@ _cdebbuf *capi_message2str(u8 *msg)
 	return cdb;
 }
 
-/**
- * capi_cmsg2str() - format _cmsg structure for printing
- * @cmsg:	_cmsg structure
- *
- * Allocates a CAPI debug buffer and fills it with a printable representation
- * of the CAPI 2.0 message stored in @cmsg by a previous call to
- * capi_cmsg2message() or capi_message2cmsg().
- * Return value: allocated debug buffer, NULL on error
- * The returned buffer should be freed by a call to cdebbuf_free() after use.
- */
-
-_cdebbuf *capi_cmsg2str(_cmsg *cmsg)
-{
-	_cdebbuf *cdb;
-
-	if (!cmsg->m)
-		return NULL;	/* no message */
-	cdb = cdebbuf_alloc();
-	if (!cdb)
-		return NULL;
-	cmsg->l = 8;
-	cmsg->p = 0;
-	cdb = bufprint(cdb, "%s ID=%03d #0x%04x LEN=%04d\n",
-		       capi_cmd2str(cmsg->Command, cmsg->Subcommand),
-		       ((u16 *) cmsg->m)[1],
-		       ((u16 *) cmsg->m)[3],
-		       ((u16 *) cmsg->m)[0]);
-	cdb = protocol_message_2_pars(cdb, cmsg, 1);
-	return cdb;
-}
-
 int __init cdebug_init(void)
 {
 	g_cmsg = kmalloc(sizeof(_cmsg), GFP_KERNEL);
@@ -854,7 +639,7 @@ int __init cdebug_init(void)
 	return 0;
 }
 
-void __exit cdebug_exit(void)
+void cdebug_exit(void)
 {
 	if (g_debbuf)
 		kfree(g_debbuf->buf);
@@ -885,16 +670,8 @@ int __init cdebug_init(void)
 	return 0;
 }
 
-void __exit cdebug_exit(void)
+void cdebug_exit(void)
 {
 }
 
 #endif
-
-EXPORT_SYMBOL(cdebbuf_free);
-EXPORT_SYMBOL(capi_cmsg2message);
-EXPORT_SYMBOL(capi_message2cmsg);
-EXPORT_SYMBOL(capi_cmsg_header);
-EXPORT_SYMBOL(capi_cmd2str);
-EXPORT_SYMBOL(capi_cmsg2str);
-EXPORT_SYMBOL(capi_message2str);
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index a4ceb61..7168778 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -10,8 +10,6 @@
  *
  */
 
-#define AVMB1_COMPAT
-
 #include "kcapi.h"
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -31,18 +29,12 @@
 #include <linux/uaccess.h>
 #include <linux/isdn/capicmd.h>
 #include <linux/isdn/capiutil.h>
-#ifdef AVMB1_COMPAT
-#include <linux/b1lli.h>
-#endif
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
 
 static int showcapimsgs = 0;
 static struct workqueue_struct *kcapi_wq;
 
-MODULE_DESCRIPTION("CAPI4Linux: kernel CAPI layer");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
 module_param(showcapimsgs, uint, 0);
 
 /* ------------------------------------------------------------- */
@@ -61,9 +53,6 @@ static char capi_manufakturer[64] = "AVM Berlin";
 
 #define NCCI2CTRL(ncci)    (((ncci) >> 24) & 0x7f)
 
-LIST_HEAD(capi_drivers);
-DEFINE_MUTEX(capi_drivers_lock);
-
 struct capi_ctr *capi_controller[CAPI_MAXCONTR];
 DEFINE_MUTEX(capi_controller_lock);
 
@@ -71,8 +60,6 @@ struct capi20_appl *capi_applications[CAPI_MAXAPPL];
 
 static int ncontrollers;
 
-static BLOCKING_NOTIFIER_HEAD(ctr_notifier_list);
-
 /* -------- controller ref counting -------------------------------------- */
 
 static inline struct capi_ctr *
@@ -200,8 +187,6 @@ static void notify_up(u32 contr)
 			if (ap)
 				register_appl(ctr, applid, &ap->rparam);
 		}
-
-		wake_up_interruptible_all(&ctr->state_wait_queue);
 	} else
 		printk(KERN_WARNING "%s: invalid contr %d\n", __func__, contr);
 
@@ -229,8 +214,6 @@ static void ctr_down(struct capi_ctr *ctr, int new_state)
 		if (ap)
 			capi_ctr_put(ctr);
 	}
-
-	wake_up_interruptible_all(&ctr->state_wait_queue);
 }
 
 static void notify_down(u32 contr)
@@ -251,36 +234,23 @@ static void notify_down(u32 contr)
 	mutex_unlock(&capi_controller_lock);
 }
 
-static int
-notify_handler(struct notifier_block *nb, unsigned long val, void *v)
-{
-	u32 contr = (long)v;
-
-	switch (val) {
-	case CAPICTR_UP:
-		notify_up(contr);
-		break;
-	case CAPICTR_DOWN:
-		notify_down(contr);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
 static void do_notify_work(struct work_struct *work)
 {
 	struct capictr_event *event =
 		container_of(work, struct capictr_event, work);
 
-	blocking_notifier_call_chain(&ctr_notifier_list, event->type,
-				     (void *)(long)event->controller);
+	switch (event->type) {
+	case CAPICTR_UP:
+		notify_up(event->controller);
+		break;
+	case CAPICTR_DOWN:
+		notify_down(event->controller);
+		break;
+	}
+
 	kfree(event);
 }
 
-/*
- * The notifier will result in adding/deleteing of devices. Devices can
- * only removed in user process, not in bh.
- */
 static int notify_push(unsigned int event_type, u32 controller)
 {
 	struct capictr_event *event = kmalloc(sizeof(*event), GFP_ATOMIC);
@@ -296,18 +266,6 @@ static int notify_push(unsigned int event_type, u32 controller)
 	return 0;
 }
 
-int register_capictr_notifier(struct notifier_block *nb)
-{
-	return blocking_notifier_chain_register(&ctr_notifier_list, nb);
-}
-EXPORT_SYMBOL_GPL(register_capictr_notifier);
-
-int unregister_capictr_notifier(struct notifier_block *nb)
-{
-	return blocking_notifier_chain_unregister(&ctr_notifier_list, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_capictr_notifier);
-
 /* -------- Receiver ------------------------------------------ */
 
 static void recv_handler(struct work_struct *work)
@@ -454,48 +412,6 @@ void capi_ctr_down(struct capi_ctr *ctr)
 
 EXPORT_SYMBOL(capi_ctr_down);
 
-/**
- * capi_ctr_suspend_output() - suspend controller
- * @ctr:	controller descriptor structure.
- *
- * Called by hardware driver to stop data flow.
- *
- * Note: The caller is responsible for synchronizing concurrent state changes
- * as well as invocations of capi_ctr_handle_message.
- */
-
-void capi_ctr_suspend_output(struct capi_ctr *ctr)
-{
-	if (!ctr->blocked) {
-		printk(KERN_DEBUG "kcapi: controller [%03d] suspend\n",
-		       ctr->cnr);
-		ctr->blocked = 1;
-	}
-}
-
-EXPORT_SYMBOL(capi_ctr_suspend_output);
-
-/**
- * capi_ctr_resume_output() - resume controller
- * @ctr:	controller descriptor structure.
- *
- * Called by hardware driver to resume data flow.
- *
- * Note: The caller is responsible for synchronizing concurrent state changes
- * as well as invocations of capi_ctr_handle_message.
- */
-
-void capi_ctr_resume_output(struct capi_ctr *ctr)
-{
-	if (ctr->blocked) {
-		printk(KERN_DEBUG "kcapi: controller [%03d] resumed\n",
-		       ctr->cnr);
-		ctr->blocked = 0;
-	}
-}
-
-EXPORT_SYMBOL(capi_ctr_resume_output);
-
 /* ------------------------------------------------------------- */
 
 /**
@@ -531,7 +447,6 @@ int attach_capi_ctr(struct capi_ctr *ctr)
 	ctr->state = CAPI_CTR_DETECTED;
 	ctr->blocked = 0;
 	ctr->traceflag = showcapimsgs;
-	init_waitqueue_head(&ctr->state_wait_queue);
 
 	sprintf(ctr->procfn, "capi/controllers/%d", ctr->cnr);
 	ctr->procent = proc_create_single_data(ctr->procfn, 0, NULL,
@@ -586,38 +501,6 @@ int detach_capi_ctr(struct capi_ctr *ctr)
 
 EXPORT_SYMBOL(detach_capi_ctr);
 
-/**
- * register_capi_driver() - register CAPI driver
- * @driver:	driver descriptor structure.
- *
- * Called by hardware driver to register itself with the CAPI subsystem.
- */
-
-void register_capi_driver(struct capi_driver *driver)
-{
-	mutex_lock(&capi_drivers_lock);
-	list_add_tail(&driver->list, &capi_drivers);
-	mutex_unlock(&capi_drivers_lock);
-}
-
-EXPORT_SYMBOL(register_capi_driver);
-
-/**
- * unregister_capi_driver() - unregister CAPI driver
- * @driver:	driver descriptor structure.
- *
- * Called by hardware driver to unregister itself from the CAPI subsystem.
- */
-
-void unregister_capi_driver(struct capi_driver *driver)
-{
-	mutex_lock(&capi_drivers_lock);
-	list_del(&driver->list);
-	mutex_unlock(&capi_drivers_lock);
-}
-
-EXPORT_SYMBOL(unregister_capi_driver);
-
 /* ------------------------------------------------------------- */
 /* -------- CAPI2.0 Interface ---------------------------------- */
 /* ------------------------------------------------------------- */
@@ -648,8 +531,6 @@ u16 capi20_isinstalled(void)
 	return ret;
 }
 
-EXPORT_SYMBOL(capi20_isinstalled);
-
 /**
  * capi20_register() - CAPI 2.0 operation CAPI_REGISTER
  * @ap:		CAPI application descriptor structure.
@@ -711,8 +592,6 @@ u16 capi20_register(struct capi20_appl *ap)
 	return CAPI_NOERROR;
 }
 
-EXPORT_SYMBOL(capi20_register);
-
 /**
  * capi20_release() - CAPI 2.0 operation CAPI_RELEASE
  * @ap:		CAPI application descriptor structure.
@@ -755,8 +634,6 @@ u16 capi20_release(struct capi20_appl *ap)
 	return CAPI_NOERROR;
 }
 
-EXPORT_SYMBOL(capi20_release);
-
 /**
  * capi20_put_message() - CAPI 2.0 operation CAPI_PUT_MESSAGE
  * @ap:		CAPI application descriptor structure.
@@ -834,8 +711,6 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
 	return ctr->send_message(ctr, skb);
 }
 
-EXPORT_SYMBOL(capi20_put_message);
-
 /**
  * capi20_get_manufacturer() - CAPI 2.0 operation CAPI_GET_MANUFACTURER
  * @contr:	controller number.
@@ -869,8 +744,6 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
 	return ret;
 }
 
-EXPORT_SYMBOL(capi20_get_manufacturer);
-
 /**
  * capi20_get_version() - CAPI 2.0 operation CAPI_GET_VERSION
  * @contr:	controller number.
@@ -904,8 +777,6 @@ u16 capi20_get_version(u32 contr, struct capi_version *verp)
 	return ret;
 }
 
-EXPORT_SYMBOL(capi20_get_version);
-
 /**
  * capi20_get_serial() - CAPI 2.0 operation CAPI_GET_SERIAL_NUMBER
  * @contr:	controller number.
@@ -939,8 +810,6 @@ u16 capi20_get_serial(u32 contr, u8 *serial)
 	return ret;
 }
 
-EXPORT_SYMBOL(capi20_get_serial);
-
 /**
  * capi20_get_profile() - CAPI 2.0 operation CAPI_GET_PROFILE
  * @contr:	controller number.
@@ -974,209 +843,6 @@ u16 capi20_get_profile(u32 contr, struct capi_profile *profp)
 	return ret;
 }
 
-EXPORT_SYMBOL(capi20_get_profile);
-
-/* Must be called with capi_controller_lock held. */
-static int wait_on_ctr_state(struct capi_ctr *ctr, unsigned int state)
-{
-	DEFINE_WAIT(wait);
-	int retval = 0;
-
-	ctr = capi_ctr_get(ctr);
-	if (!ctr)
-		return -ESRCH;
-
-	for (;;) {
-		prepare_to_wait(&ctr->state_wait_queue, &wait,
-				TASK_INTERRUPTIBLE);
-
-		if (ctr->state == state)
-			break;
-		if (ctr->state == CAPI_CTR_DETACHED) {
-			retval = -ESRCH;
-			break;
-		}
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			break;
-		}
-
-		mutex_unlock(&capi_controller_lock);
-		schedule();
-		mutex_lock(&capi_controller_lock);
-	}
-	finish_wait(&ctr->state_wait_queue, &wait);
-
-	capi_ctr_put(ctr);
-
-	return retval;
-}
-
-#ifdef AVMB1_COMPAT
-static int old_capi_manufacturer(unsigned int cmd, void __user *data)
-{
-	avmb1_loadandconfigdef ldef;
-	avmb1_extcarddef cdef;
-	avmb1_resetdef rdef;
-	capicardparams cparams;
-	struct capi_ctr *ctr;
-	struct capi_driver *driver = NULL;
-	capiloaddata ldata;
-	struct list_head *l;
-	int retval;
-
-	switch (cmd) {
-	case AVMB1_ADDCARD:
-	case AVMB1_ADDCARD_WITH_TYPE:
-		if (cmd == AVMB1_ADDCARD) {
-			if ((retval = copy_from_user(&cdef, data,
-						     sizeof(avmb1_carddef))))
-				return -EFAULT;
-			cdef.cardtype = AVM_CARDTYPE_B1;
-			cdef.cardnr = 0;
-		} else {
-			if ((retval = copy_from_user(&cdef, data,
-						     sizeof(avmb1_extcarddef))))
-				return -EFAULT;
-		}
-		cparams.port = cdef.port;
-		cparams.irq = cdef.irq;
-		cparams.cardnr = cdef.cardnr;
-
-		mutex_lock(&capi_drivers_lock);
-
-		switch (cdef.cardtype) {
-		case AVM_CARDTYPE_B1:
-			list_for_each(l, &capi_drivers) {
-				driver = list_entry(l, struct capi_driver, list);
-				if (strcmp(driver->name, "b1isa") == 0)
-					break;
-			}
-			break;
-		case AVM_CARDTYPE_T1:
-			list_for_each(l, &capi_drivers) {
-				driver = list_entry(l, struct capi_driver, list);
-				if (strcmp(driver->name, "t1isa") == 0)
-					break;
-			}
-			break;
-		default:
-			driver = NULL;
-			break;
-		}
-		if (!driver) {
-			printk(KERN_ERR "kcapi: driver not loaded.\n");
-			retval = -EIO;
-		} else if (!driver->add_card) {
-			printk(KERN_ERR "kcapi: driver has no add card function.\n");
-			retval = -EIO;
-		} else
-			retval = driver->add_card(driver, &cparams);
-
-		mutex_unlock(&capi_drivers_lock);
-		return retval;
-
-	case AVMB1_LOAD:
-	case AVMB1_LOAD_AND_CONFIG:
-
-		if (cmd == AVMB1_LOAD) {
-			if (copy_from_user(&ldef, data,
-					   sizeof(avmb1_loaddef)))
-				return -EFAULT;
-			ldef.t4config.len = 0;
-			ldef.t4config.data = NULL;
-		} else {
-			if (copy_from_user(&ldef, data,
-					   sizeof(avmb1_loadandconfigdef)))
-				return -EFAULT;
-		}
-
-		mutex_lock(&capi_controller_lock);
-
-		ctr = get_capi_ctr_by_nr(ldef.contr);
-		if (!ctr) {
-			retval = -EINVAL;
-			goto load_unlock_out;
-		}
-
-		if (ctr->load_firmware == NULL) {
-			printk(KERN_DEBUG "kcapi: load: no load function\n");
-			retval = -ESRCH;
-			goto load_unlock_out;
-		}
-
-		if (ldef.t4file.len <= 0) {
-			printk(KERN_DEBUG "kcapi: load: invalid parameter: length of t4file is %d ?\n", ldef.t4file.len);
-			retval = -EINVAL;
-			goto load_unlock_out;
-		}
-		if (ldef.t4file.data == NULL) {
-			printk(KERN_DEBUG "kcapi: load: invalid parameter: dataptr is 0\n");
-			retval = -EINVAL;
-			goto load_unlock_out;
-		}
-
-		ldata.firmware.user = 1;
-		ldata.firmware.data = ldef.t4file.data;
-		ldata.firmware.len = ldef.t4file.len;
-		ldata.configuration.user = 1;
-		ldata.configuration.data = ldef.t4config.data;
-		ldata.configuration.len = ldef.t4config.len;
-
-		if (ctr->state != CAPI_CTR_DETECTED) {
-			printk(KERN_INFO "kcapi: load: contr=%d not in detect state\n", ldef.contr);
-			retval = -EBUSY;
-			goto load_unlock_out;
-		}
-		ctr->state = CAPI_CTR_LOADING;
-
-		retval = ctr->load_firmware(ctr, &ldata);
-		if (retval) {
-			ctr->state = CAPI_CTR_DETECTED;
-			goto load_unlock_out;
-		}
-
-		retval = wait_on_ctr_state(ctr, CAPI_CTR_RUNNING);
-
-	load_unlock_out:
-		mutex_unlock(&capi_controller_lock);
-		return retval;
-
-	case AVMB1_RESETCARD:
-		if (copy_from_user(&rdef, data, sizeof(avmb1_resetdef)))
-			return -EFAULT;
-
-		retval = 0;
-
-		mutex_lock(&capi_controller_lock);
-
-		ctr = get_capi_ctr_by_nr(rdef.contr);
-		if (!ctr) {
-			retval = -ESRCH;
-			goto reset_unlock_out;
-		}
-
-		if (ctr->state == CAPI_CTR_DETECTED)
-			goto reset_unlock_out;
-
-		if (ctr->reset_ctr == NULL) {
-			printk(KERN_DEBUG "kcapi: reset: no reset function\n");
-			retval = -ESRCH;
-			goto reset_unlock_out;
-		}
-
-		ctr->reset_ctr(ctr);
-
-		retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED);
-
-	reset_unlock_out:
-		mutex_unlock(&capi_controller_lock);
-		return retval;
-	}
-	return -EINVAL;
-}
-#endif
-
 /**
  * capi20_manufacturer() - CAPI 2.0 operation CAPI_MANUFACTURER
  * @cmd:	command.
@@ -1192,14 +858,6 @@ int capi20_manufacturer(unsigned long cmd, void __user *data)
 	int retval;
 
 	switch (cmd) {
-#ifdef AVMB1_COMPAT
-	case AVMB1_LOAD:
-	case AVMB1_LOAD_AND_CONFIG:
-	case AVMB1_RESETCARD:
-	case AVMB1_GET_CARDINFO:
-	case AVMB1_REMOVECARD:
-		return old_capi_manufacturer(cmd, data);
-#endif
 	case KCAPI_CMD_TRACE:
 	{
 		kcapi_flagdef fdef;
@@ -1222,43 +880,6 @@ int capi20_manufacturer(unsigned long cmd, void __user *data)
 
 		return retval;
 	}
-	case KCAPI_CMD_ADDCARD:
-	{
-		struct list_head *l;
-		struct capi_driver *driver = NULL;
-		capicardparams cparams;
-		kcapi_carddef cdef;
-
-		if ((retval = copy_from_user(&cdef, data, sizeof(cdef))))
-			return -EFAULT;
-
-		cparams.port = cdef.port;
-		cparams.irq = cdef.irq;
-		cparams.membase = cdef.membase;
-		cparams.cardnr = cdef.cardnr;
-		cparams.cardtype = 0;
-		cdef.driver[sizeof(cdef.driver) - 1] = 0;
-
-		mutex_lock(&capi_drivers_lock);
-
-		list_for_each(l, &capi_drivers) {
-			driver = list_entry(l, struct capi_driver, list);
-			if (strcmp(driver->name, cdef.driver) == 0)
-				break;
-		}
-		if (driver == NULL) {
-			printk(KERN_ERR "kcapi: driver \"%s\" not loaded.\n",
-			       cdef.driver);
-			retval = -ESRCH;
-		} else if (!driver->add_card) {
-			printk(KERN_ERR "kcapi: driver \"%s\" has no add card function.\n", cdef.driver);
-			retval = -EIO;
-		} else
-			retval = driver->add_card(driver, &cparams);
-
-		mutex_unlock(&capi_drivers_lock);
-		return retval;
-	}
 
 	default:
 		printk(KERN_ERR "kcapi: manufacturer command %lu unknown.\n",
@@ -1269,8 +890,6 @@ int capi20_manufacturer(unsigned long cmd, void __user *data)
 	return -EINVAL;
 }
 
-EXPORT_SYMBOL(capi20_manufacturer);
-
 /* ------------------------------------------------------------- */
 /* -------- Init & Cleanup ------------------------------------- */
 /* ------------------------------------------------------------- */
@@ -1279,12 +898,7 @@ EXPORT_SYMBOL(capi20_manufacturer);
  * init / exit functions
  */
 
-static struct notifier_block capictr_nb = {
-	.notifier_call = notify_handler,
-	.priority = INT_MAX,
-};
-
-static int __init kcapi_init(void)
+int __init kcapi_init(void)
 {
 	int err;
 
@@ -1292,11 +906,8 @@ static int __init kcapi_init(void)
 	if (!kcapi_wq)
 		return -ENOMEM;
 
-	register_capictr_notifier(&capictr_nb);
-
 	err = cdebug_init();
 	if (err) {
-		unregister_capictr_notifier(&capictr_nb);
 		destroy_workqueue(kcapi_wq);
 		return err;
 	}
@@ -1305,14 +916,10 @@ static int __init kcapi_init(void)
 	return 0;
 }
 
-static void __exit kcapi_exit(void)
+void kcapi_exit(void)
 {
 	kcapi_proc_exit();
 
-	unregister_capictr_notifier(&capictr_nb);
 	cdebug_exit();
 	destroy_workqueue(kcapi_wq);
 }
-
-module_init(kcapi_init);
-module_exit(kcapi_exit);
diff --git a/drivers/isdn/capi/kcapi.h b/drivers/isdn/capi/kcapi.h
index 6d439f9..479623e 100644
--- a/drivers/isdn/capi/kcapi.h
+++ b/drivers/isdn/capi/kcapi.h
@@ -30,22 +30,153 @@ enum {
 	CAPI_CTR_RUNNING  = 3,
 };
 
-extern struct list_head capi_drivers;
-extern struct mutex capi_drivers_lock;
-
 extern struct capi_ctr *capi_controller[CAPI_MAXCONTR];
 extern struct mutex capi_controller_lock;
 
 extern struct capi20_appl *capi_applications[CAPI_MAXAPPL];
 
-#ifdef CONFIG_PROC_FS
-
 void kcapi_proc_init(void);
 void kcapi_proc_exit(void);
 
-#else
+struct capi20_appl {
+	u16 applid;
+	capi_register_params rparam;
+	void (*recv_message)(struct capi20_appl *ap, struct sk_buff *skb);
+	void *private;
 
-static inline void kcapi_proc_init(void) { };
-static inline void kcapi_proc_exit(void) { };
+	/* internal to kernelcapi.o */
+	unsigned long nrecvctlpkt;
+	unsigned long nrecvdatapkt;
+	unsigned long nsentctlpkt;
+	unsigned long nsentdatapkt;
+	struct mutex recv_mtx;
+	struct sk_buff_head recv_queue;
+	struct work_struct recv_work;
+	int release_in_progress;
+};
 
-#endif
+u16 capi20_isinstalled(void);
+u16 capi20_register(struct capi20_appl *ap);
+u16 capi20_release(struct capi20_appl *ap);
+u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb);
+u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]);
+u16 capi20_get_version(u32 contr, struct capi_version *verp);
+u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]);
+u16 capi20_get_profile(u32 contr, struct capi_profile *profp);
+int capi20_manufacturer(unsigned long cmd, void __user *data);
+
+#define CAPICTR_UP			0
+#define CAPICTR_DOWN			1
+
+int kcapi_init(void);
+void kcapi_exit(void);
+
+/*----- basic-type definitions -----*/
+
+typedef __u8 *_cstruct;
+
+typedef enum {
+	CAPI_COMPOSE,
+	CAPI_DEFAULT
+} _cmstruct;
+
+/*
+   The _cmsg structure contains all possible CAPI 2.0 parameters.
+   All parameters are stored here first. The function CAPI_CMSG_2_MESSAGE
+   assembles the parameters and builds CAPI 2.0 conformant messages.
+   CAPI_MESSAGE_2_CMSG disassembles CAPI 2.0 messages and stores the
+   parameters in the _cmsg structure.
+ */
+
+typedef struct {
+	/* Header */
+	__u16 ApplId;
+	__u8 Command;
+	__u8 Subcommand;
+	__u16 Messagenumber;
+
+	/* Parameter */
+	union {
+		__u32 adrController;
+		__u32 adrPLCI;
+		__u32 adrNCCI;
+	} adr;
+
+	_cmstruct AdditionalInfo;
+	_cstruct B1configuration;
+	__u16 B1protocol;
+	_cstruct B2configuration;
+	__u16 B2protocol;
+	_cstruct B3configuration;
+	__u16 B3protocol;
+	_cstruct BC;
+	_cstruct BChannelinformation;
+	_cmstruct BProtocol;
+	_cstruct CalledPartyNumber;
+	_cstruct CalledPartySubaddress;
+	_cstruct CallingPartyNumber;
+	_cstruct CallingPartySubaddress;
+	__u32 CIPmask;
+	__u32 CIPmask2;
+	__u16 CIPValue;
+	__u32 Class;
+	_cstruct ConnectedNumber;
+	_cstruct ConnectedSubaddress;
+	__u32 Data;
+	__u16 DataHandle;
+	__u16 DataLength;
+	_cstruct FacilityConfirmationParameter;
+	_cstruct Facilitydataarray;
+	_cstruct FacilityIndicationParameter;
+	_cstruct FacilityRequestParameter;
+	__u16 FacilitySelector;
+	__u16 Flags;
+	__u32 Function;
+	_cstruct HLC;
+	__u16 Info;
+	_cstruct InfoElement;
+	__u32 InfoMask;
+	__u16 InfoNumber;
+	_cstruct Keypadfacility;
+	_cstruct LLC;
+	_cstruct ManuData;
+	__u32 ManuID;
+	_cstruct NCPI;
+	__u16 Reason;
+	__u16 Reason_B3;
+	__u16 Reject;
+	_cstruct Useruserdata;
+
+	/* intern */
+	unsigned l, p;
+	unsigned char *par;
+	__u8 *m;
+
+	/* buffer to construct message */
+	__u8 buf[180];
+
+} _cmsg;
+
+/*-----------------------------------------------------------------------*/
+
+/*
+ * Debugging / Tracing functions
+ */
+
+char *capi_cmd2str(__u8 cmd, __u8 subcmd);
+
+typedef struct {
+	u_char	*buf;
+	u_char	*p;
+	size_t	size;
+	size_t	pos;
+} _cdebbuf;
+
+#define	CDEBUG_SIZE	1024
+#define	CDEBUG_GSIZE	4096
+
+void cdebbuf_free(_cdebbuf *cdb);
+int cdebug_init(void);
+void cdebug_exit(void);
+
+_cdebbuf *capi_message2str(__u8 *msg);
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index c94bd12..b5ed4ea 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -192,37 +192,15 @@ static const struct seq_operations seq_applstats_ops = {
 
 // ---------------------------------------------------------------------------
 
-static void *capi_driver_start(struct seq_file *seq, loff_t *pos)
-	__acquires(&capi_drivers_lock)
+/* /proc/capi/driver is always empty */
+static ssize_t empty_read(struct file *file, char __user *buf,
+			  size_t size, loff_t *off)
 {
-	mutex_lock(&capi_drivers_lock);
-	return seq_list_start(&capi_drivers, *pos);
-}
-
-static void *capi_driver_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return seq_list_next(v, &capi_drivers, pos);
-}
-
-static void capi_driver_stop(struct seq_file *seq, void *v)
-	__releases(&capi_drivers_lock)
-{
-	mutex_unlock(&capi_drivers_lock);
-}
-
-static int capi_driver_show(struct seq_file *seq, void *v)
-{
-	struct capi_driver *drv = list_entry(v, struct capi_driver, list);
-
-	seq_printf(seq, "%-32s %s\n", drv->name, drv->revision);
 	return 0;
 }
 
-static const struct seq_operations seq_capi_driver_ops = {
-	.start	= capi_driver_start,
-	.next	= capi_driver_next,
-	.stop	= capi_driver_stop,
-	.show	= capi_driver_show,
+static const struct proc_ops empty_proc_ops = {
+	.proc_read	= empty_read,
 };
 
 // ---------------------------------------------------------------------------
@@ -236,10 +214,10 @@ kcapi_proc_init(void)
 	proc_create_seq("capi/contrstats",   0, NULL, &seq_contrstats_ops);
 	proc_create_seq("capi/applications", 0, NULL, &seq_applications_ops);
 	proc_create_seq("capi/applstats",    0, NULL, &seq_applstats_ops);
-	proc_create_seq("capi/driver",       0, NULL, &seq_capi_driver_ops);
+	proc_create("capi/driver",           0, NULL, &empty_proc_ops);
 }
 
-void __exit
+void
 kcapi_proc_exit(void)
 {
 	remove_proc_entry("capi/driver",       NULL);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 4b68520..d82f1de 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -836,6 +836,16 @@
 	  Say Y to enable the LM36274 LED driver for TI LMU devices.
 	  This supports the LED device LM36274.
 
+config LEDS_TPS6105X
+	tristate "LED support for TI TPS6105X"
+	depends on LEDS_CLASS
+	depends on TPS6105X
+	default y if TPS6105X
+	help
+	  This driver supports TPS61050/TPS61052 LED chips.
+	  It is a single boost converter primarily for white LEDs and
+	  audio amplifiers.
+
 comment "LED Triggers"
 source "drivers/leds/trigger/Kconfig"
 
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2da39e8..d7e1107 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -85,6 +85,7 @@
 obj-$(CONFIG_LEDS_TI_LMU_COMMON)	+= leds-ti-lmu-common.o
 obj-$(CONFIG_LEDS_LM3697)		+= leds-lm3697.o
 obj-$(CONFIG_LEDS_LM36274)		+= leds-lm36274.o
+obj-$(CONFIG_LEDS_TPS6105X)		+= leds-tps6105x.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_CR0014114)		+= leds-cr0014114.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 4387743..1fc40e8 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -19,6 +19,7 @@
 #include <linux/spinlock.h>
 #include <linux/timer.h>
 #include <uapi/linux/uleds.h>
+#include <linux/of.h>
 #include "leds.h"
 
 static struct class *leds_class;
@@ -214,6 +215,98 @@ static int led_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume);
 
+/**
+ * of_led_get() - request a LED device via the LED framework
+ * @np: device node to get the LED device from
+ * @index: the index of the LED
+ *
+ * Returns the LED device parsed from the phandle specified in the "leds"
+ * property of a device tree node or a negative error-code on failure.
+ */
+struct led_classdev *of_led_get(struct device_node *np, int index)
+{
+	struct device *led_dev;
+	struct led_classdev *led_cdev;
+	struct device_node *led_node;
+
+	led_node = of_parse_phandle(np, "leds", index);
+	if (!led_node)
+		return ERR_PTR(-ENOENT);
+
+	led_dev = class_find_device_by_of_node(leds_class, led_node);
+	of_node_put(led_node);
+
+	if (!led_dev)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	led_cdev = dev_get_drvdata(led_dev);
+
+	if (!try_module_get(led_cdev->dev->parent->driver->owner))
+		return ERR_PTR(-ENODEV);
+
+	return led_cdev;
+}
+EXPORT_SYMBOL_GPL(of_led_get);
+
+/**
+ * led_put() - release a LED device
+ * @led_cdev: LED device
+ */
+void led_put(struct led_classdev *led_cdev)
+{
+	module_put(led_cdev->dev->parent->driver->owner);
+}
+EXPORT_SYMBOL_GPL(led_put);
+
+static void devm_led_release(struct device *dev, void *res)
+{
+	struct led_classdev **p = res;
+
+	led_put(*p);
+}
+
+/**
+ * devm_of_led_get - Resource-managed request of a LED device
+ * @dev:	LED consumer
+ * @index:	index of the LED to obtain in the consumer
+ *
+ * The device node of the device is parsed to find the requested LED device.
+ * The LED device returned from this function is automatically released
+ * on driver detach.
+ *
+ * @return a pointer to a LED device or ERR_PTR(errno) on failure.
+ */
+struct led_classdev *__must_check devm_of_led_get(struct device *dev,
+						  int index)
+{
+	struct led_classdev *led;
+	struct led_classdev **dr;
+
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
+	/* Not using device tree? */
+	if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
+		return ERR_PTR(-ENOTSUPP);
+
+	led = of_led_get(dev->of_node, index);
+	if (IS_ERR(led))
+		return led;
+
+	dr = devres_alloc(devm_led_release, sizeof(struct led_classdev *),
+			  GFP_KERNEL);
+	if (!dr) {
+		led_put(led);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	*dr = led;
+	devres_add(dev, dr);
+
+	return led;
+}
+EXPORT_SYMBOL_GPL(devm_of_led_get);
+
 static int led_classdev_next_name(const char *init_name, char *name,
 				  size_t len)
 {
@@ -276,8 +369,10 @@ int led_classdev_register_ext(struct device *parent,
 		mutex_unlock(&led_cdev->led_access);
 		return PTR_ERR(led_cdev->dev);
 	}
-	if (init_data && init_data->fwnode)
+	if (init_data && init_data->fwnode) {
 		led_cdev->dev->fwnode = init_data->fwnode;
+		led_cdev->dev->of_node = to_of_node(init_data->fwnode);
+	}
 
 	if (ret)
 		dev_warn(parent, "Led %s renamed to %s due to name collision",
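
The of_led_get()/devm_of_led_get() helpers added above give LED consumer drivers a phandle-based lookup through a "leds" property. A minimal consumer sketch, assuming a platform device whose DT node carries one "leds" phandle; the driver name is made up for illustration:

#include <linux/err.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct led_classdev *led;

	/* first LED referenced by the "leds" phandle of this device's node */
	led = devm_of_led_get(&pdev->dev, 0);
	if (IS_ERR(led))
		return PTR_ERR(led);	/* -EPROBE_DEFER until the LED registers */

	/* the reference is dropped automatically on driver detach */
	led_set_brightness(led, led->max_brightness);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= { .name = "foo-led-consumer" },
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");
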
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index e7ec6bf..bd61a82 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -10,7 +10,7 @@
 
 #include <linux/module.h>
 #include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/delay.h>
 #include <linux/leds.h>
 #include <linux/leds-bd2802.h>
@@ -67,6 +67,7 @@ struct led_state {
 struct bd2802_led {
 	struct bd2802_led_platform_data	*pdata;
 	struct i2c_client		*client;
+	struct gpio_desc		*reset;
 	struct rw_semaphore		rwsem;
 
 	struct led_state		led[2];
@@ -200,7 +201,7 @@ static void bd2802_update_state(struct bd2802_led *led, enum led_ids id,
 		return;
 
 	if (bd2802_is_all_off(led) && !led->adf_on) {
-		gpio_set_value(led->pdata->reset_gpio, 0);
+		gpiod_set_value(led->reset, 1);
 		return;
 	}
 
@@ -226,7 +227,7 @@ static void bd2802_configure(struct bd2802_led *led)
 
 static void bd2802_reset_cancel(struct bd2802_led *led)
 {
-	gpio_set_value(led->pdata->reset_gpio, 1);
+	gpiod_set_value(led->reset, 0);
 	udelay(100);
 	bd2802_configure(led);
 }
@@ -420,7 +421,7 @@ static void bd2802_disable_adv_conf(struct bd2802_led *led)
 						bd2802_addr_attributes[i]);
 
 	if (bd2802_is_all_off(led))
-		gpio_set_value(led->pdata->reset_gpio, 0);
+		gpiod_set_value(led->reset, 1);
 
 	led->adf_on = 0;
 }
@@ -670,8 +671,16 @@ static int bd2802_probe(struct i2c_client *client,
 	pdata = led->pdata = dev_get_platdata(&client->dev);
 	i2c_set_clientdata(client, led);
 
-	/* Configure RESET GPIO (L: RESET, H: RESET cancel) */
-	gpio_request_one(pdata->reset_gpio, GPIOF_OUT_INIT_HIGH, "RGB_RESETB");
+	/*
+	 * Configure RESET GPIO (L: RESET, H: RESET cancel)
+	 *
+	 * We request the reset GPIO as OUT_LOW, which means de-asserted;
+	 * board files specifying this GPIO line in a machine descriptor
+	 * table should take care to specify GPIO_ACTIVE_LOW for this line.
+	 */
+	led->reset = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(led->reset))
+		return PTR_ERR(led->reset);
 
 	/* Tacss = min 0.1ms */
 	udelay(100);
@@ -685,7 +694,7 @@ static int bd2802_probe(struct i2c_client *client,
 		dev_info(&client->dev, "return 0x%02x\n", ret);
 
 	/* To save the power, reset BD2802 after detecting */
-	gpio_set_value(led->pdata->reset_gpio, 0);
+	gpiod_set_value(led->reset, 1);
 
 	/* Default attributes */
 	led->wave_pattern = BD2802_PATTERN_HALF;
@@ -720,7 +729,7 @@ static int bd2802_remove(struct i2c_client *client)
 	struct bd2802_led *led = i2c_get_clientdata(client);
 	int i;
 
-	gpio_set_value(led->pdata->reset_gpio, 0);
+	gpiod_set_value(led->reset, 1);
 	bd2802_unregister_led_classdev(led);
 	if (led->adf_on)
 		bd2802_disable_adv_conf(led);
@@ -750,7 +759,7 @@ static int bd2802_suspend(struct device *dev)
 	struct i2c_client *client = to_i2c_client(dev);
 	struct bd2802_led *led = i2c_get_clientdata(client);
 
-	gpio_set_value(led->pdata->reset_gpio, 0);
+	gpiod_set_value(led->reset, 1);
 
 	return 0;
 }
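
With the conversion above, bd2802 asks gpiolib for a logical "reset" line (GPIOD_OUT_LOW, i.e. de-asserted at probe) and asserts it by writing 1, so the board description must carry the physical polarity. A sketch of a machine descriptor table, assuming a non-DT board file; the gpiochip label, line offset and i2c device name are invented for illustration:

#include <linux/gpio/machine.h>
#include <linux/init.h>

static struct gpiod_lookup_table bd2802_gpios = {
	.dev_id = "0-001a",	/* i2c name of the BD2802 client, illustrative */
	.table = {
		/* the chip is reset by a physical low level, hence ACTIVE_LOW */
		GPIO_LOOKUP("gpiochip0", 10, "reset", GPIO_ACTIVE_LOW),
		{ }
	},
};

void __init board_add_bd2802_gpios(void)
{
	gpiod_add_lookup_table(&bd2802_gpios);
}

Gpiolib then applies the inversion internally, so gpiod_set_value(led->reset, 1) in the driver drives the pin low and actually resets the chip.
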
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
index 491268b..188a57d 100644
--- a/drivers/leds/leds-lm3532.c
+++ b/drivers/leds/leds-lm3532.c
@@ -578,6 +578,12 @@ static int lm3532_parse_node(struct lm3532_data *priv)
 		priv->runtime_ramp_down = lm3532_get_ramp_index(ramp_time);
 
 	device_for_each_child_node(priv->dev, child) {
+		struct led_init_data idata = {
+			.fwnode = child,
+			.default_label = ":",
+			.devicename = priv->client->name,
+		};
+
 		led = &priv->leds[i];
 
 		ret = fwnode_property_read_u32(child, "reg", &control_bank);
@@ -652,7 +658,7 @@ static int lm3532_parse_node(struct lm3532_data *priv)
 		led->led_dev.name = led->label;
 		led->led_dev.brightness_set_blocking = lm3532_brightness_set;
 
-		ret = devm_led_classdev_register(priv->dev, &led->led_dev);
+		ret = devm_led_classdev_register_ext(priv->dev, &led->led_dev, &idata);
 		if (ret) {
 			dev_err(&priv->client->dev, "led register err: %d\n",
 				ret);
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 4805754..4232906 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -106,7 +106,7 @@ static int lm3642_control(struct lm3642_chip_data *chip,
 	ret = regmap_read(chip->regmap, REG_FLAG, &chip->last_flag);
 	if (ret < 0) {
 		dev_err(chip->dev, "Failed to read REG_FLAG Register\n");
-		goto out;
+		return ret;
 	}
 
 	if (chip->last_flag)
@@ -146,11 +146,11 @@ static int lm3642_control(struct lm3642_chip_data *chip,
 		break;
 
 	default:
-		return ret;
+		return -EINVAL;
 	}
 	if (ret < 0) {
 		dev_err(chip->dev, "Failed to write REG_I_CTRL Register\n");
-		goto out;
+		return ret;
 	}
 
 	if (chip->tx_pin)
@@ -159,13 +159,12 @@ static int lm3642_control(struct lm3642_chip_data *chip,
 	ret = regmap_update_bits(chip->regmap, REG_ENABLE,
 				 MODE_BITS_MASK << MODE_BITS_SHIFT,
 				 opmode << MODE_BITS_SHIFT);
-out:
 	return ret;
 }
 
 /* torch */
 
-/* torch pin config for lm3642*/
+/* torch pin config for lm3642 */
 static ssize_t lm3642_torch_pin_store(struct device *dev,
 				      struct device_attribute *attr,
 				      const char *buf, size_t size)
@@ -178,7 +177,7 @@ static ssize_t lm3642_torch_pin_store(struct device *dev,
 
 	ret = kstrtouint(buf, 10, &state);
 	if (ret)
-		goto out_strtoint;
+		return ret;
 	if (state != 0)
 		state = 0x01 << TORCH_PIN_EN_SHIFT;
 
@@ -186,16 +185,12 @@ static ssize_t lm3642_torch_pin_store(struct device *dev,
 	ret = regmap_update_bits(chip->regmap, REG_ENABLE,
 				 TORCH_PIN_EN_MASK << TORCH_PIN_EN_SHIFT,
 				 state);
-	if (ret < 0)
-		goto out;
+	if (ret < 0) {
+		dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
+		return ret;
+	}
 
 	return size;
-out:
-	dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
-	return ret;
-out_strtoint:
-	dev_err(chip->dev, "%s: fail to change str to int\n", __func__);
-	return ret;
 }
 
 static DEVICE_ATTR(torch_pin, S_IWUSR, NULL, lm3642_torch_pin_store);
@@ -229,7 +224,7 @@ static ssize_t lm3642_strobe_pin_store(struct device *dev,
 
 	ret = kstrtouint(buf, 10, &state);
 	if (ret)
-		goto out_strtoint;
+		return ret;
 	if (state != 0)
 		state = 0x01 << STROBE_PIN_EN_SHIFT;
 
@@ -237,16 +232,12 @@ static ssize_t lm3642_strobe_pin_store(struct device *dev,
 	ret = regmap_update_bits(chip->regmap, REG_ENABLE,
 				 STROBE_PIN_EN_MASK << STROBE_PIN_EN_SHIFT,
 				 state);
-	if (ret < 0)
-		goto out;
+	if (ret < 0) {
+		dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
+		return ret;
+	}
 
 	return size;
-out:
-	dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
-	return ret;
-out_strtoint:
-	dev_err(chip->dev, "%s: fail to change str to int\n", __func__);
-	return ret;
 }
 
 static DEVICE_ATTR(strobe_pin, S_IWUSR, NULL, lm3642_strobe_pin_store);
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index 8b40810..28a51ae 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -6,6 +6,7 @@
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/leds.h>
+#include <linux/log2.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/of.h>
@@ -114,6 +115,9 @@ struct lm3692x_led {
 	struct regulator *regulator;
 	int led_enable;
 	int model_id;
+
+	u8 boost_ctrl, brightness_ctrl;
+	bool enabled;
 };
 
 static const struct reg_default lm3692x_reg_defs[] = {
@@ -162,44 +166,14 @@ static int lm3692x_fault_check(struct lm3692x_led *led)
 	return read_buf;
 }
 
-static int lm3692x_brightness_set(struct led_classdev *led_cdev,
-				enum led_brightness brt_val)
-{
-	struct lm3692x_led *led =
-			container_of(led_cdev, struct lm3692x_led, led_dev);
-	int ret;
-	int led_brightness_lsb = (brt_val >> 5);
-
-	mutex_lock(&led->lock);
-
-	ret = lm3692x_fault_check(led);
-	if (ret) {
-		dev_err(&led->client->dev, "Cannot read/clear faults: %d\n",
-			ret);
-		goto out;
-	}
-
-	ret = regmap_write(led->regmap, LM3692X_BRT_MSB, brt_val);
-	if (ret) {
-		dev_err(&led->client->dev, "Cannot write MSB: %d\n", ret);
-		goto out;
-	}
-
-	ret = regmap_write(led->regmap, LM3692X_BRT_LSB, led_brightness_lsb);
-	if (ret) {
-		dev_err(&led->client->dev, "Cannot write LSB: %d\n", ret);
-		goto out;
-	}
-out:
-	mutex_unlock(&led->lock);
-	return ret;
-}
-
-static int lm3692x_init(struct lm3692x_led *led)
+static int lm3692x_leds_enable(struct lm3692x_led *led)
 {
 	int enable_state;
 	int ret, reg_ret;
 
+	if (led->enabled)
+		return 0;
+
 	if (led->regulator) {
 		ret = regulator_enable(led->regulator);
 		if (ret) {
@@ -249,10 +223,7 @@ static int lm3692x_init(struct lm3692x_led *led)
 	if (ret)
 		goto out;
 
-	ret = regmap_write(led->regmap, LM3692X_BOOST_CTRL,
-			LM3692X_BOOST_SW_1MHZ |
-			LM3692X_BOOST_SW_NO_SHIFT |
-			LM3692X_OCP_PROT_1_5A);
+	ret = regmap_write(led->regmap, LM3692X_BOOST_CTRL, led->boost_ctrl);
 	if (ret)
 		goto out;
 
@@ -305,6 +276,7 @@ static int lm3692x_init(struct lm3692x_led *led)
 	ret = regmap_update_bits(led->regmap, LM3692X_EN, LM3692X_ENABLE_MASK,
 				 enable_state | LM3692X_DEVICE_EN);
 
+	led->enabled = true;
 	return ret;
 out:
 	dev_err(&led->client->dev, "Fail writing initialization values\n");
@@ -322,10 +294,92 @@ static int lm3692x_init(struct lm3692x_led *led)
 	return ret;
 }
 
+static int lm3692x_leds_disable(struct lm3692x_led *led)
+{
+	int ret;
+
+	if (!led->enabled)
+		return 0;
+
+	ret = regmap_update_bits(led->regmap, LM3692X_EN, LM3692X_DEVICE_EN, 0);
+	if (ret) {
+		dev_err(&led->client->dev, "Failed to disable regulator: %d\n",
+			ret);
+		return ret;
+	}
+
+	if (led->enable_gpio)
+		gpiod_direction_output(led->enable_gpio, 0);
+
+	if (led->regulator) {
+		ret = regulator_disable(led->regulator);
+		if (ret)
+			dev_err(&led->client->dev,
+				"Failed to disable regulator: %d\n", ret);
+	}
+
+	led->enabled = false;
+	return ret;
+}
+
+static int lm3692x_brightness_set(struct led_classdev *led_cdev,
+				enum led_brightness brt_val)
+{
+	struct lm3692x_led *led =
+			container_of(led_cdev, struct lm3692x_led, led_dev);
+	int ret;
+	int led_brightness_lsb = (brt_val >> 5);
+
+	mutex_lock(&led->lock);
+
+	if (brt_val == 0) {
+		ret = lm3692x_leds_disable(led);
+		goto out;
+	} else {
+		lm3692x_leds_enable(led);
+	}
+
+	ret = lm3692x_fault_check(led);
+	if (ret) {
+		dev_err(&led->client->dev, "Cannot read/clear faults: %d\n",
+			ret);
+		goto out;
+	}
+
+	ret = regmap_write(led->regmap, LM3692X_BRT_MSB, brt_val);
+	if (ret) {
+		dev_err(&led->client->dev, "Cannot write MSB: %d\n", ret);
+		goto out;
+	}
+
+	ret = regmap_write(led->regmap, LM3692X_BRT_LSB, led_brightness_lsb);
+	if (ret) {
+		dev_err(&led->client->dev, "Cannot write LSB: %d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&led->lock);
+	return ret;
+}
+
+static enum led_brightness lm3692x_max_brightness(struct lm3692x_led *led,
+						  u32 max_cur)
+{
+	u32 max_code;
+
+	/* see p.12 of LM36922 data sheet for brightness formula */
+	max_code = ((max_cur * 1000) - 37806) / 12195;
+	if (max_code > 0x7FF)
+		max_code = 0x7FF;
+
+	return max_code >> 3;
+}
+
 static int lm3692x_probe_dt(struct lm3692x_led *led)
 {
 	struct fwnode_handle *child = NULL;
 	struct led_init_data init_data = {};
+	u32 ovp, max_cur;
 	int ret;
 
 	led->enable_gpio = devm_gpiod_get_optional(&led->client->dev,
@@ -350,6 +404,32 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
 		led->regulator = NULL;
 	}
 
+	led->boost_ctrl = LM3692X_BOOST_SW_1MHZ |
+		LM3692X_BOOST_SW_NO_SHIFT |
+		LM3692X_OCP_PROT_1_5A;
+	ret = device_property_read_u32(&led->client->dev,
+				       "ti,ovp-microvolt", &ovp);
+	if (ret) {
+		led->boost_ctrl |= LM3692X_OVP_29V;
+	} else {
+		switch (ovp) {
+		case 17000000:
+			break;
+		case 21000000:
+			led->boost_ctrl |= LM3692X_OVP_21V;
+			break;
+		case 25000000:
+			led->boost_ctrl |= LM3692X_OVP_25V;
+			break;
+		case 29000000:
+			led->boost_ctrl |= LM3692X_OVP_29V;
+			break;
+		default:
+			dev_err(&led->client->dev, "Invalid OVP %d\n", ovp);
+			return -EINVAL;
+		}
+	}
+
 	child = device_get_next_child_node(&led->client->dev, child);
 	if (!child) {
 		dev_err(&led->client->dev, "No LED Child node\n");
@@ -365,6 +445,10 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
 		return ret;
 	}
 
+	ret = fwnode_property_read_u32(child, "led-max-microamp", &max_cur);
+	led->led_dev.max_brightness = ret ? LED_FULL :
+		lm3692x_max_brightness(led, max_cur);
+
 	init_data.fwnode = child;
 	init_data.devicename = led->client->name;
 	init_data.default_label = ":";
@@ -407,7 +491,7 @@ static int lm3692x_probe(struct i2c_client *client,
 	if (ret)
 		return ret;
 
-	ret = lm3692x_init(led);
+	ret = lm3692x_leds_enable(led);
 	if (ret)
 		return ret;
 
@@ -419,23 +503,9 @@ static int lm3692x_remove(struct i2c_client *client)
 	struct lm3692x_led *led = i2c_get_clientdata(client);
 	int ret;
 
-	ret = regmap_update_bits(led->regmap, LM3692X_EN, LM3692X_DEVICE_EN, 0);
-	if (ret) {
-		dev_err(&led->client->dev, "Failed to disable regulator: %d\n",
-			ret);
+	ret = lm3692x_leds_disable(led);
+	if (ret)
 		return ret;
-	}
-
-	if (led->enable_gpio)
-		gpiod_direction_output(led->enable_gpio, 0);
-
-	if (led->regulator) {
-		ret = regulator_disable(led->regulator);
-		if (ret)
-			dev_err(&led->client->dev,
-				"Failed to disable regulator: %d\n", ret);
-	}
-
 	mutex_destroy(&led->lock);
 
 	return 0;
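
To make the led-max-microamp handling above concrete: with a hypothetical led-max-microamp of 20000, lm3692x_max_brightness() computes max_code = (20000 * 1000 - 37806) / 12195 = 1636 (integer division), which is below the 0x7FF ceiling, so max_brightness becomes 1636 >> 3 = 204. At 25000 microamps the same formula yields 2046, i.e. a max_brightness of 255, matching the LED_FULL fallback used when the property is absent.
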
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index 4afc317..66cdc00 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -40,6 +40,8 @@
 #define PCA963X_LED_PWM		0x2	/* Controlled through PWM */
 #define PCA963X_LED_GRP_PWM	0x3	/* Controlled through PWM/GRPPWM */
 
+#define PCA963X_MODE2_OUTDRV	0x04	/* Open-drain or totem pole */
+#define PCA963X_MODE2_INVRT	0x10	/* Normal or inverted direction */
 #define PCA963X_MODE2_DMBLNK	0x20	/* Enable blinking */
 
 #define PCA963X_MODE1		0x00
@@ -438,12 +440,12 @@ static int pca963x_probe(struct i2c_client *client,
 						    PCA963X_MODE2);
 		/* Configure output: open-drain or totem pole (push-pull) */
 		if (pdata->outdrv == PCA963X_OPEN_DRAIN)
-			mode2 |= 0x01;
+			mode2 &= ~PCA963X_MODE2_OUTDRV;
 		else
-			mode2 |= 0x05;
+			mode2 |= PCA963X_MODE2_OUTDRV;
 		/* Configure direction: normal or inverted */
 		if (pdata->dir == PCA963X_INVERTED)
-			mode2 |= 0x10;
+			mode2 |= PCA963X_MODE2_INVRT;
 		i2c_smbus_write_byte_data(pca963x->chip->client, PCA963X_MODE2,
 					  mode2);
 	}
diff --git a/drivers/leds/leds-tps6105x.c b/drivers/leds/leds-tps6105x.c
new file mode 100644
index 0000000..09fd88a
--- /dev/null
+++ b/drivers/leds/leds-tps6105x.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 Sven Van Asbroeck
+ */
+
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/tps6105x.h>
+#include <linux/regmap.h>
+
+struct tps6105x_priv {
+	struct regmap *regmap;
+	struct led_classdev cdev;
+	struct fwnode_handle *fwnode;
+};
+
+static void tps6105x_handle_put(void *data)
+{
+	struct tps6105x_priv *priv = data;
+
+	fwnode_handle_put(priv->fwnode);
+}
+
+static int tps6105x_brightness_set(struct led_classdev *cdev,
+				  enum led_brightness brightness)
+{
+	struct tps6105x_priv *priv = container_of(cdev, struct tps6105x_priv,
+							cdev);
+
+	return regmap_update_bits(priv->regmap, TPS6105X_REG_0,
+				  TPS6105X_REG0_TORCHC_MASK,
+				  brightness << TPS6105X_REG0_TORCHC_SHIFT);
+}
+
+static int tps6105x_led_probe(struct platform_device *pdev)
+{
+	struct tps6105x *tps6105x = dev_get_platdata(&pdev->dev);
+	struct tps6105x_platform_data *pdata = tps6105x->pdata;
+	struct led_init_data init_data = { };
+	struct tps6105x_priv *priv;
+	int ret;
+
+	/* This instance is not set for torch mode so bail out */
+	if (pdata->mode != TPS6105X_MODE_TORCH) {
+		dev_info(&pdev->dev,
+			"chip not in torch mode, exit probe");
+		return -EINVAL;
+	}
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	/* fwnode/devicetree is optional. NULL is allowed for priv->fwnode */
+	priv->fwnode = device_get_next_child_node(pdev->dev.parent, NULL);
+	ret = devm_add_action_or_reset(&pdev->dev, tps6105x_handle_put, priv);
+	if (ret)
+		return ret;
+	priv->regmap = tps6105x->regmap;
+	priv->cdev.brightness_set_blocking = tps6105x_brightness_set;
+	priv->cdev.max_brightness = 7;
+	init_data.devicename = "tps6105x";
+	init_data.default_label = ":torch";
+	init_data.fwnode = priv->fwnode;
+
+	ret = regmap_update_bits(tps6105x->regmap, TPS6105X_REG_0,
+				 TPS6105X_REG0_MODE_MASK |
+					TPS6105X_REG0_TORCHC_MASK,
+				 TPS6105X_REG0_MODE_TORCH <<
+					TPS6105X_REG0_MODE_SHIFT);
+	if (ret)
+		return ret;
+
+	return devm_led_classdev_register_ext(&pdev->dev, &priv->cdev,
+					      &init_data);
+}
+
+static struct platform_driver led_driver = {
+	.probe = tps6105x_led_probe,
+	.driver = {
+		.name = "tps6105x-leds",
+	},
+};
+
+module_platform_driver(led_driver);
+
+MODULE_DESCRIPTION("TPS6105x LED driver");
+MODULE_AUTHOR("Sven Van Asbroeck <TheSven73@gmail.com>");
+MODULE_LICENSE("GPL v2");
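
Since the probe above refuses anything but TPS6105X_MODE_TORCH, a board that wants this LED cell has to hand the parent MFD matching platform data. A sketch of such board code, assuming registration via i2c_register_board_info(); the "tps61052" device name and the 0x33 bus address are illustrative:

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/mfd/tps6105x.h>

static struct tps6105x_platform_data tps6105x_pdata = {
	/* anything other than torch mode makes tps6105x_led_probe() bail out */
	.mode = TPS6105X_MODE_TORCH,
};

static struct i2c_board_info tps6105x_board_info __initdata = {
	I2C_BOARD_INFO("tps61052", 0x33),
	.platform_data = &tps6105x_pdata,
};

When the parent MFD later instantiates its LED cell, the probe reaches this board data through tps6105x->pdata as shown above.
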
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 574e122..cbd46c1 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -178,7 +178,7 @@
 	depends on I2C && I2C_POWERMAC && PPC_PMAC && !PPC_PMAC64
 	help
 	  This driver provides some thermostat and fan control for the
-          iBook G4, and the ATI based aluminium PowerBooks, allowing slightly
+	  iBook G4, and the ATI based aluminium PowerBooks, allowing slightly
 	  better fan behaviour by default, and some manual control.
 
 config WINDFARM
@@ -214,7 +214,7 @@
 	select I2C_POWERMAC
 	help
 	  This driver provides thermal control for the PowerMac9,1
-          which is the recent (SMU based) single CPU desktop G5
+	  which is the recent (SMU based) single CPU desktop G5
 
 config WINDFARM_PM112
 	tristate "Support for thermal management on PowerMac11,2"
@@ -242,7 +242,7 @@
 	depends on PPC_PMAC
 	help
 	  This driver provides some support to control the front panel
-          blue LEDs "vu-meter" of the XServer macs.
+	  blue LEDs "vu-meter" of the XServer macs.
 
 config SENSORS_AMS
 	tristate "Apple Motion Sensor driver"
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 21d532a..d38fb78 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -212,7 +212,7 @@ static int pmu_info_proc_show(struct seq_file *m, void *v);
 static int pmu_irqstats_proc_show(struct seq_file *m, void *v);
 static int pmu_battery_proc_show(struct seq_file *m, void *v);
 static void pmu_pass_intr(unsigned char *data, int len);
-static const struct file_operations pmu_options_proc_fops;
+static const struct proc_ops pmu_options_proc_ops;
 
 #ifdef CONFIG_ADB
 const struct adb_driver via_pmu_driver = {
@@ -573,7 +573,7 @@ static int __init via_pmu_dev_init(void)
 		proc_pmu_irqstats = proc_create_single("interrupts", 0,
 				proc_pmu_root, pmu_irqstats_proc_show);
 		proc_pmu_options = proc_create("options", 0600, proc_pmu_root,
-						&pmu_options_proc_fops);
+						&pmu_options_proc_ops);
 	}
 	return 0;
 }
@@ -974,13 +974,12 @@ static ssize_t pmu_options_proc_write(struct file *file,
 	return fcount;
 }
 
-static const struct file_operations pmu_options_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= pmu_options_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= pmu_options_proc_write,
+static const struct proc_ops pmu_options_proc_ops = {
+	.proc_open	= pmu_options_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= pmu_options_proc_write,
 };
 
 #ifdef CONFIG_ADB
diff --git a/drivers/mailbox/armada-37xx-rwtm-mailbox.c b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
index 19f0867..02b7b28e 100644
--- a/drivers/mailbox/armada-37xx-rwtm-mailbox.c
+++ b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
@@ -143,7 +143,6 @@ static const struct mbox_chan_ops a37xx_mbox_ops = {
 static int armada_37xx_mbox_probe(struct platform_device *pdev)
 {
 	struct a37xx_mbox *mbox;
-	struct resource *regs;
 	struct mbox_chan *chans;
 	int ret;
 
@@ -156,9 +155,7 @@ static int armada_37xx_mbox_probe(struct platform_device *pdev)
 	if (!chans)
 		return -ENOMEM;
 
-	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-	mbox->base = devm_ioremap_resource(&pdev->dev, regs);
+	mbox->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(mbox->base)) {
 		dev_err(&pdev->dev, "ioremap failed\n");
 		return PTR_ERR(mbox->base);
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index adf26a2..74a9849 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -330,6 +330,9 @@ struct cached_dev {
 	 */
 	atomic_t		has_dirty;
 
+#define BCH_CACHE_READA_ALL		0
+#define BCH_CACHE_READA_META_ONLY	1
+	unsigned int		cache_readahead_policy;
 	struct bch_ratelimit	writeback_rate;
 	struct delayed_work	writeback_rate_update;
 
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index c71365e..a50dcfd 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -397,7 +397,8 @@ void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
 
 /* Bkey utility code */
 
-#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, (i)->keys)
+#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, \
+					 (unsigned int)(i)->keys)
 
 static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
 {
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 33ddc52..6730820 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -422,7 +422,8 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
 static void btree_flush_write(struct cache_set *c)
 {
 	struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
-	unsigned int i, nr, ref_nr;
+	unsigned int i, nr;
+	int ref_nr;
 	atomic_t *fifo_front_p, *now_fifo_front_p;
 	size_t mask;
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 73478a9..820d840 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -379,13 +379,20 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 		goto skip;
 
 	/*
-	 * Flag for bypass if the IO is for read-ahead or background,
-	 * unless the read-ahead request is for metadata
+	 * If the bio is for read-ahead or background IO, whether or not to
+	 * bypass it depends on the following rules:
+	 * - If the IO is for metadata, always cache it, no bypass.
+	 * - If the IO is not for metadata, check dc->cache_readahead_policy:
+	 *      BCH_CACHE_READA_ALL: cache it, do not bypass
+	 *      BCH_CACHE_READA_META_ONLY: do not cache it, bypass it
+	 * That is, read-ahead requests for metadata always get cached
 	 * (eg, for gfs2 or xfs).
 	 */
-	if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
-	    !(bio->bi_opf & (REQ_META|REQ_PRIO)))
-		goto skip;
+	if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
+		if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
+		    (dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
+			goto skip;
+	}
 
 	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
 	    bio_sectors(bio) & (c->sb.block_size - 1)) {
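
Editor's sketch (not part of the patch): the bypass decision introduced above, condensed into one predicate. The EX_* constants are illustrative stand-ins; the real flag bits live in <linux/blk_types.h> and the policy values in bcache.h.

	#include <stdbool.h>

	#define EX_REQ_RAHEAD		(1u << 0)	/* illustrative values only */
	#define EX_REQ_BACKGROUND	(1u << 1)
	#define EX_REQ_META		(1u << 2)
	#define EX_REQ_PRIO		(1u << 3)
	#define EX_READA_ALL		0u
	#define EX_READA_META_ONLY	1u

	static bool readahead_should_bypass(unsigned int opf, unsigned int policy)
	{
		/* Only read-ahead/background IO is eligible for bypass at all. */
		if (!(opf & (EX_REQ_RAHEAD | EX_REQ_BACKGROUND)))
			return false;
		/* Metadata read-ahead is always cached (e.g. for gfs2 or xfs). */
		if (opf & (EX_REQ_META | EX_REQ_PRIO))
			return false;
		/* Plain data read-ahead is cached only under the "all" policy. */
		return policy != EX_READA_ALL;
	}
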
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index ba1c937..503aafe 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -109,9 +109,13 @@ int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
 
 void bch_cache_accounting_clear(struct cache_accounting *acc)
 {
-	memset(&acc->total.cache_hits,
-	       0,
-	       sizeof(struct cache_stats));
+	acc->total.cache_hits = 0;
+	acc->total.cache_misses = 0;
+	acc->total.cache_bypass_hits = 0;
+	acc->total.cache_bypass_misses = 0;
+	acc->total.cache_readaheads = 0;
+	acc->total.cache_miss_collisions = 0;
+	acc->total.sectors_bypassed = 0;
 }
 
 void bch_cache_accounting_destroy(struct cache_accounting *acc)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3dea1d5..2749daf 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -609,12 +609,13 @@ int bch_prio_write(struct cache *ca, bool wait)
 	return 0;
 }
 
-static void prio_read(struct cache *ca, uint64_t bucket)
+static int prio_read(struct cache *ca, uint64_t bucket)
 {
 	struct prio_set *p = ca->disk_buckets;
 	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
 	struct bucket *b;
 	unsigned int bucket_nr = 0;
+	int ret = -EIO;
 
 	for (b = ca->buckets;
 	     b < ca->buckets + ca->sb.nbuckets;
@@ -627,11 +628,15 @@ static void prio_read(struct cache *ca, uint64_t bucket)
 			prio_io(ca, bucket, REQ_OP_READ, 0);
 
 			if (p->csum !=
-			    bch_crc64(&p->magic, bucket_bytes(ca) - 8))
+			    bch_crc64(&p->magic, bucket_bytes(ca) - 8)) {
 				pr_warn("bad csum reading priorities");
+				goto out;
+			}
 
-			if (p->magic != pset_magic(&ca->sb))
+			if (p->magic != pset_magic(&ca->sb)) {
 				pr_warn("bad magic reading priorities");
+				goto out;
+			}
 
 			bucket = p->next_bucket;
 			d = p->data;
@@ -640,6 +645,10 @@ static void prio_read(struct cache *ca, uint64_t bucket)
 		b->prio = le16_to_cpu(d->prio);
 		b->gen = b->last_gc = d->gen;
 	}
+
+	ret = 0;
+out:
+	return ret;
 }
 
 /* Bcache device */
@@ -1873,8 +1882,10 @@ static int run_cache_set(struct cache_set *c)
 		j = &list_entry(journal.prev, struct journal_replay, list)->j;
 
 		err = "IO error reading priorities";
-		for_each_cache(ca, c, i)
-			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
+		for_each_cache(ca, c, i) {
+			if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
+				goto err;
+		}
 
 		/*
 		 * If prio_read() fails it'll call cache_set_error and we'll
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 733e2dd..3470fae 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -27,6 +27,12 @@ static const char * const bch_cache_modes[] = {
 	NULL
 };
 
+static const char * const bch_reada_cache_policies[] = {
+	"all",
+	"meta-only",
+	NULL
+};
+
 /* Default is 0 ("auto") */
 static const char * const bch_stop_on_failure_modes[] = {
 	"auto",
@@ -100,6 +106,7 @@ rw_attribute(congested_write_threshold_us);
 rw_attribute(sequential_cutoff);
 rw_attribute(data_csum);
 rw_attribute(cache_mode);
+rw_attribute(readahead_cache_policy);
 rw_attribute(stop_when_cache_set_failed);
 rw_attribute(writeback_metadata);
 rw_attribute(writeback_running);
@@ -168,6 +175,11 @@ SHOW(__bch_cached_dev)
 					       bch_cache_modes,
 					       BDEV_CACHE_MODE(&dc->sb));
 
+	if (attr == &sysfs_readahead_cache_policy)
+		return bch_snprint_string_list(buf, PAGE_SIZE,
+					      bch_reada_cache_policies,
+					      dc->cache_readahead_policy);
+
 	if (attr == &sysfs_stop_when_cache_set_failed)
 		return bch_snprint_string_list(buf, PAGE_SIZE,
 					       bch_stop_on_failure_modes,
@@ -353,6 +365,15 @@ STORE(__cached_dev)
 		}
 	}
 
+	if (attr == &sysfs_readahead_cache_policy) {
+		v = __sysfs_match_string(bch_reada_cache_policies, -1, buf);
+		if (v < 0)
+			return v;
+
+		if ((unsigned int) v != dc->cache_readahead_policy)
+			dc->cache_readahead_policy = v;
+	}
+
 	if (attr == &sysfs_stop_when_cache_set_failed) {
 		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
 		if (v < 0)
@@ -467,6 +488,7 @@ static struct attribute *bch_cached_dev_files[] = {
 	&sysfs_data_csum,
 #endif
 	&sysfs_cache_mode,
+	&sysfs_readahead_cache_policy,
 	&sysfs_stop_when_cache_set_failed,
 	&sysfs_writeback_metadata,
 	&sysfs_writeback_running,
diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
index 8ee019e..9dec3b6 100644
--- a/drivers/md/dm-bio-prison-v2.c
+++ b/drivers/md/dm-bio-prison-v2.c
@@ -324,7 +324,7 @@ static bool __unlock(struct dm_bio_prison_v2 *prison,
 	bio_list_init(&cell->bios);
 
 	if (cell->shared_count) {
-		cell->exclusive_lock = 0;
+		cell->exclusive_lock = false;
 		return false;
 	}
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index eb9782f..c6a5298 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,8 +1,8 @@
 /*
  * Copyright (C) 2003 Jana Saout <jana@saout.de>
  * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
- * Copyright (C) 2006-2017 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2013-2017 Milan Broz <gmazyland@gmail.com>
+ * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com>
  *
  * This file is released under the GPL.
  */
@@ -115,6 +115,11 @@ struct iv_tcw_private {
 	u8 *whitening;
 };
 
+#define ELEPHANT_MAX_KEY_SIZE 32
+struct iv_elephant_private {
+	struct crypto_skcipher *tfm;
+};
+
 /*
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
@@ -125,6 +130,7 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
 enum cipher_flags {
 	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cipher */
 	CRYPT_IV_LARGE_SECTORS,		/* Calculate IV from sector_size, not 512B sectors */
+	CRYPT_ENCRYPT_PREPROCESS,	/* Must preprocess data for encryption (elephant) */
 };
 
 /*
@@ -152,6 +158,7 @@ struct crypt_config {
 		struct iv_benbi_private benbi;
 		struct iv_lmk_private lmk;
 		struct iv_tcw_private tcw;
+		struct iv_elephant_private elephant;
 	} iv_gen_private;
 	u64 iv_offset;
 	unsigned int iv_size;
@@ -285,6 +292,11 @@ static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
  * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
  *        The IV is encrypted little-endian byte-offset (with the same key
  *        and cipher as the volume).
+ *
+ * elephant: The extended version of eboiv with additional Elephant diffuser
+ *           used with Bitlocker CBC mode.
+ *           This mode was used in older Windows systems
+ *           http://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
  */
 
 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
@@ -331,8 +343,14 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
 			      const char *opts)
 {
-	unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
-	int log = ilog2(bs);
+	unsigned bs;
+	int log;
+
+	if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags))
+		bs = crypto_aead_blocksize(any_tfm_aead(cc));
+	else
+		bs = crypto_skcipher_blocksize(any_tfm(cc));
+	log = ilog2(bs);
 
 	/* we need to calculate how far we must shift the sector count
 	 * to get the cipher block count, we use this shift in _gen */
@@ -717,7 +735,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
 	struct crypto_wait wait;
 	int err;
 
-	req = skcipher_request_alloc(any_tfm(cc), GFP_KERNEL | GFP_NOFS);
+	req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
 	if (!req)
 		return -ENOMEM;
 
@@ -734,6 +752,290 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
 	return err;
 }
 
+static void crypt_iv_elephant_dtr(struct crypt_config *cc)
+{
+	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+
+	crypto_free_skcipher(elephant->tfm);
+	elephant->tfm = NULL;
+}
+
+static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
+			    const char *opts)
+{
+	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+	int r;
+
+	elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+	if (IS_ERR(elephant->tfm)) {
+		r = PTR_ERR(elephant->tfm);
+		elephant->tfm = NULL;
+		return r;
+	}
+
+	r = crypt_iv_eboiv_ctr(cc, ti, NULL);
+	if (r)
+		crypt_iv_elephant_dtr(cc);
+	return r;
+}
+
+static void diffuser_disk_to_cpu(u32 *d, size_t n)
+{
+#ifndef __LITTLE_ENDIAN
+	int i;
+
+	for (i = 0; i < n; i++)
+		d[i] = le32_to_cpu((__le32)d[i]);
+#endif
+}
+
+static void diffuser_cpu_to_disk(__le32 *d, size_t n)
+{
+#ifndef __LITTLE_ENDIAN
+	int i;
+
+	for (i = 0; i < n; i++)
+		d[i] = cpu_to_le32((u32)d[i]);
+#endif
+}
+
+static void diffuser_a_decrypt(u32 *d, size_t n)
+{
+	int i, i1, i2, i3;
+
+	for (i = 0; i < 5; i++) {
+		i1 = 0;
+		i2 = n - 2;
+		i3 = n - 5;
+
+		while (i1 < (n - 1)) {
+			d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
+			i1++; i2++; i3++;
+
+			if (i3 >= n)
+				i3 -= n;
+
+			d[i1] += d[i2] ^ d[i3];
+			i1++; i2++; i3++;
+
+			if (i2 >= n)
+				i2 -= n;
+
+			d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
+			i1++; i2++; i3++;
+
+			d[i1] += d[i2] ^ d[i3];
+			i1++; i2++; i3++;
+		}
+	}
+}
+
+static void diffuser_a_encrypt(u32 *d, size_t n)
+{
+	int i, i1, i2, i3;
+
+	for (i = 0; i < 5; i++) {
+		i1 = n - 1;
+		i2 = n - 2 - 1;
+		i3 = n - 5 - 1;
+
+		while (i1 > 0) {
+			d[i1] -= d[i2] ^ d[i3];
+			i1--; i2--; i3--;
+
+			d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
+			i1--; i2--; i3--;
+
+			if (i2 < 0)
+				i2 += n;
+
+			d[i1] -= d[i2] ^ d[i3];
+			i1--; i2--; i3--;
+
+			if (i3 < 0)
+				i3 += n;
+
+			d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
+			i1--; i2--; i3--;
+		}
+	}
+}
+
+static void diffuser_b_decrypt(u32 *d, size_t n)
+{
+	int i, i1, i2, i3;
+
+	for (i = 0; i < 3; i++) {
+		i1 = 0;
+		i2 = 2;
+		i3 = 5;
+
+		while (i1 < (n - 1)) {
+			d[i1] += d[i2] ^ d[i3];
+			i1++; i2++; i3++;
+
+			d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
+			i1++; i2++; i3++;
+
+			if (i2 >= n)
+				i2 -= n;
+
+			d[i1] += d[i2] ^ d[i3];
+			i1++; i2++; i3++;
+
+			if (i3 >= n)
+				i3 -= n;
+
+			d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
+			i1++; i2++; i3++;
+		}
+	}
+}
+
+static void diffuser_b_encrypt(u32 *d, size_t n)
+{
+	int i, i1, i2, i3;
+
+	for (i = 0; i < 3; i++) {
+		i1 = n - 1;
+		i2 = 2 - 1;
+		i3 = 5 - 1;
+
+		while (i1 > 0) {
+			d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
+			i1--; i2--; i3--;
+
+			if (i3 < 0)
+				i3 += n;
+
+			d[i1] -= d[i2] ^ d[i3];
+			i1--; i2--; i3--;
+
+			if (i2 < 0)
+				i2 += n;
+
+			d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
+			i1--; i2--; i3--;
+
+			d[i1] -= d[i2] ^ d[i3];
+			i1--; i2--; i3--;
+		}
+	}
+}
+
+static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
+{
+	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+	u8 *es, *ks, *data, *data2, *data_offset;
+	struct skcipher_request *req;
+	struct scatterlist *sg, *sg2, src, dst;
+	struct crypto_wait wait;
+	int i, r;
+
+	req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
+	es = kzalloc(16, GFP_NOIO); /* Key for AES */
+	ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */
+
+	if (!req || !es || !ks) {
+		r = -ENOMEM;
+		goto out;
+	}
+
+	*(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
+
+	/* E(Ks, e(s)) */
+	sg_init_one(&src, es, 16);
+	sg_init_one(&dst, ks, 16);
+	skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
+	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
+	r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+	if (r)
+		goto out;
+
+	/* E(Ks, e'(s)) */
+	es[15] = 0x80;
+	sg_init_one(&dst, &ks[16], 16);
+	r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+	if (r)
+		goto out;
+
+	sg = crypt_get_sg_data(cc, dmreq->sg_out);
+	data = kmap_atomic(sg_page(sg));
+	data_offset = data + sg->offset;
+
+	/* Cannot modify original bio, copy to sg_out and apply Elephant to it */
+	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+		sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
+		data2 = kmap_atomic(sg_page(sg2));
+		memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
+		kunmap_atomic(data2);
+	}
+
+	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
+		diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
+		diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
+		diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
+		diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
+	}
+
+	for (i = 0; i < (cc->sector_size / 32); i++)
+		crypto_xor(data_offset + i * 32, ks, 32);
+
+	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+		diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
+		diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
+		diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
+		diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
+	}
+
+	kunmap_atomic(data);
+out:
+	kzfree(ks);
+	kzfree(es);
+	skcipher_request_free(req);
+	return r;
+}
+
+static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
+			    struct dm_crypt_request *dmreq)
+{
+	int r;
+
+	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+		r = crypt_iv_elephant(cc, dmreq);
+		if (r)
+			return r;
+	}
+
+	return crypt_iv_eboiv_gen(cc, iv, dmreq);
+}
+
+static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
+				  struct dm_crypt_request *dmreq)
+{
+	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
+		return crypt_iv_elephant(cc, dmreq);
+
+	return 0;
+}
+
+static int crypt_iv_elephant_init(struct crypt_config *cc)
+{
+	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+	int key_offset = cc->key_size - cc->key_extra_size;
+
+	return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
+}
+
+static int crypt_iv_elephant_wipe(struct crypt_config *cc)
+{
+	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+	u8 key[ELEPHANT_MAX_KEY_SIZE];
+
+	memset(key, 0, cc->key_extra_size);
+	return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
+}
+
 static const struct crypt_iv_operations crypt_iv_plain_ops = {
 	.generator = crypt_iv_plain_gen
 };
@@ -787,6 +1089,15 @@ static struct crypt_iv_operations crypt_iv_eboiv_ops = {
 	.generator = crypt_iv_eboiv_gen
 };
 
+static struct crypt_iv_operations crypt_iv_elephant_ops = {
+	.ctr	   = crypt_iv_elephant_ctr,
+	.dtr	   = crypt_iv_elephant_dtr,
+	.init	   = crypt_iv_elephant_init,
+	.wipe	   = crypt_iv_elephant_wipe,
+	.generator = crypt_iv_elephant_gen,
+	.post	   = crypt_iv_elephant_post
+};
+
 /*
  * Integrity extensions
  */
@@ -1103,6 +1414,9 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
 			if (r < 0)
 				return r;
+			/* Data can be already preprocessed in generator */
+			if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
+				sg_in = sg_out;
 			/* Store generated IV in integrity metadata */
 			if (cc->integrity_iv_size)
 				memcpy(tag_iv, org_iv, cc->integrity_iv_size);
@@ -2191,7 +2505,14 @@ static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
 		cc->iv_gen_ops = &crypt_iv_null_ops;
 	else if (strcmp(ivmode, "eboiv") == 0)
 		cc->iv_gen_ops = &crypt_iv_eboiv_ops;
-	else if (strcmp(ivmode, "lmk") == 0) {
+	else if (strcmp(ivmode, "elephant") == 0) {
+		cc->iv_gen_ops = &crypt_iv_elephant_ops;
+		cc->key_parts = 2;
+		cc->key_extra_size = cc->key_size / 2;
+		if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
+			return -EINVAL;
+		set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
+	} else if (strcmp(ivmode, "lmk") == 0) {
 		cc->iv_gen_ops = &crypt_iv_lmk_ops;
 		/*
 		 * Version 2 and 3 is recognised according
@@ -2959,7 +3280,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 19, 0},
+	.version = {1, 20, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
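
Editor's sketch (not part of the patch): the open-coded expressions in the diffuser helpers above, such as (d[x] << 9 | d[x] >> 23), are 32-bit left rotations. Below, one of the five rounds of diffuser A's decrypt pass is re-expressed with a named rotate helper, assuming n is at least 8 words; a 512-byte sector gives n = 128.

	#include <stdint.h>
	#include <stddef.h>

	static inline uint32_t rol32_ex(uint32_t x, unsigned int r)
	{
		return (x << r) | (x >> (32 - r));
	}

	/* One round of diffuser A (decrypt direction); the kernel runs five. */
	static void diffuser_a_decrypt_round(uint32_t *d, size_t n)
	{
		size_t i1 = 0, i2 = n - 2, i3 = n - 5;

		while (i1 < n - 1) {
			d[i1] += d[i2] ^ rol32_ex(d[i3], 9);
			i1++; i2++; i3++;
			if (i3 >= n)
				i3 -= n;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;
			if (i2 >= n)
				i2 -= n;

			d[i1] += d[i2] ^ rol32_ex(d[i3], 13);
			i1++; i2++; i3++;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;
		}
	}
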
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index eb37584..ff03b90 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -207,16 +207,16 @@ static int dust_map_write(struct dust_device *dd, sector_t thisblock,
 			  bool fail_read_on_bb)
 {
 	unsigned long flags;
-	int ret = DM_MAPIO_REMAPPED;
+	int r = DM_MAPIO_REMAPPED;
 
 	if (fail_read_on_bb) {
 		thisblock >>= dd->sect_per_block_shift;
 		spin_lock_irqsave(&dd->dust_lock, flags);
-		ret = __dust_map_write(dd, thisblock);
+		r = __dust_map_write(dd, thisblock);
 		spin_unlock_irqrestore(&dd->dust_lock, flags);
 	}
 
-	return ret;
+	return r;
 }
 
 static int dust_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e0c3279..2bc18c9 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -20,6 +20,7 @@
 #include <linux/pagemap.h>
 #include <linux/slab.h>
 #include <linux/time.h>
+#include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <scsi/scsi_dh.h>
@@ -29,6 +30,9 @@
 #define DM_MSG_PREFIX "multipath"
 #define DM_PG_INIT_DELAY_MSECS 2000
 #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
+#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
+
+static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
 
 /* Path properties */
 struct pgpath {
@@ -91,6 +95,8 @@ struct multipath {
 
 	struct work_struct process_queued_bios;
 	struct bio_list queued_bios;
+
+	struct timer_list nopath_timer;	/* Timeout for queue_if_no_path */
 };
 
 /*
@@ -108,6 +114,7 @@ static void trigger_event(struct work_struct *work);
 static void activate_or_offline_path(struct pgpath *pgpath);
 static void activate_path_work(struct work_struct *work);
 static void process_queued_bios(struct work_struct *work);
+static void queue_if_no_path_timeout_work(struct timer_list *t);
 
 /*-----------------------------------------------
  * Multipath state flags.
@@ -195,6 +202,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 
 		m->ti = ti;
 		ti->private = m;
+
+		timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
 	}
 
 	return m;
@@ -718,6 +727,43 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 }
 
 /*
+ * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
+ * process any queued I/O.
+ */
+static void queue_if_no_path_timeout_work(struct timer_list *t)
+{
+	struct multipath *m = from_timer(m, t, nopath_timer);
+	struct mapped_device *md = dm_table_get_md(m->ti->table);
+
+	DMWARN("queue_if_no_path timeout on %s, failing queued IO", dm_device_name(md));
+	queue_if_no_path(m, false, false);
+}
+
+/*
+ * Enable the queue_if_no_path timeout if necessary.
+ * Called with m->lock held.
+ */
+static void enable_nopath_timeout(struct multipath *m)
+{
+	unsigned long queue_if_no_path_timeout =
+		READ_ONCE(queue_if_no_path_timeout_secs) * HZ;
+
+	lockdep_assert_held(&m->lock);
+
+	if (queue_if_no_path_timeout > 0 &&
+	    atomic_read(&m->nr_valid_paths) == 0 &&
+	    test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+		mod_timer(&m->nopath_timer,
+			  jiffies + queue_if_no_path_timeout);
+	}
+}
+
+static void disable_nopath_timeout(struct multipath *m)
+{
+	del_timer_sync(&m->nopath_timer);
+}
+
+/*
  * An event is triggered whenever a path is taken out of use.
  * Includes path failure and PG bypass.
  */
@@ -1090,6 +1136,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	struct dm_arg_set as;
 	unsigned pg_count = 0;
 	unsigned next_pg_num;
+	unsigned long flags;
 
 	as.argc = argc;
 	as.argv = argv;
@@ -1154,6 +1201,10 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto bad;
 	}
 
+	spin_lock_irqsave(&m->lock, flags);
+	enable_nopath_timeout(m);
+	spin_unlock_irqrestore(&m->lock, flags);
+
 	ti->num_flush_bios = 1;
 	ti->num_discard_bios = 1;
 	ti->num_write_same_bios = 1;
@@ -1208,6 +1259,7 @@ static void multipath_dtr(struct dm_target *ti)
 {
 	struct multipath *m = ti->private;
 
+	disable_nopath_timeout(m);
 	flush_multipath_work(m);
 	free_multipath(m);
 }
@@ -1241,6 +1293,8 @@ static int fail_path(struct pgpath *pgpath)
 
 	schedule_work(&m->trigger_event);
 
+	enable_nopath_timeout(m);
+
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
 
@@ -1291,6 +1345,9 @@ static int reinstate_path(struct pgpath *pgpath)
 		process_queued_io_list(m);
 	}
 
+	if (pgpath->is_active)
+		disable_nopath_timeout(m);
+
 	return r;
 }
 
@@ -1444,7 +1501,7 @@ static void pg_init_done(void *data, int errors)
 		break;
 	case SCSI_DH_RETRY:
 		/* Wait before retrying. */
-		delay_retry = 1;
+		delay_retry = true;
 		/* fall through */
 	case SCSI_DH_IMM_RETRY:
 	case SCSI_DH_RES_TEMP_UNAVAIL:
@@ -1789,6 +1846,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
 	struct dm_dev *dev;
 	struct multipath *m = ti->private;
 	action_fn action;
+	unsigned long flags;
 
 	mutex_lock(&m->work_mutex);
 
@@ -1800,9 +1858,13 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
 	if (argc == 1) {
 		if (!strcasecmp(argv[0], "queue_if_no_path")) {
 			r = queue_if_no_path(m, true, false);
+			spin_lock_irqsave(&m->lock, flags);
+			enable_nopath_timeout(m);
+			spin_unlock_irqrestore(&m->lock, flags);
 			goto out;
 		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
 			r = queue_if_no_path(m, false, false);
+			disable_nopath_timeout(m);
 			goto out;
 		}
 	}
@@ -2065,6 +2127,10 @@ static void __exit dm_multipath_exit(void)
 module_init(dm_multipath_init);
 module_exit(dm_multipath_exit);
 
+module_param_named(queue_if_no_path_timeout_secs,
+		   queue_if_no_path_timeout_secs, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
+
 MODULE_DESCRIPTION(DM_NAME " multipath target");
 MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
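
Editor's note (not part of the patch): the timer added above is only armed when all three conditions hold, which the hypothetical helper below spells out. Since the parameter is declared with S_IRUGO | S_IWUSR, it can presumably also be adjusted at runtime, typically via /sys/module/dm_multipath/parameters/queue_if_no_path_timeout_secs.

	#include <stdbool.h>

	/* Hypothetical condensation of enable_nopath_timeout()'s arming rule. */
	static bool should_arm_nopath_timer(unsigned long timeout_secs,
					    int nr_valid_paths, bool queue_if_no_path)
	{
		return timeout_secs > 0 && nr_valid_paths == 0 && queue_if_no_path;
	}
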
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c412eaa..9a18bef 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -129,7 +129,9 @@ struct raid_dev {
 				  CTR_FLAG_RAID10_COPIES | \
 				  CTR_FLAG_RAID10_FORMAT | \
 				  CTR_FLAG_DELTA_DISKS | \
-				  CTR_FLAG_DATA_OFFSET)
+				  CTR_FLAG_DATA_OFFSET | \
+				  CTR_FLAG_JOURNAL_DEV | \
+				  CTR_FLAG_JOURNAL_MODE)
 
 /* Valid options definitions per raid level... */
 
@@ -3001,7 +3003,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		{ 1, 254, "Cannot understand number of raid devices parameters" }
 	};
 
-	/* Must have <raid_type> */
 	arg = dm_shift_arg(&as);
 	if (!arg) {
 		ti->error = "No arguments";
@@ -3508,8 +3509,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
 	unsigned long recovery;
 	unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
 	unsigned int sz = 0;
-	unsigned int rebuild_disks;
-	unsigned int write_mostly_params = 0;
+	unsigned int rebuild_writemostly_count = 0;
 	sector_t progress, resync_max_sectors, resync_mismatches;
 	enum sync_state state;
 	struct raid_type *rt;
@@ -3593,18 +3593,20 @@ static void raid_status(struct dm_target *ti, status_type_t type,
 	case STATUSTYPE_TABLE:
 		/* Report the table line string you would use to construct this raid set */
 
-		/* Calculate raid parameter count */
-		for (i = 0; i < rs->raid_disks; i++)
-			if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
-				write_mostly_params += 2;
-		rebuild_disks = memweight(rs->rebuild_disks, DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks));
-		raid_param_cnt += rebuild_disks * 2 +
-				  write_mostly_params +
+		/*
+		 * Count any rebuild or writemostly argument pairs and subtract the
+		 * pair the hweight count below adds for the rebuild and writemostly ctr flags.
+		 */
+		for (i = 0; i < rs->raid_disks; i++) {
+			rebuild_writemostly_count += (test_bit(i, (void *) rs->rebuild_disks) ? 2 : 0) +
+						     (test_bit(WriteMostly, &rs->dev[i].rdev.flags) ? 2 : 0);
+		}
+		rebuild_writemostly_count -= (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) ? 2 : 0) +
+					     (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags) ? 2 : 0);
+		/* Calculate raid parameter count from the rebuild/writemostly argument counts above and the ctr flags set. */
+		raid_param_cnt += rebuild_writemostly_count +
 				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
-				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2 +
-				  (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ? 2 : 0) +
-				  (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags) ? 2 : 0);
-
+				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
 		/* Emit table line */
 		/* This has to be in the documented order for userspace! */
 		DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
@@ -3612,11 +3614,10 @@ static void raid_status(struct dm_target *ti, status_type_t type,
 			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
 		if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
 			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
-		if (rebuild_disks)
+		if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags))
 			for (i = 0; i < rs->raid_disks; i++)
-				if (test_bit(rs->dev[i].rdev.raid_disk, (void *) rs->rebuild_disks))
-					DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD),
-							 rs->dev[i].rdev.raid_disk);
+				if (test_bit(i, (void *) rs->rebuild_disks))
+					DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD), i);
 		if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
 			DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
 					  mddev->bitmap_info.daemon_sleep);
@@ -3626,7 +3627,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
 		if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
 			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
 					 mddev->sync_speed_max);
-		if (write_mostly_params)
+		if (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags))
 			for (i = 0; i < rs->raid_disks; i++)
 				if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
 					DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
@@ -4029,7 +4030,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
 	.name = "raid",
-	.version = {1, 15, 0},
+	.version = {1, 15, 1},
 	.module = THIS_MODULE,
 	.ctr = raid_ctr,
 	.dtr = raid_dtr,
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 4fb1a40..6b11a26 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1061,7 +1061,7 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 			DMERR("Read error in exception store: "
 			      "shutting down merge");
 			down_write(&s->lock);
-			s->merge_failed = 1;
+			s->merge_failed = true;
 			up_write(&s->lock);
 		}
 		goto shut;
@@ -1149,7 +1149,7 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
 
 shut:
 	down_write(&s->lock);
-	s->merge_failed = 1;
+	s->merge_failed = true;
 	b = __release_queued_bios_after_merge(s);
 	up_write(&s->lock);
 	error_bios(b);
@@ -1314,7 +1314,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	INIT_LIST_HEAD(&s->list);
 	spin_lock_init(&s->pe_lock);
 	s->state_bits = 0;
-	s->merge_failed = 0;
+	s->merge_failed = false;
 	s->first_merging_chunk = 0;
 	s->num_merging_chunks = 0;
 	bio_list_init(&s->bios_queued_during_merge);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index b88d6d7..fc9947d 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -28,7 +28,7 @@
  *
  * - A hierarchical btree, with 2 levels which effectively maps (thin
  *   dev id, virtual block) -> block_time.  Block time is a 64-bit
- *   field holding the time in the low 24 bits, and block in the top 48
+ *   field holding the time in the low 24 bits, and block in the top 40
  *   bits.
  *
  * BTrees consist solely of btree_nodes, that fill a block.  Some are
@@ -387,16 +387,15 @@ static int subtree_equal(void *context, const void *value1_le, const void *value
  * Variant that is used for in-core only changes or code that
  * shouldn't put the pool in service on its own (e.g. commit).
  */
-static inline void __pmd_write_lock(struct dm_pool_metadata *pmd)
+static inline void pmd_write_lock_in_core(struct dm_pool_metadata *pmd)
 	__acquires(pmd->root_lock)
 {
 	down_write(&pmd->root_lock);
 }
-#define pmd_write_lock_in_core(pmd) __pmd_write_lock((pmd))
 
 static inline void pmd_write_lock(struct dm_pool_metadata *pmd)
 {
-	__pmd_write_lock(pmd);
+	pmd_write_lock_in_core(pmd);
 	if (unlikely(!pmd->in_service))
 		pmd->in_service = true;
 }
@@ -811,7 +810,7 @@ static int __write_changed_details(struct dm_pool_metadata *pmd)
 			return r;
 
 		if (td->open_count)
-			td->changed = 0;
+			td->changed = false;
 		else {
 			list_del(&td->list);
 			kfree(td);
@@ -831,6 +830,7 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
 	 * We need to know if the thin_disk_superblock exceeds a 512-byte sector.
 	 */
 	BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
+	BUG_ON(!rwsem_is_locked(&pmd->root_lock));
 
 	if (unlikely(!pmd->in_service))
 		return 0;
@@ -953,6 +953,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
 		return -EBUSY;
 	}
 
+	pmd_write_lock_in_core(pmd);
 	if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
 		r = __commit_transaction(pmd);
 		if (r < 0)
@@ -961,6 +962,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
 	}
 	if (!pmd->fail_io)
 		__destroy_persistent_data_objects(pmd);
+	pmd_write_unlock(pmd);
 
 	kfree(pmd);
 	return 0;
@@ -1106,7 +1108,7 @@ static int __set_snapshot_details(struct dm_pool_metadata *pmd,
 	if (r)
 		return r;
 
-	td->changed = 1;
+	td->changed = true;
 	td->snapshotted_time = time;
 
 	snap->mapped_blocks = td->mapped_blocks;
@@ -1618,7 +1620,7 @@ static int __insert(struct dm_thin_device *td, dm_block_t block,
 	if (r)
 		return r;
 
-	td->changed = 1;
+	td->changed = true;
 	if (inserted)
 		td->mapped_blocks++;
 
@@ -1649,7 +1651,7 @@ static int __remove(struct dm_thin_device *td, dm_block_t block)
 		return r;
 
 	td->mapped_blocks--;
-	td->changed = 1;
+	td->changed = true;
 
 	return 0;
 }
@@ -1703,7 +1705,7 @@ static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_
 	}
 
 	td->mapped_blocks -= total_count;
-	td->changed = 1;
+	td->changed = true;
 
 	/*
 	 * Reinsert the mapping tree.
@@ -1841,7 +1843,7 @@ int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
 	 * Care is taken to not have commit be what
 	 * triggers putting the thin-pool in-service.
 	 */
-	__pmd_write_lock(pmd);
+	pmd_write_lock_in_core(pmd);
 	if (pmd->fail_io)
 		goto out;
 
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 57626c2..fa8d546 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -231,6 +231,7 @@ struct pool {
 	struct dm_target *ti;	/* Only set if a pool target is bound */
 
 	struct mapped_device *pool_md;
+	struct block_device *data_dev;
 	struct block_device *md_dev;
 	struct dm_pool_metadata *pmd;
 
@@ -281,6 +282,8 @@ struct pool {
 	struct dm_bio_prison_cell **cell_sort_array;
 
 	mempool_t mapping_pool;
+
+	struct bio flush_bio;
 };
 
 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
@@ -328,7 +331,6 @@ struct pool_c {
 	dm_block_t low_water_blocks;
 	struct pool_features requested_pf; /* Features requested during table load */
 	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
-	struct bio flush_bio;
 };
 
 /*
@@ -2924,6 +2926,7 @@ static void __pool_destroy(struct pool *pool)
 	if (pool->next_mapping)
 		mempool_free(pool->next_mapping, &pool->mapping_pool);
 	mempool_exit(&pool->mapping_pool);
+	bio_uninit(&pool->flush_bio);
 	dm_deferred_set_destroy(pool->shared_read_ds);
 	dm_deferred_set_destroy(pool->all_io_ds);
 	kfree(pool);
@@ -2933,6 +2936,7 @@ static struct kmem_cache *_new_mapping_cache;
 
 static struct pool *pool_create(struct mapped_device *pool_md,
 				struct block_device *metadata_dev,
+				struct block_device *data_dev,
 				unsigned long block_size,
 				int read_only, char **error)
 {
@@ -3003,6 +3007,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	pool->low_water_triggered = false;
 	pool->suspended = true;
 	pool->out_of_data_space = false;
+	bio_init(&pool->flush_bio, NULL, 0);
 
 	pool->shared_read_ds = dm_deferred_set_create();
 	if (!pool->shared_read_ds) {
@@ -3040,6 +3045,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	pool->last_commit_jiffies = jiffies;
 	pool->pool_md = pool_md;
 	pool->md_dev = metadata_dev;
+	pool->data_dev = data_dev;
 	__pool_table_insert(pool);
 
 	return pool;
@@ -3081,6 +3087,7 @@ static void __pool_dec(struct pool *pool)
 
 static struct pool *__pool_find(struct mapped_device *pool_md,
 				struct block_device *metadata_dev,
+				struct block_device *data_dev,
 				unsigned long block_size, int read_only,
 				char **error, int *created)
 {
@@ -3091,19 +3098,23 @@ static struct pool *__pool_find(struct mapped_device *pool_md,
 			*error = "metadata device already in use by a pool";
 			return ERR_PTR(-EBUSY);
 		}
+		if (pool->data_dev != data_dev) {
+			*error = "data device already in use by a pool";
+			return ERR_PTR(-EBUSY);
+		}
 		__pool_inc(pool);
 
 	} else {
 		pool = __pool_table_lookup(pool_md);
 		if (pool) {
-			if (pool->md_dev != metadata_dev) {
+			if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) {
 				*error = "different pool cannot replace a pool";
 				return ERR_PTR(-EINVAL);
 			}
 			__pool_inc(pool);
 
 		} else {
-			pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
+			pool = pool_create(pool_md, metadata_dev, data_dev, block_size, read_only, error);
 			*created = 1;
 		}
 	}
@@ -3124,7 +3135,6 @@ static void pool_dtr(struct dm_target *ti)
 	__pool_dec(pt->pool);
 	dm_put_device(ti, pt->metadata_dev);
 	dm_put_device(ti, pt->data_dev);
-	bio_uninit(&pt->flush_bio);
 	kfree(pt);
 
 	mutex_unlock(&dm_thin_pool_table.mutex);
@@ -3203,11 +3213,11 @@ static void metadata_low_callback(void *context)
  */
 static int metadata_pre_commit_callback(void *context)
 {
-	struct pool_c *pt = context;
-	struct bio *flush_bio = &pt->flush_bio;
+	struct pool *pool = context;
+	struct bio *flush_bio = &pool->flush_bio;
 
 	bio_reset(flush_bio);
-	bio_set_dev(flush_bio, pt->data_dev->bdev);
+	bio_set_dev(flush_bio, pool->data_dev);
 	flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	return submit_bio_wait(flush_bio);
@@ -3356,7 +3366,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto out;
 	}
 
-	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
+	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev,
 			   block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
 	if (IS_ERR(pool)) {
 		r = PTR_ERR(pool);
@@ -3381,7 +3391,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	pt->data_dev = data_dev;
 	pt->low_water_blocks = low_water_blocks;
 	pt->adjusted_pf = pt->requested_pf = pf;
-	bio_init(&pt->flush_bio, NULL, 0);
 	ti->num_flush_bios = 1;
 
 	/*
@@ -3408,9 +3417,8 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	if (r)
 		goto out_flags_changed;
 
-	dm_pool_register_pre_commit_callback(pt->pool->pmd,
-					     metadata_pre_commit_callback,
-					     pt);
+	dm_pool_register_pre_commit_callback(pool->pmd,
+					     metadata_pre_commit_callback, pool);
 
 	pt->callbacks.congested_fn = pool_is_congested;
 	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
@@ -4099,7 +4107,7 @@ static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 21, 0},
+	.version = {1, 22, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -4476,7 +4484,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 21, 0},
+	.version = {1, 22, 0},
 	.module	= THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 4fb33e7..0d61e9c 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -611,8 +611,22 @@ static void verity_prefetch_io(struct work_struct *work)
 
 static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
 {
+	sector_t block = io->block;
+	unsigned int n_blocks = io->n_blocks;
 	struct dm_verity_prefetch_work *pw;
 
+	if (v->validated_blocks) {
+		while (n_blocks && test_bit(block, v->validated_blocks)) {
+			block++;
+			n_blocks--;
+		}
+		while (n_blocks && test_bit(block + n_blocks - 1,
+					    v->validated_blocks))
+			n_blocks--;
+		if (!n_blocks)
+			return;
+	}
+
 	pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
 		GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
 
@@ -621,8 +635,8 @@ static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
 
 	INIT_WORK(&pw->work, verity_prefetch_io);
 	pw->v = v;
-	pw->block = io->block;
-	pw->n_blocks = io->n_blocks;
+	pw->block = block;
+	pw->n_blocks = n_blocks;
 	queue_work(v->verify_wq, &pw->work);
 }
 
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 7d727a7..b9e27e3 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -442,7 +442,13 @@ static void writecache_notify_io(unsigned long error, void *context)
 		complete(&endio->c);
 }
 
-static void ssd_commit_flushed(struct dm_writecache *wc)
+static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
+{
+	wait_event(wc->bio_in_progress_wait[direction],
+		   !atomic_read(&wc->bio_in_progress[direction]));
+}
+
+static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
 {
 	struct dm_io_region region;
 	struct dm_io_request req;
@@ -488,17 +494,20 @@ static void ssd_commit_flushed(struct dm_writecache *wc)
 	writecache_notify_io(0, &endio);
 	wait_for_completion_io(&endio.c);
 
+	if (wait_for_ios)
+		writecache_wait_for_ios(wc, WRITE);
+
 	writecache_disk_flush(wc, wc->ssd_dev);
 
 	memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
 }
 
-static void writecache_commit_flushed(struct dm_writecache *wc)
+static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
 {
 	if (WC_MODE_PMEM(wc))
 		wmb();
 	else
-		ssd_commit_flushed(wc);
+		ssd_commit_flushed(wc, wait_for_ios);
 }
 
 static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
@@ -522,12 +531,6 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
 		writecache_error(wc, r, "error flushing metadata: %d", r);
 }
 
-static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
-{
-	wait_event(wc->bio_in_progress_wait[direction],
-		   !atomic_read(&wc->bio_in_progress[direction]));
-}
-
 #define WFE_RETURN_FOLLOWING	1
 #define WFE_LOWEST_SEQ		2
 
@@ -622,7 +625,7 @@ static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry
 	wc->freelist_size++;
 }
 
-static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
+static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
 {
 	struct wc_entry *e;
 
@@ -631,6 +634,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
 		if (unlikely(!wc->current_free))
 			return NULL;
 		e = wc->current_free;
+		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
+			return NULL;
 		next = rb_next(&e->rb_node);
 		rb_erase(&e->rb_node, &wc->freetree);
 		if (unlikely(!next))
@@ -640,6 +645,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
 		if (unlikely(list_empty(&wc->freelist)))
 			return NULL;
 		e = container_of(wc->freelist.next, struct wc_entry, lru);
+		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
+			return NULL;
 		list_del(&e->lru);
 	}
 	wc->freelist_size--;
@@ -724,15 +731,12 @@ static void writecache_flush(struct dm_writecache *wc)
 		e = e2;
 		cond_resched();
 	}
-	writecache_commit_flushed(wc);
-
-	if (!WC_MODE_PMEM(wc))
-		writecache_wait_for_ios(wc, WRITE);
+	writecache_commit_flushed(wc, true);
 
 	wc->seq_count++;
 	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
 	writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count);
-	writecache_commit_flushed(wc);
+	writecache_commit_flushed(wc, false);
 
 	wc->overwrote_committed = false;
 
@@ -756,7 +760,7 @@ static void writecache_flush(struct dm_writecache *wc)
 	}
 
 	if (need_flush_after_free)
-		writecache_commit_flushed(wc);
+		writecache_commit_flushed(wc, false);
 }
 
 static void writecache_flush_work(struct work_struct *work)
@@ -809,7 +813,7 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
 	}
 
 	if (discarded_something)
-		writecache_commit_flushed(wc);
+		writecache_commit_flushed(wc, false);
 }
 
 static bool writecache_wait_for_writeback(struct dm_writecache *wc)
@@ -958,7 +962,7 @@ static void writecache_resume(struct dm_target *ti)
 
 	if (need_flush) {
 		writecache_flush_all_metadata(wc);
-		writecache_commit_flushed(wc);
+		writecache_commit_flushed(wc, false);
 	}
 
 	wc_unlock(wc);
@@ -1193,7 +1197,7 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
 					goto bio_copy;
 				}
 			}
-			e = writecache_pop_from_freelist(wc);
+			e = writecache_pop_from_freelist(wc, (sector_t)-1);
 			if (unlikely(!e)) {
 				writecache_wait_on_freelist(wc);
 				continue;
@@ -1205,9 +1209,26 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
 			if (WC_MODE_PMEM(wc)) {
 				bio_copy_block(wc, bio, memory_data(wc, e));
 			} else {
-				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
+				unsigned bio_size = wc->block_size;
+				sector_t start_cache_sec = cache_sector(wc, e);
+				sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
+
+				while (bio_size < bio->bi_iter.bi_size) {
+					struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
+					if (!f)
+						break;
+					write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
+									(bio_size >> SECTOR_SHIFT), wc->seq_count);
+					writecache_insert_entry(wc, f);
+					wc->uncommitted_blocks++;
+					bio_size += wc->block_size;
+					current_cache_sec += wc->block_size >> SECTOR_SHIFT;
+				}
+
 				bio_set_dev(bio, wc->ssd_dev->bdev);
-				bio->bi_iter.bi_sector = cache_sector(wc, e);
+				bio->bi_iter.bi_sector = start_cache_sec;
+				dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
+
 				if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
 					wc->uncommitted_blocks = 0;
 					queue_work(wc->writeback_wq, &wc->flush_work);
@@ -1342,7 +1363,7 @@ static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *
 			wc->writeback_size--;
 			n_walked++;
 			if (unlikely(n_walked >= ENDIO_LATENCY)) {
-				writecache_commit_flushed(wc);
+				writecache_commit_flushed(wc, false);
 				wc_unlock(wc);
 				wc_lock(wc);
 				n_walked = 0;
@@ -1423,7 +1444,7 @@ static int writecache_endio_thread(void *data)
 			writecache_wait_for_ios(wc, READ);
 		}
 
-		writecache_commit_flushed(wc);
+		writecache_commit_flushed(wc, false);
 
 		wc_unlock(wc);
 	}
@@ -1766,10 +1787,10 @@ static int init_memory(struct dm_writecache *wc)
 		write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
 
 	writecache_flush_all_metadata(wc);
-	writecache_commit_flushed(wc);
+	writecache_commit_flushed(wc, false);
 	pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
 	writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
-	writecache_commit_flushed(wc);
+	writecache_commit_flushed(wc, false);
 
 	return 0;
 }
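
Editor's sketch (not part of the patch) of the coalescing idea introduced in writecache_map() above: extra free-list entries are only claimed while their cache sectors stay physically contiguous with the block just allocated, so one large incoming bio can be submitted to the SSD as a single write. free_sectors[] below is a hypothetical sorted stand-in for the free list.

	#include <stddef.h>

	/* Returns how many consecutive entries (starting at idx) extend a run
	 * of cache blocks that begins at sector 'start'. */
	static size_t count_contiguous(const unsigned long *free_sectors, size_t n,
				       size_t idx, unsigned long start,
				       unsigned long sectors_per_block)
	{
		size_t blocks = 0;
		unsigned long expected = start;

		while (idx < n && free_sectors[idx] == expected) {
			blocks++;
			idx++;
			expected += sectors_per_block;
		}
		return blocks;
	}
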
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 22b3cb0..516c7b6 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -134,6 +134,7 @@ struct dmz_metadata {
 
 	sector_t		zone_bitmap_size;
 	unsigned int		zone_nr_bitmap_blocks;
+	unsigned int		zone_bits_per_mblk;
 
 	unsigned int		nr_bitmap_blocks;
 	unsigned int		nr_map_blocks;
@@ -1161,7 +1162,10 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
 
 	/* Init */
 	zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
-	zmd->zone_nr_bitmap_blocks = zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT;
+	zmd->zone_nr_bitmap_blocks =
+		max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
+	zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks,
+					DMZ_BLOCK_SIZE_BITS);
 
 	/* Allocate zone array */
 	zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
@@ -1956,7 +1960,7 @@ int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
 		dmz_release_mblock(zmd, to_mblk);
 		dmz_release_mblock(zmd, from_mblk);
 
-		chunk_block += DMZ_BLOCK_SIZE_BITS;
+		chunk_block += zmd->zone_bits_per_mblk;
 	}
 
 	to_zone->weight = from_zone->weight;
@@ -2017,7 +2021,7 @@ int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
 
 		/* Set bits */
 		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
-		nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
 
 		count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
 		if (count) {
@@ -2096,7 +2100,7 @@ int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
 
 		/* Clear bits */
 		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
-		nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
 
 		count = dmz_clear_bits((unsigned long *)mblk->data,
 				       bit, nr_bits);
@@ -2156,6 +2160,7 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
 {
 	struct dmz_mblock *mblk;
 	unsigned int bit, set_bit, nr_bits;
+	unsigned int zone_bits = zmd->zone_bits_per_mblk;
 	unsigned long *bitmap;
 	int n = 0;
 
@@ -2170,15 +2175,15 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
 		/* Get offset */
 		bitmap = (unsigned long *) mblk->data;
 		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
-		nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+		nr_bits = min(nr_blocks, zone_bits - bit);
 		if (set)
-			set_bit = find_next_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
+			set_bit = find_next_bit(bitmap, zone_bits, bit);
 		else
-			set_bit = find_next_zero_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
+			set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
 		dmz_release_mblock(zmd, mblk);
 
 		n += set_bit - bit;
-		if (set_bit < DMZ_BLOCK_SIZE_BITS)
+		if (set_bit < zone_bits)
 			break;
 
 		nr_blocks -= nr_bits;
@@ -2281,7 +2286,7 @@ static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
 		/* Count bits in this block */
 		bitmap = mblk->data;
 		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
-		nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
 		n += dmz_count_bits(bitmap, bit, nr_bits);
 
 		dmz_release_mblock(zmd, mblk);
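
Editor's worked example of the clamping above, assuming the usual 4 KiB metadata block (DMZ_BLOCK_SHIFT = 12, so one bitmap block holds 32768 bits): a zone of 16384 blocks needs only 2048 bytes of bitmap, which rounds down to zero full metadata blocks (hence the max_t() to keep at least one), and only 16384 of that block's bits are valid (hence zone_bits_per_mblk).

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t zone_nr_blocks = 16384;		/* example zone size in blocks */
		uint64_t bitmap_bytes = zone_nr_blocks >> 3;	/* one bit per block -> 2048 bytes */
		uint64_t nr_bitmap_blocks = bitmap_bytes >> 12;	/* 4 KiB metadata blocks -> 0 */
		uint64_t bits_per_mblk;

		if (nr_bitmap_blocks < 1)
			nr_bitmap_blocks = 1;			/* the max_t(..., 1, ...) above */
		bits_per_mblk = zone_nr_blocks < 32768 ? zone_nr_blocks : 32768;

		printf("bitmap blocks %llu, valid bits per block %llu\n",
		       (unsigned long long)nr_bitmap_blocks,
		       (unsigned long long)bits_per_mblk);	/* prints 1 and 16384 */
		return 0;
	}
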
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e8f9661..b89f07e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1859,6 +1859,7 @@ static void dm_init_normal_md_queue(struct mapped_device *md)
 	/*
 	 * Initialize aspects of queue that aren't relevant for blk-mq
 	 */
+	md->queue->backing_dev_info->congested_data = md;
 	md->queue->backing_dev_info->congested_fn = dm_any_congested;
 }
 
@@ -1949,7 +1950,12 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->queue)
 		goto bad;
 	md->queue->queuedata = md;
-	md->queue->backing_dev_info->congested_data = md;
+	/*
+	 * Default to the bio-based ->make_request_fn until a DM table is
+	 * loaded and md->type is established. If a request-based table is
+	 * loaded, blk-mq will override it accordingly.
+	 */
+	blk_queue_make_request(md->queue, dm_make_request);
 
 	md->disk = alloc_disk_node(1, md->numa_node_id);
 	if (!md->disk)
@@ -2264,7 +2270,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 	case DM_TYPE_DAX_BIO_BASED:
 	case DM_TYPE_NVME_BIO_BASED:
 		dm_init_normal_md_queue(md);
-		blk_queue_make_request(md->queue, dm_make_request);
 		break;
 	case DM_TYPE_NONE:
 		WARN_ON_ONCE(true);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index e230052..b952bd4 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -364,7 +364,7 @@ static int read_page(struct file *file, unsigned long index,
 	int ret = 0;
 	struct inode *inode = file_inode(file);
 	struct buffer_head *bh;
-	sector_t block;
+	sector_t block, blk_cur;
 
 	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
 		 (unsigned long long)index << PAGE_SHIFT);
@@ -375,17 +375,21 @@ static int read_page(struct file *file, unsigned long index,
 		goto out;
 	}
 	attach_page_buffers(page, bh);
-	block = index << (PAGE_SHIFT - inode->i_blkbits);
+	blk_cur = index << (PAGE_SHIFT - inode->i_blkbits);
 	while (bh) {
+		block = blk_cur;
+
 		if (count == 0)
 			bh->b_blocknr = 0;
 		else {
-			bh->b_blocknr = bmap(inode, block);
-			if (bh->b_blocknr == 0) {
-				/* Cannot use this file! */
+			ret = bmap(inode, &block);
+			if (ret || !block) {
 				ret = -EINVAL;
+				bh->b_blocknr = 0;
 				goto out;
 			}
+
+			bh->b_blocknr = block;
 			bh->b_bdev = inode->i_sb->s_bdev;
 			if (count < (1<<inode->i_blkbits))
 				count = 0;
@@ -399,7 +403,7 @@ static int read_page(struct file *file, unsigned long index,
 			set_buffer_mapped(bh);
 			submit_bh(REQ_OP_READ, 0, bh);
 		}
-		block++;
+		blk_cur++;
 		bh = bh->b_this_page;
 	}
 	page->index = index;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4824d50..469f551 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8279,13 +8279,12 @@ static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
 	return mask;
 }
 
-static const struct file_operations md_seq_fops = {
-	.owner		= THIS_MODULE,
-	.open           = md_seq_open,
-	.read           = seq_read,
-	.llseek         = seq_lseek,
-	.release	= seq_release,
-	.poll		= mdstat_poll,
+static const struct proc_ops mdstat_proc_ops = {
+	.proc_open	= md_seq_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
+	.proc_poll	= mdstat_poll,
 };
 
 int register_md_personality(struct md_personality *p)
@@ -9454,7 +9453,7 @@ static void md_geninit(void)
 {
 	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
 
-	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
+	proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
 }
 
 static int __init md_init(void)
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index bd68f6f..d8b4125 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -380,6 +380,33 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
 	return -ENOSPC;
 }
 
+int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
+	                         dm_block_t begin, dm_block_t end, dm_block_t *b)
+{
+	int r;
+	uint32_t count;
+
+	do {
+		r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b);
+		if (r)
+			break;
+
+		/* double check this block wasn't used in the old transaction */
+		if (*b >= old_ll->nr_blocks)
+			count = 0;
+		else {
+			r = sm_ll_lookup(old_ll, *b, &count);
+			if (r)
+				break;
+
+			if (count)
+				begin = *b + 1;
+		}
+	} while (count);
+
+	return r;
+}
+
 static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
 			int (*mutator)(void *context, uint32_t old, uint32_t *new),
 			void *context, enum allocation_event *ev)
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
index b3078d5..8de63ce 100644
--- a/drivers/md/persistent-data/dm-space-map-common.h
+++ b/drivers/md/persistent-data/dm-space-map-common.h
@@ -109,6 +109,8 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result);
 int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result);
 int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
 			  dm_block_t end, dm_block_t *result);
+int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
+	                         dm_block_t begin, dm_block_t end, dm_block_t *result);
 int sm_ll_insert(struct ll_disk *ll, dm_block_t b, uint32_t ref_count, enum allocation_event *ev);
 int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
 int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
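
Editor's sketch (not the persistent-data API) of the contract behind sm_ll_find_common_free_block() declared above: a block may only be handed out if it is unreferenced in both the committed (old) and the in-flight (new) transaction, which is what sm_disk_new_block() and sm_metadata_new_block_() now rely on. Plain refcount arrays stand in for the on-disk space maps.

	#include <stddef.h>

	/* Returns the first block at or after 'begin' that is free in both
	 * transactions, or -1 (the real code returns -ENOSPC). */
	static long find_common_free(const unsigned int *old_rc, size_t old_n,
				     const unsigned int *new_rc, size_t new_n,
				     size_t begin)
	{
		for (size_t b = begin; b < new_n; b++) {
			if (new_rc[b])
				continue;	/* allocated in the current transaction */
			if (b < old_n && old_rc[b])
				continue;	/* still referenced by the old transaction */
			return (long)b;		/* safe to allocate */
		}
		return -1;
	}
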
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index 32adf6b..bf4c5e2 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -167,8 +167,10 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
 	enum allocation_event ev;
 	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 
-	/* FIXME: we should loop round a couple of times */
-	r = sm_ll_find_free_block(&smd->old_ll, smd->begin, smd->old_ll.nr_blocks, b);
+	/*
+	 * Any block we allocate has to be free in both the old and current ll.
+	 */
+	r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b);
 	if (r)
 		return r;
 
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 2532858..9e3c64e 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -448,7 +448,10 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
 	enum allocation_event ev;
 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
 
-	r = sm_ll_find_free_block(&smm->old_ll, smm->begin, smm->old_ll.nr_blocks, b);
+	/*
+	 * Any block we allocate has to be free in both the old and current ll.
+	 */
+	r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b);
 	if (r)
 		return r;
 
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index db7adff..0c52e1b 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -183,24 +183,6 @@ static void cec_devnode_unregister(struct cec_adapter *adap)
 	put_device(&devnode->dev);
 }
 
-#ifdef CONFIG_CEC_NOTIFIER
-static void cec_cec_notify(struct cec_adapter *adap, u16 pa)
-{
-	cec_s_phys_addr(adap, pa, false);
-}
-
-void cec_register_cec_notifier(struct cec_adapter *adap,
-			       struct cec_notifier *notifier)
-{
-	if (WARN_ON(!cec_is_registered(adap)))
-		return;
-
-	adap->notifier = notifier;
-	cec_notifier_register(adap->notifier, adap, cec_cec_notify);
-}
-EXPORT_SYMBOL_GPL(cec_register_cec_notifier);
-#endif
-
 #ifdef CONFIG_DEBUG_FS
 static ssize_t cec_error_inj_write(struct file *file,
 	const char __user *ubuf, size_t count, loff_t *ppos)
@@ -416,8 +398,7 @@ void cec_unregister_adapter(struct cec_adapter *adap)
 #endif
 	debugfs_remove_recursive(adap->cec_dir);
 #ifdef CONFIG_CEC_NOTIFIER
-	if (adap->notifier)
-		cec_notifier_unregister(adap->notifier);
+	cec_notifier_cec_adap_unregister(adap->notifier, adap);
 #endif
 	cec_devnode_unregister(adap);
 }
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index 7cf42b1..4a841be 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -25,7 +25,6 @@ struct cec_notifier {
 	struct cec_connector_info conn_info;
 	const char *conn_name;
 	struct cec_adapter *cec_adap;
-	void (*callback)(struct cec_adapter *adap, u16 pa);
 
 	u16 phys_addr;
 };
@@ -81,13 +80,12 @@ static void cec_notifier_release(struct kref *kref)
 	kfree(n);
 }
 
-void cec_notifier_put(struct cec_notifier *n)
+static void cec_notifier_put(struct cec_notifier *n)
 {
 	mutex_lock(&cec_notifiers_lock);
 	kref_put(&n->kref, cec_notifier_release);
 	mutex_unlock(&cec_notifiers_lock);
 }
-EXPORT_SYMBOL_GPL(cec_notifier_put);
 
 struct cec_notifier *
 cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name,
@@ -162,7 +160,6 @@ void cec_notifier_cec_adap_unregister(struct cec_notifier *n,
 	mutex_lock(&n->lock);
 	adap->notifier = NULL;
 	n->cec_adap = NULL;
-	n->callback = NULL;
 	mutex_unlock(&n->lock);
 	cec_notifier_put(n);
 }
@@ -175,9 +172,7 @@ void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa)
 
 	mutex_lock(&n->lock);
 	n->phys_addr = pa;
-	if (n->callback)
-		n->callback(n->cec_adap, n->phys_addr);
-	else if (n->cec_adap)
+	if (n->cec_adap)
 		cec_s_phys_addr(n->cec_adap, n->phys_addr, false);
 	mutex_unlock(&n->lock);
 }
@@ -198,34 +193,6 @@ void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
 }
 EXPORT_SYMBOL_GPL(cec_notifier_set_phys_addr_from_edid);
 
-void cec_notifier_register(struct cec_notifier *n,
-			   struct cec_adapter *adap,
-			   void (*callback)(struct cec_adapter *adap, u16 pa))
-{
-	kref_get(&n->kref);
-	mutex_lock(&n->lock);
-	n->cec_adap = adap;
-	n->callback = callback;
-	n->callback(adap, n->phys_addr);
-	mutex_unlock(&n->lock);
-}
-EXPORT_SYMBOL_GPL(cec_notifier_register);
-
-void cec_notifier_unregister(struct cec_notifier *n)
-{
-	/* Do nothing unless cec_notifier_register was called first */
-	if (!n->callback)
-		return;
-
-	mutex_lock(&n->lock);
-	n->callback = NULL;
-	n->cec_adap->notifier = NULL;
-	n->cec_adap = NULL;
-	mutex_unlock(&n->lock);
-	cec_notifier_put(n);
-}
-EXPORT_SYMBOL_GPL(cec_notifier_unregister);
-
 struct device *cec_notifier_parse_hdmi_phandle(struct device *dev)
 {
 	struct platform_device *hdmi_pdev;
diff --git a/drivers/media/cec/cec-priv.h b/drivers/media/cec/cec-priv.h
index 7bdf855..9bbd050 100644
--- a/drivers/media/cec/cec-priv.h
+++ b/drivers/media/cec/cec-priv.h
@@ -9,7 +9,7 @@
 #define _CEC_PRIV_H
 
 #include <linux/cec-funcs.h>
-#include <media/cec.h>
+#include <media/cec-notifier.h>
 
 #define dprintk(lvl, fmt, arg...)					\
 	do {								\
diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
index d161220..ccd15b4 100644
--- a/drivers/media/common/saa7146/saa7146_video.c
+++ b/drivers/media/common/saa7146/saa7146_video.c
@@ -345,7 +345,8 @@ static int video_begin(struct saa7146_fh *fh)
 
 	fmt = saa7146_format_by_fourcc(dev, vv->video_fmt.pixelformat);
 	/* we need to have a valid format set here */
-	BUG_ON(NULL == fmt);
+	if (!fmt)
+		return -EINVAL;
 
 	if (0 != (fmt->flags & FORMAT_IS_PLANAR)) {
 		resource = RESOURCE_DMA1_HPS|RESOURCE_DMA2_CLP|RESOURCE_DMA3_BRS;
@@ -398,7 +399,8 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
 
 	fmt = saa7146_format_by_fourcc(dev, vv->video_fmt.pixelformat);
 	/* we need to have a valid format set here */
-	BUG_ON(NULL == fmt);
+	if (!fmt)
+		return -EINVAL;
 
 	if (0 != (fmt->flags & FORMAT_IS_PLANAR)) {
 		resource = RESOURCE_DMA1_HPS|RESOURCE_DMA2_CLP|RESOURCE_DMA3_BRS;
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 44cd0e5..d0c9dff 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -335,13 +335,6 @@ static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
 	vb2_dc_put(dbuf->priv);
 }
 
-static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
-{
-	struct vb2_dc_buf *buf = dbuf->priv;
-
-	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
-}
-
 static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
 {
 	struct vb2_dc_buf *buf = dbuf->priv;
@@ -360,7 +353,6 @@ static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
 	.detach = vb2_dc_dmabuf_ops_detach,
 	.map_dma_buf = vb2_dc_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
-	.map = vb2_dc_dmabuf_ops_kmap,
 	.vmap = vb2_dc_dmabuf_ops_vmap,
 	.mmap = vb2_dc_dmabuf_ops_mmap,
 	.release = vb2_dc_dmabuf_ops_release,
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index ed706b2..6db60e9 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -470,13 +470,6 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
 	vb2_dma_sg_put(dbuf->priv);
 }
 
-static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
-{
-	struct vb2_dma_sg_buf *buf = dbuf->priv;
-
-	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
-}
-
 static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
 {
 	struct vb2_dma_sg_buf *buf = dbuf->priv;
@@ -495,7 +488,6 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
 	.detach = vb2_dma_sg_dmabuf_ops_detach,
 	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
-	.map = vb2_dma_sg_dmabuf_ops_kmap,
 	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
 	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
 	.release = vb2_dma_sg_dmabuf_ops_release,
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index e652f43..eb5d5db 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -146,7 +146,7 @@ static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
 		 * and the timecode field and flag if needed.
 		 */
 		if (q->copy_timestamp)
-			vb->timestamp = v4l2_timeval_to_ns(&b->timestamp);
+			vb->timestamp = v4l2_buffer_get_timestamp(b);
 		vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
 		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
 			vbuf->timecode = b->timecode;
@@ -482,7 +482,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
 
 	b->flags = vbuf->flags;
 	b->field = vbuf->field;
-	b->timestamp = ns_to_timeval(vb->timestamp);
+	v4l2_buffer_set_timestamp(b, vb->timestamp);
 	b->timecode = vbuf->timecode;
 	b->sequence = vbuf->sequence;
 	b->reserved2 = 0;
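
The two hunks above swap the open-coded timeval conversions for the v4l2_buffer_get_timestamp()/v4l2_buffer_set_timestamp() helpers; conceptually both directions are the same sec/usec to 64-bit nanosecond arithmetic. A standalone sketch of that conversion (tv_to_ns()/ns_to_tv() are illustrative names, not the kernel helpers):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC  1000000000ULL
#define NSEC_PER_USEC 1000ULL

struct timeval_like { int64_t tv_sec; int64_t tv_usec; };

static uint64_t tv_to_ns(struct timeval_like tv)
{
	return tv.tv_sec * NSEC_PER_SEC + tv.tv_usec * NSEC_PER_USEC;
}

static struct timeval_like ns_to_tv(uint64_t ns)
{
	struct timeval_like tv = {
		.tv_sec  = ns / NSEC_PER_SEC,
		.tv_usec = (ns % NSEC_PER_SEC) / NSEC_PER_USEC,
	};
	return tv;
}

int main(void)
{
	struct timeval_like tv = { .tv_sec = 2, .tv_usec = 500000 };
	uint64_t ns = tv_to_ns(tv);		/* 2500000000 ns */

	tv = ns_to_tv(ns);
	printf("%llu ns -> %lld.%06lld s\n", (unsigned long long)ns,
	       (long long)tv.tv_sec, (long long)tv.tv_usec);
	return 0;
}
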
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index 4c8c96a..1a4f0ca 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -319,13 +319,6 @@ static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
 	vb2_vmalloc_put(dbuf->priv);
 }
 
-static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
-{
-	struct vb2_vmalloc_buf *buf = dbuf->priv;
-
-	return buf->vaddr + pgnum * PAGE_SIZE;
-}
-
 static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
 {
 	struct vb2_vmalloc_buf *buf = dbuf->priv;
@@ -344,7 +337,6 @@ static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
 	.detach = vb2_vmalloc_dmabuf_ops_detach,
 	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
-	.map = vb2_vmalloc_dmabuf_ops_kmap,
 	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
 	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
 	.release = vb2_vmalloc_dmabuf_ops_release,
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 39a2c6c..5fde1d3 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -971,6 +971,7 @@ static int dmx_section_feed_start_filtering(struct dmx_section_feed *feed)
 	dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;
 	dvbdmxfeed->feed.sec.secbufp = 0;
 	dvbdmxfeed->feed.sec.seclen = 0;
+	dvbdmxfeed->pusi_seen = false;
 
 	if (!dvbdmx->start_feed) {
 		mutex_unlock(&dvbdmx->mutex);
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 917fe03..80b6a71 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -983,8 +983,8 @@ struct i2c_client *dvb_module_probe(const char *module_name,
 	board_info->addr = addr;
 	board_info->platform_data = platform_data;
 	request_module(module_name);
-	client = i2c_new_device(adap, board_info);
-	if (client == NULL || client->dev.driver == NULL) {
+	client = i2c_new_client_device(adap, board_info);
+	if (!i2c_client_has_driver(client)) {
 		kfree(board_info);
 		return NULL;
 	}
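
i2c_new_client_device() reports failure through an ERR_PTR()-encoded pointer rather than NULL, and i2c_client_has_driver() folds the error-pointer check together with the old client->dev.driver test, which is why the checks above change shape. The error-pointer convention itself can be shown with a self-contained sketch (a reimplementation for illustration only, not the kernel headers):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	/* small negative errno values live at the very top of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *client = ERR_PTR(-19);	/* e.g. -ENODEV */

	if (IS_ERR(client))
		printf("probe failed: %ld\n", PTR_ERR(client));
	return 0;
}
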
diff --git a/drivers/media/dvb-frontends/as102_fe.c b/drivers/media/dvb-frontends/as102_fe.c
index 496ebb8..bc72d95 100644
--- a/drivers/media/dvb-frontends/as102_fe.c
+++ b/drivers/media/dvb-frontends/as102_fe.c
@@ -290,7 +290,8 @@ static int as102_fe_get_frontend(struct dvb_frontend *fe,
 }
 
 static int as102_fe_get_tune_settings(struct dvb_frontend *fe,
-			struct dvb_frontend_tune_settings *settings) {
+			struct dvb_frontend_tune_settings *settings)
+{
 
 	settings->min_delay_ms = 1000;
 
diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c
index c1717dd..8cdca05 100644
--- a/drivers/media/dvb-frontends/au8522_decoder.c
+++ b/drivers/media/dvb-frontends/au8522_decoder.c
@@ -562,7 +562,7 @@ static int au8522_s_video_routing(struct v4l2_subdev *sd,
 {
 	struct au8522_state *state = to_state(sd);
 
-	switch(input) {
+	switch (input) {
 	case AU8522_COMPOSITE_CH1:
 	case AU8522_SVIDEO_CH13:
 	case AU8522_COMPOSITE_CH4_SIF:
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index d137199..b161833 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -530,8 +530,8 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *config,
 	strscpy(board_info.type, "cxd2820r", I2C_NAME_SIZE);
 	board_info.addr = config->i2c_address;
 	board_info.platform_data = &pdata;
-	client = i2c_new_device(adapter, &board_info);
-	if (!client || !client->dev.driver)
+	client = i2c_new_client_device(adapter, &board_info);
+	if (!i2c_client_has_driver(client))
 		return NULL;
 
 	return pdata.get_dvb_frontend(client);
diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c
index 3b26f61..cafb41d 100644
--- a/drivers/media/dvb-frontends/dib0070.c
+++ b/drivers/media/dvb-frontends/dib0070.c
@@ -189,7 +189,8 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
 
 		adc = dib0070_read_reg(state, 0x19);
 
-		dprintk("CAPTRIM=%hd; ADC = %hd (ADC) & %dmV\n", state->captrim, adc, (u32) adc*(u32)1800/(u32)1024);
+		dprintk("CAPTRIM=%d; ADC = %hd (ADC) & %dmV\n", state->captrim,
+			adc, (u32)adc * (u32)1800 / (u32)1024);
 
 		if (adc >= 400) {
 			adc -= 400;
@@ -200,7 +201,8 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
 		}
 
 		if (adc < state->adc_diff) {
-			dprintk("CAPTRIM=%hd is closer to target (%hd/%hd)\n", state->captrim, adc, state->adc_diff);
+			dprintk("CAPTRIM=%d is closer to target (%hd/%hd)\n",
+				state->captrim, adc, state->adc_diff);
 			state->adc_diff = adc;
 			state->fcaptrim = state->captrim;
 		}
@@ -364,7 +366,7 @@ static int dib0070_tune_digital(struct dvb_frontend *fe)
 	}
 
 	if (*tune_state == CT_TUNER_START) {
-		dprintk("Tuning for Band: %hd (%d kHz)\n", band, freq);
+		dprintk("Tuning for Band: %d (%d kHz)\n", band, freq);
 		if (state->current_rf != freq) {
 			u8 REFDIV;
 			u32 FBDiv, Rest, FREF, VCOF_kHz;
@@ -442,12 +444,17 @@ static int dib0070_tune_digital(struct dvb_frontend *fe)
 			dib0070_write_reg(state, 0x20,
 				0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001 | state->current_tune_table_index->tuner_enable);
 
-			dprintk("REFDIV: %hd, FREF: %d\n", REFDIV, FREF);
+			dprintk("REFDIV: %u, FREF: %d\n", REFDIV, FREF);
 			dprintk("FBDIV: %d, Rest: %d\n", FBDiv, Rest);
-			dprintk("Num: %hd, Den: %hd, SD: %hd\n", (u16) Rest, Den, (state->lo4 >> 12) & 0x1);
-			dprintk("HFDIV code: %hd\n", state->current_tune_table_index->hfdiv);
-			dprintk("VCO = %hd\n", state->current_tune_table_index->vco_band);
-			dprintk("VCOF: ((%hd*%d) << 1))\n", state->current_tune_table_index->vco_multi, freq);
+			dprintk("Num: %u, Den: %u, SD: %d\n", (u16)Rest, Den,
+				(state->lo4 >> 12) & 0x1);
+			dprintk("HFDIV code: %u\n",
+				state->current_tune_table_index->hfdiv);
+			dprintk("VCO = %u\n",
+				state->current_tune_table_index->vco_band);
+			dprintk("VCOF: ((%u*%d) << 1))\n",
+				state->current_tune_table_index->vco_multi,
+				freq);
 
 			*tune_state = CT_TUNER_STEP_0;
 		} else { /* we are already tuned to this frequency - the configuration is correct  */
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index d13d2e8..bc37475 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -1748,7 +1748,8 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
 			}
 
 			dib0090_set_trim(state);
-			dprintk("BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd\n", state->dc->addr, state->adc_diff, state->step);
+			dprintk("BB Offset Cal, BBreg=%u,Offset=%d,Value Set=%d\n",
+				state->dc->addr, state->adc_diff, state->step);
 
 			state->dc++;
 			if (state->dc->addr == 0)	/* done */
diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c
index e211830..97ce977 100644
--- a/drivers/media/dvb-frontends/dib7000m.c
+++ b/drivers/media/dvb-frontends/dib7000m.c
@@ -808,7 +808,7 @@ static int dib7000m_agc_startup(struct dvb_frontend *demod)
 
 			dib7000m_restart_agc(state);
 
-			dprintk("SPLIT %p: %hd\n", demod, agc_split);
+			dprintk("SPLIT %p: %u\n", demod, agc_split);
 
 			(*agc_state)++;
 			ret = 5;
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 0d22c70..0a7790c 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -915,7 +915,7 @@ static int dib7000p_agc_startup(struct dvb_frontend *demod)
 
 		dib7000p_restart_agc(state);
 
-		dprintk("SPLIT %p: %hd\n", demod, agc_split);
+		dprintk("SPLIT %p: %u\n", demod, agc_split);
 
 		(*agc_state)++;
 		ret = 5;
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.c b/drivers/media/dvb-frontends/dvb_dummy_fe.c
index 4db679c..9ff1eba 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.c
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.c
@@ -31,25 +31,26 @@ static int dvb_dummy_fe_read_status(struct dvb_frontend *fe,
 	return 0;
 }
 
-static int dvb_dummy_fe_read_ber(struct dvb_frontend* fe, u32* ber)
+static int dvb_dummy_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
 {
 	*ber = 0;
 	return 0;
 }
 
-static int dvb_dummy_fe_read_signal_strength(struct dvb_frontend* fe, u16* strength)
+static int dvb_dummy_fe_read_signal_strength(struct dvb_frontend *fe,
+					     u16 *strength)
 {
 	*strength = 0;
 	return 0;
 }
 
-static int dvb_dummy_fe_read_snr(struct dvb_frontend* fe, u16* snr)
+static int dvb_dummy_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
 {
 	*snr = 0;
 	return 0;
 }
 
-static int dvb_dummy_fe_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
+static int dvb_dummy_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
 {
 	*ucblocks = 0;
 	return 0;
@@ -77,12 +78,12 @@ static int dvb_dummy_fe_set_frontend(struct dvb_frontend *fe)
 	return 0;
 }
 
-static int dvb_dummy_fe_sleep(struct dvb_frontend* fe)
+static int dvb_dummy_fe_sleep(struct dvb_frontend *fe)
 {
 	return 0;
 }
 
-static int dvb_dummy_fe_init(struct dvb_frontend* fe)
+static int dvb_dummy_fe_init(struct dvb_frontend *fe)
 {
 	return 0;
 }
@@ -99,17 +100,18 @@ static int dvb_dummy_fe_set_voltage(struct dvb_frontend *fe,
 	return 0;
 }
 
-static void dvb_dummy_fe_release(struct dvb_frontend* fe)
+static void dvb_dummy_fe_release(struct dvb_frontend *fe)
 {
-	struct dvb_dummy_fe_state* state = fe->demodulator_priv;
+	struct dvb_dummy_fe_state *state = fe->demodulator_priv;
+
 	kfree(state);
 }
 
 static const struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops;
 
-struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void)
+struct dvb_frontend *dvb_dummy_fe_ofdm_attach(void)
 {
-	struct dvb_dummy_fe_state* state = NULL;
+	struct dvb_dummy_fe_state *state = NULL;
 
 	/* allocate memory for the internal state */
 	state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
@@ -117,16 +119,20 @@ struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void)
 		return NULL;
 
 	/* create dvb_frontend */
-	memcpy(&state->frontend.ops, &dvb_dummy_fe_ofdm_ops, sizeof(struct dvb_frontend_ops));
+	memcpy(&state->frontend.ops,
+	       &dvb_dummy_fe_ofdm_ops,
+	       sizeof(struct dvb_frontend_ops));
+
 	state->frontend.demodulator_priv = state;
 	return &state->frontend;
 }
+EXPORT_SYMBOL(dvb_dummy_fe_ofdm_attach);
 
 static const struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops;
 
 struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
 {
-	struct dvb_dummy_fe_state* state = NULL;
+	struct dvb_dummy_fe_state *state = NULL;
 
 	/* allocate memory for the internal state */
 	state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
@@ -134,16 +140,20 @@ struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
 		return NULL;
 
 	/* create dvb_frontend */
-	memcpy(&state->frontend.ops, &dvb_dummy_fe_qpsk_ops, sizeof(struct dvb_frontend_ops));
+	memcpy(&state->frontend.ops,
+	       &dvb_dummy_fe_qpsk_ops,
+	       sizeof(struct dvb_frontend_ops));
+
 	state->frontend.demodulator_priv = state;
 	return &state->frontend;
 }
+EXPORT_SYMBOL(dvb_dummy_fe_qpsk_attach);
 
 static const struct dvb_frontend_ops dvb_dummy_fe_qam_ops;
 
 struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
 {
-	struct dvb_dummy_fe_state* state = NULL;
+	struct dvb_dummy_fe_state *state = NULL;
 
 	/* allocate memory for the internal state */
 	state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
@@ -151,10 +161,14 @@ struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
 		return NULL;
 
 	/* create dvb_frontend */
-	memcpy(&state->frontend.ops, &dvb_dummy_fe_qam_ops, sizeof(struct dvb_frontend_ops));
+	memcpy(&state->frontend.ops,
+	       &dvb_dummy_fe_qam_ops,
+	       sizeof(struct dvb_frontend_ops));
+
 	state->frontend.demodulator_priv = state;
 	return &state->frontend;
 }
+EXPORT_SYMBOL(dvb_dummy_fe_qam_attach);
 
 static const struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
 	.delsys = { SYS_DVBT },
@@ -163,13 +177,21 @@ static const struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
 		.frequency_min_hz	= 0,
 		.frequency_max_hz	= 863250 * kHz,
 		.frequency_stepsize_hz	= 62500,
-		.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
-				FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
-				FE_CAN_FEC_7_8 | FE_CAN_FEC_8_9 | FE_CAN_FEC_AUTO |
-				FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
-				FE_CAN_TRANSMISSION_MODE_AUTO |
-				FE_CAN_GUARD_INTERVAL_AUTO |
-				FE_CAN_HIERARCHY_AUTO,
+		.caps = FE_CAN_FEC_1_2 |
+			FE_CAN_FEC_2_3 |
+			FE_CAN_FEC_3_4 |
+			FE_CAN_FEC_4_5 |
+			FE_CAN_FEC_5_6 |
+			FE_CAN_FEC_6_7 |
+			FE_CAN_FEC_7_8 |
+			FE_CAN_FEC_8_9 |
+			FE_CAN_FEC_AUTO |
+			FE_CAN_QAM_16 |
+			FE_CAN_QAM_64 |
+			FE_CAN_QAM_AUTO |
+			FE_CAN_TRANSMISSION_MODE_AUTO |
+			FE_CAN_GUARD_INTERVAL_AUTO |
+			FE_CAN_HIERARCHY_AUTO,
 	},
 
 	.release = dvb_dummy_fe_release,
@@ -194,11 +216,16 @@ static const struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
 		.frequency_min_hz	=  51 * MHz,
 		.frequency_max_hz	= 858 * MHz,
 		.frequency_stepsize_hz	= 62500,
-		.symbol_rate_min	= (57840000 / 2) / 64,  /* SACLK/64 == (XIN/2)/64 */
+		/* symbol_rate_min: SACLK/64 == (XIN/2)/64 */
+		.symbol_rate_min	= (57840000 / 2) / 64,
 		.symbol_rate_max	= (57840000 / 2) / 4,   /* SACLK/4 */
-		.caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
-			FE_CAN_QAM_128 | FE_CAN_QAM_256 |
-			FE_CAN_FEC_AUTO | FE_CAN_INVERSION_AUTO
+		.caps = FE_CAN_QAM_16 |
+			FE_CAN_QAM_32 |
+			FE_CAN_QAM_64 |
+			FE_CAN_QAM_128 |
+			FE_CAN_QAM_256 |
+			FE_CAN_FEC_AUTO |
+			FE_CAN_INVERSION_AUTO
 	},
 
 	.release = dvb_dummy_fe_release,
@@ -227,8 +254,12 @@ static const struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = {
 		.symbol_rate_min	= 1000000,
 		.symbol_rate_max	= 45000000,
 		.caps = FE_CAN_INVERSION_AUTO |
-			FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
-			FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+			FE_CAN_FEC_1_2 |
+			FE_CAN_FEC_2_3 |
+			FE_CAN_FEC_3_4 |
+			FE_CAN_FEC_5_6 |
+			FE_CAN_FEC_7_8 |
+			FE_CAN_FEC_AUTO |
 			FE_CAN_QPSK
 	},
 
@@ -253,7 +284,3 @@ static const struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = {
 MODULE_DESCRIPTION("DVB DUMMY Frontend");
 MODULE_AUTHOR("Emard");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(dvb_dummy_fe_ofdm_attach);
-EXPORT_SYMBOL(dvb_dummy_fe_qam_attach);
-EXPORT_SYMBOL(dvb_dummy_fe_qpsk_attach);
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.h b/drivers/media/dvb-frontends/dvb_dummy_fe.h
index 526fabd..463abf5 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.h
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.h
@@ -12,23 +12,23 @@
 #include <media/dvb_frontend.h>
 
 #if IS_REACHABLE(CONFIG_DVB_DUMMY_FE)
-extern struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void);
-extern struct dvb_frontend* dvb_dummy_fe_qpsk_attach(void);
-extern struct dvb_frontend* dvb_dummy_fe_qam_attach(void);
+struct dvb_frontend *dvb_dummy_fe_ofdm_attach(void);
+struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void);
+struct dvb_frontend *dvb_dummy_fe_qam_attach(void);
 #else
 static inline struct dvb_frontend *dvb_dummy_fe_ofdm_attach(void)
 {
-	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	pr_warn("%s: driver disabled by Kconfig\n", __func__);
 	return NULL;
 }
 static inline struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
 {
-	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	pr_warn("%s: driver disabled by Kconfig\n", __func__);
 	return NULL;
 }
 static inline struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
 {
-	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	pr_warn("%s: driver disabled by Kconfig\n", __func__);
 	return NULL;
 }
 #endif /* CONFIG_DVB_DUMMY_FE */
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index 651c8aa..da3a8c5 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -922,8 +922,8 @@ struct dvb_frontend *lgdt330x_attach(const struct lgdt330x_config *_config,
 	strscpy(board_info.type, "lgdt330x", sizeof(board_info.type));
 	board_info.addr = demod_address;
 	board_info.platform_data = &config;
-	client = i2c_new_device(i2c, &board_info);
-	if (!client || !client->dev.driver)
+	client = i2c_new_client_device(i2c, &board_info);
+	if (!i2c_client_has_driver(client))
 		return NULL;
 
 	return lgdt330x_get_dvb_frontend(client);
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 3a367a58..c96f05f 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1277,8 +1277,8 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
 	strscpy(board_info.type, "m88ds3103", I2C_NAME_SIZE);
 	board_info.addr = cfg->i2c_addr;
 	board_info.platform_data = &pdata;
-	client = i2c_new_device(i2c, &board_info);
-	if (!client || !client->dev.driver)
+	client = i2c_new_client_device(i2c, &board_info);
+	if (!i2c_client_has_driver(client))
 		return NULL;
 
 	*tuner_i2c_adapter = pdata.get_i2c_adapter(client);
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index 6c24d6d..234607b 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -519,8 +519,8 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe,
 	strscpy(board_info.type, "ts2020", I2C_NAME_SIZE);
 	board_info.addr = config->tuner_address;
 	board_info.platform_data = &pdata;
-	client = i2c_new_device(i2c, &board_info);
-	if (!client || !client->dev.driver)
+	client = i2c_new_client_device(i2c, &board_info);
+	if (!i2c_client_has_driver(client))
 		return NULL;
 
 	return fe;
diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
index 5042f9e..fccb388 100644
--- a/drivers/media/i2c/adv748x/adv748x.h
+++ b/drivers/media/i2c/adv748x/adv748x.h
@@ -394,10 +394,10 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
 
 #define io_read(s, r) adv748x_read(s, ADV748X_PAGE_IO, r)
 #define io_write(s, r, v) adv748x_write(s, ADV748X_PAGE_IO, r, v)
-#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~m) | v)
+#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~(m)) | (v))
 
 #define hdmi_read(s, r) adv748x_read(s, ADV748X_PAGE_HDMI, r)
-#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, r+1)) & m)
+#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, (r)+1)) & (m))
 #define hdmi_write(s, r, v) adv748x_write(s, ADV748X_PAGE_HDMI, r, v)
 
 #define repeater_read(s, r) adv748x_read(s, ADV748X_PAGE_REPEATER, r)
@@ -405,11 +405,11 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
 
 #define sdp_read(s, r) adv748x_read(s, ADV748X_PAGE_SDP, r)
 #define sdp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_SDP, r, v)
-#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~m) | v)
+#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~(m)) | (v))
 
 #define cp_read(s, r) adv748x_read(s, ADV748X_PAGE_CP, r)
 #define cp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_CP, r, v)
-#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~m) | v)
+#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~(m)) | (v))
 
 #define tx_read(t, r) adv748x_read(t->state, t->page, r)
 #define tx_write(t, r, v) adv748x_write(t->state, t->page, r, v)
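
The added parentheses guard against callers that pass a compound expression as the mask or value; without them the textual expansion applies ~ to only the first operand of the mask. A tiny stand-alone illustration (CLR_BAD/CLR_GOOD are hypothetical names mirroring the clrset macros above):

#include <stdio.h>

#define CLR_BAD(reg, m, v)  (((reg) & ~m) | v)
#define CLR_GOOD(reg, m, v) (((reg) & ~(m)) | (v))

int main(void)
{
	unsigned int reg = 0xff;

	/* mask is a compound expression: 0x0f | 0x30 */
	printf("bad:  0x%02x\n", CLR_BAD(reg, 0x0f | 0x30, 0x01));  /* 0xf1: only ~0x0f was applied */
	printf("good: 0x%02x\n", CLR_GOOD(reg, 0x0f | 0x30, 0x01)); /* 0xc1: the full mask is cleared */
	return 0;
}
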
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 2dedd6e..09004d9 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1503,23 +1503,14 @@ static void adv76xx_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
 
 static unsigned int adv7604_read_hdmi_pixelclock(struct v4l2_subdev *sd)
 {
-	unsigned int freq;
 	int a, b;
 
 	a = hdmi_read(sd, 0x06);
 	b = hdmi_read(sd, 0x3b);
 	if (a < 0 || b < 0)
 		return 0;
-	freq =  a * 1000000 + ((b & 0x30) >> 4) * 250000;
 
-	if (is_hdmi(sd)) {
-		/* adjust for deep color mode */
-		unsigned bits_per_channel = ((hdmi_read(sd, 0x0b) & 0x60) >> 4) + 8;
-
-		freq = freq * 8 / bits_per_channel;
-	}
-
-	return freq;
+	return a * 1000000 + ((b & 0x30) >> 4) * 250000;
 }
 
 static unsigned int adv7611_read_hdmi_pixelclock(struct v4l2_subdev *sd)
@@ -1530,9 +1521,28 @@ static unsigned int adv7611_read_hdmi_pixelclock(struct v4l2_subdev *sd)
 	b = hdmi_read(sd, 0x52);
 	if (a < 0 || b < 0)
 		return 0;
+
 	return ((a << 1) | (b >> 7)) * 1000000 + (b & 0x7f) * 1000000 / 128;
 }
 
+static unsigned int adv76xx_read_hdmi_pixelclock(struct v4l2_subdev *sd)
+{
+	struct adv76xx_state *state = to_state(sd);
+	const struct adv76xx_chip_info *info = state->info;
+	unsigned int freq, bits_per_channel, pixelrepetition;
+
+	freq = info->read_hdmi_pixelclock(sd);
+	if (is_hdmi(sd)) {
+		/* adjust for deep color mode and pixel repetition */
+		bits_per_channel = ((hdmi_read(sd, 0x0b) & 0x60) >> 4) + 8;
+		pixelrepetition = (hdmi_read(sd, 0x05) & 0x0f) + 1;
+
+		freq = freq * 8 / bits_per_channel / pixelrepetition;
+	}
+
+	return freq;
+}
+
 static int adv76xx_query_dv_timings(struct v4l2_subdev *sd,
 			struct v4l2_dv_timings *timings)
 {
@@ -1579,7 +1589,7 @@ static int adv76xx_query_dv_timings(struct v4l2_subdev *sd,
 
 		bt->width = w;
 		bt->height = h;
-		bt->pixelclock = info->read_hdmi_pixelclock(sd);
+		bt->pixelclock = adv76xx_read_hdmi_pixelclock(sd);
 		bt->hfrontporch = hdmi_read16(sd, 0x20, info->hfrontporch_mask);
 		bt->hsync = hdmi_read16(sd, 0x22, info->hsync_mask);
 		bt->hbackporch = hdmi_read16(sd, 0x24, info->hbackporch_mask);
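
The new adv76xx_read_hdmi_pixelclock() wrapper scales the TMDS-derived clock back by the deep-colour bit depth and the pixel-repetition factor, so repeated-pixel and >8-bit modes no longer report an inflated pixel clock. A worked example of that scaling with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int freq = 148500000;		/* TMDS-derived clock read from the chip */
	unsigned int bits_per_channel = 12;	/* 12-bit deep colour */
	unsigned int pixelrepetition = 2;	/* each pixel transmitted twice */

	/* same scaling as the driver: 8 bpc and no repetition leave freq unchanged */
	freq = freq * 8 / bits_per_channel / pixelrepetition;
	printf("effective pixel clock: %u Hz\n", freq);	/* 49500000 */
	return 0;
}
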
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 4b9b98c..5bd3ae82 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -428,10 +428,12 @@ static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev,
 				  struct v4l2_subdev_pad_config *cfg,
 				  struct v4l2_subdev_mbus_code_enum *code)
 {
+	struct mt9v032 *mt9v032 = to_mt9v032(subdev);
+
 	if (code->index > 0)
 		return -EINVAL;
 
-	code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+	code->code = mt9v032->format.code;
 	return 0;
 }
 
@@ -439,7 +441,11 @@ static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
 				   struct v4l2_subdev_pad_config *cfg,
 				   struct v4l2_subdev_frame_size_enum *fse)
 {
-	if (fse->index >= 3 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+	struct mt9v032 *mt9v032 = to_mt9v032(subdev);
+
+	if (fse->index >= 3)
+		return -EINVAL;
+	if (mt9v032->format.code != fse->code)
 		return -EINVAL;
 
 	fse->min_width = MT9V032_WINDOW_WIDTH_DEF / (1 << fse->index);
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index bb41bea..61ae6a0 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -103,7 +103,7 @@
 #define MT9V111_MAX_CLKIN				27000000
 
 /* The default sensor configuration at startup time. */
-static struct v4l2_mbus_framefmt mt9v111_def_fmt = {
+static const struct v4l2_mbus_framefmt mt9v111_def_fmt = {
 	.width		= 640,
 	.height		= 480,
 	.code		= MEDIA_BUS_FMT_UYVY8_2X8,
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 5e495c8..854031f 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -189,6 +189,7 @@ struct ov5640_mode_info {
 	u32 vtot;
 	const struct reg_value *reg_data;
 	u32 reg_data_size;
+	u32 max_fps;
 };
 
 struct ov5640_ctrls {
@@ -544,6 +545,7 @@ static const struct ov5640_mode_info ov5640_mode_init_data = {
 	0, SUBSAMPLING, 640, 1896, 480, 984,
 	ov5640_init_setting_30fps_VGA,
 	ARRAY_SIZE(ov5640_init_setting_30fps_VGA),
+	OV5640_30_FPS,
 };
 
 static const struct ov5640_mode_info
@@ -551,39 +553,48 @@ ov5640_mode_data[OV5640_NUM_MODES] = {
 	{OV5640_MODE_QCIF_176_144, SUBSAMPLING,
 	 176, 1896, 144, 984,
 	 ov5640_setting_QCIF_176_144,
-	 ARRAY_SIZE(ov5640_setting_QCIF_176_144)},
+	 ARRAY_SIZE(ov5640_setting_QCIF_176_144),
+	 OV5640_30_FPS},
 	{OV5640_MODE_QVGA_320_240, SUBSAMPLING,
 	 320, 1896, 240, 984,
 	 ov5640_setting_QVGA_320_240,
-	 ARRAY_SIZE(ov5640_setting_QVGA_320_240)},
+	 ARRAY_SIZE(ov5640_setting_QVGA_320_240),
+	 OV5640_30_FPS},
 	{OV5640_MODE_VGA_640_480, SUBSAMPLING,
 	 640, 1896, 480, 1080,
 	 ov5640_setting_VGA_640_480,
-	 ARRAY_SIZE(ov5640_setting_VGA_640_480)},
+	 ARRAY_SIZE(ov5640_setting_VGA_640_480),
+	 OV5640_60_FPS},
 	{OV5640_MODE_NTSC_720_480, SUBSAMPLING,
 	 720, 1896, 480, 984,
 	 ov5640_setting_NTSC_720_480,
-	 ARRAY_SIZE(ov5640_setting_NTSC_720_480)},
+	 ARRAY_SIZE(ov5640_setting_NTSC_720_480),
+	OV5640_30_FPS},

 	{OV5640_MODE_PAL_720_576, SUBSAMPLING,
 	 720, 1896, 576, 984,
 	 ov5640_setting_PAL_720_576,
-	 ARRAY_SIZE(ov5640_setting_PAL_720_576)},
+	 ARRAY_SIZE(ov5640_setting_PAL_720_576),
+	 OV5640_30_FPS},
 	{OV5640_MODE_XGA_1024_768, SUBSAMPLING,
 	 1024, 1896, 768, 1080,
 	 ov5640_setting_XGA_1024_768,
-	 ARRAY_SIZE(ov5640_setting_XGA_1024_768)},
+	 ARRAY_SIZE(ov5640_setting_XGA_1024_768),
+	 OV5640_30_FPS},
 	{OV5640_MODE_720P_1280_720, SUBSAMPLING,
 	 1280, 1892, 720, 740,
 	 ov5640_setting_720P_1280_720,
-	 ARRAY_SIZE(ov5640_setting_720P_1280_720)},
+	 ARRAY_SIZE(ov5640_setting_720P_1280_720),
+	 OV5640_30_FPS},
 	{OV5640_MODE_1080P_1920_1080, SCALING,
 	 1920, 2500, 1080, 1120,
 	 ov5640_setting_1080P_1920_1080,
-	 ARRAY_SIZE(ov5640_setting_1080P_1920_1080)},
+	 ARRAY_SIZE(ov5640_setting_1080P_1920_1080),
+	 OV5640_30_FPS},
 	{OV5640_MODE_QSXGA_2592_1944, SCALING,
 	 2592, 2844, 1944, 1968,
 	 ov5640_setting_QSXGA_2592_1944,
-	 ARRAY_SIZE(ov5640_setting_QSXGA_2592_1944)},
+	 ARRAY_SIZE(ov5640_setting_QSXGA_2592_1944),
+	 OV5640_15_FPS},
 };
 
 static int ov5640_init_slave_id(struct ov5640_dev *sensor)
@@ -874,7 +885,7 @@ static unsigned long ov5640_calc_sys_clk(struct ov5640_dev *sensor,
 			 * We have reached the maximum allowed PLL1 output,
 			 * increase sysdiv.
 			 */
-			if (!rate)
+			if (!_rate)
 				break;
 
 			/*
@@ -1606,14 +1617,8 @@ ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
 	    (!nearest && (mode->hact != width || mode->vact != height)))
 		return NULL;
 
-	/* Only 640x480 can operate at 60fps (for now) */
-	if (fr == OV5640_60_FPS &&
-	    !(mode->hact == 640 && mode->vact == 480))
-		return NULL;
-
-	/* 2592x1944 only works at 15fps max */
-	if ((mode->hact == 2592 && mode->vact == 1944) &&
-	    fr > OV5640_15_FPS)
+	/* Check to see if the current mode exceeds the max frame rate */
+	if (ov5640_framerates[fr] > ov5640_framerates[mode->max_fps])
 		return NULL;
 
 	return mode;
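
Recording a max_fps ceiling per mode lets ov5640_find_mode() replace the two hard-coded special cases (only 640x480 at 60 fps, 2592x1944 capped at 15 fps) with a single table lookup; a new mode only needs its ceiling recorded once. A sketch of the same table-driven check with invented mode data:

#include <stdio.h>

enum { FPS_15, FPS_30, FPS_60 };

static const int framerates[] = { 15, 30, 60 };

struct mode { int width, height, max_fps; };

static const struct mode modes[] = {
	{  640,  480, FPS_60 },
	{ 1280,  720, FPS_30 },
	{ 2592, 1944, FPS_15 },
};

static const struct mode *find_mode(int w, int h, int fr)
{
	unsigned int i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		if (modes[i].width != w || modes[i].height != h)
			continue;
		/* reject the mode if the requested rate exceeds its ceiling */
		if (framerates[fr] > framerates[modes[i].max_fps])
			return NULL;
		return &modes[i];
	}
	return NULL;
}

int main(void)
{
	printf("2592x1944@30: %s\n", find_mode(2592, 1944, FPS_30) ? "ok" : "rejected");
	printf("640x480@60:   %s\n", find_mode(640, 480, FPS_60) ? "ok" : "rejected");
	return 0;
}
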
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 84f9771..a80d770 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -413,21 +413,14 @@ static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
 	struct smiapp_sensor *sensor =
 		container_of(ctrl->handler, struct smiapp_subdev, ctrl_handler)
 			->sensor;
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	int pm_status;
 	u32 orient = 0;
+	unsigned int i;
 	int exposure;
 	int rval;
 
 	switch (ctrl->id) {
-	case V4L2_CID_ANALOGUE_GAIN:
-		return smiapp_write(
-			sensor,
-			SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GLOBAL, ctrl->val);
-
-	case V4L2_CID_EXPOSURE:
-		return smiapp_write(
-			sensor,
-			SMIAPP_REG_U16_COARSE_INTEGRATION_TIME, ctrl->val);
-
 	case V4L2_CID_HFLIP:
 	case V4L2_CID_VFLIP:
 		if (sensor->streaming)
@@ -440,15 +433,10 @@ static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
 			orient |= SMIAPP_IMAGE_ORIENTATION_VFLIP;
 
 		orient ^= sensor->hvflip_inv_mask;
-		rval = smiapp_write(sensor, SMIAPP_REG_U8_IMAGE_ORIENTATION,
-				    orient);
-		if (rval < 0)
-			return rval;
 
 		smiapp_update_mbus_formats(sensor);
 
-		return 0;
-
+		break;
 	case V4L2_CID_VBLANK:
 		exposure = sensor->exposure->val;
 
@@ -461,59 +449,105 @@ static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
 				return rval;
 		}
 
-		return smiapp_write(
-			sensor, SMIAPP_REG_U16_FRAME_LENGTH_LINES,
-			sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height
-			+ ctrl->val);
-
-	case V4L2_CID_HBLANK:
-		return smiapp_write(
-			sensor, SMIAPP_REG_U16_LINE_LENGTH_PCK,
-			sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width
-			+ ctrl->val);
-
+		break;
 	case V4L2_CID_LINK_FREQ:
 		if (sensor->streaming)
 			return -EBUSY;
 
-		return smiapp_pll_update(sensor);
+		rval = smiapp_pll_update(sensor);
+		if (rval)
+			return rval;
 
-	case V4L2_CID_TEST_PATTERN: {
-		unsigned int i;
-
+		return 0;
+	case V4L2_CID_TEST_PATTERN:
 		for (i = 0; i < ARRAY_SIZE(sensor->test_data); i++)
 			v4l2_ctrl_activate(
 				sensor->test_data[i],
 				ctrl->val ==
 				V4L2_SMIAPP_TEST_PATTERN_MODE_SOLID_COLOUR);
 
-		return smiapp_write(
-			sensor, SMIAPP_REG_U16_TEST_PATTERN_MODE, ctrl->val);
+		break;
 	}
 
-	case V4L2_CID_TEST_PATTERN_RED:
-		return smiapp_write(
-			sensor, SMIAPP_REG_U16_TEST_DATA_RED, ctrl->val);
-
-	case V4L2_CID_TEST_PATTERN_GREENR:
-		return smiapp_write(
-			sensor, SMIAPP_REG_U16_TEST_DATA_GREENR, ctrl->val);
-
-	case V4L2_CID_TEST_PATTERN_BLUE:
-		return smiapp_write(
-			sensor, SMIAPP_REG_U16_TEST_DATA_BLUE, ctrl->val);
-
-	case V4L2_CID_TEST_PATTERN_GREENB:
-		return smiapp_write(
-			sensor, SMIAPP_REG_U16_TEST_DATA_GREENB, ctrl->val);
-
-	case V4L2_CID_PIXEL_RATE:
-		/* For v4l2_ctrl_s_ctrl_int64() used internally. */
+	pm_runtime_get_noresume(&client->dev);
+	pm_status = pm_runtime_get_if_in_use(&client->dev);
+	pm_runtime_put_noidle(&client->dev);
+	if (!pm_status)
 		return 0;
 
+	switch (ctrl->id) {
+	case V4L2_CID_ANALOGUE_GAIN:
+		rval = smiapp_write(
+			sensor,
+			SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GLOBAL, ctrl->val);
+
+		break;
+	case V4L2_CID_EXPOSURE:
+		rval = smiapp_write(
+			sensor,
+			SMIAPP_REG_U16_COARSE_INTEGRATION_TIME, ctrl->val);
+
+		break;
+	case V4L2_CID_HFLIP:
+	case V4L2_CID_VFLIP:
+		rval = smiapp_write(sensor, SMIAPP_REG_U8_IMAGE_ORIENTATION,
+				    orient);
+
+		break;
+	case V4L2_CID_VBLANK:
+		rval = smiapp_write(
+			sensor, SMIAPP_REG_U16_FRAME_LENGTH_LINES,
+			sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height
+			+ ctrl->val);
+
+		break;
+	case V4L2_CID_HBLANK:
+		rval = smiapp_write(
+			sensor, SMIAPP_REG_U16_LINE_LENGTH_PCK,
+			sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width
+			+ ctrl->val);
+
+		break;
+	case V4L2_CID_TEST_PATTERN:
+		rval = smiapp_write(
+			sensor, SMIAPP_REG_U16_TEST_PATTERN_MODE, ctrl->val);
+
+		break;
+	case V4L2_CID_TEST_PATTERN_RED:
+		rval = smiapp_write(
+			sensor, SMIAPP_REG_U16_TEST_DATA_RED, ctrl->val);
+
+		break;
+	case V4L2_CID_TEST_PATTERN_GREENR:
+		rval = smiapp_write(
+			sensor, SMIAPP_REG_U16_TEST_DATA_GREENR, ctrl->val);
+
+		break;
+	case V4L2_CID_TEST_PATTERN_BLUE:
+		rval = smiapp_write(
+			sensor, SMIAPP_REG_U16_TEST_DATA_BLUE, ctrl->val);
+
+		break;
+	case V4L2_CID_TEST_PATTERN_GREENB:
+		rval = smiapp_write(
+			sensor, SMIAPP_REG_U16_TEST_DATA_GREENB, ctrl->val);
+
+		break;
+	case V4L2_CID_PIXEL_RATE:
+		/* For v4l2_ctrl_s_ctrl_int64() used internally. */
+		rval = 0;
+
+		break;
 	default:
-		return -EINVAL;
+		rval = -EINVAL;
 	}
+
+	if (pm_status > 0) {
+		pm_runtime_mark_last_busy(&client->dev);
+		pm_runtime_put_autosuspend(&client->dev);
+	}
+
+	return rval;
 }
 
 static const struct v4l2_ctrl_ops smiapp_ctrl_ops = {
@@ -1184,10 +1218,6 @@ static int smiapp_power_on(struct device *dev)
 	sleep = SMIAPP_RESET_DELAY(sensor->hwcfg->ext_clk);
 	usleep_range(sleep, sleep);
 
-	mutex_lock(&sensor->mutex);
-
-	sensor->active = true;
-
 	/*
 	 * Failures to respond to the address change command have been noticed.
 	 * Those failures seem to be caused by the sensor requiring a longer
@@ -1270,24 +1300,9 @@ static int smiapp_power_on(struct device *dev)
 		goto out_cci_addr_fail;
 	}
 
-	/* Are we still initialising...? If not, proceed with control setup. */
-	if (sensor->pixel_array) {
-		rval = __v4l2_ctrl_handler_setup(
-			&sensor->pixel_array->ctrl_handler);
-		if (rval)
-			goto out_cci_addr_fail;
-
-		rval = __v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
-		if (rval)
-			goto out_cci_addr_fail;
-	}
-
-	mutex_unlock(&sensor->mutex);
-
 	return 0;
 
 out_cci_addr_fail:
-	mutex_unlock(&sensor->mutex);
 	gpiod_set_value(sensor->xshutdown, 0);
 	clk_disable_unprepare(sensor->ext_clk);
 
@@ -1305,8 +1320,6 @@ static int smiapp_power_off(struct device *dev)
 	struct smiapp_sensor *sensor =
 		container_of(ssd, struct smiapp_sensor, ssds[0]);
 
-	mutex_lock(&sensor->mutex);
-
 	/*
 	 * Currently power/clock to lens are enable/disabled separately
 	 * but they are essentially the same signals. So if the sensor is
@@ -1319,10 +1332,6 @@ static int smiapp_power_off(struct device *dev)
 			     SMIAPP_REG_U8_SOFTWARE_RESET,
 			     SMIAPP_SOFTWARE_RESET);
 
-	sensor->active = false;
-
-	mutex_unlock(&sensor->mutex);
-
 	gpiod_set_value(sensor->xshutdown, 0);
 	clk_disable_unprepare(sensor->ext_clk);
 	usleep_range(5000, 5000);
@@ -1507,6 +1516,30 @@ static int smiapp_stop_streaming(struct smiapp_sensor *sensor)
  * V4L2 subdev video operations
  */
 
+static int smiapp_pm_get_init(struct smiapp_sensor *sensor)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	int rval;
+
+	rval = pm_runtime_get_sync(&client->dev);
+	if (rval < 0) {
+		if (rval != -EBUSY && rval != -EAGAIN)
+			pm_runtime_set_active(&client->dev);
+		pm_runtime_put_noidle(&client->dev);
+
+		return rval;
+	} else if (!rval) {
+		rval = v4l2_ctrl_handler_setup(&sensor->pixel_array->
+					       ctrl_handler);
+		if (rval)
+			return rval;
+
+		return v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
+	}
+
+	return 0;
+}
+
 static int smiapp_set_stream(struct v4l2_subdev *subdev, int enable)
 {
 	struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
@@ -1516,22 +1549,23 @@ static int smiapp_set_stream(struct v4l2_subdev *subdev, int enable)
 	if (sensor->streaming == enable)
 		return 0;
 
-	if (enable) {
-		rval = pm_runtime_get_sync(&client->dev);
-		if (rval < 0) {
-			if (rval != -EBUSY && rval != -EAGAIN)
-				pm_runtime_set_active(&client->dev);
-			pm_runtime_put(&client->dev);
-			return rval;
-		}
+	if (!enable) {
+		smiapp_stop_streaming(sensor);
+		sensor->streaming = false;
+		pm_runtime_mark_last_busy(&client->dev);
+		pm_runtime_put_autosuspend(&client->dev);
 
-		sensor->streaming = true;
+		return 0;
+	}
 
-		rval = smiapp_start_streaming(sensor);
-		if (rval < 0)
-			sensor->streaming = false;
-	} else {
-		rval = smiapp_stop_streaming(sensor);
+	rval = smiapp_pm_get_init(sensor);
+	if (rval)
+		return rval;
+
+	sensor->streaming = true;
+
+	rval = smiapp_start_streaming(sensor);
+	if (rval < 0) {
 		sensor->streaming = false;
 		pm_runtime_mark_last_busy(&client->dev);
 		pm_runtime_put_autosuspend(&client->dev);
@@ -2291,13 +2325,9 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
 	if (!sensor->dev_init_done)
 		return -EBUSY;
 
-	rval = pm_runtime_get_sync(&client->dev);
-	if (rval < 0) {
-		if (rval != -EBUSY && rval != -EAGAIN)
-			pm_runtime_set_active(&client->dev);
-		pm_runtime_put_noidle(&client->dev);
+	rval = smiapp_pm_get_init(sensor);
+	if (rval < 0)
 		return -ENODEV;
-	}
 
 	rval = smiapp_read_nvm(sensor, buf, PAGE_SIZE);
 	if (rval < 0) {
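
The reworked smiapp_set_ctrl() always updates the driver's cached state but only touches the hardware when pm_runtime_get_if_in_use() says the sensor is powered; smiapp_pm_get_init() then replays the control handlers at the next power-up, so deferred values still reach the registers. A much-simplified user-space analogue of that "write now or replay at power-on" pattern (struct dev and its fields are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool powered;
	int cached_gain;	/* always updated */
	int hw_gain;		/* only valid while powered */
};

static void write_hw_gain(struct dev *d)
{
	d->hw_gain = d->cached_gain;
	printf("hw write: gain=%d\n", d->hw_gain);
}

static void set_gain(struct dev *d, int val)
{
	d->cached_gain = val;		/* the part that always runs */
	if (!d->powered)
		return;			/* replayed at power-on instead */
	write_hw_gain(d);
}

static void power_on(struct dev *d)
{
	d->powered = true;
	write_hw_gain(d);		/* replay cached controls */
}

int main(void)
{
	struct dev d = { .powered = false };

	set_gain(&d, 4);	/* cached only, device is off */
	power_on(&d);		/* cached value reaches the hardware here */
	set_gain(&d, 8);	/* device is on, written immediately */
	return 0;
}
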
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index 0470e47..ce8c1d4 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -223,9 +223,6 @@ int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val)
 	     len != SMIAPP_REG_32BIT) || flags)
 		return -EINVAL;
 
-	if (!sensor->active)
-		return 0;
-
 	msg.addr = client->addr;
 	msg.flags = 0; /* Write */
 	msg.len = 2 + len;
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index 3ab874a..4837d80 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -198,7 +198,6 @@ struct smiapp_sensor {
 
 	u8 hvflip_inv_mask; /* H/VFLIP inversion due to sensor orientation */
 	u8 frame_skip;
-	bool active; /* is the sensor powered on? */
 	u16 embedded_start; /* embedded data start line */
 	u16 embedded_end;
 	u16 image_start; /* image data start line */
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index 492bc85..41226f1 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -386,7 +386,7 @@ void init_bttv_i2c_ir(struct bttv *btv)
 
 	if (btv->init_data.name) {
 		info.platform_data = &btv->init_data;
-		i2c_dev = i2c_new_device(&btv->c.i2c_adap, &info);
+		i2c_dev = i2c_new_client_device(&btv->c.i2c_adap, &info);
 	} else {
 		/*
 		 * The external IR receiver is at i2c address 0x34 (0x35 for
@@ -396,9 +396,9 @@ void init_bttv_i2c_ir(struct bttv *btv)
 		 * internal.
 		 * That's why we probe 0x1a (~0x34) first. CB
 		 */
-		i2c_dev = i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list, NULL);
+		i2c_dev = i2c_new_scanned_device(&btv->c.i2c_adap, &info, addr_list, NULL);
 	}
-	if (NULL == i2c_dev)
+	if (IS_ERR(i2c_dev))
 		return;
 
 #if defined(CONFIG_MODULES) && defined(MODULE)
diff --git a/drivers/media/pci/cobalt/cobalt-alsa-pcm.c b/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
index 38d0093..9e7504e 100644
--- a/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
+++ b/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
@@ -9,7 +9,6 @@
 
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/vmalloc.h>
 #include <linux/delay.h>
 
 #include <media/v4l2-device.h>
@@ -238,54 +237,6 @@ static int snd_cobalt_pcm_capture_close(struct snd_pcm_substream *substream)
 	return 0;
 }
 
-static int snd_cobalt_pcm_ioctl(struct snd_pcm_substream *substream,
-		     unsigned int cmd, void *arg)
-{
-	return snd_pcm_lib_ioctl(substream, cmd, arg);
-}
-
-
-static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
-					size_t size)
-{
-	struct snd_pcm_runtime *runtime = subs->runtime;
-
-	dprintk("Allocating vbuffer\n");
-	if (runtime->dma_area) {
-		if (runtime->dma_bytes > size)
-			return 0;
-
-		vfree(runtime->dma_area);
-	}
-	runtime->dma_area = vmalloc(size);
-	if (!runtime->dma_area)
-		return -ENOMEM;
-
-	runtime->dma_bytes = size;
-
-	return 0;
-}
-
-static int snd_cobalt_pcm_hw_params(struct snd_pcm_substream *substream,
-			 struct snd_pcm_hw_params *params)
-{
-	dprintk("%s called\n", __func__);
-
-	return snd_pcm_alloc_vmalloc_buffer(substream,
-					   params_buffer_bytes(params));
-}
-
-static int snd_cobalt_pcm_hw_free(struct snd_pcm_substream *substream)
-{
-	if (substream->runtime->dma_area) {
-		dprintk("freeing pcm capture region\n");
-		vfree(substream->runtime->dma_area);
-		substream->runtime->dma_area = NULL;
-	}
-
-	return 0;
-}
-
 static int snd_cobalt_pcm_prepare(struct snd_pcm_substream *substream)
 {
 	struct snd_cobalt_card *cobsc = snd_pcm_substream_chip(substream);
@@ -490,36 +441,20 @@ snd_pcm_uframes_t snd_cobalt_pcm_pb_pointer(struct snd_pcm_substream *substream)
 	       substream->runtime->buffer_size;
 }
 
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
-					     unsigned long offset)
-{
-	void *pageptr = subs->runtime->dma_area + offset;
-
-	return vmalloc_to_page(pageptr);
-}
-
 static const struct snd_pcm_ops snd_cobalt_pcm_capture_ops = {
 	.open		= snd_cobalt_pcm_capture_open,
 	.close		= snd_cobalt_pcm_capture_close,
-	.ioctl		= snd_cobalt_pcm_ioctl,
-	.hw_params	= snd_cobalt_pcm_hw_params,
-	.hw_free	= snd_cobalt_pcm_hw_free,
 	.prepare	= snd_cobalt_pcm_prepare,
 	.trigger	= snd_cobalt_pcm_trigger,
 	.pointer	= snd_cobalt_pcm_pointer,
-	.page		= snd_pcm_get_vmalloc_page,
 };
 
 static const struct snd_pcm_ops snd_cobalt_pcm_playback_ops = {
 	.open		= snd_cobalt_pcm_playback_open,
 	.close		= snd_cobalt_pcm_playback_close,
-	.ioctl		= snd_cobalt_pcm_ioctl,
-	.hw_params	= snd_cobalt_pcm_hw_params,
-	.hw_free	= snd_cobalt_pcm_hw_free,
 	.prepare	= snd_cobalt_pcm_pb_prepare,
 	.trigger	= snd_cobalt_pcm_pb_trigger,
 	.pointer	= snd_cobalt_pcm_pb_pointer,
-	.page		= snd_pcm_get_vmalloc_page,
 };
 
 int snd_cobalt_pcm_create(struct snd_cobalt_card *cobsc)
@@ -555,6 +490,8 @@ int snd_cobalt_pcm_create(struct snd_cobalt_card *cobsc)
 
 		snd_pcm_set_ops(sp, SNDRV_PCM_STREAM_CAPTURE,
 				&snd_cobalt_pcm_capture_ops);
+		snd_pcm_set_managed_buffer_all(sp, SNDRV_DMA_TYPE_VMALLOC,
+					       NULL, 0, 0);
 		sp->info_flags = 0;
 		sp->private_data = cobsc;
 		strscpy(sp->name, "cobalt", sizeof(sp->name));
@@ -579,6 +516,8 @@ int snd_cobalt_pcm_create(struct snd_cobalt_card *cobsc)
 
 		snd_pcm_set_ops(sp, SNDRV_PCM_STREAM_PLAYBACK,
 				&snd_cobalt_pcm_playback_ops);
+		snd_pcm_set_managed_buffer_all(sp, SNDRV_DMA_TYPE_VMALLOC,
+					       NULL, 0, 0);
 		sp->info_flags = 0;
 		sp->private_data = cobsc;
 		strscpy(sp->name, "cobalt", sizeof(sp->name));
diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.c b/drivers/media/pci/cx18/cx18-alsa-pcm.c
index 13f858c..bed28b4 100644
--- a/drivers/media/pci/cx18/cx18-alsa-pcm.c
+++ b/drivers/media/pci/cx18/cx18-alsa-pcm.c
@@ -11,7 +11,6 @@
 
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/vmalloc.h>
 
 #include <media/v4l2-device.h>
 
@@ -201,67 +200,6 @@ static int snd_cx18_pcm_capture_close(struct snd_pcm_substream *substream)
 	return 0;
 }
 
-static int snd_cx18_pcm_ioctl(struct snd_pcm_substream *substream,
-		     unsigned int cmd, void *arg)
-{
-	struct snd_cx18_card *cxsc = snd_pcm_substream_chip(substream);
-	int ret;
-
-	snd_cx18_lock(cxsc);
-	ret = snd_pcm_lib_ioctl(substream, cmd, arg);
-	snd_cx18_unlock(cxsc);
-	return ret;
-}
-
-
-static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
-					size_t size)
-{
-	struct snd_pcm_runtime *runtime = subs->runtime;
-
-	dprintk("Allocating vbuffer\n");
-	if (runtime->dma_area) {
-		if (runtime->dma_bytes > size)
-			return 0;
-
-		vfree(runtime->dma_area);
-	}
-	runtime->dma_area = vmalloc(size);
-	if (!runtime->dma_area)
-		return -ENOMEM;
-
-	runtime->dma_bytes = size;
-
-	return 0;
-}
-
-static int snd_cx18_pcm_hw_params(struct snd_pcm_substream *substream,
-			 struct snd_pcm_hw_params *params)
-{
-	dprintk("%s called\n", __func__);
-
-	return snd_pcm_alloc_vmalloc_buffer(substream,
-					   params_buffer_bytes(params));
-}
-
-static int snd_cx18_pcm_hw_free(struct snd_pcm_substream *substream)
-{
-	struct snd_cx18_card *cxsc = snd_pcm_substream_chip(substream);
-	unsigned long flags;
-	unsigned char *dma_area = NULL;
-
-	spin_lock_irqsave(&cxsc->slock, flags);
-	if (substream->runtime->dma_area) {
-		dprintk("freeing pcm capture region\n");
-		dma_area = substream->runtime->dma_area;
-		substream->runtime->dma_area = NULL;
-	}
-	spin_unlock_irqrestore(&cxsc->slock, flags);
-	vfree(dma_area);
-
-	return 0;
-}
-
 static int snd_cx18_pcm_prepare(struct snd_pcm_substream *substream)
 {
 	struct snd_cx18_card *cxsc = snd_pcm_substream_chip(substream);
@@ -291,24 +229,12 @@ snd_pcm_uframes_t snd_cx18_pcm_pointer(struct snd_pcm_substream *substream)
 	return hwptr_done;
 }
 
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
-					     unsigned long offset)
-{
-	void *pageptr = subs->runtime->dma_area + offset;
-
-	return vmalloc_to_page(pageptr);
-}
-
 static const struct snd_pcm_ops snd_cx18_pcm_capture_ops = {
 	.open		= snd_cx18_pcm_capture_open,
 	.close		= snd_cx18_pcm_capture_close,
-	.ioctl		= snd_cx18_pcm_ioctl,
-	.hw_params	= snd_cx18_pcm_hw_params,
-	.hw_free	= snd_cx18_pcm_hw_free,
 	.prepare	= snd_cx18_pcm_prepare,
 	.trigger	= snd_cx18_pcm_trigger,
 	.pointer	= snd_cx18_pcm_pointer,
-	.page		= snd_pcm_get_vmalloc_page,
 };
 
 int snd_cx18_pcm_create(struct snd_cx18_card *cxsc)
@@ -334,6 +260,7 @@ int snd_cx18_pcm_create(struct snd_cx18_card *cxsc)
 
 	snd_pcm_set_ops(sp, SNDRV_PCM_STREAM_CAPTURE,
 			&snd_cx18_pcm_capture_ops);
+	snd_pcm_set_managed_buffer_all(sp, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
 	sp->info_flags = 0;
 	sp->private_data = cxsc;
 	strscpy(sp->name, cx->card_name, sizeof(sp->name));
diff --git a/drivers/media/pci/cx18/cx18-cards.c b/drivers/media/pci/cx18/cx18-cards.c
index cf11876..ecbe76f 100644
--- a/drivers/media/pci/cx18/cx18-cards.c
+++ b/drivers/media/pci/cx18/cx18-cards.c
@@ -245,7 +245,7 @@ static const struct cx18_card cx18_card_mpc718 = {
 	.type = CX18_CARD_YUAN_MPC718,
 	.name = "Yuan MPC718 MiniPCI DVB-T/Analog",
 	.comment = "Experimenters needed for device to work well.\n"
-		  "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
+		  "\tTo help, mail the linux-media list (www.linuxtv.org).\n",
 	.v4l2_capabilities = CX18_CAP_ENCODER,
 	.hw_audio_ctrl = CX18_HW_418_AV,
 	.hw_muxer = CX18_HW_GPIO_MUX,
@@ -305,7 +305,7 @@ static const struct cx18_card cx18_card_gotview_dvd3 = {
 	.type = CX18_CARD_GOTVIEW_PCI_DVD3,
 	.name = "GoTView PCI DVD3 Hybrid",
 	.comment = "Experimenters needed for device to work well.\n"
-		  "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
+		  "\tTo help, mail the linux-media list (www.linuxtv.org).\n",
 	.v4l2_capabilities = CX18_CAP_ENCODER,
 	.hw_audio_ctrl = CX18_HW_418_AV,
 	.hw_muxer = CX18_HW_GPIO_MUX,
@@ -419,7 +419,7 @@ static const struct cx18_card cx18_card_toshiba_qosmio_dvbt = {
 	.type = CX18_CARD_TOSHIBA_QOSMIO_DVBT,
 	.name = "Toshiba Qosmio DVB-T/Analog",
 	.comment = "Experimenters and photos needed for device to work well.\n"
-		  "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
+		  "\tTo help, mail the linux-media list (www.linuxtv.org).\n",
 	.v4l2_capabilities = CX18_CAP_ENCODER,
 	.hw_audio_ctrl = CX18_HW_418_AV,
 	.hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_RESET_CTRL,
@@ -462,7 +462,7 @@ static const struct cx18_card cx18_card_leadtek_pvr2100 = {
 	.type = CX18_CARD_LEADTEK_PVR2100,
 	.name = "Leadtek WinFast PVR2100",
 	.comment = "Experimenters and photos needed for device to work well.\n"
-		  "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
+		  "\tTo help, mail the linux-media list (www.linuxtv.org).\n",
 	.v4l2_capabilities = CX18_CAP_ENCODER,
 	.hw_audio_ctrl = CX18_HW_418_AV,
 	.hw_muxer = CX18_HW_GPIO_MUX,
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 2f1eeeb..95aed00 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -676,7 +676,7 @@ static void cx18_process_options(struct cx18 *cx)
 			 cx->pci_dev->subsystem_device);
 		CX18_ERR("Defaulting to %s card\n", cx->card->name);
 		CX18_ERR("Please mail the vendor/device and subsystem vendor/device IDs and what kind of\n");
-		CX18_ERR("card you have to the ivtv-devel mailinglist (www.ivtvdriver.org)\n");
+		CX18_ERR("card you have to the linux-media mailinglist (www.linuxtv.org)\n");
 		CX18_ERR("Prefix your subject line with [UNKNOWN CX18 CARD].\n");
 	}
 	cx->v4l2_cap = cx->card->v4l2_capabilities;
diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c
index 1ef7ccf..a834352 100644
--- a/drivers/media/pci/cx18/cx18-i2c.c
+++ b/drivers/media/pci/cx18/cx18-i2c.c
@@ -88,7 +88,7 @@ static int cx18_i2c_new_ir(struct cx18 *cx, struct i2c_adapter *adap, u32 hw,
 		break;
 	}
 
-	return i2c_new_probed_device(adap, &info, addr_list, NULL) == NULL ?
+	return IS_ERR(i2c_new_scanned_device(adap, &info, addr_list, NULL)) ?
 	       -1 : 0;
 }
 
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index a8e980c..df44ed7 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -495,7 +495,6 @@ static struct page *snd_cx23885_page(struct snd_pcm_substream *substream,
 static const struct snd_pcm_ops snd_cx23885_pcm_ops = {
 	.open = snd_cx23885_pcm_open,
 	.close = snd_cx23885_close,
-	.ioctl = snd_pcm_lib_ioctl,
 	.hw_params = snd_cx23885_hw_params,
 	.hw_free = snd_cx23885_hw_free,
 	.prepare = snd_cx23885_prepare,
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 8644205..8e5a2c5 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -801,6 +801,25 @@ struct cx23885_board cx23885_boards[] = {
 		.name		= "Hauppauge WinTV-Starburst2",
 		.portb		= CX23885_MPEG_DVB,
 	},
+	[CX23885_BOARD_AVERMEDIA_CE310B] = {
+		.name		= "AVerMedia CE310B",
+		.porta		= CX23885_ANALOG_VIDEO,
+		.force_bff	= 1,
+		.input          = {{
+			.type   = CX23885_VMUX_COMPOSITE1,
+			.vmux   = CX25840_VIN1_CH1 |
+				  CX25840_NONE_CH2 |
+				  CX25840_NONE0_CH3,
+			.amux   = CX25840_AUDIO7,
+		}, {
+			.type   = CX23885_VMUX_SVIDEO,
+			.vmux   = CX25840_VIN8_CH1 |
+				  CX25840_NONE_CH2 |
+				  CX25840_VIN7_CH3 |
+				  CX25840_SVIDEO_ON,
+			.amux   = CX25840_AUDIO7,
+		} },
+	},
 };
 const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
 
@@ -1124,6 +1143,10 @@ struct cx23885_subid cx23885_subids[] = {
 		.subvendor = 0x0070,
 		.subdevice = 0xf02a,
 		.card      = CX23885_BOARD_HAUPPAUGE_STARBURST2,
+	}, {
+		.subvendor = 0x1461,
+		.subdevice = 0x3100,
+		.card      = CX23885_BOARD_AVERMEDIA_CE310B,
 	},
 };
 const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -2348,6 +2371,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
 	case CX23885_BOARD_DVBSKY_T982:
 	case CX23885_BOARD_VIEWCAST_260E:
 	case CX23885_BOARD_VIEWCAST_460E:
+	case CX23885_BOARD_AVERMEDIA_CE310B:
 		dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
 				&dev->i2c_bus[2].i2c_adap,
 				"cx25840", 0x88 >> 1, NULL);
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 4f386db..494751a 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -1159,8 +1159,8 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
 		info.addr = 0x40;
 		info.platform_data = &sp2_config;
 		request_module(info.type);
-		client_ci = i2c_new_device(&i2c_bus->i2c_adap, &info);
-		if (client_ci == NULL || client_ci->dev.driver == NULL)
+		client_ci = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+		if (!i2c_client_has_driver(client_ci))
 			return -ENODEV;
 		if (!try_module_get(client_ci->dev.driver->owner)) {
 			i2c_unregister_device(client_ci);
@@ -1826,8 +1826,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x05;
 			info.platform_data = &tda10071_pdata;
 			request_module("tda10071");
-			client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
-			if (!client_demod || !client_demod->dev.driver)
+			client_demod = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+			if (!i2c_client_has_driver(client_demod))
 				goto frontend_detach;
 			if (!try_module_get(client_demod->dev.driver->owner)) {
 				i2c_unregister_device(client_demod);
@@ -1843,8 +1843,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x0b;
 			info.platform_data = &a8293_pdata;
 			request_module("a8293");
-			client_sec = i2c_new_device(&i2c_bus->i2c_adap, &info);
-			if (!client_sec || !client_sec->dev.driver)
+			client_sec = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+			if (!i2c_client_has_driver(client_sec))
 				goto frontend_detach;
 			if (!try_module_get(client_sec->dev.driver->owner)) {
 				i2c_unregister_device(client_sec);
@@ -1864,9 +1864,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x64;
 			info.platform_data = &si2165_pdata;
 			request_module(info.type);
-			client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
-			if (client_demod == NULL ||
-					client_demod->dev.driver == NULL)
+			client_demod = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+			if (!i2c_client_has_driver(client_demod))
 				goto frontend_detach;
 			if (!try_module_get(client_demod->dev.driver->owner)) {
 				i2c_unregister_device(client_demod);
@@ -1898,8 +1897,8 @@ static int dvb_register(struct cx23885_tsport *port)
 		info.addr = 0x05;
 		info.platform_data = &tda10071_pdata;
 		request_module("tda10071");
-		client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
-		if (!client_demod || !client_demod->dev.driver)
+		client_demod = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+		if (!i2c_client_has_driver(client_demod))
 			goto frontend_detach;
 		if (!try_module_get(client_demod->dev.driver->owner)) {
 			i2c_unregister_device(client_demod);
@@ -1915,8 +1914,8 @@ static int dvb_register(struct cx23885_tsport *port)
 		info.addr = 0x0b;
 		info.platform_data = &a8293_pdata;
 		request_module("a8293");
-		client_sec = i2c_new_device(&i2c_bus->i2c_adap, &info);
-		if (!client_sec || !client_sec->dev.driver)
+		client_sec = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+		if (!i2c_client_has_driver(client_sec))
 			goto frontend_detach;
 		if (!try_module_get(client_sec->dev.driver->owner)) {
 			i2c_unregister_device(client_sec);
@@ -1948,9 +1947,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x60;
 			info.platform_data = &ts2020_config;
 			request_module(info.type);
-			client_tuner = i2c_new_device(adapter, &info);
-			if (client_tuner == NULL ||
-					client_tuner->dev.driver == NULL)
+			client_tuner = i2c_new_client_device(adapter, &info);
+			if (!i2c_client_has_driver(client_tuner))
 				goto frontend_detach;
 			if (!try_module_get(client_tuner->dev.driver->owner)) {
 				i2c_unregister_device(client_tuner);
@@ -1985,9 +1983,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x64;
 			info.platform_data = &si2168_config;
 			request_module(info.type);
-			client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
-			if (client_demod == NULL ||
-					client_demod->dev.driver == NULL)
+			client_demod = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+			if (!i2c_client_has_driver(client_demod))
 				goto frontend_detach;
 			if (!try_module_get(client_demod->dev.driver->owner)) {
 				i2c_unregister_device(client_demod);
@@ -2004,9 +2001,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x60;
 			info.platform_data = &si2157_config;
 			request_module(info.type);
-			client_tuner = i2c_new_device(adapter, &info);
-			if (client_tuner == NULL ||
-					client_tuner->dev.driver == NULL)
+			client_tuner = i2c_new_client_device(adapter, &info);
+			if (!i2c_client_has_driver(client_tuner))
 				goto frontend_detach;
 
 			if (!try_module_get(client_tuner->dev.driver->owner)) {
@@ -2032,8 +2028,8 @@ static int dvb_register(struct cx23885_tsport *port)
 		info.addr = 0x64;
 		info.platform_data = &si2168_config;
 		request_module(info.type);
-		client_demod = i2c_new_device(&i2c_bus2->i2c_adap, &info);
-		if (client_demod == NULL || client_demod->dev.driver == NULL)
+		client_demod = i2c_new_client_device(&i2c_bus2->i2c_adap, &info);
+		if (!i2c_client_has_driver(client_demod))
 			goto frontend_detach;
 		if (!try_module_get(client_demod->dev.driver->owner)) {
 			i2c_unregister_device(client_demod);
@@ -2050,9 +2046,8 @@ static int dvb_register(struct cx23885_tsport *port)
 		info.addr = 0x60;
 		info.platform_data = &si2157_config;
 		request_module(info.type);
-		client_tuner = i2c_new_device(adapter, &info);
-		if (client_tuner == NULL ||
-				client_tuner->dev.driver == NULL)
+		client_tuner = i2c_new_client_device(adapter, &info);
+		if (!i2c_client_has_driver(client_tuner))
 			goto frontend_detach;
 		if (!try_module_get(client_tuner->dev.driver->owner)) {
 			i2c_unregister_device(client_tuner);
@@ -2080,8 +2075,8 @@ static int dvb_register(struct cx23885_tsport *port)
 		info.addr = 0x60;
 		info.platform_data = &ts2020_config;
 		request_module(info.type);
-		client_tuner = i2c_new_device(adapter, &info);
-		if (client_tuner == NULL || client_tuner->dev.driver == NULL)
+		client_tuner = i2c_new_client_device(adapter, &info);
+		if (!i2c_client_has_driver(client_tuner))
 			goto frontend_detach;
 		if (!try_module_get(client_tuner->dev.driver->owner)) {
 			i2c_unregister_device(client_tuner);
@@ -2129,8 +2124,8 @@ static int dvb_register(struct cx23885_tsport *port)
 		info.addr = 0x68;
 		info.platform_data = &m88ds3103_pdata;
 		request_module(info.type);
-		client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
-		if (client_demod == NULL || client_demod->dev.driver == NULL)
+		client_demod = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+		if (!i2c_client_has_driver(client_demod))
 			goto frontend_detach;
 		if (!try_module_get(client_demod->dev.driver->owner)) {
 			i2c_unregister_device(client_demod);
@@ -2149,8 +2144,8 @@ static int dvb_register(struct cx23885_tsport *port)
 		info.addr = 0x60;
 		info.platform_data = &ts2020_config;
 		request_module(info.type);
-		client_tuner = i2c_new_device(adapter, &info);
-		if (client_tuner == NULL || client_tuner->dev.driver == NULL)
+		client_tuner = i2c_new_client_device(adapter, &info);
+		if (!i2c_client_has_driver(client_tuner))
 			goto frontend_detach;
 		if (!try_module_get(client_tuner->dev.driver->owner)) {
 			i2c_unregister_device(client_tuner);
@@ -2194,8 +2189,8 @@ static int dvb_register(struct cx23885_tsport *port)
 		info.addr = 0x64;
 		info.platform_data = &si2168_config;
 		request_module(info.type);
-		client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
-		if (client_demod == NULL || client_demod->dev.driver == NULL)
+		client_demod = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+		if (!i2c_client_has_driver(client_demod))
 			goto frontend_detach;
 		if (!try_module_get(client_demod->dev.driver->owner)) {
 			i2c_unregister_device(client_demod);
@@ -2212,9 +2207,8 @@ static int dvb_register(struct cx23885_tsport *port)
 		info.addr = 0x60;
 		info.platform_data = &si2157_config;
 		request_module(info.type);
-		client_tuner = i2c_new_device(adapter, &info);
-		if (client_tuner == NULL ||
-				client_tuner->dev.driver == NULL)
+		client_tuner = i2c_new_client_device(adapter, &info);
+		if (!i2c_client_has_driver(client_tuner))
 			goto frontend_detach;
 		if (!try_module_get(client_tuner->dev.driver->owner)) {
 			i2c_unregister_device(client_tuner);
@@ -2245,8 +2239,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x0b;
 			info.platform_data = &a8293_pdata;
 			request_module("a8293");
-			client_sec = i2c_new_device(&i2c_bus->i2c_adap, &info);
-			if (!client_sec || !client_sec->dev.driver)
+			client_sec = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+			if (!i2c_client_has_driver(client_sec))
 				goto frontend_detach;
 			if (!try_module_get(client_sec->dev.driver->owner)) {
 				i2c_unregister_device(client_sec);
@@ -2262,8 +2256,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x21;
 			info.platform_data = &m88rs6000t_config;
 			request_module("%s", info.type);
-			client_tuner = i2c_new_device(adapter, &info);
-			if (!client_tuner || !client_tuner->dev.driver)
+			client_tuner = i2c_new_client_device(adapter, &info);
+			if (!i2c_client_has_driver(client_tuner))
 				goto frontend_detach;
 			if (!try_module_get(client_tuner->dev.driver->owner)) {
 				i2c_unregister_device(client_tuner);
@@ -2287,8 +2281,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x64;
 			info.platform_data = &si2168_config;
 			request_module("%s", info.type);
-			client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
-			if (!client_demod || !client_demod->dev.driver)
+			client_demod = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+			if (!i2c_client_has_driver(client_demod))
 				goto frontend_detach;
 			if (!try_module_get(client_demod->dev.driver->owner)) {
 				i2c_unregister_device(client_demod);
@@ -2305,8 +2299,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x60;
 			info.platform_data = &si2157_config;
 			request_module("%s", info.type);
-			client_tuner = i2c_new_device(&i2c_bus2->i2c_adap, &info);
-			if (!client_tuner || !client_tuner->dev.driver) {
+			client_tuner = i2c_new_client_device(&i2c_bus2->i2c_adap, &info);
+			if (!i2c_client_has_driver(client_tuner)) {
 				module_put(client_demod->dev.driver->owner);
 				i2c_unregister_device(client_demod);
 				port->i2c_client_demod = NULL;
@@ -2340,8 +2334,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x64;
 			info.platform_data = &si2168_config;
 			request_module("%s", info.type);
-			client_demod = i2c_new_device(&dev->i2c_bus[0].i2c_adap, &info);
-			if (!client_demod || !client_demod->dev.driver)
+			client_demod = i2c_new_client_device(&dev->i2c_bus[0].i2c_adap, &info);
+			if (!i2c_client_has_driver(client_demod))
 				goto frontend_detach;
 			if (!try_module_get(client_demod->dev.driver->owner)) {
 				i2c_unregister_device(client_demod);
@@ -2358,8 +2352,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x60;
 			info.platform_data = &si2157_config;
 			request_module("%s", info.type);
-			client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
-			if (!client_tuner || !client_tuner->dev.driver) {
+			client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
+			if (!i2c_client_has_driver(client_tuner)) {
 				module_put(client_demod->dev.driver->owner);
 				i2c_unregister_device(client_demod);
 				port->i2c_client_demod = NULL;
@@ -2387,8 +2381,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x66;
 			info.platform_data = &si2168_config;
 			request_module("%s", info.type);
-			client_demod = i2c_new_device(&dev->i2c_bus[0].i2c_adap, &info);
-			if (!client_demod || !client_demod->dev.driver)
+			client_demod = i2c_new_client_device(&dev->i2c_bus[0].i2c_adap, &info);
+			if (!i2c_client_has_driver(client_demod))
 				goto frontend_detach;
 			if (!try_module_get(client_demod->dev.driver->owner)) {
 				i2c_unregister_device(client_demod);
@@ -2405,8 +2399,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x62;
 			info.platform_data = &si2157_config;
 			request_module("%s", info.type);
-			client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
-			if (!client_tuner || !client_tuner->dev.driver) {
+			client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
+			if (!i2c_client_has_driver(client_tuner)) {
 				module_put(client_demod->dev.driver->owner);
 				i2c_unregister_device(client_demod);
 				port->i2c_client_demod = NULL;
@@ -2447,8 +2441,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x60;
 			info.platform_data = &si2157_config;
 			request_module("%s", info.type);
-			client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
-			if (!client_tuner || !client_tuner->dev.driver) {
+			client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
+			if (!i2c_client_has_driver(client_tuner)) {
 				module_put(client_demod->dev.driver->owner);
 				i2c_unregister_device(client_demod);
 				port->i2c_client_demod = NULL;
@@ -2483,8 +2477,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x62;
 			info.platform_data = &si2157_config;
 			request_module("%s", info.type);
-			client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
-			if (!client_tuner || !client_tuner->dev.driver) {
+			client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
+			if (!i2c_client_has_driver(client_tuner)) {
 				module_put(client_demod->dev.driver->owner);
 				i2c_unregister_device(client_demod);
 				port->i2c_client_demod = NULL;
@@ -2523,8 +2517,8 @@ static int dvb_register(struct cx23885_tsport *port)
 			info.addr = 0x60;
 			info.platform_data = &si2157_config;
 			request_module("%s", info.type);
-			client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
-			if (!client_tuner || !client_tuner->dev.driver)
+			client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
+			if (!i2c_client_has_driver(client_tuner))
 				goto frontend_detach;
 
 			if (!try_module_get(client_tuner->dev.driver->owner)) {
diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c
index 4f327ee..f51fad3 100644
--- a/drivers/media/pci/cx23885/cx23885-i2c.c
+++ b/drivers/media/pci/cx23885/cx23885-i2c.c
@@ -337,8 +337,8 @@ int cx23885_i2c_register(struct cx23885_i2c *bus)
 		strscpy(info.type, "ir_video", I2C_NAME_SIZE);
 		/* Use quick read command for probe, some IR chips don't
 		 * support writes */
-		i2c_new_probed_device(&bus->i2c_adap, &info, addr_list,
-				      i2c_probe_func_quick_read);
+		i2c_new_scanned_device(&bus->i2c_adap, &info, addr_list,
+				       i2c_probe_func_quick_read);
 	}
 
 	return bus->i2c_rc;
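
The i2c conversions above (cx18-i2c.c, cx23885-dvb.c, cx23885-i2c.c) move from the deprecated i2c_new_probed_device()/i2c_new_device(), which return NULL on failure, to i2c_new_scanned_device()/i2c_new_client_device(), which return an ERR_PTR. i2c_client_has_driver() is false for ERR_PTR values, NULL and clients without a bound driver, so a single check replaces the old two-clause test. A minimal sketch of the resulting pattern; the function name and variables are illustrative only:

  #include <linux/err.h>
  #include <linux/i2c.h>
  #include <linux/module.h>

  /* Sketch: attach an i2c sub-device with the ERR_PTR-based API. */
  static int sketch_attach_client(struct i2c_adapter *adap,
                                  struct i2c_board_info *info)
  {
          struct i2c_client *client;

          client = i2c_new_client_device(adap, info);
          if (!i2c_client_has_driver(client))     /* ERR_PTR, NULL or no driver */
                  return -ENODEV;

          if (!try_module_get(client->dev.driver->owner)) {
                  i2c_unregister_device(client);
                  return -ENODEV;
          }

          return 0;
  }
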
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 8098b15..7fc408e 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -257,7 +257,8 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
 		(dev->board == CX23885_BOARD_MYGICA_X8507) ||
 		(dev->board == CX23885_BOARD_AVERMEDIA_HC81R) ||
 		(dev->board == CX23885_BOARD_VIEWCAST_260E) ||
-		(dev->board == CX23885_BOARD_VIEWCAST_460E)) {
+		(dev->board == CX23885_BOARD_VIEWCAST_460E) ||
+		(dev->board == CX23885_BOARD_AVERMEDIA_CE310B)) {
 		/* Configure audio routing */
 		v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
 			INPUT(input)->amux, 0, 0);
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index a95a2e4..c472498e 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -101,6 +101,7 @@
 #define CX23885_BOARD_HAUPPAUGE_STARBURST2     59
 #define CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885 60
 #define CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885 61
+#define CX23885_BOARD_AVERMEDIA_CE310B         62
 
 #define GPIO_0 0x00000001
 #define GPIO_1 0x00000002
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index c2f2d7c..3016164 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -639,7 +639,6 @@ static struct page *snd_cx25821_page(struct snd_pcm_substream *substream,
 static const struct snd_pcm_ops snd_cx25821_pcm_ops = {
 	.open = snd_cx25821_pcm_open,
 	.close = snd_cx25821_close,
-	.ioctl = snd_pcm_lib_ioctl,
 	.hw_params = snd_cx25821_hw_params,
 	.hw_free = snd_cx25821_hw_free,
 	.prepare = snd_cx25821_prepare,
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index e1e71ae..7d7acee 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -585,7 +585,6 @@ static struct page *snd_cx88_page(struct snd_pcm_substream *substream,
 static const struct snd_pcm_ops snd_cx88_pcm_ops = {
 	.open = snd_cx88_pcm_open,
 	.close = snd_cx88_close,
-	.ioctl = snd_pcm_lib_ioctl,
 	.hw_params = snd_cx88_hw_params,
 	.hw_free = snd_cx88_hw_free,
 	.prepare = snd_cx88_prepare,
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index 589f52d..c7c2acd 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -613,7 +613,7 @@ void cx88_i2c_init_ir(struct cx88_core *core)
 	}
 
 	/*
-	 * We can't call i2c_new_probed_device() because it uses
+	 * We can't call i2c_new_scanned_device() because it uses
 	 * quick writes for probing and at least some RC receiver
 	 * devices only reply to reads.
 	 * Also, Hauppauge XVR needs to be specified, as address 0x71
diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig
index 36c0891..c729e54 100644
--- a/drivers/media/pci/ivtv/Kconfig
+++ b/drivers/media/pci/ivtv/Kconfig
@@ -24,7 +24,7 @@
 	  PCI personal video recorder devices.
 
 	  This is used in devices such as the Hauppauge PVR-150/250/350/500
-	  cards. There is a driver homepage at <http://www.ivtvdriver.org>.
+	  cards.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called ivtv.
@@ -67,8 +67,7 @@
 	  This is a framebuffer driver for the Conexant cx23415 MPEG
 	  encoder/decoder.
 
-	  This is used in the Hauppauge PVR-350 card. There is a driver
-	  homepage at <http://www.ivtvdriver.org>.
+	  This is used in the Hauppauge PVR-350 card.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called ivtvfb.
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index 9e6019a..8f346d7 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -16,8 +16,6 @@
 #include "ivtv-alsa.h"
 #include "ivtv-alsa-pcm.h"
 
-#include <linux/vmalloc.h>
-
 #include <sound/core.h>
 #include <sound/pcm.h>
 
@@ -206,67 +204,6 @@ static int snd_ivtv_pcm_capture_close(struct snd_pcm_substream *substream)
 	return 0;
 }
 
-static int snd_ivtv_pcm_ioctl(struct snd_pcm_substream *substream,
-		     unsigned int cmd, void *arg)
-{
-	struct snd_ivtv_card *itvsc = snd_pcm_substream_chip(substream);
-	int ret;
-
-	snd_ivtv_lock(itvsc);
-	ret = snd_pcm_lib_ioctl(substream, cmd, arg);
-	snd_ivtv_unlock(itvsc);
-	return ret;
-}
-
-
-static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
-					size_t size)
-{
-	struct snd_pcm_runtime *runtime = subs->runtime;
-
-	dprintk("Allocating vbuffer\n");
-	if (runtime->dma_area) {
-		if (runtime->dma_bytes > size)
-			return 0;
-
-		vfree(runtime->dma_area);
-	}
-	runtime->dma_area = vmalloc(size);
-	if (!runtime->dma_area)
-		return -ENOMEM;
-
-	runtime->dma_bytes = size;
-
-	return 0;
-}
-
-static int snd_ivtv_pcm_hw_params(struct snd_pcm_substream *substream,
-			 struct snd_pcm_hw_params *params)
-{
-	dprintk("%s called\n", __func__);
-
-	return snd_pcm_alloc_vmalloc_buffer(substream,
-					   params_buffer_bytes(params));
-}
-
-static int snd_ivtv_pcm_hw_free(struct snd_pcm_substream *substream)
-{
-	struct snd_ivtv_card *itvsc = snd_pcm_substream_chip(substream);
-	unsigned long flags;
-	unsigned char *dma_area = NULL;
-
-	spin_lock_irqsave(&itvsc->slock, flags);
-	if (substream->runtime->dma_area) {
-		dprintk("freeing pcm capture region\n");
-		dma_area = substream->runtime->dma_area;
-		substream->runtime->dma_area = NULL;
-	}
-	spin_unlock_irqrestore(&itvsc->slock, flags);
-	vfree(dma_area);
-
-	return 0;
-}
-
 static int snd_ivtv_pcm_prepare(struct snd_pcm_substream *substream)
 {
 	struct snd_ivtv_card *itvsc = snd_pcm_substream_chip(substream);
@@ -296,24 +233,12 @@ snd_pcm_uframes_t snd_ivtv_pcm_pointer(struct snd_pcm_substream *substream)
 	return hwptr_done;
 }
 
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
-					     unsigned long offset)
-{
-	void *pageptr = subs->runtime->dma_area + offset;
-
-	return vmalloc_to_page(pageptr);
-}
-
 static const struct snd_pcm_ops snd_ivtv_pcm_capture_ops = {
 	.open		= snd_ivtv_pcm_capture_open,
 	.close		= snd_ivtv_pcm_capture_close,
-	.ioctl		= snd_ivtv_pcm_ioctl,
-	.hw_params	= snd_ivtv_pcm_hw_params,
-	.hw_free	= snd_ivtv_pcm_hw_free,
 	.prepare	= snd_ivtv_pcm_prepare,
 	.trigger	= snd_ivtv_pcm_trigger,
 	.pointer	= snd_ivtv_pcm_pointer,
-	.page		= snd_pcm_get_vmalloc_page,
 };
 
 int snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc)
@@ -339,6 +264,7 @@ int snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc)
 
 	snd_pcm_set_ops(sp, SNDRV_PCM_STREAM_CAPTURE,
 			&snd_ivtv_pcm_capture_ops);
+	snd_pcm_set_managed_buffer_all(sp, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
 	sp->info_flags = 0;
 	sp->private_data = itvsc;
 	strscpy(sp->name, itv->card_name, sizeof(sp->name));
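
The ivtv ALSA change above is the same conversion applied to solo6x10 and tw686x further down: the explicit vmalloc buffer handling (.ioctl/.hw_params/.hw_free/.page) is dropped and snd_pcm_set_managed_buffer_all() lets the PCM core allocate and release the buffer around hw_params/hw_free. A minimal sketch of the pattern, assuming a capture-only device; the names are placeholders:

  #include <sound/core.h>
  #include <sound/pcm.h>

  /*
   * Sketch: with managed buffers the snd_pcm_ops table keeps only the
   * driver-specific callbacks (open/close/prepare/trigger/pointer).
   */
  static void sketch_pcm_setup(struct snd_pcm *pcm,
                               const struct snd_pcm_ops *capture_ops)
  {
          snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, capture_ops);
          /* NULL device, 0/0 sizes: a vmalloc buffer sized at hw_params time */
          snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
  }
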
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 1f79700..b1dde1b 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -23,7 +23,6 @@
  * Driver for the Conexant CX23415/CX23416 chip.
  * Author: Kevin Thayer (nufan_wfk at yahoo.com)
  * License: GPL
- * http://www.ivtvdriver.org
  *
  * -----
  * MPG600/MPG160 support by  T.Adachi <tadachi@tadachi-net.com>
@@ -723,7 +722,7 @@ static void ivtv_process_options(struct ivtv *itv)
 		IVTV_ERR("              %s based\n", chipname);
 		IVTV_ERR("Defaulting to %s card\n", itv->card->name);
 		IVTV_ERR("Please mail the vendor/device and subsystem vendor/device IDs and what kind of\n");
-		IVTV_ERR("card you have to the ivtv-devel mailinglist (www.ivtvdriver.org)\n");
+		IVTV_ERR("card you have to the linux-media mailinglist (www.linuxtv.org)\n");
 		IVTV_ERR("Prefix your subject line with [UNKNOWN IVTV CARD].\n");
 	}
 	itv->v4l2_cap = itv->card->v4l2_capabilities;
diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
index cafba6b..e5efe52 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.h
+++ b/drivers/media/pci/ivtv/ivtv-driver.h
@@ -28,7 +28,6 @@
  * Driver for the cx23415/6 chip.
  * Author: Kevin Thayer (nufan_wfk at yahoo.com)
  * License: GPL
- * http://www.ivtvdriver.org
  *
  * -----
  * MPG600/MPG160 support by  T.Adachi <tadachi@tadachi-net.com>
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index 0772d75..982045c 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -208,12 +208,12 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
 	info.platform_data = init_data;
 	strscpy(info.type, type, I2C_NAME_SIZE);
 
-	return i2c_new_probed_device(adap, &info, addr_list, NULL) == NULL ?
+	return IS_ERR(i2c_new_scanned_device(adap, &info, addr_list, NULL)) ?
 	       -1 : 0;
 }
 
 /* Instantiate the IR receiver device using probing -- undesirable */
-struct i2c_client *ivtv_i2c_new_ir_legacy(struct ivtv *itv)
+void ivtv_i2c_new_ir_legacy(struct ivtv *itv)
 {
 	struct i2c_board_info info;
 	/*
@@ -235,7 +235,7 @@ struct i2c_client *ivtv_i2c_new_ir_legacy(struct ivtv *itv)
 
 	memset(&info, 0, sizeof(struct i2c_board_info));
 	strscpy(info.type, "ir_video", I2C_NAME_SIZE);
-	return i2c_new_probed_device(&itv->i2c_adap, &info, addr_list, NULL);
+	i2c_new_scanned_device(&itv->i2c_adap, &info, addr_list, NULL);
 }
 
 int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.h b/drivers/media/pci/ivtv/ivtv-i2c.h
index 462f734..2d9cdaa 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.h
+++ b/drivers/media/pci/ivtv/ivtv-i2c.h
@@ -9,7 +9,7 @@
 #ifndef IVTV_I2C_H
 #define IVTV_I2C_H
 
-struct i2c_client *ivtv_i2c_new_ir_legacy(struct ivtv *itv);
+void ivtv_i2c_new_ir_legacy(struct ivtv *itv);
 int ivtv_i2c_register(struct ivtv *itv, unsigned idx);
 struct v4l2_subdev *ivtv_find_hw(struct ivtv *itv, u32 hw);
 
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 1daf9e0..0c28598 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -925,7 +925,7 @@ static int ivtvfb_blank(int blank_mode, struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops ivtvfb_ops = {
+static const struct fb_ops ivtvfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_write       = ivtvfb_write,
 	.fb_check_var   = ivtvfb_check_var,
@@ -1049,7 +1049,6 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
 
 	oi->ivtvfb_info.node = -1;
 	oi->ivtvfb_info.flags = FBINFO_FLAG_DEFAULT;
-	oi->ivtvfb_info.fbops = &ivtvfb_ops;
 	oi->ivtvfb_info.par = itv;
 	oi->ivtvfb_info.var = oi->ivtvfb_defined;
 	oi->ivtvfb_info.fix = oi->ivtvfb_fix;
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 0e61c81..3a4c29b 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1266,7 +1266,7 @@ static int vidioc_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
 		buf->flags |= V4L2_BUF_FLAG_DONE;
 
 	buf->field = V4L2_FIELD_NONE;
-	buf->timestamp = ns_to_timeval(meye.grab_buffer[index].ts);
+	v4l2_buffer_set_timestamp(buf, meye.grab_buffer[index].ts);
 	buf->sequence = meye.grab_buffer[index].sequence;
 	buf->memory = V4L2_MEMORY_MMAP;
 	buf->m.offset = index * gbufsize;
@@ -1332,7 +1332,7 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
 	buf->bytesused = meye.grab_buffer[reqnr].size;
 	buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 	buf->field = V4L2_FIELD_NONE;
-	buf->timestamp = ns_to_timeval(meye.grab_buffer[reqnr].ts);
+	v4l2_buffer_set_timestamp(buf, meye.grab_buffer[reqnr].ts);
 	buf->sequence = meye.grab_buffer[reqnr].sequence;
 	buf->memory = V4L2_MEMORY_MMAP;
 	buf->m.offset = reqnr * gbufsize;
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index 0385127..544ca57 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -865,7 +865,6 @@ static struct page *snd_card_saa7134_page(struct snd_pcm_substream *substream,
 static const struct snd_pcm_ops snd_card_saa7134_capture_ops = {
 	.open =			snd_card_saa7134_capture_open,
 	.close =		snd_card_saa7134_capture_close,
-	.ioctl =		snd_pcm_lib_ioctl,
 	.hw_params =		snd_card_saa7134_hw_params,
 	.hw_free =		snd_card_saa7134_hw_free,
 	.prepare =		snd_card_saa7134_capture_prepare,
diff --git a/drivers/media/pci/saa7164/saa7164-dvb.c b/drivers/media/pci/saa7164/saa7164-dvb.c
index 05ab473..bf8c2bb 100644
--- a/drivers/media/pci/saa7164/saa7164-dvb.c
+++ b/drivers/media/pci/saa7164/saa7164-dvb.c
@@ -116,8 +116,8 @@ static int si2157_attach(struct saa7164_port *port, struct i2c_adapter *adapter,
 
 	request_module(bi.type);
 
-	tuner = i2c_new_device(adapter, &bi);
-	if (tuner == NULL || tuner->dev.driver == NULL)
+	tuner = i2c_new_client_device(adapter, &bi);
+	if (!i2c_client_has_driver(tuner))
 		return -ENODEV;
 
 	if (!try_module_get(tuner->dev.driver->owner)) {
@@ -637,9 +637,8 @@ int saa7164_dvb_register(struct saa7164_port *port)
 			info.addr = 0xc8 >> 1;
 			info.platform_data = &si2168_config;
 			request_module(info.type);
-			client_demod = i2c_new_device(&dev->i2c_bus[2].i2c_adap,
-						      &info);
-			if (!client_demod || !client_demod->dev.driver)
+			client_demod = i2c_new_client_device(&dev->i2c_bus[2].i2c_adap, &info);
+			if (!i2c_client_has_driver(client_demod))
 				goto frontend_detach;
 
 			if (!try_module_get(client_demod->dev.driver->owner)) {
@@ -657,9 +656,8 @@ int saa7164_dvb_register(struct saa7164_port *port)
 			info.addr = 0xc0 >> 1;
 			info.platform_data = &si2157_config;
 			request_module(info.type);
-			client_tuner = i2c_new_device(&dev->i2c_bus[0].i2c_adap,
-						      &info);
-			if (!client_tuner || !client_tuner->dev.driver) {
+			client_tuner = i2c_new_client_device(&dev->i2c_bus[0].i2c_adap, &info);
+			if (!i2c_client_has_driver(client_tuner)) {
 				module_put(client_demod->dev.driver->owner);
 				i2c_unregister_device(client_demod);
 				goto frontend_detach;
@@ -682,9 +680,8 @@ int saa7164_dvb_register(struct saa7164_port *port)
 			info.addr = 0xcc >> 1;
 			info.platform_data = &si2168_config;
 			request_module(info.type);
-			client_demod = i2c_new_device(&dev->i2c_bus[2].i2c_adap,
-						      &info);
-			if (!client_demod || !client_demod->dev.driver)
+			client_demod = i2c_new_client_device(&dev->i2c_bus[2].i2c_adap, &info);
+			if (!i2c_client_has_driver(client_demod))
 				goto frontend_detach;
 
 			if (!try_module_get(client_demod->dev.driver->owner)) {
@@ -702,9 +699,8 @@ int saa7164_dvb_register(struct saa7164_port *port)
 			info.addr = 0xc0 >> 1;
 			info.platform_data = &si2157_config;
 			request_module(info.type);
-			client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap,
-						      &info);
-			if (!client_tuner || !client_tuner->dev.driver) {
+			client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
+			if (!i2c_client_has_driver(client_tuner)) {
 				module_put(client_demod->dev.driver->owner);
 				i2c_unregister_device(client_demod);
 				goto frontend_detach;
diff --git a/drivers/media/pci/smipcie/smipcie-main.c b/drivers/media/pci/smipcie/smipcie-main.c
index 1fb7846..9ca0fc3 100644
--- a/drivers/media/pci/smipcie/smipcie-main.c
+++ b/drivers/media/pci/smipcie/smipcie-main.c
@@ -484,8 +484,8 @@ static struct i2c_client *smi_add_i2c_client(struct i2c_adapter *adapter,
 	struct i2c_client *client;
 
 	request_module(info->type);
-	client = i2c_new_device(adapter, info);
-	if (client == NULL || client->dev.driver == NULL)
+	client = i2c_new_client_device(adapter, info);
+	if (!i2c_client_has_driver(client))
 		goto err_add_i2c_client;
 
 	if (!try_module_get(client->dev.driver->owner)) {
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index eaa57d8..d6d16e8 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -97,17 +97,6 @@ void solo_g723_isr(struct solo_dev *solo_dev)
 	}
 }
 
-static int snd_solo_hw_params(struct snd_pcm_substream *ss,
-			      struct snd_pcm_hw_params *hw_params)
-{
-	return snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(hw_params));
-}
-
-static int snd_solo_hw_free(struct snd_pcm_substream *ss)
-{
-	return snd_pcm_lib_free_pages(ss);
-}
-
 static const struct snd_pcm_hardware snd_solo_pcm_hw = {
 	.info			= (SNDRV_PCM_INFO_MMAP |
 				   SNDRV_PCM_INFO_INTERLEAVED |
@@ -270,9 +259,6 @@ static int snd_solo_pcm_copy_kernel(struct snd_pcm_substream *ss, int channel,
 static const struct snd_pcm_ops snd_solo_pcm_ops = {
 	.open = snd_solo_pcm_open,
 	.close = snd_solo_pcm_close,
-	.ioctl = snd_pcm_lib_ioctl,
-	.hw_params = snd_solo_hw_params,
-	.hw_free = snd_solo_hw_free,
 	.prepare = snd_solo_pcm_prepare,
 	.trigger = snd_solo_pcm_trigger,
 	.pointer = snd_solo_pcm_pointer,
@@ -351,11 +337,11 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
 	     ss; ss = ss->next, i++)
 		sprintf(ss->name, "Camera #%d Audio", i);
 
-	snd_pcm_lib_preallocate_pages_for_all(pcm,
-					SNDRV_DMA_TYPE_CONTINUOUS,
-					NULL,
-					G723_PERIOD_BYTES * PERIODS,
-					G723_PERIOD_BYTES * PERIODS);
+	snd_pcm_set_managed_buffer_all(pcm,
+				       SNDRV_DMA_TYPE_CONTINUOUS,
+				       NULL,
+				       G723_PERIOD_BYTES * PERIODS,
+				       G723_PERIOD_BYTES * PERIODS);
 
 	solo_dev->snd_pcm = pcm;
 
diff --git a/drivers/media/pci/tw686x/tw686x-audio.c b/drivers/media/pci/tw686x/tw686x-audio.c
index 7786e51..54144e2 100644
--- a/drivers/media/pci/tw686x/tw686x-audio.c
+++ b/drivers/media/pci/tw686x/tw686x-audio.c
@@ -78,17 +78,6 @@ void tw686x_audio_irq(struct tw686x_dev *dev, unsigned long requests,
 	}
 }
 
-static int tw686x_pcm_hw_params(struct snd_pcm_substream *ss,
-				struct snd_pcm_hw_params *hw_params)
-{
-	return snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(hw_params));
-}
-
-static int tw686x_pcm_hw_free(struct snd_pcm_substream *ss)
-{
-	return snd_pcm_lib_free_pages(ss);
-}
-
 /*
  * Audio parameters are global and shared among all
  * capture channels. The driver prevents changes to
@@ -269,9 +258,6 @@ static snd_pcm_uframes_t tw686x_pcm_pointer(struct snd_pcm_substream *ss)
 static const struct snd_pcm_ops tw686x_pcm_ops = {
 	.open = tw686x_pcm_open,
 	.close = tw686x_pcm_close,
-	.ioctl = snd_pcm_lib_ioctl,
-	.hw_params = tw686x_pcm_hw_params,
-	.hw_free = tw686x_pcm_hw_free,
 	.prepare = tw686x_pcm_prepare,
 	.trigger = tw686x_pcm_trigger,
 	.pointer = tw686x_pcm_pointer,
@@ -298,7 +284,7 @@ static int tw686x_snd_pcm_init(struct tw686x_dev *dev)
 	     ss; ss = ss->next, i++)
 		snprintf(ss->name, sizeof(ss->name), "vch%u audio", i);
 
-	snd_pcm_lib_preallocate_pages_for_all(pcm,
+	snd_pcm_set_managed_buffer_all(pcm,
 				SNDRV_DMA_TYPE_DEV,
 				&dev->pci_dev->dev,
 				TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX,
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index e84f35d..f65e98d 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -151,7 +151,7 @@
 config VIDEO_TI_CAL
 	tristate "TI CAL (Camera Adaptation Layer) driver"
 	depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
-	depends on SOC_DRA7XX || COMPILE_TEST
+	depends on SOC_DRA7XX || ARCH_K3 || COMPILE_TEST
 	select VIDEOBUF2_DMA_CONTIG
 	select V4L2_FWNODE
 	help
@@ -200,7 +200,7 @@
 
 config VIDEO_MEDIATEK_JPEG
 	tristate "Mediatek JPEG Codec driver"
-	depends on MTK_IOMMU_V1 || COMPILE_TEST
+	depends on MTK_IOMMU_V1 || MTK_IOMMU || COMPILE_TEST
 	depends on VIDEO_DEV && VIDEO_V4L2
 	depends on ARCH_MEDIATEK || COMPILE_TEST
 	select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c
index c1c776b..d7669a0 100644
--- a/drivers/media/platform/atmel/atmel-isc-base.c
+++ b/drivers/media/platform/atmel/atmel-isc-base.c
@@ -73,6 +73,9 @@ const struct isc_format controller_formats[] = {
 	{
 		.fourcc		= V4L2_PIX_FMT_GREY,
 	},
+	{
+		.fourcc		= V4L2_PIX_FMT_Y10,
+	},
 };
 
 /* This is a list of formats that the ISC can receive as *input* */
@@ -164,6 +167,12 @@ struct isc_format formats_list[] = {
 		.mbus_code	= MEDIA_BUS_FMT_RGB565_2X8_LE,
 		.pfe_cfg0_bps	= ISC_PFE_CFG0_BPS_EIGHT,
 	},
+	{
+		.fourcc		= V4L2_PIX_FMT_Y10,
+		.mbus_code	= MEDIA_BUS_FMT_Y10_1X10,
+		.pfe_cfg0_bps	= ISC_PFE_CFG0_BPS_TEN,
+	},
+
 };
 
 /* Gamma table with gamma 1/2.2 */
@@ -211,6 +220,10 @@ const u32 isc_gamma_table[GAMMA_MAX + 1][GAMMA_ENTRIES] = {
 #define ISC_IS_FORMAT_RAW(mbus_code) \
 	(((mbus_code) & 0xf000) == 0x3000)
 
+#define ISC_IS_FORMAT_GREY(mbus_code) \
+	(((mbus_code) == MEDIA_BUS_FMT_Y10_1X10) | \
+	(((mbus_code) == MEDIA_BUS_FMT_Y8_1X8)))
+
 static inline void isc_update_awb_ctrls(struct isc_device *isc)
 {
 	struct isc_ctrls *ctrls = &isc->ctrls;
@@ -1003,6 +1016,7 @@ static int isc_try_validate_formats(struct isc_device *isc)
 		rgb = true;
 		break;
 	case V4L2_PIX_FMT_GREY:
+	case V4L2_PIX_FMT_Y10:
 		ret = 0;
 		grey = true;
 		break;
@@ -1010,34 +1024,29 @@ static int isc_try_validate_formats(struct isc_device *isc)
 	/* any other different formats are not supported */
 		ret = -EINVAL;
 	}
-
-	/* we cannot output RAW/Grey if we do not receive RAW */
-	if ((bayer || grey) &&
-	    !ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code))
-		return -EINVAL;
-
 	v4l2_dbg(1, debug, &isc->v4l2_dev,
 		 "Format validation, requested rgb=%u, yuv=%u, grey=%u, bayer=%u\n",
 		 rgb, yuv, grey, bayer);
 
+	/* we cannot output RAW if we do not receive RAW */
+	if ((bayer) && !ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code))
+		return -EINVAL;
+
+	/* we cannot output GREY if we do not receive RAW/GREY */
+	if (grey && !ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code) &&
+	    !ISC_IS_FORMAT_GREY(isc->try_config.sd_format->mbus_code))
+		return -EINVAL;
+
 	return ret;
 }
 
 /*
  * Configures the RLP and DMA modules, depending on the output format
  * configured for the ISC.
- * If direct_dump == true, just dump raw data 8 bits.
+ * If direct_dump == true, just dump raw data 8/16 bits depending on format.
  */
 static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
 {
-	if (direct_dump) {
-		isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8;
-		isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
-		isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
-		isc->try_config.bpp = 16;
-		return 0;
-	}
-
 	switch (isc->try_config.fourcc) {
 	case V4L2_PIX_FMT_SBGGR8:
 	case V4L2_PIX_FMT_SGBRG8:
@@ -1115,9 +1124,23 @@ static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
 		isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
 		isc->try_config.bpp = 8;
 		break;
+	case V4L2_PIX_FMT_Y10:
+		isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY10;
+		isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
+		isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+		isc->try_config.bpp = 16;
+		break;
 	default:
 		return -EINVAL;
 	}
+
+	if (direct_dump) {
+		isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8;
+		isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
+		isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+		return 0;
+	}
+
 	return 0;
 }
 
@@ -1187,13 +1210,44 @@ static int isc_try_configure_pipeline(struct isc_device *isc)
 	return 0;
 }
 
+static void isc_try_fse(struct isc_device *isc,
+			struct v4l2_subdev_pad_config *pad_cfg)
+{
+	int ret;
+	struct v4l2_subdev_frame_size_enum fse = {};
+
+	/*
+	 * If we do not know yet which format the subdev is using, we cannot
+	 * do anything.
+	 */
+	if (!isc->try_config.sd_format)
+		return;
+
+	fse.code = isc->try_config.sd_format->mbus_code;
+	fse.which = V4L2_SUBDEV_FORMAT_TRY;
+
+	ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size,
+			       pad_cfg, &fse);
+	/*
+	 * Attempt to obtain format size from subdev. If not available,
+	 * just use the maximum ISC can receive.
+	 */
+	if (ret) {
+		pad_cfg->try_crop.width = ISC_MAX_SUPPORT_WIDTH;
+		pad_cfg->try_crop.height = ISC_MAX_SUPPORT_HEIGHT;
+	} else {
+		pad_cfg->try_crop.width = fse.max_width;
+		pad_cfg->try_crop.height = fse.max_height;
+	}
+}
+
 static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
 			u32 *code)
 {
 	int i;
 	struct isc_format *sd_fmt = NULL, *direct_fmt = NULL;
 	struct v4l2_pix_format *pixfmt = &f->fmt.pix;
-	struct v4l2_subdev_pad_config pad_cfg;
+	struct v4l2_subdev_pad_config pad_cfg = {};
 	struct v4l2_subdev_format format = {
 		.which = V4L2_SUBDEV_FORMAT_TRY,
 	};
@@ -1290,6 +1344,9 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
 	if (ret)
 		goto isc_try_fmt_err;
 
+	/* Obtain frame sizes if possible to have crop requirements ready */
+	isc_try_fse(isc, &pad_cfg);
+
 	v4l2_fill_mbus_format(&format.format, pixfmt, mbus_code);
 	ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
 			       &pad_cfg, &format);
@@ -1414,6 +1471,7 @@ static int isc_enum_framesizes(struct file *file, void *fh,
 {
 	struct isc_device *isc = video_drvdata(file);
 	struct v4l2_subdev_frame_size_enum fse = {
+		.code = isc->config.sd_format->mbus_code,
 		.index = fsize->index,
 		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
 	};
@@ -1436,8 +1494,6 @@ static int isc_enum_framesizes(struct file *file, void *fh,
 	if (ret)
 		return ret;
 
-	fse.code = isc->config.sd_format->mbus_code;
-
 	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
 	fsize->discrete.width = fse.max_width;
 	fsize->discrete.height = fse.max_height;
@@ -1450,6 +1506,7 @@ static int isc_enum_frameintervals(struct file *file, void *fh,
 {
 	struct isc_device *isc = video_drvdata(file);
 	struct v4l2_subdev_frame_interval_enum fie = {
+		.code = isc->config.sd_format->mbus_code,
 		.index = fival->index,
 		.width = fival->width,
 		.height = fival->height,
@@ -1474,7 +1531,6 @@ static int isc_enum_frameintervals(struct file *file, void *fh,
 	if (ret)
 		return ret;
 
-	fie.code = isc->config.sd_format->mbus_code;
 	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
 	fival->discrete = fie.interval;
 
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index 428f117..963dfd6 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -148,7 +148,8 @@ static void configure_geometry(struct atmel_isi *isi)
 	u32 fourcc = isi->current_fmt->fourcc;
 
 	isi->enable_preview_path = fourcc == V4L2_PIX_FMT_RGB565 ||
-				   fourcc == V4L2_PIX_FMT_RGB32;
+				   fourcc == V4L2_PIX_FMT_RGB32 ||
+				   fourcc == V4L2_PIX_FMT_Y16;
 
 	/* According to sensor's output format to set cfg2 */
 	cfg2 = isi->current_fmt->swap;
@@ -554,12 +555,36 @@ static const struct isi_format *find_format_by_fourcc(struct atmel_isi *isi,
 	return NULL;
 }
 
+static void isi_try_fse(struct atmel_isi *isi, const struct isi_format *isi_fmt,
+			struct v4l2_subdev_pad_config *pad_cfg)
+{
+	int ret;
+	struct v4l2_subdev_frame_size_enum fse = {
+		.code = isi_fmt->mbus_code,
+		.which = V4L2_SUBDEV_FORMAT_TRY,
+	};
+
+	ret = v4l2_subdev_call(isi->entity.subdev, pad, enum_frame_size,
+			       pad_cfg, &fse);
+	/*
+	 * Attempt to obtain format size from subdev. If not available,
+	 * just use the maximum ISI can receive.
+	 */
+	if (ret) {
+		pad_cfg->try_crop.width = MAX_SUPPORT_WIDTH;
+		pad_cfg->try_crop.height = MAX_SUPPORT_HEIGHT;
+	} else {
+		pad_cfg->try_crop.width = fse.max_width;
+		pad_cfg->try_crop.height = fse.max_height;
+	}
+}
+
 static int isi_try_fmt(struct atmel_isi *isi, struct v4l2_format *f,
 		       const struct isi_format **current_fmt)
 {
 	const struct isi_format *isi_fmt;
 	struct v4l2_pix_format *pixfmt = &f->fmt.pix;
-	struct v4l2_subdev_pad_config pad_cfg;
+	struct v4l2_subdev_pad_config pad_cfg = {};
 	struct v4l2_subdev_format format = {
 		.which = V4L2_SUBDEV_FORMAT_TRY,
 	};
@@ -576,6 +601,9 @@ static int isi_try_fmt(struct atmel_isi *isi, struct v4l2_format *f,
 	pixfmt->height = clamp(pixfmt->height, 0U, MAX_SUPPORT_HEIGHT);
 
 	v4l2_fill_mbus_format(&format.format, pixfmt, isi_fmt->mbus_code);
+
+	isi_try_fse(isi, isi_fmt, &pad_cfg);
+
 	ret = v4l2_subdev_call(isi->entity.subdev, pad, set_fmt,
 			       &pad_cfg, &format);
 	if (ret < 0)
@@ -990,6 +1018,16 @@ static const struct isi_format isi_formats[] = {
 		.mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
 		.bpp = 2,
 		.swap = ISI_CFG2_YCC_SWAP_MODE_1,
+	}, {
+		.fourcc = V4L2_PIX_FMT_GREY,
+		.mbus_code = MEDIA_BUS_FMT_Y10_1X10,
+		.bpp = 1,
+		.swap = ISI_CFG2_GS_MODE_2_PIXEL | ISI_CFG2_GRAYSCALE,
+	}, {
+		.fourcc = V4L2_PIX_FMT_Y16,
+		.mbus_code = MEDIA_BUS_FMT_Y10_1X10,
+		.bpp = 2,
+		.swap = ISI_CFG2_GS_MODE_2_PIXEL | ISI_CFG2_GRAYSCALE,
 	},
 };
 
diff --git a/drivers/media/platform/atmel/atmel-isi.h b/drivers/media/platform/atmel/atmel-isi.h
index 47a9108..7ad3895 100644
--- a/drivers/media/platform/atmel/atmel-isi.h
+++ b/drivers/media/platform/atmel/atmel-isi.h
@@ -62,6 +62,8 @@
 #define		ISI_CFG1_THMASK_BEATS_16	(2 << 13)
 
 /* Bitfields in CFG2 */
+#define ISI_CFG2_GS_MODE_2_PIXEL		(0 << 11)
+#define ISI_CFG2_GS_MODE_1_PIXEL		(1 << 11)
 #define ISI_CFG2_GRAYSCALE			(1 << 13)
 #define ISI_CFG2_COL_SPACE_YCbCr		(0 << 15)
 #define ISI_CFG2_COL_SPACE_RGB			(1 << 15)
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 00c7bed..3443396 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -1629,6 +1629,9 @@ static void coda_finish_encode(struct coda_ctx *ctx)
 	struct coda_dev *dev = ctx->dev;
 	u32 wr_ptr, start_ptr;
 
+	if (ctx->aborting)
+		return;
+
 	/*
 	 * Lock to make sure that an encoder stop command running in parallel
 	 * will either already have marked src_buf as last, or it will wake up
@@ -2165,16 +2168,21 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
 	} else {
 		if (dev->devtype->product == CODA_960) {
 			/*
-			 * The CODA960 seems to have an internal list of
-			 * buffers with 64 entries that includes the
-			 * registered frame buffers as well as the rotator
-			 * buffer output.
-			 *
-			 * ROT_INDEX needs to be < 0x40, but >
-			 * ctx->num_internal_frames.
+			 * It was previously assumed that the CODA960 has an
+			 * internal list of 64 buffer entries that contains
+			 * both the registered internal frame buffers as well
+			 * as the rotator buffer output, and that the ROT_INDEX
+			 * register must be set to a value between the last
+			 * internal frame buffers' index and 64.
+			 * At least on firmware version 3.1.1 it turns out that
+			 * setting ROT_INDEX to any value >= 32 causes CODA
+			 * hangups that it can not recover from with the SRC VPU
+			 * reset.
+			 * It does appear to work however, to just set it to a
+			 * fixed value in the [ctx->num_internal_frames, 31]
+			 * range, for example CODA_MAX_FRAMEBUFFERS.
 			 */
-			coda_write(dev,
-				   CODA_MAX_FRAMEBUFFERS + dst_buf->vb2_buf.index,
+			coda_write(dev, CODA_MAX_FRAMEBUFFERS,
 				   CODA9_CMD_DEC_PIC_ROT_INDEX);
 
 			reg_addr = CODA9_CMD_DEC_PIC_ROT_ADDR_Y;
@@ -2266,6 +2274,9 @@ static void coda_finish_decode(struct coda_ctx *ctx)
 	int err_vdoa = 0;
 	u32 val;
 
+	if (ctx->aborting)
+		return;
+
 	/* Update kfifo out pointer from coda bitstream read pointer */
 	coda_kfifo_sync_from_device(ctx);
 
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 94fb4d2..acff10a 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -155,6 +155,7 @@ static const struct coda_codec coda7_codecs[] = {
 static const struct coda_codec coda9_codecs[] = {
 	CODA_CODEC(CODA9_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264,   1920, 1088),
 	CODA_CODEC(CODA9_MODE_ENCODE_MP4,  V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4,  1920, 1088),
+	CODA_CODEC(CODA9_MODE_ENCODE_MJPG, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_JPEG,   8192, 8192),
 	CODA_CODEC(CODA9_MODE_DECODE_H264, V4L2_PIX_FMT_H264,   V4L2_PIX_FMT_YUV420, 1920, 1088),
 	CODA_CODEC(CODA9_MODE_DECODE_MP2,  V4L2_PIX_FMT_MPEG2,  V4L2_PIX_FMT_YUV420, 1920, 1088),
 	CODA_CODEC(CODA9_MODE_DECODE_MP4,  V4L2_PIX_FMT_MPEG4,  V4L2_PIX_FMT_YUV420, 1920, 1088),
@@ -235,6 +236,22 @@ static const struct coda_video_device coda_bit_jpeg_decoder = {
 	},
 };
 
+static const struct coda_video_device coda9_jpeg_encoder = {
+	.name = "coda-jpeg-encoder",
+	.type = CODA_INST_ENCODER,
+	.ops = &coda9_jpeg_encode_ops,
+	.direct = true,
+	.src_formats = {
+		V4L2_PIX_FMT_NV12,
+		V4L2_PIX_FMT_YUV420,
+		V4L2_PIX_FMT_YVU420,
+		V4L2_PIX_FMT_YUV422P,
+	},
+	.dst_formats = {
+		V4L2_PIX_FMT_JPEG,
+	},
+};
+
 static const struct coda_video_device *codadx6_video_devices[] = {
 	&coda_bit_encoder,
 };
@@ -252,6 +269,7 @@ static const struct coda_video_device *coda7_video_devices[] = {
 };
 
 static const struct coda_video_device *coda9_video_devices[] = {
+	&coda9_jpeg_encoder,
 	&coda_bit_encoder,
 	&coda_bit_decoder,
 };
@@ -721,7 +739,8 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
 		ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
 		break;
 	case V4L2_PIX_FMT_NV12:
-		if (!disable_tiling && ctx->dev->devtype->product == CODA_960) {
+		if (!disable_tiling && ctx->use_bit &&
+		    ctx->dev->devtype->product == CODA_960) {
 			ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
 			break;
 		}
@@ -1421,7 +1440,7 @@ static void coda_pic_run_work(struct work_struct *work)
 
 		if (ctx->ops->run_timeout)
 			ctx->ops->run_timeout(ctx);
-	} else if (!ctx->aborting) {
+	} else {
 		ctx->ops->finish_run(ctx);
 	}
 
@@ -1787,7 +1806,7 @@ static void coda_buf_queue(struct vb2_buffer *vb)
 				coda_queue_source_change_event(ctx);
 		}
 	} else {
-		if (ctx->inst_type == CODA_INST_ENCODER &&
+		if ((ctx->inst_type == CODA_INST_ENCODER || !ctx->use_bit) &&
 		    vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
 			vbuf->sequence = ctx->qsequence++;
 		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
@@ -2984,10 +3003,8 @@ static int coda_probe(struct platform_device *pdev)
 	irq = platform_get_irq_byname(pdev, "bit");
 	if (irq < 0)
 		irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "failed to get irq resource\n");
+	if (irq < 0)
 		return irq;
-	}
 
 	ret = devm_request_irq(&pdev->dev, irq, coda_irq_handler, 0,
 			       dev_name(&pdev->dev), dev);
@@ -2996,6 +3013,22 @@ static int coda_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	/* JPEG IRQ */
+	if (dev->devtype->product == CODA_960) {
+		irq = platform_get_irq_byname(pdev, "jpeg");
+		if (irq < 0)
+			return irq;
+
+		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+						coda9_jpeg_irq_handler,
+						IRQF_ONESHOT, CODA_NAME " jpeg",
+						dev);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "failed to request jpeg irq\n");
+			return ret;
+		}
+	}
+
 	dev->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev,
 							      NULL);
 	if (IS_ERR(dev->rstc)) {
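
The coda-jpeg.c hunk below adds coda9_jpeg_gen_enc_huff_tab(), which expands a (bits, values) Huffman specification into per-symbol code lengths and codes following ITU-T.81 Annex C, Figures C.1 and C.2. A standalone userspace sketch of that expansion, using the luma DC table from the patch; the kernel version's validity checks are omitted and all names here are illustrative:

  #include <stdio.h>

  /* Luma DC spec as in coda-jpeg.c: 16 "bits" counts followed by the values. */
  static const unsigned char luma_dc[16 + 12] = {
          0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
          0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
          0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
          0x08, 0x09, 0x0a, 0x0b,
  };

  int main(void)
  {
          int size[256], code[256];
          int i, j, k = 0, si, c = 0, lastk;

          /* Figure C.1: emit one entry of length i for each count in bits[] */
          for (i = 1; i <= 16; i++)
                  for (j = 0; j < luma_dc[i - 1]; j++)
                          size[k++] = i;
          lastk = k;

          /* Figure C.2: assign consecutive codes, left-shifting at each new length */
          k = 0;
          si = size[0];
          while (k < lastk) {
                  while (k < lastk && size[k] == si)
                          code[k++] = c++;
                  c <<= 1;
                  si++;
          }

          /* Symbol 0x00 gets "00", 0x01..0x05 get "010".."110", 0x06 gets "1110" */
          for (k = 0; k < lastk; k++)
                  printf("symbol 0x%02x -> %d bits, code 0x%x\n",
                         luma_dc[16 + k], size[k], code[k]);
          return 0;
  }

The output matches the canonical JPEG luminance DC table from Annex K.3, which is what the driver loads into the CODA9 Huffman lookup registers.
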
diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c
index bf61a3e..92234fd 100644
--- a/drivers/media/platform/coda/coda-jpeg.c
+++ b/drivers/media/platform/coda/coda-jpeg.c
@@ -5,46 +5,68 @@
  * Copyright (C) 2014 Philipp Zabel, Pengutronix
  */
 
+#include <asm/unaligned.h>
+#include <linux/irqreturn.h>
 #include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
 #include <linux/swab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
 
 #include "coda.h"
 #include "trace.h"
 
 #define SOI_MARKER	0xffd8
+#define DRI_MARKER	0xffdd
+#define DQT_MARKER	0xffdb
+#define DHT_MARKER	0xffc4
+#define SOF_MARKER	0xffc0
 #define EOI_MARKER	0xffd9
 
+enum {
+	CODA9_JPEG_FORMAT_420,
+	CODA9_JPEG_FORMAT_422,
+	CODA9_JPEG_FORMAT_224,
+	CODA9_JPEG_FORMAT_444,
+	CODA9_JPEG_FORMAT_400,
+};
+
+#define CODA9_JPEG_ENC_HUFF_DATA_SIZE	(256 + 256 + 16 + 16)
+
 /*
  * Typical Huffman tables for 8-bit precision luminance and
  * chrominance from JPEG ITU-T.81 (ISO/IEC 10918-1) Annex K.3
  */
 
-static const unsigned char luma_dc_bits[16] = {
+static const unsigned char luma_dc[16 + 12] = {
+	/* bits */
 	0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
 	0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-static const unsigned char luma_dc_value[12] = {
+	/* values */
 	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
 	0x08, 0x09, 0x0a, 0x0b,
 };
 
-static const unsigned char chroma_dc_bits[16] = {
+static const unsigned char chroma_dc[16 + 12] = {
+	/* bits */
 	0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
 	0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-static const unsigned char chroma_dc_value[12] = {
+	/* values */
 	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
 	0x08, 0x09, 0x0a, 0x0b,
 };
 
-static const unsigned char luma_ac_bits[16] = {
+static const unsigned char luma_ac[16 + 162 + 2] = {
+	/* bits */
 	0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
 	0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d,
-};
-
-static const unsigned char luma_ac_value[162 + 2] = {
+	/* values */
 	0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
 	0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
 	0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
@@ -68,12 +90,11 @@ static const unsigned char luma_ac_value[162 + 2] = {
 	0xf9, 0xfa, /* padded to 32-bit */
 };
 
-static const unsigned char chroma_ac_bits[16] = {
+static const unsigned char chroma_ac[16 + 162 + 2] = {
+	/* bits */
 	0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
 	0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
-};
-
-static const unsigned char chroma_ac_value[162 + 2] = {
+	/* values */
 	0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
 	0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
 	0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
@@ -124,6 +145,38 @@ static unsigned char chroma_q[64] = {
 	0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
 };
 
+static const unsigned char width_align[] = {
+	[CODA9_JPEG_FORMAT_420] = 16,
+	[CODA9_JPEG_FORMAT_422] = 16,
+	[CODA9_JPEG_FORMAT_224] = 8,
+	[CODA9_JPEG_FORMAT_444] = 8,
+	[CODA9_JPEG_FORMAT_400] = 8,
+};
+
+static const unsigned char height_align[] = {
+	[CODA9_JPEG_FORMAT_420] = 16,
+	[CODA9_JPEG_FORMAT_422] = 8,
+	[CODA9_JPEG_FORMAT_224] = 16,
+	[CODA9_JPEG_FORMAT_444] = 8,
+	[CODA9_JPEG_FORMAT_400] = 8,
+};
+
+static int coda9_jpeg_chroma_format(u32 pixfmt)
+{
+	switch (pixfmt) {
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_NV12:
+		return CODA9_JPEG_FORMAT_420;
+	case V4L2_PIX_FMT_YUV422P:
+		return CODA9_JPEG_FORMAT_422;
+	case V4L2_PIX_FMT_YUV444:
+		return CODA9_JPEG_FORMAT_444;
+	case V4L2_PIX_FMT_GREY:
+		return CODA9_JPEG_FORMAT_400;
+	}
+	return -EINVAL;
+}
+
 struct coda_memcpy_desc {
 	int offset;
 	const void *src;
@@ -148,14 +201,10 @@ int coda_jpeg_write_tables(struct coda_ctx *ctx)
 {
 	int i;
 	static const struct coda_memcpy_desc huff[8] = {
-		{ 0,   luma_dc_bits,    sizeof(luma_dc_bits)    },
-		{ 16,  luma_dc_value,   sizeof(luma_dc_value)   },
-		{ 32,  luma_ac_bits,    sizeof(luma_ac_bits)    },
-		{ 48,  luma_ac_value,   sizeof(luma_ac_value)   },
-		{ 216, chroma_dc_bits,  sizeof(chroma_dc_bits)  },
-		{ 232, chroma_dc_value, sizeof(chroma_dc_value) },
-		{ 248, chroma_ac_bits,  sizeof(chroma_ac_bits)  },
-		{ 264, chroma_ac_value, sizeof(chroma_ac_value) },
+		{ 0,   luma_dc,    sizeof(luma_dc)    },
+		{ 32,  luma_ac,    sizeof(luma_ac)    },
+		{ 216, chroma_dc,  sizeof(chroma_dc)  },
+		{ 248, chroma_ac,  sizeof(chroma_ac)  },
 	};
 	struct coda_memcpy_desc qmat[3] = {
 		{ 512, ctx->params.jpeg_qmat_tab[0], 64 },
@@ -198,6 +247,379 @@ bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb)
 	return false;
 }
 
+static const int bus_req_num[] = {
+	[CODA9_JPEG_FORMAT_420] = 2,
+	[CODA9_JPEG_FORMAT_422] = 3,
+	[CODA9_JPEG_FORMAT_224] = 3,
+	[CODA9_JPEG_FORMAT_444] = 4,
+	[CODA9_JPEG_FORMAT_400] = 4,
+};
+
+#define MCU_INFO(mcu_block_num, comp_num, comp0_info, comp1_info, comp2_info) \
+	(((mcu_block_num) << CODA9_JPEG_MCU_BLOCK_NUM_OFFSET) | \
+	 ((comp_num) << CODA9_JPEG_COMP_NUM_OFFSET) | \
+	 ((comp0_info) << CODA9_JPEG_COMP0_INFO_OFFSET) | \
+	 ((comp1_info) << CODA9_JPEG_COMP1_INFO_OFFSET) | \
+	 ((comp2_info) << CODA9_JPEG_COMP2_INFO_OFFSET))
+
+static const u32 mcu_info[] = {
+	[CODA9_JPEG_FORMAT_420] = MCU_INFO(6, 3, 10, 5, 5),
+	[CODA9_JPEG_FORMAT_422] = MCU_INFO(4, 3, 9, 5, 5),
+	[CODA9_JPEG_FORMAT_224] = MCU_INFO(4, 3, 6, 5, 5),
+	[CODA9_JPEG_FORMAT_444] = MCU_INFO(3, 3, 5, 5, 5),
+	[CODA9_JPEG_FORMAT_400] = MCU_INFO(1, 1, 5, 0, 0),
+};
+
+/*
+ * Convert Huffman table specifications to tables of codes and code lengths.
+ * For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1) [1]
+ *
+ * [1] https://www.w3.org/Graphics/JPEG/itu-t81.pdf
+ */
+static int coda9_jpeg_gen_enc_huff_tab(struct coda_ctx *ctx, int tab_num,
+				       int *ehufsi, int *ehufco)
+{
+	int i, j, k, lastk, si, code, maxsymbol;
+	const u8 *bits, *huffval;
+	struct {
+		int size[256];
+		int code[256];
+	} *huff;
+	static const unsigned char *huff_tabs[4] = {
+		luma_dc, luma_ac, chroma_dc, chroma_ac,
+	};
+	int ret = -EINVAL;
+
+	huff = kzalloc(sizeof(*huff), GFP_KERNEL);
+	if (!huff)
+		return -ENOMEM;
+
+	bits = huff_tabs[tab_num];
+	huffval = huff_tabs[tab_num] + 16;
+
+	maxsymbol = tab_num & 1 ? 256 : 16;
+
+	/* Figure C.1 - Generation of table of Huffman code sizes */
+	k = 0;
+	for (i = 1; i <= 16; i++) {
+		j = bits[i - 1];
+		if (k + j > maxsymbol)
+			goto out;
+		while (j--)
+			huff->size[k++] = i;
+	}
+	lastk = k;
+
+	/* Figure C.2 - Generation of table of Huffman codes */
+	k = 0;
+	code = 0;
+	si = huff->size[0];
+	while (k < lastk) {
+		while (huff->size[k] == si) {
+			huff->code[k++] = code;
+			code++;
+		}
+		if (code >= (1 << si))
+			goto out;
+		code <<= 1;
+		si++;
+	}
+
+	/* Figure C.3 - Ordering procedure for encoding procedure code tables */
+	for (k = 0; k < lastk; k++) {
+		i = huffval[k];
+		if (i >= maxsymbol || ehufsi[i])
+			goto out;
+		ehufco[i] = huff->code[k];
+		ehufsi[i] = huff->size[k];
+	}
+
+	ret = 0;
+out:
+	kfree(huff);
+	return ret;
+}
+
+#define DC_TABLE_INDEX0		    0
+#define AC_TABLE_INDEX0		    1
+#define DC_TABLE_INDEX1		    2
+#define AC_TABLE_INDEX1		    3
+
+static int coda9_jpeg_load_huff_tab(struct coda_ctx *ctx)
+{
+	struct {
+		int size[4][256];
+		int code[4][256];
+	} *huff;
+	u32 *huff_data;
+	int i, j;
+	int ret;
+
+	huff = kzalloc(sizeof(*huff), GFP_KERNEL);
+	if (!huff)
+		return -ENOMEM;
+
+	/* Generate all four (luma/chroma DC/AC) code/size lookup tables */
+	for (i = 0; i < 4; i++) {
+		ret = coda9_jpeg_gen_enc_huff_tab(ctx, i, huff->size[i],
+						  huff->code[i]);
+		if (ret)
+			goto out;
+	}
+
+	if (!ctx->params.jpeg_huff_data) {
+		ctx->params.jpeg_huff_data =
+			kzalloc(sizeof(u32) * CODA9_JPEG_ENC_HUFF_DATA_SIZE,
+				GFP_KERNEL);
+		if (!ctx->params.jpeg_huff_data) {
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+	huff_data = ctx->params.jpeg_huff_data;
+
+	for (j = 0; j < 4; j++) {
+		/* Store Huffman lookup tables in AC0, AC1, DC0, DC1 order */
+		int t = (j == 0) ? AC_TABLE_INDEX0 :
+			(j == 1) ? AC_TABLE_INDEX1 :
+			(j == 2) ? DC_TABLE_INDEX0 :
+				   DC_TABLE_INDEX1;
+		/* DC tables only have 16 entries */
+		int len = (j < 2) ? 256 : 16;
+
+		for (i = 0; i < len; i++) {
+			if (huff->size[t][i] == 0 && huff->code[t][i] == 0)
+				*(huff_data++) = 0;
+			else
+				*(huff_data++) =
+					((huff->size[t][i] - 1) << 16) |
+					huff->code[t][i];
+		}
+	}
+
+	ret = 0;
+out:
+	kfree(huff);
+	return ret;
+}
+
+static void coda9_jpeg_write_huff_tab(struct coda_ctx *ctx)
+{
+	struct coda_dev *dev = ctx->dev;
+	u32 *huff_data = ctx->params.jpeg_huff_data;
+	int i;
+
+	/* Write Huffman size/code lookup tables in AC0, AC1, DC0, DC1 order */
+	coda_write(dev, 0x3, CODA9_REG_JPEG_HUFF_CTRL);
+	for (i = 0; i < CODA9_JPEG_ENC_HUFF_DATA_SIZE; i++)
+		coda_write(dev, *(huff_data++), CODA9_REG_JPEG_HUFF_DATA);
+	coda_write(dev, 0x0, CODA9_REG_JPEG_HUFF_CTRL);
+}
+
+static inline void coda9_jpeg_write_qmat_quotients(struct coda_dev *dev,
+						   u8 *qmat, int index)
+{
+	int i;
+
+	coda_write(dev, index | 0x3, CODA9_REG_JPEG_QMAT_CTRL);
+	for (i = 0; i < 64; i++)
+		coda_write(dev, 0x80000 / qmat[i], CODA9_REG_JPEG_QMAT_DATA);
+	coda_write(dev, index, CODA9_REG_JPEG_QMAT_CTRL);
+}
+
+static void coda9_jpeg_load_qmat_tab(struct coda_ctx *ctx)
+{
+	struct coda_dev *dev = ctx->dev;
+	u8 *luma_tab;
+	u8 *chroma_tab;
+
+	luma_tab = ctx->params.jpeg_qmat_tab[0];
+	if (!luma_tab)
+		luma_tab = luma_q;
+
+	chroma_tab = ctx->params.jpeg_qmat_tab[1];
+	if (!chroma_tab)
+		chroma_tab = chroma_q;
+
+	coda9_jpeg_write_qmat_quotients(dev, luma_tab, 0x00);
+	coda9_jpeg_write_qmat_quotients(dev, chroma_tab, 0x40);
+	coda9_jpeg_write_qmat_quotients(dev, chroma_tab, 0x80);
+}
+
+struct coda_jpeg_stream {
+	u8 *curr;
+	u8 *end;
+};
+
+static inline int coda_jpeg_put_byte(u8 byte, struct coda_jpeg_stream *stream)
+{
+	if (stream->curr >= stream->end)
+		return -EINVAL;
+
+	*stream->curr++ = byte;
+
+	return 0;
+}
+
+static inline int coda_jpeg_put_word(u16 word, struct coda_jpeg_stream *stream)
+{
+	if (stream->curr + sizeof(__be16) > stream->end)
+		return -EINVAL;
+
+	put_unaligned_be16(word, stream->curr);
+	stream->curr += sizeof(__be16);
+
+	return 0;
+}
+
+static int coda_jpeg_put_table(u16 marker, u8 index, const u8 *table,
+			       size_t len, struct coda_jpeg_stream *stream)
+{
+	int i, ret;
+
+	ret = coda_jpeg_put_word(marker, stream);
+	if (ret < 0)
+		return ret;
+	ret = coda_jpeg_put_word(3 + len, stream);
+	if (ret < 0)
+		return ret;
+	ret = coda_jpeg_put_byte(index, stream);
+	for (i = 0; i < len && ret == 0; i++)
+		ret = coda_jpeg_put_byte(table[i], stream);
+
+	return ret;
+}
+
+static int coda_jpeg_define_quantization_table(struct coda_ctx *ctx, u8 index,
+					       struct coda_jpeg_stream *stream)
+{
+	return coda_jpeg_put_table(DQT_MARKER, index,
+				   ctx->params.jpeg_qmat_tab[index], 64,
+				   stream);
+}
+
+static int coda_jpeg_define_huffman_table(u8 index, const u8 *table, size_t len,
+					  struct coda_jpeg_stream *stream)
+{
+	return coda_jpeg_put_table(DHT_MARKER, index, table, len, stream);
+}
+
+static int coda9_jpeg_encode_header(struct coda_ctx *ctx, int len, u8 *buf)
+{
+	struct coda_jpeg_stream stream = { buf, buf + len };
+	struct coda_q_data *q_data_src;
+	int chroma_format, comp_num;
+	int i, ret, pad;
+
+	q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+	chroma_format = coda9_jpeg_chroma_format(q_data_src->fourcc);
+	if (chroma_format < 0)
+		return 0;
+
+	/* Start Of Image */
+	ret = coda_jpeg_put_word(SOI_MARKER, &stream);
+	if (ret < 0)
+		return ret;
+
+	/* Define Restart Interval */
+	if (ctx->params.jpeg_restart_interval) {
+		ret = coda_jpeg_put_word(DRI_MARKER, &stream);
+		if (ret < 0)
+			return ret;
+		ret = coda_jpeg_put_word(4, &stream);
+		if (ret < 0)
+			return ret;
+		ret = coda_jpeg_put_word(ctx->params.jpeg_restart_interval,
+					 &stream);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Define Quantization Tables */
+	ret = coda_jpeg_define_quantization_table(ctx, 0x00, &stream);
+	if (ret < 0)
+		return ret;
+	if (chroma_format != CODA9_JPEG_FORMAT_400) {
+		ret = coda_jpeg_define_quantization_table(ctx, 0x01, &stream);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Define Huffman Tables */
+	ret = coda_jpeg_define_huffman_table(0x00, luma_dc, 16 + 12, &stream);
+	if (ret < 0)
+		return ret;
+	ret = coda_jpeg_define_huffman_table(0x10, luma_ac, 16 + 162, &stream);
+	if (ret < 0)
+		return ret;
+	if (chroma_format != CODA9_JPEG_FORMAT_400) {
+		ret = coda_jpeg_define_huffman_table(0x01, chroma_dc, 16 + 12,
+						     &stream);
+		if (ret < 0)
+			return ret;
+		ret = coda_jpeg_define_huffman_table(0x11, chroma_ac, 16 + 162,
+						     &stream);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Start Of Frame */
+	ret = coda_jpeg_put_word(SOF_MARKER, &stream);
+	if (ret < 0)
+		return ret;
+	comp_num = (chroma_format == CODA9_JPEG_FORMAT_400) ? 1 : 3;
+	ret = coda_jpeg_put_word(8 + comp_num * 3, &stream);
+	if (ret < 0)
+		return ret;
+	ret = coda_jpeg_put_byte(0x08, &stream);
+	if (ret < 0)
+		return ret;
+	ret = coda_jpeg_put_word(q_data_src->height, &stream);
+	if (ret < 0)
+		return ret;
+	ret = coda_jpeg_put_word(q_data_src->width, &stream);
+	if (ret < 0)
+		return ret;
+	ret = coda_jpeg_put_byte(comp_num, &stream);
+	if (ret < 0)
+		return ret;
+	for (i = 0; i < comp_num; i++) {
+		static unsigned char subsampling[5][3] = {
+			[CODA9_JPEG_FORMAT_420] = { 0x22, 0x11, 0x11 },
+			[CODA9_JPEG_FORMAT_422] = { 0x21, 0x11, 0x11 },
+			[CODA9_JPEG_FORMAT_224] = { 0x12, 0x11, 0x11 },
+			[CODA9_JPEG_FORMAT_444] = { 0x11, 0x11, 0x11 },
+			[CODA9_JPEG_FORMAT_400] = { 0x11 },
+		};
+
+		/* Component identifier, matches SOS */
+		ret = coda_jpeg_put_byte(i + 1, &stream);
+		if (ret < 0)
+			return ret;
+		ret = coda_jpeg_put_byte(subsampling[chroma_format][i],
+					 &stream);
+		if (ret < 0)
+			return ret;
+		/* Chroma table index */
+		ret = coda_jpeg_put_byte((i == 0) ? 0 : 1, &stream);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Pad to multiple of 8 bytes */
+	pad = (stream.curr - buf) % 8;
+	if (pad) {
+		pad = 8 - pad;
+		while (pad--) {
+			ret = coda_jpeg_put_byte(0x00, &stream);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	return stream.curr - buf;
+}
+
 /*
  * Scale quantization table using nonlinear scaling factor
  * u8 qtab[64], scale [50,190]
@@ -247,3 +669,279 @@ void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality)
 		coda_scale_quant_table(ctx->params.jpeg_qmat_tab[1], scale);
 	}
 }
+
+/*
+ * Encoder context operations
+ */
+
+static int coda9_jpeg_start_encoding(struct coda_ctx *ctx)
+{
+	struct coda_dev *dev = ctx->dev;
+	int ret;
+
+	ret = coda9_jpeg_load_huff_tab(ctx);
+	if (ret < 0) {
+		v4l2_err(&dev->v4l2_dev, "error loading Huffman tables\n");
+		return ret;
+	}
+	if (!ctx->params.jpeg_qmat_tab[0])
+		ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL);
+	if (!ctx->params.jpeg_qmat_tab[1])
+		ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL);
+	coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality);
+
+	return 0;
+}
+
+static int coda9_jpeg_prepare_encode(struct coda_ctx *ctx)
+{
+	struct coda_q_data *q_data_src;
+	struct vb2_v4l2_buffer *src_buf, *dst_buf;
+	struct coda_dev *dev = ctx->dev;
+	u32 start_addr, end_addr;
+	u16 aligned_width, aligned_height;
+	bool chroma_interleave;
+	int chroma_format;
+	int header_len;
+	int ret;
+	ktime_t timeout;
+
+	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+	q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+	if (vb2_get_plane_payload(&src_buf->vb2_buf, 0) == 0)
+		vb2_set_plane_payload(&src_buf->vb2_buf, 0,
+				      vb2_plane_size(&src_buf->vb2_buf, 0));
+
+	src_buf->sequence = ctx->osequence;
+	dst_buf->sequence = ctx->osequence;
+	ctx->osequence++;
+
+	src_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+	src_buf->flags &= ~V4L2_BUF_FLAG_PFRAME;
+
+	coda_set_gdi_regs(ctx);
+
+	start_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+	end_addr = start_addr + vb2_plane_size(&dst_buf->vb2_buf, 0);
+
+	chroma_format = coda9_jpeg_chroma_format(q_data_src->fourcc);
+	if (chroma_format < 0)
+		return chroma_format;
+
+	/* Round image dimensions to multiple of MCU size */
+	aligned_width = round_up(q_data_src->width, width_align[chroma_format]);
+	aligned_height = round_up(q_data_src->height,
+				  height_align[chroma_format]);
+	if (aligned_width != q_data_src->bytesperline) {
+		v4l2_err(&dev->v4l2_dev, "wrong stride: %d instead of %d\n",
+			 aligned_width, q_data_src->bytesperline);
+	}
+
+	header_len =
+		coda9_jpeg_encode_header(ctx,
+					 vb2_plane_size(&dst_buf->vb2_buf, 0),
+					 vb2_plane_vaddr(&dst_buf->vb2_buf, 0));
+	if (header_len < 0)
+		return header_len;
+
+	coda_write(dev, start_addr + header_len, CODA9_REG_JPEG_BBC_BAS_ADDR);
+	coda_write(dev, end_addr, CODA9_REG_JPEG_BBC_END_ADDR);
+	coda_write(dev, start_addr + header_len, CODA9_REG_JPEG_BBC_WR_PTR);
+	coda_write(dev, start_addr + header_len, CODA9_REG_JPEG_BBC_RD_PTR);
+	coda_write(dev, 0, CODA9_REG_JPEG_BBC_CUR_POS);
+	/* 64 words per 256-byte page */
+	coda_write(dev, 64, CODA9_REG_JPEG_BBC_DATA_CNT);
+	coda_write(dev, start_addr, CODA9_REG_JPEG_BBC_EXT_ADDR);
+	coda_write(dev, 0, CODA9_REG_JPEG_BBC_INT_ADDR);
+
+	coda_write(dev, 0, CODA9_REG_JPEG_GBU_BT_PTR);
+	coda_write(dev, 0, CODA9_REG_JPEG_GBU_WD_PTR);
+	coda_write(dev, 0, CODA9_REG_JPEG_GBU_BBSR);
+	coda_write(dev, 0, CODA9_REG_JPEG_BBC_STRM_CTRL);
+	coda_write(dev, 0, CODA9_REG_JPEG_GBU_CTRL);
+	coda_write(dev, 0, CODA9_REG_JPEG_GBU_FF_RPTR);
+	coda_write(dev, 127, CODA9_REG_JPEG_GBU_BBER);
+	coda_write(dev, 64, CODA9_REG_JPEG_GBU_BBIR);
+	coda_write(dev, 64, CODA9_REG_JPEG_GBU_BBHR);
+
+	chroma_interleave = (q_data_src->fourcc == V4L2_PIX_FMT_NV12);
+	coda_write(dev, CODA9_JPEG_PIC_CTRL_TC_DIRECTION |
+		   CODA9_JPEG_PIC_CTRL_ENCODER_EN, CODA9_REG_JPEG_PIC_CTRL);
+	coda_write(dev, 0, CODA9_REG_JPEG_SCL_INFO);
+	coda_write(dev, chroma_interleave, CODA9_REG_JPEG_DPB_CONFIG);
+	coda_write(dev, ctx->params.jpeg_restart_interval,
+		   CODA9_REG_JPEG_RST_INTVAL);
+	coda_write(dev, 1, CODA9_REG_JPEG_BBC_CTRL);
+
+	coda_write(dev, bus_req_num[chroma_format], CODA9_REG_JPEG_OP_INFO);
+
+	coda9_jpeg_write_huff_tab(ctx);
+	coda9_jpeg_load_qmat_tab(ctx);
+
+	if (ctx->params.rot_mode & CODA_ROT_90) {
+		aligned_width = aligned_height;
+		aligned_height = q_data_src->bytesperline;
+		if (chroma_format == CODA9_JPEG_FORMAT_422)
+			chroma_format = CODA9_JPEG_FORMAT_224;
+		else if (chroma_format == CODA9_JPEG_FORMAT_224)
+			chroma_format = CODA9_JPEG_FORMAT_422;
+	}
+	/* These need to be multiples of MCU size */
+	coda_write(dev, aligned_width << 16 | aligned_height,
+		   CODA9_REG_JPEG_PIC_SIZE);
+	coda_write(dev, ctx->params.rot_mode ?
+		   (CODA_ROT_MIR_ENABLE | ctx->params.rot_mode) : 0,
+		   CODA9_REG_JPEG_ROT_INFO);
+
+	coda_write(dev, mcu_info[chroma_format], CODA9_REG_JPEG_MCU_INFO);
+
+	coda_write(dev, 1, CODA9_GDI_CONTROL);
+	timeout = ktime_add_us(ktime_get(), 100000);
+	do {
+		ret = coda_read(dev, CODA9_GDI_STATUS);
+		if (ktime_compare(ktime_get(), timeout) > 0) {
+			v4l2_err(&dev->v4l2_dev, "timeout waiting for GDI\n");
+			return -ETIMEDOUT;
+		}
+	} while (!ret);
+
+	coda_write(dev, (chroma_format << 17) | (chroma_interleave << 16) |
+		   q_data_src->bytesperline, CODA9_GDI_INFO_CONTROL);
+	/* The content of this register seems to be irrelevant: */
+	coda_write(dev, aligned_width << 16 | aligned_height,
+		   CODA9_GDI_INFO_PIC_SIZE);
+
+	coda_write_base(ctx, q_data_src, src_buf, CODA9_GDI_INFO_BASE_Y);
+
+	coda_write(dev, 0, CODA9_REG_JPEG_DPB_BASE00);
+	coda_write(dev, 0, CODA9_GDI_CONTROL);
+	coda_write(dev, 1, CODA9_GDI_PIC_INIT_HOST);
+
+	coda_write(dev, 1, CODA9_GDI_WPROT_ERR_CLR);
+	coda_write(dev, 0, CODA9_GDI_WPROT_RGN_EN);
+
+	trace_coda_jpeg_run(ctx, src_buf);
+
+	coda_write(dev, 1, CODA9_REG_JPEG_PIC_START);
+
+	return 0;
+}
+
+static void coda9_jpeg_finish_encode(struct coda_ctx *ctx)
+{
+	struct vb2_v4l2_buffer *src_buf, *dst_buf;
+	struct coda_dev *dev = ctx->dev;
+	u32 wr_ptr, start_ptr;
+	u32 err_mb;
+
+	if (ctx->aborting) {
+		coda_write(ctx->dev, 0, CODA9_REG_JPEG_BBC_FLUSH_CMD);
+		return;
+	}
+
+	/*
+	 * Lock to make sure that an encoder stop command running in parallel
+	 * either has already marked src_buf as last, or will wake up the
+	 * capture queue after the buffers are returned.
+	 */
+	mutex_lock(&ctx->wakeup_mutex);
+	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+	trace_coda_jpeg_done(ctx, dst_buf);
+
+	/*
+	 * Set plane payload to the number of bytes written out
+	 * by the JPEG processing unit
+	 */
+	start_ptr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+	wr_ptr = coda_read(dev, CODA9_REG_JPEG_BBC_WR_PTR);
+	vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr);
+
+	err_mb = coda_read(dev, CODA9_REG_JPEG_PIC_ERRMB);
+	if (err_mb)
+		coda_dbg(1, ctx, "ERRMB: 0x%x\n", err_mb);
+
+	coda_write(dev, 0, CODA9_REG_JPEG_BBC_FLUSH_CMD);
+
+	dst_buf->flags &= ~(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_LAST);
+	dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+	dst_buf->flags |= src_buf->flags & V4L2_BUF_FLAG_LAST;
+
+	v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false);
+
+	v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+	coda_m2m_buf_done(ctx, dst_buf, err_mb ? VB2_BUF_STATE_ERROR :
+						 VB2_BUF_STATE_DONE);
+	mutex_unlock(&ctx->wakeup_mutex);
+
+	coda_dbg(1, ctx, "job finished: encoded frame (%u)%s\n",
+		 dst_buf->sequence,
+		 (dst_buf->flags & V4L2_BUF_FLAG_LAST) ? " (last)" : "");
+}
+
+static void coda9_jpeg_release(struct coda_ctx *ctx)
+{
+	int i;
+
+	if (ctx->params.jpeg_qmat_tab[0] == luma_q)
+		ctx->params.jpeg_qmat_tab[0] = NULL;
+	if (ctx->params.jpeg_qmat_tab[1] == chroma_q)
+		ctx->params.jpeg_qmat_tab[1] = NULL;
+	for (i = 0; i < 3; i++)
+		kfree(ctx->params.jpeg_qmat_tab[i]);
+	kfree(ctx->params.jpeg_huff_data);
+}
+
+const struct coda_context_ops coda9_jpeg_encode_ops = {
+	.queue_init = coda_encoder_queue_init,
+	.start_streaming = coda9_jpeg_start_encoding,
+	.prepare_run = coda9_jpeg_prepare_encode,
+	.finish_run = coda9_jpeg_finish_encode,
+	.release = coda9_jpeg_release,
+};
+
+irqreturn_t coda9_jpeg_irq_handler(int irq, void *data)
+{
+	struct coda_dev *dev = data;
+	struct coda_ctx *ctx;
+	int status;
+	int err_mb;
+
+	status = coda_read(dev, CODA9_REG_JPEG_PIC_STATUS);
+	if (status == 0)
+		return IRQ_HANDLED;
+	coda_write(dev, status, CODA9_REG_JPEG_PIC_STATUS);
+
+	if (status & CODA9_JPEG_STATUS_OVERFLOW)
+		v4l2_err(&dev->v4l2_dev, "JPEG overflow\n");
+
+	if (status & CODA9_JPEG_STATUS_BBC_INT)
+		v4l2_err(&dev->v4l2_dev, "JPEG BBC interrupt\n");
+
+	if (status & CODA9_JPEG_STATUS_ERROR) {
+		v4l2_err(&dev->v4l2_dev, "JPEG error\n");
+
+		err_mb = coda_read(dev, CODA9_REG_JPEG_PIC_ERRMB);
+		if (err_mb) {
+			v4l2_err(&dev->v4l2_dev,
+				 "ERRMB: 0x%x: rst idx %d, mcu pos (%d,%d)\n",
+				 err_mb, err_mb >> 24, (err_mb >> 12) & 0xfff,
+				 err_mb & 0xfff);
+		}
+	}
+
+	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+	if (!ctx) {
+		v4l2_err(&dev->v4l2_dev,
+			 "Instance released before the end of transaction\n");
+		mutex_unlock(&dev->coda_mutex);
+		return IRQ_HANDLED;
+	}
+
+	complete(&ctx->completion);
+
+	return IRQ_HANDLED;
+}
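
The Huffman setup above follows the canonical code construction of ITU-T.81 Annex C, as the comment before coda9_jpeg_gen_enc_huff_tab() notes. Purely as a standalone illustration (not part of this patch), a minimal user-space sketch of the same Figure C.1/C.2 procedure, fed with the Annex K luma DC "bits" array, could look like the following; the function and variable names are hypothetical:

/*
 * Standalone sketch: canonical Huffman code generation per ITU-T.81
 * Annex C.  bits[i] gives the number of codes of length i + 1, and
 * symbols are taken in code order, as in coda9_jpeg_gen_enc_huff_tab().
 */
#include <stdio.h>

static int gen_codes(const unsigned char bits[16],
		     unsigned char sizes[], unsigned int codes[])
{
	int i, j, k = 0, si;
	unsigned int code = 0;

	/* Figure C.1: expand "bits" into a list of code sizes */
	for (i = 1; i <= 16; i++)
		for (j = 0; j < bits[i - 1]; j++)
			sizes[k++] = i;

	/* Figure C.2: assign consecutive codes within each size */
	si = sizes[0];
	for (i = 0; i < k; ) {
		while (i < k && sizes[i] == si)
			codes[i++] = code++;
		code <<= 1;
		si++;
	}

	return k;	/* number of symbols */
}

int main(void)
{
	/* luma DC "bits" from the JPEG Annex K example tables */
	static const unsigned char bits[16] = {
		0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0
	};
	unsigned char sizes[12];
	unsigned int codes[12];
	int i, n = gen_codes(bits, sizes, codes);

	for (i = 0; i < n; i++)
		printf("symbol %2d: %d bits, code 0x%x\n",
		       i, sizes[i], codes[i]);
	return 0;
}

Running it reproduces the familiar luma DC code set: one 2-bit code, five 3-bit codes, then one code per length from 4 up to 9 bits.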
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 9f22614..43bda17 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -126,6 +126,7 @@ struct coda_params {
 	u8			jpeg_quality;
 	u8			jpeg_restart_interval;
 	u8			*jpeg_qmat_tab[3];
+	u32			*jpeg_huff_data;
 	int			codec_mode;
 	int			codec_mode_aux;
 	enum v4l2_mpeg_video_multi_slice_mode slice_mode;
@@ -366,7 +367,9 @@ void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality);
 
 extern const struct coda_context_ops coda_bit_encode_ops;
 extern const struct coda_context_ops coda_bit_decode_ops;
+extern const struct coda_context_ops coda9_jpeg_encode_ops;
 
 irqreturn_t coda_irq_handler(int irq, void *data);
+irqreturn_t coda9_jpeg_irq_handler(int irq, void *data);
 
 #endif /* __CODA_H__ */
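
A short note on the stream layout produced by coda9_jpeg_encode_header() above: coda_jpeg_put_table() writes the two-byte marker, a two-byte length of 3 + len, a one-byte table class/destination index and then len table bytes, matching the JPEG convention that a segment length counts itself plus the payload. That gives 3 + 64 = 67 bytes for each DQT segment and 3 + (16 + 162) = 181 (0x00b5) for a DHT segment carrying an AC table.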
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index b17464b..da5bb32 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -451,12 +451,21 @@
 #define CODA9_CMD_FIRMWARE_CODE_REV		0x1c4
 
 #define CODA9_GDMA_BASE				0x1000
+#define CODA9_GDI_CONTROL			(CODA9_GDMA_BASE + 0x034)
+#define CODA9_GDI_PIC_INIT_HOST			(CODA9_GDMA_BASE + 0x038)
+#define CODA9_GDI_STATUS			(CODA9_GDMA_BASE + 0x080)
 #define CODA9_GDI_WPROT_ERR_CLR			(CODA9_GDMA_BASE + 0x0a0)
 #define CODA9_GDI_WPROT_RGN_EN			(CODA9_GDMA_BASE + 0x0ac)
 
 #define CODA9_GDI_BUS_CTRL			(CODA9_GDMA_BASE + 0x0f0)
 #define CODA9_GDI_BUS_STATUS			(CODA9_GDMA_BASE + 0x0f4)
 
+#define CODA9_GDI_INFO_CONTROL			(CODA9_GDMA_BASE + 0x400)
+#define CODA9_GDI_INFO_PIC_SIZE			(CODA9_GDMA_BASE + 0x404)
+#define CODA9_GDI_INFO_BASE_Y			(CODA9_GDMA_BASE + 0x408)
+#define CODA9_GDI_INFO_BASE_CB			(CODA9_GDMA_BASE + 0x40c)
+#define CODA9_GDI_INFO_BASE_CR			(CODA9_GDMA_BASE + 0x410)
+
 #define CODA9_GDI_XY2_CAS_0			(CODA9_GDMA_BASE + 0x800)
 #define CODA9_GDI_XY2_CAS_F			(CODA9_GDMA_BASE + 0x83c)
 
@@ -477,4 +486,78 @@
 #define CODA9_GDI_RBC2_AXI_1F			(CODA9_GDMA_BASE + 0x91c)
 #define	CODA9_GDI_TILEDBUF_BASE			(CODA9_GDMA_BASE + 0x920)
 
+#define CODA9_JPEG_BASE				0x3000
+#define CODA9_REG_JPEG_PIC_START		(CODA9_JPEG_BASE + 0x000)
+#define CODA9_REG_JPEG_PIC_STATUS		(CODA9_JPEG_BASE + 0x004)
+#define		CODA9_JPEG_STATUS_OVERFLOW		BIT(3)
+#define		CODA9_JPEG_STATUS_BBC_INT		BIT(2)
+#define		CODA9_JPEG_STATUS_ERROR			BIT(1)
+#define		CODA9_JPEG_STATUS_DONE			BIT(0)
+#define CODA9_REG_JPEG_PIC_ERRMB		(CODA9_JPEG_BASE + 0x008)
+#define		CODA9_JPEG_ERRMB_RESTART_IDX_MASK	(0xf << 24)
+#define		CODA9_JPEG_ERRMB_MCU_POS_X_MASK		(0xfff << 12)
+#define		CODA9_JPEG_ERRMB_MCU_POS_Y_MASK		0xfff
+#define CODA9_REG_JPEG_PIC_CTRL			(CODA9_JPEG_BASE + 0x010)
+#define		CODA9_JPEG_PIC_CTRL_USER_HUFFMAN_EN	BIT(6)
+#define		CODA9_JPEG_PIC_CTRL_TC_DIRECTION	BIT(4)
+#define		CODA9_JPEG_PIC_CTRL_ENCODER_EN		BIT(3)
+#define CODA9_REG_JPEG_PIC_SIZE			(CODA9_JPEG_BASE + 0x014)
+#define CODA9_REG_JPEG_MCU_INFO			(CODA9_JPEG_BASE + 0x018)
+#define		CODA9_JPEG_MCU_BLOCK_NUM_OFFSET		16
+#define		CODA9_JPEG_COMP_NUM_OFFSET		12
+#define		CODA9_JPEG_COMP0_INFO_OFFSET		8
+#define		CODA9_JPEG_COMP1_INFO_OFFSET		4
+#define		CODA9_JPEG_COMP2_INFO_OFFSET		0
+#define CODA9_REG_JPEG_ROT_INFO			(CODA9_JPEG_BASE + 0x01c)
+#define		CODA9_JPEG_ROT_MIR_ENABLE		BIT(4)
+#define		CODA9_JPEG_ROT_MIR_MODE_MASK		0xf
+#define CODA9_REG_JPEG_SCL_INFO			(CODA9_JPEG_BASE + 0x020)
+#define		CODA9_JPEG_SCL_ENABLE			BIT(4)
+#define		CODA9_JPEG_SCL_HOR_MODE_MASK		(0x3 << 2)
+#define		CODA9_JPEG_SCL_VER_MODE_MASK		(0x3 << 0)
+#define CODA9_REG_JPEG_IF_INFO			(CODA9_JPEG_BASE + 0x024)
+#define		CODA9_JPEG_SENS_IF_CLR			BIT(1)
+#define		CODA9_JPEG_DISP_IF_CLR			BIT(0)
+#define CODA9_REG_JPEG_OP_INFO			(CODA9_JPEG_BASE + 0x02c)
+#define		CODA9_JPEG_BUS_REQ_NUM_OFFSET		0
+#define		CODA9_JPEG_BUS_REQ_NUM_MASK		0x7
+#define CODA9_REG_JPEG_DPB_CONFIG		(CODA9_JPEG_BASE + 0x030)
+#define CODA9_REG_JPEG_DPB_BASE00		(CODA9_JPEG_BASE + 0x040)
+#define CODA9_REG_JPEG_HUFF_CTRL		(CODA9_JPEG_BASE + 0x080)
+#define CODA9_REG_JPEG_HUFF_ADDR		(CODA9_JPEG_BASE + 0x084)
+#define CODA9_REG_JPEG_HUFF_DATA		(CODA9_JPEG_BASE + 0x088)
+#define CODA9_REG_JPEG_QMAT_CTRL		(CODA9_JPEG_BASE + 0x090)
+#define CODA9_REG_JPEG_QMAT_ADDR		(CODA9_JPEG_BASE + 0x094)
+#define CODA9_REG_JPEG_QMAT_DATA		(CODA9_JPEG_BASE + 0x098)
+#define CODA9_REG_JPEG_RST_INTVAL		(CODA9_JPEG_BASE + 0x0b0)
+#define CODA9_REG_JPEG_RST_INDEX		(CODA9_JPEG_BASE + 0x0b4)
+#define CODA9_REG_JPEG_RST_COUNT		(CODA9_JPEG_BASE + 0x0b8)
+#define CODA9_REG_JPEG_DPCM_DIFF_Y		(CODA9_JPEG_BASE + 0x0f0)
+#define CODA9_REG_JPEG_DPCM_DIFF_CB		(CODA9_JPEG_BASE + 0x0f4)
+#define CODA9_REG_JPEG_DPCM_DIFF_CR		(CODA9_JPEG_BASE + 0x0f8)
+#define CODA9_REG_JPEG_GBU_CTRL			(CODA9_JPEG_BASE + 0x100)
+#define CODA9_REG_JPEG_GBU_BT_PTR		(CODA9_JPEG_BASE + 0x110)
+#define CODA9_REG_JPEG_GBU_WD_PTR		(CODA9_JPEG_BASE + 0x114)
+#define CODA9_REG_JPEG_GBU_TT_CNT		(CODA9_JPEG_BASE + 0x118)
+#define CODA9_REG_JPEG_GBU_BBSR			(CODA9_JPEG_BASE + 0x140)
+#define CODA9_REG_JPEG_GBU_BBER			(CODA9_JPEG_BASE + 0x144)
+#define CODA9_REG_JPEG_GBU_BBIR			(CODA9_JPEG_BASE + 0x148)
+#define CODA9_REG_JPEG_GBU_BBHR			(CODA9_JPEG_BASE + 0x14c)
+#define CODA9_REG_JPEG_GBU_BCNT			(CODA9_JPEG_BASE + 0x158)
+#define CODA9_REG_JPEG_GBU_FF_RPTR		(CODA9_JPEG_BASE + 0x160)
+#define CODA9_REG_JPEG_GBU_FF_WPTR		(CODA9_JPEG_BASE + 0x164)
+#define CODA9_REG_JPEG_BBC_END_ADDR		(CODA9_JPEG_BASE + 0x208)
+#define CODA9_REG_JPEG_BBC_WR_PTR		(CODA9_JPEG_BASE + 0x20c)
+#define CODA9_REG_JPEG_BBC_RD_PTR		(CODA9_JPEG_BASE + 0x210)
+#define CODA9_REG_JPEG_BBC_EXT_ADDR		(CODA9_JPEG_BASE + 0x214)
+#define CODA9_REG_JPEG_BBC_INT_ADDR		(CODA9_JPEG_BASE + 0x218)
+#define CODA9_REG_JPEG_BBC_DATA_CNT		(CODA9_JPEG_BASE + 0x21c)
+#define CODA9_REG_JPEG_BBC_COMMAND		(CODA9_JPEG_BASE + 0x220)
+#define CODA9_REG_JPEG_BBC_BUSY			(CODA9_JPEG_BASE + 0x224)
+#define CODA9_REG_JPEG_BBC_CTRL			(CODA9_JPEG_BASE + 0x228)
+#define CODA9_REG_JPEG_BBC_CUR_POS		(CODA9_JPEG_BASE + 0x22c)
+#define CODA9_REG_JPEG_BBC_BAS_ADDR		(CODA9_JPEG_BASE + 0x230)
+#define CODA9_REG_JPEG_BBC_STRM_CTRL		(CODA9_JPEG_BASE + 0x234)
+#define CODA9_REG_JPEG_BBC_FLUSH_CMD		(CODA9_JPEG_BASE + 0x238)
+
 #endif
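
To make the packed CODA9_REG_JPEG_MCU_INFO layout concrete, here is a small standalone sketch (not part of the patch) that rebuilds the 4:2:0 entry of mcu_info[] from the field offsets defined above, i.e. 6 blocks per MCU, 3 components and per-component info 10/5/5:

/* Standalone sketch: packing CODA9_REG_JPEG_MCU_INFO for 4:2:0 */
#include <stdio.h>

#define MCU_BLOCK_NUM_OFFSET	16
#define COMP_NUM_OFFSET		12
#define COMP0_INFO_OFFSET	8
#define COMP1_INFO_OFFSET	4
#define COMP2_INFO_OFFSET	0

int main(void)
{
	unsigned int v = (6u << MCU_BLOCK_NUM_OFFSET) |
			 (3u << COMP_NUM_OFFSET) |
			 (10u << COMP0_INFO_OFFSET) |
			 (5u << COMP1_INFO_OFFSET) |
			 (5u << COMP2_INFO_OFFSET);

	printf("MCU info for 4:2:0: 0x%05x\n", v);	/* prints 0x63a55 */
	return 0;
}

The other chroma formats follow the same arithmetic; only the block/component counts and the per-component info values change.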
diff --git a/drivers/media/platform/coda/trace.h b/drivers/media/platform/coda/trace.h
index 6cf5823..c0791c8 100644
--- a/drivers/media/platform/coda/trace.h
+++ b/drivers/media/platform/coda/trace.h
@@ -154,6 +154,16 @@ DEFINE_EVENT(coda_buf_meta_class, coda_dec_rot_done,
 	TP_ARGS(ctx, buf, meta)
 );
 
+DEFINE_EVENT(coda_buf_class, coda_jpeg_run,
+	TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
+	TP_ARGS(ctx, buf)
+);
+
+DEFINE_EVENT(coda_buf_class, coda_jpeg_done,
+	TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
+	TP_ARGS(ctx, buf)
+);
+
 #endif /* __CODA_TRACE_H__ */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
index f048e89..0e7e277 100644
--- a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
+++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
@@ -14,7 +14,6 @@
 #include <linux/cec.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
 #include <media/cec.h>
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 916ed74..9b1d964 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -168,21 +168,22 @@ int vpfe_register_ccdc_device(const struct ccdc_hw_device *dev)
 	int ret = 0;
 	printk(KERN_NOTICE "vpfe_register_ccdc_device: %s\n", dev->name);
 
-	BUG_ON(!dev->hw_ops.open);
-	BUG_ON(!dev->hw_ops.enable);
-	BUG_ON(!dev->hw_ops.set_hw_if_params);
-	BUG_ON(!dev->hw_ops.configure);
-	BUG_ON(!dev->hw_ops.set_buftype);
-	BUG_ON(!dev->hw_ops.get_buftype);
-	BUG_ON(!dev->hw_ops.enum_pix);
-	BUG_ON(!dev->hw_ops.set_frame_format);
-	BUG_ON(!dev->hw_ops.get_frame_format);
-	BUG_ON(!dev->hw_ops.get_pixel_format);
-	BUG_ON(!dev->hw_ops.set_pixel_format);
-	BUG_ON(!dev->hw_ops.set_image_window);
-	BUG_ON(!dev->hw_ops.get_image_window);
-	BUG_ON(!dev->hw_ops.get_line_length);
-	BUG_ON(!dev->hw_ops.getfid);
+	if (!dev->hw_ops.open ||
+	    !dev->hw_ops.enable ||
+	    !dev->hw_ops.set_hw_if_params ||
+	    !dev->hw_ops.configure ||
+	    !dev->hw_ops.set_buftype ||
+	    !dev->hw_ops.get_buftype ||
+	    !dev->hw_ops.enum_pix ||
+	    !dev->hw_ops.set_frame_format ||
+	    !dev->hw_ops.get_frame_format ||
+	    !dev->hw_ops.get_pixel_format ||
+	    !dev->hw_ops.set_pixel_format ||
+	    !dev->hw_ops.set_image_window ||
+	    !dev->hw_ops.get_image_window ||
+	    !dev->hw_ops.get_line_length ||
+	    !dev->hw_ops.getfid)
+		return -EINVAL;
 
 	mutex_lock(&ccdc_lock);
 	if (!ccdc_cfg) {
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
index c1e29a4..aeaed2c 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
@@ -229,6 +229,9 @@ static int mtk_mdp_remove(struct platform_device *pdev)
 	mtk_mdp_unregister_m2m_device(mdp);
 	v4l2_device_unregister(&mdp->v4l2_dev);
 
+	flush_workqueue(mdp->wdt_wq);
+	destroy_workqueue(mdp->wdt_wq);
+
 	flush_workqueue(mdp->job_wq);
 	destroy_workqueue(mdp->job_wq);
 
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 8587278..0f3e710 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -104,6 +104,7 @@ static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
 {
 	struct vdec_fb *disp_frame_buffer = NULL;
 	struct mtk_video_dec_buf *dstbuf;
+	struct vb2_v4l2_buffer *vb;
 
 	mtk_v4l2_debug(3, "[%d]", ctx->id);
 	if (vdec_if_get_param(ctx,
@@ -121,25 +122,26 @@ static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
 
 	dstbuf = container_of(disp_frame_buffer, struct mtk_video_dec_buf,
 				frame_buffer);
+	vb = &dstbuf->m2m_buf.vb;
 	mutex_lock(&ctx->lock);
 	if (dstbuf->used) {
-		vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0,
-					ctx->picinfo.fb_sz[0]);
+		vb2_set_plane_payload(&vb->vb2_buf, 0,
+				      ctx->picinfo.fb_sz[0]);
 		if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
-			vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1,
+			vb2_set_plane_payload(&vb->vb2_buf, 1,
 					      ctx->picinfo.fb_sz[1]);
 
 		mtk_v4l2_debug(2,
 				"[%d]status=%x queue id=%d to done_list %d",
 				ctx->id, disp_frame_buffer->status,
-				dstbuf->vb.vb2_buf.index,
+				vb->vb2_buf.index,
 				dstbuf->queued_in_vb2);
 
-		v4l2_m2m_buf_done(&dstbuf->vb, VB2_BUF_STATE_DONE);
+		v4l2_m2m_buf_done(vb, VB2_BUF_STATE_DONE);
 		ctx->decoded_frame_cnt++;
 	}
 	mutex_unlock(&ctx->lock);
-	return &dstbuf->vb.vb2_buf;
+	return &vb->vb2_buf;
 }
 
 /*
@@ -154,6 +156,7 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
 {
 	struct mtk_video_dec_buf *dstbuf;
 	struct vdec_fb *free_frame_buffer = NULL;
+	struct vb2_v4l2_buffer *vb;
 
 	if (vdec_if_get_param(ctx,
 				GET_PARAM_FREE_FRAME_BUFFER,
@@ -171,6 +174,7 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
 
 	dstbuf = container_of(free_frame_buffer, struct mtk_video_dec_buf,
 				frame_buffer);
+	vb = &dstbuf->m2m_buf.vb;
 
 	mutex_lock(&ctx->lock);
 	if (dstbuf->used) {
@@ -187,9 +191,9 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
 			mtk_v4l2_debug(2,
 				"[%d]status=%x queue id=%d to rdy_queue %d",
 				ctx->id, free_frame_buffer->status,
-				dstbuf->vb.vb2_buf.index,
+				vb->vb2_buf.index,
 				dstbuf->queued_in_vb2);
-			v4l2_m2m_buf_queue(ctx->m2m_ctx, &dstbuf->vb);
+			v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
 		} else if ((dstbuf->queued_in_vb2 == false) &&
 			   (dstbuf->queued_in_v4l2 == true)) {
 			/*
@@ -205,8 +209,8 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
 			mtk_v4l2_debug(2,
 					"[%d]status=%x queue id=%d to rdy_queue",
 					ctx->id, free_frame_buffer->status,
-					dstbuf->vb.vb2_buf.index);
-			v4l2_m2m_buf_queue(ctx->m2m_ctx, &dstbuf->vb);
+					vb->vb2_buf.index);
+			v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
 			dstbuf->queued_in_vb2 = true;
 		} else {
 			/*
@@ -219,14 +223,14 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
 			 */
 			mtk_v4l2_debug(3, "[%d]status=%x err queue id=%d %d %d",
 					ctx->id, free_frame_buffer->status,
-					dstbuf->vb.vb2_buf.index,
+					vb->vb2_buf.index,
 					dstbuf->queued_in_vb2,
 					dstbuf->queued_in_v4l2);
 		}
 		dstbuf->used = false;
 	}
 	mutex_unlock(&ctx->lock);
-	return &dstbuf->vb.vb2_buf;
+	return &vb->vb2_buf;
 }
 
 static void clean_display_buffer(struct mtk_vcodec_ctx *ctx)
@@ -365,8 +369,10 @@ static void mtk_vdec_worker(struct work_struct *work)
 		return;
 	}
 
-	src_buf_info = container_of(src_buf, struct mtk_video_dec_buf, vb);
-	dst_buf_info = container_of(dst_buf, struct mtk_video_dec_buf, vb);
+	src_buf_info = container_of(src_buf, struct mtk_video_dec_buf,
+				    m2m_buf.vb);
+	dst_buf_info = container_of(dst_buf, struct mtk_video_dec_buf,
+				    m2m_buf.vb);
 
 	pfb = &dst_buf_info->frame_buffer;
 	pfb->base_y.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
@@ -397,11 +403,11 @@ static void mtk_vdec_worker(struct work_struct *work)
 
 		vdec_if_decode(ctx, NULL, NULL, &res_chg);
 		clean_display_buffer(ctx);
-		vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0);
+		vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
 		if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
-			vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0);
+			vb2_set_plane_payload(&dst_buf->vb2_buf, 1, 0);
 		dst_buf->flags |= V4L2_BUF_FLAG_LAST;
-		v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE);
+		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
 		clean_free_buffer(ctx);
 		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
 		return;
@@ -417,10 +423,8 @@ static void mtk_vdec_worker(struct work_struct *work)
 	}
 	mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
 			ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
-	dst_buf_info->vb.vb2_buf.timestamp
-			= src_buf_info->vb.vb2_buf.timestamp;
-	dst_buf_info->vb.timecode
-			= src_buf_info->vb.timecode;
+	dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+	dst_buf->timecode = src_buf->timecode;
 	mutex_lock(&ctx->lock);
 	dst_buf_info->used = true;
 	mutex_unlock(&ctx->lock);
@@ -434,7 +438,7 @@ static void mtk_vdec_worker(struct work_struct *work)
 			ctx->id,
 			src_buf->vb2_buf.index,
 			buf.size,
-			src_buf_info->vb.vb2_buf.timestamp,
+			src_buf->vb2_buf.timestamp,
 			dst_buf->vb2_buf.index,
 			ret, res_chg);
 		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
@@ -443,14 +447,14 @@ static void mtk_vdec_worker(struct work_struct *work)
 			src_buf_info->error = true;
 			mutex_unlock(&ctx->lock);
 		}
-		v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_ERROR);
+		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
 	} else if (res_chg == false) {
 		/*
 		 * we only return src buffer with VB2_BUF_STATE_DONE
 		 * when decode success without resolution change
 		 */
 		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
-		v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
+		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
 	}
 
 	dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
@@ -522,7 +526,8 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
 			mtk_v4l2_debug(1, "Capture stream is off. No need to flush.");
 			return 0;
 		}
-		v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf->vb);
+		v4l2_m2m_buf_queue(ctx->m2m_ctx,
+				   &ctx->empty_flush_buf->m2m_buf.vb);
 		v4l2_m2m_try_schedule(ctx->m2m_ctx);
 		break;
 
@@ -1148,7 +1153,8 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
 	 */
 	if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		vb2_v4l2 = to_vb2_v4l2_buffer(vb);
-		buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+		buf = container_of(vb2_v4l2, struct mtk_video_dec_buf,
+				   m2m_buf.vb);
 		mutex_lock(&ctx->lock);
 		if (buf->used == false) {
 			v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
@@ -1175,7 +1181,7 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
 		mtk_v4l2_err("No src buffer");
 		return;
 	}
-	buf = container_of(src_buf, struct mtk_video_dec_buf, vb);
+	buf = container_of(src_buf, struct mtk_video_dec_buf, m2m_buf.vb);
 	if (buf->lastframe) {
 		/* This shouldn't happen. Just in case. */
 		mtk_v4l2_err("Invalid flush buffer.");
@@ -1256,7 +1262,7 @@ static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
 	bool buf_error;
 
 	vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
-	buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+	buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, m2m_buf.vb);
 	mutex_lock(&ctx->lock);
 	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 		buf->queued_in_v4l2 = false;
@@ -1276,7 +1282,7 @@ static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
 	struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb,
 					struct vb2_v4l2_buffer, vb2_buf);
 	struct mtk_video_dec_buf *buf = container_of(vb2_v4l2,
-					struct mtk_video_dec_buf, vb);
+					struct mtk_video_dec_buf, m2m_buf.vb);
 
 	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 		buf->used = false;
@@ -1309,7 +1315,7 @@ static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
 	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) {
 			struct mtk_video_dec_buf *buf_info = container_of(
-					src_buf, struct mtk_video_dec_buf, vb);
+				 src_buf, struct mtk_video_dec_buf, m2m_buf.vb);
 			if (!buf_info->lastframe)
 				v4l2_m2m_buf_done(src_buf,
 						VB2_BUF_STATE_ERROR);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
index e0c5338..cf26b6c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
@@ -9,7 +9,7 @@
 #define _MTK_VCODEC_DEC_H_
 
 #include <media/videobuf2-core.h>
-#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mem2mem.h>
 
 #define VCODEC_CAPABILITY_4K_DISABLED	0x10
 #define VCODEC_DEC_4K_CODED_WIDTH	4096U
@@ -33,7 +33,7 @@ struct vdec_fb {
 
 /**
  * struct mtk_video_dec_buf - Private data related to each VB2 buffer.
- * @b:		VB2 buffer
+ * @m2m_buf:	M2M buffer
  * @list:	link list
  * @used:	Capture buffer contain decoded frame data and keep in
  *			codec data structure
@@ -47,8 +47,7 @@ struct vdec_fb {
  * Note : These status information help us track and debug buffer state
  */
 struct mtk_video_dec_buf {
-	struct vb2_v4l2_buffer	vb;
-	struct list_head	list;
+	struct v4l2_m2m_buffer	m2m_buf;
 
 	bool	used;
 	bool	queued_in_vb2;
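
The mtk-vcodec changes above all stem from replacing the embedded vb2_v4l2_buffer plus list_head with a single struct v4l2_m2m_buffer, so the driver-private buffer is now recovered through the nested m2m_buf.vb member. A rough standalone sketch of that container_of() pattern, using simplified stand-in structs rather than the real media headers, might look like this:

/*
 * Standalone sketch: recovering a private buffer from an embedded
 * member, mirroring container_of(vb2_v4l2, struct mtk_video_dec_buf,
 * m2m_buf.vb) in the diff above.  Struct layouts are simplified.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_vb2_v4l2_buffer { int index; };
struct fake_v4l2_m2m_buffer { struct fake_vb2_v4l2_buffer vb; };

struct fake_video_dec_buf {
	struct fake_v4l2_m2m_buffer m2m_buf;	/* was: vb2_v4l2_buffer vb */
	int used;
};

int main(void)
{
	struct fake_video_dec_buf buf = { .m2m_buf.vb.index = 7, .used = 1 };
	struct fake_vb2_v4l2_buffer *vb = &buf.m2m_buf.vb;

	/* walk back from the nested member to the containing struct */
	struct fake_video_dec_buf *priv =
		container_of(vb, struct fake_video_dec_buf, m2m_buf.vb);

	printf("index=%d used=%d\n", priv->m2m_buf.vb.index, priv->used);
	return 0;
}

The kernel's container_of() additionally type-checks the pointer, but the pointer arithmetic is the same.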
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index 944771e..100ae8c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -137,7 +137,7 @@ static int fops_vcodec_open(struct file *file)
 	}
 	src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
 				V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
-	ctx->empty_flush_buf->vb.vb2_buf.vb2_queue = src_vq;
+	ctx->empty_flush_buf->m2m_buf.vb.vb2_buf.vb2_queue = src_vq;
 	ctx->empty_flush_buf->lastframe = true;
 	mtk_vcodec_dec_set_default_params(ctx);
 
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index fd8de02..d469ff6 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -332,14 +332,12 @@ static int vidioc_try_fmt(struct v4l2_format *f,
 
 		pix_fmt_mp->num_planes = fmt->num_planes;
 		pix_fmt_mp->plane_fmt[0].sizeimage =
-				pix_fmt_mp->width * pix_fmt_mp->height +
-				((ALIGN(pix_fmt_mp->width, 16) * 2) * 16);
+			pix_fmt_mp->width * pix_fmt_mp->height;
 		pix_fmt_mp->plane_fmt[0].bytesperline = pix_fmt_mp->width;
 
 		if (pix_fmt_mp->num_planes == 2) {
 			pix_fmt_mp->plane_fmt[1].sizeimage =
-				(pix_fmt_mp->width * pix_fmt_mp->height) / 2 +
-				(ALIGN(pix_fmt_mp->width, 16) * 16);
+				(pix_fmt_mp->width * pix_fmt_mp->height) / 2;
 			pix_fmt_mp->plane_fmt[2].sizeimage = 0;
 			pix_fmt_mp->plane_fmt[1].bytesperline =
 							pix_fmt_mp->width;
@@ -347,8 +345,7 @@ static int vidioc_try_fmt(struct v4l2_format *f,
 		} else if (pix_fmt_mp->num_planes == 3) {
 			pix_fmt_mp->plane_fmt[1].sizeimage =
 			pix_fmt_mp->plane_fmt[2].sizeimage =
-				(pix_fmt_mp->width * pix_fmt_mp->height) / 4 +
-				((ALIGN(pix_fmt_mp->width, 16) / 2) * 16);
+				(pix_fmt_mp->width * pix_fmt_mp->height) / 4;
 			pix_fmt_mp->plane_fmt[1].bytesperline =
 				pix_fmt_mp->plane_fmt[2].bytesperline =
 				pix_fmt_mp->width / 2;
@@ -798,13 +795,14 @@ static void vb2ops_venc_buf_queue(struct vb2_buffer *vb)
 			container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
 
 	struct mtk_video_enc_buf *mtk_buf =
-			container_of(vb2_v4l2, struct mtk_video_enc_buf, vb);
+			container_of(vb2_v4l2, struct mtk_video_enc_buf,
+				     m2m_buf.vb);
 
 	if ((vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
 	    (ctx->param_change != MTK_ENCODE_PARAM_NONE)) {
 		mtk_v4l2_debug(1, "[%d] Before id=%d encode parameter change %x",
 			       ctx->id,
-			       mtk_buf->vb.vb2_buf.index,
+			       vb2_v4l2->vb2_buf.index,
 			       ctx->param_change);
 		mtk_buf->param_change = ctx->param_change;
 		mtk_buf->enc_params = ctx->enc_params;
@@ -986,7 +984,8 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
 	struct venc_enc_param enc_prm;
 	struct vb2_v4l2_buffer *vb2_v4l2 = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
 	struct mtk_video_enc_buf *mtk_buf =
-			container_of(vb2_v4l2, struct mtk_video_enc_buf, vb);
+			container_of(vb2_v4l2, struct mtk_video_enc_buf,
+				     m2m_buf.vb);
 
 	int ret = 0;
 
@@ -998,7 +997,7 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
 		enc_prm.bitrate = mtk_buf->enc_params.bitrate;
 		mtk_v4l2_debug(1, "[%d] id=%d, change param br=%d",
 				ctx->id,
-				mtk_buf->vb.vb2_buf.index,
+				vb2_v4l2->vb2_buf.index,
 				enc_prm.bitrate);
 		ret |= venc_if_set_param(ctx,
 					 VENC_SET_PARAM_ADJUST_BITRATE,
@@ -1009,7 +1008,7 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
 				   mtk_buf->enc_params.framerate_denom;
 		mtk_v4l2_debug(1, "[%d] id=%d, change param fr=%d",
 			       ctx->id,
-			       mtk_buf->vb.vb2_buf.index,
+			       vb2_v4l2->vb2_buf.index,
 			       enc_prm.frm_rate);
 		ret |= venc_if_set_param(ctx,
 					 VENC_SET_PARAM_ADJUST_FRAMERATE,
@@ -1026,7 +1025,7 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
 	if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_FORCE_INTRA) {
 		mtk_v4l2_debug(1, "[%d] id=%d, change param force I=%d",
 				ctx->id,
-				mtk_buf->vb.vb2_buf.index,
+				vb2_v4l2->vb2_buf.index,
 				mtk_buf->enc_params.force_intra);
 		if (mtk_buf->enc_params.force_intra)
 			ret |= venc_if_set_param(ctx,
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h
index a9c9f86..513ee79 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h
@@ -9,7 +9,7 @@
 #define _MTK_VCODEC_ENC_H_
 
 #include <media/videobuf2-core.h>
-#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mem2mem.h>
 
 #define MTK_VENC_IRQ_STATUS_SPS	0x1
 #define MTK_VENC_IRQ_STATUS_PPS	0x2
@@ -23,15 +23,15 @@
 
 /**
  * struct mtk_video_enc_buf - Private data related to each VB2 buffer.
- * @vb: Pointer to related VB2 buffer.
+ * @m2m_buf:	M2M buffer
  * @list:	list that buffer link to
  * @param_change: Types of encode parameter change before encoding this
  *				buffer
  * @enc_params: Encode parameters changed before encode this buffer
  */
 struct mtk_video_enc_buf {
-	struct vb2_v4l2_buffer vb;
-	struct list_head list;
+	struct v4l2_m2m_buffer m2m_buf;
+
 	u32 param_change;
 	struct mtk_enc_params enc_params;
 };
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 327c571..a4ee6b8 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -810,6 +810,10 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
 
 		ret = v4l2_subdev_call(subdev, video, s_stream, 0);
 
+		/* Stop at the first external sub-device. */
+		if (subdev->dev != isp->dev)
+			break;
+
 		if (subdev == &isp->isp_res.subdev)
 			ret |= isp_pipeline_wait(isp, isp_pipeline_wait_resizer);
 		else if (subdev == &isp->isp_prev.subdev)
@@ -837,10 +841,6 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
 						      &subdev->entity);
 			failure = -ETIMEDOUT;
 		}
-
-		/* Stop at the first external sub-device. */
-		if (subdev->dev != isp->dev)
-			break;
 	}
 
 	return failure;
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index e2f336c..471ae7cd 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -1607,6 +1607,11 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
 		return 0;
 	}
 
+	/* Don't restart CCDC if we're just about to stop streaming. */
+	if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS &&
+	    ccdc->stopping & CCDC_STOP_REQUEST)
+		return 0;
+
 	if (!ccdc_has_all_fields(ccdc))
 		return 1;
 
@@ -1661,16 +1666,15 @@ static void ccdc_vd0_isr(struct isp_ccdc_device *ccdc)
 		spin_unlock_irqrestore(&ccdc->lock, flags);
 	}
 
-	if (ccdc->output & CCDC_OUTPUT_MEMORY)
-		restart = ccdc_isr_buffer(ccdc);
-
 	spin_lock_irqsave(&ccdc->lock, flags);
-
 	if (ccdc_handle_stopping(ccdc, CCDC_EVENT_VD0)) {
 		spin_unlock_irqrestore(&ccdc->lock, flags);
 		return;
 	}
 
+	if (ccdc->output & CCDC_OUTPUT_MEMORY)
+		restart = ccdc_isr_buffer(ccdc);
+
 	if (!ccdc->shadow_update)
 		ccdc_apply_controls(ccdc);
 	spin_unlock_irqrestore(&ccdc->lock, flags);
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 8d47ea0..43ae645 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2530,6 +2530,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
 	v4l2_device_unregister(&pcdev->v4l2_dev);
 exit_deactivate:
 	pxa_camera_deactivate(pcdev);
+	tasklet_kill(&pcdev->task_eof);
 exit_free_dma:
 	dma_release_channel(pcdev->dma_chans[2]);
 exit_free_dma_u:
@@ -2544,6 +2545,7 @@ static int pxa_camera_remove(struct platform_device *pdev)
 	struct pxa_camera_dev *pcdev = dev_get_drvdata(&pdev->dev);
 
 	pxa_camera_deactivate(pcdev);
+	tasklet_kill(&pcdev->task_eof);
 	dma_release_channel(pcdev->dma_chans[0]);
 	dma_release_channel(pcdev->dma_chans[1]);
 	dma_release_channel(pcdev->dma_chans[2]);
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 9e2e63f..5ff565e 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -144,7 +144,7 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
 	}
 
 	/* HW limit width to a multiple of 32 (2^5) for NV12/16 else 2 (2^1) */
-	switch (vin->format.pixelformat) {
+	switch (pix->pixelformat) {
 	case V4L2_PIX_FMT_NV12:
 	case V4L2_PIX_FMT_NV16:
 		walign = 5;
diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c
index 4372abb..a74e9fd 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-hw.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c
@@ -14,8 +14,8 @@
 #define MAX_SRC_WIDTH           2048
 
 /* Reset & boot poll config */
-#define POLL_RST_MAX            50
-#define POLL_RST_DELAY_MS       20
+#define POLL_RST_MAX            500
+#define POLL_RST_DELAY_MS       2
 
 enum bdisp_target_plan {
 	BDISP_RGB,
@@ -382,7 +382,7 @@ int bdisp_hw_reset(struct bdisp_dev *bdisp)
 	for (i = 0; i < POLL_RST_MAX; i++) {
 		if (readl(bdisp->regs + BLT_STA1) & BLT_STA1_IDLE)
 			break;
-		msleep(POLL_RST_DELAY_MS);
+		udelay(POLL_RST_DELAY_MS * 1000);
 	}
 	if (i == POLL_RST_MAX)
 		dev_err(bdisp->dev, "Reset timeout\n");
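
Worth noting: the new poll constants keep the worst-case reset budget at roughly one second, since 50 polls × 20 ms = 500 polls × 2 ms = 1000 ms; the finer 2 ms step (now a busy-wait via udelay()) presumably just lets bdisp_hw_reset() observe BLT_STA1_IDLE sooner.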
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 675b5f2..d1025a5 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -1274,6 +1274,8 @@ static int bdisp_remove(struct platform_device *pdev)
 	if (!IS_ERR(bdisp->clock))
 		clk_unprepare(bdisp->clock);
 
+	destroy_workqueue(bdisp->work_queue);
+
 	dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
 
 	return 0;
@@ -1317,20 +1319,22 @@ static int bdisp_probe(struct platform_device *pdev)
 	bdisp->regs = devm_ioremap_resource(dev, res);
 	if (IS_ERR(bdisp->regs)) {
 		dev_err(dev, "failed to get regs\n");
-		return PTR_ERR(bdisp->regs);
+		ret = PTR_ERR(bdisp->regs);
+		goto err_wq;
 	}
 
 	bdisp->clock = devm_clk_get(dev, BDISP_NAME);
 	if (IS_ERR(bdisp->clock)) {
 		dev_err(dev, "failed to get clock\n");
-		return PTR_ERR(bdisp->clock);
+		ret = PTR_ERR(bdisp->clock);
+		goto err_wq;
 	}
 
 	ret = clk_prepare(bdisp->clock);
 	if (ret < 0) {
 		dev_err(dev, "clock prepare failed\n");
 		bdisp->clock = ERR_PTR(-EINVAL);
-		return ret;
+		goto err_wq;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -1402,7 +1406,8 @@ static int bdisp_probe(struct platform_device *pdev)
 err_clk:
 	if (!IS_ERR(bdisp->clock))
 		clk_unprepare(bdisp->clock);
-
+err_wq:
+	destroy_workqueue(bdisp->work_queue);
 	return ret;
 }
 
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
index a79250a..0560a9c 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
@@ -170,8 +170,9 @@ int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
 
 		/* attach tuner */
 		request_module("tda18212");
-		client = i2c_new_device(tsin->i2c_adapter, &tda18212_info);
-		if (!client || !client->dev.driver) {
+		client = i2c_new_client_device(tsin->i2c_adapter,
+					       &tda18212_info);
+		if (!i2c_client_has_driver(client)) {
 			dvb_frontend_detach(*fe);
 			return -ENODEV;
 		}
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
index f36dc62..eff34de 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
@@ -28,6 +29,12 @@
 
 #include "sun4i_csi.h"
 
+struct sun4i_csi_traits {
+	unsigned int channels;
+	unsigned int max_width;
+	bool has_isp;
+};
+
 static const struct media_entity_operations sun4i_csi_video_entity_ops = {
 	.link_validate = v4l2_subdev_link_validate,
 };
@@ -155,6 +162,31 @@ static int sun4i_csi_probe(struct platform_device *pdev)
 	subdev = &csi->subdev;
 	vdev = &csi->vdev;
 
+	csi->traits = of_device_get_match_data(&pdev->dev);
+	if (!csi->traits)
+		return -EINVAL;
+
+	/*
+	 * On Allwinner SoCs, some high memory bandwidth devices do DMA
+	 * directly over the memory bus (called MBUS), instead of the
+	 * system bus. The memory bus has a different addressing scheme
+	 * without the DRAM starting offset.
+	 *
+	 * In some cases this can be described by an interconnect in
+	 * the device tree. In other cases where the hardware is not
+	 * fully understood and the interconnect is left out of the
+	 * device tree, fall back to a default offset.
+	 */
+	if (of_find_property(csi->dev->of_node, "interconnects", NULL)) {
+		ret = of_dma_configure(csi->dev, csi->dev->of_node, true);
+		if (ret)
+			return ret;
+	} else {
+#ifdef PHYS_PFN_OFFSET
+		csi->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
+#endif
+	}
+
 	csi->mdev.dev = csi->dev;
 	strscpy(csi->mdev.model, "Allwinner Video Capture Device",
 		sizeof(csi->mdev.model));
@@ -177,10 +209,12 @@ static int sun4i_csi_probe(struct platform_device *pdev)
 		return PTR_ERR(csi->bus_clk);
 	}
 
-	csi->isp_clk = devm_clk_get(&pdev->dev, "isp");
-	if (IS_ERR(csi->isp_clk)) {
-		dev_err(&pdev->dev, "Couldn't get our ISP clock\n");
-		return PTR_ERR(csi->isp_clk);
+	if (csi->traits->has_isp) {
+		csi->isp_clk = devm_clk_get(&pdev->dev, "isp");
+		if (IS_ERR(csi->isp_clk)) {
+			dev_err(&pdev->dev, "Couldn't get our ISP clock\n");
+			return PTR_ERR(csi->isp_clk);
+		}
 	}
 
 	csi->ram_clk = devm_clk_get(&pdev->dev, "ram");
@@ -258,8 +292,21 @@ static int sun4i_csi_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct sun4i_csi_traits sun4i_a10_csi1_traits = {
+	.channels = 1,
+	.max_width = 24,
+	.has_isp = false,
+};
+
+static const struct sun4i_csi_traits sun7i_a20_csi0_traits = {
+	.channels = 4,
+	.max_width = 16,
+	.has_isp = true,
+};
+
 static const struct of_device_id sun4i_csi_of_match[] = {
-	{ .compatible = "allwinner,sun7i-a20-csi0" },
+	{ .compatible = "allwinner,sun4i-a10-csi1", .data = &sun4i_a10_csi1_traits },
+	{ .compatible = "allwinner,sun7i-a20-csi0", .data = &sun7i_a20_csi0_traits },
 	{ /* Sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sun4i_csi_of_match);
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
index 001c8bd..0f67ff6 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
@@ -22,8 +22,8 @@
 #define CSI_CFG_INPUT_FMT(fmt)			((fmt) << 20)
 #define CSI_CFG_OUTPUT_FMT(fmt)			((fmt) << 16)
 #define CSI_CFG_YUV_DATA_SEQ(seq)		((seq) << 8)
-#define CSI_CFG_VSYNC_POL(pol)			((pol) << 2)
-#define CSI_CFG_HSYNC_POL(pol)			((pol) << 1)
+#define CSI_CFG_VREF_POL(pol)			((pol) << 2)
+#define CSI_CFG_HREF_POL(pol)			((pol) << 1)
 #define CSI_CFG_PCLK_POL(pol)			((pol) << 0)
 
 #define CSI_CPT_CTRL_REG		0x08
@@ -108,6 +108,8 @@ struct sun4i_csi {
 	/* Device resources */
 	struct device			*dev;
 
+	const struct sun4i_csi_traits	*traits;
+
 	void __iomem			*regs;
 	struct clk			*bus_clk;
 	struct clk			*isp_clk;
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
index d6979e1..78fa1c5 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
@@ -228,7 +228,7 @@ static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
 	struct sun4i_csi *csi = vb2_get_drv_priv(vq);
 	struct v4l2_fwnode_bus_parallel *bus = &csi->bus;
 	const struct sun4i_csi_format *csi_fmt;
-	unsigned long hsync_pol, pclk_pol, vsync_pol;
+	unsigned long href_pol, pclk_pol, vref_pol;
 	unsigned long flags;
 	unsigned int i;
 	int ret;
@@ -278,13 +278,21 @@ static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
 	writel(CSI_WIN_CTRL_H_ACTIVE(csi->fmt.height),
 	       csi->regs + CSI_WIN_CTRL_H_REG);
 
-	hsync_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH);
-	pclk_pol = !!(bus->flags & V4L2_MBUS_DATA_ACTIVE_HIGH);
-	vsync_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH);
+	/*
+	 * This hardware uses [HV]REF instead of [HV]SYNC. Based on the
+	 * provided timing diagrams in the manual, positive polarity
+	 * equals active high [HV]REF.
+	 *
+	 * When the back porch is 0, [HV]REF is more or less equivalent
+	 * to [HV]SYNC inverted.
+	 */
+	href_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
+	vref_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
+	pclk_pol = !!(bus->flags & V4L2_MBUS_PCLK_SAMPLE_RISING);
 	writel(CSI_CFG_INPUT_FMT(csi_fmt->input) |
 	       CSI_CFG_OUTPUT_FMT(csi_fmt->output) |
-	       CSI_CFG_VSYNC_POL(vsync_pol) |
-	       CSI_CFG_HSYNC_POL(hsync_pol) |
+	       CSI_CFG_VREF_POL(vref_pol) |
+	       CSI_CFG_HREF_POL(href_pol) |
 	       CSI_CFG_PCLK_POL(pclk_pol),
 	       csi->regs + CSI_CFG_REG);
 
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index aaa1dc1..b61f3de 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -834,11 +834,8 @@ static int deinterlace_probe(struct platform_device *pdev)
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	dev->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(dev->base)) {
-		dev_err(dev->dev, "Failed to map registers\n");
-
+	if (IS_ERR(dev->base))
 		return PTR_ERR(dev->base);
-	}
 
 	dev->bus_clk = devm_clk_get(dev->dev, "bus");
 	if (IS_ERR(dev->bus_clk)) {
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 223161f..be54806 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -14,6 +14,8 @@
 #include <linux/delay.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 #include <linux/videodev2.h>
 #include <linux/of_device.h>
 #include <linux/of_graph.h>
@@ -32,8 +34,8 @@
 
 #define CAL_MODULE_NAME "cal"
 
-#define MAX_WIDTH 1920
-#define MAX_HEIGHT 1200
+#define MAX_WIDTH_BYTES (8192 * 8)
+#define MAX_HEIGHT_LINES 16383
 
 #define CAL_VERSION "0.1.0"
 
@@ -71,8 +73,6 @@ static const struct v4l2_fract
 #define CAL_NUM_INPUT 1
 #define CAL_NUM_CONTEXT 2
 
-#define bytes_per_line(pixel, bpp) (ALIGN(pixel * bpp, 16))
-
 #define reg_read(dev, offset) ioread32(dev->base + offset)
 #define reg_write(dev, offset, val) iowrite32(val, dev->base + offset)
 
@@ -91,102 +91,103 @@ static const struct v4l2_fract
 struct cal_fmt {
 	u32	fourcc;
 	u32	code;
-	u8	depth;
+	/* Bits per pixel */
+	u8	bpp;
 };
 
 static struct cal_fmt cal_formats[] = {
 	{
 		.fourcc		= V4L2_PIX_FMT_YUYV,
 		.code		= MEDIA_BUS_FMT_YUYV8_2X8,
-		.depth		= 16,
+		.bpp		= 16,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_UYVY,
 		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
-		.depth		= 16,
+		.bpp		= 16,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_YVYU,
 		.code		= MEDIA_BUS_FMT_YVYU8_2X8,
-		.depth		= 16,
+		.bpp		= 16,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_VYUY,
 		.code		= MEDIA_BUS_FMT_VYUY8_2X8,
-		.depth		= 16,
+		.bpp		= 16,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
 		.code		= MEDIA_BUS_FMT_RGB565_2X8_LE,
-		.depth		= 16,
+		.bpp		= 16,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
 		.code		= MEDIA_BUS_FMT_RGB565_2X8_BE,
-		.depth		= 16,
+		.bpp		= 16,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
 		.code		= MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
-		.depth		= 16,
+		.bpp		= 16,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
 		.code		= MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
-		.depth		= 16,
+		.bpp		= 16,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_RGB24, /* rgb */
 		.code		= MEDIA_BUS_FMT_RGB888_2X12_LE,
-		.depth		= 24,
+		.bpp		= 24,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_BGR24, /* bgr */
 		.code		= MEDIA_BUS_FMT_RGB888_2X12_BE,
-		.depth		= 24,
+		.bpp		= 24,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_RGB32, /* argb */
 		.code		= MEDIA_BUS_FMT_ARGB8888_1X32,
-		.depth		= 32,
+		.bpp		= 32,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_SBGGR8,
 		.code		= MEDIA_BUS_FMT_SBGGR8_1X8,
-		.depth		= 8,
+		.bpp		= 8,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_SGBRG8,
 		.code		= MEDIA_BUS_FMT_SGBRG8_1X8,
-		.depth		= 8,
+		.bpp		= 8,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_SGRBG8,
 		.code		= MEDIA_BUS_FMT_SGRBG8_1X8,
-		.depth		= 8,
+		.bpp		= 8,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_SRGGB8,
 		.code		= MEDIA_BUS_FMT_SRGGB8_1X8,
-		.depth		= 8,
+		.bpp		= 8,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_SBGGR10,
 		.code		= MEDIA_BUS_FMT_SBGGR10_1X10,
-		.depth		= 16,
+		.bpp		= 10,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_SGBRG10,
 		.code		= MEDIA_BUS_FMT_SGBRG10_1X10,
-		.depth		= 16,
+		.bpp		= 10,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_SGRBG10,
 		.code		= MEDIA_BUS_FMT_SGRBG10_1X10,
-		.depth		= 16,
+		.bpp		= 10,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_SRGGB10,
 		.code		= MEDIA_BUS_FMT_SRGGB10_1X10,
-		.depth		= 16,
+		.bpp		= 10,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_SBGGR12,
 		.code		= MEDIA_BUS_FMT_SBGGR12_1X12,
-		.depth		= 16,
+		.bpp		= 12,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_SGBRG12,
 		.code		= MEDIA_BUS_FMT_SGBRG12_1X12,
-		.depth		= 16,
+		.bpp		= 12,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_SGRBG12,
 		.code		= MEDIA_BUS_FMT_SGRBG12_1X12,
-		.depth		= 16,
+		.bpp		= 12,
 	}, {
 		.fourcc		= V4L2_PIX_FMT_SRGGB12,
 		.code		= MEDIA_BUS_FMT_SRGGB12_1X12,
-		.depth		= 16,
+		.bpp		= 12,
 	},
 };
 
@@ -220,15 +221,6 @@ struct cal_dmaqueue {
 	int			ini_jiffies;
 };
 
-struct cm_data {
-	void __iomem		*base;
-	struct resource		*res;
-
-	unsigned int		camerrx_control;
-
-	struct platform_device *pdev;
-};
-
 struct cc_data {
 	void __iomem		*base;
 	struct resource		*res;
@@ -236,6 +228,113 @@ struct cc_data {
 	struct platform_device *pdev;
 };
 
+/* CTRL_CORE_CAMERRX_CONTROL register field id */
+enum cal_camerarx_field {
+	F_CTRLCLKEN,
+	F_CAMMODE,
+	F_LANEENABLE,
+	F_CSI_MODE,
+
+	F_MAX_FIELDS,
+};
+
+struct cal_csi2_phy {
+	struct regmap_field *fields[F_MAX_FIELDS];
+	struct reg_field *base_fields;
+	const int num_lanes;
+};
+
+struct cal_data {
+	const int num_csi2_phy;
+	struct cal_csi2_phy *csi2_phy_core;
+
+	const unsigned int flags;
+};
+
+static struct reg_field dra72x_ctrl_core_csi0_reg_fields[F_MAX_FIELDS] = {
+	[F_CTRLCLKEN] = REG_FIELD(0, 10, 10),
+	[F_CAMMODE] = REG_FIELD(0, 11, 12),
+	[F_LANEENABLE] = REG_FIELD(0, 13, 16),
+	[F_CSI_MODE] = REG_FIELD(0, 17, 17),
+};
+
+static struct reg_field dra72x_ctrl_core_csi1_reg_fields[F_MAX_FIELDS] = {
+	[F_CTRLCLKEN] = REG_FIELD(0, 0, 0),
+	[F_CAMMODE] = REG_FIELD(0, 1, 2),
+	[F_LANEENABLE] = REG_FIELD(0, 3, 4),
+	[F_CSI_MODE] = REG_FIELD(0, 5, 5),
+};
+
+static struct cal_csi2_phy dra72x_cal_csi_phy[] = {
+	{
+		.base_fields = dra72x_ctrl_core_csi0_reg_fields,
+		.num_lanes = 4,
+	},
+	{
+		.base_fields = dra72x_ctrl_core_csi1_reg_fields,
+		.num_lanes = 2,
+	},
+};
+
+static const struct cal_data dra72x_cal_data = {
+	.csi2_phy_core = dra72x_cal_csi_phy,
+	.num_csi2_phy = ARRAY_SIZE(dra72x_cal_csi_phy),
+};
+
+static const struct cal_data dra72x_es1_cal_data = {
+	.csi2_phy_core = dra72x_cal_csi_phy,
+	.num_csi2_phy = ARRAY_SIZE(dra72x_cal_csi_phy),
+	.flags = DRA72_CAL_PRE_ES2_LDO_DISABLE,
+};
+
+static struct reg_field dra76x_ctrl_core_csi0_reg_fields[F_MAX_FIELDS] = {
+	[F_CTRLCLKEN] = REG_FIELD(0, 8, 8),
+	[F_CAMMODE] = REG_FIELD(0, 9, 10),
+	[F_CSI_MODE] = REG_FIELD(0, 11, 11),
+	[F_LANEENABLE] = REG_FIELD(0, 27, 31),
+};
+
+static struct reg_field dra76x_ctrl_core_csi1_reg_fields[F_MAX_FIELDS] = {
+	[F_CTRLCLKEN] = REG_FIELD(0, 0, 0),
+	[F_CAMMODE] = REG_FIELD(0, 1, 2),
+	[F_CSI_MODE] = REG_FIELD(0, 3, 3),
+	[F_LANEENABLE] = REG_FIELD(0, 24, 26),
+};
+
+static struct cal_csi2_phy dra76x_cal_csi_phy[] = {
+	{
+		.base_fields = dra76x_ctrl_core_csi0_reg_fields,
+		.num_lanes = 5,
+	},
+	{
+		.base_fields = dra76x_ctrl_core_csi1_reg_fields,
+		.num_lanes = 3,
+	},
+};
+
+static const struct cal_data dra76x_cal_data = {
+	.csi2_phy_core = dra76x_cal_csi_phy,
+	.num_csi2_phy = ARRAY_SIZE(dra76x_cal_csi_phy),
+};
+
+static struct reg_field am654_ctrl_core_csi0_reg_fields[F_MAX_FIELDS] = {
+	[F_CTRLCLKEN] = REG_FIELD(0, 15, 15),
+	[F_CAMMODE] = REG_FIELD(0, 24, 25),
+	[F_LANEENABLE] = REG_FIELD(0, 0, 4),
+};
+
+static struct cal_csi2_phy am654_cal_csi_phy[] = {
+	{
+		.base_fields = am654_ctrl_core_csi0_reg_fields,
+		.num_lanes = 5,
+	},
+};
+
+static const struct cal_data am654_cal_data = {
+	.csi2_phy_core = am654_cal_csi_phy,
+	.num_csi2_phy = ARRAY_SIZE(am654_cal_csi_phy),
+};
+
 /*
  * there is one cal_dev structure in the driver, it is shared by
  * all instances.
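
The per-SoC tables above express the CTRL_CORE_CAMERRX_CONTROL layout as regmap fields instead of open-coded shift/mask pairs; they are bound to the syscon regmap at probe time and then written with regmap_field_write() in camerarx_phy_enable()/camerarx_phy_disable() below. A hedged, minimal fragment of that kernel regmap-field pattern -- the register offset and bit range here are placeholders, not values taken from this driver:

	#include <linux/regmap.h>

	/* Placeholder layout: a 1-bit clock-enable field at offset 0x40. */
	const struct reg_field clk_en_field = REG_FIELD(0x40, 10, 10);

	/* Probe-time binding of the field description to a regmap ... */
	struct regmap_field *clk_en =
		devm_regmap_field_alloc(dev, syscon, clk_en_field);
	if (IS_ERR(clk_en))
		return PTR_ERR(clk_en);

	/* ... after which a field update is a single read-modify-write call. */
	regmap_field_write(clk_en, 1);
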
@@ -247,8 +346,15 @@ struct cal_dev {
 	struct platform_device	*pdev;
 	struct v4l2_device	v4l2_dev;
 
+	/* Controller flags for special cases */
+	unsigned int		flags;
+
+	const struct cal_data	*data;
+
 	/* Control Module handle */
-	struct cm_data		*cm;
+	struct regmap		*syscon_camerrx;
+	u32			syscon_camerrx_offset;
+
 	/* Camera Core Module handle */
 	struct cc_data		*cc[CAL_NUM_CSI2_PORTS];
 
@@ -359,73 +465,115 @@ static inline void set_field(u32 *valp, u32 field, u32 mask)
 	*valp = val;
 }
 
-/*
- * Control Module block access
- */
-static struct cm_data *cm_create(struct cal_dev *dev)
+static u32 cal_data_get_phy_max_lanes(struct cal_ctx *ctx)
+{
+	struct cal_dev *dev = ctx->dev;
+	u32 phy_id = ctx->csi2_port - 1;
+
+	return dev->data->csi2_phy_core[phy_id].num_lanes;
+}
+
+static u32 cal_data_get_num_csi2_phy(struct cal_dev *dev)
+{
+	return dev->data->num_csi2_phy;
+}
+
+static int cal_camerarx_regmap_init(struct cal_dev *dev)
+{
+	struct reg_field *field;
+	struct cal_csi2_phy *phy;
+	int i, j;
+
+	if (!dev->data)
+		return -EINVAL;
+
+	for (i = 0; i < cal_data_get_num_csi2_phy(dev); i++) {
+		phy = &dev->data->csi2_phy_core[i];
+		for (j = 0; j < F_MAX_FIELDS; j++) {
+			field = &phy->base_fields[j];
+			/*
+			 * Here we update the reg offset with the
+			 * value found in DT
+			 */
+			field->reg = dev->syscon_camerrx_offset;
+			phy->fields[j] =
+				devm_regmap_field_alloc(&dev->pdev->dev,
+							dev->syscon_camerrx,
+							*field);
+			if (IS_ERR(phy->fields[j])) {
+				cal_err(dev, "Unable to allocate regmap fields\n");
+				return PTR_ERR(phy->fields[j]);
+			}
+		}
+	}
+	return 0;
+}
+
+static const struct regmap_config cal_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+};
+
+static struct regmap *cal_get_camerarx_regmap(struct cal_dev *dev)
 {
 	struct platform_device *pdev = dev->pdev;
-	struct cm_data *cm;
+	struct regmap *regmap;
+	void __iomem *base;
+	u32 reg_io_width;
+	struct regmap_config r_config = cal_regmap_config;
+	struct resource *res;
 
-	cm = devm_kzalloc(&pdev->dev, sizeof(*cm), GFP_KERNEL);
-	if (!cm)
-		return ERR_PTR(-ENOMEM);
-
-	cm->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-						"camerrx_control");
-	cm->base = devm_ioremap_resource(&pdev->dev, cm->res);
-	if (IS_ERR(cm->base)) {
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "camerrx_control");
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base)) {
 		cal_err(dev, "failed to ioremap\n");
-		return ERR_CAST(cm->base);
+		return ERR_CAST(base);
 	}
 
 	cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
-		cm->res->name, &cm->res->start, &cm->res->end);
+		res->name, &res->start, &res->end);
 
-	return cm;
+	reg_io_width = 4;
+	r_config.reg_stride = reg_io_width;
+	r_config.val_bits = reg_io_width * 8;
+	r_config.max_register = resource_size(res) - reg_io_width;
+
+	regmap = regmap_init_mmio(NULL, base, &r_config);
+	if (IS_ERR(regmap))
+		pr_err("regmap init failed\n");
+
+	return regmap;
 }
 
+/*
+ * Control Module CAMERARX block access
+ */
 static void camerarx_phy_enable(struct cal_ctx *ctx)
 {
-	u32 val;
+	struct cal_csi2_phy *phy;
+	u32 phy_id = ctx->csi2_port - 1;
+	u32 max_lanes;
 
-	if (!ctx->dev->cm->base) {
-		ctx_err(ctx, "cm not mapped\n");
-		return;
-	}
-
-	val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
-	if (ctx->csi2_port == 1) {
-		set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
-		set_field(&val, 0, CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK);
-		/* enable all lanes by default */
-		set_field(&val, 0xf, CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK);
-		set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_MODE_MASK);
-	} else if (ctx->csi2_port == 2) {
-		set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
-		set_field(&val, 0, CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK);
-		/* enable all lanes by default */
-		set_field(&val, 0x3, CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK);
-		set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_MODE_MASK);
-	}
-	reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
+	phy = &ctx->dev->data->csi2_phy_core[phy_id];
+	regmap_field_write(phy->fields[F_CAMMODE], 0);
+	/* Always enable all lanes at the phy control level */
+	max_lanes = (1 << cal_data_get_phy_max_lanes(ctx)) - 1;
+	regmap_field_write(phy->fields[F_LANEENABLE], max_lanes);
+	/* F_CSI_MODE is not present on every architecture */
+	if (phy->fields[F_CSI_MODE])
+		regmap_field_write(phy->fields[F_CSI_MODE], 1);
+	regmap_field_write(phy->fields[F_CTRLCLKEN], 1);
 }
 
 static void camerarx_phy_disable(struct cal_ctx *ctx)
 {
-	u32 val;
+	struct cal_csi2_phy *phy;
+	u32 phy_id = ctx->csi2_port - 1;
 
-	if (!ctx->dev->cm->base) {
-		ctx_err(ctx, "cm not mapped\n");
-		return;
-	}
-
-	val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
-	if (ctx->csi2_port == 1)
-		set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
-	else if (ctx->csi2_port == 2)
-		set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
-	reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
+	phy = &ctx->dev->data->csi2_phy_core[phy_id];
+	regmap_field_write(phy->fields[F_CTRLCLKEN], 0);
 }
 
 /*
@@ -474,9 +622,52 @@ static void cal_get_hwinfo(struct cal_dev *dev)
 		hwinfo);
 }
 
-static inline int cal_runtime_get(struct cal_dev *dev)
+/*
+ *   Errata i913: CSI2 LDO Needs to be disabled when module is powered on
+ *
+ *   Enabling CSI2 LDO shorts it to core supply. It is crucial the 2 CSI2
+ *   LDOs on the device are disabled if CSI-2 module is powered on
+ *   (0x4845 B304 | 0x4845 B384 [28:27] = 0x1) or in ULPS (0x4845 B304
+ *   | 0x4845 B384 [28:27] = 0x2) mode. Common concerns include: high
+ *   current draw on the module supply in active mode.
+ *
+ *   Errata does not apply when CSI-2 module is powered off
+ *   (0x4845 B304 | 0x4845 B384 [28:27] = 0x0).
+ *
+ * SW Workaround:
+ *	Set the following register bits to disable the LDO,
+ *	which is essentially CSI2 REG10 bit 6:
+ *
+ *		Core 0:  0x4845 B828 = 0x0000 0040
+ *		Core 1:  0x4845 B928 = 0x0000 0040
+ */
+static void i913_errata(struct cal_dev *dev, unsigned int port)
 {
-	return pm_runtime_get_sync(&dev->pdev->dev);
+	u32 reg10 = reg_read(dev->cc[port], CAL_CSI2_PHY_REG10);
+
+	set_field(&reg10, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE,
+		  CAL_CSI2_PHY_REG10_I933_LDO_DISABLE_MASK);
+
+	cal_dbg(1, dev, "CSI2_%d_REG10 = 0x%08x\n", port, reg10);
+	reg_write(dev->cc[port], CAL_CSI2_PHY_REG10, reg10);
+}
+
+static int cal_runtime_get(struct cal_dev *dev)
+{
+	int r;
+
+	r = pm_runtime_get_sync(&dev->pdev->dev);
+
+	if (dev->flags & DRA72_CAL_PRE_ES2_LDO_DISABLE) {
+		/*
+		 * Apply the errata on both ports every time we (re-)enable
+		 * the clock.
+		 */
+		i913_errata(dev, 0);
+		i913_errata(dev, 1);
+	}
+
+	return r;
 }
 
 static inline void cal_runtime_put(struct cal_dev *dev)
@@ -508,12 +699,6 @@ static void cal_quickdump_regs(struct cal_dev *dev)
 			       resource_size(dev->ctx[1]->cc->res),
 			       false);
 	}
-
-	cal_info(dev, "CAMERRX_Control Registers @ %pa:\n",
-		 &dev->cm->res->start);
-	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
-		       (__force const void *)dev->cm->base,
-		       resource_size(dev->cm->res), false);
 }
 
 /*
@@ -551,29 +736,76 @@ static void disable_irqs(struct cal_ctx *ctx)
 	reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
 }
 
-static void csi2_init(struct cal_ctx *ctx)
+static void csi2_phy_config(struct cal_ctx *ctx);
+
+static void csi2_phy_init(struct cal_ctx *ctx)
 {
 	int i;
 	u32 val;
 
+	/* Steps
+	 *  1. Configure D-PHY mode and enable required lanes
+	 *  2. Reset complex IO - Wait for completion of reset
+	 *          Note if the external sensor is not sending byte clock,
+	 *          the reset will time out
+	 *  3. Program Stop States
+	 *      A. Program THS_TERM, THS_SETTLE, etc... timing parameters
+	 *              in terms of DDR clock periods
+	 *      B. Enable stop state transition timeouts
+	 *  4. Force FORCERXMODE
+	 *      D. Enable pull down using pad control
+	 *      E. Power up PHY
+	 *      F. Wait for power up completion
+	 *      G. Wait for all enabled lanes to reach stop state
+	 *      H. Disable pull down using pad control
+	 */
+
+	/* 1. Configure D-PHY mode and enable required lanes */
+	camerarx_phy_enable(ctx);
+
+	/* 2. Reset complex IO - Do not wait for reset completion */
+	val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+	set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
+		  CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
+	reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+	ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x De-assert Complex IO Reset\n",
+		ctx->csi2_port,
+		reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)));
+
+	/* Dummy read to allow SCP to complete */
+	val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+
+	/* 3.A. Program Phy Timing Parameters */
+	csi2_phy_config(ctx);
+
+	/* 3.B. Program Stop States */
 	val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
 	set_field(&val, CAL_GEN_ENABLE,
-		  CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
-	set_field(&val, CAL_GEN_ENABLE,
 		  CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK);
 	set_field(&val, CAL_GEN_DISABLE,
 		  CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK);
 	set_field(&val, 407, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);
 	reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
-	ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x\n", ctx->csi2_port,
+	ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x Stop States\n",
+		ctx->csi2_port,
 		reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));
 
+	/* 4. Force FORCERXMODE */
+	val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
+	set_field(&val, CAL_GEN_ENABLE,
+		  CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
+	reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
+	ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x Force RXMODE\n",
+		ctx->csi2_port,
+		reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));
+
+	/* E. Power up the PHY using the complex IO */
 	val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
-	set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
-		  CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
 	set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON,
 		  CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
 	reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+
+	/* F. Wait for power up completion */
 	for (i = 0; i < 10; i++) {
 		if (reg_read_field(ctx->dev,
 				   CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
@@ -582,18 +814,104 @@ static void csi2_init(struct cal_ctx *ctx)
 			break;
 		usleep_range(1000, 1100);
 	}
-	ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", ctx->csi2_port,
-		reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)));
+	ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Powered UP %s\n",
+		ctx->csi2_port,
+		reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)),
+		(i >= 10) ? "(timeout)" : "");
+}
 
-	val = reg_read(ctx->dev, CAL_CTRL);
-	set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK);
-	set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK);
-	set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
-		  CAL_CTRL_POSTED_WRITES_MASK);
-	set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK);
-	set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK);
-	reg_write(ctx->dev, CAL_CTRL, val);
-	ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", reg_read(ctx->dev, CAL_CTRL));
+static void csi2_wait_for_phy(struct cal_ctx *ctx)
+{
+	int i;
+
+	/* Steps
+	 *  2. Wait for completion of reset
+	 *          Note if the external sensor is not sending byte clock,
+	 *          the reset will time out
+	 *  4. Force FORCERXMODE
+	 *      G. Wait for all enabled lanes to reach stop state
+	 *      H. Disable pull down using pad control
+	 */
+
+	/* 2. Wait for reset completion */
+	for (i = 0; i < 250; i++) {
+		if (reg_read_field(ctx->dev,
+				   CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+				   CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) ==
+		    CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED)
+			break;
+		usleep_range(1000, 1100);
+	}
+	ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Complex IO Reset Done (%d) %s\n",
+		ctx->csi2_port,
+		reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)), i,
+		(i >= 250) ? "(timeout)" : "");
+
+	/* 4. G. Wait for all enabled lane to reach stop state */
+	for (i = 0; i < 10; i++) {
+		if (reg_read_field(ctx->dev,
+				   CAL_CSI2_TIMING(ctx->csi2_port),
+				   CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK) ==
+		    CAL_GEN_DISABLE)
+			break;
+		usleep_range(1000, 1100);
+	}
+	ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x Stop State Reached %s\n",
+		ctx->csi2_port,
+		reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)),
+		(i >= 10) ? "(timeout)" : "");
+
+	ctx_dbg(1, ctx, "CSI2_%d_REG1 = 0x%08x (Bit(31,28) should be set!)\n",
+		(ctx->csi2_port - 1), reg_read(ctx->cc, CAL_CSI2_PHY_REG1));
+}
+
+static void csi2_phy_deinit(struct cal_ctx *ctx)
+{
+	int i;
+	u32 val;
+
+	/* Power down the PHY using the complex IO */
+	val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+	set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF,
+		  CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
+	reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+
+	/* Wait for power down completion */
+	for (i = 0; i < 10; i++) {
+		if (reg_read_field(ctx->dev,
+				   CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+				   CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK) ==
+		    CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_OFF)
+			break;
+		usleep_range(1000, 1100);
+	}
+	ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Powered Down %s\n",
+		ctx->csi2_port,
+		reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)),
+		(i >= 10) ? "(timeout)" : "");
+
+	/* Assert Complex IO Reset */
+	val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+	set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL,
+		  CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
+	reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+
+	/* Wait for power down completion */
+	for (i = 0; i < 10; i++) {
+		if (reg_read_field(ctx->dev,
+				   CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+				   CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) ==
+		    CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETONGOING)
+			break;
+		usleep_range(1000, 1100);
+	}
+	ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Complex IO in Reset (%d) %s\n",
+		ctx->csi2_port,
+		reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)), i,
+		(i >= 10) ? "(timeout)" : "");
+
+	/* Disable the phy */
+	camerarx_phy_disable(ctx);
 }
 
 static void csi2_lane_config(struct cal_ctx *ctx)
@@ -665,13 +983,48 @@ static void csi2_ctx_config(struct cal_ctx *ctx)
 
 static void pix_proc_config(struct cal_ctx *ctx)
 {
-	u32 val;
+	u32 val, extract, pack;
+
+	switch (ctx->fmt->bpp) {
+	case 8:
+		extract = CAL_PIX_PROC_EXTRACT_B8;
+		pack = CAL_PIX_PROC_PACK_B8;
+		break;
+	case 10:
+		extract = CAL_PIX_PROC_EXTRACT_B10_MIPI;
+		pack = CAL_PIX_PROC_PACK_B16;
+		break;
+	case 12:
+		extract = CAL_PIX_PROC_EXTRACT_B12_MIPI;
+		pack = CAL_PIX_PROC_PACK_B16;
+		break;
+	case 16:
+		extract = CAL_PIX_PROC_EXTRACT_B16_LE;
+		pack = CAL_PIX_PROC_PACK_B16;
+		break;
+	default:
+		/*
+		 * If you see this warning then it means that you added
+		 * a new entry to the cal_formats[] array with a
+		 * bits-per-pixel value different from the ones handled
+		 * above. Either add support for the new bpp value here
+		 * or adjust the new entry to use one of the values above.
+		 *
+		 * Instead of failing here just use 8 bpp as a default.
+		 */
+		dev_warn_once(&ctx->dev->pdev->dev,
+			      "%s:%d:%s: bpp:%d unsupported! Overwritten with 8.\n",
+			      __FILE__, __LINE__, __func__, ctx->fmt->bpp);
+		extract = CAL_PIX_PROC_EXTRACT_B8;
+		pack = CAL_PIX_PROC_PACK_B8;
+		break;
+	}
 
 	val = reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port));
-	set_field(&val, CAL_PIX_PROC_EXTRACT_B8, CAL_PIX_PROC_EXTRACT_MASK);
+	set_field(&val, extract, CAL_PIX_PROC_EXTRACT_MASK);
 	set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK);
 	set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
-	set_field(&val, CAL_PIX_PROC_PACK_B8, CAL_PIX_PROC_PACK_MASK);
+	set_field(&val, pack, CAL_PIX_PROC_PACK_MASK);
 	set_field(&val, ctx->csi2_port, CAL_PIX_PROC_CPORT_MASK);
 	set_field(&val, CAL_GEN_ENABLE, CAL_PIX_PROC_EN_MASK);
 	reg_write(ctx->dev, CAL_PIX_PROC(ctx->csi2_port), val);
@@ -680,12 +1033,13 @@ static void pix_proc_config(struct cal_ctx *ctx)
 }
 
 static void cal_wr_dma_config(struct cal_ctx *ctx,
-			      unsigned int width)
+			      unsigned int width, unsigned int height)
 {
 	u32 val;
 
 	val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
 	set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
+	set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK);
 	set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
 		  CAL_WR_DMA_CTRL_DTAG_MASK);
 	set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
@@ -720,6 +1074,16 @@ static void cal_wr_dma_config(struct cal_ctx *ctx,
 	reg_write(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port), val);
 	ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->csi2_port,
 		reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port)));
+
+	val = reg_read(ctx->dev, CAL_CTRL);
+	set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK);
+	set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK);
+	set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
+		  CAL_CTRL_POSTED_WRITES_MASK);
+	set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK);
+	set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK);
+	reg_write(ctx->dev, CAL_CTRL, val);
+	ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", reg_read(ctx->dev, CAL_CTRL));
 }
 
 static void cal_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
@@ -733,41 +1097,28 @@ static void cal_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
 #define TCLK_TERM	0
 #define TCLK_MISS	1
 #define TCLK_SETTLE	14
-#define THS_SETTLE	15
 
 static void csi2_phy_config(struct cal_ctx *ctx)
 {
 	unsigned int reg0, reg1;
 	unsigned int ths_term, ths_settle;
-	unsigned int ddrclkperiod_us;
+	unsigned int csi2_ddrclk_khz;
+	struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2 =
+			&ctx->endpoint.bus.mipi_csi2;
+	u32 num_lanes = mipi_csi2->num_data_lanes;
 
-	/*
-	 * THS_TERM: Programmed value = floor(20 ns/DDRClk period) - 2.
-	 */
-	ddrclkperiod_us = ctx->external_rate / 2000000;
-	ddrclkperiod_us = 1000000 / ddrclkperiod_us;
-	ctx_dbg(1, ctx, "ddrclkperiod_us: %d\n", ddrclkperiod_us);
+	/* DPHY timing configuration */
+	/* CSI-2 is DDR and we only count used lanes. */
+	csi2_ddrclk_khz = ctx->external_rate / 1000
+		/ (2 * num_lanes) * ctx->fmt->bpp;
+	ctx_dbg(1, ctx, "csi2_ddrclk_khz: %d\n", csi2_ddrclk_khz);
 
-	ths_term = 20000 / ddrclkperiod_us;
-	ths_term = (ths_term >= 2) ? ths_term - 2 : ths_term;
+	/* THS_TERM: Programmed value = floor(20 ns/DDRClk period) */
+	ths_term = 20 * csi2_ddrclk_khz / 1000000;
 	ctx_dbg(1, ctx, "ths_term: %d (0x%02x)\n", ths_term, ths_term);
 
-	/*
-	 * THS_SETTLE: Programmed value = floor(176.3 ns/CtrlClk period) - 1.
-	 *	Since CtrlClk is fixed at 96Mhz then we get
-	 *	ths_settle = floor(176.3 / 10.416) - 1 = 15
-	 * If we ever switch to a dynamic clock then this code might be useful
-	 *
-	 * unsigned int ctrlclkperiod_us;
-	 * ctrlclkperiod_us = 96000000 / 1000000;
-	 * ctrlclkperiod_us = 1000000 / ctrlclkperiod_us;
-	 * ctx_dbg(1, ctx, "ctrlclkperiod_us: %d\n", ctrlclkperiod_us);
-
-	 * ths_settle = 176300  / ctrlclkperiod_us;
-	 * ths_settle = (ths_settle > 1) ? ths_settle - 1 : ths_settle;
-	 */
-
-	ths_settle = THS_SETTLE;
+	/* THS_SETTLE: Programmed value = floor(105 ns/DDRClk period) + 4 */
+	ths_settle = (105 * csi2_ddrclk_khz / 1000000) + 4;
 	ctx_dbg(1, ctx, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle);
 
 	reg0 = reg_read(ctx->cc, CAL_CSI2_PHY_REG0);
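
As a sanity check on the reworked timing math, the same arithmetic in a standalone (userspace) form, using assumed example inputs -- the 96 MHz external rate, 2 data lanes and 16 bpp are illustrative values, not requirements of the driver:

	#include <stdio.h>

	int main(void)
	{
		unsigned int external_rate = 96000000;	/* Hz, assumed */
		unsigned int num_lanes = 2;		/* assumed */
		unsigned int bpp = 16;			/* assumed */

		/* CSI-2 is DDR and only the used data lanes are counted. */
		unsigned int ddrclk_khz = external_rate / 1000
			/ (2 * num_lanes) * bpp;		/* 384000 */

		/* THS_TERM: floor(20 ns / DDR clock period) */
		unsigned int ths_term = 20 * ddrclk_khz / 1000000;	/* 7 */

		/* THS_SETTLE: floor(105 ns / DDR clock period) + 4 */
		unsigned int ths_settle = 105 * ddrclk_khz / 1000000 + 4; /* 44 */

		printf("ddrclk=%u kHz ths_term=%u ths_settle=%u\n",
		       ddrclk_khz, ths_term, ths_settle);
		return 0;
	}
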
@@ -979,15 +1330,25 @@ static int cal_calc_format_size(struct cal_ctx *ctx,
 				const struct cal_fmt *fmt,
 				struct v4l2_format *f)
 {
+	u32 bpl, max_width;
+
 	if (!fmt) {
 		ctx_dbg(3, ctx, "No cal_fmt provided!\n");
 		return -EINVAL;
 	}
 
-	v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
-			      &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
-	f->fmt.pix.bytesperline = bytes_per_line(f->fmt.pix.width,
-						 fmt->depth >> 3);
+	/*
+	 * Maximum width is bound by the DMA max width in bytes.
+	 * We need to recalculate the actual maximum width depending on
+	 * the number of bytes per pixel required.
+	 */
+	max_width = MAX_WIDTH_BYTES / (ALIGN(fmt->bpp, 8) >> 3);
+	v4l_bound_align_image(&f->fmt.pix.width, 48, max_width, 2,
+			      &f->fmt.pix.height, 32, MAX_HEIGHT_LINES, 0, 0);
+
+	bpl = (f->fmt.pix.width * ALIGN(fmt->bpp, 8)) >> 3;
+	f->fmt.pix.bytesperline = ALIGN(bpl, 16);
+
 	f->fmt.pix.sizeimage = f->fmt.pix.height *
 			       f->fmt.pix.bytesperline;
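
A quick standalone check of the new size calculation, with an assumed 10-bit Bayer format at 1280x720 (illustrative values only, and ALIGN() re-defined locally for a userspace build):

	#include <stdio.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
	#define MAX_WIDTH_BYTES	(8192 * 8)

	int main(void)
	{
		unsigned int bpp = 10;			/* e.g. SBGGR10 */
		unsigned int width = 1280, height = 720;

		/* 10 bpp lands in a 16-bit container, i.e. 2 bytes/pixel. */
		unsigned int max_width = MAX_WIDTH_BYTES / (ALIGN(bpp, 8) >> 3);
		unsigned int bpl = ALIGN((width * ALIGN(bpp, 8)) >> 3, 16);

		printf("max_width=%u bytesperline=%u sizeimage=%u\n",
		       max_width, bpl, height * bpl); /* 32768 2560 1843200 */
		return 0;
	}
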
 
@@ -1132,6 +1493,7 @@ static int cal_enum_framesizes(struct file *file, void *fh,
 	fse.index = fsize->index;
 	fse.pad = 0;
 	fse.code = fmt->code;
+	fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
 
 	ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, NULL, &fse);
 	if (ret)
@@ -1299,36 +1661,50 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
 	if (ret < 0)
 		goto err;
 
+	ret = v4l2_subdev_call(ctx->sensor, core, s_power, 1);
+	if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
+		ctx_err(ctx, "power on failed in subdev\n");
+		goto err;
+	}
+
 	cal_runtime_get(ctx->dev);
 
-	enable_irqs(ctx);
-	camerarx_phy_enable(ctx);
-	csi2_init(ctx);
-	csi2_phy_config(ctx);
-	csi2_lane_config(ctx);
 	csi2_ctx_config(ctx);
 	pix_proc_config(ctx);
-	cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
-	cal_wr_dma_addr(ctx, addr);
-	csi2_ppi_enable(ctx);
+	cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline,
+			  ctx->v_fmt.fmt.pix.height);
+	csi2_lane_config(ctx);
+
+	enable_irqs(ctx);
+	csi2_phy_init(ctx);
 
 	ret = v4l2_subdev_call(ctx->sensor, video, s_stream, 1);
 	if (ret) {
+		v4l2_subdev_call(ctx->sensor, core, s_power, 0);
 		ctx_err(ctx, "stream on failed in subdev\n");
 		cal_runtime_put(ctx->dev);
 		goto err;
 	}
 
+	csi2_wait_for_phy(ctx);
+	cal_wr_dma_addr(ctx, addr);
+	csi2_ppi_enable(ctx);
+
 	if (debug >= 4)
 		cal_quickdump_regs(ctx->dev);
 
 	return 0;
 
 err:
+	spin_lock_irqsave(&ctx->slock, flags);
+	vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+	ctx->cur_frm = NULL;
+	ctx->next_frm = NULL;
 	list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
 		list_del(&buf->list);
 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
 	}
+	spin_unlock_irqrestore(&ctx->slock, flags);
 	return ret;
 }
 
@@ -1338,12 +1714,18 @@ static void cal_stop_streaming(struct vb2_queue *vq)
 	struct cal_dmaqueue *dma_q = &ctx->vidq;
 	struct cal_buffer *buf, *tmp;
 	unsigned long flags;
+	int ret;
+
+	csi2_ppi_disable(ctx);
+	disable_irqs(ctx);
+	csi2_phy_deinit(ctx);
 
 	if (v4l2_subdev_call(ctx->sensor, video, s_stream, 0))
 		ctx_err(ctx, "stream off failed in subdev\n");
 
-	csi2_ppi_disable(ctx);
-	disable_irqs(ctx);
+	ret = v4l2_subdev_call(ctx->sensor, core, s_power, 0);
+	if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+		ctx_err(ctx, "power off failed in subdev\n");
 
 	/* Release all active buffers */
 	spin_lock_irqsave(&ctx->slock, flags);
@@ -1399,6 +1781,7 @@ static const struct v4l2_ioctl_ops cal_ioctl_ops = {
 	.vidioc_querybuf      = vb2_ioctl_querybuf,
 	.vidioc_qbuf          = vb2_ioctl_qbuf,
 	.vidioc_dqbuf         = vb2_ioctl_dqbuf,
+	.vidioc_expbuf        = vb2_ioctl_expbuf,
 	.vidioc_enum_input    = cal_enum_input,
 	.vidioc_g_input       = cal_g_input,
 	.vidioc_s_input       = cal_s_input,
@@ -1451,6 +1834,7 @@ static int cal_async_bound(struct v4l2_async_notifier *notifier,
 
 		memset(&mbus_code, 0, sizeof(mbus_code));
 		mbus_code.index = j;
+		mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
 		ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
 				       NULL, &mbus_code);
 		if (ret)
@@ -1804,10 +2188,15 @@ static struct cal_ctx *cal_create_instance(struct cal_dev *dev, int inst)
 	return NULL;
 }
 
+static const struct of_device_id cal_of_match[];
+
 static int cal_probe(struct platform_device *pdev)
 {
 	struct cal_dev *dev;
 	struct cal_ctx *ctx;
+	struct device_node *parent = pdev->dev.of_node;
+	struct regmap *syscon_camerrx = NULL;
+	u32 syscon_camerrx_offset = 0;
 	int ret;
 	int irq;
 	int i;
@@ -1816,6 +2205,14 @@ static int cal_probe(struct platform_device *pdev)
 	if (!dev)
 		return -ENOMEM;
 
+	dev->data = of_device_get_match_data(&pdev->dev);
+	if (!dev->data) {
+		dev_err(&pdev->dev, "Could not get feature data based on compatible version\n");
+		return -ENODEV;
+	}
+
+	dev->flags = dev->data->flags;
+
 	/* set pseudo v4l2 device name so we can use v4l2_printk */
 	strscpy(dev->v4l2_dev.name, CAL_MODULE_NAME,
 		sizeof(dev->v4l2_dev.name));
@@ -1823,6 +2220,38 @@ static int cal_probe(struct platform_device *pdev)
 	/* save pdev pointer */
 	dev->pdev = pdev;
 
+	syscon_camerrx = syscon_regmap_lookup_by_phandle(parent,
+							 "ti,camerrx-control");
+	ret = of_property_read_u32_index(parent, "ti,camerrx-control", 1,
+					 &syscon_camerrx_offset);
+	if (IS_ERR(syscon_camerrx))
+		ret = PTR_ERR(syscon_camerrx);
+	if (ret) {
+		dev_warn(&pdev->dev, "failed to get ti,camerrx-control: %d\n",
+			 ret);
+
+		/*
+		 * Backward DTS compatibility.
+		 * If syscon entry is not present then check if the
+		 * camerrx_control resource is present.
+		 */
+		syscon_camerrx = cal_get_camerarx_regmap(dev);
+		if (IS_ERR(syscon_camerrx)) {
+			dev_err(&pdev->dev, "failed to get camerrx_control regmap\n");
+			return PTR_ERR(syscon_camerrx);
+		}
+		/* In this case the base already points directly to the
+		 * CM register, so no offset is needed
+		 */
+		syscon_camerrx_offset = 0;
+	}
+
+	dev->syscon_camerrx = syscon_camerrx;
+	dev->syscon_camerrx_offset = syscon_camerrx_offset;
+	ret = cal_camerarx_regmap_init(dev);
+	if (ret)
+		return ret;
+
 	dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						"cal_top");
 	dev->base = devm_ioremap_resource(&pdev->dev, dev->res);
@@ -1841,23 +2270,24 @@ static int cal_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, dev);
 
-	dev->cm = cm_create(dev);
-	if (IS_ERR(dev->cm))
-		return PTR_ERR(dev->cm);
-
 	dev->cc[0] = cc_create(dev, 0);
 	if (IS_ERR(dev->cc[0]))
 		return PTR_ERR(dev->cc[0]);
 
-	dev->cc[1] = cc_create(dev, 1);
-	if (IS_ERR(dev->cc[1]))
-		return PTR_ERR(dev->cc[1]);
+	if (cal_data_get_num_csi2_phy(dev) > 1) {
+		dev->cc[1] = cc_create(dev, 1);
+		if (IS_ERR(dev->cc[1]))
+			return PTR_ERR(dev->cc[1]);
+	} else {
+		dev->cc[1] = NULL;
+	}
 
 	dev->ctx[0] = NULL;
 	dev->ctx[1] = NULL;
 
 	dev->ctx[0] = cal_create_instance(dev, 0);
-	dev->ctx[1] = cal_create_instance(dev, 1);
+	if (cal_data_get_num_csi2_phy(dev) > 1)
+		dev->ctx[1] = cal_create_instance(dev, 1);
 	if (!dev->ctx[0] && !dev->ctx[1]) {
 		cal_err(dev, "Neither port is configured, no point in staying up\n");
 		return -ENODEV;
@@ -1924,7 +2354,22 @@ static int cal_remove(struct platform_device *pdev)
 
 #if defined(CONFIG_OF)
 static const struct of_device_id cal_of_match[] = {
-	{ .compatible = "ti,dra72-cal", },
+	{
+		.compatible = "ti,dra72-cal",
+		.data = (void *)&dra72x_cal_data,
+	},
+	{
+		.compatible = "ti,dra72-pre-es2-cal",
+		.data = (void *)&dra72x_es1_cal_data,
+	},
+	{
+		.compatible = "ti,dra76-cal",
+		.data = (void *)&dra76x_cal_data,
+	},
+	{
+		.compatible = "ti,am654-cal",
+		.data = (void *)&am654_cal_data,
+	},
 	{},
 };
 MODULE_DEVICE_TABLE(of, cal_of_match);
diff --git a/drivers/media/platform/ti-vpe/cal_regs.h b/drivers/media/platform/ti-vpe/cal_regs.h
index 68cfc92..0b76d11 100644
--- a/drivers/media/platform/ti-vpe/cal_regs.h
+++ b/drivers/media/platform/ti-vpe/cal_regs.h
@@ -10,6 +10,30 @@
 #ifndef __TI_CAL_REGS_H
 #define __TI_CAL_REGS_H
 
+/*
+ * struct cal_dev.flags possibilities
+ *
+ * DRA72_CAL_PRE_ES2_LDO_DISABLE:
+ *   Errata i913: CSI2 LDO Needs to be disabled when module is powered on
+ *
+ *   Enabling CSI2 LDO shorts it to core supply. It is crucial the 2 CSI2
+ *   LDOs on the device are disabled if CSI-2 module is powered on
+ *   (0x4845 B304 | 0x4845 B384 [28:27] = 0x1) or in ULPS (0x4845 B304
+ *   | 0x4845 B384 [28:27] = 0x2) mode. Common concerns include: high
+ *   current draw on the module supply in active mode.
+ *
+ *   Errata does not apply when CSI-2 module is powered off
+ *   (0x4845 B304 | 0x4845 B384 [28:27] = 0x0).
+ *
+ * SW Workaround:
+ *	Set the following register bits to disable the LDO,
+ *	which is essentially CSI2 REG10 bit 6:
+ *
+ *		Core 0:  0x4845 B828 = 0x0000 0040
+ *		Core 1:  0x4845 B928 = 0x0000 0040
+ */
+#define DRA72_CAL_PRE_ES2_LDO_DISABLE BIT(0)
+
 #define CAL_NUM_CSI2_PORTS		2
 
 /* CAL register offsets */
@@ -71,6 +95,7 @@
 #define CAL_CSI2_PHY_REG0		0x000
 #define CAL_CSI2_PHY_REG1		0x004
 #define CAL_CSI2_PHY_REG2		0x008
+#define CAL_CSI2_PHY_REG10		0x028
 
 /* CAL Control Module Core Camerrx Control register offsets */
 #define CM_CTRL_CORE_CAMERRX_CONTROL	0x000
@@ -110,7 +135,7 @@
 #define CAL_HL_HWINFO_NPPI_CONTEXTS_EIGHT		2
 #define CAL_HL_HWINFO_NPPI_CONTEXTS_RESERVED		3
 
-#define CAL_HL_SYSCONFIG_SOFTRESET_MASK		BIT_MASK(0)
+#define CAL_HL_SYSCONFIG_SOFTRESET_MASK		BIT(0)
 #define CAL_HL_SYSCONFIG_SOFTRESET_DONE			0x0
 #define CAL_HL_SYSCONFIG_SOFTRESET_PENDING		0x1
 #define CAL_HL_SYSCONFIG_SOFTRESET_NOACTION		0x0
@@ -121,11 +146,11 @@
 #define CAL_HL_SYSCONFIG_IDLEMODE_SMART1		2
 #define CAL_HL_SYSCONFIG_IDLEMODE_SMART2		3
 
-#define CAL_HL_IRQ_EOI_LINE_NUMBER_MASK		BIT_MASK(0)
+#define CAL_HL_IRQ_EOI_LINE_NUMBER_MASK		BIT(0)
 #define CAL_HL_IRQ_EOI_LINE_NUMBER_READ0		0
 #define CAL_HL_IRQ_EOI_LINE_NUMBER_EOI0			0
 
-#define CAL_HL_IRQ_MASK(m)			BIT_MASK(m-1)
+#define CAL_HL_IRQ_MASK(m)			BIT((m) - 1)
 #define CAL_HL_IRQ_NOACTION				0x0
 #define CAL_HL_IRQ_ENABLE				0x1
 #define CAL_HL_IRQ_CLEAR				0x1
@@ -133,7 +158,7 @@
 #define CAL_HL_IRQ_ENABLED				0x1
 #define CAL_HL_IRQ_PENDING				0x1
 
-#define CAL_PIX_PROC_EN_MASK			BIT_MASK(0)
+#define CAL_PIX_PROC_EN_MASK			BIT(0)
 #define CAL_PIX_PROC_EXTRACT_MASK		GENMASK(4, 1)
 #define CAL_PIX_PROC_EXTRACT_B6				0x0
 #define CAL_PIX_PROC_EXTRACT_B7				0x1
@@ -179,7 +204,7 @@
 #define CAL_PIX_PROC_PACK_ARGB				0x6
 #define CAL_PIX_PROC_CPORT_MASK			GENMASK(23, 19)
 
-#define CAL_CTRL_POSTED_WRITES_MASK		BIT_MASK(0)
+#define CAL_CTRL_POSTED_WRITES_MASK		BIT(0)
 #define CAL_CTRL_POSTED_WRITES_NONPOSTED		0
 #define CAL_CTRL_POSTED_WRITES				1
 #define CAL_CTRL_TAGCNT_MASK			GENMASK(4, 1)
@@ -190,10 +215,10 @@
 #define CAL_CTRL_BURSTSIZE_BURST128			0x3
 #define CAL_CTRL_LL_FORCE_STATE_MASK		GENMASK(12, 7)
 #define CAL_CTRL_MFLAGL_MASK			GENMASK(20, 13)
-#define CAL_CTRL_PWRSCPCLK_MASK			BIT_MASK(21)
+#define CAL_CTRL_PWRSCPCLK_MASK			BIT(21)
 #define CAL_CTRL_PWRSCPCLK_AUTO				0
 #define CAL_CTRL_PWRSCPCLK_FORCE			1
-#define CAL_CTRL_RD_DMA_STALL_MASK		BIT_MASK(22)
+#define CAL_CTRL_RD_DMA_STALL_MASK		BIT(22)
 #define CAL_CTRL_MFLAGH_MASK			GENMASK(31, 24)
 
 #define CAL_CTRL1_PPI_GROUPING_MASK		GENMASK(1, 0)
@@ -218,18 +243,18 @@
 #define CAL_VPORT_CTRL1_PCLK_MASK		GENMASK(16, 0)
 #define CAL_VPORT_CTRL1_XBLK_MASK		GENMASK(24, 17)
 #define CAL_VPORT_CTRL1_YBLK_MASK		GENMASK(30, 25)
-#define CAL_VPORT_CTRL1_WIDTH_MASK		BIT_MASK(31)
+#define CAL_VPORT_CTRL1_WIDTH_MASK		BIT(31)
 #define CAL_VPORT_CTRL1_WIDTH_ONE			0
 #define CAL_VPORT_CTRL1_WIDTH_TWO			1
 
 #define CAL_VPORT_CTRL2_CPORT_MASK		GENMASK(4, 0)
-#define CAL_VPORT_CTRL2_FREERUNNING_MASK	BIT_MASK(15)
+#define CAL_VPORT_CTRL2_FREERUNNING_MASK	BIT(15)
 #define CAL_VPORT_CTRL2_FREERUNNING_GATED		0
 #define CAL_VPORT_CTRL2_FREERUNNING_FREE		1
-#define CAL_VPORT_CTRL2_FS_RESETS_MASK		BIT_MASK(16)
+#define CAL_VPORT_CTRL2_FS_RESETS_MASK		BIT(16)
 #define CAL_VPORT_CTRL2_FS_RESETS_NO			0
 #define CAL_VPORT_CTRL2_FS_RESETS_YES			1
-#define CAL_VPORT_CTRL2_FSM_RESET_MASK		BIT_MASK(17)
+#define CAL_VPORT_CTRL2_FSM_RESET_MASK		BIT(17)
 #define CAL_VPORT_CTRL2_FSM_RESET_NOEFFECT		0
 #define CAL_VPORT_CTRL2_FSM_RESET			1
 #define CAL_VPORT_CTRL2_RDY_THR_MASK		GENMASK(31, 18)
@@ -237,23 +262,23 @@
 #define CAL_BYS_CTRL1_PCLK_MASK			GENMASK(16, 0)
 #define CAL_BYS_CTRL1_XBLK_MASK			GENMASK(24, 17)
 #define CAL_BYS_CTRL1_YBLK_MASK			GENMASK(30, 25)
-#define CAL_BYS_CTRL1_BYSINEN_MASK		BIT_MASK(31)
+#define CAL_BYS_CTRL1_BYSINEN_MASK		BIT(31)
 
 #define CAL_BYS_CTRL2_CPORTIN_MASK		GENMASK(4, 0)
 #define CAL_BYS_CTRL2_CPORTOUT_MASK		GENMASK(9, 5)
-#define CAL_BYS_CTRL2_DUPLICATEDDATA_MASK	BIT_MASK(10)
+#define CAL_BYS_CTRL2_DUPLICATEDDATA_MASK	BIT(10)
 #define CAL_BYS_CTRL2_DUPLICATEDDATA_NO			0
 #define CAL_BYS_CTRL2_DUPLICATEDDATA_YES		1
-#define CAL_BYS_CTRL2_FREERUNNING_MASK		BIT_MASK(11)
+#define CAL_BYS_CTRL2_FREERUNNING_MASK		BIT(11)
 #define CAL_BYS_CTRL2_FREERUNNING_NO			0
 #define CAL_BYS_CTRL2_FREERUNNING_YES			1
 
-#define CAL_RD_DMA_CTRL_GO_MASK			BIT_MASK(0)
+#define CAL_RD_DMA_CTRL_GO_MASK			BIT(0)
 #define CAL_RD_DMA_CTRL_GO_DIS				0
 #define CAL_RD_DMA_CTRL_GO_EN				1
 #define CAL_RD_DMA_CTRL_GO_IDLE				0
 #define CAL_RD_DMA_CTRL_GO_BUSY				1
-#define CAL_RD_DMA_CTRL_INIT_MASK		BIT_MASK(1)
+#define CAL_RD_DMA_CTRL_INIT_MASK		BIT(1)
 #define CAL_RD_DMA_CTRL_BW_LIMITER_MASK		GENMASK(10, 2)
 #define CAL_RD_DMA_CTRL_OCP_TAG_CNT_MASK	GENMASK(14, 11)
 #define CAL_RD_DMA_CTRL_PCLK_MASK		GENMASK(31, 15)
@@ -277,13 +302,13 @@
 #define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTEEN		3
 #define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTYFOUR		4
 #define CAL_RD_DMA_CTRL2_CIRC_MODE_RESERVED		5
-#define CAL_RD_DMA_CTRL2_ICM_CSTART_MASK	BIT_MASK(3)
+#define CAL_RD_DMA_CTRL2_ICM_CSTART_MASK	BIT(3)
 #define CAL_RD_DMA_CTRL2_PATTERN_MASK		GENMASK(5, 4)
 #define CAL_RD_DMA_CTRL2_PATTERN_LINEAR			0
 #define CAL_RD_DMA_CTRL2_PATTERN_YUV420			1
 #define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP2		2
 #define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP4		3
-#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_MASK	BIT_MASK(6)
+#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_MASK	BIT(6)
 #define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_FREERUNNING	0
 #define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_WAITFORBYSOUT	1
 #define CAL_RD_DMA_CTRL2_CIRC_SIZE_MASK		GENMASK(29, 16)
@@ -300,7 +325,7 @@
 #define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP2		2
 #define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP4		3
 #define CAL_WR_DMA_CTRL_PATTERN_RESERVED		1
-#define CAL_WR_DMA_CTRL_ICM_PSTART_MASK		BIT_MASK(5)
+#define CAL_WR_DMA_CTRL_ICM_PSTART_MASK		BIT(5)
 #define CAL_WR_DMA_CTRL_DTAG_MASK		GENMASK(8, 6)
 #define CAL_WR_DMA_CTRL_DTAG_ATT_HDR			0
 #define CAL_WR_DMA_CTRL_DTAG_ATT_DAT			1
@@ -311,7 +336,7 @@
 #define CAL_WR_DMA_CTRL_DTAG_D6				6
 #define CAL_WR_DMA_CTRL_DTAG_D7				7
 #define CAL_WR_DMA_CTRL_CPORT_MASK		GENMASK(13, 9)
-#define CAL_WR_DMA_CTRL_STALL_RD_MASK		BIT_MASK(14)
+#define CAL_WR_DMA_CTRL_STALL_RD_MASK		BIT(14)
 #define CAL_WR_DMA_CTRL_YSIZE_MASK		GENMASK(31, 18)
 
 #define CAL_WR_DMA_ADDR_MASK			GENMASK(31, 4)
@@ -327,9 +352,9 @@
 #define CAL_WR_DMA_XSIZE_XSKIP_MASK		GENMASK(15, 3)
 #define CAL_WR_DMA_XSIZE_MASK			GENMASK(31, 19)
 
-#define CAL_CSI2_PPI_CTRL_IF_EN_MASK		BIT_MASK(0)
-#define CAL_CSI2_PPI_CTRL_ECC_EN_MASK		BIT_MASK(2)
-#define CAL_CSI2_PPI_CTRL_FRAME_MASK		BIT_MASK(3)
+#define CAL_CSI2_PPI_CTRL_IF_EN_MASK		BIT(0)
+#define CAL_CSI2_PPI_CTRL_ECC_EN_MASK		BIT(2)
+#define CAL_CSI2_PPI_CTRL_FRAME_MASK		BIT(3)
 #define CAL_CSI2_PPI_CTRL_FRAME_IMMEDIATE		0
 #define CAL_CSI2_PPI_CTRL_FRAME				1
 
@@ -340,18 +365,18 @@
 #define CAL_CSI2_COMPLEXIO_CFG_POSITION_2			2
 #define CAL_CSI2_COMPLEXIO_CFG_POSITION_1			1
 #define CAL_CSI2_COMPLEXIO_CFG_POSITION_NOT_USED		0
-#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK		BIT_MASK(3)
+#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK		BIT(3)
 #define CAL_CSI2_COMPLEXIO_CFG_POL_PLUSMINUS			0
 #define CAL_CSI2_COMPLEXIO_CFG_POL_MINUSPLUS			1
 #define CAL_CSI2_COMPLEXIO_CFG_DATA1_POSITION_MASK	GENMASK(6, 4)
-#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POL_MASK		BIT_MASK(7)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POL_MASK		BIT(7)
 #define CAL_CSI2_COMPLEXIO_CFG_DATA2_POSITION_MASK	GENMASK(10, 8)
-#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POL_MASK		BIT_MASK(11)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POL_MASK		BIT(11)
 #define CAL_CSI2_COMPLEXIO_CFG_DATA3_POSITION_MASK	GENMASK(14, 12)
-#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POL_MASK		BIT_MASK(15)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POL_MASK		BIT(15)
 #define CAL_CSI2_COMPLEXIO_CFG_DATA4_POSITION_MASK	GENMASK(18, 16)
-#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POL_MASK		BIT_MASK(19)
-#define CAL_CSI2_COMPLEXIO_CFG_PWR_AUTO_MASK		BIT_MASK(24)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POL_MASK		BIT(19)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_AUTO_MASK		BIT(24)
 #define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK		GENMASK(26, 25)
 #define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_OFF		0
 #define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON		1
@@ -360,83 +385,83 @@
 #define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF		0
 #define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON			1
 #define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ULP		2
-#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK		BIT_MASK(29)
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK		BIT(29)
 #define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED	1
 #define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETONGOING		0
-#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK		BIT_MASK(30)
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK		BIT(30)
 #define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL			0
 #define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL		1
 
 #define CAL_CSI2_SHORT_PACKET_MASK	GENMASK(23, 0)
 
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS1_MASK		BIT_MASK(0)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS2_MASK		BIT_MASK(1)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS3_MASK		BIT_MASK(2)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS4_MASK		BIT_MASK(3)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS5_MASK		BIT_MASK(4)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1_MASK	BIT_MASK(5)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2_MASK	BIT_MASK(6)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3_MASK	BIT_MASK(7)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4_MASK	BIT_MASK(8)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5_MASK	BIT_MASK(9)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC1_MASK		BIT_MASK(10)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC2_MASK		BIT_MASK(11)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC3_MASK		BIT_MASK(12)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC4_MASK		BIT_MASK(13)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC5_MASK		BIT_MASK(14)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL1_MASK		BIT_MASK(15)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL2_MASK		BIT_MASK(16)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL3_MASK		BIT_MASK(17)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL4_MASK		BIT_MASK(18)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL5_MASK		BIT_MASK(19)
-#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM1_MASK		BIT_MASK(20)
-#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM2_MASK		BIT_MASK(21)
-#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM3_MASK		BIT_MASK(22)
-#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM4_MASK		BIT_MASK(23)
-#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM5_MASK		BIT_MASK(24)
-#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER_MASK	BIT_MASK(25)
-#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT_MASK	BIT_MASK(26)
-#define CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK		BIT_MASK(27)
-#define CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK	BIT_MASK(28)
-#define CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK	BIT_MASK(30)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS1_MASK		BIT(0)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS2_MASK		BIT(1)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS3_MASK		BIT(2)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS4_MASK		BIT(3)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS5_MASK		BIT(4)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1_MASK	BIT(5)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2_MASK	BIT(6)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3_MASK	BIT(7)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4_MASK	BIT(8)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5_MASK	BIT(9)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC1_MASK		BIT(10)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC2_MASK		BIT(11)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC3_MASK		BIT(12)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC4_MASK		BIT(13)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC5_MASK		BIT(14)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL1_MASK		BIT(15)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL2_MASK		BIT(16)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL3_MASK		BIT(17)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL4_MASK		BIT(18)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL5_MASK		BIT(19)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM1_MASK		BIT(20)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM2_MASK		BIT(21)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM3_MASK		BIT(22)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM4_MASK		BIT(23)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM5_MASK		BIT(24)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER_MASK	BIT(25)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT_MASK	BIT(26)
+#define CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK		BIT(27)
+#define CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK	BIT(28)
+#define CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK	BIT(30)
 
 #define CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK	GENMASK(12, 0)
-#define CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK		BIT_MASK(13)
-#define CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK		BIT_MASK(14)
-#define CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK		BIT_MASK(15)
+#define CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK		BIT(13)
+#define CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK		BIT(14)
+#define CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK		BIT(15)
 
-#define CAL_CSI2_VC_IRQ_FS_IRQ_0_MASK			BIT_MASK(0)
-#define CAL_CSI2_VC_IRQ_FE_IRQ_0_MASK			BIT_MASK(1)
-#define CAL_CSI2_VC_IRQ_LS_IRQ_0_MASK			BIT_MASK(2)
-#define CAL_CSI2_VC_IRQ_LE_IRQ_0_MASK			BIT_MASK(3)
-#define CAL_CSI2_VC_IRQ_CS_IRQ_0_MASK			BIT_MASK(4)
-#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_0_MASK	BIT_MASK(5)
-#define CAL_CSI2_VC_IRQ_FS_IRQ_1_MASK			BIT_MASK(8)
-#define CAL_CSI2_VC_IRQ_FE_IRQ_1_MASK			BIT_MASK(9)
-#define CAL_CSI2_VC_IRQ_LS_IRQ_1_MASK			BIT_MASK(10)
-#define CAL_CSI2_VC_IRQ_LE_IRQ_1_MASK			BIT_MASK(11)
-#define CAL_CSI2_VC_IRQ_CS_IRQ_1_MASK			BIT_MASK(12)
-#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_1_MASK	BIT_MASK(13)
-#define CAL_CSI2_VC_IRQ_FS_IRQ_2_MASK			BIT_MASK(16)
-#define CAL_CSI2_VC_IRQ_FE_IRQ_2_MASK			BIT_MASK(17)
-#define CAL_CSI2_VC_IRQ_LS_IRQ_2_MASK			BIT_MASK(18)
-#define CAL_CSI2_VC_IRQ_LE_IRQ_2_MASK			BIT_MASK(19)
-#define CAL_CSI2_VC_IRQ_CS_IRQ_2_MASK			BIT_MASK(20)
-#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_2_MASK	BIT_MASK(21)
-#define CAL_CSI2_VC_IRQ_FS_IRQ_3_MASK			BIT_MASK(24)
-#define CAL_CSI2_VC_IRQ_FE_IRQ_3_MASK			BIT_MASK(25)
-#define CAL_CSI2_VC_IRQ_LS_IRQ_3_MASK			BIT_MASK(26)
-#define CAL_CSI2_VC_IRQ_LE_IRQ_3_MASK			BIT_MASK(27)
-#define CAL_CSI2_VC_IRQ_CS_IRQ_3_MASK			BIT_MASK(28)
-#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_3_MASK	BIT_MASK(29)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_0_MASK			BIT(0)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_0_MASK			BIT(1)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_0_MASK			BIT(2)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_0_MASK			BIT(3)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_0_MASK			BIT(4)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_0_MASK	BIT(5)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_1_MASK			BIT(8)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_1_MASK			BIT(9)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_1_MASK			BIT(10)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_1_MASK			BIT(11)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_1_MASK			BIT(12)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_1_MASK	BIT(13)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_2_MASK			BIT(16)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_2_MASK			BIT(17)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_2_MASK			BIT(18)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_2_MASK			BIT(19)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_2_MASK			BIT(20)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_2_MASK	BIT(21)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_3_MASK			BIT(24)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_3_MASK			BIT(25)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_3_MASK			BIT(26)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_3_MASK			BIT(27)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_3_MASK			BIT(28)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_3_MASK	BIT(29)
 
 #define CAL_CSI2_CTX_DT_MASK		GENMASK(5, 0)
 #define CAL_CSI2_CTX_VC_MASK		GENMASK(7, 6)
 #define CAL_CSI2_CTX_CPORT_MASK		GENMASK(12, 8)
-#define CAL_CSI2_CTX_ATT_MASK		BIT_MASK(13)
+#define CAL_CSI2_CTX_ATT_MASK		BIT(13)
 #define CAL_CSI2_CTX_ATT_PIX			0
 #define CAL_CSI2_CTX_ATT			1
-#define CAL_CSI2_CTX_PACK_MODE_MASK	BIT_MASK(14)
+#define CAL_CSI2_CTX_PACK_MODE_MASK	BIT(14)
 #define CAL_CSI2_CTX_PACK_MODE_LINE		0
 #define CAL_CSI2_CTX_PACK_MODE_FRAME		1
 #define CAL_CSI2_CTX_LINES_MASK		GENMASK(29, 16)
@@ -445,7 +470,7 @@
 
 #define CAL_CSI2_PHY_REG0_THS_SETTLE_MASK	GENMASK(7, 0)
 #define CAL_CSI2_PHY_REG0_THS_TERM_MASK		GENMASK(15, 8)
-#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK	BIT_MASK(24)
+#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK	BIT(24)
 #define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE		1
 #define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_ENABLE		0
 
@@ -453,24 +478,26 @@
 #define CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK		GENMASK(9, 8)
 #define CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK		GENMASK(17, 10)
 #define CAL_CSI2_PHY_REG1_TCLK_TERM_MASK			GENMASK(24, 18)
-#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_MASK	BIT_MASK(25)
+#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_MASK	BIT(25)
 #define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_ERROR		1
 #define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_SUCCESS		0
 #define CAL_CSI2_PHY_REG1_RESET_DONE_STATUS_MASK		GENMASK(29, 28)
 
+#define CAL_CSI2_PHY_REG10_I933_LDO_DISABLE_MASK		BIT(6)
+
 #define CAL_CSI2_PHY_REG2_CCP2_SYNC_PATTERN_MASK		GENMASK(23, 0)
 #define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC3_MASK		GENMASK(25, 24)
 #define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC2_MASK		GENMASK(27, 26)
 #define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC1_MASK		GENMASK(29, 28)
 #define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC0_MASK		GENMASK(31, 30)
 
-#define CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK			BIT_MASK(0)
+#define CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK			BIT(0)
 #define CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK			GENMASK(2, 1)
 #define CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK			GENMASK(4, 3)
-#define CM_CAMERRX_CTRL_CSI1_MODE_MASK				BIT_MASK(5)
-#define CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK			BIT_MASK(10)
+#define CM_CAMERRX_CTRL_CSI1_MODE_MASK				BIT(5)
+#define CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK			BIT(10)
 #define CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK			GENMASK(12, 11)
 #define CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK			GENMASK(16, 13)
-#define CM_CAMERRX_CTRL_CSI0_MODE_MASK				BIT_MASK(17)
+#define CM_CAMERRX_CTRL_CSI0_MODE_MASK				BIT(17)
 
 #endif
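
On the BIT_MASK() to BIT() conversion that runs through this header: both macros expand to a shifted unsigned long 1, but BIT_MASK() reduces its argument modulo BITS_PER_LONG because it is meant for addressing bits inside an unsigned long bitmap array (paired with BIT_WORD()), whereas BIT() is the plain single-bit macro intended for register definitions like these. For the 32-bit fields in this file the generated values are identical either way; the change simply picks the macro that states the intent. Roughly, from include/linux/bits.h and include/linux/bitops.h:

	#define BIT(nr)			(UL(1) << (nr))
	#define BIT_MASK(nr)		(UL(1) << ((nr) % BITS_PER_LONG))
	#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
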
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
index 834114a..f4e0cf7 100644
--- a/drivers/media/platform/ti-vpe/csc.c
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -149,36 +149,28 @@ void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
 	enum v4l2_quantization src_quantization, dst_quantization;
 	u32 src_pixelformat, dst_pixelformat;
 
-	switch (src_fmt->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
-		pix = &src_fmt->fmt.pix;
-		src_pixelformat = pix->pixelformat;
-		src_ycbcr_enc = pix->ycbcr_enc;
-		src_quantization = pix->quantization;
-		break;
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-	default:
+	if (V4L2_TYPE_IS_MULTIPLANAR(src_fmt->type)) {
 		mp = &src_fmt->fmt.pix_mp;
 		src_pixelformat = mp->pixelformat;
 		src_ycbcr_enc = mp->ycbcr_enc;
 		src_quantization = mp->quantization;
-		break;
+	} else {
+		pix = &src_fmt->fmt.pix;
+		src_pixelformat = pix->pixelformat;
+		src_ycbcr_enc = pix->ycbcr_enc;
+		src_quantization = pix->quantization;
 	}
 
-	switch (dst_fmt->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
-		pix = &dst_fmt->fmt.pix;
-		dst_pixelformat = pix->pixelformat;
-		dst_ycbcr_enc = pix->ycbcr_enc;
-		dst_quantization = pix->quantization;
-		break;
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-	default:
+	if (V4L2_TYPE_IS_MULTIPLANAR(dst_fmt->type)) {
 		mp = &dst_fmt->fmt.pix_mp;
 		dst_pixelformat = mp->pixelformat;
 		dst_ycbcr_enc = mp->ycbcr_enc;
 		dst_quantization = mp->quantization;
-		break;
+	} else {
+		pix = &dst_fmt->fmt.pix;
+		dst_pixelformat = pix->pixelformat;
+		dst_ycbcr_enc = pix->ycbcr_enc;
+		dst_quantization = pix->quantization;
 	}
 
 	src_finfo = v4l2_format_info(src_pixelformat);
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index 2f88a7d..e2e551b 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -8,6 +8,7 @@
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
 #include <linux/v4l2-mediabus.h>
+#include <media/v4l2-rect.h>
 #include <media/v4l2-subdev.h>
 
 #include "vimc-common.h"
@@ -18,6 +19,9 @@ MODULE_PARM_DESC(sca_mult, " the image size multiplier");
 
 #define MAX_ZOOM	8
 
+#define VIMC_SCA_FMT_WIDTH_DEFAULT  640
+#define VIMC_SCA_FMT_HEIGHT_DEFAULT 480
+
 struct vimc_sca_device {
 	struct vimc_ent_device ved;
 	struct v4l2_subdev sd;
@@ -25,6 +29,7 @@ struct vimc_sca_device {
 	 * with the width and hight multiplied by mult
 	 */
 	struct v4l2_mbus_framefmt sink_fmt;
+	struct v4l2_rect crop_rect;
 	/* Values calculated when the stream starts */
 	u8 *src_frame;
 	unsigned int src_line_size;
@@ -33,22 +38,64 @@ struct vimc_sca_device {
 };
 
 static const struct v4l2_mbus_framefmt sink_fmt_default = {
-	.width = 640,
-	.height = 480,
+	.width = VIMC_SCA_FMT_WIDTH_DEFAULT,
+	.height = VIMC_SCA_FMT_HEIGHT_DEFAULT,
 	.code = MEDIA_BUS_FMT_RGB888_1X24,
 	.field = V4L2_FIELD_NONE,
 	.colorspace = V4L2_COLORSPACE_DEFAULT,
 };
 
+static const struct v4l2_rect crop_rect_default = {
+	.width = VIMC_SCA_FMT_WIDTH_DEFAULT,
+	.height = VIMC_SCA_FMT_HEIGHT_DEFAULT,
+	.top = 0,
+	.left = 0,
+};
+
+static const struct v4l2_rect crop_rect_min = {
+	.width = VIMC_FRAME_MIN_WIDTH,
+	.height = VIMC_FRAME_MIN_HEIGHT,
+	.top = 0,
+	.left = 0,
+};
+
+static struct v4l2_rect
+vimc_sca_get_crop_bound_sink(const struct v4l2_mbus_framefmt *sink_fmt)
+{
+	/* Get the crop bounds to clamp the crop rectangle correctly */
+	struct v4l2_rect r = {
+		.left = 0,
+		.top = 0,
+		.width = sink_fmt->width,
+		.height = sink_fmt->height,
+	};
+	return r;
+}
+
+static void vimc_sca_adjust_sink_crop(struct v4l2_rect *r,
+				      const struct v4l2_mbus_framefmt *sink_fmt)
+{
+	const struct v4l2_rect sink_rect =
+		vimc_sca_get_crop_bound_sink(sink_fmt);
+
+	/* Disallow rectangles smaller than the minimal one. */
+	v4l2_rect_set_min_size(r, &crop_rect_min);
+	v4l2_rect_map_inside(r, &sink_rect);
+}
+
 static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
 			     struct v4l2_subdev_pad_config *cfg)
 {
 	struct v4l2_mbus_framefmt *mf;
+	struct v4l2_rect *r;
 	unsigned int i;
 
 	mf = v4l2_subdev_get_try_format(sd, cfg, 0);
 	*mf = sink_fmt_default;
 
+	r = v4l2_subdev_get_try_crop(sd, cfg, 0);
+	*r = crop_rect_default;
+
 	for (i = 1; i < sd->entity.num_pads; i++) {
 		mf = v4l2_subdev_get_try_format(sd, cfg, i);
 		*mf = sink_fmt_default;
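
The new vimc_sca_adjust_sink_crop() helper above builds on the v4l2-rect helpers: v4l2_rect_set_min_size() grows the rectangle to at least the minimum frame size, and v4l2_rect_map_inside() then clamps it to, and shifts it within, the sink bounds. A hedged usage fragment with an assumed, oversized request (not a case taken from the patch):

	/* Hypothetical crop request, larger than the 640x480 default sink. */
	struct v4l2_rect r = {
		.left = 100, .top = 100, .width = 2000, .height = 2000,
	};

	vimc_sca_adjust_sink_crop(&r, &sink_fmt_default);
	/* r is now 640x480 at (0, 0): clamped to the sink, then mapped inside. */
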
@@ -107,16 +154,21 @@ static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
 			    struct v4l2_subdev_format *format)
 {
 	struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+	struct v4l2_rect *crop_rect;
 
 	/* Get the current sink format */
-	format->format = (format->which == V4L2_SUBDEV_FORMAT_TRY) ?
-			 *v4l2_subdev_get_try_format(sd, cfg, 0) :
-			 vsca->sink_fmt;
+	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+		format->format = *v4l2_subdev_get_try_format(sd, cfg, 0);
+		crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
+	} else {
+		format->format = vsca->sink_fmt;
+		crop_rect = &vsca->crop_rect;
+	}
 
 	/* Scale the frame size for the source pad */
 	if (VIMC_IS_SRC(format->pad)) {
-		format->format.width = vsca->sink_fmt.width * sca_mult;
-		format->format.height = vsca->sink_fmt.height * sca_mult;
+		format->format.width = crop_rect->width * sca_mult;
+		format->format.height = crop_rect->height * sca_mult;
 	}
 
 	return 0;
@@ -148,6 +200,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
 {
 	struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
 	struct v4l2_mbus_framefmt *sink_fmt;
+	struct v4l2_rect *crop_rect;
 
 	if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
 		/* Do not change the format while stream is on */
@@ -155,8 +208,10 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
 			return -EBUSY;
 
 		sink_fmt = &vsca->sink_fmt;
+		crop_rect = &vsca->crop_rect;
 	} else {
 		sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+		crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
 	}
 
 	/*
@@ -165,8 +220,8 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
 	 */
 	if (VIMC_IS_SRC(fmt->pad)) {
 		fmt->format = *sink_fmt;
-		fmt->format.width = sink_fmt->width * sca_mult;
-		fmt->format.height = sink_fmt->height * sca_mult;
+		fmt->format.width = crop_rect->width * sca_mult;
+		fmt->format.height = crop_rect->height * sca_mult;
 	} else {
 		/* Set the new format in the sink pad */
 		vimc_sca_adjust_sink_fmt(&fmt->format);
@@ -184,6 +239,78 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
 			fmt->format.xfer_func, fmt->format.ycbcr_enc);
 
 		*sink_fmt = fmt->format;
+
+		/* Do the crop, but respect the current bounds */
+		vimc_sca_adjust_sink_crop(crop_rect, sink_fmt);
+	}
+
+	return 0;
+}
+
+static int vimc_sca_get_selection(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_pad_config *cfg,
+				  struct v4l2_subdev_selection *sel)
+{
+	struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *sink_fmt;
+	struct v4l2_rect *crop_rect;
+
+	if (VIMC_IS_SRC(sel->pad))
+		return -EINVAL;
+
+	if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+		sink_fmt = &vsca->sink_fmt;
+		crop_rect = &vsca->crop_rect;
+	} else {
+		sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+		crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
+	}
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		sel->r = *crop_rect;
+		break;
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		sel->r = vimc_sca_get_crop_bound_sink(sink_fmt);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vimc_sca_set_selection(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_pad_config *cfg,
+				  struct v4l2_subdev_selection *sel)
+{
+	struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *sink_fmt;
+	struct v4l2_rect *crop_rect;
+
+	if (VIMC_IS_SRC(sel->pad))
+		return -EINVAL;
+
+	if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+		/* Do not change the format while stream is on */
+		if (vsca->src_frame)
+			return -EBUSY;
+
+		crop_rect = &vsca->crop_rect;
+		sink_fmt = &vsca->sink_fmt;
+	} else {
+		crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
+		sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+	}
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		/* Do the crop, but respect the current bounds */
+		vimc_sca_adjust_sink_crop(&sel->r, sink_fmt);
+		*crop_rect = sel->r;
+		break;
+	default:
+		return -EINVAL;
 	}
 
 	return 0;
@@ -195,6 +322,8 @@ static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = {
 	.enum_frame_size	= vimc_sca_enum_frame_size,
 	.get_fmt		= vimc_sca_get_fmt,
 	.set_fmt		= vimc_sca_set_fmt,
+	.get_selection		= vimc_sca_get_selection,
+	.set_selection		= vimc_sca_set_selection,
 };
 
 static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
@@ -213,11 +342,11 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
 		vsca->bpp = vpix->bpp;
 
 		/* Calculate the width in bytes of the src frame */
-		vsca->src_line_size = vsca->sink_fmt.width *
+		vsca->src_line_size = vsca->crop_rect.width *
 				      sca_mult * vsca->bpp;
 
 		/* Calculate the frame size of the source pad */
-		frame_size = vsca->src_line_size * vsca->sink_fmt.height *
+		frame_size = vsca->src_line_size * vsca->crop_rect.height *
 			     sca_mult;
 
 		/* Allocate the frame buffer. Use vmalloc to be able to
@@ -259,9 +388,10 @@ static void vimc_sca_fill_pix(u8 *const ptr,
 }
 
 static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
-			       const unsigned int lin, const unsigned int col,
+			       unsigned int lin, unsigned int col,
 			       const u8 *const sink_frame)
 {
+	const struct v4l2_rect crop_rect = vsca->crop_rect;
 	unsigned int i, j, index;
 	const u8 *pixel;
 
@@ -278,8 +408,10 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
 	/* point to the place we are going to put the first pixel
 	 * in the scaled src frame
 	 */
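+	/* Translate sink frame coordinates to crop-relative coordinates */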
+	lin -= crop_rect.top;
+	col -= crop_rect.left;
 	index = VIMC_FRAME_INDEX(lin * sca_mult, col * sca_mult,
-				 vsca->sink_fmt.width * sca_mult, vsca->bpp);
+				 crop_rect.width * sca_mult, vsca->bpp);
 
 	dev_dbg(vsca->ved.dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
 		vsca->sd.name, lin * sca_mult, col * sca_mult, index);
@@ -307,12 +439,13 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
 static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca,
 				    const u8 *const sink_frame)
 {
+	const struct v4l2_rect r = vsca->crop_rect;
 	unsigned int i, j;
 
 	/* Scale each pixel from the original sink frame */
 	/* TODO: implement scale down, only scale up is supported for now */
-	for (i = 0; i < vsca->sink_fmt.height; i++)
-		for (j = 0; j < vsca->sink_fmt.width; j++)
+	for (i = r.top; i < r.top + r.height; i++)
+		for (j = r.left; j < r.left + r.width; j++)
 			vimc_sca_scale_pix(vsca, i, j, sink_frame);
 }
 
@@ -384,5 +517,8 @@ struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
 	/* Initialize the frame format */
 	vsca->sink_fmt = sink_fmt_default;
 
+	/* Initialize the crop selection */
+	vsca->crop_rect = crop_rect_default;
+
 	return &vsca->ved;
 }
diff --git a/drivers/media/platform/vivid/Makefile b/drivers/media/platform/vivid/Makefile
index e8a50c5..b12ad015 100644
--- a/drivers/media/platform/vivid/Makefile
+++ b/drivers/media/platform/vivid/Makefile
@@ -3,7 +3,8 @@
 		vivid-vid-cap.o vivid-vid-out.o vivid-kthread-cap.o vivid-kthread-out.o \
 		vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
 		vivid-rds-gen.o vivid-sdr-cap.o vivid-vbi-cap.o vivid-vbi-out.o \
-		vivid-osd.o vivid-meta-cap.o vivid-meta-out.o
+		vivid-osd.o vivid-meta-cap.o vivid-meta-out.o \
+		vivid-kthread-touch.o vivid-touch-cap.o
 ifeq ($(CONFIG_VIDEO_VIVID_CEC),y)
   vivid-objs += vivid-cec.o
 endif
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index c184f9b..15091cb 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -39,6 +39,7 @@
 #include "vivid-ctrls.h"
 #include "vivid-meta-cap.h"
 #include "vivid-meta-out.h"
+#include "vivid-touch-cap.h"
 
 #define VIVID_MODULE_NAME "vivid"
 
@@ -89,6 +90,10 @@ static int meta_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
 module_param_array(meta_out_nr, int, NULL, 0444);
 MODULE_PARM_DESC(meta_out_nr, " videoX start number, -1 is autodetect");
 
+static int touch_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(touch_cap_nr, int, NULL, 0444);
+MODULE_PARM_DESC(touch_cap_nr, " v4l-touchX start number, -1 is autodetect");
+
 static int ccs_cap_mode[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
 module_param_array(ccs_cap_mode, int, NULL, 0444);
 MODULE_PARM_DESC(ccs_cap_mode, " capture crop/compose/scale mode:\n"
@@ -110,10 +115,10 @@ MODULE_PARM_DESC(multiplanar, " 1 (default) creates a single planar device, 2 cr
  * vbi-out + vid-out + meta-cap
  */
 static unsigned int node_types[VIVID_MAX_DEVS] = {
-	[0 ... (VIVID_MAX_DEVS - 1)] = 0x61d3d
+	[0 ... (VIVID_MAX_DEVS - 1)] = 0xe1d3d
 };
 module_param_array(node_types, uint, NULL, 0444);
-MODULE_PARM_DESC(node_types, " node types, default is 0x61d3d. Bitmask with the following meaning:\n"
+MODULE_PARM_DESC(node_types, " node types, default is 0xe1d3d. Bitmask with the following meaning:\n"
 			     "\t\t    bit 0: Video Capture node\n"
 			     "\t\t    bit 2-3: VBI Capture node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
 			     "\t\t    bit 4: Radio Receiver node\n"
@@ -123,7 +128,8 @@ MODULE_PARM_DESC(node_types, " node types, default is 0x61d3d. Bitmask with the
 			     "\t\t    bit 12: Radio Transmitter node\n"
 			     "\t\t    bit 16: Framebuffer for testing overlays\n"
 			     "\t\t    bit 17: Metadata Capture node\n"
-			     "\t\t    bit 18: Metadata Output node\n");
+			     "\t\t    bit 18: Metadata Output node\n"
+			     "\t\t    bit 19: Touch Capture node\n");
 
 /* Default: 4 inputs */
 static unsigned num_inputs[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 4 };
@@ -223,7 +229,8 @@ static int vidioc_querycap(struct file *file, void  *priv,
 		dev->vbi_cap_caps | dev->vbi_out_caps |
 		dev->radio_rx_caps | dev->radio_tx_caps |
 		dev->sdr_cap_caps | dev->meta_cap_caps |
-		dev->meta_out_caps | V4L2_CAP_DEVICE_CAPS;
+		dev->meta_out_caps | dev->touch_cap_caps |
+		V4L2_CAP_DEVICE_CAPS;
 	return 0;
 }
 
@@ -377,6 +384,8 @@ static int vidioc_g_parm(struct file *file, void *fh,
 {
 	struct video_device *vdev = video_devdata(file);
 
+	if (vdev->vfl_type == VFL_TYPE_TOUCH)
+		return vivid_g_parm_tch(file, fh, parm);
 	if (vdev->vfl_dir == VFL_DIR_RX)
 		return vivid_vid_cap_g_parm(file, fh, parm);
 	return vivid_vid_out_g_parm(file, fh, parm);
@@ -432,6 +441,104 @@ static __poll_t vivid_radio_poll(struct file *file, struct poll_table_struct *wa
 	return vivid_radio_tx_poll(file, wait);
 }
 
+static int vivid_enum_input(struct file *file, void *priv,
+			    struct v4l2_input *inp)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vdev->vfl_type == VFL_TYPE_TOUCH)
+		return vivid_enum_input_tch(file, priv, inp);
+	return vidioc_enum_input(file, priv, inp);
+}
+
+static int vivid_g_input(struct file *file, void *priv, unsigned int *i)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vdev->vfl_type == VFL_TYPE_TOUCH)
+		return vivid_g_input_tch(file, priv, i);
+	return vidioc_g_input(file, priv, i);
+}
+
+static int vivid_s_input(struct file *file, void *priv, unsigned int i)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vdev->vfl_type == VFL_TYPE_TOUCH)
+		return vivid_s_input_tch(file, priv, i);
+	return vidioc_s_input(file, priv, i);
+}
+
+static int vivid_enum_fmt_cap(struct file *file, void  *priv,
+			      struct v4l2_fmtdesc *f)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vdev->vfl_type == VFL_TYPE_TOUCH)
+		return vivid_enum_fmt_tch(file, priv, f);
+	return vivid_enum_fmt_vid(file, priv, f);
+}
+
+static int vivid_g_fmt_cap(struct file *file, void *priv,
+			   struct v4l2_format *f)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vdev->vfl_type == VFL_TYPE_TOUCH)
+		return vivid_g_fmt_tch(file, priv, f);
+	return vidioc_g_fmt_vid_cap(file, priv, f);
+}
+
+static int vivid_try_fmt_cap(struct file *file, void *priv,
+			     struct v4l2_format *f)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vdev->vfl_type == VFL_TYPE_TOUCH)
+		return vivid_g_fmt_tch(file, priv, f);
+	return vidioc_try_fmt_vid_cap(file, priv, f);
+}
+
+static int vivid_s_fmt_cap(struct file *file, void *priv,
+			   struct v4l2_format *f)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vdev->vfl_type == VFL_TYPE_TOUCH)
+		return vivid_g_fmt_tch(file, priv, f);
+	return vidioc_s_fmt_vid_cap(file, priv, f);
+}
+
+static int vivid_g_fmt_cap_mplane(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vdev->vfl_type == VFL_TYPE_TOUCH)
+		return vivid_g_fmt_tch_mplane(file, priv, f);
+	return vidioc_g_fmt_vid_cap_mplane(file, priv, f);
+}
+
+static int vivid_try_fmt_cap_mplane(struct file *file, void *priv,
+				    struct v4l2_format *f)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vdev->vfl_type == VFL_TYPE_TOUCH)
+		return vivid_g_fmt_tch_mplane(file, priv, f);
+	return vidioc_try_fmt_vid_cap_mplane(file, priv, f);
+}
+
+static int vivid_s_fmt_cap_mplane(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vdev->vfl_type == VFL_TYPE_TOUCH)
+		return vivid_g_fmt_tch_mplane(file, priv, f);
+	return vidioc_s_fmt_vid_cap_mplane(file, priv, f);
+}
+
 static bool vivid_is_in_use(struct video_device *vdev)
 {
 	unsigned long flags;
@@ -453,7 +560,8 @@ static bool vivid_is_last_user(struct vivid_dev *dev)
 			vivid_is_in_use(&dev->radio_rx_dev) +
 			vivid_is_in_use(&dev->radio_tx_dev) +
 			vivid_is_in_use(&dev->meta_cap_dev) +
-			vivid_is_in_use(&dev->meta_out_dev);
+			vivid_is_in_use(&dev->meta_out_dev) +
+			vivid_is_in_use(&dev->touch_cap_dev);
 
 	return uses == 1;
 }
@@ -481,6 +589,7 @@ static int vivid_fop_release(struct file *file)
 		set_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
 		set_bit(V4L2_FL_REGISTERED, &dev->meta_cap_dev.flags);
 		set_bit(V4L2_FL_REGISTERED, &dev->meta_out_dev.flags);
+		set_bit(V4L2_FL_REGISTERED, &dev->touch_cap_dev.flags);
 	}
 	mutex_unlock(&dev->mutex);
 	if (file->private_data == dev->overlay_cap_owner)
@@ -522,13 +631,13 @@ static const struct v4l2_file_operations vivid_radio_fops = {
 static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
 	.vidioc_querycap		= vidioc_querycap,
 
-	.vidioc_enum_fmt_vid_cap	= vivid_enum_fmt_vid,
-	.vidioc_g_fmt_vid_cap		= vidioc_g_fmt_vid_cap,
-	.vidioc_try_fmt_vid_cap		= vidioc_try_fmt_vid_cap,
-	.vidioc_s_fmt_vid_cap		= vidioc_s_fmt_vid_cap,
-	.vidioc_g_fmt_vid_cap_mplane	= vidioc_g_fmt_vid_cap_mplane,
-	.vidioc_try_fmt_vid_cap_mplane	= vidioc_try_fmt_vid_cap_mplane,
-	.vidioc_s_fmt_vid_cap_mplane	= vidioc_s_fmt_vid_cap_mplane,
+	.vidioc_enum_fmt_vid_cap	= vivid_enum_fmt_cap,
+	.vidioc_g_fmt_vid_cap		= vivid_g_fmt_cap,
+	.vidioc_try_fmt_vid_cap		= vivid_try_fmt_cap,
+	.vidioc_s_fmt_vid_cap		= vivid_s_fmt_cap,
+	.vidioc_g_fmt_vid_cap_mplane	= vivid_g_fmt_cap_mplane,
+	.vidioc_try_fmt_vid_cap_mplane	= vivid_try_fmt_cap_mplane,
+	.vidioc_s_fmt_vid_cap_mplane	= vivid_s_fmt_cap_mplane,
 
 	.vidioc_enum_fmt_vid_out	= vivid_enum_fmt_vid,
 	.vidioc_g_fmt_vid_out		= vidioc_g_fmt_vid_out,
@@ -590,9 +699,9 @@ static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
 	.vidioc_streamon		= vb2_ioctl_streamon,
 	.vidioc_streamoff		= vb2_ioctl_streamoff,
 
-	.vidioc_enum_input		= vidioc_enum_input,
-	.vidioc_g_input			= vidioc_g_input,
-	.vidioc_s_input			= vidioc_s_input,
+	.vidioc_enum_input		= vivid_enum_input,
+	.vidioc_g_input			= vivid_g_input,
+	.vidioc_s_input			= vivid_s_input,
 	.vidioc_s_audio			= vidioc_s_audio,
 	.vidioc_g_audio			= vidioc_g_audio,
 	.vidioc_enumaudio		= vidioc_enumaudio,
@@ -921,6 +1030,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
 			dev->has_scaler_out ? 'Y' : 'N');
 	}
 
+	/* do we create a touch capture device */
+	dev->has_touch_cap = node_type & 0x80000;
+
 	/* end detecting feature set */
 
 	if (dev->has_vid_cap) {
@@ -994,6 +1106,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
 		if (dev->has_audio_outputs)
 			dev->meta_out_caps |= V4L2_CAP_AUDIO;
 	}
+	/* set up the capabilities of the touch capture device */
+	if (dev->has_touch_cap) {
+		dev->touch_cap_caps = V4L2_CAP_TOUCH | V4L2_CAP_STREAMING |
+				      V4L2_CAP_READWRITE;
+		dev->touch_cap_caps |= dev->multiplanar ?
+			V4L2_CAP_VIDEO_CAPTURE_MPLANE : V4L2_CAP_VIDEO_CAPTURE;
+	}
 
 	ret = -ENOMEM;
 	/* initialize the test pattern generator */
@@ -1129,6 +1248,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
 	v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_FREQUENCY);
 	v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_S_FREQUENCY);
 	v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_G_FREQUENCY);
+	v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_S_PARM);
+	v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_ENUM_FRAMESIZES);
+	v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_ENUM_FRAMEINTERVALS);
 
 	/* configure internal data */
 	dev->fmt_cap = &vivid_formats[0];
@@ -1201,6 +1323,11 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
 	dev->fb_cap.fmt.bytesperline = dev->src_rect.width * tpg_g_twopixelsize(&dev->tpg, 0) / 2;
 	dev->fb_cap.fmt.sizeimage = dev->src_rect.height * dev->fb_cap.fmt.bytesperline;
 
+	/* update touch configuration */
+	dev->timeperframe_tch_cap.numerator = 1;
+	dev->timeperframe_tch_cap.denominator = 10;
+	vivid_set_touch(dev, 0);
+
 	/* initialize locks */
 	spin_lock_init(&dev->slock);
 	mutex_init(&dev->mutex);
@@ -1213,6 +1340,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
 	INIT_LIST_HEAD(&dev->sdr_cap_active);
 	INIT_LIST_HEAD(&dev->meta_cap_active);
 	INIT_LIST_HEAD(&dev->meta_out_active);
+	INIT_LIST_HEAD(&dev->touch_cap_active);
 
 	INIT_LIST_HEAD(&dev->cec_work_list);
 	spin_lock_init(&dev->cec_slock);
@@ -1294,6 +1422,15 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
 			goto unreg_dev;
 	}
 
+	if (dev->has_touch_cap) {
+		/* initialize touch_cap queue */
+		ret = vivid_create_queue(dev, &dev->vb_touch_cap_q,
+					 V4L2_BUF_TYPE_VIDEO_CAPTURE, 1,
+					 &vivid_touch_cap_qops);
+		if (ret)
+			goto unreg_dev;
+	}
+
 	if (dev->has_fb) {
 		/* Create framebuffer for testing capture/output overlay */
 		ret = vivid_fb_init(dev);
@@ -1345,6 +1482,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
 	v4l2_ctrl_handler_setup(&dev->ctrl_hdl_sdr_cap);
 	v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_cap);
 	v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_out);
+	v4l2_ctrl_handler_setup(&dev->ctrl_hdl_touch_cap);
 
 	/* finally start creating the device nodes */
 	if (dev->has_vid_cap) {
@@ -1635,6 +1773,35 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
 			  video_device_node_name(vfd));
 	}
 
+	if (dev->has_touch_cap) {
+		vfd = &dev->touch_cap_dev;
+		snprintf(vfd->name, sizeof(vfd->name),
+			 "vivid-%03d-touch-cap", inst);
+		vfd->fops = &vivid_fops;
+		vfd->ioctl_ops = &vivid_ioctl_ops;
+		vfd->device_caps = dev->touch_cap_caps;
+		vfd->release = video_device_release_empty;
+		vfd->v4l2_dev = &dev->v4l2_dev;
+		vfd->queue = &dev->vb_touch_cap_q;
+		vfd->tvnorms = tvnorms_cap;
+		vfd->lock = &dev->mutex;
+		video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+		dev->touch_cap_pad.flags = MEDIA_PAD_FL_SINK;
+		ret = media_entity_pads_init(&vfd->entity, 1,
+					     &dev->touch_cap_pad);
+		if (ret)
+			goto unreg_dev;
+#endif
+		ret = video_register_device(vfd, VFL_TYPE_TOUCH,
+					    touch_cap_nr[inst]);
+		if (ret < 0)
+			goto unreg_dev;
+		v4l2_info(&dev->v4l2_dev,
+			  "V4L2 touch capture device registered as %s\n",
+			  video_device_node_name(vfd));
+	}
+
 #ifdef CONFIG_MEDIA_CONTROLLER
 	/* Register the media device */
 	ret = media_device_register(&dev->mdev);
@@ -1651,6 +1818,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
 	return 0;
 
 unreg_dev:
+	video_unregister_device(&dev->touch_cap_dev);
 	video_unregister_device(&dev->meta_out_dev);
 	video_unregister_device(&dev->meta_cap_dev);
 	video_unregister_device(&dev->radio_tx_dev);
@@ -1778,6 +1946,11 @@ static int vivid_remove(struct platform_device *pdev)
 				  video_device_node_name(&dev->meta_out_dev));
 			video_unregister_device(&dev->meta_out_dev);
 		}
+		if (dev->has_touch_cap) {
+			v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+				  video_device_node_name(&dev->touch_cap_dev));
+			video_unregister_device(&dev->touch_cap_dev);
+		}
 		cec_unregister_adapter(dev->cec_rx_adap);
 		for (j = 0; j < MAX_OUTPUTS; j++)
 			cec_unregister_adapter(dev->cec_tx_adap[j]);
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 59192b6..99e69b8 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -133,6 +133,7 @@ struct vivid_dev {
 	struct media_pad		sdr_cap_pad;
 	struct media_pad		meta_cap_pad;
 	struct media_pad		meta_out_pad;
+	struct media_pad		touch_cap_pad;
 #endif
 	struct v4l2_ctrl_handler	ctrl_hdl_user_gen;
 	struct v4l2_ctrl_handler	ctrl_hdl_user_vid;
@@ -159,6 +160,8 @@ struct vivid_dev {
 	struct v4l2_ctrl_handler	ctrl_hdl_meta_cap;
 	struct video_device		meta_out_dev;
 	struct v4l2_ctrl_handler	ctrl_hdl_meta_out;
+	struct video_device		touch_cap_dev;
+	struct v4l2_ctrl_handler	ctrl_hdl_touch_cap;
 
 	spinlock_t			slock;
 	struct mutex			mutex;
@@ -173,6 +176,7 @@ struct vivid_dev {
 	u32				radio_tx_caps;
 	u32				meta_cap_caps;
 	u32				meta_out_caps;
+	u32				touch_cap_caps;
 
 	/* supported features */
 	bool				multiplanar;
@@ -201,6 +205,7 @@ struct vivid_dev {
 	bool				has_meta_cap;
 	bool				has_meta_out;
 	bool				has_tv_tuner;
+	bool				has_touch_cap;
 
 	bool				can_loop_video;
 
@@ -404,6 +409,8 @@ struct vivid_dev {
 	struct list_head		vbi_cap_active;
 	struct vb2_queue		vb_meta_cap_q;
 	struct list_head		meta_cap_active;
+	struct vb2_queue		vb_touch_cap_q;
+	struct list_head		touch_cap_active;
 
 	/* thread for generating video capture stream */
 	struct task_struct		*kthread_vid_cap;
@@ -425,6 +432,19 @@ struct vivid_dev {
 	u32				meta_cap_seq_count;
 	bool				meta_cap_streaming;
 
+	/* Touch capture */
+	struct task_struct		*kthread_touch_cap;
+	unsigned long			jiffies_touch_cap;
+	u64				touch_cap_stream_start;
+	u32				touch_cap_seq_offset;
+	bool				touch_cap_seq_resync;
+	u32				touch_cap_seq_start;
+	u32				touch_cap_seq_count;
+	bool				touch_cap_streaming;
+	struct v4l2_fract		timeperframe_tch_cap;
+	struct v4l2_pix_format		tch_format;
+	int				tch_pat_random;
+
 	/* video output */
 	const struct vivid_fmt		*fmt_out;
 	struct v4l2_fract		timeperframe_vid_out;
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index 68e8124..3341305 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -1508,6 +1508,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
 	struct v4l2_ctrl_handler *hdl_sdr_cap = &dev->ctrl_hdl_sdr_cap;
 	struct v4l2_ctrl_handler *hdl_meta_cap = &dev->ctrl_hdl_meta_cap;
 	struct v4l2_ctrl_handler *hdl_meta_out = &dev->ctrl_hdl_meta_out;
+	struct v4l2_ctrl_handler *hdl_tch_cap = &dev->ctrl_hdl_touch_cap;
 
 	struct v4l2_ctrl_config vivid_ctrl_dv_timings = {
 		.ops = &vivid_vid_cap_ctrl_ops,
@@ -1551,6 +1552,8 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
 	v4l2_ctrl_new_custom(hdl_meta_cap, &vivid_ctrl_class, NULL);
 	v4l2_ctrl_handler_init(hdl_meta_out, 2);
 	v4l2_ctrl_new_custom(hdl_meta_out, &vivid_ctrl_class, NULL);
+	v4l2_ctrl_handler_init(hdl_tch_cap, 2);
+	v4l2_ctrl_new_custom(hdl_tch_cap, &vivid_ctrl_class, NULL);
 
 	/* User Controls */
 	dev->volume = v4l2_ctrl_new_std(hdl_user_aud, NULL,
@@ -1904,6 +1907,13 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
 			return hdl_meta_out->error;
 		dev->meta_out_dev.ctrl_handler = hdl_meta_out;
 	}
+	if (dev->has_touch_cap) {
+		v4l2_ctrl_add_handler(hdl_tch_cap, hdl_user_gen, NULL, false);
+		v4l2_ctrl_add_handler(hdl_tch_cap, hdl_streaming, NULL, false);
+		if (hdl_tch_cap->error)
+			return hdl_tch_cap->error;
+		dev->touch_cap_dev.ctrl_handler = hdl_tch_cap;
+	}
 	return 0;
 }
 
@@ -1925,4 +1935,5 @@ void vivid_free_controls(struct vivid_dev *dev)
 	v4l2_ctrl_handler_free(&dev->ctrl_hdl_fb);
 	v4l2_ctrl_handler_free(&dev->ctrl_hdl_meta_cap);
 	v4l2_ctrl_handler_free(&dev->ctrl_hdl_meta_out);
+	v4l2_ctrl_handler_free(&dev->ctrl_hdl_touch_cap);
 }
diff --git a/drivers/media/platform/vivid/vivid-kthread-touch.c b/drivers/media/platform/vivid/vivid-kthread-touch.c
new file mode 100644
index 0000000..674507b
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-kthread-touch.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-kthread-touch.c - touch capture thread support functions.
+ *
+ */
+
+#include <linux/freezer.h>
+#include "vivid-core.h"
+#include "vivid-kthread-touch.h"
+#include "vivid-touch-cap.h"
+
+static noinline_for_stack void vivid_thread_tch_cap_tick(struct vivid_dev *dev,
+							 int dropped_bufs)
+{
+	struct vivid_buffer *tch_cap_buf = NULL;
+
+	spin_lock(&dev->slock);
+	if (!list_empty(&dev->touch_cap_active)) {
+		tch_cap_buf = list_entry(dev->touch_cap_active.next,
+					 struct vivid_buffer, list);
+		list_del(&tch_cap_buf->list);
+	}
+
+	spin_unlock(&dev->slock);
+
+	if (tch_cap_buf) {
+		v4l2_ctrl_request_setup(tch_cap_buf->vb.vb2_buf.req_obj.req,
+					&dev->ctrl_hdl_touch_cap);
+
+		vivid_fillbuff_tch(dev, tch_cap_buf);
+		v4l2_ctrl_request_complete(tch_cap_buf->vb.vb2_buf.req_obj.req,
+					   &dev->ctrl_hdl_touch_cap);
+		vb2_buffer_done(&tch_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
+				VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+		dprintk(dev, 2, "touch_cap buffer %d done\n",
+			tch_cap_buf->vb.vb2_buf.index);
+
+		tch_cap_buf->vb.vb2_buf.timestamp = ktime_get_ns() + dev->time_wrap_offset;
+	}
+	dev->dqbuf_error = false;
+}
+
+static int vivid_thread_touch_cap(void *data)
+{
+	struct vivid_dev *dev = data;
+	u64 numerators_since_start;
+	u64 buffers_since_start;
+	u64 next_jiffies_since_start;
+	unsigned long jiffies_since_start;
+	unsigned long cur_jiffies;
+	unsigned int wait_jiffies;
+	unsigned int numerator;
+	unsigned int denominator;
+	int dropped_bufs;
+
+	dprintk(dev, 1, "Touch Capture Thread Start\n");
+
+	set_freezable();
+
+	/* Reset the frame counters */
+	dev->touch_cap_seq_offset = 0;
+	dev->touch_cap_seq_count = 0;
+	dev->touch_cap_seq_resync = false;
+	dev->jiffies_touch_cap = jiffies;
+
+	for (;;) {
+		try_to_freeze();
+		if (kthread_should_stop())
+			break;
+
+		if (!mutex_trylock(&dev->mutex)) {
+			schedule_timeout_uninterruptible(1);
+			continue;
+		}
+		cur_jiffies = jiffies;
+		if (dev->touch_cap_seq_resync) {
+			dev->jiffies_touch_cap = cur_jiffies;
+			dev->touch_cap_seq_offset = dev->touch_cap_seq_count + 1;
+			dev->touch_cap_seq_count = 0;
+			dev->touch_cap_seq_resync = false;
+		}
+		denominator = dev->timeperframe_tch_cap.denominator;
+		numerator = dev->timeperframe_tch_cap.numerator;
+
+		/* Calculate the number of jiffies since we started streaming */
+		jiffies_since_start = cur_jiffies - dev->jiffies_touch_cap;
+		/* Get the number of buffers streamed since the start */
+		buffers_since_start = (u64)jiffies_since_start * denominator +
+				      (HZ * numerator) / 2;
+		do_div(buffers_since_start, HZ * numerator);
+
+		/*
+		 * After more than 0xf0000000 (rounded down to a multiple of
+		 * 'jiffies-per-day' to ease jiffies_to_msecs calculation)
+		 * jiffies have passed since we started streaming, reset the
+		 * counters and keep track of the sequence offset.
+		 */
+		if (jiffies_since_start > JIFFIES_RESYNC) {
+			dev->jiffies_touch_cap = cur_jiffies;
+			dev->touch_cap_seq_offset = buffers_since_start;
+			buffers_since_start = 0;
+		}
+		dropped_bufs = buffers_since_start + dev->touch_cap_seq_offset - dev->touch_cap_seq_count;
+		dev->touch_cap_seq_count = buffers_since_start + dev->touch_cap_seq_offset;
+
+		vivid_thread_tch_cap_tick(dev, dropped_bufs);
+
+		/*
+		 * Calculate the number of 'numerators' streamed
+		 * since we started, including the current buffer.
+		 */
+		numerators_since_start = ++buffers_since_start * numerator;
+
+		/* And the number of jiffies since we started */
+		jiffies_since_start = jiffies - dev->jiffies_touch_cap;
+
+		mutex_unlock(&dev->mutex);
+
+		/*
+		 * Calculate when that next buffer is supposed to start
+		 * in jiffies since we started streaming.
+		 */
+		next_jiffies_since_start = numerators_since_start * HZ +
+					   denominator / 2;
+		do_div(next_jiffies_since_start, denominator);
+		/* If it is in the past, then just schedule asap */
+		if (next_jiffies_since_start < jiffies_since_start)
+			next_jiffies_since_start = jiffies_since_start;
+
+		wait_jiffies = next_jiffies_since_start - jiffies_since_start;
+		schedule_timeout_interruptible(wait_jiffies ? wait_jiffies : 1);
+	}
+	dprintk(dev, 1, "Touch Capture Thread End\n");
+	return 0;
+}
+
+int vivid_start_generating_touch_cap(struct vivid_dev *dev)
+{
+	if (dev->kthread_touch_cap) {
+		dev->touch_cap_streaming = true;
+		return 0;
+	}
+
+	dev->kthread_touch_cap = kthread_run(vivid_thread_touch_cap, dev,
+					     "%s-tch-cap", dev->v4l2_dev.name);
+
+	if (IS_ERR(dev->kthread_touch_cap)) {
+		int err = PTR_ERR(dev->kthread_touch_cap);
+
+		dev->kthread_touch_cap = NULL;
+		v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
+		return err;
+	}
+	dev->touch_cap_streaming = true;
+	dprintk(dev, 1, "returning from %s\n", __func__);
+	return 0;
+}
+
+void vivid_stop_generating_touch_cap(struct vivid_dev *dev)
+{
+	if (!dev->kthread_touch_cap)
+		return;
+
+	dev->touch_cap_streaming = false;
+
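+	/* Return any still-queued buffers as errored before stopping the thread. */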
+	while (!list_empty(&dev->touch_cap_active)) {
+		struct vivid_buffer *buf;
+
+		buf = list_entry(dev->touch_cap_active.next,
+				 struct vivid_buffer, list);
+		list_del(&buf->list);
+		v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+					   &dev->ctrl_hdl_touch_cap);
+		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+		dprintk(dev, 2, "touch_cap buffer %d done\n",
+			buf->vb.vb2_buf.index);
+	}
+
+	kthread_stop(dev->kthread_touch_cap);
+	dev->kthread_touch_cap = NULL;
+}
diff --git a/drivers/media/platform/vivid/vivid-kthread-touch.h b/drivers/media/platform/vivid/vivid-kthread-touch.h
new file mode 100644
index 0000000..ecf79b4
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-kthread-touch.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-kthread-touch.h - touch capture thread support functions.
+ *
+ */
+
+#ifndef _VIVID_KTHREAD_TOUCH_H_
+#define _VIVID_KTHREAD_TOUCH_H_
+
+int vivid_start_generating_touch_cap(struct vivid_dev *dev);
+void vivid_stop_generating_touch_cap(struct vivid_dev *dev);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c
index f2e789b..fbaec8a 100644
--- a/drivers/media/platform/vivid/vivid-osd.c
+++ b/drivers/media/platform/vivid/vivid-osd.c
@@ -244,7 +244,7 @@ static int vivid_fb_blank(int blank_mode, struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops vivid_fb_ops = {
+static const struct fb_ops vivid_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var   = vivid_fb_check_var,
 	.fb_set_par     = vivid_fb_set_par,
@@ -311,7 +311,6 @@ static int vivid_fb_init_vidmode(struct vivid_dev *dev)
 
 	dev->fb_info.node = -1;
 	dev->fb_info.flags = FBINFO_FLAG_DEFAULT;
-	dev->fb_info.fbops = &vivid_fb_ops;
 	dev->fb_info.par = dev;
 	dev->fb_info.var = dev->fb_defined;
 	dev->fb_info.fix = dev->fb_fix;
diff --git a/drivers/media/platform/vivid/vivid-touch-cap.c b/drivers/media/platform/vivid/vivid-touch-cap.c
new file mode 100644
index 0000000..ebb00b1
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-touch-cap.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-touch-cap.c - touch support functions.
+ */
+
+#include "vivid-core.h"
+#include "vivid-kthread-touch.h"
+#include "vivid-vid-common.h"
+#include "vivid-touch-cap.h"
+
+static int touch_cap_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+				 unsigned int *nplanes, unsigned int sizes[],
+				 struct device *alloc_devs[])
+{
+	struct vivid_dev *dev = vb2_get_drv_priv(vq);
+	struct v4l2_pix_format *f = &dev->tch_format;
+	unsigned int size = f->sizeimage;
+
+	if (*nplanes) {
+		if (sizes[0] < size)
+			return -EINVAL;
+	} else {
+		sizes[0] = size;
+	}
+
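+	/* Require at least two buffers in total so capture can keep running. */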
+	if (vq->num_buffers + *nbuffers < 2)
+		*nbuffers = 2 - vq->num_buffers;
+
+	*nplanes = 1;
+	return 0;
+}
+
+static int touch_cap_buf_prepare(struct vb2_buffer *vb)
+{
+	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+	struct v4l2_pix_format *f = &dev->tch_format;
+	unsigned int size = f->sizeimage;
+
+	if (dev->buf_prepare_error) {
+		/*
+		 * Error injection: test what happens if buf_prepare() returns
+		 * an error.
+		 */
+		dev->buf_prepare_error = false;
+		return -EINVAL;
+	}
+	if (vb2_plane_size(vb, 0) < size) {
+		dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+			__func__, vb2_plane_size(vb, 0), size);
+		return -EINVAL;
+	}
+	vb2_set_plane_payload(vb, 0, size);
+
+	return 0;
+}
+
+static void touch_cap_buf_queue(struct vb2_buffer *vb)
+{
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+	struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+	vbuf->field = V4L2_FIELD_NONE;
+	spin_lock(&dev->slock);
+	list_add_tail(&buf->list, &dev->touch_cap_active);
+	spin_unlock(&dev->slock);
+}
+
+static int touch_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct vivid_dev *dev = vb2_get_drv_priv(vq);
+	int err;
+
+	dev->touch_cap_seq_count = 0;
+	if (dev->start_streaming_error) {
+		dev->start_streaming_error = false;
+		err = -EINVAL;
+	} else {
+		err = vivid_start_generating_touch_cap(dev);
+	}
+	if (err) {
+		struct vivid_buffer *buf, *tmp;
+
+		list_for_each_entry_safe(buf, tmp,
+					 &dev->touch_cap_active, list) {
+			list_del(&buf->list);
+			vb2_buffer_done(&buf->vb.vb2_buf,
+					VB2_BUF_STATE_QUEUED);
+		}
+	}
+	return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void touch_cap_stop_streaming(struct vb2_queue *vq)
+{
+	struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+	vivid_stop_generating_touch_cap(dev);
+}
+
+static void touch_cap_buf_request_complete(struct vb2_buffer *vb)
+{
+	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+	v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_touch_cap);
+}
+
+const struct vb2_ops vivid_touch_cap_qops = {
+	.queue_setup		= touch_cap_queue_setup,
+	.buf_prepare		= touch_cap_buf_prepare,
+	.buf_queue		= touch_cap_buf_queue,
+	.start_streaming	= touch_cap_start_streaming,
+	.stop_streaming		= touch_cap_stop_streaming,
+	.buf_request_complete	= touch_cap_buf_request_complete,
+	.wait_prepare		= vb2_ops_wait_prepare,
+	.wait_finish		= vb2_ops_wait_finish,
+};
+
+int vivid_enum_fmt_tch(struct file *file, void  *priv, struct v4l2_fmtdesc *f)
+{
+	if (f->index)
+		return -EINVAL;
+
+	f->pixelformat = V4L2_TCH_FMT_DELTA_TD16;
+	return 0;
+}
+
+int vivid_g_fmt_tch(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct vivid_dev *dev = video_drvdata(file);
+
+	if (dev->multiplanar)
+		return -ENOTTY;
+	f->fmt.pix = dev->tch_format;
+	return 0;
+}
+
+int vivid_g_fmt_tch_mplane(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct vivid_dev *dev = video_drvdata(file);
+	struct v4l2_format sp_fmt;
+
+	if (!dev->multiplanar)
+		return -ENOTTY;
+	sp_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	sp_fmt.fmt.pix = dev->tch_format;
+	fmt_sp2mp(&sp_fmt, f);
+	return 0;
+}
+
+int vivid_g_parm_tch(struct file *file, void *priv,
+		     struct v4l2_streamparm *parm)
+{
+	struct vivid_dev *dev = video_drvdata(file);
+
+	if (parm->type != (dev->multiplanar ?
+			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
+		return -EINVAL;
+
+	parm->parm.capture.capability   = V4L2_CAP_TIMEPERFRAME;
+	parm->parm.capture.timeperframe = dev->timeperframe_tch_cap;
+	parm->parm.capture.readbuffers  = 1;
+	return 0;
+}
+
+int vivid_enum_input_tch(struct file *file, void *priv, struct v4l2_input *inp)
+{
+	if (inp->index)
+		return -EINVAL;
+
+	inp->type = V4L2_INPUT_TYPE_TOUCH;
+	strscpy(inp->name, "Vivid Touch", sizeof(inp->name));
+	inp->capabilities = 0;
+	return 0;
+}
+
+int vivid_g_input_tch(struct file *file, void *priv, unsigned int *i)
+{
+	*i = 0;
+	return 0;
+}
+
+int vivid_set_touch(struct vivid_dev *dev, unsigned int i)
+{
+	struct v4l2_pix_format *f = &dev->tch_format;
+
+	if (i)
+		return -EINVAL;
+
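+	/* Fixed format: one signed 16-bit delta value per touch sensor node. */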
+	f->pixelformat = V4L2_TCH_FMT_DELTA_TD16;
+	f->width =  VIVID_TCH_WIDTH;
+	f->height = VIVID_TCH_HEIGHT;
+	f->field = V4L2_FIELD_NONE;
+	f->colorspace = V4L2_COLORSPACE_RAW;
+	f->bytesperline = f->width * sizeof(s16);
+	f->sizeimage = f->width * f->height * sizeof(s16);
+	return 0;
+}
+
+int vivid_s_input_tch(struct file *file, void *priv, unsigned int i)
+{
+	return vivid_set_touch(video_drvdata(file), i);
+}
+
+static void vivid_fill_buff_noise(__s16 *tch_buf, int size)
+{
+	int i;
+
+	/* Fill 10% of the values with noise in the range -3 to 3, zero the others */
+	for (i = 0; i < size; i++) {
+		unsigned int rand = get_random_int();
+
+		if (rand % 10)
+			tch_buf[i] = 0;
+		else
+			tch_buf[i] = (rand / 10) % 7 - 3;
+	}
+}
+
+static inline int get_random_pressure(void)
+{
+	return get_random_int() % VIVID_PRESSURE_LIMIT;
+}
+
+static void vivid_tch_buf_set(struct v4l2_pix_format *f,
+			      __s16 *tch_buf,
+			      int index)
+{
+	unsigned int x = index % f->width;
+	unsigned int y = index / f->width;
+	unsigned int offset = VIVID_MIN_PRESSURE;
+
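+	/*
+	 * Emulate a finger press: full base pressure at the touched point,
+	 * half of it at the four edge-adjacent neighbours and a quarter at
+	 * the diagonal neighbours, each with a small random component.
+	 * Neighbours outside the sensor grid are skipped.
+	 */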
+	tch_buf[index] = offset + get_random_pressure();
+	offset /= 2;
+	if (x)
+		tch_buf[index - 1] = offset + get_random_pressure();
+	if (x < f->width - 1)
+		tch_buf[index + 1] = offset + get_random_pressure();
+	if (y)
+		tch_buf[index - f->width] = offset + get_random_pressure();
+	if (y < f->height - 1)
+		tch_buf[index + f->width] = offset + get_random_pressure();
+	offset /= 2;
+	if (x && y)
+		tch_buf[index - 1 - f->width] = offset + get_random_pressure();
+	if (x < f->width - 1 && y)
+		tch_buf[index + 1 - f->width] = offset + get_random_pressure();
+	if (x && y < f->height - 1)
+		tch_buf[index - 1 + f->width] = offset + get_random_pressure();
+	if (x < f->width - 1 && y < f->height - 1)
+		tch_buf[index + 1 + f->width] = offset + get_random_pressure();
+}
+
+void vivid_fillbuff_tch(struct vivid_dev *dev, struct vivid_buffer *buf)
+{
+	struct v4l2_pix_format *f = &dev->tch_format;
+	int size = f->width * f->height;
+	int x, y, xstart, ystart, offset_x, offset_y;
+	unsigned int test_pattern, test_pat_idx, rand;
+
+	__s16 *tch_buf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+
+	buf->vb.sequence = dev->touch_cap_seq_count;
+	test_pattern = (buf->vb.sequence / TCH_SEQ_COUNT) % TEST_CASE_MAX;
+	test_pat_idx = buf->vb.sequence % TCH_SEQ_COUNT;
+
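+	/*
+	 * Each test pattern spans a window of TCH_SEQ_COUNT frames;
+	 * test_pat_idx is the frame position inside that window and selects
+	 * which touch events are drawn on top of the noise filled in below.
+	 */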
+	vivid_fill_buff_noise(tch_buf, size);
+
+	if (test_pat_idx >= TCH_PATTERN_COUNT)
+		return;
+
+	if (test_pat_idx == 0)
+		dev->tch_pat_random = get_random_int();
+	rand = dev->tch_pat_random;
+
+	switch (test_pattern) {
+	case SINGLE_TAP:
+		if (test_pat_idx == 2)
+			vivid_tch_buf_set(f, tch_buf, rand % size);
+		break;
+	case DOUBLE_TAP:
+		if (test_pat_idx == 2 || test_pat_idx == 4)
+			vivid_tch_buf_set(f, tch_buf, rand % size);
+		break;
+	case TRIPLE_TAP:
+		if (test_pat_idx == 2 || test_pat_idx == 4 || test_pat_idx == 6)
+			vivid_tch_buf_set(f, tch_buf, rand % size);
+		break;
+	case MOVE_LEFT_TO_RIGHT:
+		vivid_tch_buf_set(f, tch_buf,
+				  (rand % f->height) * f->width +
+				  test_pat_idx *
+				  (f->width / TCH_PATTERN_COUNT));
+		break;
+	case ZOOM_IN:
+		x = f->width / 2;
+		y = f->height / 2;
+		offset_x = ((TCH_PATTERN_COUNT - 1 - test_pat_idx) * x) /
+				TCH_PATTERN_COUNT;
+		offset_y = ((TCH_PATTERN_COUNT - 1 - test_pat_idx) * y) /
+				TCH_PATTERN_COUNT;
+		vivid_tch_buf_set(f, tch_buf,
+				  (x - offset_x) + f->width * (y - offset_y));
+		vivid_tch_buf_set(f, tch_buf,
+				  (x + offset_x) + f->width * (y + offset_y));
+		break;
+	case ZOOM_OUT:
+		x = f->width / 2;
+		y = f->height / 2;
+		offset_x = (test_pat_idx * x) / TCH_PATTERN_COUNT;
+		offset_y = (test_pat_idx * y) / TCH_PATTERN_COUNT;
+		vivid_tch_buf_set(f, tch_buf,
+				  (x - offset_x) + f->width * (y - offset_y));
+		vivid_tch_buf_set(f, tch_buf,
+				  (x + offset_x) + f->width * (y + offset_y));
+		break;
+	case PALM_PRESS:
+		for (x = 0; x < f->width; x++)
+			for (y = f->height / 2; y < f->height; y++)
+				tch_buf[x + f->width * y] = VIVID_MIN_PRESSURE +
+							get_random_pressure();
+		break;
+	case MULTIPLE_PRESS:
+		/* 16 pressure points */
+		for (y = 0; y < 4; y++) {
+			for (x = 0; x < 4; x++) {
+				ystart = (y * f->height) / 4 + f->height / 8;
+				xstart = (x * f->width) / 4 + f->width / 8;
+				vivid_tch_buf_set(f, tch_buf,
+						  ystart * f->width + xstart);
+			}
+		}
+		break;
+	}
+#ifdef __BIG_ENDIAN__
+	for (x = 0; x < size; x++)
+		tch_buf[x] = (__force s16)__cpu_to_le16((u16)tch_buf[x]);
+#endif
+}
diff --git a/drivers/media/platform/vivid/vivid-touch-cap.h b/drivers/media/platform/vivid/vivid-touch-cap.h
new file mode 100644
index 0000000..07e5140
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-touch-cap.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-touch-cap.h - touch support functions.
+ */
+#ifndef _VIVID_TOUCH_CAP_H_
+#define _VIVID_TOUCH_CAP_H_
+
+#define VIVID_TCH_HEIGHT	12
+#define VIVID_TCH_WIDTH		21
+#define VIVID_MIN_PRESSURE	180
+#define VIVID_PRESSURE_LIMIT	40
+#define TCH_SEQ_COUNT		16
+#define TCH_PATTERN_COUNT	12
+
+enum vivid_tch_test {
+	SINGLE_TAP,
+	DOUBLE_TAP,
+	TRIPLE_TAP,
+	MOVE_LEFT_TO_RIGHT,
+	ZOOM_IN,
+	ZOOM_OUT,
+	PALM_PRESS,
+	MULTIPLE_PRESS,
+	TEST_CASE_MAX
+};
+
+extern const struct vb2_ops vivid_touch_cap_qops;
+
+int vivid_enum_fmt_tch(struct file *file, void  *priv, struct v4l2_fmtdesc *f);
+int vivid_g_fmt_tch(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_g_fmt_tch_mplane(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_enum_input_tch(struct file *file, void *priv, struct v4l2_input *inp);
+int vivid_g_input_tch(struct file *file, void *priv, unsigned int *i);
+int vivid_s_input_tch(struct file *file, void *priv, unsigned int i);
+void vivid_fillbuff_tch(struct vivid_dev *dev, struct vivid_buffer *buf);
+int vivid_set_touch(struct vivid_dev *dev, unsigned int i);
+int vivid_g_parm_tch(struct file *file, void *priv,
+		     struct v4l2_streamparm *parm);
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 8665dfd..76b0be6 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -813,7 +813,7 @@ void fmt_sp2mp(const struct v4l2_format *sp_fmt, struct v4l2_format *mp_fmt)
 
 	memset(mp->reserved, 0, sizeof(mp->reserved));
 	mp_fmt->type = is_out ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
-			   V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
 	mp->width = pix->width;
 	mp->height = pix->height;
 	mp->pixelformat = pix->pixelformat;
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 872d644..a7deca1 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -413,7 +413,7 @@ static int iguanair_probe(struct usb_interface *intf,
 	int ret, pipein, pipeout;
 	struct usb_host_interface *idesc;
 
-	idesc = intf->altsetting;
+	idesc = intf->cur_altsetting;
 	if (idesc->desc.bNumEndpoints < 2)
 		return -ENODEV;
 
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index 32ccefe..d80cfa4 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -37,10 +37,13 @@
 #define INT_CLR_RCV		BIT(16)
 #define INT_CLR_RCVTIMEOUT	(BIT(16) | BIT(17))
 
-#define IR_CLK			0x48
 #define IR_CLK_ENABLE		BIT(4)
 #define IR_CLK_RESET		BIT(5)
 
+/* IR_ENABLE register bits */
+#define IR_ENABLE_EN		BIT(0)
+#define IR_ENABLE_EN_EXTRA	BIT(8)
+
 #define IR_CFG_WIDTH_MASK	0xffff
 #define IR_CFG_WIDTH_SHIFT	16
 #define IR_CFG_FORMAT_MASK	0x3
@@ -58,6 +61,23 @@
 
 #define IR_HIX5HD2_NAME		"hix5hd2-ir"
 
+/* Need to set extra bit for enabling IR */
+#define HIX5HD2_FLAG_EXTRA_ENABLE	BIT(0)
+
+struct hix5hd2_soc_data {
+	u32 clk_reg;
+	u32 flags;
+};
+
+static const struct hix5hd2_soc_data hix5hd2_data = {
+	.clk_reg = 0x48,
+};
+
+static const struct hix5hd2_soc_data hi3796cv300_data = {
+	.clk_reg = 0x60,
+	.flags = HIX5HD2_FLAG_EXTRA_ENABLE,
+};
+
 struct hix5hd2_ir_priv {
 	int			irq;
 	void __iomem		*base;
@@ -66,15 +86,17 @@ struct hix5hd2_ir_priv {
 	struct regmap		*regmap;
 	struct clk		*clock;
 	unsigned long		rate;
+	const struct hix5hd2_soc_data *socdata;
 };
 
-static int hix5hd2_ir_enable(struct hix5hd2_ir_priv *dev, bool on)
+static int hix5hd2_ir_clk_enable(struct hix5hd2_ir_priv *dev, bool on)
 {
+	u32 clk_reg = dev->socdata->clk_reg;
 	u32 val;
 	int ret = 0;
 
 	if (dev->regmap) {
-		regmap_read(dev->regmap, IR_CLK, &val);
+		regmap_read(dev->regmap, clk_reg, &val);
 		if (on) {
 			val &= ~IR_CLK_RESET;
 			val |= IR_CLK_ENABLE;
@@ -82,7 +104,7 @@ static int hix5hd2_ir_enable(struct hix5hd2_ir_priv *dev, bool on)
 			val &= ~IR_CLK_ENABLE;
 			val |= IR_CLK_RESET;
 		}
-		regmap_write(dev->regmap, IR_CLK, val);
+		regmap_write(dev->regmap, clk_reg, val);
 	} else {
 		if (on)
 			ret = clk_prepare_enable(dev->clock);
@@ -92,12 +114,23 @@ static int hix5hd2_ir_enable(struct hix5hd2_ir_priv *dev, bool on)
 	return ret;
 }
 
+static inline void hix5hd2_ir_enable(struct hix5hd2_ir_priv *priv)
+{
+	u32 val = IR_ENABLE_EN;
+
+	if (priv->socdata->flags & HIX5HD2_FLAG_EXTRA_ENABLE)
+		val |= IR_ENABLE_EN_EXTRA;
+
+	writel_relaxed(val, priv->base + IR_ENABLE);
+}
+
 static int hix5hd2_ir_config(struct hix5hd2_ir_priv *priv)
 {
 	int timeout = 10000;
 	u32 val, rate;
 
-	writel_relaxed(0x01, priv->base + IR_ENABLE);
+	hix5hd2_ir_enable(priv);
+
 	while (readl_relaxed(priv->base + IR_BUSY)) {
 		if (timeout--) {
 			udelay(1);
@@ -128,13 +161,13 @@ static int hix5hd2_ir_open(struct rc_dev *rdev)
 	struct hix5hd2_ir_priv *priv = rdev->priv;
 	int ret;
 
-	ret = hix5hd2_ir_enable(priv, true);
+	ret = hix5hd2_ir_clk_enable(priv, true);
 	if (ret)
 		return ret;
 
 	ret = hix5hd2_ir_config(priv);
 	if (ret) {
-		hix5hd2_ir_enable(priv, false);
+		hix5hd2_ir_clk_enable(priv, false);
 		return ret;
 	}
 	return 0;
@@ -144,7 +177,7 @@ static void hix5hd2_ir_close(struct rc_dev *rdev)
 {
 	struct hix5hd2_ir_priv *priv = rdev->priv;
 
-	hix5hd2_ir_enable(priv, false);
+	hix5hd2_ir_clk_enable(priv, false);
 }
 
 static irqreturn_t hix5hd2_ir_rx_interrupt(int irq, void *data)
@@ -205,6 +238,13 @@ static irqreturn_t hix5hd2_ir_rx_interrupt(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static const struct of_device_id hix5hd2_ir_table[] = {
+	{ .compatible = "hisilicon,hix5hd2-ir", &hix5hd2_data, },
+	{ .compatible = "hisilicon,hi3796cv300-ir", &hi3796cv300_data, },
+	{},
+};
+MODULE_DEVICE_TABLE(of, hix5hd2_ir_table);
+
 static int hix5hd2_ir_probe(struct platform_device *pdev)
 {
 	struct rc_dev *rdev;
@@ -212,6 +252,7 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
 	struct resource *res;
 	struct hix5hd2_ir_priv *priv;
 	struct device_node *node = pdev->dev.of_node;
+	const struct of_device_id *of_id;
 	const char *map_name;
 	int ret;
 
@@ -219,6 +260,13 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
+	of_id = of_match_device(hix5hd2_ir_table, dev);
+	if (!of_id) {
+		dev_err(dev, "Unable to initialize IR data\n");
+		return -ENODEV;
+	}
+	priv->socdata = of_id->data;
+
 	priv->regmap = syscon_regmap_lookup_by_phandle(node,
 						       "hisilicon,power-syscon");
 	if (IS_ERR(priv->regmap)) {
@@ -309,7 +357,7 @@ static int hix5hd2_ir_suspend(struct device *dev)
 	struct hix5hd2_ir_priv *priv = dev_get_drvdata(dev);
 
 	clk_disable_unprepare(priv->clock);
-	hix5hd2_ir_enable(priv, false);
+	hix5hd2_ir_clk_enable(priv, false);
 
 	return 0;
 }
@@ -319,17 +367,18 @@ static int hix5hd2_ir_resume(struct device *dev)
 	struct hix5hd2_ir_priv *priv = dev_get_drvdata(dev);
 	int ret;
 
-	ret = hix5hd2_ir_enable(priv, true);
+	ret = hix5hd2_ir_clk_enable(priv, true);
 	if (ret)
 		return ret;
 
 	ret = clk_prepare_enable(priv->clock);
 	if (ret) {
-		hix5hd2_ir_enable(priv, false);
+		hix5hd2_ir_clk_enable(priv, false);
 		return ret;
 	}
 
-	writel_relaxed(0x01, priv->base + IR_ENABLE);
+	hix5hd2_ir_enable(priv);
+
 	writel_relaxed(0x00, priv->base + IR_INTM);
 	writel_relaxed(0xff, priv->base + IR_INTC);
 	writel_relaxed(0x01, priv->base + IR_START);
@@ -341,12 +390,6 @@ static int hix5hd2_ir_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(hix5hd2_ir_pm_ops, hix5hd2_ir_suspend,
 			 hix5hd2_ir_resume);
 
-static const struct of_device_id hix5hd2_ir_table[] = {
-	{ .compatible = "hisilicon,hix5hd2-ir", },
-	{},
-};
-MODULE_DEVICE_TABLE(of, hix5hd2_ir_table);
-
 static struct platform_driver hix5hd2_ir_driver = {
 	.driver = {
 		.name = IR_HIX5HD2_NAME,
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 7741151..6f80c25 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1891,23 +1891,28 @@ int rc_register_device(struct rc_dev *dev)
 
 	dev->registered = true;
 
-	if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
-		rc = rc_setup_rx_device(dev);
-		if (rc)
-			goto out_dev;
-	}
-
-	/* Ensure that the lirc kfifo is setup before we start the thread */
+	/*
+	 * Once the input device is registered in rc_setup_rx_device(),
+	 * userspace can open it and rc_open() will be called as a result.
+	 * This allows driver code to submit keycodes with rc_keydown(),
+	 * so lirc must be registered first.
+	 */
 	if (dev->allowed_protocols != RC_PROTO_BIT_CEC) {
 		rc = ir_lirc_register(dev);
 		if (rc < 0)
-			goto out_rx;
+			goto out_dev;
+	}
+
+	if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
+		rc = rc_setup_rx_device(dev);
+		if (rc)
+			goto out_lirc;
 	}
 
 	if (dev->driver_type == RC_DRIVER_IR_RAW) {
 		rc = ir_raw_event_register(dev);
 		if (rc < 0)
-			goto out_lirc;
+			goto out_rx;
 	}
 
 	dev_dbg(&dev->dev, "Registered rc%u (driver: %s)\n", dev->minor,
@@ -1915,11 +1920,11 @@ int rc_register_device(struct rc_dev *dev)
 
 	return 0;
 
+out_rx:
+	rc_free_rx_device(dev);
 out_lirc:
 	if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
 		ir_lirc_unregister(dev);
-out_rx:
-	rc_free_rx_device(dev);
 out_dev:
 	device_del(&dev->dev);
 out_rx_free:
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 7652e98..d77507b 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -353,7 +353,7 @@ static irqreturn_t serial_ir_irq_handler(int i, void *blah)
 			dcd = (status & hardware[type].signal_pin) ? 1 : 0;
 
 			if (dcd == last_dcd) {
-				dev_err(&serial_ir.pdev->dev,
+				dev_dbg(&serial_ir.pdev->dev,
 					"ignoring spike: %d %d %lldns %lldns\n",
 					dcd, sense, ktime_to_ns(kt),
 					ktime_to_ns(serial_ir.lastkt));
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 626264a..9d3d051 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -800,7 +800,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
 		break;
 	case FRAME_READY:
 		buf->bytesused = cam->buffers[buf->index].length;
-		buf->timestamp = ns_to_timeval(cam->buffers[buf->index].ts);
+		v4l2_buffer_set_timestamp(buf, cam->buffers[buf->index].ts);
 		buf->sequence = cam->buffers[buf->index].seq;
 		buf->flags = V4L2_BUF_FLAG_DONE;
 		break;
@@ -907,7 +907,7 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
 	buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE
 		| V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 	buf->field = V4L2_FIELD_NONE;
-	buf->timestamp = ns_to_timeval(cam->buffers[buf->index].ts);
+	v4l2_buffer_set_timestamp(buf, cam->buffers[buf->index].ts);
 	buf->sequence = cam->buffers[buf->index].seq;
 	buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
 	buf->length = cam->frame_size;
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index fd6e2df..de42db6 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -13,7 +13,6 @@
 #include <linux/spinlock.h>
 #include <linux/soundcard.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
 #include <linux/module.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -372,28 +371,6 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
 	return errCode;
 }
 
-static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
-					size_t size)
-{
-	struct snd_pcm_runtime *runtime = subs->runtime;
-	struct cx231xx *dev = snd_pcm_substream_chip(subs);
-
-	dev_dbg(dev->dev, "Allocating vbuffer\n");
-	if (runtime->dma_area) {
-		if (runtime->dma_bytes > size)
-			return 0;
-
-		vfree(runtime->dma_area);
-	}
-	runtime->dma_area = vmalloc(size);
-	if (!runtime->dma_area)
-		return -ENOMEM;
-
-	runtime->dma_bytes = size;
-
-	return 0;
-}
-
 static const struct snd_pcm_hardware snd_cx231xx_hw_capture = {
 	.info = SNDRV_PCM_INFO_BLOCK_TRANSFER	|
 	    SNDRV_PCM_INFO_MMAP			|
@@ -484,11 +461,6 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
 	}
 
 	dev->adev.users--;
-	if (substream->runtime->dma_area) {
-		dev_dbg(dev->dev, "freeing\n");
-		vfree(substream->runtime->dma_area);
-		substream->runtime->dma_area = NULL;
-	}
 	mutex_unlock(&dev->lock);
 
 	if (dev->adev.users == 0 && dev->adev.shutdown == 1) {
@@ -504,44 +476,6 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
 	return 0;
 }
 
-static int snd_cx231xx_hw_capture_params(struct snd_pcm_substream *substream,
-					 struct snd_pcm_hw_params *hw_params)
-{
-	struct cx231xx *dev = snd_pcm_substream_chip(substream);
-	int ret;
-
-	dev_dbg(dev->dev, "Setting capture parameters\n");
-
-	ret = snd_pcm_alloc_vmalloc_buffer(substream,
-					   params_buffer_bytes(hw_params));
-#if 0
-	/* TODO: set up cx231xx audio chip to deliver the correct audio format,
-	   current default is 48000hz multiplexed => 96000hz mono
-	   which shouldn't matter since analogue TV only supports mono */
-	unsigned int channels, rate, format;
-
-	format = params_format(hw_params);
-	rate = params_rate(hw_params);
-	channels = params_channels(hw_params);
-#endif
-
-	return ret;
-}
-
-static int snd_cx231xx_hw_capture_free(struct snd_pcm_substream *substream)
-{
-	struct cx231xx *dev = snd_pcm_substream_chip(substream);
-
-	dev_dbg(dev->dev, "Stop capture, if needed\n");
-
-	if (atomic_read(&dev->stream_started) > 0) {
-		atomic_set(&dev->stream_started, 0);
-		schedule_work(&dev->wq_trigger);
-	}
-
-	return 0;
-}
-
 static int snd_cx231xx_prepare(struct snd_pcm_substream *substream)
 {
 	struct cx231xx *dev = snd_pcm_substream_chip(substream);
@@ -614,24 +548,12 @@ static snd_pcm_uframes_t snd_cx231xx_capture_pointer(struct snd_pcm_substream
 	return hwptr_done;
 }
 
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
-					     unsigned long offset)
-{
-	void *pageptr = subs->runtime->dma_area + offset;
-
-	return vmalloc_to_page(pageptr);
-}
-
 static const struct snd_pcm_ops snd_cx231xx_pcm_capture = {
 	.open = snd_cx231xx_capture_open,
 	.close = snd_cx231xx_pcm_close,
-	.ioctl = snd_pcm_lib_ioctl,
-	.hw_params = snd_cx231xx_hw_capture_params,
-	.hw_free = snd_cx231xx_hw_capture_free,
 	.prepare = snd_cx231xx_prepare,
 	.trigger = snd_cx231xx_capture_trigger,
 	.pointer = snd_cx231xx_capture_pointer,
-	.page = snd_pcm_get_vmalloc_page,
 };
 
 static int cx231xx_audio_init(struct cx231xx *dev)
@@ -666,6 +588,7 @@ static int cx231xx_audio_init(struct cx231xx *dev)
 
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
 			&snd_cx231xx_pcm_capture);
+	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
 	pcm->info_flags = 0;
 	pcm->private_data = dev;
 	strscpy(pcm->name, "Conexant cx231xx Capture", sizeof(pcm->name));
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index f33b6a0..c665925 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -515,7 +515,8 @@ int cx231xx_i2c_register(struct cx231xx_i2c *bus)
 {
 	struct cx231xx *dev = bus->dev;
 
-	BUG_ON(!dev->cx231xx_send_usb_command);
+	if (!dev->cx231xx_send_usb_command)
+		return -EINVAL;
 
 	bus->i2c_adap = cx231xx_adap_template;
 	bus->i2c_adap.dev.parent = dev->dev;
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 792667e..b1f69c1 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -208,8 +208,8 @@ static int af9035_add_i2c_dev(struct dvb_usb_device *d, const char *type,
 	request_module("%s", board_info.type);
 
 	/* register I2C device */
-	client = i2c_new_device(adapter, &board_info);
-	if (client == NULL || client->dev.driver == NULL) {
+	client = i2c_new_client_device(adapter, &board_info);
+	if (!i2c_client_has_driver(client)) {
 		ret = -ENODEV;
 		goto err;
 	}
@@ -1621,9 +1621,10 @@ static int it930x_tuner_attach(struct dvb_usb_adapter *adap)
 	si2157_config.fe = adap->fe[0];
 
 	/*
-	 * HACK: The Logilink VG0022A has a bug: when the si2157
-	 * firmware that came with the device is replaced by a new
-	 * one, the I2C transfers to the tuner will return just 0xff.
+	 * HACK: The Logilink VG0022A and TerraTec TC2 Stick have
+	 * a bug: when the si2157 firmware that came with the device
+	 * is replaced by a new one, the I2C transfers to the tuner
+	 * will return just 0xff.
 	 *
 	 * Probably, the vendor firmware has some patch specifically
 	 * designed for this device. So, we can't replace by the
@@ -1633,8 +1634,10 @@ static int it930x_tuner_attach(struct dvb_usb_adapter *adap)
 	 * while we don't have that, the next best solution is to just
 	 * keep the original firmware at the device.
 	 */
-	if (le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_DEXATEK &&
-	    le16_to_cpu(d->udev->descriptor.idProduct) == 0x0100)
+	if ((le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_DEXATEK &&
+	     le16_to_cpu(d->udev->descriptor.idProduct) == 0x0100) ||
+	    (le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_TERRATEC &&
+	     le16_to_cpu(d->udev->descriptor.idProduct) == USB_PID_TERRATEC_CINERGY_TC2_STICK))
 		si2157_config.dont_load_firmware = true;
 
 	si2157_config.if_port = it930x_addresses_table[state->it930x_addresses].tuner_if_port;
@@ -2150,6 +2153,8 @@ static const struct usb_device_id af9035_id_table[] = {
 		&it930x_props, "AVerMedia TD310 DVB-T2", NULL) },
 	{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x0100,
 		&it930x_props, "Logilink VG0022A", NULL) },
+	{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_TC2_STICK,
+		&it930x_props, "TerraTec Cinergy TC2 Stick", NULL) },
 	{ }
 };
 MODULE_DEVICE_TABLE(usb, af9035_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index fb6d99d..0514e87 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -649,8 +649,8 @@ static int anysee_add_i2c_dev(struct dvb_usb_device *d, const char *type,
 	request_module("%s", board_info.type);
 
 	/* register I2C device */
-	client = i2c_new_device(adapter, &board_info);
-	if (client == NULL || client->dev.driver == NULL) {
+	client = i2c_new_client_device(adapter, &board_info);
+	if (!i2c_client_has_driver(client)) {
 		ret = -ENODEV;
 		goto err;
 	}
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 5016ede..c6881a1b 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -697,8 +697,8 @@ static int rtl2831u_frontend_attach(struct dvb_usb_adapter *adap)
 	board_info.addr = 0x10;
 	board_info.platform_data = pdata;
 	request_module("%s", board_info.type);
-	client = i2c_new_device(&d->i2c_adap, &board_info);
-	if (client == NULL || client->dev.driver == NULL) {
+	client = i2c_new_client_device(&d->i2c_adap, &board_info);
+	if (!i2c_client_has_driver(client)) {
 		ret = -ENODEV;
 		goto err;
 	}
@@ -918,8 +918,8 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
 	board_info.addr = 0x10;
 	board_info.platform_data = pdata;
 	request_module("%s", board_info.type);
-	client = i2c_new_device(&d->i2c_adap, &board_info);
-	if (client == NULL || client->dev.driver == NULL) {
+	client = i2c_new_client_device(&d->i2c_adap, &board_info);
+	if (!i2c_client_has_driver(client)) {
 		ret = -ENODEV;
 		goto err;
 	}
@@ -960,8 +960,8 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
 			info.addr = 0x18;
 			info.platform_data = &mn88472_config;
 			request_module(info.type);
-			client = i2c_new_device(&d->i2c_adap, &info);
-			if (client == NULL || client->dev.driver == NULL) {
+			client = i2c_new_client_device(&d->i2c_adap, &info);
+			if (!i2c_client_has_driver(client)) {
 				dev->slave_demod = SLAVE_DEMOD_NONE;
 				goto err_slave_demod_failed;
 			}
@@ -982,8 +982,8 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
 			info.addr = 0x18;
 			info.platform_data = &mn88473_config;
 			request_module(info.type);
-			client = i2c_new_device(&d->i2c_adap, &info);
-			if (client == NULL || client->dev.driver == NULL) {
+			client = i2c_new_client_device(&d->i2c_adap, &info);
+			if (!i2c_client_has_driver(client)) {
 				dev->slave_demod = SLAVE_DEMOD_NONE;
 				goto err_slave_demod_failed;
 			}
@@ -1025,8 +1025,8 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
 			info.addr = 0x64;
 			info.platform_data = &si2168_config;
 			request_module(info.type);
-			client = i2c_new_device(&d->i2c_adap, &info);
-			if (client == NULL || client->dev.driver == NULL) {
+			client = i2c_new_client_device(&d->i2c_adap, &info);
+			if (!i2c_client_has_driver(client)) {
 				dev->slave_demod = SLAVE_DEMOD_NONE;
 				goto err_slave_demod_failed;
 			}
@@ -1217,8 +1217,9 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
 			info.platform_data = &e4000_config;
 
 			request_module(info.type);
-			client = i2c_new_device(dev->demod_i2c_adapter, &info);
-			if (client == NULL || client->dev.driver == NULL)
+			client = i2c_new_client_device(dev->demod_i2c_adapter,
+						       &info);
+			if (!i2c_client_has_driver(client))
 				break;
 
 			if (!try_module_get(client->dev.driver->owner)) {
@@ -1240,9 +1241,9 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
 			board_info.addr = 0x56;
 			board_info.platform_data = &fc2580_pdata;
 			request_module("fc2580");
-			client = i2c_new_device(dev->demod_i2c_adapter,
-						&board_info);
-			if (client == NULL || client->dev.driver == NULL)
+			client = i2c_new_client_device(dev->demod_i2c_adapter,
+						       &board_info);
+			if (!i2c_client_has_driver(client))
 				break;
 			if (!try_module_get(client->dev.driver->owner)) {
 				i2c_unregister_device(client);
@@ -1271,8 +1272,9 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
 		board_info.addr = 0x60;
 		board_info.platform_data = &tua9001_pdata;
 		request_module("tua9001");
-		client = i2c_new_device(dev->demod_i2c_adapter, &board_info);
-		if (client == NULL || client->dev.driver == NULL)
+		client = i2c_new_client_device(dev->demod_i2c_adapter,
+					       &board_info);
+		if (!i2c_client_has_driver(client))
 			break;
 		if (!try_module_get(client->dev.driver->owner)) {
 			i2c_unregister_device(client);
@@ -1316,8 +1318,8 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
 			info.addr = 0x60;
 			info.platform_data = &si2157_config;
 			request_module(info.type);
-			client = i2c_new_device(&d->i2c_adap, &info);
-			if (client == NULL || client->dev.driver == NULL)
+			client = i2c_new_client_device(&d->i2c_adap, &info);
+			if (!i2c_client_has_driver(client))
 				break;
 
 			if (!try_module_get(client->dev.driver->owner)) {
@@ -1955,6 +1957,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
 		&rtl28xxu_props, "Sveon STV27", NULL) },
 	{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_TURBOX_DTT_2000,
 		&rtl28xxu_props, "TURBO-X Pure TV Tuner DTT-2000", NULL) },
+	{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_PROLECTRIX_DV107669,
+		&rtl28xxu_props, "PROlectrix DV107669", NULL) },
 
 	/* RTL2832P devices: */
 	{ DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
diff --git a/drivers/media/usb/dvb-usb-v2/zd1301.c b/drivers/media/usb/dvb-usb-v2/zd1301.c
index 63b66b2..ba2c1b0 100644
--- a/drivers/media/usb/dvb-usb-v2/zd1301.c
+++ b/drivers/media/usb/dvb-usb-v2/zd1301.c
@@ -172,8 +172,8 @@ static int zd1301_frontend_attach(struct dvb_usb_adapter *adap)
 	board_info.addr = 0x60;
 	board_info.platform_data = &dev->mt2060_pdata;
 	request_module("%s", "mt2060");
-	client = i2c_new_device(adapter, &board_info);
-	if (!client || !client->dev.driver) {
+	client = i2c_new_client_device(adapter, &board_info);
+	if (!i2c_client_has_driver(client)) {
 		ret = -ENODEV;
 		goto err_module_put_demod;
 	}
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index ac93e88..89b4b5d 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -554,7 +554,7 @@ static int af9005_boot_packet(struct usb_device *udev, int type, u8 *reply,
 			      u8 *buf, int size)
 {
 	u16 checksum;
-	int act_len, i, ret;
+	int act_len = 0, i, ret;
 
 	memset(buf, 0, size);
 	buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index fac19ec..c421b60 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -54,9 +54,6 @@ MODULE_PARM_DESC(debug, "set debugging level (see cxusb.h)."
 
 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
 
-#define deb_info(args...)   dprintk(dvb_usb_cxusb_debug, CXUSB_DBG_MISC, args)
-#define deb_i2c(args...)    dprintk(dvb_usb_cxusb_debug, CXUSB_DBG_I2C, args)
-
 enum cxusb_table_index {
 	MEDION_MD95700,
 	DVICO_BLUEBIRD_LG064F_COLD,
@@ -125,7 +122,7 @@ static void cxusb_gpio_tuner(struct dvb_usb_device *d, int onoff)
 	cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1);
 
 	if (i != 0x01)
-		deb_info("gpio_write failed.\n");
+		dev_info(&d->udev->dev, "gpio_write failed.\n");
 
 	st->gpio_write_state[GPIO_TUNER] = onoff;
 	st->gpio_write_refresh[GPIO_TUNER] = false;
@@ -142,7 +139,7 @@ static int cxusb_bluebird_gpio_rw(struct dvb_usb_device *d, u8 changemask,
 
 	rc = cxusb_ctrl_msg(d, CMD_BLUEBIRD_GPIO_RW, o, 2, &gpio_state, 1);
 	if (rc < 0 || (gpio_state & changemask) != (newval & changemask))
-		deb_info("bluebird_gpio_write failed.\n");
+		dev_info(&d->udev->dev, "bluebird_gpio_write failed.\n");
 
 	return rc < 0 ? rc : gpio_state;
 }
@@ -174,7 +171,7 @@ static int cxusb_d680_dmb_gpio_tuner(struct dvb_usb_device *d,
 	if (i == 0x01)
 		return 0;
 
-	deb_info("gpio_write failed.\n");
+	dev_info(&d->udev->dev, "gpio_write failed.\n");
 	return -EIO;
 }
 
@@ -248,7 +245,7 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
 				break;
 
 			if (ibuf[0] != 0x08)
-				deb_i2c("i2c read may have failed\n");
+				dev_info(&d->udev->dev, "i2c read may have failed\n");
 
 			memcpy(msg[i + 1].buf, &ibuf[1], msg[i + 1].len);
 
@@ -271,7 +268,7 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
 					   2 + msg[i].len, &ibuf, 1) < 0)
 				break;
 			if (ibuf != 0x08)
-				deb_i2c("i2c write may have failed\n");
+				dev_info(&d->udev->dev, "i2c write may have failed\n");
 		}
 	}
 
@@ -299,7 +296,7 @@ static int _cxusb_power_ctrl(struct dvb_usb_device *d, int onoff)
 {
 	u8 b = 0;
 
-	deb_info("setting power %s\n", onoff ? "ON" : "OFF");
+	dev_info(&d->udev->dev, "setting power %s\n", onoff ? "ON" : "OFF");
 
 	if (onoff)
 		return cxusb_ctrl_msg(d, CMD_POWER_ON, &b, 1, NULL, 0);
@@ -318,7 +315,7 @@ static int cxusb_power_ctrl(struct dvb_usb_device *d, int onoff)
 		mutex_lock(&cxdev->open_lock);
 
 		if (cxdev->open_type == CXUSB_OPEN_ANALOG) {
-			deb_info("preventing DVB core from setting power OFF while we are in analog mode\n");
+			dev_info(&d->udev->dev, "preventing DVB core from setting power OFF while we are in analog mode\n");
 			ret = -EBUSY;
 			goto ret_unlock;
 		}
@@ -754,16 +751,16 @@ static int dvico_bluebird_xc2028_callback(void *ptr, int component,
 
 	switch (command) {
 	case XC2028_TUNER_RESET:
-		deb_info("%s: XC2028_TUNER_RESET %d\n", __func__, arg);
+		dev_info(&d->udev->dev, "XC2028_TUNER_RESET %d\n", arg);
 		cxusb_bluebird_gpio_pulse(d, 0x01, 1);
 		break;
 	case XC2028_RESET_CLK:
-		deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg);
+		dev_info(&d->udev->dev, "XC2028_RESET_CLK %d\n", arg);
 		break;
 	case XC2028_I2C_FLUSH:
 		break;
 	default:
-		deb_info("%s: unknown command %d, arg %d\n", __func__,
+		dev_info(&d->udev->dev, "unknown command %d, arg %d\n",
 			 command, arg);
 		return -EINVAL;
 	}
@@ -1444,7 +1441,7 @@ int cxusb_medion_get(struct dvb_usb_device *dvbdev,
 
 	if (cxdev->open_ctr == 0) {
 		if (cxdev->open_type != open_type) {
-			deb_info("will acquire and switch to %s\n",
+			dev_info(&dvbdev->udev->dev, "will acquire and switch to %s\n",
 				 open_type == CXUSB_OPEN_ANALOG ?
 				 "analog" : "digital");
 
@@ -1476,7 +1473,7 @@ int cxusb_medion_get(struct dvb_usb_device *dvbdev,
 
 			cxdev->open_type = open_type;
 		} else {
-			deb_info("reacquired idle %s\n",
+			dev_info(&dvbdev->udev->dev, "reacquired idle %s\n",
 				 open_type == CXUSB_OPEN_ANALOG ?
 				 "analog" : "digital");
 		}
@@ -1484,8 +1481,8 @@ int cxusb_medion_get(struct dvb_usb_device *dvbdev,
 		cxdev->open_ctr = 1;
 	} else if (cxdev->open_type == open_type) {
 		cxdev->open_ctr++;
-		deb_info("acquired %s\n", open_type == CXUSB_OPEN_ANALOG ?
-			 "analog" : "digital");
+		dev_info(&dvbdev->udev->dev, "acquired %s\n",
+			 open_type == CXUSB_OPEN_ANALOG ? "analog" : "digital");
 	} else {
 		ret = -EBUSY;
 	}
@@ -1511,7 +1508,7 @@ void cxusb_medion_put(struct dvb_usb_device *dvbdev)
 	if (!WARN_ON(cxdev->open_ctr < 1)) {
 		cxdev->open_ctr--;
 
-		deb_info("release %s\n",
+		dev_info(&dvbdev->udev->dev, "release %s\n",
 			 cxdev->open_type == CXUSB_OPEN_ANALOG ?
 			 "analog" : "digital");
 	}
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index ab7a100..4ef3fa9 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -3772,8 +3772,8 @@ static int xbox_one_attach(struct dvb_usb_adapter *adap)
 	info.addr = 0x18;
 	info.platform_data = &mn88472_config;
 	request_module(info.type);
-	client_demod = i2c_new_device(&d->i2c_adap, &info);
-	if (client_demod == NULL || client_demod->dev.driver == NULL)
+	client_demod = i2c_new_client_device(&d->i2c_adap, &info);
+	if (!i2c_client_has_driver(client_demod))
 		goto fail_demod_device;
 	if (!try_module_get(client_demod->dev.driver->owner))
 		goto fail_demod_module;
@@ -3800,8 +3800,8 @@ static int xbox_one_attach(struct dvb_usb_adapter *adap)
 	info.platform_data = &tda18250_config;
 
 	request_module(info.type);
-	client_tuner = i2c_new_device(&adap->dev->i2c_adap, &info);
-	if (client_tuner == NULL || client_tuner->dev.driver == NULL)
+	client_tuner = i2c_new_client_device(&adap->dev->i2c_adap, &info);
+	if (!i2c_client_has_driver(client_tuner))
 		goto fail_tuner_device;
 	if (!try_module_get(client_tuner->dev.driver->owner))
 		goto fail_tuner_module;
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index dd5bb23..99a3933 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -230,18 +230,22 @@ static struct rc_map_table rc_map_digitv_table[] = {
 
 static int digitv_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
 {
-	int i;
+	int ret, i;
 	u8 key[5];
 	u8 b[4] = { 0 };
 
 	*event = 0;
 	*state = REMOTE_NO_KEY_PRESSED;
 
-	digitv_ctrl_msg(d,USB_READ_REMOTE,0,NULL,0,&key[1],4);
+	ret = digitv_ctrl_msg(d, USB_READ_REMOTE, 0, NULL, 0, &key[1], 4);
+	if (ret)
+		return ret;
 
 	/* Tell the device we've read the remote. Not sure how necessary
 	   this is, but the Nebula SDK does it. */
-	digitv_ctrl_msg(d,USB_WRITE_REMOTE,0,b,4,NULL,0);
+	ret = digitv_ctrl_msg(d, USB_WRITE_REMOTE, 0, b, 4, NULL, 0);
+	if (ret)
+		return ret;
 
 	/* if something is inside the buffer, simulate key press */
 	if (key[1] != 0)
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-urb.c b/drivers/media/usb/dvb-usb/dvb-usb-urb.c
index c1b4e94..2aabf90 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-urb.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-urb.c
@@ -12,7 +12,7 @@
 int dvb_usb_generic_rw(struct dvb_usb_device *d, u8 *wbuf, u16 wlen, u8 *rbuf,
 	u16 rlen, int delay_ms)
 {
-	int actlen,ret = -ENOMEM;
+	int actlen = 0, ret = -ENOMEM;
 
 	if (!d || wbuf == NULL || wlen == 0)
 		return -EINVAL;
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index b960abd..8b58450 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -1590,8 +1590,8 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
 	board_info.addr = 0x68;
 	board_info.platform_data = &m88ds3103_pdata;
 	request_module("m88ds3103");
-	client = i2c_new_device(&d->i2c_adap, &board_info);
-	if (client == NULL || client->dev.driver == NULL)
+	client = i2c_new_client_device(&d->i2c_adap, &board_info);
+	if (!i2c_client_has_driver(client))
 		return -ENODEV;
 	if (!try_module_get(client->dev.driver->owner)) {
 		i2c_unregister_device(client);
@@ -1609,9 +1609,9 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
 	board_info.addr = 0x60;
 	board_info.platform_data = &ts2020_config;
 	request_module("ts2020");
-	client = i2c_new_device(i2c_adapter, &board_info);
+	client = i2c_new_client_device(i2c_adapter, &board_info);
 
-	if (client == NULL || client->dev.driver == NULL) {
+	if (!i2c_client_has_driver(client)) {
 		dvb_frontend_detach(adap->fe_adap[0].fe);
 		return -ENODEV;
 	}
diff --git a/drivers/media/usb/dvb-usb/vp7045.c b/drivers/media/usb/dvb-usb/vp7045.c
index 80c1cf0..2baf572 100644
--- a/drivers/media/usb/dvb-usb/vp7045.c
+++ b/drivers/media/usb/dvb-usb/vp7045.c
@@ -96,10 +96,14 @@ static int vp7045_power_ctrl(struct dvb_usb_device *d, int onoff)
 
 static int vp7045_rc_query(struct dvb_usb_device *d)
 {
+	int ret;
 	u8 key;
-	vp7045_usb_op(d,RC_VAL_READ,NULL,0,&key,1,20);
 
-	deb_rc("remote query key: %x %d\n",key,key);
+	ret = vp7045_usb_op(d, RC_VAL_READ, NULL, 0, &key, 1, 20);
+	if (ret)
+		return ret;
+
+	deb_rc("remote query key: %x\n", key);
 
 	if (key != 0x44) {
 		/*
@@ -115,15 +119,18 @@ static int vp7045_rc_query(struct dvb_usb_device *d)
 
 static int vp7045_read_eeprom(struct dvb_usb_device *d,u8 *buf, int len, int offset)
 {
-	int i = 0;
-	u8 v,br[2];
+	int i, ret;
+	u8 v, br[2];
 	for (i=0; i < len; i++) {
 		v = offset + i;
-		vp7045_usb_op(d,GET_EE_VALUE,&v,1,br,2,5);
+		ret = vp7045_usb_op(d, GET_EE_VALUE, &v, 1, br, 2, 5);
+		if (ret)
+			return ret;
+
 		buf[i] = br[1];
 	}
-	deb_info("VP7045 EEPROM read (offs: %d, len: %d) : ",offset, i);
-	debug_dump(buf,i,deb_info);
+	deb_info("VP7045 EEPROM read (offs: %d, len: %d) : ", offset, i);
+	debug_dump(buf, i, deb_info);
 	return 0;
 }
 
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 79dfbb2..6833b5b 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -30,7 +30,6 @@
 #include <linux/spinlock.h>
 #include <linux/soundcard.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
 #include <linux/module.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -192,28 +191,6 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
 	return 0;
 }
 
-static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
-					size_t size)
-{
-	struct em28xx *dev = snd_pcm_substream_chip(subs);
-	struct snd_pcm_runtime *runtime = subs->runtime;
-
-	dprintk("Allocating vbuffer\n");
-	if (runtime->dma_area) {
-		if (runtime->dma_bytes > size)
-			return 0;
-
-		vfree(runtime->dma_area);
-	}
-	runtime->dma_area = vmalloc(size);
-	if (!runtime->dma_area)
-		return -ENOMEM;
-
-	runtime->dma_bytes = size;
-
-	return 0;
-}
-
 static const struct snd_pcm_hardware snd_em28xx_hw_capture = {
 	.info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
 		SNDRV_PCM_INFO_MMAP           |
@@ -341,63 +318,12 @@ static int snd_em28xx_pcm_close(struct snd_pcm_substream *substream)
 	}
 
 	em28xx_audio_analog_set(dev);
-	if (substream->runtime->dma_area) {
-		dprintk("freeing\n");
-		vfree(substream->runtime->dma_area);
-		substream->runtime->dma_area = NULL;
-	}
 	mutex_unlock(&dev->lock);
 	kref_put(&dev->ref, em28xx_free_device);
 
 	return 0;
 }
 
-static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream,
-					struct snd_pcm_hw_params *hw_params)
-{
-	int ret;
-	struct em28xx *dev = snd_pcm_substream_chip(substream);
-
-	if (dev->disconnected)
-		return -ENODEV;
-
-	dprintk("Setting capture parameters\n");
-
-	ret = snd_pcm_alloc_vmalloc_buffer(substream,
-					   params_buffer_bytes(hw_params));
-	if (ret < 0)
-		return ret;
-#if 0
-	/*
-	 * TODO: set up em28xx audio chip to deliver the correct audio format,
-	 * current default is 48000hz multiplexed => 96000hz mono
-	 * which shouldn't matter since analogue TV only supports mono
-	 */
-	unsigned int channels, rate, format;
-
-	format = params_format(hw_params);
-	rate = params_rate(hw_params);
-	channels = params_channels(hw_params);
-#endif
-
-	return 0;
-}
-
-static int snd_em28xx_hw_capture_free(struct snd_pcm_substream *substream)
-{
-	struct em28xx *dev = snd_pcm_substream_chip(substream);
-	struct em28xx_audio *adev = &dev->adev;
-
-	dprintk("Stop capture, if needed\n");
-
-	if (atomic_read(&adev->stream_started) > 0) {
-		atomic_set(&adev->stream_started, 0);
-		schedule_work(&adev->wq_trigger);
-	}
-
-	return 0;
-}
-
 static int snd_em28xx_prepare(struct snd_pcm_substream *substream)
 {
 	struct em28xx *dev = snd_pcm_substream_chip(substream);
@@ -471,14 +397,6 @@ static snd_pcm_uframes_t snd_em28xx_capture_pointer(struct snd_pcm_substream
 	return hwptr_done;
 }
 
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
-					     unsigned long offset)
-{
-	void *pageptr = subs->runtime->dma_area + offset;
-
-	return vmalloc_to_page(pageptr);
-}
-
 /*
  * AC97 volume control support
  */
@@ -708,13 +626,9 @@ static int em28xx_cvol_new(struct snd_card *card, struct em28xx *dev,
 static const struct snd_pcm_ops snd_em28xx_pcm_capture = {
 	.open      = snd_em28xx_capture_open,
 	.close     = snd_em28xx_pcm_close,
-	.ioctl     = snd_pcm_lib_ioctl,
-	.hw_params = snd_em28xx_hw_capture_params,
-	.hw_free   = snd_em28xx_hw_capture_free,
 	.prepare   = snd_em28xx_prepare,
 	.trigger   = snd_em28xx_capture_trigger,
 	.pointer   = snd_em28xx_capture_pointer,
-	.page      = snd_pcm_get_vmalloc_page,
 };
 
 static void em28xx_audio_free_urb(struct em28xx *dev)
@@ -936,6 +850,7 @@ static int em28xx_audio_init(struct em28xx *dev)
 		goto card_free;
 
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_em28xx_pcm_capture);
+	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
 	pcm->info_flags = 0;
 	pcm->private_data = dev;
 	strscpy(pcm->name, "Empia 28xx Capture", sizeof(pcm->name));
diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
index 49e75a1..b9e4512 100644
--- a/drivers/media/usb/go7007/s2250-board.c
+++ b/drivers/media/usb/go7007/s2250-board.c
@@ -607,6 +607,7 @@ static int s2250_remove(struct i2c_client *client)
 {
 	struct s2250 *state = to_state(i2c_get_clientdata(client));
 
+	i2c_unregister_device(state->audio);
 	v4l2_device_unregister_subdev(&state->sd);
 	v4l2_ctrl_handler_free(&state->hdl);
 	kfree(state);
diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c
index b05fa22..2ce85ab 100644
--- a/drivers/media/usb/go7007/snd-go7007.c
+++ b/drivers/media/usb/go7007/snd-go7007.c
@@ -9,7 +9,6 @@
 #include <linux/spinlock.h>
 #include <linux/delay.h>
 #include <linux/sched.h>
-#include <linux/vmalloc.h>
 #include <linux/time.h>
 #include <linux/mm.h>
 #include <linux/i2c.h>
@@ -100,16 +99,7 @@ static int go7007_snd_hw_params(struct snd_pcm_substream *substream,
 				struct snd_pcm_hw_params *hw_params)
 {
 	struct go7007 *go = snd_pcm_substream_chip(substream);
-	unsigned int bytes;
 
-	bytes = params_buffer_bytes(hw_params);
-	if (substream->runtime->dma_bytes > 0)
-		vfree(substream->runtime->dma_area);
-	substream->runtime->dma_bytes = 0;
-	substream->runtime->dma_area = vmalloc(bytes);
-	if (substream->runtime->dma_area == NULL)
-		return -ENOMEM;
-	substream->runtime->dma_bytes = bytes;
 	go->audio_deliver = parse_audio_stream_data;
 	return 0;
 }
@@ -119,9 +109,6 @@ static int go7007_snd_hw_free(struct snd_pcm_substream *substream)
 	struct go7007 *go = snd_pcm_substream_chip(substream);
 
 	go->audio_deliver = NULL;
-	if (substream->runtime->dma_bytes > 0)
-		vfree(substream->runtime->dma_area);
-	substream->runtime->dma_bytes = 0;
 	return 0;
 }
 
@@ -185,22 +172,14 @@ static snd_pcm_uframes_t go7007_snd_pcm_pointer(struct snd_pcm_substream *substr
 	return gosnd->hw_ptr;
 }
 
-static struct page *go7007_snd_pcm_page(struct snd_pcm_substream *substream,
-					unsigned long offset)
-{
-	return vmalloc_to_page(substream->runtime->dma_area + offset);
-}
-
 static const struct snd_pcm_ops go7007_snd_capture_ops = {
 	.open		= go7007_snd_capture_open,
 	.close		= go7007_snd_capture_close,
-	.ioctl		= snd_pcm_lib_ioctl,
 	.hw_params	= go7007_snd_hw_params,
 	.hw_free	= go7007_snd_hw_free,
 	.prepare	= go7007_snd_pcm_prepare,
 	.trigger	= go7007_snd_pcm_trigger,
 	.pointer	= go7007_snd_pcm_pointer,
-	.page		= go7007_snd_pcm_page,
 };
 
 static int go7007_snd_free(struct snd_device *device)
@@ -236,22 +215,18 @@ int go7007_snd_init(struct go7007 *go)
 	gosnd->capturing = 0;
 	ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0,
 			   &gosnd->card);
-	if (ret < 0) {
-		kfree(gosnd);
-		return ret;
-	}
+	if (ret < 0)
+		goto free_snd;
+
 	ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go,
 			&go7007_snd_device_ops);
-	if (ret < 0) {
-		kfree(gosnd);
-		return ret;
-	}
+	if (ret < 0)
+		goto free_card;
+
 	ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm);
-	if (ret < 0) {
-		snd_card_free(gosnd->card);
-		kfree(gosnd);
-		return ret;
-	}
+	if (ret < 0)
+		goto free_card;
+
 	strscpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver));
 	strscpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->shortname));
 	strscpy(gosnd->card->longname, gosnd->card->shortname,
@@ -260,13 +235,12 @@ int go7007_snd_init(struct go7007 *go)
 	gosnd->pcm->private_data = go;
 	snd_pcm_set_ops(gosnd->pcm, SNDRV_PCM_STREAM_CAPTURE,
 			&go7007_snd_capture_ops);
+	snd_pcm_set_managed_buffer_all(gosnd->pcm, SNDRV_DMA_TYPE_VMALLOC,
+				       NULL, 0, 0);
 
 	ret = snd_card_register(gosnd->card);
-	if (ret < 0) {
-		snd_card_free(gosnd->card);
-		kfree(gosnd);
-		return ret;
-	}
+	if (ret < 0)
+		goto free_card;
 
 	gosnd->substream = NULL;
 	go->snd_context = gosnd;
@@ -274,6 +248,12 @@ int go7007_snd_init(struct go7007 *go)
 	++dev;
 
 	return 0;
+
+free_card:
+	snd_card_free(gosnd->card);
+free_snd:
+	kfree(gosnd);
+	return ret;
 }
 EXPORT_SYMBOL(go7007_snd_init);
 
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 4add2b1..c1b307b 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1461,7 +1461,7 @@ int gspca_dev_probe2(struct usb_interface *intf,
 		pr_err("couldn't kzalloc gspca struct\n");
 		return -ENOMEM;
 	}
-	gspca_dev->usb_buf = kmalloc(USB_BUF_SZ, GFP_KERNEL);
+	gspca_dev->usb_buf = kzalloc(USB_BUF_SZ, GFP_KERNEL);
 	if (!gspca_dev->usb_buf) {
 		pr_err("out of memory\n");
 		ret = -ENOMEM;
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
index 5960955..afda438 100644
--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -49,7 +49,7 @@ static int debug;
 static int persistent_config;
 module_param(debug, int, 0644);
 module_param(persistent_config, int, 0644);
-MODULE_PARM_DESC(debug, "debug level (0-1)");
+MODULE_PARM_DESC(debug, "debug level (0-2)");
 MODULE_PARM_DESC(persistent_config, "read config from persistent memory (0-1)");
 
 enum pulse8_msgcodes {
@@ -100,6 +100,61 @@ enum pulse8_msgcodes {
 	MSGCODE_FRAME_ACK = 0x40,
 };
 
+static const char * const pulse8_msgnames[] = {
+	"NOTHING",
+	"PING",
+	"TIMEOUT_ERROR",
+	"HIGH_ERROR",
+	"LOW_ERROR",
+	"FRAME_START",
+	"FRAME_DATA",
+	"RECEIVE_FAILED",
+	"COMMAND_ACCEPTED",
+	"COMMAND_REJECTED",
+	"SET_ACK_MASK",
+	"TRANSMIT",
+	"TRANSMIT_EOM",
+	"TRANSMIT_IDLETIME",
+	"TRANSMIT_ACK_POLARITY",
+	"TRANSMIT_LINE_TIMEOUT",
+	"TRANSMIT_SUCCEEDED",
+	"TRANSMIT_FAILED_LINE",
+	"TRANSMIT_FAILED_ACK",
+	"TRANSMIT_FAILED_TIMEOUT_DATA",
+	"TRANSMIT_FAILED_TIMEOUT_LINE",
+	"FIRMWARE_VERSION",
+	"START_BOOTLOADER",
+	"GET_BUILDDATE",
+	"SET_CONTROLLED",
+	"GET_AUTO_ENABLED",
+	"SET_AUTO_ENABLED",
+	"GET_DEFAULT_LOGICAL_ADDRESS",
+	"SET_DEFAULT_LOGICAL_ADDRESS",
+	"GET_LOGICAL_ADDRESS_MASK",
+	"SET_LOGICAL_ADDRESS_MASK",
+	"GET_PHYSICAL_ADDRESS",
+	"SET_PHYSICAL_ADDRESS",
+	"GET_DEVICE_TYPE",
+	"SET_DEVICE_TYPE",
+	"GET_HDMI_VERSION",
+	"SET_HDMI_VERSION",
+	"GET_OSD_NAME",
+	"SET_OSD_NAME",
+	"WRITE_EEPROM",
+	"GET_ADAPTER_TYPE",
+	"SET_ACTIVE_SOURCE",
+};
+
+static const char *pulse8_msgname(u8 cmd)
+{
+	static char unknown_msg[5];
+
+	if ((cmd & 0x3f) < ARRAY_SIZE(pulse8_msgnames))
+		return pulse8_msgnames[cmd & 0x3f];
+	snprintf(unknown_msg, sizeof(unknown_msg), "0x%02x", cmd);
+	return unknown_msg;
+}
+
 #define MSGSTART	0xff
 #define MSGEND		0xfe
 #define MSGESC		0xfd
@@ -109,147 +164,45 @@ enum pulse8_msgcodes {
 
 #define PING_PERIOD	(15 * HZ)
 
+#define NUM_MSGS 8
+
 struct pulse8 {
 	struct device *dev;
 	struct serio *serio;
 	struct cec_adapter *adap;
 	unsigned int vers;
-	struct completion cmd_done;
-	struct work_struct work;
-	u8 work_result;
+
 	struct delayed_work ping_eeprom_work;
-	struct cec_msg rx_msg;
+
+	struct work_struct irq_work;
+	struct cec_msg rx_msg[NUM_MSGS];
+	unsigned int rx_msg_cur_idx, rx_msg_num;
+	/* protect rx_msg_cur_idx and rx_msg_num */
+	spinlock_t msg_lock;
+	u8 new_rx_msg[CEC_MAX_MSG_SIZE];
+	u8 new_rx_msg_len;
+
+	struct work_struct tx_work;
+	u32 tx_done_status;
+	u32 tx_signal_free_time;
+	struct cec_msg tx_msg;
+	bool tx_msg_is_bcast;
+
+	struct completion cmd_done;
 	u8 data[DATA_SIZE];
 	unsigned int len;
 	u8 buf[DATA_SIZE];
 	unsigned int idx;
 	bool escape;
 	bool started;
-	struct mutex config_lock;
-	struct mutex write_lock;
+
+	/* locks access to the adapter */
+	struct mutex lock;
 	bool config_pending;
 	bool restoring_config;
 	bool autonomous;
 };
 
-static void pulse8_ping_eeprom_work_handler(struct work_struct *work);
-
-static void pulse8_irq_work_handler(struct work_struct *work)
-{
-	struct pulse8 *pulse8 =
-		container_of(work, struct pulse8, work);
-	u8 result = pulse8->work_result;
-
-	pulse8->work_result = 0;
-	switch (result & 0x3f) {
-	case MSGCODE_FRAME_DATA:
-		cec_received_msg(pulse8->adap, &pulse8->rx_msg);
-		break;
-	case MSGCODE_TRANSMIT_SUCCEEDED:
-		cec_transmit_attempt_done(pulse8->adap, CEC_TX_STATUS_OK);
-		break;
-	case MSGCODE_TRANSMIT_FAILED_ACK:
-		cec_transmit_attempt_done(pulse8->adap, CEC_TX_STATUS_NACK);
-		break;
-	case MSGCODE_TRANSMIT_FAILED_LINE:
-	case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
-	case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
-		cec_transmit_attempt_done(pulse8->adap, CEC_TX_STATUS_ERROR);
-		break;
-	}
-}
-
-static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
-				    unsigned int flags)
-{
-	struct pulse8 *pulse8 = serio_get_drvdata(serio);
-
-	if (!pulse8->started && data != MSGSTART)
-		return IRQ_HANDLED;
-	if (data == MSGESC) {
-		pulse8->escape = true;
-		return IRQ_HANDLED;
-	}
-	if (pulse8->escape) {
-		data += MSGOFFSET;
-		pulse8->escape = false;
-	} else if (data == MSGEND) {
-		struct cec_msg *msg = &pulse8->rx_msg;
-		u8 msgcode = pulse8->buf[0];
-
-		if (debug)
-			dev_info(pulse8->dev, "received: %*ph\n",
-				 pulse8->idx, pulse8->buf);
-		switch (msgcode & 0x3f) {
-		case MSGCODE_FRAME_START:
-			msg->len = 1;
-			msg->msg[0] = pulse8->buf[1];
-			break;
-		case MSGCODE_FRAME_DATA:
-			if (msg->len == CEC_MAX_MSG_SIZE)
-				break;
-			msg->msg[msg->len++] = pulse8->buf[1];
-			if (msgcode & MSGCODE_FRAME_EOM) {
-				WARN_ON(pulse8->work_result);
-				pulse8->work_result = msgcode;
-				schedule_work(&pulse8->work);
-				break;
-			}
-			break;
-		case MSGCODE_TRANSMIT_SUCCEEDED:
-		case MSGCODE_TRANSMIT_FAILED_LINE:
-		case MSGCODE_TRANSMIT_FAILED_ACK:
-		case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
-		case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
-			WARN_ON(pulse8->work_result);
-			pulse8->work_result = msgcode;
-			schedule_work(&pulse8->work);
-			break;
-		case MSGCODE_HIGH_ERROR:
-		case MSGCODE_LOW_ERROR:
-		case MSGCODE_RECEIVE_FAILED:
-		case MSGCODE_TIMEOUT_ERROR:
-			break;
-		case MSGCODE_COMMAND_ACCEPTED:
-		case MSGCODE_COMMAND_REJECTED:
-		default:
-			if (pulse8->idx == 0)
-				break;
-			memcpy(pulse8->data, pulse8->buf, pulse8->idx);
-			pulse8->len = pulse8->idx;
-			complete(&pulse8->cmd_done);
-			break;
-		}
-		pulse8->idx = 0;
-		pulse8->started = false;
-		return IRQ_HANDLED;
-	} else if (data == MSGSTART) {
-		pulse8->idx = 0;
-		pulse8->started = true;
-		return IRQ_HANDLED;
-	}
-
-	if (pulse8->idx >= DATA_SIZE) {
-		dev_dbg(pulse8->dev,
-			"throwing away %d bytes of garbage\n", pulse8->idx);
-		pulse8->idx = 0;
-	}
-	pulse8->buf[pulse8->idx++] = data;
-	return IRQ_HANDLED;
-}
-
-static void pulse8_disconnect(struct serio *serio)
-{
-	struct pulse8 *pulse8 = serio_get_drvdata(serio);
-
-	cec_unregister_adapter(pulse8->adap);
-	cancel_delayed_work_sync(&pulse8->ping_eeprom_work);
-	dev_info(&serio->dev, "disconnected\n");
-	serio_close(serio);
-	serio_set_drvdata(serio, NULL);
-	kfree(pulse8);
-}
-
 static int pulse8_send(struct serio *serio, const u8 *command, u8 cmd_len)
 {
 	int err = 0;
@@ -278,7 +231,9 @@ static int pulse8_send_and_wait_once(struct pulse8 *pulse8,
 {
 	int err;
 
-	/*dev_info(pulse8->dev, "transmit: %*ph\n", cmd_len, cmd);*/
+	if (debug > 1)
+		dev_info(pulse8->dev, "transmit %s: %*ph\n",
+			 pulse8_msgname(cmd[0]), cmd_len, cmd);
 	init_completion(&pulse8->cmd_done);
 
 	err = pulse8_send(pulse8->serio, cmd, cmd_len);
@@ -294,8 +249,9 @@ static int pulse8_send_and_wait_once(struct pulse8 *pulse8,
 		return -ENOTTY;
 	if (response &&
 	    ((pulse8->data[0] & 0x3f) != response || pulse8->len < size + 1)) {
-		dev_info(pulse8->dev, "transmit: failed %02x\n",
-			 pulse8->data[0] & 0x3f);
+		dev_info(pulse8->dev, "transmit %s failed with %s\n",
+			 pulse8_msgname(cmd[0]),
+			 pulse8_msgname(pulse8->data[0]));
 		return -EIO;
 	}
 	return 0;
@@ -307,23 +263,395 @@ static int pulse8_send_and_wait(struct pulse8 *pulse8,
 	u8 cmd_sc[2];
 	int err;
 
-	mutex_lock(&pulse8->write_lock);
 	err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len, response, size);
+	if (err != -ENOTTY)
+		return err;
 
-	if (err == -ENOTTY) {
-		cmd_sc[0] = MSGCODE_SET_CONTROLLED;
-		cmd_sc[1] = 1;
-		err = pulse8_send_and_wait_once(pulse8, cmd_sc, 2,
-						MSGCODE_COMMAND_ACCEPTED, 1);
-		if (err)
-			goto unlock;
+	cmd_sc[0] = MSGCODE_SET_CONTROLLED;
+	cmd_sc[1] = 1;
+	err = pulse8_send_and_wait_once(pulse8, cmd_sc, 2,
+					MSGCODE_COMMAND_ACCEPTED, 1);
+	if (!err)
 		err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len,
 						response, size);
+	return err == -ENOTTY ? -EIO : err;
+}
+
+static void pulse8_tx_work_handler(struct work_struct *work)
+{
+	struct pulse8 *pulse8 = container_of(work, struct pulse8, tx_work);
+	struct cec_msg *msg = &pulse8->tx_msg;
+	unsigned int i;
+	u8 cmd[2];
+	int err;
+
+	if (msg->len == 0)
+		return;
+
+	mutex_lock(&pulse8->lock);
+	cmd[0] = MSGCODE_TRANSMIT_IDLETIME;
+	cmd[1] = pulse8->tx_signal_free_time;
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 1);
+	cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY;
+	cmd[1] = cec_msg_is_broadcast(msg);
+	pulse8->tx_msg_is_bcast = cec_msg_is_broadcast(msg);
+	if (!err)
+		err = pulse8_send_and_wait(pulse8, cmd, 2,
+					   MSGCODE_COMMAND_ACCEPTED, 1);
+	cmd[0] = msg->len == 1 ? MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
+	cmd[1] = msg->msg[0];
+	if (!err)
+		err = pulse8_send_and_wait(pulse8, cmd, 2,
+					   MSGCODE_COMMAND_ACCEPTED, 1);
+	if (!err && msg->len > 1) {
+		for (i = 1; !err && i < msg->len; i++) {
+			cmd[0] = ((i == msg->len - 1)) ?
+				MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
+			cmd[1] = msg->msg[i];
+			err = pulse8_send_and_wait(pulse8, cmd, 2,
+						   MSGCODE_COMMAND_ACCEPTED, 1);
+		}
+	}
+	if (err && debug)
+		dev_info(pulse8->dev, "%s(0x%02x) failed with error %d for msg %*ph\n",
+			 pulse8_msgname(cmd[0]), cmd[1],
+			 err, msg->len, msg->msg);
+	msg->len = 0;
+	mutex_unlock(&pulse8->lock);
+	if (err)
+		cec_transmit_attempt_done(pulse8->adap, CEC_TX_STATUS_ERROR);
+}
+
+static void pulse8_irq_work_handler(struct work_struct *work)
+{
+	struct pulse8 *pulse8 =
+		container_of(work, struct pulse8, irq_work);
+	unsigned long flags;
+	u32 status;
+
+	spin_lock_irqsave(&pulse8->msg_lock, flags);
+	while (pulse8->rx_msg_num) {
+		spin_unlock_irqrestore(&pulse8->msg_lock, flags);
+		if (debug)
+			dev_info(pulse8->dev, "adap received %*ph\n",
+				 pulse8->rx_msg[pulse8->rx_msg_cur_idx].len,
+				 pulse8->rx_msg[pulse8->rx_msg_cur_idx].msg);
+		cec_received_msg(pulse8->adap,
+				 &pulse8->rx_msg[pulse8->rx_msg_cur_idx]);
+		spin_lock_irqsave(&pulse8->msg_lock, flags);
+		if (pulse8->rx_msg_num)
+			pulse8->rx_msg_num--;
+		pulse8->rx_msg_cur_idx =
+			(pulse8->rx_msg_cur_idx + 1) % NUM_MSGS;
+	}
+	spin_unlock_irqrestore(&pulse8->msg_lock, flags);
+
+	mutex_lock(&pulse8->lock);
+	status = pulse8->tx_done_status;
+	pulse8->tx_done_status = 0;
+	mutex_unlock(&pulse8->lock);
+	if (status)
+		cec_transmit_attempt_done(pulse8->adap, status);
+}
+
+static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
+				    unsigned int flags)
+{
+	struct pulse8 *pulse8 = serio_get_drvdata(serio);
+	unsigned long irq_flags;
+	unsigned int idx;
+
+	if (!pulse8->started && data != MSGSTART)
+		return IRQ_HANDLED;
+	if (data == MSGESC) {
+		pulse8->escape = true;
+		return IRQ_HANDLED;
+	}
+	if (pulse8->escape) {
+		data += MSGOFFSET;
+		pulse8->escape = false;
+	} else if (data == MSGEND) {
+		u8 msgcode = pulse8->buf[0];
+
+		if (debug > 1)
+			dev_info(pulse8->dev, "received %s: %*ph\n",
+				 pulse8_msgname(msgcode),
+				 pulse8->idx, pulse8->buf);
+		switch (msgcode & 0x3f) {
+		case MSGCODE_FRAME_START:
+			/*
+			 * Test if we are receiving a new msg when a previous
+			 * message is still pending.
+			 */
+			if (!(msgcode & MSGCODE_FRAME_EOM)) {
+				pulse8->new_rx_msg_len = 1;
+				pulse8->new_rx_msg[0] = pulse8->buf[1];
+				break;
+			}
+			/* fall through */
+		case MSGCODE_FRAME_DATA:
+			if (pulse8->new_rx_msg_len < CEC_MAX_MSG_SIZE)
+				pulse8->new_rx_msg[pulse8->new_rx_msg_len++] =
+					pulse8->buf[1];
+			if (!(msgcode & MSGCODE_FRAME_EOM))
+				break;
+
+			spin_lock_irqsave(&pulse8->msg_lock, irq_flags);
+			idx = (pulse8->rx_msg_cur_idx + pulse8->rx_msg_num) %
+				NUM_MSGS;
+			if (pulse8->rx_msg_num == NUM_MSGS) {
+				dev_warn(pulse8->dev,
+					 "message queue is full, dropping %*ph\n",
+					 pulse8->new_rx_msg_len,
+					 pulse8->new_rx_msg);
+				spin_unlock_irqrestore(&pulse8->msg_lock,
+						       irq_flags);
+				pulse8->new_rx_msg_len = 0;
+				break;
+			}
+			pulse8->rx_msg_num++;
+			memcpy(pulse8->rx_msg[idx].msg, pulse8->new_rx_msg,
+			       pulse8->new_rx_msg_len);
+			pulse8->rx_msg[idx].len = pulse8->new_rx_msg_len;
+			spin_unlock_irqrestore(&pulse8->msg_lock, irq_flags);
+			schedule_work(&pulse8->irq_work);
+			pulse8->new_rx_msg_len = 0;
+			break;
+		case MSGCODE_TRANSMIT_SUCCEEDED:
+			WARN_ON(pulse8->tx_done_status);
+			pulse8->tx_done_status = CEC_TX_STATUS_OK;
+			schedule_work(&pulse8->irq_work);
+			break;
+		case MSGCODE_TRANSMIT_FAILED_ACK:
+			/*
+			 * A NACK for a broadcast message makes no sense, these
+			 * seem to be spurious messages and are skipped.
+			 */
+			if (pulse8->tx_msg_is_bcast)
+				break;
+			WARN_ON(pulse8->tx_done_status);
+			pulse8->tx_done_status = CEC_TX_STATUS_NACK;
+			schedule_work(&pulse8->irq_work);
+			break;
+		case MSGCODE_TRANSMIT_FAILED_LINE:
+		case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
+		case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
+			WARN_ON(pulse8->tx_done_status);
+			pulse8->tx_done_status = CEC_TX_STATUS_ERROR;
+			schedule_work(&pulse8->irq_work);
+			break;
+		case MSGCODE_HIGH_ERROR:
+		case MSGCODE_LOW_ERROR:
+		case MSGCODE_RECEIVE_FAILED:
+		case MSGCODE_TIMEOUT_ERROR:
+			pulse8->new_rx_msg_len = 0;
+			break;
+		case MSGCODE_COMMAND_ACCEPTED:
+		case MSGCODE_COMMAND_REJECTED:
+		default:
+			if (pulse8->idx == 0)
+				break;
+			memcpy(pulse8->data, pulse8->buf, pulse8->idx);
+			pulse8->len = pulse8->idx;
+			complete(&pulse8->cmd_done);
+			break;
+		}
+		pulse8->idx = 0;
+		pulse8->started = false;
+		return IRQ_HANDLED;
+	} else if (data == MSGSTART) {
+		pulse8->idx = 0;
+		pulse8->started = true;
+		return IRQ_HANDLED;
+	}
+
+	if (pulse8->idx >= DATA_SIZE) {
+		dev_dbg(pulse8->dev,
+			"throwing away %d bytes of garbage\n", pulse8->idx);
+		pulse8->idx = 0;
+	}
+	pulse8->buf[pulse8->idx++] = data;
+	return IRQ_HANDLED;
+}
+
+static int pulse8_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+	struct pulse8 *pulse8 = cec_get_drvdata(adap);
+	u8 cmd[16];
+	int err;
+
+	mutex_lock(&pulse8->lock);
+	cmd[0] = MSGCODE_SET_CONTROLLED;
+	cmd[1] = enable;
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 1);
+	if (!enable) {
+		pulse8->rx_msg_num = 0;
+		pulse8->tx_done_status = 0;
+	}
+	mutex_unlock(&pulse8->lock);
+	return enable ? err : 0;
+}
+
+static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+	struct pulse8 *pulse8 = cec_get_drvdata(adap);
+	u16 mask = 0;
+	u16 pa = adap->phys_addr;
+	u8 cmd[16];
+	int err = 0;
+
+	mutex_lock(&pulse8->lock);
+	if (log_addr != CEC_LOG_ADDR_INVALID)
+		mask = 1 << log_addr;
+	cmd[0] = MSGCODE_SET_ACK_MASK;
+	cmd[1] = mask >> 8;
+	cmd[2] = mask & 0xff;
+	err = pulse8_send_and_wait(pulse8, cmd, 3,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if ((err && mask != 0) || pulse8->restoring_config)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_AUTO_ENABLED;
+	cmd[1] = log_addr == CEC_LOG_ADDR_INVALID ? 0 : 1;
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+	pulse8->autonomous = cmd[1];
+	if (log_addr == CEC_LOG_ADDR_INVALID)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_DEVICE_TYPE;
+	cmd[1] = adap->log_addrs.primary_device_type[0];
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	switch (adap->log_addrs.primary_device_type[0]) {
+	case CEC_OP_PRIM_DEVTYPE_TV:
+		mask = CEC_LOG_ADDR_MASK_TV;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_RECORD:
+		mask = CEC_LOG_ADDR_MASK_RECORD;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_TUNER:
+		mask = CEC_LOG_ADDR_MASK_TUNER;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
+		mask = CEC_LOG_ADDR_MASK_PLAYBACK;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
+		mask = CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_SWITCH:
+		mask = CEC_LOG_ADDR_MASK_UNREGISTERED;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
+		mask = CEC_LOG_ADDR_MASK_SPECIFIC;
+		break;
+	default:
+		mask = 0;
+		break;
+	}
+	cmd[0] = MSGCODE_SET_LOGICAL_ADDRESS_MASK;
+	cmd[1] = mask >> 8;
+	cmd[2] = mask & 0xff;
+	err = pulse8_send_and_wait(pulse8, cmd, 3,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS;
+	cmd[1] = log_addr;
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_PHYSICAL_ADDRESS;
+	cmd[1] = pa >> 8;
+	cmd[2] = pa & 0xff;
+	err = pulse8_send_and_wait(pulse8, cmd, 3,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_HDMI_VERSION;
+	cmd[1] = adap->log_addrs.cec_version;
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	if (adap->log_addrs.osd_name[0]) {
+		size_t osd_len = strlen(adap->log_addrs.osd_name);
+		char *osd_str = cmd + 1;
+
+		cmd[0] = MSGCODE_SET_OSD_NAME;
+		strscpy(cmd + 1, adap->log_addrs.osd_name, sizeof(cmd) - 1);
+		if (osd_len < 4) {
+			memset(osd_str + osd_len, ' ', 4 - osd_len);
+			osd_len = 4;
+			osd_str[osd_len] = '\0';
+			strscpy(adap->log_addrs.osd_name, osd_str,
+				sizeof(adap->log_addrs.osd_name));
+		}
+		err = pulse8_send_and_wait(pulse8, cmd, 1 + osd_len,
+					   MSGCODE_COMMAND_ACCEPTED, 0);
+		if (err)
+			goto unlock;
 	}
 
 unlock:
-	mutex_unlock(&pulse8->write_lock);
-	return err == -ENOTTY ? -EIO : err;
+	if (pulse8->restoring_config)
+		pulse8->restoring_config = false;
+	else
+		pulse8->config_pending = true;
+	mutex_unlock(&pulse8->lock);
+	return log_addr == CEC_LOG_ADDR_INVALID ? 0 : err;
+}
+
+static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+				    u32 signal_free_time, struct cec_msg *msg)
+{
+	struct pulse8 *pulse8 = cec_get_drvdata(adap);
+
+	pulse8->tx_msg = *msg;
+	if (debug)
+		dev_info(pulse8->dev, "adap transmit %*ph\n",
+			 msg->len, msg->msg);
+	pulse8->tx_signal_free_time = signal_free_time;
+	schedule_work(&pulse8->tx_work);
+	return 0;
+}
+
+static void pulse8_cec_adap_free(struct cec_adapter *adap)
+{
+	struct pulse8 *pulse8 = cec_get_drvdata(adap);
+
+	cancel_delayed_work_sync(&pulse8->ping_eeprom_work);
+	cancel_work_sync(&pulse8->irq_work);
+	cancel_work_sync(&pulse8->tx_work);
+	serio_close(pulse8->serio);
+	serio_set_drvdata(pulse8->serio, NULL);
+	kfree(pulse8);
+}
+
+static const struct cec_adap_ops pulse8_cec_adap_ops = {
+	.adap_enable = pulse8_cec_adap_enable,
+	.adap_log_addr = pulse8_cec_adap_log_addr,
+	.adap_transmit = pulse8_cec_adap_transmit,
+	.adap_free = pulse8_cec_adap_free,
+};
+
+static void pulse8_disconnect(struct serio *serio)
+{
+	struct pulse8 *pulse8 = serio_get_drvdata(serio);
+
+	cec_unregister_adapter(pulse8->adap);
 }
 
 static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
@@ -460,191 +788,34 @@ static int pulse8_apply_persistent_config(struct pulse8 *pulse8,
 	return 0;
 }
 
-static int pulse8_cec_adap_enable(struct cec_adapter *adap, bool enable)
+static void pulse8_ping_eeprom_work_handler(struct work_struct *work)
 {
-	struct pulse8 *pulse8 = cec_get_drvdata(adap);
-	u8 cmd[16];
-	int err;
+	struct pulse8 *pulse8 =
+		container_of(work, struct pulse8, ping_eeprom_work.work);
+	u8 cmd;
 
-	cmd[0] = MSGCODE_SET_CONTROLLED;
-	cmd[1] = enable;
-	err = pulse8_send_and_wait(pulse8, cmd, 2,
-				   MSGCODE_COMMAND_ACCEPTED, 1);
-	return enable ? err : 0;
-}
+	mutex_lock(&pulse8->lock);
+	cmd = MSGCODE_PING;
+	pulse8_send_and_wait(pulse8, &cmd, 1,
+			     MSGCODE_COMMAND_ACCEPTED, 0);
 
-static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
-{
-	struct pulse8 *pulse8 = cec_get_drvdata(adap);
-	u16 mask = 0;
-	u16 pa = adap->phys_addr;
-	u8 cmd[16];
-	int err = 0;
-
-	mutex_lock(&pulse8->config_lock);
-	if (log_addr != CEC_LOG_ADDR_INVALID)
-		mask = 1 << log_addr;
-	cmd[0] = MSGCODE_SET_ACK_MASK;
-	cmd[1] = mask >> 8;
-	cmd[2] = mask & 0xff;
-	err = pulse8_send_and_wait(pulse8, cmd, 3,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if ((err && mask != 0) || pulse8->restoring_config)
+	if (pulse8->vers < 2)
 		goto unlock;
 
-	cmd[0] = MSGCODE_SET_AUTO_ENABLED;
-	cmd[1] = log_addr == CEC_LOG_ADDR_INVALID ? 0 : 1;
-	err = pulse8_send_and_wait(pulse8, cmd, 2,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if (err)
-		goto unlock;
-	pulse8->autonomous = cmd[1];
-	if (log_addr == CEC_LOG_ADDR_INVALID)
-		goto unlock;
-
-	cmd[0] = MSGCODE_SET_DEVICE_TYPE;
-	cmd[1] = adap->log_addrs.primary_device_type[0];
-	err = pulse8_send_and_wait(pulse8, cmd, 2,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if (err)
-		goto unlock;
-
-	switch (adap->log_addrs.primary_device_type[0]) {
-	case CEC_OP_PRIM_DEVTYPE_TV:
-		mask = CEC_LOG_ADDR_MASK_TV;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_RECORD:
-		mask = CEC_LOG_ADDR_MASK_RECORD;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_TUNER:
-		mask = CEC_LOG_ADDR_MASK_TUNER;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
-		mask = CEC_LOG_ADDR_MASK_PLAYBACK;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
-		mask = CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_SWITCH:
-		mask = CEC_LOG_ADDR_MASK_UNREGISTERED;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
-		mask = CEC_LOG_ADDR_MASK_SPECIFIC;
-		break;
-	default:
-		mask = 0;
-		break;
+	if (pulse8->config_pending && persistent_config) {
+		dev_dbg(pulse8->dev, "writing pending config to EEPROM\n");
+		cmd = MSGCODE_WRITE_EEPROM;
+		if (pulse8_send_and_wait(pulse8, &cmd, 1,
+					 MSGCODE_COMMAND_ACCEPTED, 0))
+			dev_info(pulse8->dev, "failed to write pending config to EEPROM\n");
+		else
+			pulse8->config_pending = false;
 	}
-	cmd[0] = MSGCODE_SET_LOGICAL_ADDRESS_MASK;
-	cmd[1] = mask >> 8;
-	cmd[2] = mask & 0xff;
-	err = pulse8_send_and_wait(pulse8, cmd, 3,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if (err)
-		goto unlock;
-
-	cmd[0] = MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS;
-	cmd[1] = log_addr;
-	err = pulse8_send_and_wait(pulse8, cmd, 2,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if (err)
-		goto unlock;
-
-	cmd[0] = MSGCODE_SET_PHYSICAL_ADDRESS;
-	cmd[1] = pa >> 8;
-	cmd[2] = pa & 0xff;
-	err = pulse8_send_and_wait(pulse8, cmd, 3,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if (err)
-		goto unlock;
-
-	cmd[0] = MSGCODE_SET_HDMI_VERSION;
-	cmd[1] = adap->log_addrs.cec_version;
-	err = pulse8_send_and_wait(pulse8, cmd, 2,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if (err)
-		goto unlock;
-
-	if (adap->log_addrs.osd_name[0]) {
-		size_t osd_len = strlen(adap->log_addrs.osd_name);
-		char *osd_str = cmd + 1;
-
-		cmd[0] = MSGCODE_SET_OSD_NAME;
-		strscpy(cmd + 1, adap->log_addrs.osd_name, sizeof(cmd) - 1);
-		if (osd_len < 4) {
-			memset(osd_str + osd_len, ' ', 4 - osd_len);
-			osd_len = 4;
-			osd_str[osd_len] = '\0';
-			strscpy(adap->log_addrs.osd_name, osd_str,
-				sizeof(adap->log_addrs.osd_name));
-		}
-		err = pulse8_send_and_wait(pulse8, cmd, 1 + osd_len,
-					   MSGCODE_COMMAND_ACCEPTED, 0);
-		if (err)
-			goto unlock;
-	}
-
 unlock:
-	if (pulse8->restoring_config)
-		pulse8->restoring_config = false;
-	else
-		pulse8->config_pending = true;
-	mutex_unlock(&pulse8->config_lock);
-	return log_addr == CEC_LOG_ADDR_INVALID ? 0 : err;
+	schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
+	mutex_unlock(&pulse8->lock);
 }
 
-static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
-				    u32 signal_free_time, struct cec_msg *msg)
-{
-	struct pulse8 *pulse8 = cec_get_drvdata(adap);
-	u8 cmd[2];
-	unsigned int i;
-	int err;
-
-	cmd[0] = MSGCODE_TRANSMIT_IDLETIME;
-	cmd[1] = signal_free_time;
-	err = pulse8_send_and_wait(pulse8, cmd, 2,
-				   MSGCODE_COMMAND_ACCEPTED, 1);
-	cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY;
-	cmd[1] = cec_msg_is_broadcast(msg);
-	if (!err)
-		err = pulse8_send_and_wait(pulse8, cmd, 2,
-					   MSGCODE_COMMAND_ACCEPTED, 1);
-	cmd[0] = msg->len == 1 ? MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
-	cmd[1] = msg->msg[0];
-	if (!err)
-		err = pulse8_send_and_wait(pulse8, cmd, 2,
-					   MSGCODE_COMMAND_ACCEPTED, 1);
-	if (!err && msg->len > 1) {
-		cmd[0] = msg->len == 2 ? MSGCODE_TRANSMIT_EOM :
-					 MSGCODE_TRANSMIT;
-		cmd[1] = msg->msg[1];
-		err = pulse8_send_and_wait(pulse8, cmd, 2,
-					   MSGCODE_COMMAND_ACCEPTED, 1);
-		for (i = 0; !err && i + 2 < msg->len; i++) {
-			cmd[0] = (i + 2 == msg->len - 1) ?
-				MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
-			cmd[1] = msg->msg[i + 2];
-			err = pulse8_send_and_wait(pulse8, cmd, 2,
-						   MSGCODE_COMMAND_ACCEPTED, 1);
-		}
-	}
-
-	return err;
-}
-
-static int pulse8_received(struct cec_adapter *adap, struct cec_msg *msg)
-{
-	return -ENOMSG;
-}
-
-static const struct cec_adap_ops pulse8_cec_adap_ops = {
-	.adap_enable = pulse8_cec_adap_enable,
-	.adap_log_addr = pulse8_cec_adap_log_addr,
-	.adap_transmit = pulse8_cec_adap_transmit,
-	.received = pulse8_received,
-};
-
 static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
 {
 	u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_CAP_MONITOR_ALL;
@@ -667,9 +838,10 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
 
 	pulse8->dev = &serio->dev;
 	serio_set_drvdata(serio, pulse8);
-	INIT_WORK(&pulse8->work, pulse8_irq_work_handler);
-	mutex_init(&pulse8->write_lock);
-	mutex_init(&pulse8->config_lock);
+	INIT_WORK(&pulse8->irq_work, pulse8_irq_work_handler);
+	INIT_WORK(&pulse8->tx_work, pulse8_tx_work_handler);
+	mutex_init(&pulse8->lock);
+	spin_lock_init(&pulse8->msg_lock);
 	pulse8->config_pending = false;
 
 	err = serio_open(serio, drv);
@@ -709,33 +881,6 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
 	return err;
 }
 
-static void pulse8_ping_eeprom_work_handler(struct work_struct *work)
-{
-	struct pulse8 *pulse8 =
-		container_of(work, struct pulse8, ping_eeprom_work.work);
-	u8 cmd;
-
-	schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
-	cmd = MSGCODE_PING;
-	pulse8_send_and_wait(pulse8, &cmd, 1,
-			     MSGCODE_COMMAND_ACCEPTED, 0);
-
-	if (pulse8->vers < 2)
-		return;
-
-	mutex_lock(&pulse8->config_lock);
-	if (pulse8->config_pending && persistent_config) {
-		dev_dbg(pulse8->dev, "writing pending config to EEPROM\n");
-		cmd = MSGCODE_WRITE_EEPROM;
-		if (pulse8_send_and_wait(pulse8, &cmd, 1,
-					 MSGCODE_COMMAND_ACCEPTED, 0))
-			dev_info(pulse8->dev, "failed to write pending config to EEPROM\n");
-		else
-			pulse8->config_pending = false;
-	}
-	mutex_unlock(&pulse8->config_lock);
-}
-
 static const struct serio_device_id pulse8_serio_ids[] = {
 	{
 		.type	= SERIO_RS232,
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
index fb3178d..f6005d1 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
@@ -284,8 +284,8 @@ rdData[0]);
 		wrData[0] = 0x0;
 		ret = pvr2_encoder_write_words(hdw,MBOX_BASE,wrData,1);
 		break;
-
-	}; LOCK_GIVE(hdw->ctl_lock);
+	}
+	LOCK_GIVE(hdw->ctl_lock);
 
 	return ret;
 }
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 21f90a8..b22501f 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -1125,7 +1125,7 @@ static int stk_vidioc_dqbuf(struct file *filp,
 	sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_QUEUED;
 	sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_DONE;
 	sbuf->v4lbuf.sequence = ++dev->sequence;
-	sbuf->v4lbuf.timestamp = ns_to_timeval(ktime_get_ns());
+	v4l2_buffer_set_timestamp(&sbuf->v4lbuf, ktime_get_ns());
 
 	*buf = sbuf->v4lbuf;
 	return 0;
diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c
index d6c79c1..c26a0ff 100644
--- a/drivers/media/usb/tm6000/tm6000-alsa.c
+++ b/drivers/media/usb/tm6000/tm6000-alsa.c
@@ -10,7 +10,6 @@
 #include <linux/interrupt.h>
 #include <linux/usb.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
 
 #include <linux/delay.h>
 #include <sound/core.h>
@@ -94,40 +93,6 @@ static int _tm6000_stop_audio_dma(struct snd_tm6000_card *chip)
 	return 0;
 }
 
-static void dsp_buffer_free(struct snd_pcm_substream *substream)
-{
-	struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
-
-	dprintk(2, "Freeing buffer\n");
-
-	vfree(substream->runtime->dma_area);
-	substream->runtime->dma_area = NULL;
-	substream->runtime->dma_bytes = 0;
-}
-
-static int dsp_buffer_alloc(struct snd_pcm_substream *substream, int size)
-{
-	struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
-
-	dprintk(2, "Allocating buffer\n");
-
-	if (substream->runtime->dma_area) {
-		if (substream->runtime->dma_bytes > size)
-			return 0;
-
-		dsp_buffer_free(substream);
-	}
-
-	substream->runtime->dma_area = vmalloc(size);
-	if (!substream->runtime->dma_area)
-		return -ENOMEM;
-
-	substream->runtime->dma_bytes = size;
-
-	return 0;
-}
-
-
 /****************************************************************************
 				ALSA PCM Interface
  ****************************************************************************/
@@ -269,40 +234,6 @@ static int tm6000_fillbuf(struct tm6000_core *core, char *buf, int size)
 }
 
 /*
- * hw_params callback
- */
-static int snd_tm6000_hw_params(struct snd_pcm_substream *substream,
-			      struct snd_pcm_hw_params *hw_params)
-{
-	int size, rc;
-
-	size = params_period_bytes(hw_params) * params_periods(hw_params);
-
-	rc = dsp_buffer_alloc(substream, size);
-	if (rc < 0)
-		return rc;
-
-	return 0;
-}
-
-/*
- * hw free callback
- */
-static int snd_tm6000_hw_free(struct snd_pcm_substream *substream)
-{
-	struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
-	struct tm6000_core *core = chip->core;
-
-	if (atomic_read(&core->stream_started) > 0) {
-		atomic_set(&core->stream_started, 0);
-		schedule_work(&core->wq_trigger);
-	}
-
-	dsp_buffer_free(substream);
-	return 0;
-}
-
-/*
  * prepare callback
  */
 static int snd_tm6000_prepare(struct snd_pcm_substream *substream)
@@ -369,27 +300,15 @@ static snd_pcm_uframes_t snd_tm6000_pointer(struct snd_pcm_substream *substream)
 	return chip->buf_pos;
 }
 
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
-					     unsigned long offset)
-{
-	void *pageptr = subs->runtime->dma_area + offset;
-
-	return vmalloc_to_page(pageptr);
-}
-
 /*
  * operators
  */
 static const struct snd_pcm_ops snd_tm6000_pcm_ops = {
 	.open = snd_tm6000_pcm_open,
 	.close = snd_tm6000_close,
-	.ioctl = snd_pcm_lib_ioctl,
-	.hw_params = snd_tm6000_hw_params,
-	.hw_free = snd_tm6000_hw_free,
 	.prepare = snd_tm6000_prepare,
 	.trigger = snd_tm6000_card_trigger,
 	.pointer = snd_tm6000_pointer,
-	.page = snd_pcm_get_vmalloc_page,
 };
 
 /*
@@ -459,6 +378,7 @@ static int tm6000_audio_init(struct tm6000_core *dev)
 	strscpy(pcm->name, "Trident TM5600/60x0", sizeof(pcm->name));
 
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_tm6000_pcm_ops);
+	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
 
 	INIT_WORK(&dev->wq_trigger, audio_trigger);
 	rc = snd_card_register(card);
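
The hunks above drop the driver's hand-rolled vmalloc buffer management entirely: with snd_pcm_set_managed_buffer_all() the ALSA core allocates and frees the stream buffer around hw_params/hw_free, and the default ioctl handler is supplied by the core as well, so the ops table shrinks to the driver-specific callbacks. A minimal capture-side sketch of the pattern (all example_* names are hypothetical stand-ins, not code from this driver):

/* Managed-buffer capture PCM setup (illustrative sketch; example_* names
 * are hypothetical). The core handles buffer allocation and the default
 * ioctl, so no hw_params/hw_free/page/ioctl callbacks are needed. */
#include <sound/core.h>
#include <sound/pcm.h>

static int example_open(struct snd_pcm_substream *s) { return 0; }
static int example_close(struct snd_pcm_substream *s) { return 0; }
static int example_prepare(struct snd_pcm_substream *s) { return 0; }
static int example_trigger(struct snd_pcm_substream *s, int cmd) { return 0; }
static snd_pcm_uframes_t example_pointer(struct snd_pcm_substream *s) { return 0; }

static const struct snd_pcm_ops example_pcm_ops = {
	.open    = example_open,
	.close   = example_close,
	.prepare = example_prepare,
	.trigger = example_trigger,
	.pointer = example_pointer,
};

static void example_pcm_init(struct snd_pcm *pcm)
{
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &example_pcm_ops);
	/* size/max of 0: no preallocation, sized on demand at hw_params */
	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
				       NULL, 0, 0);
}
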
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
index e746c8d..b57e94f 100644
--- a/drivers/media/usb/usbtv/usbtv-audio.c
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -85,30 +85,6 @@ static int snd_usbtv_pcm_close(struct snd_pcm_substream *substream)
 	return 0;
 }
 
-static int snd_usbtv_hw_params(struct snd_pcm_substream *substream,
-		struct snd_pcm_hw_params *hw_params)
-{
-	int rv;
-	struct usbtv *chip = snd_pcm_substream_chip(substream);
-
-	rv = snd_pcm_lib_malloc_pages(substream,
-		params_buffer_bytes(hw_params));
-
-	if (rv < 0) {
-		dev_warn(chip->dev, "pcm audio buffer allocation failure %i\n",
-			rv);
-		return rv;
-	}
-
-	return 0;
-}
-
-static int snd_usbtv_hw_free(struct snd_pcm_substream *substream)
-{
-	snd_pcm_lib_free_pages(substream);
-	return 0;
-}
-
 static int snd_usbtv_prepare(struct snd_pcm_substream *substream)
 {
 	struct usbtv *chip = snd_pcm_substream_chip(substream);
@@ -336,9 +312,6 @@ static snd_pcm_uframes_t snd_usbtv_pointer(struct snd_pcm_substream *substream)
 static const struct snd_pcm_ops snd_usbtv_pcm_ops = {
 	.open = snd_usbtv_pcm_open,
 	.close = snd_usbtv_pcm_close,
-	.ioctl = snd_pcm_lib_ioctl,
-	.hw_params = snd_usbtv_hw_params,
-	.hw_free = snd_usbtv_hw_free,
 	.prepare = snd_usbtv_prepare,
 	.trigger = snd_usbtv_card_trigger,
 	.pointer = snd_usbtv_pointer,
@@ -377,7 +350,7 @@ int usbtv_audio_init(struct usbtv *usbtv)
 	pcm->private_data = usbtv;
 
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usbtv_pcm_ops);
-	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
 		NULL, USBTV_AUDIO_BUFFER, USBTV_AUDIO_BUFFER);
 
 	rv = snd_card_register(card);
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 93d36aa..5ca2c2f 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -696,7 +696,7 @@ static int vidioc_querybuf(struct file *file,
 	vb->length = usbvision->curwidth *
 		usbvision->curheight *
 		usbvision->palette.bytes_per_pixel;
-	vb->timestamp = ns_to_timeval(usbvision->frame[vb->index].ts);
+	v4l2_buffer_set_timestamp(vb, usbvision->frame[vb->index].ts);
 	vb->sequence = usbvision->frame[vb->index].sequence;
 	return 0;
 }
@@ -765,7 +765,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *vb)
 		V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 	vb->index = f->index;
 	vb->sequence = f->sequence;
-	vb->timestamp = ns_to_timeval(f->ts);
+	v4l2_buffer_set_timestamp(vb, f->ts);
 	vb->field = V4L2_FIELD_NONE;
 	vb->bytesused = f->scanlength;
 
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 428235c..9988355 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -497,6 +497,22 @@ static int uvc_parse_format(struct uvc_device *dev,
 			}
 		}
 
+		/* Some devices report bpp that doesn't match the format. */
+		if (dev->quirks & UVC_QUIRK_FORCE_BPP) {
+			const struct v4l2_format_info *info =
+				v4l2_format_info(format->fcc);
+
+			if (info) {
+				unsigned int div = info->hdiv * info->vdiv;
+
+				n = info->bpp[0] * div;
+				for (i = 1; i < info->comp_planes; i++)
+					n += info->bpp[i];
+
+				format->bpp = DIV_ROUND_UP(8 * n, div);
+			}
+		}
+
 		if (buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED) {
 			ftype = UVC_VS_FRAME_UNCOMPRESSED;
 		} else {
@@ -1493,6 +1509,11 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
 			break;
 		if (forward == prev)
 			continue;
+		if (forward->chain.next || forward->chain.prev) {
+			uvc_trace(UVC_TRACE_DESCR, "Found reference to "
+				"entity %d already in chain.\n", forward->id);
+			return -EINVAL;
+		}
 
 		switch (UVC_ENTITY_TYPE(forward)) {
 		case UVC_VC_EXTENSION_UNIT:
@@ -1574,6 +1595,13 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain,
 				return -1;
 			}
 
+			if (term->chain.next || term->chain.prev) {
+				uvc_trace(UVC_TRACE_DESCR, "Found reference to "
+					"entity %d already in chain.\n",
+					term->id);
+				return -EINVAL;
+			}
+
 			if (uvc_trace_param & UVC_TRACE_PROBE)
 				printk(KERN_CONT " %d", term->id);
 
@@ -2862,6 +2890,15 @@ static const struct usb_device_id uvc_ids[] = {
 	  .bInterfaceSubClass	= 1,
 	  .bInterfaceProtocol	= 0,
 	  .driver_info		= (kernel_ulong_t)&uvc_quirk_force_y8 },
+	/* GEO Semiconductor GC6500 */
+	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+				| USB_DEVICE_ID_MATCH_INT_INFO,
+	  .idVendor		= 0x29fe,
+	  .idProduct		= 0x4d53,
+	  .bInterfaceClass	= USB_CLASS_VIDEO,
+	  .bInterfaceSubClass	= 1,
+	  .bInterfaceProtocol	= 0,
+	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_FORCE_BPP) },
 	/* Intel RealSense D4M */
 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
 				| USB_DEVICE_ID_MATCH_INT_INFO,
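
To make the UVC_QUIRK_FORCE_BPP arithmetic concrete: for a 4:2:0 two-plane layout along the lines of NV12 (1 byte per luma sample, 2 bytes per 2x2-subsampled chroma sample; these numbers are an illustrative assumption, not copied from the kernel's format table), the computation comes out to 12 bits per pixel. The same steps in standalone C:

/* Worked example of the bpp-override arithmetic. The NV12-style
 * parameters below are assumptions for illustration only. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int hdiv = 2, vdiv = 2;	/* chroma subsampling factors */
	unsigned int bpp[2] = { 1, 2 };		/* bytes per sample, per plane */
	unsigned int comp_planes = 2;
	unsigned int div = hdiv * vdiv;
	unsigned int n, i;

	n = bpp[0] * div;
	for (i = 1; i < comp_planes; i++)
		n += bpp[i];

	/* 8 * 6 / 4 -> 12 bits per pixel for this layout */
	printf("bpp = %u\n", DIV_ROUND_UP(8 * n, div));
	return 0;
}
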
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index f773dc5..6ab972c 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -198,6 +198,7 @@
 #define UVC_QUIRK_RESTRICT_FRAME_RATE	0x00000200
 #define UVC_QUIRK_RESTORE_CTRLS_ON_INIT	0x00000400
 #define UVC_QUIRK_FORCE_Y8		0x00000800
+#define UVC_QUIRK_FORCE_BPP		0x00001000
 
 /* Format flags */
 #define UVC_FMT_FLAG_COMPRESSED		0x00000001
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index e1eaf113..a99e82e 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -468,13 +468,43 @@ struct v4l2_plane32 {
 	__u32			reserved[11];
 };
 
+/*
+ * This is correct for all architectures including i386, but not x32,
+ * which has different alignment requirements for timestamp
+ */
 struct v4l2_buffer32 {
 	__u32			index;
 	__u32			type;	/* enum v4l2_buf_type */
 	__u32			bytesused;
 	__u32			flags;
 	__u32			field;	/* enum v4l2_field */
-	struct compat_timeval	timestamp;
+	struct {
+		compat_s64	tv_sec;
+		compat_s64	tv_usec;
+	}			timestamp;
+	struct v4l2_timecode	timecode;
+	__u32			sequence;
+
+	/* memory location */
+	__u32			memory;	/* enum v4l2_memory */
+	union {
+		__u32           offset;
+		compat_long_t   userptr;
+		compat_caddr_t  planes;
+		__s32		fd;
+	} m;
+	__u32			length;
+	__u32			reserved2;
+	__s32			request_fd;
+};
+
+struct v4l2_buffer32_time32 {
+	__u32			index;
+	__u32			type;	/* enum v4l2_buf_type */
+	__u32			bytesused;
+	__u32			flags;
+	__u32			field;	/* enum v4l2_field */
+	struct old_timeval32	timestamp;
 	struct v4l2_timecode	timecode;
 	__u32			sequence;
 
@@ -581,6 +611,31 @@ static int bufsize_v4l2_buffer(struct v4l2_buffer32 __user *p32, u32 *size)
 	return 0;
 }
 
+static int bufsize_v4l2_buffer_time32(struct v4l2_buffer32_time32 __user *p32, u32 *size)
+{
+	u32 type;
+	u32 length;
+
+	if (!access_ok(p32, sizeof(*p32)) ||
+	    get_user(type, &p32->type) ||
+	    get_user(length, &p32->length))
+		return -EFAULT;
+
+	if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+		if (length > VIDEO_MAX_PLANES)
+			return -EINVAL;
+
+		/*
+		 * We don't really care if userspace decides to kill itself
+		 * by passing a very big length value
+		 */
+		*size = length * sizeof(struct v4l2_plane);
+	} else {
+		*size = 0;
+	}
+	return 0;
+}
+
 static int get_v4l2_buffer32(struct v4l2_buffer __user *p64,
 			     struct v4l2_buffer32 __user *p32,
 			     void __user *aux_buf, u32 aux_space)
@@ -681,6 +736,106 @@ static int get_v4l2_buffer32(struct v4l2_buffer __user *p64,
 	return 0;
 }
 
+static int get_v4l2_buffer32_time32(struct v4l2_buffer_time32 __user *p64,
+				    struct v4l2_buffer32_time32 __user *p32,
+				    void __user *aux_buf, u32 aux_space)
+{
+	u32 type;
+	u32 length;
+	s32 request_fd;
+	enum v4l2_memory memory;
+	struct v4l2_plane32 __user *uplane32;
+	struct v4l2_plane __user *uplane;
+	compat_caddr_t p;
+	int ret;
+
+	if (!access_ok(p32, sizeof(*p32)) ||
+	    assign_in_user(&p64->index, &p32->index) ||
+	    get_user(type, &p32->type) ||
+	    put_user(type, &p64->type) ||
+	    assign_in_user(&p64->flags, &p32->flags) ||
+	    get_user(memory, &p32->memory) ||
+	    put_user(memory, &p64->memory) ||
+	    get_user(length, &p32->length) ||
+	    put_user(length, &p64->length) ||
+	    get_user(request_fd, &p32->request_fd) ||
+	    put_user(request_fd, &p64->request_fd))
+		return -EFAULT;
+
+	if (V4L2_TYPE_IS_OUTPUT(type))
+		if (assign_in_user(&p64->bytesused, &p32->bytesused) ||
+		    assign_in_user(&p64->field, &p32->field) ||
+		    assign_in_user(&p64->timestamp.tv_sec,
+				   &p32->timestamp.tv_sec) ||
+		    assign_in_user(&p64->timestamp.tv_usec,
+				   &p32->timestamp.tv_usec))
+			return -EFAULT;
+
+	if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+		u32 num_planes = length;
+
+		if (num_planes == 0) {
+			/*
+			 * num_planes == 0 is legal, e.g. when userspace doesn't
+			 * need planes array on DQBUF
+			 */
+			return put_user(NULL, &p64->m.planes);
+		}
+		if (num_planes > VIDEO_MAX_PLANES)
+			return -EINVAL;
+
+		if (get_user(p, &p32->m.planes))
+			return -EFAULT;
+
+		uplane32 = compat_ptr(p);
+		if (!access_ok(uplane32,
+			       num_planes * sizeof(*uplane32)))
+			return -EFAULT;
+
+		/*
+		 * We don't really care if userspace decides to kill itself
+		 * by passing a very big num_planes value
+		 */
+		if (aux_space < num_planes * sizeof(*uplane))
+			return -EFAULT;
+
+		uplane = aux_buf;
+		if (put_user_force(uplane, &p64->m.planes))
+			return -EFAULT;
+
+		while (num_planes--) {
+			ret = get_v4l2_plane32(uplane, uplane32, memory);
+			if (ret)
+				return ret;
+			uplane++;
+			uplane32++;
+		}
+	} else {
+		switch (memory) {
+		case V4L2_MEMORY_MMAP:
+		case V4L2_MEMORY_OVERLAY:
+			if (assign_in_user(&p64->m.offset, &p32->m.offset))
+				return -EFAULT;
+			break;
+		case V4L2_MEMORY_USERPTR: {
+			compat_ulong_t userptr;
+
+			if (get_user(userptr, &p32->m.userptr) ||
+			    put_user((unsigned long)compat_ptr(userptr),
+				     &p64->m.userptr))
+				return -EFAULT;
+			break;
+		}
+		case V4L2_MEMORY_DMABUF:
+			if (assign_in_user(&p64->m.fd, &p32->m.fd))
+				return -EFAULT;
+			break;
+		}
+	}
+
+	return 0;
+}
+
 static int put_v4l2_buffer32(struct v4l2_buffer __user *p64,
 			     struct v4l2_buffer32 __user *p32)
 {
@@ -761,6 +916,86 @@ static int put_v4l2_buffer32(struct v4l2_buffer __user *p64,
 	return 0;
 }
 
+static int put_v4l2_buffer32_time32(struct v4l2_buffer_time32 __user *p64,
+				    struct v4l2_buffer32_time32 __user *p32)
+{
+	u32 type;
+	u32 length;
+	enum v4l2_memory memory;
+	struct v4l2_plane32 __user *uplane32;
+	struct v4l2_plane *uplane;
+	compat_caddr_t p;
+	int ret;
+
+	if (!access_ok(p32, sizeof(*p32)) ||
+	    assign_in_user(&p32->index, &p64->index) ||
+	    get_user(type, &p64->type) ||
+	    put_user(type, &p32->type) ||
+	    assign_in_user(&p32->flags, &p64->flags) ||
+	    get_user(memory, &p64->memory) ||
+	    put_user(memory, &p32->memory))
+		return -EFAULT;
+
+	if (assign_in_user(&p32->bytesused, &p64->bytesused) ||
+	    assign_in_user(&p32->field, &p64->field) ||
+	    assign_in_user(&p32->timestamp.tv_sec, &p64->timestamp.tv_sec) ||
+	    assign_in_user(&p32->timestamp.tv_usec, &p64->timestamp.tv_usec) ||
+	    copy_in_user(&p32->timecode, &p64->timecode, sizeof(p64->timecode)) ||
+	    assign_in_user(&p32->sequence, &p64->sequence) ||
+	    assign_in_user(&p32->reserved2, &p64->reserved2) ||
+	    assign_in_user(&p32->request_fd, &p64->request_fd) ||
+	    get_user(length, &p64->length) ||
+	    put_user(length, &p32->length))
+		return -EFAULT;
+
+	if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+		u32 num_planes = length;
+
+		if (num_planes == 0)
+			return 0;
+		/* We need to define uplane without __user, even though
+		 * it does point to data in userspace here. The reason is
+		 * that v4l2-ioctl.c copies it from userspace to kernelspace,
+		 * so its definition in videodev2.h doesn't have a
+		 * __user markup. Defining uplane with __user causes
+		 * smatch warnings, so instead declare it without __user
+		 * and cast it as a userspace pointer to put_v4l2_plane32().
+		 */
+		if (get_user(uplane, &p64->m.planes))
+			return -EFAULT;
+		if (get_user(p, &p32->m.planes))
+			return -EFAULT;
+		uplane32 = compat_ptr(p);
+
+		while (num_planes--) {
+			ret = put_v4l2_plane32((void __user *)uplane,
+					       uplane32, memory);
+			if (ret)
+				return ret;
+			++uplane;
+			++uplane32;
+		}
+	} else {
+		switch (memory) {
+		case V4L2_MEMORY_MMAP:
+		case V4L2_MEMORY_OVERLAY:
+			if (assign_in_user(&p32->m.offset, &p64->m.offset))
+				return -EFAULT;
+			break;
+		case V4L2_MEMORY_USERPTR:
+			if (assign_in_user(&p32->m.userptr, &p64->m.userptr))
+				return -EFAULT;
+			break;
+		case V4L2_MEMORY_DMABUF:
+			if (assign_in_user(&p32->m.fd, &p64->m.fd))
+				return -EFAULT;
+			break;
+		}
+	}
+
+	return 0;
+}
+
 struct v4l2_framebuffer32 {
 	__u32			capability;
 	__u32			flags;
@@ -1028,6 +1263,15 @@ static int put_v4l2_ext_controls32(struct file *file,
 	return 0;
 }
 
+#ifdef CONFIG_X86_64
+/*
+ * x86 is the only compat architecture with different struct alignment
+ * between 32-bit and 64-bit tasks.
+ *
+ * On all other architectures, v4l2_event32 and v4l2_event32_time32 are
+ * the same as v4l2_event and v4l2_event_time32, so we can use the native
+ * handlers, converting v4l2_event to v4l2_event_time32 if necessary.
+ */
 struct v4l2_event32 {
 	__u32				type;
 	union {
@@ -1036,7 +1280,23 @@ struct v4l2_event32 {
 	} u;
 	__u32				pending;
 	__u32				sequence;
-	struct compat_timespec		timestamp;
+	struct {
+		compat_s64		tv_sec;
+		compat_s64		tv_nsec;
+	} timestamp;
+	__u32				id;
+	__u32				reserved[8];
+};
+
+struct v4l2_event32_time32 {
+	__u32				type;
+	union {
+		compat_s64		value64;
+		__u8			data[64];
+	} u;
+	__u32				pending;
+	__u32				sequence;
+	struct old_timespec32		timestamp;
 	__u32				id;
 	__u32				reserved[8];
 };
@@ -1057,6 +1317,23 @@ static int put_v4l2_event32(struct v4l2_event __user *p64,
 	return 0;
 }
 
+static int put_v4l2_event32_time32(struct v4l2_event_time32 __user *p64,
+				   struct v4l2_event32_time32 __user *p32)
+{
+	if (!access_ok(p32, sizeof(*p32)) ||
+	    assign_in_user(&p32->type, &p64->type) ||
+	    copy_in_user(&p32->u, &p64->u, sizeof(p64->u)) ||
+	    assign_in_user(&p32->pending, &p64->pending) ||
+	    assign_in_user(&p32->sequence, &p64->sequence) ||
+	    assign_in_user(&p32->timestamp.tv_sec, &p64->timestamp.tv_sec) ||
+	    assign_in_user(&p32->timestamp.tv_nsec, &p64->timestamp.tv_nsec) ||
+	    assign_in_user(&p32->id, &p64->id) ||
+	    copy_in_user(p32->reserved, p64->reserved, sizeof(p32->reserved)))
+		return -EFAULT;
+	return 0;
+}
+#endif
+
 struct v4l2_edid32 {
 	__u32 pad;
 	__u32 start_block;
@@ -1108,10 +1385,13 @@ static int put_v4l2_edid32(struct v4l2_edid __user *p64,
 #define VIDIOC_G_FMT32		_IOWR('V',  4, struct v4l2_format32)
 #define VIDIOC_S_FMT32		_IOWR('V',  5, struct v4l2_format32)
 #define VIDIOC_QUERYBUF32	_IOWR('V',  9, struct v4l2_buffer32)
+#define VIDIOC_QUERYBUF32_TIME32 _IOWR('V',  9, struct v4l2_buffer32_time32)
 #define VIDIOC_G_FBUF32		_IOR ('V', 10, struct v4l2_framebuffer32)
 #define VIDIOC_S_FBUF32		_IOW ('V', 11, struct v4l2_framebuffer32)
 #define VIDIOC_QBUF32		_IOWR('V', 15, struct v4l2_buffer32)
+#define VIDIOC_QBUF32_TIME32	_IOWR('V', 15, struct v4l2_buffer32_time32)
 #define VIDIOC_DQBUF32		_IOWR('V', 17, struct v4l2_buffer32)
+#define VIDIOC_DQBUF32_TIME32	_IOWR('V', 17, struct v4l2_buffer32_time32)
 #define VIDIOC_ENUMSTD32	_IOWR('V', 25, struct v4l2_standard32)
 #define VIDIOC_ENUMINPUT32	_IOWR('V', 26, struct v4l2_input32)
 #define VIDIOC_G_EDID32		_IOWR('V', 40, struct v4l2_edid32)
@@ -1121,8 +1401,10 @@ static int put_v4l2_edid32(struct v4l2_edid __user *p64,
 #define VIDIOC_S_EXT_CTRLS32    _IOWR('V', 72, struct v4l2_ext_controls32)
 #define VIDIOC_TRY_EXT_CTRLS32  _IOWR('V', 73, struct v4l2_ext_controls32)
 #define	VIDIOC_DQEVENT32	_IOR ('V', 89, struct v4l2_event32)
+#define	VIDIOC_DQEVENT32_TIME32	_IOR ('V', 89, struct v4l2_event32_time32)
 #define VIDIOC_CREATE_BUFS32	_IOWR('V', 92, struct v4l2_create_buffers32)
 #define VIDIOC_PREPARE_BUF32	_IOWR('V', 93, struct v4l2_buffer32)
+#define VIDIOC_PREPARE_BUF32_TIME32 _IOWR('V', 93, struct v4l2_buffer32_time32)
 
 #define VIDIOC_OVERLAY32	_IOW ('V', 14, s32)
 #define VIDIOC_STREAMON32	_IOW ('V', 18, s32)
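
The *_TIME32 command values defined above are distinct numbers, not aliases: _IOWR() folds sizeof(arg) into the command word, so the same ('V', nr) pair produces different ioctl numbers for the time64 and time32 buffer layouts, which is what lets one switch statement dispatch both. A small userspace illustration (the struct members are placeholders; only the size difference matters):

/* Demonstrates that _IOWR() encodes the argument size into the command
 * number, so two layouts of "the same" ioctl get distinct values. */
#include <stdio.h>
#include <linux/ioctl.h>

struct fake_buf_time64 { long long tv_sec, tv_usec; unsigned int rest[16]; };
struct fake_buf_time32 { int tv_sec, tv_usec; unsigned int rest[16]; };

int main(void)
{
	unsigned long new_cmd = _IOWR('V', 9, struct fake_buf_time64);
	unsigned long old_cmd = _IOWR('V', 9, struct fake_buf_time32);

	printf("time64 cmd: %#lx\n", new_cmd);
	printf("time32 cmd: %#lx\n", old_cmd);
	return 0;
}
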
@@ -1183,36 +1465,45 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 	u32 aux_space;
 	int compatible_arg = 1;
 	long err = 0;
+	unsigned int ncmd;
 
 	/*
 	 * 1. When struct size is different, converts the command.
 	 */
 	switch (cmd) {
-	case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
-	case VIDIOC_S_FMT32: cmd = VIDIOC_S_FMT; break;
-	case VIDIOC_QUERYBUF32: cmd = VIDIOC_QUERYBUF; break;
-	case VIDIOC_G_FBUF32: cmd = VIDIOC_G_FBUF; break;
-	case VIDIOC_S_FBUF32: cmd = VIDIOC_S_FBUF; break;
-	case VIDIOC_QBUF32: cmd = VIDIOC_QBUF; break;
-	case VIDIOC_DQBUF32: cmd = VIDIOC_DQBUF; break;
-	case VIDIOC_ENUMSTD32: cmd = VIDIOC_ENUMSTD; break;
-	case VIDIOC_ENUMINPUT32: cmd = VIDIOC_ENUMINPUT; break;
-	case VIDIOC_TRY_FMT32: cmd = VIDIOC_TRY_FMT; break;
-	case VIDIOC_G_EXT_CTRLS32: cmd = VIDIOC_G_EXT_CTRLS; break;
-	case VIDIOC_S_EXT_CTRLS32: cmd = VIDIOC_S_EXT_CTRLS; break;
-	case VIDIOC_TRY_EXT_CTRLS32: cmd = VIDIOC_TRY_EXT_CTRLS; break;
-	case VIDIOC_DQEVENT32: cmd = VIDIOC_DQEVENT; break;
-	case VIDIOC_OVERLAY32: cmd = VIDIOC_OVERLAY; break;
-	case VIDIOC_STREAMON32: cmd = VIDIOC_STREAMON; break;
-	case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break;
-	case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break;
-	case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break;
-	case VIDIOC_G_OUTPUT32: cmd = VIDIOC_G_OUTPUT; break;
-	case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break;
-	case VIDIOC_CREATE_BUFS32: cmd = VIDIOC_CREATE_BUFS; break;
-	case VIDIOC_PREPARE_BUF32: cmd = VIDIOC_PREPARE_BUF; break;
-	case VIDIOC_G_EDID32: cmd = VIDIOC_G_EDID; break;
-	case VIDIOC_S_EDID32: cmd = VIDIOC_S_EDID; break;
+	case VIDIOC_G_FMT32: ncmd = VIDIOC_G_FMT; break;
+	case VIDIOC_S_FMT32: ncmd = VIDIOC_S_FMT; break;
+	case VIDIOC_QUERYBUF32: ncmd = VIDIOC_QUERYBUF; break;
+	case VIDIOC_QUERYBUF32_TIME32: ncmd = VIDIOC_QUERYBUF_TIME32; break;
+	case VIDIOC_G_FBUF32: ncmd = VIDIOC_G_FBUF; break;
+	case VIDIOC_S_FBUF32: ncmd = VIDIOC_S_FBUF; break;
+	case VIDIOC_QBUF32: ncmd = VIDIOC_QBUF; break;
+	case VIDIOC_QBUF32_TIME32: ncmd = VIDIOC_QBUF_TIME32; break;
+	case VIDIOC_DQBUF32: ncmd = VIDIOC_DQBUF; break;
+	case VIDIOC_DQBUF32_TIME32: ncmd = VIDIOC_DQBUF_TIME32; break;
+	case VIDIOC_ENUMSTD32: ncmd = VIDIOC_ENUMSTD; break;
+	case VIDIOC_ENUMINPUT32: ncmd = VIDIOC_ENUMINPUT; break;
+	case VIDIOC_TRY_FMT32: ncmd = VIDIOC_TRY_FMT; break;
+	case VIDIOC_G_EXT_CTRLS32: ncmd = VIDIOC_G_EXT_CTRLS; break;
+	case VIDIOC_S_EXT_CTRLS32: ncmd = VIDIOC_S_EXT_CTRLS; break;
+	case VIDIOC_TRY_EXT_CTRLS32: ncmd = VIDIOC_TRY_EXT_CTRLS; break;
+#ifdef CONFIG_X86_64
+	case VIDIOC_DQEVENT32: ncmd = VIDIOC_DQEVENT; break;
+	case VIDIOC_DQEVENT32_TIME32: ncmd = VIDIOC_DQEVENT_TIME32; break;
+#endif
+	case VIDIOC_OVERLAY32: ncmd = VIDIOC_OVERLAY; break;
+	case VIDIOC_STREAMON32: ncmd = VIDIOC_STREAMON; break;
+	case VIDIOC_STREAMOFF32: ncmd = VIDIOC_STREAMOFF; break;
+	case VIDIOC_G_INPUT32: ncmd = VIDIOC_G_INPUT; break;
+	case VIDIOC_S_INPUT32: ncmd = VIDIOC_S_INPUT; break;
+	case VIDIOC_G_OUTPUT32: ncmd = VIDIOC_G_OUTPUT; break;
+	case VIDIOC_S_OUTPUT32: ncmd = VIDIOC_S_OUTPUT; break;
+	case VIDIOC_CREATE_BUFS32: ncmd = VIDIOC_CREATE_BUFS; break;
+	case VIDIOC_PREPARE_BUF32: ncmd = VIDIOC_PREPARE_BUF; break;
+	case VIDIOC_PREPARE_BUF32_TIME32: ncmd = VIDIOC_PREPARE_BUF_TIME32; break;
+	case VIDIOC_G_EDID32: ncmd = VIDIOC_G_EDID; break;
+	case VIDIOC_S_EDID32: ncmd = VIDIOC_S_EDID; break;
+	default: ncmd = cmd; break;
 	}
 
 	/*
@@ -1221,11 +1512,11 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 	 * argument into it.
 	 */
 	switch (cmd) {
-	case VIDIOC_OVERLAY:
-	case VIDIOC_STREAMON:
-	case VIDIOC_STREAMOFF:
-	case VIDIOC_S_INPUT:
-	case VIDIOC_S_OUTPUT:
+	case VIDIOC_OVERLAY32:
+	case VIDIOC_STREAMON32:
+	case VIDIOC_STREAMOFF32:
+	case VIDIOC_S_INPUT32:
+	case VIDIOC_S_OUTPUT32:
 		err = alloc_userspace(sizeof(unsigned int), 0, &new_p64);
 		if (!err && assign_in_user((unsigned int __user *)new_p64,
 					   (compat_uint_t __user *)p32))
@@ -1233,23 +1524,23 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_G_INPUT:
-	case VIDIOC_G_OUTPUT:
+	case VIDIOC_G_INPUT32:
+	case VIDIOC_G_OUTPUT32:
 		err = alloc_userspace(sizeof(unsigned int), 0, &new_p64);
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_G_EDID:
-	case VIDIOC_S_EDID:
+	case VIDIOC_G_EDID32:
+	case VIDIOC_S_EDID32:
 		err = alloc_userspace(sizeof(struct v4l2_edid), 0, &new_p64);
 		if (!err)
 			err = get_v4l2_edid32(new_p64, p32);
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_G_FMT:
-	case VIDIOC_S_FMT:
-	case VIDIOC_TRY_FMT:
+	case VIDIOC_G_FMT32:
+	case VIDIOC_S_FMT32:
+	case VIDIOC_TRY_FMT32:
 		err = bufsize_v4l2_format(p32, &aux_space);
 		if (!err)
 			err = alloc_userspace(sizeof(struct v4l2_format),
@@ -1262,7 +1553,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_CREATE_BUFS:
+	case VIDIOC_CREATE_BUFS32:
 		err = bufsize_v4l2_create(p32, &aux_space);
 		if (!err)
 			err = alloc_userspace(sizeof(struct v4l2_create_buffers),
@@ -1275,10 +1566,10 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_PREPARE_BUF:
-	case VIDIOC_QUERYBUF:
-	case VIDIOC_QBUF:
-	case VIDIOC_DQBUF:
+	case VIDIOC_PREPARE_BUF32:
+	case VIDIOC_QUERYBUF32:
+	case VIDIOC_QBUF32:
+	case VIDIOC_DQBUF32:
 		err = bufsize_v4l2_buffer(p32, &aux_space);
 		if (!err)
 			err = alloc_userspace(sizeof(struct v4l2_buffer),
@@ -1291,7 +1582,23 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_S_FBUF:
+	case VIDIOC_PREPARE_BUF32_TIME32:
+	case VIDIOC_QUERYBUF32_TIME32:
+	case VIDIOC_QBUF32_TIME32:
+	case VIDIOC_DQBUF32_TIME32:
+		err = bufsize_v4l2_buffer_time32(p32, &aux_space);
+		if (!err)
+			err = alloc_userspace(sizeof(struct v4l2_buffer),
+					      aux_space, &new_p64);
+		if (!err) {
+			aux_buf = new_p64 + sizeof(struct v4l2_buffer);
+			err = get_v4l2_buffer32_time32(new_p64, p32,
+						       aux_buf, aux_space);
+		}
+		compatible_arg = 0;
+		break;
+
+	case VIDIOC_S_FBUF32:
 		err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
 				      &new_p64);
 		if (!err)
@@ -1299,13 +1606,13 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_G_FBUF:
+	case VIDIOC_G_FBUF32:
 		err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
 				      &new_p64);
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_ENUMSTD:
+	case VIDIOC_ENUMSTD32:
 		err = alloc_userspace(sizeof(struct v4l2_standard), 0,
 				      &new_p64);
 		if (!err)
@@ -1313,16 +1620,16 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_ENUMINPUT:
+	case VIDIOC_ENUMINPUT32:
 		err = alloc_userspace(sizeof(struct v4l2_input), 0, &new_p64);
 		if (!err)
 			err = get_v4l2_input32(new_p64, p32);
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_G_EXT_CTRLS:
-	case VIDIOC_S_EXT_CTRLS:
-	case VIDIOC_TRY_EXT_CTRLS:
+	case VIDIOC_G_EXT_CTRLS32:
+	case VIDIOC_S_EXT_CTRLS32:
+	case VIDIOC_TRY_EXT_CTRLS32:
 		err = bufsize_v4l2_ext_controls(p32, &aux_space);
 		if (!err)
 			err = alloc_userspace(sizeof(struct v4l2_ext_controls),
@@ -1334,10 +1641,16 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 		}
 		compatible_arg = 0;
 		break;
-	case VIDIOC_DQEVENT:
+#ifdef CONFIG_X86_64
+	case VIDIOC_DQEVENT32:
 		err = alloc_userspace(sizeof(struct v4l2_event), 0, &new_p64);
 		compatible_arg = 0;
 		break;
+	case VIDIOC_DQEVENT32_TIME32:
+		err = alloc_userspace(sizeof(struct v4l2_event_time32), 0, &new_p64);
+		compatible_arg = 0;
+		break;
+#endif
 	}
 	if (err)
 		return err;
@@ -1352,9 +1665,9 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 	 * Otherwise, it will pass the newly allocated @new_p64 argument.
 	 */
 	if (compatible_arg)
-		err = native_ioctl(file, cmd, (unsigned long)p32);
+		err = native_ioctl(file, ncmd, (unsigned long)p32);
 	else
-		err = native_ioctl(file, cmd, (unsigned long)new_p64);
+		err = native_ioctl(file, ncmd, (unsigned long)new_p64);
 
 	if (err == -ENOTTY)
 		return err;
@@ -1370,13 +1683,13 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 	 * the blocks to maximum allowed value.
 	 */
 	switch (cmd) {
-	case VIDIOC_G_EXT_CTRLS:
-	case VIDIOC_S_EXT_CTRLS:
-	case VIDIOC_TRY_EXT_CTRLS:
+	case VIDIOC_G_EXT_CTRLS32:
+	case VIDIOC_S_EXT_CTRLS32:
+	case VIDIOC_TRY_EXT_CTRLS32:
 		if (put_v4l2_ext_controls32(file, new_p64, p32))
 			err = -EFAULT;
 		break;
-	case VIDIOC_S_EDID:
+	case VIDIOC_S_EDID32:
 		if (put_v4l2_edid32(new_p64, p32))
 			err = -EFAULT;
 		break;
@@ -1389,49 +1702,62 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 	 * the original 32 bits structure.
 	 */
 	switch (cmd) {
-	case VIDIOC_S_INPUT:
-	case VIDIOC_S_OUTPUT:
-	case VIDIOC_G_INPUT:
-	case VIDIOC_G_OUTPUT:
+	case VIDIOC_S_INPUT32:
+	case VIDIOC_S_OUTPUT32:
+	case VIDIOC_G_INPUT32:
+	case VIDIOC_G_OUTPUT32:
 		if (assign_in_user((compat_uint_t __user *)p32,
 				   ((unsigned int __user *)new_p64)))
 			err = -EFAULT;
 		break;
 
-	case VIDIOC_G_FBUF:
+	case VIDIOC_G_FBUF32:
 		err = put_v4l2_framebuffer32(new_p64, p32);
 		break;
 
-	case VIDIOC_DQEVENT:
+#ifdef CONFIG_X86_64
+	case VIDIOC_DQEVENT32:
 		err = put_v4l2_event32(new_p64, p32);
 		break;
 
-	case VIDIOC_G_EDID:
+	case VIDIOC_DQEVENT32_TIME32:
+		err = put_v4l2_event32_time32(new_p64, p32);
+		break;
+#endif
+
+	case VIDIOC_G_EDID32:
 		err = put_v4l2_edid32(new_p64, p32);
 		break;
 
-	case VIDIOC_G_FMT:
-	case VIDIOC_S_FMT:
-	case VIDIOC_TRY_FMT:
+	case VIDIOC_G_FMT32:
+	case VIDIOC_S_FMT32:
+	case VIDIOC_TRY_FMT32:
 		err = put_v4l2_format32(new_p64, p32);
 		break;
 
-	case VIDIOC_CREATE_BUFS:
+	case VIDIOC_CREATE_BUFS32:
 		err = put_v4l2_create32(new_p64, p32);
 		break;
 
-	case VIDIOC_PREPARE_BUF:
-	case VIDIOC_QUERYBUF:
-	case VIDIOC_QBUF:
-	case VIDIOC_DQBUF:
+	case VIDIOC_PREPARE_BUF32:
+	case VIDIOC_QUERYBUF32:
+	case VIDIOC_QBUF32:
+	case VIDIOC_DQBUF32:
 		err = put_v4l2_buffer32(new_p64, p32);
 		break;
 
-	case VIDIOC_ENUMSTD:
+	case VIDIOC_PREPARE_BUF32_TIME32:
+	case VIDIOC_QUERYBUF32_TIME32:
+	case VIDIOC_QBUF32_TIME32:
+	case VIDIOC_DQBUF32_TIME32:
+		err = put_v4l2_buffer32_time32(new_p64, p32);
+		break;
+
+	case VIDIOC_ENUMSTD32:
 		err = put_v4l2_standard32(new_p64, p32);
 		break;
 
-	case VIDIOC_ENUMINPUT:
+	case VIDIOC_ENUMINPUT32:
 		err = put_v4l2_input32(new_p64, p32);
 		break;
 	}
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 9d673d1..290c6b2 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -27,6 +27,7 @@ static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
 static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
 {
 	struct v4l2_kevent *kev;
+	struct timespec64 ts;
 	unsigned long flags;
 
 	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
@@ -44,7 +45,9 @@ static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
 
 	kev->event.pending = fh->navailable;
 	*event = kev->event;
-	event->timestamp = ns_to_timespec(kev->ts);
+	ts = ns_to_timespec64(kev->ts);
+	event->timestamp.tv_sec = ts.tv_sec;
+	event->timestamp.tv_nsec = ts.tv_nsec;
 	kev->sev->first = sev_pos(kev->sev, 1);
 	kev->sev->in_use--;
 
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 192cac0..6ece432 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -422,7 +422,7 @@ static int __v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
 		       sizeof(*vep) - offsetof(typeof(*vep), bus));
 	}
 
-	pr_debug("===== begin V4L2 endpoint properties\n");
+	pr_debug("===== begin parsing endpoint %pfw\n", fwnode);
 
 	/*
 	 * Zero the fwnode graph endpoint memory in case we don't end up parsing
@@ -500,7 +500,7 @@ int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
 
 	ret = __v4l2_fwnode_endpoint_parse(fwnode, vep);
 
-	pr_debug("===== end V4L2 endpoint properties\n");
+	pr_debug("===== end parsing endpoint %pfw\n", fwnode);
 
 	return ret;
 }
@@ -551,7 +551,7 @@ int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode,
 				vep->link_frequencies[i]);
 	}
 
-	pr_debug("===== end V4L2 endpoint properties\n");
+	pr_debug("===== end parsing endpoint %pfw\n", fwnode);
 
 	return 0;
 }
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 003b742..aaf83e2 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -474,10 +474,10 @@ static void v4l_print_buffer(const void *arg, bool write_only)
 	const struct v4l2_plane *plane;
 	int i;
 
-	pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s",
-			p->timestamp.tv_sec / 3600,
-			(int)(p->timestamp.tv_sec / 60) % 60,
-			(int)(p->timestamp.tv_sec % 60),
+	pr_cont("%02d:%02d:%02d.%09ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s",
+			(int)p->timestamp.tv_sec / 3600,
+			((int)p->timestamp.tv_sec / 60) % 60,
+			((int)p->timestamp.tv_sec % 60),
 			(long)p->timestamp.tv_usec,
 			p->index,
 			prt_names(p->type, v4l2_type_names), p->request_fd,
@@ -821,7 +821,7 @@ static void v4l_print_event(const void *arg, bool write_only)
 	const struct v4l2_event *p = arg;
 	const struct v4l2_event_ctrl *c;
 
-	pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, timestamp=%lu.%9.9lu\n",
+	pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, timestamp=%llu.%9.9llu\n",
 			p->type, p->pending, p->sequence, p->id,
 			p->timestamp.tv_sec, p->timestamp.tv_nsec);
 	switch (p->type) {
@@ -961,7 +961,7 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
 			return 0;
 		break;
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		if (is_vid && is_rx && ops->vidioc_g_fmt_vid_cap_mplane)
+		if ((is_vid || is_tch) && is_rx && ops->vidioc_g_fmt_vid_cap_mplane)
 			return 0;
 		break;
 	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
@@ -3023,8 +3023,162 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
 	return ret;
 }
 
+static unsigned int video_translate_cmd(unsigned int cmd)
+{
+	switch (cmd) {
+#ifdef CONFIG_COMPAT_32BIT_TIME
+	case VIDIOC_DQEVENT_TIME32:
+		return VIDIOC_DQEVENT;
+	case VIDIOC_QUERYBUF_TIME32:
+		return VIDIOC_QUERYBUF;
+	case VIDIOC_QBUF_TIME32:
+		return VIDIOC_QBUF;
+	case VIDIOC_DQBUF_TIME32:
+		return VIDIOC_DQBUF;
+	case VIDIOC_PREPARE_BUF_TIME32:
+		return VIDIOC_PREPARE_BUF;
+#endif
+	}
+
+	return cmd;
+}
+
+static int video_get_user(void __user *arg, void *parg, unsigned int cmd,
+			  bool *always_copy)
+{
+	unsigned int n = _IOC_SIZE(cmd);
+
+	if (!(_IOC_DIR(cmd) & _IOC_WRITE)) {
+		/* read-only ioctl */
+		memset(parg, 0, n);
+		return 0;
+	}
+
+	switch (cmd) {
+#ifdef CONFIG_COMPAT_32BIT_TIME
+	case VIDIOC_QUERYBUF_TIME32:
+	case VIDIOC_QBUF_TIME32:
+	case VIDIOC_DQBUF_TIME32:
+	case VIDIOC_PREPARE_BUF_TIME32: {
+		struct v4l2_buffer_time32 vb32;
+		struct v4l2_buffer *vb = parg;
+
+		if (copy_from_user(&vb32, arg, sizeof(vb32)))
+			return -EFAULT;
+
+		*vb = (struct v4l2_buffer) {
+			.index		= vb32.index,
+			.type		= vb32.type,
+			.bytesused	= vb32.bytesused,
+			.flags		= vb32.flags,
+			.field		= vb32.field,
+			.timestamp.tv_sec	= vb32.timestamp.tv_sec,
+			.timestamp.tv_usec	= vb32.timestamp.tv_usec,
+			.timecode	= vb32.timecode,
+			.sequence	= vb32.sequence,
+			.memory		= vb32.memory,
+			.m.userptr	= vb32.m.userptr,
+			.length		= vb32.length,
+			.request_fd	= vb32.request_fd,
+		};
+
+		if (cmd == VIDIOC_QUERYBUF_TIME32)
+			vb->request_fd = 0;
+
+		break;
+	}
+#endif
+	default:
+		/*
+		 * In some cases, only a few fields are used as input,
+		 * i.e. when the app sets "index" and then the driver
+		 * fills in the rest of the structure for the thing
+		 * with that index.  We only need to copy up the first
+		 * non-input field.
+		 */
+		if (v4l2_is_known_ioctl(cmd)) {
+			u32 flags = v4l2_ioctls[_IOC_NR(cmd)].flags;
+
+			if (flags & INFO_FL_CLEAR_MASK)
+				n = (flags & INFO_FL_CLEAR_MASK) >> 16;
+			*always_copy = flags & INFO_FL_ALWAYS_COPY;
+		}
+
+		if (copy_from_user(parg, (void __user *)arg, n))
+			return -EFAULT;
+
+		/* zero out anything we don't copy from userspace */
+		if (n < _IOC_SIZE(cmd))
+			memset((u8 *)parg + n, 0, _IOC_SIZE(cmd) - n);
+		break;
+	}
+
+	return 0;
+}
+
+static int video_put_user(void __user *arg, void *parg, unsigned int cmd)
+{
+	if (!(_IOC_DIR(cmd) & _IOC_READ))
+		return 0;
+
+	switch (cmd) {
+#ifdef CONFIG_COMPAT_32BIT_TIME
+	case VIDIOC_DQEVENT_TIME32: {
+		struct v4l2_event *ev = parg;
+		struct v4l2_event_time32 ev32 = {
+			.type		= ev->type,
+			.pending	= ev->pending,
+			.sequence	= ev->sequence,
+			.timestamp.tv_sec  = ev->timestamp.tv_sec,
+			.timestamp.tv_nsec = ev->timestamp.tv_nsec,
+			.id		= ev->id,
+		};
+
+		memcpy(&ev32.u, &ev->u, sizeof(ev->u));
+		memcpy(&ev32.reserved, &ev->reserved, sizeof(ev->reserved));
+
+		if (copy_to_user(arg, &ev32, sizeof(ev32)))
+			return -EFAULT;
+		break;
+	}
+	case VIDIOC_QUERYBUF_TIME32:
+	case VIDIOC_QBUF_TIME32:
+	case VIDIOC_DQBUF_TIME32:
+	case VIDIOC_PREPARE_BUF_TIME32: {
+		struct v4l2_buffer *vb = parg;
+		struct v4l2_buffer_time32 vb32 = {
+			.index		= vb->index,
+			.type		= vb->type,
+			.bytesused	= vb->bytesused,
+			.flags		= vb->flags,
+			.field		= vb->field,
+			.timestamp.tv_sec	= vb->timestamp.tv_sec,
+			.timestamp.tv_usec	= vb->timestamp.tv_usec,
+			.timecode	= vb->timecode,
+			.sequence	= vb->sequence,
+			.memory		= vb->memory,
+			.m.userptr	= vb->m.userptr,
+			.length		= vb->length,
+			.request_fd	= vb->request_fd,
+		};
+
+		if (copy_to_user(arg, &vb32, sizeof(vb32)))
+			return -EFAULT;
+		break;
+	}
+#endif
+	default:
+		/*  Copy results into user buffer  */
+		if (copy_to_user(arg, parg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+		break;
+	}
+
+	return 0;
+}
+
 long
-video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
+video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
 	       v4l2_kioctl func)
 {
 	char	sbuf[128];
@@ -3036,6 +3190,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
 	size_t  array_size = 0;
 	void __user *user_ptr = NULL;
 	void	**kernel_ptr = NULL;
+	unsigned int cmd = video_translate_cmd(orig_cmd);
 	const size_t ioc_size = _IOC_SIZE(cmd);
 
 	/*  Copy arguments into temp kernel buffer  */
@@ -3050,35 +3205,10 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
 			parg = mbuf;
 		}
 
-		err = -EFAULT;
-		if (_IOC_DIR(cmd) & _IOC_WRITE) {
-			unsigned int n = ioc_size;
-
-			/*
-			 * In some cases, only a few fields are used as input,
-			 * i.e. when the app sets "index" and then the driver
-			 * fills in the rest of the structure for the thing
-			 * with that index.  We only need to copy up the first
-			 * non-input field.
-			 */
-			if (v4l2_is_known_ioctl(cmd)) {
-				u32 flags = v4l2_ioctls[_IOC_NR(cmd)].flags;
-
-				if (flags & INFO_FL_CLEAR_MASK)
-					n = (flags & INFO_FL_CLEAR_MASK) >> 16;
-				always_copy = flags & INFO_FL_ALWAYS_COPY;
-			}
-
-			if (copy_from_user(parg, (void __user *)arg, n))
-				goto out;
-
-			/* zero out anything we don't copy from userspace */
-			if (n < ioc_size)
-				memset((u8 *)parg + n, 0, ioc_size - n);
-		} else {
-			/* read-only ioctl */
-			memset(parg, 0, ioc_size);
-		}
+		err = video_get_user((void __user *)arg, parg, orig_cmd,
+				     &always_copy);
+		if (err)
+			goto out;
 	}
 
 	err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr);
@@ -3131,15 +3261,8 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
 		goto out;
 
 out_array_args:
-	/*  Copy results into user buffer  */
-	switch (_IOC_DIR(cmd)) {
-	case _IOC_READ:
-	case (_IOC_WRITE | _IOC_READ):
-		if (copy_to_user((void __user *)arg, parg, ioc_size))
-			err = -EFAULT;
-		break;
-	}
-
+	if (video_put_user((void __user *)arg, parg, orig_cmd))
+		err = -EFAULT;
 out:
 	kvfree(mbuf);
 	return err;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 9e987c0..a376b35 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -331,8 +331,8 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
 	struct v4l2_fh *vfh = file->private_data;
 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
 	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
-	int rval;
 #endif
+	int rval;
 
 	switch (cmd) {
 	case VIDIOC_QUERYCTRL:
@@ -392,6 +392,30 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
 
 		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
 
+	case VIDIOC_DQEVENT_TIME32: {
+		struct v4l2_event_time32 *ev32 = arg;
+		struct v4l2_event ev = { };
+
+		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+			return -ENOIOCTLCMD;
+
+		rval = v4l2_event_dequeue(vfh, &ev, file->f_flags & O_NONBLOCK);
+
+		*ev32 = (struct v4l2_event_time32) {
+			.type		= ev.type,
+			.pending	= ev.pending,
+			.sequence	= ev.sequence,
+			.timestamp.tv_sec  = ev.timestamp.tv_sec,
+			.timestamp.tv_nsec = ev.timestamp.tv_nsec,
+			.id		= ev.id,
+		};
+
+		memcpy(&ev32->u, &ev.u, sizeof(ev.u));
+		memcpy(&ev32->reserved, &ev.reserved, sizeof(ev.reserved));
+
+		return rval;
+	}
+
 	case VIDIOC_SUBSCRIBE_EVENT:
 		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
 
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 939fc11..2686f03 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 
 #include <media/videobuf-core.h>
+#include <media/v4l2-common.h>
 
 #define MAGIC_BUFFER 0x20070728
 #define MAGIC_CHECK(is, should)						\
@@ -364,7 +365,7 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
 	}
 
 	b->field     = vb->field;
-	b->timestamp = ns_to_timeval(vb->ts);
+	v4l2_buffer_set_timestamp(b, vb->ts);
 	b->bytesused = vb->size;
 	b->sequence  = vb->field_count >> 1;
 }
@@ -578,7 +579,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
 		    || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
 			buf->size = b->bytesused;
 			buf->field = b->field;
-			buf->ts = v4l2_timeval_to_ns(&b->timestamp);
+			buf->ts = v4l2_buffer_get_timestamp(b);
 		}
 		break;
 	case V4L2_MEMORY_USERPTR:
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 66a6c6c..13b65ed 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -183,12 +183,12 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
 	dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
 		data, size, dma->nr_pages);
 
-	err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+	err = pin_user_pages(data & PAGE_MASK, dma->nr_pages,
 			     flags | FOLL_LONGTERM, dma->pages, NULL);
 
 	if (err != dma->nr_pages) {
 		dma->nr_pages = (err >= 0) ? err : 0;
-		dprintk(1, "get_user_pages: err=%d [%d]\n", err,
+		dprintk(1, "pin_user_pages: err=%d [%d]\n", err,
 			dma->nr_pages);
 		return err < 0 ? err : -EINVAL;
 	}
@@ -349,8 +349,8 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
 	BUG_ON(dma->sglen);
 
 	if (dma->pages) {
-		for (i = 0; i < dma->nr_pages; i++)
-			put_page(dma->pages[i]);
+		unpin_user_pages_dirty_lock(dma->pages, dma->nr_pages,
+					    dma->direction == DMA_FROM_DEVICE);
 		kfree(dma->pages);
 		dma->pages = NULL;
 	}
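
The change above is part of the wider get_user_pages() to pin_user_pages() conversion: pages backing long-lived DMA are now taken with FOLL_PIN semantics and released through unpin_user_pages_dirty_lock(), which folds in the dirty-marking that the old put_page() loop handled separately. A condensed sketch of the pairing (names hypothetical; locking, partial-pin cleanup and other error paths elided):

/* Sketch of the pin/unpin pairing for long-lived DMA buffers; the caller
 * is assumed to hold the mm locking that pin_user_pages() requires. */
#include <linux/mm.h>

static long example_pin(unsigned long uaddr, unsigned long nr_pages,
			struct page **pages)
{
	long pinned = pin_user_pages(uaddr & PAGE_MASK, nr_pages,
				     FOLL_WRITE | FOLL_LONGTERM, pages, NULL);

	return pinned == nr_pages ? 0 : -EINVAL;
}

static void example_unpin(struct page **pages, unsigned long nr_pages,
			  bool dirtied_by_device)
{
	/* Marks pages dirty (if requested) and drops the FOLL_PIN reference. */
	unpin_user_pages_dirty_lock(pages, nr_pages, dirtied_by_device);
}
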
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
index 095f8a3..886aea5 100644
--- a/drivers/memory/mvebu-devbus.c
+++ b/drivers/memory/mvebu-devbus.c
@@ -267,7 +267,6 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
 	struct devbus_read_params r;
 	struct devbus_write_params w;
 	struct devbus *devbus;
-	struct resource *res;
 	struct clk *clk;
 	unsigned long rate;
 	int err;
@@ -277,8 +276,7 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	devbus->dev = dev;
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	devbus->base = devm_ioremap_resource(&pdev->dev, res);
+	devbus->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(devbus->base))
 		return PTR_ERR(devbus->base);
 
diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig
index e9c3ce9..20a8406 100644
--- a/drivers/memory/samsung/Kconfig
+++ b/drivers/memory/samsung/Kconfig
@@ -8,7 +8,7 @@
 if SAMSUNG_MC
 
 config EXYNOS5422_DMC
-	tristate "EXYNOS5422 Dynamic Memory Controller driver"
+	tristate "Exynos5422 Dynamic Memory Controller driver"
 	depends on ARCH_EXYNOS || (COMPILE_TEST && HAS_IOMEM)
 	select DDR
 	depends on DEVFREQ_GOV_SIMPLE_ONDEMAND
diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c
index c27c610..6510d7b 100644
--- a/drivers/memory/samsung/exynos-srom.c
+++ b/drivers/memory/samsung/exynos-srom.c
@@ -3,7 +3,7 @@
 // Copyright (c) 2015 Samsung Electronics Co., Ltd.
 //	      http://www.samsung.com/
 //
-// EXYNOS - SROM Controller support
+// Exynos - SROM Controller support
 // Author: Pankaj Dubey <pankaj.dubey@samsung.com>
 
 #include <linux/io.h>
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index 47dbf6d..81a1b1d 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -1374,7 +1374,6 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	struct exynos5_dmc *dmc;
-	struct resource *res;
 	int irq[2];
 
 	dmc = devm_kzalloc(dev, sizeof(*dmc), GFP_KERNEL);
@@ -1386,13 +1385,11 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
 	dmc->dev = dev;
 	platform_set_drvdata(pdev, dmc);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	dmc->base_drexi0 = devm_ioremap_resource(dev, res);
+	dmc->base_drexi0 = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(dmc->base_drexi0))
 		return PTR_ERR(dmc->base_drexi0);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	dmc->base_drexi1 = devm_ioremap_resource(dev, res);
+	dmc->base_drexi1 = devm_platform_ioremap_resource(pdev, 1);
 	if (IS_ERR(dmc->base_drexi1))
 		return PTR_ERR(dmc->base_drexi1);
 
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
index 3d23c42..529d10b 100644
--- a/drivers/memory/tegra/Makefile
+++ b/drivers/memory/tegra/Makefile
@@ -13,4 +13,5 @@
 obj-$(CONFIG_TEGRA20_EMC)  += tegra20-emc.o
 obj-$(CONFIG_TEGRA30_EMC)  += tegra30-emc.o
 obj-$(CONFIG_TEGRA124_EMC) += tegra124-emc.o
-obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o
+obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o tegra186-emc.o
+obj-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186.o tegra186-emc.o
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 464f0ce..21f0524 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -467,12 +467,20 @@ struct tegra_emc {
 
 	void __iomem *regs;
 
+	struct clk *clk;
+
 	enum emc_dram_type dram_type;
 	unsigned int dram_num;
 
 	struct emc_timing last_timing;
 	struct emc_timing *timings;
 	unsigned int num_timings;
+
+	struct {
+		struct dentry *root;
+		unsigned long min_rate;
+		unsigned long max_rate;
+	} debugfs;
 };
 
 /* Timing change sequence functions */
@@ -998,38 +1006,51 @@ tegra_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code)
 	return NULL;
 }
 
-/* Debugfs entry */
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ *   /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ *   - available_rates: This file contains a list of valid, space-separated
+ *     EMC frequencies.
+ *
+ *   - min_rate: Writing a value to this file sets the given frequency as the
+ *       floor of the permitted range. If this is higher than the currently
+ *       configured EMC frequency, this will cause the frequency to be
+ *       increased so that it stays within the valid range.
+ *
+ *   - max_rate: Similarly to the min_rate file, writing a value to this file
+ *       sets the given frequency as the ceiling of the permitted range. If
+ *       the value is lower than the currently configured EMC frequency, this
+ *       will cause the frequency to be decreased so that it stays within the
+ *       valid range.
+ */
 
-static int emc_debug_rate_get(void *data, u64 *rate)
+static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
 {
-	struct clk *c = data;
+	unsigned int i;
 
-	*rate = clk_get_rate(c);
+	for (i = 0; i < emc->num_timings; i++)
+		if (rate == emc->timings[i].rate)
+			return true;
 
-	return 0;
+	return false;
 }
 
-static int emc_debug_rate_set(void *data, u64 rate)
-{
-	struct clk *c = data;
-
-	return clk_set_rate(c, rate);
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(emc_debug_rate_fops, emc_debug_rate_get,
-			emc_debug_rate_set, "%lld\n");
-
-static int emc_debug_supported_rates_show(struct seq_file *s, void *data)
+static int tegra_emc_debug_available_rates_show(struct seq_file *s,
+						void *data)
 {
 	struct tegra_emc *emc = s->private;
 	const char *prefix = "";
 	unsigned int i;
 
 	for (i = 0; i < emc->num_timings; i++) {
-		struct emc_timing *timing = &emc->timings[i];
-
-		seq_printf(s, "%s%lu", prefix, timing->rate);
-
+		seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
 		prefix = " ";
 	}
 
@@ -1038,46 +1059,126 @@ static int emc_debug_supported_rates_show(struct seq_file *s, void *data)
 	return 0;
 }
 
-static int emc_debug_supported_rates_open(struct inode *inode,
-					  struct file *file)
+static int tegra_emc_debug_available_rates_open(struct inode *inode,
+						struct file *file)
 {
-	return single_open(file, emc_debug_supported_rates_show,
+	return single_open(file, tegra_emc_debug_available_rates_show,
 			   inode->i_private);
 }
 
-static const struct file_operations emc_debug_supported_rates_fops = {
-	.open = emc_debug_supported_rates_open,
+static const struct file_operations tegra_emc_debug_available_rates_fops = {
+	.open = tegra_emc_debug_available_rates_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = single_release,
 };
 
+static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+	struct tegra_emc *emc = data;
+
+	*rate = emc->debugfs.min_rate;
+
+	return 0;
+}
+
+static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+{
+	struct tegra_emc *emc = data;
+	int err;
+
+	if (!tegra_emc_validate_rate(emc, rate))
+		return -EINVAL;
+
+	err = clk_set_min_rate(emc->clk, rate);
+	if (err < 0)
+		return err;
+
+	emc->debugfs.min_rate = rate;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+			tegra_emc_debug_min_rate_get,
+			tegra_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+	struct tegra_emc *emc = data;
+
+	*rate = emc->debugfs.max_rate;
+
+	return 0;
+}
+
+static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+{
+	struct tegra_emc *emc = data;
+	int err;
+
+	if (!tegra_emc_validate_rate(emc, rate))
+		return -EINVAL;
+
+	err = clk_set_max_rate(emc->clk, rate);
+	if (err < 0)
+		return err;
+
+	emc->debugfs.max_rate = rate;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+			tegra_emc_debug_max_rate_get,
+			tegra_emc_debug_max_rate_set, "%llu\n");
+
 static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
 {
-	struct dentry *root, *file;
-	struct clk *clk;
+	unsigned int i;
+	int err;
 
-	root = debugfs_create_dir("emc", NULL);
-	if (!root) {
+	emc->clk = devm_clk_get(dev, "emc");
+	if (IS_ERR(emc->clk)) {
+		if (PTR_ERR(emc->clk) != -ENODEV) {
+			dev_err(dev, "failed to get EMC clock: %ld\n",
+				PTR_ERR(emc->clk));
+			return;
+		}
+	}
+
+	emc->debugfs.min_rate = ULONG_MAX;
+	emc->debugfs.max_rate = 0;
+
+	for (i = 0; i < emc->num_timings; i++) {
+		if (emc->timings[i].rate < emc->debugfs.min_rate)
+			emc->debugfs.min_rate = emc->timings[i].rate;
+
+		if (emc->timings[i].rate > emc->debugfs.max_rate)
+			emc->debugfs.max_rate = emc->timings[i].rate;
+	}
+
+	err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+				 emc->debugfs.max_rate);
+	if (err < 0) {
+		dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+			emc->debugfs.min_rate, emc->debugfs.max_rate,
+			emc->clk);
+		return;
+	}
+
+	emc->debugfs.root = debugfs_create_dir("emc", NULL);
+	if (!emc->debugfs.root) {
 		dev_err(dev, "failed to create debugfs directory\n");
 		return;
 	}
 
-	clk = clk_get_sys("tegra-clk-debug", "emc");
-	if (IS_ERR(clk)) {
-		dev_err(dev, "failed to get debug clock: %ld\n", PTR_ERR(clk));
-		return;
-	}
-
-	file = debugfs_create_file("rate", S_IRUGO | S_IWUSR, root, clk,
-				   &emc_debug_rate_fops);
-	if (!file)
-		dev_err(dev, "failed to create debugfs entry\n");
-
-	file = debugfs_create_file("supported_rates", S_IRUGO, root, emc,
-				   &emc_debug_supported_rates_fops);
-	if (!file)
-		dev_err(dev, "failed to create debugfs entry\n");
+	debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root, emc,
+			    &tegra_emc_debug_available_rates_fops);
+	debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+			    emc, &tegra_emc_debug_min_rate_fops);
+	debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+			    emc, &tegra_emc_debug_max_rate_fops);
 }
 
 static int tegra_emc_probe(struct platform_device *pdev)
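
As the debugfs block comment earlier in this file spells out, the permitted EMC frequency range can be adjusted from userspace by writing one of the rates listed in available_rates into min_rate or max_rate; anything else is rejected with -EINVAL by the validate helper. A minimal userspace sketch (204000000 is an arbitrary example value, not a rate guaranteed to exist on any board):

/* Raise the EMC frequency floor via debugfs (requires root and a mounted
 * debugfs). The rate written must be one of the values reported by
 * /sys/kernel/debug/emc/available_rates. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *rate = "204000000";	/* arbitrary example rate */
	int fd = open("/sys/kernel/debug/emc/min_rate", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, rate, strlen(rate)) < 0)
		perror("write");
	close(fd);
	return 0;
}
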
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
new file mode 100644
index 0000000..97f26bc
--- /dev/null
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 NVIDIA CORPORATION.  All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+
+struct tegra186_emc_dvfs {
+	unsigned long latency;
+	unsigned long rate;
+};
+
+struct tegra186_emc {
+	struct tegra_bpmp *bpmp;
+	struct device *dev;
+	struct clk *clk;
+
+	struct tegra186_emc_dvfs *dvfs;
+	unsigned int num_dvfs;
+
+	struct {
+		struct dentry *root;
+		unsigned long min_rate;
+		unsigned long max_rate;
+	} debugfs;
+};
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ *   /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ *   - available_rates: This file contains a list of valid, space-separated
+ *     EMC frequencies.
+ *
+ *   - min_rate: Writing a value to this file sets the given frequency as the
+ *       floor of the permitted range. If this is higher than the currently
+ *       configured EMC frequency, this will cause the frequency to be
+ *       increased so that it stays within the valid range.
+ *
+ *   - max_rate: Similarly to the min_rate file, writing a value to this file
+ *       sets the given frequency as the ceiling of the permitted range. If
+ *       the value is lower than the currently configured EMC frequency, this
+ *       will cause the frequency to be decreased so that it stays within the
+ *       valid range.
+ */
+
+static bool tegra186_emc_validate_rate(struct tegra186_emc *emc,
+				       unsigned long rate)
+{
+	unsigned int i;
+
+	for (i = 0; i < emc->num_dvfs; i++)
+		if (rate == emc->dvfs[i].rate)
+			return true;
+
+	return false;
+}
+
+static int tegra186_emc_debug_available_rates_show(struct seq_file *s,
+						   void *data)
+{
+	struct tegra186_emc *emc = s->private;
+	const char *prefix = "";
+	unsigned int i;
+
+	for (i = 0; i < emc->num_dvfs; i++) {
+		seq_printf(s, "%s%lu", prefix, emc->dvfs[i].rate);
+		prefix = " ";
+	}
+
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+static int tegra186_emc_debug_available_rates_open(struct inode *inode,
+						   struct file *file)
+{
+	return single_open(file, tegra186_emc_debug_available_rates_show,
+			   inode->i_private);
+}
+
+static const struct file_operations tegra186_emc_debug_available_rates_fops = {
+	.open = tegra186_emc_debug_available_rates_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int tegra186_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+	struct tegra186_emc *emc = data;
+
+	*rate = emc->debugfs.min_rate;
+
+	return 0;
+}
+
+static int tegra186_emc_debug_min_rate_set(void *data, u64 rate)
+{
+	struct tegra186_emc *emc = data;
+	int err;
+
+	if (!tegra186_emc_validate_rate(emc, rate))
+		return -EINVAL;
+
+	err = clk_set_min_rate(emc->clk, rate);
+	if (err < 0)
+		return err;
+
+	emc->debugfs.min_rate = rate;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra186_emc_debug_min_rate_fops,
+			tegra186_emc_debug_min_rate_get,
+			tegra186_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra186_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+	struct tegra186_emc *emc = data;
+
+	*rate = emc->debugfs.max_rate;
+
+	return 0;
+}
+
+static int tegra186_emc_debug_max_rate_set(void *data, u64 rate)
+{
+	struct tegra186_emc *emc = data;
+	int err;
+
+	if (!tegra186_emc_validate_rate(emc, rate))
+		return -EINVAL;
+
+	err = clk_set_max_rate(emc->clk, rate);
+	if (err < 0)
+		return err;
+
+	emc->debugfs.max_rate = rate;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra186_emc_debug_max_rate_fops,
+			tegra186_emc_debug_max_rate_get,
+			tegra186_emc_debug_max_rate_set, "%llu\n");
+
+static int tegra186_emc_probe(struct platform_device *pdev)
+{
+	struct mrq_emc_dvfs_latency_response response;
+	struct tegra_bpmp_message msg;
+	struct tegra186_emc *emc;
+	unsigned int i;
+	int err;
+
+	emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+	if (!emc)
+		return -ENOMEM;
+
+	emc->bpmp = tegra_bpmp_get(&pdev->dev);
+	if (IS_ERR(emc->bpmp)) {
+		err = PTR_ERR(emc->bpmp);
+
+		if (err != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "failed to get BPMP: %d\n", err);
+
+		return err;
+	}
+
+	emc->clk = devm_clk_get(&pdev->dev, "emc");
+	if (IS_ERR(emc->clk)) {
+		err = PTR_ERR(emc->clk);
+		dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, emc);
+	emc->dev = &pdev->dev;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_EMC_DVFS_LATENCY;
+	msg.tx.data = NULL;
+	msg.tx.size = 0;
+	msg.rx.data = &response;
+	msg.rx.size = sizeof(response);
+
+	err = tegra_bpmp_transfer(emc->bpmp, &msg);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to EMC DVFS pairs: %d\n", err);
+		return err;
+	}
+
+	emc->debugfs.min_rate = ULONG_MAX;
+	emc->debugfs.max_rate = 0;
+
+	emc->num_dvfs = response.num_pairs;
+
+	emc->dvfs = devm_kmalloc_array(&pdev->dev, emc->num_dvfs,
+				       sizeof(*emc->dvfs), GFP_KERNEL);
+	if (!emc->dvfs)
+		return -ENOMEM;
+
+	dev_dbg(&pdev->dev, "%u DVFS pairs:\n", emc->num_dvfs);
+
+	for (i = 0; i < emc->num_dvfs; i++) {
+		emc->dvfs[i].rate = response.pairs[i].freq * 1000;
+		emc->dvfs[i].latency = response.pairs[i].latency;
+
+		if (emc->dvfs[i].rate < emc->debugfs.min_rate)
+			emc->debugfs.min_rate = emc->dvfs[i].rate;
+
+		if (emc->dvfs[i].rate > emc->debugfs.max_rate)
+			emc->debugfs.max_rate = emc->dvfs[i].rate;
+
+		dev_dbg(&pdev->dev, "  %2u: %lu Hz -> %lu us\n", i,
+			emc->dvfs[i].rate, emc->dvfs[i].latency);
+	}
+
+	err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+				 emc->debugfs.max_rate);
+	if (err < 0) {
+		dev_err(&pdev->dev,
+			"failed to set rate range [%lu-%lu] for %pC\n",
+			emc->debugfs.min_rate, emc->debugfs.max_rate,
+			emc->clk);
+		return err;
+	}
+
+	emc->debugfs.root = debugfs_create_dir("emc", NULL);
+	if (!emc->debugfs.root) {
+		dev_err(&pdev->dev, "failed to create debugfs directory\n");
+		return 0;
+	}
+
+	debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
+			    emc, &tegra186_emc_debug_available_rates_fops);
+	debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+			    emc, &tegra186_emc_debug_min_rate_fops);
+	debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+			    emc, &tegra186_emc_debug_max_rate_fops);
+
+	return 0;
+}
+
+static int tegra186_emc_remove(struct platform_device *pdev)
+{
+	struct tegra186_emc *emc = platform_get_drvdata(pdev);
+
+	debugfs_remove_recursive(emc->debugfs.root);
+	tegra_bpmp_put(emc->bpmp);
+
+	return 0;
+}
+
+static const struct of_device_id tegra186_emc_of_match[] = {
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+	{ .compatible = "nvidia,tegra186-emc" },
+#endif
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+	{ .compatible = "nvidia,tegra194-emc" },
+#endif
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra186_emc_of_match);
+
+static struct platform_driver tegra186_emc_driver = {
+	.driver = {
+		.name = "tegra186-emc",
+		.of_match_table = tegra186_emc_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = tegra186_emc_probe,
+	.remove = tegra186_emc_remove,
+};
+module_platform_driver(tegra186_emc_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra186 External Memory Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
index 441213a..5d53f11 100644
--- a/drivers/memory/tegra/tegra186.c
+++ b/drivers/memory/tegra/tegra186.c
@@ -6,16 +6,18 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
 #include <dt-bindings/memory/tegra186-mc.h>
+#endif
 
-struct tegra_mc {
-	struct device *dev;
-	void __iomem *regs;
-};
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+#include <dt-bindings/memory/tegra194-mc.h>
+#endif
 
-struct tegra_mc_client {
+struct tegra186_mc_client {
 	const char *name;
 	unsigned int sid;
 	struct {
@@ -24,7 +26,46 @@ struct tegra_mc_client {
 	} regs;
 };
 
-static const struct tegra_mc_client tegra186_mc_clients[] = {
+struct tegra186_mc_soc {
+	const struct tegra186_mc_client *clients;
+	unsigned int num_clients;
+};
+
+struct tegra186_mc {
+	struct device *dev;
+	void __iomem *regs;
+
+	const struct tegra186_mc_soc *soc;
+};
+
+static void tegra186_mc_program_sid(struct tegra186_mc *mc)
+{
+	unsigned int i;
+
+	for (i = 0; i < mc->soc->num_clients; i++) {
+		const struct tegra186_mc_client *client = &mc->soc->clients[i];
+		u32 override, security;
+
+		override = readl(mc->regs + client->regs.override);
+		security = readl(mc->regs + client->regs.security);
+
+		dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
+			client->name, override, security);
+
+		dev_dbg(mc->dev, "setting SID %u for %s\n", client->sid,
+			client->name);
+		writel(client->sid, mc->regs + client->regs.override);
+
+		override = readl(mc->regs + client->regs.override);
+		security = readl(mc->regs + client->regs.security);
+
+		dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
+			client->name, override, security);
+	}
+}
+
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+static const struct tegra186_mc_client tegra186_mc_clients[] = {
 	{
 		.name = "ptcr",
 		.sid = TEGRA186_SID_PASSTHROUGH,
@@ -532,17 +573,966 @@ static const struct tegra_mc_client tegra186_mc_clients[] = {
 	},
 };
 
+static const struct tegra186_mc_soc tegra186_mc_soc = {
+	.num_clients = ARRAY_SIZE(tegra186_mc_clients),
+	.clients = tegra186_mc_clients,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+static const struct tegra186_mc_client tegra194_mc_clients[] = {
+	{
+		.name = "ptcr",
+		.sid = TEGRA194_SID_PASSTHROUGH,
+		.regs = {
+			.override = 0x000,
+			.security = 0x004,
+		},
+	}, {
+		.name = "miu7r",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x008,
+			.security = 0x00c,
+		},
+	}, {
+		.name = "miu7w",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x010,
+			.security = 0x014,
+		},
+	}, {
+		.name = "hdar",
+		.sid = TEGRA194_SID_HDA,
+		.regs = {
+			.override = 0x0a8,
+			.security = 0x0ac,
+		},
+	}, {
+		.name = "host1xdmar",
+		.sid = TEGRA194_SID_HOST1X,
+		.regs = {
+			.override = 0x0b0,
+			.security = 0x0b4,
+		},
+	}, {
+		.name = "nvencsrd",
+		.sid = TEGRA194_SID_NVENC,
+		.regs = {
+			.override = 0x0e0,
+			.security = 0x0e4,
+		},
+	}, {
+		.name = "satar",
+		.sid = TEGRA194_SID_SATA,
+		.regs = {
+			.override = 0x0f8,
+			.security = 0x0fc,
+		},
+	}, {
+		.name = "mpcorer",
+		.sid = TEGRA194_SID_PASSTHROUGH,
+		.regs = {
+			.override = 0x138,
+			.security = 0x13c,
+		},
+	}, {
+		.name = "nvencswr",
+		.sid = TEGRA194_SID_NVENC,
+		.regs = {
+			.override = 0x158,
+			.security = 0x15c,
+		},
+	}, {
+		.name = "hdaw",
+		.sid = TEGRA194_SID_HDA,
+		.regs = {
+			.override = 0x1a8,
+			.security = 0x1ac,
+		},
+	}, {
+		.name = "mpcorew",
+		.sid = TEGRA194_SID_PASSTHROUGH,
+		.regs = {
+			.override = 0x1c8,
+			.security = 0x1cc,
+		},
+	}, {
+		.name = "sataw",
+		.sid = TEGRA194_SID_SATA,
+		.regs = {
+			.override = 0x1e8,
+			.security = 0x1ec,
+		},
+	}, {
+		.name = "ispra",
+		.sid = TEGRA194_SID_ISP,
+		.regs = {
+			.override = 0x220,
+			.security = 0x224,
+		},
+	}, {
+		.name = "ispfalr",
+		.sid = TEGRA194_SID_ISP_FALCON,
+		.regs = {
+			.override = 0x228,
+			.security = 0x22c,
+		},
+	}, {
+		.name = "ispwa",
+		.sid = TEGRA194_SID_ISP,
+		.regs = {
+			.override = 0x230,
+			.security = 0x234,
+		},
+	}, {
+		.name = "ispwb",
+		.sid = TEGRA194_SID_ISP,
+		.regs = {
+			.override = 0x238,
+			.security = 0x23c,
+		},
+	}, {
+		.name = "xusb_hostr",
+		.sid = TEGRA194_SID_XUSB_HOST,
+		.regs = {
+			.override = 0x250,
+			.security = 0x254,
+		},
+	}, {
+		.name = "xusb_hostw",
+		.sid = TEGRA194_SID_XUSB_HOST,
+		.regs = {
+			.override = 0x258,
+			.security = 0x25c,
+		},
+	}, {
+		.name = "xusb_devr",
+		.sid = TEGRA194_SID_XUSB_DEV,
+		.regs = {
+			.override = 0x260,
+			.security = 0x264,
+		},
+	}, {
+		.name = "xusb_devw",
+		.sid = TEGRA194_SID_XUSB_DEV,
+		.regs = {
+			.override = 0x268,
+			.security = 0x26c,
+		},
+	}, {
+		.name = "sdmmcra",
+		.sid = TEGRA194_SID_SDMMC1,
+		.regs = {
+			.override = 0x300,
+			.security = 0x304,
+		},
+	}, {
+		.name = "sdmmcr",
+		.sid = TEGRA194_SID_SDMMC3,
+		.regs = {
+			.override = 0x310,
+			.security = 0x314,
+		},
+	}, {
+		.name = "sdmmcrab",
+		.sid = TEGRA194_SID_SDMMC4,
+		.regs = {
+			.override = 0x318,
+			.security = 0x31c,
+		},
+	}, {
+		.name = "sdmmcwa",
+		.sid = TEGRA194_SID_SDMMC1,
+		.regs = {
+			.override = 0x320,
+			.security = 0x324,
+		},
+	}, {
+		.name = "sdmmcw",
+		.sid = TEGRA194_SID_SDMMC3,
+		.regs = {
+			.override = 0x330,
+			.security = 0x334,
+		},
+	}, {
+		.name = "sdmmcwab",
+		.sid = TEGRA194_SID_SDMMC4,
+		.regs = {
+			.override = 0x338,
+			.security = 0x33c,
+		},
+	}, {
+		.name = "vicsrd",
+		.sid = TEGRA194_SID_VIC,
+		.regs = {
+			.override = 0x360,
+			.security = 0x364,
+		},
+	}, {
+		.name = "vicswr",
+		.sid = TEGRA194_SID_VIC,
+		.regs = {
+			.override = 0x368,
+			.security = 0x36c,
+		},
+	}, {
+		.name = "viw",
+		.sid = TEGRA194_SID_VI,
+		.regs = {
+			.override = 0x390,
+			.security = 0x394,
+		},
+	}, {
+		.name = "nvdecsrd",
+		.sid = TEGRA194_SID_NVDEC,
+		.regs = {
+			.override = 0x3c0,
+			.security = 0x3c4,
+		},
+	}, {
+		.name = "nvdecswr",
+		.sid = TEGRA194_SID_NVDEC,
+		.regs = {
+			.override = 0x3c8,
+			.security = 0x3cc,
+		},
+	}, {
+		.name = "aper",
+		.sid = TEGRA194_SID_APE,
+		.regs = {
+			.override = 0x3c0,
+			.security = 0x3c4,
+		},
+	}, {
+		.name = "apew",
+		.sid = TEGRA194_SID_APE,
+		.regs = {
+			.override = 0x3d0,
+			.security = 0x3d4,
+		},
+	}, {
+		.name = "nvjpgsrd",
+		.sid = TEGRA194_SID_NVJPG,
+		.regs = {
+			.override = 0x3f0,
+			.security = 0x3f4,
+		},
+	}, {
+		.name = "nvjpgswr",
+		.sid = TEGRA194_SID_NVJPG,
+		.regs = {
+			.override = 0x3f0,
+			.security = 0x3f4,
+		},
+	}, {
+		.name = "axiapr",
+		.sid = TEGRA194_SID_PASSTHROUGH,
+		.regs = {
+			.override = 0x410,
+			.security = 0x414,
+		},
+	}, {
+		.name = "axiapw",
+		.sid = TEGRA194_SID_PASSTHROUGH,
+		.regs = {
+			.override = 0x418,
+			.security = 0x41c,
+		},
+	}, {
+		.name = "etrr",
+		.sid = TEGRA194_SID_ETR,
+		.regs = {
+			.override = 0x420,
+			.security = 0x424,
+		},
+	}, {
+		.name = "etrw",
+		.sid = TEGRA194_SID_ETR,
+		.regs = {
+			.override = 0x428,
+			.security = 0x42c,
+		},
+	}, {
+		.name = "axisr",
+		.sid = TEGRA194_SID_PASSTHROUGH,
+		.regs = {
+			.override = 0x460,
+			.security = 0x464,
+		},
+	}, {
+		.name = "axisw",
+		.sid = TEGRA194_SID_PASSTHROUGH,
+		.regs = {
+			.override = 0x468,
+			.security = 0x46c,
+		},
+	}, {
+		.name = "eqosr",
+		.sid = TEGRA194_SID_EQOS,
+		.regs = {
+			.override = 0x470,
+			.security = 0x474,
+		},
+	}, {
+		.name = "eqosw",
+		.sid = TEGRA194_SID_EQOS,
+		.regs = {
+			.override = 0x478,
+			.security = 0x47c,
+		},
+	}, {
+		.name = "ufshcr",
+		.sid = TEGRA194_SID_UFSHC,
+		.regs = {
+			.override = 0x480,
+			.security = 0x484,
+		},
+	}, {
+		.name = "ufshcw",
+		.sid = TEGRA194_SID_UFSHC,
+		.regs = {
+			.override = 0x488,
+			.security = 0x48c,
+		},
+	}, {
+		.name = "nvdisplayr",
+		.sid = TEGRA194_SID_NVDISPLAY,
+		.regs = {
+			.override = 0x490,
+			.security = 0x494,
+		},
+	}, {
+		.name = "bpmpr",
+		.sid = TEGRA194_SID_BPMP,
+		.regs = {
+			.override = 0x498,
+			.security = 0x49c,
+		},
+	}, {
+		.name = "bpmpw",
+		.sid = TEGRA194_SID_BPMP,
+		.regs = {
+			.override = 0x4a0,
+			.security = 0x4a4,
+		},
+	}, {
+		.name = "bpmpdmar",
+		.sid = TEGRA194_SID_BPMP,
+		.regs = {
+			.override = 0x4a8,
+			.security = 0x4ac,
+		},
+	}, {
+		.name = "bpmpdmaw",
+		.sid = TEGRA194_SID_BPMP,
+		.regs = {
+			.override = 0x4b0,
+			.security = 0x4b4,
+		},
+	}, {
+		.name = "aonr",
+		.sid = TEGRA194_SID_AON,
+		.regs = {
+			.override = 0x4b8,
+			.security = 0x4bc,
+		},
+	}, {
+		.name = "aonw",
+		.sid = TEGRA194_SID_AON,
+		.regs = {
+			.override = 0x4c0,
+			.security = 0x4c4,
+		},
+	}, {
+		.name = "aondmar",
+		.sid = TEGRA194_SID_AON,
+		.regs = {
+			.override = 0x4c8,
+			.security = 0x4cc,
+		},
+	}, {
+		.name = "aondmaw",
+		.sid = TEGRA194_SID_AON,
+		.regs = {
+			.override = 0x4d0,
+			.security = 0x4d4,
+		},
+	}, {
+		.name = "scer",
+		.sid = TEGRA194_SID_SCE,
+		.regs = {
+			.override = 0x4d8,
+			.security = 0x4dc,
+		},
+	}, {
+		.name = "scew",
+		.sid = TEGRA194_SID_SCE,
+		.regs = {
+			.override = 0x4e0,
+			.security = 0x4e4,
+		},
+	}, {
+		.name = "scedmar",
+		.sid = TEGRA194_SID_SCE,
+		.regs = {
+			.override = 0x4e8,
+			.security = 0x4ec,
+		},
+	}, {
+		.name = "scedmaw",
+		.sid = TEGRA194_SID_SCE,
+		.regs = {
+			.override = 0x4f0,
+			.security = 0x4f4,
+		},
+	}, {
+		.name = "apedmar",
+		.sid = TEGRA194_SID_APE,
+		.regs = {
+			.override = 0x4f8,
+			.security = 0x4fc,
+		},
+	}, {
+		.name = "apedmaw",
+		.sid = TEGRA194_SID_APE,
+		.regs = {
+			.override = 0x500,
+			.security = 0x504,
+		},
+	}, {
+		.name = "nvdisplayr1",
+		.sid = TEGRA194_SID_NVDISPLAY,
+		.regs = {
+			.override = 0x508,
+			.security = 0x50c,
+		},
+	}, {
+		.name = "vicsrd1",
+		.sid = TEGRA194_SID_VIC,
+		.regs = {
+			.override = 0x510,
+			.security = 0x514,
+		},
+	}, {
+		.name = "nvdecsrd1",
+		.sid = TEGRA194_SID_NVDEC,
+		.regs = {
+			.override = 0x518,
+			.security = 0x51c,
+		},
+	}, {
+		.name = "miu0r",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x530,
+			.security = 0x534,
+		},
+	}, {
+		.name = "miu0w",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x538,
+			.security = 0x53c,
+		},
+	}, {
+		.name = "miu1r",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x540,
+			.security = 0x544,
+		},
+	}, {
+		.name = "miu1w",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x548,
+			.security = 0x54c,
+		},
+	}, {
+		.name = "miu2r",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x570,
+			.security = 0x574,
+		},
+	}, {
+		.name = "miu2w",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x578,
+			.security = 0x57c,
+		},
+	}, {
+		.name = "miu3r",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x580,
+			.security = 0x584,
+		},
+	}, {
+		.name = "miu3w",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x588,
+			.security = 0x58c,
+		},
+	}, {
+		.name = "miu4r",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x590,
+			.security = 0x594,
+		},
+	}, {
+		.name = "miu4w",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x598,
+			.security = 0x59c,
+		},
+	}, {
+		.name = "dpmur",
+		.sid = TEGRA194_SID_PASSTHROUGH,
+		.regs = {
+			.override = 0x598,
+			.security = 0x59c,
+		},
+	}, {
+		.name = "vifalr",
+		.sid = TEGRA194_SID_VI_FALCON,
+		.regs = {
+			.override = 0x5e0,
+			.security = 0x5e4,
+		},
+	}, {
+		.name = "vifalw",
+		.sid = TEGRA194_SID_VI_FALCON,
+		.regs = {
+			.override = 0x5e8,
+			.security = 0x5ec,
+		},
+	}, {
+		.name = "dla0rda",
+		.sid = TEGRA194_SID_NVDLA0,
+		.regs = {
+			.override = 0x5f0,
+			.security = 0x5f4,
+		},
+	}, {
+		.name = "dla0falrdb",
+		.sid = TEGRA194_SID_NVDLA0,
+		.regs = {
+			.override = 0x5f8,
+			.security = 0x5fc,
+		},
+	}, {
+		.name = "dla0wra",
+		.sid = TEGRA194_SID_NVDLA0,
+		.regs = {
+			.override = 0x600,
+			.security = 0x604,
+		},
+	}, {
+		.name = "dla0falwrb",
+		.sid = TEGRA194_SID_NVDLA0,
+		.regs = {
+			.override = 0x608,
+			.security = 0x60c,
+		},
+	}, {
+		.name = "dla1rda",
+		.sid = TEGRA194_SID_NVDLA1,
+		.regs = {
+			.override = 0x610,
+			.security = 0x614,
+		},
+	}, {
+		.name = "dla1falrdb",
+		.sid = TEGRA194_SID_NVDLA1,
+		.regs = {
+			.override = 0x618,
+			.security = 0x61c,
+		},
+	}, {
+		.name = "dla1wra",
+		.sid = TEGRA194_SID_NVDLA1,
+		.regs = {
+			.override = 0x620,
+			.security = 0x624,
+		},
+	}, {
+		.name = "dla1falwrb",
+		.sid = TEGRA194_SID_NVDLA1,
+		.regs = {
+			.override = 0x628,
+			.security = 0x62c,
+		},
+	}, {
+		.name = "pva0rda",
+		.sid = TEGRA194_SID_PVA0,
+		.regs = {
+			.override = 0x630,
+			.security = 0x634,
+		},
+	}, {
+		.name = "pva0rdb",
+		.sid = TEGRA194_SID_PVA0,
+		.regs = {
+			.override = 0x638,
+			.security = 0x63c,
+		},
+	}, {
+		.name = "pva0rdc",
+		.sid = TEGRA194_SID_PVA0,
+		.regs = {
+			.override = 0x640,
+			.security = 0x644,
+		},
+	}, {
+		.name = "pva0wra",
+		.sid = TEGRA194_SID_PVA0,
+		.regs = {
+			.override = 0x648,
+			.security = 0x64c,
+		},
+	}, {
+		.name = "pva0wrb",
+		.sid = TEGRA194_SID_PVA0,
+		.regs = {
+			.override = 0x650,
+			.security = 0x654,
+		},
+	}, {
+		.name = "pva0wrc",
+		.sid = TEGRA194_SID_PVA0,
+		.regs = {
+			.override = 0x658,
+			.security = 0x65c,
+		},
+	}, {
+		.name = "pva1rda",
+		.sid = TEGRA194_SID_PVA1,
+		.regs = {
+			.override = 0x660,
+			.security = 0x664,
+		},
+	}, {
+		.name = "pva1rdb",
+		.sid = TEGRA194_SID_PVA1,
+		.regs = {
+			.override = 0x668,
+			.security = 0x66c,
+		},
+	}, {
+		.name = "pva1rdc",
+		.sid = TEGRA194_SID_PVA1,
+		.regs = {
+			.override = 0x670,
+			.security = 0x674,
+		},
+	}, {
+		.name = "pva1wra",
+		.sid = TEGRA194_SID_PVA1,
+		.regs = {
+			.override = 0x678,
+			.security = 0x67c,
+		},
+	}, {
+		.name = "pva1wrb",
+		.sid = TEGRA194_SID_PVA1,
+		.regs = {
+			.override = 0x680,
+			.security = 0x684,
+		},
+	}, {
+		.name = "pva1wrc",
+		.sid = TEGRA194_SID_PVA1,
+		.regs = {
+			.override = 0x688,
+			.security = 0x68c,
+		},
+	}, {
+		.name = "rcer",
+		.sid = TEGRA194_SID_RCE,
+		.regs = {
+			.override = 0x690,
+			.security = 0x694,
+		},
+	}, {
+		.name = "rcew",
+		.sid = TEGRA194_SID_RCE,
+		.regs = {
+			.override = 0x698,
+			.security = 0x69c,
+		},
+	}, {
+		.name = "rcedmar",
+		.sid = TEGRA194_SID_RCE,
+		.regs = {
+			.override = 0x6a0,
+			.security = 0x6a4,
+		},
+	}, {
+		.name = "rcedmaw",
+		.sid = TEGRA194_SID_RCE,
+		.regs = {
+			.override = 0x6a8,
+			.security = 0x6ac,
+		},
+	}, {
+		.name = "nvenc1srd",
+		.sid = TEGRA194_SID_NVENC1,
+		.regs = {
+			.override = 0x6b0,
+			.security = 0x6b4,
+		},
+	}, {
+		.name = "nvenc1swr",
+		.sid = TEGRA194_SID_NVENC1,
+		.regs = {
+			.override = 0x6b8,
+			.security = 0x6bc,
+		},
+	}, {
+		.name = "pcie0r",
+		.sid = TEGRA194_SID_PCIE0,
+		.regs = {
+			.override = 0x6c0,
+			.security = 0x6c4,
+		},
+	}, {
+		.name = "pcie0w",
+		.sid = TEGRA194_SID_PCIE0,
+		.regs = {
+			.override = 0x6c8,
+			.security = 0x6cc,
+		},
+	}, {
+		.name = "pcie1r",
+		.sid = TEGRA194_SID_PCIE1,
+		.regs = {
+			.override = 0x6d0,
+			.security = 0x6d4,
+		},
+	}, {
+		.name = "pcie1w",
+		.sid = TEGRA194_SID_PCIE1,
+		.regs = {
+			.override = 0x6d8,
+			.security = 0x6dc,
+		},
+	}, {
+		.name = "pcie2ar",
+		.sid = TEGRA194_SID_PCIE2,
+		.regs = {
+			.override = 0x6e0,
+			.security = 0x6e4,
+		},
+	}, {
+		.name = "pcie2aw",
+		.sid = TEGRA194_SID_PCIE2,
+		.regs = {
+			.override = 0x6e8,
+			.security = 0x6ec,
+		},
+	}, {
+		.name = "pcie3r",
+		.sid = TEGRA194_SID_PCIE3,
+		.regs = {
+			.override = 0x6f0,
+			.security = 0x6f4,
+		},
+	}, {
+		.name = "pcie3w",
+		.sid = TEGRA194_SID_PCIE3,
+		.regs = {
+			.override = 0x6f8,
+			.security = 0x6fc,
+		},
+	}, {
+		.name = "pcie4r",
+		.sid = TEGRA194_SID_PCIE4,
+		.regs = {
+			.override = 0x700,
+			.security = 0x704,
+		},
+	}, {
+		.name = "pcie4w",
+		.sid = TEGRA194_SID_PCIE4,
+		.regs = {
+			.override = 0x708,
+			.security = 0x70c,
+		},
+	}, {
+		.name = "pcie5r",
+		.sid = TEGRA194_SID_PCIE5,
+		.regs = {
+			.override = 0x710,
+			.security = 0x714,
+		},
+	}, {
+		.name = "pcie5w",
+		.sid = TEGRA194_SID_PCIE5,
+		.regs = {
+			.override = 0x718,
+			.security = 0x71c,
+		},
+	}, {
+		.name = "ispfalw",
+		.sid = TEGRA194_SID_ISP_FALCON,
+		.regs = {
+			.override = 0x720,
+			.security = 0x724,
+		},
+	}, {
+		.name = "dla0rda1",
+		.sid = TEGRA194_SID_NVDLA0,
+		.regs = {
+			.override = 0x748,
+			.security = 0x74c,
+		},
+	}, {
+		.name = "dla1rda1",
+		.sid = TEGRA194_SID_NVDLA1,
+		.regs = {
+			.override = 0x750,
+			.security = 0x754,
+		},
+	}, {
+		.name = "pva0rda1",
+		.sid = TEGRA194_SID_PVA0,
+		.regs = {
+			.override = 0x758,
+			.security = 0x75c,
+		},
+	}, {
+		.name = "pva0rdb1",
+		.sid = TEGRA194_SID_PVA0,
+		.regs = {
+			.override = 0x760,
+			.security = 0x764,
+		},
+	}, {
+		.name = "pva1rda1",
+		.sid = TEGRA194_SID_PVA1,
+		.regs = {
+			.override = 0x768,
+			.security = 0x76c,
+		},
+	}, {
+		.name = "pva1rdb1",
+		.sid = TEGRA194_SID_PVA1,
+		.regs = {
+			.override = 0x770,
+			.security = 0x774,
+		},
+	}, {
+		.name = "pcie5r1",
+		.sid = TEGRA194_SID_PCIE5,
+		.regs = {
+			.override = 0x778,
+			.security = 0x77c,
+		},
+	}, {
+		.name = "nvencsrd1",
+		.sid = TEGRA194_SID_NVENC,
+		.regs = {
+			.override = 0x780,
+			.security = 0x784,
+		},
+	}, {
+		.name = "nvenc1srd1",
+		.sid = TEGRA194_SID_NVENC1,
+		.regs = {
+			.override = 0x788,
+			.security = 0x78c,
+		},
+	}, {
+		.name = "ispra1",
+		.sid = TEGRA194_SID_ISP,
+		.regs = {
+			.override = 0x790,
+			.security = 0x794,
+		},
+	}, {
+		.name = "pcie0r1",
+		.sid = TEGRA194_SID_PCIE0,
+		.regs = {
+			.override = 0x798,
+			.security = 0x79c,
+		},
+	}, {
+		.name = "nvdec1srd",
+		.sid = TEGRA194_SID_NVDEC1,
+		.regs = {
+			.override = 0x7c8,
+			.security = 0x7cc,
+		},
+	}, {
+		.name = "nvdec1srd1",
+		.sid = TEGRA194_SID_NVDEC1,
+		.regs = {
+			.override = 0x7d0,
+			.security = 0x7d4,
+		},
+	}, {
+		.name = "nvdec1swr",
+		.sid = TEGRA194_SID_NVDEC1,
+		.regs = {
+			.override = 0x7d8,
+			.security = 0x7dc,
+		},
+	}, {
+		.name = "miu5r",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x7e0,
+			.security = 0x7e4,
+		},
+	}, {
+		.name = "miu5w",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x7e8,
+			.security = 0x7ec,
+		},
+	}, {
+		.name = "miu6r",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x7f0,
+			.security = 0x7f4,
+		},
+	}, {
+		.name = "miu6w",
+		.sid = TEGRA194_SID_MIU,
+		.regs = {
+			.override = 0x7f8,
+			.security = 0x7fc,
+		},
+	},
+};
+
+static const struct tegra186_mc_soc tegra194_mc_soc = {
+	.num_clients = ARRAY_SIZE(tegra194_mc_clients),
+	.clients = tegra194_mc_clients,
+};
+#endif
+
 static int tegra186_mc_probe(struct platform_device *pdev)
 {
+	struct tegra186_mc *mc;
 	struct resource *res;
-	struct tegra_mc *mc;
-	unsigned int i;
-	int err = 0;
+	int err;
 
 	mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
 	if (!mc)
 		return -ENOMEM;
 
+	mc->soc = of_device_get_match_data(&pdev->dev);
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mc->regs = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(mc->regs))
@@ -550,46 +1540,63 @@ static int tegra186_mc_probe(struct platform_device *pdev)
 
 	mc->dev = &pdev->dev;
 
-	for (i = 0; i < ARRAY_SIZE(tegra186_mc_clients); i++) {
-		const struct tegra_mc_client *client = &tegra186_mc_clients[i];
-		u32 override, security;
-
-		override = readl(mc->regs + client->regs.override);
-		security = readl(mc->regs + client->regs.security);
-
-		dev_dbg(&pdev->dev, "client %s: override: %x security: %x\n",
-			client->name, override, security);
-
-		dev_dbg(&pdev->dev, "setting SID %u for %s\n", client->sid,
-			client->name);
-		writel(client->sid, mc->regs + client->regs.override);
-
-		override = readl(mc->regs + client->regs.override);
-		security = readl(mc->regs + client->regs.security);
-
-		dev_dbg(&pdev->dev, "client %s: override: %x security: %x\n",
-			client->name, override, security);
-	}
+	err = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (err < 0)
+		return err;
 
 	platform_set_drvdata(pdev, mc);
+	tegra186_mc_program_sid(mc);
 
-	return err;
+	return 0;
+}
+
+static int tegra186_mc_remove(struct platform_device *pdev)
+{
+	struct tegra186_mc *mc = platform_get_drvdata(pdev);
+
+	of_platform_depopulate(mc->dev);
+
+	return 0;
 }
 
 static const struct of_device_id tegra186_mc_of_match[] = {
-	{ .compatible = "nvidia,tegra186-mc", },
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+	{ .compatible = "nvidia,tegra186-mc", .data = &tegra186_mc_soc },
+#endif
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+	{ .compatible = "nvidia,tegra194-mc", .data = &tegra194_mc_soc },
+#endif
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, tegra186_mc_of_match);
 
+static int tegra186_mc_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int tegra186_mc_resume(struct device *dev)
+{
+	struct tegra186_mc *mc = dev_get_drvdata(dev);
+
+	tegra186_mc_program_sid(mc);
+
+	return 0;
+}
+
+static const struct dev_pm_ops tegra186_mc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(tegra186_mc_suspend, tegra186_mc_resume)
+};
+
 static struct platform_driver tegra186_mc_driver = {
 	.driver = {
 		.name = "tegra186-mc",
 		.of_match_table = tegra186_mc_of_match,
+		.pm = &tegra186_mc_pm_ops,
 		.suppress_bind_attrs = true,
 	},
-	.prevent_deferred_probe = true,
 	.probe = tegra186_mc_probe,
+	.remove = tegra186_mc_remove,
 };
 module_platform_driver(tegra186_mc_driver);
 
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 1b23b1c..8ae474d 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -8,6 +8,7 @@
 #include <linux/clk.h>
 #include <linux/clk/tegra.h>
 #include <linux/completion.h>
+#include <linux/debugfs.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -150,6 +151,12 @@ struct tegra_emc {
 
 	struct emc_timing *timings;
 	unsigned int num_timings;
+
+	struct {
+		struct dentry *root;
+		unsigned long min_rate;
+		unsigned long max_rate;
+	} debugfs;
 };
 
 static irqreturn_t tegra_emc_isr(int irq, void *data)
@@ -478,6 +485,171 @@ static long emc_round_rate(unsigned long rate,
 	return timing->rate;
 }
 
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ *   /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ *   - available_rates: This file contains a list of valid, space-separated
+ *     EMC frequencies.
+ *
+ *   - min_rate: Writing a value to this file sets the given frequency as the
+ *       floor of the permitted range. If this is higher than the currently
+ *       configured EMC frequency, this will cause the frequency to be
+ *       increased so that it stays within the valid range.
+ *
+ *   - max_rate: Similarly to the min_rate file, writing a value to this file
+ *       sets the given frequency as the ceiling of the permitted range. If
+ *       the value is lower than the currently configured EMC frequency, this
+ *       will cause the frequency to be decreased so that it stays within the
+ *       valid range.
+ */
+
+static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
+{
+	unsigned int i;
+
+	for (i = 0; i < emc->num_timings; i++)
+		if (rate == emc->timings[i].rate)
+			return true;
+
+	return false;
+}
+
+static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
+{
+	struct tegra_emc *emc = s->private;
+	const char *prefix = "";
+	unsigned int i;
+
+	for (i = 0; i < emc->num_timings; i++) {
+		seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
+		prefix = " ";
+	}
+
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+static int tegra_emc_debug_available_rates_open(struct inode *inode,
+						struct file *file)
+{
+	return single_open(file, tegra_emc_debug_available_rates_show,
+			   inode->i_private);
+}
+
+static const struct file_operations tegra_emc_debug_available_rates_fops = {
+	.open = tegra_emc_debug_available_rates_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+	struct tegra_emc *emc = data;
+
+	*rate = emc->debugfs.min_rate;
+
+	return 0;
+}
+
+static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+{
+	struct tegra_emc *emc = data;
+	int err;
+
+	if (!tegra_emc_validate_rate(emc, rate))
+		return -EINVAL;
+
+	err = clk_set_min_rate(emc->clk, rate);
+	if (err < 0)
+		return err;
+
+	emc->debugfs.min_rate = rate;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+			tegra_emc_debug_min_rate_get,
+			tegra_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+	struct tegra_emc *emc = data;
+
+	*rate = emc->debugfs.max_rate;
+
+	return 0;
+}
+
+static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+{
+	struct tegra_emc *emc = data;
+	int err;
+
+	if (!tegra_emc_validate_rate(emc, rate))
+		return -EINVAL;
+
+	err = clk_set_max_rate(emc->clk, rate);
+	if (err < 0)
+		return err;
+
+	emc->debugfs.max_rate = rate;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+			tegra_emc_debug_max_rate_get,
+			tegra_emc_debug_max_rate_set, "%llu\n");
+
+static void tegra_emc_debugfs_init(struct tegra_emc *emc)
+{
+	struct device *dev = emc->dev;
+	unsigned int i;
+	int err;
+
+	emc->debugfs.min_rate = ULONG_MAX;
+	emc->debugfs.max_rate = 0;
+
+	for (i = 0; i < emc->num_timings; i++) {
+		if (emc->timings[i].rate < emc->debugfs.min_rate)
+			emc->debugfs.min_rate = emc->timings[i].rate;
+
+		if (emc->timings[i].rate > emc->debugfs.max_rate)
+			emc->debugfs.max_rate = emc->timings[i].rate;
+	}
+
+	err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+				 emc->debugfs.max_rate);
+	if (err < 0) {
+		dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+			emc->debugfs.min_rate, emc->debugfs.max_rate,
+			emc->clk);
+	}
+
+	emc->debugfs.root = debugfs_create_dir("emc", NULL);
+	if (!emc->debugfs.root) {
+		dev_err(emc->dev, "failed to create debugfs directory\n");
+		return;
+	}
+
+	debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
+			    emc, &tegra_emc_debug_available_rates_fops);
+	debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+			    emc, &tegra_emc_debug_min_rate_fops);
+	debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+			    emc, &tegra_emc_debug_max_rate_fops);
+}
+
 static int tegra_emc_probe(struct platform_device *pdev)
 {
 	struct device_node *np;
@@ -550,6 +722,9 @@ static int tegra_emc_probe(struct platform_device *pdev)
 		goto unset_cb;
 	}
 
+	platform_set_drvdata(pdev, emc);
+	tegra_emc_debugfs_init(emc);
+
 	return 0;
 
 unset_cb:
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index b420268..cc04824 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -436,7 +436,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
 			.reg = 0x37c,
 			.shift = 0,
 			.mask = 0xff,
-			.def = 0x39,
+			.def = 0x7a,
 		},
 	}, {
 		.id = 0x4b,
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 0b6a5e4..e3efd95 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -12,6 +12,7 @@
 #include <linux/clk.h>
 #include <linux/clk/tegra.h>
 #include <linux/completion.h>
+#include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
@@ -331,7 +332,9 @@ struct tegra_emc {
 	struct clk *clk;
 	void __iomem *regs;
 	unsigned int irq;
+	bool bad_state;
 
+	struct emc_timing *new_timing;
 	struct emc_timing *timings;
 	unsigned int num_timings;
 
@@ -345,10 +348,74 @@ struct tegra_emc {
 	bool vref_cal_toggle : 1;
 	bool zcal_long : 1;
 	bool dll_on : 1;
-	bool prepared : 1;
-	bool bad_state : 1;
+
+	struct {
+		struct dentry *root;
+		unsigned long min_rate;
+		unsigned long max_rate;
+	} debugfs;
 };
 
+static int emc_seq_update_timing(struct tegra_emc *emc)
+{
+	u32 val;
+	int err;
+
+	writel_relaxed(EMC_TIMING_UPDATE, emc->regs + EMC_TIMING_CONTROL);
+
+	err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_STATUS, val,
+				!(val & EMC_STATUS_TIMING_UPDATE_STALLED),
+				1, 200);
+	if (err) {
+		dev_err(emc->dev, "failed to update timing: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static void emc_complete_clk_change(struct tegra_emc *emc)
+{
+	struct emc_timing *timing = emc->new_timing;
+	unsigned int dram_num;
+	bool failed = false;
+	int err;
+
+	/* re-enable auto-refresh */
+	dram_num = tegra_mc_get_emem_device_count(emc->mc);
+	writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
+		       emc->regs + EMC_REFCTRL);
+
+	/* restore auto-calibration */
+	if (emc->vref_cal_toggle)
+		writel_relaxed(timing->emc_auto_cal_interval,
+			       emc->regs + EMC_AUTO_CAL_INTERVAL);
+
+	/* restore dynamic self-refresh */
+	if (timing->emc_cfg_dyn_self_ref) {
+		emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
+		writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+	}
+
+	/* set number of clocks to wait after each ZQ command */
+	if (emc->zcal_long)
+		writel_relaxed(timing->emc_zcal_cnt_long,
+			       emc->regs + EMC_ZCAL_WAIT_CNT);
+
+	/* wait for writes to settle */
+	udelay(2);
+
+	/* update restored timing */
+	err = emc_seq_update_timing(emc);
+	if (err)
+		failed = true;
+
+	/* restore early ACK */
+	mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
+
+	WRITE_ONCE(emc->bad_state, failed);
+}
+
 static irqreturn_t tegra_emc_isr(int irq, void *data)
 {
 	struct tegra_emc *emc = data;
@@ -359,10 +426,6 @@ static irqreturn_t tegra_emc_isr(int irq, void *data)
 	if (!status)
 		return IRQ_NONE;
 
-	/* notify about EMC-CAR handshake completion */
-	if (status & EMC_CLKCHANGE_COMPLETE_INT)
-		complete(&emc->clk_handshake_complete);
-
 	/* notify about HW problem */
 	if (status & EMC_REFRESH_OVERFLOW_INT)
 		dev_err_ratelimited(emc->dev,
@@ -371,6 +434,18 @@ static irqreturn_t tegra_emc_isr(int irq, void *data)
 	/* clear interrupts */
 	writel_relaxed(status, emc->regs + EMC_INTSTATUS);
 
+	/* notify about EMC-CAR handshake completion */
+	if (status & EMC_CLKCHANGE_COMPLETE_INT) {
+		if (completion_done(&emc->clk_handshake_complete)) {
+			dev_err_ratelimited(emc->dev,
+					    "bogus handshake interrupt\n");
+			return IRQ_NONE;
+		}
+
+		emc_complete_clk_change(emc);
+		complete(&emc->clk_handshake_complete);
+	}
+
 	return IRQ_HANDLED;
 }
 
@@ -438,24 +513,6 @@ static bool emc_dqs_preset(struct tegra_emc *emc, struct emc_timing *timing,
 	return preset;
 }
 
-static int emc_seq_update_timing(struct tegra_emc *emc)
-{
-	u32 val;
-	int err;
-
-	writel_relaxed(EMC_TIMING_UPDATE, emc->regs + EMC_TIMING_CONTROL);
-
-	err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_STATUS, val,
-				!(val & EMC_STATUS_TIMING_UPDATE_STALLED),
-				1, 200);
-	if (err) {
-		dev_err(emc->dev, "failed to update timing: %d\n", err);
-		return err;
-	}
-
-	return 0;
-}
-
 static int emc_prepare_mc_clk_cfg(struct tegra_emc *emc, unsigned long rate)
 {
 	struct tegra_mc *mc = emc->mc;
@@ -582,8 +639,7 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
 				!(val & EMC_AUTO_CAL_STATUS_ACTIVE), 1, 300);
 			if (err) {
 				dev_err(emc->dev,
-					"failed to disable auto-cal: %d\n",
-					err);
+					"auto-cal finish timeout: %d\n", err);
 				return err;
 			}
 
@@ -621,9 +677,6 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
 		writel_relaxed(val, emc->regs + EMC_MRS_WAIT_CNT);
 	}
 
-	/* disable interrupt since read access is prohibited after stalling */
-	disable_irq(emc->irq);
-
 	/* this read also completes the writes */
 	val = readl_relaxed(emc->regs + EMC_SEL_DPD_CTRL);
 
@@ -739,20 +792,18 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
 				       emc->regs + EMC_ZQ_CAL);
 	}
 
-	/* re-enable auto-refresh */
-	writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
-		       emc->regs + EMC_REFCTRL);
-
 	/* flow control marker 3 */
 	writel_relaxed(0x1, emc->regs + EMC_UNSTALL_RW_AFTER_CLKCHANGE);
 
+	/*
+	 * Read and discard an arbitrary MC register (Note: EMC registers
+	 * can't be used) to ensure the register writes are completed.
+	 */
+	mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
+
 	reinit_completion(&emc->clk_handshake_complete);
 
-	/* interrupt can be re-enabled now */
-	enable_irq(emc->irq);
-
-	emc->bad_state = false;
-	emc->prepared = true;
+	emc->new_timing = timing;
 
 	return 0;
 }
@@ -760,52 +811,25 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
 static int emc_complete_timing_change(struct tegra_emc *emc,
 				      unsigned long rate)
 {
-	struct emc_timing *timing = emc_find_timing(emc, rate);
 	unsigned long timeout;
-	int ret;
 
 	timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
 					      msecs_to_jiffies(100));
 	if (timeout == 0) {
 		dev_err(emc->dev, "emc-car handshake failed\n");
-		emc->bad_state = true;
 		return -EIO;
 	}
 
-	/* restore auto-calibration */
-	if (emc->vref_cal_toggle)
-		writel_relaxed(timing->emc_auto_cal_interval,
-			       emc->regs + EMC_AUTO_CAL_INTERVAL);
+	if (READ_ONCE(emc->bad_state))
+		return -EIO;
 
-	/* restore dynamic self-refresh */
-	if (timing->emc_cfg_dyn_self_ref) {
-		emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
-		writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
-	}
-
-	/* set number of clocks to wait after each ZQ command */
-	if (emc->zcal_long)
-		writel_relaxed(timing->emc_zcal_cnt_long,
-			       emc->regs + EMC_ZCAL_WAIT_CNT);
-
-	udelay(2);
-	/* update restored timing */
-	ret = emc_seq_update_timing(emc);
-	if (ret)
-		emc->bad_state = true;
-
-	/* restore early ACK */
-	mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
-
-	emc->prepared = false;
-
-	return ret;
+	return 0;
 }
 
 static int emc_unprepare_timing_change(struct tegra_emc *emc,
 				       unsigned long rate)
 {
-	if (emc->prepared && !emc->bad_state) {
+	if (!emc->bad_state) {
 		/* shouldn't ever happen in practice */
 		dev_err(emc->dev, "timing configuration can't be reverted\n");
 		emc->bad_state = true;
@@ -823,7 +847,13 @@ static int emc_clk_change_notify(struct notifier_block *nb,
 
 	switch (msg) {
 	case PRE_RATE_CHANGE:
+		/*
+		 * Disable interrupt since read accesses are prohibited after
+		 * stalling.
+		 */
+		disable_irq(emc->irq);
 		err = emc_prepare_timing_change(emc, cnd->new_rate);
+		enable_irq(emc->irq);
 		break;
 
 	case ABORT_RATE_CHANGE:
@@ -1083,6 +1113,171 @@ static long emc_round_rate(unsigned long rate,
 	return timing->rate;
 }
 
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ *   /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ *   - available_rates: This file contains a list of valid, space-separated
+ *     EMC frequencies.
+ *
+ *   - min_rate: Writing a value to this file sets the given frequency as the
+ *       floor of the permitted range. If this is higher than the currently
+ *       configured EMC frequency, this will cause the frequency to be
+ *       increased so that it stays within the valid range.
+ *
+ *   - max_rate: Similarly to the min_rate file, writing a value to this file
+ *       sets the given frequency as the ceiling of the permitted range. If
+ *       the value is lower than the currently configured EMC frequency, this
+ *       will cause the frequency to be decreased so that it stays within the
+ *       valid range.
+ */
+
+static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
+{
+	unsigned int i;
+
+	for (i = 0; i < emc->num_timings; i++)
+		if (rate == emc->timings[i].rate)
+			return true;
+
+	return false;
+}
+
+static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
+{
+	struct tegra_emc *emc = s->private;
+	const char *prefix = "";
+	unsigned int i;
+
+	for (i = 0; i < emc->num_timings; i++) {
+		seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
+		prefix = " ";
+	}
+
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+static int tegra_emc_debug_available_rates_open(struct inode *inode,
+						struct file *file)
+{
+	return single_open(file, tegra_emc_debug_available_rates_show,
+			   inode->i_private);
+}
+
+static const struct file_operations tegra_emc_debug_available_rates_fops = {
+	.open = tegra_emc_debug_available_rates_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+	struct tegra_emc *emc = data;
+
+	*rate = emc->debugfs.min_rate;
+
+	return 0;
+}
+
+static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+{
+	struct tegra_emc *emc = data;
+	int err;
+
+	if (!tegra_emc_validate_rate(emc, rate))
+		return -EINVAL;
+
+	err = clk_set_min_rate(emc->clk, rate);
+	if (err < 0)
+		return err;
+
+	emc->debugfs.min_rate = rate;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+			tegra_emc_debug_min_rate_get,
+			tegra_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+	struct tegra_emc *emc = data;
+
+	*rate = emc->debugfs.max_rate;
+
+	return 0;
+}
+
+static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+{
+	struct tegra_emc *emc = data;
+	int err;
+
+	if (!tegra_emc_validate_rate(emc, rate))
+		return -EINVAL;
+
+	err = clk_set_max_rate(emc->clk, rate);
+	if (err < 0)
+		return err;
+
+	emc->debugfs.max_rate = rate;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+			tegra_emc_debug_max_rate_get,
+			tegra_emc_debug_max_rate_set, "%llu\n");
+
+static void tegra_emc_debugfs_init(struct tegra_emc *emc)
+{
+	struct device *dev = emc->dev;
+	unsigned int i;
+	int err;
+
+	emc->debugfs.min_rate = ULONG_MAX;
+	emc->debugfs.max_rate = 0;
+
+	for (i = 0; i < emc->num_timings; i++) {
+		if (emc->timings[i].rate < emc->debugfs.min_rate)
+			emc->debugfs.min_rate = emc->timings[i].rate;
+
+		if (emc->timings[i].rate > emc->debugfs.max_rate)
+			emc->debugfs.max_rate = emc->timings[i].rate;
+	}
+
+	err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+				 emc->debugfs.max_rate);
+	if (err < 0) {
+		dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+			emc->debugfs.min_rate, emc->debugfs.max_rate,
+			emc->clk);
+	}
+
+	emc->debugfs.root = debugfs_create_dir("emc", NULL);
+	if (!emc->debugfs.root) {
+		dev_err(emc->dev, "failed to create debugfs directory\n");
+		return;
+	}
+
+	debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
+			    emc, &tegra_emc_debug_available_rates_fops);
+	debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+			    emc, &tegra_emc_debug_min_rate_fops);
+	debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+			    emc, &tegra_emc_debug_max_rate_fops);
+}
+
 static int tegra_emc_probe(struct platform_device *pdev)
 {
 	struct platform_device *mc;
@@ -1169,6 +1364,7 @@ static int tegra_emc_probe(struct platform_device *pdev)
 	}
 
 	platform_set_drvdata(pdev, emc);
+	tegra_emc_debugfs_init(emc);
 
 	return 0;
 
@@ -1181,13 +1377,17 @@ static int tegra_emc_probe(struct platform_device *pdev)
 static int tegra_emc_suspend(struct device *dev)
 {
 	struct tegra_emc *emc = dev_get_drvdata(dev);
+	int err;
 
-	/*
-	 * Suspending in a bad state will hang machine. The "prepared" var
-	 * shall be always false here unless it's a kernel bug that caused
-	 * suspending in a wrong order.
-	 */
-	if (WARN_ON(emc->prepared) || emc->bad_state)
+	/* take exclusive control over the clock's rate */
+	err = clk_rate_exclusive_get(emc->clk);
+	if (err) {
+		dev_err(emc->dev, "failed to acquire clk: %d\n", err);
+		return err;
+	}
+
+	/* suspending in a bad state will hang machine */
+	if (WARN(emc->bad_state, "hardware in a bad state\n"))
 		return -EINVAL;
 
 	emc->bad_state = true;
@@ -1202,6 +1402,8 @@ static int tegra_emc_resume(struct device *dev)
 	emc_setup_hw(emc);
 	emc->bad_state = false;
 
+	clk_rate_exclusive_put(emc->clk);
+
 	return 0;
 }
 
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 4209008..2b20329 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -758,6 +758,7 @@
 	depends on OF || COMPILE_TEST
 	select MFD_CORE
 	select REGMAP_I2C
+	select REGMAP_IRQ
 	help
 	  Say Y here to add support for Maxim Semiconductor MAX77650 and
 	  MAX77651 Power Management ICs. This is the core multifunction
@@ -1065,7 +1066,7 @@
 	  functionality of the device.
 
 config MFD_SEC_CORE
-	tristate "SAMSUNG Electronics PMIC Series Support"
+	tristate "Samsung Electronics PMIC Series Support"
 	depends on I2C=y
 	select MFD_CORE
 	select REGMAP_I2C
@@ -1906,6 +1907,21 @@
 	  10 bits SAR ADC for battery temperature monitor and 1S battery
 	  charger.
 
+config MFD_ROHM_BD71828
+	tristate "ROHM BD71828 Power Management IC"
+	depends on I2C=y
+	depends on OF
+	select REGMAP_I2C
+	select REGMAP_IRQ
+	select MFD_CORE
+	help
+	  Select this option to get support for the ROHM BD71828 Power
+	  Management IC. BD71828GW is a single-chip power management IC for
+	  battery-powered portable devices. The IC integrates 7 buck
+	  converters, 7 LDOs, and a 1500 mA single-cell linear charger.
+	  Also included is a Coulomb counter, a real-time clock (RTC), and
+	  a 32.768 kHz clock gate.
+
 config MFD_STM32_LPTIMER
 	tristate "Support for STM32 Low-Power Timer"
 	depends on (ARCH_STM32 && OF) || COMPILE_TEST
@@ -1960,6 +1976,18 @@
 	  additional drivers must be enabled in order to use the functionality
 	  of the device.
 
+config MFD_WCD934X
+	tristate "Support for WCD9340/WCD9341 Codec"
+	depends on SLIMBUS
+	select REGMAP
+	select REGMAP_SLIMBUS
+	select REGMAP_IRQ
+	select MFD_CORE
+	help
+	  Support for the Qualcomm WCD9340/WCD9341 Codec.
+	  This driver provides common support for the WCD934x audio codec and
+	  its associated Pin Controller, Soundwire Controller and Audio codec.
+
 menu "Multimedia Capabilities Port drivers"
 	depends on ARCH_SA1100
 
@@ -2004,5 +2032,18 @@
 	  Select this to get support for the Supervisory Processor
 	  device found on several devices in RAVE line of hardware.
 
+config SGI_MFD_IOC3
+	tristate "SGI IOC3 core driver"
+	depends on PCI && MIPS && 64BIT
+	select MFD_CORE
+	help
+	  This option enables basic support for the SGI IOC3-based
+	  controller cards.  This option does not enable any specific
+	  functions on such a card, but provides necessary infrastructure
+	  for other drivers to utilize.
+
+	  If you have an SGI Origin, Octane, or a PCI IOC3 card,
+	  then say Y. Otherwise say N.
+
 endmenu
 endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index aed99f0..b83f172 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -58,6 +58,7 @@
 ifeq ($(CONFIG_MFD_CS47L24),y)
 obj-$(CONFIG_MFD_ARIZONA)	+= cs47l24-tables.o
 endif
+obj-$(CONFIG_MFD_WCD934X)	+= wcd934x.o
 obj-$(CONFIG_MFD_WM8400)	+= wm8400-core.o
 wm831x-objs			:= wm831x-core.o wm831x-irq.o wm831x-otp.o
 wm831x-objs			+= wm831x-auxadc.o
@@ -252,6 +253,8 @@
 obj-$(CONFIG_MFD_SC27XX_PMIC)	+= sprd-sc27xx-spi.o
 obj-$(CONFIG_RAVE_SP_CORE)	+= rave-sp.o
 obj-$(CONFIG_MFD_ROHM_BD70528)	+= rohm-bd70528.o
+obj-$(CONFIG_MFD_ROHM_BD71828)	+= rohm-bd71828.o
 obj-$(CONFIG_MFD_ROHM_BD718XX)	+= rohm-bd718x7.o
 obj-$(CONFIG_MFD_STMFX) 	+= stmfx.o
 
+obj-$(CONFIG_SGI_MFD_IOC3)	+= ioc3.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index bafc729..a3bac9d 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -631,8 +631,8 @@ static const struct mfd_cell ab8500_devs[] = {
 		    NULL, NULL, 0, 0, "stericsson,ab8500-ext-regulator"),
 	OF_MFD_CELL("ab8500-regulator",
 		    NULL, NULL, 0, 0, "stericsson,ab8500-regulator"),
-	OF_MFD_CELL("abx500-clk",
-		    NULL, NULL, 0, 0, "stericsson,abx500-clk"),
+	OF_MFD_CELL("ab8500-clk",
+		    NULL, NULL, 0, 0, "stericsson,ab8500-clk"),
 	OF_MFD_CELL("ab8500-gpadc",
 		    NULL, NULL, 0, 0, "stericsson,ab8500-gpadc"),
 	OF_MFD_CELL("ab8500-rtc",
@@ -718,17 +718,20 @@ static const struct mfd_cell ab8505_devs[] = {
 #ifdef CONFIG_DEBUG_FS
 	{
 		.name = "ab8500-debug",
+		.of_compatible = "stericsson,ab8500-debug",
 	},
 #endif
 	{
 		.name = "ab8500-sysctrl",
+		.of_compatible = "stericsson,ab8500-sysctrl",
 	},
 	{
 		.name = "ab8500-regulator",
+		.of_compatible = "stericsson,ab8505-regulator",
 	},
 	{
 		.name = "abx500-clk",
-		.of_compatible = "stericsson,abx500-clk",
+		.of_compatible = "stericsson,ab8500-clk",
 	},
 	{
 		.name = "ab8500-gpadc",
@@ -736,25 +739,32 @@ static const struct mfd_cell ab8505_devs[] = {
 	},
 	{
 		.name = "ab8500-rtc",
+		.of_compatible = "stericsson,ab8500-rtc",
 	},
 	{
 		.name = "ab8500-acc-det",
+		.of_compatible = "stericsson,ab8500-acc-det",
 	},
 	{
 		.name = "ab8500-poweron-key",
+		.of_compatible = "stericsson,ab8500-poweron-key",
 	},
 	{
 		.name = "ab8500-pwm",
+		.of_compatible = "stericsson,ab8500-pwm",
 		.id = 1,
 	},
 	{
 		.name = "pinctrl-ab8505",
+		.of_compatible = "stericsson,ab8505-gpio",
 	},
 	{
 		.name = "ab8500-usb",
+		.of_compatible = "stericsson,ab8500-usb",
 	},
 	{
 		.name = "ab8500-codec",
+		.of_compatible = "stericsson,ab8500-codec",
 	},
 	{
 		.name = "ab-iddet",
@@ -1276,7 +1286,7 @@ static int ab8500_probe(struct platform_device *pdev)
 
 static const struct platform_device_id ab8500_id[] = {
 	{ "ab8500-core", AB8500_VERSION_AB8500 },
-	{ "ab8505-i2c", AB8500_VERSION_AB8505 },
+	{ "ab8505-core", AB8500_VERSION_AB8505 },
 	{ "ab9540-i2c", AB8500_VERSION_AB9540 },
 	{ "ab8540-i2c", AB8500_VERSION_AB8540 },
 	{ }
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index 64013c5..3c2414b 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -19,6 +19,7 @@
 
 struct atmel_hlcdc_regmap {
 	void __iomem *regs;
+	struct device *dev;
 };
 
 static const struct mfd_cell atmel_hlcdc_cells[] = {
@@ -39,10 +40,17 @@ static int regmap_atmel_hlcdc_reg_write(void *context, unsigned int reg,
 
 	if (reg <= ATMEL_HLCDC_DIS) {
 		u32 status;
+		int ret;
 
-		readl_poll_timeout_atomic(hregmap->regs + ATMEL_HLCDC_SR,
-					  status, !(status & ATMEL_HLCDC_SIP),
-					  1, 100);
+		ret = readl_poll_timeout_atomic(hregmap->regs + ATMEL_HLCDC_SR,
+						status,
+						!(status & ATMEL_HLCDC_SIP),
+						1, 100);
+		if (ret) {
+			dev_err(hregmap->dev,
+				"Timeout! Clock domain synchronization is in progress!\n");
+			return ret;
+		}
 	}
 
 	writel(val, hregmap->regs + reg);
@@ -90,6 +98,8 @@ static int atmel_hlcdc_probe(struct platform_device *pdev)
 	if (IS_ERR(hregmap->regs))
 		return PTR_ERR(hregmap->regs);
 
+	hregmap->dev = &pdev->dev;
+
 	hlcdc->irq = platform_get_irq(pdev, 0);
 	if (hlcdc->irq < 0)
 		return hlcdc->irq;
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index a4aaada..aa59496 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -126,7 +126,7 @@ static const struct regmap_range axp288_writeable_ranges[] = {
 static const struct regmap_range axp288_volatile_ranges[] = {
 	regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
 	regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
-	regmap_reg_range(AXP288_BC_DET_STAT, AXP288_BC_DET_STAT),
+	regmap_reg_range(AXP288_BC_DET_STAT, AXP20X_VBUS_IPSOUT_MGMT),
 	regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
 	regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
 	regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index c4b977a..39e6116 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -5,8 +5,8 @@
  * Copyright (C) 2014 Google, Inc.
  */
 
+#include <linux/kconfig.h>
 #include <linux/mfd/core.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
 #include <linux/of_platform.h>
@@ -87,6 +87,10 @@ static const struct mfd_cell cros_usbpd_charger_cells[] = {
 	{ .name = "cros-usbpd-logger", },
 };
 
+static const struct mfd_cell cros_usbpd_notify_cells[] = {
+	{ .name = "cros-usbpd-notify", },
+};
+
 static const struct cros_feature_to_cells cros_subdevices[] = {
 	{
 		.id		= EC_FEATURE_CEC,
@@ -203,6 +207,23 @@ static int ec_device_probe(struct platform_device *pdev)
 	}
 
 	/*
+	 * The PD notifier driver cell is separate since it only needs to be
+	 * explicitly added on platforms that don't have the PD notifier ACPI
+	 * device entry defined.
+	 */
+	if (IS_ENABLED(CONFIG_OF)) {
+		if (cros_ec_check_features(ec, EC_FEATURE_USB_PD)) {
+			retval = mfd_add_hotplug_devices(ec->dev,
+					cros_usbpd_notify_cells,
+					ARRAY_SIZE(cros_usbpd_notify_cells));
+			if (retval)
+				dev_err(ec->dev,
+					"failed to add PD notify devices: %d\n",
+					retval);
+		}
+	}
+
+	/*
 	 * The following subdevices cannot be detected by sending the
 	 * EC_FEATURE_GET_CMD to the Embedded Controller device.
 	 */
diff --git a/drivers/mfd/cs47l15-tables.c b/drivers/mfd/cs47l15-tables.c
index f81b453..3c77f0a 100644
--- a/drivers/mfd/cs47l15-tables.c
+++ b/drivers/mfd/cs47l15-tables.c
@@ -112,6 +112,7 @@ static const struct reg_default cs47l15_reg_default[] = {
 	{ 0x000001dd, 0x0011 }, /* R477 (0x1DD) - FLL AO Control 11 */
 	{ 0x00000218, 0x00e6 }, /* R536 (0x218) - Mic Bias Ctrl 1 */
 	{ 0x0000021c, 0x0222 }, /* R540 (0x21C) - Mic Bias Ctrl 5 */
+	{ 0x00000293, 0x0080 }, /* R659 (0x293) - Accessory Detect Mode 1 */
 	{ 0x00000299, 0x0000 }, /* R665 (0x299) - Headphone Detect 0 */
 	{ 0x0000029b, 0x0000 }, /* R667 (0x29B) - Headphone Detect 1 */
 	{ 0x000002a2, 0x0010 }, /* R674 (0x2A2) - Mic Detect 1 Control 0 */
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index e6962686..419c735 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -233,6 +233,14 @@ static struct resource da9062_onkey_resources[] = {
 	DEFINE_RES_NAMED(DA9062_IRQ_ONKEY, 1, "ONKEY", IORESOURCE_IRQ),
 };
 
+static struct resource da9062_gpio_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_GPI0, 1, "GPI0", IORESOURCE_IRQ),
+	DEFINE_RES_NAMED(DA9062_IRQ_GPI1, 1, "GPI1", IORESOURCE_IRQ),
+	DEFINE_RES_NAMED(DA9062_IRQ_GPI2, 1, "GPI2", IORESOURCE_IRQ),
+	DEFINE_RES_NAMED(DA9062_IRQ_GPI3, 1, "GPI3", IORESOURCE_IRQ),
+	DEFINE_RES_NAMED(DA9062_IRQ_GPI4, 1, "GPI4", IORESOURCE_IRQ),
+};
+
 static const struct mfd_cell da9062_devs[] = {
 	{
 		.name		= "da9062-core",
@@ -248,7 +256,7 @@ static const struct mfd_cell da9062_devs[] = {
 		.name		= "da9062-watchdog",
 		.num_resources	= ARRAY_SIZE(da9062_wdt_resources),
 		.resources	= da9062_wdt_resources,
-		.of_compatible  = "dlg,da9062-wdt",
+		.of_compatible  = "dlg,da9062-watchdog",
 	},
 	{
 		.name		= "da9062-thermal",
@@ -266,7 +274,13 @@ static const struct mfd_cell da9062_devs[] = {
 		.name		= "da9062-onkey",
 		.num_resources	= ARRAY_SIZE(da9062_onkey_resources),
 		.resources	= da9062_onkey_resources,
-		.of_compatible = "dlg,da9062-onkey",
+		.of_compatible	= "dlg,da9062-onkey",
+	},
+	{
+		.name		= "da9062-gpio",
+		.num_resources	= ARRAY_SIZE(da9062_gpio_resources),
+		.resources	= da9062_gpio_resources,
+		.of_compatible	= "dlg,da9062-gpio",
 	},
 };
 
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 57ac58b..0452b43 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -542,102 +542,6 @@ static struct dsiescclk dsiescclk[3] = {
 	}
 };
 
-
-/*
-* Used by MCDE to setup all necessary PRCMU registers
-*/
-#define PRCMU_RESET_DSIPLL		0x00004000
-#define PRCMU_UNCLAMP_DSIPLL		0x00400800
-
-#define PRCMU_CLK_PLL_DIV_SHIFT		0
-#define PRCMU_CLK_PLL_SW_SHIFT		5
-#define PRCMU_CLK_38			(1 << 9)
-#define PRCMU_CLK_38_SRC		(1 << 10)
-#define PRCMU_CLK_38_DIV		(1 << 11)
-
-/* PLLDIV=12, PLLSW=4 (PLLDDR) */
-#define PRCMU_DSI_CLOCK_SETTING		0x0000008C
-
-/* DPI 50000000 Hz */
-#define PRCMU_DPI_CLOCK_SETTING		((1 << PRCMU_CLK_PLL_SW_SHIFT) | \
-					  (16 << PRCMU_CLK_PLL_DIV_SHIFT))
-#define PRCMU_DSI_LP_CLOCK_SETTING	0x00000E00
-
-/* D=101, N=1, R=4, SELDIV2=0 */
-#define PRCMU_PLLDSI_FREQ_SETTING	0x00040165
-
-#define PRCMU_ENABLE_PLLDSI		0x00000001
-#define PRCMU_DISABLE_PLLDSI		0x00000000
-#define PRCMU_RELEASE_RESET_DSS		0x0000400C
-#define PRCMU_DSI_PLLOUT_SEL_SETTING	0x00000202
-/* ESC clk, div0=1, div1=1, div2=3 */
-#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV	0x07030101
-#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV	0x00030101
-#define PRCMU_DSI_RESET_SW		0x00000007
-
-#define PRCMU_PLLDSI_LOCKP_LOCKED	0x3
-
-int db8500_prcmu_enable_dsipll(void)
-{
-	int i;
-
-	/* Clear DSIPLL_RESETN */
-	writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
-	/* Unclamp DSIPLL in/out */
-	writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);
-
-	/* Set DSI PLL FREQ */
-	writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ);
-	writel(PRCMU_DSI_PLLOUT_SEL_SETTING, PRCM_DSI_PLLOUT_SEL);
-	/* Enable Escape clocks */
-	writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
-
-	/* Start DSI PLL */
-	writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
-	/* Reset DSI PLL */
-	writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
-	for (i = 0; i < 10; i++) {
-		if ((readl(PRCM_PLLDSI_LOCKP) & PRCMU_PLLDSI_LOCKP_LOCKED)
-					== PRCMU_PLLDSI_LOCKP_LOCKED)
-			break;
-		udelay(100);
-	}
-	/* Set DSIPLL_RESETN */
-	writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET);
-	return 0;
-}
-
-int db8500_prcmu_disable_dsipll(void)
-{
-	/* Disable dsi pll */
-	writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
-	/* Disable  escapeclock */
-	writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
-	return 0;
-}
-
-int db8500_prcmu_set_display_clocks(void)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&clk_mgt_lock, flags);
-
-	/* Grab the HW semaphore. */
-	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
-		cpu_relax();
-
-	writel(PRCMU_DSI_CLOCK_SETTING, prcmu_base + PRCM_HDMICLK_MGT);
-	writel(PRCMU_DSI_LP_CLOCK_SETTING, prcmu_base + PRCM_TVCLK_MGT);
-	writel(PRCMU_DPI_CLOCK_SETTING, prcmu_base + PRCM_LCDCLK_MGT);
-
-	/* Release the HW semaphore. */
-	writel(0, PRCM_SEM);
-
-	spin_unlock_irqrestore(&clk_mgt_lock, flags);
-
-	return 0;
-}
-
 u32 db8500_prcmu_read(unsigned int reg)
 {
 	return readl(prcmu_base + reg);
@@ -3060,30 +2964,44 @@ static const struct mfd_cell db8500_prcmu_devs[] = {
 static int db8500_prcmu_register_ab8500(struct device *parent)
 {
 	struct device_node *np;
-	struct resource ab8500_resource;
+	struct resource ab850x_resource;
 	const struct mfd_cell ab8500_cell = {
 		.name = "ab8500-core",
 		.of_compatible = "stericsson,ab8500",
 		.id = AB8500_VERSION_AB8500,
-		.resources = &ab8500_resource,
+		.resources = &ab850x_resource,
 		.num_resources = 1,
 	};
+	const struct mfd_cell ab8505_cell = {
+		.name = "ab8505-core",
+		.of_compatible = "stericsson,ab8505",
+		.id = AB8500_VERSION_AB8505,
+		.resources = &ab850x_resource,
+		.num_resources = 1,
+	};
+	const struct mfd_cell *ab850x_cell;
 
 	if (!parent->of_node)
 		return -ENODEV;
 
 	/* Look up the device node, sneak the IRQ out of it */
 	for_each_child_of_node(parent->of_node, np) {
-		if (of_device_is_compatible(np, ab8500_cell.of_compatible))
+		if (of_device_is_compatible(np, ab8500_cell.of_compatible)) {
+			ab850x_cell = &ab8500_cell;
 			break;
+		}
+		if (of_device_is_compatible(np, ab8505_cell.of_compatible)) {
+			ab850x_cell = &ab8505_cell;
+			break;
+		}
 	}
 	if (!np) {
-		dev_info(parent, "could not find AB8500 node in the device tree\n");
+		dev_info(parent, "could not find AB850X node in the device tree\n");
 		return -ENODEV;
 	}
-	of_irq_to_resource_table(np, &ab8500_resource, 1);
+	of_irq_to_resource_table(np, &ab850x_resource, 1);
 
-	return mfd_add_devices(parent, 0, &ab8500_cell, 1, NULL, 0, NULL);
+	return mfd_add_devices(parent, 0, ab850x_cell, 1, NULL, 0, NULL);
 }
 
 /**
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 381593f..7841c11 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -722,6 +722,8 @@ static int dln2_probe(struct usb_interface *interface,
 		      const struct usb_device_id *usb_id)
 {
 	struct usb_host_interface *hostif = interface->cur_altsetting;
+	struct usb_endpoint_descriptor *epin;
+	struct usb_endpoint_descriptor *epout;
 	struct device *dev = &interface->dev;
 	struct dln2_dev *dln2;
 	int ret;
@@ -731,12 +733,19 @@ static int dln2_probe(struct usb_interface *interface,
 	    hostif->desc.bNumEndpoints < 2)
 		return -ENODEV;
 
+	epin = &hostif->endpoint[0].desc;
+	epout = &hostif->endpoint[1].desc;
+	if (!usb_endpoint_is_bulk_out(epout))
+		return -ENODEV;
+	if (!usb_endpoint_is_bulk_in(epin))
+		return -ENODEV;
+
 	dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
 	if (!dln2)
 		return -ENOMEM;
 
-	dln2->ep_out = hostif->endpoint[0].desc.bEndpointAddress;
-	dln2->ep_in = hostif->endpoint[1].desc.bEndpointAddress;
+	dln2->ep_out = epout->bEndpointAddress;
+	dln2->ep_in = epin->bEndpointAddress;
 	dln2->usb_dev = usb_get_dev(interface_to_usbdev(interface));
 	dln2->interface = interface;
 	usb_set_intfdata(interface, dln2);
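
The dln2_probe() hunk above validates that the interface really exposes a bulk-OUT endpoint at index 0 and a bulk-IN endpoint at index 1 before caching their addresses, rather than trusting bNumEndpoints alone. A minimal standalone sketch of the same direction/type test, using a stand-in struct rather than the kernel's usb_endpoint_descriptor:

/* Minimal sketch of the endpoint sanity check added to dln2_probe().
 * The constants follow the USB 2.0 descriptor layout; the struct is a
 * stand-in for struct usb_endpoint_descriptor, not the kernel type.
 */
#include <stdbool.h>
#include <stdio.h>

struct ep_desc {
	unsigned char bEndpointAddress;	/* bit 7: 1 = IN, 0 = OUT */
	unsigned char bmAttributes;	/* bits 1:0: 2 = bulk */
};

static bool ep_is_bulk(const struct ep_desc *ep)
{
	return (ep->bmAttributes & 0x03) == 0x02;
}

static bool ep_is_bulk_in(const struct ep_desc *ep)
{
	return ep_is_bulk(ep) && (ep->bEndpointAddress & 0x80);
}

static bool ep_is_bulk_out(const struct ep_desc *ep)
{
	return ep_is_bulk(ep) && !(ep->bEndpointAddress & 0x80);
}

int main(void)
{
	struct ep_desc in  = { .bEndpointAddress = 0x81, .bmAttributes = 0x02 };
	struct ep_desc out = { .bEndpointAddress = 0x01, .bmAttributes = 0x02 };

	/* Probe would bail out with -ENODEV if either check failed. */
	printf("in ok: %d, out ok: %d\n", ep_is_bulk_in(&in), ep_is_bulk_out(&out));
	return 0;
}
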
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index b33030e..c40a6c7 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -240,6 +240,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
 	{ PCI_VDEVICE(INTEL, 0x4b79), (kernel_ulong_t)&bxt_i2c_info },
 	{ PCI_VDEVICE(INTEL, 0x4b7a), (kernel_ulong_t)&bxt_i2c_info },
 	{ PCI_VDEVICE(INTEL, 0x4b7b), (kernel_ulong_t)&bxt_i2c_info },
+	/* JSL */
+	{ PCI_VDEVICE(INTEL, 0x4da8), (kernel_ulong_t)&spt_uart_info },
+	{ PCI_VDEVICE(INTEL, 0x4da9), (kernel_ulong_t)&spt_uart_info },
+	{ PCI_VDEVICE(INTEL, 0x4daa), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0x4dab), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0x4daf), (kernel_ulong_t)&spt_uart_info },
+	{ PCI_VDEVICE(INTEL, 0x4dc5), (kernel_ulong_t)&bxt_i2c_info },
+	{ PCI_VDEVICE(INTEL, 0x4dc6), (kernel_ulong_t)&bxt_i2c_info },
+	{ PCI_VDEVICE(INTEL, 0x4de8), (kernel_ulong_t)&bxt_i2c_info },
+	{ PCI_VDEVICE(INTEL, 0x4de9), (kernel_ulong_t)&bxt_i2c_info },
+	{ PCI_VDEVICE(INTEL, 0x4dea), (kernel_ulong_t)&bxt_i2c_info },
+	{ PCI_VDEVICE(INTEL, 0x4deb), (kernel_ulong_t)&bxt_i2c_info },
+	{ PCI_VDEVICE(INTEL, 0x4dfb), (kernel_ulong_t)&spt_info },
 	/* APL */
 	{ PCI_VDEVICE(INTEL, 0x5aac), (kernel_ulong_t)&apl_i2c_info },
 	{ PCI_VDEVICE(INTEL, 0x5aae), (kernel_ulong_t)&apl_i2c_info },
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index c9f3537..ddd64f9 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -9,8 +9,6 @@
  */
 
 #include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
-#include <linux/gpio/machine.h>
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -25,20 +23,9 @@
 #define BYT_CRC_HRV		2
 #define CHT_CRC_HRV		3
 
-/* Lookup table for the Panel Enable/Disable line as GPIO signals */
-static struct gpiod_lookup_table panel_gpio_table = {
-	/* Intel GFX is consumer */
-	.dev_id = "0000:00:02.0",
-	.table = {
-		/* Panel EN/DISABLE */
-		GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
-		{ },
-	},
-};
-
 /* PWM consumed by the Intel GFX */
 static struct pwm_lookup crc_pwm_lookup[] = {
-	PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_backlight", 0, PWM_POLARITY_NORMAL),
+	PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL),
 };
 
 static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
@@ -96,9 +83,6 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
 	if (ret)
 		dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret);
 
-	/* Add lookup table binding for Panel Control to the GPIO Chip */
-	gpiod_add_lookup_table(&panel_gpio_table);
-
 	/* Add lookup table for crc-pwm */
 	pwm_add_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
 
@@ -121,9 +105,6 @@ static int intel_soc_pmic_i2c_remove(struct i2c_client *i2c)
 
 	regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
 
-	/* Remove lookup table for Panel Control from the GPIO Chip */
-	gpiod_remove_lookup_table(&panel_gpio_table);
-
 	/* remove crc-pwm lookup table */
 	pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
 
diff --git a/drivers/mfd/ioc3.c b/drivers/mfd/ioc3.c
new file mode 100644
index 0000000..02998d4
--- /dev/null
+++ b/drivers/mfd/ioc3.c
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SGI IOC3 multifunction device driver
+ *
+ * Copyright (C) 2018, 2019 Thomas Bogendoerfer <tbogendoerfer@suse.de>
+ *
+ * Based on work by:
+ *   Stanislaw Skowronek <skylark@unaligned.org>
+ *   Joshua Kinard <kumba@gentoo.org>
+ *   Brent Casavant <bcasavan@sgi.com> - IOC4 master driver
+ *   Pat Gefre <pfg@sgi.com> - IOC3 serial port IRQ demuxer
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/sgi-w1.h>
+#include <linux/rtc/ds1685.h>
+
+#include <asm/pci/bridge.h>
+#include <asm/sn/ioc3.h>
+
+#define IOC3_IRQ_SERIAL_A	6
+#define IOC3_IRQ_SERIAL_B	15
+#define IOC3_IRQ_KBD		22
+
+/* Bitmask for selecting which IRQs are level triggered */
+#define IOC3_LVL_MASK	(BIT(IOC3_IRQ_SERIAL_A) | BIT(IOC3_IRQ_SERIAL_B))
+
+#define M48T35_REG_SIZE	32768	/* size of m48t35 registers */
+
+/* 1.2 us latency timer (40 cycles at 33 MHz) */
+#define IOC3_LATENCY	40
+
+struct ioc3_priv_data {
+	struct irq_domain *domain;
+	struct ioc3 __iomem *regs;
+	struct pci_dev *pdev;
+	int domain_irq;
+};
+
+static void ioc3_irq_ack(struct irq_data *d)
+{
+	struct ioc3_priv_data *ipd = irq_data_get_irq_chip_data(d);
+	unsigned int hwirq = irqd_to_hwirq(d);
+
+	writel(BIT(hwirq), &ipd->regs->sio_ir);
+}
+
+static void ioc3_irq_mask(struct irq_data *d)
+{
+	struct ioc3_priv_data *ipd = irq_data_get_irq_chip_data(d);
+	unsigned int hwirq = irqd_to_hwirq(d);
+
+	writel(BIT(hwirq), &ipd->regs->sio_iec);
+}
+
+static void ioc3_irq_unmask(struct irq_data *d)
+{
+	struct ioc3_priv_data *ipd = irq_data_get_irq_chip_data(d);
+	unsigned int hwirq = irqd_to_hwirq(d);
+
+	writel(BIT(hwirq), &ipd->regs->sio_ies);
+}
+
+static struct irq_chip ioc3_irq_chip = {
+	.name		= "IOC3",
+	.irq_ack	= ioc3_irq_ack,
+	.irq_mask	= ioc3_irq_mask,
+	.irq_unmask	= ioc3_irq_unmask,
+};
+
+static int ioc3_irq_domain_map(struct irq_domain *d, unsigned int irq,
+			      irq_hw_number_t hwirq)
+{
+	/* Set level IRQs for every interrupt contained in IOC3_LVL_MASK */
+	if (BIT(hwirq) & IOC3_LVL_MASK)
+		irq_set_chip_and_handler(irq, &ioc3_irq_chip, handle_level_irq);
+	else
+		irq_set_chip_and_handler(irq, &ioc3_irq_chip, handle_edge_irq);
+
+	irq_set_chip_data(irq, d->host_data);
+	return 0;
+}
+
+static void ioc3_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
+{
+	irq_set_chip_and_handler(irq, NULL, NULL);
+	irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops ioc3_irq_domain_ops = {
+	.map = ioc3_irq_domain_map,
+	.unmap = ioc3_irq_domain_unmap,
+};
+
+static void ioc3_irq_handler(struct irq_desc *desc)
+{
+	struct irq_domain *domain = irq_desc_get_handler_data(desc);
+	struct ioc3_priv_data *ipd = domain->host_data;
+	struct ioc3 __iomem *regs = ipd->regs;
+	u32 pending, mask;
+	unsigned int irq;
+
+	pending = readl(&regs->sio_ir);
+	mask = readl(&regs->sio_ies);
+	pending &= mask; /* Mask off interrupts that are not enabled */
+
+	if (pending) {
+		irq = irq_find_mapping(domain, __ffs(pending));
+		if (irq)
+			generic_handle_irq(irq);
+	} else {
+		spurious_interrupt();
+	}
+}
+
+/*
+ * System boards/BaseIOs use more interrupt pins of the bridge ASIC
+ * to which the IOC3 is connected. Since the IOC3 MFD driver
+ * knows the wiring of these extra pins, we use the map_irq function
+ * to get the interrupts activated.
+ */
+static int ioc3_map_irq(struct pci_dev *pdev, int slot, int pin)
+{
+	struct pci_host_bridge *hbrg = pci_find_host_bridge(pdev->bus);
+
+	return hbrg->map_irq(pdev, slot, pin);
+}
+
+static int ioc3_irq_domain_setup(struct ioc3_priv_data *ipd, int irq)
+{
+	struct irq_domain *domain;
+	struct fwnode_handle *fn;
+
+	fn = irq_domain_alloc_named_fwnode("IOC3");
+	if (!fn)
+		goto err;
+
+	domain = irq_domain_create_linear(fn, 24, &ioc3_irq_domain_ops, ipd);
+	if (!domain)
+		goto err;
+
+	irq_domain_free_fwnode(fn);
+	ipd->domain = domain;
+
+	irq_set_chained_handler_and_data(irq, ioc3_irq_handler, domain);
+	ipd->domain_irq = irq;
+	return 0;
+
+err:
+	dev_err(&ipd->pdev->dev, "irq domain setup failed\n");
+	return -ENOMEM;
+}
+
+static struct resource ioc3_uarta_resources[] = {
+	DEFINE_RES_MEM(offsetof(struct ioc3, sregs.uarta),
+		       sizeof_field(struct ioc3, sregs.uarta)),
+	DEFINE_RES_IRQ(IOC3_IRQ_SERIAL_A)
+};
+
+static struct resource ioc3_uartb_resources[] = {
+	DEFINE_RES_MEM(offsetof(struct ioc3, sregs.uartb),
+		       sizeof_field(struct ioc3, sregs.uartb)),
+	DEFINE_RES_IRQ(IOC3_IRQ_SERIAL_B)
+};
+
+static struct mfd_cell ioc3_serial_cells[] = {
+	{
+		.name = "ioc3-serial8250",
+		.resources = ioc3_uarta_resources,
+		.num_resources = ARRAY_SIZE(ioc3_uarta_resources),
+	},
+	{
+		.name = "ioc3-serial8250",
+		.resources = ioc3_uartb_resources,
+		.num_resources = ARRAY_SIZE(ioc3_uartb_resources),
+	}
+};
+
+static int ioc3_serial_setup(struct ioc3_priv_data *ipd)
+{
+	int ret;
+
+	/* Set gpio pins for RS232/RS422 mode selection */
+	writel(GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL,
+		&ipd->regs->gpcr_s);
+	/* Select RS232 mode for uart a */
+	writel(0, &ipd->regs->gppr[6]);
+	/* Select RS232 mode for uart b */
+	writel(0, &ipd->regs->gppr[7]);
+
+	/* Switch both ports to 16550 mode */
+	writel(readl(&ipd->regs->port_a.sscr) & ~SSCR_DMA_EN,
+	       &ipd->regs->port_a.sscr);
+	writel(readl(&ipd->regs->port_b.sscr) & ~SSCR_DMA_EN,
+	       &ipd->regs->port_b.sscr);
+	udelay(1000); /* Wait until mode switch is done */
+
+	ret = mfd_add_devices(&ipd->pdev->dev, PLATFORM_DEVID_AUTO,
+			      ioc3_serial_cells, ARRAY_SIZE(ioc3_serial_cells),
+			      &ipd->pdev->resource[0], 0, ipd->domain);
+	if (ret) {
+		dev_err(&ipd->pdev->dev, "Failed to add 16550 subdevs\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct resource ioc3_kbd_resources[] = {
+	DEFINE_RES_MEM(offsetof(struct ioc3, serio),
+		       sizeof_field(struct ioc3, serio)),
+	DEFINE_RES_IRQ(IOC3_IRQ_KBD)
+};
+
+static struct mfd_cell ioc3_kbd_cells[] = {
+	{
+		.name = "ioc3-kbd",
+		.resources = ioc3_kbd_resources,
+		.num_resources = ARRAY_SIZE(ioc3_kbd_resources),
+	}
+};
+
+static int ioc3_kbd_setup(struct ioc3_priv_data *ipd)
+{
+	int ret;
+
+	ret = mfd_add_devices(&ipd->pdev->dev, PLATFORM_DEVID_AUTO,
+			      ioc3_kbd_cells, ARRAY_SIZE(ioc3_kbd_cells),
+			      &ipd->pdev->resource[0], 0, ipd->domain);
+	if (ret) {
+		dev_err(&ipd->pdev->dev, "Failed to add KBD subdev\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct resource ioc3_eth_resources[] = {
+	DEFINE_RES_MEM(offsetof(struct ioc3, eth),
+		       sizeof_field(struct ioc3, eth)),
+	DEFINE_RES_MEM(offsetof(struct ioc3, ssram),
+		       sizeof_field(struct ioc3, ssram)),
+	DEFINE_RES_IRQ(0)
+};
+
+static struct resource ioc3_w1_resources[] = {
+	DEFINE_RES_MEM(offsetof(struct ioc3, mcr),
+		       sizeof_field(struct ioc3, mcr)),
+};
+static struct sgi_w1_platform_data ioc3_w1_platform_data;
+
+static struct mfd_cell ioc3_eth_cells[] = {
+	{
+		.name = "ioc3-eth",
+		.resources = ioc3_eth_resources,
+		.num_resources = ARRAY_SIZE(ioc3_eth_resources),
+	},
+	{
+		.name = "sgi_w1",
+		.resources = ioc3_w1_resources,
+		.num_resources = ARRAY_SIZE(ioc3_w1_resources),
+		.platform_data = &ioc3_w1_platform_data,
+		.pdata_size = sizeof(ioc3_w1_platform_data),
+	}
+};
+
+static int ioc3_eth_setup(struct ioc3_priv_data *ipd)
+{
+	int ret;
+
+	/* Enable One-Wire bus */
+	writel(GPCR_MLAN_EN, &ipd->regs->gpcr_s);
+
+	/* Generate unique identifier */
+	snprintf(ioc3_w1_platform_data.dev_id,
+		 sizeof(ioc3_w1_platform_data.dev_id), "ioc3-%012llx",
+		 ipd->pdev->resource->start);
+
+	ret = mfd_add_devices(&ipd->pdev->dev, PLATFORM_DEVID_AUTO,
+			      ioc3_eth_cells, ARRAY_SIZE(ioc3_eth_cells),
+			      &ipd->pdev->resource[0], ipd->pdev->irq, NULL);
+	if (ret) {
+		dev_err(&ipd->pdev->dev, "Failed to add ETH/W1 subdev\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct resource ioc3_m48t35_resources[] = {
+	DEFINE_RES_MEM(IOC3_BYTEBUS_DEV0, M48T35_REG_SIZE)
+};
+
+static struct mfd_cell ioc3_m48t35_cells[] = {
+	{
+		.name = "rtc-m48t35",
+		.resources = ioc3_m48t35_resources,
+		.num_resources = ARRAY_SIZE(ioc3_m48t35_resources),
+	}
+};
+
+static int ioc3_m48t35_setup(struct ioc3_priv_data *ipd)
+{
+	int ret;
+
+	ret = mfd_add_devices(&ipd->pdev->dev, PLATFORM_DEVID_AUTO,
+			      ioc3_m48t35_cells, ARRAY_SIZE(ioc3_m48t35_cells),
+			      &ipd->pdev->resource[0], 0, ipd->domain);
+	if (ret)
+		dev_err(&ipd->pdev->dev, "Failed to add M48T35 subdev\n");
+
+	return ret;
+}
+
+static struct ds1685_rtc_platform_data ip30_rtc_platform_data = {
+	.bcd_mode = false,
+	.no_irq = false,
+	.uie_unsupported = true,
+	.access_type = ds1685_reg_indirect,
+};
+
+static struct resource ioc3_rtc_ds1685_resources[] = {
+	DEFINE_RES_MEM(IOC3_BYTEBUS_DEV1, 1),
+	DEFINE_RES_MEM(IOC3_BYTEBUS_DEV2, 1),
+	DEFINE_RES_IRQ(0)
+};
+
+static struct mfd_cell ioc3_ds1685_cells[] = {
+	{
+		.name = "rtc-ds1685",
+		.resources = ioc3_rtc_ds1685_resources,
+		.num_resources = ARRAY_SIZE(ioc3_rtc_ds1685_resources),
+		.platform_data = &ip30_rtc_platform_data,
+		.pdata_size = sizeof(ip30_rtc_platform_data),
+		.id = PLATFORM_DEVID_NONE,
+	}
+};
+
+static int ioc3_ds1685_setup(struct ioc3_priv_data *ipd)
+{
+	int ret, irq;
+
+	irq = ioc3_map_irq(ipd->pdev, 6, 0);
+
+	ret = mfd_add_devices(&ipd->pdev->dev, 0, ioc3_ds1685_cells,
+			      ARRAY_SIZE(ioc3_ds1685_cells),
+			      &ipd->pdev->resource[0], irq, NULL);
+	if (ret)
+		dev_err(&ipd->pdev->dev, "Failed to add DS1685 subdev\n");
+
+	return ret;
+}
+
+
+static struct resource ioc3_leds_resources[] = {
+	DEFINE_RES_MEM(offsetof(struct ioc3, gppr[0]),
+		       sizeof_field(struct ioc3, gppr[0])),
+	DEFINE_RES_MEM(offsetof(struct ioc3, gppr[1]),
+		       sizeof_field(struct ioc3, gppr[1])),
+};
+
+static struct mfd_cell ioc3_led_cells[] = {
+	{
+		.name = "ip30-leds",
+		.resources = ioc3_leds_resources,
+		.num_resources = ARRAY_SIZE(ioc3_leds_resources),
+		.id = PLATFORM_DEVID_NONE,
+	}
+};
+
+static int ioc3_led_setup(struct ioc3_priv_data *ipd)
+{
+	int ret;
+
+	ret = mfd_add_devices(&ipd->pdev->dev, 0, ioc3_led_cells,
+			      ARRAY_SIZE(ioc3_led_cells),
+			      &ipd->pdev->resource[0], 0, ipd->domain);
+	if (ret)
+		dev_err(&ipd->pdev->dev, "Failed to add LED subdev\n");
+
+	return ret;
+}
+
+static int ip27_baseio_setup(struct ioc3_priv_data *ipd)
+{
+	int ret, io_irq;
+
+	io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+			      PCI_INTERRUPT_INTB);
+	ret = ioc3_irq_domain_setup(ipd, io_irq);
+	if (ret)
+		return ret;
+
+	ret = ioc3_eth_setup(ipd);
+	if (ret)
+		return ret;
+
+	ret = ioc3_serial_setup(ipd);
+	if (ret)
+		return ret;
+
+	return ioc3_m48t35_setup(ipd);
+}
+
+static int ip27_baseio6g_setup(struct ioc3_priv_data *ipd)
+{
+	int ret, io_irq;
+
+	io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+			      PCI_INTERRUPT_INTB);
+	ret = ioc3_irq_domain_setup(ipd, io_irq);
+	if (ret)
+		return ret;
+
+	ret = ioc3_eth_setup(ipd);
+	if (ret)
+		return ret;
+
+	ret = ioc3_serial_setup(ipd);
+	if (ret)
+		return ret;
+
+	ret = ioc3_m48t35_setup(ipd);
+	if (ret)
+		return ret;
+
+	return ioc3_kbd_setup(ipd);
+}
+
+static int ip27_mio_setup(struct ioc3_priv_data *ipd)
+{
+	int ret;
+
+	ret = ioc3_irq_domain_setup(ipd, ipd->pdev->irq);
+	if (ret)
+		return ret;
+
+	ret = ioc3_serial_setup(ipd);
+	if (ret)
+		return ret;
+
+	return ioc3_kbd_setup(ipd);
+}
+
+static int ip30_sysboard_setup(struct ioc3_priv_data *ipd)
+{
+	int ret, io_irq;
+
+	io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+			      PCI_INTERRUPT_INTB);
+	ret = ioc3_irq_domain_setup(ipd, io_irq);
+	if (ret)
+		return ret;
+
+	ret = ioc3_eth_setup(ipd);
+	if (ret)
+		return ret;
+
+	ret = ioc3_serial_setup(ipd);
+	if (ret)
+		return ret;
+
+	ret = ioc3_kbd_setup(ipd);
+	if (ret)
+		return ret;
+
+	ret = ioc3_ds1685_setup(ipd);
+	if (ret)
+		return ret;
+
+	return ioc3_led_setup(ipd);
+}
+
+static int ioc3_menet_setup(struct ioc3_priv_data *ipd)
+{
+	int ret, io_irq;
+
+	io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+			      PCI_INTERRUPT_INTB);
+	ret = ioc3_irq_domain_setup(ipd, io_irq);
+	if (ret)
+		return ret;
+
+	ret = ioc3_eth_setup(ipd);
+	if (ret)
+		return ret;
+
+	return ioc3_serial_setup(ipd);
+}
+
+static int ioc3_menet4_setup(struct ioc3_priv_data *ipd)
+{
+	return ioc3_eth_setup(ipd);
+}
+
+static int ioc3_cad_duo_setup(struct ioc3_priv_data *ipd)
+{
+	int ret, io_irq;
+
+	io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+			      PCI_INTERRUPT_INTB);
+	ret = ioc3_irq_domain_setup(ipd, io_irq);
+	if (ret)
+		return ret;
+
+	ret = ioc3_eth_setup(ipd);
+	if (ret)
+		return ret;
+
+	return ioc3_kbd_setup(ipd);
+}
+
+/* Helper macro for filling the ioc3_infos array */
+#define IOC3_SID(_name, _sid, _setup) \
+	{								   \
+		.name = _name,						   \
+		.sid = PCI_VENDOR_ID_SGI | (IOC3_SUBSYS_ ## _sid << 16),   \
+		.setup = _setup,					   \
+	}
+
+static struct {
+	const char *name;
+	u32 sid;
+	int (*setup)(struct ioc3_priv_data *ipd);
+} ioc3_infos[] = {
+	IOC3_SID("IP27 BaseIO6G", IP27_BASEIO6G, &ip27_baseio6g_setup),
+	IOC3_SID("IP27 MIO", IP27_MIO, &ip27_mio_setup),
+	IOC3_SID("IP27 BaseIO", IP27_BASEIO, &ip27_baseio_setup),
+	IOC3_SID("IP29 System Board", IP29_SYSBOARD, &ip27_baseio6g_setup),
+	IOC3_SID("IP30 System Board", IP30_SYSBOARD, &ip30_sysboard_setup),
+	IOC3_SID("MENET", MENET, &ioc3_menet_setup),
+	IOC3_SID("MENET4", MENET4, &ioc3_menet4_setup)
+};
+#undef IOC3_SID
+
+static int ioc3_setup(struct ioc3_priv_data *ipd)
+{
+	u32 sid;
+	int i;
+
+	/* Clear IRQs */
+	writel(~0, &ipd->regs->sio_iec);
+	writel(~0, &ipd->regs->sio_ir);
+	writel(0, &ipd->regs->eth.eier);
+	writel(~0, &ipd->regs->eth.eisr);
+
+	/* Read subsystem vendor id and subsystem id */
+	pci_read_config_dword(ipd->pdev, PCI_SUBSYSTEM_VENDOR_ID, &sid);
+
+	for (i = 0; i < ARRAY_SIZE(ioc3_infos); i++)
+		if (sid == ioc3_infos[i].sid) {
+			pr_info("ioc3: %s\n", ioc3_infos[i].name);
+			return ioc3_infos[i].setup(ipd);
+		}
+
+	/* Treat everything not identified by PCI subid as CAD DUO */
+	pr_info("ioc3: CAD DUO\n");
+	return ioc3_cad_duo_setup(ipd);
+}
+
+static int ioc3_mfd_probe(struct pci_dev *pdev,
+			  const struct pci_device_id *pci_id)
+{
+	struct ioc3_priv_data *ipd;
+	struct ioc3 __iomem *regs;
+	int ret;
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, IOC3_LATENCY);
+	pci_set_master(pdev);
+
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (ret) {
+		pr_err("%s: No usable DMA configuration, aborting.\n",
+		       pci_name(pdev));
+		goto out_disable_device;
+	}
+
+	/* Set up per-IOC3 data */
+	ipd = devm_kzalloc(&pdev->dev, sizeof(struct ioc3_priv_data),
+			   GFP_KERNEL);
+	if (!ipd) {
+		ret = -ENOMEM;
+		goto out_disable_device;
+	}
+	ipd->pdev = pdev;
+
+	/*
+	 * Map all IOC3 registers.  These are shared between subdevices
+	 * so the main IOC3 module manages them.
+	 */
+	regs = pci_ioremap_bar(pdev, 0);
+	if (!regs) {
+		dev_warn(&pdev->dev, "ioc3: Unable to remap PCI BAR for %s.\n",
+			 pci_name(pdev));
+		ret = -ENOMEM;
+		goto out_disable_device;
+	}
+	ipd->regs = regs;
+
+	/* Track PCI-device specific data */
+	pci_set_drvdata(pdev, ipd);
+
+	ret = ioc3_setup(ipd);
+	if (ret) {
+		/* Remove all already added MFD devices */
+		mfd_remove_devices(&ipd->pdev->dev);
+		if (ipd->domain) {
+			irq_domain_remove(ipd->domain);
+			free_irq(ipd->domain_irq, (void *)ipd);
+		}
+		pci_iounmap(pdev, regs);
+		goto out_disable_device;
+	}
+
+	return 0;
+
+out_disable_device:
+	pci_disable_device(pdev);
+	return ret;
+}
+
+static void ioc3_mfd_remove(struct pci_dev *pdev)
+{
+	struct ioc3_priv_data *ipd;
+
+	ipd = pci_get_drvdata(pdev);
+
+	/* Clear and disable all IRQs */
+	writel(~0, &ipd->regs->sio_iec);
+	writel(~0, &ipd->regs->sio_ir);
+
+	/* Release resources */
+	mfd_remove_devices(&ipd->pdev->dev);
+	if (ipd->domain) {
+		irq_domain_remove(ipd->domain);
+		free_irq(ipd->domain_irq, (void *)ipd);
+	}
+	pci_iounmap(pdev, ipd->regs);
+	pci_disable_device(pdev);
+}
+
+static struct pci_device_id ioc3_mfd_id_table[] = {
+	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
+	{ 0, },
+};
+MODULE_DEVICE_TABLE(pci, ioc3_mfd_id_table);
+
+static struct pci_driver ioc3_mfd_driver = {
+	.name = "IOC3",
+	.id_table = ioc3_mfd_id_table,
+	.probe = ioc3_mfd_probe,
+	.remove = ioc3_mfd_remove,
+};
+
+module_pci_driver(ioc3_mfd_driver);
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfer@suse.de>");
+MODULE_DESCRIPTION("SGI IOC3 MFD driver");
+MODULE_LICENSE("GPL v2");
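
ioc3_irq_handler() above demultiplexes the shared PCI interrupt by masking the raw status (sio_ir) with the currently enabled sources (sio_ies) and dispatching the lowest pending bit through the linear IRQ domain. A self-contained sketch of that demux step, with a hypothetical handler table standing in for irq_find_mapping()/generic_handle_irq():

/* Sketch of the sio_ir/sio_ies demux used by ioc3_irq_handler().
 * The register values and handler table are stand-ins, not kernel APIs.
 */
#include <stdio.h>
#include <stdint.h>

#define IOC3_NR_IRQS 24

typedef void (*irq_fn)(unsigned int hwirq);

static void serial_a_handler(unsigned int hwirq)
{
	printf("handled hwirq %u\n", hwirq);
}

static irq_fn handlers[IOC3_NR_IRQS] = {
	[6] = serial_a_handler,		/* IOC3_IRQ_SERIAL_A */
};

static void demux(uint32_t sio_ir, uint32_t sio_ies)
{
	uint32_t pending = sio_ir & sio_ies;	/* drop disabled sources */

	if (!pending)
		return;				/* spurious interrupt */

	/* __builtin_ctz mirrors __ffs(): index of the lowest set bit */
	unsigned int hwirq = __builtin_ctz(pending);

	if (hwirq < IOC3_NR_IRQS && handlers[hwirq])
		handlers[hwirq](hwirq);
}

int main(void)
{
	demux(0x00000040, 0x00008040);	/* serial A pending and enabled */
	return 0;
}
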
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index a8cfadc..7e0835c 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -35,6 +35,9 @@
 
 #define MADERA_32KZ_MCLK2	1
 
+#define MADERA_RESET_MIN_US	2000
+#define MADERA_RESET_MAX_US	3000
+
 static const char * const madera_core_supplies[] = {
 	"AVDD",
 	"DBVDD1",
@@ -199,7 +202,7 @@ EXPORT_SYMBOL_GPL(madera_name_from_type);
 #define MADERA_BOOT_POLL_INTERVAL_USEC		5000
 #define MADERA_BOOT_POLL_TIMEOUT_USEC		25000
 
-static int madera_wait_for_boot(struct madera *madera)
+static int madera_wait_for_boot_noack(struct madera *madera)
 {
 	ktime_t timeout;
 	unsigned int val = 0;
@@ -226,6 +229,13 @@ static int madera_wait_for_boot(struct madera *madera)
 		ret = -ETIMEDOUT;
 	}
 
+	return ret;
+}
+
+static int madera_wait_for_boot(struct madera *madera)
+{
+	int ret = madera_wait_for_boot_noack(madera);
+
 	/*
 	 * BOOT_DONE defaults to unmasked on boot so we must ack it.
 	 * Do this even after a timeout to avoid interrupt storms.
@@ -249,16 +259,13 @@ static int madera_soft_reset(struct madera *madera)
 	}
 
 	/* Allow time for internal clocks to startup after reset */
-	usleep_range(1000, 2000);
+	usleep_range(MADERA_RESET_MIN_US, MADERA_RESET_MAX_US);
 
 	return 0;
 }
 
 static void madera_enable_hard_reset(struct madera *madera)
 {
-	if (!madera->pdata.reset)
-		return;
-
 	/*
 	 * There are many existing out-of-tree users of these codecs that we
 	 * can't break so preserve the expected behaviour of setting the line
@@ -269,11 +276,9 @@ static void madera_enable_hard_reset(struct madera *madera)
 
 static void madera_disable_hard_reset(struct madera *madera)
 {
-	if (!madera->pdata.reset)
-		return;
-
 	gpiod_set_raw_value_cansleep(madera->pdata.reset, 1);
-	usleep_range(1000, 2000);
+
+	usleep_range(MADERA_RESET_MIN_US, MADERA_RESET_MAX_US);
 }
 
 static int __maybe_unused madera_runtime_resume(struct device *dev)
@@ -292,6 +297,8 @@ static int __maybe_unused madera_runtime_resume(struct device *dev)
 	regcache_cache_only(madera->regmap, false);
 	regcache_cache_only(madera->regmap_32bit, false);
 
+	usleep_range(MADERA_RESET_MIN_US, MADERA_RESET_MAX_US);
+
 	ret = madera_wait_for_boot(madera);
 	if (ret)
 		goto err;
@@ -545,6 +552,12 @@ int madera_dev_init(struct madera *madera)
 	regcache_cache_only(madera->regmap, false);
 	regcache_cache_only(madera->regmap_32bit, false);
 
+	ret = madera_wait_for_boot_noack(madera);
+	if (ret) {
+		dev_err(madera->dev, "Device failed initial boot: %d\n", ret);
+		goto err_reset;
+	}
+
 	/*
 	 * Now we can power up and verify that this is a chip we know about
 	 * before we start doing any writes to its registers.
@@ -650,7 +663,7 @@ int madera_dev_init(struct madera *madera)
 
 	ret = madera_wait_for_boot(madera);
 	if (ret) {
-		dev_err(madera->dev, "Device failed initial boot: %d\n", ret);
+		dev_err(madera->dev, "Failed to clear boot done: %d\n", ret);
 		goto err_reset;
 	}
 
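
Splitting madera_wait_for_boot() into madera_wait_for_boot_noack() plus an acking wrapper lets madera_dev_init() poll for BOOT_DONE early in probe without touching the acknowledge path. A simplified standalone sketch of the underlying poll-with-timeout pattern; read_boot_done() and the 5 ms interval / five attempts are stand-ins for the regmap read and the MADERA_BOOT_POLL_* constants (the driver uses a ktime deadline rather than a retry count):

/* Generic poll-until-set-or-give-up loop, mirroring the BOOT_DONE wait
 * that was split out into madera_wait_for_boot_noack().
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool read_boot_done(void)
{
	return true;	/* stand-in: pretend BOOT_DONE is already set */
}

static int wait_for_boot_noack(void)
{
	const struct timespec interval = { .tv_nsec = 5 * 1000 * 1000 };

	for (int attempt = 0; attempt < 5; attempt++) {	/* ~25 ms total */
		if (read_boot_done())
			return 0;
		nanosleep(&interval, NULL);
	}
	return -1;	/* -ETIMEDOUT in the driver */
}

int main(void)
{
	/* The ack of the BOOT_DONE IRQ status would follow this call. */
	printf("wait_for_boot_noack() = %d\n", wait_for_boot_noack());
	return 0;
}
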
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index da5cd9c..ead2e79 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -26,6 +26,7 @@ static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg)
 	case RN5T618_WATCHDOGCNT:
 	case RN5T618_DCIRQ:
 	case RN5T618_ILIMDATAH ... RN5T618_AIN0DATAL:
+	case RN5T618_ADCCNT3:
 	case RN5T618_IR_ADC1 ... RN5T618_IR_ADC3:
 	case RN5T618_IR_GPR:
 	case RN5T618_IR_GPF:
diff --git a/drivers/mfd/rohm-bd70528.c b/drivers/mfd/rohm-bd70528.c
index ef6786f..5c44d3b 100644
--- a/drivers/mfd/rohm-bd70528.c
+++ b/drivers/mfd/rohm-bd70528.c
@@ -48,7 +48,7 @@ static struct mfd_cell bd70528_mfd_cells[] = {
 	 * We use BD71837 driver to drive the clock block. Only differences to
 	 * BD70528 clock gate are the register address and mask.
 	 */
-	{ .name = "bd718xx-clk", },
+	{ .name = "bd70528-clk", },
 	{ .name = "bd70528-wdt", },
 	{
 		.name = "bd70528-power",
@@ -236,7 +236,6 @@ static int bd70528_i2c_probe(struct i2c_client *i2c,
 
 	dev_set_drvdata(&i2c->dev, &bd70528->chip);
 
-	bd70528->chip.chip_type = ROHM_CHIP_TYPE_BD70528;
 	bd70528->chip.regmap = devm_regmap_init_i2c(i2c, &bd70528_regmap);
 	if (IS_ERR(bd70528->chip.regmap)) {
 		dev_err(&i2c->dev, "Failed to initialize Regmap\n");
diff --git a/drivers/mfd/rohm-bd71828.c b/drivers/mfd/rohm-bd71828.c
new file mode 100644
index 0000000..210261d
--- /dev/null
+++ b/drivers/mfd/rohm-bd71828.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (C) 2019 ROHM Semiconductors
+//
+// ROHM BD71828 PMIC driver
+
+#include <linux/gpio_keys.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/rohm-bd71828.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+static struct gpio_keys_button button = {
+	.code = KEY_POWER,
+	.gpio = -1,
+	.type = EV_KEY,
+};
+
+static struct gpio_keys_platform_data bd71828_powerkey_data = {
+	.buttons = &button,
+	.nbuttons = 1,
+	.name = "bd71828-pwrkey",
+};
+
+static const struct resource rtc_irqs[] = {
+	DEFINE_RES_IRQ_NAMED(BD71828_INT_RTC0, "bd71828-rtc-alm-0"),
+	DEFINE_RES_IRQ_NAMED(BD71828_INT_RTC1, "bd71828-rtc-alm-1"),
+	DEFINE_RES_IRQ_NAMED(BD71828_INT_RTC2, "bd71828-rtc-alm-2"),
+};
+
+static struct mfd_cell bd71828_mfd_cells[] = {
+	{ .name = "bd71828-pmic", },
+	{ .name = "bd71828-gpio", },
+	{ .name = "bd71828-led", .of_compatible = "rohm,bd71828-leds" },
+	/*
+	 * We use the BD71837 driver to drive the clock block. The only differences
+	 * from the BD70528 clock gate are the register address and mask.
+	 */
+	{ .name = "bd71828-clk", },
+	{ .name = "bd71827-power", },
+	{
+		.name = "bd71828-rtc",
+		.resources = rtc_irqs,
+		.num_resources = ARRAY_SIZE(rtc_irqs),
+	}, {
+		.name = "gpio-keys",
+		.platform_data = &bd71828_powerkey_data,
+		.pdata_size = sizeof(bd71828_powerkey_data),
+	},
+};
+
+static const struct regmap_range volatile_ranges[] = {
+	{
+		.range_min = BD71828_REG_PS_CTRL_1,
+		.range_max = BD71828_REG_PS_CTRL_1,
+	}, {
+		.range_min = BD71828_REG_PS_CTRL_3,
+		.range_max = BD71828_REG_PS_CTRL_3,
+	}, {
+		.range_min = BD71828_REG_RTC_SEC,
+		.range_max = BD71828_REG_RTC_YEAR,
+	}, {
+		/*
+		 * For now make all charger registers volatile because many
+		 * need to be and because the charger block is not that
+		 * performance critical.
+		 */
+		.range_min = BD71828_REG_CHG_STATE,
+		.range_max = BD71828_REG_CHG_FULL,
+	}, {
+		.range_min = BD71828_REG_INT_MAIN,
+		.range_max = BD71828_REG_IO_STAT,
+	},
+};
+
+static const struct regmap_access_table volatile_regs = {
+	.yes_ranges = &volatile_ranges[0],
+	.n_yes_ranges = ARRAY_SIZE(volatile_ranges),
+};
+
+static struct regmap_config bd71828_regmap = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.volatile_table = &volatile_regs,
+	.max_register = BD71828_MAX_REGISTER,
+	.cache_type = REGCACHE_RBTREE,
+};
+
+/*
+ * Mapping of main IRQ register bits to sub-IRQ register offsets so that we can
+ * access the correct sub-IRQ registers based on bits that are set in the
+ * main IRQ register.
+ */
+
+static unsigned int bit0_offsets[] = {11};		/* RTC IRQ */
+static unsigned int bit1_offsets[] = {10};		/* TEMP IRQ */
+static unsigned int bit2_offsets[] = {6, 7, 8, 9};	/* BAT MON IRQ */
+static unsigned int bit3_offsets[] = {5};		/* BAT IRQ */
+static unsigned int bit4_offsets[] = {4};		/* CHG IRQ */
+static unsigned int bit5_offsets[] = {3};		/* VSYS IRQ */
+static unsigned int bit6_offsets[] = {1, 2};		/* DCIN IRQ */
+static unsigned int bit7_offsets[] = {0};		/* BUCK IRQ */
+
+static struct regmap_irq_sub_irq_map bd71828_sub_irq_offsets[] = {
+	REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
+	REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
+	REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets),
+	REGMAP_IRQ_MAIN_REG_OFFSET(bit3_offsets),
+	REGMAP_IRQ_MAIN_REG_OFFSET(bit4_offsets),
+	REGMAP_IRQ_MAIN_REG_OFFSET(bit5_offsets),
+	REGMAP_IRQ_MAIN_REG_OFFSET(bit6_offsets),
+	REGMAP_IRQ_MAIN_REG_OFFSET(bit7_offsets),
+};
+
+static struct regmap_irq bd71828_irqs[] = {
+	REGMAP_IRQ_REG(BD71828_INT_BUCK1_OCP, 0, BD71828_INT_BUCK1_OCP_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BUCK2_OCP, 0, BD71828_INT_BUCK2_OCP_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BUCK3_OCP, 0, BD71828_INT_BUCK3_OCP_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BUCK4_OCP, 0, BD71828_INT_BUCK4_OCP_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BUCK5_OCP, 0, BD71828_INT_BUCK5_OCP_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BUCK6_OCP, 0, BD71828_INT_BUCK6_OCP_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BUCK7_OCP, 0, BD71828_INT_BUCK7_OCP_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_PGFAULT, 0, BD71828_INT_PGFAULT_MASK),
+	/* DCIN1 interrupts */
+	REGMAP_IRQ_REG(BD71828_INT_DCIN_DET, 1, BD71828_INT_DCIN_DET_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_DCIN_RMV, 1, BD71828_INT_DCIN_RMV_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_CLPS_OUT, 1, BD71828_INT_CLPS_OUT_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_CLPS_IN, 1, BD71828_INT_CLPS_IN_MASK),
+	/* DCIN2 interrupts */
+	REGMAP_IRQ_REG(BD71828_INT_DCIN_MON_RES, 2,
+		       BD71828_INT_DCIN_MON_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_DCIN_MON_DET, 2,
+		       BD71828_INT_DCIN_MON_DET_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_LONGPUSH, 2, BD71828_INT_LONGPUSH_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_MIDPUSH, 2, BD71828_INT_MIDPUSH_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_SHORTPUSH, 2, BD71828_INT_SHORTPUSH_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_PUSH, 2, BD71828_INT_PUSH_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_WDOG, 2, BD71828_INT_WDOG_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_SWRESET, 2, BD71828_INT_SWRESET_MASK),
+	/* Vsys */
+	REGMAP_IRQ_REG(BD71828_INT_VSYS_UV_RES, 3,
+		       BD71828_INT_VSYS_UV_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_VSYS_UV_DET, 3,
+		       BD71828_INT_VSYS_UV_DET_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_VSYS_LOW_RES, 3,
+		       BD71828_INT_VSYS_LOW_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_VSYS_LOW_DET, 3,
+		       BD71828_INT_VSYS_LOW_DET_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_VSYS_HALL_IN, 3,
+		       BD71828_INT_VSYS_HALL_IN_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_VSYS_HALL_TOGGLE, 3,
+		       BD71828_INT_VSYS_HALL_TOGGLE_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_VSYS_MON_RES, 3,
+		       BD71828_INT_VSYS_MON_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_VSYS_MON_DET, 3,
+		       BD71828_INT_VSYS_MON_DET_MASK),
+	/* Charger */
+	REGMAP_IRQ_REG(BD71828_INT_CHG_DCIN_ILIM, 4,
+		       BD71828_INT_CHG_DCIN_ILIM_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_CHG_TOPOFF_TO_DONE, 4,
+		       BD71828_INT_CHG_TOPOFF_TO_DONE_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_CHG_WDG_TEMP, 4,
+		       BD71828_INT_CHG_WDG_TEMP_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_CHG_WDG_TIME, 4,
+		       BD71828_INT_CHG_WDG_TIME_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_CHG_RECHARGE_RES, 4,
+		       BD71828_INT_CHG_RECHARGE_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_CHG_RECHARGE_DET, 4,
+		       BD71828_INT_CHG_RECHARGE_DET_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_CHG_RANGED_TEMP_TRANSITION, 4,
+		       BD71828_INT_CHG_RANGED_TEMP_TRANSITION_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_CHG_STATE_TRANSITION, 4,
+		       BD71828_INT_CHG_STATE_TRANSITION_MASK),
+	/* Battery */
+	REGMAP_IRQ_REG(BD71828_INT_BAT_TEMP_NORMAL, 5,
+		       BD71828_INT_BAT_TEMP_NORMAL_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_TEMP_ERANGE, 5,
+		       BD71828_INT_BAT_TEMP_ERANGE_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_TEMP_WARN, 5,
+		       BD71828_INT_BAT_TEMP_WARN_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_REMOVED, 5,
+		       BD71828_INT_BAT_REMOVED_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_DETECTED, 5,
+		       BD71828_INT_BAT_DETECTED_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_THERM_REMOVED, 5,
+		       BD71828_INT_THERM_REMOVED_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_THERM_DETECTED, 5,
+		       BD71828_INT_THERM_DETECTED_MASK),
+	/* Battery Mon 1 */
+	REGMAP_IRQ_REG(BD71828_INT_BAT_DEAD, 6, BD71828_INT_BAT_DEAD_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_SHORTC_RES, 6,
+		       BD71828_INT_BAT_SHORTC_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_SHORTC_DET, 6,
+		       BD71828_INT_BAT_SHORTC_DET_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_LOW_VOLT_RES, 6,
+		       BD71828_INT_BAT_LOW_VOLT_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_LOW_VOLT_DET, 6,
+		       BD71828_INT_BAT_LOW_VOLT_DET_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_VOLT_RES, 6,
+		       BD71828_INT_BAT_OVER_VOLT_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_VOLT_DET, 6,
+		       BD71828_INT_BAT_OVER_VOLT_DET_MASK),
+	/* Battery Mon 2 */
+	REGMAP_IRQ_REG(BD71828_INT_BAT_MON_RES, 7,
+		       BD71828_INT_BAT_MON_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_MON_DET, 7,
+		       BD71828_INT_BAT_MON_DET_MASK),
+	/* Battery Mon 3 (Coulomb counter) */
+	REGMAP_IRQ_REG(BD71828_INT_BAT_CC_MON1, 8,
+		       BD71828_INT_BAT_CC_MON1_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_CC_MON2, 8,
+		       BD71828_INT_BAT_CC_MON2_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_CC_MON3, 8,
+		       BD71828_INT_BAT_CC_MON3_MASK),
+	/* Battery Mon 4 */
+	REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_CURR_1_RES, 9,
+		       BD71828_INT_BAT_OVER_CURR_1_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_CURR_1_DET, 9,
+		       BD71828_INT_BAT_OVER_CURR_1_DET_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_CURR_2_RES, 9,
+		       BD71828_INT_BAT_OVER_CURR_2_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_CURR_2_DET, 9,
+		       BD71828_INT_BAT_OVER_CURR_2_DET_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_CURR_3_RES, 9,
+		       BD71828_INT_BAT_OVER_CURR_3_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_CURR_3_DET, 9,
+		       BD71828_INT_BAT_OVER_CURR_3_DET_MASK),
+	/* Temperature */
+	REGMAP_IRQ_REG(BD71828_INT_TEMP_BAT_LOW_RES, 10,
+		       BD71828_INT_TEMP_BAT_LOW_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_TEMP_BAT_LOW_DET, 10,
+		       BD71828_INT_TEMP_BAT_LOW_DET_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_TEMP_BAT_HI_RES, 10,
+		       BD71828_INT_TEMP_BAT_HI_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_TEMP_BAT_HI_DET, 10,
+		       BD71828_INT_TEMP_BAT_HI_DET_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_TEMP_CHIP_OVER_125_RES, 10,
+		       BD71828_INT_TEMP_CHIP_OVER_125_RES_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_TEMP_CHIP_OVER_125_DET, 10,
+		       BD71828_INT_TEMP_CHIP_OVER_125_DET_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_TEMP_CHIP_OVER_VF_DET, 10,
+		       BD71828_INT_TEMP_CHIP_OVER_VF_DET_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_TEMP_CHIP_OVER_VF_RES, 10,
+		       BD71828_INT_TEMP_CHIP_OVER_VF_RES_MASK),
+	/* RTC Alarm */
+	REGMAP_IRQ_REG(BD71828_INT_RTC0, 11, BD71828_INT_RTC0_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_RTC1, 11, BD71828_INT_RTC1_MASK),
+	REGMAP_IRQ_REG(BD71828_INT_RTC2, 11, BD71828_INT_RTC2_MASK),
+};
+
+static struct regmap_irq_chip bd71828_irq_chip = {
+	.name = "bd71828_irq",
+	.main_status = BD71828_REG_INT_MAIN,
+	.irqs = &bd71828_irqs[0],
+	.num_irqs = ARRAY_SIZE(bd71828_irqs),
+	.status_base = BD71828_REG_INT_BUCK,
+	.mask_base = BD71828_REG_INT_MASK_BUCK,
+	.ack_base = BD71828_REG_INT_BUCK,
+	.mask_invert = true,
+	.init_ack_masked = true,
+	.num_regs = 12,
+	.num_main_regs = 1,
+	.sub_reg_offsets = &bd71828_sub_irq_offsets[0],
+	.num_main_status_bits = 8,
+	.irq_reg_stride = 1,
+};
+
+static int bd71828_i2c_probe(struct i2c_client *i2c)
+{
+	struct rohm_regmap_dev *chip;
+	struct regmap_irq_chip_data *irq_data;
+	int ret;
+
+	if (!i2c->irq) {
+		dev_err(&i2c->dev, "No IRQ configured\n");
+		return -EINVAL;
+	}
+
+	chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	dev_set_drvdata(&i2c->dev, chip);
+
+	chip->regmap = devm_regmap_init_i2c(i2c, &bd71828_regmap);
+	if (IS_ERR(chip->regmap)) {
+		dev_err(&i2c->dev, "Failed to initialize Regmap\n");
+		return PTR_ERR(chip->regmap);
+	}
+
+	ret = devm_regmap_add_irq_chip(&i2c->dev, chip->regmap,
+				       i2c->irq, IRQF_ONESHOT, 0,
+				       &bd71828_irq_chip, &irq_data);
+	if (ret) {
+		dev_err(&i2c->dev, "Failed to add IRQ chip\n");
+		return ret;
+	}
+
+	dev_dbg(&i2c->dev, "Registered %d IRQs for chip\n",
+		bd71828_irq_chip.num_irqs);
+
+	ret = regmap_irq_get_virq(irq_data, BD71828_INT_SHORTPUSH);
+	if (ret < 0) {
+		dev_err(&i2c->dev, "Failed to get the power-key IRQ\n");
+		return ret;
+	}
+
+	button.irq = ret;
+
+	ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO,
+				   bd71828_mfd_cells,
+				   ARRAY_SIZE(bd71828_mfd_cells), NULL, 0,
+				   regmap_irq_get_domain(irq_data));
+	if (ret)
+		dev_err(&i2c->dev, "Failed to create subdevices\n");
+
+	return ret;
+}
+
+static const struct of_device_id bd71828_of_match[] = {
+	{ .compatible = "rohm,bd71828", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, bd71828_of_match);
+
+static struct i2c_driver bd71828_drv = {
+	.driver = {
+		.name = "rohm-bd71828",
+		.of_match_table = bd71828_of_match,
+	},
+	.probe_new = &bd71828_i2c_probe,
+};
+module_i2c_driver(bd71828_drv);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("ROHM BD71828 Power Management IC driver");
+MODULE_LICENSE("GPL");
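
The bitN_offsets tables above describe, for each bit of BD71828_REG_INT_MAIN, which sub-status registers regmap-irq must read when that bit is set; bit 2, for instance, fans out to the four battery-monitor registers at offsets 6-9. A rough standalone sketch of that fan-out, where read_reg() and the 0xd1 base are placeholders rather than the real regmap call and register address:

/* Sketch of how a main IRQ status bit selects sub-status registers,
 * mirroring bd71828_sub_irq_offsets[].
 */
#include <stdio.h>
#include <stdint.h>

struct sub_map {
	const unsigned int *offsets;
	unsigned int num;
};

static const unsigned int bit2_offsets[] = { 6, 7, 8, 9 };	/* BAT MON */
static const unsigned int bit3_offsets[] = { 5 };		/* BAT */

static const struct sub_map sub_irq_map[8] = {
	[2] = { bit2_offsets, 4 },
	[3] = { bit3_offsets, 1 },
};

static uint8_t read_reg(unsigned int reg)
{
	(void)reg;
	return 0;	/* stand-in for the I2C/regmap status read */
}

static void handle_main_status(uint8_t main_status, unsigned int status_base)
{
	for (unsigned int bit = 0; bit < 8; bit++) {
		if (!(main_status & (1u << bit)))
			continue;
		/* Only the sub-registers behind this main bit are read. */
		for (unsigned int i = 0; i < sub_irq_map[bit].num; i++) {
			unsigned int reg = status_base + sub_irq_map[bit].offsets[i];

			printf("read sub-status 0x%x -> 0x%02x\n",
			       reg, read_reg(reg));
		}
	}
}

int main(void)
{
	handle_main_status(1u << 2, 0xd1);	/* 0xd1: placeholder base address */
	return 0;
}
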
diff --git a/drivers/mfd/rohm-bd718x7.c b/drivers/mfd/rohm-bd718x7.c
index 85e7f51..c32c1b6 100644
--- a/drivers/mfd/rohm-bd718x7.c
+++ b/drivers/mfd/rohm-bd718x7.c
@@ -30,14 +30,24 @@ static struct gpio_keys_platform_data bd718xx_powerkey_data = {
 	.name = "bd718xx-pwrkey",
 };
 
-static struct mfd_cell bd718xx_mfd_cells[] = {
+static struct mfd_cell bd71837_mfd_cells[] = {
 	{
 		.name = "gpio-keys",
 		.platform_data = &bd718xx_powerkey_data,
 		.pdata_size = sizeof(bd718xx_powerkey_data),
 	},
-	{ .name = "bd718xx-clk", },
-	{ .name = "bd718xx-pmic", },
+	{ .name = "bd71837-clk", },
+	{ .name = "bd71837-pmic", },
+};
+
+static struct mfd_cell bd71847_mfd_cells[] = {
+	{
+		.name = "gpio-keys",
+		.platform_data = &bd718xx_powerkey_data,
+		.pdata_size = sizeof(bd718xx_powerkey_data),
+	},
+	{ .name = "bd71847-clk", },
+	{ .name = "bd71847-pmic", },
 };
 
 static const struct regmap_irq bd718xx_irqs[] = {
@@ -124,6 +134,9 @@ static int bd718xx_i2c_probe(struct i2c_client *i2c,
 {
 	struct bd718xx *bd718xx;
 	int ret;
+	unsigned int chip_type;
+	struct mfd_cell *mfd;
+	int cells;
 
 	if (!i2c->irq) {
 		dev_err(&i2c->dev, "No IRQ configured\n");
@@ -136,8 +149,21 @@ static int bd718xx_i2c_probe(struct i2c_client *i2c,
 		return -ENOMEM;
 
 	bd718xx->chip_irq = i2c->irq;
-	bd718xx->chip.chip_type = (unsigned int)(uintptr_t)
-				of_device_get_match_data(&i2c->dev);
+	chip_type = (unsigned int)(uintptr_t)
+		    of_device_get_match_data(&i2c->dev);
+	switch (chip_type) {
+	case ROHM_CHIP_TYPE_BD71837:
+		mfd = bd71837_mfd_cells;
+		cells = ARRAY_SIZE(bd71837_mfd_cells);
+		break;
+	case ROHM_CHIP_TYPE_BD71847:
+		mfd = bd71847_mfd_cells;
+		cells = ARRAY_SIZE(bd71847_mfd_cells);
+		break;
+	default:
+		dev_err(&i2c->dev, "Unknown device type");
+		return -EINVAL;
+	}
 	bd718xx->chip.dev = &i2c->dev;
 	dev_set_drvdata(&i2c->dev, bd718xx);
 
@@ -170,8 +196,7 @@ static int bd718xx_i2c_probe(struct i2c_client *i2c,
 	button.irq = ret;
 
 	ret = devm_mfd_add_devices(bd718xx->chip.dev, PLATFORM_DEVID_AUTO,
-				   bd718xx_mfd_cells,
-				   ARRAY_SIZE(bd718xx_mfd_cells), NULL, 0,
+				   mfd, cells, NULL, 0,
 				   regmap_irq_get_domain(bd718xx->irq_data));
 	if (ret)
 		dev_err(&i2c->dev, "Failed to create subdevices\n");
@@ -188,6 +213,10 @@ static const struct of_device_id bd718xx_of_match[] = {
 		.compatible = "rohm,bd71847",
 		.data = (void *)ROHM_CHIP_TYPE_BD71847,
 	},
+	{
+		.compatible = "rohm,bd71850",
+		.data = (void *)ROHM_CHIP_TYPE_BD71847,
+	},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, bd718xx_of_match);
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 154270f..e49787e 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1086,8 +1086,7 @@ static int sm501_register_gpio(struct sm501_devdata *sm)
 	iounmap(gpio->regs);
 
  err_claimed:
-	release_resource(gpio->regs_res);
-	kfree(gpio->regs_res);
+	release_mem_region(iobase, 0x20);
 
 	return ret;
 }
@@ -1095,6 +1094,7 @@ static int sm501_register_gpio(struct sm501_devdata *sm)
 static void sm501_gpio_remove(struct sm501_devdata *sm)
 {
 	struct sm501_gpio *gpio = &sm->gpio;
+	resource_size_t iobase = sm->io_res->start + SM501_GPIO;
 
 	if (!sm->gpio.registered)
 		return;
@@ -1103,8 +1103,7 @@ static void sm501_gpio_remove(struct sm501_devdata *sm)
 	gpiochip_remove(&gpio->high.gpio);
 
 	iounmap(gpio->regs);
-	release_resource(gpio->regs_res);
-	kfree(gpio->regs_res);
+	release_mem_region(iobase, 0x20);
 }
 
 static inline int sm501_gpio_isregistered(struct sm501_devdata *sm)
@@ -1427,8 +1426,7 @@ static int sm501_plat_probe(struct platform_device *dev)
 	return sm501_init_dev(sm);
 
  err_claim:
-	release_resource(sm->regs_claim);
-	kfree(sm->regs_claim);
+	release_mem_region(sm->io_res->start, 0x100);
  err_res:
 	kfree(sm);
  err1:
@@ -1637,8 +1635,7 @@ static int sm501_pci_probe(struct pci_dev *dev,
 	return 0;
 
  err4:
-	release_resource(sm->regs_claim);
-	kfree(sm->regs_claim);
+	release_mem_region(sm->io_res->start, 0x100);
  err3:
 	pci_disable_device(dev);
  err2:
@@ -1673,8 +1670,7 @@ static void sm501_pci_remove(struct pci_dev *dev)
 	sm501_dev_remove(sm);
 	iounmap(sm->regs);
 
-	release_resource(sm->regs_claim);
-	kfree(sm->regs_claim);
+	release_mem_region(sm->io_res->start, 0x100);
 
 	pci_disable_device(dev);
 }
@@ -1686,8 +1682,7 @@ static int sm501_plat_remove(struct platform_device *dev)
 	sm501_dev_remove(sm);
 	iounmap(sm->regs);
 
-	release_resource(sm->regs_claim);
-	kfree(sm->regs_claim);
+	release_mem_region(sm->io_res->start, 0x100);
 
 	return 0;
 }
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index e22197c..3a97816 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -224,6 +224,35 @@ struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(syscon_regmap_lookup_by_phandle);
 
+struct regmap *syscon_regmap_lookup_by_phandle_args(struct device_node *np,
+					const char *property,
+					int arg_count,
+					unsigned int *out_args)
+{
+	struct device_node *syscon_np;
+	struct of_phandle_args args;
+	struct regmap *regmap;
+	unsigned int index;
+	int rc;
+
+	rc = of_parse_phandle_with_fixed_args(np, property, arg_count,
+			0, &args);
+	if (rc)
+		return ERR_PTR(rc);
+
+	syscon_np = args.np;
+	if (!syscon_np)
+		return ERR_PTR(-ENODEV);
+
+	regmap = syscon_node_to_regmap(syscon_np);
+	for (index = 0; index < arg_count; index++)
+		out_args[index] = args.args[index];
+	of_node_put(syscon_np);
+
+	return regmap;
+}
+EXPORT_SYMBOL_GPL(syscon_regmap_lookup_by_phandle_args);
+
 static int syscon_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -245,7 +274,7 @@ static int syscon_probe(struct platform_device *pdev)
 	if (!base)
 		return -ENOMEM;
 
-	syscon_config.max_register = res->end - res->start - 3;
+	syscon_config.max_register = resource_size(res) - 4;
 	if (pdata)
 		syscon_config.name = pdata->label;
 	syscon->regmap = devm_regmap_init_mmio(dev, base, &syscon_config);
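
The max_register change in syscon_probe() is a readability cleanup rather than a behavioural one: resource_size() is end - start + 1, so resource_size(res) - 4 equals the old res->end - res->start - 3, i.e. the offset of the last 32-bit register in the window. A quick check with arbitrary example bounds:

/* Confirms the two max_register expressions in syscon_probe() agree.
 * The bounds are arbitrary example values.
 */
#include <stdio.h>

int main(void)
{
	unsigned long start = 0x1000, end = 0x1fff;
	unsigned long size = end - start + 1;	/* what resource_size() returns */

	printf("old: %#lx, new: %#lx\n",
	       end - start - 3,		/* previous expression */
	       size - 4);		/* resource_size(res) - 4 */
	return 0;
}
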
diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c
index 22d2f02..b9f48e5 100644
--- a/drivers/mfd/tqmx86.c
+++ b/drivers/mfd/tqmx86.c
@@ -158,7 +158,7 @@ static int tqmx86_board_id_to_clk_rate(u8 board_id)
 
 static int tqmx86_probe(struct platform_device *pdev)
 {
-	u8 board_id, rev, i2c_det, i2c_ien, io_ext_int_val;
+	u8 board_id, rev, i2c_det, io_ext_int_val;
 	struct device *dev = &pdev->dev;
 	u8 gpio_irq_cfg, readback;
 	const char *board_name;
@@ -196,7 +196,6 @@ static int tqmx86_probe(struct platform_device *pdev)
 		 board_name, board_id, rev >> 4, rev & 0xf);
 
 	i2c_det = ioread8(io_base + TQMX86_REG_I2C_DETECT);
-	i2c_ien = ioread8(io_base + TQMX86_REG_I2C_INT_EN);
 
 	if (gpio_irq_cfg) {
 		io_ext_int_val =
diff --git a/drivers/mfd/wcd934x.c b/drivers/mfd/wcd934x.c
new file mode 100644
index 0000000..90341f3
--- /dev/null
+++ b/drivers/mfd/wcd934x.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019, Linaro Limited
+
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/wcd934x/registers.h>
+#include <linux/mfd/wcd934x/wcd934x.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slimbus.h>
+
+static const struct mfd_cell wcd934x_devices[] = {
+	{
+		.name = "wcd934x-codec",
+	}, {
+		.name = "wcd934x-gpio",
+		.of_compatible = "qcom,wcd9340-gpio",
+	}, {
+		.name = "wcd934x-soundwire",
+		.of_compatible = "qcom,soundwire-v1.3.0",
+	},
+};
+
+static const struct regmap_irq wcd934x_irqs[] = {
+	[WCD934X_IRQ_SLIMBUS] = {
+		.reg_offset = 0,
+		.mask = BIT(0),
+		.type = {
+			.type_reg_offset = 0,
+			.types_supported = IRQ_TYPE_EDGE_BOTH,
+			.type_reg_mask  = BIT(0),
+			.type_level_low_val = BIT(0),
+			.type_level_high_val = BIT(0),
+			.type_falling_val = 0,
+			.type_rising_val = 0,
+		},
+	},
+	[WCD934X_IRQ_SOUNDWIRE] = {
+		.reg_offset = 2,
+		.mask = BIT(4),
+		.type = {
+			.type_reg_offset = 2,
+			.types_supported = IRQ_TYPE_EDGE_BOTH,
+			.type_reg_mask  = BIT(4),
+			.type_level_low_val = BIT(4),
+			.type_level_high_val = BIT(4),
+			.type_falling_val = 0,
+			.type_rising_val = 0,
+		},
+	},
+};
+
+static const struct regmap_irq_chip wcd934x_regmap_irq_chip = {
+	.name = "wcd934x_irq",
+	.status_base = WCD934X_INTR_PIN1_STATUS0,
+	.mask_base = WCD934X_INTR_PIN1_MASK0,
+	.ack_base = WCD934X_INTR_PIN1_CLEAR0,
+	.type_base = WCD934X_INTR_LEVEL0,
+	.num_type_reg = 4,
+	.type_in_mask = false,
+	.num_regs = 4,
+	.irqs = wcd934x_irqs,
+	.num_irqs = ARRAY_SIZE(wcd934x_irqs),
+};
+
+static bool wcd934x_is_volatile_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case WCD934X_INTR_PIN1_STATUS0 ... WCD934X_INTR_PIN2_CLEAR3:
+	case WCD934X_SWR_AHB_BRIDGE_RD_DATA_0:
+	case WCD934X_SWR_AHB_BRIDGE_RD_DATA_1:
+	case WCD934X_SWR_AHB_BRIDGE_RD_DATA_2:
+	case WCD934X_SWR_AHB_BRIDGE_RD_DATA_3:
+	case WCD934X_SWR_AHB_BRIDGE_ACCESS_STATUS:
+	case WCD934X_ANA_MBHC_RESULT_3:
+	case WCD934X_ANA_MBHC_RESULT_2:
+	case WCD934X_ANA_MBHC_RESULT_1:
+	case WCD934X_ANA_MBHC_MECH:
+	case WCD934X_ANA_MBHC_ELECT:
+	case WCD934X_ANA_MBHC_ZDET:
+	case WCD934X_ANA_MICB2:
+	case WCD934X_ANA_RCO:
+	case WCD934X_ANA_BIAS:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_range_cfg wcd934x_ranges[] = {
+	{	.name = "WCD934X",
+		.range_min =  0x0,
+		.range_max =  WCD934X_MAX_REGISTER,
+		.selector_reg = WCD934X_SEL_REGISTER,
+		.selector_mask = WCD934X_SEL_MASK,
+		.selector_shift = WCD934X_SEL_SHIFT,
+		.window_start = WCD934X_WINDOW_START,
+		.window_len = WCD934X_WINDOW_LENGTH,
+	},
+};
+
+static struct regmap_config wcd934x_regmap_config = {
+	.reg_bits = 16,
+	.val_bits = 8,
+	.cache_type = REGCACHE_RBTREE,
+	.max_register = 0xffff,
+	.can_multi_write = true,
+	.ranges = wcd934x_ranges,
+	.num_ranges = ARRAY_SIZE(wcd934x_ranges),
+	.volatile_reg = wcd934x_is_volatile_register,
+};
+
+static int wcd934x_bring_up(struct wcd934x_ddata *ddata)
+{
+	struct regmap *regmap = ddata->regmap;
+	u16 id_minor, id_major;
+	int ret;
+
+	ret = regmap_bulk_read(regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0,
+			       (u8 *)&id_minor, sizeof(u16));
+	if (ret)
+		return ret;
+
+	ret = regmap_bulk_read(regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2,
+			       (u8 *)&id_major, sizeof(u16));
+	if (ret)
+		return ret;
+
+	dev_info(ddata->dev, "WCD934x chip id major 0x%x, minor 0x%x\n",
+		 id_major, id_minor);
+
+	regmap_write(regmap, WCD934X_CODEC_RPM_RST_CTL, 0x01);
+	regmap_write(regmap, WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x19);
+	regmap_write(regmap, WCD934X_SIDO_NEW_VOUT_D_STARTUP, 0x15);
+	/* Add 1msec delay for VOUT to settle */
+	usleep_range(1000, 1100);
+	regmap_write(regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
+	regmap_write(regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
+	regmap_write(regmap, WCD934X_CODEC_RPM_RST_CTL, 0x3);
+	regmap_write(regmap, WCD934X_CODEC_RPM_RST_CTL, 0x7);
+	regmap_write(regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
+
+	return 0;
+}
+
+static int wcd934x_slim_status_up(struct slim_device *sdev)
+{
+	struct device *dev = &sdev->dev;
+	struct wcd934x_ddata *ddata;
+	int ret;
+
+	ddata = dev_get_drvdata(dev);
+
+	ddata->regmap = regmap_init_slimbus(sdev, &wcd934x_regmap_config);
+	if (IS_ERR(ddata->regmap)) {
+		dev_err(dev, "Error allocating slim regmap\n");
+		return PTR_ERR(ddata->regmap);
+	}
+
+	ret = wcd934x_bring_up(ddata);
+	if (ret) {
+		dev_err(dev, "Failed to bring up WCD934X: err = %d\n", ret);
+		return ret;
+	}
+
+	ret = devm_regmap_add_irq_chip(dev, ddata->regmap, ddata->irq,
+				       IRQF_TRIGGER_HIGH, 0,
+				       &wcd934x_regmap_irq_chip,
+				       &ddata->irq_data);
+	if (ret) {
+		dev_err(dev, "Failed to add IRQ chip: err = %d\n", ret);
+		return ret;
+	}
+
+	ret = mfd_add_devices(dev, PLATFORM_DEVID_AUTO, wcd934x_devices,
+			      ARRAY_SIZE(wcd934x_devices), NULL, 0, NULL);
+	if (ret) {
+		dev_err(dev, "Failed to add child devices: err = %d\n",
+			ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int wcd934x_slim_status(struct slim_device *sdev,
+			       enum slim_device_status status)
+{
+	switch (status) {
+	case SLIM_DEVICE_STATUS_UP:
+		return wcd934x_slim_status_up(sdev);
+	case SLIM_DEVICE_STATUS_DOWN:
+		mfd_remove_devices(&sdev->dev);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int wcd934x_slim_probe(struct slim_device *sdev)
+{
+	struct device *dev = &sdev->dev;
+	struct device_node *np = dev->of_node;
+	struct wcd934x_ddata *ddata;
+	int reset_gpio, ret;
+
+	ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+	if (!ddata)
+		return -ENOMEM;
+
+	ddata->irq = of_irq_get(np, 0);
+	if (ddata->irq < 0) {
+		if (ddata->irq != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get IRQ: err = %d\n",
+				ddata->irq);
+		return ddata->irq;
+	}
+
+	reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
+	if (reset_gpio < 0) {
+		dev_err(dev, "Failed to get reset gpio: err = %d\n",
+			reset_gpio);
+		return reset_gpio;
+	}
+
+	ddata->extclk = devm_clk_get(dev, "extclk");
+	if (IS_ERR(ddata->extclk)) {
+		dev_err(dev, "Failed to get extclk");
+		return PTR_ERR(ddata->extclk);
+	}
+
+	ddata->supplies[0].supply = "vdd-buck";
+	ddata->supplies[1].supply = "vdd-buck-sido";
+	ddata->supplies[2].supply = "vdd-tx";
+	ddata->supplies[3].supply = "vdd-rx";
+	ddata->supplies[4].supply = "vdd-io";
+
+	ret = regulator_bulk_get(dev, WCD934X_MAX_SUPPLY, ddata->supplies);
+	if (ret) {
+		dev_err(dev, "Failed to get supplies: err = %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_bulk_enable(WCD934X_MAX_SUPPLY, ddata->supplies);
+	if (ret) {
+		dev_err(dev, "Failed to enable supplies: err = %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * For WCD934X, it takes about 600us for the Vout_A and
+	 * Vout_D to be ready after BUCK_SIDO is powered up.
+	 * SYS_RST_N shouldn't be pulled high during this time
+	 */
+	usleep_range(600, 650);
+	gpio_direction_output(reset_gpio, 0);
+	msleep(20);
+	gpio_set_value(reset_gpio, 1);
+	msleep(20);
+
+	ddata->dev = dev;
+	dev_set_drvdata(dev, ddata);
+
+	return 0;
+}
+
+static void wcd934x_slim_remove(struct slim_device *sdev)
+{
+	struct wcd934x_ddata *ddata = dev_get_drvdata(&sdev->dev);
+
+	regulator_bulk_disable(WCD934X_MAX_SUPPLY, ddata->supplies);
+	mfd_remove_devices(&sdev->dev);
+}
+
+static const struct slim_device_id wcd934x_slim_id[] = {
+	{ SLIM_MANF_ID_QCOM, SLIM_PROD_CODE_WCD9340,
+	  SLIM_DEV_IDX_WCD9340, SLIM_DEV_INSTANCE_ID_WCD9340 },
+	{}
+};
+
+static struct slim_driver wcd934x_slim_driver = {
+	.driver = {
+		.name = "wcd934x-slim",
+	},
+	.probe = wcd934x_slim_probe,
+	.remove = wcd934x_slim_remove,
+	.device_status = wcd934x_slim_status,
+	.id_table = wcd934x_slim_id,
+};
+
+module_slim_driver(wcd934x_slim_driver);
+MODULE_DESCRIPTION("WCD934X slim driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("slim:217:250:*");
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
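
The regmap_range_cfg above sets up paged access: for registers outside the directly visible window, regmap first writes the page index to WCD934X_SEL_REGISTER and then accesses the register's offset inside the window. A rough sketch of that address split, with made-up window geometry in place of WCD934X_WINDOW_START/LENGTH:

/* Sketch of regmap's paged (indirect) addressing as configured for WCD934X.
 * The window geometry below is illustrative; the real values come from the
 * WCD934X_WINDOW_START/LENGTH and selector definitions in the header.
 */
#include <stdio.h>

#define WINDOW_START	0x800	/* assumed start of the paged window */
#define WINDOW_LEN	0x100	/* assumed window length */

static void split_address(unsigned int reg,
			  unsigned int *page, unsigned int *offset)
{
	*page = reg / WINDOW_LEN;		/* written to the selector register */
	*offset = WINDOW_START + reg % WINDOW_LEN; /* bus address actually used */
}

int main(void)
{
	unsigned int page, offset;

	split_address(0x1234, &page, &offset);
	printf("reg 0x1234 -> page 0x%x, window offset 0x%x\n", page, offset);
	return 0;
}
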
diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
index 259fe1d..cd402c8 100644
--- a/drivers/misc/cardreader/alcor_pci.c
+++ b/drivers/misc/cardreader/alcor_pci.c
@@ -38,12 +38,18 @@ static const struct alcor_dev_cfg au6621_cfg = {
 	.dma = 1,
 };
 
+static const struct alcor_dev_cfg au6625_cfg = {
+	.dma = 0,
+};
+
 static const struct pci_device_id pci_ids[] = {
 	{ PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6601),
 		.driver_data = (kernel_ulong_t)&alcor_cfg },
 	{ PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6621),
 		.driver_data = (kernel_ulong_t)&au6621_cfg },
-	{ },
+	{ PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6625),
+		.driver_data = (kernel_ulong_t)&au6625_cfg },
+	{},
 };
 MODULE_DEVICE_TABLE(pci, pci_ids);
 
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 32dcec2..bc4967a 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -628,7 +628,8 @@ int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
 		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
 {
 	int err, clk;
-	u8 n, clk_divider, mcu_cnt, div;
+	u16 n;
+	u8 clk_divider, mcu_cnt, div;
 	static const u8 depth[] = {
 		[RTSX_SSC_DEPTH_4M] = RTS5261_SSC_DEPTH_4M,
 		[RTSX_SSC_DEPTH_2M] = RTS5261_SSC_DEPTH_2M,
@@ -661,13 +662,13 @@ int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
 		return 0;
 
 	if (pcr->ops->conv_clk_and_div_n)
-		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+		n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
 	else
-		n = (u8)(clk - 4);
+		n = clk - 4;
 	if ((clk <= 4) || (n > 396))
 		return -EINVAL;
 
-	mcu_cnt = (u8)(125/clk + 3);
+	mcu_cnt = 125/clk + 3;
 	if (mcu_cnt > 15)
 		mcu_cnt = 15;
 
@@ -676,7 +677,7 @@ int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
 		if (pcr->ops->conv_clk_and_div_n) {
 			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
 					DIV_N_TO_CLK) * 2;
-			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
+			n = pcr->ops->conv_clk_and_div_n(dbl_clk,
 					CLK_TO_DIV_N);
 		} else {
 			n = (n + 4) * 2 - 4;
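
Widening n from u8 to u16 matters because conv_clk_and_div_n() results and the double-clock recalculation (n + 4) * 2 - 4 can exceed 255; with an 8-bit n the value silently wraps and the n > 396 sanity check can never fire. A small illustration using a made-up clock value:

/* Demonstrates why rts5261's divider variable needs to be wider than u8.
 * The clock number is illustrative, not taken from the datasheet.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int clk = 204;			/* illustrative card clock (MHz) */
	uint8_t  n8  = (uint8_t)(clk - 4);	/* old code: clamped to 8 bits */
	uint16_t n16 = (uint16_t)(clk - 4);	/* new code: keeps the full value */

	/* double-clock path: n = (n + 4) * 2 - 4 */
	uint8_t  d8  = (uint8_t)((n8 + 4) * 2 - 4);
	uint16_t d16 = (uint16_t)((n16 + 4) * 2 - 4);

	printf("u8:  n=%u, doubled=%u (wrapped)\n", n8, d8);
	printf("u16: n=%u, doubled=%u\n", n16, d16);
	printf("n > 396 can%s fire with an 8-bit n\n", n8 > 396 ? "" : "not");
	return 0;
}
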
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index aed9c44..fb2eff6 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -352,7 +352,7 @@ void cxl_context_free(struct cxl_context *ctx)
 void cxl_context_mm_count_get(struct cxl_context *ctx)
 {
 	if (ctx->mm)
-		atomic_inc(&ctx->mm->mm_count);
+		mmgrab(ctx->mm);
 }
 
 void cxl_context_mm_count_put(struct cxl_context *ctx)
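The cxl hunk replaces the open-coded atomic_inc(&mm->mm_count) with mmgrab(), the helper that pins a struct mm_struct without keeping its address space (mm_users) alive. A minimal sketch of the grab/drop pairing around a cached mm pointer; the context structure is hypothetical:

#include <linux/sched/mm.h>	/* mmgrab(), mmdrop() */

struct my_ctx {			/* hypothetical context */
	struct mm_struct *mm;
};

static void my_ctx_mm_get(struct my_ctx *ctx)
{
	if (ctx->mm)
		mmgrab(ctx->mm);	/* pin the mm_struct itself (mm_count) */
}

static void my_ctx_mm_put(struct my_ctx *ctx)
{
	if (ctx->mm)
		mmdrop(ctx->mm);	/* release the reference taken above */
}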
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 0681d5f..031eb64 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * at24.c - handle most I2C EEPROMs
  *
@@ -6,23 +6,23 @@
  * Copyright (C) 2008 Wolfram Sang, Pengutronix
  */
 
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/mod_devicetable.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-#include <linux/property.h>
 #include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
 #include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/nvmem-provider.h>
-#include <linux/regmap.h>
+#include <linux/of_device.h>
 #include <linux/pm_runtime.h>
-#include <linux/gpio/consumer.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
 
 /* Address pointer is 16 bit. */
 #define AT24_FLAG_ADDR16	BIT(7)
@@ -88,8 +88,7 @@ struct at24_data {
 	u8 flags;
 
 	struct nvmem_device *nvmem;
-
-	struct gpio_desc *wp_gpio;
+	struct regulator *vcc_reg;
 
 	/*
 	 * Some chips tie up multiple I2C addresses; dummy devices reserve
@@ -457,12 +456,10 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
 	 * from this host, but not from other I2C masters.
 	 */
 	mutex_lock(&at24->lock);
-	gpiod_set_value_cansleep(at24->wp_gpio, 0);
 
 	while (count) {
 		ret = at24_regmap_write(at24, buf, off, count);
 		if (ret < 0) {
-			gpiod_set_value_cansleep(at24->wp_gpio, 1);
 			mutex_unlock(&at24->lock);
 			pm_runtime_put(dev);
 			return ret;
@@ -472,7 +469,6 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
 		count -= ret;
 	}
 
-	gpiod_set_value_cansleep(at24->wp_gpio, 1);
 	mutex_unlock(&at24->lock);
 
 	pm_runtime_put(dev);
@@ -662,9 +658,9 @@ static int at24_probe(struct i2c_client *client)
 	at24->client[0].client = client;
 	at24->client[0].regmap = regmap;
 
-	at24->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_HIGH);
-	if (IS_ERR(at24->wp_gpio))
-		return PTR_ERR(at24->wp_gpio);
+	at24->vcc_reg = devm_regulator_get(dev, "vcc");
+	if (IS_ERR(at24->vcc_reg))
+		return PTR_ERR(at24->vcc_reg);
 
 	writable = !(flags & AT24_FLAG_READONLY);
 	if (writable) {
@@ -701,6 +697,12 @@ static int at24_probe(struct i2c_client *client)
 
 	i2c_set_clientdata(client, at24);
 
+	err = regulator_enable(at24->vcc_reg);
+	if (err) {
+		dev_err(dev, "Failed to enable vcc regulator\n");
+		return err;
+	}
+
 	/* enable runtime pm */
 	pm_runtime_set_active(dev);
 	pm_runtime_enable(dev);
@@ -713,6 +715,7 @@ static int at24_probe(struct i2c_client *client)
 	pm_runtime_idle(dev);
 	if (err) {
 		pm_runtime_disable(dev);
+		regulator_disable(at24->vcc_reg);
 		return -ENODEV;
 	}
 
@@ -728,15 +731,42 @@ static int at24_probe(struct i2c_client *client)
 
 static int at24_remove(struct i2c_client *client)
 {
+	struct at24_data *at24 = i2c_get_clientdata(client);
+
 	pm_runtime_disable(&client->dev);
+	if (!pm_runtime_status_suspended(&client->dev))
+		regulator_disable(at24->vcc_reg);
 	pm_runtime_set_suspended(&client->dev);
 
 	return 0;
 }
 
+static int __maybe_unused at24_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct at24_data *at24 = i2c_get_clientdata(client);
+
+	return regulator_disable(at24->vcc_reg);
+}
+
+static int __maybe_unused at24_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct at24_data *at24 = i2c_get_clientdata(client);
+
+	return regulator_enable(at24->vcc_reg);
+}
+
+static const struct dev_pm_ops at24_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(at24_suspend, at24_resume, NULL)
+};
+
 static struct i2c_driver at24_driver = {
 	.driver = {
 		.name = "at24",
+		.pm = &at24_pm_ops,
 		.of_match_table = at24_of_match,
 		.acpi_match_table = ACPI_PTR(at24_acpi_ids),
 	},
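The at24 changes above drop the write-protect GPIO in favour of a "vcc" supply and route system sleep through the runtime-PM path. A minimal sketch of that regulator-plus-PM shape, with a hypothetical driver data structure and without the at24 specifics:

#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>

struct my_drvdata {			/* hypothetical */
	struct regulator *vcc_reg;
};

static int my_runtime_suspend(struct device *dev)
{
	struct my_drvdata *d = dev_get_drvdata(dev);

	return regulator_disable(d->vcc_reg);	/* cut the supply while idle */
}

static int my_runtime_resume(struct device *dev)
{
	struct my_drvdata *d = dev_get_drvdata(dev);

	return regulator_enable(d->vcc_reg);	/* power up before the next access */
}

static const struct dev_pm_ops my_pm_ops = {
	/* route system sleep through the runtime-PM callbacks */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
};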
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index ae4ee27..e3e085e 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -581,13 +581,6 @@ static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
 	kfree(a);
 }
 
-static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
-{
-	struct fastrpc_buf *buf = dmabuf->priv;
-
-	return buf->virt ? buf->virt + pgnum * PAGE_SIZE : NULL;
-}
-
 static void *fastrpc_vmap(struct dma_buf *dmabuf)
 {
 	struct fastrpc_buf *buf = dmabuf->priv;
@@ -611,7 +604,6 @@ static const struct dma_buf_ops fastrpc_dma_buf_ops = {
 	.map_dma_buf = fastrpc_map_dma_buf,
 	.unmap_dma_buf = fastrpc_unmap_dma_buf,
 	.mmap = fastrpc_mmap,
-	.map = fastrpc_kmap,
 	.vmap = fastrpc_vmap,
 	.release = fastrpc_release,
 };
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 026c6ca..9051065 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -1084,7 +1084,7 @@ static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
 				queue->ddcb_daddr);
 	queue->ddcb_vaddr = NULL;
 	queue->ddcb_daddr = 0ull;
-	return -ENODEV;
+	return rc;
 
 }
 
@@ -1179,7 +1179,7 @@ static irqreturn_t genwqe_vf_isr(int irq, void *dev_id)
  */
 static int genwqe_card_thread(void *data)
 {
-	int should_stop = 0, rc = 0;
+	int should_stop = 0;
 	struct genwqe_dev *cd = (struct genwqe_dev *)data;
 
 	while (!kthread_should_stop()) {
@@ -1187,12 +1187,12 @@ static int genwqe_card_thread(void *data)
 		genwqe_check_ddcb_queue(cd, &cd->queue);
 
 		if (GENWQE_POLLING_ENABLED) {
-			rc = wait_event_interruptible_timeout(
+			wait_event_interruptible_timeout(
 				cd->queue_waitq,
 				genwqe_ddcbs_in_flight(cd) ||
 				(should_stop = kthread_should_stop()), 1);
 		} else {
-			rc = wait_event_interruptible_timeout(
+			wait_event_interruptible_timeout(
 				cd->queue_waitq,
 				genwqe_next_ddcb_ready(cd) ||
 				(should_stop = kthread_should_stop()), HZ);
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index b612562..fc5ff28 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -173,6 +173,7 @@ static int  isl29020_probe(struct i2c_client *client,
 
 static int isl29020_remove(struct i2c_client *client)
 {
+	pm_runtime_disable(&client->dev);
 	sysfs_remove_group(&client->dev.kobj, &m_als_gr);
 	return 0;
 }
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index a0a495c..8d468e0 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -765,7 +765,7 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
 
-	return scnprintf(buf, PAGE_SIZE, "%pUl", uuid);
+	return sprintf(buf, "%pUl", uuid);
 }
 static DEVICE_ATTR_RO(uuid);
 
@@ -775,7 +775,7 @@ static ssize_t version_show(struct device *dev, struct device_attribute *a,
 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
 	u8 version = mei_me_cl_ver(cldev->me_cl);
 
-	return scnprintf(buf, PAGE_SIZE, "%02X", version);
+	return sprintf(buf, "%02X", version);
 }
 static DEVICE_ATTR_RO(version);
 
@@ -797,7 +797,7 @@ static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
 	u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
 
-	return scnprintf(buf, PAGE_SIZE, "%d", maxconn);
+	return sprintf(buf, "%d", maxconn);
 }
 static DEVICE_ATTR_RO(max_conn);
 
@@ -807,7 +807,7 @@ static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
 	u8 fixed = mei_me_cl_fixed(cldev->me_cl);
 
-	return scnprintf(buf, PAGE_SIZE, "%d", fixed);
+	return sprintf(buf, "%d", fixed);
 }
 static DEVICE_ATTR_RO(fixed);
 
@@ -817,7 +817,7 @@ static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
 	u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
 
-	return scnprintf(buf, PAGE_SIZE, "%u", maxlen);
+	return sprintf(buf, "%u", maxlen);
 }
 static DEVICE_ATTR_RO(max_len);
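The mei bus attributes switch from scnprintf() to sprintf() because sysfs always hands a show() callback a full PAGE_SIZE buffer, so the explicit bound adds nothing for short fixed-format values. A minimal sketch of a read-only attribute in that style (hypothetical attribute and value):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	/* buf is a PAGE_SIZE buffer provided by sysfs, so a plain sprintf
	 * is fine for a short, fixed-format value like this one */
	return sprintf(buf, "%u\n", 42);
}
static DEVICE_ATTR_RO(demo);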
 
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 93027fd9..4c596c6 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -757,11 +757,38 @@ static const struct component_master_ops mei_component_master_ops = {
 	.unbind = mei_component_master_unbind,
 };
 
+/**
+ * mei_hdcp_component_match - compare function for matching mei hdcp.
+ *
+ *    The function checks if the driver is i915, the subcomponent is HDCP
+ *    and the grand parent of hdcp and the parent of i915 are the same
+ *    PCH device.
+ *
+ * @dev: master device
+ * @subcomponent: subcomponent to match (I915_COMPONENT_HDCP)
+ * @data: compare data (mei hdcp device)
+ *
+ * Return:
+ * * 1 - if components match
+ * * 0 - otherwise
+ */
 static int mei_hdcp_component_match(struct device *dev, int subcomponent,
 				    void *data)
 {
-	return !strcmp(dev->driver->name, "i915") &&
-	       subcomponent == I915_COMPONENT_HDCP;
+	struct device *base = data;
+
+	if (strcmp(dev->driver->name, "i915") ||
+	    subcomponent != I915_COMPONENT_HDCP)
+		return 0;
+
+	base = base->parent;
+	if (!base)
+		return 0;
+
+	base = base->parent;
+	dev = dev->parent;
+
+	return (base && dev && dev == base);
 }
 
 static int mei_hdcp_probe(struct mei_cl_device *cldev,
@@ -785,7 +812,7 @@ static int mei_hdcp_probe(struct mei_cl_device *cldev,
 
 	master_match = NULL;
 	component_match_add_typed(&cldev->dev, &master_match,
-				  mei_hdcp_component_match, comp_master);
+				  mei_hdcp_component_match, &cldev->dev);
 	if (IS_ERR_OR_NULL(master_match)) {
 		ret = -ENOMEM;
 		goto err_exit;
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 7cd67fb..87a0201 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -81,10 +81,16 @@
 
 #define MEI_DEV_ID_CMP_LP     0x02e0  /* Comet Point LP */
 #define MEI_DEV_ID_CMP_LP_3   0x02e4  /* Comet Point LP 3 (iTouch) */
+
 #define MEI_DEV_ID_CMP_V      0xA3BA  /* Comet Point Lake V */
 
+#define MEI_DEV_ID_CMP_H      0x06e0  /* Comet Lake H */
+#define MEI_DEV_ID_CMP_H_3    0x06e4  /* Comet Lake H 3 (iTouch) */
+
 #define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
 
+#define MEI_DEV_ID_JSP_N      0x4DE0  /* Jasper Lake Point N */
+
 #define MEI_DEV_ID_TGP_LP     0xA0E0  /* Tiger Lake Point LP */
 
 #define MEI_DEV_ID_MCC        0x4B70  /* Mule Creek Canyon (EHL) */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index c845b7e..2711451b 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -99,11 +99,15 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H, MEI_ME_PCH12_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_CFG)},
 
 	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
 
 	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},
+
 	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
 
diff --git a/drivers/misc/mic/card/mic_debugfs.c b/drivers/misc/mic/card/mic_debugfs.c
index 3ee3d24..b586088 100644
--- a/drivers/misc/mic/card/mic_debugfs.c
+++ b/drivers/misc/mic/card/mic_debugfs.c
@@ -65,9 +65,6 @@ void __init mic_create_card_debug_dir(struct mic_driver *mdrv)
  */
 void mic_delete_card_debug_dir(struct mic_driver *mdrv)
 {
-	if (!mdrv->dbg_dir)
-		return;
-
 	debugfs_remove_recursive(mdrv->dbg_dir);
 }
 
diff --git a/drivers/misc/mic/cosm/cosm_debugfs.c b/drivers/misc/mic/cosm/cosm_debugfs.c
index 2fc9f4b..68a731f 100644
--- a/drivers/misc/mic/cosm/cosm_debugfs.c
+++ b/drivers/misc/mic/cosm/cosm_debugfs.c
@@ -102,9 +102,6 @@ void cosm_create_debug_dir(struct cosm_device *cdev)
 
 void cosm_delete_debug_dir(struct cosm_device *cdev)
 {
-	if (!cdev->dbg_dir)
-		return;
-
 	debugfs_remove_recursive(cdev->dbg_dir);
 }
 
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index 8a8e416..ab0db7a 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -129,9 +129,6 @@ void mic_create_debug_dir(struct mic_device *mdev)
  */
 void mic_delete_debug_dir(struct mic_device *mdev)
 {
-	if (!mdev->dbg_dir)
-		return;
-
 	debugfs_remove_recursive(mdev->dbg_dir);
 }
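The three mic/cosm debugfs hunks above delete the same guard because debugfs_remove_recursive(), like debugfs_remove(), is a no-op when passed a NULL or error pointer, so cleanup paths may call it unconditionally. A minimal sketch (hypothetical device structure):

#include <linux/debugfs.h>

struct demo_dev {			/* hypothetical */
	struct dentry *dbg_dir;
};

static void demo_delete_debug_dir(struct demo_dev *d)
{
	/* safe even if creation failed and dbg_dir is NULL or an ERR_PTR */
	debugfs_remove_recursive(d->dbg_dir);
}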
 
diff --git a/drivers/misc/ocxl/Kconfig b/drivers/misc/ocxl/Kconfig
index 1916fa6..2d2266c 100644
--- a/drivers/misc/ocxl/Kconfig
+++ b/drivers/misc/ocxl/Kconfig
@@ -11,6 +11,7 @@
 	tristate "OpenCAPI coherent accelerator support"
 	depends on PPC_POWERNV && PCI && EEH
 	select OCXL_BASE
+	select HOTPLUG_PCI_POWERNV
 	default m
 	help
 	  Select this option to enable the ocxl driver for Open
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index 063e441..b7f5106 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -792,7 +792,7 @@ static int pti_pci_probe(struct pci_dev *pdev,
 		const struct pci_device_id *ent)
 {
 	unsigned int a;
-	int retval = -EINVAL;
+	int retval;
 	int pci_bar = 1;
 
 	dev_dbg(&pdev->dev, "%s %s(%d): PTI PCI ID %04x:%04x\n", __FILE__,
@@ -910,7 +910,7 @@ static struct pci_driver pti_pci_driver = {
  */
 static int __init pti_init(void)
 {
-	int retval = -EINVAL;
+	int retval;
 
 	/* First register module as tty device */
 
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 95ff7c5..a6e1a89 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -10,16 +10,16 @@
 
 #include <linux/acpi.h>
 #include <linux/kernel.h>
+#include <linux/kexec.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
 #include <linux/types.h>
+#include <uapi/misc/pvpanic.h>
 
 static void __iomem *base;
 
-#define PVPANIC_PANICKED        (1 << 0)
-
 MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
 MODULE_DESCRIPTION("pvpanic device driver");
 MODULE_LICENSE("GPL");
@@ -34,7 +34,13 @@ static int
 pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
 		     void *unused)
 {
-	pvpanic_send_event(PVPANIC_PANICKED);
+	unsigned int event = PVPANIC_PANICKED;
+
+	if (kexec_crash_loaded())
+		event = PVPANIC_CRASH_LOADED;
+
+	pvpanic_send_event(event);
+
 	return NOTIFY_DONE;
 }
 
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 2817f47..97b8b38 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -255,28 +255,28 @@ static int options_open(struct inode *inode, struct file *file)
 }
 
 /* *INDENT-OFF* */
-static const struct file_operations statistics_fops = {
-	.open 		= statistics_open,
-	.read 		= seq_read,
-	.write 		= statistics_write,
-	.llseek 	= seq_lseek,
-	.release 	= single_release,
+static const struct proc_ops statistics_proc_ops = {
+	.proc_open	= statistics_open,
+	.proc_read	= seq_read,
+	.proc_write	= statistics_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
 };
 
-static const struct file_operations mcs_statistics_fops = {
-	.open 		= mcs_statistics_open,
-	.read 		= seq_read,
-	.write 		= mcs_statistics_write,
-	.llseek 	= seq_lseek,
-	.release 	= single_release,
+static const struct proc_ops mcs_statistics_proc_ops = {
+	.proc_open	= mcs_statistics_open,
+	.proc_read	= seq_read,
+	.proc_write	= mcs_statistics_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
 };
 
-static const struct file_operations options_fops = {
-	.open 		= options_open,
-	.read 		= seq_read,
-	.write 		= options_write,
-	.llseek 	= seq_lseek,
-	.release 	= single_release,
+static const struct proc_ops options_proc_ops = {
+	.proc_open	= options_open,
+	.proc_read	= seq_read,
+	.proc_write	= options_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
 };
 
 static struct proc_dir_entry *proc_gru __read_mostly;
@@ -286,11 +286,11 @@ int gru_proc_init(void)
 	proc_gru = proc_mkdir("sgi_uv/gru", NULL);
 	if (!proc_gru)
 		return -1;
-	if (!proc_create("statistics", 0644, proc_gru, &statistics_fops))
+	if (!proc_create("statistics", 0644, proc_gru, &statistics_proc_ops))
 		goto err;
-	if (!proc_create("mcs_statistics", 0644, proc_gru, &mcs_statistics_fops))
+	if (!proc_create("mcs_statistics", 0644, proc_gru, &mcs_statistics_proc_ops))
 		goto err;
-	if (!proc_create("debug_options", 0644, proc_gru, &options_fops))
+	if (!proc_create("debug_options", 0644, proc_gru, &options_proc_ops))
 		goto err;
 	if (!proc_create_seq("cch_status", 0444, proc_gru, &cch_seq_ops))
 		goto err;
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
index d054e28..cb57ac6 100644
--- a/drivers/misc/sram-exec.c
+++ b/drivers/misc/sram-exec.c
@@ -85,6 +85,7 @@ void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
 	unsigned long base;
 	int pages;
 	void *dst_cpy;
+	int ret;
 
 	mutex_lock(&exec_pool_list_mutex);
 	list_for_each_entry(p, &exec_pool_list, list) {
@@ -104,16 +105,28 @@ void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
 
 	mutex_lock(&part->lock);
 
-	set_memory_nx((unsigned long)base, pages);
-	set_memory_rw((unsigned long)base, pages);
+	ret = set_memory_nx((unsigned long)base, pages);
+	if (ret)
+		goto error_out;
+	ret = set_memory_rw((unsigned long)base, pages);
+	if (ret)
+		goto error_out;
 
 	dst_cpy = fncpy(dst, src, size);
 
-	set_memory_ro((unsigned long)base, pages);
-	set_memory_x((unsigned long)base, pages);
+	ret = set_memory_ro((unsigned long)base, pages);
+	if (ret)
+		goto error_out;
+	ret = set_memory_x((unsigned long)base, pages);
+	if (ret)
+		goto error_out;
 
 	mutex_unlock(&part->lock);
 
 	return dst_cpy;
+
+error_out:
+	mutex_unlock(&part->lock);
+	return NULL;
 }
 EXPORT_SYMBOL_GPL(sram_exec_copy);
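The sram-exec hunk stops ignoring the return values of the set_memory_*() helpers, which can fail, and unwinds through a single label so the partition lock is always released. A minimal sketch of that check-and-unwind shape; the lock, addresses and copy are hypothetical stand-ins:

#include <linux/mutex.h>
#include <linux/set_memory.h>
#include <linux/string.h>

static DEFINE_MUTEX(demo_lock);		/* hypothetical */

static void *demo_copy_exec(unsigned long base, int pages,
			    void *dst, const void *src, size_t size)
{
	int ret;

	mutex_lock(&demo_lock);

	ret = set_memory_nx(base, pages);	/* may fail: check, don't ignore */
	if (ret)
		goto err_unlock;
	ret = set_memory_rw(base, pages);
	if (ret)
		goto err_unlock;

	memcpy(dst, src, size);			/* stand-in for the real copy */

	ret = set_memory_ro(base, pages);
	if (ret)
		goto err_unlock;
	ret = set_memory_x(base, pages);
	if (ret)
		goto err_unlock;

	mutex_unlock(&demo_lock);
	return dst;

err_unlock:
	mutex_unlock(&demo_lock);
	return NULL;
}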
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 2ae9948..14136d2 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -736,8 +736,8 @@ static int st_tty_open(struct tty_struct *tty)
 
 static void st_tty_close(struct tty_struct *tty)
 {
-	unsigned char i = ST_MAX_CHANNELS;
-	unsigned long flags = 0;
+	unsigned char i;
+	unsigned long flags;
 	struct	st_data_s *st_gdata = tty->disc_data;
 
 	pr_info("%s ", __func__);
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 09db397..6d71865 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -148,16 +148,14 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
 	u16 c0 = count_lut[ch0];
 	u16 c1 = count_lut[ch1];
 
-	/*
-	 * Calculate ratio.
-	 * Note: the "128" is a scaling factor
-	 */
-	u8 r = 128;
-
 	/* Avoid division by 0 and count 1 cannot be greater than count 0 */
 	if (c1 <= c0)
 		if (c0) {
-			r = c1 * 128 / c0;
+			/*
+			 * Calculate ratio.
+			 * Note: the "128" is a scaling factor
+			 */
+			u8 r = c1 * 128 / c0;
 
 			/* Calculate LUX */
 			lux = ((c0 - c1) * ratio_lut[r]) / 256;
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index 1183596..71bbaa5 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -733,7 +733,7 @@ static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
 	enum xsdfec_order order;
 	int err;
 
-	err = get_user(order, (enum xsdfec_order *)arg);
+	err = get_user(order, (enum xsdfec_order __user *)arg);
 	if (err)
 		return -EFAULT;
 
@@ -1025,25 +1025,25 @@ static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd,
 }
 #endif
 
-static unsigned int xsdfec_poll(struct file *file, poll_table *wait)
+static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
 {
-	unsigned int mask = 0;
+	__poll_t mask = 0;
 	struct xsdfec_dev *xsdfec;
 
 	xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
 
 	if (!xsdfec)
-		return POLLNVAL | POLLHUP;
+		return EPOLLNVAL | EPOLLHUP;
 
 	poll_wait(file, &xsdfec->waitq, wait);
 
 	/* XSDFEC ISR detected an error */
 	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
 	if (xsdfec->state_updated)
-		mask |= POLLIN | POLLPRI;
+		mask |= EPOLLIN | EPOLLPRI;
 
 	if (xsdfec->stats_updated)
-		mask |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
 	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
 
 	return mask;
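The xilinx_sdfec hunk converts the poll method to the __poll_t return type and the EPOLL* constants, the sparse-annotated replacements for the old unsigned int / POLL* pairing. A minimal sketch of a poll method in that style (hypothetical device structure, wait queue and readiness flag):

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct demo_dev {			/* hypothetical */
	wait_queue_head_t waitq;
	bool data_ready;
};

static __poll_t demo_poll(struct file *file, poll_table *wait)
{
	struct demo_dev *d = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &d->waitq, wait);	/* register for wakeups */

	if (d->data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;	/* readable without blocking */

	return mask;
}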
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 410a321..36aa082 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -44,7 +44,7 @@ struct block2mtd_dev {
 static LIST_HEAD(blkmtd_device_list);
 
 
-static struct page *page_read(struct address_space *mapping, int index)
+static struct page *page_read(struct address_space *mapping, pgoff_t index)
 {
 	return read_mapping_page(mapping, index, NULL);
 }
@@ -54,7 +54,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
 {
 	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
 	struct page *page;
-	int index = to >> PAGE_SHIFT;	// page index
+	pgoff_t index = to >> PAGE_SHIFT;	// page index
 	int pages = len >> PAGE_SHIFT;
 	u_long *p;
 	u_long *max;
@@ -103,7 +103,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
 {
 	struct block2mtd_dev *dev = mtd->priv;
 	struct page *page;
-	int index = from >> PAGE_SHIFT;
+	pgoff_t index = from >> PAGE_SHIFT;
 	int offset = from & (PAGE_SIZE-1);
 	int cpylen;
 
@@ -137,7 +137,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
 {
 	struct page *page;
 	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
-	int index = to >> PAGE_SHIFT;	// page index
+	pgoff_t index = to >> PAGE_SHIFT;	// page index
 	int offset = to & ~PAGE_MASK;	// page offset
 	int cpylen;
 
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 70bb403..2ac79e1 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -294,16 +294,15 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
 }
 
 
-static DEFINE_SPINLOCK(pcmcia_vpp_lock);
+static DEFINE_MUTEX(pcmcia_vpp_lock);
 static int pcmcia_vpp_refcnt;
 static void pcmciamtd_set_vpp(struct map_info *map, int on)
 {
 	struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
 	struct pcmcia_device *link = dev->p_dev;
-	unsigned long flags;
 
 	pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
-	spin_lock_irqsave(&pcmcia_vpp_lock, flags);
+	mutex_lock(&pcmcia_vpp_lock);
 	if (on) {
 		if (++pcmcia_vpp_refcnt == 1)   /* first nested 'on' */
 			pcmcia_fixup_vpp(link, dev->vpp);
@@ -311,7 +310,7 @@ static void pcmciamtd_set_vpp(struct map_info *map, int on)
 		if (--pcmcia_vpp_refcnt == 0)   /* last nested 'off' */
 			pcmcia_fixup_vpp(link, 0);
 	}
-	spin_unlock_irqrestore(&pcmcia_vpp_lock, flags);
+	mutex_unlock(&pcmcia_vpp_lock);
 }
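The pcmciamtd hunk swaps the spinlock for a mutex, presumably because pcmcia_fixup_vpp() ends up in socket callbacks that may sleep, which is not allowed under a spinlock with interrupts disabled. A minimal sketch of the refcounted on/off sequence under a mutex; the names are hypothetical and the real Vpp programming is only hinted at in comments:

#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(demo_vpp_lock);	/* hypothetical */
static int demo_vpp_refcnt;

static void demo_set_vpp(bool on)
{
	mutex_lock(&demo_vpp_lock);	/* a mutex, because the work below may sleep */
	if (on) {
		if (++demo_vpp_refcnt == 1) {
			/* first nested 'on': program the real Vpp here */
		}
	} else {
		if (--demo_vpp_refcnt == 0) {
			/* last nested 'off': drop Vpp back to 0 here */
		}
	}
	mutex_unlock(&demo_vpp_lock);
}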
 
 
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index a9f7964..8f7f966 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -38,6 +38,7 @@
 #include <linux/mtd/cfi_endian.h>
 #include <linux/io.h>
 #include <linux/of_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/gpio/consumer.h>
 
 #include "physmap-gemini.h"
@@ -64,16 +65,16 @@ static int physmap_flash_remove(struct platform_device *dev)
 {
 	struct physmap_flash_info *info;
 	struct physmap_flash_data *physmap_data;
-	int i, err;
+	int i, err = 0;
 
 	info = platform_get_drvdata(dev);
 	if (!info)
-		return 0;
+		goto out;
 
 	if (info->cmtd) {
 		err = mtd_device_unregister(info->cmtd);
 		if (err)
-			return err;
+			goto out;
 
 		if (info->cmtd != info->mtds[0])
 			mtd_concat_destroy(info->cmtd);
@@ -88,7 +89,10 @@ static int physmap_flash_remove(struct platform_device *dev)
 	if (physmap_data && physmap_data->exit)
 		physmap_data->exit(dev);
 
-	return 0;
+out:
+	pm_runtime_put(&dev->dev);
+	pm_runtime_disable(&dev->dev);
+	return err;
 }
 
 static void physmap_set_vpp(struct map_info *map, int state)
@@ -484,13 +488,19 @@ static int physmap_flash_probe(struct platform_device *dev)
 		return -EINVAL;
 	}
 
+	pm_runtime_enable(&dev->dev);
+	pm_runtime_get_sync(&dev->dev);
+
 	if (dev->dev.of_node)
 		err = physmap_flash_of_init(dev);
 	else
 		err = physmap_flash_pdata_init(dev);
 
-	if (err)
+	if (err) {
+		pm_runtime_put(&dev->dev);
+		pm_runtime_disable(&dev->dev);
 		return err;
+	}
 
 	for (i = 0; i < info->nmaps; i++) {
 		struct resource *res;
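The physmap-core hunk brackets the flash with a runtime-PM reference: pm_runtime_enable() plus pm_runtime_get_sync() in probe, and the matching pm_runtime_put() plus pm_runtime_disable() on the probe error path and on every exit from remove. A minimal sketch of that balancing (hypothetical probe/remove, real setup elided):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int demo_probe(struct platform_device *pdev)
{
	int err;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);	/* keep the device powered while in use */

	err = 0;				/* stand-in for the real setup */
	if (err) {
		pm_runtime_put(&pdev->dev);	/* drop the reference on failure too */
		pm_runtime_disable(&pdev->dev);
	}
	return err;
}

static int demo_remove(struct platform_device *pdev)
{
	/* real teardown goes here */
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}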
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 170a722..1d6c9e7 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -841,10 +841,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to c
 	return &concat->mtd;
 }
 
-/*
- * This function destroys an MTD object obtained from concat_mtd_devs()
- */
-
+/* Cleans the context obtained from mtd_concat_create() */
 void mtd_concat_destroy(struct mtd_info *mtd)
 {
 	struct mtd_concat *concat = CONCAT(mtd);
diff --git a/drivers/mtd/nand/onenand/Kconfig b/drivers/mtd/nand/onenand/Kconfig
index ae0b8fe..572b8fe 100644
--- a/drivers/mtd/nand/onenand/Kconfig
+++ b/drivers/mtd/nand/onenand/Kconfig
@@ -25,7 +25,7 @@
 
 config MTD_ONENAND_OMAP2
 	tristate "OneNAND on OMAP2/OMAP3 support"
-	depends on ARCH_OMAP2 || ARCH_OMAP3
+	depends on ARCH_OMAP2 || ARCH_OMAP3 || (COMPILE_TEST && ARM)
 	depends on OF || COMPILE_TEST
 	help
 	  Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC
@@ -33,12 +33,12 @@
 	  Enable dmaengine and gpiolib for better performance.
 
 config MTD_ONENAND_SAMSUNG
-        tristate "OneNAND on Samsung SOC controller support"
-        depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS4
-        help
-          Support for a OneNAND flash device connected to an Samsung SOC.
-          S3C64XX uses command mapping method.
-          S5PC110/S5PC210 use generic OneNAND method.
+	tristate "OneNAND on Samsung SOC controller support"
+	depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS4 || COMPILE_TEST
+	help
+	  Support for a OneNAND flash device connected to an Samsung SOC.
+	  S3C64XX uses command mapping method.
+	  S5PC110/S5PC210 use generic OneNAND method.
 
 config MTD_ONENAND_OTP
 	bool "OneNAND OTP Support"
diff --git a/drivers/mtd/nand/onenand/Makefile b/drivers/mtd/nand/onenand/Makefile
index a27b635..a0761c7 100644
--- a/drivers/mtd/nand/onenand/Makefile
+++ b/drivers/mtd/nand/onenand/Makefile
@@ -8,7 +8,7 @@
 
 # Board specific.
 obj-$(CONFIG_MTD_ONENAND_GENERIC)	+= generic.o
-obj-$(CONFIG_MTD_ONENAND_OMAP2)		+= omap2.o
-obj-$(CONFIG_MTD_ONENAND_SAMSUNG)       += samsung_mtd.o
+obj-$(CONFIG_MTD_ONENAND_OMAP2)		+= onenand_omap2.o
+obj-$(CONFIG_MTD_ONENAND_SAMSUNG)       += onenand_samsung.o
 
 onenand-objs = onenand_base.o onenand_bbt.o
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index 85640ee..d5326d1 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -1248,44 +1248,44 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 
 	stats = mtd->ecc_stats;
 
- 	/* Read-while-load method */
+	/* Read-while-load method */
 
- 	/* Do first load to bufferRAM */
- 	if (read < len) {
- 		if (!onenand_check_bufferram(mtd, from)) {
+	/* Do first load to bufferRAM */
+	if (read < len) {
+		if (!onenand_check_bufferram(mtd, from)) {
 			this->command(mtd, ONENAND_CMD_READ, from, writesize);
- 			ret = this->wait(mtd, FL_READING);
- 			onenand_update_bufferram(mtd, from, !ret);
+			ret = this->wait(mtd, FL_READING);
+			onenand_update_bufferram(mtd, from, !ret);
 			if (mtd_is_eccerr(ret))
 				ret = 0;
- 		}
- 	}
+		}
+	}
 
 	thislen = min_t(int, writesize, len - read);
 	column = from & (writesize - 1);
 	if (column + thislen > writesize)
 		thislen = writesize - column;
 
- 	while (!ret) {
- 		/* If there is more to load then start next load */
- 		from += thislen;
- 		if (read + thislen < len) {
+	while (!ret) {
+		/* If there is more to load then start next load */
+		from += thislen;
+		if (read + thislen < len) {
 			this->command(mtd, ONENAND_CMD_READ, from, writesize);
- 			/*
- 			 * Chip boundary handling in DDP
- 			 * Now we issued chip 1 read and pointed chip 1
+			/*
+			 * Chip boundary handling in DDP
+			 * Now we issued chip 1 read and pointed chip 1
 			 * bufferram so we have to point chip 0 bufferram.
- 			 */
- 			if (ONENAND_IS_DDP(this) &&
- 			    unlikely(from == (this->chipsize >> 1))) {
- 				this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
- 				boundary = 1;
- 			} else
- 				boundary = 0;
- 			ONENAND_SET_PREV_BUFFERRAM(this);
- 		}
- 		/* While load is going, read from last bufferRAM */
- 		this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
+			 */
+			if (ONENAND_IS_DDP(this) &&
+			    unlikely(from == (this->chipsize >> 1))) {
+				this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
+				boundary = 1;
+			} else
+				boundary = 0;
+			ONENAND_SET_PREV_BUFFERRAM(this);
+		}
+		/* While load is going, read from last bufferRAM */
+		this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
 
 		/* Read oob area if needed */
 		if (oobbuf) {
@@ -1301,24 +1301,24 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 			oobcolumn = 0;
 		}
 
- 		/* See if we are done */
- 		read += thislen;
- 		if (read == len)
- 			break;
- 		/* Set up for next read from bufferRAM */
- 		if (unlikely(boundary))
- 			this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
- 		ONENAND_SET_NEXT_BUFFERRAM(this);
- 		buf += thislen;
+		/* See if we are done */
+		read += thislen;
+		if (read == len)
+			break;
+		/* Set up for next read from bufferRAM */
+		if (unlikely(boundary))
+			this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
+		ONENAND_SET_NEXT_BUFFERRAM(this);
+		buf += thislen;
 		thislen = min_t(int, writesize, len - read);
- 		column = 0;
- 		cond_resched();
- 		/* Now wait for load */
- 		ret = this->wait(mtd, FL_READING);
- 		onenand_update_bufferram(mtd, from, !ret);
+		column = 0;
+		cond_resched();
+		/* Now wait for load */
+		ret = this->wait(mtd, FL_READING);
+		onenand_update_bufferram(mtd, from, !ret);
 		if (mtd_is_eccerr(ret))
 			ret = 0;
- 	}
+	}
 
 	/*
 	 * Return success, if no ECC failures, else -EBADMSG
diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c
similarity index 100%
rename from drivers/mtd/nand/onenand/omap2.c
rename to drivers/mtd/nand/onenand/onenand_omap2.c
diff --git a/drivers/mtd/nand/onenand/onenand_samsung.c b/drivers/mtd/nand/onenand/onenand_samsung.c
new file mode 100644
index 0000000..87b28e3
--- /dev/null
+++ b/drivers/mtd/nand/onenand/onenand_samsung.c
@@ -0,0 +1,1005 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Samsung S3C64XX/S5PC1XX OneNAND driver
+ *
+ *  Copyright © 2008-2010 Samsung Electronics
+ *  Kyungmin Park <kyungmin.park@samsung.com>
+ *  Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * Implementation:
+ *	S3C64XX: emulate the pseudo BufferRAM
+ *	S5PC110: use DMA
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include "samsung.h"
+
+enum soc_type {
+	TYPE_S3C6400,
+	TYPE_S3C6410,
+	TYPE_S5PC110,
+};
+
+#define ONENAND_ERASE_STATUS		0x00
+#define ONENAND_MULTI_ERASE_SET		0x01
+#define ONENAND_ERASE_START		0x03
+#define ONENAND_UNLOCK_START		0x08
+#define ONENAND_UNLOCK_END		0x09
+#define ONENAND_LOCK_START		0x0A
+#define ONENAND_LOCK_END		0x0B
+#define ONENAND_LOCK_TIGHT_START	0x0C
+#define ONENAND_LOCK_TIGHT_END		0x0D
+#define ONENAND_UNLOCK_ALL		0x0E
+#define ONENAND_OTP_ACCESS		0x12
+#define ONENAND_SPARE_ACCESS_ONLY	0x13
+#define ONENAND_MAIN_ACCESS_ONLY	0x14
+#define ONENAND_ERASE_VERIFY		0x15
+#define ONENAND_MAIN_SPARE_ACCESS	0x16
+#define ONENAND_PIPELINE_READ		0x4000
+
+#define MAP_00				(0x0)
+#define MAP_01				(0x1)
+#define MAP_10				(0x2)
+#define MAP_11				(0x3)
+
+#define S3C64XX_CMD_MAP_SHIFT		24
+
+#define S3C6400_FBA_SHIFT		10
+#define S3C6400_FPA_SHIFT		4
+#define S3C6400_FSA_SHIFT		2
+
+#define S3C6410_FBA_SHIFT		12
+#define S3C6410_FPA_SHIFT		6
+#define S3C6410_FSA_SHIFT		4
+
+/* S5PC110 specific definitions */
+#define S5PC110_DMA_SRC_ADDR		0x400
+#define S5PC110_DMA_SRC_CFG		0x404
+#define S5PC110_DMA_DST_ADDR		0x408
+#define S5PC110_DMA_DST_CFG		0x40C
+#define S5PC110_DMA_TRANS_SIZE		0x414
+#define S5PC110_DMA_TRANS_CMD		0x418
+#define S5PC110_DMA_TRANS_STATUS	0x41C
+#define S5PC110_DMA_TRANS_DIR		0x420
+#define S5PC110_INTC_DMA_CLR		0x1004
+#define S5PC110_INTC_ONENAND_CLR	0x1008
+#define S5PC110_INTC_DMA_MASK		0x1024
+#define S5PC110_INTC_ONENAND_MASK	0x1028
+#define S5PC110_INTC_DMA_PEND		0x1044
+#define S5PC110_INTC_ONENAND_PEND	0x1048
+#define S5PC110_INTC_DMA_STATUS		0x1064
+#define S5PC110_INTC_ONENAND_STATUS	0x1068
+
+#define S5PC110_INTC_DMA_TD		(1 << 24)
+#define S5PC110_INTC_DMA_TE		(1 << 16)
+
+#define S5PC110_DMA_CFG_SINGLE		(0x0 << 16)
+#define S5PC110_DMA_CFG_4BURST		(0x2 << 16)
+#define S5PC110_DMA_CFG_8BURST		(0x3 << 16)
+#define S5PC110_DMA_CFG_16BURST		(0x4 << 16)
+
+#define S5PC110_DMA_CFG_INC		(0x0 << 8)
+#define S5PC110_DMA_CFG_CNT		(0x1 << 8)
+
+#define S5PC110_DMA_CFG_8BIT		(0x0 << 0)
+#define S5PC110_DMA_CFG_16BIT		(0x1 << 0)
+#define S5PC110_DMA_CFG_32BIT		(0x2 << 0)
+
+#define S5PC110_DMA_SRC_CFG_READ	(S5PC110_DMA_CFG_16BURST | \
+					S5PC110_DMA_CFG_INC | \
+					S5PC110_DMA_CFG_16BIT)
+#define S5PC110_DMA_DST_CFG_READ	(S5PC110_DMA_CFG_16BURST | \
+					S5PC110_DMA_CFG_INC | \
+					S5PC110_DMA_CFG_32BIT)
+#define S5PC110_DMA_SRC_CFG_WRITE	(S5PC110_DMA_CFG_16BURST | \
+					S5PC110_DMA_CFG_INC | \
+					S5PC110_DMA_CFG_32BIT)
+#define S5PC110_DMA_DST_CFG_WRITE	(S5PC110_DMA_CFG_16BURST | \
+					S5PC110_DMA_CFG_INC | \
+					S5PC110_DMA_CFG_16BIT)
+
+#define S5PC110_DMA_TRANS_CMD_TDC	(0x1 << 18)
+#define S5PC110_DMA_TRANS_CMD_TEC	(0x1 << 16)
+#define S5PC110_DMA_TRANS_CMD_TR	(0x1 << 0)
+
+#define S5PC110_DMA_TRANS_STATUS_TD	(0x1 << 18)
+#define S5PC110_DMA_TRANS_STATUS_TB	(0x1 << 17)
+#define S5PC110_DMA_TRANS_STATUS_TE	(0x1 << 16)
+
+#define S5PC110_DMA_DIR_READ		0x0
+#define S5PC110_DMA_DIR_WRITE		0x1
+
+struct s3c_onenand {
+	struct mtd_info	*mtd;
+	struct platform_device	*pdev;
+	enum soc_type	type;
+	void __iomem	*base;
+	void __iomem	*ahb_addr;
+	int		bootram_command;
+	void		*page_buf;
+	void		*oob_buf;
+	unsigned int	(*mem_addr)(int fba, int fpa, int fsa);
+	unsigned int	(*cmd_map)(unsigned int type, unsigned int val);
+	void __iomem	*dma_addr;
+	unsigned long	phys_base;
+	struct completion	complete;
+};
+
+#define CMD_MAP_00(dev, addr)		(dev->cmd_map(MAP_00, ((addr) << 1)))
+#define CMD_MAP_01(dev, mem_addr)	(dev->cmd_map(MAP_01, (mem_addr)))
+#define CMD_MAP_10(dev, mem_addr)	(dev->cmd_map(MAP_10, (mem_addr)))
+#define CMD_MAP_11(dev, addr)		(dev->cmd_map(MAP_11, ((addr) << 2)))
+
+static struct s3c_onenand *onenand;
+
+static inline int s3c_read_reg(int offset)
+{
+	return readl(onenand->base + offset);
+}
+
+static inline void s3c_write_reg(int value, int offset)
+{
+	writel(value, onenand->base + offset);
+}
+
+static inline int s3c_read_cmd(unsigned int cmd)
+{
+	return readl(onenand->ahb_addr + cmd);
+}
+
+static inline void s3c_write_cmd(int value, unsigned int cmd)
+{
+	writel(value, onenand->ahb_addr + cmd);
+}
+
+#ifdef SAMSUNG_DEBUG
+static void s3c_dump_reg(void)
+{
+	int i;
+
+	for (i = 0; i < 0x400; i += 0x40) {
+		printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			(unsigned int) onenand->base + i,
+			s3c_read_reg(i), s3c_read_reg(i + 0x10),
+			s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30));
+	}
+}
+#endif
+
+static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
+{
+	return (type << S3C64XX_CMD_MAP_SHIFT) | val;
+}
+
+static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
+{
+	return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) |
+		(fsa << S3C6400_FSA_SHIFT);
+}
+
+static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa)
+{
+	return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) |
+		(fsa << S3C6410_FSA_SHIFT);
+}
+
+static void s3c_onenand_reset(void)
+{
+	unsigned long timeout = 0x10000;
+	int stat;
+
+	s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
+	while (1 && timeout--) {
+		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+		if (stat & RST_CMP)
+			break;
+	}
+	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+	/* Clear interrupt */
+	s3c_write_reg(0x0, INT_ERR_ACK_OFFSET);
+	/* Clear the ECC status */
+	s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET);
+}
+
+static unsigned short s3c_onenand_readw(void __iomem *addr)
+{
+	struct onenand_chip *this = onenand->mtd->priv;
+	struct device *dev = &onenand->pdev->dev;
+	int reg = addr - this->base;
+	int word_addr = reg >> 1;
+	int value;
+
+	/* It's used for probing time */
+	switch (reg) {
+	case ONENAND_REG_MANUFACTURER_ID:
+		return s3c_read_reg(MANUFACT_ID_OFFSET);
+	case ONENAND_REG_DEVICE_ID:
+		return s3c_read_reg(DEVICE_ID_OFFSET);
+	case ONENAND_REG_VERSION_ID:
+		return s3c_read_reg(FLASH_VER_ID_OFFSET);
+	case ONENAND_REG_DATA_BUFFER_SIZE:
+		return s3c_read_reg(DATA_BUF_SIZE_OFFSET);
+	case ONENAND_REG_TECHNOLOGY:
+		return s3c_read_reg(TECH_OFFSET);
+	case ONENAND_REG_SYS_CFG1:
+		return s3c_read_reg(MEM_CFG_OFFSET);
+
+	/* Used at unlock all status */
+	case ONENAND_REG_CTRL_STATUS:
+		return 0;
+
+	case ONENAND_REG_WP_STATUS:
+		return ONENAND_WP_US;
+
+	default:
+		break;
+	}
+
+	/* BootRAM access control */
+	if ((unsigned long)addr < ONENAND_DATARAM && onenand->bootram_command) {
+		if (word_addr == 0)
+			return s3c_read_reg(MANUFACT_ID_OFFSET);
+		if (word_addr == 1)
+			return s3c_read_reg(DEVICE_ID_OFFSET);
+		if (word_addr == 2)
+			return s3c_read_reg(FLASH_VER_ID_OFFSET);
+	}
+
+	value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff;
+	dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
+		 word_addr, value);
+	return value;
+}
+
+static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
+{
+	struct onenand_chip *this = onenand->mtd->priv;
+	struct device *dev = &onenand->pdev->dev;
+	unsigned int reg = addr - this->base;
+	unsigned int word_addr = reg >> 1;
+
+	/* It's used for probing time */
+	switch (reg) {
+	case ONENAND_REG_SYS_CFG1:
+		s3c_write_reg(value, MEM_CFG_OFFSET);
+		return;
+
+	case ONENAND_REG_START_ADDRESS1:
+	case ONENAND_REG_START_ADDRESS2:
+		return;
+
+	/* Lock/lock-tight/unlock/unlock_all */
+	case ONENAND_REG_START_BLOCK_ADDRESS:
+		return;
+
+	default:
+		break;
+	}
+
+	/* BootRAM access control */
+	if ((unsigned long)addr < ONENAND_DATARAM) {
+		if (value == ONENAND_CMD_READID) {
+			onenand->bootram_command = 1;
+			return;
+		}
+		if (value == ONENAND_CMD_RESET) {
+			s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
+			onenand->bootram_command = 0;
+			return;
+		}
+	}
+
+	dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
+		 word_addr, value);
+
+	s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr));
+}
+
+static int s3c_onenand_wait(struct mtd_info *mtd, int state)
+{
+	struct device *dev = &onenand->pdev->dev;
+	unsigned int flags = INT_ACT;
+	unsigned int stat, ecc;
+	unsigned long timeout;
+
+	switch (state) {
+	case FL_READING:
+		flags |= BLK_RW_CMP | LOAD_CMP;
+		break;
+	case FL_WRITING:
+		flags |= BLK_RW_CMP | PGM_CMP;
+		break;
+	case FL_ERASING:
+		flags |= BLK_RW_CMP | ERS_CMP;
+		break;
+	case FL_LOCKING:
+		flags |= BLK_RW_CMP;
+		break;
+	default:
+		break;
+	}
+
+	/* The 20 msec is enough */
+	timeout = jiffies + msecs_to_jiffies(20);
+	while (time_before(jiffies, timeout)) {
+		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+		if (stat & flags)
+			break;
+
+		if (state != FL_READING)
+			cond_resched();
+	}
+	/* To get correct interrupt status in timeout case */
+	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+	/*
+	 * In the Spec. it checks the controller status first
+	 * However if you get the correct information in case of
+	 * power off recovery (POR) test, it should read ECC status first
+	 */
+	if (stat & LOAD_CMP) {
+		ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
+		if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+			dev_info(dev, "%s: ECC error = 0x%04x\n", __func__,
+				 ecc);
+			mtd->ecc_stats.failed++;
+			return -EBADMSG;
+		}
+	}
+
+	if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) {
+		dev_info(dev, "%s: controller error = 0x%04x\n", __func__,
+			 stat);
+		if (stat & LOCKED_BLK)
+			dev_info(dev, "%s: it's locked error = 0x%04x\n",
+				 __func__, stat);
+
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
+			       size_t len)
+{
+	struct onenand_chip *this = mtd->priv;
+	unsigned int *m, *s;
+	int fba, fpa, fsa = 0;
+	unsigned int mem_addr, cmd_map_01, cmd_map_10;
+	int i, mcount, scount;
+	int index;
+
+	fba = (int) (addr >> this->erase_shift);
+	fpa = (int) (addr >> this->page_shift);
+	fpa &= this->page_mask;
+
+	mem_addr = onenand->mem_addr(fba, fpa, fsa);
+	cmd_map_01 = CMD_MAP_01(onenand, mem_addr);
+	cmd_map_10 = CMD_MAP_10(onenand, mem_addr);
+
+	switch (cmd) {
+	case ONENAND_CMD_READ:
+	case ONENAND_CMD_READOOB:
+	case ONENAND_CMD_BUFFERRAM:
+		ONENAND_SET_NEXT_BUFFERRAM(this);
+	default:
+		break;
+	}
+
+	index = ONENAND_CURRENT_BUFFERRAM(this);
+
+	/*
+	 * Emulate Two BufferRAMs and access with 4 bytes pointer
+	 */
+	m = onenand->page_buf;
+	s = onenand->oob_buf;
+
+	if (index) {
+		m += (this->writesize >> 2);
+		s += (mtd->oobsize >> 2);
+	}
+
+	mcount = mtd->writesize >> 2;
+	scount = mtd->oobsize >> 2;
+
+	switch (cmd) {
+	case ONENAND_CMD_READ:
+		/* Main */
+		for (i = 0; i < mcount; i++)
+			*m++ = s3c_read_cmd(cmd_map_01);
+		return 0;
+
+	case ONENAND_CMD_READOOB:
+		s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
+		/* Main */
+		for (i = 0; i < mcount; i++)
+			*m++ = s3c_read_cmd(cmd_map_01);
+
+		/* Spare */
+		for (i = 0; i < scount; i++)
+			*s++ = s3c_read_cmd(cmd_map_01);
+
+		s3c_write_reg(0, TRANS_SPARE_OFFSET);
+		return 0;
+
+	case ONENAND_CMD_PROG:
+		/* Main */
+		for (i = 0; i < mcount; i++)
+			s3c_write_cmd(*m++, cmd_map_01);
+		return 0;
+
+	case ONENAND_CMD_PROGOOB:
+		s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
+
+		/* Main - dummy write */
+		for (i = 0; i < mcount; i++)
+			s3c_write_cmd(0xffffffff, cmd_map_01);
+
+		/* Spare */
+		for (i = 0; i < scount; i++)
+			s3c_write_cmd(*s++, cmd_map_01);
+
+		s3c_write_reg(0, TRANS_SPARE_OFFSET);
+		return 0;
+
+	case ONENAND_CMD_UNLOCK_ALL:
+		s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10);
+		return 0;
+
+	case ONENAND_CMD_ERASE:
+		s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10);
+		return 0;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
+{
+	struct onenand_chip *this = mtd->priv;
+	int index = ONENAND_CURRENT_BUFFERRAM(this);
+	unsigned char *p;
+
+	if (area == ONENAND_DATARAM) {
+		p = onenand->page_buf;
+		if (index == 1)
+			p += this->writesize;
+	} else {
+		p = onenand->oob_buf;
+		if (index == 1)
+			p += mtd->oobsize;
+	}
+
+	return p;
+}
+
+static int onenand_read_bufferram(struct mtd_info *mtd, int area,
+				  unsigned char *buffer, int offset,
+				  size_t count)
+{
+	unsigned char *p;
+
+	p = s3c_get_bufferram(mtd, area);
+	memcpy(buffer, p + offset, count);
+	return 0;
+}
+
+static int onenand_write_bufferram(struct mtd_info *mtd, int area,
+				   const unsigned char *buffer, int offset,
+				   size_t count)
+{
+	unsigned char *p;
+
+	p = s3c_get_bufferram(mtd, area);
+	memcpy(p + offset, buffer, count);
+	return 0;
+}
+
+static int (*s5pc110_dma_ops)(dma_addr_t dst, dma_addr_t src, size_t count, int direction);
+
+static int s5pc110_dma_poll(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
+{
+	void __iomem *base = onenand->dma_addr;
+	int status;
+	unsigned long timeout;
+
+	writel(src, base + S5PC110_DMA_SRC_ADDR);
+	writel(dst, base + S5PC110_DMA_DST_ADDR);
+
+	if (direction == S5PC110_DMA_DIR_READ) {
+		writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
+		writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
+	} else {
+		writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
+		writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
+	}
+
+	writel(count, base + S5PC110_DMA_TRANS_SIZE);
+	writel(direction, base + S5PC110_DMA_TRANS_DIR);
+
+	writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
+
+	/*
+	 * There's no exact timeout values at Spec.
+	 * In real case it takes under 1 msec.
+	 * So 20 msecs are enough.
+	 */
+	timeout = jiffies + msecs_to_jiffies(20);
+
+	do {
+		status = readl(base + S5PC110_DMA_TRANS_STATUS);
+		if (status & S5PC110_DMA_TRANS_STATUS_TE) {
+			writel(S5PC110_DMA_TRANS_CMD_TEC,
+					base + S5PC110_DMA_TRANS_CMD);
+			return -EIO;
+		}
+	} while (!(status & S5PC110_DMA_TRANS_STATUS_TD) &&
+		time_before(jiffies, timeout));
+
+	writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
+
+	return 0;
+}
+
+static irqreturn_t s5pc110_onenand_irq(int irq, void *data)
+{
+	void __iomem *base = onenand->dma_addr;
+	int status, cmd = 0;
+
+	status = readl(base + S5PC110_INTC_DMA_STATUS);
+
+	if (likely(status & S5PC110_INTC_DMA_TD))
+		cmd = S5PC110_DMA_TRANS_CMD_TDC;
+
+	if (unlikely(status & S5PC110_INTC_DMA_TE))
+		cmd = S5PC110_DMA_TRANS_CMD_TEC;
+
+	writel(cmd, base + S5PC110_DMA_TRANS_CMD);
+	writel(status, base + S5PC110_INTC_DMA_CLR);
+
+	if (!onenand->complete.done)
+		complete(&onenand->complete);
+
+	return IRQ_HANDLED;
+}
+
+static int s5pc110_dma_irq(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
+{
+	void __iomem *base = onenand->dma_addr;
+	int status;
+
+	status = readl(base + S5PC110_INTC_DMA_MASK);
+	if (status) {
+		status &= ~(S5PC110_INTC_DMA_TD | S5PC110_INTC_DMA_TE);
+		writel(status, base + S5PC110_INTC_DMA_MASK);
+	}
+
+	writel(src, base + S5PC110_DMA_SRC_ADDR);
+	writel(dst, base + S5PC110_DMA_DST_ADDR);
+
+	if (direction == S5PC110_DMA_DIR_READ) {
+		writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
+		writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
+	} else {
+		writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
+		writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
+	}
+
+	writel(count, base + S5PC110_DMA_TRANS_SIZE);
+	writel(direction, base + S5PC110_DMA_TRANS_DIR);
+
+	writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
+
+	wait_for_completion_timeout(&onenand->complete, msecs_to_jiffies(20));
+
+	return 0;
+}
+
+static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
+		unsigned char *buffer, int offset, size_t count)
+{
+	struct onenand_chip *this = mtd->priv;
+	void __iomem *p;
+	void *buf = (void *) buffer;
+	dma_addr_t dma_src, dma_dst;
+	int err, ofs, page_dma = 0;
+	struct device *dev = &onenand->pdev->dev;
+
+	p = this->base + area;
+	if (ONENAND_CURRENT_BUFFERRAM(this)) {
+		if (area == ONENAND_DATARAM)
+			p += this->writesize;
+		else
+			p += mtd->oobsize;
+	}
+
+	if (offset & 3 || (size_t) buf & 3 ||
+		!onenand->dma_addr || count != mtd->writesize)
+		goto normal;
+
+	/* Handle vmalloc address */
+	if (buf >= high_memory) {
+		struct page *page;
+
+		if (((size_t) buf & PAGE_MASK) !=
+		    ((size_t) (buf + count - 1) & PAGE_MASK))
+			goto normal;
+		page = vmalloc_to_page(buf);
+		if (!page)
+			goto normal;
+
+		/* Page offset */
+		ofs = ((size_t) buf & ~PAGE_MASK);
+		page_dma = 1;
+
+		/* DMA routine */
+		dma_src = onenand->phys_base + (p - this->base);
+		dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
+	} else {
+		/* DMA routine */
+		dma_src = onenand->phys_base + (p - this->base);
+		dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
+	}
+	if (dma_mapping_error(dev, dma_dst)) {
+		dev_err(dev, "Couldn't map a %zu byte buffer for DMA\n", count);
+		goto normal;
+	}
+	err = s5pc110_dma_ops(dma_dst, dma_src,
+			count, S5PC110_DMA_DIR_READ);
+
+	if (page_dma)
+		dma_unmap_page(dev, dma_dst, count, DMA_FROM_DEVICE);
+	else
+		dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
+
+	if (!err)
+		return 0;
+
+normal:
+	if (count != mtd->writesize) {
+		/* Copy the bufferram to memory to prevent unaligned access */
+		memcpy_fromio(this->page_buf, p, mtd->writesize);
+		memcpy(buffer, this->page_buf + offset, count);
+	} else {
+		memcpy_fromio(buffer, p, count);
+	}
+
+	return 0;
+}
+
+static int s5pc110_chip_probe(struct mtd_info *mtd)
+{
+	/* Now just return 0 */
+	return 0;
+}
+
+static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
+{
+	unsigned int flags = INT_ACT | LOAD_CMP;
+	unsigned int stat;
+	unsigned long timeout;
+
+	/* The 20 msec is enough */
+	timeout = jiffies + msecs_to_jiffies(20);
+	while (time_before(jiffies, timeout)) {
+		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+		if (stat & flags)
+			break;
+	}
+	/* To get correct interrupt status in timeout case */
+	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+	if (stat & LD_FAIL_ECC_ERR) {
+		s3c_onenand_reset();
+		return ONENAND_BBT_READ_ERROR;
+	}
+
+	if (stat & LOAD_CMP) {
+		int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
+		if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+			s3c_onenand_reset();
+			return ONENAND_BBT_READ_ERROR;
+		}
+	}
+
+	return 0;
+}
+
+static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
+{
+	struct onenand_chip *this = mtd->priv;
+	struct device *dev = &onenand->pdev->dev;
+	unsigned int block, end;
+
+	end = this->chipsize >> this->erase_shift;
+
+	for (block = 0; block < end; block++) {
+		unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
+		s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));
+
+		if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
+			dev_err(dev, "block %d is write-protected!\n", block);
+			s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET);
+		}
+	}
+}
+
+static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs,
+				    size_t len, int cmd)
+{
+	struct onenand_chip *this = mtd->priv;
+	int start, end, start_mem_addr, end_mem_addr;
+
+	start = ofs >> this->erase_shift;
+	start_mem_addr = onenand->mem_addr(start, 0, 0);
+	end = start + (len >> this->erase_shift) - 1;
+	end_mem_addr = onenand->mem_addr(end, 0, 0);
+
+	if (cmd == ONENAND_CMD_LOCK) {
+		s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(onenand,
+							     start_mem_addr));
+		s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(onenand,
+							   end_mem_addr));
+	} else {
+		s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(onenand,
+							       start_mem_addr));
+		s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(onenand,
+							     end_mem_addr));
+	}
+
+	this->wait(mtd, FL_LOCKING);
+}
+
+static void s3c_unlock_all(struct mtd_info *mtd)
+{
+	struct onenand_chip *this = mtd->priv;
+	loff_t ofs = 0;
+	size_t len = this->chipsize;
+
+	if (this->options & ONENAND_HAS_UNLOCK_ALL) {
+		/* Write unlock command */
+		this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
+
+		/* No need to check return value */
+		this->wait(mtd, FL_LOCKING);
+
+		/* Workaround for all block unlock in DDP */
+		if (!ONENAND_IS_DDP(this)) {
+			s3c_onenand_check_lock_status(mtd);
+			return;
+		}
+
+		/* All blocks on another chip */
+		ofs = this->chipsize >> 1;
+		len = this->chipsize >> 1;
+	}
+
+	s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+
+	s3c_onenand_check_lock_status(mtd);
+}
+
+static void s3c_onenand_setup(struct mtd_info *mtd)
+{
+	struct onenand_chip *this = mtd->priv;
+
+	onenand->mtd = mtd;
+
+	if (onenand->type == TYPE_S3C6400) {
+		onenand->mem_addr = s3c6400_mem_addr;
+		onenand->cmd_map = s3c64xx_cmd_map;
+	} else if (onenand->type == TYPE_S3C6410) {
+		onenand->mem_addr = s3c6410_mem_addr;
+		onenand->cmd_map = s3c64xx_cmd_map;
+	} else if (onenand->type == TYPE_S5PC110) {
+		/* Use generic onenand functions */
+		this->read_bufferram = s5pc110_read_bufferram;
+		this->chip_probe = s5pc110_chip_probe;
+		return;
+	} else {
+		BUG();
+	}
+
+	this->read_word = s3c_onenand_readw;
+	this->write_word = s3c_onenand_writew;
+
+	this->wait = s3c_onenand_wait;
+	this->bbt_wait = s3c_onenand_bbt_wait;
+	this->unlock_all = s3c_unlock_all;
+	this->command = s3c_onenand_command;
+
+	this->read_bufferram = onenand_read_bufferram;
+	this->write_bufferram = onenand_write_bufferram;
+}
+
+static int s3c_onenand_probe(struct platform_device *pdev)
+{
+	struct onenand_platform_data *pdata;
+	struct onenand_chip *this;
+	struct mtd_info *mtd;
+	struct resource *r;
+	int size, err;
+
+	pdata = dev_get_platdata(&pdev->dev);
+	/* No need to check pdata. the platform data is optional */
+
+	size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
+	mtd = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!mtd)
+		return -ENOMEM;
+
+	onenand = devm_kzalloc(&pdev->dev, sizeof(struct s3c_onenand),
+			       GFP_KERNEL);
+	if (!onenand)
+		return -ENOMEM;
+
+	this = (struct onenand_chip *) &mtd[1];
+	mtd->priv = this;
+	mtd->dev.parent = &pdev->dev;
+	onenand->pdev = pdev;
+	onenand->type = platform_get_device_id(pdev)->driver_data;
+
+	s3c_onenand_setup(mtd);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	onenand->base = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(onenand->base))
+		return PTR_ERR(onenand->base);
+
+	onenand->phys_base = r->start;
+
+	/* Set onenand_chip also */
+	this->base = onenand->base;
+
+	/* Use runtime badblock check */
+	this->options |= ONENAND_SKIP_UNLOCK_CHECK;
+
+	if (onenand->type != TYPE_S5PC110) {
+		r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		onenand->ahb_addr = devm_ioremap_resource(&pdev->dev, r);
+		if (IS_ERR(onenand->ahb_addr))
+			return PTR_ERR(onenand->ahb_addr);
+
+		/* Allocate 4KiB BufferRAM */
+		onenand->page_buf = devm_kzalloc(&pdev->dev, SZ_4K,
+						 GFP_KERNEL);
+		if (!onenand->page_buf)
+			return -ENOMEM;
+
+		/* Allocate 128 SpareRAM */
+		onenand->oob_buf = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
+		if (!onenand->oob_buf)
+			return -ENOMEM;
+
+		/* S3C doesn't handle subpage write */
+		mtd->subpage_sft = 0;
+		this->subpagesize = mtd->writesize;
+
+	} else { /* S5PC110 */
+		r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		onenand->dma_addr = devm_ioremap_resource(&pdev->dev, r);
+		if (IS_ERR(onenand->dma_addr))
+			return PTR_ERR(onenand->dma_addr);
+
+		s5pc110_dma_ops = s5pc110_dma_poll;
+		/* Interrupt support */
+		r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+		if (r) {
+			init_completion(&onenand->complete);
+			s5pc110_dma_ops = s5pc110_dma_irq;
+			err = devm_request_irq(&pdev->dev, r->start,
+					       s5pc110_onenand_irq,
+					       IRQF_SHARED, "onenand",
+					       &onenand);
+			if (err) {
+				dev_err(&pdev->dev, "failed to get irq\n");
+				return err;
+			}
+		}
+	}
+
+	err = onenand_scan(mtd, 1);
+	if (err)
+		return err;
+
+	if (onenand->type != TYPE_S5PC110) {
+		/* S3C doesn't handle subpage write */
+		mtd->subpage_sft = 0;
+		this->subpagesize = mtd->writesize;
+	}
+
+	if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
+		dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
+
+	err = mtd_device_register(mtd, pdata ? pdata->parts : NULL,
+				  pdata ? pdata->nr_parts : 0);
+	if (err) {
+		dev_err(&pdev->dev, "failed to parse partitions and register the MTD device\n");
+		onenand_release(mtd);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, mtd);
+
+	return 0;
+}
+
+static int s3c_onenand_remove(struct platform_device *pdev)
+{
+	struct mtd_info *mtd = platform_get_drvdata(pdev);
+
+	onenand_release(mtd);
+
+	return 0;
+}
+
+static int s3c_pm_ops_suspend(struct device *dev)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+	struct onenand_chip *this = mtd->priv;
+
+	this->wait(mtd, FL_PM_SUSPENDED);
+	return 0;
+}
+
+static  int s3c_pm_ops_resume(struct device *dev)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+	struct onenand_chip *this = mtd->priv;
+
+	this->unlock_all(mtd);
+	return 0;
+}
+
+static const struct dev_pm_ops s3c_pm_ops = {
+	.suspend	= s3c_pm_ops_suspend,
+	.resume		= s3c_pm_ops_resume,
+};
+
+static const struct platform_device_id s3c_onenand_driver_ids[] = {
+	{
+		.name		= "s3c6400-onenand",
+		.driver_data	= TYPE_S3C6400,
+	}, {
+		.name		= "s3c6410-onenand",
+		.driver_data	= TYPE_S3C6410,
+	}, {
+		.name		= "s5pc110-onenand",
+		.driver_data	= TYPE_S5PC110,
+	}, { },
+};
+MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids);
+
+static struct platform_driver s3c_onenand_driver = {
+	.driver         = {
+		.name	= "samsung-onenand",
+		.pm	= &s3c_pm_ops,
+	},
+	.id_table	= s3c_onenand_driver_ids,
+	.probe          = s3c_onenand_probe,
+	.remove         = s3c_onenand_remove,
+};
+
+module_platform_driver(s3c_onenand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_DESCRIPTION("Samsung OneNAND controller support");
diff --git a/drivers/mtd/nand/onenand/samsung_mtd.c b/drivers/mtd/nand/onenand/samsung_mtd.c
deleted file mode 100644
index beb7987..0000000
--- a/drivers/mtd/nand/onenand/samsung_mtd.c
+++ /dev/null
@@ -1,1006 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Samsung S3C64XX/S5PC1XX OneNAND driver
- *
- *  Copyright © 2008-2010 Samsung Electronics
- *  Kyungmin Park <kyungmin.park@samsung.com>
- *  Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * Implementation:
- *	S3C64XX: emulate the pseudo BufferRAM
- *	S5PC110: use DMA
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/onenand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-#include "samsung.h"
-
-enum soc_type {
-	TYPE_S3C6400,
-	TYPE_S3C6410,
-	TYPE_S5PC110,
-};
-
-#define ONENAND_ERASE_STATUS		0x00
-#define ONENAND_MULTI_ERASE_SET		0x01
-#define ONENAND_ERASE_START		0x03
-#define ONENAND_UNLOCK_START		0x08
-#define ONENAND_UNLOCK_END		0x09
-#define ONENAND_LOCK_START		0x0A
-#define ONENAND_LOCK_END		0x0B
-#define ONENAND_LOCK_TIGHT_START	0x0C
-#define ONENAND_LOCK_TIGHT_END		0x0D
-#define ONENAND_UNLOCK_ALL		0x0E
-#define ONENAND_OTP_ACCESS		0x12
-#define ONENAND_SPARE_ACCESS_ONLY	0x13
-#define ONENAND_MAIN_ACCESS_ONLY	0x14
-#define ONENAND_ERASE_VERIFY		0x15
-#define ONENAND_MAIN_SPARE_ACCESS	0x16
-#define ONENAND_PIPELINE_READ		0x4000
-
-#define MAP_00				(0x0)
-#define MAP_01				(0x1)
-#define MAP_10				(0x2)
-#define MAP_11				(0x3)
-
-#define S3C64XX_CMD_MAP_SHIFT		24
-
-#define S3C6400_FBA_SHIFT		10
-#define S3C6400_FPA_SHIFT		4
-#define S3C6400_FSA_SHIFT		2
-
-#define S3C6410_FBA_SHIFT		12
-#define S3C6410_FPA_SHIFT		6
-#define S3C6410_FSA_SHIFT		4
-
-/* S5PC110 specific definitions */
-#define S5PC110_DMA_SRC_ADDR		0x400
-#define S5PC110_DMA_SRC_CFG		0x404
-#define S5PC110_DMA_DST_ADDR		0x408
-#define S5PC110_DMA_DST_CFG		0x40C
-#define S5PC110_DMA_TRANS_SIZE		0x414
-#define S5PC110_DMA_TRANS_CMD		0x418
-#define S5PC110_DMA_TRANS_STATUS	0x41C
-#define S5PC110_DMA_TRANS_DIR		0x420
-#define S5PC110_INTC_DMA_CLR		0x1004
-#define S5PC110_INTC_ONENAND_CLR	0x1008
-#define S5PC110_INTC_DMA_MASK		0x1024
-#define S5PC110_INTC_ONENAND_MASK	0x1028
-#define S5PC110_INTC_DMA_PEND		0x1044
-#define S5PC110_INTC_ONENAND_PEND	0x1048
-#define S5PC110_INTC_DMA_STATUS		0x1064
-#define S5PC110_INTC_ONENAND_STATUS	0x1068
-
-#define S5PC110_INTC_DMA_TD		(1 << 24)
-#define S5PC110_INTC_DMA_TE		(1 << 16)
-
-#define S5PC110_DMA_CFG_SINGLE		(0x0 << 16)
-#define S5PC110_DMA_CFG_4BURST		(0x2 << 16)
-#define S5PC110_DMA_CFG_8BURST		(0x3 << 16)
-#define S5PC110_DMA_CFG_16BURST		(0x4 << 16)
-
-#define S5PC110_DMA_CFG_INC		(0x0 << 8)
-#define S5PC110_DMA_CFG_CNT		(0x1 << 8)
-
-#define S5PC110_DMA_CFG_8BIT		(0x0 << 0)
-#define S5PC110_DMA_CFG_16BIT		(0x1 << 0)
-#define S5PC110_DMA_CFG_32BIT		(0x2 << 0)
-
-#define S5PC110_DMA_SRC_CFG_READ	(S5PC110_DMA_CFG_16BURST | \
-					S5PC110_DMA_CFG_INC | \
-					S5PC110_DMA_CFG_16BIT)
-#define S5PC110_DMA_DST_CFG_READ	(S5PC110_DMA_CFG_16BURST | \
-					S5PC110_DMA_CFG_INC | \
-					S5PC110_DMA_CFG_32BIT)
-#define S5PC110_DMA_SRC_CFG_WRITE	(S5PC110_DMA_CFG_16BURST | \
-					S5PC110_DMA_CFG_INC | \
-					S5PC110_DMA_CFG_32BIT)
-#define S5PC110_DMA_DST_CFG_WRITE	(S5PC110_DMA_CFG_16BURST | \
-					S5PC110_DMA_CFG_INC | \
-					S5PC110_DMA_CFG_16BIT)
-
-#define S5PC110_DMA_TRANS_CMD_TDC	(0x1 << 18)
-#define S5PC110_DMA_TRANS_CMD_TEC	(0x1 << 16)
-#define S5PC110_DMA_TRANS_CMD_TR	(0x1 << 0)
-
-#define S5PC110_DMA_TRANS_STATUS_TD	(0x1 << 18)
-#define S5PC110_DMA_TRANS_STATUS_TB	(0x1 << 17)
-#define S5PC110_DMA_TRANS_STATUS_TE	(0x1 << 16)
-
-#define S5PC110_DMA_DIR_READ		0x0
-#define S5PC110_DMA_DIR_WRITE		0x1
-
-struct s3c_onenand {
-	struct mtd_info	*mtd;
-	struct platform_device	*pdev;
-	enum soc_type	type;
-	void __iomem	*base;
-	void __iomem	*ahb_addr;
-	int		bootram_command;
-	void		*page_buf;
-	void		*oob_buf;
-	unsigned int	(*mem_addr)(int fba, int fpa, int fsa);
-	unsigned int	(*cmd_map)(unsigned int type, unsigned int val);
-	void __iomem	*dma_addr;
-	unsigned long	phys_base;
-	struct completion	complete;
-};
-
-#define CMD_MAP_00(dev, addr)		(dev->cmd_map(MAP_00, ((addr) << 1)))
-#define CMD_MAP_01(dev, mem_addr)	(dev->cmd_map(MAP_01, (mem_addr)))
-#define CMD_MAP_10(dev, mem_addr)	(dev->cmd_map(MAP_10, (mem_addr)))
-#define CMD_MAP_11(dev, addr)		(dev->cmd_map(MAP_11, ((addr) << 2)))
-
-static struct s3c_onenand *onenand;
-
-static inline int s3c_read_reg(int offset)
-{
-	return readl(onenand->base + offset);
-}
-
-static inline void s3c_write_reg(int value, int offset)
-{
-	writel(value, onenand->base + offset);
-}
-
-static inline int s3c_read_cmd(unsigned int cmd)
-{
-	return readl(onenand->ahb_addr + cmd);
-}
-
-static inline void s3c_write_cmd(int value, unsigned int cmd)
-{
-	writel(value, onenand->ahb_addr + cmd);
-}
-
-#ifdef SAMSUNG_DEBUG
-static void s3c_dump_reg(void)
-{
-	int i;
-
-	for (i = 0; i < 0x400; i += 0x40) {
-		printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			(unsigned int) onenand->base + i,
-			s3c_read_reg(i), s3c_read_reg(i + 0x10),
-			s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30));
-	}
-}
-#endif
-
-static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
-{
-	return (type << S3C64XX_CMD_MAP_SHIFT) | val;
-}
-
-static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
-{
-	return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) |
-		(fsa << S3C6400_FSA_SHIFT);
-}
-
-static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa)
-{
-	return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) |
-		(fsa << S3C6410_FSA_SHIFT);
-}
-
-static void s3c_onenand_reset(void)
-{
-	unsigned long timeout = 0x10000;
-	int stat;
-
-	s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
-	while (1 && timeout--) {
-		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
-		if (stat & RST_CMP)
-			break;
-	}
-	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
-	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
-
-	/* Clear interrupt */
-	s3c_write_reg(0x0, INT_ERR_ACK_OFFSET);
-	/* Clear the ECC status */
-	s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET);
-}
-
-static unsigned short s3c_onenand_readw(void __iomem *addr)
-{
-	struct onenand_chip *this = onenand->mtd->priv;
-	struct device *dev = &onenand->pdev->dev;
-	int reg = addr - this->base;
-	int word_addr = reg >> 1;
-	int value;
-
-	/* It's used for probing time */
-	switch (reg) {
-	case ONENAND_REG_MANUFACTURER_ID:
-		return s3c_read_reg(MANUFACT_ID_OFFSET);
-	case ONENAND_REG_DEVICE_ID:
-		return s3c_read_reg(DEVICE_ID_OFFSET);
-	case ONENAND_REG_VERSION_ID:
-		return s3c_read_reg(FLASH_VER_ID_OFFSET);
-	case ONENAND_REG_DATA_BUFFER_SIZE:
-		return s3c_read_reg(DATA_BUF_SIZE_OFFSET);
-	case ONENAND_REG_TECHNOLOGY:
-		return s3c_read_reg(TECH_OFFSET);
-	case ONENAND_REG_SYS_CFG1:
-		return s3c_read_reg(MEM_CFG_OFFSET);
-
-	/* Used at unlock all status */
-	case ONENAND_REG_CTRL_STATUS:
-		return 0;
-
-	case ONENAND_REG_WP_STATUS:
-		return ONENAND_WP_US;
-
-	default:
-		break;
-	}
-
-	/* BootRAM access control */
-	if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) {
-		if (word_addr == 0)
-			return s3c_read_reg(MANUFACT_ID_OFFSET);
-		if (word_addr == 1)
-			return s3c_read_reg(DEVICE_ID_OFFSET);
-		if (word_addr == 2)
-			return s3c_read_reg(FLASH_VER_ID_OFFSET);
-	}
-
-	value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff;
-	dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
-		 word_addr, value);
-	return value;
-}
-
-static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
-{
-	struct onenand_chip *this = onenand->mtd->priv;
-	struct device *dev = &onenand->pdev->dev;
-	unsigned int reg = addr - this->base;
-	unsigned int word_addr = reg >> 1;
-
-	/* It's used for probing time */
-	switch (reg) {
-	case ONENAND_REG_SYS_CFG1:
-		s3c_write_reg(value, MEM_CFG_OFFSET);
-		return;
-
-	case ONENAND_REG_START_ADDRESS1:
-	case ONENAND_REG_START_ADDRESS2:
-		return;
-
-	/* Lock/lock-tight/unlock/unlock_all */
-	case ONENAND_REG_START_BLOCK_ADDRESS:
-		return;
-
-	default:
-		break;
-	}
-
-	/* BootRAM access control */
-	if ((unsigned int)addr < ONENAND_DATARAM) {
-		if (value == ONENAND_CMD_READID) {
-			onenand->bootram_command = 1;
-			return;
-		}
-		if (value == ONENAND_CMD_RESET) {
-			s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
-			onenand->bootram_command = 0;
-			return;
-		}
-	}
-
-	dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
-		 word_addr, value);
-
-	s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr));
-}
-
-static int s3c_onenand_wait(struct mtd_info *mtd, int state)
-{
-	struct device *dev = &onenand->pdev->dev;
-	unsigned int flags = INT_ACT;
-	unsigned int stat, ecc;
-	unsigned long timeout;
-
-	switch (state) {
-	case FL_READING:
-		flags |= BLK_RW_CMP | LOAD_CMP;
-		break;
-	case FL_WRITING:
-		flags |= BLK_RW_CMP | PGM_CMP;
-		break;
-	case FL_ERASING:
-		flags |= BLK_RW_CMP | ERS_CMP;
-		break;
-	case FL_LOCKING:
-		flags |= BLK_RW_CMP;
-		break;
-	default:
-		break;
-	}
-
-	/* The 20 msec is enough */
-	timeout = jiffies + msecs_to_jiffies(20);
-	while (time_before(jiffies, timeout)) {
-		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
-		if (stat & flags)
-			break;
-
-		if (state != FL_READING)
-			cond_resched();
-	}
-	/* To get correct interrupt status in timeout case */
-	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
-	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
-
-	/*
-	 * In the Spec. it checks the controller status first
-	 * However if you get the correct information in case of
-	 * power off recovery (POR) test, it should read ECC status first
-	 */
-	if (stat & LOAD_CMP) {
-		ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
-		if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
-			dev_info(dev, "%s: ECC error = 0x%04x\n", __func__,
-				 ecc);
-			mtd->ecc_stats.failed++;
-			return -EBADMSG;
-		}
-	}
-
-	if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) {
-		dev_info(dev, "%s: controller error = 0x%04x\n", __func__,
-			 stat);
-		if (stat & LOCKED_BLK)
-			dev_info(dev, "%s: it's locked error = 0x%04x\n",
-				 __func__, stat);
-
-		return -EIO;
-	}
-
-	return 0;
-}
-
-static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
-			       size_t len)
-{
-	struct onenand_chip *this = mtd->priv;
-	unsigned int *m, *s;
-	int fba, fpa, fsa = 0;
-	unsigned int mem_addr, cmd_map_01, cmd_map_10;
-	int i, mcount, scount;
-	int index;
-
-	fba = (int) (addr >> this->erase_shift);
-	fpa = (int) (addr >> this->page_shift);
-	fpa &= this->page_mask;
-
-	mem_addr = onenand->mem_addr(fba, fpa, fsa);
-	cmd_map_01 = CMD_MAP_01(onenand, mem_addr);
-	cmd_map_10 = CMD_MAP_10(onenand, mem_addr);
-
-	switch (cmd) {
-	case ONENAND_CMD_READ:
-	case ONENAND_CMD_READOOB:
-	case ONENAND_CMD_BUFFERRAM:
-		ONENAND_SET_NEXT_BUFFERRAM(this);
-	default:
-		break;
-	}
-
-	index = ONENAND_CURRENT_BUFFERRAM(this);
-
-	/*
-	 * Emulate Two BufferRAMs and access with 4 bytes pointer
-	 */
-	m = onenand->page_buf;
-	s = onenand->oob_buf;
-
-	if (index) {
-		m += (this->writesize >> 2);
-		s += (mtd->oobsize >> 2);
-	}
-
-	mcount = mtd->writesize >> 2;
-	scount = mtd->oobsize >> 2;
-
-	switch (cmd) {
-	case ONENAND_CMD_READ:
-		/* Main */
-		for (i = 0; i < mcount; i++)
-			*m++ = s3c_read_cmd(cmd_map_01);
-		return 0;
-
-	case ONENAND_CMD_READOOB:
-		s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
-		/* Main */
-		for (i = 0; i < mcount; i++)
-			*m++ = s3c_read_cmd(cmd_map_01);
-
-		/* Spare */
-		for (i = 0; i < scount; i++)
-			*s++ = s3c_read_cmd(cmd_map_01);
-
-		s3c_write_reg(0, TRANS_SPARE_OFFSET);
-		return 0;
-
-	case ONENAND_CMD_PROG:
-		/* Main */
-		for (i = 0; i < mcount; i++)
-			s3c_write_cmd(*m++, cmd_map_01);
-		return 0;
-
-	case ONENAND_CMD_PROGOOB:
-		s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
-
-		/* Main - dummy write */
-		for (i = 0; i < mcount; i++)
-			s3c_write_cmd(0xffffffff, cmd_map_01);
-
-		/* Spare */
-		for (i = 0; i < scount; i++)
-			s3c_write_cmd(*s++, cmd_map_01);
-
-		s3c_write_reg(0, TRANS_SPARE_OFFSET);
-		return 0;
-
-	case ONENAND_CMD_UNLOCK_ALL:
-		s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10);
-		return 0;
-
-	case ONENAND_CMD_ERASE:
-		s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10);
-		return 0;
-
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
-{
-	struct onenand_chip *this = mtd->priv;
-	int index = ONENAND_CURRENT_BUFFERRAM(this);
-	unsigned char *p;
-
-	if (area == ONENAND_DATARAM) {
-		p = onenand->page_buf;
-		if (index == 1)
-			p += this->writesize;
-	} else {
-		p = onenand->oob_buf;
-		if (index == 1)
-			p += mtd->oobsize;
-	}
-
-	return p;
-}
-
-static int onenand_read_bufferram(struct mtd_info *mtd, int area,
-				  unsigned char *buffer, int offset,
-				  size_t count)
-{
-	unsigned char *p;
-
-	p = s3c_get_bufferram(mtd, area);
-	memcpy(buffer, p + offset, count);
-	return 0;
-}
-
-static int onenand_write_bufferram(struct mtd_info *mtd, int area,
-				   const unsigned char *buffer, int offset,
-				   size_t count)
-{
-	unsigned char *p;
-
-	p = s3c_get_bufferram(mtd, area);
-	memcpy(p + offset, buffer, count);
-	return 0;
-}
-
-static int (*s5pc110_dma_ops)(dma_addr_t dst, dma_addr_t src, size_t count, int direction);
-
-static int s5pc110_dma_poll(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
-{
-	void __iomem *base = onenand->dma_addr;
-	int status;
-	unsigned long timeout;
-
-	writel(src, base + S5PC110_DMA_SRC_ADDR);
-	writel(dst, base + S5PC110_DMA_DST_ADDR);
-
-	if (direction == S5PC110_DMA_DIR_READ) {
-		writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
-		writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
-	} else {
-		writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
-		writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
-	}
-
-	writel(count, base + S5PC110_DMA_TRANS_SIZE);
-	writel(direction, base + S5PC110_DMA_TRANS_DIR);
-
-	writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
-
-	/*
-	 * There's no exact timeout values at Spec.
-	 * In real case it takes under 1 msec.
-	 * So 20 msecs are enough.
-	 */
-	timeout = jiffies + msecs_to_jiffies(20);
-
-	do {
-		status = readl(base + S5PC110_DMA_TRANS_STATUS);
-		if (status & S5PC110_DMA_TRANS_STATUS_TE) {
-			writel(S5PC110_DMA_TRANS_CMD_TEC,
-					base + S5PC110_DMA_TRANS_CMD);
-			return -EIO;
-		}
-	} while (!(status & S5PC110_DMA_TRANS_STATUS_TD) &&
-		time_before(jiffies, timeout));
-
-	writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
-
-	return 0;
-}
-
-static irqreturn_t s5pc110_onenand_irq(int irq, void *data)
-{
-	void __iomem *base = onenand->dma_addr;
-	int status, cmd = 0;
-
-	status = readl(base + S5PC110_INTC_DMA_STATUS);
-
-	if (likely(status & S5PC110_INTC_DMA_TD))
-		cmd = S5PC110_DMA_TRANS_CMD_TDC;
-
-	if (unlikely(status & S5PC110_INTC_DMA_TE))
-		cmd = S5PC110_DMA_TRANS_CMD_TEC;
-
-	writel(cmd, base + S5PC110_DMA_TRANS_CMD);
-	writel(status, base + S5PC110_INTC_DMA_CLR);
-
-	if (!onenand->complete.done)
-		complete(&onenand->complete);
-
-	return IRQ_HANDLED;
-}
-
-static int s5pc110_dma_irq(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
-{
-	void __iomem *base = onenand->dma_addr;
-	int status;
-
-	status = readl(base + S5PC110_INTC_DMA_MASK);
-	if (status) {
-		status &= ~(S5PC110_INTC_DMA_TD | S5PC110_INTC_DMA_TE);
-		writel(status, base + S5PC110_INTC_DMA_MASK);
-	}
-
-	writel(src, base + S5PC110_DMA_SRC_ADDR);
-	writel(dst, base + S5PC110_DMA_DST_ADDR);
-
-	if (direction == S5PC110_DMA_DIR_READ) {
-		writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
-		writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
-	} else {
-		writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
-		writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
-	}
-
-	writel(count, base + S5PC110_DMA_TRANS_SIZE);
-	writel(direction, base + S5PC110_DMA_TRANS_DIR);
-
-	writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
-
-	wait_for_completion_timeout(&onenand->complete, msecs_to_jiffies(20));
-
-	return 0;
-}
-
-static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
-		unsigned char *buffer, int offset, size_t count)
-{
-	struct onenand_chip *this = mtd->priv;
-	void __iomem *p;
-	void *buf = (void *) buffer;
-	dma_addr_t dma_src, dma_dst;
-	int err, ofs, page_dma = 0;
-	struct device *dev = &onenand->pdev->dev;
-
-	p = this->base + area;
-	if (ONENAND_CURRENT_BUFFERRAM(this)) {
-		if (area == ONENAND_DATARAM)
-			p += this->writesize;
-		else
-			p += mtd->oobsize;
-	}
-
-	if (offset & 3 || (size_t) buf & 3 ||
-		!onenand->dma_addr || count != mtd->writesize)
-		goto normal;
-
-	/* Handle vmalloc address */
-	if (buf >= high_memory) {
-		struct page *page;
-
-		if (((size_t) buf & PAGE_MASK) !=
-		    ((size_t) (buf + count - 1) & PAGE_MASK))
-			goto normal;
-		page = vmalloc_to_page(buf);
-		if (!page)
-			goto normal;
-
-		/* Page offset */
-		ofs = ((size_t) buf & ~PAGE_MASK);
-		page_dma = 1;
-
-		/* DMA routine */
-		dma_src = onenand->phys_base + (p - this->base);
-		dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
-	} else {
-		/* DMA routine */
-		dma_src = onenand->phys_base + (p - this->base);
-		dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
-	}
-	if (dma_mapping_error(dev, dma_dst)) {
-		dev_err(dev, "Couldn't map a %d byte buffer for DMA\n", count);
-		goto normal;
-	}
-	err = s5pc110_dma_ops(dma_dst, dma_src,
-			count, S5PC110_DMA_DIR_READ);
-
-	if (page_dma)
-		dma_unmap_page(dev, dma_dst, count, DMA_FROM_DEVICE);
-	else
-		dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
-
-	if (!err)
-		return 0;
-
-normal:
-	if (count != mtd->writesize) {
-		/* Copy the bufferram to memory to prevent unaligned access */
-		memcpy_fromio(this->page_buf, p, mtd->writesize);
-		memcpy(buffer, this->page_buf + offset, count);
-	} else {
-		memcpy_fromio(buffer, p, count);
-	}
-
-	return 0;
-}
-
-static int s5pc110_chip_probe(struct mtd_info *mtd)
-{
-	/* Now just return 0 */
-	return 0;
-}
-
-static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
-{
-	unsigned int flags = INT_ACT | LOAD_CMP;
-	unsigned int stat;
-	unsigned long timeout;
-
-	/* The 20 msec is enough */
-	timeout = jiffies + msecs_to_jiffies(20);
-	while (time_before(jiffies, timeout)) {
-		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
-		if (stat & flags)
-			break;
-	}
-	/* To get correct interrupt status in timeout case */
-	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
-	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
-
-	if (stat & LD_FAIL_ECC_ERR) {
-		s3c_onenand_reset();
-		return ONENAND_BBT_READ_ERROR;
-	}
-
-	if (stat & LOAD_CMP) {
-		int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
-		if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
-			s3c_onenand_reset();
-			return ONENAND_BBT_READ_ERROR;
-		}
-	}
-
-	return 0;
-}
-
-static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
-{
-	struct onenand_chip *this = mtd->priv;
-	struct device *dev = &onenand->pdev->dev;
-	unsigned int block, end;
-	int tmp;
-
-	end = this->chipsize >> this->erase_shift;
-
-	for (block = 0; block < end; block++) {
-		unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
-		tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));
-
-		if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
-			dev_err(dev, "block %d is write-protected!\n", block);
-			s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET);
-		}
-	}
-}
-
-static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs,
-				    size_t len, int cmd)
-{
-	struct onenand_chip *this = mtd->priv;
-	int start, end, start_mem_addr, end_mem_addr;
-
-	start = ofs >> this->erase_shift;
-	start_mem_addr = onenand->mem_addr(start, 0, 0);
-	end = start + (len >> this->erase_shift) - 1;
-	end_mem_addr = onenand->mem_addr(end, 0, 0);
-
-	if (cmd == ONENAND_CMD_LOCK) {
-		s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(onenand,
-							     start_mem_addr));
-		s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(onenand,
-							   end_mem_addr));
-	} else {
-		s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(onenand,
-							       start_mem_addr));
-		s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(onenand,
-							     end_mem_addr));
-	}
-
-	this->wait(mtd, FL_LOCKING);
-}
-
-static void s3c_unlock_all(struct mtd_info *mtd)
-{
-	struct onenand_chip *this = mtd->priv;
-	loff_t ofs = 0;
-	size_t len = this->chipsize;
-
-	if (this->options & ONENAND_HAS_UNLOCK_ALL) {
-		/* Write unlock command */
-		this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
-
-		/* No need to check return value */
-		this->wait(mtd, FL_LOCKING);
-
-		/* Workaround for all block unlock in DDP */
-		if (!ONENAND_IS_DDP(this)) {
-			s3c_onenand_check_lock_status(mtd);
-			return;
-		}
-
-		/* All blocks on another chip */
-		ofs = this->chipsize >> 1;
-		len = this->chipsize >> 1;
-	}
-
-	s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
-
-	s3c_onenand_check_lock_status(mtd);
-}
-
-static void s3c_onenand_setup(struct mtd_info *mtd)
-{
-	struct onenand_chip *this = mtd->priv;
-
-	onenand->mtd = mtd;
-
-	if (onenand->type == TYPE_S3C6400) {
-		onenand->mem_addr = s3c6400_mem_addr;
-		onenand->cmd_map = s3c64xx_cmd_map;
-	} else if (onenand->type == TYPE_S3C6410) {
-		onenand->mem_addr = s3c6410_mem_addr;
-		onenand->cmd_map = s3c64xx_cmd_map;
-	} else if (onenand->type == TYPE_S5PC110) {
-		/* Use generic onenand functions */
-		this->read_bufferram = s5pc110_read_bufferram;
-		this->chip_probe = s5pc110_chip_probe;
-		return;
-	} else {
-		BUG();
-	}
-
-	this->read_word = s3c_onenand_readw;
-	this->write_word = s3c_onenand_writew;
-
-	this->wait = s3c_onenand_wait;
-	this->bbt_wait = s3c_onenand_bbt_wait;
-	this->unlock_all = s3c_unlock_all;
-	this->command = s3c_onenand_command;
-
-	this->read_bufferram = onenand_read_bufferram;
-	this->write_bufferram = onenand_write_bufferram;
-}
-
-static int s3c_onenand_probe(struct platform_device *pdev)
-{
-	struct onenand_platform_data *pdata;
-	struct onenand_chip *this;
-	struct mtd_info *mtd;
-	struct resource *r;
-	int size, err;
-
-	pdata = dev_get_platdata(&pdev->dev);
-	/* No need to check pdata. the platform data is optional */
-
-	size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
-	mtd = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
-	if (!mtd)
-		return -ENOMEM;
-
-	onenand = devm_kzalloc(&pdev->dev, sizeof(struct s3c_onenand),
-			       GFP_KERNEL);
-	if (!onenand)
-		return -ENOMEM;
-
-	this = (struct onenand_chip *) &mtd[1];
-	mtd->priv = this;
-	mtd->dev.parent = &pdev->dev;
-	onenand->pdev = pdev;
-	onenand->type = platform_get_device_id(pdev)->driver_data;
-
-	s3c_onenand_setup(mtd);
-
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	onenand->base = devm_ioremap_resource(&pdev->dev, r);
-	if (IS_ERR(onenand->base))
-		return PTR_ERR(onenand->base);
-
-	onenand->phys_base = r->start;
-
-	/* Set onenand_chip also */
-	this->base = onenand->base;
-
-	/* Use runtime badblock check */
-	this->options |= ONENAND_SKIP_UNLOCK_CHECK;
-
-	if (onenand->type != TYPE_S5PC110) {
-		r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-		onenand->ahb_addr = devm_ioremap_resource(&pdev->dev, r);
-		if (IS_ERR(onenand->ahb_addr))
-			return PTR_ERR(onenand->ahb_addr);
-
-		/* Allocate 4KiB BufferRAM */
-		onenand->page_buf = devm_kzalloc(&pdev->dev, SZ_4K,
-						 GFP_KERNEL);
-		if (!onenand->page_buf)
-			return -ENOMEM;
-
-		/* Allocate 128 SpareRAM */
-		onenand->oob_buf = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
-		if (!onenand->oob_buf)
-			return -ENOMEM;
-
-		/* S3C doesn't handle subpage write */
-		mtd->subpage_sft = 0;
-		this->subpagesize = mtd->writesize;
-
-	} else { /* S5PC110 */
-		r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-		onenand->dma_addr = devm_ioremap_resource(&pdev->dev, r);
-		if (IS_ERR(onenand->dma_addr))
-			return PTR_ERR(onenand->dma_addr);
-
-		s5pc110_dma_ops = s5pc110_dma_poll;
-		/* Interrupt support */
-		r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-		if (r) {
-			init_completion(&onenand->complete);
-			s5pc110_dma_ops = s5pc110_dma_irq;
-			err = devm_request_irq(&pdev->dev, r->start,
-					       s5pc110_onenand_irq,
-					       IRQF_SHARED, "onenand",
-					       &onenand);
-			if (err) {
-				dev_err(&pdev->dev, "failed to get irq\n");
-				return err;
-			}
-		}
-	}
-
-	err = onenand_scan(mtd, 1);
-	if (err)
-		return err;
-
-	if (onenand->type != TYPE_S5PC110) {
-		/* S3C doesn't handle subpage write */
-		mtd->subpage_sft = 0;
-		this->subpagesize = mtd->writesize;
-	}
-
-	if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
-		dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
-
-	err = mtd_device_register(mtd, pdata ? pdata->parts : NULL,
-				  pdata ? pdata->nr_parts : 0);
-	if (err) {
-		dev_err(&pdev->dev, "failed to parse partitions and register the MTD device\n");
-		onenand_release(mtd);
-		return err;
-	}
-
-	platform_set_drvdata(pdev, mtd);
-
-	return 0;
-}
-
-static int s3c_onenand_remove(struct platform_device *pdev)
-{
-	struct mtd_info *mtd = platform_get_drvdata(pdev);
-
-	onenand_release(mtd);
-
-	return 0;
-}
-
-static int s3c_pm_ops_suspend(struct device *dev)
-{
-	struct mtd_info *mtd = dev_get_drvdata(dev);
-	struct onenand_chip *this = mtd->priv;
-
-	this->wait(mtd, FL_PM_SUSPENDED);
-	return 0;
-}
-
-static  int s3c_pm_ops_resume(struct device *dev)
-{
-	struct mtd_info *mtd = dev_get_drvdata(dev);
-	struct onenand_chip *this = mtd->priv;
-
-	this->unlock_all(mtd);
-	return 0;
-}
-
-static const struct dev_pm_ops s3c_pm_ops = {
-	.suspend	= s3c_pm_ops_suspend,
-	.resume		= s3c_pm_ops_resume,
-};
-
-static const struct platform_device_id s3c_onenand_driver_ids[] = {
-	{
-		.name		= "s3c6400-onenand",
-		.driver_data	= TYPE_S3C6400,
-	}, {
-		.name		= "s3c6410-onenand",
-		.driver_data	= TYPE_S3C6410,
-	}, {
-		.name		= "s5pc110-onenand",
-		.driver_data	= TYPE_S5PC110,
-	}, { },
-};
-MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids);
-
-static struct platform_driver s3c_onenand_driver = {
-	.driver         = {
-		.name	= "samsung-onenand",
-		.pm	= &s3c_pm_ops,
-	},
-	.id_table	= s3c_onenand_driver_ids,
-	.probe          = s3c_onenand_probe,
-	.remove         = s3c_onenand_remove,
-};
-
-module_platform_driver(s3c_onenand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
-MODULE_DESCRIPTION("Samsung OneNAND controller support");
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 74fb91a..a80a46b 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -452,7 +452,7 @@
 
 config MTD_NAND_CADENCE
 	tristate "Support Cadence NAND (HPNFC) controller"
-	depends on OF || COMPILE_TEST
+	depends on (OF || COMPILE_TEST) && HAS_IOMEM
 	help
 	  Enable the driver for NAND flash on platforms using a Cadence NAND
 	  controller.
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 8d6be90..3ba17a9 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1578,9 +1578,8 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
 
 	nand->numcs = numcs;
 
-	gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "det", 0,
-						      &np->fwnode, GPIOD_IN,
-						      "nand-det");
+	gpio = devm_fwnode_gpiod_get(nc->dev, of_fwnode_handle(np),
+				     "det", GPIOD_IN, "nand-det");
 	if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
 		dev_err(nc->dev,
 			"Failed to get detect gpio (err = %ld)\n",
@@ -1624,9 +1623,10 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
 			nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
 			nand->cs[i].rb.id = val;
 		} else {
-			gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev,
-							"rb", i, &np->fwnode,
-							GPIOD_IN, "nand-rb");
+			gpio = devm_fwnode_gpiod_get_index(nc->dev,
+							   of_fwnode_handle(np),
+							   "rb", i, GPIOD_IN,
+							   "nand-rb");
 			if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
 				dev_err(nc->dev,
 					"Failed to get R/B gpio (err = %ld)\n",
@@ -1640,10 +1640,10 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
 			}
 		}
 
-		gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "cs",
-							      i, &np->fwnode,
-							      GPIOD_OUT_HIGH,
-							      "nand-cs");
+		gpio = devm_fwnode_gpiod_get_index(nc->dev,
+						   of_fwnode_handle(np),
+						   "cs", i, GPIOD_OUT_HIGH,
+						   "nand-cs");
 		if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
 			dev_err(nc->dev,
 				"Failed to get CS gpio (err = %ld)\n",
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 1a66b1c..44518dad 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -2635,6 +2635,16 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
 		/* initialize the dma version */
 		brcmnand_flash_dma_revision_init(ctrl);
 
+		ret = -EIO;
+		if (ctrl->nand_version >= 0x0700)
+			ret = dma_set_mask_and_coherent(&pdev->dev,
+							DMA_BIT_MASK(40));
+		if (ret)
+			ret = dma_set_mask_and_coherent(&pdev->dev,
+							DMA_BIT_MASK(32));
+		if (ret)
+			goto err;
+
 		/* linked-list and stop on error */
 		flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
 		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
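The hunk above follows the usual DMA-mask fallback idiom; a minimal sketch, with foo_ as a placeholder name and the 40-bit/32-bit widths taken from the change itself:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int foo_set_dma_mask(struct platform_device *pdev, bool has_wide_dma)
{
	int ret = -EIO;

	/* Prefer the 40-bit mask on newer controllers... */
	if (has_wide_dma)
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* ...and fall back to 32 bits if that is rejected. */
	if (ret)
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	return ret;	/* non-zero only if neither mask can be set */
}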
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 3102ddb..fafd0a0 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -21,7 +21,6 @@
 #include "denali.h"
 
 #define DENALI_NAND_NAME    "denali-nand"
-#define DENALI_DEFAULT_OOB_SKIP_BYTES	8
 
 /* for Indexed Addressing */
 #define DENALI_INDEXED_CTRL	0x00
@@ -1302,15 +1301,16 @@ int denali_init(struct denali_controller *denali)
 
 	/*
 	 * Set how many bytes should be skipped before writing data in OOB.
-	 * If a non-zero value has already been set (by firmware or something),
-	 * just use it. Otherwise, set the driver's default.
+	 * If the platform requests a non-zero value, write it to the register.
+	 * Otherwise, read the value back out, on the assumption that firmware
+	 * has already set it up.
 	 */
-	denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
-	if (!denali->oob_skip_bytes) {
-		denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
+	if (denali->oob_skip_bytes)
 		iowrite32(denali->oob_skip_bytes,
 			  denali->reg + SPARE_AREA_SKIP_BYTES);
-	}
+	else
+		denali->oob_skip_bytes = ioread32(denali->reg +
+						  SPARE_AREA_SKIP_BYTES);
 
 	iowrite32(0, denali->reg + TRANSFER_SPARE_REG);
 	iowrite32(GENMASK(denali->nbanks - 1, 0), denali->reg + RB_PIN_ENABLED);
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 8b779a8..f08740a 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -6,6 +6,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
@@ -14,6 +15,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/reset.h>
 
 #include "denali.h"
 
@@ -22,11 +24,14 @@ struct denali_dt {
 	struct clk *clk;	/* core clock */
 	struct clk *clk_x;	/* bus interface clock */
 	struct clk *clk_ecc;	/* ECC circuit clock */
+	struct reset_control *rst;	/* core reset */
+	struct reset_control *rst_reg;	/* register reset */
 };
 
 struct denali_dt_data {
 	unsigned int revision;
 	unsigned int caps;
+	unsigned int oob_skip_bytes;
 	const struct nand_ecc_caps *ecc_caps;
 };
 
@@ -34,6 +39,7 @@ NAND_ECC_CAPS_SINGLE(denali_socfpga_ecc_caps, denali_calc_ecc_bytes,
 		     512, 8, 15);
 static const struct denali_dt_data denali_socfpga_data = {
 	.caps = DENALI_CAP_HW_ECC_FIXUP,
+	.oob_skip_bytes = 2,
 	.ecc_caps = &denali_socfpga_ecc_caps,
 };
 
@@ -42,6 +48,7 @@ NAND_ECC_CAPS_SINGLE(denali_uniphier_v5a_ecc_caps, denali_calc_ecc_bytes,
 static const struct denali_dt_data denali_uniphier_v5a_data = {
 	.caps = DENALI_CAP_HW_ECC_FIXUP |
 		DENALI_CAP_DMA_64BIT,
+	.oob_skip_bytes = 8,
 	.ecc_caps = &denali_uniphier_v5a_ecc_caps,
 };
 
@@ -51,6 +58,7 @@ static const struct denali_dt_data denali_uniphier_v5b_data = {
 	.revision = 0x0501,
 	.caps = DENALI_CAP_HW_ECC_FIXUP |
 		DENALI_CAP_DMA_64BIT,
+	.oob_skip_bytes = 8,
 	.ecc_caps = &denali_uniphier_v5b_ecc_caps,
 };
 
@@ -118,11 +126,13 @@ static int denali_dt_probe(struct platform_device *pdev)
 	denali = &dt->controller;
 
 	data = of_device_get_match_data(dev);
-	if (data) {
-		denali->revision = data->revision;
-		denali->caps = data->caps;
-		denali->ecc_caps = data->ecc_caps;
-	}
+	if (WARN_ON(!data))
+		return -EINVAL;
+
+	denali->revision = data->revision;
+	denali->caps = data->caps;
+	denali->oob_skip_bytes = data->oob_skip_bytes;
+	denali->ecc_caps = data->ecc_caps;
 
 	denali->dev = dev;
 	denali->irq = platform_get_irq(pdev, 0);
@@ -151,6 +161,14 @@ static int denali_dt_probe(struct platform_device *pdev)
 	if (IS_ERR(dt->clk_ecc))
 		return PTR_ERR(dt->clk_ecc);
 
+	dt->rst = devm_reset_control_get_optional_shared(dev, "nand");
+	if (IS_ERR(dt->rst))
+		return PTR_ERR(dt->rst);
+
+	dt->rst_reg = devm_reset_control_get_optional_shared(dev, "reg");
+	if (IS_ERR(dt->rst_reg))
+		return PTR_ERR(dt->rst_reg);
+
 	ret = clk_prepare_enable(dt->clk);
 	if (ret)
 		return ret;
@@ -166,10 +184,30 @@ static int denali_dt_probe(struct platform_device *pdev)
 	denali->clk_rate = clk_get_rate(dt->clk);
 	denali->clk_x_rate = clk_get_rate(dt->clk_x);
 
-	ret = denali_init(denali);
+	/*
+	 * Deassert the register reset first, then the core reset.
+	 * Deasserting the core reset while the register reset is asserted
+	 * will cause unpredictable behavior in the controller.
+	 */
+	ret = reset_control_deassert(dt->rst_reg);
 	if (ret)
 		goto out_disable_clk_ecc;
 
+	ret = reset_control_deassert(dt->rst);
+	if (ret)
+		goto out_assert_rst_reg;
+
+	/*
+	 * When the reset is deasserted, the initialization sequence is kicked
+	 * (bootstrap process). The driver must wait until it finished.
+	 * Otherwise, it will result in unpredictable behavior.
+	 */
+	usleep_range(200, 1000);
+
+	ret = denali_init(denali);
+	if (ret)
+		goto out_assert_rst;
+
 	for_each_child_of_node(dev->of_node, np) {
 		ret = denali_dt_chip_init(denali, np);
 		if (ret) {
@@ -184,6 +222,10 @@ static int denali_dt_probe(struct platform_device *pdev)
 
 out_remove_denali:
 	denali_remove(denali);
+out_assert_rst:
+	reset_control_assert(dt->rst);
+out_assert_rst_reg:
+	reset_control_assert(dt->rst_reg);
 out_disable_clk_ecc:
 	clk_disable_unprepare(dt->clk_ecc);
 out_disable_clk_x:
@@ -199,6 +241,8 @@ static int denali_dt_remove(struct platform_device *pdev)
 	struct denali_dt *dt = platform_get_drvdata(pdev);
 
 	denali_remove(&dt->controller);
+	reset_control_assert(dt->rst);
+	reset_control_assert(dt->rst_reg);
 	clk_disable_unprepare(dt->clk_ecc);
 	clk_disable_unprepare(dt->clk_x);
 	clk_disable_unprepare(dt->clk);
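A condensed sketch of the optional, shared reset handling added above: the _optional_ getters return NULL when the device tree describes no such reset, and the reset API treats NULL as a no-op, so a single code path covers both cases (the foo_ name and local variables are placeholders).

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int foo_deassert_resets(struct device *dev)
{
	struct reset_control *rst, *rst_reg;
	int ret;

	rst_reg = devm_reset_control_get_optional_shared(dev, "reg");
	if (IS_ERR(rst_reg))
		return PTR_ERR(rst_reg);

	rst = devm_reset_control_get_optional_shared(dev, "nand");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* The register reset must be released before the core reset. */
	ret = reset_control_deassert(rst_reg);
	if (ret)
		return ret;

	ret = reset_control_deassert(rst);
	if (ret) {
		reset_control_assert(rst_reg);
		return ret;
	}

	/* Let the controller's bootstrap sequence finish. */
	usleep_range(200, 1000);
	return 0;
}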
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index 8b90def..a2fcb73 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -438,7 +438,7 @@ static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
 		buffer += blksize;
 		offset += blksize;
 		size -= blksize;
-	};
+	}
 }
 
 /* Copy data from/to NFC main and spare buffers */
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 58511ae..3ff7ce0 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -59,7 +59,7 @@ static void macronix_nand_onfi_init(struct nand_chip *chip)
  */
 static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
 {
-	unsigned int i;
+	int i;
 	static const char * const broken_get_timings[] = {
 		"MX30LF1G18AC",
 		"MX30LF1G28AC",
@@ -80,12 +80,9 @@ static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
 	if (!chip->parameters.supports_set_get_features)
 		return;
 
-	for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) {
-		if (!strcmp(broken_get_timings[i], chip->parameters.model))
-			break;
-	}
-
-	if (i == ARRAY_SIZE(broken_get_timings))
+	i = match_string(broken_get_timings, ARRAY_SIZE(broken_get_timings),
+			 chip->parameters.model);
+	if (i < 0)
 		return;
 
 	bitmap_clear(chip->parameters.get_feature_list,
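The open-coded lookup loop is replaced with match_string(); a minimal sketch of the idiom, reusing two model names from the table above (foo_ is a placeholder):

#include <linux/kernel.h>
#include <linux/string.h>

static bool foo_is_broken_model(const char *model)
{
	static const char * const broken[] = {
		"MX30LF1G18AC",
		"MX30LF1G28AC",
	};

	/* match_string() returns the matching index, or -EINVAL if absent. */
	return match_string(broken, ARRAY_SIZE(broken), model) >= 0;
}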
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 1cb3760..0db5ee4 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -124,6 +124,16 @@ static const struct spinand_info toshiba_spinand_table[] = {
 		     0,
 		     SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
 				     tc58cxgxsx_ecc_get_status)),
+	/* 3.3V 4Gb */
+	SPINAND_INFO("TC58CVG2S0", 0xED,
+		     NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_variants,
+					      &update_cache_variants),
+		     0,
+		     SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+				     tc58cxgxsx_ecc_get_status)),
 	/* 1.8V 1Gb */
 	SPINAND_INFO("TC58CYG0S3", 0xB2,
 		     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
index e5ea612..671a618 100644
--- a/drivers/mtd/parsers/sharpslpart.c
+++ b/drivers/mtd/parsers/sharpslpart.c
@@ -165,10 +165,10 @@ static int sharpsl_nand_get_logical_num(u8 *oob)
 
 static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
 {
-	unsigned int block_num, log_num, phymax;
+	unsigned int block_num, phymax;
+	int i, ret, log_num;
 	loff_t block_adr;
 	u8 *oob;
-	int i, ret;
 
 	oob = kzalloc(mtd->oobsize, GFP_KERNEL);
 	if (!oob)
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index f237fcd..c1eda67 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -46,11 +46,11 @@
 	  Flash as an MTD device.
 
 config SPI_HISI_SFC
-	tristate "Hisilicon SPI-NOR Flash Controller(SFC)"
+	tristate "Hisilicon FMC SPI-NOR Flash Controller(SFC)"
 	depends on ARCH_HISI || COMPILE_TEST
 	depends on HAS_IOMEM
 	help
-	  This enables support for hisilicon SPI-NOR flash controller.
+	  This enables support for HiSilicon FMC SPI-NOR flash controller.
 
 config SPI_MTK_QUADSPI
 	tristate "MediaTek Quad SPI controller"
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/aspeed-smc.c
index 2b7cabb..3951273 100644
--- a/drivers/mtd/spi-nor/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/aspeed-smc.c
@@ -305,7 +305,7 @@ static void aspeed_smc_stop_user(struct spi_nor *nor)
 	writel(ctl, chip->ctl);		/* default to fread or read mode */
 }
 
-static int aspeed_smc_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+static int aspeed_smc_prep(struct spi_nor *nor)
 {
 	struct aspeed_smc_chip *chip = nor->priv;
 
@@ -313,7 +313,7 @@ static int aspeed_smc_prep(struct spi_nor *nor, enum spi_nor_ops ops)
 	return 0;
 }
 
-static void aspeed_smc_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+static void aspeed_smc_unprep(struct spi_nor *nor)
 {
 	struct aspeed_smc_chip *chip = nor->priv;
 
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 06f9972..494dcab 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -1062,7 +1062,7 @@ static int cqspi_erase(struct spi_nor *nor, loff_t offs)
 	return 0;
 }
 
-static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+static int cqspi_prep(struct spi_nor *nor)
 {
 	struct cqspi_flash_pdata *f_pdata = nor->priv;
 	struct cqspi_st *cqspi = f_pdata->cqspi;
@@ -1072,7 +1072,7 @@ static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
 	return 0;
 }
 
-static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+static void cqspi_unprep(struct spi_nor *nor)
 {
 	struct cqspi_flash_pdata *f_pdata = nor->priv;
 	struct cqspi_st *cqspi = f_pdata->cqspi;
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
index a125821..6c7a411 100644
--- a/drivers/mtd/spi-nor/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/hisi-sfc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * HiSilicon SPI Nor Flash Controller Driver
+ * HiSilicon FMC SPI-NOR flash controller driver
  *
  * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd.
  */
@@ -144,7 +144,7 @@ static void hisi_spi_nor_init(struct hifmc_host *host)
 	writel(reg, host->regbase + FMC_SPI_TIMING_CFG);
 }
 
-static int hisi_spi_nor_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+static int hisi_spi_nor_prep(struct spi_nor *nor)
 {
 	struct hifmc_priv *priv = nor->priv;
 	struct hifmc_host *host = priv->host;
@@ -167,7 +167,7 @@ static int hisi_spi_nor_prep(struct spi_nor *nor, enum spi_nor_ops ops)
 	return ret;
 }
 
-static void hisi_spi_nor_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+static void hisi_spi_nor_unprep(struct spi_nor *nor)
 {
 	struct hifmc_priv *priv = nor->priv;
 	struct hifmc_host *host = priv->host;
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
index 3d8987b..81329f6 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -70,10 +70,12 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
 	{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
 	{ PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
 	{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
+	{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
 	{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
 	{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
 	{ PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
 	{ PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
+	{ PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&bxt_info },
 	{ },
 };
 MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index b0cd443..4fc632e 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -85,7 +85,7 @@ struct sfdp_header {
 #define BFPT_DWORD(i)		((i) - 1)
 #define BFPT_DWORD_MAX		16
 
-/* The first version of JESB216 defined only 9 DWORDs. */
+/* The first version of JESD216 defined only 9 DWORDs. */
 #define BFPT_DWORD_MAX_JESD216			9
 
 /* 1st DWORD. */
@@ -196,7 +196,7 @@ struct flash_info {
 	u16		page_size;
 	u16		addr_width;
 
-	u16		flags;
+	u32		flags;
 #define SECT_4K			BIT(0)	/* SPINOR_OP_BE_4K works uniformly */
 #define SPI_NOR_NO_ERASE	BIT(1)	/* No erase command needed */
 #define SST_WRITE		BIT(2)	/* use SST byte programming */
@@ -233,6 +233,11 @@ struct flash_info {
 #define SPI_NOR_SKIP_SFDP	BIT(13)	/* Skip parsing of SFDP tables */
 #define USE_CLSR		BIT(14)	/* use CLSR command */
 #define SPI_NOR_OCTAL_READ	BIT(15)	/* Flash supports Octal Read */
+#define SPI_NOR_TB_SR_BIT6	BIT(16)	/*
+					 * Top/Bottom (TB) is bit 6 of
+					 * status register. Must be used with
+					 * SPI_NOR_HAS_TB.
+					 */
 
 	/* Part specific fixup hooks. */
 	const struct spi_nor_fixups *fixups;
@@ -1307,14 +1312,14 @@ static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
 	}
 }
 
-static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+static int spi_nor_lock_and_prep(struct spi_nor *nor)
 {
 	int ret = 0;
 
 	mutex_lock(&nor->lock);
 
 	if (nor->controller_ops &&  nor->controller_ops->prepare) {
-		ret = nor->controller_ops->prepare(nor, ops);
+		ret = nor->controller_ops->prepare(nor);
 		if (ret) {
 			mutex_unlock(&nor->lock);
 			return ret;
@@ -1323,10 +1328,10 @@ static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
 	return ret;
 }
 
-static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+static void spi_nor_unlock_and_unprep(struct spi_nor *nor)
 {
 	if (nor->controller_ops && nor->controller_ops->unprepare)
-		nor->controller_ops->unprepare(nor, ops);
+		nor->controller_ops->unprepare(nor);
 	mutex_unlock(&nor->lock);
 }
 
@@ -1688,7 +1693,7 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
 	addr = instr->addr;
 	len = instr->len;
 
-	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
+	ret = spi_nor_lock_and_prep(nor);
 	if (ret)
 		return ret;
 
@@ -1751,7 +1756,7 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
 	ret = spi_nor_write_disable(nor);
 
 erase_err:
-	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
+	spi_nor_unlock_and_unprep(nor);
 
 	return ret;
 }
@@ -1761,9 +1766,13 @@ static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
 {
 	struct mtd_info *mtd = &nor->mtd;
 	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+	u8 tb_mask = SR_TB_BIT5;
 	int shift = ffs(mask) - 1;
 	int pow;
 
+	if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
+		tb_mask = SR_TB_BIT6;
+
 	if (!(sr & mask)) {
 		/* No protection */
 		*ofs = 0;
@@ -1771,7 +1780,7 @@ static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
 	} else {
 		pow = ((sr & mask) ^ mask) >> shift;
 		*len = mtd->size >> pow;
-		if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
+		if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
 			*ofs = 0;
 		else
 			*ofs = mtd->size - *len;
@@ -1850,6 +1859,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
 	struct mtd_info *mtd = &nor->mtd;
 	int ret, status_old, status_new;
 	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+	u8 tb_mask = SR_TB_BIT5;
 	u8 shift = ffs(mask) - 1, pow, val;
 	loff_t lock_len;
 	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
@@ -1886,6 +1896,9 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
 	else
 		lock_len = ofs + len;
 
+	if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
+		tb_mask = SR_TB_BIT6;
+
 	/*
 	 * Need smallest pow such that:
 	 *
@@ -1903,13 +1916,13 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
 	if (!(val & mask))
 		return -EINVAL;
 
-	status_new = (status_old & ~mask & ~SR_TB) | val;
+	status_new = (status_old & ~mask & ~tb_mask) | val;
 
 	/* Disallow further writes if WP pin is asserted */
 	status_new |= SR_SRWD;
 
 	if (!use_top)
-		status_new |= SR_TB;
+		status_new |= tb_mask;
 
 	/* Don't bother if they're the same */
 	if (status_new == status_old)
@@ -1932,6 +1945,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
 	struct mtd_info *mtd = &nor->mtd;
 	int ret, status_old, status_new;
 	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+	u8 tb_mask = SR_TB_BIT5;
 	u8 shift = ffs(mask) - 1, pow, val;
 	loff_t lock_len;
 	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
@@ -1968,6 +1982,8 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
 	else
 		lock_len = ofs;
 
+	if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
+		tb_mask = SR_TB_BIT6;
 	/*
 	 * Need largest pow such that:
 	 *
@@ -1987,14 +2003,14 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
 			return -EINVAL;
 	}
 
-	status_new = (status_old & ~mask & ~SR_TB) | val;
+	status_new = (status_old & ~mask & ~tb_mask) | val;
 
 	/* Don't protect status register if we're fully unlocked */
 	if (lock_len == 0)
 		status_new &= ~SR_SRWD;
 
 	if (!use_top)
-		status_new |= SR_TB;
+		status_new |= tb_mask;
 
 	/* Don't bother if they're the same */
 	if (status_new == status_old)
@@ -2036,13 +2052,13 @@ static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 	struct spi_nor *nor = mtd_to_spi_nor(mtd);
 	int ret;
 
-	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
+	ret = spi_nor_lock_and_prep(nor);
 	if (ret)
 		return ret;
 
 	ret = nor->params.locking_ops->lock(nor, ofs, len);
 
-	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
+	spi_nor_unlock_and_unprep(nor);
 	return ret;
 }
 
@@ -2051,13 +2067,13 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 	struct spi_nor *nor = mtd_to_spi_nor(mtd);
 	int ret;
 
-	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+	ret = spi_nor_lock_and_prep(nor);
 	if (ret)
 		return ret;
 
 	ret = nor->params.locking_ops->unlock(nor, ofs, len);
 
-	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+	spi_nor_unlock_and_unprep(nor);
 	return ret;
 }
 
@@ -2066,13 +2082,13 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 	struct spi_nor *nor = mtd_to_spi_nor(mtd);
 	int ret;
 
-	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+	ret = spi_nor_lock_and_prep(nor);
 	if (ret)
 		return ret;
 
 	ret = nor->params.locking_ops->is_locked(nor, ofs, len);
 
-	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+	spi_nor_unlock_and_unprep(nor);
 	return ret;
 }
 
@@ -2309,6 +2325,9 @@ static const struct flash_info spi_nor_ids[] = {
 	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024,  64, SECT_4K) },
 	{ "at25df641",  INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
 
+	{ "at25sl321",	INFO(0x1f4216, 0, 64 * 1024, 64,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+
 	{ "at26f004",   INFO(0x1f0400, 0, 64 * 1024,  8, SECT_4K) },
 	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
 	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
@@ -2374,6 +2393,11 @@ static const struct flash_info spi_nor_ids[] = {
 			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
 	},
 	{
+		"gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+	},
+	{
 		"gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
 			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
 			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
@@ -2381,7 +2405,8 @@ static const struct flash_info spi_nor_ids[] = {
 	{
 		"gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
 			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+			SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+			SPI_NOR_TB_SR_BIT6)
 			.fixups = &gd25q256_fixups,
 	},
 
@@ -2436,6 +2461,8 @@ static const struct flash_info spi_nor_ids[] = {
 	{ "mx25u6435f",  INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
 	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
 	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+	{ "mx25r3235f",  INFO(0xc22816, 0, 64 * 1024,  64,
+			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 	{ "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
 			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
@@ -2456,20 +2483,35 @@ static const struct flash_info spi_nor_ids[] = {
 	{ "n25q032a",	 INFO(0x20bb16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
 	{ "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
 	{ "n25q064a",    INFO(0x20bb17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
-	{ "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
-	{ "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
-	{ "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "n25q256ax1",  INFO(0x20bb19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_QUAD_READ) },
+	{ "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, SECT_4K |
+			      USE_FSR | SPI_NOR_QUAD_READ) },
+	{ "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, SECT_4K |
+			      USE_FSR | SPI_NOR_QUAD_READ) },
+	{ "mt25ql256a",  INFO6(0x20ba19, 0x104400, 64 * 1024,  512,
+			       SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+			       SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+	{ "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K |
+			      USE_FSR | SPI_NOR_DUAL_READ |
+			      SPI_NOR_QUAD_READ) },
+	{ "mt25qu256a",  INFO6(0x20bb19, 0x104400, 64 * 1024,  512,
+			       SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+			       SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+	{ "n25q256ax1",  INFO(0x20bb19, 0, 64 * 1024,  512, SECT_4K |
+			      USE_FSR | SPI_NOR_QUAD_READ) },
+	{ "mt25ql512a",  INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
+			       SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+			       SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
 	{ "n25q512ax3",  INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+	{ "mt25qu512a",  INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
+			       SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+			       SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+	{ "n25q512a",    INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K |
+			      USE_FSR | SPI_NOR_QUAD_READ) },
 	{ "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
 	{ "n25q00a",     INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
 	{ "mt25ql02g",   INFO(0x20ba22, 0, 64 * 1024, 4096,
 			      SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
 			      NO_CHIP_ERASE) },
-	{ "mt25qu512a (n25q512a)", INFO(0x20bb20, 0, 64 * 1024, 1024,
-					SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
-					SPI_NOR_QUAD_READ |
-					SPI_NOR_4B_OPCODES) },
 	{ "mt25qu02g",   INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
 
 	/* Micron */
@@ -2540,6 +2582,8 @@ static const struct flash_info spi_nor_ids[] = {
 	{ "sst25wf080",  INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
 	{ "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K |
 			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32, SECT_4K |
+			      SPI_NOR_DUAL_READ) },
 	{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 
 	/* ST Microelectronics -- newer production may have feature updates */
@@ -2610,6 +2654,11 @@ static const struct flash_info spi_nor_ids[] = {
 			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
 			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
 	},
+	{
+		"w25q32jwm", INFO(0xef8016, 0, 64 * 1024,  64,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+	},
 	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
 	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
 	{
@@ -2630,7 +2679,9 @@ static const struct flash_info spi_nor_ids[] = {
 	{ "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
 	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) },
 	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
-	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
+			  SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			  SPI_NOR_4B_OPCODES) },
 	{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
 			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 	{ "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
@@ -2701,7 +2752,7 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
 
 	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
 
-	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
+	ret = spi_nor_lock_and_prep(nor);
 	if (ret)
 		return ret;
 
@@ -2728,7 +2779,7 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
 	ret = 0;
 
 read_err:
-	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
+	spi_nor_unlock_and_unprep(nor);
 	return ret;
 }
 
@@ -2741,7 +2792,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
 
 	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
 
-	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+	ret = spi_nor_lock_and_prep(nor);
 	if (ret)
 		return ret;
 
@@ -2814,7 +2865,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
 	}
 out:
 	*retlen += actual;
-	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+	spi_nor_unlock_and_unprep(nor);
 	return ret;
 }
 
@@ -2832,7 +2883,7 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
 
 	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
 
-	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+	ret = spi_nor_lock_and_prep(nor);
 	if (ret)
 		return ret;
 
@@ -2878,7 +2929,7 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
 	}
 
 write_err:
-	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+	spi_nor_unlock_and_unprep(nor);
 	return ret;
 }
 
@@ -5143,8 +5194,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
 
 	if (info->flags & USE_FSR)
 		nor->flags |= SNOR_F_USE_FSR;
-	if (info->flags & SPI_NOR_HAS_TB)
+	if (info->flags & SPI_NOR_HAS_TB) {
 		nor->flags |= SNOR_F_HAS_SR_TB;
+		if (info->flags & SPI_NOR_TB_SR_BIT6)
+			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
+	}
+
 	if (info->flags & NO_CHIP_ERASE)
 		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
 	if (info->flags & USE_CLSR)
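
A note on the new Micron entries above: the INFO6() initializers appear to pack the 3-byte JEDEC ID and the 3-byte extended ID, most significant byte first, into the id[] array that is compared with the bytes returned by the READ ID (0x9F) command. A minimal userspace sketch of that packing, using the mt25ql512a values from the table (the unpack helper is illustrative, not the spi-nor macro itself):

#include <stdio.h>
#include <stdint.h>

/* Sketch of how an INFO6()-style entry seems to lay out its ID bytes:
 * three bytes of JEDEC ID followed by three bytes of extended ID,
 * most significant byte first. Assumption based on the table entries
 * above, not copied from spi-nor.c.
 */
static void unpack_id6(uint32_t jedec_id, uint32_t ext_id, uint8_t id[6])
{
	id[0] = (jedec_id >> 16) & 0xff;
	id[1] = (jedec_id >> 8) & 0xff;
	id[2] = jedec_id & 0xff;
	id[3] = (ext_id >> 16) & 0xff;
	id[4] = (ext_id >> 8) & 0xff;
	id[5] = ext_id & 0xff;
}

int main(void)
{
	uint8_t id[6];

	/* mt25ql512a entry from the table: INFO6(0x20ba20, 0x104400, ...) */
	unpack_id6(0x20ba20, 0x104400, id);
	printf("expected RDID bytes: %02x %02x %02x %02x %02x %02x\n",
	       id[0], id[1], id[2], id[3], id[4], id[5]);
	return 0;
}
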
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 10b2459..ea7440a 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -1640,7 +1640,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
 out_wl:
 	ubi_wl_close(ubi);
 out_vtbl:
-	ubi_free_internal_volumes(ubi);
+	ubi_free_all_volumes(ubi);
 	vfree(ubi->vtbl);
 out_ai:
 	destroy_ai(ai);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index d636bbe..2f93c25 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -503,19 +503,40 @@ static void uif_close(struct ubi_device *ubi)
 }
 
 /**
+ * ubi_free_volumes_from - free volumes from specific index.
+ * @ubi: UBI device description object
+ * @from: the start index used for volume free.
+ */
+static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
+{
+	int i;
+
+	for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+		if (!ubi->volumes[i])
+			continue;
+		ubi_eba_replace_table(ubi->volumes[i], NULL);
+		ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
+		kfree(ubi->volumes[i]);
+		ubi->volumes[i] = NULL;
+	}
+}
+
+/**
+ * ubi_free_all_volumes - free all volumes.
+ * @ubi: UBI device description object
+ */
+void ubi_free_all_volumes(struct ubi_device *ubi)
+{
+	ubi_free_volumes_from(ubi, 0);
+}
+
+/**
  * ubi_free_internal_volumes - free internal volumes.
  * @ubi: UBI device description object
  */
 void ubi_free_internal_volumes(struct ubi_device *ubi)
 {
-	int i;
-
-	for (i = ubi->vtbl_slots;
-	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
-		ubi_eba_replace_table(ubi->volumes[i], NULL);
-		ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
-		kfree(ubi->volumes[i]);
-	}
+	ubi_free_volumes_from(ubi, ubi->vtbl_slots);
 }
 
 static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
@@ -1013,7 +1034,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
 out_detach:
 	ubi_devices[ubi_num] = NULL;
 	ubi_wl_close(ubi);
-	ubi_free_internal_volumes(ubi);
+	ubi_free_all_volumes(ubi);
 	vfree(ubi->vtbl);
 out_free:
 	vfree(ubi->peb_buf);
@@ -1159,7 +1180,7 @@ static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
 		 * MTD device name.
 		 */
 		mtd = get_mtd_device_nm(mtd_dev);
-		if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
+		if (PTR_ERR(mtd) == -ENODEV)
 			/* Probably this is an MTD character device node path */
 			mtd = open_mtd_by_chdev(mtd_dev);
 	} else
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 1c7be4e..53f448e 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -64,7 +64,7 @@ static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
 		return 0;
 
 	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
-		if (test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
+		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
 			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
 			ret = -EINVAL;
 		}
@@ -1137,7 +1137,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 	struct rb_node *tmp_rb;
 	int ret, i, j, free_peb_count, used_peb_count, vol_count;
 	int scrub_peb_count, erase_peb_count;
-	unsigned long *seen_pebs = NULL;
+	unsigned long *seen_pebs;
 
 	fm_raw = ubi->fm_buf;
 	memset(ubi->fm_buf, 0, ubi->fm_size);
@@ -1151,7 +1151,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
 	if (!dvbuf) {
 		ret = -ENOMEM;
-		goto out_kfree;
+		goto out_free_avbuf;
 	}
 
 	avhdr = ubi_get_vid_hdr(avbuf);
@@ -1160,7 +1160,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 	seen_pebs = init_seen(ubi);
 	if (IS_ERR(seen_pebs)) {
 		ret = PTR_ERR(seen_pebs);
-		goto out_kfree;
+		goto out_free_dvbuf;
 	}
 
 	spin_lock(&ubi->volumes_lock);
@@ -1328,7 +1328,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
 	if (ret) {
 		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
-		goto out_kfree;
+		goto out_free_seen;
 	}
 
 	for (i = 0; i < new_fm->used_blocks; i++) {
@@ -1350,7 +1350,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 		if (ret) {
 			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
 				new_fm->e[i]->pnum);
-			goto out_kfree;
+			goto out_free_seen;
 		}
 	}
 
@@ -1360,7 +1360,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 		if (ret) {
 			ubi_err(ubi, "unable to write fastmap to PEB %i!",
 				new_fm->e[i]->pnum);
-			goto out_kfree;
+			goto out_free_seen;
 		}
 	}
 
@@ -1370,10 +1370,13 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 	ret = self_check_seen(ubi, seen_pebs);
 	dbg_bld("fastmap written!");
 
-out_kfree:
-	ubi_free_vid_buf(avbuf);
-	ubi_free_vid_buf(dvbuf);
+out_free_seen:
 	free_seen(seen_pebs);
+out_free_dvbuf:
+	ubi_free_vid_buf(dvbuf);
+out_free_avbuf:
+	ubi_free_vid_buf(avbuf);
+
 out:
 	return ret;
 }
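
The relabelled error path above frees only what has actually been allocated, in reverse allocation order, instead of funnelling every failure through a single out_kfree label. A self-contained sketch of that goto-unwind shape, with made-up buffer names rather than the UBI objects:

#include <stdlib.h>
#include <string.h>

/* Standalone sketch of the reverse-order unwind pattern the fastmap error
 * path now uses: one label per allocation, jumped to from the first point
 * that can fail after it. Names here are hypothetical, not the UBI code.
 */
static int do_work(void *a, void *b, void *c)
{
	memset(a, 0, 64);
	memset(b, 0, 64);
	memset(c, 0, 64);
	return 0;
}

int build_something(void)
{
	void *avbuf, *dvbuf, *seen;
	int ret = -1;

	avbuf = malloc(64);
	if (!avbuf)
		goto out;

	dvbuf = malloc(64);
	if (!dvbuf)
		goto out_free_avbuf;

	seen = malloc(64);
	if (!seen)
		goto out_free_dvbuf;

	if (do_work(avbuf, dvbuf, seen))
		goto out_free_seen;	/* ret is still -1 */

	ret = 0;			/* success falls through the same unwind */

out_free_seen:
	free(seen);
out_free_dvbuf:
	free(dvbuf);
out_free_avbuf:
	free(avbuf);
out:
	return ret;
}

int main(void)
{
	return build_something();
}
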
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 9688b41..73c67e5 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -950,6 +950,7 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
 int ubi_notify_all(struct ubi_device *ubi, int ntype,
 		   struct notifier_block *nb);
 int ubi_enumerate_volumes(struct notifier_block *nb);
+void ubi_free_all_volumes(struct ubi_device *ubi);
 void ubi_free_internal_volumes(struct ubi_device *ubi);
 
 /* kapi.c */
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 53d8ab5..f700f0e 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -782,7 +782,7 @@ static int check_attaching_info(const struct ubi_device *ubi,
  */
 int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
 {
-	int i, err;
+	int err;
 	struct ubi_ainf_volume *av;
 
 	empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
@@ -851,11 +851,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
 
 out_free:
 	vfree(ubi->vtbl);
-	for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
-		ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
-		kfree(ubi->volumes[i]);
-		ubi->volumes[i] = NULL;
-	}
+	ubi_free_all_volumes(ubi);
 	return err;
 }
 
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 5d77a38..837d690 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -319,7 +319,7 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
 					  struct rb_root *root, int diff)
 {
 	struct rb_node *p;
-	struct ubi_wl_entry *e, *prev_e = NULL;
+	struct ubi_wl_entry *e;
 	int max;
 
 	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
@@ -334,7 +334,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
 			p = p->rb_left;
 		else {
 			p = p->rb_right;
-			prev_e = e;
 			e = e1;
 		}
 	}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index dee7958..25a8f93 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -532,12 +532,12 @@
 	  This driver provides support for Extended Socket network device
 	  on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.
 
-config THUNDERBOLT_NET
-	tristate "Networking over Thunderbolt cable"
-	depends on THUNDERBOLT && INET
+config USB4_NET
+	tristate "Networking over USB4 and Thunderbolt cables"
+	depends on USB4 && INET
 	help
-	  Select this if you want to create network between two
-	  computers over a Thunderbolt cable. The driver supports Apple
+	  Select this if you want to create a network between two computers
+	  over USB4 and Thunderbolt cables. The driver supports Apple
 	  ThunderboltIP protocol and allows communication with any host
 	  supporting the same protocol including Windows and macOS.
 
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 953b7c1..71b88ff 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -77,6 +77,6 @@
 obj-$(CONFIG_FUJITSU_ES) += fjes/
 
 thunderbolt-net-y += thunderbolt.o
-obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o
+obj-$(CONFIG_USB4_NET) += thunderbolt-net.o
 obj-$(CONFIG_NETDEVSIM) += netdevsim/
 obj-$(CONFIG_NET_FAILOVER) += net_failover.o
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 4f2e691..1cc2cd8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1383,26 +1383,31 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 	bool do_tx_balance = true;
 	u32 hash_index = 0;
 	const u8 *hash_start = NULL;
-	struct ipv6hdr *ip6hdr;
 
 	skb_reset_mac_header(skb);
 	eth_data = eth_hdr(skb);
 
 	switch (ntohs(skb->protocol)) {
 	case ETH_P_IP: {
-		const struct iphdr *iph = ip_hdr(skb);
+		const struct iphdr *iph;
 
 		if (is_broadcast_ether_addr(eth_data->h_dest) ||
-		    iph->daddr == ip_bcast ||
-		    iph->protocol == IPPROTO_IGMP) {
+		    !pskb_network_may_pull(skb, sizeof(*iph))) {
+			do_tx_balance = false;
+			break;
+		}
+		iph = ip_hdr(skb);
+		if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) {
 			do_tx_balance = false;
 			break;
 		}
 		hash_start = (char *)&(iph->daddr);
 		hash_size = sizeof(iph->daddr);
-	}
 		break;
-	case ETH_P_IPV6:
+	}
+	case ETH_P_IPV6: {
+		const struct ipv6hdr *ip6hdr;
+
 		/* IPv6 doesn't really use broadcast mac address, but leave
 		 * that here just in case.
 		 */
@@ -1419,7 +1424,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			break;
 		}
 
-		/* Additianally, DAD probes should not be tx-balanced as that
+		if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
+			do_tx_balance = false;
+			break;
+		}
+		/* Additionally, DAD probes should not be tx-balanced as that
 		 * will lead to false positives for duplicate addresses and
 		 * prevent address configuration from working.
 		 */
@@ -1429,17 +1438,26 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			break;
 		}
 
-		hash_start = (char *)&(ipv6_hdr(skb)->daddr);
-		hash_size = sizeof(ipv6_hdr(skb)->daddr);
+		hash_start = (char *)&ip6hdr->daddr;
+		hash_size = sizeof(ip6hdr->daddr);
 		break;
-	case ETH_P_IPX:
-		if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
+	}
+	case ETH_P_IPX: {
+		const struct ipxhdr *ipxhdr;
+
+		if (!pskb_network_may_pull(skb, sizeof(*ipxhdr))) {
+			do_tx_balance = false;
+			break;
+		}
+		ipxhdr = (struct ipxhdr *)skb_network_header(skb);
+
+		if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) {
 			/* something is wrong with this packet */
 			do_tx_balance = false;
 			break;
 		}
 
-		if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
+		if (ipxhdr->ipx_type != IPX_TYPE_NCP) {
 			/* The only protocol worth balancing in
 			 * this family since it has an "ARP" like
 			 * mechanism
@@ -1448,9 +1466,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			break;
 		}
 
+		eth_data = eth_hdr(skb);
 		hash_start = (char *)eth_data->h_dest;
 		hash_size = ETH_ALEN;
 		break;
+	}
 	case ETH_P_ARP:
 		do_tx_balance = false;
 		if (bond_info->rlb_enabled)
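
The bond_alb rework follows one rule in all three protocol branches: confirm with pskb_network_may_pull() that the header bytes are really present in the linear skb data before casting to iphdr/ipv6hdr/ipxhdr, and fall back to unbalanced transmission otherwise. A standalone sketch of the same validate-before-dereference idea, using a made-up header type instead of the sk_buff API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical wire header standing in for iphdr/ipv6hdr/ipxhdr. */
struct demo_hdr {
	uint8_t  version;
	uint8_t  proto;
	uint16_t len;
	uint32_t daddr;
};

/* Copy the header out only if the buffer really contains one; this is the
 * userspace equivalent of checking pskb_network_may_pull() before calling
 * ip_hdr()/skb_network_header() as the hunks above do.
 */
static bool pull_hdr(const uint8_t *pkt, size_t pkt_len, struct demo_hdr *out)
{
	if (pkt_len < sizeof(*out))
		return false;
	memcpy(out, pkt, sizeof(*out));
	return true;
}

static bool choose_balanced_tx(const uint8_t *pkt, size_t pkt_len,
			       uint32_t *hash_key)
{
	struct demo_hdr hdr;

	if (!pull_hdr(pkt, pkt_len, &hdr))
		return false;	/* runt frame: fall back, never read past the end */

	*hash_key = hdr.daddr;	/* balance on the destination, as above */
	return true;
}

int main(void)
{
	uint8_t runt[3] = { 0 };
	uint8_t full[sizeof(struct demo_hdr)] = { 4, 6, 0, 40, 192, 0, 2, 1 };
	uint32_t key = 0;

	printf("runt balanced? %d\n", choose_balanced_tx(runt, sizeof(runt), &key));
	printf("full balanced? %d\n", choose_balanced_tx(full, sizeof(full), &key));
	return 0;
}
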
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0604975..449a221 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -693,7 +693,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
 		b53_do_vlan_op(dev, VTA_CMD_CLEAR);
 	}
 
-	b53_enable_vlan(dev, false, ds->vlan_filtering);
+	b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering);
 
 	b53_for_each_port(dev, i)
 		b53_write16(dev, B53_VLAN_PAGE,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 3e86353..d195554 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -68,7 +68,9 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 
 		/* Force link status for IMP port */
 		reg = core_readl(priv, offset);
-		reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G);
+		reg |= (MII_SW_OR | LINK_STS);
+		if (priv->type == BCM7278_DEVICE_ID)
+			reg |= GMII_SPEED_UP_2G;
 		core_writel(priv, reg, offset);
 
 		/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index c5f6495..1142768 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -101,6 +101,12 @@ static struct spi_driver ksz9477_spi_driver = {
 
 module_spi_driver(ksz9477_spi_driver);
 
+MODULE_ALIAS("spi:ksz9477");
+MODULE_ALIAS("spi:ksz9897");
+MODULE_ALIAS("spi:ksz9893");
+MODULE_ALIAS("spi:ksz9563");
+MODULE_ALIAS("spi:ksz8563");
+MODULE_ALIAS("spi:ksz9567");
 MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
 MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index f07ac0e..e0611cb 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2736,6 +2736,9 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
 
 	umac_reset(priv);
 
+	/* Disable the UniMAC RX/TX */
+	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
+
 	/* We may have been suspended and never received a WOL event that
 	 * would turn off MPD detection, take care of that now
 	 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 066765f..0a59a09 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -296,7 +296,6 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
  *    possible, the driver should only write the valid vnics into the internal
  *    ram according to the appropriate port mode.
  */
-#define BITS_TO_BYTES(x) ((x)/8)
 
 /* CMNG constants, as derived from system spec calculations */
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 483935b..597e6fd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -7893,7 +7893,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
 	int tcs, i;
 
 	tcs = netdev_get_num_tc(dev);
-	if (tcs > 1) {
+	if (tcs) {
 		int i, off, count;
 
 		for (i = 0; i < tcs; i++) {
@@ -9241,6 +9241,17 @@ void bnxt_half_close_nic(struct bnxt *bp)
 	bnxt_free_mem(bp, false);
 }
 
+static void bnxt_reenable_sriov(struct bnxt *bp)
+{
+	if (BNXT_PF(bp)) {
+		struct bnxt_pf_info *pf = &bp->pf;
+		int n = pf->active_vfs;
+
+		if (n)
+			bnxt_cfg_hw_sriov(bp, &n, true);
+	}
+}
+
 static int bnxt_open(struct net_device *dev)
 {
 	struct bnxt *bp = netdev_priv(dev);
@@ -9259,15 +9270,10 @@ static int bnxt_open(struct net_device *dev)
 		bnxt_hwrm_if_change(bp, false);
 	} else {
 		if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
-			if (BNXT_PF(bp)) {
-				struct bnxt_pf_info *pf = &bp->pf;
-				int n = pf->active_vfs;
-
-				if (n)
-					bnxt_cfg_hw_sriov(bp, &n, true);
-			}
-			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
 				bnxt_ulp_start(bp, 0);
+				bnxt_reenable_sriov(bp);
+			}
 		}
 		bnxt_hwmon_open(bp);
 	}
@@ -9307,10 +9313,6 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
 	bnxt_debug_dev_exit(bp);
 	bnxt_disable_napi(bp);
 	del_timer_sync(&bp->timer);
-	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) &&
-	    pci_is_enabled(bp->pdev))
-		pci_disable_device(bp->pdev);
-
 	bnxt_free_skbs(bp);
 
 	/* Save ring stats before shutdown */
@@ -10096,9 +10098,16 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
 static void bnxt_fw_reset_close(struct bnxt *bp)
 {
 	bnxt_ulp_stop(bp);
+	/* When the firmware is in a fatal state, disable the PCI device to
+	 * prevent any potential bad DMAs before freeing kernel memory.
+	 */
+	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+		pci_disable_device(bp->pdev);
 	__bnxt_close_nic(bp, true, false);
 	bnxt_clear_int_mode(bp);
 	bnxt_hwrm_func_drv_unrgtr(bp);
+	if (pci_is_enabled(bp->pdev))
+		pci_disable_device(bp->pdev);
 	bnxt_free_ctx_mem(bp);
 	kfree(bp->ctx);
 	bp->ctx = NULL;
@@ -10831,6 +10840,8 @@ static void bnxt_fw_reset_task(struct work_struct *work)
 		smp_mb__before_atomic();
 		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
 		bnxt_ulp_start(bp, rc);
+		if (!rc)
+			bnxt_reenable_sriov(bp);
 		bnxt_dl_health_recovery_done(bp);
 		bnxt_dl_health_status_update(bp, true);
 		rtnl_unlock();
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 7a2fe63..4508f0d 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -73,7 +73,11 @@ struct sifive_fu540_macb_mgmt {
 /* Max length of transmit frame must be a multiple of 8 bytes */
 #define MACB_TX_LEN_ALIGN	8
 #define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
-#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
+/* Limit the maximum TX length as per the Cadence TSO errata. This avoids a
+ * false amba_error in the TX path, where the DMA assumes there is not
+ * enough space in the 16KB SRAM even when there is.
+ */
+#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)
 
 #define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
 #define MACB_NETIF_LSO		NETIF_F_TSO
@@ -1791,16 +1795,14 @@ static netdev_features_t macb_features_check(struct sk_buff *skb,
 
 	/* Validate LSO compatibility */
 
-	/* there is only one buffer */
-	if (!skb_is_nonlinear(skb))
+	/* there is only one buffer or protocol is not UDP */
+	if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
 		return features;
 
 	/* length of header */
 	hdrlen = skb_transport_offset(skb);
-	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-		hdrlen += tcp_hdrlen(skb);
 
-	/* For LSO:
+	/* For UFO only:
 	 * When software supplies two or more payload buffers all payload buffers
 	 * apart from the last must be a multiple of 8 bytes in size.
 	 */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index c4f6ec0..17a4110 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1039,7 +1039,7 @@ static int phy_interface_mode(u8 lmac_type)
 	if (lmac_type == BGX_MODE_QSGMII)
 		return PHY_INTERFACE_MODE_QSGMII;
 	if (lmac_type == BGX_MODE_RGMII)
-		return PHY_INTERFACE_MODE_RGMII;
+		return PHY_INTERFACE_MODE_RGMII_RXID;
 
 	return PHY_INTERFACE_MODE_SGMII;
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 9d1f2f8..de30d61 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3403,6 +3403,13 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
 		   atomic_read(&adap->chcr_stats.fallback));
 	seq_printf(seq, "IPSec PDU: %10u\n",
 		   atomic_read(&adap->chcr_stats.ipsec_cnt));
+	seq_printf(seq, "TLS PDU Tx: %10u\n",
+		   atomic_read(&adap->chcr_stats.tls_pdu_tx));
+	seq_printf(seq, "TLS PDU Rx: %10u\n",
+		   atomic_read(&adap->chcr_stats.tls_pdu_rx));
+	seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
+		   atomic_read(&adap->chcr_stats.tls_key));
+
 	return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(chcr_stats);
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index d305d1b..42b798a 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -417,7 +417,10 @@ static void de_rx (struct de_private *de)
 		if (status & DescOwn)
 			break;
 
-		len = ((status >> 16) & 0x7ff) - 4;
+		/* the length is actually a 15 bit value here according
+		 * to Table 4-1 in the DE2104x spec, so the mask is 0x7fff
+		 */
+		len = ((status >> 16) & 0x7fff) - 4;
 		mapping = de->rx_skb[rx_tail].mapping;
 
 		if (unlikely(drop)) {
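
As the new comment notes, the RX descriptor length field is 15 bits wide, so masking with 0x7ff silently truncated anything longer than 2047 bytes. A tiny arithmetic check with an illustrative status word:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical status word with a 4004-byte length field
	 * (frame plus 4-byte CRC) in bits 30:16.
	 */
	uint32_t status = 4004u << 16;

	unsigned int old_len = ((status >> 16) & 0x7ff) - 4;	/* 11-bit mask */
	unsigned int new_len = ((status >> 16) & 0x7fff) - 4;	/* 15-bit mask */

	printf("old mask: %u bytes, new mask: %u bytes\n", old_len, new_len);
	/* prints "old mask: 1952 bytes, new mask: 4000 bytes" */
	return 0;
}
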
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 6a7e899..2bd7ace 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -74,7 +74,7 @@
 
 config UCC_GETH
 	tristate "Freescale QE Gigabit Ethernet"
-	depends on QUICC_ENGINE
+	depends on QUICC_ENGINE && PPC32
 	select FSL_PQ_MDIO
 	select PHYLIB
 	---help---
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 09dbcd81..fd93d54 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2453,6 +2453,9 @@ static void dpaa_adjust_link(struct net_device *net_dev)
 	mac_dev->adjust_link(mac_dev);
 }
 
+/* The Aquantia PHYs are capable of performing rate adaptation */
+#define PHY_VEND_AQUANTIA	0x03a1b400
+
 static int dpaa_phy_init(struct net_device *net_dev)
 {
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
@@ -2471,9 +2474,14 @@ static int dpaa_phy_init(struct net_device *net_dev)
 		return -ENODEV;
 	}
 
-	/* Remove any features not supported by the controller */
-	ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
-	linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+	/* Unless the PHY is capable of rate adaptation */
+	if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
+	    ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+		/* remove any features not supported by the controller */
+		ethtool_convert_legacy_u32_to_link_mode(mask,
+							mac_dev->if_support);
+		linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+	}
 
 	phy_support_asym_pause(phy_dev);
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 42058fa..0b7d291 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -791,7 +791,7 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 	struct i40e_ring *ring;
 
 	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
-		return -ENETDOWN;
+		return -EAGAIN;
 
 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
 		return -ENETDOWN;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 2dfbfdf..98017e7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -324,8 +324,7 @@
 	      ETH_HLEN + ETH_FCS_LEN,			     \
 	      cache_line_size())
 
-#define MVNETA_SKB_HEADROOM	(max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
-				 NET_IP_ALIGN)
+#define MVNETA_SKB_HEADROOM	max(XDP_PACKET_HEADROOM, NET_SKB_PAD)
 #define MVNETA_SKB_PAD	(SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
 			 MVNETA_SKB_HEADROOM))
 #define MVNETA_SKB_SIZE(len)	(SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
@@ -402,6 +401,8 @@ struct mvneta_pcpu_stats {
 	struct	u64_stats_sync syncp;
 	u64	rx_packets;
 	u64	rx_bytes;
+	u64	rx_dropped;
+	u64	rx_errors;
 	u64	tx_packets;
 	u64	tx_bytes;
 };
@@ -739,6 +740,8 @@ mvneta_get_stats64(struct net_device *dev,
 		struct mvneta_pcpu_stats *cpu_stats;
 		u64 rx_packets;
 		u64 rx_bytes;
+		u64 rx_dropped;
+		u64 rx_errors;
 		u64 tx_packets;
 		u64 tx_bytes;
 
@@ -747,19 +750,20 @@ mvneta_get_stats64(struct net_device *dev,
 			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
 			rx_packets = cpu_stats->rx_packets;
 			rx_bytes   = cpu_stats->rx_bytes;
+			rx_dropped = cpu_stats->rx_dropped;
+			rx_errors  = cpu_stats->rx_errors;
 			tx_packets = cpu_stats->tx_packets;
 			tx_bytes   = cpu_stats->tx_bytes;
 		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
 
 		stats->rx_packets += rx_packets;
 		stats->rx_bytes   += rx_bytes;
+		stats->rx_dropped += rx_dropped;
+		stats->rx_errors  += rx_errors;
 		stats->tx_packets += tx_packets;
 		stats->tx_bytes   += tx_bytes;
 	}
 
-	stats->rx_errors	= dev->stats.rx_errors;
-	stats->rx_dropped	= dev->stats.rx_dropped;
-
 	stats->tx_dropped	= dev->stats.tx_dropped;
 }
 
@@ -1167,6 +1171,7 @@ static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
 	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
 
 	pp->bm_priv = NULL;
+	pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
 	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
 	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
 }
@@ -1736,8 +1741,14 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
 static void mvneta_rx_error(struct mvneta_port *pp,
 			    struct mvneta_rx_desc *rx_desc)
 {
+	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 	u32 status = rx_desc->status;
 
+	/* update per-cpu counter */
+	u64_stats_update_begin(&stats->syncp);
+	stats->rx_errors++;
+	u64_stats_update_end(&stats->syncp);
+
 	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
 	case MVNETA_RXD_ERR_CRC:
 		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
@@ -2179,11 +2190,15 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 
 	rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
 	if (unlikely(!rxq->skb)) {
-		netdev_err(dev,
-			   "Can't allocate skb on queue %d\n",
-			   rxq->id);
-		dev->stats.rx_dropped++;
+		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+		netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
 		rxq->skb_alloc_err++;
+
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_dropped++;
+		u64_stats_update_end(&stats->syncp);
+
 		return -ENOMEM;
 	}
 	page_pool_release_page(rxq->page_pool, page);
@@ -2270,7 +2285,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 			/* Check errors only for FIRST descriptor */
 			if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
 				mvneta_rx_error(pp, rx_desc);
-				dev->stats.rx_errors++;
 				/* leave the descriptor untouched */
 				continue;
 			}
@@ -2372,7 +2386,6 @@ static int mvneta_rx_hwbm(struct napi_struct *napi,
 			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
 					      rx_desc->buf_phys_addr);
 err_drop_frame:
-			dev->stats.rx_errors++;
 			mvneta_rx_error(pp, rx_desc);
 			/* leave the descriptor untouched */
 			continue;
@@ -4948,7 +4961,6 @@ static int mvneta_probe(struct platform_device *pdev)
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	pp->id = global_port_id++;
-	pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
 
 	/* Obtain access to BM resources if enabled and already initialized */
 	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
@@ -4973,6 +4985,10 @@ static int mvneta_probe(struct platform_device *pdev)
 	}
 	of_node_put(bm_node);
 
+	/* sw buffer management */
+	if (!pp->bm_priv)
+		pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
+
 	err = mvneta_init(&pdev->dev, pp);
 	if (err < 0)
 		goto err_netdev;
@@ -5130,6 +5146,7 @@ static int mvneta_resume(struct device *device)
 		err = mvneta_bm_port_init(pdev, pp);
 		if (err < 0) {
 			dev_info(&pdev->dev, "use SW buffer management\n");
+			pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
 			pp->bm_priv = NULL;
 		}
 	}
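
The mvneta hunks move rx_dropped and rx_errors from the global dev->stats into the per-CPU stats block and sum them across CPUs in mvneta_get_stats64(). A userspace sketch of the read-side aggregation (plain arrays stand in for per-CPU variables, and the u64_stats sequence counter is omitted):

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 4

/* Stand-in for the per-CPU counter block; in the driver each CPU updates
 * its own copy so the hot path never touches a shared counter.
 */
struct pcpu_stats {
	uint64_t rx_packets;
	uint64_t rx_dropped;
	uint64_t rx_errors;
};

static struct pcpu_stats stats[NR_CPUS];

/* Reader side: fold every per-CPU copy into one total, the way the
 * get_stats64 handler above walks each CPU's statistics.
 */
static struct pcpu_stats sum_stats(void)
{
	struct pcpu_stats total = { 0 };

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		total.rx_packets += stats[cpu].rx_packets;
		total.rx_dropped += stats[cpu].rx_dropped;
		total.rx_errors  += stats[cpu].rx_errors;
	}
	return total;
}

int main(void)
{
	stats[0].rx_packets = 10; stats[0].rx_errors = 1;
	stats[3].rx_packets = 7;  stats[3].rx_dropped = 2;

	struct pcpu_stats t = sum_stats();

	printf("packets=%llu dropped=%llu errors=%llu\n",
	       (unsigned long long)t.rx_packets,
	       (unsigned long long)t.rx_dropped,
	       (unsigned long long)t.rx_errors);
	return 0;
}
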
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 8247d21..b945bd3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -171,9 +171,9 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
 	}
 
 	msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
-	if (!msghdr) {
+	if (IS_ERR(msghdr)) {
 		otx2_mbox_unlock(&pfvf->mbox);
-		return -ENOMEM;
+		return PTR_ERR(msghdr);
 	}
 	rsp = (struct nix_get_mac_addr_rsp *)msghdr;
 	ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
index d787bc0..e09bc38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
@@ -45,7 +45,7 @@ void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
 
 static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
 {
-	if (!MLX5_CAP_GEN(mdev, tls))
+	if (!MLX5_CAP_GEN(mdev, tls_tx))
 		return false;
 
 	if (!MLX5_CAP_GEN(mdev, log_max_dek))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 71384ad..ef1ed15 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -269,7 +269,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
 	int datalen;
 	u32 skb_seq;
 
-	if (MLX5_CAP_GEN(sq->channel->mdev, tls)) {
+	if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
 		skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
 		goto out;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 9e99601..1c3ab69 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -613,13 +613,6 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
-		if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
-			netdev_WARN_ONCE(cq->channel->netdev,
-					 "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
-			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
-				queue_work(cq->channel->priv->wq, &sq->recover_work);
-			break;
-		}
 		do {
 			struct mlx5e_sq_wqe_info *wi;
 			u16 ci;
@@ -629,6 +622,15 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
 			wi = &sq->db.ico_wqe[ci];
 
+			if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+				netdev_WARN_ONCE(cq->channel->netdev,
+						 "Bad OP in ICOSQ CQE: 0x%x\n",
+						 get_cqe_opcode(cqe));
+				if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+					queue_work(cq->channel->priv->wq, &sq->recover_work);
+				break;
+			}
+
 			if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
 				sqcc += MLX5E_UMR_WQEBBS;
 				wi->umr.rq->mpwqe.umr_completed++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 2565ba8..ee60383 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -451,34 +451,17 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
 	i = 0;
 	do {
+		struct mlx5e_tx_wqe_info *wi;
 		u16 wqe_counter;
 		bool last_wqe;
+		u16 ci;
 
 		mlx5_cqwq_pop(&cq->wq);
 
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
-		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
-			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
-					      &sq->state)) {
-				struct mlx5e_tx_wqe_info *wi;
-				u16 ci;
-
-				ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
-				wi = &sq->db.wqe_info[ci];
-				mlx5e_dump_error_cqe(sq,
-						     (struct mlx5_err_cqe *)cqe);
-				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
-				queue_work(cq->channel->priv->wq,
-					   &sq->recover_work);
-			}
-			stats->cqe_err++;
-		}
-
 		do {
-			struct mlx5e_tx_wqe_info *wi;
 			struct sk_buff *skb;
-			u16 ci;
 			int j;
 
 			last_wqe = (sqcc == wqe_counter);
@@ -516,6 +499,18 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 			napi_consume_skb(skb, napi_budget);
 		} while (!last_wqe);
 
+		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
+			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
+					      &sq->state)) {
+				mlx5e_dump_error_cqe(sq,
+						     (struct mlx5_err_cqe *)cqe);
+				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
+				queue_work(cq->channel->priv->wq,
+					   &sq->recover_work);
+			}
+			stats->cqe_err++;
+		}
+
 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
 	stats->cqes += i;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index e4ec0e03..4c61d25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -850,6 +850,7 @@ void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
 	mutex_lock(&fpga_xfrm->lock);
 	if (!--fpga_xfrm->num_rules) {
 		mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
+		kfree(fpga_xfrm->sa_ctx);
 		fpga_xfrm->sa_ctx = NULL;
 	}
 	mutex_unlock(&fpga_xfrm->lock);
@@ -1478,7 +1479,7 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
 	if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
 		return 0;
 
-	if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
+	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
 		mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
 		return -EOPNOTSUPP;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index c7a16ae..9dc2424 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1582,16 +1582,16 @@ struct match_list_head {
 	struct match_list first;
 };
 
-static void free_match_list(struct match_list_head *head)
+static void free_match_list(struct match_list_head *head, bool ft_locked)
 {
 	if (!list_empty(&head->list)) {
 		struct match_list *iter, *match_tmp;
 
 		list_del(&head->first.list);
-		tree_put_node(&head->first.g->node, false);
+		tree_put_node(&head->first.g->node, ft_locked);
 		list_for_each_entry_safe(iter, match_tmp, &head->list,
 					 list) {
-			tree_put_node(&iter->g->node, false);
+			tree_put_node(&iter->g->node, ft_locked);
 			list_del(&iter->list);
 			kfree(iter);
 		}
@@ -1600,7 +1600,8 @@ static void free_match_list(struct match_list_head *head)
 
 static int build_match_list(struct match_list_head *match_head,
 			    struct mlx5_flow_table *ft,
-			    const struct mlx5_flow_spec *spec)
+			    const struct mlx5_flow_spec *spec,
+			    bool ft_locked)
 {
 	struct rhlist_head *tmp, *list;
 	struct mlx5_flow_group *g;
@@ -1625,7 +1626,7 @@ static int build_match_list(struct match_list_head *match_head,
 
 		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
 		if (!curr_match) {
-			free_match_list(match_head);
+			free_match_list(match_head, ft_locked);
 			err = -ENOMEM;
 			goto out;
 		}
@@ -1805,7 +1806,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	version = atomic_read(&ft->node.version);
 
 	/* Collect all fgs which has a matching match_criteria */
-	err = build_match_list(&match_head, ft, spec);
+	err = build_match_list(&match_head, ft, spec, take_write);
 	if (err) {
 		if (take_write)
 			up_write_ref_node(&ft->node, false);
@@ -1819,7 +1820,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 
 	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
 				      dest_num, version);
-	free_match_list(&match_head);
+	free_match_list(&match_head, take_write);
 	if (!IS_ERR(rule) ||
 	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
 		if (take_write)
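
The fs_core change threads an ft_locked flag from the callers down into free_match_list() so the node puts know whether the flow table lock (take_write) is already held, presumably to avoid re-taking a lock the caller owns. A generic pthread sketch of the same caller-holds-the-lock convention, with invented names:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical shared list protected by one mutex. */
struct node { struct node *next; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Cleanup helper in the style of free_match_list(..., ft_locked): the
 * boolean tells it whether the caller already holds table_lock, so it
 * neither deadlocks nor drops a lock it does not own.
 */
static void free_list(bool caller_holds_lock)
{
	if (!caller_holds_lock)
		pthread_mutex_lock(&table_lock);

	while (head) {
		struct node *n = head;

		head = n->next;
		free(n);
	}

	if (!caller_holds_lock)
		pthread_mutex_unlock(&table_lock);
}

/* Caller that already took the lock passes true, mirroring
 * free_match_list(&match_head, take_write) above.
 */
static void teardown_while_locked(void)
{
	pthread_mutex_lock(&table_lock);
	free_list(true);
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			return 1;
		n->next = head;
		head = n;
	}

	teardown_while_locked();	/* caller holds the lock */
	free_list(false);		/* helper takes the lock itself */
	return 0;
}
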
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index d89ff1d..909a7f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -242,7 +242,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
 			return err;
 	}
 
-	if (MLX5_CAP_GEN(dev, tls)) {
+	if (MLX5_CAP_GEN(dev, tls_tx)) {
 		err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
 		if (err)
 			return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 9bf8da5..3fe878d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -573,6 +573,7 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
 
 static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
 {
+	enum mlxsw_reg_mgpir_device_type device_type;
 	int index, max_index, sensor_index;
 	char mgpir_pl[MLXSW_REG_MGPIR_LEN];
 	char mtmp_pl[MLXSW_REG_MTMP_LEN];
@@ -584,8 +585,9 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
 	if (err)
 		return err;
 
-	mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL, NULL);
-	if (!gbox_num)
+	mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL);
+	if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
+	    !gbox_num)
 		return 0;
 
 	index = mlxsw_hwmon->module_sensor_max;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index c721b17..ce0a683 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -895,8 +895,10 @@ static int
 mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
 			     struct mlxsw_thermal *thermal)
 {
+	enum mlxsw_reg_mgpir_device_type device_type;
 	struct mlxsw_thermal_module *gearbox_tz;
 	char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+	u8 gbox_num;
 	int i;
 	int err;
 
@@ -908,11 +910,13 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
 	if (err)
 		return err;
 
-	mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL,
+	mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL,
 			       NULL);
-	if (!thermal->tz_gearbox_num)
+	if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
+	    !gbox_num)
 		return 0;
 
+	thermal->tz_gearbox_num = gbox_num;
 	thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num,
 					  sizeof(*thermal->tz_gearbox_arr),
 					  GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 4993381..2dc0978 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -215,7 +215,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
 start_again:
 	err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
 	if (err)
-		return err;
+		goto err_ctx_prepare;
 	j = 0;
 	for (; i < rif_count; i++) {
 		struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
@@ -247,6 +247,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
 	return 0;
 err_entry_append:
 err_entry_get:
+err_ctx_prepare:
 	rtnl_unlock();
 	devlink_dpipe_entry_clear(&entry);
 	return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 79a2801..02526c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -614,7 +614,7 @@ mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
 	/* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
 	 * Kbits/s.
 	 */
-	return p->rate.rate_bytes_ps / 1000 * 8;
+	return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
 }
 
 static int
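
The comment above the helper explains the unit conversion: the TBF offload hands the rate in bytes/s while the Spectrum ASIC is programmed in Kbits/s, and the fix switches to div_u64(), needed, as far as I can tell, because a raw 64-bit division does not link on 32-bit kernels. A userspace check of the same divide-then-multiply arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Convert a TBF rate in bytes/s to the Kbits/s the ASIC expects, matching
 * the divide-then-multiply order used above. In userspace a plain 64-bit
 * division is fine; the kernel routes it through div_u64() so the same
 * code also builds on 32-bit architectures.
 */
static uint64_t rate_bytes_ps_to_kbps(uint64_t rate_bytes_ps)
{
	return rate_bytes_ps / 1000 * 8;
}

int main(void)
{
	/* 10 Gbit/s expressed in bytes per second. */
	uint64_t rate = 1250000000ULL;

	printf("%llu Kbit/s\n",
	       (unsigned long long)rate_bytes_ps_to_kbps(rate));
	/* prints "10000000 Kbit/s" */
	return 0;
}
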
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index ce70772..4a77b51 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4844,6 +4844,23 @@ mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
 	fib_node->fib_entry = NULL;
 }
 
+static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
+{
+	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
+	struct mlxsw_sp_fib4_entry *fib4_replaced;
+
+	if (!fib_node->fib_entry)
+		return true;
+
+	fib4_replaced = container_of(fib_node->fib_entry,
+				     struct mlxsw_sp_fib4_entry, common);
+	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
+	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
+		return false;
+
+	return true;
+}
+
 static int
 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
 			     const struct fib_entry_notifier_info *fen_info)
@@ -4872,6 +4889,12 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
 		goto err_fib4_entry_create;
 	}
 
+	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
+		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
+		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+		return 0;
+	}
+
 	replaced = fib_node->fib_entry;
 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
 	if (err) {
@@ -4908,7 +4931,7 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
 		return;
 
 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
-	if (WARN_ON(!fib4_entry))
+	if (!fib4_entry)
 		return;
 	fib_node = fib4_entry->common.fib_node;
 
@@ -4970,6 +4993,9 @@ static void mlxsw_sp_rt6_release(struct fib6_info *rt)
 
 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
 {
+	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+
+	fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
 	kfree(mlxsw_sp_rt6);
 }
@@ -5408,6 +5434,27 @@ mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
 	return NULL;
 }
 
+static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
+	struct mlxsw_sp_fib6_entry *fib6_replaced;
+	struct fib6_info *rt, *rt_replaced;
+
+	if (!fib_node->fib_entry)
+		return true;
+
+	fib6_replaced = container_of(fib_node->fib_entry,
+				     struct mlxsw_sp_fib6_entry,
+				     common);
+	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
+	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
+	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
+	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
+		return false;
+
+	return true;
+}
+
 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
 					struct fib6_info **rt_arr,
 					unsigned int nrt6)
@@ -5442,6 +5489,12 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
 		goto err_fib6_entry_create;
 	}
 
+	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
+		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
+		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+		return 0;
+	}
+
 	replaced = fib_node->fib_entry;
 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
 	if (err)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index f131ada..ce07c29 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -866,7 +866,7 @@ struct ionic_rxq_comp {
 #define IONIC_RXQ_COMP_CSUM_F_VLAN	0x40
 #define IONIC_RXQ_COMP_CSUM_F_CALC	0x80
 	u8     pkt_type_color;
-#define IONIC_RXQ_COMP_PKT_TYPE_MASK	0x0f
+#define IONIC_RXQ_COMP_PKT_TYPE_MASK	0x7f
 };
 
 enum ionic_pkt_type {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index fbfff2b..1a636ba 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1398,14 +1398,11 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
 {
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 	struct qed_qm_pf_rt_init_params params;
-	struct qed_mcp_link_state *p_link;
 	struct qed_qm_iids iids;
 
 	memset(&iids, 0, sizeof(iids));
 	qed_cxt_qm_iids(p_hwfn, &iids);
 
-	p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
-
 	memset(&params, 0, sizeof(params));
 	params.port_id = p_hwfn->port_id;
 	params.pf_id = p_hwfn->rel_pf_id;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 7912911..03bdd2e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3114,6 +3114,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 		if (!p_hwfn->fw_overlay_mem) {
 			DP_NOTICE(p_hwfn,
 				  "Failed to allocate fw overlay memory\n");
+			rc = -ENOMEM;
 			goto load_err;
 		}
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index 0dacf2c..3e61305 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -44,8 +44,8 @@
 /* Add/subtract the Adjustment_Value when making a Drift adjustment */
 #define QED_DRIFT_CNTR_DIRECTION_SHIFT		31
 #define QED_TIMESTAMP_MASK			BIT(16)
-/* Param mask for Hardware to detect/timestamp the unicast PTP packets */
-#define QED_PTP_UCAST_PARAM_MASK		0xF
+/* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */
+#define QED_PTP_UCAST_PARAM_MASK              0x70F
 
 static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
 {
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index aaa316b..a2168a1 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2477,15 +2477,18 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_12:
 	case RTL_GIGA_MAC_VER_17:
+		pcie_set_readrq(tp->pci_dev, 512);
 		r8168b_1_hw_jumbo_enable(tp);
 		break;
 	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
+		pcie_set_readrq(tp->pci_dev, 512);
 		r8168c_hw_jumbo_enable(tp);
 		break;
 	case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
 		r8168dp_hw_jumbo_enable(tp);
 		break;
 	case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
+		pcie_set_readrq(tp->pci_dev, 512);
 		r8168e_hw_jumbo_enable(tp);
 		break;
 	default:
@@ -2515,6 +2518,9 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
 		break;
 	}
 	rtl_lock_config_regs(tp);
+
+	if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+		pcie_set_readrq(tp->pci_dev, 4096);
 }
 
 static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
diff --git a/drivers/net/ethernet/sgi/Kconfig b/drivers/net/ethernet/sgi/Kconfig
index 37f048e..bc26fa0 100644
--- a/drivers/net/ethernet/sgi/Kconfig
+++ b/drivers/net/ethernet/sgi/Kconfig
@@ -6,7 +6,7 @@
 config NET_VENDOR_SGI
 	bool "SGI devices"
 	default y
-	depends on (PCI && SGI_IP27) || SGI_IP32
+	depends on (PCI && SGI_MFD_IOC3) ||  SGI_IP32
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
 
@@ -19,7 +19,8 @@
 
 config SGI_IOC3_ETH
 	bool "SGI IOC3 Ethernet"
-	depends on PCI && SGI_IP27
+	depends on PCI && SGI_MFD_IOC3
+	select CRC16
 	select CRC32
 	select MII
 	---help---
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 06637b0..db6b298 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -14,7 +14,6 @@
  *  o Use prefetching for large packets.  What is a good lower limit for
  *    prefetching?
  *  o Use hardware checksums.
- *  o Convert to using a IOC3 meta driver.
  *  o Which PHYs might possibly be attached to the IOC3 in real live,
  *    which workarounds are required for them?  Do we ever have Lucent's?
  *  o For the 2.5 branch kill the mii-tool ioctls.
@@ -28,7 +27,8 @@
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/module.h>
-#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/crc16.h>
 #include <linux/crc32.h>
 #include <linux/mii.h>
 #include <linux/in.h>
@@ -37,28 +37,22 @@
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/gfp.h>
-
-#ifdef CONFIG_SERIAL_8250
-#include <linux/serial_core.h>
-#include <linux/serial_8250.h>
-#include <linux/serial_reg.h>
-#endif
-
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/skbuff.h>
 #include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/nvmem-consumer.h>
 
 #include <net/ip.h>
 
-#include <asm/byteorder.h>
-#include <asm/pgtable.h>
-#include <linux/uaccess.h>
-#include <asm/sn/types.h>
 #include <asm/sn/ioc3.h>
 #include <asm/pci/bridge.h>
 
+#define CRC16_INIT	0
+#define CRC16_VALID	0xb001
+
 /* Number of RX buffers.  This is tunable in the range of 16 <= x < 512.
  * The value must be a power of two.
  */
@@ -85,7 +79,6 @@
 /* Private per NIC data of the driver.  */
 struct ioc3_private {
 	struct ioc3_ethregs *regs;
-	struct ioc3 *all_regs;
 	struct device *dma_dev;
 	u32 *ssram;
 	unsigned long *rxr;		/* pointer to receiver ring */
@@ -104,9 +97,6 @@ struct ioc3_private {
 	spinlock_t ioc3_lock;
 	struct mii_if_info mii;
 
-	struct net_device *dev;
-	struct pci_dev *pdev;
-
 	/* Members used by autonegotiation  */
 	struct timer_list ioc3_timer;
 };
@@ -123,10 +113,8 @@ static int ioc3_alloc_rx_bufs(struct net_device *dev);
 static void ioc3_free_rx_bufs(struct ioc3_private *ip);
 static inline void ioc3_clean_tx_ring(struct ioc3_private *ip);
 
-static const char ioc3_str[] = "IOC3 Ethernet";
 static const struct ethtool_ops ioc3_ethtool_ops;
 
-
 static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
 {
 	return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL);
@@ -179,225 +167,61 @@ static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
 #define ERBAR_VAL	0
 #endif
 
-#define IOC3_SIZE 0x100000
-
-static inline u32 mcr_pack(u32 pulse, u32 sample)
+static int ioc3eth_nvmem_match(struct device *dev, const void *data)
 {
-	return (pulse << 10) | (sample << 2);
-}
+	const char *name = dev_name(dev);
+	const char *prefix = data;
+	int prefix_len;
 
-static int nic_wait(u32 __iomem *mcr)
-{
-	u32 m;
+	prefix_len = strlen(prefix);
+	if (strlen(name) < (prefix_len + 3))
+		return 0;
 
-	do {
-		m = readl(mcr);
-	} while (!(m & 2));
+	if (memcmp(prefix, name, prefix_len) != 0)
+		return 0;
 
-	return m & 1;
-}
-
-static int nic_reset(u32 __iomem *mcr)
-{
-	int presence;
-
-	writel(mcr_pack(500, 65), mcr);
-	presence = nic_wait(mcr);
-
-	writel(mcr_pack(0, 500), mcr);
-	nic_wait(mcr);
-
-	return presence;
-}
-
-static inline int nic_read_bit(u32 __iomem *mcr)
-{
-	int result;
-
-	writel(mcr_pack(6, 13), mcr);
-	result = nic_wait(mcr);
-	writel(mcr_pack(0, 100), mcr);
-	nic_wait(mcr);
-
-	return result;
-}
-
-static inline void nic_write_bit(u32 __iomem *mcr, int bit)
-{
-	if (bit)
-		writel(mcr_pack(6, 110), mcr);
-	else
-		writel(mcr_pack(80, 30), mcr);
-
-	nic_wait(mcr);
-}
-
-/* Read a byte from an iButton device
- */
-static u32 nic_read_byte(u32 __iomem *mcr)
-{
-	u32 result = 0;
-	int i;
-
-	for (i = 0; i < 8; i++)
-		result = (result >> 1) | (nic_read_bit(mcr) << 7);
-
-	return result;
-}
-
-/* Write a byte to an iButton device
- */
-static void nic_write_byte(u32 __iomem *mcr, int byte)
-{
-	int i, bit;
-
-	for (i = 8; i; i--) {
-		bit = byte & 1;
-		byte >>= 1;
-
-		nic_write_bit(mcr, bit);
-	}
-}
-
-static u64 nic_find(u32 __iomem *mcr, int *last)
-{
-	int a, b, index, disc;
-	u64 address = 0;
-
-	nic_reset(mcr);
-	/* Search ROM.  */
-	nic_write_byte(mcr, 0xf0);
-
-	/* Algorithm from ``Book of iButton Standards''.  */
-	for (index = 0, disc = 0; index < 64; index++) {
-		a = nic_read_bit(mcr);
-		b = nic_read_bit(mcr);
-
-		if (a && b) {
-			pr_warn("NIC search failed (not fatal).\n");
-			*last = 0;
-			return 0;
-		}
-
-		if (!a && !b) {
-			if (index == *last) {
-				address |= 1UL << index;
-			} else if (index > *last) {
-				address &= ~(1UL << index);
-				disc = index;
-			} else if ((address & (1UL << index)) == 0) {
-				disc = index;
-			}
-			nic_write_bit(mcr, address & (1UL << index));
-			continue;
-		} else {
-			if (a)
-				address |= 1UL << index;
-			else
-				address &= ~(1UL << index);
-			nic_write_bit(mcr, a);
-			continue;
-		}
-	}
-
-	*last = disc;
-
-	return address;
-}
-
-static int nic_init(u32 __iomem *mcr)
-{
-	const char *unknown = "unknown";
-	const char *type = unknown;
-	u8 crc;
-	u8 serial[6];
-	int save = 0, i;
-
-	while (1) {
-		u64 reg;
-
-		reg = nic_find(mcr, &save);
-
-		switch (reg & 0xff) {
-		case 0x91:
-			type = "DS1981U";
-			break;
-		default:
-			if (save == 0) {
-				/* Let the caller try again.  */
-				return -1;
-			}
-			continue;
-		}
-
-		nic_reset(mcr);
-
-		/* Match ROM.  */
-		nic_write_byte(mcr, 0x55);
-		for (i = 0; i < 8; i++)
-			nic_write_byte(mcr, (reg >> (i << 3)) & 0xff);
-
-		reg >>= 8; /* Shift out type.  */
-		for (i = 0; i < 6; i++) {
-			serial[i] = reg & 0xff;
-			reg >>= 8;
-		}
-		crc = reg & 0xff;
-		break;
-	}
-
-	pr_info("Found %s NIC", type);
-	if (type != unknown)
-		pr_cont(" registration number %pM, CRC %02x", serial, crc);
-	pr_cont(".\n");
+	/* Found an nvmem device attached to our IOC3; now check for
+	 * one-wire family codes 09, 89 and 91.
+	 */
+	if (memcmp(name + prefix_len, "09-", 3) == 0)
+		return 1;
+	if (memcmp(name + prefix_len, "89-", 3) == 0)
+		return 1;
+	if (memcmp(name + prefix_len, "91-", 3) == 0)
+		return 1;
 
 	return 0;
 }
 
-/* Read the NIC (Number-In-a-Can) device used to store the MAC address on
- * SN0 / SN00 nodeboards and PCI cards.
- */
-static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
+static int ioc3eth_get_mac_addr(struct resource *res, u8 mac_addr[6])
 {
-	u32 __iomem *mcr = &ip->all_regs->mcr;
-	int tries = 2; /* There may be some problem with the battery?  */
-	u8 nic[14];
+	struct nvmem_device *nvmem;
+	char prefix[24];
+	u8 prom[16];
+	int ret;
 	int i;
 
-	writel(1 << 21, &ip->all_regs->gpcr_s);
+	snprintf(prefix, sizeof(prefix), "ioc3-%012llx-",
+		 res->start & ~0xffff);
 
-	while (tries--) {
-		if (!nic_init(mcr))
-			break;
-		udelay(500);
-	}
+	nvmem = nvmem_device_find(prefix, ioc3eth_nvmem_match);
+	if (IS_ERR(nvmem))
+		return PTR_ERR(nvmem);
 
-	if (tries < 0) {
-		pr_err("Failed to read MAC address\n");
-		return;
-	}
+	ret = nvmem_device_read(nvmem, 0, 16, prom);
+	nvmem_device_put(nvmem);
+	if (ret < 0)
+		return ret;
 
-	/* Read Memory.  */
-	nic_write_byte(mcr, 0xf0);
-	nic_write_byte(mcr, 0x00);
-	nic_write_byte(mcr, 0x00);
+	/* check if the content is valid */
+	if (prom[0] != 0x0a ||
+	    crc16(CRC16_INIT, prom, 13) != CRC16_VALID)
+		return -EINVAL;
 
-	for (i = 13; i >= 0; i--)
-		nic[i] = nic_read_byte(mcr);
+	for (i = 0; i < 6; i++)
+		mac_addr[i] = prom[10 - i];
 
-	for (i = 2; i < 8; i++)
-		ip->dev->dev_addr[i - 2] = nic[i];
-}
-
-/* Ok, this is hosed by design.  It's necessary to know what machine the
- * NIC is in in order to know how to read the NIC address.  We also have
- * to know if it's a PCI card or a NIC in on the node board ...
- */
-static void ioc3_get_eaddr(struct ioc3_private *ip)
-{
-	ioc3_get_eaddr_nic(ip);
-
-	pr_info("Ethernet address is %pM.\n", ip->dev->dev_addr);
+	return 0;
 }
 
 static void __ioc3_set_mac_address(struct net_device *dev)
@@ -770,7 +594,7 @@ static int ioc3_mii_init(struct ioc3_private *ip)
 	u16 word;
 
 	for (i = 0; i < 32; i++) {
-		word = ioc3_mdio_read(ip->dev, i, MII_PHYSID1);
+		word = ioc3_mdio_read(ip->mii.dev, i, MII_PHYSID1);
 
 		if (word != 0xffff && word != 0x0000) {
 			found = 1;
@@ -975,12 +799,6 @@ static int ioc3_open(struct net_device *dev)
 {
 	struct ioc3_private *ip = netdev_priv(dev);
 
-	if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) {
-		netdev_err(dev, "Can't get irq %d\n", dev->irq);
-
-		return -EAGAIN;
-	}
-
 	ip->ehar_h = 0;
 	ip->ehar_l = 0;
 
@@ -1005,7 +823,6 @@ static int ioc3_close(struct net_device *dev)
 	netif_stop_queue(dev);
 
 	ioc3_stop(ip);
-	free_irq(dev->irq, dev);
 
 	ioc3_free_rx_bufs(ip);
 	ioc3_clean_tx_ring(ip);
@@ -1013,147 +830,6 @@ static int ioc3_close(struct net_device *dev)
 	return 0;
 }
 
-/* MENET cards have four IOC3 chips, which are attached to two sets of
- * PCI slot resources each: the primary connections are on slots
- * 0..3 and the secondaries are on 4..7
- *
- * All four ethernets are brought out to connectors; six serial ports
- * (a pair from each of the first three IOC3s) are brought out to
- * MiniDINs; all other subdevices are left swinging in the wind, leave
- * them disabled.
- */
-
-static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
-{
-	struct pci_dev *dev = pci_get_slot(pdev->bus, PCI_DEVFN(slot, 0));
-	int ret = 0;
-
-	if (dev) {
-		if (dev->vendor == PCI_VENDOR_ID_SGI &&
-		    dev->device == PCI_DEVICE_ID_SGI_IOC3)
-			ret = 1;
-		pci_dev_put(dev);
-	}
-
-	return ret;
-}
-
-static int ioc3_is_menet(struct pci_dev *pdev)
-{
-	return !pdev->bus->parent &&
-	       ioc3_adjacent_is_ioc3(pdev, 0) &&
-	       ioc3_adjacent_is_ioc3(pdev, 1) &&
-	       ioc3_adjacent_is_ioc3(pdev, 2);
-}
-
-#ifdef CONFIG_SERIAL_8250
-/* Note about serial ports and consoles:
- * For console output, everyone uses the IOC3 UARTA (offset 0x178)
- * connected to the master node (look in ip27_setup_console() and
- * ip27prom_console_write()).
- *
- * For serial (/dev/ttyS0 etc), we can not have hardcoded serial port
- * addresses on a partitioned machine. Since we currently use the ioc3
- * serial ports, we use dynamic serial port discovery that the serial.c
- * driver uses for pci/pnp ports (there is an entry for the SGI ioc3
- * boards in pci_boards[]). Unfortunately, UARTA's pio address is greater
- * than UARTB's, although UARTA on o200s has traditionally been known as
- * port 0. So, we just use one serial port from each ioc3 (since the
- * serial driver adds addresses to get to higher ports).
- *
- * The first one to do a register_console becomes the preferred console
- * (if there is no kernel command line console= directive). /dev/console
- * (ie 5, 1) is then "aliased" into the device number returned by the
- * "device" routine referred to in this console structure
- * (ip27prom_console_dev).
- *
- * Also look in ip27-pci.c:pci_fixup_ioc3() for some comments on working
- * around ioc3 oddities in this respect.
- *
- * The IOC3 serials use a 22MHz clock rate with an additional divider which
- * can be programmed in the SCR register if the DLAB bit is set.
- *
- * Register to interrupt zero because we share the interrupt with
- * the serial driver which we don't properly support yet.
- *
- * Can't use UPF_IOREMAP as the whole of IOC3 resources have already been
- * registered.
- */
-static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
-{
-#define COSMISC_CONSTANT 6
-
-	struct uart_8250_port port = {
-		.port = {
-			.irq		= 0,
-			.flags		= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
-			.iotype		= UPIO_MEM,
-			.regshift	= 0,
-			.uartclk	= (22000000 << 1) / COSMISC_CONSTANT,
-
-			.membase	= (unsigned char __iomem *)uart,
-			.mapbase	= (unsigned long)uart,
-		}
-	};
-	unsigned char lcr;
-
-	lcr = readb(&uart->iu_lcr);
-	writeb(lcr | UART_LCR_DLAB, &uart->iu_lcr);
-	writeb(COSMISC_CONSTANT, &uart->iu_scr);
-	writeb(lcr, &uart->iu_lcr);
-	readb(&uart->iu_lcr);
-	serial8250_register_8250_port(&port);
-}
-
-static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
-{
-	u32 sio_iec;
-
-	/* We need to recognice and treat the fourth MENET serial as it
-	 * does not have an SuperIO chip attached to it, therefore attempting
-	 * to access it will result in bus errors.  We call something an
-	 * MENET if PCI slot 0, 1, 2 and 3 of a master PCI bus all have an IOC3
-	 * in it.  This is paranoid but we want to avoid blowing up on a
-	 * showhorn PCI box that happens to have 4 IOC3 cards in it so it's
-	 * not paranoid enough ...
-	 */
-	if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
-		return;
-
-	/* Switch IOC3 to PIO mode.  It probably already was but let's be
-	 * paranoid
-	 */
-	writel(GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL, &ioc3->gpcr_s);
-	readl(&ioc3->gpcr_s);
-	writel(0, &ioc3->gppr[6]);
-	readl(&ioc3->gppr[6]);
-	writel(0, &ioc3->gppr[7]);
-	readl(&ioc3->gppr[7]);
-	writel(readl(&ioc3->port_a.sscr) & ~SSCR_DMA_EN, &ioc3->port_a.sscr);
-	readl(&ioc3->port_a.sscr);
-	writel(readl(&ioc3->port_b.sscr) & ~SSCR_DMA_EN, &ioc3->port_b.sscr);
-	readl(&ioc3->port_b.sscr);
-	/* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. */
-	sio_iec = readl(&ioc3->sio_iec);
-	sio_iec &= ~(SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
-		     SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
-		     SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
-		     SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
-	sio_iec |= SIO_IR_SA_INT;
-	sio_iec &= ~(SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
-		     SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
-		     SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
-		     SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
-	sio_iec |= SIO_IR_SB_INT;
-	writel(sio_iec, &ioc3->sio_iec);
-	writel(0, &ioc3->port_a.sscr);
-	writel(0, &ioc3->port_b.sscr);
-
-	ioc3_8250_register(&ioc3->sregs.uarta);
-	ioc3_8250_register(&ioc3->sregs.uartb);
-}
-#endif
-
 static const struct net_device_ops ioc3_netdev_ops = {
 	.ndo_open		= ioc3_open,
 	.ndo_stop		= ioc3_close,
@@ -1166,61 +842,52 @@ static const struct net_device_ops ioc3_netdev_ops = {
 	.ndo_set_mac_address	= ioc3_set_mac_address,
 };
 
-static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int ioc3eth_probe(struct platform_device *pdev)
 {
-	unsigned int sw_physid1, sw_physid2;
-	struct net_device *dev = NULL;
+	u32 sw_physid1, sw_physid2, vendor, model, rev;
 	struct ioc3_private *ip;
-	struct ioc3 *ioc3;
-	unsigned long ioc3_base, ioc3_size;
-	u32 vendor, model, rev;
+	struct net_device *dev;
+	struct resource *regs;
+	u8 mac_addr[6];
 	int err;
 
-	/* Configure DMA attributes. */
-	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-	if (err) {
-		pr_err("%s: No usable DMA configuration, aborting.\n",
-		       pci_name(pdev));
-		goto out;
-	}
-
-	if (pci_enable_device(pdev))
-		return -ENODEV;
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	/* get the MAC address from the one-wire PROM */
+	if (ioc3eth_get_mac_addr(regs, mac_addr))
+		return -EPROBE_DEFER; /* not available yet */
 
 	dev = alloc_etherdev(sizeof(struct ioc3_private));
-	if (!dev) {
-		err = -ENOMEM;
-		goto out_disable;
-	}
-
-	err = pci_request_regions(pdev, "ioc3");
-	if (err)
-		goto out_free;
+	if (!dev)
+		return -ENOMEM;
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	ip = netdev_priv(dev);
-	ip->dev = dev;
-	ip->dma_dev = &pdev->dev;
-
-	dev->irq = pdev->irq;
-
-	ioc3_base = pci_resource_start(pdev, 0);
-	ioc3_size = pci_resource_len(pdev, 0);
-	ioc3 = (struct ioc3 *)ioremap(ioc3_base, ioc3_size);
-	if (!ioc3) {
-		pr_err("ioc3eth(%s): ioremap failed, goodbye.\n",
-		       pci_name(pdev));
+	ip->dma_dev = pdev->dev.parent;
+	ip->regs = devm_platform_ioremap_resource(pdev, 0);
+	if (!ip->regs) {
 		err = -ENOMEM;
-		goto out_res;
+		goto out_free;
 	}
-	ip->regs = &ioc3->eth;
-	ip->ssram = ioc3->ssram;
-	ip->all_regs = ioc3;
 
-#ifdef CONFIG_SERIAL_8250
-	ioc3_serial_probe(pdev, ioc3);
-#endif
+	ip->ssram = devm_platform_ioremap_resource(pdev, 1);
+	if (!ip->ssram) {
+		err = -ENOMEM;
+		goto out_free;
+	}
+
+	dev->irq = platform_get_irq(pdev, 0);
+	if (dev->irq < 0) {
+		err = dev->irq;
+		goto out_free;
+	}
+
+	if (devm_request_irq(&pdev->dev, dev->irq, ioc3_interrupt,
+			     IRQF_SHARED, "ioc3-eth", dev)) {
+		dev_err(&pdev->dev, "Can't get irq %d\n", dev->irq);
+		err = -ENODEV;
+		goto out_free;
+	}
 
 	spin_lock_init(&ip->ioc3_lock);
 	timer_setup(&ip->ioc3_timer, ioc3_timer, 0);
@@ -1250,8 +917,6 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	ioc3_init(dev);
 
-	ip->pdev = pdev;
-
 	ip->mii.phy_id_mask = 0x1f;
 	ip->mii.reg_num_mask = 0x1f;
 	ip->mii.dev = dev;
@@ -1261,15 +926,14 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	ioc3_mii_init(ip);
 
 	if (ip->mii.phy_id == -1) {
-		pr_err("ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
-		       pci_name(pdev));
+		netdev_err(dev, "Didn't find a PHY, goodbye.\n");
 		err = -ENODEV;
 		goto out_stop;
 	}
 
 	ioc3_mii_start(ip);
 	ioc3_ssram_disc(ip);
-	ioc3_get_eaddr(ip);
+	memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 
 	/* The IOC3-specific entries in the device structure. */
 	dev->watchdog_timeo	= 5 * HZ;
@@ -1306,21 +970,14 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ip->tx_ring)
 		dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring,
 				  ip->txr_dma);
-out_res:
-	pci_release_regions(pdev);
 out_free:
 	free_netdev(dev);
-out_disable:
-	/* We should call pci_disable_device(pdev); here if the IOC3 wasn't
-	 * such a weird device ...
-	 */
-out:
 	return err;
 }
 
-static void ioc3_remove_one(struct pci_dev *pdev)
+static int ioc3eth_remove(struct platform_device *pdev)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = platform_get_drvdata(pdev);
 	struct ioc3_private *ip = netdev_priv(dev);
 
 	dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma);
@@ -1328,27 +985,11 @@ static void ioc3_remove_one(struct pci_dev *pdev)
 
 	unregister_netdev(dev);
 	del_timer_sync(&ip->ioc3_timer);
-
-	iounmap(ip->all_regs);
-	pci_release_regions(pdev);
 	free_netdev(dev);
-	/* We should call pci_disable_device(pdev); here if the IOC3 wasn't
-	 * such a weird device ...
-	 */
+
+	return 0;
 }
 
-static const struct pci_device_id ioc3_pci_tbl[] = {
-	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
-	{ 0 }
-};
-MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);
-
-static struct pci_driver ioc3_driver = {
-	.name		= "ioc3-eth",
-	.id_table	= ioc3_pci_tbl,
-	.probe		= ioc3_probe,
-	.remove		= ioc3_remove_one,
-};
 
 static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -1530,11 +1171,10 @@ static inline unsigned int ioc3_hash(const unsigned char *addr)
 static void ioc3_get_drvinfo(struct net_device *dev,
 			     struct ethtool_drvinfo *info)
 {
-	struct ioc3_private *ip = netdev_priv(dev);
-
 	strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
 	strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
-	strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
+	strlcpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+		sizeof(info->bus_info));
 }
 
 static int ioc3_get_link_ksettings(struct net_device *dev,
@@ -1646,7 +1286,16 @@ static void ioc3_set_multicast_list(struct net_device *dev)
 	spin_unlock_irq(&ip->ioc3_lock);
 }
 
-module_pci_driver(ioc3_driver);
+static struct platform_driver ioc3eth_driver = {
+	.probe  = ioc3eth_probe,
+	.remove = ioc3eth_remove,
+	.driver = {
+		.name = "ioc3-eth",
+	}
+};
+
+module_platform_driver(ioc3eth_driver);
+
 MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
 MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
 MODULE_LICENSE("GPL");
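
Note on the ioc3-eth conversion above: the new probe no longer bit-bangs the NIC, it builds an nvmem device-name prefix from the region's bus address, accepts only one-wire family codes 09, 89 and 91, validates the 16-byte PROM dump (byte 0 must be 0x0a and the CRC-16 over 13 bytes must match), and copies the MAC out in reverse byte order from offsets 10 down to 5. The following user-space sketch mirrors only that extraction step; the prom[] contents and the crc16_ok() helper are hypothetical stand-ins, not kernel API.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's crc16(CRC16_INIT, buf, 13) check. */
static int crc16_ok(const uint8_t *buf, size_t len) { (void)buf; (void)len; return 1; }

static int prom_to_mac(const uint8_t prom[16], uint8_t mac[6])
{
	int i;

	if (prom[0] != 0x0a || !crc16_ok(prom, 13))
		return -1;		/* content not valid */

	for (i = 0; i < 6; i++)		/* MAC sits at offsets 10..5, reversed */
		mac[i] = prom[10 - i];
	return 0;
}

int main(void)
{
	uint8_t prom[16] = { 0x0a, [5] = 0x56, [6] = 0x78, [7] = 0x9a,
			     [8] = 0xbc, [9] = 0xde, [10] = 0x02 };
	uint8_t mac[6];

	if (prom_to_mac(prom, mac) == 0)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
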
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 7ec8954..e0a5fe8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -413,6 +413,7 @@ static int ethqos_configure(struct qcom_ethqos *ethqos)
 			dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
 			if (dll_lock & SDC4_STATUS_DLL_LOCK)
 				break;
+			retry--;
 		} while (retry > 0);
 		if (!retry)
 			dev_err(&ethqos->pdev->dev,
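
The dwmac-qcom-ethqos hunk above adds the missing retry-- so the DLL-lock poll can actually terminate; without it the do/while spins forever whenever the lock bit never sets. A minimal sketch of the corrected bounded-poll shape follows; read_status() and STATUS_DLL_LOCK are hypothetical stand-ins for the driver's register accessor and bit, not driver API.

#include <stdbool.h>
#include <stdio.h>

#define STATUS_DLL_LOCK 0x1

/* Hypothetical register read standing in for rgmii_readl(ethqos, SDC4_STATUS). */
static unsigned int read_status(void) { return 0; }

static bool wait_for_dll_lock(void)
{
	int retry = 1000;

	do {
		if (read_status() & STATUS_DLL_LOCK)
			return true;
		retry--;		/* the fix: without this the loop never ends */
	} while (retry > 0);

	return false;			/* timed out, caller logs an error */
}

int main(void)
{
	printf("locked: %d\n", wait_for_dll_lock());
	return 0;
}
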
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index f0c0ea6..dc09d21 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -420,7 +420,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
 		value |= GMAC_PACKET_FILTER_PM;
 		/* Set all the bits of the HASH tab */
 		memset(mc_filter, 0xff, sizeof(mc_filter));
-	} else if (!netdev_mc_empty(dev)) {
+	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
 		struct netdev_hw_addr *ha;
 
 		/* Hash filter for multicast */
@@ -736,11 +736,14 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
 				    __le16 perfect_match, bool is_double)
 {
 	void __iomem *ioaddr = hw->pcsr;
+	u32 value;
 
 	writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
 
+	value = readl(ioaddr + GMAC_VLAN_TAG);
+
 	if (hash) {
-		u32 value = GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
+		value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
 		if (is_double) {
 			value |= GMAC_VLAN_EDVLP;
 			value |= GMAC_VLAN_ESVL;
@@ -759,8 +762,6 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
 
 		writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
 	} else {
-		u32 value = readl(ioaddr + GMAC_VLAN_TAG);
-
 		value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
 		value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
 		value &= ~GMAC_VLAN_DOVLTC;
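
Both the dwmac4 hunk above and the dwxgmac2 hunk below stop writing a freshly-built value into the VLAN tag register and instead do a read-modify-write, so bits programmed by other paths survive a hash update. A generic sketch of the pattern, with reg_read()/reg_write() as hypothetical accessors and the bit names purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define VLAN_VTHM  (1u << 25)
#define VLAN_ETV   (1u << 16)

static uint32_t fake_reg = 0x00040000;	/* pretend an unrelated bit is already set */

static uint32_t reg_read(void)    { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

static void enable_vlan_hash(void)
{
	uint32_t value = reg_read();	/* preserve whatever is already programmed */

	value |= VLAN_VTHM | VLAN_ETV;	/* only OR in the bits this path owns */
	reg_write(value);
}

int main(void)
{
	enable_vlan_hash();
	printf("reg = 0x%08x\n", fake_reg);	/* unrelated bit 0x00040000 survives */
	return 0;
}
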
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 2af3ac5..67b754a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -458,7 +458,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
 
 		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
 			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
-	} else if (!netdev_mc_empty(dev)) {
+	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
 		struct netdev_hw_addr *ha;
 
 		value |= XGMAC_FILTER_HMC;
@@ -569,7 +569,9 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
 
 		writel(value, ioaddr + XGMAC_PACKET_FILTER);
 
-		value = XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
+		value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+		value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
 		if (is_double) {
 			value |= XGMAC_VLAN_EDVLP;
 			value |= XGMAC_VLAN_ESVL;
@@ -584,7 +586,9 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
 
 		writel(value, ioaddr + XGMAC_PACKET_FILTER);
 
-		value = XGMAC_VLAN_ETV;
+		value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+		value |= XGMAC_VLAN_ETV;
 		if (is_double) {
 			value |= XGMAC_VLAN_EDVLP;
 			value |= XGMAC_VLAN_ESVL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ff1cbfc..5836b21 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4974,6 +4974,7 @@ int stmmac_suspend(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
+	u32 chan;
 
 	if (!ndev || !netif_running(ndev))
 		return 0;
@@ -4987,6 +4988,9 @@ int stmmac_suspend(struct device *dev)
 
 	stmmac_disable_all_queues(priv);
 
+	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+		del_timer_sync(&priv->tx_queue[chan].txtimer);
+
 	/* Stop TX/RX DMA */
 	stmmac_stop_all_dma(priv);
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 6235210..fe2c9fa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -95,7 +95,7 @@ static int stmmac_default_data(struct pci_dev *pdev,
 
 	plat->bus_id = 1;
 	plat->phy_addr = 0;
-	plat->interface = PHY_INTERFACE_MODE_GMII;
+	plat->phy_interface = PHY_INTERFACE_MODE_GMII;
 
 	plat->dma_cfg->pbl = 32;
 	plat->dma_cfg->pblx8 = true;
@@ -217,7 +217,8 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
 {
 	plat->bus_id = 1;
 	plat->phy_addr = 0;
-	plat->interface = PHY_INTERFACE_MODE_SGMII;
+	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+
 	return ehl_common_data(pdev, plat);
 }
 
@@ -230,7 +231,8 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
 {
 	plat->bus_id = 1;
 	plat->phy_addr = 0;
-	plat->interface = PHY_INTERFACE_MODE_RGMII;
+	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
+
 	return ehl_common_data(pdev, plat);
 }
 
@@ -258,7 +260,7 @@ static int tgl_sgmii_data(struct pci_dev *pdev,
 {
 	plat->bus_id = 1;
 	plat->phy_addr = 0;
-	plat->interface = PHY_INTERFACE_MODE_SGMII;
+	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
 	return tgl_common_data(pdev, plat);
 }
 
@@ -358,7 +360,7 @@ static int quark_default_data(struct pci_dev *pdev,
 
 	plat->bus_id = pci_dev_id(pdev);
 	plat->phy_addr = ret;
-	plat->interface = PHY_INTERFACE_MODE_RMII;
+	plat->phy_interface = PHY_INTERFACE_MODE_RMII;
 
 	plat->dma_cfg->pbl = 16;
 	plat->dma_cfg->pblx8 = true;
@@ -415,7 +417,7 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
 
 	plat->bus_id = 1;
 	plat->phy_addr = -1;
-	plat->interface = PHY_INTERFACE_MODE_GMII;
+	plat->phy_interface = PHY_INTERFACE_MODE_GMII;
 
 	plat->dma_cfg->pbl = 32;
 	plat->dma_cfg->pblx8 = true;
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 7032a24..af07ea7 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -767,12 +767,12 @@ static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
 	int i;
 
 	gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
-				       GFP_KERNEL);
+				       GFP_KERNEL | __GFP_NOWARN);
 	if (gtp->addr_hash == NULL)
 		return -ENOMEM;
 
 	gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
-				      GFP_KERNEL);
+				      GFP_KERNEL | __GFP_NOWARN);
 	if (gtp->tid_hash == NULL)
 		goto err1;
 
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 20adfe5..b866110 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -120,7 +120,7 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	}
 
 	if (prog)
-		bpf_prog_add(prog, nvdev->num_chn);
+		bpf_prog_add(prog, nvdev->num_chn - 1);
 
 	for (i = 0; i < nvdev->num_chn; i++)
 		rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);
@@ -136,6 +136,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
 {
 	struct netdev_bpf xdp;
 	bpf_op_t ndo_bpf;
+	int ret;
 
 	ASSERT_RTNL();
 
@@ -148,10 +149,18 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
 
 	memset(&xdp, 0, sizeof(xdp));
 
+	if (prog)
+		bpf_prog_inc(prog);
+
 	xdp.command = XDP_SETUP_PROG;
 	xdp.prog = prog;
 
-	return ndo_bpf(vf_netdev, &xdp);
+	ret = ndo_bpf(vf_netdev, &xdp);
+
+	if (ret && prog)
+		bpf_prog_put(prog);
+
+	return ret;
 }
 
 static u32 netvsc_xdp_query(struct netvsc_device *nvdev)
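
The netvsc_bpf hunks adjust BPF program reference counting: the caller already holds one reference when netvsc_xdp_set() is entered, so only num_chn - 1 extra references are taken, and netvsc_vf_setxdp() now takes a reference before handing the program to the VF's ndo_bpf and drops it again if that call fails. A user-space sketch of the same discipline with a plain counter; take_ref(), drop_ref() and install() are hypothetical helpers, not the bpf API.

#include <stdio.h>

struct prog { int refcnt; };

static void take_ref(struct prog *p, int n) { if (p) p->refcnt += n; }
static void drop_ref(struct prog *p)        { if (p) p->refcnt--; }

/* Hypothetical installer that may fail. */
static int install(struct prog *p) { (void)p; return -1; }

static int set_prog(struct prog *p, int channels)
{
	int err;

	/* Caller already owns one reference, so take only channels - 1 more:
	 * afterwards each channel is covered by exactly one reference.
	 */
	take_ref(p, channels - 1);

	/* Hand one extra reference to a lower device; give it back on failure. */
	take_ref(p, 1);
	err = install(p);
	if (err)
		drop_ref(p);

	return err;
}

int main(void)
{
	struct prog p = { .refcnt = 1 };

	set_prog(&p, 4);
	printf("refcnt = %d\n", p.refcnt);	/* 4: one per channel, install failed */
	return 0;
}
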
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8fc71bd..65e12cb 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1059,9 +1059,12 @@ static int netvsc_attach(struct net_device *ndev,
 
 	prog = dev_info->bprog;
 	if (prog) {
+		bpf_prog_inc(prog);
 		ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
-		if (ret)
+		if (ret) {
+			bpf_prog_put(prog);
 			goto err1;
+		}
 	}
 
 	/* In any case device is now ready */
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 2b74425..0b362b8 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -218,6 +218,7 @@ static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
 {
 	struct nsim_bpf_bound_prog *state;
 	char name[16];
+	int ret;
 
 	state = kzalloc(sizeof(*state), GFP_KERNEL);
 	if (!state)
@@ -230,9 +231,10 @@ static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
 	/* Program id is not populated yet when we create the state. */
 	sprintf(name, "%u", nsim_dev->prog_id_gen++);
 	state->ddir = debugfs_create_dir(name, nsim_dev->ddir_bpf_bound_progs);
-	if (IS_ERR_OR_NULL(state->ddir)) {
+	if (IS_ERR(state->ddir)) {
+		ret = PTR_ERR(state->ddir);
 		kfree(state);
-		return -ENOMEM;
+		return ret;
 	}
 
 	debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
@@ -587,8 +589,8 @@ int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
 
 	nsim_dev->ddir_bpf_bound_progs = debugfs_create_dir("bpf_bound_progs",
 							    nsim_dev->ddir);
-	if (IS_ERR_OR_NULL(nsim_dev->ddir_bpf_bound_progs))
-		return -ENOMEM;
+	if (IS_ERR(nsim_dev->ddir_bpf_bound_progs))
+		return PTR_ERR(nsim_dev->ddir_bpf_bound_progs);
 
 	nsim_dev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, nsim_dev);
 	err = PTR_ERR_OR_ZERO(nsim_dev->bpf_dev);
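
Several netdevsim hunks (bpf.c above, dev.c and health.c below) switch from IS_ERR_OR_NULL() plus a hard-coded -ENOMEM to IS_ERR() plus PTR_ERR(): debugfs_create_dir() reports failure with an ERR_PTR rather than NULL, so the old check treated NULL as failure and threw the real error code away. A minimal kernel-style sketch of the corrected shape; the "example" names are illustrative only, not netdevsim's.

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch of the IS_ERR()/PTR_ERR() pattern for debugfs directories. */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *example_dir;	/* illustrative name */

static int __init example_init(void)
{
	example_dir = debugfs_create_dir("example", NULL);
	if (IS_ERR(example_dir))		/* debugfs returns ERR_PTR on failure, not NULL */
		return PTR_ERR(example_dir);	/* propagate the real error code */
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
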
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 6aeed0c..7971dc4 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -17,6 +17,7 @@
 static DEFINE_IDA(nsim_bus_dev_ids);
 static LIST_HEAD(nsim_bus_dev_list);
 static DEFINE_MUTEX(nsim_bus_dev_list_lock);
+static bool nsim_bus_enable;
 
 static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev)
 {
@@ -28,7 +29,7 @@ static int nsim_bus_dev_vfs_enable(struct nsim_bus_dev *nsim_bus_dev,
 {
 	nsim_bus_dev->vfconfigs = kcalloc(num_vfs,
 					  sizeof(struct nsim_vf_config),
-					  GFP_KERNEL);
+					  GFP_KERNEL | __GFP_NOWARN);
 	if (!nsim_bus_dev->vfconfigs)
 		return -ENOMEM;
 	nsim_bus_dev->num_vfs = num_vfs;
@@ -96,13 +97,25 @@ new_port_store(struct device *dev, struct device_attribute *attr,
 	       const char *buf, size_t count)
 {
 	struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+	struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
+	struct devlink *devlink;
 	unsigned int port_index;
 	int ret;
 
+	/* Prevent use of nsim_bus_dev before initialization. */
+	if (!smp_load_acquire(&nsim_bus_dev->init))
+		return -EBUSY;
 	ret = kstrtouint(buf, 0, &port_index);
 	if (ret)
 		return ret;
+
+	devlink = priv_to_devlink(nsim_dev);
+
+	mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
+	devlink_reload_disable(devlink);
 	ret = nsim_dev_port_add(nsim_bus_dev, port_index);
+	devlink_reload_enable(devlink);
+	mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
 	return ret ? ret : count;
 }
 
@@ -113,13 +126,25 @@ del_port_store(struct device *dev, struct device_attribute *attr,
 	       const char *buf, size_t count)
 {
 	struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+	struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
+	struct devlink *devlink;
 	unsigned int port_index;
 	int ret;
 
+	/* Prevent use of nsim_bus_dev before initialization. */
+	if (!smp_load_acquire(&nsim_bus_dev->init))
+		return -EBUSY;
 	ret = kstrtouint(buf, 0, &port_index);
 	if (ret)
 		return ret;
+
+	devlink = priv_to_devlink(nsim_dev);
+
+	mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
+	devlink_reload_disable(devlink);
 	ret = nsim_dev_port_del(nsim_bus_dev, port_index);
+	devlink_reload_enable(devlink);
+	mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
 	return ret ? ret : count;
 }
 
@@ -179,15 +204,30 @@ new_device_store(struct bus_type *bus, const char *buf, size_t count)
 		pr_err("Format for adding new device is \"id port_count\" (uint uint).\n");
 		return -EINVAL;
 	}
-	nsim_bus_dev = nsim_bus_dev_new(id, port_count);
-	if (IS_ERR(nsim_bus_dev))
-		return PTR_ERR(nsim_bus_dev);
 
 	mutex_lock(&nsim_bus_dev_list_lock);
+	/* Prevent use of the resource before initialization. */
+	if (!smp_load_acquire(&nsim_bus_enable)) {
+		err = -EBUSY;
+		goto err;
+	}
+
+	nsim_bus_dev = nsim_bus_dev_new(id, port_count);
+	if (IS_ERR(nsim_bus_dev)) {
+		err = PTR_ERR(nsim_bus_dev);
+		goto err;
+	}
+
+	/* Allow using nsim_bus_dev */
+	smp_store_release(&nsim_bus_dev->init, true);
+
 	list_add_tail(&nsim_bus_dev->list, &nsim_bus_dev_list);
 	mutex_unlock(&nsim_bus_dev_list_lock);
 
 	return count;
+err:
+	mutex_unlock(&nsim_bus_dev_list_lock);
+	return err;
 }
 static BUS_ATTR_WO(new_device);
 
@@ -215,6 +255,11 @@ del_device_store(struct bus_type *bus, const char *buf, size_t count)
 
 	err = -ENOENT;
 	mutex_lock(&nsim_bus_dev_list_lock);
+	/* Prevent use of the resource before initialization. */
+	if (!smp_load_acquire(&nsim_bus_enable)) {
+		mutex_unlock(&nsim_bus_dev_list_lock);
+		return -EBUSY;
+	}
 	list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
 		if (nsim_bus_dev->dev.id != id)
 			continue;
@@ -284,6 +329,9 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
 	nsim_bus_dev->dev.type = &nsim_bus_dev_type;
 	nsim_bus_dev->port_count = port_count;
 	nsim_bus_dev->initial_net = current->nsproxy->net_ns;
+	mutex_init(&nsim_bus_dev->nsim_bus_reload_lock);
+	/* Disallow using nsim_bus_dev */
+	smp_store_release(&nsim_bus_dev->init, false);
 
 	err = device_register(&nsim_bus_dev->dev);
 	if (err)
@@ -299,6 +347,8 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
 
 static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
 {
+	/* Disallow using nsim_bus_dev */
+	smp_store_release(&nsim_bus_dev->init, false);
 	device_unregister(&nsim_bus_dev->dev);
 	ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
 	kfree(nsim_bus_dev);
@@ -320,6 +370,8 @@ int nsim_bus_init(void)
 	err = driver_register(&nsim_driver);
 	if (err)
 		goto err_bus_unregister;
+	/* Allow using resources */
+	smp_store_release(&nsim_bus_enable, true);
 	return 0;
 
 err_bus_unregister:
@@ -331,12 +383,16 @@ void nsim_bus_exit(void)
 {
 	struct nsim_bus_dev *nsim_bus_dev, *tmp;
 
+	/* Disallow using resources */
+	smp_store_release(&nsim_bus_enable, false);
+
 	mutex_lock(&nsim_bus_dev_list_lock);
 	list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
 		list_del(&nsim_bus_dev->list);
 		nsim_bus_dev_del(nsim_bus_dev);
 	}
 	mutex_unlock(&nsim_bus_dev_list_lock);
+
 	driver_unregister(&nsim_driver);
 	bus_unregister(&nsim_bus);
 }
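
The netdevsim bus hunks above guard the sysfs store handlers with an init flag published via smp_store_release() and read via smp_load_acquire(), so a writer racing with module init or exit sees either a fully initialized object or -EBUSY. A user-space analogue of that publication pattern using C11 atomics; the kernel helpers are not used here, and the struct name is purely illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct bus_dev {
	int ports;
	atomic_bool init;	/* analogue of the nsim_bus_dev->init flag */
};

static void publish(struct bus_dev *d, int ports)
{
	d->ports = ports;
	/* Release: everything written above is visible before init reads true. */
	atomic_store_explicit(&d->init, true, memory_order_release);
}

static int handle_store(struct bus_dev *d)
{
	/* Acquire: refuse to touch the object until it is fully initialized. */
	if (!atomic_load_explicit(&d->init, memory_order_acquire))
		return -16;	/* -EBUSY */
	return d->ports;
}

int main(void)
{
	struct bus_dev d;

	atomic_init(&d.init, false);	/* object starts unpublished */
	printf("before publish: %d\n", handle_store(&d));	/* -16 */
	publish(&d, 4);
	printf("after publish:  %d\n", handle_store(&d));	/* 4 */
	return 0;
}
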
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index b53fbc0..d7706a0 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -73,23 +73,26 @@ static const struct file_operations nsim_dev_take_snapshot_fops = {
 
 static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
 {
-	char dev_ddir_name[16];
+	char dev_ddir_name[sizeof(DRV_NAME) + 10];
 
 	sprintf(dev_ddir_name, DRV_NAME "%u", nsim_dev->nsim_bus_dev->dev.id);
 	nsim_dev->ddir = debugfs_create_dir(dev_ddir_name, nsim_dev_ddir);
-	if (IS_ERR_OR_NULL(nsim_dev->ddir))
-		return PTR_ERR_OR_ZERO(nsim_dev->ddir) ?: -EINVAL;
+	if (IS_ERR(nsim_dev->ddir))
+		return PTR_ERR(nsim_dev->ddir);
 	nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir);
-	if (IS_ERR_OR_NULL(nsim_dev->ports_ddir))
-		return PTR_ERR_OR_ZERO(nsim_dev->ports_ddir) ?: -EINVAL;
+	if (IS_ERR(nsim_dev->ports_ddir))
+		return PTR_ERR(nsim_dev->ports_ddir);
 	debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
 			    &nsim_dev->fw_update_status);
 	debugfs_create_u32("max_macs", 0600, nsim_dev->ddir,
 			   &nsim_dev->max_macs);
 	debugfs_create_bool("test1", 0600, nsim_dev->ddir,
 			    &nsim_dev->test1);
-	debugfs_create_file("take_snapshot", 0200, nsim_dev->ddir, nsim_dev,
-			    &nsim_dev_take_snapshot_fops);
+	nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
+						      0200,
+						      nsim_dev->ddir,
+						      nsim_dev,
+						&nsim_dev_take_snapshot_fops);
 	debugfs_create_bool("dont_allow_reload", 0600, nsim_dev->ddir,
 			    &nsim_dev->dont_allow_reload);
 	debugfs_create_bool("fail_reload", 0600, nsim_dev->ddir,
@@ -112,8 +115,8 @@ static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev,
 	sprintf(port_ddir_name, "%u", nsim_dev_port->port_index);
 	nsim_dev_port->ddir = debugfs_create_dir(port_ddir_name,
 						 nsim_dev->ports_ddir);
-	if (IS_ERR_OR_NULL(nsim_dev_port->ddir))
-		return -ENOMEM;
+	if (IS_ERR(nsim_dev_port->ddir))
+		return PTR_ERR(nsim_dev_port->ddir);
 
 	sprintf(dev_link_name, "../../../" DRV_NAME "%u",
 		nsim_dev->nsim_bus_dev->dev.id);
@@ -740,6 +743,11 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
 	if (err)
 		goto err_health_exit;
 
+	nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
+						      0200,
+						      nsim_dev->ddir,
+						      nsim_dev,
+						&nsim_dev_take_snapshot_fops);
 	return 0;
 
 err_health_exit:
@@ -853,6 +861,7 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
 
 	if (devlink_is_reload_failed(devlink))
 		return;
+	debugfs_remove(nsim_dev->take_snapshot);
 	nsim_dev_port_del_all(nsim_dev);
 	nsim_dev_health_exit(nsim_dev);
 	nsim_dev_traps_exit(devlink);
@@ -925,9 +934,7 @@ int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
 int nsim_dev_init(void)
 {
 	nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL);
-	if (IS_ERR_OR_NULL(nsim_dev_ddir))
-		return -ENOMEM;
-	return 0;
+	return PTR_ERR_OR_ZERO(nsim_dev_ddir);
 }
 
 void nsim_dev_exit(void)
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index 9aa637d..ba8d9ad 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -82,7 +82,7 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
 	if (err)
 		return err;
 
-	binary = kmalloc(binary_len, GFP_KERNEL);
+	binary = kmalloc(binary_len, GFP_KERNEL | __GFP_NOWARN);
 	if (!binary)
 		return -ENOMEM;
 	get_random_bytes(binary, binary_len);
@@ -285,8 +285,8 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
 	}
 
 	health->ddir = debugfs_create_dir("health", nsim_dev->ddir);
-	if (IS_ERR_OR_NULL(health->ddir)) {
-		err = PTR_ERR_OR_ZERO(health->ddir) ?: -EINVAL;
+	if (IS_ERR(health->ddir)) {
+		err = PTR_ERR(health->ddir);
 		goto err_dummy_reporter_destroy;
 	}
 
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 94df795..2eb7b0d 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -160,6 +160,7 @@ struct nsim_dev {
 	struct nsim_trap_data *trap_data;
 	struct dentry *ddir;
 	struct dentry *ports_ddir;
+	struct dentry *take_snapshot;
 	struct bpf_offload_dev *bpf_dev;
 	bool bpf_bind_accept;
 	u32 bpf_bind_verifier_delay;
@@ -240,6 +241,9 @@ struct nsim_bus_dev {
 				  */
 	unsigned int num_vfs;
 	struct nsim_vf_config *vfconfigs;
+	/* Lock for devlink->reload_enabled in netdevsim module */
+	struct mutex nsim_bus_reload_lock;
+	bool init;
 };
 
 int nsim_bus_init(void);
diff --git a/drivers/net/netdevsim/sdev.c b/drivers/net/netdevsim/sdev.c
deleted file mode 100644
index 6712da3..0000000
--- a/drivers/net/netdevsim/sdev.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
-
-#include <linux/debugfs.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include "netdevsim.h"
-
-static struct dentry *nsim_sdev_ddir;
-
-static u32 nsim_sdev_id;
-
-struct netdevsim_shared_dev *nsim_sdev_get(struct netdevsim *joinns)
-{
-	struct netdevsim_shared_dev *sdev;
-	char sdev_ddir_name[10];
-	int err;
-
-	if (joinns) {
-		if (WARN_ON(!joinns->sdev))
-			return ERR_PTR(-EINVAL);
-		sdev = joinns->sdev;
-		sdev->refcnt++;
-		return sdev;
-	}
-
-	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
-	if (!sdev)
-		return ERR_PTR(-ENOMEM);
-	sdev->refcnt = 1;
-	sdev->switch_id = nsim_sdev_id++;
-
-	sprintf(sdev_ddir_name, "%u", sdev->switch_id);
-	sdev->ddir = debugfs_create_dir(sdev_ddir_name, nsim_sdev_ddir);
-	if (IS_ERR_OR_NULL(sdev->ddir)) {
-		err = PTR_ERR_OR_ZERO(sdev->ddir) ?: -EINVAL;
-		goto err_sdev_free;
-	}
-
-	return sdev;
-
-err_sdev_free:
-	nsim_sdev_id--;
-	kfree(sdev);
-	return ERR_PTR(err);
-}
-
-void nsim_sdev_put(struct netdevsim_shared_dev *sdev)
-{
-	if (--sdev->refcnt)
-		return;
-	debugfs_remove_recursive(sdev->ddir);
-	kfree(sdev);
-}
-
-int nsim_sdev_init(void)
-{
-	nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL);
-	if (IS_ERR_OR_NULL(nsim_sdev_ddir))
-		return -ENOMEM;
-	return 0;
-}
-
-void nsim_sdev_exit(void)
-{
-	debugfs_remove_recursive(nsim_sdev_ddir);
-}
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index aee6261..481cf48 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -489,6 +489,14 @@ static int at803x_probe(struct phy_device *phydev)
 	return at803x_parse_dt(phydev);
 }
 
+static void at803x_remove(struct phy_device *phydev)
+{
+	struct at803x_priv *priv = phydev->priv;
+
+	if (priv->vddio)
+		regulator_disable(priv->vddio);
+}
+
 static int at803x_clk_out_config(struct phy_device *phydev)
 {
 	struct at803x_priv *priv = phydev->priv;
@@ -711,6 +719,7 @@ static struct phy_driver at803x_driver[] = {
 	.name			= "Qualcomm Atheros AR8035",
 	.phy_id_mask		= AT803X_PHY_ID_MASK,
 	.probe			= at803x_probe,
+	.remove			= at803x_remove,
 	.config_init		= at803x_config_init,
 	.set_wol		= at803x_set_wol,
 	.get_wol		= at803x_get_wol,
@@ -726,6 +735,7 @@ static struct phy_driver at803x_driver[] = {
 	.name			= "Qualcomm Atheros AR8030",
 	.phy_id_mask		= AT803X_PHY_ID_MASK,
 	.probe			= at803x_probe,
+	.remove			= at803x_remove,
 	.config_init		= at803x_config_init,
 	.link_change_notify	= at803x_link_change_notify,
 	.set_wol		= at803x_set_wol,
@@ -741,6 +751,7 @@ static struct phy_driver at803x_driver[] = {
 	.name			= "Qualcomm Atheros AR8031/AR8033",
 	.phy_id_mask		= AT803X_PHY_ID_MASK,
 	.probe			= at803x_probe,
+	.remove			= at803x_remove,
 	.config_init		= at803x_config_init,
 	.set_wol		= at803x_set_wol,
 	.get_wol		= at803x_get_wol,
diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/phy/mdio-mux-meson-g12a.c
index 7a9ad54..bf86c9c 100644
--- a/drivers/net/phy/mdio-mux-meson-g12a.c
+++ b/drivers/net/phy/mdio-mux-meson-g12a.c
@@ -123,7 +123,7 @@ static int g12a_ephy_pll_is_enabled(struct clk_hw *hw)
 	return (val & PLL_CTL0_LOCK_DIG) ? 1 : 0;
 }
 
-static void g12a_ephy_pll_init(struct clk_hw *hw)
+static int g12a_ephy_pll_init(struct clk_hw *hw)
 {
 	struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
 
@@ -136,6 +136,8 @@ static void g12a_ephy_pll_init(struct clk_hw *hw)
 	writel(0x20200000, pll->base + ETH_PLL_CTL5);
 	writel(0x0000c002, pll->base + ETH_PLL_CTL6);
 	writel(0x00000023, pll->base + ETH_PLL_CTL7);
+
+	return 0;
 }
 
 static const struct clk_ops g12a_ephy_pll_ops = {
diff --git a/drivers/net/phy/mii_timestamper.c b/drivers/net/phy/mii_timestamper.c
index 2f12c5d..b71b745 100644
--- a/drivers/net/phy/mii_timestamper.c
+++ b/drivers/net/phy/mii_timestamper.c
@@ -111,6 +111,13 @@ void unregister_mii_timestamper(struct mii_timestamper *mii_ts)
 	struct mii_timestamping_desc *desc;
 	struct list_head *this;
 
+	/* A mii_timestamper statically registered by the PHY driver won't have
+	 * used register_mii_timestamper() and thus has no ->device set. Don't
+	 * try to unregister these.
+	 */
+	if (!mii_ts->device)
+		return;
+
 	mutex_lock(&tstamping_devices_lock);
 	list_for_each(this, &mii_timestamping_devices) {
 		desc = list_entry(this, struct mii_timestamping_desc, list);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e8cd8c0..78ddbaf 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -698,6 +698,9 @@ enum rtl8152_flags {
 #define VENDOR_ID_NVIDIA		0x0955
 #define VENDOR_ID_TPLINK		0x2357
 
+#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2	0x3082
+#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2		0xa387
+
 #define MCU_TYPE_PLA			0x0100
 #define MCU_TYPE_USB			0x0000
 
@@ -6759,9 +6762,13 @@ static int rtl8152_probe(struct usb_interface *intf,
 		netdev->hw_features &= ~NETIF_F_RXCSUM;
 	}
 
-	if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO &&
-	    le16_to_cpu(udev->descriptor.idProduct) == 0x3082)
-		set_bit(LENOVO_MACPASSTHRU, &tp->flags);
+	if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
+		switch (le16_to_cpu(udev->descriptor.idProduct)) {
+		case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
+		case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
+			set_bit(LENOVO_MACPASSTHRU, &tp->flags);
+		}
+	}
 
 	if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
 	    (!strcmp(udev->serial, "000001000000") ||
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 3998cac..9edd946 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -84,8 +84,8 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
 	int ret, i;
 	void *bd_buffer;
 	dma_addr_t bd_dma_addr;
-	u32 riptr;
-	u32 tiptr;
+	s32 riptr;
+	s32 tiptr;
 	u32 gumr;
 
 	ut_info = priv->ut_info;
@@ -195,7 +195,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
 	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
 				ALIGNMENT_OF_UCC_HDLC_PRAM);
 
-	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
+	if (priv->ucc_pram_offset < 0) {
 		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
 		ret = -ENOMEM;
 		goto free_tx_bd;
@@ -233,18 +233,23 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
 
 	/* Alloc riptr, tiptr */
 	riptr = qe_muram_alloc(32, 32);
-	if (IS_ERR_VALUE(riptr)) {
+	if (riptr < 0) {
 		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
 		ret = -ENOMEM;
 		goto free_tx_skbuff;
 	}
 
 	tiptr = qe_muram_alloc(32, 32);
-	if (IS_ERR_VALUE(tiptr)) {
+	if (tiptr < 0) {
 		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
 		ret = -ENOMEM;
 		goto free_riptr;
 	}
+	if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
+		dev_err(priv->dev, "MURAM allocation out of addressable range\n");
+		ret = -ENOMEM;
+		goto free_tiptr;
+	}
 
 	/* Set RIPTR, TIPTR */
 	iowrite16be(riptr, &priv->ucc_pram->riptr);
@@ -623,8 +628,8 @@ static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
 
 	if (howmany < budget) {
 		napi_complete_done(napi, howmany);
-		qe_setbits32(priv->uccf->p_uccm,
-			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
+		qe_setbits_be32(priv->uccf->p_uccm,
+				(UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
 	}
 
 	return howmany;
@@ -730,8 +735,8 @@ static int uhdlc_open(struct net_device *dev)
 
 static void uhdlc_memclean(struct ucc_hdlc_private *priv)
 {
-	qe_muram_free(priv->ucc_pram->riptr);
-	qe_muram_free(priv->ucc_pram->tiptr);
+	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
+	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
 
 	if (priv->rx_bd_base) {
 		dma_free_coherent(priv->dev,
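
The fsl_ucc_hdlc hunks above fix a signedness bug: qe_muram_alloc() reports failure as a negative value, but the offsets were stored in u32 and tested with IS_ERR_VALUE(), which never triggers for a 32-bit unsigned quantity on a 64-bit kernel. The fix keeps the offsets signed (s32), tests for < 0, and additionally rejects offsets that do not fit the 16-bit RIPTR/TIPTR registers. A small standalone illustration of both pitfalls; alloc_offset() is a hypothetical stand-in and the IS_ERR_VALUE macro below is only a user-space analogue of the kernel one.

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095
/* User-space analogue of the kernel's IS_ERR_VALUE() threshold check. */
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

/* Hypothetical allocator: negative means failure, like qe_muram_alloc(). */
static int32_t alloc_offset(int fail) { return fail ? -12 /* -ENOMEM */ : 0x12340; }

int main(void)
{
	uint32_t as_u32 = alloc_offset(1);	/* -12 stored as 0xfffffff4 */
	int32_t  as_s32 = alloc_offset(1);

	/* Broken: on a 64-bit build the u32 widens to 0x00000000fffffff4,
	 * far below the error threshold, so the failure is missed.
	 */
	printf("IS_ERR_VALUE(u32) = %d\n", IS_ERR_VALUE(as_u32));	/* 0 */
	/* Fixed: keep the value signed and simply compare against zero. */
	printf("s32 < 0           = %d\n", as_s32 < 0);			/* 1 */

	/* Second pitfall fixed by the hunk: a valid offset may still not
	 * fit the 16-bit RIPTR/TIPTR registers.
	 */
	int32_t riptr = alloc_offset(0);
	if (riptr != (uint16_t)riptr)
		printf("0x%x does not fit in 16 bits\n", (unsigned int)riptr);
	return 0;
}
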
diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h
index 8b3507a..71d5ad0 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.h
+++ b/drivers/net/wan/fsl_ucc_hdlc.h
@@ -98,7 +98,7 @@ struct ucc_hdlc_private {
 
 	unsigned short tx_ring_size;
 	unsigned short rx_ring_size;
-	u32 ucc_pram_offset;
+	s32 ucc_pram_offset;
 
 	unsigned short encoding;
 	unsigned short parity;
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 121d9ea..3725e9c 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -263,6 +263,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
 	} else {
 		node = kzalloc(sizeof(*node), GFP_KERNEL);
 		if (unlikely(!node)) {
+			list_del(&newnode->peer_list);
 			kfree(newnode);
 			return -ENOMEM;
 		}
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index 0fdbd1c..bda2640 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -569,10 +569,8 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
 							 private_key);
 		list_for_each_entry_safe(peer, temp, &wg->peer_list,
 					 peer_list) {
-			if (wg_noise_precompute_static_static(peer))
-				wg_noise_expire_current_peer_keypairs(peer);
-			else
-				wg_peer_remove(peer);
+			BUG_ON(!wg_noise_precompute_static_static(peer));
+			wg_noise_expire_current_peer_keypairs(peer);
 		}
 		wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
 		up_write(&wg->static_identity.lock);
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
index d71c8db..919d9d8 100644
--- a/drivers/net/wireguard/noise.c
+++ b/drivers/net/wireguard/noise.c
@@ -46,17 +46,21 @@ void __init wg_noise_init(void)
 /* Must hold peer->handshake.static_identity->lock */
 bool wg_noise_precompute_static_static(struct wg_peer *peer)
 {
-	bool ret = true;
+	bool ret;
 
 	down_write(&peer->handshake.lock);
-	if (peer->handshake.static_identity->has_identity)
+	if (peer->handshake.static_identity->has_identity) {
 		ret = curve25519(
 			peer->handshake.precomputed_static_static,
 			peer->handshake.static_identity->static_private,
 			peer->handshake.remote_static);
-	else
+	} else {
+		u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 };
+
+		ret = curve25519(empty, empty, peer->handshake.remote_static);
 		memset(peer->handshake.precomputed_static_static, 0,
 		       NOISE_PUBLIC_KEY_LEN);
+	}
 	up_write(&peer->handshake.lock);
 	return ret;
 }
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index c4c8f1b..8363f91 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -4420,73 +4420,65 @@ static int proc_BSSList_open( struct inode *inode, struct file *file );
 static int proc_config_open( struct inode *inode, struct file *file );
 static int proc_wepkey_open( struct inode *inode, struct file *file );
 
-static const struct file_operations proc_statsdelta_ops = {
-	.owner		= THIS_MODULE,
-	.read		= proc_read,
-	.open		= proc_statsdelta_open,
-	.release	= proc_close,
-	.llseek		= default_llseek,
+static const struct proc_ops proc_statsdelta_ops = {
+	.proc_read	= proc_read,
+	.proc_open	= proc_statsdelta_open,
+	.proc_release	= proc_close,
+	.proc_lseek	= default_llseek,
 };
 
-static const struct file_operations proc_stats_ops = {
-	.owner		= THIS_MODULE,
-	.read		= proc_read,
-	.open		= proc_stats_open,
-	.release	= proc_close,
-	.llseek		= default_llseek,
+static const struct proc_ops proc_stats_ops = {
+	.proc_read	= proc_read,
+	.proc_open	= proc_stats_open,
+	.proc_release	= proc_close,
+	.proc_lseek	= default_llseek,
 };
 
-static const struct file_operations proc_status_ops = {
-	.owner		= THIS_MODULE,
-	.read		= proc_read,
-	.open		= proc_status_open,
-	.release	= proc_close,
-	.llseek		= default_llseek,
+static const struct proc_ops proc_status_ops = {
+	.proc_read	= proc_read,
+	.proc_open	= proc_status_open,
+	.proc_release	= proc_close,
+	.proc_lseek	= default_llseek,
 };
 
-static const struct file_operations proc_SSID_ops = {
-	.owner		= THIS_MODULE,
-	.read		= proc_read,
-	.write		= proc_write,
-	.open		= proc_SSID_open,
-	.release	= proc_close,
-	.llseek		= default_llseek,
+static const struct proc_ops proc_SSID_ops = {
+	.proc_read	= proc_read,
+	.proc_write	= proc_write,
+	.proc_open	= proc_SSID_open,
+	.proc_release	= proc_close,
+	.proc_lseek	= default_llseek,
 };
 
-static const struct file_operations proc_BSSList_ops = {
-	.owner		= THIS_MODULE,
-	.read		= proc_read,
-	.write		= proc_write,
-	.open		= proc_BSSList_open,
-	.release	= proc_close,
-	.llseek		= default_llseek,
+static const struct proc_ops proc_BSSList_ops = {
+	.proc_read	= proc_read,
+	.proc_write	= proc_write,
+	.proc_open	= proc_BSSList_open,
+	.proc_release	= proc_close,
+	.proc_lseek	= default_llseek,
 };
 
-static const struct file_operations proc_APList_ops = {
-	.owner		= THIS_MODULE,
-	.read		= proc_read,
-	.write		= proc_write,
-	.open		= proc_APList_open,
-	.release	= proc_close,
-	.llseek		= default_llseek,
+static const struct proc_ops proc_APList_ops = {
+	.proc_read	= proc_read,
+	.proc_write	= proc_write,
+	.proc_open	= proc_APList_open,
+	.proc_release	= proc_close,
+	.proc_lseek	= default_llseek,
 };
 
-static const struct file_operations proc_config_ops = {
-	.owner		= THIS_MODULE,
-	.read		= proc_read,
-	.write		= proc_write,
-	.open		= proc_config_open,
-	.release	= proc_close,
-	.llseek		= default_llseek,
+static const struct proc_ops proc_config_ops = {
+	.proc_read	= proc_read,
+	.proc_write	= proc_write,
+	.proc_open	= proc_config_open,
+	.proc_release	= proc_close,
+	.proc_lseek	= default_llseek,
 };
 
-static const struct file_operations proc_wepkey_ops = {
-	.owner		= THIS_MODULE,
-	.read		= proc_read,
-	.write		= proc_write,
-	.open		= proc_wepkey_open,
-	.release	= proc_close,
-	.llseek		= default_llseek,
+static const struct proc_ops proc_wepkey_ops = {
+	.proc_read	= proc_read,
+	.proc_write	= proc_write,
+	.proc_open	= proc_wepkey_open,
+	.proc_release	= proc_close,
+	.proc_lseek	= default_llseek,
 };
 
 static struct proc_dir_entry *airo_entry;
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_module.c b/drivers/net/wireless/intel/ipw2x00/libipw_module.c
index 436b819..43bab92 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_module.c
@@ -240,13 +240,12 @@ static ssize_t debug_level_proc_write(struct file *file,
 	return strnlen(buf, len);
 }
 
-static const struct file_operations debug_level_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= debug_level_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= debug_level_proc_write,
+static const struct proc_ops debug_level_proc_ops = {
+	.proc_open	= debug_level_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= debug_level_proc_write,
 };
 #endif				/* CONFIG_LIBIPW_DEBUG */
 
@@ -263,7 +262,7 @@ static int __init libipw_init(void)
 		return -EIO;
 	}
 	e = proc_create("debug_level", 0644, libipw_proc,
-			&debug_level_proc_fops);
+			&debug_level_proc_ops);
 	if (!e) {
 		remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
 		libipw_proc = NULL;
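
The airo and libipw hunks above convert proc entries from struct file_operations to the dedicated struct proc_ops, dropping the .owner field and renaming the callbacks with a proc_ prefix. A hedged minimal sketch of registering one seq_file-backed entry with the new structure; the "example" names are illustrative and the handler is a stub, not anything from these drivers.

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch of a proc entry registered through struct proc_ops. */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct proc_ops example_proc_ops = {
	.proc_open	= example_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static struct proc_dir_entry *example_entry;

static int __init example_init(void)
{
	example_entry = proc_create("example_entry", 0444, NULL, &example_proc_ops);
	return example_entry ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	proc_remove(example_entry);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
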
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index d1e1758..da6d420 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -27,6 +27,7 @@
 #include <linux/firmware.h>
 #include <linux/etherdevice.h>
 #include <linux/if_arp.h>
+#include <linux/units.h>
 
 #include <net/mac80211.h>
 
@@ -6468,7 +6469,7 @@ il4965_set_hw_params(struct il_priv *il)
 	il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
 
 	il->hw_params.ct_kill_threshold =
-	   CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
+	   celsius_to_kelvin(CT_KILL_THRESHOLD_LEGACY);
 
 	il->hw_params.sens = &il4965_sensitivity;
 	il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index 32699b6..34d0579 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -17,6 +17,7 @@
 #include <linux/sched.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
+#include <linux/units.h>
 #include <net/mac80211.h>
 #include <linux/etherdevice.h>
 #include <asm/unaligned.h>
@@ -1104,7 +1105,7 @@ il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
 	/* get current temperature (Celsius) */
 	current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN);
 	current_temp = min(il->temperature, IL_TX_POWER_TEMPERATURE_MAX);
-	current_temp = KELVIN_TO_CELSIUS(current_temp);
+	current_temp = kelvin_to_celsius(current_temp);
 
 	/* select thermal txpower adjustment params, based on channel group
 	 *   (same frequency group used for mimo txatten adjustment) */
@@ -1610,8 +1611,8 @@ il4965_hw_get_temperature(struct il_priv *il)
 	temperature =
 	    (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
 
-	D_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
-	       KELVIN_TO_CELSIUS(temperature));
+	D_TEMP("Calibrated temperature: %dK, %ldC\n", temperature,
+	       kelvin_to_celsius(temperature));
 
 	return temperature;
 }
@@ -1670,12 +1671,12 @@ il4965_temperature_calib(struct il_priv *il)
 
 	if (il->temperature != temp) {
 		if (il->temperature)
-			D_TEMP("Temperature changed " "from %dC to %dC\n",
-			       KELVIN_TO_CELSIUS(il->temperature),
-			       KELVIN_TO_CELSIUS(temp));
+			D_TEMP("Temperature changed " "from %ldC to %ldC\n",
+			       kelvin_to_celsius(il->temperature),
+			       kelvin_to_celsius(temp));
 		else
-			D_TEMP("Temperature " "initialized to %dC\n",
-			       KELVIN_TO_CELSIUS(temp));
+			D_TEMP("Temperature " "initialized to %ldC\n",
+			       kelvin_to_celsius(temp));
 	}
 
 	il->temperature = temp;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index e7fb8e6..bc9cd7e 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -779,9 +779,6 @@ struct il_sensitivity_ranges {
 	u16 nrg_th_cca;
 };
 
-#define KELVIN_TO_CELSIUS(x) ((x)-273)
-#define CELSIUS_TO_KELVIN(x) ((x)+273)
-
 /**
  * struct il_hw_params
  * @bcast_id: f/w broadcast station ID
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index be5ef4c..8d83800 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -237,11 +237,6 @@ struct iwl_sensitivity_ranges {
 	u16 nrg_th_cca;
 };
 
-
-#define KELVIN_TO_CELSIUS(x) ((x)-273)
-#define CELSIUS_TO_KELVIN(x) ((x)+273)
-
-
 /******************************************************************************
  *
  * Functions implemented in core module which are forward declared here
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index dc3f197..d42bc46 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -10,6 +10,8 @@
  *
  *****************************************************************************/
 
+#include <linux/units.h>
+
 /*
  * DVM device-specific data & functions
  */
@@ -345,7 +347,7 @@ static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
 static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
 {
 	const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
-	s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
+	s32 threshold = (s32)celsius_to_kelvin(CT_KILL_THRESHOLD_LEGACY) -
 			iwl_temp_calib_to_offset(priv);
 
 	priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
@@ -381,7 +383,7 @@ static void iwl5150_temperature(struct iwl_priv *priv)
 	vt = le32_to_cpu(priv->statistics.common.temperature);
 	vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
 	/* now vt hold the temperature in Kelvin */
-	priv->temperature = KELVIN_TO_CELSIUS(vt);
+	priv->temperature = kelvin_to_celsius(vt);
 	iwl_tt_handler(priv);
 }
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 8878409..22a32eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1897,27 +1897,55 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
 		ieee80211_resume_disconnect(vif);
 }
 
-static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
-				   struct ieee80211_vif *vif)
+static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id)
 {
-	u32 base = mvm->trans->dbg.lmac_error_event_table[0];
 	struct error_table_start {
 		/* cf. struct iwl_error_event_table */
 		u32 valid;
-		u32 error_id;
+		__le32 err_id;
 	} err_info;
 
-	iwl_trans_read_mem_bytes(mvm->trans, base,
-				 &err_info, sizeof(err_info));
+	if (!base)
+		return false;
 
-	if (err_info.valid &&
-	    err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
-		struct cfg80211_wowlan_wakeup wakeup = {
-			.rfkill_release = true,
-		};
-		ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
+	iwl_trans_read_mem_bytes(trans, base,
+				 &err_info, sizeof(err_info));
+	if (err_info.valid && err_id)
+		*err_id = le32_to_cpu(err_info.err_id);
+
+	return !!err_info.valid;
+}
+
+static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+				   struct ieee80211_vif *vif)
+{
+	u32 err_id;
+
+	/* check for lmac1 error */
+	if (iwl_mvm_rt_status(mvm->trans,
+			      mvm->trans->dbg.lmac_error_event_table[0],
+			      &err_id)) {
+		if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+			struct cfg80211_wowlan_wakeup wakeup = {
+				.rfkill_release = true,
+			};
+			ieee80211_report_wowlan_wakeup(vif, &wakeup,
+						       GFP_KERNEL);
+		}
+		return true;
 	}
-	return err_info.valid;
+
+	/* check lmac2, if present, for an error */
+	if (iwl_mvm_rt_status(mvm->trans,
+			      mvm->trans->dbg.lmac_error_event_table[1], NULL))
+		return true;
+
+	/* check for umac error */
+	if (iwl_mvm_rt_status(mvm->trans,
+			      mvm->trans->dbg.umac_error_event_table, NULL))
+		return true;
+
+	return false;
 }
 
 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index f783d6d..6e1ea92 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -8,6 +8,7 @@
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  * Copyright (C) 2018 Intel Corporation
  * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  * Copyright (C) 2018 Intel Corporation
  * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -528,6 +530,8 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
 	if (req != mvm->ftm_initiator.req)
 		return;
 
+	iwl_mvm_ftm_reset(mvm);
+
 	if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
 						 LOCATION_GROUP, 0),
 				 0, sizeof(cmd), &cmd))
@@ -641,7 +645,6 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 	lockdep_assert_held(&mvm->mutex);
 
 	if (!mvm->ftm_initiator.req) {
-		IWL_ERR(mvm, "Got FTM response but have no request?\n");
 		return;
 	}
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 6717f25..02df603 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -5,10 +5,9 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -2037,7 +2035,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
 	rcu_read_lock();
 
 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
-	if (IS_ERR(sta)) {
+	if (IS_ERR_OR_NULL(sta)) {
 		rcu_read_unlock();
 		WARN(1, "Can't find STA to configure HE\n");
 		return;
@@ -3293,7 +3291,7 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
 	if (fw_has_capa(&mvm->fw->ucode_capa,
 			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
 		iwl_mvm_schedule_session_protection(mvm, vif, 900,
-						    min_duration);
+						    min_duration, false);
 	else
 		iwl_mvm_protect_session(mvm, vif, duration,
 					min_duration, 500, false);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 7b35f4164..64ef3f3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -3320,6 +3320,10 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
 	igtk_cmd.sta_id = cpu_to_le32(sta_id);
 
 	if (remove_key) {
+		/* This is a valid situation for IGTK */
+		if (sta_id == IWL_MVM_INVALID_STA)
+			return 0;
+
 		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
 	} else {
 		struct ieee80211_key_seq seq;
@@ -3574,9 +3578,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
 		      keyconf->keyidx, sta_id);
 
-	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
+	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
 		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
 
 	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 1851719..d781777 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -205,9 +205,15 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
 
-	mutex_lock(&mvm->mutex);
 	/* Protect the session to hear the TDLS setup response on the channel */
-	iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
+	mutex_lock(&mvm->mutex);
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+		iwl_mvm_schedule_session_protection(mvm, vif, duration,
+						    duration, true);
+	else
+		iwl_mvm_protect_session(mvm, vif, duration,
+					duration, 100, true);
 	mutex_unlock(&mvm->mutex);
 }
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 51b1386..c0b420f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1056,13 +1056,42 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
 	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
 }
 
+static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
+				       struct iwl_rx_packet *pkt, void *data)
+{
+	struct iwl_mvm *mvm =
+		container_of(notif_wait, struct iwl_mvm, notif_wait);
+	struct iwl_mvm_session_prot_notif *resp;
+	int resp_len = iwl_rx_packet_payload_len(pkt);
+
+	if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
+		    pkt->hdr.group_id != MAC_CONF_GROUP))
+		return true;
+
+	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+		IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
+		return true;
+	}
+
+	resp = (void *)pkt->data;
+
+	if (!resp->status)
+		IWL_ERR(mvm,
+			"TIME_EVENT_NOTIFICATION received but not executed\n");
+
+	return true;
+}
+
 void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
 					 struct ieee80211_vif *vif,
-					 u32 duration, u32 min_duration)
+					 u32 duration, u32 min_duration,
+					 bool wait_for_notif)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-
+	const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
+					 MAC_CONF_GROUP, 0) };
+	struct iwl_notification_wait wait_notif;
 	struct iwl_mvm_session_prot_cmd cmd = {
 		.id_and_color =
 			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
@@ -1071,7 +1100,6 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
 		.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
 		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
 	};
-	int ret;
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -1092,14 +1120,35 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
 	IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
 		     le32_to_cpu(cmd.duration_tu));
 
-	ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
-						   MAC_CONF_GROUP, 0),
-				   0, sizeof(cmd), &cmd);
-	if (ret) {
+	if (!wait_for_notif) {
+		if (iwl_mvm_send_cmd_pdu(mvm,
+					 iwl_cmd_id(SESSION_PROTECTION_CMD,
+						    MAC_CONF_GROUP, 0),
+					 0, sizeof(cmd), &cmd)) {
+			IWL_ERR(mvm,
+				"Couldn't send the SESSION_PROTECTION_CMD\n");
+			spin_lock_bh(&mvm->time_event_lock);
+			iwl_mvm_te_clear_data(mvm, te_data);
+			spin_unlock_bh(&mvm->time_event_lock);
+		}
+
+		return;
+	}
+
+	iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
+				   notif, ARRAY_SIZE(notif),
+				   iwl_mvm_session_prot_notif, NULL);
+
+	if (iwl_mvm_send_cmd_pdu(mvm,
+				 iwl_cmd_id(SESSION_PROTECTION_CMD,
+					    MAC_CONF_GROUP, 0),
+				 0, sizeof(cmd), &cmd)) {
 		IWL_ERR(mvm,
-			"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
-		spin_lock_bh(&mvm->time_event_lock);
-		iwl_mvm_te_clear_data(mvm, te_data);
-		spin_unlock_bh(&mvm->time_event_lock);
+			"Couldn't send the SESSION_PROTECTION_CMD\n");
+		iwl_remove_notification(&mvm->notif_wait, &wait_notif);
+	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
+					 TU_TO_JIFFIES(100))) {
+		IWL_ERR(mvm,
+			"Failed to protect session until session protection\n");
 	}
 }
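
The wait_for_notif path added above follows iwlwifi's generic notification-wait ordering: register a waiter on mvm->notif_wait, send the command, then either wait for the firmware notification or drop the waiter if the send fails. A minimal sketch of that ordering, assuming the iwlwifi mvm context; EXAMPLE_NOTIF_ID and example_send_cmd() are placeholders, not part of the patch:

/*
 * Hedged sketch of the init -> send -> wait/remove sequence used above.
 * Only the ordering mirrors the hunk; IDs and the send helper are made up.
 */
static bool example_notif_fn(struct iwl_notif_wait_data *notif_wait,
			     struct iwl_rx_packet *pkt, void *data)
{
	return true;	/* returning true ends the wait once the notif arrives */
}

static void example_send_and_wait(struct iwl_mvm *mvm)
{
	const u16 notifs[] = { EXAMPLE_NOTIF_ID };	/* placeholder ID */
	struct iwl_notification_wait wait;

	iwl_init_notification_wait(&mvm->notif_wait, &wait,
				   notifs, ARRAY_SIZE(notifs),
				   example_notif_fn, NULL);

	if (example_send_cmd(mvm))	/* placeholder for the command send */
		iwl_remove_notification(&mvm->notif_wait, &wait);
	else if (iwl_wait_notification(&mvm->notif_wait, &wait, HZ / 10))
		IWL_ERR(mvm, "notification did not arrive in time\n");
}
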
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
index df6832b..3186d7e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -250,10 +250,12 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
  * @mvm: the mvm component
  * @vif: the virtual interface for which the protection issued
  * @duration: the duration of the protection
+ * @wait_for_notif: if true, will block until the start of the protection
  */
 void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
 					 struct ieee80211_vif *vif,
-					 u32 duration, u32 min_duration);
+					 u32 duration, u32 min_duration,
+					 bool wait_for_notif);
 
 /**
  * iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index b5a16f0..418e59b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -8,7 +8,7 @@
  * Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
  * Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -234,7 +234,7 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
 		.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
 	};
 	struct iwl_ext_dts_measurement_cmd extcmd = {
-		.control_mode = cpu_to_le32(DTS_AUTOMATIC),
+		.control_mode = cpu_to_le32(DTS_DIRECT_WITHOUT_MEASURE),
 	};
 	u32 cmdid;
 
@@ -734,7 +734,8 @@ static  struct thermal_zone_device_ops tzone_ops = {
 static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
 {
 	int i;
-	char name[] = "iwlwifi";
+	char name[16];
+	static atomic_t counter = ATOMIC_INIT(0);
 
 	if (!iwl_mvm_is_tt_in_fw(mvm)) {
 		mvm->tz_device.tzone = NULL;
@@ -744,6 +745,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
 
 	BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
 
+	sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
 	mvm->tz_device.tzone = thermal_zone_device_register(name,
 							IWL_MAX_DTS_TRIPS,
 							IWL_WRITABLE_TRIPS_MSK,
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index e323e9a..58212c5 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -126,7 +126,7 @@ static void prism2_check_sta_fw_version(local_info_t *local);
 
 #ifdef PRISM2_DOWNLOAD_SUPPORT
 /* hostap_download.c */
-static const struct file_operations prism2_download_aux_dump_proc_fops;
+static const struct proc_ops prism2_download_aux_dump_proc_ops;
 static u8 * prism2_read_pda(struct net_device *dev);
 static int prism2_download(local_info_t *local,
 			   struct prism2_download_param *param);
@@ -3094,7 +3094,7 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
 	local->func->reset_port = prism2_reset_port;
 	local->func->schedule_reset = prism2_schedule_reset;
 #ifdef PRISM2_DOWNLOAD_SUPPORT
-	local->func->read_aux_fops = &prism2_download_aux_dump_proc_fops;
+	local->func->read_aux_proc_ops = &prism2_download_aux_dump_proc_ops;
 	local->func->download = prism2_download;
 #endif /* PRISM2_DOWNLOAD_SUPPORT */
 	local->func->tx = prism2_tx_80211;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index 6151d8d..a2ee469 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -211,9 +211,9 @@ static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
 	return count;
 }
 
-static const struct file_operations prism2_pda_proc_fops = {
-	.read		= prism2_pda_proc_read,
-	.llseek		= generic_file_llseek,
+static const struct proc_ops prism2_pda_proc_ops = {
+	.proc_read	= prism2_pda_proc_read,
+	.proc_lseek	= generic_file_llseek,
 };
 
 
@@ -223,8 +223,8 @@ static ssize_t prism2_aux_dump_proc_no_read(struct file *file, char __user *buf,
 	return 0;
 }
 
-static const struct file_operations prism2_aux_dump_proc_fops = {
-	.read		= prism2_aux_dump_proc_no_read,
+static const struct proc_ops prism2_aux_dump_proc_ops = {
+	.proc_read	= prism2_aux_dump_proc_no_read,
 };
 
 
@@ -379,9 +379,9 @@ void hostap_init_proc(local_info_t *local)
 	proc_create_seq_data("wds", 0, local->proc,
 			&prism2_wds_proc_seqops, local);
 	proc_create_data("pda", 0, local->proc,
-			 &prism2_pda_proc_fops, local);
+			 &prism2_pda_proc_ops, local);
 	proc_create_data("aux_dump", 0, local->proc,
-			 local->func->read_aux_fops ?: &prism2_aux_dump_proc_fops,
+			 local->func->read_aux_proc_ops ?: &prism2_aux_dump_proc_ops,
 			 local);
 	proc_create_seq_data("bss_list", 0, local->proc,
 			&prism2_bss_list_proc_seqops, local);
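
The hostap change above is one instance of the tree-wide procfs migration from struct file_operations to struct proc_ops: the .owner field is dropped and each callback gains a proc_ prefix, while proc_create()/proc_create_data() take the new ops type. A minimal sketch with a placeholder handler and entry name, assuming <linux/proc_fs.h>:

static ssize_t example_proc_read(struct file *file, char __user *buf,
				 size_t count, loff_t *ppos)
{
	return 0;	/* placeholder read handler, not from the patch */
}

static const struct proc_ops example_proc_ops = {
	.proc_read	= example_proc_read,	/* was .read */
	.proc_lseek	= generic_file_llseek,	/* was .llseek; .owner is gone */
};

/* registration is unchanged apart from the ops type, e.g.: */
/* proc_create_data("example", 0444, parent, &example_proc_ops, priv); */
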
diff --git a/drivers/net/wireless/intersil/hostap/hostap_wlan.h b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
index a8c4c1a..487883f 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
@@ -599,7 +599,7 @@ struct prism2_helper_functions {
 			struct prism2_download_param *param);
 	int (*tx)(struct sk_buff *skb, struct net_device *dev);
 	int (*set_tim)(struct net_device *dev, int aid, int set);
-	const struct file_operations *read_aux_fops;
+	const struct proc_ops *read_aux_proc_ops;
 
 	int need_tx_headroom; /* number of bytes of headroom needed before
 			       * IEEE 802.11 header */
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index c9401c1..4e3de68 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -1785,6 +1785,8 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
 		rates_max = rates_eid[1];
 		if (rates_max > MAX_RATES) {
 			lbs_deb_join("invalid rates");
+			rcu_read_unlock();
+			ret = -EINVAL;
 			goto out;
 		}
 		rates = cmd.bss.rates;
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 98f942b..a7968a8 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -2884,6 +2884,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv,
 			vs_param_set->header.len =
 				cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
 				& 0x00FF) + 2);
+			if (le16_to_cpu(vs_param_set->header.len) >
+				MWIFIEX_MAX_VSIE_LEN) {
+				mwifiex_dbg(priv->adapter, ERROR,
+					    "Invalid param length!\n");
+				break;
+			}
+
 			memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
 			       le16_to_cpu(vs_param_set->header.len));
 			*buffer += le16_to_cpu(vs_param_set->header.len) +
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 6dd835f..fbfa0b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -232,6 +232,7 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
 
 	if (country_ie_len >
 	    (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) {
+		rcu_read_unlock();
 		mwifiex_dbg(priv->adapter, ERROR,
 			    "11D: country_ie_len overflow!, deauth AP\n");
 		return -EINVAL;
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 41f0231..132f9e8 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -970,6 +970,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
 				    "WMM Parameter Set Count: %d\n",
 				    wmm_param_ie->qos_info_bitmap & mask);
 
+			if (wmm_param_ie->vend_hdr.len + 2 >
+				sizeof(struct ieee_types_wmm_parameter))
+				break;
+
 			memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
 			       wmm_ie, wmm_param_ie,
 			       wmm_param_ie->vend_hdr.len + 2);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index eccad49..17e277b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -92,8 +92,9 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
 
 static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
 {
-	u8 val, *eeprom = dev->mt76.eeprom.data;
+	u8 *eeprom = dev->mt76.eeprom.data;
 	u8 tx_mask, rx_mask, max_nss;
+	u32 val;
 
 	val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
 			eeprom[MT_EE_WIFI_CONF]);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index cf37268..c1d542b 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2717,10 +2717,9 @@ static ssize_t ray_cs_essid_proc_write(struct file *file,
 	return count;
 }
 
-static const struct file_operations ray_cs_essid_proc_fops = {
-	.owner		= THIS_MODULE,
-	.write		= ray_cs_essid_proc_write,
-	.llseek		= noop_llseek,
+static const struct proc_ops ray_cs_essid_proc_ops = {
+	.proc_write	= ray_cs_essid_proc_write,
+	.proc_lseek	= noop_llseek,
 };
 
 static ssize_t int_proc_write(struct file *file, const char __user *buffer,
@@ -2751,10 +2750,9 @@ static ssize_t int_proc_write(struct file *file, const char __user *buffer,
 	return count;
 }
 
-static const struct file_operations int_proc_fops = {
-	.owner		= THIS_MODULE,
-	.write		= int_proc_write,
-	.llseek		= noop_llseek,
+static const struct proc_ops int_proc_ops = {
+	.proc_write	= int_proc_write,
+	.proc_lseek	= noop_llseek,
 };
 #endif
 
@@ -2790,10 +2788,10 @@ static int __init init_ray_cs(void)
 	proc_mkdir("driver/ray_cs", NULL);
 
 	proc_create_single("driver/ray_cs/ray_cs", 0, NULL, ray_cs_proc_show);
-	proc_create("driver/ray_cs/essid", 0200, NULL, &ray_cs_essid_proc_fops);
-	proc_create_data("driver/ray_cs/net_type", 0200, NULL, &int_proc_fops,
+	proc_create("driver/ray_cs/essid", 0200, NULL, &ray_cs_essid_proc_ops);
+	proc_create_data("driver/ray_cs/net_type", 0200, NULL, &int_proc_ops,
 			 &net_type);
-	proc_create_data("driver/ray_cs/translate", 0200, NULL, &int_proc_fops,
+	proc_create_data("driver/ray_cs/translate", 0200, NULL, &int_proc_ops,
 			 &translate);
 #endif
 	if (translate != 0)
diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c
index af5c27e1..4820dca 100644
--- a/drivers/net/wireless/realtek/rtw88/wow.c
+++ b/drivers/net/wireless/realtek/rtw88/wow.c
@@ -281,27 +281,26 @@ static void rtw_wow_rx_dma_start(struct rtw_dev *rtwdev)
 	rtw_write32_clr(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE);
 }
 
-static bool rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
+static int rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
 {
-	bool ret;
-
 	/* wait 100ms for wow firmware to finish work */
 	msleep(100);
 
 	if (wow_enable) {
-		if (!rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
-			ret = 0;
+		if (rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
+			goto wow_fail;
 	} else {
-		if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) == 0 &&
-		    rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE) == 0)
-			ret = 0;
+		if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) ||
+		    rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE))
+			goto wow_fail;
 	}
 
-	if (ret)
-		rtw_err(rtwdev, "failed to check wow status %s\n",
-			wow_enable ? "enabled" : "disabled");
+	return 0;
 
-	return ret;
+wow_fail:
+	rtw_err(rtwdev, "failed to check wow status %s\n",
+		wow_enable ? "enabled" : "disabled");
+	return -EBUSY;
 }
 
 static void rtw_wow_fw_security_type_iter(struct ieee80211_hw *hw,
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ad8e4df..4eae441 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -337,13 +337,7 @@ static void pmem_release_disk(void *__pmem)
 	put_disk(pmem->disk);
 }
 
-static void pmem_pagemap_page_free(struct page *page)
-{
-	wake_up_var(&page->_refcount);
-}
-
 static const struct dev_pagemap_ops fsdax_pagemap_ops = {
-	.page_free		= pmem_pagemap_page_free,
 	.kill			= pmem_pagemap_kill,
 	.cleanup		= pmem_pagemap_cleanup,
 };
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
index a5af21f..2e6477e 100644
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -5,14 +5,11 @@
  */
 
 #include <linux/hwmon.h>
+#include <linux/units.h>
 #include <asm/unaligned.h>
 
 #include "nvme.h"
 
-/* These macros should be moved to linux/temperature.h */
-#define MILLICELSIUS_TO_KELVIN(t) DIV_ROUND_CLOSEST((t) + 273150, 1000)
-#define KELVIN_TO_MILLICELSIUS(t) ((t) * 1000L - 273150)
-
 struct nvme_hwmon_data {
 	struct nvme_ctrl *ctrl;
 	struct nvme_smart_log log;
@@ -35,7 +32,7 @@ static int nvme_get_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
 		return -EIO;
 	if (ret < 0)
 		return ret;
-	*temp = KELVIN_TO_MILLICELSIUS(status & NVME_TEMP_THRESH_MASK);
+	*temp = kelvin_to_millicelsius(status & NVME_TEMP_THRESH_MASK);
 
 	return 0;
 }
@@ -46,7 +43,7 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
 	unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT;
 	int ret;
 
-	temp = MILLICELSIUS_TO_KELVIN(temp);
+	temp = millicelsius_to_kelvin(temp);
 	threshold |= clamp_val(temp, 0, NVME_TEMP_THRESH_MASK);
 
 	if (under)
@@ -88,7 +85,7 @@ static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
 	case hwmon_temp_min:
 		return nvme_get_temp_thresh(data->ctrl, channel, true, val);
 	case hwmon_temp_crit:
-		*val = KELVIN_TO_MILLICELSIUS(data->ctrl->cctemp);
+		*val = kelvin_to_millicelsius(data->ctrl->cctemp);
 		return 0;
 	default:
 		break;
@@ -105,7 +102,7 @@ static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
 			temp = get_unaligned_le16(log->temperature);
 		else
 			temp = le16_to_cpu(log->temp_sensor[channel - 1]);
-		*val = KELVIN_TO_MILLICELSIUS(temp);
+		*val = kelvin_to_millicelsius(temp);
 		break;
 	case hwmon_temp_alarm:
 		*val = !!(log->critical_warning & NVME_SMART_CRIT_TEMPERATURE);
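
For reference, the replaced driver-local macros and the generic <linux/units.h> helpers compute the same conversion (0 °C = 273.15 K, rounded at millidegree granularity): a threshold of 36850 m°C maps to DIV_ROUND_CLOSEST(36850 + 273150, 1000) = 310 K, and 310 K maps back to 310 * 1000 - 273150 = 36850 m°C.
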
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 365a2dd..da392b5 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -167,7 +167,6 @@ struct nvme_queue {
 	 /* only used for poll queues: */
 	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
 	volatile struct nvme_completion *cqes;
-	struct blk_mq_tags **tags;
 	dma_addr_t sq_dma_addr;
 	dma_addr_t cq_dma_addr;
 	u32 __iomem *q_db;
@@ -376,29 +375,17 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 
 	WARN_ON(hctx_idx != 0);
 	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
-	WARN_ON(nvmeq->tags);
 
 	hctx->driver_data = nvmeq;
-	nvmeq->tags = &dev->admin_tagset.tags[0];
 	return 0;
 }
 
-static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
-{
-	struct nvme_queue *nvmeq = hctx->driver_data;
-
-	nvmeq->tags = NULL;
-}
-
 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 			  unsigned int hctx_idx)
 {
 	struct nvme_dev *dev = data;
 	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
 
-	if (!nvmeq->tags)
-		nvmeq->tags = &dev->tagset.tags[hctx_idx];
-
 	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
 	hctx->driver_data = nvmeq;
 	return 0;
@@ -948,6 +935,13 @@ static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
 		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
 }
 
+static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
+{
+	if (!nvmeq->qid)
+		return nvmeq->dev->admin_tagset.tags[0];
+	return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
+}
+
 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 {
 	volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
@@ -972,7 +966,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 		return;
 	}
 
-	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
+	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
 	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
 	nvme_end_request(req, cqe->status, cqe->result);
 }
@@ -1572,7 +1566,6 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
 	.queue_rq	= nvme_queue_rq,
 	.complete	= nvme_pci_complete_rq,
 	.init_hctx	= nvme_admin_init_hctx,
-	.exit_hctx      = nvme_admin_exit_hctx,
 	.init_request	= nvme_init_request,
 	.timeout	= nvme_timeout,
 };
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 28438b8..576de77 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -129,27 +129,8 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
 	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
 }
 
-static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
 {
-	struct nvmet_req *req;
-
-	while (1) {
-		mutex_lock(&ctrl->lock);
-		if (!ctrl->nr_async_event_cmds) {
-			mutex_unlock(&ctrl->lock);
-			return;
-		}
-
-		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
-		mutex_unlock(&ctrl->lock);
-		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
-	}
-}
-
-static void nvmet_async_event_work(struct work_struct *work)
-{
-	struct nvmet_ctrl *ctrl =
-		container_of(work, struct nvmet_ctrl, async_event_work);
 	struct nvmet_async_event *aen;
 	struct nvmet_req *req;
 
@@ -159,20 +140,43 @@ static void nvmet_async_event_work(struct work_struct *work)
 				struct nvmet_async_event, entry);
 		if (!aen || !ctrl->nr_async_event_cmds) {
 			mutex_unlock(&ctrl->lock);
-			return;
+			break;
 		}
 
 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
-		nvmet_set_result(req, nvmet_async_event_result(aen));
+		if (status == 0)
+			nvmet_set_result(req, nvmet_async_event_result(aen));
 
 		list_del(&aen->entry);
 		kfree(aen);
 
 		mutex_unlock(&ctrl->lock);
-		nvmet_req_complete(req, 0);
+		nvmet_req_complete(req, status);
 	}
 }
 
+static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+{
+	struct nvmet_req *req;
+
+	mutex_lock(&ctrl->lock);
+	while (ctrl->nr_async_event_cmds) {
+		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+		mutex_unlock(&ctrl->lock);
+		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+		mutex_lock(&ctrl->lock);
+	}
+	mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_event_work(struct work_struct *work)
+{
+	struct nvmet_ctrl *ctrl =
+		container_of(work, struct nvmet_ctrl, async_event_work);
+
+	nvmet_async_events_process(ctrl, 0);
+}
+
 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 		u8 event_info, u8 log_page)
 {
@@ -555,7 +559,8 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 	} else {
 		struct nvmet_ns *old;
 
-		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
+		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
+					lockdep_is_held(&subsys->lock)) {
 			BUG_ON(ns->nsid == old->nsid);
 			if (ns->nsid < old->nsid)
 				break;
@@ -752,19 +757,24 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
 
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
+	u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
+	struct nvmet_ctrl *ctrl = sq->ctrl;
+
 	/*
 	 * If this is the admin queue, complete all AERs so that our
 	 * queue doesn't have outstanding requests on it.
 	 */
-	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
-		nvmet_async_events_free(sq->ctrl);
+	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) {
+		nvmet_async_events_process(ctrl, status);
+		nvmet_async_events_free(ctrl);
+	}
 	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
 	wait_for_completion(&sq->confirm_done);
 	wait_for_completion(&sq->free_done);
 	percpu_ref_exit(&sq->ref);
 
-	if (sq->ctrl) {
-		nvmet_ctrl_put(sq->ctrl);
+	if (ctrl) {
+		nvmet_ctrl_put(ctrl);
 		sq->ctrl = NULL; /* allows reusing the queue later */
 	}
 }
@@ -938,6 +948,17 @@ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
 }
 EXPORT_SYMBOL_GPL(nvmet_check_data_len);
 
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
+{
+	if (unlikely(data_len > req->transfer_len)) {
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
+		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+		return false;
+	}
+
+	return true;
+}
+
 int nvmet_req_alloc_sgl(struct nvmet_req *req)
 {
 	struct pci_dev *p2p_dev = NULL;
@@ -1172,7 +1193,8 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
 
 	ctrl->p2p_client = get_device(req->p2p_client);
 
-	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
+				lockdep_is_held(&ctrl->subsys->lock))
 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
 }
 
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index f729747..feef15c 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -109,6 +109,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 	u16 qid = le16_to_cpu(c->qid);
 	u16 sqsize = le16_to_cpu(c->sqsize);
 	struct nvmet_ctrl *old;
+	u16 ret;
 
 	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
 	if (old) {
@@ -119,7 +120,8 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 	if (!sqsize) {
 		pr_warn("queue size zero!\n");
 		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
-		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		goto err;
 	}
 
 	/* note: convert queue size from 0's-based value to 1's-based value */
@@ -132,16 +134,19 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 	}
 
 	if (ctrl->ops->install_queue) {
-		u16 ret = ctrl->ops->install_queue(req->sq);
-
+		ret = ctrl->ops->install_queue(req->sq);
 		if (ret) {
 			pr_err("failed to install queue %d cntlid %d ret %x\n",
-				qid, ret, ctrl->cntlid);
-			return ret;
+				qid, ctrl->cntlid, ret);
+			goto err;
 		}
 	}
 
 	return 0;
+
+err:
+	req->sq->ctrl = NULL;
+	return ret;
 }
 
 static void nvmet_execute_admin_connect(struct nvmet_req *req)
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index b6fca0e..ea0e596 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -280,7 +280,7 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
 
 static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
 {
-	if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
 		return;
 
 	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index caebfce..cd5670b 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -336,7 +336,7 @@ static void nvmet_file_dsm_work(struct work_struct *w)
 
 static void nvmet_file_execute_dsm(struct nvmet_req *req)
 {
-	if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
 		return;
 	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
 	schedule_work(&req->f.work);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 46df45e..eda28b2 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -374,6 +374,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
 void nvmet_req_uninit(struct nvmet_req *req);
 bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
 int nvmet_req_alloc_sgl(struct nvmet_req *req);
 void nvmet_req_free_sgl(struct nvmet_req *req);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 73567e9..35efab1 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -109,6 +109,14 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called nvmem_qfprom.
 
+config NVMEM_SPMI_SDAM
+	tristate "SPMI SDAM Support"
+	depends on SPMI
+	help
+	  This driver supports the Shared Direct Access Memory Module on
+	  Qualcomm Technologies, Inc. PMICs. It provides the clients
+	  an interface to read/write to the SDAM module's shared memory.
+
 config ROCKCHIP_EFUSE
 	tristate "Rockchip eFuse Support"
 	depends on ARCH_ROCKCHIP || COMPILE_TEST
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 9e66782..6b466cd 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -28,6 +28,8 @@
 nvmem_mtk-efuse-y		:= mtk-efuse.o
 obj-$(CONFIG_QCOM_QFPROM)	+= nvmem_qfprom.o
 nvmem_qfprom-y			:= qfprom.o
+obj-$(CONFIG_NVMEM_SPMI_SDAM)	+= nvmem_qcom-spmi-sdam.o
+nvmem_qcom-spmi-sdam-y		+= qcom-spmi-sdam.o
 obj-$(CONFIG_ROCKCHIP_EFUSE)	+= nvmem_rockchip_efuse.o
 nvmem_rockchip_efuse-y		:= rockchip-efuse.o
 obj-$(CONFIG_ROCKCHIP_OTP)	+= nvmem-rockchip-otp.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 9f1ee9c..ef326f2 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/nvmem-consumer.h>
 #include <linux/nvmem-provider.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/slab.h>
 #include "nvmem.h"
@@ -54,8 +55,14 @@ static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
 static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
 			   void *val, size_t bytes)
 {
-	if (nvmem->reg_write)
-		return nvmem->reg_write(nvmem->priv, offset, val, bytes);
+	int ret;
+
+	if (nvmem->reg_write) {
+		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
+		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
+		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
+		return ret;
+	}
 
 	return -EINVAL;
 }
@@ -83,7 +90,7 @@ static void nvmem_cell_drop(struct nvmem_cell *cell)
 	list_del(&cell->node);
 	mutex_unlock(&nvmem_mutex);
 	of_node_put(cell->np);
-	kfree(cell->name);
+	kfree_const(cell->name);
 	kfree(cell);
 }
 
@@ -110,7 +117,9 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
 	cell->nvmem = nvmem;
 	cell->offset = info->offset;
 	cell->bytes = info->bytes;
-	cell->name = info->name;
+	cell->name = kstrdup_const(info->name, GFP_KERNEL);
+	if (!cell->name)
+		return -ENOMEM;
 
 	cell->bit_offset = info->bit_offset;
 	cell->nbits = info->nbits;
@@ -300,7 +309,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
 			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
 				cell->name, nvmem->stride);
 			/* Cells already added will be freed later. */
-			kfree(cell->name);
+			kfree_const(cell->name);
 			kfree(cell);
 			return -EINVAL;
 		}
@@ -338,6 +347,14 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 		kfree(nvmem);
 		return ERR_PTR(rval);
 	}
+	if (config->wp_gpio)
+		nvmem->wp_gpio = config->wp_gpio;
+	else
+		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
+						    GPIOD_OUT_HIGH);
+	if (IS_ERR(nvmem->wp_gpio))
+		return ERR_CAST(nvmem->wp_gpio);
+
 
 	kref_init(&nvmem->refcnt);
 	INIT_LIST_HEAD(&nvmem->cells);
diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
index 03f1ab2..399e1eb 100644
--- a/drivers/nvmem/imx-ocotp-scu.c
+++ b/drivers/nvmem/imx-ocotp-scu.c
@@ -15,8 +15,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
-#define IMX_SIP_OTP			0xC200000A
-#define IMX_SIP_OTP_WRITE		0x2
+#define IMX_SIP_OTP_WRITE		0xc200000B
 
 enum ocotp_devtype {
 	IMX8QXP,
@@ -139,8 +138,8 @@ static int imx_scu_ocotp_read(void *context, unsigned int offset,
 	void *p;
 	int i, ret;
 
-	index = offset >> 2;
-	num_bytes = round_up((offset % 4) + bytes, 4);
+	index = offset;
+	num_bytes = round_up(bytes, 4);
 	count = num_bytes >> 2;
 
 	if (count > (priv->data->nregs - index))
@@ -169,7 +168,7 @@ static int imx_scu_ocotp_read(void *context, unsigned int offset,
 		buf++;
 	}
 
-	memcpy(val, (u8 *)p + offset % 4, bytes);
+	memcpy(val, (u8 *)p, bytes);
 
 	mutex_unlock(&scu_ocotp_mutex);
 
@@ -189,10 +188,10 @@ static int imx_scu_ocotp_write(void *context, unsigned int offset,
 	int ret;
 
 	/* allow only writing one complete OTP word at a time */
-	if ((bytes != 4) || (offset % 4))
+	if (bytes != 4)
 		return -EINVAL;
 
-	index = offset >> 2;
+	index = offset;
 
 	if (in_hole(context, index))
 		return -EINVAL;
@@ -212,8 +211,7 @@ static int imx_scu_ocotp_write(void *context, unsigned int offset,
 
 	mutex_lock(&scu_ocotp_mutex);
 
-	arm_smccc_smc(IMX_SIP_OTP, IMX_SIP_OTP_WRITE, index, *buf,
-		      0, 0, 0, 0, &res);
+	arm_smccc_smc(IMX_SIP_OTP_WRITE, index, *buf, 0, 0, 0, 0, 0, &res);
 
 	mutex_unlock(&scu_ocotp_mutex);
 
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index fc40555..4ba9cc8 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -44,6 +44,14 @@
 #define IMX_OCOTP_BM_CTRL_ERROR		0x00000200
 #define IMX_OCOTP_BM_CTRL_REL_SHADOWS	0x00000400
 
+#define IMX_OCOTP_BM_CTRL_DEFAULT				\
+	{							\
+		.bm_addr = IMX_OCOTP_BM_CTRL_ADDR,		\
+		.bm_busy = IMX_OCOTP_BM_CTRL_BUSY,		\
+		.bm_error = IMX_OCOTP_BM_CTRL_ERROR,		\
+		.bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS,\
+	}
+
 #define TIMING_STROBE_PROG_US		10	/* Min time to blow a fuse */
 #define TIMING_STROBE_READ_NS		37	/* Min time before read */
 #define TIMING_RELAX_NS			17
@@ -62,18 +70,31 @@ struct ocotp_priv {
 	struct nvmem_config *config;
 };
 
+struct ocotp_ctrl_reg {
+	u32 bm_addr;
+	u32 bm_busy;
+	u32 bm_error;
+	u32 bm_rel_shadows;
+};
+
 struct ocotp_params {
 	unsigned int nregs;
 	unsigned int bank_address_words;
 	void (*set_timing)(struct ocotp_priv *priv);
+	struct ocotp_ctrl_reg ctrl;
 };
 
-static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags)
+static int imx_ocotp_wait_for_busy(struct ocotp_priv *priv, u32 flags)
 {
 	int count;
 	u32 c, mask;
+	u32 bm_ctrl_busy, bm_ctrl_error;
+	void __iomem *base = priv->base;
 
-	mask = IMX_OCOTP_BM_CTRL_BUSY | IMX_OCOTP_BM_CTRL_ERROR | flags;
+	bm_ctrl_busy = priv->params->ctrl.bm_busy;
+	bm_ctrl_error = priv->params->ctrl.bm_error;
+
+	mask = bm_ctrl_busy | bm_ctrl_error | flags;
 
 	for (count = 10000; count >= 0; count--) {
 		c = readl(base + IMX_OCOTP_ADDR_CTRL);
@@ -97,7 +118,7 @@ static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags)
 		 * - A read is performed to from a fuse word which has been read
 		 *   locked.
 		 */
-		if (c & IMX_OCOTP_BM_CTRL_ERROR)
+		if (c & bm_ctrl_error)
 			return -EPERM;
 		return -ETIMEDOUT;
 	}
@@ -105,15 +126,18 @@ static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags)
 	return 0;
 }
 
-static void imx_ocotp_clr_err_if_set(void __iomem *base)
+static void imx_ocotp_clr_err_if_set(struct ocotp_priv *priv)
 {
-	u32 c;
+	u32 c, bm_ctrl_error;
+	void __iomem *base = priv->base;
+
+	bm_ctrl_error = priv->params->ctrl.bm_error;
 
 	c = readl(base + IMX_OCOTP_ADDR_CTRL);
-	if (!(c & IMX_OCOTP_BM_CTRL_ERROR))
+	if (!(c & bm_ctrl_error))
 		return;
 
-	writel(IMX_OCOTP_BM_CTRL_ERROR, base + IMX_OCOTP_ADDR_CTRL_CLR);
+	writel(bm_ctrl_error, base + IMX_OCOTP_ADDR_CTRL_CLR);
 }
 
 static int imx_ocotp_read(void *context, unsigned int offset,
@@ -140,7 +164,7 @@ static int imx_ocotp_read(void *context, unsigned int offset,
 		return ret;
 	}
 
-	ret = imx_ocotp_wait_for_busy(priv->base, 0);
+	ret = imx_ocotp_wait_for_busy(priv, 0);
 	if (ret < 0) {
 		dev_err(priv->dev, "timeout during read setup\n");
 		goto read_end;
@@ -157,7 +181,7 @@ static int imx_ocotp_read(void *context, unsigned int offset,
 		 * issued
 		 */
 		if (*(buf - 1) == IMX_OCOTP_READ_LOCKED_VAL)
-			imx_ocotp_clr_err_if_set(priv->base);
+			imx_ocotp_clr_err_if_set(priv);
 	}
 	ret = 0;
 
@@ -274,7 +298,7 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
 	 * write or reload must be completed before a write access can be
 	 * requested.
 	 */
-	ret = imx_ocotp_wait_for_busy(priv->base, 0);
+	ret = imx_ocotp_wait_for_busy(priv, 0);
 	if (ret < 0) {
 		dev_err(priv->dev, "timeout during timing setup\n");
 		goto write_end;
@@ -306,8 +330,8 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
 	}
 
 	ctrl = readl(priv->base + IMX_OCOTP_ADDR_CTRL);
-	ctrl &= ~IMX_OCOTP_BM_CTRL_ADDR;
-	ctrl |= waddr & IMX_OCOTP_BM_CTRL_ADDR;
+	ctrl &= ~priv->params->ctrl.bm_addr;
+	ctrl |= waddr & priv->params->ctrl.bm_addr;
 	ctrl |= IMX_OCOTP_WR_UNLOCK;
 
 	writel(ctrl, priv->base + IMX_OCOTP_ADDR_CTRL);
@@ -374,11 +398,11 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
 	 * be set. It must be cleared by software before any new write access
 	 * can be issued.
 	 */
-	ret = imx_ocotp_wait_for_busy(priv->base, 0);
+	ret = imx_ocotp_wait_for_busy(priv, 0);
 	if (ret < 0) {
 		if (ret == -EPERM) {
 			dev_err(priv->dev, "failed write to locked region");
-			imx_ocotp_clr_err_if_set(priv->base);
+			imx_ocotp_clr_err_if_set(priv);
 		} else {
 			dev_err(priv->dev, "timeout during data write\n");
 		}
@@ -394,10 +418,10 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
 	udelay(2);
 
 	/* reload all shadow registers */
-	writel(IMX_OCOTP_BM_CTRL_REL_SHADOWS,
+	writel(priv->params->ctrl.bm_rel_shadows,
 	       priv->base + IMX_OCOTP_ADDR_CTRL_SET);
-	ret = imx_ocotp_wait_for_busy(priv->base,
-				      IMX_OCOTP_BM_CTRL_REL_SHADOWS);
+	ret = imx_ocotp_wait_for_busy(priv,
+				      priv->params->ctrl.bm_rel_shadows);
 	if (ret < 0) {
 		dev_err(priv->dev, "timeout during shadow register reload\n");
 		goto write_end;
@@ -424,65 +448,76 @@ static const struct ocotp_params imx6q_params = {
 	.nregs = 128,
 	.bank_address_words = 0,
 	.set_timing = imx_ocotp_set_imx6_timing,
+	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
 };
 
 static const struct ocotp_params imx6sl_params = {
 	.nregs = 64,
 	.bank_address_words = 0,
 	.set_timing = imx_ocotp_set_imx6_timing,
+	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
 };
 
 static const struct ocotp_params imx6sll_params = {
 	.nregs = 128,
 	.bank_address_words = 0,
 	.set_timing = imx_ocotp_set_imx6_timing,
+	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
 };
 
 static const struct ocotp_params imx6sx_params = {
 	.nregs = 128,
 	.bank_address_words = 0,
 	.set_timing = imx_ocotp_set_imx6_timing,
+	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
 };
 
 static const struct ocotp_params imx6ul_params = {
 	.nregs = 128,
 	.bank_address_words = 0,
 	.set_timing = imx_ocotp_set_imx6_timing,
+	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
 };
 
 static const struct ocotp_params imx6ull_params = {
 	.nregs = 64,
 	.bank_address_words = 0,
 	.set_timing = imx_ocotp_set_imx6_timing,
+	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
 };
 
 static const struct ocotp_params imx7d_params = {
 	.nregs = 64,
 	.bank_address_words = 4,
 	.set_timing = imx_ocotp_set_imx7_timing,
+	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
 };
 
 static const struct ocotp_params imx7ulp_params = {
 	.nregs = 256,
 	.bank_address_words = 0,
+	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
 };
 
 static const struct ocotp_params imx8mq_params = {
 	.nregs = 256,
 	.bank_address_words = 0,
 	.set_timing = imx_ocotp_set_imx6_timing,
+	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
 };
 
 static const struct ocotp_params imx8mm_params = {
 	.nregs = 256,
 	.bank_address_words = 0,
 	.set_timing = imx_ocotp_set_imx6_timing,
+	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
 };
 
 static const struct ocotp_params imx8mn_params = {
 	.nregs = 256,
 	.bank_address_words = 0,
 	.set_timing = imx_ocotp_set_imx6_timing,
+	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
 };
 
 static const struct of_device_id imx_ocotp_dt_ids[] = {
@@ -521,17 +556,17 @@ static int imx_ocotp_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->clk))
 		return PTR_ERR(priv->clk);
 
-	clk_prepare_enable(priv->clk);
-	imx_ocotp_clr_err_if_set(priv->base);
-	clk_disable_unprepare(priv->clk);
-
 	priv->params = of_device_get_match_data(&pdev->dev);
 	imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
 	imx_ocotp_nvmem_config.dev = dev;
 	imx_ocotp_nvmem_config.priv = priv;
 	priv->config = &imx_ocotp_nvmem_config;
-	nvmem = devm_nvmem_register(dev, &imx_ocotp_nvmem_config);
 
+	clk_prepare_enable(priv->clk);
+	imx_ocotp_clr_err_if_set(priv);
+	clk_disable_unprepare(priv->clk);
+
+	nvmem = devm_nvmem_register(dev, &imx_ocotp_nvmem_config);
 
 	return PTR_ERR_OR_ZERO(nvmem);
 }
diff --git a/drivers/nvmem/nvmem.h b/drivers/nvmem/nvmem.h
index eb8ed71..be0d66d 100644
--- a/drivers/nvmem/nvmem.h
+++ b/drivers/nvmem/nvmem.h
@@ -9,6 +9,7 @@
 #include <linux/list.h>
 #include <linux/nvmem-consumer.h>
 #include <linux/nvmem-provider.h>
+#include <linux/gpio/consumer.h>
 
 struct nvmem_device {
 	struct module		*owner;
@@ -26,6 +27,7 @@ struct nvmem_device {
 	struct list_head	cells;
 	nvmem_reg_read_t	reg_read;
 	nvmem_reg_write_t	reg_write;
+	struct gpio_desc	*wp_gpio;
 	void *priv;
 };
 
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
new file mode 100644
index 0000000..8682cda
--- /dev/null
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
+
+#define SDAM_MEM_START			0x40
+#define REGISTER_MAP_ID			0x40
+#define REGISTER_MAP_VERSION		0x41
+#define SDAM_SIZE			0x44
+#define SDAM_PBS_TRIG_SET		0xE5
+#define SDAM_PBS_TRIG_CLR		0xE6
+
+struct sdam_chip {
+	struct platform_device		*pdev;
+	struct regmap			*regmap;
+	struct nvmem_config		sdam_config;
+	unsigned int			base;
+	unsigned int			size;
+};
+
+/* read only register offsets */
+static const u8 sdam_ro_map[] = {
+	REGISTER_MAP_ID,
+	REGISTER_MAP_VERSION,
+	SDAM_SIZE
+};
+
+static bool sdam_is_valid(struct sdam_chip *sdam, unsigned int offset,
+				size_t len)
+{
+	unsigned int sdam_mem_end = SDAM_MEM_START + sdam->size - 1;
+
+	if (!len)
+		return false;
+
+	if (offset >= SDAM_MEM_START && offset <= sdam_mem_end
+				&& (offset + len - 1) <= sdam_mem_end)
+		return true;
+	else if ((offset == SDAM_PBS_TRIG_SET || offset == SDAM_PBS_TRIG_CLR)
+				&& (len == 1))
+		return true;
+
+	return false;
+}
+
+static bool sdam_is_ro(unsigned int offset, size_t len)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sdam_ro_map); i++)
+		if (offset <= sdam_ro_map[i] && (offset + len) > sdam_ro_map[i])
+			return true;
+
+	return false;
+}
+
+static int sdam_read(void *priv, unsigned int offset, void *val,
+				size_t bytes)
+{
+	struct sdam_chip *sdam = priv;
+	struct device *dev = &sdam->pdev->dev;
+	int rc;
+
+	if (!sdam_is_valid(sdam, offset, bytes)) {
+		dev_err(dev, "Invalid SDAM offset %#x len=%zd\n",
+			offset, bytes);
+		return -EINVAL;
+	}
+
+	rc = regmap_bulk_read(sdam->regmap, sdam->base + offset, val, bytes);
+	if (rc < 0)
+		dev_err(dev, "Failed to read SDAM offset %#x len=%zd, rc=%d\n",
+						offset, bytes, rc);
+
+	return rc;
+}
+
+static int sdam_write(void *priv, unsigned int offset, void *val,
+				size_t bytes)
+{
+	struct sdam_chip *sdam = priv;
+	struct device *dev = &sdam->pdev->dev;
+	int rc;
+
+	if (!sdam_is_valid(sdam, offset, bytes)) {
+		dev_err(dev, "Invalid SDAM offset %#x len=%zd\n",
+			offset, bytes);
+		return -EINVAL;
+	}
+
+	if (sdam_is_ro(offset, bytes)) {
+		dev_err(dev, "Invalid write offset %#x len=%zd\n",
+			offset, bytes);
+		return -EINVAL;
+	}
+
+	rc = regmap_bulk_write(sdam->regmap, sdam->base + offset, val, bytes);
+	if (rc < 0)
+		dev_err(dev, "Failed to write SDAM offset %#x len=%zd, rc=%d\n",
+						offset, bytes, rc);
+
+	return rc;
+}
+
+static int sdam_probe(struct platform_device *pdev)
+{
+	struct sdam_chip *sdam;
+	struct nvmem_device *nvmem;
+	unsigned int val;
+	int rc;
+
+	sdam = devm_kzalloc(&pdev->dev, sizeof(*sdam), GFP_KERNEL);
+	if (!sdam)
+		return -ENOMEM;
+
+	sdam->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!sdam->regmap) {
+		dev_err(&pdev->dev, "Failed to get regmap handle\n");
+		return -ENXIO;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "reg", &sdam->base);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Failed to get SDAM base, rc=%d\n", rc);
+		return -EINVAL;
+	}
+
+	rc = regmap_read(sdam->regmap, sdam->base + SDAM_SIZE, &val);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Failed to read SDAM_SIZE rc=%d\n", rc);
+		return -EINVAL;
+	}
+	sdam->size = val * 32;
+
+	sdam->sdam_config.dev = &pdev->dev;
+	sdam->sdam_config.name = "spmi_sdam";
+	sdam->sdam_config.id = pdev->id;
+	sdam->sdam_config.owner = THIS_MODULE,
+	sdam->sdam_config.stride = 1;
+	sdam->sdam_config.word_size = 1;
+	sdam->sdam_config.reg_read = sdam_read;
+	sdam->sdam_config.reg_write = sdam_write;
+	sdam->sdam_config.priv = sdam;
+
+	nvmem = devm_nvmem_register(&pdev->dev, &sdam->sdam_config);
+	if (IS_ERR(nvmem)) {
+		dev_err(&pdev->dev,
+			"Failed to register SDAM nvmem device rc=%ld\n",
+			PTR_ERR(nvmem));
+		return -ENXIO;
+	}
+	dev_dbg(&pdev->dev,
+		"SDAM base=%#x size=%u registered successfully\n",
+		sdam->base, sdam->size);
+
+	return 0;
+}
+
+static const struct of_device_id sdam_match_table[] = {
+	{ .compatible = "qcom,spmi-sdam" },
+	{},
+};
+
+static struct platform_driver sdam_driver = {
+	.driver = {
+		.name = "qcom,spmi-sdam",
+		.of_match_table = sdam_match_table,
+	},
+	.probe		= sdam_probe,
+};
+
+static int __init sdam_init(void)
+{
+	return platform_driver_register(&sdam_driver);
+}
+subsys_initcall(sdam_init);
+
+static void __exit sdam_exit(void)
+{
+	return platform_driver_unregister(&sdam_driver);
+}
+module_exit(sdam_exit);
+
+MODULE_DESCRIPTION("QCOM SPMI SDAM driver");
+MODULE_LICENSE("GPL v2");
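
A client of the new SDAM provider would normally go through the nvmem consumer API rather than the regmap; a hedged sketch, where the device handle and the "spmi_sdam" lookup name are assumptions for illustration:

#include <linux/nvmem-consumer.h>

static int example_read_sdam(struct device *dev)
{
	struct nvmem_device *nvmem;
	u8 buf[4];
	int ret;

	/* lookup name assumed to match the provider's nvmem_config.name */
	nvmem = devm_nvmem_device_get(dev, "spmi_sdam");
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	/* 0x40 is SDAM_MEM_START; offsets are checked by sdam_is_valid() */
	ret = nvmem_device_read(nvmem, 0x40, sizeof(buf), buf);

	return ret < 0 ? ret : 0;
}
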
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 37c2ccb..d916186 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -103,4 +103,8 @@
 config OF_NUMA
 	bool
 
+config OF_DMA_DEFAULT_COHERENT
+	# arches should select this if DMA is coherent by default for OF devices
+	bool
+
 endif # OF
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 99c1b80..e8a39c3 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -995,12 +995,16 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
  * @np:	device node
  *
  * It returns true if "dma-coherent" property was found
- * for this device in DT.
+ * for this device in the DT, or if DMA is coherent by
+ * default for OF devices on the current platform.
  */
 bool of_dma_is_coherent(struct device_node *np)
 {
 	struct device_node *node = of_node_get(np);
 
+	if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
+		return true;
+
 	while (node) {
 		if (of_property_read_bool(node, "dma-coherent")) {
 			of_node_put(node);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index db7fbc0..ae03b12 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -135,115 +135,38 @@ int __weak of_node_to_nid(struct device_node *np)
 }
 #endif
 
-/*
- * Assumptions behind phandle_cache implementation:
- *   - phandle property values are in a contiguous range of 1..n
- *
- * If the assumptions do not hold, then
- *   - the phandle lookup overhead reduction provided by the cache
- *     will likely be less
- */
+#define OF_PHANDLE_CACHE_BITS	7
+#define OF_PHANDLE_CACHE_SZ	BIT(OF_PHANDLE_CACHE_BITS)
 
-static struct device_node **phandle_cache;
-static u32 phandle_cache_mask;
+static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];
+
+static u32 of_phandle_cache_hash(phandle handle)
+{
+	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
+}
 
 /*
  * Caller must hold devtree_lock.
  */
-static void __of_free_phandle_cache(void)
+void __of_phandle_cache_inv_entry(phandle handle)
 {
-	u32 cache_entries = phandle_cache_mask + 1;
-	u32 k;
-
-	if (!phandle_cache)
-		return;
-
-	for (k = 0; k < cache_entries; k++)
-		of_node_put(phandle_cache[k]);
-
-	kfree(phandle_cache);
-	phandle_cache = NULL;
-}
-
-int of_free_phandle_cache(void)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	__of_free_phandle_cache();
-
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
-	return 0;
-}
-#if !defined(CONFIG_MODULES)
-late_initcall_sync(of_free_phandle_cache);
-#endif
-
-/*
- * Caller must hold devtree_lock.
- */
-void __of_free_phandle_cache_entry(phandle handle)
-{
-	phandle masked_handle;
+	u32 handle_hash;
 	struct device_node *np;
 
 	if (!handle)
 		return;
 
-	masked_handle = handle & phandle_cache_mask;
+	handle_hash = of_phandle_cache_hash(handle);
 
-	if (phandle_cache) {
-		np = phandle_cache[masked_handle];
-		if (np && handle == np->phandle) {
-			of_node_put(np);
-			phandle_cache[masked_handle] = NULL;
-		}
-	}
-}
-
-void of_populate_phandle_cache(void)
-{
-	unsigned long flags;
-	u32 cache_entries;
-	struct device_node *np;
-	u32 phandles = 0;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	__of_free_phandle_cache();
-
-	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
-			phandles++;
-
-	if (!phandles)
-		goto out;
-
-	cache_entries = roundup_pow_of_two(phandles);
-	phandle_cache_mask = cache_entries - 1;
-
-	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
-				GFP_ATOMIC);
-	if (!phandle_cache)
-		goto out;
-
-	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
-			of_node_get(np);
-			phandle_cache[np->phandle & phandle_cache_mask] = np;
-		}
-
-out:
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	np = phandle_cache[handle_hash];
+	if (np && handle == np->phandle)
+		phandle_cache[handle_hash] = NULL;
 }
 
 void __init of_core_init(void)
 {
 	struct device_node *np;
 
-	of_populate_phandle_cache();
 
 	/* Create the kset, and register existing nodes */
 	mutex_lock(&of_mutex);
@@ -253,8 +176,11 @@ void __init of_core_init(void)
 		pr_err("failed to register existing nodes\n");
 		return;
 	}
-	for_each_of_allnodes(np)
+	for_each_of_allnodes(np) {
 		__of_attach_node_sysfs(np);
+		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
+			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
+	}
 	mutex_unlock(&of_mutex);
 
 	/* Symlink in /proc as required by userspace ABI */
@@ -490,6 +416,42 @@ int of_cpu_node_to_id(struct device_node *cpu_node)
 EXPORT_SYMBOL(of_cpu_node_to_id);
 
 /**
+ * of_get_cpu_state_node - Get CPU's idle state node at the given index
+ *
+ * @cpu_node: The device node for the CPU
+ * @index: The index in the list of the idle states
+ *
+ * Two generic methods can be used to describe a CPU's idle states, either via
+ * a flattened description through the "cpu-idle-states" binding or via the
+ * hierarchical layout, using the "power-domains" and the "domain-idle-states"
+ * bindings. This function checks for both and returns the idle state node for
+ * the requested index.
+ *
+ * In case an idle state node is found at @index, the refcount is incremented
+ * for it, so call of_node_put() on it when done. Returns NULL if not found.
+ */
+struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+					  int index)
+{
+	struct of_phandle_args args;
+	int err;
+
+	err = of_parse_phandle_with_args(cpu_node, "power-domains",
+					"#power-domain-cells", 0, &args);
+	if (!err) {
+		struct device_node *state_node =
+			of_parse_phandle(args.np, "domain-idle-states", index);
+
+		of_node_put(args.np);
+		if (state_node)
+			return state_node;
+	}
+
+	return of_parse_phandle(cpu_node, "cpu-idle-states", index);
+}
+EXPORT_SYMBOL(of_get_cpu_state_node);
+
+/**
  * __of_device_is_compatible() - Check if the node matches given constraints
  * @device: pointer to node
  * @compat: required compatible string, NULL or "" for any match
@@ -1235,36 +1197,24 @@ struct device_node *of_find_node_by_phandle(phandle handle)
 {
 	struct device_node *np = NULL;
 	unsigned long flags;
-	phandle masked_handle;
+	u32 handle_hash;
 
 	if (!handle)
 		return NULL;
 
+	handle_hash = of_phandle_cache_hash(handle);
+
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	masked_handle = handle & phandle_cache_mask;
-
-	if (phandle_cache) {
-		if (phandle_cache[masked_handle] &&
-		    handle == phandle_cache[masked_handle]->phandle)
-			np = phandle_cache[masked_handle];
-		if (np && of_node_check_flag(np, OF_DETACHED)) {
-			WARN_ON(1); /* did not uncache np on node removal */
-			of_node_put(np);
-			phandle_cache[masked_handle] = NULL;
-			np = NULL;
-		}
-	}
+	if (phandle_cache[handle_hash] &&
+	    handle == phandle_cache[handle_hash]->phandle)
+		np = phandle_cache[handle_hash];
 
 	if (!np) {
 		for_each_of_allnodes(np)
 			if (np->phandle == handle &&
 			    !of_node_check_flag(np, OF_DETACHED)) {
-				if (phandle_cache) {
-					/* will put when removed from cache */
-					of_node_get(np);
-					phandle_cache[masked_handle] = np;
-				}
+				phandle_cache[handle_hash] = np;
 				break;
 			}
 	}
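
The new of_get_cpu_state_node() transparently handles both idle-state layouts described in its kernel-doc, so callers no longer need to probe the two bindings themselves. A minimal caller sketch; the function name and loop are illustrative only:

static int example_count_idle_states(struct device_node *cpu_node)
{
	struct device_node *state;
	int i = 0;

	/* covers both "cpu-idle-states" and "power-domains"/"domain-idle-states" */
	while ((state = of_get_cpu_state_node(cpu_node, i))) {
		of_node_put(state);	/* the helper returns a counted reference */
		i++;
	}

	return i;
}
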
diff --git a/drivers/of/device.c b/drivers/of/device.c
index e9127db..27203bf 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -161,7 +161,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
 		coherent ? " " : " not ");
 
 	iommu = of_iommu_configure(dev, np);
-	if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+	if (PTR_ERR(iommu) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 
 	dev_dbg(dev, "device is%sbehind an iommu\n",
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 49b16f7..08fd823 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -276,7 +276,7 @@ void __of_detach_node(struct device_node *np)
 	of_node_set_flag(np, OF_DETACHED);
 
 	/* race with of_find_node_by_phandle() prevented by devtree_lock */
-	__of_free_phandle_cache_entry(np->phandle);
+	__of_phandle_cache_inv_entry(np->phandle);
 }
 
 /**
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index f5c2a54..8270bbf 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -81,13 +81,15 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
 	else
 		phy = get_phy_device(mdio, addr, is_c45);
 	if (IS_ERR(phy)) {
-		unregister_mii_timestamper(mii_ts);
+		if (mii_ts)
+			unregister_mii_timestamper(mii_ts);
 		return PTR_ERR(phy);
 	}
 
 	rc = of_irq_get(child, 0);
 	if (rc == -EPROBE_DEFER) {
-		unregister_mii_timestamper(mii_ts);
+		if (mii_ts)
+			unregister_mii_timestamper(mii_ts);
 		phy_device_free(phy);
 		return rc;
 	}
@@ -116,12 +118,19 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
 	 * register it */
 	rc = phy_device_register(phy);
 	if (rc) {
-		unregister_mii_timestamper(mii_ts);
+		if (mii_ts)
+			unregister_mii_timestamper(mii_ts);
 		phy_device_free(phy);
 		of_node_put(child);
 		return rc;
 	}
-	phy->mii_ts = mii_ts;
+
+	/* phy->mii_ts may already be defined by the PHY driver. A
+	 * mii_timestamper probed via the device tree will still have
+	 * precedence.
+	 */
+	if (mii_ts)
+		phy->mii_ts = mii_ts;
 
 	dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
 		child, addr);
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 66294d2..207863c 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -84,15 +84,11 @@ static inline void __of_detach_node_sysfs(struct device_node *np) {}
 int of_resolve_phandles(struct device_node *tree);
 #endif
 
-#if defined(CONFIG_OF_DYNAMIC)
-void __of_free_phandle_cache_entry(phandle handle);
-#endif
+void __of_phandle_cache_inv_entry(phandle handle);
 
 #if defined(CONFIG_OF_OVERLAY)
 void of_overlay_mutex_lock(void);
 void of_overlay_mutex_unlock(void);
-int of_free_phandle_cache(void);
-void of_populate_phandle_cache(void);
 #else
 static inline void of_overlay_mutex_lock(void) {};
 static inline void of_overlay_mutex_unlock(void) {};
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 9617b7d..c9219fd 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -974,8 +974,6 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
 		goto err_free_overlay_changeset;
 	}
 
-	of_populate_phandle_cache();
-
 	ret = __of_changeset_apply_notify(&ovcs->cset);
 	if (ret)
 		pr_err("overlay apply changeset entry notify error %d\n", ret);
@@ -1218,17 +1216,8 @@ int of_overlay_remove(int *ovcs_id)
 
 	list_del(&ovcs->ovcs_list);
 
-	/*
-	 * Disable phandle cache.  Avoids race condition that would arise
-	 * from removing cache entry when the associated node is deleted.
-	 */
-	of_free_phandle_cache();
-
 	ret_apply = 0;
 	ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);
-
-	of_populate_phandle_cache();
-
 	if (ret) {
 		if (ret_apply)
 			devicetree_state_flags |= DTSF_REVERT_FAIL;
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index eda2633..9210a95 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -32,7 +32,7 @@
 
 #define OP_BUFFER_FLAGS	0
 
-static struct ring_buffer *op_ring_buffer;
+static struct trace_buffer *op_ring_buffer;
 DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 73e37bb..36c6613 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -230,13 +230,12 @@ static ssize_t led_proc_write(struct file *file, const char __user *buf,
 	return -EINVAL;
 }
 
-static const struct file_operations led_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= led_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= led_proc_write,
+static const struct proc_ops led_proc_ops = {
+	.proc_open	= led_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= led_proc_write,
 };
 
 static int __init led_create_procfs(void)
@@ -252,14 +251,14 @@ static int __init led_create_procfs(void)
 	if (!lcd_no_led_support)
 	{
 		ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root,
-					&led_proc_fops, (void *)LED_NOLCD); /* LED */
+					&led_proc_ops, (void *)LED_NOLCD); /* LED */
 		if (!ent) return -1;
 	}
 
 	if (led_type == LED_HASLCD)
 	{
 		ent = proc_create_data("lcd", S_IRUGO|S_IWUSR, proc_pdc_root,
-					&led_proc_fops, (void *)LED_HASLCD); /* LCD */
+					&led_proc_ops, (void *)LED_HASLCD); /* LCD */
 		if (!ent) return -1;
 	}
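
The same conversion applies mechanically to other procfs users: each file_operations member maps to its proc_* counterpart, and struct proc_ops has no owner field, so the old .owner line is simply dropped. A minimal sketch with placeholder names:

	static const struct proc_ops example_proc_ops = {
		.proc_open	= example_open,		/* was .open */
		.proc_read	= seq_read,		/* was .read */
		.proc_lseek	= seq_lseek,		/* was .llseek */
		.proc_release	= single_release,	/* was .release */
		.proc_write	= example_write,	/* was .write */
	};
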
 
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 982b46f..3ef0bb2 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -69,6 +69,7 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
 	dev->ats_enabled = 1;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(pci_enable_ats);
 
 /**
  * pci_disable_ats - disable the ATS capability
@@ -87,6 +88,7 @@ void pci_disable_ats(struct pci_dev *dev)
 
 	dev->ats_enabled = 0;
 }
+EXPORT_SYMBOL_GPL(pci_disable_ats);
 
 void pci_restore_ats_state(struct pci_dev *dev)
 {
@@ -424,11 +426,12 @@ void pci_restore_pasid_state(struct pci_dev *pdev)
 int pci_pasid_features(struct pci_dev *pdev)
 {
 	u16 supported;
-	int pasid = pdev->pasid_cap;
+	int pasid;
 
 	if (pdev->is_virtfn)
 		pdev = pci_physfn(pdev);
 
+	pasid = pdev->pasid_cap;
 	if (!pasid)
 		return -EINVAL;
 
@@ -451,11 +454,12 @@ int pci_pasid_features(struct pci_dev *pdev)
 int pci_max_pasids(struct pci_dev *pdev)
 {
 	u16 supported;
-	int pasid = pdev->pasid_cap;
+	int pasid;
 
 	if (pdev->is_virtfn)
 		pdev = pci_physfn(pdev);
 
+	pasid = pdev->pasid_cap;
 	if (!pasid)
 		return -EINVAL;
 
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index c77069c..20bf00f 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -239,7 +239,6 @@
 
 config VMD
 	depends on PCI_MSI && X86_64 && SRCU
-	select X86_DEV_DMA_OPS
 	tristate "Intel Volume Management Device Driver"
 	---help---
 	  Adds support for the Intel Volume Management Device (VMD). VMD is a
@@ -253,6 +252,15 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called vmd.
 
+config PCIE_BRCMSTB
+	tristate "Broadcom Brcmstb PCIe host controller"
+	depends on ARCH_BCM2835 || COMPILE_TEST
+	depends on OF
+	depends on PCI_MSI_IRQ_DOMAIN
+	help
+	  Say Y here to enable PCIe host controller support for
+	  Broadcom STB based SoCs, like the Raspberry Pi 4.
+
 config PCI_HYPERV_INTERFACE
 	tristate "Hyper-V PCI Interface"
 	depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 3d4f597..01b2502 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -28,6 +28,7 @@
 obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
 obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
 obj-$(CONFIG_VMD) += vmd.o
+obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
 # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
 obj-y				+= dwc/
 
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 625a031..0830dfc 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -209,6 +209,17 @@
 	  Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
 	  endpoint mode. This uses the DesignWare core.
 
+config PCIE_INTEL_GW
+	bool "Intel Gateway PCIe host controller support"
+	depends on OF && (X86 || COMPILE_TEST)
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say 'Y' here to enable PCIe Host controller support on Intel
+	  Gateway SoCs.
+	  The PCIe controller uses the DesignWare core plus Intel-specific
+	  hardware wrappers.
+
 config PCIE_KIRIN
 	depends on OF && (ARM64 || COMPILE_TEST)
 	bool "HiSilicon Kirin series SoCs PCIe controllers"
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 69faff3..8a637cf 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -13,6 +13,7 @@
 obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
 obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
 obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
 obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
 obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
 obj-$(CONFIG_PCI_MESON) += pci-meson.o
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 14a6ba4..c5043d9 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * PCIe host controller driver for Samsung EXYNOS SoCs
+ * PCIe host controller driver for Samsung Exynos SoCs
  *
  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index af67725..c8c702c 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -422,7 +422,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
 				   lower_32_bits(start) | OB_ENABLEN);
 		ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
 				   upper_32_bits(start));
-		start += OB_WIN_SIZE;
+		start += OB_WIN_SIZE * SZ_1M;
 	}
 
 	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
@@ -510,7 +510,7 @@ static void ks_pcie_stop_link(struct dw_pcie *pci)
 	/* Disable Link training */
 	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 	val &= ~LTSSM_EN_VAL;
-	ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
 }
 
 static int ks_pcie_start_link(struct dw_pcie *pci)
@@ -1354,7 +1354,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
 		ret = of_property_read_u32(np, "num-viewport", &num_viewport);
 		if (ret < 0) {
 			dev_err(dev, "unable to read *num-viewport* property\n");
-			return ret;
+			goto err_get_sync;
 		}
 
 		/*
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 9e2482b..28d5a10 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -51,9 +51,6 @@ static const struct of_device_id artpec6_pcie_of_match[];
 #define ACK_N_FTS_MASK			GENMASK(15, 8)
 #define ACK_N_FTS(x)			(((x) << 8) & ACK_N_FTS_MASK)
 
-#define FAST_TRAINING_SEQ_MASK		GENMASK(7, 0)
-#define FAST_TRAINING_SEQ(x)		(((x) << 0) & FAST_TRAINING_SEQ_MASK)
-
 /* ARTPEC-6 specific registers */
 #define PCIECFG				0x18
 #define  PCIECFG_DBG_OEN		BIT(24)
@@ -313,10 +310,7 @@ static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie)
 	 * Set the Number of Fast Training Sequences that the core
 	 * advertises as its N_FTS during Gen2 or Gen3 link training.
 	 */
-	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
-	val &= ~FAST_TRAINING_SEQ_MASK;
-	val |= FAST_TRAINING_SEQ(180);
-	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+	dw_pcie_link_set_n_fts(pci, 180);
 }
 
 static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 820488d..681548c8 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -12,6 +12,7 @@
 #include <linux/of.h>
 #include <linux/types.h>
 
+#include "../../pci.h"
 #include "pcie-designware.h"
 
 /*
@@ -474,6 +475,61 @@ int dw_pcie_link_up(struct dw_pcie *pci)
 		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
 }
 
+void dw_pcie_upconfig_setup(struct dw_pcie *pci)
+{
+	u32 val;
+
+	val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
+	val |= PORT_MLTI_UPCFG_SUPPORT;
+	dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
+
+void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+{
+	u32 reg, val;
+	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+	reg = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
+	reg &= ~PCI_EXP_LNKCTL2_TLS;
+
+	switch (pcie_link_speed[link_gen]) {
+	case PCIE_SPEED_2_5GT:
+		reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+		break;
+	case PCIE_SPEED_5_0GT:
+		reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
+		break;
+	case PCIE_SPEED_8_0GT:
+		reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
+		break;
+	case PCIE_SPEED_16_0GT:
+		reg |= PCI_EXP_LNKCTL2_TLS_16_0GT;
+		break;
+	default:
+		/* Use hardware capability */
+		val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+		val = FIELD_GET(PCI_EXP_LNKCAP_SLS, val);
+		reg &= ~PCI_EXP_LNKCTL2_HASD;
+		reg |= FIELD_PREP(PCI_EXP_LNKCTL2_TLS, val);
+		break;
+	}
+
+	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, reg);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_link_set_max_speed);
+
+void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts)
+{
+	u32 val;
+
+	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+	val &= ~PORT_LOGIC_N_FTS_MASK;
+	val |= n_fts & PORT_LOGIC_N_FTS_MASK;
+	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_link_set_n_fts);
+
 static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
 {
 	u32 val;
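
A short usage sketch of the three helpers added above, roughly as a DWC-based host driver would call them from its root-complex setup path; the wrapper name is illustrative, and the call order mirrors the Intel Gateway driver added later in this series:

	static void example_rc_setup(struct dw_pcie *pci, u32 link_gen, u32 n_fts)
	{
		dw_pcie_setup_rc(&pci->pp);			/* standard DWC RC setup */
		dw_pcie_upconfig_setup(pci);			/* advertise upconfigure support */
		dw_pcie_link_set_max_speed(pci, link_gen);	/* 0 falls back to HW LNKCAP */
		dw_pcie_link_set_n_fts(pci, n_fts);		/* program N_FTS for link training */
	}
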
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 5accdd6..a22ea59 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -30,7 +30,12 @@
 #define LINK_WAIT_IATU			9
 
 /* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_AFR			0x70C
+#define PORT_AFR_N_FTS_MASK		GENMASK(15, 8)
+#define PORT_AFR_CC_N_FTS_MASK		GENMASK(23, 16)
+
 #define PCIE_PORT_LINK_CONTROL		0x710
+#define PORT_LINK_DLL_LINK_EN		BIT(5)
 #define PORT_LINK_MODE_MASK		GENMASK(21, 16)
 #define PORT_LINK_MODE(n)		FIELD_PREP(PORT_LINK_MODE_MASK, n)
 #define PORT_LINK_MODE_1_LANES		PORT_LINK_MODE(0x1)
@@ -46,6 +51,7 @@
 #define PCIE_PORT_DEBUG1_LINK_IN_TRAINING	BIT(29)
 
 #define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
+#define PORT_LOGIC_N_FTS_MASK		GENMASK(7, 0)
 #define PORT_LOGIC_SPEED_CHANGE		BIT(17)
 #define PORT_LOGIC_LINK_WIDTH_MASK	GENMASK(12, 8)
 #define PORT_LOGIC_LINK_WIDTH(n)	FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n)
@@ -60,6 +66,9 @@
 #define PCIE_MSI_INTR0_MASK		0x82C
 #define PCIE_MSI_INTR0_STATUS		0x830
 
+#define PCIE_PORT_MULTI_LANE_CTRL	0x8C0
+#define PORT_MLTI_UPCFG_SUPPORT		BIT(7)
+
 #define PCIE_ATU_VIEWPORT		0x900
 #define PCIE_ATU_REGION_INBOUND		BIT(31)
 #define PCIE_ATU_REGION_OUTBOUND	0
@@ -273,6 +282,9 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
 u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size);
 void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
 int dw_pcie_link_up(struct dw_pcie *pci);
+void dw_pcie_upconfig_setup(struct dw_pcie *pci);
+void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen);
+void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts);
 int dw_pcie_wait_for_link(struct dw_pcie *pci);
 void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
 			       int type, u64 cpu_addr, u64 pci_addr,
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
new file mode 100644
index 0000000..fc2a122
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Intel Gateway SoCs
+ *
+ * Copyright (c) 2019 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
+#include <linux/pci_regs.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+#define PORT_AFR_N_FTS_GEN12_DFT	(SZ_128 - 1)
+#define PORT_AFR_N_FTS_GEN3		180
+#define PORT_AFR_N_FTS_GEN4		196
+
+/* PCIe Application logic Registers */
+#define PCIE_APP_CCR			0x10
+#define PCIE_APP_CCR_LTSSM_ENABLE	BIT(0)
+
+#define PCIE_APP_MSG_CR			0x30
+#define PCIE_APP_MSG_XMT_PM_TURNOFF	BIT(0)
+
+#define PCIE_APP_PMC			0x44
+#define PCIE_APP_PMC_IN_L2		BIT(20)
+
+#define PCIE_APP_IRNEN			0xF4
+#define PCIE_APP_IRNCR			0xF8
+#define PCIE_APP_IRN_AER_REPORT		BIT(0)
+#define PCIE_APP_IRN_PME		BIT(2)
+#define PCIE_APP_IRN_RX_VDM_MSG		BIT(4)
+#define PCIE_APP_IRN_PM_TO_ACK		BIT(9)
+#define PCIE_APP_IRN_LINK_AUTO_BW_STAT	BIT(11)
+#define PCIE_APP_IRN_BW_MGT		BIT(12)
+#define PCIE_APP_IRN_MSG_LTR		BIT(18)
+#define PCIE_APP_IRN_SYS_ERR_RC		BIT(29)
+#define PCIE_APP_INTX_OFST		12
+
+#define PCIE_APP_IRN_INT \
+	(PCIE_APP_IRN_AER_REPORT | PCIE_APP_IRN_PME | \
+	PCIE_APP_IRN_RX_VDM_MSG | PCIE_APP_IRN_SYS_ERR_RC | \
+	PCIE_APP_IRN_PM_TO_ACK | PCIE_APP_IRN_MSG_LTR | \
+	PCIE_APP_IRN_BW_MGT | PCIE_APP_IRN_LINK_AUTO_BW_STAT | \
+	(PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTA) | \
+	(PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTB) | \
+	(PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTC) | \
+	(PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTD))
+
+#define BUS_IATU_OFFSET			SZ_256M
+#define RESET_INTERVAL_MS		100
+
+struct intel_pcie_soc {
+	unsigned int	pcie_ver;
+	unsigned int	pcie_atu_offset;
+	u32		num_viewport;
+};
+
+struct intel_pcie_port {
+	struct dw_pcie		pci;
+	void __iomem		*app_base;
+	struct gpio_desc	*reset_gpio;
+	u32			rst_intrvl;
+	u32			max_speed;
+	u32			link_gen;
+	u32			max_width;
+	u32			n_fts;
+	struct clk		*core_clk;
+	struct reset_control	*core_rst;
+	struct phy		*phy;
+	u8			pcie_cap_ofst;
+};
+
+static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
+{
+	u32 old;
+
+	old = readl(base + ofs);
+	val = (old & ~mask) | (val & mask);
+
+	if (val != old)
+		writel(val, base + ofs);
+}
+
+static inline u32 pcie_app_rd(struct intel_pcie_port *lpp, u32 ofs)
+{
+	return readl(lpp->app_base + ofs);
+}
+
+static inline void pcie_app_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
+{
+	writel(val, lpp->app_base + ofs);
+}
+
+static void pcie_app_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
+			     u32 mask, u32 val)
+{
+	pcie_update_bits(lpp->app_base, ofs, mask, val);
+}
+
+static inline u32 pcie_rc_cfg_rd(struct intel_pcie_port *lpp, u32 ofs)
+{
+	return dw_pcie_readl_dbi(&lpp->pci, ofs);
+}
+
+static inline void pcie_rc_cfg_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
+{
+	dw_pcie_writel_dbi(&lpp->pci, ofs, val);
+}
+
+static void pcie_rc_cfg_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
+				u32 mask, u32 val)
+{
+	pcie_update_bits(lpp->pci.dbi_base, ofs, mask, val);
+}
+
+static void intel_pcie_ltssm_enable(struct intel_pcie_port *lpp)
+{
+	pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
+			 PCIE_APP_CCR_LTSSM_ENABLE);
+}
+
+static void intel_pcie_ltssm_disable(struct intel_pcie_port *lpp)
+{
+	pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
+}
+
+static void intel_pcie_link_setup(struct intel_pcie_port *lpp)
+{
+	u32 val;
+	u8 offset = lpp->pcie_cap_ofst;
+
+	val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCAP);
+	lpp->max_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, val);
+	lpp->max_width = FIELD_GET(PCI_EXP_LNKCAP_MLW, val);
+
+	val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCTL);
+
+	val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC);
+	pcie_rc_cfg_wr(lpp, offset + PCI_EXP_LNKCTL, val);
+}
+
+static void intel_pcie_port_logic_setup(struct intel_pcie_port *lpp)
+{
+	u32 val, mask;
+
+	switch (pcie_link_speed[lpp->max_speed]) {
+	case PCIE_SPEED_8_0GT:
+		lpp->n_fts = PORT_AFR_N_FTS_GEN3;
+		break;
+	case PCIE_SPEED_16_0GT:
+		lpp->n_fts = PORT_AFR_N_FTS_GEN4;
+		break;
+	default:
+		lpp->n_fts = PORT_AFR_N_FTS_GEN12_DFT;
+		break;
+	}
+
+	mask = PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK;
+	val = FIELD_PREP(PORT_AFR_N_FTS_MASK, lpp->n_fts) |
+	       FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, lpp->n_fts);
+	pcie_rc_cfg_wr_mask(lpp, PCIE_PORT_AFR, mask, val);
+
+	/* Port Link Control Register */
+	pcie_rc_cfg_wr_mask(lpp, PCIE_PORT_LINK_CONTROL, PORT_LINK_DLL_LINK_EN,
+			    PORT_LINK_DLL_LINK_EN);
+}
+
+static void intel_pcie_rc_setup(struct intel_pcie_port *lpp)
+{
+	intel_pcie_ltssm_disable(lpp);
+	intel_pcie_link_setup(lpp);
+	dw_pcie_setup_rc(&lpp->pci.pp);
+	dw_pcie_upconfig_setup(&lpp->pci);
+	intel_pcie_port_logic_setup(lpp);
+	dw_pcie_link_set_max_speed(&lpp->pci, lpp->link_gen);
+	dw_pcie_link_set_n_fts(&lpp->pci, lpp->n_fts);
+}
+
+static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp)
+{
+	struct device *dev = lpp->pci.dev;
+	int ret;
+
+	lpp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(lpp->reset_gpio)) {
+		ret = PTR_ERR(lpp->reset_gpio);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret);
+		return ret;
+	}
+
+	/* Make initial reset last for 100us */
+	usleep_range(100, 200);
+
+	return 0;
+}
+
+static void intel_pcie_core_rst_assert(struct intel_pcie_port *lpp)
+{
+	reset_control_assert(lpp->core_rst);
+}
+
+static void intel_pcie_core_rst_deassert(struct intel_pcie_port *lpp)
+{
+	/*
+	 * One microsecond delay to make sure the reset pulse is
+	 * wide enough so that the core reset is clean.
+	 */
+	udelay(1);
+	reset_control_deassert(lpp->core_rst);
+
+	/*
+	 * On some SoCs the core reset also resets the PHY, so more delay
+	 * is needed to make sure the reset process is done.
+	 */
+	usleep_range(1000, 2000);
+}
+
+static void intel_pcie_device_rst_assert(struct intel_pcie_port *lpp)
+{
+	gpiod_set_value_cansleep(lpp->reset_gpio, 1);
+}
+
+static void intel_pcie_device_rst_deassert(struct intel_pcie_port *lpp)
+{
+	msleep(lpp->rst_intrvl);
+	gpiod_set_value_cansleep(lpp->reset_gpio, 0);
+}
+
+static int intel_pcie_app_logic_setup(struct intel_pcie_port *lpp)
+{
+	intel_pcie_device_rst_deassert(lpp);
+	intel_pcie_ltssm_enable(lpp);
+
+	return dw_pcie_wait_for_link(&lpp->pci);
+}
+
+static void intel_pcie_core_irq_disable(struct intel_pcie_port *lpp)
+{
+	pcie_app_wr(lpp, PCIE_APP_IRNEN, 0);
+	pcie_app_wr(lpp, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
+}
+
+static int intel_pcie_get_resources(struct platform_device *pdev)
+{
+	struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
+	struct dw_pcie *pci = &lpp->pci;
+	struct device *dev = pci->dev;
+	struct resource *res;
+	int ret;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	pci->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	lpp->core_clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(lpp->core_clk)) {
+		ret = PTR_ERR(lpp->core_clk);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get clks: %d\n", ret);
+		return ret;
+	}
+
+	lpp->core_rst = devm_reset_control_get(dev, NULL);
+	if (IS_ERR(lpp->core_rst)) {
+		ret = PTR_ERR(lpp->core_rst);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get resets: %d\n", ret);
+		return ret;
+	}
+
+	ret = device_property_match_string(dev, "device_type", "pci");
+	if (ret) {
+		dev_err(dev, "Failed to find pci device type: %d\n", ret);
+		return ret;
+	}
+
+	ret = device_property_read_u32(dev, "reset-assert-ms",
+				       &lpp->rst_intrvl);
+	if (ret)
+		lpp->rst_intrvl = RESET_INTERVAL_MS;
+
+	ret = of_pci_get_max_link_speed(dev->of_node);
+	lpp->link_gen = ret < 0 ? 0 : ret;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
+	lpp->app_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(lpp->app_base))
+		return PTR_ERR(lpp->app_base);
+
+	lpp->phy = devm_phy_get(dev, "pcie");
+	if (IS_ERR(lpp->phy)) {
+		ret = PTR_ERR(lpp->phy);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Couldn't get pcie-phy: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void intel_pcie_deinit_phy(struct intel_pcie_port *lpp)
+{
+	phy_exit(lpp->phy);
+}
+
+static int intel_pcie_wait_l2(struct intel_pcie_port *lpp)
+{
+	u32 value;
+	int ret;
+
+	if (pcie_link_speed[lpp->max_speed] < PCIE_SPEED_8_0GT)
+		return 0;
+
+	/* Send PME_TURN_OFF message */
+	pcie_app_wr_mask(lpp, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
+			 PCIE_APP_MSG_XMT_PM_TURNOFF);
+
+	/* Read PMC status and wait for falling into L2 link state */
+	ret = readl_poll_timeout(lpp->app_base + PCIE_APP_PMC, value,
+				 value & PCIE_APP_PMC_IN_L2, 20,
+				 jiffies_to_usecs(5 * HZ));
+	if (ret)
+		dev_err(lpp->pci.dev, "PCIe link failed to enter L2 state (timeout)\n");
+
+	return ret;
+}
+
+static void intel_pcie_turn_off(struct intel_pcie_port *lpp)
+{
+	if (dw_pcie_link_up(&lpp->pci))
+		intel_pcie_wait_l2(lpp);
+
+	/* Put endpoint device in reset state */
+	intel_pcie_device_rst_assert(lpp);
+	pcie_rc_cfg_wr_mask(lpp, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
+}
+
+static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
+{
+	struct device *dev = lpp->pci.dev;
+	int ret;
+
+	intel_pcie_core_rst_assert(lpp);
+	intel_pcie_device_rst_assert(lpp);
+
+	ret = phy_init(lpp->phy);
+	if (ret)
+		return ret;
+
+	intel_pcie_core_rst_deassert(lpp);
+
+	ret = clk_prepare_enable(lpp->core_clk);
+	if (ret) {
+		dev_err(lpp->pci.dev, "Core clock enable failed: %d\n", ret);
+		goto clk_err;
+	}
+
+	if (!lpp->pcie_cap_ofst) {
+		ret = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP);
+		if (!ret) {
+			ret = -ENXIO;
+			dev_err(dev, "Invalid PCIe capability offset\n");
+			goto app_init_err;
+		}
+
+		lpp->pcie_cap_ofst = ret;
+	}
+
+	intel_pcie_rc_setup(lpp);
+	ret = intel_pcie_app_logic_setup(lpp);
+	if (ret)
+		goto app_init_err;
+
+	/* Enable integrated interrupts */
+	pcie_app_wr_mask(lpp, PCIE_APP_IRNEN, PCIE_APP_IRN_INT,
+			 PCIE_APP_IRN_INT);
+
+	return 0;
+
+app_init_err:
+	clk_disable_unprepare(lpp->core_clk);
+clk_err:
+	intel_pcie_core_rst_assert(lpp);
+	intel_pcie_deinit_phy(lpp);
+
+	return ret;
+}
+
+static void __intel_pcie_remove(struct intel_pcie_port *lpp)
+{
+	intel_pcie_core_irq_disable(lpp);
+	intel_pcie_turn_off(lpp);
+	clk_disable_unprepare(lpp->core_clk);
+	intel_pcie_core_rst_assert(lpp);
+	intel_pcie_deinit_phy(lpp);
+}
+
+static int intel_pcie_remove(struct platform_device *pdev)
+{
+	struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
+	struct pcie_port *pp = &lpp->pci.pp;
+
+	dw_pcie_host_deinit(pp);
+	__intel_pcie_remove(lpp);
+
+	return 0;
+}
+
+static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
+{
+	struct intel_pcie_port *lpp = dev_get_drvdata(dev);
+	int ret;
+
+	intel_pcie_core_irq_disable(lpp);
+	ret = intel_pcie_wait_l2(lpp);
+	if (ret)
+		return ret;
+
+	intel_pcie_deinit_phy(lpp);
+	clk_disable_unprepare(lpp->core_clk);
+	return ret;
+}
+
+static int __maybe_unused intel_pcie_resume_noirq(struct device *dev)
+{
+	struct intel_pcie_port *lpp = dev_get_drvdata(dev);
+
+	return intel_pcie_host_setup(lpp);
+}
+
+static int intel_pcie_rc_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct intel_pcie_port *lpp = dev_get_drvdata(pci->dev);
+
+	return intel_pcie_host_setup(lpp);
+}
+
+/*
+ * Dummy function so that DW core doesn't configure MSI
+ */
+static int intel_pcie_msi_init(struct pcie_port *pp)
+{
+	return 0;
+}
+
+u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
+{
+	return cpu_addr + BUS_IATU_OFFSET;
+}
+
+static const struct dw_pcie_ops intel_pcie_ops = {
+	.cpu_addr_fixup = intel_pcie_cpu_addr,
+};
+
+static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
+	.host_init =		intel_pcie_rc_init,
+	.msi_host_init =	intel_pcie_msi_init,
+};
+
+static const struct intel_pcie_soc pcie_data = {
+	.pcie_ver =		0x520A,
+	.pcie_atu_offset =	0xC0000,
+	.num_viewport =		3,
+};
+
+static int intel_pcie_probe(struct platform_device *pdev)
+{
+	const struct intel_pcie_soc *data;
+	struct device *dev = &pdev->dev;
+	struct intel_pcie_port *lpp;
+	struct pcie_port *pp;
+	struct dw_pcie *pci;
+	int ret;
+
+	lpp = devm_kzalloc(dev, sizeof(*lpp), GFP_KERNEL);
+	if (!lpp)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, lpp);
+	pci = &lpp->pci;
+	pci->dev = dev;
+	pp = &pci->pp;
+
+	ret = intel_pcie_get_resources(pdev);
+	if (ret)
+		return ret;
+
+	ret = intel_pcie_ep_rst_init(lpp);
+	if (ret)
+		return ret;
+
+	data = device_get_match_data(dev);
+	if (!data)
+		return -ENODEV;
+
+	pci->ops = &intel_pcie_ops;
+	pci->version = data->pcie_ver;
+	pci->atu_base = pci->dbi_base + data->pcie_atu_offset;
+	pp->ops = &intel_pcie_dw_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "Cannot initialize host\n");
+		return ret;
+	}
+
+	/*
+	 * The Intel PCIe controller doesn't configure an IO region, so set
+	 * the number of viewports such that no IO region access is performed.
+	 */
+	pci->num_viewport = data->num_viewport;
+
+	return 0;
+}
+
+static const struct dev_pm_ops intel_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
+				      intel_pcie_resume_noirq)
+};
+
+static const struct of_device_id of_intel_pcie_match[] = {
+	{ .compatible = "intel,lgm-pcie", .data = &pcie_data },
+	{}
+};
+
+static struct platform_driver intel_pcie_driver = {
+	.probe = intel_pcie_probe,
+	.remove = intel_pcie_remove,
+	.driver = {
+		.name = "intel-gw-pcie",
+		.of_match_table = of_intel_pcie_match,
+		.pm = &intel_pcie_pm_ops,
+	},
+};
+builtin_platform_driver(intel_pcie_driver);
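
The probe path above relies entirely on per-SoC match data obtained through device_get_match_data(). A hypothetical example of extending it for another compatible string; all values below are placeholders, not real hardware data:

	static const struct intel_pcie_soc example_pcie_data = {
		.pcie_ver	 = 0x530A,	/* placeholder IP version */
		.pcie_atu_offset = 0xD0000,	/* placeholder iATU offset from dbi */
		.num_viewport	 = 3,
	};

	static const struct of_device_id of_example_pcie_match[] = {
		{ .compatible = "intel,example-pcie", .data = &example_pcie_data },
		{}
	};
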
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 7e58174..5ea527a 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -54,6 +54,7 @@
 #define PCIE20_PARF_LTSSM			0x1B0
 #define PCIE20_PARF_SID_OFFSET			0x234
 #define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
+#define PCIE20_PARF_DEVICE_TYPE			0x1000
 
 #define PCIE20_ELBI_SYS_CTRL			0x04
 #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)
@@ -80,6 +81,8 @@
 #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
 #define SLV_ADDR_SPACE_SZ			0x10000000
 
+#define DEVICE_TYPE_RC				0x4
+
 #define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
 struct qcom_pcie_resources_2_1_0 {
 	struct clk *iface_clk;
@@ -139,12 +142,20 @@ struct qcom_pcie_resources_2_3_3 {
 	struct reset_control *rst[7];
 };
 
+struct qcom_pcie_resources_2_7_0 {
+	struct clk_bulk_data clks[6];
+	struct regulator_bulk_data supplies[2];
+	struct reset_control *pci_reset;
+	struct clk *pipe_clk;
+};
+
 union qcom_pcie_resources {
 	struct qcom_pcie_resources_1_0_0 v1_0_0;
 	struct qcom_pcie_resources_2_1_0 v2_1_0;
 	struct qcom_pcie_resources_2_3_2 v2_3_2;
 	struct qcom_pcie_resources_2_3_3 v2_3_3;
 	struct qcom_pcie_resources_2_4_0 v2_4_0;
+	struct qcom_pcie_resources_2_7_0 v2_7_0;
 };
 
 struct qcom_pcie;
@@ -1068,6 +1079,134 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
 	return ret;
 }
 
+static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+	if (IS_ERR(res->pci_reset))
+		return PTR_ERR(res->pci_reset);
+
+	res->supplies[0].supply = "vdda";
+	res->supplies[1].supply = "vddpe-3v3";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+				      res->supplies);
+	if (ret)
+		return ret;
+
+	res->clks[0].id = "aux";
+	res->clks[1].id = "cfg";
+	res->clks[2].id = "bus_master";
+	res->clks[3].id = "bus_slave";
+	res->clks[4].id = "slave_q2a";
+	res->clks[5].id = "tbu";
+
+	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+	if (ret < 0)
+		return ret;
+
+	res->pipe_clk = devm_clk_get(dev, "pipe");
+	return PTR_ERR_OR_ZERO(res->pipe_clk);
+}
+
+static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	u32 val;
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+	if (ret < 0) {
+		dev_err(dev, "cannot enable regulators\n");
+		return ret;
+	}
+
+	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+	if (ret < 0)
+		goto err_disable_regulators;
+
+	ret = reset_control_assert(res->pci_reset);
+	if (ret < 0) {
+		dev_err(dev, "cannot assert pci reset\n");
+		goto err_disable_clocks;
+	}
+
+	usleep_range(1000, 1500);
+
+	ret = reset_control_deassert(res->pci_reset);
+	if (ret < 0) {
+		dev_err(dev, "cannot deassert pci reset\n");
+		goto err_disable_clocks;
+	}
+
+	ret = clk_prepare_enable(res->pipe_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable pipe clock\n");
+		goto err_disable_clocks;
+	}
+
+	/* configure PCIe to RC mode */
+	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	/* change DBI base address */
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	/* MAC PHY_POWERDOWN MUX DISABLE  */
+	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+	val &= ~BIT(29);
+	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+	val |= BIT(4);
+	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+		val |= BIT(31);
+		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+	}
+
+	return 0;
+err_disable_clocks:
+	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+err_disable_regulators:
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+	return ret;
+}
+
+static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+	return clk_prepare_enable(res->pipe_clk);
+}
+
+static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+	clk_disable_unprepare(res->pipe_clk);
+}
+
 static int qcom_pcie_link_up(struct dw_pcie *pci)
 {
 	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
@@ -1167,6 +1306,16 @@ static const struct qcom_pcie_ops ops_2_3_3 = {
 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
 };
 
+/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_7_0 = {
+	.get_resources = qcom_pcie_get_resources_2_7_0,
+	.init = qcom_pcie_init_2_7_0,
+	.deinit = qcom_pcie_deinit_2_7_0,
+	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+	.post_init = qcom_pcie_post_init_2_7_0,
+	.post_deinit = qcom_pcie_post_deinit_2_7_0,
+};
+
 static const struct dw_pcie_ops dw_pcie_ops = {
 	.link_up = qcom_pcie_link_up,
 };
@@ -1282,6 +1431,7 @@ static const struct of_device_id qcom_pcie_match[] = {
 	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
 	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
 	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
+	{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
 	{ }
 };
 
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 8fd7bad..a5401a0 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -9,11 +9,11 @@
 #include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/iopoll.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
-#include <linux/module.h>
 #include <linux/of_irq.h>
 #include <linux/pci.h>
 #include <linux/phy/phy.h>
@@ -171,12 +171,6 @@ static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv)
 	writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX);
 }
 
-static void uniphier_pcie_irq_disable(struct uniphier_pcie_priv *priv)
-{
-	writel(0, priv->base + PCL_RCV_INT);
-	writel(0, priv->base + PCL_RCV_INTX);
-}
-
 static void uniphier_pcie_irq_ack(struct irq_data *d)
 {
 	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
@@ -397,14 +391,6 @@ static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv)
 	return ret;
 }
 
-static void uniphier_pcie_host_disable(struct uniphier_pcie_priv *priv)
-{
-	uniphier_pcie_irq_disable(priv);
-	phy_exit(priv->phy);
-	reset_control_assert(priv->rst);
-	clk_disable_unprepare(priv->clk);
-}
-
 static const struct dw_pcie_ops dw_pcie_ops = {
 	.start_link = uniphier_pcie_establish_link,
 	.stop_link = uniphier_pcie_stop_link,
@@ -456,31 +442,16 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
 	return uniphier_add_pcie_port(priv, pdev);
 }
 
-static int uniphier_pcie_remove(struct platform_device *pdev)
-{
-	struct uniphier_pcie_priv *priv = platform_get_drvdata(pdev);
-
-	uniphier_pcie_host_disable(priv);
-
-	return 0;
-}
-
 static const struct of_device_id uniphier_pcie_match[] = {
 	{ .compatible = "socionext,uniphier-pcie", },
 	{ /* sentinel */ },
 };
-MODULE_DEVICE_TABLE(of, uniphier_pcie_match);
 
 static struct platform_driver uniphier_pcie_driver = {
 	.probe  = uniphier_pcie_probe,
-	.remove = uniphier_pcie_remove,
 	.driver = {
 		.name = "uniphier-pcie",
 		.of_match_table = uniphier_pcie_match,
 	},
 };
 builtin_platform_driver(uniphier_pcie_driver);
-
-MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
-MODULE_DESCRIPTION("UniPhier PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 673a172..0e03cef 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -1406,7 +1406,7 @@ static struct phy *devm_of_phy_optional_get_index(struct device *dev,
 	phy = devm_of_phy_get(dev, np, name);
 	kfree(name);
 
-	if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
+	if (PTR_ERR(phy) == -ENODEV)
 		phy = NULL;
 
 	return phy;
@@ -2499,7 +2499,6 @@ static const struct tegra_pcie_soc tegra20_pcie = {
 	.num_ports = 2,
 	.ports = tegra20_pcie_ports,
 	.msi_base_shift = 0,
-	.afi_pex2_ctrl = 0x128,
 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
 	.pads_refclk_cfg0 = 0xfa5cfa5c,
@@ -2528,6 +2527,7 @@ static const struct tegra_pcie_soc tegra30_pcie = {
 	.num_ports = 3,
 	.ports = tegra30_pcie_ports,
 	.msi_base_shift = 8,
+	.afi_pex2_ctrl = 0x128,
 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
 	.pads_refclk_cfg0 = 0xfa5cfa5c,
@@ -2798,7 +2798,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
 
 	pm_runtime_enable(pcie->dev);
 	err = pm_runtime_get_sync(pcie->dev);
-	if (err) {
+	if (err < 0) {
 		dev_err(dev, "fail to enable pcie controller: %d\n", err);
 		goto teardown_msi;
 	}
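
The change above matters because pm_runtime_get_sync() may return a positive value (for example 1 when the device was already active), which is not an error. A minimal sketch of the intended check; the cleanup shown is the common idiom, whereas the driver above jumps to its own teardown label instead:

	err = pm_runtime_get_sync(dev);
	if (err < 0) {				/* only negative values are errors */
		pm_runtime_put_noidle(dev);	/* usage count is bumped even on failure */
		return err;
	}
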
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
new file mode 100644
index 0000000..d20aabc
--- /dev/null
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -0,0 +1,1015 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2009 - 2019 Broadcom */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/printk.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "../pci.h"
+
+/* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
+#define BRCM_PCIE_CAP_REGS				0x00ac
+
+/* Broadcom STB PCIe Register Offsets */
+#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1				0x0188
+#define  PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK	0xc
+#define  PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN			0x0
+
+#define PCIE_RC_CFG_PRIV1_ID_VAL3			0x043c
+#define  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK	0xffffff
+
+#define PCIE_RC_DL_MDIO_ADDR				0x1100
+#define PCIE_RC_DL_MDIO_WR_DATA				0x1104
+#define PCIE_RC_DL_MDIO_RD_DATA				0x1108
+
+#define PCIE_MISC_MISC_CTRL				0x4008
+#define  PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK		0x1000
+#define  PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK	0x2000
+#define  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK	0x300000
+#define  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_128		0x0
+#define  PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK		0xf8000000
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO		0x400c
+#define PCIE_MEM_WIN0_LO(win)	\
+		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 4)
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI		0x4010
+#define PCIE_MEM_WIN0_HI(win)	\
+		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 4)
+
+#define PCIE_MISC_RC_BAR1_CONFIG_LO			0x402c
+#define  PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK		0x1f
+
+#define PCIE_MISC_RC_BAR2_CONFIG_LO			0x4034
+#define  PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK		0x1f
+#define PCIE_MISC_RC_BAR2_CONFIG_HI			0x4038
+
+#define PCIE_MISC_RC_BAR3_CONFIG_LO			0x403c
+#define  PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK		0x1f
+
+#define PCIE_MISC_MSI_BAR_CONFIG_LO			0x4044
+#define PCIE_MISC_MSI_BAR_CONFIG_HI			0x4048
+
+#define PCIE_MISC_MSI_DATA_CONFIG			0x404c
+#define  PCIE_MISC_MSI_DATA_CONFIG_VAL			0xffe06540
+
+#define PCIE_MISC_PCIE_CTRL				0x4064
+#define  PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK	0x1
+
+#define PCIE_MISC_PCIE_STATUS				0x4068
+#define  PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK		0x80
+#define  PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK	0x20
+#define  PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK	0x10
+#define  PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK	0x40
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT		0x4070
+#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK	0xfff00000
+#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK	0xfff0
+#define PCIE_MEM_WIN0_BASE_LIMIT(win)	\
+		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT + ((win) * 4)
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI			0x4080
+#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK	0xff
+#define PCIE_MEM_WIN0_BASE_HI(win)	\
+		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI + ((win) * 8)
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI			0x4084
+#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK	0xff
+#define PCIE_MEM_WIN0_LIMIT_HI(win)	\
+		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
+
+#define PCIE_MISC_HARD_PCIE_HARD_DEBUG					0x4204
+#define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK	0x2
+#define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK		0x08000000
+
+#define PCIE_MSI_INTR2_STATUS				0x4500
+#define PCIE_MSI_INTR2_CLR				0x4508
+#define PCIE_MSI_INTR2_MASK_SET				0x4510
+#define PCIE_MSI_INTR2_MASK_CLR				0x4514
+
+#define PCIE_EXT_CFG_DATA				0x8000
+
+#define PCIE_EXT_CFG_INDEX				0x9000
+#define  PCIE_EXT_BUSNUM_SHIFT				20
+#define  PCIE_EXT_SLOT_SHIFT				15
+#define  PCIE_EXT_FUNC_SHIFT				12
+
+#define PCIE_RGR1_SW_INIT_1				0x9210
+#define  PCIE_RGR1_SW_INIT_1_PERST_MASK			0x1
+#define  PCIE_RGR1_SW_INIT_1_INIT_MASK			0x2
+
+/* PCIe parameters */
+#define BRCM_NUM_PCIE_OUT_WINS		0x4
+#define BRCM_INT_PCI_MSI_NR		32
+
+/* MSI target addresses */
+#define BRCM_MSI_TARGET_ADDR_LT_4GB	0x0fffffffcULL
+#define BRCM_MSI_TARGET_ADDR_GT_4GB	0xffffffffcULL
+
+/* MDIO registers */
+#define MDIO_PORT0			0x0
+#define MDIO_DATA_MASK			0x7fffffff
+#define MDIO_PORT_MASK			0xf0000
+#define MDIO_REGAD_MASK			0xffff
+#define MDIO_CMD_MASK			0xfff00000
+#define MDIO_CMD_READ			0x1
+#define MDIO_CMD_WRITE			0x0
+#define MDIO_DATA_DONE_MASK		0x80000000
+#define MDIO_RD_DONE(x)			(((x) & MDIO_DATA_DONE_MASK) ? 1 : 0)
+#define MDIO_WT_DONE(x)			(((x) & MDIO_DATA_DONE_MASK) ? 0 : 1)
+#define SSC_REGS_ADDR			0x1100
+#define SET_ADDR_OFFSET			0x1f
+#define SSC_CNTL_OFFSET			0x2
+#define SSC_CNTL_OVRD_EN_MASK		0x8000
+#define SSC_CNTL_OVRD_VAL_MASK		0x4000
+#define SSC_STATUS_OFFSET		0x1
+#define SSC_STATUS_SSC_MASK		0x400
+#define SSC_STATUS_PLL_LOCK_MASK	0x800
+
+struct brcm_msi {
+	struct device		*dev;
+	void __iomem		*base;
+	struct device_node	*np;
+	struct irq_domain	*msi_domain;
+	struct irq_domain	*inner_domain;
+	struct mutex		lock; /* guards the alloc/free operations */
+	u64			target_addr;
+	int			irq;
+	/* used indicates which MSI interrupts have been alloc'd */
+	unsigned long		used;
+};
+
+/* Internal PCIe Host Controller Information. */
+struct brcm_pcie {
+	struct device		*dev;
+	void __iomem		*base;
+	struct clk		*clk;
+	struct pci_bus		*root_bus;
+	struct device_node	*np;
+	bool			ssc;
+	int			gen;
+	u64			msi_target_addr;
+	struct brcm_msi		*msi;
+};
+
+/*
+ * This is to convert the size of the inbound "BAR" region to the
+ * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE
+ */
+static int brcm_pcie_encode_ibar_size(u64 size)
+{
+	int log2_in = ilog2(size);
+
+	if (log2_in >= 12 && log2_in <= 15)
+		/* Covers 4KB to 32KB (inclusive) */
+		return (log2_in - 12) + 0x1c;
+	else if (log2_in >= 16 && log2_in <= 35)
+		/* Covers 64KB to 32GB (inclusive) */
+		return log2_in - 15;
+	/* Something is awry so disable */
+	return 0;
+}
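+
+For reference, a few worked values of the encoding implemented above, derived directly from the two ranges in the function:
+
+	brcm_pcie_encode_ibar_size(SZ_4K)  == 0x1c	/* log2 = 12 */
+	brcm_pcie_encode_ibar_size(SZ_32K) == 0x1f	/* log2 = 15 */
+	brcm_pcie_encode_ibar_size(SZ_64K) == 0x01	/* log2 = 16 */
+	brcm_pcie_encode_ibar_size(SZ_1G)  == 0x0f	/* log2 = 30 */
+	brcm_pcie_encode_ibar_size(SZ_2K)  == 0		/* out of range: window disabled */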
+
+static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
+{
+	u32 pkt = 0;
+
+	pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
+	pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
+	pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);
+
+	return pkt;
+}
+
+/* negative return value indicates error */
+static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val)
+{
+	int tries;
+	u32 data;
+
+	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ),
+		   base + PCIE_RC_DL_MDIO_ADDR);
+	readl(base + PCIE_RC_DL_MDIO_ADDR);
+
+	data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
+	for (tries = 0; !MDIO_RD_DONE(data) && tries < 10; tries++) {
+		udelay(10);
+		data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
+	}
+
+	*val = FIELD_GET(MDIO_DATA_MASK, data);
+	return MDIO_RD_DONE(data) ? 0 : -EIO;
+}
+
+/* negative return value indicates error */
+static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
+				u8 regad, u16 wrdata)
+{
+	int tries;
+	u32 data;
+
+	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE),
+		   base + PCIE_RC_DL_MDIO_ADDR);
+	readl(base + PCIE_RC_DL_MDIO_ADDR);
+	writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
+
+	data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
+	for (tries = 0; !MDIO_WT_DONE(data) && tries < 10; tries++) {
+		udelay(10);
+		data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
+	}
+
+	return MDIO_WT_DONE(data) ? 0 : -EIO;
+}
+
+/*
+ * Configures device for Spread Spectrum Clocking (SSC) mode; a negative
+ * return value indicates error.
+ */
+static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
+{
+	int pll, ssc;
+	int ret;
+	u32 tmp;
+
+	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET,
+				   SSC_REGS_ADDR);
+	if (ret < 0)
+		return ret;
+
+	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
+				  SSC_CNTL_OFFSET, &tmp);
+	if (ret < 0)
+		return ret;
+
+	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_EN_MASK);
+	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_VAL_MASK);
+	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0,
+				   SSC_CNTL_OFFSET, tmp);
+	if (ret < 0)
+		return ret;
+
+	usleep_range(1000, 2000);
+	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
+				  SSC_STATUS_OFFSET, &tmp);
+	if (ret < 0)
+		return ret;
+
+	ssc = FIELD_GET(SSC_STATUS_SSC_MASK, tmp);
+	pll = FIELD_GET(SSC_STATUS_PLL_LOCK_MASK, tmp);
+
+	return ssc && pll ? 0 : -EIO;
+}
+
+/* Limits operation to a specific generation (1, 2, or 3) */
+static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
+{
+	u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
+	u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+
+	lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
+	writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+
+	lnkctl2 = (lnkctl2 & ~0xf) | gen;
+	writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
+}
+
+static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
+				       unsigned int win, u64 cpu_addr,
+				       u64 pcie_addr, u64 size)
+{
+	u32 cpu_addr_mb_high, limit_addr_mb_high;
+	phys_addr_t cpu_addr_mb, limit_addr_mb;
+	int high_addr_shift;
+	u32 tmp;
+
+	/* Set the base of the pcie_addr window */
+	writel(lower_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_LO(win));
+	writel(upper_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_HI(win));
+
+	/* Write the addr base & limit lower bits (in MBs) */
+	cpu_addr_mb = cpu_addr / SZ_1M;
+	limit_addr_mb = (cpu_addr + size - 1) / SZ_1M;
+
+	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
+	u32p_replace_bits(&tmp, cpu_addr_mb,
+			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
+	u32p_replace_bits(&tmp, limit_addr_mb,
+			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK);
+	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
+
+	/* Write the cpu & limit addr upper bits */
+	high_addr_shift =
+		HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
+
+	cpu_addr_mb_high = cpu_addr_mb >> high_addr_shift;
+	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
+	u32p_replace_bits(&tmp, cpu_addr_mb_high,
+			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK);
+	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
+
+	limit_addr_mb_high = limit_addr_mb >> high_addr_shift;
+	tmp = readl(pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
+	u32p_replace_bits(&tmp, limit_addr_mb_high,
+			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK);
+	writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
+}
+
+static struct irq_chip brcm_msi_irq_chip = {
+	.name            = "BRCM STB PCIe MSI",
+	.irq_ack         = irq_chip_ack_parent,
+	.irq_mask        = pci_msi_mask_irq,
+	.irq_unmask      = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info brcm_msi_domain_info = {
+	/* Multi MSI is supported by the controller, but not by this driver */
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+	.chip	= &brcm_msi_irq_chip,
+};
+
+static void brcm_pcie_msi_isr(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned long status, virq;
+	struct brcm_msi *msi;
+	struct device *dev;
+	u32 bit;
+
+	chained_irq_enter(chip, desc);
+	msi = irq_desc_get_handler_data(desc);
+	dev = msi->dev;
+
+	status = readl(msi->base + PCIE_MSI_INTR2_STATUS);
+	for_each_set_bit(bit, &status, BRCM_INT_PCI_MSI_NR) {
+		virq = irq_find_mapping(msi->inner_domain, bit);
+		if (virq)
+			generic_handle_irq(virq);
+		else
+			dev_dbg(dev, "unexpected MSI\n");
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
+
+	msg->address_lo = lower_32_bits(msi->target_addr);
+	msg->address_hi = upper_32_bits(msi->target_addr);
+	msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL) | data->hwirq;
+}
+
+static int brcm_msi_set_affinity(struct irq_data *irq_data,
+				 const struct cpumask *mask, bool force)
+{
+	return -EINVAL;
+}
+
+static void brcm_msi_ack_irq(struct irq_data *data)
+{
+	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
+
+	writel(1 << data->hwirq, msi->base + PCIE_MSI_INTR2_CLR);
+}
+
+
+static struct irq_chip brcm_msi_bottom_irq_chip = {
+	.name			= "BRCM STB MSI",
+	.irq_compose_msi_msg	= brcm_msi_compose_msi_msg,
+	.irq_set_affinity	= brcm_msi_set_affinity,
+	.irq_ack                = brcm_msi_ack_irq,
+};
+
+static int brcm_msi_alloc(struct brcm_msi *msi)
+{
+	int hwirq;
+
+	mutex_lock(&msi->lock);
+	hwirq = bitmap_find_free_region(&msi->used, BRCM_INT_PCI_MSI_NR, 0);
+	mutex_unlock(&msi->lock);
+
+	return hwirq;
+}
+
+static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq)
+{
+	mutex_lock(&msi->lock);
+	bitmap_release_region(&msi->used, hwirq, 0);
+	mutex_unlock(&msi->lock);
+}
+
+static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				 unsigned int nr_irqs, void *args)
+{
+	struct brcm_msi *msi = domain->host_data;
+	int hwirq;
+
+	hwirq = brcm_msi_alloc(msi);
+
+	if (hwirq < 0)
+		return hwirq;
+
+	irq_domain_set_info(domain, virq, (irq_hw_number_t)hwirq,
+			    &brcm_msi_bottom_irq_chip, domain->host_data,
+			    handle_edge_irq, NULL, NULL);
+	return 0;
+}
+
+static void brcm_irq_domain_free(struct irq_domain *domain,
+				 unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct brcm_msi *msi = irq_data_get_irq_chip_data(d);
+
+	brcm_msi_free(msi, d->hwirq);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+	.alloc	= brcm_irq_domain_alloc,
+	.free	= brcm_irq_domain_free,
+};
+
+static int brcm_allocate_domains(struct brcm_msi *msi)
+{
+	struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
+	struct device *dev = msi->dev;
+
+	msi->inner_domain = irq_domain_add_linear(NULL, BRCM_INT_PCI_MSI_NR,
+						  &msi_domain_ops, msi);
+	if (!msi->inner_domain) {
+		dev_err(dev, "failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+						    &brcm_msi_domain_info,
+						    msi->inner_domain);
+	if (!msi->msi_domain) {
+		dev_err(dev, "failed to create MSI domain\n");
+		irq_domain_remove(msi->inner_domain);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void brcm_free_domains(struct brcm_msi *msi)
+{
+	irq_domain_remove(msi->msi_domain);
+	irq_domain_remove(msi->inner_domain);
+}
+
+static void brcm_msi_remove(struct brcm_pcie *pcie)
+{
+	struct brcm_msi *msi = pcie->msi;
+
+	if (!msi)
+		return;
+	irq_set_chained_handler(msi->irq, NULL);
+	irq_set_handler_data(msi->irq, NULL);
+	brcm_free_domains(msi);
+}
+
+static void brcm_msi_set_regs(struct brcm_msi *msi)
+{
+	writel(0xffffffff, msi->base + PCIE_MSI_INTR2_MASK_CLR);
+
+	/*
+	 * Bit 0 of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed as the MSI
+	 * enable bit, which we set to 1.
+	 */
+	writel(lower_32_bits(msi->target_addr) | 0x1,
+	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_LO);
+	writel(upper_32_bits(msi->target_addr),
+	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);
+
+	writel(PCIE_MISC_MSI_DATA_CONFIG_VAL,
+	       msi->base + PCIE_MISC_MSI_DATA_CONFIG);
+}
+
+static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
+{
+	struct brcm_msi *msi;
+	int irq, ret;
+	struct device *dev = pcie->dev;
+
+	irq = irq_of_parse_and_map(dev->of_node, 1);
+	if (irq <= 0) {
+		dev_err(dev, "cannot map MSI interrupt\n");
+		return -ENODEV;
+	}
+
+	msi = devm_kzalloc(dev, sizeof(struct brcm_msi), GFP_KERNEL);
+	if (!msi)
+		return -ENOMEM;
+
+	mutex_init(&msi->lock);
+	msi->dev = dev;
+	msi->base = pcie->base;
+	msi->np = pcie->np;
+	msi->target_addr = pcie->msi_target_addr;
+	msi->irq = irq;
+
+	ret = brcm_allocate_domains(msi);
+	if (ret)
+		return ret;
+
+	irq_set_chained_handler_and_data(msi->irq, brcm_pcie_msi_isr, msi);
+
+	brcm_msi_set_regs(msi);
+	pcie->msi = msi;
+
+	return 0;
+}
+
+/* The controller is capable of serving in both RC and EP roles */
+static bool brcm_pcie_rc_mode(struct brcm_pcie *pcie)
+{
+	void __iomem *base = pcie->base;
+	u32 val = readl(base + PCIE_MISC_PCIE_STATUS);
+
+	return !!FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK, val);
+}
+
+static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
+{
+	u32 val = readl(pcie->base + PCIE_MISC_PCIE_STATUS);
+	u32 dla = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK, val);
+	u32 plu = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK, val);
+
+	return dla && plu;
+}
+
+/* Configuration space read/write support */
+static inline int brcm_pcie_cfg_index(int busnr, int devfn, int reg)
+{
+	return ((PCI_SLOT(devfn) & 0x1f) << PCIE_EXT_SLOT_SHIFT)
+		| ((PCI_FUNC(devfn) & 0x07) << PCIE_EXT_FUNC_SHIFT)
+		| (busnr << PCIE_EXT_BUSNUM_SHIFT)
+		| (reg & ~3);
+}
+
+static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
+					int where)
+{
+	struct brcm_pcie *pcie = bus->sysdata;
+	void __iomem *base = pcie->base;
+	int idx;
+
+	/* Accesses to the RC go right to the RC registers if slot==0 */
+	if (pci_is_root_bus(bus))
+		return PCI_SLOT(devfn) ? NULL : base + where;
+
+	/* For devices, write to the config space index register */
+	idx = brcm_pcie_cfg_index(bus->number, devfn, 0);
+	writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
+	return base + PCIE_EXT_CFG_DATA + where;
+}
+
+static struct pci_ops brcm_pcie_ops = {
+	.map_bus = brcm_pcie_map_conf,
+	.read = pci_generic_config_read,
+	.write = pci_generic_config_write,
+};
+
+static inline void brcm_pcie_bridge_sw_init_set(struct brcm_pcie *pcie, u32 val)
+{
+	u32 tmp;
+
+	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1);
+	u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_INIT_MASK);
+	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1);
+}
+
+static inline void brcm_pcie_perst_set(struct brcm_pcie *pcie, u32 val)
+{
+	u32 tmp;
+
+	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1);
+	u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
+	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1);
+}
+
+static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
+							u64 *rc_bar2_size,
+							u64 *rc_bar2_offset)
+{
+	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+	struct device *dev = pcie->dev;
+	struct resource_entry *entry;
+
+	entry = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM);
+	if (!entry)
+		return -ENODEV;
+
+
+	/*
+	 * The controller expects the inbound window offset to be calculated as
+	 * the difference between PCIe's address space and CPU's. The offset
+	 * provided by the firmware is calculated the opposite way, so we
+	 * negate it.
+	 */
+	*rc_bar2_offset = -entry->offset;
+	*rc_bar2_size = 1ULL << fls64(entry->res->end - entry->res->start);
+
+	/*
+	 * We validate the inbound memory view even though we should trust
+	 * whatever the device-tree provides. This is because of an HW issue on
+	 * early Raspberry Pi 4 revisions (bcm2711). It turns out its
+	 * firmware has to dynamically edit dma-ranges due to a bug in the
+	 * PCIe controller integration, which prohibits any access above the
+	 * lower 3GB of memory. Given this, we decided to keep the dma-ranges
+	 * in check, avoiding hard to debug device-tree related issues in the
+	 * future:
+	 *
+	 * The PCIe host controller by design must set the inbound viewport to
+	 * be a contiguous arrangement of all of the system's memory.  In
+	 * addition, its size must be a power of two.  To further complicate
+	 * matters, the viewport must start on a pcie-address that is aligned
+	 * on a multiple of its size.  If a portion of the viewport does not
+	 * represent system memory -- e.g. 3GB of memory requires a 4GB
+	 * viewport -- we can map the outbound memory in or after 3GB and even
+	 * though the viewport will overlap the outbound memory the controller
+	 * will know to send outbound memory downstream and everything else
+	 * upstream.
+	 *
+	 * For example:
+	 *
+	 * - In the best-case scenario (memory up to 3GB), we place the inbound
+	 *   region in the first 4GB of pcie-space, as some legacy devices can
+	 *   only address 32 bits. We would also like to put the MSI target
+	 *   under 4GB, since some devices require a 32-bit MSI target address.
+	 *
+	 * - If the system memory is 4GB or larger we cannot start the inbound
+	 *   region at location 0 (since we have to allow some space for
+	 *   outbound memory @ 3GB). So instead it will start at the 1x
+	 *   multiple of its size.
+	 */
+	if (!*rc_bar2_size || *rc_bar2_offset % *rc_bar2_size ||
+	    (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
+		dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
+			*rc_bar2_size, *rc_bar2_offset);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int brcm_pcie_setup(struct brcm_pcie *pcie)
+{
+	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+	u64 rc_bar2_offset, rc_bar2_size;
+	void __iomem *base = pcie->base;
+	struct device *dev = pcie->dev;
+	struct resource_entry *entry;
+	unsigned int scb_size_val;
+	bool ssc_good = false;
+	struct resource *res;
+	int num_out_wins = 0;
+	u16 nlw, cls, lnksta;
+	int i, ret;
+	u32 tmp;
+
+	/* Reset the bridge */
+	brcm_pcie_bridge_sw_init_set(pcie, 1);
+
+	usleep_range(100, 200);
+
+	/* Take the bridge out of reset */
+	brcm_pcie_bridge_sw_init_set(pcie, 0);
+
+	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+	tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
+	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+	/* Wait for SerDes to be stable */
+	usleep_range(100, 200);
+
+	/* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */
+	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
+	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
+	u32p_replace_bits(&tmp, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_128,
+			  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
+	writel(tmp, base + PCIE_MISC_MISC_CTRL);
+
+	ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
+						    &rc_bar2_offset);
+	if (ret)
+		return ret;
+
+	tmp = lower_32_bits(rc_bar2_offset);
+	u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
+			  PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
+	writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
+	writel(upper_32_bits(rc_bar2_offset),
+	       base + PCIE_MISC_RC_BAR2_CONFIG_HI);
+
+	scb_size_val = rc_bar2_size ?
+		       ilog2(rc_bar2_size) - 15 : 0xf; /* 0xf is 1GB */
+	tmp = readl(base + PCIE_MISC_MISC_CTRL);
+	u32p_replace_bits(&tmp, scb_size_val,
+			  PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK);
+	writel(tmp, base + PCIE_MISC_MISC_CTRL);
+
+	/*
+	 * We ideally want the MSI target address to be located in the 32bit
+	 * addressable memory area. Some devices might depend on it. This is
+	 * possible either when the inbound window is located above the lower
+	 * 4GB or when the inbound area is smaller than 4GB (taking into
+	 * account the rounding-up we're forced to perform).
+	 */
+	if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
+		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
+	else
+		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
+
+	/* disable the PCIe->GISB memory window (RC_BAR1) */
+	tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
+	tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
+	writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);
+
+	/* disable the PCIe->SCB memory window (RC_BAR3) */
+	tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
+	tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
+	writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
+
+	/* Mask all interrupts since we are not handling any yet */
+	writel(0xffffffff, pcie->base + PCIE_MSI_INTR2_MASK_SET);
+
+	/* clear any interrupts we find on boot */
+	writel(0xffffffff, pcie->base + PCIE_MSI_INTR2_CLR);
+
+	if (pcie->gen)
+		brcm_pcie_set_gen(pcie, pcie->gen);
+
+	/* Unassert the fundamental reset */
+	brcm_pcie_perst_set(pcie, 0);
+
+	/*
+	 * Give the RC/EP time to wake up, before trying to configure RC.
+	 * Intermittently check status for link-up, up to a total of 100ms.
+	 */
+	for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
+		msleep(5);
+
+	if (!brcm_pcie_link_up(pcie)) {
+		dev_err(dev, "link down\n");
+		return -ENODEV;
+	}
+
+	if (!brcm_pcie_rc_mode(pcie)) {
+		dev_err(dev, "PCIe misconfigured; is in EP mode\n");
+		return -EINVAL;
+	}
+
+	resource_list_for_each_entry(entry, &bridge->windows) {
+		res = entry->res;
+
+		if (resource_type(res) != IORESOURCE_MEM)
+			continue;
+
+		if (num_out_wins >= BRCM_NUM_PCIE_OUT_WINS) {
+			dev_err(pcie->dev, "too many outbound wins\n");
+			return -EINVAL;
+		}
+
+		brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start,
+					   res->start - entry->offset,
+					   resource_size(res));
+		num_out_wins++;
+	}
+
+	/*
+	 * For config space accesses on the RC, show the right class for
+	 * a PCIe-PCIe bridge (the default setting is to be EP mode).
+	 */
+	tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+	u32p_replace_bits(&tmp, 0x060400,
+			  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
+	writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+
+	if (pcie->ssc) {
+		ret = brcm_pcie_set_ssc(pcie);
+		if (ret == 0)
+			ssc_good = true;
+		else
+			dev_err(dev, "failed attempt to enter ssc mode\n");
+	}
+
+	lnksta = readw(base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKSTA);
+	cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
+	nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
+	dev_info(dev, "link up, %s x%u %s\n",
+		 PCIE_SPEED2STR(cls + PCI_SPEED_133MHz_PCIX_533),
+		 nlw, ssc_good ? "(SSC)" : "(!SSC)");
+
+	/* PCIe->SCB endian mode for BAR */
+	tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+	u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
+		PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
+	writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+
+	/*
+	 * The refclk from the RC should be gated with the CLKREQ# input when
+	 * ASPM L0s/L1 is enabled, so set the CLKREQ_DEBUG_ENABLE field to 1.
+	 */
+	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+	tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
+	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+
+	return 0;
+}
+
+/* L23 is a low-power PCIe link state */
+static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
+{
+	void __iomem *base = pcie->base;
+	int l23, i;
+	u32 tmp;
+
+	/* Assert request for L23 */
+	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
+	u32p_replace_bits(&tmp, 1, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
+	writel(tmp, base + PCIE_MISC_PCIE_CTRL);
+
+	/* Wait up to 36 msec for L23 */
+	tmp = readl(base + PCIE_MISC_PCIE_STATUS);
+	l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp);
+	for (i = 0; i < 15 && !l23; i++) {
+		usleep_range(2000, 2400);
+		tmp = readl(base + PCIE_MISC_PCIE_STATUS);
+		l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK,
+				tmp);
+	}
+
+	if (!l23)
+		dev_err(pcie->dev, "failed to enter low-power link state\n");
+}
+
+static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
+{
+	void __iomem *base = pcie->base;
+	u32 tmp;
+
+	if (brcm_pcie_link_up(pcie))
+		brcm_pcie_enter_l23(pcie);
+	/* Assert fundamental reset */
+	brcm_pcie_perst_set(pcie, 1);
+
+	/* Deassert request for L23 in case it was asserted */
+	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
+	u32p_replace_bits(&tmp, 0, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
+	writel(tmp, base + PCIE_MISC_PCIE_CTRL);
+
+	/* Turn off SerDes */
+	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+	u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
+	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+
+	/* Shutdown PCIe bridge */
+	brcm_pcie_bridge_sw_init_set(pcie, 1);
+}
+
+static void __brcm_pcie_remove(struct brcm_pcie *pcie)
+{
+	brcm_msi_remove(pcie);
+	brcm_pcie_turn_off(pcie);
+	clk_disable_unprepare(pcie->clk);
+	clk_put(pcie->clk);
+}
+
+static int brcm_pcie_remove(struct platform_device *pdev)
+{
+	struct brcm_pcie *pcie = platform_get_drvdata(pdev);
+
+	pci_stop_root_bus(pcie->root_bus);
+	pci_remove_root_bus(pcie->root_bus);
+	__brcm_pcie_remove(pcie);
+
+	return 0;
+}
+
+static int brcm_pcie_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node, *msi_np;
+	struct pci_host_bridge *bridge;
+	struct brcm_pcie *pcie;
+	struct pci_bus *child;
+	struct resource *res;
+	int ret;
+
+	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
+	if (!bridge)
+		return -ENOMEM;
+
+	pcie = pci_host_bridge_priv(bridge);
+	pcie->dev = &pdev->dev;
+	pcie->np = np;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pcie->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pcie->base))
+		return PTR_ERR(pcie->base);
+
+	pcie->clk = devm_clk_get_optional(&pdev->dev, "sw_pcie");
+	if (IS_ERR(pcie->clk))
+		return PTR_ERR(pcie->clk);
+
+	ret = of_pci_get_max_link_speed(np);
+	pcie->gen = (ret < 0) ? 0 : ret;
+
+	pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
+
+	ret = pci_parse_request_of_pci_ranges(pcie->dev, &bridge->windows,
+					      &bridge->dma_ranges, NULL);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(pcie->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "could not enable clock\n");
+		return ret;
+	}
+
+	ret = brcm_pcie_setup(pcie);
+	if (ret)
+		goto fail;
+
+	msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
+	if (pci_msi_enabled() && msi_np == pcie->np) {
+		ret = brcm_pcie_enable_msi(pcie);
+		if (ret) {
+			dev_err(pcie->dev, "probe of internal MSI failed\n");
+			goto fail;
+		}
+	}
+
+	bridge->dev.parent = &pdev->dev;
+	bridge->busnr = 0;
+	bridge->ops = &brcm_pcie_ops;
+	bridge->sysdata = pcie;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret < 0) {
+		dev_err(pcie->dev, "Scanning root bridge failed\n");
+		goto fail;
+	}
+
+	pci_assign_unassigned_bus_resources(bridge->bus);
+	list_for_each_entry(child, &bridge->bus->children, node)
+		pcie_bus_configure_settings(child);
+	pci_bus_add_devices(bridge->bus);
+	platform_set_drvdata(pdev, pcie);
+	pcie->root_bus = bridge->bus;
+
+	return 0;
+fail:
+	__brcm_pcie_remove(pcie);
+	return ret;
+}
+
+static const struct of_device_id brcm_pcie_match[] = {
+	{ .compatible = "brcm,bcm2711-pcie" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, brcm_pcie_match);
+
+static struct platform_driver brcm_pcie_driver = {
+	.probe = brcm_pcie_probe,
+	.remove = brcm_pcie_remove,
+	.driver = {
+		.name = "brcm-pcie",
+		.of_match_table = brcm_pcie_match,
+	},
+};
+module_platform_driver(brcm_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
+MODULE_AUTHOR("Broadcom");
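The comment in brcm_pcie_get_rc_bar2_size_and_offset() above spells out the inbound-viewport rules: the size is rounded up to a power of two via fls64(), and the PCIe-side offset must be size-aligned and must not land between 2GB and 4GB. A standalone sketch of that arithmetic (plain userspace C, not driver code; SZ_2G, SZ_4G and the fls64() helper, built on the GCC/Clang __builtin_clzll, are simplified stand-ins for the kernel versions):

#include <stdint.h>
#include <stdio.h>

#define SZ_2G	(2ULL << 30)	/* stand-ins for the kernel's SZ_2G/SZ_4G */
#define SZ_4G	(4ULL << 30)

/* Same result as the kernel's fls64(): 1-based index of the most significant set bit */
static int fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

/* Mirrors the size/offset validation in brcm_pcie_get_rc_bar2_size_and_offset() */
static int check_rc_bar2(uint64_t dram_size, uint64_t pcie_offset)
{
	/* Inclusive resource ends make this fls64(size - 1): round up to a power of two */
	uint64_t size = dram_size ? 1ULL << fls64(dram_size - 1) : 0;

	if (!size || pcie_offset % size ||
	    (pcie_offset < SZ_4G && pcie_offset > SZ_2G)) {
		printf("invalid: size 0x%llx off 0x%llx\n",
		       (unsigned long long)size, (unsigned long long)pcie_offset);
		return -1;
	}
	printf("ok: size 0x%llx off 0x%llx\n",
	       (unsigned long long)size, (unsigned long long)pcie_offset);
	return 0;
}

int main(void)
{
	check_rc_bar2(3ULL << 30, 0);		/* 3GB of DRAM at PCIe address 0 */
	check_rc_bar2(3ULL << 30, 3ULL << 30);	/* same memory offset to 3GB: rejected */
	return 0;
}

Running it shows 3GB of DRAM rounding up to a 4GB viewport at offset 0, while the same memory placed at a 3GB PCIe offset fails the size-alignment check, matching the best-case scenario described in the driver comment.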
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 0a468c7..8c7f875 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -1588,6 +1588,30 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802,
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804,
 			quirk_paxc_disable_msi_parsing);
 
+static void quirk_paxc_bridge(struct pci_dev *pdev)
+{
+	/*
+	 * The PCI config space is shared with the PAXC root port and the first
+	 * Ethernet device.  So, we need to work around this by telling the PCI
+	 * code that the bridge is not an Ethernet device.
+	 */
+	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+		pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
+
+	/*
+	 * MPSS is not being set properly (as it is currently 0).  This is
+	 * because that area of the PCI config space is hardcoded to zero, and
+	 * is not modifiable by firmware.  Set this to 2 (i.e., 512-byte MPS)
+	 * so that the MPS can be set to the real max value.
+	 */
+	pdev->pcie_mpss = 2;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
+
 MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
 MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
 MODULE_LICENSE("GPL v2");
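The quirk above forces pdev->pcie_mpss to 2 so the core can later program a larger MPS; in the PCIe encoding the supported payload is 128 bytes shifted left by that value, so 2 corresponds to 512 bytes. A tiny standalone illustration (plain C, not driver code):

#include <stdio.h>

int main(void)
{
	/* PCIe encodes max payload size as 128 << code (000b = 128B ... 101b = 4096B) */
	for (int code = 0; code <= 5; code++)
		printf("mpss code %d -> %d bytes\n", code, 128 << code);
	return 0;
}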
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 2128422..dac91d6 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -98,9 +98,6 @@ struct vmd_dev {
 	struct irq_domain	*irq_domain;
 	struct pci_bus		*bus;
 	u8			busn_start;
-
-	struct dma_map_ops	dma_ops;
-	struct dma_domain	dma_domain;
 };
 
 static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
@@ -295,151 +292,6 @@ static struct msi_domain_info vmd_msi_domain_info = {
 	.chip		= &vmd_msi_controller,
 };
 
-/*
- * VMD replaces the requester ID with its own.  DMA mappings for devices in a
- * VMD domain need to be mapped for the VMD, not the device requiring
- * the mapping.
- */
-static struct device *to_vmd_dev(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
-
-	return &vmd->dev->dev;
-}
-
-static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
-		       gfp_t flag, unsigned long attrs)
-{
-	return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
-}
-
-static void vmd_free(struct device *dev, size_t size, void *vaddr,
-		     dma_addr_t addr, unsigned long attrs)
-{
-	return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
-}
-
-static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
-		    void *cpu_addr, dma_addr_t addr, size_t size,
-		    unsigned long attrs)
-{
-	return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
-			attrs);
-}
-
-static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
-			   void *cpu_addr, dma_addr_t addr, size_t size,
-			   unsigned long attrs)
-{
-	return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
-			attrs);
-}
-
-static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
-			       unsigned long offset, size_t size,
-			       enum dma_data_direction dir,
-			       unsigned long attrs)
-{
-	return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
-			attrs);
-}
-
-static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
-			   enum dma_data_direction dir, unsigned long attrs)
-{
-	dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
-}
-
-static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		      enum dma_data_direction dir, unsigned long attrs)
-{
-	return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
-}
-
-static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-			 enum dma_data_direction dir, unsigned long attrs)
-{
-	dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
-}
-
-static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-				    size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
-}
-
-static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
-				       size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
-}
-
-static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction dir)
-{
-	dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-				   int nents, enum dma_data_direction dir)
-{
-	dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static int vmd_dma_supported(struct device *dev, u64 mask)
-{
-	return dma_supported(to_vmd_dev(dev), mask);
-}
-
-static u64 vmd_get_required_mask(struct device *dev)
-{
-	return dma_get_required_mask(to_vmd_dev(dev));
-}
-
-static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
-{
-	struct dma_domain *domain = &vmd->dma_domain;
-
-	if (get_dma_ops(&vmd->dev->dev))
-		del_dma_domain(domain);
-}
-
-#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
-	do {					\
-		if (source->fn)			\
-			dest->fn = vmd_##fn;	\
-	} while (0)
-
-static void vmd_setup_dma_ops(struct vmd_dev *vmd)
-{
-	const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
-	struct dma_map_ops *dest = &vmd->dma_ops;
-	struct dma_domain *domain = &vmd->dma_domain;
-
-	domain->domain_nr = vmd->sysdata.domain;
-	domain->dma_ops = dest;
-
-	if (!source)
-		return;
-	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
-	ASSIGN_VMD_DMA_OPS(source, dest, free);
-	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
-	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
-	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
-	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
-	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
-	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
-	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
-	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
-	add_dma_domain(domain);
-}
-#undef ASSIGN_VMD_DMA_OPS
-
 static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
 				  unsigned int devfn, int reg, int len)
 {
@@ -679,7 +531,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 		.parent = res,
 	};
 
-	sd->vmd_domain = true;
+	sd->vmd_dev = vmd->dev;
 	sd->domain = vmd_find_free_domain();
 	if (sd->domain < 0)
 		return sd->domain;
@@ -709,7 +561,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	}
 
 	vmd_attach_resources(vmd);
-	vmd_setup_dma_ops(vmd);
 	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
 
 	pci_scan_child_bus(vmd->bus);
@@ -824,7 +675,6 @@ static void vmd_remove(struct pci_dev *dev)
 	pci_stop_root_bus(vmd->bus);
 	pci_remove_root_bus(vmd->bus);
 	vmd_cleanup_srcu(vmd);
-	vmd_teardown_dma_ops(vmd);
 	vmd_detach_resources(vmd);
 	irq_domain_remove(vmd->irq_domain);
 }
@@ -868,6 +718,10 @@ static const struct pci_device_id vmd_ids[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
 		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
 				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
+		.driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d),
+		.driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
 		.driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
 	{0,}
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index d7b2b47..0456516 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -18,6 +18,9 @@
 #define DRIVER_AUTHOR	"Gavin Shan, IBM Corporation"
 #define DRIVER_DESC	"PowerPC PowerNV PCI Hotplug Driver"
 
+#define SLOT_WARN(sl, x...) \
+	((sl)->pdev ? pci_warn((sl)->pdev, x) : dev_warn(&(sl)->bus->dev, x))
+
 struct pnv_php_event {
 	bool			added;
 	struct pnv_php_slot	*php_slot;
@@ -151,17 +154,11 @@ static void pnv_php_rmv_pdns(struct device_node *dn)
 static void pnv_php_detach_device_nodes(struct device_node *parent)
 {
 	struct device_node *dn;
-	int refcount;
 
 	for_each_child_of_node(parent, dn) {
 		pnv_php_detach_device_nodes(dn);
 
 		of_node_put(dn);
-		refcount = kref_read(&dn->kobj.kref);
-		if (refcount != 1)
-			pr_warn("Invalid refcount %d on <%pOF>\n",
-				refcount, dn);
-
 		of_detach_node(dn);
 	}
 }
@@ -271,7 +268,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
 
 	ret = pnv_pci_get_device_tree(php_slot->dn->phandle, fdt1, 0x10000);
 	if (ret) {
-		pci_warn(php_slot->pdev, "Error %d getting FDT blob\n", ret);
+		SLOT_WARN(php_slot, "Error %d getting FDT blob\n", ret);
 		goto free_fdt1;
 	}
 
@@ -285,7 +282,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
 	dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL);
 	if (!dt) {
 		ret = -EINVAL;
-		pci_warn(php_slot->pdev, "Cannot unflatten FDT\n");
+		SLOT_WARN(php_slot, "Cannot unflatten FDT\n");
 		goto free_fdt;
 	}
 
@@ -295,15 +292,15 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
 	ret = pnv_php_populate_changeset(&php_slot->ocs, php_slot->dn);
 	if (ret) {
 		pnv_php_reverse_nodes(php_slot->dn);
-		pci_warn(php_slot->pdev, "Error %d populating changeset\n",
-			 ret);
+		SLOT_WARN(php_slot, "Error %d populating changeset\n",
+			  ret);
 		goto free_dt;
 	}
 
 	php_slot->dn->child = NULL;
 	ret = of_changeset_apply(&php_slot->ocs);
 	if (ret) {
-		pci_warn(php_slot->pdev, "Error %d applying changeset\n", ret);
+		SLOT_WARN(php_slot, "Error %d applying changeset\n", ret);
 		goto destroy_changeset;
 	}
 
@@ -342,18 +339,19 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
 	ret = pnv_pci_set_power_state(php_slot->id, state, &msg);
 	if (ret > 0) {
 		if (be64_to_cpu(msg.params[1]) != php_slot->dn->phandle	||
-		    be64_to_cpu(msg.params[2]) != state			||
-		    be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) {
-			pci_warn(php_slot->pdev, "Wrong msg (%lld, %lld, %lld)\n",
-				 be64_to_cpu(msg.params[1]),
-				 be64_to_cpu(msg.params[2]),
-				 be64_to_cpu(msg.params[3]));
+		    be64_to_cpu(msg.params[2]) != state) {
+			SLOT_WARN(php_slot, "Wrong msg (%lld, %lld, %lld)\n",
+				  be64_to_cpu(msg.params[1]),
+				  be64_to_cpu(msg.params[2]),
+				  be64_to_cpu(msg.params[3]));
 			return -ENOMSG;
 		}
+		if (be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) {
+			ret = -ENODEV;
+			goto error;
+		}
 	} else if (ret < 0) {
-		pci_warn(php_slot->pdev, "Error %d powering %s\n",
-			 ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off");
-		return ret;
+		goto error;
 	}
 
 	if (state == OPAL_PCI_SLOT_POWER_OFF || state == OPAL_PCI_SLOT_OFFLINE)
@@ -362,6 +360,11 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
 		ret = pnv_php_add_devtree(php_slot);
 
 	return ret;
+
+error:
+	SLOT_WARN(php_slot, "Error %d powering %s\n",
+		  ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off");
+	return ret;
 }
 EXPORT_SYMBOL_GPL(pnv_php_set_slot_power_state);
 
@@ -378,8 +381,8 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
 	 */
 	ret = pnv_pci_get_power_state(php_slot->id, &power_state);
 	if (ret) {
-		pci_warn(php_slot->pdev, "Error %d getting power status\n",
-			 ret);
+		SLOT_WARN(php_slot, "Error %d getting power status\n",
+			  ret);
 	} else {
 		*state = power_state;
 	}
@@ -402,7 +405,7 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
 		*state = presence;
 		ret = 0;
 	} else {
-		pci_warn(php_slot->pdev, "Error %d getting presence\n", ret);
+		SLOT_WARN(php_slot, "Error %d getting presence\n", ret);
 	}
 
 	return ret;
@@ -566,7 +569,13 @@ static int pnv_php_disable_slot(struct hotplug_slot *slot)
 	struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
 	int ret;
 
-	if (php_slot->state != PNV_PHP_STATE_POPULATED)
+	/*
+	 * Allow disabling a slot that is still in the registered state, to
+	 * cover cases where the slot could not be enabled and never
+	 * reached the populated state.
+	 */
+	if (php_slot->state != PNV_PHP_STATE_POPULATED &&
+	    php_slot->state != PNV_PHP_STATE_REGISTERED)
 		return 0;
 
 	/* Remove all devices behind the slot */
@@ -675,7 +684,7 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
 	ret = pci_hp_register(&php_slot->slot, php_slot->bus,
 			      php_slot->slot_no, php_slot->name);
 	if (ret) {
-		pci_warn(php_slot->pdev, "Error %d registering slot\n", ret);
+		SLOT_WARN(php_slot, "Error %d registering slot\n", ret);
 		return ret;
 	}
 
@@ -728,7 +737,7 @@ static int pnv_php_enable_msix(struct pnv_php_slot *php_slot)
 	/* Enable MSIx */
 	ret = pci_enable_msix_exact(pdev, &entry, 1);
 	if (ret) {
-		pci_warn(pdev, "Error %d enabling MSIx\n", ret);
+		SLOT_WARN(php_slot, "Error %d enabling MSIx\n", ret);
 		return ret;
 	}
 
@@ -778,8 +787,9 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
 		   (sts & PCI_EXP_SLTSTA_PDC)) {
 		ret = pnv_pci_get_presence_state(php_slot->id, &presence);
 		if (ret) {
-			pci_warn(pdev, "PCI slot [%s] error %d getting presence (0x%04x), to retry the operation.\n",
-				 php_slot->name, ret, sts);
+			SLOT_WARN(php_slot,
+				  "PCI slot [%s] error %d getting presence (0x%04x), to retry the operation.\n",
+				  php_slot->name, ret, sts);
 			return IRQ_HANDLED;
 		}
 
@@ -809,8 +819,9 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
 	 */
 	event = kzalloc(sizeof(*event), GFP_ATOMIC);
 	if (!event) {
-		pci_warn(pdev, "PCI slot [%s] missed hotplug event 0x%04x\n",
-			 php_slot->name, sts);
+		SLOT_WARN(php_slot,
+			  "PCI slot [%s] missed hotplug event 0x%04x\n",
+			  php_slot->name, sts);
 		return IRQ_HANDLED;
 	}
 
@@ -834,7 +845,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
 	/* Allocate workqueue */
 	php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
 	if (!php_slot->wq) {
-		pci_warn(pdev, "Cannot alloc workqueue\n");
+		SLOT_WARN(php_slot, "Cannot alloc workqueue\n");
 		pnv_php_disable_irq(php_slot, true);
 		return;
 	}
@@ -858,7 +869,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
 			  php_slot->name, php_slot);
 	if (ret) {
 		pnv_php_disable_irq(php_slot, true);
-		pci_warn(pdev, "Error %d enabling IRQ %d\n", ret, irq);
+		SLOT_WARN(php_slot, "Error %d enabling IRQ %d\n", ret, irq);
 		return;
 	}
 
@@ -894,7 +905,7 @@ static void pnv_php_enable_irq(struct pnv_php_slot *php_slot)
 
 	ret = pci_enable_device(pdev);
 	if (ret) {
-		pci_warn(pdev, "Error %d enabling device\n", ret);
+		SLOT_WARN(php_slot, "Error %d enabling device\n", ret);
 		return;
 	}
 
@@ -1009,6 +1020,8 @@ static int __init pnv_php_init(void)
 	for_each_compatible_node(dn, NULL, "ibm,ioda3-phb")
 		pnv_php_register(dn);
 
+	for_each_compatible_node(dn, NULL, "ibm,ioda2-npu2-opencapi-phb")
+		pnv_php_register_one(dn); /* slot directly under the PHB */
 	return 0;
 }
 
@@ -1021,6 +1034,9 @@ static void __exit pnv_php_exit(void)
 
 	for_each_compatible_node(dn, NULL, "ibm,ioda3-phb")
 		pnv_php_unregister(dn);
+
+	for_each_compatible_node(dn, NULL, "ibm,ioda2-npu2-opencapi-phb")
+		pnv_php_unregister_one(dn); /* slot directly under the PHB */
 }
 
 module_init(pnv_php_init);
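The SLOT_WARN() macro introduced above routes each message to pci_warn() when the slot has a pdev and falls back to dev_warn() on the bus device otherwise, since a slot may not have a PCI device yet (for instance one registered directly under the PHB). A minimal userspace sketch of the same fallback pattern using a GNU-style variadic macro (the slot struct, names and log target here are hypothetical, not the driver's types):

#include <stdio.h>

struct slot {
	const char *pdev_name;	/* NULL when the slot has no PCI device yet */
	const char *bus_name;
};

/* Prefer the device-specific target, fall back to the bus-level one */
#define SLOT_WARN(sl, fmt, ...) \
	printf("%s: " fmt, (sl)->pdev_name ? (sl)->pdev_name : (sl)->bus_name, ##__VA_ARGS__)

int main(void)
{
	struct slot with_dev = { "0000:01:00.0", "pci_bus 0000:01" };
	struct slot no_dev   = { NULL,           "pci_bus 0000:02" };

	SLOT_WARN(&with_dev, "Error %d registering slot\n", -19);
	SLOT_WARN(&no_dev,   "Error %d registering slot\n", -19);
	return 0;
}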
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 1e88fd4..4d1f392 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -186,10 +186,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
 	sprintf(buf, "virtfn%u", id);
 	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
 	if (rc)
-		goto failed2;
+		goto failed1;
 	rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
 	if (rc)
-		goto failed3;
+		goto failed2;
 
 	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
 
@@ -197,11 +197,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
 
 	return 0;
 
-failed3:
-	sysfs_remove_link(&dev->dev.kobj, buf);
 failed2:
-	pci_stop_and_remove_bus_device(virtfn);
+	sysfs_remove_link(&dev->dev.kobj, buf);
 failed1:
+	pci_stop_and_remove_bus_device(virtfn);
 	pci_dev_put(dev);
 failed0:
 	virtfn_remove_bus(dev->bus, bus);
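The relabelling above keeps pci_iov_add_virtfn()'s error path unwinding in strict reverse order of setup: each failure jumps to the label that undoes only the steps that already succeeded and then falls through to the earlier labels. A standalone sketch of that goto-unwind convention (plain C, hypothetical step names):

#include <stdio.h>

static int do_step(const char *name, int fail)
{
	printf("%s: %s\n", name, fail ? "failed" : "ok");
	return fail ? -1 : 0;
}

static void undo_step(const char *name)
{
	printf("undo %s\n", name);
}

/* Each failure jumps to the label that undoes only what has succeeded so far */
static int setup(int fail_at)
{
	int rc;

	rc = do_step("add device", fail_at == 1);
	if (rc)
		goto out;
	rc = do_step("create virtfn link", fail_at == 2);
	if (rc)
		goto undo_device;
	rc = do_step("create physfn link", fail_at == 3);
	if (rc)
		goto undo_link;

	return 0;

undo_link:
	undo_step("virtfn link");
undo_device:
	undo_step("device");
out:
	return rc;
}

int main(void)
{
	setup(3);	/* unwinds the link first, then the device */
	return 0;
}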
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 0608aae..9a8a383 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -289,6 +289,9 @@ static const struct pci_p2pdma_whitelist_entry {
 	/* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */
 	{PCI_VENDOR_ID_INTEL,	0x2f00, REQ_SAME_HOST_BRIDGE},
 	{PCI_VENDOR_ID_INTEL,	0x2f01, REQ_SAME_HOST_BRIDGE},
+	/* Intel SkyLake-E */
+	{PCI_VENDOR_ID_INTEL,	0x2030, 0},
+	{PCI_VENDOR_ID_INTEL,	0x2020, 0},
 	{}
 };
 
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index df21e32..d828ca8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -131,6 +131,7 @@ bool pci_ats_disabled(void)
 {
 	return pcie_ats_disabled;
 }
+EXPORT_SYMBOL_GPL(pci_ats_disabled);
 
 /* Disable bridge_d3 for all PCIe ports */
 static bool pci_bridge_d3_disable;
@@ -1372,8 +1373,11 @@ int pci_save_state(struct pci_dev *dev)
 {
 	int i;
 	/* XXX: 100% dword access ok here? */
-	for (i = 0; i < 16; i++)
+	for (i = 0; i < 16; i++) {
 		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
+		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
+			i * 4, dev->saved_config_space[i]);
+	}
 	dev->state_saved = true;
 
 	i = pci_save_pcie_state(dev);
@@ -5998,7 +6002,8 @@ EXPORT_SYMBOL_GPL(pci_pr3_present);
 /**
  * pci_add_dma_alias - Add a DMA devfn alias for a device
  * @dev: the PCI device for which alias is added
- * @devfn: alias slot and function
+ * @devfn_from: alias slot and function
+ * @nr_devfns: number of subsequent devfns to alias
  *
  * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
  * which is used to program permissible bus-devfn source addresses for DMA
@@ -6014,18 +6019,29 @@ EXPORT_SYMBOL_GPL(pci_pr3_present);
  * cannot be left as a userspace activity).  DMA aliases should therefore
  * be configured via quirks, such as the PCI fixup header quirk.
  */
-void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns)
 {
+	int devfn_to;
+
+	nr_devfns = min(nr_devfns, (unsigned) MAX_NR_DEVFNS - devfn_from);
+	devfn_to = devfn_from + nr_devfns - 1;
+
 	if (!dev->dma_alias_mask)
-		dev->dma_alias_mask = bitmap_zalloc(U8_MAX, GFP_KERNEL);
+		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
 	if (!dev->dma_alias_mask) {
 		pci_warn(dev, "Unable to allocate DMA alias mask\n");
 		return;
 	}
 
-	set_bit(devfn, dev->dma_alias_mask);
-	pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
-		 PCI_SLOT(devfn), PCI_FUNC(devfn));
+	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
+
+	if (nr_devfns == 1)
+		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
+				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
+	else if (nr_devfns > 1)
+		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
+				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
+				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
 }
 
 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
@@ -6033,7 +6049,9 @@ bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
 	return (dev1->dma_alias_mask &&
 		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
 	       (dev2->dma_alias_mask &&
-		test_bit(dev1->devfn, dev2->dma_alias_mask));
+		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
+	       pci_real_dma_dev(dev1) == dev2 ||
+	       pci_real_dma_dev(dev2) == dev1;
 }
 
 bool pci_device_is_present(struct pci_dev *pdev)
@@ -6057,6 +6075,21 @@ void pci_ignore_hotplug(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
 
+/**
+ * pci_real_dma_dev - Get PCI DMA device for PCI device
+ * @dev: the PCI device that may have a PCI DMA alias
+ *
+ * Permits the platform to provide architecture-specific functionality to
+ * devices needing to alias DMA to another PCI device on another PCI bus. If
+ * the PCI device is on the same bus, it is recommended to use
+ * pci_add_dma_alias(). This is the default implementation. Architecture
+ * implementations can override this.
+ */
+struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
+{
+	return dev;
+}
+
 resource_size_t __weak pcibios_default_alignment(void)
 {
 	return 0;
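pci_add_dma_alias() above now marks a whole range of devfns in a bitmap sized for all 256 possible devfns, and pci_for_each_dma_alias() later walks the set bits. A self-contained sketch of the idea (plain C; the bitmap helpers are simplified stand-ins for bitmap_set() and for_each_set_bit()):

#include <stdint.h>
#include <stdio.h>

#define MAX_NR_DEVFNS	256	/* devfns 00.0 through 1f.7 */
#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

static uint64_t alias_mask[MAX_NR_DEVFNS / 64];

/* Simplified stand-in for bitmap_set(): mark nr_devfns consecutive devfns */
static void add_dma_alias(unsigned int devfn_from, unsigned int nr_devfns)
{
	for (unsigned int d = devfn_from; d < devfn_from + nr_devfns && d < MAX_NR_DEVFNS; d++)
		alias_mask[d / 64] |= 1ULL << (d % 64);
}

int main(void)
{
	/* Five consecutive functions of one slot, as the PEX VCA quirk does per slot */
	add_dma_alias(PCI_DEVFN(0x10, 0), 5);

	/* Simplified stand-in for for_each_set_bit(): report every aliased devfn */
	for (unsigned int d = 0; d < MAX_NR_DEVFNS; d++)
		if (alias_mask[d / 64] & (1ULL << (d % 64)))
			printf("alias %02x.%d\n", PCI_SLOT(d), PCI_FUNC(d));
	return 0;
}

Printing the set bits yields 10.0 through 10.4, mirroring how a single pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0), 5) call now replaces five separate single-devfn calls.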
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index a0a53bd..6394e77 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -4,6 +4,9 @@
 
 #include <linux/pci.h>
 
+/* Number of possible devfns: 0.0 to 1f.7 inclusive */
+#define MAX_NR_DEVFNS 256
+
 #define PCI_FIND_CAP_TTL	48
 
 #define PCI_VSEC_ID_INTEL_TBT	0x1234	/* Thunderbolt */
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 1ca86f2..4a818b0 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1445,6 +1445,7 @@ static int aer_probe(struct pcie_device *dev)
 		return -ENOMEM;
 
 	rpc->rpd = port;
+	INIT_KFIFO(rpc->aer_fifo);
 	set_service_data(dev, rpc);
 
 	status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index b0e6048..01dfc8b 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -10,6 +10,8 @@
  *	Zhang Yanmin (yanmin.zhang@intel.com)
  */
 
+#define dev_fmt(fmt) "AER: " fmt
+
 #include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -61,10 +63,12 @@ static int report_error_detected(struct pci_dev *dev,
 		 * error callbacks of "any" device in the subtree, and will
 		 * exit in the disconnected error state.
 		 */
-		if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
+		if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
 			vote = PCI_ERS_RESULT_NO_AER_DRIVER;
-		else
+			pci_info(dev, "can't recover (no error_detected callback)\n");
+		} else {
 			vote = PCI_ERS_RESULT_NONE;
+		}
 	} else {
 		err_handler = dev->driver->err_handler;
 		vote = err_handler->error_detected(dev, state);
@@ -233,12 +237,12 @@ void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
 
 	pci_aer_clear_device_status(dev);
 	pci_cleanup_aer_uncorrect_error_status(dev);
-	pci_info(dev, "AER: Device recovery successful\n");
+	pci_info(dev, "device recovery successful\n");
 	return;
 
 failed:
 	pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
 
 	/* TODO: Should kernel panic here? */
-	pci_info(dev, "AER: Device recovery failed\n");
+	pci_info(dev, "device recovery failed\n");
 }
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 6ef74bf..bd2b691 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -306,19 +306,20 @@ static int proc_bus_pci_release(struct inode *inode, struct file *file)
 }
 #endif /* HAVE_PCI_MMAP */
 
-static const struct file_operations proc_bus_pci_operations = {
-	.owner		= THIS_MODULE,
-	.llseek		= proc_bus_pci_lseek,
-	.read		= proc_bus_pci_read,
-	.write		= proc_bus_pci_write,
-	.unlocked_ioctl	= proc_bus_pci_ioctl,
-	.compat_ioctl	= proc_bus_pci_ioctl,
+static const struct proc_ops proc_bus_pci_ops = {
+	.proc_lseek	= proc_bus_pci_lseek,
+	.proc_read	= proc_bus_pci_read,
+	.proc_write	= proc_bus_pci_write,
+	.proc_ioctl	= proc_bus_pci_ioctl,
+#ifdef CONFIG_COMPAT
+	.proc_compat_ioctl = proc_bus_pci_ioctl,
+#endif
 #ifdef HAVE_PCI_MMAP
-	.open		= proc_bus_pci_open,
-	.release	= proc_bus_pci_release,
-	.mmap		= proc_bus_pci_mmap,
+	.proc_open	= proc_bus_pci_open,
+	.proc_release	= proc_bus_pci_release,
+	.proc_mmap	= proc_bus_pci_mmap,
 #ifdef HAVE_ARCH_PCI_GET_UNMAPPED_AREA
-	.get_unmapped_area = get_pci_unmapped_area,
+	.proc_get_unmapped_area = get_pci_unmapped_area,
 #endif /* HAVE_ARCH_PCI_GET_UNMAPPED_AREA */
 #endif /* HAVE_PCI_MMAP */
 };
@@ -424,7 +425,7 @@ int pci_proc_attach_device(struct pci_dev *dev)
 
 	sprintf(name, "%02x.%x", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
 	e = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir,
-			     &proc_bus_pci_operations, dev);
+			     &proc_bus_pci_ops, dev);
 	if (!e)
 		return -ENOMEM;
 	proc_set_size(e, dev->cfg_size);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index a3a1a0e..29f473e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1871,19 +1871,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2609, quirk_intel_pcie_pm);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x260a, quirk_intel_pcie_pm);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x260b, quirk_intel_pcie_pm);
 
+static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay)
+{
+	if (dev->d3_delay >= delay)
+		return;
+
+	dev->d3_delay = delay;
+	pci_info(dev, "extending delay after power-on from D3hot to %d msec\n",
+		 dev->d3_delay);
+}
+
 static void quirk_radeon_pm(struct pci_dev *dev)
 {
 	if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
-	    dev->subsystem_device == 0x00e2) {
-		if (dev->d3_delay < 20) {
-			dev->d3_delay = 20;
-			pci_info(dev, "extending delay after power-on from D3 to %d msec\n",
-				 dev->d3_delay);
-		}
-	}
+	    dev->subsystem_device == 0x00e2)
+		quirk_d3hot_delay(dev, 20);
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
 
+/*
+ * Ryzen5/7 XHCI controllers fail upon resume from runtime suspend or s2idle.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=205587
+ *
+ * The kernel attempts to transition these devices to D3cold, but that seems
+ * to be ineffective on the platforms in question; the PCI device appears to
+ * remain on in D3hot state. The D3hot-to-D0 transition then requires an
+ * extended delay in order to succeed.
+ */
+static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
+{
+	quirk_d3hot_delay(dev, 20);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
+
 #ifdef CONFIG_X86_IO_APIC
 static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
 {
@@ -2381,32 +2402,6 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
 			 PCI_DEVICE_ID_TIGON3_5719,
 			 quirk_brcm_5719_limit_mrrs);
 
-#ifdef CONFIG_PCIE_IPROC_PLATFORM
-static void quirk_paxc_bridge(struct pci_dev *pdev)
-{
-	/*
-	 * The PCI config space is shared with the PAXC root port and the first
-	 * Ethernet device.  So, we need to workaround this by telling the PCI
-	 * code that the bridge is not an Ethernet device.
-	 */
-	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
-		pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
-
-	/*
-	 * MPSS is not being set properly (as it is currently 0).  This is
-	 * because that area of the PCI config space is hard coded to zero, and
-	 * is not modifiable by firmware.  Set this to 2 (e.g., 512 byte MPS)
-	 * so that the MPS can be set to the real max value.
-	 */
-	pdev->pcie_mpss = 2;
-}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
-#endif
-
 /*
  * Originally in EDAC sources for i82875P: Intel tells BIOS developers to
  * hide device 6 which configures the overflow device access containing the
@@ -3932,7 +3927,7 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe)
 static void quirk_dma_func0_alias(struct pci_dev *dev)
 {
 	if (PCI_FUNC(dev->devfn) != 0)
-		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
 }
 
 /*
@@ -3946,7 +3941,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
 static void quirk_dma_func1_alias(struct pci_dev *dev)
 {
 	if (PCI_FUNC(dev->devfn) != 1)
-		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
+		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
 }
 
 /*
@@ -4031,7 +4026,7 @@ static void quirk_fixed_dma_alias(struct pci_dev *dev)
 
 	id = pci_match_id(fixed_dma_alias_tbl, dev);
 	if (id)
-		pci_add_dma_alias(dev, id->driver_data);
+		pci_add_dma_alias(dev, id->driver_data, 1);
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
 
@@ -4072,9 +4067,9 @@ DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
  */
 static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
 {
-	pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0));
-	pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0));
-	pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3));
+	pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0), 1);
+	pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0), 1);
+	pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3), 1);
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
@@ -4098,13 +4093,8 @@ static void quirk_pex_vca_alias(struct pci_dev *pdev)
 	const unsigned int num_pci_slots = 0x20;
 	unsigned int slot;
 
-	for (slot = 0; slot < num_pci_slots; slot++) {
-		pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0));
-		pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x1));
-		pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x2));
-		pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x3));
-		pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x4));
-	}
+	for (slot = 0; slot < num_pci_slots; slot++)
+		pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0), 5);
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
@@ -5339,7 +5329,7 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
 			pci_dbg(pdev,
 				"Aliasing Partition %d Proxy ID %02x.%d\n",
 				pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
-			pci_add_dma_alias(pdev, devfn);
+			pci_add_dma_alias(pdev, devfn, 1);
 		}
 	}
 
@@ -5380,6 +5370,39 @@ SWITCHTEC_QUIRK(0x8573);  /* PFXI 48XG3 */
 SWITCHTEC_QUIRK(0x8574);  /* PFXI 64XG3 */
 SWITCHTEC_QUIRK(0x8575);  /* PFXI 80XG3 */
 SWITCHTEC_QUIRK(0x8576);  /* PFXI 96XG3 */
+SWITCHTEC_QUIRK(0x4000);  /* PFX 100XG4 */
+SWITCHTEC_QUIRK(0x4084);  /* PFX 84XG4  */
+SWITCHTEC_QUIRK(0x4068);  /* PFX 68XG4  */
+SWITCHTEC_QUIRK(0x4052);  /* PFX 52XG4  */
+SWITCHTEC_QUIRK(0x4036);  /* PFX 36XG4  */
+SWITCHTEC_QUIRK(0x4028);  /* PFX 28XG4  */
+SWITCHTEC_QUIRK(0x4100);  /* PSX 100XG4 */
+SWITCHTEC_QUIRK(0x4184);  /* PSX 84XG4  */
+SWITCHTEC_QUIRK(0x4168);  /* PSX 68XG4  */
+SWITCHTEC_QUIRK(0x4152);  /* PSX 52XG4  */
+SWITCHTEC_QUIRK(0x4136);  /* PSX 36XG4  */
+SWITCHTEC_QUIRK(0x4128);  /* PSX 28XG4  */
+SWITCHTEC_QUIRK(0x4200);  /* PAX 100XG4 */
+SWITCHTEC_QUIRK(0x4284);  /* PAX 84XG4  */
+SWITCHTEC_QUIRK(0x4268);  /* PAX 68XG4  */
+SWITCHTEC_QUIRK(0x4252);  /* PAX 52XG4  */
+SWITCHTEC_QUIRK(0x4236);  /* PAX 36XG4  */
+SWITCHTEC_QUIRK(0x4228);  /* PAX 28XG4  */
+
+/*
+ * The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
+ * These IDs are used to forward responses to the originator on the other
+ * side of the NTB.  Alias all possible IDs to the NTB to permit access when
+ * the IOMMU is turned on.
+ */
+static void quirk_plx_ntb_dma_alias(struct pci_dev *pdev)
+{
+	pci_info(pdev, "Setting PLX NTB proxy ID aliases\n");
+	/* PLX NTB may use all 256 devfns */
+	pci_add_dma_alias(pdev, 0, 256);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b0, quirk_plx_ntb_dma_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b1, quirk_plx_ntb_dma_alias);
 
 /*
  * On Lenovo Thinkpad P50 SKUs with a Nvidia Quadro M1000M, the BIOS does
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index bade140..2061672 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -32,6 +32,12 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
 	struct pci_bus *bus;
 	int ret;
 
+	/*
+	 * The device may have an explicit alias requester ID for DMA where the
+	 * requester is on another PCI bus.
+	 */
+	pdev = pci_real_dma_dev(pdev);
+
 	ret = fn(pdev, pci_dev_id(pdev), data);
 	if (ret)
 		return ret;
@@ -41,9 +47,9 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
 	 * DMA, iterate over that too.
 	 */
 	if (unlikely(pdev->dma_alias_mask)) {
-		u8 devfn;
+		unsigned int devfn;
 
-		for_each_set_bit(devfn, pdev->dma_alias_mask, U8_MAX) {
+		for_each_set_bit(devfn, pdev->dma_alias_mask, MAX_NR_DEVFNS) {
 			ret = fn(pdev, PCI_DEVID(pdev->bus->number, devfn),
 				 data);
 			if (ret)
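For reference, the requester ID handed to fn() above is simply the bus number and devfn packed into 16 bits. A quick standalone illustration of the PCI_DEVID() arithmetic (plain C):

#include <stdio.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_DEVID(bus, devfn)	(((bus) << 8) | (devfn))

int main(void)
{
	unsigned int devfn = PCI_DEVFN(0x12, 3);	/* slot 0x12, function 3 */
	unsigned int rid = PCI_DEVID(0x04, devfn);	/* bus 0x04 */

	printf("devfn 0x%02x on bus 0x04 -> requester ID 0x%04x\n", devfn, rid);
	return 0;
}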
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index f279826..f2461bf 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1803,12 +1803,18 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
 	/* Restore size and flags */
 	list_for_each_entry(fail_res, &fail_head, list) {
 		struct resource *res = fail_res->res;
+		int idx;
 
 		res->start = fail_res->start;
 		res->end = fail_res->end;
 		res->flags = fail_res->flags;
-		if (fail_res->dev->subordinate)
-			res->flags = 0;
+
+		if (pci_is_bridge(fail_res->dev)) {
+			idx = res - &fail_res->dev->resource[0];
+			if (idx >= PCI_BRIDGE_RESOURCES &&
+			    idx <= PCI_BRIDGE_RESOURCE_END)
+				res->flags = 0;
+		}
 	}
 	free_list(&fail_head);
 
@@ -1832,56 +1838,72 @@ void __init pci_assign_unassigned_resources(void)
 	}
 }
 
-static void extend_bridge_window(struct pci_dev *bridge, struct resource *res,
+static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
 				 struct list_head *add_list,
-				 resource_size_t available)
+				 resource_size_t new_size)
 {
-	struct pci_dev_resource *dev_res;
+	resource_size_t add_size, size = resource_size(res);
 
 	if (res->parent)
 		return;
 
-	if (resource_size(res) >= available)
+	if (!new_size)
 		return;
 
-	dev_res = res_to_dev_res(add_list, res);
-	if (!dev_res)
-		return;
+	if (new_size > size) {
+		add_size = new_size - size;
+		pci_dbg(bridge, "bridge window %pR extended by %pa\n", res,
+			&add_size);
+	} else if (new_size < size) {
+		add_size = size - new_size;
+		pci_dbg(bridge, "bridge window %pR shrunken by %pa\n", res,
+			&add_size);
+	}
 
-	/* Is there room to extend the window? */
-	if (available - resource_size(res) <= dev_res->add_size)
-		return;
-
-	dev_res->add_size = available - resource_size(res);
-	pci_dbg(bridge, "bridge window %pR extended by %pa\n", res,
-		&dev_res->add_size);
+	res->end = res->start + new_size - 1;
+	remove_from_list(add_list, res);
 }
 
 static void pci_bus_distribute_available_resources(struct pci_bus *bus,
 					    struct list_head *add_list,
-					    resource_size_t available_io,
-					    resource_size_t available_mmio,
-					    resource_size_t available_mmio_pref)
+					    struct resource io,
+					    struct resource mmio,
+					    struct resource mmio_pref)
 {
-	resource_size_t remaining_io, remaining_mmio, remaining_mmio_pref;
 	unsigned int normal_bridges = 0, hotplug_bridges = 0;
 	struct resource *io_res, *mmio_res, *mmio_pref_res;
 	struct pci_dev *dev, *bridge = bus->self;
+	resource_size_t io_per_hp, mmio_per_hp, mmio_pref_per_hp, align;
 
 	io_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
 	mmio_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
 	mmio_pref_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
 
 	/*
-	 * Update additional resource list (add_list) to fill all the
-	 * extra resource space available for this port except the space
-	 * calculated in __pci_bus_size_bridges() which covers all the
-	 * devices currently connected to the port and below.
+	 * This bridge's own alignment has not been taken into account yet, so
+	 * it must be applied now, before its bridge windows are extended.
 	 */
-	extend_bridge_window(bridge, io_res, add_list, available_io);
-	extend_bridge_window(bridge, mmio_res, add_list, available_mmio);
-	extend_bridge_window(bridge, mmio_pref_res, add_list,
-			     available_mmio_pref);
+	align = pci_resource_alignment(bridge, io_res);
+	if (!io_res->parent && align)
+		io.start = min(ALIGN(io.start, align), io.end + 1);
+
+	align = pci_resource_alignment(bridge, mmio_res);
+	if (!mmio_res->parent && align)
+		mmio.start = min(ALIGN(mmio.start, align), mmio.end + 1);
+
+	align = pci_resource_alignment(bridge, mmio_pref_res);
+	if (!mmio_pref_res->parent && align)
+		mmio_pref.start = min(ALIGN(mmio_pref.start, align),
+			mmio_pref.end + 1);
+
+	/*
+	 * Now that we have adjusted for alignment, update the bridge window
+	 * resources to fill as much remaining resource space as possible.
+	 */
+	adjust_bridge_window(bridge, io_res, add_list, resource_size(&io));
+	adjust_bridge_window(bridge, mmio_res, add_list, resource_size(&mmio));
+	adjust_bridge_window(bridge, mmio_pref_res, add_list,
+			     resource_size(&mmio_pref));
 
 	/*
 	 * Calculate how many hotplug bridges and normal bridges there
@@ -1902,11 +1924,9 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
 	 */
 	if (hotplug_bridges + normal_bridges == 1) {
 		dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
-		if (dev->subordinate) {
+		if (dev->subordinate)
 			pci_bus_distribute_available_resources(dev->subordinate,
-				add_list, available_io, available_mmio,
-				available_mmio_pref);
-		}
+				add_list, io, mmio, mmio_pref);
 		return;
 	}
 
@@ -1919,12 +1939,9 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
 	 * extra space reduced by the minimal required space for the
 	 * non-hotplug bridges.
 	 */
-	remaining_io = available_io;
-	remaining_mmio = available_mmio;
-	remaining_mmio_pref = available_mmio_pref;
-
 	for_each_pci_bridge(dev, bus) {
-		const struct resource *res;
+		resource_size_t used_size;
+		struct resource *res;
 
 		if (dev->is_hotplug_bridge)
 			continue;
@@ -1934,24 +1951,39 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
 		 * bridge and devices below it occupy.
 		 */
 		res = &dev->resource[PCI_BRIDGE_RESOURCES + 0];
-		if (!res->parent && available_io > resource_size(res))
-			remaining_io -= resource_size(res);
+		align = pci_resource_alignment(dev, res);
+		align = align ? ALIGN(io.start, align) - io.start : 0;
+		used_size = align + resource_size(res);
+		if (!res->parent)
+			io.start = min(io.start + used_size, io.end + 1);
 
 		res = &dev->resource[PCI_BRIDGE_RESOURCES + 1];
-		if (!res->parent && available_mmio > resource_size(res))
-			remaining_mmio -= resource_size(res);
+		align = pci_resource_alignment(dev, res);
+		align = align ? ALIGN(mmio.start, align) - mmio.start : 0;
+		used_size = align + resource_size(res);
+		if (!res->parent)
+			mmio.start = min(mmio.start + used_size, mmio.end + 1);
 
 		res = &dev->resource[PCI_BRIDGE_RESOURCES + 2];
-		if (!res->parent && available_mmio_pref > resource_size(res))
-			remaining_mmio_pref -= resource_size(res);
+		align = pci_resource_alignment(dev, res);
+		align = align ? ALIGN(mmio_pref.start, align) -
+			mmio_pref.start : 0;
+		used_size = align + resource_size(res);
+		if (!res->parent)
+			mmio_pref.start = min(mmio_pref.start + used_size,
+				mmio_pref.end + 1);
 	}
 
+	io_per_hp = div64_ul(resource_size(&io), hotplug_bridges);
+	mmio_per_hp = div64_ul(resource_size(&mmio), hotplug_bridges);
+	mmio_pref_per_hp = div64_ul(resource_size(&mmio_pref),
+		hotplug_bridges);
+
 	/*
 	 * Go over devices on this bus and distribute the remaining
 	 * resource space between hotplug bridges.
 	 */
 	for_each_pci_bridge(dev, bus) {
-		resource_size_t align, io, mmio, mmio_pref;
 		struct pci_bus *b;
 
 		b = dev->subordinate;
@@ -1963,42 +1995,31 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
 		 * hotplug-capable downstream ports taking alignment into
 		 * account.
 		 */
-		align = pci_resource_alignment(bridge, io_res);
-		io = div64_ul(available_io, hotplug_bridges);
-		io = min(ALIGN(io, align), remaining_io);
-		remaining_io -= io;
-
-		align = pci_resource_alignment(bridge, mmio_res);
-		mmio = div64_ul(available_mmio, hotplug_bridges);
-		mmio = min(ALIGN(mmio, align), remaining_mmio);
-		remaining_mmio -= mmio;
-
-		align = pci_resource_alignment(bridge, mmio_pref_res);
-		mmio_pref = div64_ul(available_mmio_pref, hotplug_bridges);
-		mmio_pref = min(ALIGN(mmio_pref, align), remaining_mmio_pref);
-		remaining_mmio_pref -= mmio_pref;
+		io.end = io.start + io_per_hp - 1;
+		mmio.end = mmio.start + mmio_per_hp - 1;
+		mmio_pref.end = mmio_pref.start + mmio_pref_per_hp - 1;
 
 		pci_bus_distribute_available_resources(b, add_list, io, mmio,
 						       mmio_pref);
+
+		io.start += io_per_hp;
+		mmio.start += mmio_per_hp;
+		mmio_pref.start += mmio_pref_per_hp;
 	}
 }
 
 static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
 						     struct list_head *add_list)
 {
-	resource_size_t available_io, available_mmio, available_mmio_pref;
-	const struct resource *res;
+	struct resource available_io, available_mmio, available_mmio_pref;
 
 	if (!bridge->is_hotplug_bridge)
 		return;
 
 	/* Take the initial extra resources from the hotplug port */
-	res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
-	available_io = resource_size(res);
-	res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
-	available_mmio = resource_size(res);
-	res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
-	available_mmio_pref = resource_size(res);
+	available_io = bridge->resource[PCI_BRIDGE_RESOURCES + 0];
+	available_mmio = bridge->resource[PCI_BRIDGE_RESOURCES + 1];
+	available_mmio_pref = bridge->resource[PCI_BRIDGE_RESOURCES + 2];
 
 	pci_bus_distribute_available_resources(bridge->subordinate,
 					       add_list, available_io,
@@ -2055,12 +2076,18 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
 	/* Restore size and flags */
 	list_for_each_entry(fail_res, &fail_head, list) {
 		struct resource *res = fail_res->res;
+		int idx;
 
 		res->start = fail_res->start;
 		res->end = fail_res->end;
 		res->flags = fail_res->flags;
-		if (fail_res->dev->subordinate)
-			res->flags = 0;
+
+		if (pci_is_bridge(fail_res->dev)) {
+			idx = res - &fail_res->dev->resource[0];
+			if (idx >= PCI_BRIDGE_RESOURCES &&
+			    idx <= PCI_BRIDGE_RESOURCE_END)
+				res->flags = 0;
+		}
 	}
 	free_list(&fail_head);
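The reworked pci_bus_distribute_available_resources() above now (1) aligns each window's start to the bridge's alignment, (2) advances the start past the space already consumed by non-hotplug bridges, and (3) splits what remains evenly between the hotplug bridges. A standalone arithmetic sketch of that three-step split (plain C, made-up window and sizes):

#include <stdint.h>
#include <stdio.h>

static uint64_t align_up(uint64_t x, uint64_t a)
{
	return a ? (x + a - 1) / a * a : x;
}

int main(void)
{
	/* Hypothetical MMIO window handed down from the hotplug port */
	uint64_t start = 0xc0100000, end = 0xc0ffffff;	/* inclusive, ~15MB */
	uint64_t bridge_align = 0x100000;		/* 1MB bridge window alignment */

	/* Step 1: account for the bridge's own alignment before sizing */
	start = align_up(start, bridge_align);

	/* Step 2: skip the space already used by non-hotplug bridges below */
	uint64_t non_hotplug_used = 0x300000;		/* 3MB, aligned for simplicity */
	start += non_hotplug_used;

	/* Step 3: split the remainder evenly between the hotplug bridges */
	unsigned int hotplug_bridges = 3;
	uint64_t per_hp = (end - start + 1) / hotplug_bridges;

	for (unsigned int i = 0; i < hotplug_bridges; i++) {
		printf("hotplug bridge %u: [0x%llx-0x%llx]\n", i,
		       (unsigned long long)start,
		       (unsigned long long)(start + per_hp - 1));
		start += per_hp;
	}
	return 0;
}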
 
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 88091bb..a823b4b 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -317,8 +317,15 @@ static ssize_t field ## _show(struct device *dev, \
 	struct device_attribute *attr, char *buf) \
 { \
 	struct switchtec_dev *stdev = to_stdev(dev); \
-	return io_string_show(buf, &stdev->mmio_sys_info->field, \
-			    sizeof(stdev->mmio_sys_info->field)); \
+	struct sys_info_regs __iomem *si = stdev->mmio_sys_info; \
+	if (stdev->gen == SWITCHTEC_GEN3) \
+		return io_string_show(buf, &si->gen3.field, \
+				      sizeof(si->gen3.field)); \
+	else if (stdev->gen == SWITCHTEC_GEN4) \
+		return io_string_show(buf, &si->gen4.field, \
+				      sizeof(si->gen4.field)); \
+	else \
+		return -ENOTSUPP; \
 } \
 \
 static DEVICE_ATTR_RO(field)
@@ -326,13 +333,31 @@ static DEVICE_ATTR_RO(field)
 DEVICE_ATTR_SYS_INFO_STR(vendor_id);
 DEVICE_ATTR_SYS_INFO_STR(product_id);
 DEVICE_ATTR_SYS_INFO_STR(product_revision);
-DEVICE_ATTR_SYS_INFO_STR(component_vendor);
+
+static ssize_t component_vendor_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct switchtec_dev *stdev = to_stdev(dev);
+	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
+
+	/* component_vendor field not supported after gen3 */
+	if (stdev->gen != SWITCHTEC_GEN3)
+		return sprintf(buf, "none\n");
+
+	return io_string_show(buf, &si->gen3.component_vendor,
+			      sizeof(si->gen3.component_vendor));
+}
+static DEVICE_ATTR_RO(component_vendor);
 
 static ssize_t component_id_show(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
 	struct switchtec_dev *stdev = to_stdev(dev);
-	int id = ioread16(&stdev->mmio_sys_info->component_id);
+	int id = ioread16(&stdev->mmio_sys_info->gen3.component_id);
+
+	/* component_id field not supported after gen3 */
+	if (stdev->gen != SWITCHTEC_GEN3)
+		return sprintf(buf, "none\n");
 
 	return sprintf(buf, "PM%04X\n", id);
 }
@@ -342,7 +367,11 @@ static ssize_t component_revision_show(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
 	struct switchtec_dev *stdev = to_stdev(dev);
-	int rev = ioread8(&stdev->mmio_sys_info->component_revision);
+	int rev = ioread8(&stdev->mmio_sys_info->gen3.component_revision);
+
+	/* component_revision field not supported after gen3 */
+	if (stdev->gen != SWITCHTEC_GEN3)
+		return sprintf(buf, "255\n");
 
 	return sprintf(buf, "%d\n", rev);
 }
@@ -450,6 +479,12 @@ static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
 		rc = -EFAULT;
 		goto out;
 	}
+	if (((MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_WRITE) ||
+	     (MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_READ)) &&
+	    !capable(CAP_SYS_ADMIN)) {
+		rc = -EPERM;
+		goto out;
+	}
 
 	data += sizeof(stuser->cmd);
 	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
@@ -568,8 +603,15 @@ static int ioctl_flash_info(struct switchtec_dev *stdev,
 	struct switchtec_ioctl_flash_info info = {0};
 	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
 
-	info.flash_length = ioread32(&fi->flash_length);
-	info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;
+	if (stdev->gen == SWITCHTEC_GEN3) {
+		info.flash_length = ioread32(&fi->gen3.flash_length);
+		info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN3;
+	} else if (stdev->gen == SWITCHTEC_GEN4) {
+		info.flash_length = ioread32(&fi->gen4.flash_length);
+		info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN4;
+	} else {
+		return -ENOTSUPP;
+	}
 
 	if (copy_to_user(uinfo, &info, sizeof(info)))
 		return -EFAULT;
@@ -584,75 +626,200 @@ static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
 	info->length = ioread32(&pi->length);
 }
 
-static int ioctl_flash_part_info(struct switchtec_dev *stdev,
-	struct switchtec_ioctl_flash_part_info __user *uinfo)
+static int flash_part_info_gen3(struct switchtec_dev *stdev,
+		struct switchtec_ioctl_flash_part_info *info)
 {
-	struct switchtec_ioctl_flash_part_info info = {0};
-	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
-	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
+	struct flash_info_regs_gen3 __iomem *fi =
+		&stdev->mmio_flash_info->gen3;
+	struct sys_info_regs_gen3 __iomem *si = &stdev->mmio_sys_info->gen3;
 	u32 active_addr = -1;
 
-	if (copy_from_user(&info, uinfo, sizeof(info)))
-		return -EFAULT;
-
-	switch (info.flash_partition) {
+	switch (info->flash_partition) {
 	case SWITCHTEC_IOCTL_PART_CFG0:
 		active_addr = ioread32(&fi->active_cfg);
-		set_fw_info_part(&info, &fi->cfg0);
-		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
-			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
+		set_fw_info_part(info, &fi->cfg0);
+		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG0_RUNNING)
+			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
 		break;
 	case SWITCHTEC_IOCTL_PART_CFG1:
 		active_addr = ioread32(&fi->active_cfg);
-		set_fw_info_part(&info, &fi->cfg1);
-		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
-			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
+		set_fw_info_part(info, &fi->cfg1);
+		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG1_RUNNING)
+			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
 		break;
 	case SWITCHTEC_IOCTL_PART_IMG0:
 		active_addr = ioread32(&fi->active_img);
-		set_fw_info_part(&info, &fi->img0);
-		if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
-			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
+		set_fw_info_part(info, &fi->img0);
+		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG0_RUNNING)
+			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
 		break;
 	case SWITCHTEC_IOCTL_PART_IMG1:
 		active_addr = ioread32(&fi->active_img);
-		set_fw_info_part(&info, &fi->img1);
-		if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
-			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
+		set_fw_info_part(info, &fi->img1);
+		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG1_RUNNING)
+			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
 		break;
 	case SWITCHTEC_IOCTL_PART_NVLOG:
-		set_fw_info_part(&info, &fi->nvlog);
+		set_fw_info_part(info, &fi->nvlog);
 		break;
 	case SWITCHTEC_IOCTL_PART_VENDOR0:
-		set_fw_info_part(&info, &fi->vendor[0]);
+		set_fw_info_part(info, &fi->vendor[0]);
 		break;
 	case SWITCHTEC_IOCTL_PART_VENDOR1:
-		set_fw_info_part(&info, &fi->vendor[1]);
+		set_fw_info_part(info, &fi->vendor[1]);
 		break;
 	case SWITCHTEC_IOCTL_PART_VENDOR2:
-		set_fw_info_part(&info, &fi->vendor[2]);
+		set_fw_info_part(info, &fi->vendor[2]);
 		break;
 	case SWITCHTEC_IOCTL_PART_VENDOR3:
-		set_fw_info_part(&info, &fi->vendor[3]);
+		set_fw_info_part(info, &fi->vendor[3]);
 		break;
 	case SWITCHTEC_IOCTL_PART_VENDOR4:
-		set_fw_info_part(&info, &fi->vendor[4]);
+		set_fw_info_part(info, &fi->vendor[4]);
 		break;
 	case SWITCHTEC_IOCTL_PART_VENDOR5:
-		set_fw_info_part(&info, &fi->vendor[5]);
+		set_fw_info_part(info, &fi->vendor[5]);
 		break;
 	case SWITCHTEC_IOCTL_PART_VENDOR6:
-		set_fw_info_part(&info, &fi->vendor[6]);
+		set_fw_info_part(info, &fi->vendor[6]);
 		break;
 	case SWITCHTEC_IOCTL_PART_VENDOR7:
-		set_fw_info_part(&info, &fi->vendor[7]);
+		set_fw_info_part(info, &fi->vendor[7]);
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	if (info.address == active_addr)
-		info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+	if (info->address == active_addr)
+		info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+
+	return 0;
+}
+
+static int flash_part_info_gen4(struct switchtec_dev *stdev,
+		struct switchtec_ioctl_flash_part_info *info)
+{
+	struct flash_info_regs_gen4 __iomem *fi = &stdev->mmio_flash_info->gen4;
+	struct sys_info_regs_gen4 __iomem *si = &stdev->mmio_sys_info->gen4;
+	struct active_partition_info_gen4 __iomem *af = &fi->active_flag;
+
+	switch (info->flash_partition) {
+	case SWITCHTEC_IOCTL_PART_MAP_0:
+		set_fw_info_part(info, &fi->map0);
+		break;
+	case SWITCHTEC_IOCTL_PART_MAP_1:
+		set_fw_info_part(info, &fi->map1);
+		break;
+	case SWITCHTEC_IOCTL_PART_KEY_0:
+		set_fw_info_part(info, &fi->key0);
+		if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY0_ACTIVE)
+			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+		if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY0_RUNNING)
+			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+		break;
+	case SWITCHTEC_IOCTL_PART_KEY_1:
+		set_fw_info_part(info, &fi->key1);
+		if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY1_ACTIVE)
+			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+		if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY1_RUNNING)
+			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+		break;
+	case SWITCHTEC_IOCTL_PART_BL2_0:
+		set_fw_info_part(info, &fi->bl2_0);
+		if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_0_ACTIVE)
+			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+		if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_0_RUNNING)
+			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+		break;
+	case SWITCHTEC_IOCTL_PART_BL2_1:
+		set_fw_info_part(info, &fi->bl2_1);
+		if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_1_ACTIVE)
+			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+		if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_1_RUNNING)
+			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+		break;
+	case SWITCHTEC_IOCTL_PART_CFG0:
+		set_fw_info_part(info, &fi->cfg0);
+		if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG0_ACTIVE)
+			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG0_RUNNING)
+			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+		break;
+	case SWITCHTEC_IOCTL_PART_CFG1:
+		set_fw_info_part(info, &fi->cfg1);
+		if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG1_ACTIVE)
+			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG1_RUNNING)
+			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+		break;
+	case SWITCHTEC_IOCTL_PART_IMG0:
+		set_fw_info_part(info, &fi->img0);
+		if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG0_ACTIVE)
+			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+		if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG0_RUNNING)
+			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+		break;
+	case SWITCHTEC_IOCTL_PART_IMG1:
+		set_fw_info_part(info, &fi->img1);
+		if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG1_ACTIVE)
+			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+		if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG1_RUNNING)
+			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+		break;
+	case SWITCHTEC_IOCTL_PART_NVLOG:
+		set_fw_info_part(info, &fi->nvlog);
+		break;
+	case SWITCHTEC_IOCTL_PART_VENDOR0:
+		set_fw_info_part(info, &fi->vendor[0]);
+		break;
+	case SWITCHTEC_IOCTL_PART_VENDOR1:
+		set_fw_info_part(info, &fi->vendor[1]);
+		break;
+	case SWITCHTEC_IOCTL_PART_VENDOR2:
+		set_fw_info_part(info, &fi->vendor[2]);
+		break;
+	case SWITCHTEC_IOCTL_PART_VENDOR3:
+		set_fw_info_part(info, &fi->vendor[3]);
+		break;
+	case SWITCHTEC_IOCTL_PART_VENDOR4:
+		set_fw_info_part(info, &fi->vendor[4]);
+		break;
+	case SWITCHTEC_IOCTL_PART_VENDOR5:
+		set_fw_info_part(info, &fi->vendor[5]);
+		break;
+	case SWITCHTEC_IOCTL_PART_VENDOR6:
+		set_fw_info_part(info, &fi->vendor[6]);
+		break;
+	case SWITCHTEC_IOCTL_PART_VENDOR7:
+		set_fw_info_part(info, &fi->vendor[7]);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ioctl_flash_part_info(struct switchtec_dev *stdev,
+		struct switchtec_ioctl_flash_part_info __user *uinfo)
+{
+	int ret;
+	struct switchtec_ioctl_flash_part_info info = {0};
+
+	if (copy_from_user(&info, uinfo, sizeof(info)))
+		return -EFAULT;
+
+	if (stdev->gen == SWITCHTEC_GEN3) {
+		ret = flash_part_info_gen3(stdev, &info);
+		if (ret)
+			return ret;
+	} else if (stdev->gen == SWITCHTEC_GEN4) {
+		ret = flash_part_info_gen4(stdev, &info);
+		if (ret)
+			return ret;
+	} else {
+		return -ENOTSUPP;
+	}
 
 	if (copy_to_user(uinfo, &info, sizeof(info)))
 		return -EFAULT;
@@ -683,11 +850,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
 		s->part[i] = reg;
 	}
 
-	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
-		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
-		if (reg != PCI_VENDOR_ID_MICROSEMI)
-			break;
-
+	for (i = 0; i < stdev->pff_csr_count; i++) {
 		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
 		s->pff[i] = reg;
 	}
@@ -751,10 +914,13 @@ static const struct event_reg {
 	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
 	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
 	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
+	EV_PAR(SWITCHTEC_IOCTL_EVENT_INTERCOMM_REQ_NOTIFY,
+	       intercomm_notify_hdr),
 	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
 	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
 	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
 	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
+	EV_PFF(SWITCHTEC_IOCTL_EVENT_UEC, uec_hdr),
 	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
 	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
 	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
@@ -1181,10 +1347,6 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
 	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
 		return 0;
 
-	if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
-	    eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
-		return 0;
-
 	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
 	hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
 	iowrite32(hdr, hdr_reg);
@@ -1231,8 +1393,13 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev)
 
 	check_link_state_events(stdev);
 
-	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
+	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) {
+		if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
+		    eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
+			continue;
+
 		event_count += mask_all_events(stdev, eid);
+	}
 
 	if (event_count) {
 		atomic_inc(&stdev->event_cnt);
@@ -1276,7 +1443,7 @@ static int switchtec_init_isr(struct switchtec_dev *stdev)
 	if (nvecs < 0)
 		return nvecs;
 
-	event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
+	event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
 	if (event_irq < 0 || event_irq >= nvecs)
 		return -EFAULT;
 
@@ -1324,16 +1491,16 @@ static void init_pff(struct switchtec_dev *stdev)
 	stdev->pff_csr_count = i;
 
 	reg = ioread32(&pcfg->usp_pff_inst_id);
-	if (reg < SWITCHTEC_MAX_PFF_CSR)
+	if (reg < stdev->pff_csr_count)
 		stdev->pff_local[reg] = 1;
 
 	reg = ioread32(&pcfg->vep_pff_inst_id);
-	if (reg < SWITCHTEC_MAX_PFF_CSR)
+	if (reg < stdev->pff_csr_count)
 		stdev->pff_local[reg] = 1;
 
 	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
 		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
-		if (reg < SWITCHTEC_MAX_PFF_CSR)
+		if (reg < stdev->pff_csr_count)
 			stdev->pff_local[reg] = 1;
 	}
 }
@@ -1344,12 +1511,13 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
 	int rc;
 	void __iomem *map;
 	unsigned long res_start, res_len;
+	u32 __iomem *part_id;
 
 	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (rc)
 		return rc;
 
@@ -1378,7 +1546,15 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
 	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
 	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
 	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
-	stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
+
+	if (stdev->gen == SWITCHTEC_GEN3)
+		part_id = &stdev->mmio_sys_info->gen3.partition_id;
+	else if (stdev->gen == SWITCHTEC_GEN4)
+		part_id = &stdev->mmio_sys_info->gen4.partition_id;
+	else
+		return -ENOTSUPP;
+
+	stdev->partition = ioread8(part_id);
 	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
 	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
 	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
@@ -1420,6 +1596,8 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
 	if (IS_ERR(stdev))
 		return PTR_ERR(stdev);
 
+	stdev->gen = id->driver_data;
+
 	rc = switchtec_init_pci(stdev, pdev);
 	if (rc)
 		goto err_put;
@@ -1467,7 +1645,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
 	put_device(&stdev->dev);
 }
 
-#define SWITCHTEC_PCI_DEVICE(device_id) \
+#define SWITCHTEC_PCI_DEVICE(device_id, gen) \
 	{ \
 		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
 		.device     = device_id, \
@@ -1475,6 +1653,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
 		.subdevice  = PCI_ANY_ID, \
 		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
 		.class_mask = 0xFFFFFFFF, \
+		.driver_data = gen, \
 	}, \
 	{ \
 		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
@@ -1483,39 +1662,58 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
 		.subdevice  = PCI_ANY_ID, \
 		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
 		.class_mask = 0xFFFFFFFF, \
+		.driver_data = gen, \
 	}
 
 static const struct pci_device_id switchtec_pci_tbl[] = {
-	SWITCHTEC_PCI_DEVICE(0x8531),  //PFX 24xG3
-	SWITCHTEC_PCI_DEVICE(0x8532),  //PFX 32xG3
-	SWITCHTEC_PCI_DEVICE(0x8533),  //PFX 48xG3
-	SWITCHTEC_PCI_DEVICE(0x8534),  //PFX 64xG3
-	SWITCHTEC_PCI_DEVICE(0x8535),  //PFX 80xG3
-	SWITCHTEC_PCI_DEVICE(0x8536),  //PFX 96xG3
-	SWITCHTEC_PCI_DEVICE(0x8541),  //PSX 24xG3
-	SWITCHTEC_PCI_DEVICE(0x8542),  //PSX 32xG3
-	SWITCHTEC_PCI_DEVICE(0x8543),  //PSX 48xG3
-	SWITCHTEC_PCI_DEVICE(0x8544),  //PSX 64xG3
-	SWITCHTEC_PCI_DEVICE(0x8545),  //PSX 80xG3
-	SWITCHTEC_PCI_DEVICE(0x8546),  //PSX 96xG3
-	SWITCHTEC_PCI_DEVICE(0x8551),  //PAX 24XG3
-	SWITCHTEC_PCI_DEVICE(0x8552),  //PAX 32XG3
-	SWITCHTEC_PCI_DEVICE(0x8553),  //PAX 48XG3
-	SWITCHTEC_PCI_DEVICE(0x8554),  //PAX 64XG3
-	SWITCHTEC_PCI_DEVICE(0x8555),  //PAX 80XG3
-	SWITCHTEC_PCI_DEVICE(0x8556),  //PAX 96XG3
-	SWITCHTEC_PCI_DEVICE(0x8561),  //PFXL 24XG3
-	SWITCHTEC_PCI_DEVICE(0x8562),  //PFXL 32XG3
-	SWITCHTEC_PCI_DEVICE(0x8563),  //PFXL 48XG3
-	SWITCHTEC_PCI_DEVICE(0x8564),  //PFXL 64XG3
-	SWITCHTEC_PCI_DEVICE(0x8565),  //PFXL 80XG3
-	SWITCHTEC_PCI_DEVICE(0x8566),  //PFXL 96XG3
-	SWITCHTEC_PCI_DEVICE(0x8571),  //PFXI 24XG3
-	SWITCHTEC_PCI_DEVICE(0x8572),  //PFXI 32XG3
-	SWITCHTEC_PCI_DEVICE(0x8573),  //PFXI 48XG3
-	SWITCHTEC_PCI_DEVICE(0x8574),  //PFXI 64XG3
-	SWITCHTEC_PCI_DEVICE(0x8575),  //PFXI 80XG3
-	SWITCHTEC_PCI_DEVICE(0x8576),  //PFXI 96XG3
+	SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3),  //PFX 24xG3
+	SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3),  //PFX 32xG3
+	SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3),  //PFX 48xG3
+	SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3),  //PFX 64xG3
+	SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3),  //PFX 80xG3
+	SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3),  //PFX 96xG3
+	SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3),  //PSX 24xG3
+	SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3),  //PSX 32xG3
+	SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3),  //PSX 48xG3
+	SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3),  //PSX 64xG3
+	SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3),  //PSX 80xG3
+	SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3),  //PSX 96xG3
+	SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3),  //PAX 24XG3
+	SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3),  //PAX 32XG3
+	SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3),  //PAX 48XG3
+	SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3),  //PAX 64XG3
+	SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3),  //PAX 80XG3
+	SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3),  //PAX 96XG3
+	SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3),  //PFXL 24XG3
+	SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3),  //PFXL 32XG3
+	SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3),  //PFXL 48XG3
+	SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3),  //PFXL 64XG3
+	SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3),  //PFXL 80XG3
+	SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3),  //PFXL 96XG3
+	SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3),  //PFXI 24XG3
+	SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3),  //PFXI 32XG3
+	SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3),  //PFXI 48XG3
+	SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3),  //PFXI 64XG3
+	SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3),  //PFXI 80XG3
+	SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3),  //PFXI 96XG3
+	SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4),  //PFX 100XG4
+	SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4),  //PFX 84XG4
+	SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4),  //PFX 68XG4
+	SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4),  //PFX 52XG4
+	SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4),  //PFX 36XG4
+	SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4),  //PFX 28XG4
+	SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4),  //PSX 100XG4
+	SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4),  //PSX 84XG4
+	SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4),  //PSX 68XG4
+	SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4),  //PSX 52XG4
+	SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4),  //PSX 36XG4
+	SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4),  //PSX 28XG4
+	SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4),  //PAX 100XG4
+	SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4),  //PAX 84XG4
+	SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4),  //PAX 68XG4
+	SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4),  //PAX 52XG4
+	SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4),  //PAX 36XG4
+	SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4),  //PAX 28XG4
 	{0}
 };
 MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
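
The switchtec changes above all hang off one idea: the PCI match table now carries the hardware generation in driver_data, probe copies it into stdev->gen, and every accessor branches on that instead of assuming the Gen3 register layout. A minimal sketch of that shape follows; the demo_* names are invented for illustration and are not part of the driver.

    #include <errno.h>

    enum demo_gen { DEMO_GEN3, DEMO_GEN4 };

    /* Stand-in for struct switchtec_dev; gen is filled from driver_data at probe. */
    struct demo_dev {
            enum demo_gen gen;
    };

    /* Helpers select the per-generation register layout rather than assuming Gen3. */
    static int demo_flash_part_info(struct demo_dev *ddev)
    {
            switch (ddev->gen) {
            case DEMO_GEN3:
                    return 0;       /* would walk the gen3 flash_info layout */
            case DEMO_GEN4:
                    return 0;       /* would walk the gen4 flash_info layout */
            default:
                    return -EOPNOTSUPP;     /* unknown silicon generation */
            }
    }
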
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index aad8a46..85887d8 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* 
+/*
  * Driver for Intel I82092AA PCI-PCMCIA bridge.
  *
  * (C) 2001 Red Hat, Inc.
@@ -18,7 +18,7 @@
 
 #include <pcmcia/ss.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include "i82092aa.h"
 #include "i82365.h"
@@ -33,16 +33,16 @@ static const struct pci_device_id i82092aa_pci_ids[] = {
 MODULE_DEVICE_TABLE(pci, i82092aa_pci_ids);
 
 static struct pci_driver i82092aa_pci_driver = {
-	.name           = "i82092aa",
-	.id_table       = i82092aa_pci_ids,
-	.probe          = i82092aa_pci_probe,
-	.remove         = i82092aa_pci_remove,
+	.name		= "i82092aa",
+	.id_table	= i82092aa_pci_ids,
+	.probe		= i82092aa_pci_probe,
+	.remove		= i82092aa_pci_remove,
 };
 
 
 /* the pccard structure and its functions */
 static struct pccard_operations i82092aa_operations = {
-	.init 		 	= i82092aa_init,
+	.init			= i82092aa_init,
 	.get_status		= i82092aa_get_status,
 	.set_socket		= i82092aa_set_socket,
 	.set_io_map		= i82092aa_set_io_map,
@@ -53,57 +53,63 @@ static struct pccard_operations i82092aa_operations = {
 
 struct socket_info {
 	int	number;
-	int	card_state; 	/*  0 = no socket,
-				    1 = empty socket, 
-				    2 = card but not initialized,
-				    3 = operational card */
-	unsigned int io_base; 	/* base io address of the socket */
-	
+	int	card_state;
+		/* 0 = no socket,
+		 * 1 = empty socket,
+		 * 2 = card but not initialized,
+		 * 3 = operational card
+		 */
+	unsigned int io_base;	/* base io address of the socket */
+
 	struct pcmcia_socket socket;
 	struct pci_dev *dev;	/* The PCI device for the socket */
 };
 
 #define MAX_SOCKETS 4
 static struct socket_info sockets[MAX_SOCKETS];
-static int socket_count;  /* shortcut */                                  	                                	
+static int socket_count;	/* shortcut */
 
 
-static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int i82092aa_pci_probe(struct pci_dev *dev,
+			      const struct pci_device_id *id)
 {
 	unsigned char configbyte;
 	int i, ret;
-	
-	enter("i82092aa_pci_probe");
-	
-	if ((ret = pci_enable_device(dev)))
+
+	ret = pci_enable_device(dev);
+	if (ret)
 		return ret;
-		
-	pci_read_config_byte(dev, 0x40, &configbyte);  /* PCI Configuration Control */
-	switch(configbyte&6) {
-		case 0:
-			socket_count = 2;
-			break;
-		case 2:
-			socket_count = 1;
-			break;
-		case 4:
-		case 6:
-			socket_count = 4;
-			break;
-			
-		default:
-			printk(KERN_ERR "i82092aa: Oops, you did something we didn't think of.\n");
-			ret = -EIO;
-			goto err_out_disable;
+
+	/* PCI Configuration Control */
+	pci_read_config_byte(dev, 0x40, &configbyte);
+
+	switch (configbyte&6) {
+	case 0:
+		socket_count = 2;
+		break;
+	case 2:
+		socket_count = 1;
+		break;
+	case 4:
+	case 6:
+		socket_count = 4;
+		break;
+
+	default:
+		dev_err(&dev->dev,
+			"Oops, you did something we didn't think of.\n");
+		ret = -EIO;
+		goto err_out_disable;
 	}
-	printk(KERN_INFO "i82092aa: configured as a %d socket device.\n", socket_count);
+	dev_info(&dev->dev, "configured as a %d socket device.\n",
+		 socket_count);
 
 	if (!request_region(pci_resource_start(dev, 0), 2, "i82092aa")) {
 		ret = -EBUSY;
 		goto err_out_disable;
 	}
-	
-	for (i = 0;i<socket_count;i++) {
+
+	for (i = 0; i < socket_count; i++) {
 		sockets[i].card_state = 1; /* 1 = present but empty */
 		sockets[i].io_base = pci_resource_start(dev, 0);
 		sockets[i].socket.features |= SS_CAP_PCCARD;
@@ -114,65 +120,65 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i
 		sockets[i].socket.owner = THIS_MODULE;
 
 		sockets[i].number = i;
-		
+
 		if (card_present(i)) {
 			sockets[i].card_state = 3;
-			dev_dbg(&dev->dev, "i82092aa: slot %i is occupied\n", i);
+			dev_dbg(&dev->dev, "slot %i is occupied\n", i);
 		} else {
-			dev_dbg(&dev->dev, "i82092aa: slot %i is vacant\n", i);
+			dev_dbg(&dev->dev, "slot %i is vacant\n", i);
 		}
 	}
-		
-	/* Now, specifiy that all interrupts are to be done as PCI interrupts */
-	configbyte = 0xFF; /* bitmask, one bit per event, 1 = PCI interrupt, 0 = ISA interrupt */
-	pci_write_config_byte(dev, 0x50, configbyte); /* PCI Interrupt Routing Register */
+
+	/* Now, specify that all interrupts are to be done as PCI interrupts
+	 * bitmask, one bit per event, 1 = PCI interrupt, 0 = ISA interrupt
+	 */
+	configbyte = 0xFF;
+
+	/* PCI Interrupt Routing Register */
+	pci_write_config_byte(dev, 0x50, configbyte);
 
 	/* Register the interrupt handler */
 	dev_dbg(&dev->dev, "Requesting interrupt %i\n", dev->irq);
-	if ((ret = request_irq(dev->irq, i82092aa_interrupt, IRQF_SHARED, "i82092aa", i82092aa_interrupt))) {
-		printk(KERN_ERR "i82092aa: Failed to register IRQ %d, aborting\n", dev->irq);
+	ret = request_irq(dev->irq, i82092aa_interrupt, IRQF_SHARED,
+			  "i82092aa", i82092aa_interrupt);
+	if (ret) {
+		dev_err(&dev->dev, "Failed to register IRQ %d, aborting\n",
+			dev->irq);
 		goto err_out_free_res;
 	}
 
-	for (i = 0; i<socket_count; i++) {
+	for (i = 0; i < socket_count; i++) {
 		sockets[i].socket.dev.parent = &dev->dev;
 		sockets[i].socket.ops = &i82092aa_operations;
 		sockets[i].socket.resource_ops = &pccard_nonstatic_ops;
 		ret = pcmcia_register_socket(&sockets[i].socket);
-		if (ret) {
+		if (ret)
 			goto err_out_free_sockets;
-		}
 	}
 
-	leave("i82092aa_pci_probe");
 	return 0;
 
 err_out_free_sockets:
 	if (i) {
-		for (i--;i>=0;i--) {
+		for (i--; i >= 0; i--)
 			pcmcia_unregister_socket(&sockets[i].socket);
-		}
 	}
 	free_irq(dev->irq, i82092aa_interrupt);
 err_out_free_res:
 	release_region(pci_resource_start(dev, 0), 2);
 err_out_disable:
 	pci_disable_device(dev);
-	return ret;			
+	return ret;
 }
 
 static void i82092aa_pci_remove(struct pci_dev *dev)
 {
 	int i;
 
-	enter("i82092aa_pci_remove");
-	
 	free_irq(dev->irq, i82092aa_interrupt);
 
 	for (i = 0; i < socket_count; i++)
 		pcmcia_unregister_socket(&sockets[i].socket);
-
-	leave("i82092aa_pci_remove");
 }
 
 static DEFINE_SPINLOCK(port_lock);
@@ -184,44 +190,27 @@ static unsigned char indirect_read(int socket, unsigned short reg)
 	unsigned short int port;
 	unsigned char val;
 	unsigned long flags;
-	spin_lock_irqsave(&port_lock,flags);
+
+	spin_lock_irqsave(&port_lock, flags);
 	reg += socket * 0x40;
 	port = sockets[socket].io_base;
-	outb(reg,port);
+	outb(reg, port);
 	val = inb(port+1);
-	spin_unlock_irqrestore(&port_lock,flags);
+	spin_unlock_irqrestore(&port_lock, flags);
 	return val;
 }
 
-#if 0
-static unsigned short indirect_read16(int socket, unsigned short reg)
-{
-	unsigned short int port;
-	unsigned short tmp;
-	unsigned long flags;
-	spin_lock_irqsave(&port_lock,flags);
-	reg  = reg + socket * 0x40;
-	port = sockets[socket].io_base;
-	outb(reg,port);
-	tmp = inb(port+1);
-	reg++;
-	outb(reg,port);
-	tmp = tmp | (inb(port+1)<<8);
-	spin_unlock_irqrestore(&port_lock,flags);
-	return tmp;
-}
-#endif
-
 static void indirect_write(int socket, unsigned short reg, unsigned char value)
 {
 	unsigned short int port;
 	unsigned long flags;
-	spin_lock_irqsave(&port_lock,flags);
+
+	spin_lock_irqsave(&port_lock, flags);
 	reg = reg + socket * 0x40;
-	port = sockets[socket].io_base; 
-	outb(reg,port);
-	outb(value,port+1);
-	spin_unlock_irqrestore(&port_lock,flags);
+	port = sockets[socket].io_base;
+	outb(reg, port);
+	outb(value, port+1);
+	spin_unlock_irqrestore(&port_lock, flags);
 }
 
 static void indirect_setbit(int socket, unsigned short reg, unsigned char mask)
@@ -229,53 +218,58 @@ static void indirect_setbit(int socket, unsigned short reg, unsigned char mask)
 	unsigned short int port;
 	unsigned char val;
 	unsigned long flags;
-	spin_lock_irqsave(&port_lock,flags);
+
+	spin_lock_irqsave(&port_lock, flags);
 	reg = reg + socket * 0x40;
-	port = sockets[socket].io_base; 
-	outb(reg,port);
+	port = sockets[socket].io_base;
+	outb(reg, port);
 	val = inb(port+1);
 	val |= mask;
-	outb(reg,port);
-	outb(val,port+1);
-	spin_unlock_irqrestore(&port_lock,flags);
+	outb(reg, port);
+	outb(val, port+1);
+	spin_unlock_irqrestore(&port_lock, flags);
 }
 
 
-static void indirect_resetbit(int socket, unsigned short reg, unsigned char mask)
+static void indirect_resetbit(int socket,
+			      unsigned short reg, unsigned char mask)
 {
 	unsigned short int port;
 	unsigned char val;
 	unsigned long flags;
-	spin_lock_irqsave(&port_lock,flags);
+
+	spin_lock_irqsave(&port_lock, flags);
 	reg = reg + socket * 0x40;
-	port = sockets[socket].io_base; 
-	outb(reg,port);
+	port = sockets[socket].io_base;
+	outb(reg, port);
 	val = inb(port+1);
 	val &= ~mask;
-	outb(reg,port);
-	outb(val,port+1);
-	spin_unlock_irqrestore(&port_lock,flags);
+	outb(reg, port);
+	outb(val, port+1);
+	spin_unlock_irqrestore(&port_lock, flags);
 }
 
-static void indirect_write16(int socket, unsigned short reg, unsigned short value)
+static void indirect_write16(int socket,
+			     unsigned short reg, unsigned short value)
 {
 	unsigned short int port;
 	unsigned char val;
 	unsigned long flags;
-	spin_lock_irqsave(&port_lock,flags);
+
+	spin_lock_irqsave(&port_lock, flags);
 	reg = reg + socket * 0x40;
-	port = sockets[socket].io_base; 
-	
-	outb(reg,port);
+	port = sockets[socket].io_base;
+
+	outb(reg, port);
 	val = value & 255;
-	outb(val,port+1);
-	
+	outb(val, port+1);
+
 	reg++;
-	
-	outb(reg,port);
+
+	outb(reg, port);
 	val = value>>8;
-	outb(val,port+1);
-	spin_unlock_irqrestore(&port_lock,flags);
+	outb(val, port+1);
+	spin_unlock_irqrestore(&port_lock, flags);
 }
 
 /* simple helper functions */
@@ -284,12 +278,12 @@ static int cycle_time = 120;
 
 static int to_cycles(int ns)
 {
-	if (cycle_time!=0)
+	if (cycle_time != 0)
 		return ns/cycle_time;
 	else
 		return 0;
 }
-    
+
 
 /* Interrupt handler functionality */
 
@@ -299,58 +293,61 @@ static irqreturn_t i82092aa_interrupt(int irq, void *dev)
 	int loopcount = 0;
 	int handled = 0;
 
-	unsigned int events, active=0;
-	
-/*	enter("i82092aa_interrupt");*/
-	
+	unsigned int events, active = 0;
+
 	while (1) {
 		loopcount++;
-		if (loopcount>20) {
-			printk(KERN_ERR "i82092aa: infinite eventloop in interrupt \n");
+		if (loopcount > 20) {
+			pr_err("i82092aa: infinite eventloop in interrupt\n");
 			break;
 		}
-		
+
 		active = 0;
-		
-		for (i=0;i<socket_count;i++) {
+
+		for (i = 0; i < socket_count; i++) {
 			int csc;
-			if (sockets[i].card_state==0) /* Inactive socket, should not happen */
+
+			/* Inactive socket, should not happen */
+			if (sockets[i].card_state == 0)
 				continue;
-			
-			csc = indirect_read(i,I365_CSC); /* card status change register */
-			
-			if (csc==0)  /* no events on this socket */
-			   	continue;
+
+			/* card status change register */
+			csc = indirect_read(i, I365_CSC);
+
+			if (csc == 0)  /* no events on this socket */
+				continue;
 			handled = 1;
 			events = 0;
-			 
+
 			if (csc & I365_CSC_DETECT) {
 				events |= SS_DETECT;
-				printk("Card detected in socket %i!\n",i);
-			 }
-			
-			if (indirect_read(i,I365_INTCTL) & I365_PC_IOCARD) { 
+				dev_info(&sockets[i].dev->dev,
+					 "Card detected in socket %i!\n", i);
+			}
+
+			if (indirect_read(i, I365_INTCTL) & I365_PC_IOCARD) {
 				/* For IO/CARDS, bit 0 means "read the card" */
-				events |= (csc & I365_CSC_STSCHG) ? SS_STSCHG : 0; 
+				if (csc & I365_CSC_STSCHG)
+					events |= SS_STSCHG;
 			} else {
 				/* Check for battery/ready events */
-				events |= (csc & I365_CSC_BVD1) ? SS_BATDEAD : 0;
-				events |= (csc & I365_CSC_BVD2) ? SS_BATWARN : 0;
-				events |= (csc & I365_CSC_READY) ? SS_READY : 0;
+				if (csc & I365_CSC_BVD1)
+					events |= SS_BATDEAD;
+				if (csc & I365_CSC_BVD2)
+					events |= SS_BATWARN;
+				if (csc & I365_CSC_READY)
+					events |= SS_READY;
 			}
-			
-			if (events) {
+
+			if (events)
 				pcmcia_parse_events(&sockets[i].socket, events);
-			}
 			active |= events;
 		}
-				
-		if (active==0) /* no more events to handle */
-			break;				
-		
+
+		if (active == 0) /* no more events to handle */
+			break;
 	}
 	return IRQ_RETVAL(handled);
-/*	leave("i82092aa_interrupt");*/
 }
 
 
@@ -358,80 +355,67 @@ static irqreturn_t i82092aa_interrupt(int irq, void *dev)
 /* socket functions */
 
 static int card_present(int socketno)
-{	
+{
 	unsigned int val;
-	enter("card_present");
-	
-	if ((socketno<0) || (socketno >= MAX_SOCKETS))
+
+	if ((socketno < 0) || (socketno >= MAX_SOCKETS))
 		return 0;
 	if (sockets[socketno].io_base == 0)
 		return 0;
 
-		
+
 	val = indirect_read(socketno, 1); /* Interface status register */
-	if ((val&12)==12) {
-		leave("card_present 1");
+	if ((val&12) == 12)
 		return 1;
-	}
-		
-	leave("card_present 0");
+
 	return 0;
 }
 
 static void set_bridge_state(int sock)
 {
-	enter("set_bridge_state");
-	indirect_write(sock, I365_GBLCTL,0x00);
-	indirect_write(sock, I365_GENCTL,0x00);
-	
-	indirect_setbit(sock, I365_INTCTL,0x08);
-	leave("set_bridge_state");
+	indirect_write(sock, I365_GBLCTL, 0x00);
+	indirect_write(sock, I365_GENCTL, 0x00);
+
+	indirect_setbit(sock, I365_INTCTL, 0x08);
 }
 
 
-
-
-
-      
 static int i82092aa_init(struct pcmcia_socket *sock)
 {
 	int i;
 	struct resource res = { .start = 0, .end = 0x0fff };
-        pccard_io_map io = { 0, 0, 0, 0, 1 };
+	pccard_io_map io = { 0, 0, 0, 0, 1 };
 	pccard_mem_map mem = { .res = &res, };
-        
-        enter("i82092aa_init");
-                        
-        for (i = 0; i < 2; i++) {
-        	io.map = i;
-                i82092aa_set_io_map(sock, &io);
+
+	for (i = 0; i < 2; i++) {
+		io.map = i;
+		i82092aa_set_io_map(sock, &io);
 	}
-        for (i = 0; i < 5; i++) {
-        	mem.map = i;
-                i82092aa_set_mem_map(sock, &mem);
+	for (i = 0; i < 5; i++) {
+		mem.map = i;
+		i82092aa_set_mem_map(sock, &mem);
 	}
-	
-	leave("i82092aa_init");
+
 	return 0;
 }
-                                                                                                                                                                                                                                              
+
 static int i82092aa_get_status(struct pcmcia_socket *socket, u_int *value)
 {
-	unsigned int sock = container_of(socket, struct socket_info, socket)->number;
+	unsigned int sock = container_of(socket,
+				struct socket_info, socket)->number;
 	unsigned int status;
-	
-	enter("i82092aa_get_status");
-	
-	status = indirect_read(sock,I365_STATUS); /* Interface Status Register */
+
+	/* Interface Status Register */
+	status = indirect_read(sock, I365_STATUS);
+
 	*value = 0;
-	
-	if ((status & I365_CS_DETECT) == I365_CS_DETECT) {
+
+	if ((status & I365_CS_DETECT) == I365_CS_DETECT)
 		*value |= SS_DETECT;
-	}
-		
+
 	/* IO cards have a different meaning of bits 0,1 */
 	/* Also notice the inverse-logic on the bits */
-	if (indirect_read(sock, I365_INTCTL) & I365_PC_IOCARD)	{
+	if (indirect_read(sock, I365_INTCTL) & I365_PC_IOCARD) {
 		/* IO card */
 		if (!(status & I365_CS_STSCHG))
 			*value |= SS_STSCHG;
@@ -441,246 +425,238 @@ static int i82092aa_get_status(struct pcmcia_socket *socket, u_int *value)
 		if (!(status & I365_CS_BVD2))
 			*value |= SS_BATWARN;
 	}
-	 
+
 	if (status & I365_CS_WRPROT)
 		(*value) |= SS_WRPROT;	/* card is write protected */
-	 
+
 	if (status & I365_CS_READY)
 		(*value) |= SS_READY;    /* card is not busy */
-	 	
+
 	if (status & I365_CS_POWERON)
 		(*value) |= SS_POWERON;  /* power is applied to the card */
 
-	leave("i82092aa_get_status");
 	return 0;
 }
 
 
-static int i82092aa_set_socket(struct pcmcia_socket *socket, socket_state_t *state) 
+static int i82092aa_set_socket(struct pcmcia_socket *socket,
+			       socket_state_t *state)
 {
-	unsigned int sock = container_of(socket, struct socket_info, socket)->number;
+	struct socket_info *sock_info = container_of(socket, struct socket_info,
+						     socket);
+	unsigned int sock = sock_info->number;
 	unsigned char reg;
-	
-	enter("i82092aa_set_socket");
-	
+
 	/* First, set the global controller options */
-	
+
 	set_bridge_state(sock);
-	
+
 	/* Values for the IGENC register */
-	
+
 	reg = 0;
-	if (!(state->flags & SS_RESET)) 	/* The reset bit has "inverse" logic */
-		reg = reg | I365_PC_RESET;  
-	if (state->flags & SS_IOCARD) 
+
+	/* The reset bit has "inverse" logic */
+	if (!(state->flags & SS_RESET))
+		reg = reg | I365_PC_RESET;
+	if (state->flags & SS_IOCARD)
 		reg = reg | I365_PC_IOCARD;
-		
-	indirect_write(sock,I365_INTCTL,reg); /* IGENC, Interrupt and General Control Register */
-	
+
+	/* IGENC, Interrupt and General Control Register */
+	indirect_write(sock, I365_INTCTL, reg);
+
 	/* Power registers */
-	
+
 	reg = I365_PWR_NORESET; /* default: disable resetdrv on resume */
-	
+
 	if (state->flags & SS_PWR_AUTO) {
-		printk("Auto power\n");
+		dev_info(&sock_info->dev->dev, "Auto power\n");
+		reg |= I365_PWR_AUTO;	/* automatic power management */
 	}
 	if (state->flags & SS_OUTPUT_ENA) {
-		printk("Power Enabled \n");
+		dev_info(&sock_info->dev->dev, "Power Enabled\n");
 		reg |= I365_PWR_OUT;	/* enable power */
 	}
-	
+
 	switch (state->Vcc) {
-		case 0:	
-			break;
-		case 50: 
-			printk("setting voltage to Vcc to 5V on socket %i\n",sock);
-			reg |= I365_VCC_5V;
-			break;
-		default:
-			printk("i82092aa: i82092aa_set_socket called with invalid VCC power value: %i ", state->Vcc);
-			leave("i82092aa_set_socket");
-			return -EINVAL;
+	case 0:
+		break;
+	case 50:
+		dev_info(&sock_info->dev->dev,
+			 "setting voltage to Vcc to 5V on socket %i\n",
+			 sock);
+		reg |= I365_VCC_5V;
+		break;
+	default:
+		dev_err(&sock_info->dev->dev,
+			"%s called with invalid VCC power value: %i",
+			__func__, state->Vcc);
+		return -EINVAL;
 	}
-	
-	
+
 	switch (state->Vpp) {
-		case 0:	
-			printk("not setting Vpp on socket %i\n",sock);
-			break;
-		case 50: 
-			printk("setting Vpp to 5.0 for socket %i\n",sock);
-			reg |= I365_VPP1_5V | I365_VPP2_5V;
-			break;
-		case 120: 
-			printk("setting Vpp to 12.0\n");
-			reg |= I365_VPP1_12V | I365_VPP2_12V;
-			break;
-		default:
-			printk("i82092aa: i82092aa_set_socket called with invalid VPP power value: %i ", state->Vcc);
-			leave("i82092aa_set_socket");
-			return -EINVAL;
+	case 0:
+		dev_info(&sock_info->dev->dev,
+			 "not setting Vpp on socket %i\n", sock);
+		break;
+	case 50:
+		dev_info(&sock_info->dev->dev,
+			 "setting Vpp to 5.0 for socket %i\n", sock);
+		reg |= I365_VPP1_5V | I365_VPP2_5V;
+		break;
+	case 120:
+		dev_info(&sock_info->dev->dev, "setting Vpp to 12.0\n");
+		reg |= I365_VPP1_12V | I365_VPP2_12V;
+		break;
+	default:
+		dev_err(&sock_info->dev->dev,
+			"%s called with invalid VPP power value: %i",
+			__func__, state->Vpp);
+		return -EINVAL;
 	}
-	
-	if (reg != indirect_read(sock,I365_POWER)) /* only write if changed */
-		indirect_write(sock,I365_POWER,reg);
-		
+
+	if (reg != indirect_read(sock, I365_POWER)) /* only write if changed */
+		indirect_write(sock, I365_POWER, reg);
+
 	/* Enable specific interrupt events */
-	
+
 	reg = 0x00;
-	if (state->csc_mask & SS_DETECT) {
+	if (state->csc_mask & SS_DETECT)
 		reg |= I365_CSC_DETECT;
-	}
 	if (state->flags & SS_IOCARD) {
 		if (state->csc_mask & SS_STSCHG)
 			reg |= I365_CSC_STSCHG;
 	} else {
-		if (state->csc_mask & SS_BATDEAD) 
+		if (state->csc_mask & SS_BATDEAD)
 			reg |= I365_CSC_BVD1;
-		if (state->csc_mask & SS_BATWARN) 
+		if (state->csc_mask & SS_BATWARN)
 			reg |= I365_CSC_BVD2;
-		if (state->csc_mask & SS_READY) 
-			reg |= I365_CSC_READY; 
-		                        
-	}
-	
-	/* now write the value and clear the (probably bogus) pending stuff by doing a dummy read*/
-	
-	indirect_write(sock,I365_CSCINT,reg);
-	(void)indirect_read(sock,I365_CSC);
+		if (state->csc_mask & SS_READY)
+			reg |= I365_CSC_READY;
 
-	leave("i82092aa_set_socket");
+	}
+
+	/* now write the value and clear the (probably bogus) pending stuff
+	 * by doing a dummy read
+	 */
+
+	indirect_write(sock, I365_CSCINT, reg);
+	(void)indirect_read(sock, I365_CSC);
+
 	return 0;
 }
 
-static int i82092aa_set_io_map(struct pcmcia_socket *socket, struct pccard_io_map *io)
+static int i82092aa_set_io_map(struct pcmcia_socket *socket,
+			       struct pccard_io_map *io)
 {
-	unsigned int sock = container_of(socket, struct socket_info, socket)->number;
+	struct socket_info *sock_info = container_of(socket, struct socket_info,
+						     socket);
+	unsigned int sock = sock_info->number;
 	unsigned char map, ioctl;
-	
-	enter("i82092aa_set_io_map");
-	
-	map = io->map;
-	
-	/* Check error conditions */	
-	if (map > 1) {
-		leave("i82092aa_set_io_map with invalid map");
-		return -EINVAL;
-	}
-	if ((io->start > 0xffff) || (io->stop > 0xffff) || (io->stop < io->start)){
-		leave("i82092aa_set_io_map with invalid io");
-		return -EINVAL;
-	}
 
-	/* Turn off the window before changing anything */ 
+	map = io->map;
+
+	/* Check error conditions */
+	if (map > 1)
+		return -EINVAL;
+
+	if ((io->start > 0xffff) || (io->stop > 0xffff) ||
+	    (io->stop < io->start))
+		return -EINVAL;
+
+	/* Turn off the window before changing anything */
 	if (indirect_read(sock, I365_ADDRWIN) & I365_ENA_IO(map))
 		indirect_resetbit(sock, I365_ADDRWIN, I365_ENA_IO(map));
 
-/*	printk("set_io_map: Setting range to %x - %x \n",io->start,io->stop);  */
-	
 	/* write the new values */
-	indirect_write16(sock,I365_IO(map)+I365_W_START,io->start);            	
-	indirect_write16(sock,I365_IO(map)+I365_W_STOP,io->stop);            	
-	            	
-	ioctl = indirect_read(sock,I365_IOCTL) & ~I365_IOCTL_MASK(map);
-	
+	indirect_write16(sock, I365_IO(map)+I365_W_START, io->start);
+	indirect_write16(sock, I365_IO(map)+I365_W_STOP, io->stop);
+
+	ioctl = indirect_read(sock, I365_IOCTL) & ~I365_IOCTL_MASK(map);
+
 	if (io->flags & (MAP_16BIT|MAP_AUTOSZ))
 		ioctl |= I365_IOCTL_16BIT(map);
-		
-	indirect_write(sock,I365_IOCTL,ioctl);
-	
+
+	indirect_write(sock, I365_IOCTL, ioctl);
+
 	/* Turn the window back on if needed */
 	if (io->flags & MAP_ACTIVE)
-		indirect_setbit(sock,I365_ADDRWIN,I365_ENA_IO(map));
-			
-	leave("i82092aa_set_io_map");	
+		indirect_setbit(sock, I365_ADDRWIN, I365_ENA_IO(map));
+
 	return 0;
 }
 
-static int i82092aa_set_mem_map(struct pcmcia_socket *socket, struct pccard_mem_map *mem)
+static int i82092aa_set_mem_map(struct pcmcia_socket *socket,
+				struct pccard_mem_map *mem)
 {
-	struct socket_info *sock_info = container_of(socket, struct socket_info, socket);
+	struct socket_info *sock_info = container_of(socket, struct socket_info,
+						     socket);
 	unsigned int sock = sock_info->number;
 	struct pci_bus_region region;
 	unsigned short base, i;
 	unsigned char map;
-	
-	enter("i82092aa_set_mem_map");
 
 	pcibios_resource_to_bus(sock_info->dev->bus, &region, mem->res);
-	
+
 	map = mem->map;
-	if (map > 4) {
-		leave("i82092aa_set_mem_map: invalid map");
+	if (map > 4)
 		return -EINVAL;
-	}
-	
-	
-	if ( (mem->card_start > 0x3ffffff) || (region.start > region.end) ||
-	     (mem->speed > 1000) ) {
-		leave("i82092aa_set_mem_map: invalid address / speed");
-		printk("invalid mem map for socket %i: %llx to %llx with a "
-			"start of %x\n",
+
+	if ((mem->card_start > 0x3ffffff) || (region.start > region.end) ||
+	     (mem->speed > 1000)) {
+		dev_err(&sock_info->dev->dev,
+			"invalid mem map for socket %i: %llx to %llx with a start of %x\n",
 			sock,
 			(unsigned long long)region.start,
 			(unsigned long long)region.end,
 			mem->card_start);
 		return -EINVAL;
 	}
-	
+
 	/* Turn off the window before changing anything */
 	if (indirect_read(sock, I365_ADDRWIN) & I365_ENA_MEM(map))
-	              indirect_resetbit(sock, I365_ADDRWIN, I365_ENA_MEM(map));
-	                 
-	                 
-/* 	printk("set_mem_map: Setting map %i range to %x - %x on socket %i, speed is %i, active = %i \n",map, region.start,region.end,sock,mem->speed,mem->flags & MAP_ACTIVE);  */
+		indirect_resetbit(sock, I365_ADDRWIN, I365_ENA_MEM(map));
 
 	/* write the start address */
 	base = I365_MEM(map);
 	i = (region.start >> 12) & 0x0fff;
-	if (mem->flags & MAP_16BIT) 
+	if (mem->flags & MAP_16BIT)
 		i |= I365_MEM_16BIT;
 	if (mem->flags & MAP_0WS)
-		i |= I365_MEM_0WS;	
-	indirect_write16(sock,base+I365_W_START,i);
-		               
+		i |= I365_MEM_0WS;
+	indirect_write16(sock, base+I365_W_START, i);
+
 	/* write the stop address */
-	
-	i= (region.end >> 12) & 0x0fff;
+
+	i = (region.end >> 12) & 0x0fff;
 	switch (to_cycles(mem->speed)) {
-		case 0:
-			break;
-		case 1:
-			i |= I365_MEM_WS0;
-			break;
-		case 2:
-			i |= I365_MEM_WS1;
-			break;
-		default:
-			i |= I365_MEM_WS1 | I365_MEM_WS0;
-			break;
+	case 0:
+		break;
+	case 1:
+		i |= I365_MEM_WS0;
+		break;
+	case 2:
+		i |= I365_MEM_WS1;
+		break;
+	default:
+		i |= I365_MEM_WS1 | I365_MEM_WS0;
+		break;
 	}
-	
-	indirect_write16(sock,base+I365_W_STOP,i);
-	
+
+	indirect_write16(sock, base+I365_W_STOP, i);
+
 	/* card start */
-	
+
 	i = ((mem->card_start - region.start) >> 12) & 0x3fff;
 	if (mem->flags & MAP_WRPROT)
 		i |= I365_MEM_WRPROT;
-	if (mem->flags & MAP_ATTRIB) {
-/*		printk("requesting attribute memory for socket %i\n",sock);*/
+	if (mem->flags & MAP_ATTRIB)
 		i |= I365_MEM_REG;
-	} else {
-/*		printk("requesting normal memory for socket %i\n",sock);*/
-	}
-	indirect_write16(sock,base+I365_W_OFF,i);
-	
+	indirect_write16(sock, base+I365_W_OFF, i);
+
 	/* Enable the window if necessary */
 	if (mem->flags & MAP_ACTIVE)
 		indirect_setbit(sock, I365_ADDRWIN, I365_ENA_MEM(map));
-	            
-	leave("i82092aa_set_mem_map");
+
 	return 0;
 }
 
@@ -691,11 +667,9 @@ static int i82092aa_module_init(void)
 
 static void i82092aa_module_exit(void)
 {
-	enter("i82092aa_module_exit");
 	pci_unregister_driver(&i82092aa_pci_driver);
-	if (sockets[0].io_base>0)
-			 release_region(sockets[0].io_base, 2);
-	leave("i82092aa_module_exit");
+	if (sockets[0].io_base > 0)
+		release_region(sockets[0].io_base, 2);
 }
 
 module_init(i82092aa_module_init);
diff --git a/drivers/pcmcia/i82092aa.h b/drivers/pcmcia/i82092aa.h
index 4586c43..0f851ac 100644
--- a/drivers/pcmcia/i82092aa.h
+++ b/drivers/pcmcia/i82092aa.h
@@ -4,17 +4,6 @@
 
 #include <linux/interrupt.h>
 
-/* Debuging defines */
-#ifdef NOTRACE
-#define enter(x)   printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__)
-#define leave(x)   printk("Leave: %s, %s line %i\n",x,__FILE__,__LINE__)
-#else
-#define enter(x)   do {} while (0)
-#define leave(x)   do {} while (0)
-#endif
-
-
-
 /* prototypes */
 
 static int  i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *id);
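
The i82092 cleanup above leaves the controller's indexed register access untouched: a register number is written to the index port at io_base, the byte is transferred through the data port at io_base + 1, each socket's registers sit 0x40 apart, and every access is serialized by one spinlock. A rough kernel-style sketch of that idiom, with demo_* names standing in for the driver's own helpers:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_port_lock);

    /* Read one byte from an indexed register of the given socket. */
    static unsigned char demo_indirect_read(unsigned int io_base, int socket,
                                            unsigned short reg)
    {
            unsigned long flags;
            unsigned char val;

            spin_lock_irqsave(&demo_port_lock, flags);
            outb(reg + socket * 0x40, io_base);     /* select the register */
            val = inb(io_base + 1);                 /* fetch its value */
            spin_unlock_irqrestore(&demo_port_lock, flags);

            return val;
    }
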
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 0263db2..b3ed94b 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -69,5 +69,6 @@
 source "drivers/phy/st/Kconfig"
 source "drivers/phy/tegra/Kconfig"
 source "drivers/phy/ti/Kconfig"
+source "drivers/phy/intel/Kconfig"
 
 endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index c96a1af..310c149 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -18,6 +18,7 @@
 					   cadence/	\
 					   freescale/	\
 					   hisilicon/	\
+					   intel/	\
 					   lantiq/	\
 					   marvell/	\
 					   motorola/	\
diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig
index 3dab79e..e760d89 100644
--- a/drivers/phy/allwinner/Kconfig
+++ b/drivers/phy/allwinner/Kconfig
@@ -48,7 +48,8 @@
 
 config PHY_SUN50I_USB3
 	tristate "Allwinner H6 SoC USB3 PHY driver"
-	depends on ARCH_SUNXI && HAS_IOMEM && OF
+	depends on ARCH_SUNXI || COMPILE_TEST
+	depends on HAS_IOMEM && OF
 	depends on RESET_CONTROLLER
 	select GENERIC_PHY
 	help
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index d3d983c1..b29f11c 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -50,7 +50,7 @@
 
 config PHY_NS2_PCIE
 	tristate "Broadcom Northstar2 PCIe PHY driver"
-	depends on OF && MDIO_BUS_MUX_BCM_IPROC
+	depends on (OF && MDIO_BUS_MUX_BCM_IPROC) || (COMPILE_TEST && MDIO_BUS)
 	select GENERIC_PHY
 	default ARCH_BCM_IPROC
 	help
@@ -83,7 +83,7 @@
 
 config PHY_BRCM_USB
 	tristate "Broadcom STB USB PHY driver"
-	depends on ARCH_BRCMSTB
+	depends on ARCH_BRCMSTB || COMPILE_TEST
 	depends on OF
 	select GENERIC_PHY
 	select SOC_BRCMSTB
diff --git a/drivers/phy/broadcom/Makefile b/drivers/phy/broadcom/Makefile
index f453c7d..c78de54 100644
--- a/drivers/phy/broadcom/Makefile
+++ b/drivers/phy/broadcom/Makefile
@@ -8,7 +8,7 @@
 obj-$(CONFIG_PHY_BRCM_SATA)		+= phy-brcm-sata.o
 obj-$(CONFIG_PHY_BRCM_USB)		+= phy-brcm-usb-dvr.o
 
-phy-brcm-usb-dvr-objs := phy-brcm-usb.o phy-brcm-usb-init.o
+phy-brcm-usb-dvr-objs := phy-brcm-usb.o phy-brcm-usb-init.o phy-brcm-usb-init-synopsys.o
 
 obj-$(CONFIG_PHY_BCM_SR_PCIE)		+= phy-bcm-sr-pcie.o
 obj-$(CONFIG_PHY_BCM_SR_USB)		+= phy-bcm-sr-usb.o
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 50ac75b..4710cfc 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -33,6 +33,7 @@
 #define SATA_PHY_CTRL_REG_28NM_SPACE_SIZE		0x8
 
 enum brcm_sata_phy_version {
+	BRCM_SATA_PHY_STB_16NM,
 	BRCM_SATA_PHY_STB_28NM,
 	BRCM_SATA_PHY_STB_40NM,
 	BRCM_SATA_PHY_IPROC_NS2,
@@ -104,10 +105,13 @@ enum sata_phy_regs {
 	PLL1_ACTRL5				= 0x85,
 	PLL1_ACTRL6				= 0x86,
 	PLL1_ACTRL7				= 0x87,
+	PLL1_ACTRL8				= 0x88,
 
 	TX_REG_BANK				= 0x070,
 	TX_ACTRL0				= 0x80,
 	TX_ACTRL0_TXPOL_FLIP			= BIT(6),
+	TX_ACTRL5				= 0x85,
+	TX_ACTRL5_SSC_EN			= BIT(11),
 
 	AEQRX_REG_BANK_0			= 0xd0,
 	AEQ_CONTROL1				= 0x81,
@@ -116,6 +120,7 @@ enum sata_phy_regs {
 	AEQ_FRC_EQ				= 0x83,
 	AEQ_FRC_EQ_FORCE			= BIT(0),
 	AEQ_FRC_EQ_FORCE_VAL			= BIT(1),
+	AEQ_RFZ_FRC_VAL				= BIT(8),
 	AEQRX_REG_BANK_1			= 0xe0,
 	AEQRX_SLCAL0_CTRL0			= 0x82,
 	AEQRX_SLCAL1_CTRL0			= 0x86,
@@ -152,7 +157,28 @@ enum sata_phy_regs {
 	TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK	= 0x3ff,
 
 	RXPMD_REG_BANK				= 0x1c0,
+	RXPMD_RX_CDR_CONTROL1			= 0x81,
+	RXPMD_RX_PPM_VAL_MASK			= 0x1ff,
+	RXPMD_RXPMD_EN_FRC			= BIT(12),
+	RXPMD_RXPMD_EN_FRC_VAL			= BIT(13),
+	RXPMD_RX_CDR_CDR_PROP_BW		= 0x82,
+	RXPMD_G_CDR_PROP_BW_MASK		= 0x7,
+	RXPMD_G1_CDR_PROP_BW_SHIFT		= 0,
+	RXPMD_G2_CDR_PROP_BW_SHIFT		= 3,
+	RXPMD_G3_CDR_PROB_BW_SHIFT		= 6,
+	RXPMD_RX_CDR_CDR_ACQ_INTEG_BW		= 0x83,
+	RXPMD_G_CDR_ACQ_INT_BW_MASK		= 0x7,
+	RXPMD_G1_CDR_ACQ_INT_BW_SHIFT		= 0,
+	RXPMD_G2_CDR_ACQ_INT_BW_SHIFT		= 3,
+	RXPMD_G3_CDR_ACQ_INT_BW_SHIFT		= 6,
+	RXPMD_RX_CDR_CDR_LOCK_INTEG_BW		= 0x84,
+	RXPMD_G_CDR_LOCK_INT_BW_MASK		= 0x7,
+	RXPMD_G1_CDR_LOCK_INT_BW_SHIFT		= 0,
+	RXPMD_G2_CDR_LOCK_INT_BW_SHIFT		= 3,
+	RXPMD_G3_CDR_LOCK_INT_BW_SHIFT		= 6,
 	RXPMD_RX_FREQ_MON_CONTROL1		= 0x87,
+	RXPMD_MON_CORRECT_EN			= BIT(8),
+	RXPMD_MON_MARGIN_VAL_MASK		= 0xff,
 };
 
 enum sata_phy_ctrl_regs {
@@ -166,6 +192,7 @@ static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port)
 	u32 size = 0;
 
 	switch (priv->version) {
+	case BRCM_SATA_PHY_STB_16NM:
 	case BRCM_SATA_PHY_STB_28NM:
 	case BRCM_SATA_PHY_IPROC_NS2:
 	case BRCM_SATA_PHY_DSL_28NM:
@@ -287,6 +314,94 @@ static int brcm_stb_sata_init(struct brcm_sata_port *port)
 	return brcm_stb_sata_rxaeq_init(port);
 }
 
+static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
+{
+	void __iomem *base = brcm_sata_pcb_base(port);
+	u32 tmp, value;
+
+	/* Reduce CP tail current to 1/16th of its default value */
+	brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0x141);
+
+	/* Turn off CP tail current boost */
+	brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL8, 0, 0xc006);
+
+	/* Set a specific AEQ equalizer value */
+	tmp = AEQ_FRC_EQ_FORCE_VAL | AEQ_FRC_EQ_FORCE;
+	brcm_sata_phy_wr(base, AEQRX_REG_BANK_0, AEQ_FRC_EQ,
+			 ~(tmp | AEQ_RFZ_FRC_VAL |
+			   AEQ_FRC_EQ_VAL_MASK << AEQ_FRC_EQ_VAL_SHIFT),
+			 tmp | 32 << AEQ_FRC_EQ_VAL_SHIFT);
+
+	/* Set RX PPM val center frequency */
+	if (port->ssc_en)
+		value = 0x52;
+	else
+		value = 0;
+	brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CONTROL1,
+			 ~RXPMD_RX_PPM_VAL_MASK, value);
+
+	/* Set proportional loop bandwidth for Gen1/2/3 */
+	tmp = RXPMD_G_CDR_PROP_BW_MASK << RXPMD_G1_CDR_PROP_BW_SHIFT |
+	      RXPMD_G_CDR_PROP_BW_MASK << RXPMD_G2_CDR_PROP_BW_SHIFT |
+	      RXPMD_G_CDR_PROP_BW_MASK << RXPMD_G3_CDR_PROB_BW_SHIFT;
+	if (port->ssc_en)
+		value = 2 << RXPMD_G1_CDR_PROP_BW_SHIFT |
+			2 << RXPMD_G2_CDR_PROP_BW_SHIFT |
+			2 << RXPMD_G3_CDR_PROB_BW_SHIFT;
+	else
+		value = 1 << RXPMD_G1_CDR_PROP_BW_SHIFT |
+			1 << RXPMD_G2_CDR_PROP_BW_SHIFT |
+			1 << RXPMD_G3_CDR_PROB_BW_SHIFT;
+	brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_PROP_BW, ~tmp,
+			 value);
+
+	/* Set CDR integral loop acquisition bandwidth for Gen1/2/3 */
+	tmp = RXPMD_G_CDR_ACQ_INT_BW_MASK << RXPMD_G1_CDR_ACQ_INT_BW_SHIFT |
+	      RXPMD_G_CDR_ACQ_INT_BW_MASK << RXPMD_G2_CDR_ACQ_INT_BW_SHIFT |
+	      RXPMD_G_CDR_ACQ_INT_BW_MASK << RXPMD_G3_CDR_ACQ_INT_BW_SHIFT;
+	if (port->ssc_en)
+		value = 1 << RXPMD_G1_CDR_ACQ_INT_BW_SHIFT |
+			1 << RXPMD_G2_CDR_ACQ_INT_BW_SHIFT |
+			1 << RXPMD_G3_CDR_ACQ_INT_BW_SHIFT;
+	else
+		value = 0;
+	brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_ACQ_INTEG_BW,
+			 ~tmp, value);
+
+	/* Set CDR integral loop locking bandwidth to 1 for Gen 1/2/3 */
+	tmp = RXPMD_G_CDR_LOCK_INT_BW_MASK << RXPMD_G1_CDR_LOCK_INT_BW_SHIFT |
+	      RXPMD_G_CDR_LOCK_INT_BW_MASK << RXPMD_G2_CDR_LOCK_INT_BW_SHIFT |
+	      RXPMD_G_CDR_LOCK_INT_BW_MASK << RXPMD_G3_CDR_LOCK_INT_BW_SHIFT;
+	if (port->ssc_en)
+		value = 1 << RXPMD_G1_CDR_LOCK_INT_BW_SHIFT |
+			1 << RXPMD_G2_CDR_LOCK_INT_BW_SHIFT |
+			1 << RXPMD_G3_CDR_LOCK_INT_BW_SHIFT;
+	else
+		value = 0;
+	brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_LOCK_INTEG_BW,
+			 ~tmp, value);
+
+	/* Set no guard band and clamp CDR */
+	tmp = RXPMD_MON_CORRECT_EN | RXPMD_MON_MARGIN_VAL_MASK;
+	if (port->ssc_en)
+		value = 0x51;
+	else
+		value = 0;
+	brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
+			 ~tmp, RXPMD_MON_CORRECT_EN | value);
+
+	/* Turn on/off SSC */
+	brcm_sata_phy_wr(base, TX_REG_BANK, TX_ACTRL5, ~TX_ACTRL5_SSC_EN,
+			 port->ssc_en ? TX_ACTRL5_SSC_EN : 0);
+
+	return 0;
+}
+
+static int brcm_stb_sata_16nm_init(struct brcm_sata_port *port)
+{
+	return brcm_stb_sata_16nm_ssc_init(port);
+}
+
 /* NS2 SATA PLL1 defaults were characterized by H/W group */
 #define NS2_PLL1_ACTRL2_MAGIC	0x1df8
 #define NS2_PLL1_ACTRL3_MAGIC	0x2b00
@@ -544,6 +659,9 @@ static int brcm_sata_phy_init(struct phy *phy)
 	struct brcm_sata_port *port = phy_get_drvdata(phy);
 
 	switch (port->phy_priv->version) {
+	case BRCM_SATA_PHY_STB_16NM:
+		rc = brcm_stb_sata_16nm_init(port);
+		break;
 	case BRCM_SATA_PHY_STB_28NM:
 	case BRCM_SATA_PHY_STB_40NM:
 		rc = brcm_stb_sata_init(port);
@@ -601,6 +719,8 @@ static const struct phy_ops phy_ops = {
 };
 
 static const struct of_device_id brcm_sata_phy_of_match[] = {
+	{ .compatible	= "brcm,bcm7216-sata-phy",
+	  .data = (void *)BRCM_SATA_PHY_STB_16NM },
 	{ .compatible	= "brcm,bcm7445-sata-phy",
 	  .data = (void *)BRCM_SATA_PHY_STB_28NM },
 	{ .compatible	= "brcm,bcm7425-sata-phy",
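
The new 16nm SSC setup above is built entirely from masked read-modify-write register updates: each brcm_sata_phy_wr() call clears a field with the inverted mask and then ORs in the value chosen for the SSC-on or SSC-off case. A generic sketch of that clear-then-set step is shown below; demo_rmw() and its arguments are illustrative, not the driver's helper.

    #include <linux/io.h>

    /* Clear the bits in clear_mask, then program set_val into the register. */
    static void demo_rmw(void __iomem *reg, u32 clear_mask, u32 set_val)
    {
            u32 tmp = readl(reg);

            tmp &= ~clear_mask;
            tmp |= set_val;
            writel(tmp, reg);
    }
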
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
new file mode 100644
index 0000000..456dc4a
--- /dev/null
+++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Broadcom */
+
+/*
+ * This module contains USB PHY initialization for power up and S3 resume
+ * for newer Synopsys based USB hardware first used on the bcm7216.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/soc/brcmstb/brcmstb.h>
+#include "phy-brcm-usb-init.h"
+
+#define PHY_LOCK_TIMEOUT_MS 200
+
+/* Register definitions for syscon piarbctl registers */
+#define PIARBCTL_CAM			0x00
+#define PIARBCTL_SPLITTER		0x04
+#define PIARBCTL_MISC			0x08
+#define   PIARBCTL_MISC_SECURE_MASK			0x80000000
+#define   PIARBCTL_MISC_USB_SELECT_MASK			0x40000000
+#define   PIARBCTL_MISC_USB_4G_SDRAM_MASK		0x20000000
+#define   PIARBCTL_MISC_USB_PRIORITY_MASK		0x000f0000
+#define   PIARBCTL_MISC_USB_MEM_PAGE_MASK		0x0000f000
+#define   PIARBCTL_MISC_CAM1_MEM_PAGE_MASK		0x00000f00
+#define   PIARBCTL_MISC_CAM0_MEM_PAGE_MASK		0x000000f0
+#define   PIARBCTL_MISC_SATA_PRIORITY_MASK		0x0000000f
+
+#define PIARBCTL_MISC_USB_ONLY_MASK		\
+	(PIARBCTL_MISC_USB_SELECT_MASK |	\
+	 PIARBCTL_MISC_USB_4G_SDRAM_MASK |	\
+	 PIARBCTL_MISC_USB_PRIORITY_MASK |	\
+	 PIARBCTL_MISC_USB_MEM_PAGE_MASK)
+
+/* Register definitions for the USB CTRL block */
+#define USB_CTRL_SETUP			0x00
+#define   USB_CTRL_SETUP_STRAP_IPP_SEL_MASK		0x02000000
+#define   USB_CTRL_SETUP_SCB2_EN_MASK			0x00008000
+#define   USB_CTRL_SETUP_tca_drv_sel_MASK		0x01000000
+#define   USB_CTRL_SETUP_SCB1_EN_MASK			0x00004000
+#define   USB_CTRL_SETUP_SOFT_SHUTDOWN_MASK		0x00000200
+#define   USB_CTRL_SETUP_IPP_MASK			0x00000020
+#define   USB_CTRL_SETUP_IOC_MASK			0x00000010
+#define USB_CTRL_USB_PM			0x04
+#define   USB_CTRL_USB_PM_USB_PWRDN_MASK		0x80000000
+#define   USB_CTRL_USB_PM_SOFT_RESET_MASK		0x40000000
+#define   USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK		0x00800000
+#define   USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK		0x00400000
+#define USB_CTRL_USB_PM_STATUS		0x08
+#define USB_CTRL_USB_DEVICE_CTL1	0x10
+#define   USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK	0x00000003
+#define USB_CTRL_TEST_PORT_CTL		0x30
+#define   USB_CTRL_TEST_PORT_CTL_TPOUT_SEL_MASK	0x000000ff
+#define   USB_CTRL_TEST_PORT_CTL_TPOUT_SEL_PME_GEN_MASK	0x0000002e
+#define USB_CTRL_TP_DIAG1		0x34
+#define   USB_CTLR_TP_DIAG1_wake_MASK	0x00000002
+#define USB_CTRL_CTLR_CSHCR		0x50
+#define   USB_CTRL_CTLR_CSHCR_ctl_pme_en_MASK	0x00040000
+
+/* Register definitions for the USB_PHY block in 7211b0 */
+#define USB_PHY_PLL_CTL			0x00
+#define   USB_PHY_PLL_CTL_PLL_RESETB_MASK		0x40000000
+#define USB_PHY_PLL_LDO_CTL		0x08
+#define   USB_PHY_PLL_LDO_CTL_AFE_CORERDY_MASK		0x00000004
+#define   USB_PHY_PLL_LDO_CTL_AFE_LDO_PWRDWNB_MASK	0x00000002
+#define   USB_PHY_PLL_LDO_CTL_AFE_BG_PWRDWNB_MASK	0x00000001
+#define USB_PHY_UTMI_CTL_1		0x04
+#define   USB_PHY_UTMI_CTL_1_POWER_UP_FSM_EN_MASK	0x00000800
+#define   USB_PHY_UTMI_CTL_1_PHY_MODE_MASK		0x0000000c
+#define   USB_PHY_UTMI_CTL_1_PHY_MODE_SHIFT		2
+#define USB_PHY_IDDQ			0x1c
+#define   USB_PHY_IDDQ_phy_iddq_MASK			0x00000001
+#define USB_PHY_STATUS			0x20
+#define   USB_PHY_STATUS_pll_lock_MASK			0x00000001
+
+/* Register definitions for the MDIO registers in the DWC2 block of
+ * the 7211b0.
+ * NOTE: The PHY's MDIO registers are only accessible through the
+ * legacy DesignWare USB controller even though it's not being used.
+ */
+#define USB_GMDIOCSR	0
+#define USB_GMDIOGEN	4
+
+/* Register definitions for the BDC EC block in 7211b0 */
+#define BDC_EC_AXIRDA			0x0c
+#define   BDC_EC_AXIRDA_RTS_MASK			0xf0000000
+#define   BDC_EC_AXIRDA_RTS_SHIFT			28
+
+
+static void usb_mdio_write_7211b0(struct brcm_usb_init_params *params,
+				  uint8_t addr, uint16_t data)
+{
+	void __iomem *usb_mdio = params->regs[BRCM_REGS_USB_MDIO];
+
+	addr &= 0x1f; /* 5-bit address */
+	brcm_usb_writel(0xffffffff, usb_mdio + USB_GMDIOGEN);
+	while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+		;
+	brcm_usb_writel(0x59020000 | (addr << 18) | data,
+			usb_mdio + USB_GMDIOGEN);
+	while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+		;
+	brcm_usb_writel(0x00000000, usb_mdio + USB_GMDIOGEN);
+	while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+		;
+}
+
+static uint16_t __maybe_unused usb_mdio_read_7211b0(
+	struct brcm_usb_init_params *params, uint8_t addr)
+{
+	void __iomem *usb_mdio = params->regs[BRCM_REGS_USB_MDIO];
+
+	addr &= 0x1f; /* 5-bit address */
+	brcm_usb_writel(0xffffffff, usb_mdio + USB_GMDIOGEN);
+	while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+		;
+	brcm_usb_writel(0x69020000 | (addr << 18), usb_mdio + USB_GMDIOGEN);
+	while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+		;
+	brcm_usb_writel(0x00000000, usb_mdio + USB_GMDIOGEN);
+	while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+		;
+	return brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & 0xffff;
+}
+
+static void usb2_eye_fix_7211b0(struct brcm_usb_init_params *params)
+{
+	/* select bank */
+	usb_mdio_write_7211b0(params, 0x1f, 0x80a0);
+
+	/* Set the eye */
+	usb_mdio_write_7211b0(params, 0x0a, 0xc6a0);
+}
+
+static void xhci_soft_reset(struct brcm_usb_init_params *params,
+			int on_off)
+{
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+	/* Assert reset */
+	if (on_off)
+		USB_CTRL_UNSET(ctrl, USB_PM, XHC_SOFT_RESETB);
+	/* De-assert reset */
+	else
+		USB_CTRL_SET(ctrl, USB_PM, XHC_SOFT_RESETB);
+}
+
+static void usb_init_ipp(struct brcm_usb_init_params *params)
+{
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+	u32 reg;
+	u32 orig_reg;
+
+	pr_debug("%s\n", __func__);
+
+	orig_reg = reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
+	if (params->ipp != 2)
+		/* override ipp strap pin (if it exists) */
+		reg &= ~(USB_CTRL_MASK(SETUP, STRAP_IPP_SEL));
+
+	/* Override the default OC and PP polarity */
+	reg &= ~(USB_CTRL_MASK(SETUP, IPP) | USB_CTRL_MASK(SETUP, IOC));
+	if (params->ioc)
+		reg |= USB_CTRL_MASK(SETUP, IOC);
+	if (params->ipp == 1)
+		reg |= USB_CTRL_MASK(SETUP, IPP);
+	brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+
+	/*
+	 * If we're changing IPP, make sure power is off long enough
+	 * to turn off any connected devices.
+	 */
+	if ((reg ^ orig_reg) & USB_CTRL_MASK(SETUP, IPP))
+		msleep(50);
+}
+
+static void syscon_piarbctl_init(struct regmap *rmap)
+{
+	/* Switch from legacy USB OTG controller to new STB USB controller */
+	regmap_update_bits(rmap, PIARBCTL_MISC, PIARBCTL_MISC_USB_ONLY_MASK,
+			   PIARBCTL_MISC_USB_SELECT_MASK |
+			   PIARBCTL_MISC_USB_4G_SDRAM_MASK);
+}
+
+static void usb_init_common(struct brcm_usb_init_params *params)
+{
+	u32 reg;
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+	pr_debug("%s\n", __func__);
+
+	USB_CTRL_UNSET(ctrl, USB_PM, USB_PWRDN);
+	/* 1 millisecond - for USB clocks to settle down */
+	usleep_range(1000, 2000);
+
+	if (USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE)) {
+		reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+		reg &= ~USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE);
+		reg |= params->mode;
+		brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+	}
+	switch (params->mode) {
+	case USB_CTLR_MODE_HOST:
+		USB_CTRL_UNSET(ctrl, USB_PM, BDC_SOFT_RESETB);
+		break;
+	default:
+		USB_CTRL_UNSET(ctrl, USB_PM, BDC_SOFT_RESETB);
+		USB_CTRL_SET(ctrl, USB_PM, BDC_SOFT_RESETB);
+		break;
+	}
+}
+
+static void usb_wake_enable_7211b0(struct brcm_usb_init_params *params,
+				   bool enable)
+{
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+	if (enable)
+		USB_CTRL_SET(ctrl, CTLR_CSHCR, ctl_pme_en);
+	else
+		USB_CTRL_UNSET(ctrl, CTLR_CSHCR, ctl_pme_en);
+}
+
+static void usb_init_common_7211b0(struct brcm_usb_init_params *params)
+{
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+	void __iomem *usb_phy = params->regs[BRCM_REGS_USB_PHY];
+	void __iomem *bdc_ec = params->regs[BRCM_REGS_BDC_EC];
+	int timeout_ms = PHY_LOCK_TIMEOUT_MS;
+	u32 reg;
+
+	if (params->syscon_piarbctl)
+		syscon_piarbctl_init(params->syscon_piarbctl);
+
+	USB_CTRL_UNSET(ctrl, USB_PM, USB_PWRDN);
+
+	usb_wake_enable_7211b0(params, false);
+	if (!params->wake_enabled) {
+
+		/* undo possible suspend settings */
+		brcm_usb_writel(0, usb_phy + USB_PHY_IDDQ);
+		reg = brcm_usb_readl(usb_phy + USB_PHY_PLL_CTL);
+		reg |= USB_PHY_PLL_CTL_PLL_RESETB_MASK;
+		brcm_usb_writel(reg, usb_phy + USB_PHY_PLL_CTL);
+
+		/* temporarily enable FSM so PHY comes up properly */
+		reg = brcm_usb_readl(usb_phy + USB_PHY_UTMI_CTL_1);
+		reg |= USB_PHY_UTMI_CTL_1_POWER_UP_FSM_EN_MASK;
+		brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
+	}
+
+	/* Init the PHY */
+	reg = USB_PHY_PLL_LDO_CTL_AFE_CORERDY_MASK |
+		USB_PHY_PLL_LDO_CTL_AFE_LDO_PWRDWNB_MASK |
+		USB_PHY_PLL_LDO_CTL_AFE_BG_PWRDWNB_MASK;
+	brcm_usb_writel(reg, usb_phy + USB_PHY_PLL_LDO_CTL);
+
+	/* wait for lock */
+	while (timeout_ms-- > 0) {
+		reg = brcm_usb_readl(usb_phy + USB_PHY_STATUS);
+		if (reg & USB_PHY_STATUS_pll_lock_MASK)
+			break;
+		usleep_range(1000, 2000);
+	}
+
+	/* Set the PHY_MODE */
+	reg = brcm_usb_readl(usb_phy + USB_PHY_UTMI_CTL_1);
+	reg &= ~USB_PHY_UTMI_CTL_1_PHY_MODE_MASK;
+	reg |= params->mode << USB_PHY_UTMI_CTL_1_PHY_MODE_SHIFT;
+	brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
+
+	/* Fix the incorrect default */
+	reg = brcm_usb_readl(ctrl + USB_CTRL_SETUP);
+	reg &= ~USB_CTRL_SETUP_tca_drv_sel_MASK;
+	brcm_usb_writel(reg, ctrl + USB_CTRL_SETUP);
+
+	usb_init_common(params);
+
+	/*
+	 * The BDC controller will get occasional failures with
+	 * the default "Read Transaction Size" of 6 (1024 bytes).
+	 * Set it to 4 (256 bytes).
+	 */
+	if ((params->mode != USB_CTLR_MODE_HOST) && bdc_ec) {
+		reg = brcm_usb_readl(bdc_ec + BDC_EC_AXIRDA);
+		reg &= ~BDC_EC_AXIRDA_RTS_MASK;
+		reg |= (0x4 << BDC_EC_AXIRDA_RTS_SHIFT);
+		brcm_usb_writel(reg, bdc_ec + BDC_EC_AXIRDA);
+	}
+
+	/*
+	 * Disable FSM, otherwise the PHY will auto suspend when no
+	 * device is connected and will be reset on resume.
+	 */
+	reg = brcm_usb_readl(usb_phy + USB_PHY_UTMI_CTL_1);
+	reg &= ~USB_PHY_UTMI_CTL_1_POWER_UP_FSM_EN_MASK;
+	brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
+
+	usb2_eye_fix_7211b0(params);
+}
+
+static void usb_init_xhci(struct brcm_usb_init_params *params)
+{
+	pr_debug("%s\n", __func__);
+
+	xhci_soft_reset(params, 0);
+}
+
+static void usb_uninit_common(struct brcm_usb_init_params *params)
+{
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+	pr_debug("%s\n", __func__);
+
+	USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN);
+
+}
+
+static void usb_uninit_common_7211b0(struct brcm_usb_init_params *params)
+{
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+	void __iomem *usb_phy = params->regs[BRCM_REGS_USB_PHY];
+	u32 reg;
+
+	pr_debug("%s\n", __func__);
+
+	if (params->wake_enabled) {
+		USB_CTRL_SET(ctrl, TEST_PORT_CTL, TPOUT_SEL_PME_GEN);
+		usb_wake_enable_7211b0(params, true);
+	} else {
+		USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN);
+		brcm_usb_writel(0, usb_phy + USB_PHY_PLL_LDO_CTL);
+		reg = brcm_usb_readl(usb_phy + USB_PHY_PLL_CTL);
+		reg &= ~USB_PHY_PLL_CTL_PLL_RESETB_MASK;
+		brcm_usb_writel(reg, usb_phy + USB_PHY_PLL_CTL);
+		brcm_usb_writel(USB_PHY_IDDQ_phy_iddq_MASK,
+				usb_phy + USB_PHY_IDDQ);
+	}
+
+}
+
+static void usb_uninit_xhci(struct brcm_usb_init_params *params)
+{
+
+	pr_debug("%s\n", __func__);
+
+	if (!params->wake_enabled)
+		xhci_soft_reset(params, 1);
+}
+
+static int usb_get_dual_select(struct brcm_usb_init_params *params)
+{
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+	u32 reg = 0;
+
+	pr_debug("%s\n", __func__);
+
+	reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+	reg &= USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE);
+	return reg;
+}
+
+static void usb_set_dual_select(struct brcm_usb_init_params *params, int mode)
+{
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+	u32 reg;
+
+	pr_debug("%s\n", __func__);
+
+	reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+	reg &= ~USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE);
+	reg |= mode;
+	brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+}
+
+static const struct brcm_usb_init_ops bcm7216_ops = {
+	.init_ipp = usb_init_ipp,
+	.init_common = usb_init_common,
+	.init_xhci = usb_init_xhci,
+	.uninit_common = usb_uninit_common,
+	.uninit_xhci = usb_uninit_xhci,
+	.get_dual_select = usb_get_dual_select,
+	.set_dual_select = usb_set_dual_select,
+};
+
+static const struct brcm_usb_init_ops bcm7211b0_ops = {
+	.init_ipp = usb_init_ipp,
+	.init_common = usb_init_common_7211b0,
+	.init_xhci = usb_init_xhci,
+	.uninit_common = usb_uninit_common_7211b0,
+	.uninit_xhci = usb_uninit_xhci,
+	.get_dual_select = usb_get_dual_select,
+	.set_dual_select = usb_set_dual_select,
+};
+
+void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params)
+{
+
+	pr_debug("%s\n", __func__);
+
+	params->family_name = "7216";
+	params->ops = &bcm7216_ops;
+}
+
+void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params)
+{
+
+	pr_debug("%s\n", __func__);
+
+	params->family_name = "7211";
+	params->ops = &bcm7211b0_ops;
+	params->suspend_with_clocks = true;
+}
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 91b5b09..9391ab4 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -42,6 +42,7 @@
 #define   USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK		0x80000000 /* option */
 #define USB_CTRL_EBRIDGE		0x0c
 #define   USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK		0x00020000 /* option */
+#define   USB_CTRL_EBRIDGE_EBR_SCB_SIZE_MASK		0x00000f80 /* option */
 #define USB_CTRL_OBRIDGE		0x10
 #define   USB_CTRL_OBRIDGE_LS_KEEP_ALIVE_MASK		0x08000000
 #define USB_CTRL_MDIO			0x14
@@ -57,6 +58,8 @@
 #define   USB_CTRL_USB_PM_SOFT_RESET_MASK		0x40000000 /* option */
 #define   USB_CTRL_USB_PM_USB20_HC_RESETB_MASK		0x30000000 /* option */
 #define   USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK	0x00300000 /* option */
+#define   USB_CTRL_USB_PM_RMTWKUP_EN_MASK		0x00000001
+#define USB_CTRL_USB_PM_STATUS		0x38
 #define USB_CTRL_USB30_CTL1		0x60
 #define   USB_CTRL_USB30_CTL1_PHY3_PLL_SEQ_START_MASK	0x00000010
 #define   USB_CTRL_USB30_CTL1_PHY3_RESETB_MASK		0x00010000
@@ -126,10 +129,6 @@ enum {
 	USB_CTRL_SELECTOR_COUNT,
 };
 
-#define USB_CTRL_REG(base, reg)	((void __iomem *)base + USB_CTRL_##reg)
-#define USB_XHCI_EC_REG(base, reg) ((void __iomem *)base + USB_XHCI_EC_##reg)
-#define USB_CTRL_MASK(reg, field) \
-	USB_CTRL_##reg##_##field##_MASK
 #define USB_CTRL_MASK_FAMILY(params, reg, field)			\
 	(params->usb_reg_bits_map[USB_CTRL_##reg##_##field##_SELECTOR])
 
@@ -140,13 +139,6 @@ enum {
 	usb_ctrl_unset_family(params, USB_CTRL_##reg,	\
 		USB_CTRL_##reg##_##field##_SELECTOR)
 
-#define USB_CTRL_SET(base, reg, field)	\
-	usb_ctrl_set(USB_CTRL_REG(base, reg),		\
-		     USB_CTRL_##reg##_##field##_MASK)
-#define USB_CTRL_UNSET(base, reg, field)	\
-	usb_ctrl_unset(USB_CTRL_REG(base, reg),		\
-		       USB_CTRL_##reg##_##field##_MASK)
-
 #define MDIO_USB2	0
 #define MDIO_USB3	BIT(31)
 
@@ -176,6 +168,7 @@ static const struct id_to_type id_to_type_table[] = {
 	{ 0x33900000, BRCM_FAMILY_3390A0 },
 	{ 0x72500010, BRCM_FAMILY_7250B0 },
 	{ 0x72600000, BRCM_FAMILY_7260A0 },
+	{ 0x72550000, BRCM_FAMILY_7260A0 },
 	{ 0x72680000, BRCM_FAMILY_7271A0 },
 	{ 0x72710000, BRCM_FAMILY_7271A0 },
 	{ 0x73640000, BRCM_FAMILY_7364A0 },
@@ -401,26 +394,14 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
 	},
 };
 
-static inline u32 brcmusb_readl(void __iomem *addr)
-{
-	return readl(addr);
-}
-
-static inline void brcmusb_writel(u32 val, void __iomem *addr)
-{
-	writel(val, addr);
-}
-
 static inline
 void usb_ctrl_unset_family(struct brcm_usb_init_params *params,
 			   u32 reg_offset, u32 field)
 {
 	u32 mask;
-	void __iomem *reg;
 
 	mask = params->usb_reg_bits_map[field];
-	reg = params->ctrl_regs + reg_offset;
-	brcmusb_writel(brcmusb_readl(reg) & ~mask, reg);
+	brcm_usb_ctrl_unset(params->regs[BRCM_REGS_CTRL] + reg_offset, mask);
 };
 
 static inline
@@ -428,45 +409,27 @@ void usb_ctrl_set_family(struct brcm_usb_init_params *params,
 			 u32 reg_offset, u32 field)
 {
 	u32 mask;
-	void __iomem *reg;
 
 	mask = params->usb_reg_bits_map[field];
-	reg = params->ctrl_regs + reg_offset;
-	brcmusb_writel(brcmusb_readl(reg) | mask, reg);
+	brcm_usb_ctrl_set(params->regs[BRCM_REGS_CTRL] + reg_offset, mask);
 };
 
-static inline void usb_ctrl_set(void __iomem *reg, u32 field)
-{
-	u32 value;
-
-	value = brcmusb_readl(reg);
-	brcmusb_writel(value | field, reg);
-}
-
-static inline void usb_ctrl_unset(void __iomem *reg, u32 field)
-{
-	u32 value;
-
-	value = brcmusb_readl(reg);
-	brcmusb_writel(value & ~field, reg);
-}
-
 static u32 brcmusb_usb_mdio_read(void __iomem *ctrl_base, u32 reg, int mode)
 {
 	u32 data;
 
 	data = (reg << 16) | mode;
-	brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+	brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
 	data |= (1 << 24);
-	brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+	brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
 	data &= ~(1 << 24);
 	/* wait for the 60MHz parallel to serial shifter */
 	usleep_range(10, 20);
-	brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+	brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
 	/* wait for the 60MHz parallel to serial shifter */
 	usleep_range(10, 20);
 
-	return brcmusb_readl(USB_CTRL_REG(ctrl_base, MDIO2)) & 0xffff;
+	return brcm_usb_readl(USB_CTRL_REG(ctrl_base, MDIO2)) & 0xffff;
 }
 
 static void brcmusb_usb_mdio_write(void __iomem *ctrl_base, u32 reg,
@@ -475,14 +438,14 @@ static void brcmusb_usb_mdio_write(void __iomem *ctrl_base, u32 reg,
 	u32 data;
 
 	data = (reg << 16) | val | mode;
-	brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+	brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
 	data |= (1 << 25);
-	brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+	brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
 	data &= ~(1 << 25);
 
 	/* wait for the 60MHz parallel to serial shifter */
 	usleep_range(10, 20);
-	brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+	brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
 	/* wait for the 60MHz parallel to serial shifter */
 	usleep_range(10, 20);
 }
@@ -581,7 +544,7 @@ static void brcmusb_usb3_pll_54mhz(struct brcm_usb_init_params *params)
 {
 	u32 ofs;
 	int ii;
-	void __iomem *ctrl_base = params->ctrl_regs;
+	void __iomem *ctrl_base = params->regs[BRCM_REGS_CTRL];
 
 	/*
 	 * On newer B53 based SoC's, the reference clock for the
@@ -662,7 +625,7 @@ static void brcmusb_usb3_ssc_enable(void __iomem *ctrl_base)
 
 static void brcmusb_usb3_phy_workarounds(struct brcm_usb_init_params *params)
 {
-	void __iomem *ctrl_base = params->ctrl_regs;
+	void __iomem *ctrl_base = params->regs[BRCM_REGS_CTRL];
 
 	brcmusb_usb3_pll_fix(ctrl_base);
 	brcmusb_usb3_pll_54mhz(params);
@@ -704,21 +667,21 @@ static void brcmusb_memc_fix(struct brcm_usb_init_params *params)
 
 static void brcmusb_usb3_otp_fix(struct brcm_usb_init_params *params)
 {
-	void __iomem *xhci_ec_base = params->xhci_ec_regs;
+	void __iomem *xhci_ec_base = params->regs[BRCM_REGS_XHCI_EC];
 	u32 val;
 
 	if (params->family_id != 0x74371000 || !xhci_ec_base)
 		return;
-	brcmusb_writel(0xa20c, USB_XHCI_EC_REG(xhci_ec_base, IRAADR));
-	val = brcmusb_readl(USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
+	brcm_usb_writel(0xa20c, USB_XHCI_EC_REG(xhci_ec_base, IRAADR));
+	val = brcm_usb_readl(USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
 
 	/* set cfg_pick_ss_lock */
 	val |= (1 << 27);
-	brcmusb_writel(val, USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
+	brcm_usb_writel(val, USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
 
 	/* Reset USB 3.0 PHY for workaround to take effect */
-	USB_CTRL_UNSET(params->ctrl_regs, USB30_CTL1, PHY3_RESETB);
-	USB_CTRL_SET(params->ctrl_regs,	USB30_CTL1, PHY3_RESETB);
+	USB_CTRL_UNSET(params->regs[BRCM_REGS_CTRL], USB30_CTL1, PHY3_RESETB);
+	USB_CTRL_SET(params->regs[BRCM_REGS_CTRL], USB30_CTL1, PHY3_RESETB);
 }
 
 static void brcmusb_xhci_soft_reset(struct brcm_usb_init_params *params,
@@ -747,7 +710,7 @@ static void brcmusb_xhci_soft_reset(struct brcm_usb_init_params *params,
  *   - default chip/rev.
  * NOTE: The minor rev is always ignored.
  */
-static enum brcm_family_type brcmusb_get_family_type(
+static enum brcm_family_type get_family_type(
 	struct brcm_usb_init_params *params)
 {
 	int last_type = -1;
@@ -775,9 +738,9 @@ static enum brcm_family_type brcmusb_get_family_type(
 	return last_type;
 }
 
-void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
+static void usb_init_ipp(struct brcm_usb_init_params *params)
 {
-	void __iomem *ctrl = params->ctrl_regs;
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
 	u32 reg;
 	u32 orig_reg;
 
@@ -791,7 +754,7 @@ void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
 			USB_CTRL_SET_FAMILY(params, USB30_CTL1, USB3_IPP);
 	}
 
-	reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+	reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
 	orig_reg = reg;
 	if (USB_CTRL_MASK_FAMILY(params, SETUP, STRAP_CC_DRD_MODE_ENABLE_SEL))
 		/* Never use the strap, it's going away. */
@@ -799,8 +762,8 @@ void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
 					      SETUP,
 					      STRAP_CC_DRD_MODE_ENABLE_SEL));
 	if (USB_CTRL_MASK_FAMILY(params, SETUP, STRAP_IPP_SEL))
+		/* override ipp strap pin (if it exists) */
 		if (params->ipp != 2)
-			/* override ipp strap pin (if it exits) */
 			reg &= ~(USB_CTRL_MASK_FAMILY(params, SETUP,
 						      STRAP_IPP_SEL));
 
@@ -808,50 +771,38 @@ void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
 	reg &= ~(USB_CTRL_MASK(SETUP, IPP) | USB_CTRL_MASK(SETUP, IOC));
 	if (params->ioc)
 		reg |= USB_CTRL_MASK(SETUP, IOC);
-	if (params->ipp == 1 && ((reg & USB_CTRL_MASK(SETUP, IPP)) == 0))
+	if (params->ipp == 1)
 		reg |= USB_CTRL_MASK(SETUP, IPP);
-	brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+	brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
 
 	/*
 	 * If we're changing IPP, make sure power is off long enough
 	 * to turn off any connected devices.
 	 */
-	if (reg != orig_reg)
+	if ((reg ^ orig_reg) & USB_CTRL_MASK(SETUP, IPP))
 		msleep(50);
 }
 
-int brcm_usb_init_get_dual_select(struct brcm_usb_init_params *params)
+static void usb_wake_enable(struct brcm_usb_init_params *params,
+			  bool enable)
 {
-	void __iomem *ctrl = params->ctrl_regs;
-	u32 reg = 0;
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
 
-	if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
-		reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
-		reg &= USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
-					PORT_MODE);
-	}
-	return reg;
+	if (enable)
+		USB_CTRL_SET(ctrl, USB_PM, RMTWKUP_EN);
+	else
+		USB_CTRL_UNSET(ctrl, USB_PM, RMTWKUP_EN);
 }
 
-void brcm_usb_init_set_dual_select(struct brcm_usb_init_params *params,
-				   int mode)
-{
-	void __iomem *ctrl = params->ctrl_regs;
-	u32 reg;
-
-	if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
-		reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
-		reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
-					PORT_MODE);
-		reg |= mode;
-		brcmusb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
-	}
-}
-
-void brcm_usb_init_common(struct brcm_usb_init_params *params)
+static void usb_init_common(struct brcm_usb_init_params *params)
 {
 	u32 reg;
-	void __iomem *ctrl = params->ctrl_regs;
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+	/* Clear any pending wake conditions */
+	usb_wake_enable(params, false);
+	reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_PM_STATUS));
+	brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_PM_STATUS));
 
 	/* Take USB out of power down */
 	if (USB_CTRL_MASK_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN)) {
@@ -877,7 +828,7 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params)
 	/* Block auto PLL suspend by USB2 PHY (Sasi) */
 	USB_CTRL_SET(ctrl, PLL_CTL, PLL_SUSPEND_EN);
 
-	reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+	reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
 	if (params->selected_family == BRCM_FAMILY_7364A0)
 		/* Suppress overcurrent indication from USB30 ports for A0 */
 		reg |= USB_CTRL_MASK_FAMILY(params, SETUP, OC3_DISABLE);
@@ -893,16 +844,16 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params)
 		reg |= USB_CTRL_MASK_FAMILY(params, SETUP, SCB1_EN);
 	if (USB_CTRL_MASK_FAMILY(params, SETUP, SCB2_EN))
 		reg |= USB_CTRL_MASK_FAMILY(params, SETUP, SCB2_EN);
-	brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+	brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
 
 	brcmusb_memc_fix(params);
 
 	if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
-		reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+		reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
 		reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
 					PORT_MODE);
 		reg |= params->mode;
-		brcmusb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+		brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
 	}
 	if (USB_CTRL_MASK_FAMILY(params, USB_PM, BDC_SOFT_RESETB)) {
 		switch (params->mode) {
@@ -924,10 +875,10 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params)
 	}
 }
 
-void brcm_usb_init_eohci(struct brcm_usb_init_params *params)
+static void usb_init_eohci(struct brcm_usb_init_params *params)
 {
 	u32 reg;
-	void __iomem *ctrl = params->ctrl_regs;
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
 
 	if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB20_HC_RESETB))
 		USB_CTRL_SET_FAMILY(params, USB_PM, USB20_HC_RESETB);
@@ -940,19 +891,30 @@ void brcm_usb_init_eohci(struct brcm_usb_init_params *params)
 		USB_CTRL_SET(ctrl, EBRIDGE, ESTOP_SCB_REQ);
 
 	/* Setup the endian bits */
-	reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+	reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
 	reg &= ~USB_CTRL_SETUP_ENDIAN_BITS;
 	reg |= USB_CTRL_MASK_FAMILY(params, SETUP, ENDIAN);
-	brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+	brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
 
 	if (params->selected_family == BRCM_FAMILY_7271A0)
 		/* Enable LS keep alive fix for certain keyboards */
 		USB_CTRL_SET(ctrl, OBRIDGE, LS_KEEP_ALIVE);
+
+	if (params->family_id == 0x72550000) {
+		/*
+		 * Make the burst size 512 bytes to fix a hardware bug
+		 * on the 7255a0. See HW7255-24.
+		 */
+		reg = brcm_usb_readl(USB_CTRL_REG(ctrl, EBRIDGE));
+		reg &= ~USB_CTRL_MASK(EBRIDGE, EBR_SCB_SIZE);
+		reg |= 0x800;
+		brcm_usb_writel(reg, USB_CTRL_REG(ctrl, EBRIDGE));
+	}
 }
 
-void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
+static void usb_init_xhci(struct brcm_usb_init_params *params)
 {
-	void __iomem *ctrl = params->ctrl_regs;
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
 
 	USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
 	/* 1 millisecond - for USB clocks to settle down */
@@ -978,34 +940,80 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
 	brcmusb_usb3_otp_fix(params);
 }
 
-void brcm_usb_uninit_common(struct brcm_usb_init_params *params)
+static void usb_uninit_common(struct brcm_usb_init_params *params)
 {
 	if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB_PWRDN))
 		USB_CTRL_SET_FAMILY(params, USB_PM, USB_PWRDN);
 
 	if (USB_CTRL_MASK_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN))
 		USB_CTRL_SET_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN);
+	if (params->wake_enabled)
+		usb_wake_enable(params, true);
 }
 
-void brcm_usb_uninit_eohci(struct brcm_usb_init_params *params)
+static void usb_uninit_eohci(struct brcm_usb_init_params *params)
 {
-	if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB20_HC_RESETB))
-		USB_CTRL_UNSET_FAMILY(params, USB_PM, USB20_HC_RESETB);
 }
 
-void brcm_usb_uninit_xhci(struct brcm_usb_init_params *params)
+static void usb_uninit_xhci(struct brcm_usb_init_params *params)
 {
 	brcmusb_xhci_soft_reset(params, 1);
-	USB_CTRL_SET(params->ctrl_regs, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
+	USB_CTRL_SET(params->regs[BRCM_REGS_CTRL], USB30_PCTL,
+		     PHY3_IDDQ_OVERRIDE);
 }
 
-void brcm_usb_set_family_map(struct brcm_usb_init_params *params)
+static int usb_get_dual_select(struct brcm_usb_init_params *params)
+{
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+	u32 reg = 0;
+
+	pr_debug("%s\n", __func__);
+	if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
+		reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+		reg &= USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
+					PORT_MODE);
+	}
+	return reg;
+}
+
+static void usb_set_dual_select(struct brcm_usb_init_params *params, int mode)
+{
+	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+	u32 reg;
+
+	pr_debug("%s\n", __func__);
+
+	if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
+		reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+		reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
+					PORT_MODE);
+		reg |= mode;
+		brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+	}
+}
+
+static const struct brcm_usb_init_ops bcm7445_ops = {
+	.init_ipp = usb_init_ipp,
+	.init_common = usb_init_common,
+	.init_eohci = usb_init_eohci,
+	.init_xhci = usb_init_xhci,
+	.uninit_common = usb_uninit_common,
+	.uninit_eohci = usb_uninit_eohci,
+	.uninit_xhci = usb_uninit_xhci,
+	.get_dual_select = usb_get_dual_select,
+	.set_dual_select = usb_set_dual_select,
+};
+
+void brcm_usb_dvr_init_7445(struct brcm_usb_init_params *params)
 {
 	int fam;
 
-	fam = brcmusb_get_family_type(params);
+	pr_debug("%s\n", __func__);
+
+	fam = get_family_type(params);
 	params->selected_family = fam;
 	params->usb_reg_bits_map =
 		&usb_reg_bits_map_table[fam][0];
 	params->family_name = family_names[fam];
+	params->ops = &bcm7445_ops;
 }
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
index f4f4f6d..899b9eb 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.h
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
@@ -6,16 +6,50 @@
 #ifndef _USB_BRCM_COMMON_INIT_H
 #define _USB_BRCM_COMMON_INIT_H
 
+#include <linux/regmap.h>
+
 #define USB_CTLR_MODE_HOST 0
 #define USB_CTLR_MODE_DEVICE 1
 #define USB_CTLR_MODE_DRD 2
 #define USB_CTLR_MODE_TYPEC_PD 3
 
+enum brcmusb_reg_sel {
+	BRCM_REGS_CTRL = 0,
+	BRCM_REGS_XHCI_EC,
+	BRCM_REGS_XHCI_GBL,
+	BRCM_REGS_USB_PHY,
+	BRCM_REGS_USB_MDIO,
+	BRCM_REGS_BDC_EC,
+	BRCM_REGS_MAX
+};
+
+#define USB_CTRL_REG(base, reg)	((void __iomem *)base + USB_CTRL_##reg)
+#define USB_XHCI_EC_REG(base, reg) ((void __iomem *)base + USB_XHCI_EC_##reg)
+#define USB_CTRL_MASK(reg, field) \
+	USB_CTRL_##reg##_##field##_MASK
+#define USB_CTRL_SET(base, reg, field)	\
+	brcm_usb_ctrl_set(USB_CTRL_REG(base, reg),	\
+			  USB_CTRL_##reg##_##field##_MASK)
+#define USB_CTRL_UNSET(base, reg, field)	\
+	brcm_usb_ctrl_unset(USB_CTRL_REG(base, reg),		\
+			    USB_CTRL_##reg##_##field##_MASK)
+
 struct  brcm_usb_init_params;
 
+struct brcm_usb_init_ops {
+	void (*init_ipp)(struct brcm_usb_init_params *params);
+	void (*init_common)(struct brcm_usb_init_params *params);
+	void (*init_eohci)(struct brcm_usb_init_params *params);
+	void (*init_xhci)(struct brcm_usb_init_params *params);
+	void (*uninit_common)(struct brcm_usb_init_params *params);
+	void (*uninit_eohci)(struct brcm_usb_init_params *params);
+	void (*uninit_xhci)(struct brcm_usb_init_params *params);
+	int  (*get_dual_select)(struct brcm_usb_init_params *params);
+	void (*set_dual_select)(struct brcm_usb_init_params *params, int mode);
+};
+
 struct  brcm_usb_init_params {
-	void __iomem *ctrl_regs;
-	void __iomem *xhci_ec_regs;
+	void __iomem *regs[BRCM_REGS_MAX];
 	int ioc;
 	int ipp;
 	int mode;
@@ -24,19 +58,105 @@ struct  brcm_usb_init_params {
 	int selected_family;
 	const char *family_name;
 	const u32 *usb_reg_bits_map;
+	const struct brcm_usb_init_ops *ops;
+	struct regmap *syscon_piarbctl;
+	bool wake_enabled;
+	bool suspend_with_clocks;
 };
 
-void brcm_usb_set_family_map(struct brcm_usb_init_params *params);
-int brcm_usb_init_get_dual_select(struct brcm_usb_init_params *params);
-void brcm_usb_init_set_dual_select(struct brcm_usb_init_params *params,
-				   int mode);
+void brcm_usb_dvr_init_7445(struct brcm_usb_init_params *params);
+void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params);
+void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params);
 
-void brcm_usb_init_ipp(struct brcm_usb_init_params *ini);
-void brcm_usb_init_common(struct brcm_usb_init_params *ini);
-void brcm_usb_init_eohci(struct brcm_usb_init_params *ini);
-void brcm_usb_init_xhci(struct brcm_usb_init_params *ini);
-void brcm_usb_uninit_common(struct brcm_usb_init_params *ini);
-void brcm_usb_uninit_eohci(struct brcm_usb_init_params *ini);
-void brcm_usb_uninit_xhci(struct brcm_usb_init_params *ini);
+static inline u32 brcm_usb_readl(void __iomem *addr)
+{
+	/*
+	 * MIPS endianness is configured by boot strap, which also reverses all
+	 * bus endianness (i.e., big-endian CPU + big endian bus ==> native
+	 * endian I/O).
+	 *
+	 * Other architectures (e.g., ARM) either do not support big endian, or
+	 * else leave I/O in little endian mode.
+	 */
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+		return __raw_readl(addr);
+	else
+		return readl_relaxed(addr);
+}
+
+static inline void brcm_usb_writel(u32 val, void __iomem *addr)
+{
+	/* See brcm_usb_readl() comments */
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+		__raw_writel(val, addr);
+	else
+		writel_relaxed(val, addr);
+}
+
+static inline void brcm_usb_ctrl_unset(void __iomem *reg, u32 mask)
+{
+	brcm_usb_writel(brcm_usb_readl(reg) & ~(mask), reg);
+};
+
+static inline void brcm_usb_ctrl_set(void __iomem *reg, u32 mask)
+{
+	brcm_usb_writel(brcm_usb_readl(reg) | (mask), reg);
+};
+
+static inline void brcm_usb_init_ipp(struct brcm_usb_init_params *ini)
+{
+	if (ini->ops->init_ipp)
+		ini->ops->init_ipp(ini);
+}
+
+static inline void brcm_usb_init_common(struct brcm_usb_init_params *ini)
+{
+	if (ini->ops->init_common)
+		ini->ops->init_common(ini);
+}
+
+static inline void brcm_usb_init_eohci(struct brcm_usb_init_params *ini)
+{
+	if (ini->ops->init_eohci)
+		ini->ops->init_eohci(ini);
+}
+
+static inline void brcm_usb_init_xhci(struct brcm_usb_init_params *ini)
+{
+	if (ini->ops->init_xhci)
+		ini->ops->init_xhci(ini);
+}
+
+static inline void brcm_usb_uninit_common(struct brcm_usb_init_params *ini)
+{
+	if (ini->ops->uninit_common)
+		ini->ops->uninit_common(ini);
+}
+
+static inline void brcm_usb_uninit_eohci(struct brcm_usb_init_params *ini)
+{
+	if (ini->ops->uninit_eohci)
+		ini->ops->uninit_eohci(ini);
+}
+
+static inline void brcm_usb_uninit_xhci(struct brcm_usb_init_params *ini)
+{
+	if (ini->ops->uninit_xhci)
+		ini->ops->uninit_xhci(ini);
+}
+
+static inline int brcm_usb_get_dual_select(struct brcm_usb_init_params *ini)
+{
+	if (ini->ops->get_dual_select)
+		return ini->ops->get_dual_select(ini);
+	return 0;
+}
+
+static inline void brcm_usb_set_dual_select(struct brcm_usb_init_params *ini,
+	int mode)
+{
+	if (ini->ops->set_dual_select)
+		ini->ops->set_dual_select(ini, mode);
+}
 
 #endif /* _USB_BRCM_COMMON_INIT_H */
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index f5c1f29..491bbd4 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/soc/brcmstb/brcmstb.h>
 #include <dt-bindings/phy/phy.h>
+#include <linux/mfd/syscon.h>
 
 #include "phy-brcm-usb-init.h"
 
@@ -32,6 +33,12 @@ struct value_to_name_map {
 	const char *name;
 };
 
+struct match_chip_info {
+	void *init_func;
+	u8 required_regs[BRCM_REGS_MAX + 1];
+	u8 optional_reg;
+};
+
 static struct value_to_name_map brcm_dr_mode_to_name[] = {
 	{ USB_CTLR_MODE_HOST, "host" },
 	{ USB_CTLR_MODE_DEVICE, "peripheral" },
@@ -57,11 +64,26 @@ struct brcm_usb_phy_data {
 	bool			has_xhci;
 	struct clk		*usb_20_clk;
 	struct clk		*usb_30_clk;
+	struct clk		*suspend_clk;
 	struct mutex		mutex;	/* serialize phy init */
 	int			init_count;
+	int			wake_irq;
 	struct brcm_usb_phy	phys[BRCM_USB_PHY_ID_MAX];
 };
 
+static s8 *node_reg_names[BRCM_REGS_MAX] = {
+	"crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
+};
+
+static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
+{
+	struct phy *gphy = dev_id;
+
+	pm_wakeup_event(&gphy->dev, 0);
+
+	return IRQ_HANDLED;
+}
+
 static int brcm_usb_phy_init(struct phy *gphy)
 {
 	struct brcm_usb_phy *phy = phy_get_drvdata(gphy);
@@ -74,8 +96,9 @@ static int brcm_usb_phy_init(struct phy *gphy)
 	 */
 	mutex_lock(&priv->mutex);
 	if (priv->init_count++ == 0) {
-		clk_enable(priv->usb_20_clk);
-		clk_enable(priv->usb_30_clk);
+		clk_prepare_enable(priv->usb_20_clk);
+		clk_prepare_enable(priv->usb_30_clk);
+		clk_prepare_enable(priv->suspend_clk);
 		brcm_usb_init_common(&priv->ini);
 	}
 	mutex_unlock(&priv->mutex);
@@ -106,8 +129,9 @@ static int brcm_usb_phy_exit(struct phy *gphy)
 	mutex_lock(&priv->mutex);
 	if (--priv->init_count == 0) {
 		brcm_usb_uninit_common(&priv->ini);
-		clk_disable(priv->usb_20_clk);
-		clk_disable(priv->usb_30_clk);
+		clk_disable_unprepare(priv->usb_20_clk);
+		clk_disable_unprepare(priv->usb_30_clk);
+		clk_disable_unprepare(priv->suspend_clk);
 	}
 	mutex_unlock(&priv->mutex);
 	phy->inited = false;
@@ -194,7 +218,7 @@ static ssize_t dual_select_store(struct device *dev,
 	res = name_to_value(&brcm_dual_mode_to_name[0],
 			    ARRAY_SIZE(brcm_dual_mode_to_name), buf, &value);
 	if (!res) {
-		brcm_usb_init_set_dual_select(&priv->ini, value);
+		brcm_usb_set_dual_select(&priv->ini, value);
 		res = len;
 	}
 	mutex_unlock(&sysfs_lock);
@@ -209,7 +233,7 @@ static ssize_t dual_select_show(struct device *dev,
 	int value;
 
 	mutex_lock(&sysfs_lock);
-	value = brcm_usb_init_get_dual_select(&priv->ini);
+	value = brcm_usb_get_dual_select(&priv->ini);
 	mutex_unlock(&sysfs_lock);
 	return sprintf(buf, "%s\n",
 		value_to_name(&brcm_dual_mode_to_name[0],
@@ -228,15 +252,106 @@ static const struct attribute_group brcm_usb_phy_group = {
 	.attrs = brcm_usb_phy_attrs,
 };
 
-static int brcm_usb_phy_dvr_init(struct device *dev,
+static struct match_chip_info chip_info_7216 = {
+	.init_func = &brcm_usb_dvr_init_7216,
+	.required_regs = {
+		BRCM_REGS_CTRL,
+		BRCM_REGS_XHCI_EC,
+		BRCM_REGS_XHCI_GBL,
+		-1,
+	},
+};
+
+static struct match_chip_info chip_info_7211b0 = {
+	.init_func = &brcm_usb_dvr_init_7211b0,
+	.required_regs = {
+		BRCM_REGS_CTRL,
+		BRCM_REGS_XHCI_EC,
+		BRCM_REGS_XHCI_GBL,
+		BRCM_REGS_USB_PHY,
+		BRCM_REGS_USB_MDIO,
+		-1,
+	},
+	.optional_reg = BRCM_REGS_BDC_EC,
+};
+
+static struct match_chip_info chip_info_7445 = {
+	.init_func = &brcm_usb_dvr_init_7445,
+	.required_regs = {
+		BRCM_REGS_CTRL,
+		BRCM_REGS_XHCI_EC,
+		-1,
+	},
+};
+
+static const struct of_device_id brcm_usb_dt_ids[] = {
+	{
+		.compatible = "brcm,bcm7216-usb-phy",
+		.data = &chip_info_7216,
+	},
+	{
+		.compatible = "brcm,bcm7211-usb-phy",
+		.data = &chip_info_7211b0,
+	},
+	{
+		.compatible = "brcm,brcmstb-usb-phy",
+		.data = &chip_info_7445,
+	},
+	{ /* sentinel */ }
+};
+
+static int brcm_usb_get_regs(struct platform_device *pdev,
+			     enum brcmusb_reg_sel regs,
+			     struct  brcm_usb_init_params *ini,
+			     bool optional)
+{
+	struct resource *res;
+
+	/* Older DT nodes have ctrl and optional xhci_ec by index only */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						node_reg_names[regs]);
+	if (res == NULL) {
+		if (regs == BRCM_REGS_CTRL) {
+			res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		} else if (regs == BRCM_REGS_XHCI_EC) {
+			res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+			/* XHCI_EC registers are optional */
+			if (res == NULL)
+				return 0;
+		}
+		if (res == NULL) {
+			if (optional) {
+				dev_dbg(&pdev->dev,
+					"Optional reg %s not found\n",
+					node_reg_names[regs]);
+				return 0;
+			}
+			dev_err(&pdev->dev, "can't get %s base addr\n",
+				node_reg_names[regs]);
+			return 1;
+		}
+	}
+	ini->regs[regs] = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ini->regs[regs])) {
+		dev_err(&pdev->dev, "can't map %s register space\n",
+			node_reg_names[regs]);
+		return 1;
+	}
+	return 0;
+}
+
+static int brcm_usb_phy_dvr_init(struct platform_device *pdev,
 				 struct brcm_usb_phy_data *priv,
 				 struct device_node *dn)
 {
-	struct phy *gphy;
+	struct device *dev = &pdev->dev;
+	struct phy *gphy = NULL;
 	int err;
 
 	priv->usb_20_clk = of_clk_get_by_name(dn, "sw_usb");
 	if (IS_ERR(priv->usb_20_clk)) {
+		if (PTR_ERR(priv->usb_20_clk) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
 		dev_info(dev, "Clock not found in Device Tree\n");
 		priv->usb_20_clk = NULL;
 	}
@@ -267,6 +382,8 @@ static int brcm_usb_phy_dvr_init(struct device *dev,
 
 		priv->usb_30_clk = of_clk_get_by_name(dn, "sw_usb3");
 		if (IS_ERR(priv->usb_30_clk)) {
+			if (PTR_ERR(priv->usb_30_clk) == -EPROBE_DEFER)
+				return -EPROBE_DEFER;
 			dev_info(dev,
 				 "USB3.0 clock not found in Device Tree\n");
 			priv->usb_30_clk = NULL;
@@ -275,18 +392,46 @@ static int brcm_usb_phy_dvr_init(struct device *dev,
 		if (err)
 			return err;
 	}
+
+	priv->suspend_clk = clk_get(dev, "usb0_freerun");
+	if (IS_ERR(priv->suspend_clk)) {
+		if (PTR_ERR(priv->suspend_clk) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_err(dev, "Suspend Clock not found in Device Tree\n");
+		priv->suspend_clk = NULL;
+	}
+
+	priv->wake_irq = platform_get_irq_byname(pdev, "wake");
+	if (priv->wake_irq < 0)
+		priv->wake_irq = platform_get_irq_byname(pdev, "wakeup");
+	if (priv->wake_irq >= 0) {
+		err = devm_request_irq(dev, priv->wake_irq,
+				       brcm_usb_phy_wake_isr, 0,
+				       dev_name(dev), gphy);
+		if (err < 0)
+			return err;
+		device_set_wakeup_capable(dev, 1);
+	} else {
+		dev_info(dev,
+			 "Wake interrupt missing, system wake not supported\n");
+	}
+
 	return 0;
 }
 
 static int brcm_usb_phy_probe(struct platform_device *pdev)
 {
-	struct resource *res;
 	struct device *dev = &pdev->dev;
 	struct brcm_usb_phy_data *priv;
 	struct phy_provider *phy_provider;
 	struct device_node *dn = pdev->dev.of_node;
 	int err;
 	const char *mode;
+	const struct of_device_id *match;
+	void (*dvr_init)(struct brcm_usb_init_params *params);
+	const struct match_chip_info *info;
+	struct regmap *rmap;
+	int x;
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -295,30 +440,14 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
 
 	priv->ini.family_id = brcmstb_get_family_id();
 	priv->ini.product_id = brcmstb_get_product_id();
-	brcm_usb_set_family_map(&priv->ini);
+
+	match = of_match_node(brcm_usb_dt_ids, dev->of_node);
+	info = match->data;
+	dvr_init = info->init_func;
+	(*dvr_init)(&priv->ini);
+
 	dev_dbg(dev, "Best mapping table is for %s\n",
 		priv->ini.family_name);
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "can't get USB_CTRL base address\n");
-		return -EINVAL;
-	}
-	priv->ini.ctrl_regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(priv->ini.ctrl_regs)) {
-		dev_err(dev, "can't map CTRL register space\n");
-		return -EINVAL;
-	}
-
-	/* The XHCI EC registers are optional */
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (res) {
-		priv->ini.xhci_ec_regs =
-			devm_ioremap_resource(dev, res);
-		if (IS_ERR(priv->ini.xhci_ec_regs)) {
-			dev_err(dev, "can't map XHCI EC register space\n");
-			return -EINVAL;
-		}
-	}
 
 	of_property_read_u32(dn, "brcm,ipp", &priv->ini.ipp);
 	of_property_read_u32(dn, "brcm,ioc", &priv->ini.ioc);
@@ -335,7 +464,23 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
 	if (of_property_read_bool(dn, "brcm,has-eohci"))
 		priv->has_eohci = true;
 
-	err = brcm_usb_phy_dvr_init(dev, priv, dn);
+	for (x = 0; x < BRCM_REGS_MAX; x++) {
+		if (info->required_regs[x] >= BRCM_REGS_MAX)
+			break;
+
+		err = brcm_usb_get_regs(pdev, info->required_regs[x],
+					&priv->ini, false);
+		if (err)
+			return -EINVAL;
+	}
+	if (info->optional_reg) {
+		err = brcm_usb_get_regs(pdev, info->optional_reg,
+					&priv->ini, true);
+		if (err)
+			return -EINVAL;
+	}
+
+	err = brcm_usb_phy_dvr_init(pdev, priv, dn);
 	if (err)
 		return err;
 
@@ -354,14 +499,23 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
 	if (err)
 		dev_warn(dev, "Error creating sysfs attributes\n");
 
+	/* Get piarbctl syscon if it exists */
+	rmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+						 "syscon-piarbctl");
+	if (IS_ERR(rmap))
+		rmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+						       "brcm,syscon-piarbctl");
+	if (!IS_ERR(rmap))
+		priv->ini.syscon_piarbctl = rmap;
+
 	/* start with everything off */
 	if (priv->has_xhci)
 		brcm_usb_uninit_xhci(&priv->ini);
 	if (priv->has_eohci)
 		brcm_usb_uninit_eohci(&priv->ini);
 	brcm_usb_uninit_common(&priv->ini);
-	clk_disable(priv->usb_20_clk);
-	clk_disable(priv->usb_30_clk);
+	clk_disable_unprepare(priv->usb_20_clk);
+	clk_disable_unprepare(priv->usb_30_clk);
 
 	phy_provider = devm_of_phy_provider_register(dev, brcm_usb_phy_xlate);
 
@@ -381,8 +535,28 @@ static int brcm_usb_phy_suspend(struct device *dev)
 	struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
 
 	if (priv->init_count) {
-		clk_disable(priv->usb_20_clk);
-		clk_disable(priv->usb_30_clk);
+		priv->ini.wake_enabled = device_may_wakeup(dev);
+		if (priv->phys[BRCM_USB_PHY_3_0].inited)
+			brcm_usb_uninit_xhci(&priv->ini);
+		if (priv->phys[BRCM_USB_PHY_2_0].inited)
+			brcm_usb_uninit_eohci(&priv->ini);
+		brcm_usb_uninit_common(&priv->ini);
+
+		/*
+		 * Handle the clocks unless needed for wake. This has
+		 * to work for both older XHCI->3.0-clks, EOHCI->2.0-clks
+		 * and newer XHCI->2.0-clks/3.0-clks.
+		 */
+
+		if (!priv->ini.suspend_with_clocks) {
+			if (priv->phys[BRCM_USB_PHY_3_0].inited)
+				clk_disable_unprepare(priv->usb_30_clk);
+			if (priv->phys[BRCM_USB_PHY_2_0].inited ||
+			    !priv->has_eohci)
+				clk_disable_unprepare(priv->usb_20_clk);
+		}
+		if (priv->wake_irq >= 0)
+			enable_irq_wake(priv->wake_irq);
 	}
 	return 0;
 }
@@ -391,8 +565,8 @@ static int brcm_usb_phy_resume(struct device *dev)
 {
 	struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
 
-	clk_enable(priv->usb_20_clk);
-	clk_enable(priv->usb_30_clk);
+	clk_prepare_enable(priv->usb_20_clk);
+	clk_prepare_enable(priv->usb_30_clk);
 	brcm_usb_init_ipp(&priv->ini);
 
 	/*
@@ -400,18 +574,22 @@ static int brcm_usb_phy_resume(struct device *dev)
 	 * Uninitialize anything that wasn't previously initialized.
 	 */
 	if (priv->init_count) {
+		if (priv->wake_irq >= 0)
+			disable_irq_wake(priv->wake_irq);
 		brcm_usb_init_common(&priv->ini);
 		if (priv->phys[BRCM_USB_PHY_2_0].inited) {
 			brcm_usb_init_eohci(&priv->ini);
 		} else if (priv->has_eohci) {
 			brcm_usb_uninit_eohci(&priv->ini);
-			clk_disable(priv->usb_20_clk);
+			clk_disable_unprepare(priv->usb_20_clk);
 		}
 		if (priv->phys[BRCM_USB_PHY_3_0].inited) {
 			brcm_usb_init_xhci(&priv->ini);
 		} else if (priv->has_xhci) {
 			brcm_usb_uninit_xhci(&priv->ini);
-			clk_disable(priv->usb_30_clk);
+			clk_disable_unprepare(priv->usb_30_clk);
+			if (!priv->has_eohci)
+				clk_disable_unprepare(priv->usb_20_clk);
 		}
 	} else {
 		if (priv->has_xhci)
@@ -419,10 +597,10 @@ static int brcm_usb_phy_resume(struct device *dev)
 		if (priv->has_eohci)
 			brcm_usb_uninit_eohci(&priv->ini);
 		brcm_usb_uninit_common(&priv->ini);
-		clk_disable(priv->usb_20_clk);
-		clk_disable(priv->usb_30_clk);
+		clk_disable_unprepare(priv->usb_20_clk);
+		clk_disable_unprepare(priv->usb_30_clk);
 	}
-
+	priv->ini.wake_enabled = false;
 	return 0;
 }
 #endif /* CONFIG_PM_SLEEP */
@@ -431,11 +609,6 @@ static const struct dev_pm_ops brcm_usb_phy_pm_ops = {
 	SET_LATE_SYSTEM_SLEEP_PM_OPS(brcm_usb_phy_suspend, brcm_usb_phy_resume)
 };
 
-static const struct of_device_id brcm_usb_dt_ids[] = {
-	{ .compatible = "brcm,brcmstb-usb-phy" },
-	{ /* sentinel */ }
-};
-
 MODULE_DEVICE_TABLE(of, brcm_usb_dt_ids);
 
 static struct platform_driver brcm_usb_driver = {
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index de10402..a5c08e5 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -22,48 +22,134 @@
 #include <dt-bindings/phy/phy.h>
 
 /* PHY register offsets */
-#define SIERRA_PHY_PLL_CFG		(0xc00e << 2)
-#define SIERRA_DET_STANDEC_A		(0x4000 << 2)
-#define SIERRA_DET_STANDEC_B		(0x4001 << 2)
-#define SIERRA_DET_STANDEC_C		(0x4002 << 2)
-#define SIERRA_DET_STANDEC_D		(0x4003 << 2)
-#define SIERRA_DET_STANDEC_E		(0x4004 << 2)
-#define SIERRA_PSM_LANECAL		(0x4008 << 2)
-#define SIERRA_PSM_DIAG			(0x4015 << 2)
-#define SIERRA_PSC_TX_A0		(0x4028 << 2)
-#define SIERRA_PSC_TX_A1		(0x4029 << 2)
-#define SIERRA_PSC_TX_A2		(0x402A << 2)
-#define SIERRA_PSC_TX_A3		(0x402B << 2)
-#define SIERRA_PSC_RX_A0		(0x4030 << 2)
-#define SIERRA_PSC_RX_A1		(0x4031 << 2)
-#define SIERRA_PSC_RX_A2		(0x4032 << 2)
-#define SIERRA_PSC_RX_A3		(0x4033 << 2)
-#define SIERRA_PLLCTRL_SUBRATE		(0x403A << 2)
-#define SIERRA_PLLCTRL_GEN_D		(0x403E << 2)
-#define SIERRA_DRVCTRL_ATTEN		(0x406A << 2)
-#define SIERRA_CLKPATHCTRL_TMR		(0x4081 << 2)
-#define SIERRA_RX_CREQ_FLTR_A_MODE1	(0x4087 << 2)
-#define SIERRA_RX_CREQ_FLTR_A_MODE0	(0x4088 << 2)
-#define SIERRA_CREQ_CCLKDET_MODE01	(0x408E << 2)
-#define SIERRA_RX_CTLE_MAINTENANCE	(0x4091 << 2)
-#define SIERRA_CREQ_FSMCLK_SEL		(0x4092 << 2)
-#define SIERRA_CTLELUT_CTRL		(0x4098 << 2)
-#define SIERRA_DFE_ECMP_RATESEL		(0x40C0 << 2)
-#define SIERRA_DFE_SMP_RATESEL		(0x40C1 << 2)
-#define SIERRA_DEQ_VGATUNE_CTRL		(0x40E1 << 2)
-#define SIERRA_TMRVAL_MODE3		(0x416E << 2)
-#define SIERRA_TMRVAL_MODE2		(0x416F << 2)
-#define SIERRA_TMRVAL_MODE1		(0x4170 << 2)
-#define SIERRA_TMRVAL_MODE0		(0x4171 << 2)
-#define SIERRA_PICNT_MODE1		(0x4174 << 2)
-#define SIERRA_CPI_OUTBUF_RATESEL	(0x417C << 2)
-#define SIERRA_LFPSFILT_NS		(0x418A << 2)
-#define SIERRA_LFPSFILT_RD		(0x418B << 2)
-#define SIERRA_LFPSFILT_MP		(0x418C << 2)
-#define SIERRA_SDFILT_H2L_A		(0x4191 << 2)
+#define SIERRA_COMMON_CDB_OFFSET			0x0
+#define SIERRA_MACRO_ID_REG				0x0
+#define SIERRA_CMN_PLLLC_MODE_PREG			0x48
+#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG		0x49
+#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG		0x4A
+#define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG		0x4B
+#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG		0x4F
+#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG		0x50
+#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG	0x62
 
-#define SIERRA_MACRO_ID			0x00007364
-#define SIERRA_MAX_LANES		4
+#define SIERRA_LANE_CDB_OFFSET(ln, block_offset, reg_offset)	\
+				((0x4000 << (block_offset)) + \
+				 (((ln) << 9) << (reg_offset)))
+
+#define SIERRA_DET_STANDEC_A_PREG			0x000
+#define SIERRA_DET_STANDEC_B_PREG			0x001
+#define SIERRA_DET_STANDEC_C_PREG			0x002
+#define SIERRA_DET_STANDEC_D_PREG			0x003
+#define SIERRA_DET_STANDEC_E_PREG			0x004
+#define SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG		0x008
+#define SIERRA_PSM_A0IN_TMR_PREG			0x009
+#define SIERRA_PSM_DIAG_PREG				0x015
+#define SIERRA_PSC_TX_A0_PREG				0x028
+#define SIERRA_PSC_TX_A1_PREG				0x029
+#define SIERRA_PSC_TX_A2_PREG				0x02A
+#define SIERRA_PSC_TX_A3_PREG				0x02B
+#define SIERRA_PSC_RX_A0_PREG				0x030
+#define SIERRA_PSC_RX_A1_PREG				0x031
+#define SIERRA_PSC_RX_A2_PREG				0x032
+#define SIERRA_PSC_RX_A3_PREG				0x033
+#define SIERRA_PLLCTRL_SUBRATE_PREG			0x03A
+#define SIERRA_PLLCTRL_GEN_D_PREG			0x03E
+#define SIERRA_PLLCTRL_CPGAIN_MODE_PREG			0x03F
+#define SIERRA_PLLCTRL_STATUS_PREG			0x044
+#define SIERRA_CLKPATH_BIASTRIM_PREG			0x04B
+#define SIERRA_DFE_BIASTRIM_PREG			0x04C
+#define SIERRA_DRVCTRL_ATTEN_PREG			0x06A
+#define SIERRA_CLKPATHCTRL_TMR_PREG			0x081
+#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG		0x085
+#define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG		0x086
+#define SIERRA_RX_CREQ_FLTR_A_MODE1_PREG		0x087
+#define SIERRA_RX_CREQ_FLTR_A_MODE0_PREG		0x088
+#define SIERRA_CREQ_CCLKDET_MODE01_PREG			0x08E
+#define SIERRA_RX_CTLE_MAINTENANCE_PREG			0x091
+#define SIERRA_CREQ_FSMCLK_SEL_PREG			0x092
+#define SIERRA_CREQ_EQ_CTRL_PREG			0x093
+#define SIERRA_CREQ_SPARE_PREG				0x096
+#define SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG		0x097
+#define SIERRA_CTLELUT_CTRL_PREG			0x098
+#define SIERRA_DFE_ECMP_RATESEL_PREG			0x0C0
+#define SIERRA_DFE_SMP_RATESEL_PREG			0x0C1
+#define SIERRA_DEQ_PHALIGN_CTRL				0x0C4
+#define SIERRA_DEQ_CONCUR_CTRL1_PREG			0x0C8
+#define SIERRA_DEQ_CONCUR_CTRL2_PREG			0x0C9
+#define SIERRA_DEQ_EPIPWR_CTRL2_PREG			0x0CD
+#define SIERRA_DEQ_FAST_MAINT_CYCLES_PREG		0x0CE
+#define SIERRA_DEQ_ERRCMP_CTRL_PREG			0x0D0
+#define SIERRA_DEQ_OFFSET_CTRL_PREG			0x0D8
+#define SIERRA_DEQ_GAIN_CTRL_PREG			0x0E0
+#define SIERRA_DEQ_VGATUNE_CTRL_PREG			0x0E1
+#define SIERRA_DEQ_GLUT0				0x0E8
+#define SIERRA_DEQ_GLUT1				0x0E9
+#define SIERRA_DEQ_GLUT2				0x0EA
+#define SIERRA_DEQ_GLUT3				0x0EB
+#define SIERRA_DEQ_GLUT4				0x0EC
+#define SIERRA_DEQ_GLUT5				0x0ED
+#define SIERRA_DEQ_GLUT6				0x0EE
+#define SIERRA_DEQ_GLUT7				0x0EF
+#define SIERRA_DEQ_GLUT8				0x0F0
+#define SIERRA_DEQ_GLUT9				0x0F1
+#define SIERRA_DEQ_GLUT10				0x0F2
+#define SIERRA_DEQ_GLUT11				0x0F3
+#define SIERRA_DEQ_GLUT12				0x0F4
+#define SIERRA_DEQ_GLUT13				0x0F5
+#define SIERRA_DEQ_GLUT14				0x0F6
+#define SIERRA_DEQ_GLUT15				0x0F7
+#define SIERRA_DEQ_GLUT16				0x0F8
+#define SIERRA_DEQ_ALUT0				0x108
+#define SIERRA_DEQ_ALUT1				0x109
+#define SIERRA_DEQ_ALUT2				0x10A
+#define SIERRA_DEQ_ALUT3				0x10B
+#define SIERRA_DEQ_ALUT4				0x10C
+#define SIERRA_DEQ_ALUT5				0x10D
+#define SIERRA_DEQ_ALUT6				0x10E
+#define SIERRA_DEQ_ALUT7				0x10F
+#define SIERRA_DEQ_ALUT8				0x110
+#define SIERRA_DEQ_ALUT9				0x111
+#define SIERRA_DEQ_ALUT10				0x112
+#define SIERRA_DEQ_ALUT11				0x113
+#define SIERRA_DEQ_ALUT12				0x114
+#define SIERRA_DEQ_ALUT13				0x115
+#define SIERRA_DEQ_DFETAP_CTRL_PREG			0x128
+#define SIERRA_DFE_EN_1010_IGNORE_PREG			0x134
+#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG		0x150
+#define SIERRA_DEQ_TAU_CTRL2_PREG			0x151
+#define SIERRA_DEQ_PICTRL_PREG				0x161
+#define SIERRA_CPICAL_TMRVAL_MODE1_PREG			0x170
+#define SIERRA_CPICAL_TMRVAL_MODE0_PREG			0x171
+#define SIERRA_CPICAL_PICNT_MODE1_PREG			0x174
+#define SIERRA_CPI_OUTBUF_RATESEL_PREG			0x17C
+#define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG		0x183
+#define SIERRA_LFPSDET_SUPPORT_PREG			0x188
+#define SIERRA_LFPSFILT_NS_PREG				0x18A
+#define SIERRA_LFPSFILT_RD_PREG				0x18B
+#define SIERRA_LFPSFILT_MP_PREG				0x18C
+#define SIERRA_SIGDET_SUPPORT_PREG			0x190
+#define SIERRA_SDFILT_H2L_A_PREG			0x191
+#define SIERRA_SDFILT_L2H_PREG				0x193
+#define SIERRA_RXBUFFER_CTLECTRL_PREG			0x19E
+#define SIERRA_RXBUFFER_RCDFECTRL_PREG			0x19F
+#define SIERRA_RXBUFFER_DFECTRL_PREG			0x1A0
+#define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG		0x14F
+#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG		0x150
+
+#define SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset)	\
+				      (0xc000 << (block_offset))
+#define SIERRA_PHY_PLL_CFG				0xe
+
+#define SIERRA_MACRO_ID					0x00007364
+#define SIERRA_MAX_LANES				16
+#define PLL_LOCK_TIME					100000
+
+static const struct reg_field macro_id_type =
+				REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15);
+static const struct reg_field phy_pll_cfg_1 =
+				REG_FIELD(SIERRA_PHY_PLL_CFG, 1, 1);
+static const struct reg_field pllctrl_lock =
+				REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0);
 
 struct cdns_sierra_inst {
 	struct phy *phy;
@@ -80,53 +166,172 @@ struct cdns_reg_pairs {
 
 struct cdns_sierra_data {
 		u32 id_value;
-		u32 pcie_regs;
-		u32 usb_regs;
-		struct cdns_reg_pairs *pcie_vals;
-		struct cdns_reg_pairs  *usb_vals;
+		u8 block_offset_shift;
+		u8 reg_offset_shift;
+		u32 pcie_cmn_regs;
+		u32 pcie_ln_regs;
+		u32 usb_cmn_regs;
+		u32 usb_ln_regs;
+		struct cdns_reg_pairs *pcie_cmn_vals;
+		struct cdns_reg_pairs *pcie_ln_vals;
+		struct cdns_reg_pairs *usb_cmn_vals;
+		struct cdns_reg_pairs *usb_ln_vals;
+};
+
+struct cdns_regmap_cdb_context {
+	struct device *dev;
+	void __iomem *base;
+	u8 reg_offset_shift;
 };
 
 struct cdns_sierra_phy {
 	struct device *dev;
-	void __iomem *base;
+	struct regmap *regmap;
 	struct cdns_sierra_data *init_data;
 	struct cdns_sierra_inst phys[SIERRA_MAX_LANES];
 	struct reset_control *phy_rst;
 	struct reset_control *apb_rst;
+	struct regmap *regmap_lane_cdb[SIERRA_MAX_LANES];
+	struct regmap *regmap_phy_config_ctrl;
+	struct regmap *regmap_common_cdb;
+	struct regmap_field *macro_id_type;
+	struct regmap_field *phy_pll_cfg_1;
+	struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES];
 	struct clk *clk;
+	struct clk *cmn_refclk_dig_div;
+	struct clk *cmn_refclk1_dig_div;
 	int nsubnodes;
+	u32 num_lanes;
 	bool autoconf;
 };
 
-static void cdns_sierra_phy_init(struct phy *gphy)
+static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
+{
+	struct cdns_regmap_cdb_context *ctx = context;
+	u32 offset = reg << ctx->reg_offset_shift;
+
+	writew(val, ctx->base + offset);
+
+	return 0;
+}
+
+static int cdns_regmap_read(void *context, unsigned int reg, unsigned int *val)
+{
+	struct cdns_regmap_cdb_context *ctx = context;
+	u32 offset = reg << ctx->reg_offset_shift;
+
+	*val = readw(ctx->base + offset);
+	return 0;
+}
+
+#define SIERRA_LANE_CDB_REGMAP_CONF(n) \
+{ \
+	.name = "sierra_lane" n "_cdb", \
+	.reg_stride = 1, \
+	.fast_io = true, \
+	.reg_write = cdns_regmap_write, \
+	.reg_read = cdns_regmap_read, \
+}
+
+static struct regmap_config cdns_sierra_lane_cdb_config[] = {
+	SIERRA_LANE_CDB_REGMAP_CONF("0"),
+	SIERRA_LANE_CDB_REGMAP_CONF("1"),
+	SIERRA_LANE_CDB_REGMAP_CONF("2"),
+	SIERRA_LANE_CDB_REGMAP_CONF("3"),
+	SIERRA_LANE_CDB_REGMAP_CONF("4"),
+	SIERRA_LANE_CDB_REGMAP_CONF("5"),
+	SIERRA_LANE_CDB_REGMAP_CONF("6"),
+	SIERRA_LANE_CDB_REGMAP_CONF("7"),
+	SIERRA_LANE_CDB_REGMAP_CONF("8"),
+	SIERRA_LANE_CDB_REGMAP_CONF("9"),
+	SIERRA_LANE_CDB_REGMAP_CONF("10"),
+	SIERRA_LANE_CDB_REGMAP_CONF("11"),
+	SIERRA_LANE_CDB_REGMAP_CONF("12"),
+	SIERRA_LANE_CDB_REGMAP_CONF("13"),
+	SIERRA_LANE_CDB_REGMAP_CONF("14"),
+	SIERRA_LANE_CDB_REGMAP_CONF("15"),
+};
+
+static struct regmap_config cdns_sierra_common_cdb_config = {
+	.name = "sierra_common_cdb",
+	.reg_stride = 1,
+	.fast_io = true,
+	.reg_write = cdns_regmap_write,
+	.reg_read = cdns_regmap_read,
+};
+
+static struct regmap_config cdns_sierra_phy_config_ctrl_config = {
+	.name = "sierra_phy_config_ctrl",
+	.reg_stride = 1,
+	.fast_io = true,
+	.reg_write = cdns_regmap_write,
+	.reg_read = cdns_regmap_read,
+};
+
+static int cdns_sierra_phy_init(struct phy *gphy)
 {
 	struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
 	struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent);
+	struct regmap *regmap;
 	int i, j;
-	struct cdns_reg_pairs *vals;
-	u32 num_regs;
+	struct cdns_reg_pairs *cmn_vals, *ln_vals;
+	u32 num_cmn_regs, num_ln_regs;
 
+	/* Initialise the PHY registers, unless auto configured */
+	if (phy->autoconf)
+		return 0;
+
+	clk_set_rate(phy->cmn_refclk_dig_div, 25000000);
+	clk_set_rate(phy->cmn_refclk1_dig_div, 25000000);
 	if (ins->phy_type == PHY_TYPE_PCIE) {
-		num_regs = phy->init_data->pcie_regs;
-		vals = phy->init_data->pcie_vals;
+		num_cmn_regs = phy->init_data->pcie_cmn_regs;
+		num_ln_regs = phy->init_data->pcie_ln_regs;
+		cmn_vals = phy->init_data->pcie_cmn_vals;
+		ln_vals = phy->init_data->pcie_ln_vals;
 	} else if (ins->phy_type == PHY_TYPE_USB3) {
-		num_regs = phy->init_data->usb_regs;
-		vals = phy->init_data->usb_vals;
+		num_cmn_regs = phy->init_data->usb_cmn_regs;
+		num_ln_regs = phy->init_data->usb_ln_regs;
+		cmn_vals = phy->init_data->usb_cmn_vals;
+		ln_vals = phy->init_data->usb_ln_vals;
 	} else {
-		return;
+		return -EINVAL;
 	}
-	for (i = 0; i < ins->num_lanes; i++)
-		for (j = 0; j < num_regs ; j++)
-			writel(vals[j].val, phy->base +
-				vals[j].off + (i + ins->mlane) * 0x800);
+
+	regmap = phy->regmap_common_cdb;
+	for (j = 0; j < num_cmn_regs ; j++)
+		regmap_write(regmap, cmn_vals[j].off, cmn_vals[j].val);
+
+	for (i = 0; i < ins->num_lanes; i++) {
+		for (j = 0; j < num_ln_regs ; j++) {
+			regmap = phy->regmap_lane_cdb[i + ins->mlane];
+			regmap_write(regmap, ln_vals[j].off, ln_vals[j].val);
+		}
+	}
+
+	return 0;
 }
 
 static int cdns_sierra_phy_on(struct phy *gphy)
 {
+	struct cdns_sierra_phy *sp = dev_get_drvdata(gphy->dev.parent);
 	struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
+	struct device *dev = sp->dev;
+	u32 val;
+	int ret;
 
 	/* Take the PHY lane group out of reset */
-	return reset_control_deassert(ins->lnk_rst);
+	ret = reset_control_deassert(ins->lnk_rst);
+	if (ret) {
+		dev_err(dev, "Failed to take the PHY lane out of reset\n");
+		return ret;
+	}
+
+	ret = regmap_field_read_poll_timeout(sp->pllctrl_lock[ins->mlane],
+					     val, val, 1000, PLL_LOCK_TIME);
+	if (ret < 0)
+		dev_err(dev, "PLL lock of lane failed\n");
+
+	return ret;
 }
 
 static int cdns_sierra_phy_off(struct phy *gphy)
@@ -136,9 +341,20 @@ static int cdns_sierra_phy_off(struct phy *gphy)
 	return reset_control_assert(ins->lnk_rst);
 }
 
+static int cdns_sierra_phy_reset(struct phy *gphy)
+{
+	struct cdns_sierra_phy *sp = dev_get_drvdata(gphy->dev.parent);
+
+	reset_control_assert(sp->phy_rst);
+	reset_control_deassert(sp->phy_rst);
+	return 0;
+};
+
 static const struct phy_ops ops = {
+	.init		= cdns_sierra_phy_init,
 	.power_on	= cdns_sierra_phy_on,
 	.power_off	= cdns_sierra_phy_off,
+	.reset		= cdns_sierra_phy_reset,
 	.owner		= THIS_MODULE,
 };
 
@@ -159,41 +375,152 @@ static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst,
 
 static const struct of_device_id cdns_sierra_id_table[];
 
+static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base,
+				       u32 block_offset, u8 reg_offset_shift,
+				       const struct regmap_config *config)
+{
+	struct cdns_regmap_cdb_context *ctx;
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	ctx->dev = dev;
+	ctx->base = base + block_offset;
+	ctx->reg_offset_shift = reg_offset_shift;
+
+	return devm_regmap_init(dev, NULL, ctx, config);
+}
+
+static int cdns_regfield_init(struct cdns_sierra_phy *sp)
+{
+	struct device *dev = sp->dev;
+	struct regmap_field *field;
+	struct regmap *regmap;
+	int i;
+
+	regmap = sp->regmap_common_cdb;
+	field = devm_regmap_field_alloc(dev, regmap, macro_id_type);
+	if (IS_ERR(field)) {
+		dev_err(dev, "MACRO_ID_TYPE reg field init failed\n");
+		return PTR_ERR(field);
+	}
+	sp->macro_id_type = field;
+
+	regmap = sp->regmap_phy_config_ctrl;
+	field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1);
+	if (IS_ERR(field)) {
+		dev_err(dev, "PHY_PLL_CFG_1 reg field init failed\n");
+		return PTR_ERR(field);
+	}
+	sp->phy_pll_cfg_1 = field;
+
+	for (i = 0; i < SIERRA_MAX_LANES; i++) {
+		regmap = sp->regmap_lane_cdb[i];
+		field = devm_regmap_field_alloc(dev, regmap, pllctrl_lock);
+		if (IS_ERR(field)) {
+			dev_err(dev, "P%d PLLCTRL_LOCK reg field init failed\n", i);
+			return PTR_ERR(field);
+		}
+		sp->pllctrl_lock[i] = field;
+	}
+
+	return 0;
+}
+
+static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp,
+				   void __iomem *base, u8 block_offset_shift,
+				   u8 reg_offset_shift)
+{
+	struct device *dev = sp->dev;
+	struct regmap *regmap;
+	u32 block_offset;
+	int i;
+
+	for (i = 0; i < SIERRA_MAX_LANES; i++) {
+		block_offset = SIERRA_LANE_CDB_OFFSET(i, block_offset_shift,
+						      reg_offset_shift);
+		regmap = cdns_regmap_init(dev, base, block_offset,
+					  reg_offset_shift,
+					  &cdns_sierra_lane_cdb_config[i]);
+		if (IS_ERR(regmap)) {
+			dev_err(dev, "Failed to init lane CDB regmap\n");
+			return PTR_ERR(regmap);
+		}
+		sp->regmap_lane_cdb[i] = regmap;
+	}
+
+	regmap = cdns_regmap_init(dev, base, SIERRA_COMMON_CDB_OFFSET,
+				  reg_offset_shift,
+				  &cdns_sierra_common_cdb_config);
+	if (IS_ERR(regmap)) {
+		dev_err(dev, "Failed to init common CDB regmap\n");
+		return PTR_ERR(regmap);
+	}
+	sp->regmap_common_cdb = regmap;
+
+	block_offset = SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset_shift);
+	regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift,
+				  &cdns_sierra_phy_config_ctrl_config);
+	if (IS_ERR(regmap)) {
+		dev_err(dev, "Failed to init PHY config and control regmap\n");
+		return PTR_ERR(regmap);
+	}
+	sp->regmap_phy_config_ctrl = regmap;
+
+	return 0;
+}
+
 static int cdns_sierra_phy_probe(struct platform_device *pdev)
 {
 	struct cdns_sierra_phy *sp;
 	struct phy_provider *phy_provider;
 	struct device *dev = &pdev->dev;
 	const struct of_device_id *match;
+	struct cdns_sierra_data *data;
+	unsigned int id_value;
 	struct resource *res;
 	int i, ret, node = 0;
+	void __iomem *base;
+	struct clk *clk;
 	struct device_node *dn = dev->of_node, *child;
 
 	if (of_get_child_count(dn) == 0)
 		return -ENODEV;
 
+	/* Get init data for this PHY */
+	match = of_match_device(cdns_sierra_id_table, dev);
+	if (!match)
+		return -EINVAL;
+
+	data = (struct cdns_sierra_data *)match->data;
+
 	sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
 	if (!sp)
 		return -ENOMEM;
 	dev_set_drvdata(dev, sp);
 	sp->dev = dev;
+	sp->init_data = data;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	sp->base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(sp->base)) {
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base)) {
 		dev_err(dev, "missing \"reg\"\n");
-		return PTR_ERR(sp->base);
+		return PTR_ERR(base);
 	}
 
-	/* Get init data for this PHY */
-	match = of_match_device(cdns_sierra_id_table, dev);
-	if (!match)
-		return -EINVAL;
-	sp->init_data = (struct cdns_sierra_data *)match->data;
+	ret = cdns_regmap_init_blocks(sp, base, data->block_offset_shift,
+				      data->reg_offset_shift);
+	if (ret)
+		return ret;
+
+	ret = cdns_regfield_init(sp);
+	if (ret)
+		return ret;
 
 	platform_set_drvdata(pdev, sp);
 
-	sp->clk = devm_clk_get(dev, "phy_clk");
+	sp->clk = devm_clk_get_optional(dev, "phy_clk");
 	if (IS_ERR(sp->clk)) {
 		dev_err(dev, "failed to get clock phy_clk\n");
 		return PTR_ERR(sp->clk);
@@ -205,12 +532,28 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
 		return PTR_ERR(sp->phy_rst);
 	}
 
-	sp->apb_rst = devm_reset_control_get(dev, "sierra_apb");
+	sp->apb_rst = devm_reset_control_get_optional(dev, "sierra_apb");
 	if (IS_ERR(sp->apb_rst)) {
 		dev_err(dev, "failed to get apb reset\n");
 		return PTR_ERR(sp->apb_rst);
 	}
 
+	clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
+	if (IS_ERR(clk)) {
+		dev_err(dev, "cmn_refclk_dig_div clock not found\n");
+		ret = PTR_ERR(clk);
+		return ret;
+	}
+	sp->cmn_refclk_dig_div = clk;
+
+	clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
+	if (IS_ERR(clk)) {
+		dev_err(dev, "cmn_refclk1_dig_div clock not found\n");
+		ret = PTR_ERR(clk);
+		return ret;
+	}
+	sp->cmn_refclk1_dig_div = clk;
+
 	ret = clk_prepare_enable(sp->clk);
 	if (ret)
 		return ret;
@@ -219,7 +562,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
 	reset_control_deassert(sp->apb_rst);
 
 	/* Check that PHY is present */
-	if  (sp->init_data->id_value != readl(sp->base)) {
+	regmap_field_read(sp->macro_id_type, &id_value);
+	if (sp->init_data->id_value != id_value) {
 		ret = -EINVAL;
 		goto clk_disable;
 	}
@@ -230,7 +574,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
 		struct phy *gphy;
 
 		sp->phys[node].lnk_rst =
-			of_reset_control_get_exclusive_by_index(child, 0);
+			of_reset_control_array_get_exclusive(child);
 
 		if (IS_ERR(sp->phys[node].lnk_rst)) {
 			dev_err(dev, "failed to get reset %s\n",
@@ -248,6 +592,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
 			}
 		}
 
+		sp->num_lanes += sp->phys[node].num_lanes;
+
 		gphy = devm_phy_create(dev, child, &ops);
 
 		if (IS_ERR(gphy)) {
@@ -257,17 +603,18 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
 		sp->phys[node].phy = gphy;
 		phy_set_drvdata(gphy, &sp->phys[node]);
 
-		/* Initialise the PHY registers, unless auto configured */
-		if (!sp->autoconf)
-			cdns_sierra_phy_init(gphy);
-
 		node++;
 	}
 	sp->nsubnodes = node;
 
+	if (sp->num_lanes > SIERRA_MAX_LANES) {
+		dev_err(dev, "Invalid lane configuration\n");
+		goto put_child2;
+	}
+
 	/* If more than one subnode, configure the PHY as multilink */
 	if (!sp->autoconf && sp->nsubnodes > 1)
-		writel(2, sp->base + SIERRA_PHY_PLL_CFG);
+		regmap_field_write(sp->phy_pll_cfg_1, 0x1);
 
 	pm_runtime_enable(dev);
 	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
@@ -288,7 +635,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
 
 static int cdns_sierra_phy_remove(struct platform_device *pdev)
 {
-	struct cdns_sierra_phy *phy = dev_get_drvdata(pdev->dev.parent);
+	struct cdns_sierra_phy *phy = platform_get_drvdata(pdev);
 	int i;
 
 	reset_control_assert(phy->phy_rst);
@@ -306,68 +653,158 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static struct cdns_reg_pairs cdns_usb_regs[] = {
-	/*
-	 * Write USB configuration parameters to the PHY.
-	 * These values are specific to this specific hardware
-	 * configuration.
-	 */
-	{0xFE0A, SIERRA_DET_STANDEC_A},
-	{0x000F, SIERRA_DET_STANDEC_B},
-	{0x55A5, SIERRA_DET_STANDEC_C},
-	{0x69AD, SIERRA_DET_STANDEC_D},
-	{0x0241, SIERRA_DET_STANDEC_E},
-	{0x0110, SIERRA_PSM_LANECAL},
-	{0xCF00, SIERRA_PSM_DIAG},
-	{0x001F, SIERRA_PSC_TX_A0},
-	{0x0007, SIERRA_PSC_TX_A1},
-	{0x0003, SIERRA_PSC_TX_A2},
-	{0x0003, SIERRA_PSC_TX_A3},
-	{0x0FFF, SIERRA_PSC_RX_A0},
-	{0x0003, SIERRA_PSC_RX_A1},
-	{0x0003, SIERRA_PSC_RX_A2},
-	{0x0001, SIERRA_PSC_RX_A3},
-	{0x0001, SIERRA_PLLCTRL_SUBRATE},
-	{0x0406, SIERRA_PLLCTRL_GEN_D},
-	{0x0000, SIERRA_DRVCTRL_ATTEN},
-	{0x823E, SIERRA_CLKPATHCTRL_TMR},
-	{0x078F, SIERRA_RX_CREQ_FLTR_A_MODE1},
-	{0x078F, SIERRA_RX_CREQ_FLTR_A_MODE0},
-	{0x7B3C, SIERRA_CREQ_CCLKDET_MODE01},
-	{0x023C, SIERRA_RX_CTLE_MAINTENANCE},
-	{0x3232, SIERRA_CREQ_FSMCLK_SEL},
-	{0x8452, SIERRA_CTLELUT_CTRL},
-	{0x4121, SIERRA_DFE_ECMP_RATESEL},
-	{0x4121, SIERRA_DFE_SMP_RATESEL},
-	{0x9999, SIERRA_DEQ_VGATUNE_CTRL},
-	{0x0330, SIERRA_TMRVAL_MODE0},
-	{0x01FF, SIERRA_PICNT_MODE1},
-	{0x0009, SIERRA_CPI_OUTBUF_RATESEL},
-	{0x000F, SIERRA_LFPSFILT_NS},
-	{0x0009, SIERRA_LFPSFILT_RD},
-	{0x0001, SIERRA_LFPSFILT_MP},
-	{0x8013, SIERRA_SDFILT_H2L_A},
-	{0x0400, SIERRA_TMRVAL_MODE1},
+/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */
+static struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
+	{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+	{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+	{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+	{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+	{0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
 };
 
-static struct cdns_reg_pairs cdns_pcie_regs[] = {
-	/*
-	 * Write PCIe configuration parameters to the PHY.
-	 * These values are specific to this specific hardware
-	 * configuration.
-	 */
-	{0x891f, SIERRA_DET_STANDEC_D},
-	{0x0053, SIERRA_DET_STANDEC_E},
-	{0x0400, SIERRA_TMRVAL_MODE2},
-	{0x0200, SIERRA_TMRVAL_MODE3},
+/* refclk100MHz_32b_PCIe_ln_ext_ssc */
+static struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
+	{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+	{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+	{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+	{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+	{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+	{0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+	{0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}
+};
+
+/* refclk100MHz_20b_USB_cmn_pll_ext_ssc */
+static struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = {
+	{0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+	{0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+	{0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+	{0x0000, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+/* refclk100MHz_20b_USB_ln_ext_ssc */
+static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
+	{0xFE0A, SIERRA_DET_STANDEC_A_PREG},
+	{0x000F, SIERRA_DET_STANDEC_B_PREG},
+	{0x00A5, SIERRA_DET_STANDEC_C_PREG},
+	{0x69ad, SIERRA_DET_STANDEC_D_PREG},
+	{0x0241, SIERRA_DET_STANDEC_E_PREG},
+	{0x0010, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG},
+	{0x0014, SIERRA_PSM_A0IN_TMR_PREG},
+	{0xCF00, SIERRA_PSM_DIAG_PREG},
+	{0x001F, SIERRA_PSC_TX_A0_PREG},
+	{0x0007, SIERRA_PSC_TX_A1_PREG},
+	{0x0003, SIERRA_PSC_TX_A2_PREG},
+	{0x0003, SIERRA_PSC_TX_A3_PREG},
+	{0x0FFF, SIERRA_PSC_RX_A0_PREG},
+	{0x0619, SIERRA_PSC_RX_A1_PREG},
+	{0x0003, SIERRA_PSC_RX_A2_PREG},
+	{0x0001, SIERRA_PSC_RX_A3_PREG},
+	{0x0001, SIERRA_PLLCTRL_SUBRATE_PREG},
+	{0x0406, SIERRA_PLLCTRL_GEN_D_PREG},
+	{0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
+	{0x00CA, SIERRA_CLKPATH_BIASTRIM_PREG},
+	{0x2512, SIERRA_DFE_BIASTRIM_PREG},
+	{0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
+	{0x873E, SIERRA_CLKPATHCTRL_TMR_PREG},
+	{0x03CF, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+	{0x01CE, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+	{0x7B3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
+	{0x033F, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+	{0x3232, SIERRA_CREQ_FSMCLK_SEL_PREG},
+	{0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
+	{0x8000, SIERRA_CREQ_SPARE_PREG},
+	{0xCC44, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+	{0x8453, SIERRA_CTLELUT_CTRL_PREG},
+	{0x4110, SIERRA_DFE_ECMP_RATESEL_PREG},
+	{0x4110, SIERRA_DFE_SMP_RATESEL_PREG},
+	{0x0002, SIERRA_DEQ_PHALIGN_CTRL},
+	{0x3200, SIERRA_DEQ_CONCUR_CTRL1_PREG},
+	{0x5064, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+	{0x0030, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+	{0x0048, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+	{0x5A5A, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+	{0x02F5, SIERRA_DEQ_OFFSET_CTRL_PREG},
+	{0x02F5, SIERRA_DEQ_GAIN_CTRL_PREG},
+	{0x9A8A, SIERRA_DEQ_VGATUNE_CTRL_PREG},
+	{0x0014, SIERRA_DEQ_GLUT0},
+	{0x0014, SIERRA_DEQ_GLUT1},
+	{0x0014, SIERRA_DEQ_GLUT2},
+	{0x0014, SIERRA_DEQ_GLUT3},
+	{0x0014, SIERRA_DEQ_GLUT4},
+	{0x0014, SIERRA_DEQ_GLUT5},
+	{0x0014, SIERRA_DEQ_GLUT6},
+	{0x0014, SIERRA_DEQ_GLUT7},
+	{0x0014, SIERRA_DEQ_GLUT8},
+	{0x0014, SIERRA_DEQ_GLUT9},
+	{0x0014, SIERRA_DEQ_GLUT10},
+	{0x0014, SIERRA_DEQ_GLUT11},
+	{0x0014, SIERRA_DEQ_GLUT12},
+	{0x0014, SIERRA_DEQ_GLUT13},
+	{0x0014, SIERRA_DEQ_GLUT14},
+	{0x0014, SIERRA_DEQ_GLUT15},
+	{0x0014, SIERRA_DEQ_GLUT16},
+	{0x0BAE, SIERRA_DEQ_ALUT0},
+	{0x0AEB, SIERRA_DEQ_ALUT1},
+	{0x0A28, SIERRA_DEQ_ALUT2},
+	{0x0965, SIERRA_DEQ_ALUT3},
+	{0x08A2, SIERRA_DEQ_ALUT4},
+	{0x07DF, SIERRA_DEQ_ALUT5},
+	{0x071C, SIERRA_DEQ_ALUT6},
+	{0x0659, SIERRA_DEQ_ALUT7},
+	{0x0596, SIERRA_DEQ_ALUT8},
+	{0x0514, SIERRA_DEQ_ALUT9},
+	{0x0492, SIERRA_DEQ_ALUT10},
+	{0x0410, SIERRA_DEQ_ALUT11},
+	{0x038E, SIERRA_DEQ_ALUT12},
+	{0x030C, SIERRA_DEQ_ALUT13},
+	{0x03F4, SIERRA_DEQ_DFETAP_CTRL_PREG},
+	{0x0001, SIERRA_DFE_EN_1010_IGNORE_PREG},
+	{0x3C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
+	{0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+	{0x1C08, SIERRA_DEQ_TAU_CTRL2_PREG},
+	{0x0033, SIERRA_DEQ_PICTRL_PREG},
+	{0x0400, SIERRA_CPICAL_TMRVAL_MODE1_PREG},
+	{0x0330, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
+	{0x01FF, SIERRA_CPICAL_PICNT_MODE1_PREG},
+	{0x0009, SIERRA_CPI_OUTBUF_RATESEL_PREG},
+	{0x3232, SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG},
+	{0x0005, SIERRA_LFPSDET_SUPPORT_PREG},
+	{0x000F, SIERRA_LFPSFILT_NS_PREG},
+	{0x0009, SIERRA_LFPSFILT_RD_PREG},
+	{0x0001, SIERRA_LFPSFILT_MP_PREG},
+	{0x8013, SIERRA_SDFILT_H2L_A_PREG},
+	{0x8009, SIERRA_SDFILT_L2H_PREG},
+	{0x0024, SIERRA_RXBUFFER_CTLECTRL_PREG},
+	{0x0020, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+	{0x4243, SIERRA_RXBUFFER_DFECTRL_PREG}
 };
 
 static const struct cdns_sierra_data cdns_map_sierra = {
 	SIERRA_MACRO_ID,
-	ARRAY_SIZE(cdns_pcie_regs),
-	ARRAY_SIZE(cdns_usb_regs),
-	cdns_pcie_regs,
-	cdns_usb_regs
+	0x2,
+	0x2,
+	ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
+	ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
+	ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
+	ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
+	cdns_pcie_cmn_regs_ext_ssc,
+	cdns_pcie_ln_regs_ext_ssc,
+	cdns_usb_cmn_regs_ext_ssc,
+	cdns_usb_ln_regs_ext_ssc,
+};
+
+static const struct cdns_sierra_data cdns_ti_map_sierra = {
+	SIERRA_MACRO_ID,
+	0x0,
+	0x1,
+	ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
+	ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
+	ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
+	ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
+	cdns_pcie_cmn_regs_ext_ssc,
+	cdns_pcie_ln_regs_ext_ssc,
+	cdns_usb_cmn_regs_ext_ssc,
+	cdns_usb_ln_regs_ext_ssc,
 };
 
 static const struct of_device_id cdns_sierra_id_table[] = {
@@ -375,6 +812,10 @@ static const struct of_device_id cdns_sierra_id_table[] = {
 		.compatible = "cdns,sierra-phy-t0",
 		.data = &cdns_map_sierra,
 	},
+	{
+		.compatible = "ti,sierra-phy-t0",
+		.data = &cdns_ti_map_sierra,
+	},
 	{}
 };
 MODULE_DEVICE_TABLE(of, cdns_sierra_id_table);
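
For reference, a minimal userspace sketch of the address arithmetic used by cdns_regmap_read()/cdns_regmap_write() above: the logical Sierra register offset is shifted left by the per-platform reg_offset_shift before being added to the block base. This assumes the 0x2 and 0x1 figures in cdns_map_sierra and cdns_ti_map_sierra are the register offset shifts; the register offset itself is a made-up example, not taken from the driver.

#include <stdio.h>

/* Same computation as cdns_regmap_read()/cdns_regmap_write() */
static unsigned long sierra_bus_addr(unsigned long base, unsigned int reg,
				     unsigned int reg_offset_shift)
{
	return base + ((unsigned long)reg << reg_offset_shift);
}

int main(void)
{
	unsigned int reg = 0x3c;	/* hypothetical Sierra register offset */

	/* reg_offset_shift assumed to be 2 for the plain Cadence wrapper */
	printf("cdns wrapper: 0x%lx\n", sierra_bus_addr(0, reg, 2));
	/* reg_offset_shift assumed to be 1 for the TI integration */
	printf("TI wrapper:   0x%lx\n", sierra_bus_addr(0, reg, 1));
	return 0;
}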
diff --git a/drivers/phy/hisilicon/Kconfig b/drivers/phy/hisilicon/Kconfig
index 534e393..1c73053 100644
--- a/drivers/phy/hisilicon/Kconfig
+++ b/drivers/phy/hisilicon/Kconfig
@@ -33,14 +33,14 @@
 	  If unsure, say N.
 
 config PHY_HISI_INNO_USB2
-       tristate "HiSilicon INNO USB2 PHY support"
-       depends on (ARCH_HISI && ARM64) || COMPILE_TEST
-       select GENERIC_PHY
-       select MFD_SYSCON
-       help
-         Support for INNO USB2 PHY on HiSilicon SoCs. This Phy supports
-         USB 1.5Mb/s, USB 12Mb/s, USB 480Mb/s speeds. It supports one
-         USB host port to accept one USB device.
+	tristate "HiSilicon INNO USB2 PHY support"
+	depends on (ARCH_HISI && ARM64) || COMPILE_TEST
+	select GENERIC_PHY
+	select MFD_SYSCON
+	help
+	  Support for INNO USB2 PHY on HiSilicon SoCs. This Phy supports
+	  USB 1.5Mb/s, USB 12Mb/s, USB 480Mb/s speeds. It supports one
+	  USB host port to accept one USB device.
 
 config PHY_HIX5HD2_SATA
 	tristate "HIX5HD2 SATA PHY Driver"
diff --git a/drivers/phy/intel/Kconfig b/drivers/phy/intel/Kconfig
new file mode 100644
index 0000000..4ea6a88
--- /dev/null
+++ b/drivers/phy/intel/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Phy drivers for Intel Lightning Mountain (LGM) platform
+#
+config PHY_INTEL_EMMC
+	tristate "Intel EMMC PHY driver"
+	select GENERIC_PHY
+	help
+	  Enable this to support the Intel eMMC PHY.
diff --git a/drivers/phy/intel/Makefile b/drivers/phy/intel/Makefile
new file mode 100644
index 0000000..6b876a7
--- /dev/null
+++ b/drivers/phy/intel/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_INTEL_EMMC)            += phy-intel-emmc.o
diff --git a/drivers/phy/intel/phy-intel-emmc.c b/drivers/phy/intel/phy-intel-emmc.c
new file mode 100644
index 0000000..703aeb1
--- /dev/null
+++ b/drivers/phy/intel/phy-intel-emmc.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel eMMC PHY driver
+ * Copyright (C) 2019 Intel, Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* eMMC phy register definitions */
+#define EMMC_PHYCTRL0_REG	0xa8
+#define DR_TY_MASK		GENMASK(30, 28)
+#define DR_TY_SHIFT(x)		(((x) << 28) & DR_TY_MASK)
+#define OTAPDLYENA		BIT(14)
+#define OTAPDLYSEL_MASK		GENMASK(13, 10)
+#define OTAPDLYSEL_SHIFT(x)	(((x) << 10) & OTAPDLYSEL_MASK)
+
+#define EMMC_PHYCTRL1_REG	0xac
+#define PDB_MASK		BIT(0)
+#define PDB_SHIFT(x)		(((x) << 0) & PDB_MASK)
+#define ENDLL_MASK		BIT(7)
+#define ENDLL_SHIFT(x)		(((x) << 7) & ENDLL_MASK)
+
+#define EMMC_PHYCTRL2_REG	0xb0
+#define FRQSEL_25M		0
+#define FRQSEL_50M		1
+#define FRQSEL_100M		2
+#define FRQSEL_150M		3
+#define FRQSEL_MASK		GENMASK(24, 22)
+#define FRQSEL_SHIFT(x)		(((x) << 22) & FRQSEL_MASK)
+
+#define EMMC_PHYSTAT_REG	0xbc
+#define CALDONE_MASK		BIT(9)
+#define DLLRDY_MASK		BIT(8)
+#define IS_CALDONE(x)	((x) & CALDONE_MASK)
+#define IS_DLLRDY(x)	((x) & DLLRDY_MASK)
+
+struct intel_emmc_phy {
+	struct regmap *syscfg;
+	struct clk *emmcclk;
+};
+
+static int intel_emmc_phy_power(struct phy *phy, bool on_off)
+{
+	struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+	unsigned int caldone;
+	unsigned int dllrdy;
+	unsigned int freqsel;
+	unsigned long rate;
+	int ret, quot;
+
+	/*
+	 * Keep phyctrl_pdb and phyctrl_endll low to allow
+	 * initialization of CALIO state M/C DFFs
+	 */
+	ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, PDB_MASK,
+				 PDB_SHIFT(0));
+	if (ret) {
+		dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret);
+		return ret;
+	}
+
+	/* Already finished the power-off sequence above */
+	if (!on_off)
+		return 0;
+
+	rate = clk_get_rate(priv->emmcclk);
+	quot = DIV_ROUND_CLOSEST(rate, 50000000);
+	if (quot > FRQSEL_150M)
+		dev_warn(&phy->dev, "Unsupported rate: %lu\n", rate);
+	freqsel = clamp_t(int, quot, FRQSEL_25M, FRQSEL_150M);
+
+	/*
+	 * According to the user manual, calpad calibration
+	 * cycle takes more than 2us without the minimal recommended
+	 * value, so we may need a little margin here
+	 */
+	udelay(5);
+
+	ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, PDB_MASK,
+				 PDB_SHIFT(1));
+	if (ret) {
+		dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * The user manual asks the driver to wait 5 us for calpad busy
+	 * trimming. However, this value is documented to be PVT (process,
+	 * voltage and temperature) dependent, and failures have been seen
+	 * in practice, so be more tolerant when waiting for calpad busy
+	 * trimming.
+	 */
+	ret = regmap_read_poll_timeout(priv->syscfg, EMMC_PHYSTAT_REG,
+				       caldone, IS_CALDONE(caldone),
+				       0, 50);
+	if (ret) {
+		dev_err(&phy->dev, "caldone failed, ret=%d\n", ret);
+		return ret;
+	}
+
+	/* Set the frequency of the DLL operation */
+	ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL2_REG, FRQSEL_MASK,
+				 FRQSEL_SHIFT(freqsel));
+	if (ret) {
+		dev_err(&phy->dev, "set the frequency of dll failed:%d\n", ret);
+		return ret;
+	}
+
+	/* Turn on the DLL */
+	ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, ENDLL_MASK,
+				 ENDLL_SHIFT(1));
+	if (ret) {
+		dev_err(&phy->dev, "turn on the dll failed: %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * After enabling the analog DLL circuits, the docs say we need 10.2 us if
+	 * our source clock is at 50 MHz and that lock time scales linearly
+	 * with clock speed.  If we are powering on the PHY and the card clock
+	 * is super slow (like 100 kHz) this could take as long as 5.1 ms as
+	 * per the math: 10.2 us * (50000000 Hz / 100000 Hz) => 5.1 ms
+	 * Hopefully we won't be running at 100 kHz, but we should still make
+	 * sure we wait long enough.
+	 *
+	 * NOTE: There appear to be corner cases where the DLL seems to take
+	 * extra long to lock for reasons that aren't understood.  In some
+	 * extreme cases we've seen it take over 10 ms (!).  We'll be
+	 * generous and give it 50 ms.
+	 */
+	ret = regmap_read_poll_timeout(priv->syscfg,
+				       EMMC_PHYSTAT_REG,
+				       dllrdy, IS_DLLRDY(dllrdy),
+				       0, 50 * USEC_PER_MSEC);
+	if (ret) {
+		dev_err(&phy->dev, "dllrdy failed. ret=%d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int intel_emmc_phy_init(struct phy *phy)
+{
+	struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+
+	/*
+	 * We purposely get the clock here and not in probe to avoid the
+	 * circular dependency problem. We expect:
+	 * - PHY driver to probe
+	 * - SDHCI driver to start probe
+	 * - SDHCI driver to register its clock
+	 * - SDHCI driver to get the PHY
+	 * - SDHCI driver to init the PHY
+	 *
+	 * The clock is optional, so upon any error just return it to the
+	 * user like any other error.
+	 *
+	 */
+	priv->emmcclk = clk_get_optional(&phy->dev, "emmcclk");
+	if (IS_ERR(priv->emmcclk)) {
+		dev_err(&phy->dev, "ERROR: getting emmcclk\n");
+		return PTR_ERR(priv->emmcclk);
+	}
+
+	return 0;
+}
+
+static int intel_emmc_phy_exit(struct phy *phy)
+{
+	struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+
+	clk_put(priv->emmcclk);
+
+	return 0;
+}
+
+static int intel_emmc_phy_power_on(struct phy *phy)
+{
+	struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+	int ret;
+
+	/* Drive impedance: 50 Ohm */
+	ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG, DR_TY_MASK,
+				 DR_TY_SHIFT(6));
+	if (ret) {
+		dev_err(&phy->dev, "ERROR set drive-impedance-50ohm: %d\n", ret);
+		return ret;
+	}
+
+	/* Output tap delay: disable */
+	ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG, OTAPDLYENA,
+				 0);
+	if (ret) {
+		dev_err(&phy->dev, "ERROR: set output tap delay: %d\n", ret);
+		return ret;
+	}
+
+	/* Output tap delay */
+	ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG,
+				 OTAPDLYSEL_MASK, OTAPDLYSEL_SHIFT(4));
+	if (ret) {
+		dev_err(&phy->dev, "ERROR: output tap dly select: %d\n", ret);
+		return ret;
+	}
+
+	/* Power up eMMC phy analog blocks */
+	return intel_emmc_phy_power(phy, true);
+}
+
+static int intel_emmc_phy_power_off(struct phy *phy)
+{
+	/* Power down eMMC phy analog blocks */
+	return intel_emmc_phy_power(phy, false);
+}
+
+static const struct phy_ops ops = {
+	.init		= intel_emmc_phy_init,
+	.exit		= intel_emmc_phy_exit,
+	.power_on	= intel_emmc_phy_power_on,
+	.power_off	= intel_emmc_phy_power_off,
+	.owner		= THIS_MODULE,
+};
+
+static int intel_emmc_phy_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct intel_emmc_phy *priv;
+	struct phy *generic_phy;
+	struct phy_provider *phy_provider;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	/* Get eMMC phy (accessed via chiptop) regmap */
+	priv->syscfg = syscon_regmap_lookup_by_phandle(np, "intel,syscon");
+	if (IS_ERR(priv->syscfg)) {
+		dev_err(dev, "failed to find syscon\n");
+		return PTR_ERR(priv->syscfg);
+	}
+
+	generic_phy = devm_phy_create(dev, np, &ops);
+	if (IS_ERR(generic_phy)) {
+		dev_err(dev, "failed to create PHY\n");
+		return PTR_ERR(generic_phy);
+	}
+
+	phy_set_drvdata(generic_phy, priv);
+	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+	return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id intel_emmc_phy_dt_ids[] = {
+	{ .compatible = "intel,lgm-emmc-phy" },
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, intel_emmc_phy_dt_ids);
+
+static struct platform_driver intel_emmc_driver = {
+	.probe		= intel_emmc_phy_probe,
+	.driver		= {
+		.name	= "intel-emmc-phy",
+		.of_match_table = intel_emmc_phy_dt_ids,
+	},
+};
+
+module_platform_driver(intel_emmc_driver);
+
+MODULE_AUTHOR("Peter Harliman Liem <peter.harliman.liem@intel.com>");
+MODULE_DESCRIPTION("Intel eMMC PHY driver");
+MODULE_LICENSE("GPL v2");
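
As a sanity check on the DLL lock comment in intel_emmc_phy_power(), here is a small standalone sketch of the scaling it describes (10.2 us at a 50 MHz source clock, growing inversely with the card clock); the 50 ms poll timeout used by the driver is just a generous bound on top of this.

#include <stdio.h>

/* 10.2 us of DLL lock time at a 50 MHz source, scaled by 50 MHz / card clock */
static unsigned long long dll_lock_time_ns(unsigned long long card_clk_hz)
{
	return 10200ULL * 50000000ULL / card_clk_hz;
}

int main(void)
{
	printf("50 MHz  card clock: %llu ns\n", dll_lock_time_ns(50000000)); /* 10200 ns */
	printf("100 kHz card clock: %llu ns\n", dll_lock_time_ns(100000));   /* 5.1 ms   */
	return 0;
}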
diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
index 6e45796..2ff9a48 100644
--- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
+++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
@@ -386,7 +386,7 @@ static struct phy *ltq_vrx200_pcie_phy_xlate(struct device *dev,
 	default:
 		dev_err(dev, "invalid PHY mode %u\n", mode);
 		return ERR_PTR(-EINVAL);
-	};
+	}
 
 	return priv->phy;
 }
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 005e02d..8f6273c 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -10,14 +10,16 @@
 
 config PHY_BERLIN_SATA
 	tristate "Marvell Berlin SATA PHY driver"
-	depends on ARCH_BERLIN && HAS_IOMEM && OF
+	depends on ARCH_BERLIN || COMPILE_TEST
+	depends on OF && HAS_IOMEM
 	select GENERIC_PHY
 	help
 	  Enable this to support the SATA PHY on Marvell Berlin SoCs.
 
 config PHY_BERLIN_USB
 	tristate "Marvell Berlin USB PHY Driver"
-	depends on ARCH_BERLIN && RESET_CONTROLLER && HAS_IOMEM && OF
+	depends on ARCH_BERLIN || COMPILE_TEST
+	depends on OF && HAS_IOMEM && RESET_CONTROLLER
 	select GENERIC_PHY
 	help
 	  Enable this to support the USB PHY on Marvell Berlin SoCs.
@@ -95,7 +97,7 @@
 
 config PHY_PXA_USB
 	tristate "Marvell PXA USB PHY Driver"
-	depends on ARCH_PXA || ARCH_MMP
+	depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
 	select GENERIC_PHY
 	help
 	  Enable this to support Marvell PXA USB PHY driver for Marvell
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index 376f5d1..dee757c 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -3,12 +3,13 @@
 # Phy drivers for Mediatek devices
 #
 config PHY_MTK_TPHY
-    tristate "MediaTek T-PHY Driver"
-    depends on ARCH_MEDIATEK && OF
-    select GENERIC_PHY
-    help
-      Say 'Y' here to add support for MediaTek T-PHY driver,
-      it supports multiple usb2.0, usb3.0 ports, PCIe and
+	tristate "MediaTek T-PHY Driver"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	depends on OF
+	select GENERIC_PHY
+	help
+	  Say 'Y' here to add support for MediaTek T-PHY driver,
+	  it supports multiple usb2.0, usb3.0 ports, PCIe and
 	  SATA, and meanwhile supports two version T-PHY which have
 	  different banks layout, the T-PHY with shared banks between
 	  multi-ports is first version, otherwise is second version,
@@ -16,7 +17,8 @@
 
 config PHY_MTK_UFS
 	tristate "MediaTek UFS M-PHY driver"
-	depends on ARCH_MEDIATEK && OF
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	depends on OF
 	select GENERIC_PHY
 	help
 	  Support for UFS M-PHY on MediaTek chipsets.
@@ -25,10 +27,11 @@
 	  specified M-PHYs.
 
 config PHY_MTK_XSPHY
-    tristate "MediaTek XS-PHY Driver"
-    depends on ARCH_MEDIATEK && OF
-    select GENERIC_PHY
-    help
+	tristate "MediaTek XS-PHY Driver"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	depends on OF
+	select GENERIC_PHY
+	help
 	  Enable this to support the SuperSpeedPlus XS-PHY transceiver for
 	  USB3.1 GEN2 controllers on MediaTek chips. The driver supports
 	  multiple USB2.0, USB3.1 GEN2 ports.
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index b04f4fe..cd5a6c9 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -29,7 +29,7 @@ static void devm_phy_release(struct device *dev, void *res)
 {
 	struct phy *phy = *(struct phy **)res;
 
-	phy_put(phy);
+	phy_put(dev, phy);
 }
 
 static void devm_phy_provider_release(struct device *dev, void *res)
@@ -566,12 +566,12 @@ struct phy *of_phy_get(struct device_node *np, const char *con_id)
 EXPORT_SYMBOL_GPL(of_phy_get);
 
 /**
- * phy_put() - release the PHY
- * @phy: the phy returned by phy_get()
+ * of_phy_put() - release the PHY
+ * @phy: the phy returned by of_phy_get()
  *
- * Releases a refcount the caller received from phy_get().
+ * Releases a refcount the caller received from of_phy_get().
  */
-void phy_put(struct phy *phy)
+void of_phy_put(struct phy *phy)
 {
 	if (!phy || IS_ERR(phy))
 		return;
@@ -584,6 +584,20 @@ void phy_put(struct phy *phy)
 	module_put(phy->ops->owner);
 	put_device(&phy->dev);
 }
+EXPORT_SYMBOL_GPL(of_phy_put);
+
+/**
+ * phy_put() - release the PHY
+ * @dev: device that wants to release this phy
+ * @phy: the phy returned by phy_get()
+ *
+ * Releases a refcount the caller received from phy_get().
+ */
+void phy_put(struct device *dev, struct phy *phy)
+{
+	device_link_remove(dev, &phy->dev);
+	of_phy_put(phy);
+}
 EXPORT_SYMBOL_GPL(phy_put);
 
 /**
@@ -651,6 +665,7 @@ struct phy *phy_get(struct device *dev, const char *string)
 {
 	int index = 0;
 	struct phy *phy;
+	struct device_link *link;
 
 	if (string == NULL) {
 		dev_WARN(dev, "missing string\n");
@@ -672,6 +687,13 @@ struct phy *phy_get(struct device *dev, const char *string)
 
 	get_device(&phy->dev);
 
+	link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
+	if (!link) {
+		dev_err(dev, "failed to create device link to %s\n",
+			dev_name(phy->dev.parent));
+		return ERR_PTR(-EINVAL);
+	}
+
 	return phy;
 }
 EXPORT_SYMBOL_GPL(phy_get);
@@ -690,7 +712,7 @@ struct phy *phy_optional_get(struct device *dev, const char *string)
 {
 	struct phy *phy = phy_get(dev, string);
 
-	if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
+	if (PTR_ERR(phy) == -ENODEV)
 		phy = NULL;
 
 	return phy;
@@ -744,7 +766,7 @@ struct phy *devm_phy_optional_get(struct device *dev, const char *string)
 {
 	struct phy *phy = devm_phy_get(dev, string);
 
-	if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
+	if (PTR_ERR(phy) == -ENODEV)
 		phy = NULL;
 
 	return phy;
@@ -765,6 +787,7 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
 			    const char *con_id)
 {
 	struct phy **ptr, *phy;
+	struct device_link *link;
 
 	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
 	if (!ptr)
@@ -776,6 +799,14 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
 		devres_add(dev, ptr);
 	} else {
 		devres_free(ptr);
+		return phy;
+	}
+
+	link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
+	if (!link) {
+		dev_err(dev, "failed to create device link to %s\n",
+			dev_name(phy->dev.parent));
+		return ERR_PTR(-EINVAL);
 	}
 
 	return phy;
@@ -798,6 +829,7 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
 				     int index)
 {
 	struct phy **ptr, *phy;
+	struct device_link *link;
 
 	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
 	if (!ptr)
@@ -819,6 +851,13 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
 	*ptr = phy;
 	devres_add(dev, ptr);
 
+	link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
+	if (!link) {
+		dev_err(dev, "failed to create device link to %s\n",
+			dev_name(phy->dev.parent));
+		return ERR_PTR(-EINVAL);
+	}
+
 	return phy;
 }
 EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);
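
A hedged consumer-side sketch of the API change above: phy_get() now adds a stateless device link from the consumer to the PHY, and phy_put() takes the consumer device so that link can be removed, while of_phy_put() keeps the old single-argument form. The function and PHY name below are hypothetical, not taken from any in-tree user.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

static int example_consumer_attach(struct device *dev)
{
	struct phy *phy;
	int ret;

	phy = phy_get(dev, "usb3-phy");		/* creates a device link to the PHY */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);
	if (ret) {
		phy_put(dev, phy);		/* new two-argument form drops the link */
		return ret;
	}

	return 0;
}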
diff --git a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
index 42bc515..febe0ae 100644
--- a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
+++ b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
@@ -80,7 +80,7 @@ static int read_poll_timeout(void __iomem *addr, u32 mask)
 		if (readl_relaxed(addr) & mask)
 			return 0;
 
-		 usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50);
+		usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50);
 	} while (!time_after(jiffies, timeout));
 
 	return (readl_relaxed(addr) & mask) ? 0 : -ETIMEDOUT;
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 66f9172..7db2a94 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -166,8 +166,9 @@ static const unsigned int sdm845_ufsphy_regs_layout[] = {
 };
 
 static const unsigned int sm8150_ufsphy_regs_layout[] = {
-	[QPHY_START_CTRL]		= 0x00,
-	[QPHY_PCS_READY_STATUS]		= 0x180,
+	[QPHY_START_CTRL]		= QPHY_V4_PHY_START,
+	[QPHY_PCS_READY_STATUS]		= QPHY_V4_PCS_READY_STATUS,
+	[QPHY_SW_RESET]			= QPHY_V4_SW_RESET,
 };
 
 static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
@@ -885,7 +886,6 @@ static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
 };
 
 static const struct qmp_phy_init_tbl sm8150_ufsphy_serdes_tbl[] = {
-	QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x01),
 	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0xd9),
 	QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x11),
 	QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
@@ -1390,7 +1390,6 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
 	.pwrdn_ctrl		= SW_PWRDN,
 
 	.is_dual_lane_phy	= true,
-	.no_pcs_sw_reset	= true,
 };
 
 static void qcom_qmp_phy_configure(void __iomem *base,
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index ab6ff9b..90f793c 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2017, The Linux Foundation. All rights reserved.
  */
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index dbd2de4..0824b9d 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -39,6 +39,7 @@
 	tristate "Rockchip Innosilicon MIPI/LVDS/TTL PHY driver"
 	depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
 	select GENERIC_PHY
+	select GENERIC_PHY_MIPI_DPHY
 	help
 	  Enable this to support the Rockchip MIPI/LVDS/TTL PHY with
 	  Innosilicon IP block.
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
index fc729ec..a7c6c94 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
 #include <linux/pm_runtime.h>
 #include <linux/mfd/syscon.h>
 
@@ -167,31 +168,6 @@
 #define DSI_PHY_STATUS		0xb0
 #define PHY_LOCK		BIT(0)
 
-struct mipi_dphy_timing {
-	unsigned int clkmiss;
-	unsigned int clkpost;
-	unsigned int clkpre;
-	unsigned int clkprepare;
-	unsigned int clksettle;
-	unsigned int clktermen;
-	unsigned int clktrail;
-	unsigned int clkzero;
-	unsigned int dtermen;
-	unsigned int eot;
-	unsigned int hsexit;
-	unsigned int hsprepare;
-	unsigned int hszero;
-	unsigned int hssettle;
-	unsigned int hsskip;
-	unsigned int hstrail;
-	unsigned int init;
-	unsigned int lpx;
-	unsigned int taget;
-	unsigned int tago;
-	unsigned int tasure;
-	unsigned int wakeup;
-};
-
 struct inno_dsidphy {
 	struct device *dev;
 	struct clk *ref_clk;
@@ -201,7 +177,9 @@ struct inno_dsidphy {
 	void __iomem *host_base;
 	struct reset_control *rst;
 	enum phy_mode mode;
+	struct phy_configure_opts_mipi_dphy dphy_cfg;
 
+	struct clk *pll_clk;
 	struct {
 		struct clk_hw hw;
 		u8 prediv;
@@ -238,37 +216,79 @@ static void phy_update_bits(struct inno_dsidphy *inno,
 	writel(tmp, inno->phy_base + reg);
 }
 
-static void mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
-					 unsigned long period)
+static unsigned long inno_dsidphy_pll_calc_rate(struct inno_dsidphy *inno,
+						unsigned long rate)
 {
-	/* Global Operation Timing Parameters */
-	timing->clkmiss = 0;
-	timing->clkpost = 70000 + 52 * period;
-	timing->clkpre = 8 * period;
-	timing->clkprepare = 65000;
-	timing->clksettle = 95000;
-	timing->clktermen = 0;
-	timing->clktrail = 80000;
-	timing->clkzero = 260000;
-	timing->dtermen = 0;
-	timing->eot = 0;
-	timing->hsexit = 120000;
-	timing->hsprepare = 65000 + 4 * period;
-	timing->hszero = 145000 + 6 * period;
-	timing->hssettle = 85000 + 6 * period;
-	timing->hsskip = 40000;
-	timing->hstrail = max(8 * period, 60000 + 4 * period);
-	timing->init = 100000000;
-	timing->lpx = 60000;
-	timing->taget = 5 * timing->lpx;
-	timing->tago = 4 * timing->lpx;
-	timing->tasure = 2 * timing->lpx;
-	timing->wakeup = 1000000000;
+	unsigned long prate = clk_get_rate(inno->ref_clk);
+	unsigned long best_freq = 0;
+	unsigned long fref, fout;
+	u8 min_prediv, max_prediv;
+	u8 _prediv, best_prediv = 1;
+	u16 _fbdiv, best_fbdiv = 1;
+	u32 min_delta = UINT_MAX;
+
+	/*
+	 * The PLL output frequency can be calculated using a simple formula:
+	 * PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2
+	 * PLL_Output_Frequency is equal to DDR-Clock-Frequency * 2
+	 */
+	fref = prate / 2;
+	if (rate > 1000000000UL)
+		fout = 1000000000UL;
+	else
+		fout = rate;
+
+	/* 5 MHz < Fref / prediv < 40 MHz */
+	min_prediv = DIV_ROUND_UP(fref, 40000000);
+	max_prediv = fref / 5000000;
+
+	for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
+		u64 tmp;
+		u32 delta;
+
+		tmp = (u64)fout * _prediv;
+		do_div(tmp, fref);
+		_fbdiv = tmp;
+
+		/*
+		 * The possible settings of feedback divider are
+		 * 12, 13, 14, 16, ~ 511
+		 */
+		if (_fbdiv == 15)
+			continue;
+
+		if (_fbdiv < 12 || _fbdiv > 511)
+			continue;
+
+		tmp = (u64)_fbdiv * fref;
+		do_div(tmp, _prediv);
+
+		delta = abs(fout - tmp);
+		if (!delta) {
+			best_prediv = _prediv;
+			best_fbdiv = _fbdiv;
+			best_freq = tmp;
+			break;
+		} else if (delta < min_delta) {
+			best_prediv = _prediv;
+			best_fbdiv = _fbdiv;
+			best_freq = tmp;
+			min_delta = delta;
+		}
+	}
+
+	if (best_freq) {
+		inno->pll.prediv = best_prediv;
+		inno->pll.fbdiv = best_fbdiv;
+		inno->pll.rate = best_freq;
+	}
+
+	return best_freq;
 }
 
 static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
 {
-	struct mipi_dphy_timing gotp;
+	struct phy_configure_opts_mipi_dphy *cfg = &inno->dphy_cfg;
 	const struct {
 		unsigned long rate;
 		u8 hs_prepare;
@@ -288,12 +308,14 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
 		{ 800000000, 0x21, 0x1f, 0x09, 0x29},
 		{1000000000, 0x09, 0x20, 0x09, 0x27},
 	};
-	u32 t_txbyteclkhs, t_txclkesc, ui;
+	u32 t_txbyteclkhs, t_txclkesc;
 	u32 txbyteclkhs, txclkesc, esc_clk_div;
 	u32 hs_exit, clk_post, clk_pre, wakeup, lpx, ta_go, ta_sure, ta_wait;
 	u32 hs_prepare, hs_trail, hs_zero, clk_lane_hs_zero, data_lane_hs_zero;
 	unsigned int i;
 
+	inno_dsidphy_pll_calc_rate(inno, cfg->hs_clk_rate);
+
 	/* Select MIPI mode */
 	phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
 			MODE_ENABLE_MASK, MIPI_MODE_ENABLE);
@@ -328,32 +350,27 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
 	txclkesc = txbyteclkhs / esc_clk_div;
 	t_txclkesc = div_u64(PSEC_PER_SEC, txclkesc);
 
-	ui = div_u64(PSEC_PER_SEC, inno->pll.rate);
-
-	memset(&gotp, 0, sizeof(gotp));
-	mipi_dphy_timing_get_default(&gotp, ui);
-
 	/*
 	 * The value of counter for HS Ths-exit
 	 * Ths-exit = Tpin_txbyteclkhs * value
 	 */
-	hs_exit = DIV_ROUND_UP(gotp.hsexit, t_txbyteclkhs);
+	hs_exit = DIV_ROUND_UP(cfg->hs_exit, t_txbyteclkhs);
 	/*
 	 * The value of counter for HS Tclk-post
 	 * Tclk-post = Tpin_txbyteclkhs * value
 	 */
-	clk_post = DIV_ROUND_UP(gotp.clkpost, t_txbyteclkhs);
+	clk_post = DIV_ROUND_UP(cfg->clk_post, t_txbyteclkhs);
 	/*
 	 * The value of counter for HS Tclk-pre
 	 * Tclk-pre = Tpin_txbyteclkhs * value
 	 */
-	clk_pre = DIV_ROUND_UP(gotp.clkpre, t_txbyteclkhs);
+	clk_pre = DIV_ROUND_UP(cfg->clk_pre, t_txbyteclkhs);
 
 	/*
 	 * The value of counter for HS Tlpx Time
 	 * Tlpx = Tpin_txbyteclkhs * (2 + value)
 	 */
-	lpx = DIV_ROUND_UP(gotp.lpx, t_txbyteclkhs);
+	lpx = DIV_ROUND_UP(cfg->lpx, t_txbyteclkhs);
 	if (lpx >= 2)
 		lpx -= 2;
 
@@ -362,19 +379,19 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
 	 * Tta-go for turnaround
 	 * Tta-go = Ttxclkesc * value
 	 */
-	ta_go = DIV_ROUND_UP(gotp.tago, t_txclkesc);
+	ta_go = DIV_ROUND_UP(cfg->ta_go, t_txclkesc);
 	/*
 	 * The value of counter for HS Tta-sure
 	 * Tta-sure for turnaround
 	 * Tta-sure = Ttxclkesc * value
 	 */
-	ta_sure = DIV_ROUND_UP(gotp.tasure, t_txclkesc);
+	ta_sure = DIV_ROUND_UP(cfg->ta_sure, t_txclkesc);
 	/*
 	 * The value of counter for HS Tta-wait
 	 * Tta-wait for turnaround
 	 * Tta-wait = Ttxclkesc * value
 	 */
-	ta_wait = DIV_ROUND_UP(gotp.taget, t_txclkesc);
+	ta_wait = DIV_ROUND_UP(cfg->ta_get, t_txclkesc);
 
 	for (i = 0; i < ARRAY_SIZE(timings); i++)
 		if (inno->pll.rate <= timings[i].rate)
@@ -479,6 +496,7 @@ static int inno_dsidphy_power_on(struct phy *phy)
 	struct inno_dsidphy *inno = phy_get_drvdata(phy);
 
 	clk_prepare_enable(inno->pclk_phy);
+	clk_prepare_enable(inno->ref_clk);
 	pm_runtime_get_sync(inno->dev);
 
 	/* Bandgap power on */
@@ -524,6 +542,7 @@ static int inno_dsidphy_power_off(struct phy *phy)
 			LVDS_PLL_POWER_OFF | LVDS_BANDGAP_POWER_DOWN);
 
 	pm_runtime_put(inno->dev);
+	clk_disable_unprepare(inno->ref_clk);
 	clk_disable_unprepare(inno->pclk_phy);
 
 	return 0;
@@ -546,168 +565,32 @@ static int inno_dsidphy_set_mode(struct phy *phy, enum phy_mode mode,
 	return 0;
 }
 
+static int inno_dsidphy_configure(struct phy *phy,
+				  union phy_configure_opts *opts)
+{
+	struct inno_dsidphy *inno = phy_get_drvdata(phy);
+	int ret;
+
+	if (inno->mode != PHY_MODE_MIPI_DPHY)
+		return -EINVAL;
+
+	ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy);
+	if (ret)
+		return ret;
+
+	memcpy(&inno->dphy_cfg, &opts->mipi_dphy, sizeof(inno->dphy_cfg));
+
+	return 0;
+}
+
 static const struct phy_ops inno_dsidphy_ops = {
+	.configure = inno_dsidphy_configure,
 	.set_mode = inno_dsidphy_set_mode,
 	.power_on = inno_dsidphy_power_on,
 	.power_off = inno_dsidphy_power_off,
 	.owner = THIS_MODULE,
 };
 
-static unsigned long inno_dsidphy_pll_round_rate(struct inno_dsidphy *inno,
-						   unsigned long prate,
-						   unsigned long rate,
-						   u8 *prediv, u16 *fbdiv)
-{
-	unsigned long best_freq = 0;
-	unsigned long fref, fout;
-	u8 min_prediv, max_prediv;
-	u8 _prediv, best_prediv = 1;
-	u16 _fbdiv, best_fbdiv = 1;
-	u32 min_delta = UINT_MAX;
-
-	/*
-	 * The PLL output frequency can be calculated using a simple formula:
-	 * PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2
-	 * PLL_Output_Frequency: it is equal to DDR-Clock-Frequency * 2
-	 */
-	fref = prate / 2;
-	if (rate > 1000000000UL)
-		fout = 1000000000UL;
-	else
-		fout = rate;
-
-	/* 5Mhz < Fref / prediv < 40MHz */
-	min_prediv = DIV_ROUND_UP(fref, 40000000);
-	max_prediv = fref / 5000000;
-
-	for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
-		u64 tmp;
-		u32 delta;
-
-		tmp = (u64)fout * _prediv;
-		do_div(tmp, fref);
-		_fbdiv = tmp;
-
-		/*
-		 * The possible settings of feedback divider are
-		 * 12, 13, 14, 16, ~ 511
-		 */
-		if (_fbdiv == 15)
-			continue;
-
-		if (_fbdiv < 12 || _fbdiv > 511)
-			continue;
-
-		tmp = (u64)_fbdiv * fref;
-		do_div(tmp, _prediv);
-
-		delta = abs(fout - tmp);
-		if (!delta) {
-			best_prediv = _prediv;
-			best_fbdiv = _fbdiv;
-			best_freq = tmp;
-			break;
-		} else if (delta < min_delta) {
-			best_prediv = _prediv;
-			best_fbdiv = _fbdiv;
-			best_freq = tmp;
-			min_delta = delta;
-		}
-	}
-
-	if (best_freq) {
-		*prediv = best_prediv;
-		*fbdiv = best_fbdiv;
-	}
-
-	return best_freq;
-}
-
-static long inno_dsidphy_pll_clk_round_rate(struct clk_hw *hw,
-					      unsigned long rate,
-					      unsigned long *prate)
-{
-	struct inno_dsidphy *inno = hw_to_inno(hw);
-	unsigned long fout;
-	u16 fbdiv = 1;
-	u8 prediv = 1;
-
-	fout = inno_dsidphy_pll_round_rate(inno, *prate, rate,
-					     &prediv, &fbdiv);
-
-	return fout;
-}
-
-static int inno_dsidphy_pll_clk_set_rate(struct clk_hw *hw,
-					   unsigned long rate,
-					   unsigned long parent_rate)
-{
-	struct inno_dsidphy *inno = hw_to_inno(hw);
-	unsigned long fout;
-	u16 fbdiv = 1;
-	u8 prediv = 1;
-
-	fout = inno_dsidphy_pll_round_rate(inno, parent_rate, rate,
-					     &prediv, &fbdiv);
-
-	dev_dbg(inno->dev, "fin=%lu, fout=%lu, prediv=%u, fbdiv=%u\n",
-		parent_rate, fout, prediv, fbdiv);
-
-	inno->pll.prediv = prediv;
-	inno->pll.fbdiv = fbdiv;
-	inno->pll.rate = fout;
-
-	return 0;
-}
-
-static unsigned long
-inno_dsidphy_pll_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
-{
-	struct inno_dsidphy *inno = hw_to_inno(hw);
-
-	/* PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2 */
-	return (prate / inno->pll.prediv * inno->pll.fbdiv) / 2;
-}
-
-static const struct clk_ops inno_dsidphy_pll_clk_ops = {
-	.round_rate = inno_dsidphy_pll_clk_round_rate,
-	.set_rate = inno_dsidphy_pll_clk_set_rate,
-	.recalc_rate = inno_dsidphy_pll_clk_recalc_rate,
-};
-
-static int inno_dsidphy_pll_register(struct inno_dsidphy *inno)
-{
-	struct device *dev = inno->dev;
-	struct clk *clk;
-	const char *parent_name;
-	struct clk_init_data init;
-	int ret;
-
-	parent_name = __clk_get_name(inno->ref_clk);
-
-	init.name = "mipi_dphy_pll";
-	ret = of_property_read_string(dev->of_node, "clock-output-names",
-				      &init.name);
-	if (ret < 0)
-		dev_dbg(dev, "phy should set clock-output-names property\n");
-
-	init.ops = &inno_dsidphy_pll_clk_ops;
-	init.parent_names = &parent_name;
-	init.num_parents = 1;
-	init.flags = 0;
-
-	inno->pll.hw.init = &init;
-	clk = devm_clk_register(dev, &inno->pll.hw);
-	if (IS_ERR(clk)) {
-		ret = PTR_ERR(clk);
-		dev_err(dev, "failed to register PLL: %d\n", ret);
-		return ret;
-	}
-
-	return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
-					   &inno->pll.hw);
-}
-
 static int inno_dsidphy_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -764,10 +647,6 @@ static int inno_dsidphy_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	ret = inno_dsidphy_pll_register(inno);
-	if (ret)
-		return ret;
-
 	pm_runtime_enable(dev);
 
 	return 0;
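
A hedged caller-side sketch of how the new .configure hook is expected to be fed: the driver now takes its HS timing from phy_configure_opts_mipi_dphy instead of deriving defaults from the PLL period, so a DSI host would validate and hand over a D-PHY configuration before powering the PHY on. The pixel clock, bpp and lane count are made-up example values.

#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>

static int example_dsi_host_enable(struct phy *dphy)
{
	union phy_configure_opts opts;
	int ret;

	/* Fill generic D-PHY timings for a hypothetical 148.5 MHz, 24 bpp, 4-lane link */
	ret = phy_mipi_dphy_get_default_config(148500000, 24, 4,
					       &opts.mipi_dphy);
	if (ret)
		return ret;

	ret = phy_set_mode(dphy, PHY_MODE_MIPI_DPHY);
	if (ret)
		return ret;

	ret = phy_configure(dphy, &opts);	/* stored in inno->dphy_cfg */
	if (ret)
		return ret;

	return phy_power_on(dphy);
}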
diff --git a/drivers/phy/samsung/Kconfig b/drivers/phy/samsung/Kconfig
index 290a6c7..9e483d1 100644
--- a/drivers/phy/samsung/Kconfig
+++ b/drivers/phy/samsung/Kconfig
@@ -32,7 +32,7 @@
 config PHY_SAMSUNG_USB2
 	tristate "Samsung USB 2.0 PHY driver"
 	depends on HAS_IOMEM
-	depends on USB_EHCI_EXYNOS || USB_OHCI_EXYNOS || USB_DWC2
+	depends on USB_EHCI_EXYNOS || USB_OHCI_EXYNOS || USB_DWC2 || COMPILE_TEST
 	select GENERIC_PHY
 	select MFD_SYSCON
 	default ARCH_EXYNOS
@@ -60,7 +60,7 @@
 config PHY_S5PV210_USB2
 	bool "Support for S5PV210"
 	depends on PHY_SAMSUNG_USB2
-	depends on ARCH_S5PV210
+	depends on ARCH_S5PV210 || COMPILE_TEST
 	help
 	  Enable USB PHY support for S5PV210. This option requires that Samsung
 	  USB 2.0 PHY driver is enabled and means that support for this
@@ -69,7 +69,7 @@
 
 config PHY_EXYNOS5_USBDRD
 	tristate "Exynos5 SoC series USB DRD PHY driver"
-	depends on ARCH_EXYNOS && OF
+	depends on (ARCH_EXYNOS && OF) || COMPILE_TEST
 	depends on HAS_IOMEM
 	depends on USB_DWC3_EXYNOS
 	select GENERIC_PHY
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index 1748886..6dbe9d0 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -4,7 +4,7 @@
 #
 config PHY_DA8XX_USB
 	tristate "TI DA8xx USB PHY Driver"
-	depends on ARCH_DAVINCI_DA8XX
+	depends on ARCH_DAVINCI_DA8XX || COMPILE_TEST
 	select GENERIC_PHY
 	select MFD_SYSCON
 	help
@@ -14,7 +14,7 @@
 
 config PHY_DM816X_USB
 	tristate "TI dm816x USB PHY driver"
-	depends on ARCH_OMAP2PLUS
+	depends on ARCH_OMAP2PLUS || COMPILE_TEST
 	depends on USB_SUPPORT
 	select GENERIC_PHY
 	select USB_PHY
@@ -33,6 +33,22 @@
 	  This option enables support for TI AM654 SerDes PHY used for
 	  PCIe.
 
+config PHY_J721E_WIZ
+	tristate "TI J721E WIZ (SERDES Wrapper) support"
+	depends on OF && ARCH_K3 || COMPILE_TEST
+	depends on HAS_IOMEM && OF_ADDRESS
+	depends on COMMON_CLK
+	select GENERIC_PHY
+	select MULTIPLEXER
+	select REGMAP_MMIO
+	select MUX_MMIO
+	help
+	  This option enables support for the WIZ module present in TI's J721E
+	  SoC. WIZ is a serdes wrapper used to configure some of the input
+	  signals to the SERDES (Sierra/Torrent). This driver configures
+	  three clock selects (pll0, pll1, dig) and resets for each of the
+	  lanes.
+
 config OMAP_CONTROL_PHY
 	tristate "OMAP CONTROL PHY Driver"
 	depends on ARCH_OMAP2PLUS || COMPILE_TEST
diff --git a/drivers/phy/ti/Makefile b/drivers/phy/ti/Makefile
index bff901e..dcba257 100644
--- a/drivers/phy/ti/Makefile
+++ b/drivers/phy/ti/Makefile
@@ -8,3 +8,4 @@
 obj-$(CONFIG_TWL4030_USB)		+= phy-twl4030-usb.o
 obj-$(CONFIG_PHY_AM654_SERDES)		+= phy-am654-serdes.o
 obj-$(CONFIG_PHY_TI_GMII_SEL)		+= phy-gmii-sel.o
+obj-$(CONFIG_PHY_J721E_WIZ)		+= phy-j721e-wiz.o
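
A hedged sketch of how the per-clock mux tables defined further down in phy-j721e-wiz.c are meant to be used: the parent index follows the order of the input clocks in the device tree, and the table entry is the raw value written to the WIZ mux register field. The set_parent helper below is an assumption for illustration, not quoted from the driver.

#include <linux/regmap.h>

struct example_wiz_mux {
	struct regmap_field	*field;
	const u32		*table;	/* e.g. { 1, 3, 0, 2 } for refclk-dig on 16G */
};

static int example_wiz_mux_set_parent(struct example_wiz_mux *mux,
				      unsigned int parent_index)
{
	/* Translate DT parent order into the register encoding */
	return regmap_field_write(mux->field, mux->table[parent_index]);
}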
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
new file mode 100644
index 0000000..7b51045d
--- /dev/null
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -0,0 +1,959 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wrapper driver for SERDES used in J721E
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#define WIZ_SERDES_CTRL		0x404
+#define WIZ_SERDES_TOP_CTRL	0x408
+#define WIZ_SERDES_RST		0x40c
+#define WIZ_SERDES_TYPEC	0x410
+#define WIZ_LANECTL(n)		(0x480 + (0x40 * (n)))
+
+#define WIZ_MAX_LANES		4
+#define WIZ_MUX_NUM_CLOCKS	3
+#define WIZ_DIV_NUM_CLOCKS_16G	2
+#define WIZ_DIV_NUM_CLOCKS_10G	1
+
+#define WIZ_SERDES_TYPEC_LN10_SWAP	BIT(30)
+
+enum wiz_lane_standard_mode {
+	LANE_MODE_GEN1,
+	LANE_MODE_GEN2,
+	LANE_MODE_GEN3,
+	LANE_MODE_GEN4,
+};
+
+enum wiz_refclk_mux_sel {
+	PLL0_REFCLK,
+	PLL1_REFCLK,
+	REFCLK_DIG,
+};
+
+enum wiz_refclk_div_sel {
+	CMN_REFCLK_DIG_DIV,
+	CMN_REFCLK1_DIG_DIV,
+};
+
+static const struct reg_field por_en = REG_FIELD(WIZ_SERDES_CTRL, 31, 31);
+static const struct reg_field phy_reset_n = REG_FIELD(WIZ_SERDES_RST, 31, 31);
+static const struct reg_field pll1_refclk_mux_sel =
+					REG_FIELD(WIZ_SERDES_RST, 29, 29);
+static const struct reg_field pll0_refclk_mux_sel =
+					REG_FIELD(WIZ_SERDES_RST, 28, 28);
+static const struct reg_field refclk_dig_sel_16g =
+					REG_FIELD(WIZ_SERDES_RST, 24, 25);
+static const struct reg_field refclk_dig_sel_10g =
+					REG_FIELD(WIZ_SERDES_RST, 24, 24);
+static const struct reg_field pma_cmn_refclk_int_mode =
+					REG_FIELD(WIZ_SERDES_TOP_CTRL, 28, 29);
+static const struct reg_field pma_cmn_refclk_mode =
+					REG_FIELD(WIZ_SERDES_TOP_CTRL, 30, 31);
+static const struct reg_field pma_cmn_refclk_dig_div =
+					REG_FIELD(WIZ_SERDES_TOP_CTRL, 26, 27);
+static const struct reg_field pma_cmn_refclk1_dig_div =
+					REG_FIELD(WIZ_SERDES_TOP_CTRL, 24, 25);
+
+static const struct reg_field p_enable[WIZ_MAX_LANES] = {
+	REG_FIELD(WIZ_LANECTL(0), 30, 31),
+	REG_FIELD(WIZ_LANECTL(1), 30, 31),
+	REG_FIELD(WIZ_LANECTL(2), 30, 31),
+	REG_FIELD(WIZ_LANECTL(3), 30, 31),
+};
+
+static const struct reg_field p_align[WIZ_MAX_LANES] = {
+	REG_FIELD(WIZ_LANECTL(0), 29, 29),
+	REG_FIELD(WIZ_LANECTL(1), 29, 29),
+	REG_FIELD(WIZ_LANECTL(2), 29, 29),
+	REG_FIELD(WIZ_LANECTL(3), 29, 29),
+};
+
+static const struct reg_field p_raw_auto_start[WIZ_MAX_LANES] = {
+	REG_FIELD(WIZ_LANECTL(0), 28, 28),
+	REG_FIELD(WIZ_LANECTL(1), 28, 28),
+	REG_FIELD(WIZ_LANECTL(2), 28, 28),
+	REG_FIELD(WIZ_LANECTL(3), 28, 28),
+};
+
+static const struct reg_field p_standard_mode[WIZ_MAX_LANES] = {
+	REG_FIELD(WIZ_LANECTL(0), 24, 25),
+	REG_FIELD(WIZ_LANECTL(1), 24, 25),
+	REG_FIELD(WIZ_LANECTL(2), 24, 25),
+	REG_FIELD(WIZ_LANECTL(3), 24, 25),
+};
+
+static const struct reg_field typec_ln10_swap =
+					REG_FIELD(WIZ_SERDES_TYPEC, 30, 30);
+
+struct wiz_clk_mux {
+	struct clk_hw		hw;
+	struct regmap_field	*field;
+	u32			*table;
+	struct clk_init_data	clk_data;
+};
+
+#define to_wiz_clk_mux(_hw) container_of(_hw, struct wiz_clk_mux, hw)
+
+struct wiz_clk_divider {
+	struct clk_hw		hw;
+	struct regmap_field	*field;
+	struct clk_div_table	*table;
+	struct clk_init_data	clk_data;
+};
+
+#define to_wiz_clk_div(_hw) container_of(_hw, struct wiz_clk_divider, hw)
+
+struct wiz_clk_mux_sel {
+	struct regmap_field	*field;
+	u32			table[4];
+	const char		*node_name;
+};
+
+struct wiz_clk_div_sel {
+	struct regmap_field	*field;
+	struct clk_div_table	*table;
+	const char		*node_name;
+};
+
+static struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
+	{
+		/*
+		 * Mux value to be configured for each of the input clocks
+		 * in the order populated in device tree
+		 */
+		.table = { 1, 0 },
+		.node_name = "pll0-refclk",
+	},
+	{
+		.table = { 1, 0 },
+		.node_name = "pll1-refclk",
+	},
+	{
+		.table = { 1, 3, 0, 2 },
+		.node_name = "refclk-dig",
+	},
+};
+
+static struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
+	{
+		/*
+		 * Mux value to be configured for each of the input clocks
+		 * in the order populated in device tree
+		 */
+		.table = { 1, 0 },
+		.node_name = "pll0-refclk",
+	},
+	{
+		.table = { 1, 0 },
+		.node_name = "pll1-refclk",
+	},
+	{
+		.table = { 1, 0 },
+		.node_name = "refclk-dig",
+	},
+};
+
+static struct clk_div_table clk_div_table[] = {
+	{ .val = 0, .div = 1, },
+	{ .val = 1, .div = 2, },
+	{ .val = 2, .div = 4, },
+	{ .val = 3, .div = 8, },
+};
+
+static struct wiz_clk_div_sel clk_div_sel[] = {
+	{
+		.table = clk_div_table,
+		.node_name = "cmn-refclk-dig-div",
+	},
+	{
+		.table = clk_div_table,
+		.node_name = "cmn-refclk1-dig-div",
+	},
+};
+
+enum wiz_type {
+	J721E_WIZ_16G,
+	J721E_WIZ_10G,
+};
+
+#define WIZ_TYPEC_DIR_DEBOUNCE_MIN	100	/* ms */
+#define WIZ_TYPEC_DIR_DEBOUNCE_MAX	1000
+
+struct wiz {
+	struct regmap		*regmap;
+	enum wiz_type		type;
+	struct wiz_clk_mux_sel	*clk_mux_sel;
+	struct wiz_clk_div_sel	*clk_div_sel;
+	unsigned int		clk_div_sel_num;
+	struct regmap_field	*por_en;
+	struct regmap_field	*phy_reset_n;
+	struct regmap_field	*p_enable[WIZ_MAX_LANES];
+	struct regmap_field	*p_align[WIZ_MAX_LANES];
+	struct regmap_field	*p_raw_auto_start[WIZ_MAX_LANES];
+	struct regmap_field	*p_standard_mode[WIZ_MAX_LANES];
+	struct regmap_field	*pma_cmn_refclk_int_mode;
+	struct regmap_field	*pma_cmn_refclk_mode;
+	struct regmap_field	*pma_cmn_refclk_dig_div;
+	struct regmap_field	*pma_cmn_refclk1_dig_div;
+	struct regmap_field	*typec_ln10_swap;
+
+	struct device		*dev;
+	u32			num_lanes;
+	struct platform_device	*serdes_pdev;
+	struct reset_controller_dev wiz_phy_reset_dev;
+	struct gpio_desc	*gpio_typec_dir;
+	int			typec_dir_delay;
+};
+
+static int wiz_reset(struct wiz *wiz)
+{
+	int ret;
+
+	ret = regmap_field_write(wiz->por_en, 0x1);
+	if (ret)
+		return ret;
+
+	mdelay(1);
+
+	ret = regmap_field_write(wiz->por_en, 0x0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int wiz_mode_select(struct wiz *wiz)
+{
+	u32 num_lanes = wiz->num_lanes;
+	int ret;
+	int i;
+
+	for (i = 0; i < num_lanes; i++) {
+		ret = regmap_field_write(wiz->p_standard_mode[i],
+					 LANE_MODE_GEN4);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int wiz_init_raw_interface(struct wiz *wiz, bool enable)
+{
+	u32 num_lanes = wiz->num_lanes;
+	int i;
+	int ret;
+
+	for (i = 0; i < num_lanes; i++) {
+		ret = regmap_field_write(wiz->p_align[i], enable);
+		if (ret)
+			return ret;
+
+		ret = regmap_field_write(wiz->p_raw_auto_start[i], enable);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int wiz_init(struct wiz *wiz)
+{
+	struct device *dev = wiz->dev;
+	int ret;
+
+	ret = wiz_reset(wiz);
+	if (ret) {
+		dev_err(dev, "WIZ reset failed\n");
+		return ret;
+	}
+
+	ret = wiz_mode_select(wiz);
+	if (ret) {
+		dev_err(dev, "WIZ mode select failed\n");
+		return ret;
+	}
+
+	ret = wiz_init_raw_interface(wiz, true);
+	if (ret) {
+		dev_err(dev, "WIZ interface initialization failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int wiz_regfield_init(struct wiz *wiz)
+{
+	struct wiz_clk_mux_sel *clk_mux_sel;
+	struct wiz_clk_div_sel *clk_div_sel;
+	struct regmap *regmap = wiz->regmap;
+	int num_lanes = wiz->num_lanes;
+	struct device *dev = wiz->dev;
+	int i;
+
+	wiz->por_en = devm_regmap_field_alloc(dev, regmap, por_en);
+	if (IS_ERR(wiz->por_en)) {
+		dev_err(dev, "POR_EN reg field init failed\n");
+		return PTR_ERR(wiz->por_en);
+	}
+
+	wiz->phy_reset_n = devm_regmap_field_alloc(dev, regmap,
+						   phy_reset_n);
+	if (IS_ERR(wiz->phy_reset_n)) {
+		dev_err(dev, "PHY_RESET_N reg field init failed\n");
+		return PTR_ERR(wiz->phy_reset_n);
+	}
+
+	wiz->pma_cmn_refclk_int_mode =
+		devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_int_mode);
+	if (IS_ERR(wiz->pma_cmn_refclk_int_mode)) {
+		dev_err(dev, "PMA_CMN_REFCLK_INT_MODE reg field init failed\n");
+		return PTR_ERR(wiz->pma_cmn_refclk_int_mode);
+	}
+
+	wiz->pma_cmn_refclk_mode =
+		devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_mode);
+	if (IS_ERR(wiz->pma_cmn_refclk_mode)) {
+		dev_err(dev, "PMA_CMN_REFCLK_MODE reg field init failed\n");
+		return PTR_ERR(wiz->pma_cmn_refclk_mode);
+	}
+
+	clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK_DIG_DIV];
+	clk_div_sel->field = devm_regmap_field_alloc(dev, regmap,
+						     pma_cmn_refclk_dig_div);
+	if (IS_ERR(clk_div_sel->field)) {
+		dev_err(dev, "PMA_CMN_REFCLK_DIG_DIV reg field init failed\n");
+		return PTR_ERR(clk_div_sel->field);
+	}
+
+	if (wiz->type == J721E_WIZ_16G) {
+		clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK1_DIG_DIV];
+		clk_div_sel->field =
+			devm_regmap_field_alloc(dev, regmap,
+						pma_cmn_refclk1_dig_div);
+		if (IS_ERR(clk_div_sel->field)) {
+			dev_err(dev, "PMA_CMN_REFCLK1_DIG_DIV reg field init failed\n");
+			return PTR_ERR(clk_div_sel->field);
+		}
+	}
+
+	clk_mux_sel = &wiz->clk_mux_sel[PLL0_REFCLK];
+	clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
+						     pll0_refclk_mux_sel);
+	if (IS_ERR(clk_mux_sel->field)) {
+		dev_err(dev, "PLL0_REFCLK_SEL reg field init failed\n");
+		return PTR_ERR(clk_mux_sel->field);
+	}
+
+	clk_mux_sel = &wiz->clk_mux_sel[PLL1_REFCLK];
+	clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
+						     pll1_refclk_mux_sel);
+	if (IS_ERR(clk_mux_sel->field)) {
+		dev_err(dev, "PLL1_REFCLK_SEL reg field init failed\n");
+		return PTR_ERR(clk_mux_sel->field);
+	}
+
+	clk_mux_sel = &wiz->clk_mux_sel[REFCLK_DIG];
+	if (wiz->type == J721E_WIZ_10G)
+		clk_mux_sel->field =
+			devm_regmap_field_alloc(dev, regmap,
+						refclk_dig_sel_10g);
+	else
+		clk_mux_sel->field =
+			devm_regmap_field_alloc(dev, regmap,
+						refclk_dig_sel_16g);
+
+	if (IS_ERR(clk_mux_sel->field)) {
+		dev_err(dev, "REFCLK_DIG_SEL reg field init failed\n");
+		return PTR_ERR(clk_mux_sel->field);
+	}
+
+	for (i = 0; i < num_lanes; i++) {
+		wiz->p_enable[i] = devm_regmap_field_alloc(dev, regmap,
+							   p_enable[i]);
+		if (IS_ERR(wiz->p_enable[i])) {
+			dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
+			return PTR_ERR(wiz->p_enable[i]);
+		}
+
+		wiz->p_align[i] = devm_regmap_field_alloc(dev, regmap,
+							  p_align[i]);
+		if (IS_ERR(wiz->p_align[i])) {
+			dev_err(dev, "P%d_ALIGN reg field init failed\n", i);
+			return PTR_ERR(wiz->p_align[i]);
+		}
+
+		wiz->p_raw_auto_start[i] =
+		  devm_regmap_field_alloc(dev, regmap, p_raw_auto_start[i]);
+		if (IS_ERR(wiz->p_raw_auto_start[i])) {
+			dev_err(dev, "P%d_RAW_AUTO_START reg field init fail\n",
+				i);
+			return PTR_ERR(wiz->p_raw_auto_start[i]);
+		}
+
+		wiz->p_standard_mode[i] =
+		  devm_regmap_field_alloc(dev, regmap, p_standard_mode[i]);
+		if (IS_ERR(wiz->p_standard_mode[i])) {
+			dev_err(dev, "P%d_STANDARD_MODE reg field init fail\n",
+				i);
+			return PTR_ERR(wiz->p_standard_mode[i]);
+		}
+	}
+
+	wiz->typec_ln10_swap = devm_regmap_field_alloc(dev, regmap,
+						       typec_ln10_swap);
+	if (IS_ERR(wiz->typec_ln10_swap)) {
+		dev_err(dev, "LN10_SWAP reg field init failed\n");
+		return PTR_ERR(wiz->typec_ln10_swap);
+	}
+
+	return 0;
+}
+
+static u8 wiz_clk_mux_get_parent(struct clk_hw *hw)
+{
+	struct wiz_clk_mux *mux = to_wiz_clk_mux(hw);
+	struct regmap_field *field = mux->field;
+	unsigned int val;
+
+	regmap_field_read(field, &val);
+	return clk_mux_val_to_index(hw, mux->table, 0, val);
+}
+
+static int wiz_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct wiz_clk_mux *mux = to_wiz_clk_mux(hw);
+	struct regmap_field *field = mux->field;
+	int val;
+
+	val = mux->table[index];
+	return regmap_field_write(field, val);
+}
+
+static const struct clk_ops wiz_clk_mux_ops = {
+	.set_parent = wiz_clk_mux_set_parent,
+	.get_parent = wiz_clk_mux_get_parent,
+};
+
+static int wiz_mux_clk_register(struct wiz *wiz, struct device_node *node,
+				struct regmap_field *field, u32 *table)
+{
+	struct device *dev = wiz->dev;
+	struct clk_init_data *init;
+	const char **parent_names;
+	unsigned int num_parents;
+	struct wiz_clk_mux *mux;
+	char clk_name[100];
+	struct clk *clk;
+	int ret;
+
+	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+	if (!mux)
+		return -ENOMEM;
+
+	num_parents = of_clk_get_parent_count(node);
+	if (num_parents < 2) {
+		dev_err(dev, "SERDES clock must have parents\n");
+		return -EINVAL;
+	}
+
+	parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents),
+				    GFP_KERNEL);
+	if (!parent_names)
+		return -ENOMEM;
+
+	of_clk_parent_fill(node, parent_names, num_parents);
+
+	snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+		 node->name);
+
+	init = &mux->clk_data;
+
+	init->ops = &wiz_clk_mux_ops;
+	init->flags = CLK_SET_RATE_NO_REPARENT;
+	init->parent_names = parent_names;
+	init->num_parents = num_parents;
+	init->name = clk_name;
+
+	mux->field = field;
+	mux->table = table;
+	mux->hw.init = init;
+
+	clk = devm_clk_register(dev, &mux->hw);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+	if (ret)
+		dev_err(dev, "Failed to add clock provider: %s\n", clk_name);
+
+	return ret;
+}
+
+static unsigned long wiz_clk_div_recalc_rate(struct clk_hw *hw,
+					     unsigned long parent_rate)
+{
+	struct wiz_clk_divider *div = to_wiz_clk_div(hw);
+	struct regmap_field *field = div->field;
+	unsigned int val;
+
+	regmap_field_read(field, &val);
+
+	return divider_recalc_rate(hw, parent_rate, val, div->table, 0x0, 2);
+}
+
+static long wiz_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+				   unsigned long *prate)
+{
+	struct wiz_clk_divider *div = to_wiz_clk_div(hw);
+
+	return divider_round_rate(hw, rate, prate, div->table, 2, 0x0);
+}
+
+static int wiz_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate)
+{
+	struct wiz_clk_divider *div = to_wiz_clk_div(hw);
+	struct regmap_field *field = div->field;
+	int val;
+
+	val = divider_get_val(rate, parent_rate, div->table, 2, 0x0);
+	if (val < 0)
+		return val;
+
+	return regmap_field_write(field, val);
+}
+
+static const struct clk_ops wiz_clk_div_ops = {
+	.recalc_rate = wiz_clk_div_recalc_rate,
+	.round_rate = wiz_clk_div_round_rate,
+	.set_rate = wiz_clk_div_set_rate,
+};
+
+static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node,
+				struct regmap_field *field,
+				struct clk_div_table *table)
+{
+	struct device *dev = wiz->dev;
+	struct wiz_clk_divider *div;
+	struct clk_init_data *init;
+	const char **parent_names;
+	char clk_name[100];
+	struct clk *clk;
+	int ret;
+
+	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
+	if (!div)
+		return -ENOMEM;
+
+	snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+		 node->name);
+
+	parent_names = devm_kzalloc(dev, sizeof(char *), GFP_KERNEL);
+	if (!parent_names)
+		return -ENOMEM;
+
+	of_clk_parent_fill(node, parent_names, 1);
+
+	init = &div->clk_data;
+
+	init->ops = &wiz_clk_div_ops;
+	init->flags = 0;
+	init->parent_names = parent_names;
+	init->num_parents = 1;
+	init->name = clk_name;
+
+	div->field = field;
+	div->table = table;
+	div->hw.init = init;
+
+	clk = devm_clk_register(dev, &div->hw);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+	if (ret)
+		dev_err(dev, "Failed to add clock provider: %s\n", clk_name);
+
+	return ret;
+}
+
+static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
+{
+	struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+	struct device_node *clk_node;
+	int i;
+
+	for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
+		clk_node = of_get_child_by_name(node, clk_mux_sel[i].node_name);
+		of_clk_del_provider(clk_node);
+		of_node_put(clk_node);
+	}
+}
+
+static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
+{
+	struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+	struct device *dev = wiz->dev;
+	struct device_node *clk_node;
+	const char *node_name;
+	unsigned long rate;
+	struct clk *clk;
+	int ret;
+	int i;
+
+	clk = devm_clk_get(dev, "core_ref_clk");
+	if (IS_ERR(clk)) {
+		dev_err(dev, "core_ref_clk clock not found\n");
+		ret = PTR_ERR(clk);
+		return ret;
+	}
+
+	rate = clk_get_rate(clk);
+	if (rate >= 100000000)
+		regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x1);
+	else
+		regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x3);
+
+	clk = devm_clk_get(dev, "ext_ref_clk");
+	if (IS_ERR(clk)) {
+		dev_err(dev, "ext_ref_clk clock not found\n");
+		ret = PTR_ERR(clk);
+		return ret;
+	}
+
+	rate = clk_get_rate(clk);
+	if (rate >= 100000000)
+		regmap_field_write(wiz->pma_cmn_refclk_mode, 0x0);
+	else
+		regmap_field_write(wiz->pma_cmn_refclk_mode, 0x2);
+
+	for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
+		node_name = clk_mux_sel[i].node_name;
+		clk_node = of_get_child_by_name(node, node_name);
+		if (!clk_node) {
+			dev_err(dev, "Unable to get %s node\n", node_name);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		ret = wiz_mux_clk_register(wiz, clk_node, clk_mux_sel[i].field,
+					   clk_mux_sel[i].table);
+		if (ret) {
+			dev_err(dev, "Failed to register %s clock\n",
+				node_name);
+			of_node_put(clk_node);
+			goto err;
+		}
+
+		of_node_put(clk_node);
+	}
+
+	for (i = 0; i < wiz->clk_div_sel_num; i++) {
+		node_name = clk_div_sel[i].node_name;
+		clk_node = of_get_child_by_name(node, node_name);
+		if (!clk_node) {
+			dev_err(dev, "Unable to get %s node\n", node_name);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		ret = wiz_div_clk_register(wiz, clk_node, clk_div_sel[i].field,
+					   clk_div_sel[i].table);
+		if (ret) {
+			dev_err(dev, "Failed to register %s clock\n",
+				node_name);
+			of_node_put(clk_node);
+			goto err;
+		}
+
+		of_node_put(clk_node);
+	}
+
+	return 0;
+err:
+	wiz_clock_cleanup(wiz, node);
+
+	return ret;
+}
+
+static int wiz_phy_reset_assert(struct reset_controller_dev *rcdev,
+				unsigned long id)
+{
+	struct device *dev = rcdev->dev;
+	struct wiz *wiz = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (id == 0) {
+		ret = regmap_field_write(wiz->phy_reset_n, false);
+		return ret;
+	}
+
+	ret = regmap_field_write(wiz->p_enable[id - 1], false);
+	return ret;
+}
+
+static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
+				  unsigned long id)
+{
+	struct device *dev = rcdev->dev;
+	struct wiz *wiz = dev_get_drvdata(dev);
+	int ret;
+
+	/* if typec-dir gpio was specified, set LN10 SWAP bit based on that */
+	if (id == 0 && wiz->gpio_typec_dir) {
+		if (wiz->typec_dir_delay)
+			msleep_interruptible(wiz->typec_dir_delay);
+
+		if (gpiod_get_value_cansleep(wiz->gpio_typec_dir))
+			regmap_field_write(wiz->typec_ln10_swap, 1);
+		else
+			regmap_field_write(wiz->typec_ln10_swap, 0);
+	}
+
+	if (id == 0) {
+		ret = regmap_field_write(wiz->phy_reset_n, true);
+		return ret;
+	}
+
+	ret = regmap_field_write(wiz->p_enable[id - 1], true);
+	return ret;
+}
+
+static const struct reset_control_ops wiz_phy_reset_ops = {
+	.assert = wiz_phy_reset_assert,
+	.deassert = wiz_phy_reset_deassert,
+};
+
+static struct regmap_config wiz_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+	.fast_io = true,
+};
+
+static const struct of_device_id wiz_id_table[] = {
+	{
+		.compatible = "ti,j721e-wiz-16g", .data = (void *)J721E_WIZ_16G
+	},
+	{
+		.compatible = "ti,j721e-wiz-10g", .data = (void *)J721E_WIZ_10G
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, wiz_id_table);
+
+static int wiz_probe(struct platform_device *pdev)
+{
+	struct reset_controller_dev *phy_reset_dev;
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct platform_device *serdes_pdev;
+	struct device_node *child_node;
+	struct regmap *regmap;
+	struct resource res;
+	void __iomem *base;
+	struct wiz *wiz;
+	u32 num_lanes;
+	int ret;
+
+	wiz = devm_kzalloc(dev, sizeof(*wiz), GFP_KERNEL);
+	if (!wiz)
+		return -ENOMEM;
+
+	wiz->type = (enum wiz_type)of_device_get_match_data(dev);
+
+	child_node = of_get_child_by_name(node, "serdes");
+	if (!child_node) {
+		dev_err(dev, "Failed to get SERDES child DT node\n");
+		return -ENODEV;
+	}
+
+	ret = of_address_to_resource(child_node, 0, &res);
+	if (ret) {
+		dev_err(dev, "Failed to get memory resource\n");
+		goto err_addr_to_resource;
+	}
+
+	base = devm_ioremap(dev, res.start, resource_size(&res));
+	if (!base) {
+		ret = -ENOMEM;
+		goto err_addr_to_resource;
+	}
+
+	regmap = devm_regmap_init_mmio(dev, base, &wiz_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(dev, "Failed to initialize regmap\n");
+		ret = PTR_ERR(regmap);
+		goto err_addr_to_resource;
+	}
+
+	ret = of_property_read_u32(node, "num-lanes", &num_lanes);
+	if (ret) {
+		dev_err(dev, "Failed to read num-lanes property\n");
+		goto err_addr_to_resource;
+	}
+
+	if (num_lanes > WIZ_MAX_LANES) {
+		dev_err(dev, "Cannot support %d lanes\n", num_lanes);
+		ret = -EINVAL;
+		goto err_addr_to_resource;
+	}
+
+	wiz->gpio_typec_dir = devm_gpiod_get_optional(dev, "typec-dir",
+						      GPIOD_IN);
+	if (IS_ERR(wiz->gpio_typec_dir)) {
+		ret = PTR_ERR(wiz->gpio_typec_dir);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Failed to request typec-dir gpio: %d\n",
+				ret);
+		goto err_addr_to_resource;
+	}
+
+	if (wiz->gpio_typec_dir) {
+		ret = of_property_read_u32(node, "typec-dir-debounce-ms",
+					   &wiz->typec_dir_delay);
+		if (ret && ret != -EINVAL) {
+			dev_err(dev, "Invalid typec-dir-debounce property\n");
+			goto err_addr_to_resource;
+		}
+
+		/* use min. debounce from Type-C spec if not provided in DT */
+		if (ret == -EINVAL)
+			wiz->typec_dir_delay = WIZ_TYPEC_DIR_DEBOUNCE_MIN;
+
+		if (wiz->typec_dir_delay < WIZ_TYPEC_DIR_DEBOUNCE_MIN ||
+		    wiz->typec_dir_delay > WIZ_TYPEC_DIR_DEBOUNCE_MAX) {
+			dev_err(dev, "Invalid typec-dir-debounce property\n");
+			goto err_addr_to_resource;
+		}
+	}
+
+	wiz->dev = dev;
+	wiz->regmap = regmap;
+	wiz->num_lanes = num_lanes;
+	if (wiz->type == J721E_WIZ_10G)
+		wiz->clk_mux_sel = clk_mux_sel_10g;
+	else
+		wiz->clk_mux_sel = clk_mux_sel_16g;
+
+	wiz->clk_div_sel = clk_div_sel;
+
+	if (wiz->type == J721E_WIZ_10G)
+		wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G;
+	else
+		wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_16G;
+
+	platform_set_drvdata(pdev, wiz);
+
+	ret = wiz_regfield_init(wiz);
+	if (ret) {
+		dev_err(dev, "Failed to initialize regfields\n");
+		goto err_addr_to_resource;
+	}
+
+	phy_reset_dev = &wiz->wiz_phy_reset_dev;
+	phy_reset_dev->dev = dev;
+	phy_reset_dev->ops = &wiz_phy_reset_ops;
+	phy_reset_dev->owner = THIS_MODULE;
+	phy_reset_dev->of_node = node;
+	/* Reset for each of the lanes and one for the entire SERDES */
+	phy_reset_dev->nr_resets = num_lanes + 1;
+
+	ret = devm_reset_controller_register(dev, phy_reset_dev);
+	if (ret < 0) {
+		dev_warn(dev, "Failed to register reset controller\n");
+		goto err_addr_to_resource;
+	}
+
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(dev, "pm_runtime_get_sync failed\n");
+		goto err_get_sync;
+	}
+
+	ret = wiz_clock_init(wiz, node);
+	if (ret < 0) {
+		dev_warn(dev, "Failed to initialize clocks\n");
+		goto err_get_sync;
+	}
+
+	serdes_pdev = of_platform_device_create(child_node, NULL, dev);
+	if (!serdes_pdev) {
+		dev_WARN(dev, "Unable to create SERDES platform device\n");
+		ret = -ENODEV;
+		goto err_pdev_create;
+	}
+	wiz->serdes_pdev = serdes_pdev;
+
+	ret = wiz_init(wiz);
+	if (ret) {
+		dev_err(dev, "WIZ initialization failed\n");
+		goto err_wiz_init;
+	}
+
+	of_node_put(child_node);
+	return 0;
+
+err_wiz_init:
+	of_platform_device_destroy(&serdes_pdev->dev, NULL);
+
+err_pdev_create:
+	wiz_clock_cleanup(wiz, node);
+
+err_get_sync:
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+
+err_addr_to_resource:
+	of_node_put(child_node);
+
+	return ret;
+}
+
+static int wiz_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct platform_device *serdes_pdev;
+	struct wiz *wiz;
+
+	wiz = dev_get_drvdata(dev);
+	serdes_pdev = wiz->serdes_pdev;
+
+	of_platform_device_destroy(&serdes_pdev->dev, NULL);
+	wiz_clock_cleanup(wiz, node);
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+
+	return 0;
+}
+
+static struct platform_driver wiz_driver = {
+	.probe		= wiz_probe,
+	.remove		= wiz_remove,
+	.driver		= {
+		.name	= "wiz",
+		.of_match_table = wiz_id_table,
+	},
+};
+module_platform_driver(wiz_driver);
+
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("TI J721E WIZ driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index edd6859..a879465 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -850,6 +850,12 @@ static int ti_pipe3_probe(struct platform_device *pdev)
 
 static int ti_pipe3_remove(struct platform_device *pdev)
 {
+	struct ti_pipe3 *phy = platform_get_drvdata(pdev);
+
+	if (phy->mode == PIPE3_MODE_SATA) {
+		clk_disable_unprepare(phy->refclk);
+		phy->sata_refclk_enabled = false;
+	}
 	pm_runtime_disable(&pdev->dev);
 
 	return 0;
@@ -900,18 +906,8 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
 {
 	if (!IS_ERR(phy->wkupclk))
 		clk_disable_unprepare(phy->wkupclk);
-	if (!IS_ERR(phy->refclk)) {
+	if (!IS_ERR(phy->refclk))
 		clk_disable_unprepare(phy->refclk);
-		/*
-		 * SATA refclk needs an additional disable as we left it
-		 * on in probe to avoid Errata i783
-		 */
-		if (phy->sata_refclk_enabled) {
-			clk_disable_unprepare(phy->refclk);
-			phy->sata_refclk_enabled = false;
-		}
-	}
-
 	if (!IS_ERR(phy->div_clk))
 		clk_disable_unprepare(phy->div_clk);
 }
diff --git a/drivers/pinctrl/actions/pinctrl-s700.c b/drivers/pinctrl/actions/pinctrl-s700.c
index 8b8121e..771d6fd 100644
--- a/drivers/pinctrl/actions/pinctrl-s700.c
+++ b/drivers/pinctrl/actions/pinctrl-s700.c
@@ -1583,7 +1583,6 @@ static const struct owl_pinmux_func s700_functions[] = {
 	[S700_MUX_USB30] = FUNCTION(usb30),
 	[S700_MUX_CLKO_25M] = FUNCTION(clko_25m),
 	[S700_MUX_MIPI_CSI] = FUNCTION(mipi_csi),
-	[S700_MUX_DSI] = FUNCTION(dsi),
 	[S700_MUX_NAND] = FUNCTION(nand),
 	[S700_MUX_SPDIF] = FUNCTION(spdif),
 	[S700_MUX_SIRQ0] = FUNCTION(sirq0),
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
index 95ea593..bfed0e2 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
@@ -2439,88 +2439,88 @@ static const struct aspeed_pin_function aspeed_g4_functions[] = {
 
 static const struct aspeed_pin_config aspeed_g4_configs[] = {
 	/* GPIO banks ranges [A, B], [D, J], [M, R] */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { D6,  D5  }, SCU8C, 16 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { D6,  D5  }, SCU8C, 16 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { J21, E18 }, SCU8C, 17 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { J21, E18 }, SCU8C, 17 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { A18, E15 }, SCU8C, 19 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { A18, E15 }, SCU8C, 19 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { D15, B14 }, SCU8C, 20 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { D15, B14 }, SCU8C, 20 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { D18, C17 }, SCU8C, 21 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { D18, C17 }, SCU8C, 21 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { A14, U18 }, SCU8C, 22 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { A14, U18 }, SCU8C, 22 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { A8,  E7  }, SCU8C, 23 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { A8,  E7  }, SCU8C, 23 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { C22, E20 }, SCU8C, 24 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { C22, E20 }, SCU8C, 24 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { J5,  T1  }, SCU8C, 25 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { J5,  T1  }, SCU8C, 25 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { U1,  U5  }, SCU8C, 26 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { U1,  U5  }, SCU8C, 26 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { V3,  V5  }, SCU8C, 27 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { V3,  V5  }, SCU8C, 27 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { W4,  AB2 }, SCU8C, 28 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { W4,  AB2 }, SCU8C, 28 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { V6,  V7  }, SCU8C, 29 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { V6,  V7  }, SCU8C, 29 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { Y6,  AB7 }, SCU8C, 30 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { Y6,  AB7 }, SCU8C, 30 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { V20, A5  }, SCU8C, 31 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { V20, A5  }, SCU8C, 31 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D6,  D5,  SCU8C, 16),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   D6,  D5,  SCU8C, 16),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, J21, E18, SCU8C, 17),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   J21, E18, SCU8C, 17),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A18, E15, SCU8C, 19),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   A18, E15, SCU8C, 19),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D15, B14, SCU8C, 20),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   D15, B14, SCU8C, 20),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D18, C17, SCU8C, 21),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   D18, C17, SCU8C, 21),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A14, U18, SCU8C, 22),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   A14, U18, SCU8C, 22),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A8,  E7,  SCU8C, 23),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   A8,  E7,  SCU8C, 23),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C22, E20, SCU8C, 24),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   C22, E20, SCU8C, 24),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, J5,  T1,  SCU8C, 25),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   J5,  T1,  SCU8C, 25),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, U1,  U5,  SCU8C, 26),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   U1,  U5,  SCU8C, 26),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V3,  V5,  SCU8C, 27),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   V3,  V5,  SCU8C, 27),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, W4,  AB2, SCU8C, 28),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   W4,  AB2, SCU8C, 28),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V6,  V7,  SCU8C, 29),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   V6,  V7,  SCU8C, 29),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y6,  AB7, SCU8C, 30),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   Y6,  AB7, SCU8C, 30),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V20, A5,  SCU8C, 31),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   V20, A5,  SCU8C, 31),
 
 	/* GPIOs T[0-5] (RGMII1 Tx pins) */
-	{ PIN_CONFIG_DRIVE_STRENGTH, { A12, A13 }, SCU90, 9  },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { A12, A13 }, SCU90, 12 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { A12, A13 }, SCU90, 12 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, A12, A13, SCU90, 9),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A12, A13, SCU90, 12),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   A12, A13, SCU90, 12),
 
 	/* GPIOs T[6-7], U[0-3] (RGMII2 TX pins) */
-	{ PIN_CONFIG_DRIVE_STRENGTH, { D9,  D10 }, SCU90, 11 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { D9,  D10 }, SCU90, 14 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { D9,  D10 }, SCU90, 14 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, D9,  D10, SCU90, 11),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D9,  D10, SCU90, 14),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   D9,  D10, SCU90, 14),
 
 	/* GPIOs U[4-7], V[0-1] (RGMII1 Rx pins) */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { E11, E10 }, SCU90, 13 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { E11, E10 }, SCU90, 13 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E11, E10, SCU90, 13),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   E11, E10, SCU90, 13),
 
 	/* GPIOs V[2-7] (RGMII2 Rx pins) */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { C9,  C8  }, SCU90, 15 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { C9,  C8  }, SCU90, 15 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C9,  C8,  SCU90, 15),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   C9,  C8,  SCU90, 15),
 
 	/* ADC pull-downs (SCUA8[19:4]) */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { L5,  L5  }, SCUA8, 4 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { L5,  L5  }, SCUA8, 4 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { L4,  L4  }, SCUA8, 5 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { L4,  L4  }, SCUA8, 5 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { L3,  L3  }, SCUA8, 6 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { L3,  L3  }, SCUA8, 6 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { L2,  L2  }, SCUA8, 7 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { L2,  L2  }, SCUA8, 7 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { L1,  L1  }, SCUA8, 8 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { L1,  L1  }, SCUA8, 8 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { M5,  M5  }, SCUA8, 9 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { M5,  M5  }, SCUA8, 9 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { M4,  M4  }, SCUA8, 10 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { M4,  M4  }, SCUA8, 10 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { M3,  M3  }, SCUA8, 11 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { M3,  M3  }, SCUA8, 11 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { M2,  M2  }, SCUA8, 12 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { M2,  M2  }, SCUA8, 12 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { M1,  M1  }, SCUA8, 13 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { M1,  M1  }, SCUA8, 13 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { N5,  N5  }, SCUA8, 14 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { N5,  N5  }, SCUA8, 14 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { N4,  N4  }, SCUA8, 15 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { N4,  N4  }, SCUA8, 15 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { N3,  N3  }, SCUA8, 16 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { N3,  N3  }, SCUA8, 16 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { N2,  N2  }, SCUA8, 17 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { N2,  N2  }, SCUA8, 17 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { N1,  N1  }, SCUA8, 18 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { N1,  N1  }, SCUA8, 18 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { P5,  P5  }, SCUA8, 19 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { P5,  P5  }, SCUA8, 19 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L5,  L5,  SCUA8, 4),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   L5,  L5,  SCUA8, 4),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L4,  L4,  SCUA8, 5),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   L4,  L4,  SCUA8, 5),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L3,  L3,  SCUA8, 6),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   L3,  L3,  SCUA8, 6),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L2,  L2,  SCUA8, 7),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   L2,  L2,  SCUA8, 7),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L1,  L1,  SCUA8, 8),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   L1,  L1,  SCUA8, 8),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M5,  M5,  SCUA8, 9),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   M5,  M5,  SCUA8, 9),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M4,  M4,  SCUA8, 10),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   M4,  M4,  SCUA8, 10),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M3,  M3,  SCUA8, 11),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   M3,  M3,  SCUA8, 11),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M2,  M2,  SCUA8, 12),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   M2,  M2,  SCUA8, 12),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M1,  M1,  SCUA8, 13),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   M1,  M1,  SCUA8, 13),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N5,  N5,  SCUA8, 14),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   N5,  N5,  SCUA8, 14),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N4,  N4,  SCUA8, 15),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   N4,  N4,  SCUA8, 15),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N3,  N3,  SCUA8, 16),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   N3,  N3,  SCUA8, 16),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N2,  N2,  SCUA8, 17),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   N2,  N2,  SCUA8, 17),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N1,  N1,  SCUA8, 18),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   N1,  N1,  SCUA8, 18),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, P5,  P5,  SCUA8, 19),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   P5,  P5,  SCUA8, 19),
 
 	/*
 	 * Debounce settings for GPIOs D and E passthrough mode are in
@@ -2531,14 +2531,14 @@ static const struct aspeed_pin_config aspeed_g4_configs[] = {
 	 * controller. Due to this tangle between GPIO and pinctrl we don't yet
 	 * fully support pass-through debounce.
 	 */
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { A18, D16 }, SCUA8, 20 },
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { B17, A17 }, SCUA8, 21 },
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { C16, B16 }, SCUA8, 22 },
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { A16, E15 }, SCUA8, 23 },
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { D15, C15 }, SCUA8, 24 },
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { B15, A15 }, SCUA8, 25 },
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { E14, D14 }, SCUA8, 26 },
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { C14, B14 }, SCUA8, 27 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, A18, D16, SCUA8, 20),
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, B17, A17, SCUA8, 21),
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, C16, B16, SCUA8, 22),
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, A16, E15, SCUA8, 23),
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, D15, C15, SCUA8, 24),
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, B15, A15, SCUA8, 25),
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, E14, D14, SCUA8, 26),
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, C14, B14, SCUA8, 27),
 };
 
 static int aspeed_g4_sig_expr_set(struct aspeed_pinmux_data *ctx,
@@ -2594,6 +2594,14 @@ static int aspeed_g4_sig_expr_set(struct aspeed_pinmux_data *ctx,
 	return 0;
 }
 
+static const struct aspeed_pin_config_map aspeed_g4_pin_config_map[] = {
+	{ PIN_CONFIG_BIAS_PULL_DOWN,  0, 1, BIT_MASK(0)},
+	{ PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
+	{ PIN_CONFIG_BIAS_DISABLE,   -1, 1, BIT_MASK(0)},
+	{ PIN_CONFIG_DRIVE_STRENGTH,  8, 0, BIT_MASK(0)},
+	{ PIN_CONFIG_DRIVE_STRENGTH, 16, 1, BIT_MASK(0)},
+};
+
 static const struct aspeed_pinmux_ops aspeed_g4_ops = {
 	.set = aspeed_g4_sig_expr_set,
 };
@@ -2610,6 +2618,8 @@ static struct aspeed_pinctrl_data aspeed_g4_pinctrl_data = {
 	},
 	.configs = aspeed_g4_configs,
 	.nconfigs = ARRAY_SIZE(aspeed_g4_configs),
+	.confmaps = aspeed_g4_pin_config_map,
+	.nconfmaps = ARRAY_SIZE(aspeed_g4_pin_config_map),
 };
 
 static const struct pinmux_ops aspeed_g4_pinmux_ops = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index d8a804b..0cab4c2 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -2476,124 +2476,124 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = {
 
 static struct aspeed_pin_config aspeed_g5_configs[] = {
 	/* GPIOA, GPIOQ */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { B14, B13 }, SCU8C, 16 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { B14, B13 }, SCU8C, 16 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { A11, N20 }, SCU8C, 16 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { A11, N20 }, SCU8C, 16 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B14, B13, SCU8C, 16),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   B14, B13, SCU8C, 16),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A11, N20, SCU8C, 16),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   A11, N20, SCU8C, 16),
 
 	/* GPIOB, GPIOR */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { K19, H20 }, SCU8C, 17 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { K19, H20 }, SCU8C, 17 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { AA19, E10 }, SCU8C, 17 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { AA19, E10 }, SCU8C, 17 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, K19, H20, SCU8C, 17),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   K19, H20, SCU8C, 17),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, AA19, E10, SCU8C, 17),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   AA19, E10, SCU8C, 17),
 
 	/* GPIOC, GPIOS*/
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { C12, B11 }, SCU8C, 18 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { C12, B11 }, SCU8C, 18 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { V20, AA20 }, SCU8C, 18 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { V20, AA20 }, SCU8C, 18 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C12, B11, SCU8C, 18),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   C12, B11, SCU8C, 18),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V20, AA20, SCU8C, 18),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   V20, AA20, SCU8C, 18),
 
 	/* GPIOD, GPIOY */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { F19, C21 }, SCU8C, 19 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { F19, C21 }, SCU8C, 19 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { R22, P20 }, SCU8C, 19 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { R22, P20 }, SCU8C, 19 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F19, C21, SCU8C, 19),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   F19, C21, SCU8C, 19),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, R22, P20, SCU8C, 19),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   R22, P20, SCU8C, 19),
 
 	/* GPIOE, GPIOZ */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { B20, B19 }, SCU8C, 20 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { B20, B19 }, SCU8C, 20 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { Y20, W21 }, SCU8C, 20 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { Y20, W21 }, SCU8C, 20 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B20, B19, SCU8C, 20),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   B20, B19, SCU8C, 20),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y20, W21, SCU8C, 20),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   Y20, W21, SCU8C, 20),
 
 	/* GPIOF, GPIOAA */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { J19, H18 }, SCU8C, 21 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { J19, H18 }, SCU8C, 21 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { Y21, P19 }, SCU8C, 21 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { Y21, P19 }, SCU8C, 21 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, J19, H18, SCU8C, 21),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   J19, H18, SCU8C, 21),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y21, P19, SCU8C, 21),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   Y21, P19, SCU8C, 21),
 
-	/* GPIOG, GPIOAB */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { A19, E14 }, SCU8C, 22 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { A19, E14 }, SCU8C, 22 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { N19, R20 }, SCU8C, 22 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { N19, R20 }, SCU8C, 22 },
+	/* GPIOG, GPIOAB */
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A19, E14, SCU8C, 22),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   A19, E14, SCU8C, 22),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N19, R20, SCU8C, 22),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   N19, R20, SCU8C, 22),
 
 	/* GPIOH, GPIOAC */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { A18,  D18  }, SCU8C, 23 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { A18,  D18  }, SCU8C, 23 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { G21,  G22  }, SCU8C, 23 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { G21,  G22  }, SCU8C, 23 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A18,  D18, SCU8C, 23),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   A18,  D18, SCU8C, 23),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G21,  G22, SCU8C, 23),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   G21,  G22, SCU8C, 23),
 
 	/* GPIOs [I, P] */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { C18, A15 }, SCU8C, 24 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { C18, A15 }, SCU8C, 24 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { R2,  T3  }, SCU8C, 25 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { R2,  T3  }, SCU8C, 25 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { L3,  R1  }, SCU8C, 26 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { L3,  R1  }, SCU8C, 26 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { T2,  W1  }, SCU8C, 27 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { T2,  W1  }, SCU8C, 27 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { Y1,  T5  }, SCU8C, 28 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { Y1,  T5  }, SCU8C, 28 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { V2,  T4  }, SCU8C, 29 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { V2,  T4  }, SCU8C, 29 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { U5,  W4  }, SCU8C, 30 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { U5,  W4  }, SCU8C, 30 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { V4,  V6  }, SCU8C, 31 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { V4,  V6  }, SCU8C, 31 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C18, A15, SCU8C, 24),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   C18, A15, SCU8C, 24),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, R2,  T3,  SCU8C, 25),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   R2,  T3,  SCU8C, 25),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L3,  R1,  SCU8C, 26),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   L3,  R1,  SCU8C, 26),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, T2,  W1,  SCU8C, 27),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   T2,  W1,  SCU8C, 27),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y1,  T5,  SCU8C, 28),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   Y1,  T5,  SCU8C, 28),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V2,  T4,  SCU8C, 29),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   V2,  T4,  SCU8C, 29),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, U5,  W4,  SCU8C, 30),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   U5,  W4,  SCU8C, 30),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V4,  V6,  SCU8C, 31),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   V4,  V6,  SCU8C, 31),
 
 	/* GPIOs T[0-5] (RGMII1 Tx pins) */
-	{ PIN_CONFIG_DRIVE_STRENGTH, { B5, B5 }, SCU90, 8 },
-	{ PIN_CONFIG_DRIVE_STRENGTH, { E9, A5 }, SCU90, 9 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { B5, D7 }, SCU90, 12 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { B5, D7 }, SCU90, 12 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, B5, B5, SCU90, 8),
+	ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, E9, A5, SCU90, 9),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B5, D7, SCU90, 12),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   B5, D7, SCU90, 12),
 
 	/* GPIOs T[6-7], U[0-3] (RGMII2 TX pins) */
-	{ PIN_CONFIG_DRIVE_STRENGTH, { B2, B2 }, SCU90, 10 },
-	{ PIN_CONFIG_DRIVE_STRENGTH, { B1, B3 }, SCU90, 11 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { B2, D4 }, SCU90, 14 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { B2, D4 }, SCU90, 14 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, B2, B2, SCU90, 10),
+	ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, B1, B3, SCU90, 11),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B2, D4, SCU90, 14),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   B2, D4, SCU90, 14),
 
 	/* GPIOs U[4-7], V[0-1] (RGMII1 Rx pins) */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { B4, C4 }, SCU90, 13 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { B4, C4 }, SCU90, 13 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B4, C4, SCU90, 13),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   B4, C4, SCU90, 13),
 
 	/* GPIOs V[2-7] (RGMII2 Rx pins) */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { C2, E6 }, SCU90, 15 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { C2, E6 }, SCU90, 15 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C2, E6, SCU90, 15),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   C2, E6, SCU90, 15),
 
 	/* ADC pull-downs (SCUA8[19:4]) */
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { F4, F4 }, SCUA8, 4 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { F4, F4 }, SCUA8, 4 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { F5, F5 }, SCUA8, 5 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { F5, F5 }, SCUA8, 5 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { E2, E2 }, SCUA8, 6 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { E2, E2 }, SCUA8, 6 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { E1, E1 }, SCUA8, 7 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { E1, E1 }, SCUA8, 7 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { F3, F3 }, SCUA8, 8 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { F3, F3 }, SCUA8, 8 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { E3, E3 }, SCUA8, 9 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { E3, E3 }, SCUA8, 9 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { G5, G5 }, SCUA8, 10 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { G5, G5 }, SCUA8, 10 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { G4, G4 }, SCUA8, 11 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { G4, G4 }, SCUA8, 11 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { F2, F2 }, SCUA8, 12 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { F2, F2 }, SCUA8, 12 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { G3, G3 }, SCUA8, 13 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { G3, G3 }, SCUA8, 13 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { G2, G2 }, SCUA8, 14 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { G2, G2 }, SCUA8, 14 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { F1, F1 }, SCUA8, 15 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { F1, F1 }, SCUA8, 15 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { H5, H5 }, SCUA8, 16 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { H5, H5 }, SCUA8, 16 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { G1, G1 }, SCUA8, 17 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { G1, G1 }, SCUA8, 17 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { H3, H3 }, SCUA8, 18 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { H3, H3 }, SCUA8, 18 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, { H4, H4 }, SCUA8, 19 },
-	{ PIN_CONFIG_BIAS_DISABLE,   { H4, H4 }, SCUA8, 19 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F4, F4, SCUA8, 4),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   F4, F4, SCUA8, 4),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F5, F5, SCUA8, 5),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   F5, F5, SCUA8, 5),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E2, E2, SCUA8, 6),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   E2, E2, SCUA8, 6),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E1, E1, SCUA8, 7),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   E1, E1, SCUA8, 7),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F3, F3, SCUA8, 8),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   F3, F3, SCUA8, 8),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E3, E3, SCUA8, 9),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   E3, E3, SCUA8, 9),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G5, G5, SCUA8, 10),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   G5, G5, SCUA8, 10),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G4, G4, SCUA8, 11),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   G4, G4, SCUA8, 11),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F2, F2, SCUA8, 12),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   F2, F2, SCUA8, 12),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G3, G3, SCUA8, 13),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   G3, G3, SCUA8, 13),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G2, G2, SCUA8, 14),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   G2, G2, SCUA8, 14),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F1, F1, SCUA8, 15),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   F1, F1, SCUA8, 15),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, H5, H5, SCUA8, 16),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   H5, H5, SCUA8, 16),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G1, G1, SCUA8, 17),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   G1, G1, SCUA8, 17),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, H3, H3, SCUA8, 18),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   H3, H3, SCUA8, 18),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, H4, H4, SCUA8, 19),
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   H4, H4, SCUA8, 19),
 
 	/*
 	 * Debounce settings for GPIOs D and E passthrough mode are in
@@ -2604,14 +2604,14 @@ static struct aspeed_pin_config aspeed_g5_configs[] = {
 	 * controller. Due to this tangle between GPIO and pinctrl we don't yet
 	 * fully support pass-through debounce.
 	 */
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { F19, E21 }, SCUA8, 20 },
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { F20, D20 }, SCUA8, 21 },
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { D21, E20 }, SCUA8, 22 },
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { G18, C21 }, SCUA8, 23 },
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { B20, C20 }, SCUA8, 24 },
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { F18, F17 }, SCUA8, 25 },
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { E18, D19 }, SCUA8, 26 },
-	{ PIN_CONFIG_INPUT_DEBOUNCE, { A20, B19 }, SCUA8, 27 },
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, F19, E21, SCUA8, 20),
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, F20, D20, SCUA8, 21),
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, D21, E20, SCUA8, 22),
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, G18, C21, SCUA8, 23),
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, B20, C20, SCUA8, 24),
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, F18, F17, SCUA8, 25),
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, E18, D19, SCUA8, 26),
+	ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, A20, B19, SCUA8, 27),
 };
 
 static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx,
@@ -2780,6 +2780,14 @@ static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx,
 	return 0;
 }
 
+static const struct aspeed_pin_config_map aspeed_g5_pin_config_map[] = {
+	{ PIN_CONFIG_BIAS_PULL_DOWN,  0, 1, BIT_MASK(0)},
+	{ PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
+	{ PIN_CONFIG_BIAS_DISABLE,   -1, 1, BIT_MASK(0)},
+	{ PIN_CONFIG_DRIVE_STRENGTH,  8, 0, BIT_MASK(0)},
+	{ PIN_CONFIG_DRIVE_STRENGTH, 16, 1, BIT_MASK(0)},
+};
+
 static const struct aspeed_pinmux_ops aspeed_g5_ops = {
 	.eval = aspeed_g5_sig_expr_eval,
 	.set = aspeed_g5_sig_expr_set,
@@ -2797,6 +2805,8 @@ static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = {
 	},
 	.configs = aspeed_g5_configs,
 	.nconfigs = ARRAY_SIZE(aspeed_g5_configs),
+	.confmaps = aspeed_g5_pin_config_map,
+	.nconfmaps = ARRAY_SIZE(aspeed_g5_pin_config_map),
 };
 
 static const struct pinmux_ops aspeed_g5_pinmux_ops = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index bb07024..fa32c3e 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -26,7 +26,10 @@
 #define SCU430		0x430 /* Multi-function Pin Control #8  */
 #define SCU434		0x434 /* Multi-function Pin Control #9  */
 #define SCU438		0x438 /* Multi-function Pin Control #10 */
+#define SCU440		0x440 /* USB Multi-function Pin Control #12 */
 #define SCU450		0x450 /* Multi-function Pin Control #14 */
+#define SCU454		0x454 /* Multi-function Pin Control #15 */
+#define SCU458		0x458 /* Multi-function Pin Control #16 */
 #define SCU4B0		0x4B0 /* Multi-function Pin Control #17 */
 #define SCU4B4		0x4B4 /* Multi-function Pin Control #18 */
 #define SCU4B8		0x4B8 /* Multi-function Pin Control #19 */
@@ -35,9 +38,17 @@
 #define SCU4D8		0x4D8 /* Multi-function Pin Control #23 */
 #define SCU500		0x500 /* Hardware Strap 1 */
 #define SCU510		0x510 /* Hardware Strap 2 */
+#define SCU610		0x610 /* Disable GPIO Internal Pull-Down #0 */
+#define SCU614		0x614 /* Disable GPIO Internal Pull-Down #1 */
+#define SCU618		0x618 /* Disable GPIO Internal Pull-Down #2 */
+#define SCU61C		0x61c /* Disable GPIO Internal Pull-Down #3 */
+#define SCU620		0x620 /* Disable GPIO Internal Pull-Down #4 */
+#define SCU634		0x634 /* Disable GPIO Internal Pull-Down #5 */
+#define SCU638		0x638 /* Disable GPIO Internal Pull-Down #6 */
 #define SCU694		0x694 /* Multi-function Pin Control #25 */
+#define SCUC20		0xC20 /* PCIE configuration Setting Control */
 
-#define ASPEED_G6_NR_PINS 248
+#define ASPEED_G6_NR_PINS 256
 
 #define M24 0
 SIG_EXPR_LIST_DECL_SESG(M24, MDC3, MDIO3, SIG_DESC_SET(SCU410, 0));
@@ -1534,6 +1545,78 @@ GROUP_DECL(I3C4, AE25, AF24);
 FUNC_DECL_2(I3C4, HVI3C4, I3C4);
 FUNC_GROUP_DECL(FSI2, AE25, AF24);
 
+#define AF23 248
+SIG_EXPR_LIST_DECL_SESG(AF23, I3C1SCL, I3C1, SIG_DESC_SET(SCU438, 16));
+PIN_DECL_(AF23, SIG_EXPR_LIST_PTR(AF23, I3C1SCL));
+
+#define AE24 249
+SIG_EXPR_LIST_DECL_SESG(AE24, I3C1SDA, I3C1, SIG_DESC_SET(SCU438, 17));
+PIN_DECL_(AE24, SIG_EXPR_LIST_PTR(AE24, I3C1SDA));
+
+FUNC_GROUP_DECL(I3C1, AF23, AE24);
+
+#define AF22 250
+SIG_EXPR_LIST_DECL_SESG(AF22, I3C2SCL, I3C2, SIG_DESC_SET(SCU438, 18));
+PIN_DECL_(AF22, SIG_EXPR_LIST_PTR(AF22, I3C2SCL));
+
+#define AE22 251
+SIG_EXPR_LIST_DECL_SESG(AE22, I3C2SDA, I3C2, SIG_DESC_SET(SCU438, 19));
+PIN_DECL_(AE22, SIG_EXPR_LIST_PTR(AE22, I3C2SDA));
+
+FUNC_GROUP_DECL(I3C2, AF22, AE22);
+
+#define USB2ADP_DESC   { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 0, 0 }
+#define USB2AD_DESC    { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 1, 0 }
+#define USB2AH_DESC    { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 2, 0 }
+#define USB2AHP_DESC   { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 3, 0 }
+#define USB11BHID_DESC { ASPEED_IP_SCU, SCU440, GENMASK(29, 28), 0, 0 }
+#define USB2BD_DESC    { ASPEED_IP_SCU, SCU440, GENMASK(29, 28), 1, 0 }
+#define USB2BH_DESC    { ASPEED_IP_SCU, SCU440, GENMASK(29, 28), 2, 0 }
+
+#define A4 252
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2ADPDP, USBA, USB2ADP, USB2ADP_DESC,
+			SIG_DESC_SET(SCUC20, 16));
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2ADDP, USBA, USB2AD, USB2AD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2AHDP, USBA, USB2AH, USB2AH_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2AHPDP, USBA, USB2AHP, USB2AHP_DESC);
+PIN_DECL_(A4, SIG_EXPR_LIST_PTR(A4, USB2ADPDP), SIG_EXPR_LIST_PTR(A4, USB2ADDP),
+	  SIG_EXPR_LIST_PTR(A4, USB2AHDP));
+
+#define B4 253
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2ADPDN, USBA, USB2ADP, USB2ADP_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2ADDN, USBA, USB2AD, USB2AD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2AHDN, USBA, USB2AH, USB2AH_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2AHPDN, USBA, USB2AHP, USB2AHP_DESC);
+PIN_DECL_(B4, SIG_EXPR_LIST_PTR(B4, USB2ADPDN), SIG_EXPR_LIST_PTR(B4, USB2ADDN),
+	  SIG_EXPR_LIST_PTR(B4, USB2AHDN));
+
+GROUP_DECL(USBA, A4, B4);
+
+FUNC_DECL_1(USB2ADP, USBA);
+FUNC_DECL_1(USB2AD, USBA);
+FUNC_DECL_1(USB2AH, USBA);
+FUNC_DECL_1(USB2AHP, USBA);
+
+#define A6 254
+SIG_EXPR_LIST_DECL_SEMG(A6, USB11BDP, USBB, USB11BHID, USB11BHID_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A6, USB2BDDP, USBB, USB2BD, USB2BD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A6, USB2BHDP, USBB, USB2BH, USB2BH_DESC);
+PIN_DECL_(A6, SIG_EXPR_LIST_PTR(A6, USB11BDP), SIG_EXPR_LIST_PTR(A6, USB2BDDP),
+	  SIG_EXPR_LIST_PTR(A6, USB2BHDP));
+
+#define B6 255
+SIG_EXPR_LIST_DECL_SEMG(B6, USB11BDN, USBB, USB11BHID, USB11BHID_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B6, USB2BDDN, USBB, USB2BD, USB2BD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B6, USB2BHDN, USBB, USB2BH, USB2BH_DESC);
+PIN_DECL_(B6, SIG_EXPR_LIST_PTR(B6, USB11BDN), SIG_EXPR_LIST_PTR(B6, USB2BDDN),
+	  SIG_EXPR_LIST_PTR(B6, USB2BHDN));
+
+GROUP_DECL(USBB, A6, B6);
+
+FUNC_DECL_1(USB11BHID, USBB);
+FUNC_DECL_1(USB2BD, USBB);
+FUNC_DECL_1(USB2BH, USBB);
+
 /* Pins, groups and functions are sort(1):ed alphabetically for sanity */
 
 static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
@@ -1554,6 +1637,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
 	ASPEED_PINCTRL_PIN(A24),
 	ASPEED_PINCTRL_PIN(A25),
 	ASPEED_PINCTRL_PIN(A3),
+	ASPEED_PINCTRL_PIN(A4),
+	ASPEED_PINCTRL_PIN(A6),
 	ASPEED_PINCTRL_PIN(AA11),
 	ASPEED_PINCTRL_PIN(AA12),
 	ASPEED_PINCTRL_PIN(AA16),
@@ -1625,6 +1710,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
 	ASPEED_PINCTRL_PIN(AE16),
 	ASPEED_PINCTRL_PIN(AE18),
 	ASPEED_PINCTRL_PIN(AE19),
+	ASPEED_PINCTRL_PIN(AE22),
+	ASPEED_PINCTRL_PIN(AE24),
 	ASPEED_PINCTRL_PIN(AE25),
 	ASPEED_PINCTRL_PIN(AE26),
 	ASPEED_PINCTRL_PIN(AE7),
@@ -1634,6 +1721,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
 	ASPEED_PINCTRL_PIN(AF12),
 	ASPEED_PINCTRL_PIN(AF14),
 	ASPEED_PINCTRL_PIN(AF15),
+	ASPEED_PINCTRL_PIN(AF22),
+	ASPEED_PINCTRL_PIN(AF23),
 	ASPEED_PINCTRL_PIN(AF24),
 	ASPEED_PINCTRL_PIN(AF25),
 	ASPEED_PINCTRL_PIN(AF7),
@@ -1654,6 +1743,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
 	ASPEED_PINCTRL_PIN(B25),
 	ASPEED_PINCTRL_PIN(B26),
 	ASPEED_PINCTRL_PIN(B3),
+	ASPEED_PINCTRL_PIN(B4),
+	ASPEED_PINCTRL_PIN(B6),
 	ASPEED_PINCTRL_PIN(C1),
 	ASPEED_PINCTRL_PIN(C11),
 	ASPEED_PINCTRL_PIN(C12),
@@ -1847,6 +1938,8 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
 	ASPEED_PINCTRL_GROUP(I2C7),
 	ASPEED_PINCTRL_GROUP(I2C8),
 	ASPEED_PINCTRL_GROUP(I2C9),
+	ASPEED_PINCTRL_GROUP(I3C1),
+	ASPEED_PINCTRL_GROUP(I3C2),
 	ASPEED_PINCTRL_GROUP(I3C3),
 	ASPEED_PINCTRL_GROUP(I3C4),
 	ASPEED_PINCTRL_GROUP(I3C5),
@@ -2012,6 +2105,8 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
 	ASPEED_PINCTRL_GROUP(UART7),
 	ASPEED_PINCTRL_GROUP(UART8),
 	ASPEED_PINCTRL_GROUP(UART9),
+	ASPEED_PINCTRL_GROUP(USBA),
+	ASPEED_PINCTRL_GROUP(USBB),
 	ASPEED_PINCTRL_GROUP(VB),
 	ASPEED_PINCTRL_GROUP(VGAHS),
 	ASPEED_PINCTRL_GROUP(VGAVS),
@@ -2079,6 +2174,8 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
 	ASPEED_PINCTRL_FUNC(I2C7),
 	ASPEED_PINCTRL_FUNC(I2C8),
 	ASPEED_PINCTRL_FUNC(I2C9),
+	ASPEED_PINCTRL_FUNC(I3C1),
+	ASPEED_PINCTRL_FUNC(I3C2),
 	ASPEED_PINCTRL_FUNC(I3C3),
 	ASPEED_PINCTRL_FUNC(I3C4),
 	ASPEED_PINCTRL_FUNC(I3C5),
@@ -2221,6 +2318,13 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
 	ASPEED_PINCTRL_FUNC(UART7),
 	ASPEED_PINCTRL_FUNC(UART8),
 	ASPEED_PINCTRL_FUNC(UART9),
+	ASPEED_PINCTRL_FUNC(USB11BHID),
+	ASPEED_PINCTRL_FUNC(USB2AD),
+	ASPEED_PINCTRL_FUNC(USB2ADP),
+	ASPEED_PINCTRL_FUNC(USB2AH),
+	ASPEED_PINCTRL_FUNC(USB2AHP),
+	ASPEED_PINCTRL_FUNC(USB2BD),
+	ASPEED_PINCTRL_FUNC(USB2BH),
 	ASPEED_PINCTRL_FUNC(VB),
 	ASPEED_PINCTRL_FUNC(VGAHS),
 	ASPEED_PINCTRL_FUNC(VGAVS),
@@ -2230,6 +2334,260 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
 	ASPEED_PINCTRL_FUNC(WDTRST4),
 };
 
+static struct aspeed_pin_config aspeed_g6_configs[] = {
+	/* GPIOB7 */
+	ASPEED_PULL_DOWN_PINCONF(J24, SCU610, 15),
+	/* GPIOB6 */
+	ASPEED_PULL_DOWN_PINCONF(H25, SCU610, 14),
+	/* GPIOB5 */
+	ASPEED_PULL_DOWN_PINCONF(G26, SCU610, 13),
+	/* GPIOB4 */
+	ASPEED_PULL_DOWN_PINCONF(J23, SCU610, 12),
+	/* GPIOB3 */
+	ASPEED_PULL_DOWN_PINCONF(J25, SCU610, 11),
+	/* GPIOB2 */
+	ASPEED_PULL_DOWN_PINCONF(H26, SCU610, 10),
+	/* GPIOB1 */
+	ASPEED_PULL_DOWN_PINCONF(K23, SCU610, 9),
+	/* GPIOB0 */
+	ASPEED_PULL_DOWN_PINCONF(J26, SCU610, 8),
+
+	/* GPIOH3 */
+	ASPEED_PULL_DOWN_PINCONF(A17, SCU614, 27),
+	/* GPIOH2 */
+	ASPEED_PULL_DOWN_PINCONF(C18, SCU614, 26),
+	/* GPIOH1 */
+	ASPEED_PULL_DOWN_PINCONF(B18, SCU614, 25),
+	/* GPIOH0 */
+	ASPEED_PULL_DOWN_PINCONF(A18, SCU614, 24),
+
+	/* GPIOL7 */
+	ASPEED_PULL_DOWN_PINCONF(C14, SCU618, 31),
+	/* GPIOL6 */
+	ASPEED_PULL_DOWN_PINCONF(B14, SCU618, 30),
+	/* GPIOL5 */
+	ASPEED_PULL_DOWN_PINCONF(F15, SCU618, 29),
+	/* GPIOL4 */
+	ASPEED_PULL_DOWN_PINCONF(C15, SCU618, 28),
+
+	/* GPIOJ7 */
+	ASPEED_PULL_UP_PINCONF(D19, SCU618, 15),
+	/* GPIOJ6 */
+	ASPEED_PULL_UP_PINCONF(C20, SCU618, 14),
+	/* GPIOJ5 */
+	ASPEED_PULL_UP_PINCONF(A19, SCU618, 13),
+	/* GPIOJ4 */
+	ASPEED_PULL_UP_PINCONF(C19, SCU618, 12),
+	/* GPIOJ3 */
+	ASPEED_PULL_UP_PINCONF(D20, SCU618, 11),
+	/* GPIOJ2 */
+	ASPEED_PULL_UP_PINCONF(E19, SCU618, 10),
+	/* GPIOJ1 */
+	ASPEED_PULL_UP_PINCONF(A20, SCU618, 9),
+	/* GPIOJ0 */
+	ASPEED_PULL_UP_PINCONF(B20, SCU618, 8),
+
+	/* GPIOI7 */
+	ASPEED_PULL_DOWN_PINCONF(A15, SCU618, 7),
+	/* GPIOI6 */
+	ASPEED_PULL_DOWN_PINCONF(B16, SCU618, 6),
+	/* GPIOI5 */
+	ASPEED_PULL_DOWN_PINCONF(E16, SCU618, 5),
+	/* GPIOI4 */
+	ASPEED_PULL_DOWN_PINCONF(C16, SCU618, 4),
+	/* GPIOI3 */
+	ASPEED_PULL_DOWN_PINCONF(D16, SCU618, 3),
+	/* GPIOI2 */
+	ASPEED_PULL_DOWN_PINCONF(E17, SCU618, 2),
+	/* GPIOI1 */
+	ASPEED_PULL_DOWN_PINCONF(A16, SCU618, 1),
+	/* GPIOI0 */
+	ASPEED_PULL_DOWN_PINCONF(D17, SCU618, 0),
+
+	/* GPIOP7 */
+	ASPEED_PULL_DOWN_PINCONF(Y23, SCU61C, 31),
+	/* GPIOP6 */
+	ASPEED_PULL_DOWN_PINCONF(AB24, SCU61C, 30),
+	/* GPIOP5 */
+	ASPEED_PULL_DOWN_PINCONF(AB23, SCU61C, 29),
+	/* GPIOP4 */
+	ASPEED_PULL_DOWN_PINCONF(W23, SCU61C, 28),
+	/* GPIOP3 */
+	ASPEED_PULL_DOWN_PINCONF(AA24, SCU61C, 27),
+	/* GPIOP2 */
+	ASPEED_PULL_DOWN_PINCONF(AA23, SCU61C, 26),
+	/* GPIOP1 */
+	ASPEED_PULL_DOWN_PINCONF(W24, SCU61C, 25),
+	/* GPIOP0 */
+	ASPEED_PULL_DOWN_PINCONF(AB22, SCU61C, 24),
+
+	/* GPIOO7 */
+	ASPEED_PULL_DOWN_PINCONF(AC23, SCU61C, 23),
+	/* GPIOO6 */
+	ASPEED_PULL_DOWN_PINCONF(AC24, SCU61C, 22),
+	/* GPIOO5 */
+	ASPEED_PULL_DOWN_PINCONF(AC22, SCU61C, 21),
+	/* GPIOO4 */
+	ASPEED_PULL_DOWN_PINCONF(AD25, SCU61C, 20),
+	/* GPIOO3 */
+	ASPEED_PULL_DOWN_PINCONF(AD24, SCU61C, 19),
+	/* GPIOO2 */
+	ASPEED_PULL_DOWN_PINCONF(AD23, SCU61C, 18),
+	/* GPIOO1 */
+	ASPEED_PULL_DOWN_PINCONF(AD22, SCU61C, 17),
+	/* GPIOO0 */
+	ASPEED_PULL_DOWN_PINCONF(AD26, SCU61C, 16),
+
+	/* GPION7 */
+	ASPEED_PULL_DOWN_PINCONF(M26, SCU61C, 15),
+	/* GPION6 */
+	ASPEED_PULL_DOWN_PINCONF(N26, SCU61C, 14),
+	/* GPION5 */
+	ASPEED_PULL_DOWN_PINCONF(M23, SCU61C, 13),
+	/* GPION4 */
+	ASPEED_PULL_DOWN_PINCONF(P26, SCU61C, 12),
+	/* GPION3 */
+	ASPEED_PULL_DOWN_PINCONF(N24, SCU61C, 11),
+	/* GPION2 */
+	ASPEED_PULL_DOWN_PINCONF(N25, SCU61C, 10),
+	/* GPION1 */
+	ASPEED_PULL_DOWN_PINCONF(N23, SCU61C, 9),
+	/* GPION0 */
+	ASPEED_PULL_DOWN_PINCONF(P25, SCU61C, 8),
+
+	/* GPIOM7 */
+	ASPEED_PULL_DOWN_PINCONF(D13, SCU61C, 7),
+	/* GPIOM6 */
+	ASPEED_PULL_DOWN_PINCONF(C13, SCU61C, 6),
+	/* GPIOM5 */
+	ASPEED_PULL_DOWN_PINCONF(C12, SCU61C, 5),
+	/* GPIOM4 */
+	ASPEED_PULL_DOWN_PINCONF(B12, SCU61C, 4),
+	/* GPIOM3 */
+	ASPEED_PULL_DOWN_PINCONF(E14, SCU61C, 3),
+	/* GPIOM2 */
+	ASPEED_PULL_DOWN_PINCONF(A12, SCU61C, 2),
+	/* GPIOM1 */
+	ASPEED_PULL_DOWN_PINCONF(B13, SCU61C, 1),
+	/* GPIOM0 */
+	ASPEED_PULL_DOWN_PINCONF(D14, SCU61C, 0),
+
+	/* GPIOS7 */
+	ASPEED_PULL_DOWN_PINCONF(T24, SCU620, 23),
+	/* GPIOS6 */
+	ASPEED_PULL_DOWN_PINCONF(P23, SCU620, 22),
+	/* GPIOS5 */
+	ASPEED_PULL_DOWN_PINCONF(P24, SCU620, 21),
+	/* GPIOS4 */
+	ASPEED_PULL_DOWN_PINCONF(R26, SCU620, 20),
+	/* GPIOS3 */
+	ASPEED_PULL_DOWN_PINCONF(R24, SCU620, 19),
+	/* GPIOS2 */
+	ASPEED_PULL_DOWN_PINCONF(T26, SCU620, 18),
+	/* GPIOS1 */
+	ASPEED_PULL_DOWN_PINCONF(T25, SCU620, 17),
+	/* GPIOS0 */
+	ASPEED_PULL_DOWN_PINCONF(R23, SCU620, 16),
+
+	/* GPIOR7 */
+	ASPEED_PULL_DOWN_PINCONF(U26, SCU620, 15),
+	/* GPIOR6 */
+	ASPEED_PULL_DOWN_PINCONF(W26, SCU620, 14),
+	/* GPIOR5 */
+	ASPEED_PULL_DOWN_PINCONF(T23, SCU620, 13),
+	/* GPIOR4 */
+	ASPEED_PULL_DOWN_PINCONF(U25, SCU620, 12),
+	/* GPIOR3 */
+	ASPEED_PULL_DOWN_PINCONF(V26, SCU620, 11),
+	/* GPIOR2 */
+	ASPEED_PULL_DOWN_PINCONF(V24, SCU620, 10),
+	/* GPIOR1 */
+	ASPEED_PULL_DOWN_PINCONF(U24, SCU620, 9),
+	/* GPIOR0 */
+	ASPEED_PULL_DOWN_PINCONF(V25, SCU620, 8),
+
+	/* GPIOX7 */
+	ASPEED_PULL_DOWN_PINCONF(AB10, SCU634, 31),
+	/* GPIOX6 */
+	ASPEED_PULL_DOWN_PINCONF(AF9, SCU634, 30),
+	/* GPIOX5 */
+	ASPEED_PULL_DOWN_PINCONF(AD9, SCU634, 29),
+	/* GPIOX4 */
+	ASPEED_PULL_DOWN_PINCONF(AB9, SCU634, 28),
+	/* GPIOX3 */
+	ASPEED_PULL_DOWN_PINCONF(AF8, SCU634, 27),
+	/* GPIOX2 */
+	ASPEED_PULL_DOWN_PINCONF(AC9, SCU634, 26),
+	/* GPIOX1 */
+	ASPEED_PULL_DOWN_PINCONF(AA9, SCU634, 25),
+	/* GPIOX0 */
+	ASPEED_PULL_DOWN_PINCONF(AE8, SCU634, 24),
+
+	/* GPIOV7 */
+	ASPEED_PULL_DOWN_PINCONF(AF15, SCU634, 15),
+	/* GPIOV6 */
+	ASPEED_PULL_DOWN_PINCONF(AD15, SCU634, 14),
+	/* GPIOV5 */
+	ASPEED_PULL_DOWN_PINCONF(AE14, SCU634, 13),
+	/* GPIOV4 */
+	ASPEED_PULL_DOWN_PINCONF(AE15, SCU634, 12),
+	/* GPIOV3 */
+	ASPEED_PULL_DOWN_PINCONF(AC15, SCU634, 11),
+	/* GPIOV2 */
+	ASPEED_PULL_DOWN_PINCONF(AD14, SCU634, 10),
+	/* GPIOV1 */
+	ASPEED_PULL_DOWN_PINCONF(AF14, SCU634, 9),
+	/* GPIOV0 */
+	ASPEED_PULL_DOWN_PINCONF(AB15, SCU634, 8),
+
+	/* GPIOZ7 */
+	ASPEED_PULL_DOWN_PINCONF(AF10, SCU638, 15),
+	/* GPIOZ6 */
+	ASPEED_PULL_DOWN_PINCONF(AD11, SCU638, 14),
+	/* GPIOZ5 */
+	ASPEED_PULL_DOWN_PINCONF(AA11, SCU638, 13),
+	/* GPIOZ4 */
+	ASPEED_PULL_DOWN_PINCONF(AC11, SCU638, 12),
+	/* GPIOZ3 */
+	ASPEED_PULL_DOWN_PINCONF(AB11, SCU638, 11),
+
+	/* GPIOZ1 */
+	ASPEED_PULL_DOWN_PINCONF(AD10, SCU638, 9),
+	/* GPIOZ0 */
+	ASPEED_PULL_DOWN_PINCONF(AC10, SCU638, 8),
+
+	/* GPIOY6 */
+	ASPEED_PULL_DOWN_PINCONF(AC12, SCU638, 6),
+	/* GPIOY5 */
+	ASPEED_PULL_DOWN_PINCONF(AF12, SCU638, 5),
+	/* GPIOY4 */
+	ASPEED_PULL_DOWN_PINCONF(AE12, SCU638, 4),
+	/* GPIOY3 */
+	ASPEED_PULL_DOWN_PINCONF(AA12, SCU638, 3),
+	/* GPIOY2 */
+	ASPEED_PULL_DOWN_PINCONF(AE11, SCU638, 2),
+	/* GPIOY1 */
+	ASPEED_PULL_DOWN_PINCONF(AD12, SCU638, 1),
+	/* GPIOY0 */
+	ASPEED_PULL_DOWN_PINCONF(AF11, SCU638, 0),
+
+	/* LAD3 */
+	{ PIN_CONFIG_DRIVE_STRENGTH, { AC7, AC7 }, SCU454, GENMASK(31, 30)},
+	/* LAD2 */
+	{ PIN_CONFIG_DRIVE_STRENGTH, { AC8, AC8 }, SCU454, GENMASK(29, 28)},
+	/* LAD1 */
+	{ PIN_CONFIG_DRIVE_STRENGTH, { AB8, AB8 }, SCU454, GENMASK(27, 26)},
+	/* LAD0 */
+	{ PIN_CONFIG_DRIVE_STRENGTH, { AB7, AB7 }, SCU454, GENMASK(25, 24)},
+
+	/* MAC3 */
+	{ PIN_CONFIG_POWER_SOURCE,   { H24, E26 }, SCU458, BIT_MASK(4)},
+	{ PIN_CONFIG_DRIVE_STRENGTH, { H24, E26 }, SCU458, GENMASK(1, 0)},
+	/* MAC4 */
+	{ PIN_CONFIG_POWER_SOURCE,   { F24, B24 }, SCU458, BIT_MASK(5)},
+	{ PIN_CONFIG_DRIVE_STRENGTH, { F24, B24 }, SCU458, GENMASK(3, 2)},
+};
+
 /**
  * Configure a pin's signal by applying an expression's descriptor state for
  * all descriptors in the expression.
@@ -2297,6 +2655,20 @@ static int aspeed_g6_sig_expr_set(struct aspeed_pinmux_data *ctx,
 	return 0;
 }
 
+static const struct aspeed_pin_config_map aspeed_g6_pin_config_map[] = {
+	{ PIN_CONFIG_BIAS_PULL_DOWN,  0,   1, BIT_MASK(0)},
+	{ PIN_CONFIG_BIAS_PULL_DOWN, -1,   0, BIT_MASK(0)},
+	{ PIN_CONFIG_BIAS_PULL_UP,    0,   1, BIT_MASK(0)},
+	{ PIN_CONFIG_BIAS_PULL_UP,   -1,   0, BIT_MASK(0)},
+	{ PIN_CONFIG_BIAS_DISABLE,   -1,   1, BIT_MASK(0)},
+	{ PIN_CONFIG_DRIVE_STRENGTH,  4,   0, GENMASK(1, 0)},
+	{ PIN_CONFIG_DRIVE_STRENGTH,  8,   1, GENMASK(1, 0)},
+	{ PIN_CONFIG_DRIVE_STRENGTH, 12,   2, GENMASK(1, 0)},
+	{ PIN_CONFIG_DRIVE_STRENGTH, 16,   3, GENMASK(1, 0)},
+	{ PIN_CONFIG_POWER_SOURCE,   3300, 0, BIT_MASK(0)},
+	{ PIN_CONFIG_POWER_SOURCE,   1800, 1, BIT_MASK(0)},
+};
+
 static const struct aspeed_pinmux_ops aspeed_g5_ops = {
 	.set = aspeed_g6_sig_expr_set,
 };
@@ -2311,6 +2683,10 @@ static struct aspeed_pinctrl_data aspeed_g6_pinctrl_data = {
 		.functions = aspeed_g6_functions,
 		.nfunctions = ARRAY_SIZE(aspeed_g6_functions),
 	},
+	.configs = aspeed_g6_configs,
+	.nconfigs = ARRAY_SIZE(aspeed_g6_configs),
+	.confmaps = aspeed_g6_pin_config_map,
+	.nconfmaps = ARRAY_SIZE(aspeed_g6_pin_config_map),
 };
 
 static const struct pinmux_ops aspeed_g6_pinmux_ops = {
@@ -2331,12 +2707,21 @@ static const struct pinctrl_ops aspeed_g6_pinctrl_ops = {
 	.dt_free_map = pinctrl_utils_free_map,
 };
 
+static const struct pinconf_ops aspeed_g6_conf_ops = {
+	.is_generic = true,
+	.pin_config_get = aspeed_pin_config_get,
+	.pin_config_set = aspeed_pin_config_set,
+	.pin_config_group_get = aspeed_pin_config_group_get,
+	.pin_config_group_set = aspeed_pin_config_group_set,
+};
+
 static struct pinctrl_desc aspeed_g6_pinctrl_desc = {
 	.name = "aspeed-g6-pinctrl",
 	.pins = aspeed_g6_pins,
 	.npins = ARRAY_SIZE(aspeed_g6_pins),
 	.pctlops = &aspeed_g6_pinctrl_ops,
 	.pmxops = &aspeed_g6_pinmux_ops,
+	.confops = &aspeed_g6_conf_ops,
 };
 
 static int aspeed_g6_pinctrl_probe(struct platform_device *pdev)
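
The tables added above separate "where a setting lives" (aspeed_pin_config entries carrying an SCU register and a field mask) from "how a generic pinconf argument is encoded" (aspeed_g6_pin_config_map, e.g. drive strengths of 4/8/12/16 mA become field values 0..3 and 3300/1800 mV become 0/1). A minimal, standalone sketch of the resulting field packing, using stand-in macros rather than the kernel helpers:

/* Standalone sketch (not driver code): pack a mapped value into a masked
 * register field, the way the driver does with __ffs(pconf->mask). */
#include <stdio.h>
#include <stdint.h>

#define GENMASK32(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	/* shift the encoded value up to the field's position (GCC/Clang builtin) */
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t mask = GENMASK32(31, 30);	/* e.g. LAD3 drive strength in SCU454 */
	uint32_t scu454 = 0;

	/* 12 mA maps to field value 2 in the confmap above */
	scu454 = (scu454 & ~mask) | field_prep(mask, 2);
	printf("SCU454 <- 0x%08x\n", (unsigned int)scu454);
	return 0;
}
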
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 5493366..b625a65 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -411,49 +411,21 @@ static inline const struct aspeed_pin_config *find_pinconf_config(
 	return NULL;
 }
 
-/*
- * Aspeed pin configuration description.
- *
- * @param: pinconf configuration parameter
- * @arg: The supported argument for @param, or -1 if any value is supported
- * @val: The register value to write to configure @arg for @param
- *
- * The map is to be used in conjunction with the configuration array supplied
- * by the driver implementation.
- */
-struct aspeed_pin_config_map {
-	enum pin_config_param param;
-	s32 arg;
-	u32 val;
-};
-
 enum aspeed_pin_config_map_type { MAP_TYPE_ARG, MAP_TYPE_VAL };
 
-/* Aspeed consistently both:
- *
- * 1. Defines "disable bits" for internal pull-downs
- * 2. Uses 8mA or 16mA drive strengths
- */
-static const struct aspeed_pin_config_map pin_config_map[] = {
-	{ PIN_CONFIG_BIAS_PULL_DOWN,  0, 1 },
-	{ PIN_CONFIG_BIAS_PULL_DOWN, -1, 0 },
-	{ PIN_CONFIG_BIAS_DISABLE,   -1, 1 },
-	{ PIN_CONFIG_DRIVE_STRENGTH,  8, 0 },
-	{ PIN_CONFIG_DRIVE_STRENGTH, 16, 1 },
-};
-
 static const struct aspeed_pin_config_map *find_pinconf_map(
+		const struct aspeed_pinctrl_data *pdata,
 		enum pin_config_param param,
 		enum aspeed_pin_config_map_type type,
 		s64 value)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(pin_config_map); i++) {
+	for (i = 0; i < pdata->nconfmaps; i++) {
 		const struct aspeed_pin_config_map *elem;
 		bool match;
 
-		elem = &pin_config_map[i];
+		elem = &pdata->confmaps[i];
 
 		switch (type) {
 		case MAP_TYPE_ARG:
@@ -491,8 +463,8 @@ int aspeed_pin_config_get(struct pinctrl_dev *pctldev, unsigned int offset,
 	if (rc < 0)
 		return rc;
 
-	pmap = find_pinconf_map(param, MAP_TYPE_VAL,
-			(val & BIT(pconf->bit)) >> pconf->bit);
+	pmap = find_pinconf_map(pdata, param, MAP_TYPE_VAL,
+			(val & pconf->mask) >> __ffs(pconf->mask));
 
 	if (!pmap)
 		return -EINVAL;
@@ -535,22 +507,22 @@ int aspeed_pin_config_set(struct pinctrl_dev *pctldev, unsigned int offset,
 		if (!pconf)
 			return -ENOTSUPP;
 
-		pmap = find_pinconf_map(param, MAP_TYPE_ARG, arg);
+		pmap = find_pinconf_map(pdata, param, MAP_TYPE_ARG, arg);
 
 		if (WARN_ON(!pmap))
 			return -EINVAL;
 
-		val = pmap->val << pconf->bit;
+		val = pmap->val << __ffs(pconf->mask);
 
 		rc = regmap_update_bits(pdata->scu, pconf->reg,
-					BIT(pconf->bit), val);
+					pconf->mask, val);
 
 		if (rc < 0)
 			return rc;
 
-		pr_debug("%s: Set SCU%02X[%d]=%d for param %d(=%d) on pin %d\n",
-				__func__, pconf->reg, pconf->bit, pmap->val,
-				param, arg, offset);
+		pr_debug("%s: Set SCU%02X[%lu]=%d for param %d(=%d) on pin %d\n",
+				__func__, pconf->reg, __ffs(pconf->mask),
+				pmap->val, param, arg, offset);
 	}
 
 	return 0;
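
find_pinconf_map() is now table-driven per SoC and is queried in two directions: the set path matches on the requested pinconf argument (with -1 acting as a wildcard), while the get path matches on the value read back from the register field. A small standalone sketch of that two-way lookup, with invented table contents standing in for the per-SoC confmaps:

/* Standalone sketch of the two lookup directions; the params and rows here
 * are invented, the real tables are the per-SoC confmaps. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct conf_map { int param; long arg; unsigned int val; };

static const struct conf_map maps[] = {
	{ /* BIAS_PULL_DOWN */ 0,  0, 1 },	/* arg 0: enable the pull */
	{ /* BIAS_PULL_DOWN */ 0, -1, 0 },	/* any other arg: disable */
	{ /* DRIVE_STRENGTH */ 1,  8, 0 },
	{ /* DRIVE_STRENGTH */ 1, 16, 1 },
};

/* by_arg: set path (match the requested argument); otherwise get path
 * (match the raw field value read from the register). */
static const struct conf_map *lookup(int param, bool by_arg, long value)
{
	for (size_t i = 0; i < sizeof(maps) / sizeof(maps[0]); i++) {
		const struct conf_map *m = &maps[i];

		if (m->param != param)
			continue;
		if (by_arg ? (m->arg == -1 || m->arg == value)
			   : (m->val == (unsigned int)value))
			return m;
	}
	return NULL;
}

int main(void)
{
	const struct conf_map *m = lookup(1, true, 16);

	if (m)
		printf("16 mA encodes as field value %u\n", m->val);
	return 0;
}
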
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.h b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
index a5d8398..4dcde3b 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
@@ -24,8 +24,7 @@ struct aspeed_pin_config {
 	enum pin_config_param param;
 	unsigned int pins[2];
 	unsigned int reg;
-	u8 bit;
-	u8 value;
+	u32 mask;
 };
 
 #define ASPEED_PINCTRL_PIN(name_) \
@@ -35,6 +34,38 @@ struct aspeed_pin_config {
 		.drv_data = (void *) &(PIN_SYM(name_)) \
 	}
 
+#define ASPEED_SB_PINCONF(param_, pin0_, pin1_, reg_, bit_) { \
+	.param = param_, \
+	.pins = {pin0_, pin1_}, \
+	.reg = reg_, \
+	.mask = BIT_MASK(bit_) \
+}
+
+#define ASPEED_PULL_DOWN_PINCONF(pin_, reg_, bit_) \
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, pin_, pin_, reg_, bit_), \
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE,   pin_, pin_, reg_, bit_)
+
+#define ASPEED_PULL_UP_PINCONF(pin_, reg_, bit_) \
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_UP, pin_, pin_, reg_, bit_), \
+	ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, pin_, pin_, reg_, bit_)
+/*
+ * Aspeed pin configuration description.
+ *
+ * @param: pinconf configuration parameter
+ * @arg: The supported argument for @param, or -1 if any value is supported
+ * @val: The register value to write to configure @arg for @param
+ * @mask: The bitfield mask for @val
+ *
+ * The map is to be used in conjunction with the configuration array supplied
+ * by the driver implementation.
+ */
+struct aspeed_pin_config_map {
+	enum pin_config_param param;
+	s32 arg;
+	u32 val;
+	u32 mask;
+};
+
 struct aspeed_pinctrl_data {
 	struct regmap *scu;
 
@@ -45,6 +76,9 @@ struct aspeed_pinctrl_data {
 	const unsigned int nconfigs;
 
 	struct aspeed_pinmux_data pinmux;
+
+	const struct aspeed_pin_config_map *confmaps;
+	const unsigned int nconfmaps;
 };
 
 /* Aspeed pinctrl helpers */
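
Note that ASPEED_PULL_DOWN_PINCONF() and ASPEED_PULL_UP_PINCONF() intentionally expand to two aspeed_pin_config entries per pin, so that both the bias parameter and PIN_CONFIG_BIAS_DISABLE resolve to the same SCU disable bit. A standalone illustration of that double expansion (the macro, enum values and register offset below are local stand-ins, not the driver's symbols):

/* Local stand-ins only; this is not the driver's macro or types. */
#include <stdio.h>

enum { BIAS_PULL_DOWN, BIAS_DISABLE };

struct cfg { int param; unsigned int pins[2]; unsigned int reg; unsigned int mask; };

#define MY_BIT_MASK(b)	(1u << (b))
#define MY_SB_PINCONF(p, pin, reg, bit) \
	{ .param = (p), .pins = { (pin), (pin) }, .reg = (reg), .mask = MY_BIT_MASK(bit) }
#define MY_PULL_DOWN_PINCONF(pin, reg, bit) \
	MY_SB_PINCONF(BIAS_PULL_DOWN, pin, reg, bit), \
	MY_SB_PINCONF(BIAS_DISABLE, pin, reg, bit)

static const struct cfg cfgs[] = {
	MY_PULL_DOWN_PINCONF(/* J24 */ 0, /* SCU610 */ 0x610, 15),
};

int main(void)
{
	/* one invocation yields two entries sharing reg 0x610, bit 15 */
	printf("%zu entries, mask 0x%04x\n",
	       sizeof(cfgs) / sizeof(cfgs[0]), cfgs[0].mask);
	return 0;
}
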
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
index 140c5ce..f86739e 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
@@ -737,6 +737,7 @@ struct aspeed_pin_desc {
 #define FUNC_DECL_(func, ...) \
 	static const char *FUNC_SYM(func)[] = { __VA_ARGS__ }
 
+#define FUNC_DECL_1(func, group) FUNC_DECL_(func, #group)
 #define FUNC_DECL_2(func, one, two) FUNC_DECL_(func, #one, #two)
 #define FUNC_DECL_3(func, one, two, three) FUNC_DECL_(func, #one, #two, #three)
 
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 831a931..2516621 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -286,6 +286,12 @@ static int iproc_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 	iproc_set_bit(chip, IPROC_GPIO_INT_DE_OFFSET, gpio, dual_edge);
 	iproc_set_bit(chip, IPROC_GPIO_INT_EDGE_OFFSET, gpio,
 		       rising_or_high);
+
+	if (type & IRQ_TYPE_EDGE_BOTH)
+		irq_set_handler_locked(d, handle_edge_irq);
+	else
+		irq_set_handler_locked(d, handle_level_irq);
+
 	raw_spin_unlock_irqrestore(&chip->lock, flags);
 
 	dev_dbg(chip->dev,
@@ -843,7 +849,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
 							"gpio-ranges");
 
 	/* optional GPIO interrupt support */
-	irq = platform_get_irq(pdev, 0);
+	irq = platform_get_irq_optional(pdev, 0);
 	if (irq > 0) {
 		struct irq_chip *irqc;
 		struct gpio_irq_chip *girq;
@@ -868,7 +874,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
 			return -ENOMEM;
 		girq->parents[0] = irq;
 		girq->default_type = IRQ_TYPE_NONE;
-		girq->handler = handle_simple_irq;
+		girq->handler = handle_bad_irq;
 	}
 
 	ret = gpiochip_add_data(gc, chip);
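
The iproc change above keeps the trigger programming as before but now also swaps the flow handler to match the requested type: edge interrupts get handle_edge_irq, everything else gets handle_level_irq, and lines stay on handle_bad_irq until a type has been set. Distilled to a pattern, as a hedged sketch with a hypothetical callback name and the register programming omitted:

/* Sketch of the handler-selection pattern only; not the iproc callback. */
#include <linux/irq.h>

static int my_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
	/* ... program polarity/level and edge/level mode in hardware ... */

	if (type & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(d, handle_edge_irq);
	else
		irq_set_handler_locked(d, handle_level_irq);

	return 0;
}
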
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index e2f72dc..7b6409e 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -560,7 +560,6 @@ static void __maybe_unused madera_pin_dbg_show(struct pinctrl_dev *pctldev,
 		seq_puts(s, " SCHMITT");
 }
 
-
 static const struct pinctrl_ops madera_pin_group_ops = {
 	.get_groups_count = madera_get_groups_count,
 	.get_group_name = madera_get_group_name,
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 46600d9..446d84f 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1376,8 +1376,15 @@ void devm_pinctrl_put(struct pinctrl *p)
 }
 EXPORT_SYMBOL_GPL(devm_pinctrl_put);
 
-int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
-			 bool dup)
+/**
+ * pinctrl_register_mappings() - register a set of pin controller mappings
+ * @maps: the pincontrol mappings table to register. Note the pinctrl-core
+ *	keeps a reference to the passed in maps, so they should _not_ be
+ *	marked with __initdata.
+ * @num_maps: the number of maps in the mapping table
+ */
+int pinctrl_register_mappings(const struct pinctrl_map *maps,
+			      unsigned num_maps)
 {
 	int i, ret;
 	struct pinctrl_maps *maps_node;
@@ -1430,17 +1437,8 @@ int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
 	if (!maps_node)
 		return -ENOMEM;
 
+	maps_node->maps = maps;
 	maps_node->num_maps = num_maps;
-	if (dup) {
-		maps_node->maps = kmemdup(maps, sizeof(*maps) * num_maps,
-					  GFP_KERNEL);
-		if (!maps_node->maps) {
-			kfree(maps_node);
-			return -ENOMEM;
-		}
-	} else {
-		maps_node->maps = maps;
-	}
 
 	mutex_lock(&pinctrl_maps_mutex);
 	list_add_tail(&maps_node->node, &pinctrl_maps);
@@ -1448,22 +1446,14 @@ int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
 
 	return 0;
 }
-
-/**
- * pinctrl_register_mappings() - register a set of pin controller mappings
- * @maps: the pincontrol mappings table to register. This should probably be
- *	marked with __initdata so it can be discarded after boot. This
- *	function will perform a shallow copy for the mapping entries.
- * @num_maps: the number of maps in the mapping table
- */
-int pinctrl_register_mappings(const struct pinctrl_map *maps,
-			      unsigned num_maps)
-{
-	return pinctrl_register_map(maps, num_maps, true);
-}
 EXPORT_SYMBOL_GPL(pinctrl_register_mappings);
 
-void pinctrl_unregister_map(const struct pinctrl_map *map)
+/**
+ * pinctrl_unregister_mappings() - unregister a set of pin controller mappings
+ * @map: the pincontrol mappings table passed to pinctrl_register_mappings()
+ *	when registering the mappings.
+ */
+void pinctrl_unregister_mappings(const struct pinctrl_map *map)
 {
 	struct pinctrl_maps *maps_node;
 
@@ -1478,6 +1468,7 @@ void pinctrl_unregister_map(const struct pinctrl_map *map)
 	}
 	mutex_unlock(&pinctrl_maps_mutex);
 }
+EXPORT_SYMBOL_GPL(pinctrl_unregister_mappings);
 
 /**
  * pinctrl_force_sleep() - turn a given controller device into sleep state
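
With the dup logic gone, pinctrl_register_mappings() stores the caller's pointer directly, and pinctrl_unregister_mappings() is exported so modular users can undo a registration. A minimal sketch of a caller under the new contract; the device, controller and group names are hypothetical:

/* Hypothetical consumer of the updated API; the names are made up. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pinctrl/machine.h>

/* Must stay resident: the core now keeps this pointer, so no __initdata. */
static const struct pinctrl_map board_pinmux_map[] = {
	PIN_MAP_MUX_GROUP_DEFAULT("hypothetical-dev", "pinctrl-foo",
				  "i2c0_grp", "i2c0"),
};

static int __init board_pinmux_init(void)
{
	return pinctrl_register_mappings(board_pinmux_map,
					 ARRAY_SIZE(board_pinmux_map));
}

static void __exit board_pinmux_exit(void)
{
	/* newly exported, so modules can clean up after themselves */
	pinctrl_unregister_mappings(board_pinmux_map);
}
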
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 7f34167..840103c 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -236,10 +236,6 @@ extern struct pinctrl_gpio_range *
 pinctrl_find_gpio_range_from_pin_nolock(struct pinctrl_dev *pctldev,
 					unsigned int pin);
 
-int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
-			 bool dup);
-void pinctrl_unregister_map(const struct pinctrl_map *map);
-
 extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
 extern int pinctrl_force_default(struct pinctrl_dev *pctldev);
 
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 674920d..9357f7c 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -51,7 +51,7 @@ void pinctrl_dt_free_maps(struct pinctrl *p)
 	struct pinctrl_dt_map *dt_map, *n1;
 
 	list_for_each_entry_safe(dt_map, n1, &p->dt_maps, node) {
-		pinctrl_unregister_map(dt_map->map);
+		pinctrl_unregister_mappings(dt_map->map);
 		list_del(&dt_map->node);
 		dt_free_map(dt_map->pctldev, dt_map->map,
 			    dt_map->num_maps);
@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
 	dt_map->num_maps = num_maps;
 	list_add_tail(&dt_map->node, &p->dt_maps);
 
-	return pinctrl_register_map(map, num_maps, false);
+	return pinctrl_register_mappings(map, num_maps);
 
 err_free_map:
 	dt_free_map(pctldev, map, num_maps);
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 3ea9ce3..de775a8 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -137,6 +137,13 @@
 	help
 	  Say Y here to enable the imx8mn pinctrl driver
 
+config PINCTRL_IMX8MP
+	bool "IMX8MP pinctrl driver"
+	depends on ARCH_MXC && ARM64
+	select PINCTRL_IMX
+	help
+	  Say Y here to enable the imx8mp pinctrl driver
+
 config PINCTRL_IMX8MQ
 	bool "IMX8MQ pinctrl driver"
 	depends on ARCH_MXC && ARM64
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 78e9140..0ebd3af 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_PINCTRL_IMX7ULP)	+= pinctrl-imx7ulp.o
 obj-$(CONFIG_PINCTRL_IMX8MM)	+= pinctrl-imx8mm.o
 obj-$(CONFIG_PINCTRL_IMX8MN)	+= pinctrl-imx8mn.o
+obj-$(CONFIG_PINCTRL_IMX8MP)	+= pinctrl-imx8mp.o
 obj-$(CONFIG_PINCTRL_IMX8MQ)	+= pinctrl-imx8mq.o
 obj-$(CONFIG_PINCTRL_IMX8QM)	+= pinctrl-imx8qm.o
 obj-$(CONFIG_PINCTRL_IMX8QXP)	+= pinctrl-imx8qxp.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mp.c b/drivers/pinctrl/freescale/pinctrl-imx8mp.c
new file mode 100644
index 0000000..e3f644c
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mp.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-imx.h"
+
+enum imx8mp_pads {
+	MX8MP_IOMUXC_RESERVE0 = 0,
+	MX8MP_IOMUXC_RESERVE1 = 1,
+	MX8MP_IOMUXC_RESERVE2 = 2,
+	MX8MP_IOMUXC_RESERVE3 = 3,
+	MX8MP_IOMUXC_RESERVE4 = 4,
+	MX8MP_IOMUXC_GPIO1_IO00 = 5,
+	MX8MP_IOMUXC_GPIO1_IO01 = 6,
+	MX8MP_IOMUXC_GPIO1_IO02 = 7,
+	MX8MP_IOMUXC_GPIO1_IO03 = 8,
+	MX8MP_IOMUXC_GPIO1_IO04 = 9,
+	MX8MP_IOMUXC_GPIO1_IO05 = 10,
+	MX8MP_IOMUXC_GPIO1_IO06 = 11,
+	MX8MP_IOMUXC_GPIO1_IO07 = 12,
+	MX8MP_IOMUXC_GPIO1_IO08 = 13,
+	MX8MP_IOMUXC_GPIO1_IO09 = 14,
+	MX8MP_IOMUXC_GPIO1_IO10 = 15,
+	MX8MP_IOMUXC_GPIO1_IO11 = 16,
+	MX8MP_IOMUXC_GPIO1_IO12 = 17,
+	MX8MP_IOMUXC_GPIO1_IO13 = 18,
+	MX8MP_IOMUXC_GPIO1_IO14 = 19,
+	MX8MP_IOMUXC_GPIO1_IO15 = 20,
+	MX8MP_IOMUXC_ENET_MDC = 21,
+	MX8MP_IOMUXC_ENET_MDIO = 22,
+	MX8MP_IOMUXC_ENET_TD3 = 23,
+	MX8MP_IOMUXC_ENET_TD2 = 24,
+	MX8MP_IOMUXC_ENET_TD1 = 25,
+	MX8MP_IOMUXC_ENET_TD0 = 26,
+	MX8MP_IOMUXC_ENET_TX_CTL = 27,
+	MX8MP_IOMUXC_ENET_TXC = 28,
+	MX8MP_IOMUXC_ENET_RX_CTL = 29,
+	MX8MP_IOMUXC_ENET_RXC = 30,
+	MX8MP_IOMUXC_ENET_RD0 = 31,
+	MX8MP_IOMUXC_ENET_RD1 = 32,
+	MX8MP_IOMUXC_ENET_RD2 = 33,
+	MX8MP_IOMUXC_ENET_RD3 = 34,
+	MX8MP_IOMUXC_SD1_CLK = 35,
+	MX8MP_IOMUXC_SD1_CMD = 36,
+	MX8MP_IOMUXC_SD1_DATA0 = 37,
+	MX8MP_IOMUXC_SD1_DATA1 = 38,
+	MX8MP_IOMUXC_SD1_DATA2 = 39,
+	MX8MP_IOMUXC_SD1_DATA3 = 40,
+	MX8MP_IOMUXC_SD1_DATA4 = 41,
+	MX8MP_IOMUXC_SD1_DATA5 = 42,
+	MX8MP_IOMUXC_SD1_DATA6 = 43,
+	MX8MP_IOMUXC_SD1_DATA7 = 44,
+	MX8MP_IOMUXC_SD1_RESET_B = 45,
+	MX8MP_IOMUXC_SD1_STROBE = 46,
+	MX8MP_IOMUXC_SD2_CD_B = 47,
+	MX8MP_IOMUXC_SD2_CLK = 48,
+	MX8MP_IOMUXC_SD2_CMD = 49,
+	MX8MP_IOMUXC_SD2_DATA0 = 50,
+	MX8MP_IOMUXC_SD2_DATA1 = 51,
+	MX8MP_IOMUXC_SD2_DATA2 = 52,
+	MX8MP_IOMUXC_SD2_DATA3 = 53,
+	MX8MP_IOMUXC_SD2_RESET_B = 54,
+	MX8MP_IOMUXC_SD2_WP = 55,
+	MX8MP_IOMUXC_NAND_ALE = 56,
+	MX8MP_IOMUXC_NAND_CE0_B = 57,
+	MX8MP_IOMUXC_NAND_CE1_B = 58,
+	MX8MP_IOMUXC_NAND_CE2_B = 59,
+	MX8MP_IOMUXC_NAND_CE3_B = 60,
+	MX8MP_IOMUXC_NAND_CLE = 61,
+	MX8MP_IOMUXC_NAND_DATA00 = 62,
+	MX8MP_IOMUXC_NAND_DATA01 = 63,
+	MX8MP_IOMUXC_NAND_DATA02 = 64,
+	MX8MP_IOMUXC_NAND_DATA03 = 65,
+	MX8MP_IOMUXC_NAND_DATA04 = 66,
+	MX8MP_IOMUXC_NAND_DATA05 = 67,
+	MX8MP_IOMUXC_NAND_DATA06 = 68,
+	MX8MP_IOMUXC_NAND_DATA07 = 69,
+	MX8MP_IOMUXC_NAND_DQS = 70,
+	MX8MP_IOMUXC_NAND_RE_B = 71,
+	MX8MP_IOMUXC_NAND_READY_B = 72,
+	MX8MP_IOMUXC_NAND_WE_B = 73,
+	MX8MP_IOMUXC_NAND_WP_B = 74,
+	MX8MP_IOMUXC_SAI5_RXFS = 75,
+	MX8MP_IOMUXC_SAI5_RXC = 76,
+	MX8MP_IOMUXC_SAI5_RXD0 = 77,
+	MX8MP_IOMUXC_SAI5_RXD1 = 78,
+	MX8MP_IOMUXC_SAI5_RXD2 = 79,
+	MX8MP_IOMUXC_SAI5_RXD3 = 80,
+	MX8MP_IOMUXC_SAI5_MCLK = 81,
+	MX8MP_IOMUXC_SAI1_RXFS = 82,
+	MX8MP_IOMUXC_SAI1_RXC = 83,
+	MX8MP_IOMUXC_SAI1_RXD0 = 84,
+	MX8MP_IOMUXC_SAI1_RXD1 = 85,
+	MX8MP_IOMUXC_SAI1_RXD2 = 86,
+	MX8MP_IOMUXC_SAI1_RXD3 = 87,
+	MX8MP_IOMUXC_SAI1_RXD4 = 88,
+	MX8MP_IOMUXC_SAI1_RXD5 = 89,
+	MX8MP_IOMUXC_SAI1_RXD6 = 90,
+	MX8MP_IOMUXC_SAI1_RXD7 = 91,
+	MX8MP_IOMUXC_SAI1_TXFS = 92,
+	MX8MP_IOMUXC_SAI1_TXC = 93,
+	MX8MP_IOMUXC_SAI1_TXD0 = 94,
+	MX8MP_IOMUXC_SAI1_TXD1 = 95,
+	MX8MP_IOMUXC_SAI1_TXD2 = 96,
+	MX8MP_IOMUXC_SAI1_TXD3 = 97,
+	MX8MP_IOMUXC_SAI1_TXD4 = 98,
+	MX8MP_IOMUXC_SAI1_TXD5 = 99,
+	MX8MP_IOMUXC_SAI1_TXD6 = 100,
+	MX8MP_IOMUXC_SAI1_TXD7 = 101,
+	MX8MP_IOMUXC_SAI1_MCLK = 102,
+	MX8MP_IOMUXC_SAI2_RXFS = 103,
+	MX8MP_IOMUXC_SAI2_RXC = 104,
+	MX8MP_IOMUXC_SAI2_RXD0 = 105,
+	MX8MP_IOMUXC_SAI2_TXFS = 106,
+	MX8MP_IOMUXC_SAI2_TXC = 107,
+	MX8MP_IOMUXC_SAI2_TXD0 = 108,
+	MX8MP_IOMUXC_SAI2_MCLK = 109,
+	MX8MP_IOMUXC_SAI3_RXFS = 110,
+	MX8MP_IOMUXC_SAI3_RXC = 111,
+	MX8MP_IOMUXC_SAI3_RXD = 112,
+	MX8MP_IOMUXC_SAI3_TXFS = 113,
+	MX8MP_IOMUXC_SAI3_TXC = 114,
+	MX8MP_IOMUXC_SAI3_TXD = 115,
+	MX8MP_IOMUXC_SAI3_MCLK = 116,
+	MX8MP_IOMUXC_SPDIF_TX = 117,
+	MX8MP_IOMUXC_SPDIF_RX = 118,
+	MX8MP_IOMUXC_SPDIF_EXT_CLK = 119,
+	MX8MP_IOMUXC_ECSPI1_SCLK = 120,
+	MX8MP_IOMUXC_ECSPI1_MOSI = 121,
+	MX8MP_IOMUXC_ECSPI1_MISO = 122,
+	MX8MP_IOMUXC_ECSPI1_SS0 = 123,
+	MX8MP_IOMUXC_ECSPI2_SCLK = 124,
+	MX8MP_IOMUXC_ECSPI2_MOSI = 125,
+	MX8MP_IOMUXC_ECSPI2_MISO = 126,
+	MX8MP_IOMUXC_ECSPI2_SS0 = 127,
+	MX8MP_IOMUXC_I2C1_SCL = 128,
+	MX8MP_IOMUXC_I2C1_SDA = 129,
+	MX8MP_IOMUXC_I2C2_SCL = 130,
+	MX8MP_IOMUXC_I2C2_SDA = 131,
+	MX8MP_IOMUXC_I2C3_SCL = 132,
+	MX8MP_IOMUXC_I2C3_SDA = 133,
+	MX8MP_IOMUXC_I2C4_SCL = 134,
+	MX8MP_IOMUXC_I2C4_SDA = 135,
+	MX8MP_IOMUXC_UART1_RXD = 136,
+	MX8MP_IOMUXC_UART1_TXD = 137,
+	MX8MP_IOMUXC_UART2_RXD = 138,
+	MX8MP_IOMUXC_UART2_TXD = 139,
+	MX8MP_IOMUXC_UART3_RXD = 140,
+	MX8MP_IOMUXC_UART3_TXD = 141,
+	MX8MP_IOMUXC_UART4_RXD = 142,
+	MX8MP_IOMUXC_UART4_TXD = 143,
+	MX8MP_IOMUXC_HDMI_DDC_SCL = 144,
+	MX8MP_IOMUXC_HDMI_DDC_SDA = 145,
+	MX8MP_IOMUXC_HDMI_CEC = 146,
+	MX8MP_IOMUXC_HDMI_HPD = 147,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx8mp_pinctrl_pads[] = {
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE0),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE1),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE2),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE3),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE4),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO00),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO01),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO02),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO03),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO04),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO05),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO06),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO07),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO08),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO09),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO10),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO11),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO12),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO13),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO14),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO15),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_MDC),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_MDIO),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD3),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD2),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD1),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD0),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TX_CTL),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TXC),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RX_CTL),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RXC),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD0),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD1),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD2),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD3),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_CLK),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_CMD),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA0),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA1),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA2),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA3),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA4),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA5),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA6),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA7),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_RESET_B),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_STROBE),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_CD_B),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_CLK),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_CMD),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA0),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA1),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA2),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA3),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_RESET_B),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_WP),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_ALE),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE0_B),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE1_B),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE2_B),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE3_B),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CLE),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA00),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA01),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA02),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA03),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA04),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA05),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA06),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA07),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DQS),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_RE_B),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_READY_B),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_WE_B),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_WP_B),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXFS),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXC),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD0),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD1),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD2),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD3),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_MCLK),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXFS),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXC),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD0),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD1),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD2),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD3),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD4),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD5),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD6),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD7),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXFS),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXC),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD0),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD1),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD2),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD3),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD4),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD5),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD6),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD7),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_MCLK),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_RXFS),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_RXC),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_RXD0),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_TXFS),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_TXC),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_TXD0),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_MCLK),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_RXFS),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_RXC),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_RXD),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_TXFS),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_TXC),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_TXD),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_MCLK),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SPDIF_TX),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SPDIF_RX),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_SPDIF_EXT_CLK),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_SCLK),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_MOSI),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_MISO),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_SS0),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_SCLK),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_MOSI),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_MISO),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_SS0),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C1_SCL),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C1_SDA),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C2_SCL),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C2_SDA),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C3_SCL),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C3_SDA),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C4_SCL),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C4_SDA),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART1_RXD),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART1_TXD),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART2_RXD),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART2_TXD),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART3_RXD),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART3_TXD),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART4_RXD),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART4_TXD),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_DDC_SCL),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_DDC_SDA),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_CEC),
+	IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_HPD),
+};
+
+static const struct imx_pinctrl_soc_info imx8mp_pinctrl_info = {
+	.pins = imx8mp_pinctrl_pads,
+	.npins = ARRAY_SIZE(imx8mp_pinctrl_pads),
+	.gpr_compatible = "fsl,imx8mp-iomuxc-gpr",
+};
+
+static const struct of_device_id imx8mp_pinctrl_of_match[] = {
+	{ .compatible = "fsl,imx8mp-iomuxc", .data = &imx8mp_pinctrl_info, },
+	{ /* sentinel */ }
+};
+
+static int imx8mp_pinctrl_probe(struct platform_device *pdev)
+{
+	return imx_pinctrl_probe(pdev, &imx8mp_pinctrl_info);
+}
+
+static struct platform_driver imx8mp_pinctrl_driver = {
+	.driver = {
+		.name = "imx8mp-pinctrl",
+		.of_match_table = of_match_ptr(imx8mp_pinctrl_of_match),
+	},
+	.probe = imx8mp_pinctrl_probe,
+};
+
+static int __init imx8mp_pinctrl_init(void)
+{
+	return platform_driver_register(&imx8mp_pinctrl_driver);
+}
+arch_initcall(imx8mp_pinctrl_init);
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 6091947..ee440ec 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -31,6 +31,19 @@
 	  Cherryview/Braswell pinctrl driver provides an interface that
 	  allows configuring of SoC pins and using them as GPIOs.
 
+config PINCTRL_LYNXPOINT
+	tristate "Intel Lynxpoint pinctrl and GPIO driver"
+	depends on ACPI
+	select PINMUX
+	select PINCONF
+	select GENERIC_PINCONF
+	select GPIOLIB
+	select GPIOLIB_IRQCHIP
+	help
+	  Lynxpoint is the PCH of Intel Haswell. This pinctrl driver
+	  provides an interface that allows configuring of PCH pins and
+	  using them as GPIOs.
+
 config PINCTRL_MERRIFIELD
 	tristate "Intel Merrifield pinctrl driver"
 	depends on X86_INTEL_MID
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 7e620b4..f60f99c 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -3,6 +3,7 @@
 
 obj-$(CONFIG_PINCTRL_BAYTRAIL)		+= pinctrl-baytrail.o
 obj-$(CONFIG_PINCTRL_CHERRYVIEW)	+= pinctrl-cherryview.o
+obj-$(CONFIG_PINCTRL_LYNXPOINT)		+= pinctrl-lynxpoint.o
 obj-$(CONFIG_PINCTRL_MERRIFIELD)	+= pinctrl-merrifield.o
 obj-$(CONFIG_PINCTRL_INTEL)		+= pinctrl-intel.o
 obj-$(CONFIG_PINCTRL_BROXTON)		+= pinctrl-broxton.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 55141d5..b409642f 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -93,7 +93,7 @@
 #define BYT_DEFAULT_GPIO_MUX	0
 #define BYT_ALTER_GPIO_MUX	1
 
-struct byt_gpio_pin_context {
+struct intel_pad_context {
 	u32 conf0;
 	u32 val;
 };
@@ -105,16 +105,6 @@ struct byt_gpio_pin_context {
 		.pad_map	= (map),\
 	}
 
-struct byt_gpio {
-	struct gpio_chip chip;
-	struct platform_device *pdev;
-	struct pinctrl_dev *pctl_dev;
-	struct pinctrl_desc pctl_desc;
-	const struct intel_pinctrl_soc_data *soc_data;
-	struct intel_community *communities_copy;
-	struct byt_gpio_pin_context *saved_context;
-};
-
 /* SCORE pins, aka GPIOC_<pin_no> or GPIO_S0_SC[<pin_no>] */
 static const struct pinctrl_pin_desc byt_score_pins[] = {
 	PINCTRL_PIN(0, "SATA_GP0"),
@@ -550,14 +540,14 @@ static const struct intel_pinctrl_soc_data *byt_soc_data[] = {
 
 static DEFINE_RAW_SPINLOCK(byt_lock);
 
-static struct intel_community *byt_get_community(struct byt_gpio *vg,
+static struct intel_community *byt_get_community(struct intel_pinctrl *vg,
 						 unsigned int pin)
 {
 	struct intel_community *comm;
 	int i;
 
-	for (i = 0; i < vg->soc_data->ncommunities; i++) {
-		comm = vg->communities_copy + i;
+	for (i = 0; i < vg->ncommunities; i++) {
+		comm = vg->communities + i;
 		if (pin < comm->pin_base + comm->npins && pin >= comm->pin_base)
 			return comm;
 	}
@@ -565,7 +555,7 @@ static struct intel_community *byt_get_community(struct byt_gpio *vg,
 	return NULL;
 }
 
-static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
+static void __iomem *byt_gpio_reg(struct intel_pinctrl *vg, unsigned int offset,
 				  int reg)
 {
 	struct intel_community *comm = byt_get_community(vg, offset);
@@ -592,17 +582,17 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
 
 static int byt_get_groups_count(struct pinctrl_dev *pctldev)
 {
-	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+	struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
 
-	return vg->soc_data->ngroups;
+	return vg->soc->ngroups;
 }
 
 static const char *byt_get_group_name(struct pinctrl_dev *pctldev,
 				      unsigned int selector)
 {
-	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+	struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
 
-	return vg->soc_data->groups[selector].name;
+	return vg->soc->groups[selector].name;
 }
 
 static int byt_get_group_pins(struct pinctrl_dev *pctldev,
@@ -610,10 +600,10 @@ static int byt_get_group_pins(struct pinctrl_dev *pctldev,
 			      const unsigned int **pins,
 			      unsigned int *num_pins)
 {
-	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+	struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
 
-	*pins		= vg->soc_data->groups[selector].pins;
-	*num_pins	= vg->soc_data->groups[selector].npins;
+	*pins		= vg->soc->groups[selector].pins;
+	*num_pins	= vg->soc->groups[selector].npins;
 
 	return 0;
 }
@@ -626,17 +616,17 @@ static const struct pinctrl_ops byt_pinctrl_ops = {
 
 static int byt_get_functions_count(struct pinctrl_dev *pctldev)
 {
-	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+	struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
 
-	return vg->soc_data->nfunctions;
+	return vg->soc->nfunctions;
 }
 
 static const char *byt_get_function_name(struct pinctrl_dev *pctldev,
 					 unsigned int selector)
 {
-	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+	struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
 
-	return vg->soc_data->functions[selector].name;
+	return vg->soc->functions[selector].name;
 }
 
 static int byt_get_function_groups(struct pinctrl_dev *pctldev,
@@ -644,15 +634,15 @@ static int byt_get_function_groups(struct pinctrl_dev *pctldev,
 				   const char * const **groups,
 				   unsigned int *num_groups)
 {
-	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+	struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
 
-	*groups		= vg->soc_data->functions[selector].groups;
-	*num_groups	= vg->soc_data->functions[selector].ngroups;
+	*groups		= vg->soc->functions[selector].groups;
+	*num_groups	= vg->soc->functions[selector].ngroups;
 
 	return 0;
 }
 
-static void byt_set_group_simple_mux(struct byt_gpio *vg,
+static void byt_set_group_simple_mux(struct intel_pinctrl *vg,
 				     const struct intel_pingroup group,
 				     unsigned int func)
 {
@@ -667,7 +657,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
 
 		padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
 		if (!padcfg0) {
-			dev_warn(&vg->pdev->dev,
+			dev_warn(vg->dev,
 				 "Group %s, pin %i not muxed (no padcfg0)\n",
 				 group.name, i);
 			continue;
@@ -682,7 +672,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
 	raw_spin_unlock_irqrestore(&byt_lock, flags);
 }
 
-static void byt_set_group_mixed_mux(struct byt_gpio *vg,
+static void byt_set_group_mixed_mux(struct intel_pinctrl *vg,
 				    const struct intel_pingroup group,
 				    const unsigned int *func)
 {
@@ -697,7 +687,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
 
 		padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
 		if (!padcfg0) {
-			dev_warn(&vg->pdev->dev,
+			dev_warn(vg->dev,
 				 "Group %s, pin %i not muxed (no padcfg0)\n",
 				 group.name, i);
 			continue;
@@ -715,9 +705,9 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
 static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
 		       unsigned int group_selector)
 {
-	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
-	const struct intel_function func = vg->soc_data->functions[func_selector];
-	const struct intel_pingroup group = vg->soc_data->groups[group_selector];
+	struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
+	const struct intel_function func = vg->soc->functions[func_selector];
+	const struct intel_pingroup group = vg->soc->groups[group_selector];
 
 	if (group.modes)
 		byt_set_group_mixed_mux(vg, group, group.modes);
@@ -729,22 +719,22 @@ static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
 	return 0;
 }
 
-static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned int offset)
+static u32 byt_get_gpio_mux(struct intel_pinctrl *vg, unsigned int offset)
 {
 	/* SCORE pin 92-93 */
-	if (!strcmp(vg->soc_data->uid, BYT_SCORE_ACPI_UID) &&
+	if (!strcmp(vg->soc->uid, BYT_SCORE_ACPI_UID) &&
 	    offset >= 92 && offset <= 93)
 		return BYT_ALTER_GPIO_MUX;
 
 	/* SUS pin 11-21 */
-	if (!strcmp(vg->soc_data->uid, BYT_SUS_ACPI_UID) &&
+	if (!strcmp(vg->soc->uid, BYT_SUS_ACPI_UID) &&
 	    offset >= 11 && offset <= 21)
 		return BYT_ALTER_GPIO_MUX;
 
 	return BYT_DEFAULT_GPIO_MUX;
 }
 
-static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
+static void byt_gpio_clear_triggering(struct intel_pinctrl *vg, unsigned int offset)
 {
 	void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
 	unsigned long flags;
@@ -752,7 +742,13 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
 
 	raw_spin_lock_irqsave(&byt_lock, flags);
 	value = readl(reg);
-	value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+
+	/* Do not clear direct-irq enabled IRQs (from gpio_disable_free) */
+	if (value & BYT_DIRECT_IRQ_EN)
+		/* nothing to do */ ;
+	else
+		value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+
 	writel(value, reg);
 	raw_spin_unlock_irqrestore(&byt_lock, flags);
 }
@@ -761,7 +757,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
 				   struct pinctrl_gpio_range *range,
 				   unsigned int offset)
 {
-	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+	struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
 	void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
 	u32 value, gpio_mux;
 	unsigned long flags;
@@ -784,13 +780,12 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
 		value |= gpio_mux;
 		writel(value, reg);
 
-		dev_warn(&vg->pdev->dev, FW_BUG
-			 "pin %u forcibly re-configured as GPIO\n", offset);
+		dev_warn(vg->dev, FW_BUG "pin %u forcibly re-configured as GPIO\n", offset);
 	}
 
 	raw_spin_unlock_irqrestore(&byt_lock, flags);
 
-	pm_runtime_get(&vg->pdev->dev);
+	pm_runtime_get(vg->dev);
 
 	return 0;
 }
@@ -799,10 +794,10 @@ static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev,
 				  struct pinctrl_gpio_range *range,
 				  unsigned int offset)
 {
-	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+	struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
 
 	byt_gpio_clear_triggering(vg, offset);
-	pm_runtime_put(&vg->pdev->dev);
+	pm_runtime_put(vg->dev);
 }
 
 static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
@@ -810,7 +805,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
 				  unsigned int offset,
 				  bool input)
 {
-	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+	struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
 	void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
 	void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
 	unsigned long flags;
@@ -822,15 +817,15 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
 	value &= ~BYT_DIR_MASK;
 	if (input)
 		value |= BYT_OUTPUT_EN;
-	else
+	else if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
 		/*
 		 * Before making any direction modifications, do a check if gpio
 		 * is set for direct IRQ.  On baytrail, setting GPIO to output
-		 * does not make sense, so let's at least warn the caller before
+		 * does not make sense, so let's at least inform the caller before
 		 * they shoot themselves in the foot.
 		 */
-		WARN(readl(conf_reg) & BYT_DIRECT_IRQ_EN,
-		     "Potential Error: Setting GPIO with direct_irq_en to output");
+		dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+
 	writel(value, val_reg);
 
 	raw_spin_unlock_irqrestore(&byt_lock, flags);
@@ -893,7 +888,7 @@ static int byt_set_pull_strength(u32 *reg, u16 strength)
 static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
 			      unsigned long *config)
 {
-	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+	struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
 	enum pin_config_param param = pinconf_to_config_param(*config);
 	void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
 	void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
@@ -978,7 +973,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
 			      unsigned long *configs,
 			      unsigned int num_configs)
 {
-	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+	struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
 	unsigned int param, arg;
 	void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
 	void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
@@ -1012,7 +1007,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
 			if (val & BYT_INPUT_EN) {
 				val &= ~BYT_INPUT_EN;
 				writel(val, val_reg);
-				dev_warn(&vg->pdev->dev,
+				dev_warn(vg->dev,
 					 "pin %u forcibly set to input mode\n",
 					 offset);
 			}
@@ -1034,7 +1029,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
 			if (val & BYT_INPUT_EN) {
 				val &= ~BYT_INPUT_EN;
 				writel(val, val_reg);
-				dev_warn(&vg->pdev->dev,
+				dev_warn(vg->dev,
 					 "pin %u forcibly set to input mode\n",
 					 offset);
 			}
@@ -1115,7 +1110,7 @@ static const struct pinctrl_desc byt_pinctrl_desc = {
 
 static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
 {
-	struct byt_gpio *vg = gpiochip_get_data(chip);
+	struct intel_pinctrl *vg = gpiochip_get_data(chip);
 	void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
 	unsigned long flags;
 	u32 val;
@@ -1129,7 +1124,7 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
 
 static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
 {
-	struct byt_gpio *vg = gpiochip_get_data(chip);
+	struct intel_pinctrl *vg = gpiochip_get_data(chip);
 	void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
 	unsigned long flags;
 	u32 old_val;
@@ -1148,7 +1143,7 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
 
 static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
 {
-	struct byt_gpio *vg = gpiochip_get_data(chip);
+	struct intel_pinctrl *vg = gpiochip_get_data(chip);
 	void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
 	unsigned long flags;
 	u32 value;
@@ -1161,9 +1156,9 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
 	raw_spin_unlock_irqrestore(&byt_lock, flags);
 
 	if (!(value & BYT_OUTPUT_EN))
-		return 0;
+		return GPIO_LINE_DIRECTION_OUT;
 	if (!(value & BYT_INPUT_EN))
-		return 1;
+		return GPIO_LINE_DIRECTION_IN;
 
 	return -EINVAL;
 }
@@ -1188,11 +1183,11 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
 
 static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 {
-	struct byt_gpio *vg = gpiochip_get_data(chip);
+	struct intel_pinctrl *vg = gpiochip_get_data(chip);
 	int i;
 	u32 conf0, val;
 
-	for (i = 0; i < vg->soc_data->npins; i++) {
+	for (i = 0; i < vg->soc->npins; i++) {
 		const struct intel_community *comm;
 		const char *pull_str = NULL;
 		const char *pull = NULL;
@@ -1202,7 +1197,7 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 		unsigned int pin;
 
 		raw_spin_lock_irqsave(&byt_lock, flags);
-		pin = vg->soc_data->pins[i].number;
+		pin = vg->soc->pins[i].number;
 		reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
 		if (!reg) {
 			seq_printf(s,
@@ -1297,7 +1292,7 @@ static const struct gpio_chip byt_gpio_chip = {
 static void byt_irq_ack(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct byt_gpio *vg = gpiochip_get_data(gc);
+	struct intel_pinctrl *vg = gpiochip_get_data(gc);
 	unsigned int offset = irqd_to_hwirq(d);
 	void __iomem *reg;
 
@@ -1313,7 +1308,7 @@ static void byt_irq_ack(struct irq_data *d)
 static void byt_irq_mask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct byt_gpio *vg = gpiochip_get_data(gc);
+	struct intel_pinctrl *vg = gpiochip_get_data(gc);
 
 	byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
 }
@@ -1321,7 +1316,7 @@ static void byt_irq_mask(struct irq_data *d)
 static void byt_irq_unmask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct byt_gpio *vg = gpiochip_get_data(gc);
+	struct intel_pinctrl *vg = gpiochip_get_data(gc);
 	unsigned int offset = irqd_to_hwirq(d);
 	unsigned long flags;
 	void __iomem *reg;
@@ -1359,7 +1354,7 @@ static void byt_irq_unmask(struct irq_data *d)
 
 static int byt_irq_type(struct irq_data *d, unsigned int type)
 {
-	struct byt_gpio *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+	struct intel_pinctrl *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
 	u32 offset = irqd_to_hwirq(d);
 	u32 value;
 	unsigned long flags;
@@ -1395,20 +1390,10 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
 	return 0;
 }
 
-static struct irq_chip byt_irqchip = {
-	.name		= "BYT-GPIO",
-	.irq_ack	= byt_irq_ack,
-	.irq_mask	= byt_irq_mask,
-	.irq_unmask	= byt_irq_unmask,
-	.irq_set_type	= byt_irq_type,
-	.flags		= IRQCHIP_SKIP_SET_WAKE,
-};
-
 static void byt_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
-	struct byt_gpio *vg = gpiochip_get_data(
-				irq_desc_get_handler_data(desc));
+	struct intel_pinctrl *vg = gpiochip_get_data(irq_desc_get_handler_data(desc));
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	u32 base, pin;
 	void __iomem *reg;
@@ -1420,7 +1405,7 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
 		reg = byt_gpio_reg(vg, base, BYT_INT_STAT_REG);
 
 		if (!reg) {
-			dev_warn(&vg->pdev->dev,
+			dev_warn(vg->dev,
 				 "Pin %i: could not retrieve interrupt status register\n",
 				 base);
 			continue;
@@ -1441,22 +1426,9 @@ static void byt_init_irq_valid_mask(struct gpio_chip *chip,
 				    unsigned long *valid_mask,
 				    unsigned int ngpios)
 {
-	/*
-	 * FIXME: currently the valid_mask is filled in as part of
-	 * initializing the irq_chip below in byt_gpio_irq_init_hw().
-	 * when converting this driver to the new way of passing the
-	 * gpio_irq_chip along when adding the gpio_chip, move the
-	 * mask initialization into this callback instead. Right now
-	 * this callback is here to make sure the mask gets allocated.
-	 */
-}
-
-static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
-{
-	struct byt_gpio *vg = gpiochip_get_data(chip);
-	struct device *dev = &vg->pdev->dev;
+	struct intel_pinctrl *vg = gpiochip_get_data(chip);
 	void __iomem *reg;
-	u32 base, value;
+	u32 value;
 	int i;
 
 	/*
@@ -1464,12 +1436,12 @@ static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
 	 * do not use direct IRQ mode. This will prevent spurious
 	 * interrupts from misconfigured pins.
 	 */
-	for (i = 0; i < vg->soc_data->npins; i++) {
-		unsigned int pin = vg->soc_data->pins[i].number;
+	for (i = 0; i < vg->soc->npins; i++) {
+		unsigned int pin = vg->soc->pins[i].number;
 
 		reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
 		if (!reg) {
-			dev_warn(&vg->pdev->dev,
+			dev_warn(vg->dev,
 				 "Pin %i: could not retrieve conf0 register\n",
 				 i);
 			continue;
@@ -1477,20 +1449,27 @@ static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
 
 		value = readl(reg);
 		if (value & BYT_DIRECT_IRQ_EN) {
-			clear_bit(i, chip->irq.valid_mask);
-			dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
+			clear_bit(i, valid_mask);
+			dev_dbg(vg->dev, "excluding GPIO %d from IRQ domain\n", i);
 		} else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
 			byt_gpio_clear_triggering(vg, i);
-			dev_dbg(dev, "disabling GPIO %d\n", i);
+			dev_dbg(vg->dev, "disabling GPIO %d\n", i);
 		}
 	}
+}
+
+static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
+{
+	struct intel_pinctrl *vg = gpiochip_get_data(chip);
+	void __iomem *reg;
+	u32 base, value;
 
 	/* clear interrupt status trigger registers */
-	for (base = 0; base < vg->soc_data->npins; base += 32) {
+	for (base = 0; base < vg->soc->npins; base += 32) {
 		reg = byt_gpio_reg(vg, base, BYT_INT_STAT_REG);
 
 		if (!reg) {
-			dev_warn(&vg->pdev->dev,
+			dev_warn(vg->dev,
 				 "Pin %i: could not retrieve irq status reg\n",
 				 base);
 			continue;
@@ -1501,7 +1480,7 @@ static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
 		   might be misconfigured in bios */
 		value = readl(reg);
 		if (value)
-			dev_err(&vg->pdev->dev,
+			dev_err(vg->dev,
 				"GPIO interrupt error, pins misconfigured. INT_STAT%u: 0x%08x\n",
 				base / 32, value);
 	}
@@ -1511,19 +1490,20 @@ static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
 
 static int byt_gpio_add_pin_ranges(struct gpio_chip *chip)
 {
-	struct byt_gpio *vg = gpiochip_get_data(chip);
-	struct device *dev = &vg->pdev->dev;
+	struct intel_pinctrl *vg = gpiochip_get_data(chip);
+	struct device *dev = vg->dev;
 	int ret;
 
-	ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0, vg->soc_data->npins);
+	ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0, vg->soc->npins);
 	if (ret)
 		dev_err(dev, "failed to add GPIO pin range\n");
 
 	return ret;
 }
 
-static int byt_gpio_probe(struct byt_gpio *vg)
+static int byt_gpio_probe(struct intel_pinctrl *vg)
 {
+	struct platform_device *pdev = to_platform_device(vg->dev);
 	struct gpio_chip *gc;
 	struct resource *irq_rc;
 	int ret;
@@ -1531,32 +1511,39 @@ static int byt_gpio_probe(struct byt_gpio *vg)
 	/* Set up gpio chip */
 	vg->chip	= byt_gpio_chip;
 	gc		= &vg->chip;
-	gc->label	= dev_name(&vg->pdev->dev);
+	gc->label	= dev_name(vg->dev);
 	gc->base	= -1;
 	gc->can_sleep	= false;
 	gc->add_pin_ranges = byt_gpio_add_pin_ranges;
-	gc->parent	= &vg->pdev->dev;
-	gc->ngpio	= vg->soc_data->npins;
-	gc->irq.init_valid_mask	= byt_init_irq_valid_mask;
+	gc->parent	= vg->dev;
+	gc->ngpio	= vg->soc->npins;
 
 #ifdef CONFIG_PM_SLEEP
-	vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
-				       sizeof(*vg->saved_context), GFP_KERNEL);
-	if (!vg->saved_context)
+	vg->context.pads = devm_kcalloc(vg->dev, gc->ngpio, sizeof(*vg->context.pads),
+					GFP_KERNEL);
+	if (!vg->context.pads)
 		return -ENOMEM;
 #endif
 
 	/* set up interrupts  */
-	irq_rc = platform_get_resource(vg->pdev, IORESOURCE_IRQ, 0);
+	irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (irq_rc && irq_rc->start) {
 		struct gpio_irq_chip *girq;
 
+		vg->irqchip.name = "BYT-GPIO";
+		vg->irqchip.irq_ack = byt_irq_ack;
+		vg->irqchip.irq_mask = byt_irq_mask;
+		vg->irqchip.irq_unmask = byt_irq_unmask;
+		vg->irqchip.irq_set_type = byt_irq_type;
+		vg->irqchip.flags = IRQCHIP_SKIP_SET_WAKE;
+
 		girq = &gc->irq;
-		girq->chip = &byt_irqchip;
+		girq->chip = &vg->irqchip;
 		girq->init_hw = byt_gpio_irq_init_hw;
+		girq->init_valid_mask = byt_init_irq_valid_mask;
 		girq->parent_handler = byt_gpio_irq_handler;
 		girq->num_parents = 1;
-		girq->parents = devm_kcalloc(&vg->pdev->dev, girq->num_parents,
+		girq->parents = devm_kcalloc(vg->dev, girq->num_parents,
 					     sizeof(*girq->parents), GFP_KERNEL);
 		if (!girq->parents)
 			return -ENOMEM;
@@ -1565,34 +1552,35 @@ static int byt_gpio_probe(struct byt_gpio *vg)
 		girq->handler = handle_bad_irq;
 	}
 
-	ret = devm_gpiochip_add_data(&vg->pdev->dev, gc, vg);
+	ret = devm_gpiochip_add_data(vg->dev, gc, vg);
 	if (ret) {
-		dev_err(&vg->pdev->dev, "failed adding byt-gpio chip\n");
+		dev_err(vg->dev, "failed adding byt-gpio chip\n");
 		return ret;
 	}
 
 	return ret;
 }
 
-static int byt_set_soc_data(struct byt_gpio *vg,
-			    const struct intel_pinctrl_soc_data *soc_data)
+static int byt_set_soc_data(struct intel_pinctrl *vg,
+			    const struct intel_pinctrl_soc_data *soc)
 {
+	struct platform_device *pdev = to_platform_device(vg->dev);
 	int i;
 
-	vg->soc_data = soc_data;
-	vg->communities_copy = devm_kcalloc(&vg->pdev->dev,
-					    soc_data->ncommunities,
-					    sizeof(*vg->communities_copy),
-					    GFP_KERNEL);
-	if (!vg->communities_copy)
+	vg->soc = soc;
+
+	vg->ncommunities = vg->soc->ncommunities;
+	vg->communities = devm_kcalloc(vg->dev, vg->ncommunities,
+				       sizeof(*vg->communities), GFP_KERNEL);
+	if (!vg->communities)
 		return -ENOMEM;
 
-	for (i = 0; i < soc_data->ncommunities; i++) {
-		struct intel_community *comm = vg->communities_copy + i;
+	for (i = 0; i < vg->soc->ncommunities; i++) {
+		struct intel_community *comm = vg->communities + i;
 
-		*comm = vg->soc_data->communities[i];
+		*comm = vg->soc->communities[i];
 
-		comm->pad_regs = devm_platform_ioremap_resource(vg->pdev, 0);
+		comm->pad_regs = devm_platform_ioremap_resource(pdev, 0);
 		if (IS_ERR(comm->pad_regs))
 			return PTR_ERR(comm->pad_regs);
 	}
@@ -1610,15 +1598,16 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
 {
 	const struct intel_pinctrl_soc_data *soc_data = NULL;
 	const struct intel_pinctrl_soc_data **soc_table;
+	struct device *dev = &pdev->dev;
 	struct acpi_device *acpi_dev;
-	struct byt_gpio *vg;
+	struct intel_pinctrl *vg;
 	int i, ret;
 
-	acpi_dev = ACPI_COMPANION(&pdev->dev);
+	acpi_dev = ACPI_COMPANION(dev);
 	if (!acpi_dev)
 		return -ENODEV;
 
-	soc_table = (const struct intel_pinctrl_soc_data **)device_get_match_data(&pdev->dev);
+	soc_table = (const struct intel_pinctrl_soc_data **)device_get_match_data(dev);
 
 	for (i = 0; soc_table[i]; i++) {
 		if (!strcmp(acpi_dev->pnp.unique_id, soc_table[i]->uid)) {
@@ -1630,26 +1619,26 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
 	if (!soc_data)
 		return -ENODEV;
 
-	vg = devm_kzalloc(&pdev->dev, sizeof(*vg), GFP_KERNEL);
+	vg = devm_kzalloc(dev, sizeof(*vg), GFP_KERNEL);
 	if (!vg)
 		return -ENOMEM;
 
-	vg->pdev = pdev;
+	vg->dev = dev;
 	ret = byt_set_soc_data(vg, soc_data);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to set soc data\n");
+		dev_err(dev, "failed to set soc data\n");
 		return ret;
 	}
 
-	vg->pctl_desc		= byt_pinctrl_desc;
-	vg->pctl_desc.name	= dev_name(&pdev->dev);
-	vg->pctl_desc.pins	= vg->soc_data->pins;
-	vg->pctl_desc.npins	= vg->soc_data->npins;
+	vg->pctldesc		= byt_pinctrl_desc;
+	vg->pctldesc.name	= dev_name(dev);
+	vg->pctldesc.pins	= vg->soc->pins;
+	vg->pctldesc.npins	= vg->soc->npins;
 
-	vg->pctl_dev = devm_pinctrl_register(&pdev->dev, &vg->pctl_desc, vg);
-	if (IS_ERR(vg->pctl_dev)) {
-		dev_err(&pdev->dev, "failed to register pinctrl driver\n");
-		return PTR_ERR(vg->pctl_dev);
+	vg->pctldev = devm_pinctrl_register(dev, &vg->pctldesc, vg);
+	if (IS_ERR(vg->pctldev)) {
+		dev_err(dev, "failed to register pinctrl driver\n");
+		return PTR_ERR(vg->pctldev);
 	}
 
 	ret = byt_gpio_probe(vg);
@@ -1657,7 +1646,7 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
 		return ret;
 
 	platform_set_drvdata(pdev, vg);
-	pm_runtime_enable(&pdev->dev);
+	pm_runtime_enable(dev);
 
 	return 0;
 }
@@ -1665,30 +1654,30 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int byt_gpio_suspend(struct device *dev)
 {
-	struct byt_gpio *vg = dev_get_drvdata(dev);
+	struct intel_pinctrl *vg = dev_get_drvdata(dev);
 	unsigned long flags;
 	int i;
 
 	raw_spin_lock_irqsave(&byt_lock, flags);
 
-	for (i = 0; i < vg->soc_data->npins; i++) {
+	for (i = 0; i < vg->soc->npins; i++) {
 		void __iomem *reg;
 		u32 value;
-		unsigned int pin = vg->soc_data->pins[i].number;
+		unsigned int pin = vg->soc->pins[i].number;
 
 		reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
 		if (!reg) {
-			dev_warn(&vg->pdev->dev,
+			dev_warn(vg->dev,
 				 "Pin %i: could not retrieve conf0 register\n",
 				 i);
 			continue;
 		}
 		value = readl(reg) & BYT_CONF0_RESTORE_MASK;
-		vg->saved_context[i].conf0 = value;
+		vg->context.pads[i].conf0 = value;
 
 		reg = byt_gpio_reg(vg, pin, BYT_VAL_REG);
 		value = readl(reg) & BYT_VAL_RESTORE_MASK;
-		vg->saved_context[i].val = value;
+		vg->context.pads[i].val = value;
 	}
 
 	raw_spin_unlock_irqrestore(&byt_lock, flags);
@@ -1697,29 +1686,29 @@ static int byt_gpio_suspend(struct device *dev)
 
 static int byt_gpio_resume(struct device *dev)
 {
-	struct byt_gpio *vg = dev_get_drvdata(dev);
+	struct intel_pinctrl *vg = dev_get_drvdata(dev);
 	unsigned long flags;
 	int i;
 
 	raw_spin_lock_irqsave(&byt_lock, flags);
 
-	for (i = 0; i < vg->soc_data->npins; i++) {
+	for (i = 0; i < vg->soc->npins; i++) {
 		void __iomem *reg;
 		u32 value;
-		unsigned int pin = vg->soc_data->pins[i].number;
+		unsigned int pin = vg->soc->pins[i].number;
 
 		reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
 		if (!reg) {
-			dev_warn(&vg->pdev->dev,
+			dev_warn(vg->dev,
 				 "Pin %i: could not retrieve conf0 register\n",
 				 i);
 			continue;
 		}
 		value = readl(reg);
 		if ((value & BYT_CONF0_RESTORE_MASK) !=
-		     vg->saved_context[i].conf0) {
+		     vg->context.pads[i].conf0) {
 			value &= ~BYT_CONF0_RESTORE_MASK;
-			value |= vg->saved_context[i].conf0;
+			value |= vg->context.pads[i].conf0;
 			writel(value, reg);
 			dev_info(dev, "restored pin %d conf0 %#08x", i, value);
 		}
@@ -1727,11 +1716,11 @@ static int byt_gpio_resume(struct device *dev)
 		reg = byt_gpio_reg(vg, pin, BYT_VAL_REG);
 		value = readl(reg);
 		if ((value & BYT_VAL_RESTORE_MASK) !=
-		     vg->saved_context[i].val) {
+		     vg->context.pads[i].val) {
 			u32 v;
 
 			v = value & ~BYT_VAL_RESTORE_MASK;
-			v |= vg->saved_context[i].val;
+			v |= vg->context.pads[i].val;
 			if (v != value) {
 				writel(v, reg);
 				dev_dbg(dev, "restored pin %d val %#08x\n",
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 60527b9..4c74fdd 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1289,7 +1289,10 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
 	direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
 	direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
 
-	return direction != CHV_PADCTRL0_GPIOCFG_GPO;
+	if (direction == CHV_PADCTRL0_GPIOCFG_GPO)
+		return GPIO_LINE_DIRECTION_OUT;
+
+	return GPIO_LINE_DIRECTION_IN;
 }
 
 static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 4860bc9..74fdfd2 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -8,8 +8,8 @@
  */
 
 #include <linux/acpi.h>
-#include <linux/interrupt.h>
 #include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
 #include <linux/log2.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -85,39 +85,6 @@ struct intel_community_context {
 	u32 *hostown;
 };
 
-struct intel_pinctrl_context {
-	struct intel_pad_context *pads;
-	struct intel_community_context *communities;
-};
-
-/**
- * struct intel_pinctrl - Intel pinctrl private structure
- * @dev: Pointer to the device structure
- * @lock: Lock to serialize register access
- * @pctldesc: Pin controller description
- * @pctldev: Pointer to the pin controller device
- * @chip: GPIO chip in this pin controller
- * @irqchip: IRQ chip in this pin controller
- * @soc: SoC/PCH specific pin configuration data
- * @communities: All communities in this pin controller
- * @ncommunities: Number of communities in this pin controller
- * @context: Configuration saved over system sleep
- * @irq: pinctrl/GPIO chip irq number
- */
-struct intel_pinctrl {
-	struct device *dev;
-	raw_spinlock_t lock;
-	struct pinctrl_desc pctldesc;
-	struct pinctrl_dev *pctldev;
-	struct gpio_chip chip;
-	struct irq_chip irqchip;
-	const struct intel_pinctrl_soc_data *soc;
-	struct intel_community *communities;
-	size_t ncommunities;
-	struct intel_pinctrl_context context;
-	int irq;
-};
-
 #define pin_to_padno(c, p)	((p) - (c)->pin_base)
 #define padgroup_offset(g, p)	((p) - (g)->base)
 
@@ -944,7 +911,10 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
 	if (padcfg0 & PADCFG0_PMODE_MASK)
 		return -EINVAL;
 
-	return !!(padcfg0 & PADCFG0_GPIOTXDIS);
+	if (padcfg0 & PADCFG0_GPIOTXDIS)
+		return GPIO_LINE_DIRECTION_IN;
+
+	return GPIO_LINE_DIRECTION_OUT;
 }
 
 static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -1160,8 +1130,8 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
 	return ret;
 }
 
-static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
-				     const struct intel_community *community)
+static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl,
+				const struct intel_community *community)
 {
 	int ret = 0, i;
 
@@ -1181,6 +1151,24 @@ static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
 	return ret;
 }
 
+static int intel_gpio_add_pin_ranges(struct gpio_chip *gc)
+{
+	struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+	int ret, i;
+
+	for (i = 0; i < pctrl->ncommunities; i++) {
+		struct intel_community *community = &pctrl->communities[i];
+
+		ret = intel_gpio_add_community_ranges(pctrl, community);
+		if (ret) {
+			dev_err(pctrl->dev, "failed to add GPIO pin range\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
 {
 	const struct intel_community *community;
@@ -1205,7 +1193,8 @@ static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
 
 static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
 {
-	int ret, i;
+	int ret;
+	struct gpio_irq_chip *girq;
 
 	pctrl->chip = intel_gpio_chip;
 
@@ -1214,6 +1203,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
 	pctrl->chip.label = dev_name(pctrl->dev);
 	pctrl->chip.parent = pctrl->dev;
 	pctrl->chip.base = -1;
+	pctrl->chip.add_pin_ranges = intel_gpio_add_pin_ranges;
 	pctrl->irq = irq;
 
 	/* Setup IRQ chip */
@@ -1225,26 +1215,9 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
 	pctrl->irqchip.irq_set_wake = intel_gpio_irq_wake;
 	pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND;
 
-	ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
-	if (ret) {
-		dev_err(pctrl->dev, "failed to register gpiochip\n");
-		return ret;
-	}
-
-	for (i = 0; i < pctrl->ncommunities; i++) {
-		struct intel_community *community = &pctrl->communities[i];
-
-		ret = intel_gpio_add_pin_ranges(pctrl, community);
-		if (ret) {
-			dev_err(pctrl->dev, "failed to add GPIO pin range\n");
-			return ret;
-		}
-	}
-
 	/*
-	 * We need to request the interrupt here (instead of providing chip
-	 * to the irq directly) because on some platforms several GPIO
-	 * controllers share the same interrupt line.
+	 * On some platforms several GPIO controllers share the same interrupt
+	 * line.
 	 */
 	ret = devm_request_irq(pctrl->dev, irq, intel_gpio_irq,
 			       IRQF_SHARED | IRQF_NO_THREAD,
@@ -1254,14 +1227,20 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
 		return ret;
 	}
 
-	ret = gpiochip_irqchip_add(&pctrl->chip, &pctrl->irqchip, 0,
-				   handle_bad_irq, IRQ_TYPE_NONE);
+	girq = &pctrl->chip.irq;
+	girq->chip = &pctrl->irqchip;
+	/* This will let us handle the IRQ in the driver */
+	girq->parent_handler = NULL;
+	girq->num_parents = 0;
+	girq->default_type = IRQ_TYPE_NONE;
+	girq->handler = handle_bad_irq;
+
+	ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
 	if (ret) {
-		dev_err(pctrl->dev, "failed to add irqchip\n");
+		dev_err(pctrl->dev, "failed to register gpiochip\n");
 		return ret;
 	}
 
-	gpiochip_set_chained_irqchip(&pctrl->chip, &pctrl->irqchip, irq, NULL);
 	return 0;
 }
 
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 34b38a3..c6f066f 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -10,7 +10,10 @@
 #ifndef PINCTRL_INTEL_H
 #define PINCTRL_INTEL_H
 
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
 #include <linux/pm.h>
+#include <linux/spinlock_types.h>
 
 struct pinctrl_pin_desc;
 struct platform_device;
@@ -174,6 +177,47 @@ struct intel_pinctrl_soc_data {
 	size_t ncommunities;
 };
 
+struct intel_pad_context;
+struct intel_community_context;
+
+/**
+ * struct intel_pinctrl_context - context to be saved during suspend-resume
+ * @pads: Opaque context per pad (driver dependent)
+ * @communities: Opaque context per community (driver dependent)
+ */
+struct intel_pinctrl_context {
+	struct intel_pad_context *pads;
+	struct intel_community_context *communities;
+};
+
+/**
+ * struct intel_pinctrl - Intel pinctrl private structure
+ * @dev: Pointer to the device structure
+ * @lock: Lock to serialize register access
+ * @pctldesc: Pin controller description
+ * @pctldev: Pointer to the pin controller device
+ * @chip: GPIO chip in this pin controller
+ * @irqchip: IRQ chip in this pin controller
+ * @soc: SoC/PCH specific pin configuration data
+ * @communities: All communities in this pin controller
+ * @ncommunities: Number of communities in this pin controller
+ * @context: Configuration saved over system sleep
+ * @irq: pinctrl/GPIO chip irq number
+ */
+struct intel_pinctrl {
+	struct device *dev;
+	raw_spinlock_t lock;
+	struct pinctrl_desc pctldesc;
+	struct pinctrl_dev *pctldev;
+	struct gpio_chip chip;
+	struct irq_chip irqchip;
+	const struct intel_pinctrl_soc_data *soc;
+	struct intel_community *communities;
+	size_t ncommunities;
+	struct intel_pinctrl_context context;
+	int irq;
+};
+
 int intel_pinctrl_probe_by_hid(struct platform_device *pdev);
 int intel_pinctrl_probe_by_uid(struct platform_device *pdev);
 
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
new file mode 100644
index 0000000..e928742
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -0,0 +1,975 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Lynxpoint PCH pinctrl/GPIO driver
+ *
+ * Copyright (c) 2012, 2019, Intel Corporation
+ * Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
+ *          Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "pinctrl-intel.h"
+
+#define COMMUNITY(p, n)			\
+	{				\
+		.pin_base	= (p),	\
+		.npins		= (n),	\
+	}
+
+static const struct pinctrl_pin_desc lptlp_pins[] = {
+	PINCTRL_PIN(0, "GP0_UART1_RXD"),
+	PINCTRL_PIN(1, "GP1_UART1_TXD"),
+	PINCTRL_PIN(2, "GP2_UART1_RTSB"),
+	PINCTRL_PIN(3, "GP3_UART1_CTSB"),
+	PINCTRL_PIN(4, "GP4_I2C0_SDA"),
+	PINCTRL_PIN(5, "GP5_I2C0_SCL"),
+	PINCTRL_PIN(6, "GP6_I2C1_SDA"),
+	PINCTRL_PIN(7, "GP7_I2C1_SCL"),
+	PINCTRL_PIN(8, "GP8"),
+	PINCTRL_PIN(9, "GP9"),
+	PINCTRL_PIN(10, "GP10"),
+	PINCTRL_PIN(11, "GP11_SMBALERTB"),
+	PINCTRL_PIN(12, "GP12_LANPHYPC"),
+	PINCTRL_PIN(13, "GP13"),
+	PINCTRL_PIN(14, "GP14"),
+	PINCTRL_PIN(15, "GP15"),
+	PINCTRL_PIN(16, "GP16_MGPIO9"),
+	PINCTRL_PIN(17, "GP17_MGPIO10"),
+	PINCTRL_PIN(18, "GP18_SRC0CLKRQB"),
+	PINCTRL_PIN(19, "GP19_SRC1CLKRQB"),
+	PINCTRL_PIN(20, "GP20_SRC2CLKRQB"),
+	PINCTRL_PIN(21, "GP21_SRC3CLKRQB"),
+	PINCTRL_PIN(22, "GP22_SRC4CLKRQB_TRST2"),
+	PINCTRL_PIN(23, "GP23_SRC5CLKRQB_TDI2"),
+	PINCTRL_PIN(24, "GP24_MGPIO0"),
+	PINCTRL_PIN(25, "GP25_USBWAKEOUTB"),
+	PINCTRL_PIN(26, "GP26_MGPIO5"),
+	PINCTRL_PIN(27, "GP27_MGPIO6"),
+	PINCTRL_PIN(28, "GP28_MGPIO7"),
+	PINCTRL_PIN(29, "GP29_SLP_WLANB_MGPIO3"),
+	PINCTRL_PIN(30, "GP30_SUSWARNB_SUSPWRDNACK_MGPIO1"),
+	PINCTRL_PIN(31, "GP31_ACPRESENT_MGPIO2"),
+	PINCTRL_PIN(32, "GP32_CLKRUNB"),
+	PINCTRL_PIN(33, "GP33_DEVSLP0"),
+	PINCTRL_PIN(34, "GP34_SATA0XPCIE6L3B_SATA0GP"),
+	PINCTRL_PIN(35, "GP35_SATA1XPCIE6L2B_SATA1GP"),
+	PINCTRL_PIN(36, "GP36_SATA2XPCIE6L1B_SATA2GP"),
+	PINCTRL_PIN(37, "GP37_SATA3XPCIE6L0B_SATA3GP"),
+	PINCTRL_PIN(38, "GP38_DEVSLP1"),
+	PINCTRL_PIN(39, "GP39_DEVSLP2"),
+	PINCTRL_PIN(40, "GP40_OC0B"),
+	PINCTRL_PIN(41, "GP41_OC1B"),
+	PINCTRL_PIN(42, "GP42_OC2B"),
+	PINCTRL_PIN(43, "GP43_OC3B"),
+	PINCTRL_PIN(44, "GP44"),
+	PINCTRL_PIN(45, "GP45_TMS2"),
+	PINCTRL_PIN(46, "GP46_TDO2"),
+	PINCTRL_PIN(47, "GP47"),
+	PINCTRL_PIN(48, "GP48"),
+	PINCTRL_PIN(49, "GP49"),
+	PINCTRL_PIN(50, "GP50"),
+	PINCTRL_PIN(51, "GP51_GSXDOUT"),
+	PINCTRL_PIN(52, "GP52_GSXSLOAD"),
+	PINCTRL_PIN(53, "GP53_GSXDIN"),
+	PINCTRL_PIN(54, "GP54_GSXSRESETB"),
+	PINCTRL_PIN(55, "GP55_GSXCLK"),
+	PINCTRL_PIN(56, "GP56"),
+	PINCTRL_PIN(57, "GP57"),
+	PINCTRL_PIN(58, "GP58"),
+	PINCTRL_PIN(59, "GP59"),
+	PINCTRL_PIN(60, "GP60_SML0ALERTB_MGPIO4"),
+	PINCTRL_PIN(61, "GP61_SUS_STATB"),
+	PINCTRL_PIN(62, "GP62_SUSCLK"),
+	PINCTRL_PIN(63, "GP63_SLP_S5B"),
+	PINCTRL_PIN(64, "GP64_SDIO_CLK"),
+	PINCTRL_PIN(65, "GP65_SDIO_CMD"),
+	PINCTRL_PIN(66, "GP66_SDIO_D0"),
+	PINCTRL_PIN(67, "GP67_SDIO_D1"),
+	PINCTRL_PIN(68, "GP68_SDIO_D2"),
+	PINCTRL_PIN(69, "GP69_SDIO_D3"),
+	PINCTRL_PIN(70, "GP70_SDIO_POWER_EN"),
+	PINCTRL_PIN(71, "GP71_MPHYPC"),
+	PINCTRL_PIN(72, "GP72_BATLOWB"),
+	PINCTRL_PIN(73, "GP73_SML1ALERTB_PCHHOTB_MGPIO8"),
+	PINCTRL_PIN(74, "GP74_SML1DATA_MGPIO12"),
+	PINCTRL_PIN(75, "GP75_SML1CLK_MGPIO11"),
+	PINCTRL_PIN(76, "GP76_BMBUSYB"),
+	PINCTRL_PIN(77, "GP77_PIRQAB"),
+	PINCTRL_PIN(78, "GP78_PIRQBB"),
+	PINCTRL_PIN(79, "GP79_PIRQCB"),
+	PINCTRL_PIN(80, "GP80_PIRQDB"),
+	PINCTRL_PIN(81, "GP81_SPKR"),
+	PINCTRL_PIN(82, "GP82_RCINB"),
+	PINCTRL_PIN(83, "GP83_GSPI0_CSB"),
+	PINCTRL_PIN(84, "GP84_GSPI0_CLK"),
+	PINCTRL_PIN(85, "GP85_GSPI0_MISO"),
+	PINCTRL_PIN(86, "GP86_GSPI0_MOSI"),
+	PINCTRL_PIN(87, "GP87_GSPI1_CSB"),
+	PINCTRL_PIN(88, "GP88_GSPI1_CLK"),
+	PINCTRL_PIN(89, "GP89_GSPI1_MISO"),
+	PINCTRL_PIN(90, "GP90_GSPI1_MOSI"),
+	PINCTRL_PIN(91, "GP91_UART0_RXD"),
+	PINCTRL_PIN(92, "GP92_UART0_TXD"),
+	PINCTRL_PIN(93, "GP93_UART0_RTSB"),
+	PINCTRL_PIN(94, "GP94_UART0_CTSB"),
+};
+
+static const struct intel_community lptlp_communities[] = {
+	COMMUNITY(0, 95),
+};
+
+static const struct intel_pinctrl_soc_data lptlp_soc_data = {
+	.pins		= lptlp_pins,
+	.npins		= ARRAY_SIZE(lptlp_pins),
+	.communities	= lptlp_communities,
+	.ncommunities	= ARRAY_SIZE(lptlp_communities),
+};
+
+/* LynxPoint chipset has support for 95 GPIO pins */
+
+#define LP_NUM_GPIO	95
+
+/* Bitmapped register offsets */
+#define LP_ACPI_OWNED	0x00 /* Bitmap, set by bios, 0: pin reserved for ACPI */
+#define LP_IRQ2IOXAPIC	0x10 /* Bitmap, set by bios, 1: pin routed to IOxAPIC */
+#define LP_GC		0x7C /* set APIC IRQ to IRQ14 or IRQ15 for all pins */
+#define LP_INT_STAT	0x80
+#define LP_INT_ENABLE	0x90
+
+/* Each pin has two 32 bit config registers, starting at 0x100 */
+#define LP_CONFIG1	0x100
+#define LP_CONFIG2	0x104
+
+/* LP_CONFIG1 reg bits */
+#define OUT_LVL_BIT	BIT(31)
+#define IN_LVL_BIT	BIT(30)
+#define TRIG_SEL_BIT	BIT(4) /* 0: Edge, 1: Level */
+#define INT_INV_BIT	BIT(3) /* Invert interrupt triggering */
+#define DIR_BIT		BIT(2) /* 0: Output, 1: Input */
+#define USE_SEL_MASK	GENMASK(1, 0)	/* 0: Native, 1: GPIO, ... */
+#define USE_SEL_NATIVE	(0 << 0)
+#define USE_SEL_GPIO	(1 << 0)
+
+/* LP_CONFIG2 reg bits */
+#define GPINDIS_BIT	BIT(2) /* disable input sensing */
+#define GPIWP_MASK	GENMASK(1, 0)	/* weak pull options */
+#define GPIWP_NONE	0		/* none */
+#define GPIWP_DOWN	1		/* weak pull down */
+#define GPIWP_UP	2		/* weak pull up */
+
+/*
+ * Lynxpoint GPIOs are controlled through both bitmapped registers and
+ * per-GPIO specific registers. The bitmapped registers are in chunks of
+ * 3 x 32-bit registers to cover all 95 GPIOs.
+ *
+ * Per-GPIO specific registers consist of two 32-bit registers per GPIO
+ * (LP_CONFIG1 and LP_CONFIG2); with 95 GPIOs there is a total of
+ * 190 config registers.
+ *
+ * A simplified view of the register layout looks like this:
+ *
+ * LP_ACPI_OWNED[31:0] gpio ownerships for gpios 0-31  (bitmapped registers)
+ * LP_ACPI_OWNED[63:32] gpio ownerships for gpios 32-63
+ * LP_ACPI_OWNED[94:64] gpio ownerships for gpios 64-94
+ * ...
+ * LP_INT_ENABLE[31:0] ...
+ * LP_INT_ENABLE[63:32] ...
+ * LP_INT_ENABLE[94:64] ...
+ * LP0_CONFIG1 (gpio 0) config1 reg for gpio 0 (per gpio registers)
+ * LP0_CONFIG2 (gpio 0) config2 reg for gpio 0
+ * LP1_CONFIG1 (gpio 1) config1 reg for gpio 1
+ * LP1_CONFIG2 (gpio 1) config2 reg for gpio 1
+ * LP2_CONFIG1 (gpio 2) ...
+ * LP2_CONFIG2 (gpio 2) ...
+ * ...
+ * LP94_CONFIG1 (gpio 94) ...
+ * LP94_CONFIG2 (gpio 94) ...
+ *
+ * The IOxAPIC redirection map applies only to gpios 8-10, 13-14 and 45-55.
+ */
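+/*
+ * For example, for GPIO 40: LP_CONFIG1 lives at offset 0x100 + 40 * 8 = 0x240,
+ * while its bit in a bitmapped register such as LP_INT_STAT sits in the word
+ * at offset 0x80 + (40 / 32) * 4 = 0x84, bit 40 % 32 = 8.
+ */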
+
+static struct intel_community *lp_get_community(struct intel_pinctrl *lg,
+						unsigned int pin)
+{
+	struct intel_community *comm;
+	int i;
+
+	for (i = 0; i < lg->ncommunities; i++) {
+		comm = &lg->communities[i];
+		if (pin < comm->pin_base + comm->npins && pin >= comm->pin_base)
+			return comm;
+	}
+
+	return NULL;
+}
+
+static void __iomem *lp_gpio_reg(struct gpio_chip *chip, unsigned int offset,
+				 int reg)
+{
+	struct intel_pinctrl *lg = gpiochip_get_data(chip);
+	struct intel_community *comm;
+	int reg_offset;
+
+	comm = lp_get_community(lg, offset);
+	if (!comm)
+		return NULL;
+
+	offset -= comm->pin_base;
+
+	if (reg == LP_CONFIG1 || reg == LP_CONFIG2)
+		/* per gpio specific config registers */
+		reg_offset = offset * 8;
+	else
+		/* bitmapped registers */
+		reg_offset = (offset / 32) * 4;
+
+	return comm->regs + reg_offset + reg;
+}
+
+static bool lp_gpio_acpi_use(struct intel_pinctrl *lg, unsigned int pin)
+{
+	void __iomem *acpi_use;
+
+	acpi_use = lp_gpio_reg(&lg->chip, pin, LP_ACPI_OWNED);
+	if (!acpi_use)
+		return true;
+
+	return !(ioread32(acpi_use) & BIT(pin % 32));
+}
+
+static bool lp_gpio_ioxapic_use(struct gpio_chip *chip, unsigned int offset)
+{
+	void __iomem *ioxapic_use = lp_gpio_reg(chip, offset, LP_IRQ2IOXAPIC);
+	u32 value;
+
+	value = ioread32(ioxapic_use);
+
+	if (offset >= 8 && offset <= 10)
+		return !!(value & BIT(offset -  8 + 0));
+	if (offset >= 13 && offset <= 14)
+		return !!(value & BIT(offset - 13 + 3));
+	if (offset >= 45 && offset <= 55)
+		return !!(value & BIT(offset - 45 + 5));
+
+	return false;
+}
+
+static int lp_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+	return lg->soc->ngroups;
+}
+
+static const char *lp_get_group_name(struct pinctrl_dev *pctldev,
+				     unsigned int selector)
+{
+	struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+	return lg->soc->groups[selector].name;
+}
+
+static int lp_get_group_pins(struct pinctrl_dev *pctldev,
+			     unsigned int selector,
+			     const unsigned int **pins,
+			     unsigned int *num_pins)
+{
+	struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins		= lg->soc->groups[selector].pins;
+	*num_pins	= lg->soc->groups[selector].npins;
+
+	return 0;
+}
+
+static void lp_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+			    unsigned int pin)
+{
+	struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+	void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
+	void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+	u32 value, mode;
+
+	value = ioread32(reg);
+
+	mode = value & USE_SEL_MASK;
+	if (mode == USE_SEL_GPIO)
+		seq_puts(s, "GPIO ");
+	else
+		seq_printf(s, "mode %d ", mode);
+
+	seq_printf(s, "0x%08x 0x%08x", value, ioread32(conf2));
+
+	if (lp_gpio_acpi_use(lg, pin))
+		seq_puts(s, " [ACPI]");
+}
+
+static const struct pinctrl_ops lptlp_pinctrl_ops = {
+	.get_groups_count	= lp_get_groups_count,
+	.get_group_name		= lp_get_group_name,
+	.get_group_pins		= lp_get_group_pins,
+	.pin_dbg_show		= lp_pin_dbg_show,
+};
+
+static int lp_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+	return lg->soc->nfunctions;
+}
+
+static const char *lp_get_function_name(struct pinctrl_dev *pctldev,
+					unsigned int selector)
+{
+	struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+	return lg->soc->functions[selector].name;
+}
+
+static int lp_get_function_groups(struct pinctrl_dev *pctldev,
+				  unsigned int selector,
+				  const char * const **groups,
+				  unsigned int *num_groups)
+{
+	struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups		= lg->soc->functions[selector].groups;
+	*num_groups	= lg->soc->functions[selector].ngroups;
+
+	return 0;
+}
+
+static int lp_pinmux_set_mux(struct pinctrl_dev *pctldev,
+			     unsigned int function, unsigned int group)
+{
+	struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+	const struct intel_pingroup *grp = &lg->soc->groups[group];
+	unsigned long flags;
+	int i;
+
+	raw_spin_lock_irqsave(&lg->lock, flags);
+
+	/* Now enable the mux setting for each pin in the group */
+	for (i = 0; i < grp->npins; i++) {
+		void __iomem *reg = lp_gpio_reg(&lg->chip, grp->pins[i], LP_CONFIG1);
+		u32 value;
+
+		value = ioread32(reg);
+
+		value &= ~USE_SEL_MASK;
+		if (grp->modes)
+			value |= grp->modes[i];
+		else
+			value |= grp->mode;
+
+		iowrite32(value, reg);
+	}
+
+	raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+	return 0;
+}
+
+static int lp_gpio_request_enable(struct pinctrl_dev *pctldev,
+				  struct pinctrl_gpio_range *range,
+				  unsigned int pin)
+{
+	struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+	void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
+	void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+	unsigned long flags;
+	u32 value;
+
+	pm_runtime_get(lg->dev);
+
+	raw_spin_lock_irqsave(&lg->lock, flags);
+
+	/*
+	 * Reconfigure pin to GPIO mode if needed and issue a warning,
+	 * since we expect firmware to configure it properly.
+	 */
+	value = ioread32(reg);
+	if ((value & USE_SEL_MASK) != USE_SEL_GPIO) {
+		iowrite32((value & ~USE_SEL_MASK) | USE_SEL_GPIO, reg);
+		dev_warn(lg->dev, FW_BUG "pin %u forcibly reconfigured as GPIO\n", pin);
+	}
+
+	/* Enable input sensing */
+	iowrite32(ioread32(conf2) & ~GPINDIS_BIT, conf2);
+
+	raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+	return 0;
+}
+
+static void lp_gpio_disable_free(struct pinctrl_dev *pctldev,
+				 struct pinctrl_gpio_range *range,
+				 unsigned int pin)
+{
+	struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+	void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&lg->lock, flags);
+
+	/* Disable input sensing */
+	iowrite32(ioread32(conf2) | GPINDIS_BIT, conf2);
+
+	raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+	pm_runtime_put(lg->dev);
+}
+
+static int lp_gpio_set_direction(struct pinctrl_dev *pctldev,
+				 struct pinctrl_gpio_range *range,
+				 unsigned int pin, bool input)
+{
+	struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+	void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
+	unsigned long flags;
+	u32 value;
+
+	raw_spin_lock_irqsave(&lg->lock, flags);
+
+	value = ioread32(reg);
+	value &= ~DIR_BIT;
+	if (input) {
+		value |= DIR_BIT;
+	} else {
+		/*
+		 * Before changing the direction, check whether the GPIO is
+		 * routed to the IOxAPIC (direct IRQ). On Lynxpoint, driving
+		 * such a pin as an output does not make sense, so at least
+		 * warn the caller before they shoot themselves in the foot.
+		 */
+		WARN(lp_gpio_ioxapic_use(&lg->chip, pin),
+		     "Potential Error: Setting GPIO to output with IOxAPIC redirection");
+	}
+	iowrite32(value, reg);
+
+	raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+	return 0;
+}
+
+static const struct pinmux_ops lptlp_pinmux_ops = {
+	.get_functions_count	= lp_get_functions_count,
+	.get_function_name	= lp_get_function_name,
+	.get_function_groups	= lp_get_function_groups,
+	.set_mux		= lp_pinmux_set_mux,
+	.gpio_request_enable	= lp_gpio_request_enable,
+	.gpio_disable_free	= lp_gpio_disable_free,
+	.gpio_set_direction	= lp_gpio_set_direction,
+};
+
+static int lp_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+			     unsigned long *config)
+{
+	struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+	void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+	enum pin_config_param param = pinconf_to_config_param(*config);
+	unsigned long flags;
+	u32 value, pull;
+	u16 arg = 0;
+
+	raw_spin_lock_irqsave(&lg->lock, flags);
+	value = ioread32(conf2);
+	raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+	pull = value & GPIWP_MASK;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		if (pull)
+			return -EINVAL;
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		if (pull != GPIWP_DOWN)
+			return -EINVAL;
+
+		arg = 1;
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		if (pull != GPIWP_UP)
+			return -EINVAL;
+
+		arg = 1;
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+
+	*config = pinconf_to_config_packed(param, arg);
+
+	return 0;
+}
+
+static int lp_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+			     unsigned long *configs, unsigned int num_configs)
+{
+	struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+	void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+	enum pin_config_param param;
+	unsigned long flags;
+	int i, ret = 0;
+	u32 value;
+
+	raw_spin_lock_irqsave(&lg->lock, flags);
+
+	value = ioread32(conf2);
+
+	for (i = 0; i < num_configs; i++) {
+		param = pinconf_to_config_param(configs[i]);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_DISABLE:
+			value &= ~GPIWP_MASK;
+			break;
+		case PIN_CONFIG_BIAS_PULL_DOWN:
+			value &= ~GPIWP_MASK;
+			value |= GPIWP_DOWN;
+			break;
+		case PIN_CONFIG_BIAS_PULL_UP:
+			value &= ~GPIWP_MASK;
+			value |= GPIWP_UP;
+			break;
+		default:
+			ret = -ENOTSUPP;
+		}
+
+		if (ret)
+			break;
+	}
+
+	if (!ret)
+		iowrite32(value, conf2);
+
+	raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+	return ret;
+}
+
+static const struct pinconf_ops lptlp_pinconf_ops = {
+	.is_generic	= true,
+	.pin_config_get	= lp_pin_config_get,
+	.pin_config_set	= lp_pin_config_set,
+};
+
+static const struct pinctrl_desc lptlp_pinctrl_desc = {
+	.pctlops	= &lptlp_pinctrl_ops,
+	.pmxops		= &lptlp_pinmux_ops,
+	.confops	= &lptlp_pinconf_ops,
+	.owner		= THIS_MODULE,
+};
+
+static int lp_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+	void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+
+	return !!(ioread32(reg) & IN_LVL_BIT);
+}
+
+static void lp_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+	struct intel_pinctrl *lg = gpiochip_get_data(chip);
+	void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&lg->lock, flags);
+
+	if (value)
+		iowrite32(ioread32(reg) | OUT_LVL_BIT, reg);
+	else
+		iowrite32(ioread32(reg) & ~OUT_LVL_BIT, reg);
+
+	raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+	return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int lp_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
+				    int value)
+{
+	lp_gpio_set(chip, offset, value);
+
+	return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static int lp_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+	void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+
+	if (ioread32(reg) & DIR_BIT)
+		return GPIO_LINE_DIRECTION_IN;
+
+	return GPIO_LINE_DIRECTION_OUT;
+}
+
+static void lp_gpio_irq_handler(struct irq_desc *desc)
+{
+	struct irq_data *data = irq_desc_get_irq_data(desc);
+	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+	struct intel_pinctrl *lg = gpiochip_get_data(gc);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	void __iomem *reg, *ena;
+	unsigned long pending;
+	u32 base, pin;
+
+	/* check from GPIO controller which pin triggered the interrupt */
+	for (base = 0; base < lg->chip.ngpio; base += 32) {
+		reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+		ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
+
+		/* Only interrupts that are enabled */
+		pending = ioread32(reg) & ioread32(ena);
+
+		for_each_set_bit(pin, &pending, 32) {
+			unsigned int irq;
+
+			irq = irq_find_mapping(lg->chip.irq.domain, base + pin);
+			generic_handle_irq(irq);
+		}
+	}
+	chip->irq_eoi(data);
+}
+
+static void lp_irq_ack(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct intel_pinctrl *lg = gpiochip_get_data(gc);
+	u32 hwirq = irqd_to_hwirq(d);
+	void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_STAT);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&lg->lock, flags);
+	iowrite32(BIT(hwirq % 32), reg);
+	raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
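+/*
+ * The mask/unmask callbacks are intentionally empty: the interrupt is
+ * enabled and disabled via LP_INT_ENABLE in lp_irq_enable() and
+ * lp_irq_disable() below.
+ */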
+static void lp_irq_unmask(struct irq_data *d)
+{
+}
+
+static void lp_irq_mask(struct irq_data *d)
+{
+}
+
+static void lp_irq_enable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct intel_pinctrl *lg = gpiochip_get_data(gc);
+	u32 hwirq = irqd_to_hwirq(d);
+	void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&lg->lock, flags);
+	iowrite32(ioread32(reg) | BIT(hwirq % 32), reg);
+	raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static void lp_irq_disable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct intel_pinctrl *lg = gpiochip_get_data(gc);
+	u32 hwirq = irqd_to_hwirq(d);
+	void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&lg->lock, flags);
+	iowrite32(ioread32(reg) & ~BIT(hwirq % 32), reg);
+	raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static int lp_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct intel_pinctrl *lg = gpiochip_get_data(gc);
+	u32 hwirq = irqd_to_hwirq(d);
+	void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_CONFIG1);
+	unsigned long flags;
+	u32 value;
+
+	if (hwirq >= lg->chip.ngpio)
+		return -EINVAL;
+
+	/* Fail if BIOS reserved pin for ACPI use */
+	if (lp_gpio_acpi_use(lg, hwirq)) {
+		dev_err(lg->dev, "pin %u can't be used as IRQ\n", hwirq);
+		return -EBUSY;
+	}
+
+	raw_spin_lock_irqsave(&lg->lock, flags);
+	value = ioread32(reg);
+
+	/* set both TRIG_SEL and INV bits to 0 for rising edge */
+	if (type & IRQ_TYPE_EDGE_RISING)
+		value &= ~(TRIG_SEL_BIT | INT_INV_BIT);
+
+	/* TRIG_SEL bit 0, INV bit 1 for falling edge */
+	if (type & IRQ_TYPE_EDGE_FALLING)
+		value = (value | INT_INV_BIT) & ~TRIG_SEL_BIT;
+
+	/* TRIG_SEL bit 1, INV bit 0 for level low */
+	if (type & IRQ_TYPE_LEVEL_LOW)
+		value = (value | TRIG_SEL_BIT) & ~INT_INV_BIT;
+
+	/* TRIG_SEL bit 1, INV bit 1 for level high */
+	if (type & IRQ_TYPE_LEVEL_HIGH)
+		value |= TRIG_SEL_BIT | INT_INV_BIT;
+
+	iowrite32(value, reg);
+
+	if (type & IRQ_TYPE_EDGE_BOTH)
+		irq_set_handler_locked(d, handle_edge_irq);
+	else if (type & IRQ_TYPE_LEVEL_MASK)
+		irq_set_handler_locked(d, handle_level_irq);
+
+	raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+	return 0;
+}
+
+static struct irq_chip lp_irqchip = {
+	.name = "LP-GPIO",
+	.irq_ack = lp_irq_ack,
+	.irq_mask = lp_irq_mask,
+	.irq_unmask = lp_irq_unmask,
+	.irq_enable = lp_irq_enable,
+	.irq_disable = lp_irq_disable,
+	.irq_set_type = lp_irq_set_type,
+	.flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
+{
+	struct intel_pinctrl *lg = gpiochip_get_data(chip);
+	void __iomem *reg;
+	unsigned int base;
+
+	for (base = 0; base < lg->chip.ngpio; base += 32) {
+		/* disable gpio pin interrupts */
+		reg = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
+		iowrite32(0, reg);
+		/* Clear interrupt status register */
+		reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+		iowrite32(0xffffffff, reg);
+	}
+
+	return 0;
+}
+
+static int lp_gpio_add_pin_ranges(struct gpio_chip *chip)
+{
+	struct intel_pinctrl *lg = gpiochip_get_data(chip);
+	struct device *dev = lg->dev;
+	int ret;
+
+	ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0, lg->soc->npins);
+	if (ret)
+		dev_err(dev, "failed to add GPIO pin range\n");
+
+	return ret;
+}
+
+static int lp_gpio_probe(struct platform_device *pdev)
+{
+	const struct intel_pinctrl_soc_data *soc;
+	struct intel_pinctrl *lg;
+	struct gpio_chip *gc;
+	struct resource *io_rc, *irq_rc;
+	struct device *dev = &pdev->dev;
+	void __iomem *regs;
+	unsigned int i;
+	int ret;
+
+	soc = (const struct intel_pinctrl_soc_data *)device_get_match_data(dev);
+	if (!soc)
+		return -ENODEV;
+
+	lg = devm_kzalloc(dev, sizeof(*lg), GFP_KERNEL);
+	if (!lg)
+		return -ENOMEM;
+
+	lg->dev = dev;
+	lg->soc = soc;
+
+	lg->ncommunities = lg->soc->ncommunities;
+	lg->communities = devm_kcalloc(dev, lg->ncommunities,
+				       sizeof(*lg->communities), GFP_KERNEL);
+	if (!lg->communities)
+		return -ENOMEM;
+
+	lg->pctldesc           = lptlp_pinctrl_desc;
+	lg->pctldesc.name      = dev_name(dev);
+	lg->pctldesc.pins      = lg->soc->pins;
+	lg->pctldesc.npins     = lg->soc->npins;
+
+	lg->pctldev = devm_pinctrl_register(dev, &lg->pctldesc, lg);
+	if (IS_ERR(lg->pctldev)) {
+		dev_err(dev, "failed to register pinctrl driver\n");
+		return PTR_ERR(lg->pctldev);
+	}
+
+	platform_set_drvdata(pdev, lg);
+
+	io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!io_rc) {
+		dev_err(dev, "missing IO resources\n");
+		return -EINVAL;
+	}
+
+	regs = devm_ioport_map(dev, io_rc->start, resource_size(io_rc));
+	if (!regs) {
+		dev_err(dev, "failed mapping IO region %pR\n", io_rc);
+		return -EBUSY;
+	}
+
+	for (i = 0; i < lg->soc->ncommunities; i++) {
+		struct intel_community *comm = &lg->communities[i];
+
+		*comm = lg->soc->communities[i];
+
+		comm->regs = regs;
+		comm->pad_regs = regs + 0x100;
+	}
+
+	raw_spin_lock_init(&lg->lock);
+
+	gc = &lg->chip;
+	gc->label = dev_name(dev);
+	gc->owner = THIS_MODULE;
+	gc->request = gpiochip_generic_request;
+	gc->free = gpiochip_generic_free;
+	gc->direction_input = lp_gpio_direction_input;
+	gc->direction_output = lp_gpio_direction_output;
+	gc->get = lp_gpio_get;
+	gc->set = lp_gpio_set;
+	gc->get_direction = lp_gpio_get_direction;
+	gc->base = -1;
+	gc->ngpio = LP_NUM_GPIO;
+	gc->can_sleep = false;
+	gc->add_pin_ranges = lp_gpio_add_pin_ranges;
+	gc->parent = dev;
+
+	/* set up interrupts  */
+	irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (irq_rc && irq_rc->start) {
+		struct gpio_irq_chip *girq;
+
+		girq = &gc->irq;
+		girq->chip = &lp_irqchip;
+		girq->init_hw = lp_gpio_irq_init_hw;
+		girq->parent_handler = lp_gpio_irq_handler;
+		girq->num_parents = 1;
+		girq->parents = devm_kcalloc(dev, girq->num_parents,
+					     sizeof(*girq->parents),
+					     GFP_KERNEL);
+		if (!girq->parents)
+			return -ENOMEM;
+		girq->parents[0] = (unsigned int)irq_rc->start;
+		girq->default_type = IRQ_TYPE_NONE;
+		girq->handler = handle_bad_irq;
+	}
+
+	ret = devm_gpiochip_add_data(dev, gc, lg);
+	if (ret) {
+		dev_err(dev, "failed adding lp-gpio chip\n");
+		return ret;
+	}
+
+	pm_runtime_enable(dev);
+
+	return 0;
+}
+
+static int lp_gpio_remove(struct platform_device *pdev)
+{
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
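+/*
+ * The runtime PM callbacks are no-op stubs: pm_runtime_get()/put() in the
+ * GPIO request/free paths only track usage, and no extra hardware handling
+ * is needed when the controller is runtime suspended.
+ */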
+static int lp_gpio_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int lp_gpio_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int lp_gpio_resume(struct device *dev)
+{
+	struct intel_pinctrl *lg = dev_get_drvdata(dev);
+	void __iomem *reg;
+	int i;
+
+	/* On some hardware suspend clears input sensing; re-enable it here */
+	for (i = 0; i < lg->chip.ngpio; i++) {
+		if (gpiochip_is_requested(&lg->chip, i) != NULL) {
+			reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2);
+			iowrite32(ioread32(reg) & ~GPINDIS_BIT, reg);
+		}
+	}
+	return 0;
+}
+
+static const struct dev_pm_ops lp_gpio_pm_ops = {
+	.runtime_suspend = lp_gpio_runtime_suspend,
+	.runtime_resume = lp_gpio_runtime_resume,
+	.resume = lp_gpio_resume,
+};
+
+static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
+	{ "INT33C7", (kernel_ulong_t)&lptlp_soc_data },
+	{ "INT3437", (kernel_ulong_t)&lptlp_soc_data },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
+
+static struct platform_driver lp_gpio_driver = {
+	.probe          = lp_gpio_probe,
+	.remove         = lp_gpio_remove,
+	.driver         = {
+		.name   = "lp_gpio",
+		.pm	= &lp_gpio_pm_ops,
+		.acpi_match_table = ACPI_PTR(lynxpoint_gpio_acpi_match),
+	},
+};
+
+static int __init lp_gpio_init(void)
+{
+	return platform_driver_register(&lp_gpio_driver);
+}
+
+static void __exit lp_gpio_exit(void)
+{
+	platform_driver_unregister(&lp_gpio_driver);
+}
+
+subsys_initcall(lp_gpio_init);
+module_exit(lp_gpio_exit);
+
+MODULE_AUTHOR("Mathias Nyman (Intel)");
+MODULE_AUTHOR("Andy Shevchenko (Intel)");
+MODULE_DESCRIPTION("Intel Lynxpoint pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lp_gpio");
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index d936e7a..330c8f0 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -589,6 +589,7 @@ static const struct intel_pinctrl_soc_data spth_soc_data = {
 
 static const struct acpi_device_id spt_pinctrl_acpi_match[] = {
 	{ "INT344B", (kernel_ulong_t)&sptlp_soc_data },
+	{ "INT3451", (kernel_ulong_t)&spth_soc_data },
 	{ "INT345D", (kernel_ulong_t)&spth_soc_data },
 	{ }
 };
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index 58572b1..08a86f6 100644
--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -2,7 +2,7 @@
 /*
  * Intel Tiger Lake PCH pinctrl/GPIO driver
  *
- * Copyright (C) 2019, Intel Corporation
+ * Copyright (C) 2019 - 2020, Intel Corporation
  * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
  *          Mika Westerberg <mika.westerberg@linux.intel.com>
  */
@@ -21,15 +21,19 @@
 #define TGL_GPI_IS	0x100
 #define TGL_GPI_IE	0x120
 
-#define TGL_GPP(r, s, e)				\
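+/*
+ * Pad groups that are not exposed as GPIOs (HVCMOS, JTAG and SPI below)
+ * use TGL_NO_GPIO as their GPIO number base.
+ */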
+#define TGL_NO_GPIO	-1
+
+#define TGL_GPP(r, s, e, g)				\
 	{						\
 		.reg_num = (r),				\
 		.base = (s),				\
 		.size = ((e) - (s) + 1),		\
+		.gpio_base = (g),			\
 	}
 
-#define TGL_COMMUNITY(s, e, g)				\
+#define TGL_COMMUNITY(b, s, e, g)			\
 	{						\
+		.barno = (b),				\
 		.padown_offset = TGL_PAD_OWN,		\
 		.padcfglock_offset = TGL_PADCFGLOCK,	\
 		.hostown_offset = TGL_HOSTSW_OWN,	\
@@ -42,7 +46,7 @@
 	}
 
 /* Tiger Lake-LP */
-static const struct pinctrl_pin_desc tgllp_community0_pins[] = {
+static const struct pinctrl_pin_desc tgllp_pins[] = {
 	/* GPP_B */
 	PINCTRL_PIN(0, "CORE_VID_0"),
 	PINCTRL_PIN(1, "CORE_VID_1"),
@@ -113,324 +117,273 @@ static const struct pinctrl_pin_desc tgllp_community0_pins[] = {
 	PINCTRL_PIN(64, "GPPC_A_22"),
 	PINCTRL_PIN(65, "I2S1_SCLK"),
 	PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
+	/* GPP_S */
+	PINCTRL_PIN(67, "SNDW0_CLK"),
+	PINCTRL_PIN(68, "SNDW0_DATA"),
+	PINCTRL_PIN(69, "SNDW1_CLK"),
+	PINCTRL_PIN(70, "SNDW1_DATA"),
+	PINCTRL_PIN(71, "SNDW2_CLK"),
+	PINCTRL_PIN(72, "SNDW2_DATA"),
+	PINCTRL_PIN(73, "SNDW3_CLK"),
+	PINCTRL_PIN(74, "SNDW3_DATA"),
+	/* GPP_H */
+	PINCTRL_PIN(75, "GPPC_H_0"),
+	PINCTRL_PIN(76, "GPPC_H_1"),
+	PINCTRL_PIN(77, "GPPC_H_2"),
+	PINCTRL_PIN(78, "SX_EXIT_HOLDOFFB"),
+	PINCTRL_PIN(79, "I2C2_SDA"),
+	PINCTRL_PIN(80, "I2C2_SCL"),
+	PINCTRL_PIN(81, "I2C3_SDA"),
+	PINCTRL_PIN(82, "I2C3_SCL"),
+	PINCTRL_PIN(83, "I2C4_SDA"),
+	PINCTRL_PIN(84, "I2C4_SCL"),
+	PINCTRL_PIN(85, "SRCCLKREQB_4"),
+	PINCTRL_PIN(86, "SRCCLKREQB_5"),
+	PINCTRL_PIN(87, "M2_SKT2_CFG_0"),
+	PINCTRL_PIN(88, "M2_SKT2_CFG_1"),
+	PINCTRL_PIN(89, "M2_SKT2_CFG_2"),
+	PINCTRL_PIN(90, "M2_SKT2_CFG_3"),
+	PINCTRL_PIN(91, "DDPB_CTRLCLK"),
+	PINCTRL_PIN(92, "DDPB_CTRLDATA"),
+	PINCTRL_PIN(93, "CPU_C10_GATEB"),
+	PINCTRL_PIN(94, "TIME_SYNC_0"),
+	PINCTRL_PIN(95, "IMGCLKOUT_1"),
+	PINCTRL_PIN(96, "IMGCLKOUT_2"),
+	PINCTRL_PIN(97, "IMGCLKOUT_3"),
+	PINCTRL_PIN(98, "IMGCLKOUT_4"),
+	/* GPP_D */
+	PINCTRL_PIN(99, "ISH_GP_0"),
+	PINCTRL_PIN(100, "ISH_GP_1"),
+	PINCTRL_PIN(101, "ISH_GP_2"),
+	PINCTRL_PIN(102, "ISH_GP_3"),
+	PINCTRL_PIN(103, "IMGCLKOUT_0"),
+	PINCTRL_PIN(104, "SRCCLKREQB_0"),
+	PINCTRL_PIN(105, "SRCCLKREQB_1"),
+	PINCTRL_PIN(106, "SRCCLKREQB_2"),
+	PINCTRL_PIN(107, "SRCCLKREQB_3"),
+	PINCTRL_PIN(108, "ISH_SPI_CSB"),
+	PINCTRL_PIN(109, "ISH_SPI_CLK"),
+	PINCTRL_PIN(110, "ISH_SPI_MISO"),
+	PINCTRL_PIN(111, "ISH_SPI_MOSI"),
+	PINCTRL_PIN(112, "ISH_UART0_RXD"),
+	PINCTRL_PIN(113, "ISH_UART0_TXD"),
+	PINCTRL_PIN(114, "ISH_UART0_RTSB"),
+	PINCTRL_PIN(115, "ISH_UART0_CTSB"),
+	PINCTRL_PIN(116, "ISH_GP_4"),
+	PINCTRL_PIN(117, "ISH_GP_5"),
+	PINCTRL_PIN(118, "I2S_MCLK1_OUT"),
+	PINCTRL_PIN(119, "GSPI2_CLK_LOOPBK"),
+	/* GPP_U */
+	PINCTRL_PIN(120, "UART3_RXD"),
+	PINCTRL_PIN(121, "UART3_TXD"),
+	PINCTRL_PIN(122, "UART3_RTSB"),
+	PINCTRL_PIN(123, "UART3_CTSB"),
+	PINCTRL_PIN(124, "GSPI3_CS0B"),
+	PINCTRL_PIN(125, "GSPI3_CLK"),
+	PINCTRL_PIN(126, "GSPI3_MISO"),
+	PINCTRL_PIN(127, "GSPI3_MOSI"),
+	PINCTRL_PIN(128, "GSPI4_CS0B"),
+	PINCTRL_PIN(129, "GSPI4_CLK"),
+	PINCTRL_PIN(130, "GSPI4_MISO"),
+	PINCTRL_PIN(131, "GSPI4_MOSI"),
+	PINCTRL_PIN(132, "GSPI5_CS0B"),
+	PINCTRL_PIN(133, "GSPI5_CLK"),
+	PINCTRL_PIN(134, "GSPI5_MISO"),
+	PINCTRL_PIN(135, "GSPI5_MOSI"),
+	PINCTRL_PIN(136, "GSPI6_CS0B"),
+	PINCTRL_PIN(137, "GSPI6_CLK"),
+	PINCTRL_PIN(138, "GSPI6_MISO"),
+	PINCTRL_PIN(139, "GSPI6_MOSI"),
+	PINCTRL_PIN(140, "GSPI3_CLK_LOOPBK"),
+	PINCTRL_PIN(141, "GSPI4_CLK_LOOPBK"),
+	PINCTRL_PIN(142, "GSPI5_CLK_LOOPBK"),
+	PINCTRL_PIN(143, "GSPI6_CLK_LOOPBK"),
+	/* vGPIO */
+	PINCTRL_PIN(144, "CNV_BTEN"),
+	PINCTRL_PIN(145, "CNV_BT_HOST_WAKEB"),
+	PINCTRL_PIN(146, "CNV_BT_IF_SELECT"),
+	PINCTRL_PIN(147, "vCNV_BT_UART_TXD"),
+	PINCTRL_PIN(148, "vCNV_BT_UART_RXD"),
+	PINCTRL_PIN(149, "vCNV_BT_UART_CTS_B"),
+	PINCTRL_PIN(150, "vCNV_BT_UART_RTS_B"),
+	PINCTRL_PIN(151, "vCNV_MFUART1_TXD"),
+	PINCTRL_PIN(152, "vCNV_MFUART1_RXD"),
+	PINCTRL_PIN(153, "vCNV_MFUART1_CTS_B"),
+	PINCTRL_PIN(154, "vCNV_MFUART1_RTS_B"),
+	PINCTRL_PIN(155, "vUART0_TXD"),
+	PINCTRL_PIN(156, "vUART0_RXD"),
+	PINCTRL_PIN(157, "vUART0_CTS_B"),
+	PINCTRL_PIN(158, "vUART0_RTS_B"),
+	PINCTRL_PIN(159, "vISH_UART0_TXD"),
+	PINCTRL_PIN(160, "vISH_UART0_RXD"),
+	PINCTRL_PIN(161, "vISH_UART0_CTS_B"),
+	PINCTRL_PIN(162, "vISH_UART0_RTS_B"),
+	PINCTRL_PIN(163, "vCNV_BT_I2S_BCLK"),
+	PINCTRL_PIN(164, "vCNV_BT_I2S_WS_SYNC"),
+	PINCTRL_PIN(165, "vCNV_BT_I2S_SDO"),
+	PINCTRL_PIN(166, "vCNV_BT_I2S_SDI"),
+	PINCTRL_PIN(167, "vI2S2_SCLK"),
+	PINCTRL_PIN(168, "vI2S2_SFRM"),
+	PINCTRL_PIN(169, "vI2S2_TXD"),
+	PINCTRL_PIN(170, "vI2S2_RXD"),
+	/* GPP_C */
+	PINCTRL_PIN(171, "SMBCLK"),
+	PINCTRL_PIN(172, "SMBDATA"),
+	PINCTRL_PIN(173, "SMBALERTB"),
+	PINCTRL_PIN(174, "SML0CLK"),
+	PINCTRL_PIN(175, "SML0DATA"),
+	PINCTRL_PIN(176, "SML0ALERTB"),
+	PINCTRL_PIN(177, "SML1CLK"),
+	PINCTRL_PIN(178, "SML1DATA"),
+	PINCTRL_PIN(179, "UART0_RXD"),
+	PINCTRL_PIN(180, "UART0_TXD"),
+	PINCTRL_PIN(181, "UART0_RTSB"),
+	PINCTRL_PIN(182, "UART0_CTSB"),
+	PINCTRL_PIN(183, "UART1_RXD"),
+	PINCTRL_PIN(184, "UART1_TXD"),
+	PINCTRL_PIN(185, "UART1_RTSB"),
+	PINCTRL_PIN(186, "UART1_CTSB"),
+	PINCTRL_PIN(187, "I2C0_SDA"),
+	PINCTRL_PIN(188, "I2C0_SCL"),
+	PINCTRL_PIN(189, "I2C1_SDA"),
+	PINCTRL_PIN(190, "I2C1_SCL"),
+	PINCTRL_PIN(191, "UART2_RXD"),
+	PINCTRL_PIN(192, "UART2_TXD"),
+	PINCTRL_PIN(193, "UART2_RTSB"),
+	PINCTRL_PIN(194, "UART2_CTSB"),
+	/* GPP_F */
+	PINCTRL_PIN(195, "CNV_BRI_DT"),
+	PINCTRL_PIN(196, "CNV_BRI_RSP"),
+	PINCTRL_PIN(197, "CNV_RGI_DT"),
+	PINCTRL_PIN(198, "CNV_RGI_RSP"),
+	PINCTRL_PIN(199, "CNV_RF_RESET_B"),
+	PINCTRL_PIN(200, "GPPC_F_5"),
+	PINCTRL_PIN(201, "CNV_PA_BLANKING"),
+	PINCTRL_PIN(202, "GPPC_F_7"),
+	PINCTRL_PIN(203, "I2S_MCLK2_INOUT"),
+	PINCTRL_PIN(204, "BOOTMPC"),
+	PINCTRL_PIN(205, "GPPC_F_10"),
+	PINCTRL_PIN(206, "GPPC_F_11"),
+	PINCTRL_PIN(207, "GSXDOUT"),
+	PINCTRL_PIN(208, "GSXSLOAD"),
+	PINCTRL_PIN(209, "GSXDIN"),
+	PINCTRL_PIN(210, "GSXSRESETB"),
+	PINCTRL_PIN(211, "GSXCLK"),
+	PINCTRL_PIN(212, "GMII_MDC"),
+	PINCTRL_PIN(213, "GMII_MDIO"),
+	PINCTRL_PIN(214, "SRCCLKREQB_6"),
+	PINCTRL_PIN(215, "EXT_PWR_GATEB"),
+	PINCTRL_PIN(216, "EXT_PWR_GATE2B"),
+	PINCTRL_PIN(217, "VNN_CTRL"),
+	PINCTRL_PIN(218, "V1P05_CTRL"),
+	PINCTRL_PIN(219, "GPPF_CLK_LOOPBACK"),
+	/* HVCMOS */
+	PINCTRL_PIN(220, "L_BKLTEN"),
+	PINCTRL_PIN(221, "L_BKLTCTL"),
+	PINCTRL_PIN(222, "L_VDDEN"),
+	PINCTRL_PIN(223, "SYS_PWROK"),
+	PINCTRL_PIN(224, "SYS_RESETB"),
+	PINCTRL_PIN(225, "MLK_RSTB"),
+	/* GPP_E */
+	PINCTRL_PIN(226, "SATAXPCIE_0"),
+	PINCTRL_PIN(227, "SPI1_IO_2"),
+	PINCTRL_PIN(228, "SPI1_IO_3"),
+	PINCTRL_PIN(229, "CPU_GP_0"),
+	PINCTRL_PIN(230, "SATA_DEVSLP_0"),
+	PINCTRL_PIN(231, "SATA_DEVSLP_1"),
+	PINCTRL_PIN(232, "GPPC_E_6"),
+	PINCTRL_PIN(233, "CPU_GP_1"),
+	PINCTRL_PIN(234, "SPI1_CS1B"),
+	PINCTRL_PIN(235, "USB2_OCB_0"),
+	PINCTRL_PIN(236, "SPI1_CSB"),
+	PINCTRL_PIN(237, "SPI1_CLK"),
+	PINCTRL_PIN(238, "SPI1_MISO_IO_1"),
+	PINCTRL_PIN(239, "SPI1_MOSI_IO_0"),
+	PINCTRL_PIN(240, "DDSP_HPD_A"),
+	PINCTRL_PIN(241, "ISH_GP_6"),
+	PINCTRL_PIN(242, "ISH_GP_7"),
+	PINCTRL_PIN(243, "GPPC_E_17"),
+	PINCTRL_PIN(244, "DDP1_CTRLCLK"),
+	PINCTRL_PIN(245, "DDP1_CTRLDATA"),
+	PINCTRL_PIN(246, "DDP2_CTRLCLK"),
+	PINCTRL_PIN(247, "DDP2_CTRLDATA"),
+	PINCTRL_PIN(248, "DDPA_CTRLCLK"),
+	PINCTRL_PIN(249, "DDPA_CTRLDATA"),
+	PINCTRL_PIN(250, "SPI1_CLK_LOOPBK"),
+	/* JTAG */
+	PINCTRL_PIN(251, "JTAG_TDO"),
+	PINCTRL_PIN(252, "JTAGX"),
+	PINCTRL_PIN(253, "PRDYB"),
+	PINCTRL_PIN(254, "PREQB"),
+	PINCTRL_PIN(255, "CPU_TRSTB"),
+	PINCTRL_PIN(256, "JTAG_TDI"),
+	PINCTRL_PIN(257, "JTAG_TMS"),
+	PINCTRL_PIN(258, "JTAG_TCK"),
+	PINCTRL_PIN(259, "DBG_PMODE"),
+	/* GPP_R */
+	PINCTRL_PIN(260, "HDA_BCLK"),
+	PINCTRL_PIN(261, "HDA_SYNC"),
+	PINCTRL_PIN(262, "HDA_SDO"),
+	PINCTRL_PIN(263, "HDA_SDI_0"),
+	PINCTRL_PIN(264, "HDA_RSTB"),
+	PINCTRL_PIN(265, "HDA_SDI_1"),
+	PINCTRL_PIN(266, "GPP_R_6"),
+	PINCTRL_PIN(267, "GPP_R_7"),
+	/* SPI */
+	PINCTRL_PIN(268, "SPI0_IO_2"),
+	PINCTRL_PIN(269, "SPI0_IO_3"),
+	PINCTRL_PIN(270, "SPI0_MOSI_IO_0"),
+	PINCTRL_PIN(271, "SPI0_MISO_IO_1"),
+	PINCTRL_PIN(272, "SPI0_TPM_CSB"),
+	PINCTRL_PIN(273, "SPI0_FLASH_0_CSB"),
+	PINCTRL_PIN(274, "SPI0_FLASH_1_CSB"),
+	PINCTRL_PIN(275, "SPI0_CLK"),
+	PINCTRL_PIN(276, "SPI0_CLK_LOOPBK"),
 };
 
 static const struct intel_padgroup tgllp_community0_gpps[] = {
-	TGL_GPP(0, 0, 25),	/* GPP_B */
-	TGL_GPP(1, 26, 41),	/* GPP_T */
-	TGL_GPP(2, 42, 66),	/* GPP_A */
-};
-
-static const struct intel_community tgllp_community0[] = {
-	TGL_COMMUNITY(0, 66, tgllp_community0_gpps),
-};
-
-static const struct intel_pinctrl_soc_data tgllp_community0_soc_data = {
-	.uid = "0",
-	.pins = tgllp_community0_pins,
-	.npins = ARRAY_SIZE(tgllp_community0_pins),
-	.communities = tgllp_community0,
-	.ncommunities = ARRAY_SIZE(tgllp_community0),
-};
-
-static const struct pinctrl_pin_desc tgllp_community1_pins[] = {
-	/* GPP_S */
-	PINCTRL_PIN(0, "SNDW0_CLK"),
-	PINCTRL_PIN(1, "SNDW0_DATA"),
-	PINCTRL_PIN(2, "SNDW1_CLK"),
-	PINCTRL_PIN(3, "SNDW1_DATA"),
-	PINCTRL_PIN(4, "SNDW2_CLK"),
-	PINCTRL_PIN(5, "SNDW2_DATA"),
-	PINCTRL_PIN(6, "SNDW3_CLK"),
-	PINCTRL_PIN(7, "SNDW3_DATA"),
-	/* GPP_H */
-	PINCTRL_PIN(8, "GPPC_H_0"),
-	PINCTRL_PIN(9, "GPPC_H_1"),
-	PINCTRL_PIN(10, "GPPC_H_2"),
-	PINCTRL_PIN(11, "SX_EXIT_HOLDOFFB"),
-	PINCTRL_PIN(12, "I2C2_SDA"),
-	PINCTRL_PIN(13, "I2C2_SCL"),
-	PINCTRL_PIN(14, "I2C3_SDA"),
-	PINCTRL_PIN(15, "I2C3_SCL"),
-	PINCTRL_PIN(16, "I2C4_SDA"),
-	PINCTRL_PIN(17, "I2C4_SCL"),
-	PINCTRL_PIN(18, "SRCCLKREQB_4"),
-	PINCTRL_PIN(19, "SRCCLKREQB_5"),
-	PINCTRL_PIN(20, "M2_SKT2_CFG_0"),
-	PINCTRL_PIN(21, "M2_SKT2_CFG_1"),
-	PINCTRL_PIN(22, "M2_SKT2_CFG_2"),
-	PINCTRL_PIN(23, "M2_SKT2_CFG_3"),
-	PINCTRL_PIN(24, "DDPB_CTRLCLK"),
-	PINCTRL_PIN(25, "DDPB_CTRLDATA"),
-	PINCTRL_PIN(26, "CPU_C10_GATEB"),
-	PINCTRL_PIN(27, "TIME_SYNC_0"),
-	PINCTRL_PIN(28, "IMGCLKOUT_1"),
-	PINCTRL_PIN(29, "IMGCLKOUT_2"),
-	PINCTRL_PIN(30, "IMGCLKOUT_3"),
-	PINCTRL_PIN(31, "IMGCLKOUT_4"),
-	/* GPP_D */
-	PINCTRL_PIN(32, "ISH_GP_0"),
-	PINCTRL_PIN(33, "ISH_GP_1"),
-	PINCTRL_PIN(34, "ISH_GP_2"),
-	PINCTRL_PIN(35, "ISH_GP_3"),
-	PINCTRL_PIN(36, "IMGCLKOUT_0"),
-	PINCTRL_PIN(37, "SRCCLKREQB_0"),
-	PINCTRL_PIN(38, "SRCCLKREQB_1"),
-	PINCTRL_PIN(39, "SRCCLKREQB_2"),
-	PINCTRL_PIN(40, "SRCCLKREQB_3"),
-	PINCTRL_PIN(41, "ISH_SPI_CSB"),
-	PINCTRL_PIN(42, "ISH_SPI_CLK"),
-	PINCTRL_PIN(43, "ISH_SPI_MISO"),
-	PINCTRL_PIN(44, "ISH_SPI_MOSI"),
-	PINCTRL_PIN(45, "ISH_UART0_RXD"),
-	PINCTRL_PIN(46, "ISH_UART0_TXD"),
-	PINCTRL_PIN(47, "ISH_UART0_RTSB"),
-	PINCTRL_PIN(48, "ISH_UART0_CTSB"),
-	PINCTRL_PIN(49, "ISH_GP_4"),
-	PINCTRL_PIN(50, "ISH_GP_5"),
-	PINCTRL_PIN(51, "I2S_MCLK1_OUT"),
-	PINCTRL_PIN(52, "GSPI2_CLK_LOOPBK"),
-	/* GPP_U */
-	PINCTRL_PIN(53, "UART3_RXD"),
-	PINCTRL_PIN(54, "UART3_TXD"),
-	PINCTRL_PIN(55, "UART3_RTSB"),
-	PINCTRL_PIN(56, "UART3_CTSB"),
-	PINCTRL_PIN(57, "GSPI3_CS0B"),
-	PINCTRL_PIN(58, "GSPI3_CLK"),
-	PINCTRL_PIN(59, "GSPI3_MISO"),
-	PINCTRL_PIN(60, "GSPI3_MOSI"),
-	PINCTRL_PIN(61, "GSPI4_CS0B"),
-	PINCTRL_PIN(62, "GSPI4_CLK"),
-	PINCTRL_PIN(63, "GSPI4_MISO"),
-	PINCTRL_PIN(64, "GSPI4_MOSI"),
-	PINCTRL_PIN(65, "GSPI5_CS0B"),
-	PINCTRL_PIN(66, "GSPI5_CLK"),
-	PINCTRL_PIN(67, "GSPI5_MISO"),
-	PINCTRL_PIN(68, "GSPI5_MOSI"),
-	PINCTRL_PIN(69, "GSPI6_CS0B"),
-	PINCTRL_PIN(70, "GSPI6_CLK"),
-	PINCTRL_PIN(71, "GSPI6_MISO"),
-	PINCTRL_PIN(72, "GSPI6_MOSI"),
-	PINCTRL_PIN(73, "GSPI3_CLK_LOOPBK"),
-	PINCTRL_PIN(74, "GSPI4_CLK_LOOPBK"),
-	PINCTRL_PIN(75, "GSPI5_CLK_LOOPBK"),
-	PINCTRL_PIN(76, "GSPI6_CLK_LOOPBK"),
-	/* vGPIO */
-	PINCTRL_PIN(77, "CNV_BTEN"),
-	PINCTRL_PIN(78, "CNV_BT_HOST_WAKEB"),
-	PINCTRL_PIN(79, "CNV_BT_IF_SELECT"),
-	PINCTRL_PIN(80, "vCNV_BT_UART_TXD"),
-	PINCTRL_PIN(81, "vCNV_BT_UART_RXD"),
-	PINCTRL_PIN(82, "vCNV_BT_UART_CTS_B"),
-	PINCTRL_PIN(83, "vCNV_BT_UART_RTS_B"),
-	PINCTRL_PIN(84, "vCNV_MFUART1_TXD"),
-	PINCTRL_PIN(85, "vCNV_MFUART1_RXD"),
-	PINCTRL_PIN(86, "vCNV_MFUART1_CTS_B"),
-	PINCTRL_PIN(87, "vCNV_MFUART1_RTS_B"),
-	PINCTRL_PIN(88, "vUART0_TXD"),
-	PINCTRL_PIN(89, "vUART0_RXD"),
-	PINCTRL_PIN(90, "vUART0_CTS_B"),
-	PINCTRL_PIN(91, "vUART0_RTS_B"),
-	PINCTRL_PIN(92, "vISH_UART0_TXD"),
-	PINCTRL_PIN(93, "vISH_UART0_RXD"),
-	PINCTRL_PIN(94, "vISH_UART0_CTS_B"),
-	PINCTRL_PIN(95, "vISH_UART0_RTS_B"),
-	PINCTRL_PIN(96, "vCNV_BT_I2S_BCLK"),
-	PINCTRL_PIN(97, "vCNV_BT_I2S_WS_SYNC"),
-	PINCTRL_PIN(98, "vCNV_BT_I2S_SDO"),
-	PINCTRL_PIN(99, "vCNV_BT_I2S_SDI"),
-	PINCTRL_PIN(100, "vI2S2_SCLK"),
-	PINCTRL_PIN(101, "vI2S2_SFRM"),
-	PINCTRL_PIN(102, "vI2S2_TXD"),
-	PINCTRL_PIN(103, "vI2S2_RXD"),
+	TGL_GPP(0, 0, 25, 0),			/* GPP_B */
+	TGL_GPP(1, 26, 41, 32),			/* GPP_T */
+	TGL_GPP(2, 42, 66, 64),			/* GPP_A */
 };
 
 static const struct intel_padgroup tgllp_community1_gpps[] = {
-	TGL_GPP(0, 0, 7),	/* GPP_S */
-	TGL_GPP(1, 8, 31),	/* GPP_H */
-	TGL_GPP(2, 32, 52),	/* GPP_D */
-	TGL_GPP(3, 53, 76),	/* GPP_U */
-	TGL_GPP(4, 77, 103),	/* vGPIO */
-};
-
-static const struct intel_community tgllp_community1[] = {
-	TGL_COMMUNITY(0, 103, tgllp_community1_gpps),
-};
-
-static const struct intel_pinctrl_soc_data tgllp_community1_soc_data = {
-	.uid = "1",
-	.pins = tgllp_community1_pins,
-	.npins = ARRAY_SIZE(tgllp_community1_pins),
-	.communities = tgllp_community1,
-	.ncommunities = ARRAY_SIZE(tgllp_community1),
-};
-
-static const struct pinctrl_pin_desc tgllp_community4_pins[] = {
-	/* GPP_C */
-	PINCTRL_PIN(0, "SMBCLK"),
-	PINCTRL_PIN(1, "SMBDATA"),
-	PINCTRL_PIN(2, "SMBALERTB"),
-	PINCTRL_PIN(3, "SML0CLK"),
-	PINCTRL_PIN(4, "SML0DATA"),
-	PINCTRL_PIN(5, "SML0ALERTB"),
-	PINCTRL_PIN(6, "SML1CLK"),
-	PINCTRL_PIN(7, "SML1DATA"),
-	PINCTRL_PIN(8, "UART0_RXD"),
-	PINCTRL_PIN(9, "UART0_TXD"),
-	PINCTRL_PIN(10, "UART0_RTSB"),
-	PINCTRL_PIN(11, "UART0_CTSB"),
-	PINCTRL_PIN(12, "UART1_RXD"),
-	PINCTRL_PIN(13, "UART1_TXD"),
-	PINCTRL_PIN(14, "UART1_RTSB"),
-	PINCTRL_PIN(15, "UART1_CTSB"),
-	PINCTRL_PIN(16, "I2C0_SDA"),
-	PINCTRL_PIN(17, "I2C0_SCL"),
-	PINCTRL_PIN(18, "I2C1_SDA"),
-	PINCTRL_PIN(19, "I2C1_SCL"),
-	PINCTRL_PIN(20, "UART2_RXD"),
-	PINCTRL_PIN(21, "UART2_TXD"),
-	PINCTRL_PIN(22, "UART2_RTSB"),
-	PINCTRL_PIN(23, "UART2_CTSB"),
-	/* GPP_F */
-	PINCTRL_PIN(24, "CNV_BRI_DT"),
-	PINCTRL_PIN(25, "CNV_BRI_RSP"),
-	PINCTRL_PIN(26, "CNV_RGI_DT"),
-	PINCTRL_PIN(27, "CNV_RGI_RSP"),
-	PINCTRL_PIN(28, "CNV_RF_RESET_B"),
-	PINCTRL_PIN(29, "GPPC_F_5"),
-	PINCTRL_PIN(30, "CNV_PA_BLANKING"),
-	PINCTRL_PIN(31, "GPPC_F_7"),
-	PINCTRL_PIN(32, "I2S_MCLK2_INOUT"),
-	PINCTRL_PIN(33, "BOOTMPC"),
-	PINCTRL_PIN(34, "GPPC_F_10"),
-	PINCTRL_PIN(35, "GPPC_F_11"),
-	PINCTRL_PIN(36, "GSXDOUT"),
-	PINCTRL_PIN(37, "GSXSLOAD"),
-	PINCTRL_PIN(38, "GSXDIN"),
-	PINCTRL_PIN(39, "GSXSRESETB"),
-	PINCTRL_PIN(40, "GSXCLK"),
-	PINCTRL_PIN(41, "GMII_MDC"),
-	PINCTRL_PIN(42, "GMII_MDIO"),
-	PINCTRL_PIN(43, "SRCCLKREQB_6"),
-	PINCTRL_PIN(44, "EXT_PWR_GATEB"),
-	PINCTRL_PIN(45, "EXT_PWR_GATE2B"),
-	PINCTRL_PIN(46, "VNN_CTRL"),
-	PINCTRL_PIN(47, "V1P05_CTRL"),
-	PINCTRL_PIN(48, "GPPF_CLK_LOOPBACK"),
-	/* HVCMOS */
-	PINCTRL_PIN(49, "L_BKLTEN"),
-	PINCTRL_PIN(50, "L_BKLTCTL"),
-	PINCTRL_PIN(51, "L_VDDEN"),
-	PINCTRL_PIN(52, "SYS_PWROK"),
-	PINCTRL_PIN(53, "SYS_RESETB"),
-	PINCTRL_PIN(54, "MLK_RSTB"),
-	/* GPP_E */
-	PINCTRL_PIN(55, "SATAXPCIE_0"),
-	PINCTRL_PIN(56, "SPI1_IO_2"),
-	PINCTRL_PIN(57, "SPI1_IO_3"),
-	PINCTRL_PIN(58, "CPU_GP_0"),
-	PINCTRL_PIN(59, "SATA_DEVSLP_0"),
-	PINCTRL_PIN(60, "SATA_DEVSLP_1"),
-	PINCTRL_PIN(61, "GPPC_E_6"),
-	PINCTRL_PIN(62, "CPU_GP_1"),
-	PINCTRL_PIN(63, "SPI1_CS1B"),
-	PINCTRL_PIN(64, "USB2_OCB_0"),
-	PINCTRL_PIN(65, "SPI1_CSB"),
-	PINCTRL_PIN(66, "SPI1_CLK"),
-	PINCTRL_PIN(67, "SPI1_MISO_IO_1"),
-	PINCTRL_PIN(68, "SPI1_MOSI_IO_0"),
-	PINCTRL_PIN(69, "DDSP_HPD_A"),
-	PINCTRL_PIN(70, "ISH_GP_6"),
-	PINCTRL_PIN(71, "ISH_GP_7"),
-	PINCTRL_PIN(72, "GPPC_E_17"),
-	PINCTRL_PIN(73, "DDP1_CTRLCLK"),
-	PINCTRL_PIN(74, "DDP1_CTRLDATA"),
-	PINCTRL_PIN(75, "DDP2_CTRLCLK"),
-	PINCTRL_PIN(76, "DDP2_CTRLDATA"),
-	PINCTRL_PIN(77, "DDPA_CTRLCLK"),
-	PINCTRL_PIN(78, "DDPA_CTRLDATA"),
-	PINCTRL_PIN(79, "SPI1_CLK_LOOPBK"),
-	/* JTAG */
-	PINCTRL_PIN(80, "JTAG_TDO"),
-	PINCTRL_PIN(81, "JTAGX"),
-	PINCTRL_PIN(82, "PRDYB"),
-	PINCTRL_PIN(83, "PREQB"),
-	PINCTRL_PIN(84, "CPU_TRSTB"),
-	PINCTRL_PIN(85, "JTAG_TDI"),
-	PINCTRL_PIN(86, "JTAG_TMS"),
-	PINCTRL_PIN(87, "JTAG_TCK"),
-	PINCTRL_PIN(88, "DBG_PMODE"),
+	TGL_GPP(0, 67, 74, 96),			/* GPP_S */
+	TGL_GPP(1, 75, 98, 128),		/* GPP_H */
+	TGL_GPP(2, 99, 119, 160),		/* GPP_D */
+	TGL_GPP(3, 120, 143, 192),		/* GPP_U */
+	TGL_GPP(4, 144, 170, 224),		/* vGPIO */
 };
 
 static const struct intel_padgroup tgllp_community4_gpps[] = {
-	TGL_GPP(0, 0, 23),	/* GPP_C */
-	TGL_GPP(1, 24, 48),	/* GPP_F */
-	TGL_GPP(2, 49, 54),	/* HVCMOS */
-	TGL_GPP(3, 55, 79),	/* GPP_E */
-	TGL_GPP(4, 80, 88),	/* JTAG */
-};
-
-static const struct intel_community tgllp_community4[] = {
-	TGL_COMMUNITY(0, 88, tgllp_community4_gpps),
-};
-
-static const struct intel_pinctrl_soc_data tgllp_community4_soc_data = {
-	.uid = "4",
-	.pins = tgllp_community4_pins,
-	.npins = ARRAY_SIZE(tgllp_community4_pins),
-	.communities = tgllp_community4,
-	.ncommunities = ARRAY_SIZE(tgllp_community4),
-};
-
-static const struct pinctrl_pin_desc tgllp_community5_pins[] = {
-	/* GPP_R */
-	PINCTRL_PIN(0, "HDA_BCLK"),
-	PINCTRL_PIN(1, "HDA_SYNC"),
-	PINCTRL_PIN(2, "HDA_SDO"),
-	PINCTRL_PIN(3, "HDA_SDI_0"),
-	PINCTRL_PIN(4, "HDA_RSTB"),
-	PINCTRL_PIN(5, "HDA_SDI_1"),
-	PINCTRL_PIN(6, "GPP_R_6"),
-	PINCTRL_PIN(7, "GPP_R_7"),
-	/* SPI */
-	PINCTRL_PIN(8, "SPI0_IO_2"),
-	PINCTRL_PIN(9, "SPI0_IO_3"),
-	PINCTRL_PIN(10, "SPI0_MOSI_IO_0"),
-	PINCTRL_PIN(11, "SPI0_MISO_IO_1"),
-	PINCTRL_PIN(12, "SPI0_TPM_CSB"),
-	PINCTRL_PIN(13, "SPI0_FLASH_0_CSB"),
-	PINCTRL_PIN(14, "SPI0_FLASH_1_CSB"),
-	PINCTRL_PIN(15, "SPI0_CLK"),
-	PINCTRL_PIN(16, "SPI0_CLK_LOOPBK"),
+	TGL_GPP(0, 171, 194, 256),		/* GPP_C */
+	TGL_GPP(1, 195, 219, 288),		/* GPP_F */
+	TGL_GPP(2, 220, 225, TGL_NO_GPIO),	/* HVCMOS */
+	TGL_GPP(3, 226, 250, 320),		/* GPP_E */
+	TGL_GPP(4, 251, 259, TGL_NO_GPIO),	/* JTAG */
 };
 
 static const struct intel_padgroup tgllp_community5_gpps[] = {
-	TGL_GPP(0, 0, 7),	/* GPP_R */
-	TGL_GPP(1, 8, 16),	/* SPI */
+	TGL_GPP(0, 260, 267, 352),		/* GPP_R */
+	TGL_GPP(1, 268, 276, TGL_NO_GPIO),	/* SPI */
 };
 
-static const struct intel_community tgllp_community5[] = {
-	TGL_COMMUNITY(0, 16, tgllp_community5_gpps),
+static const struct intel_community tgllp_communities[] = {
+	TGL_COMMUNITY(0, 0, 66, tgllp_community0_gpps),
+	TGL_COMMUNITY(1, 67, 170, tgllp_community1_gpps),
+	TGL_COMMUNITY(2, 171, 259, tgllp_community4_gpps),
+	TGL_COMMUNITY(3, 260, 276, tgllp_community5_gpps),
 };
 
-static const struct intel_pinctrl_soc_data tgllp_community5_soc_data = {
-	.uid = "5",
-	.pins = tgllp_community5_pins,
-	.npins = ARRAY_SIZE(tgllp_community5_pins),
-	.communities = tgllp_community5,
-	.ncommunities = ARRAY_SIZE(tgllp_community5),
-};
-
-static const struct intel_pinctrl_soc_data *tgllp_soc_data_array[] = {
-	&tgllp_community0_soc_data,
-	&tgllp_community1_soc_data,
-	&tgllp_community4_soc_data,
-	&tgllp_community5_soc_data,
-	NULL
+static const struct intel_pinctrl_soc_data tgllp_soc_data = {
+	.pins = tgllp_pins,
+	.npins = ARRAY_SIZE(tgllp_pins),
+	.communities = tgllp_communities,
+	.ncommunities = ARRAY_SIZE(tgllp_communities),
 };
 
 static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
-	{ "INT34C5", (kernel_ulong_t)tgllp_soc_data_array },
+	{ "INT34C5", (kernel_ulong_t)&tgllp_soc_data },
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
@@ -438,7 +391,7 @@ MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
 static INTEL_PINCTRL_PM_OPS(tgl_pinctrl_pm_ops);
 
 static struct platform_driver tgl_pinctrl_driver = {
-	.probe = intel_pinctrl_probe_by_uid,
+	.probe = intel_pinctrl_probe_by_hid,
 	.driver = {
 		.name = "tigerlake-pinctrl",
 		.acpi_match_table = tgl_pinctrl_acpi_match,
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h
index ba2356a..845c408 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2018 MediaTek Inc.
  * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
index 8ff88bf..aa79d7e 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
 /*
  * Copyright (c) 2017 Baylibre SAS.
  * Author:  Jerome Brunet  <jbrunet@baylibre.com>
@@ -5,7 +6,6 @@
  * Copyright (c) 2017 Amlogic, Inc. All rights reserved.
  * Author: Xingyu Chen <xingyu.chen@amlogic.com>
  *
- * SPDX-License-Identifier: (GPL-2.0+ or MIT)
  */
 
 struct meson_pmx_bank {
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 2d5339e..6cd4b3e 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -233,6 +233,8 @@ static const unsigned int hdmi_scl_pins[]	= { GPIOH_2 };
 static const unsigned int hdmi_cec_0_pins[]	= { GPIOH_3 };
 static const unsigned int eth_txd1_0_pins[]	= { GPIOH_5 };
 static const unsigned int eth_txd0_0_pins[]	= { GPIOH_6 };
+static const unsigned int eth_rxd3_h_pins[]	= { GPIOH_5 };
+static const unsigned int eth_rxd2_h_pins[]	= { GPIOH_6 };
 static const unsigned int clk_24m_out_pins[]	= { GPIOH_9 };
 
 static const unsigned int spi_ss1_pins[]	= { GPIOH_0 };
@@ -535,6 +537,8 @@ static struct meson_pmx_group meson8b_cbus_groups[] = {
 	GROUP(spi_miso_1,	9,	12),
 	GROUP(spi_mosi_1,	9,	11),
 	GROUP(spi_sclk_1,	9,	10),
+	GROUP(eth_rxd3_h,	6,	15),
+	GROUP(eth_rxd2_h,	6,	14),
 	GROUP(eth_txd3,		6,	13),
 	GROUP(eth_txd2,		6,	12),
 	GROUP(eth_tx_clk,	6,	11),
@@ -746,7 +750,8 @@ static const char * const ethernet_groups[] = {
 	"eth_tx_clk", "eth_tx_en", "eth_txd1_0", "eth_txd1_1",
 	"eth_txd0_0", "eth_txd0_1", "eth_rx_clk", "eth_rx_dv",
 	"eth_rxd1", "eth_rxd0", "eth_mdio_en", "eth_mdc", "eth_ref_clk",
-	"eth_txd2", "eth_txd3", "eth_rxd3", "eth_rxd2"
+	"eth_txd2", "eth_txd3", "eth_rxd3", "eth_rxd2",
+	"eth_rxd3_h", "eth_rxd2_h"
 };
 
 static const char * const i2c_a_groups[] = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index aa9dcde..243fba2 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -15,7 +15,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
-#include <linux/of_irq.h>
 #include <linux/pinctrl/pinconf-generic.h>
 #include <linux/pinctrl/pinconf.h>
 #include <linux/pinctrl/pinctrl.h>
@@ -733,13 +732,20 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
 			ret = 0;
 			break;
 		}
-	};
+	}
 	if (ret) {
 		dev_err(dev, "no gpio-controller child node\n");
 		return ret;
 	}
 
-	nr_irq_parent = of_irq_count(np);
+	nr_irq_parent = platform_irq_count(pdev);
+	if (nr_irq_parent < 0) {
+		if (nr_irq_parent != -EPROBE_DEFER)
+			dev_err(dev, "Couldn't determine irq count: %pe\n",
+				ERR_PTR(nr_irq_parent));
+		return nr_irq_parent;
+	}
+
 	spin_lock_init(&info->irq_lock);
 
 	if (!nr_irq_parent) {
@@ -776,7 +782,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
 	if (!girq->parents)
 		return -ENOMEM;
 	for (i = 0; i < nr_irq_parent; i++) {
-		int irq = irq_of_parse_and_map(np, i);
+		int irq = platform_get_irq(pdev, i);
 
 		if (irq < 0)
 			continue;
@@ -800,7 +806,7 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
 			ret = 0;
 			break;
 		}
-	};
+	}
 	if (ret)
 		return ret;
 
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 22077cb..a935065 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -331,7 +331,7 @@ static unsigned int npcmgpio_irq_startup(struct irq_data *d)
 	return 0;
 }
 
-static struct irq_chip npcmgpio_irqchip = {
+static const struct irq_chip npcmgpio_irqchip = {
 	.name = "NPCM7XX-GPIO-IRQ",
 	.irq_ack = npcmgpio_irq_ack,
 	.irq_unmask = npcmgpio_irq_unmask,
diff --git a/drivers/pinctrl/pinctrl-artpec6.c b/drivers/pinctrl/pinctrl-artpec6.c
index 986e04a..d6c9f9d 100644
--- a/drivers/pinctrl/pinctrl-artpec6.c
+++ b/drivers/pinctrl/pinctrl-artpec6.c
@@ -798,7 +798,7 @@ static int artpec6_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
 	enum pin_config_param param;
 	unsigned int arg;
 	unsigned int regval;
-	unsigned int *reg;
+	void __iomem *reg;
 	int i;
 
 	/* Check for valid pin */
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 369e043..96f04d1 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -3,7 +3,7 @@
  * Ingenic SoCs pinctrl driver
  *
  * Copyright (c) 2017 Paul Cercueil <paul@crapouillou.net>
- * Copyright (c) 2019 Zhou Yanjie <zhouyanjie@zoho.com>
+ * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
  */
 
 #include <linux/compiler.h>
@@ -42,28 +42,36 @@
 #define JZ4760_GPIO_FLAG	0x50
 #define JZ4760_GPIO_PEN		0x70
 
-#define X1000_GPIO_PZ_BASE		0x700
-#define X1000_GPIO_PZ_GID2LD	0x7f0
+#define X1830_GPIO_PEL			0x110
+#define X1830_GPIO_PEH			0x120
 
 #define REG_SET(x) ((x) + 0x4)
 #define REG_CLEAR(x) ((x) + 0x8)
 
+#define REG_PZ_BASE(x) ((x) * 7)
+#define REG_PZ_GID2LD(x) ((x) * 7 + 0xf0)
+
+#define GPIO_PULL_DIS	0
+#define GPIO_PULL_UP	1
+#define GPIO_PULL_DOWN	2
+
 #define PINS_PER_GPIO_CHIP 32
 
 enum jz_version {
 	ID_JZ4740,
 	ID_JZ4725B,
 	ID_JZ4760,
-	ID_JZ4760B,
 	ID_JZ4770,
 	ID_JZ4780,
 	ID_X1000,
-	ID_X1000E,
 	ID_X1500,
+	ID_X1830,
 };
 
 struct ingenic_chip_info {
 	unsigned int num_chips;
+	unsigned int reg_offset;
+	enum jz_version version;
 
 	const struct group_desc *groups;
 	unsigned int num_groups;
@@ -79,7 +87,6 @@ struct ingenic_pinctrl {
 	struct regmap *map;
 	struct pinctrl_dev *pctl;
 	struct pinctrl_pin_desc *pdesc;
-	enum jz_version version;
 
 	const struct ingenic_chip_info *info;
 };
@@ -216,6 +223,8 @@ static const struct function_desc jz4740_functions[] = {
 
 static const struct ingenic_chip_info jz4740_chip_info = {
 	.num_chips = 4,
+	.reg_offset = 0x100,
+	.version = ID_JZ4740,
 	.groups = jz4740_groups,
 	.num_groups = ARRAY_SIZE(jz4740_groups),
 	.functions = jz4740_functions,
@@ -339,6 +348,8 @@ static const struct function_desc jz4725b_functions[] = {
 
 static const struct ingenic_chip_info jz4725b_chip_info = {
 	.num_chips = 4,
+	.reg_offset = 0x100,
+	.version = ID_JZ4725B,
 	.groups = jz4725b_groups,
 	.num_groups = ARRAY_SIZE(jz4725b_groups),
 	.functions = jz4725b_functions,
@@ -592,16 +603,8 @@ static const struct function_desc jz4760_functions[] = {
 
 static const struct ingenic_chip_info jz4760_chip_info = {
 	.num_chips = 6,
-	.groups = jz4760_groups,
-	.num_groups = ARRAY_SIZE(jz4760_groups),
-	.functions = jz4760_functions,
-	.num_functions = ARRAY_SIZE(jz4760_functions),
-	.pull_ups = jz4760_pull_ups,
-	.pull_downs = jz4760_pull_downs,
-};
-
-static const struct ingenic_chip_info jz4760b_chip_info = {
-	.num_chips = 6,
+	.reg_offset = 0x100,
+	.version = ID_JZ4760,
 	.groups = jz4760_groups,
 	.num_groups = ARRAY_SIZE(jz4760_groups),
 	.functions = jz4760_functions,
@@ -880,6 +883,8 @@ static const struct function_desc jz4770_functions[] = {
 
 static const struct ingenic_chip_info jz4770_chip_info = {
 	.num_chips = 6,
+	.reg_offset = 0x100,
+	.version = ID_JZ4770,
 	.groups = jz4770_groups,
 	.num_groups = ARRAY_SIZE(jz4770_groups),
 	.functions = jz4770_functions,
@@ -1013,6 +1018,8 @@ static const struct function_desc jz4780_functions[] = {
 
 static const struct ingenic_chip_info jz4780_chip_info = {
 	.num_chips = 6,
+	.reg_offset = 0x100,
+	.version = ID_JZ4780,
 	.groups = jz4780_groups,
 	.num_groups = ARRAY_SIZE(jz4780_groups),
 	.functions = jz4780_functions,
@@ -1022,7 +1029,7 @@ static const struct ingenic_chip_info jz4780_chip_info = {
 };
 
 static const u32 x1000_pull_ups[4] = {
-	0xffffffff, 0x8dffffff, 0x7d3fffff, 0xffffffff,
+	0xffffffff, 0xfdffffff, 0x0dffffff, 0x0000003f,
 };
 
 static const u32 x1000_pull_downs[4] = {
@@ -1033,28 +1040,45 @@ static int x1000_uart0_data_pins[] = { 0x4a, 0x4b, };
 static int x1000_uart0_hwflow_pins[] = { 0x4c, 0x4d, };
 static int x1000_uart1_data_a_pins[] = { 0x04, 0x05, };
 static int x1000_uart1_data_d_pins[] = { 0x62, 0x63, };
-static int x1000_uart1_hwflow_d_pins[] = { 0x64, 0x65, };
+static int x1000_uart1_hwflow_pins[] = { 0x64, 0x65, };
 static int x1000_uart2_data_a_pins[] = { 0x02, 0x03, };
 static int x1000_uart2_data_d_pins[] = { 0x65, 0x64, };
+static int x1000_sfc_pins[] = { 0x1d, 0x1c, 0x1e, 0x1f, 0x1a, 0x1b, };
+static int x1000_ssi_dt_a_22_pins[] = { 0x16, };
+static int x1000_ssi_dt_a_29_pins[] = { 0x1d, };
+static int x1000_ssi_dt_d_pins[] = { 0x62, };
+static int x1000_ssi_dr_a_23_pins[] = { 0x17, };
+static int x1000_ssi_dr_a_28_pins[] = { 0x1c, };
+static int x1000_ssi_dr_d_pins[] = { 0x63, };
+static int x1000_ssi_clk_a_24_pins[] = { 0x18, };
+static int x1000_ssi_clk_a_26_pins[] = { 0x1a, };
+static int x1000_ssi_clk_d_pins[] = { 0x60, };
+static int x1000_ssi_gpc_a_20_pins[] = { 0x14, };
+static int x1000_ssi_gpc_a_31_pins[] = { 0x1f, };
+static int x1000_ssi_ce0_a_25_pins[] = { 0x19, };
+static int x1000_ssi_ce0_a_27_pins[] = { 0x1b, };
+static int x1000_ssi_ce0_d_pins[] = { 0x61, };
+static int x1000_ssi_ce1_a_21_pins[] = { 0x15, };
+static int x1000_ssi_ce1_a_30_pins[] = { 0x1e, };
 static int x1000_mmc0_1bit_pins[] = { 0x18, 0x19, 0x17, };
 static int x1000_mmc0_4bit_pins[] = { 0x16, 0x15, 0x14, };
 static int x1000_mmc0_8bit_pins[] = { 0x13, 0x12, 0x11, 0x10, };
 static int x1000_mmc1_1bit_pins[] = { 0x40, 0x41, 0x42, };
 static int x1000_mmc1_4bit_pins[] = { 0x43, 0x44, 0x45, };
-static int x1000_nemc_8bit_data_pins[] = {
+static int x1000_emc_8bit_data_pins[] = {
 	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
 };
-static int x1000_nemc_16bit_data_pins[] = {
+static int x1000_emc_16bit_data_pins[] = {
 	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
 };
-static int x1000_nemc_addr_pins[] = {
+static int x1000_emc_addr_pins[] = {
 	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
 	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
 };
-static int x1000_nemc_rd_we_pins[] = { 0x30, 0x31, };
-static int x1000_nemc_wait_pins[] = { 0x34, };
-static int x1000_nemc_cs1_pins[] = { 0x32, };
-static int x1000_nemc_cs2_pins[] = { 0x33, };
+static int x1000_emc_rd_we_pins[] = { 0x30, 0x31, };
+static int x1000_emc_wait_pins[] = { 0x34, };
+static int x1000_emc_cs1_pins[] = { 0x32, };
+static int x1000_emc_cs2_pins[] = { 0x33, };
 static int x1000_i2c0_pins[] = { 0x38, 0x37, };
 static int x1000_i2c1_a_pins[] = { 0x01, 0x00, };
 static int x1000_i2c1_c_pins[] = { 0x5b, 0x5a, };
@@ -1083,23 +1107,40 @@ static int x1000_uart0_data_funcs[] = { 0, 0, };
 static int x1000_uart0_hwflow_funcs[] = { 0, 0, };
 static int x1000_uart1_data_a_funcs[] = { 2, 2, };
 static int x1000_uart1_data_d_funcs[] = { 1, 1, };
-static int x1000_uart1_hwflow_d_funcs[] = { 1, 1, };
+static int x1000_uart1_hwflow_funcs[] = { 1, 1, };
 static int x1000_uart2_data_a_funcs[] = { 2, 2, };
 static int x1000_uart2_data_d_funcs[] = { 0, 0, };
+static int x1000_sfc_funcs[] = { 1, 1, 1, 1, 1, 1, };
+static int x1000_ssi_dt_a_22_funcs[] = { 2, };
+static int x1000_ssi_dt_a_29_funcs[] = { 2, };
+static int x1000_ssi_dt_d_funcs[] = { 0, };
+static int x1000_ssi_dr_a_23_funcs[] = { 2, };
+static int x1000_ssi_dr_a_28_funcs[] = { 2, };
+static int x1000_ssi_dr_d_funcs[] = { 0, };
+static int x1000_ssi_clk_a_24_funcs[] = { 2, };
+static int x1000_ssi_clk_a_26_funcs[] = { 2, };
+static int x1000_ssi_clk_d_funcs[] = { 0, };
+static int x1000_ssi_gpc_a_20_funcs[] = { 2, };
+static int x1000_ssi_gpc_a_31_funcs[] = { 2, };
+static int x1000_ssi_ce0_a_25_funcs[] = { 2, };
+static int x1000_ssi_ce0_a_27_funcs[] = { 2, };
+static int x1000_ssi_ce0_d_funcs[] = { 0, };
+static int x1000_ssi_ce1_a_21_funcs[] = { 2, };
+static int x1000_ssi_ce1_a_30_funcs[] = { 2, };
 static int x1000_mmc0_1bit_funcs[] = { 1, 1, 1, };
 static int x1000_mmc0_4bit_funcs[] = { 1, 1, 1, };
 static int x1000_mmc0_8bit_funcs[] = { 1, 1, 1, 1, 1, };
 static int x1000_mmc1_1bit_funcs[] = { 0, 0, 0, };
 static int x1000_mmc1_4bit_funcs[] = { 0, 0, 0, };
-static int x1000_nemc_8bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int x1000_nemc_16bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int x1000_nemc_addr_funcs[] = {
+static int x1000_emc_8bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
+static int x1000_emc_16bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
+static int x1000_emc_addr_funcs[] = {
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 };
-static int x1000_nemc_rd_we_funcs[] = { 0, 0, };
-static int x1000_nemc_wait_funcs[] = { 0, };
-static int x1000_nemc_cs1_funcs[] = { 0, };
-static int x1000_nemc_cs2_funcs[] = { 0, };
+static int x1000_emc_rd_we_funcs[] = { 0, 0, };
+static int x1000_emc_wait_funcs[] = { 0, };
+static int x1000_emc_cs1_funcs[] = { 0, };
+static int x1000_emc_cs2_funcs[] = { 0, };
 static int x1000_i2c0_funcs[] = { 0, 0, };
 static int x1000_i2c1_a_funcs[] = { 2, 2, };
 static int x1000_i2c1_c_funcs[] = { 0, 0, };
@@ -1121,21 +1162,38 @@ static const struct group_desc x1000_groups[] = {
 	INGENIC_PIN_GROUP("uart0-hwflow", x1000_uart0_hwflow),
 	INGENIC_PIN_GROUP("uart1-data-a", x1000_uart1_data_a),
 	INGENIC_PIN_GROUP("uart1-data-d", x1000_uart1_data_d),
-	INGENIC_PIN_GROUP("uart1-hwflow-d", x1000_uart1_hwflow_d),
+	INGENIC_PIN_GROUP("uart1-hwflow", x1000_uart1_hwflow),
 	INGENIC_PIN_GROUP("uart2-data-a", x1000_uart2_data_a),
 	INGENIC_PIN_GROUP("uart2-data-d", x1000_uart2_data_d),
+	INGENIC_PIN_GROUP("sfc", x1000_sfc),
+	INGENIC_PIN_GROUP("ssi-dt-a-22", x1000_ssi_dt_a_22),
+	INGENIC_PIN_GROUP("ssi-dt-a-29", x1000_ssi_dt_a_29),
+	INGENIC_PIN_GROUP("ssi-dt-d", x1000_ssi_dt_d),
+	INGENIC_PIN_GROUP("ssi-dr-a-23", x1000_ssi_dr_a_23),
+	INGENIC_PIN_GROUP("ssi-dr-a-28", x1000_ssi_dr_a_28),
+	INGENIC_PIN_GROUP("ssi-dr-d", x1000_ssi_dr_d),
+	INGENIC_PIN_GROUP("ssi-clk-a-24", x1000_ssi_clk_a_24),
+	INGENIC_PIN_GROUP("ssi-clk-a-26", x1000_ssi_clk_a_26),
+	INGENIC_PIN_GROUP("ssi-clk-d", x1000_ssi_clk_d),
+	INGENIC_PIN_GROUP("ssi-gpc-a-20", x1000_ssi_gpc_a_20),
+	INGENIC_PIN_GROUP("ssi-gpc-a-31", x1000_ssi_gpc_a_31),
+	INGENIC_PIN_GROUP("ssi-ce0-a-25", x1000_ssi_ce0_a_25),
+	INGENIC_PIN_GROUP("ssi-ce0-a-27", x1000_ssi_ce0_a_27),
+	INGENIC_PIN_GROUP("ssi-ce0-d", x1000_ssi_ce0_d),
+	INGENIC_PIN_GROUP("ssi-ce1-a-21", x1000_ssi_ce1_a_21),
+	INGENIC_PIN_GROUP("ssi-ce1-a-30", x1000_ssi_ce1_a_30),
 	INGENIC_PIN_GROUP("mmc0-1bit", x1000_mmc0_1bit),
 	INGENIC_PIN_GROUP("mmc0-4bit", x1000_mmc0_4bit),
 	INGENIC_PIN_GROUP("mmc0-8bit", x1000_mmc0_8bit),
 	INGENIC_PIN_GROUP("mmc1-1bit", x1000_mmc1_1bit),
 	INGENIC_PIN_GROUP("mmc1-4bit", x1000_mmc1_4bit),
-	INGENIC_PIN_GROUP("nemc-8bit-data", x1000_nemc_8bit_data),
-	INGENIC_PIN_GROUP("nemc-16bit-data", x1000_nemc_16bit_data),
-	INGENIC_PIN_GROUP("nemc-addr", x1000_nemc_addr),
-	INGENIC_PIN_GROUP("nemc-rd-we", x1000_nemc_rd_we),
-	INGENIC_PIN_GROUP("nemc-wait", x1000_nemc_wait),
-	INGENIC_PIN_GROUP("nemc-cs1", x1000_nemc_cs1),
-	INGENIC_PIN_GROUP("nemc-cs2", x1000_nemc_cs2),
+	INGENIC_PIN_GROUP("emc-8bit-data", x1000_emc_8bit_data),
+	INGENIC_PIN_GROUP("emc-16bit-data", x1000_emc_16bit_data),
+	INGENIC_PIN_GROUP("emc-addr", x1000_emc_addr),
+	INGENIC_PIN_GROUP("emc-rd-we", x1000_emc_rd_we),
+	INGENIC_PIN_GROUP("emc-wait", x1000_emc_wait),
+	INGENIC_PIN_GROUP("emc-cs1", x1000_emc_cs1),
+	INGENIC_PIN_GROUP("emc-cs2", x1000_emc_cs2),
 	INGENIC_PIN_GROUP("i2c0-data", x1000_i2c0),
 	INGENIC_PIN_GROUP("i2c1-data-a", x1000_i2c1_a),
 	INGENIC_PIN_GROUP("i2c1-data-c", x1000_i2c1_c),
@@ -1154,21 +1212,30 @@ static const struct group_desc x1000_groups[] = {
 
 static const char *x1000_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
 static const char *x1000_uart1_groups[] = {
-	"uart1-data-a", "uart1-data-d", "uart1-hwflow-d",
+	"uart1-data-a", "uart1-data-d", "uart1-hwflow",
 };
 static const char *x1000_uart2_groups[] = { "uart2-data-a", "uart2-data-d", };
+static const char *x1000_sfc_groups[] = { "sfc", };
+static const char *x1000_ssi_groups[] = {
+	"ssi-dt-a-22", "ssi-dt-a-29", "ssi-dt-d",
+	"ssi-dr-a-23", "ssi-dr-a-28", "ssi-dr-d",
+	"ssi-clk-a-24", "ssi-clk-a-26", "ssi-clk-d",
+	"ssi-gpc-a-20", "ssi-gpc-a-31",
+	"ssi-ce0-a-25", "ssi-ce0-a-27", "ssi-ce0-d",
+	"ssi-ce1-a-21", "ssi-ce1-a-30",
+};
 static const char *x1000_mmc0_groups[] = {
 	"mmc0-1bit", "mmc0-4bit", "mmc0-8bit",
 };
 static const char *x1000_mmc1_groups[] = {
-	"mmc1-1bit-e", "mmc1-4bit-e",
+	"mmc1-1bit", "mmc1-4bit",
 };
-static const char *x1000_nemc_groups[] = {
-	"nemc-8bit-data", "nemc-16bit-data",
-	"nemc-addr", "nemc-rd-we", "nemc-wait",
+static const char *x1000_emc_groups[] = {
+	"emc-8bit-data", "emc-16bit-data",
+	"emc-addr", "emc-rd-we", "emc-wait",
 };
-static const char *x1000_cs1_groups[] = { "nemc-cs1", };
-static const char *x1000_cs2_groups[] = { "nemc-cs2", };
+static const char *x1000_cs1_groups[] = { "emc-cs1", };
+static const char *x1000_cs2_groups[] = { "emc-cs2", };
 static const char *x1000_i2c0_groups[] = { "i2c0-data", };
 static const char *x1000_i2c1_groups[] = { "i2c1-data-a", "i2c1-data-c", };
 static const char *x1000_i2c2_groups[] = { "i2c2-data", };
@@ -1187,11 +1254,13 @@ static const struct function_desc x1000_functions[] = {
 	{ "uart0", x1000_uart0_groups, ARRAY_SIZE(x1000_uart0_groups), },
 	{ "uart1", x1000_uart1_groups, ARRAY_SIZE(x1000_uart1_groups), },
 	{ "uart2", x1000_uart2_groups, ARRAY_SIZE(x1000_uart2_groups), },
+	{ "sfc", x1000_sfc_groups, ARRAY_SIZE(x1000_sfc_groups), },
+	{ "ssi", x1000_ssi_groups, ARRAY_SIZE(x1000_ssi_groups), },
 	{ "mmc0", x1000_mmc0_groups, ARRAY_SIZE(x1000_mmc0_groups), },
 	{ "mmc1", x1000_mmc1_groups, ARRAY_SIZE(x1000_mmc1_groups), },
-	{ "nemc", x1000_nemc_groups, ARRAY_SIZE(x1000_nemc_groups), },
-	{ "nemc-cs1", x1000_cs1_groups, ARRAY_SIZE(x1000_cs1_groups), },
-	{ "nemc-cs2", x1000_cs2_groups, ARRAY_SIZE(x1000_cs2_groups), },
+	{ "emc", x1000_emc_groups, ARRAY_SIZE(x1000_emc_groups), },
+	{ "emc-cs1", x1000_cs1_groups, ARRAY_SIZE(x1000_cs1_groups), },
+	{ "emc-cs2", x1000_cs2_groups, ARRAY_SIZE(x1000_cs2_groups), },
 	{ "i2c0", x1000_i2c0_groups, ARRAY_SIZE(x1000_i2c0_groups), },
 	{ "i2c1", x1000_i2c1_groups, ARRAY_SIZE(x1000_i2c1_groups), },
 	{ "i2c2", x1000_i2c2_groups, ARRAY_SIZE(x1000_i2c2_groups), },
@@ -1207,16 +1276,8 @@ static const struct function_desc x1000_functions[] = {
 
 static const struct ingenic_chip_info x1000_chip_info = {
 	.num_chips = 4,
-	.groups = x1000_groups,
-	.num_groups = ARRAY_SIZE(x1000_groups),
-	.functions = x1000_functions,
-	.num_functions = ARRAY_SIZE(x1000_functions),
-	.pull_ups = x1000_pull_ups,
-	.pull_downs = x1000_pull_downs,
-};
-
-static const struct ingenic_chip_info x1000e_chip_info = {
-	.num_chips = 4,
+	.reg_offset = 0x100,
+	.version = ID_X1000,
 	.groups = x1000_groups,
 	.num_groups = ARRAY_SIZE(x1000_groups),
 	.functions = x1000_functions,
@@ -1229,11 +1290,11 @@ static int x1500_uart0_data_pins[] = { 0x4a, 0x4b, };
 static int x1500_uart0_hwflow_pins[] = { 0x4c, 0x4d, };
 static int x1500_uart1_data_a_pins[] = { 0x04, 0x05, };
 static int x1500_uart1_data_d_pins[] = { 0x62, 0x63, };
-static int x1500_uart1_hwflow_d_pins[] = { 0x64, 0x65, };
+static int x1500_uart1_hwflow_pins[] = { 0x64, 0x65, };
 static int x1500_uart2_data_a_pins[] = { 0x02, 0x03, };
 static int x1500_uart2_data_d_pins[] = { 0x65, 0x64, };
-static int x1500_mmc0_1bit_pins[] = { 0x18, 0x19, 0x17, };
-static int x1500_mmc0_4bit_pins[] = { 0x16, 0x15, 0x14, };
+static int x1500_mmc_1bit_pins[] = { 0x18, 0x19, 0x17, };
+static int x1500_mmc_4bit_pins[] = { 0x16, 0x15, 0x14, };
 static int x1500_i2c0_pins[] = { 0x38, 0x37, };
 static int x1500_i2c1_a_pins[] = { 0x01, 0x00, };
 static int x1500_i2c1_c_pins[] = { 0x5b, 0x5a, };
@@ -1252,11 +1313,11 @@ static int x1500_uart0_data_funcs[] = { 0, 0, };
 static int x1500_uart0_hwflow_funcs[] = { 0, 0, };
 static int x1500_uart1_data_a_funcs[] = { 2, 2, };
 static int x1500_uart1_data_d_funcs[] = { 1, 1, };
-static int x1500_uart1_hwflow_d_funcs[] = { 1, 1, };
+static int x1500_uart1_hwflow_funcs[] = { 1, 1, };
 static int x1500_uart2_data_a_funcs[] = { 2, 2, };
 static int x1500_uart2_data_d_funcs[] = { 0, 0, };
-static int x1500_mmc0_1bit_funcs[] = { 1, 1, 1, };
-static int x1500_mmc0_4bit_funcs[] = { 1, 1, 1, };
+static int x1500_mmc_1bit_funcs[] = { 1, 1, 1, };
+static int x1500_mmc_4bit_funcs[] = { 1, 1, 1, };
 static int x1500_i2c0_funcs[] = { 0, 0, };
 static int x1500_i2c1_a_funcs[] = { 2, 2, };
 static int x1500_i2c1_c_funcs[] = { 0, 0, };
@@ -1273,11 +1334,12 @@ static const struct group_desc x1500_groups[] = {
 	INGENIC_PIN_GROUP("uart0-hwflow", x1500_uart0_hwflow),
 	INGENIC_PIN_GROUP("uart1-data-a", x1500_uart1_data_a),
 	INGENIC_PIN_GROUP("uart1-data-d", x1500_uart1_data_d),
-	INGENIC_PIN_GROUP("uart1-hwflow-d", x1500_uart1_hwflow_d),
+	INGENIC_PIN_GROUP("uart1-hwflow", x1500_uart1_hwflow),
 	INGENIC_PIN_GROUP("uart2-data-a", x1500_uart2_data_a),
 	INGENIC_PIN_GROUP("uart2-data-d", x1500_uart2_data_d),
-	INGENIC_PIN_GROUP("mmc0-1bit", x1500_mmc0_1bit),
-	INGENIC_PIN_GROUP("mmc0-4bit", x1500_mmc0_4bit),
+	INGENIC_PIN_GROUP("sfc", x1000_sfc),
+	INGENIC_PIN_GROUP("mmc-1bit", x1500_mmc_1bit),
+	INGENIC_PIN_GROUP("mmc-4bit", x1500_mmc_4bit),
 	INGENIC_PIN_GROUP("i2c0-data", x1500_i2c0),
 	INGENIC_PIN_GROUP("i2c1-data-a", x1500_i2c1_a),
 	INGENIC_PIN_GROUP("i2c1-data-c", x1500_i2c1_c),
@@ -1293,10 +1355,10 @@ static const struct group_desc x1500_groups[] = {
 
 static const char *x1500_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
 static const char *x1500_uart1_groups[] = {
-	"uart1-data-a", "uart1-data-d", "uart1-hwflow-d",
+	"uart1-data-a", "uart1-data-d", "uart1-hwflow",
 };
 static const char *x1500_uart2_groups[] = { "uart2-data-a", "uart2-data-d", };
-static const char *x1500_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
+static const char *x1500_mmc_groups[] = { "mmc-1bit", "mmc-4bit", };
 static const char *x1500_i2c0_groups[] = { "i2c0-data", };
 static const char *x1500_i2c1_groups[] = { "i2c1-data-a", "i2c1-data-c", };
 static const char *x1500_i2c2_groups[] = { "i2c2-data", };
@@ -1312,7 +1374,8 @@ static const struct function_desc x1500_functions[] = {
 	{ "uart0", x1500_uart0_groups, ARRAY_SIZE(x1500_uart0_groups), },
 	{ "uart1", x1500_uart1_groups, ARRAY_SIZE(x1500_uart1_groups), },
 	{ "uart2", x1500_uart2_groups, ARRAY_SIZE(x1500_uart2_groups), },
-	{ "mmc0", x1500_mmc0_groups, ARRAY_SIZE(x1500_mmc0_groups), },
+	{ "sfc", x1000_sfc_groups, ARRAY_SIZE(x1000_sfc_groups), },
+	{ "mmc", x1500_mmc_groups, ARRAY_SIZE(x1500_mmc_groups), },
 	{ "i2c0", x1500_i2c0_groups, ARRAY_SIZE(x1500_i2c0_groups), },
 	{ "i2c1", x1500_i2c1_groups, ARRAY_SIZE(x1500_i2c1_groups), },
 	{ "i2c2", x1500_i2c2_groups, ARRAY_SIZE(x1500_i2c2_groups), },
@@ -1327,6 +1390,8 @@ static const struct function_desc x1500_functions[] = {
 
 static const struct ingenic_chip_info x1500_chip_info = {
 	.num_chips = 4,
+	.reg_offset = 0x100,
+	.version = ID_X1500,
 	.groups = x1500_groups,
 	.num_groups = ARRAY_SIZE(x1500_groups),
 	.functions = x1500_functions,
@@ -1335,6 +1400,222 @@ static const struct ingenic_chip_info x1500_chip_info = {
 	.pull_downs = x1000_pull_downs,
 };
 
+static const u32 x1830_pull_ups[4] = {
+	0x5fdfffc0, 0xffffefff, 0x1ffffbff, 0x0fcff3fc,
+};
+
+static const u32 x1830_pull_downs[4] = {
+	0x5fdfffc0, 0xffffefff, 0x1ffffbff, 0x0fcff3fc,
+};
+
+static int x1830_uart0_data_pins[] = { 0x33, 0x36, };
+static int x1830_uart0_hwflow_pins[] = { 0x34, 0x35, };
+static int x1830_uart1_data_pins[] = { 0x38, 0x37, };
+static int x1830_sfc_pins[] = { 0x17, 0x18, 0x1a, 0x19, 0x1b, 0x1c, };
+static int x1830_ssi0_dt_pins[] = { 0x4c, };
+static int x1830_ssi0_dr_pins[] = { 0x4b, };
+static int x1830_ssi0_clk_pins[] = { 0x4f, };
+static int x1830_ssi0_gpc_pins[] = { 0x4d, };
+static int x1830_ssi0_ce0_pins[] = { 0x50, };
+static int x1830_ssi0_ce1_pins[] = { 0x4e, };
+static int x1830_ssi1_dt_c_pins[] = { 0x53, };
+static int x1830_ssi1_dr_c_pins[] = { 0x54, };
+static int x1830_ssi1_clk_c_pins[] = { 0x57, };
+static int x1830_ssi1_gpc_c_pins[] = { 0x55, };
+static int x1830_ssi1_ce0_c_pins[] = { 0x58, };
+static int x1830_ssi1_ce1_c_pins[] = { 0x56, };
+static int x1830_ssi1_dt_d_pins[] = { 0x62, };
+static int x1830_ssi1_dr_d_pins[] = { 0x63, };
+static int x1830_ssi1_clk_d_pins[] = { 0x66, };
+static int x1830_ssi1_gpc_d_pins[] = { 0x64, };
+static int x1830_ssi1_ce0_d_pins[] = { 0x67, };
+static int x1830_ssi1_ce1_d_pins[] = { 0x65, };
+static int x1830_mmc0_1bit_pins[] = { 0x24, 0x25, 0x20, };
+static int x1830_mmc0_4bit_pins[] = { 0x21, 0x22, 0x23, };
+static int x1830_mmc1_1bit_pins[] = { 0x42, 0x43, 0x44, };
+static int x1830_mmc1_4bit_pins[] = { 0x45, 0x46, 0x47, };
+static int x1830_i2c0_pins[] = { 0x0c, 0x0d, };
+static int x1830_i2c1_pins[] = { 0x39, 0x3a, };
+static int x1830_i2c2_pins[] = { 0x5b, 0x5c, };
+static int x1830_pwm_pwm0_b_pins[] = { 0x31, };
+static int x1830_pwm_pwm0_c_pins[] = { 0x4b, };
+static int x1830_pwm_pwm1_b_pins[] = { 0x32, };
+static int x1830_pwm_pwm1_c_pins[] = { 0x4c, };
+static int x1830_pwm_pwm2_c_8_pins[] = { 0x48, };
+static int x1830_pwm_pwm2_c_13_pins[] = { 0x4d, };
+static int x1830_pwm_pwm3_c_9_pins[] = { 0x49, };
+static int x1830_pwm_pwm3_c_14_pins[] = { 0x4e, };
+static int x1830_pwm_pwm4_c_15_pins[] = { 0x4f, };
+static int x1830_pwm_pwm4_c_25_pins[] = { 0x59, };
+static int x1830_pwm_pwm5_c_16_pins[] = { 0x50, };
+static int x1830_pwm_pwm5_c_26_pins[] = { 0x5a, };
+static int x1830_pwm_pwm6_c_17_pins[] = { 0x51, };
+static int x1830_pwm_pwm6_c_27_pins[] = { 0x5b, };
+static int x1830_pwm_pwm7_c_18_pins[] = { 0x52, };
+static int x1830_pwm_pwm7_c_28_pins[] = { 0x5c, };
+static int x1830_mac_pins[] = {
+	0x29, 0x30, 0x2f, 0x28, 0x2e, 0x2d, 0x2a, 0x2b, 0x26, 0x27,
+};
+
+static int x1830_uart0_data_funcs[] = { 0, 0, };
+static int x1830_uart0_hwflow_funcs[] = { 0, 0, };
+static int x1830_uart1_data_funcs[] = { 0, 0, };
+static int x1830_sfc_funcs[] = { 1, 1, 1, 1, 1, 1, };
+static int x1830_ssi0_dt_funcs[] = { 0, };
+static int x1830_ssi0_dr_funcs[] = { 0, };
+static int x1830_ssi0_clk_funcs[] = { 0, };
+static int x1830_ssi0_gpc_funcs[] = { 0, };
+static int x1830_ssi0_ce0_funcs[] = { 0, };
+static int x1830_ssi0_ce1_funcs[] = { 0, };
+static int x1830_ssi1_dt_c_funcs[] = { 1, };
+static int x1830_ssi1_dr_c_funcs[] = { 1, };
+static int x1830_ssi1_clk_c_funcs[] = { 1, };
+static int x1830_ssi1_gpc_c_funcs[] = { 1, };
+static int x1830_ssi1_ce0_c_funcs[] = { 1, };
+static int x1830_ssi1_ce1_c_funcs[] = { 1, };
+static int x1830_ssi1_dt_d_funcs[] = { 2, };
+static int x1830_ssi1_dr_d_funcs[] = { 2, };
+static int x1830_ssi1_clk_d_funcs[] = { 2, };
+static int x1830_ssi1_gpc_d_funcs[] = { 2, };
+static int x1830_ssi1_ce0_d_funcs[] = { 2, };
+static int x1830_ssi1_ce1_d_funcs[] = { 2, };
+static int x1830_mmc0_1bit_funcs[] = { 0, 0, 0, };
+static int x1830_mmc0_4bit_funcs[] = { 0, 0, 0, };
+static int x1830_mmc1_1bit_funcs[] = { 0, 0, 0, };
+static int x1830_mmc1_4bit_funcs[] = { 0, 0, 0, };
+static int x1830_i2c0_funcs[] = { 1, 1, };
+static int x1830_i2c1_funcs[] = { 0, 0, };
+static int x1830_i2c2_funcs[] = { 1, 1, };
+static int x1830_pwm_pwm0_b_funcs[] = { 0, };
+static int x1830_pwm_pwm0_c_funcs[] = { 1, };
+static int x1830_pwm_pwm1_b_funcs[] = { 0, };
+static int x1830_pwm_pwm1_c_funcs[] = { 1, };
+static int x1830_pwm_pwm2_c_8_funcs[] = { 0, };
+static int x1830_pwm_pwm2_c_13_funcs[] = { 1, };
+static int x1830_pwm_pwm3_c_9_funcs[] = { 0, };
+static int x1830_pwm_pwm3_c_14_funcs[] = { 1, };
+static int x1830_pwm_pwm4_c_15_funcs[] = { 1, };
+static int x1830_pwm_pwm4_c_25_funcs[] = { 0, };
+static int x1830_pwm_pwm5_c_16_funcs[] = { 1, };
+static int x1830_pwm_pwm5_c_26_funcs[] = { 0, };
+static int x1830_pwm_pwm6_c_17_funcs[] = { 1, };
+static int x1830_pwm_pwm6_c_27_funcs[] = { 0, };
+static int x1830_pwm_pwm7_c_18_funcs[] = { 1, };
+static int x1830_pwm_pwm7_c_28_funcs[] = { 0, };
+static int x1830_mac_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
+
+static const struct group_desc x1830_groups[] = {
+	INGENIC_PIN_GROUP("uart0-data", x1830_uart0_data),
+	INGENIC_PIN_GROUP("uart0-hwflow", x1830_uart0_hwflow),
+	INGENIC_PIN_GROUP("uart1-data", x1830_uart1_data),
+	INGENIC_PIN_GROUP("sfc", x1830_sfc),
+	INGENIC_PIN_GROUP("ssi0-dt", x1830_ssi0_dt),
+	INGENIC_PIN_GROUP("ssi0-dr", x1830_ssi0_dr),
+	INGENIC_PIN_GROUP("ssi0-clk", x1830_ssi0_clk),
+	INGENIC_PIN_GROUP("ssi0-gpc", x1830_ssi0_gpc),
+	INGENIC_PIN_GROUP("ssi0-ce0", x1830_ssi0_ce0),
+	INGENIC_PIN_GROUP("ssi0-ce1", x1830_ssi0_ce1),
+	INGENIC_PIN_GROUP("ssi1-dt-c", x1830_ssi1_dt_c),
+	INGENIC_PIN_GROUP("ssi1-dr-c", x1830_ssi1_dr_c),
+	INGENIC_PIN_GROUP("ssi1-clk-c", x1830_ssi1_clk_c),
+	INGENIC_PIN_GROUP("ssi1-gpc-c", x1830_ssi1_gpc_c),
+	INGENIC_PIN_GROUP("ssi1-ce0-c", x1830_ssi1_ce0_c),
+	INGENIC_PIN_GROUP("ssi1-ce1-c", x1830_ssi1_ce1_c),
+	INGENIC_PIN_GROUP("ssi1-dt-d", x1830_ssi1_dt_d),
+	INGENIC_PIN_GROUP("ssi1-dr-d", x1830_ssi1_dr_d),
+	INGENIC_PIN_GROUP("ssi1-clk-d", x1830_ssi1_clk_d),
+	INGENIC_PIN_GROUP("ssi1-gpc-d", x1830_ssi1_gpc_d),
+	INGENIC_PIN_GROUP("ssi1-ce0-d", x1830_ssi1_ce0_d),
+	INGENIC_PIN_GROUP("ssi1-ce1-d", x1830_ssi1_ce1_d),
+	INGENIC_PIN_GROUP("mmc0-1bit", x1830_mmc0_1bit),
+	INGENIC_PIN_GROUP("mmc0-4bit", x1830_mmc0_4bit),
+	INGENIC_PIN_GROUP("mmc1-1bit", x1830_mmc1_1bit),
+	INGENIC_PIN_GROUP("mmc1-4bit", x1830_mmc1_4bit),
+	INGENIC_PIN_GROUP("i2c0-data", x1830_i2c0),
+	INGENIC_PIN_GROUP("i2c1-data", x1830_i2c1),
+	INGENIC_PIN_GROUP("i2c2-data", x1830_i2c2),
+	INGENIC_PIN_GROUP("pwm0-b", x1830_pwm_pwm0_b),
+	INGENIC_PIN_GROUP("pwm0-c", x1830_pwm_pwm0_c),
+	INGENIC_PIN_GROUP("pwm1-b", x1830_pwm_pwm1_b),
+	INGENIC_PIN_GROUP("pwm1-c", x1830_pwm_pwm1_c),
+	INGENIC_PIN_GROUP("pwm2-c-8", x1830_pwm_pwm2_c_8),
+	INGENIC_PIN_GROUP("pwm2-c-13", x1830_pwm_pwm2_c_13),
+	INGENIC_PIN_GROUP("pwm3-c-9", x1830_pwm_pwm3_c_9),
+	INGENIC_PIN_GROUP("pwm3-c-14", x1830_pwm_pwm3_c_14),
+	INGENIC_PIN_GROUP("pwm4-c-15", x1830_pwm_pwm4_c_15),
+	INGENIC_PIN_GROUP("pwm4-c-25", x1830_pwm_pwm4_c_25),
+	INGENIC_PIN_GROUP("pwm5-c-16", x1830_pwm_pwm5_c_16),
+	INGENIC_PIN_GROUP("pwm5-c-26", x1830_pwm_pwm5_c_26),
+	INGENIC_PIN_GROUP("pwm6-c-17", x1830_pwm_pwm6_c_17),
+	INGENIC_PIN_GROUP("pwm6-c-27", x1830_pwm_pwm6_c_27),
+	INGENIC_PIN_GROUP("pwm7-c-18", x1830_pwm_pwm7_c_18),
+	INGENIC_PIN_GROUP("pwm7-c-28", x1830_pwm_pwm7_c_28),
+	INGENIC_PIN_GROUP("mac", x1830_mac),
+};
+
+static const char *x1830_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char *x1830_uart1_groups[] = { "uart1-data", };
+static const char *x1830_sfc_groups[] = { "sfc", };
+static const char *x1830_ssi0_groups[] = {
+	"ssi0-dt", "ssi0-dr", "ssi0-clk", "ssi0-gpc", "ssi0-ce0", "ssi0-ce1",
+};
+static const char *x1830_ssi1_groups[] = {
+	"ssi1-dt-c", "ssi1-dt-d",
+	"ssi1-dr-c", "ssi1-dr-d",
+	"ssi1-clk-c", "ssi1-clk-d",
+	"ssi1-gpc-c", "ssi1-gpc-d",
+	"ssi1-ce0-c", "ssi1-ce0-d",
+	"ssi1-ce1-c", "ssi1-ce1-d",
+};
+static const char *x1830_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
+static const char *x1830_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
+static const char *x1830_i2c0_groups[] = { "i2c0-data", };
+static const char *x1830_i2c1_groups[] = { "i2c1-data", };
+static const char *x1830_i2c2_groups[] = { "i2c2-data", };
+static const char *x1830_pwm0_groups[] = { "pwm0-b", "pwm0-c", };
+static const char *x1830_pwm1_groups[] = { "pwm1-b", "pwm1-c", };
+static const char *x1830_pwm2_groups[] = { "pwm2-c-8", "pwm2-c-13", };
+static const char *x1830_pwm3_groups[] = { "pwm3-c-9", "pwm3-c-14", };
+static const char *x1830_pwm4_groups[] = { "pwm4-c-15", "pwm4-c-25", };
+static const char *x1830_pwm5_groups[] = { "pwm5-c-16", "pwm5-c-26", };
+static const char *x1830_pwm6_groups[] = { "pwm6-c-17", "pwm6-c-27", };
+static const char *x1830_pwm7_groups[] = { "pwm7-c-18", "pwm7-c-28", };
+static const char *x1830_mac_groups[] = { "mac", };
+
+static const struct function_desc x1830_functions[] = {
+	{ "uart0", x1830_uart0_groups, ARRAY_SIZE(x1830_uart0_groups), },
+	{ "uart1", x1830_uart1_groups, ARRAY_SIZE(x1830_uart1_groups), },
+	{ "sfc", x1830_sfc_groups, ARRAY_SIZE(x1830_sfc_groups), },
+	{ "ssi0", x1830_ssi0_groups, ARRAY_SIZE(x1830_ssi0_groups), },
+	{ "ssi1", x1830_ssi1_groups, ARRAY_SIZE(x1830_ssi1_groups), },
+	{ "mmc0", x1830_mmc0_groups, ARRAY_SIZE(x1830_mmc0_groups), },
+	{ "mmc1", x1830_mmc1_groups, ARRAY_SIZE(x1830_mmc1_groups), },
+	{ "i2c0", x1830_i2c0_groups, ARRAY_SIZE(x1830_i2c0_groups), },
+	{ "i2c1", x1830_i2c1_groups, ARRAY_SIZE(x1830_i2c1_groups), },
+	{ "i2c2", x1830_i2c2_groups, ARRAY_SIZE(x1830_i2c2_groups), },
+	{ "pwm0", x1830_pwm0_groups, ARRAY_SIZE(x1830_pwm0_groups), },
+	{ "pwm1", x1830_pwm1_groups, ARRAY_SIZE(x1830_pwm1_groups), },
+	{ "pwm2", x1830_pwm2_groups, ARRAY_SIZE(x1830_pwm2_groups), },
+	{ "pwm3", x1830_pwm3_groups, ARRAY_SIZE(x1830_pwm3_groups), },
+	{ "pwm4", x1830_pwm4_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+	{ "pwm5", x1830_pwm5_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+	{ "pwm6", x1830_pwm6_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+	{ "pwm7", x1830_pwm7_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+	{ "mac", x1830_mac_groups, ARRAY_SIZE(x1830_mac_groups), },
+};
+
+static const struct ingenic_chip_info x1830_chip_info = {
+	.num_chips = 4,
+	.reg_offset = 0x1000,
+	.version = ID_X1830,
+	.groups = x1830_groups,
+	.num_groups = ARRAY_SIZE(x1830_groups),
+	.functions = x1830_functions,
+	.num_functions = ARRAY_SIZE(x1830_functions),
+	.pull_ups = x1830_pull_ups,
+	.pull_downs = x1830_pull_downs,
+};
+
 static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
 {
 	unsigned int val;
@@ -1363,12 +1644,14 @@ static void ingenic_gpio_shadow_set_bit(struct ingenic_gpio_chip *jzgc,
 	else
 		reg = REG_CLEAR(reg);
 
-	regmap_write(jzgc->jzpc->map, X1000_GPIO_PZ_BASE + reg, BIT(offset));
+	regmap_write(jzgc->jzpc->map, REG_PZ_BASE(
+			jzgc->jzpc->info->reg_offset) + reg, BIT(offset));
 }
 
 static void ingenic_gpio_shadow_set_bit_load(struct ingenic_gpio_chip *jzgc)
 {
-	regmap_write(jzgc->jzpc->map, X1000_GPIO_PZ_GID2LD,
+	regmap_write(jzgc->jzpc->map, REG_PZ_GID2LD(
+			jzgc->jzpc->info->reg_offset),
 			jzgc->gc.base / PINS_PER_GPIO_CHIP);
 }
 
@@ -1383,7 +1666,7 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
 static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
 				   u8 offset, int value)
 {
-	if (jzgc->jzpc->version >= ID_JZ4760)
+	if (jzgc->jzpc->info->version >= ID_JZ4760)
 		ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value);
 	else
 		ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
@@ -1393,8 +1676,27 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
 		u8 offset, unsigned int type)
 {
 	u8 reg1, reg2;
+	bool val1, val2;
 
-	if (jzgc->jzpc->version >= ID_JZ4760) {
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		val1 = val2 = true;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		val1 = false;
+		val2 = true;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		val1 = true;
+		val2 = false;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+	default:
+		val1 = val2 = false;
+		break;
+	}
+
+	if (jzgc->jzpc->info->version >= ID_JZ4760) {
 		reg1 = JZ4760_GPIO_PAT1;
 		reg2 = JZ4760_GPIO_PAT0;
 	} else {
@@ -1402,48 +1704,13 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
 		reg2 = JZ4740_GPIO_DIR;
 	}
 
-	switch (type) {
-	case IRQ_TYPE_EDGE_RISING:
-		if (jzgc->jzpc->version >= ID_X1000) {
-			ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, true);
-			ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, true);
-			ingenic_gpio_shadow_set_bit_load(jzgc);
-		} else {
-			ingenic_gpio_set_bit(jzgc, reg2, offset, true);
-			ingenic_gpio_set_bit(jzgc, reg1, offset, true);
-		}
-		break;
-	case IRQ_TYPE_EDGE_FALLING:
-		if (jzgc->jzpc->version >= ID_X1000) {
-			ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, false);
-			ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, true);
-			ingenic_gpio_shadow_set_bit_load(jzgc);
-		} else {
-			ingenic_gpio_set_bit(jzgc, reg2, offset, false);
-			ingenic_gpio_set_bit(jzgc, reg1, offset, true);
-		}
-		break;
-	case IRQ_TYPE_LEVEL_HIGH:
-		if (jzgc->jzpc->version >= ID_X1000) {
-			ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, true);
-			ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, false);
-			ingenic_gpio_shadow_set_bit_load(jzgc);
-		} else {
-			ingenic_gpio_set_bit(jzgc, reg2, offset, true);
-			ingenic_gpio_set_bit(jzgc, reg1, offset, false);
-		}
-		break;
-	case IRQ_TYPE_LEVEL_LOW:
-	default:
-		if (jzgc->jzpc->version >= ID_X1000) {
-			ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, false);
-			ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, false);
-			ingenic_gpio_shadow_set_bit_load(jzgc);
-		} else {
-			ingenic_gpio_set_bit(jzgc, reg2, offset, false);
-			ingenic_gpio_set_bit(jzgc, reg1, offset, false);
-		}
-		break;
+	if (jzgc->jzpc->info->version >= ID_X1000) {
+		ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, val1);
+		ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, val2);
+		ingenic_gpio_shadow_set_bit_load(jzgc);
+	} else {
+		ingenic_gpio_set_bit(jzgc, reg2, offset, val1);
+		ingenic_gpio_set_bit(jzgc, reg1, offset, val2);
 	}
 }
 
@@ -1469,7 +1736,7 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
 	struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
 	int irq = irqd->hwirq;
 
-	if (jzgc->jzpc->version >= ID_JZ4760)
+	if (jzgc->jzpc->info->version >= ID_JZ4760)
 		ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true);
 	else
 		ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
@@ -1485,7 +1752,7 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
 
 	ingenic_gpio_irq_mask(irqd);
 
-	if (jzgc->jzpc->version >= ID_JZ4760)
+	if (jzgc->jzpc->info->version >= ID_JZ4760)
 		ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false);
 	else
 		ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
@@ -1510,7 +1777,7 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
 			irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING);
 	}
 
-	if (jzgc->jzpc->version >= ID_JZ4760)
+	if (jzgc->jzpc->info->version >= ID_JZ4760)
 		ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false);
 	else
 		ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
@@ -1567,7 +1834,7 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
 
 	chained_irq_enter(irq_chip, desc);
 
-	if (jzgc->jzpc->version >= ID_JZ4760)
+	if (jzgc->jzpc->info->version >= ID_JZ4760)
 		flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG);
 	else
 		flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
@@ -1611,7 +1878,7 @@ static inline void ingenic_config_pin(struct ingenic_pinctrl *jzpc,
 	unsigned int idx = pin % PINS_PER_GPIO_CHIP;
 	unsigned int offt = pin / PINS_PER_GPIO_CHIP;
 
-	regmap_write(jzpc->map, offt * 0x100 +
+	regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
 			(set ? REG_SET(reg) : REG_CLEAR(reg)), BIT(idx));
 }
 
@@ -1620,14 +1887,15 @@ static inline void ingenic_shadow_config_pin(struct ingenic_pinctrl *jzpc,
 {
 	unsigned int idx = pin % PINS_PER_GPIO_CHIP;
 
-	regmap_write(jzpc->map, X1000_GPIO_PZ_BASE +
+	regmap_write(jzpc->map, REG_PZ_BASE(jzpc->info->reg_offset) +
 			(set ? REG_SET(reg) : REG_CLEAR(reg)), BIT(idx));
 }
 
 static inline void ingenic_shadow_config_pin_load(struct ingenic_pinctrl *jzpc,
 		unsigned int pin)
 {
-	regmap_write(jzpc->map, X1000_GPIO_PZ_GID2LD, pin / PINS_PER_GPIO_CHIP);
+	regmap_write(jzpc->map, REG_PZ_GID2LD(jzpc->info->reg_offset),
+			pin / PINS_PER_GPIO_CHIP);
 }
 
 static inline bool ingenic_get_pin_config(struct ingenic_pinctrl *jzpc,
@@ -1637,7 +1905,7 @@ static inline bool ingenic_get_pin_config(struct ingenic_pinctrl *jzpc,
 	unsigned int offt = pin / PINS_PER_GPIO_CHIP;
 	unsigned int val;
 
-	regmap_read(jzpc->map, offt * 0x100 + reg, &val);
+	regmap_read(jzpc->map, offt * jzpc->info->reg_offset + reg, &val);
 
 	return val & BIT(idx);
 }
@@ -1648,7 +1916,7 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
 	struct ingenic_pinctrl *jzpc = jzgc->jzpc;
 	unsigned int pin = gc->base + offset;
 
-	if (jzpc->version >= ID_JZ4760)
+	if (jzpc->info->version >= ID_JZ4760)
 		return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1);
 
 	if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_SELECT))
@@ -1674,13 +1942,13 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
 	dev_dbg(jzpc->dev, "set pin P%c%u to function %u\n",
 			'A' + offt, idx, func);
 
-	if (jzpc->version >= ID_X1000) {
+	if (jzpc->info->version >= ID_X1000) {
 		ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
 		ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false);
 		ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
 		ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
 		ingenic_shadow_config_pin_load(jzpc, pin);
-	} else if (jzpc->version >= ID_JZ4760) {
+	} else if (jzpc->info->version >= ID_JZ4760) {
 		ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
 		ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
 		ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
@@ -1733,12 +2001,12 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
 	dev_dbg(pctldev->dev, "set pin P%c%u to %sput\n",
 			'A' + offt, idx, input ? "in" : "out");
 
-	if (jzpc->version >= ID_X1000) {
+	if (jzpc->info->version >= ID_X1000) {
 		ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
 		ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true);
 		ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
 		ingenic_shadow_config_pin_load(jzpc, pin);
-	} else if (jzpc->version >= ID_JZ4760) {
+	} else if (jzpc->info->version >= ID_JZ4760) {
 		ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
 		ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
 		ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
@@ -1768,7 +2036,7 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
 	unsigned int offt = pin / PINS_PER_GPIO_CHIP;
 	bool pull;
 
-	if (jzpc->version >= ID_JZ4760)
+	if (jzpc->info->version >= ID_JZ4760)
 		pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN);
 	else
 		pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
@@ -1798,18 +2066,37 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
 }
 
 static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
-		unsigned int pin, bool enabled)
+		unsigned int pin, unsigned int bias)
 {
-	if (jzpc->version >= ID_JZ4760)
-		ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !enabled);
-	else
-		ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !enabled);
+	if (jzpc->info->version >= ID_X1830) {
+		unsigned int idx = pin % PINS_PER_GPIO_CHIP;
+		unsigned int half = PINS_PER_GPIO_CHIP / 2;
+		unsigned int idxh = pin % half * 2;
+		unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+
+		if (idx < half) {
+			regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+					REG_CLEAR(X1830_GPIO_PEL), 3 << idxh);
+			regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+					REG_SET(X1830_GPIO_PEL), bias << idxh);
+		} else {
+			regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+					REG_CLEAR(X1830_GPIO_PEH), 3 << idxh);
+			regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+					REG_SET(X1830_GPIO_PEH), bias << idxh);
+		}
+
+	} else if (jzpc->info->version >= ID_JZ4760) {
+		ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !bias);
+	} else {
+		ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
+	}
 }
 
 static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
 				     unsigned int pin, bool high)
 {
-	if (jzpc->version >= ID_JZ4760)
+	if (jzpc->info->version >= ID_JZ4760)
 		ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
 	else
 		ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
@@ -1843,7 +2130,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
 		case PIN_CONFIG_BIAS_DISABLE:
 			dev_dbg(jzpc->dev, "disable pull-over for pin P%c%u\n",
 					'A' + offt, idx);
-			ingenic_set_bias(jzpc, pin, false);
+			ingenic_set_bias(jzpc, pin, GPIO_PULL_DIS);
 			break;
 
 		case PIN_CONFIG_BIAS_PULL_UP:
@@ -1851,7 +2138,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
 				return -EINVAL;
 			dev_dbg(jzpc->dev, "set pull-up for pin P%c%u\n",
 					'A' + offt, idx);
-			ingenic_set_bias(jzpc, pin, true);
+			ingenic_set_bias(jzpc, pin, GPIO_PULL_UP);
 			break;
 
 		case PIN_CONFIG_BIAS_PULL_DOWN:
@@ -1859,7 +2146,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
 				return -EINVAL;
 			dev_dbg(jzpc->dev, "set pull-down for pin P%c%u\n",
 					'A' + offt, idx);
-			ingenic_set_bias(jzpc, pin, true);
+			ingenic_set_bias(jzpc, pin, GPIO_PULL_DOWN);
 			break;
 
 		case PIN_CONFIG_OUTPUT:
@@ -1939,25 +2226,13 @@ static const struct regmap_config ingenic_pinctrl_regmap_config = {
 	.reg_stride = 4,
 };
 
-static const struct of_device_id ingenic_pinctrl_of_match[] = {
-	{ .compatible = "ingenic,jz4740-pinctrl", .data = (void *) ID_JZ4740 },
-	{ .compatible = "ingenic,jz4725b-pinctrl", .data = (void *)ID_JZ4725B },
-	{ .compatible = "ingenic,jz4760-pinctrl", .data = (void *) ID_JZ4760 },
-	{ .compatible = "ingenic,jz4760b-pinctrl", .data = (void *) ID_JZ4760B },
-	{ .compatible = "ingenic,jz4770-pinctrl", .data = (void *) ID_JZ4770 },
-	{ .compatible = "ingenic,jz4780-pinctrl", .data = (void *) ID_JZ4780 },
-	{ .compatible = "ingenic,x1000-pinctrl", .data = (void *) ID_X1000 },
-	{ .compatible = "ingenic,x1000e-pinctrl", .data = (void *) ID_X1000E },
-	{ .compatible = "ingenic,x1500-pinctrl", .data = (void *) ID_X1500 },
-	{},
-};
-
 static const struct of_device_id ingenic_gpio_of_match[] __initconst = {
 	{ .compatible = "ingenic,jz4740-gpio", },
 	{ .compatible = "ingenic,jz4760-gpio", },
 	{ .compatible = "ingenic,jz4770-gpio", },
 	{ .compatible = "ingenic,jz4780-gpio", },
 	{ .compatible = "ingenic,x1000-gpio", },
+	{ .compatible = "ingenic,x1830-gpio", },
 	{},
 };
 
@@ -1981,7 +2256,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
 		return -ENOMEM;
 
 	jzgc->jzpc = jzpc;
-	jzgc->reg_base = bank * 0x100;
+	jzgc->reg_base = bank * jzpc->info->reg_offset;
 
 	jzgc->gc.label = devm_kasprintf(dev, GFP_KERNEL, "GPIO%c", 'A' + bank);
 	if (!jzgc->gc.label)
@@ -2048,9 +2323,6 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
 	struct ingenic_pinctrl *jzpc;
 	struct pinctrl_desc *pctl_desc;
 	void __iomem *base;
-	const struct platform_device_id *id = platform_get_device_id(pdev);
-	const struct of_device_id *of_id = of_match_device(
-			ingenic_pinctrl_of_match, dev);
 	const struct ingenic_chip_info *chip_info;
 	struct device_node *node;
 	unsigned int i;
@@ -2060,8 +2332,7 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
 	if (!jzpc)
 		return -ENOMEM;
 
-	base = devm_ioremap_resource(dev,
-			platform_get_resource(pdev, IORESOURCE_MEM, 0));
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
@@ -2073,31 +2344,7 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
 	}
 
 	jzpc->dev = dev;
-
-	if (of_id)
-		jzpc->version = (enum jz_version)of_id->data;
-	else
-		jzpc->version = (enum jz_version)id->driver_data;
-
-	if (jzpc->version >= ID_X1500)
-		chip_info = &x1500_chip_info;
-	else if (jzpc->version >= ID_X1000E)
-		chip_info = &x1000e_chip_info;
-	else if (jzpc->version >= ID_X1000)
-		chip_info = &x1000_chip_info;
-	else if (jzpc->version >= ID_JZ4780)
-		chip_info = &jz4780_chip_info;
-	else if (jzpc->version >= ID_JZ4770)
-		chip_info = &jz4770_chip_info;
-	else if (jzpc->version >= ID_JZ4760B)
-		chip_info = &jz4760b_chip_info;
-	else if (jzpc->version >= ID_JZ4760)
-		chip_info = &jz4760_chip_info;
-	else if (jzpc->version >= ID_JZ4725B)
-		chip_info = &jz4725b_chip_info;
-	else
-		chip_info = &jz4740_chip_info;
-	jzpc->info = chip_info;
+	jzpc->info = chip_info = of_device_get_match_data(dev);
 
 	pctl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctl_desc), GFP_KERNEL);
 	if (!pctl_desc)
@@ -2166,25 +2413,25 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static const struct platform_device_id ingenic_pinctrl_ids[] = {
-	{ "jz4740-pinctrl", ID_JZ4740 },
-	{ "jz4725b-pinctrl", ID_JZ4725B },
-	{ "jz4760-pinctrl", ID_JZ4760 },
-	{ "jz4760b-pinctrl", ID_JZ4760B },
-	{ "jz4770-pinctrl", ID_JZ4770 },
-	{ "jz4780-pinctrl", ID_JZ4780 },
-	{ "x1000-pinctrl", ID_X1000 },
-	{ "x1000e-pinctrl", ID_X1000E },
-	{ "x1500-pinctrl", ID_X1500 },
+static const struct of_device_id ingenic_pinctrl_of_match[] = {
+	{ .compatible = "ingenic,jz4740-pinctrl", .data = &jz4740_chip_info },
+	{ .compatible = "ingenic,jz4725b-pinctrl", .data = &jz4725b_chip_info },
+	{ .compatible = "ingenic,jz4760-pinctrl", .data = &jz4760_chip_info },
+	{ .compatible = "ingenic,jz4760b-pinctrl", .data = &jz4760_chip_info },
+	{ .compatible = "ingenic,jz4770-pinctrl", .data = &jz4770_chip_info },
+	{ .compatible = "ingenic,jz4780-pinctrl", .data = &jz4780_chip_info },
+	{ .compatible = "ingenic,x1000-pinctrl", .data = &x1000_chip_info },
+	{ .compatible = "ingenic,x1000e-pinctrl", .data = &x1000_chip_info },
+	{ .compatible = "ingenic,x1500-pinctrl", .data = &x1500_chip_info },
+	{ .compatible = "ingenic,x1830-pinctrl", .data = &x1830_chip_info },
 	{},
 };
 
 static struct platform_driver ingenic_pinctrl_driver = {
 	.driver = {
 		.name = "pinctrl-ingenic",
-		.of_match_table = of_match_ptr(ingenic_pinctrl_of_match),
+		.of_match_table = ingenic_pinctrl_of_match,
 	},
-	.id_table = ingenic_pinctrl_ids,
 };
 
 static int __init ingenic_pinctrl_drv_register(void)
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 215db22..617585b 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -1229,8 +1229,8 @@ static int rza1_parse_gpiochip(struct rza1_pinctrl *rza1_pctl,
 
 	pinctrl_add_gpio_range(rza1_pctl->pctl, range);
 
-	dev_info(rza1_pctl->dev, "Parsed gpiochip %s with %d pins\n",
-		 chip->label, chip->ngpio);
+	dev_dbg(rza1_pctl->dev, "Parsed gpiochip %s with %d pins\n",
+		chip->label, chip->ngpio);
 
 	return 0;
 }
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index 21c370d..bddf2c5 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -10,6 +10,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/module.h>
+#include <linux/pinctrl/machine.h>
 #include <linux/pinctrl/pinconf.h>
 #include <linux/pinctrl/pinconf-generic.h>
 #include <linux/pinctrl/pinmux.h>
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 5d6f9f6..9a8daa2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -960,7 +960,6 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
 	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
-	unsigned long flags;
 
 	/*
 	 * While they may not wake up when the TLMM is powered off,
@@ -971,12 +970,8 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
 	if (d->parent_data)
 		irq_chip_set_wake_parent(d, on);
 
-	raw_spin_lock_irqsave(&pctrl->lock, flags);
-
 	irq_set_irq_wake(pctrl->irq, on);
 
-	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
 	return 0;
 }
 
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
index e1259ce..183f0b2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8976.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -589,7 +589,7 @@ static const char * const blsp_uart5_groups[] = {
 static const char * const qdss_traceclk_a_groups[] = {
 	"gpio46",
 };
-const char * const m_voc_groups[] = {
+static const char * const m_voc_groups[] = {
 	"gpio123", "gpio124",
 };
 static const char * const blsp_i2c5_groups[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index d6cfad7..1b6465a 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -456,14 +456,18 @@ enum sc7180_functions {
 	msm_mux_qspi_data,
 	msm_mux_qup00,
 	msm_mux_qup01,
-	msm_mux_qup02,
+	msm_mux_qup02_i2c,
+	msm_mux_qup02_uart,
 	msm_mux_qup03,
-	msm_mux_qup04,
+	msm_mux_qup04_i2c,
+	msm_mux_qup04_uart,
 	msm_mux_qup05,
 	msm_mux_qup10,
-	msm_mux_qup11,
+	msm_mux_qup11_i2c,
+	msm_mux_qup11_uart,
 	msm_mux_qup12,
-	msm_mux_qup13,
+	msm_mux_qup13_i2c,
+	msm_mux_qup13_uart,
 	msm_mux_qup14,
 	msm_mux_qup15,
 	msm_mux_sdc1_tb,
@@ -543,7 +547,10 @@ static const char * const sdc1_tb_groups[] = {
 static const char * const sdc2_tb_groups[] = {
 	"gpio5",
 };
-static const char * const qup11_groups[] = {
+static const char * const qup11_i2c_groups[] = {
+	"gpio6", "gpio7",
+};
+static const char * const qup11_uart_groups[] = {
 	"gpio6", "gpio7",
 };
 static const char * const ddr_bist_groups[] = {
@@ -593,7 +600,10 @@ static const char * const qdss_groups[] = {
 static const char * const pll_reset_groups[] = {
 	"gpio14",
 };
-static const char * const qup02_groups[] = {
+static const char * const qup02_i2c_groups[] = {
+	"gpio15", "gpio16",
+};
+static const char * const qup02_uart_groups[] = {
 	"gpio15", "gpio16",
 };
 static const char * const cci_i2c_groups[] = {
@@ -698,7 +708,10 @@ static const char * const wlan1_adc1_groups[] = {
 static const char * const atest_usb13_groups[] = {
 	"gpio44",
 };
-static const char * const qup13_groups[] = {
+static const char * const qup13_i2c_groups[] = {
+	"gpio46", "gpio47",
+};
+static const char * const qup13_uart_groups[] = {
 	"gpio46", "gpio47",
 };
 static const char * const gcc_gp1_groups[] = {
@@ -848,7 +861,10 @@ static const char * const usb_phy_groups[] = {
 static const char * const mss_lte_groups[] = {
 	"gpio108", "gpio109",
 };
-static const char * const qup04_groups[] = {
+static const char * const qup04_i2c_groups[] = {
+	"gpio115", "gpio116",
+};
+static const char * const qup04_uart_groups[] = {
 	"gpio115", "gpio116",
 };
 
@@ -929,14 +945,18 @@ static const struct msm_function sc7180_functions[] = {
 	FUNCTION(qspi_data),
 	FUNCTION(qup00),
 	FUNCTION(qup01),
-	FUNCTION(qup02),
+	FUNCTION(qup02_i2c),
+	FUNCTION(qup02_uart),
 	FUNCTION(qup03),
-	FUNCTION(qup04),
+	FUNCTION(qup04_i2c),
+	FUNCTION(qup04_uart),
 	FUNCTION(qup05),
 	FUNCTION(qup10),
-	FUNCTION(qup11),
+	FUNCTION(qup11_i2c),
+	FUNCTION(qup11_uart),
 	FUNCTION(qup12),
-	FUNCTION(qup13),
+	FUNCTION(qup13_i2c),
+	FUNCTION(qup13_uart),
 	FUNCTION(qup14),
 	FUNCTION(qup15),
 	FUNCTION(sdc1_tb),
@@ -976,8 +996,8 @@ static const struct msm_pingroup sc7180_groups[] = {
 	[3] = PINGROUP(3, SOUTH, qup01, sp_cmu, dbg_out, qdss_cti, _, _, _, _, _),
 	[4] = PINGROUP(4, NORTH, sdc1_tb, _, qdss_cti, _, _, _, _, _, _),
 	[5] = PINGROUP(5, NORTH, sdc2_tb, _, _, _, _, _, _, _, _),
-	[6] = PINGROUP(6, NORTH, qup11, qup11, _, _, _, _, _, _, _),
-	[7] = PINGROUP(7, NORTH, qup11, qup11, ddr_bist, _, _, _, _, _, _),
+	[6] = PINGROUP(6, NORTH, qup11_i2c, qup11_uart, _, _, _, _, _, _, _),
+	[7] = PINGROUP(7, NORTH, qup11_i2c, qup11_uart, ddr_bist, _, _, _, _, _, _),
 	[8] = PINGROUP(8, NORTH, gp_pdm1, ddr_bist, _, phase_flag, qdss_cti, _, _, _, _),
 	[9] = PINGROUP(9, NORTH, ddr_bist, _, phase_flag, qdss_cti, _, _, _, _, _),
 	[10] = PINGROUP(10, NORTH, mdp_vsync, ddr_bist, _, _, _, _, _, _, _),
@@ -985,8 +1005,8 @@ static const struct msm_pingroup sc7180_groups[] = {
 	[12] = PINGROUP(12, SOUTH, mdp_vsync, m_voc, qup01, _, phase_flag, wlan2_adc0, atest_usb10, ddr_pxi3, _),
 	[13] = PINGROUP(13, SOUTH, cam_mclk, pll_bypassnl, qdss, _, _, _, _, _, _),
 	[14] = PINGROUP(14, SOUTH, cam_mclk, pll_reset, qdss, _, _, _, _, _, _),
-	[15] = PINGROUP(15, SOUTH, cam_mclk, qup02, qup02, qdss, _, _, _, _, _),
-	[16] = PINGROUP(16, SOUTH, cam_mclk, qup02, qup02, qdss, _, _, _, _, _),
+	[15] = PINGROUP(15, SOUTH, cam_mclk, qup02_i2c, qup02_uart, qdss, _, _, _, _, _),
+	[16] = PINGROUP(16, SOUTH, cam_mclk, qup02_i2c, qup02_uart, qdss, _, _, _, _, _),
 	[17] = PINGROUP(17, SOUTH, cci_i2c, _, phase_flag, qdss, _, wlan1_adc0, atest_usb12, ddr_pxi1, atest_char),
 	[18] = PINGROUP(18, SOUTH, cci_i2c, agera_pll, _, phase_flag, qdss, vsense_trigger, ddr_pxi0, atest_char3, _),
 	[19] = PINGROUP(19, SOUTH, cci_i2c, _, phase_flag, qdss, atest_char2, _, _, _, _),
@@ -1016,8 +1036,8 @@ static const struct msm_pingroup sc7180_groups[] = {
 	[43] = PINGROUP(43, NORTH, qup12, _, _, _, _, _, _, _, _),
 	[44] = PINGROUP(44, NORTH, qup12, _, phase_flag, qdss_cti, wlan1_adc1, atest_usb13, ddr_pxi1, _, _),
 	[45] = PINGROUP(45, NORTH, qup12, qdss_cti, _, _, _, _, _, _, _),
-	[46] = PINGROUP(46, NORTH, qup13, qup13, _, _, _, _, _, _, _),
-	[47] = PINGROUP(47, NORTH, qup13, qup13, _, _, _, _, _, _, _),
+	[46] = PINGROUP(46, NORTH, qup13_i2c, qup13_uart, _, _, _, _, _, _, _),
+	[47] = PINGROUP(47, NORTH, qup13_i2c, qup13_uart, _, _, _, _, _, _, _),
 	[48] = PINGROUP(48, NORTH, gcc_gp1, _, _, _, _, _, _, _, _),
 	[49] = PINGROUP(49, WEST, mi2s_1, btfm_slimbus, _, _, _, _, _, _, _),
 	[50] = PINGROUP(50, WEST, mi2s_1, btfm_slimbus, gp_pdm1, _, _, _, _, _, _),
@@ -1085,8 +1105,8 @@ static const struct msm_pingroup sc7180_groups[] = {
 	[112] = PINGROUP(112, NORTH, _, _, _, _, _, _, _, _, _),
 	[113] = PINGROUP(113, NORTH, _, _, _, _, _, _, _, _, _),
 	[114] = PINGROUP(114, NORTH, _, _, _, _, _, _, _, _, _),
-	[115] = PINGROUP(115, WEST, qup04, qup04, _, _, _, _, _, _, _),
-	[116] = PINGROUP(116, WEST, qup04, qup04, _, _, _, _, _, _, _),
+	[115] = PINGROUP(115, WEST, qup04_i2c, qup04_uart, _, _, _, _, _, _, _),
+	[116] = PINGROUP(116, WEST, qup04_i2c, qup04_uart, _, _, _, _, _, _, _),
 	[117] = PINGROUP(117, WEST, dp_hot, _, _, _, _, _, _, _, _),
 	[118] = PINGROUP(118, WEST, _, _, _, _, _, _, _, _, _),
 	[119] = UFS_RESET(ufs_reset, 0x7f000),
@@ -1099,6 +1119,22 @@ static const struct msm_pingroup sc7180_groups[] = {
 	[126] = SDC_QDSD_PINGROUP(sdc2_data, 0x7b000, 9, 0),
 };
 
+static const struct msm_gpio_wakeirq_map sc7180_pdc_map[] = {
+	{0, 40}, {3, 50}, {4, 42}, {5, 70}, {6, 41}, {9, 35},
+	{10, 80}, {11, 51}, {16, 20}, {21, 55}, {22, 90}, {23, 21},
+	{24, 61}, {26, 52}, {28, 36}, {30, 100}, {31, 33}, {32, 81},
+	{33, 62}, {34, 43}, {36, 91}, {37, 53}, {38, 63}, {39, 72},
+	{41, 101}, {42, 7}, {43, 34}, {45, 73}, {47, 82}, {49, 17},
+	{52, 109}, {53, 102}, {55, 92}, {56, 56}, {57, 57}, {58, 83},
+	{59, 37}, {62, 110}, {63, 111}, {64, 74}, {65, 44}, {66, 93},
+	{67, 58}, {68, 112}, {69, 32}, {70, 54}, {72, 59}, {73, 64},
+	{74, 71}, {78, 31}, {82, 30}, {85, 103}, {86, 38}, {87, 39},
+	{88, 45}, {89, 46}, {90, 47}, {91, 48}, {92, 60}, {93, 49},
+	{94, 84}, {95, 94}, {98, 65}, {101, 66}, {104, 67}, {109, 104},
+	{110, 68}, {113, 69}, {114, 113}, {115, 108}, {116, 121},
+	{117, 114}, {118, 119},
+};
+
 static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
 	.pins = sc7180_pins,
 	.npins = ARRAY_SIZE(sc7180_pins),
@@ -1109,6 +1145,8 @@ static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
 	.ngpios = 120,
 	.tiles = sc7180_tiles,
 	.ntiles = ARRAY_SIZE(sc7180_tiles),
+	.wakeirq_map = sc7180_pdc_map,
+	.nwakeirq_map = ARRAY_SIZE(sc7180_pdc_map),
 };
 
 static int sc7180_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 653d109..fe0be8a 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1060,7 +1060,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
 	girq->fwnode = of_node_to_fwnode(state->dev->of_node);
 	girq->parent_domain = parent_domain;
 	girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq;
-	girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_fourcell;
+	girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
 	girq->child_offset_to_irq = pmic_gpio_child_offset_to_irq;
 	girq->child_irq_domain_ops.translate = pmic_gpio_domain_translate;
 
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index dca8688..fba1d41 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -439,7 +439,7 @@ static const struct pinconf_ops pm8xxx_pinconf_ops = {
 	.pin_config_group_set = pm8xxx_pin_config_set,
 };
 
-static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+static const struct pinctrl_desc pm8xxx_pinctrl_desc = {
 	.name = "pm8xxx_gpio",
 	.pctlops = &pm8xxx_pinctrl_ops,
 	.pmxops = &pm8xxx_pinmux_ops,
@@ -794,7 +794,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
 	girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
 	girq->parent_domain = parent_domain;
 	girq->child_to_parent_hwirq = pm8xxx_child_to_parent_hwirq;
-	girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_fourcell;
+	girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
 	girq->child_offset_to_irq = pm8xxx_child_offset_to_irq;
 	girq->child_irq_domain_ops.translate = pm8xxx_domain_translate;
 
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 3d8b1d7..681d8dc 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -430,7 +430,7 @@ static const struct pinconf_ops pm8xxx_pinconf_ops = {
 	.pin_config_group_set = pm8xxx_pin_config_set,
 };
 
-static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+static const struct pinctrl_desc pm8xxx_pinctrl_desc = {
 	.name = "pm8xxx_mpp",
 	.pctlops = &pm8xxx_pinctrl_ops,
 	.pmxops = &pm8xxx_pinmux_ops,
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index 425fadd..dfd805e 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -4,30 +4,34 @@
 #
 config PINCTRL_SAMSUNG
 	bool
+	depends on OF_GPIO
 	select PINMUX
 	select PINCONF
 
 config PINCTRL_EXYNOS
-	bool "Pinctrl driver data for Samsung EXYNOS SoCs"
-	depends on OF && GPIOLIB && (ARCH_EXYNOS || ARCH_S5PV210)
+	bool "Pinctrl common driver part for Samsung Exynos SoCs"
+	depends on OF_GPIO
+	depends on ARCH_EXYNOS || ARCH_S5PV210 || COMPILE_TEST
 	select PINCTRL_SAMSUNG
 	select PINCTRL_EXYNOS_ARM if ARM && (ARCH_EXYNOS || ARCH_S5PV210)
 	select PINCTRL_EXYNOS_ARM64 if ARM64 && ARCH_EXYNOS
 
 config PINCTRL_EXYNOS_ARM
-	bool "ARMv7-specific pinctrl driver data for Exynos" if COMPILE_TEST
+	bool "ARMv7-specific pinctrl driver for Samsung Exynos SoCs" if COMPILE_TEST
 	depends on PINCTRL_EXYNOS
 
 config PINCTRL_EXYNOS_ARM64
-	bool "ARMv8-specific pinctrl driver data for Exynos" if COMPILE_TEST
+	bool "ARMv8-specific pinctrl driver for Samsung Exynos SoCs" if COMPILE_TEST
 	depends on PINCTRL_EXYNOS
 
 config PINCTRL_S3C24XX
 	bool "Samsung S3C24XX SoC pinctrl driver"
-	depends on ARCH_S3C24XX && OF
+	depends on OF_GPIO
+	depends on ARCH_S3C24XX || COMPILE_TEST
 	select PINCTRL_SAMSUNG
 
 config PINCTRL_S3C64XX
 	bool "Samsung S3C64XX SoC pinctrl driver"
-	depends on ARCH_S3C64XX
+	depends on OF_GPIO
+	depends on ARCH_S3C64XX || COMPILE_TEST
 	select PINCTRL_SAMSUNG
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 28d66e7..cf0e0dc 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -26,8 +26,9 @@
 	select PINCTRL_PFC_R8A7792 if ARCH_R8A7792
 	select PINCTRL_PFC_R8A7793 if ARCH_R8A7793
 	select PINCTRL_PFC_R8A7794 if ARCH_R8A7794
-	select PINCTRL_PFC_R8A7795 if ARCH_R8A7795
-	select PINCTRL_PFC_R8A77960 if ARCH_R8A77960 || ARCH_R8A7796
+	select PINCTRL_PFC_R8A77950 if ARCH_R8A77950 || ARCH_R8A7795
+	select PINCTRL_PFC_R8A77951 if ARCH_R8A77951 || ARCH_R8A7795
+	select PINCTRL_PFC_R8A77960 if ARCH_R8A77960
 	select PINCTRL_PFC_R8A77961 if ARCH_R8A77961
 	select PINCTRL_PFC_R8A77965 if ARCH_R8A77965
 	select PINCTRL_PFC_R8A77970 if ARCH_R8A77970
@@ -115,8 +116,11 @@
 config PINCTRL_PFC_R8A7794
 	bool "R-Car E2 pin control support" if COMPILE_TEST
 
-config PINCTRL_PFC_R8A7795
-	bool "R-Car H3 pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77950
+	bool "R-Car H3 ES1.x pin control support" if COMPILE_TEST
+
+config PINCTRL_PFC_R8A77951
+	bool "R-Car H3 ES2.0+ pin control support" if COMPILE_TEST
 
 config PINCTRL_PFC_R8A77960
 	bool "R-Car M3-W pin control support" if COMPILE_TEST
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 3bc0566..9ebe321 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -18,8 +18,8 @@
 obj-$(CONFIG_PINCTRL_PFC_R8A7792)	+= pfc-r8a7792.o
 obj-$(CONFIG_PINCTRL_PFC_R8A7793)	+= pfc-r8a7791.o
 obj-$(CONFIG_PINCTRL_PFC_R8A7794)	+= pfc-r8a7794.o
-obj-$(CONFIG_PINCTRL_PFC_R8A7795)	+= pfc-r8a7795.o
-obj-$(CONFIG_PINCTRL_PFC_R8A7795)	+= pfc-r8a7795-es1.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77950)	+= pfc-r8a77950.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77951)	+= pfc-r8a77951.o
 obj-$(CONFIG_PINCTRL_PFC_R8A77960)	+= pfc-r8a7796.o
 obj-$(CONFIG_PINCTRL_PFC_R8A77961)	+= pfc-r8a7796.o
 obj-$(CONFIG_PINCTRL_PFC_R8A77965)	+= pfc-r8a77965.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 65e5268..8220911 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -23,6 +23,7 @@
 #include <linux/platform_device.h>
 #include <linux/psci.h>
 #include <linux/slab.h>
+#include <linux/sys_soc.h>
 
 #include "core.h"
 
@@ -568,18 +569,18 @@ static const struct of_device_id sh_pfc_of_table[] = {
 		.data = &r8a7794_pinmux_info,
 	},
 #endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7795
+/* Both r8a7795 entries must be present to make sanity checks work */
+#ifdef CONFIG_PINCTRL_PFC_R8A77950
 	{
 		.compatible = "renesas,pfc-r8a7795",
-		.data = &r8a7795_pinmux_info,
+		.data = &r8a77950_pinmux_info,
 	},
-#ifdef DEBUG
+#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A77951
 	{
-		/* For sanity checks only (nothing matches against this) */
-		.compatible = "renesas,pfc-r8a77950",	/* R-Car H3 ES1.0 */
-		.data = &r8a7795es1_pinmux_info,
+		.compatible = "renesas,pfc-r8a7795",
+		.data = &r8a77951_pinmux_info,
 	},
-#endif /* DEBUG */
 #endif
 #ifdef CONFIG_PINCTRL_PFC_R8A77960
 	{
@@ -886,19 +887,49 @@ static void __init sh_pfc_check_driver(const struct platform_driver *pdrv)
 static inline void sh_pfc_check_driver(struct platform_driver *pdrv) {}
 #endif /* !DEBUG */
 
+#ifdef CONFIG_OF
+static const void *sh_pfc_quirk_match(void)
+{
+#if defined(CONFIG_PINCTRL_PFC_R8A77950) || \
+    defined(CONFIG_PINCTRL_PFC_R8A77951)
+	const struct soc_device_attribute *match;
+	static const struct soc_device_attribute quirks[] = {
+		{
+			.soc_id = "r8a7795", .revision = "ES1.*",
+			.data = &r8a77950_pinmux_info,
+		},
+		{
+			.soc_id = "r8a7795",
+			.data = &r8a77951_pinmux_info,
+		},
+
+		{ /* sentinel */ }
+	};
+
+	match = soc_device_match(quirks);
+	if (match)
+		return match->data ?: ERR_PTR(-ENODEV);
+#endif /* CONFIG_PINCTRL_PFC_R8A77950 || CONFIG_PINCTRL_PFC_R8A77951 */
+
+	return NULL;
+}
+#endif /* CONFIG_OF */
+
 static int sh_pfc_probe(struct platform_device *pdev)
 {
-#ifdef CONFIG_OF
-	struct device_node *np = pdev->dev.of_node;
-#endif
 	const struct sh_pfc_soc_info *info;
 	struct sh_pfc *pfc;
 	int ret;
 
 #ifdef CONFIG_OF
-	if (np)
-		info = of_device_get_match_data(&pdev->dev);
-	else
+	if (pdev->dev.of_node) {
+		info = sh_pfc_quirk_match();
+		if (IS_ERR(info))
+			return PTR_ERR(info);
+
+		if (!info)
+			info = of_device_get_match_data(&pdev->dev);
+	} else
 #endif
 		info = (const void *)platform_get_device_id(pdev)->driver_data;
 
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 5a55b8d..8213e11 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -386,12 +386,11 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
 	}
 
 	/* Register the function GPIOs chip. */
-	if (pfc->info->nr_func_gpios == 0)
-		return 0;
-
-	chip = sh_pfc_add_gpiochip(pfc, gpio_function_setup, NULL);
-	if (IS_ERR(chip))
-		return PTR_ERR(chip);
+	if (pfc->info->nr_func_gpios) {
+		chip = sh_pfc_add_gpiochip(pfc, gpio_function_setup, NULL);
+		if (IS_ERR(chip))
+			return PTR_ERR(chip);
+	}
 #endif /* CONFIG_PINCTRL_SH_FUNC_GPIO */
 
 	return 0;
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index 24866a5..a987503 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -2305,7 +2305,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
 		FN_ATAG0_A,	0,		FN_REMOCON_B,	0,
 		/* IP0_11_8 [4] */
 		FN_SD1_DAT2_A,	FN_MMC_D2,	0,		FN_BS,
-		FN_ATADIR0_A,	0,		FN_SDSELF_B,	0,
+		FN_ATADIR0_A,	0,		FN_SDSELF_A,	0,
 		FN_PWM4_B,	0,		0,		0,
 		0,		0,		0,		0,
 		/* IP0_7_5 [3] */
@@ -2349,7 +2349,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
 		FN_TS_SDAT0_A,	0,		0,		0,
 		0,		0,		0,		0,
 		/* IP1_10_8 [3] */
-		FN_SD1_CLK_B,	FN_MMC_D6,	0,		FN_A24,
+		FN_SD1_CD_A,	FN_MMC_D6,	0,		FN_A24,
 		FN_DREQ1_A,	0,		FN_HRX0_B,	FN_TS_SPSYNC0_A,
 		/* IP1_7_5 [3] */
 		FN_A23,		FN_HTX0_B,	FN_TX2_B,	FN_DACK2_A,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
deleted file mode 100644
index ad05da8..0000000
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ /dev/null
@@ -1,5891 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * R8A7795 ES1.x processor support - PFC hardware block.
- *
- * Copyright (C) 2015-2017  Renesas Electronics Corporation
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-
-#include "core.h"
-#include "sh_pfc.h"
-
-#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
-
-#define CPU_ALL_GP(fn, sfx)						\
-	PORT_GP_CFG_16(0, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_28(1, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_15(2, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_12(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE),	\
-	PORT_GP_CFG_1(3, 12, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_18(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE),	\
-	PORT_GP_CFG_26(5, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_32(6, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_4(7, fn, sfx, CFG_FLAGS)
-
-#define CPU_ALL_NOGP(fn)						\
-	PIN_NOGP_CFG(ASEBRK, "ASEBRK", fn, CFG_FLAGS),			\
-	PIN_NOGP_CFG(AVB_MDIO, "AVB_MDIO", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_RD0, "AVB_RD0", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_RD1, "AVB_RD1", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_RD2, "AVB_RD2", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_RD3, "AVB_RD3", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_RXC, "AVB_RXC", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_RX_CTL, "AVB_RX_CTL", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_TD0, "AVB_TD0", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_TD1, "AVB_TD1", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_TD2, "AVB_TD2", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_TD3, "AVB_TD3", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_TXC, "AVB_TXC", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_TXCREFCLK, "AVB_TXCREFCLK", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(AVB_TX_CTL, "AVB_TX_CTL", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(CLKOUT, "CLKOUT", fn, CFG_FLAGS),			\
-	PIN_NOGP_CFG(DU_DOTCLKIN0, "DU_DOTCLKIN0", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(DU_DOTCLKIN1, "DU_DOTCLKIN1", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(DU_DOTCLKIN2, "DU_DOTCLKIN2", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(DU_DOTCLKIN3, "DU_DOTCLKIN3", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(EXTALR, "EXTALR", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN),\
-	PIN_NOGP_CFG(FSCLKST_N, "FSCLKST#", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(MLB_REF, "MLB_REF", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(PRESETOUT_N, "PRESETOUT#", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(QSPI0_IO2, "QSPI0_IO2", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(QSPI0_IO3, "QSPI0_IO3", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(QSPI0_MISO_IO1, "QSPI0_MISO_IO1", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(QSPI0_MOSI_IO0, "QSPI0_MOSI_IO0", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(QSPI0_SPCLK, "QSPI0_SPCLK", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(QSPI0_SSL, "QSPI0_SSL", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(QSPI1_IO2, "QSPI1_IO2", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(QSPI1_IO3, "QSPI1_IO3", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(QSPI1_MISO_IO1, "QSPI1_MISO_IO1", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(QSPI1_MOSI_IO0, "QSPI1_MOSI_IO0", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(QSPI1_SPCLK, "QSPI1_SPCLK", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(QSPI1_SSL, "QSPI1_SSL", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(RPC_INT_N, "RPC_INT#", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(RPC_RESET_N, "RPC_RESET#", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(RPC_WP_N, "RPC_WP#", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(TCK, "TCK", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN),	\
-	PIN_NOGP_CFG(TDI, "TDI", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN),	\
-	PIN_NOGP_CFG(TDO, "TDO", fn, SH_PFC_PIN_CFG_DRIVE_STRENGTH),	\
-	PIN_NOGP_CFG(TMS, "TMS", fn, CFG_FLAGS),			\
-	PIN_NOGP_CFG(TRST_N, "TRST#", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN)
-
-/*
- * F_() : just information
- * FM() : macro for FN_xxx / xxx_MARK
- */
-
-/* GPSR0 */
-#define GPSR0_15	F_(D15,			IP7_11_8)
-#define GPSR0_14	F_(D14,			IP7_7_4)
-#define GPSR0_13	F_(D13,			IP7_3_0)
-#define GPSR0_12	F_(D12,			IP6_31_28)
-#define GPSR0_11	F_(D11,			IP6_27_24)
-#define GPSR0_10	F_(D10,			IP6_23_20)
-#define GPSR0_9		F_(D9,			IP6_19_16)
-#define GPSR0_8		F_(D8,			IP6_15_12)
-#define GPSR0_7		F_(D7,			IP6_11_8)
-#define GPSR0_6		F_(D6,			IP6_7_4)
-#define GPSR0_5		F_(D5,			IP6_3_0)
-#define GPSR0_4		F_(D4,			IP5_31_28)
-#define GPSR0_3		F_(D3,			IP5_27_24)
-#define GPSR0_2		F_(D2,			IP5_23_20)
-#define GPSR0_1		F_(D1,			IP5_19_16)
-#define GPSR0_0		F_(D0,			IP5_15_12)
-
-/* GPSR1 */
-#define GPSR1_27	F_(EX_WAIT0_A,		IP5_11_8)
-#define GPSR1_26	F_(WE1_N,		IP5_7_4)
-#define GPSR1_25	F_(WE0_N,		IP5_3_0)
-#define GPSR1_24	F_(RD_WR_N,		IP4_31_28)
-#define GPSR1_23	F_(RD_N,		IP4_27_24)
-#define GPSR1_22	F_(BS_N,		IP4_23_20)
-#define GPSR1_21	F_(CS1_N_A26,		IP4_19_16)
-#define GPSR1_20	F_(CS0_N,		IP4_15_12)
-#define GPSR1_19	F_(A19,			IP4_11_8)
-#define GPSR1_18	F_(A18,			IP4_7_4)
-#define GPSR1_17	F_(A17,			IP4_3_0)
-#define GPSR1_16	F_(A16,			IP3_31_28)
-#define GPSR1_15	F_(A15,			IP3_27_24)
-#define GPSR1_14	F_(A14,			IP3_23_20)
-#define GPSR1_13	F_(A13,			IP3_19_16)
-#define GPSR1_12	F_(A12,			IP3_15_12)
-#define GPSR1_11	F_(A11,			IP3_11_8)
-#define GPSR1_10	F_(A10,			IP3_7_4)
-#define GPSR1_9		F_(A9,			IP3_3_0)
-#define GPSR1_8		F_(A8,			IP2_31_28)
-#define GPSR1_7		F_(A7,			IP2_27_24)
-#define GPSR1_6		F_(A6,			IP2_23_20)
-#define GPSR1_5		F_(A5,			IP2_19_16)
-#define GPSR1_4		F_(A4,			IP2_15_12)
-#define GPSR1_3		F_(A3,			IP2_11_8)
-#define GPSR1_2		F_(A2,			IP2_7_4)
-#define GPSR1_1		F_(A1,			IP2_3_0)
-#define GPSR1_0		F_(A0,			IP1_31_28)
-
-/* GPSR2 */
-#define GPSR2_14	F_(AVB_AVTP_CAPTURE_A,	IP0_23_20)
-#define GPSR2_13	F_(AVB_AVTP_MATCH_A,	IP0_19_16)
-#define GPSR2_12	F_(AVB_LINK,		IP0_15_12)
-#define GPSR2_11	F_(AVB_PHY_INT,		IP0_11_8)
-#define GPSR2_10	F_(AVB_MAGIC,		IP0_7_4)
-#define GPSR2_9		F_(AVB_MDC,		IP0_3_0)
-#define GPSR2_8		F_(PWM2_A,		IP1_27_24)
-#define GPSR2_7		F_(PWM1_A,		IP1_23_20)
-#define GPSR2_6		F_(PWM0,		IP1_19_16)
-#define GPSR2_5		F_(IRQ5,		IP1_15_12)
-#define GPSR2_4		F_(IRQ4,		IP1_11_8)
-#define GPSR2_3		F_(IRQ3,		IP1_7_4)
-#define GPSR2_2		F_(IRQ2,		IP1_3_0)
-#define GPSR2_1		F_(IRQ1,		IP0_31_28)
-#define GPSR2_0		F_(IRQ0,		IP0_27_24)
-
-/* GPSR3 */
-#define GPSR3_15	F_(SD1_WP,		IP10_23_20)
-#define GPSR3_14	F_(SD1_CD,		IP10_19_16)
-#define GPSR3_13	F_(SD0_WP,		IP10_15_12)
-#define GPSR3_12	F_(SD0_CD,		IP10_11_8)
-#define GPSR3_11	F_(SD1_DAT3,		IP8_31_28)
-#define GPSR3_10	F_(SD1_DAT2,		IP8_27_24)
-#define GPSR3_9		F_(SD1_DAT1,		IP8_23_20)
-#define GPSR3_8		F_(SD1_DAT0,		IP8_19_16)
-#define GPSR3_7		F_(SD1_CMD,		IP8_15_12)
-#define GPSR3_6		F_(SD1_CLK,		IP8_11_8)
-#define GPSR3_5		F_(SD0_DAT3,		IP8_7_4)
-#define GPSR3_4		F_(SD0_DAT2,		IP8_3_0)
-#define GPSR3_3		F_(SD0_DAT1,		IP7_31_28)
-#define GPSR3_2		F_(SD0_DAT0,		IP7_27_24)
-#define GPSR3_1		F_(SD0_CMD,		IP7_23_20)
-#define GPSR3_0		F_(SD0_CLK,		IP7_19_16)
-
-/* GPSR4 */
-#define GPSR4_17	FM(SD3_DS)
-#define GPSR4_16	F_(SD3_DAT7,		IP10_7_4)
-#define GPSR4_15	F_(SD3_DAT6,		IP10_3_0)
-#define GPSR4_14	F_(SD3_DAT5,		IP9_31_28)
-#define GPSR4_13	F_(SD3_DAT4,		IP9_27_24)
-#define GPSR4_12	FM(SD3_DAT3)
-#define GPSR4_11	FM(SD3_DAT2)
-#define GPSR4_10	FM(SD3_DAT1)
-#define GPSR4_9		FM(SD3_DAT0)
-#define GPSR4_8		FM(SD3_CMD)
-#define GPSR4_7		FM(SD3_CLK)
-#define GPSR4_6		F_(SD2_DS,		IP9_23_20)
-#define GPSR4_5		F_(SD2_DAT3,		IP9_19_16)
-#define GPSR4_4		F_(SD2_DAT2,		IP9_15_12)
-#define GPSR4_3		F_(SD2_DAT1,		IP9_11_8)
-#define GPSR4_2		F_(SD2_DAT0,		IP9_7_4)
-#define GPSR4_1		FM(SD2_CMD)
-#define GPSR4_0		F_(SD2_CLK,		IP9_3_0)
-
-/* GPSR5 */
-#define GPSR5_25	F_(MLB_DAT,		IP13_19_16)
-#define GPSR5_24	F_(MLB_SIG,		IP13_15_12)
-#define GPSR5_23	F_(MLB_CLK,		IP13_11_8)
-#define GPSR5_22	FM(MSIOF0_RXD)
-#define GPSR5_21	F_(MSIOF0_SS2,		IP13_7_4)
-#define GPSR5_20	FM(MSIOF0_TXD)
-#define GPSR5_19	F_(MSIOF0_SS1,		IP13_3_0)
-#define GPSR5_18	F_(MSIOF0_SYNC,		IP12_31_28)
-#define GPSR5_17	FM(MSIOF0_SCK)
-#define GPSR5_16	F_(HRTS0_N,		IP12_27_24)
-#define GPSR5_15	F_(HCTS0_N,		IP12_23_20)
-#define GPSR5_14	F_(HTX0,		IP12_19_16)
-#define GPSR5_13	F_(HRX0,		IP12_15_12)
-#define GPSR5_12	F_(HSCK0,		IP12_11_8)
-#define GPSR5_11	F_(RX2_A,		IP12_7_4)
-#define GPSR5_10	F_(TX2_A,		IP12_3_0)
-#define GPSR5_9		F_(SCK2,		IP11_31_28)
-#define GPSR5_8		F_(RTS1_N,		IP11_27_24)
-#define GPSR5_7		F_(CTS1_N,		IP11_23_20)
-#define GPSR5_6		F_(TX1_A,		IP11_19_16)
-#define GPSR5_5		F_(RX1_A,		IP11_15_12)
-#define GPSR5_4		F_(RTS0_N,		IP11_11_8)
-#define GPSR5_3		F_(CTS0_N,		IP11_7_4)
-#define GPSR5_2		F_(TX0,			IP11_3_0)
-#define GPSR5_1		F_(RX0,			IP10_31_28)
-#define GPSR5_0		F_(SCK0,		IP10_27_24)
-
-/* GPSR6 */
-#define GPSR6_31	F_(USB31_OVC,		IP17_7_4)
-#define GPSR6_30	F_(USB31_PWEN,		IP17_3_0)
-#define GPSR6_29	F_(USB30_OVC,		IP16_31_28)
-#define GPSR6_28	F_(USB30_PWEN,		IP16_27_24)
-#define GPSR6_27	F_(USB1_OVC,		IP16_23_20)
-#define GPSR6_26	F_(USB1_PWEN,		IP16_19_16)
-#define GPSR6_25	F_(USB0_OVC,		IP16_15_12)
-#define GPSR6_24	F_(USB0_PWEN,		IP16_11_8)
-#define GPSR6_23	F_(AUDIO_CLKB_B,	IP16_7_4)
-#define GPSR6_22	F_(AUDIO_CLKA_A,	IP16_3_0)
-#define GPSR6_21	F_(SSI_SDATA9_A,	IP15_31_28)
-#define GPSR6_20	F_(SSI_SDATA8,		IP15_27_24)
-#define GPSR6_19	F_(SSI_SDATA7,		IP15_23_20)
-#define GPSR6_18	F_(SSI_WS78,		IP15_19_16)
-#define GPSR6_17	F_(SSI_SCK78,		IP15_15_12)
-#define GPSR6_16	F_(SSI_SDATA6,		IP15_11_8)
-#define GPSR6_15	F_(SSI_WS6,		IP15_7_4)
-#define GPSR6_14	F_(SSI_SCK6,		IP15_3_0)
-#define GPSR6_13	FM(SSI_SDATA5)
-#define GPSR6_12	FM(SSI_WS5)
-#define GPSR6_11	FM(SSI_SCK5)
-#define GPSR6_10	F_(SSI_SDATA4,		IP14_31_28)
-#define GPSR6_9		F_(SSI_WS4,		IP14_27_24)
-#define GPSR6_8		F_(SSI_SCK4,		IP14_23_20)
-#define GPSR6_7		F_(SSI_SDATA3,		IP14_19_16)
-#define GPSR6_6		F_(SSI_WS349,		IP14_15_12)
-#define GPSR6_5		F_(SSI_SCK349,		IP14_11_8)
-#define GPSR6_4		F_(SSI_SDATA2_A,	IP14_7_4)
-#define GPSR6_3		F_(SSI_SDATA1_A,	IP14_3_0)
-#define GPSR6_2		F_(SSI_SDATA0,		IP13_31_28)
-#define GPSR6_1		F_(SSI_WS01239,		IP13_27_24)
-#define GPSR6_0		F_(SSI_SCK01239,	IP13_23_20)
-
-/* GPSR7 */
-#define GPSR7_3		FM(GP7_03)
-#define GPSR7_2		FM(GP7_02)
-#define GPSR7_1		FM(AVS2)
-#define GPSR7_0		FM(AVS1)
-
-
-/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
-#define IP0_3_0		FM(AVB_MDC)		F_(0, 0)	FM(MSIOF2_SS2_C)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_7_4		FM(AVB_MAGIC)		F_(0, 0)	FM(MSIOF2_SS1_C)	FM(SCK4_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_11_8	FM(AVB_PHY_INT)		F_(0, 0)	FM(MSIOF2_SYNC_C)	FM(RX4_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_15_12	FM(AVB_LINK)		F_(0, 0)	FM(MSIOF2_SCK_C)	FM(TX4_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_19_16	FM(AVB_AVTP_MATCH_A)	F_(0, 0)	FM(MSIOF2_RXD_C)	FM(CTS4_N_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_23_20	FM(AVB_AVTP_CAPTURE_A)	F_(0, 0)	FM(MSIOF2_TXD_C)	FM(RTS4_N_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_27_24	FM(IRQ0)		FM(QPOLB)	F_(0, 0)		FM(DU_CDE)			FM(VI4_DATA0_B)	FM(CAN0_TX_B)	FM(CANFD0_TX_B)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_31_28	FM(IRQ1)		FM(QPOLA)	F_(0, 0)		FM(DU_DISP)			FM(VI4_DATA1_B)	FM(CAN0_RX_B)	FM(CANFD0_RX_B)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_3_0		FM(IRQ2)		FM(QCPV_QDE)	F_(0, 0)		FM(DU_EXODDF_DU_ODDF_DISP_CDE)	FM(VI4_DATA2_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(PWM3_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_7_4		FM(IRQ3)		FM(QSTVB_QVE)	FM(A25)			FM(DU_DOTCLKOUT1)		FM(VI4_DATA3_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(PWM4_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_11_8	FM(IRQ4)		FM(QSTH_QHS)	FM(A24)			FM(DU_EXHSYNC_DU_HSYNC)		FM(VI4_DATA4_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(PWM5_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_15_12	FM(IRQ5)		FM(QSTB_QHE)	FM(A23)			FM(DU_EXVSYNC_DU_VSYNC)		FM(VI4_DATA5_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(PWM6_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_19_16	FM(PWM0)		FM(AVB_AVTP_PPS)FM(A22)			F_(0, 0)			FM(VI4_DATA6_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(IECLK_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_23_20	FM(PWM1_A)		F_(0, 0)	FM(A21)			FM(HRX3_D)			FM(VI4_DATA7_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(IERX_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_27_24	FM(PWM2_A)		F_(0, 0)	FM(A20)			FM(HTX3_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(IETX_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_31_28	FM(A0)			FM(LCDOUT16)	FM(MSIOF3_SYNC_B)	F_(0, 0)			FM(VI4_DATA8)	F_(0, 0)	FM(DU_DB0)		F_(0, 0)	F_(0, 0)		FM(PWM3_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_3_0		FM(A1)			FM(LCDOUT17)	FM(MSIOF3_TXD_B)	F_(0, 0)			FM(VI4_DATA9)	F_(0, 0)	FM(DU_DB1)		F_(0, 0)	F_(0, 0)		FM(PWM4_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_7_4		FM(A2)			FM(LCDOUT18)	FM(MSIOF3_SCK_B)	F_(0, 0)			FM(VI4_DATA10)	F_(0, 0)	FM(DU_DB2)		F_(0, 0)	F_(0, 0)		FM(PWM5_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_11_8	FM(A3)			FM(LCDOUT19)	FM(MSIOF3_RXD_B)	F_(0, 0)			FM(VI4_DATA11)	F_(0, 0)	FM(DU_DB3)		F_(0, 0)	F_(0, 0)		FM(PWM6_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
-#define IP2_15_12	FM(A4)			FM(LCDOUT20)	FM(MSIOF3_SS1_B)	F_(0, 0)			FM(VI4_DATA12)	FM(VI5_DATA12)	FM(DU_DB4)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_19_16	FM(A5)			FM(LCDOUT21)	FM(MSIOF3_SS2_B)	FM(SCK4_B)			FM(VI4_DATA13)	FM(VI5_DATA13)	FM(DU_DB5)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_23_20	FM(A6)			FM(LCDOUT22)	FM(MSIOF2_SS1_A)	FM(RX4_B)			FM(VI4_DATA14)	FM(VI5_DATA14)	FM(DU_DB6)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_27_24	FM(A7)			FM(LCDOUT23)	FM(MSIOF2_SS2_A)	FM(TX4_B)			FM(VI4_DATA15)	FM(VI5_DATA15)	FM(DU_DB7)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_31_28	FM(A8)			FM(RX3_B)	FM(MSIOF2_SYNC_A)	FM(HRX4_B)			F_(0, 0)	F_(0, 0)	F_(0, 0)		FM(SDA6_A)	FM(AVB_AVTP_MATCH_B)	FM(PWM1_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_3_0		FM(A9)			F_(0, 0)	FM(MSIOF2_SCK_A)	FM(CTS4_N_B)			F_(0, 0)	FM(VI5_VSYNC_N)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_7_4		FM(A10)			F_(0, 0)	FM(MSIOF2_RXD_A)	FM(RTS4_N_B)			F_(0, 0)	FM(VI5_HSYNC_N)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_11_8	FM(A11)			FM(TX3_B)	FM(MSIOF2_TXD_A)	FM(HTX4_B)			FM(HSCK4)	FM(VI5_FIELD)	F_(0, 0)		FM(SCL6_A)	FM(AVB_AVTP_CAPTURE_B)	FM(PWM2_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_15_12	FM(A12)			FM(LCDOUT12)	FM(MSIOF3_SCK_C)	F_(0, 0)			FM(HRX4_A)	FM(VI5_DATA8)	FM(DU_DG4)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_19_16	FM(A13)			FM(LCDOUT13)	FM(MSIOF3_SYNC_C)	F_(0, 0)			FM(HTX4_A)	FM(VI5_DATA9)	FM(DU_DG5)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_23_20	FM(A14)			FM(LCDOUT14)	FM(MSIOF3_RXD_C)	F_(0, 0)			FM(HCTS4_N)	FM(VI5_DATA10)	FM(DU_DG6)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_27_24	FM(A15)			FM(LCDOUT15)	FM(MSIOF3_TXD_C)	F_(0, 0)			FM(HRTS4_N)	FM(VI5_DATA11)	FM(DU_DG7)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_31_28	FM(A16)			FM(LCDOUT8)	F_(0, 0)		F_(0, 0)			FM(VI4_FIELD)	F_(0, 0)	FM(DU_DG0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_3_0		FM(A17)			FM(LCDOUT9)	F_(0, 0)		F_(0, 0)			FM(VI4_VSYNC_N)	F_(0, 0)	FM(DU_DG1)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_7_4		FM(A18)			FM(LCDOUT10)	F_(0, 0)		F_(0, 0)			FM(VI4_HSYNC_N)	F_(0, 0)	FM(DU_DG2)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_11_8	FM(A19)			FM(LCDOUT11)	F_(0, 0)		F_(0, 0)			FM(VI4_CLKENB)	F_(0, 0)	FM(DU_DG3)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_15_12	FM(CS0_N)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(VI5_CLKENB)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_19_16	FM(CS1_N_A26)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(VI5_CLK)	F_(0, 0)		FM(EX_WAIT0_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_23_20	FM(BS_N)		FM(QSTVA_QVS)	FM(MSIOF3_SCK_D)	FM(SCK3)			FM(HSCK3)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(CAN1_TX)		FM(CANFD1_TX)	FM(IETX_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_27_24	FM(RD_N)		F_(0, 0)	FM(MSIOF3_SYNC_D)	FM(RX3_A)			FM(HRX3_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(CAN0_TX_A)		FM(CANFD0_TX_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_31_28	FM(RD_WR_N)		F_(0, 0)	FM(MSIOF3_RXD_D)	FM(TX3_A)			FM(HTX3_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(CAN0_RX_A)		FM(CANFD0_RX_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_3_0		FM(WE0_N)		F_(0, 0)	FM(MSIOF3_TXD_D)	FM(CTS3_N)			FM(HCTS3_N)	F_(0, 0)	F_(0, 0)		FM(SCL6_B)	FM(CAN_CLK)		F_(0, 0)	FM(IECLK_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_7_4		FM(WE1_N)		F_(0, 0)	FM(MSIOF3_SS1_D)	FM(RTS3_N)			FM(HRTS3_N)	F_(0, 0)	F_(0, 0)		FM(SDA6_B)	FM(CAN1_RX)		FM(CANFD1_RX)	FM(IERX_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_11_8	FM(EX_WAIT0_A)		FM(QCLK)	F_(0, 0)		F_(0, 0)			FM(VI4_CLK)	F_(0, 0)	FM(DU_DOTCLKOUT0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_15_12	FM(D0)			FM(MSIOF2_SS1_B)FM(MSIOF3_SCK_A)	F_(0, 0)			FM(VI4_DATA16)	FM(VI5_DATA0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_19_16	FM(D1)			FM(MSIOF2_SS2_B)FM(MSIOF3_SYNC_A)	F_(0, 0)			FM(VI4_DATA17)	FM(VI5_DATA1)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_23_20	FM(D2)			F_(0, 0)	FM(MSIOF3_RXD_A)	F_(0, 0)			FM(VI4_DATA18)	FM(VI5_DATA2)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_27_24	FM(D3)			F_(0, 0)	FM(MSIOF3_TXD_A)	F_(0, 0)			FM(VI4_DATA19)	FM(VI5_DATA3)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_31_28	FM(D4)			FM(MSIOF2_SCK_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA20)	FM(VI5_DATA4)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_3_0		FM(D5)			FM(MSIOF2_SYNC_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA21)	FM(VI5_DATA5)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_7_4		FM(D6)			FM(MSIOF2_RXD_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA22)	FM(VI5_DATA6)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_11_8	FM(D7)			FM(MSIOF2_TXD_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA23)	FM(VI5_DATA7)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_15_12	FM(D8)			FM(LCDOUT0)	FM(MSIOF2_SCK_D)	FM(SCK4_C)			FM(VI4_DATA0_A)	F_(0, 0)	FM(DU_DR0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_19_16	FM(D9)			FM(LCDOUT1)	FM(MSIOF2_SYNC_D)	F_(0, 0)			FM(VI4_DATA1_A)	F_(0, 0)	FM(DU_DR1)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_23_20	FM(D10)			FM(LCDOUT2)	FM(MSIOF2_RXD_D)	FM(HRX3_B)			FM(VI4_DATA2_A)	FM(CTS4_N_C)	FM(DU_DR2)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_27_24	FM(D11)			FM(LCDOUT3)	FM(MSIOF2_TXD_D)	FM(HTX3_B)			FM(VI4_DATA3_A)	FM(RTS4_N_C)	FM(DU_DR3)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_31_28	FM(D12)			FM(LCDOUT4)	FM(MSIOF2_SS1_D)	FM(RX4_C)			FM(VI4_DATA4_A)	F_(0, 0)	FM(DU_DR4)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_3_0		FM(D13)			FM(LCDOUT5)	FM(MSIOF2_SS2_D)	FM(TX4_C)			FM(VI4_DATA5_A)	F_(0, 0)	FM(DU_DR5)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_7_4		FM(D14)			FM(LCDOUT6)	FM(MSIOF3_SS1_A)	FM(HRX3_C)			FM(VI4_DATA6_A)	F_(0, 0)	FM(DU_DR6)		FM(SCL6_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_11_8	FM(D15)			FM(LCDOUT7)	FM(MSIOF3_SS2_A)	FM(HTX3_C)			FM(VI4_DATA7_A)	F_(0, 0)	FM(DU_DR7)		FM(SDA6_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_15_12	FM(FSCLKST)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_19_16	FM(SD0_CLK)		F_(0, 0)	FM(MSIOF1_SCK_E)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_OPWM_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
-#define IP7_23_20	FM(SD0_CMD)		F_(0, 0)	FM(MSIOF1_SYNC_E)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_IVCXO27_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_27_24	FM(SD0_DAT0)		F_(0, 0)	FM(MSIOF1_RXD_E)	F_(0, 0)			F_(0, 0)	FM(TS_SCK0_B)	FM(STP_ISCLK_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_31_28	FM(SD0_DAT1)		F_(0, 0)	FM(MSIOF1_TXD_E)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC0_B)FM(STP_ISSYNC_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_3_0		FM(SD0_DAT2)		F_(0, 0)	FM(MSIOF1_SS1_E)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT0_B)	FM(STP_ISD_0_B)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_7_4		FM(SD0_DAT3)		F_(0, 0)	FM(MSIOF1_SS2_E)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN0_B)	FM(STP_ISEN_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_11_8	FM(SD1_CLK)		F_(0, 0)	FM(MSIOF1_SCK_G)	F_(0, 0)			F_(0, 0)	FM(SIM0_CLK_A)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_15_12	FM(SD1_CMD)		F_(0, 0)	FM(MSIOF1_SYNC_G)	F_(0, 0)			F_(0, 0)	FM(SIM0_D_A)	FM(STP_IVCXO27_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_19_16	FM(SD1_DAT0)		FM(SD2_DAT4)	FM(MSIOF1_RXD_G)	F_(0, 0)			F_(0, 0)	FM(TS_SCK1_B)	FM(STP_ISCLK_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_23_20	FM(SD1_DAT1)		FM(SD2_DAT5)	FM(MSIOF1_TXD_G)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC1_B)FM(STP_ISSYNC_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_27_24	FM(SD1_DAT2)		FM(SD2_DAT6)	FM(MSIOF1_SS1_G)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT1_B)	FM(STP_ISD_1_B)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_31_28	FM(SD1_DAT3)		FM(SD2_DAT7)	FM(MSIOF1_SS2_G)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN1_B)	FM(STP_ISEN_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_3_0		FM(SD2_CLK)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_7_4		FM(SD2_DAT0)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_11_8	FM(SD2_DAT1)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_15_12	FM(SD2_DAT2)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_19_16	FM(SD2_DAT3)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_23_20	FM(SD2_DS)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(SATA_DEVSLP_B)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_27_24	FM(SD3_DAT4)		FM(SD2_CD_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_31_28	FM(SD3_DAT5)		FM(SD2_WP_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_3_0	FM(SD3_DAT6)		FM(SD3_CD)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_7_4	FM(SD3_DAT7)		FM(SD3_WP)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_11_8	FM(SD0_CD)		F_(0, 0)	F_(0, 0)		F_(0, 0)			FM(SCL2_B)	FM(SIM0_RST_A)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_15_12	FM(SD0_WP)		F_(0, 0)	F_(0, 0)		F_(0, 0)			FM(SDA2_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_19_16	FM(SD1_CD)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(SIM0_CLK_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_23_20	FM(SD1_WP)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(SIM0_D_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_27_24	FM(SCK0)		FM(HSCK1_B)	FM(MSIOF1_SS2_B)	FM(AUDIO_CLKC_B)		FM(SDA2_A)	FM(SIM0_RST_B)	FM(STP_OPWM_0_C)	FM(RIF0_CLK_B)	F_(0, 0)		FM(ADICHS2)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_31_28	FM(RX0)			FM(HRX1_B)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SCK0_C)	FM(STP_ISCLK_0_C)	FM(RIF0_D0_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_3_0	FM(TX0)			FM(HTX1_B)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC0_C)FM(STP_ISSYNC_0_C)	FM(RIF0_D1_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_7_4	FM(CTS0_N)		FM(HCTS1_N_B)	FM(MSIOF1_SYNC_B)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC1_C)FM(STP_ISSYNC_1_C)	FM(RIF1_SYNC_B)	FM(AUDIO_CLKOUT_C)	FM(ADICS_SAMP)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_11_8	FM(RTS0_N)		FM(HRTS1_N_B)	FM(MSIOF1_SS1_B)	FM(AUDIO_CLKA_B)		FM(SCL2_A)	F_(0, 0)	FM(STP_IVCXO27_1_C)	FM(RIF0_SYNC_B)	F_(0, 0)		FM(ADICHS1)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_15_12	FM(RX1_A)		FM(HRX1_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SDAT0_C)	FM(STP_ISD_0_C)		FM(RIF1_CLK_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_19_16	FM(TX1_A)		FM(HTX1_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SDEN0_C)	FM(STP_ISEN_0_C)	FM(RIF1_D0_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_23_20	FM(CTS1_N)		FM(HCTS1_N_A)	FM(MSIOF1_RXD_B)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN1_C)	FM(STP_ISEN_1_C)	FM(RIF1_D0_B)	F_(0, 0)		FM(ADIDATA)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_27_24	FM(RTS1_N)		FM(HRTS1_N_A)	FM(MSIOF1_TXD_B)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT1_C)	FM(STP_ISD_1_C)		FM(RIF1_D1_B)	F_(0, 0)		FM(ADICHS0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_31_28	FM(SCK2)		FM(SCIF_CLK_B)	FM(MSIOF1_SCK_B)	F_(0, 0)			F_(0, 0)	FM(TS_SCK1_C)	FM(STP_ISCLK_1_C)	FM(RIF1_CLK_B)	F_(0, 0)		FM(ADICLK)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_3_0	FM(TX2_A)		F_(0, 0)	F_(0, 0)		FM(SD2_CD_B)			FM(SCL1_A)	F_(0, 0)	FM(FMCLK_A)		FM(RIF1_D1_C)	F_(0, 0)		FM(FSO_CFE_0_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_7_4	FM(RX2_A)		F_(0, 0)	F_(0, 0)		FM(SD2_WP_B)			FM(SDA1_A)	F_(0, 0)	FM(FMIN_A)		FM(RIF1_SYNC_C)	F_(0, 0)		FM(FSO_CFE_1_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_11_8	FM(HSCK0)		F_(0, 0)	FM(MSIOF1_SCK_D)	FM(AUDIO_CLKB_A)		FM(SSI_SDATA1_B)FM(TS_SCK0_D)	FM(STP_ISCLK_0_D)	FM(RIF0_CLK_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_15_12	FM(HRX0)		F_(0, 0)	FM(MSIOF1_RXD_D)	F_(0, 0)			FM(SSI_SDATA2_B)FM(TS_SDEN0_D)	FM(STP_ISEN_0_D)	FM(RIF0_D0_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_19_16	FM(HTX0)		F_(0, 0)	FM(MSIOF1_TXD_D)	F_(0, 0)			FM(SSI_SDATA9_B)FM(TS_SDAT0_D)	FM(STP_ISD_0_D)		FM(RIF0_D1_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_23_20	FM(HCTS0_N)		FM(RX2_B)	FM(MSIOF1_SYNC_D)	F_(0, 0)			FM(SSI_SCK9_A)	FM(TS_SPSYNC0_D)FM(STP_ISSYNC_0_D)	FM(RIF0_SYNC_C)	FM(AUDIO_CLKOUT1_A)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_27_24	FM(HRTS0_N)		FM(TX2_B)	FM(MSIOF1_SS1_D)	F_(0, 0)			FM(SSI_WS9_A)	F_(0, 0)	FM(STP_IVCXO27_0_D)	FM(BPFCLK_A)	FM(AUDIO_CLKOUT2_A)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
-#define IP12_31_28	FM(MSIOF0_SYNC)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(AUDIO_CLKOUT_A)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_3_0	FM(MSIOF0_SS1)		FM(RX5)		F_(0, 0)		FM(AUDIO_CLKA_C)		FM(SSI_SCK2_A)	F_(0, 0)	FM(STP_IVCXO27_0_C)	F_(0, 0)	FM(AUDIO_CLKOUT3_A)	F_(0, 0)	FM(TCLK1_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_7_4	FM(MSIOF0_SS2)		FM(TX5)		FM(MSIOF1_SS2_D)	FM(AUDIO_CLKC_A)		FM(SSI_WS2_A)	F_(0, 0)	FM(STP_OPWM_0_D)	F_(0, 0)	FM(AUDIO_CLKOUT_D)	F_(0, 0)	FM(SPEEDIN_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_11_8	FM(MLB_CLK)		F_(0, 0)	FM(MSIOF1_SCK_F)	F_(0, 0)			FM(SCL1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_15_12	FM(MLB_SIG)		FM(RX1_B)	FM(MSIOF1_SYNC_F)	F_(0, 0)			FM(SDA1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_19_16	FM(MLB_DAT)		FM(TX1_B)	FM(MSIOF1_RXD_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_23_20	FM(SSI_SCK01239)	F_(0, 0)	FM(MSIOF1_TXD_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_27_24	FM(SSI_WS01239)		F_(0, 0)	FM(MSIOF1_SS1_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_31_28	FM(SSI_SDATA0)		F_(0, 0)	FM(MSIOF1_SS2_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_3_0	FM(SSI_SDATA1_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_7_4	FM(SSI_SDATA2_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)			FM(SSI_SCK1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_11_8	FM(SSI_SCK349)		F_(0, 0)	FM(MSIOF1_SS1_A)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_OPWM_0_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_15_12	FM(SSI_WS349)		FM(HCTS2_N_A)	FM(MSIOF1_SS2_A)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_IVCXO27_0_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_19_16	FM(SSI_SDATA3)		FM(HRTS2_N_A)	FM(MSIOF1_TXD_A)	F_(0, 0)			F_(0, 0)	FM(TS_SCK0_A)	FM(STP_ISCLK_0_A)	FM(RIF0_D1_A)	FM(RIF2_D0_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_23_20	FM(SSI_SCK4)		FM(HRX2_A)	FM(MSIOF1_SCK_A)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT0_A)	FM(STP_ISD_0_A)		FM(RIF0_CLK_A)	FM(RIF2_CLK_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_27_24	FM(SSI_WS4)		FM(HTX2_A)	FM(MSIOF1_SYNC_A)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN0_A)	FM(STP_ISEN_0_A)	FM(RIF0_SYNC_A)	FM(RIF2_SYNC_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_31_28	FM(SSI_SDATA4)		FM(HSCK2_A)	FM(MSIOF1_RXD_A)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC0_A)FM(STP_ISSYNC_0_A)	FM(RIF0_D0_A)	FM(RIF2_D1_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_3_0	FM(SSI_SCK6)		FM(USB2_PWEN)	F_(0, 0)		FM(SIM0_RST_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_7_4	FM(SSI_WS6)		FM(USB2_OVC)	F_(0, 0)		FM(SIM0_D_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_11_8	FM(SSI_SDATA6)		F_(0, 0)	F_(0, 0)		FM(SIM0_CLK_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(SATA_DEVSLP_A)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_15_12	FM(SSI_SCK78)		FM(HRX2_B)	FM(MSIOF1_SCK_C)	F_(0, 0)			F_(0, 0)	FM(TS_SCK1_A)	FM(STP_ISCLK_1_A)	FM(RIF1_CLK_A)	FM(RIF3_CLK_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_19_16	FM(SSI_WS78)		FM(HTX2_B)	FM(MSIOF1_SYNC_C)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT1_A)	FM(STP_ISD_1_A)		FM(RIF1_SYNC_A)	FM(RIF3_SYNC_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_23_20	FM(SSI_SDATA7)		FM(HCTS2_N_B)	FM(MSIOF1_RXD_C)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN1_A)	FM(STP_ISEN_1_A)	FM(RIF1_D0_A)	FM(RIF3_D0_A)		F_(0, 0)	FM(TCLK2_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_27_24	FM(SSI_SDATA8)		FM(HRTS2_N_B)	FM(MSIOF1_TXD_C)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC1_A)FM(STP_ISSYNC_1_A)	FM(RIF1_D1_A)	FM(RIF3_D1_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_31_28	FM(SSI_SDATA9_A)	FM(HSCK2_B)	FM(MSIOF1_SS1_C)	FM(HSCK1_A)			FM(SSI_WS1_B)	FM(SCK1)	FM(STP_IVCXO27_1_A)	FM(SCK5)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_3_0	FM(AUDIO_CLKA_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_7_4	FM(AUDIO_CLKB_B)	FM(SCIF_CLK_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_IVCXO27_1_D)	FM(REMOCON_A)	F_(0, 0)		F_(0, 0)	FM(TCLK1_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_11_8	FM(USB0_PWEN)		F_(0, 0)	F_(0, 0)		FM(SIM0_RST_C)			F_(0, 0)	FM(TS_SCK1_D)	FM(STP_ISCLK_1_D)	FM(BPFCLK_B)	FM(RIF3_CLK_B)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_15_12	FM(USB0_OVC)		F_(0, 0)	F_(0, 0)		FM(SIM0_D_C)			F_(0, 0)	FM(TS_SDAT1_D)	FM(STP_ISD_1_D)		F_(0, 0)	FM(RIF3_SYNC_B)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_19_16	FM(USB1_PWEN)		F_(0, 0)	F_(0, 0)		FM(SIM0_CLK_C)			FM(SSI_SCK1_A)	FM(TS_SCK0_E)	FM(STP_ISCLK_0_E)	FM(FMCLK_B)	FM(RIF2_CLK_B)		F_(0, 0)	FM(SPEEDIN_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_23_20	FM(USB1_OVC)		F_(0, 0)	FM(MSIOF1_SS2_C)	F_(0, 0)			FM(SSI_WS1_A)	FM(TS_SDAT0_E)	FM(STP_ISD_0_E)		FM(FMIN_B)	FM(RIF2_SYNC_B)		F_(0, 0)	FM(REMOCON_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_27_24	FM(USB30_PWEN)		F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT_B)		FM(SSI_SCK2_B)	FM(TS_SDEN1_D)	FM(STP_ISEN_1_D)	FM(STP_OPWM_0_E)FM(RIF3_D0_B)		F_(0, 0)	FM(TCLK2_B)	FM(TPU0TO0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_31_28	FM(USB30_OVC)		F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT1_B)		FM(SSI_WS2_B)	FM(TS_SPSYNC1_D)FM(STP_ISSYNC_1_D)	FM(STP_IVCXO27_0_E)FM(RIF3_D1_B)	F_(0, 0)	FM(FSO_TOE_B)	FM(TPU0TO1)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP17_3_0	FM(USB31_PWEN)		F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT2_B)		FM(SSI_SCK9_B)	FM(TS_SDEN0_E)	FM(STP_ISEN_0_E)	F_(0, 0)	FM(RIF2_D0_B)		F_(0, 0)	F_(0, 0)	FM(TPU0TO2)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP17_7_4	FM(USB31_OVC)		F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT3_B)		FM(SSI_WS9_B)	FM(TS_SPSYNC0_E)FM(STP_ISSYNC_0_E)	F_(0, 0)	FM(RIF2_D1_B)		F_(0, 0)	F_(0, 0)	FM(TPU0TO3)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-#define PINMUX_GPSR	\
-\
-												GPSR6_31 \
-												GPSR6_30 \
-												GPSR6_29 \
-												GPSR6_28 \
-		GPSR1_27									GPSR6_27 \
-		GPSR1_26									GPSR6_26 \
-		GPSR1_25							GPSR5_25	GPSR6_25 \
-		GPSR1_24							GPSR5_24	GPSR6_24 \
-		GPSR1_23							GPSR5_23	GPSR6_23 \
-		GPSR1_22							GPSR5_22	GPSR6_22 \
-		GPSR1_21							GPSR5_21	GPSR6_21 \
-		GPSR1_20							GPSR5_20	GPSR6_20 \
-		GPSR1_19							GPSR5_19	GPSR6_19 \
-		GPSR1_18							GPSR5_18	GPSR6_18 \
-		GPSR1_17					GPSR4_17	GPSR5_17	GPSR6_17 \
-		GPSR1_16					GPSR4_16	GPSR5_16	GPSR6_16 \
-GPSR0_15	GPSR1_15			GPSR3_15	GPSR4_15	GPSR5_15	GPSR6_15 \
-GPSR0_14	GPSR1_14	GPSR2_14	GPSR3_14	GPSR4_14	GPSR5_14	GPSR6_14 \
-GPSR0_13	GPSR1_13	GPSR2_13	GPSR3_13	GPSR4_13	GPSR5_13	GPSR6_13 \
-GPSR0_12	GPSR1_12	GPSR2_12	GPSR3_12	GPSR4_12	GPSR5_12	GPSR6_12 \
-GPSR0_11	GPSR1_11	GPSR2_11	GPSR3_11	GPSR4_11	GPSR5_11	GPSR6_11 \
-GPSR0_10	GPSR1_10	GPSR2_10	GPSR3_10	GPSR4_10	GPSR5_10	GPSR6_10 \
-GPSR0_9		GPSR1_9		GPSR2_9		GPSR3_9		GPSR4_9		GPSR5_9		GPSR6_9 \
-GPSR0_8		GPSR1_8		GPSR2_8		GPSR3_8		GPSR4_8		GPSR5_8		GPSR6_8 \
-GPSR0_7		GPSR1_7		GPSR2_7		GPSR3_7		GPSR4_7		GPSR5_7		GPSR6_7 \
-GPSR0_6		GPSR1_6		GPSR2_6		GPSR3_6		GPSR4_6		GPSR5_6		GPSR6_6 \
-GPSR0_5		GPSR1_5		GPSR2_5		GPSR3_5		GPSR4_5		GPSR5_5		GPSR6_5 \
-GPSR0_4		GPSR1_4		GPSR2_4		GPSR3_4		GPSR4_4		GPSR5_4		GPSR6_4 \
-GPSR0_3		GPSR1_3		GPSR2_3		GPSR3_3		GPSR4_3		GPSR5_3		GPSR6_3		GPSR7_3 \
-GPSR0_2		GPSR1_2		GPSR2_2		GPSR3_2		GPSR4_2		GPSR5_2		GPSR6_2		GPSR7_2 \
-GPSR0_1		GPSR1_1		GPSR2_1		GPSR3_1		GPSR4_1		GPSR5_1		GPSR6_1		GPSR7_1 \
-GPSR0_0		GPSR1_0		GPSR2_0		GPSR3_0		GPSR4_0		GPSR5_0		GPSR6_0		GPSR7_0
-
-#define PINMUX_IPSR				\
-\
-FM(IP0_3_0)	IP0_3_0		FM(IP1_3_0)	IP1_3_0		FM(IP2_3_0)	IP2_3_0		FM(IP3_3_0)	IP3_3_0 \
-FM(IP0_7_4)	IP0_7_4		FM(IP1_7_4)	IP1_7_4		FM(IP2_7_4)	IP2_7_4		FM(IP3_7_4)	IP3_7_4 \
-FM(IP0_11_8)	IP0_11_8	FM(IP1_11_8)	IP1_11_8	FM(IP2_11_8)	IP2_11_8	FM(IP3_11_8)	IP3_11_8 \
-FM(IP0_15_12)	IP0_15_12	FM(IP1_15_12)	IP1_15_12	FM(IP2_15_12)	IP2_15_12	FM(IP3_15_12)	IP3_15_12 \
-FM(IP0_19_16)	IP0_19_16	FM(IP1_19_16)	IP1_19_16	FM(IP2_19_16)	IP2_19_16	FM(IP3_19_16)	IP3_19_16 \
-FM(IP0_23_20)	IP0_23_20	FM(IP1_23_20)	IP1_23_20	FM(IP2_23_20)	IP2_23_20	FM(IP3_23_20)	IP3_23_20 \
-FM(IP0_27_24)	IP0_27_24	FM(IP1_27_24)	IP1_27_24	FM(IP2_27_24)	IP2_27_24	FM(IP3_27_24)	IP3_27_24 \
-FM(IP0_31_28)	IP0_31_28	FM(IP1_31_28)	IP1_31_28	FM(IP2_31_28)	IP2_31_28	FM(IP3_31_28)	IP3_31_28 \
-\
-FM(IP4_3_0)	IP4_3_0		FM(IP5_3_0)	IP5_3_0		FM(IP6_3_0)	IP6_3_0		FM(IP7_3_0)	IP7_3_0 \
-FM(IP4_7_4)	IP4_7_4		FM(IP5_7_4)	IP5_7_4		FM(IP6_7_4)	IP6_7_4		FM(IP7_7_4)	IP7_7_4 \
-FM(IP4_11_8)	IP4_11_8	FM(IP5_11_8)	IP5_11_8	FM(IP6_11_8)	IP6_11_8	FM(IP7_11_8)	IP7_11_8 \
-FM(IP4_15_12)	IP4_15_12	FM(IP5_15_12)	IP5_15_12	FM(IP6_15_12)	IP6_15_12	FM(IP7_15_12)	IP7_15_12 \
-FM(IP4_19_16)	IP4_19_16	FM(IP5_19_16)	IP5_19_16	FM(IP6_19_16)	IP6_19_16	FM(IP7_19_16)	IP7_19_16 \
-FM(IP4_23_20)	IP4_23_20	FM(IP5_23_20)	IP5_23_20	FM(IP6_23_20)	IP6_23_20	FM(IP7_23_20)	IP7_23_20 \
-FM(IP4_27_24)	IP4_27_24	FM(IP5_27_24)	IP5_27_24	FM(IP6_27_24)	IP6_27_24	FM(IP7_27_24)	IP7_27_24 \
-FM(IP4_31_28)	IP4_31_28	FM(IP5_31_28)	IP5_31_28	FM(IP6_31_28)	IP6_31_28	FM(IP7_31_28)	IP7_31_28 \
-\
-FM(IP8_3_0)	IP8_3_0		FM(IP9_3_0)	IP9_3_0		FM(IP10_3_0)	IP10_3_0	FM(IP11_3_0)	IP11_3_0 \
-FM(IP8_7_4)	IP8_7_4		FM(IP9_7_4)	IP9_7_4		FM(IP10_7_4)	IP10_7_4	FM(IP11_7_4)	IP11_7_4 \
-FM(IP8_11_8)	IP8_11_8	FM(IP9_11_8)	IP9_11_8	FM(IP10_11_8)	IP10_11_8	FM(IP11_11_8)	IP11_11_8 \
-FM(IP8_15_12)	IP8_15_12	FM(IP9_15_12)	IP9_15_12	FM(IP10_15_12)	IP10_15_12	FM(IP11_15_12)	IP11_15_12 \
-FM(IP8_19_16)	IP8_19_16	FM(IP9_19_16)	IP9_19_16	FM(IP10_19_16)	IP10_19_16	FM(IP11_19_16)	IP11_19_16 \
-FM(IP8_23_20)	IP8_23_20	FM(IP9_23_20)	IP9_23_20	FM(IP10_23_20)	IP10_23_20	FM(IP11_23_20)	IP11_23_20 \
-FM(IP8_27_24)	IP8_27_24	FM(IP9_27_24)	IP9_27_24	FM(IP10_27_24)	IP10_27_24	FM(IP11_27_24)	IP11_27_24 \
-FM(IP8_31_28)	IP8_31_28	FM(IP9_31_28)	IP9_31_28	FM(IP10_31_28)	IP10_31_28	FM(IP11_31_28)	IP11_31_28 \
-\
-FM(IP12_3_0)	IP12_3_0	FM(IP13_3_0)	IP13_3_0	FM(IP14_3_0)	IP14_3_0	FM(IP15_3_0)	IP15_3_0 \
-FM(IP12_7_4)	IP12_7_4	FM(IP13_7_4)	IP13_7_4	FM(IP14_7_4)	IP14_7_4	FM(IP15_7_4)	IP15_7_4 \
-FM(IP12_11_8)	IP12_11_8	FM(IP13_11_8)	IP13_11_8	FM(IP14_11_8)	IP14_11_8	FM(IP15_11_8)	IP15_11_8 \
-FM(IP12_15_12)	IP12_15_12	FM(IP13_15_12)	IP13_15_12	FM(IP14_15_12)	IP14_15_12	FM(IP15_15_12)	IP15_15_12 \
-FM(IP12_19_16)	IP12_19_16	FM(IP13_19_16)	IP13_19_16	FM(IP14_19_16)	IP14_19_16	FM(IP15_19_16)	IP15_19_16 \
-FM(IP12_23_20)	IP12_23_20	FM(IP13_23_20)	IP13_23_20	FM(IP14_23_20)	IP14_23_20	FM(IP15_23_20)	IP15_23_20 \
-FM(IP12_27_24)	IP12_27_24	FM(IP13_27_24)	IP13_27_24	FM(IP14_27_24)	IP14_27_24	FM(IP15_27_24)	IP15_27_24 \
-FM(IP12_31_28)	IP12_31_28	FM(IP13_31_28)	IP13_31_28	FM(IP14_31_28)	IP14_31_28	FM(IP15_31_28)	IP15_31_28 \
-\
-FM(IP16_3_0)	IP16_3_0	FM(IP17_3_0)	IP17_3_0 \
-FM(IP16_7_4)	IP16_7_4	FM(IP17_7_4)	IP17_7_4 \
-FM(IP16_11_8)	IP16_11_8 \
-FM(IP16_15_12)	IP16_15_12 \
-FM(IP16_19_16)	IP16_19_16 \
-FM(IP16_23_20)	IP16_23_20 \
-FM(IP16_27_24)	IP16_27_24 \
-FM(IP16_31_28)	IP16_31_28
-
-/* MOD_SEL0 */			/* 0 */			/* 1 */			/* 2 */			/* 3 */			/* 4 */			/* 5 */			/* 6 */			/* 7 */
-#define MOD_SEL0_30_29		FM(SEL_MSIOF3_0)	FM(SEL_MSIOF3_1)	FM(SEL_MSIOF3_2)	FM(SEL_MSIOF3_3)
-#define MOD_SEL0_28_27		FM(SEL_MSIOF2_0)	FM(SEL_MSIOF2_1)	FM(SEL_MSIOF2_2)	FM(SEL_MSIOF2_3)
-#define MOD_SEL0_26_25_24	FM(SEL_MSIOF1_0)	FM(SEL_MSIOF1_1)	FM(SEL_MSIOF1_2)	FM(SEL_MSIOF1_3)	FM(SEL_MSIOF1_4)	FM(SEL_MSIOF1_5)	FM(SEL_MSIOF1_6)	F_(0, 0)
-#define MOD_SEL0_23		FM(SEL_LBSC_0)		FM(SEL_LBSC_1)
-#define MOD_SEL0_22		FM(SEL_IEBUS_0)		FM(SEL_IEBUS_1)
-#define MOD_SEL0_21_20		FM(SEL_I2C6_0)		FM(SEL_I2C6_1)		FM(SEL_I2C6_2)		F_(0, 0)
-#define MOD_SEL0_19		FM(SEL_I2C2_0)		FM(SEL_I2C2_1)
-#define MOD_SEL0_18		FM(SEL_I2C1_0)		FM(SEL_I2C1_1)
-#define MOD_SEL0_17		FM(SEL_HSCIF4_0)	FM(SEL_HSCIF4_1)
-#define MOD_SEL0_16_15		FM(SEL_HSCIF3_0)	FM(SEL_HSCIF3_1)	FM(SEL_HSCIF3_2)	FM(SEL_HSCIF3_3)
-#define MOD_SEL0_14		FM(SEL_HSCIF2_0)	FM(SEL_HSCIF2_1)
-#define MOD_SEL0_13		FM(SEL_HSCIF1_0)	FM(SEL_HSCIF1_1)
-#define MOD_SEL0_12		FM(SEL_FSO_0)		FM(SEL_FSO_1)
-#define MOD_SEL0_11		FM(SEL_FM_0)		FM(SEL_FM_1)
-#define MOD_SEL0_10		FM(SEL_ETHERAVB_0)	FM(SEL_ETHERAVB_1)
-#define MOD_SEL0_9		FM(SEL_DRIF3_0)		FM(SEL_DRIF3_1)
-#define MOD_SEL0_8		FM(SEL_DRIF2_0)		FM(SEL_DRIF2_1)
-#define MOD_SEL0_7_6		FM(SEL_DRIF1_0)		FM(SEL_DRIF1_1)		FM(SEL_DRIF1_2)		F_(0, 0)
-#define MOD_SEL0_5_4		FM(SEL_DRIF0_0)		FM(SEL_DRIF0_1)		FM(SEL_DRIF0_2)		F_(0, 0)
-#define MOD_SEL0_3		FM(SEL_CANFD0_0)	FM(SEL_CANFD0_1)
-#define MOD_SEL0_2_1		FM(SEL_ADG_0)		FM(SEL_ADG_1)		FM(SEL_ADG_2)		FM(SEL_ADG_3)
-
-/* MOD_SEL1 */			/* 0 */			/* 1 */			/* 2 */			/* 3 */			/* 4 */			/* 5 */			/* 6 */			/* 7 */
-#define MOD_SEL1_31_30		FM(SEL_TSIF1_0)		FM(SEL_TSIF1_1)		FM(SEL_TSIF1_2)		FM(SEL_TSIF1_3)
-#define MOD_SEL1_29_28_27	FM(SEL_TSIF0_0)		FM(SEL_TSIF0_1)		FM(SEL_TSIF0_2)		FM(SEL_TSIF0_3)		FM(SEL_TSIF0_4)		F_(0, 0)		F_(0, 0)		F_(0, 0)
-#define MOD_SEL1_26		FM(SEL_TIMER_TMU_0)	FM(SEL_TIMER_TMU_1)
-#define MOD_SEL1_25_24		FM(SEL_SSP1_1_0)	FM(SEL_SSP1_1_1)	FM(SEL_SSP1_1_2)	FM(SEL_SSP1_1_3)
-#define MOD_SEL1_23_22_21	FM(SEL_SSP1_0_0)	FM(SEL_SSP1_0_1)	FM(SEL_SSP1_0_2)	FM(SEL_SSP1_0_3)	FM(SEL_SSP1_0_4)	F_(0, 0)		F_(0, 0)		F_(0, 0)
-#define MOD_SEL1_20		FM(SEL_SSI_0)		FM(SEL_SSI_1)
-#define MOD_SEL1_19		FM(SEL_SPEED_PULSE_0)	FM(SEL_SPEED_PULSE_1)
-#define MOD_SEL1_18_17		FM(SEL_SIMCARD_0)	FM(SEL_SIMCARD_1)	FM(SEL_SIMCARD_2)	FM(SEL_SIMCARD_3)
-#define MOD_SEL1_16		FM(SEL_SDHI2_0)		FM(SEL_SDHI2_1)
-#define MOD_SEL1_15_14		FM(SEL_SCIF4_0)		FM(SEL_SCIF4_1)		FM(SEL_SCIF4_2)		F_(0, 0)
-#define MOD_SEL1_13		FM(SEL_SCIF3_0)		FM(SEL_SCIF3_1)
-#define MOD_SEL1_12		FM(SEL_SCIF2_0)		FM(SEL_SCIF2_1)
-#define MOD_SEL1_11		FM(SEL_SCIF1_0)		FM(SEL_SCIF1_1)
-#define MOD_SEL1_10		FM(SEL_SATA_0)		FM(SEL_SATA_1)
-#define MOD_SEL1_9		FM(SEL_REMOCON_0)	FM(SEL_REMOCON_1)
-#define MOD_SEL1_6		FM(SEL_RCAN0_0)		FM(SEL_RCAN0_1)
-#define MOD_SEL1_5		FM(SEL_PWM6_0)		FM(SEL_PWM6_1)
-#define MOD_SEL1_4		FM(SEL_PWM5_0)		FM(SEL_PWM5_1)
-#define MOD_SEL1_3		FM(SEL_PWM4_0)		FM(SEL_PWM4_1)
-#define MOD_SEL1_2		FM(SEL_PWM3_0)		FM(SEL_PWM3_1)
-#define MOD_SEL1_1		FM(SEL_PWM2_0)		FM(SEL_PWM2_1)
-#define MOD_SEL1_0		FM(SEL_PWM1_0)		FM(SEL_PWM1_1)
-
-/* MOD_SEL2 */			/* 0 */			/* 1 */			/* 2 */			/* 3 */
-#define MOD_SEL2_31		FM(I2C_SEL_5_0)		FM(I2C_SEL_5_1)
-#define MOD_SEL2_30		FM(I2C_SEL_3_0)		FM(I2C_SEL_3_1)
-#define MOD_SEL2_29		FM(I2C_SEL_0_0)		FM(I2C_SEL_0_1)
-#define MOD_SEL2_0		FM(SEL_VIN4_0)		FM(SEL_VIN4_1)
-
-#define PINMUX_MOD_SELS\
-\
-			MOD_SEL1_31_30		MOD_SEL2_31 \
-MOD_SEL0_30_29					MOD_SEL2_30 \
-			MOD_SEL1_29_28_27	MOD_SEL2_29 \
-MOD_SEL0_28_27 \
-\
-MOD_SEL0_26_25_24	MOD_SEL1_26 \
-			MOD_SEL1_25_24 \
-\
-MOD_SEL0_23		MOD_SEL1_23_22_21 \
-MOD_SEL0_22 \
-MOD_SEL0_21_20 \
-			MOD_SEL1_20 \
-MOD_SEL0_19		MOD_SEL1_19 \
-MOD_SEL0_18		MOD_SEL1_18_17 \
-MOD_SEL0_17 \
-MOD_SEL0_16_15		MOD_SEL1_16 \
-			MOD_SEL1_15_14 \
-MOD_SEL0_14 \
-MOD_SEL0_13		MOD_SEL1_13 \
-MOD_SEL0_12		MOD_SEL1_12 \
-MOD_SEL0_11		MOD_SEL1_11 \
-MOD_SEL0_10		MOD_SEL1_10 \
-MOD_SEL0_9		MOD_SEL1_9 \
-MOD_SEL0_8 \
-MOD_SEL0_7_6 \
-			MOD_SEL1_6 \
-MOD_SEL0_5_4		MOD_SEL1_5 \
-			MOD_SEL1_4 \
-MOD_SEL0_3		MOD_SEL1_3 \
-MOD_SEL0_2_1		MOD_SEL1_2 \
-			MOD_SEL1_1 \
-			MOD_SEL1_0		MOD_SEL2_0
-
-/*
- * These pins are not able to be muxed but have other properties
- * that can be set, such as drive-strength or pull-up/pull-down enable.
- */
-#define PINMUX_STATIC \
-	FM(QSPI0_SPCLK) FM(QSPI0_SSL) FM(QSPI0_MOSI_IO0) FM(QSPI0_MISO_IO1) \
-	FM(QSPI0_IO2) FM(QSPI0_IO3) \
-	FM(QSPI1_SPCLK) FM(QSPI1_SSL) FM(QSPI1_MOSI_IO0) FM(QSPI1_MISO_IO1) \
-	FM(QSPI1_IO2) FM(QSPI1_IO3) \
-	FM(RPC_INT) FM(RPC_WP) FM(RPC_RESET) \
-	FM(AVB_TX_CTL) FM(AVB_TXC) FM(AVB_TD0) FM(AVB_TD1) FM(AVB_TD2) FM(AVB_TD3) \
-	FM(AVB_RX_CTL) FM(AVB_RXC) FM(AVB_RD0) FM(AVB_RD1) FM(AVB_RD2) FM(AVB_RD3) \
-	FM(AVB_TXCREFCLK) FM(AVB_MDIO) \
-	FM(CLKOUT) FM(PRESETOUT) \
-	FM(DU_DOTCLKIN0) FM(DU_DOTCLKIN1) FM(DU_DOTCLKIN2) FM(DU_DOTCLKIN3) \
-	FM(TMS) FM(TDO) FM(ASEBRK) FM(MLB_REF) FM(TDI) FM(TCK) FM(TRST) FM(EXTALR)
-
-#define PINMUX_PHYS \
-	FM(SCL0) FM(SDA0) FM(SCL3) FM(SDA3) FM(SCL5) FM(SDA5)
-
-enum {
-	PINMUX_RESERVED = 0,
-
-	PINMUX_DATA_BEGIN,
-	GP_ALL(DATA),
-	PINMUX_DATA_END,
-
-#define F_(x, y)
-#define FM(x)	FN_##x,
-	PINMUX_FUNCTION_BEGIN,
-	GP_ALL(FN),
-	PINMUX_GPSR
-	PINMUX_IPSR
-	PINMUX_MOD_SELS
-	PINMUX_FUNCTION_END,
-#undef F_
-#undef FM
-
-#define F_(x, y)
-#define FM(x)	x##_MARK,
-	PINMUX_MARK_BEGIN,
-	PINMUX_GPSR
-	PINMUX_IPSR
-	PINMUX_MOD_SELS
-	PINMUX_STATIC
-	PINMUX_PHYS
-	PINMUX_MARK_END,
-#undef F_
-#undef FM
-};
-
-static const u16 pinmux_data[] = {
-	PINMUX_DATA_GP_ALL(),
-
-	PINMUX_SINGLE(AVS1),
-	PINMUX_SINGLE(AVS2),
-	PINMUX_SINGLE(GP7_02),
-	PINMUX_SINGLE(GP7_03),
-	PINMUX_SINGLE(MSIOF0_RXD),
-	PINMUX_SINGLE(MSIOF0_SCK),
-	PINMUX_SINGLE(MSIOF0_TXD),
-	PINMUX_SINGLE(SD2_CMD),
-	PINMUX_SINGLE(SD3_CLK),
-	PINMUX_SINGLE(SD3_CMD),
-	PINMUX_SINGLE(SD3_DAT0),
-	PINMUX_SINGLE(SD3_DAT1),
-	PINMUX_SINGLE(SD3_DAT2),
-	PINMUX_SINGLE(SD3_DAT3),
-	PINMUX_SINGLE(SD3_DS),
-	PINMUX_SINGLE(SSI_SCK5),
-	PINMUX_SINGLE(SSI_SDATA5),
-	PINMUX_SINGLE(SSI_WS5),
-
-	/* IPSR0 */
-	PINMUX_IPSR_GPSR(IP0_3_0,	AVB_MDC),
-	PINMUX_IPSR_MSEL(IP0_3_0,	MSIOF2_SS2_C,		SEL_MSIOF2_2),
-
-	PINMUX_IPSR_GPSR(IP0_7_4,	AVB_MAGIC),
-	PINMUX_IPSR_MSEL(IP0_7_4,	MSIOF2_SS1_C,		SEL_MSIOF2_2),
-	PINMUX_IPSR_MSEL(IP0_7_4,	SCK4_A,			SEL_SCIF4_0),
-
-	PINMUX_IPSR_GPSR(IP0_11_8,	AVB_PHY_INT),
-	PINMUX_IPSR_MSEL(IP0_11_8,	MSIOF2_SYNC_C,		SEL_MSIOF2_2),
-	PINMUX_IPSR_MSEL(IP0_11_8,	RX4_A,			SEL_SCIF4_0),
-
-	PINMUX_IPSR_GPSR(IP0_15_12,	AVB_LINK),
-	PINMUX_IPSR_MSEL(IP0_15_12,	MSIOF2_SCK_C,		SEL_MSIOF2_2),
-	PINMUX_IPSR_MSEL(IP0_15_12,	TX4_A,			SEL_SCIF4_0),
-
-	PINMUX_IPSR_PHYS_MSEL(IP0_19_16, AVB_AVTP_MATCH_A,	I2C_SEL_5_0,	SEL_ETHERAVB_0),
-	PINMUX_IPSR_PHYS_MSEL(IP0_19_16, MSIOF2_RXD_C,		I2C_SEL_5_0,	SEL_MSIOF2_2),
-	PINMUX_IPSR_PHYS_MSEL(IP0_19_16, CTS4_N_A,		I2C_SEL_5_0,	SEL_SCIF4_0),
-	PINMUX_IPSR_PHYS(IP0_19_16,	SCL5,			I2C_SEL_5_1),
-
-	PINMUX_IPSR_PHYS_MSEL(IP0_23_20, AVB_AVTP_CAPTURE_A,	I2C_SEL_5_0,	SEL_ETHERAVB_0),
-	PINMUX_IPSR_PHYS_MSEL(IP0_23_20, MSIOF2_TXD_C,		I2C_SEL_5_0,	SEL_MSIOF2_2),
-	PINMUX_IPSR_PHYS_MSEL(IP0_23_20, RTS4_N_A,		I2C_SEL_5_0,	SEL_SCIF4_0),
-	PINMUX_IPSR_PHYS(IP0_23_20,	SDA5,			I2C_SEL_5_1),
-
-	PINMUX_IPSR_GPSR(IP0_27_24,	IRQ0),
-	PINMUX_IPSR_GPSR(IP0_27_24,	QPOLB),
-	PINMUX_IPSR_GPSR(IP0_27_24,	DU_CDE),
-	PINMUX_IPSR_MSEL(IP0_27_24,	VI4_DATA0_B,		SEL_VIN4_1),
-	PINMUX_IPSR_MSEL(IP0_27_24,	CAN0_TX_B,		SEL_RCAN0_1),
-	PINMUX_IPSR_MSEL(IP0_27_24,	CANFD0_TX_B,		SEL_CANFD0_1),
-
-	PINMUX_IPSR_GPSR(IP0_31_28,	IRQ1),
-	PINMUX_IPSR_GPSR(IP0_31_28,	QPOLA),
-	PINMUX_IPSR_GPSR(IP0_31_28,	DU_DISP),
-	PINMUX_IPSR_MSEL(IP0_31_28,	VI4_DATA1_B,		SEL_VIN4_1),
-	PINMUX_IPSR_MSEL(IP0_31_28,	CAN0_RX_B,		SEL_RCAN0_1),
-	PINMUX_IPSR_MSEL(IP0_31_28,	CANFD0_RX_B,		SEL_CANFD0_1),
-
-	/* IPSR1 */
-	PINMUX_IPSR_GPSR(IP1_3_0,	IRQ2),
-	PINMUX_IPSR_GPSR(IP1_3_0,	QCPV_QDE),
-	PINMUX_IPSR_GPSR(IP1_3_0,	DU_EXODDF_DU_ODDF_DISP_CDE),
-	PINMUX_IPSR_MSEL(IP1_3_0,	VI4_DATA2_B,		SEL_VIN4_1),
-	PINMUX_IPSR_MSEL(IP1_3_0,	PWM3_B,			SEL_PWM3_1),
-
-	PINMUX_IPSR_GPSR(IP1_7_4,	IRQ3),
-	PINMUX_IPSR_GPSR(IP1_7_4,	QSTVB_QVE),
-	PINMUX_IPSR_GPSR(IP1_7_4,	A25),
-	PINMUX_IPSR_GPSR(IP1_7_4,	DU_DOTCLKOUT1),
-	PINMUX_IPSR_MSEL(IP1_7_4,	VI4_DATA3_B,		SEL_VIN4_1),
-	PINMUX_IPSR_MSEL(IP1_7_4,	PWM4_B,			SEL_PWM4_1),
-
-	PINMUX_IPSR_GPSR(IP1_11_8,	IRQ4),
-	PINMUX_IPSR_GPSR(IP1_11_8,	QSTH_QHS),
-	PINMUX_IPSR_GPSR(IP1_11_8,	A24),
-	PINMUX_IPSR_GPSR(IP1_11_8,	DU_EXHSYNC_DU_HSYNC),
-	PINMUX_IPSR_MSEL(IP1_11_8,	VI4_DATA4_B,		SEL_VIN4_1),
-	PINMUX_IPSR_MSEL(IP1_11_8,	PWM5_B,			SEL_PWM5_1),
-
-	PINMUX_IPSR_GPSR(IP1_15_12,	IRQ5),
-	PINMUX_IPSR_GPSR(IP1_15_12,	QSTB_QHE),
-	PINMUX_IPSR_GPSR(IP1_15_12,	A23),
-	PINMUX_IPSR_GPSR(IP1_15_12,	DU_EXVSYNC_DU_VSYNC),
-	PINMUX_IPSR_MSEL(IP1_15_12,	VI4_DATA5_B,		SEL_VIN4_1),
-	PINMUX_IPSR_MSEL(IP1_15_12,	PWM6_B,			SEL_PWM6_1),
-
-	PINMUX_IPSR_GPSR(IP1_19_16,	PWM0),
-	PINMUX_IPSR_GPSR(IP1_19_16,	AVB_AVTP_PPS),
-	PINMUX_IPSR_GPSR(IP1_19_16,	A22),
-	PINMUX_IPSR_MSEL(IP1_19_16,	VI4_DATA6_B,		SEL_VIN4_1),
-	PINMUX_IPSR_MSEL(IP1_19_16,	IECLK_B,		SEL_IEBUS_1),
-
-	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, PWM1_A,		I2C_SEL_3_0,	SEL_PWM1_0),
-	PINMUX_IPSR_MSEL(IP1_23_20,	A21,			I2C_SEL_3_0),
-	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D,		I2C_SEL_3_0,	SEL_HSCIF3_3),
-	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B,		I2C_SEL_3_0,	SEL_VIN4_1),
-	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B,		I2C_SEL_3_0,	SEL_IEBUS_1),
-	PINMUX_IPSR_PHYS(IP1_23_20,	SCL3,			I2C_SEL_3_1),
-
-	PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A,		I2C_SEL_3_0,	SEL_PWM2_0),
-	PINMUX_IPSR_MSEL(IP1_27_24,	A20,			I2C_SEL_3_0),
-	PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D,		I2C_SEL_3_0,	SEL_HSCIF3_3),
-	PINMUX_IPSR_PHYS_MSEL(IP1_27_24, IETX_B,		I2C_SEL_3_0,	SEL_IEBUS_1),
-	PINMUX_IPSR_PHYS(IP1_27_24,	SDA3,			I2C_SEL_3_1),
-
-	PINMUX_IPSR_GPSR(IP1_31_28,	A0),
-	PINMUX_IPSR_GPSR(IP1_31_28,	LCDOUT16),
-	PINMUX_IPSR_MSEL(IP1_31_28,	MSIOF3_SYNC_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_GPSR(IP1_31_28,	VI4_DATA8),
-	PINMUX_IPSR_GPSR(IP1_31_28,	DU_DB0),
-	PINMUX_IPSR_MSEL(IP1_31_28,	PWM3_A,			SEL_PWM3_0),
-
-	/* IPSR2 */
-	PINMUX_IPSR_GPSR(IP2_3_0,	A1),
-	PINMUX_IPSR_GPSR(IP2_3_0,	LCDOUT17),
-	PINMUX_IPSR_MSEL(IP2_3_0,	MSIOF3_TXD_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_GPSR(IP2_3_0,	VI4_DATA9),
-	PINMUX_IPSR_GPSR(IP2_3_0,	DU_DB1),
-	PINMUX_IPSR_MSEL(IP2_3_0,	PWM4_A,			SEL_PWM4_0),
-
-	PINMUX_IPSR_GPSR(IP2_7_4,	A2),
-	PINMUX_IPSR_GPSR(IP2_7_4,	LCDOUT18),
-	PINMUX_IPSR_MSEL(IP2_7_4,	MSIOF3_SCK_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_GPSR(IP2_7_4,	VI4_DATA10),
-	PINMUX_IPSR_GPSR(IP2_7_4,	DU_DB2),
-	PINMUX_IPSR_MSEL(IP2_7_4,	PWM5_A,			SEL_PWM5_0),
-
-	PINMUX_IPSR_GPSR(IP2_11_8,	A3),
-	PINMUX_IPSR_GPSR(IP2_11_8,	LCDOUT19),
-	PINMUX_IPSR_MSEL(IP2_11_8,	MSIOF3_RXD_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_GPSR(IP2_11_8,	VI4_DATA11),
-	PINMUX_IPSR_GPSR(IP2_11_8,	DU_DB3),
-	PINMUX_IPSR_MSEL(IP2_11_8,	PWM6_A,			SEL_PWM6_0),
-
-	PINMUX_IPSR_GPSR(IP2_15_12,	A4),
-	PINMUX_IPSR_GPSR(IP2_15_12,	LCDOUT20),
-	PINMUX_IPSR_MSEL(IP2_15_12,	MSIOF3_SS1_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_GPSR(IP2_15_12,	VI4_DATA12),
-	PINMUX_IPSR_GPSR(IP2_15_12,	VI5_DATA12),
-	PINMUX_IPSR_GPSR(IP2_15_12,	DU_DB4),
-
-	PINMUX_IPSR_GPSR(IP2_19_16,	A5),
-	PINMUX_IPSR_GPSR(IP2_19_16,	LCDOUT21),
-	PINMUX_IPSR_MSEL(IP2_19_16,	MSIOF3_SS2_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_MSEL(IP2_19_16,	SCK4_B,			SEL_SCIF4_1),
-	PINMUX_IPSR_GPSR(IP2_19_16,	VI4_DATA13),
-	PINMUX_IPSR_GPSR(IP2_19_16,	VI5_DATA13),
-	PINMUX_IPSR_GPSR(IP2_19_16,	DU_DB5),
-
-	PINMUX_IPSR_GPSR(IP2_23_20,	A6),
-	PINMUX_IPSR_GPSR(IP2_23_20,	LCDOUT22),
-	PINMUX_IPSR_MSEL(IP2_23_20,	MSIOF2_SS1_A,		SEL_MSIOF2_0),
-	PINMUX_IPSR_MSEL(IP2_23_20,	RX4_B,			SEL_SCIF4_1),
-	PINMUX_IPSR_GPSR(IP2_23_20,	VI4_DATA14),
-	PINMUX_IPSR_GPSR(IP2_23_20,	VI5_DATA14),
-	PINMUX_IPSR_GPSR(IP2_23_20,	DU_DB6),
-
-	PINMUX_IPSR_GPSR(IP2_27_24,	A7),
-	PINMUX_IPSR_GPSR(IP2_27_24,	LCDOUT23),
-	PINMUX_IPSR_MSEL(IP2_27_24,	MSIOF2_SS2_A,		SEL_MSIOF2_0),
-	PINMUX_IPSR_MSEL(IP2_27_24,	TX4_B,			SEL_SCIF4_1),
-	PINMUX_IPSR_GPSR(IP2_27_24,	VI4_DATA15),
-	PINMUX_IPSR_GPSR(IP2_27_24,	VI5_DATA15),
-	PINMUX_IPSR_GPSR(IP2_27_24,	DU_DB7),
-
-	PINMUX_IPSR_GPSR(IP2_31_28,	A8),
-	PINMUX_IPSR_MSEL(IP2_31_28,	RX3_B,			SEL_SCIF3_1),
-	PINMUX_IPSR_MSEL(IP2_31_28,	MSIOF2_SYNC_A,		SEL_MSIOF2_0),
-	PINMUX_IPSR_MSEL(IP2_31_28,	HRX4_B,			SEL_HSCIF4_1),
-	PINMUX_IPSR_MSEL(IP2_31_28,	SDA6_A,			SEL_I2C6_0),
-	PINMUX_IPSR_MSEL(IP2_31_28,	AVB_AVTP_MATCH_B,	SEL_ETHERAVB_1),
-	PINMUX_IPSR_MSEL(IP2_31_28,	PWM1_B,			SEL_PWM1_1),
-
-	/* IPSR3 */
-	PINMUX_IPSR_GPSR(IP3_3_0,	A9),
-	PINMUX_IPSR_MSEL(IP3_3_0,	MSIOF2_SCK_A,		SEL_MSIOF2_0),
-	PINMUX_IPSR_MSEL(IP3_3_0,	CTS4_N_B,		SEL_SCIF4_1),
-	PINMUX_IPSR_GPSR(IP3_3_0,	VI5_VSYNC_N),
-
-	PINMUX_IPSR_GPSR(IP3_7_4,	A10),
-	PINMUX_IPSR_MSEL(IP3_7_4,	MSIOF2_RXD_A,		SEL_MSIOF2_0),
-	PINMUX_IPSR_MSEL(IP3_7_4,	RTS4_N_B,		SEL_SCIF4_1),
-	PINMUX_IPSR_GPSR(IP3_7_4,	VI5_HSYNC_N),
-
-	PINMUX_IPSR_GPSR(IP3_11_8,	A11),
-	PINMUX_IPSR_MSEL(IP3_11_8,	TX3_B,			SEL_SCIF3_1),
-	PINMUX_IPSR_MSEL(IP3_11_8,	MSIOF2_TXD_A,		SEL_MSIOF2_0),
-	PINMUX_IPSR_MSEL(IP3_11_8,	HTX4_B,			SEL_HSCIF4_1),
-	PINMUX_IPSR_GPSR(IP3_11_8,	HSCK4),
-	PINMUX_IPSR_GPSR(IP3_11_8,	VI5_FIELD),
-	PINMUX_IPSR_MSEL(IP3_11_8,	SCL6_A,			SEL_I2C6_0),
-	PINMUX_IPSR_MSEL(IP3_11_8,	AVB_AVTP_CAPTURE_B,	SEL_ETHERAVB_1),
-	PINMUX_IPSR_MSEL(IP3_11_8,	PWM2_B,			SEL_PWM2_1),
-
-	PINMUX_IPSR_GPSR(IP3_15_12,	A12),
-	PINMUX_IPSR_GPSR(IP3_15_12,	LCDOUT12),
-	PINMUX_IPSR_MSEL(IP3_15_12,	MSIOF3_SCK_C,		SEL_MSIOF3_2),
-	PINMUX_IPSR_MSEL(IP3_15_12,	HRX4_A,			SEL_HSCIF4_0),
-	PINMUX_IPSR_GPSR(IP3_15_12,	VI5_DATA8),
-	PINMUX_IPSR_GPSR(IP3_15_12,	DU_DG4),
-
-	PINMUX_IPSR_GPSR(IP3_19_16,	A13),
-	PINMUX_IPSR_GPSR(IP3_19_16,	LCDOUT13),
-	PINMUX_IPSR_MSEL(IP3_19_16,	MSIOF3_SYNC_C,		SEL_MSIOF3_2),
-	PINMUX_IPSR_MSEL(IP3_19_16,	HTX4_A,			SEL_HSCIF4_0),
-	PINMUX_IPSR_GPSR(IP3_19_16,	VI5_DATA9),
-	PINMUX_IPSR_GPSR(IP3_19_16,	DU_DG5),
-
-	PINMUX_IPSR_GPSR(IP3_23_20,	A14),
-	PINMUX_IPSR_GPSR(IP3_23_20,	LCDOUT14),
-	PINMUX_IPSR_MSEL(IP3_23_20,	MSIOF3_RXD_C,		SEL_MSIOF3_2),
-	PINMUX_IPSR_GPSR(IP3_23_20,	HCTS4_N),
-	PINMUX_IPSR_GPSR(IP3_23_20,	VI5_DATA10),
-	PINMUX_IPSR_GPSR(IP3_23_20,	DU_DG6),
-
-	PINMUX_IPSR_GPSR(IP3_27_24,	A15),
-	PINMUX_IPSR_GPSR(IP3_27_24,	LCDOUT15),
-	PINMUX_IPSR_MSEL(IP3_27_24,	MSIOF3_TXD_C,		SEL_MSIOF3_2),
-	PINMUX_IPSR_GPSR(IP3_27_24,	HRTS4_N),
-	PINMUX_IPSR_GPSR(IP3_27_24,	VI5_DATA11),
-	PINMUX_IPSR_GPSR(IP3_27_24,	DU_DG7),
-
-	PINMUX_IPSR_GPSR(IP3_31_28,	A16),
-	PINMUX_IPSR_GPSR(IP3_31_28,	LCDOUT8),
-	PINMUX_IPSR_GPSR(IP3_31_28,	VI4_FIELD),
-	PINMUX_IPSR_GPSR(IP3_31_28,	DU_DG0),
-
-	/* IPSR4 */
-	PINMUX_IPSR_GPSR(IP4_3_0,	A17),
-	PINMUX_IPSR_GPSR(IP4_3_0,	LCDOUT9),
-	PINMUX_IPSR_GPSR(IP4_3_0,	VI4_VSYNC_N),
-	PINMUX_IPSR_GPSR(IP4_3_0,	DU_DG1),
-
-	PINMUX_IPSR_GPSR(IP4_7_4,	A18),
-	PINMUX_IPSR_GPSR(IP4_7_4,	LCDOUT10),
-	PINMUX_IPSR_GPSR(IP4_7_4,	VI4_HSYNC_N),
-	PINMUX_IPSR_GPSR(IP4_7_4,	DU_DG2),
-
-	PINMUX_IPSR_GPSR(IP4_11_8,	A19),
-	PINMUX_IPSR_GPSR(IP4_11_8,	LCDOUT11),
-	PINMUX_IPSR_GPSR(IP4_11_8,	VI4_CLKENB),
-	PINMUX_IPSR_GPSR(IP4_11_8,	DU_DG3),
-
-	PINMUX_IPSR_GPSR(IP4_15_12,	CS0_N),
-	PINMUX_IPSR_GPSR(IP4_15_12,	VI5_CLKENB),
-
-	PINMUX_IPSR_GPSR(IP4_19_16,	CS1_N_A26),
-	PINMUX_IPSR_GPSR(IP4_19_16,	VI5_CLK),
-	PINMUX_IPSR_MSEL(IP4_19_16,	EX_WAIT0_B,		SEL_LBSC_1),
-
-	PINMUX_IPSR_GPSR(IP4_23_20,	BS_N),
-	PINMUX_IPSR_GPSR(IP4_23_20,	QSTVA_QVS),
-	PINMUX_IPSR_MSEL(IP4_23_20,	MSIOF3_SCK_D,		SEL_MSIOF3_3),
-	PINMUX_IPSR_GPSR(IP4_23_20,	SCK3),
-	PINMUX_IPSR_GPSR(IP4_23_20,	HSCK3),
-	PINMUX_IPSR_GPSR(IP4_23_20,	CAN1_TX),
-	PINMUX_IPSR_GPSR(IP4_23_20,	CANFD1_TX),
-	PINMUX_IPSR_MSEL(IP4_23_20,	IETX_A,			SEL_IEBUS_0),
-
-	PINMUX_IPSR_GPSR(IP4_27_24,	RD_N),
-	PINMUX_IPSR_MSEL(IP4_27_24,	MSIOF3_SYNC_D,		SEL_MSIOF3_3),
-	PINMUX_IPSR_MSEL(IP4_27_24,	RX3_A,			SEL_SCIF3_0),
-	PINMUX_IPSR_MSEL(IP4_27_24,	HRX3_A,			SEL_HSCIF3_0),
-	PINMUX_IPSR_MSEL(IP4_27_24,	CAN0_TX_A,		SEL_RCAN0_0),
-	PINMUX_IPSR_MSEL(IP4_27_24,	CANFD0_TX_A,		SEL_CANFD0_0),
-
-	PINMUX_IPSR_GPSR(IP4_31_28,	RD_WR_N),
-	PINMUX_IPSR_MSEL(IP4_31_28,	MSIOF3_RXD_D,		SEL_MSIOF3_3),
-	PINMUX_IPSR_MSEL(IP4_31_28,	TX3_A,			SEL_SCIF3_0),
-	PINMUX_IPSR_MSEL(IP4_31_28,	HTX3_A,			SEL_HSCIF3_0),
-	PINMUX_IPSR_MSEL(IP4_31_28,	CAN0_RX_A,		SEL_RCAN0_0),
-	PINMUX_IPSR_MSEL(IP4_31_28,	CANFD0_RX_A,		SEL_CANFD0_0),
-
-	/* IPSR5 */
-	PINMUX_IPSR_GPSR(IP5_3_0,	WE0_N),
-	PINMUX_IPSR_MSEL(IP5_3_0,	MSIOF3_TXD_D,		SEL_MSIOF3_3),
-	PINMUX_IPSR_GPSR(IP5_3_0,	CTS3_N),
-	PINMUX_IPSR_GPSR(IP5_3_0,	HCTS3_N),
-	PINMUX_IPSR_MSEL(IP5_3_0,	SCL6_B,			SEL_I2C6_1),
-	PINMUX_IPSR_GPSR(IP5_3_0,	CAN_CLK),
-	PINMUX_IPSR_MSEL(IP5_3_0,	IECLK_A,		SEL_IEBUS_0),
-
-	PINMUX_IPSR_GPSR(IP5_7_4,	WE1_N),
-	PINMUX_IPSR_MSEL(IP5_7_4,	MSIOF3_SS1_D,		SEL_MSIOF3_3),
-	PINMUX_IPSR_GPSR(IP5_7_4,	RTS3_N),
-	PINMUX_IPSR_GPSR(IP5_7_4,	HRTS3_N),
-	PINMUX_IPSR_MSEL(IP5_7_4,	SDA6_B,			SEL_I2C6_1),
-	PINMUX_IPSR_GPSR(IP5_7_4,	CAN1_RX),
-	PINMUX_IPSR_GPSR(IP5_7_4,	CANFD1_RX),
-	PINMUX_IPSR_MSEL(IP5_7_4,	IERX_A,			SEL_IEBUS_0),
-
-	PINMUX_IPSR_MSEL(IP5_11_8,	EX_WAIT0_A,		SEL_LBSC_0),
-	PINMUX_IPSR_GPSR(IP5_11_8,	QCLK),
-	PINMUX_IPSR_GPSR(IP5_11_8,	VI4_CLK),
-	PINMUX_IPSR_GPSR(IP5_11_8,	DU_DOTCLKOUT0),
-
-	PINMUX_IPSR_GPSR(IP5_15_12,	D0),
-	PINMUX_IPSR_MSEL(IP5_15_12,	MSIOF2_SS1_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_MSEL(IP5_15_12,	MSIOF3_SCK_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_GPSR(IP5_15_12,	VI4_DATA16),
-	PINMUX_IPSR_GPSR(IP5_15_12,	VI5_DATA0),
-
-	PINMUX_IPSR_GPSR(IP5_19_16,	D1),
-	PINMUX_IPSR_MSEL(IP5_19_16,	MSIOF2_SS2_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_MSEL(IP5_19_16,	MSIOF3_SYNC_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_GPSR(IP5_19_16,	VI4_DATA17),
-	PINMUX_IPSR_GPSR(IP5_19_16,	VI5_DATA1),
-
-	PINMUX_IPSR_GPSR(IP5_23_20,	D2),
-	PINMUX_IPSR_MSEL(IP5_23_20,	MSIOF3_RXD_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_GPSR(IP5_23_20,	VI4_DATA18),
-	PINMUX_IPSR_GPSR(IP5_23_20,	VI5_DATA2),
-
-	PINMUX_IPSR_GPSR(IP5_27_24,	D3),
-	PINMUX_IPSR_MSEL(IP5_27_24,	MSIOF3_TXD_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_GPSR(IP5_27_24,	VI4_DATA19),
-	PINMUX_IPSR_GPSR(IP5_27_24,	VI5_DATA3),
-
-	PINMUX_IPSR_GPSR(IP5_31_28,	D4),
-	PINMUX_IPSR_MSEL(IP5_31_28,	MSIOF2_SCK_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_GPSR(IP5_31_28,	VI4_DATA20),
-	PINMUX_IPSR_GPSR(IP5_31_28,	VI5_DATA4),
-
-	/* IPSR6 */
-	PINMUX_IPSR_GPSR(IP6_3_0,	D5),
-	PINMUX_IPSR_MSEL(IP6_3_0,	MSIOF2_SYNC_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_GPSR(IP6_3_0,	VI4_DATA21),
-	PINMUX_IPSR_GPSR(IP6_3_0,	VI5_DATA5),
-
-	PINMUX_IPSR_GPSR(IP6_7_4,	D6),
-	PINMUX_IPSR_MSEL(IP6_7_4,	MSIOF2_RXD_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_GPSR(IP6_7_4,	VI4_DATA22),
-	PINMUX_IPSR_GPSR(IP6_7_4,	VI5_DATA6),
-
-	PINMUX_IPSR_GPSR(IP6_11_8,	D7),
-	PINMUX_IPSR_MSEL(IP6_11_8,	MSIOF2_TXD_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_GPSR(IP6_11_8,	VI4_DATA23),
-	PINMUX_IPSR_GPSR(IP6_11_8,	VI5_DATA7),
-
-	PINMUX_IPSR_GPSR(IP6_15_12,	D8),
-	PINMUX_IPSR_GPSR(IP6_15_12,	LCDOUT0),
-	PINMUX_IPSR_MSEL(IP6_15_12,	MSIOF2_SCK_D,		SEL_MSIOF2_3),
-	PINMUX_IPSR_MSEL(IP6_15_12,	SCK4_C,			SEL_SCIF4_2),
-	PINMUX_IPSR_MSEL(IP6_15_12,	VI4_DATA0_A,		SEL_VIN4_0),
-	PINMUX_IPSR_GPSR(IP6_15_12,	DU_DR0),
-
-	PINMUX_IPSR_GPSR(IP6_19_16,	D9),
-	PINMUX_IPSR_GPSR(IP6_19_16,	LCDOUT1),
-	PINMUX_IPSR_MSEL(IP6_19_16,	MSIOF2_SYNC_D,		SEL_MSIOF2_3),
-	PINMUX_IPSR_MSEL(IP6_19_16,	VI4_DATA1_A,		SEL_VIN4_0),
-	PINMUX_IPSR_GPSR(IP6_19_16,	DU_DR1),
-
-	PINMUX_IPSR_GPSR(IP6_23_20,	D10),
-	PINMUX_IPSR_GPSR(IP6_23_20,	LCDOUT2),
-	PINMUX_IPSR_MSEL(IP6_23_20,	MSIOF2_RXD_D,		SEL_MSIOF2_3),
-	PINMUX_IPSR_MSEL(IP6_23_20,	HRX3_B,			SEL_HSCIF3_1),
-	PINMUX_IPSR_MSEL(IP6_23_20,	VI4_DATA2_A,		SEL_VIN4_0),
-	PINMUX_IPSR_MSEL(IP6_23_20,	CTS4_N_C,		SEL_SCIF4_2),
-	PINMUX_IPSR_GPSR(IP6_23_20,	DU_DR2),
-
-	PINMUX_IPSR_GPSR(IP6_27_24,	D11),
-	PINMUX_IPSR_GPSR(IP6_27_24,	LCDOUT3),
-	PINMUX_IPSR_MSEL(IP6_27_24,	MSIOF2_TXD_D,		SEL_MSIOF2_3),
-	PINMUX_IPSR_MSEL(IP6_27_24,	HTX3_B,			SEL_HSCIF3_1),
-	PINMUX_IPSR_MSEL(IP6_27_24,	VI4_DATA3_A,		SEL_VIN4_0),
-	PINMUX_IPSR_MSEL(IP6_27_24,	RTS4_N_C,		SEL_SCIF4_2),
-	PINMUX_IPSR_GPSR(IP6_27_24,	DU_DR3),
-
-	PINMUX_IPSR_GPSR(IP6_31_28,	D12),
-	PINMUX_IPSR_GPSR(IP6_31_28,	LCDOUT4),
-	PINMUX_IPSR_MSEL(IP6_31_28,	MSIOF2_SS1_D,		SEL_MSIOF2_3),
-	PINMUX_IPSR_MSEL(IP6_31_28,	RX4_C,			SEL_SCIF4_2),
-	PINMUX_IPSR_MSEL(IP6_31_28,	VI4_DATA4_A,		SEL_VIN4_0),
-	PINMUX_IPSR_GPSR(IP6_31_28,	DU_DR4),
-
-	/* IPSR7 */
-	PINMUX_IPSR_GPSR(IP7_3_0,	D13),
-	PINMUX_IPSR_GPSR(IP7_3_0,	LCDOUT5),
-	PINMUX_IPSR_MSEL(IP7_3_0,	MSIOF2_SS2_D,		SEL_MSIOF2_3),
-	PINMUX_IPSR_MSEL(IP7_3_0,	TX4_C,			SEL_SCIF4_2),
-	PINMUX_IPSR_MSEL(IP7_3_0,	VI4_DATA5_A,		SEL_VIN4_0),
-	PINMUX_IPSR_GPSR(IP7_3_0,	DU_DR5),
-
-	PINMUX_IPSR_GPSR(IP7_7_4,	D14),
-	PINMUX_IPSR_GPSR(IP7_7_4,	LCDOUT6),
-	PINMUX_IPSR_MSEL(IP7_7_4,	MSIOF3_SS1_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_MSEL(IP7_7_4,	HRX3_C,			SEL_HSCIF3_2),
-	PINMUX_IPSR_MSEL(IP7_7_4,	VI4_DATA6_A,		SEL_VIN4_0),
-	PINMUX_IPSR_GPSR(IP7_7_4,	DU_DR6),
-	PINMUX_IPSR_MSEL(IP7_7_4,	SCL6_C,			SEL_I2C6_2),
-
-	PINMUX_IPSR_GPSR(IP7_11_8,	D15),
-	PINMUX_IPSR_GPSR(IP7_11_8,	LCDOUT7),
-	PINMUX_IPSR_MSEL(IP7_11_8,	MSIOF3_SS2_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_MSEL(IP7_11_8,	HTX3_C,			SEL_HSCIF3_2),
-	PINMUX_IPSR_MSEL(IP7_11_8,	VI4_DATA7_A,		SEL_VIN4_0),
-	PINMUX_IPSR_GPSR(IP7_11_8,	DU_DR7),
-	PINMUX_IPSR_MSEL(IP7_11_8,	SDA6_C,			SEL_I2C6_2),
-
-	PINMUX_IPSR_GPSR(IP7_15_12,	FSCLKST),
-
-	PINMUX_IPSR_GPSR(IP7_19_16,	SD0_CLK),
-	PINMUX_IPSR_MSEL(IP7_19_16,	MSIOF1_SCK_E,		SEL_MSIOF1_4),
-	PINMUX_IPSR_MSEL(IP7_19_16,	STP_OPWM_0_B,		SEL_SSP1_0_1),
-
-	PINMUX_IPSR_GPSR(IP7_23_20,	SD0_CMD),
-	PINMUX_IPSR_MSEL(IP7_23_20,	MSIOF1_SYNC_E,		SEL_MSIOF1_4),
-	PINMUX_IPSR_MSEL(IP7_23_20,	STP_IVCXO27_0_B,	SEL_SSP1_0_1),
-
-	PINMUX_IPSR_GPSR(IP7_27_24,	SD0_DAT0),
-	PINMUX_IPSR_MSEL(IP7_27_24,	MSIOF1_RXD_E,		SEL_MSIOF1_4),
-	PINMUX_IPSR_MSEL(IP7_27_24,	TS_SCK0_B,		SEL_TSIF0_1),
-	PINMUX_IPSR_MSEL(IP7_27_24,	STP_ISCLK_0_B,		SEL_SSP1_0_1),
-
-	PINMUX_IPSR_GPSR(IP7_31_28,	SD0_DAT1),
-	PINMUX_IPSR_MSEL(IP7_31_28,	MSIOF1_TXD_E,		SEL_MSIOF1_4),
-	PINMUX_IPSR_MSEL(IP7_31_28,	TS_SPSYNC0_B,		SEL_TSIF0_1),
-	PINMUX_IPSR_MSEL(IP7_31_28,	STP_ISSYNC_0_B,		SEL_SSP1_0_1),
-
-	/* IPSR8 */
-	PINMUX_IPSR_GPSR(IP8_3_0,	SD0_DAT2),
-	PINMUX_IPSR_MSEL(IP8_3_0,	MSIOF1_SS1_E,		SEL_MSIOF1_4),
-	PINMUX_IPSR_MSEL(IP8_3_0,	TS_SDAT0_B,		SEL_TSIF0_1),
-	PINMUX_IPSR_MSEL(IP8_3_0,	STP_ISD_0_B,		SEL_SSP1_0_1),
-
-	PINMUX_IPSR_GPSR(IP8_7_4,	SD0_DAT3),
-	PINMUX_IPSR_MSEL(IP8_7_4,	MSIOF1_SS2_E,		SEL_MSIOF1_4),
-	PINMUX_IPSR_MSEL(IP8_7_4,	TS_SDEN0_B,		SEL_TSIF0_1),
-	PINMUX_IPSR_MSEL(IP8_7_4,	STP_ISEN_0_B,		SEL_SSP1_0_1),
-
-	PINMUX_IPSR_GPSR(IP8_11_8,	SD1_CLK),
-	PINMUX_IPSR_MSEL(IP8_11_8,	MSIOF1_SCK_G,		SEL_MSIOF1_6),
-	PINMUX_IPSR_MSEL(IP8_11_8,	SIM0_CLK_A,		SEL_SIMCARD_0),
-
-	PINMUX_IPSR_GPSR(IP8_15_12,	SD1_CMD),
-	PINMUX_IPSR_MSEL(IP8_15_12,	MSIOF1_SYNC_G,		SEL_MSIOF1_6),
-	PINMUX_IPSR_MSEL(IP8_15_12,	SIM0_D_A,		SEL_SIMCARD_0),
-	PINMUX_IPSR_MSEL(IP8_15_12,	STP_IVCXO27_1_B,	SEL_SSP1_1_1),
-
-	PINMUX_IPSR_GPSR(IP8_19_16,	SD1_DAT0),
-	PINMUX_IPSR_GPSR(IP8_19_16,	SD2_DAT4),
-	PINMUX_IPSR_MSEL(IP8_19_16,	MSIOF1_RXD_G,		SEL_MSIOF1_6),
-	PINMUX_IPSR_MSEL(IP8_19_16,	TS_SCK1_B,		SEL_TSIF1_1),
-	PINMUX_IPSR_MSEL(IP8_19_16,	STP_ISCLK_1_B,		SEL_SSP1_1_1),
-
-	PINMUX_IPSR_GPSR(IP8_23_20,	SD1_DAT1),
-	PINMUX_IPSR_GPSR(IP8_23_20,	SD2_DAT5),
-	PINMUX_IPSR_MSEL(IP8_23_20,	MSIOF1_TXD_G,		SEL_MSIOF1_6),
-	PINMUX_IPSR_MSEL(IP8_23_20,	TS_SPSYNC1_B,		SEL_TSIF1_1),
-	PINMUX_IPSR_MSEL(IP8_23_20,	STP_ISSYNC_1_B,		SEL_SSP1_1_1),
-
-	PINMUX_IPSR_GPSR(IP8_27_24,	SD1_DAT2),
-	PINMUX_IPSR_GPSR(IP8_27_24,	SD2_DAT6),
-	PINMUX_IPSR_MSEL(IP8_27_24,	MSIOF1_SS1_G,		SEL_MSIOF1_6),
-	PINMUX_IPSR_MSEL(IP8_27_24,	TS_SDAT1_B,		SEL_TSIF1_1),
-	PINMUX_IPSR_MSEL(IP8_27_24,	STP_ISD_1_B,		SEL_SSP1_1_1),
-
-	PINMUX_IPSR_GPSR(IP8_31_28,	SD1_DAT3),
-	PINMUX_IPSR_GPSR(IP8_31_28,	SD2_DAT7),
-	PINMUX_IPSR_MSEL(IP8_31_28,	MSIOF1_SS2_G,		SEL_MSIOF1_6),
-	PINMUX_IPSR_MSEL(IP8_31_28,	TS_SDEN1_B,		SEL_TSIF1_1),
-	PINMUX_IPSR_MSEL(IP8_31_28,	STP_ISEN_1_B,		SEL_SSP1_1_1),
-
-	/* IPSR9 */
-	PINMUX_IPSR_GPSR(IP9_3_0,	SD2_CLK),
-
-	PINMUX_IPSR_GPSR(IP9_7_4,	SD2_DAT0),
-
-	PINMUX_IPSR_GPSR(IP9_11_8,	SD2_DAT1),
-
-	PINMUX_IPSR_GPSR(IP9_15_12,	SD2_DAT2),
-
-	PINMUX_IPSR_GPSR(IP9_19_16,	SD2_DAT3),
-
-	PINMUX_IPSR_GPSR(IP9_23_20,	SD2_DS),
-	PINMUX_IPSR_MSEL(IP9_23_20,	SATA_DEVSLP_B,		SEL_SATA_1),
-
-	PINMUX_IPSR_GPSR(IP9_27_24,	SD3_DAT4),
-	PINMUX_IPSR_MSEL(IP9_27_24,	SD2_CD_A,		SEL_SDHI2_0),
-
-	PINMUX_IPSR_GPSR(IP9_31_28,	SD3_DAT5),
-	PINMUX_IPSR_MSEL(IP9_31_28,	SD2_WP_A,		SEL_SDHI2_0),
-
-	/* IPSR10 */
-	PINMUX_IPSR_GPSR(IP10_3_0,	SD3_DAT6),
-	PINMUX_IPSR_GPSR(IP10_3_0,	SD3_CD),
-
-	PINMUX_IPSR_GPSR(IP10_7_4,	SD3_DAT7),
-	PINMUX_IPSR_GPSR(IP10_7_4,	SD3_WP),
-
-	PINMUX_IPSR_GPSR(IP10_11_8,	SD0_CD),
-	PINMUX_IPSR_MSEL(IP10_11_8,	SCL2_B,			SEL_I2C2_1),
-	PINMUX_IPSR_MSEL(IP10_11_8,	SIM0_RST_A,		SEL_SIMCARD_0),
-
-	PINMUX_IPSR_GPSR(IP10_15_12,	SD0_WP),
-	PINMUX_IPSR_MSEL(IP10_15_12,	SDA2_B,			SEL_I2C2_1),
-
-	PINMUX_IPSR_MSEL(IP10_19_16,	SD1_CD,			I2C_SEL_0_0),
-	PINMUX_IPSR_PHYS_MSEL(IP10_19_16, SIM0_CLK_B,		I2C_SEL_0_0,	SEL_SIMCARD_1),
-	PINMUX_IPSR_PHYS(IP10_19_16,	SCL0,			I2C_SEL_0_1),
-
-	PINMUX_IPSR_MSEL(IP10_23_20,	SD1_WP,			I2C_SEL_0_0),
-	PINMUX_IPSR_PHYS_MSEL(IP10_23_20, SIM0_D_B,		I2C_SEL_0_0,	SEL_SIMCARD_1),
-	PINMUX_IPSR_PHYS(IP10_23_20,	SDA0,			I2C_SEL_0_1),
-
-	PINMUX_IPSR_GPSR(IP10_27_24,	SCK0),
-	PINMUX_IPSR_MSEL(IP10_27_24,	HSCK1_B,		SEL_HSCIF1_1),
-	PINMUX_IPSR_MSEL(IP10_27_24,	MSIOF1_SS2_B,		SEL_MSIOF1_1),
-	PINMUX_IPSR_MSEL(IP10_27_24,	AUDIO_CLKC_B,		SEL_ADG_1),
-	PINMUX_IPSR_MSEL(IP10_27_24,	SDA2_A,			SEL_I2C2_0),
-	PINMUX_IPSR_MSEL(IP10_27_24,	SIM0_RST_B,		SEL_SIMCARD_1),
-	PINMUX_IPSR_MSEL(IP10_27_24,	STP_OPWM_0_C,		SEL_SSP1_0_2),
-	PINMUX_IPSR_MSEL(IP10_27_24,	RIF0_CLK_B,		SEL_DRIF0_1),
-	PINMUX_IPSR_GPSR(IP10_27_24,	ADICHS2),
-
-	PINMUX_IPSR_GPSR(IP10_31_28,	RX0),
-	PINMUX_IPSR_MSEL(IP10_31_28,	HRX1_B,			SEL_HSCIF1_1),
-	PINMUX_IPSR_MSEL(IP10_31_28,	TS_SCK0_C,		SEL_TSIF0_2),
-	PINMUX_IPSR_MSEL(IP10_31_28,	STP_ISCLK_0_C,		SEL_SSP1_0_2),
-	PINMUX_IPSR_MSEL(IP10_31_28,	RIF0_D0_B,		SEL_DRIF0_1),
-
-	/* IPSR11 */
-	PINMUX_IPSR_GPSR(IP11_3_0,	TX0),
-	PINMUX_IPSR_MSEL(IP11_3_0,	HTX1_B,			SEL_HSCIF1_1),
-	PINMUX_IPSR_MSEL(IP11_3_0,	TS_SPSYNC0_C,		SEL_TSIF0_2),
-	PINMUX_IPSR_MSEL(IP11_3_0,	STP_ISSYNC_0_C,		SEL_SSP1_0_2),
-	PINMUX_IPSR_MSEL(IP11_3_0,	RIF0_D1_B,		SEL_DRIF0_1),
-
-	PINMUX_IPSR_GPSR(IP11_7_4,	CTS0_N),
-	PINMUX_IPSR_MSEL(IP11_7_4,	HCTS1_N_B,		SEL_HSCIF1_1),
-	PINMUX_IPSR_MSEL(IP11_7_4,	MSIOF1_SYNC_B,		SEL_MSIOF1_1),
-	PINMUX_IPSR_MSEL(IP11_7_4,	TS_SPSYNC1_C,		SEL_TSIF1_2),
-	PINMUX_IPSR_MSEL(IP11_7_4,	STP_ISSYNC_1_C,		SEL_SSP1_1_2),
-	PINMUX_IPSR_MSEL(IP11_7_4,	RIF1_SYNC_B,		SEL_DRIF1_1),
-	PINMUX_IPSR_MSEL(IP11_7_4,	AUDIO_CLKOUT_C,		SEL_ADG_2),
-	PINMUX_IPSR_GPSR(IP11_7_4,	ADICS_SAMP),
-
-	PINMUX_IPSR_GPSR(IP11_11_8,	RTS0_N),
-	PINMUX_IPSR_MSEL(IP11_11_8,	HRTS1_N_B,		SEL_HSCIF1_1),
-	PINMUX_IPSR_MSEL(IP11_11_8,	MSIOF1_SS1_B,		SEL_MSIOF1_1),
-	PINMUX_IPSR_MSEL(IP11_11_8,	AUDIO_CLKA_B,		SEL_ADG_1),
-	PINMUX_IPSR_MSEL(IP11_11_8,	SCL2_A,			SEL_I2C2_0),
-	PINMUX_IPSR_MSEL(IP11_11_8,	STP_IVCXO27_1_C,	SEL_SSP1_1_2),
-	PINMUX_IPSR_MSEL(IP11_11_8,	RIF0_SYNC_B,		SEL_DRIF0_1),
-	PINMUX_IPSR_GPSR(IP11_11_8,	ADICHS1),
-
-	PINMUX_IPSR_MSEL(IP11_15_12,	RX1_A,			SEL_SCIF1_0),
-	PINMUX_IPSR_MSEL(IP11_15_12,	HRX1_A,			SEL_HSCIF1_0),
-	PINMUX_IPSR_MSEL(IP11_15_12,	TS_SDAT0_C,		SEL_TSIF0_2),
-	PINMUX_IPSR_MSEL(IP11_15_12,	STP_ISD_0_C,		SEL_SSP1_0_2),
-	PINMUX_IPSR_MSEL(IP11_15_12,	RIF1_CLK_C,		SEL_DRIF1_2),
-
-	PINMUX_IPSR_MSEL(IP11_19_16,	TX1_A,			SEL_SCIF1_0),
-	PINMUX_IPSR_MSEL(IP11_19_16,	HTX1_A,			SEL_HSCIF1_0),
-	PINMUX_IPSR_MSEL(IP11_19_16,	TS_SDEN0_C,		SEL_TSIF0_2),
-	PINMUX_IPSR_MSEL(IP11_19_16,	STP_ISEN_0_C,		SEL_SSP1_0_2),
-	PINMUX_IPSR_MSEL(IP11_19_16,	RIF1_D0_C,		SEL_DRIF1_2),
-
-	PINMUX_IPSR_GPSR(IP11_23_20,	CTS1_N),
-	PINMUX_IPSR_MSEL(IP11_23_20,	HCTS1_N_A,		SEL_HSCIF1_0),
-	PINMUX_IPSR_MSEL(IP11_23_20,	MSIOF1_RXD_B,		SEL_MSIOF1_1),
-	PINMUX_IPSR_MSEL(IP11_23_20,	TS_SDEN1_C,		SEL_TSIF1_2),
-	PINMUX_IPSR_MSEL(IP11_23_20,	STP_ISEN_1_C,		SEL_SSP1_1_2),
-	PINMUX_IPSR_MSEL(IP11_23_20,	RIF1_D0_B,		SEL_DRIF1_1),
-	PINMUX_IPSR_GPSR(IP11_23_20,	ADIDATA),
-
-	PINMUX_IPSR_GPSR(IP11_27_24,	RTS1_N),
-	PINMUX_IPSR_MSEL(IP11_27_24,	HRTS1_N_A,		SEL_HSCIF1_0),
-	PINMUX_IPSR_MSEL(IP11_27_24,	MSIOF1_TXD_B,		SEL_MSIOF1_1),
-	PINMUX_IPSR_MSEL(IP11_27_24,	TS_SDAT1_C,		SEL_TSIF1_2),
-	PINMUX_IPSR_MSEL(IP11_27_24,	STP_ISD_1_C,		SEL_SSP1_1_2),
-	PINMUX_IPSR_MSEL(IP11_27_24,	RIF1_D1_B,		SEL_DRIF1_1),
-	PINMUX_IPSR_GPSR(IP11_27_24,	ADICHS0),
-
-	PINMUX_IPSR_GPSR(IP11_31_28,	SCK2),
-	PINMUX_IPSR_MSEL(IP11_31_28,	SCIF_CLK_B,		SEL_SCIF1_1),
-	PINMUX_IPSR_MSEL(IP11_31_28,	MSIOF1_SCK_B,		SEL_MSIOF1_1),
-	PINMUX_IPSR_MSEL(IP11_31_28,	TS_SCK1_C,		SEL_TSIF1_2),
-	PINMUX_IPSR_MSEL(IP11_31_28,	STP_ISCLK_1_C,		SEL_SSP1_1_2),
-	PINMUX_IPSR_MSEL(IP11_31_28,	RIF1_CLK_B,		SEL_DRIF1_1),
-	PINMUX_IPSR_GPSR(IP11_31_28,	ADICLK),
-
-	/* IPSR12 */
-	PINMUX_IPSR_MSEL(IP12_3_0,	TX2_A,			SEL_SCIF2_0),
-	PINMUX_IPSR_MSEL(IP12_3_0,	SD2_CD_B,		SEL_SDHI2_1),
-	PINMUX_IPSR_MSEL(IP12_3_0,	SCL1_A,			SEL_I2C1_0),
-	PINMUX_IPSR_MSEL(IP12_3_0,	FMCLK_A,		SEL_FM_0),
-	PINMUX_IPSR_MSEL(IP12_3_0,	RIF1_D1_C,		SEL_DRIF1_2),
-	PINMUX_IPSR_MSEL(IP12_3_0,	FSO_CFE_0_B,		SEL_FSO_1),
-
-	PINMUX_IPSR_MSEL(IP12_7_4,	RX2_A,			SEL_SCIF2_0),
-	PINMUX_IPSR_MSEL(IP12_7_4,	SD2_WP_B,		SEL_SDHI2_1),
-	PINMUX_IPSR_MSEL(IP12_7_4,	SDA1_A,			SEL_I2C1_0),
-	PINMUX_IPSR_MSEL(IP12_7_4,	FMIN_A,			SEL_FM_0),
-	PINMUX_IPSR_MSEL(IP12_7_4,	RIF1_SYNC_C,		SEL_DRIF1_2),
-	PINMUX_IPSR_MSEL(IP12_7_4,	FSO_CFE_1_B,		SEL_FSO_1),
-
-	PINMUX_IPSR_GPSR(IP12_11_8,	HSCK0),
-	PINMUX_IPSR_MSEL(IP12_11_8,	MSIOF1_SCK_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP12_11_8,	AUDIO_CLKB_A,		SEL_ADG_0),
-	PINMUX_IPSR_MSEL(IP12_11_8,	SSI_SDATA1_B,		SEL_SSI_1),
-	PINMUX_IPSR_MSEL(IP12_11_8,	TS_SCK0_D,		SEL_TSIF0_3),
-	PINMUX_IPSR_MSEL(IP12_11_8,	STP_ISCLK_0_D,		SEL_SSP1_0_3),
-	PINMUX_IPSR_MSEL(IP12_11_8,	RIF0_CLK_C,		SEL_DRIF0_2),
-
-	PINMUX_IPSR_GPSR(IP12_15_12,	HRX0),
-	PINMUX_IPSR_MSEL(IP12_15_12,	MSIOF1_RXD_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP12_15_12,	SSI_SDATA2_B,		SEL_SSI_1),
-	PINMUX_IPSR_MSEL(IP12_15_12,	TS_SDEN0_D,		SEL_TSIF0_3),
-	PINMUX_IPSR_MSEL(IP12_15_12,	STP_ISEN_0_D,		SEL_SSP1_0_3),
-	PINMUX_IPSR_MSEL(IP12_15_12,	RIF0_D0_C,		SEL_DRIF0_2),
-
-	PINMUX_IPSR_GPSR(IP12_19_16,	HTX0),
-	PINMUX_IPSR_MSEL(IP12_19_16,	MSIOF1_TXD_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP12_19_16,	SSI_SDATA9_B,		SEL_SSI_1),
-	PINMUX_IPSR_MSEL(IP12_19_16,	TS_SDAT0_D,		SEL_TSIF0_3),
-	PINMUX_IPSR_MSEL(IP12_19_16,	STP_ISD_0_D,		SEL_SSP1_0_3),
-	PINMUX_IPSR_MSEL(IP12_19_16,	RIF0_D1_C,		SEL_DRIF0_2),
-
-	PINMUX_IPSR_GPSR(IP12_23_20,	HCTS0_N),
-	PINMUX_IPSR_MSEL(IP12_23_20,	RX2_B,			SEL_SCIF2_1),
-	PINMUX_IPSR_MSEL(IP12_23_20,	MSIOF1_SYNC_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP12_23_20,	SSI_SCK9_A,		SEL_SSI_0),
-	PINMUX_IPSR_MSEL(IP12_23_20,	TS_SPSYNC0_D,		SEL_TSIF0_3),
-	PINMUX_IPSR_MSEL(IP12_23_20,	STP_ISSYNC_0_D,		SEL_SSP1_0_3),
-	PINMUX_IPSR_MSEL(IP12_23_20,	RIF0_SYNC_C,		SEL_DRIF0_2),
-	PINMUX_IPSR_MSEL(IP12_23_20,	AUDIO_CLKOUT1_A,	SEL_ADG_0),
-
-	PINMUX_IPSR_GPSR(IP12_27_24,	HRTS0_N),
-	PINMUX_IPSR_MSEL(IP12_27_24,	TX2_B,			SEL_SCIF2_1),
-	PINMUX_IPSR_MSEL(IP12_27_24,	MSIOF1_SS1_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP12_27_24,	SSI_WS9_A,		SEL_SSI_0),
-	PINMUX_IPSR_MSEL(IP12_27_24,	STP_IVCXO27_0_D,	SEL_SSP1_0_3),
-	PINMUX_IPSR_MSEL(IP12_27_24,	BPFCLK_A,		SEL_FM_0),
-	PINMUX_IPSR_MSEL(IP12_27_24,	AUDIO_CLKOUT2_A,	SEL_ADG_0),
-
-	PINMUX_IPSR_GPSR(IP12_31_28,	MSIOF0_SYNC),
-	PINMUX_IPSR_MSEL(IP12_31_28,	AUDIO_CLKOUT_A,		SEL_ADG_0),
-
-	/* IPSR13 */
-	PINMUX_IPSR_GPSR(IP13_3_0,	MSIOF0_SS1),
-	PINMUX_IPSR_GPSR(IP13_3_0,	RX5),
-	PINMUX_IPSR_MSEL(IP13_3_0,	AUDIO_CLKA_C,		SEL_ADG_2),
-	PINMUX_IPSR_MSEL(IP13_3_0,	SSI_SCK2_A,		SEL_SSI_0),
-	PINMUX_IPSR_MSEL(IP13_3_0,	STP_IVCXO27_0_C,	SEL_SSP1_0_2),
-	PINMUX_IPSR_MSEL(IP13_3_0,	AUDIO_CLKOUT3_A,	SEL_ADG_0),
-	PINMUX_IPSR_MSEL(IP13_3_0,	TCLK1_B,		SEL_TIMER_TMU_1),
-
-	PINMUX_IPSR_GPSR(IP13_7_4,	MSIOF0_SS2),
-	PINMUX_IPSR_GPSR(IP13_7_4,	TX5),
-	PINMUX_IPSR_MSEL(IP13_7_4,	MSIOF1_SS2_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP13_7_4,	AUDIO_CLKC_A,		SEL_ADG_0),
-	PINMUX_IPSR_MSEL(IP13_7_4,	SSI_WS2_A,		SEL_SSI_0),
-	PINMUX_IPSR_MSEL(IP13_7_4,	STP_OPWM_0_D,		SEL_SSP1_0_3),
-	PINMUX_IPSR_MSEL(IP13_7_4,	AUDIO_CLKOUT_D,		SEL_ADG_3),
-	PINMUX_IPSR_MSEL(IP13_7_4,	SPEEDIN_B,		SEL_SPEED_PULSE_1),
-
-	PINMUX_IPSR_GPSR(IP13_11_8,	MLB_CLK),
-	PINMUX_IPSR_MSEL(IP13_11_8,	MSIOF1_SCK_F,		SEL_MSIOF1_5),
-	PINMUX_IPSR_MSEL(IP13_11_8,	SCL1_B,			SEL_I2C1_1),
-
-	PINMUX_IPSR_GPSR(IP13_15_12,	MLB_SIG),
-	PINMUX_IPSR_MSEL(IP13_15_12,	RX1_B,			SEL_SCIF1_1),
-	PINMUX_IPSR_MSEL(IP13_15_12,	MSIOF1_SYNC_F,		SEL_MSIOF1_5),
-	PINMUX_IPSR_MSEL(IP13_15_12,	SDA1_B,			SEL_I2C1_1),
-
-	PINMUX_IPSR_GPSR(IP13_19_16,	MLB_DAT),
-	PINMUX_IPSR_MSEL(IP13_19_16,	TX1_B,			SEL_SCIF1_1),
-	PINMUX_IPSR_MSEL(IP13_19_16,	MSIOF1_RXD_F,		SEL_MSIOF1_5),
-
-	PINMUX_IPSR_GPSR(IP13_23_20,	SSI_SCK01239),
-	PINMUX_IPSR_MSEL(IP13_23_20,	MSIOF1_TXD_F,		SEL_MSIOF1_5),
-
-	PINMUX_IPSR_GPSR(IP13_27_24,	SSI_WS01239),
-	PINMUX_IPSR_MSEL(IP13_27_24,	MSIOF1_SS1_F,		SEL_MSIOF1_5),
-
-	PINMUX_IPSR_GPSR(IP13_31_28,	SSI_SDATA0),
-	PINMUX_IPSR_MSEL(IP13_31_28,	MSIOF1_SS2_F,		SEL_MSIOF1_5),
-
-	/* IPSR14 */
-	PINMUX_IPSR_MSEL(IP14_3_0,	SSI_SDATA1_A,		SEL_SSI_0),
-
-	PINMUX_IPSR_MSEL(IP14_7_4,	SSI_SDATA2_A,		SEL_SSI_0),
-	PINMUX_IPSR_MSEL(IP14_7_4,	SSI_SCK1_B,		SEL_SSI_1),
-
-	PINMUX_IPSR_GPSR(IP14_11_8,	SSI_SCK349),
-	PINMUX_IPSR_MSEL(IP14_11_8,	MSIOF1_SS1_A,		SEL_MSIOF1_0),
-	PINMUX_IPSR_MSEL(IP14_11_8,	STP_OPWM_0_A,		SEL_SSP1_0_0),
-
-	PINMUX_IPSR_GPSR(IP14_15_12,	SSI_WS349),
-	PINMUX_IPSR_MSEL(IP14_15_12,	HCTS2_N_A,		SEL_HSCIF2_0),
-	PINMUX_IPSR_MSEL(IP14_15_12,	MSIOF1_SS2_A,		SEL_MSIOF1_0),
-	PINMUX_IPSR_MSEL(IP14_15_12,	STP_IVCXO27_0_A,	SEL_SSP1_0_0),
-
-	PINMUX_IPSR_GPSR(IP14_19_16,	SSI_SDATA3),
-	PINMUX_IPSR_MSEL(IP14_19_16,	HRTS2_N_A,		SEL_HSCIF2_0),
-	PINMUX_IPSR_MSEL(IP14_19_16,	MSIOF1_TXD_A,		SEL_MSIOF1_0),
-	PINMUX_IPSR_MSEL(IP14_19_16,	TS_SCK0_A,		SEL_TSIF0_0),
-	PINMUX_IPSR_MSEL(IP14_19_16,	STP_ISCLK_0_A,		SEL_SSP1_0_0),
-	PINMUX_IPSR_MSEL(IP14_19_16,	RIF0_D1_A,		SEL_DRIF0_0),
-	PINMUX_IPSR_MSEL(IP14_19_16,	RIF2_D0_A,		SEL_DRIF2_0),
-
-	PINMUX_IPSR_GPSR(IP14_23_20,	SSI_SCK4),
-	PINMUX_IPSR_MSEL(IP14_23_20,	HRX2_A,			SEL_HSCIF2_0),
-	PINMUX_IPSR_MSEL(IP14_23_20,	MSIOF1_SCK_A,		SEL_MSIOF1_0),
-	PINMUX_IPSR_MSEL(IP14_23_20,	TS_SDAT0_A,		SEL_TSIF0_0),
-	PINMUX_IPSR_MSEL(IP14_23_20,	STP_ISD_0_A,		SEL_SSP1_0_0),
-	PINMUX_IPSR_MSEL(IP14_23_20,	RIF0_CLK_A,		SEL_DRIF0_0),
-	PINMUX_IPSR_MSEL(IP14_23_20,	RIF2_CLK_A,		SEL_DRIF2_0),
-
-	PINMUX_IPSR_GPSR(IP14_27_24,	SSI_WS4),
-	PINMUX_IPSR_MSEL(IP14_27_24,	HTX2_A,			SEL_HSCIF2_0),
-	PINMUX_IPSR_MSEL(IP14_27_24,	MSIOF1_SYNC_A,		SEL_MSIOF1_0),
-	PINMUX_IPSR_MSEL(IP14_27_24,	TS_SDEN0_A,		SEL_TSIF0_0),
-	PINMUX_IPSR_MSEL(IP14_27_24,	STP_ISEN_0_A,		SEL_SSP1_0_0),
-	PINMUX_IPSR_MSEL(IP14_27_24,	RIF0_SYNC_A,		SEL_DRIF0_0),
-	PINMUX_IPSR_MSEL(IP14_27_24,	RIF2_SYNC_A,		SEL_DRIF2_0),
-
-	PINMUX_IPSR_GPSR(IP14_31_28,	SSI_SDATA4),
-	PINMUX_IPSR_MSEL(IP14_31_28,	HSCK2_A,		SEL_HSCIF2_0),
-	PINMUX_IPSR_MSEL(IP14_31_28,	MSIOF1_RXD_A,		SEL_MSIOF1_0),
-	PINMUX_IPSR_MSEL(IP14_31_28,	TS_SPSYNC0_A,		SEL_TSIF0_0),
-	PINMUX_IPSR_MSEL(IP14_31_28,	STP_ISSYNC_0_A,		SEL_SSP1_0_0),
-	PINMUX_IPSR_MSEL(IP14_31_28,	RIF0_D0_A,		SEL_DRIF0_0),
-	PINMUX_IPSR_MSEL(IP14_31_28,	RIF2_D1_A,		SEL_DRIF2_0),
-
-	/* IPSR15 */
-	PINMUX_IPSR_GPSR(IP15_3_0,	SSI_SCK6),
-	PINMUX_IPSR_GPSR(IP15_3_0,	USB2_PWEN),
-	PINMUX_IPSR_MSEL(IP15_3_0,	SIM0_RST_D,		SEL_SIMCARD_3),
-
-	PINMUX_IPSR_GPSR(IP15_7_4,	SSI_WS6),
-	PINMUX_IPSR_GPSR(IP15_7_4,	USB2_OVC),
-	PINMUX_IPSR_MSEL(IP15_7_4,	SIM0_D_D,		SEL_SIMCARD_3),
-
-	PINMUX_IPSR_GPSR(IP15_11_8,	SSI_SDATA6),
-	PINMUX_IPSR_MSEL(IP15_11_8,	SIM0_CLK_D,		SEL_SIMCARD_3),
-	PINMUX_IPSR_MSEL(IP15_11_8,	SATA_DEVSLP_A,		SEL_SATA_0),
-
-	PINMUX_IPSR_GPSR(IP15_15_12,	SSI_SCK78),
-	PINMUX_IPSR_MSEL(IP15_15_12,	HRX2_B,			SEL_HSCIF2_1),
-	PINMUX_IPSR_MSEL(IP15_15_12,	MSIOF1_SCK_C,		SEL_MSIOF1_2),
-	PINMUX_IPSR_MSEL(IP15_15_12,	TS_SCK1_A,		SEL_TSIF1_0),
-	PINMUX_IPSR_MSEL(IP15_15_12,	STP_ISCLK_1_A,		SEL_SSP1_1_0),
-	PINMUX_IPSR_MSEL(IP15_15_12,	RIF1_CLK_A,		SEL_DRIF1_0),
-	PINMUX_IPSR_MSEL(IP15_15_12,	RIF3_CLK_A,		SEL_DRIF3_0),
-
-	PINMUX_IPSR_GPSR(IP15_19_16,	SSI_WS78),
-	PINMUX_IPSR_MSEL(IP15_19_16,	HTX2_B,			SEL_HSCIF2_1),
-	PINMUX_IPSR_MSEL(IP15_19_16,	MSIOF1_SYNC_C,		SEL_MSIOF1_2),
-	PINMUX_IPSR_MSEL(IP15_19_16,	TS_SDAT1_A,		SEL_TSIF1_0),
-	PINMUX_IPSR_MSEL(IP15_19_16,	STP_ISD_1_A,		SEL_SSP1_1_0),
-	PINMUX_IPSR_MSEL(IP15_19_16,	RIF1_SYNC_A,		SEL_DRIF1_0),
-	PINMUX_IPSR_MSEL(IP15_19_16,	RIF3_SYNC_A,		SEL_DRIF3_0),
-
-	PINMUX_IPSR_GPSR(IP15_23_20,	SSI_SDATA7),
-	PINMUX_IPSR_MSEL(IP15_23_20,	HCTS2_N_B,		SEL_HSCIF2_1),
-	PINMUX_IPSR_MSEL(IP15_23_20,	MSIOF1_RXD_C,		SEL_MSIOF1_2),
-	PINMUX_IPSR_MSEL(IP15_23_20,	TS_SDEN1_A,		SEL_TSIF1_0),
-	PINMUX_IPSR_MSEL(IP15_23_20,	STP_ISEN_1_A,		SEL_SSP1_1_0),
-	PINMUX_IPSR_MSEL(IP15_23_20,	RIF1_D0_A,		SEL_DRIF1_0),
-	PINMUX_IPSR_MSEL(IP15_23_20,	RIF3_D0_A,		SEL_DRIF3_0),
-	PINMUX_IPSR_MSEL(IP15_23_20,	TCLK2_A,		SEL_TIMER_TMU_0),
-
-	PINMUX_IPSR_GPSR(IP15_27_24,	SSI_SDATA8),
-	PINMUX_IPSR_MSEL(IP15_27_24,	HRTS2_N_B,		SEL_HSCIF2_1),
-	PINMUX_IPSR_MSEL(IP15_27_24,	MSIOF1_TXD_C,		SEL_MSIOF1_2),
-	PINMUX_IPSR_MSEL(IP15_27_24,	TS_SPSYNC1_A,		SEL_TSIF1_0),
-	PINMUX_IPSR_MSEL(IP15_27_24,	STP_ISSYNC_1_A,		SEL_SSP1_1_0),
-	PINMUX_IPSR_MSEL(IP15_27_24,	RIF1_D1_A,		SEL_DRIF1_0),
-	PINMUX_IPSR_MSEL(IP15_27_24,	RIF3_D1_A,		SEL_DRIF3_0),
-
-	PINMUX_IPSR_MSEL(IP15_31_28,	SSI_SDATA9_A,		SEL_SSI_0),
-	PINMUX_IPSR_MSEL(IP15_31_28,	HSCK2_B,		SEL_HSCIF2_1),
-	PINMUX_IPSR_MSEL(IP15_31_28,	MSIOF1_SS1_C,		SEL_MSIOF1_2),
-	PINMUX_IPSR_MSEL(IP15_31_28,	HSCK1_A,		SEL_HSCIF1_0),
-	PINMUX_IPSR_MSEL(IP15_31_28,	SSI_WS1_B,		SEL_SSI_1),
-	PINMUX_IPSR_GPSR(IP15_31_28,	SCK1),
-	PINMUX_IPSR_MSEL(IP15_31_28,	STP_IVCXO27_1_A,	SEL_SSP1_1_0),
-	PINMUX_IPSR_GPSR(IP15_31_28,	SCK5),
-
-	/* IPSR16 */
-	PINMUX_IPSR_MSEL(IP16_3_0,	AUDIO_CLKA_A,		SEL_ADG_0),
-
-	PINMUX_IPSR_MSEL(IP16_7_4,	AUDIO_CLKB_B,		SEL_ADG_1),
-	PINMUX_IPSR_MSEL(IP16_7_4,	SCIF_CLK_A,		SEL_SCIF1_0),
-	PINMUX_IPSR_MSEL(IP16_7_4,	STP_IVCXO27_1_D,	SEL_SSP1_1_3),
-	PINMUX_IPSR_MSEL(IP16_7_4,	REMOCON_A,		SEL_REMOCON_0),
-	PINMUX_IPSR_MSEL(IP16_7_4,	TCLK1_A,		SEL_TIMER_TMU_0),
-
-	PINMUX_IPSR_GPSR(IP16_11_8,	USB0_PWEN),
-	PINMUX_IPSR_MSEL(IP16_11_8,	SIM0_RST_C,		SEL_SIMCARD_2),
-	PINMUX_IPSR_MSEL(IP16_11_8,	TS_SCK1_D,		SEL_TSIF1_3),
-	PINMUX_IPSR_MSEL(IP16_11_8,	STP_ISCLK_1_D,		SEL_SSP1_1_3),
-	PINMUX_IPSR_MSEL(IP16_11_8,	BPFCLK_B,		SEL_FM_1),
-	PINMUX_IPSR_MSEL(IP16_11_8,	RIF3_CLK_B,		SEL_DRIF3_1),
-
-	PINMUX_IPSR_GPSR(IP16_15_12,	USB0_OVC),
-	PINMUX_IPSR_MSEL(IP16_11_8,	SIM0_D_C,		SEL_SIMCARD_2),
-	PINMUX_IPSR_MSEL(IP16_11_8,	TS_SDAT1_D,		SEL_TSIF1_3),
-	PINMUX_IPSR_MSEL(IP16_11_8,	STP_ISD_1_D,		SEL_SSP1_1_3),
-	PINMUX_IPSR_MSEL(IP16_11_8,	RIF3_SYNC_B,		SEL_DRIF3_1),
-
-	PINMUX_IPSR_GPSR(IP16_19_16,	USB1_PWEN),
-	PINMUX_IPSR_MSEL(IP16_19_16,	SIM0_CLK_C,		SEL_SIMCARD_2),
-	PINMUX_IPSR_MSEL(IP16_19_16,	SSI_SCK1_A,		SEL_SSI_0),
-	PINMUX_IPSR_MSEL(IP16_19_16,	TS_SCK0_E,		SEL_TSIF0_4),
-	PINMUX_IPSR_MSEL(IP16_19_16,	STP_ISCLK_0_E,		SEL_SSP1_0_4),
-	PINMUX_IPSR_MSEL(IP16_19_16,	FMCLK_B,		SEL_FM_1),
-	PINMUX_IPSR_MSEL(IP16_19_16,	RIF2_CLK_B,		SEL_DRIF2_1),
-	PINMUX_IPSR_MSEL(IP16_19_16,	SPEEDIN_A,		SEL_SPEED_PULSE_0),
-
-	PINMUX_IPSR_GPSR(IP16_23_20,	USB1_OVC),
-	PINMUX_IPSR_MSEL(IP16_23_20,	MSIOF1_SS2_C,		SEL_MSIOF1_2),
-	PINMUX_IPSR_MSEL(IP16_23_20,	SSI_WS1_A,		SEL_SSI_0),
-	PINMUX_IPSR_MSEL(IP16_23_20,	TS_SDAT0_E,		SEL_TSIF0_4),
-	PINMUX_IPSR_MSEL(IP16_23_20,	STP_ISD_0_E,		SEL_SSP1_0_4),
-	PINMUX_IPSR_MSEL(IP16_23_20,	FMIN_B,			SEL_FM_1),
-	PINMUX_IPSR_MSEL(IP16_23_20,	RIF2_SYNC_B,		SEL_DRIF2_1),
-	PINMUX_IPSR_MSEL(IP16_23_20,	REMOCON_B,		SEL_REMOCON_1),
-
-	PINMUX_IPSR_GPSR(IP16_27_24,	USB30_PWEN),
-	PINMUX_IPSR_MSEL(IP16_27_24,	AUDIO_CLKOUT_B,		SEL_ADG_1),
-	PINMUX_IPSR_MSEL(IP16_27_24,	SSI_SCK2_B,		SEL_SSI_1),
-	PINMUX_IPSR_MSEL(IP16_27_24,	TS_SDEN1_D,		SEL_TSIF1_3),
-	PINMUX_IPSR_MSEL(IP16_27_24,	STP_ISEN_1_D,		SEL_SSP1_1_3),
-	PINMUX_IPSR_MSEL(IP16_27_24,	STP_OPWM_0_E,		SEL_SSP1_0_4),
-	PINMUX_IPSR_MSEL(IP16_27_24,	RIF3_D0_B,		SEL_DRIF3_1),
-	PINMUX_IPSR_MSEL(IP16_27_24,	TCLK2_B,		SEL_TIMER_TMU_1),
-	PINMUX_IPSR_GPSR(IP16_27_24,	TPU0TO0),
-
-	PINMUX_IPSR_GPSR(IP16_31_28,	USB30_OVC),
-	PINMUX_IPSR_MSEL(IP16_31_28,	AUDIO_CLKOUT1_B,	SEL_ADG_1),
-	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_WS2_B,		SEL_SSI_1),
-	PINMUX_IPSR_MSEL(IP16_31_28,	TS_SPSYNC1_D,		SEL_TSIF1_3),
-	PINMUX_IPSR_MSEL(IP16_31_28,	STP_ISSYNC_1_D,		SEL_SSP1_1_3),
-	PINMUX_IPSR_MSEL(IP16_31_28,	STP_IVCXO27_0_E,	SEL_SSP1_0_4),
-	PINMUX_IPSR_MSEL(IP16_31_28,	RIF3_D1_B,		SEL_DRIF3_1),
-	PINMUX_IPSR_MSEL(IP16_31_28,	FSO_TOE_B,		SEL_FSO_1),
-	PINMUX_IPSR_GPSR(IP16_31_28,	TPU0TO1),
-
-	/* IPSR17 */
-	PINMUX_IPSR_GPSR(IP17_3_0,	USB31_PWEN),
-	PINMUX_IPSR_MSEL(IP17_3_0,	AUDIO_CLKOUT2_B,	SEL_ADG_1),
-	PINMUX_IPSR_MSEL(IP17_3_0,	SSI_SCK9_B,		SEL_SSI_1),
-	PINMUX_IPSR_MSEL(IP17_3_0,	TS_SDEN0_E,		SEL_TSIF0_4),
-	PINMUX_IPSR_MSEL(IP17_3_0,	STP_ISEN_0_E,		SEL_SSP1_0_4),
-	PINMUX_IPSR_MSEL(IP17_3_0,	RIF2_D0_B,		SEL_DRIF2_1),
-	PINMUX_IPSR_GPSR(IP17_3_0,	TPU0TO2),
-
-	PINMUX_IPSR_GPSR(IP17_7_4,	USB31_OVC),
-	PINMUX_IPSR_MSEL(IP17_7_4,	AUDIO_CLKOUT3_B,	SEL_ADG_1),
-	PINMUX_IPSR_MSEL(IP17_7_4,	SSI_WS9_B,		SEL_SSI_1),
-	PINMUX_IPSR_MSEL(IP17_7_4,	TS_SPSYNC0_E,		SEL_TSIF0_4),
-	PINMUX_IPSR_MSEL(IP17_7_4,	STP_ISSYNC_0_E,		SEL_SSP1_0_4),
-	PINMUX_IPSR_MSEL(IP17_7_4,	RIF2_D1_B,		SEL_DRIF2_1),
-	PINMUX_IPSR_GPSR(IP17_7_4,	TPU0TO3),
-
-/*
- * Static pins can not be muxed between different functions but
- * still need mark entries in the pinmux list. Add each static
- * pin to the list without an associated function. The sh-pfc
- * core will do the right thing and skip trying to mux the pin
- * while still applying configuration to it.
- */
-#define FM(x)	PINMUX_DATA(x##_MARK, 0),
-	PINMUX_STATIC
-#undef FM
-};
-
-/*
- * Pins not associated with a GPIO port.
- */
-enum {
-	GP_ASSIGN_LAST(),
-	NOGP_ALL(),
-};
-
-static const struct sh_pfc_pin pinmux_pins[] = {
-	PINMUX_GPIO_GP_ALL(),
-	PINMUX_NOGP_ALL(),
-};
-
-/* - AUDIO CLOCK ------------------------------------------------------------ */
-static const unsigned int audio_clk_a_a_pins[] = {
-	/* CLK A */
-	RCAR_GP_PIN(6, 22),
-};
-static const unsigned int audio_clk_a_a_mux[] = {
-	AUDIO_CLKA_A_MARK,
-};
-static const unsigned int audio_clk_a_b_pins[] = {
-	/* CLK A */
-	RCAR_GP_PIN(5, 4),
-};
-static const unsigned int audio_clk_a_b_mux[] = {
-	AUDIO_CLKA_B_MARK,
-};
-static const unsigned int audio_clk_a_c_pins[] = {
-	/* CLK A */
-	RCAR_GP_PIN(5, 19),
-};
-static const unsigned int audio_clk_a_c_mux[] = {
-	AUDIO_CLKA_C_MARK,
-};
-static const unsigned int audio_clk_b_a_pins[] = {
-	/* CLK B */
-	RCAR_GP_PIN(5, 12),
-};
-static const unsigned int audio_clk_b_a_mux[] = {
-	AUDIO_CLKB_A_MARK,
-};
-static const unsigned int audio_clk_b_b_pins[] = {
-	/* CLK B */
-	RCAR_GP_PIN(6, 23),
-};
-static const unsigned int audio_clk_b_b_mux[] = {
-	AUDIO_CLKB_B_MARK,
-};
-static const unsigned int audio_clk_c_a_pins[] = {
-	/* CLK C */
-	RCAR_GP_PIN(5, 21),
-};
-static const unsigned int audio_clk_c_a_mux[] = {
-	AUDIO_CLKC_A_MARK,
-};
-static const unsigned int audio_clk_c_b_pins[] = {
-	/* CLK C */
-	RCAR_GP_PIN(5, 0),
-};
-static const unsigned int audio_clk_c_b_mux[] = {
-	AUDIO_CLKC_B_MARK,
-};
-static const unsigned int audio_clkout_a_pins[] = {
-	/* CLKOUT */
-	RCAR_GP_PIN(5, 18),
-};
-static const unsigned int audio_clkout_a_mux[] = {
-	AUDIO_CLKOUT_A_MARK,
-};
-static const unsigned int audio_clkout_b_pins[] = {
-	/* CLKOUT */
-	RCAR_GP_PIN(6, 28),
-};
-static const unsigned int audio_clkout_b_mux[] = {
-	AUDIO_CLKOUT_B_MARK,
-};
-static const unsigned int audio_clkout_c_pins[] = {
-	/* CLKOUT */
-	RCAR_GP_PIN(5, 3),
-};
-static const unsigned int audio_clkout_c_mux[] = {
-	AUDIO_CLKOUT_C_MARK,
-};
-static const unsigned int audio_clkout_d_pins[] = {
-	/* CLKOUT */
-	RCAR_GP_PIN(5, 21),
-};
-static const unsigned int audio_clkout_d_mux[] = {
-	AUDIO_CLKOUT_D_MARK,
-};
-static const unsigned int audio_clkout1_a_pins[] = {
-	/* CLKOUT1 */
-	RCAR_GP_PIN(5, 15),
-};
-static const unsigned int audio_clkout1_a_mux[] = {
-	AUDIO_CLKOUT1_A_MARK,
-};
-static const unsigned int audio_clkout1_b_pins[] = {
-	/* CLKOUT1 */
-	RCAR_GP_PIN(6, 29),
-};
-static const unsigned int audio_clkout1_b_mux[] = {
-	AUDIO_CLKOUT1_B_MARK,
-};
-static const unsigned int audio_clkout2_a_pins[] = {
-	/* CLKOUT2 */
-	RCAR_GP_PIN(5, 16),
-};
-static const unsigned int audio_clkout2_a_mux[] = {
-	AUDIO_CLKOUT2_A_MARK,
-};
-static const unsigned int audio_clkout2_b_pins[] = {
-	/* CLKOUT2 */
-	RCAR_GP_PIN(6, 30),
-};
-static const unsigned int audio_clkout2_b_mux[] = {
-	AUDIO_CLKOUT2_B_MARK,
-};
-
-static const unsigned int audio_clkout3_a_pins[] = {
-	/* CLKOUT3 */
-	RCAR_GP_PIN(5, 19),
-};
-static const unsigned int audio_clkout3_a_mux[] = {
-	AUDIO_CLKOUT3_A_MARK,
-};
-static const unsigned int audio_clkout3_b_pins[] = {
-	/* CLKOUT3 */
-	RCAR_GP_PIN(6, 31),
-};
-static const unsigned int audio_clkout3_b_mux[] = {
-	AUDIO_CLKOUT3_B_MARK,
-};
-
-/* - EtherAVB --------------------------------------------------------------- */
-static const unsigned int avb_link_pins[] = {
-	/* AVB_LINK */
-	RCAR_GP_PIN(2, 12),
-};
-static const unsigned int avb_link_mux[] = {
-	AVB_LINK_MARK,
-};
-static const unsigned int avb_magic_pins[] = {
-	/* AVB_MAGIC_ */
-	RCAR_GP_PIN(2, 10),
-};
-static const unsigned int avb_magic_mux[] = {
-	AVB_MAGIC_MARK,
-};
-static const unsigned int avb_phy_int_pins[] = {
-	/* AVB_PHY_INT */
-	RCAR_GP_PIN(2, 11),
-};
-static const unsigned int avb_phy_int_mux[] = {
-	AVB_PHY_INT_MARK,
-};
-static const unsigned int avb_mdio_pins[] = {
-	/* AVB_MDC, AVB_MDIO */
-	RCAR_GP_PIN(2, 9), PIN_AVB_MDIO,
-};
-static const unsigned int avb_mdio_mux[] = {
-	AVB_MDC_MARK, AVB_MDIO_MARK,
-};
-static const unsigned int avb_mii_pins[] = {
-	/*
-	 * AVB_TX_CTL, AVB_TXC, AVB_TD0,
-	 * AVB_TD1, AVB_TD2, AVB_TD3,
-	 * AVB_RX_CTL, AVB_RXC, AVB_RD0,
-	 * AVB_RD1, AVB_RD2, AVB_RD3,
-	 * AVB_TXCREFCLK
-	 */
-	PIN_AVB_TX_CTL, PIN_AVB_TXC, PIN_AVB_TD0,
-	PIN_AVB_TD1, PIN_AVB_TD2, PIN_AVB_TD3,
-	PIN_AVB_RX_CTL, PIN_AVB_RXC, PIN_AVB_RD0,
-	PIN_AVB_RD1, PIN_AVB_RD2, PIN_AVB_RD3,
-	PIN_AVB_TXCREFCLK,
-
-};
-static const unsigned int avb_mii_mux[] = {
-	AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
-	AVB_TD1_MARK, AVB_TD2_MARK, AVB_TD3_MARK,
-	AVB_RX_CTL_MARK, AVB_RXC_MARK, AVB_RD0_MARK,
-	AVB_RD1_MARK, AVB_RD2_MARK, AVB_RD3_MARK,
-	AVB_TXCREFCLK_MARK,
-};
-static const unsigned int avb_avtp_pps_pins[] = {
-	/* AVB_AVTP_PPS */
-	RCAR_GP_PIN(2, 6),
-};
-static const unsigned int avb_avtp_pps_mux[] = {
-	AVB_AVTP_PPS_MARK,
-};
-static const unsigned int avb_avtp_match_a_pins[] = {
-	/* AVB_AVTP_MATCH_A */
-	RCAR_GP_PIN(2, 13),
-};
-static const unsigned int avb_avtp_match_a_mux[] = {
-	AVB_AVTP_MATCH_A_MARK,
-};
-static const unsigned int avb_avtp_capture_a_pins[] = {
-	/* AVB_AVTP_CAPTURE_A */
-	RCAR_GP_PIN(2, 14),
-};
-static const unsigned int avb_avtp_capture_a_mux[] = {
-	AVB_AVTP_CAPTURE_A_MARK,
-};
-static const unsigned int avb_avtp_match_b_pins[] = {
-	/*  AVB_AVTP_MATCH_B */
-	RCAR_GP_PIN(1, 8),
-};
-static const unsigned int avb_avtp_match_b_mux[] = {
-	AVB_AVTP_MATCH_B_MARK,
-};
-static const unsigned int avb_avtp_capture_b_pins[] = {
-	/* AVB_AVTP_CAPTURE_B */
-	RCAR_GP_PIN(1, 11),
-};
-static const unsigned int avb_avtp_capture_b_mux[] = {
-	AVB_AVTP_CAPTURE_B_MARK,
-};
-
-/* - CAN ------------------------------------------------------------------ */
-static const unsigned int can0_data_a_pins[] = {
-	/* TX, RX */
-	RCAR_GP_PIN(1, 23),	RCAR_GP_PIN(1, 24),
-};
-static const unsigned int can0_data_a_mux[] = {
-	CAN0_TX_A_MARK,		CAN0_RX_A_MARK,
-};
-static const unsigned int can0_data_b_pins[] = {
-	/* TX, RX */
-	RCAR_GP_PIN(2, 0),	RCAR_GP_PIN(2, 1),
-};
-static const unsigned int can0_data_b_mux[] = {
-	CAN0_TX_B_MARK,		CAN0_RX_B_MARK,
-};
-static const unsigned int can1_data_pins[] = {
-	/* TX, RX */
-	RCAR_GP_PIN(1, 22),	RCAR_GP_PIN(1, 26),
-};
-static const unsigned int can1_data_mux[] = {
-	CAN1_TX_MARK,		CAN1_RX_MARK,
-};
-
-/* - CAN Clock -------------------------------------------------------------- */
-static const unsigned int can_clk_pins[] = {
-	/* CLK */
-	RCAR_GP_PIN(1, 25),
-};
-static const unsigned int can_clk_mux[] = {
-	CAN_CLK_MARK,
-};
-
-/* - CAN FD --------------------------------------------------------------- */
-static const unsigned int canfd0_data_a_pins[] = {
-	/* TX, RX */
-	RCAR_GP_PIN(1, 23),     RCAR_GP_PIN(1, 24),
-};
-static const unsigned int canfd0_data_a_mux[] = {
-	CANFD0_TX_A_MARK,       CANFD0_RX_A_MARK,
-};
-static const unsigned int canfd0_data_b_pins[] = {
-	/* TX, RX */
-	RCAR_GP_PIN(2, 0),      RCAR_GP_PIN(2, 1),
-};
-static const unsigned int canfd0_data_b_mux[] = {
-	CANFD0_TX_B_MARK,       CANFD0_RX_B_MARK,
-};
-static const unsigned int canfd1_data_pins[] = {
-	/* TX, RX */
-	RCAR_GP_PIN(1, 22),     RCAR_GP_PIN(1, 26),
-};
-static const unsigned int canfd1_data_mux[] = {
-	CANFD1_TX_MARK,         CANFD1_RX_MARK,
-};
-
-/* - DRIF0 --------------------------------------------------------------- */
-static const unsigned int drif0_ctrl_a_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
-};
-static const unsigned int drif0_ctrl_a_mux[] = {
-	RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK,
-};
-static const unsigned int drif0_data0_a_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(6, 10),
-};
-static const unsigned int drif0_data0_a_mux[] = {
-	RIF0_D0_A_MARK,
-};
-static const unsigned int drif0_data1_a_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(6, 7),
-};
-static const unsigned int drif0_data1_a_mux[] = {
-	RIF0_D1_A_MARK,
-};
-static const unsigned int drif0_ctrl_b_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
-};
-static const unsigned int drif0_ctrl_b_mux[] = {
-	RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK,
-};
-static const unsigned int drif0_data0_b_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(5, 1),
-};
-static const unsigned int drif0_data0_b_mux[] = {
-	RIF0_D0_B_MARK,
-};
-static const unsigned int drif0_data1_b_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(5, 2),
-};
-static const unsigned int drif0_data1_b_mux[] = {
-	RIF0_D1_B_MARK,
-};
-static const unsigned int drif0_ctrl_c_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 15),
-};
-static const unsigned int drif0_ctrl_c_mux[] = {
-	RIF0_CLK_C_MARK, RIF0_SYNC_C_MARK,
-};
-static const unsigned int drif0_data0_c_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(5, 13),
-};
-static const unsigned int drif0_data0_c_mux[] = {
-	RIF0_D0_C_MARK,
-};
-static const unsigned int drif0_data1_c_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(5, 14),
-};
-static const unsigned int drif0_data1_c_mux[] = {
-	RIF0_D1_C_MARK,
-};
-/* - DRIF1 --------------------------------------------------------------- */
-static const unsigned int drif1_ctrl_a_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
-};
-static const unsigned int drif1_ctrl_a_mux[] = {
-	RIF1_CLK_A_MARK, RIF1_SYNC_A_MARK,
-};
-static const unsigned int drif1_data0_a_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(6, 19),
-};
-static const unsigned int drif1_data0_a_mux[] = {
-	RIF1_D0_A_MARK,
-};
-static const unsigned int drif1_data1_a_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(6, 20),
-};
-static const unsigned int drif1_data1_a_mux[] = {
-	RIF1_D1_A_MARK,
-};
-static const unsigned int drif1_ctrl_b_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 3),
-};
-static const unsigned int drif1_ctrl_b_mux[] = {
-	RIF1_CLK_B_MARK, RIF1_SYNC_B_MARK,
-};
-static const unsigned int drif1_data0_b_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(5, 7),
-};
-static const unsigned int drif1_data0_b_mux[] = {
-	RIF1_D0_B_MARK,
-};
-static const unsigned int drif1_data1_b_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(5, 8),
-};
-static const unsigned int drif1_data1_b_mux[] = {
-	RIF1_D1_B_MARK,
-};
-static const unsigned int drif1_ctrl_c_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11),
-};
-static const unsigned int drif1_ctrl_c_mux[] = {
-	RIF1_CLK_C_MARK, RIF1_SYNC_C_MARK,
-};
-static const unsigned int drif1_data0_c_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(5, 6),
-};
-static const unsigned int drif1_data0_c_mux[] = {
-	RIF1_D0_C_MARK,
-};
-static const unsigned int drif1_data1_c_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(5, 10),
-};
-static const unsigned int drif1_data1_c_mux[] = {
-	RIF1_D1_C_MARK,
-};
-/* - DRIF2 --------------------------------------------------------------- */
-static const unsigned int drif2_ctrl_a_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
-};
-static const unsigned int drif2_ctrl_a_mux[] = {
-	RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK,
-};
-static const unsigned int drif2_data0_a_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(6, 7),
-};
-static const unsigned int drif2_data0_a_mux[] = {
-	RIF2_D0_A_MARK,
-};
-static const unsigned int drif2_data1_a_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(6, 10),
-};
-static const unsigned int drif2_data1_a_mux[] = {
-	RIF2_D1_A_MARK,
-};
-static const unsigned int drif2_ctrl_b_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
-};
-static const unsigned int drif2_ctrl_b_mux[] = {
-	RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK,
-};
-static const unsigned int drif2_data0_b_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(6, 30),
-};
-static const unsigned int drif2_data0_b_mux[] = {
-	RIF2_D0_B_MARK,
-};
-static const unsigned int drif2_data1_b_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(6, 31),
-};
-static const unsigned int drif2_data1_b_mux[] = {
-	RIF2_D1_B_MARK,
-};
-/* - DRIF3 --------------------------------------------------------------- */
-static const unsigned int drif3_ctrl_a_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
-};
-static const unsigned int drif3_ctrl_a_mux[] = {
-	RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK,
-};
-static const unsigned int drif3_data0_a_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(6, 19),
-};
-static const unsigned int drif3_data0_a_mux[] = {
-	RIF3_D0_A_MARK,
-};
-static const unsigned int drif3_data1_a_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(6, 20),
-};
-static const unsigned int drif3_data1_a_mux[] = {
-	RIF3_D1_A_MARK,
-};
-static const unsigned int drif3_ctrl_b_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
-};
-static const unsigned int drif3_ctrl_b_mux[] = {
-	RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK,
-};
-static const unsigned int drif3_data0_b_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(6, 28),
-};
-static const unsigned int drif3_data0_b_mux[] = {
-	RIF3_D0_B_MARK,
-};
-static const unsigned int drif3_data1_b_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(6, 29),
-};
-static const unsigned int drif3_data1_b_mux[] = {
-	RIF3_D1_B_MARK,
-};
-
-/* - DU --------------------------------------------------------------------- */
-static const unsigned int du_rgb666_pins[] = {
-	/* R[7:2], G[7:2], B[7:2] */
-	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
-	RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
-	RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
-	RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
-	RCAR_GP_PIN(1, 7),  RCAR_GP_PIN(1, 6),  RCAR_GP_PIN(1, 5),
-	RCAR_GP_PIN(1, 4),  RCAR_GP_PIN(1, 3),  RCAR_GP_PIN(1, 2),
-};
-static const unsigned int du_rgb666_mux[] = {
-	DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
-	DU_DR3_MARK, DU_DR2_MARK,
-	DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
-	DU_DG3_MARK, DU_DG2_MARK,
-	DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
-	DU_DB3_MARK, DU_DB2_MARK,
-};
-static const unsigned int du_rgb888_pins[] = {
-	/* R[7:0], G[7:0], B[7:0] */
-	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
-	RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
-	RCAR_GP_PIN(0, 9),  RCAR_GP_PIN(0, 8),
-	RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
-	RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
-	RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16),
-	RCAR_GP_PIN(1, 7),  RCAR_GP_PIN(1, 6),  RCAR_GP_PIN(1, 5),
-	RCAR_GP_PIN(1, 4),  RCAR_GP_PIN(1, 3),  RCAR_GP_PIN(1, 2),
-	RCAR_GP_PIN(1, 1),  RCAR_GP_PIN(1, 0),
-};
-static const unsigned int du_rgb888_mux[] = {
-	DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
-	DU_DR3_MARK, DU_DR2_MARK, DU_DR1_MARK, DU_DR0_MARK,
-	DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
-	DU_DG3_MARK, DU_DG2_MARK, DU_DG1_MARK, DU_DG0_MARK,
-	DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
-	DU_DB3_MARK, DU_DB2_MARK, DU_DB1_MARK, DU_DB0_MARK,
-};
-static const unsigned int du_clk_out_0_pins[] = {
-	/* CLKOUT */
-	RCAR_GP_PIN(1, 27),
-};
-static const unsigned int du_clk_out_0_mux[] = {
-	DU_DOTCLKOUT0_MARK
-};
-static const unsigned int du_clk_out_1_pins[] = {
-	/* CLKOUT */
-	RCAR_GP_PIN(2, 3),
-};
-static const unsigned int du_clk_out_1_mux[] = {
-	DU_DOTCLKOUT1_MARK
-};
-static const unsigned int du_sync_pins[] = {
-	/* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
-	RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 4),
-};
-static const unsigned int du_sync_mux[] = {
-	DU_EXVSYNC_DU_VSYNC_MARK, DU_EXHSYNC_DU_HSYNC_MARK
-};
-static const unsigned int du_oddf_pins[] = {
-	/* EXDISP/EXODDF/EXCDE */
-	RCAR_GP_PIN(2, 2),
-};
-static const unsigned int du_oddf_mux[] = {
-	DU_EXODDF_DU_ODDF_DISP_CDE_MARK,
-};
-static const unsigned int du_cde_pins[] = {
-	/* CDE */
-	RCAR_GP_PIN(2, 0),
-};
-static const unsigned int du_cde_mux[] = {
-	DU_CDE_MARK,
-};
-static const unsigned int du_disp_pins[] = {
-	/* DISP */
-	RCAR_GP_PIN(2, 1),
-};
-static const unsigned int du_disp_mux[] = {
-	DU_DISP_MARK,
-};
-/* - HSCIF0 ----------------------------------------------------------------- */
-static const unsigned int hscif0_data_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
-};
-static const unsigned int hscif0_data_mux[] = {
-	HRX0_MARK, HTX0_MARK,
-};
-static const unsigned int hscif0_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 12),
-};
-static const unsigned int hscif0_clk_mux[] = {
-	HSCK0_MARK,
-};
-static const unsigned int hscif0_ctrl_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(5, 16), RCAR_GP_PIN(5, 15),
-};
-static const unsigned int hscif0_ctrl_mux[] = {
-	HRTS0_N_MARK, HCTS0_N_MARK,
-};
-/* - HSCIF1 ----------------------------------------------------------------- */
-static const unsigned int hscif1_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
-};
-static const unsigned int hscif1_data_a_mux[] = {
-	HRX1_A_MARK, HTX1_A_MARK,
-};
-static const unsigned int hscif1_clk_a_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 21),
-};
-static const unsigned int hscif1_clk_a_mux[] = {
-	HSCK1_A_MARK,
-};
-static const unsigned int hscif1_ctrl_a_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
-};
-static const unsigned int hscif1_ctrl_a_mux[] = {
-	HRTS1_N_A_MARK, HCTS1_N_A_MARK,
-};
-
-static const unsigned int hscif1_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
-};
-static const unsigned int hscif1_data_b_mux[] = {
-	HRX1_B_MARK, HTX1_B_MARK,
-};
-static const unsigned int hscif1_clk_b_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 0),
-};
-static const unsigned int hscif1_clk_b_mux[] = {
-	HSCK1_B_MARK,
-};
-static const unsigned int hscif1_ctrl_b_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
-};
-static const unsigned int hscif1_ctrl_b_mux[] = {
-	HRTS1_N_B_MARK, HCTS1_N_B_MARK,
-};
-/* - HSCIF2 ----------------------------------------------------------------- */
-static const unsigned int hscif2_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
-};
-static const unsigned int hscif2_data_a_mux[] = {
-	HRX2_A_MARK, HTX2_A_MARK,
-};
-static const unsigned int hscif2_clk_a_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 10),
-};
-static const unsigned int hscif2_clk_a_mux[] = {
-	HSCK2_A_MARK,
-};
-static const unsigned int hscif2_ctrl_a_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
-};
-static const unsigned int hscif2_ctrl_a_mux[] = {
-	HRTS2_N_A_MARK, HCTS2_N_A_MARK,
-};
-
-static const unsigned int hscif2_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
-};
-static const unsigned int hscif2_data_b_mux[] = {
-	HRX2_B_MARK, HTX2_B_MARK,
-};
-static const unsigned int hscif2_clk_b_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 21),
-};
-static const unsigned int hscif2_clk_b_mux[] = {
-	HSCK2_B_MARK,
-};
-static const unsigned int hscif2_ctrl_b_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(6, 20), RCAR_GP_PIN(6, 19),
-};
-static const unsigned int hscif2_ctrl_b_mux[] = {
-	HRTS2_N_B_MARK, HCTS2_N_B_MARK,
-};
-/* - HSCIF3 ----------------------------------------------------------------- */
-static const unsigned int hscif3_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
-};
-static const unsigned int hscif3_data_a_mux[] = {
-	HRX3_A_MARK, HTX3_A_MARK,
-};
-static const unsigned int hscif3_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 22),
-};
-static const unsigned int hscif3_clk_mux[] = {
-	HSCK3_MARK,
-};
-static const unsigned int hscif3_ctrl_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
-};
-static const unsigned int hscif3_ctrl_mux[] = {
-	HRTS3_N_MARK, HCTS3_N_MARK,
-};
-
-static const unsigned int hscif3_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
-};
-static const unsigned int hscif3_data_b_mux[] = {
-	HRX3_B_MARK, HTX3_B_MARK,
-};
-static const unsigned int hscif3_data_c_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
-};
-static const unsigned int hscif3_data_c_mux[] = {
-	HRX3_C_MARK, HTX3_C_MARK,
-};
-static const unsigned int hscif3_data_d_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
-};
-static const unsigned int hscif3_data_d_mux[] = {
-	HRX3_D_MARK, HTX3_D_MARK,
-};
-/* - HSCIF4 ----------------------------------------------------------------- */
-static const unsigned int hscif4_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
-};
-static const unsigned int hscif4_data_a_mux[] = {
-	HRX4_A_MARK, HTX4_A_MARK,
-};
-static const unsigned int hscif4_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 11),
-};
-static const unsigned int hscif4_clk_mux[] = {
-	HSCK4_MARK,
-};
-static const unsigned int hscif4_ctrl_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14),
-};
-static const unsigned int hscif4_ctrl_mux[] = {
-	HRTS4_N_MARK, HCTS4_N_MARK,
-};
-
-static const unsigned int hscif4_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
-};
-static const unsigned int hscif4_data_b_mux[] = {
-	HRX4_B_MARK, HTX4_B_MARK,
-};
-
-/* - I2C -------------------------------------------------------------------- */
-static const unsigned int i2c0_pins[] = {
-	/* SCL, SDA */
-	RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15),
-};
-
-static const unsigned int i2c0_mux[] = {
-	SCL0_MARK, SDA0_MARK,
-};
-
-static const unsigned int i2c1_a_pins[] = {
-	/* SDA, SCL */
-	RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
-};
-static const unsigned int i2c1_a_mux[] = {
-	SDA1_A_MARK, SCL1_A_MARK,
-};
-static const unsigned int i2c1_b_pins[] = {
-	/* SDA, SCL */
-	RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 23),
-};
-static const unsigned int i2c1_b_mux[] = {
-	SDA1_B_MARK, SCL1_B_MARK,
-};
-static const unsigned int i2c2_a_pins[] = {
-	/* SDA, SCL */
-	RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
-};
-static const unsigned int i2c2_a_mux[] = {
-	SDA2_A_MARK, SCL2_A_MARK,
-};
-static const unsigned int i2c2_b_pins[] = {
-	/* SDA, SCL */
-	RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12),
-};
-static const unsigned int i2c2_b_mux[] = {
-	SDA2_B_MARK, SCL2_B_MARK,
-};
-
-static const unsigned int i2c3_pins[] = {
-	/* SCL, SDA */
-	RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
-};
-
-static const unsigned int i2c3_mux[] = {
-	SCL3_MARK, SDA3_MARK,
-};
-
-static const unsigned int i2c5_pins[] = {
-	/* SCL, SDA */
-	RCAR_GP_PIN(2, 13), RCAR_GP_PIN(2, 14),
-};
-
-static const unsigned int i2c5_mux[] = {
-	SCL5_MARK, SDA5_MARK,
-};
-
-static const unsigned int i2c6_a_pins[] = {
-	/* SDA, SCL */
-	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
-};
-static const unsigned int i2c6_a_mux[] = {
-	SDA6_A_MARK, SCL6_A_MARK,
-};
-static const unsigned int i2c6_b_pins[] = {
-	/* SDA, SCL */
-	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
-};
-static const unsigned int i2c6_b_mux[] = {
-	SDA6_B_MARK, SCL6_B_MARK,
-};
-static const unsigned int i2c6_c_pins[] = {
-	/* SDA, SCL */
-	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
-};
-static const unsigned int i2c6_c_mux[] = {
-	SDA6_C_MARK, SCL6_C_MARK,
-};
-
-/* - INTC-EX ---------------------------------------------------------------- */
-static const unsigned int intc_ex_irq0_pins[] = {
-	/* IRQ0 */
-	RCAR_GP_PIN(2, 0),
-};
-static const unsigned int intc_ex_irq0_mux[] = {
-	IRQ0_MARK,
-};
-static const unsigned int intc_ex_irq1_pins[] = {
-	/* IRQ1 */
-	RCAR_GP_PIN(2, 1),
-};
-static const unsigned int intc_ex_irq1_mux[] = {
-	IRQ1_MARK,
-};
-static const unsigned int intc_ex_irq2_pins[] = {
-	/* IRQ2 */
-	RCAR_GP_PIN(2, 2),
-};
-static const unsigned int intc_ex_irq2_mux[] = {
-	IRQ2_MARK,
-};
-static const unsigned int intc_ex_irq3_pins[] = {
-	/* IRQ3 */
-	RCAR_GP_PIN(2, 3),
-};
-static const unsigned int intc_ex_irq3_mux[] = {
-	IRQ3_MARK,
-};
-static const unsigned int intc_ex_irq4_pins[] = {
-	/* IRQ4 */
-	RCAR_GP_PIN(2, 4),
-};
-static const unsigned int intc_ex_irq4_mux[] = {
-	IRQ4_MARK,
-};
-static const unsigned int intc_ex_irq5_pins[] = {
-	/* IRQ5 */
-	RCAR_GP_PIN(2, 5),
-};
-static const unsigned int intc_ex_irq5_mux[] = {
-	IRQ5_MARK,
-};
-
-/* - MSIOF0 ----------------------------------------------------------------- */
-static const unsigned int msiof0_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 17),
-};
-static const unsigned int msiof0_clk_mux[] = {
-	MSIOF0_SCK_MARK,
-};
-static const unsigned int msiof0_sync_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(5, 18),
-};
-static const unsigned int msiof0_sync_mux[] = {
-	MSIOF0_SYNC_MARK,
-};
-static const unsigned int msiof0_ss1_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(5, 19),
-};
-static const unsigned int msiof0_ss1_mux[] = {
-	MSIOF0_SS1_MARK,
-};
-static const unsigned int msiof0_ss2_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(5, 21),
-};
-static const unsigned int msiof0_ss2_mux[] = {
-	MSIOF0_SS2_MARK,
-};
-static const unsigned int msiof0_txd_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(5, 20),
-};
-static const unsigned int msiof0_txd_mux[] = {
-	MSIOF0_TXD_MARK,
-};
-static const unsigned int msiof0_rxd_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(5, 22),
-};
-static const unsigned int msiof0_rxd_mux[] = {
-	MSIOF0_RXD_MARK,
-};
-/* - MSIOF1 ----------------------------------------------------------------- */
-static const unsigned int msiof1_clk_a_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 8),
-};
-static const unsigned int msiof1_clk_a_mux[] = {
-	MSIOF1_SCK_A_MARK,
-};
-static const unsigned int msiof1_sync_a_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(6, 9),
-};
-static const unsigned int msiof1_sync_a_mux[] = {
-	MSIOF1_SYNC_A_MARK,
-};
-static const unsigned int msiof1_ss1_a_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(6, 5),
-};
-static const unsigned int msiof1_ss1_a_mux[] = {
-	MSIOF1_SS1_A_MARK,
-};
-static const unsigned int msiof1_ss2_a_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(6, 6),
-};
-static const unsigned int msiof1_ss2_a_mux[] = {
-	MSIOF1_SS2_A_MARK,
-};
-static const unsigned int msiof1_txd_a_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(6, 7),
-};
-static const unsigned int msiof1_txd_a_mux[] = {
-	MSIOF1_TXD_A_MARK,
-};
-static const unsigned int msiof1_rxd_a_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(6, 10),
-};
-static const unsigned int msiof1_rxd_a_mux[] = {
-	MSIOF1_RXD_A_MARK,
-};
-static const unsigned int msiof1_clk_b_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 9),
-};
-static const unsigned int msiof1_clk_b_mux[] = {
-	MSIOF1_SCK_B_MARK,
-};
-static const unsigned int msiof1_sync_b_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(5, 3),
-};
-static const unsigned int msiof1_sync_b_mux[] = {
-	MSIOF1_SYNC_B_MARK,
-};
-static const unsigned int msiof1_ss1_b_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(5, 4),
-};
-static const unsigned int msiof1_ss1_b_mux[] = {
-	MSIOF1_SS1_B_MARK,
-};
-static const unsigned int msiof1_ss2_b_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(5, 0),
-};
-static const unsigned int msiof1_ss2_b_mux[] = {
-	MSIOF1_SS2_B_MARK,
-};
-static const unsigned int msiof1_txd_b_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(5, 8),
-};
-static const unsigned int msiof1_txd_b_mux[] = {
-	MSIOF1_TXD_B_MARK,
-};
-static const unsigned int msiof1_rxd_b_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(5, 7),
-};
-static const unsigned int msiof1_rxd_b_mux[] = {
-	MSIOF1_RXD_B_MARK,
-};
-static const unsigned int msiof1_clk_c_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 17),
-};
-static const unsigned int msiof1_clk_c_mux[] = {
-	MSIOF1_SCK_C_MARK,
-};
-static const unsigned int msiof1_sync_c_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(6, 18),
-};
-static const unsigned int msiof1_sync_c_mux[] = {
-	MSIOF1_SYNC_C_MARK,
-};
-static const unsigned int msiof1_ss1_c_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(6, 21),
-};
-static const unsigned int msiof1_ss1_c_mux[] = {
-	MSIOF1_SS1_C_MARK,
-};
-static const unsigned int msiof1_ss2_c_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(6, 27),
-};
-static const unsigned int msiof1_ss2_c_mux[] = {
-	MSIOF1_SS2_C_MARK,
-};
-static const unsigned int msiof1_txd_c_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(6, 20),
-};
-static const unsigned int msiof1_txd_c_mux[] = {
-	MSIOF1_TXD_C_MARK,
-};
-static const unsigned int msiof1_rxd_c_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(6, 19),
-};
-static const unsigned int msiof1_rxd_c_mux[] = {
-	MSIOF1_RXD_C_MARK,
-};
-static const unsigned int msiof1_clk_d_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 12),
-};
-static const unsigned int msiof1_clk_d_mux[] = {
-	MSIOF1_SCK_D_MARK,
-};
-static const unsigned int msiof1_sync_d_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(5, 15),
-};
-static const unsigned int msiof1_sync_d_mux[] = {
-	MSIOF1_SYNC_D_MARK,
-};
-static const unsigned int msiof1_ss1_d_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(5, 16),
-};
-static const unsigned int msiof1_ss1_d_mux[] = {
-	MSIOF1_SS1_D_MARK,
-};
-static const unsigned int msiof1_ss2_d_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(5, 21),
-};
-static const unsigned int msiof1_ss2_d_mux[] = {
-	MSIOF1_SS2_D_MARK,
-};
-static const unsigned int msiof1_txd_d_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(5, 14),
-};
-static const unsigned int msiof1_txd_d_mux[] = {
-	MSIOF1_TXD_D_MARK,
-};
-static const unsigned int msiof1_rxd_d_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(5, 13),
-};
-static const unsigned int msiof1_rxd_d_mux[] = {
-	MSIOF1_RXD_D_MARK,
-};
-static const unsigned int msiof1_clk_e_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(3, 0),
-};
-static const unsigned int msiof1_clk_e_mux[] = {
-	MSIOF1_SCK_E_MARK,
-};
-static const unsigned int msiof1_sync_e_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(3, 1),
-};
-static const unsigned int msiof1_sync_e_mux[] = {
-	MSIOF1_SYNC_E_MARK,
-};
-static const unsigned int msiof1_ss1_e_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(3, 4),
-};
-static const unsigned int msiof1_ss1_e_mux[] = {
-	MSIOF1_SS1_E_MARK,
-};
-static const unsigned int msiof1_ss2_e_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(3, 5),
-};
-static const unsigned int msiof1_ss2_e_mux[] = {
-	MSIOF1_SS2_E_MARK,
-};
-static const unsigned int msiof1_txd_e_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(3, 3),
-};
-static const unsigned int msiof1_txd_e_mux[] = {
-	MSIOF1_TXD_E_MARK,
-};
-static const unsigned int msiof1_rxd_e_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(3, 2),
-};
-static const unsigned int msiof1_rxd_e_mux[] = {
-	MSIOF1_RXD_E_MARK,
-};
-static const unsigned int msiof1_clk_f_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 23),
-};
-static const unsigned int msiof1_clk_f_mux[] = {
-	MSIOF1_SCK_F_MARK,
-};
-static const unsigned int msiof1_sync_f_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(5, 24),
-};
-static const unsigned int msiof1_sync_f_mux[] = {
-	MSIOF1_SYNC_F_MARK,
-};
-static const unsigned int msiof1_ss1_f_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(6, 1),
-};
-static const unsigned int msiof1_ss1_f_mux[] = {
-	MSIOF1_SS1_F_MARK,
-};
-static const unsigned int msiof1_ss2_f_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(6, 2),
-};
-static const unsigned int msiof1_ss2_f_mux[] = {
-	MSIOF1_SS2_F_MARK,
-};
-static const unsigned int msiof1_txd_f_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(6, 0),
-};
-static const unsigned int msiof1_txd_f_mux[] = {
-	MSIOF1_TXD_F_MARK,
-};
-static const unsigned int msiof1_rxd_f_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(5, 25),
-};
-static const unsigned int msiof1_rxd_f_mux[] = {
-	MSIOF1_RXD_F_MARK,
-};
-static const unsigned int msiof1_clk_g_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(3, 6),
-};
-static const unsigned int msiof1_clk_g_mux[] = {
-	MSIOF1_SCK_G_MARK,
-};
-static const unsigned int msiof1_sync_g_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(3, 7),
-};
-static const unsigned int msiof1_sync_g_mux[] = {
-	MSIOF1_SYNC_G_MARK,
-};
-static const unsigned int msiof1_ss1_g_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(3, 10),
-};
-static const unsigned int msiof1_ss1_g_mux[] = {
-	MSIOF1_SS1_G_MARK,
-};
-static const unsigned int msiof1_ss2_g_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(3, 11),
-};
-static const unsigned int msiof1_ss2_g_mux[] = {
-	MSIOF1_SS2_G_MARK,
-};
-static const unsigned int msiof1_txd_g_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(3, 9),
-};
-static const unsigned int msiof1_txd_g_mux[] = {
-	MSIOF1_TXD_G_MARK,
-};
-static const unsigned int msiof1_rxd_g_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(3, 8),
-};
-static const unsigned int msiof1_rxd_g_mux[] = {
-	MSIOF1_RXD_G_MARK,
-};
-/* - MSIOF2 ----------------------------------------------------------------- */
-static const unsigned int msiof2_clk_a_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 9),
-};
-static const unsigned int msiof2_clk_a_mux[] = {
-	MSIOF2_SCK_A_MARK,
-};
-static const unsigned int msiof2_sync_a_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(1, 8),
-};
-static const unsigned int msiof2_sync_a_mux[] = {
-	MSIOF2_SYNC_A_MARK,
-};
-static const unsigned int msiof2_ss1_a_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(1, 6),
-};
-static const unsigned int msiof2_ss1_a_mux[] = {
-	MSIOF2_SS1_A_MARK,
-};
-static const unsigned int msiof2_ss2_a_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(1, 7),
-};
-static const unsigned int msiof2_ss2_a_mux[] = {
-	MSIOF2_SS2_A_MARK,
-};
-static const unsigned int msiof2_txd_a_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(1, 11),
-};
-static const unsigned int msiof2_txd_a_mux[] = {
-	MSIOF2_TXD_A_MARK,
-};
-static const unsigned int msiof2_rxd_a_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(1, 10),
-};
-static const unsigned int msiof2_rxd_a_mux[] = {
-	MSIOF2_RXD_A_MARK,
-};
-static const unsigned int msiof2_clk_b_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(0, 4),
-};
-static const unsigned int msiof2_clk_b_mux[] = {
-	MSIOF2_SCK_B_MARK,
-};
-static const unsigned int msiof2_sync_b_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(0, 5),
-};
-static const unsigned int msiof2_sync_b_mux[] = {
-	MSIOF2_SYNC_B_MARK,
-};
-static const unsigned int msiof2_ss1_b_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(0, 0),
-};
-static const unsigned int msiof2_ss1_b_mux[] = {
-	MSIOF2_SS1_B_MARK,
-};
-static const unsigned int msiof2_ss2_b_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(0, 1),
-};
-static const unsigned int msiof2_ss2_b_mux[] = {
-	MSIOF2_SS2_B_MARK,
-};
-static const unsigned int msiof2_txd_b_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(0, 7),
-};
-static const unsigned int msiof2_txd_b_mux[] = {
-	MSIOF2_TXD_B_MARK,
-};
-static const unsigned int msiof2_rxd_b_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(0, 6),
-};
-static const unsigned int msiof2_rxd_b_mux[] = {
-	MSIOF2_RXD_B_MARK,
-};
-static const unsigned int msiof2_clk_c_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(2, 12),
-};
-static const unsigned int msiof2_clk_c_mux[] = {
-	MSIOF2_SCK_C_MARK,
-};
-static const unsigned int msiof2_sync_c_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(2, 11),
-};
-static const unsigned int msiof2_sync_c_mux[] = {
-	MSIOF2_SYNC_C_MARK,
-};
-static const unsigned int msiof2_ss1_c_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(2, 10),
-};
-static const unsigned int msiof2_ss1_c_mux[] = {
-	MSIOF2_SS1_C_MARK,
-};
-static const unsigned int msiof2_ss2_c_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(2, 9),
-};
-static const unsigned int msiof2_ss2_c_mux[] = {
-	MSIOF2_SS2_C_MARK,
-};
-static const unsigned int msiof2_txd_c_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(2, 14),
-};
-static const unsigned int msiof2_txd_c_mux[] = {
-	MSIOF2_TXD_C_MARK,
-};
-static const unsigned int msiof2_rxd_c_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(2, 13),
-};
-static const unsigned int msiof2_rxd_c_mux[] = {
-	MSIOF2_RXD_C_MARK,
-};
-static const unsigned int msiof2_clk_d_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(0, 8),
-};
-static const unsigned int msiof2_clk_d_mux[] = {
-	MSIOF2_SCK_D_MARK,
-};
-static const unsigned int msiof2_sync_d_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(0, 9),
-};
-static const unsigned int msiof2_sync_d_mux[] = {
-	MSIOF2_SYNC_D_MARK,
-};
-static const unsigned int msiof2_ss1_d_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(0, 12),
-};
-static const unsigned int msiof2_ss1_d_mux[] = {
-	MSIOF2_SS1_D_MARK,
-};
-static const unsigned int msiof2_ss2_d_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(0, 13),
-};
-static const unsigned int msiof2_ss2_d_mux[] = {
-	MSIOF2_SS2_D_MARK,
-};
-static const unsigned int msiof2_txd_d_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(0, 11),
-};
-static const unsigned int msiof2_txd_d_mux[] = {
-	MSIOF2_TXD_D_MARK,
-};
-static const unsigned int msiof2_rxd_d_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(0, 10),
-};
-static const unsigned int msiof2_rxd_d_mux[] = {
-	MSIOF2_RXD_D_MARK,
-};
-/* - MSIOF3 ----------------------------------------------------------------- */
-static const unsigned int msiof3_clk_a_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(0, 0),
-};
-static const unsigned int msiof3_clk_a_mux[] = {
-	MSIOF3_SCK_A_MARK,
-};
-static const unsigned int msiof3_sync_a_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(0, 1),
-};
-static const unsigned int msiof3_sync_a_mux[] = {
-	MSIOF3_SYNC_A_MARK,
-};
-static const unsigned int msiof3_ss1_a_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(0, 14),
-};
-static const unsigned int msiof3_ss1_a_mux[] = {
-	MSIOF3_SS1_A_MARK,
-};
-static const unsigned int msiof3_ss2_a_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(0, 15),
-};
-static const unsigned int msiof3_ss2_a_mux[] = {
-	MSIOF3_SS2_A_MARK,
-};
-static const unsigned int msiof3_txd_a_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(0, 3),
-};
-static const unsigned int msiof3_txd_a_mux[] = {
-	MSIOF3_TXD_A_MARK,
-};
-static const unsigned int msiof3_rxd_a_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(0, 2),
-};
-static const unsigned int msiof3_rxd_a_mux[] = {
-	MSIOF3_RXD_A_MARK,
-};
-static const unsigned int msiof3_clk_b_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 2),
-};
-static const unsigned int msiof3_clk_b_mux[] = {
-	MSIOF3_SCK_B_MARK,
-};
-static const unsigned int msiof3_sync_b_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(1, 0),
-};
-static const unsigned int msiof3_sync_b_mux[] = {
-	MSIOF3_SYNC_B_MARK,
-};
-static const unsigned int msiof3_ss1_b_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(1, 4),
-};
-static const unsigned int msiof3_ss1_b_mux[] = {
-	MSIOF3_SS1_B_MARK,
-};
-static const unsigned int msiof3_ss2_b_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(1, 5),
-};
-static const unsigned int msiof3_ss2_b_mux[] = {
-	MSIOF3_SS2_B_MARK,
-};
-static const unsigned int msiof3_txd_b_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(1, 1),
-};
-static const unsigned int msiof3_txd_b_mux[] = {
-	MSIOF3_TXD_B_MARK,
-};
-static const unsigned int msiof3_rxd_b_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(1, 3),
-};
-static const unsigned int msiof3_rxd_b_mux[] = {
-	MSIOF3_RXD_B_MARK,
-};
-static const unsigned int msiof3_clk_c_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 12),
-};
-static const unsigned int msiof3_clk_c_mux[] = {
-	MSIOF3_SCK_C_MARK,
-};
-static const unsigned int msiof3_sync_c_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(1, 13),
-};
-static const unsigned int msiof3_sync_c_mux[] = {
-	MSIOF3_SYNC_C_MARK,
-};
-static const unsigned int msiof3_txd_c_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(1, 15),
-};
-static const unsigned int msiof3_txd_c_mux[] = {
-	MSIOF3_TXD_C_MARK,
-};
-static const unsigned int msiof3_rxd_c_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(1, 14),
-};
-static const unsigned int msiof3_rxd_c_mux[] = {
-	MSIOF3_RXD_C_MARK,
-};
-static const unsigned int msiof3_clk_d_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 22),
-};
-static const unsigned int msiof3_clk_d_mux[] = {
-	MSIOF3_SCK_D_MARK,
-};
-static const unsigned int msiof3_sync_d_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(1, 23),
-};
-static const unsigned int msiof3_sync_d_mux[] = {
-	MSIOF3_SYNC_D_MARK,
-};
-static const unsigned int msiof3_ss1_d_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(1, 26),
-};
-static const unsigned int msiof3_ss1_d_mux[] = {
-	MSIOF3_SS1_D_MARK,
-};
-static const unsigned int msiof3_txd_d_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(1, 25),
-};
-static const unsigned int msiof3_txd_d_mux[] = {
-	MSIOF3_TXD_D_MARK,
-};
-static const unsigned int msiof3_rxd_d_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(1, 24),
-};
-static const unsigned int msiof3_rxd_d_mux[] = {
-	MSIOF3_RXD_D_MARK,
-};
-
-/* - PWM0 --------------------------------------------------------------------*/
-static const unsigned int pwm0_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(2, 6),
-};
-static const unsigned int pwm0_mux[] = {
-	PWM0_MARK,
-};
-/* - PWM1 --------------------------------------------------------------------*/
-static const unsigned int pwm1_a_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(2, 7),
-};
-static const unsigned int pwm1_a_mux[] = {
-	PWM1_A_MARK,
-};
-static const unsigned int pwm1_b_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(1, 8),
-};
-static const unsigned int pwm1_b_mux[] = {
-	PWM1_B_MARK,
-};
-/* - PWM2 --------------------------------------------------------------------*/
-static const unsigned int pwm2_a_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(2, 8),
-};
-static const unsigned int pwm2_a_mux[] = {
-	PWM2_A_MARK,
-};
-static const unsigned int pwm2_b_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(1, 11),
-};
-static const unsigned int pwm2_b_mux[] = {
-	PWM2_B_MARK,
-};
-/* - PWM3 --------------------------------------------------------------------*/
-static const unsigned int pwm3_a_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(1, 0),
-};
-static const unsigned int pwm3_a_mux[] = {
-	PWM3_A_MARK,
-};
-static const unsigned int pwm3_b_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(2, 2),
-};
-static const unsigned int pwm3_b_mux[] = {
-	PWM3_B_MARK,
-};
-/* - PWM4 --------------------------------------------------------------------*/
-static const unsigned int pwm4_a_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(1, 1),
-};
-static const unsigned int pwm4_a_mux[] = {
-	PWM4_A_MARK,
-};
-static const unsigned int pwm4_b_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(2, 3),
-};
-static const unsigned int pwm4_b_mux[] = {
-	PWM4_B_MARK,
-};
-/* - PWM5 --------------------------------------------------------------------*/
-static const unsigned int pwm5_a_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(1, 2),
-};
-static const unsigned int pwm5_a_mux[] = {
-	PWM5_A_MARK,
-};
-static const unsigned int pwm5_b_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(2, 4),
-};
-static const unsigned int pwm5_b_mux[] = {
-	PWM5_B_MARK,
-};
-/* - PWM6 --------------------------------------------------------------------*/
-static const unsigned int pwm6_a_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(1, 3),
-};
-static const unsigned int pwm6_a_mux[] = {
-	PWM6_A_MARK,
-};
-static const unsigned int pwm6_b_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(2, 5),
-};
-static const unsigned int pwm6_b_mux[] = {
-	PWM6_B_MARK,
-};
-
-/* - QSPI0 ------------------------------------------------------------------ */
-static const unsigned int qspi0_ctrl_pins[] = {
-	/* QSPI0_SPCLK, QSPI0_SSL */
-	PIN_QSPI0_SPCLK, PIN_QSPI0_SSL,
-};
-static const unsigned int qspi0_ctrl_mux[] = {
-	QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
-};
-static const unsigned int qspi0_data2_pins[] = {
-	/* QSPI0_MOSI_IO0, QSPI0_MISO_IO1 */
-	PIN_QSPI0_MOSI_IO0, PIN_QSPI0_MISO_IO1,
-};
-static const unsigned int qspi0_data2_mux[] = {
-	QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
-};
-static const unsigned int qspi0_data4_pins[] = {
-	/* QSPI0_MOSI_IO0, QSPI0_MISO_IO1, QSPI0_IO2, QSPI0_IO3 */
-	PIN_QSPI0_MOSI_IO0, PIN_QSPI0_MISO_IO1, PIN_QSPI0_IO2, PIN_QSPI0_IO3,
-};
-static const unsigned int qspi0_data4_mux[] = {
-	QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
-	QSPI0_IO2_MARK, QSPI0_IO3_MARK,
-};
-/* - QSPI1 ------------------------------------------------------------------ */
-static const unsigned int qspi1_ctrl_pins[] = {
-	/* QSPI1_SPCLK, QSPI1_SSL */
-	PIN_QSPI1_SPCLK, PIN_QSPI1_SSL,
-};
-static const unsigned int qspi1_ctrl_mux[] = {
-	QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
-};
-static const unsigned int qspi1_data2_pins[] = {
-	/* QSPI1_MOSI_IO0, QSPI1_MISO_IO1 */
-	PIN_QSPI1_MOSI_IO0, PIN_QSPI1_MISO_IO1,
-};
-static const unsigned int qspi1_data2_mux[] = {
-	QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
-};
-static const unsigned int qspi1_data4_pins[] = {
-	/* QSPI1_MOSI_IO0, QSPI1_MISO_IO1, QSPI1_IO2, QSPI1_IO3 */
-	PIN_QSPI1_MOSI_IO0, PIN_QSPI1_MISO_IO1, PIN_QSPI1_IO2, PIN_QSPI1_IO3,
-};
-static const unsigned int qspi1_data4_mux[] = {
-	QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
-	QSPI1_IO2_MARK, QSPI1_IO3_MARK,
-};
-
-/* - SATA --------------------------------------------------------------------*/
-static const unsigned int sata0_devslp_a_pins[] = {
-	/* DEVSLP */
-	RCAR_GP_PIN(6, 16),
-};
-static const unsigned int sata0_devslp_a_mux[] = {
-	SATA_DEVSLP_A_MARK,
-};
-static const unsigned int sata0_devslp_b_pins[] = {
-	/* DEVSLP */
-	RCAR_GP_PIN(4, 6),
-};
-static const unsigned int sata0_devslp_b_mux[] = {
-	SATA_DEVSLP_B_MARK,
-};
-
-/* - SCIF0 ------------------------------------------------------------------ */
-static const unsigned int scif0_data_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
-};
-static const unsigned int scif0_data_mux[] = {
-	RX0_MARK, TX0_MARK,
-};
-static const unsigned int scif0_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 0),
-};
-static const unsigned int scif0_clk_mux[] = {
-	SCK0_MARK,
-};
-static const unsigned int scif0_ctrl_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
-};
-static const unsigned int scif0_ctrl_mux[] = {
-	RTS0_N_MARK, CTS0_N_MARK,
-};
-/* - SCIF1 ------------------------------------------------------------------ */
-static const unsigned int scif1_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
-};
-static const unsigned int scif1_data_a_mux[] = {
-	RX1_A_MARK, TX1_A_MARK,
-};
-static const unsigned int scif1_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 21),
-};
-static const unsigned int scif1_clk_mux[] = {
-	SCK1_MARK,
-};
-static const unsigned int scif1_ctrl_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
-};
-static const unsigned int scif1_ctrl_mux[] = {
-	RTS1_N_MARK, CTS1_N_MARK,
-};
-
-static const unsigned int scif1_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 25),
-};
-static const unsigned int scif1_data_b_mux[] = {
-	RX1_B_MARK, TX1_B_MARK,
-};
-/* - SCIF2 ------------------------------------------------------------------ */
-static const unsigned int scif2_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
-};
-static const unsigned int scif2_data_a_mux[] = {
-	RX2_A_MARK, TX2_A_MARK,
-};
-static const unsigned int scif2_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 9),
-};
-static const unsigned int scif2_clk_mux[] = {
-	SCK2_MARK,
-};
-static const unsigned int scif2_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
-};
-static const unsigned int scif2_data_b_mux[] = {
-	RX2_B_MARK, TX2_B_MARK,
-};
-/* - SCIF3 ------------------------------------------------------------------ */
-static const unsigned int scif3_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
-};
-static const unsigned int scif3_data_a_mux[] = {
-	RX3_A_MARK, TX3_A_MARK,
-};
-static const unsigned int scif3_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 22),
-};
-static const unsigned int scif3_clk_mux[] = {
-	SCK3_MARK,
-};
-static const unsigned int scif3_ctrl_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
-};
-static const unsigned int scif3_ctrl_mux[] = {
-	RTS3_N_MARK, CTS3_N_MARK,
-};
-static const unsigned int scif3_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
-};
-static const unsigned int scif3_data_b_mux[] = {
-	RX3_B_MARK, TX3_B_MARK,
-};
-/* - SCIF4 ------------------------------------------------------------------ */
-static const unsigned int scif4_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 12),
-};
-static const unsigned int scif4_data_a_mux[] = {
-	RX4_A_MARK, TX4_A_MARK,
-};
-static const unsigned int scif4_clk_a_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(2, 10),
-};
-static const unsigned int scif4_clk_a_mux[] = {
-	SCK4_A_MARK,
-};
-static const unsigned int scif4_ctrl_a_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 13),
-};
-static const unsigned int scif4_ctrl_a_mux[] = {
-	RTS4_N_A_MARK, CTS4_N_A_MARK,
-};
-static const unsigned int scif4_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
-};
-static const unsigned int scif4_data_b_mux[] = {
-	RX4_B_MARK, TX4_B_MARK,
-};
-static const unsigned int scif4_clk_b_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 5),
-};
-static const unsigned int scif4_clk_b_mux[] = {
-	SCK4_B_MARK,
-};
-static const unsigned int scif4_ctrl_b_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
-};
-static const unsigned int scif4_ctrl_b_mux[] = {
-	RTS4_N_B_MARK, CTS4_N_B_MARK,
-};
-static const unsigned int scif4_data_c_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
-};
-static const unsigned int scif4_data_c_mux[] = {
-	RX4_C_MARK, TX4_C_MARK,
-};
-static const unsigned int scif4_clk_c_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(0, 8),
-};
-static const unsigned int scif4_clk_c_mux[] = {
-	SCK4_C_MARK,
-};
-static const unsigned int scif4_ctrl_c_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
-};
-static const unsigned int scif4_ctrl_c_mux[] = {
-	RTS4_N_C_MARK, CTS4_N_C_MARK,
-};
-/* - SCIF5 ------------------------------------------------------------------ */
-static const unsigned int scif5_data_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
-};
-static const unsigned int scif5_data_mux[] = {
-	RX5_MARK, TX5_MARK,
-};
-static const unsigned int scif5_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 21),
-};
-static const unsigned int scif5_clk_mux[] = {
-	SCK5_MARK,
-};
-
-/* - SCIF Clock ------------------------------------------------------------- */
-static const unsigned int scif_clk_a_pins[] = {
-	/* SCIF_CLK */
-	RCAR_GP_PIN(6, 23),
-};
-static const unsigned int scif_clk_a_mux[] = {
-	SCIF_CLK_A_MARK,
-};
-static const unsigned int scif_clk_b_pins[] = {
-	/* SCIF_CLK */
-	RCAR_GP_PIN(5, 9),
-};
-static const unsigned int scif_clk_b_mux[] = {
-	SCIF_CLK_B_MARK,
-};
-
-/* - SDHI0 ------------------------------------------------------------------ */
-static const unsigned int sdhi0_data1_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(3, 2),
-};
-static const unsigned int sdhi0_data1_mux[] = {
-	SD0_DAT0_MARK,
-};
-static const unsigned int sdhi0_data4_pins[] = {
-	/* D[0:3] */
-	RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3),
-	RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
-};
-static const unsigned int sdhi0_data4_mux[] = {
-	SD0_DAT0_MARK, SD0_DAT1_MARK,
-	SD0_DAT2_MARK, SD0_DAT3_MARK,
-};
-static const unsigned int sdhi0_ctrl_pins[] = {
-	/* CLK, CMD */
-	RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
-};
-static const unsigned int sdhi0_ctrl_mux[] = {
-	SD0_CLK_MARK, SD0_CMD_MARK,
-};
-static const unsigned int sdhi0_cd_pins[] = {
-	/* CD */
-	RCAR_GP_PIN(3, 12),
-};
-static const unsigned int sdhi0_cd_mux[] = {
-	SD0_CD_MARK,
-};
-static const unsigned int sdhi0_wp_pins[] = {
-	/* WP */
-	RCAR_GP_PIN(3, 13),
-};
-static const unsigned int sdhi0_wp_mux[] = {
-	SD0_WP_MARK,
-};
-/* - SDHI1 ------------------------------------------------------------------ */
-static const unsigned int sdhi1_data1_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(3, 8),
-};
-static const unsigned int sdhi1_data1_mux[] = {
-	SD1_DAT0_MARK,
-};
-static const unsigned int sdhi1_data4_pins[] = {
-	/* D[0:3] */
-	RCAR_GP_PIN(3, 8),  RCAR_GP_PIN(3, 9),
-	RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
-};
-static const unsigned int sdhi1_data4_mux[] = {
-	SD1_DAT0_MARK, SD1_DAT1_MARK,
-	SD1_DAT2_MARK, SD1_DAT3_MARK,
-};
-static const unsigned int sdhi1_ctrl_pins[] = {
-	/* CLK, CMD */
-	RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
-};
-static const unsigned int sdhi1_ctrl_mux[] = {
-	SD1_CLK_MARK, SD1_CMD_MARK,
-};
-static const unsigned int sdhi1_cd_pins[] = {
-	/* CD */
-	RCAR_GP_PIN(3, 14),
-};
-static const unsigned int sdhi1_cd_mux[] = {
-	SD1_CD_MARK,
-};
-static const unsigned int sdhi1_wp_pins[] = {
-	/* WP */
-	RCAR_GP_PIN(3, 15),
-};
-static const unsigned int sdhi1_wp_mux[] = {
-	SD1_WP_MARK,
-};
-/* - SDHI2 ------------------------------------------------------------------ */
-static const unsigned int sdhi2_data1_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(4, 2),
-};
-static const unsigned int sdhi2_data1_mux[] = {
-	SD2_DAT0_MARK,
-};
-static const unsigned int sdhi2_data4_pins[] = {
-	/* D[0:3] */
-	RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
-	RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
-};
-static const unsigned int sdhi2_data4_mux[] = {
-	SD2_DAT0_MARK, SD2_DAT1_MARK,
-	SD2_DAT2_MARK, SD2_DAT3_MARK,
-};
-static const unsigned int sdhi2_data8_pins[] = {
-	/* D[0:7] */
-	RCAR_GP_PIN(4, 2),  RCAR_GP_PIN(4, 3),
-	RCAR_GP_PIN(4, 4),  RCAR_GP_PIN(4, 5),
-	RCAR_GP_PIN(3, 8),  RCAR_GP_PIN(3, 9),
-	RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
-};
-static const unsigned int sdhi2_data8_mux[] = {
-	SD2_DAT0_MARK, SD2_DAT1_MARK,
-	SD2_DAT2_MARK, SD2_DAT3_MARK,
-	SD2_DAT4_MARK, SD2_DAT5_MARK,
-	SD2_DAT6_MARK, SD2_DAT7_MARK,
-};
-static const unsigned int sdhi2_ctrl_pins[] = {
-	/* CLK, CMD */
-	RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1),
-};
-static const unsigned int sdhi2_ctrl_mux[] = {
-	SD2_CLK_MARK, SD2_CMD_MARK,
-};
-static const unsigned int sdhi2_cd_a_pins[] = {
-	/* CD */
-	RCAR_GP_PIN(4, 13),
-};
-static const unsigned int sdhi2_cd_a_mux[] = {
-	SD2_CD_A_MARK,
-};
-static const unsigned int sdhi2_cd_b_pins[] = {
-	/* CD */
-	RCAR_GP_PIN(5, 10),
-};
-static const unsigned int sdhi2_cd_b_mux[] = {
-	SD2_CD_B_MARK,
-};
-static const unsigned int sdhi2_wp_a_pins[] = {
-	/* WP */
-	RCAR_GP_PIN(4, 14),
-};
-static const unsigned int sdhi2_wp_a_mux[] = {
-	SD2_WP_A_MARK,
-};
-static const unsigned int sdhi2_wp_b_pins[] = {
-	/* WP */
-	RCAR_GP_PIN(5, 11),
-};
-static const unsigned int sdhi2_wp_b_mux[] = {
-	SD2_WP_B_MARK,
-};
-static const unsigned int sdhi2_ds_pins[] = {
-	/* DS */
-	RCAR_GP_PIN(4, 6),
-};
-static const unsigned int sdhi2_ds_mux[] = {
-	SD2_DS_MARK,
-};
-/* - SDHI3 ------------------------------------------------------------------ */
-static const unsigned int sdhi3_data1_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(4, 9),
-};
-static const unsigned int sdhi3_data1_mux[] = {
-	SD3_DAT0_MARK,
-};
-static const unsigned int sdhi3_data4_pins[] = {
-	/* D[0:3] */
-	RCAR_GP_PIN(4, 9),  RCAR_GP_PIN(4, 10),
-	RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
-};
-static const unsigned int sdhi3_data4_mux[] = {
-	SD3_DAT0_MARK, SD3_DAT1_MARK,
-	SD3_DAT2_MARK, SD3_DAT3_MARK,
-};
-static const unsigned int sdhi3_data8_pins[] = {
-	/* D[0:7] */
-	RCAR_GP_PIN(4, 9),  RCAR_GP_PIN(4, 10),
-	RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
-	RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
-	RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16),
-};
-static const unsigned int sdhi3_data8_mux[] = {
-	SD3_DAT0_MARK, SD3_DAT1_MARK,
-	SD3_DAT2_MARK, SD3_DAT3_MARK,
-	SD3_DAT4_MARK, SD3_DAT5_MARK,
-	SD3_DAT6_MARK, SD3_DAT7_MARK,
-};
-static const unsigned int sdhi3_ctrl_pins[] = {
-	/* CLK, CMD */
-	RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8),
-};
-static const unsigned int sdhi3_ctrl_mux[] = {
-	SD3_CLK_MARK, SD3_CMD_MARK,
-};
-static const unsigned int sdhi3_cd_pins[] = {
-	/* CD */
-	RCAR_GP_PIN(4, 15),
-};
-static const unsigned int sdhi3_cd_mux[] = {
-	SD3_CD_MARK,
-};
-static const unsigned int sdhi3_wp_pins[] = {
-	/* WP */
-	RCAR_GP_PIN(4, 16),
-};
-static const unsigned int sdhi3_wp_mux[] = {
-	SD3_WP_MARK,
-};
-static const unsigned int sdhi3_ds_pins[] = {
-	/* DS */
-	RCAR_GP_PIN(4, 17),
-};
-static const unsigned int sdhi3_ds_mux[] = {
-	SD3_DS_MARK,
-};
-
-/* - SSI -------------------------------------------------------------------- */
-static const unsigned int ssi0_data_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 2),
-};
-static const unsigned int ssi0_data_mux[] = {
-	SSI_SDATA0_MARK,
-};
-static const unsigned int ssi01239_ctrl_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
-};
-static const unsigned int ssi01239_ctrl_mux[] = {
-	SSI_SCK01239_MARK, SSI_WS01239_MARK,
-};
-static const unsigned int ssi1_data_a_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 3),
-};
-static const unsigned int ssi1_data_a_mux[] = {
-	SSI_SDATA1_A_MARK,
-};
-static const unsigned int ssi1_data_b_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(5, 12),
-};
-static const unsigned int ssi1_data_b_mux[] = {
-	SSI_SDATA1_B_MARK,
-};
-static const unsigned int ssi1_ctrl_a_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
-};
-static const unsigned int ssi1_ctrl_a_mux[] = {
-	SSI_SCK1_A_MARK, SSI_WS1_A_MARK,
-};
-static const unsigned int ssi1_ctrl_b_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 21),
-};
-static const unsigned int ssi1_ctrl_b_mux[] = {
-	SSI_SCK1_B_MARK, SSI_WS1_B_MARK,
-};
-static const unsigned int ssi2_data_a_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 4),
-};
-static const unsigned int ssi2_data_a_mux[] = {
-	SSI_SDATA2_A_MARK,
-};
-static const unsigned int ssi2_data_b_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(5, 13),
-};
-static const unsigned int ssi2_data_b_mux[] = {
-	SSI_SDATA2_B_MARK,
-};
-static const unsigned int ssi2_ctrl_a_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
-};
-static const unsigned int ssi2_ctrl_a_mux[] = {
-	SSI_SCK2_A_MARK, SSI_WS2_A_MARK,
-};
-static const unsigned int ssi2_ctrl_b_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
-};
-static const unsigned int ssi2_ctrl_b_mux[] = {
-	SSI_SCK2_B_MARK, SSI_WS2_B_MARK,
-};
-static const unsigned int ssi3_data_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 7),
-};
-static const unsigned int ssi3_data_mux[] = {
-	SSI_SDATA3_MARK,
-};
-static const unsigned int ssi349_ctrl_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6),
-};
-static const unsigned int ssi349_ctrl_mux[] = {
-	SSI_SCK349_MARK, SSI_WS349_MARK,
-};
-static const unsigned int ssi4_data_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 10),
-};
-static const unsigned int ssi4_data_mux[] = {
-	SSI_SDATA4_MARK,
-};
-static const unsigned int ssi4_ctrl_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
-};
-static const unsigned int ssi4_ctrl_mux[] = {
-	SSI_SCK4_MARK, SSI_WS4_MARK,
-};
-static const unsigned int ssi5_data_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 13),
-};
-static const unsigned int ssi5_data_mux[] = {
-	SSI_SDATA5_MARK,
-};
-static const unsigned int ssi5_ctrl_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
-};
-static const unsigned int ssi5_ctrl_mux[] = {
-	SSI_SCK5_MARK, SSI_WS5_MARK,
-};
-static const unsigned int ssi6_data_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 16),
-};
-static const unsigned int ssi6_data_mux[] = {
-	SSI_SDATA6_MARK,
-};
-static const unsigned int ssi6_ctrl_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
-};
-static const unsigned int ssi6_ctrl_mux[] = {
-	SSI_SCK6_MARK, SSI_WS6_MARK,
-};
-static const unsigned int ssi7_data_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 19),
-};
-static const unsigned int ssi7_data_mux[] = {
-	SSI_SDATA7_MARK,
-};
-static const unsigned int ssi78_ctrl_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
-};
-static const unsigned int ssi78_ctrl_mux[] = {
-	SSI_SCK78_MARK, SSI_WS78_MARK,
-};
-static const unsigned int ssi8_data_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 20),
-};
-static const unsigned int ssi8_data_mux[] = {
-	SSI_SDATA8_MARK,
-};
-static const unsigned int ssi9_data_a_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 21),
-};
-static const unsigned int ssi9_data_a_mux[] = {
-	SSI_SDATA9_A_MARK,
-};
-static const unsigned int ssi9_data_b_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(5, 14),
-};
-static const unsigned int ssi9_data_b_mux[] = {
-	SSI_SDATA9_B_MARK,
-};
-static const unsigned int ssi9_ctrl_a_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
-};
-static const unsigned int ssi9_ctrl_a_mux[] = {
-	SSI_SCK9_A_MARK, SSI_WS9_A_MARK,
-};
-static const unsigned int ssi9_ctrl_b_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
-};
-static const unsigned int ssi9_ctrl_b_mux[] = {
-	SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
-};
-
-/* - TMU -------------------------------------------------------------------- */
-static const unsigned int tmu_tclk1_a_pins[] = {
-	/* TCLK */
-	RCAR_GP_PIN(6, 23),
-};
-static const unsigned int tmu_tclk1_a_mux[] = {
-	TCLK1_A_MARK,
-};
-static const unsigned int tmu_tclk1_b_pins[] = {
-	/* TCLK */
-	RCAR_GP_PIN(5, 19),
-};
-static const unsigned int tmu_tclk1_b_mux[] = {
-	TCLK1_B_MARK,
-};
-static const unsigned int tmu_tclk2_a_pins[] = {
-	/* TCLK */
-	RCAR_GP_PIN(6, 19),
-};
-static const unsigned int tmu_tclk2_a_mux[] = {
-	TCLK2_A_MARK,
-};
-static const unsigned int tmu_tclk2_b_pins[] = {
-	/* TCLK */
-	RCAR_GP_PIN(6, 28),
-};
-static const unsigned int tmu_tclk2_b_mux[] = {
-	TCLK2_B_MARK,
-};
-
-/* - TPU ------------------------------------------------------------------- */
-static const unsigned int tpu_to0_pins[] = {
-	/* TPU0TO0 */
-	RCAR_GP_PIN(6, 28),
-};
-static const unsigned int tpu_to0_mux[] = {
-	TPU0TO0_MARK,
-};
-static const unsigned int tpu_to1_pins[] = {
-	/* TPU0TO1 */
-	RCAR_GP_PIN(6, 29),
-};
-static const unsigned int tpu_to1_mux[] = {
-	TPU0TO1_MARK,
-};
-static const unsigned int tpu_to2_pins[] = {
-	/* TPU0TO2 */
-	RCAR_GP_PIN(6, 30),
-};
-static const unsigned int tpu_to2_mux[] = {
-	TPU0TO2_MARK,
-};
-static const unsigned int tpu_to3_pins[] = {
-	/* TPU0TO3 */
-	RCAR_GP_PIN(6, 31),
-};
-static const unsigned int tpu_to3_mux[] = {
-	TPU0TO3_MARK,
-};
-
-/* - USB0 ------------------------------------------------------------------- */
-static const unsigned int usb0_pins[] = {
-	/* PWEN, OVC */
-	RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
-};
-static const unsigned int usb0_mux[] = {
-	USB0_PWEN_MARK, USB0_OVC_MARK,
-};
-/* - USB1 ------------------------------------------------------------------- */
-static const unsigned int usb1_pins[] = {
-	/* PWEN, OVC */
-	RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
-};
-static const unsigned int usb1_mux[] = {
-	USB1_PWEN_MARK, USB1_OVC_MARK,
-};
-/* - USB2 ------------------------------------------------------------------- */
-static const unsigned int usb2_pins[] = {
-	/* PWEN, OVC */
-	RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
-};
-static const unsigned int usb2_mux[] = {
-	USB2_PWEN_MARK, USB2_OVC_MARK,
-};
-
-/* - USB30 ------------------------------------------------------------------ */
-static const unsigned int usb30_pins[] = {
-	/* PWEN, OVC */
-	RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
-};
-static const unsigned int usb30_mux[] = {
-	USB30_PWEN_MARK, USB30_OVC_MARK,
-};
-/* - USB31 ------------------------------------------------------------------ */
-static const unsigned int usb31_pins[] = {
-	/* PWEN, OVC */
-	RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
-};
-static const unsigned int usb31_mux[] = {
-	USB31_PWEN_MARK, USB31_OVC_MARK,
-};
-
-static const struct sh_pfc_pin_group pinmux_groups[] = {
-	SH_PFC_PIN_GROUP(audio_clk_a_a),
-	SH_PFC_PIN_GROUP(audio_clk_a_b),
-	SH_PFC_PIN_GROUP(audio_clk_a_c),
-	SH_PFC_PIN_GROUP(audio_clk_b_a),
-	SH_PFC_PIN_GROUP(audio_clk_b_b),
-	SH_PFC_PIN_GROUP(audio_clk_c_a),
-	SH_PFC_PIN_GROUP(audio_clk_c_b),
-	SH_PFC_PIN_GROUP(audio_clkout_a),
-	SH_PFC_PIN_GROUP(audio_clkout_b),
-	SH_PFC_PIN_GROUP(audio_clkout_c),
-	SH_PFC_PIN_GROUP(audio_clkout_d),
-	SH_PFC_PIN_GROUP(audio_clkout1_a),
-	SH_PFC_PIN_GROUP(audio_clkout1_b),
-	SH_PFC_PIN_GROUP(audio_clkout2_a),
-	SH_PFC_PIN_GROUP(audio_clkout2_b),
-	SH_PFC_PIN_GROUP(audio_clkout3_a),
-	SH_PFC_PIN_GROUP(audio_clkout3_b),
-	SH_PFC_PIN_GROUP(avb_link),
-	SH_PFC_PIN_GROUP(avb_magic),
-	SH_PFC_PIN_GROUP(avb_phy_int),
-	SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio),	/* Deprecated */
-	SH_PFC_PIN_GROUP(avb_mdio),
-	SH_PFC_PIN_GROUP(avb_mii),
-	SH_PFC_PIN_GROUP(avb_avtp_pps),
-	SH_PFC_PIN_GROUP(avb_avtp_match_a),
-	SH_PFC_PIN_GROUP(avb_avtp_capture_a),
-	SH_PFC_PIN_GROUP(avb_avtp_match_b),
-	SH_PFC_PIN_GROUP(avb_avtp_capture_b),
-	SH_PFC_PIN_GROUP(can0_data_a),
-	SH_PFC_PIN_GROUP(can0_data_b),
-	SH_PFC_PIN_GROUP(can1_data),
-	SH_PFC_PIN_GROUP(can_clk),
-	SH_PFC_PIN_GROUP(canfd0_data_a),
-	SH_PFC_PIN_GROUP(canfd0_data_b),
-	SH_PFC_PIN_GROUP(canfd1_data),
-	SH_PFC_PIN_GROUP(drif0_ctrl_a),
-	SH_PFC_PIN_GROUP(drif0_data0_a),
-	SH_PFC_PIN_GROUP(drif0_data1_a),
-	SH_PFC_PIN_GROUP(drif0_ctrl_b),
-	SH_PFC_PIN_GROUP(drif0_data0_b),
-	SH_PFC_PIN_GROUP(drif0_data1_b),
-	SH_PFC_PIN_GROUP(drif0_ctrl_c),
-	SH_PFC_PIN_GROUP(drif0_data0_c),
-	SH_PFC_PIN_GROUP(drif0_data1_c),
-	SH_PFC_PIN_GROUP(drif1_ctrl_a),
-	SH_PFC_PIN_GROUP(drif1_data0_a),
-	SH_PFC_PIN_GROUP(drif1_data1_a),
-	SH_PFC_PIN_GROUP(drif1_ctrl_b),
-	SH_PFC_PIN_GROUP(drif1_data0_b),
-	SH_PFC_PIN_GROUP(drif1_data1_b),
-	SH_PFC_PIN_GROUP(drif1_ctrl_c),
-	SH_PFC_PIN_GROUP(drif1_data0_c),
-	SH_PFC_PIN_GROUP(drif1_data1_c),
-	SH_PFC_PIN_GROUP(drif2_ctrl_a),
-	SH_PFC_PIN_GROUP(drif2_data0_a),
-	SH_PFC_PIN_GROUP(drif2_data1_a),
-	SH_PFC_PIN_GROUP(drif2_ctrl_b),
-	SH_PFC_PIN_GROUP(drif2_data0_b),
-	SH_PFC_PIN_GROUP(drif2_data1_b),
-	SH_PFC_PIN_GROUP(drif3_ctrl_a),
-	SH_PFC_PIN_GROUP(drif3_data0_a),
-	SH_PFC_PIN_GROUP(drif3_data1_a),
-	SH_PFC_PIN_GROUP(drif3_ctrl_b),
-	SH_PFC_PIN_GROUP(drif3_data0_b),
-	SH_PFC_PIN_GROUP(drif3_data1_b),
-	SH_PFC_PIN_GROUP(du_rgb666),
-	SH_PFC_PIN_GROUP(du_rgb888),
-	SH_PFC_PIN_GROUP(du_clk_out_0),
-	SH_PFC_PIN_GROUP(du_clk_out_1),
-	SH_PFC_PIN_GROUP(du_sync),
-	SH_PFC_PIN_GROUP(du_oddf),
-	SH_PFC_PIN_GROUP(du_cde),
-	SH_PFC_PIN_GROUP(du_disp),
-	SH_PFC_PIN_GROUP(hscif0_data),
-	SH_PFC_PIN_GROUP(hscif0_clk),
-	SH_PFC_PIN_GROUP(hscif0_ctrl),
-	SH_PFC_PIN_GROUP(hscif1_data_a),
-	SH_PFC_PIN_GROUP(hscif1_clk_a),
-	SH_PFC_PIN_GROUP(hscif1_ctrl_a),
-	SH_PFC_PIN_GROUP(hscif1_data_b),
-	SH_PFC_PIN_GROUP(hscif1_clk_b),
-	SH_PFC_PIN_GROUP(hscif1_ctrl_b),
-	SH_PFC_PIN_GROUP(hscif2_data_a),
-	SH_PFC_PIN_GROUP(hscif2_clk_a),
-	SH_PFC_PIN_GROUP(hscif2_ctrl_a),
-	SH_PFC_PIN_GROUP(hscif2_data_b),
-	SH_PFC_PIN_GROUP(hscif2_clk_b),
-	SH_PFC_PIN_GROUP(hscif2_ctrl_b),
-	SH_PFC_PIN_GROUP(hscif3_data_a),
-	SH_PFC_PIN_GROUP(hscif3_clk),
-	SH_PFC_PIN_GROUP(hscif3_ctrl),
-	SH_PFC_PIN_GROUP(hscif3_data_b),
-	SH_PFC_PIN_GROUP(hscif3_data_c),
-	SH_PFC_PIN_GROUP(hscif3_data_d),
-	SH_PFC_PIN_GROUP(hscif4_data_a),
-	SH_PFC_PIN_GROUP(hscif4_clk),
-	SH_PFC_PIN_GROUP(hscif4_ctrl),
-	SH_PFC_PIN_GROUP(hscif4_data_b),
-	SH_PFC_PIN_GROUP(i2c0),
-	SH_PFC_PIN_GROUP(i2c1_a),
-	SH_PFC_PIN_GROUP(i2c1_b),
-	SH_PFC_PIN_GROUP(i2c2_a),
-	SH_PFC_PIN_GROUP(i2c2_b),
-	SH_PFC_PIN_GROUP(i2c3),
-	SH_PFC_PIN_GROUP(i2c5),
-	SH_PFC_PIN_GROUP(i2c6_a),
-	SH_PFC_PIN_GROUP(i2c6_b),
-	SH_PFC_PIN_GROUP(i2c6_c),
-	SH_PFC_PIN_GROUP(intc_ex_irq0),
-	SH_PFC_PIN_GROUP(intc_ex_irq1),
-	SH_PFC_PIN_GROUP(intc_ex_irq2),
-	SH_PFC_PIN_GROUP(intc_ex_irq3),
-	SH_PFC_PIN_GROUP(intc_ex_irq4),
-	SH_PFC_PIN_GROUP(intc_ex_irq5),
-	SH_PFC_PIN_GROUP(msiof0_clk),
-	SH_PFC_PIN_GROUP(msiof0_sync),
-	SH_PFC_PIN_GROUP(msiof0_ss1),
-	SH_PFC_PIN_GROUP(msiof0_ss2),
-	SH_PFC_PIN_GROUP(msiof0_txd),
-	SH_PFC_PIN_GROUP(msiof0_rxd),
-	SH_PFC_PIN_GROUP(msiof1_clk_a),
-	SH_PFC_PIN_GROUP(msiof1_sync_a),
-	SH_PFC_PIN_GROUP(msiof1_ss1_a),
-	SH_PFC_PIN_GROUP(msiof1_ss2_a),
-	SH_PFC_PIN_GROUP(msiof1_txd_a),
-	SH_PFC_PIN_GROUP(msiof1_rxd_a),
-	SH_PFC_PIN_GROUP(msiof1_clk_b),
-	SH_PFC_PIN_GROUP(msiof1_sync_b),
-	SH_PFC_PIN_GROUP(msiof1_ss1_b),
-	SH_PFC_PIN_GROUP(msiof1_ss2_b),
-	SH_PFC_PIN_GROUP(msiof1_txd_b),
-	SH_PFC_PIN_GROUP(msiof1_rxd_b),
-	SH_PFC_PIN_GROUP(msiof1_clk_c),
-	SH_PFC_PIN_GROUP(msiof1_sync_c),
-	SH_PFC_PIN_GROUP(msiof1_ss1_c),
-	SH_PFC_PIN_GROUP(msiof1_ss2_c),
-	SH_PFC_PIN_GROUP(msiof1_txd_c),
-	SH_PFC_PIN_GROUP(msiof1_rxd_c),
-	SH_PFC_PIN_GROUP(msiof1_clk_d),
-	SH_PFC_PIN_GROUP(msiof1_sync_d),
-	SH_PFC_PIN_GROUP(msiof1_ss1_d),
-	SH_PFC_PIN_GROUP(msiof1_ss2_d),
-	SH_PFC_PIN_GROUP(msiof1_txd_d),
-	SH_PFC_PIN_GROUP(msiof1_rxd_d),
-	SH_PFC_PIN_GROUP(msiof1_clk_e),
-	SH_PFC_PIN_GROUP(msiof1_sync_e),
-	SH_PFC_PIN_GROUP(msiof1_ss1_e),
-	SH_PFC_PIN_GROUP(msiof1_ss2_e),
-	SH_PFC_PIN_GROUP(msiof1_txd_e),
-	SH_PFC_PIN_GROUP(msiof1_rxd_e),
-	SH_PFC_PIN_GROUP(msiof1_clk_f),
-	SH_PFC_PIN_GROUP(msiof1_sync_f),
-	SH_PFC_PIN_GROUP(msiof1_ss1_f),
-	SH_PFC_PIN_GROUP(msiof1_ss2_f),
-	SH_PFC_PIN_GROUP(msiof1_txd_f),
-	SH_PFC_PIN_GROUP(msiof1_rxd_f),
-	SH_PFC_PIN_GROUP(msiof1_clk_g),
-	SH_PFC_PIN_GROUP(msiof1_sync_g),
-	SH_PFC_PIN_GROUP(msiof1_ss1_g),
-	SH_PFC_PIN_GROUP(msiof1_ss2_g),
-	SH_PFC_PIN_GROUP(msiof1_txd_g),
-	SH_PFC_PIN_GROUP(msiof1_rxd_g),
-	SH_PFC_PIN_GROUP(msiof2_clk_a),
-	SH_PFC_PIN_GROUP(msiof2_sync_a),
-	SH_PFC_PIN_GROUP(msiof2_ss1_a),
-	SH_PFC_PIN_GROUP(msiof2_ss2_a),
-	SH_PFC_PIN_GROUP(msiof2_txd_a),
-	SH_PFC_PIN_GROUP(msiof2_rxd_a),
-	SH_PFC_PIN_GROUP(msiof2_clk_b),
-	SH_PFC_PIN_GROUP(msiof2_sync_b),
-	SH_PFC_PIN_GROUP(msiof2_ss1_b),
-	SH_PFC_PIN_GROUP(msiof2_ss2_b),
-	SH_PFC_PIN_GROUP(msiof2_txd_b),
-	SH_PFC_PIN_GROUP(msiof2_rxd_b),
-	SH_PFC_PIN_GROUP(msiof2_clk_c),
-	SH_PFC_PIN_GROUP(msiof2_sync_c),
-	SH_PFC_PIN_GROUP(msiof2_ss1_c),
-	SH_PFC_PIN_GROUP(msiof2_ss2_c),
-	SH_PFC_PIN_GROUP(msiof2_txd_c),
-	SH_PFC_PIN_GROUP(msiof2_rxd_c),
-	SH_PFC_PIN_GROUP(msiof2_clk_d),
-	SH_PFC_PIN_GROUP(msiof2_sync_d),
-	SH_PFC_PIN_GROUP(msiof2_ss1_d),
-	SH_PFC_PIN_GROUP(msiof2_ss2_d),
-	SH_PFC_PIN_GROUP(msiof2_txd_d),
-	SH_PFC_PIN_GROUP(msiof2_rxd_d),
-	SH_PFC_PIN_GROUP(msiof3_clk_a),
-	SH_PFC_PIN_GROUP(msiof3_sync_a),
-	SH_PFC_PIN_GROUP(msiof3_ss1_a),
-	SH_PFC_PIN_GROUP(msiof3_ss2_a),
-	SH_PFC_PIN_GROUP(msiof3_txd_a),
-	SH_PFC_PIN_GROUP(msiof3_rxd_a),
-	SH_PFC_PIN_GROUP(msiof3_clk_b),
-	SH_PFC_PIN_GROUP(msiof3_sync_b),
-	SH_PFC_PIN_GROUP(msiof3_ss1_b),
-	SH_PFC_PIN_GROUP(msiof3_ss2_b),
-	SH_PFC_PIN_GROUP(msiof3_txd_b),
-	SH_PFC_PIN_GROUP(msiof3_rxd_b),
-	SH_PFC_PIN_GROUP(msiof3_clk_c),
-	SH_PFC_PIN_GROUP(msiof3_sync_c),
-	SH_PFC_PIN_GROUP(msiof3_txd_c),
-	SH_PFC_PIN_GROUP(msiof3_rxd_c),
-	SH_PFC_PIN_GROUP(msiof3_clk_d),
-	SH_PFC_PIN_GROUP(msiof3_sync_d),
-	SH_PFC_PIN_GROUP(msiof3_ss1_d),
-	SH_PFC_PIN_GROUP(msiof3_txd_d),
-	SH_PFC_PIN_GROUP(msiof3_rxd_d),
-	SH_PFC_PIN_GROUP(pwm0),
-	SH_PFC_PIN_GROUP(pwm1_a),
-	SH_PFC_PIN_GROUP(pwm1_b),
-	SH_PFC_PIN_GROUP(pwm2_a),
-	SH_PFC_PIN_GROUP(pwm2_b),
-	SH_PFC_PIN_GROUP(pwm3_a),
-	SH_PFC_PIN_GROUP(pwm3_b),
-	SH_PFC_PIN_GROUP(pwm4_a),
-	SH_PFC_PIN_GROUP(pwm4_b),
-	SH_PFC_PIN_GROUP(pwm5_a),
-	SH_PFC_PIN_GROUP(pwm5_b),
-	SH_PFC_PIN_GROUP(pwm6_a),
-	SH_PFC_PIN_GROUP(pwm6_b),
-	SH_PFC_PIN_GROUP(qspi0_ctrl),
-	SH_PFC_PIN_GROUP(qspi0_data2),
-	SH_PFC_PIN_GROUP(qspi0_data4),
-	SH_PFC_PIN_GROUP(qspi1_ctrl),
-	SH_PFC_PIN_GROUP(qspi1_data2),
-	SH_PFC_PIN_GROUP(qspi1_data4),
-	SH_PFC_PIN_GROUP(sata0_devslp_a),
-	SH_PFC_PIN_GROUP(sata0_devslp_b),
-	SH_PFC_PIN_GROUP(scif0_data),
-	SH_PFC_PIN_GROUP(scif0_clk),
-	SH_PFC_PIN_GROUP(scif0_ctrl),
-	SH_PFC_PIN_GROUP(scif1_data_a),
-	SH_PFC_PIN_GROUP(scif1_clk),
-	SH_PFC_PIN_GROUP(scif1_ctrl),
-	SH_PFC_PIN_GROUP(scif1_data_b),
-	SH_PFC_PIN_GROUP(scif2_data_a),
-	SH_PFC_PIN_GROUP(scif2_clk),
-	SH_PFC_PIN_GROUP(scif2_data_b),
-	SH_PFC_PIN_GROUP(scif3_data_a),
-	SH_PFC_PIN_GROUP(scif3_clk),
-	SH_PFC_PIN_GROUP(scif3_ctrl),
-	SH_PFC_PIN_GROUP(scif3_data_b),
-	SH_PFC_PIN_GROUP(scif4_data_a),
-	SH_PFC_PIN_GROUP(scif4_clk_a),
-	SH_PFC_PIN_GROUP(scif4_ctrl_a),
-	SH_PFC_PIN_GROUP(scif4_data_b),
-	SH_PFC_PIN_GROUP(scif4_clk_b),
-	SH_PFC_PIN_GROUP(scif4_ctrl_b),
-	SH_PFC_PIN_GROUP(scif4_data_c),
-	SH_PFC_PIN_GROUP(scif4_clk_c),
-	SH_PFC_PIN_GROUP(scif4_ctrl_c),
-	SH_PFC_PIN_GROUP(scif5_data),
-	SH_PFC_PIN_GROUP(scif5_clk),
-	SH_PFC_PIN_GROUP(scif_clk_a),
-	SH_PFC_PIN_GROUP(scif_clk_b),
-	SH_PFC_PIN_GROUP(sdhi0_data1),
-	SH_PFC_PIN_GROUP(sdhi0_data4),
-	SH_PFC_PIN_GROUP(sdhi0_ctrl),
-	SH_PFC_PIN_GROUP(sdhi0_cd),
-	SH_PFC_PIN_GROUP(sdhi0_wp),
-	SH_PFC_PIN_GROUP(sdhi1_data1),
-	SH_PFC_PIN_GROUP(sdhi1_data4),
-	SH_PFC_PIN_GROUP(sdhi1_ctrl),
-	SH_PFC_PIN_GROUP(sdhi1_cd),
-	SH_PFC_PIN_GROUP(sdhi1_wp),
-	SH_PFC_PIN_GROUP(sdhi2_data1),
-	SH_PFC_PIN_GROUP(sdhi2_data4),
-	SH_PFC_PIN_GROUP(sdhi2_data8),
-	SH_PFC_PIN_GROUP(sdhi2_ctrl),
-	SH_PFC_PIN_GROUP(sdhi2_cd_a),
-	SH_PFC_PIN_GROUP(sdhi2_wp_a),
-	SH_PFC_PIN_GROUP(sdhi2_cd_b),
-	SH_PFC_PIN_GROUP(sdhi2_wp_b),
-	SH_PFC_PIN_GROUP(sdhi2_ds),
-	SH_PFC_PIN_GROUP(sdhi3_data1),
-	SH_PFC_PIN_GROUP(sdhi3_data4),
-	SH_PFC_PIN_GROUP(sdhi3_data8),
-	SH_PFC_PIN_GROUP(sdhi3_ctrl),
-	SH_PFC_PIN_GROUP(sdhi3_cd),
-	SH_PFC_PIN_GROUP(sdhi3_wp),
-	SH_PFC_PIN_GROUP(sdhi3_ds),
-	SH_PFC_PIN_GROUP(ssi0_data),
-	SH_PFC_PIN_GROUP(ssi01239_ctrl),
-	SH_PFC_PIN_GROUP(ssi1_data_a),
-	SH_PFC_PIN_GROUP(ssi1_data_b),
-	SH_PFC_PIN_GROUP(ssi1_ctrl_a),
-	SH_PFC_PIN_GROUP(ssi1_ctrl_b),
-	SH_PFC_PIN_GROUP(ssi2_data_a),
-	SH_PFC_PIN_GROUP(ssi2_data_b),
-	SH_PFC_PIN_GROUP(ssi2_ctrl_a),
-	SH_PFC_PIN_GROUP(ssi2_ctrl_b),
-	SH_PFC_PIN_GROUP(ssi3_data),
-	SH_PFC_PIN_GROUP(ssi349_ctrl),
-	SH_PFC_PIN_GROUP(ssi4_data),
-	SH_PFC_PIN_GROUP(ssi4_ctrl),
-	SH_PFC_PIN_GROUP(ssi5_data),
-	SH_PFC_PIN_GROUP(ssi5_ctrl),
-	SH_PFC_PIN_GROUP(ssi6_data),
-	SH_PFC_PIN_GROUP(ssi6_ctrl),
-	SH_PFC_PIN_GROUP(ssi7_data),
-	SH_PFC_PIN_GROUP(ssi78_ctrl),
-	SH_PFC_PIN_GROUP(ssi8_data),
-	SH_PFC_PIN_GROUP(ssi9_data_a),
-	SH_PFC_PIN_GROUP(ssi9_data_b),
-	SH_PFC_PIN_GROUP(ssi9_ctrl_a),
-	SH_PFC_PIN_GROUP(ssi9_ctrl_b),
-	SH_PFC_PIN_GROUP(tmu_tclk1_a),
-	SH_PFC_PIN_GROUP(tmu_tclk1_b),
-	SH_PFC_PIN_GROUP(tmu_tclk2_a),
-	SH_PFC_PIN_GROUP(tmu_tclk2_b),
-	SH_PFC_PIN_GROUP(tpu_to0),
-	SH_PFC_PIN_GROUP(tpu_to1),
-	SH_PFC_PIN_GROUP(tpu_to2),
-	SH_PFC_PIN_GROUP(tpu_to3),
-	SH_PFC_PIN_GROUP(usb0),
-	SH_PFC_PIN_GROUP(usb1),
-	SH_PFC_PIN_GROUP(usb2),
-	SH_PFC_PIN_GROUP(usb30),
-	SH_PFC_PIN_GROUP(usb31),
-};
-
-static const char * const audio_clk_groups[] = {
-	"audio_clk_a_a",
-	"audio_clk_a_b",
-	"audio_clk_a_c",
-	"audio_clk_b_a",
-	"audio_clk_b_b",
-	"audio_clk_c_a",
-	"audio_clk_c_b",
-	"audio_clkout_a",
-	"audio_clkout_b",
-	"audio_clkout_c",
-	"audio_clkout_d",
-	"audio_clkout1_a",
-	"audio_clkout1_b",
-	"audio_clkout2_a",
-	"audio_clkout2_b",
-	"audio_clkout3_a",
-	"audio_clkout3_b",
-};
-
-static const char * const avb_groups[] = {
-	"avb_link",
-	"avb_magic",
-	"avb_phy_int",
-	"avb_mdc",	/* Deprecated, please use "avb_mdio" instead */
-	"avb_mdio",
-	"avb_mii",
-	"avb_avtp_pps",
-	"avb_avtp_match_a",
-	"avb_avtp_capture_a",
-	"avb_avtp_match_b",
-	"avb_avtp_capture_b",
-};
-
-static const char * const can0_groups[] = {
-	"can0_data_a",
-	"can0_data_b",
-};
-
-static const char * const can1_groups[] = {
-	"can1_data",
-};
-
-static const char * const can_clk_groups[] = {
-	"can_clk",
-};
-
-static const char * const canfd0_groups[] = {
-	"canfd0_data_a",
-	"canfd0_data_b",
-};
-
-static const char * const canfd1_groups[] = {
-	"canfd1_data",
-};
-
-static const char * const drif0_groups[] = {
-	"drif0_ctrl_a",
-	"drif0_data0_a",
-	"drif0_data1_a",
-	"drif0_ctrl_b",
-	"drif0_data0_b",
-	"drif0_data1_b",
-	"drif0_ctrl_c",
-	"drif0_data0_c",
-	"drif0_data1_c",
-};
-
-static const char * const drif1_groups[] = {
-	"drif1_ctrl_a",
-	"drif1_data0_a",
-	"drif1_data1_a",
-	"drif1_ctrl_b",
-	"drif1_data0_b",
-	"drif1_data1_b",
-	"drif1_ctrl_c",
-	"drif1_data0_c",
-	"drif1_data1_c",
-};
-
-static const char * const drif2_groups[] = {
-	"drif2_ctrl_a",
-	"drif2_data0_a",
-	"drif2_data1_a",
-	"drif2_ctrl_b",
-	"drif2_data0_b",
-	"drif2_data1_b",
-};
-
-static const char * const drif3_groups[] = {
-	"drif3_ctrl_a",
-	"drif3_data0_a",
-	"drif3_data1_a",
-	"drif3_ctrl_b",
-	"drif3_data0_b",
-	"drif3_data1_b",
-};
-
-static const char * const du_groups[] = {
-	"du_rgb666",
-	"du_rgb888",
-	"du_clk_out_0",
-	"du_clk_out_1",
-	"du_sync",
-	"du_oddf",
-	"du_cde",
-	"du_disp",
-};
-
-static const char * const hscif0_groups[] = {
-	"hscif0_data",
-	"hscif0_clk",
-	"hscif0_ctrl",
-};
-
-static const char * const hscif1_groups[] = {
-	"hscif1_data_a",
-	"hscif1_clk_a",
-	"hscif1_ctrl_a",
-	"hscif1_data_b",
-	"hscif1_clk_b",
-	"hscif1_ctrl_b",
-};
-
-static const char * const hscif2_groups[] = {
-	"hscif2_data_a",
-	"hscif2_clk_a",
-	"hscif2_ctrl_a",
-	"hscif2_data_b",
-	"hscif2_clk_b",
-	"hscif2_ctrl_b",
-};
-
-static const char * const hscif3_groups[] = {
-	"hscif3_data_a",
-	"hscif3_clk",
-	"hscif3_ctrl",
-	"hscif3_data_b",
-	"hscif3_data_c",
-	"hscif3_data_d",
-};
-
-static const char * const hscif4_groups[] = {
-	"hscif4_data_a",
-	"hscif4_clk",
-	"hscif4_ctrl",
-	"hscif4_data_b",
-};
-
-static const char * const i2c0_groups[] = {
-	"i2c0",
-};
-
-static const char * const i2c1_groups[] = {
-	"i2c1_a",
-	"i2c1_b",
-};
-
-static const char * const i2c2_groups[] = {
-	"i2c2_a",
-	"i2c2_b",
-};
-
-static const char * const i2c3_groups[] = {
-	"i2c3",
-};
-
-static const char * const i2c5_groups[] = {
-	"i2c5",
-};
-
-static const char * const i2c6_groups[] = {
-	"i2c6_a",
-	"i2c6_b",
-	"i2c6_c",
-};
-
-static const char * const intc_ex_groups[] = {
-	"intc_ex_irq0",
-	"intc_ex_irq1",
-	"intc_ex_irq2",
-	"intc_ex_irq3",
-	"intc_ex_irq4",
-	"intc_ex_irq5",
-};
-
-static const char * const msiof0_groups[] = {
-	"msiof0_clk",
-	"msiof0_sync",
-	"msiof0_ss1",
-	"msiof0_ss2",
-	"msiof0_txd",
-	"msiof0_rxd",
-};
-
-static const char * const msiof1_groups[] = {
-	"msiof1_clk_a",
-	"msiof1_sync_a",
-	"msiof1_ss1_a",
-	"msiof1_ss2_a",
-	"msiof1_txd_a",
-	"msiof1_rxd_a",
-	"msiof1_clk_b",
-	"msiof1_sync_b",
-	"msiof1_ss1_b",
-	"msiof1_ss2_b",
-	"msiof1_txd_b",
-	"msiof1_rxd_b",
-	"msiof1_clk_c",
-	"msiof1_sync_c",
-	"msiof1_ss1_c",
-	"msiof1_ss2_c",
-	"msiof1_txd_c",
-	"msiof1_rxd_c",
-	"msiof1_clk_d",
-	"msiof1_sync_d",
-	"msiof1_ss1_d",
-	"msiof1_ss2_d",
-	"msiof1_txd_d",
-	"msiof1_rxd_d",
-	"msiof1_clk_e",
-	"msiof1_sync_e",
-	"msiof1_ss1_e",
-	"msiof1_ss2_e",
-	"msiof1_txd_e",
-	"msiof1_rxd_e",
-	"msiof1_clk_f",
-	"msiof1_sync_f",
-	"msiof1_ss1_f",
-	"msiof1_ss2_f",
-	"msiof1_txd_f",
-	"msiof1_rxd_f",
-	"msiof1_clk_g",
-	"msiof1_sync_g",
-	"msiof1_ss1_g",
-	"msiof1_ss2_g",
-	"msiof1_txd_g",
-	"msiof1_rxd_g",
-};
-
-static const char * const msiof2_groups[] = {
-	"msiof2_clk_a",
-	"msiof2_sync_a",
-	"msiof2_ss1_a",
-	"msiof2_ss2_a",
-	"msiof2_txd_a",
-	"msiof2_rxd_a",
-	"msiof2_clk_b",
-	"msiof2_sync_b",
-	"msiof2_ss1_b",
-	"msiof2_ss2_b",
-	"msiof2_txd_b",
-	"msiof2_rxd_b",
-	"msiof2_clk_c",
-	"msiof2_sync_c",
-	"msiof2_ss1_c",
-	"msiof2_ss2_c",
-	"msiof2_txd_c",
-	"msiof2_rxd_c",
-	"msiof2_clk_d",
-	"msiof2_sync_d",
-	"msiof2_ss1_d",
-	"msiof2_ss2_d",
-	"msiof2_txd_d",
-	"msiof2_rxd_d",
-};
-
-static const char * const msiof3_groups[] = {
-	"msiof3_clk_a",
-	"msiof3_sync_a",
-	"msiof3_ss1_a",
-	"msiof3_ss2_a",
-	"msiof3_txd_a",
-	"msiof3_rxd_a",
-	"msiof3_clk_b",
-	"msiof3_sync_b",
-	"msiof3_ss1_b",
-	"msiof3_ss2_b",
-	"msiof3_txd_b",
-	"msiof3_rxd_b",
-	"msiof3_clk_c",
-	"msiof3_sync_c",
-	"msiof3_txd_c",
-	"msiof3_rxd_c",
-	"msiof3_clk_d",
-	"msiof3_sync_d",
-	"msiof3_ss1_d",
-	"msiof3_txd_d",
-	"msiof3_rxd_d",
-};
-
-static const char * const pwm0_groups[] = {
-	"pwm0",
-};
-
-static const char * const pwm1_groups[] = {
-	"pwm1_a",
-	"pwm1_b",
-};
-
-static const char * const pwm2_groups[] = {
-	"pwm2_a",
-	"pwm2_b",
-};
-
-static const char * const pwm3_groups[] = {
-	"pwm3_a",
-	"pwm3_b",
-};
-
-static const char * const pwm4_groups[] = {
-	"pwm4_a",
-	"pwm4_b",
-};
-
-static const char * const pwm5_groups[] = {
-	"pwm5_a",
-	"pwm5_b",
-};
-
-static const char * const pwm6_groups[] = {
-	"pwm6_a",
-	"pwm6_b",
-};
-
-static const char * const qspi0_groups[] = {
-	"qspi0_ctrl",
-	"qspi0_data2",
-	"qspi0_data4",
-};
-
-static const char * const qspi1_groups[] = {
-	"qspi1_ctrl",
-	"qspi1_data2",
-	"qspi1_data4",
-};
-
-static const char * const sata0_groups[] = {
-	"sata0_devslp_a",
-	"sata0_devslp_b",
-};
-
-static const char * const scif0_groups[] = {
-	"scif0_data",
-	"scif0_clk",
-	"scif0_ctrl",
-};
-
-static const char * const scif1_groups[] = {
-	"scif1_data_a",
-	"scif1_clk",
-	"scif1_ctrl",
-	"scif1_data_b",
-};
-
-static const char * const scif2_groups[] = {
-	"scif2_data_a",
-	"scif2_clk",
-	"scif2_data_b",
-};
-
-static const char * const scif3_groups[] = {
-	"scif3_data_a",
-	"scif3_clk",
-	"scif3_ctrl",
-	"scif3_data_b",
-};
-
-static const char * const scif4_groups[] = {
-	"scif4_data_a",
-	"scif4_clk_a",
-	"scif4_ctrl_a",
-	"scif4_data_b",
-	"scif4_clk_b",
-	"scif4_ctrl_b",
-	"scif4_data_c",
-	"scif4_clk_c",
-	"scif4_ctrl_c",
-};
-
-static const char * const scif5_groups[] = {
-	"scif5_data",
-	"scif5_clk",
-};
-
-static const char * const scif_clk_groups[] = {
-	"scif_clk_a",
-	"scif_clk_b",
-};
-
-static const char * const sdhi0_groups[] = {
-	"sdhi0_data1",
-	"sdhi0_data4",
-	"sdhi0_ctrl",
-	"sdhi0_cd",
-	"sdhi0_wp",
-};
-
-static const char * const sdhi1_groups[] = {
-	"sdhi1_data1",
-	"sdhi1_data4",
-	"sdhi1_ctrl",
-	"sdhi1_cd",
-	"sdhi1_wp",
-};
-
-static const char * const sdhi2_groups[] = {
-	"sdhi2_data1",
-	"sdhi2_data4",
-	"sdhi2_data8",
-	"sdhi2_ctrl",
-	"sdhi2_cd_a",
-	"sdhi2_wp_a",
-	"sdhi2_cd_b",
-	"sdhi2_wp_b",
-	"sdhi2_ds",
-};
-
-static const char * const sdhi3_groups[] = {
-	"sdhi3_data1",
-	"sdhi3_data4",
-	"sdhi3_data8",
-	"sdhi3_ctrl",
-	"sdhi3_cd",
-	"sdhi3_wp",
-	"sdhi3_ds",
-};
-
-static const char * const ssi_groups[] = {
-	"ssi0_data",
-	"ssi01239_ctrl",
-	"ssi1_data_a",
-	"ssi1_data_b",
-	"ssi1_ctrl_a",
-	"ssi1_ctrl_b",
-	"ssi2_data_a",
-	"ssi2_data_b",
-	"ssi2_ctrl_a",
-	"ssi2_ctrl_b",
-	"ssi3_data",
-	"ssi349_ctrl",
-	"ssi4_data",
-	"ssi4_ctrl",
-	"ssi5_data",
-	"ssi5_ctrl",
-	"ssi6_data",
-	"ssi6_ctrl",
-	"ssi7_data",
-	"ssi78_ctrl",
-	"ssi8_data",
-	"ssi9_data_a",
-	"ssi9_data_b",
-	"ssi9_ctrl_a",
-	"ssi9_ctrl_b",
-};
-
-static const char * const tmu_groups[] = {
-	"tmu_tclk1_a",
-	"tmu_tclk1_b",
-	"tmu_tclk2_a",
-	"tmu_tclk2_b",
-};
-
-static const char * const tpu_groups[] = {
-	"tpu_to0",
-	"tpu_to1",
-	"tpu_to2",
-	"tpu_to3",
-};
-
-static const char * const usb0_groups[] = {
-	"usb0",
-};
-
-static const char * const usb1_groups[] = {
-	"usb1",
-};
-
-static const char * const usb2_groups[] = {
-	"usb2",
-};
-
-static const char * const usb30_groups[] = {
-	"usb30",
-};
-
-static const char * const usb31_groups[] = {
-	"usb31",
-};
-
-static const struct sh_pfc_function pinmux_functions[] = {
-	SH_PFC_FUNCTION(audio_clk),
-	SH_PFC_FUNCTION(avb),
-	SH_PFC_FUNCTION(can0),
-	SH_PFC_FUNCTION(can1),
-	SH_PFC_FUNCTION(can_clk),
-	SH_PFC_FUNCTION(canfd0),
-	SH_PFC_FUNCTION(canfd1),
-	SH_PFC_FUNCTION(drif0),
-	SH_PFC_FUNCTION(drif1),
-	SH_PFC_FUNCTION(drif2),
-	SH_PFC_FUNCTION(drif3),
-	SH_PFC_FUNCTION(du),
-	SH_PFC_FUNCTION(hscif0),
-	SH_PFC_FUNCTION(hscif1),
-	SH_PFC_FUNCTION(hscif2),
-	SH_PFC_FUNCTION(hscif3),
-	SH_PFC_FUNCTION(hscif4),
-	SH_PFC_FUNCTION(i2c0),
-	SH_PFC_FUNCTION(i2c1),
-	SH_PFC_FUNCTION(i2c2),
-	SH_PFC_FUNCTION(i2c3),
-	SH_PFC_FUNCTION(i2c5),
-	SH_PFC_FUNCTION(i2c6),
-	SH_PFC_FUNCTION(intc_ex),
-	SH_PFC_FUNCTION(msiof0),
-	SH_PFC_FUNCTION(msiof1),
-	SH_PFC_FUNCTION(msiof2),
-	SH_PFC_FUNCTION(msiof3),
-	SH_PFC_FUNCTION(pwm0),
-	SH_PFC_FUNCTION(pwm1),
-	SH_PFC_FUNCTION(pwm2),
-	SH_PFC_FUNCTION(pwm3),
-	SH_PFC_FUNCTION(pwm4),
-	SH_PFC_FUNCTION(pwm5),
-	SH_PFC_FUNCTION(pwm6),
-	SH_PFC_FUNCTION(qspi0),
-	SH_PFC_FUNCTION(qspi1),
-	SH_PFC_FUNCTION(sata0),
-	SH_PFC_FUNCTION(scif0),
-	SH_PFC_FUNCTION(scif1),
-	SH_PFC_FUNCTION(scif2),
-	SH_PFC_FUNCTION(scif3),
-	SH_PFC_FUNCTION(scif4),
-	SH_PFC_FUNCTION(scif5),
-	SH_PFC_FUNCTION(scif_clk),
-	SH_PFC_FUNCTION(sdhi0),
-	SH_PFC_FUNCTION(sdhi1),
-	SH_PFC_FUNCTION(sdhi2),
-	SH_PFC_FUNCTION(sdhi3),
-	SH_PFC_FUNCTION(ssi),
-	SH_PFC_FUNCTION(tmu),
-	SH_PFC_FUNCTION(tpu),
-	SH_PFC_FUNCTION(usb0),
-	SH_PFC_FUNCTION(usb1),
-	SH_PFC_FUNCTION(usb2),
-	SH_PFC_FUNCTION(usb30),
-	SH_PFC_FUNCTION(usb31),
-};
-
-static const struct pinmux_cfg_reg pinmux_config_regs[] = {
-#define F_(x, y)	FN_##y
-#define FM(x)		FN_##x
-	{ PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		GP_0_15_FN,	GPSR0_15,
-		GP_0_14_FN,	GPSR0_14,
-		GP_0_13_FN,	GPSR0_13,
-		GP_0_12_FN,	GPSR0_12,
-		GP_0_11_FN,	GPSR0_11,
-		GP_0_10_FN,	GPSR0_10,
-		GP_0_9_FN,	GPSR0_9,
-		GP_0_8_FN,	GPSR0_8,
-		GP_0_7_FN,	GPSR0_7,
-		GP_0_6_FN,	GPSR0_6,
-		GP_0_5_FN,	GPSR0_5,
-		GP_0_4_FN,	GPSR0_4,
-		GP_0_3_FN,	GPSR0_3,
-		GP_0_2_FN,	GPSR0_2,
-		GP_0_1_FN,	GPSR0_1,
-		GP_0_0_FN,	GPSR0_0, ))
-	},
-	{ PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1, GROUP(
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		GP_1_27_FN,	GPSR1_27,
-		GP_1_26_FN,	GPSR1_26,
-		GP_1_25_FN,	GPSR1_25,
-		GP_1_24_FN,	GPSR1_24,
-		GP_1_23_FN,	GPSR1_23,
-		GP_1_22_FN,	GPSR1_22,
-		GP_1_21_FN,	GPSR1_21,
-		GP_1_20_FN,	GPSR1_20,
-		GP_1_19_FN,	GPSR1_19,
-		GP_1_18_FN,	GPSR1_18,
-		GP_1_17_FN,	GPSR1_17,
-		GP_1_16_FN,	GPSR1_16,
-		GP_1_15_FN,	GPSR1_15,
-		GP_1_14_FN,	GPSR1_14,
-		GP_1_13_FN,	GPSR1_13,
-		GP_1_12_FN,	GPSR1_12,
-		GP_1_11_FN,	GPSR1_11,
-		GP_1_10_FN,	GPSR1_10,
-		GP_1_9_FN,	GPSR1_9,
-		GP_1_8_FN,	GPSR1_8,
-		GP_1_7_FN,	GPSR1_7,
-		GP_1_6_FN,	GPSR1_6,
-		GP_1_5_FN,	GPSR1_5,
-		GP_1_4_FN,	GPSR1_4,
-		GP_1_3_FN,	GPSR1_3,
-		GP_1_2_FN,	GPSR1_2,
-		GP_1_1_FN,	GPSR1_1,
-		GP_1_0_FN,	GPSR1_0, ))
-	},
-	{ PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		GP_2_14_FN,	GPSR2_14,
-		GP_2_13_FN,	GPSR2_13,
-		GP_2_12_FN,	GPSR2_12,
-		GP_2_11_FN,	GPSR2_11,
-		GP_2_10_FN,	GPSR2_10,
-		GP_2_9_FN,	GPSR2_9,
-		GP_2_8_FN,	GPSR2_8,
-		GP_2_7_FN,	GPSR2_7,
-		GP_2_6_FN,	GPSR2_6,
-		GP_2_5_FN,	GPSR2_5,
-		GP_2_4_FN,	GPSR2_4,
-		GP_2_3_FN,	GPSR2_3,
-		GP_2_2_FN,	GPSR2_2,
-		GP_2_1_FN,	GPSR2_1,
-		GP_2_0_FN,	GPSR2_0, ))
-	},
-	{ PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		GP_3_15_FN,	GPSR3_15,
-		GP_3_14_FN,	GPSR3_14,
-		GP_3_13_FN,	GPSR3_13,
-		GP_3_12_FN,	GPSR3_12,
-		GP_3_11_FN,	GPSR3_11,
-		GP_3_10_FN,	GPSR3_10,
-		GP_3_9_FN,	GPSR3_9,
-		GP_3_8_FN,	GPSR3_8,
-		GP_3_7_FN,	GPSR3_7,
-		GP_3_6_FN,	GPSR3_6,
-		GP_3_5_FN,	GPSR3_5,
-		GP_3_4_FN,	GPSR3_4,
-		GP_3_3_FN,	GPSR3_3,
-		GP_3_2_FN,	GPSR3_2,
-		GP_3_1_FN,	GPSR3_1,
-		GP_3_0_FN,	GPSR3_0, ))
-	},
-	{ PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		GP_4_17_FN,	GPSR4_17,
-		GP_4_16_FN,	GPSR4_16,
-		GP_4_15_FN,	GPSR4_15,
-		GP_4_14_FN,	GPSR4_14,
-		GP_4_13_FN,	GPSR4_13,
-		GP_4_12_FN,	GPSR4_12,
-		GP_4_11_FN,	GPSR4_11,
-		GP_4_10_FN,	GPSR4_10,
-		GP_4_9_FN,	GPSR4_9,
-		GP_4_8_FN,	GPSR4_8,
-		GP_4_7_FN,	GPSR4_7,
-		GP_4_6_FN,	GPSR4_6,
-		GP_4_5_FN,	GPSR4_5,
-		GP_4_4_FN,	GPSR4_4,
-		GP_4_3_FN,	GPSR4_3,
-		GP_4_2_FN,	GPSR4_2,
-		GP_4_1_FN,	GPSR4_1,
-		GP_4_0_FN,	GPSR4_0, ))
-	},
-	{ PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		GP_5_25_FN,	GPSR5_25,
-		GP_5_24_FN,	GPSR5_24,
-		GP_5_23_FN,	GPSR5_23,
-		GP_5_22_FN,	GPSR5_22,
-		GP_5_21_FN,	GPSR5_21,
-		GP_5_20_FN,	GPSR5_20,
-		GP_5_19_FN,	GPSR5_19,
-		GP_5_18_FN,	GPSR5_18,
-		GP_5_17_FN,	GPSR5_17,
-		GP_5_16_FN,	GPSR5_16,
-		GP_5_15_FN,	GPSR5_15,
-		GP_5_14_FN,	GPSR5_14,
-		GP_5_13_FN,	GPSR5_13,
-		GP_5_12_FN,	GPSR5_12,
-		GP_5_11_FN,	GPSR5_11,
-		GP_5_10_FN,	GPSR5_10,
-		GP_5_9_FN,	GPSR5_9,
-		GP_5_8_FN,	GPSR5_8,
-		GP_5_7_FN,	GPSR5_7,
-		GP_5_6_FN,	GPSR5_6,
-		GP_5_5_FN,	GPSR5_5,
-		GP_5_4_FN,	GPSR5_4,
-		GP_5_3_FN,	GPSR5_3,
-		GP_5_2_FN,	GPSR5_2,
-		GP_5_1_FN,	GPSR5_1,
-		GP_5_0_FN,	GPSR5_0, ))
-	},
-	{ PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1, GROUP(
-		GP_6_31_FN,	GPSR6_31,
-		GP_6_30_FN,	GPSR6_30,
-		GP_6_29_FN,	GPSR6_29,
-		GP_6_28_FN,	GPSR6_28,
-		GP_6_27_FN,	GPSR6_27,
-		GP_6_26_FN,	GPSR6_26,
-		GP_6_25_FN,	GPSR6_25,
-		GP_6_24_FN,	GPSR6_24,
-		GP_6_23_FN,	GPSR6_23,
-		GP_6_22_FN,	GPSR6_22,
-		GP_6_21_FN,	GPSR6_21,
-		GP_6_20_FN,	GPSR6_20,
-		GP_6_19_FN,	GPSR6_19,
-		GP_6_18_FN,	GPSR6_18,
-		GP_6_17_FN,	GPSR6_17,
-		GP_6_16_FN,	GPSR6_16,
-		GP_6_15_FN,	GPSR6_15,
-		GP_6_14_FN,	GPSR6_14,
-		GP_6_13_FN,	GPSR6_13,
-		GP_6_12_FN,	GPSR6_12,
-		GP_6_11_FN,	GPSR6_11,
-		GP_6_10_FN,	GPSR6_10,
-		GP_6_9_FN,	GPSR6_9,
-		GP_6_8_FN,	GPSR6_8,
-		GP_6_7_FN,	GPSR6_7,
-		GP_6_6_FN,	GPSR6_6,
-		GP_6_5_FN,	GPSR6_5,
-		GP_6_4_FN,	GPSR6_4,
-		GP_6_3_FN,	GPSR6_3,
-		GP_6_2_FN,	GPSR6_2,
-		GP_6_1_FN,	GPSR6_1,
-		GP_6_0_FN,	GPSR6_0, ))
-	},
-	{ PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP(
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		GP_7_3_FN, GPSR7_3,
-		GP_7_2_FN, GPSR7_2,
-		GP_7_1_FN, GPSR7_1,
-		GP_7_0_FN, GPSR7_0, ))
-	},
-#undef F_
-#undef FM
-
-#define F_(x, y)	x,
-#define FM(x)		FN_##x,
-	{ PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4, GROUP(
-		IP0_31_28
-		IP0_27_24
-		IP0_23_20
-		IP0_19_16
-		IP0_15_12
-		IP0_11_8
-		IP0_7_4
-		IP0_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4, GROUP(
-		IP1_31_28
-		IP1_27_24
-		IP1_23_20
-		IP1_19_16
-		IP1_15_12
-		IP1_11_8
-		IP1_7_4
-		IP1_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4, GROUP(
-		IP2_31_28
-		IP2_27_24
-		IP2_23_20
-		IP2_19_16
-		IP2_15_12
-		IP2_11_8
-		IP2_7_4
-		IP2_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4, GROUP(
-		IP3_31_28
-		IP3_27_24
-		IP3_23_20
-		IP3_19_16
-		IP3_15_12
-		IP3_11_8
-		IP3_7_4
-		IP3_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4, GROUP(
-		IP4_31_28
-		IP4_27_24
-		IP4_23_20
-		IP4_19_16
-		IP4_15_12
-		IP4_11_8
-		IP4_7_4
-		IP4_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4, GROUP(
-		IP5_31_28
-		IP5_27_24
-		IP5_23_20
-		IP5_19_16
-		IP5_15_12
-		IP5_11_8
-		IP5_7_4
-		IP5_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4, GROUP(
-		IP6_31_28
-		IP6_27_24
-		IP6_23_20
-		IP6_19_16
-		IP6_15_12
-		IP6_11_8
-		IP6_7_4
-		IP6_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
-		IP7_31_28
-		IP7_27_24
-		IP7_23_20
-		IP7_19_16
-		IP7_15_12
-		IP7_11_8
-		IP7_7_4
-		IP7_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4, GROUP(
-		IP8_31_28
-		IP8_27_24
-		IP8_23_20
-		IP8_19_16
-		IP8_15_12
-		IP8_11_8
-		IP8_7_4
-		IP8_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4, GROUP(
-		IP9_31_28
-		IP9_27_24
-		IP9_23_20
-		IP9_19_16
-		IP9_15_12
-		IP9_11_8
-		IP9_7_4
-		IP9_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4, GROUP(
-		IP10_31_28
-		IP10_27_24
-		IP10_23_20
-		IP10_19_16
-		IP10_15_12
-		IP10_11_8
-		IP10_7_4
-		IP10_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4, GROUP(
-		IP11_31_28
-		IP11_27_24
-		IP11_23_20
-		IP11_19_16
-		IP11_15_12
-		IP11_11_8
-		IP11_7_4
-		IP11_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4, GROUP(
-		IP12_31_28
-		IP12_27_24
-		IP12_23_20
-		IP12_19_16
-		IP12_15_12
-		IP12_11_8
-		IP12_7_4
-		IP12_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4, GROUP(
-		IP13_31_28
-		IP13_27_24
-		IP13_23_20
-		IP13_19_16
-		IP13_15_12
-		IP13_11_8
-		IP13_7_4
-		IP13_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4, GROUP(
-		IP14_31_28
-		IP14_27_24
-		IP14_23_20
-		IP14_19_16
-		IP14_15_12
-		IP14_11_8
-		IP14_7_4
-		IP14_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4, GROUP(
-		IP15_31_28
-		IP15_27_24
-		IP15_23_20
-		IP15_19_16
-		IP15_15_12
-		IP15_11_8
-		IP15_7_4
-		IP15_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4, GROUP(
-		IP16_31_28
-		IP16_27_24
-		IP16_23_20
-		IP16_19_16
-		IP16_15_12
-		IP16_11_8
-		IP16_7_4
-		IP16_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4, GROUP(
-		/* IP17_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		/* IP17_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		/* IP17_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		/* IP17_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		/* IP17_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		/* IP17_11_8  */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		IP17_7_4
-		IP17_3_0 ))
-	},
-#undef F_
-#undef FM
-
-#define F_(x, y)	x,
-#define FM(x)		FN_##x,
-	{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
-			     GROUP(1, 2, 2, 3, 1, 1, 2, 1, 1, 1, 2, 1,
-				   1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 1),
-			     GROUP(
-		0, 0, /* RESERVED 31 */
-		MOD_SEL0_30_29
-		MOD_SEL0_28_27
-		MOD_SEL0_26_25_24
-		MOD_SEL0_23
-		MOD_SEL0_22
-		MOD_SEL0_21_20
-		MOD_SEL0_19
-		MOD_SEL0_18
-		MOD_SEL0_17
-		MOD_SEL0_16_15
-		MOD_SEL0_14
-		MOD_SEL0_13
-		MOD_SEL0_12
-		MOD_SEL0_11
-		MOD_SEL0_10
-		MOD_SEL0_9
-		MOD_SEL0_8
-		MOD_SEL0_7_6
-		MOD_SEL0_5_4
-		MOD_SEL0_3
-		MOD_SEL0_2_1
-		0, 0, /* RESERVED 0 */ ))
-	},
-	{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
-			     GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1,
-				   1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1),
-			     GROUP(
-		MOD_SEL1_31_30
-		MOD_SEL1_29_28_27
-		MOD_SEL1_26
-		MOD_SEL1_25_24
-		MOD_SEL1_23_22_21
-		MOD_SEL1_20
-		MOD_SEL1_19
-		MOD_SEL1_18_17
-		MOD_SEL1_16
-		MOD_SEL1_15_14
-		MOD_SEL1_13
-		MOD_SEL1_12
-		MOD_SEL1_11
-		MOD_SEL1_10
-		MOD_SEL1_9
-		0, 0, 0, 0, /* RESERVED 8, 7 */
-		MOD_SEL1_6
-		MOD_SEL1_5
-		MOD_SEL1_4
-		MOD_SEL1_3
-		MOD_SEL1_2
-		MOD_SEL1_1
-		MOD_SEL1_0 ))
-	},
-	{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
-			     GROUP(1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 1, 2, 1),
-			     GROUP(
-		MOD_SEL2_31
-		MOD_SEL2_30
-		MOD_SEL2_29
-		/* RESERVED 28 */
-		0, 0,
-		/* RESERVED 27, 26, 25, 24 */
-		0, 0, 0, 0, 0, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0,
-		/* RESERVED 23, 22, 21, 20 */
-		0, 0, 0, 0, 0, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0,
-		/* RESERVED 19, 18, 17, 16 */
-		0, 0, 0, 0, 0, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0,
-		/* RESERVED 15, 14, 13, 12 */
-		0, 0, 0, 0, 0, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0,
-		/* RESERVED 11, 10, 9, 8 */
-		0, 0, 0, 0, 0, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0,
-		/* RESERVED 7, 6, 5, 4 */
-		0, 0, 0, 0, 0, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0,
-		/* RESERVED 3 */
-		0, 0,
-		/* RESERVED 2, 1 */
-		0, 0, 0, 0,
-		MOD_SEL2_0 ))
-	},
-	{ },
-};
-
-static const struct pinmux_drive_reg pinmux_drive_regs[] = {
-	{ PINMUX_DRIVE_REG("DRVCTRL0", 0xe6060300) {
-		{ PIN_QSPI0_SPCLK,    28, 2 },	/* QSPI0_SPCLK */
-		{ PIN_QSPI0_MOSI_IO0, 24, 2 },	/* QSPI0_MOSI_IO0 */
-		{ PIN_QSPI0_MISO_IO1, 20, 2 },	/* QSPI0_MISO_IO1 */
-		{ PIN_QSPI0_IO2,      16, 2 },	/* QSPI0_IO2 */
-		{ PIN_QSPI0_IO3,      12, 2 },	/* QSPI0_IO3 */
-		{ PIN_QSPI0_SSL,       8, 2 },	/* QSPI0_SSL */
-		{ PIN_QSPI1_SPCLK,     4, 2 },	/* QSPI1_SPCLK */
-		{ PIN_QSPI1_MOSI_IO0,  0, 2 },	/* QSPI1_MOSI_IO0 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL1", 0xe6060304) {
-		{ PIN_QSPI1_MISO_IO1, 28, 2 },	/* QSPI1_MISO_IO1 */
-		{ PIN_QSPI1_IO2,      24, 2 },	/* QSPI1_IO2 */
-		{ PIN_QSPI1_IO3,      20, 2 },	/* QSPI1_IO3 */
-		{ PIN_QSPI1_SSL,      16, 2 },	/* QSPI1_SSL */
-		{ PIN_RPC_INT_N,      12, 2 },	/* RPC_INT# */
-		{ PIN_RPC_WP_N,        8, 2 },	/* RPC_WP# */
-		{ PIN_RPC_RESET_N,     4, 2 },	/* RPC_RESET# */
-		{ PIN_AVB_RX_CTL,      0, 3 },	/* AVB_RX_CTL */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL2", 0xe6060308) {
-		{ PIN_AVB_RXC,        28, 3 },	/* AVB_RXC */
-		{ PIN_AVB_RD0,        24, 3 },	/* AVB_RD0 */
-		{ PIN_AVB_RD1,        20, 3 },	/* AVB_RD1 */
-		{ PIN_AVB_RD2,        16, 3 },	/* AVB_RD2 */
-		{ PIN_AVB_RD3,        12, 3 },	/* AVB_RD3 */
-		{ PIN_AVB_TX_CTL,      8, 3 },	/* AVB_TX_CTL */
-		{ PIN_AVB_TXC,         4, 3 },	/* AVB_TXC */
-		{ PIN_AVB_TD0,         0, 3 },	/* AVB_TD0 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL3", 0xe606030c) {
-		{ PIN_AVB_TD1,        28, 3 },	/* AVB_TD1 */
-		{ PIN_AVB_TD2,        24, 3 },	/* AVB_TD2 */
-		{ PIN_AVB_TD3,        20, 3 },	/* AVB_TD3 */
-		{ PIN_AVB_TXCREFCLK,  16, 3 },	/* AVB_TXCREFCLK */
-		{ PIN_AVB_MDIO,       12, 3 },	/* AVB_MDIO */
-		{ RCAR_GP_PIN(2,  9),  8, 3 },	/* AVB_MDC */
-		{ RCAR_GP_PIN(2, 10),  4, 3 },	/* AVB_MAGIC */
-		{ RCAR_GP_PIN(2, 11),  0, 3 },	/* AVB_PHY_INT */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL4", 0xe6060310) {
-		{ RCAR_GP_PIN(2, 12), 28, 3 },	/* AVB_LINK */
-		{ RCAR_GP_PIN(2, 13), 24, 3 },	/* AVB_AVTP_MATCH */
-		{ RCAR_GP_PIN(2, 14), 20, 3 },	/* AVB_AVTP_CAPTURE */
-		{ RCAR_GP_PIN(2,  0), 16, 3 },	/* IRQ0 */
-		{ RCAR_GP_PIN(2,  1), 12, 3 },	/* IRQ1 */
-		{ RCAR_GP_PIN(2,  2),  8, 3 },	/* IRQ2 */
-		{ RCAR_GP_PIN(2,  3),  4, 3 },	/* IRQ3 */
-		{ RCAR_GP_PIN(2,  4),  0, 3 },	/* IRQ4 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL5", 0xe6060314) {
-		{ RCAR_GP_PIN(2,  5), 28, 3 },	/* IRQ5 */
-		{ RCAR_GP_PIN(2,  6), 24, 3 },	/* PWM0 */
-		{ RCAR_GP_PIN(2,  7), 20, 3 },	/* PWM1 */
-		{ RCAR_GP_PIN(2,  8), 16, 3 },	/* PWM2 */
-		{ RCAR_GP_PIN(1,  0), 12, 3 },	/* A0 */
-		{ RCAR_GP_PIN(1,  1),  8, 3 },	/* A1 */
-		{ RCAR_GP_PIN(1,  2),  4, 3 },	/* A2 */
-		{ RCAR_GP_PIN(1,  3),  0, 3 },	/* A3 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL6", 0xe6060318) {
-		{ RCAR_GP_PIN(1,  4), 28, 3 },	/* A4 */
-		{ RCAR_GP_PIN(1,  5), 24, 3 },	/* A5 */
-		{ RCAR_GP_PIN(1,  6), 20, 3 },	/* A6 */
-		{ RCAR_GP_PIN(1,  7), 16, 3 },	/* A7 */
-		{ RCAR_GP_PIN(1,  8), 12, 3 },	/* A8 */
-		{ RCAR_GP_PIN(1,  9),  8, 3 },	/* A9 */
-		{ RCAR_GP_PIN(1, 10),  4, 3 },	/* A10 */
-		{ RCAR_GP_PIN(1, 11),  0, 3 },	/* A11 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL7", 0xe606031c) {
-		{ RCAR_GP_PIN(1, 12), 28, 3 },	/* A12 */
-		{ RCAR_GP_PIN(1, 13), 24, 3 },	/* A13 */
-		{ RCAR_GP_PIN(1, 14), 20, 3 },	/* A14 */
-		{ RCAR_GP_PIN(1, 15), 16, 3 },	/* A15 */
-		{ RCAR_GP_PIN(1, 16), 12, 3 },	/* A16 */
-		{ RCAR_GP_PIN(1, 17),  8, 3 },	/* A17 */
-		{ RCAR_GP_PIN(1, 18),  4, 3 },	/* A18 */
-		{ RCAR_GP_PIN(1, 19),  0, 3 },	/* A19 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL8", 0xe6060320) {
-		{ PIN_CLKOUT,         28, 3 },	/* CLKOUT */
-		{ RCAR_GP_PIN(1, 20), 24, 3 },	/* CS0 */
-		{ RCAR_GP_PIN(1, 21), 20, 3 },	/* CS1_A26 */
-		{ RCAR_GP_PIN(1, 22), 16, 3 },	/* BS */
-		{ RCAR_GP_PIN(1, 23), 12, 3 },	/* RD */
-		{ RCAR_GP_PIN(1, 24),  8, 3 },	/* RD_WR */
-		{ RCAR_GP_PIN(1, 25),  4, 3 },	/* WE0 */
-		{ RCAR_GP_PIN(1, 26),  0, 3 },	/* WE1 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL9", 0xe6060324) {
-		{ RCAR_GP_PIN(1, 27), 28, 3 },	/* EX_WAIT0 */
-		{ PIN_PRESETOUT_N,    24, 3 },	/* PRESETOUT# */
-		{ RCAR_GP_PIN(0,  0), 20, 3 },	/* D0 */
-		{ RCAR_GP_PIN(0,  1), 16, 3 },	/* D1 */
-		{ RCAR_GP_PIN(0,  2), 12, 3 },	/* D2 */
-		{ RCAR_GP_PIN(0,  3),  8, 3 },	/* D3 */
-		{ RCAR_GP_PIN(0,  4),  4, 3 },	/* D4 */
-		{ RCAR_GP_PIN(0,  5),  0, 3 },	/* D5 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL10", 0xe6060328) {
-		{ RCAR_GP_PIN(0,  6), 28, 3 },	/* D6 */
-		{ RCAR_GP_PIN(0,  7), 24, 3 },	/* D7 */
-		{ RCAR_GP_PIN(0,  8), 20, 3 },	/* D8 */
-		{ RCAR_GP_PIN(0,  9), 16, 3 },	/* D9 */
-		{ RCAR_GP_PIN(0, 10), 12, 3 },	/* D10 */
-		{ RCAR_GP_PIN(0, 11),  8, 3 },	/* D11 */
-		{ RCAR_GP_PIN(0, 12),  4, 3 },	/* D12 */
-		{ RCAR_GP_PIN(0, 13),  0, 3 },	/* D13 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL11", 0xe606032c) {
-		{ RCAR_GP_PIN(0, 14), 28, 3 },	/* D14 */
-		{ RCAR_GP_PIN(0, 15), 24, 3 },	/* D15 */
-		{ RCAR_GP_PIN(7,  0), 20, 3 },	/* AVS1 */
-		{ RCAR_GP_PIN(7,  1), 16, 3 },	/* AVS2 */
-		{ RCAR_GP_PIN(7,  2), 12, 3 },	/* GP7_02 */
-		{ RCAR_GP_PIN(7,  3),  8, 3 },	/* GP7_03 */
-		{ PIN_DU_DOTCLKIN0,    4, 2 },	/* DU_DOTCLKIN0 */
-		{ PIN_DU_DOTCLKIN1,    0, 2 },	/* DU_DOTCLKIN1 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
-		{ PIN_DU_DOTCLKIN2,   28, 2 },	/* DU_DOTCLKIN2 */
-		{ PIN_DU_DOTCLKIN3,   24, 2 },	/* DU_DOTCLKIN3 */
-		{ PIN_FSCLKST_N,      20, 2 },	/* FSCLKST# */
-		{ PIN_TMS,             4, 2 },	/* TMS */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL13", 0xe6060334) {
-		{ PIN_TDO,            28, 2 },	/* TDO */
-		{ PIN_ASEBRK,         24, 2 },	/* ASEBRK */
-		{ RCAR_GP_PIN(3,  0), 20, 3 },	/* SD0_CLK */
-		{ RCAR_GP_PIN(3,  1), 16, 3 },	/* SD0_CMD */
-		{ RCAR_GP_PIN(3,  2), 12, 3 },	/* SD0_DAT0 */
-		{ RCAR_GP_PIN(3,  3),  8, 3 },	/* SD0_DAT1 */
-		{ RCAR_GP_PIN(3,  4),  4, 3 },	/* SD0_DAT2 */
-		{ RCAR_GP_PIN(3,  5),  0, 3 },	/* SD0_DAT3 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL14", 0xe6060338) {
-		{ RCAR_GP_PIN(3,  6), 28, 3 },	/* SD1_CLK */
-		{ RCAR_GP_PIN(3,  7), 24, 3 },	/* SD1_CMD */
-		{ RCAR_GP_PIN(3,  8), 20, 3 },	/* SD1_DAT0 */
-		{ RCAR_GP_PIN(3,  9), 16, 3 },	/* SD1_DAT1 */
-		{ RCAR_GP_PIN(3, 10), 12, 3 },	/* SD1_DAT2 */
-		{ RCAR_GP_PIN(3, 11),  8, 3 },	/* SD1_DAT3 */
-		{ RCAR_GP_PIN(4,  0),  4, 3 },	/* SD2_CLK */
-		{ RCAR_GP_PIN(4,  1),  0, 3 },	/* SD2_CMD */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL15", 0xe606033c) {
-		{ RCAR_GP_PIN(4,  2), 28, 3 },	/* SD2_DAT0 */
-		{ RCAR_GP_PIN(4,  3), 24, 3 },	/* SD2_DAT1 */
-		{ RCAR_GP_PIN(4,  4), 20, 3 },	/* SD2_DAT2 */
-		{ RCAR_GP_PIN(4,  5), 16, 3 },	/* SD2_DAT3 */
-		{ RCAR_GP_PIN(4,  6), 12, 3 },	/* SD2_DS */
-		{ RCAR_GP_PIN(4,  7),  8, 3 },	/* SD3_CLK */
-		{ RCAR_GP_PIN(4,  8),  4, 3 },	/* SD3_CMD */
-		{ RCAR_GP_PIN(4,  9),  0, 3 },	/* SD3_DAT0 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL16", 0xe6060340) {
-		{ RCAR_GP_PIN(4, 10), 28, 3 },	/* SD3_DAT1 */
-		{ RCAR_GP_PIN(4, 11), 24, 3 },	/* SD3_DAT2 */
-		{ RCAR_GP_PIN(4, 12), 20, 3 },	/* SD3_DAT3 */
-		{ RCAR_GP_PIN(4, 13), 16, 3 },	/* SD3_DAT4 */
-		{ RCAR_GP_PIN(4, 14), 12, 3 },	/* SD3_DAT5 */
-		{ RCAR_GP_PIN(4, 15),  8, 3 },	/* SD3_DAT6 */
-		{ RCAR_GP_PIN(4, 16),  4, 3 },	/* SD3_DAT7 */
-		{ RCAR_GP_PIN(4, 17),  0, 3 },	/* SD3_DS */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL17", 0xe6060344) {
-		{ RCAR_GP_PIN(3, 12), 28, 3 },	/* SD0_CD */
-		{ RCAR_GP_PIN(3, 13), 24, 3 },	/* SD0_WP */
-		{ RCAR_GP_PIN(3, 14), 20, 3 },	/* SD1_CD */
-		{ RCAR_GP_PIN(3, 15), 16, 3 },	/* SD1_WP */
-		{ RCAR_GP_PIN(5,  0), 12, 3 },	/* SCK0 */
-		{ RCAR_GP_PIN(5,  1),  8, 3 },	/* RX0 */
-		{ RCAR_GP_PIN(5,  2),  4, 3 },	/* TX0 */
-		{ RCAR_GP_PIN(5,  3),  0, 3 },	/* CTS0 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL18", 0xe6060348) {
-		{ RCAR_GP_PIN(5,  4), 28, 3 },	/* RTS0 */
-		{ RCAR_GP_PIN(5,  5), 24, 3 },	/* RX1 */
-		{ RCAR_GP_PIN(5,  6), 20, 3 },	/* TX1 */
-		{ RCAR_GP_PIN(5,  7), 16, 3 },	/* CTS1 */
-		{ RCAR_GP_PIN(5,  8), 12, 3 },	/* RTS1 */
-		{ RCAR_GP_PIN(5,  9),  8, 3 },	/* SCK2 */
-		{ RCAR_GP_PIN(5, 10),  4, 3 },	/* TX2 */
-		{ RCAR_GP_PIN(5, 11),  0, 3 },	/* RX2 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL19", 0xe606034c) {
-		{ RCAR_GP_PIN(5, 12), 28, 3 },	/* HSCK0 */
-		{ RCAR_GP_PIN(5, 13), 24, 3 },	/* HRX0 */
-		{ RCAR_GP_PIN(5, 14), 20, 3 },	/* HTX0 */
-		{ RCAR_GP_PIN(5, 15), 16, 3 },	/* HCTS0 */
-		{ RCAR_GP_PIN(5, 16), 12, 3 },	/* HRTS0 */
-		{ RCAR_GP_PIN(5, 17),  8, 3 },	/* MSIOF0_SCK */
-		{ RCAR_GP_PIN(5, 18),  4, 3 },	/* MSIOF0_SYNC */
-		{ RCAR_GP_PIN(5, 19),  0, 3 },	/* MSIOF0_SS1 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL20", 0xe6060350) {
-		{ RCAR_GP_PIN(5, 20), 28, 3 },	/* MSIOF0_TXD */
-		{ RCAR_GP_PIN(5, 21), 24, 3 },	/* MSIOF0_SS2 */
-		{ RCAR_GP_PIN(5, 22), 20, 3 },	/* MSIOF0_RXD */
-		{ RCAR_GP_PIN(5, 23), 16, 3 },	/* MLB_CLK */
-		{ RCAR_GP_PIN(5, 24), 12, 3 },	/* MLB_SIG */
-		{ RCAR_GP_PIN(5, 25),  8, 3 },	/* MLB_DAT */
-		{ PIN_MLB_REF,         4, 3 },	/* MLB_REF */
-		{ RCAR_GP_PIN(6,  0),  0, 3 },	/* SSI_SCK01239 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL21", 0xe6060354) {
-		{ RCAR_GP_PIN(6,  1), 28, 3 },	/* SSI_WS01239 */
-		{ RCAR_GP_PIN(6,  2), 24, 3 },	/* SSI_SDATA0 */
-		{ RCAR_GP_PIN(6,  3), 20, 3 },	/* SSI_SDATA1 */
-		{ RCAR_GP_PIN(6,  4), 16, 3 },	/* SSI_SDATA2 */
-		{ RCAR_GP_PIN(6,  5), 12, 3 },	/* SSI_SCK349 */
-		{ RCAR_GP_PIN(6,  6),  8, 3 },	/* SSI_WS349 */
-		{ RCAR_GP_PIN(6,  7),  4, 3 },	/* SSI_SDATA3 */
-		{ RCAR_GP_PIN(6,  8),  0, 3 },	/* SSI_SCK4 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL22", 0xe6060358) {
-		{ RCAR_GP_PIN(6,  9), 28, 3 },	/* SSI_WS4 */
-		{ RCAR_GP_PIN(6, 10), 24, 3 },	/* SSI_SDATA4 */
-		{ RCAR_GP_PIN(6, 11), 20, 3 },	/* SSI_SCK5 */
-		{ RCAR_GP_PIN(6, 12), 16, 3 },	/* SSI_WS5 */
-		{ RCAR_GP_PIN(6, 13), 12, 3 },	/* SSI_SDATA5 */
-		{ RCAR_GP_PIN(6, 14),  8, 3 },	/* SSI_SCK6 */
-		{ RCAR_GP_PIN(6, 15),  4, 3 },	/* SSI_WS6 */
-		{ RCAR_GP_PIN(6, 16),  0, 3 },	/* SSI_SDATA6 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL23", 0xe606035c) {
-		{ RCAR_GP_PIN(6, 17), 28, 3 },	/* SSI_SCK78 */
-		{ RCAR_GP_PIN(6, 18), 24, 3 },	/* SSI_WS78 */
-		{ RCAR_GP_PIN(6, 19), 20, 3 },	/* SSI_SDATA7 */
-		{ RCAR_GP_PIN(6, 20), 16, 3 },	/* SSI_SDATA8 */
-		{ RCAR_GP_PIN(6, 21), 12, 3 },	/* SSI_SDATA9 */
-		{ RCAR_GP_PIN(6, 22),  8, 3 },	/* AUDIO_CLKA */
-		{ RCAR_GP_PIN(6, 23),  4, 3 },	/* AUDIO_CLKB */
-		{ RCAR_GP_PIN(6, 24),  0, 3 },	/* USB0_PWEN */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL24", 0xe6060360) {
-		{ RCAR_GP_PIN(6, 25), 28, 3 },	/* USB0_OVC */
-		{ RCAR_GP_PIN(6, 26), 24, 3 },	/* USB1_PWEN */
-		{ RCAR_GP_PIN(6, 27), 20, 3 },	/* USB1_OVC */
-		{ RCAR_GP_PIN(6, 28), 16, 3 },	/* USB30_PWEN */
-		{ RCAR_GP_PIN(6, 29), 12, 3 },	/* USB30_OVC */
-		{ RCAR_GP_PIN(6, 30),  8, 3 },	/* USB31_PWEN */
-		{ RCAR_GP_PIN(6, 31),  4, 3 },	/* USB31_OVC */
-	} },
-	{ },
-};
-
-enum ioctrl_regs {
-	POCCTRL,
-	TDSELCTRL,
-};
-
-static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
-	[POCCTRL] = { 0xe6060380, },
-	[TDSELCTRL] = { 0xe60603c0, },
-	{ /* sentinel */ },
-};
-
-static int r8a7795es1_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
-				     u32 *pocctrl)
-{
-	int bit = -EINVAL;
-
-	*pocctrl = pinmux_ioctrl_regs[POCCTRL].reg;
-
-	if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
-		bit = pin & 0x1f;
-
-	if (pin >= RCAR_GP_PIN(4, 0) && pin <= RCAR_GP_PIN(4, 17))
-		bit = (pin & 0x1f) + 12;
-
-	return bit;
-}
-
-static const struct pinmux_bias_reg pinmux_bias_regs[] = {
-	{ PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
-		[ 0] = PIN_QSPI0_SPCLK,		/* QSPI0_SPCLK */
-		[ 1] = PIN_QSPI0_MOSI_IO0,	/* QSPI0_MOSI_IO0 */
-		[ 2] = PIN_QSPI0_MISO_IO1,	/* QSPI0_MISO_IO1 */
-		[ 3] = PIN_QSPI0_IO2,		/* QSPI0_IO2 */
-		[ 4] = PIN_QSPI0_IO3,		/* QSPI0_IO3 */
-		[ 5] = PIN_QSPI0_SSL,		/* QSPI0_SSL */
-		[ 6] = PIN_QSPI1_SPCLK,		/* QSPI1_SPCLK */
-		[ 7] = PIN_QSPI1_MOSI_IO0,	/* QSPI1_MOSI_IO0 */
-		[ 8] = PIN_QSPI1_MISO_IO1,	/* QSPI1_MISO_IO1 */
-		[ 9] = PIN_QSPI1_IO2,		/* QSPI1_IO2 */
-		[10] = PIN_QSPI1_IO3,		/* QSPI1_IO3 */
-		[11] = PIN_QSPI1_SSL,		/* QSPI1_SSL */
-		[12] = PIN_RPC_INT_N,		/* RPC_INT# */
-		[13] = PIN_RPC_WP_N,		/* RPC_WP# */
-		[14] = PIN_RPC_RESET_N,		/* RPC_RESET# */
-		[15] = PIN_AVB_RX_CTL,		/* AVB_RX_CTL */
-		[16] = PIN_AVB_RXC,		/* AVB_RXC */
-		[17] = PIN_AVB_RD0,		/* AVB_RD0 */
-		[18] = PIN_AVB_RD1,		/* AVB_RD1 */
-		[19] = PIN_AVB_RD2,		/* AVB_RD2 */
-		[20] = PIN_AVB_RD3,		/* AVB_RD3 */
-		[21] = PIN_AVB_TX_CTL,		/* AVB_TX_CTL */
-		[22] = PIN_AVB_TXC,		/* AVB_TXC */
-		[23] = PIN_AVB_TD0,		/* AVB_TD0 */
-		[24] = PIN_AVB_TD1,		/* AVB_TD1 */
-		[25] = PIN_AVB_TD2,		/* AVB_TD2 */
-		[26] = PIN_AVB_TD3,		/* AVB_TD3 */
-		[27] = PIN_AVB_TXCREFCLK,	/* AVB_TXCREFCLK */
-		[28] = PIN_AVB_MDIO,		/* AVB_MDIO */
-		[29] = RCAR_GP_PIN(2,  9),	/* AVB_MDC */
-		[30] = RCAR_GP_PIN(2, 10),	/* AVB_MAGIC */
-		[31] = RCAR_GP_PIN(2, 11),	/* AVB_PHY_INT */
-	} },
-	{ PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
-		[ 0] = RCAR_GP_PIN(2, 12),	/* AVB_LINK */
-		[ 1] = RCAR_GP_PIN(2, 13),	/* AVB_AVTP_MATCH_A */
-		[ 2] = RCAR_GP_PIN(2, 14),	/* AVB_AVTP_CAPTURE_A */
-		[ 3] = RCAR_GP_PIN(2,  0),	/* IRQ0 */
-		[ 4] = RCAR_GP_PIN(2,  1),	/* IRQ1 */
-		[ 5] = RCAR_GP_PIN(2,  2),	/* IRQ2 */
-		[ 6] = RCAR_GP_PIN(2,  3),	/* IRQ3 */
-		[ 7] = RCAR_GP_PIN(2,  4),	/* IRQ4 */
-		[ 8] = RCAR_GP_PIN(2,  5),	/* IRQ5 */
-		[ 9] = RCAR_GP_PIN(2,  6),	/* PWM0 */
-		[10] = RCAR_GP_PIN(2,  7),	/* PWM1_A */
-		[11] = RCAR_GP_PIN(2,  8),	/* PWM2_A */
-		[12] = RCAR_GP_PIN(1,  0),	/* A0 */
-		[13] = RCAR_GP_PIN(1,  1),	/* A1 */
-		[14] = RCAR_GP_PIN(1,  2),	/* A2 */
-		[15] = RCAR_GP_PIN(1,  3),	/* A3 */
-		[16] = RCAR_GP_PIN(1,  4),	/* A4 */
-		[17] = RCAR_GP_PIN(1,  5),	/* A5 */
-		[18] = RCAR_GP_PIN(1,  6),	/* A6 */
-		[19] = RCAR_GP_PIN(1,  7),	/* A7 */
-		[20] = RCAR_GP_PIN(1,  8),	/* A8 */
-		[21] = RCAR_GP_PIN(1,  9),	/* A9 */
-		[22] = RCAR_GP_PIN(1, 10),	/* A10 */
-		[23] = RCAR_GP_PIN(1, 11),	/* A11 */
-		[24] = RCAR_GP_PIN(1, 12),	/* A12 */
-		[25] = RCAR_GP_PIN(1, 13),	/* A13 */
-		[26] = RCAR_GP_PIN(1, 14),	/* A14 */
-		[27] = RCAR_GP_PIN(1, 15),	/* A15 */
-		[28] = RCAR_GP_PIN(1, 16),	/* A16 */
-		[29] = RCAR_GP_PIN(1, 17),	/* A17 */
-		[30] = RCAR_GP_PIN(1, 18),	/* A18 */
-		[31] = RCAR_GP_PIN(1, 19),	/* A19 */
-	} },
-	{ PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
-		[ 0] = PIN_CLKOUT,		/* CLKOUT */
-		[ 1] = RCAR_GP_PIN(1, 20),	/* CS0_N */
-		[ 2] = RCAR_GP_PIN(1, 21),	/* CS1_N_A26 */
-		[ 3] = RCAR_GP_PIN(1, 22),	/* BS_N */
-		[ 4] = RCAR_GP_PIN(1, 23),	/* RD_N */
-		[ 5] = RCAR_GP_PIN(1, 24),	/* RD_WR_N */
-		[ 6] = RCAR_GP_PIN(1, 25),	/* WE0_N */
-		[ 7] = RCAR_GP_PIN(1, 26),	/* WE1_N */
-		[ 8] = RCAR_GP_PIN(1, 27),	/* EX_WAIT0_A */
-		[ 9] = PIN_PRESETOUT_N,		/* PRESETOUT# */
-		[10] = RCAR_GP_PIN(0,  0),	/* D0 */
-		[11] = RCAR_GP_PIN(0,  1),	/* D1 */
-		[12] = RCAR_GP_PIN(0,  2),	/* D2 */
-		[13] = RCAR_GP_PIN(0,  3),	/* D3 */
-		[14] = RCAR_GP_PIN(0,  4),	/* D4 */
-		[15] = RCAR_GP_PIN(0,  5),	/* D5 */
-		[16] = RCAR_GP_PIN(0,  6),	/* D6 */
-		[17] = RCAR_GP_PIN(0,  7),	/* D7 */
-		[18] = RCAR_GP_PIN(0,  8),	/* D8 */
-		[19] = RCAR_GP_PIN(0,  9),	/* D9 */
-		[20] = RCAR_GP_PIN(0, 10),	/* D10 */
-		[21] = RCAR_GP_PIN(0, 11),	/* D11 */
-		[22] = RCAR_GP_PIN(0, 12),	/* D12 */
-		[23] = RCAR_GP_PIN(0, 13),	/* D13 */
-		[24] = RCAR_GP_PIN(0, 14),	/* D14 */
-		[25] = RCAR_GP_PIN(0, 15),	/* D15 */
-		[26] = RCAR_GP_PIN(7,  0),	/* AVS1 */
-		[27] = RCAR_GP_PIN(7,  1),	/* AVS2 */
-		[28] = RCAR_GP_PIN(7,  2),	/* GP7_02 */
-		[29] = RCAR_GP_PIN(7,  3),	/* GP7_03 */
-		[30] = PIN_DU_DOTCLKIN0,	/* DU_DOTCLKIN0 */
-		[31] = PIN_DU_DOTCLKIN1,	/* DU_DOTCLKIN1 */
-	} },
-	{ PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
-		[ 0] = PIN_DU_DOTCLKIN2,	/* DU_DOTCLKIN2 */
-		[ 1] = PIN_DU_DOTCLKIN3,	/* DU_DOTCLKIN3 */
-		[ 2] = PIN_FSCLKST_N,		/* FSCLKST# */
-		[ 3] = PIN_EXTALR,		/* EXTALR*/
-		[ 4] = PIN_TRST_N,		/* TRST# */
-		[ 5] = PIN_TCK,			/* TCK */
-		[ 6] = PIN_TMS,			/* TMS */
-		[ 7] = PIN_TDI,			/* TDI */
-		[ 8] = SH_PFC_PIN_NONE,
-		[ 9] = PIN_ASEBRK,		/* ASEBRK */
-		[10] = RCAR_GP_PIN(3,  0),	/* SD0_CLK */
-		[11] = RCAR_GP_PIN(3,  1),	/* SD0_CMD */
-		[12] = RCAR_GP_PIN(3,  2),	/* SD0_DAT0 */
-		[13] = RCAR_GP_PIN(3,  3),	/* SD0_DAT1 */
-		[14] = RCAR_GP_PIN(3,  4),	/* SD0_DAT2 */
-		[15] = RCAR_GP_PIN(3,  5),	/* SD0_DAT3 */
-		[16] = RCAR_GP_PIN(3,  6),	/* SD1_CLK */
-		[17] = RCAR_GP_PIN(3,  7),	/* SD1_CMD */
-		[18] = RCAR_GP_PIN(3,  8),	/* SD1_DAT0 */
-		[19] = RCAR_GP_PIN(3,  9),	/* SD1_DAT1 */
-		[20] = RCAR_GP_PIN(3, 10),	/* SD1_DAT2 */
-		[21] = RCAR_GP_PIN(3, 11),	/* SD1_DAT3 */
-		[22] = RCAR_GP_PIN(4,  0),	/* SD2_CLK */
-		[23] = RCAR_GP_PIN(4,  1),	/* SD2_CMD */
-		[24] = RCAR_GP_PIN(4,  2),	/* SD2_DAT0 */
-		[25] = RCAR_GP_PIN(4,  3),	/* SD2_DAT1 */
-		[26] = RCAR_GP_PIN(4,  4),	/* SD2_DAT2 */
-		[27] = RCAR_GP_PIN(4,  5),	/* SD2_DAT3 */
-		[28] = RCAR_GP_PIN(4,  6),	/* SD2_DS */
-		[29] = RCAR_GP_PIN(4,  7),	/* SD3_CLK */
-		[30] = RCAR_GP_PIN(4,  8),	/* SD3_CMD */
-		[31] = RCAR_GP_PIN(4,  9),	/* SD3_DAT0 */
-	} },
-	{ PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
-		[ 0] = RCAR_GP_PIN(4, 10),	/* SD3_DAT1 */
-		[ 1] = RCAR_GP_PIN(4, 11),	/* SD3_DAT2 */
-		[ 2] = RCAR_GP_PIN(4, 12),	/* SD3_DAT3 */
-		[ 3] = RCAR_GP_PIN(4, 13),	/* SD3_DAT4 */
-		[ 4] = RCAR_GP_PIN(4, 14),	/* SD3_DAT5 */
-		[ 5] = RCAR_GP_PIN(4, 15),	/* SD3_DAT6 */
-		[ 6] = RCAR_GP_PIN(4, 16),	/* SD3_DAT7 */
-		[ 7] = RCAR_GP_PIN(4, 17),	/* SD3_DS */
-		[ 8] = RCAR_GP_PIN(3, 12),	/* SD0_CD */
-		[ 9] = RCAR_GP_PIN(3, 13),	/* SD0_WP */
-		[10] = RCAR_GP_PIN(3, 14),	/* SD1_CD */
-		[11] = RCAR_GP_PIN(3, 15),	/* SD1_WP */
-		[12] = RCAR_GP_PIN(5,  0),	/* SCK0 */
-		[13] = RCAR_GP_PIN(5,  1),	/* RX0 */
-		[14] = RCAR_GP_PIN(5,  2),	/* TX0 */
-		[15] = RCAR_GP_PIN(5,  3),	/* CTS0_N */
-		[16] = RCAR_GP_PIN(5,  4),	/* RTS0_N */
-		[17] = RCAR_GP_PIN(5,  5),	/* RX1_A */
-		[18] = RCAR_GP_PIN(5,  6),	/* TX1_A */
-		[19] = RCAR_GP_PIN(5,  7),	/* CTS1_N */
-		[20] = RCAR_GP_PIN(5,  8),	/* RTS1_N */
-		[21] = RCAR_GP_PIN(5,  9),	/* SCK2 */
-		[22] = RCAR_GP_PIN(5, 10),	/* TX2_A */
-		[23] = RCAR_GP_PIN(5, 11),	/* RX2_A */
-		[24] = RCAR_GP_PIN(5, 12),	/* HSCK0 */
-		[25] = RCAR_GP_PIN(5, 13),	/* HRX0 */
-		[26] = RCAR_GP_PIN(5, 14),	/* HTX0 */
-		[27] = RCAR_GP_PIN(5, 15),	/* HCTS0_N */
-		[28] = RCAR_GP_PIN(5, 16),	/* HRTS0_N */
-		[29] = RCAR_GP_PIN(5, 17),	/* MSIOF0_SCK */
-		[30] = RCAR_GP_PIN(5, 18),	/* MSIOF0_SYNC */
-		[31] = RCAR_GP_PIN(5, 19),	/* MSIOF0_SS1 */
-	} },
-	{ PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
-		[ 0] = RCAR_GP_PIN(5, 20),	/* MSIOF0_TXD */
-		[ 1] = RCAR_GP_PIN(5, 21),	/* MSIOF0_SS2 */
-		[ 2] = RCAR_GP_PIN(5, 22),	/* MSIOF0_RXD */
-		[ 3] = RCAR_GP_PIN(5, 23),	/* MLB_CLK */
-		[ 4] = RCAR_GP_PIN(5, 24),	/* MLB_SIG */
-		[ 5] = RCAR_GP_PIN(5, 25),	/* MLB_DAT */
-		[ 6] = PIN_MLB_REF,		/* MLB_REF */
-		[ 7] = RCAR_GP_PIN(6,  0),	/* SSI_SCK01239 */
-		[ 8] = RCAR_GP_PIN(6,  1),	/* SSI_WS01239 */
-		[ 9] = RCAR_GP_PIN(6,  2),	/* SSI_SDATA0 */
-		[10] = RCAR_GP_PIN(6,  3),	/* SSI_SDATA1_A */
-		[11] = RCAR_GP_PIN(6,  4),	/* SSI_SDATA2_A */
-		[12] = RCAR_GP_PIN(6,  5),	/* SSI_SCK349 */
-		[13] = RCAR_GP_PIN(6,  6),	/* SSI_WS349 */
-		[14] = RCAR_GP_PIN(6,  7),	/* SSI_SDATA3 */
-		[15] = RCAR_GP_PIN(6,  8),	/* SSI_SCK4 */
-		[16] = RCAR_GP_PIN(6,  9),	/* SSI_WS4 */
-		[17] = RCAR_GP_PIN(6, 10),	/* SSI_SDATA4 */
-		[18] = RCAR_GP_PIN(6, 11),	/* SSI_SCK5 */
-		[19] = RCAR_GP_PIN(6, 12),	/* SSI_WS5 */
-		[20] = RCAR_GP_PIN(6, 13),	/* SSI_SDATA5 */
-		[21] = RCAR_GP_PIN(6, 14),	/* SSI_SCK6 */
-		[22] = RCAR_GP_PIN(6, 15),	/* SSI_WS6 */
-		[23] = RCAR_GP_PIN(6, 16),	/* SSI_SDATA6 */
-		[24] = RCAR_GP_PIN(6, 17),	/* SSI_SCK78 */
-		[25] = RCAR_GP_PIN(6, 18),	/* SSI_WS78 */
-		[26] = RCAR_GP_PIN(6, 19),	/* SSI_SDATA7 */
-		[27] = RCAR_GP_PIN(6, 20),	/* SSI_SDATA8 */
-		[28] = RCAR_GP_PIN(6, 21),	/* SSI_SDATA9_A */
-		[29] = RCAR_GP_PIN(6, 22),	/* AUDIO_CLKA_A */
-		[30] = RCAR_GP_PIN(6, 23),	/* AUDIO_CLKB_B */
-		[31] = RCAR_GP_PIN(6, 24),	/* USB0_PWEN */
-	} },
-	{ PINMUX_BIAS_REG("PUEN6", 0xe6060418, "PUD6", 0xe6060458) {
-		[ 0] = RCAR_GP_PIN(6, 25),	/* USB0_OVC */
-		[ 1] = RCAR_GP_PIN(6, 26),	/* USB1_PWEN */
-		[ 2] = RCAR_GP_PIN(6, 27),	/* USB1_OVC */
-		[ 3] = RCAR_GP_PIN(6, 28),	/* USB30_PWEN */
-		[ 4] = RCAR_GP_PIN(6, 29),	/* USB30_OVC */
-		[ 5] = RCAR_GP_PIN(6, 30),	/* USB31_PWEN */
-		[ 6] = RCAR_GP_PIN(6, 31),	/* USB31_OVC */
-		[ 7] = SH_PFC_PIN_NONE,
-		[ 8] = SH_PFC_PIN_NONE,
-		[ 9] = SH_PFC_PIN_NONE,
-		[10] = SH_PFC_PIN_NONE,
-		[11] = SH_PFC_PIN_NONE,
-		[12] = SH_PFC_PIN_NONE,
-		[13] = SH_PFC_PIN_NONE,
-		[14] = SH_PFC_PIN_NONE,
-		[15] = SH_PFC_PIN_NONE,
-		[16] = SH_PFC_PIN_NONE,
-		[17] = SH_PFC_PIN_NONE,
-		[18] = SH_PFC_PIN_NONE,
-		[19] = SH_PFC_PIN_NONE,
-		[20] = SH_PFC_PIN_NONE,
-		[21] = SH_PFC_PIN_NONE,
-		[22] = SH_PFC_PIN_NONE,
-		[23] = SH_PFC_PIN_NONE,
-		[24] = SH_PFC_PIN_NONE,
-		[25] = SH_PFC_PIN_NONE,
-		[26] = SH_PFC_PIN_NONE,
-		[27] = SH_PFC_PIN_NONE,
-		[28] = SH_PFC_PIN_NONE,
-		[29] = SH_PFC_PIN_NONE,
-		[30] = SH_PFC_PIN_NONE,
-		[31] = SH_PFC_PIN_NONE,
-	} },
-	{ /* sentinel */ },
-};
-
-static unsigned int r8a7795es1_pinmux_get_bias(struct sh_pfc *pfc,
-					       unsigned int pin)
-{
-	const struct pinmux_bias_reg *reg;
-	unsigned int bit;
-
-	reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
-	if (!reg)
-		return PIN_CONFIG_BIAS_DISABLE;
-
-	if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
-		return PIN_CONFIG_BIAS_DISABLE;
-	else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
-		return PIN_CONFIG_BIAS_PULL_UP;
-	else
-		return PIN_CONFIG_BIAS_PULL_DOWN;
-}
-
-static void r8a7795es1_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
-				       unsigned int bias)
-{
-	const struct pinmux_bias_reg *reg;
-	u32 enable, updown;
-	unsigned int bit;
-
-	reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
-	if (!reg)
-		return;
-
-	enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
-	if (bias != PIN_CONFIG_BIAS_DISABLE)
-		enable |= BIT(bit);
-
-	updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
-	if (bias == PIN_CONFIG_BIAS_PULL_UP)
-		updown |= BIT(bit);
-
-	sh_pfc_write(pfc, reg->pud, updown);
-	sh_pfc_write(pfc, reg->puen, enable);
-}
-
-static const struct sh_pfc_soc_operations r8a7795es1_pinmux_ops = {
-	.pin_to_pocctrl = r8a7795es1_pin_to_pocctrl,
-	.get_bias = r8a7795es1_pinmux_get_bias,
-	.set_bias = r8a7795es1_pinmux_set_bias,
-};
-
-const struct sh_pfc_soc_info r8a7795es1_pinmux_info = {
-	.name = "r8a77950_pfc",
-	.ops = &r8a7795es1_pinmux_ops,
-	.unlock_reg = 0xe6060000, /* PMMR */
-
-	.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
-
-	.pins = pinmux_pins,
-	.nr_pins = ARRAY_SIZE(pinmux_pins),
-	.groups = pinmux_groups,
-	.nr_groups = ARRAY_SIZE(pinmux_groups),
-	.functions = pinmux_functions,
-	.nr_functions = ARRAY_SIZE(pinmux_functions),
-
-	.cfg_regs = pinmux_config_regs,
-	.drive_regs = pinmux_drive_regs,
-	.bias_regs = pinmux_bias_regs,
-	.ioctrl_regs = pinmux_ioctrl_regs,
-
-	.pinmux_data = pinmux_data,
-	.pinmux_data_size = ARRAY_SIZE(pinmux_data),
-};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
deleted file mode 100644
index d3145aa..0000000
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ /dev/null
@@ -1,6257 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * R8A7795 ES2.0+ processor support - PFC hardware block.
- *
- * Copyright (C) 2015-2019 Renesas Electronics Corporation
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sys_soc.h>
-
-#include "core.h"
-#include "sh_pfc.h"
-
-#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
-
-#define CPU_ALL_GP(fn, sfx)						\
-	PORT_GP_CFG_16(0, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_29(1, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_15(2, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_12(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE),	\
-	PORT_GP_CFG_1(3, 12, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_18(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE),	\
-	PORT_GP_CFG_26(5, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_32(6, fn, sfx, CFG_FLAGS),	\
-	PORT_GP_CFG_4(7, fn, sfx, CFG_FLAGS)
-
-#define CPU_ALL_NOGP(fn)						\
-	PIN_NOGP_CFG(ASEBRK, "ASEBRK", fn, CFG_FLAGS),			\
-	PIN_NOGP_CFG(AVB_MDIO, "AVB_MDIO", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_RD0, "AVB_RD0", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_RD1, "AVB_RD1", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_RD2, "AVB_RD2", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_RD3, "AVB_RD3", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_RXC, "AVB_RXC", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_RX_CTL, "AVB_RX_CTL", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_TD0, "AVB_TD0", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_TD1, "AVB_TD1", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_TD2, "AVB_TD2", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_TD3, "AVB_TD3", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_TXC, "AVB_TXC", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(AVB_TXCREFCLK, "AVB_TXCREFCLK", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(AVB_TX_CTL, "AVB_TX_CTL", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(DU_DOTCLKIN0, "DU_DOTCLKIN0", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(DU_DOTCLKIN1, "DU_DOTCLKIN1", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(DU_DOTCLKIN2, "DU_DOTCLKIN2", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(DU_DOTCLKIN3, "DU_DOTCLKIN3", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(EXTALR, "EXTALR", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN),\
-	PIN_NOGP_CFG(FSCLKST_N, "FSCLKST#", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(MLB_REF, "MLB_REF", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(PRESETOUT_N, "PRESETOUT#", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(QSPI0_IO2, "QSPI0_IO2", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(QSPI0_IO3, "QSPI0_IO3", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(QSPI0_MISO_IO1, "QSPI0_MISO_IO1", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(QSPI0_MOSI_IO0, "QSPI0_MOSI_IO0", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(QSPI0_SPCLK, "QSPI0_SPCLK", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(QSPI0_SSL, "QSPI0_SSL", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(QSPI1_IO2, "QSPI1_IO2", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(QSPI1_IO3, "QSPI1_IO3", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(QSPI1_MISO_IO1, "QSPI1_MISO_IO1", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(QSPI1_MOSI_IO0, "QSPI1_MOSI_IO0", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(QSPI1_SPCLK, "QSPI1_SPCLK", fn, CFG_FLAGS),	\
-	PIN_NOGP_CFG(QSPI1_SSL, "QSPI1_SSL", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(RPC_INT_N, "RPC_INT#", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(RPC_RESET_N, "RPC_RESET#", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(RPC_WP_N, "RPC_WP#", fn, CFG_FLAGS),		\
-	PIN_NOGP_CFG(TCK, "TCK", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN),	\
-	PIN_NOGP_CFG(TDI, "TDI", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN),	\
-	PIN_NOGP_CFG(TDO, "TDO", fn, SH_PFC_PIN_CFG_DRIVE_STRENGTH),	\
-	PIN_NOGP_CFG(TMS, "TMS", fn, CFG_FLAGS),			\
-	PIN_NOGP_CFG(TRST_N, "TRST#", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN)
-
-/*
- * F_() : just information
- * FM() : macro for FN_xxx / xxx_MARK
- */
-
-/* GPSR0 */
-#define GPSR0_15	F_(D15,			IP7_11_8)
-#define GPSR0_14	F_(D14,			IP7_7_4)
-#define GPSR0_13	F_(D13,			IP7_3_0)
-#define GPSR0_12	F_(D12,			IP6_31_28)
-#define GPSR0_11	F_(D11,			IP6_27_24)
-#define GPSR0_10	F_(D10,			IP6_23_20)
-#define GPSR0_9		F_(D9,			IP6_19_16)
-#define GPSR0_8		F_(D8,			IP6_15_12)
-#define GPSR0_7		F_(D7,			IP6_11_8)
-#define GPSR0_6		F_(D6,			IP6_7_4)
-#define GPSR0_5		F_(D5,			IP6_3_0)
-#define GPSR0_4		F_(D4,			IP5_31_28)
-#define GPSR0_3		F_(D3,			IP5_27_24)
-#define GPSR0_2		F_(D2,			IP5_23_20)
-#define GPSR0_1		F_(D1,			IP5_19_16)
-#define GPSR0_0		F_(D0,			IP5_15_12)
-
-/* GPSR1 */
-#define GPSR1_28	FM(CLKOUT)
-#define GPSR1_27	F_(EX_WAIT0_A,		IP5_11_8)
-#define GPSR1_26	F_(WE1_N,		IP5_7_4)
-#define GPSR1_25	F_(WE0_N,		IP5_3_0)
-#define GPSR1_24	F_(RD_WR_N,		IP4_31_28)
-#define GPSR1_23	F_(RD_N,		IP4_27_24)
-#define GPSR1_22	F_(BS_N,		IP4_23_20)
-#define GPSR1_21	F_(CS1_N,		IP4_19_16)
-#define GPSR1_20	F_(CS0_N,		IP4_15_12)
-#define GPSR1_19	F_(A19,			IP4_11_8)
-#define GPSR1_18	F_(A18,			IP4_7_4)
-#define GPSR1_17	F_(A17,			IP4_3_0)
-#define GPSR1_16	F_(A16,			IP3_31_28)
-#define GPSR1_15	F_(A15,			IP3_27_24)
-#define GPSR1_14	F_(A14,			IP3_23_20)
-#define GPSR1_13	F_(A13,			IP3_19_16)
-#define GPSR1_12	F_(A12,			IP3_15_12)
-#define GPSR1_11	F_(A11,			IP3_11_8)
-#define GPSR1_10	F_(A10,			IP3_7_4)
-#define GPSR1_9		F_(A9,			IP3_3_0)
-#define GPSR1_8		F_(A8,			IP2_31_28)
-#define GPSR1_7		F_(A7,			IP2_27_24)
-#define GPSR1_6		F_(A6,			IP2_23_20)
-#define GPSR1_5		F_(A5,			IP2_19_16)
-#define GPSR1_4		F_(A4,			IP2_15_12)
-#define GPSR1_3		F_(A3,			IP2_11_8)
-#define GPSR1_2		F_(A2,			IP2_7_4)
-#define GPSR1_1		F_(A1,			IP2_3_0)
-#define GPSR1_0		F_(A0,			IP1_31_28)
-
-/* GPSR2 */
-#define GPSR2_14	F_(AVB_AVTP_CAPTURE_A,	IP0_23_20)
-#define GPSR2_13	F_(AVB_AVTP_MATCH_A,	IP0_19_16)
-#define GPSR2_12	F_(AVB_LINK,		IP0_15_12)
-#define GPSR2_11	F_(AVB_PHY_INT,		IP0_11_8)
-#define GPSR2_10	F_(AVB_MAGIC,		IP0_7_4)
-#define GPSR2_9		F_(AVB_MDC,		IP0_3_0)
-#define GPSR2_8		F_(PWM2_A,		IP1_27_24)
-#define GPSR2_7		F_(PWM1_A,		IP1_23_20)
-#define GPSR2_6		F_(PWM0,		IP1_19_16)
-#define GPSR2_5		F_(IRQ5,		IP1_15_12)
-#define GPSR2_4		F_(IRQ4,		IP1_11_8)
-#define GPSR2_3		F_(IRQ3,		IP1_7_4)
-#define GPSR2_2		F_(IRQ2,		IP1_3_0)
-#define GPSR2_1		F_(IRQ1,		IP0_31_28)
-#define GPSR2_0		F_(IRQ0,		IP0_27_24)
-
-/* GPSR3 */
-#define GPSR3_15	F_(SD1_WP,		IP11_23_20)
-#define GPSR3_14	F_(SD1_CD,		IP11_19_16)
-#define GPSR3_13	F_(SD0_WP,		IP11_15_12)
-#define GPSR3_12	F_(SD0_CD,		IP11_11_8)
-#define GPSR3_11	F_(SD1_DAT3,		IP8_31_28)
-#define GPSR3_10	F_(SD1_DAT2,		IP8_27_24)
-#define GPSR3_9		F_(SD1_DAT1,		IP8_23_20)
-#define GPSR3_8		F_(SD1_DAT0,		IP8_19_16)
-#define GPSR3_7		F_(SD1_CMD,		IP8_15_12)
-#define GPSR3_6		F_(SD1_CLK,		IP8_11_8)
-#define GPSR3_5		F_(SD0_DAT3,		IP8_7_4)
-#define GPSR3_4		F_(SD0_DAT2,		IP8_3_0)
-#define GPSR3_3		F_(SD0_DAT1,		IP7_31_28)
-#define GPSR3_2		F_(SD0_DAT0,		IP7_27_24)
-#define GPSR3_1		F_(SD0_CMD,		IP7_23_20)
-#define GPSR3_0		F_(SD0_CLK,		IP7_19_16)
-
-/* GPSR4 */
-#define GPSR4_17	F_(SD3_DS,		IP11_7_4)
-#define GPSR4_16	F_(SD3_DAT7,		IP11_3_0)
-#define GPSR4_15	F_(SD3_DAT6,		IP10_31_28)
-#define GPSR4_14	F_(SD3_DAT5,		IP10_27_24)
-#define GPSR4_13	F_(SD3_DAT4,		IP10_23_20)
-#define GPSR4_12	F_(SD3_DAT3,		IP10_19_16)
-#define GPSR4_11	F_(SD3_DAT2,		IP10_15_12)
-#define GPSR4_10	F_(SD3_DAT1,		IP10_11_8)
-#define GPSR4_9		F_(SD3_DAT0,		IP10_7_4)
-#define GPSR4_8		F_(SD3_CMD,		IP10_3_0)
-#define GPSR4_7		F_(SD3_CLK,		IP9_31_28)
-#define GPSR4_6		F_(SD2_DS,		IP9_27_24)
-#define GPSR4_5		F_(SD2_DAT3,		IP9_23_20)
-#define GPSR4_4		F_(SD2_DAT2,		IP9_19_16)
-#define GPSR4_3		F_(SD2_DAT1,		IP9_15_12)
-#define GPSR4_2		F_(SD2_DAT0,		IP9_11_8)
-#define GPSR4_1		F_(SD2_CMD,		IP9_7_4)
-#define GPSR4_0		F_(SD2_CLK,		IP9_3_0)
-
-/* GPSR5 */
-#define GPSR5_25	F_(MLB_DAT,		IP14_19_16)
-#define GPSR5_24	F_(MLB_SIG,		IP14_15_12)
-#define GPSR5_23	F_(MLB_CLK,		IP14_11_8)
-#define GPSR5_22	FM(MSIOF0_RXD)
-#define GPSR5_21	F_(MSIOF0_SS2,		IP14_7_4)
-#define GPSR5_20	FM(MSIOF0_TXD)
-#define GPSR5_19	F_(MSIOF0_SS1,		IP14_3_0)
-#define GPSR5_18	F_(MSIOF0_SYNC,		IP13_31_28)
-#define GPSR5_17	FM(MSIOF0_SCK)
-#define GPSR5_16	F_(HRTS0_N,		IP13_27_24)
-#define GPSR5_15	F_(HCTS0_N,		IP13_23_20)
-#define GPSR5_14	F_(HTX0,		IP13_19_16)
-#define GPSR5_13	F_(HRX0,		IP13_15_12)
-#define GPSR5_12	F_(HSCK0,		IP13_11_8)
-#define GPSR5_11	F_(RX2_A,		IP13_7_4)
-#define GPSR5_10	F_(TX2_A,		IP13_3_0)
-#define GPSR5_9		F_(SCK2,		IP12_31_28)
-#define GPSR5_8		F_(RTS1_N,		IP12_27_24)
-#define GPSR5_7		F_(CTS1_N,		IP12_23_20)
-#define GPSR5_6		F_(TX1_A,		IP12_19_16)
-#define GPSR5_5		F_(RX1_A,		IP12_15_12)
-#define GPSR5_4		F_(RTS0_N,		IP12_11_8)
-#define GPSR5_3		F_(CTS0_N,		IP12_7_4)
-#define GPSR5_2		F_(TX0,			IP12_3_0)
-#define GPSR5_1		F_(RX0,			IP11_31_28)
-#define GPSR5_0		F_(SCK0,		IP11_27_24)
-
-/* GPSR6 */
-#define GPSR6_31	F_(USB2_CH3_OVC,	IP18_7_4)
-#define GPSR6_30	F_(USB2_CH3_PWEN,	IP18_3_0)
-#define GPSR6_29	F_(USB30_OVC,		IP17_31_28)
-#define GPSR6_28	F_(USB30_PWEN,		IP17_27_24)
-#define GPSR6_27	F_(USB1_OVC,		IP17_23_20)
-#define GPSR6_26	F_(USB1_PWEN,		IP17_19_16)
-#define GPSR6_25	F_(USB0_OVC,		IP17_15_12)
-#define GPSR6_24	F_(USB0_PWEN,		IP17_11_8)
-#define GPSR6_23	F_(AUDIO_CLKB_B,	IP17_7_4)
-#define GPSR6_22	F_(AUDIO_CLKA_A,	IP17_3_0)
-#define GPSR6_21	F_(SSI_SDATA9_A,	IP16_31_28)
-#define GPSR6_20	F_(SSI_SDATA8,		IP16_27_24)
-#define GPSR6_19	F_(SSI_SDATA7,		IP16_23_20)
-#define GPSR6_18	F_(SSI_WS78,		IP16_19_16)
-#define GPSR6_17	F_(SSI_SCK78,		IP16_15_12)
-#define GPSR6_16	F_(SSI_SDATA6,		IP16_11_8)
-#define GPSR6_15	F_(SSI_WS6,		IP16_7_4)
-#define GPSR6_14	F_(SSI_SCK6,		IP16_3_0)
-#define GPSR6_13	FM(SSI_SDATA5)
-#define GPSR6_12	FM(SSI_WS5)
-#define GPSR6_11	FM(SSI_SCK5)
-#define GPSR6_10	F_(SSI_SDATA4,		IP15_31_28)
-#define GPSR6_9		F_(SSI_WS4,		IP15_27_24)
-#define GPSR6_8		F_(SSI_SCK4,		IP15_23_20)
-#define GPSR6_7		F_(SSI_SDATA3,		IP15_19_16)
-#define GPSR6_6		F_(SSI_WS349,		IP15_15_12)
-#define GPSR6_5		F_(SSI_SCK349,		IP15_11_8)
-#define GPSR6_4		F_(SSI_SDATA2_A,	IP15_7_4)
-#define GPSR6_3		F_(SSI_SDATA1_A,	IP15_3_0)
-#define GPSR6_2		F_(SSI_SDATA0,		IP14_31_28)
-#define GPSR6_1		F_(SSI_WS01239,		IP14_27_24)
-#define GPSR6_0		F_(SSI_SCK01239,		IP14_23_20)
-
-/* GPSR7 */
-#define GPSR7_3		FM(GP7_03)
-#define GPSR7_2		FM(GP7_02)
-#define GPSR7_1		FM(AVS2)
-#define GPSR7_0		FM(AVS1)
-
-
-/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
-#define IP0_3_0		FM(AVB_MDC)		F_(0, 0)	FM(MSIOF2_SS2_C)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_7_4		FM(AVB_MAGIC)		F_(0, 0)	FM(MSIOF2_SS1_C)	FM(SCK4_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_11_8	FM(AVB_PHY_INT)		F_(0, 0)	FM(MSIOF2_SYNC_C)	FM(RX4_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_15_12	FM(AVB_LINK)		F_(0, 0)	FM(MSIOF2_SCK_C)	FM(TX4_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_19_16	FM(AVB_AVTP_MATCH_A)	F_(0, 0)	FM(MSIOF2_RXD_C)	FM(CTS4_N_A)			F_(0, 0)	FM(FSCLKST2_N_A) F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_23_20	FM(AVB_AVTP_CAPTURE_A)	F_(0, 0)	FM(MSIOF2_TXD_C)	FM(RTS4_N_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_27_24	FM(IRQ0)		FM(QPOLB)	F_(0, 0)		FM(DU_CDE)			FM(VI4_DATA0_B) FM(CAN0_TX_B)	FM(CANFD0_TX_B)		FM(MSIOF3_SS2_E) F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_31_28	FM(IRQ1)		FM(QPOLA)	F_(0, 0)		FM(DU_DISP)			FM(VI4_DATA1_B) FM(CAN0_RX_B)	FM(CANFD0_RX_B)		FM(MSIOF3_SS1_E) F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_3_0		FM(IRQ2)		FM(QCPV_QDE)	F_(0, 0)		FM(DU_EXODDF_DU_ODDF_DISP_CDE)	FM(VI4_DATA2_B) F_(0, 0)	F_(0, 0)		FM(MSIOF3_SYNC_E) F_(0, 0)		FM(PWM3_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_7_4		FM(IRQ3)		FM(QSTVB_QVE)	F_(0, 0)		FM(DU_DOTCLKOUT1)		FM(VI4_DATA3_B) F_(0, 0)	F_(0, 0)		FM(MSIOF3_SCK_E) F_(0, 0)		FM(PWM4_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_11_8	FM(IRQ4)		FM(QSTH_QHS)	F_(0, 0)		FM(DU_EXHSYNC_DU_HSYNC)		FM(VI4_DATA4_B) F_(0, 0)	F_(0, 0)		FM(MSIOF3_RXD_E) F_(0, 0)		FM(PWM5_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_15_12	FM(IRQ5)		FM(QSTB_QHE)	F_(0, 0)		FM(DU_EXVSYNC_DU_VSYNC)		FM(VI4_DATA5_B) FM(FSCLKST2_N_B) F_(0, 0)		FM(MSIOF3_TXD_E) F_(0, 0)		FM(PWM6_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_19_16	FM(PWM0)		FM(AVB_AVTP_PPS)F_(0, 0)		F_(0, 0)			FM(VI4_DATA6_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(IECLK_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_23_20	FM(PWM1_A)		F_(0, 0)	F_(0, 0)		FM(HRX3_D)			FM(VI4_DATA7_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(IERX_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_27_24	FM(PWM2_A)		F_(0, 0)	F_(0, 0)		FM(HTX3_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(IETX_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1_31_28	FM(A0)			FM(LCDOUT16)	FM(MSIOF3_SYNC_B)	F_(0, 0)			FM(VI4_DATA8)	F_(0, 0)	FM(DU_DB0)		F_(0, 0)	F_(0, 0)		FM(PWM3_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_3_0		FM(A1)			FM(LCDOUT17)	FM(MSIOF3_TXD_B)	F_(0, 0)			FM(VI4_DATA9)	F_(0, 0)	FM(DU_DB1)		F_(0, 0)	F_(0, 0)		FM(PWM4_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_7_4		FM(A2)			FM(LCDOUT18)	FM(MSIOF3_SCK_B)	F_(0, 0)			FM(VI4_DATA10)	F_(0, 0)	FM(DU_DB2)		F_(0, 0)	F_(0, 0)		FM(PWM5_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_11_8	FM(A3)			FM(LCDOUT19)	FM(MSIOF3_RXD_B)	F_(0, 0)			FM(VI4_DATA11)	F_(0, 0)	FM(DU_DB3)		F_(0, 0)	F_(0, 0)		FM(PWM6_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
-#define IP2_15_12	FM(A4)			FM(LCDOUT20)	FM(MSIOF3_SS1_B)	F_(0, 0)			FM(VI4_DATA12)	FM(VI5_DATA12)	FM(DU_DB4)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_19_16	FM(A5)			FM(LCDOUT21)	FM(MSIOF3_SS2_B)	FM(SCK4_B)			FM(VI4_DATA13)	FM(VI5_DATA13)	FM(DU_DB5)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_23_20	FM(A6)			FM(LCDOUT22)	FM(MSIOF2_SS1_A)	FM(RX4_B)			FM(VI4_DATA14)	FM(VI5_DATA14)	FM(DU_DB6)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_27_24	FM(A7)			FM(LCDOUT23)	FM(MSIOF2_SS2_A)	FM(TX4_B)			FM(VI4_DATA15)	FM(VI5_DATA15)	FM(DU_DB7)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_31_28	FM(A8)			FM(RX3_B)	FM(MSIOF2_SYNC_A)	FM(HRX4_B)			F_(0, 0)	F_(0, 0)	F_(0, 0)		FM(SDA6_A)	FM(AVB_AVTP_MATCH_B)	FM(PWM1_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_3_0		FM(A9)			F_(0, 0)	FM(MSIOF2_SCK_A)	FM(CTS4_N_B)			F_(0, 0)	FM(VI5_VSYNC_N)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_7_4		FM(A10)			F_(0, 0)	FM(MSIOF2_RXD_A)	FM(RTS4_N_B)			F_(0, 0)	FM(VI5_HSYNC_N)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_11_8	FM(A11)			FM(TX3_B)	FM(MSIOF2_TXD_A)	FM(HTX4_B)			FM(HSCK4)	FM(VI5_FIELD)	F_(0, 0)		FM(SCL6_A)	FM(AVB_AVTP_CAPTURE_B)	FM(PWM2_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_15_12	FM(A12)			FM(LCDOUT12)	FM(MSIOF3_SCK_C)	F_(0, 0)			FM(HRX4_A)	FM(VI5_DATA8)	FM(DU_DG4)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_19_16	FM(A13)			FM(LCDOUT13)	FM(MSIOF3_SYNC_C)	F_(0, 0)			FM(HTX4_A)	FM(VI5_DATA9)	FM(DU_DG5)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_23_20	FM(A14)			FM(LCDOUT14)	FM(MSIOF3_RXD_C)	F_(0, 0)			FM(HCTS4_N)	FM(VI5_DATA10)	FM(DU_DG6)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_27_24	FM(A15)			FM(LCDOUT15)	FM(MSIOF3_TXD_C)	F_(0, 0)			FM(HRTS4_N)	FM(VI5_DATA11)	FM(DU_DG7)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3_31_28	FM(A16)			FM(LCDOUT8)	F_(0, 0)		F_(0, 0)			FM(VI4_FIELD)	F_(0, 0)	FM(DU_DG0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_3_0		FM(A17)			FM(LCDOUT9)	F_(0, 0)		F_(0, 0)			FM(VI4_VSYNC_N)	F_(0, 0)	FM(DU_DG1)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_7_4		FM(A18)			FM(LCDOUT10)	F_(0, 0)		F_(0, 0)			FM(VI4_HSYNC_N)	F_(0, 0)	FM(DU_DG2)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_11_8	FM(A19)			FM(LCDOUT11)	F_(0, 0)		F_(0, 0)			FM(VI4_CLKENB)	F_(0, 0)	FM(DU_DG3)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_15_12	FM(CS0_N)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(VI5_CLKENB)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_19_16	FM(CS1_N)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(VI5_CLK)	F_(0, 0)		FM(EX_WAIT0_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_23_20	FM(BS_N)		FM(QSTVA_QVS)	FM(MSIOF3_SCK_D)	FM(SCK3)			FM(HSCK3)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(CAN1_TX)		FM(CANFD1_TX)	FM(IETX_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_27_24	FM(RD_N)		F_(0, 0)	FM(MSIOF3_SYNC_D)	FM(RX3_A)			FM(HRX3_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(CAN0_TX_A)		FM(CANFD0_TX_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP4_31_28	FM(RD_WR_N)		F_(0, 0)	FM(MSIOF3_RXD_D)	FM(TX3_A)			FM(HTX3_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(CAN0_RX_A)		FM(CANFD0_RX_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_3_0		FM(WE0_N)		F_(0, 0)	FM(MSIOF3_TXD_D)	FM(CTS3_N)			FM(HCTS3_N)	F_(0, 0)	F_(0, 0)		FM(SCL6_B)	FM(CAN_CLK)		F_(0, 0)	FM(IECLK_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_7_4		FM(WE1_N)		F_(0, 0)	FM(MSIOF3_SS1_D)	FM(RTS3_N)			FM(HRTS3_N)	F_(0, 0)	F_(0, 0)		FM(SDA6_B)	FM(CAN1_RX)		FM(CANFD1_RX)	FM(IERX_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_11_8	FM(EX_WAIT0_A)		FM(QCLK)	F_(0, 0)		F_(0, 0)			FM(VI4_CLK)	F_(0, 0)	FM(DU_DOTCLKOUT0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_15_12	FM(D0)			FM(MSIOF2_SS1_B)FM(MSIOF3_SCK_A)	F_(0, 0)			FM(VI4_DATA16)	FM(VI5_DATA0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_19_16	FM(D1)			FM(MSIOF2_SS2_B)FM(MSIOF3_SYNC_A)	F_(0, 0)			FM(VI4_DATA17)	FM(VI5_DATA1)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_23_20	FM(D2)			F_(0, 0)	FM(MSIOF3_RXD_A)	F_(0, 0)			FM(VI4_DATA18)	FM(VI5_DATA2)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_27_24	FM(D3)			F_(0, 0)	FM(MSIOF3_TXD_A)	F_(0, 0)			FM(VI4_DATA19)	FM(VI5_DATA3)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP5_31_28	FM(D4)			FM(MSIOF2_SCK_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA20)	FM(VI5_DATA4)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_3_0		FM(D5)			FM(MSIOF2_SYNC_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA21)	FM(VI5_DATA5)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_7_4		FM(D6)			FM(MSIOF2_RXD_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA22)	FM(VI5_DATA6)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_11_8	FM(D7)			FM(MSIOF2_TXD_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA23)	FM(VI5_DATA7)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_15_12	FM(D8)			FM(LCDOUT0)	FM(MSIOF2_SCK_D)	FM(SCK4_C)			FM(VI4_DATA0_A)	F_(0, 0)	FM(DU_DR0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_19_16	FM(D9)			FM(LCDOUT1)	FM(MSIOF2_SYNC_D)	F_(0, 0)			FM(VI4_DATA1_A)	F_(0, 0)	FM(DU_DR1)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_23_20	FM(D10)			FM(LCDOUT2)	FM(MSIOF2_RXD_D)	FM(HRX3_B)			FM(VI4_DATA2_A)	FM(CTS4_N_C)	FM(DU_DR2)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_27_24	FM(D11)			FM(LCDOUT3)	FM(MSIOF2_TXD_D)	FM(HTX3_B)			FM(VI4_DATA3_A)	FM(RTS4_N_C)	FM(DU_DR3)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP6_31_28	FM(D12)			FM(LCDOUT4)	FM(MSIOF2_SS1_D)	FM(RX4_C)			FM(VI4_DATA4_A)	F_(0, 0)	FM(DU_DR4)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_3_0		FM(D13)			FM(LCDOUT5)	FM(MSIOF2_SS2_D)	FM(TX4_C)			FM(VI4_DATA5_A)	F_(0, 0)	FM(DU_DR5)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_7_4		FM(D14)			FM(LCDOUT6)	FM(MSIOF3_SS1_A)	FM(HRX3_C)			FM(VI4_DATA6_A)	F_(0, 0)	FM(DU_DR6)		FM(SCL6_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_11_8	FM(D15)			FM(LCDOUT7)	FM(MSIOF3_SS2_A)	FM(HTX3_C)			FM(VI4_DATA7_A)	F_(0, 0)	FM(DU_DR7)		FM(SDA6_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_19_16	FM(SD0_CLK)		F_(0, 0)	FM(MSIOF1_SCK_E)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_OPWM_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
-#define IP7_23_20	FM(SD0_CMD)		F_(0, 0)	FM(MSIOF1_SYNC_E)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_IVCXO27_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_27_24	FM(SD0_DAT0)		F_(0, 0)	FM(MSIOF1_RXD_E)	F_(0, 0)			F_(0, 0)	FM(TS_SCK0_B)	FM(STP_ISCLK_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP7_31_28	FM(SD0_DAT1)		F_(0, 0)	FM(MSIOF1_TXD_E)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC0_B)FM(STP_ISSYNC_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_3_0		FM(SD0_DAT2)		F_(0, 0)	FM(MSIOF1_SS1_E)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT0_B)	FM(STP_ISD_0_B)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_7_4		FM(SD0_DAT3)		F_(0, 0)	FM(MSIOF1_SS2_E)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN0_B)	FM(STP_ISEN_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_11_8	FM(SD1_CLK)		F_(0, 0)	FM(MSIOF1_SCK_G)	F_(0, 0)			F_(0, 0)	FM(SIM0_CLK_A)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_15_12	FM(SD1_CMD)		F_(0, 0)	FM(MSIOF1_SYNC_G)	FM(NFCE_N_B)			F_(0, 0)	FM(SIM0_D_A)	FM(STP_IVCXO27_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_19_16	FM(SD1_DAT0)		FM(SD2_DAT4)	FM(MSIOF1_RXD_G)	FM(NFWP_N_B)			F_(0, 0)	FM(TS_SCK1_B)	FM(STP_ISCLK_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_23_20	FM(SD1_DAT1)		FM(SD2_DAT5)	FM(MSIOF1_TXD_G)	FM(NFDATA14_B)			F_(0, 0)	FM(TS_SPSYNC1_B)FM(STP_ISSYNC_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_27_24	FM(SD1_DAT2)		FM(SD2_DAT6)	FM(MSIOF1_SS1_G)	FM(NFDATA15_B)			F_(0, 0)	FM(TS_SDAT1_B)	FM(STP_ISD_1_B)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP8_31_28	FM(SD1_DAT3)		FM(SD2_DAT7)	FM(MSIOF1_SS2_G)	FM(NFRB_N_B)			F_(0, 0)	FM(TS_SDEN1_B)	FM(STP_ISEN_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_3_0		FM(SD2_CLK)		F_(0, 0)	FM(NFDATA8)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_7_4		FM(SD2_CMD)		F_(0, 0)	FM(NFDATA9)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_11_8	FM(SD2_DAT0)		F_(0, 0)	FM(NFDATA10)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_15_12	FM(SD2_DAT1)		F_(0, 0)	FM(NFDATA11)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_19_16	FM(SD2_DAT2)		F_(0, 0)	FM(NFDATA12)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_23_20	FM(SD2_DAT3)		F_(0, 0)	FM(NFDATA13)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_27_24	FM(SD2_DS)		F_(0, 0)	FM(NFALE)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(SATA_DEVSLP_B)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP9_31_28	FM(SD3_CLK)		F_(0, 0)	FM(NFWE_N)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_3_0	FM(SD3_CMD)		F_(0, 0)	FM(NFRE_N)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_7_4	FM(SD3_DAT0)		F_(0, 0)	FM(NFDATA0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_11_8	FM(SD3_DAT1)		F_(0, 0)	FM(NFDATA1)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_15_12	FM(SD3_DAT2)		F_(0, 0)	FM(NFDATA2)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_19_16	FM(SD3_DAT3)		F_(0, 0)	FM(NFDATA3)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_23_20	FM(SD3_DAT4)		FM(SD2_CD_A)	FM(NFDATA4)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_27_24	FM(SD3_DAT5)		FM(SD2_WP_A)	FM(NFDATA5)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP10_31_28	FM(SD3_DAT6)		FM(SD3_CD)	FM(NFDATA6)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_3_0	FM(SD3_DAT7)		FM(SD3_WP)	FM(NFDATA7)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_7_4	FM(SD3_DS)		F_(0, 0)	FM(NFCLE)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_11_8	FM(SD0_CD)		F_(0, 0)	FM(NFDATA14_A)		F_(0, 0)			FM(SCL2_B)	FM(SIM0_RST_A)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
-#define IP11_15_12	FM(SD0_WP)		F_(0, 0)	FM(NFDATA15_A)		F_(0, 0)			FM(SDA2_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_19_16	FM(SD1_CD)		F_(0, 0)	FM(NFRB_N_A)		F_(0, 0)			F_(0, 0)	FM(SIM0_CLK_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_23_20	FM(SD1_WP)		F_(0, 0)	FM(NFCE_N_A)		F_(0, 0)			F_(0, 0)	FM(SIM0_D_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_27_24	FM(SCK0)		FM(HSCK1_B)	FM(MSIOF1_SS2_B)	FM(AUDIO_CLKC_B)		FM(SDA2_A)	FM(SIM0_RST_B)	FM(STP_OPWM_0_C)	FM(RIF0_CLK_B)	F_(0, 0)		FM(ADICHS2)	FM(SCK5_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_31_28	FM(RX0)			FM(HRX1_B)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SCK0_C)	FM(STP_ISCLK_0_C)	FM(RIF0_D0_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_3_0	FM(TX0)			FM(HTX1_B)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC0_C)FM(STP_ISSYNC_0_C)	FM(RIF0_D1_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_7_4	FM(CTS0_N)		FM(HCTS1_N_B)	FM(MSIOF1_SYNC_B)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC1_C)FM(STP_ISSYNC_1_C)	FM(RIF1_SYNC_B)	FM(AUDIO_CLKOUT_C)	FM(ADICS_SAMP)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_11_8	FM(RTS0_N)		FM(HRTS1_N_B)	FM(MSIOF1_SS1_B)	FM(AUDIO_CLKA_B)		FM(SCL2_A)	F_(0, 0)	FM(STP_IVCXO27_1_C)	FM(RIF0_SYNC_B)	F_(0, 0)		FM(ADICHS1)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_15_12	FM(RX1_A)		FM(HRX1_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SDAT0_C)	FM(STP_ISD_0_C)		FM(RIF1_CLK_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_19_16	FM(TX1_A)		FM(HTX1_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SDEN0_C)	FM(STP_ISEN_0_C)	FM(RIF1_D0_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_23_20	FM(CTS1_N)		FM(HCTS1_N_A)	FM(MSIOF1_RXD_B)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN1_C)	FM(STP_ISEN_1_C)	FM(RIF1_D0_B)	F_(0, 0)		FM(ADIDATA)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_27_24	FM(RTS1_N)		FM(HRTS1_N_A)	FM(MSIOF1_TXD_B)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT1_C)	FM(STP_ISD_1_C)		FM(RIF1_D1_B)	F_(0, 0)		FM(ADICHS0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP12_31_28	FM(SCK2)		FM(SCIF_CLK_B)	FM(MSIOF1_SCK_B)	F_(0, 0)			F_(0, 0)	FM(TS_SCK1_C)	FM(STP_ISCLK_1_C)	FM(RIF1_CLK_B)	F_(0, 0)		FM(ADICLK)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_3_0	FM(TX2_A)		F_(0, 0)	F_(0, 0)		FM(SD2_CD_B)			FM(SCL1_A)	F_(0, 0)	FM(FMCLK_A)		FM(RIF1_D1_C)	F_(0, 0)		FM(FSO_CFE_0_N)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_7_4	FM(RX2_A)		F_(0, 0)	F_(0, 0)		FM(SD2_WP_B)			FM(SDA1_A)	F_(0, 0)	FM(FMIN_A)		FM(RIF1_SYNC_C)	F_(0, 0)		FM(FSO_CFE_1_N)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_11_8	FM(HSCK0)		F_(0, 0)	FM(MSIOF1_SCK_D)	FM(AUDIO_CLKB_A)		FM(SSI_SDATA1_B)FM(TS_SCK0_D)	FM(STP_ISCLK_0_D)	FM(RIF0_CLK_C)	F_(0, 0)		F_(0, 0)	FM(RX5_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_15_12	FM(HRX0)		F_(0, 0)	FM(MSIOF1_RXD_D)	F_(0, 0)			FM(SSI_SDATA2_B)FM(TS_SDEN0_D)	FM(STP_ISEN_0_D)	FM(RIF0_D0_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_19_16	FM(HTX0)		F_(0, 0)	FM(MSIOF1_TXD_D)	F_(0, 0)			FM(SSI_SDATA9_B)FM(TS_SDAT0_D)	FM(STP_ISD_0_D)		FM(RIF0_D1_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_23_20	FM(HCTS0_N)		FM(RX2_B)	FM(MSIOF1_SYNC_D)	F_(0, 0)			FM(SSI_SCK9_A)	FM(TS_SPSYNC0_D)FM(STP_ISSYNC_0_D)	FM(RIF0_SYNC_C)	FM(AUDIO_CLKOUT1_A)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_27_24	FM(HRTS0_N)		FM(TX2_B)	FM(MSIOF1_SS1_D)	F_(0, 0)			FM(SSI_WS9_A)	F_(0, 0)	FM(STP_IVCXO27_0_D)	FM(BPFCLK_A)	FM(AUDIO_CLKOUT2_A)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_31_28	FM(MSIOF0_SYNC)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(AUDIO_CLKOUT_A)	F_(0, 0)	FM(TX5_B)	F_(0, 0)	F_(0, 0) FM(BPFCLK_D) F_(0, 0) F_(0, 0)
-#define IP14_3_0	FM(MSIOF0_SS1)		FM(RX5_A)	FM(NFWP_N_A)		FM(AUDIO_CLKA_C)		FM(SSI_SCK2_A)	F_(0, 0)	FM(STP_IVCXO27_0_C)	F_(0, 0)	FM(AUDIO_CLKOUT3_A)	F_(0, 0)	FM(TCLK1_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_7_4	FM(MSIOF0_SS2)		FM(TX5_A)	FM(MSIOF1_SS2_D)	FM(AUDIO_CLKC_A)		FM(SSI_WS2_A)	F_(0, 0)	FM(STP_OPWM_0_D)	F_(0, 0)	FM(AUDIO_CLKOUT_D)	F_(0, 0)	FM(SPEEDIN_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_11_8	FM(MLB_CLK)		F_(0, 0)	FM(MSIOF1_SCK_F)	F_(0, 0)			FM(SCL1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_15_12	FM(MLB_SIG)		FM(RX1_B)	FM(MSIOF1_SYNC_F)	F_(0, 0)			FM(SDA1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_19_16	FM(MLB_DAT)		FM(TX1_B)	FM(MSIOF1_RXD_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_23_20	FM(SSI_SCK01239)	F_(0, 0)	FM(MSIOF1_TXD_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_27_24	FM(SSI_WS01239)		F_(0, 0)	FM(MSIOF1_SS1_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
-#define IP14_31_28	FM(SSI_SDATA0)		F_(0, 0)	FM(MSIOF1_SS2_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_3_0	FM(SSI_SDATA1_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_7_4	FM(SSI_SDATA2_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)			FM(SSI_SCK1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_11_8	FM(SSI_SCK349)		F_(0, 0)	FM(MSIOF1_SS1_A)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_OPWM_0_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_15_12	FM(SSI_WS349)		FM(HCTS2_N_A)	FM(MSIOF1_SS2_A)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_IVCXO27_0_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_19_16	FM(SSI_SDATA3)		FM(HRTS2_N_A)	FM(MSIOF1_TXD_A)	F_(0, 0)			F_(0, 0)	FM(TS_SCK0_A)	FM(STP_ISCLK_0_A)	FM(RIF0_D1_A)	FM(RIF2_D0_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_23_20	FM(SSI_SCK4)		FM(HRX2_A)	FM(MSIOF1_SCK_A)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT0_A)	FM(STP_ISD_0_A)		FM(RIF0_CLK_A)	FM(RIF2_CLK_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_27_24	FM(SSI_WS4)		FM(HTX2_A)	FM(MSIOF1_SYNC_A)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN0_A)	FM(STP_ISEN_0_A)	FM(RIF0_SYNC_A)	FM(RIF2_SYNC_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_31_28	FM(SSI_SDATA4)		FM(HSCK2_A)	FM(MSIOF1_RXD_A)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC0_A)FM(STP_ISSYNC_0_A)	FM(RIF0_D0_A)	FM(RIF2_D1_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_3_0	FM(SSI_SCK6)		FM(USB2_PWEN)	F_(0, 0)		FM(SIM0_RST_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_7_4	FM(SSI_WS6)		FM(USB2_OVC)	F_(0, 0)		FM(SIM0_D_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_11_8	FM(SSI_SDATA6)		F_(0, 0)	F_(0, 0)		FM(SIM0_CLK_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(SATA_DEVSLP_A)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_15_12	FM(SSI_SCK78)		FM(HRX2_B)	FM(MSIOF1_SCK_C)	F_(0, 0)			F_(0, 0)	FM(TS_SCK1_A)	FM(STP_ISCLK_1_A)	FM(RIF1_CLK_A)	FM(RIF3_CLK_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_19_16	FM(SSI_WS78)		FM(HTX2_B)	FM(MSIOF1_SYNC_C)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT1_A)	FM(STP_ISD_1_A)		FM(RIF1_SYNC_A)	FM(RIF3_SYNC_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_23_20	FM(SSI_SDATA7)		FM(HCTS2_N_B)	FM(MSIOF1_RXD_C)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN1_A)	FM(STP_ISEN_1_A)	FM(RIF1_D0_A)	FM(RIF3_D0_A)		F_(0, 0)	FM(TCLK2_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_27_24	FM(SSI_SDATA8)		FM(HRTS2_N_B)	FM(MSIOF1_TXD_C)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC1_A)FM(STP_ISSYNC_1_A)	FM(RIF1_D1_A)	FM(RIF3_D1_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP16_31_28	FM(SSI_SDATA9_A)	FM(HSCK2_B)	FM(MSIOF1_SS1_C)	FM(HSCK1_A)			FM(SSI_WS1_B)	FM(SCK1)	FM(STP_IVCXO27_1_A)	FM(SCK5_A)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP17_3_0	FM(AUDIO_CLKA_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP17_7_4	FM(AUDIO_CLKB_B)	FM(SCIF_CLK_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_IVCXO27_1_D)	FM(REMOCON_A)	F_(0, 0)		F_(0, 0)	FM(TCLK1_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP17_11_8	FM(USB0_PWEN)		F_(0, 0)	F_(0, 0)		FM(SIM0_RST_C)			F_(0, 0)	FM(TS_SCK1_D)	FM(STP_ISCLK_1_D)	FM(BPFCLK_B)	FM(RIF3_CLK_B)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) FM(HSCK2_C) F_(0, 0) F_(0, 0)
-#define IP17_15_12	FM(USB0_OVC)		F_(0, 0)	F_(0, 0)		FM(SIM0_D_C)			F_(0, 0)	FM(TS_SDAT1_D)	FM(STP_ISD_1_D)		F_(0, 0)	FM(RIF3_SYNC_B)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) FM(HRX2_C) F_(0, 0) F_(0, 0)
-#define IP17_19_16	FM(USB1_PWEN)		F_(0, 0)	F_(0, 0)		FM(SIM0_CLK_C)			FM(SSI_SCK1_A)	FM(TS_SCK0_E)	FM(STP_ISCLK_0_E)	FM(FMCLK_B)	FM(RIF2_CLK_B)		F_(0, 0)	FM(SPEEDIN_A)	F_(0, 0)	F_(0, 0) FM(HTX2_C) F_(0, 0) F_(0, 0)
-#define IP17_23_20	FM(USB1_OVC)		F_(0, 0)	FM(MSIOF1_SS2_C)	F_(0, 0)			FM(SSI_WS1_A)	FM(TS_SDAT0_E)	FM(STP_ISD_0_E)		FM(FMIN_B)	FM(RIF2_SYNC_B)		F_(0, 0)	FM(REMOCON_B)	F_(0, 0)	F_(0, 0) FM(HCTS2_N_C) F_(0, 0) F_(0, 0)
-#define IP17_27_24	FM(USB30_PWEN)		F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT_B)		FM(SSI_SCK2_B)	FM(TS_SDEN1_D)	FM(STP_ISEN_1_D)	FM(STP_OPWM_0_E)FM(RIF3_D0_B)		F_(0, 0)	FM(TCLK2_B)	FM(TPU0TO0)	FM(BPFCLK_C) FM(HRTS2_N_C) F_(0, 0) F_(0, 0)
-#define IP17_31_28	FM(USB30_OVC)		F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT1_B)		FM(SSI_WS2_B)	FM(TS_SPSYNC1_D)FM(STP_ISSYNC_1_D)	FM(STP_IVCXO27_0_E)FM(RIF3_D1_B)	F_(0, 0)	FM(FSO_TOE_N)	FM(TPU0TO1)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP18_3_0	FM(USB2_CH3_PWEN)	F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT2_B)		FM(SSI_SCK9_B)	FM(TS_SDEN0_E)	FM(STP_ISEN_0_E)	F_(0, 0)	FM(RIF2_D0_B)		F_(0, 0)	F_(0, 0)	FM(TPU0TO2)	FM(FMCLK_C) FM(FMCLK_D) F_(0, 0) F_(0, 0)
-#define IP18_7_4	FM(USB2_CH3_OVC)	F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT3_B)		FM(SSI_WS9_B)	FM(TS_SPSYNC0_E)FM(STP_ISSYNC_0_E)	F_(0, 0)	FM(RIF2_D1_B)		F_(0, 0)	F_(0, 0)	FM(TPU0TO3)	FM(FMIN_C) FM(FMIN_D) F_(0, 0) F_(0, 0)
-
-#define PINMUX_GPSR	\
-\
-												GPSR6_31 \
-												GPSR6_30 \
-												GPSR6_29 \
-		GPSR1_28									GPSR6_28 \
-		GPSR1_27									GPSR6_27 \
-		GPSR1_26									GPSR6_26 \
-		GPSR1_25							GPSR5_25	GPSR6_25 \
-		GPSR1_24							GPSR5_24	GPSR6_24 \
-		GPSR1_23							GPSR5_23	GPSR6_23 \
-		GPSR1_22							GPSR5_22	GPSR6_22 \
-		GPSR1_21							GPSR5_21	GPSR6_21 \
-		GPSR1_20							GPSR5_20	GPSR6_20 \
-		GPSR1_19							GPSR5_19	GPSR6_19 \
-		GPSR1_18							GPSR5_18	GPSR6_18 \
-		GPSR1_17					GPSR4_17	GPSR5_17	GPSR6_17 \
-		GPSR1_16					GPSR4_16	GPSR5_16	GPSR6_16 \
-GPSR0_15	GPSR1_15			GPSR3_15	GPSR4_15	GPSR5_15	GPSR6_15 \
-GPSR0_14	GPSR1_14	GPSR2_14	GPSR3_14	GPSR4_14	GPSR5_14	GPSR6_14 \
-GPSR0_13	GPSR1_13	GPSR2_13	GPSR3_13	GPSR4_13	GPSR5_13	GPSR6_13 \
-GPSR0_12	GPSR1_12	GPSR2_12	GPSR3_12	GPSR4_12	GPSR5_12	GPSR6_12 \
-GPSR0_11	GPSR1_11	GPSR2_11	GPSR3_11	GPSR4_11	GPSR5_11	GPSR6_11 \
-GPSR0_10	GPSR1_10	GPSR2_10	GPSR3_10	GPSR4_10	GPSR5_10	GPSR6_10 \
-GPSR0_9		GPSR1_9		GPSR2_9		GPSR3_9		GPSR4_9		GPSR5_9		GPSR6_9 \
-GPSR0_8		GPSR1_8		GPSR2_8		GPSR3_8		GPSR4_8		GPSR5_8		GPSR6_8 \
-GPSR0_7		GPSR1_7		GPSR2_7		GPSR3_7		GPSR4_7		GPSR5_7		GPSR6_7 \
-GPSR0_6		GPSR1_6		GPSR2_6		GPSR3_6		GPSR4_6		GPSR5_6		GPSR6_6 \
-GPSR0_5		GPSR1_5		GPSR2_5		GPSR3_5		GPSR4_5		GPSR5_5		GPSR6_5 \
-GPSR0_4		GPSR1_4		GPSR2_4		GPSR3_4		GPSR4_4		GPSR5_4		GPSR6_4 \
-GPSR0_3		GPSR1_3		GPSR2_3		GPSR3_3		GPSR4_3		GPSR5_3		GPSR6_3		GPSR7_3 \
-GPSR0_2		GPSR1_2		GPSR2_2		GPSR3_2		GPSR4_2		GPSR5_2		GPSR6_2		GPSR7_2 \
-GPSR0_1		GPSR1_1		GPSR2_1		GPSR3_1		GPSR4_1		GPSR5_1		GPSR6_1		GPSR7_1 \
-GPSR0_0		GPSR1_0		GPSR2_0		GPSR3_0		GPSR4_0		GPSR5_0		GPSR6_0		GPSR7_0
-
-#define PINMUX_IPSR				\
-\
-FM(IP0_3_0)	IP0_3_0		FM(IP1_3_0)	IP1_3_0		FM(IP2_3_0)	IP2_3_0		FM(IP3_3_0)	IP3_3_0 \
-FM(IP0_7_4)	IP0_7_4		FM(IP1_7_4)	IP1_7_4		FM(IP2_7_4)	IP2_7_4		FM(IP3_7_4)	IP3_7_4 \
-FM(IP0_11_8)	IP0_11_8	FM(IP1_11_8)	IP1_11_8	FM(IP2_11_8)	IP2_11_8	FM(IP3_11_8)	IP3_11_8 \
-FM(IP0_15_12)	IP0_15_12	FM(IP1_15_12)	IP1_15_12	FM(IP2_15_12)	IP2_15_12	FM(IP3_15_12)	IP3_15_12 \
-FM(IP0_19_16)	IP0_19_16	FM(IP1_19_16)	IP1_19_16	FM(IP2_19_16)	IP2_19_16	FM(IP3_19_16)	IP3_19_16 \
-FM(IP0_23_20)	IP0_23_20	FM(IP1_23_20)	IP1_23_20	FM(IP2_23_20)	IP2_23_20	FM(IP3_23_20)	IP3_23_20 \
-FM(IP0_27_24)	IP0_27_24	FM(IP1_27_24)	IP1_27_24	FM(IP2_27_24)	IP2_27_24	FM(IP3_27_24)	IP3_27_24 \
-FM(IP0_31_28)	IP0_31_28	FM(IP1_31_28)	IP1_31_28	FM(IP2_31_28)	IP2_31_28	FM(IP3_31_28)	IP3_31_28 \
-\
-FM(IP4_3_0)	IP4_3_0		FM(IP5_3_0)	IP5_3_0		FM(IP6_3_0)	IP6_3_0		FM(IP7_3_0)	IP7_3_0 \
-FM(IP4_7_4)	IP4_7_4		FM(IP5_7_4)	IP5_7_4		FM(IP6_7_4)	IP6_7_4		FM(IP7_7_4)	IP7_7_4 \
-FM(IP4_11_8)	IP4_11_8	FM(IP5_11_8)	IP5_11_8	FM(IP6_11_8)	IP6_11_8	FM(IP7_11_8)	IP7_11_8 \
-FM(IP4_15_12)	IP4_15_12	FM(IP5_15_12)	IP5_15_12	FM(IP6_15_12)	IP6_15_12 \
-FM(IP4_19_16)	IP4_19_16	FM(IP5_19_16)	IP5_19_16	FM(IP6_19_16)	IP6_19_16	FM(IP7_19_16)	IP7_19_16 \
-FM(IP4_23_20)	IP4_23_20	FM(IP5_23_20)	IP5_23_20	FM(IP6_23_20)	IP6_23_20	FM(IP7_23_20)	IP7_23_20 \
-FM(IP4_27_24)	IP4_27_24	FM(IP5_27_24)	IP5_27_24	FM(IP6_27_24)	IP6_27_24	FM(IP7_27_24)	IP7_27_24 \
-FM(IP4_31_28)	IP4_31_28	FM(IP5_31_28)	IP5_31_28	FM(IP6_31_28)	IP6_31_28	FM(IP7_31_28)	IP7_31_28 \
-\
-FM(IP8_3_0)	IP8_3_0		FM(IP9_3_0)	IP9_3_0		FM(IP10_3_0)	IP10_3_0	FM(IP11_3_0)	IP11_3_0 \
-FM(IP8_7_4)	IP8_7_4		FM(IP9_7_4)	IP9_7_4		FM(IP10_7_4)	IP10_7_4	FM(IP11_7_4)	IP11_7_4 \
-FM(IP8_11_8)	IP8_11_8	FM(IP9_11_8)	IP9_11_8	FM(IP10_11_8)	IP10_11_8	FM(IP11_11_8)	IP11_11_8 \
-FM(IP8_15_12)	IP8_15_12	FM(IP9_15_12)	IP9_15_12	FM(IP10_15_12)	IP10_15_12	FM(IP11_15_12)	IP11_15_12 \
-FM(IP8_19_16)	IP8_19_16	FM(IP9_19_16)	IP9_19_16	FM(IP10_19_16)	IP10_19_16	FM(IP11_19_16)	IP11_19_16 \
-FM(IP8_23_20)	IP8_23_20	FM(IP9_23_20)	IP9_23_20	FM(IP10_23_20)	IP10_23_20	FM(IP11_23_20)	IP11_23_20 \
-FM(IP8_27_24)	IP8_27_24	FM(IP9_27_24)	IP9_27_24	FM(IP10_27_24)	IP10_27_24	FM(IP11_27_24)	IP11_27_24 \
-FM(IP8_31_28)	IP8_31_28	FM(IP9_31_28)	IP9_31_28	FM(IP10_31_28)	IP10_31_28	FM(IP11_31_28)	IP11_31_28 \
-\
-FM(IP12_3_0)	IP12_3_0	FM(IP13_3_0)	IP13_3_0	FM(IP14_3_0)	IP14_3_0	FM(IP15_3_0)	IP15_3_0 \
-FM(IP12_7_4)	IP12_7_4	FM(IP13_7_4)	IP13_7_4	FM(IP14_7_4)	IP14_7_4	FM(IP15_7_4)	IP15_7_4 \
-FM(IP12_11_8)	IP12_11_8	FM(IP13_11_8)	IP13_11_8	FM(IP14_11_8)	IP14_11_8	FM(IP15_11_8)	IP15_11_8 \
-FM(IP12_15_12)	IP12_15_12	FM(IP13_15_12)	IP13_15_12	FM(IP14_15_12)	IP14_15_12	FM(IP15_15_12)	IP15_15_12 \
-FM(IP12_19_16)	IP12_19_16	FM(IP13_19_16)	IP13_19_16	FM(IP14_19_16)	IP14_19_16	FM(IP15_19_16)	IP15_19_16 \
-FM(IP12_23_20)	IP12_23_20	FM(IP13_23_20)	IP13_23_20	FM(IP14_23_20)	IP14_23_20	FM(IP15_23_20)	IP15_23_20 \
-FM(IP12_27_24)	IP12_27_24	FM(IP13_27_24)	IP13_27_24	FM(IP14_27_24)	IP14_27_24	FM(IP15_27_24)	IP15_27_24 \
-FM(IP12_31_28)	IP12_31_28	FM(IP13_31_28)	IP13_31_28	FM(IP14_31_28)	IP14_31_28	FM(IP15_31_28)	IP15_31_28 \
-\
-FM(IP16_3_0)	IP16_3_0	FM(IP17_3_0)	IP17_3_0	FM(IP18_3_0)	IP18_3_0 \
-FM(IP16_7_4)	IP16_7_4	FM(IP17_7_4)	IP17_7_4	FM(IP18_7_4)	IP18_7_4 \
-FM(IP16_11_8)	IP16_11_8	FM(IP17_11_8)	IP17_11_8 \
-FM(IP16_15_12)	IP16_15_12	FM(IP17_15_12)	IP17_15_12 \
-FM(IP16_19_16)	IP16_19_16	FM(IP17_19_16)	IP17_19_16 \
-FM(IP16_23_20)	IP16_23_20	FM(IP17_23_20)	IP17_23_20 \
-FM(IP16_27_24)	IP16_27_24	FM(IP17_27_24)	IP17_27_24 \
-FM(IP16_31_28)	IP16_31_28	FM(IP17_31_28)	IP17_31_28
-
-/* MOD_SEL0 */			/* 0 */			/* 1 */			/* 2 */			/* 3 */			/* 4 */			/* 5 */			/* 6 */			/* 7 */
-#define MOD_SEL0_31_30_29	FM(SEL_MSIOF3_0)	FM(SEL_MSIOF3_1)	FM(SEL_MSIOF3_2)	FM(SEL_MSIOF3_3)	FM(SEL_MSIOF3_4)	F_(0, 0)		F_(0, 0)		F_(0, 0)
-#define MOD_SEL0_28_27		FM(SEL_MSIOF2_0)	FM(SEL_MSIOF2_1)	FM(SEL_MSIOF2_2)	FM(SEL_MSIOF2_3)
-#define MOD_SEL0_26_25_24	FM(SEL_MSIOF1_0)	FM(SEL_MSIOF1_1)	FM(SEL_MSIOF1_2)	FM(SEL_MSIOF1_3)	FM(SEL_MSIOF1_4)	FM(SEL_MSIOF1_5)	FM(SEL_MSIOF1_6)	F_(0, 0)
-#define MOD_SEL0_23		FM(SEL_LBSC_0)		FM(SEL_LBSC_1)
-#define MOD_SEL0_22		FM(SEL_IEBUS_0)		FM(SEL_IEBUS_1)
-#define MOD_SEL0_21		FM(SEL_I2C2_0)		FM(SEL_I2C2_1)
-#define MOD_SEL0_20		FM(SEL_I2C1_0)		FM(SEL_I2C1_1)
-#define MOD_SEL0_19		FM(SEL_HSCIF4_0)	FM(SEL_HSCIF4_1)
-#define MOD_SEL0_18_17		FM(SEL_HSCIF3_0)	FM(SEL_HSCIF3_1)	FM(SEL_HSCIF3_2)	FM(SEL_HSCIF3_3)
-#define MOD_SEL0_16		FM(SEL_HSCIF1_0)	FM(SEL_HSCIF1_1)
-#define MOD_SEL0_14_13		FM(SEL_HSCIF2_0)	FM(SEL_HSCIF2_1)	FM(SEL_HSCIF2_2)	F_(0, 0)
-#define MOD_SEL0_12		FM(SEL_ETHERAVB_0)	FM(SEL_ETHERAVB_1)
-#define MOD_SEL0_11		FM(SEL_DRIF3_0)		FM(SEL_DRIF3_1)
-#define MOD_SEL0_10		FM(SEL_DRIF2_0)		FM(SEL_DRIF2_1)
-#define MOD_SEL0_9_8		FM(SEL_DRIF1_0)		FM(SEL_DRIF1_1)		FM(SEL_DRIF1_2)		F_(0, 0)
-#define MOD_SEL0_7_6		FM(SEL_DRIF0_0)		FM(SEL_DRIF0_1)		FM(SEL_DRIF0_2)		F_(0, 0)
-#define MOD_SEL0_5		FM(SEL_CANFD0_0)	FM(SEL_CANFD0_1)
-#define MOD_SEL0_4_3		FM(SEL_ADGA_0)		FM(SEL_ADGA_1)		FM(SEL_ADGA_2)		FM(SEL_ADGA_3)
-
-/* MOD_SEL1 */			/* 0 */			/* 1 */			/* 2 */			/* 3 */			/* 4 */			/* 5 */			/* 6 */			/* 7 */
-#define MOD_SEL1_31_30		FM(SEL_TSIF1_0)		FM(SEL_TSIF1_1)		FM(SEL_TSIF1_2)		FM(SEL_TSIF1_3)
-#define MOD_SEL1_29_28_27	FM(SEL_TSIF0_0)		FM(SEL_TSIF0_1)		FM(SEL_TSIF0_2)		FM(SEL_TSIF0_3)		FM(SEL_TSIF0_4)		F_(0, 0)		F_(0, 0)		F_(0, 0)
-#define MOD_SEL1_26		FM(SEL_TIMER_TMU1_0)	FM(SEL_TIMER_TMU1_1)
-#define MOD_SEL1_25_24		FM(SEL_SSP1_1_0)	FM(SEL_SSP1_1_1)	FM(SEL_SSP1_1_2)	FM(SEL_SSP1_1_3)
-#define MOD_SEL1_23_22_21	FM(SEL_SSP1_0_0)	FM(SEL_SSP1_0_1)	FM(SEL_SSP1_0_2)	FM(SEL_SSP1_0_3)	FM(SEL_SSP1_0_4)	F_(0, 0)		F_(0, 0)		F_(0, 0)
-#define MOD_SEL1_20		FM(SEL_SSI1_0)		FM(SEL_SSI1_1)
-#define MOD_SEL1_19		FM(SEL_SPEED_PULSE_0)	FM(SEL_SPEED_PULSE_1)
-#define MOD_SEL1_18_17		FM(SEL_SIMCARD_0)	FM(SEL_SIMCARD_1)	FM(SEL_SIMCARD_2)	FM(SEL_SIMCARD_3)
-#define MOD_SEL1_16		FM(SEL_SDHI2_0)		FM(SEL_SDHI2_1)
-#define MOD_SEL1_15_14		FM(SEL_SCIF4_0)		FM(SEL_SCIF4_1)		FM(SEL_SCIF4_2)		F_(0, 0)
-#define MOD_SEL1_13		FM(SEL_SCIF3_0)		FM(SEL_SCIF3_1)
-#define MOD_SEL1_12		FM(SEL_SCIF2_0)		FM(SEL_SCIF2_1)
-#define MOD_SEL1_11		FM(SEL_SCIF1_0)		FM(SEL_SCIF1_1)
-#define MOD_SEL1_10		FM(SEL_SCIF_0)		FM(SEL_SCIF_1)
-#define MOD_SEL1_9		FM(SEL_REMOCON_0)	FM(SEL_REMOCON_1)
-#define MOD_SEL1_6		FM(SEL_RCAN0_0)		FM(SEL_RCAN0_1)
-#define MOD_SEL1_5		FM(SEL_PWM6_0)		FM(SEL_PWM6_1)
-#define MOD_SEL1_4		FM(SEL_PWM5_0)		FM(SEL_PWM5_1)
-#define MOD_SEL1_3		FM(SEL_PWM4_0)		FM(SEL_PWM4_1)
-#define MOD_SEL1_2		FM(SEL_PWM3_0)		FM(SEL_PWM3_1)
-#define MOD_SEL1_1		FM(SEL_PWM2_0)		FM(SEL_PWM2_1)
-#define MOD_SEL1_0		FM(SEL_PWM1_0)		FM(SEL_PWM1_1)
-
-/* MOD_SEL2 */			/* 0 */			/* 1 */			/* 2 */			/* 3 */
-#define MOD_SEL2_31		FM(I2C_SEL_5_0)		FM(I2C_SEL_5_1)
-#define MOD_SEL2_30		FM(I2C_SEL_3_0)		FM(I2C_SEL_3_1)
-#define MOD_SEL2_29		FM(I2C_SEL_0_0)		FM(I2C_SEL_0_1)
-#define MOD_SEL2_28_27		FM(SEL_FM_0)		FM(SEL_FM_1)		FM(SEL_FM_2)		FM(SEL_FM_3)
-#define MOD_SEL2_26		FM(SEL_SCIF5_0)		FM(SEL_SCIF5_1)
-#define MOD_SEL2_25_24_23	FM(SEL_I2C6_0)		FM(SEL_I2C6_1)		FM(SEL_I2C6_2)		F_(0, 0)		F_(0, 0)		F_(0, 0)		F_(0, 0)		F_(0, 0)
-#define MOD_SEL2_21		FM(SEL_SSI2_0)		FM(SEL_SSI2_1)
-#define MOD_SEL2_20		FM(SEL_SSI9_0)		FM(SEL_SSI9_1)
-#define MOD_SEL2_19		FM(SEL_TIMER_TMU2_0)	FM(SEL_TIMER_TMU2_1)
-#define MOD_SEL2_18		FM(SEL_ADGB_0)		FM(SEL_ADGB_1)
-#define MOD_SEL2_17		FM(SEL_ADGC_0)		FM(SEL_ADGC_1)
-#define MOD_SEL2_0		FM(SEL_VIN4_0)		FM(SEL_VIN4_1)
-
-#define PINMUX_MOD_SELS	\
-\
-MOD_SEL0_31_30_29	MOD_SEL1_31_30		MOD_SEL2_31 \
-						MOD_SEL2_30 \
-			MOD_SEL1_29_28_27	MOD_SEL2_29 \
-MOD_SEL0_28_27					MOD_SEL2_28_27 \
-MOD_SEL0_26_25_24	MOD_SEL1_26		MOD_SEL2_26 \
-			MOD_SEL1_25_24		MOD_SEL2_25_24_23 \
-MOD_SEL0_23		MOD_SEL1_23_22_21 \
-MOD_SEL0_22 \
-MOD_SEL0_21					MOD_SEL2_21 \
-MOD_SEL0_20		MOD_SEL1_20		MOD_SEL2_20 \
-MOD_SEL0_19		MOD_SEL1_19		MOD_SEL2_19 \
-MOD_SEL0_18_17		MOD_SEL1_18_17		MOD_SEL2_18 \
-						MOD_SEL2_17 \
-MOD_SEL0_16		MOD_SEL1_16 \
-			MOD_SEL1_15_14 \
-MOD_SEL0_14_13 \
-			MOD_SEL1_13 \
-MOD_SEL0_12		MOD_SEL1_12 \
-MOD_SEL0_11		MOD_SEL1_11 \
-MOD_SEL0_10		MOD_SEL1_10 \
-MOD_SEL0_9_8		MOD_SEL1_9 \
-MOD_SEL0_7_6 \
-			MOD_SEL1_6 \
-MOD_SEL0_5		MOD_SEL1_5 \
-MOD_SEL0_4_3		MOD_SEL1_4 \
-			MOD_SEL1_3 \
-			MOD_SEL1_2 \
-			MOD_SEL1_1 \
-			MOD_SEL1_0		MOD_SEL2_0
-
-/*
- * These pins are not able to be muxed but have other properties
- * that can be set, such as drive-strength or pull-up/pull-down enable.
- */
-#define PINMUX_STATIC \
-	FM(QSPI0_SPCLK) FM(QSPI0_SSL) FM(QSPI0_MOSI_IO0) FM(QSPI0_MISO_IO1) \
-	FM(QSPI0_IO2) FM(QSPI0_IO3) \
-	FM(QSPI1_SPCLK) FM(QSPI1_SSL) FM(QSPI1_MOSI_IO0) FM(QSPI1_MISO_IO1) \
-	FM(QSPI1_IO2) FM(QSPI1_IO3) \
-	FM(RPC_INT) FM(RPC_WP) FM(RPC_RESET) \
-	FM(AVB_TX_CTL) FM(AVB_TXC) FM(AVB_TD0) FM(AVB_TD1) FM(AVB_TD2) FM(AVB_TD3) \
-	FM(AVB_RX_CTL) FM(AVB_RXC) FM(AVB_RD0) FM(AVB_RD1) FM(AVB_RD2) FM(AVB_RD3) \
-	FM(AVB_TXCREFCLK) FM(AVB_MDIO) \
-	FM(PRESETOUT) \
-	FM(DU_DOTCLKIN0) FM(DU_DOTCLKIN1) FM(DU_DOTCLKIN2) FM(DU_DOTCLKIN3) \
-	FM(TMS) FM(TDO) FM(ASEBRK) FM(MLB_REF) FM(TDI) FM(TCK) FM(TRST) FM(EXTALR)
-
-#define PINMUX_PHYS \
-	FM(SCL0) FM(SDA0) FM(SCL3) FM(SDA3) FM(SCL5) FM(SDA5)
-
-enum {
-	PINMUX_RESERVED = 0,
-
-	PINMUX_DATA_BEGIN,
-	GP_ALL(DATA),
-	PINMUX_DATA_END,
-
-#define F_(x, y)
-#define FM(x)	FN_##x,
-	PINMUX_FUNCTION_BEGIN,
-	GP_ALL(FN),
-	PINMUX_GPSR
-	PINMUX_IPSR
-	PINMUX_MOD_SELS
-	PINMUX_FUNCTION_END,
-#undef F_
-#undef FM
-
-#define F_(x, y)
-#define FM(x)	x##_MARK,
-	PINMUX_MARK_BEGIN,
-	PINMUX_GPSR
-	PINMUX_IPSR
-	PINMUX_MOD_SELS
-	PINMUX_STATIC
-	PINMUX_PHYS
-	PINMUX_MARK_END,
-#undef F_
-#undef FM
-};
-
-static const u16 pinmux_data[] = {
-	PINMUX_DATA_GP_ALL(),
-
-	PINMUX_SINGLE(AVS1),
-	PINMUX_SINGLE(AVS2),
-	PINMUX_SINGLE(CLKOUT),
-	PINMUX_SINGLE(GP7_02),
-	PINMUX_SINGLE(GP7_03),
-	PINMUX_SINGLE(MSIOF0_RXD),
-	PINMUX_SINGLE(MSIOF0_SCK),
-	PINMUX_SINGLE(MSIOF0_TXD),
-	PINMUX_SINGLE(SSI_SCK5),
-	PINMUX_SINGLE(SSI_SDATA5),
-	PINMUX_SINGLE(SSI_WS5),
-
-	/* IPSR0 */
-	PINMUX_IPSR_GPSR(IP0_3_0,	AVB_MDC),
-	PINMUX_IPSR_MSEL(IP0_3_0,	MSIOF2_SS2_C,		SEL_MSIOF2_2),
-
-	PINMUX_IPSR_GPSR(IP0_7_4,	AVB_MAGIC),
-	PINMUX_IPSR_MSEL(IP0_7_4,	MSIOF2_SS1_C,		SEL_MSIOF2_2),
-	PINMUX_IPSR_MSEL(IP0_7_4,	SCK4_A,			SEL_SCIF4_0),
-
-	PINMUX_IPSR_GPSR(IP0_11_8,	AVB_PHY_INT),
-	PINMUX_IPSR_MSEL(IP0_11_8,	MSIOF2_SYNC_C,		SEL_MSIOF2_2),
-	PINMUX_IPSR_MSEL(IP0_11_8,	RX4_A,			SEL_SCIF4_0),
-
-	PINMUX_IPSR_GPSR(IP0_15_12,	AVB_LINK),
-	PINMUX_IPSR_MSEL(IP0_15_12,	MSIOF2_SCK_C,		SEL_MSIOF2_2),
-	PINMUX_IPSR_MSEL(IP0_15_12,	TX4_A,			SEL_SCIF4_0),
-
-	PINMUX_IPSR_PHYS_MSEL(IP0_19_16, AVB_AVTP_MATCH_A,	I2C_SEL_5_0,	SEL_ETHERAVB_0),
-	PINMUX_IPSR_PHYS_MSEL(IP0_19_16, MSIOF2_RXD_C,		I2C_SEL_5_0,	SEL_MSIOF2_2),
-	PINMUX_IPSR_PHYS_MSEL(IP0_19_16, CTS4_N_A,		I2C_SEL_5_0,	SEL_SCIF4_0),
-	PINMUX_IPSR_MSEL(IP0_19_16,	FSCLKST2_N_A,		I2C_SEL_5_0),
-	PINMUX_IPSR_PHYS(IP0_19_16,     SCL5,                   I2C_SEL_5_1),
-
-	PINMUX_IPSR_PHYS_MSEL(IP0_23_20, AVB_AVTP_CAPTURE_A,	I2C_SEL_5_0,	SEL_ETHERAVB_0),
-	PINMUX_IPSR_PHYS_MSEL(IP0_23_20, MSIOF2_TXD_C,		I2C_SEL_5_0,	SEL_MSIOF2_2),
-	PINMUX_IPSR_PHYS_MSEL(IP0_23_20, RTS4_N_A,		I2C_SEL_5_0,	SEL_SCIF4_0),
-	PINMUX_IPSR_PHYS(IP0_23_20,     SDA5,                   I2C_SEL_5_1),
-
-	PINMUX_IPSR_GPSR(IP0_27_24,	IRQ0),
-	PINMUX_IPSR_GPSR(IP0_27_24,	QPOLB),
-	PINMUX_IPSR_GPSR(IP0_27_24,	DU_CDE),
-	PINMUX_IPSR_MSEL(IP0_27_24,	VI4_DATA0_B,		SEL_VIN4_1),
-	PINMUX_IPSR_MSEL(IP0_27_24,	CAN0_TX_B,		SEL_RCAN0_1),
-	PINMUX_IPSR_MSEL(IP0_27_24,	CANFD0_TX_B,		SEL_CANFD0_1),
-	PINMUX_IPSR_MSEL(IP0_27_24,	MSIOF3_SS2_E,		SEL_MSIOF3_4),
-
-	PINMUX_IPSR_GPSR(IP0_31_28,	IRQ1),
-	PINMUX_IPSR_GPSR(IP0_31_28,	QPOLA),
-	PINMUX_IPSR_GPSR(IP0_31_28,	DU_DISP),
-	PINMUX_IPSR_MSEL(IP0_31_28,	VI4_DATA1_B,		SEL_VIN4_1),
-	PINMUX_IPSR_MSEL(IP0_31_28,	CAN0_RX_B,		SEL_RCAN0_1),
-	PINMUX_IPSR_MSEL(IP0_31_28,	CANFD0_RX_B,		SEL_CANFD0_1),
-	PINMUX_IPSR_MSEL(IP0_31_28,	MSIOF3_SS1_E,		SEL_MSIOF3_4),
-
-	/* IPSR1 */
-	PINMUX_IPSR_GPSR(IP1_3_0,	IRQ2),
-	PINMUX_IPSR_GPSR(IP1_3_0,	QCPV_QDE),
-	PINMUX_IPSR_GPSR(IP1_3_0,	DU_EXODDF_DU_ODDF_DISP_CDE),
-	PINMUX_IPSR_MSEL(IP1_3_0,	VI4_DATA2_B,		SEL_VIN4_1),
-	PINMUX_IPSR_MSEL(IP1_3_0,	PWM3_B,			SEL_PWM3_1),
-	PINMUX_IPSR_MSEL(IP1_3_0,	MSIOF3_SYNC_E,		SEL_MSIOF3_4),
-
-	PINMUX_IPSR_GPSR(IP1_7_4,	IRQ3),
-	PINMUX_IPSR_GPSR(IP1_7_4,	QSTVB_QVE),
-	PINMUX_IPSR_GPSR(IP1_7_4,	DU_DOTCLKOUT1),
-	PINMUX_IPSR_MSEL(IP1_7_4,	VI4_DATA3_B,		SEL_VIN4_1),
-	PINMUX_IPSR_MSEL(IP1_7_4,	PWM4_B,			SEL_PWM4_1),
-	PINMUX_IPSR_MSEL(IP1_7_4,	MSIOF3_SCK_E,		SEL_MSIOF3_4),
-
-	PINMUX_IPSR_GPSR(IP1_11_8,	IRQ4),
-	PINMUX_IPSR_GPSR(IP1_11_8,	QSTH_QHS),
-	PINMUX_IPSR_GPSR(IP1_11_8,	DU_EXHSYNC_DU_HSYNC),
-	PINMUX_IPSR_MSEL(IP1_11_8,	VI4_DATA4_B,		SEL_VIN4_1),
-	PINMUX_IPSR_MSEL(IP1_11_8,	PWM5_B,			SEL_PWM5_1),
-	PINMUX_IPSR_MSEL(IP1_11_8,	MSIOF3_RXD_E,		SEL_MSIOF3_4),
-
-	PINMUX_IPSR_GPSR(IP1_15_12,	IRQ5),
-	PINMUX_IPSR_GPSR(IP1_15_12,	QSTB_QHE),
-	PINMUX_IPSR_GPSR(IP1_15_12,	DU_EXVSYNC_DU_VSYNC),
-	PINMUX_IPSR_MSEL(IP1_15_12,	VI4_DATA5_B,		SEL_VIN4_1),
-	PINMUX_IPSR_MSEL(IP1_15_12,	PWM6_B,			SEL_PWM6_1),
-	PINMUX_IPSR_GPSR(IP1_15_12,	FSCLKST2_N_B),
-	PINMUX_IPSR_MSEL(IP1_15_12,	MSIOF3_TXD_E,		SEL_MSIOF3_4),
-
-	PINMUX_IPSR_GPSR(IP1_19_16,	PWM0),
-	PINMUX_IPSR_GPSR(IP1_19_16,	AVB_AVTP_PPS),
-	PINMUX_IPSR_MSEL(IP1_19_16,	VI4_DATA6_B,		SEL_VIN4_1),
-	PINMUX_IPSR_MSEL(IP1_19_16,	IECLK_B,		SEL_IEBUS_1),
-
-	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, PWM1_A,		I2C_SEL_3_0,	SEL_PWM1_0),
-	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D,		I2C_SEL_3_0,	SEL_HSCIF3_3),
-	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B,		I2C_SEL_3_0,	SEL_VIN4_1),
-	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B,		I2C_SEL_3_0,	SEL_IEBUS_1),
-	PINMUX_IPSR_PHYS(IP1_23_20,	SCL3,			I2C_SEL_3_1),
-
-	PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A,		I2C_SEL_3_0,	SEL_PWM2_0),
-	PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D,		I2C_SEL_3_0,	SEL_HSCIF3_3),
-	PINMUX_IPSR_PHYS_MSEL(IP1_27_24, IETX_B,		I2C_SEL_3_0,	SEL_IEBUS_1),
-	PINMUX_IPSR_PHYS(IP1_27_24,	SDA3,			I2C_SEL_3_1),
-
-	PINMUX_IPSR_GPSR(IP1_31_28,	A0),
-	PINMUX_IPSR_GPSR(IP1_31_28,	LCDOUT16),
-	PINMUX_IPSR_MSEL(IP1_31_28,	MSIOF3_SYNC_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_GPSR(IP1_31_28,	VI4_DATA8),
-	PINMUX_IPSR_GPSR(IP1_31_28,	DU_DB0),
-	PINMUX_IPSR_MSEL(IP1_31_28,	PWM3_A,			SEL_PWM3_0),
-
-	/* IPSR2 */
-	PINMUX_IPSR_GPSR(IP2_3_0,	A1),
-	PINMUX_IPSR_GPSR(IP2_3_0,	LCDOUT17),
-	PINMUX_IPSR_MSEL(IP2_3_0,	MSIOF3_TXD_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_GPSR(IP2_3_0,	VI4_DATA9),
-	PINMUX_IPSR_GPSR(IP2_3_0,	DU_DB1),
-	PINMUX_IPSR_MSEL(IP2_3_0,	PWM4_A,			SEL_PWM4_0),
-
-	PINMUX_IPSR_GPSR(IP2_7_4,	A2),
-	PINMUX_IPSR_GPSR(IP2_7_4,	LCDOUT18),
-	PINMUX_IPSR_MSEL(IP2_7_4,	MSIOF3_SCK_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_GPSR(IP2_7_4,	VI4_DATA10),
-	PINMUX_IPSR_GPSR(IP2_7_4,	DU_DB2),
-	PINMUX_IPSR_MSEL(IP2_7_4,	PWM5_A,			SEL_PWM5_0),
-
-	PINMUX_IPSR_GPSR(IP2_11_8,	A3),
-	PINMUX_IPSR_GPSR(IP2_11_8,	LCDOUT19),
-	PINMUX_IPSR_MSEL(IP2_11_8,	MSIOF3_RXD_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_GPSR(IP2_11_8,	VI4_DATA11),
-	PINMUX_IPSR_GPSR(IP2_11_8,	DU_DB3),
-	PINMUX_IPSR_MSEL(IP2_11_8,	PWM6_A,			SEL_PWM6_0),
-
-	PINMUX_IPSR_GPSR(IP2_15_12,	A4),
-	PINMUX_IPSR_GPSR(IP2_15_12,	LCDOUT20),
-	PINMUX_IPSR_MSEL(IP2_15_12,	MSIOF3_SS1_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_GPSR(IP2_15_12,	VI4_DATA12),
-	PINMUX_IPSR_GPSR(IP2_15_12,	VI5_DATA12),
-	PINMUX_IPSR_GPSR(IP2_15_12,	DU_DB4),
-
-	PINMUX_IPSR_GPSR(IP2_19_16,	A5),
-	PINMUX_IPSR_GPSR(IP2_19_16,	LCDOUT21),
-	PINMUX_IPSR_MSEL(IP2_19_16,	MSIOF3_SS2_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_MSEL(IP2_19_16,	SCK4_B,			SEL_SCIF4_1),
-	PINMUX_IPSR_GPSR(IP2_19_16,	VI4_DATA13),
-	PINMUX_IPSR_GPSR(IP2_19_16,	VI5_DATA13),
-	PINMUX_IPSR_GPSR(IP2_19_16,	DU_DB5),
-
-	PINMUX_IPSR_GPSR(IP2_23_20,	A6),
-	PINMUX_IPSR_GPSR(IP2_23_20,	LCDOUT22),
-	PINMUX_IPSR_MSEL(IP2_23_20,	MSIOF2_SS1_A,		SEL_MSIOF2_0),
-	PINMUX_IPSR_MSEL(IP2_23_20,	RX4_B,			SEL_SCIF4_1),
-	PINMUX_IPSR_GPSR(IP2_23_20,	VI4_DATA14),
-	PINMUX_IPSR_GPSR(IP2_23_20,	VI5_DATA14),
-	PINMUX_IPSR_GPSR(IP2_23_20,	DU_DB6),
-
-	PINMUX_IPSR_GPSR(IP2_27_24,	A7),
-	PINMUX_IPSR_GPSR(IP2_27_24,	LCDOUT23),
-	PINMUX_IPSR_MSEL(IP2_27_24,	MSIOF2_SS2_A,		SEL_MSIOF2_0),
-	PINMUX_IPSR_MSEL(IP2_27_24,	TX4_B,			SEL_SCIF4_1),
-	PINMUX_IPSR_GPSR(IP2_27_24,	VI4_DATA15),
-	PINMUX_IPSR_GPSR(IP2_27_24,	VI5_DATA15),
-	PINMUX_IPSR_GPSR(IP2_27_24,	DU_DB7),
-
-	PINMUX_IPSR_GPSR(IP2_31_28,	A8),
-	PINMUX_IPSR_MSEL(IP2_31_28,	RX3_B,			SEL_SCIF3_1),
-	PINMUX_IPSR_MSEL(IP2_31_28,	MSIOF2_SYNC_A,		SEL_MSIOF2_0),
-	PINMUX_IPSR_MSEL(IP2_31_28,	HRX4_B,			SEL_HSCIF4_1),
-	PINMUX_IPSR_MSEL(IP2_31_28,	SDA6_A,			SEL_I2C6_0),
-	PINMUX_IPSR_MSEL(IP2_31_28,	AVB_AVTP_MATCH_B,	SEL_ETHERAVB_1),
-	PINMUX_IPSR_MSEL(IP2_31_28,	PWM1_B,			SEL_PWM1_1),
-
-	/* IPSR3 */
-	PINMUX_IPSR_GPSR(IP3_3_0,	A9),
-	PINMUX_IPSR_MSEL(IP3_3_0,	MSIOF2_SCK_A,		SEL_MSIOF2_0),
-	PINMUX_IPSR_MSEL(IP3_3_0,	CTS4_N_B,		SEL_SCIF4_1),
-	PINMUX_IPSR_GPSR(IP3_3_0,	VI5_VSYNC_N),
-
-	PINMUX_IPSR_GPSR(IP3_7_4,	A10),
-	PINMUX_IPSR_MSEL(IP3_7_4,	MSIOF2_RXD_A,		SEL_MSIOF2_0),
-	PINMUX_IPSR_MSEL(IP3_7_4,	RTS4_N_B,		SEL_SCIF4_1),
-	PINMUX_IPSR_GPSR(IP3_7_4,	VI5_HSYNC_N),
-
-	PINMUX_IPSR_GPSR(IP3_11_8,	A11),
-	PINMUX_IPSR_MSEL(IP3_11_8,	TX3_B,			SEL_SCIF3_1),
-	PINMUX_IPSR_MSEL(IP3_11_8,	MSIOF2_TXD_A,		SEL_MSIOF2_0),
-	PINMUX_IPSR_MSEL(IP3_11_8,	HTX4_B,			SEL_HSCIF4_1),
-	PINMUX_IPSR_GPSR(IP3_11_8,	HSCK4),
-	PINMUX_IPSR_GPSR(IP3_11_8,	VI5_FIELD),
-	PINMUX_IPSR_MSEL(IP3_11_8,	SCL6_A,			SEL_I2C6_0),
-	PINMUX_IPSR_MSEL(IP3_11_8,	AVB_AVTP_CAPTURE_B,	SEL_ETHERAVB_1),
-	PINMUX_IPSR_MSEL(IP3_11_8,	PWM2_B,			SEL_PWM2_1),
-
-	PINMUX_IPSR_GPSR(IP3_15_12,	A12),
-	PINMUX_IPSR_GPSR(IP3_15_12,	LCDOUT12),
-	PINMUX_IPSR_MSEL(IP3_15_12,	MSIOF3_SCK_C,		SEL_MSIOF3_2),
-	PINMUX_IPSR_MSEL(IP3_15_12,	HRX4_A,			SEL_HSCIF4_0),
-	PINMUX_IPSR_GPSR(IP3_15_12,	VI5_DATA8),
-	PINMUX_IPSR_GPSR(IP3_15_12,	DU_DG4),
-
-	PINMUX_IPSR_GPSR(IP3_19_16,	A13),
-	PINMUX_IPSR_GPSR(IP3_19_16,	LCDOUT13),
-	PINMUX_IPSR_MSEL(IP3_19_16,	MSIOF3_SYNC_C,		SEL_MSIOF3_2),
-	PINMUX_IPSR_MSEL(IP3_19_16,	HTX4_A,			SEL_HSCIF4_0),
-	PINMUX_IPSR_GPSR(IP3_19_16,	VI5_DATA9),
-	PINMUX_IPSR_GPSR(IP3_19_16,	DU_DG5),
-
-	PINMUX_IPSR_GPSR(IP3_23_20,	A14),
-	PINMUX_IPSR_GPSR(IP3_23_20,	LCDOUT14),
-	PINMUX_IPSR_MSEL(IP3_23_20,	MSIOF3_RXD_C,		SEL_MSIOF3_2),
-	PINMUX_IPSR_GPSR(IP3_23_20,	HCTS4_N),
-	PINMUX_IPSR_GPSR(IP3_23_20,	VI5_DATA10),
-	PINMUX_IPSR_GPSR(IP3_23_20,	DU_DG6),
-
-	PINMUX_IPSR_GPSR(IP3_27_24,	A15),
-	PINMUX_IPSR_GPSR(IP3_27_24,	LCDOUT15),
-	PINMUX_IPSR_MSEL(IP3_27_24,	MSIOF3_TXD_C,		SEL_MSIOF3_2),
-	PINMUX_IPSR_GPSR(IP3_27_24,	HRTS4_N),
-	PINMUX_IPSR_GPSR(IP3_27_24,	VI5_DATA11),
-	PINMUX_IPSR_GPSR(IP3_27_24,	DU_DG7),
-
-	PINMUX_IPSR_GPSR(IP3_31_28,	A16),
-	PINMUX_IPSR_GPSR(IP3_31_28,	LCDOUT8),
-	PINMUX_IPSR_GPSR(IP3_31_28,	VI4_FIELD),
-	PINMUX_IPSR_GPSR(IP3_31_28,	DU_DG0),
-
-	/* IPSR4 */
-	PINMUX_IPSR_GPSR(IP4_3_0,	A17),
-	PINMUX_IPSR_GPSR(IP4_3_0,	LCDOUT9),
-	PINMUX_IPSR_GPSR(IP4_3_0,	VI4_VSYNC_N),
-	PINMUX_IPSR_GPSR(IP4_3_0,	DU_DG1),
-
-	PINMUX_IPSR_GPSR(IP4_7_4,	A18),
-	PINMUX_IPSR_GPSR(IP4_7_4,	LCDOUT10),
-	PINMUX_IPSR_GPSR(IP4_7_4,	VI4_HSYNC_N),
-	PINMUX_IPSR_GPSR(IP4_7_4,	DU_DG2),
-
-	PINMUX_IPSR_GPSR(IP4_11_8,	A19),
-	PINMUX_IPSR_GPSR(IP4_11_8,	LCDOUT11),
-	PINMUX_IPSR_GPSR(IP4_11_8,	VI4_CLKENB),
-	PINMUX_IPSR_GPSR(IP4_11_8,	DU_DG3),
-
-	PINMUX_IPSR_GPSR(IP4_15_12,	CS0_N),
-	PINMUX_IPSR_GPSR(IP4_15_12,	VI5_CLKENB),
-
-	PINMUX_IPSR_GPSR(IP4_19_16,	CS1_N),
-	PINMUX_IPSR_GPSR(IP4_19_16,	VI5_CLK),
-	PINMUX_IPSR_MSEL(IP4_19_16,	EX_WAIT0_B,		SEL_LBSC_1),
-
-	PINMUX_IPSR_GPSR(IP4_23_20,	BS_N),
-	PINMUX_IPSR_GPSR(IP4_23_20,	QSTVA_QVS),
-	PINMUX_IPSR_MSEL(IP4_23_20,	MSIOF3_SCK_D,		SEL_MSIOF3_3),
-	PINMUX_IPSR_GPSR(IP4_23_20,	SCK3),
-	PINMUX_IPSR_GPSR(IP4_23_20,	HSCK3),
-	PINMUX_IPSR_GPSR(IP4_23_20,	CAN1_TX),
-	PINMUX_IPSR_GPSR(IP4_23_20,	CANFD1_TX),
-	PINMUX_IPSR_MSEL(IP4_23_20,	IETX_A,			SEL_IEBUS_0),
-
-	PINMUX_IPSR_GPSR(IP4_27_24,	RD_N),
-	PINMUX_IPSR_MSEL(IP4_27_24,	MSIOF3_SYNC_D,		SEL_MSIOF3_3),
-	PINMUX_IPSR_MSEL(IP4_27_24,	RX3_A,			SEL_SCIF3_0),
-	PINMUX_IPSR_MSEL(IP4_27_24,	HRX3_A,			SEL_HSCIF3_0),
-	PINMUX_IPSR_MSEL(IP4_27_24,	CAN0_TX_A,		SEL_RCAN0_0),
-	PINMUX_IPSR_MSEL(IP4_27_24,	CANFD0_TX_A,		SEL_CANFD0_0),
-
-	PINMUX_IPSR_GPSR(IP4_31_28,	RD_WR_N),
-	PINMUX_IPSR_MSEL(IP4_31_28,	MSIOF3_RXD_D,		SEL_MSIOF3_3),
-	PINMUX_IPSR_MSEL(IP4_31_28,	TX3_A,			SEL_SCIF3_0),
-	PINMUX_IPSR_MSEL(IP4_31_28,	HTX3_A,			SEL_HSCIF3_0),
-	PINMUX_IPSR_MSEL(IP4_31_28,	CAN0_RX_A,		SEL_RCAN0_0),
-	PINMUX_IPSR_MSEL(IP4_31_28,	CANFD0_RX_A,		SEL_CANFD0_0),
-
-	/* IPSR5 */
-	PINMUX_IPSR_GPSR(IP5_3_0,	WE0_N),
-	PINMUX_IPSR_MSEL(IP5_3_0,	MSIOF3_TXD_D,		SEL_MSIOF3_3),
-	PINMUX_IPSR_GPSR(IP5_3_0,	CTS3_N),
-	PINMUX_IPSR_GPSR(IP5_3_0,	HCTS3_N),
-	PINMUX_IPSR_MSEL(IP5_3_0,	SCL6_B,			SEL_I2C6_1),
-	PINMUX_IPSR_GPSR(IP5_3_0,	CAN_CLK),
-	PINMUX_IPSR_MSEL(IP5_3_0,	IECLK_A,		SEL_IEBUS_0),
-
-	PINMUX_IPSR_GPSR(IP5_7_4,	WE1_N),
-	PINMUX_IPSR_MSEL(IP5_7_4,	MSIOF3_SS1_D,		SEL_MSIOF3_3),
-	PINMUX_IPSR_GPSR(IP5_7_4,	RTS3_N),
-	PINMUX_IPSR_GPSR(IP5_7_4,	HRTS3_N),
-	PINMUX_IPSR_MSEL(IP5_7_4,	SDA6_B,			SEL_I2C6_1),
-	PINMUX_IPSR_GPSR(IP5_7_4,	CAN1_RX),
-	PINMUX_IPSR_GPSR(IP5_7_4,	CANFD1_RX),
-	PINMUX_IPSR_MSEL(IP5_7_4,	IERX_A,			SEL_IEBUS_0),
-
-	PINMUX_IPSR_MSEL(IP5_11_8,	EX_WAIT0_A,		SEL_LBSC_0),
-	PINMUX_IPSR_GPSR(IP5_11_8,	QCLK),
-	PINMUX_IPSR_GPSR(IP5_11_8,	VI4_CLK),
-	PINMUX_IPSR_GPSR(IP5_11_8,	DU_DOTCLKOUT0),
-
-	PINMUX_IPSR_GPSR(IP5_15_12,	D0),
-	PINMUX_IPSR_MSEL(IP5_15_12,	MSIOF2_SS1_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_MSEL(IP5_15_12,	MSIOF3_SCK_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_GPSR(IP5_15_12,	VI4_DATA16),
-	PINMUX_IPSR_GPSR(IP5_15_12,	VI5_DATA0),
-
-	PINMUX_IPSR_GPSR(IP5_19_16,	D1),
-	PINMUX_IPSR_MSEL(IP5_19_16,	MSIOF2_SS2_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_MSEL(IP5_19_16,	MSIOF3_SYNC_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_GPSR(IP5_19_16,	VI4_DATA17),
-	PINMUX_IPSR_GPSR(IP5_19_16,	VI5_DATA1),
-
-	PINMUX_IPSR_GPSR(IP5_23_20,	D2),
-	PINMUX_IPSR_MSEL(IP5_23_20,	MSIOF3_RXD_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_GPSR(IP5_23_20,	VI4_DATA18),
-	PINMUX_IPSR_GPSR(IP5_23_20,	VI5_DATA2),
-
-	PINMUX_IPSR_GPSR(IP5_27_24,	D3),
-	PINMUX_IPSR_MSEL(IP5_27_24,	MSIOF3_TXD_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_GPSR(IP5_27_24,	VI4_DATA19),
-	PINMUX_IPSR_GPSR(IP5_27_24,	VI5_DATA3),
-
-	PINMUX_IPSR_GPSR(IP5_31_28,	D4),
-	PINMUX_IPSR_MSEL(IP5_31_28,	MSIOF2_SCK_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_GPSR(IP5_31_28,	VI4_DATA20),
-	PINMUX_IPSR_GPSR(IP5_31_28,	VI5_DATA4),
-
-	/* IPSR6 */
-	PINMUX_IPSR_GPSR(IP6_3_0,	D5),
-	PINMUX_IPSR_MSEL(IP6_3_0,	MSIOF2_SYNC_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_GPSR(IP6_3_0,	VI4_DATA21),
-	PINMUX_IPSR_GPSR(IP6_3_0,	VI5_DATA5),
-
-	PINMUX_IPSR_GPSR(IP6_7_4,	D6),
-	PINMUX_IPSR_MSEL(IP6_7_4,	MSIOF2_RXD_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_GPSR(IP6_7_4,	VI4_DATA22),
-	PINMUX_IPSR_GPSR(IP6_7_4,	VI5_DATA6),
-
-	PINMUX_IPSR_GPSR(IP6_11_8,	D7),
-	PINMUX_IPSR_MSEL(IP6_11_8,	MSIOF2_TXD_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_GPSR(IP6_11_8,	VI4_DATA23),
-	PINMUX_IPSR_GPSR(IP6_11_8,	VI5_DATA7),
-
-	PINMUX_IPSR_GPSR(IP6_15_12,	D8),
-	PINMUX_IPSR_GPSR(IP6_15_12,	LCDOUT0),
-	PINMUX_IPSR_MSEL(IP6_15_12,	MSIOF2_SCK_D,		SEL_MSIOF2_3),
-	PINMUX_IPSR_MSEL(IP6_15_12,	SCK4_C,			SEL_SCIF4_2),
-	PINMUX_IPSR_MSEL(IP6_15_12,	VI4_DATA0_A,		SEL_VIN4_0),
-	PINMUX_IPSR_GPSR(IP6_15_12,	DU_DR0),
-
-	PINMUX_IPSR_GPSR(IP6_19_16,	D9),
-	PINMUX_IPSR_GPSR(IP6_19_16,	LCDOUT1),
-	PINMUX_IPSR_MSEL(IP6_19_16,	MSIOF2_SYNC_D,		SEL_MSIOF2_3),
-	PINMUX_IPSR_MSEL(IP6_19_16,	VI4_DATA1_A,		SEL_VIN4_0),
-	PINMUX_IPSR_GPSR(IP6_19_16,	DU_DR1),
-
-	PINMUX_IPSR_GPSR(IP6_23_20,	D10),
-	PINMUX_IPSR_GPSR(IP6_23_20,	LCDOUT2),
-	PINMUX_IPSR_MSEL(IP6_23_20,	MSIOF2_RXD_D,		SEL_MSIOF2_3),
-	PINMUX_IPSR_MSEL(IP6_23_20,	HRX3_B,			SEL_HSCIF3_1),
-	PINMUX_IPSR_MSEL(IP6_23_20,	VI4_DATA2_A,		SEL_VIN4_0),
-	PINMUX_IPSR_MSEL(IP6_23_20,	CTS4_N_C,		SEL_SCIF4_2),
-	PINMUX_IPSR_GPSR(IP6_23_20,	DU_DR2),
-
-	PINMUX_IPSR_GPSR(IP6_27_24,	D11),
-	PINMUX_IPSR_GPSR(IP6_27_24,	LCDOUT3),
-	PINMUX_IPSR_MSEL(IP6_27_24,	MSIOF2_TXD_D,		SEL_MSIOF2_3),
-	PINMUX_IPSR_MSEL(IP6_27_24,	HTX3_B,			SEL_HSCIF3_1),
-	PINMUX_IPSR_MSEL(IP6_27_24,	VI4_DATA3_A,		SEL_VIN4_0),
-	PINMUX_IPSR_MSEL(IP6_27_24,	RTS4_N_C,		SEL_SCIF4_2),
-	PINMUX_IPSR_GPSR(IP6_27_24,	DU_DR3),
-
-	PINMUX_IPSR_GPSR(IP6_31_28,	D12),
-	PINMUX_IPSR_GPSR(IP6_31_28,	LCDOUT4),
-	PINMUX_IPSR_MSEL(IP6_31_28,	MSIOF2_SS1_D,		SEL_MSIOF2_3),
-	PINMUX_IPSR_MSEL(IP6_31_28,	RX4_C,			SEL_SCIF4_2),
-	PINMUX_IPSR_MSEL(IP6_31_28,	VI4_DATA4_A,		SEL_VIN4_0),
-	PINMUX_IPSR_GPSR(IP6_31_28,	DU_DR4),
-
-	/* IPSR7 */
-	PINMUX_IPSR_GPSR(IP7_3_0,	D13),
-	PINMUX_IPSR_GPSR(IP7_3_0,	LCDOUT5),
-	PINMUX_IPSR_MSEL(IP7_3_0,	MSIOF2_SS2_D,		SEL_MSIOF2_3),
-	PINMUX_IPSR_MSEL(IP7_3_0,	TX4_C,			SEL_SCIF4_2),
-	PINMUX_IPSR_MSEL(IP7_3_0,	VI4_DATA5_A,		SEL_VIN4_0),
-	PINMUX_IPSR_GPSR(IP7_3_0,	DU_DR5),
-
-	PINMUX_IPSR_GPSR(IP7_7_4,	D14),
-	PINMUX_IPSR_GPSR(IP7_7_4,	LCDOUT6),
-	PINMUX_IPSR_MSEL(IP7_7_4,	MSIOF3_SS1_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_MSEL(IP7_7_4,	HRX3_C,			SEL_HSCIF3_2),
-	PINMUX_IPSR_MSEL(IP7_7_4,	VI4_DATA6_A,		SEL_VIN4_0),
-	PINMUX_IPSR_GPSR(IP7_7_4,	DU_DR6),
-	PINMUX_IPSR_MSEL(IP7_7_4,	SCL6_C,			SEL_I2C6_2),
-
-	PINMUX_IPSR_GPSR(IP7_11_8,	D15),
-	PINMUX_IPSR_GPSR(IP7_11_8,	LCDOUT7),
-	PINMUX_IPSR_MSEL(IP7_11_8,	MSIOF3_SS2_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_MSEL(IP7_11_8,	HTX3_C,			SEL_HSCIF3_2),
-	PINMUX_IPSR_MSEL(IP7_11_8,	VI4_DATA7_A,		SEL_VIN4_0),
-	PINMUX_IPSR_GPSR(IP7_11_8,	DU_DR7),
-	PINMUX_IPSR_MSEL(IP7_11_8,	SDA6_C,			SEL_I2C6_2),
-
-	PINMUX_IPSR_GPSR(IP7_19_16,	SD0_CLK),
-	PINMUX_IPSR_MSEL(IP7_19_16,	MSIOF1_SCK_E,		SEL_MSIOF1_4),
-	PINMUX_IPSR_MSEL(IP7_19_16,	STP_OPWM_0_B,		SEL_SSP1_0_1),
-
-	PINMUX_IPSR_GPSR(IP7_23_20,	SD0_CMD),
-	PINMUX_IPSR_MSEL(IP7_23_20,	MSIOF1_SYNC_E,		SEL_MSIOF1_4),
-	PINMUX_IPSR_MSEL(IP7_23_20,	STP_IVCXO27_0_B,	SEL_SSP1_0_1),
-
-	PINMUX_IPSR_GPSR(IP7_27_24,	SD0_DAT0),
-	PINMUX_IPSR_MSEL(IP7_27_24,	MSIOF1_RXD_E,		SEL_MSIOF1_4),
-	PINMUX_IPSR_MSEL(IP7_27_24,	TS_SCK0_B,		SEL_TSIF0_1),
-	PINMUX_IPSR_MSEL(IP7_27_24,	STP_ISCLK_0_B,		SEL_SSP1_0_1),
-
-	PINMUX_IPSR_GPSR(IP7_31_28,	SD0_DAT1),
-	PINMUX_IPSR_MSEL(IP7_31_28,	MSIOF1_TXD_E,		SEL_MSIOF1_4),
-	PINMUX_IPSR_MSEL(IP7_31_28,	TS_SPSYNC0_B,		SEL_TSIF0_1),
-	PINMUX_IPSR_MSEL(IP7_31_28,	STP_ISSYNC_0_B,		SEL_SSP1_0_1),
-
-	/* IPSR8 */
-	PINMUX_IPSR_GPSR(IP8_3_0,	SD0_DAT2),
-	PINMUX_IPSR_MSEL(IP8_3_0,	MSIOF1_SS1_E,		SEL_MSIOF1_4),
-	PINMUX_IPSR_MSEL(IP8_3_0,	TS_SDAT0_B,		SEL_TSIF0_1),
-	PINMUX_IPSR_MSEL(IP8_3_0,	STP_ISD_0_B,		SEL_SSP1_0_1),
-
-	PINMUX_IPSR_GPSR(IP8_7_4,	SD0_DAT3),
-	PINMUX_IPSR_MSEL(IP8_7_4,	MSIOF1_SS2_E,		SEL_MSIOF1_4),
-	PINMUX_IPSR_MSEL(IP8_7_4,	TS_SDEN0_B,		SEL_TSIF0_1),
-	PINMUX_IPSR_MSEL(IP8_7_4,	STP_ISEN_0_B,		SEL_SSP1_0_1),
-
-	PINMUX_IPSR_GPSR(IP8_11_8,	SD1_CLK),
-	PINMUX_IPSR_MSEL(IP8_11_8,	MSIOF1_SCK_G,		SEL_MSIOF1_6),
-	PINMUX_IPSR_MSEL(IP8_11_8,	SIM0_CLK_A,		SEL_SIMCARD_0),
-
-	PINMUX_IPSR_GPSR(IP8_15_12,	SD1_CMD),
-	PINMUX_IPSR_MSEL(IP8_15_12,	MSIOF1_SYNC_G,		SEL_MSIOF1_6),
-	PINMUX_IPSR_GPSR(IP8_15_12,	NFCE_N_B),
-	PINMUX_IPSR_MSEL(IP8_15_12,	SIM0_D_A,		SEL_SIMCARD_0),
-	PINMUX_IPSR_MSEL(IP8_15_12,	STP_IVCXO27_1_B,	SEL_SSP1_1_1),
-
-	PINMUX_IPSR_GPSR(IP8_19_16,	SD1_DAT0),
-	PINMUX_IPSR_GPSR(IP8_19_16,	SD2_DAT4),
-	PINMUX_IPSR_MSEL(IP8_19_16,	MSIOF1_RXD_G,		SEL_MSIOF1_6),
-	PINMUX_IPSR_GPSR(IP8_19_16,	NFWP_N_B),
-	PINMUX_IPSR_MSEL(IP8_19_16,	TS_SCK1_B,		SEL_TSIF1_1),
-	PINMUX_IPSR_MSEL(IP8_19_16,	STP_ISCLK_1_B,		SEL_SSP1_1_1),
-
-	PINMUX_IPSR_GPSR(IP8_23_20,	SD1_DAT1),
-	PINMUX_IPSR_GPSR(IP8_23_20,	SD2_DAT5),
-	PINMUX_IPSR_MSEL(IP8_23_20,	MSIOF1_TXD_G,		SEL_MSIOF1_6),
-	PINMUX_IPSR_GPSR(IP8_23_20,	NFDATA14_B),
-	PINMUX_IPSR_MSEL(IP8_23_20,	TS_SPSYNC1_B,		SEL_TSIF1_1),
-	PINMUX_IPSR_MSEL(IP8_23_20,	STP_ISSYNC_1_B,		SEL_SSP1_1_1),
-
-	PINMUX_IPSR_GPSR(IP8_27_24,	SD1_DAT2),
-	PINMUX_IPSR_GPSR(IP8_27_24,	SD2_DAT6),
-	PINMUX_IPSR_MSEL(IP8_27_24,	MSIOF1_SS1_G,		SEL_MSIOF1_6),
-	PINMUX_IPSR_GPSR(IP8_27_24,	NFDATA15_B),
-	PINMUX_IPSR_MSEL(IP8_27_24,	TS_SDAT1_B,		SEL_TSIF1_1),
-	PINMUX_IPSR_MSEL(IP8_27_24,	STP_ISD_1_B,		SEL_SSP1_1_1),
-
-	PINMUX_IPSR_GPSR(IP8_31_28,	SD1_DAT3),
-	PINMUX_IPSR_GPSR(IP8_31_28,	SD2_DAT7),
-	PINMUX_IPSR_MSEL(IP8_31_28,	MSIOF1_SS2_G,		SEL_MSIOF1_6),
-	PINMUX_IPSR_GPSR(IP8_31_28,	NFRB_N_B),
-	PINMUX_IPSR_MSEL(IP8_31_28,	TS_SDEN1_B,		SEL_TSIF1_1),
-	PINMUX_IPSR_MSEL(IP8_31_28,	STP_ISEN_1_B,		SEL_SSP1_1_1),
-
-	/* IPSR9 */
-	PINMUX_IPSR_GPSR(IP9_3_0,	SD2_CLK),
-	PINMUX_IPSR_GPSR(IP9_3_0,	NFDATA8),
-
-	PINMUX_IPSR_GPSR(IP9_7_4,	SD2_CMD),
-	PINMUX_IPSR_GPSR(IP9_7_4,	NFDATA9),
-
-	PINMUX_IPSR_GPSR(IP9_11_8,	SD2_DAT0),
-	PINMUX_IPSR_GPSR(IP9_11_8,	NFDATA10),
-
-	PINMUX_IPSR_GPSR(IP9_15_12,	SD2_DAT1),
-	PINMUX_IPSR_GPSR(IP9_15_12,	NFDATA11),
-
-	PINMUX_IPSR_GPSR(IP9_19_16,	SD2_DAT2),
-	PINMUX_IPSR_GPSR(IP9_19_16,	NFDATA12),
-
-	PINMUX_IPSR_GPSR(IP9_23_20,	SD2_DAT3),
-	PINMUX_IPSR_GPSR(IP9_23_20,	NFDATA13),
-
-	PINMUX_IPSR_GPSR(IP9_27_24,	SD2_DS),
-	PINMUX_IPSR_GPSR(IP9_27_24,	NFALE),
-	PINMUX_IPSR_GPSR(IP9_27_24,	SATA_DEVSLP_B),
-
-	PINMUX_IPSR_GPSR(IP9_31_28,	SD3_CLK),
-	PINMUX_IPSR_GPSR(IP9_31_28,	NFWE_N),
-
-	/* IPSR10 */
-	PINMUX_IPSR_GPSR(IP10_3_0,	SD3_CMD),
-	PINMUX_IPSR_GPSR(IP10_3_0,	NFRE_N),
-
-	PINMUX_IPSR_GPSR(IP10_7_4,	SD3_DAT0),
-	PINMUX_IPSR_GPSR(IP10_7_4,	NFDATA0),
-
-	PINMUX_IPSR_GPSR(IP10_11_8,	SD3_DAT1),
-	PINMUX_IPSR_GPSR(IP10_11_8,	NFDATA1),
-
-	PINMUX_IPSR_GPSR(IP10_15_12,	SD3_DAT2),
-	PINMUX_IPSR_GPSR(IP10_15_12,	NFDATA2),
-
-	PINMUX_IPSR_GPSR(IP10_19_16,	SD3_DAT3),
-	PINMUX_IPSR_GPSR(IP10_19_16,	NFDATA3),
-
-	PINMUX_IPSR_GPSR(IP10_23_20,	SD3_DAT4),
-	PINMUX_IPSR_MSEL(IP10_23_20,	SD2_CD_A,		SEL_SDHI2_0),
-	PINMUX_IPSR_GPSR(IP10_23_20,	NFDATA4),
-
-	PINMUX_IPSR_GPSR(IP10_27_24,	SD3_DAT5),
-	PINMUX_IPSR_MSEL(IP10_27_24,	SD2_WP_A,		SEL_SDHI2_0),
-	PINMUX_IPSR_GPSR(IP10_27_24,	NFDATA5),
-
-	PINMUX_IPSR_GPSR(IP10_31_28,	SD3_DAT6),
-	PINMUX_IPSR_GPSR(IP10_31_28,	SD3_CD),
-	PINMUX_IPSR_GPSR(IP10_31_28,	NFDATA6),
-
-	/* IPSR11 */
-	PINMUX_IPSR_GPSR(IP11_3_0,	SD3_DAT7),
-	PINMUX_IPSR_GPSR(IP11_3_0,	SD3_WP),
-	PINMUX_IPSR_GPSR(IP11_3_0,	NFDATA7),
-
-	PINMUX_IPSR_GPSR(IP11_7_4,	SD3_DS),
-	PINMUX_IPSR_GPSR(IP11_7_4,	NFCLE),
-
-	PINMUX_IPSR_GPSR(IP11_11_8,	SD0_CD),
-	PINMUX_IPSR_MSEL(IP11_11_8,	SCL2_B,			SEL_I2C2_1),
-	PINMUX_IPSR_MSEL(IP11_11_8,	SIM0_RST_A,		SEL_SIMCARD_0),
-
-	PINMUX_IPSR_GPSR(IP11_15_12,	SD0_WP),
-	PINMUX_IPSR_MSEL(IP11_15_12,	SDA2_B,			SEL_I2C2_1),
-
-	PINMUX_IPSR_MSEL(IP11_19_16,	SD1_CD,			I2C_SEL_0_0),
-	PINMUX_IPSR_PHYS_MSEL(IP11_19_16, SIM0_CLK_B,		I2C_SEL_0_0,	SEL_SIMCARD_1),
-	PINMUX_IPSR_PHYS(IP11_19_16,	SCL0,			I2C_SEL_0_1),
-
-	PINMUX_IPSR_MSEL(IP11_23_20,	SD1_WP,			I2C_SEL_0_0),
-	PINMUX_IPSR_PHYS_MSEL(IP11_23_20, SIM0_D_B,		I2C_SEL_0_0,	SEL_SIMCARD_1),
-	PINMUX_IPSR_PHYS(IP11_23_20,	SDA0,			I2C_SEL_0_1),
-
-	PINMUX_IPSR_GPSR(IP11_27_24,	SCK0),
-	PINMUX_IPSR_MSEL(IP11_27_24,	HSCK1_B,		SEL_HSCIF1_1),
-	PINMUX_IPSR_MSEL(IP11_27_24,	MSIOF1_SS2_B,		SEL_MSIOF1_1),
-	PINMUX_IPSR_MSEL(IP11_27_24,	AUDIO_CLKC_B,		SEL_ADGC_1),
-	PINMUX_IPSR_MSEL(IP11_27_24,	SDA2_A,			SEL_I2C2_0),
-	PINMUX_IPSR_MSEL(IP11_27_24,	SIM0_RST_B,		SEL_SIMCARD_1),
-	PINMUX_IPSR_MSEL(IP11_27_24,	STP_OPWM_0_C,		SEL_SSP1_0_2),
-	PINMUX_IPSR_MSEL(IP11_27_24,	RIF0_CLK_B,		SEL_DRIF0_1),
-	PINMUX_IPSR_GPSR(IP11_27_24,	ADICHS2),
-	PINMUX_IPSR_MSEL(IP11_27_24,	SCK5_B,			SEL_SCIF5_1),
-
-	PINMUX_IPSR_GPSR(IP11_31_28,	RX0),
-	PINMUX_IPSR_MSEL(IP11_31_28,	HRX1_B,			SEL_HSCIF1_1),
-	PINMUX_IPSR_MSEL(IP11_31_28,	TS_SCK0_C,		SEL_TSIF0_2),
-	PINMUX_IPSR_MSEL(IP11_31_28,	STP_ISCLK_0_C,		SEL_SSP1_0_2),
-	PINMUX_IPSR_MSEL(IP11_31_28,	RIF0_D0_B,		SEL_DRIF0_1),
-
-	/* IPSR12 */
-	PINMUX_IPSR_GPSR(IP12_3_0,	TX0),
-	PINMUX_IPSR_MSEL(IP12_3_0,	HTX1_B,			SEL_HSCIF1_1),
-	PINMUX_IPSR_MSEL(IP12_3_0,	TS_SPSYNC0_C,		SEL_TSIF0_2),
-	PINMUX_IPSR_MSEL(IP12_3_0,	STP_ISSYNC_0_C,		SEL_SSP1_0_2),
-	PINMUX_IPSR_MSEL(IP12_3_0,	RIF0_D1_B,		SEL_DRIF0_1),
-
-	PINMUX_IPSR_GPSR(IP12_7_4,	CTS0_N),
-	PINMUX_IPSR_MSEL(IP12_7_4,	HCTS1_N_B,		SEL_HSCIF1_1),
-	PINMUX_IPSR_MSEL(IP12_7_4,	MSIOF1_SYNC_B,		SEL_MSIOF1_1),
-	PINMUX_IPSR_MSEL(IP12_7_4,	TS_SPSYNC1_C,		SEL_TSIF1_2),
-	PINMUX_IPSR_MSEL(IP12_7_4,	STP_ISSYNC_1_C,		SEL_SSP1_1_2),
-	PINMUX_IPSR_MSEL(IP12_7_4,	RIF1_SYNC_B,		SEL_DRIF1_1),
-	PINMUX_IPSR_GPSR(IP12_7_4,	AUDIO_CLKOUT_C),
-	PINMUX_IPSR_GPSR(IP12_7_4,	ADICS_SAMP),
-
-	PINMUX_IPSR_GPSR(IP12_11_8,	RTS0_N),
-	PINMUX_IPSR_MSEL(IP12_11_8,	HRTS1_N_B,		SEL_HSCIF1_1),
-	PINMUX_IPSR_MSEL(IP12_11_8,	MSIOF1_SS1_B,		SEL_MSIOF1_1),
-	PINMUX_IPSR_MSEL(IP12_11_8,	AUDIO_CLKA_B,		SEL_ADGA_1),
-	PINMUX_IPSR_MSEL(IP12_11_8,	SCL2_A,			SEL_I2C2_0),
-	PINMUX_IPSR_MSEL(IP12_11_8,	STP_IVCXO27_1_C,	SEL_SSP1_1_2),
-	PINMUX_IPSR_MSEL(IP12_11_8,	RIF0_SYNC_B,		SEL_DRIF0_1),
-	PINMUX_IPSR_GPSR(IP12_11_8,	ADICHS1),
-
-	PINMUX_IPSR_MSEL(IP12_15_12,	RX1_A,			SEL_SCIF1_0),
-	PINMUX_IPSR_MSEL(IP12_15_12,	HRX1_A,			SEL_HSCIF1_0),
-	PINMUX_IPSR_MSEL(IP12_15_12,	TS_SDAT0_C,		SEL_TSIF0_2),
-	PINMUX_IPSR_MSEL(IP12_15_12,	STP_ISD_0_C,		SEL_SSP1_0_2),
-	PINMUX_IPSR_MSEL(IP12_15_12,	RIF1_CLK_C,		SEL_DRIF1_2),
-
-	PINMUX_IPSR_MSEL(IP12_19_16,	TX1_A,			SEL_SCIF1_0),
-	PINMUX_IPSR_MSEL(IP12_19_16,	HTX1_A,			SEL_HSCIF1_0),
-	PINMUX_IPSR_MSEL(IP12_19_16,	TS_SDEN0_C,		SEL_TSIF0_2),
-	PINMUX_IPSR_MSEL(IP12_19_16,	STP_ISEN_0_C,		SEL_SSP1_0_2),
-	PINMUX_IPSR_MSEL(IP12_19_16,	RIF1_D0_C,		SEL_DRIF1_2),
-
-	PINMUX_IPSR_GPSR(IP12_23_20,	CTS1_N),
-	PINMUX_IPSR_MSEL(IP12_23_20,	HCTS1_N_A,		SEL_HSCIF1_0),
-	PINMUX_IPSR_MSEL(IP12_23_20,	MSIOF1_RXD_B,		SEL_MSIOF1_1),
-	PINMUX_IPSR_MSEL(IP12_23_20,	TS_SDEN1_C,		SEL_TSIF1_2),
-	PINMUX_IPSR_MSEL(IP12_23_20,	STP_ISEN_1_C,		SEL_SSP1_1_2),
-	PINMUX_IPSR_MSEL(IP12_23_20,	RIF1_D0_B,		SEL_DRIF1_1),
-	PINMUX_IPSR_GPSR(IP12_23_20,	ADIDATA),
-
-	PINMUX_IPSR_GPSR(IP12_27_24,	RTS1_N),
-	PINMUX_IPSR_MSEL(IP12_27_24,	HRTS1_N_A,		SEL_HSCIF1_0),
-	PINMUX_IPSR_MSEL(IP12_27_24,	MSIOF1_TXD_B,		SEL_MSIOF1_1),
-	PINMUX_IPSR_MSEL(IP12_27_24,	TS_SDAT1_C,		SEL_TSIF1_2),
-	PINMUX_IPSR_MSEL(IP12_27_24,	STP_ISD_1_C,		SEL_SSP1_1_2),
-	PINMUX_IPSR_MSEL(IP12_27_24,	RIF1_D1_B,		SEL_DRIF1_1),
-	PINMUX_IPSR_GPSR(IP12_27_24,	ADICHS0),
-
-	PINMUX_IPSR_GPSR(IP12_31_28,	SCK2),
-	PINMUX_IPSR_MSEL(IP12_31_28,	SCIF_CLK_B,		SEL_SCIF_1),
-	PINMUX_IPSR_MSEL(IP12_31_28,	MSIOF1_SCK_B,		SEL_MSIOF1_1),
-	PINMUX_IPSR_MSEL(IP12_31_28,	TS_SCK1_C,		SEL_TSIF1_2),
-	PINMUX_IPSR_MSEL(IP12_31_28,	STP_ISCLK_1_C,		SEL_SSP1_1_2),
-	PINMUX_IPSR_MSEL(IP12_31_28,	RIF1_CLK_B,		SEL_DRIF1_1),
-	PINMUX_IPSR_GPSR(IP12_31_28,	ADICLK),
-
-	/* IPSR13 */
-	PINMUX_IPSR_MSEL(IP13_3_0,	TX2_A,			SEL_SCIF2_0),
-	PINMUX_IPSR_MSEL(IP13_3_0,	SD2_CD_B,		SEL_SDHI2_1),
-	PINMUX_IPSR_MSEL(IP13_3_0,	SCL1_A,			SEL_I2C1_0),
-	PINMUX_IPSR_MSEL(IP13_3_0,	FMCLK_A,		SEL_FM_0),
-	PINMUX_IPSR_MSEL(IP13_3_0,	RIF1_D1_C,		SEL_DRIF1_2),
-	PINMUX_IPSR_GPSR(IP13_3_0,	FSO_CFE_0_N),
-
-	PINMUX_IPSR_MSEL(IP13_7_4,	RX2_A,			SEL_SCIF2_0),
-	PINMUX_IPSR_MSEL(IP13_7_4,	SD2_WP_B,		SEL_SDHI2_1),
-	PINMUX_IPSR_MSEL(IP13_7_4,	SDA1_A,			SEL_I2C1_0),
-	PINMUX_IPSR_MSEL(IP13_7_4,	FMIN_A,			SEL_FM_0),
-	PINMUX_IPSR_MSEL(IP13_7_4,	RIF1_SYNC_C,		SEL_DRIF1_2),
-	PINMUX_IPSR_GPSR(IP13_7_4,	FSO_CFE_1_N),
-
-	PINMUX_IPSR_GPSR(IP13_11_8,	HSCK0),
-	PINMUX_IPSR_MSEL(IP13_11_8,	MSIOF1_SCK_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP13_11_8,	AUDIO_CLKB_A,		SEL_ADGB_0),
-	PINMUX_IPSR_MSEL(IP13_11_8,	SSI_SDATA1_B,		SEL_SSI1_1),
-	PINMUX_IPSR_MSEL(IP13_11_8,	TS_SCK0_D,		SEL_TSIF0_3),
-	PINMUX_IPSR_MSEL(IP13_11_8,	STP_ISCLK_0_D,		SEL_SSP1_0_3),
-	PINMUX_IPSR_MSEL(IP13_11_8,	RIF0_CLK_C,		SEL_DRIF0_2),
-	PINMUX_IPSR_MSEL(IP13_11_8,	RX5_B,			SEL_SCIF5_1),
-
-	PINMUX_IPSR_GPSR(IP13_15_12,	HRX0),
-	PINMUX_IPSR_MSEL(IP13_15_12,	MSIOF1_RXD_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP13_15_12,	SSI_SDATA2_B,		SEL_SSI2_1),
-	PINMUX_IPSR_MSEL(IP13_15_12,	TS_SDEN0_D,		SEL_TSIF0_3),
-	PINMUX_IPSR_MSEL(IP13_15_12,	STP_ISEN_0_D,		SEL_SSP1_0_3),
-	PINMUX_IPSR_MSEL(IP13_15_12,	RIF0_D0_C,		SEL_DRIF0_2),
-
-	PINMUX_IPSR_GPSR(IP13_19_16,	HTX0),
-	PINMUX_IPSR_MSEL(IP13_19_16,	MSIOF1_TXD_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP13_19_16,	SSI_SDATA9_B,		SEL_SSI9_1),
-	PINMUX_IPSR_MSEL(IP13_19_16,	TS_SDAT0_D,		SEL_TSIF0_3),
-	PINMUX_IPSR_MSEL(IP13_19_16,	STP_ISD_0_D,		SEL_SSP1_0_3),
-	PINMUX_IPSR_MSEL(IP13_19_16,	RIF0_D1_C,		SEL_DRIF0_2),
-
-	PINMUX_IPSR_GPSR(IP13_23_20,	HCTS0_N),
-	PINMUX_IPSR_MSEL(IP13_23_20,	RX2_B,			SEL_SCIF2_1),
-	PINMUX_IPSR_MSEL(IP13_23_20,	MSIOF1_SYNC_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP13_23_20,	SSI_SCK9_A,		SEL_SSI9_0),
-	PINMUX_IPSR_MSEL(IP13_23_20,	TS_SPSYNC0_D,		SEL_TSIF0_3),
-	PINMUX_IPSR_MSEL(IP13_23_20,	STP_ISSYNC_0_D,		SEL_SSP1_0_3),
-	PINMUX_IPSR_MSEL(IP13_23_20,	RIF0_SYNC_C,		SEL_DRIF0_2),
-	PINMUX_IPSR_GPSR(IP13_23_20,	AUDIO_CLKOUT1_A),
-
-	PINMUX_IPSR_GPSR(IP13_27_24,	HRTS0_N),
-	PINMUX_IPSR_MSEL(IP13_27_24,	TX2_B,			SEL_SCIF2_1),
-	PINMUX_IPSR_MSEL(IP13_27_24,	MSIOF1_SS1_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP13_27_24,	SSI_WS9_A,		SEL_SSI9_0),
-	PINMUX_IPSR_MSEL(IP13_27_24,	STP_IVCXO27_0_D,	SEL_SSP1_0_3),
-	PINMUX_IPSR_MSEL(IP13_27_24,	BPFCLK_A,		SEL_FM_0),
-	PINMUX_IPSR_GPSR(IP13_27_24,	AUDIO_CLKOUT2_A),
-
-	PINMUX_IPSR_GPSR(IP13_31_28,	MSIOF0_SYNC),
-	PINMUX_IPSR_GPSR(IP13_31_28,	AUDIO_CLKOUT_A),
-	PINMUX_IPSR_MSEL(IP13_31_28,	TX5_B,			SEL_SCIF5_1),
-	PINMUX_IPSR_MSEL(IP13_31_28,	BPFCLK_D,		SEL_FM_3),
-
-	/* IPSR14 */
-	PINMUX_IPSR_GPSR(IP14_3_0,	MSIOF0_SS1),
-	PINMUX_IPSR_MSEL(IP14_3_0,	RX5_A,			SEL_SCIF5_0),
-	PINMUX_IPSR_GPSR(IP14_3_0,	NFWP_N_A),
-	PINMUX_IPSR_MSEL(IP14_3_0,	AUDIO_CLKA_C,		SEL_ADGA_2),
-	PINMUX_IPSR_MSEL(IP14_3_0,	SSI_SCK2_A,		SEL_SSI2_0),
-	PINMUX_IPSR_MSEL(IP14_3_0,	STP_IVCXO27_0_C,	SEL_SSP1_0_2),
-	PINMUX_IPSR_GPSR(IP14_3_0,	AUDIO_CLKOUT3_A),
-	PINMUX_IPSR_MSEL(IP14_3_0,	TCLK1_B,		SEL_TIMER_TMU1_1),
-
-	PINMUX_IPSR_GPSR(IP14_7_4,	MSIOF0_SS2),
-	PINMUX_IPSR_MSEL(IP14_7_4,	TX5_A,			SEL_SCIF5_0),
-	PINMUX_IPSR_MSEL(IP14_7_4,	MSIOF1_SS2_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP14_7_4,	AUDIO_CLKC_A,		SEL_ADGC_0),
-	PINMUX_IPSR_MSEL(IP14_7_4,	SSI_WS2_A,		SEL_SSI2_0),
-	PINMUX_IPSR_MSEL(IP14_7_4,	STP_OPWM_0_D,		SEL_SSP1_0_3),
-	PINMUX_IPSR_GPSR(IP14_7_4,	AUDIO_CLKOUT_D),
-	PINMUX_IPSR_MSEL(IP14_7_4,	SPEEDIN_B,		SEL_SPEED_PULSE_1),
-
-	PINMUX_IPSR_GPSR(IP14_11_8,	MLB_CLK),
-	PINMUX_IPSR_MSEL(IP14_11_8,	MSIOF1_SCK_F,		SEL_MSIOF1_5),
-	PINMUX_IPSR_MSEL(IP14_11_8,	SCL1_B,			SEL_I2C1_1),
-
-	PINMUX_IPSR_GPSR(IP14_15_12,	MLB_SIG),
-	PINMUX_IPSR_MSEL(IP14_15_12,	RX1_B,			SEL_SCIF1_1),
-	PINMUX_IPSR_MSEL(IP14_15_12,	MSIOF1_SYNC_F,		SEL_MSIOF1_5),
-	PINMUX_IPSR_MSEL(IP14_15_12,	SDA1_B,			SEL_I2C1_1),
-
-	PINMUX_IPSR_GPSR(IP14_19_16,	MLB_DAT),
-	PINMUX_IPSR_MSEL(IP14_19_16,	TX1_B,			SEL_SCIF1_1),
-	PINMUX_IPSR_MSEL(IP14_19_16,	MSIOF1_RXD_F,		SEL_MSIOF1_5),
-
-	PINMUX_IPSR_GPSR(IP14_23_20,	SSI_SCK01239),
-	PINMUX_IPSR_MSEL(IP14_23_20,	MSIOF1_TXD_F,		SEL_MSIOF1_5),
-
-	PINMUX_IPSR_GPSR(IP14_27_24,	SSI_WS01239),
-	PINMUX_IPSR_MSEL(IP14_27_24,	MSIOF1_SS1_F,		SEL_MSIOF1_5),
-
-	PINMUX_IPSR_GPSR(IP14_31_28,	SSI_SDATA0),
-	PINMUX_IPSR_MSEL(IP14_31_28,	MSIOF1_SS2_F,		SEL_MSIOF1_5),
-
-	/* IPSR15 */
-	PINMUX_IPSR_MSEL(IP15_3_0,	SSI_SDATA1_A,		SEL_SSI1_0),
-
-	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SDATA2_A,		SEL_SSI2_0),
-	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SCK1_B,		SEL_SSI1_1),
-
-	PINMUX_IPSR_GPSR(IP15_11_8,	SSI_SCK349),
-	PINMUX_IPSR_MSEL(IP15_11_8,	MSIOF1_SS1_A,		SEL_MSIOF1_0),
-	PINMUX_IPSR_MSEL(IP15_11_8,	STP_OPWM_0_A,		SEL_SSP1_0_0),
-
-	PINMUX_IPSR_GPSR(IP15_15_12,	SSI_WS349),
-	PINMUX_IPSR_MSEL(IP15_15_12,	HCTS2_N_A,		SEL_HSCIF2_0),
-	PINMUX_IPSR_MSEL(IP15_15_12,	MSIOF1_SS2_A,		SEL_MSIOF1_0),
-	PINMUX_IPSR_MSEL(IP15_15_12,	STP_IVCXO27_0_A,	SEL_SSP1_0_0),
-
-	PINMUX_IPSR_GPSR(IP15_19_16,	SSI_SDATA3),
-	PINMUX_IPSR_MSEL(IP15_19_16,	HRTS2_N_A,		SEL_HSCIF2_0),
-	PINMUX_IPSR_MSEL(IP15_19_16,	MSIOF1_TXD_A,		SEL_MSIOF1_0),
-	PINMUX_IPSR_MSEL(IP15_19_16,	TS_SCK0_A,		SEL_TSIF0_0),
-	PINMUX_IPSR_MSEL(IP15_19_16,	STP_ISCLK_0_A,		SEL_SSP1_0_0),
-	PINMUX_IPSR_MSEL(IP15_19_16,	RIF0_D1_A,		SEL_DRIF0_0),
-	PINMUX_IPSR_MSEL(IP15_19_16,	RIF2_D0_A,		SEL_DRIF2_0),
-
-	PINMUX_IPSR_GPSR(IP15_23_20,	SSI_SCK4),
-	PINMUX_IPSR_MSEL(IP15_23_20,	HRX2_A,			SEL_HSCIF2_0),
-	PINMUX_IPSR_MSEL(IP15_23_20,	MSIOF1_SCK_A,		SEL_MSIOF1_0),
-	PINMUX_IPSR_MSEL(IP15_23_20,	TS_SDAT0_A,		SEL_TSIF0_0),
-	PINMUX_IPSR_MSEL(IP15_23_20,	STP_ISD_0_A,		SEL_SSP1_0_0),
-	PINMUX_IPSR_MSEL(IP15_23_20,	RIF0_CLK_A,		SEL_DRIF0_0),
-	PINMUX_IPSR_MSEL(IP15_23_20,	RIF2_CLK_A,		SEL_DRIF2_0),
-
-	PINMUX_IPSR_GPSR(IP15_27_24,	SSI_WS4),
-	PINMUX_IPSR_MSEL(IP15_27_24,	HTX2_A,			SEL_HSCIF2_0),
-	PINMUX_IPSR_MSEL(IP15_27_24,	MSIOF1_SYNC_A,		SEL_MSIOF1_0),
-	PINMUX_IPSR_MSEL(IP15_27_24,	TS_SDEN0_A,		SEL_TSIF0_0),
-	PINMUX_IPSR_MSEL(IP15_27_24,	STP_ISEN_0_A,		SEL_SSP1_0_0),
-	PINMUX_IPSR_MSEL(IP15_27_24,	RIF0_SYNC_A,		SEL_DRIF0_0),
-	PINMUX_IPSR_MSEL(IP15_27_24,	RIF2_SYNC_A,		SEL_DRIF2_0),
-
-	PINMUX_IPSR_GPSR(IP15_31_28,	SSI_SDATA4),
-	PINMUX_IPSR_MSEL(IP15_31_28,	HSCK2_A,		SEL_HSCIF2_0),
-	PINMUX_IPSR_MSEL(IP15_31_28,	MSIOF1_RXD_A,		SEL_MSIOF1_0),
-	PINMUX_IPSR_MSEL(IP15_31_28,	TS_SPSYNC0_A,		SEL_TSIF0_0),
-	PINMUX_IPSR_MSEL(IP15_31_28,	STP_ISSYNC_0_A,		SEL_SSP1_0_0),
-	PINMUX_IPSR_MSEL(IP15_31_28,	RIF0_D0_A,		SEL_DRIF0_0),
-	PINMUX_IPSR_MSEL(IP15_31_28,	RIF2_D1_A,		SEL_DRIF2_0),
-
-	/* IPSR16 */
-	PINMUX_IPSR_GPSR(IP16_3_0,	SSI_SCK6),
-	PINMUX_IPSR_GPSR(IP16_3_0,	USB2_PWEN),
-	PINMUX_IPSR_MSEL(IP16_3_0,	SIM0_RST_D,		SEL_SIMCARD_3),
-
-	PINMUX_IPSR_GPSR(IP16_7_4,	SSI_WS6),
-	PINMUX_IPSR_GPSR(IP16_7_4,	USB2_OVC),
-	PINMUX_IPSR_MSEL(IP16_7_4,	SIM0_D_D,		SEL_SIMCARD_3),
-
-	PINMUX_IPSR_GPSR(IP16_11_8,	SSI_SDATA6),
-	PINMUX_IPSR_MSEL(IP16_11_8,	SIM0_CLK_D,		SEL_SIMCARD_3),
-	PINMUX_IPSR_GPSR(IP16_11_8,	SATA_DEVSLP_A),
-
-	PINMUX_IPSR_GPSR(IP16_15_12,	SSI_SCK78),
-	PINMUX_IPSR_MSEL(IP16_15_12,	HRX2_B,			SEL_HSCIF2_1),
-	PINMUX_IPSR_MSEL(IP16_15_12,	MSIOF1_SCK_C,		SEL_MSIOF1_2),
-	PINMUX_IPSR_MSEL(IP16_15_12,	TS_SCK1_A,		SEL_TSIF1_0),
-	PINMUX_IPSR_MSEL(IP16_15_12,	STP_ISCLK_1_A,		SEL_SSP1_1_0),
-	PINMUX_IPSR_MSEL(IP16_15_12,	RIF1_CLK_A,		SEL_DRIF1_0),
-	PINMUX_IPSR_MSEL(IP16_15_12,	RIF3_CLK_A,		SEL_DRIF3_0),
-
-	PINMUX_IPSR_GPSR(IP16_19_16,	SSI_WS78),
-	PINMUX_IPSR_MSEL(IP16_19_16,	HTX2_B,			SEL_HSCIF2_1),
-	PINMUX_IPSR_MSEL(IP16_19_16,	MSIOF1_SYNC_C,		SEL_MSIOF1_2),
-	PINMUX_IPSR_MSEL(IP16_19_16,	TS_SDAT1_A,		SEL_TSIF1_0),
-	PINMUX_IPSR_MSEL(IP16_19_16,	STP_ISD_1_A,		SEL_SSP1_1_0),
-	PINMUX_IPSR_MSEL(IP16_19_16,	RIF1_SYNC_A,		SEL_DRIF1_0),
-	PINMUX_IPSR_MSEL(IP16_19_16,	RIF3_SYNC_A,		SEL_DRIF3_0),
-
-	PINMUX_IPSR_GPSR(IP16_23_20,	SSI_SDATA7),
-	PINMUX_IPSR_MSEL(IP16_23_20,	HCTS2_N_B,		SEL_HSCIF2_1),
-	PINMUX_IPSR_MSEL(IP16_23_20,	MSIOF1_RXD_C,		SEL_MSIOF1_2),
-	PINMUX_IPSR_MSEL(IP16_23_20,	TS_SDEN1_A,		SEL_TSIF1_0),
-	PINMUX_IPSR_MSEL(IP16_23_20,	STP_ISEN_1_A,		SEL_SSP1_1_0),
-	PINMUX_IPSR_MSEL(IP16_23_20,	RIF1_D0_A,		SEL_DRIF1_0),
-	PINMUX_IPSR_MSEL(IP16_23_20,	RIF3_D0_A,		SEL_DRIF3_0),
-	PINMUX_IPSR_MSEL(IP16_23_20,	TCLK2_A,		SEL_TIMER_TMU2_0),
-
-	PINMUX_IPSR_GPSR(IP16_27_24,	SSI_SDATA8),
-	PINMUX_IPSR_MSEL(IP16_27_24,	HRTS2_N_B,		SEL_HSCIF2_1),
-	PINMUX_IPSR_MSEL(IP16_27_24,	MSIOF1_TXD_C,		SEL_MSIOF1_2),
-	PINMUX_IPSR_MSEL(IP16_27_24,	TS_SPSYNC1_A,		SEL_TSIF1_0),
-	PINMUX_IPSR_MSEL(IP16_27_24,	STP_ISSYNC_1_A,		SEL_SSP1_1_0),
-	PINMUX_IPSR_MSEL(IP16_27_24,	RIF1_D1_A,		SEL_DRIF1_0),
-	PINMUX_IPSR_MSEL(IP16_27_24,	RIF3_D1_A,		SEL_DRIF3_0),
-
-	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_SDATA9_A,		SEL_SSI9_0),
-	PINMUX_IPSR_MSEL(IP16_31_28,	HSCK2_B,		SEL_HSCIF2_1),
-	PINMUX_IPSR_MSEL(IP16_31_28,	MSIOF1_SS1_C,		SEL_MSIOF1_2),
-	PINMUX_IPSR_MSEL(IP16_31_28,	HSCK1_A,		SEL_HSCIF1_0),
-	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_WS1_B,		SEL_SSI1_1),
-	PINMUX_IPSR_GPSR(IP16_31_28,	SCK1),
-	PINMUX_IPSR_MSEL(IP16_31_28,	STP_IVCXO27_1_A,	SEL_SSP1_1_0),
-	PINMUX_IPSR_MSEL(IP16_31_28,	SCK5_A,			SEL_SCIF5_0),
-
-	/* IPSR17 */
-	PINMUX_IPSR_MSEL(IP17_3_0,	AUDIO_CLKA_A,		SEL_ADGA_0),
-
-	PINMUX_IPSR_MSEL(IP17_7_4,	AUDIO_CLKB_B,		SEL_ADGB_1),
-	PINMUX_IPSR_MSEL(IP17_7_4,	SCIF_CLK_A,		SEL_SCIF_0),
-	PINMUX_IPSR_MSEL(IP17_7_4,	STP_IVCXO27_1_D,	SEL_SSP1_1_3),
-	PINMUX_IPSR_MSEL(IP17_7_4,	REMOCON_A,		SEL_REMOCON_0),
-	PINMUX_IPSR_MSEL(IP17_7_4,	TCLK1_A,		SEL_TIMER_TMU1_0),
-
-	PINMUX_IPSR_GPSR(IP17_11_8,	USB0_PWEN),
-	PINMUX_IPSR_MSEL(IP17_11_8,	SIM0_RST_C,		SEL_SIMCARD_2),
-	PINMUX_IPSR_MSEL(IP17_11_8,	TS_SCK1_D,		SEL_TSIF1_3),
-	PINMUX_IPSR_MSEL(IP17_11_8,	STP_ISCLK_1_D,		SEL_SSP1_1_3),
-	PINMUX_IPSR_MSEL(IP17_11_8,	BPFCLK_B,		SEL_FM_1),
-	PINMUX_IPSR_MSEL(IP17_11_8,	RIF3_CLK_B,		SEL_DRIF3_1),
-	PINMUX_IPSR_MSEL(IP17_11_8,	HSCK2_C,		SEL_HSCIF2_2),
-
-	PINMUX_IPSR_GPSR(IP17_15_12,	USB0_OVC),
-	PINMUX_IPSR_MSEL(IP17_15_12,	SIM0_D_C,		SEL_SIMCARD_2),
-	PINMUX_IPSR_MSEL(IP17_15_12,	TS_SDAT1_D,		SEL_TSIF1_3),
-	PINMUX_IPSR_MSEL(IP17_15_12,	STP_ISD_1_D,		SEL_SSP1_1_3),
-	PINMUX_IPSR_MSEL(IP17_15_12,	RIF3_SYNC_B,		SEL_DRIF3_1),
-	PINMUX_IPSR_MSEL(IP17_15_12,	HRX2_C,			SEL_HSCIF2_2),
-
-	PINMUX_IPSR_GPSR(IP17_19_16,	USB1_PWEN),
-	PINMUX_IPSR_MSEL(IP17_19_16,	SIM0_CLK_C,		SEL_SIMCARD_2),
-	PINMUX_IPSR_MSEL(IP17_19_16,	SSI_SCK1_A,		SEL_SSI1_0),
-	PINMUX_IPSR_MSEL(IP17_19_16,	TS_SCK0_E,		SEL_TSIF0_4),
-	PINMUX_IPSR_MSEL(IP17_19_16,	STP_ISCLK_0_E,		SEL_SSP1_0_4),
-	PINMUX_IPSR_MSEL(IP17_19_16,	FMCLK_B,		SEL_FM_1),
-	PINMUX_IPSR_MSEL(IP17_19_16,	RIF2_CLK_B,		SEL_DRIF2_1),
-	PINMUX_IPSR_MSEL(IP17_19_16,	SPEEDIN_A,		SEL_SPEED_PULSE_0),
-	PINMUX_IPSR_MSEL(IP17_19_16,	HTX2_C,			SEL_HSCIF2_2),
-
-	PINMUX_IPSR_GPSR(IP17_23_20,	USB1_OVC),
-	PINMUX_IPSR_MSEL(IP17_23_20,	MSIOF1_SS2_C,		SEL_MSIOF1_2),
-	PINMUX_IPSR_MSEL(IP17_23_20,	SSI_WS1_A,		SEL_SSI1_0),
-	PINMUX_IPSR_MSEL(IP17_23_20,	TS_SDAT0_E,		SEL_TSIF0_4),
-	PINMUX_IPSR_MSEL(IP17_23_20,	STP_ISD_0_E,		SEL_SSP1_0_4),
-	PINMUX_IPSR_MSEL(IP17_23_20,	FMIN_B,			SEL_FM_1),
-	PINMUX_IPSR_MSEL(IP17_23_20,	RIF2_SYNC_B,		SEL_DRIF2_1),
-	PINMUX_IPSR_MSEL(IP17_23_20,	REMOCON_B,		SEL_REMOCON_1),
-	PINMUX_IPSR_MSEL(IP17_23_20,	HCTS2_N_C,		SEL_HSCIF2_2),
-
-	PINMUX_IPSR_GPSR(IP17_27_24,	USB30_PWEN),
-	PINMUX_IPSR_GPSR(IP17_27_24,	AUDIO_CLKOUT_B),
-	PINMUX_IPSR_MSEL(IP17_27_24,	SSI_SCK2_B,		SEL_SSI2_1),
-	PINMUX_IPSR_MSEL(IP17_27_24,	TS_SDEN1_D,		SEL_TSIF1_3),
-	PINMUX_IPSR_MSEL(IP17_27_24,	STP_ISEN_1_D,		SEL_SSP1_1_3),
-	PINMUX_IPSR_MSEL(IP17_27_24,	STP_OPWM_0_E,		SEL_SSP1_0_4),
-	PINMUX_IPSR_MSEL(IP17_27_24,	RIF3_D0_B,		SEL_DRIF3_1),
-	PINMUX_IPSR_MSEL(IP17_27_24,	TCLK2_B,		SEL_TIMER_TMU2_1),
-	PINMUX_IPSR_GPSR(IP17_27_24,	TPU0TO0),
-	PINMUX_IPSR_MSEL(IP17_27_24,	BPFCLK_C,		SEL_FM_2),
-	PINMUX_IPSR_MSEL(IP17_27_24,	HRTS2_N_C,		SEL_HSCIF2_2),
-
-	PINMUX_IPSR_GPSR(IP17_31_28,	USB30_OVC),
-	PINMUX_IPSR_GPSR(IP17_31_28,	AUDIO_CLKOUT1_B),
-	PINMUX_IPSR_MSEL(IP17_31_28,	SSI_WS2_B,		SEL_SSI2_1),
-	PINMUX_IPSR_MSEL(IP17_31_28,	TS_SPSYNC1_D,		SEL_TSIF1_3),
-	PINMUX_IPSR_MSEL(IP17_31_28,	STP_ISSYNC_1_D,		SEL_SSP1_1_3),
-	PINMUX_IPSR_MSEL(IP17_31_28,	STP_IVCXO27_0_E,	SEL_SSP1_0_4),
-	PINMUX_IPSR_MSEL(IP17_31_28,	RIF3_D1_B,		SEL_DRIF3_1),
-	PINMUX_IPSR_GPSR(IP17_31_28,	FSO_TOE_N),
-	PINMUX_IPSR_GPSR(IP17_31_28,	TPU0TO1),
-
-	/* IPSR18 */
-	PINMUX_IPSR_GPSR(IP18_3_0,	USB2_CH3_PWEN),
-	PINMUX_IPSR_GPSR(IP18_3_0,	AUDIO_CLKOUT2_B),
-	PINMUX_IPSR_MSEL(IP18_3_0,	SSI_SCK9_B,		SEL_SSI9_1),
-	PINMUX_IPSR_MSEL(IP18_3_0,	TS_SDEN0_E,		SEL_TSIF0_4),
-	PINMUX_IPSR_MSEL(IP18_3_0,	STP_ISEN_0_E,		SEL_SSP1_0_4),
-	PINMUX_IPSR_MSEL(IP18_3_0,	RIF2_D0_B,		SEL_DRIF2_1),
-	PINMUX_IPSR_GPSR(IP18_3_0,	TPU0TO2),
-	PINMUX_IPSR_MSEL(IP18_3_0,	FMCLK_C,		SEL_FM_2),
-	PINMUX_IPSR_MSEL(IP18_3_0,	FMCLK_D,		SEL_FM_3),
-
-	PINMUX_IPSR_GPSR(IP18_7_4,	USB2_CH3_OVC),
-	PINMUX_IPSR_GPSR(IP18_7_4,	AUDIO_CLKOUT3_B),
-	PINMUX_IPSR_MSEL(IP18_7_4,	SSI_WS9_B,		SEL_SSI9_1),
-	PINMUX_IPSR_MSEL(IP18_7_4,	TS_SPSYNC0_E,		SEL_TSIF0_4),
-	PINMUX_IPSR_MSEL(IP18_7_4,	STP_ISSYNC_0_E,		SEL_SSP1_0_4),
-	PINMUX_IPSR_MSEL(IP18_7_4,	RIF2_D1_B,		SEL_DRIF2_1),
-	PINMUX_IPSR_GPSR(IP18_7_4,	TPU0TO3),
-	PINMUX_IPSR_MSEL(IP18_7_4,	FMIN_C,			SEL_FM_2),
-	PINMUX_IPSR_MSEL(IP18_7_4,	FMIN_D,			SEL_FM_3),
-
-/*
- * Static pins cannot be muxed between different functions, but they
- * still need mark entries in the pinmux list. Add each static pin to
- * the list without an associated function; the sh-pfc core will then
- * skip trying to mux the pin while still applying configuration to it.
- */
-#define FM(x)	PINMUX_DATA(x##_MARK, 0),
-	PINMUX_STATIC
-#undef FM
-};
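A minimal sketch of what the FM()/PINMUX_STATIC block above expands to, assuming PINMUX_STATIC is defined earlier in this file as a list of FM(pin) entries for the fixed-function pins (for example FM(AVB_TX_CTL), as in the other R-Car sh-pfc drivers); the pin names here are illustrative:

	PINMUX_DATA(AVB_TX_CTL_MARK, 0),	/* function selector 0: never re-muxed */
	PINMUX_DATA(AVB_TXC_MARK, 0),
	/* ... one PINMUX_DATA() entry per static pin ... */

Each static pin thus lands in the table with its _MARK but no selectable function, which is what lets the core apply pin configuration while skipping the mux step.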
-
-/*
- * Pins not associated with a GPIO port.
- */
-enum {
-	GP_ASSIGN_LAST(),
-	NOGP_ALL(),
-};
-
-static const struct sh_pfc_pin pinmux_pins[] = {
-	PINMUX_GPIO_GP_ALL(),
-	PINMUX_NOGP_ALL(),
-};
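From this point on, the file repeats one pattern per hardware function: a *_pins[] array naming the physical pins (RCAR_GP_PIN(bank, bit) for GPIO-capable pins, PIN_* for the non-GPIO ones) and a matching *_mux[] array with the corresponding *_MARK selectors, where the _a/_b/_c suffixes denote alternative pinouts of the same function. A sketch of how such a pair is normally tied into a named group, assuming the SH_PFC_PIN_GROUP() helper from sh_pfc.h used by the other R-Car drivers:

	static const struct sh_pfc_pin_group pinmux_groups[] = {
		/*
		 * Pairs audio_clk_a_a_pins[] with audio_clk_a_a_mux[] under the
		 * group name "audio_clk_a_a"; entry i of the mux array selects
		 * the function for entry i of the pins array.
		 */
		SH_PFC_PIN_GROUP(audio_clk_a_a),
		SH_PFC_PIN_GROUP(audio_clk_a_b),
		/* ... */
	};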
-
-/* - AUDIO CLOCK ------------------------------------------------------------ */
-static const unsigned int audio_clk_a_a_pins[] = {
-	/* CLK A */
-	RCAR_GP_PIN(6, 22),
-};
-static const unsigned int audio_clk_a_a_mux[] = {
-	AUDIO_CLKA_A_MARK,
-};
-static const unsigned int audio_clk_a_b_pins[] = {
-	/* CLK A */
-	RCAR_GP_PIN(5, 4),
-};
-static const unsigned int audio_clk_a_b_mux[] = {
-	AUDIO_CLKA_B_MARK,
-};
-static const unsigned int audio_clk_a_c_pins[] = {
-	/* CLK A */
-	RCAR_GP_PIN(5, 19),
-};
-static const unsigned int audio_clk_a_c_mux[] = {
-	AUDIO_CLKA_C_MARK,
-};
-static const unsigned int audio_clk_b_a_pins[] = {
-	/* CLK B */
-	RCAR_GP_PIN(5, 12),
-};
-static const unsigned int audio_clk_b_a_mux[] = {
-	AUDIO_CLKB_A_MARK,
-};
-static const unsigned int audio_clk_b_b_pins[] = {
-	/* CLK B */
-	RCAR_GP_PIN(6, 23),
-};
-static const unsigned int audio_clk_b_b_mux[] = {
-	AUDIO_CLKB_B_MARK,
-};
-static const unsigned int audio_clk_c_a_pins[] = {
-	/* CLK C */
-	RCAR_GP_PIN(5, 21),
-};
-static const unsigned int audio_clk_c_a_mux[] = {
-	AUDIO_CLKC_A_MARK,
-};
-static const unsigned int audio_clk_c_b_pins[] = {
-	/* CLK C */
-	RCAR_GP_PIN(5, 0),
-};
-static const unsigned int audio_clk_c_b_mux[] = {
-	AUDIO_CLKC_B_MARK,
-};
-static const unsigned int audio_clkout_a_pins[] = {
-	/* CLKOUT */
-	RCAR_GP_PIN(5, 18),
-};
-static const unsigned int audio_clkout_a_mux[] = {
-	AUDIO_CLKOUT_A_MARK,
-};
-static const unsigned int audio_clkout_b_pins[] = {
-	/* CLKOUT */
-	RCAR_GP_PIN(6, 28),
-};
-static const unsigned int audio_clkout_b_mux[] = {
-	AUDIO_CLKOUT_B_MARK,
-};
-static const unsigned int audio_clkout_c_pins[] = {
-	/* CLKOUT */
-	RCAR_GP_PIN(5, 3),
-};
-static const unsigned int audio_clkout_c_mux[] = {
-	AUDIO_CLKOUT_C_MARK,
-};
-static const unsigned int audio_clkout_d_pins[] = {
-	/* CLKOUT */
-	RCAR_GP_PIN(5, 21),
-};
-static const unsigned int audio_clkout_d_mux[] = {
-	AUDIO_CLKOUT_D_MARK,
-};
-static const unsigned int audio_clkout1_a_pins[] = {
-	/* CLKOUT1 */
-	RCAR_GP_PIN(5, 15),
-};
-static const unsigned int audio_clkout1_a_mux[] = {
-	AUDIO_CLKOUT1_A_MARK,
-};
-static const unsigned int audio_clkout1_b_pins[] = {
-	/* CLKOUT1 */
-	RCAR_GP_PIN(6, 29),
-};
-static const unsigned int audio_clkout1_b_mux[] = {
-	AUDIO_CLKOUT1_B_MARK,
-};
-static const unsigned int audio_clkout2_a_pins[] = {
-	/* CLKOUT2 */
-	RCAR_GP_PIN(5, 16),
-};
-static const unsigned int audio_clkout2_a_mux[] = {
-	AUDIO_CLKOUT2_A_MARK,
-};
-static const unsigned int audio_clkout2_b_pins[] = {
-	/* CLKOUT2 */
-	RCAR_GP_PIN(6, 30),
-};
-static const unsigned int audio_clkout2_b_mux[] = {
-	AUDIO_CLKOUT2_B_MARK,
-};
-static const unsigned int audio_clkout3_a_pins[] = {
-	/* CLKOUT3 */
-	RCAR_GP_PIN(5, 19),
-};
-static const unsigned int audio_clkout3_a_mux[] = {
-	AUDIO_CLKOUT3_A_MARK,
-};
-static const unsigned int audio_clkout3_b_pins[] = {
-	/* CLKOUT3 */
-	RCAR_GP_PIN(6, 31),
-};
-static const unsigned int audio_clkout3_b_mux[] = {
-	AUDIO_CLKOUT3_B_MARK,
-};
-
-/* - EtherAVB --------------------------------------------------------------- */
-static const unsigned int avb_link_pins[] = {
-	/* AVB_LINK */
-	RCAR_GP_PIN(2, 12),
-};
-static const unsigned int avb_link_mux[] = {
-	AVB_LINK_MARK,
-};
-static const unsigned int avb_magic_pins[] = {
-	/* AVB_MAGIC */
-	RCAR_GP_PIN(2, 10),
-};
-static const unsigned int avb_magic_mux[] = {
-	AVB_MAGIC_MARK,
-};
-static const unsigned int avb_phy_int_pins[] = {
-	/* AVB_PHY_INT */
-	RCAR_GP_PIN(2, 11),
-};
-static const unsigned int avb_phy_int_mux[] = {
-	AVB_PHY_INT_MARK,
-};
-static const unsigned int avb_mdio_pins[] = {
-	/* AVB_MDC, AVB_MDIO */
-	RCAR_GP_PIN(2, 9), PIN_AVB_MDIO,
-};
-static const unsigned int avb_mdio_mux[] = {
-	AVB_MDC_MARK, AVB_MDIO_MARK,
-};
-static const unsigned int avb_mii_pins[] = {
-	/*
-	 * AVB_TX_CTL, AVB_TXC, AVB_TD0,
-	 * AVB_TD1, AVB_TD2, AVB_TD3,
-	 * AVB_RX_CTL, AVB_RXC, AVB_RD0,
-	 * AVB_RD1, AVB_RD2, AVB_RD3,
-	 * AVB_TXCREFCLK
-	 */
-	PIN_AVB_TX_CTL, PIN_AVB_TXC, PIN_AVB_TD0,
-	PIN_AVB_TD1, PIN_AVB_TD2, PIN_AVB_TD3,
-	PIN_AVB_RX_CTL, PIN_AVB_RXC, PIN_AVB_RD0,
-	PIN_AVB_RD1, PIN_AVB_RD2, PIN_AVB_RD3,
-	PIN_AVB_TXCREFCLK,
-};
-static const unsigned int avb_mii_mux[] = {
-	AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
-	AVB_TD1_MARK, AVB_TD2_MARK, AVB_TD3_MARK,
-	AVB_RX_CTL_MARK, AVB_RXC_MARK, AVB_RD0_MARK,
-	AVB_RD1_MARK, AVB_RD2_MARK, AVB_RD3_MARK,
-	AVB_TXCREFCLK_MARK,
-};
-static const unsigned int avb_avtp_pps_pins[] = {
-	/* AVB_AVTP_PPS */
-	RCAR_GP_PIN(2, 6),
-};
-static const unsigned int avb_avtp_pps_mux[] = {
-	AVB_AVTP_PPS_MARK,
-};
-static const unsigned int avb_avtp_match_a_pins[] = {
-	/* AVB_AVTP_MATCH_A */
-	RCAR_GP_PIN(2, 13),
-};
-static const unsigned int avb_avtp_match_a_mux[] = {
-	AVB_AVTP_MATCH_A_MARK,
-};
-static const unsigned int avb_avtp_capture_a_pins[] = {
-	/* AVB_AVTP_CAPTURE_A */
-	RCAR_GP_PIN(2, 14),
-};
-static const unsigned int avb_avtp_capture_a_mux[] = {
-	AVB_AVTP_CAPTURE_A_MARK,
-};
-static const unsigned int avb_avtp_match_b_pins[] = {
-	/* AVB_AVTP_MATCH_B */
-	RCAR_GP_PIN(1, 8),
-};
-static const unsigned int avb_avtp_match_b_mux[] = {
-	AVB_AVTP_MATCH_B_MARK,
-};
-static const unsigned int avb_avtp_capture_b_pins[] = {
-	/* AVB_AVTP_CAPTURE_B */
-	RCAR_GP_PIN(1, 11),
-};
-static const unsigned int avb_avtp_capture_b_mux[] = {
-	AVB_AVTP_CAPTURE_B_MARK,
-};
-
-/* - CAN ------------------------------------------------------------------ */
-static const unsigned int can0_data_a_pins[] = {
-	/* TX, RX */
-	RCAR_GP_PIN(1, 23),	RCAR_GP_PIN(1, 24),
-};
-static const unsigned int can0_data_a_mux[] = {
-	CAN0_TX_A_MARK,		CAN0_RX_A_MARK,
-};
-static const unsigned int can0_data_b_pins[] = {
-	/* TX, RX */
-	RCAR_GP_PIN(2, 0),	RCAR_GP_PIN(2, 1),
-};
-static const unsigned int can0_data_b_mux[] = {
-	CAN0_TX_B_MARK,		CAN0_RX_B_MARK,
-};
-static const unsigned int can1_data_pins[] = {
-	/* TX, RX */
-	RCAR_GP_PIN(1, 22),	RCAR_GP_PIN(1, 26),
-};
-static const unsigned int can1_data_mux[] = {
-	CAN1_TX_MARK,		CAN1_RX_MARK,
-};
-
-/* - CAN Clock -------------------------------------------------------------- */
-static const unsigned int can_clk_pins[] = {
-	/* CLK */
-	RCAR_GP_PIN(1, 25),
-};
-static const unsigned int can_clk_mux[] = {
-	CAN_CLK_MARK,
-};
-
-/* - CAN FD --------------------------------------------------------------- */
-static const unsigned int canfd0_data_a_pins[] = {
-	/* TX, RX */
-	RCAR_GP_PIN(1, 23),     RCAR_GP_PIN(1, 24),
-};
-static const unsigned int canfd0_data_a_mux[] = {
-	CANFD0_TX_A_MARK,       CANFD0_RX_A_MARK,
-};
-static const unsigned int canfd0_data_b_pins[] = {
-	/* TX, RX */
-	RCAR_GP_PIN(2, 0),      RCAR_GP_PIN(2, 1),
-};
-static const unsigned int canfd0_data_b_mux[] = {
-	CANFD0_TX_B_MARK,       CANFD0_RX_B_MARK,
-};
-static const unsigned int canfd1_data_pins[] = {
-	/* TX, RX */
-	RCAR_GP_PIN(1, 22),     RCAR_GP_PIN(1, 26),
-};
-static const unsigned int canfd1_data_mux[] = {
-	CANFD1_TX_MARK,         CANFD1_RX_MARK,
-};
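The groups themselves are not user-visible until they are collected into per-function name lists. A sketch of how the CAN FD groups above would typically be published, assuming the usual *_groups[] string arrays and the SH_PFC_FUNCTION() helper from sh_pfc.h:

	static const char * const canfd0_groups[] = {
		"canfd0_data_a",
		"canfd0_data_b",
	};

	static const struct sh_pfc_function pinmux_functions[] = {
		/* Exposes function "canfd0", selectable on either pin group. */
		SH_PFC_FUNCTION(canfd0),
		/* ... */
	};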
-
-/* - DRIF0 --------------------------------------------------------------- */
-static const unsigned int drif0_ctrl_a_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
-};
-static const unsigned int drif0_ctrl_a_mux[] = {
-	RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK,
-};
-static const unsigned int drif0_data0_a_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(6, 10),
-};
-static const unsigned int drif0_data0_a_mux[] = {
-	RIF0_D0_A_MARK,
-};
-static const unsigned int drif0_data1_a_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(6, 7),
-};
-static const unsigned int drif0_data1_a_mux[] = {
-	RIF0_D1_A_MARK,
-};
-static const unsigned int drif0_ctrl_b_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
-};
-static const unsigned int drif0_ctrl_b_mux[] = {
-	RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK,
-};
-static const unsigned int drif0_data0_b_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(5, 1),
-};
-static const unsigned int drif0_data0_b_mux[] = {
-	RIF0_D0_B_MARK,
-};
-static const unsigned int drif0_data1_b_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(5, 2),
-};
-static const unsigned int drif0_data1_b_mux[] = {
-	RIF0_D1_B_MARK,
-};
-static const unsigned int drif0_ctrl_c_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 15),
-};
-static const unsigned int drif0_ctrl_c_mux[] = {
-	RIF0_CLK_C_MARK, RIF0_SYNC_C_MARK,
-};
-static const unsigned int drif0_data0_c_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(5, 13),
-};
-static const unsigned int drif0_data0_c_mux[] = {
-	RIF0_D0_C_MARK,
-};
-static const unsigned int drif0_data1_c_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(5, 14),
-};
-static const unsigned int drif0_data1_c_mux[] = {
-	RIF0_D1_C_MARK,
-};
-/* - DRIF1 --------------------------------------------------------------- */
-static const unsigned int drif1_ctrl_a_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
-};
-static const unsigned int drif1_ctrl_a_mux[] = {
-	RIF1_CLK_A_MARK, RIF1_SYNC_A_MARK,
-};
-static const unsigned int drif1_data0_a_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(6, 19),
-};
-static const unsigned int drif1_data0_a_mux[] = {
-	RIF1_D0_A_MARK,
-};
-static const unsigned int drif1_data1_a_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(6, 20),
-};
-static const unsigned int drif1_data1_a_mux[] = {
-	RIF1_D1_A_MARK,
-};
-static const unsigned int drif1_ctrl_b_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 3),
-};
-static const unsigned int drif1_ctrl_b_mux[] = {
-	RIF1_CLK_B_MARK, RIF1_SYNC_B_MARK,
-};
-static const unsigned int drif1_data0_b_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(5, 7),
-};
-static const unsigned int drif1_data0_b_mux[] = {
-	RIF1_D0_B_MARK,
-};
-static const unsigned int drif1_data1_b_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(5, 8),
-};
-static const unsigned int drif1_data1_b_mux[] = {
-	RIF1_D1_B_MARK,
-};
-static const unsigned int drif1_ctrl_c_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11),
-};
-static const unsigned int drif1_ctrl_c_mux[] = {
-	RIF1_CLK_C_MARK, RIF1_SYNC_C_MARK,
-};
-static const unsigned int drif1_data0_c_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(5, 6),
-};
-static const unsigned int drif1_data0_c_mux[] = {
-	RIF1_D0_C_MARK,
-};
-static const unsigned int drif1_data1_c_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(5, 10),
-};
-static const unsigned int drif1_data1_c_mux[] = {
-	RIF1_D1_C_MARK,
-};
-/* - DRIF2 --------------------------------------------------------------- */
-static const unsigned int drif2_ctrl_a_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
-};
-static const unsigned int drif2_ctrl_a_mux[] = {
-	RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK,
-};
-static const unsigned int drif2_data0_a_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(6, 7),
-};
-static const unsigned int drif2_data0_a_mux[] = {
-	RIF2_D0_A_MARK,
-};
-static const unsigned int drif2_data1_a_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(6, 10),
-};
-static const unsigned int drif2_data1_a_mux[] = {
-	RIF2_D1_A_MARK,
-};
-static const unsigned int drif2_ctrl_b_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
-};
-static const unsigned int drif2_ctrl_b_mux[] = {
-	RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK,
-};
-static const unsigned int drif2_data0_b_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(6, 30),
-};
-static const unsigned int drif2_data0_b_mux[] = {
-	RIF2_D0_B_MARK,
-};
-static const unsigned int drif2_data1_b_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(6, 31),
-};
-static const unsigned int drif2_data1_b_mux[] = {
-	RIF2_D1_B_MARK,
-};
-/* - DRIF3 --------------------------------------------------------------- */
-static const unsigned int drif3_ctrl_a_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
-};
-static const unsigned int drif3_ctrl_a_mux[] = {
-	RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK,
-};
-static const unsigned int drif3_data0_a_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(6, 19),
-};
-static const unsigned int drif3_data0_a_mux[] = {
-	RIF3_D0_A_MARK,
-};
-static const unsigned int drif3_data1_a_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(6, 20),
-};
-static const unsigned int drif3_data1_a_mux[] = {
-	RIF3_D1_A_MARK,
-};
-static const unsigned int drif3_ctrl_b_pins[] = {
-	/* CLK, SYNC */
-	RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
-};
-static const unsigned int drif3_ctrl_b_mux[] = {
-	RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK,
-};
-static const unsigned int drif3_data0_b_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(6, 28),
-};
-static const unsigned int drif3_data0_b_mux[] = {
-	RIF3_D0_B_MARK,
-};
-static const unsigned int drif3_data1_b_pins[] = {
-	/* D1 */
-	RCAR_GP_PIN(6, 29),
-};
-static const unsigned int drif3_data1_b_mux[] = {
-	RIF3_D1_B_MARK,
-};
-
-/* - DU --------------------------------------------------------------------- */
-static const unsigned int du_rgb666_pins[] = {
-	/* R[7:2], G[7:2], B[7:2] */
-	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
-	RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
-	RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
-	RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
-	RCAR_GP_PIN(1, 7),  RCAR_GP_PIN(1, 6),  RCAR_GP_PIN(1, 5),
-	RCAR_GP_PIN(1, 4),  RCAR_GP_PIN(1, 3),  RCAR_GP_PIN(1, 2),
-};
-static const unsigned int du_rgb666_mux[] = {
-	DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
-	DU_DR3_MARK, DU_DR2_MARK,
-	DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
-	DU_DG3_MARK, DU_DG2_MARK,
-	DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
-	DU_DB3_MARK, DU_DB2_MARK,
-};
-static const unsigned int du_rgb888_pins[] = {
-	/* R[7:0], G[7:0], B[7:0] */
-	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
-	RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
-	RCAR_GP_PIN(0, 9),  RCAR_GP_PIN(0, 8),
-	RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
-	RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
-	RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16),
-	RCAR_GP_PIN(1, 7),  RCAR_GP_PIN(1, 6),  RCAR_GP_PIN(1, 5),
-	RCAR_GP_PIN(1, 4),  RCAR_GP_PIN(1, 3),  RCAR_GP_PIN(1, 2),
-	RCAR_GP_PIN(1, 1),  RCAR_GP_PIN(1, 0),
-};
-static const unsigned int du_rgb888_mux[] = {
-	DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
-	DU_DR3_MARK, DU_DR2_MARK, DU_DR1_MARK, DU_DR0_MARK,
-	DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
-	DU_DG3_MARK, DU_DG2_MARK, DU_DG1_MARK, DU_DG0_MARK,
-	DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
-	DU_DB3_MARK, DU_DB2_MARK, DU_DB1_MARK, DU_DB0_MARK,
-};
-static const unsigned int du_clk_out_0_pins[] = {
-	/* CLKOUT */
-	RCAR_GP_PIN(1, 27),
-};
-static const unsigned int du_clk_out_0_mux[] = {
-	DU_DOTCLKOUT0_MARK
-};
-static const unsigned int du_clk_out_1_pins[] = {
-	/* CLKOUT */
-	RCAR_GP_PIN(2, 3),
-};
-static const unsigned int du_clk_out_1_mux[] = {
-	DU_DOTCLKOUT1_MARK
-};
-static const unsigned int du_sync_pins[] = {
-	/* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
-	RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 4),
-};
-static const unsigned int du_sync_mux[] = {
-	DU_EXVSYNC_DU_VSYNC_MARK, DU_EXHSYNC_DU_HSYNC_MARK
-};
-static const unsigned int du_oddf_pins[] = {
-	/* EXDISP/EXODDF/EXCDE */
-	RCAR_GP_PIN(2, 2),
-};
-static const unsigned int du_oddf_mux[] = {
-	DU_EXODDF_DU_ODDF_DISP_CDE_MARK,
-};
-static const unsigned int du_cde_pins[] = {
-	/* CDE */
-	RCAR_GP_PIN(2, 0),
-};
-static const unsigned int du_cde_mux[] = {
-	DU_CDE_MARK,
-};
-static const unsigned int du_disp_pins[] = {
-	/* DISP */
-	RCAR_GP_PIN(2, 1),
-};
-static const unsigned int du_disp_mux[] = {
-	DU_DISP_MARK,
-};
-
-/* - HSCIF0 ----------------------------------------------------------------- */
-static const unsigned int hscif0_data_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
-};
-static const unsigned int hscif0_data_mux[] = {
-	HRX0_MARK, HTX0_MARK,
-};
-static const unsigned int hscif0_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 12),
-};
-static const unsigned int hscif0_clk_mux[] = {
-	HSCK0_MARK,
-};
-static const unsigned int hscif0_ctrl_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(5, 16), RCAR_GP_PIN(5, 15),
-};
-static const unsigned int hscif0_ctrl_mux[] = {
-	HRTS0_N_MARK, HCTS0_N_MARK,
-};
-/* - HSCIF1 ----------------------------------------------------------------- */
-static const unsigned int hscif1_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
-};
-static const unsigned int hscif1_data_a_mux[] = {
-	HRX1_A_MARK, HTX1_A_MARK,
-};
-static const unsigned int hscif1_clk_a_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 21),
-};
-static const unsigned int hscif1_clk_a_mux[] = {
-	HSCK1_A_MARK,
-};
-static const unsigned int hscif1_ctrl_a_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
-};
-static const unsigned int hscif1_ctrl_a_mux[] = {
-	HRTS1_N_A_MARK, HCTS1_N_A_MARK,
-};
-
-static const unsigned int hscif1_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
-};
-static const unsigned int hscif1_data_b_mux[] = {
-	HRX1_B_MARK, HTX1_B_MARK,
-};
-static const unsigned int hscif1_clk_b_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 0),
-};
-static const unsigned int hscif1_clk_b_mux[] = {
-	HSCK1_B_MARK,
-};
-static const unsigned int hscif1_ctrl_b_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
-};
-static const unsigned int hscif1_ctrl_b_mux[] = {
-	HRTS1_N_B_MARK, HCTS1_N_B_MARK,
-};
-/* - HSCIF2 ----------------------------------------------------------------- */
-static const unsigned int hscif2_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
-};
-static const unsigned int hscif2_data_a_mux[] = {
-	HRX2_A_MARK, HTX2_A_MARK,
-};
-static const unsigned int hscif2_clk_a_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 10),
-};
-static const unsigned int hscif2_clk_a_mux[] = {
-	HSCK2_A_MARK,
-};
-static const unsigned int hscif2_ctrl_a_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
-};
-static const unsigned int hscif2_ctrl_a_mux[] = {
-	HRTS2_N_A_MARK, HCTS2_N_A_MARK,
-};
-
-static const unsigned int hscif2_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
-};
-static const unsigned int hscif2_data_b_mux[] = {
-	HRX2_B_MARK, HTX2_B_MARK,
-};
-static const unsigned int hscif2_clk_b_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 21),
-};
-static const unsigned int hscif2_clk_b_mux[] = {
-	HSCK2_B_MARK,
-};
-static const unsigned int hscif2_ctrl_b_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(6, 20), RCAR_GP_PIN(6, 19),
-};
-static const unsigned int hscif2_ctrl_b_mux[] = {
-	HRTS2_N_B_MARK, HCTS2_N_B_MARK,
-};
-
-static const unsigned int hscif2_data_c_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(6, 25), RCAR_GP_PIN(6, 26),
-};
-static const unsigned int hscif2_data_c_mux[] = {
-	HRX2_C_MARK, HTX2_C_MARK,
-};
-static const unsigned int hscif2_clk_c_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 24),
-};
-static const unsigned int hscif2_clk_c_mux[] = {
-	HSCK2_C_MARK,
-};
-static const unsigned int hscif2_ctrl_c_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 27),
-};
-static const unsigned int hscif2_ctrl_c_mux[] = {
-	HRTS2_N_C_MARK, HCTS2_N_C_MARK,
-};
-/* - HSCIF3 ----------------------------------------------------------------- */
-static const unsigned int hscif3_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
-};
-static const unsigned int hscif3_data_a_mux[] = {
-	HRX3_A_MARK, HTX3_A_MARK,
-};
-static const unsigned int hscif3_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 22),
-};
-static const unsigned int hscif3_clk_mux[] = {
-	HSCK3_MARK,
-};
-static const unsigned int hscif3_ctrl_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
-};
-static const unsigned int hscif3_ctrl_mux[] = {
-	HRTS3_N_MARK, HCTS3_N_MARK,
-};
-
-static const unsigned int hscif3_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
-};
-static const unsigned int hscif3_data_b_mux[] = {
-	HRX3_B_MARK, HTX3_B_MARK,
-};
-static const unsigned int hscif3_data_c_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
-};
-static const unsigned int hscif3_data_c_mux[] = {
-	HRX3_C_MARK, HTX3_C_MARK,
-};
-static const unsigned int hscif3_data_d_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
-};
-static const unsigned int hscif3_data_d_mux[] = {
-	HRX3_D_MARK, HTX3_D_MARK,
-};
-/* - HSCIF4 ----------------------------------------------------------------- */
-static const unsigned int hscif4_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
-};
-static const unsigned int hscif4_data_a_mux[] = {
-	HRX4_A_MARK, HTX4_A_MARK,
-};
-static const unsigned int hscif4_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 11),
-};
-static const unsigned int hscif4_clk_mux[] = {
-	HSCK4_MARK,
-};
-static const unsigned int hscif4_ctrl_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14),
-};
-static const unsigned int hscif4_ctrl_mux[] = {
-	HRTS4_N_MARK, HCTS4_N_MARK,
-};
-
-static const unsigned int hscif4_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
-};
-static const unsigned int hscif4_data_b_mux[] = {
-	HRX4_B_MARK, HTX4_B_MARK,
-};
-
-/* - I2C -------------------------------------------------------------------- */
-static const unsigned int i2c0_pins[] = {
-	/* SCL, SDA */
-	RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15),
-};
-
-static const unsigned int i2c0_mux[] = {
-	SCL0_MARK, SDA0_MARK,
-};
-
-static const unsigned int i2c1_a_pins[] = {
-	/* SDA, SCL */
-	RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
-};
-static const unsigned int i2c1_a_mux[] = {
-	SDA1_A_MARK, SCL1_A_MARK,
-};
-static const unsigned int i2c1_b_pins[] = {
-	/* SDA, SCL */
-	RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 23),
-};
-static const unsigned int i2c1_b_mux[] = {
-	SDA1_B_MARK, SCL1_B_MARK,
-};
-static const unsigned int i2c2_a_pins[] = {
-	/* SDA, SCL */
-	RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
-};
-static const unsigned int i2c2_a_mux[] = {
-	SDA2_A_MARK, SCL2_A_MARK,
-};
-static const unsigned int i2c2_b_pins[] = {
-	/* SDA, SCL */
-	RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12),
-};
-static const unsigned int i2c2_b_mux[] = {
-	SDA2_B_MARK, SCL2_B_MARK,
-};
-
-static const unsigned int i2c3_pins[] = {
-	/* SCL, SDA */
-	RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
-};
-
-static const unsigned int i2c3_mux[] = {
-	SCL3_MARK, SDA3_MARK,
-};
-
-static const unsigned int i2c5_pins[] = {
-	/* SCL, SDA */
-	RCAR_GP_PIN(2, 13), RCAR_GP_PIN(2, 14),
-};
-
-static const unsigned int i2c5_mux[] = {
-	SCL5_MARK, SDA5_MARK,
-};
-
-static const unsigned int i2c6_a_pins[] = {
-	/* SDA, SCL */
-	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
-};
-static const unsigned int i2c6_a_mux[] = {
-	SDA6_A_MARK, SCL6_A_MARK,
-};
-static const unsigned int i2c6_b_pins[] = {
-	/* SDA, SCL */
-	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
-};
-static const unsigned int i2c6_b_mux[] = {
-	SDA6_B_MARK, SCL6_B_MARK,
-};
-static const unsigned int i2c6_c_pins[] = {
-	/* SDA, SCL */
-	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
-};
-static const unsigned int i2c6_c_mux[] = {
-	SDA6_C_MARK, SCL6_C_MARK,
-};
-
-/* - INTC-EX ---------------------------------------------------------------- */
-static const unsigned int intc_ex_irq0_pins[] = {
-	/* IRQ0 */
-	RCAR_GP_PIN(2, 0),
-};
-static const unsigned int intc_ex_irq0_mux[] = {
-	IRQ0_MARK,
-};
-static const unsigned int intc_ex_irq1_pins[] = {
-	/* IRQ1 */
-	RCAR_GP_PIN(2, 1),
-};
-static const unsigned int intc_ex_irq1_mux[] = {
-	IRQ1_MARK,
-};
-static const unsigned int intc_ex_irq2_pins[] = {
-	/* IRQ2 */
-	RCAR_GP_PIN(2, 2),
-};
-static const unsigned int intc_ex_irq2_mux[] = {
-	IRQ2_MARK,
-};
-static const unsigned int intc_ex_irq3_pins[] = {
-	/* IRQ3 */
-	RCAR_GP_PIN(2, 3),
-};
-static const unsigned int intc_ex_irq3_mux[] = {
-	IRQ3_MARK,
-};
-static const unsigned int intc_ex_irq4_pins[] = {
-	/* IRQ4 */
-	RCAR_GP_PIN(2, 4),
-};
-static const unsigned int intc_ex_irq4_mux[] = {
-	IRQ4_MARK,
-};
-static const unsigned int intc_ex_irq5_pins[] = {
-	/* IRQ5 */
-	RCAR_GP_PIN(2, 5),
-};
-static const unsigned int intc_ex_irq5_mux[] = {
-	IRQ5_MARK,
-};
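Peripheral drivers never touch these tables directly; they ask the generic pinctrl framework for a named state, and the board description maps that state onto the group and function names defined here. A consumer-side sketch using the standard pinctrl consumer API (illustrative only; the helper name below is made up, and most drivers simply rely on the default state being applied automatically at probe time):

	#include <linux/err.h>
	#include <linux/pinctrl/consumer.h>

	static int example_apply_default_pins(struct device *dev)
	{
		struct pinctrl *p = devm_pinctrl_get(dev);
		struct pinctrl_state *s;

		if (IS_ERR(p))
			return PTR_ERR(p);

		/* "default" maps to whatever groups/functions the board chose. */
		s = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
		if (IS_ERR(s))
			return PTR_ERR(s);

		return pinctrl_select_state(p, s);
	}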
-
-/* - MSIOF0 ----------------------------------------------------------------- */
-static const unsigned int msiof0_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 17),
-};
-static const unsigned int msiof0_clk_mux[] = {
-	MSIOF0_SCK_MARK,
-};
-static const unsigned int msiof0_sync_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(5, 18),
-};
-static const unsigned int msiof0_sync_mux[] = {
-	MSIOF0_SYNC_MARK,
-};
-static const unsigned int msiof0_ss1_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(5, 19),
-};
-static const unsigned int msiof0_ss1_mux[] = {
-	MSIOF0_SS1_MARK,
-};
-static const unsigned int msiof0_ss2_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(5, 21),
-};
-static const unsigned int msiof0_ss2_mux[] = {
-	MSIOF0_SS2_MARK,
-};
-static const unsigned int msiof0_txd_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(5, 20),
-};
-static const unsigned int msiof0_txd_mux[] = {
-	MSIOF0_TXD_MARK,
-};
-static const unsigned int msiof0_rxd_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(5, 22),
-};
-static const unsigned int msiof0_rxd_mux[] = {
-	MSIOF0_RXD_MARK,
-};
-/* - MSIOF1 ----------------------------------------------------------------- */
-static const unsigned int msiof1_clk_a_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 8),
-};
-static const unsigned int msiof1_clk_a_mux[] = {
-	MSIOF1_SCK_A_MARK,
-};
-static const unsigned int msiof1_sync_a_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(6, 9),
-};
-static const unsigned int msiof1_sync_a_mux[] = {
-	MSIOF1_SYNC_A_MARK,
-};
-static const unsigned int msiof1_ss1_a_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(6, 5),
-};
-static const unsigned int msiof1_ss1_a_mux[] = {
-	MSIOF1_SS1_A_MARK,
-};
-static const unsigned int msiof1_ss2_a_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(6, 6),
-};
-static const unsigned int msiof1_ss2_a_mux[] = {
-	MSIOF1_SS2_A_MARK,
-};
-static const unsigned int msiof1_txd_a_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(6, 7),
-};
-static const unsigned int msiof1_txd_a_mux[] = {
-	MSIOF1_TXD_A_MARK,
-};
-static const unsigned int msiof1_rxd_a_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(6, 10),
-};
-static const unsigned int msiof1_rxd_a_mux[] = {
-	MSIOF1_RXD_A_MARK,
-};
-static const unsigned int msiof1_clk_b_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 9),
-};
-static const unsigned int msiof1_clk_b_mux[] = {
-	MSIOF1_SCK_B_MARK,
-};
-static const unsigned int msiof1_sync_b_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(5, 3),
-};
-static const unsigned int msiof1_sync_b_mux[] = {
-	MSIOF1_SYNC_B_MARK,
-};
-static const unsigned int msiof1_ss1_b_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(5, 4),
-};
-static const unsigned int msiof1_ss1_b_mux[] = {
-	MSIOF1_SS1_B_MARK,
-};
-static const unsigned int msiof1_ss2_b_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(5, 0),
-};
-static const unsigned int msiof1_ss2_b_mux[] = {
-	MSIOF1_SS2_B_MARK,
-};
-static const unsigned int msiof1_txd_b_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(5, 8),
-};
-static const unsigned int msiof1_txd_b_mux[] = {
-	MSIOF1_TXD_B_MARK,
-};
-static const unsigned int msiof1_rxd_b_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(5, 7),
-};
-static const unsigned int msiof1_rxd_b_mux[] = {
-	MSIOF1_RXD_B_MARK,
-};
-static const unsigned int msiof1_clk_c_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 17),
-};
-static const unsigned int msiof1_clk_c_mux[] = {
-	MSIOF1_SCK_C_MARK,
-};
-static const unsigned int msiof1_sync_c_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(6, 18),
-};
-static const unsigned int msiof1_sync_c_mux[] = {
-	MSIOF1_SYNC_C_MARK,
-};
-static const unsigned int msiof1_ss1_c_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(6, 21),
-};
-static const unsigned int msiof1_ss1_c_mux[] = {
-	MSIOF1_SS1_C_MARK,
-};
-static const unsigned int msiof1_ss2_c_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(6, 27),
-};
-static const unsigned int msiof1_ss2_c_mux[] = {
-	MSIOF1_SS2_C_MARK,
-};
-static const unsigned int msiof1_txd_c_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(6, 20),
-};
-static const unsigned int msiof1_txd_c_mux[] = {
-	MSIOF1_TXD_C_MARK,
-};
-static const unsigned int msiof1_rxd_c_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(6, 19),
-};
-static const unsigned int msiof1_rxd_c_mux[] = {
-	MSIOF1_RXD_C_MARK,
-};
-static const unsigned int msiof1_clk_d_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 12),
-};
-static const unsigned int msiof1_clk_d_mux[] = {
-	MSIOF1_SCK_D_MARK,
-};
-static const unsigned int msiof1_sync_d_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(5, 15),
-};
-static const unsigned int msiof1_sync_d_mux[] = {
-	MSIOF1_SYNC_D_MARK,
-};
-static const unsigned int msiof1_ss1_d_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(5, 16),
-};
-static const unsigned int msiof1_ss1_d_mux[] = {
-	MSIOF1_SS1_D_MARK,
-};
-static const unsigned int msiof1_ss2_d_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(5, 21),
-};
-static const unsigned int msiof1_ss2_d_mux[] = {
-	MSIOF1_SS2_D_MARK,
-};
-static const unsigned int msiof1_txd_d_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(5, 14),
-};
-static const unsigned int msiof1_txd_d_mux[] = {
-	MSIOF1_TXD_D_MARK,
-};
-static const unsigned int msiof1_rxd_d_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(5, 13),
-};
-static const unsigned int msiof1_rxd_d_mux[] = {
-	MSIOF1_RXD_D_MARK,
-};
-static const unsigned int msiof1_clk_e_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(3, 0),
-};
-static const unsigned int msiof1_clk_e_mux[] = {
-	MSIOF1_SCK_E_MARK,
-};
-static const unsigned int msiof1_sync_e_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(3, 1),
-};
-static const unsigned int msiof1_sync_e_mux[] = {
-	MSIOF1_SYNC_E_MARK,
-};
-static const unsigned int msiof1_ss1_e_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(3, 4),
-};
-static const unsigned int msiof1_ss1_e_mux[] = {
-	MSIOF1_SS1_E_MARK,
-};
-static const unsigned int msiof1_ss2_e_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(3, 5),
-};
-static const unsigned int msiof1_ss2_e_mux[] = {
-	MSIOF1_SS2_E_MARK,
-};
-static const unsigned int msiof1_txd_e_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(3, 3),
-};
-static const unsigned int msiof1_txd_e_mux[] = {
-	MSIOF1_TXD_E_MARK,
-};
-static const unsigned int msiof1_rxd_e_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(3, 2),
-};
-static const unsigned int msiof1_rxd_e_mux[] = {
-	MSIOF1_RXD_E_MARK,
-};
-static const unsigned int msiof1_clk_f_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 23),
-};
-static const unsigned int msiof1_clk_f_mux[] = {
-	MSIOF1_SCK_F_MARK,
-};
-static const unsigned int msiof1_sync_f_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(5, 24),
-};
-static const unsigned int msiof1_sync_f_mux[] = {
-	MSIOF1_SYNC_F_MARK,
-};
-static const unsigned int msiof1_ss1_f_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(6, 1),
-};
-static const unsigned int msiof1_ss1_f_mux[] = {
-	MSIOF1_SS1_F_MARK,
-};
-static const unsigned int msiof1_ss2_f_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(6, 2),
-};
-static const unsigned int msiof1_ss2_f_mux[] = {
-	MSIOF1_SS2_F_MARK,
-};
-static const unsigned int msiof1_txd_f_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(6, 0),
-};
-static const unsigned int msiof1_txd_f_mux[] = {
-	MSIOF1_TXD_F_MARK,
-};
-static const unsigned int msiof1_rxd_f_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(5, 25),
-};
-static const unsigned int msiof1_rxd_f_mux[] = {
-	MSIOF1_RXD_F_MARK,
-};
-static const unsigned int msiof1_clk_g_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(3, 6),
-};
-static const unsigned int msiof1_clk_g_mux[] = {
-	MSIOF1_SCK_G_MARK,
-};
-static const unsigned int msiof1_sync_g_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(3, 7),
-};
-static const unsigned int msiof1_sync_g_mux[] = {
-	MSIOF1_SYNC_G_MARK,
-};
-static const unsigned int msiof1_ss1_g_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(3, 10),
-};
-static const unsigned int msiof1_ss1_g_mux[] = {
-	MSIOF1_SS1_G_MARK,
-};
-static const unsigned int msiof1_ss2_g_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(3, 11),
-};
-static const unsigned int msiof1_ss2_g_mux[] = {
-	MSIOF1_SS2_G_MARK,
-};
-static const unsigned int msiof1_txd_g_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(3, 9),
-};
-static const unsigned int msiof1_txd_g_mux[] = {
-	MSIOF1_TXD_G_MARK,
-};
-static const unsigned int msiof1_rxd_g_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(3, 8),
-};
-static const unsigned int msiof1_rxd_g_mux[] = {
-	MSIOF1_RXD_G_MARK,
-};
-/* - MSIOF2 ----------------------------------------------------------------- */
-static const unsigned int msiof2_clk_a_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 9),
-};
-static const unsigned int msiof2_clk_a_mux[] = {
-	MSIOF2_SCK_A_MARK,
-};
-static const unsigned int msiof2_sync_a_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(1, 8),
-};
-static const unsigned int msiof2_sync_a_mux[] = {
-	MSIOF2_SYNC_A_MARK,
-};
-static const unsigned int msiof2_ss1_a_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(1, 6),
-};
-static const unsigned int msiof2_ss1_a_mux[] = {
-	MSIOF2_SS1_A_MARK,
-};
-static const unsigned int msiof2_ss2_a_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(1, 7),
-};
-static const unsigned int msiof2_ss2_a_mux[] = {
-	MSIOF2_SS2_A_MARK,
-};
-static const unsigned int msiof2_txd_a_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(1, 11),
-};
-static const unsigned int msiof2_txd_a_mux[] = {
-	MSIOF2_TXD_A_MARK,
-};
-static const unsigned int msiof2_rxd_a_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(1, 10),
-};
-static const unsigned int msiof2_rxd_a_mux[] = {
-	MSIOF2_RXD_A_MARK,
-};
-static const unsigned int msiof2_clk_b_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(0, 4),
-};
-static const unsigned int msiof2_clk_b_mux[] = {
-	MSIOF2_SCK_B_MARK,
-};
-static const unsigned int msiof2_sync_b_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(0, 5),
-};
-static const unsigned int msiof2_sync_b_mux[] = {
-	MSIOF2_SYNC_B_MARK,
-};
-static const unsigned int msiof2_ss1_b_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(0, 0),
-};
-static const unsigned int msiof2_ss1_b_mux[] = {
-	MSIOF2_SS1_B_MARK,
-};
-static const unsigned int msiof2_ss2_b_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(0, 1),
-};
-static const unsigned int msiof2_ss2_b_mux[] = {
-	MSIOF2_SS2_B_MARK,
-};
-static const unsigned int msiof2_txd_b_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(0, 7),
-};
-static const unsigned int msiof2_txd_b_mux[] = {
-	MSIOF2_TXD_B_MARK,
-};
-static const unsigned int msiof2_rxd_b_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(0, 6),
-};
-static const unsigned int msiof2_rxd_b_mux[] = {
-	MSIOF2_RXD_B_MARK,
-};
-static const unsigned int msiof2_clk_c_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(2, 12),
-};
-static const unsigned int msiof2_clk_c_mux[] = {
-	MSIOF2_SCK_C_MARK,
-};
-static const unsigned int msiof2_sync_c_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(2, 11),
-};
-static const unsigned int msiof2_sync_c_mux[] = {
-	MSIOF2_SYNC_C_MARK,
-};
-static const unsigned int msiof2_ss1_c_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(2, 10),
-};
-static const unsigned int msiof2_ss1_c_mux[] = {
-	MSIOF2_SS1_C_MARK,
-};
-static const unsigned int msiof2_ss2_c_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(2, 9),
-};
-static const unsigned int msiof2_ss2_c_mux[] = {
-	MSIOF2_SS2_C_MARK,
-};
-static const unsigned int msiof2_txd_c_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(2, 14),
-};
-static const unsigned int msiof2_txd_c_mux[] = {
-	MSIOF2_TXD_C_MARK,
-};
-static const unsigned int msiof2_rxd_c_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(2, 13),
-};
-static const unsigned int msiof2_rxd_c_mux[] = {
-	MSIOF2_RXD_C_MARK,
-};
-static const unsigned int msiof2_clk_d_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(0, 8),
-};
-static const unsigned int msiof2_clk_d_mux[] = {
-	MSIOF2_SCK_D_MARK,
-};
-static const unsigned int msiof2_sync_d_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(0, 9),
-};
-static const unsigned int msiof2_sync_d_mux[] = {
-	MSIOF2_SYNC_D_MARK,
-};
-static const unsigned int msiof2_ss1_d_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(0, 12),
-};
-static const unsigned int msiof2_ss1_d_mux[] = {
-	MSIOF2_SS1_D_MARK,
-};
-static const unsigned int msiof2_ss2_d_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(0, 13),
-};
-static const unsigned int msiof2_ss2_d_mux[] = {
-	MSIOF2_SS2_D_MARK,
-};
-static const unsigned int msiof2_txd_d_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(0, 11),
-};
-static const unsigned int msiof2_txd_d_mux[] = {
-	MSIOF2_TXD_D_MARK,
-};
-static const unsigned int msiof2_rxd_d_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(0, 10),
-};
-static const unsigned int msiof2_rxd_d_mux[] = {
-	MSIOF2_RXD_D_MARK,
-};
-/* - MSIOF3 ----------------------------------------------------------------- */
-static const unsigned int msiof3_clk_a_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(0, 0),
-};
-static const unsigned int msiof3_clk_a_mux[] = {
-	MSIOF3_SCK_A_MARK,
-};
-static const unsigned int msiof3_sync_a_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(0, 1),
-};
-static const unsigned int msiof3_sync_a_mux[] = {
-	MSIOF3_SYNC_A_MARK,
-};
-static const unsigned int msiof3_ss1_a_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(0, 14),
-};
-static const unsigned int msiof3_ss1_a_mux[] = {
-	MSIOF3_SS1_A_MARK,
-};
-static const unsigned int msiof3_ss2_a_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(0, 15),
-};
-static const unsigned int msiof3_ss2_a_mux[] = {
-	MSIOF3_SS2_A_MARK,
-};
-static const unsigned int msiof3_txd_a_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(0, 3),
-};
-static const unsigned int msiof3_txd_a_mux[] = {
-	MSIOF3_TXD_A_MARK,
-};
-static const unsigned int msiof3_rxd_a_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(0, 2),
-};
-static const unsigned int msiof3_rxd_a_mux[] = {
-	MSIOF3_RXD_A_MARK,
-};
-static const unsigned int msiof3_clk_b_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 2),
-};
-static const unsigned int msiof3_clk_b_mux[] = {
-	MSIOF3_SCK_B_MARK,
-};
-static const unsigned int msiof3_sync_b_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(1, 0),
-};
-static const unsigned int msiof3_sync_b_mux[] = {
-	MSIOF3_SYNC_B_MARK,
-};
-static const unsigned int msiof3_ss1_b_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(1, 4),
-};
-static const unsigned int msiof3_ss1_b_mux[] = {
-	MSIOF3_SS1_B_MARK,
-};
-static const unsigned int msiof3_ss2_b_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(1, 5),
-};
-static const unsigned int msiof3_ss2_b_mux[] = {
-	MSIOF3_SS2_B_MARK,
-};
-static const unsigned int msiof3_txd_b_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(1, 1),
-};
-static const unsigned int msiof3_txd_b_mux[] = {
-	MSIOF3_TXD_B_MARK,
-};
-static const unsigned int msiof3_rxd_b_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(1, 3),
-};
-static const unsigned int msiof3_rxd_b_mux[] = {
-	MSIOF3_RXD_B_MARK,
-};
-static const unsigned int msiof3_clk_c_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 12),
-};
-static const unsigned int msiof3_clk_c_mux[] = {
-	MSIOF3_SCK_C_MARK,
-};
-static const unsigned int msiof3_sync_c_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(1, 13),
-};
-static const unsigned int msiof3_sync_c_mux[] = {
-	MSIOF3_SYNC_C_MARK,
-};
-static const unsigned int msiof3_txd_c_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(1, 15),
-};
-static const unsigned int msiof3_txd_c_mux[] = {
-	MSIOF3_TXD_C_MARK,
-};
-static const unsigned int msiof3_rxd_c_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(1, 14),
-};
-static const unsigned int msiof3_rxd_c_mux[] = {
-	MSIOF3_RXD_C_MARK,
-};
-static const unsigned int msiof3_clk_d_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 22),
-};
-static const unsigned int msiof3_clk_d_mux[] = {
-	MSIOF3_SCK_D_MARK,
-};
-static const unsigned int msiof3_sync_d_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(1, 23),
-};
-static const unsigned int msiof3_sync_d_mux[] = {
-	MSIOF3_SYNC_D_MARK,
-};
-static const unsigned int msiof3_ss1_d_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(1, 26),
-};
-static const unsigned int msiof3_ss1_d_mux[] = {
-	MSIOF3_SS1_D_MARK,
-};
-static const unsigned int msiof3_txd_d_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(1, 25),
-};
-static const unsigned int msiof3_txd_d_mux[] = {
-	MSIOF3_TXD_D_MARK,
-};
-static const unsigned int msiof3_rxd_d_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(1, 24),
-};
-static const unsigned int msiof3_rxd_d_mux[] = {
-	MSIOF3_RXD_D_MARK,
-};
-static const unsigned int msiof3_clk_e_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(2, 3),
-};
-static const unsigned int msiof3_clk_e_mux[] = {
-	MSIOF3_SCK_E_MARK,
-};
-static const unsigned int msiof3_sync_e_pins[] = {
-	/* SYNC */
-	RCAR_GP_PIN(2, 2),
-};
-static const unsigned int msiof3_sync_e_mux[] = {
-	MSIOF3_SYNC_E_MARK,
-};
-static const unsigned int msiof3_ss1_e_pins[] = {
-	/* SS1 */
-	RCAR_GP_PIN(2, 1),
-};
-static const unsigned int msiof3_ss1_e_mux[] = {
-	MSIOF3_SS1_E_MARK,
-};
-static const unsigned int msiof3_ss2_e_pins[] = {
-	/* SS2 */
-	RCAR_GP_PIN(2, 0),
-};
-static const unsigned int msiof3_ss2_e_mux[] = {
-	MSIOF3_SS2_E_MARK,
-};
-static const unsigned int msiof3_txd_e_pins[] = {
-	/* TXD */
-	RCAR_GP_PIN(2, 5),
-};
-static const unsigned int msiof3_txd_e_mux[] = {
-	MSIOF3_TXD_E_MARK,
-};
-static const unsigned int msiof3_rxd_e_pins[] = {
-	/* RXD */
-	RCAR_GP_PIN(2, 4),
-};
-static const unsigned int msiof3_rxd_e_mux[] = {
-	MSIOF3_RXD_E_MARK,
-};
-
-/* - PWM0 --------------------------------------------------------------------*/
-static const unsigned int pwm0_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(2, 6),
-};
-static const unsigned int pwm0_mux[] = {
-	PWM0_MARK,
-};
-/* - PWM1 --------------------------------------------------------------------*/
-static const unsigned int pwm1_a_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(2, 7),
-};
-static const unsigned int pwm1_a_mux[] = {
-	PWM1_A_MARK,
-};
-static const unsigned int pwm1_b_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(1, 8),
-};
-static const unsigned int pwm1_b_mux[] = {
-	PWM1_B_MARK,
-};
-/* - PWM2 --------------------------------------------------------------------*/
-static const unsigned int pwm2_a_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(2, 8),
-};
-static const unsigned int pwm2_a_mux[] = {
-	PWM2_A_MARK,
-};
-static const unsigned int pwm2_b_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(1, 11),
-};
-static const unsigned int pwm2_b_mux[] = {
-	PWM2_B_MARK,
-};
-/* - PWM3 --------------------------------------------------------------------*/
-static const unsigned int pwm3_a_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(1, 0),
-};
-static const unsigned int pwm3_a_mux[] = {
-	PWM3_A_MARK,
-};
-static const unsigned int pwm3_b_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(2, 2),
-};
-static const unsigned int pwm3_b_mux[] = {
-	PWM3_B_MARK,
-};
-/* - PWM4 --------------------------------------------------------------------*/
-static const unsigned int pwm4_a_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(1, 1),
-};
-static const unsigned int pwm4_a_mux[] = {
-	PWM4_A_MARK,
-};
-static const unsigned int pwm4_b_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(2, 3),
-};
-static const unsigned int pwm4_b_mux[] = {
-	PWM4_B_MARK,
-};
-/* - PWM5 --------------------------------------------------------------------*/
-static const unsigned int pwm5_a_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(1, 2),
-};
-static const unsigned int pwm5_a_mux[] = {
-	PWM5_A_MARK,
-};
-static const unsigned int pwm5_b_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(2, 4),
-};
-static const unsigned int pwm5_b_mux[] = {
-	PWM5_B_MARK,
-};
-/* - PWM6 --------------------------------------------------------------------*/
-static const unsigned int pwm6_a_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(1, 3),
-};
-static const unsigned int pwm6_a_mux[] = {
-	PWM6_A_MARK,
-};
-static const unsigned int pwm6_b_pins[] = {
-	/* PWM */
-	RCAR_GP_PIN(2, 5),
-};
-static const unsigned int pwm6_b_mux[] = {
-	PWM6_B_MARK,
-};
-
-/* - SATA --------------------------------------------------------------------*/
-static const unsigned int sata0_devslp_a_pins[] = {
-	/* DEVSLP */
-	RCAR_GP_PIN(6, 16),
-};
-static const unsigned int sata0_devslp_a_mux[] = {
-	SATA_DEVSLP_A_MARK,
-};
-static const unsigned int sata0_devslp_b_pins[] = {
-	/* DEVSLP */
-	RCAR_GP_PIN(4, 6),
-};
-static const unsigned int sata0_devslp_b_mux[] = {
-	SATA_DEVSLP_B_MARK,
-};
-
-/* - SCIF0 ------------------------------------------------------------------ */
-static const unsigned int scif0_data_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
-};
-static const unsigned int scif0_data_mux[] = {
-	RX0_MARK, TX0_MARK,
-};
-static const unsigned int scif0_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 0),
-};
-static const unsigned int scif0_clk_mux[] = {
-	SCK0_MARK,
-};
-static const unsigned int scif0_ctrl_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
-};
-static const unsigned int scif0_ctrl_mux[] = {
-	RTS0_N_MARK, CTS0_N_MARK,
-};
-/* - SCIF1 ------------------------------------------------------------------ */
-static const unsigned int scif1_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
-};
-static const unsigned int scif1_data_a_mux[] = {
-	RX1_A_MARK, TX1_A_MARK,
-};
-static const unsigned int scif1_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 21),
-};
-static const unsigned int scif1_clk_mux[] = {
-	SCK1_MARK,
-};
-static const unsigned int scif1_ctrl_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
-};
-static const unsigned int scif1_ctrl_mux[] = {
-	RTS1_N_MARK, CTS1_N_MARK,
-};
-
-static const unsigned int scif1_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 25),
-};
-static const unsigned int scif1_data_b_mux[] = {
-	RX1_B_MARK, TX1_B_MARK,
-};
-/* - SCIF2 ------------------------------------------------------------------ */
-static const unsigned int scif2_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
-};
-static const unsigned int scif2_data_a_mux[] = {
-	RX2_A_MARK, TX2_A_MARK,
-};
-static const unsigned int scif2_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 9),
-};
-static const unsigned int scif2_clk_mux[] = {
-	SCK2_MARK,
-};
-static const unsigned int scif2_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
-};
-static const unsigned int scif2_data_b_mux[] = {
-	RX2_B_MARK, TX2_B_MARK,
-};
-/* - SCIF3 ------------------------------------------------------------------ */
-static const unsigned int scif3_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
-};
-static const unsigned int scif3_data_a_mux[] = {
-	RX3_A_MARK, TX3_A_MARK,
-};
-static const unsigned int scif3_clk_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 22),
-};
-static const unsigned int scif3_clk_mux[] = {
-	SCK3_MARK,
-};
-static const unsigned int scif3_ctrl_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
-};
-static const unsigned int scif3_ctrl_mux[] = {
-	RTS3_N_MARK, CTS3_N_MARK,
-};
-static const unsigned int scif3_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
-};
-static const unsigned int scif3_data_b_mux[] = {
-	RX3_B_MARK, TX3_B_MARK,
-};
-/* - SCIF4 ------------------------------------------------------------------ */
-static const unsigned int scif4_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 12),
-};
-static const unsigned int scif4_data_a_mux[] = {
-	RX4_A_MARK, TX4_A_MARK,
-};
-static const unsigned int scif4_clk_a_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(2, 10),
-};
-static const unsigned int scif4_clk_a_mux[] = {
-	SCK4_A_MARK,
-};
-static const unsigned int scif4_ctrl_a_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 13),
-};
-static const unsigned int scif4_ctrl_a_mux[] = {
-	RTS4_N_A_MARK, CTS4_N_A_MARK,
-};
-static const unsigned int scif4_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
-};
-static const unsigned int scif4_data_b_mux[] = {
-	RX4_B_MARK, TX4_B_MARK,
-};
-static const unsigned int scif4_clk_b_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(1, 5),
-};
-static const unsigned int scif4_clk_b_mux[] = {
-	SCK4_B_MARK,
-};
-static const unsigned int scif4_ctrl_b_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
-};
-static const unsigned int scif4_ctrl_b_mux[] = {
-	RTS4_N_B_MARK, CTS4_N_B_MARK,
-};
-static const unsigned int scif4_data_c_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
-};
-static const unsigned int scif4_data_c_mux[] = {
-	RX4_C_MARK, TX4_C_MARK,
-};
-static const unsigned int scif4_clk_c_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(0, 8),
-};
-static const unsigned int scif4_clk_c_mux[] = {
-	SCK4_C_MARK,
-};
-static const unsigned int scif4_ctrl_c_pins[] = {
-	/* RTS, CTS */
-	RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
-};
-static const unsigned int scif4_ctrl_c_mux[] = {
-	RTS4_N_C_MARK, CTS4_N_C_MARK,
-};
-/* - SCIF5 ------------------------------------------------------------------ */
-static const unsigned int scif5_data_a_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
-};
-static const unsigned int scif5_data_a_mux[] = {
-	RX5_A_MARK, TX5_A_MARK,
-};
-static const unsigned int scif5_clk_a_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(6, 21),
-};
-static const unsigned int scif5_clk_a_mux[] = {
-	SCK5_A_MARK,
-};
-static const unsigned int scif5_data_b_pins[] = {
-	/* RX, TX */
-	RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 18),
-};
-static const unsigned int scif5_data_b_mux[] = {
-	RX5_B_MARK, TX5_B_MARK,
-};
-static const unsigned int scif5_clk_b_pins[] = {
-	/* SCK */
-	RCAR_GP_PIN(5, 0),
-};
-static const unsigned int scif5_clk_b_mux[] = {
-	SCK5_B_MARK,
-};
-
-/* - SCIF Clock ------------------------------------------------------------- */
-static const unsigned int scif_clk_a_pins[] = {
-	/* SCIF_CLK */
-	RCAR_GP_PIN(6, 23),
-};
-static const unsigned int scif_clk_a_mux[] = {
-	SCIF_CLK_A_MARK,
-};
-static const unsigned int scif_clk_b_pins[] = {
-	/* SCIF_CLK */
-	RCAR_GP_PIN(5, 9),
-};
-static const unsigned int scif_clk_b_mux[] = {
-	SCIF_CLK_B_MARK,
-};
-
-/* - SDHI0 ------------------------------------------------------------------ */
-static const unsigned int sdhi0_data1_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(3, 2),
-};
-static const unsigned int sdhi0_data1_mux[] = {
-	SD0_DAT0_MARK,
-};
-static const unsigned int sdhi0_data4_pins[] = {
-	/* D[0:3] */
-	RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3),
-	RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
-};
-static const unsigned int sdhi0_data4_mux[] = {
-	SD0_DAT0_MARK, SD0_DAT1_MARK,
-	SD0_DAT2_MARK, SD0_DAT3_MARK,
-};
-static const unsigned int sdhi0_ctrl_pins[] = {
-	/* CLK, CMD */
-	RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
-};
-static const unsigned int sdhi0_ctrl_mux[] = {
-	SD0_CLK_MARK, SD0_CMD_MARK,
-};
-static const unsigned int sdhi0_cd_pins[] = {
-	/* CD */
-	RCAR_GP_PIN(3, 12),
-};
-static const unsigned int sdhi0_cd_mux[] = {
-	SD0_CD_MARK,
-};
-static const unsigned int sdhi0_wp_pins[] = {
-	/* WP */
-	RCAR_GP_PIN(3, 13),
-};
-static const unsigned int sdhi0_wp_mux[] = {
-	SD0_WP_MARK,
-};
-/* - SDHI1 ------------------------------------------------------------------ */
-static const unsigned int sdhi1_data1_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(3, 8),
-};
-static const unsigned int sdhi1_data1_mux[] = {
-	SD1_DAT0_MARK,
-};
-static const unsigned int sdhi1_data4_pins[] = {
-	/* D[0:3] */
-	RCAR_GP_PIN(3, 8),  RCAR_GP_PIN(3, 9),
-	RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
-};
-static const unsigned int sdhi1_data4_mux[] = {
-	SD1_DAT0_MARK, SD1_DAT1_MARK,
-	SD1_DAT2_MARK, SD1_DAT3_MARK,
-};
-static const unsigned int sdhi1_ctrl_pins[] = {
-	/* CLK, CMD */
-	RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
-};
-static const unsigned int sdhi1_ctrl_mux[] = {
-	SD1_CLK_MARK, SD1_CMD_MARK,
-};
-static const unsigned int sdhi1_cd_pins[] = {
-	/* CD */
-	RCAR_GP_PIN(3, 14),
-};
-static const unsigned int sdhi1_cd_mux[] = {
-	SD1_CD_MARK,
-};
-static const unsigned int sdhi1_wp_pins[] = {
-	/* WP */
-	RCAR_GP_PIN(3, 15),
-};
-static const unsigned int sdhi1_wp_mux[] = {
-	SD1_WP_MARK,
-};
-/* - SDHI2 ------------------------------------------------------------------ */
-static const unsigned int sdhi2_data1_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(4, 2),
-};
-static const unsigned int sdhi2_data1_mux[] = {
-	SD2_DAT0_MARK,
-};
-static const unsigned int sdhi2_data4_pins[] = {
-	/* D[0:3] */
-	RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
-	RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
-};
-static const unsigned int sdhi2_data4_mux[] = {
-	SD2_DAT0_MARK, SD2_DAT1_MARK,
-	SD2_DAT2_MARK, SD2_DAT3_MARK,
-};
-static const unsigned int sdhi2_data8_pins[] = {
-	/* D[0:7] */
-	RCAR_GP_PIN(4, 2),  RCAR_GP_PIN(4, 3),
-	RCAR_GP_PIN(4, 4),  RCAR_GP_PIN(4, 5),
-	RCAR_GP_PIN(3, 8),  RCAR_GP_PIN(3, 9),
-	RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
-};
-static const unsigned int sdhi2_data8_mux[] = {
-	SD2_DAT0_MARK, SD2_DAT1_MARK,
-	SD2_DAT2_MARK, SD2_DAT3_MARK,
-	SD2_DAT4_MARK, SD2_DAT5_MARK,
-	SD2_DAT6_MARK, SD2_DAT7_MARK,
-};
-static const unsigned int sdhi2_ctrl_pins[] = {
-	/* CLK, CMD */
-	RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1),
-};
-static const unsigned int sdhi2_ctrl_mux[] = {
-	SD2_CLK_MARK, SD2_CMD_MARK,
-};
-static const unsigned int sdhi2_cd_a_pins[] = {
-	/* CD */
-	RCAR_GP_PIN(4, 13),
-};
-static const unsigned int sdhi2_cd_a_mux[] = {
-	SD2_CD_A_MARK,
-};
-static const unsigned int sdhi2_cd_b_pins[] = {
-	/* CD */
-	RCAR_GP_PIN(5, 10),
-};
-static const unsigned int sdhi2_cd_b_mux[] = {
-	SD2_CD_B_MARK,
-};
-static const unsigned int sdhi2_wp_a_pins[] = {
-	/* WP */
-	RCAR_GP_PIN(4, 14),
-};
-static const unsigned int sdhi2_wp_a_mux[] = {
-	SD2_WP_A_MARK,
-};
-static const unsigned int sdhi2_wp_b_pins[] = {
-	/* WP */
-	RCAR_GP_PIN(5, 11),
-};
-static const unsigned int sdhi2_wp_b_mux[] = {
-	SD2_WP_B_MARK,
-};
-static const unsigned int sdhi2_ds_pins[] = {
-	/* DS */
-	RCAR_GP_PIN(4, 6),
-};
-static const unsigned int sdhi2_ds_mux[] = {
-	SD2_DS_MARK,
-};
-/* - SDHI3 ------------------------------------------------------------------ */
-static const unsigned int sdhi3_data1_pins[] = {
-	/* D0 */
-	RCAR_GP_PIN(4, 9),
-};
-static const unsigned int sdhi3_data1_mux[] = {
-	SD3_DAT0_MARK,
-};
-static const unsigned int sdhi3_data4_pins[] = {
-	/* D[0:3] */
-	RCAR_GP_PIN(4, 9),  RCAR_GP_PIN(4, 10),
-	RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
-};
-static const unsigned int sdhi3_data4_mux[] = {
-	SD3_DAT0_MARK, SD3_DAT1_MARK,
-	SD3_DAT2_MARK, SD3_DAT3_MARK,
-};
-static const unsigned int sdhi3_data8_pins[] = {
-	/* D[0:7] */
-	RCAR_GP_PIN(4, 9),  RCAR_GP_PIN(4, 10),
-	RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
-	RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
-	RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16),
-};
-static const unsigned int sdhi3_data8_mux[] = {
-	SD3_DAT0_MARK, SD3_DAT1_MARK,
-	SD3_DAT2_MARK, SD3_DAT3_MARK,
-	SD3_DAT4_MARK, SD3_DAT5_MARK,
-	SD3_DAT6_MARK, SD3_DAT7_MARK,
-};
-static const unsigned int sdhi3_ctrl_pins[] = {
-	/* CLK, CMD */
-	RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8),
-};
-static const unsigned int sdhi3_ctrl_mux[] = {
-	SD3_CLK_MARK, SD3_CMD_MARK,
-};
-static const unsigned int sdhi3_cd_pins[] = {
-	/* CD */
-	RCAR_GP_PIN(4, 15),
-};
-static const unsigned int sdhi3_cd_mux[] = {
-	SD3_CD_MARK,
-};
-static const unsigned int sdhi3_wp_pins[] = {
-	/* WP */
-	RCAR_GP_PIN(4, 16),
-};
-static const unsigned int sdhi3_wp_mux[] = {
-	SD3_WP_MARK,
-};
-static const unsigned int sdhi3_ds_pins[] = {
-	/* DS */
-	RCAR_GP_PIN(4, 17),
-};
-static const unsigned int sdhi3_ds_mux[] = {
-	SD3_DS_MARK,
-};
-
-/* - SSI -------------------------------------------------------------------- */
-static const unsigned int ssi0_data_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 2),
-};
-static const unsigned int ssi0_data_mux[] = {
-	SSI_SDATA0_MARK,
-};
-static const unsigned int ssi01239_ctrl_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
-};
-static const unsigned int ssi01239_ctrl_mux[] = {
-	SSI_SCK01239_MARK, SSI_WS01239_MARK,
-};
-static const unsigned int ssi1_data_a_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 3),
-};
-static const unsigned int ssi1_data_a_mux[] = {
-	SSI_SDATA1_A_MARK,
-};
-static const unsigned int ssi1_data_b_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(5, 12),
-};
-static const unsigned int ssi1_data_b_mux[] = {
-	SSI_SDATA1_B_MARK,
-};
-static const unsigned int ssi1_ctrl_a_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
-};
-static const unsigned int ssi1_ctrl_a_mux[] = {
-	SSI_SCK1_A_MARK, SSI_WS1_A_MARK,
-};
-static const unsigned int ssi1_ctrl_b_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 21),
-};
-static const unsigned int ssi1_ctrl_b_mux[] = {
-	SSI_SCK1_B_MARK, SSI_WS1_B_MARK,
-};
-static const unsigned int ssi2_data_a_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 4),
-};
-static const unsigned int ssi2_data_a_mux[] = {
-	SSI_SDATA2_A_MARK,
-};
-static const unsigned int ssi2_data_b_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(5, 13),
-};
-static const unsigned int ssi2_data_b_mux[] = {
-	SSI_SDATA2_B_MARK,
-};
-static const unsigned int ssi2_ctrl_a_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
-};
-static const unsigned int ssi2_ctrl_a_mux[] = {
-	SSI_SCK2_A_MARK, SSI_WS2_A_MARK,
-};
-static const unsigned int ssi2_ctrl_b_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
-};
-static const unsigned int ssi2_ctrl_b_mux[] = {
-	SSI_SCK2_B_MARK, SSI_WS2_B_MARK,
-};
-static const unsigned int ssi3_data_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 7),
-};
-static const unsigned int ssi3_data_mux[] = {
-	SSI_SDATA3_MARK,
-};
-static const unsigned int ssi349_ctrl_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6),
-};
-static const unsigned int ssi349_ctrl_mux[] = {
-	SSI_SCK349_MARK, SSI_WS349_MARK,
-};
-static const unsigned int ssi4_data_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 10),
-};
-static const unsigned int ssi4_data_mux[] = {
-	SSI_SDATA4_MARK,
-};
-static const unsigned int ssi4_ctrl_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
-};
-static const unsigned int ssi4_ctrl_mux[] = {
-	SSI_SCK4_MARK, SSI_WS4_MARK,
-};
-static const unsigned int ssi5_data_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 13),
-};
-static const unsigned int ssi5_data_mux[] = {
-	SSI_SDATA5_MARK,
-};
-static const unsigned int ssi5_ctrl_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
-};
-static const unsigned int ssi5_ctrl_mux[] = {
-	SSI_SCK5_MARK, SSI_WS5_MARK,
-};
-static const unsigned int ssi6_data_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 16),
-};
-static const unsigned int ssi6_data_mux[] = {
-	SSI_SDATA6_MARK,
-};
-static const unsigned int ssi6_ctrl_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
-};
-static const unsigned int ssi6_ctrl_mux[] = {
-	SSI_SCK6_MARK, SSI_WS6_MARK,
-};
-static const unsigned int ssi7_data_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 19),
-};
-static const unsigned int ssi7_data_mux[] = {
-	SSI_SDATA7_MARK,
-};
-static const unsigned int ssi78_ctrl_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
-};
-static const unsigned int ssi78_ctrl_mux[] = {
-	SSI_SCK78_MARK, SSI_WS78_MARK,
-};
-static const unsigned int ssi8_data_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 20),
-};
-static const unsigned int ssi8_data_mux[] = {
-	SSI_SDATA8_MARK,
-};
-static const unsigned int ssi9_data_a_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(6, 21),
-};
-static const unsigned int ssi9_data_a_mux[] = {
-	SSI_SDATA9_A_MARK,
-};
-static const unsigned int ssi9_data_b_pins[] = {
-	/* SDATA */
-	RCAR_GP_PIN(5, 14),
-};
-static const unsigned int ssi9_data_b_mux[] = {
-	SSI_SDATA9_B_MARK,
-};
-static const unsigned int ssi9_ctrl_a_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
-};
-static const unsigned int ssi9_ctrl_a_mux[] = {
-	SSI_SCK9_A_MARK, SSI_WS9_A_MARK,
-};
-static const unsigned int ssi9_ctrl_b_pins[] = {
-	/* SCK, WS */
-	RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
-};
-static const unsigned int ssi9_ctrl_b_mux[] = {
-	SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
-};
-
-/* - TMU -------------------------------------------------------------------- */
-static const unsigned int tmu_tclk1_a_pins[] = {
-	/* TCLK */
-	RCAR_GP_PIN(6, 23),
-};
-static const unsigned int tmu_tclk1_a_mux[] = {
-	TCLK1_A_MARK,
-};
-static const unsigned int tmu_tclk1_b_pins[] = {
-	/* TCLK */
-	RCAR_GP_PIN(5, 19),
-};
-static const unsigned int tmu_tclk1_b_mux[] = {
-	TCLK1_B_MARK,
-};
-static const unsigned int tmu_tclk2_a_pins[] = {
-	/* TCLK */
-	RCAR_GP_PIN(6, 19),
-};
-static const unsigned int tmu_tclk2_a_mux[] = {
-	TCLK2_A_MARK,
-};
-static const unsigned int tmu_tclk2_b_pins[] = {
-	/* TCLK */
-	RCAR_GP_PIN(6, 28),
-};
-static const unsigned int tmu_tclk2_b_mux[] = {
-	TCLK2_B_MARK,
-};
-
-/* - TPU ------------------------------------------------------------------- */
-static const unsigned int tpu_to0_pins[] = {
-	/* TPU0TO0 */
-	RCAR_GP_PIN(6, 28),
-};
-static const unsigned int tpu_to0_mux[] = {
-	TPU0TO0_MARK,
-};
-static const unsigned int tpu_to1_pins[] = {
-	/* TPU0TO1 */
-	RCAR_GP_PIN(6, 29),
-};
-static const unsigned int tpu_to1_mux[] = {
-	TPU0TO1_MARK,
-};
-static const unsigned int tpu_to2_pins[] = {
-	/* TPU0TO2 */
-	RCAR_GP_PIN(6, 30),
-};
-static const unsigned int tpu_to2_mux[] = {
-	TPU0TO2_MARK,
-};
-static const unsigned int tpu_to3_pins[] = {
-	/* TPU0TO3 */
-	RCAR_GP_PIN(6, 31),
-};
-static const unsigned int tpu_to3_mux[] = {
-	TPU0TO3_MARK,
-};
-
-/* - USB0 ------------------------------------------------------------------- */
-static const unsigned int usb0_pins[] = {
-	/* PWEN, OVC */
-	RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
-};
-static const unsigned int usb0_mux[] = {
-	USB0_PWEN_MARK, USB0_OVC_MARK,
-};
-/* - USB1 ------------------------------------------------------------------- */
-static const unsigned int usb1_pins[] = {
-	/* PWEN, OVC */
-	RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
-};
-static const unsigned int usb1_mux[] = {
-	USB1_PWEN_MARK, USB1_OVC_MARK,
-};
-/* - USB2 ------------------------------------------------------------------- */
-static const unsigned int usb2_pins[] = {
-	/* PWEN, OVC */
-	RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
-};
-static const unsigned int usb2_mux[] = {
-	USB2_PWEN_MARK, USB2_OVC_MARK,
-};
-/* - USB2_CH3 --------------------------------------------------------------- */
-static const unsigned int usb2_ch3_pins[] = {
-	/* PWEN, OVC */
-	RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
-};
-static const unsigned int usb2_ch3_mux[] = {
-	USB2_CH3_PWEN_MARK, USB2_CH3_OVC_MARK,
-};
-
-/* - USB30 ------------------------------------------------------------------ */
-static const unsigned int usb30_pins[] = {
-	/* PWEN, OVC */
-	RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
-};
-static const unsigned int usb30_mux[] = {
-	USB30_PWEN_MARK, USB30_OVC_MARK,
-};
-
-/* - VIN4 ------------------------------------------------------------------- */
-static const unsigned int vin4_data18_a_pins[] = {
-	RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
-	RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
-	RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
-	RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
-	RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
-	RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
-	RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
-	RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
-	RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
-};
-static const unsigned int vin4_data18_a_mux[] = {
-	VI4_DATA2_A_MARK, VI4_DATA3_A_MARK,
-	VI4_DATA4_A_MARK, VI4_DATA5_A_MARK,
-	VI4_DATA6_A_MARK, VI4_DATA7_A_MARK,
-	VI4_DATA10_MARK, VI4_DATA11_MARK,
-	VI4_DATA12_MARK, VI4_DATA13_MARK,
-	VI4_DATA14_MARK, VI4_DATA15_MARK,
-	VI4_DATA18_MARK, VI4_DATA19_MARK,
-	VI4_DATA20_MARK, VI4_DATA21_MARK,
-	VI4_DATA22_MARK, VI4_DATA23_MARK,
-};
-static const unsigned int vin4_data18_b_pins[] = {
-	RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
-	RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
-	RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
-	RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
-	RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
-	RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
-	RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
-	RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
-	RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
-};
-static const unsigned int vin4_data18_b_mux[] = {
-	VI4_DATA2_B_MARK, VI4_DATA3_B_MARK,
-	VI4_DATA4_B_MARK, VI4_DATA5_B_MARK,
-	VI4_DATA6_B_MARK, VI4_DATA7_B_MARK,
-	VI4_DATA10_MARK, VI4_DATA11_MARK,
-	VI4_DATA12_MARK, VI4_DATA13_MARK,
-	VI4_DATA14_MARK, VI4_DATA15_MARK,
-	VI4_DATA18_MARK, VI4_DATA19_MARK,
-	VI4_DATA20_MARK, VI4_DATA21_MARK,
-	VI4_DATA22_MARK, VI4_DATA23_MARK,
-};
-static const union vin_data vin4_data_a_pins = {
-	.data24 = {
-		RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 9),
-		RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
-		RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
-		RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
-		RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
-		RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
-		RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
-		RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
-		RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
-		RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
-		RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
-		RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
-	},
-};
-static const union vin_data vin4_data_a_mux = {
-	.data24 = {
-		VI4_DATA0_A_MARK, VI4_DATA1_A_MARK,
-		VI4_DATA2_A_MARK, VI4_DATA3_A_MARK,
-		VI4_DATA4_A_MARK, VI4_DATA5_A_MARK,
-		VI4_DATA6_A_MARK, VI4_DATA7_A_MARK,
-		VI4_DATA8_MARK,  VI4_DATA9_MARK,
-		VI4_DATA10_MARK, VI4_DATA11_MARK,
-		VI4_DATA12_MARK, VI4_DATA13_MARK,
-		VI4_DATA14_MARK, VI4_DATA15_MARK,
-		VI4_DATA16_MARK, VI4_DATA17_MARK,
-		VI4_DATA18_MARK, VI4_DATA19_MARK,
-		VI4_DATA20_MARK, VI4_DATA21_MARK,
-		VI4_DATA22_MARK, VI4_DATA23_MARK,
-	},
-};
-static const union vin_data vin4_data_b_pins = {
-	.data24 = {
-		RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
-		RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
-		RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
-		RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
-		RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
-		RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
-		RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
-		RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
-		RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
-		RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
-		RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
-		RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
-	},
-};
-static const union vin_data vin4_data_b_mux = {
-	.data24 = {
-		VI4_DATA0_B_MARK, VI4_DATA1_B_MARK,
-		VI4_DATA2_B_MARK, VI4_DATA3_B_MARK,
-		VI4_DATA4_B_MARK, VI4_DATA5_B_MARK,
-		VI4_DATA6_B_MARK, VI4_DATA7_B_MARK,
-		VI4_DATA8_MARK,  VI4_DATA9_MARK,
-		VI4_DATA10_MARK, VI4_DATA11_MARK,
-		VI4_DATA12_MARK, VI4_DATA13_MARK,
-		VI4_DATA14_MARK, VI4_DATA15_MARK,
-		VI4_DATA16_MARK, VI4_DATA17_MARK,
-		VI4_DATA18_MARK, VI4_DATA19_MARK,
-		VI4_DATA20_MARK, VI4_DATA21_MARK,
-		VI4_DATA22_MARK, VI4_DATA23_MARK,
-	},
-};
-static const unsigned int vin4_sync_pins[] = {
-	/* HSYNC#, VSYNC# */
-	RCAR_GP_PIN(1, 18), RCAR_GP_PIN(1, 17),
-};
-static const unsigned int vin4_sync_mux[] = {
-	VI4_HSYNC_N_MARK, VI4_VSYNC_N_MARK,
-};
-static const unsigned int vin4_field_pins[] = {
-	/* FIELD */
-	RCAR_GP_PIN(1, 16),
-};
-static const unsigned int vin4_field_mux[] = {
-	VI4_FIELD_MARK,
-};
-static const unsigned int vin4_clkenb_pins[] = {
-	/* CLKENB */
-	RCAR_GP_PIN(1, 19),
-};
-static const unsigned int vin4_clkenb_mux[] = {
-	VI4_CLKENB_MARK,
-};
-static const unsigned int vin4_clk_pins[] = {
-	/* CLK */
-	RCAR_GP_PIN(1, 27),
-};
-static const unsigned int vin4_clk_mux[] = {
-	VI4_CLK_MARK,
-};
-
-/* - VIN5 ------------------------------------------------------------------- */
-static const union vin_data16 vin5_data_pins = {
-	.data16 = {
-		RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
-		RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
-		RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
-		RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
-		RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
-		RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
-		RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
-		RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
-	},
-};
-static const union vin_data16 vin5_data_mux = {
-	.data16 = {
-		VI5_DATA0_MARK, VI5_DATA1_MARK,
-		VI5_DATA2_MARK, VI5_DATA3_MARK,
-		VI5_DATA4_MARK, VI5_DATA5_MARK,
-		VI5_DATA6_MARK, VI5_DATA7_MARK,
-		VI5_DATA8_MARK,  VI5_DATA9_MARK,
-		VI5_DATA10_MARK, VI5_DATA11_MARK,
-		VI5_DATA12_MARK, VI5_DATA13_MARK,
-		VI5_DATA14_MARK, VI5_DATA15_MARK,
-	},
-};
-static const unsigned int vin5_sync_pins[] = {
-	/* HSYNC#, VSYNC# */
-	RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
-};
-static const unsigned int vin5_sync_mux[] = {
-	VI5_HSYNC_N_MARK, VI5_VSYNC_N_MARK,
-};
-static const unsigned int vin5_field_pins[] = {
-	RCAR_GP_PIN(1, 11),
-};
-static const unsigned int vin5_field_mux[] = {
-	/* FIELD */
-	VI5_FIELD_MARK,
-};
-static const unsigned int vin5_clkenb_pins[] = {
-	RCAR_GP_PIN(1, 20),
-};
-static const unsigned int vin5_clkenb_mux[] = {
-	/* CLKENB */
-	VI5_CLKENB_MARK,
-};
-static const unsigned int vin5_clk_pins[] = {
-	RCAR_GP_PIN(1, 21),
-};
-static const unsigned int vin5_clk_mux[] = {
-	/* CLK */
-	VI5_CLK_MARK,
-};
-
-static const struct sh_pfc_pin_group pinmux_groups[] = {
-	SH_PFC_PIN_GROUP(audio_clk_a_a),
-	SH_PFC_PIN_GROUP(audio_clk_a_b),
-	SH_PFC_PIN_GROUP(audio_clk_a_c),
-	SH_PFC_PIN_GROUP(audio_clk_b_a),
-	SH_PFC_PIN_GROUP(audio_clk_b_b),
-	SH_PFC_PIN_GROUP(audio_clk_c_a),
-	SH_PFC_PIN_GROUP(audio_clk_c_b),
-	SH_PFC_PIN_GROUP(audio_clkout_a),
-	SH_PFC_PIN_GROUP(audio_clkout_b),
-	SH_PFC_PIN_GROUP(audio_clkout_c),
-	SH_PFC_PIN_GROUP(audio_clkout_d),
-	SH_PFC_PIN_GROUP(audio_clkout1_a),
-	SH_PFC_PIN_GROUP(audio_clkout1_b),
-	SH_PFC_PIN_GROUP(audio_clkout2_a),
-	SH_PFC_PIN_GROUP(audio_clkout2_b),
-	SH_PFC_PIN_GROUP(audio_clkout3_a),
-	SH_PFC_PIN_GROUP(audio_clkout3_b),
-	SH_PFC_PIN_GROUP(avb_link),
-	SH_PFC_PIN_GROUP(avb_magic),
-	SH_PFC_PIN_GROUP(avb_phy_int),
-	SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio),	/* Deprecated */
-	SH_PFC_PIN_GROUP(avb_mdio),
-	SH_PFC_PIN_GROUP(avb_mii),
-	SH_PFC_PIN_GROUP(avb_avtp_pps),
-	SH_PFC_PIN_GROUP(avb_avtp_match_a),
-	SH_PFC_PIN_GROUP(avb_avtp_capture_a),
-	SH_PFC_PIN_GROUP(avb_avtp_match_b),
-	SH_PFC_PIN_GROUP(avb_avtp_capture_b),
-	SH_PFC_PIN_GROUP(can0_data_a),
-	SH_PFC_PIN_GROUP(can0_data_b),
-	SH_PFC_PIN_GROUP(can1_data),
-	SH_PFC_PIN_GROUP(can_clk),
-	SH_PFC_PIN_GROUP(canfd0_data_a),
-	SH_PFC_PIN_GROUP(canfd0_data_b),
-	SH_PFC_PIN_GROUP(canfd1_data),
-	SH_PFC_PIN_GROUP(drif0_ctrl_a),
-	SH_PFC_PIN_GROUP(drif0_data0_a),
-	SH_PFC_PIN_GROUP(drif0_data1_a),
-	SH_PFC_PIN_GROUP(drif0_ctrl_b),
-	SH_PFC_PIN_GROUP(drif0_data0_b),
-	SH_PFC_PIN_GROUP(drif0_data1_b),
-	SH_PFC_PIN_GROUP(drif0_ctrl_c),
-	SH_PFC_PIN_GROUP(drif0_data0_c),
-	SH_PFC_PIN_GROUP(drif0_data1_c),
-	SH_PFC_PIN_GROUP(drif1_ctrl_a),
-	SH_PFC_PIN_GROUP(drif1_data0_a),
-	SH_PFC_PIN_GROUP(drif1_data1_a),
-	SH_PFC_PIN_GROUP(drif1_ctrl_b),
-	SH_PFC_PIN_GROUP(drif1_data0_b),
-	SH_PFC_PIN_GROUP(drif1_data1_b),
-	SH_PFC_PIN_GROUP(drif1_ctrl_c),
-	SH_PFC_PIN_GROUP(drif1_data0_c),
-	SH_PFC_PIN_GROUP(drif1_data1_c),
-	SH_PFC_PIN_GROUP(drif2_ctrl_a),
-	SH_PFC_PIN_GROUP(drif2_data0_a),
-	SH_PFC_PIN_GROUP(drif2_data1_a),
-	SH_PFC_PIN_GROUP(drif2_ctrl_b),
-	SH_PFC_PIN_GROUP(drif2_data0_b),
-	SH_PFC_PIN_GROUP(drif2_data1_b),
-	SH_PFC_PIN_GROUP(drif3_ctrl_a),
-	SH_PFC_PIN_GROUP(drif3_data0_a),
-	SH_PFC_PIN_GROUP(drif3_data1_a),
-	SH_PFC_PIN_GROUP(drif3_ctrl_b),
-	SH_PFC_PIN_GROUP(drif3_data0_b),
-	SH_PFC_PIN_GROUP(drif3_data1_b),
-	SH_PFC_PIN_GROUP(du_rgb666),
-	SH_PFC_PIN_GROUP(du_rgb888),
-	SH_PFC_PIN_GROUP(du_clk_out_0),
-	SH_PFC_PIN_GROUP(du_clk_out_1),
-	SH_PFC_PIN_GROUP(du_sync),
-	SH_PFC_PIN_GROUP(du_oddf),
-	SH_PFC_PIN_GROUP(du_cde),
-	SH_PFC_PIN_GROUP(du_disp),
-	SH_PFC_PIN_GROUP(hscif0_data),
-	SH_PFC_PIN_GROUP(hscif0_clk),
-	SH_PFC_PIN_GROUP(hscif0_ctrl),
-	SH_PFC_PIN_GROUP(hscif1_data_a),
-	SH_PFC_PIN_GROUP(hscif1_clk_a),
-	SH_PFC_PIN_GROUP(hscif1_ctrl_a),
-	SH_PFC_PIN_GROUP(hscif1_data_b),
-	SH_PFC_PIN_GROUP(hscif1_clk_b),
-	SH_PFC_PIN_GROUP(hscif1_ctrl_b),
-	SH_PFC_PIN_GROUP(hscif2_data_a),
-	SH_PFC_PIN_GROUP(hscif2_clk_a),
-	SH_PFC_PIN_GROUP(hscif2_ctrl_a),
-	SH_PFC_PIN_GROUP(hscif2_data_b),
-	SH_PFC_PIN_GROUP(hscif2_clk_b),
-	SH_PFC_PIN_GROUP(hscif2_ctrl_b),
-	SH_PFC_PIN_GROUP(hscif2_data_c),
-	SH_PFC_PIN_GROUP(hscif2_clk_c),
-	SH_PFC_PIN_GROUP(hscif2_ctrl_c),
-	SH_PFC_PIN_GROUP(hscif3_data_a),
-	SH_PFC_PIN_GROUP(hscif3_clk),
-	SH_PFC_PIN_GROUP(hscif3_ctrl),
-	SH_PFC_PIN_GROUP(hscif3_data_b),
-	SH_PFC_PIN_GROUP(hscif3_data_c),
-	SH_PFC_PIN_GROUP(hscif3_data_d),
-	SH_PFC_PIN_GROUP(hscif4_data_a),
-	SH_PFC_PIN_GROUP(hscif4_clk),
-	SH_PFC_PIN_GROUP(hscif4_ctrl),
-	SH_PFC_PIN_GROUP(hscif4_data_b),
-	SH_PFC_PIN_GROUP(i2c0),
-	SH_PFC_PIN_GROUP(i2c1_a),
-	SH_PFC_PIN_GROUP(i2c1_b),
-	SH_PFC_PIN_GROUP(i2c2_a),
-	SH_PFC_PIN_GROUP(i2c2_b),
-	SH_PFC_PIN_GROUP(i2c3),
-	SH_PFC_PIN_GROUP(i2c5),
-	SH_PFC_PIN_GROUP(i2c6_a),
-	SH_PFC_PIN_GROUP(i2c6_b),
-	SH_PFC_PIN_GROUP(i2c6_c),
-	SH_PFC_PIN_GROUP(intc_ex_irq0),
-	SH_PFC_PIN_GROUP(intc_ex_irq1),
-	SH_PFC_PIN_GROUP(intc_ex_irq2),
-	SH_PFC_PIN_GROUP(intc_ex_irq3),
-	SH_PFC_PIN_GROUP(intc_ex_irq4),
-	SH_PFC_PIN_GROUP(intc_ex_irq5),
-	SH_PFC_PIN_GROUP(msiof0_clk),
-	SH_PFC_PIN_GROUP(msiof0_sync),
-	SH_PFC_PIN_GROUP(msiof0_ss1),
-	SH_PFC_PIN_GROUP(msiof0_ss2),
-	SH_PFC_PIN_GROUP(msiof0_txd),
-	SH_PFC_PIN_GROUP(msiof0_rxd),
-	SH_PFC_PIN_GROUP(msiof1_clk_a),
-	SH_PFC_PIN_GROUP(msiof1_sync_a),
-	SH_PFC_PIN_GROUP(msiof1_ss1_a),
-	SH_PFC_PIN_GROUP(msiof1_ss2_a),
-	SH_PFC_PIN_GROUP(msiof1_txd_a),
-	SH_PFC_PIN_GROUP(msiof1_rxd_a),
-	SH_PFC_PIN_GROUP(msiof1_clk_b),
-	SH_PFC_PIN_GROUP(msiof1_sync_b),
-	SH_PFC_PIN_GROUP(msiof1_ss1_b),
-	SH_PFC_PIN_GROUP(msiof1_ss2_b),
-	SH_PFC_PIN_GROUP(msiof1_txd_b),
-	SH_PFC_PIN_GROUP(msiof1_rxd_b),
-	SH_PFC_PIN_GROUP(msiof1_clk_c),
-	SH_PFC_PIN_GROUP(msiof1_sync_c),
-	SH_PFC_PIN_GROUP(msiof1_ss1_c),
-	SH_PFC_PIN_GROUP(msiof1_ss2_c),
-	SH_PFC_PIN_GROUP(msiof1_txd_c),
-	SH_PFC_PIN_GROUP(msiof1_rxd_c),
-	SH_PFC_PIN_GROUP(msiof1_clk_d),
-	SH_PFC_PIN_GROUP(msiof1_sync_d),
-	SH_PFC_PIN_GROUP(msiof1_ss1_d),
-	SH_PFC_PIN_GROUP(msiof1_ss2_d),
-	SH_PFC_PIN_GROUP(msiof1_txd_d),
-	SH_PFC_PIN_GROUP(msiof1_rxd_d),
-	SH_PFC_PIN_GROUP(msiof1_clk_e),
-	SH_PFC_PIN_GROUP(msiof1_sync_e),
-	SH_PFC_PIN_GROUP(msiof1_ss1_e),
-	SH_PFC_PIN_GROUP(msiof1_ss2_e),
-	SH_PFC_PIN_GROUP(msiof1_txd_e),
-	SH_PFC_PIN_GROUP(msiof1_rxd_e),
-	SH_PFC_PIN_GROUP(msiof1_clk_f),
-	SH_PFC_PIN_GROUP(msiof1_sync_f),
-	SH_PFC_PIN_GROUP(msiof1_ss1_f),
-	SH_PFC_PIN_GROUP(msiof1_ss2_f),
-	SH_PFC_PIN_GROUP(msiof1_txd_f),
-	SH_PFC_PIN_GROUP(msiof1_rxd_f),
-	SH_PFC_PIN_GROUP(msiof1_clk_g),
-	SH_PFC_PIN_GROUP(msiof1_sync_g),
-	SH_PFC_PIN_GROUP(msiof1_ss1_g),
-	SH_PFC_PIN_GROUP(msiof1_ss2_g),
-	SH_PFC_PIN_GROUP(msiof1_txd_g),
-	SH_PFC_PIN_GROUP(msiof1_rxd_g),
-	SH_PFC_PIN_GROUP(msiof2_clk_a),
-	SH_PFC_PIN_GROUP(msiof2_sync_a),
-	SH_PFC_PIN_GROUP(msiof2_ss1_a),
-	SH_PFC_PIN_GROUP(msiof2_ss2_a),
-	SH_PFC_PIN_GROUP(msiof2_txd_a),
-	SH_PFC_PIN_GROUP(msiof2_rxd_a),
-	SH_PFC_PIN_GROUP(msiof2_clk_b),
-	SH_PFC_PIN_GROUP(msiof2_sync_b),
-	SH_PFC_PIN_GROUP(msiof2_ss1_b),
-	SH_PFC_PIN_GROUP(msiof2_ss2_b),
-	SH_PFC_PIN_GROUP(msiof2_txd_b),
-	SH_PFC_PIN_GROUP(msiof2_rxd_b),
-	SH_PFC_PIN_GROUP(msiof2_clk_c),
-	SH_PFC_PIN_GROUP(msiof2_sync_c),
-	SH_PFC_PIN_GROUP(msiof2_ss1_c),
-	SH_PFC_PIN_GROUP(msiof2_ss2_c),
-	SH_PFC_PIN_GROUP(msiof2_txd_c),
-	SH_PFC_PIN_GROUP(msiof2_rxd_c),
-	SH_PFC_PIN_GROUP(msiof2_clk_d),
-	SH_PFC_PIN_GROUP(msiof2_sync_d),
-	SH_PFC_PIN_GROUP(msiof2_ss1_d),
-	SH_PFC_PIN_GROUP(msiof2_ss2_d),
-	SH_PFC_PIN_GROUP(msiof2_txd_d),
-	SH_PFC_PIN_GROUP(msiof2_rxd_d),
-	SH_PFC_PIN_GROUP(msiof3_clk_a),
-	SH_PFC_PIN_GROUP(msiof3_sync_a),
-	SH_PFC_PIN_GROUP(msiof3_ss1_a),
-	SH_PFC_PIN_GROUP(msiof3_ss2_a),
-	SH_PFC_PIN_GROUP(msiof3_txd_a),
-	SH_PFC_PIN_GROUP(msiof3_rxd_a),
-	SH_PFC_PIN_GROUP(msiof3_clk_b),
-	SH_PFC_PIN_GROUP(msiof3_sync_b),
-	SH_PFC_PIN_GROUP(msiof3_ss1_b),
-	SH_PFC_PIN_GROUP(msiof3_ss2_b),
-	SH_PFC_PIN_GROUP(msiof3_txd_b),
-	SH_PFC_PIN_GROUP(msiof3_rxd_b),
-	SH_PFC_PIN_GROUP(msiof3_clk_c),
-	SH_PFC_PIN_GROUP(msiof3_sync_c),
-	SH_PFC_PIN_GROUP(msiof3_txd_c),
-	SH_PFC_PIN_GROUP(msiof3_rxd_c),
-	SH_PFC_PIN_GROUP(msiof3_clk_d),
-	SH_PFC_PIN_GROUP(msiof3_sync_d),
-	SH_PFC_PIN_GROUP(msiof3_ss1_d),
-	SH_PFC_PIN_GROUP(msiof3_txd_d),
-	SH_PFC_PIN_GROUP(msiof3_rxd_d),
-	SH_PFC_PIN_GROUP(msiof3_clk_e),
-	SH_PFC_PIN_GROUP(msiof3_sync_e),
-	SH_PFC_PIN_GROUP(msiof3_ss1_e),
-	SH_PFC_PIN_GROUP(msiof3_ss2_e),
-	SH_PFC_PIN_GROUP(msiof3_txd_e),
-	SH_PFC_PIN_GROUP(msiof3_rxd_e),
-	SH_PFC_PIN_GROUP(pwm0),
-	SH_PFC_PIN_GROUP(pwm1_a),
-	SH_PFC_PIN_GROUP(pwm1_b),
-	SH_PFC_PIN_GROUP(pwm2_a),
-	SH_PFC_PIN_GROUP(pwm2_b),
-	SH_PFC_PIN_GROUP(pwm3_a),
-	SH_PFC_PIN_GROUP(pwm3_b),
-	SH_PFC_PIN_GROUP(pwm4_a),
-	SH_PFC_PIN_GROUP(pwm4_b),
-	SH_PFC_PIN_GROUP(pwm5_a),
-	SH_PFC_PIN_GROUP(pwm5_b),
-	SH_PFC_PIN_GROUP(pwm6_a),
-	SH_PFC_PIN_GROUP(pwm6_b),
-	SH_PFC_PIN_GROUP(sata0_devslp_a),
-	SH_PFC_PIN_GROUP(sata0_devslp_b),
-	SH_PFC_PIN_GROUP(scif0_data),
-	SH_PFC_PIN_GROUP(scif0_clk),
-	SH_PFC_PIN_GROUP(scif0_ctrl),
-	SH_PFC_PIN_GROUP(scif1_data_a),
-	SH_PFC_PIN_GROUP(scif1_clk),
-	SH_PFC_PIN_GROUP(scif1_ctrl),
-	SH_PFC_PIN_GROUP(scif1_data_b),
-	SH_PFC_PIN_GROUP(scif2_data_a),
-	SH_PFC_PIN_GROUP(scif2_clk),
-	SH_PFC_PIN_GROUP(scif2_data_b),
-	SH_PFC_PIN_GROUP(scif3_data_a),
-	SH_PFC_PIN_GROUP(scif3_clk),
-	SH_PFC_PIN_GROUP(scif3_ctrl),
-	SH_PFC_PIN_GROUP(scif3_data_b),
-	SH_PFC_PIN_GROUP(scif4_data_a),
-	SH_PFC_PIN_GROUP(scif4_clk_a),
-	SH_PFC_PIN_GROUP(scif4_ctrl_a),
-	SH_PFC_PIN_GROUP(scif4_data_b),
-	SH_PFC_PIN_GROUP(scif4_clk_b),
-	SH_PFC_PIN_GROUP(scif4_ctrl_b),
-	SH_PFC_PIN_GROUP(scif4_data_c),
-	SH_PFC_PIN_GROUP(scif4_clk_c),
-	SH_PFC_PIN_GROUP(scif4_ctrl_c),
-	SH_PFC_PIN_GROUP(scif5_data_a),
-	SH_PFC_PIN_GROUP(scif5_clk_a),
-	SH_PFC_PIN_GROUP(scif5_data_b),
-	SH_PFC_PIN_GROUP(scif5_clk_b),
-	SH_PFC_PIN_GROUP(scif_clk_a),
-	SH_PFC_PIN_GROUP(scif_clk_b),
-	SH_PFC_PIN_GROUP(sdhi0_data1),
-	SH_PFC_PIN_GROUP(sdhi0_data4),
-	SH_PFC_PIN_GROUP(sdhi0_ctrl),
-	SH_PFC_PIN_GROUP(sdhi0_cd),
-	SH_PFC_PIN_GROUP(sdhi0_wp),
-	SH_PFC_PIN_GROUP(sdhi1_data1),
-	SH_PFC_PIN_GROUP(sdhi1_data4),
-	SH_PFC_PIN_GROUP(sdhi1_ctrl),
-	SH_PFC_PIN_GROUP(sdhi1_cd),
-	SH_PFC_PIN_GROUP(sdhi1_wp),
-	SH_PFC_PIN_GROUP(sdhi2_data1),
-	SH_PFC_PIN_GROUP(sdhi2_data4),
-	SH_PFC_PIN_GROUP(sdhi2_data8),
-	SH_PFC_PIN_GROUP(sdhi2_ctrl),
-	SH_PFC_PIN_GROUP(sdhi2_cd_a),
-	SH_PFC_PIN_GROUP(sdhi2_wp_a),
-	SH_PFC_PIN_GROUP(sdhi2_cd_b),
-	SH_PFC_PIN_GROUP(sdhi2_wp_b),
-	SH_PFC_PIN_GROUP(sdhi2_ds),
-	SH_PFC_PIN_GROUP(sdhi3_data1),
-	SH_PFC_PIN_GROUP(sdhi3_data4),
-	SH_PFC_PIN_GROUP(sdhi3_data8),
-	SH_PFC_PIN_GROUP(sdhi3_ctrl),
-	SH_PFC_PIN_GROUP(sdhi3_cd),
-	SH_PFC_PIN_GROUP(sdhi3_wp),
-	SH_PFC_PIN_GROUP(sdhi3_ds),
-	SH_PFC_PIN_GROUP(ssi0_data),
-	SH_PFC_PIN_GROUP(ssi01239_ctrl),
-	SH_PFC_PIN_GROUP(ssi1_data_a),
-	SH_PFC_PIN_GROUP(ssi1_data_b),
-	SH_PFC_PIN_GROUP(ssi1_ctrl_a),
-	SH_PFC_PIN_GROUP(ssi1_ctrl_b),
-	SH_PFC_PIN_GROUP(ssi2_data_a),
-	SH_PFC_PIN_GROUP(ssi2_data_b),
-	SH_PFC_PIN_GROUP(ssi2_ctrl_a),
-	SH_PFC_PIN_GROUP(ssi2_ctrl_b),
-	SH_PFC_PIN_GROUP(ssi3_data),
-	SH_PFC_PIN_GROUP(ssi349_ctrl),
-	SH_PFC_PIN_GROUP(ssi4_data),
-	SH_PFC_PIN_GROUP(ssi4_ctrl),
-	SH_PFC_PIN_GROUP(ssi5_data),
-	SH_PFC_PIN_GROUP(ssi5_ctrl),
-	SH_PFC_PIN_GROUP(ssi6_data),
-	SH_PFC_PIN_GROUP(ssi6_ctrl),
-	SH_PFC_PIN_GROUP(ssi7_data),
-	SH_PFC_PIN_GROUP(ssi78_ctrl),
-	SH_PFC_PIN_GROUP(ssi8_data),
-	SH_PFC_PIN_GROUP(ssi9_data_a),
-	SH_PFC_PIN_GROUP(ssi9_data_b),
-	SH_PFC_PIN_GROUP(ssi9_ctrl_a),
-	SH_PFC_PIN_GROUP(ssi9_ctrl_b),
-	SH_PFC_PIN_GROUP(tmu_tclk1_a),
-	SH_PFC_PIN_GROUP(tmu_tclk1_b),
-	SH_PFC_PIN_GROUP(tmu_tclk2_a),
-	SH_PFC_PIN_GROUP(tmu_tclk2_b),
-	SH_PFC_PIN_GROUP(tpu_to0),
-	SH_PFC_PIN_GROUP(tpu_to1),
-	SH_PFC_PIN_GROUP(tpu_to2),
-	SH_PFC_PIN_GROUP(tpu_to3),
-	SH_PFC_PIN_GROUP(usb0),
-	SH_PFC_PIN_GROUP(usb1),
-	SH_PFC_PIN_GROUP(usb2),
-	SH_PFC_PIN_GROUP(usb2_ch3),
-	SH_PFC_PIN_GROUP(usb30),
-	VIN_DATA_PIN_GROUP(vin4_data, 8, _a),
-	VIN_DATA_PIN_GROUP(vin4_data, 10, _a),
-	VIN_DATA_PIN_GROUP(vin4_data, 12, _a),
-	VIN_DATA_PIN_GROUP(vin4_data, 16, _a),
-	SH_PFC_PIN_GROUP(vin4_data18_a),
-	VIN_DATA_PIN_GROUP(vin4_data, 20, _a),
-	VIN_DATA_PIN_GROUP(vin4_data, 24, _a),
-	VIN_DATA_PIN_GROUP(vin4_data, 8, _b),
-	VIN_DATA_PIN_GROUP(vin4_data, 10, _b),
-	VIN_DATA_PIN_GROUP(vin4_data, 12, _b),
-	VIN_DATA_PIN_GROUP(vin4_data, 16, _b),
-	SH_PFC_PIN_GROUP(vin4_data18_b),
-	VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
-	VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
-	SH_PFC_PIN_GROUP(vin4_sync),
-	SH_PFC_PIN_GROUP(vin4_field),
-	SH_PFC_PIN_GROUP(vin4_clkenb),
-	SH_PFC_PIN_GROUP(vin4_clk),
-	VIN_DATA_PIN_GROUP(vin5_data, 8),
-	VIN_DATA_PIN_GROUP(vin5_data, 10),
-	VIN_DATA_PIN_GROUP(vin5_data, 12),
-	VIN_DATA_PIN_GROUP(vin5_data, 16),
-	SH_PFC_PIN_GROUP(vin5_sync),
-	SH_PFC_PIN_GROUP(vin5_field),
-	SH_PFC_PIN_GROUP(vin5_clkenb),
-	SH_PFC_PIN_GROUP(vin5_clk),
-};
-
-static const char * const audio_clk_groups[] = {
-	"audio_clk_a_a",
-	"audio_clk_a_b",
-	"audio_clk_a_c",
-	"audio_clk_b_a",
-	"audio_clk_b_b",
-	"audio_clk_c_a",
-	"audio_clk_c_b",
-	"audio_clkout_a",
-	"audio_clkout_b",
-	"audio_clkout_c",
-	"audio_clkout_d",
-	"audio_clkout1_a",
-	"audio_clkout1_b",
-	"audio_clkout2_a",
-	"audio_clkout2_b",
-	"audio_clkout3_a",
-	"audio_clkout3_b",
-};
-
-static const char * const avb_groups[] = {
-	"avb_link",
-	"avb_magic",
-	"avb_phy_int",
-	"avb_mdc",	/* Deprecated, please use "avb_mdio" instead */
-	"avb_mdio",
-	"avb_mii",
-	"avb_avtp_pps",
-	"avb_avtp_match_a",
-	"avb_avtp_capture_a",
-	"avb_avtp_match_b",
-	"avb_avtp_capture_b",
-};
-
-static const char * const can0_groups[] = {
-	"can0_data_a",
-	"can0_data_b",
-};
-
-static const char * const can1_groups[] = {
-	"can1_data",
-};
-
-static const char * const can_clk_groups[] = {
-	"can_clk",
-};
-
-static const char * const canfd0_groups[] = {
-	"canfd0_data_a",
-	"canfd0_data_b",
-};
-
-static const char * const canfd1_groups[] = {
-	"canfd1_data",
-};
-
-static const char * const drif0_groups[] = {
-	"drif0_ctrl_a",
-	"drif0_data0_a",
-	"drif0_data1_a",
-	"drif0_ctrl_b",
-	"drif0_data0_b",
-	"drif0_data1_b",
-	"drif0_ctrl_c",
-	"drif0_data0_c",
-	"drif0_data1_c",
-};
-
-static const char * const drif1_groups[] = {
-	"drif1_ctrl_a",
-	"drif1_data0_a",
-	"drif1_data1_a",
-	"drif1_ctrl_b",
-	"drif1_data0_b",
-	"drif1_data1_b",
-	"drif1_ctrl_c",
-	"drif1_data0_c",
-	"drif1_data1_c",
-};
-
-static const char * const drif2_groups[] = {
-	"drif2_ctrl_a",
-	"drif2_data0_a",
-	"drif2_data1_a",
-	"drif2_ctrl_b",
-	"drif2_data0_b",
-	"drif2_data1_b",
-};
-
-static const char * const drif3_groups[] = {
-	"drif3_ctrl_a",
-	"drif3_data0_a",
-	"drif3_data1_a",
-	"drif3_ctrl_b",
-	"drif3_data0_b",
-	"drif3_data1_b",
-};
-
-static const char * const du_groups[] = {
-	"du_rgb666",
-	"du_rgb888",
-	"du_clk_out_0",
-	"du_clk_out_1",
-	"du_sync",
-	"du_oddf",
-	"du_cde",
-	"du_disp",
-};
-
-static const char * const hscif0_groups[] = {
-	"hscif0_data",
-	"hscif0_clk",
-	"hscif0_ctrl",
-};
-
-static const char * const hscif1_groups[] = {
-	"hscif1_data_a",
-	"hscif1_clk_a",
-	"hscif1_ctrl_a",
-	"hscif1_data_b",
-	"hscif1_clk_b",
-	"hscif1_ctrl_b",
-};
-
-static const char * const hscif2_groups[] = {
-	"hscif2_data_a",
-	"hscif2_clk_a",
-	"hscif2_ctrl_a",
-	"hscif2_data_b",
-	"hscif2_clk_b",
-	"hscif2_ctrl_b",
-	"hscif2_data_c",
-	"hscif2_clk_c",
-	"hscif2_ctrl_c",
-};
-
-static const char * const hscif3_groups[] = {
-	"hscif3_data_a",
-	"hscif3_clk",
-	"hscif3_ctrl",
-	"hscif3_data_b",
-	"hscif3_data_c",
-	"hscif3_data_d",
-};
-
-static const char * const hscif4_groups[] = {
-	"hscif4_data_a",
-	"hscif4_clk",
-	"hscif4_ctrl",
-	"hscif4_data_b",
-};
-
-static const char * const i2c0_groups[] = {
-	"i2c0",
-};
-
-static const char * const i2c1_groups[] = {
-	"i2c1_a",
-	"i2c1_b",
-};
-
-static const char * const i2c2_groups[] = {
-	"i2c2_a",
-	"i2c2_b",
-};
-
-static const char * const i2c3_groups[] = {
-	"i2c3",
-};
-
-static const char * const i2c5_groups[] = {
-	"i2c5",
-};
-
-static const char * const i2c6_groups[] = {
-	"i2c6_a",
-	"i2c6_b",
-	"i2c6_c",
-};
-
-static const char * const intc_ex_groups[] = {
-	"intc_ex_irq0",
-	"intc_ex_irq1",
-	"intc_ex_irq2",
-	"intc_ex_irq3",
-	"intc_ex_irq4",
-	"intc_ex_irq5",
-};
-
-static const char * const msiof0_groups[] = {
-	"msiof0_clk",
-	"msiof0_sync",
-	"msiof0_ss1",
-	"msiof0_ss2",
-	"msiof0_txd",
-	"msiof0_rxd",
-};
-
-static const char * const msiof1_groups[] = {
-	"msiof1_clk_a",
-	"msiof1_sync_a",
-	"msiof1_ss1_a",
-	"msiof1_ss2_a",
-	"msiof1_txd_a",
-	"msiof1_rxd_a",
-	"msiof1_clk_b",
-	"msiof1_sync_b",
-	"msiof1_ss1_b",
-	"msiof1_ss2_b",
-	"msiof1_txd_b",
-	"msiof1_rxd_b",
-	"msiof1_clk_c",
-	"msiof1_sync_c",
-	"msiof1_ss1_c",
-	"msiof1_ss2_c",
-	"msiof1_txd_c",
-	"msiof1_rxd_c",
-	"msiof1_clk_d",
-	"msiof1_sync_d",
-	"msiof1_ss1_d",
-	"msiof1_ss2_d",
-	"msiof1_txd_d",
-	"msiof1_rxd_d",
-	"msiof1_clk_e",
-	"msiof1_sync_e",
-	"msiof1_ss1_e",
-	"msiof1_ss2_e",
-	"msiof1_txd_e",
-	"msiof1_rxd_e",
-	"msiof1_clk_f",
-	"msiof1_sync_f",
-	"msiof1_ss1_f",
-	"msiof1_ss2_f",
-	"msiof1_txd_f",
-	"msiof1_rxd_f",
-	"msiof1_clk_g",
-	"msiof1_sync_g",
-	"msiof1_ss1_g",
-	"msiof1_ss2_g",
-	"msiof1_txd_g",
-	"msiof1_rxd_g",
-};
-
-static const char * const msiof2_groups[] = {
-	"msiof2_clk_a",
-	"msiof2_sync_a",
-	"msiof2_ss1_a",
-	"msiof2_ss2_a",
-	"msiof2_txd_a",
-	"msiof2_rxd_a",
-	"msiof2_clk_b",
-	"msiof2_sync_b",
-	"msiof2_ss1_b",
-	"msiof2_ss2_b",
-	"msiof2_txd_b",
-	"msiof2_rxd_b",
-	"msiof2_clk_c",
-	"msiof2_sync_c",
-	"msiof2_ss1_c",
-	"msiof2_ss2_c",
-	"msiof2_txd_c",
-	"msiof2_rxd_c",
-	"msiof2_clk_d",
-	"msiof2_sync_d",
-	"msiof2_ss1_d",
-	"msiof2_ss2_d",
-	"msiof2_txd_d",
-	"msiof2_rxd_d",
-};
-
-static const char * const msiof3_groups[] = {
-	"msiof3_clk_a",
-	"msiof3_sync_a",
-	"msiof3_ss1_a",
-	"msiof3_ss2_a",
-	"msiof3_txd_a",
-	"msiof3_rxd_a",
-	"msiof3_clk_b",
-	"msiof3_sync_b",
-	"msiof3_ss1_b",
-	"msiof3_ss2_b",
-	"msiof3_txd_b",
-	"msiof3_rxd_b",
-	"msiof3_clk_c",
-	"msiof3_sync_c",
-	"msiof3_txd_c",
-	"msiof3_rxd_c",
-	"msiof3_clk_d",
-	"msiof3_sync_d",
-	"msiof3_ss1_d",
-	"msiof3_txd_d",
-	"msiof3_rxd_d",
-	"msiof3_clk_e",
-	"msiof3_sync_e",
-	"msiof3_ss1_e",
-	"msiof3_ss2_e",
-	"msiof3_txd_e",
-	"msiof3_rxd_e",
-};
-
-static const char * const pwm0_groups[] = {
-	"pwm0",
-};
-
-static const char * const pwm1_groups[] = {
-	"pwm1_a",
-	"pwm1_b",
-};
-
-static const char * const pwm2_groups[] = {
-	"pwm2_a",
-	"pwm2_b",
-};
-
-static const char * const pwm3_groups[] = {
-	"pwm3_a",
-	"pwm3_b",
-};
-
-static const char * const pwm4_groups[] = {
-	"pwm4_a",
-	"pwm4_b",
-};
-
-static const char * const pwm5_groups[] = {
-	"pwm5_a",
-	"pwm5_b",
-};
-
-static const char * const pwm6_groups[] = {
-	"pwm6_a",
-	"pwm6_b",
-};
-
-static const char * const sata0_groups[] = {
-	"sata0_devslp_a",
-	"sata0_devslp_b",
-};
-
-static const char * const scif0_groups[] = {
-	"scif0_data",
-	"scif0_clk",
-	"scif0_ctrl",
-};
-
-static const char * const scif1_groups[] = {
-	"scif1_data_a",
-	"scif1_clk",
-	"scif1_ctrl",
-	"scif1_data_b",
-};
-
-static const char * const scif2_groups[] = {
-	"scif2_data_a",
-	"scif2_clk",
-	"scif2_data_b",
-};
-
-static const char * const scif3_groups[] = {
-	"scif3_data_a",
-	"scif3_clk",
-	"scif3_ctrl",
-	"scif3_data_b",
-};
-
-static const char * const scif4_groups[] = {
-	"scif4_data_a",
-	"scif4_clk_a",
-	"scif4_ctrl_a",
-	"scif4_data_b",
-	"scif4_clk_b",
-	"scif4_ctrl_b",
-	"scif4_data_c",
-	"scif4_clk_c",
-	"scif4_ctrl_c",
-};
-
-static const char * const scif5_groups[] = {
-	"scif5_data_a",
-	"scif5_clk_a",
-	"scif5_data_b",
-	"scif5_clk_b",
-};
-
-static const char * const scif_clk_groups[] = {
-	"scif_clk_a",
-	"scif_clk_b",
-};
-
-static const char * const sdhi0_groups[] = {
-	"sdhi0_data1",
-	"sdhi0_data4",
-	"sdhi0_ctrl",
-	"sdhi0_cd",
-	"sdhi0_wp",
-};
-
-static const char * const sdhi1_groups[] = {
-	"sdhi1_data1",
-	"sdhi1_data4",
-	"sdhi1_ctrl",
-	"sdhi1_cd",
-	"sdhi1_wp",
-};
-
-static const char * const sdhi2_groups[] = {
-	"sdhi2_data1",
-	"sdhi2_data4",
-	"sdhi2_data8",
-	"sdhi2_ctrl",
-	"sdhi2_cd_a",
-	"sdhi2_wp_a",
-	"sdhi2_cd_b",
-	"sdhi2_wp_b",
-	"sdhi2_ds",
-};
-
-static const char * const sdhi3_groups[] = {
-	"sdhi3_data1",
-	"sdhi3_data4",
-	"sdhi3_data8",
-	"sdhi3_ctrl",
-	"sdhi3_cd",
-	"sdhi3_wp",
-	"sdhi3_ds",
-};
-
-static const char * const ssi_groups[] = {
-	"ssi0_data",
-	"ssi01239_ctrl",
-	"ssi1_data_a",
-	"ssi1_data_b",
-	"ssi1_ctrl_a",
-	"ssi1_ctrl_b",
-	"ssi2_data_a",
-	"ssi2_data_b",
-	"ssi2_ctrl_a",
-	"ssi2_ctrl_b",
-	"ssi3_data",
-	"ssi349_ctrl",
-	"ssi4_data",
-	"ssi4_ctrl",
-	"ssi5_data",
-	"ssi5_ctrl",
-	"ssi6_data",
-	"ssi6_ctrl",
-	"ssi7_data",
-	"ssi78_ctrl",
-	"ssi8_data",
-	"ssi9_data_a",
-	"ssi9_data_b",
-	"ssi9_ctrl_a",
-	"ssi9_ctrl_b",
-};
-
-static const char * const tmu_groups[] = {
-	"tmu_tclk1_a",
-	"tmu_tclk1_b",
-	"tmu_tclk2_a",
-	"tmu_tclk2_b",
-};
-
-static const char * const tpu_groups[] = {
-	"tpu_to0",
-	"tpu_to1",
-	"tpu_to2",
-	"tpu_to3",
-};
-
-static const char * const usb0_groups[] = {
-	"usb0",
-};
-
-static const char * const usb1_groups[] = {
-	"usb1",
-};
-
-static const char * const usb2_groups[] = {
-	"usb2",
-};
-
-static const char * const usb2_ch3_groups[] = {
-	"usb2_ch3",
-};
-
-static const char * const usb30_groups[] = {
-	"usb30",
-};
-
-static const char * const vin4_groups[] = {
-	"vin4_data8_a",
-	"vin4_data10_a",
-	"vin4_data12_a",
-	"vin4_data16_a",
-	"vin4_data18_a",
-	"vin4_data20_a",
-	"vin4_data24_a",
-	"vin4_data8_b",
-	"vin4_data10_b",
-	"vin4_data12_b",
-	"vin4_data16_b",
-	"vin4_data18_b",
-	"vin4_data20_b",
-	"vin4_data24_b",
-	"vin4_sync",
-	"vin4_field",
-	"vin4_clkenb",
-	"vin4_clk",
-};
-
-static const char * const vin5_groups[] = {
-	"vin5_data8",
-	"vin5_data10",
-	"vin5_data12",
-	"vin5_data16",
-	"vin5_sync",
-	"vin5_field",
-	"vin5_clkenb",
-	"vin5_clk",
-};
-
-static const struct sh_pfc_function pinmux_functions[] = {
-	SH_PFC_FUNCTION(audio_clk),
-	SH_PFC_FUNCTION(avb),
-	SH_PFC_FUNCTION(can0),
-	SH_PFC_FUNCTION(can1),
-	SH_PFC_FUNCTION(can_clk),
-	SH_PFC_FUNCTION(canfd0),
-	SH_PFC_FUNCTION(canfd1),
-	SH_PFC_FUNCTION(drif0),
-	SH_PFC_FUNCTION(drif1),
-	SH_PFC_FUNCTION(drif2),
-	SH_PFC_FUNCTION(drif3),
-	SH_PFC_FUNCTION(du),
-	SH_PFC_FUNCTION(hscif0),
-	SH_PFC_FUNCTION(hscif1),
-	SH_PFC_FUNCTION(hscif2),
-	SH_PFC_FUNCTION(hscif3),
-	SH_PFC_FUNCTION(hscif4),
-	SH_PFC_FUNCTION(i2c0),
-	SH_PFC_FUNCTION(i2c1),
-	SH_PFC_FUNCTION(i2c2),
-	SH_PFC_FUNCTION(i2c3),
-	SH_PFC_FUNCTION(i2c5),
-	SH_PFC_FUNCTION(i2c6),
-	SH_PFC_FUNCTION(intc_ex),
-	SH_PFC_FUNCTION(msiof0),
-	SH_PFC_FUNCTION(msiof1),
-	SH_PFC_FUNCTION(msiof2),
-	SH_PFC_FUNCTION(msiof3),
-	SH_PFC_FUNCTION(pwm0),
-	SH_PFC_FUNCTION(pwm1),
-	SH_PFC_FUNCTION(pwm2),
-	SH_PFC_FUNCTION(pwm3),
-	SH_PFC_FUNCTION(pwm4),
-	SH_PFC_FUNCTION(pwm5),
-	SH_PFC_FUNCTION(pwm6),
-	SH_PFC_FUNCTION(sata0),
-	SH_PFC_FUNCTION(scif0),
-	SH_PFC_FUNCTION(scif1),
-	SH_PFC_FUNCTION(scif2),
-	SH_PFC_FUNCTION(scif3),
-	SH_PFC_FUNCTION(scif4),
-	SH_PFC_FUNCTION(scif5),
-	SH_PFC_FUNCTION(scif_clk),
-	SH_PFC_FUNCTION(sdhi0),
-	SH_PFC_FUNCTION(sdhi1),
-	SH_PFC_FUNCTION(sdhi2),
-	SH_PFC_FUNCTION(sdhi3),
-	SH_PFC_FUNCTION(ssi),
-	SH_PFC_FUNCTION(tmu),
-	SH_PFC_FUNCTION(tpu),
-	SH_PFC_FUNCTION(usb0),
-	SH_PFC_FUNCTION(usb1),
-	SH_PFC_FUNCTION(usb2),
-	SH_PFC_FUNCTION(usb2_ch3),
-	SH_PFC_FUNCTION(usb30),
-	SH_PFC_FUNCTION(vin4),
-	SH_PFC_FUNCTION(vin5),
-};
-
-static const struct pinmux_cfg_reg pinmux_config_regs[] = {
-#define F_(x, y)	FN_##y
-#define FM(x)		FN_##x
-	{ PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		GP_0_15_FN,	GPSR0_15,
-		GP_0_14_FN,	GPSR0_14,
-		GP_0_13_FN,	GPSR0_13,
-		GP_0_12_FN,	GPSR0_12,
-		GP_0_11_FN,	GPSR0_11,
-		GP_0_10_FN,	GPSR0_10,
-		GP_0_9_FN,	GPSR0_9,
-		GP_0_8_FN,	GPSR0_8,
-		GP_0_7_FN,	GPSR0_7,
-		GP_0_6_FN,	GPSR0_6,
-		GP_0_5_FN,	GPSR0_5,
-		GP_0_4_FN,	GPSR0_4,
-		GP_0_3_FN,	GPSR0_3,
-		GP_0_2_FN,	GPSR0_2,
-		GP_0_1_FN,	GPSR0_1,
-		GP_0_0_FN,	GPSR0_0, ))
-	},
-	{ PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1, GROUP(
-		0, 0,
-		0, 0,
-		0, 0,
-		GP_1_28_FN,	GPSR1_28,
-		GP_1_27_FN,	GPSR1_27,
-		GP_1_26_FN,	GPSR1_26,
-		GP_1_25_FN,	GPSR1_25,
-		GP_1_24_FN,	GPSR1_24,
-		GP_1_23_FN,	GPSR1_23,
-		GP_1_22_FN,	GPSR1_22,
-		GP_1_21_FN,	GPSR1_21,
-		GP_1_20_FN,	GPSR1_20,
-		GP_1_19_FN,	GPSR1_19,
-		GP_1_18_FN,	GPSR1_18,
-		GP_1_17_FN,	GPSR1_17,
-		GP_1_16_FN,	GPSR1_16,
-		GP_1_15_FN,	GPSR1_15,
-		GP_1_14_FN,	GPSR1_14,
-		GP_1_13_FN,	GPSR1_13,
-		GP_1_12_FN,	GPSR1_12,
-		GP_1_11_FN,	GPSR1_11,
-		GP_1_10_FN,	GPSR1_10,
-		GP_1_9_FN,	GPSR1_9,
-		GP_1_8_FN,	GPSR1_8,
-		GP_1_7_FN,	GPSR1_7,
-		GP_1_6_FN,	GPSR1_6,
-		GP_1_5_FN,	GPSR1_5,
-		GP_1_4_FN,	GPSR1_4,
-		GP_1_3_FN,	GPSR1_3,
-		GP_1_2_FN,	GPSR1_2,
-		GP_1_1_FN,	GPSR1_1,
-		GP_1_0_FN,	GPSR1_0, ))
-	},
-	{ PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		GP_2_14_FN,	GPSR2_14,
-		GP_2_13_FN,	GPSR2_13,
-		GP_2_12_FN,	GPSR2_12,
-		GP_2_11_FN,	GPSR2_11,
-		GP_2_10_FN,	GPSR2_10,
-		GP_2_9_FN,	GPSR2_9,
-		GP_2_8_FN,	GPSR2_8,
-		GP_2_7_FN,	GPSR2_7,
-		GP_2_6_FN,	GPSR2_6,
-		GP_2_5_FN,	GPSR2_5,
-		GP_2_4_FN,	GPSR2_4,
-		GP_2_3_FN,	GPSR2_3,
-		GP_2_2_FN,	GPSR2_2,
-		GP_2_1_FN,	GPSR2_1,
-		GP_2_0_FN,	GPSR2_0, ))
-	},
-	{ PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		GP_3_15_FN,	GPSR3_15,
-		GP_3_14_FN,	GPSR3_14,
-		GP_3_13_FN,	GPSR3_13,
-		GP_3_12_FN,	GPSR3_12,
-		GP_3_11_FN,	GPSR3_11,
-		GP_3_10_FN,	GPSR3_10,
-		GP_3_9_FN,	GPSR3_9,
-		GP_3_8_FN,	GPSR3_8,
-		GP_3_7_FN,	GPSR3_7,
-		GP_3_6_FN,	GPSR3_6,
-		GP_3_5_FN,	GPSR3_5,
-		GP_3_4_FN,	GPSR3_4,
-		GP_3_3_FN,	GPSR3_3,
-		GP_3_2_FN,	GPSR3_2,
-		GP_3_1_FN,	GPSR3_1,
-		GP_3_0_FN,	GPSR3_0, ))
-	},
-	{ PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		GP_4_17_FN,	GPSR4_17,
-		GP_4_16_FN,	GPSR4_16,
-		GP_4_15_FN,	GPSR4_15,
-		GP_4_14_FN,	GPSR4_14,
-		GP_4_13_FN,	GPSR4_13,
-		GP_4_12_FN,	GPSR4_12,
-		GP_4_11_FN,	GPSR4_11,
-		GP_4_10_FN,	GPSR4_10,
-		GP_4_9_FN,	GPSR4_9,
-		GP_4_8_FN,	GPSR4_8,
-		GP_4_7_FN,	GPSR4_7,
-		GP_4_6_FN,	GPSR4_6,
-		GP_4_5_FN,	GPSR4_5,
-		GP_4_4_FN,	GPSR4_4,
-		GP_4_3_FN,	GPSR4_3,
-		GP_4_2_FN,	GPSR4_2,
-		GP_4_1_FN,	GPSR4_1,
-		GP_4_0_FN,	GPSR4_0, ))
-	},
-	{ PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		GP_5_25_FN,	GPSR5_25,
-		GP_5_24_FN,	GPSR5_24,
-		GP_5_23_FN,	GPSR5_23,
-		GP_5_22_FN,	GPSR5_22,
-		GP_5_21_FN,	GPSR5_21,
-		GP_5_20_FN,	GPSR5_20,
-		GP_5_19_FN,	GPSR5_19,
-		GP_5_18_FN,	GPSR5_18,
-		GP_5_17_FN,	GPSR5_17,
-		GP_5_16_FN,	GPSR5_16,
-		GP_5_15_FN,	GPSR5_15,
-		GP_5_14_FN,	GPSR5_14,
-		GP_5_13_FN,	GPSR5_13,
-		GP_5_12_FN,	GPSR5_12,
-		GP_5_11_FN,	GPSR5_11,
-		GP_5_10_FN,	GPSR5_10,
-		GP_5_9_FN,	GPSR5_9,
-		GP_5_8_FN,	GPSR5_8,
-		GP_5_7_FN,	GPSR5_7,
-		GP_5_6_FN,	GPSR5_6,
-		GP_5_5_FN,	GPSR5_5,
-		GP_5_4_FN,	GPSR5_4,
-		GP_5_3_FN,	GPSR5_3,
-		GP_5_2_FN,	GPSR5_2,
-		GP_5_1_FN,	GPSR5_1,
-		GP_5_0_FN,	GPSR5_0, ))
-	},
-	{ PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1, GROUP(
-		GP_6_31_FN,	GPSR6_31,
-		GP_6_30_FN,	GPSR6_30,
-		GP_6_29_FN,	GPSR6_29,
-		GP_6_28_FN,	GPSR6_28,
-		GP_6_27_FN,	GPSR6_27,
-		GP_6_26_FN,	GPSR6_26,
-		GP_6_25_FN,	GPSR6_25,
-		GP_6_24_FN,	GPSR6_24,
-		GP_6_23_FN,	GPSR6_23,
-		GP_6_22_FN,	GPSR6_22,
-		GP_6_21_FN,	GPSR6_21,
-		GP_6_20_FN,	GPSR6_20,
-		GP_6_19_FN,	GPSR6_19,
-		GP_6_18_FN,	GPSR6_18,
-		GP_6_17_FN,	GPSR6_17,
-		GP_6_16_FN,	GPSR6_16,
-		GP_6_15_FN,	GPSR6_15,
-		GP_6_14_FN,	GPSR6_14,
-		GP_6_13_FN,	GPSR6_13,
-		GP_6_12_FN,	GPSR6_12,
-		GP_6_11_FN,	GPSR6_11,
-		GP_6_10_FN,	GPSR6_10,
-		GP_6_9_FN,	GPSR6_9,
-		GP_6_8_FN,	GPSR6_8,
-		GP_6_7_FN,	GPSR6_7,
-		GP_6_6_FN,	GPSR6_6,
-		GP_6_5_FN,	GPSR6_5,
-		GP_6_4_FN,	GPSR6_4,
-		GP_6_3_FN,	GPSR6_3,
-		GP_6_2_FN,	GPSR6_2,
-		GP_6_1_FN,	GPSR6_1,
-		GP_6_0_FN,	GPSR6_0, ))
-	},
-	{ PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP(
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		0, 0,
-		GP_7_3_FN, GPSR7_3,
-		GP_7_2_FN, GPSR7_2,
-		GP_7_1_FN, GPSR7_1,
-		GP_7_0_FN, GPSR7_0, ))
-	},
-#undef F_
-#undef FM
-
-#define F_(x, y)	x,
-#define FM(x)		FN_##x,
-	{ PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4, GROUP(
-		IP0_31_28
-		IP0_27_24
-		IP0_23_20
-		IP0_19_16
-		IP0_15_12
-		IP0_11_8
-		IP0_7_4
-		IP0_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4, GROUP(
-		IP1_31_28
-		IP1_27_24
-		IP1_23_20
-		IP1_19_16
-		IP1_15_12
-		IP1_11_8
-		IP1_7_4
-		IP1_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4, GROUP(
-		IP2_31_28
-		IP2_27_24
-		IP2_23_20
-		IP2_19_16
-		IP2_15_12
-		IP2_11_8
-		IP2_7_4
-		IP2_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4, GROUP(
-		IP3_31_28
-		IP3_27_24
-		IP3_23_20
-		IP3_19_16
-		IP3_15_12
-		IP3_11_8
-		IP3_7_4
-		IP3_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4, GROUP(
-		IP4_31_28
-		IP4_27_24
-		IP4_23_20
-		IP4_19_16
-		IP4_15_12
-		IP4_11_8
-		IP4_7_4
-		IP4_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4, GROUP(
-		IP5_31_28
-		IP5_27_24
-		IP5_23_20
-		IP5_19_16
-		IP5_15_12
-		IP5_11_8
-		IP5_7_4
-		IP5_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4, GROUP(
-		IP6_31_28
-		IP6_27_24
-		IP6_23_20
-		IP6_19_16
-		IP6_15_12
-		IP6_11_8
-		IP6_7_4
-		IP6_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
-		IP7_31_28
-		IP7_27_24
-		IP7_23_20
-		IP7_19_16
-		/* IP7_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		IP7_11_8
-		IP7_7_4
-		IP7_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4, GROUP(
-		IP8_31_28
-		IP8_27_24
-		IP8_23_20
-		IP8_19_16
-		IP8_15_12
-		IP8_11_8
-		IP8_7_4
-		IP8_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4, GROUP(
-		IP9_31_28
-		IP9_27_24
-		IP9_23_20
-		IP9_19_16
-		IP9_15_12
-		IP9_11_8
-		IP9_7_4
-		IP9_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4, GROUP(
-		IP10_31_28
-		IP10_27_24
-		IP10_23_20
-		IP10_19_16
-		IP10_15_12
-		IP10_11_8
-		IP10_7_4
-		IP10_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4, GROUP(
-		IP11_31_28
-		IP11_27_24
-		IP11_23_20
-		IP11_19_16
-		IP11_15_12
-		IP11_11_8
-		IP11_7_4
-		IP11_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4, GROUP(
-		IP12_31_28
-		IP12_27_24
-		IP12_23_20
-		IP12_19_16
-		IP12_15_12
-		IP12_11_8
-		IP12_7_4
-		IP12_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4, GROUP(
-		IP13_31_28
-		IP13_27_24
-		IP13_23_20
-		IP13_19_16
-		IP13_15_12
-		IP13_11_8
-		IP13_7_4
-		IP13_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4, GROUP(
-		IP14_31_28
-		IP14_27_24
-		IP14_23_20
-		IP14_19_16
-		IP14_15_12
-		IP14_11_8
-		IP14_7_4
-		IP14_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4, GROUP(
-		IP15_31_28
-		IP15_27_24
-		IP15_23_20
-		IP15_19_16
-		IP15_15_12
-		IP15_11_8
-		IP15_7_4
-		IP15_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4, GROUP(
-		IP16_31_28
-		IP16_27_24
-		IP16_23_20
-		IP16_19_16
-		IP16_15_12
-		IP16_11_8
-		IP16_7_4
-		IP16_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4, GROUP(
-		IP17_31_28
-		IP17_27_24
-		IP17_23_20
-		IP17_19_16
-		IP17_15_12
-		IP17_11_8
-		IP17_7_4
-		IP17_3_0 ))
-	},
-	{ PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4, GROUP(
-		/* IP18_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		/* IP18_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		/* IP18_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		/* IP18_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		/* IP18_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		/* IP18_11_8  */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		IP18_7_4
-		IP18_3_0 ))
-	},
-#undef F_
-#undef FM
-
-#define F_(x, y)	x,
-#define FM(x)		FN_##x,
-	{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
-			     GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, 1, 2,
-				   1, 1, 1, 2, 2, 1, 2, 3),
-			     GROUP(
-		MOD_SEL0_31_30_29
-		MOD_SEL0_28_27
-		MOD_SEL0_26_25_24
-		MOD_SEL0_23
-		MOD_SEL0_22
-		MOD_SEL0_21
-		MOD_SEL0_20
-		MOD_SEL0_19
-		MOD_SEL0_18_17
-		MOD_SEL0_16
-		0, 0, /* RESERVED 15 */
-		MOD_SEL0_14_13
-		MOD_SEL0_12
-		MOD_SEL0_11
-		MOD_SEL0_10
-		MOD_SEL0_9_8
-		MOD_SEL0_7_6
-		MOD_SEL0_5
-		MOD_SEL0_4_3
-		/* RESERVED 2, 1, 0 */
-		0, 0, 0, 0, 0, 0, 0, 0 ))
-	},
-	{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
-			     GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1,
-				   1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1),
-			     GROUP(
-		MOD_SEL1_31_30
-		MOD_SEL1_29_28_27
-		MOD_SEL1_26
-		MOD_SEL1_25_24
-		MOD_SEL1_23_22_21
-		MOD_SEL1_20
-		MOD_SEL1_19
-		MOD_SEL1_18_17
-		MOD_SEL1_16
-		MOD_SEL1_15_14
-		MOD_SEL1_13
-		MOD_SEL1_12
-		MOD_SEL1_11
-		MOD_SEL1_10
-		MOD_SEL1_9
-		0, 0, 0, 0, /* RESERVED 8, 7 */
-		MOD_SEL1_6
-		MOD_SEL1_5
-		MOD_SEL1_4
-		MOD_SEL1_3
-		MOD_SEL1_2
-		MOD_SEL1_1
-		MOD_SEL1_0 ))
-	},
-	{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
-			     GROUP(1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1,
-				   1, 4, 4, 4, 3, 1),
-			     GROUP(
-		MOD_SEL2_31
-		MOD_SEL2_30
-		MOD_SEL2_29
-		MOD_SEL2_28_27
-		MOD_SEL2_26
-		MOD_SEL2_25_24_23
-		/* RESERVED 22 */
-		0, 0,
-		MOD_SEL2_21
-		MOD_SEL2_20
-		MOD_SEL2_19
-		MOD_SEL2_18
-		MOD_SEL2_17
-		/* RESERVED 16 */
-		0, 0,
-		/* RESERVED 15, 14, 13, 12 */
-		0, 0, 0, 0, 0, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0,
-		/* RESERVED 11, 10, 9, 8 */
-		0, 0, 0, 0, 0, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0,
-		/* RESERVED 7, 6, 5, 4 */
-		0, 0, 0, 0, 0, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0,
-		/* RESERVED 3, 2, 1 */
-		0, 0, 0, 0, 0, 0, 0, 0,
-		MOD_SEL2_0 ))
-	},
-	{ },
-};
-
-static const struct pinmux_drive_reg pinmux_drive_regs[] = {
-	{ PINMUX_DRIVE_REG("DRVCTRL0", 0xe6060300) {
-		{ PIN_QSPI0_SPCLK,    28, 2 },	/* QSPI0_SPCLK */
-		{ PIN_QSPI0_MOSI_IO0, 24, 2 },	/* QSPI0_MOSI_IO0 */
-		{ PIN_QSPI0_MISO_IO1, 20, 2 },	/* QSPI0_MISO_IO1 */
-		{ PIN_QSPI0_IO2,      16, 2 },	/* QSPI0_IO2 */
-		{ PIN_QSPI0_IO3,      12, 2 },	/* QSPI0_IO3 */
-		{ PIN_QSPI0_SSL,       8, 2 },	/* QSPI0_SSL */
-		{ PIN_QSPI1_SPCLK,     4, 2 },	/* QSPI1_SPCLK */
-		{ PIN_QSPI1_MOSI_IO0,  0, 2 },	/* QSPI1_MOSI_IO0 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL1", 0xe6060304) {
-		{ PIN_QSPI1_MISO_IO1, 28, 2 },	/* QSPI1_MISO_IO1 */
-		{ PIN_QSPI1_IO2,      24, 2 },	/* QSPI1_IO2 */
-		{ PIN_QSPI1_IO3,      20, 2 },	/* QSPI1_IO3 */
-		{ PIN_QSPI1_SSL,      16, 2 },	/* QSPI1_SSL */
-		{ PIN_RPC_INT_N,      12, 2 },	/* RPC_INT# */
-		{ PIN_RPC_WP_N,        8, 2 },	/* RPC_WP# */
-		{ PIN_RPC_RESET_N,     4, 2 },	/* RPC_RESET# */
-		{ PIN_AVB_RX_CTL,      0, 3 },	/* AVB_RX_CTL */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL2", 0xe6060308) {
-		{ PIN_AVB_RXC,        28, 3 },	/* AVB_RXC */
-		{ PIN_AVB_RD0,        24, 3 },	/* AVB_RD0 */
-		{ PIN_AVB_RD1,        20, 3 },	/* AVB_RD1 */
-		{ PIN_AVB_RD2,        16, 3 },	/* AVB_RD2 */
-		{ PIN_AVB_RD3,        12, 3 },	/* AVB_RD3 */
-		{ PIN_AVB_TX_CTL,      8, 3 },	/* AVB_TX_CTL */
-		{ PIN_AVB_TXC,         4, 3 },	/* AVB_TXC */
-		{ PIN_AVB_TD0,         0, 3 },	/* AVB_TD0 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL3", 0xe606030c) {
-		{ PIN_AVB_TD1,        28, 3 },	/* AVB_TD1 */
-		{ PIN_AVB_TD2,        24, 3 },	/* AVB_TD2 */
-		{ PIN_AVB_TD3,        20, 3 },	/* AVB_TD3 */
-		{ PIN_AVB_TXCREFCLK,  16, 3 },	/* AVB_TXCREFCLK */
-		{ PIN_AVB_MDIO,       12, 3 },	/* AVB_MDIO */
-		{ RCAR_GP_PIN(2,  9),  8, 3 },	/* AVB_MDC */
-		{ RCAR_GP_PIN(2, 10),  4, 3 },	/* AVB_MAGIC */
-		{ RCAR_GP_PIN(2, 11),  0, 3 },	/* AVB_PHY_INT */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL4", 0xe6060310) {
-		{ RCAR_GP_PIN(2, 12), 28, 3 },	/* AVB_LINK */
-		{ RCAR_GP_PIN(2, 13), 24, 3 },	/* AVB_AVTP_MATCH */
-		{ RCAR_GP_PIN(2, 14), 20, 3 },	/* AVB_AVTP_CAPTURE */
-		{ RCAR_GP_PIN(2,  0), 16, 3 },	/* IRQ0 */
-		{ RCAR_GP_PIN(2,  1), 12, 3 },	/* IRQ1 */
-		{ RCAR_GP_PIN(2,  2),  8, 3 },	/* IRQ2 */
-		{ RCAR_GP_PIN(2,  3),  4, 3 },	/* IRQ3 */
-		{ RCAR_GP_PIN(2,  4),  0, 3 },	/* IRQ4 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL5", 0xe6060314) {
-		{ RCAR_GP_PIN(2,  5), 28, 3 },	/* IRQ5 */
-		{ RCAR_GP_PIN(2,  6), 24, 3 },	/* PWM0 */
-		{ RCAR_GP_PIN(2,  7), 20, 3 },	/* PWM1 */
-		{ RCAR_GP_PIN(2,  8), 16, 3 },	/* PWM2 */
-		{ RCAR_GP_PIN(1,  0), 12, 3 },	/* A0 */
-		{ RCAR_GP_PIN(1,  1),  8, 3 },	/* A1 */
-		{ RCAR_GP_PIN(1,  2),  4, 3 },	/* A2 */
-		{ RCAR_GP_PIN(1,  3),  0, 3 },	/* A3 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL6", 0xe6060318) {
-		{ RCAR_GP_PIN(1,  4), 28, 3 },	/* A4 */
-		{ RCAR_GP_PIN(1,  5), 24, 3 },	/* A5 */
-		{ RCAR_GP_PIN(1,  6), 20, 3 },	/* A6 */
-		{ RCAR_GP_PIN(1,  7), 16, 3 },	/* A7 */
-		{ RCAR_GP_PIN(1,  8), 12, 3 },	/* A8 */
-		{ RCAR_GP_PIN(1,  9),  8, 3 },	/* A9 */
-		{ RCAR_GP_PIN(1, 10),  4, 3 },	/* A10 */
-		{ RCAR_GP_PIN(1, 11),  0, 3 },	/* A11 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL7", 0xe606031c) {
-		{ RCAR_GP_PIN(1, 12), 28, 3 },	/* A12 */
-		{ RCAR_GP_PIN(1, 13), 24, 3 },	/* A13 */
-		{ RCAR_GP_PIN(1, 14), 20, 3 },	/* A14 */
-		{ RCAR_GP_PIN(1, 15), 16, 3 },	/* A15 */
-		{ RCAR_GP_PIN(1, 16), 12, 3 },	/* A16 */
-		{ RCAR_GP_PIN(1, 17),  8, 3 },	/* A17 */
-		{ RCAR_GP_PIN(1, 18),  4, 3 },	/* A18 */
-		{ RCAR_GP_PIN(1, 19),  0, 3 },	/* A19 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL8", 0xe6060320) {
-		{ RCAR_GP_PIN(1, 28), 28, 3 },	/* CLKOUT */
-		{ RCAR_GP_PIN(1, 20), 24, 3 },	/* CS0 */
-		{ RCAR_GP_PIN(1, 21), 20, 3 },	/* CS1_A26 */
-		{ RCAR_GP_PIN(1, 22), 16, 3 },	/* BS */
-		{ RCAR_GP_PIN(1, 23), 12, 3 },	/* RD */
-		{ RCAR_GP_PIN(1, 24),  8, 3 },	/* RD_WR */
-		{ RCAR_GP_PIN(1, 25),  4, 3 },	/* WE0 */
-		{ RCAR_GP_PIN(1, 26),  0, 3 },	/* WE1 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL9", 0xe6060324) {
-		{ RCAR_GP_PIN(1, 27), 28, 3 },	/* EX_WAIT0 */
-		{ PIN_PRESETOUT_N,    24, 3 },	/* PRESETOUT# */
-		{ RCAR_GP_PIN(0,  0), 20, 3 },	/* D0 */
-		{ RCAR_GP_PIN(0,  1), 16, 3 },	/* D1 */
-		{ RCAR_GP_PIN(0,  2), 12, 3 },	/* D2 */
-		{ RCAR_GP_PIN(0,  3),  8, 3 },	/* D3 */
-		{ RCAR_GP_PIN(0,  4),  4, 3 },	/* D4 */
-		{ RCAR_GP_PIN(0,  5),  0, 3 },	/* D5 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL10", 0xe6060328) {
-		{ RCAR_GP_PIN(0,  6), 28, 3 },	/* D6 */
-		{ RCAR_GP_PIN(0,  7), 24, 3 },	/* D7 */
-		{ RCAR_GP_PIN(0,  8), 20, 3 },	/* D8 */
-		{ RCAR_GP_PIN(0,  9), 16, 3 },	/* D9 */
-		{ RCAR_GP_PIN(0, 10), 12, 3 },	/* D10 */
-		{ RCAR_GP_PIN(0, 11),  8, 3 },	/* D11 */
-		{ RCAR_GP_PIN(0, 12),  4, 3 },	/* D12 */
-		{ RCAR_GP_PIN(0, 13),  0, 3 },	/* D13 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL11", 0xe606032c) {
-		{ RCAR_GP_PIN(0, 14), 28, 3 },	/* D14 */
-		{ RCAR_GP_PIN(0, 15), 24, 3 },	/* D15 */
-		{ RCAR_GP_PIN(7,  0), 20, 3 },	/* AVS1 */
-		{ RCAR_GP_PIN(7,  1), 16, 3 },	/* AVS2 */
-		{ RCAR_GP_PIN(7,  2), 12, 3 },	/* GP7_02 */
-		{ RCAR_GP_PIN(7,  3),  8, 3 },	/* GP7_03 */
-		{ PIN_DU_DOTCLKIN0,    4, 2 },	/* DU_DOTCLKIN0 */
-		{ PIN_DU_DOTCLKIN1,    0, 2 },	/* DU_DOTCLKIN1 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
-		{ PIN_DU_DOTCLKIN2,   28, 2 },	/* DU_DOTCLKIN2 */
-		{ PIN_DU_DOTCLKIN3,   24, 2 },	/* DU_DOTCLKIN3 */
-		{ PIN_FSCLKST_N,      20, 2 },	/* FSCLKST# */
-		{ PIN_TMS,             4, 2 },	/* TMS */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL13", 0xe6060334) {
-		{ PIN_TDO,            28, 2 },	/* TDO */
-		{ PIN_ASEBRK,         24, 2 },	/* ASEBRK */
-		{ RCAR_GP_PIN(3,  0), 20, 3 },	/* SD0_CLK */
-		{ RCAR_GP_PIN(3,  1), 16, 3 },	/* SD0_CMD */
-		{ RCAR_GP_PIN(3,  2), 12, 3 },	/* SD0_DAT0 */
-		{ RCAR_GP_PIN(3,  3),  8, 3 },	/* SD0_DAT1 */
-		{ RCAR_GP_PIN(3,  4),  4, 3 },	/* SD0_DAT2 */
-		{ RCAR_GP_PIN(3,  5),  0, 3 },	/* SD0_DAT3 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL14", 0xe6060338) {
-		{ RCAR_GP_PIN(3,  6), 28, 3 },	/* SD1_CLK */
-		{ RCAR_GP_PIN(3,  7), 24, 3 },	/* SD1_CMD */
-		{ RCAR_GP_PIN(3,  8), 20, 3 },	/* SD1_DAT0 */
-		{ RCAR_GP_PIN(3,  9), 16, 3 },	/* SD1_DAT1 */
-		{ RCAR_GP_PIN(3, 10), 12, 3 },	/* SD1_DAT2 */
-		{ RCAR_GP_PIN(3, 11),  8, 3 },	/* SD1_DAT3 */
-		{ RCAR_GP_PIN(4,  0),  4, 3 },	/* SD2_CLK */
-		{ RCAR_GP_PIN(4,  1),  0, 3 },	/* SD2_CMD */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL15", 0xe606033c) {
-		{ RCAR_GP_PIN(4,  2), 28, 3 },	/* SD2_DAT0 */
-		{ RCAR_GP_PIN(4,  3), 24, 3 },	/* SD2_DAT1 */
-		{ RCAR_GP_PIN(4,  4), 20, 3 },	/* SD2_DAT2 */
-		{ RCAR_GP_PIN(4,  5), 16, 3 },	/* SD2_DAT3 */
-		{ RCAR_GP_PIN(4,  6), 12, 3 },	/* SD2_DS */
-		{ RCAR_GP_PIN(4,  7),  8, 3 },	/* SD3_CLK */
-		{ RCAR_GP_PIN(4,  8),  4, 3 },	/* SD3_CMD */
-		{ RCAR_GP_PIN(4,  9),  0, 3 },	/* SD3_DAT0 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL16", 0xe6060340) {
-		{ RCAR_GP_PIN(4, 10), 28, 3 },	/* SD3_DAT1 */
-		{ RCAR_GP_PIN(4, 11), 24, 3 },	/* SD3_DAT2 */
-		{ RCAR_GP_PIN(4, 12), 20, 3 },	/* SD3_DAT3 */
-		{ RCAR_GP_PIN(4, 13), 16, 3 },	/* SD3_DAT4 */
-		{ RCAR_GP_PIN(4, 14), 12, 3 },	/* SD3_DAT5 */
-		{ RCAR_GP_PIN(4, 15),  8, 3 },	/* SD3_DAT6 */
-		{ RCAR_GP_PIN(4, 16),  4, 3 },	/* SD3_DAT7 */
-		{ RCAR_GP_PIN(4, 17),  0, 3 },	/* SD3_DS */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL17", 0xe6060344) {
-		{ RCAR_GP_PIN(3, 12), 28, 3 },	/* SD0_CD */
-		{ RCAR_GP_PIN(3, 13), 24, 3 },	/* SD0_WP */
-		{ RCAR_GP_PIN(3, 14), 20, 3 },	/* SD1_CD */
-		{ RCAR_GP_PIN(3, 15), 16, 3 },	/* SD1_WP */
-		{ RCAR_GP_PIN(5,  0), 12, 3 },	/* SCK0 */
-		{ RCAR_GP_PIN(5,  1),  8, 3 },	/* RX0 */
-		{ RCAR_GP_PIN(5,  2),  4, 3 },	/* TX0 */
-		{ RCAR_GP_PIN(5,  3),  0, 3 },	/* CTS0 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL18", 0xe6060348) {
-		{ RCAR_GP_PIN(5,  4), 28, 3 },	/* RTS0 */
-		{ RCAR_GP_PIN(5,  5), 24, 3 },	/* RX1 */
-		{ RCAR_GP_PIN(5,  6), 20, 3 },	/* TX1 */
-		{ RCAR_GP_PIN(5,  7), 16, 3 },	/* CTS1 */
-		{ RCAR_GP_PIN(5,  8), 12, 3 },	/* RTS1 */
-		{ RCAR_GP_PIN(5,  9),  8, 3 },	/* SCK2 */
-		{ RCAR_GP_PIN(5, 10),  4, 3 },	/* TX2 */
-		{ RCAR_GP_PIN(5, 11),  0, 3 },	/* RX2 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL19", 0xe606034c) {
-		{ RCAR_GP_PIN(5, 12), 28, 3 },	/* HSCK0 */
-		{ RCAR_GP_PIN(5, 13), 24, 3 },	/* HRX0 */
-		{ RCAR_GP_PIN(5, 14), 20, 3 },	/* HTX0 */
-		{ RCAR_GP_PIN(5, 15), 16, 3 },	/* HCTS0 */
-		{ RCAR_GP_PIN(5, 16), 12, 3 },	/* HRTS0 */
-		{ RCAR_GP_PIN(5, 17),  8, 3 },	/* MSIOF0_SCK */
-		{ RCAR_GP_PIN(5, 18),  4, 3 },	/* MSIOF0_SYNC */
-		{ RCAR_GP_PIN(5, 19),  0, 3 },	/* MSIOF0_SS1 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL20", 0xe6060350) {
-		{ RCAR_GP_PIN(5, 20), 28, 3 },	/* MSIOF0_TXD */
-		{ RCAR_GP_PIN(5, 21), 24, 3 },	/* MSIOF0_SS2 */
-		{ RCAR_GP_PIN(5, 22), 20, 3 },	/* MSIOF0_RXD */
-		{ RCAR_GP_PIN(5, 23), 16, 3 },	/* MLB_CLK */
-		{ RCAR_GP_PIN(5, 24), 12, 3 },	/* MLB_SIG */
-		{ RCAR_GP_PIN(5, 25),  8, 3 },	/* MLB_DAT */
-		{ PIN_MLB_REF,         4, 3 },	/* MLB_REF */
-		{ RCAR_GP_PIN(6,  0),  0, 3 },	/* SSI_SCK01239 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL21", 0xe6060354) {
-		{ RCAR_GP_PIN(6,  1), 28, 3 },	/* SSI_WS01239 */
-		{ RCAR_GP_PIN(6,  2), 24, 3 },	/* SSI_SDATA0 */
-		{ RCAR_GP_PIN(6,  3), 20, 3 },	/* SSI_SDATA1 */
-		{ RCAR_GP_PIN(6,  4), 16, 3 },	/* SSI_SDATA2 */
-		{ RCAR_GP_PIN(6,  5), 12, 3 },	/* SSI_SCK349 */
-		{ RCAR_GP_PIN(6,  6),  8, 3 },	/* SSI_WS349 */
-		{ RCAR_GP_PIN(6,  7),  4, 3 },	/* SSI_SDATA3 */
-		{ RCAR_GP_PIN(6,  8),  0, 3 },	/* SSI_SCK4 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL22", 0xe6060358) {
-		{ RCAR_GP_PIN(6,  9), 28, 3 },	/* SSI_WS4 */
-		{ RCAR_GP_PIN(6, 10), 24, 3 },	/* SSI_SDATA4 */
-		{ RCAR_GP_PIN(6, 11), 20, 3 },	/* SSI_SCK5 */
-		{ RCAR_GP_PIN(6, 12), 16, 3 },	/* SSI_WS5 */
-		{ RCAR_GP_PIN(6, 13), 12, 3 },	/* SSI_SDATA5 */
-		{ RCAR_GP_PIN(6, 14),  8, 3 },	/* SSI_SCK6 */
-		{ RCAR_GP_PIN(6, 15),  4, 3 },	/* SSI_WS6 */
-		{ RCAR_GP_PIN(6, 16),  0, 3 },	/* SSI_SDATA6 */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL23", 0xe606035c) {
-		{ RCAR_GP_PIN(6, 17), 28, 3 },	/* SSI_SCK78 */
-		{ RCAR_GP_PIN(6, 18), 24, 3 },	/* SSI_WS78 */
-		{ RCAR_GP_PIN(6, 19), 20, 3 },	/* SSI_SDATA7 */
-		{ RCAR_GP_PIN(6, 20), 16, 3 },	/* SSI_SDATA8 */
-		{ RCAR_GP_PIN(6, 21), 12, 3 },	/* SSI_SDATA9 */
-		{ RCAR_GP_PIN(6, 22),  8, 3 },	/* AUDIO_CLKA */
-		{ RCAR_GP_PIN(6, 23),  4, 3 },	/* AUDIO_CLKB */
-		{ RCAR_GP_PIN(6, 24),  0, 3 },	/* USB0_PWEN */
-	} },
-	{ PINMUX_DRIVE_REG("DRVCTRL24", 0xe6060360) {
-		{ RCAR_GP_PIN(6, 25), 28, 3 },	/* USB0_OVC */
-		{ RCAR_GP_PIN(6, 26), 24, 3 },	/* USB1_PWEN */
-		{ RCAR_GP_PIN(6, 27), 20, 3 },	/* USB1_OVC */
-		{ RCAR_GP_PIN(6, 28), 16, 3 },	/* USB30_PWEN */
-		{ RCAR_GP_PIN(6, 29), 12, 3 },	/* USB30_OVC */
-		{ RCAR_GP_PIN(6, 30),  8, 3 },	/* USB2_CH3_PWEN */
-		{ RCAR_GP_PIN(6, 31),  4, 3 },	/* USB2_CH3_OVC */
-	} },
-	{ },
-};
-
-enum ioctrl_regs {
-	POCCTRL,
-	TDSELCTRL,
-};
-
-static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
-	[POCCTRL] = { 0xe6060380, },
-	[TDSELCTRL] = { 0xe60603c0, },
-	{ /* sentinel */ },
-};
-
-static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
-{
-	int bit = -EINVAL;
-
-	*pocctrl = pinmux_ioctrl_regs[POCCTRL].reg;
-
-	if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
-		bit = pin & 0x1f;
-
-	if (pin >= RCAR_GP_PIN(4, 0) && pin <= RCAR_GP_PIN(4, 17))
-		bit = (pin & 0x1f) + 12;
-
-	return bit;
-}
-
-static const struct pinmux_bias_reg pinmux_bias_regs[] = {
-	{ PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
-		[ 0] = PIN_QSPI0_SPCLK,		/* QSPI0_SPCLK */
-		[ 1] = PIN_QSPI0_MOSI_IO0,	/* QSPI0_MOSI_IO0 */
-		[ 2] = PIN_QSPI0_MISO_IO1,	/* QSPI0_MISO_IO1 */
-		[ 3] = PIN_QSPI0_IO2,		/* QSPI0_IO2 */
-		[ 4] = PIN_QSPI0_IO3,		/* QSPI0_IO3 */
-		[ 5] = PIN_QSPI0_SSL,		/* QSPI0_SSL */
-		[ 6] = PIN_QSPI1_SPCLK,		/* QSPI1_SPCLK */
-		[ 7] = PIN_QSPI1_MOSI_IO0,	/* QSPI1_MOSI_IO0 */
-		[ 8] = PIN_QSPI1_MISO_IO1,	/* QSPI1_MISO_IO1 */
-		[ 9] = PIN_QSPI1_IO2,		/* QSPI1_IO2 */
-		[10] = PIN_QSPI1_IO3,		/* QSPI1_IO3 */
-		[11] = PIN_QSPI1_SSL,		/* QSPI1_SSL */
-		[12] = PIN_RPC_INT_N,		/* RPC_INT# */
-		[13] = PIN_RPC_WP_N,		/* RPC_WP# */
-		[14] = PIN_RPC_RESET_N,		/* RPC_RESET# */
-		[15] = PIN_AVB_RX_CTL,		/* AVB_RX_CTL */
-		[16] = PIN_AVB_RXC,		/* AVB_RXC */
-		[17] = PIN_AVB_RD0,		/* AVB_RD0 */
-		[18] = PIN_AVB_RD1,		/* AVB_RD1 */
-		[19] = PIN_AVB_RD2,		/* AVB_RD2 */
-		[20] = PIN_AVB_RD3,		/* AVB_RD3 */
-		[21] = PIN_AVB_TX_CTL,		/* AVB_TX_CTL */
-		[22] = PIN_AVB_TXC,		/* AVB_TXC */
-		[23] = PIN_AVB_TD0,		/* AVB_TD0 */
-		[24] = PIN_AVB_TD1,		/* AVB_TD1 */
-		[25] = PIN_AVB_TD2,		/* AVB_TD2 */
-		[26] = PIN_AVB_TD3,		/* AVB_TD3 */
-		[27] = PIN_AVB_TXCREFCLK,	/* AVB_TXCREFCLK */
-		[28] = PIN_AVB_MDIO,		/* AVB_MDIO */
-		[29] = RCAR_GP_PIN(2,  9),	/* AVB_MDC */
-		[30] = RCAR_GP_PIN(2, 10),	/* AVB_MAGIC */
-		[31] = RCAR_GP_PIN(2, 11),	/* AVB_PHY_INT */
-	} },
-	{ PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
-		[ 0] = RCAR_GP_PIN(2, 12),	/* AVB_LINK */
-		[ 1] = RCAR_GP_PIN(2, 13),	/* AVB_AVTP_MATCH_A */
-		[ 2] = RCAR_GP_PIN(2, 14),	/* AVB_AVTP_CAPTURE_A */
-		[ 3] = RCAR_GP_PIN(2,  0),	/* IRQ0 */
-		[ 4] = RCAR_GP_PIN(2,  1),	/* IRQ1 */
-		[ 5] = RCAR_GP_PIN(2,  2),	/* IRQ2 */
-		[ 6] = RCAR_GP_PIN(2,  3),	/* IRQ3 */
-		[ 7] = RCAR_GP_PIN(2,  4),	/* IRQ4 */
-		[ 8] = RCAR_GP_PIN(2,  5),	/* IRQ5 */
-		[ 9] = RCAR_GP_PIN(2,  6),	/* PWM0 */
-		[10] = RCAR_GP_PIN(2,  7),	/* PWM1_A */
-		[11] = RCAR_GP_PIN(2,  8),	/* PWM2_A */
-		[12] = RCAR_GP_PIN(1,  0),	/* A0 */
-		[13] = RCAR_GP_PIN(1,  1),	/* A1 */
-		[14] = RCAR_GP_PIN(1,  2),	/* A2 */
-		[15] = RCAR_GP_PIN(1,  3),	/* A3 */
-		[16] = RCAR_GP_PIN(1,  4),	/* A4 */
-		[17] = RCAR_GP_PIN(1,  5),	/* A5 */
-		[18] = RCAR_GP_PIN(1,  6),	/* A6 */
-		[19] = RCAR_GP_PIN(1,  7),	/* A7 */
-		[20] = RCAR_GP_PIN(1,  8),	/* A8 */
-		[21] = RCAR_GP_PIN(1,  9),	/* A9 */
-		[22] = RCAR_GP_PIN(1, 10),	/* A10 */
-		[23] = RCAR_GP_PIN(1, 11),	/* A11 */
-		[24] = RCAR_GP_PIN(1, 12),	/* A12 */
-		[25] = RCAR_GP_PIN(1, 13),	/* A13 */
-		[26] = RCAR_GP_PIN(1, 14),	/* A14 */
-		[27] = RCAR_GP_PIN(1, 15),	/* A15 */
-		[28] = RCAR_GP_PIN(1, 16),	/* A16 */
-		[29] = RCAR_GP_PIN(1, 17),	/* A17 */
-		[30] = RCAR_GP_PIN(1, 18),	/* A18 */
-		[31] = RCAR_GP_PIN(1, 19),	/* A19 */
-	} },
-	{ PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
-		[ 0] = RCAR_GP_PIN(1, 28),	/* CLKOUT */
-		[ 1] = RCAR_GP_PIN(1, 20),	/* CS0_N */
-		[ 2] = RCAR_GP_PIN(1, 21),	/* CS1_N */
-		[ 3] = RCAR_GP_PIN(1, 22),	/* BS_N */
-		[ 4] = RCAR_GP_PIN(1, 23),	/* RD_N */
-		[ 5] = RCAR_GP_PIN(1, 24),	/* RD_WR_N */
-		[ 6] = RCAR_GP_PIN(1, 25),	/* WE0_N */
-		[ 7] = RCAR_GP_PIN(1, 26),	/* WE1_N */
-		[ 8] = RCAR_GP_PIN(1, 27),	/* EX_WAIT0_A */
-		[ 9] = PIN_PRESETOUT_N,		/* PRESETOUT# */
-		[10] = RCAR_GP_PIN(0,  0),	/* D0 */
-		[11] = RCAR_GP_PIN(0,  1),	/* D1 */
-		[12] = RCAR_GP_PIN(0,  2),	/* D2 */
-		[13] = RCAR_GP_PIN(0,  3),	/* D3 */
-		[14] = RCAR_GP_PIN(0,  4),	/* D4 */
-		[15] = RCAR_GP_PIN(0,  5),	/* D5 */
-		[16] = RCAR_GP_PIN(0,  6),	/* D6 */
-		[17] = RCAR_GP_PIN(0,  7),	/* D7 */
-		[18] = RCAR_GP_PIN(0,  8),	/* D8 */
-		[19] = RCAR_GP_PIN(0,  9),	/* D9 */
-		[20] = RCAR_GP_PIN(0, 10),	/* D10 */
-		[21] = RCAR_GP_PIN(0, 11),	/* D11 */
-		[22] = RCAR_GP_PIN(0, 12),	/* D12 */
-		[23] = RCAR_GP_PIN(0, 13),	/* D13 */
-		[24] = RCAR_GP_PIN(0, 14),	/* D14 */
-		[25] = RCAR_GP_PIN(0, 15),	/* D15 */
-		[26] = RCAR_GP_PIN(7,  0),	/* AVS1 */
-		[27] = RCAR_GP_PIN(7,  1),	/* AVS2 */
-		[28] = RCAR_GP_PIN(7,  2),	/* GP7_02 */
-		[29] = RCAR_GP_PIN(7,  3),	/* GP7_03 */
-		[30] = PIN_DU_DOTCLKIN0,	/* DU_DOTCLKIN0 */
-		[31] = PIN_DU_DOTCLKIN1,	/* DU_DOTCLKIN1 */
-	} },
-	{ PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
-		[ 0] = PIN_DU_DOTCLKIN2,	/* DU_DOTCLKIN2 */
-		[ 1] = PIN_DU_DOTCLKIN3,	/* DU_DOTCLKIN3 */
-		[ 2] = PIN_FSCLKST_N,		/* FSCLKST# */
-		[ 3] = PIN_EXTALR,		/* EXTALR*/
-		[ 4] = PIN_TRST_N,		/* TRST# */
-		[ 5] = PIN_TCK,			/* TCK */
-		[ 6] = PIN_TMS,			/* TMS */
-		[ 7] = PIN_TDI,			/* TDI */
-		[ 8] = SH_PFC_PIN_NONE,
-		[ 9] = PIN_ASEBRK,		/* ASEBRK */
-		[10] = RCAR_GP_PIN(3,  0),	/* SD0_CLK */
-		[11] = RCAR_GP_PIN(3,  1),	/* SD0_CMD */
-		[12] = RCAR_GP_PIN(3,  2),	/* SD0_DAT0 */
-		[13] = RCAR_GP_PIN(3,  3),	/* SD0_DAT1 */
-		[14] = RCAR_GP_PIN(3,  4),	/* SD0_DAT2 */
-		[15] = RCAR_GP_PIN(3,  5),	/* SD0_DAT3 */
-		[16] = RCAR_GP_PIN(3,  6),	/* SD1_CLK */
-		[17] = RCAR_GP_PIN(3,  7),	/* SD1_CMD */
-		[18] = RCAR_GP_PIN(3,  8),	/* SD1_DAT0 */
-		[19] = RCAR_GP_PIN(3,  9),	/* SD1_DAT1 */
-		[20] = RCAR_GP_PIN(3, 10),	/* SD1_DAT2 */
-		[21] = RCAR_GP_PIN(3, 11),	/* SD1_DAT3 */
-		[22] = RCAR_GP_PIN(4,  0),	/* SD2_CLK */
-		[23] = RCAR_GP_PIN(4,  1),	/* SD2_CMD */
-		[24] = RCAR_GP_PIN(4,  2),	/* SD2_DAT0 */
-		[25] = RCAR_GP_PIN(4,  3),	/* SD2_DAT1 */
-		[26] = RCAR_GP_PIN(4,  4),	/* SD2_DAT2 */
-		[27] = RCAR_GP_PIN(4,  5),	/* SD2_DAT3 */
-		[28] = RCAR_GP_PIN(4,  6),	/* SD2_DS */
-		[29] = RCAR_GP_PIN(4,  7),	/* SD3_CLK */
-		[30] = RCAR_GP_PIN(4,  8),	/* SD3_CMD */
-		[31] = RCAR_GP_PIN(4,  9),	/* SD3_DAT0 */
-	} },
-	{ PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
-		[ 0] = RCAR_GP_PIN(4, 10),	/* SD3_DAT1 */
-		[ 1] = RCAR_GP_PIN(4, 11),	/* SD3_DAT2 */
-		[ 2] = RCAR_GP_PIN(4, 12),	/* SD3_DAT3 */
-		[ 3] = RCAR_GP_PIN(4, 13),	/* SD3_DAT4 */
-		[ 4] = RCAR_GP_PIN(4, 14),	/* SD3_DAT5 */
-		[ 5] = RCAR_GP_PIN(4, 15),	/* SD3_DAT6 */
-		[ 6] = RCAR_GP_PIN(4, 16),	/* SD3_DAT7 */
-		[ 7] = RCAR_GP_PIN(4, 17),	/* SD3_DS */
-		[ 8] = RCAR_GP_PIN(3, 12),	/* SD0_CD */
-		[ 9] = RCAR_GP_PIN(3, 13),	/* SD0_WP */
-		[10] = RCAR_GP_PIN(3, 14),	/* SD1_CD */
-		[11] = RCAR_GP_PIN(3, 15),	/* SD1_WP */
-		[12] = RCAR_GP_PIN(5,  0),	/* SCK0 */
-		[13] = RCAR_GP_PIN(5,  1),	/* RX0 */
-		[14] = RCAR_GP_PIN(5,  2),	/* TX0 */
-		[15] = RCAR_GP_PIN(5,  3),	/* CTS0_N */
-		[16] = RCAR_GP_PIN(5,  4),	/* RTS0_N */
-		[17] = RCAR_GP_PIN(5,  5),	/* RX1_A */
-		[18] = RCAR_GP_PIN(5,  6),	/* TX1_A */
-		[19] = RCAR_GP_PIN(5,  7),	/* CTS1_N */
-		[20] = RCAR_GP_PIN(5,  8),	/* RTS1_N */
-		[21] = RCAR_GP_PIN(5,  9),	/* SCK2 */
-		[22] = RCAR_GP_PIN(5, 10),	/* TX2_A */
-		[23] = RCAR_GP_PIN(5, 11),	/* RX2_A */
-		[24] = RCAR_GP_PIN(5, 12),	/* HSCK0 */
-		[25] = RCAR_GP_PIN(5, 13),	/* HRX0 */
-		[26] = RCAR_GP_PIN(5, 14),	/* HTX0 */
-		[27] = RCAR_GP_PIN(5, 15),	/* HCTS0_N */
-		[28] = RCAR_GP_PIN(5, 16),	/* HRTS0_N */
-		[29] = RCAR_GP_PIN(5, 17),	/* MSIOF0_SCK */
-		[30] = RCAR_GP_PIN(5, 18),	/* MSIOF0_SYNC */
-		[31] = RCAR_GP_PIN(5, 19),	/* MSIOF0_SS1 */
-	} },
-	{ PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
-		[ 0] = RCAR_GP_PIN(5, 20),	/* MSIOF0_TXD */
-		[ 1] = RCAR_GP_PIN(5, 21),	/* MSIOF0_SS2 */
-		[ 2] = RCAR_GP_PIN(5, 22),	/* MSIOF0_RXD */
-		[ 3] = RCAR_GP_PIN(5, 23),	/* MLB_CLK */
-		[ 4] = RCAR_GP_PIN(5, 24),	/* MLB_SIG */
-		[ 5] = RCAR_GP_PIN(5, 25),	/* MLB_DAT */
-		[ 6] = PIN_MLB_REF,		/* MLB_REF */
-		[ 7] = RCAR_GP_PIN(6,  0),	/* SSI_SCK01239 */
-		[ 8] = RCAR_GP_PIN(6,  1),	/* SSI_WS01239 */
-		[ 9] = RCAR_GP_PIN(6,  2),	/* SSI_SDATA0 */
-		[10] = RCAR_GP_PIN(6,  3),	/* SSI_SDATA1_A */
-		[11] = RCAR_GP_PIN(6,  4),	/* SSI_SDATA2_A */
-		[12] = RCAR_GP_PIN(6,  5),	/* SSI_SCK349 */
-		[13] = RCAR_GP_PIN(6,  6),	/* SSI_WS349 */
-		[14] = RCAR_GP_PIN(6,  7),	/* SSI_SDATA3 */
-		[15] = RCAR_GP_PIN(6,  8),	/* SSI_SCK4 */
-		[16] = RCAR_GP_PIN(6,  9),	/* SSI_WS4 */
-		[17] = RCAR_GP_PIN(6, 10),	/* SSI_SDATA4 */
-		[18] = RCAR_GP_PIN(6, 11),	/* SSI_SCK5 */
-		[19] = RCAR_GP_PIN(6, 12),	/* SSI_WS5 */
-		[20] = RCAR_GP_PIN(6, 13),	/* SSI_SDATA5 */
-		[21] = RCAR_GP_PIN(6, 14),	/* SSI_SCK6 */
-		[22] = RCAR_GP_PIN(6, 15),	/* SSI_WS6 */
-		[23] = RCAR_GP_PIN(6, 16),	/* SSI_SDATA6 */
-		[24] = RCAR_GP_PIN(6, 17),	/* SSI_SCK78 */
-		[25] = RCAR_GP_PIN(6, 18),	/* SSI_WS78 */
-		[26] = RCAR_GP_PIN(6, 19),	/* SSI_SDATA7 */
-		[27] = RCAR_GP_PIN(6, 20),	/* SSI_SDATA8 */
-		[28] = RCAR_GP_PIN(6, 21),	/* SSI_SDATA9_A */
-		[29] = RCAR_GP_PIN(6, 22),	/* AUDIO_CLKA_A */
-		[30] = RCAR_GP_PIN(6, 23),	/* AUDIO_CLKB_B */
-		[31] = RCAR_GP_PIN(6, 24),	/* USB0_PWEN */
-	} },
-	{ PINMUX_BIAS_REG("PUEN6", 0xe6060418, "PUD6", 0xe6060458) {
-		[ 0] = RCAR_GP_PIN(6, 25),	/* USB0_OVC */
-		[ 1] = RCAR_GP_PIN(6, 26),	/* USB1_PWEN */
-		[ 2] = RCAR_GP_PIN(6, 27),	/* USB1_OVC */
-		[ 3] = RCAR_GP_PIN(6, 28),	/* USB30_PWEN */
-		[ 4] = RCAR_GP_PIN(6, 29),	/* USB30_OVC */
-		[ 5] = RCAR_GP_PIN(6, 30),	/* USB2_CH3_PWEN */
-		[ 6] = RCAR_GP_PIN(6, 31),	/* USB2_CH3_OVC */
-		[ 7] = SH_PFC_PIN_NONE,
-		[ 8] = SH_PFC_PIN_NONE,
-		[ 9] = SH_PFC_PIN_NONE,
-		[10] = SH_PFC_PIN_NONE,
-		[11] = SH_PFC_PIN_NONE,
-		[12] = SH_PFC_PIN_NONE,
-		[13] = SH_PFC_PIN_NONE,
-		[14] = SH_PFC_PIN_NONE,
-		[15] = SH_PFC_PIN_NONE,
-		[16] = SH_PFC_PIN_NONE,
-		[17] = SH_PFC_PIN_NONE,
-		[18] = SH_PFC_PIN_NONE,
-		[19] = SH_PFC_PIN_NONE,
-		[20] = SH_PFC_PIN_NONE,
-		[21] = SH_PFC_PIN_NONE,
-		[22] = SH_PFC_PIN_NONE,
-		[23] = SH_PFC_PIN_NONE,
-		[24] = SH_PFC_PIN_NONE,
-		[25] = SH_PFC_PIN_NONE,
-		[26] = SH_PFC_PIN_NONE,
-		[27] = SH_PFC_PIN_NONE,
-		[28] = SH_PFC_PIN_NONE,
-		[29] = SH_PFC_PIN_NONE,
-		[30] = SH_PFC_PIN_NONE,
-		[31] = SH_PFC_PIN_NONE,
-	} },
-	{ /* sentinel */ },
-};
-
-static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
-					    unsigned int pin)
-{
-	const struct pinmux_bias_reg *reg;
-	unsigned int bit;
-
-	reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
-	if (!reg)
-		return PIN_CONFIG_BIAS_DISABLE;
-
-	if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
-		return PIN_CONFIG_BIAS_DISABLE;
-	else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
-		return PIN_CONFIG_BIAS_PULL_UP;
-	else
-		return PIN_CONFIG_BIAS_PULL_DOWN;
-}
-
-static void r8a7795_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
-				   unsigned int bias)
-{
-	const struct pinmux_bias_reg *reg;
-	u32 enable, updown;
-	unsigned int bit;
-
-	reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
-	if (!reg)
-		return;
-
-	enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
-	if (bias != PIN_CONFIG_BIAS_DISABLE)
-		enable |= BIT(bit);
-
-	updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
-	if (bias == PIN_CONFIG_BIAS_PULL_UP)
-		updown |= BIT(bit);
-
-	sh_pfc_write(pfc, reg->pud, updown);
-	sh_pfc_write(pfc, reg->puen, enable);
-}
-
-static const struct soc_device_attribute r8a7795es1[] = {
-	{ .soc_id = "r8a7795", .revision = "ES1.*" },
-	{ /* sentinel */ }
-};
-
-static int r8a7795_pinmux_init(struct sh_pfc *pfc)
-{
-	if (soc_device_match(r8a7795es1))
-		pfc->info = &r8a7795es1_pinmux_info;
-
-	return 0;
-}
-
-static const struct sh_pfc_soc_operations r8a7795_pinmux_ops = {
-	.init = r8a7795_pinmux_init,
-	.pin_to_pocctrl = r8a7795_pin_to_pocctrl,
-	.get_bias = r8a7795_pinmux_get_bias,
-	.set_bias = r8a7795_pinmux_set_bias,
-};
-
-const struct sh_pfc_soc_info r8a7795_pinmux_info = {
-	.name = "r8a77951_pfc",
-	.ops = &r8a7795_pinmux_ops,
-	.unlock_reg = 0xe6060000, /* PMMR */
-
-	.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
-
-	.pins = pinmux_pins,
-	.nr_pins = ARRAY_SIZE(pinmux_pins),
-	.groups = pinmux_groups,
-	.nr_groups = ARRAY_SIZE(pinmux_groups),
-	.functions = pinmux_functions,
-	.nr_functions = ARRAY_SIZE(pinmux_functions),
-
-	.cfg_regs = pinmux_config_regs,
-	.drive_regs = pinmux_drive_regs,
-	.bias_regs = pinmux_bias_regs,
-	.ioctrl_regs = pinmux_ioctrl_regs,
-
-	.pinmux_data = pinmux_data,
-	.pinmux_data_size = ARRAY_SIZE(pinmux_data),
-};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77950.c b/drivers/pinctrl/sh-pfc/pfc-r8a77950.c
new file mode 100644
index 0000000..04812e62
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77950.c
@@ -0,0 +1,5891 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A77950 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2015-2017  Renesas Electronics Corporation
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#include "core.h"
+#include "sh_pfc.h"
+
+#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
+
+#define CPU_ALL_GP(fn, sfx)						\
+	PORT_GP_CFG_16(0, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_28(1, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_15(2, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_12(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE),	\
+	PORT_GP_CFG_1(3, 12, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_18(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE),	\
+	PORT_GP_CFG_26(5, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_32(6, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_4(7, fn, sfx, CFG_FLAGS)
+
+#define CPU_ALL_NOGP(fn)						\
+	PIN_NOGP_CFG(ASEBRK, "ASEBRK", fn, CFG_FLAGS),			\
+	PIN_NOGP_CFG(AVB_MDIO, "AVB_MDIO", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_RD0, "AVB_RD0", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_RD1, "AVB_RD1", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_RD2, "AVB_RD2", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_RD3, "AVB_RD3", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_RXC, "AVB_RXC", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_RX_CTL, "AVB_RX_CTL", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_TD0, "AVB_TD0", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_TD1, "AVB_TD1", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_TD2, "AVB_TD2", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_TD3, "AVB_TD3", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_TXC, "AVB_TXC", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_TXCREFCLK, "AVB_TXCREFCLK", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(AVB_TX_CTL, "AVB_TX_CTL", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(CLKOUT, "CLKOUT", fn, CFG_FLAGS),			\
+	PIN_NOGP_CFG(DU_DOTCLKIN0, "DU_DOTCLKIN0", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(DU_DOTCLKIN1, "DU_DOTCLKIN1", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(DU_DOTCLKIN2, "DU_DOTCLKIN2", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(DU_DOTCLKIN3, "DU_DOTCLKIN3", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(EXTALR, "EXTALR", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN),\
+	PIN_NOGP_CFG(FSCLKST_N, "FSCLKST#", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(MLB_REF, "MLB_REF", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(PRESETOUT_N, "PRESETOUT#", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(QSPI0_IO2, "QSPI0_IO2", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(QSPI0_IO3, "QSPI0_IO3", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(QSPI0_MISO_IO1, "QSPI0_MISO_IO1", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(QSPI0_MOSI_IO0, "QSPI0_MOSI_IO0", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(QSPI0_SPCLK, "QSPI0_SPCLK", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(QSPI0_SSL, "QSPI0_SSL", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(QSPI1_IO2, "QSPI1_IO2", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(QSPI1_IO3, "QSPI1_IO3", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(QSPI1_MISO_IO1, "QSPI1_MISO_IO1", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(QSPI1_MOSI_IO0, "QSPI1_MOSI_IO0", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(QSPI1_SPCLK, "QSPI1_SPCLK", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(QSPI1_SSL, "QSPI1_SSL", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(RPC_INT_N, "RPC_INT#", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(RPC_RESET_N, "RPC_RESET#", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(RPC_WP_N, "RPC_WP#", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(TCK, "TCK", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN),	\
+	PIN_NOGP_CFG(TDI, "TDI", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN),	\
+	PIN_NOGP_CFG(TDO, "TDO", fn, SH_PFC_PIN_CFG_DRIVE_STRENGTH),	\
+	PIN_NOGP_CFG(TMS, "TMS", fn, CFG_FLAGS),			\
+	PIN_NOGP_CFG(TRST_N, "TRST#", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN)
+
+/*
+ * F_() : just information
+ * FM() : macro for FN_xxx / xxx_MARK
+ */
+
+/* GPSR0 */
+#define GPSR0_15	F_(D15,			IP7_11_8)
+#define GPSR0_14	F_(D14,			IP7_7_4)
+#define GPSR0_13	F_(D13,			IP7_3_0)
+#define GPSR0_12	F_(D12,			IP6_31_28)
+#define GPSR0_11	F_(D11,			IP6_27_24)
+#define GPSR0_10	F_(D10,			IP6_23_20)
+#define GPSR0_9		F_(D9,			IP6_19_16)
+#define GPSR0_8		F_(D8,			IP6_15_12)
+#define GPSR0_7		F_(D7,			IP6_11_8)
+#define GPSR0_6		F_(D6,			IP6_7_4)
+#define GPSR0_5		F_(D5,			IP6_3_0)
+#define GPSR0_4		F_(D4,			IP5_31_28)
+#define GPSR0_3		F_(D3,			IP5_27_24)
+#define GPSR0_2		F_(D2,			IP5_23_20)
+#define GPSR0_1		F_(D1,			IP5_19_16)
+#define GPSR0_0		F_(D0,			IP5_15_12)
+
+/* GPSR1 */
+#define GPSR1_27	F_(EX_WAIT0_A,		IP5_11_8)
+#define GPSR1_26	F_(WE1_N,		IP5_7_4)
+#define GPSR1_25	F_(WE0_N,		IP5_3_0)
+#define GPSR1_24	F_(RD_WR_N,		IP4_31_28)
+#define GPSR1_23	F_(RD_N,		IP4_27_24)
+#define GPSR1_22	F_(BS_N,		IP4_23_20)
+#define GPSR1_21	F_(CS1_N_A26,		IP4_19_16)
+#define GPSR1_20	F_(CS0_N,		IP4_15_12)
+#define GPSR1_19	F_(A19,			IP4_11_8)
+#define GPSR1_18	F_(A18,			IP4_7_4)
+#define GPSR1_17	F_(A17,			IP4_3_0)
+#define GPSR1_16	F_(A16,			IP3_31_28)
+#define GPSR1_15	F_(A15,			IP3_27_24)
+#define GPSR1_14	F_(A14,			IP3_23_20)
+#define GPSR1_13	F_(A13,			IP3_19_16)
+#define GPSR1_12	F_(A12,			IP3_15_12)
+#define GPSR1_11	F_(A11,			IP3_11_8)
+#define GPSR1_10	F_(A10,			IP3_7_4)
+#define GPSR1_9		F_(A9,			IP3_3_0)
+#define GPSR1_8		F_(A8,			IP2_31_28)
+#define GPSR1_7		F_(A7,			IP2_27_24)
+#define GPSR1_6		F_(A6,			IP2_23_20)
+#define GPSR1_5		F_(A5,			IP2_19_16)
+#define GPSR1_4		F_(A4,			IP2_15_12)
+#define GPSR1_3		F_(A3,			IP2_11_8)
+#define GPSR1_2		F_(A2,			IP2_7_4)
+#define GPSR1_1		F_(A1,			IP2_3_0)
+#define GPSR1_0		F_(A0,			IP1_31_28)
+
+/* GPSR2 */
+#define GPSR2_14	F_(AVB_AVTP_CAPTURE_A,	IP0_23_20)
+#define GPSR2_13	F_(AVB_AVTP_MATCH_A,	IP0_19_16)
+#define GPSR2_12	F_(AVB_LINK,		IP0_15_12)
+#define GPSR2_11	F_(AVB_PHY_INT,		IP0_11_8)
+#define GPSR2_10	F_(AVB_MAGIC,		IP0_7_4)
+#define GPSR2_9		F_(AVB_MDC,		IP0_3_0)
+#define GPSR2_8		F_(PWM2_A,		IP1_27_24)
+#define GPSR2_7		F_(PWM1_A,		IP1_23_20)
+#define GPSR2_6		F_(PWM0,		IP1_19_16)
+#define GPSR2_5		F_(IRQ5,		IP1_15_12)
+#define GPSR2_4		F_(IRQ4,		IP1_11_8)
+#define GPSR2_3		F_(IRQ3,		IP1_7_4)
+#define GPSR2_2		F_(IRQ2,		IP1_3_0)
+#define GPSR2_1		F_(IRQ1,		IP0_31_28)
+#define GPSR2_0		F_(IRQ0,		IP0_27_24)
+
+/* GPSR3 */
+#define GPSR3_15	F_(SD1_WP,		IP10_23_20)
+#define GPSR3_14	F_(SD1_CD,		IP10_19_16)
+#define GPSR3_13	F_(SD0_WP,		IP10_15_12)
+#define GPSR3_12	F_(SD0_CD,		IP10_11_8)
+#define GPSR3_11	F_(SD1_DAT3,		IP8_31_28)
+#define GPSR3_10	F_(SD1_DAT2,		IP8_27_24)
+#define GPSR3_9		F_(SD1_DAT1,		IP8_23_20)
+#define GPSR3_8		F_(SD1_DAT0,		IP8_19_16)
+#define GPSR3_7		F_(SD1_CMD,		IP8_15_12)
+#define GPSR3_6		F_(SD1_CLK,		IP8_11_8)
+#define GPSR3_5		F_(SD0_DAT3,		IP8_7_4)
+#define GPSR3_4		F_(SD0_DAT2,		IP8_3_0)
+#define GPSR3_3		F_(SD0_DAT1,		IP7_31_28)
+#define GPSR3_2		F_(SD0_DAT0,		IP7_27_24)
+#define GPSR3_1		F_(SD0_CMD,		IP7_23_20)
+#define GPSR3_0		F_(SD0_CLK,		IP7_19_16)
+
+/* GPSR4 */
+#define GPSR4_17	FM(SD3_DS)
+#define GPSR4_16	F_(SD3_DAT7,		IP10_7_4)
+#define GPSR4_15	F_(SD3_DAT6,		IP10_3_0)
+#define GPSR4_14	F_(SD3_DAT5,		IP9_31_28)
+#define GPSR4_13	F_(SD3_DAT4,		IP9_27_24)
+#define GPSR4_12	FM(SD3_DAT3)
+#define GPSR4_11	FM(SD3_DAT2)
+#define GPSR4_10	FM(SD3_DAT1)
+#define GPSR4_9		FM(SD3_DAT0)
+#define GPSR4_8		FM(SD3_CMD)
+#define GPSR4_7		FM(SD3_CLK)
+#define GPSR4_6		F_(SD2_DS,		IP9_23_20)
+#define GPSR4_5		F_(SD2_DAT3,		IP9_19_16)
+#define GPSR4_4		F_(SD2_DAT2,		IP9_15_12)
+#define GPSR4_3		F_(SD2_DAT1,		IP9_11_8)
+#define GPSR4_2		F_(SD2_DAT0,		IP9_7_4)
+#define GPSR4_1		FM(SD2_CMD)
+#define GPSR4_0		F_(SD2_CLK,		IP9_3_0)
+
+/* GPSR5 */
+#define GPSR5_25	F_(MLB_DAT,		IP13_19_16)
+#define GPSR5_24	F_(MLB_SIG,		IP13_15_12)
+#define GPSR5_23	F_(MLB_CLK,		IP13_11_8)
+#define GPSR5_22	FM(MSIOF0_RXD)
+#define GPSR5_21	F_(MSIOF0_SS2,		IP13_7_4)
+#define GPSR5_20	FM(MSIOF0_TXD)
+#define GPSR5_19	F_(MSIOF0_SS1,		IP13_3_0)
+#define GPSR5_18	F_(MSIOF0_SYNC,		IP12_31_28)
+#define GPSR5_17	FM(MSIOF0_SCK)
+#define GPSR5_16	F_(HRTS0_N,		IP12_27_24)
+#define GPSR5_15	F_(HCTS0_N,		IP12_23_20)
+#define GPSR5_14	F_(HTX0,		IP12_19_16)
+#define GPSR5_13	F_(HRX0,		IP12_15_12)
+#define GPSR5_12	F_(HSCK0,		IP12_11_8)
+#define GPSR5_11	F_(RX2_A,		IP12_7_4)
+#define GPSR5_10	F_(TX2_A,		IP12_3_0)
+#define GPSR5_9		F_(SCK2,		IP11_31_28)
+#define GPSR5_8		F_(RTS1_N,		IP11_27_24)
+#define GPSR5_7		F_(CTS1_N,		IP11_23_20)
+#define GPSR5_6		F_(TX1_A,		IP11_19_16)
+#define GPSR5_5		F_(RX1_A,		IP11_15_12)
+#define GPSR5_4		F_(RTS0_N,		IP11_11_8)
+#define GPSR5_3		F_(CTS0_N,		IP11_7_4)
+#define GPSR5_2		F_(TX0,			IP11_3_0)
+#define GPSR5_1		F_(RX0,			IP10_31_28)
+#define GPSR5_0		F_(SCK0,		IP10_27_24)
+
+/* GPSR6 */
+#define GPSR6_31	F_(USB31_OVC,		IP17_7_4)
+#define GPSR6_30	F_(USB31_PWEN,		IP17_3_0)
+#define GPSR6_29	F_(USB30_OVC,		IP16_31_28)
+#define GPSR6_28	F_(USB30_PWEN,		IP16_27_24)
+#define GPSR6_27	F_(USB1_OVC,		IP16_23_20)
+#define GPSR6_26	F_(USB1_PWEN,		IP16_19_16)
+#define GPSR6_25	F_(USB0_OVC,		IP16_15_12)
+#define GPSR6_24	F_(USB0_PWEN,		IP16_11_8)
+#define GPSR6_23	F_(AUDIO_CLKB_B,	IP16_7_4)
+#define GPSR6_22	F_(AUDIO_CLKA_A,	IP16_3_0)
+#define GPSR6_21	F_(SSI_SDATA9_A,	IP15_31_28)
+#define GPSR6_20	F_(SSI_SDATA8,		IP15_27_24)
+#define GPSR6_19	F_(SSI_SDATA7,		IP15_23_20)
+#define GPSR6_18	F_(SSI_WS78,		IP15_19_16)
+#define GPSR6_17	F_(SSI_SCK78,		IP15_15_12)
+#define GPSR6_16	F_(SSI_SDATA6,		IP15_11_8)
+#define GPSR6_15	F_(SSI_WS6,		IP15_7_4)
+#define GPSR6_14	F_(SSI_SCK6,		IP15_3_0)
+#define GPSR6_13	FM(SSI_SDATA5)
+#define GPSR6_12	FM(SSI_WS5)
+#define GPSR6_11	FM(SSI_SCK5)
+#define GPSR6_10	F_(SSI_SDATA4,		IP14_31_28)
+#define GPSR6_9		F_(SSI_WS4,		IP14_27_24)
+#define GPSR6_8		F_(SSI_SCK4,		IP14_23_20)
+#define GPSR6_7		F_(SSI_SDATA3,		IP14_19_16)
+#define GPSR6_6		F_(SSI_WS349,		IP14_15_12)
+#define GPSR6_5		F_(SSI_SCK349,		IP14_11_8)
+#define GPSR6_4		F_(SSI_SDATA2_A,	IP14_7_4)
+#define GPSR6_3		F_(SSI_SDATA1_A,	IP14_3_0)
+#define GPSR6_2		F_(SSI_SDATA0,		IP13_31_28)
+#define GPSR6_1		F_(SSI_WS01239,		IP13_27_24)
+#define GPSR6_0		F_(SSI_SCK01239,	IP13_23_20)
+
+/* GPSR7 */
+#define GPSR7_3		FM(GP7_03)
+#define GPSR7_2		FM(GP7_02)
+#define GPSR7_1		FM(AVS2)
+#define GPSR7_0		FM(AVS1)
+
+
+/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
+#define IP0_3_0		FM(AVB_MDC)		F_(0, 0)	FM(MSIOF2_SS2_C)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_7_4		FM(AVB_MAGIC)		F_(0, 0)	FM(MSIOF2_SS1_C)	FM(SCK4_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_11_8	FM(AVB_PHY_INT)		F_(0, 0)	FM(MSIOF2_SYNC_C)	FM(RX4_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_15_12	FM(AVB_LINK)		F_(0, 0)	FM(MSIOF2_SCK_C)	FM(TX4_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_19_16	FM(AVB_AVTP_MATCH_A)	F_(0, 0)	FM(MSIOF2_RXD_C)	FM(CTS4_N_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_23_20	FM(AVB_AVTP_CAPTURE_A)	F_(0, 0)	FM(MSIOF2_TXD_C)	FM(RTS4_N_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_27_24	FM(IRQ0)		FM(QPOLB)	F_(0, 0)		FM(DU_CDE)			FM(VI4_DATA0_B)	FM(CAN0_TX_B)	FM(CANFD0_TX_B)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_31_28	FM(IRQ1)		FM(QPOLA)	F_(0, 0)		FM(DU_DISP)			FM(VI4_DATA1_B)	FM(CAN0_RX_B)	FM(CANFD0_RX_B)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_3_0		FM(IRQ2)		FM(QCPV_QDE)	F_(0, 0)		FM(DU_EXODDF_DU_ODDF_DISP_CDE)	FM(VI4_DATA2_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(PWM3_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_7_4		FM(IRQ3)		FM(QSTVB_QVE)	FM(A25)			FM(DU_DOTCLKOUT1)		FM(VI4_DATA3_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(PWM4_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_11_8	FM(IRQ4)		FM(QSTH_QHS)	FM(A24)			FM(DU_EXHSYNC_DU_HSYNC)		FM(VI4_DATA4_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(PWM5_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_15_12	FM(IRQ5)		FM(QSTB_QHE)	FM(A23)			FM(DU_EXVSYNC_DU_VSYNC)		FM(VI4_DATA5_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(PWM6_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_19_16	FM(PWM0)		FM(AVB_AVTP_PPS)FM(A22)			F_(0, 0)			FM(VI4_DATA6_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(IECLK_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_23_20	FM(PWM1_A)		F_(0, 0)	FM(A21)			FM(HRX3_D)			FM(VI4_DATA7_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(IERX_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_27_24	FM(PWM2_A)		F_(0, 0)	FM(A20)			FM(HTX3_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(IETX_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_31_28	FM(A0)			FM(LCDOUT16)	FM(MSIOF3_SYNC_B)	F_(0, 0)			FM(VI4_DATA8)	F_(0, 0)	FM(DU_DB0)		F_(0, 0)	F_(0, 0)		FM(PWM3_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_3_0		FM(A1)			FM(LCDOUT17)	FM(MSIOF3_TXD_B)	F_(0, 0)			FM(VI4_DATA9)	F_(0, 0)	FM(DU_DB1)		F_(0, 0)	F_(0, 0)		FM(PWM4_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_7_4		FM(A2)			FM(LCDOUT18)	FM(MSIOF3_SCK_B)	F_(0, 0)			FM(VI4_DATA10)	F_(0, 0)	FM(DU_DB2)		F_(0, 0)	F_(0, 0)		FM(PWM5_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_11_8	FM(A3)			FM(LCDOUT19)	FM(MSIOF3_RXD_B)	F_(0, 0)			FM(VI4_DATA11)	F_(0, 0)	FM(DU_DB3)		F_(0, 0)	F_(0, 0)		FM(PWM6_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
+#define IP2_15_12	FM(A4)			FM(LCDOUT20)	FM(MSIOF3_SS1_B)	F_(0, 0)			FM(VI4_DATA12)	FM(VI5_DATA12)	FM(DU_DB4)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_19_16	FM(A5)			FM(LCDOUT21)	FM(MSIOF3_SS2_B)	FM(SCK4_B)			FM(VI4_DATA13)	FM(VI5_DATA13)	FM(DU_DB5)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_23_20	FM(A6)			FM(LCDOUT22)	FM(MSIOF2_SS1_A)	FM(RX4_B)			FM(VI4_DATA14)	FM(VI5_DATA14)	FM(DU_DB6)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_27_24	FM(A7)			FM(LCDOUT23)	FM(MSIOF2_SS2_A)	FM(TX4_B)			FM(VI4_DATA15)	FM(VI5_DATA15)	FM(DU_DB7)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_31_28	FM(A8)			FM(RX3_B)	FM(MSIOF2_SYNC_A)	FM(HRX4_B)			F_(0, 0)	F_(0, 0)	F_(0, 0)		FM(SDA6_A)	FM(AVB_AVTP_MATCH_B)	FM(PWM1_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_3_0		FM(A9)			F_(0, 0)	FM(MSIOF2_SCK_A)	FM(CTS4_N_B)			F_(0, 0)	FM(VI5_VSYNC_N)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_7_4		FM(A10)			F_(0, 0)	FM(MSIOF2_RXD_A)	FM(RTS4_N_B)			F_(0, 0)	FM(VI5_HSYNC_N)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_11_8	FM(A11)			FM(TX3_B)	FM(MSIOF2_TXD_A)	FM(HTX4_B)			FM(HSCK4)	FM(VI5_FIELD)	F_(0, 0)		FM(SCL6_A)	FM(AVB_AVTP_CAPTURE_B)	FM(PWM2_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_15_12	FM(A12)			FM(LCDOUT12)	FM(MSIOF3_SCK_C)	F_(0, 0)			FM(HRX4_A)	FM(VI5_DATA8)	FM(DU_DG4)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_19_16	FM(A13)			FM(LCDOUT13)	FM(MSIOF3_SYNC_C)	F_(0, 0)			FM(HTX4_A)	FM(VI5_DATA9)	FM(DU_DG5)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_23_20	FM(A14)			FM(LCDOUT14)	FM(MSIOF3_RXD_C)	F_(0, 0)			FM(HCTS4_N)	FM(VI5_DATA10)	FM(DU_DG6)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_27_24	FM(A15)			FM(LCDOUT15)	FM(MSIOF3_TXD_C)	F_(0, 0)			FM(HRTS4_N)	FM(VI5_DATA11)	FM(DU_DG7)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_31_28	FM(A16)			FM(LCDOUT8)	F_(0, 0)		F_(0, 0)			FM(VI4_FIELD)	F_(0, 0)	FM(DU_DG0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_3_0		FM(A17)			FM(LCDOUT9)	F_(0, 0)		F_(0, 0)			FM(VI4_VSYNC_N)	F_(0, 0)	FM(DU_DG1)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_7_4		FM(A18)			FM(LCDOUT10)	F_(0, 0)		F_(0, 0)			FM(VI4_HSYNC_N)	F_(0, 0)	FM(DU_DG2)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_11_8	FM(A19)			FM(LCDOUT11)	F_(0, 0)		F_(0, 0)			FM(VI4_CLKENB)	F_(0, 0)	FM(DU_DG3)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_15_12	FM(CS0_N)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(VI5_CLKENB)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_19_16	FM(CS1_N_A26)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(VI5_CLK)	F_(0, 0)		FM(EX_WAIT0_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_23_20	FM(BS_N)		FM(QSTVA_QVS)	FM(MSIOF3_SCK_D)	FM(SCK3)			FM(HSCK3)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(CAN1_TX)		FM(CANFD1_TX)	FM(IETX_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_27_24	FM(RD_N)		F_(0, 0)	FM(MSIOF3_SYNC_D)	FM(RX3_A)			FM(HRX3_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(CAN0_TX_A)		FM(CANFD0_TX_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_31_28	FM(RD_WR_N)		F_(0, 0)	FM(MSIOF3_RXD_D)	FM(TX3_A)			FM(HTX3_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(CAN0_RX_A)		FM(CANFD0_RX_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_3_0		FM(WE0_N)		F_(0, 0)	FM(MSIOF3_TXD_D)	FM(CTS3_N)			FM(HCTS3_N)	F_(0, 0)	F_(0, 0)		FM(SCL6_B)	FM(CAN_CLK)		F_(0, 0)	FM(IECLK_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_7_4		FM(WE1_N)		F_(0, 0)	FM(MSIOF3_SS1_D)	FM(RTS3_N)			FM(HRTS3_N)	F_(0, 0)	F_(0, 0)		FM(SDA6_B)	FM(CAN1_RX)		FM(CANFD1_RX)	FM(IERX_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_11_8	FM(EX_WAIT0_A)		FM(QCLK)	F_(0, 0)		F_(0, 0)			FM(VI4_CLK)	F_(0, 0)	FM(DU_DOTCLKOUT0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_15_12	FM(D0)			FM(MSIOF2_SS1_B)FM(MSIOF3_SCK_A)	F_(0, 0)			FM(VI4_DATA16)	FM(VI5_DATA0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_19_16	FM(D1)			FM(MSIOF2_SS2_B)FM(MSIOF3_SYNC_A)	F_(0, 0)			FM(VI4_DATA17)	FM(VI5_DATA1)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_23_20	FM(D2)			F_(0, 0)	FM(MSIOF3_RXD_A)	F_(0, 0)			FM(VI4_DATA18)	FM(VI5_DATA2)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_27_24	FM(D3)			F_(0, 0)	FM(MSIOF3_TXD_A)	F_(0, 0)			FM(VI4_DATA19)	FM(VI5_DATA3)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_31_28	FM(D4)			FM(MSIOF2_SCK_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA20)	FM(VI5_DATA4)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_3_0		FM(D5)			FM(MSIOF2_SYNC_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA21)	FM(VI5_DATA5)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_7_4		FM(D6)			FM(MSIOF2_RXD_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA22)	FM(VI5_DATA6)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_11_8	FM(D7)			FM(MSIOF2_TXD_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA23)	FM(VI5_DATA7)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_15_12	FM(D8)			FM(LCDOUT0)	FM(MSIOF2_SCK_D)	FM(SCK4_C)			FM(VI4_DATA0_A)	F_(0, 0)	FM(DU_DR0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_19_16	FM(D9)			FM(LCDOUT1)	FM(MSIOF2_SYNC_D)	F_(0, 0)			FM(VI4_DATA1_A)	F_(0, 0)	FM(DU_DR1)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_23_20	FM(D10)			FM(LCDOUT2)	FM(MSIOF2_RXD_D)	FM(HRX3_B)			FM(VI4_DATA2_A)	FM(CTS4_N_C)	FM(DU_DR2)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_27_24	FM(D11)			FM(LCDOUT3)	FM(MSIOF2_TXD_D)	FM(HTX3_B)			FM(VI4_DATA3_A)	FM(RTS4_N_C)	FM(DU_DR3)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_31_28	FM(D12)			FM(LCDOUT4)	FM(MSIOF2_SS1_D)	FM(RX4_C)			FM(VI4_DATA4_A)	F_(0, 0)	FM(DU_DR4)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_3_0		FM(D13)			FM(LCDOUT5)	FM(MSIOF2_SS2_D)	FM(TX4_C)			FM(VI4_DATA5_A)	F_(0, 0)	FM(DU_DR5)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_7_4		FM(D14)			FM(LCDOUT6)	FM(MSIOF3_SS1_A)	FM(HRX3_C)			FM(VI4_DATA6_A)	F_(0, 0)	FM(DU_DR6)		FM(SCL6_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_11_8	FM(D15)			FM(LCDOUT7)	FM(MSIOF3_SS2_A)	FM(HTX3_C)			FM(VI4_DATA7_A)	F_(0, 0)	FM(DU_DR7)		FM(SDA6_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_15_12	FM(FSCLKST)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_19_16	FM(SD0_CLK)		F_(0, 0)	FM(MSIOF1_SCK_E)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_OPWM_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
+#define IP7_23_20	FM(SD0_CMD)		F_(0, 0)	FM(MSIOF1_SYNC_E)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_IVCXO27_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_27_24	FM(SD0_DAT0)		F_(0, 0)	FM(MSIOF1_RXD_E)	F_(0, 0)			F_(0, 0)	FM(TS_SCK0_B)	FM(STP_ISCLK_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_31_28	FM(SD0_DAT1)		F_(0, 0)	FM(MSIOF1_TXD_E)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC0_B)FM(STP_ISSYNC_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_3_0		FM(SD0_DAT2)		F_(0, 0)	FM(MSIOF1_SS1_E)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT0_B)	FM(STP_ISD_0_B)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_7_4		FM(SD0_DAT3)		F_(0, 0)	FM(MSIOF1_SS2_E)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN0_B)	FM(STP_ISEN_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_11_8	FM(SD1_CLK)		F_(0, 0)	FM(MSIOF1_SCK_G)	F_(0, 0)			F_(0, 0)	FM(SIM0_CLK_A)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_15_12	FM(SD1_CMD)		F_(0, 0)	FM(MSIOF1_SYNC_G)	F_(0, 0)			F_(0, 0)	FM(SIM0_D_A)	FM(STP_IVCXO27_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_19_16	FM(SD1_DAT0)		FM(SD2_DAT4)	FM(MSIOF1_RXD_G)	F_(0, 0)			F_(0, 0)	FM(TS_SCK1_B)	FM(STP_ISCLK_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_23_20	FM(SD1_DAT1)		FM(SD2_DAT5)	FM(MSIOF1_TXD_G)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC1_B)FM(STP_ISSYNC_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_27_24	FM(SD1_DAT2)		FM(SD2_DAT6)	FM(MSIOF1_SS1_G)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT1_B)	FM(STP_ISD_1_B)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_31_28	FM(SD1_DAT3)		FM(SD2_DAT7)	FM(MSIOF1_SS2_G)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN1_B)	FM(STP_ISEN_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_3_0		FM(SD2_CLK)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_7_4		FM(SD2_DAT0)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_11_8	FM(SD2_DAT1)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_15_12	FM(SD2_DAT2)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_19_16	FM(SD2_DAT3)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_23_20	FM(SD2_DS)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(SATA_DEVSLP_B)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_27_24	FM(SD3_DAT4)		FM(SD2_CD_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_31_28	FM(SD3_DAT5)		FM(SD2_WP_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_3_0	FM(SD3_DAT6)		FM(SD3_CD)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_7_4	FM(SD3_DAT7)		FM(SD3_WP)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_11_8	FM(SD0_CD)		F_(0, 0)	F_(0, 0)		F_(0, 0)			FM(SCL2_B)	FM(SIM0_RST_A)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_15_12	FM(SD0_WP)		F_(0, 0)	F_(0, 0)		F_(0, 0)			FM(SDA2_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_19_16	FM(SD1_CD)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(SIM0_CLK_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_23_20	FM(SD1_WP)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(SIM0_D_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_27_24	FM(SCK0)		FM(HSCK1_B)	FM(MSIOF1_SS2_B)	FM(AUDIO_CLKC_B)		FM(SDA2_A)	FM(SIM0_RST_B)	FM(STP_OPWM_0_C)	FM(RIF0_CLK_B)	F_(0, 0)		FM(ADICHS2)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_31_28	FM(RX0)			FM(HRX1_B)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SCK0_C)	FM(STP_ISCLK_0_C)	FM(RIF0_D0_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_3_0	FM(TX0)			FM(HTX1_B)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC0_C)FM(STP_ISSYNC_0_C)	FM(RIF0_D1_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_7_4	FM(CTS0_N)		FM(HCTS1_N_B)	FM(MSIOF1_SYNC_B)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC1_C)FM(STP_ISSYNC_1_C)	FM(RIF1_SYNC_B)	FM(AUDIO_CLKOUT_C)	FM(ADICS_SAMP)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_11_8	FM(RTS0_N)		FM(HRTS1_N_B)	FM(MSIOF1_SS1_B)	FM(AUDIO_CLKA_B)		FM(SCL2_A)	F_(0, 0)	FM(STP_IVCXO27_1_C)	FM(RIF0_SYNC_B)	F_(0, 0)		FM(ADICHS1)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_15_12	FM(RX1_A)		FM(HRX1_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SDAT0_C)	FM(STP_ISD_0_C)		FM(RIF1_CLK_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_19_16	FM(TX1_A)		FM(HTX1_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SDEN0_C)	FM(STP_ISEN_0_C)	FM(RIF1_D0_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_23_20	FM(CTS1_N)		FM(HCTS1_N_A)	FM(MSIOF1_RXD_B)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN1_C)	FM(STP_ISEN_1_C)	FM(RIF1_D0_B)	F_(0, 0)		FM(ADIDATA)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_27_24	FM(RTS1_N)		FM(HRTS1_N_A)	FM(MSIOF1_TXD_B)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT1_C)	FM(STP_ISD_1_C)		FM(RIF1_D1_B)	F_(0, 0)		FM(ADICHS0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_31_28	FM(SCK2)		FM(SCIF_CLK_B)	FM(MSIOF1_SCK_B)	F_(0, 0)			F_(0, 0)	FM(TS_SCK1_C)	FM(STP_ISCLK_1_C)	FM(RIF1_CLK_B)	F_(0, 0)		FM(ADICLK)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_3_0	FM(TX2_A)		F_(0, 0)	F_(0, 0)		FM(SD2_CD_B)			FM(SCL1_A)	F_(0, 0)	FM(FMCLK_A)		FM(RIF1_D1_C)	F_(0, 0)		FM(FSO_CFE_0_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_7_4	FM(RX2_A)		F_(0, 0)	F_(0, 0)		FM(SD2_WP_B)			FM(SDA1_A)	F_(0, 0)	FM(FMIN_A)		FM(RIF1_SYNC_C)	F_(0, 0)		FM(FSO_CFE_1_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_11_8	FM(HSCK0)		F_(0, 0)	FM(MSIOF1_SCK_D)	FM(AUDIO_CLKB_A)		FM(SSI_SDATA1_B)FM(TS_SCK0_D)	FM(STP_ISCLK_0_D)	FM(RIF0_CLK_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_15_12	FM(HRX0)		F_(0, 0)	FM(MSIOF1_RXD_D)	F_(0, 0)			FM(SSI_SDATA2_B)FM(TS_SDEN0_D)	FM(STP_ISEN_0_D)	FM(RIF0_D0_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_19_16	FM(HTX0)		F_(0, 0)	FM(MSIOF1_TXD_D)	F_(0, 0)			FM(SSI_SDATA9_B)FM(TS_SDAT0_D)	FM(STP_ISD_0_D)		FM(RIF0_D1_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_23_20	FM(HCTS0_N)		FM(RX2_B)	FM(MSIOF1_SYNC_D)	F_(0, 0)			FM(SSI_SCK9_A)	FM(TS_SPSYNC0_D)FM(STP_ISSYNC_0_D)	FM(RIF0_SYNC_C)	FM(AUDIO_CLKOUT1_A)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_27_24	FM(HRTS0_N)		FM(TX2_B)	FM(MSIOF1_SS1_D)	F_(0, 0)			FM(SSI_WS9_A)	F_(0, 0)	FM(STP_IVCXO27_0_D)	FM(BPFCLK_A)	FM(AUDIO_CLKOUT2_A)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
+#define IP12_31_28	FM(MSIOF0_SYNC)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(AUDIO_CLKOUT_A)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_3_0	FM(MSIOF0_SS1)		FM(RX5)		F_(0, 0)		FM(AUDIO_CLKA_C)		FM(SSI_SCK2_A)	F_(0, 0)	FM(STP_IVCXO27_0_C)	F_(0, 0)	FM(AUDIO_CLKOUT3_A)	F_(0, 0)	FM(TCLK1_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_7_4	FM(MSIOF0_SS2)		FM(TX5)		FM(MSIOF1_SS2_D)	FM(AUDIO_CLKC_A)		FM(SSI_WS2_A)	F_(0, 0)	FM(STP_OPWM_0_D)	F_(0, 0)	FM(AUDIO_CLKOUT_D)	F_(0, 0)	FM(SPEEDIN_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_11_8	FM(MLB_CLK)		F_(0, 0)	FM(MSIOF1_SCK_F)	F_(0, 0)			FM(SCL1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_15_12	FM(MLB_SIG)		FM(RX1_B)	FM(MSIOF1_SYNC_F)	F_(0, 0)			FM(SDA1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_19_16	FM(MLB_DAT)		FM(TX1_B)	FM(MSIOF1_RXD_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_23_20	FM(SSI_SCK01239)	F_(0, 0)	FM(MSIOF1_TXD_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_27_24	FM(SSI_WS01239)		F_(0, 0)	FM(MSIOF1_SS1_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_31_28	FM(SSI_SDATA0)		F_(0, 0)	FM(MSIOF1_SS2_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_3_0	FM(SSI_SDATA1_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_7_4	FM(SSI_SDATA2_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)			FM(SSI_SCK1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_11_8	FM(SSI_SCK349)		F_(0, 0)	FM(MSIOF1_SS1_A)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_OPWM_0_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_15_12	FM(SSI_WS349)		FM(HCTS2_N_A)	FM(MSIOF1_SS2_A)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_IVCXO27_0_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_19_16	FM(SSI_SDATA3)		FM(HRTS2_N_A)	FM(MSIOF1_TXD_A)	F_(0, 0)			F_(0, 0)	FM(TS_SCK0_A)	FM(STP_ISCLK_0_A)	FM(RIF0_D1_A)	FM(RIF2_D0_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_23_20	FM(SSI_SCK4)		FM(HRX2_A)	FM(MSIOF1_SCK_A)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT0_A)	FM(STP_ISD_0_A)		FM(RIF0_CLK_A)	FM(RIF2_CLK_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_27_24	FM(SSI_WS4)		FM(HTX2_A)	FM(MSIOF1_SYNC_A)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN0_A)	FM(STP_ISEN_0_A)	FM(RIF0_SYNC_A)	FM(RIF2_SYNC_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_31_28	FM(SSI_SDATA4)		FM(HSCK2_A)	FM(MSIOF1_RXD_A)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC0_A)FM(STP_ISSYNC_0_A)	FM(RIF0_D0_A)	FM(RIF2_D1_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_3_0	FM(SSI_SCK6)		FM(USB2_PWEN)	F_(0, 0)		FM(SIM0_RST_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_7_4	FM(SSI_WS6)		FM(USB2_OVC)	F_(0, 0)		FM(SIM0_D_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_11_8	FM(SSI_SDATA6)		F_(0, 0)	F_(0, 0)		FM(SIM0_CLK_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(SATA_DEVSLP_A)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_15_12	FM(SSI_SCK78)		FM(HRX2_B)	FM(MSIOF1_SCK_C)	F_(0, 0)			F_(0, 0)	FM(TS_SCK1_A)	FM(STP_ISCLK_1_A)	FM(RIF1_CLK_A)	FM(RIF3_CLK_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_19_16	FM(SSI_WS78)		FM(HTX2_B)	FM(MSIOF1_SYNC_C)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT1_A)	FM(STP_ISD_1_A)		FM(RIF1_SYNC_A)	FM(RIF3_SYNC_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_23_20	FM(SSI_SDATA7)		FM(HCTS2_N_B)	FM(MSIOF1_RXD_C)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN1_A)	FM(STP_ISEN_1_A)	FM(RIF1_D0_A)	FM(RIF3_D0_A)		F_(0, 0)	FM(TCLK2_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_27_24	FM(SSI_SDATA8)		FM(HRTS2_N_B)	FM(MSIOF1_TXD_C)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC1_A)FM(STP_ISSYNC_1_A)	FM(RIF1_D1_A)	FM(RIF3_D1_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_31_28	FM(SSI_SDATA9_A)	FM(HSCK2_B)	FM(MSIOF1_SS1_C)	FM(HSCK1_A)			FM(SSI_WS1_B)	FM(SCK1)	FM(STP_IVCXO27_1_A)	FM(SCK5)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_3_0	FM(AUDIO_CLKA_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_7_4	FM(AUDIO_CLKB_B)	FM(SCIF_CLK_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_IVCXO27_1_D)	FM(REMOCON_A)	F_(0, 0)		F_(0, 0)	FM(TCLK1_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_11_8	FM(USB0_PWEN)		F_(0, 0)	F_(0, 0)		FM(SIM0_RST_C)			F_(0, 0)	FM(TS_SCK1_D)	FM(STP_ISCLK_1_D)	FM(BPFCLK_B)	FM(RIF3_CLK_B)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_15_12	FM(USB0_OVC)		F_(0, 0)	F_(0, 0)		FM(SIM0_D_C)			F_(0, 0)	FM(TS_SDAT1_D)	FM(STP_ISD_1_D)		F_(0, 0)	FM(RIF3_SYNC_B)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_19_16	FM(USB1_PWEN)		F_(0, 0)	F_(0, 0)		FM(SIM0_CLK_C)			FM(SSI_SCK1_A)	FM(TS_SCK0_E)	FM(STP_ISCLK_0_E)	FM(FMCLK_B)	FM(RIF2_CLK_B)		F_(0, 0)	FM(SPEEDIN_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_23_20	FM(USB1_OVC)		F_(0, 0)	FM(MSIOF1_SS2_C)	F_(0, 0)			FM(SSI_WS1_A)	FM(TS_SDAT0_E)	FM(STP_ISD_0_E)		FM(FMIN_B)	FM(RIF2_SYNC_B)		F_(0, 0)	FM(REMOCON_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_27_24	FM(USB30_PWEN)		F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT_B)		FM(SSI_SCK2_B)	FM(TS_SDEN1_D)	FM(STP_ISEN_1_D)	FM(STP_OPWM_0_E)FM(RIF3_D0_B)		F_(0, 0)	FM(TCLK2_B)	FM(TPU0TO0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_31_28	FM(USB30_OVC)		F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT1_B)		FM(SSI_WS2_B)	FM(TS_SPSYNC1_D)FM(STP_ISSYNC_1_D)	FM(STP_IVCXO27_0_E)FM(RIF3_D1_B)	F_(0, 0)	FM(FSO_TOE_B)	FM(TPU0TO1)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP17_3_0	FM(USB31_PWEN)		F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT2_B)		FM(SSI_SCK9_B)	FM(TS_SDEN0_E)	FM(STP_ISEN_0_E)	F_(0, 0)	FM(RIF2_D0_B)		F_(0, 0)	F_(0, 0)	FM(TPU0TO2)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP17_7_4	FM(USB31_OVC)		F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT3_B)		FM(SSI_WS9_B)	FM(TS_SPSYNC0_E)FM(STP_ISSYNC_0_E)	F_(0, 0)	FM(RIF2_D1_B)		F_(0, 0)	F_(0, 0)	FM(TPU0TO3)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+#define PINMUX_GPSR	\
+\
+												GPSR6_31 \
+												GPSR6_30 \
+												GPSR6_29 \
+												GPSR6_28 \
+		GPSR1_27									GPSR6_27 \
+		GPSR1_26									GPSR6_26 \
+		GPSR1_25							GPSR5_25	GPSR6_25 \
+		GPSR1_24							GPSR5_24	GPSR6_24 \
+		GPSR1_23							GPSR5_23	GPSR6_23 \
+		GPSR1_22							GPSR5_22	GPSR6_22 \
+		GPSR1_21							GPSR5_21	GPSR6_21 \
+		GPSR1_20							GPSR5_20	GPSR6_20 \
+		GPSR1_19							GPSR5_19	GPSR6_19 \
+		GPSR1_18							GPSR5_18	GPSR6_18 \
+		GPSR1_17					GPSR4_17	GPSR5_17	GPSR6_17 \
+		GPSR1_16					GPSR4_16	GPSR5_16	GPSR6_16 \
+GPSR0_15	GPSR1_15			GPSR3_15	GPSR4_15	GPSR5_15	GPSR6_15 \
+GPSR0_14	GPSR1_14	GPSR2_14	GPSR3_14	GPSR4_14	GPSR5_14	GPSR6_14 \
+GPSR0_13	GPSR1_13	GPSR2_13	GPSR3_13	GPSR4_13	GPSR5_13	GPSR6_13 \
+GPSR0_12	GPSR1_12	GPSR2_12	GPSR3_12	GPSR4_12	GPSR5_12	GPSR6_12 \
+GPSR0_11	GPSR1_11	GPSR2_11	GPSR3_11	GPSR4_11	GPSR5_11	GPSR6_11 \
+GPSR0_10	GPSR1_10	GPSR2_10	GPSR3_10	GPSR4_10	GPSR5_10	GPSR6_10 \
+GPSR0_9		GPSR1_9		GPSR2_9		GPSR3_9		GPSR4_9		GPSR5_9		GPSR6_9 \
+GPSR0_8		GPSR1_8		GPSR2_8		GPSR3_8		GPSR4_8		GPSR5_8		GPSR6_8 \
+GPSR0_7		GPSR1_7		GPSR2_7		GPSR3_7		GPSR4_7		GPSR5_7		GPSR6_7 \
+GPSR0_6		GPSR1_6		GPSR2_6		GPSR3_6		GPSR4_6		GPSR5_6		GPSR6_6 \
+GPSR0_5		GPSR1_5		GPSR2_5		GPSR3_5		GPSR4_5		GPSR5_5		GPSR6_5 \
+GPSR0_4		GPSR1_4		GPSR2_4		GPSR3_4		GPSR4_4		GPSR5_4		GPSR6_4 \
+GPSR0_3		GPSR1_3		GPSR2_3		GPSR3_3		GPSR4_3		GPSR5_3		GPSR6_3		GPSR7_3 \
+GPSR0_2		GPSR1_2		GPSR2_2		GPSR3_2		GPSR4_2		GPSR5_2		GPSR6_2		GPSR7_2 \
+GPSR0_1		GPSR1_1		GPSR2_1		GPSR3_1		GPSR4_1		GPSR5_1		GPSR6_1		GPSR7_1 \
+GPSR0_0		GPSR1_0		GPSR2_0		GPSR3_0		GPSR4_0		GPSR5_0		GPSR6_0		GPSR7_0
+
+#define PINMUX_IPSR				\
+\
+FM(IP0_3_0)	IP0_3_0		FM(IP1_3_0)	IP1_3_0		FM(IP2_3_0)	IP2_3_0		FM(IP3_3_0)	IP3_3_0 \
+FM(IP0_7_4)	IP0_7_4		FM(IP1_7_4)	IP1_7_4		FM(IP2_7_4)	IP2_7_4		FM(IP3_7_4)	IP3_7_4 \
+FM(IP0_11_8)	IP0_11_8	FM(IP1_11_8)	IP1_11_8	FM(IP2_11_8)	IP2_11_8	FM(IP3_11_8)	IP3_11_8 \
+FM(IP0_15_12)	IP0_15_12	FM(IP1_15_12)	IP1_15_12	FM(IP2_15_12)	IP2_15_12	FM(IP3_15_12)	IP3_15_12 \
+FM(IP0_19_16)	IP0_19_16	FM(IP1_19_16)	IP1_19_16	FM(IP2_19_16)	IP2_19_16	FM(IP3_19_16)	IP3_19_16 \
+FM(IP0_23_20)	IP0_23_20	FM(IP1_23_20)	IP1_23_20	FM(IP2_23_20)	IP2_23_20	FM(IP3_23_20)	IP3_23_20 \
+FM(IP0_27_24)	IP0_27_24	FM(IP1_27_24)	IP1_27_24	FM(IP2_27_24)	IP2_27_24	FM(IP3_27_24)	IP3_27_24 \
+FM(IP0_31_28)	IP0_31_28	FM(IP1_31_28)	IP1_31_28	FM(IP2_31_28)	IP2_31_28	FM(IP3_31_28)	IP3_31_28 \
+\
+FM(IP4_3_0)	IP4_3_0		FM(IP5_3_0)	IP5_3_0		FM(IP6_3_0)	IP6_3_0		FM(IP7_3_0)	IP7_3_0 \
+FM(IP4_7_4)	IP4_7_4		FM(IP5_7_4)	IP5_7_4		FM(IP6_7_4)	IP6_7_4		FM(IP7_7_4)	IP7_7_4 \
+FM(IP4_11_8)	IP4_11_8	FM(IP5_11_8)	IP5_11_8	FM(IP6_11_8)	IP6_11_8	FM(IP7_11_8)	IP7_11_8 \
+FM(IP4_15_12)	IP4_15_12	FM(IP5_15_12)	IP5_15_12	FM(IP6_15_12)	IP6_15_12	FM(IP7_15_12)	IP7_15_12 \
+FM(IP4_19_16)	IP4_19_16	FM(IP5_19_16)	IP5_19_16	FM(IP6_19_16)	IP6_19_16	FM(IP7_19_16)	IP7_19_16 \
+FM(IP4_23_20)	IP4_23_20	FM(IP5_23_20)	IP5_23_20	FM(IP6_23_20)	IP6_23_20	FM(IP7_23_20)	IP7_23_20 \
+FM(IP4_27_24)	IP4_27_24	FM(IP5_27_24)	IP5_27_24	FM(IP6_27_24)	IP6_27_24	FM(IP7_27_24)	IP7_27_24 \
+FM(IP4_31_28)	IP4_31_28	FM(IP5_31_28)	IP5_31_28	FM(IP6_31_28)	IP6_31_28	FM(IP7_31_28)	IP7_31_28 \
+\
+FM(IP8_3_0)	IP8_3_0		FM(IP9_3_0)	IP9_3_0		FM(IP10_3_0)	IP10_3_0	FM(IP11_3_0)	IP11_3_0 \
+FM(IP8_7_4)	IP8_7_4		FM(IP9_7_4)	IP9_7_4		FM(IP10_7_4)	IP10_7_4	FM(IP11_7_4)	IP11_7_4 \
+FM(IP8_11_8)	IP8_11_8	FM(IP9_11_8)	IP9_11_8	FM(IP10_11_8)	IP10_11_8	FM(IP11_11_8)	IP11_11_8 \
+FM(IP8_15_12)	IP8_15_12	FM(IP9_15_12)	IP9_15_12	FM(IP10_15_12)	IP10_15_12	FM(IP11_15_12)	IP11_15_12 \
+FM(IP8_19_16)	IP8_19_16	FM(IP9_19_16)	IP9_19_16	FM(IP10_19_16)	IP10_19_16	FM(IP11_19_16)	IP11_19_16 \
+FM(IP8_23_20)	IP8_23_20	FM(IP9_23_20)	IP9_23_20	FM(IP10_23_20)	IP10_23_20	FM(IP11_23_20)	IP11_23_20 \
+FM(IP8_27_24)	IP8_27_24	FM(IP9_27_24)	IP9_27_24	FM(IP10_27_24)	IP10_27_24	FM(IP11_27_24)	IP11_27_24 \
+FM(IP8_31_28)	IP8_31_28	FM(IP9_31_28)	IP9_31_28	FM(IP10_31_28)	IP10_31_28	FM(IP11_31_28)	IP11_31_28 \
+\
+FM(IP12_3_0)	IP12_3_0	FM(IP13_3_0)	IP13_3_0	FM(IP14_3_0)	IP14_3_0	FM(IP15_3_0)	IP15_3_0 \
+FM(IP12_7_4)	IP12_7_4	FM(IP13_7_4)	IP13_7_4	FM(IP14_7_4)	IP14_7_4	FM(IP15_7_4)	IP15_7_4 \
+FM(IP12_11_8)	IP12_11_8	FM(IP13_11_8)	IP13_11_8	FM(IP14_11_8)	IP14_11_8	FM(IP15_11_8)	IP15_11_8 \
+FM(IP12_15_12)	IP12_15_12	FM(IP13_15_12)	IP13_15_12	FM(IP14_15_12)	IP14_15_12	FM(IP15_15_12)	IP15_15_12 \
+FM(IP12_19_16)	IP12_19_16	FM(IP13_19_16)	IP13_19_16	FM(IP14_19_16)	IP14_19_16	FM(IP15_19_16)	IP15_19_16 \
+FM(IP12_23_20)	IP12_23_20	FM(IP13_23_20)	IP13_23_20	FM(IP14_23_20)	IP14_23_20	FM(IP15_23_20)	IP15_23_20 \
+FM(IP12_27_24)	IP12_27_24	FM(IP13_27_24)	IP13_27_24	FM(IP14_27_24)	IP14_27_24	FM(IP15_27_24)	IP15_27_24 \
+FM(IP12_31_28)	IP12_31_28	FM(IP13_31_28)	IP13_31_28	FM(IP14_31_28)	IP14_31_28	FM(IP15_31_28)	IP15_31_28 \
+\
+FM(IP16_3_0)	IP16_3_0	FM(IP17_3_0)	IP17_3_0 \
+FM(IP16_7_4)	IP16_7_4	FM(IP17_7_4)	IP17_7_4 \
+FM(IP16_11_8)	IP16_11_8 \
+FM(IP16_15_12)	IP16_15_12 \
+FM(IP16_19_16)	IP16_19_16 \
+FM(IP16_23_20)	IP16_23_20 \
+FM(IP16_27_24)	IP16_27_24 \
+FM(IP16_31_28)	IP16_31_28
+
+/* MOD_SEL0 */			/* 0 */			/* 1 */			/* 2 */			/* 3 */			/* 4 */			/* 5 */			/* 6 */			/* 7 */
+#define MOD_SEL0_30_29		FM(SEL_MSIOF3_0)	FM(SEL_MSIOF3_1)	FM(SEL_MSIOF3_2)	FM(SEL_MSIOF3_3)
+#define MOD_SEL0_28_27		FM(SEL_MSIOF2_0)	FM(SEL_MSIOF2_1)	FM(SEL_MSIOF2_2)	FM(SEL_MSIOF2_3)
+#define MOD_SEL0_26_25_24	FM(SEL_MSIOF1_0)	FM(SEL_MSIOF1_1)	FM(SEL_MSIOF1_2)	FM(SEL_MSIOF1_3)	FM(SEL_MSIOF1_4)	FM(SEL_MSIOF1_5)	FM(SEL_MSIOF1_6)	F_(0, 0)
+#define MOD_SEL0_23		FM(SEL_LBSC_0)		FM(SEL_LBSC_1)
+#define MOD_SEL0_22		FM(SEL_IEBUS_0)		FM(SEL_IEBUS_1)
+#define MOD_SEL0_21_20		FM(SEL_I2C6_0)		FM(SEL_I2C6_1)		FM(SEL_I2C6_2)		F_(0, 0)
+#define MOD_SEL0_19		FM(SEL_I2C2_0)		FM(SEL_I2C2_1)
+#define MOD_SEL0_18		FM(SEL_I2C1_0)		FM(SEL_I2C1_1)
+#define MOD_SEL0_17		FM(SEL_HSCIF4_0)	FM(SEL_HSCIF4_1)
+#define MOD_SEL0_16_15		FM(SEL_HSCIF3_0)	FM(SEL_HSCIF3_1)	FM(SEL_HSCIF3_2)	FM(SEL_HSCIF3_3)
+#define MOD_SEL0_14		FM(SEL_HSCIF2_0)	FM(SEL_HSCIF2_1)
+#define MOD_SEL0_13		FM(SEL_HSCIF1_0)	FM(SEL_HSCIF1_1)
+#define MOD_SEL0_12		FM(SEL_FSO_0)		FM(SEL_FSO_1)
+#define MOD_SEL0_11		FM(SEL_FM_0)		FM(SEL_FM_1)
+#define MOD_SEL0_10		FM(SEL_ETHERAVB_0)	FM(SEL_ETHERAVB_1)
+#define MOD_SEL0_9		FM(SEL_DRIF3_0)		FM(SEL_DRIF3_1)
+#define MOD_SEL0_8		FM(SEL_DRIF2_0)		FM(SEL_DRIF2_1)
+#define MOD_SEL0_7_6		FM(SEL_DRIF1_0)		FM(SEL_DRIF1_1)		FM(SEL_DRIF1_2)		F_(0, 0)
+#define MOD_SEL0_5_4		FM(SEL_DRIF0_0)		FM(SEL_DRIF0_1)		FM(SEL_DRIF0_2)		F_(0, 0)
+#define MOD_SEL0_3		FM(SEL_CANFD0_0)	FM(SEL_CANFD0_1)
+#define MOD_SEL0_2_1		FM(SEL_ADG_0)		FM(SEL_ADG_1)		FM(SEL_ADG_2)		FM(SEL_ADG_3)
+
+/* MOD_SEL1 */			/* 0 */			/* 1 */			/* 2 */			/* 3 */			/* 4 */			/* 5 */			/* 6 */			/* 7 */
+#define MOD_SEL1_31_30		FM(SEL_TSIF1_0)		FM(SEL_TSIF1_1)		FM(SEL_TSIF1_2)		FM(SEL_TSIF1_3)
+#define MOD_SEL1_29_28_27	FM(SEL_TSIF0_0)		FM(SEL_TSIF0_1)		FM(SEL_TSIF0_2)		FM(SEL_TSIF0_3)		FM(SEL_TSIF0_4)		F_(0, 0)		F_(0, 0)		F_(0, 0)
+#define MOD_SEL1_26		FM(SEL_TIMER_TMU_0)	FM(SEL_TIMER_TMU_1)
+#define MOD_SEL1_25_24		FM(SEL_SSP1_1_0)	FM(SEL_SSP1_1_1)	FM(SEL_SSP1_1_2)	FM(SEL_SSP1_1_3)
+#define MOD_SEL1_23_22_21	FM(SEL_SSP1_0_0)	FM(SEL_SSP1_0_1)	FM(SEL_SSP1_0_2)	FM(SEL_SSP1_0_3)	FM(SEL_SSP1_0_4)	F_(0, 0)		F_(0, 0)		F_(0, 0)
+#define MOD_SEL1_20		FM(SEL_SSI_0)		FM(SEL_SSI_1)
+#define MOD_SEL1_19		FM(SEL_SPEED_PULSE_0)	FM(SEL_SPEED_PULSE_1)
+#define MOD_SEL1_18_17		FM(SEL_SIMCARD_0)	FM(SEL_SIMCARD_1)	FM(SEL_SIMCARD_2)	FM(SEL_SIMCARD_3)
+#define MOD_SEL1_16		FM(SEL_SDHI2_0)		FM(SEL_SDHI2_1)
+#define MOD_SEL1_15_14		FM(SEL_SCIF4_0)		FM(SEL_SCIF4_1)		FM(SEL_SCIF4_2)		F_(0, 0)
+#define MOD_SEL1_13		FM(SEL_SCIF3_0)		FM(SEL_SCIF3_1)
+#define MOD_SEL1_12		FM(SEL_SCIF2_0)		FM(SEL_SCIF2_1)
+#define MOD_SEL1_11		FM(SEL_SCIF1_0)		FM(SEL_SCIF1_1)
+#define MOD_SEL1_10		FM(SEL_SATA_0)		FM(SEL_SATA_1)
+#define MOD_SEL1_9		FM(SEL_REMOCON_0)	FM(SEL_REMOCON_1)
+#define MOD_SEL1_6		FM(SEL_RCAN0_0)		FM(SEL_RCAN0_1)
+#define MOD_SEL1_5		FM(SEL_PWM6_0)		FM(SEL_PWM6_1)
+#define MOD_SEL1_4		FM(SEL_PWM5_0)		FM(SEL_PWM5_1)
+#define MOD_SEL1_3		FM(SEL_PWM4_0)		FM(SEL_PWM4_1)
+#define MOD_SEL1_2		FM(SEL_PWM3_0)		FM(SEL_PWM3_1)
+#define MOD_SEL1_1		FM(SEL_PWM2_0)		FM(SEL_PWM2_1)
+#define MOD_SEL1_0		FM(SEL_PWM1_0)		FM(SEL_PWM1_1)
+
+/* MOD_SEL2 */			/* 0 */			/* 1 */			/* 2 */			/* 3 */
+#define MOD_SEL2_31		FM(I2C_SEL_5_0)		FM(I2C_SEL_5_1)
+#define MOD_SEL2_30		FM(I2C_SEL_3_0)		FM(I2C_SEL_3_1)
+#define MOD_SEL2_29		FM(I2C_SEL_0_0)		FM(I2C_SEL_0_1)
+#define MOD_SEL2_0		FM(SEL_VIN4_0)		FM(SEL_VIN4_1)
+
+#define PINMUX_MOD_SELS\
+\
+			MOD_SEL1_31_30		MOD_SEL2_31 \
+MOD_SEL0_30_29					MOD_SEL2_30 \
+			MOD_SEL1_29_28_27	MOD_SEL2_29 \
+MOD_SEL0_28_27 \
+\
+MOD_SEL0_26_25_24	MOD_SEL1_26 \
+			MOD_SEL1_25_24 \
+\
+MOD_SEL0_23		MOD_SEL1_23_22_21 \
+MOD_SEL0_22 \
+MOD_SEL0_21_20 \
+			MOD_SEL1_20 \
+MOD_SEL0_19		MOD_SEL1_19 \
+MOD_SEL0_18		MOD_SEL1_18_17 \
+MOD_SEL0_17 \
+MOD_SEL0_16_15		MOD_SEL1_16 \
+			MOD_SEL1_15_14 \
+MOD_SEL0_14 \
+MOD_SEL0_13		MOD_SEL1_13 \
+MOD_SEL0_12		MOD_SEL1_12 \
+MOD_SEL0_11		MOD_SEL1_11 \
+MOD_SEL0_10		MOD_SEL1_10 \
+MOD_SEL0_9		MOD_SEL1_9 \
+MOD_SEL0_8 \
+MOD_SEL0_7_6 \
+			MOD_SEL1_6 \
+MOD_SEL0_5_4		MOD_SEL1_5 \
+			MOD_SEL1_4 \
+MOD_SEL0_3		MOD_SEL1_3 \
+MOD_SEL0_2_1		MOD_SEL1_2 \
+			MOD_SEL1_1 \
+			MOD_SEL1_0		MOD_SEL2_0
+
+/*
+ * These pins are not able to be muxed but have other properties
+ * that can be set, such as drive-strength or pull-up/pull-down enable.
+ */
+#define PINMUX_STATIC \
+	FM(QSPI0_SPCLK) FM(QSPI0_SSL) FM(QSPI0_MOSI_IO0) FM(QSPI0_MISO_IO1) \
+	FM(QSPI0_IO2) FM(QSPI0_IO3) \
+	FM(QSPI1_SPCLK) FM(QSPI1_SSL) FM(QSPI1_MOSI_IO0) FM(QSPI1_MISO_IO1) \
+	FM(QSPI1_IO2) FM(QSPI1_IO3) \
+	FM(RPC_INT) FM(RPC_WP) FM(RPC_RESET) \
+	FM(AVB_TX_CTL) FM(AVB_TXC) FM(AVB_TD0) FM(AVB_TD1) FM(AVB_TD2) FM(AVB_TD3) \
+	FM(AVB_RX_CTL) FM(AVB_RXC) FM(AVB_RD0) FM(AVB_RD1) FM(AVB_RD2) FM(AVB_RD3) \
+	FM(AVB_TXCREFCLK) FM(AVB_MDIO) \
+	FM(CLKOUT) FM(PRESETOUT) \
+	FM(DU_DOTCLKIN0) FM(DU_DOTCLKIN1) FM(DU_DOTCLKIN2) FM(DU_DOTCLKIN3) \
+	FM(TMS) FM(TDO) FM(ASEBRK) FM(MLB_REF) FM(TDI) FM(TCK) FM(TRST) FM(EXTALR)
+
+#define PINMUX_PHYS \
+	FM(SCL0) FM(SDA0) FM(SCL3) FM(SDA3) FM(SCL5) FM(SDA5)
+
+enum {
+	PINMUX_RESERVED = 0,
+
+	PINMUX_DATA_BEGIN,
+	GP_ALL(DATA),
+	PINMUX_DATA_END,
+
+#define F_(x, y)
+#define FM(x)	FN_##x,
+	PINMUX_FUNCTION_BEGIN,
+	GP_ALL(FN),
+	PINMUX_GPSR
+	PINMUX_IPSR
+	PINMUX_MOD_SELS
+	PINMUX_FUNCTION_END,
+#undef F_
+#undef FM
+
+#define F_(x, y)
+#define FM(x)	x##_MARK,
+	PINMUX_MARK_BEGIN,
+	PINMUX_GPSR
+	PINMUX_IPSR
+	PINMUX_MOD_SELS
+	PINMUX_STATIC
+	PINMUX_PHYS
+	PINMUX_MARK_END,
+#undef F_
+#undef FM
+};
+
+static const u16 pinmux_data[] = {
+	PINMUX_DATA_GP_ALL(),
+
+	PINMUX_SINGLE(AVS1),
+	PINMUX_SINGLE(AVS2),
+	PINMUX_SINGLE(GP7_02),
+	PINMUX_SINGLE(GP7_03),
+	PINMUX_SINGLE(MSIOF0_RXD),
+	PINMUX_SINGLE(MSIOF0_SCK),
+	PINMUX_SINGLE(MSIOF0_TXD),
+	PINMUX_SINGLE(SD2_CMD),
+	PINMUX_SINGLE(SD3_CLK),
+	PINMUX_SINGLE(SD3_CMD),
+	PINMUX_SINGLE(SD3_DAT0),
+	PINMUX_SINGLE(SD3_DAT1),
+	PINMUX_SINGLE(SD3_DAT2),
+	PINMUX_SINGLE(SD3_DAT3),
+	PINMUX_SINGLE(SD3_DS),
+	PINMUX_SINGLE(SSI_SCK5),
+	PINMUX_SINGLE(SSI_SDATA5),
+	PINMUX_SINGLE(SSI_WS5),
+
+	/* IPSR0 */
+	PINMUX_IPSR_GPSR(IP0_3_0,	AVB_MDC),
+	PINMUX_IPSR_MSEL(IP0_3_0,	MSIOF2_SS2_C,		SEL_MSIOF2_2),
+
+	PINMUX_IPSR_GPSR(IP0_7_4,	AVB_MAGIC),
+	PINMUX_IPSR_MSEL(IP0_7_4,	MSIOF2_SS1_C,		SEL_MSIOF2_2),
+	PINMUX_IPSR_MSEL(IP0_7_4,	SCK4_A,			SEL_SCIF4_0),
+
+	PINMUX_IPSR_GPSR(IP0_11_8,	AVB_PHY_INT),
+	PINMUX_IPSR_MSEL(IP0_11_8,	MSIOF2_SYNC_C,		SEL_MSIOF2_2),
+	PINMUX_IPSR_MSEL(IP0_11_8,	RX4_A,			SEL_SCIF4_0),
+
+	PINMUX_IPSR_GPSR(IP0_15_12,	AVB_LINK),
+	PINMUX_IPSR_MSEL(IP0_15_12,	MSIOF2_SCK_C,		SEL_MSIOF2_2),
+	PINMUX_IPSR_MSEL(IP0_15_12,	TX4_A,			SEL_SCIF4_0),
+
+	PINMUX_IPSR_PHYS_MSEL(IP0_19_16, AVB_AVTP_MATCH_A,	I2C_SEL_5_0,	SEL_ETHERAVB_0),
+	PINMUX_IPSR_PHYS_MSEL(IP0_19_16, MSIOF2_RXD_C,		I2C_SEL_5_0,	SEL_MSIOF2_2),
+	PINMUX_IPSR_PHYS_MSEL(IP0_19_16, CTS4_N_A,		I2C_SEL_5_0,	SEL_SCIF4_0),
+	PINMUX_IPSR_PHYS(IP0_19_16,	SCL5,			I2C_SEL_5_1),
+
+	PINMUX_IPSR_PHYS_MSEL(IP0_23_20, AVB_AVTP_CAPTURE_A,	I2C_SEL_5_0,	SEL_ETHERAVB_0),
+	PINMUX_IPSR_PHYS_MSEL(IP0_23_20, MSIOF2_TXD_C,		I2C_SEL_5_0,	SEL_MSIOF2_2),
+	PINMUX_IPSR_PHYS_MSEL(IP0_23_20, RTS4_N_A,		I2C_SEL_5_0,	SEL_SCIF4_0),
+	PINMUX_IPSR_PHYS(IP0_23_20,	SDA5,			I2C_SEL_5_1),
+
+	PINMUX_IPSR_GPSR(IP0_27_24,	IRQ0),
+	PINMUX_IPSR_GPSR(IP0_27_24,	QPOLB),
+	PINMUX_IPSR_GPSR(IP0_27_24,	DU_CDE),
+	PINMUX_IPSR_MSEL(IP0_27_24,	VI4_DATA0_B,		SEL_VIN4_1),
+	PINMUX_IPSR_MSEL(IP0_27_24,	CAN0_TX_B,		SEL_RCAN0_1),
+	PINMUX_IPSR_MSEL(IP0_27_24,	CANFD0_TX_B,		SEL_CANFD0_1),
+
+	PINMUX_IPSR_GPSR(IP0_31_28,	IRQ1),
+	PINMUX_IPSR_GPSR(IP0_31_28,	QPOLA),
+	PINMUX_IPSR_GPSR(IP0_31_28,	DU_DISP),
+	PINMUX_IPSR_MSEL(IP0_31_28,	VI4_DATA1_B,		SEL_VIN4_1),
+	PINMUX_IPSR_MSEL(IP0_31_28,	CAN0_RX_B,		SEL_RCAN0_1),
+	PINMUX_IPSR_MSEL(IP0_31_28,	CANFD0_RX_B,		SEL_CANFD0_1),
+
+	/* IPSR1 */
+	PINMUX_IPSR_GPSR(IP1_3_0,	IRQ2),
+	PINMUX_IPSR_GPSR(IP1_3_0,	QCPV_QDE),
+	PINMUX_IPSR_GPSR(IP1_3_0,	DU_EXODDF_DU_ODDF_DISP_CDE),
+	PINMUX_IPSR_MSEL(IP1_3_0,	VI4_DATA2_B,		SEL_VIN4_1),
+	PINMUX_IPSR_MSEL(IP1_3_0,	PWM3_B,			SEL_PWM3_1),
+
+	PINMUX_IPSR_GPSR(IP1_7_4,	IRQ3),
+	PINMUX_IPSR_GPSR(IP1_7_4,	QSTVB_QVE),
+	PINMUX_IPSR_GPSR(IP1_7_4,	A25),
+	PINMUX_IPSR_GPSR(IP1_7_4,	DU_DOTCLKOUT1),
+	PINMUX_IPSR_MSEL(IP1_7_4,	VI4_DATA3_B,		SEL_VIN4_1),
+	PINMUX_IPSR_MSEL(IP1_7_4,	PWM4_B,			SEL_PWM4_1),
+
+	PINMUX_IPSR_GPSR(IP1_11_8,	IRQ4),
+	PINMUX_IPSR_GPSR(IP1_11_8,	QSTH_QHS),
+	PINMUX_IPSR_GPSR(IP1_11_8,	A24),
+	PINMUX_IPSR_GPSR(IP1_11_8,	DU_EXHSYNC_DU_HSYNC),
+	PINMUX_IPSR_MSEL(IP1_11_8,	VI4_DATA4_B,		SEL_VIN4_1),
+	PINMUX_IPSR_MSEL(IP1_11_8,	PWM5_B,			SEL_PWM5_1),
+
+	PINMUX_IPSR_GPSR(IP1_15_12,	IRQ5),
+	PINMUX_IPSR_GPSR(IP1_15_12,	QSTB_QHE),
+	PINMUX_IPSR_GPSR(IP1_15_12,	A23),
+	PINMUX_IPSR_GPSR(IP1_15_12,	DU_EXVSYNC_DU_VSYNC),
+	PINMUX_IPSR_MSEL(IP1_15_12,	VI4_DATA5_B,		SEL_VIN4_1),
+	PINMUX_IPSR_MSEL(IP1_15_12,	PWM6_B,			SEL_PWM6_1),
+
+	PINMUX_IPSR_GPSR(IP1_19_16,	PWM0),
+	PINMUX_IPSR_GPSR(IP1_19_16,	AVB_AVTP_PPS),
+	PINMUX_IPSR_GPSR(IP1_19_16,	A22),
+	PINMUX_IPSR_MSEL(IP1_19_16,	VI4_DATA6_B,		SEL_VIN4_1),
+	PINMUX_IPSR_MSEL(IP1_19_16,	IECLK_B,		SEL_IEBUS_1),
+
+	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, PWM1_A,		I2C_SEL_3_0,	SEL_PWM1_0),
+	PINMUX_IPSR_MSEL(IP1_23_20,	A21,			I2C_SEL_3_0),
+	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D,		I2C_SEL_3_0,	SEL_HSCIF3_3),
+	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B,		I2C_SEL_3_0,	SEL_VIN4_1),
+	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B,		I2C_SEL_3_0,	SEL_IEBUS_1),
+	PINMUX_IPSR_PHYS(IP1_23_20,	SCL3,			I2C_SEL_3_1),
+
+	PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A,		I2C_SEL_3_0,	SEL_PWM2_0),
+	PINMUX_IPSR_MSEL(IP1_27_24,	A20,			I2C_SEL_3_0),
+	PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D,		I2C_SEL_3_0,	SEL_HSCIF3_3),
+	PINMUX_IPSR_PHYS_MSEL(IP1_27_24, IETX_B,		I2C_SEL_3_0,	SEL_IEBUS_1),
+	PINMUX_IPSR_PHYS(IP1_27_24,	SDA3,			I2C_SEL_3_1),
+
+	PINMUX_IPSR_GPSR(IP1_31_28,	A0),
+	PINMUX_IPSR_GPSR(IP1_31_28,	LCDOUT16),
+	PINMUX_IPSR_MSEL(IP1_31_28,	MSIOF3_SYNC_B,		SEL_MSIOF3_1),
+	PINMUX_IPSR_GPSR(IP1_31_28,	VI4_DATA8),
+	PINMUX_IPSR_GPSR(IP1_31_28,	DU_DB0),
+	PINMUX_IPSR_MSEL(IP1_31_28,	PWM3_A,			SEL_PWM3_0),
+
+	/* IPSR2 */
+	PINMUX_IPSR_GPSR(IP2_3_0,	A1),
+	PINMUX_IPSR_GPSR(IP2_3_0,	LCDOUT17),
+	PINMUX_IPSR_MSEL(IP2_3_0,	MSIOF3_TXD_B,		SEL_MSIOF3_1),
+	PINMUX_IPSR_GPSR(IP2_3_0,	VI4_DATA9),
+	PINMUX_IPSR_GPSR(IP2_3_0,	DU_DB1),
+	PINMUX_IPSR_MSEL(IP2_3_0,	PWM4_A,			SEL_PWM4_0),
+
+	PINMUX_IPSR_GPSR(IP2_7_4,	A2),
+	PINMUX_IPSR_GPSR(IP2_7_4,	LCDOUT18),
+	PINMUX_IPSR_MSEL(IP2_7_4,	MSIOF3_SCK_B,		SEL_MSIOF3_1),
+	PINMUX_IPSR_GPSR(IP2_7_4,	VI4_DATA10),
+	PINMUX_IPSR_GPSR(IP2_7_4,	DU_DB2),
+	PINMUX_IPSR_MSEL(IP2_7_4,	PWM5_A,			SEL_PWM5_0),
+
+	PINMUX_IPSR_GPSR(IP2_11_8,	A3),
+	PINMUX_IPSR_GPSR(IP2_11_8,	LCDOUT19),
+	PINMUX_IPSR_MSEL(IP2_11_8,	MSIOF3_RXD_B,		SEL_MSIOF3_1),
+	PINMUX_IPSR_GPSR(IP2_11_8,	VI4_DATA11),
+	PINMUX_IPSR_GPSR(IP2_11_8,	DU_DB3),
+	PINMUX_IPSR_MSEL(IP2_11_8,	PWM6_A,			SEL_PWM6_0),
+
+	PINMUX_IPSR_GPSR(IP2_15_12,	A4),
+	PINMUX_IPSR_GPSR(IP2_15_12,	LCDOUT20),
+	PINMUX_IPSR_MSEL(IP2_15_12,	MSIOF3_SS1_B,		SEL_MSIOF3_1),
+	PINMUX_IPSR_GPSR(IP2_15_12,	VI4_DATA12),
+	PINMUX_IPSR_GPSR(IP2_15_12,	VI5_DATA12),
+	PINMUX_IPSR_GPSR(IP2_15_12,	DU_DB4),
+
+	PINMUX_IPSR_GPSR(IP2_19_16,	A5),
+	PINMUX_IPSR_GPSR(IP2_19_16,	LCDOUT21),
+	PINMUX_IPSR_MSEL(IP2_19_16,	MSIOF3_SS2_B,		SEL_MSIOF3_1),
+	PINMUX_IPSR_MSEL(IP2_19_16,	SCK4_B,			SEL_SCIF4_1),
+	PINMUX_IPSR_GPSR(IP2_19_16,	VI4_DATA13),
+	PINMUX_IPSR_GPSR(IP2_19_16,	VI5_DATA13),
+	PINMUX_IPSR_GPSR(IP2_19_16,	DU_DB5),
+
+	PINMUX_IPSR_GPSR(IP2_23_20,	A6),
+	PINMUX_IPSR_GPSR(IP2_23_20,	LCDOUT22),
+	PINMUX_IPSR_MSEL(IP2_23_20,	MSIOF2_SS1_A,		SEL_MSIOF2_0),
+	PINMUX_IPSR_MSEL(IP2_23_20,	RX4_B,			SEL_SCIF4_1),
+	PINMUX_IPSR_GPSR(IP2_23_20,	VI4_DATA14),
+	PINMUX_IPSR_GPSR(IP2_23_20,	VI5_DATA14),
+	PINMUX_IPSR_GPSR(IP2_23_20,	DU_DB6),
+
+	PINMUX_IPSR_GPSR(IP2_27_24,	A7),
+	PINMUX_IPSR_GPSR(IP2_27_24,	LCDOUT23),
+	PINMUX_IPSR_MSEL(IP2_27_24,	MSIOF2_SS2_A,		SEL_MSIOF2_0),
+	PINMUX_IPSR_MSEL(IP2_27_24,	TX4_B,			SEL_SCIF4_1),
+	PINMUX_IPSR_GPSR(IP2_27_24,	VI4_DATA15),
+	PINMUX_IPSR_GPSR(IP2_27_24,	VI5_DATA15),
+	PINMUX_IPSR_GPSR(IP2_27_24,	DU_DB7),
+
+	PINMUX_IPSR_GPSR(IP2_31_28,	A8),
+	PINMUX_IPSR_MSEL(IP2_31_28,	RX3_B,			SEL_SCIF3_1),
+	PINMUX_IPSR_MSEL(IP2_31_28,	MSIOF2_SYNC_A,		SEL_MSIOF2_0),
+	PINMUX_IPSR_MSEL(IP2_31_28,	HRX4_B,			SEL_HSCIF4_1),
+	PINMUX_IPSR_MSEL(IP2_31_28,	SDA6_A,			SEL_I2C6_0),
+	PINMUX_IPSR_MSEL(IP2_31_28,	AVB_AVTP_MATCH_B,	SEL_ETHERAVB_1),
+	PINMUX_IPSR_MSEL(IP2_31_28,	PWM1_B,			SEL_PWM1_1),
+
+	/* IPSR3 */
+	PINMUX_IPSR_GPSR(IP3_3_0,	A9),
+	PINMUX_IPSR_MSEL(IP3_3_0,	MSIOF2_SCK_A,		SEL_MSIOF2_0),
+	PINMUX_IPSR_MSEL(IP3_3_0,	CTS4_N_B,		SEL_SCIF4_1),
+	PINMUX_IPSR_GPSR(IP3_3_0,	VI5_VSYNC_N),
+
+	PINMUX_IPSR_GPSR(IP3_7_4,	A10),
+	PINMUX_IPSR_MSEL(IP3_7_4,	MSIOF2_RXD_A,		SEL_MSIOF2_0),
+	PINMUX_IPSR_MSEL(IP3_7_4,	RTS4_N_B,		SEL_SCIF4_1),
+	PINMUX_IPSR_GPSR(IP3_7_4,	VI5_HSYNC_N),
+
+	PINMUX_IPSR_GPSR(IP3_11_8,	A11),
+	PINMUX_IPSR_MSEL(IP3_11_8,	TX3_B,			SEL_SCIF3_1),
+	PINMUX_IPSR_MSEL(IP3_11_8,	MSIOF2_TXD_A,		SEL_MSIOF2_0),
+	PINMUX_IPSR_MSEL(IP3_11_8,	HTX4_B,			SEL_HSCIF4_1),
+	PINMUX_IPSR_GPSR(IP3_11_8,	HSCK4),
+	PINMUX_IPSR_GPSR(IP3_11_8,	VI5_FIELD),
+	PINMUX_IPSR_MSEL(IP3_11_8,	SCL6_A,			SEL_I2C6_0),
+	PINMUX_IPSR_MSEL(IP3_11_8,	AVB_AVTP_CAPTURE_B,	SEL_ETHERAVB_1),
+	PINMUX_IPSR_MSEL(IP3_11_8,	PWM2_B,			SEL_PWM2_1),
+
+	PINMUX_IPSR_GPSR(IP3_15_12,	A12),
+	PINMUX_IPSR_GPSR(IP3_15_12,	LCDOUT12),
+	PINMUX_IPSR_MSEL(IP3_15_12,	MSIOF3_SCK_C,		SEL_MSIOF3_2),
+	PINMUX_IPSR_MSEL(IP3_15_12,	HRX4_A,			SEL_HSCIF4_0),
+	PINMUX_IPSR_GPSR(IP3_15_12,	VI5_DATA8),
+	PINMUX_IPSR_GPSR(IP3_15_12,	DU_DG4),
+
+	PINMUX_IPSR_GPSR(IP3_19_16,	A13),
+	PINMUX_IPSR_GPSR(IP3_19_16,	LCDOUT13),
+	PINMUX_IPSR_MSEL(IP3_19_16,	MSIOF3_SYNC_C,		SEL_MSIOF3_2),
+	PINMUX_IPSR_MSEL(IP3_19_16,	HTX4_A,			SEL_HSCIF4_0),
+	PINMUX_IPSR_GPSR(IP3_19_16,	VI5_DATA9),
+	PINMUX_IPSR_GPSR(IP3_19_16,	DU_DG5),
+
+	PINMUX_IPSR_GPSR(IP3_23_20,	A14),
+	PINMUX_IPSR_GPSR(IP3_23_20,	LCDOUT14),
+	PINMUX_IPSR_MSEL(IP3_23_20,	MSIOF3_RXD_C,		SEL_MSIOF3_2),
+	PINMUX_IPSR_GPSR(IP3_23_20,	HCTS4_N),
+	PINMUX_IPSR_GPSR(IP3_23_20,	VI5_DATA10),
+	PINMUX_IPSR_GPSR(IP3_23_20,	DU_DG6),
+
+	PINMUX_IPSR_GPSR(IP3_27_24,	A15),
+	PINMUX_IPSR_GPSR(IP3_27_24,	LCDOUT15),
+	PINMUX_IPSR_MSEL(IP3_27_24,	MSIOF3_TXD_C,		SEL_MSIOF3_2),
+	PINMUX_IPSR_GPSR(IP3_27_24,	HRTS4_N),
+	PINMUX_IPSR_GPSR(IP3_27_24,	VI5_DATA11),
+	PINMUX_IPSR_GPSR(IP3_27_24,	DU_DG7),
+
+	PINMUX_IPSR_GPSR(IP3_31_28,	A16),
+	PINMUX_IPSR_GPSR(IP3_31_28,	LCDOUT8),
+	PINMUX_IPSR_GPSR(IP3_31_28,	VI4_FIELD),
+	PINMUX_IPSR_GPSR(IP3_31_28,	DU_DG0),
+
+	/* IPSR4 */
+	PINMUX_IPSR_GPSR(IP4_3_0,	A17),
+	PINMUX_IPSR_GPSR(IP4_3_0,	LCDOUT9),
+	PINMUX_IPSR_GPSR(IP4_3_0,	VI4_VSYNC_N),
+	PINMUX_IPSR_GPSR(IP4_3_0,	DU_DG1),
+
+	PINMUX_IPSR_GPSR(IP4_7_4,	A18),
+	PINMUX_IPSR_GPSR(IP4_7_4,	LCDOUT10),
+	PINMUX_IPSR_GPSR(IP4_7_4,	VI4_HSYNC_N),
+	PINMUX_IPSR_GPSR(IP4_7_4,	DU_DG2),
+
+	PINMUX_IPSR_GPSR(IP4_11_8,	A19),
+	PINMUX_IPSR_GPSR(IP4_11_8,	LCDOUT11),
+	PINMUX_IPSR_GPSR(IP4_11_8,	VI4_CLKENB),
+	PINMUX_IPSR_GPSR(IP4_11_8,	DU_DG3),
+
+	PINMUX_IPSR_GPSR(IP4_15_12,	CS0_N),
+	PINMUX_IPSR_GPSR(IP4_15_12,	VI5_CLKENB),
+
+	PINMUX_IPSR_GPSR(IP4_19_16,	CS1_N_A26),
+	PINMUX_IPSR_GPSR(IP4_19_16,	VI5_CLK),
+	PINMUX_IPSR_MSEL(IP4_19_16,	EX_WAIT0_B,		SEL_LBSC_1),
+
+	PINMUX_IPSR_GPSR(IP4_23_20,	BS_N),
+	PINMUX_IPSR_GPSR(IP4_23_20,	QSTVA_QVS),
+	PINMUX_IPSR_MSEL(IP4_23_20,	MSIOF3_SCK_D,		SEL_MSIOF3_3),
+	PINMUX_IPSR_GPSR(IP4_23_20,	SCK3),
+	PINMUX_IPSR_GPSR(IP4_23_20,	HSCK3),
+	PINMUX_IPSR_GPSR(IP4_23_20,	CAN1_TX),
+	PINMUX_IPSR_GPSR(IP4_23_20,	CANFD1_TX),
+	PINMUX_IPSR_MSEL(IP4_23_20,	IETX_A,			SEL_IEBUS_0),
+
+	PINMUX_IPSR_GPSR(IP4_27_24,	RD_N),
+	PINMUX_IPSR_MSEL(IP4_27_24,	MSIOF3_SYNC_D,		SEL_MSIOF3_3),
+	PINMUX_IPSR_MSEL(IP4_27_24,	RX3_A,			SEL_SCIF3_0),
+	PINMUX_IPSR_MSEL(IP4_27_24,	HRX3_A,			SEL_HSCIF3_0),
+	PINMUX_IPSR_MSEL(IP4_27_24,	CAN0_TX_A,		SEL_RCAN0_0),
+	PINMUX_IPSR_MSEL(IP4_27_24,	CANFD0_TX_A,		SEL_CANFD0_0),
+
+	PINMUX_IPSR_GPSR(IP4_31_28,	RD_WR_N),
+	PINMUX_IPSR_MSEL(IP4_31_28,	MSIOF3_RXD_D,		SEL_MSIOF3_3),
+	PINMUX_IPSR_MSEL(IP4_31_28,	TX3_A,			SEL_SCIF3_0),
+	PINMUX_IPSR_MSEL(IP4_31_28,	HTX3_A,			SEL_HSCIF3_0),
+	PINMUX_IPSR_MSEL(IP4_31_28,	CAN0_RX_A,		SEL_RCAN0_0),
+	PINMUX_IPSR_MSEL(IP4_31_28,	CANFD0_RX_A,		SEL_CANFD0_0),
+
+	/* IPSR5 */
+	PINMUX_IPSR_GPSR(IP5_3_0,	WE0_N),
+	PINMUX_IPSR_MSEL(IP5_3_0,	MSIOF3_TXD_D,		SEL_MSIOF3_3),
+	PINMUX_IPSR_GPSR(IP5_3_0,	CTS3_N),
+	PINMUX_IPSR_GPSR(IP5_3_0,	HCTS3_N),
+	PINMUX_IPSR_MSEL(IP5_3_0,	SCL6_B,			SEL_I2C6_1),
+	PINMUX_IPSR_GPSR(IP5_3_0,	CAN_CLK),
+	PINMUX_IPSR_MSEL(IP5_3_0,	IECLK_A,		SEL_IEBUS_0),
+
+	PINMUX_IPSR_GPSR(IP5_7_4,	WE1_N),
+	PINMUX_IPSR_MSEL(IP5_7_4,	MSIOF3_SS1_D,		SEL_MSIOF3_3),
+	PINMUX_IPSR_GPSR(IP5_7_4,	RTS3_N),
+	PINMUX_IPSR_GPSR(IP5_7_4,	HRTS3_N),
+	PINMUX_IPSR_MSEL(IP5_7_4,	SDA6_B,			SEL_I2C6_1),
+	PINMUX_IPSR_GPSR(IP5_7_4,	CAN1_RX),
+	PINMUX_IPSR_GPSR(IP5_7_4,	CANFD1_RX),
+	PINMUX_IPSR_MSEL(IP5_7_4,	IERX_A,			SEL_IEBUS_0),
+
+	PINMUX_IPSR_MSEL(IP5_11_8,	EX_WAIT0_A,		SEL_LBSC_0),
+	PINMUX_IPSR_GPSR(IP5_11_8,	QCLK),
+	PINMUX_IPSR_GPSR(IP5_11_8,	VI4_CLK),
+	PINMUX_IPSR_GPSR(IP5_11_8,	DU_DOTCLKOUT0),
+
+	PINMUX_IPSR_GPSR(IP5_15_12,	D0),
+	PINMUX_IPSR_MSEL(IP5_15_12,	MSIOF2_SS1_B,		SEL_MSIOF2_1),
+	PINMUX_IPSR_MSEL(IP5_15_12,	MSIOF3_SCK_A,		SEL_MSIOF3_0),
+	PINMUX_IPSR_GPSR(IP5_15_12,	VI4_DATA16),
+	PINMUX_IPSR_GPSR(IP5_15_12,	VI5_DATA0),
+
+	PINMUX_IPSR_GPSR(IP5_19_16,	D1),
+	PINMUX_IPSR_MSEL(IP5_19_16,	MSIOF2_SS2_B,		SEL_MSIOF2_1),
+	PINMUX_IPSR_MSEL(IP5_19_16,	MSIOF3_SYNC_A,		SEL_MSIOF3_0),
+	PINMUX_IPSR_GPSR(IP5_19_16,	VI4_DATA17),
+	PINMUX_IPSR_GPSR(IP5_19_16,	VI5_DATA1),
+
+	PINMUX_IPSR_GPSR(IP5_23_20,	D2),
+	PINMUX_IPSR_MSEL(IP5_23_20,	MSIOF3_RXD_A,		SEL_MSIOF3_0),
+	PINMUX_IPSR_GPSR(IP5_23_20,	VI4_DATA18),
+	PINMUX_IPSR_GPSR(IP5_23_20,	VI5_DATA2),
+
+	PINMUX_IPSR_GPSR(IP5_27_24,	D3),
+	PINMUX_IPSR_MSEL(IP5_27_24,	MSIOF3_TXD_A,		SEL_MSIOF3_0),
+	PINMUX_IPSR_GPSR(IP5_27_24,	VI4_DATA19),
+	PINMUX_IPSR_GPSR(IP5_27_24,	VI5_DATA3),
+
+	PINMUX_IPSR_GPSR(IP5_31_28,	D4),
+	PINMUX_IPSR_MSEL(IP5_31_28,	MSIOF2_SCK_B,		SEL_MSIOF2_1),
+	PINMUX_IPSR_GPSR(IP5_31_28,	VI4_DATA20),
+	PINMUX_IPSR_GPSR(IP5_31_28,	VI5_DATA4),
+
+	/* IPSR6 */
+	PINMUX_IPSR_GPSR(IP6_3_0,	D5),
+	PINMUX_IPSR_MSEL(IP6_3_0,	MSIOF2_SYNC_B,		SEL_MSIOF2_1),
+	PINMUX_IPSR_GPSR(IP6_3_0,	VI4_DATA21),
+	PINMUX_IPSR_GPSR(IP6_3_0,	VI5_DATA5),
+
+	PINMUX_IPSR_GPSR(IP6_7_4,	D6),
+	PINMUX_IPSR_MSEL(IP6_7_4,	MSIOF2_RXD_B,		SEL_MSIOF2_1),
+	PINMUX_IPSR_GPSR(IP6_7_4,	VI4_DATA22),
+	PINMUX_IPSR_GPSR(IP6_7_4,	VI5_DATA6),
+
+	PINMUX_IPSR_GPSR(IP6_11_8,	D7),
+	PINMUX_IPSR_MSEL(IP6_11_8,	MSIOF2_TXD_B,		SEL_MSIOF2_1),
+	PINMUX_IPSR_GPSR(IP6_11_8,	VI4_DATA23),
+	PINMUX_IPSR_GPSR(IP6_11_8,	VI5_DATA7),
+
+	PINMUX_IPSR_GPSR(IP6_15_12,	D8),
+	PINMUX_IPSR_GPSR(IP6_15_12,	LCDOUT0),
+	PINMUX_IPSR_MSEL(IP6_15_12,	MSIOF2_SCK_D,		SEL_MSIOF2_3),
+	PINMUX_IPSR_MSEL(IP6_15_12,	SCK4_C,			SEL_SCIF4_2),
+	PINMUX_IPSR_MSEL(IP6_15_12,	VI4_DATA0_A,		SEL_VIN4_0),
+	PINMUX_IPSR_GPSR(IP6_15_12,	DU_DR0),
+
+	PINMUX_IPSR_GPSR(IP6_19_16,	D9),
+	PINMUX_IPSR_GPSR(IP6_19_16,	LCDOUT1),
+	PINMUX_IPSR_MSEL(IP6_19_16,	MSIOF2_SYNC_D,		SEL_MSIOF2_3),
+	PINMUX_IPSR_MSEL(IP6_19_16,	VI4_DATA1_A,		SEL_VIN4_0),
+	PINMUX_IPSR_GPSR(IP6_19_16,	DU_DR1),
+
+	PINMUX_IPSR_GPSR(IP6_23_20,	D10),
+	PINMUX_IPSR_GPSR(IP6_23_20,	LCDOUT2),
+	PINMUX_IPSR_MSEL(IP6_23_20,	MSIOF2_RXD_D,		SEL_MSIOF2_3),
+	PINMUX_IPSR_MSEL(IP6_23_20,	HRX3_B,			SEL_HSCIF3_1),
+	PINMUX_IPSR_MSEL(IP6_23_20,	VI4_DATA2_A,		SEL_VIN4_0),
+	PINMUX_IPSR_MSEL(IP6_23_20,	CTS4_N_C,		SEL_SCIF4_2),
+	PINMUX_IPSR_GPSR(IP6_23_20,	DU_DR2),
+
+	PINMUX_IPSR_GPSR(IP6_27_24,	D11),
+	PINMUX_IPSR_GPSR(IP6_27_24,	LCDOUT3),
+	PINMUX_IPSR_MSEL(IP6_27_24,	MSIOF2_TXD_D,		SEL_MSIOF2_3),
+	PINMUX_IPSR_MSEL(IP6_27_24,	HTX3_B,			SEL_HSCIF3_1),
+	PINMUX_IPSR_MSEL(IP6_27_24,	VI4_DATA3_A,		SEL_VIN4_0),
+	PINMUX_IPSR_MSEL(IP6_27_24,	RTS4_N_C,		SEL_SCIF4_2),
+	PINMUX_IPSR_GPSR(IP6_27_24,	DU_DR3),
+
+	PINMUX_IPSR_GPSR(IP6_31_28,	D12),
+	PINMUX_IPSR_GPSR(IP6_31_28,	LCDOUT4),
+	PINMUX_IPSR_MSEL(IP6_31_28,	MSIOF2_SS1_D,		SEL_MSIOF2_3),
+	PINMUX_IPSR_MSEL(IP6_31_28,	RX4_C,			SEL_SCIF4_2),
+	PINMUX_IPSR_MSEL(IP6_31_28,	VI4_DATA4_A,		SEL_VIN4_0),
+	PINMUX_IPSR_GPSR(IP6_31_28,	DU_DR4),
+
+	/* IPSR7 */
+	PINMUX_IPSR_GPSR(IP7_3_0,	D13),
+	PINMUX_IPSR_GPSR(IP7_3_0,	LCDOUT5),
+	PINMUX_IPSR_MSEL(IP7_3_0,	MSIOF2_SS2_D,		SEL_MSIOF2_3),
+	PINMUX_IPSR_MSEL(IP7_3_0,	TX4_C,			SEL_SCIF4_2),
+	PINMUX_IPSR_MSEL(IP7_3_0,	VI4_DATA5_A,		SEL_VIN4_0),
+	PINMUX_IPSR_GPSR(IP7_3_0,	DU_DR5),
+
+	PINMUX_IPSR_GPSR(IP7_7_4,	D14),
+	PINMUX_IPSR_GPSR(IP7_7_4,	LCDOUT6),
+	PINMUX_IPSR_MSEL(IP7_7_4,	MSIOF3_SS1_A,		SEL_MSIOF3_0),
+	PINMUX_IPSR_MSEL(IP7_7_4,	HRX3_C,			SEL_HSCIF3_2),
+	PINMUX_IPSR_MSEL(IP7_7_4,	VI4_DATA6_A,		SEL_VIN4_0),
+	PINMUX_IPSR_GPSR(IP7_7_4,	DU_DR6),
+	PINMUX_IPSR_MSEL(IP7_7_4,	SCL6_C,			SEL_I2C6_2),
+
+	PINMUX_IPSR_GPSR(IP7_11_8,	D15),
+	PINMUX_IPSR_GPSR(IP7_11_8,	LCDOUT7),
+	PINMUX_IPSR_MSEL(IP7_11_8,	MSIOF3_SS2_A,		SEL_MSIOF3_0),
+	PINMUX_IPSR_MSEL(IP7_11_8,	HTX3_C,			SEL_HSCIF3_2),
+	PINMUX_IPSR_MSEL(IP7_11_8,	VI4_DATA7_A,		SEL_VIN4_0),
+	PINMUX_IPSR_GPSR(IP7_11_8,	DU_DR7),
+	PINMUX_IPSR_MSEL(IP7_11_8,	SDA6_C,			SEL_I2C6_2),
+
+	PINMUX_IPSR_GPSR(IP7_15_12,	FSCLKST),
+
+	PINMUX_IPSR_GPSR(IP7_19_16,	SD0_CLK),
+	PINMUX_IPSR_MSEL(IP7_19_16,	MSIOF1_SCK_E,		SEL_MSIOF1_4),
+	PINMUX_IPSR_MSEL(IP7_19_16,	STP_OPWM_0_B,		SEL_SSP1_0_1),
+
+	PINMUX_IPSR_GPSR(IP7_23_20,	SD0_CMD),
+	PINMUX_IPSR_MSEL(IP7_23_20,	MSIOF1_SYNC_E,		SEL_MSIOF1_4),
+	PINMUX_IPSR_MSEL(IP7_23_20,	STP_IVCXO27_0_B,	SEL_SSP1_0_1),
+
+	PINMUX_IPSR_GPSR(IP7_27_24,	SD0_DAT0),
+	PINMUX_IPSR_MSEL(IP7_27_24,	MSIOF1_RXD_E,		SEL_MSIOF1_4),
+	PINMUX_IPSR_MSEL(IP7_27_24,	TS_SCK0_B,		SEL_TSIF0_1),
+	PINMUX_IPSR_MSEL(IP7_27_24,	STP_ISCLK_0_B,		SEL_SSP1_0_1),
+
+	PINMUX_IPSR_GPSR(IP7_31_28,	SD0_DAT1),
+	PINMUX_IPSR_MSEL(IP7_31_28,	MSIOF1_TXD_E,		SEL_MSIOF1_4),
+	PINMUX_IPSR_MSEL(IP7_31_28,	TS_SPSYNC0_B,		SEL_TSIF0_1),
+	PINMUX_IPSR_MSEL(IP7_31_28,	STP_ISSYNC_0_B,		SEL_SSP1_0_1),
+
+	/* IPSR8 */
+	PINMUX_IPSR_GPSR(IP8_3_0,	SD0_DAT2),
+	PINMUX_IPSR_MSEL(IP8_3_0,	MSIOF1_SS1_E,		SEL_MSIOF1_4),
+	PINMUX_IPSR_MSEL(IP8_3_0,	TS_SDAT0_B,		SEL_TSIF0_1),
+	PINMUX_IPSR_MSEL(IP8_3_0,	STP_ISD_0_B,		SEL_SSP1_0_1),
+
+	PINMUX_IPSR_GPSR(IP8_7_4,	SD0_DAT3),
+	PINMUX_IPSR_MSEL(IP8_7_4,	MSIOF1_SS2_E,		SEL_MSIOF1_4),
+	PINMUX_IPSR_MSEL(IP8_7_4,	TS_SDEN0_B,		SEL_TSIF0_1),
+	PINMUX_IPSR_MSEL(IP8_7_4,	STP_ISEN_0_B,		SEL_SSP1_0_1),
+
+	PINMUX_IPSR_GPSR(IP8_11_8,	SD1_CLK),
+	PINMUX_IPSR_MSEL(IP8_11_8,	MSIOF1_SCK_G,		SEL_MSIOF1_6),
+	PINMUX_IPSR_MSEL(IP8_11_8,	SIM0_CLK_A,		SEL_SIMCARD_0),
+
+	PINMUX_IPSR_GPSR(IP8_15_12,	SD1_CMD),
+	PINMUX_IPSR_MSEL(IP8_15_12,	MSIOF1_SYNC_G,		SEL_MSIOF1_6),
+	PINMUX_IPSR_MSEL(IP8_15_12,	SIM0_D_A,		SEL_SIMCARD_0),
+	PINMUX_IPSR_MSEL(IP8_15_12,	STP_IVCXO27_1_B,	SEL_SSP1_1_1),
+
+	PINMUX_IPSR_GPSR(IP8_19_16,	SD1_DAT0),
+	PINMUX_IPSR_GPSR(IP8_19_16,	SD2_DAT4),
+	PINMUX_IPSR_MSEL(IP8_19_16,	MSIOF1_RXD_G,		SEL_MSIOF1_6),
+	PINMUX_IPSR_MSEL(IP8_19_16,	TS_SCK1_B,		SEL_TSIF1_1),
+	PINMUX_IPSR_MSEL(IP8_19_16,	STP_ISCLK_1_B,		SEL_SSP1_1_1),
+
+	PINMUX_IPSR_GPSR(IP8_23_20,	SD1_DAT1),
+	PINMUX_IPSR_GPSR(IP8_23_20,	SD2_DAT5),
+	PINMUX_IPSR_MSEL(IP8_23_20,	MSIOF1_TXD_G,		SEL_MSIOF1_6),
+	PINMUX_IPSR_MSEL(IP8_23_20,	TS_SPSYNC1_B,		SEL_TSIF1_1),
+	PINMUX_IPSR_MSEL(IP8_23_20,	STP_ISSYNC_1_B,		SEL_SSP1_1_1),
+
+	PINMUX_IPSR_GPSR(IP8_27_24,	SD1_DAT2),
+	PINMUX_IPSR_GPSR(IP8_27_24,	SD2_DAT6),
+	PINMUX_IPSR_MSEL(IP8_27_24,	MSIOF1_SS1_G,		SEL_MSIOF1_6),
+	PINMUX_IPSR_MSEL(IP8_27_24,	TS_SDAT1_B,		SEL_TSIF1_1),
+	PINMUX_IPSR_MSEL(IP8_27_24,	STP_ISD_1_B,		SEL_SSP1_1_1),
+
+	PINMUX_IPSR_GPSR(IP8_31_28,	SD1_DAT3),
+	PINMUX_IPSR_GPSR(IP8_31_28,	SD2_DAT7),
+	PINMUX_IPSR_MSEL(IP8_31_28,	MSIOF1_SS2_G,		SEL_MSIOF1_6),
+	PINMUX_IPSR_MSEL(IP8_31_28,	TS_SDEN1_B,		SEL_TSIF1_1),
+	PINMUX_IPSR_MSEL(IP8_31_28,	STP_ISEN_1_B,		SEL_SSP1_1_1),
+
+	/* IPSR9 */
+	PINMUX_IPSR_GPSR(IP9_3_0,	SD2_CLK),
+
+	PINMUX_IPSR_GPSR(IP9_7_4,	SD2_DAT0),
+
+	PINMUX_IPSR_GPSR(IP9_11_8,	SD2_DAT1),
+
+	PINMUX_IPSR_GPSR(IP9_15_12,	SD2_DAT2),
+
+	PINMUX_IPSR_GPSR(IP9_19_16,	SD2_DAT3),
+
+	PINMUX_IPSR_GPSR(IP9_23_20,	SD2_DS),
+	PINMUX_IPSR_MSEL(IP9_23_20,	SATA_DEVSLP_B,		SEL_SATA_1),
+
+	PINMUX_IPSR_GPSR(IP9_27_24,	SD3_DAT4),
+	PINMUX_IPSR_MSEL(IP9_27_24,	SD2_CD_A,		SEL_SDHI2_0),
+
+	PINMUX_IPSR_GPSR(IP9_31_28,	SD3_DAT5),
+	PINMUX_IPSR_MSEL(IP9_31_28,	SD2_WP_A,		SEL_SDHI2_0),
+
+	/* IPSR10 */
+	PINMUX_IPSR_GPSR(IP10_3_0,	SD3_DAT6),
+	PINMUX_IPSR_GPSR(IP10_3_0,	SD3_CD),
+
+	PINMUX_IPSR_GPSR(IP10_7_4,	SD3_DAT7),
+	PINMUX_IPSR_GPSR(IP10_7_4,	SD3_WP),
+
+	PINMUX_IPSR_GPSR(IP10_11_8,	SD0_CD),
+	PINMUX_IPSR_MSEL(IP10_11_8,	SCL2_B,			SEL_I2C2_1),
+	PINMUX_IPSR_MSEL(IP10_11_8,	SIM0_RST_A,		SEL_SIMCARD_0),
+
+	PINMUX_IPSR_GPSR(IP10_15_12,	SD0_WP),
+	PINMUX_IPSR_MSEL(IP10_15_12,	SDA2_B,			SEL_I2C2_1),
+
+	PINMUX_IPSR_MSEL(IP10_19_16,	SD1_CD,			I2C_SEL_0_0),
+	PINMUX_IPSR_PHYS_MSEL(IP10_19_16, SIM0_CLK_B,		I2C_SEL_0_0,	SEL_SIMCARD_1),
+	PINMUX_IPSR_PHYS(IP10_19_16,	SCL0,			I2C_SEL_0_1),
+
+	PINMUX_IPSR_MSEL(IP10_23_20,	SD1_WP,			I2C_SEL_0_0),
+	PINMUX_IPSR_PHYS_MSEL(IP10_23_20, SIM0_D_B,		I2C_SEL_0_0,	SEL_SIMCARD_1),
+	PINMUX_IPSR_PHYS(IP10_23_20,	SDA0,			I2C_SEL_0_1),
+
+	PINMUX_IPSR_GPSR(IP10_27_24,	SCK0),
+	PINMUX_IPSR_MSEL(IP10_27_24,	HSCK1_B,		SEL_HSCIF1_1),
+	PINMUX_IPSR_MSEL(IP10_27_24,	MSIOF1_SS2_B,		SEL_MSIOF1_1),
+	PINMUX_IPSR_MSEL(IP10_27_24,	AUDIO_CLKC_B,		SEL_ADG_1),
+	PINMUX_IPSR_MSEL(IP10_27_24,	SDA2_A,			SEL_I2C2_0),
+	PINMUX_IPSR_MSEL(IP10_27_24,	SIM0_RST_B,		SEL_SIMCARD_1),
+	PINMUX_IPSR_MSEL(IP10_27_24,	STP_OPWM_0_C,		SEL_SSP1_0_2),
+	PINMUX_IPSR_MSEL(IP10_27_24,	RIF0_CLK_B,		SEL_DRIF0_1),
+	PINMUX_IPSR_GPSR(IP10_27_24,	ADICHS2),
+
+	PINMUX_IPSR_GPSR(IP10_31_28,	RX0),
+	PINMUX_IPSR_MSEL(IP10_31_28,	HRX1_B,			SEL_HSCIF1_1),
+	PINMUX_IPSR_MSEL(IP10_31_28,	TS_SCK0_C,		SEL_TSIF0_2),
+	PINMUX_IPSR_MSEL(IP10_31_28,	STP_ISCLK_0_C,		SEL_SSP1_0_2),
+	PINMUX_IPSR_MSEL(IP10_31_28,	RIF0_D0_B,		SEL_DRIF0_1),
+
+	/* IPSR11 */
+	PINMUX_IPSR_GPSR(IP11_3_0,	TX0),
+	PINMUX_IPSR_MSEL(IP11_3_0,	HTX1_B,			SEL_HSCIF1_1),
+	PINMUX_IPSR_MSEL(IP11_3_0,	TS_SPSYNC0_C,		SEL_TSIF0_2),
+	PINMUX_IPSR_MSEL(IP11_3_0,	STP_ISSYNC_0_C,		SEL_SSP1_0_2),
+	PINMUX_IPSR_MSEL(IP11_3_0,	RIF0_D1_B,		SEL_DRIF0_1),
+
+	PINMUX_IPSR_GPSR(IP11_7_4,	CTS0_N),
+	PINMUX_IPSR_MSEL(IP11_7_4,	HCTS1_N_B,		SEL_HSCIF1_1),
+	PINMUX_IPSR_MSEL(IP11_7_4,	MSIOF1_SYNC_B,		SEL_MSIOF1_1),
+	PINMUX_IPSR_MSEL(IP11_7_4,	TS_SPSYNC1_C,		SEL_TSIF1_2),
+	PINMUX_IPSR_MSEL(IP11_7_4,	STP_ISSYNC_1_C,		SEL_SSP1_1_2),
+	PINMUX_IPSR_MSEL(IP11_7_4,	RIF1_SYNC_B,		SEL_DRIF1_1),
+	PINMUX_IPSR_MSEL(IP11_7_4,	AUDIO_CLKOUT_C,		SEL_ADG_2),
+	PINMUX_IPSR_GPSR(IP11_7_4,	ADICS_SAMP),
+
+	PINMUX_IPSR_GPSR(IP11_11_8,	RTS0_N),
+	PINMUX_IPSR_MSEL(IP11_11_8,	HRTS1_N_B,		SEL_HSCIF1_1),
+	PINMUX_IPSR_MSEL(IP11_11_8,	MSIOF1_SS1_B,		SEL_MSIOF1_1),
+	PINMUX_IPSR_MSEL(IP11_11_8,	AUDIO_CLKA_B,		SEL_ADG_1),
+	PINMUX_IPSR_MSEL(IP11_11_8,	SCL2_A,			SEL_I2C2_0),
+	PINMUX_IPSR_MSEL(IP11_11_8,	STP_IVCXO27_1_C,	SEL_SSP1_1_2),
+	PINMUX_IPSR_MSEL(IP11_11_8,	RIF0_SYNC_B,		SEL_DRIF0_1),
+	PINMUX_IPSR_GPSR(IP11_11_8,	ADICHS1),
+
+	PINMUX_IPSR_MSEL(IP11_15_12,	RX1_A,			SEL_SCIF1_0),
+	PINMUX_IPSR_MSEL(IP11_15_12,	HRX1_A,			SEL_HSCIF1_0),
+	PINMUX_IPSR_MSEL(IP11_15_12,	TS_SDAT0_C,		SEL_TSIF0_2),
+	PINMUX_IPSR_MSEL(IP11_15_12,	STP_ISD_0_C,		SEL_SSP1_0_2),
+	PINMUX_IPSR_MSEL(IP11_15_12,	RIF1_CLK_C,		SEL_DRIF1_2),
+
+	PINMUX_IPSR_MSEL(IP11_19_16,	TX1_A,			SEL_SCIF1_0),
+	PINMUX_IPSR_MSEL(IP11_19_16,	HTX1_A,			SEL_HSCIF1_0),
+	PINMUX_IPSR_MSEL(IP11_19_16,	TS_SDEN0_C,		SEL_TSIF0_2),
+	PINMUX_IPSR_MSEL(IP11_19_16,	STP_ISEN_0_C,		SEL_SSP1_0_2),
+	PINMUX_IPSR_MSEL(IP11_19_16,	RIF1_D0_C,		SEL_DRIF1_2),
+
+	PINMUX_IPSR_GPSR(IP11_23_20,	CTS1_N),
+	PINMUX_IPSR_MSEL(IP11_23_20,	HCTS1_N_A,		SEL_HSCIF1_0),
+	PINMUX_IPSR_MSEL(IP11_23_20,	MSIOF1_RXD_B,		SEL_MSIOF1_1),
+	PINMUX_IPSR_MSEL(IP11_23_20,	TS_SDEN1_C,		SEL_TSIF1_2),
+	PINMUX_IPSR_MSEL(IP11_23_20,	STP_ISEN_1_C,		SEL_SSP1_1_2),
+	PINMUX_IPSR_MSEL(IP11_23_20,	RIF1_D0_B,		SEL_DRIF1_1),
+	PINMUX_IPSR_GPSR(IP11_23_20,	ADIDATA),
+
+	PINMUX_IPSR_GPSR(IP11_27_24,	RTS1_N),
+	PINMUX_IPSR_MSEL(IP11_27_24,	HRTS1_N_A,		SEL_HSCIF1_0),
+	PINMUX_IPSR_MSEL(IP11_27_24,	MSIOF1_TXD_B,		SEL_MSIOF1_1),
+	PINMUX_IPSR_MSEL(IP11_27_24,	TS_SDAT1_C,		SEL_TSIF1_2),
+	PINMUX_IPSR_MSEL(IP11_27_24,	STP_ISD_1_C,		SEL_SSP1_1_2),
+	PINMUX_IPSR_MSEL(IP11_27_24,	RIF1_D1_B,		SEL_DRIF1_1),
+	PINMUX_IPSR_GPSR(IP11_27_24,	ADICHS0),
+
+	PINMUX_IPSR_GPSR(IP11_31_28,	SCK2),
+	PINMUX_IPSR_MSEL(IP11_31_28,	SCIF_CLK_B,		SEL_SCIF1_1),
+	PINMUX_IPSR_MSEL(IP11_31_28,	MSIOF1_SCK_B,		SEL_MSIOF1_1),
+	PINMUX_IPSR_MSEL(IP11_31_28,	TS_SCK1_C,		SEL_TSIF1_2),
+	PINMUX_IPSR_MSEL(IP11_31_28,	STP_ISCLK_1_C,		SEL_SSP1_1_2),
+	PINMUX_IPSR_MSEL(IP11_31_28,	RIF1_CLK_B,		SEL_DRIF1_1),
+	PINMUX_IPSR_GPSR(IP11_31_28,	ADICLK),
+
+	/* IPSR12 */
+	PINMUX_IPSR_MSEL(IP12_3_0,	TX2_A,			SEL_SCIF2_0),
+	PINMUX_IPSR_MSEL(IP12_3_0,	SD2_CD_B,		SEL_SDHI2_1),
+	PINMUX_IPSR_MSEL(IP12_3_0,	SCL1_A,			SEL_I2C1_0),
+	PINMUX_IPSR_MSEL(IP12_3_0,	FMCLK_A,		SEL_FM_0),
+	PINMUX_IPSR_MSEL(IP12_3_0,	RIF1_D1_C,		SEL_DRIF1_2),
+	PINMUX_IPSR_MSEL(IP12_3_0,	FSO_CFE_0_B,		SEL_FSO_1),
+
+	PINMUX_IPSR_MSEL(IP12_7_4,	RX2_A,			SEL_SCIF2_0),
+	PINMUX_IPSR_MSEL(IP12_7_4,	SD2_WP_B,		SEL_SDHI2_1),
+	PINMUX_IPSR_MSEL(IP12_7_4,	SDA1_A,			SEL_I2C1_0),
+	PINMUX_IPSR_MSEL(IP12_7_4,	FMIN_A,			SEL_FM_0),
+	PINMUX_IPSR_MSEL(IP12_7_4,	RIF1_SYNC_C,		SEL_DRIF1_2),
+	PINMUX_IPSR_MSEL(IP12_7_4,	FSO_CFE_1_B,		SEL_FSO_1),
+
+	PINMUX_IPSR_GPSR(IP12_11_8,	HSCK0),
+	PINMUX_IPSR_MSEL(IP12_11_8,	MSIOF1_SCK_D,		SEL_MSIOF1_3),
+	PINMUX_IPSR_MSEL(IP12_11_8,	AUDIO_CLKB_A,		SEL_ADG_0),
+	PINMUX_IPSR_MSEL(IP12_11_8,	SSI_SDATA1_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP12_11_8,	TS_SCK0_D,		SEL_TSIF0_3),
+	PINMUX_IPSR_MSEL(IP12_11_8,	STP_ISCLK_0_D,		SEL_SSP1_0_3),
+	PINMUX_IPSR_MSEL(IP12_11_8,	RIF0_CLK_C,		SEL_DRIF0_2),
+
+	PINMUX_IPSR_GPSR(IP12_15_12,	HRX0),
+	PINMUX_IPSR_MSEL(IP12_15_12,	MSIOF1_RXD_D,		SEL_MSIOF1_3),
+	PINMUX_IPSR_MSEL(IP12_15_12,	SSI_SDATA2_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP12_15_12,	TS_SDEN0_D,		SEL_TSIF0_3),
+	PINMUX_IPSR_MSEL(IP12_15_12,	STP_ISEN_0_D,		SEL_SSP1_0_3),
+	PINMUX_IPSR_MSEL(IP12_15_12,	RIF0_D0_C,		SEL_DRIF0_2),
+
+	PINMUX_IPSR_GPSR(IP12_19_16,	HTX0),
+	PINMUX_IPSR_MSEL(IP12_19_16,	MSIOF1_TXD_D,		SEL_MSIOF1_3),
+	PINMUX_IPSR_MSEL(IP12_19_16,	SSI_SDATA9_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP12_19_16,	TS_SDAT0_D,		SEL_TSIF0_3),
+	PINMUX_IPSR_MSEL(IP12_19_16,	STP_ISD_0_D,		SEL_SSP1_0_3),
+	PINMUX_IPSR_MSEL(IP12_19_16,	RIF0_D1_C,		SEL_DRIF0_2),
+
+	PINMUX_IPSR_GPSR(IP12_23_20,	HCTS0_N),
+	PINMUX_IPSR_MSEL(IP12_23_20,	RX2_B,			SEL_SCIF2_1),
+	PINMUX_IPSR_MSEL(IP12_23_20,	MSIOF1_SYNC_D,		SEL_MSIOF1_3),
+	PINMUX_IPSR_MSEL(IP12_23_20,	SSI_SCK9_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP12_23_20,	TS_SPSYNC0_D,		SEL_TSIF0_3),
+	PINMUX_IPSR_MSEL(IP12_23_20,	STP_ISSYNC_0_D,		SEL_SSP1_0_3),
+	PINMUX_IPSR_MSEL(IP12_23_20,	RIF0_SYNC_C,		SEL_DRIF0_2),
+	PINMUX_IPSR_MSEL(IP12_23_20,	AUDIO_CLKOUT1_A,	SEL_ADG_0),
+
+	PINMUX_IPSR_GPSR(IP12_27_24,	HRTS0_N),
+	PINMUX_IPSR_MSEL(IP12_27_24,	TX2_B,			SEL_SCIF2_1),
+	PINMUX_IPSR_MSEL(IP12_27_24,	MSIOF1_SS1_D,		SEL_MSIOF1_3),
+	PINMUX_IPSR_MSEL(IP12_27_24,	SSI_WS9_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP12_27_24,	STP_IVCXO27_0_D,	SEL_SSP1_0_3),
+	PINMUX_IPSR_MSEL(IP12_27_24,	BPFCLK_A,		SEL_FM_0),
+	PINMUX_IPSR_MSEL(IP12_27_24,	AUDIO_CLKOUT2_A,	SEL_ADG_0),
+
+	PINMUX_IPSR_GPSR(IP12_31_28,	MSIOF0_SYNC),
+	PINMUX_IPSR_MSEL(IP12_31_28,	AUDIO_CLKOUT_A,		SEL_ADG_0),
+
+	/* IPSR13 */
+	PINMUX_IPSR_GPSR(IP13_3_0,	MSIOF0_SS1),
+	PINMUX_IPSR_GPSR(IP13_3_0,	RX5),
+	PINMUX_IPSR_MSEL(IP13_3_0,	AUDIO_CLKA_C,		SEL_ADG_2),
+	PINMUX_IPSR_MSEL(IP13_3_0,	SSI_SCK2_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP13_3_0,	STP_IVCXO27_0_C,	SEL_SSP1_0_2),
+	PINMUX_IPSR_MSEL(IP13_3_0,	AUDIO_CLKOUT3_A,	SEL_ADG_0),
+	PINMUX_IPSR_MSEL(IP13_3_0,	TCLK1_B,		SEL_TIMER_TMU_1),
+
+	PINMUX_IPSR_GPSR(IP13_7_4,	MSIOF0_SS2),
+	PINMUX_IPSR_GPSR(IP13_7_4,	TX5),
+	PINMUX_IPSR_MSEL(IP13_7_4,	MSIOF1_SS2_D,		SEL_MSIOF1_3),
+	PINMUX_IPSR_MSEL(IP13_7_4,	AUDIO_CLKC_A,		SEL_ADG_0),
+	PINMUX_IPSR_MSEL(IP13_7_4,	SSI_WS2_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP13_7_4,	STP_OPWM_0_D,		SEL_SSP1_0_3),
+	PINMUX_IPSR_MSEL(IP13_7_4,	AUDIO_CLKOUT_D,		SEL_ADG_3),
+	PINMUX_IPSR_MSEL(IP13_7_4,	SPEEDIN_B,		SEL_SPEED_PULSE_1),
+
+	PINMUX_IPSR_GPSR(IP13_11_8,	MLB_CLK),
+	PINMUX_IPSR_MSEL(IP13_11_8,	MSIOF1_SCK_F,		SEL_MSIOF1_5),
+	PINMUX_IPSR_MSEL(IP13_11_8,	SCL1_B,			SEL_I2C1_1),
+
+	PINMUX_IPSR_GPSR(IP13_15_12,	MLB_SIG),
+	PINMUX_IPSR_MSEL(IP13_15_12,	RX1_B,			SEL_SCIF1_1),
+	PINMUX_IPSR_MSEL(IP13_15_12,	MSIOF1_SYNC_F,		SEL_MSIOF1_5),
+	PINMUX_IPSR_MSEL(IP13_15_12,	SDA1_B,			SEL_I2C1_1),
+
+	PINMUX_IPSR_GPSR(IP13_19_16,	MLB_DAT),
+	PINMUX_IPSR_MSEL(IP13_19_16,	TX1_B,			SEL_SCIF1_1),
+	PINMUX_IPSR_MSEL(IP13_19_16,	MSIOF1_RXD_F,		SEL_MSIOF1_5),
+
+	PINMUX_IPSR_GPSR(IP13_23_20,	SSI_SCK01239),
+	PINMUX_IPSR_MSEL(IP13_23_20,	MSIOF1_TXD_F,		SEL_MSIOF1_5),
+
+	PINMUX_IPSR_GPSR(IP13_27_24,	SSI_WS01239),
+	PINMUX_IPSR_MSEL(IP13_27_24,	MSIOF1_SS1_F,		SEL_MSIOF1_5),
+
+	PINMUX_IPSR_GPSR(IP13_31_28,	SSI_SDATA0),
+	PINMUX_IPSR_MSEL(IP13_31_28,	MSIOF1_SS2_F,		SEL_MSIOF1_5),
+
+	/* IPSR14 */
+	PINMUX_IPSR_MSEL(IP14_3_0,	SSI_SDATA1_A,		SEL_SSI_0),
+
+	PINMUX_IPSR_MSEL(IP14_7_4,	SSI_SDATA2_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP14_7_4,	SSI_SCK1_B,		SEL_SSI_1),
+
+	PINMUX_IPSR_GPSR(IP14_11_8,	SSI_SCK349),
+	PINMUX_IPSR_MSEL(IP14_11_8,	MSIOF1_SS1_A,		SEL_MSIOF1_0),
+	PINMUX_IPSR_MSEL(IP14_11_8,	STP_OPWM_0_A,		SEL_SSP1_0_0),
+
+	PINMUX_IPSR_GPSR(IP14_15_12,	SSI_WS349),
+	PINMUX_IPSR_MSEL(IP14_15_12,	HCTS2_N_A,		SEL_HSCIF2_0),
+	PINMUX_IPSR_MSEL(IP14_15_12,	MSIOF1_SS2_A,		SEL_MSIOF1_0),
+	PINMUX_IPSR_MSEL(IP14_15_12,	STP_IVCXO27_0_A,	SEL_SSP1_0_0),
+
+	PINMUX_IPSR_GPSR(IP14_19_16,	SSI_SDATA3),
+	PINMUX_IPSR_MSEL(IP14_19_16,	HRTS2_N_A,		SEL_HSCIF2_0),
+	PINMUX_IPSR_MSEL(IP14_19_16,	MSIOF1_TXD_A,		SEL_MSIOF1_0),
+	PINMUX_IPSR_MSEL(IP14_19_16,	TS_SCK0_A,		SEL_TSIF0_0),
+	PINMUX_IPSR_MSEL(IP14_19_16,	STP_ISCLK_0_A,		SEL_SSP1_0_0),
+	PINMUX_IPSR_MSEL(IP14_19_16,	RIF0_D1_A,		SEL_DRIF0_0),
+	PINMUX_IPSR_MSEL(IP14_19_16,	RIF2_D0_A,		SEL_DRIF2_0),
+
+	PINMUX_IPSR_GPSR(IP14_23_20,	SSI_SCK4),
+	PINMUX_IPSR_MSEL(IP14_23_20,	HRX2_A,			SEL_HSCIF2_0),
+	PINMUX_IPSR_MSEL(IP14_23_20,	MSIOF1_SCK_A,		SEL_MSIOF1_0),
+	PINMUX_IPSR_MSEL(IP14_23_20,	TS_SDAT0_A,		SEL_TSIF0_0),
+	PINMUX_IPSR_MSEL(IP14_23_20,	STP_ISD_0_A,		SEL_SSP1_0_0),
+	PINMUX_IPSR_MSEL(IP14_23_20,	RIF0_CLK_A,		SEL_DRIF0_0),
+	PINMUX_IPSR_MSEL(IP14_23_20,	RIF2_CLK_A,		SEL_DRIF2_0),
+
+	PINMUX_IPSR_GPSR(IP14_27_24,	SSI_WS4),
+	PINMUX_IPSR_MSEL(IP14_27_24,	HTX2_A,			SEL_HSCIF2_0),
+	PINMUX_IPSR_MSEL(IP14_27_24,	MSIOF1_SYNC_A,		SEL_MSIOF1_0),
+	PINMUX_IPSR_MSEL(IP14_27_24,	TS_SDEN0_A,		SEL_TSIF0_0),
+	PINMUX_IPSR_MSEL(IP14_27_24,	STP_ISEN_0_A,		SEL_SSP1_0_0),
+	PINMUX_IPSR_MSEL(IP14_27_24,	RIF0_SYNC_A,		SEL_DRIF0_0),
+	PINMUX_IPSR_MSEL(IP14_27_24,	RIF2_SYNC_A,		SEL_DRIF2_0),
+
+	PINMUX_IPSR_GPSR(IP14_31_28,	SSI_SDATA4),
+	PINMUX_IPSR_MSEL(IP14_31_28,	HSCK2_A,		SEL_HSCIF2_0),
+	PINMUX_IPSR_MSEL(IP14_31_28,	MSIOF1_RXD_A,		SEL_MSIOF1_0),
+	PINMUX_IPSR_MSEL(IP14_31_28,	TS_SPSYNC0_A,		SEL_TSIF0_0),
+	PINMUX_IPSR_MSEL(IP14_31_28,	STP_ISSYNC_0_A,		SEL_SSP1_0_0),
+	PINMUX_IPSR_MSEL(IP14_31_28,	RIF0_D0_A,		SEL_DRIF0_0),
+	PINMUX_IPSR_MSEL(IP14_31_28,	RIF2_D1_A,		SEL_DRIF2_0),
+
+	/* IPSR15 */
+	PINMUX_IPSR_GPSR(IP15_3_0,	SSI_SCK6),
+	PINMUX_IPSR_GPSR(IP15_3_0,	USB2_PWEN),
+	PINMUX_IPSR_MSEL(IP15_3_0,	SIM0_RST_D,		SEL_SIMCARD_3),
+
+	PINMUX_IPSR_GPSR(IP15_7_4,	SSI_WS6),
+	PINMUX_IPSR_GPSR(IP15_7_4,	USB2_OVC),
+	PINMUX_IPSR_MSEL(IP15_7_4,	SIM0_D_D,		SEL_SIMCARD_3),
+
+	PINMUX_IPSR_GPSR(IP15_11_8,	SSI_SDATA6),
+	PINMUX_IPSR_MSEL(IP15_11_8,	SIM0_CLK_D,		SEL_SIMCARD_3),
+	PINMUX_IPSR_MSEL(IP15_11_8,	SATA_DEVSLP_A,		SEL_SATA_0),
+
+	PINMUX_IPSR_GPSR(IP15_15_12,	SSI_SCK78),
+	PINMUX_IPSR_MSEL(IP15_15_12,	HRX2_B,			SEL_HSCIF2_1),
+	PINMUX_IPSR_MSEL(IP15_15_12,	MSIOF1_SCK_C,		SEL_MSIOF1_2),
+	PINMUX_IPSR_MSEL(IP15_15_12,	TS_SCK1_A,		SEL_TSIF1_0),
+	PINMUX_IPSR_MSEL(IP15_15_12,	STP_ISCLK_1_A,		SEL_SSP1_1_0),
+	PINMUX_IPSR_MSEL(IP15_15_12,	RIF1_CLK_A,		SEL_DRIF1_0),
+	PINMUX_IPSR_MSEL(IP15_15_12,	RIF3_CLK_A,		SEL_DRIF3_0),
+
+	PINMUX_IPSR_GPSR(IP15_19_16,	SSI_WS78),
+	PINMUX_IPSR_MSEL(IP15_19_16,	HTX2_B,			SEL_HSCIF2_1),
+	PINMUX_IPSR_MSEL(IP15_19_16,	MSIOF1_SYNC_C,		SEL_MSIOF1_2),
+	PINMUX_IPSR_MSEL(IP15_19_16,	TS_SDAT1_A,		SEL_TSIF1_0),
+	PINMUX_IPSR_MSEL(IP15_19_16,	STP_ISD_1_A,		SEL_SSP1_1_0),
+	PINMUX_IPSR_MSEL(IP15_19_16,	RIF1_SYNC_A,		SEL_DRIF1_0),
+	PINMUX_IPSR_MSEL(IP15_19_16,	RIF3_SYNC_A,		SEL_DRIF3_0),
+
+	PINMUX_IPSR_GPSR(IP15_23_20,	SSI_SDATA7),
+	PINMUX_IPSR_MSEL(IP15_23_20,	HCTS2_N_B,		SEL_HSCIF2_1),
+	PINMUX_IPSR_MSEL(IP15_23_20,	MSIOF1_RXD_C,		SEL_MSIOF1_2),
+	PINMUX_IPSR_MSEL(IP15_23_20,	TS_SDEN1_A,		SEL_TSIF1_0),
+	PINMUX_IPSR_MSEL(IP15_23_20,	STP_ISEN_1_A,		SEL_SSP1_1_0),
+	PINMUX_IPSR_MSEL(IP15_23_20,	RIF1_D0_A,		SEL_DRIF1_0),
+	PINMUX_IPSR_MSEL(IP15_23_20,	RIF3_D0_A,		SEL_DRIF3_0),
+	PINMUX_IPSR_MSEL(IP15_23_20,	TCLK2_A,		SEL_TIMER_TMU_0),
+
+	PINMUX_IPSR_GPSR(IP15_27_24,	SSI_SDATA8),
+	PINMUX_IPSR_MSEL(IP15_27_24,	HRTS2_N_B,		SEL_HSCIF2_1),
+	PINMUX_IPSR_MSEL(IP15_27_24,	MSIOF1_TXD_C,		SEL_MSIOF1_2),
+	PINMUX_IPSR_MSEL(IP15_27_24,	TS_SPSYNC1_A,		SEL_TSIF1_0),
+	PINMUX_IPSR_MSEL(IP15_27_24,	STP_ISSYNC_1_A,		SEL_SSP1_1_0),
+	PINMUX_IPSR_MSEL(IP15_27_24,	RIF1_D1_A,		SEL_DRIF1_0),
+	PINMUX_IPSR_MSEL(IP15_27_24,	RIF3_D1_A,		SEL_DRIF3_0),
+
+	PINMUX_IPSR_MSEL(IP15_31_28,	SSI_SDATA9_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP15_31_28,	HSCK2_B,		SEL_HSCIF2_1),
+	PINMUX_IPSR_MSEL(IP15_31_28,	MSIOF1_SS1_C,		SEL_MSIOF1_2),
+	PINMUX_IPSR_MSEL(IP15_31_28,	HSCK1_A,		SEL_HSCIF1_0),
+	PINMUX_IPSR_MSEL(IP15_31_28,	SSI_WS1_B,		SEL_SSI_1),
+	PINMUX_IPSR_GPSR(IP15_31_28,	SCK1),
+	PINMUX_IPSR_MSEL(IP15_31_28,	STP_IVCXO27_1_A,	SEL_SSP1_1_0),
+	PINMUX_IPSR_GPSR(IP15_31_28,	SCK5),
+
+	/* IPSR16 */
+	PINMUX_IPSR_MSEL(IP16_3_0,	AUDIO_CLKA_A,		SEL_ADG_0),
+
+	PINMUX_IPSR_MSEL(IP16_7_4,	AUDIO_CLKB_B,		SEL_ADG_1),
+	PINMUX_IPSR_MSEL(IP16_7_4,	SCIF_CLK_A,		SEL_SCIF1_0),
+	PINMUX_IPSR_MSEL(IP16_7_4,	STP_IVCXO27_1_D,	SEL_SSP1_1_3),
+	PINMUX_IPSR_MSEL(IP16_7_4,	REMOCON_A,		SEL_REMOCON_0),
+	PINMUX_IPSR_MSEL(IP16_7_4,	TCLK1_A,		SEL_TIMER_TMU_0),
+
+	PINMUX_IPSR_GPSR(IP16_11_8,	USB0_PWEN),
+	PINMUX_IPSR_MSEL(IP16_11_8,	SIM0_RST_C,		SEL_SIMCARD_2),
+	PINMUX_IPSR_MSEL(IP16_11_8,	TS_SCK1_D,		SEL_TSIF1_3),
+	PINMUX_IPSR_MSEL(IP16_11_8,	STP_ISCLK_1_D,		SEL_SSP1_1_3),
+	PINMUX_IPSR_MSEL(IP16_11_8,	BPFCLK_B,		SEL_FM_1),
+	PINMUX_IPSR_MSEL(IP16_11_8,	RIF3_CLK_B,		SEL_DRIF3_1),
+
+	PINMUX_IPSR_GPSR(IP16_15_12,	USB0_OVC),
+	PINMUX_IPSR_MSEL(IP16_15_12,	SIM0_D_C,		SEL_SIMCARD_2),
+	PINMUX_IPSR_MSEL(IP16_15_12,	TS_SDAT1_D,		SEL_TSIF1_3),
+	PINMUX_IPSR_MSEL(IP16_15_12,	STP_ISD_1_D,		SEL_SSP1_1_3),
+	PINMUX_IPSR_MSEL(IP16_15_12,	RIF3_SYNC_B,		SEL_DRIF3_1),
+
+	PINMUX_IPSR_GPSR(IP16_19_16,	USB1_PWEN),
+	PINMUX_IPSR_MSEL(IP16_19_16,	SIM0_CLK_C,		SEL_SIMCARD_2),
+	PINMUX_IPSR_MSEL(IP16_19_16,	SSI_SCK1_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP16_19_16,	TS_SCK0_E,		SEL_TSIF0_4),
+	PINMUX_IPSR_MSEL(IP16_19_16,	STP_ISCLK_0_E,		SEL_SSP1_0_4),
+	PINMUX_IPSR_MSEL(IP16_19_16,	FMCLK_B,		SEL_FM_1),
+	PINMUX_IPSR_MSEL(IP16_19_16,	RIF2_CLK_B,		SEL_DRIF2_1),
+	PINMUX_IPSR_MSEL(IP16_19_16,	SPEEDIN_A,		SEL_SPEED_PULSE_0),
+
+	PINMUX_IPSR_GPSR(IP16_23_20,	USB1_OVC),
+	PINMUX_IPSR_MSEL(IP16_23_20,	MSIOF1_SS2_C,		SEL_MSIOF1_2),
+	PINMUX_IPSR_MSEL(IP16_23_20,	SSI_WS1_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP16_23_20,	TS_SDAT0_E,		SEL_TSIF0_4),
+	PINMUX_IPSR_MSEL(IP16_23_20,	STP_ISD_0_E,		SEL_SSP1_0_4),
+	PINMUX_IPSR_MSEL(IP16_23_20,	FMIN_B,			SEL_FM_1),
+	PINMUX_IPSR_MSEL(IP16_23_20,	RIF2_SYNC_B,		SEL_DRIF2_1),
+	PINMUX_IPSR_MSEL(IP16_23_20,	REMOCON_B,		SEL_REMOCON_1),
+
+	PINMUX_IPSR_GPSR(IP16_27_24,	USB30_PWEN),
+	PINMUX_IPSR_MSEL(IP16_27_24,	AUDIO_CLKOUT_B,		SEL_ADG_1),
+	PINMUX_IPSR_MSEL(IP16_27_24,	SSI_SCK2_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP16_27_24,	TS_SDEN1_D,		SEL_TSIF1_3),
+	PINMUX_IPSR_MSEL(IP16_27_24,	STP_ISEN_1_D,		SEL_SSP1_1_3),
+	PINMUX_IPSR_MSEL(IP16_27_24,	STP_OPWM_0_E,		SEL_SSP1_0_4),
+	PINMUX_IPSR_MSEL(IP16_27_24,	RIF3_D0_B,		SEL_DRIF3_1),
+	PINMUX_IPSR_MSEL(IP16_27_24,	TCLK2_B,		SEL_TIMER_TMU_1),
+	PINMUX_IPSR_GPSR(IP16_27_24,	TPU0TO0),
+
+	PINMUX_IPSR_GPSR(IP16_31_28,	USB30_OVC),
+	PINMUX_IPSR_MSEL(IP16_31_28,	AUDIO_CLKOUT1_B,	SEL_ADG_1),
+	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_WS2_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP16_31_28,	TS_SPSYNC1_D,		SEL_TSIF1_3),
+	PINMUX_IPSR_MSEL(IP16_31_28,	STP_ISSYNC_1_D,		SEL_SSP1_1_3),
+	PINMUX_IPSR_MSEL(IP16_31_28,	STP_IVCXO27_0_E,	SEL_SSP1_0_4),
+	PINMUX_IPSR_MSEL(IP16_31_28,	RIF3_D1_B,		SEL_DRIF3_1),
+	PINMUX_IPSR_MSEL(IP16_31_28,	FSO_TOE_B,		SEL_FSO_1),
+	PINMUX_IPSR_GPSR(IP16_31_28,	TPU0TO1),
+
+	/* IPSR17 */
+	PINMUX_IPSR_GPSR(IP17_3_0,	USB31_PWEN),
+	PINMUX_IPSR_MSEL(IP17_3_0,	AUDIO_CLKOUT2_B,	SEL_ADG_1),
+	PINMUX_IPSR_MSEL(IP17_3_0,	SSI_SCK9_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP17_3_0,	TS_SDEN0_E,		SEL_TSIF0_4),
+	PINMUX_IPSR_MSEL(IP17_3_0,	STP_ISEN_0_E,		SEL_SSP1_0_4),
+	PINMUX_IPSR_MSEL(IP17_3_0,	RIF2_D0_B,		SEL_DRIF2_1),
+	PINMUX_IPSR_GPSR(IP17_3_0,	TPU0TO2),
+
+	PINMUX_IPSR_GPSR(IP17_7_4,	USB31_OVC),
+	PINMUX_IPSR_MSEL(IP17_7_4,	AUDIO_CLKOUT3_B,	SEL_ADG_1),
+	PINMUX_IPSR_MSEL(IP17_7_4,	SSI_WS9_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP17_7_4,	TS_SPSYNC0_E,		SEL_TSIF0_4),
+	PINMUX_IPSR_MSEL(IP17_7_4,	STP_ISSYNC_0_E,		SEL_SSP1_0_4),
+	PINMUX_IPSR_MSEL(IP17_7_4,	RIF2_D1_B,		SEL_DRIF2_1),
+	PINMUX_IPSR_GPSR(IP17_7_4,	TPU0TO3),
+
+/*
+ * Static pins cannot be muxed between different functions but
+ * still need mark entries in the pinmux list. Add each static
+ * pin to the list without an associated function. The sh-pfc
+ * core will do the right thing and skip trying to mux the pin
+ * while still applying configuration to it.
+ */
+#define FM(x)	PINMUX_DATA(x##_MARK, 0),
+	PINMUX_STATIC
+#undef FM
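+
+/*
+ * For illustration only (a sketch, assuming AVB_TXCREFCLK is one of the
+ * FM() entries carried by PINMUX_STATIC, as its _MARK is referenced by the
+ * avb_mii group below): with FM() defined as above, such an entry expands to
+ *
+ *	PINMUX_DATA(AVB_TXCREFCLK_MARK, 0),
+ *
+ * i.e. a mark entry with no mux bits to program for the fixed-function pin.
+ */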
+};
+
+/*
+ * Pins not associated with a GPIO port.
+ */
+enum {
+	GP_ASSIGN_LAST(),
+	NOGP_ALL(),
+};
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+	PINMUX_GPIO_GP_ALL(),
+	PINMUX_NOGP_ALL(),
+};
+
+/* - AUDIO CLOCK ------------------------------------------------------------ */
+static const unsigned int audio_clk_a_a_pins[] = {
+	/* CLK A */
+	RCAR_GP_PIN(6, 22),
+};
+static const unsigned int audio_clk_a_a_mux[] = {
+	AUDIO_CLKA_A_MARK,
+};
+static const unsigned int audio_clk_a_b_pins[] = {
+	/* CLK A */
+	RCAR_GP_PIN(5, 4),
+};
+static const unsigned int audio_clk_a_b_mux[] = {
+	AUDIO_CLKA_B_MARK,
+};
+static const unsigned int audio_clk_a_c_pins[] = {
+	/* CLK A */
+	RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clk_a_c_mux[] = {
+	AUDIO_CLKA_C_MARK,
+};
+static const unsigned int audio_clk_b_a_pins[] = {
+	/* CLK B */
+	RCAR_GP_PIN(5, 12),
+};
+static const unsigned int audio_clk_b_a_mux[] = {
+	AUDIO_CLKB_A_MARK,
+};
+static const unsigned int audio_clk_b_b_pins[] = {
+	/* CLK B */
+	RCAR_GP_PIN(6, 23),
+};
+static const unsigned int audio_clk_b_b_mux[] = {
+	AUDIO_CLKB_B_MARK,
+};
+static const unsigned int audio_clk_c_a_pins[] = {
+	/* CLK C */
+	RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clk_c_a_mux[] = {
+	AUDIO_CLKC_A_MARK,
+};
+static const unsigned int audio_clk_c_b_pins[] = {
+	/* CLK C */
+	RCAR_GP_PIN(5, 0),
+};
+static const unsigned int audio_clk_c_b_mux[] = {
+	AUDIO_CLKC_B_MARK,
+};
+static const unsigned int audio_clkout_a_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(5, 18),
+};
+static const unsigned int audio_clkout_a_mux[] = {
+	AUDIO_CLKOUT_A_MARK,
+};
+static const unsigned int audio_clkout_b_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(6, 28),
+};
+static const unsigned int audio_clkout_b_mux[] = {
+	AUDIO_CLKOUT_B_MARK,
+};
+static const unsigned int audio_clkout_c_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(5, 3),
+};
+static const unsigned int audio_clkout_c_mux[] = {
+	AUDIO_CLKOUT_C_MARK,
+};
+static const unsigned int audio_clkout_d_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clkout_d_mux[] = {
+	AUDIO_CLKOUT_D_MARK,
+};
+static const unsigned int audio_clkout1_a_pins[] = {
+	/* CLKOUT1 */
+	RCAR_GP_PIN(5, 15),
+};
+static const unsigned int audio_clkout1_a_mux[] = {
+	AUDIO_CLKOUT1_A_MARK,
+};
+static const unsigned int audio_clkout1_b_pins[] = {
+	/* CLKOUT1 */
+	RCAR_GP_PIN(6, 29),
+};
+static const unsigned int audio_clkout1_b_mux[] = {
+	AUDIO_CLKOUT1_B_MARK,
+};
+static const unsigned int audio_clkout2_a_pins[] = {
+	/* CLKOUT2 */
+	RCAR_GP_PIN(5, 16),
+};
+static const unsigned int audio_clkout2_a_mux[] = {
+	AUDIO_CLKOUT2_A_MARK,
+};
+static const unsigned int audio_clkout2_b_pins[] = {
+	/* CLKOUT2 */
+	RCAR_GP_PIN(6, 30),
+};
+static const unsigned int audio_clkout2_b_mux[] = {
+	AUDIO_CLKOUT2_B_MARK,
+};
+
+static const unsigned int audio_clkout3_a_pins[] = {
+	/* CLKOUT3 */
+	RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clkout3_a_mux[] = {
+	AUDIO_CLKOUT3_A_MARK,
+};
+static const unsigned int audio_clkout3_b_pins[] = {
+	/* CLKOUT3 */
+	RCAR_GP_PIN(6, 31),
+};
+static const unsigned int audio_clkout3_b_mux[] = {
+	AUDIO_CLKOUT3_B_MARK,
+};
+
+/* - EtherAVB --------------------------------------------------------------- */
+static const unsigned int avb_link_pins[] = {
+	/* AVB_LINK */
+	RCAR_GP_PIN(2, 12),
+};
+static const unsigned int avb_link_mux[] = {
+	AVB_LINK_MARK,
+};
+static const unsigned int avb_magic_pins[] = {
+	/* AVB_MAGIC_ */
+	RCAR_GP_PIN(2, 10),
+};
+static const unsigned int avb_magic_mux[] = {
+	AVB_MAGIC_MARK,
+};
+static const unsigned int avb_phy_int_pins[] = {
+	/* AVB_PHY_INT */
+	RCAR_GP_PIN(2, 11),
+};
+static const unsigned int avb_phy_int_mux[] = {
+	AVB_PHY_INT_MARK,
+};
+static const unsigned int avb_mdio_pins[] = {
+	/* AVB_MDC, AVB_MDIO */
+	RCAR_GP_PIN(2, 9), PIN_AVB_MDIO,
+};
+static const unsigned int avb_mdio_mux[] = {
+	AVB_MDC_MARK, AVB_MDIO_MARK,
+};
+static const unsigned int avb_mii_pins[] = {
+	/*
+	 * AVB_TX_CTL, AVB_TXC, AVB_TD0,
+	 * AVB_TD1, AVB_TD2, AVB_TD3,
+	 * AVB_RX_CTL, AVB_RXC, AVB_RD0,
+	 * AVB_RD1, AVB_RD2, AVB_RD3,
+	 * AVB_TXCREFCLK
+	 */
+	PIN_AVB_TX_CTL, PIN_AVB_TXC, PIN_AVB_TD0,
+	PIN_AVB_TD1, PIN_AVB_TD2, PIN_AVB_TD3,
+	PIN_AVB_RX_CTL, PIN_AVB_RXC, PIN_AVB_RD0,
+	PIN_AVB_RD1, PIN_AVB_RD2, PIN_AVB_RD3,
+	PIN_AVB_TXCREFCLK,
+};
+static const unsigned int avb_mii_mux[] = {
+	AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
+	AVB_TD1_MARK, AVB_TD2_MARK, AVB_TD3_MARK,
+	AVB_RX_CTL_MARK, AVB_RXC_MARK, AVB_RD0_MARK,
+	AVB_RD1_MARK, AVB_RD2_MARK, AVB_RD3_MARK,
+	AVB_TXCREFCLK_MARK,
+};
+static const unsigned int avb_avtp_pps_pins[] = {
+	/* AVB_AVTP_PPS */
+	RCAR_GP_PIN(2, 6),
+};
+static const unsigned int avb_avtp_pps_mux[] = {
+	AVB_AVTP_PPS_MARK,
+};
+static const unsigned int avb_avtp_match_a_pins[] = {
+	/* AVB_AVTP_MATCH_A */
+	RCAR_GP_PIN(2, 13),
+};
+static const unsigned int avb_avtp_match_a_mux[] = {
+	AVB_AVTP_MATCH_A_MARK,
+};
+static const unsigned int avb_avtp_capture_a_pins[] = {
+	/* AVB_AVTP_CAPTURE_A */
+	RCAR_GP_PIN(2, 14),
+};
+static const unsigned int avb_avtp_capture_a_mux[] = {
+	AVB_AVTP_CAPTURE_A_MARK,
+};
+static const unsigned int avb_avtp_match_b_pins[] = {
+	/* AVB_AVTP_MATCH_B */
+	RCAR_GP_PIN(1, 8),
+};
+static const unsigned int avb_avtp_match_b_mux[] = {
+	AVB_AVTP_MATCH_B_MARK,
+};
+static const unsigned int avb_avtp_capture_b_pins[] = {
+	/* AVB_AVTP_CAPTURE_B */
+	RCAR_GP_PIN(1, 11),
+};
+static const unsigned int avb_avtp_capture_b_mux[] = {
+	AVB_AVTP_CAPTURE_B_MARK,
+};
+
+/* - CAN ------------------------------------------------------------------ */
+static const unsigned int can0_data_a_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(1, 23),	RCAR_GP_PIN(1, 24),
+};
+static const unsigned int can0_data_a_mux[] = {
+	CAN0_TX_A_MARK,		CAN0_RX_A_MARK,
+};
+static const unsigned int can0_data_b_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(2, 0),	RCAR_GP_PIN(2, 1),
+};
+static const unsigned int can0_data_b_mux[] = {
+	CAN0_TX_B_MARK,		CAN0_RX_B_MARK,
+};
+static const unsigned int can1_data_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(1, 22),	RCAR_GP_PIN(1, 26),
+};
+static const unsigned int can1_data_mux[] = {
+	CAN1_TX_MARK,		CAN1_RX_MARK,
+};
+
+/* - CAN Clock -------------------------------------------------------------- */
+static const unsigned int can_clk_pins[] = {
+	/* CLK */
+	RCAR_GP_PIN(1, 25),
+};
+static const unsigned int can_clk_mux[] = {
+	CAN_CLK_MARK,
+};
+
+/* - CAN FD --------------------------------------------------------------- */
+static const unsigned int canfd0_data_a_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(1, 23),     RCAR_GP_PIN(1, 24),
+};
+static const unsigned int canfd0_data_a_mux[] = {
+	CANFD0_TX_A_MARK,       CANFD0_RX_A_MARK,
+};
+static const unsigned int canfd0_data_b_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(2, 0),      RCAR_GP_PIN(2, 1),
+};
+static const unsigned int canfd0_data_b_mux[] = {
+	CANFD0_TX_B_MARK,       CANFD0_RX_B_MARK,
+};
+static const unsigned int canfd1_data_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(1, 22),     RCAR_GP_PIN(1, 26),
+};
+static const unsigned int canfd1_data_mux[] = {
+	CANFD1_TX_MARK,         CANFD1_RX_MARK,
+};
+
+/* - DRIF0 --------------------------------------------------------------- */
+static const unsigned int drif0_ctrl_a_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int drif0_ctrl_a_mux[] = {
+	RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK,
+};
+static const unsigned int drif0_data0_a_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(6, 10),
+};
+static const unsigned int drif0_data0_a_mux[] = {
+	RIF0_D0_A_MARK,
+};
+static const unsigned int drif0_data1_a_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(6, 7),
+};
+static const unsigned int drif0_data1_a_mux[] = {
+	RIF0_D1_A_MARK,
+};
+static const unsigned int drif0_ctrl_b_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int drif0_ctrl_b_mux[] = {
+	RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK,
+};
+static const unsigned int drif0_data0_b_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(5, 1),
+};
+static const unsigned int drif0_data0_b_mux[] = {
+	RIF0_D0_B_MARK,
+};
+static const unsigned int drif0_data1_b_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(5, 2),
+};
+static const unsigned int drif0_data1_b_mux[] = {
+	RIF0_D1_B_MARK,
+};
+static const unsigned int drif0_ctrl_c_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int drif0_ctrl_c_mux[] = {
+	RIF0_CLK_C_MARK, RIF0_SYNC_C_MARK,
+};
+static const unsigned int drif0_data0_c_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(5, 13),
+};
+static const unsigned int drif0_data0_c_mux[] = {
+	RIF0_D0_C_MARK,
+};
+static const unsigned int drif0_data1_c_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(5, 14),
+};
+static const unsigned int drif0_data1_c_mux[] = {
+	RIF0_D1_C_MARK,
+};
+/* - DRIF1 --------------------------------------------------------------- */
+static const unsigned int drif1_ctrl_a_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int drif1_ctrl_a_mux[] = {
+	RIF1_CLK_A_MARK, RIF1_SYNC_A_MARK,
+};
+static const unsigned int drif1_data0_a_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(6, 19),
+};
+static const unsigned int drif1_data0_a_mux[] = {
+	RIF1_D0_A_MARK,
+};
+static const unsigned int drif1_data1_a_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(6, 20),
+};
+static const unsigned int drif1_data1_a_mux[] = {
+	RIF1_D1_A_MARK,
+};
+static const unsigned int drif1_ctrl_b_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int drif1_ctrl_b_mux[] = {
+	RIF1_CLK_B_MARK, RIF1_SYNC_B_MARK,
+};
+static const unsigned int drif1_data0_b_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(5, 7),
+};
+static const unsigned int drif1_data0_b_mux[] = {
+	RIF1_D0_B_MARK,
+};
+static const unsigned int drif1_data1_b_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(5, 8),
+};
+static const unsigned int drif1_data1_b_mux[] = {
+	RIF1_D1_B_MARK,
+};
+static const unsigned int drif1_ctrl_c_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11),
+};
+static const unsigned int drif1_ctrl_c_mux[] = {
+	RIF1_CLK_C_MARK, RIF1_SYNC_C_MARK,
+};
+static const unsigned int drif1_data0_c_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(5, 6),
+};
+static const unsigned int drif1_data0_c_mux[] = {
+	RIF1_D0_C_MARK,
+};
+static const unsigned int drif1_data1_c_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(5, 10),
+};
+static const unsigned int drif1_data1_c_mux[] = {
+	RIF1_D1_C_MARK,
+};
+/* - DRIF2 --------------------------------------------------------------- */
+static const unsigned int drif2_ctrl_a_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int drif2_ctrl_a_mux[] = {
+	RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK,
+};
+static const unsigned int drif2_data0_a_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(6, 7),
+};
+static const unsigned int drif2_data0_a_mux[] = {
+	RIF2_D0_A_MARK,
+};
+static const unsigned int drif2_data1_a_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(6, 10),
+};
+static const unsigned int drif2_data1_a_mux[] = {
+	RIF2_D1_A_MARK,
+};
+static const unsigned int drif2_ctrl_b_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int drif2_ctrl_b_mux[] = {
+	RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK,
+};
+static const unsigned int drif2_data0_b_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(6, 30),
+};
+static const unsigned int drif2_data0_b_mux[] = {
+	RIF2_D0_B_MARK,
+};
+static const unsigned int drif2_data1_b_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(6, 31),
+};
+static const unsigned int drif2_data1_b_mux[] = {
+	RIF2_D1_B_MARK,
+};
+/* - DRIF3 --------------------------------------------------------------- */
+static const unsigned int drif3_ctrl_a_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int drif3_ctrl_a_mux[] = {
+	RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK,
+};
+static const unsigned int drif3_data0_a_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(6, 19),
+};
+static const unsigned int drif3_data0_a_mux[] = {
+	RIF3_D0_A_MARK,
+};
+static const unsigned int drif3_data1_a_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(6, 20),
+};
+static const unsigned int drif3_data1_a_mux[] = {
+	RIF3_D1_A_MARK,
+};
+static const unsigned int drif3_ctrl_b_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
+};
+static const unsigned int drif3_ctrl_b_mux[] = {
+	RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK,
+};
+static const unsigned int drif3_data0_b_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(6, 28),
+};
+static const unsigned int drif3_data0_b_mux[] = {
+	RIF3_D0_B_MARK,
+};
+static const unsigned int drif3_data1_b_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(6, 29),
+};
+static const unsigned int drif3_data1_b_mux[] = {
+	RIF3_D1_B_MARK,
+};
+
+/* - DU --------------------------------------------------------------------- */
+static const unsigned int du_rgb666_pins[] = {
+	/* R[7:2], G[7:2], B[7:2] */
+	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+	RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+	RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+	RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+	RCAR_GP_PIN(1, 7),  RCAR_GP_PIN(1, 6),  RCAR_GP_PIN(1, 5),
+	RCAR_GP_PIN(1, 4),  RCAR_GP_PIN(1, 3),  RCAR_GP_PIN(1, 2),
+};
+static const unsigned int du_rgb666_mux[] = {
+	DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+	DU_DR3_MARK, DU_DR2_MARK,
+	DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+	DU_DG3_MARK, DU_DG2_MARK,
+	DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+	DU_DB3_MARK, DU_DB2_MARK,
+};
+static const unsigned int du_rgb888_pins[] = {
+	/* R[7:0], G[7:0], B[7:0] */
+	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+	RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+	RCAR_GP_PIN(0, 9),  RCAR_GP_PIN(0, 8),
+	RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+	RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+	RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16),
+	RCAR_GP_PIN(1, 7),  RCAR_GP_PIN(1, 6),  RCAR_GP_PIN(1, 5),
+	RCAR_GP_PIN(1, 4),  RCAR_GP_PIN(1, 3),  RCAR_GP_PIN(1, 2),
+	RCAR_GP_PIN(1, 1),  RCAR_GP_PIN(1, 0),
+};
+static const unsigned int du_rgb888_mux[] = {
+	DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+	DU_DR3_MARK, DU_DR2_MARK, DU_DR1_MARK, DU_DR0_MARK,
+	DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+	DU_DG3_MARK, DU_DG2_MARK, DU_DG1_MARK, DU_DG0_MARK,
+	DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+	DU_DB3_MARK, DU_DB2_MARK, DU_DB1_MARK, DU_DB0_MARK,
+};
+static const unsigned int du_clk_out_0_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(1, 27),
+};
+static const unsigned int du_clk_out_0_mux[] = {
+	DU_DOTCLKOUT0_MARK
+};
+static const unsigned int du_clk_out_1_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(2, 3),
+};
+static const unsigned int du_clk_out_1_mux[] = {
+	DU_DOTCLKOUT1_MARK
+};
+static const unsigned int du_sync_pins[] = {
+	/* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
+	RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 4),
+};
+static const unsigned int du_sync_mux[] = {
+	DU_EXVSYNC_DU_VSYNC_MARK, DU_EXHSYNC_DU_HSYNC_MARK
+};
+static const unsigned int du_oddf_pins[] = {
+	/* EXDISP/EXODDF/EXCDE */
+	RCAR_GP_PIN(2, 2),
+};
+static const unsigned int du_oddf_mux[] = {
+	DU_EXODDF_DU_ODDF_DISP_CDE_MARK,
+};
+static const unsigned int du_cde_pins[] = {
+	/* CDE */
+	RCAR_GP_PIN(2, 0),
+};
+static const unsigned int du_cde_mux[] = {
+	DU_CDE_MARK,
+};
+static const unsigned int du_disp_pins[] = {
+	/* DISP */
+	RCAR_GP_PIN(2, 1),
+};
+static const unsigned int du_disp_mux[] = {
+	DU_DISP_MARK,
+};
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
+};
+static const unsigned int hscif0_data_mux[] = {
+	HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 12),
+};
+static const unsigned int hscif0_clk_mux[] = {
+	HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(5, 16), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+	HRTS0_N_MARK, HCTS0_N_MARK,
+};
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+static const unsigned int hscif1_data_a_mux[] = {
+	HRX1_A_MARK, HTX1_A_MARK,
+};
+static const unsigned int hscif1_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 21),
+};
+static const unsigned int hscif1_clk_a_mux[] = {
+	HSCK1_A_MARK,
+};
+static const unsigned int hscif1_ctrl_a_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
+};
+static const unsigned int hscif1_ctrl_a_mux[] = {
+	HRTS1_N_A_MARK, HCTS1_N_A_MARK,
+};
+
+static const unsigned int hscif1_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+static const unsigned int hscif1_data_b_mux[] = {
+	HRX1_B_MARK, HTX1_B_MARK,
+};
+static const unsigned int hscif1_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 0),
+};
+static const unsigned int hscif1_clk_b_mux[] = {
+	HSCK1_B_MARK,
+};
+static const unsigned int hscif1_ctrl_b_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int hscif1_ctrl_b_mux[] = {
+	HRTS1_N_B_MARK, HCTS1_N_B_MARK,
+};
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int hscif2_data_a_mux[] = {
+	HRX2_A_MARK, HTX2_A_MARK,
+};
+static const unsigned int hscif2_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 10),
+};
+static const unsigned int hscif2_clk_a_mux[] = {
+	HSCK2_A_MARK,
+};
+static const unsigned int hscif2_ctrl_a_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int hscif2_ctrl_a_mux[] = {
+	HRTS2_N_A_MARK, HCTS2_N_A_MARK,
+};
+
+static const unsigned int hscif2_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int hscif2_data_b_mux[] = {
+	HRX2_B_MARK, HTX2_B_MARK,
+};
+static const unsigned int hscif2_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 21),
+};
+static const unsigned int hscif2_clk_b_mux[] = {
+	HSCK2_B_MARK,
+};
+static const unsigned int hscif2_ctrl_b_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(6, 20), RCAR_GP_PIN(6, 19),
+};
+static const unsigned int hscif2_ctrl_b_mux[] = {
+	HRTS2_N_B_MARK, HCTS2_N_B_MARK,
+};
+/* - HSCIF3 ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int hscif3_data_a_mux[] = {
+	HRX3_A_MARK, HTX3_A_MARK,
+};
+static const unsigned int hscif3_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 22),
+};
+static const unsigned int hscif3_clk_mux[] = {
+	HSCK3_MARK,
+};
+static const unsigned int hscif3_ctrl_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int hscif3_ctrl_mux[] = {
+	HRTS3_N_MARK, HCTS3_N_MARK,
+};
+
+static const unsigned int hscif3_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+};
+static const unsigned int hscif3_data_b_mux[] = {
+	HRX3_B_MARK, HTX3_B_MARK,
+};
+static const unsigned int hscif3_data_c_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+};
+static const unsigned int hscif3_data_c_mux[] = {
+	HRX3_C_MARK, HTX3_C_MARK,
+};
+static const unsigned int hscif3_data_d_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int hscif3_data_d_mux[] = {
+	HRX3_D_MARK, HTX3_D_MARK,
+};
+/* - HSCIF4 ----------------------------------------------------------------- */
+static const unsigned int hscif4_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int hscif4_data_a_mux[] = {
+	HRX4_A_MARK, HTX4_A_MARK,
+};
+static const unsigned int hscif4_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 11),
+};
+static const unsigned int hscif4_clk_mux[] = {
+	HSCK4_MARK,
+};
+static const unsigned int hscif4_ctrl_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14),
+};
+static const unsigned int hscif4_ctrl_mux[] = {
+	HRTS4_N_MARK, HCTS4_N_MARK,
+};
+
+static const unsigned int hscif4_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int hscif4_data_b_mux[] = {
+	HRX4_B_MARK, HTX4_B_MARK,
+};
+
+/* - I2C -------------------------------------------------------------------- */
+static const unsigned int i2c0_pins[] = {
+	/* SCL, SDA */
+	RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15),
+};
+
+static const unsigned int i2c0_mux[] = {
+	SCL0_MARK, SDA0_MARK,
+};
+
+static const unsigned int i2c1_a_pins[] = {
+	/* SDA, SCL */
+	RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int i2c1_a_mux[] = {
+	SDA1_A_MARK, SCL1_A_MARK,
+};
+static const unsigned int i2c1_b_pins[] = {
+	/* SDA, SCL */
+	RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 23),
+};
+static const unsigned int i2c1_b_mux[] = {
+	SDA1_B_MARK, SCL1_B_MARK,
+};
+static const unsigned int i2c2_a_pins[] = {
+	/* SDA, SCL */
+	RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int i2c2_a_mux[] = {
+	SDA2_A_MARK, SCL2_A_MARK,
+};
+static const unsigned int i2c2_b_pins[] = {
+	/* SDA, SCL */
+	RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12),
+};
+static const unsigned int i2c2_b_mux[] = {
+	SDA2_B_MARK, SCL2_B_MARK,
+};
+
+static const unsigned int i2c3_pins[] = {
+	/* SCL, SDA */
+	RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+
+static const unsigned int i2c3_mux[] = {
+	SCL3_MARK, SDA3_MARK,
+};
+
+static const unsigned int i2c5_pins[] = {
+	/* SCL, SDA */
+	RCAR_GP_PIN(2, 13), RCAR_GP_PIN(2, 14),
+};
+
+static const unsigned int i2c5_mux[] = {
+	SCL5_MARK, SDA5_MARK,
+};
+
+static const unsigned int i2c6_a_pins[] = {
+	/* SDA, SCL */
+	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int i2c6_a_mux[] = {
+	SDA6_A_MARK, SCL6_A_MARK,
+};
+static const unsigned int i2c6_b_pins[] = {
+	/* SDA, SCL */
+	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int i2c6_b_mux[] = {
+	SDA6_B_MARK, SCL6_B_MARK,
+};
+static const unsigned int i2c6_c_pins[] = {
+	/* SDA, SCL */
+	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int i2c6_c_mux[] = {
+	SDA6_C_MARK, SCL6_C_MARK,
+};
+
+/* - INTC-EX ---------------------------------------------------------------- */
+static const unsigned int intc_ex_irq0_pins[] = {
+	/* IRQ0 */
+	RCAR_GP_PIN(2, 0),
+};
+static const unsigned int intc_ex_irq0_mux[] = {
+	IRQ0_MARK,
+};
+static const unsigned int intc_ex_irq1_pins[] = {
+	/* IRQ1 */
+	RCAR_GP_PIN(2, 1),
+};
+static const unsigned int intc_ex_irq1_mux[] = {
+	IRQ1_MARK,
+};
+static const unsigned int intc_ex_irq2_pins[] = {
+	/* IRQ2 */
+	RCAR_GP_PIN(2, 2),
+};
+static const unsigned int intc_ex_irq2_mux[] = {
+	IRQ2_MARK,
+};
+static const unsigned int intc_ex_irq3_pins[] = {
+	/* IRQ3 */
+	RCAR_GP_PIN(2, 3),
+};
+static const unsigned int intc_ex_irq3_mux[] = {
+	IRQ3_MARK,
+};
+static const unsigned int intc_ex_irq4_pins[] = {
+	/* IRQ4 */
+	RCAR_GP_PIN(2, 4),
+};
+static const unsigned int intc_ex_irq4_mux[] = {
+	IRQ4_MARK,
+};
+static const unsigned int intc_ex_irq5_pins[] = {
+	/* IRQ5 */
+	RCAR_GP_PIN(2, 5),
+};
+static const unsigned int intc_ex_irq5_mux[] = {
+	IRQ5_MARK,
+};
+
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 17),
+};
+static const unsigned int msiof0_clk_mux[] = {
+	MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(5, 18),
+};
+static const unsigned int msiof0_sync_mux[] = {
+	MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(5, 19),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+	MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(5, 21),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+	MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_txd_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(5, 20),
+};
+static const unsigned int msiof0_txd_mux[] = {
+	MSIOF0_TXD_MARK,
+};
+static const unsigned int msiof0_rxd_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(5, 22),
+};
+static const unsigned int msiof0_rxd_mux[] = {
+	MSIOF0_RXD_MARK,
+};
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 8),
+};
+static const unsigned int msiof1_clk_a_mux[] = {
+	MSIOF1_SCK_A_MARK,
+};
+static const unsigned int msiof1_sync_a_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(6, 9),
+};
+static const unsigned int msiof1_sync_a_mux[] = {
+	MSIOF1_SYNC_A_MARK,
+};
+static const unsigned int msiof1_ss1_a_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(6, 5),
+};
+static const unsigned int msiof1_ss1_a_mux[] = {
+	MSIOF1_SS1_A_MARK,
+};
+static const unsigned int msiof1_ss2_a_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(6, 6),
+};
+static const unsigned int msiof1_ss2_a_mux[] = {
+	MSIOF1_SS2_A_MARK,
+};
+static const unsigned int msiof1_txd_a_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(6, 7),
+};
+static const unsigned int msiof1_txd_a_mux[] = {
+	MSIOF1_TXD_A_MARK,
+};
+static const unsigned int msiof1_rxd_a_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(6, 10),
+};
+static const unsigned int msiof1_rxd_a_mux[] = {
+	MSIOF1_RXD_A_MARK,
+};
+static const unsigned int msiof1_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 9),
+};
+static const unsigned int msiof1_clk_b_mux[] = {
+	MSIOF1_SCK_B_MARK,
+};
+static const unsigned int msiof1_sync_b_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(5, 3),
+};
+static const unsigned int msiof1_sync_b_mux[] = {
+	MSIOF1_SYNC_B_MARK,
+};
+static const unsigned int msiof1_ss1_b_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(5, 4),
+};
+static const unsigned int msiof1_ss1_b_mux[] = {
+	MSIOF1_SS1_B_MARK,
+};
+static const unsigned int msiof1_ss2_b_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(5, 0),
+};
+static const unsigned int msiof1_ss2_b_mux[] = {
+	MSIOF1_SS2_B_MARK,
+};
+static const unsigned int msiof1_txd_b_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(5, 8),
+};
+static const unsigned int msiof1_txd_b_mux[] = {
+	MSIOF1_TXD_B_MARK,
+};
+static const unsigned int msiof1_rxd_b_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(5, 7),
+};
+static const unsigned int msiof1_rxd_b_mux[] = {
+	MSIOF1_RXD_B_MARK,
+};
+static const unsigned int msiof1_clk_c_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 17),
+};
+static const unsigned int msiof1_clk_c_mux[] = {
+	MSIOF1_SCK_C_MARK,
+};
+static const unsigned int msiof1_sync_c_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(6, 18),
+};
+static const unsigned int msiof1_sync_c_mux[] = {
+	MSIOF1_SYNC_C_MARK,
+};
+static const unsigned int msiof1_ss1_c_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(6, 21),
+};
+static const unsigned int msiof1_ss1_c_mux[] = {
+	MSIOF1_SS1_C_MARK,
+};
+static const unsigned int msiof1_ss2_c_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(6, 27),
+};
+static const unsigned int msiof1_ss2_c_mux[] = {
+	MSIOF1_SS2_C_MARK,
+};
+static const unsigned int msiof1_txd_c_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(6, 20),
+};
+static const unsigned int msiof1_txd_c_mux[] = {
+	MSIOF1_TXD_C_MARK,
+};
+static const unsigned int msiof1_rxd_c_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(6, 19),
+};
+static const unsigned int msiof1_rxd_c_mux[] = {
+	MSIOF1_RXD_C_MARK,
+};
+static const unsigned int msiof1_clk_d_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 12),
+};
+static const unsigned int msiof1_clk_d_mux[] = {
+	MSIOF1_SCK_D_MARK,
+};
+static const unsigned int msiof1_sync_d_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(5, 15),
+};
+static const unsigned int msiof1_sync_d_mux[] = {
+	MSIOF1_SYNC_D_MARK,
+};
+static const unsigned int msiof1_ss1_d_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(5, 16),
+};
+static const unsigned int msiof1_ss1_d_mux[] = {
+	MSIOF1_SS1_D_MARK,
+};
+static const unsigned int msiof1_ss2_d_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(5, 21),
+};
+static const unsigned int msiof1_ss2_d_mux[] = {
+	MSIOF1_SS2_D_MARK,
+};
+static const unsigned int msiof1_txd_d_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(5, 14),
+};
+static const unsigned int msiof1_txd_d_mux[] = {
+	MSIOF1_TXD_D_MARK,
+};
+static const unsigned int msiof1_rxd_d_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(5, 13),
+};
+static const unsigned int msiof1_rxd_d_mux[] = {
+	MSIOF1_RXD_D_MARK,
+};
+static const unsigned int msiof1_clk_e_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(3, 0),
+};
+static const unsigned int msiof1_clk_e_mux[] = {
+	MSIOF1_SCK_E_MARK,
+};
+static const unsigned int msiof1_sync_e_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(3, 1),
+};
+static const unsigned int msiof1_sync_e_mux[] = {
+	MSIOF1_SYNC_E_MARK,
+};
+static const unsigned int msiof1_ss1_e_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(3, 4),
+};
+static const unsigned int msiof1_ss1_e_mux[] = {
+	MSIOF1_SS1_E_MARK,
+};
+static const unsigned int msiof1_ss2_e_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(3, 5),
+};
+static const unsigned int msiof1_ss2_e_mux[] = {
+	MSIOF1_SS2_E_MARK,
+};
+static const unsigned int msiof1_txd_e_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(3, 3),
+};
+static const unsigned int msiof1_txd_e_mux[] = {
+	MSIOF1_TXD_E_MARK,
+};
+static const unsigned int msiof1_rxd_e_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(3, 2),
+};
+static const unsigned int msiof1_rxd_e_mux[] = {
+	MSIOF1_RXD_E_MARK,
+};
+static const unsigned int msiof1_clk_f_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 23),
+};
+static const unsigned int msiof1_clk_f_mux[] = {
+	MSIOF1_SCK_F_MARK,
+};
+static const unsigned int msiof1_sync_f_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(5, 24),
+};
+static const unsigned int msiof1_sync_f_mux[] = {
+	MSIOF1_SYNC_F_MARK,
+};
+static const unsigned int msiof1_ss1_f_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(6, 1),
+};
+static const unsigned int msiof1_ss1_f_mux[] = {
+	MSIOF1_SS1_F_MARK,
+};
+static const unsigned int msiof1_ss2_f_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(6, 2),
+};
+static const unsigned int msiof1_ss2_f_mux[] = {
+	MSIOF1_SS2_F_MARK,
+};
+static const unsigned int msiof1_txd_f_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(6, 0),
+};
+static const unsigned int msiof1_txd_f_mux[] = {
+	MSIOF1_TXD_F_MARK,
+};
+static const unsigned int msiof1_rxd_f_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(5, 25),
+};
+static const unsigned int msiof1_rxd_f_mux[] = {
+	MSIOF1_RXD_F_MARK,
+};
+static const unsigned int msiof1_clk_g_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(3, 6),
+};
+static const unsigned int msiof1_clk_g_mux[] = {
+	MSIOF1_SCK_G_MARK,
+};
+static const unsigned int msiof1_sync_g_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(3, 7),
+};
+static const unsigned int msiof1_sync_g_mux[] = {
+	MSIOF1_SYNC_G_MARK,
+};
+static const unsigned int msiof1_ss1_g_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(3, 10),
+};
+static const unsigned int msiof1_ss1_g_mux[] = {
+	MSIOF1_SS1_G_MARK,
+};
+static const unsigned int msiof1_ss2_g_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(3, 11),
+};
+static const unsigned int msiof1_ss2_g_mux[] = {
+	MSIOF1_SS2_G_MARK,
+};
+static const unsigned int msiof1_txd_g_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(3, 9),
+};
+static const unsigned int msiof1_txd_g_mux[] = {
+	MSIOF1_TXD_G_MARK,
+};
+static const unsigned int msiof1_rxd_g_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(3, 8),
+};
+static const unsigned int msiof1_rxd_g_mux[] = {
+	MSIOF1_RXD_G_MARK,
+};
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 9),
+};
+static const unsigned int msiof2_clk_a_mux[] = {
+	MSIOF2_SCK_A_MARK,
+};
+static const unsigned int msiof2_sync_a_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(1, 8),
+};
+static const unsigned int msiof2_sync_a_mux[] = {
+	MSIOF2_SYNC_A_MARK,
+};
+static const unsigned int msiof2_ss1_a_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(1, 6),
+};
+static const unsigned int msiof2_ss1_a_mux[] = {
+	MSIOF2_SS1_A_MARK,
+};
+static const unsigned int msiof2_ss2_a_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(1, 7),
+};
+static const unsigned int msiof2_ss2_a_mux[] = {
+	MSIOF2_SS2_A_MARK,
+};
+static const unsigned int msiof2_txd_a_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(1, 11),
+};
+static const unsigned int msiof2_txd_a_mux[] = {
+	MSIOF2_TXD_A_MARK,
+};
+static const unsigned int msiof2_rxd_a_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof2_rxd_a_mux[] = {
+	MSIOF2_RXD_A_MARK,
+};
+static const unsigned int msiof2_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(0, 4),
+};
+static const unsigned int msiof2_clk_b_mux[] = {
+	MSIOF2_SCK_B_MARK,
+};
+static const unsigned int msiof2_sync_b_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(0, 5),
+};
+static const unsigned int msiof2_sync_b_mux[] = {
+	MSIOF2_SYNC_B_MARK,
+};
+static const unsigned int msiof2_ss1_b_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(0, 0),
+};
+static const unsigned int msiof2_ss1_b_mux[] = {
+	MSIOF2_SS1_B_MARK,
+};
+static const unsigned int msiof2_ss2_b_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof2_ss2_b_mux[] = {
+	MSIOF2_SS2_B_MARK,
+};
+static const unsigned int msiof2_txd_b_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(0, 7),
+};
+static const unsigned int msiof2_txd_b_mux[] = {
+	MSIOF2_TXD_B_MARK,
+};
+static const unsigned int msiof2_rxd_b_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(0, 6),
+};
+static const unsigned int msiof2_rxd_b_mux[] = {
+	MSIOF2_RXD_B_MARK,
+};
+static const unsigned int msiof2_clk_c_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(2, 12),
+};
+static const unsigned int msiof2_clk_c_mux[] = {
+	MSIOF2_SCK_C_MARK,
+};
+static const unsigned int msiof2_sync_c_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(2, 11),
+};
+static const unsigned int msiof2_sync_c_mux[] = {
+	MSIOF2_SYNC_C_MARK,
+};
+static const unsigned int msiof2_ss1_c_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(2, 10),
+};
+static const unsigned int msiof2_ss1_c_mux[] = {
+	MSIOF2_SS1_C_MARK,
+};
+static const unsigned int msiof2_ss2_c_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(2, 9),
+};
+static const unsigned int msiof2_ss2_c_mux[] = {
+	MSIOF2_SS2_C_MARK,
+};
+static const unsigned int msiof2_txd_c_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(2, 14),
+};
+static const unsigned int msiof2_txd_c_mux[] = {
+	MSIOF2_TXD_C_MARK,
+};
+static const unsigned int msiof2_rxd_c_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(2, 13),
+};
+static const unsigned int msiof2_rxd_c_mux[] = {
+	MSIOF2_RXD_C_MARK,
+};
+static const unsigned int msiof2_clk_d_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(0, 8),
+};
+static const unsigned int msiof2_clk_d_mux[] = {
+	MSIOF2_SCK_D_MARK,
+};
+static const unsigned int msiof2_sync_d_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(0, 9),
+};
+static const unsigned int msiof2_sync_d_mux[] = {
+	MSIOF2_SYNC_D_MARK,
+};
+static const unsigned int msiof2_ss1_d_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(0, 12),
+};
+static const unsigned int msiof2_ss1_d_mux[] = {
+	MSIOF2_SS1_D_MARK,
+};
+static const unsigned int msiof2_ss2_d_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(0, 13),
+};
+static const unsigned int msiof2_ss2_d_mux[] = {
+	MSIOF2_SS2_D_MARK,
+};
+static const unsigned int msiof2_txd_d_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(0, 11),
+};
+static const unsigned int msiof2_txd_d_mux[] = {
+	MSIOF2_TXD_D_MARK,
+};
+static const unsigned int msiof2_rxd_d_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(0, 10),
+};
+static const unsigned int msiof2_rxd_d_mux[] = {
+	MSIOF2_RXD_D_MARK,
+};
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(0, 0),
+};
+static const unsigned int msiof3_clk_a_mux[] = {
+	MSIOF3_SCK_A_MARK,
+};
+static const unsigned int msiof3_sync_a_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_sync_a_mux[] = {
+	MSIOF3_SYNC_A_MARK,
+};
+static const unsigned int msiof3_ss1_a_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(0, 14),
+};
+static const unsigned int msiof3_ss1_a_mux[] = {
+	MSIOF3_SS1_A_MARK,
+};
+static const unsigned int msiof3_ss2_a_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(0, 15),
+};
+static const unsigned int msiof3_ss2_a_mux[] = {
+	MSIOF3_SS2_A_MARK,
+};
+static const unsigned int msiof3_txd_a_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_txd_a_mux[] = {
+	MSIOF3_TXD_A_MARK,
+};
+static const unsigned int msiof3_rxd_a_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_rxd_a_mux[] = {
+	MSIOF3_RXD_A_MARK,
+};
+static const unsigned int msiof3_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 2),
+};
+static const unsigned int msiof3_clk_b_mux[] = {
+	MSIOF3_SCK_B_MARK,
+};
+static const unsigned int msiof3_sync_b_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(1, 0),
+};
+static const unsigned int msiof3_sync_b_mux[] = {
+	MSIOF3_SYNC_B_MARK,
+};
+static const unsigned int msiof3_ss1_b_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(1, 4),
+};
+static const unsigned int msiof3_ss1_b_mux[] = {
+	MSIOF3_SS1_B_MARK,
+};
+static const unsigned int msiof3_ss2_b_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(1, 5),
+};
+static const unsigned int msiof3_ss2_b_mux[] = {
+	MSIOF3_SS2_B_MARK,
+};
+static const unsigned int msiof3_txd_b_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(1, 1),
+};
+static const unsigned int msiof3_txd_b_mux[] = {
+	MSIOF3_TXD_B_MARK,
+};
+static const unsigned int msiof3_rxd_b_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(1, 3),
+};
+static const unsigned int msiof3_rxd_b_mux[] = {
+	MSIOF3_RXD_B_MARK,
+};
+static const unsigned int msiof3_clk_c_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 12),
+};
+static const unsigned int msiof3_clk_c_mux[] = {
+	MSIOF3_SCK_C_MARK,
+};
+static const unsigned int msiof3_sync_c_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(1, 13),
+};
+static const unsigned int msiof3_sync_c_mux[] = {
+	MSIOF3_SYNC_C_MARK,
+};
+static const unsigned int msiof3_txd_c_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(1, 15),
+};
+static const unsigned int msiof3_txd_c_mux[] = {
+	MSIOF3_TXD_C_MARK,
+};
+static const unsigned int msiof3_rxd_c_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(1, 14),
+};
+static const unsigned int msiof3_rxd_c_mux[] = {
+	MSIOF3_RXD_C_MARK,
+};
+static const unsigned int msiof3_clk_d_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 22),
+};
+static const unsigned int msiof3_clk_d_mux[] = {
+	MSIOF3_SCK_D_MARK,
+};
+static const unsigned int msiof3_sync_d_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof3_sync_d_mux[] = {
+	MSIOF3_SYNC_D_MARK,
+};
+static const unsigned int msiof3_ss1_d_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(1, 26),
+};
+static const unsigned int msiof3_ss1_d_mux[] = {
+	MSIOF3_SS1_D_MARK,
+};
+static const unsigned int msiof3_txd_d_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(1, 25),
+};
+static const unsigned int msiof3_txd_d_mux[] = {
+	MSIOF3_TXD_D_MARK,
+};
+static const unsigned int msiof3_rxd_d_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(1, 24),
+};
+static const unsigned int msiof3_rxd_d_mux[] = {
+	MSIOF3_RXD_D_MARK,
+};
+
+/* - PWM0 --------------------------------------------------------------------*/
+static const unsigned int pwm0_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 6),
+};
+static const unsigned int pwm0_mux[] = {
+	PWM0_MARK,
+};
+/* - PWM1 --------------------------------------------------------------------*/
+static const unsigned int pwm1_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 7),
+};
+static const unsigned int pwm1_a_mux[] = {
+	PWM1_A_MARK,
+};
+static const unsigned int pwm1_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 8),
+};
+static const unsigned int pwm1_b_mux[] = {
+	PWM1_B_MARK,
+};
+/* - PWM2 --------------------------------------------------------------------*/
+static const unsigned int pwm2_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 8),
+};
+static const unsigned int pwm2_a_mux[] = {
+	PWM2_A_MARK,
+};
+static const unsigned int pwm2_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 11),
+};
+static const unsigned int pwm2_b_mux[] = {
+	PWM2_B_MARK,
+};
+/* - PWM3 --------------------------------------------------------------------*/
+static const unsigned int pwm3_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 0),
+};
+static const unsigned int pwm3_a_mux[] = {
+	PWM3_A_MARK,
+};
+static const unsigned int pwm3_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 2),
+};
+static const unsigned int pwm3_b_mux[] = {
+	PWM3_B_MARK,
+};
+/* - PWM4 --------------------------------------------------------------------*/
+static const unsigned int pwm4_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 1),
+};
+static const unsigned int pwm4_a_mux[] = {
+	PWM4_A_MARK,
+};
+static const unsigned int pwm4_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 3),
+};
+static const unsigned int pwm4_b_mux[] = {
+	PWM4_B_MARK,
+};
+/* - PWM5 --------------------------------------------------------------------*/
+static const unsigned int pwm5_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 2),
+};
+static const unsigned int pwm5_a_mux[] = {
+	PWM5_A_MARK,
+};
+static const unsigned int pwm5_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 4),
+};
+static const unsigned int pwm5_b_mux[] = {
+	PWM5_B_MARK,
+};
+/* - PWM6 --------------------------------------------------------------------*/
+static const unsigned int pwm6_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 3),
+};
+static const unsigned int pwm6_a_mux[] = {
+	PWM6_A_MARK,
+};
+static const unsigned int pwm6_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 5),
+};
+static const unsigned int pwm6_b_mux[] = {
+	PWM6_B_MARK,
+};
+
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+	/* QSPI0_SPCLK, QSPI0_SSL */
+	PIN_QSPI0_SPCLK, PIN_QSPI0_SSL,
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+	QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data2_pins[] = {
+	/* QSPI0_MOSI_IO0, QSPI0_MISO_IO1 */
+	PIN_QSPI0_MOSI_IO0, PIN_QSPI0_MISO_IO1,
+};
+static const unsigned int qspi0_data2_mux[] = {
+	QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+};
+static const unsigned int qspi0_data4_pins[] = {
+	/* QSPI0_MOSI_IO0, QSPI0_MISO_IO1, QSPI0_IO2, QSPI0_IO3 */
+	PIN_QSPI0_MOSI_IO0, PIN_QSPI0_MISO_IO1, PIN_QSPI0_IO2, PIN_QSPI0_IO3,
+};
+static const unsigned int qspi0_data4_mux[] = {
+	QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+	QSPI0_IO2_MARK, QSPI0_IO3_MARK,
+};
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+	/* QSPI1_SPCLK, QSPI1_SSL */
+	PIN_QSPI1_SPCLK, PIN_QSPI1_SSL,
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+	QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int qspi1_data2_pins[] = {
+	/* QSPI1_MOSI_IO0, QSPI1_MISO_IO1 */
+	PIN_QSPI1_MOSI_IO0, PIN_QSPI1_MISO_IO1,
+};
+static const unsigned int qspi1_data2_mux[] = {
+	QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+};
+static const unsigned int qspi1_data4_pins[] = {
+	/* QSPI1_MOSI_IO0, QSPI1_MISO_IO1, QSPI1_IO2, QSPI1_IO3 */
+	PIN_QSPI1_MOSI_IO0, PIN_QSPI1_MISO_IO1, PIN_QSPI1_IO2, PIN_QSPI1_IO3,
+};
+static const unsigned int qspi1_data4_mux[] = {
+	QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+	QSPI1_IO2_MARK, QSPI1_IO3_MARK,
+};
+
+/* - SATA --------------------------------------------------------------------*/
+static const unsigned int sata0_devslp_a_pins[] = {
+	/* DEVSLP */
+	RCAR_GP_PIN(6, 16),
+};
+static const unsigned int sata0_devslp_a_mux[] = {
+	SATA_DEVSLP_A_MARK,
+};
+static const unsigned int sata0_devslp_b_pins[] = {
+	/* DEVSLP */
+	RCAR_GP_PIN(4, 6),
+};
+static const unsigned int sata0_devslp_b_mux[] = {
+	SATA_DEVSLP_B_MARK,
+};
+
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+static const unsigned int scif0_data_mux[] = {
+	RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 0),
+};
+static const unsigned int scif0_clk_mux[] = {
+	SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int scif0_ctrl_mux[] = {
+	RTS0_N_MARK, CTS0_N_MARK,
+};
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+static const unsigned int scif1_data_a_mux[] = {
+	RX1_A_MARK, TX1_A_MARK,
+};
+static const unsigned int scif1_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 21),
+};
+static const unsigned int scif1_clk_mux[] = {
+	SCK1_MARK,
+};
+static const unsigned int scif1_ctrl_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
+};
+static const unsigned int scif1_ctrl_mux[] = {
+	RTS1_N_MARK, CTS1_N_MARK,
+};
+
+static const unsigned int scif1_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 25),
+};
+static const unsigned int scif1_data_b_mux[] = {
+	RX1_B_MARK, TX1_B_MARK,
+};
+/* - SCIF2 ------------------------------------------------------------------ */
+static const unsigned int scif2_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int scif2_data_a_mux[] = {
+	RX2_A_MARK, TX2_A_MARK,
+};
+static const unsigned int scif2_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 9),
+};
+static const unsigned int scif2_clk_mux[] = {
+	SCK2_MARK,
+};
+static const unsigned int scif2_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int scif2_data_b_mux[] = {
+	RX2_B_MARK, TX2_B_MARK,
+};
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int scif3_data_a_mux[] = {
+	RX3_A_MARK, TX3_A_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 22),
+};
+static const unsigned int scif3_clk_mux[] = {
+	SCK3_MARK,
+};
+static const unsigned int scif3_ctrl_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int scif3_ctrl_mux[] = {
+	RTS3_N_MARK, CTS3_N_MARK,
+};
+static const unsigned int scif3_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int scif3_data_b_mux[] = {
+	RX3_B_MARK, TX3_B_MARK,
+};
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 12),
+};
+static const unsigned int scif4_data_a_mux[] = {
+	RX4_A_MARK, TX4_A_MARK,
+};
+static const unsigned int scif4_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(2, 10),
+};
+static const unsigned int scif4_clk_a_mux[] = {
+	SCK4_A_MARK,
+};
+static const unsigned int scif4_ctrl_a_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 13),
+};
+static const unsigned int scif4_ctrl_a_mux[] = {
+	RTS4_N_A_MARK, CTS4_N_A_MARK,
+};
+static const unsigned int scif4_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+static const unsigned int scif4_data_b_mux[] = {
+	RX4_B_MARK, TX4_B_MARK,
+};
+static const unsigned int scif4_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 5),
+};
+static const unsigned int scif4_clk_b_mux[] = {
+	SCK4_B_MARK,
+};
+static const unsigned int scif4_ctrl_b_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int scif4_ctrl_b_mux[] = {
+	RTS4_N_B_MARK, CTS4_N_B_MARK,
+};
+static const unsigned int scif4_data_c_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
+};
+static const unsigned int scif4_data_c_mux[] = {
+	RX4_C_MARK, TX4_C_MARK,
+};
+static const unsigned int scif4_clk_c_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(0, 8),
+};
+static const unsigned int scif4_clk_c_mux[] = {
+	SCK4_C_MARK,
+};
+static const unsigned int scif4_ctrl_c_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+};
+static const unsigned int scif4_ctrl_c_mux[] = {
+	RTS4_N_C_MARK, CTS4_N_C_MARK,
+};
+/* - SCIF5 ------------------------------------------------------------------ */
+static const unsigned int scif5_data_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
+};
+static const unsigned int scif5_data_mux[] = {
+	RX5_MARK, TX5_MARK,
+};
+static const unsigned int scif5_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 21),
+};
+static const unsigned int scif5_clk_mux[] = {
+	SCK5_MARK,
+};
+
+/* - SCIF Clock ------------------------------------------------------------- */
+static const unsigned int scif_clk_a_pins[] = {
+	/* SCIF_CLK */
+	RCAR_GP_PIN(6, 23),
+};
+static const unsigned int scif_clk_a_mux[] = {
+	SCIF_CLK_A_MARK,
+};
+static const unsigned int scif_clk_b_pins[] = {
+	/* SCIF_CLK */
+	RCAR_GP_PIN(5, 9),
+};
+static const unsigned int scif_clk_b_mux[] = {
+	SCIF_CLK_B_MARK,
+};
+
+/* - SDHI0 ------------------------------------------------------------------ */
+static const unsigned int sdhi0_data1_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(3, 2),
+};
+static const unsigned int sdhi0_data1_mux[] = {
+	SD0_DAT0_MARK,
+};
+static const unsigned int sdhi0_data4_pins[] = {
+	/* D[0:3] */
+	RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3),
+	RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
+};
+static const unsigned int sdhi0_data4_mux[] = {
+	SD0_DAT0_MARK, SD0_DAT1_MARK,
+	SD0_DAT2_MARK, SD0_DAT3_MARK,
+};
+static const unsigned int sdhi0_ctrl_pins[] = {
+	/* CLK, CMD */
+	RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
+};
+static const unsigned int sdhi0_ctrl_mux[] = {
+	SD0_CLK_MARK, SD0_CMD_MARK,
+};
+static const unsigned int sdhi0_cd_pins[] = {
+	/* CD */
+	RCAR_GP_PIN(3, 12),
+};
+static const unsigned int sdhi0_cd_mux[] = {
+	SD0_CD_MARK,
+};
+static const unsigned int sdhi0_wp_pins[] = {
+	/* WP */
+	RCAR_GP_PIN(3, 13),
+};
+static const unsigned int sdhi0_wp_mux[] = {
+	SD0_WP_MARK,
+};
+/* - SDHI1 ------------------------------------------------------------------ */
+static const unsigned int sdhi1_data1_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(3, 8),
+};
+static const unsigned int sdhi1_data1_mux[] = {
+	SD1_DAT0_MARK,
+};
+static const unsigned int sdhi1_data4_pins[] = {
+	/* D[0:3] */
+	RCAR_GP_PIN(3, 8),  RCAR_GP_PIN(3, 9),
+	RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+static const unsigned int sdhi1_data4_mux[] = {
+	SD1_DAT0_MARK, SD1_DAT1_MARK,
+	SD1_DAT2_MARK, SD1_DAT3_MARK,
+};
+static const unsigned int sdhi1_ctrl_pins[] = {
+	/* CLK, CMD */
+	RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+};
+static const unsigned int sdhi1_ctrl_mux[] = {
+	SD1_CLK_MARK, SD1_CMD_MARK,
+};
+static const unsigned int sdhi1_cd_pins[] = {
+	/* CD */
+	RCAR_GP_PIN(3, 14),
+};
+static const unsigned int sdhi1_cd_mux[] = {
+	SD1_CD_MARK,
+};
+static const unsigned int sdhi1_wp_pins[] = {
+	/* WP */
+	RCAR_GP_PIN(3, 15),
+};
+static const unsigned int sdhi1_wp_mux[] = {
+	SD1_WP_MARK,
+};
+/* - SDHI2 ------------------------------------------------------------------ */
+static const unsigned int sdhi2_data1_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(4, 2),
+};
+static const unsigned int sdhi2_data1_mux[] = {
+	SD2_DAT0_MARK,
+};
+static const unsigned int sdhi2_data4_pins[] = {
+	/* D[0:3] */
+	RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+	RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+};
+static const unsigned int sdhi2_data4_mux[] = {
+	SD2_DAT0_MARK, SD2_DAT1_MARK,
+	SD2_DAT2_MARK, SD2_DAT3_MARK,
+};
+static const unsigned int sdhi2_data8_pins[] = {
+	/* D[0:7] */
+	RCAR_GP_PIN(4, 2),  RCAR_GP_PIN(4, 3),
+	RCAR_GP_PIN(4, 4),  RCAR_GP_PIN(4, 5),
+	RCAR_GP_PIN(3, 8),  RCAR_GP_PIN(3, 9),
+	RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+static const unsigned int sdhi2_data8_mux[] = {
+	SD2_DAT0_MARK, SD2_DAT1_MARK,
+	SD2_DAT2_MARK, SD2_DAT3_MARK,
+	SD2_DAT4_MARK, SD2_DAT5_MARK,
+	SD2_DAT6_MARK, SD2_DAT7_MARK,
+};
+static const unsigned int sdhi2_ctrl_pins[] = {
+	/* CLK, CMD */
+	RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1),
+};
+static const unsigned int sdhi2_ctrl_mux[] = {
+	SD2_CLK_MARK, SD2_CMD_MARK,
+};
+static const unsigned int sdhi2_cd_a_pins[] = {
+	/* CD */
+	RCAR_GP_PIN(4, 13),
+};
+static const unsigned int sdhi2_cd_a_mux[] = {
+	SD2_CD_A_MARK,
+};
+static const unsigned int sdhi2_cd_b_pins[] = {
+	/* CD */
+	RCAR_GP_PIN(5, 10),
+};
+static const unsigned int sdhi2_cd_b_mux[] = {
+	SD2_CD_B_MARK,
+};
+static const unsigned int sdhi2_wp_a_pins[] = {
+	/* WP */
+	RCAR_GP_PIN(4, 14),
+};
+static const unsigned int sdhi2_wp_a_mux[] = {
+	SD2_WP_A_MARK,
+};
+static const unsigned int sdhi2_wp_b_pins[] = {
+	/* WP */
+	RCAR_GP_PIN(5, 11),
+};
+static const unsigned int sdhi2_wp_b_mux[] = {
+	SD2_WP_B_MARK,
+};
+static const unsigned int sdhi2_ds_pins[] = {
+	/* DS */
+	RCAR_GP_PIN(4, 6),
+};
+static const unsigned int sdhi2_ds_mux[] = {
+	SD2_DS_MARK,
+};
+/* - SDHI3 ------------------------------------------------------------------ */
+static const unsigned int sdhi3_data1_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(4, 9),
+};
+static const unsigned int sdhi3_data1_mux[] = {
+	SD3_DAT0_MARK,
+};
+static const unsigned int sdhi3_data4_pins[] = {
+	/* D[0:3] */
+	RCAR_GP_PIN(4, 9),  RCAR_GP_PIN(4, 10),
+	RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+};
+static const unsigned int sdhi3_data4_mux[] = {
+	SD3_DAT0_MARK, SD3_DAT1_MARK,
+	SD3_DAT2_MARK, SD3_DAT3_MARK,
+};
+static const unsigned int sdhi3_data8_pins[] = {
+	/* D[0:7] */
+	RCAR_GP_PIN(4, 9),  RCAR_GP_PIN(4, 10),
+	RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+	RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
+	RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16),
+};
+static const unsigned int sdhi3_data8_mux[] = {
+	SD3_DAT0_MARK, SD3_DAT1_MARK,
+	SD3_DAT2_MARK, SD3_DAT3_MARK,
+	SD3_DAT4_MARK, SD3_DAT5_MARK,
+	SD3_DAT6_MARK, SD3_DAT7_MARK,
+};
+static const unsigned int sdhi3_ctrl_pins[] = {
+	/* CLK, CMD */
+	RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8),
+};
+static const unsigned int sdhi3_ctrl_mux[] = {
+	SD3_CLK_MARK, SD3_CMD_MARK,
+};
+static const unsigned int sdhi3_cd_pins[] = {
+	/* CD */
+	RCAR_GP_PIN(4, 15),
+};
+static const unsigned int sdhi3_cd_mux[] = {
+	SD3_CD_MARK,
+};
+static const unsigned int sdhi3_wp_pins[] = {
+	/* WP */
+	RCAR_GP_PIN(4, 16),
+};
+static const unsigned int sdhi3_wp_mux[] = {
+	SD3_WP_MARK,
+};
+static const unsigned int sdhi3_ds_pins[] = {
+	/* DS */
+	RCAR_GP_PIN(4, 17),
+};
+static const unsigned int sdhi3_ds_mux[] = {
+	SD3_DS_MARK,
+};
+
+/* - SSI -------------------------------------------------------------------- */
+static const unsigned int ssi0_data_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 2),
+};
+static const unsigned int ssi0_data_mux[] = {
+	SSI_SDATA0_MARK,
+};
+static const unsigned int ssi01239_ctrl_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
+};
+static const unsigned int ssi01239_ctrl_mux[] = {
+	SSI_SCK01239_MARK, SSI_WS01239_MARK,
+};
+static const unsigned int ssi1_data_a_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 3),
+};
+static const unsigned int ssi1_data_a_mux[] = {
+	SSI_SDATA1_A_MARK,
+};
+static const unsigned int ssi1_data_b_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(5, 12),
+};
+static const unsigned int ssi1_data_b_mux[] = {
+	SSI_SDATA1_B_MARK,
+};
+static const unsigned int ssi1_ctrl_a_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int ssi1_ctrl_a_mux[] = {
+	SSI_SCK1_A_MARK, SSI_WS1_A_MARK,
+};
+static const unsigned int ssi1_ctrl_b_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi1_ctrl_b_mux[] = {
+	SSI_SCK1_B_MARK, SSI_WS1_B_MARK,
+};
+static const unsigned int ssi2_data_a_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 4),
+};
+static const unsigned int ssi2_data_a_mux[] = {
+	SSI_SDATA2_A_MARK,
+};
+static const unsigned int ssi2_data_b_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(5, 13),
+};
+static const unsigned int ssi2_data_b_mux[] = {
+	SSI_SDATA2_B_MARK,
+};
+static const unsigned int ssi2_ctrl_a_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
+};
+static const unsigned int ssi2_ctrl_a_mux[] = {
+	SSI_SCK2_A_MARK, SSI_WS2_A_MARK,
+};
+static const unsigned int ssi2_ctrl_b_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int ssi2_ctrl_b_mux[] = {
+	SSI_SCK2_B_MARK, SSI_WS2_B_MARK,
+};
+static const unsigned int ssi3_data_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 7),
+};
+static const unsigned int ssi3_data_mux[] = {
+	SSI_SDATA3_MARK,
+};
+static const unsigned int ssi349_ctrl_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int ssi349_ctrl_mux[] = {
+	SSI_SCK349_MARK, SSI_WS349_MARK,
+};
+static const unsigned int ssi4_data_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 10),
+};
+static const unsigned int ssi4_data_mux[] = {
+	SSI_SDATA4_MARK,
+};
+static const unsigned int ssi4_ctrl_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int ssi4_ctrl_mux[] = {
+	SSI_SCK4_MARK, SSI_WS4_MARK,
+};
+static const unsigned int ssi5_data_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 13),
+};
+static const unsigned int ssi5_data_mux[] = {
+	SSI_SDATA5_MARK,
+};
+static const unsigned int ssi5_ctrl_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
+};
+static const unsigned int ssi5_ctrl_mux[] = {
+	SSI_SCK5_MARK, SSI_WS5_MARK,
+};
+static const unsigned int ssi6_data_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 16),
+};
+static const unsigned int ssi6_data_mux[] = {
+	SSI_SDATA6_MARK,
+};
+static const unsigned int ssi6_ctrl_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
+};
+static const unsigned int ssi6_ctrl_mux[] = {
+	SSI_SCK6_MARK, SSI_WS6_MARK,
+};
+static const unsigned int ssi7_data_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 19),
+};
+static const unsigned int ssi7_data_mux[] = {
+	SSI_SDATA7_MARK,
+};
+static const unsigned int ssi78_ctrl_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int ssi78_ctrl_mux[] = {
+	SSI_SCK78_MARK, SSI_WS78_MARK,
+};
+static const unsigned int ssi8_data_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 20),
+};
+static const unsigned int ssi8_data_mux[] = {
+	SSI_SDATA8_MARK,
+};
+static const unsigned int ssi9_data_a_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi9_data_a_mux[] = {
+	SSI_SDATA9_A_MARK,
+};
+static const unsigned int ssi9_data_b_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(5, 14),
+};
+static const unsigned int ssi9_data_b_mux[] = {
+	SSI_SDATA9_B_MARK,
+};
+static const unsigned int ssi9_ctrl_a_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int ssi9_ctrl_a_mux[] = {
+	SSI_SCK9_A_MARK, SSI_WS9_A_MARK,
+};
+static const unsigned int ssi9_ctrl_b_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
+};
+static const unsigned int ssi9_ctrl_b_mux[] = {
+	SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
+};
+
+/* - TMU -------------------------------------------------------------------- */
+static const unsigned int tmu_tclk1_a_pins[] = {
+	/* TCLK */
+	RCAR_GP_PIN(6, 23),
+};
+static const unsigned int tmu_tclk1_a_mux[] = {
+	TCLK1_A_MARK,
+};
+static const unsigned int tmu_tclk1_b_pins[] = {
+	/* TCLK */
+	RCAR_GP_PIN(5, 19),
+};
+static const unsigned int tmu_tclk1_b_mux[] = {
+	TCLK1_B_MARK,
+};
+static const unsigned int tmu_tclk2_a_pins[] = {
+	/* TCLK */
+	RCAR_GP_PIN(6, 19),
+};
+static const unsigned int tmu_tclk2_a_mux[] = {
+	TCLK2_A_MARK,
+};
+static const unsigned int tmu_tclk2_b_pins[] = {
+	/* TCLK */
+	RCAR_GP_PIN(6, 28),
+};
+static const unsigned int tmu_tclk2_b_mux[] = {
+	TCLK2_B_MARK,
+};
+
+/* - TPU ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_pins[] = {
+	/* TPU0TO0 */
+	RCAR_GP_PIN(6, 28),
+};
+static const unsigned int tpu_to0_mux[] = {
+	TPU0TO0_MARK,
+};
+static const unsigned int tpu_to1_pins[] = {
+	/* TPU0TO1 */
+	RCAR_GP_PIN(6, 29),
+};
+static const unsigned int tpu_to1_mux[] = {
+	TPU0TO1_MARK,
+};
+static const unsigned int tpu_to2_pins[] = {
+	/* TPU0TO2 */
+	RCAR_GP_PIN(6, 30),
+};
+static const unsigned int tpu_to2_mux[] = {
+	TPU0TO2_MARK,
+};
+static const unsigned int tpu_to3_pins[] = {
+	/* TPU0TO3 */
+	RCAR_GP_PIN(6, 31),
+};
+static const unsigned int tpu_to3_mux[] = {
+	TPU0TO3_MARK,
+};
+
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pins[] = {
+	/* PWEN, OVC */
+	RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
+};
+static const unsigned int usb0_mux[] = {
+	USB0_PWEN_MARK, USB0_OVC_MARK,
+};
+/* - USB1 ------------------------------------------------------------------- */
+static const unsigned int usb1_pins[] = {
+	/* PWEN, OVC */
+	RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int usb1_mux[] = {
+	USB1_PWEN_MARK, USB1_OVC_MARK,
+};
+/* - USB2 ------------------------------------------------------------------- */
+static const unsigned int usb2_pins[] = {
+	/* PWEN, OVC */
+	RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
+};
+static const unsigned int usb2_mux[] = {
+	USB2_PWEN_MARK, USB2_OVC_MARK,
+};
+
+/* - USB30 ------------------------------------------------------------------ */
+static const unsigned int usb30_pins[] = {
+	/* PWEN, OVC */
+	RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int usb30_mux[] = {
+	USB30_PWEN_MARK, USB30_OVC_MARK,
+};
+/* - USB31 ------------------------------------------------------------------ */
+static const unsigned int usb31_pins[] = {
+	/* PWEN, OVC */
+	RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
+};
+static const unsigned int usb31_mux[] = {
+	USB31_PWEN_MARK, USB31_OVC_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+	SH_PFC_PIN_GROUP(audio_clk_a_a),
+	SH_PFC_PIN_GROUP(audio_clk_a_b),
+	SH_PFC_PIN_GROUP(audio_clk_a_c),
+	SH_PFC_PIN_GROUP(audio_clk_b_a),
+	SH_PFC_PIN_GROUP(audio_clk_b_b),
+	SH_PFC_PIN_GROUP(audio_clk_c_a),
+	SH_PFC_PIN_GROUP(audio_clk_c_b),
+	SH_PFC_PIN_GROUP(audio_clkout_a),
+	SH_PFC_PIN_GROUP(audio_clkout_b),
+	SH_PFC_PIN_GROUP(audio_clkout_c),
+	SH_PFC_PIN_GROUP(audio_clkout_d),
+	SH_PFC_PIN_GROUP(audio_clkout1_a),
+	SH_PFC_PIN_GROUP(audio_clkout1_b),
+	SH_PFC_PIN_GROUP(audio_clkout2_a),
+	SH_PFC_PIN_GROUP(audio_clkout2_b),
+	SH_PFC_PIN_GROUP(audio_clkout3_a),
+	SH_PFC_PIN_GROUP(audio_clkout3_b),
+	SH_PFC_PIN_GROUP(avb_link),
+	SH_PFC_PIN_GROUP(avb_magic),
+	SH_PFC_PIN_GROUP(avb_phy_int),
+	SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio),	/* Deprecated */
+	SH_PFC_PIN_GROUP(avb_mdio),
+	SH_PFC_PIN_GROUP(avb_mii),
+	SH_PFC_PIN_GROUP(avb_avtp_pps),
+	SH_PFC_PIN_GROUP(avb_avtp_match_a),
+	SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+	SH_PFC_PIN_GROUP(avb_avtp_match_b),
+	SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+	SH_PFC_PIN_GROUP(can0_data_a),
+	SH_PFC_PIN_GROUP(can0_data_b),
+	SH_PFC_PIN_GROUP(can1_data),
+	SH_PFC_PIN_GROUP(can_clk),
+	SH_PFC_PIN_GROUP(canfd0_data_a),
+	SH_PFC_PIN_GROUP(canfd0_data_b),
+	SH_PFC_PIN_GROUP(canfd1_data),
+	SH_PFC_PIN_GROUP(drif0_ctrl_a),
+	SH_PFC_PIN_GROUP(drif0_data0_a),
+	SH_PFC_PIN_GROUP(drif0_data1_a),
+	SH_PFC_PIN_GROUP(drif0_ctrl_b),
+	SH_PFC_PIN_GROUP(drif0_data0_b),
+	SH_PFC_PIN_GROUP(drif0_data1_b),
+	SH_PFC_PIN_GROUP(drif0_ctrl_c),
+	SH_PFC_PIN_GROUP(drif0_data0_c),
+	SH_PFC_PIN_GROUP(drif0_data1_c),
+	SH_PFC_PIN_GROUP(drif1_ctrl_a),
+	SH_PFC_PIN_GROUP(drif1_data0_a),
+	SH_PFC_PIN_GROUP(drif1_data1_a),
+	SH_PFC_PIN_GROUP(drif1_ctrl_b),
+	SH_PFC_PIN_GROUP(drif1_data0_b),
+	SH_PFC_PIN_GROUP(drif1_data1_b),
+	SH_PFC_PIN_GROUP(drif1_ctrl_c),
+	SH_PFC_PIN_GROUP(drif1_data0_c),
+	SH_PFC_PIN_GROUP(drif1_data1_c),
+	SH_PFC_PIN_GROUP(drif2_ctrl_a),
+	SH_PFC_PIN_GROUP(drif2_data0_a),
+	SH_PFC_PIN_GROUP(drif2_data1_a),
+	SH_PFC_PIN_GROUP(drif2_ctrl_b),
+	SH_PFC_PIN_GROUP(drif2_data0_b),
+	SH_PFC_PIN_GROUP(drif2_data1_b),
+	SH_PFC_PIN_GROUP(drif3_ctrl_a),
+	SH_PFC_PIN_GROUP(drif3_data0_a),
+	SH_PFC_PIN_GROUP(drif3_data1_a),
+	SH_PFC_PIN_GROUP(drif3_ctrl_b),
+	SH_PFC_PIN_GROUP(drif3_data0_b),
+	SH_PFC_PIN_GROUP(drif3_data1_b),
+	SH_PFC_PIN_GROUP(du_rgb666),
+	SH_PFC_PIN_GROUP(du_rgb888),
+	SH_PFC_PIN_GROUP(du_clk_out_0),
+	SH_PFC_PIN_GROUP(du_clk_out_1),
+	SH_PFC_PIN_GROUP(du_sync),
+	SH_PFC_PIN_GROUP(du_oddf),
+	SH_PFC_PIN_GROUP(du_cde),
+	SH_PFC_PIN_GROUP(du_disp),
+	SH_PFC_PIN_GROUP(hscif0_data),
+	SH_PFC_PIN_GROUP(hscif0_clk),
+	SH_PFC_PIN_GROUP(hscif0_ctrl),
+	SH_PFC_PIN_GROUP(hscif1_data_a),
+	SH_PFC_PIN_GROUP(hscif1_clk_a),
+	SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+	SH_PFC_PIN_GROUP(hscif1_data_b),
+	SH_PFC_PIN_GROUP(hscif1_clk_b),
+	SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+	SH_PFC_PIN_GROUP(hscif2_data_a),
+	SH_PFC_PIN_GROUP(hscif2_clk_a),
+	SH_PFC_PIN_GROUP(hscif2_ctrl_a),
+	SH_PFC_PIN_GROUP(hscif2_data_b),
+	SH_PFC_PIN_GROUP(hscif2_clk_b),
+	SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+	SH_PFC_PIN_GROUP(hscif3_data_a),
+	SH_PFC_PIN_GROUP(hscif3_clk),
+	SH_PFC_PIN_GROUP(hscif3_ctrl),
+	SH_PFC_PIN_GROUP(hscif3_data_b),
+	SH_PFC_PIN_GROUP(hscif3_data_c),
+	SH_PFC_PIN_GROUP(hscif3_data_d),
+	SH_PFC_PIN_GROUP(hscif4_data_a),
+	SH_PFC_PIN_GROUP(hscif4_clk),
+	SH_PFC_PIN_GROUP(hscif4_ctrl),
+	SH_PFC_PIN_GROUP(hscif4_data_b),
+	SH_PFC_PIN_GROUP(i2c0),
+	SH_PFC_PIN_GROUP(i2c1_a),
+	SH_PFC_PIN_GROUP(i2c1_b),
+	SH_PFC_PIN_GROUP(i2c2_a),
+	SH_PFC_PIN_GROUP(i2c2_b),
+	SH_PFC_PIN_GROUP(i2c3),
+	SH_PFC_PIN_GROUP(i2c5),
+	SH_PFC_PIN_GROUP(i2c6_a),
+	SH_PFC_PIN_GROUP(i2c6_b),
+	SH_PFC_PIN_GROUP(i2c6_c),
+	SH_PFC_PIN_GROUP(intc_ex_irq0),
+	SH_PFC_PIN_GROUP(intc_ex_irq1),
+	SH_PFC_PIN_GROUP(intc_ex_irq2),
+	SH_PFC_PIN_GROUP(intc_ex_irq3),
+	SH_PFC_PIN_GROUP(intc_ex_irq4),
+	SH_PFC_PIN_GROUP(intc_ex_irq5),
+	SH_PFC_PIN_GROUP(msiof0_clk),
+	SH_PFC_PIN_GROUP(msiof0_sync),
+	SH_PFC_PIN_GROUP(msiof0_ss1),
+	SH_PFC_PIN_GROUP(msiof0_ss2),
+	SH_PFC_PIN_GROUP(msiof0_txd),
+	SH_PFC_PIN_GROUP(msiof0_rxd),
+	SH_PFC_PIN_GROUP(msiof1_clk_a),
+	SH_PFC_PIN_GROUP(msiof1_sync_a),
+	SH_PFC_PIN_GROUP(msiof1_ss1_a),
+	SH_PFC_PIN_GROUP(msiof1_ss2_a),
+	SH_PFC_PIN_GROUP(msiof1_txd_a),
+	SH_PFC_PIN_GROUP(msiof1_rxd_a),
+	SH_PFC_PIN_GROUP(msiof1_clk_b),
+	SH_PFC_PIN_GROUP(msiof1_sync_b),
+	SH_PFC_PIN_GROUP(msiof1_ss1_b),
+	SH_PFC_PIN_GROUP(msiof1_ss2_b),
+	SH_PFC_PIN_GROUP(msiof1_txd_b),
+	SH_PFC_PIN_GROUP(msiof1_rxd_b),
+	SH_PFC_PIN_GROUP(msiof1_clk_c),
+	SH_PFC_PIN_GROUP(msiof1_sync_c),
+	SH_PFC_PIN_GROUP(msiof1_ss1_c),
+	SH_PFC_PIN_GROUP(msiof1_ss2_c),
+	SH_PFC_PIN_GROUP(msiof1_txd_c),
+	SH_PFC_PIN_GROUP(msiof1_rxd_c),
+	SH_PFC_PIN_GROUP(msiof1_clk_d),
+	SH_PFC_PIN_GROUP(msiof1_sync_d),
+	SH_PFC_PIN_GROUP(msiof1_ss1_d),
+	SH_PFC_PIN_GROUP(msiof1_ss2_d),
+	SH_PFC_PIN_GROUP(msiof1_txd_d),
+	SH_PFC_PIN_GROUP(msiof1_rxd_d),
+	SH_PFC_PIN_GROUP(msiof1_clk_e),
+	SH_PFC_PIN_GROUP(msiof1_sync_e),
+	SH_PFC_PIN_GROUP(msiof1_ss1_e),
+	SH_PFC_PIN_GROUP(msiof1_ss2_e),
+	SH_PFC_PIN_GROUP(msiof1_txd_e),
+	SH_PFC_PIN_GROUP(msiof1_rxd_e),
+	SH_PFC_PIN_GROUP(msiof1_clk_f),
+	SH_PFC_PIN_GROUP(msiof1_sync_f),
+	SH_PFC_PIN_GROUP(msiof1_ss1_f),
+	SH_PFC_PIN_GROUP(msiof1_ss2_f),
+	SH_PFC_PIN_GROUP(msiof1_txd_f),
+	SH_PFC_PIN_GROUP(msiof1_rxd_f),
+	SH_PFC_PIN_GROUP(msiof1_clk_g),
+	SH_PFC_PIN_GROUP(msiof1_sync_g),
+	SH_PFC_PIN_GROUP(msiof1_ss1_g),
+	SH_PFC_PIN_GROUP(msiof1_ss2_g),
+	SH_PFC_PIN_GROUP(msiof1_txd_g),
+	SH_PFC_PIN_GROUP(msiof1_rxd_g),
+	SH_PFC_PIN_GROUP(msiof2_clk_a),
+	SH_PFC_PIN_GROUP(msiof2_sync_a),
+	SH_PFC_PIN_GROUP(msiof2_ss1_a),
+	SH_PFC_PIN_GROUP(msiof2_ss2_a),
+	SH_PFC_PIN_GROUP(msiof2_txd_a),
+	SH_PFC_PIN_GROUP(msiof2_rxd_a),
+	SH_PFC_PIN_GROUP(msiof2_clk_b),
+	SH_PFC_PIN_GROUP(msiof2_sync_b),
+	SH_PFC_PIN_GROUP(msiof2_ss1_b),
+	SH_PFC_PIN_GROUP(msiof2_ss2_b),
+	SH_PFC_PIN_GROUP(msiof2_txd_b),
+	SH_PFC_PIN_GROUP(msiof2_rxd_b),
+	SH_PFC_PIN_GROUP(msiof2_clk_c),
+	SH_PFC_PIN_GROUP(msiof2_sync_c),
+	SH_PFC_PIN_GROUP(msiof2_ss1_c),
+	SH_PFC_PIN_GROUP(msiof2_ss2_c),
+	SH_PFC_PIN_GROUP(msiof2_txd_c),
+	SH_PFC_PIN_GROUP(msiof2_rxd_c),
+	SH_PFC_PIN_GROUP(msiof2_clk_d),
+	SH_PFC_PIN_GROUP(msiof2_sync_d),
+	SH_PFC_PIN_GROUP(msiof2_ss1_d),
+	SH_PFC_PIN_GROUP(msiof2_ss2_d),
+	SH_PFC_PIN_GROUP(msiof2_txd_d),
+	SH_PFC_PIN_GROUP(msiof2_rxd_d),
+	SH_PFC_PIN_GROUP(msiof3_clk_a),
+	SH_PFC_PIN_GROUP(msiof3_sync_a),
+	SH_PFC_PIN_GROUP(msiof3_ss1_a),
+	SH_PFC_PIN_GROUP(msiof3_ss2_a),
+	SH_PFC_PIN_GROUP(msiof3_txd_a),
+	SH_PFC_PIN_GROUP(msiof3_rxd_a),
+	SH_PFC_PIN_GROUP(msiof3_clk_b),
+	SH_PFC_PIN_GROUP(msiof3_sync_b),
+	SH_PFC_PIN_GROUP(msiof3_ss1_b),
+	SH_PFC_PIN_GROUP(msiof3_ss2_b),
+	SH_PFC_PIN_GROUP(msiof3_txd_b),
+	SH_PFC_PIN_GROUP(msiof3_rxd_b),
+	SH_PFC_PIN_GROUP(msiof3_clk_c),
+	SH_PFC_PIN_GROUP(msiof3_sync_c),
+	SH_PFC_PIN_GROUP(msiof3_txd_c),
+	SH_PFC_PIN_GROUP(msiof3_rxd_c),
+	SH_PFC_PIN_GROUP(msiof3_clk_d),
+	SH_PFC_PIN_GROUP(msiof3_sync_d),
+	SH_PFC_PIN_GROUP(msiof3_ss1_d),
+	SH_PFC_PIN_GROUP(msiof3_txd_d),
+	SH_PFC_PIN_GROUP(msiof3_rxd_d),
+	SH_PFC_PIN_GROUP(pwm0),
+	SH_PFC_PIN_GROUP(pwm1_a),
+	SH_PFC_PIN_GROUP(pwm1_b),
+	SH_PFC_PIN_GROUP(pwm2_a),
+	SH_PFC_PIN_GROUP(pwm2_b),
+	SH_PFC_PIN_GROUP(pwm3_a),
+	SH_PFC_PIN_GROUP(pwm3_b),
+	SH_PFC_PIN_GROUP(pwm4_a),
+	SH_PFC_PIN_GROUP(pwm4_b),
+	SH_PFC_PIN_GROUP(pwm5_a),
+	SH_PFC_PIN_GROUP(pwm5_b),
+	SH_PFC_PIN_GROUP(pwm6_a),
+	SH_PFC_PIN_GROUP(pwm6_b),
+	SH_PFC_PIN_GROUP(qspi0_ctrl),
+	SH_PFC_PIN_GROUP(qspi0_data2),
+	SH_PFC_PIN_GROUP(qspi0_data4),
+	SH_PFC_PIN_GROUP(qspi1_ctrl),
+	SH_PFC_PIN_GROUP(qspi1_data2),
+	SH_PFC_PIN_GROUP(qspi1_data4),
+	SH_PFC_PIN_GROUP(sata0_devslp_a),
+	SH_PFC_PIN_GROUP(sata0_devslp_b),
+	SH_PFC_PIN_GROUP(scif0_data),
+	SH_PFC_PIN_GROUP(scif0_clk),
+	SH_PFC_PIN_GROUP(scif0_ctrl),
+	SH_PFC_PIN_GROUP(scif1_data_a),
+	SH_PFC_PIN_GROUP(scif1_clk),
+	SH_PFC_PIN_GROUP(scif1_ctrl),
+	SH_PFC_PIN_GROUP(scif1_data_b),
+	SH_PFC_PIN_GROUP(scif2_data_a),
+	SH_PFC_PIN_GROUP(scif2_clk),
+	SH_PFC_PIN_GROUP(scif2_data_b),
+	SH_PFC_PIN_GROUP(scif3_data_a),
+	SH_PFC_PIN_GROUP(scif3_clk),
+	SH_PFC_PIN_GROUP(scif3_ctrl),
+	SH_PFC_PIN_GROUP(scif3_data_b),
+	SH_PFC_PIN_GROUP(scif4_data_a),
+	SH_PFC_PIN_GROUP(scif4_clk_a),
+	SH_PFC_PIN_GROUP(scif4_ctrl_a),
+	SH_PFC_PIN_GROUP(scif4_data_b),
+	SH_PFC_PIN_GROUP(scif4_clk_b),
+	SH_PFC_PIN_GROUP(scif4_ctrl_b),
+	SH_PFC_PIN_GROUP(scif4_data_c),
+	SH_PFC_PIN_GROUP(scif4_clk_c),
+	SH_PFC_PIN_GROUP(scif4_ctrl_c),
+	SH_PFC_PIN_GROUP(scif5_data),
+	SH_PFC_PIN_GROUP(scif5_clk),
+	SH_PFC_PIN_GROUP(scif_clk_a),
+	SH_PFC_PIN_GROUP(scif_clk_b),
+	SH_PFC_PIN_GROUP(sdhi0_data1),
+	SH_PFC_PIN_GROUP(sdhi0_data4),
+	SH_PFC_PIN_GROUP(sdhi0_ctrl),
+	SH_PFC_PIN_GROUP(sdhi0_cd),
+	SH_PFC_PIN_GROUP(sdhi0_wp),
+	SH_PFC_PIN_GROUP(sdhi1_data1),
+	SH_PFC_PIN_GROUP(sdhi1_data4),
+	SH_PFC_PIN_GROUP(sdhi1_ctrl),
+	SH_PFC_PIN_GROUP(sdhi1_cd),
+	SH_PFC_PIN_GROUP(sdhi1_wp),
+	SH_PFC_PIN_GROUP(sdhi2_data1),
+	SH_PFC_PIN_GROUP(sdhi2_data4),
+	SH_PFC_PIN_GROUP(sdhi2_data8),
+	SH_PFC_PIN_GROUP(sdhi2_ctrl),
+	SH_PFC_PIN_GROUP(sdhi2_cd_a),
+	SH_PFC_PIN_GROUP(sdhi2_wp_a),
+	SH_PFC_PIN_GROUP(sdhi2_cd_b),
+	SH_PFC_PIN_GROUP(sdhi2_wp_b),
+	SH_PFC_PIN_GROUP(sdhi2_ds),
+	SH_PFC_PIN_GROUP(sdhi3_data1),
+	SH_PFC_PIN_GROUP(sdhi3_data4),
+	SH_PFC_PIN_GROUP(sdhi3_data8),
+	SH_PFC_PIN_GROUP(sdhi3_ctrl),
+	SH_PFC_PIN_GROUP(sdhi3_cd),
+	SH_PFC_PIN_GROUP(sdhi3_wp),
+	SH_PFC_PIN_GROUP(sdhi3_ds),
+	SH_PFC_PIN_GROUP(ssi0_data),
+	SH_PFC_PIN_GROUP(ssi01239_ctrl),
+	SH_PFC_PIN_GROUP(ssi1_data_a),
+	SH_PFC_PIN_GROUP(ssi1_data_b),
+	SH_PFC_PIN_GROUP(ssi1_ctrl_a),
+	SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+	SH_PFC_PIN_GROUP(ssi2_data_a),
+	SH_PFC_PIN_GROUP(ssi2_data_b),
+	SH_PFC_PIN_GROUP(ssi2_ctrl_a),
+	SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+	SH_PFC_PIN_GROUP(ssi3_data),
+	SH_PFC_PIN_GROUP(ssi349_ctrl),
+	SH_PFC_PIN_GROUP(ssi4_data),
+	SH_PFC_PIN_GROUP(ssi4_ctrl),
+	SH_PFC_PIN_GROUP(ssi5_data),
+	SH_PFC_PIN_GROUP(ssi5_ctrl),
+	SH_PFC_PIN_GROUP(ssi6_data),
+	SH_PFC_PIN_GROUP(ssi6_ctrl),
+	SH_PFC_PIN_GROUP(ssi7_data),
+	SH_PFC_PIN_GROUP(ssi78_ctrl),
+	SH_PFC_PIN_GROUP(ssi8_data),
+	SH_PFC_PIN_GROUP(ssi9_data_a),
+	SH_PFC_PIN_GROUP(ssi9_data_b),
+	SH_PFC_PIN_GROUP(ssi9_ctrl_a),
+	SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+	SH_PFC_PIN_GROUP(tmu_tclk1_a),
+	SH_PFC_PIN_GROUP(tmu_tclk1_b),
+	SH_PFC_PIN_GROUP(tmu_tclk2_a),
+	SH_PFC_PIN_GROUP(tmu_tclk2_b),
+	SH_PFC_PIN_GROUP(tpu_to0),
+	SH_PFC_PIN_GROUP(tpu_to1),
+	SH_PFC_PIN_GROUP(tpu_to2),
+	SH_PFC_PIN_GROUP(tpu_to3),
+	SH_PFC_PIN_GROUP(usb0),
+	SH_PFC_PIN_GROUP(usb1),
+	SH_PFC_PIN_GROUP(usb2),
+	SH_PFC_PIN_GROUP(usb30),
+	SH_PFC_PIN_GROUP(usb31),
+};
+
+static const char * const audio_clk_groups[] = {
+	"audio_clk_a_a",
+	"audio_clk_a_b",
+	"audio_clk_a_c",
+	"audio_clk_b_a",
+	"audio_clk_b_b",
+	"audio_clk_c_a",
+	"audio_clk_c_b",
+	"audio_clkout_a",
+	"audio_clkout_b",
+	"audio_clkout_c",
+	"audio_clkout_d",
+	"audio_clkout1_a",
+	"audio_clkout1_b",
+	"audio_clkout2_a",
+	"audio_clkout2_b",
+	"audio_clkout3_a",
+	"audio_clkout3_b",
+};
+
+static const char * const avb_groups[] = {
+	"avb_link",
+	"avb_magic",
+	"avb_phy_int",
+	"avb_mdc",	/* Deprecated, please use "avb_mdio" instead */
+	"avb_mdio",
+	"avb_mii",
+	"avb_avtp_pps",
+	"avb_avtp_match_a",
+	"avb_avtp_capture_a",
+	"avb_avtp_match_b",
+	"avb_avtp_capture_b",
+};
+
+static const char * const can0_groups[] = {
+	"can0_data_a",
+	"can0_data_b",
+};
+
+static const char * const can1_groups[] = {
+	"can1_data",
+};
+
+static const char * const can_clk_groups[] = {
+	"can_clk",
+};
+
+static const char * const canfd0_groups[] = {
+	"canfd0_data_a",
+	"canfd0_data_b",
+};
+
+static const char * const canfd1_groups[] = {
+	"canfd1_data",
+};
+
+static const char * const drif0_groups[] = {
+	"drif0_ctrl_a",
+	"drif0_data0_a",
+	"drif0_data1_a",
+	"drif0_ctrl_b",
+	"drif0_data0_b",
+	"drif0_data1_b",
+	"drif0_ctrl_c",
+	"drif0_data0_c",
+	"drif0_data1_c",
+};
+
+static const char * const drif1_groups[] = {
+	"drif1_ctrl_a",
+	"drif1_data0_a",
+	"drif1_data1_a",
+	"drif1_ctrl_b",
+	"drif1_data0_b",
+	"drif1_data1_b",
+	"drif1_ctrl_c",
+	"drif1_data0_c",
+	"drif1_data1_c",
+};
+
+static const char * const drif2_groups[] = {
+	"drif2_ctrl_a",
+	"drif2_data0_a",
+	"drif2_data1_a",
+	"drif2_ctrl_b",
+	"drif2_data0_b",
+	"drif2_data1_b",
+};
+
+static const char * const drif3_groups[] = {
+	"drif3_ctrl_a",
+	"drif3_data0_a",
+	"drif3_data1_a",
+	"drif3_ctrl_b",
+	"drif3_data0_b",
+	"drif3_data1_b",
+};
+
+static const char * const du_groups[] = {
+	"du_rgb666",
+	"du_rgb888",
+	"du_clk_out_0",
+	"du_clk_out_1",
+	"du_sync",
+	"du_oddf",
+	"du_cde",
+	"du_disp",
+};
+
+static const char * const hscif0_groups[] = {
+	"hscif0_data",
+	"hscif0_clk",
+	"hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+	"hscif1_data_a",
+	"hscif1_clk_a",
+	"hscif1_ctrl_a",
+	"hscif1_data_b",
+	"hscif1_clk_b",
+	"hscif1_ctrl_b",
+};
+
+static const char * const hscif2_groups[] = {
+	"hscif2_data_a",
+	"hscif2_clk_a",
+	"hscif2_ctrl_a",
+	"hscif2_data_b",
+	"hscif2_clk_b",
+	"hscif2_ctrl_b",
+};
+
+static const char * const hscif3_groups[] = {
+	"hscif3_data_a",
+	"hscif3_clk",
+	"hscif3_ctrl",
+	"hscif3_data_b",
+	"hscif3_data_c",
+	"hscif3_data_d",
+};
+
+static const char * const hscif4_groups[] = {
+	"hscif4_data_a",
+	"hscif4_clk",
+	"hscif4_ctrl",
+	"hscif4_data_b",
+};
+
+static const char * const i2c0_groups[] = {
+	"i2c0",
+};
+
+static const char * const i2c1_groups[] = {
+	"i2c1_a",
+	"i2c1_b",
+};
+
+static const char * const i2c2_groups[] = {
+	"i2c2_a",
+	"i2c2_b",
+};
+
+static const char * const i2c3_groups[] = {
+	"i2c3",
+};
+
+static const char * const i2c5_groups[] = {
+	"i2c5",
+};
+
+static const char * const i2c6_groups[] = {
+	"i2c6_a",
+	"i2c6_b",
+	"i2c6_c",
+};
+
+static const char * const intc_ex_groups[] = {
+	"intc_ex_irq0",
+	"intc_ex_irq1",
+	"intc_ex_irq2",
+	"intc_ex_irq3",
+	"intc_ex_irq4",
+	"intc_ex_irq5",
+};
+
+static const char * const msiof0_groups[] = {
+	"msiof0_clk",
+	"msiof0_sync",
+	"msiof0_ss1",
+	"msiof0_ss2",
+	"msiof0_txd",
+	"msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+	"msiof1_clk_a",
+	"msiof1_sync_a",
+	"msiof1_ss1_a",
+	"msiof1_ss2_a",
+	"msiof1_txd_a",
+	"msiof1_rxd_a",
+	"msiof1_clk_b",
+	"msiof1_sync_b",
+	"msiof1_ss1_b",
+	"msiof1_ss2_b",
+	"msiof1_txd_b",
+	"msiof1_rxd_b",
+	"msiof1_clk_c",
+	"msiof1_sync_c",
+	"msiof1_ss1_c",
+	"msiof1_ss2_c",
+	"msiof1_txd_c",
+	"msiof1_rxd_c",
+	"msiof1_clk_d",
+	"msiof1_sync_d",
+	"msiof1_ss1_d",
+	"msiof1_ss2_d",
+	"msiof1_txd_d",
+	"msiof1_rxd_d",
+	"msiof1_clk_e",
+	"msiof1_sync_e",
+	"msiof1_ss1_e",
+	"msiof1_ss2_e",
+	"msiof1_txd_e",
+	"msiof1_rxd_e",
+	"msiof1_clk_f",
+	"msiof1_sync_f",
+	"msiof1_ss1_f",
+	"msiof1_ss2_f",
+	"msiof1_txd_f",
+	"msiof1_rxd_f",
+	"msiof1_clk_g",
+	"msiof1_sync_g",
+	"msiof1_ss1_g",
+	"msiof1_ss2_g",
+	"msiof1_txd_g",
+	"msiof1_rxd_g",
+};
+
+static const char * const msiof2_groups[] = {
+	"msiof2_clk_a",
+	"msiof2_sync_a",
+	"msiof2_ss1_a",
+	"msiof2_ss2_a",
+	"msiof2_txd_a",
+	"msiof2_rxd_a",
+	"msiof2_clk_b",
+	"msiof2_sync_b",
+	"msiof2_ss1_b",
+	"msiof2_ss2_b",
+	"msiof2_txd_b",
+	"msiof2_rxd_b",
+	"msiof2_clk_c",
+	"msiof2_sync_c",
+	"msiof2_ss1_c",
+	"msiof2_ss2_c",
+	"msiof2_txd_c",
+	"msiof2_rxd_c",
+	"msiof2_clk_d",
+	"msiof2_sync_d",
+	"msiof2_ss1_d",
+	"msiof2_ss2_d",
+	"msiof2_txd_d",
+	"msiof2_rxd_d",
+};
+
+static const char * const msiof3_groups[] = {
+	"msiof3_clk_a",
+	"msiof3_sync_a",
+	"msiof3_ss1_a",
+	"msiof3_ss2_a",
+	"msiof3_txd_a",
+	"msiof3_rxd_a",
+	"msiof3_clk_b",
+	"msiof3_sync_b",
+	"msiof3_ss1_b",
+	"msiof3_ss2_b",
+	"msiof3_txd_b",
+	"msiof3_rxd_b",
+	"msiof3_clk_c",
+	"msiof3_sync_c",
+	"msiof3_txd_c",
+	"msiof3_rxd_c",
+	"msiof3_clk_d",
+	"msiof3_sync_d",
+	"msiof3_ss1_d",
+	"msiof3_txd_d",
+	"msiof3_rxd_d",
+};
+
+static const char * const pwm0_groups[] = {
+	"pwm0",
+};
+
+static const char * const pwm1_groups[] = {
+	"pwm1_a",
+	"pwm1_b",
+};
+
+static const char * const pwm2_groups[] = {
+	"pwm2_a",
+	"pwm2_b",
+};
+
+static const char * const pwm3_groups[] = {
+	"pwm3_a",
+	"pwm3_b",
+};
+
+static const char * const pwm4_groups[] = {
+	"pwm4_a",
+	"pwm4_b",
+};
+
+static const char * const pwm5_groups[] = {
+	"pwm5_a",
+	"pwm5_b",
+};
+
+static const char * const pwm6_groups[] = {
+	"pwm6_a",
+	"pwm6_b",
+};
+
+static const char * const qspi0_groups[] = {
+	"qspi0_ctrl",
+	"qspi0_data2",
+	"qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+	"qspi1_ctrl",
+	"qspi1_data2",
+	"qspi1_data4",
+};
+
+static const char * const sata0_groups[] = {
+	"sata0_devslp_a",
+	"sata0_devslp_b",
+};
+
+static const char * const scif0_groups[] = {
+	"scif0_data",
+	"scif0_clk",
+	"scif0_ctrl",
+};
+
+static const char * const scif1_groups[] = {
+	"scif1_data_a",
+	"scif1_clk",
+	"scif1_ctrl",
+	"scif1_data_b",
+};
+
+static const char * const scif2_groups[] = {
+	"scif2_data_a",
+	"scif2_clk",
+	"scif2_data_b",
+};
+
+static const char * const scif3_groups[] = {
+	"scif3_data_a",
+	"scif3_clk",
+	"scif3_ctrl",
+	"scif3_data_b",
+};
+
+static const char * const scif4_groups[] = {
+	"scif4_data_a",
+	"scif4_clk_a",
+	"scif4_ctrl_a",
+	"scif4_data_b",
+	"scif4_clk_b",
+	"scif4_ctrl_b",
+	"scif4_data_c",
+	"scif4_clk_c",
+	"scif4_ctrl_c",
+};
+
+static const char * const scif5_groups[] = {
+	"scif5_data",
+	"scif5_clk",
+};
+
+static const char * const scif_clk_groups[] = {
+	"scif_clk_a",
+	"scif_clk_b",
+};
+
+static const char * const sdhi0_groups[] = {
+	"sdhi0_data1",
+	"sdhi0_data4",
+	"sdhi0_ctrl",
+	"sdhi0_cd",
+	"sdhi0_wp",
+};
+
+static const char * const sdhi1_groups[] = {
+	"sdhi1_data1",
+	"sdhi1_data4",
+	"sdhi1_ctrl",
+	"sdhi1_cd",
+	"sdhi1_wp",
+};
+
+static const char * const sdhi2_groups[] = {
+	"sdhi2_data1",
+	"sdhi2_data4",
+	"sdhi2_data8",
+	"sdhi2_ctrl",
+	"sdhi2_cd_a",
+	"sdhi2_wp_a",
+	"sdhi2_cd_b",
+	"sdhi2_wp_b",
+	"sdhi2_ds",
+};
+
+static const char * const sdhi3_groups[] = {
+	"sdhi3_data1",
+	"sdhi3_data4",
+	"sdhi3_data8",
+	"sdhi3_ctrl",
+	"sdhi3_cd",
+	"sdhi3_wp",
+	"sdhi3_ds",
+};
+
+static const char * const ssi_groups[] = {
+	"ssi0_data",
+	"ssi01239_ctrl",
+	"ssi1_data_a",
+	"ssi1_data_b",
+	"ssi1_ctrl_a",
+	"ssi1_ctrl_b",
+	"ssi2_data_a",
+	"ssi2_data_b",
+	"ssi2_ctrl_a",
+	"ssi2_ctrl_b",
+	"ssi3_data",
+	"ssi349_ctrl",
+	"ssi4_data",
+	"ssi4_ctrl",
+	"ssi5_data",
+	"ssi5_ctrl",
+	"ssi6_data",
+	"ssi6_ctrl",
+	"ssi7_data",
+	"ssi78_ctrl",
+	"ssi8_data",
+	"ssi9_data_a",
+	"ssi9_data_b",
+	"ssi9_ctrl_a",
+	"ssi9_ctrl_b",
+};
+
+static const char * const tmu_groups[] = {
+	"tmu_tclk1_a",
+	"tmu_tclk1_b",
+	"tmu_tclk2_a",
+	"tmu_tclk2_b",
+};
+
+static const char * const tpu_groups[] = {
+	"tpu_to0",
+	"tpu_to1",
+	"tpu_to2",
+	"tpu_to3",
+};
+
+static const char * const usb0_groups[] = {
+	"usb0",
+};
+
+static const char * const usb1_groups[] = {
+	"usb1",
+};
+
+static const char * const usb2_groups[] = {
+	"usb2",
+};
+
+static const char * const usb30_groups[] = {
+	"usb30",
+};
+
+static const char * const usb31_groups[] = {
+	"usb31",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+	SH_PFC_FUNCTION(audio_clk),
+	SH_PFC_FUNCTION(avb),
+	SH_PFC_FUNCTION(can0),
+	SH_PFC_FUNCTION(can1),
+	SH_PFC_FUNCTION(can_clk),
+	SH_PFC_FUNCTION(canfd0),
+	SH_PFC_FUNCTION(canfd1),
+	SH_PFC_FUNCTION(drif0),
+	SH_PFC_FUNCTION(drif1),
+	SH_PFC_FUNCTION(drif2),
+	SH_PFC_FUNCTION(drif3),
+	SH_PFC_FUNCTION(du),
+	SH_PFC_FUNCTION(hscif0),
+	SH_PFC_FUNCTION(hscif1),
+	SH_PFC_FUNCTION(hscif2),
+	SH_PFC_FUNCTION(hscif3),
+	SH_PFC_FUNCTION(hscif4),
+	SH_PFC_FUNCTION(i2c0),
+	SH_PFC_FUNCTION(i2c1),
+	SH_PFC_FUNCTION(i2c2),
+	SH_PFC_FUNCTION(i2c3),
+	SH_PFC_FUNCTION(i2c5),
+	SH_PFC_FUNCTION(i2c6),
+	SH_PFC_FUNCTION(intc_ex),
+	SH_PFC_FUNCTION(msiof0),
+	SH_PFC_FUNCTION(msiof1),
+	SH_PFC_FUNCTION(msiof2),
+	SH_PFC_FUNCTION(msiof3),
+	SH_PFC_FUNCTION(pwm0),
+	SH_PFC_FUNCTION(pwm1),
+	SH_PFC_FUNCTION(pwm2),
+	SH_PFC_FUNCTION(pwm3),
+	SH_PFC_FUNCTION(pwm4),
+	SH_PFC_FUNCTION(pwm5),
+	SH_PFC_FUNCTION(pwm6),
+	SH_PFC_FUNCTION(qspi0),
+	SH_PFC_FUNCTION(qspi1),
+	SH_PFC_FUNCTION(sata0),
+	SH_PFC_FUNCTION(scif0),
+	SH_PFC_FUNCTION(scif1),
+	SH_PFC_FUNCTION(scif2),
+	SH_PFC_FUNCTION(scif3),
+	SH_PFC_FUNCTION(scif4),
+	SH_PFC_FUNCTION(scif5),
+	SH_PFC_FUNCTION(scif_clk),
+	SH_PFC_FUNCTION(sdhi0),
+	SH_PFC_FUNCTION(sdhi1),
+	SH_PFC_FUNCTION(sdhi2),
+	SH_PFC_FUNCTION(sdhi3),
+	SH_PFC_FUNCTION(ssi),
+	SH_PFC_FUNCTION(tmu),
+	SH_PFC_FUNCTION(tpu),
+	SH_PFC_FUNCTION(usb0),
+	SH_PFC_FUNCTION(usb1),
+	SH_PFC_FUNCTION(usb2),
+	SH_PFC_FUNCTION(usb30),
+	SH_PFC_FUNCTION(usb31),
+};
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+#define F_(x, y)	FN_##y
+#define FM(x)		FN_##x
+	{ PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_0_15_FN,	GPSR0_15,
+		GP_0_14_FN,	GPSR0_14,
+		GP_0_13_FN,	GPSR0_13,
+		GP_0_12_FN,	GPSR0_12,
+		GP_0_11_FN,	GPSR0_11,
+		GP_0_10_FN,	GPSR0_10,
+		GP_0_9_FN,	GPSR0_9,
+		GP_0_8_FN,	GPSR0_8,
+		GP_0_7_FN,	GPSR0_7,
+		GP_0_6_FN,	GPSR0_6,
+		GP_0_5_FN,	GPSR0_5,
+		GP_0_4_FN,	GPSR0_4,
+		GP_0_3_FN,	GPSR0_3,
+		GP_0_2_FN,	GPSR0_2,
+		GP_0_1_FN,	GPSR0_1,
+		GP_0_0_FN,	GPSR0_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_1_27_FN,	GPSR1_27,
+		GP_1_26_FN,	GPSR1_26,
+		GP_1_25_FN,	GPSR1_25,
+		GP_1_24_FN,	GPSR1_24,
+		GP_1_23_FN,	GPSR1_23,
+		GP_1_22_FN,	GPSR1_22,
+		GP_1_21_FN,	GPSR1_21,
+		GP_1_20_FN,	GPSR1_20,
+		GP_1_19_FN,	GPSR1_19,
+		GP_1_18_FN,	GPSR1_18,
+		GP_1_17_FN,	GPSR1_17,
+		GP_1_16_FN,	GPSR1_16,
+		GP_1_15_FN,	GPSR1_15,
+		GP_1_14_FN,	GPSR1_14,
+		GP_1_13_FN,	GPSR1_13,
+		GP_1_12_FN,	GPSR1_12,
+		GP_1_11_FN,	GPSR1_11,
+		GP_1_10_FN,	GPSR1_10,
+		GP_1_9_FN,	GPSR1_9,
+		GP_1_8_FN,	GPSR1_8,
+		GP_1_7_FN,	GPSR1_7,
+		GP_1_6_FN,	GPSR1_6,
+		GP_1_5_FN,	GPSR1_5,
+		GP_1_4_FN,	GPSR1_4,
+		GP_1_3_FN,	GPSR1_3,
+		GP_1_2_FN,	GPSR1_2,
+		GP_1_1_FN,	GPSR1_1,
+		GP_1_0_FN,	GPSR1_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_2_14_FN,	GPSR2_14,
+		GP_2_13_FN,	GPSR2_13,
+		GP_2_12_FN,	GPSR2_12,
+		GP_2_11_FN,	GPSR2_11,
+		GP_2_10_FN,	GPSR2_10,
+		GP_2_9_FN,	GPSR2_9,
+		GP_2_8_FN,	GPSR2_8,
+		GP_2_7_FN,	GPSR2_7,
+		GP_2_6_FN,	GPSR2_6,
+		GP_2_5_FN,	GPSR2_5,
+		GP_2_4_FN,	GPSR2_4,
+		GP_2_3_FN,	GPSR2_3,
+		GP_2_2_FN,	GPSR2_2,
+		GP_2_1_FN,	GPSR2_1,
+		GP_2_0_FN,	GPSR2_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_3_15_FN,	GPSR3_15,
+		GP_3_14_FN,	GPSR3_14,
+		GP_3_13_FN,	GPSR3_13,
+		GP_3_12_FN,	GPSR3_12,
+		GP_3_11_FN,	GPSR3_11,
+		GP_3_10_FN,	GPSR3_10,
+		GP_3_9_FN,	GPSR3_9,
+		GP_3_8_FN,	GPSR3_8,
+		GP_3_7_FN,	GPSR3_7,
+		GP_3_6_FN,	GPSR3_6,
+		GP_3_5_FN,	GPSR3_5,
+		GP_3_4_FN,	GPSR3_4,
+		GP_3_3_FN,	GPSR3_3,
+		GP_3_2_FN,	GPSR3_2,
+		GP_3_1_FN,	GPSR3_1,
+		GP_3_0_FN,	GPSR3_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_4_17_FN,	GPSR4_17,
+		GP_4_16_FN,	GPSR4_16,
+		GP_4_15_FN,	GPSR4_15,
+		GP_4_14_FN,	GPSR4_14,
+		GP_4_13_FN,	GPSR4_13,
+		GP_4_12_FN,	GPSR4_12,
+		GP_4_11_FN,	GPSR4_11,
+		GP_4_10_FN,	GPSR4_10,
+		GP_4_9_FN,	GPSR4_9,
+		GP_4_8_FN,	GPSR4_8,
+		GP_4_7_FN,	GPSR4_7,
+		GP_4_6_FN,	GPSR4_6,
+		GP_4_5_FN,	GPSR4_5,
+		GP_4_4_FN,	GPSR4_4,
+		GP_4_3_FN,	GPSR4_3,
+		GP_4_2_FN,	GPSR4_2,
+		GP_4_1_FN,	GPSR4_1,
+		GP_4_0_FN,	GPSR4_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_5_25_FN,	GPSR5_25,
+		GP_5_24_FN,	GPSR5_24,
+		GP_5_23_FN,	GPSR5_23,
+		GP_5_22_FN,	GPSR5_22,
+		GP_5_21_FN,	GPSR5_21,
+		GP_5_20_FN,	GPSR5_20,
+		GP_5_19_FN,	GPSR5_19,
+		GP_5_18_FN,	GPSR5_18,
+		GP_5_17_FN,	GPSR5_17,
+		GP_5_16_FN,	GPSR5_16,
+		GP_5_15_FN,	GPSR5_15,
+		GP_5_14_FN,	GPSR5_14,
+		GP_5_13_FN,	GPSR5_13,
+		GP_5_12_FN,	GPSR5_12,
+		GP_5_11_FN,	GPSR5_11,
+		GP_5_10_FN,	GPSR5_10,
+		GP_5_9_FN,	GPSR5_9,
+		GP_5_8_FN,	GPSR5_8,
+		GP_5_7_FN,	GPSR5_7,
+		GP_5_6_FN,	GPSR5_6,
+		GP_5_5_FN,	GPSR5_5,
+		GP_5_4_FN,	GPSR5_4,
+		GP_5_3_FN,	GPSR5_3,
+		GP_5_2_FN,	GPSR5_2,
+		GP_5_1_FN,	GPSR5_1,
+		GP_5_0_FN,	GPSR5_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1, GROUP(
+		GP_6_31_FN,	GPSR6_31,
+		GP_6_30_FN,	GPSR6_30,
+		GP_6_29_FN,	GPSR6_29,
+		GP_6_28_FN,	GPSR6_28,
+		GP_6_27_FN,	GPSR6_27,
+		GP_6_26_FN,	GPSR6_26,
+		GP_6_25_FN,	GPSR6_25,
+		GP_6_24_FN,	GPSR6_24,
+		GP_6_23_FN,	GPSR6_23,
+		GP_6_22_FN,	GPSR6_22,
+		GP_6_21_FN,	GPSR6_21,
+		GP_6_20_FN,	GPSR6_20,
+		GP_6_19_FN,	GPSR6_19,
+		GP_6_18_FN,	GPSR6_18,
+		GP_6_17_FN,	GPSR6_17,
+		GP_6_16_FN,	GPSR6_16,
+		GP_6_15_FN,	GPSR6_15,
+		GP_6_14_FN,	GPSR6_14,
+		GP_6_13_FN,	GPSR6_13,
+		GP_6_12_FN,	GPSR6_12,
+		GP_6_11_FN,	GPSR6_11,
+		GP_6_10_FN,	GPSR6_10,
+		GP_6_9_FN,	GPSR6_9,
+		GP_6_8_FN,	GPSR6_8,
+		GP_6_7_FN,	GPSR6_7,
+		GP_6_6_FN,	GPSR6_6,
+		GP_6_5_FN,	GPSR6_5,
+		GP_6_4_FN,	GPSR6_4,
+		GP_6_3_FN,	GPSR6_3,
+		GP_6_2_FN,	GPSR6_2,
+		GP_6_1_FN,	GPSR6_1,
+		GP_6_0_FN,	GPSR6_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_7_3_FN, GPSR7_3,
+		GP_7_2_FN, GPSR7_2,
+		GP_7_1_FN, GPSR7_1,
+		GP_7_0_FN, GPSR7_0, ))
+	},
+#undef F_
+#undef FM
+
+#define F_(x, y)	x,
+#define FM(x)		FN_##x,
+	{ PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4, GROUP(
+		IP0_31_28
+		IP0_27_24
+		IP0_23_20
+		IP0_19_16
+		IP0_15_12
+		IP0_11_8
+		IP0_7_4
+		IP0_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4, GROUP(
+		IP1_31_28
+		IP1_27_24
+		IP1_23_20
+		IP1_19_16
+		IP1_15_12
+		IP1_11_8
+		IP1_7_4
+		IP1_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4, GROUP(
+		IP2_31_28
+		IP2_27_24
+		IP2_23_20
+		IP2_19_16
+		IP2_15_12
+		IP2_11_8
+		IP2_7_4
+		IP2_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4, GROUP(
+		IP3_31_28
+		IP3_27_24
+		IP3_23_20
+		IP3_19_16
+		IP3_15_12
+		IP3_11_8
+		IP3_7_4
+		IP3_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4, GROUP(
+		IP4_31_28
+		IP4_27_24
+		IP4_23_20
+		IP4_19_16
+		IP4_15_12
+		IP4_11_8
+		IP4_7_4
+		IP4_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4, GROUP(
+		IP5_31_28
+		IP5_27_24
+		IP5_23_20
+		IP5_19_16
+		IP5_15_12
+		IP5_11_8
+		IP5_7_4
+		IP5_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4, GROUP(
+		IP6_31_28
+		IP6_27_24
+		IP6_23_20
+		IP6_19_16
+		IP6_15_12
+		IP6_11_8
+		IP6_7_4
+		IP6_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
+		IP7_31_28
+		IP7_27_24
+		IP7_23_20
+		IP7_19_16
+		IP7_15_12
+		IP7_11_8
+		IP7_7_4
+		IP7_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4, GROUP(
+		IP8_31_28
+		IP8_27_24
+		IP8_23_20
+		IP8_19_16
+		IP8_15_12
+		IP8_11_8
+		IP8_7_4
+		IP8_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4, GROUP(
+		IP9_31_28
+		IP9_27_24
+		IP9_23_20
+		IP9_19_16
+		IP9_15_12
+		IP9_11_8
+		IP9_7_4
+		IP9_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4, GROUP(
+		IP10_31_28
+		IP10_27_24
+		IP10_23_20
+		IP10_19_16
+		IP10_15_12
+		IP10_11_8
+		IP10_7_4
+		IP10_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4, GROUP(
+		IP11_31_28
+		IP11_27_24
+		IP11_23_20
+		IP11_19_16
+		IP11_15_12
+		IP11_11_8
+		IP11_7_4
+		IP11_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4, GROUP(
+		IP12_31_28
+		IP12_27_24
+		IP12_23_20
+		IP12_19_16
+		IP12_15_12
+		IP12_11_8
+		IP12_7_4
+		IP12_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4, GROUP(
+		IP13_31_28
+		IP13_27_24
+		IP13_23_20
+		IP13_19_16
+		IP13_15_12
+		IP13_11_8
+		IP13_7_4
+		IP13_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4, GROUP(
+		IP14_31_28
+		IP14_27_24
+		IP14_23_20
+		IP14_19_16
+		IP14_15_12
+		IP14_11_8
+		IP14_7_4
+		IP14_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4, GROUP(
+		IP15_31_28
+		IP15_27_24
+		IP15_23_20
+		IP15_19_16
+		IP15_15_12
+		IP15_11_8
+		IP15_7_4
+		IP15_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4, GROUP(
+		IP16_31_28
+		IP16_27_24
+		IP16_23_20
+		IP16_19_16
+		IP16_15_12
+		IP16_11_8
+		IP16_7_4
+		IP16_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4, GROUP(
+		/* IP17_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		/* IP17_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		/* IP17_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		/* IP17_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		/* IP17_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		/* IP17_11_8  */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		IP17_7_4
+		IP17_3_0 ))
+	},
+#undef F_
+#undef FM
+
+#define F_(x, y)	x,
+#define FM(x)		FN_##x,
+	{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
+			     GROUP(1, 2, 2, 3, 1, 1, 2, 1, 1, 1, 2, 1,
+				   1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 1),
+			     GROUP(
+		0, 0, /* RESERVED 31 */
+		MOD_SEL0_30_29
+		MOD_SEL0_28_27
+		MOD_SEL0_26_25_24
+		MOD_SEL0_23
+		MOD_SEL0_22
+		MOD_SEL0_21_20
+		MOD_SEL0_19
+		MOD_SEL0_18
+		MOD_SEL0_17
+		MOD_SEL0_16_15
+		MOD_SEL0_14
+		MOD_SEL0_13
+		MOD_SEL0_12
+		MOD_SEL0_11
+		MOD_SEL0_10
+		MOD_SEL0_9
+		MOD_SEL0_8
+		MOD_SEL0_7_6
+		MOD_SEL0_5_4
+		MOD_SEL0_3
+		MOD_SEL0_2_1
+		0, 0, /* RESERVED 0 */ ))
+	},
+	{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
+			     GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1,
+				   1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1),
+			     GROUP(
+		MOD_SEL1_31_30
+		MOD_SEL1_29_28_27
+		MOD_SEL1_26
+		MOD_SEL1_25_24
+		MOD_SEL1_23_22_21
+		MOD_SEL1_20
+		MOD_SEL1_19
+		MOD_SEL1_18_17
+		MOD_SEL1_16
+		MOD_SEL1_15_14
+		MOD_SEL1_13
+		MOD_SEL1_12
+		MOD_SEL1_11
+		MOD_SEL1_10
+		MOD_SEL1_9
+		0, 0, 0, 0, /* RESERVED 8, 7 */
+		MOD_SEL1_6
+		MOD_SEL1_5
+		MOD_SEL1_4
+		MOD_SEL1_3
+		MOD_SEL1_2
+		MOD_SEL1_1
+		MOD_SEL1_0 ))
+	},
+	{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
+			     GROUP(1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 1, 2, 1),
+			     GROUP(
+		MOD_SEL2_31
+		MOD_SEL2_30
+		MOD_SEL2_29
+		/* RESERVED 28 */
+		0, 0,
+		/* RESERVED 27, 26, 25, 24 */
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		/* RESERVED 23, 22, 21, 20 */
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		/* RESERVED 19, 18, 17, 16 */
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		/* RESERVED 15, 14, 13, 12 */
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		/* RESERVED 11, 10, 9, 8 */
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		/* RESERVED 7, 6, 5, 4 */
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		/* RESERVED 3 */
+		0, 0,
+		/* RESERVED 2, 1 */
+		0, 0, 0, 0,
+		MOD_SEL2_0 ))
+	},
+	{ },
+};
+
+static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+	{ PINMUX_DRIVE_REG("DRVCTRL0", 0xe6060300) {
+		{ PIN_QSPI0_SPCLK,    28, 2 },	/* QSPI0_SPCLK */
+		{ PIN_QSPI0_MOSI_IO0, 24, 2 },	/* QSPI0_MOSI_IO0 */
+		{ PIN_QSPI0_MISO_IO1, 20, 2 },	/* QSPI0_MISO_IO1 */
+		{ PIN_QSPI0_IO2,      16, 2 },	/* QSPI0_IO2 */
+		{ PIN_QSPI0_IO3,      12, 2 },	/* QSPI0_IO3 */
+		{ PIN_QSPI0_SSL,       8, 2 },	/* QSPI0_SSL */
+		{ PIN_QSPI1_SPCLK,     4, 2 },	/* QSPI1_SPCLK */
+		{ PIN_QSPI1_MOSI_IO0,  0, 2 },	/* QSPI1_MOSI_IO0 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL1", 0xe6060304) {
+		{ PIN_QSPI1_MISO_IO1, 28, 2 },	/* QSPI1_MISO_IO1 */
+		{ PIN_QSPI1_IO2,      24, 2 },	/* QSPI1_IO2 */
+		{ PIN_QSPI1_IO3,      20, 2 },	/* QSPI1_IO3 */
+		{ PIN_QSPI1_SSL,      16, 2 },	/* QSPI1_SSL */
+		{ PIN_RPC_INT_N,      12, 2 },	/* RPC_INT# */
+		{ PIN_RPC_WP_N,        8, 2 },	/* RPC_WP# */
+		{ PIN_RPC_RESET_N,     4, 2 },	/* RPC_RESET# */
+		{ PIN_AVB_RX_CTL,      0, 3 },	/* AVB_RX_CTL */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL2", 0xe6060308) {
+		{ PIN_AVB_RXC,        28, 3 },	/* AVB_RXC */
+		{ PIN_AVB_RD0,        24, 3 },	/* AVB_RD0 */
+		{ PIN_AVB_RD1,        20, 3 },	/* AVB_RD1 */
+		{ PIN_AVB_RD2,        16, 3 },	/* AVB_RD2 */
+		{ PIN_AVB_RD3,        12, 3 },	/* AVB_RD3 */
+		{ PIN_AVB_TX_CTL,      8, 3 },	/* AVB_TX_CTL */
+		{ PIN_AVB_TXC,         4, 3 },	/* AVB_TXC */
+		{ PIN_AVB_TD0,         0, 3 },	/* AVB_TD0 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL3", 0xe606030c) {
+		{ PIN_AVB_TD1,        28, 3 },	/* AVB_TD1 */
+		{ PIN_AVB_TD2,        24, 3 },	/* AVB_TD2 */
+		{ PIN_AVB_TD3,        20, 3 },	/* AVB_TD3 */
+		{ PIN_AVB_TXCREFCLK,  16, 3 },	/* AVB_TXCREFCLK */
+		{ PIN_AVB_MDIO,       12, 3 },	/* AVB_MDIO */
+		{ RCAR_GP_PIN(2,  9),  8, 3 },	/* AVB_MDC */
+		{ RCAR_GP_PIN(2, 10),  4, 3 },	/* AVB_MAGIC */
+		{ RCAR_GP_PIN(2, 11),  0, 3 },	/* AVB_PHY_INT */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL4", 0xe6060310) {
+		{ RCAR_GP_PIN(2, 12), 28, 3 },	/* AVB_LINK */
+		{ RCAR_GP_PIN(2, 13), 24, 3 },	/* AVB_AVTP_MATCH */
+		{ RCAR_GP_PIN(2, 14), 20, 3 },	/* AVB_AVTP_CAPTURE */
+		{ RCAR_GP_PIN(2,  0), 16, 3 },	/* IRQ0 */
+		{ RCAR_GP_PIN(2,  1), 12, 3 },	/* IRQ1 */
+		{ RCAR_GP_PIN(2,  2),  8, 3 },	/* IRQ2 */
+		{ RCAR_GP_PIN(2,  3),  4, 3 },	/* IRQ3 */
+		{ RCAR_GP_PIN(2,  4),  0, 3 },	/* IRQ4 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL5", 0xe6060314) {
+		{ RCAR_GP_PIN(2,  5), 28, 3 },	/* IRQ5 */
+		{ RCAR_GP_PIN(2,  6), 24, 3 },	/* PWM0 */
+		{ RCAR_GP_PIN(2,  7), 20, 3 },	/* PWM1 */
+		{ RCAR_GP_PIN(2,  8), 16, 3 },	/* PWM2 */
+		{ RCAR_GP_PIN(1,  0), 12, 3 },	/* A0 */
+		{ RCAR_GP_PIN(1,  1),  8, 3 },	/* A1 */
+		{ RCAR_GP_PIN(1,  2),  4, 3 },	/* A2 */
+		{ RCAR_GP_PIN(1,  3),  0, 3 },	/* A3 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL6", 0xe6060318) {
+		{ RCAR_GP_PIN(1,  4), 28, 3 },	/* A4 */
+		{ RCAR_GP_PIN(1,  5), 24, 3 },	/* A5 */
+		{ RCAR_GP_PIN(1,  6), 20, 3 },	/* A6 */
+		{ RCAR_GP_PIN(1,  7), 16, 3 },	/* A7 */
+		{ RCAR_GP_PIN(1,  8), 12, 3 },	/* A8 */
+		{ RCAR_GP_PIN(1,  9),  8, 3 },	/* A9 */
+		{ RCAR_GP_PIN(1, 10),  4, 3 },	/* A10 */
+		{ RCAR_GP_PIN(1, 11),  0, 3 },	/* A11 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL7", 0xe606031c) {
+		{ RCAR_GP_PIN(1, 12), 28, 3 },	/* A12 */
+		{ RCAR_GP_PIN(1, 13), 24, 3 },	/* A13 */
+		{ RCAR_GP_PIN(1, 14), 20, 3 },	/* A14 */
+		{ RCAR_GP_PIN(1, 15), 16, 3 },	/* A15 */
+		{ RCAR_GP_PIN(1, 16), 12, 3 },	/* A16 */
+		{ RCAR_GP_PIN(1, 17),  8, 3 },	/* A17 */
+		{ RCAR_GP_PIN(1, 18),  4, 3 },	/* A18 */
+		{ RCAR_GP_PIN(1, 19),  0, 3 },	/* A19 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL8", 0xe6060320) {
+		{ PIN_CLKOUT,         28, 3 },	/* CLKOUT */
+		{ RCAR_GP_PIN(1, 20), 24, 3 },	/* CS0 */
+		{ RCAR_GP_PIN(1, 21), 20, 3 },	/* CS1_A26 */
+		{ RCAR_GP_PIN(1, 22), 16, 3 },	/* BS */
+		{ RCAR_GP_PIN(1, 23), 12, 3 },	/* RD */
+		{ RCAR_GP_PIN(1, 24),  8, 3 },	/* RD_WR */
+		{ RCAR_GP_PIN(1, 25),  4, 3 },	/* WE0 */
+		{ RCAR_GP_PIN(1, 26),  0, 3 },	/* WE1 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL9", 0xe6060324) {
+		{ RCAR_GP_PIN(1, 27), 28, 3 },	/* EX_WAIT0 */
+		{ PIN_PRESETOUT_N,    24, 3 },	/* PRESETOUT# */
+		{ RCAR_GP_PIN(0,  0), 20, 3 },	/* D0 */
+		{ RCAR_GP_PIN(0,  1), 16, 3 },	/* D1 */
+		{ RCAR_GP_PIN(0,  2), 12, 3 },	/* D2 */
+		{ RCAR_GP_PIN(0,  3),  8, 3 },	/* D3 */
+		{ RCAR_GP_PIN(0,  4),  4, 3 },	/* D4 */
+		{ RCAR_GP_PIN(0,  5),  0, 3 },	/* D5 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL10", 0xe6060328) {
+		{ RCAR_GP_PIN(0,  6), 28, 3 },	/* D6 */
+		{ RCAR_GP_PIN(0,  7), 24, 3 },	/* D7 */
+		{ RCAR_GP_PIN(0,  8), 20, 3 },	/* D8 */
+		{ RCAR_GP_PIN(0,  9), 16, 3 },	/* D9 */
+		{ RCAR_GP_PIN(0, 10), 12, 3 },	/* D10 */
+		{ RCAR_GP_PIN(0, 11),  8, 3 },	/* D11 */
+		{ RCAR_GP_PIN(0, 12),  4, 3 },	/* D12 */
+		{ RCAR_GP_PIN(0, 13),  0, 3 },	/* D13 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL11", 0xe606032c) {
+		{ RCAR_GP_PIN(0, 14), 28, 3 },	/* D14 */
+		{ RCAR_GP_PIN(0, 15), 24, 3 },	/* D15 */
+		{ RCAR_GP_PIN(7,  0), 20, 3 },	/* AVS1 */
+		{ RCAR_GP_PIN(7,  1), 16, 3 },	/* AVS2 */
+		{ RCAR_GP_PIN(7,  2), 12, 3 },	/* GP7_02 */
+		{ RCAR_GP_PIN(7,  3),  8, 3 },	/* GP7_03 */
+		{ PIN_DU_DOTCLKIN0,    4, 2 },	/* DU_DOTCLKIN0 */
+		{ PIN_DU_DOTCLKIN1,    0, 2 },	/* DU_DOTCLKIN1 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
+		{ PIN_DU_DOTCLKIN2,   28, 2 },	/* DU_DOTCLKIN2 */
+		{ PIN_DU_DOTCLKIN3,   24, 2 },	/* DU_DOTCLKIN3 */
+		{ PIN_FSCLKST_N,      20, 2 },	/* FSCLKST# */
+		{ PIN_TMS,             4, 2 },	/* TMS */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL13", 0xe6060334) {
+		{ PIN_TDO,            28, 2 },	/* TDO */
+		{ PIN_ASEBRK,         24, 2 },	/* ASEBRK */
+		{ RCAR_GP_PIN(3,  0), 20, 3 },	/* SD0_CLK */
+		{ RCAR_GP_PIN(3,  1), 16, 3 },	/* SD0_CMD */
+		{ RCAR_GP_PIN(3,  2), 12, 3 },	/* SD0_DAT0 */
+		{ RCAR_GP_PIN(3,  3),  8, 3 },	/* SD0_DAT1 */
+		{ RCAR_GP_PIN(3,  4),  4, 3 },	/* SD0_DAT2 */
+		{ RCAR_GP_PIN(3,  5),  0, 3 },	/* SD0_DAT3 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL14", 0xe6060338) {
+		{ RCAR_GP_PIN(3,  6), 28, 3 },	/* SD1_CLK */
+		{ RCAR_GP_PIN(3,  7), 24, 3 },	/* SD1_CMD */
+		{ RCAR_GP_PIN(3,  8), 20, 3 },	/* SD1_DAT0 */
+		{ RCAR_GP_PIN(3,  9), 16, 3 },	/* SD1_DAT1 */
+		{ RCAR_GP_PIN(3, 10), 12, 3 },	/* SD1_DAT2 */
+		{ RCAR_GP_PIN(3, 11),  8, 3 },	/* SD1_DAT3 */
+		{ RCAR_GP_PIN(4,  0),  4, 3 },	/* SD2_CLK */
+		{ RCAR_GP_PIN(4,  1),  0, 3 },	/* SD2_CMD */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL15", 0xe606033c) {
+		{ RCAR_GP_PIN(4,  2), 28, 3 },	/* SD2_DAT0 */
+		{ RCAR_GP_PIN(4,  3), 24, 3 },	/* SD2_DAT1 */
+		{ RCAR_GP_PIN(4,  4), 20, 3 },	/* SD2_DAT2 */
+		{ RCAR_GP_PIN(4,  5), 16, 3 },	/* SD2_DAT3 */
+		{ RCAR_GP_PIN(4,  6), 12, 3 },	/* SD2_DS */
+		{ RCAR_GP_PIN(4,  7),  8, 3 },	/* SD3_CLK */
+		{ RCAR_GP_PIN(4,  8),  4, 3 },	/* SD3_CMD */
+		{ RCAR_GP_PIN(4,  9),  0, 3 },	/* SD3_DAT0 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL16", 0xe6060340) {
+		{ RCAR_GP_PIN(4, 10), 28, 3 },	/* SD3_DAT1 */
+		{ RCAR_GP_PIN(4, 11), 24, 3 },	/* SD3_DAT2 */
+		{ RCAR_GP_PIN(4, 12), 20, 3 },	/* SD3_DAT3 */
+		{ RCAR_GP_PIN(4, 13), 16, 3 },	/* SD3_DAT4 */
+		{ RCAR_GP_PIN(4, 14), 12, 3 },	/* SD3_DAT5 */
+		{ RCAR_GP_PIN(4, 15),  8, 3 },	/* SD3_DAT6 */
+		{ RCAR_GP_PIN(4, 16),  4, 3 },	/* SD3_DAT7 */
+		{ RCAR_GP_PIN(4, 17),  0, 3 },	/* SD3_DS */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL17", 0xe6060344) {
+		{ RCAR_GP_PIN(3, 12), 28, 3 },	/* SD0_CD */
+		{ RCAR_GP_PIN(3, 13), 24, 3 },	/* SD0_WP */
+		{ RCAR_GP_PIN(3, 14), 20, 3 },	/* SD1_CD */
+		{ RCAR_GP_PIN(3, 15), 16, 3 },	/* SD1_WP */
+		{ RCAR_GP_PIN(5,  0), 12, 3 },	/* SCK0 */
+		{ RCAR_GP_PIN(5,  1),  8, 3 },	/* RX0 */
+		{ RCAR_GP_PIN(5,  2),  4, 3 },	/* TX0 */
+		{ RCAR_GP_PIN(5,  3),  0, 3 },	/* CTS0 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL18", 0xe6060348) {
+		{ RCAR_GP_PIN(5,  4), 28, 3 },	/* RTS0 */
+		{ RCAR_GP_PIN(5,  5), 24, 3 },	/* RX1 */
+		{ RCAR_GP_PIN(5,  6), 20, 3 },	/* TX1 */
+		{ RCAR_GP_PIN(5,  7), 16, 3 },	/* CTS1 */
+		{ RCAR_GP_PIN(5,  8), 12, 3 },	/* RTS1 */
+		{ RCAR_GP_PIN(5,  9),  8, 3 },	/* SCK2 */
+		{ RCAR_GP_PIN(5, 10),  4, 3 },	/* TX2 */
+		{ RCAR_GP_PIN(5, 11),  0, 3 },	/* RX2 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL19", 0xe606034c) {
+		{ RCAR_GP_PIN(5, 12), 28, 3 },	/* HSCK0 */
+		{ RCAR_GP_PIN(5, 13), 24, 3 },	/* HRX0 */
+		{ RCAR_GP_PIN(5, 14), 20, 3 },	/* HTX0 */
+		{ RCAR_GP_PIN(5, 15), 16, 3 },	/* HCTS0 */
+		{ RCAR_GP_PIN(5, 16), 12, 3 },	/* HRTS0 */
+		{ RCAR_GP_PIN(5, 17),  8, 3 },	/* MSIOF0_SCK */
+		{ RCAR_GP_PIN(5, 18),  4, 3 },	/* MSIOF0_SYNC */
+		{ RCAR_GP_PIN(5, 19),  0, 3 },	/* MSIOF0_SS1 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL20", 0xe6060350) {
+		{ RCAR_GP_PIN(5, 20), 28, 3 },	/* MSIOF0_TXD */
+		{ RCAR_GP_PIN(5, 21), 24, 3 },	/* MSIOF0_SS2 */
+		{ RCAR_GP_PIN(5, 22), 20, 3 },	/* MSIOF0_RXD */
+		{ RCAR_GP_PIN(5, 23), 16, 3 },	/* MLB_CLK */
+		{ RCAR_GP_PIN(5, 24), 12, 3 },	/* MLB_SIG */
+		{ RCAR_GP_PIN(5, 25),  8, 3 },	/* MLB_DAT */
+		{ PIN_MLB_REF,         4, 3 },	/* MLB_REF */
+		{ RCAR_GP_PIN(6,  0),  0, 3 },	/* SSI_SCK01239 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL21", 0xe6060354) {
+		{ RCAR_GP_PIN(6,  1), 28, 3 },	/* SSI_WS01239 */
+		{ RCAR_GP_PIN(6,  2), 24, 3 },	/* SSI_SDATA0 */
+		{ RCAR_GP_PIN(6,  3), 20, 3 },	/* SSI_SDATA1 */
+		{ RCAR_GP_PIN(6,  4), 16, 3 },	/* SSI_SDATA2 */
+		{ RCAR_GP_PIN(6,  5), 12, 3 },	/* SSI_SCK349 */
+		{ RCAR_GP_PIN(6,  6),  8, 3 },	/* SSI_WS349 */
+		{ RCAR_GP_PIN(6,  7),  4, 3 },	/* SSI_SDATA3 */
+		{ RCAR_GP_PIN(6,  8),  0, 3 },	/* SSI_SCK4 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL22", 0xe6060358) {
+		{ RCAR_GP_PIN(6,  9), 28, 3 },	/* SSI_WS4 */
+		{ RCAR_GP_PIN(6, 10), 24, 3 },	/* SSI_SDATA4 */
+		{ RCAR_GP_PIN(6, 11), 20, 3 },	/* SSI_SCK5 */
+		{ RCAR_GP_PIN(6, 12), 16, 3 },	/* SSI_WS5 */
+		{ RCAR_GP_PIN(6, 13), 12, 3 },	/* SSI_SDATA5 */
+		{ RCAR_GP_PIN(6, 14),  8, 3 },	/* SSI_SCK6 */
+		{ RCAR_GP_PIN(6, 15),  4, 3 },	/* SSI_WS6 */
+		{ RCAR_GP_PIN(6, 16),  0, 3 },	/* SSI_SDATA6 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL23", 0xe606035c) {
+		{ RCAR_GP_PIN(6, 17), 28, 3 },	/* SSI_SCK78 */
+		{ RCAR_GP_PIN(6, 18), 24, 3 },	/* SSI_WS78 */
+		{ RCAR_GP_PIN(6, 19), 20, 3 },	/* SSI_SDATA7 */
+		{ RCAR_GP_PIN(6, 20), 16, 3 },	/* SSI_SDATA8 */
+		{ RCAR_GP_PIN(6, 21), 12, 3 },	/* SSI_SDATA9 */
+		{ RCAR_GP_PIN(6, 22),  8, 3 },	/* AUDIO_CLKA */
+		{ RCAR_GP_PIN(6, 23),  4, 3 },	/* AUDIO_CLKB */
+		{ RCAR_GP_PIN(6, 24),  0, 3 },	/* USB0_PWEN */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL24", 0xe6060360) {
+		{ RCAR_GP_PIN(6, 25), 28, 3 },	/* USB0_OVC */
+		{ RCAR_GP_PIN(6, 26), 24, 3 },	/* USB1_PWEN */
+		{ RCAR_GP_PIN(6, 27), 20, 3 },	/* USB1_OVC */
+		{ RCAR_GP_PIN(6, 28), 16, 3 },	/* USB30_PWEN */
+		{ RCAR_GP_PIN(6, 29), 12, 3 },	/* USB30_OVC */
+		{ RCAR_GP_PIN(6, 30),  8, 3 },	/* USB31_PWEN */
+		{ RCAR_GP_PIN(6, 31),  4, 3 },	/* USB31_OVC */
+	} },
+	{ },
+};
+
+enum ioctrl_regs {
+	POCCTRL,
+	TDSELCTRL,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+	[POCCTRL] = { 0xe6060380, },
+	[TDSELCTRL] = { 0xe60603c0, },
+	{ /* sentinel */ },
+};
+
+static int r8a77950_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
+				   u32 *pocctrl)
+{
+	int bit = -EINVAL;
+
+	*pocctrl = pinmux_ioctrl_regs[POCCTRL].reg;
+
+	if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
+		bit = pin & 0x1f;
+
+	if (pin >= RCAR_GP_PIN(4, 0) && pin <= RCAR_GP_PIN(4, 17))
+		bit = (pin & 0x1f) + 12;
+
+	return bit;
+}
+
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+	{ PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
+		[ 0] = PIN_QSPI0_SPCLK,		/* QSPI0_SPCLK */
+		[ 1] = PIN_QSPI0_MOSI_IO0,	/* QSPI0_MOSI_IO0 */
+		[ 2] = PIN_QSPI0_MISO_IO1,	/* QSPI0_MISO_IO1 */
+		[ 3] = PIN_QSPI0_IO2,		/* QSPI0_IO2 */
+		[ 4] = PIN_QSPI0_IO3,		/* QSPI0_IO3 */
+		[ 5] = PIN_QSPI0_SSL,		/* QSPI0_SSL */
+		[ 6] = PIN_QSPI1_SPCLK,		/* QSPI1_SPCLK */
+		[ 7] = PIN_QSPI1_MOSI_IO0,	/* QSPI1_MOSI_IO0 */
+		[ 8] = PIN_QSPI1_MISO_IO1,	/* QSPI1_MISO_IO1 */
+		[ 9] = PIN_QSPI1_IO2,		/* QSPI1_IO2 */
+		[10] = PIN_QSPI1_IO3,		/* QSPI1_IO3 */
+		[11] = PIN_QSPI1_SSL,		/* QSPI1_SSL */
+		[12] = PIN_RPC_INT_N,		/* RPC_INT# */
+		[13] = PIN_RPC_WP_N,		/* RPC_WP# */
+		[14] = PIN_RPC_RESET_N,		/* RPC_RESET# */
+		[15] = PIN_AVB_RX_CTL,		/* AVB_RX_CTL */
+		[16] = PIN_AVB_RXC,		/* AVB_RXC */
+		[17] = PIN_AVB_RD0,		/* AVB_RD0 */
+		[18] = PIN_AVB_RD1,		/* AVB_RD1 */
+		[19] = PIN_AVB_RD2,		/* AVB_RD2 */
+		[20] = PIN_AVB_RD3,		/* AVB_RD3 */
+		[21] = PIN_AVB_TX_CTL,		/* AVB_TX_CTL */
+		[22] = PIN_AVB_TXC,		/* AVB_TXC */
+		[23] = PIN_AVB_TD0,		/* AVB_TD0 */
+		[24] = PIN_AVB_TD1,		/* AVB_TD1 */
+		[25] = PIN_AVB_TD2,		/* AVB_TD2 */
+		[26] = PIN_AVB_TD3,		/* AVB_TD3 */
+		[27] = PIN_AVB_TXCREFCLK,	/* AVB_TXCREFCLK */
+		[28] = PIN_AVB_MDIO,		/* AVB_MDIO */
+		[29] = RCAR_GP_PIN(2,  9),	/* AVB_MDC */
+		[30] = RCAR_GP_PIN(2, 10),	/* AVB_MAGIC */
+		[31] = RCAR_GP_PIN(2, 11),	/* AVB_PHY_INT */
+	} },
+	{ PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
+		[ 0] = RCAR_GP_PIN(2, 12),	/* AVB_LINK */
+		[ 1] = RCAR_GP_PIN(2, 13),	/* AVB_AVTP_MATCH_A */
+		[ 2] = RCAR_GP_PIN(2, 14),	/* AVB_AVTP_CAPTURE_A */
+		[ 3] = RCAR_GP_PIN(2,  0),	/* IRQ0 */
+		[ 4] = RCAR_GP_PIN(2,  1),	/* IRQ1 */
+		[ 5] = RCAR_GP_PIN(2,  2),	/* IRQ2 */
+		[ 6] = RCAR_GP_PIN(2,  3),	/* IRQ3 */
+		[ 7] = RCAR_GP_PIN(2,  4),	/* IRQ4 */
+		[ 8] = RCAR_GP_PIN(2,  5),	/* IRQ5 */
+		[ 9] = RCAR_GP_PIN(2,  6),	/* PWM0 */
+		[10] = RCAR_GP_PIN(2,  7),	/* PWM1_A */
+		[11] = RCAR_GP_PIN(2,  8),	/* PWM2_A */
+		[12] = RCAR_GP_PIN(1,  0),	/* A0 */
+		[13] = RCAR_GP_PIN(1,  1),	/* A1 */
+		[14] = RCAR_GP_PIN(1,  2),	/* A2 */
+		[15] = RCAR_GP_PIN(1,  3),	/* A3 */
+		[16] = RCAR_GP_PIN(1,  4),	/* A4 */
+		[17] = RCAR_GP_PIN(1,  5),	/* A5 */
+		[18] = RCAR_GP_PIN(1,  6),	/* A6 */
+		[19] = RCAR_GP_PIN(1,  7),	/* A7 */
+		[20] = RCAR_GP_PIN(1,  8),	/* A8 */
+		[21] = RCAR_GP_PIN(1,  9),	/* A9 */
+		[22] = RCAR_GP_PIN(1, 10),	/* A10 */
+		[23] = RCAR_GP_PIN(1, 11),	/* A11 */
+		[24] = RCAR_GP_PIN(1, 12),	/* A12 */
+		[25] = RCAR_GP_PIN(1, 13),	/* A13 */
+		[26] = RCAR_GP_PIN(1, 14),	/* A14 */
+		[27] = RCAR_GP_PIN(1, 15),	/* A15 */
+		[28] = RCAR_GP_PIN(1, 16),	/* A16 */
+		[29] = RCAR_GP_PIN(1, 17),	/* A17 */
+		[30] = RCAR_GP_PIN(1, 18),	/* A18 */
+		[31] = RCAR_GP_PIN(1, 19),	/* A19 */
+	} },
+	{ PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
+		[ 0] = PIN_CLKOUT,		/* CLKOUT */
+		[ 1] = RCAR_GP_PIN(1, 20),	/* CS0_N */
+		[ 2] = RCAR_GP_PIN(1, 21),	/* CS1_N_A26 */
+		[ 3] = RCAR_GP_PIN(1, 22),	/* BS_N */
+		[ 4] = RCAR_GP_PIN(1, 23),	/* RD_N */
+		[ 5] = RCAR_GP_PIN(1, 24),	/* RD_WR_N */
+		[ 6] = RCAR_GP_PIN(1, 25),	/* WE0_N */
+		[ 7] = RCAR_GP_PIN(1, 26),	/* WE1_N */
+		[ 8] = RCAR_GP_PIN(1, 27),	/* EX_WAIT0_A */
+		[ 9] = PIN_PRESETOUT_N,		/* PRESETOUT# */
+		[10] = RCAR_GP_PIN(0,  0),	/* D0 */
+		[11] = RCAR_GP_PIN(0,  1),	/* D1 */
+		[12] = RCAR_GP_PIN(0,  2),	/* D2 */
+		[13] = RCAR_GP_PIN(0,  3),	/* D3 */
+		[14] = RCAR_GP_PIN(0,  4),	/* D4 */
+		[15] = RCAR_GP_PIN(0,  5),	/* D5 */
+		[16] = RCAR_GP_PIN(0,  6),	/* D6 */
+		[17] = RCAR_GP_PIN(0,  7),	/* D7 */
+		[18] = RCAR_GP_PIN(0,  8),	/* D8 */
+		[19] = RCAR_GP_PIN(0,  9),	/* D9 */
+		[20] = RCAR_GP_PIN(0, 10),	/* D10 */
+		[21] = RCAR_GP_PIN(0, 11),	/* D11 */
+		[22] = RCAR_GP_PIN(0, 12),	/* D12 */
+		[23] = RCAR_GP_PIN(0, 13),	/* D13 */
+		[24] = RCAR_GP_PIN(0, 14),	/* D14 */
+		[25] = RCAR_GP_PIN(0, 15),	/* D15 */
+		[26] = RCAR_GP_PIN(7,  0),	/* AVS1 */
+		[27] = RCAR_GP_PIN(7,  1),	/* AVS2 */
+		[28] = RCAR_GP_PIN(7,  2),	/* GP7_02 */
+		[29] = RCAR_GP_PIN(7,  3),	/* GP7_03 */
+		[30] = PIN_DU_DOTCLKIN0,	/* DU_DOTCLKIN0 */
+		[31] = PIN_DU_DOTCLKIN1,	/* DU_DOTCLKIN1 */
+	} },
+	{ PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+		[ 0] = PIN_DU_DOTCLKIN2,	/* DU_DOTCLKIN2 */
+		[ 1] = PIN_DU_DOTCLKIN3,	/* DU_DOTCLKIN3 */
+		[ 2] = PIN_FSCLKST_N,		/* FSCLKST# */
+		[ 3] = PIN_EXTALR,		/* EXTALR */
+		[ 4] = PIN_TRST_N,		/* TRST# */
+		[ 5] = PIN_TCK,			/* TCK */
+		[ 6] = PIN_TMS,			/* TMS */
+		[ 7] = PIN_TDI,			/* TDI */
+		[ 8] = SH_PFC_PIN_NONE,
+		[ 9] = PIN_ASEBRK,		/* ASEBRK */
+		[10] = RCAR_GP_PIN(3,  0),	/* SD0_CLK */
+		[11] = RCAR_GP_PIN(3,  1),	/* SD0_CMD */
+		[12] = RCAR_GP_PIN(3,  2),	/* SD0_DAT0 */
+		[13] = RCAR_GP_PIN(3,  3),	/* SD0_DAT1 */
+		[14] = RCAR_GP_PIN(3,  4),	/* SD0_DAT2 */
+		[15] = RCAR_GP_PIN(3,  5),	/* SD0_DAT3 */
+		[16] = RCAR_GP_PIN(3,  6),	/* SD1_CLK */
+		[17] = RCAR_GP_PIN(3,  7),	/* SD1_CMD */
+		[18] = RCAR_GP_PIN(3,  8),	/* SD1_DAT0 */
+		[19] = RCAR_GP_PIN(3,  9),	/* SD1_DAT1 */
+		[20] = RCAR_GP_PIN(3, 10),	/* SD1_DAT2 */
+		[21] = RCAR_GP_PIN(3, 11),	/* SD1_DAT3 */
+		[22] = RCAR_GP_PIN(4,  0),	/* SD2_CLK */
+		[23] = RCAR_GP_PIN(4,  1),	/* SD2_CMD */
+		[24] = RCAR_GP_PIN(4,  2),	/* SD2_DAT0 */
+		[25] = RCAR_GP_PIN(4,  3),	/* SD2_DAT1 */
+		[26] = RCAR_GP_PIN(4,  4),	/* SD2_DAT2 */
+		[27] = RCAR_GP_PIN(4,  5),	/* SD2_DAT3 */
+		[28] = RCAR_GP_PIN(4,  6),	/* SD2_DS */
+		[29] = RCAR_GP_PIN(4,  7),	/* SD3_CLK */
+		[30] = RCAR_GP_PIN(4,  8),	/* SD3_CMD */
+		[31] = RCAR_GP_PIN(4,  9),	/* SD3_DAT0 */
+	} },
+	{ PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
+		[ 0] = RCAR_GP_PIN(4, 10),	/* SD3_DAT1 */
+		[ 1] = RCAR_GP_PIN(4, 11),	/* SD3_DAT2 */
+		[ 2] = RCAR_GP_PIN(4, 12),	/* SD3_DAT3 */
+		[ 3] = RCAR_GP_PIN(4, 13),	/* SD3_DAT4 */
+		[ 4] = RCAR_GP_PIN(4, 14),	/* SD3_DAT5 */
+		[ 5] = RCAR_GP_PIN(4, 15),	/* SD3_DAT6 */
+		[ 6] = RCAR_GP_PIN(4, 16),	/* SD3_DAT7 */
+		[ 7] = RCAR_GP_PIN(4, 17),	/* SD3_DS */
+		[ 8] = RCAR_GP_PIN(3, 12),	/* SD0_CD */
+		[ 9] = RCAR_GP_PIN(3, 13),	/* SD0_WP */
+		[10] = RCAR_GP_PIN(3, 14),	/* SD1_CD */
+		[11] = RCAR_GP_PIN(3, 15),	/* SD1_WP */
+		[12] = RCAR_GP_PIN(5,  0),	/* SCK0 */
+		[13] = RCAR_GP_PIN(5,  1),	/* RX0 */
+		[14] = RCAR_GP_PIN(5,  2),	/* TX0 */
+		[15] = RCAR_GP_PIN(5,  3),	/* CTS0_N */
+		[16] = RCAR_GP_PIN(5,  4),	/* RTS0_N */
+		[17] = RCAR_GP_PIN(5,  5),	/* RX1_A */
+		[18] = RCAR_GP_PIN(5,  6),	/* TX1_A */
+		[19] = RCAR_GP_PIN(5,  7),	/* CTS1_N */
+		[20] = RCAR_GP_PIN(5,  8),	/* RTS1_N */
+		[21] = RCAR_GP_PIN(5,  9),	/* SCK2 */
+		[22] = RCAR_GP_PIN(5, 10),	/* TX2_A */
+		[23] = RCAR_GP_PIN(5, 11),	/* RX2_A */
+		[24] = RCAR_GP_PIN(5, 12),	/* HSCK0 */
+		[25] = RCAR_GP_PIN(5, 13),	/* HRX0 */
+		[26] = RCAR_GP_PIN(5, 14),	/* HTX0 */
+		[27] = RCAR_GP_PIN(5, 15),	/* HCTS0_N */
+		[28] = RCAR_GP_PIN(5, 16),	/* HRTS0_N */
+		[29] = RCAR_GP_PIN(5, 17),	/* MSIOF0_SCK */
+		[30] = RCAR_GP_PIN(5, 18),	/* MSIOF0_SYNC */
+		[31] = RCAR_GP_PIN(5, 19),	/* MSIOF0_SS1 */
+	} },
+	{ PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
+		[ 0] = RCAR_GP_PIN(5, 20),	/* MSIOF0_TXD */
+		[ 1] = RCAR_GP_PIN(5, 21),	/* MSIOF0_SS2 */
+		[ 2] = RCAR_GP_PIN(5, 22),	/* MSIOF0_RXD */
+		[ 3] = RCAR_GP_PIN(5, 23),	/* MLB_CLK */
+		[ 4] = RCAR_GP_PIN(5, 24),	/* MLB_SIG */
+		[ 5] = RCAR_GP_PIN(5, 25),	/* MLB_DAT */
+		[ 6] = PIN_MLB_REF,		/* MLB_REF */
+		[ 7] = RCAR_GP_PIN(6,  0),	/* SSI_SCK01239 */
+		[ 8] = RCAR_GP_PIN(6,  1),	/* SSI_WS01239 */
+		[ 9] = RCAR_GP_PIN(6,  2),	/* SSI_SDATA0 */
+		[10] = RCAR_GP_PIN(6,  3),	/* SSI_SDATA1_A */
+		[11] = RCAR_GP_PIN(6,  4),	/* SSI_SDATA2_A */
+		[12] = RCAR_GP_PIN(6,  5),	/* SSI_SCK349 */
+		[13] = RCAR_GP_PIN(6,  6),	/* SSI_WS349 */
+		[14] = RCAR_GP_PIN(6,  7),	/* SSI_SDATA3 */
+		[15] = RCAR_GP_PIN(6,  8),	/* SSI_SCK4 */
+		[16] = RCAR_GP_PIN(6,  9),	/* SSI_WS4 */
+		[17] = RCAR_GP_PIN(6, 10),	/* SSI_SDATA4 */
+		[18] = RCAR_GP_PIN(6, 11),	/* SSI_SCK5 */
+		[19] = RCAR_GP_PIN(6, 12),	/* SSI_WS5 */
+		[20] = RCAR_GP_PIN(6, 13),	/* SSI_SDATA5 */
+		[21] = RCAR_GP_PIN(6, 14),	/* SSI_SCK6 */
+		[22] = RCAR_GP_PIN(6, 15),	/* SSI_WS6 */
+		[23] = RCAR_GP_PIN(6, 16),	/* SSI_SDATA6 */
+		[24] = RCAR_GP_PIN(6, 17),	/* SSI_SCK78 */
+		[25] = RCAR_GP_PIN(6, 18),	/* SSI_WS78 */
+		[26] = RCAR_GP_PIN(6, 19),	/* SSI_SDATA7 */
+		[27] = RCAR_GP_PIN(6, 20),	/* SSI_SDATA8 */
+		[28] = RCAR_GP_PIN(6, 21),	/* SSI_SDATA9_A */
+		[29] = RCAR_GP_PIN(6, 22),	/* AUDIO_CLKA_A */
+		[30] = RCAR_GP_PIN(6, 23),	/* AUDIO_CLKB_B */
+		[31] = RCAR_GP_PIN(6, 24),	/* USB0_PWEN */
+	} },
+	{ PINMUX_BIAS_REG("PUEN6", 0xe6060418, "PUD6", 0xe6060458) {
+		[ 0] = RCAR_GP_PIN(6, 25),	/* USB0_OVC */
+		[ 1] = RCAR_GP_PIN(6, 26),	/* USB1_PWEN */
+		[ 2] = RCAR_GP_PIN(6, 27),	/* USB1_OVC */
+		[ 3] = RCAR_GP_PIN(6, 28),	/* USB30_PWEN */
+		[ 4] = RCAR_GP_PIN(6, 29),	/* USB30_OVC */
+		[ 5] = RCAR_GP_PIN(6, 30),	/* USB31_PWEN */
+		[ 6] = RCAR_GP_PIN(6, 31),	/* USB31_OVC */
+		[ 7] = SH_PFC_PIN_NONE,
+		[ 8] = SH_PFC_PIN_NONE,
+		[ 9] = SH_PFC_PIN_NONE,
+		[10] = SH_PFC_PIN_NONE,
+		[11] = SH_PFC_PIN_NONE,
+		[12] = SH_PFC_PIN_NONE,
+		[13] = SH_PFC_PIN_NONE,
+		[14] = SH_PFC_PIN_NONE,
+		[15] = SH_PFC_PIN_NONE,
+		[16] = SH_PFC_PIN_NONE,
+		[17] = SH_PFC_PIN_NONE,
+		[18] = SH_PFC_PIN_NONE,
+		[19] = SH_PFC_PIN_NONE,
+		[20] = SH_PFC_PIN_NONE,
+		[21] = SH_PFC_PIN_NONE,
+		[22] = SH_PFC_PIN_NONE,
+		[23] = SH_PFC_PIN_NONE,
+		[24] = SH_PFC_PIN_NONE,
+		[25] = SH_PFC_PIN_NONE,
+		[26] = SH_PFC_PIN_NONE,
+		[27] = SH_PFC_PIN_NONE,
+		[28] = SH_PFC_PIN_NONE,
+		[29] = SH_PFC_PIN_NONE,
+		[30] = SH_PFC_PIN_NONE,
+		[31] = SH_PFC_PIN_NONE,
+	} },
+	{ /* sentinel */ },
+};
+
+static unsigned int r8a77950_pinmux_get_bias(struct sh_pfc *pfc,
+					     unsigned int pin)
+{
+	const struct pinmux_bias_reg *reg;
+	unsigned int bit;
+
+	reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+	if (!reg)
+		return PIN_CONFIG_BIAS_DISABLE;
+
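+	/* A cleared PUEN bit means no bias; with PUEN set, PUD selects pull-up (1) or pull-down (0) */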
+	if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
+		return PIN_CONFIG_BIAS_DISABLE;
+	else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
+		return PIN_CONFIG_BIAS_PULL_UP;
+	else
+		return PIN_CONFIG_BIAS_PULL_DOWN;
+}
+
+static void r8a77950_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+				     unsigned int bias)
+{
+	const struct pinmux_bias_reg *reg;
+	u32 enable, updown;
+	unsigned int bit;
+
+	reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+	if (!reg)
+		return;
+
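+	/* Recompute PUEN: set the bit for any pull, clear it when the bias is disabled */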
+	enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
+	if (bias != PIN_CONFIG_BIAS_DISABLE)
+		enable |= BIT(bit);
+
+	updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
+	if (bias == PIN_CONFIG_BIAS_PULL_UP)
+		updown |= BIT(bit);
+
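+	/* Update the pull direction first, then the pull enable bit */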
+	sh_pfc_write(pfc, reg->pud, updown);
+	sh_pfc_write(pfc, reg->puen, enable);
+}
+
+static const struct sh_pfc_soc_operations r8a77950_pinmux_ops = {
+	.pin_to_pocctrl = r8a77950_pin_to_pocctrl,
+	.get_bias = r8a77950_pinmux_get_bias,
+	.set_bias = r8a77950_pinmux_set_bias,
+};
+
+const struct sh_pfc_soc_info r8a77950_pinmux_info = {
+	.name = "r8a77950_pfc",
+	.ops = &r8a77950_pinmux_ops,
+	.unlock_reg = 0xe6060000, /* PMMR */
+
+	.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+	.pins = pinmux_pins,
+	.nr_pins = ARRAY_SIZE(pinmux_pins),
+	.groups = pinmux_groups,
+	.nr_groups = ARRAY_SIZE(pinmux_groups),
+	.functions = pinmux_functions,
+	.nr_functions = ARRAY_SIZE(pinmux_functions),
+
+	.cfg_regs = pinmux_config_regs,
+	.drive_regs = pinmux_drive_regs,
+	.bias_regs = pinmux_bias_regs,
+	.ioctrl_regs = pinmux_ioctrl_regs,
+
+	.pinmux_data = pinmux_data,
+	.pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77951.c b/drivers/pinctrl/sh-pfc/pfc-r8a77951.c
new file mode 100644
index 0000000..256fab4
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77951.c
@@ -0,0 +1,6244 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A77951 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2015-2019 Renesas Electronics Corporation
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sys_soc.h>
+
+#include "core.h"
+#include "sh_pfc.h"
+
+#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
+
+#define CPU_ALL_GP(fn, sfx)						\
+	PORT_GP_CFG_16(0, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_29(1, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_15(2, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_12(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE),	\
+	PORT_GP_CFG_1(3, 12, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_18(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE),	\
+	PORT_GP_CFG_26(5, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_32(6, fn, sfx, CFG_FLAGS),	\
+	PORT_GP_CFG_4(7, fn, sfx, CFG_FLAGS)
+
+#define CPU_ALL_NOGP(fn)						\
+	PIN_NOGP_CFG(ASEBRK, "ASEBRK", fn, CFG_FLAGS),			\
+	PIN_NOGP_CFG(AVB_MDIO, "AVB_MDIO", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_RD0, "AVB_RD0", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_RD1, "AVB_RD1", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_RD2, "AVB_RD2", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_RD3, "AVB_RD3", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_RXC, "AVB_RXC", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_RX_CTL, "AVB_RX_CTL", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_TD0, "AVB_TD0", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_TD1, "AVB_TD1", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_TD2, "AVB_TD2", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_TD3, "AVB_TD3", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_TXC, "AVB_TXC", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(AVB_TXCREFCLK, "AVB_TXCREFCLK", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(AVB_TX_CTL, "AVB_TX_CTL", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(DU_DOTCLKIN0, "DU_DOTCLKIN0", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(DU_DOTCLKIN1, "DU_DOTCLKIN1", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(DU_DOTCLKIN2, "DU_DOTCLKIN2", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(DU_DOTCLKIN3, "DU_DOTCLKIN3", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(EXTALR, "EXTALR", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN),\
+	PIN_NOGP_CFG(FSCLKST_N, "FSCLKST#", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(MLB_REF, "MLB_REF", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(PRESETOUT_N, "PRESETOUT#", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(QSPI0_IO2, "QSPI0_IO2", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(QSPI0_IO3, "QSPI0_IO3", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(QSPI0_MISO_IO1, "QSPI0_MISO_IO1", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(QSPI0_MOSI_IO0, "QSPI0_MOSI_IO0", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(QSPI0_SPCLK, "QSPI0_SPCLK", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(QSPI0_SSL, "QSPI0_SSL", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(QSPI1_IO2, "QSPI1_IO2", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(QSPI1_IO3, "QSPI1_IO3", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(QSPI1_MISO_IO1, "QSPI1_MISO_IO1", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(QSPI1_MOSI_IO0, "QSPI1_MOSI_IO0", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(QSPI1_SPCLK, "QSPI1_SPCLK", fn, CFG_FLAGS),	\
+	PIN_NOGP_CFG(QSPI1_SSL, "QSPI1_SSL", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(RPC_INT_N, "RPC_INT#", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(RPC_RESET_N, "RPC_RESET#", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(RPC_WP_N, "RPC_WP#", fn, CFG_FLAGS),		\
+	PIN_NOGP_CFG(TCK, "TCK", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN),	\
+	PIN_NOGP_CFG(TDI, "TDI", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN),	\
+	PIN_NOGP_CFG(TDO, "TDO", fn, SH_PFC_PIN_CFG_DRIVE_STRENGTH),	\
+	PIN_NOGP_CFG(TMS, "TMS", fn, CFG_FLAGS),			\
+	PIN_NOGP_CFG(TRST_N, "TRST#", fn, SH_PFC_PIN_CFG_PULL_UP_DOWN)
+
+/*
+ * F_() : just information
+ * FM() : macro for FN_xxx / xxx_MARK
+ */
+
+/* GPSR0 */
+#define GPSR0_15	F_(D15,			IP7_11_8)
+#define GPSR0_14	F_(D14,			IP7_7_4)
+#define GPSR0_13	F_(D13,			IP7_3_0)
+#define GPSR0_12	F_(D12,			IP6_31_28)
+#define GPSR0_11	F_(D11,			IP6_27_24)
+#define GPSR0_10	F_(D10,			IP6_23_20)
+#define GPSR0_9		F_(D9,			IP6_19_16)
+#define GPSR0_8		F_(D8,			IP6_15_12)
+#define GPSR0_7		F_(D7,			IP6_11_8)
+#define GPSR0_6		F_(D6,			IP6_7_4)
+#define GPSR0_5		F_(D5,			IP6_3_0)
+#define GPSR0_4		F_(D4,			IP5_31_28)
+#define GPSR0_3		F_(D3,			IP5_27_24)
+#define GPSR0_2		F_(D2,			IP5_23_20)
+#define GPSR0_1		F_(D1,			IP5_19_16)
+#define GPSR0_0		F_(D0,			IP5_15_12)
+
+/* GPSR1 */
+#define GPSR1_28	FM(CLKOUT)
+#define GPSR1_27	F_(EX_WAIT0_A,		IP5_11_8)
+#define GPSR1_26	F_(WE1_N,		IP5_7_4)
+#define GPSR1_25	F_(WE0_N,		IP5_3_0)
+#define GPSR1_24	F_(RD_WR_N,		IP4_31_28)
+#define GPSR1_23	F_(RD_N,		IP4_27_24)
+#define GPSR1_22	F_(BS_N,		IP4_23_20)
+#define GPSR1_21	F_(CS1_N,		IP4_19_16)
+#define GPSR1_20	F_(CS0_N,		IP4_15_12)
+#define GPSR1_19	F_(A19,			IP4_11_8)
+#define GPSR1_18	F_(A18,			IP4_7_4)
+#define GPSR1_17	F_(A17,			IP4_3_0)
+#define GPSR1_16	F_(A16,			IP3_31_28)
+#define GPSR1_15	F_(A15,			IP3_27_24)
+#define GPSR1_14	F_(A14,			IP3_23_20)
+#define GPSR1_13	F_(A13,			IP3_19_16)
+#define GPSR1_12	F_(A12,			IP3_15_12)
+#define GPSR1_11	F_(A11,			IP3_11_8)
+#define GPSR1_10	F_(A10,			IP3_7_4)
+#define GPSR1_9		F_(A9,			IP3_3_0)
+#define GPSR1_8		F_(A8,			IP2_31_28)
+#define GPSR1_7		F_(A7,			IP2_27_24)
+#define GPSR1_6		F_(A6,			IP2_23_20)
+#define GPSR1_5		F_(A5,			IP2_19_16)
+#define GPSR1_4		F_(A4,			IP2_15_12)
+#define GPSR1_3		F_(A3,			IP2_11_8)
+#define GPSR1_2		F_(A2,			IP2_7_4)
+#define GPSR1_1		F_(A1,			IP2_3_0)
+#define GPSR1_0		F_(A0,			IP1_31_28)
+
+/* GPSR2 */
+#define GPSR2_14	F_(AVB_AVTP_CAPTURE_A,	IP0_23_20)
+#define GPSR2_13	F_(AVB_AVTP_MATCH_A,	IP0_19_16)
+#define GPSR2_12	F_(AVB_LINK,		IP0_15_12)
+#define GPSR2_11	F_(AVB_PHY_INT,		IP0_11_8)
+#define GPSR2_10	F_(AVB_MAGIC,		IP0_7_4)
+#define GPSR2_9		F_(AVB_MDC,		IP0_3_0)
+#define GPSR2_8		F_(PWM2_A,		IP1_27_24)
+#define GPSR2_7		F_(PWM1_A,		IP1_23_20)
+#define GPSR2_6		F_(PWM0,		IP1_19_16)
+#define GPSR2_5		F_(IRQ5,		IP1_15_12)
+#define GPSR2_4		F_(IRQ4,		IP1_11_8)
+#define GPSR2_3		F_(IRQ3,		IP1_7_4)
+#define GPSR2_2		F_(IRQ2,		IP1_3_0)
+#define GPSR2_1		F_(IRQ1,		IP0_31_28)
+#define GPSR2_0		F_(IRQ0,		IP0_27_24)
+
+/* GPSR3 */
+#define GPSR3_15	F_(SD1_WP,		IP11_23_20)
+#define GPSR3_14	F_(SD1_CD,		IP11_19_16)
+#define GPSR3_13	F_(SD0_WP,		IP11_15_12)
+#define GPSR3_12	F_(SD0_CD,		IP11_11_8)
+#define GPSR3_11	F_(SD1_DAT3,		IP8_31_28)
+#define GPSR3_10	F_(SD1_DAT2,		IP8_27_24)
+#define GPSR3_9		F_(SD1_DAT1,		IP8_23_20)
+#define GPSR3_8		F_(SD1_DAT0,		IP8_19_16)
+#define GPSR3_7		F_(SD1_CMD,		IP8_15_12)
+#define GPSR3_6		F_(SD1_CLK,		IP8_11_8)
+#define GPSR3_5		F_(SD0_DAT3,		IP8_7_4)
+#define GPSR3_4		F_(SD0_DAT2,		IP8_3_0)
+#define GPSR3_3		F_(SD0_DAT1,		IP7_31_28)
+#define GPSR3_2		F_(SD0_DAT0,		IP7_27_24)
+#define GPSR3_1		F_(SD0_CMD,		IP7_23_20)
+#define GPSR3_0		F_(SD0_CLK,		IP7_19_16)
+
+/* GPSR4 */
+#define GPSR4_17	F_(SD3_DS,		IP11_7_4)
+#define GPSR4_16	F_(SD3_DAT7,		IP11_3_0)
+#define GPSR4_15	F_(SD3_DAT6,		IP10_31_28)
+#define GPSR4_14	F_(SD3_DAT5,		IP10_27_24)
+#define GPSR4_13	F_(SD3_DAT4,		IP10_23_20)
+#define GPSR4_12	F_(SD3_DAT3,		IP10_19_16)
+#define GPSR4_11	F_(SD3_DAT2,		IP10_15_12)
+#define GPSR4_10	F_(SD3_DAT1,		IP10_11_8)
+#define GPSR4_9		F_(SD3_DAT0,		IP10_7_4)
+#define GPSR4_8		F_(SD3_CMD,		IP10_3_0)
+#define GPSR4_7		F_(SD3_CLK,		IP9_31_28)
+#define GPSR4_6		F_(SD2_DS,		IP9_27_24)
+#define GPSR4_5		F_(SD2_DAT3,		IP9_23_20)
+#define GPSR4_4		F_(SD2_DAT2,		IP9_19_16)
+#define GPSR4_3		F_(SD2_DAT1,		IP9_15_12)
+#define GPSR4_2		F_(SD2_DAT0,		IP9_11_8)
+#define GPSR4_1		F_(SD2_CMD,		IP9_7_4)
+#define GPSR4_0		F_(SD2_CLK,		IP9_3_0)
+
+/* GPSR5 */
+#define GPSR5_25	F_(MLB_DAT,		IP14_19_16)
+#define GPSR5_24	F_(MLB_SIG,		IP14_15_12)
+#define GPSR5_23	F_(MLB_CLK,		IP14_11_8)
+#define GPSR5_22	FM(MSIOF0_RXD)
+#define GPSR5_21	F_(MSIOF0_SS2,		IP14_7_4)
+#define GPSR5_20	FM(MSIOF0_TXD)
+#define GPSR5_19	F_(MSIOF0_SS1,		IP14_3_0)
+#define GPSR5_18	F_(MSIOF0_SYNC,		IP13_31_28)
+#define GPSR5_17	FM(MSIOF0_SCK)
+#define GPSR5_16	F_(HRTS0_N,		IP13_27_24)
+#define GPSR5_15	F_(HCTS0_N,		IP13_23_20)
+#define GPSR5_14	F_(HTX0,		IP13_19_16)
+#define GPSR5_13	F_(HRX0,		IP13_15_12)
+#define GPSR5_12	F_(HSCK0,		IP13_11_8)
+#define GPSR5_11	F_(RX2_A,		IP13_7_4)
+#define GPSR5_10	F_(TX2_A,		IP13_3_0)
+#define GPSR5_9		F_(SCK2,		IP12_31_28)
+#define GPSR5_8		F_(RTS1_N,		IP12_27_24)
+#define GPSR5_7		F_(CTS1_N,		IP12_23_20)
+#define GPSR5_6		F_(TX1_A,		IP12_19_16)
+#define GPSR5_5		F_(RX1_A,		IP12_15_12)
+#define GPSR5_4		F_(RTS0_N,		IP12_11_8)
+#define GPSR5_3		F_(CTS0_N,		IP12_7_4)
+#define GPSR5_2		F_(TX0,			IP12_3_0)
+#define GPSR5_1		F_(RX0,			IP11_31_28)
+#define GPSR5_0		F_(SCK0,		IP11_27_24)
+
+/* GPSR6 */
+#define GPSR6_31	F_(USB2_CH3_OVC,	IP18_7_4)
+#define GPSR6_30	F_(USB2_CH3_PWEN,	IP18_3_0)
+#define GPSR6_29	F_(USB30_OVC,		IP17_31_28)
+#define GPSR6_28	F_(USB30_PWEN,		IP17_27_24)
+#define GPSR6_27	F_(USB1_OVC,		IP17_23_20)
+#define GPSR6_26	F_(USB1_PWEN,		IP17_19_16)
+#define GPSR6_25	F_(USB0_OVC,		IP17_15_12)
+#define GPSR6_24	F_(USB0_PWEN,		IP17_11_8)
+#define GPSR6_23	F_(AUDIO_CLKB_B,	IP17_7_4)
+#define GPSR6_22	F_(AUDIO_CLKA_A,	IP17_3_0)
+#define GPSR6_21	F_(SSI_SDATA9_A,	IP16_31_28)
+#define GPSR6_20	F_(SSI_SDATA8,		IP16_27_24)
+#define GPSR6_19	F_(SSI_SDATA7,		IP16_23_20)
+#define GPSR6_18	F_(SSI_WS78,		IP16_19_16)
+#define GPSR6_17	F_(SSI_SCK78,		IP16_15_12)
+#define GPSR6_16	F_(SSI_SDATA6,		IP16_11_8)
+#define GPSR6_15	F_(SSI_WS6,		IP16_7_4)
+#define GPSR6_14	F_(SSI_SCK6,		IP16_3_0)
+#define GPSR6_13	FM(SSI_SDATA5)
+#define GPSR6_12	FM(SSI_WS5)
+#define GPSR6_11	FM(SSI_SCK5)
+#define GPSR6_10	F_(SSI_SDATA4,		IP15_31_28)
+#define GPSR6_9		F_(SSI_WS4,		IP15_27_24)
+#define GPSR6_8		F_(SSI_SCK4,		IP15_23_20)
+#define GPSR6_7		F_(SSI_SDATA3,		IP15_19_16)
+#define GPSR6_6		F_(SSI_WS349,		IP15_15_12)
+#define GPSR6_5		F_(SSI_SCK349,		IP15_11_8)
+#define GPSR6_4		F_(SSI_SDATA2_A,	IP15_7_4)
+#define GPSR6_3		F_(SSI_SDATA1_A,	IP15_3_0)
+#define GPSR6_2		F_(SSI_SDATA0,		IP14_31_28)
+#define GPSR6_1		F_(SSI_WS01239,		IP14_27_24)
+#define GPSR6_0		F_(SSI_SCK01239,		IP14_23_20)
+
+/* GPSR7 */
+#define GPSR7_3		FM(GP7_03)
+#define GPSR7_2		FM(GP7_02)
+#define GPSR7_1		FM(AVS2)
+#define GPSR7_0		FM(AVS1)
+
+
+/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
+#define IP0_3_0		FM(AVB_MDC)		F_(0, 0)	FM(MSIOF2_SS2_C)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_7_4		FM(AVB_MAGIC)		F_(0, 0)	FM(MSIOF2_SS1_C)	FM(SCK4_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_11_8	FM(AVB_PHY_INT)		F_(0, 0)	FM(MSIOF2_SYNC_C)	FM(RX4_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_15_12	FM(AVB_LINK)		F_(0, 0)	FM(MSIOF2_SCK_C)	FM(TX4_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_19_16	FM(AVB_AVTP_MATCH_A)	F_(0, 0)	FM(MSIOF2_RXD_C)	FM(CTS4_N_A)			F_(0, 0)	FM(FSCLKST2_N_A) F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_23_20	FM(AVB_AVTP_CAPTURE_A)	F_(0, 0)	FM(MSIOF2_TXD_C)	FM(RTS4_N_A)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_27_24	FM(IRQ0)		FM(QPOLB)	F_(0, 0)		FM(DU_CDE)			FM(VI4_DATA0_B) FM(CAN0_TX_B)	FM(CANFD0_TX_B)		FM(MSIOF3_SS2_E) F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_31_28	FM(IRQ1)		FM(QPOLA)	F_(0, 0)		FM(DU_DISP)			FM(VI4_DATA1_B) FM(CAN0_RX_B)	FM(CANFD0_RX_B)		FM(MSIOF3_SS1_E) F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_3_0		FM(IRQ2)		FM(QCPV_QDE)	F_(0, 0)		FM(DU_EXODDF_DU_ODDF_DISP_CDE)	FM(VI4_DATA2_B) F_(0, 0)	F_(0, 0)		FM(MSIOF3_SYNC_E) F_(0, 0)		FM(PWM3_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_7_4		FM(IRQ3)		FM(QSTVB_QVE)	F_(0, 0)		FM(DU_DOTCLKOUT1)		FM(VI4_DATA3_B) F_(0, 0)	F_(0, 0)		FM(MSIOF3_SCK_E) F_(0, 0)		FM(PWM4_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_11_8	FM(IRQ4)		FM(QSTH_QHS)	F_(0, 0)		FM(DU_EXHSYNC_DU_HSYNC)		FM(VI4_DATA4_B) F_(0, 0)	F_(0, 0)		FM(MSIOF3_RXD_E) F_(0, 0)		FM(PWM5_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_15_12	FM(IRQ5)		FM(QSTB_QHE)	F_(0, 0)		FM(DU_EXVSYNC_DU_VSYNC)		FM(VI4_DATA5_B) FM(FSCLKST2_N_B) F_(0, 0)		FM(MSIOF3_TXD_E) F_(0, 0)		FM(PWM6_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_19_16	FM(PWM0)		FM(AVB_AVTP_PPS)F_(0, 0)		F_(0, 0)			FM(VI4_DATA6_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(IECLK_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_23_20	FM(PWM1_A)		F_(0, 0)	F_(0, 0)		FM(HRX3_D)			FM(VI4_DATA7_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(IERX_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_27_24	FM(PWM2_A)		F_(0, 0)	F_(0, 0)		FM(HTX3_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		FM(IETX_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_31_28	FM(A0)			FM(LCDOUT16)	FM(MSIOF3_SYNC_B)	F_(0, 0)			FM(VI4_DATA8)	F_(0, 0)	FM(DU_DB0)		F_(0, 0)	F_(0, 0)		FM(PWM3_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_3_0		FM(A1)			FM(LCDOUT17)	FM(MSIOF3_TXD_B)	F_(0, 0)			FM(VI4_DATA9)	F_(0, 0)	FM(DU_DB1)		F_(0, 0)	F_(0, 0)		FM(PWM4_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_7_4		FM(A2)			FM(LCDOUT18)	FM(MSIOF3_SCK_B)	F_(0, 0)			FM(VI4_DATA10)	F_(0, 0)	FM(DU_DB2)		F_(0, 0)	F_(0, 0)		FM(PWM5_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_11_8	FM(A3)			FM(LCDOUT19)	FM(MSIOF3_RXD_B)	F_(0, 0)			FM(VI4_DATA11)	F_(0, 0)	FM(DU_DB3)		F_(0, 0)	F_(0, 0)		FM(PWM6_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
+#define IP2_15_12	FM(A4)			FM(LCDOUT20)	FM(MSIOF3_SS1_B)	F_(0, 0)			FM(VI4_DATA12)	FM(VI5_DATA12)	FM(DU_DB4)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_19_16	FM(A5)			FM(LCDOUT21)	FM(MSIOF3_SS2_B)	FM(SCK4_B)			FM(VI4_DATA13)	FM(VI5_DATA13)	FM(DU_DB5)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_23_20	FM(A6)			FM(LCDOUT22)	FM(MSIOF2_SS1_A)	FM(RX4_B)			FM(VI4_DATA14)	FM(VI5_DATA14)	FM(DU_DB6)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_27_24	FM(A7)			FM(LCDOUT23)	FM(MSIOF2_SS2_A)	FM(TX4_B)			FM(VI4_DATA15)	FM(VI5_DATA15)	FM(DU_DB7)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_31_28	FM(A8)			FM(RX3_B)	FM(MSIOF2_SYNC_A)	FM(HRX4_B)			F_(0, 0)	F_(0, 0)	F_(0, 0)		FM(SDA6_A)	FM(AVB_AVTP_MATCH_B)	FM(PWM1_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_3_0		FM(A9)			F_(0, 0)	FM(MSIOF2_SCK_A)	FM(CTS4_N_B)			F_(0, 0)	FM(VI5_VSYNC_N)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_7_4		FM(A10)			F_(0, 0)	FM(MSIOF2_RXD_A)	FM(RTS4_N_B)			F_(0, 0)	FM(VI5_HSYNC_N)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_11_8	FM(A11)			FM(TX3_B)	FM(MSIOF2_TXD_A)	FM(HTX4_B)			FM(HSCK4)	FM(VI5_FIELD)	F_(0, 0)		FM(SCL6_A)	FM(AVB_AVTP_CAPTURE_B)	FM(PWM2_B)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_15_12	FM(A12)			FM(LCDOUT12)	FM(MSIOF3_SCK_C)	F_(0, 0)			FM(HRX4_A)	FM(VI5_DATA8)	FM(DU_DG4)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_19_16	FM(A13)			FM(LCDOUT13)	FM(MSIOF3_SYNC_C)	F_(0, 0)			FM(HTX4_A)	FM(VI5_DATA9)	FM(DU_DG5)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_23_20	FM(A14)			FM(LCDOUT14)	FM(MSIOF3_RXD_C)	F_(0, 0)			FM(HCTS4_N)	FM(VI5_DATA10)	FM(DU_DG6)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_27_24	FM(A15)			FM(LCDOUT15)	FM(MSIOF3_TXD_C)	F_(0, 0)			FM(HRTS4_N)	FM(VI5_DATA11)	FM(DU_DG7)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_31_28	FM(A16)			FM(LCDOUT8)	F_(0, 0)		F_(0, 0)			FM(VI4_FIELD)	F_(0, 0)	FM(DU_DG0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_3_0		FM(A17)			FM(LCDOUT9)	F_(0, 0)		F_(0, 0)			FM(VI4_VSYNC_N)	F_(0, 0)	FM(DU_DG1)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_7_4		FM(A18)			FM(LCDOUT10)	F_(0, 0)		F_(0, 0)			FM(VI4_HSYNC_N)	F_(0, 0)	FM(DU_DG2)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_11_8	FM(A19)			FM(LCDOUT11)	F_(0, 0)		F_(0, 0)			FM(VI4_CLKENB)	F_(0, 0)	FM(DU_DG3)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_15_12	FM(CS0_N)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(VI5_CLKENB)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_19_16	FM(CS1_N)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(VI5_CLK)	F_(0, 0)		FM(EX_WAIT0_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_23_20	FM(BS_N)		FM(QSTVA_QVS)	FM(MSIOF3_SCK_D)	FM(SCK3)			FM(HSCK3)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(CAN1_TX)		FM(CANFD1_TX)	FM(IETX_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_27_24	FM(RD_N)		F_(0, 0)	FM(MSIOF3_SYNC_D)	FM(RX3_A)			FM(HRX3_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(CAN0_TX_A)		FM(CANFD0_TX_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_31_28	FM(RD_WR_N)		F_(0, 0)	FM(MSIOF3_RXD_D)	FM(TX3_A)			FM(HTX3_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(CAN0_RX_A)		FM(CANFD0_RX_A)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_3_0		FM(WE0_N)		F_(0, 0)	FM(MSIOF3_TXD_D)	FM(CTS3_N)			FM(HCTS3_N)	F_(0, 0)	F_(0, 0)		FM(SCL6_B)	FM(CAN_CLK)		F_(0, 0)	FM(IECLK_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_7_4		FM(WE1_N)		F_(0, 0)	FM(MSIOF3_SS1_D)	FM(RTS3_N)			FM(HRTS3_N)	F_(0, 0)	F_(0, 0)		FM(SDA6_B)	FM(CAN1_RX)		FM(CANFD1_RX)	FM(IERX_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_11_8	FM(EX_WAIT0_A)		FM(QCLK)	F_(0, 0)		F_(0, 0)			FM(VI4_CLK)	F_(0, 0)	FM(DU_DOTCLKOUT0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_15_12	FM(D0)			FM(MSIOF2_SS1_B)FM(MSIOF3_SCK_A)	F_(0, 0)			FM(VI4_DATA16)	FM(VI5_DATA0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_19_16	FM(D1)			FM(MSIOF2_SS2_B)FM(MSIOF3_SYNC_A)	F_(0, 0)			FM(VI4_DATA17)	FM(VI5_DATA1)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_23_20	FM(D2)			F_(0, 0)	FM(MSIOF3_RXD_A)	F_(0, 0)			FM(VI4_DATA18)	FM(VI5_DATA2)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_27_24	FM(D3)			F_(0, 0)	FM(MSIOF3_TXD_A)	F_(0, 0)			FM(VI4_DATA19)	FM(VI5_DATA3)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_31_28	FM(D4)			FM(MSIOF2_SCK_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA20)	FM(VI5_DATA4)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_3_0		FM(D5)			FM(MSIOF2_SYNC_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA21)	FM(VI5_DATA5)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_7_4		FM(D6)			FM(MSIOF2_RXD_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA22)	FM(VI5_DATA6)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_11_8	FM(D7)			FM(MSIOF2_TXD_B)F_(0, 0)		F_(0, 0)			FM(VI4_DATA23)	FM(VI5_DATA7)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_15_12	FM(D8)			FM(LCDOUT0)	FM(MSIOF2_SCK_D)	FM(SCK4_C)			FM(VI4_DATA0_A)	F_(0, 0)	FM(DU_DR0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_19_16	FM(D9)			FM(LCDOUT1)	FM(MSIOF2_SYNC_D)	F_(0, 0)			FM(VI4_DATA1_A)	F_(0, 0)	FM(DU_DR1)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_23_20	FM(D10)			FM(LCDOUT2)	FM(MSIOF2_RXD_D)	FM(HRX3_B)			FM(VI4_DATA2_A)	FM(CTS4_N_C)	FM(DU_DR2)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_27_24	FM(D11)			FM(LCDOUT3)	FM(MSIOF2_TXD_D)	FM(HTX3_B)			FM(VI4_DATA3_A)	FM(RTS4_N_C)	FM(DU_DR3)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_31_28	FM(D12)			FM(LCDOUT4)	FM(MSIOF2_SS1_D)	FM(RX4_C)			FM(VI4_DATA4_A)	F_(0, 0)	FM(DU_DR4)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_3_0		FM(D13)			FM(LCDOUT5)	FM(MSIOF2_SS2_D)	FM(TX4_C)			FM(VI4_DATA5_A)	F_(0, 0)	FM(DU_DR5)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_7_4		FM(D14)			FM(LCDOUT6)	FM(MSIOF3_SS1_A)	FM(HRX3_C)			FM(VI4_DATA6_A)	F_(0, 0)	FM(DU_DR6)		FM(SCL6_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_11_8	FM(D15)			FM(LCDOUT7)	FM(MSIOF3_SS2_A)	FM(HTX3_C)			FM(VI4_DATA7_A)	F_(0, 0)	FM(DU_DR7)		FM(SDA6_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_19_16	FM(SD0_CLK)		F_(0, 0)	FM(MSIOF1_SCK_E)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_OPWM_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
+#define IP7_23_20	FM(SD0_CMD)		F_(0, 0)	FM(MSIOF1_SYNC_E)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_IVCXO27_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_27_24	FM(SD0_DAT0)		F_(0, 0)	FM(MSIOF1_RXD_E)	F_(0, 0)			F_(0, 0)	FM(TS_SCK0_B)	FM(STP_ISCLK_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_31_28	FM(SD0_DAT1)		F_(0, 0)	FM(MSIOF1_TXD_E)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC0_B)FM(STP_ISSYNC_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_3_0		FM(SD0_DAT2)		F_(0, 0)	FM(MSIOF1_SS1_E)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT0_B)	FM(STP_ISD_0_B)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_7_4		FM(SD0_DAT3)		F_(0, 0)	FM(MSIOF1_SS2_E)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN0_B)	FM(STP_ISEN_0_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_11_8	FM(SD1_CLK)		F_(0, 0)	FM(MSIOF1_SCK_G)	F_(0, 0)			F_(0, 0)	FM(SIM0_CLK_A)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_15_12	FM(SD1_CMD)		F_(0, 0)	FM(MSIOF1_SYNC_G)	FM(NFCE_N_B)			F_(0, 0)	FM(SIM0_D_A)	FM(STP_IVCXO27_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_19_16	FM(SD1_DAT0)		FM(SD2_DAT4)	FM(MSIOF1_RXD_G)	FM(NFWP_N_B)			F_(0, 0)	FM(TS_SCK1_B)	FM(STP_ISCLK_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_23_20	FM(SD1_DAT1)		FM(SD2_DAT5)	FM(MSIOF1_TXD_G)	FM(NFDATA14_B)			F_(0, 0)	FM(TS_SPSYNC1_B)FM(STP_ISSYNC_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_27_24	FM(SD1_DAT2)		FM(SD2_DAT6)	FM(MSIOF1_SS1_G)	FM(NFDATA15_B)			F_(0, 0)	FM(TS_SDAT1_B)	FM(STP_ISD_1_B)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_31_28	FM(SD1_DAT3)		FM(SD2_DAT7)	FM(MSIOF1_SS2_G)	FM(NFRB_N_B)			F_(0, 0)	FM(TS_SDEN1_B)	FM(STP_ISEN_1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_3_0		FM(SD2_CLK)		F_(0, 0)	FM(NFDATA8)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_7_4		FM(SD2_CMD)		F_(0, 0)	FM(NFDATA9)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_11_8	FM(SD2_DAT0)		F_(0, 0)	FM(NFDATA10)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_15_12	FM(SD2_DAT1)		F_(0, 0)	FM(NFDATA11)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_19_16	FM(SD2_DAT2)		F_(0, 0)	FM(NFDATA12)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_23_20	FM(SD2_DAT3)		F_(0, 0)	FM(NFDATA13)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_27_24	FM(SD2_DS)		F_(0, 0)	FM(NFALE)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(SATA_DEVSLP_B)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_31_28	FM(SD3_CLK)		F_(0, 0)	FM(NFWE_N)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_3_0	FM(SD3_CMD)		F_(0, 0)	FM(NFRE_N)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_7_4	FM(SD3_DAT0)		F_(0, 0)	FM(NFDATA0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_11_8	FM(SD3_DAT1)		F_(0, 0)	FM(NFDATA1)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_15_12	FM(SD3_DAT2)		F_(0, 0)	FM(NFDATA2)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_19_16	FM(SD3_DAT3)		F_(0, 0)	FM(NFDATA3)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_23_20	FM(SD3_DAT4)		FM(SD2_CD_A)	FM(NFDATA4)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_27_24	FM(SD3_DAT5)		FM(SD2_WP_A)	FM(NFDATA5)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_31_28	FM(SD3_DAT6)		FM(SD3_CD)	FM(NFDATA6)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_3_0	FM(SD3_DAT7)		FM(SD3_WP)	FM(NFDATA7)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_7_4	FM(SD3_DS)		F_(0, 0)	FM(NFCLE)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_11_8	FM(SD0_CD)		F_(0, 0)	FM(NFDATA14_A)		F_(0, 0)			FM(SCL2_B)	FM(SIM0_RST_A)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
+#define IP11_15_12	FM(SD0_WP)		F_(0, 0)	FM(NFDATA15_A)		F_(0, 0)			FM(SDA2_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_19_16	FM(SD1_CD)		F_(0, 0)	FM(NFRB_N_A)		F_(0, 0)			F_(0, 0)	FM(SIM0_CLK_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_23_20	FM(SD1_WP)		F_(0, 0)	FM(NFCE_N_A)		F_(0, 0)			F_(0, 0)	FM(SIM0_D_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_27_24	FM(SCK0)		FM(HSCK1_B)	FM(MSIOF1_SS2_B)	FM(AUDIO_CLKC_B)		FM(SDA2_A)	FM(SIM0_RST_B)	FM(STP_OPWM_0_C)	FM(RIF0_CLK_B)	F_(0, 0)		FM(ADICHS2)	FM(SCK5_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_31_28	FM(RX0)			FM(HRX1_B)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SCK0_C)	FM(STP_ISCLK_0_C)	FM(RIF0_D0_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_3_0	FM(TX0)			FM(HTX1_B)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC0_C)FM(STP_ISSYNC_0_C)	FM(RIF0_D1_B)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_7_4	FM(CTS0_N)		FM(HCTS1_N_B)	FM(MSIOF1_SYNC_B)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC1_C)FM(STP_ISSYNC_1_C)	FM(RIF1_SYNC_B)	FM(AUDIO_CLKOUT_C)	FM(ADICS_SAMP)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_11_8	FM(RTS0_N)		FM(HRTS1_N_B)	FM(MSIOF1_SS1_B)	FM(AUDIO_CLKA_B)		FM(SCL2_A)	F_(0, 0)	FM(STP_IVCXO27_1_C)	FM(RIF0_SYNC_B)	F_(0, 0)		FM(ADICHS1)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_15_12	FM(RX1_A)		FM(HRX1_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SDAT0_C)	FM(STP_ISD_0_C)		FM(RIF1_CLK_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_19_16	FM(TX1_A)		FM(HTX1_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	FM(TS_SDEN0_C)	FM(STP_ISEN_0_C)	FM(RIF1_D0_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_23_20	FM(CTS1_N)		FM(HCTS1_N_A)	FM(MSIOF1_RXD_B)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN1_C)	FM(STP_ISEN_1_C)	FM(RIF1_D0_B)	F_(0, 0)		FM(ADIDATA)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_27_24	FM(RTS1_N)		FM(HRTS1_N_A)	FM(MSIOF1_TXD_B)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT1_C)	FM(STP_ISD_1_C)		FM(RIF1_D1_B)	F_(0, 0)		FM(ADICHS0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_31_28	FM(SCK2)		FM(SCIF_CLK_B)	FM(MSIOF1_SCK_B)	F_(0, 0)			F_(0, 0)	FM(TS_SCK1_C)	FM(STP_ISCLK_1_C)	FM(RIF1_CLK_B)	F_(0, 0)		FM(ADICLK)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_3_0	FM(TX2_A)		F_(0, 0)	F_(0, 0)		FM(SD2_CD_B)			FM(SCL1_A)	F_(0, 0)	FM(FMCLK_A)		FM(RIF1_D1_C)	F_(0, 0)		FM(FSO_CFE_0_N)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_7_4	FM(RX2_A)		F_(0, 0)	F_(0, 0)		FM(SD2_WP_B)			FM(SDA1_A)	F_(0, 0)	FM(FMIN_A)		FM(RIF1_SYNC_C)	F_(0, 0)		FM(FSO_CFE_1_N)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_11_8	FM(HSCK0)		F_(0, 0)	FM(MSIOF1_SCK_D)	FM(AUDIO_CLKB_A)		FM(SSI_SDATA1_B)FM(TS_SCK0_D)	FM(STP_ISCLK_0_D)	FM(RIF0_CLK_C)	F_(0, 0)		F_(0, 0)	FM(RX5_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_15_12	FM(HRX0)		F_(0, 0)	FM(MSIOF1_RXD_D)	F_(0, 0)			FM(SSI_SDATA2_B)FM(TS_SDEN0_D)	FM(STP_ISEN_0_D)	FM(RIF0_D0_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_19_16	FM(HTX0)		F_(0, 0)	FM(MSIOF1_TXD_D)	F_(0, 0)			FM(SSI_SDATA9_B)FM(TS_SDAT0_D)	FM(STP_ISD_0_D)		FM(RIF0_D1_C)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_23_20	FM(HCTS0_N)		FM(RX2_B)	FM(MSIOF1_SYNC_D)	F_(0, 0)			FM(SSI_SCK9_A)	FM(TS_SPSYNC0_D)FM(STP_ISSYNC_0_D)	FM(RIF0_SYNC_C)	FM(AUDIO_CLKOUT1_A)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_27_24	FM(HRTS0_N)		FM(TX2_B)	FM(MSIOF1_SS1_D)	F_(0, 0)			FM(SSI_WS9_A)	F_(0, 0)	FM(STP_IVCXO27_0_D)	FM(BPFCLK_A)	FM(AUDIO_CLKOUT2_A)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_31_28	FM(MSIOF0_SYNC)		F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(AUDIO_CLKOUT_A)	F_(0, 0)	FM(TX5_B)	F_(0, 0)	F_(0, 0) FM(BPFCLK_D) F_(0, 0) F_(0, 0)
+#define IP14_3_0	FM(MSIOF0_SS1)		FM(RX5_A)	FM(NFWP_N_A)		FM(AUDIO_CLKA_C)		FM(SSI_SCK2_A)	F_(0, 0)	FM(STP_IVCXO27_0_C)	F_(0, 0)	FM(AUDIO_CLKOUT3_A)	F_(0, 0)	FM(TCLK1_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_7_4	FM(MSIOF0_SS2)		FM(TX5_A)	FM(MSIOF1_SS2_D)	FM(AUDIO_CLKC_A)		FM(SSI_WS2_A)	F_(0, 0)	FM(STP_OPWM_0_D)	F_(0, 0)	FM(AUDIO_CLKOUT_D)	F_(0, 0)	FM(SPEEDIN_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_11_8	FM(MLB_CLK)		F_(0, 0)	FM(MSIOF1_SCK_F)	F_(0, 0)			FM(SCL1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_15_12	FM(MLB_SIG)		FM(RX1_B)	FM(MSIOF1_SYNC_F)	F_(0, 0)			FM(SDA1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_19_16	FM(MLB_DAT)		FM(TX1_B)	FM(MSIOF1_RXD_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_23_20	FM(SSI_SCK01239)	F_(0, 0)	FM(MSIOF1_TXD_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_27_24	FM(SSI_WS01239)		F_(0, 0)	FM(MSIOF1_SS1_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */		/* 0 */			/* 1 */		/* 2 */			/* 3 */				/* 4 */		/* 5 */		/* 6 */			/* 7 */		/* 8 */			/* 9 */		/* A */		/* B */		/* C - F */
+#define IP14_31_28	FM(SSI_SDATA0)		F_(0, 0)	FM(MSIOF1_SS2_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_3_0	FM(SSI_SDATA1_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_7_4	FM(SSI_SDATA2_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)			FM(SSI_SCK1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_11_8	FM(SSI_SCK349)		F_(0, 0)	FM(MSIOF1_SS1_A)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_OPWM_0_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_15_12	FM(SSI_WS349)		FM(HCTS2_N_A)	FM(MSIOF1_SS2_A)	F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_IVCXO27_0_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_19_16	FM(SSI_SDATA3)		FM(HRTS2_N_A)	FM(MSIOF1_TXD_A)	F_(0, 0)			F_(0, 0)	FM(TS_SCK0_A)	FM(STP_ISCLK_0_A)	FM(RIF0_D1_A)	FM(RIF2_D0_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_23_20	FM(SSI_SCK4)		FM(HRX2_A)	FM(MSIOF1_SCK_A)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT0_A)	FM(STP_ISD_0_A)		FM(RIF0_CLK_A)	FM(RIF2_CLK_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_27_24	FM(SSI_WS4)		FM(HTX2_A)	FM(MSIOF1_SYNC_A)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN0_A)	FM(STP_ISEN_0_A)	FM(RIF0_SYNC_A)	FM(RIF2_SYNC_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_31_28	FM(SSI_SDATA4)		FM(HSCK2_A)	FM(MSIOF1_RXD_A)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC0_A)FM(STP_ISSYNC_0_A)	FM(RIF0_D0_A)	FM(RIF2_D1_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_3_0	FM(SSI_SCK6)		FM(USB2_PWEN)	F_(0, 0)		FM(SIM0_RST_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_7_4	FM(SSI_WS6)		FM(USB2_OVC)	F_(0, 0)		FM(SIM0_D_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_11_8	FM(SSI_SDATA6)		F_(0, 0)	F_(0, 0)		FM(SIM0_CLK_D)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	FM(SATA_DEVSLP_A)	F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_15_12	FM(SSI_SCK78)		FM(HRX2_B)	FM(MSIOF1_SCK_C)	F_(0, 0)			F_(0, 0)	FM(TS_SCK1_A)	FM(STP_ISCLK_1_A)	FM(RIF1_CLK_A)	FM(RIF3_CLK_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_19_16	FM(SSI_WS78)		FM(HTX2_B)	FM(MSIOF1_SYNC_C)	F_(0, 0)			F_(0, 0)	FM(TS_SDAT1_A)	FM(STP_ISD_1_A)		FM(RIF1_SYNC_A)	FM(RIF3_SYNC_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_23_20	FM(SSI_SDATA7)		FM(HCTS2_N_B)	FM(MSIOF1_RXD_C)	F_(0, 0)			F_(0, 0)	FM(TS_SDEN1_A)	FM(STP_ISEN_1_A)	FM(RIF1_D0_A)	FM(RIF3_D0_A)		F_(0, 0)	FM(TCLK2_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_27_24	FM(SSI_SDATA8)		FM(HRTS2_N_B)	FM(MSIOF1_TXD_C)	F_(0, 0)			F_(0, 0)	FM(TS_SPSYNC1_A)FM(STP_ISSYNC_1_A)	FM(RIF1_D1_A)	FM(RIF3_D1_A)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_31_28	FM(SSI_SDATA9_A)	FM(HSCK2_B)	FM(MSIOF1_SS1_C)	FM(HSCK1_A)			FM(SSI_WS1_B)	FM(SCK1)	FM(STP_IVCXO27_1_A)	FM(SCK5_A)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP17_3_0	FM(AUDIO_CLKA_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP17_7_4	FM(AUDIO_CLKB_B)	FM(SCIF_CLK_A)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	FM(STP_IVCXO27_1_D)	FM(REMOCON_A)	F_(0, 0)		F_(0, 0)	FM(TCLK1_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP17_11_8	FM(USB0_PWEN)		F_(0, 0)	F_(0, 0)		FM(SIM0_RST_C)			F_(0, 0)	FM(TS_SCK1_D)	FM(STP_ISCLK_1_D)	FM(BPFCLK_B)	FM(RIF3_CLK_B)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) FM(HSCK2_C) F_(0, 0) F_(0, 0)
+#define IP17_15_12	FM(USB0_OVC)		F_(0, 0)	F_(0, 0)		FM(SIM0_D_C)			F_(0, 0)	FM(TS_SDAT1_D)	FM(STP_ISD_1_D)		F_(0, 0)	FM(RIF3_SYNC_B)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) FM(HRX2_C) F_(0, 0) F_(0, 0)
+#define IP17_19_16	FM(USB1_PWEN)		F_(0, 0)	F_(0, 0)		FM(SIM0_CLK_C)			FM(SSI_SCK1_A)	FM(TS_SCK0_E)	FM(STP_ISCLK_0_E)	FM(FMCLK_B)	FM(RIF2_CLK_B)		F_(0, 0)	FM(SPEEDIN_A)	F_(0, 0)	F_(0, 0) FM(HTX2_C) F_(0, 0) F_(0, 0)
+#define IP17_23_20	FM(USB1_OVC)		F_(0, 0)	FM(MSIOF1_SS2_C)	F_(0, 0)			FM(SSI_WS1_A)	FM(TS_SDAT0_E)	FM(STP_ISD_0_E)		FM(FMIN_B)	FM(RIF2_SYNC_B)		F_(0, 0)	FM(REMOCON_B)	F_(0, 0)	F_(0, 0) FM(HCTS2_N_C) F_(0, 0) F_(0, 0)
+#define IP17_27_24	FM(USB30_PWEN)		F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT_B)		FM(SSI_SCK2_B)	FM(TS_SDEN1_D)	FM(STP_ISEN_1_D)	FM(STP_OPWM_0_E)FM(RIF3_D0_B)		F_(0, 0)	FM(TCLK2_B)	FM(TPU0TO0)	FM(BPFCLK_C) FM(HRTS2_N_C) F_(0, 0) F_(0, 0)
+#define IP17_31_28	FM(USB30_OVC)		F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT1_B)		FM(SSI_WS2_B)	FM(TS_SPSYNC1_D)FM(STP_ISSYNC_1_D)	FM(STP_IVCXO27_0_E)FM(RIF3_D1_B)	F_(0, 0)	FM(FSO_TOE_N)	FM(TPU0TO1)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP18_3_0	FM(USB2_CH3_PWEN)	F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT2_B)		FM(SSI_SCK9_B)	FM(TS_SDEN0_E)	FM(STP_ISEN_0_E)	F_(0, 0)	FM(RIF2_D0_B)		F_(0, 0)	F_(0, 0)	FM(TPU0TO2)	FM(FMCLK_C) FM(FMCLK_D) F_(0, 0) F_(0, 0)
+#define IP18_7_4	FM(USB2_CH3_OVC)	F_(0, 0)	F_(0, 0)		FM(AUDIO_CLKOUT3_B)		FM(SSI_WS9_B)	FM(TS_SPSYNC0_E)FM(STP_ISSYNC_0_E)	F_(0, 0)	FM(RIF2_D1_B)		F_(0, 0)	F_(0, 0)	FM(TPU0TO3)	FM(FMIN_C) FM(FMIN_D) F_(0, 0) F_(0, 0)
+
+#define PINMUX_GPSR	\
+\
+												GPSR6_31 \
+												GPSR6_30 \
+												GPSR6_29 \
+		GPSR1_28									GPSR6_28 \
+		GPSR1_27									GPSR6_27 \
+		GPSR1_26									GPSR6_26 \
+		GPSR1_25							GPSR5_25	GPSR6_25 \
+		GPSR1_24							GPSR5_24	GPSR6_24 \
+		GPSR1_23							GPSR5_23	GPSR6_23 \
+		GPSR1_22							GPSR5_22	GPSR6_22 \
+		GPSR1_21							GPSR5_21	GPSR6_21 \
+		GPSR1_20							GPSR5_20	GPSR6_20 \
+		GPSR1_19							GPSR5_19	GPSR6_19 \
+		GPSR1_18							GPSR5_18	GPSR6_18 \
+		GPSR1_17					GPSR4_17	GPSR5_17	GPSR6_17 \
+		GPSR1_16					GPSR4_16	GPSR5_16	GPSR6_16 \
+GPSR0_15	GPSR1_15			GPSR3_15	GPSR4_15	GPSR5_15	GPSR6_15 \
+GPSR0_14	GPSR1_14	GPSR2_14	GPSR3_14	GPSR4_14	GPSR5_14	GPSR6_14 \
+GPSR0_13	GPSR1_13	GPSR2_13	GPSR3_13	GPSR4_13	GPSR5_13	GPSR6_13 \
+GPSR0_12	GPSR1_12	GPSR2_12	GPSR3_12	GPSR4_12	GPSR5_12	GPSR6_12 \
+GPSR0_11	GPSR1_11	GPSR2_11	GPSR3_11	GPSR4_11	GPSR5_11	GPSR6_11 \
+GPSR0_10	GPSR1_10	GPSR2_10	GPSR3_10	GPSR4_10	GPSR5_10	GPSR6_10 \
+GPSR0_9		GPSR1_9		GPSR2_9		GPSR3_9		GPSR4_9		GPSR5_9		GPSR6_9 \
+GPSR0_8		GPSR1_8		GPSR2_8		GPSR3_8		GPSR4_8		GPSR5_8		GPSR6_8 \
+GPSR0_7		GPSR1_7		GPSR2_7		GPSR3_7		GPSR4_7		GPSR5_7		GPSR6_7 \
+GPSR0_6		GPSR1_6		GPSR2_6		GPSR3_6		GPSR4_6		GPSR5_6		GPSR6_6 \
+GPSR0_5		GPSR1_5		GPSR2_5		GPSR3_5		GPSR4_5		GPSR5_5		GPSR6_5 \
+GPSR0_4		GPSR1_4		GPSR2_4		GPSR3_4		GPSR4_4		GPSR5_4		GPSR6_4 \
+GPSR0_3		GPSR1_3		GPSR2_3		GPSR3_3		GPSR4_3		GPSR5_3		GPSR6_3		GPSR7_3 \
+GPSR0_2		GPSR1_2		GPSR2_2		GPSR3_2		GPSR4_2		GPSR5_2		GPSR6_2		GPSR7_2 \
+GPSR0_1		GPSR1_1		GPSR2_1		GPSR3_1		GPSR4_1		GPSR5_1		GPSR6_1		GPSR7_1 \
+GPSR0_0		GPSR1_0		GPSR2_0		GPSR3_0		GPSR4_0		GPSR5_0		GPSR6_0		GPSR7_0
+
+#define PINMUX_IPSR				\
+\
+FM(IP0_3_0)	IP0_3_0		FM(IP1_3_0)	IP1_3_0		FM(IP2_3_0)	IP2_3_0		FM(IP3_3_0)	IP3_3_0 \
+FM(IP0_7_4)	IP0_7_4		FM(IP1_7_4)	IP1_7_4		FM(IP2_7_4)	IP2_7_4		FM(IP3_7_4)	IP3_7_4 \
+FM(IP0_11_8)	IP0_11_8	FM(IP1_11_8)	IP1_11_8	FM(IP2_11_8)	IP2_11_8	FM(IP3_11_8)	IP3_11_8 \
+FM(IP0_15_12)	IP0_15_12	FM(IP1_15_12)	IP1_15_12	FM(IP2_15_12)	IP2_15_12	FM(IP3_15_12)	IP3_15_12 \
+FM(IP0_19_16)	IP0_19_16	FM(IP1_19_16)	IP1_19_16	FM(IP2_19_16)	IP2_19_16	FM(IP3_19_16)	IP3_19_16 \
+FM(IP0_23_20)	IP0_23_20	FM(IP1_23_20)	IP1_23_20	FM(IP2_23_20)	IP2_23_20	FM(IP3_23_20)	IP3_23_20 \
+FM(IP0_27_24)	IP0_27_24	FM(IP1_27_24)	IP1_27_24	FM(IP2_27_24)	IP2_27_24	FM(IP3_27_24)	IP3_27_24 \
+FM(IP0_31_28)	IP0_31_28	FM(IP1_31_28)	IP1_31_28	FM(IP2_31_28)	IP2_31_28	FM(IP3_31_28)	IP3_31_28 \
+\
+FM(IP4_3_0)	IP4_3_0		FM(IP5_3_0)	IP5_3_0		FM(IP6_3_0)	IP6_3_0		FM(IP7_3_0)	IP7_3_0 \
+FM(IP4_7_4)	IP4_7_4		FM(IP5_7_4)	IP5_7_4		FM(IP6_7_4)	IP6_7_4		FM(IP7_7_4)	IP7_7_4 \
+FM(IP4_11_8)	IP4_11_8	FM(IP5_11_8)	IP5_11_8	FM(IP6_11_8)	IP6_11_8	FM(IP7_11_8)	IP7_11_8 \
+FM(IP4_15_12)	IP4_15_12	FM(IP5_15_12)	IP5_15_12	FM(IP6_15_12)	IP6_15_12 \
+FM(IP4_19_16)	IP4_19_16	FM(IP5_19_16)	IP5_19_16	FM(IP6_19_16)	IP6_19_16	FM(IP7_19_16)	IP7_19_16 \
+FM(IP4_23_20)	IP4_23_20	FM(IP5_23_20)	IP5_23_20	FM(IP6_23_20)	IP6_23_20	FM(IP7_23_20)	IP7_23_20 \
+FM(IP4_27_24)	IP4_27_24	FM(IP5_27_24)	IP5_27_24	FM(IP6_27_24)	IP6_27_24	FM(IP7_27_24)	IP7_27_24 \
+FM(IP4_31_28)	IP4_31_28	FM(IP5_31_28)	IP5_31_28	FM(IP6_31_28)	IP6_31_28	FM(IP7_31_28)	IP7_31_28 \
+\
+FM(IP8_3_0)	IP8_3_0		FM(IP9_3_0)	IP9_3_0		FM(IP10_3_0)	IP10_3_0	FM(IP11_3_0)	IP11_3_0 \
+FM(IP8_7_4)	IP8_7_4		FM(IP9_7_4)	IP9_7_4		FM(IP10_7_4)	IP10_7_4	FM(IP11_7_4)	IP11_7_4 \
+FM(IP8_11_8)	IP8_11_8	FM(IP9_11_8)	IP9_11_8	FM(IP10_11_8)	IP10_11_8	FM(IP11_11_8)	IP11_11_8 \
+FM(IP8_15_12)	IP8_15_12	FM(IP9_15_12)	IP9_15_12	FM(IP10_15_12)	IP10_15_12	FM(IP11_15_12)	IP11_15_12 \
+FM(IP8_19_16)	IP8_19_16	FM(IP9_19_16)	IP9_19_16	FM(IP10_19_16)	IP10_19_16	FM(IP11_19_16)	IP11_19_16 \
+FM(IP8_23_20)	IP8_23_20	FM(IP9_23_20)	IP9_23_20	FM(IP10_23_20)	IP10_23_20	FM(IP11_23_20)	IP11_23_20 \
+FM(IP8_27_24)	IP8_27_24	FM(IP9_27_24)	IP9_27_24	FM(IP10_27_24)	IP10_27_24	FM(IP11_27_24)	IP11_27_24 \
+FM(IP8_31_28)	IP8_31_28	FM(IP9_31_28)	IP9_31_28	FM(IP10_31_28)	IP10_31_28	FM(IP11_31_28)	IP11_31_28 \
+\
+FM(IP12_3_0)	IP12_3_0	FM(IP13_3_0)	IP13_3_0	FM(IP14_3_0)	IP14_3_0	FM(IP15_3_0)	IP15_3_0 \
+FM(IP12_7_4)	IP12_7_4	FM(IP13_7_4)	IP13_7_4	FM(IP14_7_4)	IP14_7_4	FM(IP15_7_4)	IP15_7_4 \
+FM(IP12_11_8)	IP12_11_8	FM(IP13_11_8)	IP13_11_8	FM(IP14_11_8)	IP14_11_8	FM(IP15_11_8)	IP15_11_8 \
+FM(IP12_15_12)	IP12_15_12	FM(IP13_15_12)	IP13_15_12	FM(IP14_15_12)	IP14_15_12	FM(IP15_15_12)	IP15_15_12 \
+FM(IP12_19_16)	IP12_19_16	FM(IP13_19_16)	IP13_19_16	FM(IP14_19_16)	IP14_19_16	FM(IP15_19_16)	IP15_19_16 \
+FM(IP12_23_20)	IP12_23_20	FM(IP13_23_20)	IP13_23_20	FM(IP14_23_20)	IP14_23_20	FM(IP15_23_20)	IP15_23_20 \
+FM(IP12_27_24)	IP12_27_24	FM(IP13_27_24)	IP13_27_24	FM(IP14_27_24)	IP14_27_24	FM(IP15_27_24)	IP15_27_24 \
+FM(IP12_31_28)	IP12_31_28	FM(IP13_31_28)	IP13_31_28	FM(IP14_31_28)	IP14_31_28	FM(IP15_31_28)	IP15_31_28 \
+\
+FM(IP16_3_0)	IP16_3_0	FM(IP17_3_0)	IP17_3_0	FM(IP18_3_0)	IP18_3_0 \
+FM(IP16_7_4)	IP16_7_4	FM(IP17_7_4)	IP17_7_4	FM(IP18_7_4)	IP18_7_4 \
+FM(IP16_11_8)	IP16_11_8	FM(IP17_11_8)	IP17_11_8 \
+FM(IP16_15_12)	IP16_15_12	FM(IP17_15_12)	IP17_15_12 \
+FM(IP16_19_16)	IP16_19_16	FM(IP17_19_16)	IP17_19_16 \
+FM(IP16_23_20)	IP16_23_20	FM(IP17_23_20)	IP17_23_20 \
+FM(IP16_27_24)	IP16_27_24	FM(IP17_27_24)	IP17_27_24 \
+FM(IP16_31_28)	IP16_31_28	FM(IP17_31_28)	IP17_31_28
+
+/* MOD_SEL0 */			/* 0 */			/* 1 */			/* 2 */			/* 3 */			/* 4 */			/* 5 */			/* 6 */			/* 7 */
+#define MOD_SEL0_31_30_29	FM(SEL_MSIOF3_0)	FM(SEL_MSIOF3_1)	FM(SEL_MSIOF3_2)	FM(SEL_MSIOF3_3)	FM(SEL_MSIOF3_4)	F_(0, 0)		F_(0, 0)		F_(0, 0)
+#define MOD_SEL0_28_27		FM(SEL_MSIOF2_0)	FM(SEL_MSIOF2_1)	FM(SEL_MSIOF2_2)	FM(SEL_MSIOF2_3)
+#define MOD_SEL0_26_25_24	FM(SEL_MSIOF1_0)	FM(SEL_MSIOF1_1)	FM(SEL_MSIOF1_2)	FM(SEL_MSIOF1_3)	FM(SEL_MSIOF1_4)	FM(SEL_MSIOF1_5)	FM(SEL_MSIOF1_6)	F_(0, 0)
+#define MOD_SEL0_23		FM(SEL_LBSC_0)		FM(SEL_LBSC_1)
+#define MOD_SEL0_22		FM(SEL_IEBUS_0)		FM(SEL_IEBUS_1)
+#define MOD_SEL0_21		FM(SEL_I2C2_0)		FM(SEL_I2C2_1)
+#define MOD_SEL0_20		FM(SEL_I2C1_0)		FM(SEL_I2C1_1)
+#define MOD_SEL0_19		FM(SEL_HSCIF4_0)	FM(SEL_HSCIF4_1)
+#define MOD_SEL0_18_17		FM(SEL_HSCIF3_0)	FM(SEL_HSCIF3_1)	FM(SEL_HSCIF3_2)	FM(SEL_HSCIF3_3)
+#define MOD_SEL0_16		FM(SEL_HSCIF1_0)	FM(SEL_HSCIF1_1)
+#define MOD_SEL0_14_13		FM(SEL_HSCIF2_0)	FM(SEL_HSCIF2_1)	FM(SEL_HSCIF2_2)	F_(0, 0)
+#define MOD_SEL0_12		FM(SEL_ETHERAVB_0)	FM(SEL_ETHERAVB_1)
+#define MOD_SEL0_11		FM(SEL_DRIF3_0)		FM(SEL_DRIF3_1)
+#define MOD_SEL0_10		FM(SEL_DRIF2_0)		FM(SEL_DRIF2_1)
+#define MOD_SEL0_9_8		FM(SEL_DRIF1_0)		FM(SEL_DRIF1_1)		FM(SEL_DRIF1_2)		F_(0, 0)
+#define MOD_SEL0_7_6		FM(SEL_DRIF0_0)		FM(SEL_DRIF0_1)		FM(SEL_DRIF0_2)		F_(0, 0)
+#define MOD_SEL0_5		FM(SEL_CANFD0_0)	FM(SEL_CANFD0_1)
+#define MOD_SEL0_4_3		FM(SEL_ADGA_0)		FM(SEL_ADGA_1)		FM(SEL_ADGA_2)		FM(SEL_ADGA_3)
+
+/* MOD_SEL1 */			/* 0 */			/* 1 */			/* 2 */			/* 3 */			/* 4 */			/* 5 */			/* 6 */			/* 7 */
+#define MOD_SEL1_31_30		FM(SEL_TSIF1_0)		FM(SEL_TSIF1_1)		FM(SEL_TSIF1_2)		FM(SEL_TSIF1_3)
+#define MOD_SEL1_29_28_27	FM(SEL_TSIF0_0)		FM(SEL_TSIF0_1)		FM(SEL_TSIF0_2)		FM(SEL_TSIF0_3)		FM(SEL_TSIF0_4)		F_(0, 0)		F_(0, 0)		F_(0, 0)
+#define MOD_SEL1_26		FM(SEL_TIMER_TMU1_0)	FM(SEL_TIMER_TMU1_1)
+#define MOD_SEL1_25_24		FM(SEL_SSP1_1_0)	FM(SEL_SSP1_1_1)	FM(SEL_SSP1_1_2)	FM(SEL_SSP1_1_3)
+#define MOD_SEL1_23_22_21	FM(SEL_SSP1_0_0)	FM(SEL_SSP1_0_1)	FM(SEL_SSP1_0_2)	FM(SEL_SSP1_0_3)	FM(SEL_SSP1_0_4)	F_(0, 0)		F_(0, 0)		F_(0, 0)
+#define MOD_SEL1_20		FM(SEL_SSI1_0)		FM(SEL_SSI1_1)
+#define MOD_SEL1_19		FM(SEL_SPEED_PULSE_0)	FM(SEL_SPEED_PULSE_1)
+#define MOD_SEL1_18_17		FM(SEL_SIMCARD_0)	FM(SEL_SIMCARD_1)	FM(SEL_SIMCARD_2)	FM(SEL_SIMCARD_3)
+#define MOD_SEL1_16		FM(SEL_SDHI2_0)		FM(SEL_SDHI2_1)
+#define MOD_SEL1_15_14		FM(SEL_SCIF4_0)		FM(SEL_SCIF4_1)		FM(SEL_SCIF4_2)		F_(0, 0)
+#define MOD_SEL1_13		FM(SEL_SCIF3_0)		FM(SEL_SCIF3_1)
+#define MOD_SEL1_12		FM(SEL_SCIF2_0)		FM(SEL_SCIF2_1)
+#define MOD_SEL1_11		FM(SEL_SCIF1_0)		FM(SEL_SCIF1_1)
+#define MOD_SEL1_10		FM(SEL_SCIF_0)		FM(SEL_SCIF_1)
+#define MOD_SEL1_9		FM(SEL_REMOCON_0)	FM(SEL_REMOCON_1)
+#define MOD_SEL1_6		FM(SEL_RCAN0_0)		FM(SEL_RCAN0_1)
+#define MOD_SEL1_5		FM(SEL_PWM6_0)		FM(SEL_PWM6_1)
+#define MOD_SEL1_4		FM(SEL_PWM5_0)		FM(SEL_PWM5_1)
+#define MOD_SEL1_3		FM(SEL_PWM4_0)		FM(SEL_PWM4_1)
+#define MOD_SEL1_2		FM(SEL_PWM3_0)		FM(SEL_PWM3_1)
+#define MOD_SEL1_1		FM(SEL_PWM2_0)		FM(SEL_PWM2_1)
+#define MOD_SEL1_0		FM(SEL_PWM1_0)		FM(SEL_PWM1_1)
+
+/* MOD_SEL2 */			/* 0 */			/* 1 */			/* 2 */			/* 3 */
+#define MOD_SEL2_31		FM(I2C_SEL_5_0)		FM(I2C_SEL_5_1)
+#define MOD_SEL2_30		FM(I2C_SEL_3_0)		FM(I2C_SEL_3_1)
+#define MOD_SEL2_29		FM(I2C_SEL_0_0)		FM(I2C_SEL_0_1)
+#define MOD_SEL2_28_27		FM(SEL_FM_0)		FM(SEL_FM_1)		FM(SEL_FM_2)		FM(SEL_FM_3)
+#define MOD_SEL2_26		FM(SEL_SCIF5_0)		FM(SEL_SCIF5_1)
+#define MOD_SEL2_25_24_23	FM(SEL_I2C6_0)		FM(SEL_I2C6_1)		FM(SEL_I2C6_2)		F_(0, 0)		F_(0, 0)		F_(0, 0)		F_(0, 0)		F_(0, 0)
+#define MOD_SEL2_21		FM(SEL_SSI2_0)		FM(SEL_SSI2_1)
+#define MOD_SEL2_20		FM(SEL_SSI9_0)		FM(SEL_SSI9_1)
+#define MOD_SEL2_19		FM(SEL_TIMER_TMU2_0)	FM(SEL_TIMER_TMU2_1)
+#define MOD_SEL2_18		FM(SEL_ADGB_0)		FM(SEL_ADGB_1)
+#define MOD_SEL2_17		FM(SEL_ADGC_0)		FM(SEL_ADGC_1)
+#define MOD_SEL2_0		FM(SEL_VIN4_0)		FM(SEL_VIN4_1)
+
+#define PINMUX_MOD_SELS	\
+\
+MOD_SEL0_31_30_29	MOD_SEL1_31_30		MOD_SEL2_31 \
+						MOD_SEL2_30 \
+			MOD_SEL1_29_28_27	MOD_SEL2_29 \
+MOD_SEL0_28_27					MOD_SEL2_28_27 \
+MOD_SEL0_26_25_24	MOD_SEL1_26		MOD_SEL2_26 \
+			MOD_SEL1_25_24		MOD_SEL2_25_24_23 \
+MOD_SEL0_23		MOD_SEL1_23_22_21 \
+MOD_SEL0_22 \
+MOD_SEL0_21					MOD_SEL2_21 \
+MOD_SEL0_20		MOD_SEL1_20		MOD_SEL2_20 \
+MOD_SEL0_19		MOD_SEL1_19		MOD_SEL2_19 \
+MOD_SEL0_18_17		MOD_SEL1_18_17		MOD_SEL2_18 \
+						MOD_SEL2_17 \
+MOD_SEL0_16		MOD_SEL1_16 \
+			MOD_SEL1_15_14 \
+MOD_SEL0_14_13 \
+			MOD_SEL1_13 \
+MOD_SEL0_12		MOD_SEL1_12 \
+MOD_SEL0_11		MOD_SEL1_11 \
+MOD_SEL0_10		MOD_SEL1_10 \
+MOD_SEL0_9_8		MOD_SEL1_9 \
+MOD_SEL0_7_6 \
+			MOD_SEL1_6 \
+MOD_SEL0_5		MOD_SEL1_5 \
+MOD_SEL0_4_3		MOD_SEL1_4 \
+			MOD_SEL1_3 \
+			MOD_SEL1_2 \
+			MOD_SEL1_1 \
+			MOD_SEL1_0		MOD_SEL2_0
+
+/*
+ * These pins cannot be muxed, but they do have other configurable
+ * properties, such as drive strength or pull-up/pull-down enable.
+ */
+#define PINMUX_STATIC \
+	FM(QSPI0_SPCLK) FM(QSPI0_SSL) FM(QSPI0_MOSI_IO0) FM(QSPI0_MISO_IO1) \
+	FM(QSPI0_IO2) FM(QSPI0_IO3) \
+	FM(QSPI1_SPCLK) FM(QSPI1_SSL) FM(QSPI1_MOSI_IO0) FM(QSPI1_MISO_IO1) \
+	FM(QSPI1_IO2) FM(QSPI1_IO3) \
+	FM(RPC_INT) FM(RPC_WP) FM(RPC_RESET) \
+	FM(AVB_TX_CTL) FM(AVB_TXC) FM(AVB_TD0) FM(AVB_TD1) FM(AVB_TD2) FM(AVB_TD3) \
+	FM(AVB_RX_CTL) FM(AVB_RXC) FM(AVB_RD0) FM(AVB_RD1) FM(AVB_RD2) FM(AVB_RD3) \
+	FM(AVB_TXCREFCLK) FM(AVB_MDIO) \
+	FM(PRESETOUT) \
+	FM(DU_DOTCLKIN0) FM(DU_DOTCLKIN1) FM(DU_DOTCLKIN2) FM(DU_DOTCLKIN3) \
+	FM(TMS) FM(TDO) FM(ASEBRK) FM(MLB_REF) FM(TDI) FM(TCK) FM(TRST) FM(EXTALR)
+
+#define PINMUX_PHYS \
+	FM(SCL0) FM(SDA0) FM(SCL3) FM(SDA3) FM(SCL5) FM(SDA5)
+
+enum {
+	PINMUX_RESERVED = 0,
+
+	PINMUX_DATA_BEGIN,
+	GP_ALL(DATA),
+	PINMUX_DATA_END,
+
+#define F_(x, y)
+#define FM(x)	FN_##x,
+	PINMUX_FUNCTION_BEGIN,
+	GP_ALL(FN),
+	PINMUX_GPSR
+	PINMUX_IPSR
+	PINMUX_MOD_SELS
+	PINMUX_FUNCTION_END,
+#undef F_
+#undef FM
+
+#define F_(x, y)
+#define FM(x)	x##_MARK,
+	PINMUX_MARK_BEGIN,
+	PINMUX_GPSR
+	PINMUX_IPSR
+	PINMUX_MOD_SELS
+	PINMUX_STATIC
+	PINMUX_PHYS
+	PINMUX_MARK_END,
+#undef F_
+#undef FM
+};
+
+static const u16 pinmux_data[] = {
+	PINMUX_DATA_GP_ALL(),
+
+	PINMUX_SINGLE(AVS1),
+	PINMUX_SINGLE(AVS2),
+	PINMUX_SINGLE(CLKOUT),
+	PINMUX_SINGLE(GP7_02),
+	PINMUX_SINGLE(GP7_03),
+	PINMUX_SINGLE(MSIOF0_RXD),
+	PINMUX_SINGLE(MSIOF0_SCK),
+	PINMUX_SINGLE(MSIOF0_TXD),
+	PINMUX_SINGLE(SSI_SCK5),
+	PINMUX_SINGLE(SSI_SDATA5),
+	PINMUX_SINGLE(SSI_WS5),
+
+	/* IPSR0 */
+	PINMUX_IPSR_GPSR(IP0_3_0,	AVB_MDC),
+	PINMUX_IPSR_MSEL(IP0_3_0,	MSIOF2_SS2_C,		SEL_MSIOF2_2),
+
+	PINMUX_IPSR_GPSR(IP0_7_4,	AVB_MAGIC),
+	PINMUX_IPSR_MSEL(IP0_7_4,	MSIOF2_SS1_C,		SEL_MSIOF2_2),
+	PINMUX_IPSR_MSEL(IP0_7_4,	SCK4_A,			SEL_SCIF4_0),
+
+	PINMUX_IPSR_GPSR(IP0_11_8,	AVB_PHY_INT),
+	PINMUX_IPSR_MSEL(IP0_11_8,	MSIOF2_SYNC_C,		SEL_MSIOF2_2),
+	PINMUX_IPSR_MSEL(IP0_11_8,	RX4_A,			SEL_SCIF4_0),
+
+	PINMUX_IPSR_GPSR(IP0_15_12,	AVB_LINK),
+	PINMUX_IPSR_MSEL(IP0_15_12,	MSIOF2_SCK_C,		SEL_MSIOF2_2),
+	PINMUX_IPSR_MSEL(IP0_15_12,	TX4_A,			SEL_SCIF4_0),
+
+	PINMUX_IPSR_PHYS_MSEL(IP0_19_16, AVB_AVTP_MATCH_A,	I2C_SEL_5_0,	SEL_ETHERAVB_0),
+	PINMUX_IPSR_PHYS_MSEL(IP0_19_16, MSIOF2_RXD_C,		I2C_SEL_5_0,	SEL_MSIOF2_2),
+	PINMUX_IPSR_PHYS_MSEL(IP0_19_16, CTS4_N_A,		I2C_SEL_5_0,	SEL_SCIF4_0),
+	PINMUX_IPSR_MSEL(IP0_19_16,	FSCLKST2_N_A,		I2C_SEL_5_0),
+	PINMUX_IPSR_PHYS(IP0_19_16,     SCL5,                   I2C_SEL_5_1),
+
+	PINMUX_IPSR_PHYS_MSEL(IP0_23_20, AVB_AVTP_CAPTURE_A,	I2C_SEL_5_0,	SEL_ETHERAVB_0),
+	PINMUX_IPSR_PHYS_MSEL(IP0_23_20, MSIOF2_TXD_C,		I2C_SEL_5_0,	SEL_MSIOF2_2),
+	PINMUX_IPSR_PHYS_MSEL(IP0_23_20, RTS4_N_A,		I2C_SEL_5_0,	SEL_SCIF4_0),
+	PINMUX_IPSR_PHYS(IP0_23_20,     SDA5,                   I2C_SEL_5_1),
+
+	PINMUX_IPSR_GPSR(IP0_27_24,	IRQ0),
+	PINMUX_IPSR_GPSR(IP0_27_24,	QPOLB),
+	PINMUX_IPSR_GPSR(IP0_27_24,	DU_CDE),
+	PINMUX_IPSR_MSEL(IP0_27_24,	VI4_DATA0_B,		SEL_VIN4_1),
+	PINMUX_IPSR_MSEL(IP0_27_24,	CAN0_TX_B,		SEL_RCAN0_1),
+	PINMUX_IPSR_MSEL(IP0_27_24,	CANFD0_TX_B,		SEL_CANFD0_1),
+	PINMUX_IPSR_MSEL(IP0_27_24,	MSIOF3_SS2_E,		SEL_MSIOF3_4),
+
+	PINMUX_IPSR_GPSR(IP0_31_28,	IRQ1),
+	PINMUX_IPSR_GPSR(IP0_31_28,	QPOLA),
+	PINMUX_IPSR_GPSR(IP0_31_28,	DU_DISP),
+	PINMUX_IPSR_MSEL(IP0_31_28,	VI4_DATA1_B,		SEL_VIN4_1),
+	PINMUX_IPSR_MSEL(IP0_31_28,	CAN0_RX_B,		SEL_RCAN0_1),
+	PINMUX_IPSR_MSEL(IP0_31_28,	CANFD0_RX_B,		SEL_CANFD0_1),
+	PINMUX_IPSR_MSEL(IP0_31_28,	MSIOF3_SS1_E,		SEL_MSIOF3_4),
+
+	/* IPSR1 */
+	PINMUX_IPSR_GPSR(IP1_3_0,	IRQ2),
+	PINMUX_IPSR_GPSR(IP1_3_0,	QCPV_QDE),
+	PINMUX_IPSR_GPSR(IP1_3_0,	DU_EXODDF_DU_ODDF_DISP_CDE),
+	PINMUX_IPSR_MSEL(IP1_3_0,	VI4_DATA2_B,		SEL_VIN4_1),
+	PINMUX_IPSR_MSEL(IP1_3_0,	PWM3_B,			SEL_PWM3_1),
+	PINMUX_IPSR_MSEL(IP1_3_0,	MSIOF3_SYNC_E,		SEL_MSIOF3_4),
+
+	PINMUX_IPSR_GPSR(IP1_7_4,	IRQ3),
+	PINMUX_IPSR_GPSR(IP1_7_4,	QSTVB_QVE),
+	PINMUX_IPSR_GPSR(IP1_7_4,	DU_DOTCLKOUT1),
+	PINMUX_IPSR_MSEL(IP1_7_4,	VI4_DATA3_B,		SEL_VIN4_1),
+	PINMUX_IPSR_MSEL(IP1_7_4,	PWM4_B,			SEL_PWM4_1),
+	PINMUX_IPSR_MSEL(IP1_7_4,	MSIOF3_SCK_E,		SEL_MSIOF3_4),
+
+	PINMUX_IPSR_GPSR(IP1_11_8,	IRQ4),
+	PINMUX_IPSR_GPSR(IP1_11_8,	QSTH_QHS),
+	PINMUX_IPSR_GPSR(IP1_11_8,	DU_EXHSYNC_DU_HSYNC),
+	PINMUX_IPSR_MSEL(IP1_11_8,	VI4_DATA4_B,		SEL_VIN4_1),
+	PINMUX_IPSR_MSEL(IP1_11_8,	PWM5_B,			SEL_PWM5_1),
+	PINMUX_IPSR_MSEL(IP1_11_8,	MSIOF3_RXD_E,		SEL_MSIOF3_4),
+
+	PINMUX_IPSR_GPSR(IP1_15_12,	IRQ5),
+	PINMUX_IPSR_GPSR(IP1_15_12,	QSTB_QHE),
+	PINMUX_IPSR_GPSR(IP1_15_12,	DU_EXVSYNC_DU_VSYNC),
+	PINMUX_IPSR_MSEL(IP1_15_12,	VI4_DATA5_B,		SEL_VIN4_1),
+	PINMUX_IPSR_MSEL(IP1_15_12,	PWM6_B,			SEL_PWM6_1),
+	PINMUX_IPSR_GPSR(IP1_15_12,	FSCLKST2_N_B),
+	PINMUX_IPSR_MSEL(IP1_15_12,	MSIOF3_TXD_E,		SEL_MSIOF3_4),
+
+	PINMUX_IPSR_GPSR(IP1_19_16,	PWM0),
+	PINMUX_IPSR_GPSR(IP1_19_16,	AVB_AVTP_PPS),
+	PINMUX_IPSR_MSEL(IP1_19_16,	VI4_DATA6_B,		SEL_VIN4_1),
+	PINMUX_IPSR_MSEL(IP1_19_16,	IECLK_B,		SEL_IEBUS_1),
+
+	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, PWM1_A,		I2C_SEL_3_0,	SEL_PWM1_0),
+	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D,		I2C_SEL_3_0,	SEL_HSCIF3_3),
+	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B,		I2C_SEL_3_0,	SEL_VIN4_1),
+	PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B,		I2C_SEL_3_0,	SEL_IEBUS_1),
+	PINMUX_IPSR_PHYS(IP1_23_20,	SCL3,			I2C_SEL_3_1),
+
+	PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A,		I2C_SEL_3_0,	SEL_PWM2_0),
+	PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D,		I2C_SEL_3_0,	SEL_HSCIF3_3),
+	PINMUX_IPSR_PHYS_MSEL(IP1_27_24, IETX_B,		I2C_SEL_3_0,	SEL_IEBUS_1),
+	PINMUX_IPSR_PHYS(IP1_27_24,	SDA3,			I2C_SEL_3_1),
+
+	PINMUX_IPSR_GPSR(IP1_31_28,	A0),
+	PINMUX_IPSR_GPSR(IP1_31_28,	LCDOUT16),
+	PINMUX_IPSR_MSEL(IP1_31_28,	MSIOF3_SYNC_B,		SEL_MSIOF3_1),
+	PINMUX_IPSR_GPSR(IP1_31_28,	VI4_DATA8),
+	PINMUX_IPSR_GPSR(IP1_31_28,	DU_DB0),
+	PINMUX_IPSR_MSEL(IP1_31_28,	PWM3_A,			SEL_PWM3_0),
+
+	/* IPSR2 */
+	PINMUX_IPSR_GPSR(IP2_3_0,	A1),
+	PINMUX_IPSR_GPSR(IP2_3_0,	LCDOUT17),
+	PINMUX_IPSR_MSEL(IP2_3_0,	MSIOF3_TXD_B,		SEL_MSIOF3_1),
+	PINMUX_IPSR_GPSR(IP2_3_0,	VI4_DATA9),
+	PINMUX_IPSR_GPSR(IP2_3_0,	DU_DB1),
+	PINMUX_IPSR_MSEL(IP2_3_0,	PWM4_A,			SEL_PWM4_0),
+
+	PINMUX_IPSR_GPSR(IP2_7_4,	A2),
+	PINMUX_IPSR_GPSR(IP2_7_4,	LCDOUT18),
+	PINMUX_IPSR_MSEL(IP2_7_4,	MSIOF3_SCK_B,		SEL_MSIOF3_1),
+	PINMUX_IPSR_GPSR(IP2_7_4,	VI4_DATA10),
+	PINMUX_IPSR_GPSR(IP2_7_4,	DU_DB2),
+	PINMUX_IPSR_MSEL(IP2_7_4,	PWM5_A,			SEL_PWM5_0),
+
+	PINMUX_IPSR_GPSR(IP2_11_8,	A3),
+	PINMUX_IPSR_GPSR(IP2_11_8,	LCDOUT19),
+	PINMUX_IPSR_MSEL(IP2_11_8,	MSIOF3_RXD_B,		SEL_MSIOF3_1),
+	PINMUX_IPSR_GPSR(IP2_11_8,	VI4_DATA11),
+	PINMUX_IPSR_GPSR(IP2_11_8,	DU_DB3),
+	PINMUX_IPSR_MSEL(IP2_11_8,	PWM6_A,			SEL_PWM6_0),
+
+	PINMUX_IPSR_GPSR(IP2_15_12,	A4),
+	PINMUX_IPSR_GPSR(IP2_15_12,	LCDOUT20),
+	PINMUX_IPSR_MSEL(IP2_15_12,	MSIOF3_SS1_B,		SEL_MSIOF3_1),
+	PINMUX_IPSR_GPSR(IP2_15_12,	VI4_DATA12),
+	PINMUX_IPSR_GPSR(IP2_15_12,	VI5_DATA12),
+	PINMUX_IPSR_GPSR(IP2_15_12,	DU_DB4),
+
+	PINMUX_IPSR_GPSR(IP2_19_16,	A5),
+	PINMUX_IPSR_GPSR(IP2_19_16,	LCDOUT21),
+	PINMUX_IPSR_MSEL(IP2_19_16,	MSIOF3_SS2_B,		SEL_MSIOF3_1),
+	PINMUX_IPSR_MSEL(IP2_19_16,	SCK4_B,			SEL_SCIF4_1),
+	PINMUX_IPSR_GPSR(IP2_19_16,	VI4_DATA13),
+	PINMUX_IPSR_GPSR(IP2_19_16,	VI5_DATA13),
+	PINMUX_IPSR_GPSR(IP2_19_16,	DU_DB5),
+
+	PINMUX_IPSR_GPSR(IP2_23_20,	A6),
+	PINMUX_IPSR_GPSR(IP2_23_20,	LCDOUT22),
+	PINMUX_IPSR_MSEL(IP2_23_20,	MSIOF2_SS1_A,		SEL_MSIOF2_0),
+	PINMUX_IPSR_MSEL(IP2_23_20,	RX4_B,			SEL_SCIF4_1),
+	PINMUX_IPSR_GPSR(IP2_23_20,	VI4_DATA14),
+	PINMUX_IPSR_GPSR(IP2_23_20,	VI5_DATA14),
+	PINMUX_IPSR_GPSR(IP2_23_20,	DU_DB6),
+
+	PINMUX_IPSR_GPSR(IP2_27_24,	A7),
+	PINMUX_IPSR_GPSR(IP2_27_24,	LCDOUT23),
+	PINMUX_IPSR_MSEL(IP2_27_24,	MSIOF2_SS2_A,		SEL_MSIOF2_0),
+	PINMUX_IPSR_MSEL(IP2_27_24,	TX4_B,			SEL_SCIF4_1),
+	PINMUX_IPSR_GPSR(IP2_27_24,	VI4_DATA15),
+	PINMUX_IPSR_GPSR(IP2_27_24,	VI5_DATA15),
+	PINMUX_IPSR_GPSR(IP2_27_24,	DU_DB7),
+
+	PINMUX_IPSR_GPSR(IP2_31_28,	A8),
+	PINMUX_IPSR_MSEL(IP2_31_28,	RX3_B,			SEL_SCIF3_1),
+	PINMUX_IPSR_MSEL(IP2_31_28,	MSIOF2_SYNC_A,		SEL_MSIOF2_0),
+	PINMUX_IPSR_MSEL(IP2_31_28,	HRX4_B,			SEL_HSCIF4_1),
+	PINMUX_IPSR_MSEL(IP2_31_28,	SDA6_A,			SEL_I2C6_0),
+	PINMUX_IPSR_MSEL(IP2_31_28,	AVB_AVTP_MATCH_B,	SEL_ETHERAVB_1),
+	PINMUX_IPSR_MSEL(IP2_31_28,	PWM1_B,			SEL_PWM1_1),
+
+	/* IPSR3 */
+	PINMUX_IPSR_GPSR(IP3_3_0,	A9),
+	PINMUX_IPSR_MSEL(IP3_3_0,	MSIOF2_SCK_A,		SEL_MSIOF2_0),
+	PINMUX_IPSR_MSEL(IP3_3_0,	CTS4_N_B,		SEL_SCIF4_1),
+	PINMUX_IPSR_GPSR(IP3_3_0,	VI5_VSYNC_N),
+
+	PINMUX_IPSR_GPSR(IP3_7_4,	A10),
+	PINMUX_IPSR_MSEL(IP3_7_4,	MSIOF2_RXD_A,		SEL_MSIOF2_0),
+	PINMUX_IPSR_MSEL(IP3_7_4,	RTS4_N_B,		SEL_SCIF4_1),
+	PINMUX_IPSR_GPSR(IP3_7_4,	VI5_HSYNC_N),
+
+	PINMUX_IPSR_GPSR(IP3_11_8,	A11),
+	PINMUX_IPSR_MSEL(IP3_11_8,	TX3_B,			SEL_SCIF3_1),
+	PINMUX_IPSR_MSEL(IP3_11_8,	MSIOF2_TXD_A,		SEL_MSIOF2_0),
+	PINMUX_IPSR_MSEL(IP3_11_8,	HTX4_B,			SEL_HSCIF4_1),
+	PINMUX_IPSR_GPSR(IP3_11_8,	HSCK4),
+	PINMUX_IPSR_GPSR(IP3_11_8,	VI5_FIELD),
+	PINMUX_IPSR_MSEL(IP3_11_8,	SCL6_A,			SEL_I2C6_0),
+	PINMUX_IPSR_MSEL(IP3_11_8,	AVB_AVTP_CAPTURE_B,	SEL_ETHERAVB_1),
+	PINMUX_IPSR_MSEL(IP3_11_8,	PWM2_B,			SEL_PWM2_1),
+
+	PINMUX_IPSR_GPSR(IP3_15_12,	A12),
+	PINMUX_IPSR_GPSR(IP3_15_12,	LCDOUT12),
+	PINMUX_IPSR_MSEL(IP3_15_12,	MSIOF3_SCK_C,		SEL_MSIOF3_2),
+	PINMUX_IPSR_MSEL(IP3_15_12,	HRX4_A,			SEL_HSCIF4_0),
+	PINMUX_IPSR_GPSR(IP3_15_12,	VI5_DATA8),
+	PINMUX_IPSR_GPSR(IP3_15_12,	DU_DG4),
+
+	PINMUX_IPSR_GPSR(IP3_19_16,	A13),
+	PINMUX_IPSR_GPSR(IP3_19_16,	LCDOUT13),
+	PINMUX_IPSR_MSEL(IP3_19_16,	MSIOF3_SYNC_C,		SEL_MSIOF3_2),
+	PINMUX_IPSR_MSEL(IP3_19_16,	HTX4_A,			SEL_HSCIF4_0),
+	PINMUX_IPSR_GPSR(IP3_19_16,	VI5_DATA9),
+	PINMUX_IPSR_GPSR(IP3_19_16,	DU_DG5),
+
+	PINMUX_IPSR_GPSR(IP3_23_20,	A14),
+	PINMUX_IPSR_GPSR(IP3_23_20,	LCDOUT14),
+	PINMUX_IPSR_MSEL(IP3_23_20,	MSIOF3_RXD_C,		SEL_MSIOF3_2),
+	PINMUX_IPSR_GPSR(IP3_23_20,	HCTS4_N),
+	PINMUX_IPSR_GPSR(IP3_23_20,	VI5_DATA10),
+	PINMUX_IPSR_GPSR(IP3_23_20,	DU_DG6),
+
+	PINMUX_IPSR_GPSR(IP3_27_24,	A15),
+	PINMUX_IPSR_GPSR(IP3_27_24,	LCDOUT15),
+	PINMUX_IPSR_MSEL(IP3_27_24,	MSIOF3_TXD_C,		SEL_MSIOF3_2),
+	PINMUX_IPSR_GPSR(IP3_27_24,	HRTS4_N),
+	PINMUX_IPSR_GPSR(IP3_27_24,	VI5_DATA11),
+	PINMUX_IPSR_GPSR(IP3_27_24,	DU_DG7),
+
+	PINMUX_IPSR_GPSR(IP3_31_28,	A16),
+	PINMUX_IPSR_GPSR(IP3_31_28,	LCDOUT8),
+	PINMUX_IPSR_GPSR(IP3_31_28,	VI4_FIELD),
+	PINMUX_IPSR_GPSR(IP3_31_28,	DU_DG0),
+
+	/* IPSR4 */
+	PINMUX_IPSR_GPSR(IP4_3_0,	A17),
+	PINMUX_IPSR_GPSR(IP4_3_0,	LCDOUT9),
+	PINMUX_IPSR_GPSR(IP4_3_0,	VI4_VSYNC_N),
+	PINMUX_IPSR_GPSR(IP4_3_0,	DU_DG1),
+
+	PINMUX_IPSR_GPSR(IP4_7_4,	A18),
+	PINMUX_IPSR_GPSR(IP4_7_4,	LCDOUT10),
+	PINMUX_IPSR_GPSR(IP4_7_4,	VI4_HSYNC_N),
+	PINMUX_IPSR_GPSR(IP4_7_4,	DU_DG2),
+
+	PINMUX_IPSR_GPSR(IP4_11_8,	A19),
+	PINMUX_IPSR_GPSR(IP4_11_8,	LCDOUT11),
+	PINMUX_IPSR_GPSR(IP4_11_8,	VI4_CLKENB),
+	PINMUX_IPSR_GPSR(IP4_11_8,	DU_DG3),
+
+	PINMUX_IPSR_GPSR(IP4_15_12,	CS0_N),
+	PINMUX_IPSR_GPSR(IP4_15_12,	VI5_CLKENB),
+
+	PINMUX_IPSR_GPSR(IP4_19_16,	CS1_N),
+	PINMUX_IPSR_GPSR(IP4_19_16,	VI5_CLK),
+	PINMUX_IPSR_MSEL(IP4_19_16,	EX_WAIT0_B,		SEL_LBSC_1),
+
+	PINMUX_IPSR_GPSR(IP4_23_20,	BS_N),
+	PINMUX_IPSR_GPSR(IP4_23_20,	QSTVA_QVS),
+	PINMUX_IPSR_MSEL(IP4_23_20,	MSIOF3_SCK_D,		SEL_MSIOF3_3),
+	PINMUX_IPSR_GPSR(IP4_23_20,	SCK3),
+	PINMUX_IPSR_GPSR(IP4_23_20,	HSCK3),
+	PINMUX_IPSR_GPSR(IP4_23_20,	CAN1_TX),
+	PINMUX_IPSR_GPSR(IP4_23_20,	CANFD1_TX),
+	PINMUX_IPSR_MSEL(IP4_23_20,	IETX_A,			SEL_IEBUS_0),
+
+	PINMUX_IPSR_GPSR(IP4_27_24,	RD_N),
+	PINMUX_IPSR_MSEL(IP4_27_24,	MSIOF3_SYNC_D,		SEL_MSIOF3_3),
+	PINMUX_IPSR_MSEL(IP4_27_24,	RX3_A,			SEL_SCIF3_0),
+	PINMUX_IPSR_MSEL(IP4_27_24,	HRX3_A,			SEL_HSCIF3_0),
+	PINMUX_IPSR_MSEL(IP4_27_24,	CAN0_TX_A,		SEL_RCAN0_0),
+	PINMUX_IPSR_MSEL(IP4_27_24,	CANFD0_TX_A,		SEL_CANFD0_0),
+
+	PINMUX_IPSR_GPSR(IP4_31_28,	RD_WR_N),
+	PINMUX_IPSR_MSEL(IP4_31_28,	MSIOF3_RXD_D,		SEL_MSIOF3_3),
+	PINMUX_IPSR_MSEL(IP4_31_28,	TX3_A,			SEL_SCIF3_0),
+	PINMUX_IPSR_MSEL(IP4_31_28,	HTX3_A,			SEL_HSCIF3_0),
+	PINMUX_IPSR_MSEL(IP4_31_28,	CAN0_RX_A,		SEL_RCAN0_0),
+	PINMUX_IPSR_MSEL(IP4_31_28,	CANFD0_RX_A,		SEL_CANFD0_0),
+
+	/* IPSR5 */
+	PINMUX_IPSR_GPSR(IP5_3_0,	WE0_N),
+	PINMUX_IPSR_MSEL(IP5_3_0,	MSIOF3_TXD_D,		SEL_MSIOF3_3),
+	PINMUX_IPSR_GPSR(IP5_3_0,	CTS3_N),
+	PINMUX_IPSR_GPSR(IP5_3_0,	HCTS3_N),
+	PINMUX_IPSR_MSEL(IP5_3_0,	SCL6_B,			SEL_I2C6_1),
+	PINMUX_IPSR_GPSR(IP5_3_0,	CAN_CLK),
+	PINMUX_IPSR_MSEL(IP5_3_0,	IECLK_A,		SEL_IEBUS_0),
+
+	PINMUX_IPSR_GPSR(IP5_7_4,	WE1_N),
+	PINMUX_IPSR_MSEL(IP5_7_4,	MSIOF3_SS1_D,		SEL_MSIOF3_3),
+	PINMUX_IPSR_GPSR(IP5_7_4,	RTS3_N),
+	PINMUX_IPSR_GPSR(IP5_7_4,	HRTS3_N),
+	PINMUX_IPSR_MSEL(IP5_7_4,	SDA6_B,			SEL_I2C6_1),
+	PINMUX_IPSR_GPSR(IP5_7_4,	CAN1_RX),
+	PINMUX_IPSR_GPSR(IP5_7_4,	CANFD1_RX),
+	PINMUX_IPSR_MSEL(IP5_7_4,	IERX_A,			SEL_IEBUS_0),
+
+	PINMUX_IPSR_MSEL(IP5_11_8,	EX_WAIT0_A,		SEL_LBSC_0),
+	PINMUX_IPSR_GPSR(IP5_11_8,	QCLK),
+	PINMUX_IPSR_GPSR(IP5_11_8,	VI4_CLK),
+	PINMUX_IPSR_GPSR(IP5_11_8,	DU_DOTCLKOUT0),
+
+	PINMUX_IPSR_GPSR(IP5_15_12,	D0),
+	PINMUX_IPSR_MSEL(IP5_15_12,	MSIOF2_SS1_B,		SEL_MSIOF2_1),
+	PINMUX_IPSR_MSEL(IP5_15_12,	MSIOF3_SCK_A,		SEL_MSIOF3_0),
+	PINMUX_IPSR_GPSR(IP5_15_12,	VI4_DATA16),
+	PINMUX_IPSR_GPSR(IP5_15_12,	VI5_DATA0),
+
+	PINMUX_IPSR_GPSR(IP5_19_16,	D1),
+	PINMUX_IPSR_MSEL(IP5_19_16,	MSIOF2_SS2_B,		SEL_MSIOF2_1),
+	PINMUX_IPSR_MSEL(IP5_19_16,	MSIOF3_SYNC_A,		SEL_MSIOF3_0),
+	PINMUX_IPSR_GPSR(IP5_19_16,	VI4_DATA17),
+	PINMUX_IPSR_GPSR(IP5_19_16,	VI5_DATA1),
+
+	PINMUX_IPSR_GPSR(IP5_23_20,	D2),
+	PINMUX_IPSR_MSEL(IP5_23_20,	MSIOF3_RXD_A,		SEL_MSIOF3_0),
+	PINMUX_IPSR_GPSR(IP5_23_20,	VI4_DATA18),
+	PINMUX_IPSR_GPSR(IP5_23_20,	VI5_DATA2),
+
+	PINMUX_IPSR_GPSR(IP5_27_24,	D3),
+	PINMUX_IPSR_MSEL(IP5_27_24,	MSIOF3_TXD_A,		SEL_MSIOF3_0),
+	PINMUX_IPSR_GPSR(IP5_27_24,	VI4_DATA19),
+	PINMUX_IPSR_GPSR(IP5_27_24,	VI5_DATA3),
+
+	PINMUX_IPSR_GPSR(IP5_31_28,	D4),
+	PINMUX_IPSR_MSEL(IP5_31_28,	MSIOF2_SCK_B,		SEL_MSIOF2_1),
+	PINMUX_IPSR_GPSR(IP5_31_28,	VI4_DATA20),
+	PINMUX_IPSR_GPSR(IP5_31_28,	VI5_DATA4),
+
+	/* IPSR6 */
+	PINMUX_IPSR_GPSR(IP6_3_0,	D5),
+	PINMUX_IPSR_MSEL(IP6_3_0,	MSIOF2_SYNC_B,		SEL_MSIOF2_1),
+	PINMUX_IPSR_GPSR(IP6_3_0,	VI4_DATA21),
+	PINMUX_IPSR_GPSR(IP6_3_0,	VI5_DATA5),
+
+	PINMUX_IPSR_GPSR(IP6_7_4,	D6),
+	PINMUX_IPSR_MSEL(IP6_7_4,	MSIOF2_RXD_B,		SEL_MSIOF2_1),
+	PINMUX_IPSR_GPSR(IP6_7_4,	VI4_DATA22),
+	PINMUX_IPSR_GPSR(IP6_7_4,	VI5_DATA6),
+
+	PINMUX_IPSR_GPSR(IP6_11_8,	D7),
+	PINMUX_IPSR_MSEL(IP6_11_8,	MSIOF2_TXD_B,		SEL_MSIOF2_1),
+	PINMUX_IPSR_GPSR(IP6_11_8,	VI4_DATA23),
+	PINMUX_IPSR_GPSR(IP6_11_8,	VI5_DATA7),
+
+	PINMUX_IPSR_GPSR(IP6_15_12,	D8),
+	PINMUX_IPSR_GPSR(IP6_15_12,	LCDOUT0),
+	PINMUX_IPSR_MSEL(IP6_15_12,	MSIOF2_SCK_D,		SEL_MSIOF2_3),
+	PINMUX_IPSR_MSEL(IP6_15_12,	SCK4_C,			SEL_SCIF4_2),
+	PINMUX_IPSR_MSEL(IP6_15_12,	VI4_DATA0_A,		SEL_VIN4_0),
+	PINMUX_IPSR_GPSR(IP6_15_12,	DU_DR0),
+
+	PINMUX_IPSR_GPSR(IP6_19_16,	D9),
+	PINMUX_IPSR_GPSR(IP6_19_16,	LCDOUT1),
+	PINMUX_IPSR_MSEL(IP6_19_16,	MSIOF2_SYNC_D,		SEL_MSIOF2_3),
+	PINMUX_IPSR_MSEL(IP6_19_16,	VI4_DATA1_A,		SEL_VIN4_0),
+	PINMUX_IPSR_GPSR(IP6_19_16,	DU_DR1),
+
+	PINMUX_IPSR_GPSR(IP6_23_20,	D10),
+	PINMUX_IPSR_GPSR(IP6_23_20,	LCDOUT2),
+	PINMUX_IPSR_MSEL(IP6_23_20,	MSIOF2_RXD_D,		SEL_MSIOF2_3),
+	PINMUX_IPSR_MSEL(IP6_23_20,	HRX3_B,			SEL_HSCIF3_1),
+	PINMUX_IPSR_MSEL(IP6_23_20,	VI4_DATA2_A,		SEL_VIN4_0),
+	PINMUX_IPSR_MSEL(IP6_23_20,	CTS4_N_C,		SEL_SCIF4_2),
+	PINMUX_IPSR_GPSR(IP6_23_20,	DU_DR2),
+
+	PINMUX_IPSR_GPSR(IP6_27_24,	D11),
+	PINMUX_IPSR_GPSR(IP6_27_24,	LCDOUT3),
+	PINMUX_IPSR_MSEL(IP6_27_24,	MSIOF2_TXD_D,		SEL_MSIOF2_3),
+	PINMUX_IPSR_MSEL(IP6_27_24,	HTX3_B,			SEL_HSCIF3_1),
+	PINMUX_IPSR_MSEL(IP6_27_24,	VI4_DATA3_A,		SEL_VIN4_0),
+	PINMUX_IPSR_MSEL(IP6_27_24,	RTS4_N_C,		SEL_SCIF4_2),
+	PINMUX_IPSR_GPSR(IP6_27_24,	DU_DR3),
+
+	PINMUX_IPSR_GPSR(IP6_31_28,	D12),
+	PINMUX_IPSR_GPSR(IP6_31_28,	LCDOUT4),
+	PINMUX_IPSR_MSEL(IP6_31_28,	MSIOF2_SS1_D,		SEL_MSIOF2_3),
+	PINMUX_IPSR_MSEL(IP6_31_28,	RX4_C,			SEL_SCIF4_2),
+	PINMUX_IPSR_MSEL(IP6_31_28,	VI4_DATA4_A,		SEL_VIN4_0),
+	PINMUX_IPSR_GPSR(IP6_31_28,	DU_DR4),
+
+	/* IPSR7 */
+	PINMUX_IPSR_GPSR(IP7_3_0,	D13),
+	PINMUX_IPSR_GPSR(IP7_3_0,	LCDOUT5),
+	PINMUX_IPSR_MSEL(IP7_3_0,	MSIOF2_SS2_D,		SEL_MSIOF2_3),
+	PINMUX_IPSR_MSEL(IP7_3_0,	TX4_C,			SEL_SCIF4_2),
+	PINMUX_IPSR_MSEL(IP7_3_0,	VI4_DATA5_A,		SEL_VIN4_0),
+	PINMUX_IPSR_GPSR(IP7_3_0,	DU_DR5),
+
+	PINMUX_IPSR_GPSR(IP7_7_4,	D14),
+	PINMUX_IPSR_GPSR(IP7_7_4,	LCDOUT6),
+	PINMUX_IPSR_MSEL(IP7_7_4,	MSIOF3_SS1_A,		SEL_MSIOF3_0),
+	PINMUX_IPSR_MSEL(IP7_7_4,	HRX3_C,			SEL_HSCIF3_2),
+	PINMUX_IPSR_MSEL(IP7_7_4,	VI4_DATA6_A,		SEL_VIN4_0),
+	PINMUX_IPSR_GPSR(IP7_7_4,	DU_DR6),
+	PINMUX_IPSR_MSEL(IP7_7_4,	SCL6_C,			SEL_I2C6_2),
+
+	PINMUX_IPSR_GPSR(IP7_11_8,	D15),
+	PINMUX_IPSR_GPSR(IP7_11_8,	LCDOUT7),
+	PINMUX_IPSR_MSEL(IP7_11_8,	MSIOF3_SS2_A,		SEL_MSIOF3_0),
+	PINMUX_IPSR_MSEL(IP7_11_8,	HTX3_C,			SEL_HSCIF3_2),
+	PINMUX_IPSR_MSEL(IP7_11_8,	VI4_DATA7_A,		SEL_VIN4_0),
+	PINMUX_IPSR_GPSR(IP7_11_8,	DU_DR7),
+	PINMUX_IPSR_MSEL(IP7_11_8,	SDA6_C,			SEL_I2C6_2),
+
+	PINMUX_IPSR_GPSR(IP7_19_16,	SD0_CLK),
+	PINMUX_IPSR_MSEL(IP7_19_16,	MSIOF1_SCK_E,		SEL_MSIOF1_4),
+	PINMUX_IPSR_MSEL(IP7_19_16,	STP_OPWM_0_B,		SEL_SSP1_0_1),
+
+	PINMUX_IPSR_GPSR(IP7_23_20,	SD0_CMD),
+	PINMUX_IPSR_MSEL(IP7_23_20,	MSIOF1_SYNC_E,		SEL_MSIOF1_4),
+	PINMUX_IPSR_MSEL(IP7_23_20,	STP_IVCXO27_0_B,	SEL_SSP1_0_1),
+
+	PINMUX_IPSR_GPSR(IP7_27_24,	SD0_DAT0),
+	PINMUX_IPSR_MSEL(IP7_27_24,	MSIOF1_RXD_E,		SEL_MSIOF1_4),
+	PINMUX_IPSR_MSEL(IP7_27_24,	TS_SCK0_B,		SEL_TSIF0_1),
+	PINMUX_IPSR_MSEL(IP7_27_24,	STP_ISCLK_0_B,		SEL_SSP1_0_1),
+
+	PINMUX_IPSR_GPSR(IP7_31_28,	SD0_DAT1),
+	PINMUX_IPSR_MSEL(IP7_31_28,	MSIOF1_TXD_E,		SEL_MSIOF1_4),
+	PINMUX_IPSR_MSEL(IP7_31_28,	TS_SPSYNC0_B,		SEL_TSIF0_1),
+	PINMUX_IPSR_MSEL(IP7_31_28,	STP_ISSYNC_0_B,		SEL_SSP1_0_1),
+
+	/* IPSR8 */
+	PINMUX_IPSR_GPSR(IP8_3_0,	SD0_DAT2),
+	PINMUX_IPSR_MSEL(IP8_3_0,	MSIOF1_SS1_E,		SEL_MSIOF1_4),
+	PINMUX_IPSR_MSEL(IP8_3_0,	TS_SDAT0_B,		SEL_TSIF0_1),
+	PINMUX_IPSR_MSEL(IP8_3_0,	STP_ISD_0_B,		SEL_SSP1_0_1),
+
+	PINMUX_IPSR_GPSR(IP8_7_4,	SD0_DAT3),
+	PINMUX_IPSR_MSEL(IP8_7_4,	MSIOF1_SS2_E,		SEL_MSIOF1_4),
+	PINMUX_IPSR_MSEL(IP8_7_4,	TS_SDEN0_B,		SEL_TSIF0_1),
+	PINMUX_IPSR_MSEL(IP8_7_4,	STP_ISEN_0_B,		SEL_SSP1_0_1),
+
+	PINMUX_IPSR_GPSR(IP8_11_8,	SD1_CLK),
+	PINMUX_IPSR_MSEL(IP8_11_8,	MSIOF1_SCK_G,		SEL_MSIOF1_6),
+	PINMUX_IPSR_MSEL(IP8_11_8,	SIM0_CLK_A,		SEL_SIMCARD_0),
+
+	PINMUX_IPSR_GPSR(IP8_15_12,	SD1_CMD),
+	PINMUX_IPSR_MSEL(IP8_15_12,	MSIOF1_SYNC_G,		SEL_MSIOF1_6),
+	PINMUX_IPSR_GPSR(IP8_15_12,	NFCE_N_B),
+	PINMUX_IPSR_MSEL(IP8_15_12,	SIM0_D_A,		SEL_SIMCARD_0),
+	PINMUX_IPSR_MSEL(IP8_15_12,	STP_IVCXO27_1_B,	SEL_SSP1_1_1),
+
+	PINMUX_IPSR_GPSR(IP8_19_16,	SD1_DAT0),
+	PINMUX_IPSR_GPSR(IP8_19_16,	SD2_DAT4),
+	PINMUX_IPSR_MSEL(IP8_19_16,	MSIOF1_RXD_G,		SEL_MSIOF1_6),
+	PINMUX_IPSR_GPSR(IP8_19_16,	NFWP_N_B),
+	PINMUX_IPSR_MSEL(IP8_19_16,	TS_SCK1_B,		SEL_TSIF1_1),
+	PINMUX_IPSR_MSEL(IP8_19_16,	STP_ISCLK_1_B,		SEL_SSP1_1_1),
+
+	PINMUX_IPSR_GPSR(IP8_23_20,	SD1_DAT1),
+	PINMUX_IPSR_GPSR(IP8_23_20,	SD2_DAT5),
+	PINMUX_IPSR_MSEL(IP8_23_20,	MSIOF1_TXD_G,		SEL_MSIOF1_6),
+	PINMUX_IPSR_GPSR(IP8_23_20,	NFDATA14_B),
+	PINMUX_IPSR_MSEL(IP8_23_20,	TS_SPSYNC1_B,		SEL_TSIF1_1),
+	PINMUX_IPSR_MSEL(IP8_23_20,	STP_ISSYNC_1_B,		SEL_SSP1_1_1),
+
+	PINMUX_IPSR_GPSR(IP8_27_24,	SD1_DAT2),
+	PINMUX_IPSR_GPSR(IP8_27_24,	SD2_DAT6),
+	PINMUX_IPSR_MSEL(IP8_27_24,	MSIOF1_SS1_G,		SEL_MSIOF1_6),
+	PINMUX_IPSR_GPSR(IP8_27_24,	NFDATA15_B),
+	PINMUX_IPSR_MSEL(IP8_27_24,	TS_SDAT1_B,		SEL_TSIF1_1),
+	PINMUX_IPSR_MSEL(IP8_27_24,	STP_ISD_1_B,		SEL_SSP1_1_1),
+
+	PINMUX_IPSR_GPSR(IP8_31_28,	SD1_DAT3),
+	PINMUX_IPSR_GPSR(IP8_31_28,	SD2_DAT7),
+	PINMUX_IPSR_MSEL(IP8_31_28,	MSIOF1_SS2_G,		SEL_MSIOF1_6),
+	PINMUX_IPSR_GPSR(IP8_31_28,	NFRB_N_B),
+	PINMUX_IPSR_MSEL(IP8_31_28,	TS_SDEN1_B,		SEL_TSIF1_1),
+	PINMUX_IPSR_MSEL(IP8_31_28,	STP_ISEN_1_B,		SEL_SSP1_1_1),
+
+	/* IPSR9 */
+	PINMUX_IPSR_GPSR(IP9_3_0,	SD2_CLK),
+	PINMUX_IPSR_GPSR(IP9_3_0,	NFDATA8),
+
+	PINMUX_IPSR_GPSR(IP9_7_4,	SD2_CMD),
+	PINMUX_IPSR_GPSR(IP9_7_4,	NFDATA9),
+
+	PINMUX_IPSR_GPSR(IP9_11_8,	SD2_DAT0),
+	PINMUX_IPSR_GPSR(IP9_11_8,	NFDATA10),
+
+	PINMUX_IPSR_GPSR(IP9_15_12,	SD2_DAT1),
+	PINMUX_IPSR_GPSR(IP9_15_12,	NFDATA11),
+
+	PINMUX_IPSR_GPSR(IP9_19_16,	SD2_DAT2),
+	PINMUX_IPSR_GPSR(IP9_19_16,	NFDATA12),
+
+	PINMUX_IPSR_GPSR(IP9_23_20,	SD2_DAT3),
+	PINMUX_IPSR_GPSR(IP9_23_20,	NFDATA13),
+
+	PINMUX_IPSR_GPSR(IP9_27_24,	SD2_DS),
+	PINMUX_IPSR_GPSR(IP9_27_24,	NFALE),
+	PINMUX_IPSR_GPSR(IP9_27_24,	SATA_DEVSLP_B),
+
+	PINMUX_IPSR_GPSR(IP9_31_28,	SD3_CLK),
+	PINMUX_IPSR_GPSR(IP9_31_28,	NFWE_N),
+
+	/* IPSR10 */
+	PINMUX_IPSR_GPSR(IP10_3_0,	SD3_CMD),
+	PINMUX_IPSR_GPSR(IP10_3_0,	NFRE_N),
+
+	PINMUX_IPSR_GPSR(IP10_7_4,	SD3_DAT0),
+	PINMUX_IPSR_GPSR(IP10_7_4,	NFDATA0),
+
+	PINMUX_IPSR_GPSR(IP10_11_8,	SD3_DAT1),
+	PINMUX_IPSR_GPSR(IP10_11_8,	NFDATA1),
+
+	PINMUX_IPSR_GPSR(IP10_15_12,	SD3_DAT2),
+	PINMUX_IPSR_GPSR(IP10_15_12,	NFDATA2),
+
+	PINMUX_IPSR_GPSR(IP10_19_16,	SD3_DAT3),
+	PINMUX_IPSR_GPSR(IP10_19_16,	NFDATA3),
+
+	PINMUX_IPSR_GPSR(IP10_23_20,	SD3_DAT4),
+	PINMUX_IPSR_MSEL(IP10_23_20,	SD2_CD_A,		SEL_SDHI2_0),
+	PINMUX_IPSR_GPSR(IP10_23_20,	NFDATA4),
+
+	PINMUX_IPSR_GPSR(IP10_27_24,	SD3_DAT5),
+	PINMUX_IPSR_MSEL(IP10_27_24,	SD2_WP_A,		SEL_SDHI2_0),
+	PINMUX_IPSR_GPSR(IP10_27_24,	NFDATA5),
+
+	PINMUX_IPSR_GPSR(IP10_31_28,	SD3_DAT6),
+	PINMUX_IPSR_GPSR(IP10_31_28,	SD3_CD),
+	PINMUX_IPSR_GPSR(IP10_31_28,	NFDATA6),
+
+	/* IPSR11 */
+	PINMUX_IPSR_GPSR(IP11_3_0,	SD3_DAT7),
+	PINMUX_IPSR_GPSR(IP11_3_0,	SD3_WP),
+	PINMUX_IPSR_GPSR(IP11_3_0,	NFDATA7),
+
+	PINMUX_IPSR_GPSR(IP11_7_4,	SD3_DS),
+	PINMUX_IPSR_GPSR(IP11_7_4,	NFCLE),
+
+	PINMUX_IPSR_GPSR(IP11_11_8,	SD0_CD),
+	PINMUX_IPSR_MSEL(IP11_11_8,	SCL2_B,			SEL_I2C2_1),
+	PINMUX_IPSR_MSEL(IP11_11_8,	SIM0_RST_A,		SEL_SIMCARD_0),
+
+	PINMUX_IPSR_GPSR(IP11_15_12,	SD0_WP),
+	PINMUX_IPSR_MSEL(IP11_15_12,	SDA2_B,			SEL_I2C2_1),
+
+	PINMUX_IPSR_MSEL(IP11_19_16,	SD1_CD,			I2C_SEL_0_0),
+	PINMUX_IPSR_PHYS_MSEL(IP11_19_16, SIM0_CLK_B,		I2C_SEL_0_0,	SEL_SIMCARD_1),
+	PINMUX_IPSR_PHYS(IP11_19_16,	SCL0,			I2C_SEL_0_1),
+
+	PINMUX_IPSR_MSEL(IP11_23_20,	SD1_WP,			I2C_SEL_0_0),
+	PINMUX_IPSR_PHYS_MSEL(IP11_23_20, SIM0_D_B,		I2C_SEL_0_0,	SEL_SIMCARD_1),
+	PINMUX_IPSR_PHYS(IP11_23_20,	SDA0,			I2C_SEL_0_1),
+
+	PINMUX_IPSR_GPSR(IP11_27_24,	SCK0),
+	PINMUX_IPSR_MSEL(IP11_27_24,	HSCK1_B,		SEL_HSCIF1_1),
+	PINMUX_IPSR_MSEL(IP11_27_24,	MSIOF1_SS2_B,		SEL_MSIOF1_1),
+	PINMUX_IPSR_MSEL(IP11_27_24,	AUDIO_CLKC_B,		SEL_ADGC_1),
+	PINMUX_IPSR_MSEL(IP11_27_24,	SDA2_A,			SEL_I2C2_0),
+	PINMUX_IPSR_MSEL(IP11_27_24,	SIM0_RST_B,		SEL_SIMCARD_1),
+	PINMUX_IPSR_MSEL(IP11_27_24,	STP_OPWM_0_C,		SEL_SSP1_0_2),
+	PINMUX_IPSR_MSEL(IP11_27_24,	RIF0_CLK_B,		SEL_DRIF0_1),
+	PINMUX_IPSR_GPSR(IP11_27_24,	ADICHS2),
+	PINMUX_IPSR_MSEL(IP11_27_24,	SCK5_B,			SEL_SCIF5_1),
+
+	PINMUX_IPSR_GPSR(IP11_31_28,	RX0),
+	PINMUX_IPSR_MSEL(IP11_31_28,	HRX1_B,			SEL_HSCIF1_1),
+	PINMUX_IPSR_MSEL(IP11_31_28,	TS_SCK0_C,		SEL_TSIF0_2),
+	PINMUX_IPSR_MSEL(IP11_31_28,	STP_ISCLK_0_C,		SEL_SSP1_0_2),
+	PINMUX_IPSR_MSEL(IP11_31_28,	RIF0_D0_B,		SEL_DRIF0_1),
+
+	/* IPSR12 */
+	PINMUX_IPSR_GPSR(IP12_3_0,	TX0),
+	PINMUX_IPSR_MSEL(IP12_3_0,	HTX1_B,			SEL_HSCIF1_1),
+	PINMUX_IPSR_MSEL(IP12_3_0,	TS_SPSYNC0_C,		SEL_TSIF0_2),
+	PINMUX_IPSR_MSEL(IP12_3_0,	STP_ISSYNC_0_C,		SEL_SSP1_0_2),
+	PINMUX_IPSR_MSEL(IP12_3_0,	RIF0_D1_B,		SEL_DRIF0_1),
+
+	PINMUX_IPSR_GPSR(IP12_7_4,	CTS0_N),
+	PINMUX_IPSR_MSEL(IP12_7_4,	HCTS1_N_B,		SEL_HSCIF1_1),
+	PINMUX_IPSR_MSEL(IP12_7_4,	MSIOF1_SYNC_B,		SEL_MSIOF1_1),
+	PINMUX_IPSR_MSEL(IP12_7_4,	TS_SPSYNC1_C,		SEL_TSIF1_2),
+	PINMUX_IPSR_MSEL(IP12_7_4,	STP_ISSYNC_1_C,		SEL_SSP1_1_2),
+	PINMUX_IPSR_MSEL(IP12_7_4,	RIF1_SYNC_B,		SEL_DRIF1_1),
+	PINMUX_IPSR_GPSR(IP12_7_4,	AUDIO_CLKOUT_C),
+	PINMUX_IPSR_GPSR(IP12_7_4,	ADICS_SAMP),
+
+	PINMUX_IPSR_GPSR(IP12_11_8,	RTS0_N),
+	PINMUX_IPSR_MSEL(IP12_11_8,	HRTS1_N_B,		SEL_HSCIF1_1),
+	PINMUX_IPSR_MSEL(IP12_11_8,	MSIOF1_SS1_B,		SEL_MSIOF1_1),
+	PINMUX_IPSR_MSEL(IP12_11_8,	AUDIO_CLKA_B,		SEL_ADGA_1),
+	PINMUX_IPSR_MSEL(IP12_11_8,	SCL2_A,			SEL_I2C2_0),
+	PINMUX_IPSR_MSEL(IP12_11_8,	STP_IVCXO27_1_C,	SEL_SSP1_1_2),
+	PINMUX_IPSR_MSEL(IP12_11_8,	RIF0_SYNC_B,		SEL_DRIF0_1),
+	PINMUX_IPSR_GPSR(IP12_11_8,	ADICHS1),
+
+	PINMUX_IPSR_MSEL(IP12_15_12,	RX1_A,			SEL_SCIF1_0),
+	PINMUX_IPSR_MSEL(IP12_15_12,	HRX1_A,			SEL_HSCIF1_0),
+	PINMUX_IPSR_MSEL(IP12_15_12,	TS_SDAT0_C,		SEL_TSIF0_2),
+	PINMUX_IPSR_MSEL(IP12_15_12,	STP_ISD_0_C,		SEL_SSP1_0_2),
+	PINMUX_IPSR_MSEL(IP12_15_12,	RIF1_CLK_C,		SEL_DRIF1_2),
+
+	PINMUX_IPSR_MSEL(IP12_19_16,	TX1_A,			SEL_SCIF1_0),
+	PINMUX_IPSR_MSEL(IP12_19_16,	HTX1_A,			SEL_HSCIF1_0),
+	PINMUX_IPSR_MSEL(IP12_19_16,	TS_SDEN0_C,		SEL_TSIF0_2),
+	PINMUX_IPSR_MSEL(IP12_19_16,	STP_ISEN_0_C,		SEL_SSP1_0_2),
+	PINMUX_IPSR_MSEL(IP12_19_16,	RIF1_D0_C,		SEL_DRIF1_2),
+
+	PINMUX_IPSR_GPSR(IP12_23_20,	CTS1_N),
+	PINMUX_IPSR_MSEL(IP12_23_20,	HCTS1_N_A,		SEL_HSCIF1_0),
+	PINMUX_IPSR_MSEL(IP12_23_20,	MSIOF1_RXD_B,		SEL_MSIOF1_1),
+	PINMUX_IPSR_MSEL(IP12_23_20,	TS_SDEN1_C,		SEL_TSIF1_2),
+	PINMUX_IPSR_MSEL(IP12_23_20,	STP_ISEN_1_C,		SEL_SSP1_1_2),
+	PINMUX_IPSR_MSEL(IP12_23_20,	RIF1_D0_B,		SEL_DRIF1_1),
+	PINMUX_IPSR_GPSR(IP12_23_20,	ADIDATA),
+
+	PINMUX_IPSR_GPSR(IP12_27_24,	RTS1_N),
+	PINMUX_IPSR_MSEL(IP12_27_24,	HRTS1_N_A,		SEL_HSCIF1_0),
+	PINMUX_IPSR_MSEL(IP12_27_24,	MSIOF1_TXD_B,		SEL_MSIOF1_1),
+	PINMUX_IPSR_MSEL(IP12_27_24,	TS_SDAT1_C,		SEL_TSIF1_2),
+	PINMUX_IPSR_MSEL(IP12_27_24,	STP_ISD_1_C,		SEL_SSP1_1_2),
+	PINMUX_IPSR_MSEL(IP12_27_24,	RIF1_D1_B,		SEL_DRIF1_1),
+	PINMUX_IPSR_GPSR(IP12_27_24,	ADICHS0),
+
+	PINMUX_IPSR_GPSR(IP12_31_28,	SCK2),
+	PINMUX_IPSR_MSEL(IP12_31_28,	SCIF_CLK_B,		SEL_SCIF_1),
+	PINMUX_IPSR_MSEL(IP12_31_28,	MSIOF1_SCK_B,		SEL_MSIOF1_1),
+	PINMUX_IPSR_MSEL(IP12_31_28,	TS_SCK1_C,		SEL_TSIF1_2),
+	PINMUX_IPSR_MSEL(IP12_31_28,	STP_ISCLK_1_C,		SEL_SSP1_1_2),
+	PINMUX_IPSR_MSEL(IP12_31_28,	RIF1_CLK_B,		SEL_DRIF1_1),
+	PINMUX_IPSR_GPSR(IP12_31_28,	ADICLK),
+
+	/* IPSR13 */
+	PINMUX_IPSR_MSEL(IP13_3_0,	TX2_A,			SEL_SCIF2_0),
+	PINMUX_IPSR_MSEL(IP13_3_0,	SD2_CD_B,		SEL_SDHI2_1),
+	PINMUX_IPSR_MSEL(IP13_3_0,	SCL1_A,			SEL_I2C1_0),
+	PINMUX_IPSR_MSEL(IP13_3_0,	FMCLK_A,		SEL_FM_0),
+	PINMUX_IPSR_MSEL(IP13_3_0,	RIF1_D1_C,		SEL_DRIF1_2),
+	PINMUX_IPSR_GPSR(IP13_3_0,	FSO_CFE_0_N),
+
+	PINMUX_IPSR_MSEL(IP13_7_4,	RX2_A,			SEL_SCIF2_0),
+	PINMUX_IPSR_MSEL(IP13_7_4,	SD2_WP_B,		SEL_SDHI2_1),
+	PINMUX_IPSR_MSEL(IP13_7_4,	SDA1_A,			SEL_I2C1_0),
+	PINMUX_IPSR_MSEL(IP13_7_4,	FMIN_A,			SEL_FM_0),
+	PINMUX_IPSR_MSEL(IP13_7_4,	RIF1_SYNC_C,		SEL_DRIF1_2),
+	PINMUX_IPSR_GPSR(IP13_7_4,	FSO_CFE_1_N),
+
+	PINMUX_IPSR_GPSR(IP13_11_8,	HSCK0),
+	PINMUX_IPSR_MSEL(IP13_11_8,	MSIOF1_SCK_D,		SEL_MSIOF1_3),
+	PINMUX_IPSR_MSEL(IP13_11_8,	AUDIO_CLKB_A,		SEL_ADGB_0),
+	PINMUX_IPSR_MSEL(IP13_11_8,	SSI_SDATA1_B,		SEL_SSI1_1),
+	PINMUX_IPSR_MSEL(IP13_11_8,	TS_SCK0_D,		SEL_TSIF0_3),
+	PINMUX_IPSR_MSEL(IP13_11_8,	STP_ISCLK_0_D,		SEL_SSP1_0_3),
+	PINMUX_IPSR_MSEL(IP13_11_8,	RIF0_CLK_C,		SEL_DRIF0_2),
+	PINMUX_IPSR_MSEL(IP13_11_8,	RX5_B,			SEL_SCIF5_1),
+
+	PINMUX_IPSR_GPSR(IP13_15_12,	HRX0),
+	PINMUX_IPSR_MSEL(IP13_15_12,	MSIOF1_RXD_D,		SEL_MSIOF1_3),
+	PINMUX_IPSR_MSEL(IP13_15_12,	SSI_SDATA2_B,		SEL_SSI2_1),
+	PINMUX_IPSR_MSEL(IP13_15_12,	TS_SDEN0_D,		SEL_TSIF0_3),
+	PINMUX_IPSR_MSEL(IP13_15_12,	STP_ISEN_0_D,		SEL_SSP1_0_3),
+	PINMUX_IPSR_MSEL(IP13_15_12,	RIF0_D0_C,		SEL_DRIF0_2),
+
+	PINMUX_IPSR_GPSR(IP13_19_16,	HTX0),
+	PINMUX_IPSR_MSEL(IP13_19_16,	MSIOF1_TXD_D,		SEL_MSIOF1_3),
+	PINMUX_IPSR_MSEL(IP13_19_16,	SSI_SDATA9_B,		SEL_SSI9_1),
+	PINMUX_IPSR_MSEL(IP13_19_16,	TS_SDAT0_D,		SEL_TSIF0_3),
+	PINMUX_IPSR_MSEL(IP13_19_16,	STP_ISD_0_D,		SEL_SSP1_0_3),
+	PINMUX_IPSR_MSEL(IP13_19_16,	RIF0_D1_C,		SEL_DRIF0_2),
+
+	PINMUX_IPSR_GPSR(IP13_23_20,	HCTS0_N),
+	PINMUX_IPSR_MSEL(IP13_23_20,	RX2_B,			SEL_SCIF2_1),
+	PINMUX_IPSR_MSEL(IP13_23_20,	MSIOF1_SYNC_D,		SEL_MSIOF1_3),
+	PINMUX_IPSR_MSEL(IP13_23_20,	SSI_SCK9_A,		SEL_SSI9_0),
+	PINMUX_IPSR_MSEL(IP13_23_20,	TS_SPSYNC0_D,		SEL_TSIF0_3),
+	PINMUX_IPSR_MSEL(IP13_23_20,	STP_ISSYNC_0_D,		SEL_SSP1_0_3),
+	PINMUX_IPSR_MSEL(IP13_23_20,	RIF0_SYNC_C,		SEL_DRIF0_2),
+	PINMUX_IPSR_GPSR(IP13_23_20,	AUDIO_CLKOUT1_A),
+
+	PINMUX_IPSR_GPSR(IP13_27_24,	HRTS0_N),
+	PINMUX_IPSR_MSEL(IP13_27_24,	TX2_B,			SEL_SCIF2_1),
+	PINMUX_IPSR_MSEL(IP13_27_24,	MSIOF1_SS1_D,		SEL_MSIOF1_3),
+	PINMUX_IPSR_MSEL(IP13_27_24,	SSI_WS9_A,		SEL_SSI9_0),
+	PINMUX_IPSR_MSEL(IP13_27_24,	STP_IVCXO27_0_D,	SEL_SSP1_0_3),
+	PINMUX_IPSR_MSEL(IP13_27_24,	BPFCLK_A,		SEL_FM_0),
+	PINMUX_IPSR_GPSR(IP13_27_24,	AUDIO_CLKOUT2_A),
+
+	PINMUX_IPSR_GPSR(IP13_31_28,	MSIOF0_SYNC),
+	PINMUX_IPSR_GPSR(IP13_31_28,	AUDIO_CLKOUT_A),
+	PINMUX_IPSR_MSEL(IP13_31_28,	TX5_B,			SEL_SCIF5_1),
+	PINMUX_IPSR_MSEL(IP13_31_28,	BPFCLK_D,		SEL_FM_3),
+
+	/* IPSR14 */
+	PINMUX_IPSR_GPSR(IP14_3_0,	MSIOF0_SS1),
+	PINMUX_IPSR_MSEL(IP14_3_0,	RX5_A,			SEL_SCIF5_0),
+	PINMUX_IPSR_GPSR(IP14_3_0,	NFWP_N_A),
+	PINMUX_IPSR_MSEL(IP14_3_0,	AUDIO_CLKA_C,		SEL_ADGA_2),
+	PINMUX_IPSR_MSEL(IP14_3_0,	SSI_SCK2_A,		SEL_SSI2_0),
+	PINMUX_IPSR_MSEL(IP14_3_0,	STP_IVCXO27_0_C,	SEL_SSP1_0_2),
+	PINMUX_IPSR_GPSR(IP14_3_0,	AUDIO_CLKOUT3_A),
+	PINMUX_IPSR_MSEL(IP14_3_0,	TCLK1_B,		SEL_TIMER_TMU1_1),
+
+	PINMUX_IPSR_GPSR(IP14_7_4,	MSIOF0_SS2),
+	PINMUX_IPSR_MSEL(IP14_7_4,	TX5_A,			SEL_SCIF5_0),
+	PINMUX_IPSR_MSEL(IP14_7_4,	MSIOF1_SS2_D,		SEL_MSIOF1_3),
+	PINMUX_IPSR_MSEL(IP14_7_4,	AUDIO_CLKC_A,		SEL_ADGC_0),
+	PINMUX_IPSR_MSEL(IP14_7_4,	SSI_WS2_A,		SEL_SSI2_0),
+	PINMUX_IPSR_MSEL(IP14_7_4,	STP_OPWM_0_D,		SEL_SSP1_0_3),
+	PINMUX_IPSR_GPSR(IP14_7_4,	AUDIO_CLKOUT_D),
+	PINMUX_IPSR_MSEL(IP14_7_4,	SPEEDIN_B,		SEL_SPEED_PULSE_1),
+
+	PINMUX_IPSR_GPSR(IP14_11_8,	MLB_CLK),
+	PINMUX_IPSR_MSEL(IP14_11_8,	MSIOF1_SCK_F,		SEL_MSIOF1_5),
+	PINMUX_IPSR_MSEL(IP14_11_8,	SCL1_B,			SEL_I2C1_1),
+
+	PINMUX_IPSR_GPSR(IP14_15_12,	MLB_SIG),
+	PINMUX_IPSR_MSEL(IP14_15_12,	RX1_B,			SEL_SCIF1_1),
+	PINMUX_IPSR_MSEL(IP14_15_12,	MSIOF1_SYNC_F,		SEL_MSIOF1_5),
+	PINMUX_IPSR_MSEL(IP14_15_12,	SDA1_B,			SEL_I2C1_1),
+
+	PINMUX_IPSR_GPSR(IP14_19_16,	MLB_DAT),
+	PINMUX_IPSR_MSEL(IP14_19_16,	TX1_B,			SEL_SCIF1_1),
+	PINMUX_IPSR_MSEL(IP14_19_16,	MSIOF1_RXD_F,		SEL_MSIOF1_5),
+
+	PINMUX_IPSR_GPSR(IP14_23_20,	SSI_SCK01239),
+	PINMUX_IPSR_MSEL(IP14_23_20,	MSIOF1_TXD_F,		SEL_MSIOF1_5),
+
+	PINMUX_IPSR_GPSR(IP14_27_24,	SSI_WS01239),
+	PINMUX_IPSR_MSEL(IP14_27_24,	MSIOF1_SS1_F,		SEL_MSIOF1_5),
+
+	PINMUX_IPSR_GPSR(IP14_31_28,	SSI_SDATA0),
+	PINMUX_IPSR_MSEL(IP14_31_28,	MSIOF1_SS2_F,		SEL_MSIOF1_5),
+
+	/* IPSR15 */
+	PINMUX_IPSR_MSEL(IP15_3_0,	SSI_SDATA1_A,		SEL_SSI1_0),
+
+	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SDATA2_A,		SEL_SSI2_0),
+	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SCK1_B,		SEL_SSI1_1),
+
+	PINMUX_IPSR_GPSR(IP15_11_8,	SSI_SCK349),
+	PINMUX_IPSR_MSEL(IP15_11_8,	MSIOF1_SS1_A,		SEL_MSIOF1_0),
+	PINMUX_IPSR_MSEL(IP15_11_8,	STP_OPWM_0_A,		SEL_SSP1_0_0),
+
+	PINMUX_IPSR_GPSR(IP15_15_12,	SSI_WS349),
+	PINMUX_IPSR_MSEL(IP15_15_12,	HCTS2_N_A,		SEL_HSCIF2_0),
+	PINMUX_IPSR_MSEL(IP15_15_12,	MSIOF1_SS2_A,		SEL_MSIOF1_0),
+	PINMUX_IPSR_MSEL(IP15_15_12,	STP_IVCXO27_0_A,	SEL_SSP1_0_0),
+
+	PINMUX_IPSR_GPSR(IP15_19_16,	SSI_SDATA3),
+	PINMUX_IPSR_MSEL(IP15_19_16,	HRTS2_N_A,		SEL_HSCIF2_0),
+	PINMUX_IPSR_MSEL(IP15_19_16,	MSIOF1_TXD_A,		SEL_MSIOF1_0),
+	PINMUX_IPSR_MSEL(IP15_19_16,	TS_SCK0_A,		SEL_TSIF0_0),
+	PINMUX_IPSR_MSEL(IP15_19_16,	STP_ISCLK_0_A,		SEL_SSP1_0_0),
+	PINMUX_IPSR_MSEL(IP15_19_16,	RIF0_D1_A,		SEL_DRIF0_0),
+	PINMUX_IPSR_MSEL(IP15_19_16,	RIF2_D0_A,		SEL_DRIF2_0),
+
+	PINMUX_IPSR_GPSR(IP15_23_20,	SSI_SCK4),
+	PINMUX_IPSR_MSEL(IP15_23_20,	HRX2_A,			SEL_HSCIF2_0),
+	PINMUX_IPSR_MSEL(IP15_23_20,	MSIOF1_SCK_A,		SEL_MSIOF1_0),
+	PINMUX_IPSR_MSEL(IP15_23_20,	TS_SDAT0_A,		SEL_TSIF0_0),
+	PINMUX_IPSR_MSEL(IP15_23_20,	STP_ISD_0_A,		SEL_SSP1_0_0),
+	PINMUX_IPSR_MSEL(IP15_23_20,	RIF0_CLK_A,		SEL_DRIF0_0),
+	PINMUX_IPSR_MSEL(IP15_23_20,	RIF2_CLK_A,		SEL_DRIF2_0),
+
+	PINMUX_IPSR_GPSR(IP15_27_24,	SSI_WS4),
+	PINMUX_IPSR_MSEL(IP15_27_24,	HTX2_A,			SEL_HSCIF2_0),
+	PINMUX_IPSR_MSEL(IP15_27_24,	MSIOF1_SYNC_A,		SEL_MSIOF1_0),
+	PINMUX_IPSR_MSEL(IP15_27_24,	TS_SDEN0_A,		SEL_TSIF0_0),
+	PINMUX_IPSR_MSEL(IP15_27_24,	STP_ISEN_0_A,		SEL_SSP1_0_0),
+	PINMUX_IPSR_MSEL(IP15_27_24,	RIF0_SYNC_A,		SEL_DRIF0_0),
+	PINMUX_IPSR_MSEL(IP15_27_24,	RIF2_SYNC_A,		SEL_DRIF2_0),
+
+	PINMUX_IPSR_GPSR(IP15_31_28,	SSI_SDATA4),
+	PINMUX_IPSR_MSEL(IP15_31_28,	HSCK2_A,		SEL_HSCIF2_0),
+	PINMUX_IPSR_MSEL(IP15_31_28,	MSIOF1_RXD_A,		SEL_MSIOF1_0),
+	PINMUX_IPSR_MSEL(IP15_31_28,	TS_SPSYNC0_A,		SEL_TSIF0_0),
+	PINMUX_IPSR_MSEL(IP15_31_28,	STP_ISSYNC_0_A,		SEL_SSP1_0_0),
+	PINMUX_IPSR_MSEL(IP15_31_28,	RIF0_D0_A,		SEL_DRIF0_0),
+	PINMUX_IPSR_MSEL(IP15_31_28,	RIF2_D1_A,		SEL_DRIF2_0),
+
+	/* IPSR16 */
+	PINMUX_IPSR_GPSR(IP16_3_0,	SSI_SCK6),
+	PINMUX_IPSR_GPSR(IP16_3_0,	USB2_PWEN),
+	PINMUX_IPSR_MSEL(IP16_3_0,	SIM0_RST_D,		SEL_SIMCARD_3),
+
+	PINMUX_IPSR_GPSR(IP16_7_4,	SSI_WS6),
+	PINMUX_IPSR_GPSR(IP16_7_4,	USB2_OVC),
+	PINMUX_IPSR_MSEL(IP16_7_4,	SIM0_D_D,		SEL_SIMCARD_3),
+
+	PINMUX_IPSR_GPSR(IP16_11_8,	SSI_SDATA6),
+	PINMUX_IPSR_MSEL(IP16_11_8,	SIM0_CLK_D,		SEL_SIMCARD_3),
+	PINMUX_IPSR_GPSR(IP16_11_8,	SATA_DEVSLP_A),
+
+	PINMUX_IPSR_GPSR(IP16_15_12,	SSI_SCK78),
+	PINMUX_IPSR_MSEL(IP16_15_12,	HRX2_B,			SEL_HSCIF2_1),
+	PINMUX_IPSR_MSEL(IP16_15_12,	MSIOF1_SCK_C,		SEL_MSIOF1_2),
+	PINMUX_IPSR_MSEL(IP16_15_12,	TS_SCK1_A,		SEL_TSIF1_0),
+	PINMUX_IPSR_MSEL(IP16_15_12,	STP_ISCLK_1_A,		SEL_SSP1_1_0),
+	PINMUX_IPSR_MSEL(IP16_15_12,	RIF1_CLK_A,		SEL_DRIF1_0),
+	PINMUX_IPSR_MSEL(IP16_15_12,	RIF3_CLK_A,		SEL_DRIF3_0),
+
+	PINMUX_IPSR_GPSR(IP16_19_16,	SSI_WS78),
+	PINMUX_IPSR_MSEL(IP16_19_16,	HTX2_B,			SEL_HSCIF2_1),
+	PINMUX_IPSR_MSEL(IP16_19_16,	MSIOF1_SYNC_C,		SEL_MSIOF1_2),
+	PINMUX_IPSR_MSEL(IP16_19_16,	TS_SDAT1_A,		SEL_TSIF1_0),
+	PINMUX_IPSR_MSEL(IP16_19_16,	STP_ISD_1_A,		SEL_SSP1_1_0),
+	PINMUX_IPSR_MSEL(IP16_19_16,	RIF1_SYNC_A,		SEL_DRIF1_0),
+	PINMUX_IPSR_MSEL(IP16_19_16,	RIF3_SYNC_A,		SEL_DRIF3_0),
+
+	PINMUX_IPSR_GPSR(IP16_23_20,	SSI_SDATA7),
+	PINMUX_IPSR_MSEL(IP16_23_20,	HCTS2_N_B,		SEL_HSCIF2_1),
+	PINMUX_IPSR_MSEL(IP16_23_20,	MSIOF1_RXD_C,		SEL_MSIOF1_2),
+	PINMUX_IPSR_MSEL(IP16_23_20,	TS_SDEN1_A,		SEL_TSIF1_0),
+	PINMUX_IPSR_MSEL(IP16_23_20,	STP_ISEN_1_A,		SEL_SSP1_1_0),
+	PINMUX_IPSR_MSEL(IP16_23_20,	RIF1_D0_A,		SEL_DRIF1_0),
+	PINMUX_IPSR_MSEL(IP16_23_20,	RIF3_D0_A,		SEL_DRIF3_0),
+	PINMUX_IPSR_MSEL(IP16_23_20,	TCLK2_A,		SEL_TIMER_TMU2_0),
+
+	PINMUX_IPSR_GPSR(IP16_27_24,	SSI_SDATA8),
+	PINMUX_IPSR_MSEL(IP16_27_24,	HRTS2_N_B,		SEL_HSCIF2_1),
+	PINMUX_IPSR_MSEL(IP16_27_24,	MSIOF1_TXD_C,		SEL_MSIOF1_2),
+	PINMUX_IPSR_MSEL(IP16_27_24,	TS_SPSYNC1_A,		SEL_TSIF1_0),
+	PINMUX_IPSR_MSEL(IP16_27_24,	STP_ISSYNC_1_A,		SEL_SSP1_1_0),
+	PINMUX_IPSR_MSEL(IP16_27_24,	RIF1_D1_A,		SEL_DRIF1_0),
+	PINMUX_IPSR_MSEL(IP16_27_24,	RIF3_D1_A,		SEL_DRIF3_0),
+
+	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_SDATA9_A,		SEL_SSI9_0),
+	PINMUX_IPSR_MSEL(IP16_31_28,	HSCK2_B,		SEL_HSCIF2_1),
+	PINMUX_IPSR_MSEL(IP16_31_28,	MSIOF1_SS1_C,		SEL_MSIOF1_2),
+	PINMUX_IPSR_MSEL(IP16_31_28,	HSCK1_A,		SEL_HSCIF1_0),
+	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_WS1_B,		SEL_SSI1_1),
+	PINMUX_IPSR_GPSR(IP16_31_28,	SCK1),
+	PINMUX_IPSR_MSEL(IP16_31_28,	STP_IVCXO27_1_A,	SEL_SSP1_1_0),
+	PINMUX_IPSR_MSEL(IP16_31_28,	SCK5_A,			SEL_SCIF5_0),
+
+	/* IPSR17 */
+	PINMUX_IPSR_MSEL(IP17_3_0,	AUDIO_CLKA_A,		SEL_ADGA_0),
+
+	PINMUX_IPSR_MSEL(IP17_7_4,	AUDIO_CLKB_B,		SEL_ADGB_1),
+	PINMUX_IPSR_MSEL(IP17_7_4,	SCIF_CLK_A,		SEL_SCIF_0),
+	PINMUX_IPSR_MSEL(IP17_7_4,	STP_IVCXO27_1_D,	SEL_SSP1_1_3),
+	PINMUX_IPSR_MSEL(IP17_7_4,	REMOCON_A,		SEL_REMOCON_0),
+	PINMUX_IPSR_MSEL(IP17_7_4,	TCLK1_A,		SEL_TIMER_TMU1_0),
+
+	PINMUX_IPSR_GPSR(IP17_11_8,	USB0_PWEN),
+	PINMUX_IPSR_MSEL(IP17_11_8,	SIM0_RST_C,		SEL_SIMCARD_2),
+	PINMUX_IPSR_MSEL(IP17_11_8,	TS_SCK1_D,		SEL_TSIF1_3),
+	PINMUX_IPSR_MSEL(IP17_11_8,	STP_ISCLK_1_D,		SEL_SSP1_1_3),
+	PINMUX_IPSR_MSEL(IP17_11_8,	BPFCLK_B,		SEL_FM_1),
+	PINMUX_IPSR_MSEL(IP17_11_8,	RIF3_CLK_B,		SEL_DRIF3_1),
+	PINMUX_IPSR_MSEL(IP17_11_8,	HSCK2_C,		SEL_HSCIF2_2),
+
+	PINMUX_IPSR_GPSR(IP17_15_12,	USB0_OVC),
+	PINMUX_IPSR_MSEL(IP17_15_12,	SIM0_D_C,		SEL_SIMCARD_2),
+	PINMUX_IPSR_MSEL(IP17_15_12,	TS_SDAT1_D,		SEL_TSIF1_3),
+	PINMUX_IPSR_MSEL(IP17_15_12,	STP_ISD_1_D,		SEL_SSP1_1_3),
+	PINMUX_IPSR_MSEL(IP17_15_12,	RIF3_SYNC_B,		SEL_DRIF3_1),
+	PINMUX_IPSR_MSEL(IP17_15_12,	HRX2_C,			SEL_HSCIF2_2),
+
+	PINMUX_IPSR_GPSR(IP17_19_16,	USB1_PWEN),
+	PINMUX_IPSR_MSEL(IP17_19_16,	SIM0_CLK_C,		SEL_SIMCARD_2),
+	PINMUX_IPSR_MSEL(IP17_19_16,	SSI_SCK1_A,		SEL_SSI1_0),
+	PINMUX_IPSR_MSEL(IP17_19_16,	TS_SCK0_E,		SEL_TSIF0_4),
+	PINMUX_IPSR_MSEL(IP17_19_16,	STP_ISCLK_0_E,		SEL_SSP1_0_4),
+	PINMUX_IPSR_MSEL(IP17_19_16,	FMCLK_B,		SEL_FM_1),
+	PINMUX_IPSR_MSEL(IP17_19_16,	RIF2_CLK_B,		SEL_DRIF2_1),
+	PINMUX_IPSR_MSEL(IP17_19_16,	SPEEDIN_A,		SEL_SPEED_PULSE_0),
+	PINMUX_IPSR_MSEL(IP17_19_16,	HTX2_C,			SEL_HSCIF2_2),
+
+	PINMUX_IPSR_GPSR(IP17_23_20,	USB1_OVC),
+	PINMUX_IPSR_MSEL(IP17_23_20,	MSIOF1_SS2_C,		SEL_MSIOF1_2),
+	PINMUX_IPSR_MSEL(IP17_23_20,	SSI_WS1_A,		SEL_SSI1_0),
+	PINMUX_IPSR_MSEL(IP17_23_20,	TS_SDAT0_E,		SEL_TSIF0_4),
+	PINMUX_IPSR_MSEL(IP17_23_20,	STP_ISD_0_E,		SEL_SSP1_0_4),
+	PINMUX_IPSR_MSEL(IP17_23_20,	FMIN_B,			SEL_FM_1),
+	PINMUX_IPSR_MSEL(IP17_23_20,	RIF2_SYNC_B,		SEL_DRIF2_1),
+	PINMUX_IPSR_MSEL(IP17_23_20,	REMOCON_B,		SEL_REMOCON_1),
+	PINMUX_IPSR_MSEL(IP17_23_20,	HCTS2_N_C,		SEL_HSCIF2_2),
+
+	PINMUX_IPSR_GPSR(IP17_27_24,	USB30_PWEN),
+	PINMUX_IPSR_GPSR(IP17_27_24,	AUDIO_CLKOUT_B),
+	PINMUX_IPSR_MSEL(IP17_27_24,	SSI_SCK2_B,		SEL_SSI2_1),
+	PINMUX_IPSR_MSEL(IP17_27_24,	TS_SDEN1_D,		SEL_TSIF1_3),
+	PINMUX_IPSR_MSEL(IP17_27_24,	STP_ISEN_1_D,		SEL_SSP1_1_3),
+	PINMUX_IPSR_MSEL(IP17_27_24,	STP_OPWM_0_E,		SEL_SSP1_0_4),
+	PINMUX_IPSR_MSEL(IP17_27_24,	RIF3_D0_B,		SEL_DRIF3_1),
+	PINMUX_IPSR_MSEL(IP17_27_24,	TCLK2_B,		SEL_TIMER_TMU2_1),
+	PINMUX_IPSR_GPSR(IP17_27_24,	TPU0TO0),
+	PINMUX_IPSR_MSEL(IP17_27_24,	BPFCLK_C,		SEL_FM_2),
+	PINMUX_IPSR_MSEL(IP17_27_24,	HRTS2_N_C,		SEL_HSCIF2_2),
+
+	PINMUX_IPSR_GPSR(IP17_31_28,	USB30_OVC),
+	PINMUX_IPSR_GPSR(IP17_31_28,	AUDIO_CLKOUT1_B),
+	PINMUX_IPSR_MSEL(IP17_31_28,	SSI_WS2_B,		SEL_SSI2_1),
+	PINMUX_IPSR_MSEL(IP17_31_28,	TS_SPSYNC1_D,		SEL_TSIF1_3),
+	PINMUX_IPSR_MSEL(IP17_31_28,	STP_ISSYNC_1_D,		SEL_SSP1_1_3),
+	PINMUX_IPSR_MSEL(IP17_31_28,	STP_IVCXO27_0_E,	SEL_SSP1_0_4),
+	PINMUX_IPSR_MSEL(IP17_31_28,	RIF3_D1_B,		SEL_DRIF3_1),
+	PINMUX_IPSR_GPSR(IP17_31_28,	FSO_TOE_N),
+	PINMUX_IPSR_GPSR(IP17_31_28,	TPU0TO1),
+
+	/* IPSR18 */
+	PINMUX_IPSR_GPSR(IP18_3_0,	USB2_CH3_PWEN),
+	PINMUX_IPSR_GPSR(IP18_3_0,	AUDIO_CLKOUT2_B),
+	PINMUX_IPSR_MSEL(IP18_3_0,	SSI_SCK9_B,		SEL_SSI9_1),
+	PINMUX_IPSR_MSEL(IP18_3_0,	TS_SDEN0_E,		SEL_TSIF0_4),
+	PINMUX_IPSR_MSEL(IP18_3_0,	STP_ISEN_0_E,		SEL_SSP1_0_4),
+	PINMUX_IPSR_MSEL(IP18_3_0,	RIF2_D0_B,		SEL_DRIF2_1),
+	PINMUX_IPSR_GPSR(IP18_3_0,	TPU0TO2),
+	PINMUX_IPSR_MSEL(IP18_3_0,	FMCLK_C,		SEL_FM_2),
+	PINMUX_IPSR_MSEL(IP18_3_0,	FMCLK_D,		SEL_FM_3),
+
+	PINMUX_IPSR_GPSR(IP18_7_4,	USB2_CH3_OVC),
+	PINMUX_IPSR_GPSR(IP18_7_4,	AUDIO_CLKOUT3_B),
+	PINMUX_IPSR_MSEL(IP18_7_4,	SSI_WS9_B,		SEL_SSI9_1),
+	PINMUX_IPSR_MSEL(IP18_7_4,	TS_SPSYNC0_E,		SEL_TSIF0_4),
+	PINMUX_IPSR_MSEL(IP18_7_4,	STP_ISSYNC_0_E,		SEL_SSP1_0_4),
+	PINMUX_IPSR_MSEL(IP18_7_4,	RIF2_D1_B,		SEL_DRIF2_1),
+	PINMUX_IPSR_GPSR(IP18_7_4,	TPU0TO3),
+	PINMUX_IPSR_MSEL(IP18_7_4,	FMIN_C,			SEL_FM_2),
+	PINMUX_IPSR_MSEL(IP18_7_4,	FMIN_D,			SEL_FM_3),
+
+/*
+ * Static pins cannot be muxed between different functions but
+ * still need mark entries in the pinmux list. Add each static
+ * pin to the list without an associated function. The sh-pfc
+ * core will do the right thing and skip trying to mux the pin
+ * while still applying configuration to it.
+ */
+#define FM(x)	PINMUX_DATA(x##_MARK, 0),
+	PINMUX_STATIC
+#undef FM
+};
+
+/*
+ * Pins not associated with a GPIO port.
+ */
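+/*
+ * GP_ASSIGN_LAST() continues the pin numbering after the last GPIO-capable
+ * pin, so the dedicated-function NOGP_ALL() entries below get unique pin
+ * indices in pinmux_pins[].
+ */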
+enum {
+	GP_ASSIGN_LAST(),
+	NOGP_ALL(),
+};
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+	PINMUX_GPIO_GP_ALL(),
+	PINMUX_NOGP_ALL(),
+};
+
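+/*
+ * Pin groups: each <group>_pins[] array lists the physical pins making up a
+ * group, and the matching <group>_mux[] array lists, index for index, the
+ * function marks to be programmed on those pins.
+ */
+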
+/* - AUDIO CLOCK ------------------------------------------------------------ */
+static const unsigned int audio_clk_a_a_pins[] = {
+	/* CLK A */
+	RCAR_GP_PIN(6, 22),
+};
+static const unsigned int audio_clk_a_a_mux[] = {
+	AUDIO_CLKA_A_MARK,
+};
+static const unsigned int audio_clk_a_b_pins[] = {
+	/* CLK A */
+	RCAR_GP_PIN(5, 4),
+};
+static const unsigned int audio_clk_a_b_mux[] = {
+	AUDIO_CLKA_B_MARK,
+};
+static const unsigned int audio_clk_a_c_pins[] = {
+	/* CLK A */
+	RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clk_a_c_mux[] = {
+	AUDIO_CLKA_C_MARK,
+};
+static const unsigned int audio_clk_b_a_pins[] = {
+	/* CLK B */
+	RCAR_GP_PIN(5, 12),
+};
+static const unsigned int audio_clk_b_a_mux[] = {
+	AUDIO_CLKB_A_MARK,
+};
+static const unsigned int audio_clk_b_b_pins[] = {
+	/* CLK B */
+	RCAR_GP_PIN(6, 23),
+};
+static const unsigned int audio_clk_b_b_mux[] = {
+	AUDIO_CLKB_B_MARK,
+};
+static const unsigned int audio_clk_c_a_pins[] = {
+	/* CLK C */
+	RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clk_c_a_mux[] = {
+	AUDIO_CLKC_A_MARK,
+};
+static const unsigned int audio_clk_c_b_pins[] = {
+	/* CLK C */
+	RCAR_GP_PIN(5, 0),
+};
+static const unsigned int audio_clk_c_b_mux[] = {
+	AUDIO_CLKC_B_MARK,
+};
+static const unsigned int audio_clkout_a_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(5, 18),
+};
+static const unsigned int audio_clkout_a_mux[] = {
+	AUDIO_CLKOUT_A_MARK,
+};
+static const unsigned int audio_clkout_b_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(6, 28),
+};
+static const unsigned int audio_clkout_b_mux[] = {
+	AUDIO_CLKOUT_B_MARK,
+};
+static const unsigned int audio_clkout_c_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(5, 3),
+};
+static const unsigned int audio_clkout_c_mux[] = {
+	AUDIO_CLKOUT_C_MARK,
+};
+static const unsigned int audio_clkout_d_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clkout_d_mux[] = {
+	AUDIO_CLKOUT_D_MARK,
+};
+static const unsigned int audio_clkout1_a_pins[] = {
+	/* CLKOUT1 */
+	RCAR_GP_PIN(5, 15),
+};
+static const unsigned int audio_clkout1_a_mux[] = {
+	AUDIO_CLKOUT1_A_MARK,
+};
+static const unsigned int audio_clkout1_b_pins[] = {
+	/* CLKOUT1 */
+	RCAR_GP_PIN(6, 29),
+};
+static const unsigned int audio_clkout1_b_mux[] = {
+	AUDIO_CLKOUT1_B_MARK,
+};
+static const unsigned int audio_clkout2_a_pins[] = {
+	/* CLKOUT2 */
+	RCAR_GP_PIN(5, 16),
+};
+static const unsigned int audio_clkout2_a_mux[] = {
+	AUDIO_CLKOUT2_A_MARK,
+};
+static const unsigned int audio_clkout2_b_pins[] = {
+	/* CLKOUT2 */
+	RCAR_GP_PIN(6, 30),
+};
+static const unsigned int audio_clkout2_b_mux[] = {
+	AUDIO_CLKOUT2_B_MARK,
+};
+static const unsigned int audio_clkout3_a_pins[] = {
+	/* CLKOUT3 */
+	RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clkout3_a_mux[] = {
+	AUDIO_CLKOUT3_A_MARK,
+};
+static const unsigned int audio_clkout3_b_pins[] = {
+	/* CLKOUT3 */
+	RCAR_GP_PIN(6, 31),
+};
+static const unsigned int audio_clkout3_b_mux[] = {
+	AUDIO_CLKOUT3_B_MARK,
+};
+
+/* - EtherAVB --------------------------------------------------------------- */
+static const unsigned int avb_link_pins[] = {
+	/* AVB_LINK */
+	RCAR_GP_PIN(2, 12),
+};
+static const unsigned int avb_link_mux[] = {
+	AVB_LINK_MARK,
+};
+static const unsigned int avb_magic_pins[] = {
+	/* AVB_MAGIC */
+	RCAR_GP_PIN(2, 10),
+};
+static const unsigned int avb_magic_mux[] = {
+	AVB_MAGIC_MARK,
+};
+static const unsigned int avb_phy_int_pins[] = {
+	/* AVB_PHY_INT */
+	RCAR_GP_PIN(2, 11),
+};
+static const unsigned int avb_phy_int_mux[] = {
+	AVB_PHY_INT_MARK,
+};
+static const unsigned int avb_mdio_pins[] = {
+	/* AVB_MDC, AVB_MDIO */
+	RCAR_GP_PIN(2, 9), PIN_AVB_MDIO,
+};
+static const unsigned int avb_mdio_mux[] = {
+	AVB_MDC_MARK, AVB_MDIO_MARK,
+};
+static const unsigned int avb_mii_pins[] = {
+	/*
+	 * AVB_TX_CTL, AVB_TXC, AVB_TD0,
+	 * AVB_TD1, AVB_TD2, AVB_TD3,
+	 * AVB_RX_CTL, AVB_RXC, AVB_RD0,
+	 * AVB_RD1, AVB_RD2, AVB_RD3,
+	 * AVB_TXCREFCLK
+	 */
+	PIN_AVB_TX_CTL, PIN_AVB_TXC, PIN_AVB_TD0,
+	PIN_AVB_TD1, PIN_AVB_TD2, PIN_AVB_TD3,
+	PIN_AVB_RX_CTL, PIN_AVB_RXC, PIN_AVB_RD0,
+	PIN_AVB_RD1, PIN_AVB_RD2, PIN_AVB_RD3,
+	PIN_AVB_TXCREFCLK,
+};
+static const unsigned int avb_mii_mux[] = {
+	AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
+	AVB_TD1_MARK, AVB_TD2_MARK, AVB_TD3_MARK,
+	AVB_RX_CTL_MARK, AVB_RXC_MARK, AVB_RD0_MARK,
+	AVB_RD1_MARK, AVB_RD2_MARK, AVB_RD3_MARK,
+	AVB_TXCREFCLK_MARK,
+};
+static const unsigned int avb_avtp_pps_pins[] = {
+	/* AVB_AVTP_PPS */
+	RCAR_GP_PIN(2, 6),
+};
+static const unsigned int avb_avtp_pps_mux[] = {
+	AVB_AVTP_PPS_MARK,
+};
+static const unsigned int avb_avtp_match_a_pins[] = {
+	/* AVB_AVTP_MATCH_A */
+	RCAR_GP_PIN(2, 13),
+};
+static const unsigned int avb_avtp_match_a_mux[] = {
+	AVB_AVTP_MATCH_A_MARK,
+};
+static const unsigned int avb_avtp_capture_a_pins[] = {
+	/* AVB_AVTP_CAPTURE_A */
+	RCAR_GP_PIN(2, 14),
+};
+static const unsigned int avb_avtp_capture_a_mux[] = {
+	AVB_AVTP_CAPTURE_A_MARK,
+};
+static const unsigned int avb_avtp_match_b_pins[] = {
+	/* AVB_AVTP_MATCH_B */
+	RCAR_GP_PIN(1, 8),
+};
+static const unsigned int avb_avtp_match_b_mux[] = {
+	AVB_AVTP_MATCH_B_MARK,
+};
+static const unsigned int avb_avtp_capture_b_pins[] = {
+	/* AVB_AVTP_CAPTURE_B */
+	RCAR_GP_PIN(1, 11),
+};
+static const unsigned int avb_avtp_capture_b_mux[] = {
+	AVB_AVTP_CAPTURE_B_MARK,
+};
+
+/* - CAN ------------------------------------------------------------------ */
+static const unsigned int can0_data_a_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(1, 23),	RCAR_GP_PIN(1, 24),
+};
+static const unsigned int can0_data_a_mux[] = {
+	CAN0_TX_A_MARK,		CAN0_RX_A_MARK,
+};
+static const unsigned int can0_data_b_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(2, 0),	RCAR_GP_PIN(2, 1),
+};
+static const unsigned int can0_data_b_mux[] = {
+	CAN0_TX_B_MARK,		CAN0_RX_B_MARK,
+};
+static const unsigned int can1_data_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(1, 22),	RCAR_GP_PIN(1, 26),
+};
+static const unsigned int can1_data_mux[] = {
+	CAN1_TX_MARK,		CAN1_RX_MARK,
+};
+
+/* - CAN Clock -------------------------------------------------------------- */
+static const unsigned int can_clk_pins[] = {
+	/* CLK */
+	RCAR_GP_PIN(1, 25),
+};
+static const unsigned int can_clk_mux[] = {
+	CAN_CLK_MARK,
+};
+
+/* - CAN FD --------------------------------------------------------------- */
+static const unsigned int canfd0_data_a_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(1, 23),     RCAR_GP_PIN(1, 24),
+};
+static const unsigned int canfd0_data_a_mux[] = {
+	CANFD0_TX_A_MARK,       CANFD0_RX_A_MARK,
+};
+static const unsigned int canfd0_data_b_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(2, 0),      RCAR_GP_PIN(2, 1),
+};
+static const unsigned int canfd0_data_b_mux[] = {
+	CANFD0_TX_B_MARK,       CANFD0_RX_B_MARK,
+};
+static const unsigned int canfd1_data_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(1, 22),     RCAR_GP_PIN(1, 26),
+};
+static const unsigned int canfd1_data_mux[] = {
+	CANFD1_TX_MARK,         CANFD1_RX_MARK,
+};
+
+/* - DRIF0 --------------------------------------------------------------- */
+static const unsigned int drif0_ctrl_a_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int drif0_ctrl_a_mux[] = {
+	RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK,
+};
+static const unsigned int drif0_data0_a_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(6, 10),
+};
+static const unsigned int drif0_data0_a_mux[] = {
+	RIF0_D0_A_MARK,
+};
+static const unsigned int drif0_data1_a_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(6, 7),
+};
+static const unsigned int drif0_data1_a_mux[] = {
+	RIF0_D1_A_MARK,
+};
+static const unsigned int drif0_ctrl_b_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int drif0_ctrl_b_mux[] = {
+	RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK,
+};
+static const unsigned int drif0_data0_b_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(5, 1),
+};
+static const unsigned int drif0_data0_b_mux[] = {
+	RIF0_D0_B_MARK,
+};
+static const unsigned int drif0_data1_b_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(5, 2),
+};
+static const unsigned int drif0_data1_b_mux[] = {
+	RIF0_D1_B_MARK,
+};
+static const unsigned int drif0_ctrl_c_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int drif0_ctrl_c_mux[] = {
+	RIF0_CLK_C_MARK, RIF0_SYNC_C_MARK,
+};
+static const unsigned int drif0_data0_c_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(5, 13),
+};
+static const unsigned int drif0_data0_c_mux[] = {
+	RIF0_D0_C_MARK,
+};
+static const unsigned int drif0_data1_c_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(5, 14),
+};
+static const unsigned int drif0_data1_c_mux[] = {
+	RIF0_D1_C_MARK,
+};
+/* - DRIF1 --------------------------------------------------------------- */
+static const unsigned int drif1_ctrl_a_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int drif1_ctrl_a_mux[] = {
+	RIF1_CLK_A_MARK, RIF1_SYNC_A_MARK,
+};
+static const unsigned int drif1_data0_a_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(6, 19),
+};
+static const unsigned int drif1_data0_a_mux[] = {
+	RIF1_D0_A_MARK,
+};
+static const unsigned int drif1_data1_a_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(6, 20),
+};
+static const unsigned int drif1_data1_a_mux[] = {
+	RIF1_D1_A_MARK,
+};
+static const unsigned int drif1_ctrl_b_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int drif1_ctrl_b_mux[] = {
+	RIF1_CLK_B_MARK, RIF1_SYNC_B_MARK,
+};
+static const unsigned int drif1_data0_b_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(5, 7),
+};
+static const unsigned int drif1_data0_b_mux[] = {
+	RIF1_D0_B_MARK,
+};
+static const unsigned int drif1_data1_b_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(5, 8),
+};
+static const unsigned int drif1_data1_b_mux[] = {
+	RIF1_D1_B_MARK,
+};
+static const unsigned int drif1_ctrl_c_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11),
+};
+static const unsigned int drif1_ctrl_c_mux[] = {
+	RIF1_CLK_C_MARK, RIF1_SYNC_C_MARK,
+};
+static const unsigned int drif1_data0_c_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(5, 6),
+};
+static const unsigned int drif1_data0_c_mux[] = {
+	RIF1_D0_C_MARK,
+};
+static const unsigned int drif1_data1_c_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(5, 10),
+};
+static const unsigned int drif1_data1_c_mux[] = {
+	RIF1_D1_C_MARK,
+};
+/* - DRIF2 --------------------------------------------------------------- */
+static const unsigned int drif2_ctrl_a_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int drif2_ctrl_a_mux[] = {
+	RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK,
+};
+static const unsigned int drif2_data0_a_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(6, 7),
+};
+static const unsigned int drif2_data0_a_mux[] = {
+	RIF2_D0_A_MARK,
+};
+static const unsigned int drif2_data1_a_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(6, 10),
+};
+static const unsigned int drif2_data1_a_mux[] = {
+	RIF2_D1_A_MARK,
+};
+static const unsigned int drif2_ctrl_b_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int drif2_ctrl_b_mux[] = {
+	RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK,
+};
+static const unsigned int drif2_data0_b_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(6, 30),
+};
+static const unsigned int drif2_data0_b_mux[] = {
+	RIF2_D0_B_MARK,
+};
+static const unsigned int drif2_data1_b_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(6, 31),
+};
+static const unsigned int drif2_data1_b_mux[] = {
+	RIF2_D1_B_MARK,
+};
+/* - DRIF3 --------------------------------------------------------------- */
+static const unsigned int drif3_ctrl_a_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int drif3_ctrl_a_mux[] = {
+	RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK,
+};
+static const unsigned int drif3_data0_a_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(6, 19),
+};
+static const unsigned int drif3_data0_a_mux[] = {
+	RIF3_D0_A_MARK,
+};
+static const unsigned int drif3_data1_a_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(6, 20),
+};
+static const unsigned int drif3_data1_a_mux[] = {
+	RIF3_D1_A_MARK,
+};
+static const unsigned int drif3_ctrl_b_pins[] = {
+	/* CLK, SYNC */
+	RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
+};
+static const unsigned int drif3_ctrl_b_mux[] = {
+	RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK,
+};
+static const unsigned int drif3_data0_b_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(6, 28),
+};
+static const unsigned int drif3_data0_b_mux[] = {
+	RIF3_D0_B_MARK,
+};
+static const unsigned int drif3_data1_b_pins[] = {
+	/* D1 */
+	RCAR_GP_PIN(6, 29),
+};
+static const unsigned int drif3_data1_b_mux[] = {
+	RIF3_D1_B_MARK,
+};
+
+/* - DU --------------------------------------------------------------------- */
+static const unsigned int du_rgb666_pins[] = {
+	/* R[7:2], G[7:2], B[7:2] */
+	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+	RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+	RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+	RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+	RCAR_GP_PIN(1, 7),  RCAR_GP_PIN(1, 6),  RCAR_GP_PIN(1, 5),
+	RCAR_GP_PIN(1, 4),  RCAR_GP_PIN(1, 3),  RCAR_GP_PIN(1, 2),
+};
+static const unsigned int du_rgb666_mux[] = {
+	DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+	DU_DR3_MARK, DU_DR2_MARK,
+	DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+	DU_DG3_MARK, DU_DG2_MARK,
+	DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+	DU_DB3_MARK, DU_DB2_MARK,
+};
+static const unsigned int du_rgb888_pins[] = {
+	/* R[7:0], G[7:0], B[7:0] */
+	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+	RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+	RCAR_GP_PIN(0, 9),  RCAR_GP_PIN(0, 8),
+	RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+	RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+	RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16),
+	RCAR_GP_PIN(1, 7),  RCAR_GP_PIN(1, 6),  RCAR_GP_PIN(1, 5),
+	RCAR_GP_PIN(1, 4),  RCAR_GP_PIN(1, 3),  RCAR_GP_PIN(1, 2),
+	RCAR_GP_PIN(1, 1),  RCAR_GP_PIN(1, 0),
+};
+static const unsigned int du_rgb888_mux[] = {
+	DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+	DU_DR3_MARK, DU_DR2_MARK, DU_DR1_MARK, DU_DR0_MARK,
+	DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+	DU_DG3_MARK, DU_DG2_MARK, DU_DG1_MARK, DU_DG0_MARK,
+	DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+	DU_DB3_MARK, DU_DB2_MARK, DU_DB1_MARK, DU_DB0_MARK,
+};
+static const unsigned int du_clk_out_0_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(1, 27),
+};
+static const unsigned int du_clk_out_0_mux[] = {
+	DU_DOTCLKOUT0_MARK
+};
+static const unsigned int du_clk_out_1_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(2, 3),
+};
+static const unsigned int du_clk_out_1_mux[] = {
+	DU_DOTCLKOUT1_MARK
+};
+static const unsigned int du_sync_pins[] = {
+	/* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
+	RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 4),
+};
+static const unsigned int du_sync_mux[] = {
+	DU_EXVSYNC_DU_VSYNC_MARK, DU_EXHSYNC_DU_HSYNC_MARK
+};
+static const unsigned int du_oddf_pins[] = {
+	/* EXDISP/EXODDF/EXCDE */
+	RCAR_GP_PIN(2, 2),
+};
+static const unsigned int du_oddf_mux[] = {
+	DU_EXODDF_DU_ODDF_DISP_CDE_MARK,
+};
+static const unsigned int du_cde_pins[] = {
+	/* CDE */
+	RCAR_GP_PIN(2, 0),
+};
+static const unsigned int du_cde_mux[] = {
+	DU_CDE_MARK,
+};
+static const unsigned int du_disp_pins[] = {
+	/* DISP */
+	RCAR_GP_PIN(2, 1),
+};
+static const unsigned int du_disp_mux[] = {
+	DU_DISP_MARK,
+};
+
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
+};
+static const unsigned int hscif0_data_mux[] = {
+	HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 12),
+};
+static const unsigned int hscif0_clk_mux[] = {
+	HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(5, 16), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+	HRTS0_N_MARK, HCTS0_N_MARK,
+};
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+static const unsigned int hscif1_data_a_mux[] = {
+	HRX1_A_MARK, HTX1_A_MARK,
+};
+static const unsigned int hscif1_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 21),
+};
+static const unsigned int hscif1_clk_a_mux[] = {
+	HSCK1_A_MARK,
+};
+static const unsigned int hscif1_ctrl_a_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
+};
+static const unsigned int hscif1_ctrl_a_mux[] = {
+	HRTS1_N_A_MARK, HCTS1_N_A_MARK,
+};
+
+static const unsigned int hscif1_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+static const unsigned int hscif1_data_b_mux[] = {
+	HRX1_B_MARK, HTX1_B_MARK,
+};
+static const unsigned int hscif1_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 0),
+};
+static const unsigned int hscif1_clk_b_mux[] = {
+	HSCK1_B_MARK,
+};
+static const unsigned int hscif1_ctrl_b_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int hscif1_ctrl_b_mux[] = {
+	HRTS1_N_B_MARK, HCTS1_N_B_MARK,
+};
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int hscif2_data_a_mux[] = {
+	HRX2_A_MARK, HTX2_A_MARK,
+};
+static const unsigned int hscif2_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 10),
+};
+static const unsigned int hscif2_clk_a_mux[] = {
+	HSCK2_A_MARK,
+};
+static const unsigned int hscif2_ctrl_a_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int hscif2_ctrl_a_mux[] = {
+	HRTS2_N_A_MARK, HCTS2_N_A_MARK,
+};
+
+static const unsigned int hscif2_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int hscif2_data_b_mux[] = {
+	HRX2_B_MARK, HTX2_B_MARK,
+};
+static const unsigned int hscif2_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 21),
+};
+static const unsigned int hscif2_clk_b_mux[] = {
+	HSCK2_B_MARK,
+};
+static const unsigned int hscif2_ctrl_b_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(6, 20), RCAR_GP_PIN(6, 19),
+};
+static const unsigned int hscif2_ctrl_b_mux[] = {
+	HRTS2_N_B_MARK, HCTS2_N_B_MARK,
+};
+
+static const unsigned int hscif2_data_c_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(6, 25), RCAR_GP_PIN(6, 26),
+};
+static const unsigned int hscif2_data_c_mux[] = {
+	HRX2_C_MARK, HTX2_C_MARK,
+};
+static const unsigned int hscif2_clk_c_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 24),
+};
+static const unsigned int hscif2_clk_c_mux[] = {
+	HSCK2_C_MARK,
+};
+static const unsigned int hscif2_ctrl_c_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int hscif2_ctrl_c_mux[] = {
+	HRTS2_N_C_MARK, HCTS2_N_C_MARK,
+};
+/* - HSCIF3 ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int hscif3_data_a_mux[] = {
+	HRX3_A_MARK, HTX3_A_MARK,
+};
+static const unsigned int hscif3_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 22),
+};
+static const unsigned int hscif3_clk_mux[] = {
+	HSCK3_MARK,
+};
+static const unsigned int hscif3_ctrl_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int hscif3_ctrl_mux[] = {
+	HRTS3_N_MARK, HCTS3_N_MARK,
+};
+
+static const unsigned int hscif3_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+};
+static const unsigned int hscif3_data_b_mux[] = {
+	HRX3_B_MARK, HTX3_B_MARK,
+};
+static const unsigned int hscif3_data_c_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+};
+static const unsigned int hscif3_data_c_mux[] = {
+	HRX3_C_MARK, HTX3_C_MARK,
+};
+static const unsigned int hscif3_data_d_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int hscif3_data_d_mux[] = {
+	HRX3_D_MARK, HTX3_D_MARK,
+};
+/* - HSCIF4 ----------------------------------------------------------------- */
+static const unsigned int hscif4_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int hscif4_data_a_mux[] = {
+	HRX4_A_MARK, HTX4_A_MARK,
+};
+static const unsigned int hscif4_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 11),
+};
+static const unsigned int hscif4_clk_mux[] = {
+	HSCK4_MARK,
+};
+static const unsigned int hscif4_ctrl_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14),
+};
+static const unsigned int hscif4_ctrl_mux[] = {
+	HRTS4_N_MARK, HCTS4_N_MARK,
+};
+
+static const unsigned int hscif4_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int hscif4_data_b_mux[] = {
+	HRX4_B_MARK, HTX4_B_MARK,
+};
+
+/* - I2C -------------------------------------------------------------------- */
+static const unsigned int i2c0_pins[] = {
+	/* SCL, SDA */
+	RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15),
+};
+
+static const unsigned int i2c0_mux[] = {
+	SCL0_MARK, SDA0_MARK,
+};
+
+static const unsigned int i2c1_a_pins[] = {
+	/* SDA, SCL */
+	RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int i2c1_a_mux[] = {
+	SDA1_A_MARK, SCL1_A_MARK,
+};
+static const unsigned int i2c1_b_pins[] = {
+	/* SDA, SCL */
+	RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 23),
+};
+static const unsigned int i2c1_b_mux[] = {
+	SDA1_B_MARK, SCL1_B_MARK,
+};
+static const unsigned int i2c2_a_pins[] = {
+	/* SDA, SCL */
+	RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int i2c2_a_mux[] = {
+	SDA2_A_MARK, SCL2_A_MARK,
+};
+static const unsigned int i2c2_b_pins[] = {
+	/* SDA, SCL */
+	RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12),
+};
+static const unsigned int i2c2_b_mux[] = {
+	SDA2_B_MARK, SCL2_B_MARK,
+};
+
+static const unsigned int i2c3_pins[] = {
+	/* SCL, SDA */
+	RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+
+static const unsigned int i2c3_mux[] = {
+	SCL3_MARK, SDA3_MARK,
+};
+
+static const unsigned int i2c5_pins[] = {
+	/* SCL, SDA */
+	RCAR_GP_PIN(2, 13), RCAR_GP_PIN(2, 14),
+};
+
+static const unsigned int i2c5_mux[] = {
+	SCL5_MARK, SDA5_MARK,
+};
+
+static const unsigned int i2c6_a_pins[] = {
+	/* SDA, SCL */
+	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int i2c6_a_mux[] = {
+	SDA6_A_MARK, SCL6_A_MARK,
+};
+static const unsigned int i2c6_b_pins[] = {
+	/* SDA, SCL */
+	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int i2c6_b_mux[] = {
+	SDA6_B_MARK, SCL6_B_MARK,
+};
+static const unsigned int i2c6_c_pins[] = {
+	/* SDA, SCL */
+	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int i2c6_c_mux[] = {
+	SDA6_C_MARK, SCL6_C_MARK,
+};
+
+/* - INTC-EX ---------------------------------------------------------------- */
+static const unsigned int intc_ex_irq0_pins[] = {
+	/* IRQ0 */
+	RCAR_GP_PIN(2, 0),
+};
+static const unsigned int intc_ex_irq0_mux[] = {
+	IRQ0_MARK,
+};
+static const unsigned int intc_ex_irq1_pins[] = {
+	/* IRQ1 */
+	RCAR_GP_PIN(2, 1),
+};
+static const unsigned int intc_ex_irq1_mux[] = {
+	IRQ1_MARK,
+};
+static const unsigned int intc_ex_irq2_pins[] = {
+	/* IRQ2 */
+	RCAR_GP_PIN(2, 2),
+};
+static const unsigned int intc_ex_irq2_mux[] = {
+	IRQ2_MARK,
+};
+static const unsigned int intc_ex_irq3_pins[] = {
+	/* IRQ3 */
+	RCAR_GP_PIN(2, 3),
+};
+static const unsigned int intc_ex_irq3_mux[] = {
+	IRQ3_MARK,
+};
+static const unsigned int intc_ex_irq4_pins[] = {
+	/* IRQ4 */
+	RCAR_GP_PIN(2, 4),
+};
+static const unsigned int intc_ex_irq4_mux[] = {
+	IRQ4_MARK,
+};
+static const unsigned int intc_ex_irq5_pins[] = {
+	/* IRQ5 */
+	RCAR_GP_PIN(2, 5),
+};
+static const unsigned int intc_ex_irq5_mux[] = {
+	IRQ5_MARK,
+};
+
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 17),
+};
+static const unsigned int msiof0_clk_mux[] = {
+	MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(5, 18),
+};
+static const unsigned int msiof0_sync_mux[] = {
+	MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(5, 19),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+	MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(5, 21),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+	MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_txd_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(5, 20),
+};
+static const unsigned int msiof0_txd_mux[] = {
+	MSIOF0_TXD_MARK,
+};
+static const unsigned int msiof0_rxd_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(5, 22),
+};
+static const unsigned int msiof0_rxd_mux[] = {
+	MSIOF0_RXD_MARK,
+};
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 8),
+};
+static const unsigned int msiof1_clk_a_mux[] = {
+	MSIOF1_SCK_A_MARK,
+};
+static const unsigned int msiof1_sync_a_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(6, 9),
+};
+static const unsigned int msiof1_sync_a_mux[] = {
+	MSIOF1_SYNC_A_MARK,
+};
+static const unsigned int msiof1_ss1_a_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(6, 5),
+};
+static const unsigned int msiof1_ss1_a_mux[] = {
+	MSIOF1_SS1_A_MARK,
+};
+static const unsigned int msiof1_ss2_a_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(6, 6),
+};
+static const unsigned int msiof1_ss2_a_mux[] = {
+	MSIOF1_SS2_A_MARK,
+};
+static const unsigned int msiof1_txd_a_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(6, 7),
+};
+static const unsigned int msiof1_txd_a_mux[] = {
+	MSIOF1_TXD_A_MARK,
+};
+static const unsigned int msiof1_rxd_a_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(6, 10),
+};
+static const unsigned int msiof1_rxd_a_mux[] = {
+	MSIOF1_RXD_A_MARK,
+};
+static const unsigned int msiof1_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 9),
+};
+static const unsigned int msiof1_clk_b_mux[] = {
+	MSIOF1_SCK_B_MARK,
+};
+static const unsigned int msiof1_sync_b_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(5, 3),
+};
+static const unsigned int msiof1_sync_b_mux[] = {
+	MSIOF1_SYNC_B_MARK,
+};
+static const unsigned int msiof1_ss1_b_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(5, 4),
+};
+static const unsigned int msiof1_ss1_b_mux[] = {
+	MSIOF1_SS1_B_MARK,
+};
+static const unsigned int msiof1_ss2_b_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(5, 0),
+};
+static const unsigned int msiof1_ss2_b_mux[] = {
+	MSIOF1_SS2_B_MARK,
+};
+static const unsigned int msiof1_txd_b_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(5, 8),
+};
+static const unsigned int msiof1_txd_b_mux[] = {
+	MSIOF1_TXD_B_MARK,
+};
+static const unsigned int msiof1_rxd_b_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(5, 7),
+};
+static const unsigned int msiof1_rxd_b_mux[] = {
+	MSIOF1_RXD_B_MARK,
+};
+static const unsigned int msiof1_clk_c_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 17),
+};
+static const unsigned int msiof1_clk_c_mux[] = {
+	MSIOF1_SCK_C_MARK,
+};
+static const unsigned int msiof1_sync_c_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(6, 18),
+};
+static const unsigned int msiof1_sync_c_mux[] = {
+	MSIOF1_SYNC_C_MARK,
+};
+static const unsigned int msiof1_ss1_c_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(6, 21),
+};
+static const unsigned int msiof1_ss1_c_mux[] = {
+	MSIOF1_SS1_C_MARK,
+};
+static const unsigned int msiof1_ss2_c_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(6, 27),
+};
+static const unsigned int msiof1_ss2_c_mux[] = {
+	MSIOF1_SS2_C_MARK,
+};
+static const unsigned int msiof1_txd_c_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(6, 20),
+};
+static const unsigned int msiof1_txd_c_mux[] = {
+	MSIOF1_TXD_C_MARK,
+};
+static const unsigned int msiof1_rxd_c_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(6, 19),
+};
+static const unsigned int msiof1_rxd_c_mux[] = {
+	MSIOF1_RXD_C_MARK,
+};
+static const unsigned int msiof1_clk_d_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 12),
+};
+static const unsigned int msiof1_clk_d_mux[] = {
+	MSIOF1_SCK_D_MARK,
+};
+static const unsigned int msiof1_sync_d_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(5, 15),
+};
+static const unsigned int msiof1_sync_d_mux[] = {
+	MSIOF1_SYNC_D_MARK,
+};
+static const unsigned int msiof1_ss1_d_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(5, 16),
+};
+static const unsigned int msiof1_ss1_d_mux[] = {
+	MSIOF1_SS1_D_MARK,
+};
+static const unsigned int msiof1_ss2_d_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(5, 21),
+};
+static const unsigned int msiof1_ss2_d_mux[] = {
+	MSIOF1_SS2_D_MARK,
+};
+static const unsigned int msiof1_txd_d_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(5, 14),
+};
+static const unsigned int msiof1_txd_d_mux[] = {
+	MSIOF1_TXD_D_MARK,
+};
+static const unsigned int msiof1_rxd_d_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(5, 13),
+};
+static const unsigned int msiof1_rxd_d_mux[] = {
+	MSIOF1_RXD_D_MARK,
+};
+static const unsigned int msiof1_clk_e_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(3, 0),
+};
+static const unsigned int msiof1_clk_e_mux[] = {
+	MSIOF1_SCK_E_MARK,
+};
+static const unsigned int msiof1_sync_e_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(3, 1),
+};
+static const unsigned int msiof1_sync_e_mux[] = {
+	MSIOF1_SYNC_E_MARK,
+};
+static const unsigned int msiof1_ss1_e_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(3, 4),
+};
+static const unsigned int msiof1_ss1_e_mux[] = {
+	MSIOF1_SS1_E_MARK,
+};
+static const unsigned int msiof1_ss2_e_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(3, 5),
+};
+static const unsigned int msiof1_ss2_e_mux[] = {
+	MSIOF1_SS2_E_MARK,
+};
+static const unsigned int msiof1_txd_e_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(3, 3),
+};
+static const unsigned int msiof1_txd_e_mux[] = {
+	MSIOF1_TXD_E_MARK,
+};
+static const unsigned int msiof1_rxd_e_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(3, 2),
+};
+static const unsigned int msiof1_rxd_e_mux[] = {
+	MSIOF1_RXD_E_MARK,
+};
+static const unsigned int msiof1_clk_f_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 23),
+};
+static const unsigned int msiof1_clk_f_mux[] = {
+	MSIOF1_SCK_F_MARK,
+};
+static const unsigned int msiof1_sync_f_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(5, 24),
+};
+static const unsigned int msiof1_sync_f_mux[] = {
+	MSIOF1_SYNC_F_MARK,
+};
+static const unsigned int msiof1_ss1_f_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(6, 1),
+};
+static const unsigned int msiof1_ss1_f_mux[] = {
+	MSIOF1_SS1_F_MARK,
+};
+static const unsigned int msiof1_ss2_f_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(6, 2),
+};
+static const unsigned int msiof1_ss2_f_mux[] = {
+	MSIOF1_SS2_F_MARK,
+};
+static const unsigned int msiof1_txd_f_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(6, 0),
+};
+static const unsigned int msiof1_txd_f_mux[] = {
+	MSIOF1_TXD_F_MARK,
+};
+static const unsigned int msiof1_rxd_f_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(5, 25),
+};
+static const unsigned int msiof1_rxd_f_mux[] = {
+	MSIOF1_RXD_F_MARK,
+};
+static const unsigned int msiof1_clk_g_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(3, 6),
+};
+static const unsigned int msiof1_clk_g_mux[] = {
+	MSIOF1_SCK_G_MARK,
+};
+static const unsigned int msiof1_sync_g_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(3, 7),
+};
+static const unsigned int msiof1_sync_g_mux[] = {
+	MSIOF1_SYNC_G_MARK,
+};
+static const unsigned int msiof1_ss1_g_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(3, 10),
+};
+static const unsigned int msiof1_ss1_g_mux[] = {
+	MSIOF1_SS1_G_MARK,
+};
+static const unsigned int msiof1_ss2_g_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(3, 11),
+};
+static const unsigned int msiof1_ss2_g_mux[] = {
+	MSIOF1_SS2_G_MARK,
+};
+static const unsigned int msiof1_txd_g_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(3, 9),
+};
+static const unsigned int msiof1_txd_g_mux[] = {
+	MSIOF1_TXD_G_MARK,
+};
+static const unsigned int msiof1_rxd_g_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(3, 8),
+};
+static const unsigned int msiof1_rxd_g_mux[] = {
+	MSIOF1_RXD_G_MARK,
+};
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 9),
+};
+static const unsigned int msiof2_clk_a_mux[] = {
+	MSIOF2_SCK_A_MARK,
+};
+static const unsigned int msiof2_sync_a_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(1, 8),
+};
+static const unsigned int msiof2_sync_a_mux[] = {
+	MSIOF2_SYNC_A_MARK,
+};
+static const unsigned int msiof2_ss1_a_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(1, 6),
+};
+static const unsigned int msiof2_ss1_a_mux[] = {
+	MSIOF2_SS1_A_MARK,
+};
+static const unsigned int msiof2_ss2_a_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(1, 7),
+};
+static const unsigned int msiof2_ss2_a_mux[] = {
+	MSIOF2_SS2_A_MARK,
+};
+static const unsigned int msiof2_txd_a_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(1, 11),
+};
+static const unsigned int msiof2_txd_a_mux[] = {
+	MSIOF2_TXD_A_MARK,
+};
+static const unsigned int msiof2_rxd_a_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof2_rxd_a_mux[] = {
+	MSIOF2_RXD_A_MARK,
+};
+static const unsigned int msiof2_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(0, 4),
+};
+static const unsigned int msiof2_clk_b_mux[] = {
+	MSIOF2_SCK_B_MARK,
+};
+static const unsigned int msiof2_sync_b_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(0, 5),
+};
+static const unsigned int msiof2_sync_b_mux[] = {
+	MSIOF2_SYNC_B_MARK,
+};
+static const unsigned int msiof2_ss1_b_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(0, 0),
+};
+static const unsigned int msiof2_ss1_b_mux[] = {
+	MSIOF2_SS1_B_MARK,
+};
+static const unsigned int msiof2_ss2_b_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof2_ss2_b_mux[] = {
+	MSIOF2_SS2_B_MARK,
+};
+static const unsigned int msiof2_txd_b_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(0, 7),
+};
+static const unsigned int msiof2_txd_b_mux[] = {
+	MSIOF2_TXD_B_MARK,
+};
+static const unsigned int msiof2_rxd_b_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(0, 6),
+};
+static const unsigned int msiof2_rxd_b_mux[] = {
+	MSIOF2_RXD_B_MARK,
+};
+static const unsigned int msiof2_clk_c_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(2, 12),
+};
+static const unsigned int msiof2_clk_c_mux[] = {
+	MSIOF2_SCK_C_MARK,
+};
+static const unsigned int msiof2_sync_c_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(2, 11),
+};
+static const unsigned int msiof2_sync_c_mux[] = {
+	MSIOF2_SYNC_C_MARK,
+};
+static const unsigned int msiof2_ss1_c_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(2, 10),
+};
+static const unsigned int msiof2_ss1_c_mux[] = {
+	MSIOF2_SS1_C_MARK,
+};
+static const unsigned int msiof2_ss2_c_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(2, 9),
+};
+static const unsigned int msiof2_ss2_c_mux[] = {
+	MSIOF2_SS2_C_MARK,
+};
+static const unsigned int msiof2_txd_c_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(2, 14),
+};
+static const unsigned int msiof2_txd_c_mux[] = {
+	MSIOF2_TXD_C_MARK,
+};
+static const unsigned int msiof2_rxd_c_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(2, 13),
+};
+static const unsigned int msiof2_rxd_c_mux[] = {
+	MSIOF2_RXD_C_MARK,
+};
+static const unsigned int msiof2_clk_d_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(0, 8),
+};
+static const unsigned int msiof2_clk_d_mux[] = {
+	MSIOF2_SCK_D_MARK,
+};
+static const unsigned int msiof2_sync_d_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(0, 9),
+};
+static const unsigned int msiof2_sync_d_mux[] = {
+	MSIOF2_SYNC_D_MARK,
+};
+static const unsigned int msiof2_ss1_d_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(0, 12),
+};
+static const unsigned int msiof2_ss1_d_mux[] = {
+	MSIOF2_SS1_D_MARK,
+};
+static const unsigned int msiof2_ss2_d_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(0, 13),
+};
+static const unsigned int msiof2_ss2_d_mux[] = {
+	MSIOF2_SS2_D_MARK,
+};
+static const unsigned int msiof2_txd_d_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(0, 11),
+};
+static const unsigned int msiof2_txd_d_mux[] = {
+	MSIOF2_TXD_D_MARK,
+};
+static const unsigned int msiof2_rxd_d_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(0, 10),
+};
+static const unsigned int msiof2_rxd_d_mux[] = {
+	MSIOF2_RXD_D_MARK,
+};
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(0, 0),
+};
+static const unsigned int msiof3_clk_a_mux[] = {
+	MSIOF3_SCK_A_MARK,
+};
+static const unsigned int msiof3_sync_a_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_sync_a_mux[] = {
+	MSIOF3_SYNC_A_MARK,
+};
+static const unsigned int msiof3_ss1_a_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(0, 14),
+};
+static const unsigned int msiof3_ss1_a_mux[] = {
+	MSIOF3_SS1_A_MARK,
+};
+static const unsigned int msiof3_ss2_a_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(0, 15),
+};
+static const unsigned int msiof3_ss2_a_mux[] = {
+	MSIOF3_SS2_A_MARK,
+};
+static const unsigned int msiof3_txd_a_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_txd_a_mux[] = {
+	MSIOF3_TXD_A_MARK,
+};
+static const unsigned int msiof3_rxd_a_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_rxd_a_mux[] = {
+	MSIOF3_RXD_A_MARK,
+};
+static const unsigned int msiof3_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 2),
+};
+static const unsigned int msiof3_clk_b_mux[] = {
+	MSIOF3_SCK_B_MARK,
+};
+static const unsigned int msiof3_sync_b_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(1, 0),
+};
+static const unsigned int msiof3_sync_b_mux[] = {
+	MSIOF3_SYNC_B_MARK,
+};
+static const unsigned int msiof3_ss1_b_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(1, 4),
+};
+static const unsigned int msiof3_ss1_b_mux[] = {
+	MSIOF3_SS1_B_MARK,
+};
+static const unsigned int msiof3_ss2_b_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(1, 5),
+};
+static const unsigned int msiof3_ss2_b_mux[] = {
+	MSIOF3_SS2_B_MARK,
+};
+static const unsigned int msiof3_txd_b_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(1, 1),
+};
+static const unsigned int msiof3_txd_b_mux[] = {
+	MSIOF3_TXD_B_MARK,
+};
+static const unsigned int msiof3_rxd_b_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(1, 3),
+};
+static const unsigned int msiof3_rxd_b_mux[] = {
+	MSIOF3_RXD_B_MARK,
+};
+static const unsigned int msiof3_clk_c_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 12),
+};
+static const unsigned int msiof3_clk_c_mux[] = {
+	MSIOF3_SCK_C_MARK,
+};
+static const unsigned int msiof3_sync_c_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(1, 13),
+};
+static const unsigned int msiof3_sync_c_mux[] = {
+	MSIOF3_SYNC_C_MARK,
+};
+static const unsigned int msiof3_txd_c_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(1, 15),
+};
+static const unsigned int msiof3_txd_c_mux[] = {
+	MSIOF3_TXD_C_MARK,
+};
+static const unsigned int msiof3_rxd_c_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(1, 14),
+};
+static const unsigned int msiof3_rxd_c_mux[] = {
+	MSIOF3_RXD_C_MARK,
+};
+static const unsigned int msiof3_clk_d_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 22),
+};
+static const unsigned int msiof3_clk_d_mux[] = {
+	MSIOF3_SCK_D_MARK,
+};
+static const unsigned int msiof3_sync_d_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof3_sync_d_mux[] = {
+	MSIOF3_SYNC_D_MARK,
+};
+static const unsigned int msiof3_ss1_d_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(1, 26),
+};
+static const unsigned int msiof3_ss1_d_mux[] = {
+	MSIOF3_SS1_D_MARK,
+};
+static const unsigned int msiof3_txd_d_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(1, 25),
+};
+static const unsigned int msiof3_txd_d_mux[] = {
+	MSIOF3_TXD_D_MARK,
+};
+static const unsigned int msiof3_rxd_d_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(1, 24),
+};
+static const unsigned int msiof3_rxd_d_mux[] = {
+	MSIOF3_RXD_D_MARK,
+};
+static const unsigned int msiof3_clk_e_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(2, 3),
+};
+static const unsigned int msiof3_clk_e_mux[] = {
+	MSIOF3_SCK_E_MARK,
+};
+static const unsigned int msiof3_sync_e_pins[] = {
+	/* SYNC */
+	RCAR_GP_PIN(2, 2),
+};
+static const unsigned int msiof3_sync_e_mux[] = {
+	MSIOF3_SYNC_E_MARK,
+};
+static const unsigned int msiof3_ss1_e_pins[] = {
+	/* SS1 */
+	RCAR_GP_PIN(2, 1),
+};
+static const unsigned int msiof3_ss1_e_mux[] = {
+	MSIOF3_SS1_E_MARK,
+};
+static const unsigned int msiof3_ss2_e_pins[] = {
+	/* SS2 */
+	RCAR_GP_PIN(2, 0),
+};
+static const unsigned int msiof3_ss2_e_mux[] = {
+	MSIOF3_SS2_E_MARK,
+};
+static const unsigned int msiof3_txd_e_pins[] = {
+	/* TXD */
+	RCAR_GP_PIN(2, 5),
+};
+static const unsigned int msiof3_txd_e_mux[] = {
+	MSIOF3_TXD_E_MARK,
+};
+static const unsigned int msiof3_rxd_e_pins[] = {
+	/* RXD */
+	RCAR_GP_PIN(2, 4),
+};
+static const unsigned int msiof3_rxd_e_mux[] = {
+	MSIOF3_RXD_E_MARK,
+};
+
+/* - PWM0 --------------------------------------------------------------------*/
+static const unsigned int pwm0_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 6),
+};
+static const unsigned int pwm0_mux[] = {
+	PWM0_MARK,
+};
+/* - PWM1 --------------------------------------------------------------------*/
+static const unsigned int pwm1_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 7),
+};
+static const unsigned int pwm1_a_mux[] = {
+	PWM1_A_MARK,
+};
+static const unsigned int pwm1_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 8),
+};
+static const unsigned int pwm1_b_mux[] = {
+	PWM1_B_MARK,
+};
+/* - PWM2 --------------------------------------------------------------------*/
+static const unsigned int pwm2_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 8),
+};
+static const unsigned int pwm2_a_mux[] = {
+	PWM2_A_MARK,
+};
+static const unsigned int pwm2_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 11),
+};
+static const unsigned int pwm2_b_mux[] = {
+	PWM2_B_MARK,
+};
+/* - PWM3 --------------------------------------------------------------------*/
+static const unsigned int pwm3_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 0),
+};
+static const unsigned int pwm3_a_mux[] = {
+	PWM3_A_MARK,
+};
+static const unsigned int pwm3_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 2),
+};
+static const unsigned int pwm3_b_mux[] = {
+	PWM3_B_MARK,
+};
+/* - PWM4 --------------------------------------------------------------------*/
+static const unsigned int pwm4_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 1),
+};
+static const unsigned int pwm4_a_mux[] = {
+	PWM4_A_MARK,
+};
+static const unsigned int pwm4_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 3),
+};
+static const unsigned int pwm4_b_mux[] = {
+	PWM4_B_MARK,
+};
+/* - PWM5 --------------------------------------------------------------------*/
+static const unsigned int pwm5_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 2),
+};
+static const unsigned int pwm5_a_mux[] = {
+	PWM5_A_MARK,
+};
+static const unsigned int pwm5_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 4),
+};
+static const unsigned int pwm5_b_mux[] = {
+	PWM5_B_MARK,
+};
+/* - PWM6 --------------------------------------------------------------------*/
+static const unsigned int pwm6_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 3),
+};
+static const unsigned int pwm6_a_mux[] = {
+	PWM6_A_MARK,
+};
+static const unsigned int pwm6_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 5),
+};
+static const unsigned int pwm6_b_mux[] = {
+	PWM6_B_MARK,
+};
+
+/* - SATA --------------------------------------------------------------------*/
+static const unsigned int sata0_devslp_a_pins[] = {
+	/* DEVSLP */
+	RCAR_GP_PIN(6, 16),
+};
+static const unsigned int sata0_devslp_a_mux[] = {
+	SATA_DEVSLP_A_MARK,
+};
+static const unsigned int sata0_devslp_b_pins[] = {
+	/* DEVSLP */
+	RCAR_GP_PIN(4, 6),
+};
+static const unsigned int sata0_devslp_b_mux[] = {
+	SATA_DEVSLP_B_MARK,
+};
+
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+static const unsigned int scif0_data_mux[] = {
+	RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 0),
+};
+static const unsigned int scif0_clk_mux[] = {
+	SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int scif0_ctrl_mux[] = {
+	RTS0_N_MARK, CTS0_N_MARK,
+};
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+static const unsigned int scif1_data_a_mux[] = {
+	RX1_A_MARK, TX1_A_MARK,
+};
+static const unsigned int scif1_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 21),
+};
+static const unsigned int scif1_clk_mux[] = {
+	SCK1_MARK,
+};
+static const unsigned int scif1_ctrl_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
+};
+static const unsigned int scif1_ctrl_mux[] = {
+	RTS1_N_MARK, CTS1_N_MARK,
+};
+
+static const unsigned int scif1_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 25),
+};
+static const unsigned int scif1_data_b_mux[] = {
+	RX1_B_MARK, TX1_B_MARK,
+};
+/* - SCIF2 ------------------------------------------------------------------ */
+static const unsigned int scif2_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int scif2_data_a_mux[] = {
+	RX2_A_MARK, TX2_A_MARK,
+};
+static const unsigned int scif2_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 9),
+};
+static const unsigned int scif2_clk_mux[] = {
+	SCK2_MARK,
+};
+static const unsigned int scif2_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int scif2_data_b_mux[] = {
+	RX2_B_MARK, TX2_B_MARK,
+};
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int scif3_data_a_mux[] = {
+	RX3_A_MARK, TX3_A_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 22),
+};
+static const unsigned int scif3_clk_mux[] = {
+	SCK3_MARK,
+};
+static const unsigned int scif3_ctrl_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int scif3_ctrl_mux[] = {
+	RTS3_N_MARK, CTS3_N_MARK,
+};
+static const unsigned int scif3_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int scif3_data_b_mux[] = {
+	RX3_B_MARK, TX3_B_MARK,
+};
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 12),
+};
+static const unsigned int scif4_data_a_mux[] = {
+	RX4_A_MARK, TX4_A_MARK,
+};
+static const unsigned int scif4_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(2, 10),
+};
+static const unsigned int scif4_clk_a_mux[] = {
+	SCK4_A_MARK,
+};
+static const unsigned int scif4_ctrl_a_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 13),
+};
+static const unsigned int scif4_ctrl_a_mux[] = {
+	RTS4_N_A_MARK, CTS4_N_A_MARK,
+};
+static const unsigned int scif4_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+static const unsigned int scif4_data_b_mux[] = {
+	RX4_B_MARK, TX4_B_MARK,
+};
+static const unsigned int scif4_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 5),
+};
+static const unsigned int scif4_clk_b_mux[] = {
+	SCK4_B_MARK,
+};
+static const unsigned int scif4_ctrl_b_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int scif4_ctrl_b_mux[] = {
+	RTS4_N_B_MARK, CTS4_N_B_MARK,
+};
+static const unsigned int scif4_data_c_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
+};
+static const unsigned int scif4_data_c_mux[] = {
+	RX4_C_MARK, TX4_C_MARK,
+};
+static const unsigned int scif4_clk_c_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(0, 8),
+};
+static const unsigned int scif4_clk_c_mux[] = {
+	SCK4_C_MARK,
+};
+static const unsigned int scif4_ctrl_c_pins[] = {
+	/* RTS, CTS */
+	RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+};
+static const unsigned int scif4_ctrl_c_mux[] = {
+	RTS4_N_C_MARK, CTS4_N_C_MARK,
+};
+/* - SCIF5 ------------------------------------------------------------------ */
+static const unsigned int scif5_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
+};
+static const unsigned int scif5_data_a_mux[] = {
+	RX5_A_MARK, TX5_A_MARK,
+};
+static const unsigned int scif5_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(6, 21),
+};
+static const unsigned int scif5_clk_a_mux[] = {
+	SCK5_A_MARK,
+};
+static const unsigned int scif5_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 18),
+};
+static const unsigned int scif5_data_b_mux[] = {
+	RX5_B_MARK, TX5_B_MARK,
+};
+static const unsigned int scif5_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 0),
+};
+static const unsigned int scif5_clk_b_mux[] = {
+	SCK5_B_MARK,
+};
+
+/* - SCIF Clock ------------------------------------------------------------- */
+static const unsigned int scif_clk_a_pins[] = {
+	/* SCIF_CLK */
+	RCAR_GP_PIN(6, 23),
+};
+static const unsigned int scif_clk_a_mux[] = {
+	SCIF_CLK_A_MARK,
+};
+static const unsigned int scif_clk_b_pins[] = {
+	/* SCIF_CLK */
+	RCAR_GP_PIN(5, 9),
+};
+static const unsigned int scif_clk_b_mux[] = {
+	SCIF_CLK_B_MARK,
+};
+
+/* - SDHI0 ------------------------------------------------------------------ */
+static const unsigned int sdhi0_data1_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(3, 2),
+};
+static const unsigned int sdhi0_data1_mux[] = {
+	SD0_DAT0_MARK,
+};
+static const unsigned int sdhi0_data4_pins[] = {
+	/* D[0:3] */
+	RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3),
+	RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
+};
+static const unsigned int sdhi0_data4_mux[] = {
+	SD0_DAT0_MARK, SD0_DAT1_MARK,
+	SD0_DAT2_MARK, SD0_DAT3_MARK,
+};
+static const unsigned int sdhi0_ctrl_pins[] = {
+	/* CLK, CMD */
+	RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
+};
+static const unsigned int sdhi0_ctrl_mux[] = {
+	SD0_CLK_MARK, SD0_CMD_MARK,
+};
+static const unsigned int sdhi0_cd_pins[] = {
+	/* CD */
+	RCAR_GP_PIN(3, 12),
+};
+static const unsigned int sdhi0_cd_mux[] = {
+	SD0_CD_MARK,
+};
+static const unsigned int sdhi0_wp_pins[] = {
+	/* WP */
+	RCAR_GP_PIN(3, 13),
+};
+static const unsigned int sdhi0_wp_mux[] = {
+	SD0_WP_MARK,
+};
+/* - SDHI1 ------------------------------------------------------------------ */
+static const unsigned int sdhi1_data1_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(3, 8),
+};
+static const unsigned int sdhi1_data1_mux[] = {
+	SD1_DAT0_MARK,
+};
+static const unsigned int sdhi1_data4_pins[] = {
+	/* D[0:3] */
+	RCAR_GP_PIN(3, 8),  RCAR_GP_PIN(3, 9),
+	RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+static const unsigned int sdhi1_data4_mux[] = {
+	SD1_DAT0_MARK, SD1_DAT1_MARK,
+	SD1_DAT2_MARK, SD1_DAT3_MARK,
+};
+static const unsigned int sdhi1_ctrl_pins[] = {
+	/* CLK, CMD */
+	RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+};
+static const unsigned int sdhi1_ctrl_mux[] = {
+	SD1_CLK_MARK, SD1_CMD_MARK,
+};
+static const unsigned int sdhi1_cd_pins[] = {
+	/* CD */
+	RCAR_GP_PIN(3, 14),
+};
+static const unsigned int sdhi1_cd_mux[] = {
+	SD1_CD_MARK,
+};
+static const unsigned int sdhi1_wp_pins[] = {
+	/* WP */
+	RCAR_GP_PIN(3, 15),
+};
+static const unsigned int sdhi1_wp_mux[] = {
+	SD1_WP_MARK,
+};
+/* - SDHI2 ------------------------------------------------------------------ */
+static const unsigned int sdhi2_data1_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(4, 2),
+};
+static const unsigned int sdhi2_data1_mux[] = {
+	SD2_DAT0_MARK,
+};
+static const unsigned int sdhi2_data4_pins[] = {
+	/* D[0:3] */
+	RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+	RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+};
+static const unsigned int sdhi2_data4_mux[] = {
+	SD2_DAT0_MARK, SD2_DAT1_MARK,
+	SD2_DAT2_MARK, SD2_DAT3_MARK,
+};
+static const unsigned int sdhi2_data8_pins[] = {
+	/* D[0:7] */
+	RCAR_GP_PIN(4, 2),  RCAR_GP_PIN(4, 3),
+	RCAR_GP_PIN(4, 4),  RCAR_GP_PIN(4, 5),
+	RCAR_GP_PIN(3, 8),  RCAR_GP_PIN(3, 9),
+	RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+static const unsigned int sdhi2_data8_mux[] = {
+	SD2_DAT0_MARK, SD2_DAT1_MARK,
+	SD2_DAT2_MARK, SD2_DAT3_MARK,
+	SD2_DAT4_MARK, SD2_DAT5_MARK,
+	SD2_DAT6_MARK, SD2_DAT7_MARK,
+};
+static const unsigned int sdhi2_ctrl_pins[] = {
+	/* CLK, CMD */
+	RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1),
+};
+static const unsigned int sdhi2_ctrl_mux[] = {
+	SD2_CLK_MARK, SD2_CMD_MARK,
+};
+static const unsigned int sdhi2_cd_a_pins[] = {
+	/* CD */
+	RCAR_GP_PIN(4, 13),
+};
+static const unsigned int sdhi2_cd_a_mux[] = {
+	SD2_CD_A_MARK,
+};
+static const unsigned int sdhi2_cd_b_pins[] = {
+	/* CD */
+	RCAR_GP_PIN(5, 10),
+};
+static const unsigned int sdhi2_cd_b_mux[] = {
+	SD2_CD_B_MARK,
+};
+static const unsigned int sdhi2_wp_a_pins[] = {
+	/* WP */
+	RCAR_GP_PIN(4, 14),
+};
+static const unsigned int sdhi2_wp_a_mux[] = {
+	SD2_WP_A_MARK,
+};
+static const unsigned int sdhi2_wp_b_pins[] = {
+	/* WP */
+	RCAR_GP_PIN(5, 11),
+};
+static const unsigned int sdhi2_wp_b_mux[] = {
+	SD2_WP_B_MARK,
+};
+static const unsigned int sdhi2_ds_pins[] = {
+	/* DS */
+	RCAR_GP_PIN(4, 6),
+};
+static const unsigned int sdhi2_ds_mux[] = {
+	SD2_DS_MARK,
+};
+/* - SDHI3 ------------------------------------------------------------------ */
+static const unsigned int sdhi3_data1_pins[] = {
+	/* D0 */
+	RCAR_GP_PIN(4, 9),
+};
+static const unsigned int sdhi3_data1_mux[] = {
+	SD3_DAT0_MARK,
+};
+static const unsigned int sdhi3_data4_pins[] = {
+	/* D[0:3] */
+	RCAR_GP_PIN(4, 9),  RCAR_GP_PIN(4, 10),
+	RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+};
+static const unsigned int sdhi3_data4_mux[] = {
+	SD3_DAT0_MARK, SD3_DAT1_MARK,
+	SD3_DAT2_MARK, SD3_DAT3_MARK,
+};
+static const unsigned int sdhi3_data8_pins[] = {
+	/* D[0:7] */
+	RCAR_GP_PIN(4, 9),  RCAR_GP_PIN(4, 10),
+	RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+	RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
+	RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16),
+};
+static const unsigned int sdhi3_data8_mux[] = {
+	SD3_DAT0_MARK, SD3_DAT1_MARK,
+	SD3_DAT2_MARK, SD3_DAT3_MARK,
+	SD3_DAT4_MARK, SD3_DAT5_MARK,
+	SD3_DAT6_MARK, SD3_DAT7_MARK,
+};
+static const unsigned int sdhi3_ctrl_pins[] = {
+	/* CLK, CMD */
+	RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8),
+};
+static const unsigned int sdhi3_ctrl_mux[] = {
+	SD3_CLK_MARK, SD3_CMD_MARK,
+};
+static const unsigned int sdhi3_cd_pins[] = {
+	/* CD */
+	RCAR_GP_PIN(4, 15),
+};
+static const unsigned int sdhi3_cd_mux[] = {
+	SD3_CD_MARK,
+};
+static const unsigned int sdhi3_wp_pins[] = {
+	/* WP */
+	RCAR_GP_PIN(4, 16),
+};
+static const unsigned int sdhi3_wp_mux[] = {
+	SD3_WP_MARK,
+};
+static const unsigned int sdhi3_ds_pins[] = {
+	/* DS */
+	RCAR_GP_PIN(4, 17),
+};
+static const unsigned int sdhi3_ds_mux[] = {
+	SD3_DS_MARK,
+};
+
+/* - SSI -------------------------------------------------------------------- */
+static const unsigned int ssi0_data_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 2),
+};
+static const unsigned int ssi0_data_mux[] = {
+	SSI_SDATA0_MARK,
+};
+static const unsigned int ssi01239_ctrl_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
+};
+static const unsigned int ssi01239_ctrl_mux[] = {
+	SSI_SCK01239_MARK, SSI_WS01239_MARK,
+};
+static const unsigned int ssi1_data_a_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 3),
+};
+static const unsigned int ssi1_data_a_mux[] = {
+	SSI_SDATA1_A_MARK,
+};
+static const unsigned int ssi1_data_b_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(5, 12),
+};
+static const unsigned int ssi1_data_b_mux[] = {
+	SSI_SDATA1_B_MARK,
+};
+static const unsigned int ssi1_ctrl_a_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int ssi1_ctrl_a_mux[] = {
+	SSI_SCK1_A_MARK, SSI_WS1_A_MARK,
+};
+static const unsigned int ssi1_ctrl_b_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi1_ctrl_b_mux[] = {
+	SSI_SCK1_B_MARK, SSI_WS1_B_MARK,
+};
+static const unsigned int ssi2_data_a_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 4),
+};
+static const unsigned int ssi2_data_a_mux[] = {
+	SSI_SDATA2_A_MARK,
+};
+static const unsigned int ssi2_data_b_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(5, 13),
+};
+static const unsigned int ssi2_data_b_mux[] = {
+	SSI_SDATA2_B_MARK,
+};
+static const unsigned int ssi2_ctrl_a_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
+};
+static const unsigned int ssi2_ctrl_a_mux[] = {
+	SSI_SCK2_A_MARK, SSI_WS2_A_MARK,
+};
+static const unsigned int ssi2_ctrl_b_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int ssi2_ctrl_b_mux[] = {
+	SSI_SCK2_B_MARK, SSI_WS2_B_MARK,
+};
+static const unsigned int ssi3_data_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 7),
+};
+static const unsigned int ssi3_data_mux[] = {
+	SSI_SDATA3_MARK,
+};
+static const unsigned int ssi349_ctrl_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int ssi349_ctrl_mux[] = {
+	SSI_SCK349_MARK, SSI_WS349_MARK,
+};
+static const unsigned int ssi4_data_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 10),
+};
+static const unsigned int ssi4_data_mux[] = {
+	SSI_SDATA4_MARK,
+};
+static const unsigned int ssi4_ctrl_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int ssi4_ctrl_mux[] = {
+	SSI_SCK4_MARK, SSI_WS4_MARK,
+};
+static const unsigned int ssi5_data_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 13),
+};
+static const unsigned int ssi5_data_mux[] = {
+	SSI_SDATA5_MARK,
+};
+static const unsigned int ssi5_ctrl_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
+};
+static const unsigned int ssi5_ctrl_mux[] = {
+	SSI_SCK5_MARK, SSI_WS5_MARK,
+};
+static const unsigned int ssi6_data_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 16),
+};
+static const unsigned int ssi6_data_mux[] = {
+	SSI_SDATA6_MARK,
+};
+static const unsigned int ssi6_ctrl_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
+};
+static const unsigned int ssi6_ctrl_mux[] = {
+	SSI_SCK6_MARK, SSI_WS6_MARK,
+};
+static const unsigned int ssi7_data_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 19),
+};
+static const unsigned int ssi7_data_mux[] = {
+	SSI_SDATA7_MARK,
+};
+static const unsigned int ssi78_ctrl_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int ssi78_ctrl_mux[] = {
+	SSI_SCK78_MARK, SSI_WS78_MARK,
+};
+static const unsigned int ssi8_data_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 20),
+};
+static const unsigned int ssi8_data_mux[] = {
+	SSI_SDATA8_MARK,
+};
+static const unsigned int ssi9_data_a_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi9_data_a_mux[] = {
+	SSI_SDATA9_A_MARK,
+};
+static const unsigned int ssi9_data_b_pins[] = {
+	/* SDATA */
+	RCAR_GP_PIN(5, 14),
+};
+static const unsigned int ssi9_data_b_mux[] = {
+	SSI_SDATA9_B_MARK,
+};
+static const unsigned int ssi9_ctrl_a_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int ssi9_ctrl_a_mux[] = {
+	SSI_SCK9_A_MARK, SSI_WS9_A_MARK,
+};
+static const unsigned int ssi9_ctrl_b_pins[] = {
+	/* SCK, WS */
+	RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
+};
+static const unsigned int ssi9_ctrl_b_mux[] = {
+	SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
+};
+
+/* - TMU -------------------------------------------------------------------- */
+static const unsigned int tmu_tclk1_a_pins[] = {
+	/* TCLK */
+	RCAR_GP_PIN(6, 23),
+};
+static const unsigned int tmu_tclk1_a_mux[] = {
+	TCLK1_A_MARK,
+};
+static const unsigned int tmu_tclk1_b_pins[] = {
+	/* TCLK */
+	RCAR_GP_PIN(5, 19),
+};
+static const unsigned int tmu_tclk1_b_mux[] = {
+	TCLK1_B_MARK,
+};
+static const unsigned int tmu_tclk2_a_pins[] = {
+	/* TCLK */
+	RCAR_GP_PIN(6, 19),
+};
+static const unsigned int tmu_tclk2_a_mux[] = {
+	TCLK2_A_MARK,
+};
+static const unsigned int tmu_tclk2_b_pins[] = {
+	/* TCLK */
+	RCAR_GP_PIN(6, 28),
+};
+static const unsigned int tmu_tclk2_b_mux[] = {
+	TCLK2_B_MARK,
+};
+
+/* - TPU ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_pins[] = {
+	/* TPU0TO0 */
+	RCAR_GP_PIN(6, 28),
+};
+static const unsigned int tpu_to0_mux[] = {
+	TPU0TO0_MARK,
+};
+static const unsigned int tpu_to1_pins[] = {
+	/* TPU0TO1 */
+	RCAR_GP_PIN(6, 29),
+};
+static const unsigned int tpu_to1_mux[] = {
+	TPU0TO1_MARK,
+};
+static const unsigned int tpu_to2_pins[] = {
+	/* TPU0TO2 */
+	RCAR_GP_PIN(6, 30),
+};
+static const unsigned int tpu_to2_mux[] = {
+	TPU0TO2_MARK,
+};
+static const unsigned int tpu_to3_pins[] = {
+	/* TPU0TO3 */
+	RCAR_GP_PIN(6, 31),
+};
+static const unsigned int tpu_to3_mux[] = {
+	TPU0TO3_MARK,
+};
+
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pins[] = {
+	/* PWEN, OVC */
+	RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
+};
+static const unsigned int usb0_mux[] = {
+	USB0_PWEN_MARK, USB0_OVC_MARK,
+};
+/* - USB1 ------------------------------------------------------------------- */
+static const unsigned int usb1_pins[] = {
+	/* PWEN, OVC */
+	RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int usb1_mux[] = {
+	USB1_PWEN_MARK, USB1_OVC_MARK,
+};
+/* - USB2 ------------------------------------------------------------------- */
+static const unsigned int usb2_pins[] = {
+	/* PWEN, OVC */
+	RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
+};
+static const unsigned int usb2_mux[] = {
+	USB2_PWEN_MARK, USB2_OVC_MARK,
+};
+/* - USB2_CH3 --------------------------------------------------------------- */
+static const unsigned int usb2_ch3_pins[] = {
+	/* PWEN, OVC */
+	RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
+};
+static const unsigned int usb2_ch3_mux[] = {
+	USB2_CH3_PWEN_MARK, USB2_CH3_OVC_MARK,
+};
+
+/* - USB30 ------------------------------------------------------------------ */
+static const unsigned int usb30_pins[] = {
+	/* PWEN, OVC */
+	RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int usb30_mux[] = {
+	USB30_PWEN_MARK, USB30_OVC_MARK,
+};
+
+/* - VIN4 ------------------------------------------------------------------- */
+static const unsigned int vin4_data18_a_pins[] = {
+	RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+	RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
+	RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+	RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+	RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+	RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+	RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+	RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+	RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+};
+static const unsigned int vin4_data18_a_mux[] = {
+	VI4_DATA2_A_MARK, VI4_DATA3_A_MARK,
+	VI4_DATA4_A_MARK, VI4_DATA5_A_MARK,
+	VI4_DATA6_A_MARK, VI4_DATA7_A_MARK,
+	VI4_DATA10_MARK, VI4_DATA11_MARK,
+	VI4_DATA12_MARK, VI4_DATA13_MARK,
+	VI4_DATA14_MARK, VI4_DATA15_MARK,
+	VI4_DATA18_MARK, VI4_DATA19_MARK,
+	VI4_DATA20_MARK, VI4_DATA21_MARK,
+	VI4_DATA22_MARK, VI4_DATA23_MARK,
+};
+static const unsigned int vin4_data18_b_pins[] = {
+	RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+	RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
+	RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
+	RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+	RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+	RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+	RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+	RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+	RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+};
+static const unsigned int vin4_data18_b_mux[] = {
+	VI4_DATA2_B_MARK, VI4_DATA3_B_MARK,
+	VI4_DATA4_B_MARK, VI4_DATA5_B_MARK,
+	VI4_DATA6_B_MARK, VI4_DATA7_B_MARK,
+	VI4_DATA10_MARK, VI4_DATA11_MARK,
+	VI4_DATA12_MARK, VI4_DATA13_MARK,
+	VI4_DATA14_MARK, VI4_DATA15_MARK,
+	VI4_DATA18_MARK, VI4_DATA19_MARK,
+	VI4_DATA20_MARK, VI4_DATA21_MARK,
+	VI4_DATA22_MARK, VI4_DATA23_MARK,
+};
+static const union vin_data vin4_data_a_pins = {
+	.data24 = {
+		RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 9),
+		RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+		RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
+		RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+		RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
+		RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+		RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+		RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+		RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+		RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+		RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+		RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+	},
+};
+static const union vin_data vin4_data_a_mux = {
+	.data24 = {
+		VI4_DATA0_A_MARK, VI4_DATA1_A_MARK,
+		VI4_DATA2_A_MARK, VI4_DATA3_A_MARK,
+		VI4_DATA4_A_MARK, VI4_DATA5_A_MARK,
+		VI4_DATA6_A_MARK, VI4_DATA7_A_MARK,
+		VI4_DATA8_MARK,  VI4_DATA9_MARK,
+		VI4_DATA10_MARK, VI4_DATA11_MARK,
+		VI4_DATA12_MARK, VI4_DATA13_MARK,
+		VI4_DATA14_MARK, VI4_DATA15_MARK,
+		VI4_DATA16_MARK, VI4_DATA17_MARK,
+		VI4_DATA18_MARK, VI4_DATA19_MARK,
+		VI4_DATA20_MARK, VI4_DATA21_MARK,
+		VI4_DATA22_MARK, VI4_DATA23_MARK,
+	},
+};
+static const union vin_data vin4_data_b_pins = {
+	.data24 = {
+		RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+		RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+		RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
+		RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
+		RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
+		RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+		RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+		RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+		RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+		RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+		RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+		RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+	},
+};
+static const union vin_data vin4_data_b_mux = {
+	.data24 = {
+		VI4_DATA0_B_MARK, VI4_DATA1_B_MARK,
+		VI4_DATA2_B_MARK, VI4_DATA3_B_MARK,
+		VI4_DATA4_B_MARK, VI4_DATA5_B_MARK,
+		VI4_DATA6_B_MARK, VI4_DATA7_B_MARK,
+		VI4_DATA8_MARK,  VI4_DATA9_MARK,
+		VI4_DATA10_MARK, VI4_DATA11_MARK,
+		VI4_DATA12_MARK, VI4_DATA13_MARK,
+		VI4_DATA14_MARK, VI4_DATA15_MARK,
+		VI4_DATA16_MARK, VI4_DATA17_MARK,
+		VI4_DATA18_MARK, VI4_DATA19_MARK,
+		VI4_DATA20_MARK, VI4_DATA21_MARK,
+		VI4_DATA22_MARK, VI4_DATA23_MARK,
+	},
+};
+static const unsigned int vin4_sync_pins[] = {
+	/* HSYNC#, VSYNC# */
+	RCAR_GP_PIN(1, 18), RCAR_GP_PIN(1, 17),
+};
+static const unsigned int vin4_sync_mux[] = {
+	VI4_HSYNC_N_MARK, VI4_VSYNC_N_MARK,
+};
+static const unsigned int vin4_field_pins[] = {
+	/* FIELD */
+	RCAR_GP_PIN(1, 16),
+};
+static const unsigned int vin4_field_mux[] = {
+	VI4_FIELD_MARK,
+};
+static const unsigned int vin4_clkenb_pins[] = {
+	/* CLKENB */
+	RCAR_GP_PIN(1, 19),
+};
+static const unsigned int vin4_clkenb_mux[] = {
+	VI4_CLKENB_MARK,
+};
+static const unsigned int vin4_clk_pins[] = {
+	/* CLK */
+	RCAR_GP_PIN(1, 27),
+};
+static const unsigned int vin4_clk_mux[] = {
+	VI4_CLK_MARK,
+};
+
+/* - VIN5 ------------------------------------------------------------------- */
+static const union vin_data16 vin5_data_pins = {
+	.data16 = {
+		RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+		RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+		RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+		RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+		RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+		RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
+		RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+		RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+	},
+};
+static const union vin_data16 vin5_data_mux = {
+	.data16 = {
+		VI5_DATA0_MARK, VI5_DATA1_MARK,
+		VI5_DATA2_MARK, VI5_DATA3_MARK,
+		VI5_DATA4_MARK, VI5_DATA5_MARK,
+		VI5_DATA6_MARK, VI5_DATA7_MARK,
+		VI5_DATA8_MARK,  VI5_DATA9_MARK,
+		VI5_DATA10_MARK, VI5_DATA11_MARK,
+		VI5_DATA12_MARK, VI5_DATA13_MARK,
+		VI5_DATA14_MARK, VI5_DATA15_MARK,
+	},
+};
+static const unsigned int vin5_sync_pins[] = {
+	/* HSYNC#, VSYNC# */
+	RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int vin5_sync_mux[] = {
+	VI5_HSYNC_N_MARK, VI5_VSYNC_N_MARK,
+};
+static const unsigned int vin5_field_pins[] = {
+	RCAR_GP_PIN(1, 11),
+};
+static const unsigned int vin5_field_mux[] = {
+	/* FIELD */
+	VI5_FIELD_MARK,
+};
+static const unsigned int vin5_clkenb_pins[] = {
+	RCAR_GP_PIN(1, 20),
+};
+static const unsigned int vin5_clkenb_mux[] = {
+	/* CLKENB */
+	VI5_CLKENB_MARK,
+};
+static const unsigned int vin5_clk_pins[] = {
+	RCAR_GP_PIN(1, 21),
+};
+static const unsigned int vin5_clk_mux[] = {
+	/* CLK */
+	VI5_CLK_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+	SH_PFC_PIN_GROUP(audio_clk_a_a),
+	SH_PFC_PIN_GROUP(audio_clk_a_b),
+	SH_PFC_PIN_GROUP(audio_clk_a_c),
+	SH_PFC_PIN_GROUP(audio_clk_b_a),
+	SH_PFC_PIN_GROUP(audio_clk_b_b),
+	SH_PFC_PIN_GROUP(audio_clk_c_a),
+	SH_PFC_PIN_GROUP(audio_clk_c_b),
+	SH_PFC_PIN_GROUP(audio_clkout_a),
+	SH_PFC_PIN_GROUP(audio_clkout_b),
+	SH_PFC_PIN_GROUP(audio_clkout_c),
+	SH_PFC_PIN_GROUP(audio_clkout_d),
+	SH_PFC_PIN_GROUP(audio_clkout1_a),
+	SH_PFC_PIN_GROUP(audio_clkout1_b),
+	SH_PFC_PIN_GROUP(audio_clkout2_a),
+	SH_PFC_PIN_GROUP(audio_clkout2_b),
+	SH_PFC_PIN_GROUP(audio_clkout3_a),
+	SH_PFC_PIN_GROUP(audio_clkout3_b),
+	SH_PFC_PIN_GROUP(avb_link),
+	SH_PFC_PIN_GROUP(avb_magic),
+	SH_PFC_PIN_GROUP(avb_phy_int),
+	SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio),	/* Deprecated */
+	SH_PFC_PIN_GROUP(avb_mdio),
+	SH_PFC_PIN_GROUP(avb_mii),
+	SH_PFC_PIN_GROUP(avb_avtp_pps),
+	SH_PFC_PIN_GROUP(avb_avtp_match_a),
+	SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+	SH_PFC_PIN_GROUP(avb_avtp_match_b),
+	SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+	SH_PFC_PIN_GROUP(can0_data_a),
+	SH_PFC_PIN_GROUP(can0_data_b),
+	SH_PFC_PIN_GROUP(can1_data),
+	SH_PFC_PIN_GROUP(can_clk),
+	SH_PFC_PIN_GROUP(canfd0_data_a),
+	SH_PFC_PIN_GROUP(canfd0_data_b),
+	SH_PFC_PIN_GROUP(canfd1_data),
+	SH_PFC_PIN_GROUP(drif0_ctrl_a),
+	SH_PFC_PIN_GROUP(drif0_data0_a),
+	SH_PFC_PIN_GROUP(drif0_data1_a),
+	SH_PFC_PIN_GROUP(drif0_ctrl_b),
+	SH_PFC_PIN_GROUP(drif0_data0_b),
+	SH_PFC_PIN_GROUP(drif0_data1_b),
+	SH_PFC_PIN_GROUP(drif0_ctrl_c),
+	SH_PFC_PIN_GROUP(drif0_data0_c),
+	SH_PFC_PIN_GROUP(drif0_data1_c),
+	SH_PFC_PIN_GROUP(drif1_ctrl_a),
+	SH_PFC_PIN_GROUP(drif1_data0_a),
+	SH_PFC_PIN_GROUP(drif1_data1_a),
+	SH_PFC_PIN_GROUP(drif1_ctrl_b),
+	SH_PFC_PIN_GROUP(drif1_data0_b),
+	SH_PFC_PIN_GROUP(drif1_data1_b),
+	SH_PFC_PIN_GROUP(drif1_ctrl_c),
+	SH_PFC_PIN_GROUP(drif1_data0_c),
+	SH_PFC_PIN_GROUP(drif1_data1_c),
+	SH_PFC_PIN_GROUP(drif2_ctrl_a),
+	SH_PFC_PIN_GROUP(drif2_data0_a),
+	SH_PFC_PIN_GROUP(drif2_data1_a),
+	SH_PFC_PIN_GROUP(drif2_ctrl_b),
+	SH_PFC_PIN_GROUP(drif2_data0_b),
+	SH_PFC_PIN_GROUP(drif2_data1_b),
+	SH_PFC_PIN_GROUP(drif3_ctrl_a),
+	SH_PFC_PIN_GROUP(drif3_data0_a),
+	SH_PFC_PIN_GROUP(drif3_data1_a),
+	SH_PFC_PIN_GROUP(drif3_ctrl_b),
+	SH_PFC_PIN_GROUP(drif3_data0_b),
+	SH_PFC_PIN_GROUP(drif3_data1_b),
+	SH_PFC_PIN_GROUP(du_rgb666),
+	SH_PFC_PIN_GROUP(du_rgb888),
+	SH_PFC_PIN_GROUP(du_clk_out_0),
+	SH_PFC_PIN_GROUP(du_clk_out_1),
+	SH_PFC_PIN_GROUP(du_sync),
+	SH_PFC_PIN_GROUP(du_oddf),
+	SH_PFC_PIN_GROUP(du_cde),
+	SH_PFC_PIN_GROUP(du_disp),
+	SH_PFC_PIN_GROUP(hscif0_data),
+	SH_PFC_PIN_GROUP(hscif0_clk),
+	SH_PFC_PIN_GROUP(hscif0_ctrl),
+	SH_PFC_PIN_GROUP(hscif1_data_a),
+	SH_PFC_PIN_GROUP(hscif1_clk_a),
+	SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+	SH_PFC_PIN_GROUP(hscif1_data_b),
+	SH_PFC_PIN_GROUP(hscif1_clk_b),
+	SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+	SH_PFC_PIN_GROUP(hscif2_data_a),
+	SH_PFC_PIN_GROUP(hscif2_clk_a),
+	SH_PFC_PIN_GROUP(hscif2_ctrl_a),
+	SH_PFC_PIN_GROUP(hscif2_data_b),
+	SH_PFC_PIN_GROUP(hscif2_clk_b),
+	SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+	SH_PFC_PIN_GROUP(hscif2_data_c),
+	SH_PFC_PIN_GROUP(hscif2_clk_c),
+	SH_PFC_PIN_GROUP(hscif2_ctrl_c),
+	SH_PFC_PIN_GROUP(hscif3_data_a),
+	SH_PFC_PIN_GROUP(hscif3_clk),
+	SH_PFC_PIN_GROUP(hscif3_ctrl),
+	SH_PFC_PIN_GROUP(hscif3_data_b),
+	SH_PFC_PIN_GROUP(hscif3_data_c),
+	SH_PFC_PIN_GROUP(hscif3_data_d),
+	SH_PFC_PIN_GROUP(hscif4_data_a),
+	SH_PFC_PIN_GROUP(hscif4_clk),
+	SH_PFC_PIN_GROUP(hscif4_ctrl),
+	SH_PFC_PIN_GROUP(hscif4_data_b),
+	SH_PFC_PIN_GROUP(i2c0),
+	SH_PFC_PIN_GROUP(i2c1_a),
+	SH_PFC_PIN_GROUP(i2c1_b),
+	SH_PFC_PIN_GROUP(i2c2_a),
+	SH_PFC_PIN_GROUP(i2c2_b),
+	SH_PFC_PIN_GROUP(i2c3),
+	SH_PFC_PIN_GROUP(i2c5),
+	SH_PFC_PIN_GROUP(i2c6_a),
+	SH_PFC_PIN_GROUP(i2c6_b),
+	SH_PFC_PIN_GROUP(i2c6_c),
+	SH_PFC_PIN_GROUP(intc_ex_irq0),
+	SH_PFC_PIN_GROUP(intc_ex_irq1),
+	SH_PFC_PIN_GROUP(intc_ex_irq2),
+	SH_PFC_PIN_GROUP(intc_ex_irq3),
+	SH_PFC_PIN_GROUP(intc_ex_irq4),
+	SH_PFC_PIN_GROUP(intc_ex_irq5),
+	SH_PFC_PIN_GROUP(msiof0_clk),
+	SH_PFC_PIN_GROUP(msiof0_sync),
+	SH_PFC_PIN_GROUP(msiof0_ss1),
+	SH_PFC_PIN_GROUP(msiof0_ss2),
+	SH_PFC_PIN_GROUP(msiof0_txd),
+	SH_PFC_PIN_GROUP(msiof0_rxd),
+	SH_PFC_PIN_GROUP(msiof1_clk_a),
+	SH_PFC_PIN_GROUP(msiof1_sync_a),
+	SH_PFC_PIN_GROUP(msiof1_ss1_a),
+	SH_PFC_PIN_GROUP(msiof1_ss2_a),
+	SH_PFC_PIN_GROUP(msiof1_txd_a),
+	SH_PFC_PIN_GROUP(msiof1_rxd_a),
+	SH_PFC_PIN_GROUP(msiof1_clk_b),
+	SH_PFC_PIN_GROUP(msiof1_sync_b),
+	SH_PFC_PIN_GROUP(msiof1_ss1_b),
+	SH_PFC_PIN_GROUP(msiof1_ss2_b),
+	SH_PFC_PIN_GROUP(msiof1_txd_b),
+	SH_PFC_PIN_GROUP(msiof1_rxd_b),
+	SH_PFC_PIN_GROUP(msiof1_clk_c),
+	SH_PFC_PIN_GROUP(msiof1_sync_c),
+	SH_PFC_PIN_GROUP(msiof1_ss1_c),
+	SH_PFC_PIN_GROUP(msiof1_ss2_c),
+	SH_PFC_PIN_GROUP(msiof1_txd_c),
+	SH_PFC_PIN_GROUP(msiof1_rxd_c),
+	SH_PFC_PIN_GROUP(msiof1_clk_d),
+	SH_PFC_PIN_GROUP(msiof1_sync_d),
+	SH_PFC_PIN_GROUP(msiof1_ss1_d),
+	SH_PFC_PIN_GROUP(msiof1_ss2_d),
+	SH_PFC_PIN_GROUP(msiof1_txd_d),
+	SH_PFC_PIN_GROUP(msiof1_rxd_d),
+	SH_PFC_PIN_GROUP(msiof1_clk_e),
+	SH_PFC_PIN_GROUP(msiof1_sync_e),
+	SH_PFC_PIN_GROUP(msiof1_ss1_e),
+	SH_PFC_PIN_GROUP(msiof1_ss2_e),
+	SH_PFC_PIN_GROUP(msiof1_txd_e),
+	SH_PFC_PIN_GROUP(msiof1_rxd_e),
+	SH_PFC_PIN_GROUP(msiof1_clk_f),
+	SH_PFC_PIN_GROUP(msiof1_sync_f),
+	SH_PFC_PIN_GROUP(msiof1_ss1_f),
+	SH_PFC_PIN_GROUP(msiof1_ss2_f),
+	SH_PFC_PIN_GROUP(msiof1_txd_f),
+	SH_PFC_PIN_GROUP(msiof1_rxd_f),
+	SH_PFC_PIN_GROUP(msiof1_clk_g),
+	SH_PFC_PIN_GROUP(msiof1_sync_g),
+	SH_PFC_PIN_GROUP(msiof1_ss1_g),
+	SH_PFC_PIN_GROUP(msiof1_ss2_g),
+	SH_PFC_PIN_GROUP(msiof1_txd_g),
+	SH_PFC_PIN_GROUP(msiof1_rxd_g),
+	SH_PFC_PIN_GROUP(msiof2_clk_a),
+	SH_PFC_PIN_GROUP(msiof2_sync_a),
+	SH_PFC_PIN_GROUP(msiof2_ss1_a),
+	SH_PFC_PIN_GROUP(msiof2_ss2_a),
+	SH_PFC_PIN_GROUP(msiof2_txd_a),
+	SH_PFC_PIN_GROUP(msiof2_rxd_a),
+	SH_PFC_PIN_GROUP(msiof2_clk_b),
+	SH_PFC_PIN_GROUP(msiof2_sync_b),
+	SH_PFC_PIN_GROUP(msiof2_ss1_b),
+	SH_PFC_PIN_GROUP(msiof2_ss2_b),
+	SH_PFC_PIN_GROUP(msiof2_txd_b),
+	SH_PFC_PIN_GROUP(msiof2_rxd_b),
+	SH_PFC_PIN_GROUP(msiof2_clk_c),
+	SH_PFC_PIN_GROUP(msiof2_sync_c),
+	SH_PFC_PIN_GROUP(msiof2_ss1_c),
+	SH_PFC_PIN_GROUP(msiof2_ss2_c),
+	SH_PFC_PIN_GROUP(msiof2_txd_c),
+	SH_PFC_PIN_GROUP(msiof2_rxd_c),
+	SH_PFC_PIN_GROUP(msiof2_clk_d),
+	SH_PFC_PIN_GROUP(msiof2_sync_d),
+	SH_PFC_PIN_GROUP(msiof2_ss1_d),
+	SH_PFC_PIN_GROUP(msiof2_ss2_d),
+	SH_PFC_PIN_GROUP(msiof2_txd_d),
+	SH_PFC_PIN_GROUP(msiof2_rxd_d),
+	SH_PFC_PIN_GROUP(msiof3_clk_a),
+	SH_PFC_PIN_GROUP(msiof3_sync_a),
+	SH_PFC_PIN_GROUP(msiof3_ss1_a),
+	SH_PFC_PIN_GROUP(msiof3_ss2_a),
+	SH_PFC_PIN_GROUP(msiof3_txd_a),
+	SH_PFC_PIN_GROUP(msiof3_rxd_a),
+	SH_PFC_PIN_GROUP(msiof3_clk_b),
+	SH_PFC_PIN_GROUP(msiof3_sync_b),
+	SH_PFC_PIN_GROUP(msiof3_ss1_b),
+	SH_PFC_PIN_GROUP(msiof3_ss2_b),
+	SH_PFC_PIN_GROUP(msiof3_txd_b),
+	SH_PFC_PIN_GROUP(msiof3_rxd_b),
+	SH_PFC_PIN_GROUP(msiof3_clk_c),
+	SH_PFC_PIN_GROUP(msiof3_sync_c),
+	SH_PFC_PIN_GROUP(msiof3_txd_c),
+	SH_PFC_PIN_GROUP(msiof3_rxd_c),
+	SH_PFC_PIN_GROUP(msiof3_clk_d),
+	SH_PFC_PIN_GROUP(msiof3_sync_d),
+	SH_PFC_PIN_GROUP(msiof3_ss1_d),
+	SH_PFC_PIN_GROUP(msiof3_txd_d),
+	SH_PFC_PIN_GROUP(msiof3_rxd_d),
+	SH_PFC_PIN_GROUP(msiof3_clk_e),
+	SH_PFC_PIN_GROUP(msiof3_sync_e),
+	SH_PFC_PIN_GROUP(msiof3_ss1_e),
+	SH_PFC_PIN_GROUP(msiof3_ss2_e),
+	SH_PFC_PIN_GROUP(msiof3_txd_e),
+	SH_PFC_PIN_GROUP(msiof3_rxd_e),
+	SH_PFC_PIN_GROUP(pwm0),
+	SH_PFC_PIN_GROUP(pwm1_a),
+	SH_PFC_PIN_GROUP(pwm1_b),
+	SH_PFC_PIN_GROUP(pwm2_a),
+	SH_PFC_PIN_GROUP(pwm2_b),
+	SH_PFC_PIN_GROUP(pwm3_a),
+	SH_PFC_PIN_GROUP(pwm3_b),
+	SH_PFC_PIN_GROUP(pwm4_a),
+	SH_PFC_PIN_GROUP(pwm4_b),
+	SH_PFC_PIN_GROUP(pwm5_a),
+	SH_PFC_PIN_GROUP(pwm5_b),
+	SH_PFC_PIN_GROUP(pwm6_a),
+	SH_PFC_PIN_GROUP(pwm6_b),
+	SH_PFC_PIN_GROUP(sata0_devslp_a),
+	SH_PFC_PIN_GROUP(sata0_devslp_b),
+	SH_PFC_PIN_GROUP(scif0_data),
+	SH_PFC_PIN_GROUP(scif0_clk),
+	SH_PFC_PIN_GROUP(scif0_ctrl),
+	SH_PFC_PIN_GROUP(scif1_data_a),
+	SH_PFC_PIN_GROUP(scif1_clk),
+	SH_PFC_PIN_GROUP(scif1_ctrl),
+	SH_PFC_PIN_GROUP(scif1_data_b),
+	SH_PFC_PIN_GROUP(scif2_data_a),
+	SH_PFC_PIN_GROUP(scif2_clk),
+	SH_PFC_PIN_GROUP(scif2_data_b),
+	SH_PFC_PIN_GROUP(scif3_data_a),
+	SH_PFC_PIN_GROUP(scif3_clk),
+	SH_PFC_PIN_GROUP(scif3_ctrl),
+	SH_PFC_PIN_GROUP(scif3_data_b),
+	SH_PFC_PIN_GROUP(scif4_data_a),
+	SH_PFC_PIN_GROUP(scif4_clk_a),
+	SH_PFC_PIN_GROUP(scif4_ctrl_a),
+	SH_PFC_PIN_GROUP(scif4_data_b),
+	SH_PFC_PIN_GROUP(scif4_clk_b),
+	SH_PFC_PIN_GROUP(scif4_ctrl_b),
+	SH_PFC_PIN_GROUP(scif4_data_c),
+	SH_PFC_PIN_GROUP(scif4_clk_c),
+	SH_PFC_PIN_GROUP(scif4_ctrl_c),
+	SH_PFC_PIN_GROUP(scif5_data_a),
+	SH_PFC_PIN_GROUP(scif5_clk_a),
+	SH_PFC_PIN_GROUP(scif5_data_b),
+	SH_PFC_PIN_GROUP(scif5_clk_b),
+	SH_PFC_PIN_GROUP(scif_clk_a),
+	SH_PFC_PIN_GROUP(scif_clk_b),
+	SH_PFC_PIN_GROUP(sdhi0_data1),
+	SH_PFC_PIN_GROUP(sdhi0_data4),
+	SH_PFC_PIN_GROUP(sdhi0_ctrl),
+	SH_PFC_PIN_GROUP(sdhi0_cd),
+	SH_PFC_PIN_GROUP(sdhi0_wp),
+	SH_PFC_PIN_GROUP(sdhi1_data1),
+	SH_PFC_PIN_GROUP(sdhi1_data4),
+	SH_PFC_PIN_GROUP(sdhi1_ctrl),
+	SH_PFC_PIN_GROUP(sdhi1_cd),
+	SH_PFC_PIN_GROUP(sdhi1_wp),
+	SH_PFC_PIN_GROUP(sdhi2_data1),
+	SH_PFC_PIN_GROUP(sdhi2_data4),
+	SH_PFC_PIN_GROUP(sdhi2_data8),
+	SH_PFC_PIN_GROUP(sdhi2_ctrl),
+	SH_PFC_PIN_GROUP(sdhi2_cd_a),
+	SH_PFC_PIN_GROUP(sdhi2_wp_a),
+	SH_PFC_PIN_GROUP(sdhi2_cd_b),
+	SH_PFC_PIN_GROUP(sdhi2_wp_b),
+	SH_PFC_PIN_GROUP(sdhi2_ds),
+	SH_PFC_PIN_GROUP(sdhi3_data1),
+	SH_PFC_PIN_GROUP(sdhi3_data4),
+	SH_PFC_PIN_GROUP(sdhi3_data8),
+	SH_PFC_PIN_GROUP(sdhi3_ctrl),
+	SH_PFC_PIN_GROUP(sdhi3_cd),
+	SH_PFC_PIN_GROUP(sdhi3_wp),
+	SH_PFC_PIN_GROUP(sdhi3_ds),
+	SH_PFC_PIN_GROUP(ssi0_data),
+	SH_PFC_PIN_GROUP(ssi01239_ctrl),
+	SH_PFC_PIN_GROUP(ssi1_data_a),
+	SH_PFC_PIN_GROUP(ssi1_data_b),
+	SH_PFC_PIN_GROUP(ssi1_ctrl_a),
+	SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+	SH_PFC_PIN_GROUP(ssi2_data_a),
+	SH_PFC_PIN_GROUP(ssi2_data_b),
+	SH_PFC_PIN_GROUP(ssi2_ctrl_a),
+	SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+	SH_PFC_PIN_GROUP(ssi3_data),
+	SH_PFC_PIN_GROUP(ssi349_ctrl),
+	SH_PFC_PIN_GROUP(ssi4_data),
+	SH_PFC_PIN_GROUP(ssi4_ctrl),
+	SH_PFC_PIN_GROUP(ssi5_data),
+	SH_PFC_PIN_GROUP(ssi5_ctrl),
+	SH_PFC_PIN_GROUP(ssi6_data),
+	SH_PFC_PIN_GROUP(ssi6_ctrl),
+	SH_PFC_PIN_GROUP(ssi7_data),
+	SH_PFC_PIN_GROUP(ssi78_ctrl),
+	SH_PFC_PIN_GROUP(ssi8_data),
+	SH_PFC_PIN_GROUP(ssi9_data_a),
+	SH_PFC_PIN_GROUP(ssi9_data_b),
+	SH_PFC_PIN_GROUP(ssi9_ctrl_a),
+	SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+	SH_PFC_PIN_GROUP(tmu_tclk1_a),
+	SH_PFC_PIN_GROUP(tmu_tclk1_b),
+	SH_PFC_PIN_GROUP(tmu_tclk2_a),
+	SH_PFC_PIN_GROUP(tmu_tclk2_b),
+	SH_PFC_PIN_GROUP(tpu_to0),
+	SH_PFC_PIN_GROUP(tpu_to1),
+	SH_PFC_PIN_GROUP(tpu_to2),
+	SH_PFC_PIN_GROUP(tpu_to3),
+	SH_PFC_PIN_GROUP(usb0),
+	SH_PFC_PIN_GROUP(usb1),
+	SH_PFC_PIN_GROUP(usb2),
+	SH_PFC_PIN_GROUP(usb2_ch3),
+	SH_PFC_PIN_GROUP(usb30),
+	VIN_DATA_PIN_GROUP(vin4_data, 8, _a),
+	VIN_DATA_PIN_GROUP(vin4_data, 10, _a),
+	VIN_DATA_PIN_GROUP(vin4_data, 12, _a),
+	VIN_DATA_PIN_GROUP(vin4_data, 16, _a),
+	SH_PFC_PIN_GROUP(vin4_data18_a),
+	VIN_DATA_PIN_GROUP(vin4_data, 20, _a),
+	VIN_DATA_PIN_GROUP(vin4_data, 24, _a),
+	VIN_DATA_PIN_GROUP(vin4_data, 8, _b),
+	VIN_DATA_PIN_GROUP(vin4_data, 10, _b),
+	VIN_DATA_PIN_GROUP(vin4_data, 12, _b),
+	VIN_DATA_PIN_GROUP(vin4_data, 16, _b),
+	SH_PFC_PIN_GROUP(vin4_data18_b),
+	VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
+	VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
+	SH_PFC_PIN_GROUP(vin4_sync),
+	SH_PFC_PIN_GROUP(vin4_field),
+	SH_PFC_PIN_GROUP(vin4_clkenb),
+	SH_PFC_PIN_GROUP(vin4_clk),
+	VIN_DATA_PIN_GROUP(vin5_data, 8),
+	VIN_DATA_PIN_GROUP(vin5_data, 10),
+	VIN_DATA_PIN_GROUP(vin5_data, 12),
+	VIN_DATA_PIN_GROUP(vin5_data, 16),
+	SH_PFC_PIN_GROUP(vin5_sync),
+	SH_PFC_PIN_GROUP(vin5_field),
+	SH_PFC_PIN_GROUP(vin5_clkenb),
+	SH_PFC_PIN_GROUP(vin5_clk),
+};
+
+static const char * const audio_clk_groups[] = {
+	"audio_clk_a_a",
+	"audio_clk_a_b",
+	"audio_clk_a_c",
+	"audio_clk_b_a",
+	"audio_clk_b_b",
+	"audio_clk_c_a",
+	"audio_clk_c_b",
+	"audio_clkout_a",
+	"audio_clkout_b",
+	"audio_clkout_c",
+	"audio_clkout_d",
+	"audio_clkout1_a",
+	"audio_clkout1_b",
+	"audio_clkout2_a",
+	"audio_clkout2_b",
+	"audio_clkout3_a",
+	"audio_clkout3_b",
+};
+
+static const char * const avb_groups[] = {
+	"avb_link",
+	"avb_magic",
+	"avb_phy_int",
+	"avb_mdc",	/* Deprecated, please use "avb_mdio" instead */
+	"avb_mdio",
+	"avb_mii",
+	"avb_avtp_pps",
+	"avb_avtp_match_a",
+	"avb_avtp_capture_a",
+	"avb_avtp_match_b",
+	"avb_avtp_capture_b",
+};
+
+static const char * const can0_groups[] = {
+	"can0_data_a",
+	"can0_data_b",
+};
+
+static const char * const can1_groups[] = {
+	"can1_data",
+};
+
+static const char * const can_clk_groups[] = {
+	"can_clk",
+};
+
+static const char * const canfd0_groups[] = {
+	"canfd0_data_a",
+	"canfd0_data_b",
+};
+
+static const char * const canfd1_groups[] = {
+	"canfd1_data",
+};
+
+static const char * const drif0_groups[] = {
+	"drif0_ctrl_a",
+	"drif0_data0_a",
+	"drif0_data1_a",
+	"drif0_ctrl_b",
+	"drif0_data0_b",
+	"drif0_data1_b",
+	"drif0_ctrl_c",
+	"drif0_data0_c",
+	"drif0_data1_c",
+};
+
+static const char * const drif1_groups[] = {
+	"drif1_ctrl_a",
+	"drif1_data0_a",
+	"drif1_data1_a",
+	"drif1_ctrl_b",
+	"drif1_data0_b",
+	"drif1_data1_b",
+	"drif1_ctrl_c",
+	"drif1_data0_c",
+	"drif1_data1_c",
+};
+
+static const char * const drif2_groups[] = {
+	"drif2_ctrl_a",
+	"drif2_data0_a",
+	"drif2_data1_a",
+	"drif2_ctrl_b",
+	"drif2_data0_b",
+	"drif2_data1_b",
+};
+
+static const char * const drif3_groups[] = {
+	"drif3_ctrl_a",
+	"drif3_data0_a",
+	"drif3_data1_a",
+	"drif3_ctrl_b",
+	"drif3_data0_b",
+	"drif3_data1_b",
+};
+
+static const char * const du_groups[] = {
+	"du_rgb666",
+	"du_rgb888",
+	"du_clk_out_0",
+	"du_clk_out_1",
+	"du_sync",
+	"du_oddf",
+	"du_cde",
+	"du_disp",
+};
+
+static const char * const hscif0_groups[] = {
+	"hscif0_data",
+	"hscif0_clk",
+	"hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+	"hscif1_data_a",
+	"hscif1_clk_a",
+	"hscif1_ctrl_a",
+	"hscif1_data_b",
+	"hscif1_clk_b",
+	"hscif1_ctrl_b",
+};
+
+static const char * const hscif2_groups[] = {
+	"hscif2_data_a",
+	"hscif2_clk_a",
+	"hscif2_ctrl_a",
+	"hscif2_data_b",
+	"hscif2_clk_b",
+	"hscif2_ctrl_b",
+	"hscif2_data_c",
+	"hscif2_clk_c",
+	"hscif2_ctrl_c",
+};
+
+static const char * const hscif3_groups[] = {
+	"hscif3_data_a",
+	"hscif3_clk",
+	"hscif3_ctrl",
+	"hscif3_data_b",
+	"hscif3_data_c",
+	"hscif3_data_d",
+};
+
+static const char * const hscif4_groups[] = {
+	"hscif4_data_a",
+	"hscif4_clk",
+	"hscif4_ctrl",
+	"hscif4_data_b",
+};
+
+static const char * const i2c0_groups[] = {
+	"i2c0",
+};
+
+static const char * const i2c1_groups[] = {
+	"i2c1_a",
+	"i2c1_b",
+};
+
+static const char * const i2c2_groups[] = {
+	"i2c2_a",
+	"i2c2_b",
+};
+
+static const char * const i2c3_groups[] = {
+	"i2c3",
+};
+
+static const char * const i2c5_groups[] = {
+	"i2c5",
+};
+
+static const char * const i2c6_groups[] = {
+	"i2c6_a",
+	"i2c6_b",
+	"i2c6_c",
+};
+
+static const char * const intc_ex_groups[] = {
+	"intc_ex_irq0",
+	"intc_ex_irq1",
+	"intc_ex_irq2",
+	"intc_ex_irq3",
+	"intc_ex_irq4",
+	"intc_ex_irq5",
+};
+
+static const char * const msiof0_groups[] = {
+	"msiof0_clk",
+	"msiof0_sync",
+	"msiof0_ss1",
+	"msiof0_ss2",
+	"msiof0_txd",
+	"msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+	"msiof1_clk_a",
+	"msiof1_sync_a",
+	"msiof1_ss1_a",
+	"msiof1_ss2_a",
+	"msiof1_txd_a",
+	"msiof1_rxd_a",
+	"msiof1_clk_b",
+	"msiof1_sync_b",
+	"msiof1_ss1_b",
+	"msiof1_ss2_b",
+	"msiof1_txd_b",
+	"msiof1_rxd_b",
+	"msiof1_clk_c",
+	"msiof1_sync_c",
+	"msiof1_ss1_c",
+	"msiof1_ss2_c",
+	"msiof1_txd_c",
+	"msiof1_rxd_c",
+	"msiof1_clk_d",
+	"msiof1_sync_d",
+	"msiof1_ss1_d",
+	"msiof1_ss2_d",
+	"msiof1_txd_d",
+	"msiof1_rxd_d",
+	"msiof1_clk_e",
+	"msiof1_sync_e",
+	"msiof1_ss1_e",
+	"msiof1_ss2_e",
+	"msiof1_txd_e",
+	"msiof1_rxd_e",
+	"msiof1_clk_f",
+	"msiof1_sync_f",
+	"msiof1_ss1_f",
+	"msiof1_ss2_f",
+	"msiof1_txd_f",
+	"msiof1_rxd_f",
+	"msiof1_clk_g",
+	"msiof1_sync_g",
+	"msiof1_ss1_g",
+	"msiof1_ss2_g",
+	"msiof1_txd_g",
+	"msiof1_rxd_g",
+};
+
+static const char * const msiof2_groups[] = {
+	"msiof2_clk_a",
+	"msiof2_sync_a",
+	"msiof2_ss1_a",
+	"msiof2_ss2_a",
+	"msiof2_txd_a",
+	"msiof2_rxd_a",
+	"msiof2_clk_b",
+	"msiof2_sync_b",
+	"msiof2_ss1_b",
+	"msiof2_ss2_b",
+	"msiof2_txd_b",
+	"msiof2_rxd_b",
+	"msiof2_clk_c",
+	"msiof2_sync_c",
+	"msiof2_ss1_c",
+	"msiof2_ss2_c",
+	"msiof2_txd_c",
+	"msiof2_rxd_c",
+	"msiof2_clk_d",
+	"msiof2_sync_d",
+	"msiof2_ss1_d",
+	"msiof2_ss2_d",
+	"msiof2_txd_d",
+	"msiof2_rxd_d",
+};
+
+static const char * const msiof3_groups[] = {
+	"msiof3_clk_a",
+	"msiof3_sync_a",
+	"msiof3_ss1_a",
+	"msiof3_ss2_a",
+	"msiof3_txd_a",
+	"msiof3_rxd_a",
+	"msiof3_clk_b",
+	"msiof3_sync_b",
+	"msiof3_ss1_b",
+	"msiof3_ss2_b",
+	"msiof3_txd_b",
+	"msiof3_rxd_b",
+	"msiof3_clk_c",
+	"msiof3_sync_c",
+	"msiof3_txd_c",
+	"msiof3_rxd_c",
+	"msiof3_clk_d",
+	"msiof3_sync_d",
+	"msiof3_ss1_d",
+	"msiof3_txd_d",
+	"msiof3_rxd_d",
+	"msiof3_clk_e",
+	"msiof3_sync_e",
+	"msiof3_ss1_e",
+	"msiof3_ss2_e",
+	"msiof3_txd_e",
+	"msiof3_rxd_e",
+};
+
+static const char * const pwm0_groups[] = {
+	"pwm0",
+};
+
+static const char * const pwm1_groups[] = {
+	"pwm1_a",
+	"pwm1_b",
+};
+
+static const char * const pwm2_groups[] = {
+	"pwm2_a",
+	"pwm2_b",
+};
+
+static const char * const pwm3_groups[] = {
+	"pwm3_a",
+	"pwm3_b",
+};
+
+static const char * const pwm4_groups[] = {
+	"pwm4_a",
+	"pwm4_b",
+};
+
+static const char * const pwm5_groups[] = {
+	"pwm5_a",
+	"pwm5_b",
+};
+
+static const char * const pwm6_groups[] = {
+	"pwm6_a",
+	"pwm6_b",
+};
+
+static const char * const sata0_groups[] = {
+	"sata0_devslp_a",
+	"sata0_devslp_b",
+};
+
+static const char * const scif0_groups[] = {
+	"scif0_data",
+	"scif0_clk",
+	"scif0_ctrl",
+};
+
+static const char * const scif1_groups[] = {
+	"scif1_data_a",
+	"scif1_clk",
+	"scif1_ctrl",
+	"scif1_data_b",
+};
+
+static const char * const scif2_groups[] = {
+	"scif2_data_a",
+	"scif2_clk",
+	"scif2_data_b",
+};
+
+static const char * const scif3_groups[] = {
+	"scif3_data_a",
+	"scif3_clk",
+	"scif3_ctrl",
+	"scif3_data_b",
+};
+
+static const char * const scif4_groups[] = {
+	"scif4_data_a",
+	"scif4_clk_a",
+	"scif4_ctrl_a",
+	"scif4_data_b",
+	"scif4_clk_b",
+	"scif4_ctrl_b",
+	"scif4_data_c",
+	"scif4_clk_c",
+	"scif4_ctrl_c",
+};
+
+static const char * const scif5_groups[] = {
+	"scif5_data_a",
+	"scif5_clk_a",
+	"scif5_data_b",
+	"scif5_clk_b",
+};
+
+static const char * const scif_clk_groups[] = {
+	"scif_clk_a",
+	"scif_clk_b",
+};
+
+static const char * const sdhi0_groups[] = {
+	"sdhi0_data1",
+	"sdhi0_data4",
+	"sdhi0_ctrl",
+	"sdhi0_cd",
+	"sdhi0_wp",
+};
+
+static const char * const sdhi1_groups[] = {
+	"sdhi1_data1",
+	"sdhi1_data4",
+	"sdhi1_ctrl",
+	"sdhi1_cd",
+	"sdhi1_wp",
+};
+
+static const char * const sdhi2_groups[] = {
+	"sdhi2_data1",
+	"sdhi2_data4",
+	"sdhi2_data8",
+	"sdhi2_ctrl",
+	"sdhi2_cd_a",
+	"sdhi2_wp_a",
+	"sdhi2_cd_b",
+	"sdhi2_wp_b",
+	"sdhi2_ds",
+};
+
+static const char * const sdhi3_groups[] = {
+	"sdhi3_data1",
+	"sdhi3_data4",
+	"sdhi3_data8",
+	"sdhi3_ctrl",
+	"sdhi3_cd",
+	"sdhi3_wp",
+	"sdhi3_ds",
+};
+
+static const char * const ssi_groups[] = {
+	"ssi0_data",
+	"ssi01239_ctrl",
+	"ssi1_data_a",
+	"ssi1_data_b",
+	"ssi1_ctrl_a",
+	"ssi1_ctrl_b",
+	"ssi2_data_a",
+	"ssi2_data_b",
+	"ssi2_ctrl_a",
+	"ssi2_ctrl_b",
+	"ssi3_data",
+	"ssi349_ctrl",
+	"ssi4_data",
+	"ssi4_ctrl",
+	"ssi5_data",
+	"ssi5_ctrl",
+	"ssi6_data",
+	"ssi6_ctrl",
+	"ssi7_data",
+	"ssi78_ctrl",
+	"ssi8_data",
+	"ssi9_data_a",
+	"ssi9_data_b",
+	"ssi9_ctrl_a",
+	"ssi9_ctrl_b",
+};
+
+static const char * const tmu_groups[] = {
+	"tmu_tclk1_a",
+	"tmu_tclk1_b",
+	"tmu_tclk2_a",
+	"tmu_tclk2_b",
+};
+
+static const char * const tpu_groups[] = {
+	"tpu_to0",
+	"tpu_to1",
+	"tpu_to2",
+	"tpu_to3",
+};
+
+static const char * const usb0_groups[] = {
+	"usb0",
+};
+
+static const char * const usb1_groups[] = {
+	"usb1",
+};
+
+static const char * const usb2_groups[] = {
+	"usb2",
+};
+
+static const char * const usb2_ch3_groups[] = {
+	"usb2_ch3",
+};
+
+static const char * const usb30_groups[] = {
+	"usb30",
+};
+
+static const char * const vin4_groups[] = {
+	"vin4_data8_a",
+	"vin4_data10_a",
+	"vin4_data12_a",
+	"vin4_data16_a",
+	"vin4_data18_a",
+	"vin4_data20_a",
+	"vin4_data24_a",
+	"vin4_data8_b",
+	"vin4_data10_b",
+	"vin4_data12_b",
+	"vin4_data16_b",
+	"vin4_data18_b",
+	"vin4_data20_b",
+	"vin4_data24_b",
+	"vin4_sync",
+	"vin4_field",
+	"vin4_clkenb",
+	"vin4_clk",
+};
+
+static const char * const vin5_groups[] = {
+	"vin5_data8",
+	"vin5_data10",
+	"vin5_data12",
+	"vin5_data16",
+	"vin5_sync",
+	"vin5_field",
+	"vin5_clkenb",
+	"vin5_clk",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+	SH_PFC_FUNCTION(audio_clk),
+	SH_PFC_FUNCTION(avb),
+	SH_PFC_FUNCTION(can0),
+	SH_PFC_FUNCTION(can1),
+	SH_PFC_FUNCTION(can_clk),
+	SH_PFC_FUNCTION(canfd0),
+	SH_PFC_FUNCTION(canfd1),
+	SH_PFC_FUNCTION(drif0),
+	SH_PFC_FUNCTION(drif1),
+	SH_PFC_FUNCTION(drif2),
+	SH_PFC_FUNCTION(drif3),
+	SH_PFC_FUNCTION(du),
+	SH_PFC_FUNCTION(hscif0),
+	SH_PFC_FUNCTION(hscif1),
+	SH_PFC_FUNCTION(hscif2),
+	SH_PFC_FUNCTION(hscif3),
+	SH_PFC_FUNCTION(hscif4),
+	SH_PFC_FUNCTION(i2c0),
+	SH_PFC_FUNCTION(i2c1),
+	SH_PFC_FUNCTION(i2c2),
+	SH_PFC_FUNCTION(i2c3),
+	SH_PFC_FUNCTION(i2c5),
+	SH_PFC_FUNCTION(i2c6),
+	SH_PFC_FUNCTION(intc_ex),
+	SH_PFC_FUNCTION(msiof0),
+	SH_PFC_FUNCTION(msiof1),
+	SH_PFC_FUNCTION(msiof2),
+	SH_PFC_FUNCTION(msiof3),
+	SH_PFC_FUNCTION(pwm0),
+	SH_PFC_FUNCTION(pwm1),
+	SH_PFC_FUNCTION(pwm2),
+	SH_PFC_FUNCTION(pwm3),
+	SH_PFC_FUNCTION(pwm4),
+	SH_PFC_FUNCTION(pwm5),
+	SH_PFC_FUNCTION(pwm6),
+	SH_PFC_FUNCTION(sata0),
+	SH_PFC_FUNCTION(scif0),
+	SH_PFC_FUNCTION(scif1),
+	SH_PFC_FUNCTION(scif2),
+	SH_PFC_FUNCTION(scif3),
+	SH_PFC_FUNCTION(scif4),
+	SH_PFC_FUNCTION(scif5),
+	SH_PFC_FUNCTION(scif_clk),
+	SH_PFC_FUNCTION(sdhi0),
+	SH_PFC_FUNCTION(sdhi1),
+	SH_PFC_FUNCTION(sdhi2),
+	SH_PFC_FUNCTION(sdhi3),
+	SH_PFC_FUNCTION(ssi),
+	SH_PFC_FUNCTION(tmu),
+	SH_PFC_FUNCTION(tpu),
+	SH_PFC_FUNCTION(usb0),
+	SH_PFC_FUNCTION(usb1),
+	SH_PFC_FUNCTION(usb2),
+	SH_PFC_FUNCTION(usb2_ch3),
+	SH_PFC_FUNCTION(usb30),
+	SH_PFC_FUNCTION(vin4),
+	SH_PFC_FUNCTION(vin5),
+};
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+#define F_(x, y)	FN_##y
+#define FM(x)		FN_##x
+	{ PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_0_15_FN,	GPSR0_15,
+		GP_0_14_FN,	GPSR0_14,
+		GP_0_13_FN,	GPSR0_13,
+		GP_0_12_FN,	GPSR0_12,
+		GP_0_11_FN,	GPSR0_11,
+		GP_0_10_FN,	GPSR0_10,
+		GP_0_9_FN,	GPSR0_9,
+		GP_0_8_FN,	GPSR0_8,
+		GP_0_7_FN,	GPSR0_7,
+		GP_0_6_FN,	GPSR0_6,
+		GP_0_5_FN,	GPSR0_5,
+		GP_0_4_FN,	GPSR0_4,
+		GP_0_3_FN,	GPSR0_3,
+		GP_0_2_FN,	GPSR0_2,
+		GP_0_1_FN,	GPSR0_1,
+		GP_0_0_FN,	GPSR0_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_1_28_FN,	GPSR1_28,
+		GP_1_27_FN,	GPSR1_27,
+		GP_1_26_FN,	GPSR1_26,
+		GP_1_25_FN,	GPSR1_25,
+		GP_1_24_FN,	GPSR1_24,
+		GP_1_23_FN,	GPSR1_23,
+		GP_1_22_FN,	GPSR1_22,
+		GP_1_21_FN,	GPSR1_21,
+		GP_1_20_FN,	GPSR1_20,
+		GP_1_19_FN,	GPSR1_19,
+		GP_1_18_FN,	GPSR1_18,
+		GP_1_17_FN,	GPSR1_17,
+		GP_1_16_FN,	GPSR1_16,
+		GP_1_15_FN,	GPSR1_15,
+		GP_1_14_FN,	GPSR1_14,
+		GP_1_13_FN,	GPSR1_13,
+		GP_1_12_FN,	GPSR1_12,
+		GP_1_11_FN,	GPSR1_11,
+		GP_1_10_FN,	GPSR1_10,
+		GP_1_9_FN,	GPSR1_9,
+		GP_1_8_FN,	GPSR1_8,
+		GP_1_7_FN,	GPSR1_7,
+		GP_1_6_FN,	GPSR1_6,
+		GP_1_5_FN,	GPSR1_5,
+		GP_1_4_FN,	GPSR1_4,
+		GP_1_3_FN,	GPSR1_3,
+		GP_1_2_FN,	GPSR1_2,
+		GP_1_1_FN,	GPSR1_1,
+		GP_1_0_FN,	GPSR1_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_2_14_FN,	GPSR2_14,
+		GP_2_13_FN,	GPSR2_13,
+		GP_2_12_FN,	GPSR2_12,
+		GP_2_11_FN,	GPSR2_11,
+		GP_2_10_FN,	GPSR2_10,
+		GP_2_9_FN,	GPSR2_9,
+		GP_2_8_FN,	GPSR2_8,
+		GP_2_7_FN,	GPSR2_7,
+		GP_2_6_FN,	GPSR2_6,
+		GP_2_5_FN,	GPSR2_5,
+		GP_2_4_FN,	GPSR2_4,
+		GP_2_3_FN,	GPSR2_3,
+		GP_2_2_FN,	GPSR2_2,
+		GP_2_1_FN,	GPSR2_1,
+		GP_2_0_FN,	GPSR2_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_3_15_FN,	GPSR3_15,
+		GP_3_14_FN,	GPSR3_14,
+		GP_3_13_FN,	GPSR3_13,
+		GP_3_12_FN,	GPSR3_12,
+		GP_3_11_FN,	GPSR3_11,
+		GP_3_10_FN,	GPSR3_10,
+		GP_3_9_FN,	GPSR3_9,
+		GP_3_8_FN,	GPSR3_8,
+		GP_3_7_FN,	GPSR3_7,
+		GP_3_6_FN,	GPSR3_6,
+		GP_3_5_FN,	GPSR3_5,
+		GP_3_4_FN,	GPSR3_4,
+		GP_3_3_FN,	GPSR3_3,
+		GP_3_2_FN,	GPSR3_2,
+		GP_3_1_FN,	GPSR3_1,
+		GP_3_0_FN,	GPSR3_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_4_17_FN,	GPSR4_17,
+		GP_4_16_FN,	GPSR4_16,
+		GP_4_15_FN,	GPSR4_15,
+		GP_4_14_FN,	GPSR4_14,
+		GP_4_13_FN,	GPSR4_13,
+		GP_4_12_FN,	GPSR4_12,
+		GP_4_11_FN,	GPSR4_11,
+		GP_4_10_FN,	GPSR4_10,
+		GP_4_9_FN,	GPSR4_9,
+		GP_4_8_FN,	GPSR4_8,
+		GP_4_7_FN,	GPSR4_7,
+		GP_4_6_FN,	GPSR4_6,
+		GP_4_5_FN,	GPSR4_5,
+		GP_4_4_FN,	GPSR4_4,
+		GP_4_3_FN,	GPSR4_3,
+		GP_4_2_FN,	GPSR4_2,
+		GP_4_1_FN,	GPSR4_1,
+		GP_4_0_FN,	GPSR4_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_5_25_FN,	GPSR5_25,
+		GP_5_24_FN,	GPSR5_24,
+		GP_5_23_FN,	GPSR5_23,
+		GP_5_22_FN,	GPSR5_22,
+		GP_5_21_FN,	GPSR5_21,
+		GP_5_20_FN,	GPSR5_20,
+		GP_5_19_FN,	GPSR5_19,
+		GP_5_18_FN,	GPSR5_18,
+		GP_5_17_FN,	GPSR5_17,
+		GP_5_16_FN,	GPSR5_16,
+		GP_5_15_FN,	GPSR5_15,
+		GP_5_14_FN,	GPSR5_14,
+		GP_5_13_FN,	GPSR5_13,
+		GP_5_12_FN,	GPSR5_12,
+		GP_5_11_FN,	GPSR5_11,
+		GP_5_10_FN,	GPSR5_10,
+		GP_5_9_FN,	GPSR5_9,
+		GP_5_8_FN,	GPSR5_8,
+		GP_5_7_FN,	GPSR5_7,
+		GP_5_6_FN,	GPSR5_6,
+		GP_5_5_FN,	GPSR5_5,
+		GP_5_4_FN,	GPSR5_4,
+		GP_5_3_FN,	GPSR5_3,
+		GP_5_2_FN,	GPSR5_2,
+		GP_5_1_FN,	GPSR5_1,
+		GP_5_0_FN,	GPSR5_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1, GROUP(
+		GP_6_31_FN,	GPSR6_31,
+		GP_6_30_FN,	GPSR6_30,
+		GP_6_29_FN,	GPSR6_29,
+		GP_6_28_FN,	GPSR6_28,
+		GP_6_27_FN,	GPSR6_27,
+		GP_6_26_FN,	GPSR6_26,
+		GP_6_25_FN,	GPSR6_25,
+		GP_6_24_FN,	GPSR6_24,
+		GP_6_23_FN,	GPSR6_23,
+		GP_6_22_FN,	GPSR6_22,
+		GP_6_21_FN,	GPSR6_21,
+		GP_6_20_FN,	GPSR6_20,
+		GP_6_19_FN,	GPSR6_19,
+		GP_6_18_FN,	GPSR6_18,
+		GP_6_17_FN,	GPSR6_17,
+		GP_6_16_FN,	GPSR6_16,
+		GP_6_15_FN,	GPSR6_15,
+		GP_6_14_FN,	GPSR6_14,
+		GP_6_13_FN,	GPSR6_13,
+		GP_6_12_FN,	GPSR6_12,
+		GP_6_11_FN,	GPSR6_11,
+		GP_6_10_FN,	GPSR6_10,
+		GP_6_9_FN,	GPSR6_9,
+		GP_6_8_FN,	GPSR6_8,
+		GP_6_7_FN,	GPSR6_7,
+		GP_6_6_FN,	GPSR6_6,
+		GP_6_5_FN,	GPSR6_5,
+		GP_6_4_FN,	GPSR6_4,
+		GP_6_3_FN,	GPSR6_3,
+		GP_6_2_FN,	GPSR6_2,
+		GP_6_1_FN,	GPSR6_1,
+		GP_6_0_FN,	GPSR6_0, ))
+	},
+	{ PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP(
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		0, 0,
+		GP_7_3_FN, GPSR7_3,
+		GP_7_2_FN, GPSR7_2,
+		GP_7_1_FN, GPSR7_1,
+		GP_7_0_FN, GPSR7_0, ))
+	},
+#undef F_
+#undef FM
+
+#define F_(x, y)	x,
+#define FM(x)		FN_##x,
+	{ PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4, GROUP(
+		IP0_31_28
+		IP0_27_24
+		IP0_23_20
+		IP0_19_16
+		IP0_15_12
+		IP0_11_8
+		IP0_7_4
+		IP0_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4, GROUP(
+		IP1_31_28
+		IP1_27_24
+		IP1_23_20
+		IP1_19_16
+		IP1_15_12
+		IP1_11_8
+		IP1_7_4
+		IP1_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4, GROUP(
+		IP2_31_28
+		IP2_27_24
+		IP2_23_20
+		IP2_19_16
+		IP2_15_12
+		IP2_11_8
+		IP2_7_4
+		IP2_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4, GROUP(
+		IP3_31_28
+		IP3_27_24
+		IP3_23_20
+		IP3_19_16
+		IP3_15_12
+		IP3_11_8
+		IP3_7_4
+		IP3_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4, GROUP(
+		IP4_31_28
+		IP4_27_24
+		IP4_23_20
+		IP4_19_16
+		IP4_15_12
+		IP4_11_8
+		IP4_7_4
+		IP4_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4, GROUP(
+		IP5_31_28
+		IP5_27_24
+		IP5_23_20
+		IP5_19_16
+		IP5_15_12
+		IP5_11_8
+		IP5_7_4
+		IP5_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4, GROUP(
+		IP6_31_28
+		IP6_27_24
+		IP6_23_20
+		IP6_19_16
+		IP6_15_12
+		IP6_11_8
+		IP6_7_4
+		IP6_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP(
+		IP7_31_28
+		IP7_27_24
+		IP7_23_20
+		IP7_19_16
+		/* IP7_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		IP7_11_8
+		IP7_7_4
+		IP7_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4, GROUP(
+		IP8_31_28
+		IP8_27_24
+		IP8_23_20
+		IP8_19_16
+		IP8_15_12
+		IP8_11_8
+		IP8_7_4
+		IP8_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4, GROUP(
+		IP9_31_28
+		IP9_27_24
+		IP9_23_20
+		IP9_19_16
+		IP9_15_12
+		IP9_11_8
+		IP9_7_4
+		IP9_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4, GROUP(
+		IP10_31_28
+		IP10_27_24
+		IP10_23_20
+		IP10_19_16
+		IP10_15_12
+		IP10_11_8
+		IP10_7_4
+		IP10_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4, GROUP(
+		IP11_31_28
+		IP11_27_24
+		IP11_23_20
+		IP11_19_16
+		IP11_15_12
+		IP11_11_8
+		IP11_7_4
+		IP11_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4, GROUP(
+		IP12_31_28
+		IP12_27_24
+		IP12_23_20
+		IP12_19_16
+		IP12_15_12
+		IP12_11_8
+		IP12_7_4
+		IP12_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4, GROUP(
+		IP13_31_28
+		IP13_27_24
+		IP13_23_20
+		IP13_19_16
+		IP13_15_12
+		IP13_11_8
+		IP13_7_4
+		IP13_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4, GROUP(
+		IP14_31_28
+		IP14_27_24
+		IP14_23_20
+		IP14_19_16
+		IP14_15_12
+		IP14_11_8
+		IP14_7_4
+		IP14_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4, GROUP(
+		IP15_31_28
+		IP15_27_24
+		IP15_23_20
+		IP15_19_16
+		IP15_15_12
+		IP15_11_8
+		IP15_7_4
+		IP15_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4, GROUP(
+		IP16_31_28
+		IP16_27_24
+		IP16_23_20
+		IP16_19_16
+		IP16_15_12
+		IP16_11_8
+		IP16_7_4
+		IP16_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4, GROUP(
+		IP17_31_28
+		IP17_27_24
+		IP17_23_20
+		IP17_19_16
+		IP17_15_12
+		IP17_11_8
+		IP17_7_4
+		IP17_3_0 ))
+	},
+	{ PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4, GROUP(
+		/* IP18_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		/* IP18_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		/* IP18_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		/* IP18_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		/* IP18_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		/* IP18_11_8  */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		IP18_7_4
+		IP18_3_0 ))
+	},
+#undef F_
+#undef FM
+
+#define F_(x, y)	x,
+#define FM(x)		FN_##x,
+	{ PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
+			     GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, 1, 2,
+				   1, 1, 1, 2, 2, 1, 2, 3),
+			     GROUP(
+		MOD_SEL0_31_30_29
+		MOD_SEL0_28_27
+		MOD_SEL0_26_25_24
+		MOD_SEL0_23
+		MOD_SEL0_22
+		MOD_SEL0_21
+		MOD_SEL0_20
+		MOD_SEL0_19
+		MOD_SEL0_18_17
+		MOD_SEL0_16
+		0, 0, /* RESERVED 15 */
+		MOD_SEL0_14_13
+		MOD_SEL0_12
+		MOD_SEL0_11
+		MOD_SEL0_10
+		MOD_SEL0_9_8
+		MOD_SEL0_7_6
+		MOD_SEL0_5
+		MOD_SEL0_4_3
+		/* RESERVED 2, 1, 0 */
+		0, 0, 0, 0, 0, 0, 0, 0 ))
+	},
+	{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
+			     GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1,
+				   1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1),
+			     GROUP(
+		MOD_SEL1_31_30
+		MOD_SEL1_29_28_27
+		MOD_SEL1_26
+		MOD_SEL1_25_24
+		MOD_SEL1_23_22_21
+		MOD_SEL1_20
+		MOD_SEL1_19
+		MOD_SEL1_18_17
+		MOD_SEL1_16
+		MOD_SEL1_15_14
+		MOD_SEL1_13
+		MOD_SEL1_12
+		MOD_SEL1_11
+		MOD_SEL1_10
+		MOD_SEL1_9
+		0, 0, 0, 0, /* RESERVED 8, 7 */
+		MOD_SEL1_6
+		MOD_SEL1_5
+		MOD_SEL1_4
+		MOD_SEL1_3
+		MOD_SEL1_2
+		MOD_SEL1_1
+		MOD_SEL1_0 ))
+	},
+	{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
+			     GROUP(1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1,
+				   1, 4, 4, 4, 3, 1),
+			     GROUP(
+		MOD_SEL2_31
+		MOD_SEL2_30
+		MOD_SEL2_29
+		MOD_SEL2_28_27
+		MOD_SEL2_26
+		MOD_SEL2_25_24_23
+		/* RESERVED 22 */
+		0, 0,
+		MOD_SEL2_21
+		MOD_SEL2_20
+		MOD_SEL2_19
+		MOD_SEL2_18
+		MOD_SEL2_17
+		/* RESERVED 16 */
+		0, 0,
+		/* RESERVED 15, 14, 13, 12 */
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		/* RESERVED 11, 10, 9, 8 */
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		/* RESERVED 7, 6, 5, 4 */
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		/* RESERVED 3, 2, 1 */
+		0, 0, 0, 0, 0, 0, 0, 0,
+		MOD_SEL2_0 ))
+	},
+	{ },
+};
+
+static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+	{ PINMUX_DRIVE_REG("DRVCTRL0", 0xe6060300) {
+		{ PIN_QSPI0_SPCLK,    28, 2 },	/* QSPI0_SPCLK */
+		{ PIN_QSPI0_MOSI_IO0, 24, 2 },	/* QSPI0_MOSI_IO0 */
+		{ PIN_QSPI0_MISO_IO1, 20, 2 },	/* QSPI0_MISO_IO1 */
+		{ PIN_QSPI0_IO2,      16, 2 },	/* QSPI0_IO2 */
+		{ PIN_QSPI0_IO3,      12, 2 },	/* QSPI0_IO3 */
+		{ PIN_QSPI0_SSL,       8, 2 },	/* QSPI0_SSL */
+		{ PIN_QSPI1_SPCLK,     4, 2 },	/* QSPI1_SPCLK */
+		{ PIN_QSPI1_MOSI_IO0,  0, 2 },	/* QSPI1_MOSI_IO0 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL1", 0xe6060304) {
+		{ PIN_QSPI1_MISO_IO1, 28, 2 },	/* QSPI1_MISO_IO1 */
+		{ PIN_QSPI1_IO2,      24, 2 },	/* QSPI1_IO2 */
+		{ PIN_QSPI1_IO3,      20, 2 },	/* QSPI1_IO3 */
+		{ PIN_QSPI1_SSL,      16, 2 },	/* QSPI1_SSL */
+		{ PIN_RPC_INT_N,      12, 2 },	/* RPC_INT# */
+		{ PIN_RPC_WP_N,        8, 2 },	/* RPC_WP# */
+		{ PIN_RPC_RESET_N,     4, 2 },	/* RPC_RESET# */
+		{ PIN_AVB_RX_CTL,      0, 3 },	/* AVB_RX_CTL */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL2", 0xe6060308) {
+		{ PIN_AVB_RXC,        28, 3 },	/* AVB_RXC */
+		{ PIN_AVB_RD0,        24, 3 },	/* AVB_RD0 */
+		{ PIN_AVB_RD1,        20, 3 },	/* AVB_RD1 */
+		{ PIN_AVB_RD2,        16, 3 },	/* AVB_RD2 */
+		{ PIN_AVB_RD3,        12, 3 },	/* AVB_RD3 */
+		{ PIN_AVB_TX_CTL,      8, 3 },	/* AVB_TX_CTL */
+		{ PIN_AVB_TXC,         4, 3 },	/* AVB_TXC */
+		{ PIN_AVB_TD0,         0, 3 },	/* AVB_TD0 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL3", 0xe606030c) {
+		{ PIN_AVB_TD1,        28, 3 },	/* AVB_TD1 */
+		{ PIN_AVB_TD2,        24, 3 },	/* AVB_TD2 */
+		{ PIN_AVB_TD3,        20, 3 },	/* AVB_TD3 */
+		{ PIN_AVB_TXCREFCLK,  16, 3 },	/* AVB_TXCREFCLK */
+		{ PIN_AVB_MDIO,       12, 3 },	/* AVB_MDIO */
+		{ RCAR_GP_PIN(2,  9),  8, 3 },	/* AVB_MDC */
+		{ RCAR_GP_PIN(2, 10),  4, 3 },	/* AVB_MAGIC */
+		{ RCAR_GP_PIN(2, 11),  0, 3 },	/* AVB_PHY_INT */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL4", 0xe6060310) {
+		{ RCAR_GP_PIN(2, 12), 28, 3 },	/* AVB_LINK */
+		{ RCAR_GP_PIN(2, 13), 24, 3 },	/* AVB_AVTP_MATCH */
+		{ RCAR_GP_PIN(2, 14), 20, 3 },	/* AVB_AVTP_CAPTURE */
+		{ RCAR_GP_PIN(2,  0), 16, 3 },	/* IRQ0 */
+		{ RCAR_GP_PIN(2,  1), 12, 3 },	/* IRQ1 */
+		{ RCAR_GP_PIN(2,  2),  8, 3 },	/* IRQ2 */
+		{ RCAR_GP_PIN(2,  3),  4, 3 },	/* IRQ3 */
+		{ RCAR_GP_PIN(2,  4),  0, 3 },	/* IRQ4 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL5", 0xe6060314) {
+		{ RCAR_GP_PIN(2,  5), 28, 3 },	/* IRQ5 */
+		{ RCAR_GP_PIN(2,  6), 24, 3 },	/* PWM0 */
+		{ RCAR_GP_PIN(2,  7), 20, 3 },	/* PWM1 */
+		{ RCAR_GP_PIN(2,  8), 16, 3 },	/* PWM2 */
+		{ RCAR_GP_PIN(1,  0), 12, 3 },	/* A0 */
+		{ RCAR_GP_PIN(1,  1),  8, 3 },	/* A1 */
+		{ RCAR_GP_PIN(1,  2),  4, 3 },	/* A2 */
+		{ RCAR_GP_PIN(1,  3),  0, 3 },	/* A3 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL6", 0xe6060318) {
+		{ RCAR_GP_PIN(1,  4), 28, 3 },	/* A4 */
+		{ RCAR_GP_PIN(1,  5), 24, 3 },	/* A5 */
+		{ RCAR_GP_PIN(1,  6), 20, 3 },	/* A6 */
+		{ RCAR_GP_PIN(1,  7), 16, 3 },	/* A7 */
+		{ RCAR_GP_PIN(1,  8), 12, 3 },	/* A8 */
+		{ RCAR_GP_PIN(1,  9),  8, 3 },	/* A9 */
+		{ RCAR_GP_PIN(1, 10),  4, 3 },	/* A10 */
+		{ RCAR_GP_PIN(1, 11),  0, 3 },	/* A11 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL7", 0xe606031c) {
+		{ RCAR_GP_PIN(1, 12), 28, 3 },	/* A12 */
+		{ RCAR_GP_PIN(1, 13), 24, 3 },	/* A13 */
+		{ RCAR_GP_PIN(1, 14), 20, 3 },	/* A14 */
+		{ RCAR_GP_PIN(1, 15), 16, 3 },	/* A15 */
+		{ RCAR_GP_PIN(1, 16), 12, 3 },	/* A16 */
+		{ RCAR_GP_PIN(1, 17),  8, 3 },	/* A17 */
+		{ RCAR_GP_PIN(1, 18),  4, 3 },	/* A18 */
+		{ RCAR_GP_PIN(1, 19),  0, 3 },	/* A19 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL8", 0xe6060320) {
+		{ RCAR_GP_PIN(1, 28), 28, 3 },	/* CLKOUT */
+		{ RCAR_GP_PIN(1, 20), 24, 3 },	/* CS0 */
+		{ RCAR_GP_PIN(1, 21), 20, 3 },	/* CS1_A26 */
+		{ RCAR_GP_PIN(1, 22), 16, 3 },	/* BS */
+		{ RCAR_GP_PIN(1, 23), 12, 3 },	/* RD */
+		{ RCAR_GP_PIN(1, 24),  8, 3 },	/* RD_WR */
+		{ RCAR_GP_PIN(1, 25),  4, 3 },	/* WE0 */
+		{ RCAR_GP_PIN(1, 26),  0, 3 },	/* WE1 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL9", 0xe6060324) {
+		{ RCAR_GP_PIN(1, 27), 28, 3 },	/* EX_WAIT0 */
+		{ PIN_PRESETOUT_N,    24, 3 },	/* PRESETOUT# */
+		{ RCAR_GP_PIN(0,  0), 20, 3 },	/* D0 */
+		{ RCAR_GP_PIN(0,  1), 16, 3 },	/* D1 */
+		{ RCAR_GP_PIN(0,  2), 12, 3 },	/* D2 */
+		{ RCAR_GP_PIN(0,  3),  8, 3 },	/* D3 */
+		{ RCAR_GP_PIN(0,  4),  4, 3 },	/* D4 */
+		{ RCAR_GP_PIN(0,  5),  0, 3 },	/* D5 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL10", 0xe6060328) {
+		{ RCAR_GP_PIN(0,  6), 28, 3 },	/* D6 */
+		{ RCAR_GP_PIN(0,  7), 24, 3 },	/* D7 */
+		{ RCAR_GP_PIN(0,  8), 20, 3 },	/* D8 */
+		{ RCAR_GP_PIN(0,  9), 16, 3 },	/* D9 */
+		{ RCAR_GP_PIN(0, 10), 12, 3 },	/* D10 */
+		{ RCAR_GP_PIN(0, 11),  8, 3 },	/* D11 */
+		{ RCAR_GP_PIN(0, 12),  4, 3 },	/* D12 */
+		{ RCAR_GP_PIN(0, 13),  0, 3 },	/* D13 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL11", 0xe606032c) {
+		{ RCAR_GP_PIN(0, 14), 28, 3 },	/* D14 */
+		{ RCAR_GP_PIN(0, 15), 24, 3 },	/* D15 */
+		{ RCAR_GP_PIN(7,  0), 20, 3 },	/* AVS1 */
+		{ RCAR_GP_PIN(7,  1), 16, 3 },	/* AVS2 */
+		{ RCAR_GP_PIN(7,  2), 12, 3 },	/* GP7_02 */
+		{ RCAR_GP_PIN(7,  3),  8, 3 },	/* GP7_03 */
+		{ PIN_DU_DOTCLKIN0,    4, 2 },	/* DU_DOTCLKIN0 */
+		{ PIN_DU_DOTCLKIN1,    0, 2 },	/* DU_DOTCLKIN1 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
+		{ PIN_DU_DOTCLKIN2,   28, 2 },	/* DU_DOTCLKIN2 */
+		{ PIN_DU_DOTCLKIN3,   24, 2 },	/* DU_DOTCLKIN3 */
+		{ PIN_FSCLKST_N,      20, 2 },	/* FSCLKST# */
+		{ PIN_TMS,             4, 2 },	/* TMS */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL13", 0xe6060334) {
+		{ PIN_TDO,            28, 2 },	/* TDO */
+		{ PIN_ASEBRK,         24, 2 },	/* ASEBRK */
+		{ RCAR_GP_PIN(3,  0), 20, 3 },	/* SD0_CLK */
+		{ RCAR_GP_PIN(3,  1), 16, 3 },	/* SD0_CMD */
+		{ RCAR_GP_PIN(3,  2), 12, 3 },	/* SD0_DAT0 */
+		{ RCAR_GP_PIN(3,  3),  8, 3 },	/* SD0_DAT1 */
+		{ RCAR_GP_PIN(3,  4),  4, 3 },	/* SD0_DAT2 */
+		{ RCAR_GP_PIN(3,  5),  0, 3 },	/* SD0_DAT3 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL14", 0xe6060338) {
+		{ RCAR_GP_PIN(3,  6), 28, 3 },	/* SD1_CLK */
+		{ RCAR_GP_PIN(3,  7), 24, 3 },	/* SD1_CMD */
+		{ RCAR_GP_PIN(3,  8), 20, 3 },	/* SD1_DAT0 */
+		{ RCAR_GP_PIN(3,  9), 16, 3 },	/* SD1_DAT1 */
+		{ RCAR_GP_PIN(3, 10), 12, 3 },	/* SD1_DAT2 */
+		{ RCAR_GP_PIN(3, 11),  8, 3 },	/* SD1_DAT3 */
+		{ RCAR_GP_PIN(4,  0),  4, 3 },	/* SD2_CLK */
+		{ RCAR_GP_PIN(4,  1),  0, 3 },	/* SD2_CMD */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL15", 0xe606033c) {
+		{ RCAR_GP_PIN(4,  2), 28, 3 },	/* SD2_DAT0 */
+		{ RCAR_GP_PIN(4,  3), 24, 3 },	/* SD2_DAT1 */
+		{ RCAR_GP_PIN(4,  4), 20, 3 },	/* SD2_DAT2 */
+		{ RCAR_GP_PIN(4,  5), 16, 3 },	/* SD2_DAT3 */
+		{ RCAR_GP_PIN(4,  6), 12, 3 },	/* SD2_DS */
+		{ RCAR_GP_PIN(4,  7),  8, 3 },	/* SD3_CLK */
+		{ RCAR_GP_PIN(4,  8),  4, 3 },	/* SD3_CMD */
+		{ RCAR_GP_PIN(4,  9),  0, 3 },	/* SD3_DAT0 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL16", 0xe6060340) {
+		{ RCAR_GP_PIN(4, 10), 28, 3 },	/* SD3_DAT1 */
+		{ RCAR_GP_PIN(4, 11), 24, 3 },	/* SD3_DAT2 */
+		{ RCAR_GP_PIN(4, 12), 20, 3 },	/* SD3_DAT3 */
+		{ RCAR_GP_PIN(4, 13), 16, 3 },	/* SD3_DAT4 */
+		{ RCAR_GP_PIN(4, 14), 12, 3 },	/* SD3_DAT5 */
+		{ RCAR_GP_PIN(4, 15),  8, 3 },	/* SD3_DAT6 */
+		{ RCAR_GP_PIN(4, 16),  4, 3 },	/* SD3_DAT7 */
+		{ RCAR_GP_PIN(4, 17),  0, 3 },	/* SD3_DS */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL17", 0xe6060344) {
+		{ RCAR_GP_PIN(3, 12), 28, 3 },	/* SD0_CD */
+		{ RCAR_GP_PIN(3, 13), 24, 3 },	/* SD0_WP */
+		{ RCAR_GP_PIN(3, 14), 20, 3 },	/* SD1_CD */
+		{ RCAR_GP_PIN(3, 15), 16, 3 },	/* SD1_WP */
+		{ RCAR_GP_PIN(5,  0), 12, 3 },	/* SCK0 */
+		{ RCAR_GP_PIN(5,  1),  8, 3 },	/* RX0 */
+		{ RCAR_GP_PIN(5,  2),  4, 3 },	/* TX0 */
+		{ RCAR_GP_PIN(5,  3),  0, 3 },	/* CTS0 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL18", 0xe6060348) {
+		{ RCAR_GP_PIN(5,  4), 28, 3 },	/* RTS0 */
+		{ RCAR_GP_PIN(5,  5), 24, 3 },	/* RX1 */
+		{ RCAR_GP_PIN(5,  6), 20, 3 },	/* TX1 */
+		{ RCAR_GP_PIN(5,  7), 16, 3 },	/* CTS1 */
+		{ RCAR_GP_PIN(5,  8), 12, 3 },	/* RTS1 */
+		{ RCAR_GP_PIN(5,  9),  8, 3 },	/* SCK2 */
+		{ RCAR_GP_PIN(5, 10),  4, 3 },	/* TX2 */
+		{ RCAR_GP_PIN(5, 11),  0, 3 },	/* RX2 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL19", 0xe606034c) {
+		{ RCAR_GP_PIN(5, 12), 28, 3 },	/* HSCK0 */
+		{ RCAR_GP_PIN(5, 13), 24, 3 },	/* HRX0 */
+		{ RCAR_GP_PIN(5, 14), 20, 3 },	/* HTX0 */
+		{ RCAR_GP_PIN(5, 15), 16, 3 },	/* HCTS0 */
+		{ RCAR_GP_PIN(5, 16), 12, 3 },	/* HRTS0 */
+		{ RCAR_GP_PIN(5, 17),  8, 3 },	/* MSIOF0_SCK */
+		{ RCAR_GP_PIN(5, 18),  4, 3 },	/* MSIOF0_SYNC */
+		{ RCAR_GP_PIN(5, 19),  0, 3 },	/* MSIOF0_SS1 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL20", 0xe6060350) {
+		{ RCAR_GP_PIN(5, 20), 28, 3 },	/* MSIOF0_TXD */
+		{ RCAR_GP_PIN(5, 21), 24, 3 },	/* MSIOF0_SS2 */
+		{ RCAR_GP_PIN(5, 22), 20, 3 },	/* MSIOF0_RXD */
+		{ RCAR_GP_PIN(5, 23), 16, 3 },	/* MLB_CLK */
+		{ RCAR_GP_PIN(5, 24), 12, 3 },	/* MLB_SIG */
+		{ RCAR_GP_PIN(5, 25),  8, 3 },	/* MLB_DAT */
+		{ PIN_MLB_REF,         4, 3 },	/* MLB_REF */
+		{ RCAR_GP_PIN(6,  0),  0, 3 },	/* SSI_SCK01239 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL21", 0xe6060354) {
+		{ RCAR_GP_PIN(6,  1), 28, 3 },	/* SSI_WS01239 */
+		{ RCAR_GP_PIN(6,  2), 24, 3 },	/* SSI_SDATA0 */
+		{ RCAR_GP_PIN(6,  3), 20, 3 },	/* SSI_SDATA1 */
+		{ RCAR_GP_PIN(6,  4), 16, 3 },	/* SSI_SDATA2 */
+		{ RCAR_GP_PIN(6,  5), 12, 3 },	/* SSI_SCK349 */
+		{ RCAR_GP_PIN(6,  6),  8, 3 },	/* SSI_WS349 */
+		{ RCAR_GP_PIN(6,  7),  4, 3 },	/* SSI_SDATA3 */
+		{ RCAR_GP_PIN(6,  8),  0, 3 },	/* SSI_SCK4 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL22", 0xe6060358) {
+		{ RCAR_GP_PIN(6,  9), 28, 3 },	/* SSI_WS4 */
+		{ RCAR_GP_PIN(6, 10), 24, 3 },	/* SSI_SDATA4 */
+		{ RCAR_GP_PIN(6, 11), 20, 3 },	/* SSI_SCK5 */
+		{ RCAR_GP_PIN(6, 12), 16, 3 },	/* SSI_WS5 */
+		{ RCAR_GP_PIN(6, 13), 12, 3 },	/* SSI_SDATA5 */
+		{ RCAR_GP_PIN(6, 14),  8, 3 },	/* SSI_SCK6 */
+		{ RCAR_GP_PIN(6, 15),  4, 3 },	/* SSI_WS6 */
+		{ RCAR_GP_PIN(6, 16),  0, 3 },	/* SSI_SDATA6 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL23", 0xe606035c) {
+		{ RCAR_GP_PIN(6, 17), 28, 3 },	/* SSI_SCK78 */
+		{ RCAR_GP_PIN(6, 18), 24, 3 },	/* SSI_WS78 */
+		{ RCAR_GP_PIN(6, 19), 20, 3 },	/* SSI_SDATA7 */
+		{ RCAR_GP_PIN(6, 20), 16, 3 },	/* SSI_SDATA8 */
+		{ RCAR_GP_PIN(6, 21), 12, 3 },	/* SSI_SDATA9 */
+		{ RCAR_GP_PIN(6, 22),  8, 3 },	/* AUDIO_CLKA */
+		{ RCAR_GP_PIN(6, 23),  4, 3 },	/* AUDIO_CLKB */
+		{ RCAR_GP_PIN(6, 24),  0, 3 },	/* USB0_PWEN */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL24", 0xe6060360) {
+		{ RCAR_GP_PIN(6, 25), 28, 3 },	/* USB0_OVC */
+		{ RCAR_GP_PIN(6, 26), 24, 3 },	/* USB1_PWEN */
+		{ RCAR_GP_PIN(6, 27), 20, 3 },	/* USB1_OVC */
+		{ RCAR_GP_PIN(6, 28), 16, 3 },	/* USB30_PWEN */
+		{ RCAR_GP_PIN(6, 29), 12, 3 },	/* USB30_OVC */
+		{ RCAR_GP_PIN(6, 30),  8, 3 },	/* USB2_CH3_PWEN */
+		{ RCAR_GP_PIN(6, 31),  4, 3 },	/* USB2_CH3_OVC */
+	} },
+	{ },
+};
+
+enum ioctrl_regs {
+	POCCTRL,
+	TDSELCTRL,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+	[POCCTRL] = { 0xe6060380, },
+	[TDSELCTRL] = { 0xe60603c0, },
+	{ /* sentinel */ },
+};
+
+static int r8a77951_pin_to_pocctrl(struct sh_pfc *pfc,
+				   unsigned int pin, u32 *pocctrl)
+{
+	int bit = -EINVAL;
+
+	*pocctrl = pinmux_ioctrl_regs[POCCTRL].reg;
+
+	if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
+		bit = pin & 0x1f;
+
+	if (pin >= RCAR_GP_PIN(4, 0) && pin <= RCAR_GP_PIN(4, 17))
+		bit = (pin & 0x1f) + 12;
+
+	return bit;
+}
+
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+	{ PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
+		[ 0] = PIN_QSPI0_SPCLK,		/* QSPI0_SPCLK */
+		[ 1] = PIN_QSPI0_MOSI_IO0,	/* QSPI0_MOSI_IO0 */
+		[ 2] = PIN_QSPI0_MISO_IO1,	/* QSPI0_MISO_IO1 */
+		[ 3] = PIN_QSPI0_IO2,		/* QSPI0_IO2 */
+		[ 4] = PIN_QSPI0_IO3,		/* QSPI0_IO3 */
+		[ 5] = PIN_QSPI0_SSL,		/* QSPI0_SSL */
+		[ 6] = PIN_QSPI1_SPCLK,		/* QSPI1_SPCLK */
+		[ 7] = PIN_QSPI1_MOSI_IO0,	/* QSPI1_MOSI_IO0 */
+		[ 8] = PIN_QSPI1_MISO_IO1,	/* QSPI1_MISO_IO1 */
+		[ 9] = PIN_QSPI1_IO2,		/* QSPI1_IO2 */
+		[10] = PIN_QSPI1_IO3,		/* QSPI1_IO3 */
+		[11] = PIN_QSPI1_SSL,		/* QSPI1_SSL */
+		[12] = PIN_RPC_INT_N,		/* RPC_INT# */
+		[13] = PIN_RPC_WP_N,		/* RPC_WP# */
+		[14] = PIN_RPC_RESET_N,		/* RPC_RESET# */
+		[15] = PIN_AVB_RX_CTL,		/* AVB_RX_CTL */
+		[16] = PIN_AVB_RXC,		/* AVB_RXC */
+		[17] = PIN_AVB_RD0,		/* AVB_RD0 */
+		[18] = PIN_AVB_RD1,		/* AVB_RD1 */
+		[19] = PIN_AVB_RD2,		/* AVB_RD2 */
+		[20] = PIN_AVB_RD3,		/* AVB_RD3 */
+		[21] = PIN_AVB_TX_CTL,		/* AVB_TX_CTL */
+		[22] = PIN_AVB_TXC,		/* AVB_TXC */
+		[23] = PIN_AVB_TD0,		/* AVB_TD0 */
+		[24] = PIN_AVB_TD1,		/* AVB_TD1 */
+		[25] = PIN_AVB_TD2,		/* AVB_TD2 */
+		[26] = PIN_AVB_TD3,		/* AVB_TD3 */
+		[27] = PIN_AVB_TXCREFCLK,	/* AVB_TXCREFCLK */
+		[28] = PIN_AVB_MDIO,		/* AVB_MDIO */
+		[29] = RCAR_GP_PIN(2,  9),	/* AVB_MDC */
+		[30] = RCAR_GP_PIN(2, 10),	/* AVB_MAGIC */
+		[31] = RCAR_GP_PIN(2, 11),	/* AVB_PHY_INT */
+	} },
+	{ PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
+		[ 0] = RCAR_GP_PIN(2, 12),	/* AVB_LINK */
+		[ 1] = RCAR_GP_PIN(2, 13),	/* AVB_AVTP_MATCH_A */
+		[ 2] = RCAR_GP_PIN(2, 14),	/* AVB_AVTP_CAPTURE_A */
+		[ 3] = RCAR_GP_PIN(2,  0),	/* IRQ0 */
+		[ 4] = RCAR_GP_PIN(2,  1),	/* IRQ1 */
+		[ 5] = RCAR_GP_PIN(2,  2),	/* IRQ2 */
+		[ 6] = RCAR_GP_PIN(2,  3),	/* IRQ3 */
+		[ 7] = RCAR_GP_PIN(2,  4),	/* IRQ4 */
+		[ 8] = RCAR_GP_PIN(2,  5),	/* IRQ5 */
+		[ 9] = RCAR_GP_PIN(2,  6),	/* PWM0 */
+		[10] = RCAR_GP_PIN(2,  7),	/* PWM1_A */
+		[11] = RCAR_GP_PIN(2,  8),	/* PWM2_A */
+		[12] = RCAR_GP_PIN(1,  0),	/* A0 */
+		[13] = RCAR_GP_PIN(1,  1),	/* A1 */
+		[14] = RCAR_GP_PIN(1,  2),	/* A2 */
+		[15] = RCAR_GP_PIN(1,  3),	/* A3 */
+		[16] = RCAR_GP_PIN(1,  4),	/* A4 */
+		[17] = RCAR_GP_PIN(1,  5),	/* A5 */
+		[18] = RCAR_GP_PIN(1,  6),	/* A6 */
+		[19] = RCAR_GP_PIN(1,  7),	/* A7 */
+		[20] = RCAR_GP_PIN(1,  8),	/* A8 */
+		[21] = RCAR_GP_PIN(1,  9),	/* A9 */
+		[22] = RCAR_GP_PIN(1, 10),	/* A10 */
+		[23] = RCAR_GP_PIN(1, 11),	/* A11 */
+		[24] = RCAR_GP_PIN(1, 12),	/* A12 */
+		[25] = RCAR_GP_PIN(1, 13),	/* A13 */
+		[26] = RCAR_GP_PIN(1, 14),	/* A14 */
+		[27] = RCAR_GP_PIN(1, 15),	/* A15 */
+		[28] = RCAR_GP_PIN(1, 16),	/* A16 */
+		[29] = RCAR_GP_PIN(1, 17),	/* A17 */
+		[30] = RCAR_GP_PIN(1, 18),	/* A18 */
+		[31] = RCAR_GP_PIN(1, 19),	/* A19 */
+	} },
+	{ PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
+		[ 0] = RCAR_GP_PIN(1, 28),	/* CLKOUT */
+		[ 1] = RCAR_GP_PIN(1, 20),	/* CS0_N */
+		[ 2] = RCAR_GP_PIN(1, 21),	/* CS1_N */
+		[ 3] = RCAR_GP_PIN(1, 22),	/* BS_N */
+		[ 4] = RCAR_GP_PIN(1, 23),	/* RD_N */
+		[ 5] = RCAR_GP_PIN(1, 24),	/* RD_WR_N */
+		[ 6] = RCAR_GP_PIN(1, 25),	/* WE0_N */
+		[ 7] = RCAR_GP_PIN(1, 26),	/* WE1_N */
+		[ 8] = RCAR_GP_PIN(1, 27),	/* EX_WAIT0_A */
+		[ 9] = PIN_PRESETOUT_N,		/* PRESETOUT# */
+		[10] = RCAR_GP_PIN(0,  0),	/* D0 */
+		[11] = RCAR_GP_PIN(0,  1),	/* D1 */
+		[12] = RCAR_GP_PIN(0,  2),	/* D2 */
+		[13] = RCAR_GP_PIN(0,  3),	/* D3 */
+		[14] = RCAR_GP_PIN(0,  4),	/* D4 */
+		[15] = RCAR_GP_PIN(0,  5),	/* D5 */
+		[16] = RCAR_GP_PIN(0,  6),	/* D6 */
+		[17] = RCAR_GP_PIN(0,  7),	/* D7 */
+		[18] = RCAR_GP_PIN(0,  8),	/* D8 */
+		[19] = RCAR_GP_PIN(0,  9),	/* D9 */
+		[20] = RCAR_GP_PIN(0, 10),	/* D10 */
+		[21] = RCAR_GP_PIN(0, 11),	/* D11 */
+		[22] = RCAR_GP_PIN(0, 12),	/* D12 */
+		[23] = RCAR_GP_PIN(0, 13),	/* D13 */
+		[24] = RCAR_GP_PIN(0, 14),	/* D14 */
+		[25] = RCAR_GP_PIN(0, 15),	/* D15 */
+		[26] = RCAR_GP_PIN(7,  0),	/* AVS1 */
+		[27] = RCAR_GP_PIN(7,  1),	/* AVS2 */
+		[28] = RCAR_GP_PIN(7,  2),	/* GP7_02 */
+		[29] = RCAR_GP_PIN(7,  3),	/* GP7_03 */
+		[30] = PIN_DU_DOTCLKIN0,	/* DU_DOTCLKIN0 */
+		[31] = PIN_DU_DOTCLKIN1,	/* DU_DOTCLKIN1 */
+	} },
+	{ PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+		[ 0] = PIN_DU_DOTCLKIN2,	/* DU_DOTCLKIN2 */
+		[ 1] = PIN_DU_DOTCLKIN3,	/* DU_DOTCLKIN3 */
+		[ 2] = PIN_FSCLKST_N,		/* FSCLKST# */
+		[ 3] = PIN_EXTALR,		/* EXTALR */
+		[ 4] = PIN_TRST_N,		/* TRST# */
+		[ 5] = PIN_TCK,			/* TCK */
+		[ 6] = PIN_TMS,			/* TMS */
+		[ 7] = PIN_TDI,			/* TDI */
+		[ 8] = SH_PFC_PIN_NONE,
+		[ 9] = PIN_ASEBRK,		/* ASEBRK */
+		[10] = RCAR_GP_PIN(3,  0),	/* SD0_CLK */
+		[11] = RCAR_GP_PIN(3,  1),	/* SD0_CMD */
+		[12] = RCAR_GP_PIN(3,  2),	/* SD0_DAT0 */
+		[13] = RCAR_GP_PIN(3,  3),	/* SD0_DAT1 */
+		[14] = RCAR_GP_PIN(3,  4),	/* SD0_DAT2 */
+		[15] = RCAR_GP_PIN(3,  5),	/* SD0_DAT3 */
+		[16] = RCAR_GP_PIN(3,  6),	/* SD1_CLK */
+		[17] = RCAR_GP_PIN(3,  7),	/* SD1_CMD */
+		[18] = RCAR_GP_PIN(3,  8),	/* SD1_DAT0 */
+		[19] = RCAR_GP_PIN(3,  9),	/* SD1_DAT1 */
+		[20] = RCAR_GP_PIN(3, 10),	/* SD1_DAT2 */
+		[21] = RCAR_GP_PIN(3, 11),	/* SD1_DAT3 */
+		[22] = RCAR_GP_PIN(4,  0),	/* SD2_CLK */
+		[23] = RCAR_GP_PIN(4,  1),	/* SD2_CMD */
+		[24] = RCAR_GP_PIN(4,  2),	/* SD2_DAT0 */
+		[25] = RCAR_GP_PIN(4,  3),	/* SD2_DAT1 */
+		[26] = RCAR_GP_PIN(4,  4),	/* SD2_DAT2 */
+		[27] = RCAR_GP_PIN(4,  5),	/* SD2_DAT3 */
+		[28] = RCAR_GP_PIN(4,  6),	/* SD2_DS */
+		[29] = RCAR_GP_PIN(4,  7),	/* SD3_CLK */
+		[30] = RCAR_GP_PIN(4,  8),	/* SD3_CMD */
+		[31] = RCAR_GP_PIN(4,  9),	/* SD3_DAT0 */
+	} },
+	{ PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
+		[ 0] = RCAR_GP_PIN(4, 10),	/* SD3_DAT1 */
+		[ 1] = RCAR_GP_PIN(4, 11),	/* SD3_DAT2 */
+		[ 2] = RCAR_GP_PIN(4, 12),	/* SD3_DAT3 */
+		[ 3] = RCAR_GP_PIN(4, 13),	/* SD3_DAT4 */
+		[ 4] = RCAR_GP_PIN(4, 14),	/* SD3_DAT5 */
+		[ 5] = RCAR_GP_PIN(4, 15),	/* SD3_DAT6 */
+		[ 6] = RCAR_GP_PIN(4, 16),	/* SD3_DAT7 */
+		[ 7] = RCAR_GP_PIN(4, 17),	/* SD3_DS */
+		[ 8] = RCAR_GP_PIN(3, 12),	/* SD0_CD */
+		[ 9] = RCAR_GP_PIN(3, 13),	/* SD0_WP */
+		[10] = RCAR_GP_PIN(3, 14),	/* SD1_CD */
+		[11] = RCAR_GP_PIN(3, 15),	/* SD1_WP */
+		[12] = RCAR_GP_PIN(5,  0),	/* SCK0 */
+		[13] = RCAR_GP_PIN(5,  1),	/* RX0 */
+		[14] = RCAR_GP_PIN(5,  2),	/* TX0 */
+		[15] = RCAR_GP_PIN(5,  3),	/* CTS0_N */
+		[16] = RCAR_GP_PIN(5,  4),	/* RTS0_N */
+		[17] = RCAR_GP_PIN(5,  5),	/* RX1_A */
+		[18] = RCAR_GP_PIN(5,  6),	/* TX1_A */
+		[19] = RCAR_GP_PIN(5,  7),	/* CTS1_N */
+		[20] = RCAR_GP_PIN(5,  8),	/* RTS1_N */
+		[21] = RCAR_GP_PIN(5,  9),	/* SCK2 */
+		[22] = RCAR_GP_PIN(5, 10),	/* TX2_A */
+		[23] = RCAR_GP_PIN(5, 11),	/* RX2_A */
+		[24] = RCAR_GP_PIN(5, 12),	/* HSCK0 */
+		[25] = RCAR_GP_PIN(5, 13),	/* HRX0 */
+		[26] = RCAR_GP_PIN(5, 14),	/* HTX0 */
+		[27] = RCAR_GP_PIN(5, 15),	/* HCTS0_N */
+		[28] = RCAR_GP_PIN(5, 16),	/* HRTS0_N */
+		[29] = RCAR_GP_PIN(5, 17),	/* MSIOF0_SCK */
+		[30] = RCAR_GP_PIN(5, 18),	/* MSIOF0_SYNC */
+		[31] = RCAR_GP_PIN(5, 19),	/* MSIOF0_SS1 */
+	} },
+	{ PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
+		[ 0] = RCAR_GP_PIN(5, 20),	/* MSIOF0_TXD */
+		[ 1] = RCAR_GP_PIN(5, 21),	/* MSIOF0_SS2 */
+		[ 2] = RCAR_GP_PIN(5, 22),	/* MSIOF0_RXD */
+		[ 3] = RCAR_GP_PIN(5, 23),	/* MLB_CLK */
+		[ 4] = RCAR_GP_PIN(5, 24),	/* MLB_SIG */
+		[ 5] = RCAR_GP_PIN(5, 25),	/* MLB_DAT */
+		[ 6] = PIN_MLB_REF,		/* MLB_REF */
+		[ 7] = RCAR_GP_PIN(6,  0),	/* SSI_SCK01239 */
+		[ 8] = RCAR_GP_PIN(6,  1),	/* SSI_WS01239 */
+		[ 9] = RCAR_GP_PIN(6,  2),	/* SSI_SDATA0 */
+		[10] = RCAR_GP_PIN(6,  3),	/* SSI_SDATA1_A */
+		[11] = RCAR_GP_PIN(6,  4),	/* SSI_SDATA2_A */
+		[12] = RCAR_GP_PIN(6,  5),	/* SSI_SCK349 */
+		[13] = RCAR_GP_PIN(6,  6),	/* SSI_WS349 */
+		[14] = RCAR_GP_PIN(6,  7),	/* SSI_SDATA3 */
+		[15] = RCAR_GP_PIN(6,  8),	/* SSI_SCK4 */
+		[16] = RCAR_GP_PIN(6,  9),	/* SSI_WS4 */
+		[17] = RCAR_GP_PIN(6, 10),	/* SSI_SDATA4 */
+		[18] = RCAR_GP_PIN(6, 11),	/* SSI_SCK5 */
+		[19] = RCAR_GP_PIN(6, 12),	/* SSI_WS5 */
+		[20] = RCAR_GP_PIN(6, 13),	/* SSI_SDATA5 */
+		[21] = RCAR_GP_PIN(6, 14),	/* SSI_SCK6 */
+		[22] = RCAR_GP_PIN(6, 15),	/* SSI_WS6 */
+		[23] = RCAR_GP_PIN(6, 16),	/* SSI_SDATA6 */
+		[24] = RCAR_GP_PIN(6, 17),	/* SSI_SCK78 */
+		[25] = RCAR_GP_PIN(6, 18),	/* SSI_WS78 */
+		[26] = RCAR_GP_PIN(6, 19),	/* SSI_SDATA7 */
+		[27] = RCAR_GP_PIN(6, 20),	/* SSI_SDATA8 */
+		[28] = RCAR_GP_PIN(6, 21),	/* SSI_SDATA9_A */
+		[29] = RCAR_GP_PIN(6, 22),	/* AUDIO_CLKA_A */
+		[30] = RCAR_GP_PIN(6, 23),	/* AUDIO_CLKB_B */
+		[31] = RCAR_GP_PIN(6, 24),	/* USB0_PWEN */
+	} },
+	{ PINMUX_BIAS_REG("PUEN6", 0xe6060418, "PUD6", 0xe6060458) {
+		[ 0] = RCAR_GP_PIN(6, 25),	/* USB0_OVC */
+		[ 1] = RCAR_GP_PIN(6, 26),	/* USB1_PWEN */
+		[ 2] = RCAR_GP_PIN(6, 27),	/* USB1_OVC */
+		[ 3] = RCAR_GP_PIN(6, 28),	/* USB30_PWEN */
+		[ 4] = RCAR_GP_PIN(6, 29),	/* USB30_OVC */
+		[ 5] = RCAR_GP_PIN(6, 30),	/* USB2_CH3_PWEN */
+		[ 6] = RCAR_GP_PIN(6, 31),	/* USB2_CH3_OVC */
+		[ 7] = SH_PFC_PIN_NONE,
+		[ 8] = SH_PFC_PIN_NONE,
+		[ 9] = SH_PFC_PIN_NONE,
+		[10] = SH_PFC_PIN_NONE,
+		[11] = SH_PFC_PIN_NONE,
+		[12] = SH_PFC_PIN_NONE,
+		[13] = SH_PFC_PIN_NONE,
+		[14] = SH_PFC_PIN_NONE,
+		[15] = SH_PFC_PIN_NONE,
+		[16] = SH_PFC_PIN_NONE,
+		[17] = SH_PFC_PIN_NONE,
+		[18] = SH_PFC_PIN_NONE,
+		[19] = SH_PFC_PIN_NONE,
+		[20] = SH_PFC_PIN_NONE,
+		[21] = SH_PFC_PIN_NONE,
+		[22] = SH_PFC_PIN_NONE,
+		[23] = SH_PFC_PIN_NONE,
+		[24] = SH_PFC_PIN_NONE,
+		[25] = SH_PFC_PIN_NONE,
+		[26] = SH_PFC_PIN_NONE,
+		[27] = SH_PFC_PIN_NONE,
+		[28] = SH_PFC_PIN_NONE,
+		[29] = SH_PFC_PIN_NONE,
+		[30] = SH_PFC_PIN_NONE,
+		[31] = SH_PFC_PIN_NONE,
+	} },
+	{ /* sentinel */ },
+};
+
+static unsigned int r8a77951_pinmux_get_bias(struct sh_pfc *pfc,
+					     unsigned int pin)
+{
+	const struct pinmux_bias_reg *reg;
+	unsigned int bit;
+
+	reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+	if (!reg)
+		return PIN_CONFIG_BIAS_DISABLE;
+
+	if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
+		return PIN_CONFIG_BIAS_DISABLE;
+	else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
+		return PIN_CONFIG_BIAS_PULL_UP;
+	else
+		return PIN_CONFIG_BIAS_PULL_DOWN;
+}
+
+static void r8a77951_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+				     unsigned int bias)
+{
+	const struct pinmux_bias_reg *reg;
+	u32 enable, updown;
+	unsigned int bit;
+
+	reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+	if (!reg)
+		return;
+
+	enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
+	if (bias != PIN_CONFIG_BIAS_DISABLE)
+		enable |= BIT(bit);
+
+	updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
+	if (bias == PIN_CONFIG_BIAS_PULL_UP)
+		updown |= BIT(bit);
+
+	sh_pfc_write(pfc, reg->pud, updown);
+	sh_pfc_write(pfc, reg->puen, enable);
+}
+
+static const struct sh_pfc_soc_operations r8a77951_pinmux_ops = {
+	.pin_to_pocctrl = r8a77951_pin_to_pocctrl,
+	.get_bias = r8a77951_pinmux_get_bias,
+	.set_bias = r8a77951_pinmux_set_bias,
+};
+
+const struct sh_pfc_soc_info r8a77951_pinmux_info = {
+	.name = "r8a77951_pfc",
+	.ops = &r8a77951_pinmux_ops,
+	.unlock_reg = 0xe6060000, /* PMMR */
+
+	.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+	.pins = pinmux_pins,
+	.nr_pins = ARRAY_SIZE(pinmux_pins),
+	.groups = pinmux_groups,
+	.nr_groups = ARRAY_SIZE(pinmux_groups),
+	.functions = pinmux_functions,
+	.nr_functions = ARRAY_SIZE(pinmux_functions),
+
+	.cfg_regs = pinmux_config_regs,
+	.drive_regs = pinmux_drive_regs,
+	.bias_regs = pinmux_bias_regs,
+	.ioctrl_regs = pinmux_ioctrl_regs,
+
+	.pinmux_data = pinmux_data,
+	.pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index 8bdf33c..6616f52 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -5998,7 +5998,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
 		{ PIN_DU_DOTCLKIN1,    0, 2 },	/* DU_DOTCLKIN1 */
 	} },
 	{ PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
-		{ PIN_DU_DOTCLKIN3,   28, 2 },	/* DU_DOTCLKIN3 */
+		{ PIN_DU_DOTCLKIN3,   24, 2 },	/* DU_DOTCLKIN3 */
 		{ PIN_FSCLKST,        20, 2 },	/* FSCLKST */
 		{ PIN_TMS,             4, 2 },	/* TMS */
 	} },
@@ -6254,8 +6254,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
 		[31] = PIN_DU_DOTCLKIN1,	/* DU_DOTCLKIN1 */
 	} },
 	{ PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
-		[ 0] = PIN_DU_DOTCLKIN3,	/* DU_DOTCLKIN3 */
-		[ 1] = SH_PFC_PIN_NONE,
+		[ 0] = SH_PFC_PIN_NONE,
+		[ 1] = PIN_DU_DOTCLKIN3,	/* DU_DOTCLKIN3 */
 		[ 2] = PIN_FSCLKST,		/* FSCLKST */
 		[ 3] = PIN_EXTALR,		/* EXTALR*/
 		[ 4] = PIN_TRST_N,		/* TRST# */
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
index 4a95867..908837e 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
@@ -497,17 +497,15 @@ enum {
 	SD_WP_MARK, SD_CLK_MARK, SD_CMD_MARK,
 	CRX0_MARK, CRX1_MARK,
 	CTX0_MARK, CTX1_MARK,
+	CRX0_CRX1_MARK, CTX0_CTX1_MARK,
 
 	PWM1A_MARK, PWM1B_MARK, PWM1C_MARK, PWM1D_MARK,
 	PWM1E_MARK, PWM1F_MARK, PWM1G_MARK, PWM1H_MARK,
 	PWM2A_MARK, PWM2B_MARK, PWM2C_MARK, PWM2D_MARK,
 	PWM2E_MARK, PWM2F_MARK, PWM2G_MARK, PWM2H_MARK,
 	IERXD_MARK, IETXD_MARK,
-	CRX0_CRX1_MARK,
 	WDTOVF_MARK,
 
-	CRX0X1_MARK,
-
 	/* DMAC */
 	TEND0_MARK, DACK0_MARK, DREQ0_MARK,
 	TEND1_MARK, DACK1_MARK, DREQ1_MARK,
@@ -995,12 +993,12 @@ static const u16 pinmux_data[] = {
 
 	PINMUX_DATA(PJ3_DATA, PJ3MD_00),
 	PINMUX_DATA(CRX1_MARK, PJ3MD_01),
-	PINMUX_DATA(CRX0X1_MARK, PJ3MD_10),
+	PINMUX_DATA(CRX0_CRX1_MARK, PJ3MD_10),
 	PINMUX_DATA(IRQ1_PJ_MARK, PJ3MD_11),
 
 	PINMUX_DATA(PJ2_DATA, PJ2MD_000),
 	PINMUX_DATA(CTX1_MARK, PJ2MD_001),
-	PINMUX_DATA(CRX0_CRX1_MARK, PJ2MD_010),
+	PINMUX_DATA(CTX0_CTX1_MARK, PJ2MD_010),
 	PINMUX_DATA(CS2_MARK, PJ2MD_011),
 	PINMUX_DATA(SCK0_MARK, PJ2MD_100),
 	PINMUX_DATA(LCD_M_DISP_MARK, PJ2MD_101),
@@ -1245,6 +1243,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
 	GPIO_FN(CTX1),
 	GPIO_FN(CRX1),
 	GPIO_FN(CTX0),
+	GPIO_FN(CTX0_CTX1),
 	GPIO_FN(CRX0),
 	GPIO_FN(CRX0_CRX1),
 
@@ -2020,18 +2019,18 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
 
 	{ PINMUX_CFG_REG("PKIOR0", 0xfffe3932, 16, 1, GROUP(
 		0, 0, 0, 0, 0, 0, 0, 0,
-		PJ11_IN, PJ11_OUT,
-		PJ10_IN, PJ10_OUT,
-		PJ9_IN, PJ9_OUT,
-		PJ8_IN, PJ8_OUT,
-		PJ7_IN, PJ7_OUT,
-		PJ6_IN, PJ6_OUT,
-		PJ5_IN, PJ5_OUT,
-		PJ4_IN, PJ4_OUT,
-		PJ3_IN, PJ3_OUT,
-		PJ2_IN, PJ2_OUT,
-		PJ1_IN, PJ1_OUT,
-		PJ0_IN, PJ0_OUT ))
+		PK11_IN, PK11_OUT,
+		PK10_IN, PK10_OUT,
+		PK9_IN, PK9_OUT,
+		PK8_IN, PK8_OUT,
+		PK7_IN, PK7_OUT,
+		PK6_IN, PK6_OUT,
+		PK5_IN, PK5_OUT,
+		PK4_IN, PK4_OUT,
+		PK3_IN, PK3_OUT,
+		PK2_IN, PK2_OUT,
+		PK1_IN, PK1_OUT,
+		PK0_IN, PK0_OUT ))
 	},
 	{}
 };
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
index 6cbb18e..d20974a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -737,13 +737,12 @@ enum {
 	CRX0_MARK, CTX0_MARK,
 	CRX1_MARK, CTX1_MARK,
 	CRX2_MARK, CTX2_MARK,
-	CRX0_CRX1_MARK,
-	CRX0_CRX1_CRX2_MARK,
-	CTX0CTX1CTX2_MARK,
+	CRX0_CRX1_MARK, CTX0_CTX1_MARK,
+	CRX0_CRX1_CRX2_MARK, CTX0_CTX1_CTX2_MARK,
 	CRX1_PJ22_MARK, CTX1_PJ23_MARK,
 	CRX2_PJ20_MARK, CTX2_PJ21_MARK,
-	CRX0CRX1_PJ22_MARK,
-	CRX0CRX1CRX2_PJ20_MARK,
+	CRX0_CRX1_PJ22_MARK, CTX0_CTX1_PJ23_MARK,
+	CRX0_CRX1_CRX2_PJ20_MARK, CTX0_CTX1_CTX2_PJ21_MARK,
 
 	/* VDC */
 	DV_CLK_MARK,
@@ -821,6 +820,7 @@ static const u16 pinmux_data[] = {
 	PINMUX_DATA(CS3_MARK, PC8MD_001),
 	PINMUX_DATA(TXD7_MARK, PC8MD_010),
 	PINMUX_DATA(CTX1_MARK, PC8MD_011),
+	PINMUX_DATA(CTX0_CTX1_MARK, PC8MD_100),
 
 	PINMUX_DATA(PC7_DATA, PC7MD_000),
 	PINMUX_DATA(CKE_MARK, PC7MD_001),
@@ -833,11 +833,12 @@ static const u16 pinmux_data[] = {
 	PINMUX_DATA(CAS_MARK, PC6MD_001),
 	PINMUX_DATA(SCK7_MARK, PC6MD_010),
 	PINMUX_DATA(CTX0_MARK, PC6MD_011),
+	PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC6MD_100),
 
 	PINMUX_DATA(PC5_DATA, PC5MD_000),
 	PINMUX_DATA(RAS_MARK, PC5MD_001),
 	PINMUX_DATA(CRX0_MARK, PC5MD_011),
-	PINMUX_DATA(CTX0CTX1CTX2_MARK, PC5MD_100),
+	PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC5MD_100),
 	PINMUX_DATA(IRQ0_PC_MARK, PC5MD_101),
 
 	PINMUX_DATA(PC4_DATA, PC4MD_00),
@@ -1289,30 +1290,32 @@ static const u16 pinmux_data[] = {
 	PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010),
 	PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011),
 	PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100),
-	PINMUX_DATA(CTX1_MARK, PJ23MD_101),
+	PINMUX_DATA(CTX1_PJ23_MARK, PJ23MD_101),
+	PINMUX_DATA(CTX0_CTX1_PJ23_MARK, PJ23MD_110),
 
 	PINMUX_DATA(PJ22_DATA, PJ22MD_000),
 	PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001),
 	PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010),
 	PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011),
 	PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100),
-	PINMUX_DATA(CRX1_MARK, PJ22MD_101),
-	PINMUX_DATA(CRX0_CRX1_MARK, PJ22MD_110),
+	PINMUX_DATA(CRX1_PJ22_MARK, PJ22MD_101),
+	PINMUX_DATA(CRX0_CRX1_PJ22_MARK, PJ22MD_110),
 
 	PINMUX_DATA(PJ21_DATA, PJ21MD_000),
 	PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001),
 	PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010),
 	PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011),
 	PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100),
-	PINMUX_DATA(CTX2_MARK, PJ21MD_101),
+	PINMUX_DATA(CTX2_PJ21_MARK, PJ21MD_101),
+	PINMUX_DATA(CTX0_CTX1_CTX2_PJ21_MARK, PJ21MD_110),
 
 	PINMUX_DATA(PJ20_DATA, PJ20MD_000),
 	PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001),
 	PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010),
 	PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011),
 	PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100),
-	PINMUX_DATA(CRX2_MARK, PJ20MD_101),
-	PINMUX_DATA(CRX0CRX1CRX2_PJ20_MARK, PJ20MD_110),
+	PINMUX_DATA(CRX2_PJ20_MARK, PJ20MD_101),
+	PINMUX_DATA(CRX0_CRX1_CRX2_PJ20_MARK, PJ20MD_110),
 
 	PINMUX_DATA(PJ19_DATA, PJ19MD_000),
 	PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001),
@@ -1663,12 +1666,24 @@ static const struct pinmux_func pinmux_func_gpios[] = {
 	GPIO_FN(WDTOVF),
 
 	/* CAN */
+	GPIO_FN(CTX2),
+	GPIO_FN(CRX2),
 	GPIO_FN(CTX1),
 	GPIO_FN(CRX1),
 	GPIO_FN(CTX0),
 	GPIO_FN(CRX0),
+	GPIO_FN(CTX0_CTX1),
 	GPIO_FN(CRX0_CRX1),
+	GPIO_FN(CTX0_CTX1_CTX2),
 	GPIO_FN(CRX0_CRX1_CRX2),
+	GPIO_FN(CTX2_PJ21),
+	GPIO_FN(CRX2_PJ20),
+	GPIO_FN(CTX1_PJ23),
+	GPIO_FN(CRX1_PJ22),
+	GPIO_FN(CTX0_CTX1_PJ23),
+	GPIO_FN(CRX0_CRX1_PJ22),
+	GPIO_FN(CTX0_CTX1_CTX2_PJ21),
+	GPIO_FN(CRX0_CRX1_CRX2_PJ20),
 
 	/* DMAC */
 	GPIO_FN(TEND0),
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 640d2a4..d57e633 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -318,8 +318,8 @@ extern const struct sh_pfc_soc_info r8a7791_pinmux_info;
 extern const struct sh_pfc_soc_info r8a7792_pinmux_info;
 extern const struct sh_pfc_soc_info r8a7793_pinmux_info;
 extern const struct sh_pfc_soc_info r8a7794_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7795_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7795es1_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77950_pinmux_info __weak;
+extern const struct sh_pfc_soc_info r8a77951_pinmux_info __weak;
 extern const struct sh_pfc_soc_info r8a77960_pinmux_info;
 extern const struct sh_pfc_soc_info r8a77961_pinmux_info;
 extern const struct sh_pfc_soc_info r8a77965_pinmux_info;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.h b/drivers/pinctrl/stm32/pinctrl-stm32.h
index ec0d34c..b0882d1 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.h
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) Maxime Coquelin 2015
  * Copyright (C) STMicroelectronics 2017
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
index a78d7b9..31d62bb 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
@@ -19,7 +19,6 @@
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/of_irq.h>
 #include <linux/pinctrl/pinctrl.h>
 
 #include "pinctrl-sunxi.h"
@@ -549,7 +548,17 @@ static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data = {
 
 static int sun50i_h5_pinctrl_probe(struct platform_device *pdev)
 {
-	switch (of_irq_count(pdev->dev.of_node)) {
+	int ret;
+
+	ret = platform_irq_count(pdev);
+	if (ret < 0) {
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Couldn't determine irq count: %pe\n",
+				ERR_PTR(ret));
+		return ret;
+	}
+
+	switch (ret) {
 	case 2:
 		dev_warn(&pdev->dev,
 			 "Your device tree's pinctrl node is broken, which has no IRQ of PG bank routed.\n");
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 692d8b3..cefbbb8 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -648,7 +648,7 @@ static int tegra_pinctrl_suspend(struct device *dev)
 {
 	struct tegra_pmx *pmx = dev_get_drvdata(dev);
 	u32 *backup_regs = pmx->backup_regs;
-	u32 *regs;
+	u32 __iomem *regs;
 	size_t bank_size;
 	unsigned int i, k;
 
@@ -666,7 +666,7 @@ static int tegra_pinctrl_resume(struct device *dev)
 {
 	struct tegra_pmx *pmx = dev_get_drvdata(dev);
 	u32 *backup_regs = pmx->backup_regs;
-	u32 *regs;
+	u32 __iomem *regs;
 	size_t bank_size;
 	unsigned int i, k;
 
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 8723bcf..4f3651f 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -63,7 +63,7 @@ struct acpi_peripheral {
 struct chromeos_laptop {
 	/*
 	 * Note that we can't mark this pointer as const because
-	 * i2c_new_probed_device() changes passed in I2C board info, so.
+	 * i2c_new_scanned_device() changes passed in I2C board info, so.
 	 */
 	struct i2c_peripheral *i2c_peripherals;
 	unsigned int num_i2c_peripherals;
@@ -87,8 +87,8 @@ chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
 	 * address we scan secondary addresses. In any case the client
 	 * structure gets assigned primary address.
 	 */
-	client = i2c_new_probed_device(adapter, info, addr_list, NULL);
-	if (!client && alt_addr) {
+	client = i2c_new_scanned_device(adapter, info, addr_list, NULL);
+	if (IS_ERR(client) && alt_addr) {
 		struct i2c_board_info dummy_info = {
 			I2C_BOARD_INFO("dummy", info->addr),
 		};
@@ -97,9 +97,9 @@ chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
 		};
 		struct i2c_client *dummy;
 
-		dummy = i2c_new_probed_device(adapter, &dummy_info,
-					      alt_addr_list, NULL);
-		if (dummy) {
+		dummy = i2c_new_scanned_device(adapter, &dummy_info,
+					       alt_addr_list, NULL);
+		if (!IS_ERR(dummy)) {
 			pr_debug("%d-%02x is probed at %02x\n",
 				 adapter->nr, info->addr, dummy->addr);
 			i2c_unregister_device(dummy);
@@ -107,12 +107,14 @@ chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
 		}
 	}
 
-	if (!client)
+	if (IS_ERR(client)) {
+		client = NULL;
 		pr_debug("failed to register device %d-%02x\n",
 			 adapter->nr, info->addr);
-	else
+	} else {
 		pr_debug("added i2c device %d-%02x\n",
 			 adapter->nr, info->addr);
+	}
 
 	return client;
 }
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index 6d6ce86..6fc8f2c 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -16,7 +16,8 @@
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
 #include <linux/suspend.h>
-#include <asm/unaligned.h>
+
+#include "cros_ec.h"
 
 #define CROS_EC_DEV_EC_INDEX 0
 #define CROS_EC_DEV_PD_INDEX 1
diff --git a/drivers/platform/chrome/cros_ec.h b/drivers/platform/chrome/cros_ec.h
new file mode 100644
index 0000000..e69fc1f
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ChromeOS Embedded Controller core interface.
+ *
+ * Copyright (C) 2020 Google LLC
+ */
+
+#ifndef __CROS_EC_H
+#define __CROS_EC_H
+
+int cros_ec_register(struct cros_ec_device *ec_dev);
+int cros_ec_unregister(struct cros_ec_device *ec_dev);
+
+int cros_ec_suspend(struct cros_ec_device *ec_dev);
+int cros_ec_resume(struct cros_ec_device *ec_dev);
+
+bool cros_ec_handle_event(struct cros_ec_device *ec_dev);
+
+#endif /* __CROS_EC_H */
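A sketch, with hypothetical names, of how a transport driver uses this new private header instead of the old <linux/mfd/cros_ec.h> include; only cros_ec_register() and the cmd_xfer hook come from the interface above, the rest is illustrative:

#include "cros_ec.h"

static int example_cmd_xfer(struct cros_ec_device *ec_dev,
			    struct cros_ec_command *msg)
{
	return -EOPNOTSUPP;	/* bus-specific transfer would go here */
}

static int example_transport_probe(struct device *dev)
{
	struct cros_ec_device *ec_dev;

	ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
	if (!ec_dev)
		return -ENOMEM;

	ec_dev->dev = dev;
	ec_dev->cmd_xfer = example_cmd_xfer;

	return cros_ec_register(ec_dev);
}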
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index 74ded44..c65e70b 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -13,7 +13,6 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/fs.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/notifier.h>
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 6ae4849..ecfada0 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -7,7 +7,6 @@
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/platform_data/cros_ec_commands.h>
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index 9bd97bc..6119ecc 100644
--- a/drivers/platform/chrome/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -14,6 +14,8 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
+#include "cros_ec.h"
+
 /**
  * Request format for protocol v3
  * byte 0	0xda (EC_COMMAND_PROTOCOL_3)
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index e599682..93a71e9 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -14,6 +14,8 @@
 #include <linux/platform_data/cros_ec_proto.h>
 #include <linux/intel-ish-client-if.h>
 
+#include "cros_ec.h"
+
 /*
  * ISH TX/RX ring buffer pool size
  *
@@ -76,7 +78,7 @@ struct cros_ish_in_msg {
  *
  * The writers are .reset() and .probe() function.
  */
-DECLARE_RWSEM(init_lock);
+static DECLARE_RWSEM(init_lock);
 
 /**
  * struct response_info - Encapsulate firmware response related
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index c0f2eec..b4c110c 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -8,7 +8,6 @@
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/kobject.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index dccf479..1f78619 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -23,6 +23,7 @@
 #include <linux/printk.h>
 #include <linux/suspend.h>
 
+#include "cros_ec.h"
 #include "cros_ec_lpc_mec.h"
 
 #define DRV_NAME "cros_ec_lpcs"
@@ -396,7 +397,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
 	 * Some boards do not have an IRQ allotted for cros_ec_lpc,
 	 * which makes ENXIO an expected (and safe) scenario.
 	 */
-	irq = platform_get_irq(pdev, 0);
+	irq = platform_get_irq_optional(pdev, 0);
 	if (irq > 0)
 		ec_dev->irq = irq;
 	else if (irq != -ENXIO) {
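For reference, a hedged sketch of the optional-IRQ idiom used above (names are illustrative): platform_get_irq_optional() behaves like platform_get_irq() but stays silent when the IRQ simply is not described:

static int example_setup_irq(struct platform_device *pdev, int *out_irq)
{
	int irq = platform_get_irq_optional(pdev, 0);

	if (irq > 0) {
		*out_irq = irq;
		return 0;
	}
	if (irq != -ENXIO)
		return irq;	/* a real error, including -EPROBE_DEFER */

	return 0;		/* -ENXIO: no IRQ wired up, fall back to polling */
}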
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index da1b1c4..3cfa643 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -54,8 +54,6 @@ static int send_command(struct cros_ec_device *ec_dev,
 	int ret;
 	int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);
 
-	trace_cros_ec_cmd(msg);
-
 	if (ec_dev->proto_version > 2)
 		xfer_fxn = ec_dev->pkt_xfer;
 	else
@@ -72,7 +70,9 @@ static int send_command(struct cros_ec_device *ec_dev,
 		return -EIO;
 	}
 
+	trace_cros_ec_request_start(msg);
 	ret = (*xfer_fxn)(ec_dev, msg);
+	trace_cros_ec_request_done(msg, ret);
 	if (msg->result == EC_RES_IN_PROGRESS) {
 		int i;
 		struct cros_ec_command *status_msg;
@@ -95,7 +95,9 @@ static int send_command(struct cros_ec_device *ec_dev,
 		for (i = 0; i < EC_COMMAND_RETRIES; i++) {
 			usleep_range(10000, 11000);
 
+			trace_cros_ec_request_start(status_msg);
 			ret = (*xfer_fxn)(ec_dev, status_msg);
+			trace_cros_ec_request_done(status_msg, ret);
 			if (ret == -EAGAIN)
 				continue;
 			if (ret < 0)
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
index bd068af..dbc3f55 100644
--- a/drivers/platform/chrome/cros_ec_rpmsg.c
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -13,6 +13,8 @@
 #include <linux/rpmsg.h>
 #include <linux/slab.h>
 
+#include "cros_ec.h"
+
 #define EC_MSG_TIMEOUT_MS	200
 #define HOST_COMMAND_MARK	1
 #define HOST_EVENT_MARK		2
diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c
index 04d8879..79fefd3 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub.c
@@ -9,7 +9,6 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/module.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
 #include <linux/platform_data/cros_ec_sensorhub.h>
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index a831bd5..46786d2 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -14,6 +14,8 @@
 #include <linux/spi/spi.h>
 #include <uapi/linux/sched/types.h>
 
+#include "cros_ec.h"
+
 /* The header byte, which follows the preamble */
 #define EC_MSG_HEADER			0xec
 
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index 74d36b8..07dac97 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -8,7 +8,6 @@
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/kobject.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
diff --git a/drivers/platform/chrome/cros_ec_trace.c b/drivers/platform/chrome/cros_ec_trace.c
index 5af1d66..523a39b 100644
--- a/drivers/platform/chrome/cros_ec_trace.c
+++ b/drivers/platform/chrome/cros_ec_trace.c
@@ -8,6 +8,11 @@
 // Generate the list using the following script:
 // sed -n 's/^#define \(EC_CMD_[[:alnum:]_]*\)\s.*/\tTRACE_SYMBOL(\1), \\/p' include/linux/platform_data/cros_ec_commands.h
 #define EC_CMDS \
+	TRACE_SYMBOL(EC_CMD_ACPI_READ), \
+	TRACE_SYMBOL(EC_CMD_ACPI_WRITE), \
+	TRACE_SYMBOL(EC_CMD_ACPI_BURST_ENABLE), \
+	TRACE_SYMBOL(EC_CMD_ACPI_BURST_DISABLE), \
+	TRACE_SYMBOL(EC_CMD_ACPI_QUERY_EVENT), \
 	TRACE_SYMBOL(EC_CMD_PROTO_VERSION), \
 	TRACE_SYMBOL(EC_CMD_HELLO), \
 	TRACE_SYMBOL(EC_CMD_GET_VERSION), \
@@ -22,6 +27,8 @@
 	TRACE_SYMBOL(EC_CMD_GET_PROTOCOL_INFO), \
 	TRACE_SYMBOL(EC_CMD_GSV_PAUSE_IN_S5), \
 	TRACE_SYMBOL(EC_CMD_GET_FEATURES), \
+	TRACE_SYMBOL(EC_CMD_GET_SKU_ID), \
+	TRACE_SYMBOL(EC_CMD_SET_SKU_ID), \
 	TRACE_SYMBOL(EC_CMD_FLASH_INFO), \
 	TRACE_SYMBOL(EC_CMD_FLASH_READ), \
 	TRACE_SYMBOL(EC_CMD_FLASH_WRITE), \
@@ -29,6 +36,8 @@
 	TRACE_SYMBOL(EC_CMD_FLASH_PROTECT), \
 	TRACE_SYMBOL(EC_CMD_FLASH_REGION_INFO), \
 	TRACE_SYMBOL(EC_CMD_VBNV_CONTEXT), \
+	TRACE_SYMBOL(EC_CMD_FLASH_SPI_INFO), \
+	TRACE_SYMBOL(EC_CMD_FLASH_SELECT), \
 	TRACE_SYMBOL(EC_CMD_PWM_GET_FAN_TARGET_RPM), \
 	TRACE_SYMBOL(EC_CMD_PWM_SET_FAN_TARGET_RPM), \
 	TRACE_SYMBOL(EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT), \
@@ -40,6 +49,8 @@
 	TRACE_SYMBOL(EC_CMD_LED_CONTROL), \
 	TRACE_SYMBOL(EC_CMD_VBOOT_HASH), \
 	TRACE_SYMBOL(EC_CMD_MOTION_SENSE_CMD), \
+	TRACE_SYMBOL(EC_CMD_FORCE_LID_OPEN), \
+	TRACE_SYMBOL(EC_CMD_CONFIG_POWER_BUTTON), \
 	TRACE_SYMBOL(EC_CMD_USB_CHARGE_SET_MODE), \
 	TRACE_SYMBOL(EC_CMD_PSTORE_INFO), \
 	TRACE_SYMBOL(EC_CMD_PSTORE_READ), \
@@ -50,6 +61,9 @@
 	TRACE_SYMBOL(EC_CMD_RTC_SET_ALARM), \
 	TRACE_SYMBOL(EC_CMD_PORT80_LAST_BOOT), \
 	TRACE_SYMBOL(EC_CMD_PORT80_READ), \
+	TRACE_SYMBOL(EC_CMD_VSTORE_INFO), \
+	TRACE_SYMBOL(EC_CMD_VSTORE_READ), \
+	TRACE_SYMBOL(EC_CMD_VSTORE_WRITE), \
 	TRACE_SYMBOL(EC_CMD_THERMAL_SET_THRESHOLD), \
 	TRACE_SYMBOL(EC_CMD_THERMAL_GET_THRESHOLD), \
 	TRACE_SYMBOL(EC_CMD_THERMAL_AUTO_FAN_CTRL), \
@@ -59,10 +73,12 @@
 	TRACE_SYMBOL(EC_CMD_MKBP_STATE), \
 	TRACE_SYMBOL(EC_CMD_MKBP_INFO), \
 	TRACE_SYMBOL(EC_CMD_MKBP_SIMULATE_KEY), \
+	TRACE_SYMBOL(EC_CMD_GET_KEYBOARD_ID), \
 	TRACE_SYMBOL(EC_CMD_MKBP_SET_CONFIG), \
 	TRACE_SYMBOL(EC_CMD_MKBP_GET_CONFIG), \
 	TRACE_SYMBOL(EC_CMD_KEYSCAN_SEQ_CTRL), \
 	TRACE_SYMBOL(EC_CMD_GET_NEXT_EVENT), \
+	TRACE_SYMBOL(EC_CMD_KEYBOARD_FACTORY_TEST), \
 	TRACE_SYMBOL(EC_CMD_TEMP_SENSOR_GET_INFO), \
 	TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_B), \
 	TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_SMI_MASK), \
@@ -73,6 +89,7 @@
 	TRACE_SYMBOL(EC_CMD_HOST_EVENT_CLEAR), \
 	TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_WAKE_MASK), \
 	TRACE_SYMBOL(EC_CMD_HOST_EVENT_CLEAR_B), \
+	TRACE_SYMBOL(EC_CMD_HOST_EVENT), \
 	TRACE_SYMBOL(EC_CMD_SWITCH_ENABLE_BKLIGHT), \
 	TRACE_SYMBOL(EC_CMD_SWITCH_ENABLE_WIRELESS), \
 	TRACE_SYMBOL(EC_CMD_GPIO_SET), \
@@ -92,36 +109,102 @@
 	TRACE_SYMBOL(EC_CMD_CHARGE_STATE), \
 	TRACE_SYMBOL(EC_CMD_CHARGE_CURRENT_LIMIT), \
 	TRACE_SYMBOL(EC_CMD_EXTERNAL_POWER_LIMIT), \
+	TRACE_SYMBOL(EC_CMD_OVERRIDE_DEDICATED_CHARGER_LIMIT), \
+	TRACE_SYMBOL(EC_CMD_HIBERNATION_DELAY), \
 	TRACE_SYMBOL(EC_CMD_HOST_SLEEP_EVENT), \
+	TRACE_SYMBOL(EC_CMD_DEVICE_EVENT), \
 	TRACE_SYMBOL(EC_CMD_SB_READ_WORD), \
 	TRACE_SYMBOL(EC_CMD_SB_WRITE_WORD), \
 	TRACE_SYMBOL(EC_CMD_SB_READ_BLOCK), \
 	TRACE_SYMBOL(EC_CMD_SB_WRITE_BLOCK), \
 	TRACE_SYMBOL(EC_CMD_BATTERY_VENDOR_PARAM), \
+	TRACE_SYMBOL(EC_CMD_SB_FW_UPDATE), \
+	TRACE_SYMBOL(EC_CMD_ENTERING_MODE), \
+	TRACE_SYMBOL(EC_CMD_I2C_PASSTHRU_PROTECT), \
+	TRACE_SYMBOL(EC_CMD_CEC_WRITE_MSG), \
+	TRACE_SYMBOL(EC_CMD_CEC_SET), \
+	TRACE_SYMBOL(EC_CMD_CEC_GET), \
 	TRACE_SYMBOL(EC_CMD_EC_CODEC), \
 	TRACE_SYMBOL(EC_CMD_EC_CODEC_DMIC), \
 	TRACE_SYMBOL(EC_CMD_EC_CODEC_I2S_RX), \
 	TRACE_SYMBOL(EC_CMD_EC_CODEC_WOV), \
 	TRACE_SYMBOL(EC_CMD_REBOOT_EC), \
 	TRACE_SYMBOL(EC_CMD_GET_PANIC_INFO), \
-	TRACE_SYMBOL(EC_CMD_ACPI_READ), \
-	TRACE_SYMBOL(EC_CMD_ACPI_WRITE), \
-	TRACE_SYMBOL(EC_CMD_ACPI_QUERY_EVENT), \
-	TRACE_SYMBOL(EC_CMD_CEC_WRITE_MSG), \
-	TRACE_SYMBOL(EC_CMD_CEC_SET), \
-	TRACE_SYMBOL(EC_CMD_CEC_GET), \
 	TRACE_SYMBOL(EC_CMD_REBOOT), \
 	TRACE_SYMBOL(EC_CMD_RESEND_RESPONSE), \
 	TRACE_SYMBOL(EC_CMD_VERSION0), \
 	TRACE_SYMBOL(EC_CMD_PD_EXCHANGE_STATUS), \
+	TRACE_SYMBOL(EC_CMD_PD_HOST_EVENT_STATUS), \
 	TRACE_SYMBOL(EC_CMD_USB_PD_CONTROL), \
 	TRACE_SYMBOL(EC_CMD_USB_PD_PORTS), \
 	TRACE_SYMBOL(EC_CMD_USB_PD_POWER_INFO), \
 	TRACE_SYMBOL(EC_CMD_CHARGE_PORT_COUNT), \
+	TRACE_SYMBOL(EC_CMD_USB_PD_FW_UPDATE), \
+	TRACE_SYMBOL(EC_CMD_USB_PD_RW_HASH_ENTRY), \
+	TRACE_SYMBOL(EC_CMD_USB_PD_DEV_INFO), \
 	TRACE_SYMBOL(EC_CMD_USB_PD_DISCOVERY), \
 	TRACE_SYMBOL(EC_CMD_PD_CHARGE_PORT_OVERRIDE), \
 	TRACE_SYMBOL(EC_CMD_PD_GET_LOG_ENTRY), \
-	TRACE_SYMBOL(EC_CMD_USB_PD_MUX_INFO)
+	TRACE_SYMBOL(EC_CMD_USB_PD_GET_AMODE), \
+	TRACE_SYMBOL(EC_CMD_USB_PD_SET_AMODE), \
+	TRACE_SYMBOL(EC_CMD_PD_WRITE_LOG_ENTRY), \
+	TRACE_SYMBOL(EC_CMD_PD_CONTROL), \
+	TRACE_SYMBOL(EC_CMD_USB_PD_MUX_INFO), \
+	TRACE_SYMBOL(EC_CMD_PD_CHIP_INFO), \
+	TRACE_SYMBOL(EC_CMD_RWSIG_CHECK_STATUS), \
+	TRACE_SYMBOL(EC_CMD_RWSIG_ACTION), \
+	TRACE_SYMBOL(EC_CMD_EFS_VERIFY), \
+	TRACE_SYMBOL(EC_CMD_GET_CROS_BOARD_INFO), \
+	TRACE_SYMBOL(EC_CMD_SET_CROS_BOARD_INFO), \
+	TRACE_SYMBOL(EC_CMD_GET_UPTIME_INFO), \
+	TRACE_SYMBOL(EC_CMD_ADD_ENTROPY), \
+	TRACE_SYMBOL(EC_CMD_ADC_READ), \
+	TRACE_SYMBOL(EC_CMD_ROLLBACK_INFO), \
+	TRACE_SYMBOL(EC_CMD_AP_RESET), \
+	TRACE_SYMBOL(EC_CMD_CR51_BASE), \
+	TRACE_SYMBOL(EC_CMD_CR51_LAST), \
+	TRACE_SYMBOL(EC_CMD_FP_PASSTHRU), \
+	TRACE_SYMBOL(EC_CMD_FP_MODE), \
+	TRACE_SYMBOL(EC_CMD_FP_INFO), \
+	TRACE_SYMBOL(EC_CMD_FP_FRAME), \
+	TRACE_SYMBOL(EC_CMD_FP_TEMPLATE), \
+	TRACE_SYMBOL(EC_CMD_FP_CONTEXT), \
+	TRACE_SYMBOL(EC_CMD_FP_STATS), \
+	TRACE_SYMBOL(EC_CMD_FP_SEED), \
+	TRACE_SYMBOL(EC_CMD_FP_ENC_STATUS), \
+	TRACE_SYMBOL(EC_CMD_TP_SELF_TEST), \
+	TRACE_SYMBOL(EC_CMD_TP_FRAME_INFO), \
+	TRACE_SYMBOL(EC_CMD_TP_FRAME_SNAPSHOT), \
+	TRACE_SYMBOL(EC_CMD_TP_FRAME_GET), \
+	TRACE_SYMBOL(EC_CMD_BATTERY_GET_STATIC), \
+	TRACE_SYMBOL(EC_CMD_BATTERY_GET_DYNAMIC), \
+	TRACE_SYMBOL(EC_CMD_CHARGER_CONTROL), \
+	TRACE_SYMBOL(EC_CMD_BOARD_SPECIFIC_BASE), \
+	TRACE_SYMBOL(EC_CMD_BOARD_SPECIFIC_LAST)
+
+/* See the enum ec_status in include/linux/platform_data/cros_ec_commands.h */
+#define EC_RESULT \
+	TRACE_SYMBOL(EC_RES_SUCCESS), \
+	TRACE_SYMBOL(EC_RES_INVALID_COMMAND), \
+	TRACE_SYMBOL(EC_RES_ERROR), \
+	TRACE_SYMBOL(EC_RES_INVALID_PARAM), \
+	TRACE_SYMBOL(EC_RES_ACCESS_DENIED), \
+	TRACE_SYMBOL(EC_RES_INVALID_RESPONSE), \
+	TRACE_SYMBOL(EC_RES_INVALID_VERSION), \
+	TRACE_SYMBOL(EC_RES_INVALID_CHECKSUM), \
+	TRACE_SYMBOL(EC_RES_IN_PROGRESS), \
+	TRACE_SYMBOL(EC_RES_UNAVAILABLE), \
+	TRACE_SYMBOL(EC_RES_TIMEOUT), \
+	TRACE_SYMBOL(EC_RES_OVERFLOW), \
+	TRACE_SYMBOL(EC_RES_INVALID_HEADER), \
+	TRACE_SYMBOL(EC_RES_REQUEST_TRUNCATED), \
+	TRACE_SYMBOL(EC_RES_RESPONSE_TOO_BIG), \
+	TRACE_SYMBOL(EC_RES_BUS_ERROR), \
+	TRACE_SYMBOL(EC_RES_BUSY), \
+	TRACE_SYMBOL(EC_RES_INVALID_HEADER_VERSION), \
+	TRACE_SYMBOL(EC_RES_INVALID_HEADER_CRC), \
+	TRACE_SYMBOL(EC_RES_INVALID_DATA_CRC), \
+	TRACE_SYMBOL(EC_RES_DUP_UNAVAILABLE)
 
 #define CREATE_TRACE_POINTS
 #include "cros_ec_trace.h"
diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
index 0dd4df3..e9fb05f 100644
--- a/drivers/platform/chrome/cros_ec_trace.h
+++ b/drivers/platform/chrome/cros_ec_trace.h
@@ -18,7 +18,7 @@
 
 #include <linux/tracepoint.h>
 
-DECLARE_EVENT_CLASS(cros_ec_cmd_class,
+TRACE_EVENT(cros_ec_request_start,
 	TP_PROTO(struct cros_ec_command *cmd),
 	TP_ARGS(cmd),
 	TP_STRUCT__entry(
@@ -33,10 +33,26 @@ DECLARE_EVENT_CLASS(cros_ec_cmd_class,
 		  __print_symbolic(__entry->command, EC_CMDS))
 );
 
-
-DEFINE_EVENT(cros_ec_cmd_class, cros_ec_cmd,
-	TP_PROTO(struct cros_ec_command *cmd),
-	TP_ARGS(cmd)
+TRACE_EVENT(cros_ec_request_done,
+	TP_PROTO(struct cros_ec_command *cmd, int retval),
+	TP_ARGS(cmd, retval),
+	TP_STRUCT__entry(
+		__field(uint32_t, version)
+		__field(uint32_t, command)
+		__field(uint32_t, result)
+		__field(int, retval)
+	),
+	TP_fast_assign(
+		__entry->version = cmd->version;
+		__entry->command = cmd->command;
+		__entry->result = cmd->result;
+		__entry->retval = retval;
+	),
+	TP_printk("version: %u, command: %s, ec result: %s, retval: %d",
+		  __entry->version,
+		  __print_symbolic(__entry->command, EC_CMDS),
+		  __print_symbolic(__entry->result, EC_RESULT),
+		  __entry->retval)
 );
 
 
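The intended usage of the two events is to bracket each low-level transfer so the EC result code and the kernel return value are recorded together, as cros_ec_proto.c now does. A reduced sketch with a hypothetical helper name:

static int example_do_xfer(struct cros_ec_device *ec_dev,
			   struct cros_ec_command *msg)
{
	int ret;

	trace_cros_ec_request_start(msg);
	ret = ec_dev->pkt_xfer(ec_dev, msg);	/* or cmd_xfer for protocol v2 */
	trace_cros_ec_request_done(msg, ret);	/* logs msg->result alongside ret */

	return ret;
}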
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index f11a128..8edae46 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -6,7 +6,6 @@
 
 #include <linux/of.h>
 #include <linux/platform_device.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
index 374cdd1..7de3ea7 100644
--- a/drivers/platform/chrome/cros_usbpd_logger.c
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -6,7 +6,6 @@
  */
 
 #include <linux/ktime.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/math64.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
diff --git a/drivers/platform/chrome/wilco_ec/Kconfig b/drivers/platform/chrome/wilco_ec/Kconfig
index 365f30e..49e8530 100644
--- a/drivers/platform/chrome/wilco_ec/Kconfig
+++ b/drivers/platform/chrome/wilco_ec/Kconfig
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config WILCO_EC
 	tristate "ChromeOS Wilco Embedded Controller"
-	depends on ACPI && X86 && CROS_EC_LPC && LEDS_CLASS
+	depends on X86 || COMPILE_TEST
+	depends on ACPI && CROS_EC_LPC && LEDS_CLASS
 	help
 	  If you say Y here, you get support for talking to the ChromeOS
 	  Wilco EC over an eSPI bus. This uses a simple byte-level protocol
diff --git a/drivers/platform/chrome/wilco_ec/core.c b/drivers/platform/chrome/wilco_ec/core.c
index 5210c35..5b42992 100644
--- a/drivers/platform/chrome/wilco_ec/core.c
+++ b/drivers/platform/chrome/wilco_ec/core.c
@@ -94,7 +94,7 @@ static int wilco_ec_probe(struct platform_device *pdev)
 
 	ret = wilco_ec_add_sysfs(ec);
 	if (ret < 0) {
-		dev_err(dev, "Failed to create sysfs entries: %d", ret);
+		dev_err(dev, "Failed to create sysfs entries: %d\n", ret);
 		goto unregister_rtc;
 	}
 
@@ -137,9 +137,9 @@ static int wilco_ec_remove(struct platform_device *pdev)
 {
 	struct wilco_ec_device *ec = platform_get_drvdata(pdev);
 
+	platform_device_unregister(ec->telem_pdev);
 	platform_device_unregister(ec->charger_pdev);
 	wilco_ec_remove_sysfs(ec);
-	platform_device_unregister(ec->telem_pdev);
 	platform_device_unregister(ec->rtc_pdev);
 	if (ec->debugfs_pdev)
 		platform_device_unregister(ec->debugfs_pdev);
diff --git a/drivers/platform/chrome/wilco_ec/keyboard_leds.c b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
index 5731d1b..6ce9c67 100644
--- a/drivers/platform/chrome/wilco_ec/keyboard_leds.c
+++ b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
@@ -69,7 +69,7 @@ static int send_kbbl_msg(struct wilco_ec_device *ec,
 	ret = wilco_ec_mailbox(ec, &msg);
 	if (ret < 0) {
 		dev_err(ec->dev,
-			"Failed sending keyboard LEDs command: %d", ret);
+			"Failed sending keyboard LEDs command: %d\n", ret);
 		return ret;
 	}
 
@@ -94,7 +94,7 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
 
 	if (response.status) {
 		dev_err(ec->dev,
-			"EC reported failure sending keyboard LEDs command: %d",
+			"EC reported failure sending keyboard LEDs command: %d\n",
 			response.status);
 		return -EIO;
 	}
@@ -147,7 +147,7 @@ static int kbbl_init(struct wilco_ec_device *ec)
 
 	if (response.status) {
 		dev_err(ec->dev,
-			"EC reported failure sending keyboard LEDs command: %d",
+			"EC reported failure sending keyboard LEDs command: %d\n",
 			response.status);
 		return -EIO;
 	}
@@ -179,7 +179,7 @@ int wilco_keyboard_leds_init(struct wilco_ec_device *ec)
 	ret = kbbl_exist(ec, &leds_exist);
 	if (ret < 0) {
 		dev_err(ec->dev,
-			"Failed checking keyboard LEDs support: %d", ret);
+			"Failed checking keyboard LEDs support: %d\n", ret);
 		return ret;
 	}
 	if (!leds_exist)
diff --git a/drivers/platform/chrome/wilco_ec/mailbox.c b/drivers/platform/chrome/wilco_ec/mailbox.c
index ced1f9f..0f98358 100644
--- a/drivers/platform/chrome/wilco_ec/mailbox.c
+++ b/drivers/platform/chrome/wilco_ec/mailbox.c
@@ -163,13 +163,13 @@ static int wilco_ec_transfer(struct wilco_ec_device *ec,
 	}
 
 	if (rs->data_size != EC_MAILBOX_DATA_SIZE) {
-		dev_dbg(ec->dev, "unexpected packet size (%u != %u)",
+		dev_dbg(ec->dev, "unexpected packet size (%u != %u)\n",
 			rs->data_size, EC_MAILBOX_DATA_SIZE);
 		return -EMSGSIZE;
 	}
 
 	if (rs->data_size < msg->response_size) {
-		dev_dbg(ec->dev, "EC didn't return enough data (%u < %zu)",
+		dev_dbg(ec->dev, "EC didn't return enough data (%u < %zu)\n",
 			rs->data_size, msg->response_size);
 		return -EMSGSIZE;
 	}
diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c
index 1176d54..e06d96f 100644
--- a/drivers/platform/chrome/wilco_ec/telemetry.c
+++ b/drivers/platform/chrome/wilco_ec/telemetry.c
@@ -367,7 +367,7 @@ static int telem_device_probe(struct platform_device *pdev)
 	minor = ida_alloc_max(&telem_ida, TELEM_MAX_DEV-1, GFP_KERNEL);
 	if (minor < 0) {
 		error = minor;
-		dev_err(&pdev->dev, "Failed to find minor number: %d", error);
+		dev_err(&pdev->dev, "Failed to find minor number: %d\n", error);
 		return error;
 	}
 
@@ -427,14 +427,14 @@ static int __init telem_module_init(void)
 
 	ret = class_register(&telem_class);
 	if (ret) {
-		pr_err(DRV_NAME ": Failed registering class: %d", ret);
+		pr_err(DRV_NAME ": Failed registering class: %d\n", ret);
 		return ret;
 	}
 
 	/* Request the kernel for device numbers, starting with minor=0 */
 	ret = alloc_chrdev_region(&dev_num, 0, TELEM_MAX_DEV, TELEM_DEV_NAME);
 	if (ret) {
-		pr_err(DRV_NAME ": Failed allocating dev numbers: %d", ret);
+		pr_err(DRV_NAME ": Failed allocating dev numbers: %d\n", ret);
 		goto destroy_class;
 	}
 	telem_major = MAJOR(dev_num);
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index cef0133..1ab207e 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -257,12 +257,12 @@ static int goldfish_pipe_error_convert(int status)
 	}
 }
 
-static int pin_user_pages(unsigned long first_page,
-			  unsigned long last_page,
-			  unsigned int last_page_size,
-			  int is_write,
-			  struct page *pages[MAX_BUFFERS_PER_COMMAND],
-			  unsigned int *iter_last_page_size)
+static int goldfish_pin_pages(unsigned long first_page,
+			      unsigned long last_page,
+			      unsigned int last_page_size,
+			      int is_write,
+			      struct page *pages[MAX_BUFFERS_PER_COMMAND],
+			      unsigned int *iter_last_page_size)
 {
 	int ret;
 	int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
@@ -274,7 +274,7 @@ static int pin_user_pages(unsigned long first_page,
 		*iter_last_page_size = last_page_size;
 	}
 
-	ret = get_user_pages_fast(first_page, requested_pages,
+	ret = pin_user_pages_fast(first_page, requested_pages,
 				  !is_write ? FOLL_WRITE : 0,
 				  pages);
 	if (ret <= 0)
@@ -285,18 +285,6 @@ static int pin_user_pages(unsigned long first_page,
 	return ret;
 }
 
-static void release_user_pages(struct page **pages, int pages_count,
-			       int is_write, s32 consumed_size)
-{
-	int i;
-
-	for (i = 0; i < pages_count; i++) {
-		if (!is_write && consumed_size > 0)
-			set_page_dirty(pages[i]);
-		put_page(pages[i]);
-	}
-}
-
 /* Populate the call parameters, merging adjacent pages together */
 static void populate_rw_params(struct page **pages,
 			       int pages_count,
@@ -354,9 +342,9 @@ static int transfer_max_buffers(struct goldfish_pipe *pipe,
 	if (mutex_lock_interruptible(&pipe->lock))
 		return -ERESTARTSYS;
 
-	pages_count = pin_user_pages(first_page, last_page,
-				     last_page_size, is_write,
-				     pipe->pages, &iter_last_page_size);
+	pages_count = goldfish_pin_pages(first_page, last_page,
+					 last_page_size, is_write,
+					 pipe->pages, &iter_last_page_size);
 	if (pages_count < 0) {
 		mutex_unlock(&pipe->lock);
 		return pages_count;
@@ -372,7 +360,8 @@ static int transfer_max_buffers(struct goldfish_pipe *pipe,
 
 	*consumed_size = pipe->command_buffer->rw_params.consumed_size;
 
-	release_user_pages(pipe->pages, pages_count, is_write, *consumed_size);
+	unpin_user_pages_dirty_lock(pipe->pages, pages_count,
+				    !is_write && *consumed_size > 0);
 
 	mutex_unlock(&pipe->lock);
 	return 0;
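A simplified sketch of the pin/unpin pairing introduced above; error handling is trimmed and the function name is hypothetical:

static int example_pin_and_use(unsigned long uaddr, bool is_write,
			       struct page **pages, int nr)
{
	int pinned;

	/* FOLL_PIN-based fast path; must be released with an unpin_* call. */
	pinned = pin_user_pages_fast(uaddr, nr, !is_write ? FOLL_WRITE : 0, pages);
	if (pinned <= 0)
		return pinned < 0 ? pinned : -EFAULT;

	/* ... hand the pinned pages to the device ... */

	/* Dirties the pages when data was read into them, then unpins them. */
	unpin_user_pages_dirty_lock(pages, pinned, !is_write);
	return 0;
}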
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 43bb15e..612ef55 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -33,9 +33,9 @@
 #include <linux/seq_file.h>
 #include <linux/platform_data/x86/asus-wmi.h>
 #include <linux/platform_device.h>
-#include <linux/thermal.h>
 #include <linux/acpi.h>
 #include <linux/dmi.h>
+#include <linux/units.h>
 
 #include <acpi/battery.h>
 #include <acpi/video.h>
@@ -1514,9 +1514,8 @@ static ssize_t asus_hwmon_temp1(struct device *dev,
 	if (err < 0)
 		return err;
 
-	value = DECI_KELVIN_TO_CELSIUS((value & 0xFFFF)) * 1000;
-
-	return sprintf(buf, "%d\n", value);
+	return sprintf(buf, "%ld\n",
+		       deci_kelvin_to_millicelsius(value & 0xFFFF));
 }
 
 /* Fan1 */
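The linux/units.h helper replaces the open-coded DECI_KELVIN_TO_CELSIUS(...) * 1000: one deci-kelvin is 100 millidegrees, and absolute zero is -273150 mC. A sketch of the equivalent arithmetic (not the kernel's implementation verbatim):

static long example_deci_kelvin_to_millicelsius(long t)
{
	return t * 100 - 273150;
}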
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index b102f6d..101d7e7 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/thermal.h>
 #include <linux/types.h>
+#include <linux/units.h>
 
 MODULE_AUTHOR("Thomas Sujith");
 MODULE_AUTHOR("Zhang Rui");
@@ -302,8 +303,10 @@ static ssize_t aux_show(struct device *dev, struct device_attribute *dev_attr,
 	int result;
 
 	result = sensor_get_auxtrip(attr->handle, idx, &value);
+	if (result)
+		return result;
 
-	return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value));
+	return sprintf(buf, "%lu", deci_kelvin_to_celsius(value));
 }
 
 static ssize_t aux0_show(struct device *dev,
@@ -332,8 +335,8 @@ static ssize_t aux_store(struct device *dev, struct device_attribute *dev_attr,
 	if (value < 0)
 		return -EINVAL;
 
-	result = sensor_set_auxtrip(attr->handle, idx, 
-				    CELSIUS_TO_DECI_KELVIN(value));
+	result = sensor_set_auxtrip(attr->handle, idx,
+				    celsius_to_deci_kelvin(value));
 	return result ? result : count;
 }
 
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index da794dc..8eaadba 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -907,13 +907,12 @@ static ssize_t dispatch_proc_write(struct file *file,
 	return ret;
 }
 
-static const struct file_operations dispatch_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= dispatch_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= dispatch_proc_write,
+static const struct proc_ops dispatch_proc_ops = {
+	.proc_open	= dispatch_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= dispatch_proc_write,
 };
 
 static char *next_cmd(char **cmds)
@@ -9984,7 +9983,7 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
 		if (ibm->write)
 			mode |= S_IWUSR;
 		entry = proc_create_data(ibm->name, mode, proc_dir,
-					 &dispatch_proc_fops, ibm);
+					 &dispatch_proc_ops, ibm);
 		if (!entry) {
 			pr_err("unable to create proc entry %s\n", ibm->name);
 			ret = -ENODEV;
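For context, a generic sketch of the file_operations to proc_ops conversion repeated in the next few files: the .owner field goes away and the callbacks gain a proc_ prefix. All names below are hypothetical:

static int example_proc_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example\n");
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, PDE_DATA(inode));
}

static const struct proc_ops example_proc_ops = {
	.proc_open	= example_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};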
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index a1e6569..8089445 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1432,13 +1432,12 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
 	return count;
 }
 
-static const struct file_operations lcd_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= lcd_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= lcd_proc_write,
+static const struct proc_ops lcd_proc_ops = {
+	.proc_open	= lcd_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= lcd_proc_write,
 };
 
 /* Video-Out */
@@ -1539,13 +1538,12 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
 	return ret ? -EIO : count;
 }
 
-static const struct file_operations video_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= video_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= video_proc_write,
+static const struct proc_ops video_proc_ops = {
+	.proc_open	= video_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= video_proc_write,
 };
 
 /* Fan status */
@@ -1617,13 +1615,12 @@ static ssize_t fan_proc_write(struct file *file, const char __user *buf,
 	return count;
 }
 
-static const struct file_operations fan_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= fan_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= fan_proc_write,
+static const struct proc_ops fan_proc_ops = {
+	.proc_open	= fan_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= fan_proc_write,
 };
 
 static int keys_proc_show(struct seq_file *m, void *v)
@@ -1662,13 +1659,12 @@ static ssize_t keys_proc_write(struct file *file, const char __user *buf,
 	return count;
 }
 
-static const struct file_operations keys_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= keys_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= keys_proc_write,
+static const struct proc_ops keys_proc_ops = {
+	.proc_open	= keys_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= keys_proc_write,
 };
 
 static int __maybe_unused version_proc_show(struct seq_file *m, void *v)
@@ -1688,16 +1684,16 @@ static void create_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
 {
 	if (dev->backlight_dev)
 		proc_create_data("lcd", S_IRUGO | S_IWUSR, toshiba_proc_dir,
-				 &lcd_proc_fops, dev);
+				 &lcd_proc_ops, dev);
 	if (dev->video_supported)
 		proc_create_data("video", S_IRUGO | S_IWUSR, toshiba_proc_dir,
-				 &video_proc_fops, dev);
+				 &video_proc_ops, dev);
 	if (dev->fan_supported)
 		proc_create_data("fan", S_IRUGO | S_IWUSR, toshiba_proc_dir,
-				 &fan_proc_fops, dev);
+				 &fan_proc_ops, dev);
 	if (dev->hotkey_dev)
 		proc_create_data("keys", S_IRUGO | S_IWUSR, toshiba_proc_dir,
-				 &keys_proc_fops, dev);
+				 &keys_proc_ops, dev);
 	proc_create_single_data("version", S_IRUGO, toshiba_proc_dir,
 			version_proc_show, dev);
 }
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index 3682097..785a796 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -49,10 +49,9 @@ static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf,
 	return nbytes;
 }
 
-static const struct file_operations isapnp_proc_bus_file_operations = {
-	.owner	= THIS_MODULE,
-	.llseek = isapnp_proc_bus_lseek,
-	.read = isapnp_proc_bus_read,
+static const struct proc_ops isapnp_proc_bus_proc_ops = {
+	.proc_lseek	= isapnp_proc_bus_lseek,
+	.proc_read	= isapnp_proc_bus_read,
 };
 
 static int isapnp_proc_attach_device(struct pnp_dev *dev)
@@ -69,7 +68,7 @@ static int isapnp_proc_attach_device(struct pnp_dev *dev)
 	}
 	sprintf(name, "%02x", dev->number);
 	e = dev->procent = proc_create_data(name, S_IFREG | S_IRUGO, de,
-			&isapnp_proc_bus_file_operations, dev);
+					    &isapnp_proc_bus_proc_ops, dev);
 	if (!e)
 		return -ENOMEM;
 	proc_set_size(e, 256);
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index fe1c8f5..a806830 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -210,13 +210,12 @@ static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf,
 	return ret;
 }
 
-static const struct file_operations pnpbios_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= pnpbios_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= pnpbios_proc_write,
+static const struct proc_ops pnpbios_proc_ops = {
+	.proc_open	= pnpbios_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= pnpbios_proc_write,
 };
 
 int pnpbios_interface_attach_device(struct pnp_bios_node *node)
@@ -228,13 +227,13 @@ int pnpbios_interface_attach_device(struct pnp_bios_node *node)
 	if (!proc_pnp)
 		return -EIO;
 	if (!pnpbios_dont_use_current_config) {
-		proc_create_data(name, 0644, proc_pnp, &pnpbios_proc_fops,
+		proc_create_data(name, 0644, proc_pnp, &pnpbios_proc_ops,
 				 (void *)(long)(node->handle));
 	}
 
 	if (!proc_pnp_boot)
 		return -EIO;
-	if (proc_create_data(name, 0644, proc_pnp_boot, &pnpbios_proc_fops,
+	if (proc_create_data(name, 0644, proc_pnp_boot, &pnpbios_proc_ops,
 			     (void *)(long)(node->handle + 0x100)))
 		return 0;
 	return -EIO;
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index b8fe166..cdb4237 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -14,7 +14,7 @@
 
 config QCOM_CPR
 	tristate "QCOM Core Power Reduction (CPR) support"
-	depends on POWER_AVS
+	depends on POWER_AVS && HAS_IOMEM
 	select PM_OPP
 	select REGMAP
 	help
diff --git a/drivers/power/avs/qcom-cpr.c b/drivers/power/avs/qcom-cpr.c
index 9192fb7..bd7c3e4 100644
--- a/drivers/power/avs/qcom-cpr.c
+++ b/drivers/power/avs/qcom-cpr.c
@@ -517,7 +517,7 @@ static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
 		dev_dbg(drv->dev,
 			"UP: -> new_uV: %d last_uV: %d perf state: %u\n",
 			new_uV, last_uV, cpr_get_cur_perf_state(drv));
-	} else if (dir == DOWN) {
+	} else {
 		if (desc->clamp_timer_interval &&
 		    error_steps < desc->down_threshold) {
 			/*
@@ -567,7 +567,7 @@ static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
 		/* Disable auto nack down */
 		reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
 		val = 0;
-	} else if (dir == DOWN) {
+	} else {
 		/* Restore default threshold for UP */
 		reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
 		reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
@@ -1547,8 +1547,6 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
 		goto unlock;
 	}
 
-	dev_dbg(drv->dev, "number of OPPs: %d\n", drv->num_corners);
-
 	drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
 				    sizeof(*drv->corners),
 				    GFP_KERNEL);
@@ -1586,6 +1584,9 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
 				   acc_desc->enable_mask,
 				   acc_desc->enable_mask);
 
+	dev_info(drv->dev, "driver initialized with %u OPPs\n",
+		 drv->num_corners);
+
 unlock:
 	mutex_unlock(&drv->lock);
 
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 398fc95..eece97f 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -152,18 +152,18 @@ static void px30_iodomain_init(struct rockchip_iodomain *iod)
 	int ret;
 	u32 val;
 
-	/* if no VCCIO0 supply we should leave things alone */
+	/* if no VCCIO6 supply we should leave things alone */
 	if (!iod->supplies[PX30_IO_VSEL_VCCIO6_SUPPLY_NUM].reg)
 		return;
 
 	/*
-	 * set vccio0 iodomain to also use this framework
+	 * set vccio6 iodomain to also use this framework
 	 * instead of a special gpio.
 	 */
 	val = PX30_IO_VSEL_VCCIO6_SRC | (PX30_IO_VSEL_VCCIO6_SRC << 16);
 	ret = regmap_write(iod->grf, PX30_IO_VSEL, val);
 	if (ret < 0)
-		dev_warn(iod->dev, "couldn't update vccio0 ctrl\n");
+		dev_warn(iod->dev, "couldn't update vccio6 ctrl\n");
 }
 
 static void rk3288_iodomain_init(struct rockchip_iodomain *iod)
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index c721939..513efe8 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -26,7 +26,7 @@
 config POWER_RESET_AT91_RESET
 	tristate "Atmel AT91 reset driver"
 	depends on ARCH_AT91
-	default SOC_AT91SAM9 || SOC_SAMA5
+	default SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAMA5
 	help
 	  This driver supports restart for Atmel AT91SAM9 and SAMA5
 	  SoCs
@@ -34,7 +34,7 @@
 config POWER_RESET_AT91_SAMA5D2_SHDWC
 	tristate "Atmel AT91 SAMA5D2-Compatible shutdown controller driver"
 	depends on ARCH_AT91
-	default SOC_SAMA5
+	default SOC_SAM9X60 || SOC_SAMA5
 	help
 	  This driver supports the alternate shutdown controller for some Atmel
 	  SAMA5 SoCs. It is present for example on SAMA5D2 SoC.
@@ -141,14 +141,14 @@
 	  down via the LTC2952. Bindings are made in the device tree.
 
 config POWER_RESET_MT6323
-       bool "MediaTek MT6323 power-off driver"
-       depends on MFD_MT6397
-       help
-         The power-off driver is responsible for externally shutdown down
-         the power of a remote MediaTek SoC MT6323 is connected to through
-         controlling a tiny circuit BBPU inside MT6323 RTC.
+	bool "MediaTek MT6323 power-off driver"
+	depends on MFD_MT6397
+	help
+	  The power-off driver is responsible for externally shutdown down
+	  the power of a remote MediaTek SoC MT6323 is connected to through
+	  controlling a tiny circuit BBPU inside MT6323 RTC.
 
-         Say Y if you have a board where MT6323 could be found.
+	  Say Y if you have a board where MT6323 could be found.
 
 config POWER_RESET_QNAP
 	bool "QNAP power-off driver"
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 1c18f46..2fe3a62 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -66,7 +66,7 @@
 
 #define SHDW_CFG_NOT_USED	(32)
 
-struct shdwc_config {
+struct shdwc_reg_config {
 	u8 wkup_pin_input;
 	u8 mr_rtcwk_shift;
 	u8 mr_rttwk_shift;
@@ -74,8 +74,17 @@ struct shdwc_config {
 	u8 sr_rttwk_shift;
 };
 
+struct pmc_reg_config {
+	u8 mckr;
+};
+
+struct reg_config {
+	struct shdwc_reg_config shdwc;
+	struct pmc_reg_config pmc;
+};
+
 struct shdwc {
-	const struct shdwc_config *cfg;
+	const struct reg_config *rcfg;
 	struct clk *sclk;
 	void __iomem *shdwc_base;
 	void __iomem *mpddrc_base;
@@ -95,6 +104,7 @@ static const unsigned long long sdwc_dbc_period[] = {
 static void __init at91_wakeup_status(struct platform_device *pdev)
 {
 	struct shdwc *shdw = platform_get_drvdata(pdev);
+	const struct reg_config *rcfg = shdw->rcfg;
 	u32 reg;
 	char *reason = "unknown";
 
@@ -106,11 +116,11 @@ static void __init at91_wakeup_status(struct platform_device *pdev)
 	if (!reg)
 		return;
 
-	if (SHDW_WK_PIN(reg, shdw->cfg))
+	if (SHDW_WK_PIN(reg, &rcfg->shdwc))
 		reason = "WKUP pin";
-	else if (SHDW_RTCWK(reg, shdw->cfg))
+	else if (SHDW_RTCWK(reg, &rcfg->shdwc))
 		reason = "RTC";
-	else if (SHDW_RTTWK(reg, shdw->cfg))
+	else if (SHDW_RTTWK(reg, &rcfg->shdwc))
 		reason = "RTT";
 
 	pr_info("AT91: Wake-Up source: %s\n", reason);
@@ -131,9 +141,9 @@ static void at91_poweroff(void)
 		"	str	%1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
 
 		/* Switch the master clock source to slow clock. */
-		"1:	ldr	r6, [%4, #" __stringify(AT91_PMC_MCKR) "]\n\t"
+		"1:	ldr	r6, [%4, %5]\n\t"
 		"	bic	r6, r6,  #" __stringify(AT91_PMC_CSS) "\n\t"
-		"	str	r6, [%4, #" __stringify(AT91_PMC_MCKR) "]\n\t"
+		"	str	r6, [%4, %5]\n\t"
 		/* Wait for clock switch. */
 		"2:	ldr	r6, [%4, #" __stringify(AT91_PMC_SR) "]\n\t"
 		"	tst	r6, #"	    __stringify(AT91_PMC_MCKRDY) "\n\t"
@@ -148,7 +158,8 @@ static void at91_poweroff(void)
 		  "r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
 		  "r" (at91_shdwc->shdwc_base),
 		  "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW),
-		  "r" (at91_shdwc->pmc_base)
+		  "r" (at91_shdwc->pmc_base),
+		  "r" (at91_shdwc->rcfg->pmc.mckr)
 		: "r6");
 }
 
@@ -215,6 +226,7 @@ static u32 at91_shdwc_get_wakeup_input(struct platform_device *pdev,
 static void at91_shdwc_dt_configure(struct platform_device *pdev)
 {
 	struct shdwc *shdw = platform_get_drvdata(pdev);
+	const struct reg_config *rcfg = shdw->rcfg;
 	struct device_node *np = pdev->dev.of_node;
 	u32 mode = 0, tmp, input;
 
@@ -227,10 +239,10 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
 		mode |= AT91_SHDW_WKUPDBC(at91_shdwc_debouncer_value(pdev, tmp));
 
 	if (of_property_read_bool(np, "atmel,wakeup-rtc-timer"))
-		mode |= SHDW_RTCWKEN(shdw->cfg);
+		mode |= SHDW_RTCWKEN(&rcfg->shdwc);
 
 	if (of_property_read_bool(np, "atmel,wakeup-rtt-timer"))
-		mode |= SHDW_RTTWKEN(shdw->cfg);
+		mode |= SHDW_RTTWKEN(&rcfg->shdwc);
 
 	dev_dbg(&pdev->dev, "%s: mode = %#x\n", __func__, mode);
 	writel(mode, shdw->shdwc_base + AT91_SHDW_MR);
@@ -239,30 +251,40 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
 	writel(input, shdw->shdwc_base + AT91_SHDW_WUIR);
 }
 
-static const struct shdwc_config sama5d2_shdwc_config = {
-	.wkup_pin_input = 0,
-	.mr_rtcwk_shift = 17,
-	.mr_rttwk_shift	= SHDW_CFG_NOT_USED,
-	.sr_rtcwk_shift = 5,
-	.sr_rttwk_shift = SHDW_CFG_NOT_USED,
+static const struct reg_config sama5d2_reg_config = {
+	.shdwc = {
+		.wkup_pin_input = 0,
+		.mr_rtcwk_shift = 17,
+		.mr_rttwk_shift	= SHDW_CFG_NOT_USED,
+		.sr_rtcwk_shift = 5,
+		.sr_rttwk_shift = SHDW_CFG_NOT_USED,
+	},
+	.pmc = {
+		.mckr		= 0x30,
+	},
 };
 
-static const struct shdwc_config sam9x60_shdwc_config = {
-	.wkup_pin_input = 0,
-	.mr_rtcwk_shift = 17,
-	.mr_rttwk_shift = 16,
-	.sr_rtcwk_shift = 5,
-	.sr_rttwk_shift = 4,
+static const struct reg_config sam9x60_reg_config = {
+	.shdwc = {
+		.wkup_pin_input = 0,
+		.mr_rtcwk_shift = 17,
+		.mr_rttwk_shift = 16,
+		.sr_rtcwk_shift = 5,
+		.sr_rttwk_shift = 4,
+	},
+	.pmc = {
+		.mckr		= 0x28,
+	},
 };
 
 static const struct of_device_id at91_shdwc_of_match[] = {
 	{
 		.compatible = "atmel,sama5d2-shdwc",
-		.data = &sama5d2_shdwc_config,
+		.data = &sama5d2_reg_config,
 	},
 	{
 		.compatible = "microchip,sam9x60-shdwc",
-		.data = &sam9x60_shdwc_config,
+		.data = &sam9x60_reg_config,
 	}, {
 		/*sentinel*/
 	}
@@ -303,7 +325,7 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
 	}
 
 	match = of_match_node(at91_shdwc_of_match, pdev->dev.of_node);
-	at91_shdwc->cfg = match->data;
+	at91_shdwc->rcfg = match->data;
 
 	at91_shdwc->sclk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(at91_shdwc->sclk))
diff --git a/drivers/power/reset/gpio-restart.c b/drivers/power/reset/gpio-restart.c
index 308ca9d..5466eee 100644
--- a/drivers/power/reset/gpio-restart.c
+++ b/drivers/power/reset/gpio-restart.c
@@ -64,9 +64,11 @@ static int gpio_restart_probe(struct platform_device *pdev)
 
 	gpio_restart->reset_gpio = devm_gpiod_get(&pdev->dev, NULL,
 			open_source ? GPIOD_IN : GPIOD_OUT_LOW);
-	if (IS_ERR(gpio_restart->reset_gpio)) {
-		dev_err(&pdev->dev, "Could not get reset GPIO\n");
-		return PTR_ERR(gpio_restart->reset_gpio);
+	ret = PTR_ERR_OR_ZERO(gpio_restart->reset_gpio);
+	if (ret) {
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Could not get reset GPIO\n");
+		return ret;
 	}
 
 	gpio_restart->restart_handler.notifier_call = gpio_restart_notify;
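A hedged sketch of the quiet-on-defer pattern used above: PTR_ERR_OR_ZERO() folds the ERR_PTR check into an errno, and the error is only logged when it is not -EPROBE_DEFER. The connection ID is illustrative:

static int example_get_reset_gpio(struct platform_device *pdev,
				  struct gpio_desc **out)
{
	int ret;

	*out = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
	ret = PTR_ERR_OR_ZERO(*out);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "could not get reset GPIO: %d\n", ret);
		return ret;
	}

	return 0;
}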
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 27164a1..9a5591a 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -73,10 +73,10 @@
 	  provided by Wolfson Microelectronics WM831x PMICs.
 
 config WM8350_POWER
-        tristate "WM8350 PMU support"
-        depends on MFD_WM8350
-        help
-          Say Y here to enable support for the power management unit
+	tristate "WM8350 PMU support"
+	depends on MFD_WM8350
+	help
+	  Say Y here to enable support for the power management unit
 	  provided by the Wolfson Microelectronics WM8350 PMIC.
 
 config TEST_POWER
@@ -209,16 +209,16 @@
 	  Say Y to enable support for battery measured by WM97xx aux port.
 
 config BATTERY_SBS
-        tristate "SBS Compliant gas gauge"
-        depends on I2C
-        help
+	tristate "SBS Compliant gas gauge"
+	depends on I2C
+	help
 	  Say Y to include support for SBS battery driver for SBS-compliant
 	  gas gauges.
 
 config CHARGER_SBS
-        tristate "SBS Compliant charger"
-        depends on I2C
-        help
+	tristate "SBS Compliant charger"
+	depends on I2C
+	help
 	  Say Y to include support for SBS compliant battery chargers.
 
 config MANAGER_SBS
@@ -484,11 +484,11 @@
 	depends on REGULATOR
 	select EXTCON
 	help
-          Say Y to enable charger-manager support, which allows multiple
-          chargers attached to a battery and multiple batteries attached to a
-          system. The charger-manager also can monitor charging status in
-          runtime and in suspend-to-RAM by waking up the system periodically
-          with help of suspend_again support.
+	  Say Y to enable charger-manager support, which allows multiple
+	  chargers attached to a battery and multiple batteries attached to a
+	  system. The charger-manager also can monitor charging status in
+	  runtime and in suspend-to-RAM by waking up the system periodically
+	  with help of suspend_again support.
 
 config CHARGER_LT3651
 	tristate "Analog Devices LT3651 charger"
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 8a0f9d7..f69550d 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -789,7 +789,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
 		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
 		ret = -ENXIO;
 		break;
-	};
+	}
 
 	di->max_usb_in_curr.set_max = di->max_usb_in_curr.usb_type_max;
 	dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
@@ -1079,7 +1079,7 @@ static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
 		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
 		ret = -EPERM;
 		break;
-	};
+	}
 	di->max_usb_in_curr.set_max = di->max_usb_in_curr.usb_type_max;
 	return ret;
 }
@@ -2427,7 +2427,7 @@ static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
 
 	default:
 		break;
-	};
+	}
 }
 
 /**
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index c3912ee..b96f90a 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2221,10 +2221,10 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
 						ab8500_fg_update_cap_scalers(di);
 					queue_work(di->fg_wq, &di->fg_work);
 					break;
-				};
+				}
 			default:
 				break;
-			};
+			}
 			break;
 		case POWER_SUPPLY_PROP_TECHNOLOGY:
 			switch (ext->desc->type) {
@@ -2331,7 +2331,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
 		if (ret) {
 			dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_MAX_TIME_REG\n", __func__);
 			goto out;
-		};
+		}
 
 		ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
 			AB8505_RTC_PCUT_FLAG_TIME_REG, di->bm->fg_params->pcut_flag_time);
@@ -2339,7 +2339,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
 		if (ret) {
 			dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_FLAG_TIME_REG\n", __func__);
 			goto out;
-		};
+		}
 
 		ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
 			AB8505_RTC_PCUT_RESTART_REG, di->bm->fg_params->pcut_max_restart);
@@ -2347,7 +2347,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
 		if (ret) {
 			dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_RESTART_REG\n", __func__);
 			goto out;
-		};
+		}
 
 		ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
 			AB8505_RTC_PCUT_DEBOUNCE_REG, di->bm->fg_params->pcut_debounce_time);
@@ -2355,7 +2355,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
 		if (ret) {
 			dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_DEBOUNCE_REG\n", __func__);
 			goto out;
-		};
+		}
 
 		ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
 			AB8505_RTC_PCUT_CTL_STATUS_REG, di->bm->fg_params->pcut_enable);
@@ -2363,7 +2363,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
 		if (ret) {
 			dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_CTL_STATUS_REG\n", __func__);
 			goto out;
-		};
+		}
 	}
 out:
 	return ret;
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index e6e37d4..2fb33a0 100644
--- a/drivers/power/supply/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -1823,7 +1823,7 @@ static ssize_t abx500_chargalg_en_store(struct abx500_chargalg *di,
 			"Enter 0. Disable AC/USB Charging\n"
 			"1. Enable AC charging\n"
 			"2. Enable USB Charging\n");
-	};
+	}
 	return strlen(buf);
 }
 
diff --git a/drivers/power/supply/axp20x_ac_power.c b/drivers/power/supply/axp20x_ac_power.c
index 0d34a93..ac36001 100644
--- a/drivers/power/supply/axp20x_ac_power.c
+++ b/drivers/power/supply/axp20x_ac_power.c
@@ -15,6 +15,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 #include <linux/power_supply.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
@@ -23,6 +24,9 @@
 #define AXP20X_PWR_STATUS_ACIN_PRESENT	BIT(7)
 #define AXP20X_PWR_STATUS_ACIN_AVAIL	BIT(6)
 
+#define AXP813_ACIN_PATH_SEL		BIT(7)
+#define AXP813_ACIN_PATH_SEL_TO_BIT(x)	(!!(x) << 7)
+
 #define AXP813_VHOLD_MASK		GENMASK(5, 3)
 #define AXP813_VHOLD_UV_TO_BIT(x)	((((x) / 100000) - 40) << 3)
 #define AXP813_VHOLD_REG_TO_UV(x)	\
@@ -40,6 +44,9 @@ struct axp20x_ac_power {
 	struct power_supply *supply;
 	struct iio_channel *acin_v;
 	struct iio_channel *acin_i;
+	bool has_acin_path_sel;
+	unsigned int num_irqs;
+	unsigned int irqs[];
 };
 
 static irqreturn_t axp20x_ac_power_irq(int irq, void *devid)
@@ -86,6 +93,17 @@ static int axp20x_ac_power_get_property(struct power_supply *psy,
 			return ret;
 
 		val->intval = !!(reg & AXP20X_PWR_STATUS_ACIN_AVAIL);
+
+		/* ACIN_PATH_SEL disables ACIN even if ACIN_AVAIL is set. */
+		if (val->intval && power->has_acin_path_sel) {
+			ret = regmap_read(power->regmap, AXP813_ACIN_PATH_CTRL,
+					  &reg);
+			if (ret)
+				return ret;
+
+			val->intval = !!(reg & AXP813_ACIN_PATH_SEL);
+		}
+
 		return 0;
 
 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
@@ -143,6 +161,11 @@ static int axp813_ac_power_set_property(struct power_supply *psy,
 	struct axp20x_ac_power *power = power_supply_get_drvdata(psy);
 
 	switch (psp) {
+	case POWER_SUPPLY_PROP_ONLINE:
+		return regmap_update_bits(power->regmap, AXP813_ACIN_PATH_CTRL,
+					  AXP813_ACIN_PATH_SEL,
+					  AXP813_ACIN_PATH_SEL_TO_BIT(val->intval));
+
 	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
 		if (val->intval < 4000000 || val->intval > 4700000)
 			return -EINVAL;
@@ -169,7 +192,8 @@ static int axp813_ac_power_set_property(struct power_supply *psy,
 static int axp813_ac_power_prop_writeable(struct power_supply *psy,
 					  enum power_supply_property psp)
 {
-	return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
+	return psp == POWER_SUPPLY_PROP_ONLINE ||
+	       psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
 	       psp == POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT;
 }
 
@@ -221,34 +245,86 @@ static const struct power_supply_desc axp813_ac_power_desc = {
 	.set_property = axp813_ac_power_set_property,
 };
 
+static const char * const axp20x_irq_names[] = {
+	"ACIN_PLUGIN",
+	"ACIN_REMOVAL",
+};
+
 struct axp_data {
 	const struct power_supply_desc	*power_desc;
+	const char * const		*irq_names;
+	unsigned int			num_irq_names;
 	bool				acin_adc;
+	bool				acin_path_sel;
 };
 
 static const struct axp_data axp20x_data = {
-	.power_desc = &axp20x_ac_power_desc,
-	.acin_adc = true,
+	.power_desc	= &axp20x_ac_power_desc,
+	.irq_names	= axp20x_irq_names,
+	.num_irq_names	= ARRAY_SIZE(axp20x_irq_names),
+	.acin_adc	= true,
+	.acin_path_sel	= false,
 };
 
 static const struct axp_data axp22x_data = {
-	.power_desc = &axp22x_ac_power_desc,
-	.acin_adc = false,
+	.power_desc	= &axp22x_ac_power_desc,
+	.irq_names	= axp20x_irq_names,
+	.num_irq_names	= ARRAY_SIZE(axp20x_irq_names),
+	.acin_adc	= false,
+	.acin_path_sel	= false,
 };
 
 static const struct axp_data axp813_data = {
-	.power_desc = &axp813_ac_power_desc,
-	.acin_adc = false,
+	.power_desc	= &axp813_ac_power_desc,
+	.irq_names	= axp20x_irq_names,
+	.num_irq_names	= ARRAY_SIZE(axp20x_irq_names),
+	.acin_adc	= false,
+	.acin_path_sel	= true,
 };
 
+#ifdef CONFIG_PM_SLEEP
+static int axp20x_ac_power_suspend(struct device *dev)
+{
+	struct axp20x_ac_power *power = dev_get_drvdata(dev);
+	int i = 0;
+
+	/*
+	 * Allow wake via ACIN_PLUGIN only.
+	 *
+	 * As nested threaded IRQs are not automatically disabled during
+	 * suspend, we must explicitly disable the remainder of the IRQs.
+	 */
+	if (device_may_wakeup(&power->supply->dev))
+		enable_irq_wake(power->irqs[i++]);
+	while (i < power->num_irqs)
+		disable_irq(power->irqs[i++]);
+
+	return 0;
+}
+
+static int axp20x_ac_power_resume(struct device *dev)
+{
+	struct axp20x_ac_power *power = dev_get_drvdata(dev);
+	int i = 0;
+
+	if (device_may_wakeup(&power->supply->dev))
+		disable_irq_wake(power->irqs[i++]);
+	while (i < power->num_irqs)
+		enable_irq(power->irqs[i++]);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(axp20x_ac_power_pm_ops, axp20x_ac_power_suspend,
+						 axp20x_ac_power_resume);
+
 static int axp20x_ac_power_probe(struct platform_device *pdev)
 {
 	struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
 	struct power_supply_config psy_cfg = {};
 	struct axp20x_ac_power *power;
 	const struct axp_data *axp_data;
-	static const char * const irq_names[] = { "ACIN_PLUGIN", "ACIN_REMOVAL",
-		NULL };
 	int i, irq, ret;
 
 	if (!of_device_is_available(pdev->dev.of_node))
@@ -259,12 +335,14 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	power = devm_kzalloc(&pdev->dev, sizeof(*power), GFP_KERNEL);
+	axp_data = of_device_get_match_data(&pdev->dev);
+
+	power = devm_kzalloc(&pdev->dev,
+			     struct_size(power, irqs, axp_data->num_irq_names),
+			     GFP_KERNEL);
 	if (!power)
 		return -ENOMEM;
 
-	axp_data = of_device_get_match_data(&pdev->dev);
-
 	if (axp_data->acin_adc) {
 		power->acin_v = devm_iio_channel_get(&pdev->dev, "acin_v");
 		if (IS_ERR(power->acin_v)) {
@@ -282,6 +360,8 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
 	}
 
 	power->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	power->has_acin_path_sel = axp_data->acin_path_sel;
+	power->num_irqs = axp_data->num_irq_names;
 
 	platform_set_drvdata(pdev, power);
 
@@ -295,20 +375,22 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
 		return PTR_ERR(power->supply);
 
 	/* Request irqs after registering, as irqs may trigger immediately */
-	for (i = 0; irq_names[i]; i++) {
-		irq = platform_get_irq_byname(pdev, irq_names[i]);
+	for (i = 0; i < axp_data->num_irq_names; i++) {
+		irq = platform_get_irq_byname(pdev, axp_data->irq_names[i]);
 		if (irq < 0) {
-			dev_warn(&pdev->dev, "No IRQ for %s: %d\n",
-				 irq_names[i], irq);
-			continue;
+			dev_err(&pdev->dev, "No IRQ for %s: %d\n",
+				axp_data->irq_names[i], irq);
+			return irq;
 		}
-		irq = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
-		ret = devm_request_any_context_irq(&pdev->dev, irq,
+		power->irqs[i] = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
+		ret = devm_request_any_context_irq(&pdev->dev, power->irqs[i],
 						   axp20x_ac_power_irq, 0,
 						   DRVNAME, power);
-		if (ret < 0)
-			dev_warn(&pdev->dev, "Error requesting %s IRQ: %d\n",
-				 irq_names[i], ret);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Error requesting %s IRQ: %d\n",
+				axp_data->irq_names[i], ret);
+			return ret;
+		}
 	}
 
 	return 0;
@@ -331,8 +413,9 @@ MODULE_DEVICE_TABLE(of, axp20x_ac_power_match);
 static struct platform_driver axp20x_ac_power_driver = {
 	.probe = axp20x_ac_power_probe,
 	.driver = {
-		.name = DRVNAME,
-		.of_match_table = axp20x_ac_power_match,
+		.name		= DRVNAME,
+		.of_match_table	= axp20x_ac_power_match,
+		.pm		= &axp20x_ac_power_pm_ops,
 	},
 };
 
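The irqs[] flexible array plus struct_size() lets the per-variant IRQ list live in a single allocation. A stand-alone sketch of the sizing idiom, with hypothetical type names:

struct example_power {
	unsigned int num_irqs;
	unsigned int irqs[];		/* flexible array member, must be last */
};

static struct example_power *example_alloc(struct device *dev, unsigned int n)
{
	struct example_power *p;

	/* struct_size() = sizeof(*p) + n * sizeof(p->irqs[0]), with overflow checking. */
	p = devm_kzalloc(dev, struct_size(p, irqs, n), GFP_KERNEL);
	if (!p)
		return NULL;

	p->num_irqs = n;
	return p;
}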
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index 5f0a572..4fde24b 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -16,6 +16,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 #include <linux/power_supply.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
@@ -29,6 +30,9 @@
 
 #define AXP20X_USB_STATUS_VBUS_VALID	BIT(2)
 
+#define AXP20X_VBUS_PATH_SEL		BIT(7)
+#define AXP20X_VBUS_PATH_SEL_OFFSET	7
+
 #define AXP20X_VBUS_VHOLD_uV(b)		(4000000 + (((b) >> 3) & 7) * 100000)
 #define AXP20X_VBUS_VHOLD_MASK		GENMASK(5, 3)
 #define AXP20X_VBUS_VHOLD_OFFSET	3
@@ -57,7 +61,6 @@
 #define DEBOUNCE_TIME			msecs_to_jiffies(50)
 
 struct axp20x_usb_power {
-	struct device_node *np;
 	struct regmap *regmap;
 	struct power_supply *supply;
 	enum axp20x_variants axp20x_id;
@@ -65,14 +68,32 @@ struct axp20x_usb_power {
 	struct iio_channel *vbus_i;
 	struct delayed_work vbus_detect;
 	unsigned int old_status;
+	unsigned int online;
+	unsigned int num_irqs;
+	unsigned int irqs[];
 };
 
+static bool axp20x_usb_vbus_needs_polling(struct axp20x_usb_power *power)
+{
+	/*
+	 * Polling is only necessary while VBUS is offline. While online, a
+	 * present->absent transition implies an online->offline transition
+	 * and will trigger the VBUS_REMOVAL IRQ.
+	 */
+	if (power->axp20x_id >= AXP221_ID && !power->online)
+		return true;
+
+	return false;
+}
+
 static irqreturn_t axp20x_usb_power_irq(int irq, void *devid)
 {
 	struct axp20x_usb_power *power = devid;
 
 	power_supply_changed(power->supply);
 
+	mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+
 	return IRQ_HANDLED;
 }
 
@@ -92,17 +113,11 @@ static void axp20x_usb_power_poll_vbus(struct work_struct *work)
 		power_supply_changed(power->supply);
 
 	power->old_status = val;
+	power->online = val & AXP20X_PWR_STATUS_VBUS_USED;
 
 out:
-	mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
-}
-
-static bool axp20x_usb_vbus_needs_polling(struct axp20x_usb_power *power)
-{
-	if (power->axp20x_id >= AXP221_ID)
-		return true;
-
-	return false;
+	if (axp20x_usb_vbus_needs_polling(power))
+		mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
 }
 
 static int axp20x_get_current_max(struct axp20x_usb_power *power, int *val)
@@ -264,6 +279,16 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
 	return 0;
 }
 
+static int axp813_usb_power_set_online(struct axp20x_usb_power *power,
+				       int intval)
+{
+	int val = !intval << AXP20X_VBUS_PATH_SEL_OFFSET;
+
+	return regmap_update_bits(power->regmap,
+				  AXP20X_VBUS_IPSOUT_MGMT,
+				  AXP20X_VBUS_PATH_SEL, val);
+}
+
 static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power,
 					    int intval)
 {
@@ -345,6 +370,11 @@ static int axp20x_usb_power_set_property(struct power_supply *psy,
 	struct axp20x_usb_power *power = power_supply_get_drvdata(psy);
 
 	switch (psp) {
+	case POWER_SUPPLY_PROP_ONLINE:
+		if (power->axp20x_id != AXP813_ID)
+			return -EINVAL;
+		return axp813_usb_power_set_online(power, val->intval);
+
 	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
 		return axp20x_usb_power_set_voltage_min(power, val->intval);
 
@@ -364,6 +394,18 @@ static int axp20x_usb_power_set_property(struct power_supply *psy,
 static int axp20x_usb_power_prop_writeable(struct power_supply *psy,
 					   enum power_supply_property psp)
 {
+	struct axp20x_usb_power *power = power_supply_get_drvdata(psy);
+
+	/*
+	 * The VBUS path select flag works differently on AXP288 and newer:
+	 *  - On AXP20x and AXP22x, the flag enables VBUS (ignoring N_VBUSEN).
+	 *  - On AXP288 and AXP8xx, the flag disables VBUS (ignoring N_VBUSEN).
+	 * We only expose the control on variants where it can be used to force
+	 * the VBUS input offline.
+	 */
+	if (psp == POWER_SUPPLY_PROP_ONLINE)
+		return power->axp20x_id == AXP813_ID;
+
 	return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
 	       psp == POWER_SUPPLY_PROP_CURRENT_MAX;
 }
@@ -406,6 +448,92 @@ static const struct power_supply_desc axp22x_usb_power_desc = {
 	.set_property = axp20x_usb_power_set_property,
 };
 
+static const char * const axp20x_irq_names[] = {
+	"VBUS_PLUGIN",
+	"VBUS_REMOVAL",
+	"VBUS_VALID",
+	"VBUS_NOT_VALID",
+};
+
+static const char * const axp22x_irq_names[] = {
+	"VBUS_PLUGIN",
+	"VBUS_REMOVAL",
+};
+
+struct axp_data {
+	const struct power_supply_desc	*power_desc;
+	const char * const		*irq_names;
+	unsigned int			num_irq_names;
+	enum axp20x_variants		axp20x_id;
+};
+
+static const struct axp_data axp202_data = {
+	.power_desc	= &axp20x_usb_power_desc,
+	.irq_names	= axp20x_irq_names,
+	.num_irq_names	= ARRAY_SIZE(axp20x_irq_names),
+	.axp20x_id	= AXP202_ID,
+};
+
+static const struct axp_data axp221_data = {
+	.power_desc	= &axp22x_usb_power_desc,
+	.irq_names	= axp22x_irq_names,
+	.num_irq_names	= ARRAY_SIZE(axp22x_irq_names),
+	.axp20x_id	= AXP221_ID,
+};
+
+static const struct axp_data axp223_data = {
+	.power_desc	= &axp22x_usb_power_desc,
+	.irq_names	= axp22x_irq_names,
+	.num_irq_names	= ARRAY_SIZE(axp22x_irq_names),
+	.axp20x_id	= AXP223_ID,
+};
+
+static const struct axp_data axp813_data = {
+	.power_desc	= &axp22x_usb_power_desc,
+	.irq_names	= axp22x_irq_names,
+	.num_irq_names	= ARRAY_SIZE(axp22x_irq_names),
+	.axp20x_id	= AXP813_ID,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int axp20x_usb_power_suspend(struct device *dev)
+{
+	struct axp20x_usb_power *power = dev_get_drvdata(dev);
+	int i = 0;
+
+	/*
+	 * Allow wake via VBUS_PLUGIN only.
+	 *
+	 * As nested threaded IRQs are not automatically disabled during
+	 * suspend, we must explicitly disable the remainder of the IRQs.
+	 */
+	if (device_may_wakeup(&power->supply->dev))
+		enable_irq_wake(power->irqs[i++]);
+	while (i < power->num_irqs)
+		disable_irq(power->irqs[i++]);
+
+	return 0;
+}
+
+static int axp20x_usb_power_resume(struct device *dev)
+{
+	struct axp20x_usb_power *power = dev_get_drvdata(dev);
+	int i = 0;
+
+	if (device_may_wakeup(&power->supply->dev))
+		disable_irq_wake(power->irqs[i++]);
+	while (i < power->num_irqs)
+		enable_irq(power->irqs[i++]);
+
+	mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(axp20x_usb_power_pm_ops, axp20x_usb_power_suspend,
+						  axp20x_usb_power_resume);
+
 static int configure_iio_channels(struct platform_device *pdev,
 				  struct axp20x_usb_power *power)
 {
@@ -441,12 +569,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
 	struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
 	struct power_supply_config psy_cfg = {};
 	struct axp20x_usb_power *power;
-	static const char * const axp20x_irq_names[] = { "VBUS_PLUGIN",
-		"VBUS_REMOVAL", "VBUS_VALID", "VBUS_NOT_VALID", NULL };
-	static const char * const axp22x_irq_names[] = {
-		"VBUS_PLUGIN", "VBUS_REMOVAL", NULL };
-	const char * const *irq_names;
-	const struct power_supply_desc *usb_power_desc;
+	const struct axp_data *axp_data;
 	int i, irq, ret;
 
 	if (!of_device_is_available(pdev->dev.of_node))
@@ -457,16 +580,19 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	power = devm_kzalloc(&pdev->dev, sizeof(*power), GFP_KERNEL);
+	axp_data = of_device_get_match_data(&pdev->dev);
+
+	power = devm_kzalloc(&pdev->dev,
+			     struct_size(power, irqs, axp_data->num_irq_names),
+			     GFP_KERNEL);
 	if (!power)
 		return -ENOMEM;
 
 	platform_set_drvdata(pdev, power);
-	power->axp20x_id = (enum axp20x_variants)of_device_get_match_data(
-								&pdev->dev);
 
-	power->np = pdev->dev.of_node;
+	power->axp20x_id = axp_data->axp20x_id;
 	power->regmap = axp20x->regmap;
+	power->num_irqs = axp_data->num_irq_names;
 
 	if (power->axp20x_id == AXP202_ID) {
 		/* Enable vbus valid checking */
@@ -483,18 +609,6 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
 
 		if (ret)
 			return ret;
-
-		usb_power_desc = &axp20x_usb_power_desc;
-		irq_names = axp20x_irq_names;
-	} else if (power->axp20x_id == AXP221_ID ||
-		   power->axp20x_id == AXP223_ID ||
-		   power->axp20x_id == AXP813_ID) {
-		usb_power_desc = &axp22x_usb_power_desc;
-		irq_names = axp22x_irq_names;
-	} else {
-		dev_err(&pdev->dev, "Unsupported AXP variant: %ld\n",
-			axp20x->variant);
-		return -EINVAL;
 	}
 
 	if (power->axp20x_id == AXP813_ID) {
@@ -506,25 +620,29 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
 	psy_cfg.of_node = pdev->dev.of_node;
 	psy_cfg.drv_data = power;
 
-	power->supply = devm_power_supply_register(&pdev->dev, usb_power_desc,
+	power->supply = devm_power_supply_register(&pdev->dev,
+						   axp_data->power_desc,
 						   &psy_cfg);
 	if (IS_ERR(power->supply))
 		return PTR_ERR(power->supply);
 
 	/* Request irqs after registering, as irqs may trigger immediately */
-	for (i = 0; irq_names[i]; i++) {
-		irq = platform_get_irq_byname(pdev, irq_names[i]);
+	for (i = 0; i < axp_data->num_irq_names; i++) {
+		irq = platform_get_irq_byname(pdev, axp_data->irq_names[i]);
 		if (irq < 0) {
-			dev_warn(&pdev->dev, "No IRQ for %s: %d\n",
-				 irq_names[i], irq);
-			continue;
+			dev_err(&pdev->dev, "No IRQ for %s: %d\n",
+				axp_data->irq_names[i], irq);
+			return irq;
 		}
-		irq = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
-		ret = devm_request_any_context_irq(&pdev->dev, irq,
-				axp20x_usb_power_irq, 0, DRVNAME, power);
-		if (ret < 0)
-			dev_warn(&pdev->dev, "Error requesting %s IRQ: %d\n",
-				 irq_names[i], ret);
+		power->irqs[i] = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
+		ret = devm_request_any_context_irq(&pdev->dev, power->irqs[i],
+						   axp20x_usb_power_irq, 0,
+						   DRVNAME, power);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Error requesting %s IRQ: %d\n",
+				axp_data->irq_names[i], ret);
+			return ret;
+		}
 	}
 
 	INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
@@ -546,16 +664,16 @@ static int axp20x_usb_power_remove(struct platform_device *pdev)
 static const struct of_device_id axp20x_usb_power_match[] = {
 	{
 		.compatible = "x-powers,axp202-usb-power-supply",
-		.data = (void *)AXP202_ID,
+		.data = &axp202_data,
 	}, {
 		.compatible = "x-powers,axp221-usb-power-supply",
-		.data = (void *)AXP221_ID,
+		.data = &axp221_data,
 	}, {
 		.compatible = "x-powers,axp223-usb-power-supply",
-		.data = (void *)AXP223_ID,
+		.data = &axp223_data,
 	}, {
 		.compatible = "x-powers,axp813-usb-power-supply",
-		.data = (void *)AXP813_ID,
+		.data = &axp813_data,
 	}, { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, axp20x_usb_power_match);
@@ -564,8 +682,9 @@ static struct platform_driver axp20x_usb_power_driver = {
 	.probe = axp20x_usb_power_probe,
 	.remove = axp20x_usb_power_remove,
 	.driver = {
-		.name = DRVNAME,
-		.of_match_table = axp20x_usb_power_match,
+		.name		= DRVNAME,
+		.of_match_table	= axp20x_usb_power_match,
+		.pm		= &axp20x_usb_power_pm_ops,
 	},
 };
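
A quick illustration of the new AXP813 POWER_SUPPLY_PROP_ONLINE handling above: because the path-select bit in AXP20X_VBUS_IPSOUT_MGMT disables the VBUS input when set, the driver writes the negation of the requested online state into bit 7. The stand-alone sketch below only mirrors that bit arithmetic; the sample register value is an assumption for illustration, not taken from the datasheet.

	#include <stdio.h>
	#include <stdint.h>

	#define VBUS_PATH_SEL		(1u << 7)	/* set = VBUS input forced offline on AXP813 */
	#define VBUS_PATH_SEL_OFFSET	7

	/* Mirrors the regmap_update_bits() call: only bit 7 of the register changes. */
	static uint8_t encode_online(uint8_t reg, int online)
	{
		uint8_t val = (uint8_t)(!online << VBUS_PATH_SEL_OFFSET);

		return (reg & ~VBUS_PATH_SEL) | val;
	}

	int main(void)
	{
		uint8_t reg = 0x00;	/* assumed current register value, purely illustrative */

		printf("online=1 -> 0x%02x\n", encode_online(reg, 1));	/* bit 7 cleared, VBUS used */
		printf("online=0 -> 0x%02x\n", encode_online(reg, 0));	/* bit 7 set, VBUS forced off */
		return 0;
	}
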
 
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 9d1ec8d..aebd125 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -25,12 +25,20 @@
 #define BQ25895_ID			7
 #define BQ25896_ID			0
 
+enum bq25890_chip_version {
+	BQ25890,
+	BQ25892,
+	BQ25895,
+	BQ25896,
+};
+
 enum bq25890_fields {
 	F_EN_HIZ, F_EN_ILIM, F_IILIM,				     /* Reg00 */
 	F_BHOT, F_BCOLD, F_VINDPM_OFS,				     /* Reg01 */
 	F_CONV_START, F_CONV_RATE, F_BOOSTF, F_ICO_EN,
 	F_HVDCP_EN, F_MAXC_EN, F_FORCE_DPM, F_AUTO_DPDM_EN,	     /* Reg02 */
-	F_BAT_LOAD_EN, F_WD_RST, F_OTG_CFG, F_CHG_CFG, F_SYSVMIN,    /* Reg03 */
+	F_BAT_LOAD_EN, F_WD_RST, F_OTG_CFG, F_CHG_CFG, F_SYSVMIN,
+	F_MIN_VBAT_SEL,						     /* Reg03 */
 	F_PUMPX_EN, F_ICHG,					     /* Reg04 */
 	F_IPRECHG, F_ITERM,					     /* Reg05 */
 	F_VREG, F_BATLOWV, F_VRECHG,				     /* Reg06 */
@@ -39,8 +47,9 @@ enum bq25890_fields {
 	F_BATCMP, F_VCLAMP, F_TREG,				     /* Reg08 */
 	F_FORCE_ICO, F_TMR2X_EN, F_BATFET_DIS, F_JEITA_VSET,
 	F_BATFET_DLY, F_BATFET_RST_EN, F_PUMPX_UP, F_PUMPX_DN,	     /* Reg09 */
-	F_BOOSTV, F_BOOSTI,					     /* Reg0A */
-	F_VBUS_STAT, F_CHG_STAT, F_PG_STAT, F_SDP_STAT, F_VSYS_STAT, /* Reg0B */
+	F_BOOSTV, F_PFM_OTG_DIS, F_BOOSTI,			     /* Reg0A */
+	F_VBUS_STAT, F_CHG_STAT, F_PG_STAT, F_SDP_STAT, F_0B_RSVD,
+	F_VSYS_STAT,						     /* Reg0B */
 	F_WD_FAULT, F_BOOST_FAULT, F_CHG_FAULT, F_BAT_FAULT,
 	F_NTC_FAULT,						     /* Reg0C */
 	F_FORCE_VINDPM, F_VINDPM,				     /* Reg0D */
@@ -91,7 +100,7 @@ struct bq25890_device {
 	struct regmap *rmap;
 	struct regmap_field *rmap_fields[F_MAX_FIELDS];
 
-	int chip_id;
+	enum bq25890_chip_version chip_version;
 	struct bq25890_init_data init_data;
 	struct bq25890_state state;
 
@@ -111,8 +120,7 @@ static const struct regmap_access_table bq25890_writeable_regs = {
 static const struct regmap_range bq25890_volatile_reg_ranges[] = {
 	regmap_reg_range(0x00, 0x00),
 	regmap_reg_range(0x09, 0x09),
-	regmap_reg_range(0x0b, 0x0c),
-	regmap_reg_range(0x0e, 0x14),
+	regmap_reg_range(0x0b, 0x14),
 };
 
 static const struct regmap_access_table bq25890_volatile_regs = {
@@ -155,7 +163,7 @@ static const struct reg_field bq25890_reg_fields[] = {
 	[F_OTG_CFG]		= REG_FIELD(0x03, 5, 5),
 	[F_CHG_CFG]		= REG_FIELD(0x03, 4, 4),
 	[F_SYSVMIN]		= REG_FIELD(0x03, 1, 3),
-	/* MIN_VBAT_SEL on BQ25896 */
+	[F_MIN_VBAT_SEL]	= REG_FIELD(0x03, 0, 0), // BQ25896 only
 	/* REG04 */
 	[F_PUMPX_EN]		= REG_FIELD(0x04, 7, 7),
 	[F_ICHG]		= REG_FIELD(0x04, 0, 6),
@@ -188,8 +196,8 @@ static const struct reg_field bq25890_reg_fields[] = {
 	[F_PUMPX_DN]		= REG_FIELD(0x09, 0, 0),
 	/* REG0A */
 	[F_BOOSTV]		= REG_FIELD(0x0A, 4, 7),
-	/* PFM_OTG_DIS 3 on BQ25896 */
 	[F_BOOSTI]		= REG_FIELD(0x0A, 0, 2), // reserved on BQ25895
+	[F_PFM_OTG_DIS]		= REG_FIELD(0x0A, 3, 3), // BQ25896 only
 	/* REG0B */
 	[F_VBUS_STAT]		= REG_FIELD(0x0B, 5, 7),
 	[F_CHG_STAT]		= REG_FIELD(0x0B, 3, 4),
@@ -275,6 +283,7 @@ static const union {
 	struct bq25890_lookup lt;
 } bq25890_tables[] = {
 	/* range tables */
+	/* TODO: BQ25896 has max ICHG 3008 mA */
 	[TBL_ICHG] =	{ .rt = {0,	  5056000, 64000} },	 /* uA */
 	[TBL_ITERM] =	{ .rt = {64000,   1024000, 64000} },	 /* uA */
 	[TBL_VREG] =	{ .rt = {3840000, 4608000, 16000} },	 /* uV */
@@ -391,11 +400,13 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
 		break;
 
 	case POWER_SUPPLY_PROP_MODEL_NAME:
-		if (bq->chip_id == BQ25890_ID)
+		if (bq->chip_version == BQ25890)
 			val->strval = "BQ25890";
-		else if (bq->chip_id == BQ25895_ID)
+		else if (bq->chip_version == BQ25892)
+			val->strval = "BQ25892";
+		else if (bq->chip_version == BQ25895)
 			val->strval = "BQ25895";
-		else if (bq->chip_id == BQ25896_ID)
+		else if (bq->chip_version == BQ25896)
 			val->strval = "BQ25896";
 		else
 			val->strval = "UNKNOWN";
@@ -741,6 +752,56 @@ static int bq25890_usb_notifier(struct notifier_block *nb, unsigned long val,
 	return NOTIFY_OK;
 }
 
+static int bq25890_get_chip_version(struct bq25890_device *bq)
+{
+	int id, rev;
+
+	id = bq25890_field_read(bq, F_PN);
+	if (id < 0) {
+		dev_err(bq->dev, "Cannot read chip ID.\n");
+		return id;
+	}
+
+	rev = bq25890_field_read(bq, F_DEV_REV);
+	if (rev < 0) {
+		dev_err(bq->dev, "Cannot read chip revision.\n");
+		return rev;
+	}
+
+	switch (id) {
+	case BQ25890_ID:
+		bq->chip_version = BQ25890;
+		break;
+
+	/* BQ25892 and BQ25896 share the same ID 0 */
+	case BQ25896_ID:
+		switch (rev) {
+		case 2:
+			bq->chip_version = BQ25896;
+			break;
+		case 1:
+			bq->chip_version = BQ25892;
+			break;
+		default:
+			dev_err(bq->dev,
+				"Unknown device revision %d, assume BQ25892\n",
+				rev);
+			bq->chip_version = BQ25892;
+		}
+		break;
+
+	case BQ25895_ID:
+		bq->chip_version = BQ25895;
+		break;
+
+	default:
+		dev_err(bq->dev, "Unknown chip ID %d\n", id);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 static int bq25890_irq_probe(struct bq25890_device *bq)
 {
 	struct gpio_desc *irq;
@@ -859,16 +920,10 @@ static int bq25890_probe(struct i2c_client *client,
 
 	i2c_set_clientdata(client, bq);
 
-	bq->chip_id = bq25890_field_read(bq, F_PN);
-	if (bq->chip_id < 0) {
-		dev_err(dev, "Cannot read chip ID.\n");
-		return bq->chip_id;
-	}
-
-	if ((bq->chip_id != BQ25890_ID) && (bq->chip_id != BQ25895_ID)
-			&& (bq->chip_id != BQ25896_ID)) {
-		dev_err(dev, "Chip with ID=%d, not supported!\n", bq->chip_id);
-		return -ENODEV;
+	ret = bq25890_get_chip_version(bq);
+	if (ret) {
+		dev_err(dev, "Cannot read chip ID or unknown chip.\n");
+		return ret;
 	}
 
 	if (!dev->platform_data) {
@@ -986,12 +1041,18 @@ static const struct dev_pm_ops bq25890_pm = {
 
 static const struct i2c_device_id bq25890_i2c_ids[] = {
 	{ "bq25890", 0 },
+	{ "bq25892", 0 },
+	{ "bq25895", 0 },
+	{ "bq25896", 0 },
 	{},
 };
 MODULE_DEVICE_TABLE(i2c, bq25890_i2c_ids);
 
 static const struct of_device_id bq25890_of_match[] = {
 	{ .compatible = "ti,bq25890", },
+	{ .compatible = "ti,bq25892", },
+	{ .compatible = "ti,bq25895", },
+	{ .compatible = "ti,bq25896", },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, bq25890_of_match);
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
index 6cc7c39..30c3d37 100644
--- a/drivers/power/supply/cros_usbpd-charger.c
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -5,7 +5,6 @@
  * Copyright (c) 2014 - 2018 Google, Inc
  */
 
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
@@ -132,11 +131,8 @@ static int cros_usbpd_charger_get_num_ports(struct charger_data *charger)
 	ret = cros_usbpd_charger_ec_command(charger, 0,
 					    EC_CMD_CHARGE_PORT_COUNT,
 					    NULL, 0, &resp, sizeof(resp));
-	if (ret < 0) {
-		dev_err(charger->dev,
-			"Unable to get the number of ports (err:0x%x)\n", ret);
+	if (ret < 0)
 		return ret;
-	}
 
 	return resp.port_count;
 }
@@ -148,11 +144,8 @@ static int cros_usbpd_charger_get_usbpd_num_ports(struct charger_data *charger)
 
 	ret = cros_usbpd_charger_ec_command(charger, 0, EC_CMD_USB_PD_PORTS,
 					    NULL, 0, &resp, sizeof(resp));
-	if (ret < 0) {
-		dev_err(charger->dev,
-			"Unable to get the number or ports (err:0x%x)\n", ret);
+	if (ret < 0)
 		return ret;
-	}
 
 	return resp.num_ports;
 }
diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c
index 35816d4..2748715 100644
--- a/drivers/power/supply/ingenic-battery.c
+++ b/drivers/power/supply/ingenic-battery.c
@@ -100,10 +100,17 @@ static int ingenic_battery_set_scale(struct ingenic_battery *bat)
 		return -EINVAL;
 	}
 
-	return iio_write_channel_attribute(bat->channel,
-					   scale_raw[best_idx],
-					   scale_raw[best_idx + 1],
-					   IIO_CHAN_INFO_SCALE);
+	/* Only set scale if there is more than one (fractional) entry */
+	if (scale_len > 2) {
+		ret = iio_write_channel_attribute(bat->channel,
+						  scale_raw[best_idx],
+						  scale_raw[best_idx + 1],
+						  IIO_CHAN_INFO_SCALE);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static enum power_supply_property ingenic_battery_properties[] = {
diff --git a/drivers/power/supply/ipaq_micro_battery.c b/drivers/power/supply/ipaq_micro_battery.c
index 03592ce..192d9db 100644
--- a/drivers/power/supply/ipaq_micro_battery.c
+++ b/drivers/power/supply/ipaq_micro_battery.c
@@ -149,7 +149,7 @@ static int micro_batt_get_property(struct power_supply *b,
 		default:
 			val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
 			break;
-		};
+		}
 		break;
 	case POWER_SUPPLY_PROP_STATUS:
 		val->intval = get_status(b);
@@ -168,7 +168,7 @@ static int micro_batt_get_property(struct power_supply *b,
 		break;
 	default:
 		return -EINVAL;
-	};
+	}
 
 	return 0;
 }
@@ -185,7 +185,7 @@ static int micro_ac_get_property(struct power_supply *b,
 		break;
 	default:
 		return -EINVAL;
-	};
+	}
 
 	return 0;
 }
diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
index da49436..30a9014 100644
--- a/drivers/power/supply/ltc2941-battery-gauge.c
+++ b/drivers/power/supply/ltc2941-battery-gauge.c
@@ -449,7 +449,7 @@ static int ltc294x_i2c_remove(struct i2c_client *client)
 {
 	struct ltc294x_info *info = i2c_get_clientdata(client);
 
-	cancel_delayed_work(&info->work);
+	cancel_delayed_work_sync(&info->work);
 	power_supply_unregister(info->supply);
 	return 0;
 }
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 6249901..8a1f0ee 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -13,6 +13,7 @@
 #include <linux/err.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
+#include <linux/interrupt.h>
 #include <linux/power_supply.h>
 #include <linux/max17040_battery.h>
 #include <linux/slab.h>
@@ -28,6 +29,9 @@
 #define MAX17040_DELAY		1000
 #define MAX17040_BATTERY_FULL	95
 
+#define MAX17040_ATHD_MASK		0xFFC0
+#define MAX17040_ATHD_DEFAULT_POWER_UP	4
+
 struct max17040_chip {
 	struct i2c_client		*client;
 	struct delayed_work		work;
@@ -42,6 +46,8 @@ struct max17040_chip {
 	int soc;
 	/* State Of Charge */
 	int status;
+	/* Low alert threshold from 32% to 1% of the State of Charge */
+	u32 low_soc_alert;
 };
 
 static int max17040_get_property(struct power_supply *psy,
@@ -98,6 +104,21 @@ static void max17040_reset(struct i2c_client *client)
 	max17040_write_reg(client, MAX17040_CMD, 0x0054);
 }
 
+static int max17040_set_low_soc_alert(struct i2c_client *client, u32 level)
+{
+	int ret;
+	u16 data;
+
+	level = 32 - level;
+	data = max17040_read_reg(client, MAX17040_RCOMP);
+	/* clear the ALRT flag and the 5 ATHD bits, then set the new threshold */
+	data &= MAX17040_ATHD_MASK;
+	data |= level;
+	ret = max17040_write_reg(client, MAX17040_RCOMP, data);
+
+	return ret;
+}
+
 static void max17040_get_vcell(struct i2c_client *client)
 {
 	struct max17040_chip *chip = i2c_get_clientdata(client);
@@ -160,21 +181,81 @@ static void max17040_get_status(struct i2c_client *client)
 		chip->status = POWER_SUPPLY_STATUS_FULL;
 }
 
+static int max17040_get_of_data(struct max17040_chip *chip)
+{
+	struct device *dev = &chip->client->dev;
+
+	chip->low_soc_alert = MAX17040_ATHD_DEFAULT_POWER_UP;
+	device_property_read_u32(dev,
+				 "maxim,alert-low-soc-level",
+				 &chip->low_soc_alert);
+
+	if (chip->low_soc_alert <= 0 || chip->low_soc_alert >= 33)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void max17040_check_changes(struct i2c_client *client)
+{
+	max17040_get_vcell(client);
+	max17040_get_soc(client);
+	max17040_get_online(client);
+	max17040_get_status(client);
+}
+
 static void max17040_work(struct work_struct *work)
 {
 	struct max17040_chip *chip;
+	int last_soc, last_status;
 
 	chip = container_of(work, struct max17040_chip, work.work);
 
-	max17040_get_vcell(chip->client);
-	max17040_get_soc(chip->client);
-	max17040_get_online(chip->client);
-	max17040_get_status(chip->client);
+	/* store SOC and status to check changes */
+	last_soc = chip->soc;
+	last_status = chip->status;
+	max17040_check_changes(chip->client);
+
+	/* check changes and send uevent */
+	if (last_soc != chip->soc || last_status != chip->status)
+		power_supply_changed(chip->battery);
 
 	queue_delayed_work(system_power_efficient_wq, &chip->work,
 			   MAX17040_DELAY);
 }
 
+static irqreturn_t max17040_thread_handler(int id, void *dev)
+{
+	struct max17040_chip *chip = dev;
+	struct i2c_client *client = chip->client;
+
+	dev_warn(&client->dev, "IRQ: Alert battery low level\n");
+	/* read registers */
+	max17040_check_changes(chip->client);
+
+	/* send uevent */
+	power_supply_changed(chip->battery);
+
+	/* reset alert bit */
+	max17040_set_low_soc_alert(client, chip->low_soc_alert);
+
+	return IRQ_HANDLED;
+}
+
+static int max17040_enable_alert_irq(struct max17040_chip *chip)
+{
+	struct i2c_client *client = chip->client;
+	unsigned int flags;
+	int ret;
+
+	flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
+	ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+					max17040_thread_handler, flags,
+					chip->battery->desc->name, chip);
+
+	return ret;
+}
+
 static enum power_supply_property max17040_battery_props[] = {
 	POWER_SUPPLY_PROP_STATUS,
 	POWER_SUPPLY_PROP_ONLINE,
@@ -196,6 +277,7 @@ static int max17040_probe(struct i2c_client *client,
 	struct i2c_adapter *adapter = client->adapter;
 	struct power_supply_config psy_cfg = {};
 	struct max17040_chip *chip;
+	int ret;
 
 	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
 		return -EIO;
@@ -206,6 +288,12 @@ static int max17040_probe(struct i2c_client *client,
 
 	chip->client = client;
 	chip->pdata = client->dev.platform_data;
+	ret = max17040_get_of_data(chip);
+	if (ret) {
+		dev_err(&client->dev,
+			"failed: low SOC alert OF data out of bounds\n");
+		return ret;
+	}
 
 	i2c_set_clientdata(client, chip);
 	psy_cfg.drv_data = chip;
@@ -220,6 +308,24 @@ static int max17040_probe(struct i2c_client *client,
 	max17040_reset(client);
 	max17040_get_version(client);
 
+	/* check interrupt */
+	if (client->irq && of_device_is_compatible(client->dev.of_node,
+						   "maxim,max77836-battery")) {
+		ret = max17040_set_low_soc_alert(client, chip->low_soc_alert);
+		if (ret) {
+			dev_err(&client->dev,
+				"Failed to set low SOC alert: err %d\n", ret);
+			return ret;
+		}
+
+		ret = max17040_enable_alert_irq(chip);
+		if (ret) {
+			client->irq = 0;
+			dev_warn(&client->dev,
+				 "Failed to get IRQ err %d\n", ret);
+		}
+	}
+
 	INIT_DEFERRABLE_WORK(&chip->work, max17040_work);
 	queue_delayed_work(system_power_efficient_wq, &chip->work,
 			   MAX17040_DELAY);
@@ -244,6 +350,10 @@ static int max17040_suspend(struct device *dev)
 	struct max17040_chip *chip = i2c_get_clientdata(client);
 
 	cancel_delayed_work(&chip->work);
+
+	if (client->irq && device_may_wakeup(dev))
+		enable_irq_wake(client->irq);
+
 	return 0;
 }
 
@@ -254,6 +364,10 @@ static int max17040_resume(struct device *dev)
 
 	queue_delayed_work(system_power_efficient_wq, &chip->work,
 			   MAX17040_DELAY);
+
+	if (client->irq && device_may_wakeup(dev))
+		disable_irq_wake(client->irq);
+
 	return 0;
 }
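
For the low-SOC alert added to max17040 above, the threshold is stored as 32 minus the requested percentage in the ATHD field, with the mask clearing both the stale threshold and the ALRT flag. A stand-alone sketch of that encoding follows; the sample register value is invented for illustration.

	#include <stdio.h>
	#include <stdint.h>

	#define ATHD_MASK	0xFFC0u	/* keeps the upper bits, clears ALRT and the 5 ATHD bits */

	/*
	 * ATHD encodes "alert when SOC drops below (32 - ATHD) percent", so a
	 * requested 1..32 percent threshold is stored as 32 - percent.
	 */
	static uint16_t encode_low_soc_alert(uint16_t rcomp_reg, unsigned int percent)
	{
		return (uint16_t)((rcomp_reg & ATHD_MASK) | (32 - percent));
	}

	int main(void)
	{
		uint16_t reg = 0x971cu;	/* assumed RCOMP/CONFIG content, purely illustrative */

		/* The driver's power-up default is a 4 percent threshold, i.e. ATHD = 28. */
		printf("4%%  -> 0x%04x\n", encode_low_soc_alert(reg, 4));	/* 0x971c */
		printf("32%% -> 0x%04x\n", encode_low_soc_alert(reg, 32));	/* 0x9700 */
		return 0;
	}
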
 
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 0dfad2c..69ec429 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -282,6 +282,8 @@ static int max17042_get_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
 		if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
 			ret = regmap_read(map, MAX17042_V_empty, &data);
+		else if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
+			ret = regmap_read(map, MAX17055_V_empty, &data);
 		else
 			ret = regmap_read(map, MAX17047_V_empty, &data);
 		if (ret < 0)
@@ -627,7 +629,8 @@ static void max17042_write_config_regs(struct max17042_chip *chip)
 			config->filter_cfg);
 	regmap_write(map, MAX17042_RelaxCFG, config->relax_cfg);
 	if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047 ||
-			chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050)
+			chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050 ||
+			chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
 		regmap_write(map, MAX17047_FullSOCThr,
 						config->full_soc_thresh);
 }
@@ -758,6 +761,8 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
 
 	if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
 		max17042_override_por(map, MAX17042_V_empty, config->vempty);
+	else if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
+		max17042_override_por(map, MAX17055_V_empty, config->vempty);
 	else
 		max17042_override_por(map, MAX17047_V_empty, config->vempty);
 	max17042_override_por(map, MAX17042_TempNom, config->temp_nom);
@@ -765,7 +770,10 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
 	max17042_override_por(map, MAX17042_FCTC, config->fctc);
 	max17042_override_por(map, MAX17042_RCOMP0, config->rcomp0);
 	max17042_override_por(map, MAX17042_TempCo, config->tcompc0);
-	if (chip->chip_type) {
+	if (chip->chip_type &&
+	    ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042) ||
+	    (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047) ||
+	    (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050))) {
 		max17042_override_por(map, MAX17042_EmptyTempCo,
 						config->empty_tempco);
 		max17042_override_por(map, MAX17042_K_empty0,
@@ -929,7 +937,8 @@ max17042_get_default_pdata(struct max17042_chip *chip)
 	if (!pdata)
 		return pdata;
 
-	if (chip->chip_type != MAXIM_DEVICE_TYPE_MAX17042) {
+	if ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047) ||
+	    (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050)) {
 		pdata->init_data = max17047_default_pdata_init_regs;
 		pdata->num_init_data =
 			ARRAY_SIZE(max17047_default_pdata_init_regs);
@@ -1167,6 +1176,7 @@ static const struct of_device_id max17042_dt_match[] = {
 	{ .compatible = "maxim,max17042" },
 	{ .compatible = "maxim,max17047" },
 	{ .compatible = "maxim,max17050" },
+	{ .compatible = "maxim,max17055" },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -1176,6 +1186,7 @@ static const struct i2c_device_id max17042_id[] = {
 	{ "max17042", MAXIM_DEVICE_TYPE_MAX17042 },
 	{ "max17047", MAXIM_DEVICE_TYPE_MAX17047 },
 	{ "max17050", MAXIM_DEVICE_TYPE_MAX17050 },
+	{ "max17055", MAXIM_DEVICE_TYPE_MAX17055 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, max17042_id);
diff --git a/drivers/power/supply/max77650-charger.c b/drivers/power/supply/max77650-charger.c
index 5f9477c..d913428 100644
--- a/drivers/power/supply/max77650-charger.c
+++ b/drivers/power/supply/max77650-charger.c
@@ -354,9 +354,16 @@ static int max77650_charger_remove(struct platform_device *pdev)
 	return max77650_charger_disable(chg);
 }
 
+static const struct of_device_id max77650_charger_of_match[] = {
+	{ .compatible = "maxim,max77650-charger" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, max77650_charger_of_match);
+
 static struct platform_driver max77650_charger_driver = {
 	.driver = {
 		.name = "max77650-charger",
+		.of_match_table = max77650_charger_of_match,
 	},
 	.probe = max77650_charger_probe,
 	.remove = max77650_charger_remove,
diff --git a/drivers/power/supply/pda_power.c b/drivers/power/supply/pda_power.c
index 3ae5707..03a37fd 100644
--- a/drivers/power/supply/pda_power.c
+++ b/drivers/power/supply/pda_power.c
@@ -429,6 +429,10 @@ static int pda_power_probe(struct platform_device *pdev)
 
 static int pda_power_remove(struct platform_device *pdev)
 {
+#if IS_ENABLED(CONFIG_USB_PHY)
+	if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier)
+		usb_unregister_notifier(transceiver, &otg_nb);
+#endif
 	if (pdata->is_usb_online && usb_irq)
 		free_irq(usb_irq->start, pda_psy_usb);
 	if (pdata->is_ac_online && ac_irq)
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 5c36c43..1a9a9fa 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -565,9 +565,11 @@ EXPORT_SYMBOL_GPL(devm_power_supply_get_by_phandle);
 int power_supply_get_battery_info(struct power_supply *psy,
 				  struct power_supply_battery_info *info)
 {
+	struct power_supply_resistance_temp_table *resist_table;
 	struct device_node *battery_np;
 	const char *value;
 	int err, len, index;
+	const __be32 *list;
 
 	info->energy_full_design_uwh         = -EINVAL;
 	info->charge_full_design_uah         = -EINVAL;
@@ -578,6 +580,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
 	info->constant_charge_current_max_ua = -EINVAL;
 	info->constant_charge_voltage_max_uv = -EINVAL;
 	info->factory_internal_resistance_uohm  = -EINVAL;
+	info->resist_table = NULL;
 
 	for (index = 0; index < POWER_SUPPLY_OCV_TEMP_MAX; index++) {
 		info->ocv_table[index]       = NULL;
@@ -644,7 +647,6 @@ int power_supply_get_battery_info(struct power_supply *psy,
 	for (index = 0; index < len; index++) {
 		struct power_supply_battery_ocv_table *table;
 		char *propname;
-		const __be32 *list;
 		int i, tab_len, size;
 
 		propname = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d", index);
@@ -677,6 +679,26 @@ int power_supply_get_battery_info(struct power_supply *psy,
 		}
 	}
 
+	list = of_get_property(battery_np, "resistance-temp-table", &len);
+	if (!list || !len)
+		goto out_put_node;
+
+	info->resist_table_size = len / (2 * sizeof(__be32));
+	resist_table = info->resist_table = devm_kcalloc(&psy->dev,
+							 info->resist_table_size,
+							 sizeof(*resist_table),
+							 GFP_KERNEL);
+	if (!info->resist_table) {
+		power_supply_put_battery_info(psy, info);
+		err = -ENOMEM;
+		goto out_put_node;
+	}
+
+	for (index = 0; index < info->resist_table_size; index++) {
+		resist_table[index].temp = be32_to_cpu(*list++);
+		resist_table[index].resistance = be32_to_cpu(*list++);
+	}
+
 out_put_node:
 	of_node_put(battery_np);
 	return err;
@@ -692,10 +714,53 @@ void power_supply_put_battery_info(struct power_supply *psy,
 		if (info->ocv_table[i])
 			devm_kfree(&psy->dev, info->ocv_table[i]);
 	}
+
+	if (info->resist_table)
+		devm_kfree(&psy->dev, info->resist_table);
 }
 EXPORT_SYMBOL_GPL(power_supply_put_battery_info);
 
 /**
+ * power_supply_temp2resist_simple() - find the battery internal resistance
+ * percent
+ * @table: Pointer to battery resistance temperature table
+ * @table_len: The table length
+ * @temp: Current temperature
+ *
+ * This helper function is used to look up battery internal resistance percent
+ * for the current temperature from the resistance temperature table, which
+ * must be sorted by descending temperature. Then the actual battery internal
+ * resistance = the ideal battery internal resistance * percent / 100.
+ *
+ * Return: the battery internal resistance percent
+ */
+int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table,
+				    int table_len, int temp)
+{
+	int i, resist;
+
+	for (i = 0; i < table_len; i++)
+		if (temp > table[i].temp)
+			break;
+
+	if (i > 0 && i < table_len) {
+		int tmp;
+
+		tmp = (table[i - 1].resistance - table[i].resistance) *
+			(temp - table[i].temp);
+		tmp /= table[i - 1].temp - table[i].temp;
+		resist = tmp + table[i].resistance;
+	} else if (i == 0) {
+		resist = table[0].resistance;
+	} else {
+		resist = table[table_len - 1].resistance;
+	}
+
+	return resist;
+}
+EXPORT_SYMBOL_GPL(power_supply_temp2resist_simple);
+
+/**
  * power_supply_ocv2cap_simple() - find the battery capacity
  * @table: Pointer to battery OCV lookup table
  * @table_len: OCV table length
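
power_supply_temp2resist_simple() above walks a table sorted by descending temperature and linearly interpolates a percentage of the battery's ideal internal resistance. The stand-alone sketch below reproduces the lookup with a made-up table; the temperatures, percentages and the tenths-of-a-degree unit are illustrative assumptions, not taken from any battery datasheet.

	#include <stdio.h>

	/* temp unit follows the table provider (tenths of a degree assumed here) */
	struct resist_temp {
		int temp;
		int resistance;	/* percent of the ideal internal resistance */
	};

	/* Same lookup as power_supply_temp2resist_simple(); table sorted by descending temp. */
	static int temp2resist(const struct resist_temp *table, int len, int temp)
	{
		int i;

		for (i = 0; i < len; i++)
			if (temp > table[i].temp)
				break;

		if (i > 0 && i < len) {
			/* linear interpolation between the two neighbouring entries */
			int tmp = (table[i - 1].resistance - table[i].resistance) *
				  (temp - table[i].temp);

			return tmp / (table[i - 1].temp - table[i].temp) + table[i].resistance;
		}

		return i == 0 ? table[0].resistance : table[len - 1].resistance;
	}

	int main(void)
	{
		/* made-up table: 100% at 20 degC, growing as the battery gets colder */
		static const struct resist_temp table[] = {
			{ 200, 100 }, { 100, 111 }, { 0, 151 }, { -100, 234 },
		};
		const int len = sizeof(table) / sizeof(table[0]);

		printf("%d%%\n", temp2resist(table, len, 250));		/* above range: 100 */
		printf("%d%%\n", temp2resist(table, len, 50));		/* interpolated: 131 */
		printf("%d%%\n", temp2resist(table, len, -200));	/* below range: 234 */
		return 0;
	}
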
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index f8d74e9..6acd242 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -5,6 +5,7 @@
  * Copyright (c) 2010, NVIDIA Corporation.
  */
 
+#include <linux/bits.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/gpio/consumer.h>
@@ -46,10 +47,10 @@ enum {
 
 /* Battery Mode defines */
 #define BATTERY_MODE_OFFSET		0x03
-#define BATTERY_MODE_MASK		0x8000
-enum sbs_battery_mode {
-	BATTERY_MODE_AMPS = 0,
-	BATTERY_MODE_WATTS = 0x8000
+#define BATTERY_MODE_CAPACITY_MASK	BIT(15)
+enum sbs_capacity_mode {
+	CAPACITY_MODE_AMPS = 0,
+	CAPACITY_MODE_WATTS = BATTERY_MODE_CAPACITY_MASK
 };
 
 /* manufacturer access defines */
@@ -518,8 +519,8 @@ static void  sbs_unit_adjustment(struct i2c_client *client,
 	}
 }
 
-static enum sbs_battery_mode sbs_set_battery_mode(struct i2c_client *client,
-	enum sbs_battery_mode mode)
+static enum sbs_capacity_mode sbs_set_capacity_mode(struct i2c_client *client,
+	enum sbs_capacity_mode mode)
 {
 	int ret, original_val;
 
@@ -527,13 +528,13 @@ static enum sbs_battery_mode sbs_set_battery_mode(struct i2c_client *client,
 	if (original_val < 0)
 		return original_val;
 
-	if ((original_val & BATTERY_MODE_MASK) == mode)
+	if ((original_val & BATTERY_MODE_CAPACITY_MASK) == mode)
 		return mode;
 
-	if (mode == BATTERY_MODE_AMPS)
-		ret = original_val & ~BATTERY_MODE_MASK;
+	if (mode == CAPACITY_MODE_AMPS)
+		ret = original_val & ~BATTERY_MODE_CAPACITY_MASK;
 	else
-		ret = original_val | BATTERY_MODE_MASK;
+		ret = original_val | BATTERY_MODE_CAPACITY_MASK;
 
 	ret = sbs_write_word_data(client, BATTERY_MODE_OFFSET, ret);
 	if (ret < 0)
@@ -541,7 +542,7 @@ static enum sbs_battery_mode sbs_set_battery_mode(struct i2c_client *client,
 
 	usleep_range(1000, 2000);
 
-	return original_val & BATTERY_MODE_MASK;
+	return original_val & BATTERY_MODE_CAPACITY_MASK;
 }
 
 static int sbs_get_battery_capacity(struct i2c_client *client,
@@ -549,13 +550,13 @@ static int sbs_get_battery_capacity(struct i2c_client *client,
 	union power_supply_propval *val)
 {
 	s32 ret;
-	enum sbs_battery_mode mode = BATTERY_MODE_WATTS;
+	enum sbs_capacity_mode mode = CAPACITY_MODE_WATTS;
 
 	if (power_supply_is_amp_property(psp))
-		mode = BATTERY_MODE_AMPS;
+		mode = CAPACITY_MODE_AMPS;
 
-	mode = sbs_set_battery_mode(client, mode);
-	if (mode < 0)
+	mode = sbs_set_capacity_mode(client, mode);
+	if ((int)mode < 0)
 		return mode;
 
 	ret = sbs_read_word_data(client, sbs_data[reg_offset].addr);
@@ -564,7 +565,7 @@ static int sbs_get_battery_capacity(struct i2c_client *client,
 
 	val->intval = ret;
 
-	ret = sbs_set_battery_mode(client, mode);
+	ret = sbs_set_capacity_mode(client, mode);
 	if (ret < 0)
 		return ret;
 
@@ -1001,6 +1002,6 @@ module_i2c_driver(sbs_battery_driver);
 MODULE_DESCRIPTION("SBS battery monitor driver");
 MODULE_LICENSE("GPL");
 
-module_param(force_load, bool, S_IRUSR | S_IRGRP | S_IROTH);
+module_param(force_load, bool, 0444);
 MODULE_PARM_DESC(force_load,
 		 "Attempt to load the driver even if no battery is connected");
diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c
index bc8f5bd..469c83f 100644
--- a/drivers/power/supply/sc27xx_fuel_gauge.c
+++ b/drivers/power/supply/sc27xx_fuel_gauge.c
@@ -62,6 +62,8 @@
 
 #define SC27XX_FGU_CUR_BASIC_ADC	8192
 #define SC27XX_FGU_SAMPLE_HZ		2
+/* micro Ohms */
+#define SC27XX_FGU_IDEAL_RESISTANCE	20000
 
 /*
  * struct sc27xx_fgu_data: describe the FGU device
@@ -81,9 +83,12 @@
  * @max_volt: the maximum constant input voltage in millivolt
  * @min_volt: the minimum drained battery voltage in microvolt
  * @table_len: the capacity table length
+ * @resist_table_len: the resistance table length
  * @cur_1000ma_adc: ADC value corresponding to 1000 mA
  * @vol_1000mv_adc: ADC value corresponding to 1000 mV
+ * @calib_resist: the real resistance of coulomb counter chip in uOhm
  * @cap_table: capacity table with corresponding ocv
+ * @resist_table: resistance percent table with corresponding temperature
  */
 struct sc27xx_fgu_data {
 	struct regmap *regmap;
@@ -103,15 +108,19 @@ struct sc27xx_fgu_data {
 	int max_volt;
 	int min_volt;
 	int table_len;
+	int resist_table_len;
 	int cur_1000ma_adc;
 	int vol_1000mv_adc;
+	int calib_resist;
 	struct power_supply_battery_ocv_table *cap_table;
+	struct power_supply_resistance_temp_table *resist_table;
 };
 
 static int sc27xx_fgu_cap_to_clbcnt(struct sc27xx_fgu_data *data, int capacity);
 static void sc27xx_fgu_capacity_calibration(struct sc27xx_fgu_data *data,
 					    int cap, bool int_mode);
 static void sc27xx_fgu_adjust_cap(struct sc27xx_fgu_data *data, int cap);
+static int sc27xx_fgu_get_temp(struct sc27xx_fgu_data *data, int *temp);
 
 static const char * const sc27xx_charger_supply_name[] = {
 	"sc2731_charger",
@@ -434,7 +443,7 @@ static int sc27xx_fgu_get_current(struct sc27xx_fgu_data *data, int *val)
 
 static int sc27xx_fgu_get_vbat_ocv(struct sc27xx_fgu_data *data, int *val)
 {
-	int vol, cur, ret;
+	int vol, cur, ret, temp, resistance;
 
 	ret = sc27xx_fgu_get_vbat_vol(data, &vol);
 	if (ret)
@@ -444,8 +453,19 @@ static int sc27xx_fgu_get_vbat_ocv(struct sc27xx_fgu_data *data, int *val)
 	if (ret)
 		return ret;
 
+	resistance = data->internal_resist;
+	if (data->resist_table_len > 0) {
+		ret = sc27xx_fgu_get_temp(data, &temp);
+		if (ret)
+			return ret;
+
+		resistance = power_supply_temp2resist_simple(data->resist_table,
+						data->resist_table_len, temp);
+		resistance = data->internal_resist * resistance / 100;
+	}
+
 	/* Return the battery OCV in micro volts. */
-	*val = vol * 1000 - cur * data->internal_resist;
+	*val = vol * 1000 - cur * resistance;
 
 	return 0;
 }
@@ -884,7 +904,9 @@ static int sc27xx_fgu_calibration(struct sc27xx_fgu_data *data)
 	 */
 	cal_4200mv = (calib_data & 0x1ff) + 6963 - 4096 - 256;
 	data->vol_1000mv_adc = DIV_ROUND_CLOSEST(cal_4200mv * 10, 42);
-	data->cur_1000ma_adc = data->vol_1000mv_adc * 4;
+	data->cur_1000ma_adc =
+		DIV_ROUND_CLOSEST(data->vol_1000mv_adc * 4 * data->calib_resist,
+				  SC27XX_FGU_IDEAL_RESISTANCE);
 
 	kfree(buf);
 	return 0;
@@ -929,6 +951,18 @@ static int sc27xx_fgu_hw_init(struct sc27xx_fgu_data *data)
 	if (!data->alarm_cap)
 		data->alarm_cap += 1;
 
+	data->resist_table_len = info.resist_table_size;
+	if (data->resist_table_len > 0) {
+		data->resist_table = devm_kmemdup(data->dev, info.resist_table,
+						  data->resist_table_len *
+						  sizeof(struct power_supply_resistance_temp_table),
+						  GFP_KERNEL);
+		if (!data->resist_table) {
+			power_supply_put_battery_info(data->battery, &info);
+			return -ENOMEM;
+		}
+	}
+
 	power_supply_put_battery_info(data->battery, &info);
 
 	ret = sc27xx_fgu_calibration(data);
@@ -1051,6 +1085,15 @@ static int sc27xx_fgu_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	ret = device_property_read_u32(&pdev->dev,
+				       "sprd,calib-resistance-micro-ohms",
+				       &data->calib_resist);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"failed to get fgu calibration resistance\n");
+		return ret;
+	}
+
 	data->channel = devm_iio_channel_get(dev, "bat-temp");
 	if (IS_ERR(data->channel)) {
 		dev_err(dev, "failed to get IIO channel\n");
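
The sc27xx change above scales the current-ADC calibration by the coulomb counter's real resistance (read from the new "sprd,calib-resistance-micro-ohms" property) relative to the 20000 uOhm ideal, so boards with the ideal resistance keep the old "vol * 4" gain. A small stand-alone check of that arithmetic follows; the sample ADC value is invented for illustration.

	#include <stdio.h>

	#define IDEAL_RESISTANCE_UOHM	20000	/* SC27XX_FGU_IDEAL_RESISTANCE */

	/* round-to-nearest division for positive values, like DIV_ROUND_CLOSEST() */
	static long div_round_closest(long n, long d)
	{
		return (n + d / 2) / d;
	}

	/*
	 * The current-ADC gain scales with the coulomb counter's real resistance;
	 * at the 20000 uOhm ideal it degenerates to the old "vol * 4" factor.
	 */
	static long cur_1000ma_adc(long vol_1000mv_adc, long calib_resist_uohm)
	{
		return div_round_closest(vol_1000mv_adc * 4 * calib_resist_uohm,
					 IDEAL_RESISTANCE_UOHM);
	}

	int main(void)
	{
		long vol = 1660;	/* assumed vol_1000mv_adc value, purely illustrative */

		printf("20000 uOhm -> %ld\n", cur_1000ma_adc(vol, 20000));	/* 6640 == vol * 4 */
		printf("10000 uOhm -> %ld\n", cur_1000ma_adc(vol, 10000));	/* 3320, half the gain */
		return 0;
	}
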
diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
index 1b80ae4..cdb9a23 100644
--- a/drivers/power/supply/ucs1002_power.c
+++ b/drivers/power/supply/ucs1002_power.c
@@ -100,7 +100,9 @@ struct ucs1002_info {
 	struct i2c_client *client;
 	struct regmap *regmap;
 	struct regulator_desc *regulator_descriptor;
+	struct regulator_dev *rdev;
 	bool present;
+	bool output_disable;
 };
 
 static enum power_supply_property ucs1002_props[] = {
@@ -233,6 +235,11 @@ static int ucs1002_get_max_current(struct ucs1002_info *info,
 	unsigned int reg;
 	int ret;
 
+	if (info->output_disable) {
+		val->intval = 0;
+		return 0;
+	}
+
 	ret = regmap_read(info->regmap, UCS1002_REG_ILIMIT, &reg);
 	if (ret)
 		return ret;
@@ -247,6 +254,12 @@ static int ucs1002_set_max_current(struct ucs1002_info *info, u32 val)
 	unsigned int reg;
 	int ret, idx;
 
+	if (val == 0) {
+		info->output_disable = true;
+		regulator_disable_regmap(info->rdev);
+		return 0;
+	}
+
 	for (idx = 0; idx < ARRAY_SIZE(ucs1002_current_limit_uA); idx++) {
 		if (val == ucs1002_current_limit_uA[idx])
 			break;
@@ -270,6 +283,12 @@ static int ucs1002_set_max_current(struct ucs1002_info *info, u32 val)
 	if (reg != idx)
 		return -EINVAL;
 
+	info->output_disable = false;
+
+	if (info->rdev && info->rdev->use_count &&
+	    !regulator_is_enabled_regmap(info->rdev))
+		regulator_enable_regmap(info->rdev);
+
 	return 0;
 }
 
@@ -470,9 +489,24 @@ static irqreturn_t ucs1002_alert_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static int ucs1002_regulator_enable(struct regulator_dev *rdev)
+{
+	struct ucs1002_info *info = rdev_get_drvdata(rdev);
+
+	/*
+	 * If the output is disabled due to 0 maximum current, just pretend the
+	 * enable did work. The regulator will be enabled as soon as we get a
+	 * non-zero maximum current budget.
+	 */
+	if (info->output_disable)
+		return 0;
+
+	return regulator_enable_regmap(rdev);
+}
+
 static const struct regulator_ops ucs1002_regulator_ops = {
 	.is_enabled	= regulator_is_enabled_regmap,
-	.enable		= regulator_enable_regmap,
+	.enable		= ucs1002_regulator_enable,
 	.disable	= regulator_disable_regmap,
 };
 
@@ -499,7 +533,6 @@ static int ucs1002_probe(struct i2c_client *client,
 	};
 	struct regulator_config regulator_config = {};
 	int irq_a_det, irq_alert, ret;
-	struct regulator_dev *rdev;
 	struct ucs1002_info *info;
 	unsigned int regval;
 
@@ -589,10 +622,11 @@ static int ucs1002_probe(struct i2c_client *client,
 	regulator_config.dev = dev;
 	regulator_config.of_node = dev->of_node;
 	regulator_config.regmap = info->regmap;
+	regulator_config.driver_data = info;
 
-	rdev = devm_regulator_register(dev, info->regulator_descriptor,
+	info->rdev = devm_regulator_register(dev, info->regulator_descriptor,
 				       &regulator_config);
-	ret = PTR_ERR_OR_ZERO(rdev);
+	ret = PTR_ERR_OR_ZERO(info->rdev);
 	if (ret) {
 		dev_err(dev, "Failed to register VBUS regulator: %d\n", ret);
 		return ret;
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index bd21655..30190be 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -100,7 +100,7 @@
 
 config PWM_BCM2835
 	tristate "BCM2835 PWM support"
-	depends on ARCH_BCM2835
+	depends on ARCH_BCM2835 || ARCH_BRCMSTB
 	help
 	  PWM framework driver for BCM2835 controller (Raspberry Pi)
 
@@ -328,7 +328,8 @@
 
 config PWM_OMAP_DMTIMER
 	tristate "OMAP Dual-Mode Timer PWM support"
-	depends on OF && ARCH_OMAP && OMAP_DM_TIMER
+	depends on OF
+	depends on OMAP_DM_TIMER || COMPILE_TEST
 	help
 	  Generic PWM framework driver for OMAP Dual-Mode Timer PWM output
 
@@ -490,7 +491,7 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called pwm-tegra.
 
-config  PWM_TIECAP
+config PWM_TIECAP
 	tristate "ECAP PWM support"
 	depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3
 	help
@@ -499,7 +500,7 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called pwm-tiecap.
 
-config  PWM_TIEHRPWM
+config PWM_TIEHRPWM
 	tristate "EHRPWM PWM support"
 	depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_K3
 	help
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index f877e77..5a7f659 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -20,6 +20,9 @@
 
 #include <dt-bindings/pwm/pwm.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/pwm.h>
+
 #define MAX_PWMS 1024
 
 static DEFINE_MUTEX(pwm_lookup_lock);
@@ -114,6 +117,11 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
 		}
 	}
 
+	if (pwm->chip->ops->get_state) {
+		pwm->chip->ops->get_state(pwm->chip, pwm, &pwm->state);
+		trace_pwm_get(pwm, &pwm->state);
+	}
+
 	set_bit(PWMF_REQUESTED, &pwm->flags);
 	pwm->label = label;
 
@@ -283,9 +291,6 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
 		pwm->hwpwm = i;
 		pwm->state.polarity = polarity;
 
-		if (chip->ops->get_state)
-			chip->ops->get_state(chip, pwm, &pwm->state);
-
 		radix_tree_insert(&pwm_tree, pwm->pwm, pwm);
 	}
 
@@ -472,6 +477,8 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
 		if (err)
 			return err;
 
+		trace_pwm_apply(pwm, state);
+
 		pwm->state = *state;
 	} else {
 		/*
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 9ba7334..6161e7e 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -4,6 +4,19 @@
  *
  * Copyright (C) 2013 Atmel Corporation
  *		 Bo Shen <voice.shen@atmel.com>
+ *
+ * Links to reference manuals for the supported PWM chips can be found in
+ * Documentation/arm/microchip.rst.
+ *
+ * Limitations:
+ * - Periods start with the inactive level.
+ * - Hardware has to be stopped in general to update settings.
+ *
+ * Software bugs/possible improvements:
+ * - When atmel_pwm_apply() is called with state->enabled=false a change in
+ *   state->polarity isn't honored.
+ * - Instead of sleeping to wait for a completed period, the interrupt
+ *   functionality could be used.
  */
 
 #include <linux/clk.h>
@@ -47,6 +60,8 @@
 #define PWMV2_CPRD		0x0C
 #define PWMV2_CPRDUPD		0x10
 
+#define PWM_MAX_PRES		10
+
 struct atmel_pwm_registers {
 	u8 period;
 	u8 period_upd;
@@ -55,8 +70,7 @@ struct atmel_pwm_registers {
 };
 
 struct atmel_pwm_config {
-	u32 max_period;
-	u32 max_pres;
+	u32 period_bits;
 };
 
 struct atmel_pwm_data {
@@ -97,7 +111,7 @@ static inline u32 atmel_pwm_ch_readl(struct atmel_pwm_chip *chip,
 {
 	unsigned long base = PWM_CH_REG_OFFSET + ch * PWM_CH_REG_SIZE;
 
-	return readl_relaxed(chip->base + base + offset);
+	return atmel_pwm_readl(chip, base + offset);
 }
 
 static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip,
@@ -106,7 +120,7 @@ static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip,
 {
 	unsigned long base = PWM_CH_REG_OFFSET + ch * PWM_CH_REG_SIZE;
 
-	writel_relaxed(val, chip->base + base + offset);
+	atmel_pwm_writel(chip, base + offset, val);
 }
 
 static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
@@ -115,17 +129,27 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
 {
 	struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
 	unsigned long long cycles = state->period;
+	int shift;
 
 	/* Calculate the period cycles and prescale value */
 	cycles *= clk_get_rate(atmel_pwm->clk);
 	do_div(cycles, NSEC_PER_SEC);
 
-	for (*pres = 0; cycles > atmel_pwm->data->cfg.max_period; cycles >>= 1)
-		(*pres)++;
+	/*
+	 * The register for the period length is cfg.period_bits bits wide.
+	 * For each bit by which the number of clock cycles exceeds that width,
+	 * divide the input clock by two using pres and shift cprd accordingly.
+	 */
+	shift = fls(cycles) - atmel_pwm->data->cfg.period_bits;
 
-	if (*pres > atmel_pwm->data->cfg.max_pres) {
+	if (shift > PWM_MAX_PRES) {
 		dev_err(chip->dev, "pres exceeds the maximum value\n");
 		return -EINVAL;
+	} else if (shift > 0) {
+		*pres = shift;
+		cycles >>= *pres;
+	} else {
+		*pres = 0;
 	}
 
 	*cprd = cycles;
@@ -271,8 +295,48 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 	return 0;
 }
 
+static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+				struct pwm_state *state)
+{
+	struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+	u32 sr, cmr;
+
+	sr = atmel_pwm_readl(atmel_pwm, PWM_SR);
+	cmr = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
+
+	if (sr & (1 << pwm->hwpwm)) {
+		unsigned long rate = clk_get_rate(atmel_pwm->clk);
+		u32 cdty, cprd, pres;
+		u64 tmp;
+
+		pres = cmr & PWM_CMR_CPRE_MSK;
+
+		cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
+					  atmel_pwm->data->regs.period);
+		tmp = (u64)cprd * NSEC_PER_SEC;
+		tmp <<= pres;
+		state->period = DIV64_U64_ROUND_UP(tmp, rate);
+
+		cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
+					  atmel_pwm->data->regs.duty);
+		tmp = (u64)cdty * NSEC_PER_SEC;
+		tmp <<= pres;
+		state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate);
+
+		state->enabled = true;
+	} else {
+		state->enabled = false;
+	}
+
+	if (cmr & PWM_CMR_CPOL)
+		state->polarity = PWM_POLARITY_INVERSED;
+	else
+		state->polarity = PWM_POLARITY_NORMAL;
+}
+
 static const struct pwm_ops atmel_pwm_ops = {
 	.apply = atmel_pwm_apply,
+	.get_state = atmel_pwm_get_state,
 	.owner = THIS_MODULE,
 };
 
@@ -285,8 +349,7 @@ static const struct atmel_pwm_data atmel_sam9rl_pwm_data = {
 	},
 	.cfg = {
 		/* 16 bits to keep period and duty. */
-		.max_period	= 0xffff,
-		.max_pres	= 10,
+		.period_bits	= 16,
 	},
 };
 
@@ -299,8 +362,7 @@ static const struct atmel_pwm_data atmel_sama5_pwm_data = {
 	},
 	.cfg = {
 		/* 16 bits to keep period and duty. */
-		.max_period	= 0xffff,
-		.max_pres	= 10,
+		.period_bits	= 16,
 	},
 };
 
@@ -313,8 +375,7 @@ static const struct atmel_pwm_data mchp_sam9x60_pwm_data = {
 	},
 	.cfg = {
 		/* 32 bits to keep period and duty. */
-		.max_period	= 0xffffffff,
-		.max_pres	= 10,
+		.period_bits	= 32,
 	},
 };
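
The reworked atmel_pwm_calculate_cprd_and_pres() above derives the prescaler from how many bits the period, expressed in clock cycles, overflows the period_bits-wide CPRD register. A stand-alone sketch of the same calculation follows; the clock rate and period are arbitrary example numbers.

	#include <stdio.h>

	#define PWM_MAX_PRES	10	/* the input clock can be divided by at most 2^10 */

	/* highest set bit, 1-based; 0 for zero (like the kernel's fls()) */
	static int fls_ull(unsigned long long v)
	{
		return v ? 64 - __builtin_clzll(v) : 0;
	}

	/*
	 * Mirrors atmel_pwm_calculate_cprd_and_pres(): shrink the period length in
	 * clock cycles until it fits the period_bits-wide CPRD register, halving
	 * the clock (one prescaler step) per excess bit.
	 */
	static int calc_cprd_and_pres(unsigned long long period_ns, unsigned long rate,
				      int period_bits, unsigned long *cprd, int *pres)
	{
		unsigned long long cycles = period_ns * rate / 1000000000ULL;
		int shift = fls_ull(cycles) - period_bits;

		if (shift > PWM_MAX_PRES)
			return -1;

		*pres = shift > 0 ? shift : 0;
		*cprd = (unsigned long)(cycles >> *pres);
		return 0;
	}

	int main(void)
	{
		unsigned long cprd;
		int pres;

		/* assumed numbers: 133 MHz peripheral clock, 10 ms period, 16-bit CPRD */
		if (!calc_cprd_and_pres(10000000ULL, 133000000UL, 16, &cprd, &pres))
			printf("pres=%d (clock / %d), cprd=%lu\n", pres, 1 << pres, cprd);
		return 0;
	}
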
 
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 8949744..09c08de 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -25,11 +25,39 @@ struct cros_ec_pwm_device {
 	struct pwm_chip chip;
 };
 
+/**
+ * struct cros_ec_pwm - per-PWM driver data
+ * @duty_cycle: cached duty cycle
+ */
+struct cros_ec_pwm {
+	u16 duty_cycle;
+};
+
 static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *c)
 {
 	return container_of(c, struct cros_ec_pwm_device, chip);
 }
 
+static int cros_ec_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct cros_ec_pwm *channel;
+
+	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+	if (!channel)
+		return -ENOMEM;
+
+	pwm_set_chip_data(pwm, channel);
+
+	return 0;
+}
+
+static void cros_ec_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
+
+	kfree(channel);
+}
+
 static int cros_ec_pwm_set_duty(struct cros_ec_device *ec, u8 index, u16 duty)
 {
 	struct {
@@ -96,7 +124,9 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 			     const struct pwm_state *state)
 {
 	struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
-	int duty_cycle;
+	struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
+	u16 duty_cycle;
+	int ret;
 
 	/* The EC won't let us change the period */
 	if (state->period != EC_PWM_MAX_DUTY)
@@ -108,13 +138,20 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 	 */
 	duty_cycle = state->enabled ? state->duty_cycle : 0;
 
-	return cros_ec_pwm_set_duty(ec_pwm->ec, pwm->hwpwm, duty_cycle);
+	ret = cros_ec_pwm_set_duty(ec_pwm->ec, pwm->hwpwm, duty_cycle);
+	if (ret < 0)
+		return ret;
+
+	channel->duty_cycle = state->duty_cycle;
+
+	return 0;
 }
 
 static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 				  struct pwm_state *state)
 {
 	struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
+	struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
 	int ret;
 
 	ret = cros_ec_pwm_get_duty(ec_pwm->ec, pwm->hwpwm);
@@ -126,8 +163,19 @@ static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 	state->enabled = (ret > 0);
 	state->period = EC_PWM_MAX_DUTY;
 
-	/* Note that "disabled" and "duty cycle == 0" are treated the same */
-	state->duty_cycle = ret;
+	/*
+	 * Note that "disabled" and "duty cycle == 0" are treated the same. If
+	 * the cached duty cycle is not zero, use the cached duty cycle. This
+	 * ensures that the configured duty cycle is kept across a disable and
+	 * enable operation and avoids potentially confusing consumers.
+	 *
+	 * For the case of the initial hardware readout, channel->duty_cycle
+	 * will be 0 and the actual duty cycle read from the EC is used.
+	 */
+	if (ret == 0 && channel->duty_cycle > 0)
+		state->duty_cycle = channel->duty_cycle;
+	else
+		state->duty_cycle = ret;
 }
 
 static struct pwm_device *
@@ -149,6 +197,8 @@ cros_ec_pwm_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
 }
 
 static const struct pwm_ops cros_ec_pwm_ops = {
+	.request = cros_ec_pwm_request,
+	.free = cros_ec_pwm_free,
 	.get_state	= cros_ec_pwm_get_state,
 	.apply		= cros_ec_pwm_apply,
 	.owner		= THIS_MODULE,
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index ae11d85..35a7ac42 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -85,6 +85,13 @@ struct pwm_imx27_chip {
 	struct clk	*clk_per;
 	void __iomem	*mmio_base;
 	struct pwm_chip	chip;
+
+	/*
+	 * The driver cannot read the current duty cycle from the hardware if
+	 * the hardware is disabled. Cache the last programmed duty cycle
+	 * value to return in that case.
+	 */
+	unsigned int duty_cycle;
 };
 
 #define to_pwm_imx27_chip(chip)	container_of(chip, struct pwm_imx27_chip, chip)
@@ -155,14 +162,17 @@ static void pwm_imx27_get_state(struct pwm_chip *chip,
 	tmp = NSEC_PER_SEC * (u64)(period + 2);
 	state->period = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk);
 
-	/* PWMSAR can be read only if PWM is enabled */
-	if (state->enabled) {
+	/*
+	 * PWMSAR can be read only if PWM is enabled. If the PWM is disabled,
+	 * use the cached value.
+	 */
+	if (state->enabled)
 		val = readl(imx->mmio_base + MX3_PWMSAR);
-		tmp = NSEC_PER_SEC * (u64)(val);
-		state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk);
-	} else {
-		state->duty_cycle = 0;
-	}
+	else
+		val = imx->duty_cycle;
+
+	tmp = NSEC_PER_SEC * (u64)(val);
+	state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk);
 
 	if (!state->enabled)
 		pwm_imx27_clk_disable_unprepare(chip);
@@ -220,63 +230,68 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 	pwm_get_state(pwm, &cstate);
 
-	if (state->enabled) {
-		c = clk_get_rate(imx->clk_per);
-		c *= state->period;
+	c = clk_get_rate(imx->clk_per);
+	c *= state->period;
 
-		do_div(c, 1000000000);
-		period_cycles = c;
+	do_div(c, 1000000000);
+	period_cycles = c;
 
-		prescale = period_cycles / 0x10000 + 1;
+	prescale = period_cycles / 0x10000 + 1;
 
-		period_cycles /= prescale;
-		c = (unsigned long long)period_cycles * state->duty_cycle;
-		do_div(c, state->period);
-		duty_cycles = c;
+	period_cycles /= prescale;
+	c = (unsigned long long)period_cycles * state->duty_cycle;
+	do_div(c, state->period);
+	duty_cycles = c;
 
-		/*
-		 * according to imx pwm RM, the real period value should be
-		 * PERIOD value in PWMPR plus 2.
-		 */
-		if (period_cycles > 2)
-			period_cycles -= 2;
-		else
-			period_cycles = 0;
+	/*
+	 * according to imx pwm RM, the real period value should be PERIOD
+	 * value in PWMPR plus 2.
+	 */
+	if (period_cycles > 2)
+		period_cycles -= 2;
+	else
+		period_cycles = 0;
 
-		/*
-		 * Wait for a free FIFO slot if the PWM is already enabled, and
-		 * flush the FIFO if the PWM was disabled and is about to be
-		 * enabled.
-		 */
-		if (cstate.enabled) {
-			pwm_imx27_wait_fifo_slot(chip, pwm);
-		} else {
-			ret = pwm_imx27_clk_prepare_enable(chip);
-			if (ret)
-				return ret;
+	/*
+	 * Wait for a free FIFO slot if the PWM is already enabled, and flush
+	 * the FIFO if the PWM was disabled and is about to be enabled.
+	 */
+	if (cstate.enabled) {
+		pwm_imx27_wait_fifo_slot(chip, pwm);
+	} else {
+		ret = pwm_imx27_clk_prepare_enable(chip);
+		if (ret)
+			return ret;
 
-			pwm_imx27_sw_reset(chip);
-		}
-
-		writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
-		writel(period_cycles, imx->mmio_base + MX3_PWMPR);
-
-		cr = MX3_PWMCR_PRESCALER_SET(prescale) |
-		     MX3_PWMCR_STOPEN | MX3_PWMCR_DOZEN | MX3_PWMCR_WAITEN |
-		     FIELD_PREP(MX3_PWMCR_CLKSRC, MX3_PWMCR_CLKSRC_IPG_HIGH) |
-		     MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
-
-		if (state->polarity == PWM_POLARITY_INVERSED)
-			cr |= FIELD_PREP(MX3_PWMCR_POUTC,
-					MX3_PWMCR_POUTC_INVERTED);
-
-		writel(cr, imx->mmio_base + MX3_PWMCR);
-	} else if (cstate.enabled) {
-		writel(0, imx->mmio_base + MX3_PWMCR);
-
-		pwm_imx27_clk_disable_unprepare(chip);
+		pwm_imx27_sw_reset(chip);
 	}
 
+	writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
+	writel(period_cycles, imx->mmio_base + MX3_PWMPR);
+
+	/*
+	 * Store the duty cycle for future reference in cases where the
+	 * MX3_PWMSAR register can't be read (i.e. when the PWM is disabled).
+	 */
+	imx->duty_cycle = duty_cycles;
+
+	cr = MX3_PWMCR_PRESCALER_SET(prescale) |
+	     MX3_PWMCR_STOPEN | MX3_PWMCR_DOZEN | MX3_PWMCR_WAITEN |
+	     FIELD_PREP(MX3_PWMCR_CLKSRC, MX3_PWMCR_CLKSRC_IPG_HIGH) |
+	     MX3_PWMCR_DBGEN;
+
+	if (state->polarity == PWM_POLARITY_INVERSED)
+		cr |= FIELD_PREP(MX3_PWMCR_POUTC,
+				MX3_PWMCR_POUTC_INVERTED);
+
+	if (state->enabled)
+		cr |= MX3_PWMCR_EN;
+
+	writel(cr, imx->mmio_base + MX3_PWMCR);
+
+	if (!state->enabled && cstate.enabled)
+		pwm_imx27_clk_disable_unprepare(chip);
+
 	return 0;
 }
 
@@ -304,9 +319,13 @@ static int pwm_imx27_probe(struct platform_device *pdev)
 
 	imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
 	if (IS_ERR(imx->clk_ipg)) {
-		dev_err(&pdev->dev, "getting ipg clock failed with %ld\n",
-				PTR_ERR(imx->clk_ipg));
-		return PTR_ERR(imx->clk_ipg);
+		int ret = PTR_ERR(imx->clk_ipg);
+
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"getting ipg clock failed with %d\n",
+				ret);
+		return ret;
 	}
 
 	imx->clk_per = devm_clk_get(&pdev->dev, "per");
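
The reworked pwm_imx27_apply() above now runs the waveform arithmetic unconditionally, so the newly cached duty_cycle stays current even while the output is disabled. A minimal user-space sketch of that arithmetic, with plain 64-bit division standing in for do_div() and assumed example values (66 MHz clk_per, 1 ms period, 25% duty):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t clk_rate = 66000000;   /* assumed clk_per rate in Hz */
          uint64_t period_ns = 1000000;   /* requested period: 1 ms */
          uint64_t duty_ns = 250000;      /* requested duty: 250 us */
          uint64_t c, period_cycles, duty_cycles, prescale;

          c = clk_rate * period_ns;
          c /= 1000000000ULL;                     /* period in clock cycles */
          period_cycles = c;

          prescale = period_cycles / 0x10000 + 1; /* fit the 16-bit PWMPR */
          period_cycles /= prescale;

          duty_cycles = period_cycles * duty_ns / period_ns;

          /* the hardware adds 2 to the programmed period value */
          period_cycles = period_cycles > 2 ? period_cycles - 2 : 0;

          printf("prescale=%llu PWMPR=%llu PWMSAR=%llu\n",
                 (unsigned long long)prescale,
                 (unsigned long long)period_cycles,
                 (unsigned long long)duty_cycles);
          return 0;
  }
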
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index b14376b..f2e57fc 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -25,12 +25,16 @@
 #define  PERIOD_PERIOD(p)	((p) & 0xffff)
 #define  PERIOD_PERIOD_MAX	0x10000
 #define  PERIOD_ACTIVE_HIGH	(3 << 16)
+#define  PERIOD_ACTIVE_LOW	(2 << 16)
+#define  PERIOD_INACTIVE_HIGH	(3 << 18)
 #define  PERIOD_INACTIVE_LOW	(2 << 18)
+#define  PERIOD_POLARITY_NORMAL	(PERIOD_ACTIVE_HIGH | PERIOD_INACTIVE_LOW)
+#define  PERIOD_POLARITY_INVERSE	(PERIOD_ACTIVE_LOW | PERIOD_INACTIVE_HIGH)
 #define  PERIOD_CDIV(div)	(((div) & 0x7) << 20)
 #define  PERIOD_CDIV_MAX	8
 
-static const unsigned int cdiv[PERIOD_CDIV_MAX] = {
-	1, 2, 4, 8, 16, 64, 256, 1024
+static const u8 cdiv_shift[PERIOD_CDIV_MAX] = {
+	0, 1, 2, 3, 4, 6, 8, 10
 };
 
 struct mxs_pwm_chip {
@@ -41,19 +45,34 @@ struct mxs_pwm_chip {
 
 #define to_mxs_pwm_chip(_chip) container_of(_chip, struct mxs_pwm_chip, chip)
 
-static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
-			  int duty_ns, int period_ns)
+static int mxs_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+			 const struct pwm_state *state)
 {
 	struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip);
 	int ret, div = 0;
 	unsigned int period_cycles, duty_cycles;
 	unsigned long rate;
 	unsigned long long c;
+	unsigned int pol_bits;
+
+	/*
+	 * If the PWM channel is disabled, make sure to turn on the
+	 * clock before calling clk_get_rate() and writing to the
+	 * registers. Otherwise, just keep it enabled.
+	 */
+	if (!pwm_is_enabled(pwm)) {
+		ret = clk_prepare_enable(mxs->clk);
+		if (ret)
+			return ret;
+	}
+
+	if (!state->enabled && pwm_is_enabled(pwm))
+		writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + CLR);
 
 	rate = clk_get_rate(mxs->clk);
 	while (1) {
-		c = rate / cdiv[div];
-		c = c * period_ns;
+		c = rate >> cdiv_shift[div];
+		c = c * state->period;
 		do_div(c, 1000000000);
 		if (c < PERIOD_PERIOD_MAX)
 			break;
@@ -63,62 +82,40 @@ static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 	}
 
 	period_cycles = c;
-	c *= duty_ns;
-	do_div(c, period_ns);
+	c *= state->duty_cycle;
+	do_div(c, state->period);
 	duty_cycles = c;
 
 	/*
-	 * If the PWM channel is disabled, make sure to turn on the clock
-	 * before writing the register. Otherwise, keep it enabled.
+	 * The data sheet says the registers must be written to in
+	 * this order (ACTIVEn, then PERIODn). Also, the new settings
+	 * only take effect at the beginning of a new period, avoiding
+	 * glitches.
 	 */
-	if (!pwm_is_enabled(pwm)) {
-		ret = clk_prepare_enable(mxs->clk);
-		if (ret)
-			return ret;
-	}
 
+	pol_bits = state->polarity == PWM_POLARITY_NORMAL ?
+		PERIOD_POLARITY_NORMAL : PERIOD_POLARITY_INVERSE;
 	writel(duty_cycles << 16,
-			mxs->base + PWM_ACTIVE0 + pwm->hwpwm * 0x20);
-	writel(PERIOD_PERIOD(period_cycles) | PERIOD_ACTIVE_HIGH |
-	       PERIOD_INACTIVE_LOW | PERIOD_CDIV(div),
-			mxs->base + PWM_PERIOD0 + pwm->hwpwm * 0x20);
+	       mxs->base + PWM_ACTIVE0 + pwm->hwpwm * 0x20);
+	writel(PERIOD_PERIOD(period_cycles) | pol_bits | PERIOD_CDIV(div),
+	       mxs->base + PWM_PERIOD0 + pwm->hwpwm * 0x20);
 
-	/*
-	 * If the PWM is not enabled, turn the clock off again to save power.
-	 */
-	if (!pwm_is_enabled(pwm))
+	if (state->enabled) {
+		if (!pwm_is_enabled(pwm)) {
+			/*
+			 * The clock was enabled above. Just enable
+			 * the channel in the control register.
+			 */
+			writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + SET);
+		}
+	} else {
 		clk_disable_unprepare(mxs->clk);
-
+	}
 	return 0;
 }
 
-static int mxs_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
-	struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip);
-	int ret;
-
-	ret = clk_prepare_enable(mxs->clk);
-	if (ret)
-		return ret;
-
-	writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + SET);
-
-	return 0;
-}
-
-static void mxs_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
-	struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip);
-
-	writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + CLR);
-
-	clk_disable_unprepare(mxs->clk);
-}
-
 static const struct pwm_ops mxs_pwm_ops = {
-	.config = mxs_pwm_config,
-	.enable = mxs_pwm_enable,
-	.disable = mxs_pwm_disable,
+	.apply = mxs_pwm_apply,
 	.owner = THIS_MODULE,
 };
 
@@ -142,6 +139,8 @@ static int mxs_pwm_probe(struct platform_device *pdev)
 
 	mxs->chip.dev = &pdev->dev;
 	mxs->chip.ops = &mxs_pwm_ops;
+	mxs->chip.of_xlate = of_pwm_xlate_with_flags;
+	mxs->chip.of_pwm_n_cells = 3;
 	mxs->chip.base = -1;
 
 	ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm);
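
The mxs conversion above also replaces the cdiv[] lookup table with cdiv_shift[], since every supported divider is a power of two and the clock rate can then be divided with a cheap right shift. A small user-space sketch of that divider search, under assumed example values (24 MHz clock, 5 ms period):

  #include <stdint.h>
  #include <stdio.h>

  #define PERIOD_PERIOD_MAX       0x10000
  #define PERIOD_CDIV_MAX         8

  /* shift amounts for the dividers 1, 2, 4, 8, 16, 64, 256, 1024 */
  static const uint8_t cdiv_shift[PERIOD_CDIV_MAX] = { 0, 1, 2, 3, 4, 6, 8, 10 };

  int main(void)
  {
          uint64_t rate = 24000000;       /* assumed PWM clock in Hz */
          uint64_t period_ns = 5000000;   /* requested period: 5 ms */
          uint64_t c = 0;
          int div;

          /* pick the smallest divider whose period fits the 16-bit field */
          for (div = 0; div < PERIOD_CDIV_MAX; div++) {
                  c = (rate >> cdiv_shift[div]) * period_ns / 1000000000ULL;
                  if (c < PERIOD_PERIOD_MAX)
                          break;
          }
          if (div == PERIOD_CDIV_MAX)
                  return 1;               /* period too long for this clock */

          printf("div index %d (divide by %u), period_cycles %llu\n",
                 div, 1u << cdiv_shift[div], (unsigned long long)c);
          return 0;
  }
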
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 00772fc..88a3c56 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -256,7 +256,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
 	if (!timer_pdev) {
 		dev_err(&pdev->dev, "Unable to find Timer pdev\n");
 		ret = -ENODEV;
-		goto put;
+		goto err_find_timer_pdev;
 	}
 
 	timer_pdata = dev_get_platdata(&timer_pdev->dev);
@@ -264,7 +264,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
 		dev_dbg(&pdev->dev,
 			 "dmtimer pdata structure NULL, deferring probe\n");
 		ret = -EPROBE_DEFER;
-		goto put;
+		goto err_platdata;
 	}
 
 	pdata = timer_pdata->timer_ops;
@@ -283,30 +283,25 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
 	    !pdata->write_counter) {
 		dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n");
 		ret = -EINVAL;
-		goto put;
+		goto err_platdata;
 	}
 
 	if (!of_get_property(timer, "ti,timer-pwm", NULL)) {
 		dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n");
 		ret = -ENODEV;
-		goto put;
+		goto err_timer_property;
 	}
 
 	dm_timer = pdata->request_by_node(timer);
 	if (!dm_timer) {
 		ret = -EPROBE_DEFER;
-		goto put;
+		goto err_request_timer;
 	}
 
-put:
-	of_node_put(timer);
-	if (ret < 0)
-		return ret;
-
 	omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
 	if (!omap) {
-		pdata->free(dm_timer);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_alloc_omap;
 	}
 
 	omap->pdata = pdata;
@@ -339,27 +334,56 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
 	ret = pwmchip_add(&omap->chip);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to register PWM\n");
-		omap->pdata->free(omap->dm_timer);
-		return ret;
+		goto err_pwmchip_add;
 	}
 
+	of_node_put(timer);
+
 	platform_set_drvdata(pdev, omap);
 
 	return 0;
+
+err_pwmchip_add:
+
+	/*
+	 * *omap is allocated using devm_kzalloc,
+	 * so no free necessary here
+	 */
+err_alloc_omap:
+
+	pdata->free(dm_timer);
+err_request_timer:
+
+err_timer_property:
+err_platdata:
+
+	put_device(&timer_pdev->dev);
+err_find_timer_pdev:
+
+	of_node_put(timer);
+
+	return ret;
 }
 
 static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
 {
 	struct pwm_omap_dmtimer_chip *omap = platform_get_drvdata(pdev);
+	int ret;
+
+	ret = pwmchip_remove(&omap->chip);
+	if (ret)
+		return ret;
 
 	if (pm_runtime_active(&omap->dm_timer_pdev->dev))
 		omap->pdata->stop(omap->dm_timer);
 
 	omap->pdata->free(omap->dm_timer);
 
+	put_device(&omap->dm_timer_pdev->dev);
+
 	mutex_destroy(&omap->mutex);
 
-	return pwmchip_remove(&omap->chip);
+	return 0;
 }
 
 static const struct of_device_id pwm_omap_dmtimer_of_match[] = {
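
The probe() rework above replaces the single "put" label with a chain of labels, so each failure point only undoes what already succeeded, and the previously missing put_device() on the timer platform device is no longer skipped. A hedged, generic sketch of that unwind pattern; the acquire_*/release_* helpers are hypothetical stand-ins for the timer lookups in the real driver:

  int acquire_a(void), acquire_b(void), acquire_c(void);
  void release_a(void), release_b(void);

  int example_probe(void)
  {
          int ret;

          ret = acquire_a();              /* e.g. find the timer node */
          if (ret)
                  return ret;

          ret = acquire_b();              /* e.g. get the timer pdev */
          if (ret)
                  goto err_a;

          ret = acquire_c();              /* e.g. request the dmtimer */
          if (ret)
                  goto err_b;

          return 0;                       /* success: keep a, b and c */

  err_b:
          release_b();                    /* undo acquire_b() */
  err_a:
          release_a();                    /* undo acquire_a() */
          return ret;
  }
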
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 168684b..b07bdca 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -159,13 +159,9 @@ static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset,
 static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
 {
 	struct pca9685 *pca = gpiochip_get_data(gpio);
-	struct pwm_device *pwm;
 
 	pca9685_pwm_gpio_set(gpio, offset, 0);
 	pm_runtime_put(pca->chip.dev);
-	mutex_lock(&pca->lock);
-	pwm = &pca->chip.pwms[offset];
-	mutex_unlock(&pca->lock);
 }
 
 static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip,
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 852eb23..2685577 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -3,6 +3,9 @@
  * R-Car PWM Timer driver
  *
  * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * Limitations:
+ * - The hardware cannot generate a 0% duty cycle.
  */
 
 #include <linux/clk.h>
@@ -161,11 +164,9 @@ static int rcar_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 			  const struct pwm_state *state)
 {
 	struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
-	struct pwm_state cur_state;
 	int div, ret;
 
 	/* This HW/driver only supports normal polarity */
-	pwm_get_state(pwm, &cur_state);
 	if (state->polarity != PWM_POLARITY_NORMAL)
 		return -ENOTSUPP;
 
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 7ff48c1..d3be944 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -377,9 +377,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
 	else
 		regmap_update_bits(priv->regmap, TIM_CCMR2, mask, ccmr);
 
-	regmap_update_bits(priv->regmap, TIM_BDTR,
-			   TIM_BDTR_MOE | TIM_BDTR_AOE,
-			   TIM_BDTR_MOE | TIM_BDTR_AOE);
+	regmap_update_bits(priv->regmap, TIM_BDTR, TIM_BDTR_MOE, TIM_BDTR_MOE);
 
 	return 0;
 }
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 581d232..3e3efa6 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -3,6 +3,10 @@
  * Driver for Allwinner sun4i Pulse Width Modulation Controller
  *
  * Copyright (C) 2014 Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * Limitations:
+ * - When outputting the source clock directly, the PWM logic will be bypassed
+ *   and the currently running period is not guaranteed to be completed
  */
 
 #include <linux/bitops.h>
@@ -16,6 +20,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
+#include <linux/reset.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/time.h>
@@ -72,12 +77,15 @@ static const u32 prescaler_table[] = {
 
 struct sun4i_pwm_data {
 	bool has_prescaler_bypass;
+	bool has_direct_mod_clk_output;
 	unsigned int npwm;
 };
 
 struct sun4i_pwm_chip {
 	struct pwm_chip chip;
+	struct clk *bus_clk;
 	struct clk *clk;
+	struct reset_control *rst;
 	void __iomem *base;
 	spinlock_t ctrl_lock;
 	const struct sun4i_pwm_data *data;
@@ -115,6 +123,20 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
 
 	val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
 
+	/*
+	 * The PWM chapter in the H6 manual has a diagram which explains that
+	 * if the bypass bit is set, no other setting has any meaning.
+	 * Experiments also showed that the enable bit is ignored in this case.
+	 */
+	if ((val & BIT_CH(PWM_BYPASS, pwm->hwpwm)) &&
+	    sun4i_pwm->data->has_direct_mod_clk_output) {
+		state->period = DIV_ROUND_UP_ULL(NSEC_PER_SEC, clk_rate);
+		state->duty_cycle = DIV_ROUND_UP_ULL(state->period, 2);
+		state->polarity = PWM_POLARITY_NORMAL;
+		state->enabled = true;
+		return;
+	}
+
 	if ((PWM_REG_PRESCAL(val, pwm->hwpwm) == PWM_PRESCAL_MASK) &&
 	    sun4i_pwm->data->has_prescaler_bypass)
 		prescaler = 1;
@@ -146,13 +168,24 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
 
 static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
 			       const struct pwm_state *state,
-			       u32 *dty, u32 *prd, unsigned int *prsclr)
+			       u32 *dty, u32 *prd, unsigned int *prsclr,
+			       bool *bypass)
 {
 	u64 clk_rate, div = 0;
-	unsigned int pval, prescaler = 0;
+	unsigned int prescaler = 0;
 
 	clk_rate = clk_get_rate(sun4i_pwm->clk);
 
+	*bypass = sun4i_pwm->data->has_direct_mod_clk_output &&
+		  state->enabled &&
+		  (state->period * clk_rate >= NSEC_PER_SEC) &&
+		  (state->period * clk_rate < 2 * NSEC_PER_SEC) &&
+		  (state->duty_cycle * clk_rate * 2 >= NSEC_PER_SEC);
+
+	/* Skip calculation of other parameters if we bypass them */
+	if (*bypass)
+		return 0;
+
 	if (sun4i_pwm->data->has_prescaler_bypass) {
 		/* First, test without any prescaler when available */
 		prescaler = PWM_PRESCAL_MASK;
@@ -170,9 +203,11 @@ static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
 	if (prescaler == 0) {
 		/* Go up from the first divider */
 		for (prescaler = 0; prescaler < PWM_PRESCAL_MASK; prescaler++) {
-			if (!prescaler_table[prescaler])
+			unsigned int pval = prescaler_table[prescaler];
+
+			if (!pval)
 				continue;
-			pval = prescaler_table[prescaler];
+
 			div = clk_rate;
 			do_div(div, pval);
 			div = div * state->period;
@@ -199,10 +234,11 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 {
 	struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
 	struct pwm_state cstate;
-	u32 ctrl;
+	u32 ctrl, duty = 0, period = 0, val;
 	int ret;
-	unsigned int delay_us;
+	unsigned int delay_us, prescaler = 0;
 	unsigned long now;
+	bool bypass;
 
 	pwm_get_state(pwm, &cstate);
 
@@ -214,46 +250,52 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 		}
 	}
 
+	ret = sun4i_pwm_calculate(sun4i_pwm, state, &duty, &period, &prescaler,
+				  &bypass);
+	if (ret) {
+		dev_err(chip->dev, "period exceeds the maximum value\n");
+		if (!cstate.enabled)
+			clk_disable_unprepare(sun4i_pwm->clk);
+		return ret;
+	}
+
 	spin_lock(&sun4i_pwm->ctrl_lock);
 	ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
 
-	if ((cstate.period != state->period) ||
-	    (cstate.duty_cycle != state->duty_cycle)) {
-		u32 period, duty, val;
-		unsigned int prescaler;
-
-		ret = sun4i_pwm_calculate(sun4i_pwm, state,
-					  &duty, &period, &prescaler);
-		if (ret) {
-			dev_err(chip->dev, "period exceeds the maximum value\n");
-			spin_unlock(&sun4i_pwm->ctrl_lock);
-			if (!cstate.enabled)
-				clk_disable_unprepare(sun4i_pwm->clk);
-			return ret;
-		}
-
-		if (PWM_REG_PRESCAL(ctrl, pwm->hwpwm) != prescaler) {
-			/* Prescaler changed, the clock has to be gated */
-			ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+	if (sun4i_pwm->data->has_direct_mod_clk_output) {
+		if (bypass) {
+			ctrl |= BIT_CH(PWM_BYPASS, pwm->hwpwm);
+			/* We can skip the other parameters */
 			sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
-
-			ctrl &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm);
-			ctrl |= BIT_CH(prescaler, pwm->hwpwm);
+			spin_unlock(&sun4i_pwm->ctrl_lock);
+			return 0;
 		}
 
-		val = (duty & PWM_DTY_MASK) | PWM_PRD(period);
-		sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
-		sun4i_pwm->next_period[pwm->hwpwm] = jiffies +
-			usecs_to_jiffies(cstate.period / 1000 + 1);
-		sun4i_pwm->needs_delay[pwm->hwpwm] = true;
+		ctrl &= ~BIT_CH(PWM_BYPASS, pwm->hwpwm);
 	}
 
+	if (PWM_REG_PRESCAL(ctrl, pwm->hwpwm) != prescaler) {
+		/* Prescaler changed, the clock has to be gated */
+		ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+		sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
+
+		ctrl &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm);
+		ctrl |= BIT_CH(prescaler, pwm->hwpwm);
+	}
+
+	val = (duty & PWM_DTY_MASK) | PWM_PRD(period);
+	sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
+	sun4i_pwm->next_period[pwm->hwpwm] = jiffies +
+		usecs_to_jiffies(cstate.period / 1000 + 1);
+	sun4i_pwm->needs_delay[pwm->hwpwm] = true;
+
 	if (state->polarity != PWM_POLARITY_NORMAL)
 		ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
 	else
 		ctrl |= BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
 
 	ctrl |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+
 	if (state->enabled) {
 		ctrl |= BIT_CH(PWM_EN, pwm->hwpwm);
 	} else if (!sun4i_pwm->needs_delay[pwm->hwpwm]) {
@@ -319,6 +361,12 @@ static const struct sun4i_pwm_data sun4i_pwm_single_bypass = {
 	.npwm = 1,
 };
 
+static const struct sun4i_pwm_data sun50i_h6_pwm_data = {
+	.has_prescaler_bypass = true,
+	.has_direct_mod_clk_output = true,
+	.npwm = 2,
+};
+
 static const struct of_device_id sun4i_pwm_dt_ids[] = {
 	{
 		.compatible = "allwinner,sun4i-a10-pwm",
@@ -336,6 +384,9 @@ static const struct of_device_id sun4i_pwm_dt_ids[] = {
 		.compatible = "allwinner,sun8i-h3-pwm",
 		.data = &sun4i_pwm_single_bypass,
 	}, {
+		.compatible = "allwinner,sun50i-h6-pwm",
+		.data = &sun50i_h6_pwm_data,
+	}, {
 		/* sentinel */
 	},
 };
@@ -360,9 +411,69 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
 	if (IS_ERR(pwm->base))
 		return PTR_ERR(pwm->base);
 
-	pwm->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(pwm->clk))
+	/*
+	 * All hardware variants need a source clock that is divided and
+	 * then feeds the counter that defines the output waveform. In the
+	 * device tree this clock is either unnamed or called "mod".
+	 * Some variants (e.g. H6) need another clock to access the
+	 * hardware registers; this is called "bus".
+	 * So we request "mod" first (and ignore the corner case that a
+	 * parent provides a "mod" clock while the right one would be the
+	 * unnamed one of the PWM device) and if this is not found we fall
+	 * back to the first clock of the PWM.
+	 */
+	pwm->clk = devm_clk_get_optional(&pdev->dev, "mod");
+	if (IS_ERR(pwm->clk)) {
+		if (PTR_ERR(pwm->clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "get mod clock failed %pe\n",
+				pwm->clk);
 		return PTR_ERR(pwm->clk);
+	}
+
+	if (!pwm->clk) {
+		pwm->clk = devm_clk_get(&pdev->dev, NULL);
+		if (IS_ERR(pwm->clk)) {
+			if (PTR_ERR(pwm->clk) != -EPROBE_DEFER)
+				dev_err(&pdev->dev, "get unnamed clock failed %pe\n",
+					pwm->clk);
+			return PTR_ERR(pwm->clk);
+		}
+	}
+
+	pwm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
+	if (IS_ERR(pwm->bus_clk)) {
+		if (PTR_ERR(pwm->bus_clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "get bus clock failed %pe\n",
+				pwm->bus_clk);
+		return PTR_ERR(pwm->bus_clk);
+	}
+
+	pwm->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
+	if (IS_ERR(pwm->rst)) {
+		if (PTR_ERR(pwm->rst) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "get reset failed %pe\n",
+				pwm->rst);
+		return PTR_ERR(pwm->rst);
+	}
+
+	/* Deassert reset */
+	ret = reset_control_deassert(pwm->rst);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot deassert reset control: %pe\n",
+			ERR_PTR(ret));
+		return ret;
+	}
+
+	/*
+	 * We're keeping the bus clock on for the sake of simplicity.
+	 * Actually it only needs to be on for hardware register accesses.
+	 */
+	ret = clk_prepare_enable(pwm->bus_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot prepare and enable bus_clk %pe\n",
+			ERR_PTR(ret));
+		goto err_bus;
+	}
 
 	pwm->chip.dev = &pdev->dev;
 	pwm->chip.ops = &sun4i_pwm_ops;
@@ -376,19 +487,34 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
 	ret = pwmchip_add(&pwm->chip);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
-		return ret;
+		goto err_pwm_add;
 	}
 
 	platform_set_drvdata(pdev, pwm);
 
 	return 0;
+
+err_pwm_add:
+	clk_disable_unprepare(pwm->bus_clk);
+err_bus:
+	reset_control_assert(pwm->rst);
+
+	return ret;
 }
 
 static int sun4i_pwm_remove(struct platform_device *pdev)
 {
 	struct sun4i_pwm_chip *pwm = platform_get_drvdata(pdev);
+	int ret;
 
-	return pwmchip_remove(&pwm->chip);
+	ret = pwmchip_remove(&pwm->chip);
+	if (ret)
+		return ret;
+
+	clk_disable_unprepare(pwm->bus_clk);
+	reset_control_assert(pwm->rst);
+
+	return 0;
 }
 
 static struct platform_driver sun4i_pwm_driver = {
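
Bypass mode above routes the module clock straight to the pad, which yields exactly one source-clock cycle per period at roughly 50% duty, so sun4i_pwm_calculate() only selects it when the request is close to that waveform. A user-space sketch of the same test; the 24 MHz clock is an assumption for illustration:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define NSEC_PER_SEC 1000000000ULL

  static bool can_bypass(uint64_t clk_rate, uint64_t period_ns,
                         uint64_t duty_ns, bool enabled)
  {
          /* period within [1, 2) clock cycles and duty at least half of it */
          return enabled &&
                 (period_ns * clk_rate >= NSEC_PER_SEC) &&
                 (period_ns * clk_rate < 2 * NSEC_PER_SEC) &&
                 (duty_ns * clk_rate * 2 >= NSEC_PER_SEC);
  }

  int main(void)
  {
          /* 24 MHz clock: one cycle is ~41.67 ns */
          printf("42ns/21ns: %d\n", can_bypass(24000000, 42, 21, true));
          printf("1000ns/500ns: %d\n", can_bypass(24000000, 1000, 500, true));
          return 0;
  }
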
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 97bfdd4..074a2ef 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -209,6 +209,7 @@
 config REGULATOR_BD718XX
 	tristate "ROHM BD71837 Power Regulator"
 	depends on MFD_ROHM_BD718XX
+	select REGULATOR_ROHM
 	help
 	  This driver supports voltage regulators on ROHM BD71837 PMIC.
 	  This will enable support for the software controllable buck
@@ -823,6 +824,9 @@
 	  Say y here to support the regulators found on Ricoh RN5T567,
 	  RN5T618 or RC5T619 PMIC.
 
+config REGULATOR_ROHM
+	tristate
+
 config REGULATOR_RT5033
 	tristate "Richtek RT5033 Regulators"
 	depends on MFD_RT5033
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 07bc977..c0d6b96 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -102,6 +102,7 @@
 obj-$(CONFIG_REGULATOR_RC5T583)  += rc5t583-regulator.o
 obj-$(CONFIG_REGULATOR_RK808)   += rk808-regulator.o
 obj-$(CONFIG_REGULATOR_RN5T618) += rn5t618-regulator.o
+obj-$(CONFIG_REGULATOR_ROHM)	+= rohm-regulator.o
 obj-$(CONFIG_REGULATOR_RT5033)	+= rt5033-regulator.o
 obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
 obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index 8f9b2d8..cf38728 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -318,6 +318,7 @@ struct reg_init {
 };
 struct bd718xx_regulator_data {
 	struct regulator_desc desc;
+	const struct rohm_dvs_config dvs;
 	const struct reg_init init;
 	const struct reg_init *additional_inits;
 	int additional_init_amnt;
@@ -349,133 +350,15 @@ static const struct reg_init bd71837_ldo6_inits[] = {
 	},
 };
 
-#define NUM_DVS_BUCKS 4
-
-struct of_dvs_setting {
-	const char *prop;
-	unsigned int reg;
-};
-
-static int set_dvs_levels(const struct of_dvs_setting *dvs,
-			  struct device_node *np,
-			  const struct regulator_desc *desc,
-			  struct regmap *regmap)
-{
-	int ret, i;
-	unsigned int uv;
-
-	ret = of_property_read_u32(np, dvs->prop, &uv);
-	if (ret) {
-		if (ret != -EINVAL)
-			return ret;
-		return 0;
-	}
-
-	for (i = 0; i < desc->n_voltages; i++) {
-		ret = regulator_desc_list_voltage_linear_range(desc, i);
-		if (ret < 0)
-			continue;
-		if (ret == uv) {
-			i <<= ffs(desc->vsel_mask) - 1;
-			ret = regmap_update_bits(regmap, dvs->reg,
-						 DVS_BUCK_RUN_MASK, i);
-			break;
-		}
-	}
-	return ret;
-}
-
-static int buck4_set_hw_dvs_levels(struct device_node *np,
+static int buck_set_hw_dvs_levels(struct device_node *np,
 			    const struct regulator_desc *desc,
 			    struct regulator_config *cfg)
 {
-	int ret, i;
-	const struct of_dvs_setting dvs[] = {
-		{
-			.prop = "rohm,dvs-run-voltage",
-			.reg = BD71837_REG_BUCK4_VOLT_RUN,
-		},
-	};
+	struct bd718xx_regulator_data *data;
 
-	for (i = 0; i < ARRAY_SIZE(dvs); i++) {
-		ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
-		if (ret)
-			break;
-	}
-	return ret;
-}
-static int buck3_set_hw_dvs_levels(struct device_node *np,
-			    const struct regulator_desc *desc,
-			    struct regulator_config *cfg)
-{
-	int ret, i;
-	const struct of_dvs_setting dvs[] = {
-		{
-			.prop = "rohm,dvs-run-voltage",
-			.reg = BD71837_REG_BUCK3_VOLT_RUN,
-		},
-	};
+	data = container_of(desc, struct bd718xx_regulator_data, desc);
 
-	for (i = 0; i < ARRAY_SIZE(dvs); i++) {
-		ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
-		if (ret)
-			break;
-	}
-	return ret;
-}
-
-static int buck2_set_hw_dvs_levels(struct device_node *np,
-			    const struct regulator_desc *desc,
-			    struct regulator_config *cfg)
-{
-	int ret, i;
-	const struct of_dvs_setting dvs[] = {
-		{
-			.prop = "rohm,dvs-run-voltage",
-			.reg = BD718XX_REG_BUCK2_VOLT_RUN,
-		},
-		{
-			.prop = "rohm,dvs-idle-voltage",
-			.reg = BD718XX_REG_BUCK2_VOLT_IDLE,
-		},
-	};
-
-
-
-	for (i = 0; i < ARRAY_SIZE(dvs); i++) {
-		ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
-		if (ret)
-			break;
-	}
-	return ret;
-}
-
-static int buck1_set_hw_dvs_levels(struct device_node *np,
-			    const struct regulator_desc *desc,
-			    struct regulator_config *cfg)
-{
-	int ret, i;
-	const struct of_dvs_setting dvs[] = {
-		{
-			.prop = "rohm,dvs-run-voltage",
-			.reg = BD718XX_REG_BUCK1_VOLT_RUN,
-		},
-		{
-			.prop = "rohm,dvs-idle-voltage",
-			.reg = BD718XX_REG_BUCK1_VOLT_IDLE,
-		},
-		{
-			.prop = "rohm,dvs-suspend-voltage",
-			.reg = BD718XX_REG_BUCK1_VOLT_SUSP,
-		},
-	};
-
-	for (i = 0; i < ARRAY_SIZE(dvs); i++) {
-		ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
-		if (ret)
-			break;
-	}
-	return ret;
+	return rohm_regulator_set_dvs_levels(&data->dvs, np, desc, cfg->regmap);
 }
 
 static const struct bd718xx_regulator_data bd71847_regulators[] = {
@@ -496,7 +379,17 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
 			.enable_reg = BD718XX_REG_BUCK1_CTRL,
 			.enable_mask = BD718XX_BUCK_EN,
 			.owner = THIS_MODULE,
-			.of_parse_cb = buck1_set_hw_dvs_levels,
+			.of_parse_cb = buck_set_hw_dvs_levels,
+		},
+		.dvs = {
+			.level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+				     ROHM_DVS_LEVEL_SUSPEND,
+			.run_reg = BD718XX_REG_BUCK1_VOLT_RUN,
+			.run_mask = DVS_BUCK_RUN_MASK,
+			.idle_reg = BD718XX_REG_BUCK1_VOLT_IDLE,
+			.idle_mask = DVS_BUCK_RUN_MASK,
+			.suspend_reg = BD718XX_REG_BUCK1_VOLT_SUSP,
+			.suspend_mask = DVS_BUCK_RUN_MASK,
 		},
 		.init = {
 			.reg = BD718XX_REG_BUCK1_CTRL,
@@ -520,7 +413,14 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
 			.enable_reg = BD718XX_REG_BUCK2_CTRL,
 			.enable_mask = BD718XX_BUCK_EN,
 			.owner = THIS_MODULE,
-			.of_parse_cb = buck2_set_hw_dvs_levels,
+			.of_parse_cb = buck_set_hw_dvs_levels,
+		},
+		.dvs = {
+			.level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE,
+			.run_reg = BD718XX_REG_BUCK2_VOLT_RUN,
+			.run_mask = DVS_BUCK_RUN_MASK,
+			.idle_reg = BD718XX_REG_BUCK2_VOLT_IDLE,
+			.idle_mask = DVS_BUCK_RUN_MASK,
 		},
 		.init = {
 			.reg = BD718XX_REG_BUCK2_CTRL,
@@ -792,7 +692,17 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
 			.enable_reg = BD718XX_REG_BUCK1_CTRL,
 			.enable_mask = BD718XX_BUCK_EN,
 			.owner = THIS_MODULE,
-			.of_parse_cb = buck1_set_hw_dvs_levels,
+			.of_parse_cb = buck_set_hw_dvs_levels,
+		},
+		.dvs = {
+			.level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+				     ROHM_DVS_LEVEL_SUSPEND,
+			.run_reg = BD718XX_REG_BUCK1_VOLT_RUN,
+			.run_mask = DVS_BUCK_RUN_MASK,
+			.idle_reg = BD718XX_REG_BUCK1_VOLT_IDLE,
+			.idle_mask = DVS_BUCK_RUN_MASK,
+			.suspend_reg = BD718XX_REG_BUCK1_VOLT_SUSP,
+			.suspend_mask = DVS_BUCK_RUN_MASK,
 		},
 		.init = {
 			.reg = BD718XX_REG_BUCK1_CTRL,
@@ -816,7 +726,14 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
 			.enable_reg = BD718XX_REG_BUCK2_CTRL,
 			.enable_mask = BD718XX_BUCK_EN,
 			.owner = THIS_MODULE,
-			.of_parse_cb = buck2_set_hw_dvs_levels,
+			.of_parse_cb = buck_set_hw_dvs_levels,
+		},
+		.dvs = {
+			.level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE,
+			.run_reg = BD718XX_REG_BUCK2_VOLT_RUN,
+			.run_mask = DVS_BUCK_RUN_MASK,
+			.idle_reg = BD718XX_REG_BUCK2_VOLT_IDLE,
+			.idle_mask = DVS_BUCK_RUN_MASK,
 		},
 		.init = {
 			.reg = BD718XX_REG_BUCK2_CTRL,
@@ -840,7 +757,12 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
 			.enable_reg = BD71837_REG_BUCK3_CTRL,
 			.enable_mask = BD718XX_BUCK_EN,
 			.owner = THIS_MODULE,
-			.of_parse_cb = buck3_set_hw_dvs_levels,
+			.of_parse_cb = buck_set_hw_dvs_levels,
+		},
+		.dvs = {
+			.level_map = ROHM_DVS_LEVEL_RUN,
+			.run_reg = BD71837_REG_BUCK3_VOLT_RUN,
+			.run_mask = DVS_BUCK_RUN_MASK,
 		},
 		.init = {
 			.reg = BD71837_REG_BUCK3_CTRL,
@@ -864,7 +786,12 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
 			.enable_reg = BD71837_REG_BUCK4_CTRL,
 			.enable_mask = BD718XX_BUCK_EN,
 			.owner = THIS_MODULE,
-			.of_parse_cb = buck4_set_hw_dvs_levels,
+			.of_parse_cb = buck_set_hw_dvs_levels,
+		},
+		.dvs = {
+			.level_map = ROHM_DVS_LEVEL_RUN,
+			.run_reg = BD71837_REG_BUCK4_VOLT_RUN,
+			.run_mask = DVS_BUCK_RUN_MASK,
 		},
 		.init = {
 			.reg = BD71837_REG_BUCK4_CTRL,
@@ -1150,6 +1077,7 @@ static int bd718xx_probe(struct platform_device *pdev)
 	bool use_snvs;
 	const struct bd718xx_regulator_data *reg_data;
 	unsigned int num_reg_data;
+	enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;
 
 	mfd = dev_get_drvdata(pdev->dev.parent);
 	if (!mfd) {
@@ -1158,7 +1086,7 @@ static int bd718xx_probe(struct platform_device *pdev)
 		goto err;
 	}
 
-	switch (mfd->chip.chip_type) {
+	switch (chip) {
 	case ROHM_CHIP_TYPE_BD71837:
 		reg_data = bd71837_regulators;
 		num_reg_data = ARRAY_SIZE(bd71837_regulators);
@@ -1275,11 +1203,19 @@ static int bd718xx_probe(struct platform_device *pdev)
 	return err;
 }
 
+static const struct platform_device_id bd718x7_pmic_id[] = {
+	{ "bd71837-pmic", ROHM_CHIP_TYPE_BD71837 },
+	{ "bd71847-pmic", ROHM_CHIP_TYPE_BD71847 },
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, bd718x7_pmic_id);
+
 static struct platform_driver bd718xx_regulator = {
 	.driver = {
 		.name = "bd718xx-pmic",
 	},
 	.probe = bd718xx_probe,
+	.id_table = bd718x7_pmic_id,
 };
 
 module_platform_driver(bd718xx_regulator);
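
With the per-regulator DVS description embedded next to the regulator_desc, one callback can recover its bd718xx_regulator_data from the desc pointer alone via container_of(). A small user-space sketch of that recovery, using simplified types and a local container_of() macro in place of the kernel's:

  #include <stddef.h>
  #include <stdio.h>

  #define container_of(ptr, type, member) \
          ((type *)((char *)(ptr) - offsetof(type, member)))

  struct desc { int id; };

  struct reg_data {
          int dvs_level;
          struct desc desc;       /* embedded, like regulator_desc above */
  };

  int main(void)
  {
          struct reg_data data = { .dvs_level = 3, .desc = { .id = 7 } };
          struct desc *d = &data.desc;

          /* go from the embedded member back to its container */
          struct reg_data *back = container_of(d, struct reg_data, desc);

          printf("dvs_level=%d id=%d\n", back->dvs_level, back->desc.id);
          return 0;
  }
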
diff --git a/drivers/regulator/rohm-regulator.c b/drivers/regulator/rohm-regulator.c
new file mode 100644
index 0000000..3990023
--- /dev/null
+++ b/drivers/regulator/rohm-regulator.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 ROHM Semiconductors
+
+#include <linux/errno.h>
+#include <linux/mfd/rohm-generic.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+static int set_dvs_level(const struct regulator_desc *desc,
+			 struct device_node *np, struct regmap *regmap,
+			 char *prop, unsigned int reg, unsigned int mask,
+			 unsigned int omask, unsigned int oreg)
+{
+	int ret, i;
+	uint32_t uv;
+
+	ret = of_property_read_u32(np, prop, &uv);
+	if (ret) {
+		if (ret != -EINVAL)
+			return ret;
+		return 0;
+	}
+
+	if (uv == 0) {
+		if (omask)
+			return regmap_update_bits(regmap, oreg, omask, 0);
+	}
+	for (i = 0; i < desc->n_voltages; i++) {
+		ret = regulator_desc_list_voltage_linear_range(desc, i);
+		if (ret < 0)
+			continue;
+		if (ret == uv) {
+			i <<= ffs(desc->vsel_mask) - 1;
+			ret = regmap_update_bits(regmap, reg, mask, i);
+			if (omask && !ret)
+				ret = regmap_update_bits(regmap, oreg, omask,
+							 omask);
+			break;
+		}
+	}
+	return ret;
+}
+
+int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
+			  struct device_node *np,
+			  const struct regulator_desc *desc,
+			  struct regmap *regmap)
+{
+	int i, ret = 0;
+	char *prop;
+	unsigned int reg, mask, omask, oreg = desc->enable_reg;
+
+	for (i = 0; i < ROHM_DVS_LEVEL_MAX && !ret; i++) {
+		if (dvs->level_map & (1 << i)) {
+			switch (i + 1) {
+			case ROHM_DVS_LEVEL_RUN:
+				prop = "rohm,dvs-run-voltage";
+				reg = dvs->run_reg;
+				mask = dvs->run_mask;
+				omask = dvs->run_on_mask;
+				break;
+			case ROHM_DVS_LEVEL_IDLE:
+				prop = "rohm,dvs-idle-voltage";
+				reg = dvs->idle_reg;
+				mask = dvs->idle_mask;
+				omask = dvs->idle_on_mask;
+				break;
+			case ROHM_DVS_LEVEL_SUSPEND:
+				prop = "rohm,dvs-suspend-voltage";
+				reg = dvs->suspend_reg;
+				mask = dvs->suspend_mask;
+				omask = dvs->suspend_on_mask;
+				break;
+			case ROHM_DVS_LEVEL_LPSR:
+				prop = "rohm,dvs-lpsr-voltage";
+				reg = dvs->lpsr_reg;
+				mask = dvs->lpsr_mask;
+				omask = dvs->lpsr_on_mask;
+				break;
+			default:
+				return -EINVAL;
+			}
+			ret = set_dvs_level(desc, np, regmap, prop, reg, mask,
+					    omask, oreg);
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL(rohm_regulator_set_dvs_levels);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("Generic helpers for ROHM PMIC regulator drivers");
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 94afdde..de3862c 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -23,6 +23,16 @@
 
 	  It's safe to say N here.
 
+config MTK_SCP
+	tristate "Mediatek SCP support"
+	depends on ARCH_MEDIATEK
+	select RPMSG_MTK_SCP
+	help
+	  Say y here to support Mediatek's System Companion Processor (SCP) via
+	  the remote processor framework.
+
+	  It's safe to say N here.
+
 config OMAP_REMOTEPROC
 	tristate "OMAP remoteproc support"
 	depends on ARCH_OMAP4 || SOC_OMAP5
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 00f09e6..e30a1b1 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -10,6 +10,7 @@
 remoteproc-y				+= remoteproc_virtio.o
 remoteproc-y				+= remoteproc_elf_loader.o
 obj-$(CONFIG_IMX_REMOTEPROC)		+= imx_rproc.o
+obj-$(CONFIG_MTK_SCP)			+= mtk_scp.o mtk_scp_ipi.o
 obj-$(CONFIG_OMAP_REMOTEPROC)		+= omap_remoteproc.o
 obj-$(CONFIG_WKUP_M3_RPROC)		+= wkup_m3_rproc.o
 obj-$(CONFIG_DA8XX_REMOTEPROC)		+= da8xx_remoteproc.o
diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
new file mode 100644
index 0000000..deb2009
--- /dev/null
+++ b/drivers/remoteproc/mtk_common.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#ifndef __RPROC_MTK_COMMON_H
+#define __RPROC_MTK_COMMON_H
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+
+#define MT8183_SW_RSTN			0x0
+#define MT8183_SW_RSTN_BIT		BIT(0)
+#define MT8183_SCP_TO_HOST		0x1C
+#define MT8183_SCP_IPC_INT_BIT		BIT(0)
+#define MT8183_SCP_WDT_INT_BIT		BIT(8)
+#define MT8183_HOST_TO_SCP		0x28
+#define MT8183_HOST_IPC_INT_BIT		BIT(0)
+#define MT8183_WDT_CFG			0x84
+#define MT8183_SCP_CLK_SW_SEL		0x4000
+#define MT8183_SCP_CLK_DIV_SEL		0x4024
+#define MT8183_SCP_SRAM_PDN		0x402C
+#define MT8183_SCP_L1_SRAM_PD		0x4080
+#define MT8183_SCP_TCM_TAIL_SRAM_PD	0x4094
+
+#define MT8183_SCP_CACHE_SEL(x)		(0x14000 + (x) * 0x3000)
+#define MT8183_SCP_CACHE_CON		MT8183_SCP_CACHE_SEL(0)
+#define MT8183_SCP_DCACHE_CON		MT8183_SCP_CACHE_SEL(1)
+#define MT8183_SCP_CACHESIZE_8KB	BIT(8)
+#define MT8183_SCP_CACHE_CON_WAYEN	BIT(10)
+
+#define SCP_FW_VER_LEN			32
+#define SCP_SHARE_BUFFER_SIZE		288
+
+struct scp_run {
+	u32 signaled;
+	s8 fw_ver[SCP_FW_VER_LEN];
+	u32 dec_capability;
+	u32 enc_capability;
+	wait_queue_head_t wq;
+};
+
+struct scp_ipi_desc {
+	/* For protecting handler. */
+	struct mutex lock;
+	scp_ipi_handler_t handler;
+	void *priv;
+};
+
+struct mtk_scp {
+	struct device *dev;
+	struct rproc *rproc;
+	struct clk *clk;
+	void __iomem *reg_base;
+	void __iomem *sram_base;
+	size_t sram_size;
+
+	struct mtk_share_obj __iomem *recv_buf;
+	struct mtk_share_obj __iomem *send_buf;
+	struct scp_run run;
+	/* To prevent multiple ipi_send run concurrently. */
+	struct mutex send_lock;
+	struct scp_ipi_desc ipi_desc[SCP_IPI_MAX];
+	bool ipi_id_ack[SCP_IPI_MAX];
+	wait_queue_head_t ack_wq;
+
+	void __iomem *cpu_addr;
+	phys_addr_t phys_addr;
+	size_t dram_size;
+
+	struct rproc_subdev *rpmsg_subdev;
+};
+
+/**
+ * struct mtk_share_obj - SRAM buffer shared between AP and SCP
+ *
+ * @id:		IPI id
+ * @len:	share buffer length
+ * @share_buf:	share buffer data
+ */
+struct mtk_share_obj {
+	u32 id;
+	u32 len;
+	u8 share_buf[SCP_SHARE_BUFFER_SIZE];
+};
+
+void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len);
+void scp_ipi_lock(struct mtk_scp *scp, u32 id);
+void scp_ipi_unlock(struct mtk_scp *scp, u32 id);
+
+#endif
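
The two mtk_share_obj buffers described here are carved out of the very end of the SCP SRAM image: scp_ipi_init() in mtk_scp.c below places the send buffer just under SCP_FW_END and the receive buffer immediately before it. A user-space sketch of that layout calculation:

  #include <stddef.h>
  #include <stdio.h>

  #define SCP_FW_END              0x7C000 /* from mtk_scp.c below */
  #define SCP_SHARE_BUFFER_SIZE   288

  struct mtk_share_obj {
          unsigned int id;
          unsigned int len;
          unsigned char share_buf[SCP_SHARE_BUFFER_SIZE];
  };

  int main(void)
  {
          size_t send_offset = SCP_FW_END - sizeof(struct mtk_share_obj);
          size_t recv_offset = send_offset - sizeof(struct mtk_share_obj);

          printf("send buffer at 0x%zx, recv buffer at 0x%zx, %zu bytes each\n",
                 send_offset, recv_offset, sizeof(struct mtk_share_obj));
          return 0;
  }
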
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
new file mode 100644
index 0000000..7ccdf64
--- /dev/null
+++ b/drivers/remoteproc/mtk_scp.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <asm/barrier.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+#include <linux/rpmsg/mtk_rpmsg.h>
+
+#include "mtk_common.h"
+#include "remoteproc_internal.h"
+
+#define MAX_CODE_SIZE 0x500000
+#define SCP_FW_END 0x7C000
+
+/**
+ * scp_get() - get a reference to SCP.
+ *
+ * @pdev:	the platform device of the module requesting the SCP
+ *		platform device in order to use the SCP API.
+ *
+ * Return: NULL on failure, otherwise a reference to the SCP.
+ **/
+struct mtk_scp *scp_get(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *scp_node;
+	struct platform_device *scp_pdev;
+
+	scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
+	if (!scp_node) {
+		dev_err(dev, "can't get SCP node\n");
+		return NULL;
+	}
+
+	scp_pdev = of_find_device_by_node(scp_node);
+	of_node_put(scp_node);
+
+	if (WARN_ON(!scp_pdev)) {
+		dev_err(dev, "SCP pdev failed\n");
+		return NULL;
+	}
+
+	return platform_get_drvdata(scp_pdev);
+}
+EXPORT_SYMBOL_GPL(scp_get);
+
+/**
+ * scp_put() - "free" the SCP
+ *
+ * @scp:	mtk_scp structure from scp_get().
+ **/
+void scp_put(struct mtk_scp *scp)
+{
+	put_device(scp->dev);
+}
+EXPORT_SYMBOL_GPL(scp_put);
+
+static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
+{
+	dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);
+	rproc_report_crash(scp->rproc, RPROC_WATCHDOG);
+}
+
+static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
+{
+	struct mtk_scp *scp = (struct mtk_scp *)priv;
+	struct scp_run *run = (struct scp_run *)data;
+
+	scp->run.signaled = run->signaled;
+	strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
+	scp->run.dec_capability = run->dec_capability;
+	scp->run.enc_capability = run->enc_capability;
+	wake_up_interruptible(&scp->run.wq);
+}
+
+static void scp_ipi_handler(struct mtk_scp *scp)
+{
+	struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
+	struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
+	u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
+	scp_ipi_handler_t handler;
+	u32 id = readl(&rcv_obj->id);
+	u32 len = readl(&rcv_obj->len);
+
+	if (len > SCP_SHARE_BUFFER_SIZE) {
+		dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
+			SCP_SHARE_BUFFER_SIZE);
+		return;
+	}
+	if (id >= SCP_IPI_MAX) {
+		dev_err(scp->dev, "No such ipi id = %d\n", id);
+		return;
+	}
+
+	scp_ipi_lock(scp, id);
+	handler = ipi_desc[id].handler;
+	if (!handler) {
+		dev_err(scp->dev, "No such ipi id = %d\n", id);
+		scp_ipi_unlock(scp, id);
+		return;
+	}
+
+	memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
+	handler(tmp_data, len, ipi_desc[id].priv);
+	scp_ipi_unlock(scp, id);
+
+	scp->ipi_id_ack[id] = true;
+	wake_up(&scp->ack_wq);
+}
+
+static int scp_ipi_init(struct mtk_scp *scp)
+{
+	size_t send_offset = SCP_FW_END - sizeof(struct mtk_share_obj);
+	size_t recv_offset = send_offset - sizeof(struct mtk_share_obj);
+
+	/* Disable SCP to host interrupt */
+	writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
+
+	/* shared buffer initialization */
+	scp->recv_buf =
+		(struct mtk_share_obj __iomem *)(scp->sram_base + recv_offset);
+	scp->send_buf =
+		(struct mtk_share_obj __iomem *)(scp->sram_base + send_offset);
+	memset_io(scp->recv_buf, 0, sizeof(scp->recv_buf));
+	memset_io(scp->send_buf, 0, sizeof(scp->send_buf));
+
+	return 0;
+}
+
+static void scp_reset_assert(const struct mtk_scp *scp)
+{
+	u32 val;
+
+	val = readl(scp->reg_base + MT8183_SW_RSTN);
+	val &= ~MT8183_SW_RSTN_BIT;
+	writel(val, scp->reg_base + MT8183_SW_RSTN);
+}
+
+static void scp_reset_deassert(const struct mtk_scp *scp)
+{
+	u32 val;
+
+	val = readl(scp->reg_base + MT8183_SW_RSTN);
+	val |= MT8183_SW_RSTN_BIT;
+	writel(val, scp->reg_base + MT8183_SW_RSTN);
+}
+
+static irqreturn_t scp_irq_handler(int irq, void *priv)
+{
+	struct mtk_scp *scp = priv;
+	u32 scp_to_host;
+	int ret;
+
+	ret = clk_prepare_enable(scp->clk);
+	if (ret) {
+		dev_err(scp->dev, "failed to enable clocks\n");
+		return IRQ_NONE;
+	}
+
+	scp_to_host = readl(scp->reg_base + MT8183_SCP_TO_HOST);
+	if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
+		scp_ipi_handler(scp);
+	else
+		scp_wdt_handler(scp, scp_to_host);
+
+	/* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
+	writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
+	       scp->reg_base + MT8183_SCP_TO_HOST);
+	clk_disable_unprepare(scp->clk);
+
+	return IRQ_HANDLED;
+}
+
+static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
+{
+	struct device *dev = &rproc->dev;
+	struct elf32_hdr *ehdr;
+	struct elf32_phdr *phdr;
+	int i, ret = 0;
+	const u8 *elf_data = fw->data;
+
+	ehdr = (struct elf32_hdr *)elf_data;
+	phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+	/* go through the available ELF segments */
+	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+		u32 da = phdr->p_paddr;
+		u32 memsz = phdr->p_memsz;
+		u32 filesz = phdr->p_filesz;
+		u32 offset = phdr->p_offset;
+		void __iomem *ptr;
+
+		if (phdr->p_type != PT_LOAD)
+			continue;
+
+		dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
+			phdr->p_type, da, memsz, filesz);
+
+		if (filesz > memsz) {
+			dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+				filesz, memsz);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (offset + filesz > fw->size) {
+			dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
+				offset + filesz, fw->size);
+			ret = -EINVAL;
+			break;
+		}
+
+		/* grab the kernel address for this device address */
+		ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz);
+		if (!ptr) {
+			dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
+			ret = -EINVAL;
+			break;
+		}
+
+		/* put the segment where the remote processor expects it */
+		if (phdr->p_filesz)
+			scp_memcpy_aligned(ptr, elf_data + phdr->p_offset,
+					   filesz);
+	}
+
+	return ret;
+}
+
+static int scp_load(struct rproc *rproc, const struct firmware *fw)
+{
+	const struct mtk_scp *scp = rproc->priv;
+	struct device *dev = scp->dev;
+	int ret;
+
+	ret = clk_prepare_enable(scp->clk);
+	if (ret) {
+		dev_err(dev, "failed to enable clocks\n");
+		return ret;
+	}
+
+	/* Hold SCP in reset while loading FW. */
+	scp_reset_assert(scp);
+
+	/* Reset clocks before loading FW */
+	writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
+	writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
+
+	/* Initialize TCM before loading FW. */
+	writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
+	writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
+
+	/* Turn on the power of SCP's SRAM before using it. */
+	writel(0x0, scp->reg_base + MT8183_SCP_SRAM_PDN);
+
+	/*
+	 * Set I-cache and D-cache size before loading SCP FW.
+	 * SCP SRAM logical address may change when cache size setting differs.
+	 */
+	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
+	       scp->reg_base + MT8183_SCP_CACHE_CON);
+	writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+
+	ret = scp_elf_load_segments(rproc, fw);
+	clk_disable_unprepare(scp->clk);
+
+	return ret;
+}
+
+static int scp_start(struct rproc *rproc)
+{
+	struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+	struct device *dev = scp->dev;
+	struct scp_run *run = &scp->run;
+	int ret;
+
+	ret = clk_prepare_enable(scp->clk);
+	if (ret) {
+		dev_err(dev, "failed to enable clocks\n");
+		return ret;
+	}
+
+	run->signaled = false;
+
+	scp_reset_deassert(scp);
+
+	ret = wait_event_interruptible_timeout(
+					run->wq,
+					run->signaled,
+					msecs_to_jiffies(2000));
+
+	if (ret == 0) {
+		dev_err(dev, "wait SCP initialization timeout!\n");
+		ret = -ETIME;
+		goto stop;
+	}
+	if (ret == -ERESTARTSYS) {
+		dev_err(dev, "wait SCP interrupted by a signal!\n");
+		goto stop;
+	}
+	clk_disable_unprepare(scp->clk);
+	dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);
+
+	return 0;
+
+stop:
+	scp_reset_assert(scp);
+	clk_disable_unprepare(scp->clk);
+	return ret;
+}
+
+static void *scp_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+	struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+	int offset;
+
+	if (da < scp->sram_size) {
+		offset = da;
+		if (offset >= 0 && (offset + len) < scp->sram_size)
+			return (void __force *)scp->sram_base + offset;
+	} else {
+		offset = da - scp->phys_addr;
+		if (offset >= 0 && (offset + len) < scp->dram_size)
+			return (void __force *)scp->cpu_addr + offset;
+	}
+
+	return NULL;
+}
+
+static int scp_stop(struct rproc *rproc)
+{
+	struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+	int ret;
+
+	ret = clk_prepare_enable(scp->clk);
+	if (ret) {
+		dev_err(scp->dev, "failed to enable clocks\n");
+		return ret;
+	}
+
+	scp_reset_assert(scp);
+	/* Disable SCP watchdog */
+	writel(0, scp->reg_base + MT8183_WDT_CFG);
+	clk_disable_unprepare(scp->clk);
+
+	return 0;
+}
+
+static const struct rproc_ops scp_ops = {
+	.start		= scp_start,
+	.stop		= scp_stop,
+	.load		= scp_load,
+	.da_to_va	= scp_da_to_va,
+};
+
+/**
+ * scp_get_device() - get device struct of SCP
+ *
+ * @scp:	mtk_scp structure
+ **/
+struct device *scp_get_device(struct mtk_scp *scp)
+{
+	return scp->dev;
+}
+EXPORT_SYMBOL_GPL(scp_get_device);
+
+/**
+ * scp_get_rproc() - get rproc struct of SCP
+ *
+ * @scp:	mtk_scp structure
+ **/
+struct rproc *scp_get_rproc(struct mtk_scp *scp)
+{
+	return scp->rproc;
+}
+EXPORT_SYMBOL_GPL(scp_get_rproc);
+
+/**
+ * scp_get_vdec_hw_capa() - get video decoder hardware capability
+ *
+ * @scp:	mtk_scp structure
+ *
+ * Return: video decoder hardware capability
+ **/
+unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
+{
+	return scp->run.dec_capability;
+}
+EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);
+
+/**
+ * scp_get_venc_hw_capa() - get video encoder hardware capability
+ *
+ * @scp:	mtk_scp structure
+ *
+ * Return: video encoder hardware capability
+ **/
+unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
+{
+	return scp->run.enc_capability;
+}
+EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);
+
+/**
+ * scp_mapping_dm_addr() - Mapping SRAM/DRAM to kernel virtual address
+ *
+ * @scp:	mtk_scp structure
+ * @mem_addr:	SCP views memory address
+ *
+ * Map the SCP's SRAM address, DMEM (Data Extended Memory) address or
+ * working buffer address to a kernel virtual address.
+ *
+ * Return: Return ERR_PTR(-EINVAL) if mapping failed,
+ * otherwise the mapped kernel virtual address
+ **/
+void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
+{
+	void *ptr;
+
+	ptr = scp_da_to_va(scp->rproc, mem_addr, 0);
+	if (!ptr)
+		return ERR_PTR(-EINVAL);
+
+	return ptr;
+}
+EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
+
+static int scp_map_memory_region(struct mtk_scp *scp)
+{
+	int ret;
+
+	ret = of_reserved_mem_device_init(scp->dev);
+	if (ret) {
+		dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
+		return -ENOMEM;
+	}
+
+	/* Reserved SCP code size */
+	scp->dram_size = MAX_CODE_SIZE;
+	scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
+					   &scp->phys_addr, GFP_KERNEL);
+	if (!scp->cpu_addr)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void scp_unmap_memory_region(struct mtk_scp *scp)
+{
+	dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
+			  scp->phys_addr);
+	of_reserved_mem_device_release(scp->dev);
+}
+
+static int scp_register_ipi(struct platform_device *pdev, u32 id,
+			    ipi_handler_t handler, void *priv)
+{
+	struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+	return scp_ipi_register(scp, id, handler, priv);
+}
+
+static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
+{
+	struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+	scp_ipi_unregister(scp, id);
+}
+
+static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
+			unsigned int len, unsigned int wait)
+{
+	struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+	return scp_ipi_send(scp, id, buf, len, wait);
+}
+
+static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
+	.send_ipi = scp_send_ipi,
+	.register_ipi = scp_register_ipi,
+	.unregister_ipi = scp_unregister_ipi,
+	.ns_ipi_id = SCP_IPI_NS_SERVICE,
+};
+
+static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
+{
+	scp->rpmsg_subdev =
+		mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
+					      &mtk_scp_rpmsg_info);
+	if (scp->rpmsg_subdev)
+		rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
+}
+
+static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
+{
+	if (scp->rpmsg_subdev) {
+		rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
+		mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
+		scp->rpmsg_subdev = NULL;
+	}
+}
+
+static int scp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct mtk_scp *scp;
+	struct rproc *rproc;
+	struct resource *res;
+	char *fw_name = "scp.img";
+	int ret, i;
+
+	rproc = rproc_alloc(dev,
+			    np->name,
+			    &scp_ops,
+			    fw_name,
+			    sizeof(*scp));
+	if (!rproc) {
+		dev_err(dev, "unable to allocate remoteproc\n");
+		return -ENOMEM;
+	}
+
+	scp = (struct mtk_scp *)rproc->priv;
+	scp->rproc = rproc;
+	scp->dev = dev;
+	platform_set_drvdata(pdev, scp);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+	scp->sram_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR((__force void *)scp->sram_base)) {
+		dev_err(dev, "Failed to parse and map sram memory\n");
+		ret = PTR_ERR((__force void *)scp->sram_base);
+		goto free_rproc;
+	}
+	scp->sram_size = resource_size(res);
+
+	mutex_init(&scp->send_lock);
+	for (i = 0; i < SCP_IPI_MAX; i++)
+		mutex_init(&scp->ipi_desc[i].lock);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+	scp->reg_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR((__force void *)scp->reg_base)) {
+		dev_err(dev, "Failed to parse and map cfg memory\n");
+		ret = PTR_ERR((__force void *)scp->reg_base);
+		goto destroy_mutex;
+	}
+
+	ret = scp_map_memory_region(scp);
+	if (ret)
+		goto destroy_mutex;
+
+	scp->clk = devm_clk_get(dev, "main");
+	if (IS_ERR(scp->clk)) {
+		dev_err(dev, "Failed to get clock\n");
+		ret = PTR_ERR(scp->clk);
+		goto release_dev_mem;
+	}
+
+	ret = clk_prepare_enable(scp->clk);
+	if (ret) {
+		dev_err(dev, "failed to enable clocks\n");
+		goto release_dev_mem;
+	}
+
+	ret = scp_ipi_init(scp);
+	clk_disable_unprepare(scp->clk);
+	if (ret) {
+		dev_err(dev, "Failed to init ipi\n");
+		goto release_dev_mem;
+	}
+
+	/* register SCP initialization IPI */
+	ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
+	if (ret) {
+		dev_err(dev, "Failed to register IPI_SCP_INIT\n");
+		goto release_dev_mem;
+	}
+
+	init_waitqueue_head(&scp->run.wq);
+	init_waitqueue_head(&scp->ack_wq);
+
+	scp_add_rpmsg_subdev(scp);
+
+	ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
+					scp_irq_handler, IRQF_ONESHOT,
+					pdev->name, scp);
+
+	if (ret) {
+		dev_err(dev, "failed to request irq\n");
+		goto remove_subdev;
+	}
+
+	ret = rproc_add(rproc);
+	if (ret)
+		goto remove_subdev;
+
+	return 0;
+
+remove_subdev:
+	scp_remove_rpmsg_subdev(scp);
+	scp_ipi_unregister(scp, SCP_IPI_INIT);
+release_dev_mem:
+	scp_unmap_memory_region(scp);
+destroy_mutex:
+	for (i = 0; i < SCP_IPI_MAX; i++)
+		mutex_destroy(&scp->ipi_desc[i].lock);
+	mutex_destroy(&scp->send_lock);
+free_rproc:
+	rproc_free(rproc);
+
+	return ret;
+}
+
+static int scp_remove(struct platform_device *pdev)
+{
+	struct mtk_scp *scp = platform_get_drvdata(pdev);
+	int i;
+
+	rproc_del(scp->rproc);
+	scp_remove_rpmsg_subdev(scp);
+	scp_ipi_unregister(scp, SCP_IPI_INIT);
+	scp_unmap_memory_region(scp);
+	for (i = 0; i < SCP_IPI_MAX; i++)
+		mutex_destroy(&scp->ipi_desc[i].lock);
+	mutex_destroy(&scp->send_lock);
+	rproc_free(scp->rproc);
+
+	return 0;
+}
+
+static const struct of_device_id mtk_scp_of_match[] = {
+	{ .compatible = "mediatek,mt8183-scp"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
+
+static struct platform_driver mtk_scp_driver = {
+	.probe = scp_probe,
+	.remove = scp_remove,
+	.driver = {
+		.name = "mtk-scp",
+		.of_match_table = of_match_ptr(mtk_scp_of_match),
+	},
+};
+
+module_platform_driver(mtk_scp_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek SCP control driver");
diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
new file mode 100644
index 0000000..3d3d872
--- /dev/null
+++ b/drivers/remoteproc/mtk_scp_ipi.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <asm/barrier.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc/mtk_scp.h>
+
+#include "mtk_common.h"
+
+/**
+ * scp_ipi_register() - register an ipi function
+ *
+ * @scp:	mtk_scp structure
+ * @id:		IPI ID
+ * @handler:	IPI handler
+ * @priv:	private data for IPI handler
+ *
+ * Register an ipi function to receive ipi interrupt from SCP.
+ *
+ * Returns 0 if the ipi is registered successfully, negative errno on error.
+ */
+int scp_ipi_register(struct mtk_scp *scp,
+		     u32 id,
+		     scp_ipi_handler_t handler,
+		     void *priv)
+{
+	if (!scp) {
+		dev_err(scp->dev, "scp device is not ready\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (WARN_ON(id >= SCP_IPI_MAX) || WARN_ON(handler == NULL))
+		return -EINVAL;
+
+	scp_ipi_lock(scp, id);
+	scp->ipi_desc[id].handler = handler;
+	scp->ipi_desc[id].priv = priv;
+	scp_ipi_unlock(scp, id);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(scp_ipi_register);
+
+/**
+ * scp_ipi_unregister() - unregister an ipi function
+ *
+ * @scp:	mtk_scp structure
+ * @id:		IPI ID
+ *
+ * Unregister an ipi function so it no longer receives ipi interrupts from SCP.
+ */
+void scp_ipi_unregister(struct mtk_scp *scp, u32 id)
+{
+	if (!scp)
+		return;
+
+	if (WARN_ON(id >= SCP_IPI_MAX))
+		return;
+
+	scp_ipi_lock(scp, id);
+	scp->ipi_desc[id].handler = NULL;
+	scp->ipi_desc[id].priv = NULL;
+	scp_ipi_unlock(scp, id);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_unregister);
+
+/*
+ * scp_memcpy_aligned() - Copy src to dst, where dst is in SCP SRAM region.
+ *
+ * @dst:	Pointer to the destination buffer, should be in SCP SRAM region.
+ * @src:	Pointer to the source buffer.
+ * @len:	Length of the source buffer to be copied.
+ *
+ * Since the AP's access to SCP SRAM doesn't support byte writes, this always
+ * writes a full word at a time, which may cause some extra bytes to be
+ * written at the beginning and end of dst.
+ */
+void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len)
+{
+	void __iomem *ptr;
+	u32 val;
+	unsigned int i = 0, remain;
+
+	if (!IS_ALIGNED((unsigned long)dst, 4)) {
+		ptr = (void __iomem *)ALIGN_DOWN((unsigned long)dst, 4);
+		i = 4 - (dst - ptr);
+		val = readl_relaxed(ptr);
+		memcpy((u8 *)&val + (4 - i), src, i);
+		writel_relaxed(val, ptr);
+	}
+
+	__iowrite32_copy(dst + i, src + i, (len - i) / 4);
+	remain = (len - i) % 4;
+
+	if (remain > 0) {
+		val = readl_relaxed(dst + len - remain);
+		memcpy(&val, src + len - remain, remain);
+		writel_relaxed(val, dst + len - remain);
+	}
+}
+EXPORT_SYMBOL_GPL(scp_memcpy_aligned);
+
+/**
+ * scp_ipi_lock() - Lock before operations of an IPI ID
+ *
+ * @scp:	mtk_scp structure
+ * @id:		IPI ID
+ *
+ * Note: This should not be used by drivers other than mtk_scp.
+ */
+void scp_ipi_lock(struct mtk_scp *scp, u32 id)
+{
+	if (WARN_ON(id >= SCP_IPI_MAX))
+		return;
+	mutex_lock(&scp->ipi_desc[id].lock);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_lock);
+
+/**
+ * scp_ipi_unlock() - Unlock after operations of an IPI ID
+ *
+ * @scp:	mtk_scp structure
+ * @id:		IPI ID
+ *
+ * Note: This should not be used by drivers other than mtk_scp.
+ */
+void scp_ipi_unlock(struct mtk_scp *scp, u32 id)
+{
+	if (WARN_ON(id >= SCP_IPI_MAX))
+		return;
+	mutex_unlock(&scp->ipi_desc[id].lock);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_unlock);
+
+/**
+ * scp_ipi_send() - send data from AP to scp.
+ *
+ * @scp:	mtk_scp structure
+ * @id:		IPI ID
+ * @buf:	the data buffer
+ * @len:	the data buffer length
+ * @wait:	number of msecs to wait for ack. 0 to skip waiting.
+ *
+ * This function is thread-safe. When this function returns,
+ * the SCP has received the data and started processing it.
+ * When the processing completes, the IPI handler registered
+ * by scp_ipi_register() will be called in interrupt context.
+ *
+ * Return: 0 on success, a negative errno on failure.
+ */
+int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
+		 unsigned int wait)
+{
+	struct mtk_share_obj __iomem *send_obj = scp->send_buf;
+	unsigned long timeout;
+	int ret;
+
+	if (WARN_ON(id <= SCP_IPI_INIT) || WARN_ON(id >= SCP_IPI_MAX) ||
+	    WARN_ON(id == SCP_IPI_NS_SERVICE) ||
+	    WARN_ON(len > sizeof(send_obj->share_buf)) || WARN_ON(!buf))
+		return -EINVAL;
+
+	mutex_lock(&scp->send_lock);
+
+	ret = clk_prepare_enable(scp->clk);
+	if (ret) {
+		dev_err(scp->dev, "failed to enable clock\n");
+		goto unlock_mutex;
+	}
+
+	/* Wait until the SCP has consumed the previous command */
+	timeout = jiffies + msecs_to_jiffies(2000);
+	do {
+		if (time_after(jiffies, timeout)) {
+			dev_err(scp->dev, "%s: IPI timeout!\n", __func__);
+			ret = -ETIMEDOUT;
+			goto clock_disable;
+		}
+	} while (readl(scp->reg_base + MT8183_HOST_TO_SCP));
+
+	scp_memcpy_aligned(send_obj->share_buf, buf, len);
+
+	writel(len, &send_obj->len);
+	writel(id, &send_obj->id);
+
+	scp->ipi_id_ack[id] = false;
+	/* send the command to SCP */
+	writel(MT8183_HOST_IPC_INT_BIT, scp->reg_base + MT8183_HOST_TO_SCP);
+
+	if (wait) {
+		/* wait for SCP's ACK */
+		timeout = msecs_to_jiffies(wait);
+		ret = wait_event_timeout(scp->ack_wq,
+					 scp->ipi_id_ack[id],
+					 timeout);
+		scp->ipi_id_ack[id] = false;
+		if (WARN(!ret, "scp ipi %d ack timeout!", id))
+			ret = -EIO;
+		else
+			ret = 0;
+	}
+
+clock_disable:
+	clk_disable_unprepare(scp->clk);
+unlock_mutex:
+	mutex_unlock(&scp->send_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(scp_ipi_send);
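
As a companion sketch (again with assumed placeholders SCP_IPI_TEST and struct my_request), a client might send a request and wait for the SCP's ack like this:

struct my_request {
	u32 cmd;
	u32 arg;
};

static int my_client_send_cmd(struct mtk_scp *scp, u32 cmd, u32 arg)
{
	struct my_request req = { .cmd = cmd, .arg = arg };

	/*
	 * Blocks until the SCP has consumed the previous command, copies the
	 * request into shared SRAM, rings the doorbell and, because wait is
	 * non-zero, sleeps up to 100 ms for the ack interrupt.
	 */
	return scp_ipi_send(scp, SCP_IPI_TEST, &req, sizeof(req), 100);
}
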
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek SCP IPI interface");
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 471128a..a1cc9cb 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -68,14 +68,24 @@
 #define AXI_HALTREQ_REG			0x0
 #define AXI_HALTACK_REG			0x4
 #define AXI_IDLE_REG			0x8
+#define NAV_AXI_HALTREQ_BIT		BIT(0)
+#define NAV_AXI_HALTACK_BIT		BIT(1)
+#define NAV_AXI_IDLE_BIT		BIT(2)
+#define AXI_GATING_VALID_OVERRIDE	BIT(0)
 
-#define HALT_ACK_TIMEOUT_MS		100
+#define HALT_ACK_TIMEOUT_US		100000
+#define NAV_HALT_ACK_TIMEOUT_US		200
 
 /* QDSP6SS_RESET */
 #define Q6SS_STOP_CORE			BIT(0)
 #define Q6SS_CORE_ARES			BIT(1)
 #define Q6SS_BUS_ARES_ENABLE		BIT(2)
 
+/* QDSP6SS CBCR */
+#define Q6SS_CBCR_CLKEN			BIT(0)
+#define Q6SS_CBCR_CLKOFF		BIT(31)
+#define Q6SS_CBCR_TIMEOUT_US		200
+
 /* QDSP6SS_GFMUX_CTL */
 #define Q6SS_CLK_ENABLE			BIT(1)
 
@@ -96,15 +106,16 @@
 #define QDSP6v56_BHS_ON		BIT(24)
 #define QDSP6v56_CLAMP_WL		BIT(21)
 #define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
-#define HALT_CHECK_MAX_LOOPS		200
 #define QDSP6SS_XO_CBCR		0x0038
 #define QDSP6SS_ACC_OVERRIDE_VAL		0x20
 
 /* QDSP6v65 parameters */
+#define QDSP6SS_CORE_CBCR		0x20
 #define QDSP6SS_SLEEP                   0x3C
 #define QDSP6SS_BOOT_CORE_START         0x400
 #define QDSP6SS_BOOT_CMD                0x404
-#define SLEEP_CHECK_MAX_LOOPS           200
+#define QDSP6SS_BOOT_STATUS		0x408
+#define BOOT_STATUS_TIMEOUT_US		200
 #define BOOT_FSM_TIMEOUT                10000
 
 struct reg_info {
@@ -131,6 +142,7 @@ struct rproc_hexagon_res {
 	int version;
 	bool need_mem_protection;
 	bool has_alt_reset;
+	bool has_halt_nav;
 };
 
 struct q6v5 {
@@ -141,9 +153,14 @@ struct q6v5 {
 	void __iomem *rmb_base;
 
 	struct regmap *halt_map;
+	struct regmap *halt_nav_map;
+	struct regmap *conn_map;
+
 	u32 halt_q6;
 	u32 halt_modem;
 	u32 halt_nc;
+	u32 halt_nav;
+	u32 conn_box;
 
 	struct reset_control *mss_restart;
 	struct reset_control *pdc_reset;
@@ -187,6 +204,7 @@ struct q6v5 {
 	struct qcom_sysmon *sysmon;
 	bool need_mem_protection;
 	bool has_alt_reset;
+	bool has_halt_nav;
 	int mpss_perm;
 	int mba_perm;
 	const char *hexagon_mdt_image;
@@ -198,6 +216,7 @@ enum {
 	MSS_MSM8974,
 	MSS_MSM8996,
 	MSS_MSM8998,
+	MSS_SC7180,
 	MSS_SDM845,
 };
 
@@ -396,6 +415,26 @@ static int q6v5_reset_assert(struct q6v5 *qproc)
 		reset_control_assert(qproc->pdc_reset);
 		ret = reset_control_reset(qproc->mss_restart);
 		reset_control_deassert(qproc->pdc_reset);
+	} else if (qproc->has_halt_nav) {
+		/*
+		 * When the AXI pipeline is being reset while the Q6 modem is
+		 * partly operational, the AXI valid signal may glitch, leading
+		 * to spurious transactions and Q6 hangs. The workaround is to
+		 * assert the AXI_GATING_VALID_OVERRIDE bit before triggering
+		 * the Q6 MSS reset. Both HALTREQ and AXI_GATING_VALID_OVERRIDE
+		 * are withdrawn after the MSS assert, followed by an MSS
+		 * deassert, while holding the PDC reset.
+		 */
+		reset_control_assert(qproc->pdc_reset);
+		regmap_update_bits(qproc->conn_map, qproc->conn_box,
+				   AXI_GATING_VALID_OVERRIDE, 1);
+		regmap_update_bits(qproc->halt_nav_map, qproc->halt_nav,
+				   NAV_AXI_HALTREQ_BIT, 0);
+		reset_control_assert(qproc->mss_restart);
+		reset_control_deassert(qproc->pdc_reset);
+		regmap_update_bits(qproc->conn_map, qproc->conn_box,
+				   AXI_GATING_VALID_OVERRIDE, 0);
+		ret = reset_control_deassert(qproc->mss_restart);
 	} else {
 		ret = reset_control_assert(qproc->mss_restart);
 	}
@@ -413,6 +452,8 @@ static int q6v5_reset_deassert(struct q6v5 *qproc)
 		ret = reset_control_reset(qproc->mss_restart);
 		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
 		reset_control_deassert(qproc->pdc_reset);
+	} else if (qproc->has_halt_nav) {
+		ret = reset_control_reset(qproc->mss_restart);
 	} else {
 		ret = reset_control_deassert(qproc->mss_restart);
 	}
@@ -474,12 +515,12 @@ static int q6v5proc_reset(struct q6v5 *qproc)
 
 	if (qproc->version == MSS_SDM845) {
 		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
-		val |= 0x1;
+		val |= Q6SS_CBCR_CLKEN;
 		writel(val, qproc->reg_base + QDSP6SS_SLEEP);
 
 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
-					 val, !(val & BIT(31)), 1,
-					 SLEEP_CHECK_MAX_LOOPS);
+					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
+					 Q6SS_CBCR_TIMEOUT_US);
 		if (ret) {
 			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
 			return -ETIMEDOUT;
@@ -500,6 +541,54 @@ static int q6v5proc_reset(struct q6v5 *qproc)
 		}
 
 		goto pbl_wait;
+	} else if (qproc->version == MSS_SC7180) {
+		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
+		val |= Q6SS_CBCR_CLKEN;
+		writel(val, qproc->reg_base + QDSP6SS_SLEEP);
+
+		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
+					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
+					 Q6SS_CBCR_TIMEOUT_US);
+		if (ret) {
+			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
+			return -ETIMEDOUT;
+		}
+
+		/* Turn on the XO clock needed for PLL setup */
+		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
+		val |= Q6SS_CBCR_CLKEN;
+		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
+
+		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
+					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
+					 Q6SS_CBCR_TIMEOUT_US);
+		if (ret) {
+			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
+			return -ETIMEDOUT;
+		}
+
+		/* Configure Q6 core CBCR to auto-enable after reset sequence */
+		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
+		val |= Q6SS_CBCR_CLKEN;
+		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);
+
+		/* De-assert the Q6 stop core signal */
+		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
+
+		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
+		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
+
+		/* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
+		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
+					 val, (val & BIT(0)) != 0, 1,
+					 BOOT_STATUS_TIMEOUT_US);
+		if (ret) {
+			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
+			/* Reset the modem so that boot FSM is in reset state */
+			q6v5_reset_deassert(qproc);
+			return ret;
+		}
+		goto pbl_wait;
 	} else if (qproc->version == MSS_MSM8996 ||
 		   qproc->version == MSS_MSM8998) {
 		int mem_pwr_ctl;
@@ -515,13 +604,13 @@ static int q6v5proc_reset(struct q6v5 *qproc)
 
 		/* BHS require xo cbcr to be enabled */
 		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
-		val |= 0x1;
+		val |= Q6SS_CBCR_CLKEN;
 		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
 
 		/* Read CLKOFF bit to go low indicating CLK is enabled */
 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
-					 val, !(val & BIT(31)), 1,
-					 HALT_CHECK_MAX_LOOPS);
+					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
+					 Q6SS_CBCR_TIMEOUT_US);
 		if (ret) {
 			dev_err(qproc->dev,
 				"xo cbcr enabling timed out (rc:%d)\n", ret);
@@ -637,7 +726,6 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
 				   struct regmap *halt_map,
 				   u32 offset)
 {
-	unsigned long timeout;
 	unsigned int val;
 	int ret;
 
@@ -650,14 +738,8 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
 
 	/* Wait for halt */
-	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
-	for (;;) {
-		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
-		if (ret || val || time_after(jiffies, timeout))
-			break;
-
-		msleep(1);
-	}
+	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
+				 val, 1000, HALT_ACK_TIMEOUT_US);
 
 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
 	if (ret || !val)
@@ -667,6 +749,32 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
 }
 
+static void q6v5proc_halt_nav_axi_port(struct q6v5 *qproc,
+				       struct regmap *halt_map,
+				       u32 offset)
+{
+	unsigned int val;
+	int ret;
+
+	/* Check if we're already idle */
+	ret = regmap_read(halt_map, offset, &val);
+	if (!ret && (val & NAV_AXI_IDLE_BIT))
+		return;
+
+	/* Assert halt request */
+	regmap_update_bits(halt_map, offset, NAV_AXI_HALTREQ_BIT,
+			   NAV_AXI_HALTREQ_BIT);
+
+	/* Wait for halt ack */
+	regmap_read_poll_timeout(halt_map, offset, val,
+				 (val & NAV_AXI_HALTACK_BIT),
+				 5, NAV_HALT_ACK_TIMEOUT_US);
+
+	ret = regmap_read(halt_map, offset, &val);
+	if (ret || !(val & NAV_AXI_IDLE_BIT))
+		dev_err(qproc->dev, "port failed halt\n");
+}
+
 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
 {
 	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
@@ -829,6 +937,9 @@ static int q6v5_mba_load(struct q6v5 *qproc)
 halt_axi_ports:
 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+	if (qproc->has_halt_nav)
+		q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
+					   qproc->halt_nav);
 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
 
 reclaim_mba:
@@ -876,6 +987,9 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
 
 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+	if (qproc->has_halt_nav)
+		q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
+					   qproc->halt_nav);
 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
 	if (qproc->version == MSS_MSM8996) {
 		/*
@@ -1253,6 +1367,47 @@ static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
 	qproc->halt_modem = args.args[1];
 	qproc->halt_nc = args.args[2];
 
+	if (qproc->has_halt_nav) {
+		struct platform_device *nav_pdev;
+
+		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+						       "qcom,halt-nav-regs",
+						       1, 0, &args);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
+			return -EINVAL;
+		}
+
+		nav_pdev = of_find_device_by_node(args.np);
+		of_node_put(args.np);
+		if (!nav_pdev) {
+			dev_err(&pdev->dev, "failed to get mss clock device\n");
+			return -EPROBE_DEFER;
+		}
+
+		qproc->halt_nav_map = dev_get_regmap(&nav_pdev->dev, NULL);
+		if (!qproc->halt_nav_map) {
+			dev_err(&pdev->dev, "failed to get map from device\n");
+			return -EINVAL;
+		}
+		qproc->halt_nav = args.args[0];
+
+		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+						       "qcom,halt-nav-regs",
+						       1, 1, &args);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
+			return -EINVAL;
+		}
+
+		qproc->conn_map = syscon_node_to_regmap(args.np);
+		of_node_put(args.np);
+		if (IS_ERR(qproc->conn_map))
+			return PTR_ERR(qproc->conn_map);
+
+		qproc->conn_box = args.args[0];
+	}
+
 	return 0;
 }
 
@@ -1327,7 +1482,7 @@ static int q6v5_init_reset(struct q6v5 *qproc)
 		return PTR_ERR(qproc->mss_restart);
 	}
 
-	if (qproc->has_alt_reset) {
+	if (qproc->has_alt_reset || qproc->has_halt_nav) {
 		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
 								    "pdc_reset");
 		if (IS_ERR(qproc->pdc_reset)) {
@@ -1426,6 +1581,7 @@ static int q6v5_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, qproc);
 
+	qproc->has_halt_nav = desc->has_halt_nav;
 	ret = q6v5_init_mem(qproc, pdev);
 	if (ret)
 		goto free_rproc;
@@ -1549,6 +1705,41 @@ static int q6v5_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct rproc_hexagon_res sc7180_mss = {
+	.hexagon_mba_image = "mba.mbn",
+	.proxy_clk_names = (char*[]){
+		"xo",
+		NULL
+	},
+	.reset_clk_names = (char*[]){
+		"iface",
+		"bus",
+		"snoc_axi",
+		NULL
+	},
+	.active_clk_names = (char*[]){
+		"mnoc_axi",
+		"nav",
+		"mss_nav",
+		"mss_crypto",
+		NULL
+	},
+	.active_pd_names = (char*[]){
+		"load_state",
+		NULL
+	},
+	.proxy_pd_names = (char*[]){
+		"cx",
+		"mx",
+		"mss",
+		NULL
+	},
+	.need_mem_protection = true,
+	.has_alt_reset = false,
+	.has_halt_nav = true,
+	.version = MSS_SC7180,
+};
+
 static const struct rproc_hexagon_res sdm845_mss = {
 	.hexagon_mba_image = "mba.mbn",
 	.proxy_clk_names = (char*[]){
@@ -1580,6 +1771,7 @@ static const struct rproc_hexagon_res sdm845_mss = {
 	},
 	.need_mem_protection = true,
 	.has_alt_reset = true,
+	.has_halt_nav = false,
 	.version = MSS_SDM845,
 };
 
@@ -1594,7 +1786,6 @@ static const struct rproc_hexagon_res msm8998_mss = {
 	.active_clk_names = (char*[]){
 			"iface",
 			"bus",
-			"mem",
 			"gpll0_mss",
 			"mnoc_axi",
 			"snoc_axi",
@@ -1607,6 +1798,7 @@ static const struct rproc_hexagon_res msm8998_mss = {
 	},
 	.need_mem_protection = true,
 	.has_alt_reset = false,
+	.has_halt_nav = false,
 	.version = MSS_MSM8998,
 };
 
@@ -1636,6 +1828,7 @@ static const struct rproc_hexagon_res msm8996_mss = {
 	},
 	.need_mem_protection = true,
 	.has_alt_reset = false,
+	.has_halt_nav = false,
 	.version = MSS_MSM8996,
 };
 
@@ -1668,6 +1861,7 @@ static const struct rproc_hexagon_res msm8916_mss = {
 	},
 	.need_mem_protection = false,
 	.has_alt_reset = false,
+	.has_halt_nav = false,
 	.version = MSS_MSM8916,
 };
 
@@ -1708,6 +1902,7 @@ static const struct rproc_hexagon_res msm8974_mss = {
 	},
 	.need_mem_protection = false,
 	.has_alt_reset = false,
+	.has_halt_nav = false,
 	.version = MSS_MSM8974,
 };
 
@@ -1717,6 +1912,7 @@ static const struct of_device_id q6v5_of_match[] = {
 	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
 	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
 	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
+	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
 	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
 	{ },
 };
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index db4b3c4..edf9d0e 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -15,6 +15,8 @@
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 #include <linux/qcom_scm.h>
 #include <linux/regulator/consumer.h>
 #include <linux/remoteproc.h>
@@ -31,6 +33,10 @@ struct adsp_data {
 	const char *firmware_name;
 	int pas_id;
 	bool has_aggre2_clk;
+	bool auto_boot;
+
+	char **active_pd_names;
+	char **proxy_pd_names;
 
 	const char *ssr_name;
 	const char *sysmon_name;
@@ -49,6 +55,12 @@ struct qcom_adsp {
 	struct regulator *cx_supply;
 	struct regulator *px_supply;
 
+	struct device *active_pds[1];
+	struct device *proxy_pds[3];
+
+	int active_pd_count;
+	int proxy_pd_count;
+
 	int pas_id;
 	int crash_reason_smem;
 	bool has_aggre2_clk;
@@ -67,6 +79,41 @@ struct qcom_adsp {
 	struct qcom_sysmon *sysmon;
 };
 
+static int adsp_pds_enable(struct qcom_adsp *adsp, struct device **pds,
+			   size_t pd_count)
+{
+	int ret;
+	int i;
+
+	for (i = 0; i < pd_count; i++) {
+		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+		ret = pm_runtime_get_sync(pds[i]);
+		if (ret < 0)
+			goto unroll_pd_votes;
+	}
+
+	return 0;
+
+unroll_pd_votes:
+	for (i--; i >= 0; i--) {
+		dev_pm_genpd_set_performance_state(pds[i], 0);
+		pm_runtime_put(pds[i]);
+	}
+
+	return ret;
+};
+
+static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
+			     size_t pd_count)
+{
+	int i;
+
+	for (i = 0; i < pd_count; i++) {
+		dev_pm_genpd_set_performance_state(pds[i], 0);
+		pm_runtime_put(pds[i]);
+	}
+}
+
 static int adsp_load(struct rproc *rproc, const struct firmware *fw)
 {
 	struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
@@ -84,9 +131,17 @@ static int adsp_start(struct rproc *rproc)
 
 	qcom_q6v5_prepare(&adsp->q6v5);
 
+	ret = adsp_pds_enable(adsp, adsp->active_pds, adsp->active_pd_count);
+	if (ret < 0)
+		goto disable_irqs;
+
+	ret = adsp_pds_enable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+	if (ret < 0)
+		goto disable_active_pds;
+
 	ret = clk_prepare_enable(adsp->xo);
 	if (ret)
-		return ret;
+		goto disable_proxy_pds;
 
 	ret = clk_prepare_enable(adsp->aggre2_clk);
 	if (ret)
@@ -124,6 +179,12 @@ static int adsp_start(struct rproc *rproc)
 	clk_disable_unprepare(adsp->aggre2_clk);
 disable_xo_clk:
 	clk_disable_unprepare(adsp->xo);
+disable_proxy_pds:
+	adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+disable_active_pds:
+	adsp_pds_disable(adsp, adsp->active_pds, adsp->active_pd_count);
+disable_irqs:
+	qcom_q6v5_unprepare(&adsp->q6v5);
 
 	return ret;
 }
@@ -136,6 +197,7 @@ static void qcom_pas_handover(struct qcom_q6v5 *q6v5)
 	regulator_disable(adsp->cx_supply);
 	clk_disable_unprepare(adsp->aggre2_clk);
 	clk_disable_unprepare(adsp->xo);
+	adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
 }
 
 static int adsp_stop(struct rproc *rproc)
@@ -152,6 +214,7 @@ static int adsp_stop(struct rproc *rproc)
 	if (ret)
 		dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
 
+	adsp_pds_disable(adsp, adsp->active_pds, adsp->active_pd_count);
 	handover = qcom_q6v5_unprepare(&adsp->q6v5);
 	if (handover)
 		qcom_pas_handover(&adsp->q6v5);
@@ -217,6 +280,59 @@ static int adsp_init_regulator(struct qcom_adsp *adsp)
 	return PTR_ERR_OR_ZERO(adsp->px_supply);
 }
 
+static int adsp_pds_attach(struct device *dev, struct device **devs,
+			   char **pd_names)
+{
+	size_t num_pds = 0;
+	int ret;
+	int i;
+
+	if (!pd_names)
+		return 0;
+
+	/* Handle single power domain */
+	if (dev->pm_domain) {
+		devs[0] = dev;
+		pm_runtime_enable(dev);
+		return 1;
+	}
+
+	while (pd_names[num_pds])
+		num_pds++;
+
+	for (i = 0; i < num_pds; i++) {
+		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+		if (IS_ERR_OR_NULL(devs[i])) {
+			ret = PTR_ERR(devs[i]) ? : -ENODATA;
+			goto unroll_attach;
+		}
+	}
+
+	return num_pds;
+
+unroll_attach:
+	for (i--; i >= 0; i--)
+		dev_pm_domain_detach(devs[i], false);
+
+	return ret;
+};
+
+static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds,
+			    size_t pd_count)
+{
+	struct device *dev = adsp->dev;
+	int i;
+
+	/* Handle single power domain */
+	if (dev->pm_domain && pd_count) {
+		pm_runtime_disable(dev);
+		return;
+	}
+
+	for (i = 0; i < pd_count; i++)
+		dev_pm_domain_detach(pds[i], false);
+}
+
 static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
 {
 	struct device_node *node;
@@ -273,6 +389,8 @@ static int adsp_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
+	rproc->auto_boot = desc->auto_boot;
+
 	adsp = (struct qcom_adsp *)rproc->priv;
 	adsp->dev = &pdev->dev;
 	adsp->rproc = rproc;
@@ -292,10 +410,22 @@ static int adsp_probe(struct platform_device *pdev)
 	if (ret)
 		goto free_rproc;
 
+	ret = adsp_pds_attach(&pdev->dev, adsp->active_pds,
+			      desc->active_pd_names);
+	if (ret < 0)
+		goto free_rproc;
+	adsp->active_pd_count = ret;
+
+	ret = adsp_pds_attach(&pdev->dev, adsp->proxy_pds,
+			      desc->proxy_pd_names);
+	if (ret < 0)
+		goto detach_active_pds;
+	adsp->proxy_pd_count = ret;
+
 	ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem,
 			     qcom_pas_handover);
 	if (ret)
-		goto free_rproc;
+		goto detach_proxy_pds;
 
 	qcom_add_glink_subdev(rproc, &adsp->glink_subdev);
 	qcom_add_smd_subdev(rproc, &adsp->smd_subdev);
@@ -305,15 +435,19 @@ static int adsp_probe(struct platform_device *pdev)
 					      desc->ssctl_id);
 	if (IS_ERR(adsp->sysmon)) {
 		ret = PTR_ERR(adsp->sysmon);
-		goto free_rproc;
+		goto detach_proxy_pds;
 	}
 
 	ret = rproc_add(rproc);
 	if (ret)
-		goto free_rproc;
+		goto detach_proxy_pds;
 
 	return 0;
 
+detach_proxy_pds:
+	adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+detach_active_pds:
+	adsp_pds_detach(adsp, adsp->active_pds, adsp->active_pd_count);
 free_rproc:
 	rproc_free(rproc);
 
@@ -340,6 +474,41 @@ static const struct adsp_data adsp_resource_init = {
 		.firmware_name = "adsp.mdt",
 		.pas_id = 1,
 		.has_aggre2_clk = false,
+		.auto_boot = true,
+		.ssr_name = "lpass",
+		.sysmon_name = "adsp",
+		.ssctl_id = 0x14,
+};
+
+static const struct adsp_data sm8150_adsp_resource = {
+		.crash_reason_smem = 423,
+		.firmware_name = "adsp.mdt",
+		.pas_id = 1,
+		.has_aggre2_clk = false,
+		.auto_boot = true,
+		.active_pd_names = (char*[]){
+			"load_state",
+			NULL
+		},
+		.proxy_pd_names = (char*[]){
+			"cx",
+			NULL
+		},
+		.ssr_name = "lpass",
+		.sysmon_name = "adsp",
+		.ssctl_id = 0x14,
+};
+
+static const struct adsp_data msm8998_adsp_resource = {
+		.crash_reason_smem = 423,
+		.firmware_name = "adsp.mdt",
+		.pas_id = 1,
+		.has_aggre2_clk = false,
+		.auto_boot = true,
+		.proxy_pd_names = (char*[]){
+			"cx",
+			NULL
+		},
 		.ssr_name = "lpass",
 		.sysmon_name = "adsp",
 		.ssctl_id = 0x14,
@@ -350,16 +519,92 @@ static const struct adsp_data cdsp_resource_init = {
 	.firmware_name = "cdsp.mdt",
 	.pas_id = 18,
 	.has_aggre2_clk = false,
+	.auto_boot = true,
 	.ssr_name = "cdsp",
 	.sysmon_name = "cdsp",
 	.ssctl_id = 0x17,
 };
 
+static const struct adsp_data sm8150_cdsp_resource = {
+	.crash_reason_smem = 601,
+	.firmware_name = "cdsp.mdt",
+	.pas_id = 18,
+	.has_aggre2_clk = false,
+	.auto_boot = true,
+	.active_pd_names = (char*[]){
+		"load_state",
+		NULL
+	},
+	.proxy_pd_names = (char*[]){
+		"cx",
+		NULL
+	},
+	.ssr_name = "cdsp",
+	.sysmon_name = "cdsp",
+	.ssctl_id = 0x17,
+};
+
+static const struct adsp_data mpss_resource_init = {
+	.crash_reason_smem = 421,
+	.firmware_name = "modem.mdt",
+	.pas_id = 4,
+	.has_aggre2_clk = false,
+	.auto_boot = false,
+	.active_pd_names = (char*[]){
+		"load_state",
+		NULL
+	},
+	.proxy_pd_names = (char*[]){
+		"cx",
+		"mss",
+		NULL
+	},
+	.ssr_name = "mpss",
+	.sysmon_name = "modem",
+	.ssctl_id = 0x12,
+};
+
 static const struct adsp_data slpi_resource_init = {
 		.crash_reason_smem = 424,
 		.firmware_name = "slpi.mdt",
 		.pas_id = 12,
 		.has_aggre2_clk = true,
+		.auto_boot = true,
+		.ssr_name = "dsps",
+		.sysmon_name = "slpi",
+		.ssctl_id = 0x16,
+};
+
+static const struct adsp_data sm8150_slpi_resource = {
+		.crash_reason_smem = 424,
+		.firmware_name = "slpi.mdt",
+		.pas_id = 12,
+		.has_aggre2_clk = false,
+		.auto_boot = true,
+		.active_pd_names = (char*[]){
+			"load_state",
+			NULL
+		},
+		.proxy_pd_names = (char*[]){
+			"lcx",
+			"lmx",
+			NULL
+		},
+		.ssr_name = "dsps",
+		.sysmon_name = "slpi",
+		.ssctl_id = 0x16,
+};
+
+static const struct adsp_data msm8998_slpi_resource = {
+		.crash_reason_smem = 424,
+		.firmware_name = "slpi.mdt",
+		.pas_id = 12,
+		.has_aggre2_clk = true,
+		.auto_boot = true,
+		.proxy_pd_names = (char*[]){
+			"ssc_cx",
+			NULL
+		},
 		.ssr_name = "dsps",
 		.sysmon_name = "slpi",
 		.ssctl_id = 0x16,
@@ -369,6 +614,7 @@ static const struct adsp_data wcss_resource_init = {
 	.crash_reason_smem = 421,
 	.firmware_name = "wcnss.mdt",
 	.pas_id = 6,
+	.auto_boot = true,
 	.ssr_name = "mpss",
 	.sysmon_name = "wcnss",
 	.ssctl_id = 0x12,
@@ -378,11 +624,17 @@ static const struct of_device_id adsp_of_match[] = {
 	{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
 	{ .compatible = "qcom,msm8996-adsp-pil", .data = &adsp_resource_init},
 	{ .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init},
+	{ .compatible = "qcom,msm8998-adsp-pas", .data = &msm8998_adsp_resource},
+	{ .compatible = "qcom,msm8998-slpi-pas", .data = &msm8998_slpi_resource},
 	{ .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
 	{ .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
 	{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
 	{ .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init},
 	{ .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init},
+	{ .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
+	{ .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
+	{ .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
+	{ .compatible = "qcom,sm8150-slpi-pas", .data = &sm8150_slpi_resource},
 	{ },
 };
 MODULE_DEVICE_TABLE(of, adsp_of_match);
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index c231314..faf3822 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -394,7 +394,7 @@ static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
 		break;
 	default:
 		return -EINVAL;
-	};
+	}
 
 	sysmon->ssctl_version = svc->version;
 
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 307df98..097f33e 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -477,8 +477,8 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
 	char name[16];
 
 	/* make sure resource isn't truncated */
-	if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
-			+ rsc->config_len > avail) {
+	if (struct_size(rsc, vring, rsc->num_of_vrings) + rsc->config_len >
+			avail) {
 		dev_err(dev, "vdev rsc is truncated\n");
 		return -EINVAL;
 	}
@@ -2223,7 +2223,7 @@ static int __init remoteproc_init(void)
 
 	return 0;
 }
-module_init(remoteproc_init);
+subsys_initcall(remoteproc_init);
 
 static void __exit remoteproc_exit(void)
 {
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 3ad7817..461b0e5 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -49,6 +49,13 @@
 	  This enables the reset controller driver for Broadcom STB SoCs using
 	  a SUN_TOP_CTRL_SW_INIT style controller.
 
+config RESET_BRCMSTB_RESCAL
+	bool "Broadcom STB RESCAL reset controller"
+	default ARCH_BRCMSTB || COMPILE_TEST
+	help
+	  This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on
+	  BCM7216.
+
 config RESET_HSDK
 	bool "Synopsys HSDK Reset Driver"
 	depends on HAS_IOMEM
@@ -64,6 +71,15 @@
 	help
 	  This enables the reset controller driver for i.MX7 SoCs.
 
+config RESET_INTEL_GW
+	bool "Intel Reset Controller Driver"
+	depends on OF
+	select REGMAP_MMIO
+	help
+	  This enables the reset controller driver for Intel Gateway SoCs.
+	  Say Y to control the reset signals provided by the reset controller.
+	  Otherwise, say N.
+
 config RESET_LANTIQ
 	bool "Lantiq XWAY Reset Driver" if COMPILE_TEST
 	default SOC_TYPE_XWAY
@@ -89,6 +105,13 @@
 	  This enables the reset driver for Audio Memory Arbiter of
 	  Amlogic's A113 based SoCs
 
+config RESET_NPCM
+	bool "NPCM BMC Reset Driver" if COMPILE_TEST
+	default ARCH_NPCM
+	help
+	  This enables the reset controller driver for Nuvoton NPCM
+	  BMC SoCs.
+
 config RESET_OXNAS
 	bool
 
@@ -99,7 +122,7 @@
 	  This enables the reset driver for ImgTec Pistachio SoCs.
 
 config RESET_QCOM_AOSS
-	bool "Qcom AOSS Reset Driver"
+	tristate "Qcom AOSS Reset Driver"
 	depends on ARCH_QCOM || COMPILE_TEST
 	help
 	  This enables the AOSS (always on subsystem) reset driver
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index cf60ce5..249ed35 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -8,12 +8,15 @@
 obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o
 obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
 obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o
+obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o
 obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
 obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
+obj-$(CONFIG_RESET_INTEL_GW) += reset-intel-gw.o
 obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
 obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
 obj-$(CONFIG_RESET_MESON) += reset-meson.o
 obj-$(CONFIG_RESET_MESON_AUDIO_ARB) += reset-meson-audio-arb.o
+obj-$(CONFIG_RESET_NPCM) += reset-npcm.o
 obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
 obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
 obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 7597c70..01c0c7a 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -150,13 +150,14 @@ int devm_reset_controller_register(struct device *dev,
 		return -ENOMEM;
 
 	ret = reset_controller_register(rcdev);
-	if (!ret) {
-		*rcdevp = rcdev;
-		devres_add(dev, rcdevp);
-	} else {
+	if (ret) {
 		devres_free(rcdevp);
+		return ret;
 	}
 
+	*rcdevp = rcdev;
+	devres_add(dev, rcdevp);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(devm_reset_controller_register);
@@ -787,13 +788,14 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
 		return ERR_PTR(-ENOMEM);
 
 	rstc = __reset_control_get(dev, id, index, shared, optional, acquired);
-	if (!IS_ERR_OR_NULL(rstc)) {
-		*ptr = rstc;
-		devres_add(dev, ptr);
-	} else {
+	if (IS_ERR_OR_NULL(rstc)) {
 		devres_free(ptr);
+		return rstc;
 	}
 
+	*ptr = rstc;
+	devres_add(dev, ptr);
+
 	return rstc;
 }
 EXPORT_SYMBOL_GPL(__devm_reset_control_get);
@@ -919,22 +921,21 @@ EXPORT_SYMBOL_GPL(of_reset_control_array_get);
 struct reset_control *
 devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
 {
-	struct reset_control **devres;
-	struct reset_control *rstc;
+	struct reset_control **ptr, *rstc;
 
-	devres = devres_alloc(devm_reset_control_release, sizeof(*devres),
-			      GFP_KERNEL);
-	if (!devres)
+	ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
+			   GFP_KERNEL);
+	if (!ptr)
 		return ERR_PTR(-ENOMEM);
 
 	rstc = of_reset_control_array_get(dev->of_node, shared, optional, true);
 	if (IS_ERR_OR_NULL(rstc)) {
-		devres_free(devres);
+		devres_free(ptr);
 		return rstc;
 	}
 
-	*devres = rstc;
-	devres_add(dev, devres);
+	*ptr = rstc;
+	devres_add(dev, ptr);
 
 	return rstc;
 }
diff --git a/drivers/reset/reset-brcmstb-rescal.c b/drivers/reset/reset-brcmstb-rescal.c
new file mode 100644
index 0000000..b6f074d
--- /dev/null
+++ b/drivers/reset/reset-brcmstb-rescal.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2020 Broadcom */
+
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+#define BRCM_RESCAL_START	0x0
+#define  BRCM_RESCAL_START_BIT	BIT(0)
+#define BRCM_RESCAL_CTRL	0x4
+#define BRCM_RESCAL_STATUS	0x8
+#define  BRCM_RESCAL_STATUS_BIT	BIT(0)
+
+struct brcm_rescal_reset {
+	void __iomem *base;
+	struct device *dev;
+	struct reset_controller_dev rcdev;
+};
+
+static int brcm_rescal_reset_set(struct reset_controller_dev *rcdev,
+				 unsigned long id)
+{
+	struct brcm_rescal_reset *data =
+		container_of(rcdev, struct brcm_rescal_reset, rcdev);
+	void __iomem *base = data->base;
+	u32 reg;
+	int ret;
+
+	reg = readl(base + BRCM_RESCAL_START);
+	writel(reg | BRCM_RESCAL_START_BIT, base + BRCM_RESCAL_START);
+	reg = readl(base + BRCM_RESCAL_START);
+	if (!(reg & BRCM_RESCAL_START_BIT)) {
+		dev_err(data->dev, "failed to start SATA/PCIe rescal\n");
+		return -EIO;
+	}
+
+	ret = readl_poll_timeout(base + BRCM_RESCAL_STATUS, reg,
+				 !(reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
+	if (ret) {
+		dev_err(data->dev, "time out on SATA/PCIe rescal\n");
+		return ret;
+	}
+
+	reg = readl(base + BRCM_RESCAL_START);
+	writel(reg & ~BRCM_RESCAL_START_BIT, base + BRCM_RESCAL_START);
+
+	dev_dbg(data->dev, "SATA/PCIe rescal success\n");
+
+	return 0;
+}
+
+static int brcm_rescal_reset_xlate(struct reset_controller_dev *rcdev,
+				   const struct of_phandle_args *reset_spec)
+{
+	/* This is needed if #reset-cells == 0. */
+	return 0;
+}
+
+static const struct reset_control_ops brcm_rescal_reset_ops = {
+	.reset = brcm_rescal_reset_set,
+};
+
+static int brcm_rescal_reset_probe(struct platform_device *pdev)
+{
+	struct brcm_rescal_reset *data;
+	struct resource *res;
+
+	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	data->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(data->base))
+		return PTR_ERR(data->base);
+
+	data->rcdev.owner = THIS_MODULE;
+	data->rcdev.nr_resets = 1;
+	data->rcdev.ops = &brcm_rescal_reset_ops;
+	data->rcdev.of_node = pdev->dev.of_node;
+	data->rcdev.of_xlate = brcm_rescal_reset_xlate;
+	data->dev = &pdev->dev;
+
+	return devm_reset_controller_register(&pdev->dev, &data->rcdev);
+}
+
+static const struct of_device_id brcm_rescal_reset_of_match[] = {
+	{ .compatible = "brcm,bcm7216-pcie-sata-rescal" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, brcm_rescal_reset_of_match);
+
+static struct platform_driver brcm_rescal_reset_driver = {
+	.probe = brcm_rescal_reset_probe,
+	.driver = {
+		.name	= "brcm-rescal-reset",
+		.of_match_table	= brcm_rescal_reset_of_match,
+	}
+};
+module_platform_driver(brcm_rescal_reset_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom SATA/PCIe rescal reset controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-intel-gw.c b/drivers/reset/reset-intel-gw.c
new file mode 100644
index 0000000..85423844
--- /dev/null
+++ b/drivers/reset/reset-intel-gw.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Intel Corporation.
+ * Lei Chuanhua <Chuanhua.lei@intel.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#define RCU_RST_STAT	0x0024
+#define RCU_RST_REQ	0x0048
+
+#define REG_OFFSET	GENMASK(31, 16)
+#define BIT_OFFSET	GENMASK(15, 8)
+#define STAT_BIT_OFFSET	GENMASK(7, 0)
+
+#define to_reset_data(x)	container_of(x, struct intel_reset_data, rcdev)
+
+struct intel_reset_soc {
+	bool legacy;
+	u32 reset_cell_count;
+};
+
+struct intel_reset_data {
+	struct reset_controller_dev rcdev;
+	struct notifier_block restart_nb;
+	const struct intel_reset_soc *soc_data;
+	struct regmap *regmap;
+	struct device *dev;
+	u32 reboot_id;
+};
+
+static const struct regmap_config intel_rcu_regmap_config = {
+	.name =		"intel-reset",
+	.reg_bits =	32,
+	.reg_stride =	4,
+	.val_bits =	32,
+	.fast_io =	true,
+};
+
+/*
+ * The reset status register is located at offset X + 4, where X is the
+ * offset of the corresponding reset control register.
+ */
+static u32 id_to_reg_and_bit_offsets(struct intel_reset_data *data,
+				     unsigned long id, u32 *rst_req,
+				     u32 *req_bit, u32 *stat_bit)
+{
+	*rst_req = FIELD_GET(REG_OFFSET, id);
+	*req_bit = FIELD_GET(BIT_OFFSET, id);
+
+	if (data->soc_data->legacy)
+		*stat_bit = FIELD_GET(STAT_BIT_OFFSET, id);
+	else
+		*stat_bit = *req_bit;
+
+	if (data->soc_data->legacy && *rst_req == RCU_RST_REQ)
+		return RCU_RST_STAT;
+	else
+		return *rst_req + 0x4;
+}
+
+static int intel_set_clr_bits(struct intel_reset_data *data, unsigned long id,
+			      bool set)
+{
+	u32 rst_req, req_bit, rst_stat, stat_bit, val;
+	int ret;
+
+	rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req,
+					     &req_bit, &stat_bit);
+
+	val = set ? BIT(req_bit) : 0;
+	ret = regmap_update_bits(data->regmap, rst_req,  BIT(req_bit), val);
+	if (ret)
+		return ret;
+
+	return regmap_read_poll_timeout(data->regmap, rst_stat, val,
+					set == !!(val & BIT(stat_bit)), 20,
+					200);
+}
+
+static int intel_assert_device(struct reset_controller_dev *rcdev,
+			       unsigned long id)
+{
+	struct intel_reset_data *data = to_reset_data(rcdev);
+	int ret;
+
+	ret = intel_set_clr_bits(data, id, true);
+	if (ret)
+		dev_err(data->dev, "Reset assert failed %d\n", ret);
+
+	return ret;
+}
+
+static int intel_deassert_device(struct reset_controller_dev *rcdev,
+				 unsigned long id)
+{
+	struct intel_reset_data *data = to_reset_data(rcdev);
+	int ret;
+
+	ret = intel_set_clr_bits(data, id, false);
+	if (ret)
+		dev_err(data->dev, "Reset deassert failed %d\n", ret);
+
+	return ret;
+}
+
+static int intel_reset_status(struct reset_controller_dev *rcdev,
+			      unsigned long id)
+{
+	struct intel_reset_data *data = to_reset_data(rcdev);
+	u32 rst_req, req_bit, rst_stat, stat_bit, val;
+	int ret;
+
+	rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req,
+					     &req_bit, &stat_bit);
+	ret = regmap_read(data->regmap, rst_stat, &val);
+	if (ret)
+		return ret;
+
+	return !!(val & BIT(stat_bit));
+}
+
+static const struct reset_control_ops intel_reset_ops = {
+	.assert =	intel_assert_device,
+	.deassert =	intel_deassert_device,
+	.status	=	intel_reset_status,
+};
+
+static int intel_reset_xlate(struct reset_controller_dev *rcdev,
+			     const struct of_phandle_args *spec)
+{
+	struct intel_reset_data *data = to_reset_data(rcdev);
+	u32 id;
+
+	if (spec->args[1] > 31)
+		return -EINVAL;
+
+	id = FIELD_PREP(REG_OFFSET, spec->args[0]);
+	id |= FIELD_PREP(BIT_OFFSET, spec->args[1]);
+
+	if (data->soc_data->legacy) {
+		if (spec->args[2] > 31)
+			return -EINVAL;
+
+		id |= FIELD_PREP(STAT_BIT_OFFSET, spec->args[2]);
+	}
+
+	return id;
+}
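
A short worked example of the specifier packing, with assumed (purely illustrative) offsets:

/*
 * On a non-legacy (LGM) SoC, a two-cell specifier such as <&rcu0 0x10 5>
 * is packed by intel_reset_xlate() as
 *
 *     id = FIELD_PREP(REG_OFFSET, 0x10) | FIELD_PREP(BIT_OFFSET, 5)
 *        = (0x10 << 16) | (5 << 8)
 *
 * id_to_reg_and_bit_offsets() later recovers rst_req = 0x10 and
 * req_bit = stat_bit = 5, with the status register at 0x10 + 0x4 = 0x14.
 * On the legacy (xRX200) SoC a third cell supplies a separate status bit
 * through STAT_BIT_OFFSET, and requests made through RCU_RST_REQ are
 * checked against RCU_RST_STAT instead of "request offset + 4".
 */
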
+
+static int intel_reset_restart_handler(struct notifier_block *nb,
+				       unsigned long action, void *data)
+{
+	struct intel_reset_data *reset_data;
+
+	reset_data = container_of(nb, struct intel_reset_data, restart_nb);
+	intel_assert_device(&reset_data->rcdev, reset_data->reboot_id);
+
+	return NOTIFY_DONE;
+}
+
+static int intel_reset_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	struct intel_reset_data *data;
+	void __iomem *base;
+	u32 rb_id[3];
+	int ret;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->soc_data = of_device_get_match_data(dev);
+	if (!data->soc_data)
+		return -ENODEV;
+
+	base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	data->regmap = devm_regmap_init_mmio(dev, base,
+					     &intel_rcu_regmap_config);
+	if (IS_ERR(data->regmap)) {
+		dev_err(dev, "regmap initialization failed\n");
+		return PTR_ERR(data->regmap);
+	}
+
+	ret = device_property_read_u32_array(dev, "intel,global-reset", rb_id,
+					     data->soc_data->reset_cell_count);
+	if (ret) {
+		dev_err(dev, "Failed to get global reset offset!\n");
+		return ret;
+	}
+
+	data->dev =			dev;
+	data->rcdev.of_node =		np;
+	data->rcdev.owner =		dev->driver->owner;
+	data->rcdev.ops	=		&intel_reset_ops;
+	data->rcdev.of_xlate =		intel_reset_xlate;
+	data->rcdev.of_reset_n_cells =	data->soc_data->reset_cell_count;
+	ret = devm_reset_controller_register(&pdev->dev, &data->rcdev);
+	if (ret)
+		return ret;
+
+	data->reboot_id = FIELD_PREP(REG_OFFSET, rb_id[0]);
+	data->reboot_id |= FIELD_PREP(BIT_OFFSET, rb_id[1]);
+
+	if (data->soc_data->legacy)
+		data->reboot_id |= FIELD_PREP(STAT_BIT_OFFSET, rb_id[2]);
+
+	data->restart_nb.notifier_call =	intel_reset_restart_handler;
+	data->restart_nb.priority =		128;
+	register_restart_handler(&data->restart_nb);
+
+	return 0;
+}
+
+static const struct intel_reset_soc xrx200_data = {
+	.legacy =		true,
+	.reset_cell_count =	3,
+};
+
+static const struct intel_reset_soc lgm_data = {
+	.legacy =		false,
+	.reset_cell_count =	2,
+};
+
+static const struct of_device_id intel_reset_match[] = {
+	{ .compatible = "intel,rcu-lgm", .data = &lgm_data },
+	{ .compatible = "intel,rcu-xrx200", .data = &xrx200_data },
+	{}
+};
+
+static struct platform_driver intel_reset_driver = {
+	.probe = intel_reset_probe,
+	.driver = {
+		.name = "intel-reset",
+		.of_match_table = intel_reset_match,
+	},
+};
+
+static int __init intel_reset_init(void)
+{
+	return platform_driver_register(&intel_reset_driver);
+}
+
+/*
+ * The RCU is a system core entity in the Always On Domain; its clocks and
+ * resources are initialized during system core initialization. Most
+ * platform- and architecture-specific devices also need it to perform
+ * reset operations as part of their own initialization, so register the
+ * RCU driver at postcore initcall time.
+ */
+postcore_initcall(intel_reset_init);
diff --git a/drivers/reset/reset-npcm.c b/drivers/reset/reset-npcm.c
new file mode 100644
index 0000000..2ea4d31
--- /dev/null
+++ b/drivers/reset/reset-npcm.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Nuvoton Technology corporation.
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/of_address.h>
+
+/* NPCM7xx GCR registers */
+#define NPCM_MDLR_OFFSET	0x7C
+#define NPCM_MDLR_USBD0		BIT(9)
+#define NPCM_MDLR_USBD1		BIT(8)
+#define NPCM_MDLR_USBD2_4	BIT(21)
+#define NPCM_MDLR_USBD5_9	BIT(22)
+
+#define NPCM_USB1PHYCTL_OFFSET	0x140
+#define NPCM_USB2PHYCTL_OFFSET	0x144
+#define NPCM_USBXPHYCTL_RS	BIT(28)
+
+/* NPCM7xx Reset registers */
+#define NPCM_SWRSTR		0x14
+#define NPCM_SWRST		BIT(2)
+
+#define NPCM_IPSRST1		0x20
+#define NPCM_IPSRST1_USBD1	BIT(5)
+#define NPCM_IPSRST1_USBD2	BIT(8)
+#define NPCM_IPSRST1_USBD3	BIT(25)
+#define NPCM_IPSRST1_USBD4	BIT(22)
+#define NPCM_IPSRST1_USBD5	BIT(23)
+#define NPCM_IPSRST1_USBD6	BIT(24)
+
+#define NPCM_IPSRST2		0x24
+#define NPCM_IPSRST2_USB_HOST	BIT(26)
+
+#define NPCM_IPSRST3		0x34
+#define NPCM_IPSRST3_USBD0	BIT(4)
+#define NPCM_IPSRST3_USBD7	BIT(5)
+#define NPCM_IPSRST3_USBD8	BIT(6)
+#define NPCM_IPSRST3_USBD9	BIT(7)
+#define NPCM_IPSRST3_USBPHY1	BIT(24)
+#define NPCM_IPSRST3_USBPHY2	BIT(25)
+
+#define NPCM_RC_RESETS_PER_REG	32
+#define NPCM_MASK_RESETS	GENMASK(4, 0)
+
+struct npcm_rc_data {
+	struct reset_controller_dev rcdev;
+	struct notifier_block restart_nb;
+	u32 sw_reset_number;
+	void __iomem *base;
+	spinlock_t lock;
+};
+
+#define to_rc_data(p) container_of(p, struct npcm_rc_data, rcdev)
+
+static int npcm_rc_restart(struct notifier_block *nb, unsigned long mode,
+			   void *cmd)
+{
+	struct npcm_rc_data *rc = container_of(nb, struct npcm_rc_data,
+					       restart_nb);
+
+	writel(NPCM_SWRST << rc->sw_reset_number, rc->base + NPCM_SWRSTR);
+	mdelay(1000);
+
+	pr_emerg("%s: unable to restart system\n", __func__);
+
+	return NOTIFY_DONE;
+}
+
+static int npcm_rc_setclear_reset(struct reset_controller_dev *rcdev,
+				  unsigned long id, bool set)
+{
+	struct npcm_rc_data *rc = to_rc_data(rcdev);
+	unsigned int rst_bit = BIT(id & NPCM_MASK_RESETS);
+	unsigned int ctrl_offset = id >> 8;
+	unsigned long flags;
+	u32 stat;
+
+	spin_lock_irqsave(&rc->lock, flags);
+	stat = readl(rc->base + ctrl_offset);
+	if (set)
+		writel(stat | rst_bit, rc->base + ctrl_offset);
+	else
+		writel(stat & ~rst_bit, rc->base + ctrl_offset);
+	spin_unlock_irqrestore(&rc->lock, flags);
+
+	return 0;
+}
+
+static int npcm_rc_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	return npcm_rc_setclear_reset(rcdev, id, true);
+}
+
+static int npcm_rc_deassert(struct reset_controller_dev *rcdev,
+			    unsigned long id)
+{
+	return npcm_rc_setclear_reset(rcdev, id, false);
+}
+
+static int npcm_rc_status(struct reset_controller_dev *rcdev,
+			  unsigned long id)
+{
+	struct npcm_rc_data *rc = to_rc_data(rcdev);
+	unsigned int rst_bit = BIT(id & NPCM_MASK_RESETS);
+	unsigned int ctrl_offset = id >> 8;
+
+	return (readl(rc->base + ctrl_offset) & rst_bit);
+}
+
+static int npcm_reset_xlate(struct reset_controller_dev *rcdev,
+			    const struct of_phandle_args *reset_spec)
+{
+	unsigned int offset, bit;
+
+	offset = reset_spec->args[0];
+	if (offset != NPCM_IPSRST1 && offset != NPCM_IPSRST2 &&
+	    offset != NPCM_IPSRST3) {
+		dev_err(rcdev->dev, "Error reset register (0x%x)\n", offset);
+		return -EINVAL;
+	}
+	bit = reset_spec->args[1];
+	if (bit >= NPCM_RC_RESETS_PER_REG) {
+		dev_err(rcdev->dev, "Error reset number (%d)\n", bit);
+		return -EINVAL;
+	}
+
+	return (offset << 8) | bit;
+}
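
A brief worked example of this two-cell encoding, using the USB host reset bit defined above:

/*
 * The specifier <&rstc NPCM_IPSRST2 26> is encoded by npcm_reset_xlate() as
 *
 *     id = (NPCM_IPSRST2 << 8) | 26 = (0x24 << 8) | 26
 *
 * npcm_rc_setclear_reset() and npcm_rc_status() then recover the register
 * offset as id >> 8 and the bit mask as BIT(id & NPCM_MASK_RESETS).
 */
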
+
+static const struct of_device_id npcm_rc_match[] = {
+	{ .compatible = "nuvoton,npcm750-reset",
+		.data = (void *)"nuvoton,npcm750-gcr" },
+	{ }
+};
+
+/*
+ *  The following reset sequence should be applied to the USB PHYs, USB
+ *  devices and the USB host during BMC boot.
+ */
+static int npcm_usb_reset(struct platform_device *pdev, struct npcm_rc_data *rc)
+{
+	u32 mdlr, iprst1, iprst2, iprst3;
+	struct device *dev = &pdev->dev;
+	struct regmap *gcr_regmap;
+	u32 ipsrst1_bits = 0;
+	u32 ipsrst2_bits = NPCM_IPSRST2_USB_HOST;
+	u32 ipsrst3_bits = 0;
+	const char *gcr_dt;
+
+	gcr_dt = (const char *)
+	of_match_device(dev->driver->of_match_table, dev)->data;
+
+	gcr_regmap = syscon_regmap_lookup_by_compatible(gcr_dt);
+	if (IS_ERR(gcr_regmap)) {
+		dev_err(&pdev->dev, "Failed to find %s\n", gcr_dt);
+		return PTR_ERR(gcr_regmap);
+	}
+
+	/* check which USB devices are enabled */
+	regmap_read(gcr_regmap, NPCM_MDLR_OFFSET, &mdlr);
+	if (!(mdlr & NPCM_MDLR_USBD0))
+		ipsrst3_bits |= NPCM_IPSRST3_USBD0;
+	if (!(mdlr & NPCM_MDLR_USBD1))
+		ipsrst1_bits |= NPCM_IPSRST1_USBD1;
+	if (!(mdlr & NPCM_MDLR_USBD2_4))
+		ipsrst1_bits |= (NPCM_IPSRST1_USBD2 |
+				 NPCM_IPSRST1_USBD3 |
+				 NPCM_IPSRST1_USBD4);
+	if (!(mdlr & NPCM_MDLR_USBD0)) {
+		ipsrst1_bits |= (NPCM_IPSRST1_USBD5 |
+				 NPCM_IPSRST1_USBD6);
+		ipsrst3_bits |= (NPCM_IPSRST3_USBD7 |
+				 NPCM_IPSRST3_USBD8 |
+				 NPCM_IPSRST3_USBD9);
+	}
+
+	/* assert reset USB PHY and USB devices */
+	iprst1 = readl(rc->base + NPCM_IPSRST1);
+	iprst2 = readl(rc->base + NPCM_IPSRST2);
+	iprst3 = readl(rc->base + NPCM_IPSRST3);
+
+	iprst1 |= ipsrst1_bits;
+	iprst2 |= ipsrst2_bits;
+	iprst3 |= (ipsrst3_bits | NPCM_IPSRST3_USBPHY1 |
+		   NPCM_IPSRST3_USBPHY2);
+
+	writel(iprst1, rc->base + NPCM_IPSRST1);
+	writel(iprst2, rc->base + NPCM_IPSRST2);
+	writel(iprst3, rc->base + NPCM_IPSRST3);
+
+	/* clear USB PHY RS bit */
+	regmap_update_bits(gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
+			   NPCM_USBXPHYCTL_RS, 0);
+	regmap_update_bits(gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
+			   NPCM_USBXPHYCTL_RS, 0);
+
+	/* deassert reset USB PHY */
+	iprst3 &= ~(NPCM_IPSRST3_USBPHY1 | NPCM_IPSRST3_USBPHY2);
+	writel(iprst3, rc->base + NPCM_IPSRST3);
+
+	udelay(50);
+
+	/* set USB PHY RS bit */
+	regmap_update_bits(gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
+			   NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
+	regmap_update_bits(gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
+			   NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
+
+	/* deassert reset USB devices */
+	iprst1 &= ~ipsrst1_bits;
+	iprst2 &= ~ipsrst2_bits;
+	iprst3 &= ~ipsrst3_bits;
+
+	writel(iprst1, rc->base + NPCM_IPSRST1);
+	writel(iprst2, rc->base + NPCM_IPSRST2);
+	writel(iprst3, rc->base + NPCM_IPSRST3);
+
+	return 0;
+}
+
+static const struct reset_control_ops npcm_rc_ops = {
+	.assert		= npcm_rc_assert,
+	.deassert	= npcm_rc_deassert,
+	.status		= npcm_rc_status,
+};
+
+static int npcm_rc_probe(struct platform_device *pdev)
+{
+	struct npcm_rc_data *rc;
+	int ret;
+
+	rc = devm_kzalloc(&pdev->dev, sizeof(*rc), GFP_KERNEL);
+	if (!rc)
+		return -ENOMEM;
+
+	rc->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(rc->base))
+		return PTR_ERR(rc->base);
+
+	spin_lock_init(&rc->lock);
+
+	rc->rcdev.owner = THIS_MODULE;
+	rc->rcdev.ops = &npcm_rc_ops;
+	rc->rcdev.of_node = pdev->dev.of_node;
+	rc->rcdev.of_reset_n_cells = 2;
+	rc->rcdev.of_xlate = npcm_reset_xlate;
+
+	platform_set_drvdata(pdev, rc);
+
+	ret = devm_reset_controller_register(&pdev->dev, &rc->rcdev);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to register device\n");
+		return ret;
+	}
+
+	if (npcm_usb_reset(pdev, rc))
+		dev_warn(&pdev->dev, "NPCM USB reset failed, can cause issues with UDC and USB host\n");
+
+	if (!of_property_read_u32(pdev->dev.of_node, "nuvoton,sw-reset-number",
+				  &rc->sw_reset_number)) {
+		if (rc->sw_reset_number && rc->sw_reset_number < 5) {
+			rc->restart_nb.priority = 192;
+			rc->restart_nb.notifier_call = npcm_rc_restart;
+			ret = register_restart_handler(&rc->restart_nb);
+			if (ret)
+				dev_warn(&pdev->dev, "failed to register restart handler\n");
+		}
+	}
+
+	return ret;
+}
+
+static struct platform_driver npcm_rc_driver = {
+	.probe	= npcm_rc_probe,
+	.driver	= {
+		.name			= "npcm-reset",
+		.of_match_table		= npcm_rc_match,
+		.suppress_bind_attrs	= true,
+	},
+};
+builtin_platform_driver(npcm_rc_driver);
diff --git a/drivers/reset/reset-qcom-aoss.c b/drivers/reset/reset-qcom-aoss.c
index 36db967..9333b92 100644
--- a/drivers/reset/reset-qcom-aoss.c
+++ b/drivers/reset/reset-qcom-aoss.c
@@ -118,6 +118,7 @@ static const struct of_device_id qcom_aoss_reset_of_match[] = {
 	{ .compatible = "qcom,sdm845-aoss-cc", .data = &sdm845_aoss_desc },
 	{}
 };
+MODULE_DEVICE_TABLE(of, qcom_aoss_reset_of_match);
 
 static struct platform_driver qcom_aoss_reset_driver = {
 	.probe = qcom_aoss_reset_probe,
@@ -127,7 +128,7 @@ static struct platform_driver qcom_aoss_reset_driver = {
 	},
 };
 
-builtin_platform_driver(qcom_aoss_reset_driver);
+module_platform_driver(qcom_aoss_reset_driver);
 
 MODULE_DESCRIPTION("Qualcomm AOSS Reset Driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-scmi.c b/drivers/reset/reset-scmi.c
index b46df80..8d3a858 100644
--- a/drivers/reset/reset-scmi.c
+++ b/drivers/reset/reset-scmi.c
@@ -108,7 +108,7 @@ static int scmi_reset_probe(struct scmi_device *sdev)
 }
 
 static const struct scmi_device_id scmi_id_table[] = {
-	{ SCMI_PROTOCOL_RESET },
+	{ SCMI_PROTOCOL_RESET, "reset" },
 	{ },
 };
 MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index 74e589f..279e535 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -193,8 +193,8 @@ static const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = {
 #define UNIPHIER_PERI_RESET_FI2C(id, ch)		\
 	UNIPHIER_RESETX((id), 0x114, 24 + (ch))
 
-#define UNIPHIER_PERI_RESET_SCSSI(id)			\
-	UNIPHIER_RESETX((id), 0x110, 17)
+#define UNIPHIER_PERI_RESET_SCSSI(id, ch)		\
+	UNIPHIER_RESETX((id), 0x110, 17 + (ch))
 
 #define UNIPHIER_PERI_RESET_MCSSI(id)			\
 	UNIPHIER_RESETX((id), 0x114, 14)
@@ -209,7 +209,7 @@ static const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = {
 	UNIPHIER_PERI_RESET_I2C(6, 2),
 	UNIPHIER_PERI_RESET_I2C(7, 3),
 	UNIPHIER_PERI_RESET_I2C(8, 4),
-	UNIPHIER_PERI_RESET_SCSSI(11),
+	UNIPHIER_PERI_RESET_SCSSI(11, 0),
 	UNIPHIER_RESET_END,
 };
 
@@ -225,8 +225,11 @@ static const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = {
 	UNIPHIER_PERI_RESET_FI2C(8, 4),
 	UNIPHIER_PERI_RESET_FI2C(9, 5),
 	UNIPHIER_PERI_RESET_FI2C(10, 6),
-	UNIPHIER_PERI_RESET_SCSSI(11),
-	UNIPHIER_PERI_RESET_MCSSI(12),
+	UNIPHIER_PERI_RESET_SCSSI(11, 0),
+	UNIPHIER_PERI_RESET_SCSSI(12, 1),
+	UNIPHIER_PERI_RESET_SCSSI(13, 2),
+	UNIPHIER_PERI_RESET_SCSSI(14, 3),
+	UNIPHIER_PERI_RESET_MCSSI(15),
 	UNIPHIER_RESET_END,
 };
 
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index 7092765..a9108ff 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -15,6 +15,15 @@
 	  in /dev. They make it possible for user-space programs to send and
 	  receive rpmsg packets.
 
+config RPMSG_MTK_SCP
+	tristate "MediaTek SCP"
+	depends on MTK_SCP
+	select RPMSG
+	help
+	  Say y here to enable support for providing communication channels to
+	  remote processors on MediaTek platforms.
+	  This driver uses IPI and IPC to communicate with the remote processors.
+
 config RPMSG_QCOM_GLINK_NATIVE
 	tristate
 	select RPMSG
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index 9aa8595..ae92a7f 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_RPMSG)		+= rpmsg_core.o
 obj-$(CONFIG_RPMSG_CHAR)	+= rpmsg_char.o
+obj-$(CONFIG_RPMSG_MTK_SCP)	+= mtk_rpmsg.o
 obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
 obj-$(CONFIG_RPMSG_QCOM_GLINK_NATIVE) += qcom_glink_native.o
 obj-$(CONFIG_RPMSG_QCOM_GLINK_SMEM) += qcom_glink_smem.o
diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
new file mode 100644
index 0000000..232aa4e
--- /dev/null
+++ b/drivers/rpmsg/mtk_rpmsg.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2019 Google LLC.
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/rpmsg/mtk_rpmsg.h>
+#include <linux/workqueue.h>
+
+#include "rpmsg_internal.h"
+
+struct mtk_rpmsg_rproc_subdev {
+	struct platform_device *pdev;
+	struct mtk_rpmsg_info *info;
+	struct rpmsg_endpoint *ns_ept;
+	struct rproc_subdev subdev;
+
+	struct work_struct register_work;
+	struct list_head channels;
+	struct mutex channels_lock;
+};
+
+#define to_mtk_subdev(d) container_of(d, struct mtk_rpmsg_rproc_subdev, subdev)
+
+struct mtk_rpmsg_channel_info {
+	struct rpmsg_channel_info info;
+	bool registered;
+	struct list_head list;
+};
+
+/**
+ * struct rpmsg_ns_msg - dynamic name service announcement message
+ * @name: name of remote service that is published
+ * @addr: address of remote service that is published
+ *
+ * This message is sent across to publish a new service. When we receive these
+ * messages, an appropriate rpmsg channel (i.e. device) is created. In turn,
+ * the ->probe() handler of the matching rpmsg driver will be invoked
+ * (if/as soon as one is registered).
+ */
+struct rpmsg_ns_msg {
+	char name[RPMSG_NAME_SIZE];
+	u32 addr;
+} __packed;
+
+struct mtk_rpmsg_device {
+	struct rpmsg_device rpdev;
+	struct mtk_rpmsg_rproc_subdev *mtk_subdev;
+};
+
+struct mtk_rpmsg_endpoint {
+	struct rpmsg_endpoint ept;
+	struct mtk_rpmsg_rproc_subdev *mtk_subdev;
+};
+
+#define to_mtk_rpmsg_device(r) container_of(r, struct mtk_rpmsg_device, rpdev)
+#define to_mtk_rpmsg_endpoint(r) container_of(r, struct mtk_rpmsg_endpoint, ept)
+
+static const struct rpmsg_endpoint_ops mtk_rpmsg_endpoint_ops;
+
+static void __mtk_ept_release(struct kref *kref)
+{
+	struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
+						  refcount);
+	kfree(to_mtk_rpmsg_endpoint(ept));
+}
+
+static void mtk_rpmsg_ipi_handler(void *data, unsigned int len, void *priv)
+{
+	struct mtk_rpmsg_endpoint *mept = priv;
+	struct rpmsg_endpoint *ept = &mept->ept;
+	int ret;
+
+	ret = (*ept->cb)(ept->rpdev, data, len, ept->priv, ept->addr);
+	if (ret)
+		dev_warn(&ept->rpdev->dev, "rpmsg handler return error = %d",
+			 ret);
+}
+
+static struct rpmsg_endpoint *
+__mtk_create_ept(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
+		 struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv,
+		 u32 id)
+{
+	struct mtk_rpmsg_endpoint *mept;
+	struct rpmsg_endpoint *ept;
+	struct platform_device *pdev = mtk_subdev->pdev;
+	int ret;
+
+	mept = kzalloc(sizeof(*mept), GFP_KERNEL);
+	if (!mept)
+		return NULL;
+	mept->mtk_subdev = mtk_subdev;
+
+	ept = &mept->ept;
+	kref_init(&ept->refcount);
+
+	ept->rpdev = rpdev;
+	ept->cb = cb;
+	ept->priv = priv;
+	ept->ops = &mtk_rpmsg_endpoint_ops;
+	ept->addr = id;
+
+	ret = mtk_subdev->info->register_ipi(pdev, id, mtk_rpmsg_ipi_handler,
+					     mept);
+	if (ret) {
+		dev_err(&pdev->dev, "IPI register failed, id = %d", id);
+		kref_put(&ept->refcount, __mtk_ept_release);
+		return NULL;
+	}
+
+	return ept;
+}
+
+static struct rpmsg_endpoint *
+mtk_rpmsg_create_ept(struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv,
+		     struct rpmsg_channel_info chinfo)
+{
+	struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+		to_mtk_rpmsg_device(rpdev)->mtk_subdev;
+
+	return __mtk_create_ept(mtk_subdev, rpdev, cb, priv, chinfo.src);
+}
+
+static void mtk_rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
+{
+	struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+		to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
+
+	mtk_subdev->info->unregister_ipi(mtk_subdev->pdev, ept->addr);
+	kref_put(&ept->refcount, __mtk_ept_release);
+}
+
+static int mtk_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
+{
+	struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+		to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
+
+	return mtk_subdev->info->send_ipi(mtk_subdev->pdev, ept->addr, data,
+					  len, 0);
+}
+
+static int mtk_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
+{
+	struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+		to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
+
+	/*
+	 * TODO: This is currently the same as mtk_rpmsg_send() and waits
+	 * until the SCP has received the last command.
+	 */
+	return mtk_subdev->info->send_ipi(mtk_subdev->pdev, ept->addr, data,
+					  len, 0);
+}
+
+static const struct rpmsg_endpoint_ops mtk_rpmsg_endpoint_ops = {
+	.destroy_ept = mtk_rpmsg_destroy_ept,
+	.send = mtk_rpmsg_send,
+	.trysend = mtk_rpmsg_trysend,
+};
+
+static void mtk_rpmsg_release_device(struct device *dev)
+{
+	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+	struct mtk_rpmsg_device *mdev = to_mtk_rpmsg_device(rpdev);
+
+	kfree(mdev);
+}
+
+static const struct rpmsg_device_ops mtk_rpmsg_device_ops = {
+	.create_ept = mtk_rpmsg_create_ept,
+};
+
+static struct device_node *
+mtk_rpmsg_match_device_subnode(struct device_node *node, const char *channel)
+{
+	struct device_node *child;
+	const char *name;
+	int ret;
+
+	for_each_available_child_of_node(node, child) {
+		ret = of_property_read_string(child, "mtk,rpmsg-name", &name);
+		if (ret)
+			continue;
+
+		if (strcmp(name, channel) == 0)
+			return child;
+	}
+
+	return NULL;
+}
+
+static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
+				     struct rpmsg_channel_info *info)
+{
+	struct rpmsg_device *rpdev;
+	struct mtk_rpmsg_device *mdev;
+	struct platform_device *pdev = mtk_subdev->pdev;
+	int ret;
+
+	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+	if (!mdev)
+		return -ENOMEM;
+
+	mdev->mtk_subdev = mtk_subdev;
+
+	rpdev = &mdev->rpdev;
+	rpdev->ops = &mtk_rpmsg_device_ops;
+	rpdev->src = info->src;
+	rpdev->dst = info->dst;
+	strscpy(rpdev->id.name, info->name, RPMSG_NAME_SIZE);
+
+	rpdev->dev.of_node =
+		mtk_rpmsg_match_device_subnode(pdev->dev.of_node, info->name);
+	rpdev->dev.parent = &pdev->dev;
+	rpdev->dev.release = mtk_rpmsg_release_device;
+
+	ret = rpmsg_register_device(rpdev);
+	if (ret) {
+		kfree(mdev);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void mtk_register_device_work_function(struct work_struct *register_work)
+{
+	struct mtk_rpmsg_rproc_subdev *subdev = container_of(
+		register_work, struct mtk_rpmsg_rproc_subdev, register_work);
+	struct platform_device *pdev = subdev->pdev;
+	struct mtk_rpmsg_channel_info *info;
+	int ret;
+
+	mutex_lock(&subdev->channels_lock);
+	list_for_each_entry(info, &subdev->channels, list) {
+		if (info->registered)
+			continue;
+
+		ret = mtk_rpmsg_register_device(subdev, &info->info);
+		if (ret) {
+			dev_err(&pdev->dev, "Can't create rpmsg_device\n");
+			continue;
+		}
+
+		info->registered = true;
+	}
+	mutex_unlock(&subdev->channels_lock);
+}
+
+static int mtk_rpmsg_create_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
+				   char *name, u32 addr)
+{
+	struct mtk_rpmsg_channel_info *info;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	strscpy(info->info.name, name, RPMSG_NAME_SIZE);
+	info->info.src = addr;
+	info->info.dst = RPMSG_ADDR_ANY;
+	mutex_lock(&mtk_subdev->channels_lock);
+	list_add(&info->list, &mtk_subdev->channels);
+	mutex_unlock(&mtk_subdev->channels_lock);
+
+	schedule_work(&mtk_subdev->register_work);
+	return 0;
+}
+
+static int mtk_rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
+			   void *priv, u32 src)
+{
+	struct rpmsg_ns_msg *msg = data;
+	struct mtk_rpmsg_rproc_subdev *mtk_subdev = priv;
+	struct device *dev = &mtk_subdev->pdev->dev;
+
+	int ret;
+
+	if (len != sizeof(*msg)) {
+		dev_err(dev, "malformed ns msg (%d)\n", len);
+		return -EINVAL;
+	}
+
+	/*
+	 * the name service ept does _not_ belong to a real rpmsg channel,
+	 * and is handled by the rpmsg bus itself.
+	 * for sanity reasons, make sure a valid rpdev has _not_ sneaked
+	 * in somehow.
+	 */
+	if (rpdev) {
+		dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
+		return -EINVAL;
+	}
+
+	/* don't trust the remote processor for null terminating the name */
+	msg->name[RPMSG_NAME_SIZE - 1] = '\0';
+
+	dev_info(dev, "creating channel %s addr 0x%x\n", msg->name, msg->addr);
+
+	ret = mtk_rpmsg_create_device(mtk_subdev, msg->name, msg->addr);
+	if (ret) {
+		dev_err(dev, "create rpmsg device failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int mtk_rpmsg_prepare(struct rproc_subdev *subdev)
+{
+	struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+
+	/* a dedicated endpoint handles the name service msgs */
+	if (mtk_subdev->info->ns_ipi_id >= 0) {
+		mtk_subdev->ns_ept =
+			__mtk_create_ept(mtk_subdev, NULL, mtk_rpmsg_ns_cb,
+					 mtk_subdev,
+					 mtk_subdev->info->ns_ipi_id);
+		if (!mtk_subdev->ns_ept) {
+			dev_err(&mtk_subdev->pdev->dev,
+				"failed to create name service endpoint\n");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static void mtk_rpmsg_unprepare(struct rproc_subdev *subdev)
+{
+	struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+
+	if (mtk_subdev->ns_ept) {
+		mtk_rpmsg_destroy_ept(mtk_subdev->ns_ept);
+		mtk_subdev->ns_ept = NULL;
+	}
+}
+
+static void mtk_rpmsg_stop(struct rproc_subdev *subdev, bool crashed)
+{
+	struct mtk_rpmsg_channel_info *info, *next;
+	struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+	struct device *dev = &mtk_subdev->pdev->dev;
+
+	/*
+	 * Destroy the name service endpoint here to avoid a new channel
+	 * being created after the rpmsg_unregister_device loop below.
+	 */
+	if (mtk_subdev->ns_ept) {
+		mtk_rpmsg_destroy_ept(mtk_subdev->ns_ept);
+		mtk_subdev->ns_ept = NULL;
+	}
+
+	cancel_work_sync(&mtk_subdev->register_work);
+
+	mutex_lock(&mtk_subdev->channels_lock);
+	list_for_each_entry(info, &mtk_subdev->channels, list) {
+		if (!info->registered)
+			continue;
+		if (rpmsg_unregister_device(dev, &info->info)) {
+			dev_warn(
+				dev,
+				"rpmsg_unregister_device failed for %s.%d.%d\n",
+				info->info.name, info->info.src,
+				info->info.dst);
+		}
+	}
+
+	list_for_each_entry_safe(info, next,
+				 &mtk_subdev->channels, list) {
+		list_del(&info->list);
+		kfree(info);
+	}
+	mutex_unlock(&mtk_subdev->channels_lock);
+}
+
+struct rproc_subdev *
+mtk_rpmsg_create_rproc_subdev(struct platform_device *pdev,
+			      struct mtk_rpmsg_info *info)
+{
+	struct mtk_rpmsg_rproc_subdev *mtk_subdev;
+
+	mtk_subdev = kzalloc(sizeof(*mtk_subdev), GFP_KERNEL);
+	if (!mtk_subdev)
+		return NULL;
+
+	mtk_subdev->pdev = pdev;
+	mtk_subdev->subdev.prepare = mtk_rpmsg_prepare;
+	mtk_subdev->subdev.stop = mtk_rpmsg_stop;
+	mtk_subdev->subdev.unprepare = mtk_rpmsg_unprepare;
+	mtk_subdev->info = info;
+	INIT_LIST_HEAD(&mtk_subdev->channels);
+	INIT_WORK(&mtk_subdev->register_work,
+		  mtk_register_device_work_function);
+	mutex_init(&mtk_subdev->channels_lock);
+
+	return &mtk_subdev->subdev;
+}
+EXPORT_SYMBOL_GPL(mtk_rpmsg_create_rproc_subdev);
+
+void mtk_rpmsg_destroy_rproc_subdev(struct rproc_subdev *subdev)
+{
+	struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+
+	kfree(mtk_subdev);
+}
+EXPORT_SYMBOL_GPL(mtk_rpmsg_destroy_rproc_subdev);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek scp rpmsg driver");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index d77515d..34c8b6c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -240,6 +240,7 @@
 
 config RTC_DRV_DS1307
 	tristate "Dallas/Maxim DS1307/37/38/39/40/41, ST M41T00, EPSON RX-8025, ISL12057"
+	select REGMAP_I2C
 	help
 	  If you say yes here you get support for various compatible RTC
 	  chips (often with battery backup) connected with I2C. This driver
@@ -498,12 +499,13 @@
 	help
 	  If you say Y here you will get support for the
 	  watchdog timer in the ST M41T60 and M41T80 RTC chips series.
+
 config RTC_DRV_BD70528
 	tristate "ROHM BD70528 PMIC RTC"
 	depends on MFD_ROHM_BD70528 && (BD70528_WATCHDOG || !BD70528_WATCHDOG)
 	help
 	  If you say Y here you will get support for the RTC
-	  on ROHM BD70528 Power Management IC.
+	  block on ROHM BD70528 and BD71828 Power Management ICs.
 
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-bd70528.
@@ -621,6 +623,7 @@
 
 config RTC_DRV_RX8581
 	tristate "Epson RX-8571/RX-8581"
+	select REGMAP_I2C
 	help
 	  If you say yes here you will get support for the Epson RX-8571/
 	  RX-8581.
@@ -648,6 +651,7 @@
 
 config RTC_DRV_RV3028
 	tristate "Micro Crystal RV3028"
+	select REGMAP_I2C
 	help
 	  If you say yes here you get support for the Micro Crystal
 	  RV3028.
@@ -676,13 +680,14 @@
 	  will be called rtc-s5m.
 
 config RTC_DRV_SD3078
-    tristate "ZXW Shenzhen whwave SD3078"
-    help
-      If you say yes here you get support for the ZXW Shenzhen whwave
-      SD3078 RTC chips.
+	tristate "ZXW Shenzhen whwave SD3078"
+	select REGMAP_I2C
+	help
+	  If you say yes here you get support for the ZXW Shenzhen whwave
+	  SD3078 RTC chips.
 
-      This driver can also be built as a module. If so, the module
-      will be called rtc-sd3078
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-sd3078
 
 endif # I2C
 
@@ -848,14 +853,14 @@
 	default m if I2C=m
 	default y if I2C=y
 	default y if SPI_MASTER=y
-	select REGMAP_I2C if I2C
-	select REGMAP_SPI if SPI_MASTER
 
 comment "SPI and I2C RTC drivers"
 
 config RTC_DRV_DS3232
 	tristate "Dallas/Maxim DS3232/DS3234"
 	depends on RTC_I2C_AND_SPI
+	select REGMAP_I2C if I2C
+	select REGMAP_SPI if SPI_MASTER
 	help
 	  If you say yes here you get support for Dallas Semiconductor
 	  DS3232 and DS3234 real-time clock chips. If an interrupt is associated
@@ -875,6 +880,8 @@
 config RTC_DRV_PCF2127
 	tristate "NXP PCF2127"
 	depends on RTC_I2C_AND_SPI
+	select REGMAP_I2C if I2C
+	select REGMAP_SPI if SPI_MASTER
 	select WATCHDOG_CORE if WATCHDOG
 	help
 	  If you say yes here you get support for the NXP PCF2127/29 RTC
@@ -891,6 +898,8 @@
 config RTC_DRV_RV3029C2
 	tristate "Micro Crystal RV3029/3049"
 	depends on RTC_I2C_AND_SPI
+	select REGMAP_I2C if I2C
+	select REGMAP_SPI if SPI_MASTER
 	help
 	  If you say yes here you get support for the Micro Crystal
 	  RV3029 and RV3049 RTC chips.
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index 7383067..3521d8e 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -523,12 +523,9 @@ static int abx80x_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 		if (status < 0)
 			return status;
 
-		tmp = !!(status & ABX8XX_STATUS_BLF);
+		tmp = status & ABX8XX_STATUS_BLF ? RTC_VL_BACKUP_LOW : 0;
 
-		if (copy_to_user((void __user *)arg, &tmp, sizeof(int)))
-			return -EFAULT;
-
-		return 0;
+		return put_user(tmp, (unsigned int __user *)arg);
 
 	case RTC_VL_CLR:
 		status = i2c_smbus_read_byte_data(client, ABX8XX_REG_STATUS);
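Note: the abx80x hunk above, and the matching pcf2127, pcf85063, pcf8523, pcf8563 and
rv3028 hunks further down, all convert the RTC_VL_READ ioctl from an open-coded
copy_to_user() of a bare int to put_user() of the new RTC_VL_* flag values. A minimal
sketch of the resulting shape, where my_read_status() and MY_STATUS_BLF stand in for
the driver-specific register access:

static int my_rtc_ioctl(struct device *dev, unsigned int cmd,
			unsigned long arg)
{
	int status;

	switch (cmd) {
	case RTC_VL_READ:
		status = my_read_status(dev);	/* assumed register read helper */
		if (status < 0)
			return status;

		/* report a flag word instead of a raw 0/1 integer */
		status = status & MY_STATUS_BLF ? RTC_VL_BACKUP_LOW : 0;
		return put_user(status, (unsigned int __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}
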
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 10064bd..3ab81cd 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -264,6 +264,9 @@ static int asm9260_rtc_probe(struct platform_device *pdev)
 		return PTR_ERR(priv->iobase);
 
 	priv->clk = devm_clk_get(dev, "ahb");
+	if (IS_ERR(priv->clk))
+		return PTR_ERR(priv->clk);
+
 	ret = clk_prepare_enable(priv->clk);
 	if (ret) {
 		dev_err(dev, "Failed to enable clk!\n");
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 3b833e0..5e811e0 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -14,6 +14,7 @@
  */
 
 #include <linux/bcd.h>
+#include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/interrupt.h>
@@ -30,7 +31,51 @@
 #include <linux/time.h>
 #include <linux/uaccess.h>
 
-#include "rtc-at91rm9200.h"
+#define	AT91_RTC_CR		0x00			/* Control Register */
+#define		AT91_RTC_UPDTIM		BIT(0)		/* Update Request Time Register */
+#define		AT91_RTC_UPDCAL		BIT(1)		/* Update Request Calendar Register */
+
+#define	AT91_RTC_MR		0x04			/* Mode Register */
+
+#define	AT91_RTC_TIMR		0x08			/* Time Register */
+#define		AT91_RTC_SEC		GENMASK(6, 0)	/* Current Second */
+#define		AT91_RTC_MIN		GENMASK(14, 8)	/* Current Minute */
+#define		AT91_RTC_HOUR		GENMASK(21, 16)	/* Current Hour */
+#define		AT91_RTC_AMPM		BIT(22)		/* Ante Meridiem Post Meridiem Indicator */
+
+#define	AT91_RTC_CALR		0x0c			/* Calendar Register */
+#define		AT91_RTC_CENT		GENMASK(6, 0)	/* Current Century */
+#define		AT91_RTC_YEAR		GENMASK(15, 8)	/* Current Year */
+#define		AT91_RTC_MONTH		GENMASK(20, 16)	/* Current Month */
+#define		AT91_RTC_DAY		GENMASK(23, 21)	/* Current Day */
+#define		AT91_RTC_DATE		GENMASK(29, 24)	/* Current Date */
+
+#define	AT91_RTC_TIMALR		0x10			/* Time Alarm Register */
+#define		AT91_RTC_SECEN		BIT(7)		/* Second Alarm Enable */
+#define		AT91_RTC_MINEN		BIT(15)		/* Minute Alarm Enable */
+#define		AT91_RTC_HOUREN		BIT(23)		/* Hour Alarm Enable */
+
+#define	AT91_RTC_CALALR		0x14			/* Calendar Alarm Register */
+#define		AT91_RTC_MTHEN		BIT(23)		/* Month Alarm Enable */
+#define		AT91_RTC_DATEEN		BIT(31)		/* Date Alarm Enable */
+
+#define	AT91_RTC_SR		0x18			/* Status Register */
+#define		AT91_RTC_ACKUPD		BIT(0)		/* Acknowledge for Update */
+#define		AT91_RTC_ALARM		BIT(1)		/* Alarm Flag */
+#define		AT91_RTC_SECEV		BIT(2)		/* Second Event */
+#define		AT91_RTC_TIMEV		BIT(3)		/* Time Event */
+#define		AT91_RTC_CALEV		BIT(4)		/* Calendar Event */
+
+#define	AT91_RTC_SCCR		0x1c			/* Status Clear Command Register */
+#define	AT91_RTC_IER		0x20			/* Interrupt Enable Register */
+#define	AT91_RTC_IDR		0x24			/* Interrupt Disable Register */
+#define	AT91_RTC_IMR		0x28			/* Interrupt Mask Register */
+
+#define	AT91_RTC_VER		0x2c			/* Valid Entry Register */
+#define		AT91_RTC_NVTIM		BIT(0)		/* Non valid Time */
+#define		AT91_RTC_NVCAL		BIT(1)		/* Non valid Calendar */
+#define		AT91_RTC_NVTIMALR	BIT(2)		/* Non valid Time Alarm */
+#define		AT91_RTC_NVCALALR	BIT(3)		/* Non valid Calendar Alarm */
 
 #define at91_rtc_read(field) \
 	readl_relaxed(at91_rtc_regs + field)
@@ -117,20 +162,20 @@ static void at91_rtc_decodetime(unsigned int timereg, unsigned int calreg,
 	} while ((time != at91_rtc_read(timereg)) ||
 			(date != at91_rtc_read(calreg)));
 
-	tm->tm_sec  = bcd2bin((time & AT91_RTC_SEC) >> 0);
-	tm->tm_min  = bcd2bin((time & AT91_RTC_MIN) >> 8);
-	tm->tm_hour = bcd2bin((time & AT91_RTC_HOUR) >> 16);
+	tm->tm_sec  = bcd2bin(FIELD_GET(AT91_RTC_SEC, time));
+	tm->tm_min  = bcd2bin(FIELD_GET(AT91_RTC_MIN, time));
+	tm->tm_hour = bcd2bin(FIELD_GET(AT91_RTC_HOUR, time));
 
 	/*
 	 * The Calendar Alarm register does not have a field for
 	 * the year - so these will return an invalid value.
 	 */
 	tm->tm_year  = bcd2bin(date & AT91_RTC_CENT) * 100;	/* century */
-	tm->tm_year += bcd2bin((date & AT91_RTC_YEAR) >> 8);	/* year */
+	tm->tm_year += bcd2bin(FIELD_GET(AT91_RTC_YEAR, date));	/* year */
 
-	tm->tm_wday = bcd2bin((date & AT91_RTC_DAY) >> 21) - 1;	/* day of the week [0-6], Sunday=0 */
-	tm->tm_mon  = bcd2bin((date & AT91_RTC_MONTH) >> 16) - 1;
-	tm->tm_mday = bcd2bin((date & AT91_RTC_DATE) >> 24);
+	tm->tm_wday = bcd2bin(FIELD_GET(AT91_RTC_DAY, date)) - 1;	/* day of the week [0-6], Sunday=0 */
+	tm->tm_mon  = bcd2bin(FIELD_GET(AT91_RTC_MONTH, date)) - 1;
+	tm->tm_mday = bcd2bin(FIELD_GET(AT91_RTC_DATE, date));
 }
 
 /*
@@ -167,16 +212,17 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
 	at91_rtc_write_idr(AT91_RTC_ACKUPD);
 
 	at91_rtc_write(AT91_RTC_TIMR,
-			  bin2bcd(tm->tm_sec) << 0
-			| bin2bcd(tm->tm_min) << 8
-			| bin2bcd(tm->tm_hour) << 16);
+			  FIELD_PREP(AT91_RTC_SEC, bin2bcd(tm->tm_sec))
+			| FIELD_PREP(AT91_RTC_MIN, bin2bcd(tm->tm_min))
+			| FIELD_PREP(AT91_RTC_HOUR, bin2bcd(tm->tm_hour)));
 
 	at91_rtc_write(AT91_RTC_CALR,
-			  bin2bcd((tm->tm_year + 1900) / 100)	/* century */
-			| bin2bcd(tm->tm_year % 100) << 8	/* year */
-			| bin2bcd(tm->tm_mon + 1) << 16		/* tm_mon starts at zero */
-			| bin2bcd(tm->tm_wday + 1) << 21	/* day of the week [0-6], Sunday=0 */
-			| bin2bcd(tm->tm_mday) << 24);
+			  FIELD_PREP(AT91_RTC_CENT,
+				     bin2bcd((tm->tm_year + 1900) / 100))
+			| FIELD_PREP(AT91_RTC_YEAR, bin2bcd(tm->tm_year % 100))
+			| FIELD_PREP(AT91_RTC_MONTH, bin2bcd(tm->tm_mon + 1))
+			| FIELD_PREP(AT91_RTC_DAY, bin2bcd(tm->tm_wday + 1))
+			| FIELD_PREP(AT91_RTC_DATE, bin2bcd(tm->tm_mday)));
 
 	/* Restart Time/Calendar */
 	cr = at91_rtc_read(AT91_RTC_CR);
@@ -211,25 +257,17 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
  */
 static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
-	struct rtc_time tm;
-
-	at91_rtc_decodetime(AT91_RTC_TIMR, AT91_RTC_CALR, &tm);
-
-	tm.tm_mon = alrm->time.tm_mon;
-	tm.tm_mday = alrm->time.tm_mday;
-	tm.tm_hour = alrm->time.tm_hour;
-	tm.tm_min = alrm->time.tm_min;
-	tm.tm_sec = alrm->time.tm_sec;
+	struct rtc_time tm = alrm->time;
 
 	at91_rtc_write_idr(AT91_RTC_ALARM);
 	at91_rtc_write(AT91_RTC_TIMALR,
-		  bin2bcd(tm.tm_sec) << 0
-		| bin2bcd(tm.tm_min) << 8
-		| bin2bcd(tm.tm_hour) << 16
+		  FIELD_PREP(AT91_RTC_SEC, bin2bcd(alrm->time.tm_sec))
+		| FIELD_PREP(AT91_RTC_MIN, bin2bcd(alrm->time.tm_min))
+		| FIELD_PREP(AT91_RTC_HOUR, bin2bcd(alrm->time.tm_hour))
 		| AT91_RTC_HOUREN | AT91_RTC_MINEN | AT91_RTC_SECEN);
 	at91_rtc_write(AT91_RTC_CALALR,
-		  bin2bcd(tm.tm_mon + 1) << 16		/* tm_mon starts at zero */
-		| bin2bcd(tm.tm_mday) << 24
+		  FIELD_PREP(AT91_RTC_MONTH, bin2bcd(alrm->time.tm_mon + 1))
+		| FIELD_PREP(AT91_RTC_DATE, bin2bcd(alrm->time.tm_mday))
 		| AT91_RTC_DATEEN | AT91_RTC_MTHEN);
 
 	if (alrm->enabled) {
@@ -254,20 +292,6 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 
 	return 0;
 }
-/*
- * Provide additional RTC information in /proc/driver/rtc
- */
-static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
-{
-	unsigned long imr = at91_rtc_read_imr();
-
-	seq_printf(seq, "update_IRQ\t: %s\n",
-			(imr & AT91_RTC_ACKUPD) ? "yes" : "no");
-	seq_printf(seq, "periodic_IRQ\t: %s\n",
-			(imr & AT91_RTC_SECEV) ? "yes" : "no");
-
-	return 0;
-}
 
 /*
  * IRQ handler for the RTC
@@ -327,6 +351,12 @@ static const struct of_device_id at91_rtc_dt_ids[] = {
 		.compatible = "atmel,at91sam9x5-rtc",
 		.data = &at91sam9x5_config,
 	}, {
+		.compatible = "atmel,sama5d4-rtc",
+		.data = &at91rm9200_config,
+	}, {
+		.compatible = "atmel,sama5d2-rtc",
+		.data = &at91rm9200_config,
+	}, {
 		/* sentinel */
 	}
 };
@@ -337,7 +367,6 @@ static const struct rtc_class_ops at91_rtc_ops = {
 	.set_time	= at91_rtc_settime,
 	.read_alarm	= at91_rtc_readalarm,
 	.set_alarm	= at91_rtc_setalarm,
-	.proc		= at91_rtc_proc,
 	.alarm_irq_enable = at91_rtc_alarm_irq_enable,
 };
 
diff --git a/drivers/rtc/rtc-at91rm9200.h b/drivers/rtc/rtc-at91rm9200.h
deleted file mode 100644
index 8be5289..0000000
--- a/drivers/rtc/rtc-at91rm9200.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * arch/arm/mach-at91/include/mach/at91_rtc.h
- *
- * Copyright (C) 2005 Ivan Kokshaysky
- * Copyright (C) SAN People
- *
- * Real Time Clock (RTC) - System peripheral registers.
- * Based on AT91RM9200 datasheet revision E.
- */
-
-#ifndef AT91_RTC_H
-#define AT91_RTC_H
-
-#define	AT91_RTC_CR		0x00			/* Control Register */
-#define		AT91_RTC_UPDTIM		(1 <<  0)		/* Update Request Time Register */
-#define		AT91_RTC_UPDCAL		(1 <<  1)		/* Update Request Calendar Register */
-#define		AT91_RTC_TIMEVSEL	(3 <<  8)		/* Time Event Selection */
-#define			AT91_RTC_TIMEVSEL_MINUTE	(0 << 8)
-#define			AT91_RTC_TIMEVSEL_HOUR		(1 << 8)
-#define			AT91_RTC_TIMEVSEL_DAY24		(2 << 8)
-#define			AT91_RTC_TIMEVSEL_DAY12		(3 << 8)
-#define		AT91_RTC_CALEVSEL	(3 << 16)		/* Calendar Event Selection */
-#define			AT91_RTC_CALEVSEL_WEEK		(0 << 16)
-#define			AT91_RTC_CALEVSEL_MONTH		(1 << 16)
-#define			AT91_RTC_CALEVSEL_YEAR		(2 << 16)
-
-#define	AT91_RTC_MR		0x04			/* Mode Register */
-#define			AT91_RTC_HRMOD		(1 <<  0)		/* 12/24 Hour Mode */
-
-#define	AT91_RTC_TIMR		0x08			/* Time Register */
-#define		AT91_RTC_SEC		(0x7f <<  0)		/* Current Second */
-#define		AT91_RTC_MIN		(0x7f <<  8)		/* Current Minute */
-#define		AT91_RTC_HOUR		(0x3f << 16)		/* Current Hour */
-#define		AT91_RTC_AMPM		(1    << 22)		/* Ante Meridiem Post Meridiem Indicator */
-
-#define	AT91_RTC_CALR		0x0c			/* Calendar Register */
-#define		AT91_RTC_CENT		(0x7f <<  0)		/* Current Century */
-#define		AT91_RTC_YEAR		(0xff <<  8)		/* Current Year */
-#define		AT91_RTC_MONTH		(0x1f << 16)		/* Current Month */
-#define		AT91_RTC_DAY		(7    << 21)		/* Current Day */
-#define		AT91_RTC_DATE		(0x3f << 24)		/* Current Date */
-
-#define	AT91_RTC_TIMALR		0x10			/* Time Alarm Register */
-#define		AT91_RTC_SECEN		(1 <<  7)		/* Second Alarm Enable */
-#define		AT91_RTC_MINEN		(1 << 15)		/* Minute Alarm Enable */
-#define		AT91_RTC_HOUREN		(1 << 23)		/* Hour Alarm Enable */
-
-#define	AT91_RTC_CALALR		0x14			/* Calendar Alarm Register */
-#define		AT91_RTC_MTHEN		(1 << 23)		/* Month Alarm Enable */
-#define		AT91_RTC_DATEEN		(1 << 31)		/* Date Alarm Enable */
-
-#define	AT91_RTC_SR		0x18			/* Status Register */
-#define		AT91_RTC_ACKUPD		(1 <<  0)		/* Acknowledge for Update */
-#define		AT91_RTC_ALARM		(1 <<  1)		/* Alarm Flag */
-#define		AT91_RTC_SECEV		(1 <<  2)		/* Second Event */
-#define		AT91_RTC_TIMEV		(1 <<  3)		/* Time Event */
-#define		AT91_RTC_CALEV		(1 <<  4)		/* Calendar Event */
-
-#define	AT91_RTC_SCCR		0x1c			/* Status Clear Command Register */
-#define	AT91_RTC_IER		0x20			/* Interrupt Enable Register */
-#define	AT91_RTC_IDR		0x24			/* Interrupt Disable Register */
-#define	AT91_RTC_IMR		0x28			/* Interrupt Mask Register */
-
-#define	AT91_RTC_VER		0x2c			/* Valid Entry Register */
-#define		AT91_RTC_NVTIM		(1 <<  0)		/* Non valid Time */
-#define		AT91_RTC_NVCAL		(1 <<  1)		/* Non valid Calendar */
-#define		AT91_RTC_NVTIMALR	(1 <<  2)		/* Non valid Time Alarm */
-#define		AT91_RTC_NVCALALR	(1 <<  3)		/* Non valid Calendar Alarm */
-
-#endif
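Note: the at91rm9200 conversion above folds the old header into the driver and replaces
open-coded shifts with GENMASK()/BIT() masks plus FIELD_GET()/FIELD_PREP() from
<linux/bitfield.h>, which derive the shift amount from the mask itself. A standalone
illustration of the idiom; the MYREG_* fields are made up for the example:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define MYREG_SEC	GENMASK(6, 0)	/* hypothetical bits 6:0 */
#define MYREG_MIN	GENMASK(14, 8)	/* hypothetical bits 14:8 */

static u32 pack_time(unsigned int sec, unsigned int min)
{
	/* FIELD_PREP() shifts each value into the position named by its mask */
	return FIELD_PREP(MYREG_SEC, sec) | FIELD_PREP(MYREG_MIN, min);
}

static unsigned int unpack_min(u32 reg)
{
	/* FIELD_GET() masks the field and shifts it back down to bit 0 */
	return FIELD_GET(MYREG_MIN, reg);
}
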
diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c
index 627037a..bbbb1f0 100644
--- a/drivers/rtc/rtc-bd70528.c
+++ b/drivers/rtc/rtc-bd70528.c
@@ -6,6 +6,7 @@
 
 #include <linux/bcd.h>
 #include <linux/mfd/rohm-bd70528.h>
+#include <linux/mfd/rohm-bd71828.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -15,7 +16,7 @@
 /*
  * We read regs RTC_SEC => RTC_YEAR
  * this struct is ordered according to chip registers.
- * Keep it u8 only to avoid padding issues.
+ * Keep it u8 only (or packed) to avoid padding issues.
  */
 struct bd70528_rtc_day {
 	u8 sec;
@@ -36,6 +37,13 @@ struct bd70528_rtc_wake {
 	u8 ctrl;
 } __packed;
 
+struct bd71828_rtc_alm {
+	struct bd70528_rtc_data alm0;
+	struct bd70528_rtc_data alm1;
+	u8 alm_mask;
+	u8 alm1_mask;
+} __packed;
+
 struct bd70528_rtc_alm {
 	struct bd70528_rtc_data data;
 	u8 alm_mask;
@@ -43,8 +51,10 @@ struct bd70528_rtc_alm {
 } __packed;
 
 struct bd70528_rtc {
-	struct rohm_regmap_dev *mfd;
+	struct rohm_regmap_dev *parent;
 	struct device *dev;
+	u8 reg_time_start;
+	bool has_rtc_timers;
 };
 
 static int bd70528_set_wake(struct rohm_regmap_dev *bd70528,
@@ -123,14 +133,14 @@ static int bd70528_set_rtc_based_timers(struct bd70528_rtc *r, int new_state,
 {
 	int ret;
 
-	ret = bd70528_wdt_set(r->mfd, new_state & BD70528_WDT_STATE_BIT,
+	ret = bd70528_wdt_set(r->parent, new_state & BD70528_WDT_STATE_BIT,
 			      old_state);
 	if (ret) {
 		dev_err(r->dev,
 			"Failed to disable WDG for RTC setting (%d)\n", ret);
 		return ret;
 	}
-	ret = bd70528_set_elapsed_tmr(r->mfd,
+	ret = bd70528_set_elapsed_tmr(r->parent,
 				      new_state & BD70528_ELAPSED_STATE_BIT,
 				      old_state);
 	if (ret) {
@@ -138,7 +148,7 @@ static int bd70528_set_rtc_based_timers(struct bd70528_rtc *r, int new_state,
 			"Failed to disable 'elapsed timer' for RTC setting\n");
 		return ret;
 	}
-	ret = bd70528_set_wake(r->mfd, new_state & BD70528_WAKE_STATE_BIT,
+	ret = bd70528_set_wake(r->parent, new_state & BD70528_WAKE_STATE_BIT,
 			       old_state);
 	if (ret) {
 		dev_err(r->dev,
@@ -152,12 +162,18 @@ static int bd70528_set_rtc_based_timers(struct bd70528_rtc *r, int new_state,
 static int bd70528_re_enable_rtc_based_timers(struct bd70528_rtc *r,
 					      int old_state)
 {
+	if (!r->has_rtc_timers)
+		return 0;
+
 	return bd70528_set_rtc_based_timers(r, old_state, NULL);
 }
 
 static int bd70528_disable_rtc_based_timers(struct bd70528_rtc *r,
 					    int *old_state)
 {
+	if (!r->has_rtc_timers)
+		return 0;
+
 	return bd70528_set_rtc_based_timers(r, 0, old_state);
 }
 
@@ -213,22 +229,52 @@ static inline void rtc2tm(struct bd70528_rtc_data *r, struct rtc_time *t)
 	t->tm_wday = bcd2bin(r->week & BD70528_MASK_RTC_WEEK);
 }
 
+static int bd71828_set_alarm(struct device *dev, struct rtc_wkalrm *a)
+{
+	int ret;
+	struct bd71828_rtc_alm alm;
+	struct bd70528_rtc *r = dev_get_drvdata(dev);
+	struct rohm_regmap_dev *parent = r->parent;
+
+	ret = regmap_bulk_read(parent->regmap, BD71828_REG_RTC_ALM_START,
+			       &alm, sizeof(alm));
+	if (ret) {
+		dev_err(dev, "Failed to read alarm regs\n");
+		return ret;
+	}
+
+	tm2rtc(&a->time, &alm.alm0);
+
+	if (!a->enabled)
+		alm.alm_mask &= ~BD70528_MASK_ALM_EN;
+	else
+		alm.alm_mask |= BD70528_MASK_ALM_EN;
+
+	ret = regmap_bulk_write(parent->regmap, BD71828_REG_RTC_ALM_START,
+				&alm, sizeof(alm));
+	if (ret)
+		dev_err(dev, "Failed to set alarm time\n");
+
+	return ret;
+}
+
 static int bd70528_set_alarm(struct device *dev, struct rtc_wkalrm *a)
 {
 	struct bd70528_rtc_wake wake;
 	struct bd70528_rtc_alm alm;
 	int ret;
 	struct bd70528_rtc *r = dev_get_drvdata(dev);
-	struct rohm_regmap_dev *bd70528 = r->mfd;
+	struct rohm_regmap_dev *parent = r->parent;
 
-	ret = regmap_bulk_read(bd70528->regmap, BD70528_REG_RTC_WAKE_START,
+	ret = regmap_bulk_read(parent->regmap, BD70528_REG_RTC_WAKE_START,
 			       &wake, sizeof(wake));
 	if (ret) {
 		dev_err(dev, "Failed to read wake regs\n");
 		return ret;
 	}
 
-	ret = regmap_bulk_read(bd70528->regmap, BD70528_REG_RTC_ALM_START,
+	ret = regmap_bulk_read(parent->regmap, BD70528_REG_RTC_ALM_START,
 			       &alm, sizeof(alm));
 	if (ret) {
 		dev_err(dev, "Failed to read alarm regs\n");
@@ -246,14 +292,14 @@ static int bd70528_set_alarm(struct device *dev, struct rtc_wkalrm *a)
 		wake.ctrl &= ~BD70528_MASK_WAKE_EN;
 	}
 
-	ret = regmap_bulk_write(bd70528->regmap,
+	ret = regmap_bulk_write(parent->regmap,
 				BD70528_REG_RTC_WAKE_START, &wake,
 				sizeof(wake));
 	if (ret) {
 		dev_err(dev, "Failed to set wake time\n");
 		return ret;
 	}
-	ret = regmap_bulk_write(bd70528->regmap, BD70528_REG_RTC_ALM_START,
+	ret = regmap_bulk_write(parent->regmap, BD70528_REG_RTC_ALM_START,
 				&alm, sizeof(alm));
 	if (ret)
 		dev_err(dev, "Failed to set alarm time\n");
@@ -261,14 +307,38 @@ static int bd70528_set_alarm(struct device *dev, struct rtc_wkalrm *a)
 	return ret;
 }
 
+static int bd71828_read_alarm(struct device *dev, struct rtc_wkalrm *a)
+{
+	int ret;
+	struct bd71828_rtc_alm alm;
+	struct bd70528_rtc *r = dev_get_drvdata(dev);
+	struct rohm_regmap_dev *parent = r->parent;
+
+	ret = regmap_bulk_read(parent->regmap, BD71828_REG_RTC_ALM_START,
+			       &alm, sizeof(alm));
+	if (ret) {
+		dev_err(dev, "Failed to read alarm regs\n");
+		return ret;
+	}
+
+	rtc2tm(&alm.alm0, &a->time);
+	a->time.tm_mday = -1;
+	a->time.tm_mon = -1;
+	a->time.tm_year = -1;
+	a->enabled = !!(alm.alm_mask & BD70528_MASK_ALM_EN);
+	a->pending = 0;
+
+	return 0;
+}
+
 static int bd70528_read_alarm(struct device *dev, struct rtc_wkalrm *a)
 {
 	struct bd70528_rtc_alm alm;
 	int ret;
 	struct bd70528_rtc *r = dev_get_drvdata(dev);
-	struct rohm_regmap_dev *bd70528 = r->mfd;
+	struct rohm_regmap_dev *parent = r->parent;
 
-	ret = regmap_bulk_read(bd70528->regmap, BD70528_REG_RTC_ALM_START,
+	ret = regmap_bulk_read(parent->regmap, BD70528_REG_RTC_ALM_START,
 			       &alm, sizeof(alm));
 	if (ret) {
 		dev_err(dev, "Failed to read alarm regs\n");
@@ -290,14 +360,14 @@ static int bd70528_set_time_locked(struct device *dev, struct rtc_time *t)
 	int ret, tmpret, old_states;
 	struct bd70528_rtc_data rtc_data;
 	struct bd70528_rtc *r = dev_get_drvdata(dev);
-	struct rohm_regmap_dev *bd70528 = r->mfd;
+	struct rohm_regmap_dev *parent = r->parent;
 
 	ret = bd70528_disable_rtc_based_timers(r, &old_states);
 	if (ret)
 		return ret;
 
-	tmpret = regmap_bulk_read(bd70528->regmap,
-				  BD70528_REG_RTC_START, &rtc_data,
+	tmpret = regmap_bulk_read(parent->regmap,
+				  r->reg_time_start, &rtc_data,
 				  sizeof(rtc_data));
 	if (tmpret) {
 		dev_err(dev, "Failed to read RTC time registers\n");
@@ -305,8 +375,8 @@ static int bd70528_set_time_locked(struct device *dev, struct rtc_time *t)
 	}
 	tm2rtc(t, &rtc_data);
 
-	tmpret = regmap_bulk_write(bd70528->regmap,
-				   BD70528_REG_RTC_START, &rtc_data,
+	tmpret = regmap_bulk_write(parent->regmap,
+				   r->reg_time_start, &rtc_data,
 				   sizeof(rtc_data));
 	if (tmpret) {
 		dev_err(dev, "Failed to set RTC time\n");
@@ -321,27 +391,32 @@ static int bd70528_set_time_locked(struct device *dev, struct rtc_time *t)
 	return ret;
 }
 
+static int bd71828_set_time(struct device *dev, struct rtc_time *t)
+{
+	return bd70528_set_time_locked(dev, t);
+}
+
 static int bd70528_set_time(struct device *dev, struct rtc_time *t)
 {
 	int ret;
 	struct bd70528_rtc *r = dev_get_drvdata(dev);
 
-	bd70528_wdt_lock(r->mfd);
+	bd70528_wdt_lock(r->parent);
 	ret = bd70528_set_time_locked(dev, t);
-	bd70528_wdt_unlock(r->mfd);
+	bd70528_wdt_unlock(r->parent);
 	return ret;
 }
 
 static int bd70528_get_time(struct device *dev, struct rtc_time *t)
 {
 	struct bd70528_rtc *r = dev_get_drvdata(dev);
-	struct rohm_regmap_dev *bd70528 = r->mfd;
+	struct rohm_regmap_dev *parent = r->parent;
 	struct bd70528_rtc_data rtc_data;
 	int ret;
 
 	/* read the RTC date and time registers all at once */
-	ret = regmap_bulk_read(bd70528->regmap,
-			       BD70528_REG_RTC_START, &rtc_data,
+	ret = regmap_bulk_read(parent->regmap,
+			       r->reg_time_start, &rtc_data,
 			       sizeof(rtc_data));
 	if (ret) {
 		dev_err(dev, "Failed to read RTC time (err %d)\n", ret);
@@ -362,19 +437,36 @@ static int bd70528_alm_enable(struct device *dev, unsigned int enabled)
 	if (enabled)
 		enableval = 0;
 
-	bd70528_wdt_lock(r->mfd);
-	ret = bd70528_set_wake(r->mfd, enabled, NULL);
+	bd70528_wdt_lock(r->parent);
+	ret = bd70528_set_wake(r->parent, enabled, NULL);
 	if (ret) {
 		dev_err(dev, "Failed to change wake state\n");
 		goto out_unlock;
 	}
-	ret = regmap_update_bits(r->mfd->regmap, BD70528_REG_RTC_ALM_MASK,
+	ret = regmap_update_bits(r->parent->regmap, BD70528_REG_RTC_ALM_MASK,
 				 BD70528_MASK_ALM_EN, enableval);
 	if (ret)
 		dev_err(dev, "Failed to change alarm state\n");
 
 out_unlock:
-	bd70528_wdt_unlock(r->mfd);
+	bd70528_wdt_unlock(r->parent);
+	return ret;
+}
+
+static int bd71828_alm_enable(struct device *dev, unsigned int enabled)
+{
+	int ret;
+	struct bd70528_rtc *r = dev_get_drvdata(dev);
+	unsigned int enableval = BD70528_MASK_ALM_EN;
+
+	if (!enabled)
+		enableval = 0;
+
+	ret = regmap_update_bits(r->parent->regmap, BD71828_REG_RTC_ALM0_MASK,
+				 BD70528_MASK_ALM_EN, enableval);
+	if (ret)
+		dev_err(dev, "Failed to change alarm state\n");
+
 	return ret;
 }
 
@@ -386,6 +478,14 @@ static const struct rtc_class_ops bd70528_rtc_ops = {
 	.alarm_irq_enable	= bd70528_alm_enable,
 };
 
+static const struct rtc_class_ops bd71828_rtc_ops = {
+	.read_time		= bd70528_get_time,
+	.set_time		= bd71828_set_time,
+	.read_alarm		= bd71828_read_alarm,
+	.set_alarm		= bd71828_set_alarm,
+	.alarm_irq_enable	= bd71828_alm_enable,
+};
+
 static irqreturn_t alm_hndlr(int irq, void *data)
 {
 	struct rtc_device *rtc = data;
@@ -397,14 +497,19 @@ static irqreturn_t alm_hndlr(int irq, void *data)
 static int bd70528_probe(struct platform_device *pdev)
 {
 	struct bd70528_rtc *bd_rtc;
-	struct rohm_regmap_dev *mfd;
+	const struct rtc_class_ops *rtc_ops;
+	struct rohm_regmap_dev *parent;
+	const char *irq_name;
 	int ret;
 	struct rtc_device *rtc;
 	int irq;
 	unsigned int hr;
+	bool enable_main_irq = false;
+	u8 hour_reg;
+	enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;
 
-	mfd = dev_get_drvdata(pdev->dev.parent);
-	if (!mfd) {
+	parent = dev_get_drvdata(pdev->dev.parent);
+	if (!parent) {
 		dev_err(&pdev->dev, "No MFD driver data\n");
 		return -EINVAL;
 	}
@@ -412,16 +517,39 @@ static int bd70528_probe(struct platform_device *pdev)
 	if (!bd_rtc)
 		return -ENOMEM;
 
-	bd_rtc->mfd = mfd;
+	bd_rtc->parent = parent;
 	bd_rtc->dev = &pdev->dev;
 
-	irq = platform_get_irq_byname(pdev, "bd70528-rtc-alm");
-	if (irq < 0)
+	switch (chip) {
+	case ROHM_CHIP_TYPE_BD70528:
+		irq_name = "bd70528-rtc-alm";
+		bd_rtc->has_rtc_timers = true;
+		bd_rtc->reg_time_start = BD70528_REG_RTC_START;
+		hour_reg = BD70528_REG_RTC_HOUR;
+		enable_main_irq = true;
+		rtc_ops = &bd70528_rtc_ops;
+		break;
+	case ROHM_CHIP_TYPE_BD71828:
+		irq_name = "bd71828-rtc-alm-0";
+		bd_rtc->reg_time_start = BD71828_REG_RTC_START;
+		hour_reg = BD71828_REG_RTC_HOUR;
+		rtc_ops = &bd71828_rtc_ops;
+		break;
+	default:
+		dev_err(&pdev->dev, "Unknown chip\n");
+		return -ENOENT;
+	}
+
+	irq = platform_get_irq_byname(pdev, irq_name);
+
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Failed to get irq\n");
 		return irq;
+	}
 
 	platform_set_drvdata(pdev, bd_rtc);
 
-	ret = regmap_read(mfd->regmap, BD70528_REG_RTC_HOUR, &hr);
+	ret = regmap_read(parent->regmap, hour_reg, &hr);
 
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to reag RTC clock\n");
@@ -431,10 +559,10 @@ static int bd70528_probe(struct platform_device *pdev)
 	if (!(hr & BD70528_MASK_RTC_HOUR_24H)) {
 		struct rtc_time t;
 
-		ret = bd70528_get_time(&pdev->dev, &t);
+		ret = rtc_ops->read_time(&pdev->dev, &t);
 
 		if (!ret)
-			ret = bd70528_set_time(&pdev->dev, &t);
+			ret = rtc_ops->set_time(&pdev->dev, &t);
 
 		if (ret) {
 			dev_err(&pdev->dev,
@@ -454,7 +582,7 @@ static int bd70528_probe(struct platform_device *pdev)
 
 	rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
 	rtc->range_max = RTC_TIMESTAMP_END_2099;
-	rtc->ops = &bd70528_rtc_ops;
+	rtc->ops = rtc_ops;
 
 	/* Request alarm IRQ prior to registerig the RTC */
 	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, &alm_hndlr,
@@ -468,27 +596,37 @@ static int bd70528_probe(struct platform_device *pdev)
 	 *  leave them enabled as irq-controller should disable irqs
 	 *  from sub-registers when IRQ is disabled or freed.
 	 */
-	ret = regmap_update_bits(mfd->regmap,
+	if (enable_main_irq) {
+		ret = regmap_update_bits(parent->regmap,
 				 BD70528_REG_INT_MAIN_MASK,
 				 BD70528_INT_RTC_MASK, 0);
-	if (ret) {
-		dev_err(&pdev->dev, "Failed to enable RTC interrupts\n");
-		return ret;
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to enable RTC interrupts\n");
+			return ret;
+		}
 	}
 
 	return rtc_register_device(rtc);
 }
 
+static const struct platform_device_id bd718x7_rtc_id[] = {
+	{ "bd70528-rtc", ROHM_CHIP_TYPE_BD70528 },
+	{ "bd71828-rtc", ROHM_CHIP_TYPE_BD71828 },
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, bd718x7_rtc_id);
+
 static struct platform_driver bd70528_rtc = {
 	.driver = {
 		.name = "bd70528-rtc"
 	},
 	.probe = bd70528_probe,
+	.id_table = bd718x7_rtc_id,
 };
 
 module_platform_driver(bd70528_rtc);
 
 MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_DESCRIPTION("BD70528 RTC driver");
+MODULE_DESCRIPTION("ROHM BD70528 and BD71828 PMIC RTC driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:bd70528-rtc");
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 0333037..b795fe4 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -850,7 +850,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
 			rtc_cmos_int_handler = cmos_interrupt;
 
 		retval = request_irq(rtc_irq, rtc_cmos_int_handler,
-				IRQF_SHARED, dev_name(&cmos_rtc.rtc->dev),
+				0, dev_name(&cmos_rtc.rtc->dev),
 				cmos_rtc.rtc);
 		if (retval < 0) {
 			dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
@@ -1197,8 +1197,6 @@ static void rtc_wake_off(struct device *dev)
 /* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */
 static void use_acpi_alarm_quirks(void)
 {
-	int year;
-
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 		return;
 
@@ -1208,8 +1206,10 @@ static void use_acpi_alarm_quirks(void)
 	if (!is_hpet_enabled())
 		return;
 
-	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2015)
-		use_acpi_alarm = true;
+	if (dmi_get_bios_year() < 2015)
+		return;
+
+	use_acpi_alarm = true;
 }
 #else
 static inline void use_acpi_alarm_quirks(void) { }
@@ -1305,7 +1305,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
 		 * hardcode it on systems with a legacy PIC.
 		 */
 		if (nr_legacy_irqs())
-			irq = 8;
+			irq = RTC_IRQ;
 #endif
 		return cmos_do_probe(&pnp->dev,
 				pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index d043d30..f7343c2 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -5,7 +5,6 @@
 // Author: Stephen Barber <smbarber@chromium.org>
 
 #include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
 #include <linux/module.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index d21004a..ba14342 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -75,7 +75,6 @@ static const struct spi_device_id ds1343_id[] = {
 MODULE_DEVICE_TABLE(spi, ds1343_id);
 
 struct ds1343_priv {
-	struct spi_device *spi;
 	struct rtc_device *rtc;
 	struct regmap *map;
 	int irq;
@@ -362,12 +361,13 @@ static int ds1343_probe(struct spi_device *spi)
 	if (!priv)
 		return -ENOMEM;
 
-	priv->spi = spi;
-
 	/* RTC DS1347 works in spi mode 3 and
-	 * its chip select is active high
+	 * its chip select is active high. Active high should be defined as
+	 * "inverse polarity" as GPIO-based chip selects can be logically
+	 * active high but inverted by the GPIO library.
 	 */
-	spi->mode = SPI_MODE_3 | SPI_CS_HIGH;
+	spi->mode |= SPI_MODE_3;
+	spi->mode ^= SPI_CS_HIGH;
 	spi->bits_per_word = 8;
 	res = spi_setup(spi);
 	if (res)
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 443f6d0..0fb79c4 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -78,7 +78,6 @@
 struct hym8563 {
 	struct i2c_client	*client;
 	struct rtc_device	*rtc;
-	bool			valid;
 #ifdef CONFIG_COMMON_CLK
 	struct clk_hw		clkout_hw;
 #endif
@@ -91,19 +90,19 @@ struct hym8563 {
 static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	struct hym8563 *hym8563 = i2c_get_clientdata(client);
 	u8 buf[7];
 	int ret;
 
-	if (!hym8563->valid) {
-		dev_warn(&client->dev, "no valid clock/calendar values available\n");
-		return -EPERM;
-	}
-
 	ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf);
 	if (ret < 0)
 		return ret;
 
+	if (buf[0] & HYM8563_SEC_VL) {
+		dev_warn(&client->dev,
+			 "no valid clock/calendar values available\n");
+		return -EINVAL;
+	}
+
 	tm->tm_sec = bcd2bin(buf[0] & HYM8563_SEC_MASK);
 	tm->tm_min = bcd2bin(buf[1] & HYM8563_MIN_MASK);
 	tm->tm_hour = bcd2bin(buf[2] & HYM8563_HOUR_MASK);
@@ -118,7 +117,6 @@ static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
 static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	struct hym8563 *hym8563 = i2c_get_clientdata(client);
 	u8 buf[7];
 	int ret;
 
@@ -157,8 +155,6 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
 	if (ret < 0)
 		return ret;
 
-	hym8563->valid = true;
-
 	return 0;
 }
 
@@ -556,9 +552,8 @@ static int hym8563_probe(struct i2c_client *client,
 	if (ret < 0)
 		return ret;
 
-	hym8563->valid = !(ret & HYM8563_SEC_VL);
 	dev_dbg(&client->dev, "rtc information is %s\n",
-		hym8563->valid ? "valid" : "invalid");
+		(ret & HYM8563_SEC_VL) ? "invalid" : "valid");
 
 	hym8563->rtc = devm_rtc_device_register(&client->dev, client->name,
 						&hym8563_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index d3a75d4..e8194f1 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -20,6 +20,16 @@
 
 struct m48t35_rtc {
 	u8	pad[0x7ff8];    /* starts at 0x7ff8 */
+#ifdef CONFIG_SGI_IP27
+	u8	hour;
+	u8	min;
+	u8	sec;
+	u8	control;
+	u8	year;
+	u8	month;
+	u8	date;
+	u8	day;
+#else
 	u8	control;
 	u8	sec;
 	u8	min;
@@ -28,6 +38,7 @@ struct m48t35_rtc {
 	u8	date;
 	u8	month;
 	u8	year;
+#endif
 };
 
 #define M48T35_RTC_SET		0x80
diff --git a/drivers/rtc/rtc-moxart.c b/drivers/rtc/rtc-moxart.c
index 07b30a3..6b24ac9 100644
--- a/drivers/rtc/rtc-moxart.c
+++ b/drivers/rtc/rtc-moxart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * MOXA ART RTC driver.
  *
@@ -7,10 +8,6 @@
  *
  * Based on code from
  * Moxa Technology Co., Ltd. <www.moxa.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2.  This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 #include <linux/init.h>
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 9135e21..cda238d 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -297,15 +297,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
 
 	rtc->rtc_dev->ops = &mtk_rtc_ops;
 
-	ret = rtc_register_device(rtc->rtc_dev);
-	if (ret)
-		goto out_free_irq;
-
-	return 0;
-
-out_free_irq:
-	free_irq(rtc->irq, rtc);
-	return ret;
+	return rtc_register_device(rtc->rtc_dev);
 }
 
 #ifdef CONFIG_PM_SLEEP
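Note: the mt6397 hunk drops the hand-rolled free_irq() error path and returns the
rtc_register_device() result directly, which is the right shape when the interrupt is
requested with a devm_* variant (as this driver apparently does, given the removal):
devres unwinds it automatically. A generic sketch of such a probe tail, with my_rtc,
my_rtc_irq_handler and my_rtc_ops as illustrative names:

static int my_rtc_probe(struct platform_device *pdev)
{
	struct my_rtc *rtc;
	int ret;

	rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
	if (!rtc)
		return -ENOMEM;

	rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc->rtc_dev))
		return PTR_ERR(rtc->rtc_dev);

	rtc->irq = platform_get_irq(pdev, 0);
	if (rtc->irq < 0)
		return rtc->irq;

	/* device-managed: freed automatically, no error-path free_irq() needed */
	ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
					my_rtc_irq_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), rtc);
	if (ret)
		return ret;

	rtc->rtc_dev->ops = &my_rtc_ops;
	return rtc_register_device(rtc->rtc_dev);
}
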
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 988a4df..d4ed20f 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -616,7 +616,7 @@ static int rtc_pinconf_get(struct pinctrl_dev *pctldev,
 		break;
 	default:
 		return -ENOTSUPP;
-	};
+	}
 
 	*config = pinconf_to_config_packed(param, arg);
 
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index ba5baac..4e50d67 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -199,11 +199,9 @@ static int pcf2127_rtc_ioctl(struct device *dev,
 		if (ret)
 			return ret;
 
-		touser = touser & PCF2127_BIT_CTRL3_BLF ? 1 : 0;
+		touser = touser & PCF2127_BIT_CTRL3_BLF ? RTC_VL_BACKUP_LOW : 0;
 
-		if (copy_to_user((void __user *)arg, &touser, sizeof(int)))
-			return -EFAULT;
-		return 0;
+		return put_user(touser, (unsigned int __user *)arg);
 	default:
 		return -ENOIOCTLCMD;
 	}
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 1afa6d9..1db17ba 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -289,21 +289,9 @@ static int pcf85063_ioctl(struct device *dev, unsigned int cmd,
 		if (ret < 0)
 			return ret;
 
-		if (status & PCF85063_REG_SC_OS)
-			dev_warn(&pcf85063->rtc->dev, "Voltage low, data loss detected.\n");
+		status = status & PCF85063_REG_SC_OS ? RTC_VL_DATA_INVALID : 0;
 
-		status &= PCF85063_REG_SC_OS;
-
-		if (copy_to_user((void __user *)arg, &status, sizeof(int)))
-			return -EFAULT;
-
-		return 0;
-
-	case RTC_VL_CLR:
-		ret = regmap_update_bits(pcf85063->regmap, PCF85063_REG_SC,
-					 PCF85063_REG_SC_OS, 0);
-
-		return ret;
+		return put_user(status, (unsigned int __user *)arg);
 
 	default:
 		return -ENOIOCTLCMD;
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index b24c908..47e0f41 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -282,11 +282,11 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
 		ret = pcf8523_voltage_low(client);
 		if (ret < 0)
 			return ret;
+		if (ret)
+			ret = RTC_VL_BACKUP_LOW;
 
-		if (copy_to_user((void __user *)arg, &ret, sizeof(int)))
-			return -EFAULT;
+		return put_user(ret, (unsigned int __user *)arg);
 
-		return 0;
 	default:
 		return -ENOIOCTLCMD;
 	}
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 3c322f3..2dc30ea 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -22,8 +22,8 @@
 
 #define PCF8563_REG_ST1		0x00 /* status */
 #define PCF8563_REG_ST2		0x01
-#define PCF8563_BIT_AIE		(1 << 1)
-#define PCF8563_BIT_AF		(1 << 3)
+#define PCF8563_BIT_AIE		BIT(1)
+#define PCF8563_BIT_AF		BIT(3)
 #define PCF8563_BITS_ST2_N	(7 << 5)
 
 #define PCF8563_REG_SC		0x02 /* datetime */
@@ -76,7 +76,6 @@ struct pcf8563 {
 	 * 1970...2069.
 	 */
 	int c_polarity;	/* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
-	int voltage_low; /* incicates if a low_voltage was detected */
 
 	struct i2c_client *client;
 #ifdef CONFIG_COMMON_CLK
@@ -208,7 +207,6 @@ static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
 		return err;
 
 	if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) {
-		pcf8563->voltage_low = 1;
 		dev_err(&client->dev,
 			"low voltage detected, date/time is not reliable.\n");
 		return -EINVAL;
@@ -276,43 +274,23 @@ static int pcf8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
 				9 - PCF8563_REG_SC, buf + PCF8563_REG_SC);
 }
 
-#ifdef CONFIG_RTC_INTF_DEV
 static int pcf8563_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 {
-	struct pcf8563 *pcf8563 = i2c_get_clientdata(to_i2c_client(dev));
-	struct rtc_time tm;
+	struct i2c_client *client = to_i2c_client(dev);
+	int ret;
 
 	switch (cmd) {
 	case RTC_VL_READ:
-		if (pcf8563->voltage_low)
-			dev_info(dev, "low voltage detected, date/time is not reliable.\n");
+		ret = i2c_smbus_read_byte_data(client, PCF8563_REG_SC);
+		if (ret < 0)
+			return ret;
 
-		if (copy_to_user((void __user *)arg, &pcf8563->voltage_low,
-					sizeof(int)))
-			return -EFAULT;
-		return 0;
-	case RTC_VL_CLR:
-		/*
-		 * Clear the VL bit in the seconds register in case
-		 * the time has not been set already (which would
-		 * have cleared it). This does not really matter
-		 * because of the cached voltage_low value but do it
-		 * anyway for consistency.
-		 */
-		if (pcf8563_rtc_read_time(dev, &tm))
-			pcf8563_rtc_set_time(dev, &tm);
-
-		/* Clear the cached value. */
-		pcf8563->voltage_low = 0;
-
-		return 0;
+		return put_user(ret & PCF8563_SC_LV ? RTC_VL_DATA_INVALID : 0,
+				(unsigned int __user *)arg);
 	default:
 		return -ENOIOCTLCMD;
 	}
 }
-#else
-#define pcf8563_rtc_ioctl NULL
-#endif
 
 static int pcf8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
 {
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index 6b7b3a6..a0ddc86 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -428,21 +428,8 @@ static int rv3028_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 		if (ret < 0)
 			return ret;
 
-		if (status & RV3028_STATUS_PORF)
-			dev_warn(&rv3028->rtc->dev, "Voltage low, data loss detected.\n");
-
-		status &= RV3028_STATUS_PORF;
-
-		if (copy_to_user((void __user *)arg, &status, sizeof(int)))
-			return -EFAULT;
-
-		return 0;
-
-	case RTC_VL_CLR:
-		ret = regmap_update_bits(rv3028->regmap, RV3028_STATUS,
-					 RV3028_STATUS_PORF, 0);
-
-		return ret;
+		status = status & RV3028_STATUS_PORF ? RTC_VL_DATA_INVALID : 0;
+		return put_user(status, (unsigned int __user *)arg);
 
 	default:
 		return -ENOIOCTLCMD;
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index 4cdf6588..6271823 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -109,10 +109,8 @@
 #define RV3029_CONTROL_E2P_TOV_MASK	0x3F /* XTAL turnover temp mask */
 
 /* user ram section */
-#define RV3029_USR1_RAM_PAGE		0x38
-#define RV3029_USR1_SECTION_LEN		0x04
-#define RV3029_USR2_RAM_PAGE		0x3C
-#define RV3029_USR2_SECTION_LEN		0x04
+#define RV3029_RAM_PAGE			0x38
+#define RV3029_RAM_SECTION_LEN		8
 
 struct rv3029_data {
 	struct device		*dev;
@@ -121,77 +119,13 @@ struct rv3029_data {
 	int irq;
 };
 
-static int rv3029_read_regs(struct device *dev, u8 reg, u8 *buf,
-			    unsigned int len)
+static int rv3029_eeprom_busywait(struct rv3029_data *rv3029)
 {
-	struct rv3029_data *rv3029 = dev_get_drvdata(dev);
-
-	if ((reg > RV3029_USR1_RAM_PAGE + 7) ||
-	    (reg + len > RV3029_USR1_RAM_PAGE + 8))
-		return -EINVAL;
-
-	return regmap_bulk_read(rv3029->regmap, reg, buf, len);
-}
-
-static int rv3029_write_regs(struct device *dev, u8 reg, u8 const buf[],
-			     unsigned int len)
-{
-	struct rv3029_data *rv3029 = dev_get_drvdata(dev);
-
-	if ((reg > RV3029_USR1_RAM_PAGE + 7) ||
-	    (reg + len > RV3029_USR1_RAM_PAGE + 8))
-		return -EINVAL;
-
-	return regmap_bulk_write(rv3029->regmap, reg, buf, len);
-}
-
-static int rv3029_update_bits(struct device *dev, u8 reg, u8 mask, u8 set)
-{
-	u8 buf;
-	int ret;
-
-	ret = rv3029_read_regs(dev, reg, &buf, 1);
-	if (ret < 0)
-		return ret;
-	buf &= ~mask;
-	buf |= set & mask;
-	ret = rv3029_write_regs(dev, reg, &buf, 1);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static int rv3029_get_sr(struct device *dev, u8 *buf)
-{
-	int ret = rv3029_read_regs(dev, RV3029_STATUS, buf, 1);
-
-	if (ret < 0)
-		return -EIO;
-	dev_dbg(dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
-	return 0;
-}
-
-static int rv3029_set_sr(struct device *dev, u8 val)
-{
-	u8 buf[1];
-	int sr;
-
-	buf[0] = val;
-	sr = rv3029_write_regs(dev, RV3029_STATUS, buf, 1);
-	dev_dbg(dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
-	if (sr < 0)
-		return -EIO;
-	return 0;
-}
-
-static int rv3029_eeprom_busywait(struct device *dev)
-{
+	unsigned int sr;
 	int i, ret;
-	u8 sr;
 
 	for (i = 100; i > 0; i--) {
-		ret = rv3029_get_sr(dev, &sr);
+		ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
 		if (ret < 0)
 			break;
 		if (!(sr & RV3029_STATUS_EEBUSY))
@@ -199,126 +133,128 @@ static int rv3029_eeprom_busywait(struct device *dev)
 		usleep_range(1000, 10000);
 	}
 	if (i <= 0) {
-		dev_err(dev, "EEPROM busy wait timeout.\n");
+		dev_err(rv3029->dev, "EEPROM busy wait timeout.\n");
 		return -ETIMEDOUT;
 	}
 
 	return ret;
 }
 
-static int rv3029_eeprom_exit(struct device *dev)
+static int rv3029_eeprom_exit(struct rv3029_data *rv3029)
 {
 	/* Re-enable eeprom refresh */
-	return rv3029_update_bits(dev, RV3029_ONOFF_CTRL,
+	return regmap_update_bits(rv3029->regmap, RV3029_ONOFF_CTRL,
 				  RV3029_ONOFF_CTRL_EERE,
 				  RV3029_ONOFF_CTRL_EERE);
 }
 
-static int rv3029_eeprom_enter(struct device *dev)
+static int rv3029_eeprom_enter(struct rv3029_data *rv3029)
 {
+	unsigned int sr;
 	int ret;
-	u8 sr;
 
 	/* Check whether we are in the allowed voltage range. */
-	ret = rv3029_get_sr(dev, &sr);
+	ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
 	if (ret < 0)
 		return ret;
-	if (sr & (RV3029_STATUS_VLOW1 | RV3029_STATUS_VLOW2)) {
+	if (sr & RV3029_STATUS_VLOW2)
+		return -ENODEV;
+	if (sr & RV3029_STATUS_VLOW1) {
 		/* We clear the bits and retry once just in case
 		 * we had a brown out in early startup.
 		 */
-		sr &= ~RV3029_STATUS_VLOW1;
-		sr &= ~RV3029_STATUS_VLOW2;
-		ret = rv3029_set_sr(dev, sr);
+		ret = regmap_update_bits(rv3029->regmap, RV3029_STATUS,
+					 RV3029_STATUS_VLOW1, 0);
 		if (ret < 0)
 			return ret;
 		usleep_range(1000, 10000);
-		ret = rv3029_get_sr(dev, &sr);
+		ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
 		if (ret < 0)
 			return ret;
-		if (sr & (RV3029_STATUS_VLOW1 | RV3029_STATUS_VLOW2)) {
-			dev_err(dev,
+		if (sr & RV3029_STATUS_VLOW1) {
+			dev_err(rv3029->dev,
 				"Supply voltage is too low to safely access the EEPROM.\n");
 			return -ENODEV;
 		}
 	}
 
 	/* Disable eeprom refresh. */
-	ret = rv3029_update_bits(dev, RV3029_ONOFF_CTRL, RV3029_ONOFF_CTRL_EERE,
-				 0);
+	ret = regmap_update_bits(rv3029->regmap, RV3029_ONOFF_CTRL,
+				 RV3029_ONOFF_CTRL_EERE, 0);
 	if (ret < 0)
 		return ret;
 
 	/* Wait for any previous eeprom accesses to finish. */
-	ret = rv3029_eeprom_busywait(dev);
+	ret = rv3029_eeprom_busywait(rv3029);
 	if (ret < 0)
-		rv3029_eeprom_exit(dev);
+		rv3029_eeprom_exit(rv3029);
 
 	return ret;
 }
 
-static int rv3029_eeprom_read(struct device *dev, u8 reg,
+static int rv3029_eeprom_read(struct rv3029_data *rv3029, u8 reg,
 			      u8 buf[], size_t len)
 {
 	int ret, err;
 
-	err = rv3029_eeprom_enter(dev);
+	err = rv3029_eeprom_enter(rv3029);
 	if (err < 0)
 		return err;
 
-	ret = rv3029_read_regs(dev, reg, buf, len);
+	ret = regmap_bulk_read(rv3029->regmap, reg, buf, len);
 
-	err = rv3029_eeprom_exit(dev);
+	err = rv3029_eeprom_exit(rv3029);
 	if (err < 0)
 		return err;
 
 	return ret;
 }
 
-static int rv3029_eeprom_write(struct device *dev, u8 reg,
+static int rv3029_eeprom_write(struct rv3029_data *rv3029, u8 reg,
 			       u8 const buf[], size_t len)
 {
+	unsigned int tmp;
 	int ret, err;
 	size_t i;
-	u8 tmp;
 
-	err = rv3029_eeprom_enter(dev);
+	err = rv3029_eeprom_enter(rv3029);
 	if (err < 0)
 		return err;
 
 	for (i = 0; i < len; i++, reg++) {
-		ret = rv3029_read_regs(dev, reg, &tmp, 1);
+		ret = regmap_read(rv3029->regmap, reg, &tmp);
 		if (ret < 0)
 			break;
 		if (tmp != buf[i]) {
-			ret = rv3029_write_regs(dev, reg, &buf[i], 1);
+			tmp = buf[i];
+			ret = regmap_write(rv3029->regmap, reg, tmp);
 			if (ret < 0)
 				break;
 		}
-		ret = rv3029_eeprom_busywait(dev);
+		ret = rv3029_eeprom_busywait(rv3029);
 		if (ret < 0)
 			break;
 	}
 
-	err = rv3029_eeprom_exit(dev);
+	err = rv3029_eeprom_exit(rv3029);
 	if (err < 0)
 		return err;
 
 	return ret;
 }
 
-static int rv3029_eeprom_update_bits(struct device *dev,
+static int rv3029_eeprom_update_bits(struct rv3029_data *rv3029,
 				     u8 reg, u8 mask, u8 set)
 {
 	u8 buf;
 	int ret;
 
-	ret = rv3029_eeprom_read(dev, reg, &buf, 1);
+	ret = rv3029_eeprom_read(rv3029, reg, &buf, 1);
 	if (ret < 0)
 		return ret;
 	buf &= ~mask;
 	buf |= set & mask;
-	ret = rv3029_eeprom_write(dev, reg, &buf, 1);
+	ret = rv3029_eeprom_write(rv3029, reg, &buf, 1);
 	if (ret < 0)
 		return ret;
 
@@ -330,20 +266,20 @@ static irqreturn_t rv3029_handle_irq(int irq, void *dev_id)
 	struct device *dev = dev_id;
 	struct rv3029_data *rv3029 = dev_get_drvdata(dev);
 	struct mutex *lock = &rv3029->rtc->ops_lock;
+	unsigned int flags, controls;
 	unsigned long events = 0;
-	u8 flags, controls;
 	int ret;
 
 	mutex_lock(lock);
 
-	ret = rv3029_read_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
+	ret = regmap_read(rv3029->regmap, RV3029_IRQ_CTRL, &controls);
 	if (ret) {
 		dev_warn(dev, "Read IRQ Control Register error %d\n", ret);
 		mutex_unlock(lock);
 		return IRQ_NONE;
 	}
 
-	ret = rv3029_read_regs(dev, RV3029_IRQ_FLAGS, &flags, 1);
+	ret = regmap_read(rv3029->regmap, RV3029_IRQ_FLAGS, &flags);
 	if (ret) {
 		dev_warn(dev, "Read IRQ Flags Register error %d\n", ret);
 		mutex_unlock(lock);
@@ -358,8 +294,8 @@ static irqreturn_t rv3029_handle_irq(int irq, void *dev_id)
 
 	if (events) {
 		rtc_update_irq(rv3029->rtc, 1, events);
-		rv3029_write_regs(dev, RV3029_IRQ_FLAGS, &flags, 1);
-		rv3029_write_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
+		regmap_write(rv3029->regmap, RV3029_IRQ_FLAGS, flags);
+		regmap_write(rv3029->regmap, RV3029_IRQ_CTRL, controls);
 	}
 	mutex_unlock(lock);
 
@@ -368,22 +304,22 @@ static irqreturn_t rv3029_handle_irq(int irq, void *dev_id)
 
 static int rv3029_read_time(struct device *dev, struct rtc_time *tm)
 {
-	u8 buf[1];
+	struct rv3029_data *rv3029 = dev_get_drvdata(dev);
+	unsigned int sr;
 	int ret;
 	u8 regs[RV3029_WATCH_SECTION_LEN] = { 0, };
 
-	ret = rv3029_get_sr(dev, buf);
-	if (ret < 0) {
-		dev_err(dev, "%s: reading SR failed\n", __func__);
-		return -EIO;
-	}
-
-	ret = rv3029_read_regs(dev, RV3029_W_SEC, regs,
-			       RV3029_WATCH_SECTION_LEN);
-	if (ret < 0) {
-		dev_err(dev, "%s: reading RTC section failed\n", __func__);
+	ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
+	if (ret < 0)
 		return ret;
-	}
+
+	if (sr & (RV3029_STATUS_VLOW2 | RV3029_STATUS_PON))
+		return -EINVAL;
+
+	ret = regmap_bulk_read(rv3029->regmap, RV3029_W_SEC, regs,
+			       RV3029_WATCH_SECTION_LEN);
+	if (ret < 0)
+		return ret;
 
 	tm->tm_sec = bcd2bin(regs[RV3029_W_SEC - RV3029_W_SEC]);
 	tm->tm_min = bcd2bin(regs[RV3029_W_MINUTES - RV3029_W_SEC]);
@@ -411,34 +347,24 @@ static int rv3029_read_time(struct device *dev, struct rtc_time *tm)
 
 static int rv3029_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
+	struct rv3029_data *rv3029 = dev_get_drvdata(dev);
 	struct rtc_time *const tm = &alarm->time;
+	unsigned int controls, flags;
 	int ret;
-	u8 regs[8], controls, flags;
+	u8 regs[8];
 
-	ret = rv3029_get_sr(dev, regs);
-	if (ret < 0) {
-		dev_err(dev, "%s: reading SR failed\n", __func__);
-		return -EIO;
-	}
-
-	ret = rv3029_read_regs(dev, RV3029_A_SC, regs,
+	ret = regmap_bulk_read(rv3029->regmap, RV3029_A_SC, regs,
 			       RV3029_ALARM_SECTION_LEN);
+	if (ret < 0)
+		return ret;
 
-	if (ret < 0) {
-		dev_err(dev, "%s: reading alarm section failed\n", __func__);
+	ret = regmap_read(rv3029->regmap, RV3029_IRQ_CTRL, &controls);
+	if (ret)
 		return ret;
-	}
 
-	ret = rv3029_read_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
-	if (ret) {
-		dev_err(dev, "Read IRQ Control Register error %d\n", ret);
+	ret = regmap_read(rv3029->regmap, RV3029_IRQ_FLAGS, &flags);
+	if (ret < 0)
 		return ret;
-	}
-	ret = rv3029_read_regs(dev, RV3029_IRQ_FLAGS, &flags, 1);
-	if (ret < 0) {
-		dev_err(dev, "Read IRQ Flags Register error %d\n", ret);
-		return ret;
-	}
 
 	tm->tm_sec = bcd2bin(regs[RV3029_A_SC - RV3029_A_SC] & 0x7f);
 	tm->tm_min = bcd2bin(regs[RV3029_A_MN - RV3029_A_SC] & 0x7f);
@@ -456,50 +382,20 @@ static int rv3029_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 
 static int rv3029_alarm_irq_enable(struct device *dev, unsigned int enable)
 {
-	int ret;
-	u8 controls;
+	struct rv3029_data *rv3029 = dev_get_drvdata(dev);
 
-	ret = rv3029_read_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
-	if (ret < 0) {
-		dev_warn(dev, "Read IRQ Control Register error %d\n", ret);
-		return ret;
-	}
-
-	/* enable/disable AIE irq */
-	if (enable)
-		controls |= RV3029_IRQ_CTRL_AIE;
-	else
-		controls &= ~RV3029_IRQ_CTRL_AIE;
-
-	ret = rv3029_write_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
-	if (ret < 0) {
-		dev_err(dev, "can't update INT reg\n");
-		return ret;
-	}
-
-	return 0;
+	return regmap_update_bits(rv3029->regmap, RV3029_IRQ_CTRL,
+				  RV3029_IRQ_CTRL_AIE,
+				  enable ? RV3029_IRQ_CTRL_AIE : 0);
 }
 
 static int rv3029_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
+	struct rv3029_data *rv3029 = dev_get_drvdata(dev);
 	struct rtc_time *const tm = &alarm->time;
 	int ret;
 	u8 regs[8];
 
-	/*
-	 * The clock has an 8 bit wide bcd-coded register (they never learn)
-	 * for the year. tm_year is an offset from 1900 and we are interested
-	 * in the 2000-2099 range, so any value less than 100 is invalid.
-	*/
-	if (tm->tm_year < 100)
-		return -EINVAL;
-
-	ret = rv3029_get_sr(dev, regs);
-	if (ret < 0) {
-		dev_err(dev, "%s: reading SR failed\n", __func__);
-		return -EIO;
-	}
-
 	/* Activate all the alarms with AE_x bit */
 	regs[RV3029_A_SC - RV3029_A_SC] = bin2bcd(tm->tm_sec) | RV3029_A_AE_X;
 	regs[RV3029_A_MN - RV3029_A_SC] = bin2bcd(tm->tm_min) | RV3029_A_AE_X;
@@ -515,39 +411,20 @@ static int rv3029_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 		| RV3029_A_AE_X;
 
 	/* Write the alarm */
-	ret = rv3029_write_regs(dev, RV3029_A_SC, regs,
+	ret = regmap_bulk_write(rv3029->regmap, RV3029_A_SC, regs,
 				RV3029_ALARM_SECTION_LEN);
 	if (ret < 0)
 		return ret;
 
-	if (alarm->enabled) {
-		/* enable AIE irq */
-		ret = rv3029_alarm_irq_enable(dev, 1);
-		if (ret)
-			return ret;
-	} else {
-		/* disable AIE irq */
-		ret = rv3029_alarm_irq_enable(dev, 0);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
+	return rv3029_alarm_irq_enable(dev, alarm->enabled);
 }
 
 static int rv3029_set_time(struct device *dev, struct rtc_time *tm)
 {
+	struct rv3029_data *rv3029 = dev_get_drvdata(dev);
 	u8 regs[8];
 	int ret;
 
-	/*
-	 * The clock has an 8 bit wide bcd-coded register (they never learn)
-	 * for the year. tm_year is an offset from 1900 and we are interested
-	 * in the 2000-2099 range, so any value less than 100 is invalid.
-	*/
-	if (tm->tm_year < 100)
-		return -EINVAL;
-
 	regs[RV3029_W_SEC - RV3029_W_SEC] = bin2bcd(tm->tm_sec);
 	regs[RV3029_W_MINUTES - RV3029_W_SEC] = bin2bcd(tm->tm_min);
 	regs[RV3029_W_HOURS - RV3029_W_SEC] = bin2bcd(tm->tm_hour);
@@ -556,24 +433,55 @@ static int rv3029_set_time(struct device *dev, struct rtc_time *tm)
 	regs[RV3029_W_DAYS - RV3029_W_SEC] = bin2bcd(tm->tm_wday + 1) & 0x7;
 	regs[RV3029_W_YEARS - RV3029_W_SEC] = bin2bcd(tm->tm_year - 100);
 
-	ret = rv3029_write_regs(dev, RV3029_W_SEC, regs,
+	ret = regmap_bulk_write(rv3029->regmap, RV3029_W_SEC, regs,
 				RV3029_WATCH_SECTION_LEN);
 	if (ret < 0)
 		return ret;
 
-	ret = rv3029_get_sr(dev, regs);
-	if (ret < 0) {
-		dev_err(dev, "%s: reading SR failed\n", __func__);
-		return ret;
-	}
-	/* clear PON bit */
-	ret = rv3029_set_sr(dev, (regs[0] & ~RV3029_STATUS_PON));
-	if (ret < 0) {
-		dev_err(dev, "%s: reading SR failed\n", __func__);
-		return ret;
-	}
+	/* clear PON and VLOW2 bits */
+	return regmap_update_bits(rv3029->regmap, RV3029_STATUS,
+				  RV3029_STATUS_PON | RV3029_STATUS_VLOW2, 0);
+}
 
-	return 0;
+static int rv3029_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+	struct rv3029_data *rv3029 = dev_get_drvdata(dev);
+	unsigned long vl = 0;
+	int sr, ret = 0;
+
+	switch (cmd) {
+	case RTC_VL_READ:
+		ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
+		if (ret < 0)
+			return ret;
+
+		if (sr & RV3029_STATUS_VLOW1)
+			vl = RTC_VL_ACCURACY_LOW;
+
+		if (sr & (RV3029_STATUS_VLOW2 | RV3029_STATUS_PON))
+			vl |= RTC_VL_DATA_INVALID;
+
+		return put_user(vl, (unsigned int __user *)arg);
+
+	case RTC_VL_CLR:
+		return regmap_update_bits(rv3029->regmap, RV3029_STATUS,
+					  RV3029_STATUS_VLOW1, 0);
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+static int rv3029_nvram_write(void *priv, unsigned int offset, void *val,
+			      size_t bytes)
+{
+	return regmap_bulk_write(priv, RV3029_RAM_PAGE + offset, val, bytes);
+}
+
+static int rv3029_nvram_read(void *priv, unsigned int offset, void *val,
+			     size_t bytes)
+{
+	return regmap_bulk_read(priv, RV3029_RAM_PAGE + offset, val, bytes);
 }
 
 static const struct rv3029_trickle_tab_elem {
@@ -635,6 +543,7 @@ static const struct rv3029_trickle_tab_elem {
 
 static void rv3029_trickle_config(struct device *dev)
 {
+	struct rv3029_data *rv3029 = dev_get_drvdata(dev);
 	struct device_node *of_node = dev->of_node;
 	const struct rv3029_trickle_tab_elem *elem;
 	int i, err;
@@ -661,7 +570,7 @@ static void rv3029_trickle_config(struct device *dev)
 			 "Trickle charger enabled at %d ohms resistance.\n",
 			 elem->r);
 	}
-	err = rv3029_eeprom_update_bits(dev, RV3029_CONTROL_E2P_EECTRL,
+	err = rv3029_eeprom_update_bits(rv3029, RV3029_CONTROL_E2P_EECTRL,
 					RV3029_TRICKLE_MASK,
 					trickle_set_bits);
 	if (err < 0)
@@ -670,12 +579,12 @@ static void rv3029_trickle_config(struct device *dev)
 
 #ifdef CONFIG_RTC_DRV_RV3029_HWMON
 
-static int rv3029_read_temp(struct device *dev, int *temp_mC)
+static int rv3029_read_temp(struct rv3029_data *rv3029, int *temp_mC)
 {
+	unsigned int temp;
 	int ret;
-	u8 temp;
 
-	ret = rv3029_read_regs(dev, RV3029_TEMP_PAGE, &temp, 1);
+	ret = regmap_read(rv3029->regmap, RV3029_TEMP_PAGE, &temp);
 	if (ret < 0)
 		return ret;
 
@@ -688,9 +597,10 @@ static ssize_t rv3029_hwmon_show_temp(struct device *dev,
 				      struct device_attribute *attr,
 				      char *buf)
 {
+	struct rv3029_data *rv3029 = dev_get_drvdata(dev);
 	int ret, temp_mC;
 
-	ret = rv3029_read_temp(dev, &temp_mC);
+	ret = rv3029_read_temp(rv3029, &temp_mC);
 	if (ret < 0)
 		return ret;
 
@@ -702,9 +612,10 @@ static ssize_t rv3029_hwmon_set_update_interval(struct device *dev,
 						const char *buf,
 						size_t count)
 {
+	struct rv3029_data *rv3029 = dev_get_drvdata(dev);
+	unsigned int th_set_bits = 0;
 	unsigned long interval_ms;
 	int ret;
-	u8 th_set_bits = 0;
 
 	ret = kstrtoul(buf, 10, &interval_ms);
 	if (ret < 0)
@@ -715,7 +626,7 @@ static ssize_t rv3029_hwmon_set_update_interval(struct device *dev,
 		if (interval_ms >= 16000)
 			th_set_bits |= RV3029_EECTRL_THP;
 	}
-	ret = rv3029_eeprom_update_bits(dev, RV3029_CONTROL_E2P_EECTRL,
+	ret = rv3029_eeprom_update_bits(rv3029, RV3029_CONTROL_E2P_EECTRL,
 					RV3029_EECTRL_THE | RV3029_EECTRL_THP,
 					th_set_bits);
 	if (ret < 0)
@@ -728,10 +639,11 @@ static ssize_t rv3029_hwmon_show_update_interval(struct device *dev,
 						 struct device_attribute *attr,
 						 char *buf)
 {
+	struct rv3029_data *rv3029 = dev_get_drvdata(dev);
 	int ret, interval_ms;
 	u8 eectrl;
 
-	ret = rv3029_eeprom_read(dev, RV3029_CONTROL_E2P_EECTRL,
+	ret = rv3029_eeprom_read(rv3029, RV3029_CONTROL_E2P_EECTRL,
 				 &eectrl, 1);
 	if (ret < 0)
 		return ret;
@@ -785,14 +697,23 @@ static void rv3029_hwmon_register(struct device *dev, const char *name)
 static struct rtc_class_ops rv3029_rtc_ops = {
 	.read_time	= rv3029_read_time,
 	.set_time	= rv3029_set_time,
+	.ioctl		= rv3029_ioctl,
 };
 
 static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
 			const char *name)
 {
 	struct rv3029_data *rv3029;
+	struct nvmem_config nvmem_cfg = {
+		.name = "rv3029_nvram",
+		.word_size = 1,
+		.stride = 1,
+		.size = RV3029_RAM_SECTION_LEN,
+		.type = NVMEM_TYPE_BATTERY_BACKED,
+		.reg_read = rv3029_nvram_read,
+		.reg_write = rv3029_nvram_write,
+	};
 	int rc = 0;
-	u8 buf[1];
 
 	rv3029 = devm_kzalloc(dev, sizeof(*rv3029), GFP_KERNEL);
 	if (!rv3029)
@@ -803,21 +724,12 @@ static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
 	rv3029->dev = dev;
 	dev_set_drvdata(dev, rv3029);
 
-	rc = rv3029_get_sr(dev, buf);
-	if (rc < 0) {
-		dev_err(dev, "reading status failed\n");
-		return rc;
-	}
-
 	rv3029_trickle_config(dev);
 	rv3029_hwmon_register(dev, name);
 
-	rv3029->rtc = devm_rtc_device_register(dev, name, &rv3029_rtc_ops,
-					       THIS_MODULE);
-	if (IS_ERR(rv3029->rtc)) {
-		dev_err(dev, "unable to register the class device\n");
+	rv3029->rtc = devm_rtc_allocate_device(dev);
+	if (IS_ERR(rv3029->rtc))
 		return PTR_ERR(rv3029->rtc);
-	}
 
 	if (rv3029->irq > 0) {
 		rc = devm_request_threaded_irq(dev, rv3029->irq,
@@ -834,20 +746,48 @@ static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
 		}
 	}
 
+	rv3029->rtc->ops = &rv3029_rtc_ops;
+	rv3029->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+	rv3029->rtc->range_max = RTC_TIMESTAMP_END_2079;
+
+	rc = rtc_register_device(rv3029->rtc);
+	if (rc)
+		return rc;
+
+	nvmem_cfg.priv = rv3029->regmap;
+	rtc_nvmem_register(rv3029->rtc, &nvmem_cfg);
+
 	return 0;
 }
 
+static const struct regmap_range rv3029_holes_range[] = {
+	regmap_reg_range(0x05, 0x07),
+	regmap_reg_range(0x0f, 0x0f),
+	regmap_reg_range(0x17, 0x17),
+	regmap_reg_range(0x1a, 0x1f),
+	regmap_reg_range(0x21, 0x27),
+	regmap_reg_range(0x34, 0x37),
+};
+
+static const struct regmap_access_table rv3029_regs = {
+	.no_ranges =	rv3029_holes_range,
+	.n_no_ranges =	ARRAY_SIZE(rv3029_holes_range),
+};
+
+static const struct regmap_config config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.rd_table = &rv3029_regs,
+	.wr_table = &rv3029_regs,
+	.max_register = 0x3f,
+};
+
 #if IS_ENABLED(CONFIG_I2C)
 
 static int rv3029_i2c_probe(struct i2c_client *client,
 			    const struct i2c_device_id *id)
 {
 	struct regmap *regmap;
-	static const struct regmap_config config = {
-		.reg_bits = 8,
-		.val_bits = 8,
-	};
-
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK |
 				     I2C_FUNC_SMBUS_BYTE)) {
 		dev_err(&client->dev, "Adapter does not support SMBUS_I2C_BLOCK or SMBUS_I2C_BYTE\n");
@@ -855,11 +795,8 @@ static int rv3029_i2c_probe(struct i2c_client *client,
 	}
 
 	regmap = devm_regmap_init_i2c(client, &config);
-	if (IS_ERR(regmap)) {
-		dev_err(&client->dev, "%s: regmap allocation failed: %ld\n",
-			__func__, PTR_ERR(regmap));
+	if (IS_ERR(regmap))
 		return PTR_ERR(regmap);
-	}
 
 	return rv3029_probe(&client->dev, regmap, client->irq, client->name);
 }
@@ -873,24 +810,20 @@ MODULE_DEVICE_TABLE(i2c, rv3029_id);
 
 static const struct of_device_id rv3029_of_match[] = {
 	{ .compatible = "microcrystal,rv3029" },
-	/* Backward compatibility only, do not use compatibles below: */
-	{ .compatible = "rv3029" },
-	{ .compatible = "rv3029c2" },
-	{ .compatible = "mc,rv3029c2" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, rv3029_of_match);
 
 static struct i2c_driver rv3029_driver = {
 	.driver = {
-		.name = "rtc-rv3029c2",
+		.name = "rv3029",
 		.of_match_table = of_match_ptr(rv3029_of_match),
 	},
 	.probe		= rv3029_i2c_probe,
 	.id_table	= rv3029_id,
 };
 
-static int rv3029_register_driver(void)
+static int __init rv3029_register_driver(void)
 {
 	return i2c_add_driver(&rv3029_driver);
 }
@@ -902,7 +835,7 @@ static void rv3029_unregister_driver(void)
 
 #else
 
-static int rv3029_register_driver(void)
+static int __init rv3029_register_driver(void)
 {
 	return 0;
 }
@@ -917,18 +850,11 @@ static void rv3029_unregister_driver(void)
 
 static int rv3049_probe(struct spi_device *spi)
 {
-	static const struct regmap_config config = {
-		.reg_bits = 8,
-		.val_bits = 8,
-	};
 	struct regmap *regmap;
 
 	regmap = devm_regmap_init_spi(spi, &config);
-	if (IS_ERR(regmap)) {
-		dev_err(&spi->dev, "%s: regmap allocation failed: %ld\n",
-			__func__, PTR_ERR(regmap));
+	if (IS_ERR(regmap))
 		return PTR_ERR(regmap);
-	}
 
 	return rv3029_probe(&spi->dev, regmap, spi->irq, "rv3049");
 }
@@ -940,24 +866,24 @@ static struct spi_driver rv3049_driver = {
 	.probe   = rv3049_probe,
 };
 
-static int rv3049_register_driver(void)
+static int __init rv3049_register_driver(void)
 {
 	return spi_register_driver(&rv3049_driver);
 }
 
-static void rv3049_unregister_driver(void)
+static void __exit rv3049_unregister_driver(void)
 {
 	spi_unregister_driver(&rv3049_driver);
 }
 
 #else
 
-static int rv3049_register_driver(void)
+static int __init rv3049_register_driver(void)
 {
 	return 0;
 }
 
-static void rv3049_unregister_driver(void)
+static void __exit rv3049_unregister_driver(void)
 {
 }
 
@@ -968,16 +894,12 @@ static int __init rv30x9_init(void)
 	int ret;
 
 	ret = rv3029_register_driver();
-	if (ret) {
-		pr_err("Failed to register rv3029 driver: %d\n", ret);
+	if (ret)
 		return ret;
-	}
 
 	ret = rv3049_register_driver();
-	if (ret) {
-		pr_err("Failed to register rv3049 driver: %d\n", ret);
+	if (ret)
 		rv3029_unregister_driver();
-	}
 
 	return ret;
 }
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 4960f0a..93c3a6b 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -411,6 +411,7 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct rv8803_data *rv8803 = dev_get_drvdata(dev);
+	unsigned int vl = 0;
 	int flags, ret = 0;
 
 	switch (cmd) {
@@ -419,18 +420,15 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 		if (flags < 0)
 			return flags;
 
-		if (flags & RV8803_FLAG_V1F)
+		if (flags & RV8803_FLAG_V1F) {
 			dev_warn(&client->dev, "Voltage low, temperature compensation stopped.\n");
+			vl = RTC_VL_ACCURACY_LOW;
+		}
 
 		if (flags & RV8803_FLAG_V2F)
-			dev_warn(&client->dev, "Voltage low, data loss detected.\n");
+			vl |= RTC_VL_DATA_INVALID;
 
-		flags &= RV8803_FLAG_V1F | RV8803_FLAG_V2F;
-
-		if (copy_to_user((void __user *)arg, &flags, sizeof(int)))
-			return -EFAULT;
-
-		return 0;
+		return put_user(vl, (unsigned int __user *)arg);
 
 	case RTC_VL_CLR:
 		mutex_lock(&rv8803->flags_lock);
@@ -440,7 +438,7 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 			return flags;
 		}
 
-		flags &= ~(RV8803_FLAG_V1F | RV8803_FLAG_V2F);
+		flags &= ~RV8803_FLAG_V1F;
 		ret = rv8803_write_reg(client, RV8803_FLAG, flags);
 		mutex_unlock(&rv8803->flags_lock);
 		if (ret)
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index 8102469..fe01015 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -389,9 +389,8 @@ static int rx8010_alarm_irq_enable(struct device *dev,
 
 static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 {
-	struct i2c_client *client = to_i2c_client(dev);
 	struct rx8010_data *rx8010 = dev_get_drvdata(dev);
-	int ret, tmp;
+	int tmp;
 	int flagreg;
 
 	switch (cmd) {
@@ -400,24 +399,8 @@ static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 		if (flagreg < 0)
 			return flagreg;
 
-		tmp = !!(flagreg & RX8010_FLAG_VLF);
-		if (copy_to_user((void __user *)arg, &tmp, sizeof(int)))
-			return -EFAULT;
-
-		return 0;
-
-	case RTC_VL_CLR:
-		flagreg = i2c_smbus_read_byte_data(rx8010->client, RX8010_FLAG);
-		if (flagreg < 0) {
-			return flagreg;
-		}
-
-		flagreg &= ~RX8010_FLAG_VLF;
-		ret = i2c_smbus_write_byte_data(client, RX8010_FLAG, flagreg);
-		if (ret < 0)
-			return ret;
-
-		return 0;
+		tmp = flagreg & RX8010_FLAG_VLF ? RTC_VL_DATA_INVALID : 0;
+		return put_user(tmp, (unsigned int __user *)arg);
 
 	default:
 		return -ENOIOCTLCMD;
@@ -482,7 +465,7 @@ static int rx8010_probe(struct i2c_client *client,
 
 	rx8010->rtc->max_user_freq = 1;
 
-	return err;
+	return 0;
 }
 
 static struct i2c_driver rx8010_driver = {
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index b9bda10..a24f858 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -67,7 +67,6 @@ static const struct i2c_device_id rx8025_id[] = {
 MODULE_DEVICE_TABLE(i2c, rx8025_id);
 
 struct rx8025_data {
-	struct i2c_client *client;
 	struct rtc_device *rtc;
 	u8 ctrl1;
 };
@@ -103,10 +102,10 @@ static s32 rx8025_write_regs(const struct i2c_client *client,
 
 static int rx8025_check_validity(struct device *dev)
 {
-	struct rx8025_data *rx8025 = dev_get_drvdata(dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	int ctrl2;
 
-	ctrl2 = rx8025_read_reg(rx8025->client, RX8025_REG_CTRL2);
+	ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
 	if (ctrl2 < 0)
 		return ctrl2;
 
@@ -178,6 +177,7 @@ static irqreturn_t rx8025_handle_irq(int irq, void *dev_id)
 
 static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct rx8025_data *rx8025 = dev_get_drvdata(dev);
 	u8 date[7];
 	int err;
@@ -186,7 +186,7 @@ static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
 	if (err)
 		return err;
 
-	err = rx8025_read_regs(rx8025->client, RX8025_REG_SEC, 7, date);
+	err = rx8025_read_regs(client, RX8025_REG_SEC, 7, date);
 	if (err)
 		return err;
 
@@ -211,6 +211,7 @@ static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
 
 static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct rx8025_data *rx8025 = dev_get_drvdata(dev);
 	u8 date[7];
 	int ret;
@@ -237,11 +238,11 @@ static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
 
 	dev_dbg(dev, "%s: write %7ph\n", __func__, date);
 
-	ret = rx8025_write_regs(rx8025->client, RX8025_REG_SEC, 7, date);
+	ret = rx8025_write_regs(client, RX8025_REG_SEC, 7, date);
 	if (ret < 0)
 		return ret;
 
-	return rx8025_reset_validity(rx8025->client);
+	return rx8025_reset_validity(client);
 }
 
 static int rx8025_init_client(struct i2c_client *client)
@@ -251,7 +252,7 @@ static int rx8025_init_client(struct i2c_client *client)
 	int need_clear = 0;
 	int err;
 
-	err = rx8025_read_regs(rx8025->client, RX8025_REG_CTRL1, 2, ctrl);
+	err = rx8025_read_regs(client, RX8025_REG_CTRL1, 2, ctrl);
 	if (err)
 		goto out;
 
@@ -280,8 +281,8 @@ static int rx8025_init_client(struct i2c_client *client)
 /* Alarm support */
 static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct rx8025_data *rx8025 = dev_get_drvdata(dev);
-	struct i2c_client *client = rx8025->client;
 	u8 ald[2];
 	int ctrl2, err;
 
@@ -347,18 +348,18 @@ static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
 
 	if (rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE) {
 		rx8025->ctrl1 &= ~RX8025_BIT_CTRL1_DALE;
-		err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
+		err = rx8025_write_reg(client, RX8025_REG_CTRL1,
 				       rx8025->ctrl1);
 		if (err)
 			return err;
 	}
-	err = rx8025_write_regs(rx8025->client, RX8025_REG_ALDMIN, 2, ald);
+	err = rx8025_write_regs(client, RX8025_REG_ALDMIN, 2, ald);
 	if (err)
 		return err;
 
 	if (t->enabled) {
 		rx8025->ctrl1 |= RX8025_BIT_CTRL1_DALE;
-		err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
+		err = rx8025_write_reg(client, RX8025_REG_CTRL1,
 				       rx8025->ctrl1);
 		if (err)
 			return err;
@@ -369,6 +370,7 @@ static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
 
 static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct rx8025_data *rx8025 = dev_get_drvdata(dev);
 	u8 ctrl1;
 	int err;
@@ -381,7 +383,7 @@ static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
 
 	if (ctrl1 != rx8025->ctrl1) {
 		rx8025->ctrl1 = ctrl1;
-		err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
+		err = rx8025_write_reg(client, RX8025_REG_CTRL1,
 				       rx8025->ctrl1);
 		if (err)
 			return err;
@@ -516,7 +518,6 @@ static int rx8025_probe(struct i2c_client *client,
 	if (!rx8025)
 		return -ENOMEM;
 
-	rx8025->client = client;
 	i2c_set_clientdata(client, rx8025);
 
 	err = rx8025_init_client(client);
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 781cabb..d774aa1 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -897,8 +897,11 @@ static int stm32_rtc_resume(struct device *dev)
 	}
 
 	ret = stm32_rtc_wait_sync(rtc);
-	if (ret < 0)
+	if (ret < 0) {
+		if (rtc->data->has_pclk)
+			clk_disable_unprepare(rtc->pclk);
 		return ret;
+	}
 
 	if (device_may_wakeup(dev))
 		return disable_irq_wake(rtc->irq_alarm);
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 859d901..e39af2d 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -23,6 +23,7 @@
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/mfd/tps6586x.h>
 #include <linux/module.h>
@@ -267,6 +268,8 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
 	rtc->rtc->start_secs = mktime64(2009, 1, 1, 0, 0, 0);
 	rtc->rtc->set_start_time = true;
 
+	irq_set_status_flags(rtc->irq, IRQ_NOAUTOEN);
+
 	ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
 				tps6586x_rtc_irq,
 				IRQF_ONESHOT,
@@ -276,7 +279,6 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
 				rtc->irq, ret);
 		goto fail_rtc_register;
 	}
-	disable_irq(rtc->irq);
 
 	ret = rtc_register_device(rtc->rtc);
 	if (ret)
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index 5396905..5786866 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -94,7 +94,7 @@ static int xlnx_rtc_read_time(struct device *dev, struct rtc_time *tm)
 		 * RTC has updated the CURRENT_TIME with the time written into
 		 * SET_TIME_WRITE register.
 		 */
-		rtc_time64_to_tm(readl(xrtcdev->reg_base + RTC_CUR_TM), tm);
+		read_time = readl(xrtcdev->reg_base + RTC_CUR_TM);
 	} else {
 		/*
 		 * Time written in SET_TIME_WRITE has not yet updated into
@@ -104,8 +104,8 @@ static int xlnx_rtc_read_time(struct device *dev, struct rtc_time *tm)
 		 * reading.
 		 */
 		read_time = readl(xrtcdev->reg_base + RTC_SET_TM_RD) - 1;
-		rtc_time64_to_tm(read_time, tm);
 	}
+	rtc_time64_to_tm(read_time, tm);
 
 	return 0;
 }
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 8d4d69e..62a859e 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -320,13 +320,12 @@ static ssize_t dasd_stats_proc_write(struct file *file,
 #endif				/* CONFIG_DASD_PROFILE */
 }
 
-static const struct file_operations dasd_stats_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= dasd_stats_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= dasd_stats_proc_write,
+static const struct proc_ops dasd_stats_proc_ops = {
+	.proc_open	= dasd_stats_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= dasd_stats_proc_write,
 };
 
 /*
@@ -347,7 +346,7 @@ dasd_proc_init(void)
 	dasd_statistics_entry = proc_create("statistics",
 					    S_IFREG | S_IRUGO | S_IWUSR,
 					    dasd_proc_root_entry,
-					    &dasd_stats_proc_fops);
+					    &dasd_stats_proc_ops);
 	if (!dasd_statistics_entry)
 		goto out_nostatistics;
 	return 0;
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 2a3f874..da642e8 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -398,12 +398,12 @@ cio_ignore_proc_open(struct inode *inode, struct file *file)
 				sizeof(struct ccwdev_iter));
 }
 
-static const struct file_operations cio_ignore_proc_fops = {
-	.open    = cio_ignore_proc_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.release = seq_release_private,
-	.write   = cio_ignore_write,
+static const struct proc_ops cio_ignore_proc_ops = {
+	.proc_open	= cio_ignore_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release_private,
+	.proc_write	= cio_ignore_write,
 };
 
 static int
@@ -412,7 +412,7 @@ cio_ignore_proc_init (void)
 	struct proc_dir_entry *entry;
 
 	entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
-			    &cio_ignore_proc_fops);
+			    &cio_ignore_proc_ops);
 	if (!entry)
 		return -ENOENT;
 	return 0;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 8318504..94edbb3 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1372,18 +1372,17 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf,
 	return ret ? ret : count;
 }
 
-static const struct file_operations cio_settle_proc_fops = {
-	.open = nonseekable_open,
-	.write = cio_settle_write,
-	.llseek = no_llseek,
+static const struct proc_ops cio_settle_proc_ops = {
+	.proc_open	= nonseekable_open,
+	.proc_write	= cio_settle_write,
+	.proc_lseek	= no_llseek,
 };
 
 static int __init cio_settle_init(void)
 {
 	struct proc_dir_entry *entry;
 
-	entry = proc_create("cio_settle", S_IWUSR, NULL,
-			    &cio_settle_proc_fops);
+	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
 	if (!entry)
 		return -ENOMEM;
 	return 0;
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 52aa95c..22d2db6 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -7,7 +7,8 @@
 obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
 # zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
 zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
-zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o zcrypt_ccamisc.o
+zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+zcrypt-objs += zcrypt_ccamisc.o zcrypt_ep11misc.o
 obj-$(CONFIG_ZCRYPT) += zcrypt.o
 # adapter drivers depend on ap.o and zcrypt.o
 obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index d78d776..71dae64 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -25,6 +25,7 @@
 
 #include "zcrypt_api.h"
 #include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("IBM Corporation");
@@ -71,6 +72,17 @@ struct protaeskeytoken {
 	u8  protkey[MAXPROTKEYSIZE]; /* the protected key blob */
 } __packed;
 
+/* inside view of a clear key token (type 0x00 version 0x02) */
+struct clearaeskeytoken {
+	u8  type;	 /* 0x00 for PAES specific key tokens */
+	u8  res0[3];
+	u8  version;	 /* 0x02 for clear AES key token */
+	u8  res1[3];
+	u32 keytype;	 /* key type, one of the PKEY_KEYTYPE values */
+	u32 len;	 /* bytes actually stored in clearkey[] */
+	u8  clearkey[0]; /* clear key value */
+} __packed;
+
 /*
  * Create a protected key from a clear key value.
  */
@@ -173,6 +185,72 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey)
 }
 
 /*
+ * Construct EP11 key with given clear key value.
+ */
+static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
+			    u8 *keybuf, size_t *keybuflen)
+{
+	int i, rc;
+	u16 card, dom;
+	u32 nr_apqns, *apqns = NULL;
+
+	/* build a list of apqns suitable for ep11 keys with cpacf support */
+	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			    ZCRYPT_CEX7, EP11_API_V, NULL);
+	if (rc)
+		goto out;
+
+	/* go through the list of apqns and try to build an ep11 key */
+	for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+		card = apqns[i] >> 16;
+		dom = apqns[i] & 0xFFFF;
+		rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
+				      0, clrkey, keybuf, keybuflen);
+		if (rc == 0)
+			break;
+	}
+
+out:
+	kfree(apqns);
+	if (rc)
+		DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/*
+ * Find card and transform EP11 secure key into protected key.
+ */
+static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey)
+{
+	int i, rc;
+	u16 card, dom;
+	u32 nr_apqns, *apqns = NULL;
+	struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+	/* build a list of apqns suitable for this key */
+	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			    ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+	if (rc)
+		goto out;
+
+	/* go through the list of apqns and try to derive a pkey */
+	for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+		card = apqns[i] >> 16;
+		dom = apqns[i] & 0xFFFF;
+		rc = ep11_key2protkey(card, dom, key, kb->head.len,
+				      pkey->protkey, &pkey->len, &pkey->type);
+		if (rc == 0)
+			break;
+	}
+
+out:
+	kfree(apqns);
+	if (rc)
+		DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/*
  * Verify key and give back some info about the key.
  */
 static int pkey_verifykey(const struct pkey_seckey *seckey,
@@ -305,26 +383,90 @@ static int pkey_verifyprotkey(const struct pkey_protkey *protkey)
 static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
 			       struct pkey_protkey *protkey)
 {
+	int rc = -EINVAL;
+	u8 *tmpbuf = NULL;
 	struct keytoken_header *hdr = (struct keytoken_header *)key;
-	struct protaeskeytoken *t;
 
 	switch (hdr->version) {
-	case TOKVER_PROTECTED_KEY:
-		if (keylen != sizeof(struct protaeskeytoken))
-			return -EINVAL;
+	case TOKVER_PROTECTED_KEY: {
+		struct protaeskeytoken *t;
 
+		if (keylen != sizeof(struct protaeskeytoken))
+			goto out;
 		t = (struct protaeskeytoken *)key;
 		protkey->len = t->len;
 		protkey->type = t->keytype;
 		memcpy(protkey->protkey, t->protkey,
 		       sizeof(protkey->protkey));
+		rc = pkey_verifyprotkey(protkey);
+		break;
+	}
+	case TOKVER_CLEAR_KEY: {
+		struct clearaeskeytoken *t;
+		struct pkey_clrkey ckey;
+		union u_tmpbuf {
+			u8 skey[SECKEYBLOBSIZE];
+			u8 ep11key[MAXEP11AESKEYBLOBSIZE];
+		};
+		size_t tmpbuflen = sizeof(union u_tmpbuf);
 
-		return pkey_verifyprotkey(protkey);
+		if (keylen < sizeof(struct clearaeskeytoken))
+			goto out;
+		t = (struct clearaeskeytoken *)key;
+		if (keylen != sizeof(*t) + t->len)
+			goto out;
+		if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16)
+		    || (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24)
+		    || (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32))
+			memcpy(ckey.clrkey, t->clearkey, t->len);
+		else
+			goto out;
+		/* alloc temp key buffer space */
+		tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC);
+		if (!tmpbuf) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* try direct way with the PCKMO instruction */
+		rc = pkey_clr2protkey(t->keytype, &ckey, protkey);
+		if (rc == 0)
+			break;
+		/* PCKMO failed, so try the CCA secure key way */
+		rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype,
+				    ckey.clrkey, tmpbuf);
+		if (rc == 0)
+			rc = pkey_skey2pkey(tmpbuf, protkey);
+		if (rc == 0)
+			break;
+		/* if the CCA way also failed, let's try via EP11 */
+		rc = pkey_clr2ep11key(ckey.clrkey, t->len,
+				      tmpbuf, &tmpbuflen);
+		if (rc == 0)
+			rc = pkey_ep11key2pkey(tmpbuf, protkey);
+		/* now we should really have a protected key */
+		DEBUG_ERR("%s unable to build protected key from clear",
+			  __func__);
+		break;
+	}
+	case TOKVER_EP11_AES: {
+		if (keylen < MINEP11AESKEYBLOBSIZE)
+			goto out;
+		/* check if the ep11 key is exportable as a protected key */
+		rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1);
+		if (rc)
+			goto out;
+		rc = pkey_ep11key2pkey(key, protkey);
+		break;
+	}
 	default:
 		DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
 			  __func__, hdr->version);
-		return -EINVAL;
+		rc = -EINVAL;
 	}
+
+out:
+	kfree(tmpbuf);
+	return rc;
 }
 
 /*
@@ -403,6 +545,10 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
 		if (*keybufsize < SECKEYBLOBSIZE)
 			return -EINVAL;
 		break;
+	case PKEY_TYPE_EP11:
+		if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+			return -EINVAL;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -419,7 +565,10 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
 		card = apqns[i].card;
 		dom = apqns[i].domain;
-		if (ktype == PKEY_TYPE_CCA_DATA) {
+		if (ktype == PKEY_TYPE_EP11) {
+			rc = ep11_genaeskey(card, dom, ksize, kflags,
+					    keybuf, keybufsize);
+		} else if (ktype == PKEY_TYPE_CCA_DATA) {
 			rc = cca_genseckey(card, dom, ksize, keybuf);
 			*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
 		} else /* TOKVER_CCA_VLSC */
@@ -450,6 +599,10 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
 		if (*keybufsize < SECKEYBLOBSIZE)
 			return -EINVAL;
 		break;
+	case PKEY_TYPE_EP11:
+		if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+			return -EINVAL;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -466,7 +619,10 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
 		card = apqns[i].card;
 		dom = apqns[i].domain;
-		if (ktype == PKEY_TYPE_CCA_DATA) {
+		if (ktype == PKEY_TYPE_EP11) {
+			rc = ep11_clr2keyblob(card, dom, ksize, kflags,
+					      clrkey, keybuf, keybufsize);
+		} else if (ktype == PKEY_TYPE_CCA_DATA) {
 			rc = cca_clr2seckey(card, dom, ksize,
 					    clrkey, keybuf);
 			*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
@@ -489,11 +645,11 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
 	u32 _nr_apqns, *_apqns = NULL;
 	struct keytoken_header *hdr = (struct keytoken_header *)key;
 
-	if (keylen < sizeof(struct keytoken_header) ||
-	    hdr->type != TOKTYPE_CCA_INTERNAL)
+	if (keylen < sizeof(struct keytoken_header))
 		return -EINVAL;
 
-	if (hdr->version == TOKVER_CCA_AES) {
+	if (hdr->type == TOKTYPE_CCA_INTERNAL
+	    && hdr->version == TOKVER_CCA_AES) {
 		struct secaeskeytoken *t = (struct secaeskeytoken *)key;
 
 		rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
@@ -521,7 +677,8 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
 		*cardnr = ((struct pkey_apqn *)_apqns)->card;
 		*domain = ((struct pkey_apqn *)_apqns)->domain;
 
-	} else if (hdr->version == TOKVER_CCA_VLSC) {
+	} else if (hdr->type == TOKTYPE_CCA_INTERNAL
+		   && hdr->version == TOKVER_CCA_VLSC) {
 		struct cipherkeytoken *t = (struct cipherkeytoken *)key;
 
 		rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
@@ -556,6 +713,29 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
 		*cardnr = ((struct pkey_apqn *)_apqns)->card;
 		*domain = ((struct pkey_apqn *)_apqns)->domain;
 
+	} else if (hdr->type == TOKTYPE_NON_CCA
+		   && hdr->version == TOKVER_EP11_AES) {
+		struct ep11keyblob *kb = (struct ep11keyblob *)key;
+
+		rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1);
+		if (rc)
+			goto out;
+		if (ktype)
+			*ktype = PKEY_TYPE_EP11;
+		if (ksize)
+			*ksize = kb->head.keybitlen;
+
+		rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+				    ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+		if (rc)
+			goto out;
+
+		if (flags)
+			*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+		*cardnr = ((struct pkey_apqn *)_apqns)->card;
+		*domain = ((struct pkey_apqn *)_apqns)->domain;
+
 	} else
 		rc = -EINVAL;
 
@@ -578,30 +758,32 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
 	if (keylen < sizeof(struct keytoken_header))
 		return -EINVAL;
 
-	switch (hdr->type) {
-	case TOKTYPE_NON_CCA:
-		return pkey_nonccatok2pkey(key, keylen, pkey);
-	case TOKTYPE_CCA_INTERNAL:
-		switch (hdr->version) {
-		case TOKVER_CCA_AES:
+	if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+		if (hdr->version == TOKVER_CCA_AES) {
 			if (keylen != sizeof(struct secaeskeytoken))
 				return -EINVAL;
 			if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
 				return -EINVAL;
-			break;
-		case TOKVER_CCA_VLSC:
+		} else if (hdr->version == TOKVER_CCA_VLSC) {
 			if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
 				return -EINVAL;
 			if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
 				return -EINVAL;
-			break;
-		default:
+		} else {
 			DEBUG_ERR("%s unknown CCA internal token version %d\n",
 				  __func__, hdr->version);
 			return -EINVAL;
 		}
-		break;
-	default:
+	} else if (hdr->type == TOKTYPE_NON_CCA) {
+		if (hdr->version == TOKVER_EP11_AES) {
+			if (keylen < sizeof(struct ep11keyblob))
+				return -EINVAL;
+			if (ep11_check_aeskeyblob(debug_info, 3, key, 0, 1))
+				return -EINVAL;
+		} else {
+			return pkey_nonccatok2pkey(key, keylen, pkey);
+		}
+	} else {
 		DEBUG_ERR("%s unknown/unsupported blob type %d\n",
 			  __func__, hdr->type);
 		return -EINVAL;
@@ -611,12 +793,21 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
 		card = apqns[i].card;
 		dom = apqns[i].domain;
-		if (hdr->version == TOKVER_CCA_AES)
+		if (hdr->type == TOKTYPE_CCA_INTERNAL
+		    && hdr->version == TOKVER_CCA_AES)
 			rc = cca_sec2protkey(card, dom, key, pkey->protkey,
 					     &pkey->len, &pkey->type);
-		else /* TOKVER_CCA_VLSC */
+		else if (hdr->type == TOKTYPE_CCA_INTERNAL
+			 && hdr->version == TOKVER_CCA_VLSC)
 			rc = cca_cipher2protkey(card, dom, key, pkey->protkey,
 						&pkey->len, &pkey->type);
+		else { /* EP11 AES secure key blob */
+			struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+			rc = ep11_key2protkey(card, dom, key, kb->head.len,
+					      pkey->protkey, &pkey->len,
+					      &pkey->type);
+		}
 		if (rc == 0)
 			break;
 	}
@@ -631,12 +822,24 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
 	u32 _nr_apqns, *_apqns = NULL;
 	struct keytoken_header *hdr = (struct keytoken_header *)key;
 
-	if (keylen < sizeof(struct keytoken_header) ||
-	    hdr->type != TOKTYPE_CCA_INTERNAL ||
-	    flags == 0)
+	if (keylen < sizeof(struct keytoken_header) || flags == 0)
 		return -EINVAL;
 
-	if (hdr->version == TOKVER_CCA_AES || hdr->version == TOKVER_CCA_VLSC) {
+	if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_AES) {
+		int minhwtype = 0, api = 0;
+		struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+		if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+			return -EINVAL;
+		if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+			minhwtype = ZCRYPT_CEX7;
+			api = EP11_API_V;
+		}
+		rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+				    minhwtype, api, kb->wkvp);
+		if (rc)
+			goto out;
+	} else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
 		int minhwtype = ZCRYPT_CEX3C;
 		u64 cur_mkvp = 0, old_mkvp = 0;
 
@@ -647,7 +850,7 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
 				cur_mkvp = t->mkvp;
 			if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
 				old_mkvp = t->mkvp;
-		} else {
+		} else if (hdr->version == TOKVER_CCA_VLSC) {
 			struct cipherkeytoken *t = (struct cipherkeytoken *)key;
 
 			minhwtype = ZCRYPT_CEX6;
@@ -655,19 +858,24 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
 				cur_mkvp = t->mkvp0;
 			if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
 				old_mkvp = t->mkvp0;
+		} else {
+			/* unknown cca internal token type */
+			return -EINVAL;
 		}
 		rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
 				   minhwtype, cur_mkvp, old_mkvp, 1);
 		if (rc)
 			goto out;
-		if (apqns) {
-			if (*nr_apqns < _nr_apqns)
-				rc = -ENOSPC;
-			else
-				memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
-		}
-		*nr_apqns = _nr_apqns;
+	} else
+		return -EINVAL;
+
+	if (apqns) {
+		if (*nr_apqns < _nr_apqns)
+			rc = -ENOSPC;
+		else
+			memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
 	}
+	*nr_apqns = _nr_apqns;
 
 out:
 	kfree(_apqns);
@@ -695,14 +903,26 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype,
 				   minhwtype, cur_mkvp, old_mkvp, 1);
 		if (rc)
 			goto out;
-		if (apqns) {
-			if (*nr_apqns < _nr_apqns)
-				rc = -ENOSPC;
-			else
-				memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
-		}
-		*nr_apqns = _nr_apqns;
+	} else if (ktype == PKEY_TYPE_EP11) {
+		u8 *wkvp = NULL;
+
+		if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+			wkvp = cur_mkvp;
+		rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+				    ZCRYPT_CEX7, EP11_API_V, wkvp);
+		if (rc)
+			goto out;
+
+	} else
+		return -EINVAL;
+
+	if (apqns) {
+		if (*nr_apqns < _nr_apqns)
+			rc = -ENOSPC;
+		else
+			memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
 	}
+	*nr_apqns = _nr_apqns;
 
 out:
 	kfree(_apqns);
@@ -1357,8 +1577,9 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
 					    bool is_xts, char *buf, loff_t off,
 					    size_t count)
 {
-	size_t keysize;
-	int rc;
+	int i, rc, card, dom;
+	u32 nr_apqns, *apqns = NULL;
+	size_t keysize = CCACIPHERTOKENSIZE;
 
 	if (off != 0 || count < CCACIPHERTOKENSIZE)
 		return -EINVAL;
@@ -1366,22 +1587,31 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
 		if (count < 2 * CCACIPHERTOKENSIZE)
 			return -EINVAL;
 
-	keysize = CCACIPHERTOKENSIZE;
-	rc = cca_gencipherkey(-1, -1, keybits, 0, buf, &keysize);
+	/* build a list of apqns able to generate a cipher key */
+	rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			   ZCRYPT_CEX6, 0, 0, 0);
 	if (rc)
 		return rc;
-	memset(buf + keysize, 0, CCACIPHERTOKENSIZE - keysize);
+
+	memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+	/* simply try all apqns from the list */
+	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+		card = apqns[i] >> 16;
+		dom = apqns[i] & 0xFFFF;
+		rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			break;
+	}
+	if (rc)
+		return rc;
 
 	if (is_xts) {
 		keysize = CCACIPHERTOKENSIZE;
-		rc = cca_gencipherkey(-1, -1, keybits, 0,
-				      buf + CCACIPHERTOKENSIZE, &keysize);
-		if (rc)
-			return rc;
-		memset(buf + CCACIPHERTOKENSIZE + keysize, 0,
-		       CCACIPHERTOKENSIZE - keysize);
-
-		return 2 * CCACIPHERTOKENSIZE;
+		buf += CCACIPHERTOKENSIZE;
+		rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			return 2 * CCACIPHERTOKENSIZE;
 	}
 
 	return CCACIPHERTOKENSIZE;
@@ -1457,10 +1687,134 @@ static struct attribute_group ccacipher_attr_group = {
 	.bin_attrs = ccacipher_attrs,
 };
 
+/*
+ * Sysfs attribute read function for all ep11 aes key binary attributes.
+ * The implementation can not deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ * This function and the sysfs attributes using it provide EP11 key blobs
+ * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
+ * 320 bytes.
+ */
+static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+				       bool is_xts, char *buf, loff_t off,
+				       size_t count)
+{
+	int i, rc, card, dom;
+	u32 nr_apqns, *apqns = NULL;
+	size_t keysize = MAXEP11AESKEYBLOBSIZE;
+
+	if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
+		return -EINVAL;
+	if (is_xts)
+		if (count < 2 * MAXEP11AESKEYBLOBSIZE)
+			return -EINVAL;
+
+	/* build a list of apqns able to generate a cipher key */
+	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			    ZCRYPT_CEX7, EP11_API_V, NULL);
+	if (rc)
+		return rc;
+
+	memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+	/* simply try all apqns from the list */
+	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+		card = apqns[i] >> 16;
+		dom = apqns[i] & 0xFFFF;
+		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			break;
+	}
+	if (rc)
+		return rc;
+
+	if (is_xts) {
+		keysize = MAXEP11AESKEYBLOBSIZE;
+		buf += MAXEP11AESKEYBLOBSIZE;
+		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			return 2 * MAXEP11AESKEYBLOBSIZE;
+	}
+
+	return MAXEP11AESKEYBLOBSIZE;
+}
+
+static ssize_t ep11_aes_128_read(struct file *filp,
+				 struct kobject *kobj,
+				 struct bin_attribute *attr,
+				 char *buf, loff_t off,
+				 size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+				       off, count);
+}
+
+static ssize_t ep11_aes_192_read(struct file *filp,
+				 struct kobject *kobj,
+				 struct bin_attribute *attr,
+				 char *buf, loff_t off,
+				 size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+				       off, count);
+}
+
+static ssize_t ep11_aes_256_read(struct file *filp,
+				 struct kobject *kobj,
+				 struct bin_attribute *attr,
+				 char *buf, loff_t off,
+				 size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+				       off, count);
+}
+
+static ssize_t ep11_aes_128_xts_read(struct file *filp,
+				     struct kobject *kobj,
+				     struct bin_attribute *attr,
+				     char *buf, loff_t off,
+				     size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+				       off, count);
+}
+
+static ssize_t ep11_aes_256_xts_read(struct file *filp,
+				     struct kobject *kobj,
+				     struct bin_attribute *attr,
+				     char *buf, loff_t off,
+				     size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+				       off, count);
+}
+
+static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+
+static struct bin_attribute *ep11_attrs[] = {
+	&bin_attr_ep11_aes_128,
+	&bin_attr_ep11_aes_192,
+	&bin_attr_ep11_aes_256,
+	&bin_attr_ep11_aes_128_xts,
+	&bin_attr_ep11_aes_256_xts,
+	NULL
+};
+
+static struct attribute_group ep11_attr_group = {
+	.name	   = "ep11",
+	.bin_attrs = ep11_attrs,
+};
+
 static const struct attribute_group *pkey_attr_groups[] = {
 	&protkey_attr_group,
 	&ccadata_attr_group,
 	&ccacipher_attr_group,
+	&ep11_attr_group,
 	NULL,
 };
 
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 9157e72..a42257d 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -36,6 +36,7 @@
 #include "zcrypt_msgtype6.h"
 #include "zcrypt_msgtype50.h"
 #include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
 
 /*
  * Module description.
@@ -849,7 +850,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
 			/* check if device is online and eligible */
 			if (!zq->online ||
 			    !zq->ops->send_cprb ||
-			    (tdom != (unsigned short) AUTOSELECT &&
+			    (tdom != AUTOSEL_DOM &&
 			     tdom != AP_QID_QUEUE(zq->queue->qid)))
 				continue;
 			/* check if device node has admission for this queue */
@@ -874,7 +875,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
 
 	/* in case of auto select, provide the correct domain */
 	qid = pref_zq->queue->qid;
-	if (*domain == (unsigned short) AUTOSELECT)
+	if (*domain == AUTOSEL_DOM)
 		*domain = AP_QID_QUEUE(qid);
 
 	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
@@ -901,7 +902,7 @@ static bool is_desired_ep11_card(unsigned int dev_id,
 				 struct ep11_target_dev *targets)
 {
 	while (target_num-- > 0) {
-		if (dev_id == targets->ap_id)
+		if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
 			return true;
 		targets++;
 	}
@@ -912,16 +913,19 @@ static bool is_desired_ep11_queue(unsigned int dev_qid,
 				  unsigned short target_num,
 				  struct ep11_target_dev *targets)
 {
+	int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
+
 	while (target_num-- > 0) {
-		if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
+		if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
+		    (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
 			return true;
 		targets++;
 	}
 	return false;
 }
 
-static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
-				  struct ep11_urb *xcrb)
+static long _zcrypt_send_ep11_cprb(struct ap_perms *perms,
+				   struct ep11_urb *xcrb)
 {
 	struct zcrypt_card *zc, *pref_zc;
 	struct zcrypt_queue *zq, *pref_zq;
@@ -1026,6 +1030,12 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
 	return rc;
 }
 
+long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
+{
+	return _zcrypt_send_ep11_cprb(&ap_perms, xcrb);
+}
+EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
+
 static long zcrypt_rng(char *buffer)
 {
 	struct zcrypt_card *zc, *pref_zc;
@@ -1366,12 +1376,12 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 		if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
 			return -EFAULT;
 		do {
-			rc = zcrypt_send_ep11_cprb(perms, &xcrb);
+			rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
 		} while (rc == -EAGAIN);
 		/* on failure: retry once again after a requested rescan */
 		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
 			do {
-				rc = zcrypt_send_ep11_cprb(perms, &xcrb);
+				rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
 			} while (rc == -EAGAIN);
 		if (rc)
 			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
@@ -1885,6 +1895,7 @@ void __exit zcrypt_api_exit(void)
 	zcrypt_msgtype6_exit();
 	zcrypt_msgtype50_exit();
 	zcrypt_ccamisc_exit();
+	zcrypt_ep11misc_exit();
 	zcrypt_debug_exit();
 }
 
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index d464618..599e68b 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -140,6 +140,7 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
 int zcrypt_api_init(void);
 void zcrypt_api_exit(void);
 long zcrypt_send_cprb(struct ica_xcRB *xcRB);
+long zcrypt_send_ep11_cprb(struct ep11_urb *urb);
 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
 int zcrypt_device_status_ext(int card, int queue,
 			     struct zcrypt_device_status_ext *devstatus);
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index 77b6cc7..3a9876d 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -19,6 +19,7 @@
 
 /* For TOKTYPE_NON_CCA: */
 #define TOKVER_PROTECTED_KEY	0x01 /* Protected key token */
+#define TOKVER_CLEAR_KEY	0x02 /* Clear key token */
 
 /* For TOKTYPE_CCA_INTERNAL: */
 #define TOKVER_CCA_AES		0x04 /* CCA AES key token */
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 6fabc90..9a9d02e 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -19,6 +19,7 @@
 #include "zcrypt_error.h"
 #include "zcrypt_cex4.h"
 #include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
 
 #define CEX4A_MIN_MOD_SIZE	  1	/*    8 bits	*/
 #define CEX4A_MAX_MOD_SIZE_2K	256	/* 2048 bits	*/
@@ -71,11 +72,11 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = {
 MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
 
 /*
- * CCA card addditional device attributes
+ * CCA card additional device attributes
  */
-static ssize_t serialnr_show(struct device *dev,
-			     struct device_attribute *attr,
-			     char *buf)
+static ssize_t cca_serialnr_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
 {
 	struct cca_info ci;
 	struct ap_card *ac = to_ap_card(dev);
@@ -88,23 +89,25 @@ static ssize_t serialnr_show(struct device *dev,
 
 	return snprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
 }
-static DEVICE_ATTR_RO(serialnr);
+
+static struct device_attribute dev_attr_cca_serialnr =
+	__ATTR(serialnr, 0444, cca_serialnr_show, NULL);
 
 static struct attribute *cca_card_attrs[] = {
-	&dev_attr_serialnr.attr,
+	&dev_attr_cca_serialnr.attr,
 	NULL,
 };
 
-static const struct attribute_group cca_card_attr_group = {
+static const struct attribute_group cca_card_attr_grp = {
 	.attrs = cca_card_attrs,
 };
 
-/*
- * CCA queue addditional device attributes
- */
-static ssize_t mkvps_show(struct device *dev,
-			  struct device_attribute *attr,
-			  char *buf)
+/*
+ * CCA queue additional device attributes
+ */
+static ssize_t cca_mkvps_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
 {
 	int n = 0;
 	struct cca_info ci;
@@ -138,17 +141,233 @@ static ssize_t mkvps_show(struct device *dev,
 
 	return n;
 }
-static DEVICE_ATTR_RO(mkvps);
+
+static struct device_attribute dev_attr_cca_mkvps =
+	__ATTR(mkvps, 0444, cca_mkvps_show, NULL);
 
 static struct attribute *cca_queue_attrs[] = {
-	&dev_attr_mkvps.attr,
+	&dev_attr_cca_mkvps.attr,
 	NULL,
 };
 
-static const struct attribute_group cca_queue_attr_group = {
+static const struct attribute_group cca_queue_attr_grp = {
 	.attrs = cca_queue_attrs,
 };
 
+/*
+ * EP11 card additional device attributes
+ */
+static ssize_t ep11_api_ordinalnr_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct ep11_card_info ci;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+
+	memset(&ci, 0, sizeof(ci));
+
+	ep11_get_card_info(ac->id, &ci, zc->online);
+
+	if (ci.API_ord_nr > 0)
+		return snprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr);
+	else
+		return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_api_ordinalnr =
+	__ATTR(API_ordinalnr, 0444, ep11_api_ordinalnr_show, NULL);
+
+static ssize_t ep11_fw_version_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct ep11_card_info ci;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+
+	memset(&ci, 0, sizeof(ci));
+
+	ep11_get_card_info(ac->id, &ci, zc->online);
+
+	if (ci.FW_version > 0)
+		return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+				(int)(ci.FW_version >> 8),
+				(int)(ci.FW_version & 0xFF));
+	else
+		return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_fw_version =
+	__ATTR(FW_version, 0444, ep11_fw_version_show, NULL);
+
+static ssize_t ep11_serialnr_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct ep11_card_info ci;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+
+	memset(&ci, 0, sizeof(ci));
+
+	ep11_get_card_info(ac->id, &ci, zc->online);
+
+	if (ci.serial[0])
+		return snprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial);
+	else
+		return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_serialnr =
+	__ATTR(serialnr, 0444, ep11_serialnr_show, NULL);
+
+static const struct {
+	int	    mode_bit;
+	const char *mode_txt;
+} ep11_op_modes[] = {
+	{ 0, "FIPS2009" },
+	{ 1, "BSI2009" },
+	{ 2, "FIPS2011" },
+	{ 3, "BSI2011" },
+	{ 6, "BSICC2017" },
+	{ 0, NULL }
+};
+
+static ssize_t ep11_card_op_modes_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	int i, n = 0;
+	struct ep11_card_info ci;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+
+	memset(&ci, 0, sizeof(ci));
+
+	ep11_get_card_info(ac->id, &ci, zc->online);
+
+	for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+		if (ci.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
+			if (n > 0)
+				buf[n++] = ' ';
+			n += snprintf(buf + n, PAGE_SIZE - n,
+				      "%s", ep11_op_modes[i].mode_txt);
+		}
+	}
+	n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+
+	return n;
+}
+
+static struct device_attribute dev_attr_ep11_card_op_modes =
+	__ATTR(op_modes, 0444, ep11_card_op_modes_show, NULL);
+
+static struct attribute *ep11_card_attrs[] = {
+	&dev_attr_ep11_api_ordinalnr.attr,
+	&dev_attr_ep11_fw_version.attr,
+	&dev_attr_ep11_serialnr.attr,
+	&dev_attr_ep11_card_op_modes.attr,
+	NULL,
+};
+
+static const struct attribute_group ep11_card_attr_grp = {
+	.attrs = ep11_card_attrs,
+};
+
+/*
+ * EP11 queue additional device attributes
+ */
+
+static ssize_t ep11_mkvps_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	int n = 0;
+	struct ep11_domain_info di;
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+	static const char * const cwk_state[] = { "invalid", "valid" };
+	static const char * const nwk_state[] = { "empty", "uncommitted",
+						  "committed" };
+
+	memset(&di, 0, sizeof(di));
+
+	if (zq->online)
+		ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+				     AP_QID_QUEUE(zq->queue->qid),
+				     &di);
+
+	if (di.cur_wk_state == '0') {
+		n = snprintf(buf, PAGE_SIZE, "WK CUR: %s -\n",
+			     cwk_state[di.cur_wk_state - '0']);
+	} else if (di.cur_wk_state == '1') {
+		n = snprintf(buf, PAGE_SIZE, "WK CUR: %s 0x",
+			     cwk_state[di.cur_wk_state - '0']);
+		bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
+		n += 2 * sizeof(di.cur_wkvp);
+		n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+	} else
+		n = snprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
+
+	if (di.new_wk_state == '0') {
+		n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
+			      nwk_state[di.new_wk_state - '0']);
+	} else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') {
+		n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x",
+			      nwk_state[di.new_wk_state - '0']);
+		bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
+		n += 2 * sizeof(di.new_wkvp);
+		n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+	} else
+		n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
+
+	return n;
+}
+
+static struct device_attribute dev_attr_ep11_mkvps =
+	__ATTR(mkvps, 0444, ep11_mkvps_show, NULL);
+
+static ssize_t ep11_queue_op_modes_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	int i, n = 0;
+	struct ep11_domain_info di;
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+	memset(&di, 0, sizeof(di));
+
+	if (zq->online)
+		ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+				     AP_QID_QUEUE(zq->queue->qid),
+				     &di);
+
+	for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+		if (di.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
+			if (n > 0)
+				buf[n++] = ' ';
+			n += snprintf(buf + n, PAGE_SIZE - n,
+				      "%s", ep11_op_modes[i].mode_txt);
+		}
+	}
+	n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+
+	return n;
+}
+
+static struct device_attribute dev_attr_ep11_queue_op_modes =
+	__ATTR(op_modes, 0444, ep11_queue_op_modes_show, NULL);
+
+static struct attribute *ep11_queue_attrs[] = {
+	&dev_attr_ep11_mkvps.attr,
+	&dev_attr_ep11_queue_op_modes.attr,
+	NULL,
+};
+
+static const struct attribute_group ep11_queue_attr_grp = {
+	.attrs = ep11_queue_attrs,
+};
+
 /**
  * Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always
  * accepts the AP device since the bus_match already checked
@@ -313,7 +532,12 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
 
 	if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
 		rc = sysfs_create_group(&ap_dev->device.kobj,
-					&cca_card_attr_group);
+					&cca_card_attr_grp);
+		if (rc)
+			zcrypt_card_unregister(zc);
+	} else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+		rc = sysfs_create_group(&ap_dev->device.kobj,
+					&ep11_card_attr_grp);
 		if (rc)
 			zcrypt_card_unregister(zc);
 	}
@@ -332,7 +556,9 @@ static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
 	struct zcrypt_card *zc = ac->private;
 
 	if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
-		sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_group);
+		sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
+	else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
+		sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
 	if (zc)
 		zcrypt_card_unregister(zc);
 }
@@ -394,7 +620,12 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
 
 	if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
 		rc = sysfs_create_group(&ap_dev->device.kobj,
-					&cca_queue_attr_group);
+					&cca_queue_attr_grp);
+		if (rc)
+			zcrypt_queue_unregister(zq);
+	} else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+		rc = sysfs_create_group(&ap_dev->device.kobj,
+					&ep11_queue_attr_grp);
 		if (rc)
 			zcrypt_queue_unregister(zq);
 	}
@@ -413,7 +644,9 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
 	struct zcrypt_queue *zq = aq->private;
 
 	if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
-		sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_group);
+		sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
+	else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
+		sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
 	if (zq)
 		zcrypt_queue_unregister(zq);
 }
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
new file mode 100644
index 0000000..d4caf46
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -0,0 +1,1293 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  Copyright IBM Corp. 2019
+ *  Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ *  Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_ep11misc.h"
+#include "zcrypt_ccamisc.h"
+
+#define DEBUG_DBG(...)	ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
+#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
+#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
+#define DEBUG_ERR(...)	ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
+
+/* default iv used here */
+static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+			       0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
+
+/* ep11 card info cache */
+struct card_list_entry {
+	struct list_head list;
+	u16 cardnr;
+	struct ep11_card_info info;
+};
+static LIST_HEAD(card_list);
+static DEFINE_SPINLOCK(card_list_lock);
+
+static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci)
+{
+	int rc = -ENOENT;
+	struct card_list_entry *ptr;
+
+	spin_lock_bh(&card_list_lock);
+	list_for_each_entry(ptr, &card_list, list) {
+		if (ptr->cardnr == cardnr) {
+			memcpy(ci, &ptr->info, sizeof(*ci));
+			rc = 0;
+			break;
+		}
+	}
+	spin_unlock_bh(&card_list_lock);
+
+	return rc;
+}
+
+static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci)
+{
+	int found = 0;
+	struct card_list_entry *ptr;
+
+	spin_lock_bh(&card_list_lock);
+	list_for_each_entry(ptr, &card_list, list) {
+		if (ptr->cardnr == cardnr) {
+			memcpy(&ptr->info, ci, sizeof(*ci));
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
+		if (!ptr) {
+			spin_unlock_bh(&card_list_lock);
+			return;
+		}
+		ptr->cardnr = cardnr;
+		memcpy(&ptr->info, ci, sizeof(*ci));
+		list_add(&ptr->list, &card_list);
+	}
+	spin_unlock_bh(&card_list_lock);
+}
+
+static void card_cache_scrub(u16 cardnr)
+{
+	struct card_list_entry *ptr;
+
+	spin_lock_bh(&card_list_lock);
+	list_for_each_entry(ptr, &card_list, list) {
+		if (ptr->cardnr == cardnr) {
+			list_del(&ptr->list);
+			kfree(ptr);
+			break;
+		}
+	}
+	spin_unlock_bh(&card_list_lock);
+}
+
+static void __exit card_cache_free(void)
+{
+	struct card_list_entry *ptr, *pnext;
+
+	spin_lock_bh(&card_list_lock);
+	list_for_each_entry_safe(ptr, pnext, &card_list, list) {
+		list_del(&ptr->list);
+		kfree(ptr);
+	}
+	spin_unlock_bh(&card_list_lock);
+}
+
+/*
+ * Simple check if the key blob is a valid EP11 secure AES key.
+ */
+int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl,
+			  const u8 *key, int keybitsize,
+			  int checkcpacfexport)
+{
+	struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+	if (kb->head.type != TOKTYPE_NON_CCA) {
+		if (dbg)
+			DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+			    __func__, (int) kb->head.type, TOKTYPE_NON_CCA);
+		return -EINVAL;
+	}
+	if (kb->head.version != TOKVER_EP11_AES) {
+		if (dbg)
+			DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+			    __func__, (int) kb->head.version, TOKVER_EP11_AES);
+		return -EINVAL;
+	}
+	if (kb->version != EP11_STRUCT_MAGIC) {
+		if (dbg)
+			DBF("%s key check failed, magic 0x%04x != 0x%04x\n",
+			    __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+		return -EINVAL;
+	}
+	switch (kb->head.keybitlen) {
+	case 128:
+	case 192:
+	case 256:
+		break;
+	default:
+		if (dbg)
+			DBF("%s key check failed, keybitlen %d invalid\n",
+			    __func__, (int) kb->head.keybitlen);
+		return -EINVAL;
+	}
+	if (keybitsize > 0 && keybitsize != (int) kb->head.keybitlen) {
+		DBF("%s key check failed, keybitsize %d\n",
+		    __func__, keybitsize);
+		return -EINVAL;
+	}
+	if (checkcpacfexport && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+		if (dbg)
+			DBF("%s key check failed, PKEY_EXTRACTABLE is 0\n",
+			    __func__);
+		return -EINVAL;
+	}
+#undef DBF
+
+	return 0;
+}
+EXPORT_SYMBOL(ep11_check_aeskeyblob);
+
+/*
+ * Helper function which calls zcrypt_send_ep11_cprb with
+ * memory management segment adjusted to kernel space
+ * so that the copy_from_user called within this
+ * function does in fact copy from kernel space.
+ */
+static inline int _zcrypt_send_ep11_cprb(struct ep11_urb *urb)
+{
+	int rc;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+	rc = zcrypt_send_ep11_cprb(urb);
+	set_fs(old_fs);
+
+	return rc;
+}
+
+/*
+ * Allocate and prepare ep11 cprb plus additional payload.
+ */
+static inline struct ep11_cprb *alloc_cprb(size_t payload_len)
+{
+	size_t len = sizeof(struct ep11_cprb) + payload_len;
+	struct ep11_cprb *cprb;
+
+	cprb = kmalloc(len, GFP_KERNEL);
+	if (!cprb)
+		return NULL;
+
+	memset(cprb, 0, len);
+	cprb->cprb_len = sizeof(struct ep11_cprb);
+	cprb->cprb_ver_id = 0x04;
+	memcpy(cprb->func_id, "T4", 2);
+	cprb->ret_code = 0xFFFFFFFF;
+	cprb->payload_len = payload_len;
+
+	return cprb;
+}
+
+/*
+ * Some helper functions related to ASN1 encoding.
+ * Limited to length info <= 2 byte.
+ */
+
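+/* total encoded size in bytes of a tag + length info + x value bytes */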
+#define ASN1TAGLEN(x) (2 + (x) + ((x) > 127 ? 1 : 0) + ((x) > 255 ? 1 : 0))
+
+static int asn1tag_write(u8 *ptr, u8 tag, const u8 *pvalue, u16 valuelen)
+{
+	ptr[0] = tag;
+	if (valuelen > 255) {
+		ptr[1] = 0x82;
+		*((u16 *)(ptr + 2)) = valuelen;
+		memcpy(ptr + 4, pvalue, valuelen);
+		return 4 + valuelen;
+	}
+	if (valuelen > 127) {
+		ptr[1] = 0x81;
+		ptr[2] = (u8) valuelen;
+		memcpy(ptr + 3, pvalue, valuelen);
+		return 3 + valuelen;
+	}
+	ptr[1] = (u8) valuelen;
+	memcpy(ptr + 2, pvalue, valuelen);
+	return 2 + valuelen;
+}
+
+/* EP11 payload > 127 bytes starts with this struct */
+struct pl_head {
+	u8  tag;
+	u8  lenfmt;
+	u16 len;
+	u8  func_tag;
+	u8  func_len;
+	u32 func;
+	u8  dom_tag;
+	u8  dom_len;
+	u32 dom;
+} __packed;
+
+/* prep ep11 payload head helper function */
+static inline void prep_head(struct pl_head *h,
+			     size_t pl_size, int api, int func)
+{
+	h->tag = 0x30;
+	h->lenfmt = 0x82;
+	h->len = pl_size - 4;
+	h->func_tag = 0x04;
+	h->func_len = sizeof(u32);
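+	/* upper 16 bits: API version, lower 16 bits: EP11 function code */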
+	h->func = (api << 16) + func;
+	h->dom_tag = 0x04;
+	h->dom_len = sizeof(u32);
+}
+
+/* prep urb helper function */
+static inline void prep_urb(struct ep11_urb *u,
+			    struct ep11_target_dev *t, int nt,
+			    struct ep11_cprb *req, size_t req_len,
+			    struct ep11_cprb *rep, size_t rep_len)
+{
+	u->targets = (u8 __user *) t;
+	u->targets_num = nt;
+	u->req = (u8 __user *) req;
+	u->req_len = req_len;
+	u->resp = (u8 __user *) rep;
+	u->resp_len = rep_len;
+}
+
+/* Check ep11 reply payload, return 0 or suggested errno value. */
+static int check_reply_pl(const u8 *pl, const char *func)
+{
+	int len;
+	u32 ret;
+
+	/* start tag */
+	if (*pl++ != 0x30) {
+		DEBUG_ERR("%s reply start tag mismatch\n", func);
+		return -EIO;
+	}
+
+	/* payload length format */
+	if (*pl < 127) {
+		len = *pl;
+		pl++;
+	} else if (*pl == 0x81) {
+		pl++;
+		len = *pl;
+		pl++;
+	} else if (*pl == 0x82) {
+		pl++;
+		len = *((u16 *)pl);
+		pl += 2;
+	} else {
+		DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
+			  func, *pl);
+		return -EIO;
+	}
+
+	/* len should cover at least 3 fields with 32 bit value each */
+	if (len < 3 * 6) {
+		DEBUG_ERR("%s reply length %d too small\n", func, len);
+		return -EIO;
+	}
+
+	/* function tag, length and value */
+	if (pl[0] != 0x04 || pl[1] != 0x04) {
+		DEBUG_ERR("%s function tag or length mismatch\n", func);
+		return -EIO;
+	}
+	pl += 6;
+
+	/* dom tag, length and value */
+	if (pl[0] != 0x04 || pl[1] != 0x04) {
+		DEBUG_ERR("%s dom tag or length mismatch\n", func);
+		return -EIO;
+	}
+	pl += 6;
+
+	/* return value tag, length and value */
+	if (pl[0] != 0x04 || pl[1] != 0x04) {
+		DEBUG_ERR("%s return value tag or length mismatch\n", func);
+		return -EIO;
+	}
+	pl += 2;
+	ret = *((u32 *)pl);
+	if (ret != 0) {
+		DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Helper function which does an ep11 query with given query type.
+ */
+static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
+			   size_t buflen, u8 *buf)
+{
+	struct ep11_info_req_pl {
+		struct pl_head head;
+		u8  query_type_tag;
+		u8  query_type_len;
+		u32 query_type;
+		u8  query_subtype_tag;
+		u8  query_subtype_len;
+		u32 query_subtype;
+	} __packed * req_pl;
+	struct ep11_info_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		u16 data_len;
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	int api = 1, rc = -ENOMEM;
+
+	/* request cprb and payload */
+	req = alloc_cprb(sizeof(struct ep11_info_req_pl));
+	if (!req)
+		goto out;
+	req_pl = (struct ep11_info_req_pl *) (((u8 *) req) + sizeof(*req));
+	prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */
+	req_pl->query_type_tag = 0x04;
+	req_pl->query_type_len = sizeof(u32);
+	req_pl->query_type = query_type;
+	req_pl->query_subtype_tag = 0x04;
+	req_pl->query_subtype_len = sizeof(u32);
+
+	/* reply cprb and payload */
+	rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen);
+	if (!rep)
+		goto out;
+	rep_pl = (struct ep11_info_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = cardnr;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + sizeof(*req_pl),
+		 rep, sizeof(*rep) + sizeof(*rep_pl) + buflen);
+
+	rc = _zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) cardnr, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	if (rep_pl->data_len > buflen) {
+		DEBUG_ERR("%s mismatch between reply data len and buffer len\n",
+			  __func__);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	memcpy(buf, ((u8 *) rep_pl) + sizeof(*rep_pl), rep_pl->data_len);
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify)
+{
+	int rc;
+	struct ep11_module_query_info {
+		u32 API_ord_nr;
+		u32 firmware_id;
+		u8  FW_major_vers;
+		u8  FW_minor_vers;
+		u8  CSP_major_vers;
+		u8  CSP_minor_vers;
+		u8  fwid[32];
+		u8  xcp_config_hash[32];
+		u8  CSP_config_hash[32];
+		u8  serial[16];
+		u8  module_date_time[16];
+		u64 op_mode;
+		u32 PKCS11_flags;
+		u32 ext_flags;
+		u32 domains;
+		u32 sym_state_bytes;
+		u32 digest_state_bytes;
+		u32 pin_blob_bytes;
+		u32 SPKI_bytes;
+		u32 priv_key_blob_bytes;
+		u32 sym_blob_bytes;
+		u32 max_payload_bytes;
+		u32 CP_profile_bytes;
+		u32 max_CP_index;
+	} __packed * pmqi = NULL;
+
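+	/* query the card only on a cache miss or when verify is requested */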
+	rc = card_cache_fetch(card, info);
+	if (rc || verify) {
+		pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL);
+		if (!pmqi)
+			return -ENOMEM;
+		rc = ep11_query_info(card, AUTOSEL_DOM,
+				     0x01 /* module info query */,
+				     sizeof(*pmqi), (u8 *) pmqi);
+		if (rc) {
+			if (rc == -ENODEV)
+				card_cache_scrub(card);
+			goto out;
+		}
+		memset(info, 0, sizeof(*info));
+		info->API_ord_nr = pmqi->API_ord_nr;
+		info->FW_version =
+			(pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers;
+		memcpy(info->serial, pmqi->serial, sizeof(info->serial));
+		info->op_mode = pmqi->op_mode;
+		card_cache_update(card, info);
+	}
+
+out:
+	kfree(pmqi);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_get_card_info);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info)
+{
+	int rc;
+	struct ep11_domain_query_info {
+		u32 dom_index;
+		u8  cur_WK_VP[32];
+		u8  new_WK_VP[32];
+		u32 dom_flags;
+		u64 op_mode;
+	} __packed * p_dom_info;
+
+	p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL);
+	if (!p_dom_info)
+		return -ENOMEM;
+
+	rc = ep11_query_info(card, domain, 0x03 /* domain info query */,
+			     sizeof(*p_dom_info), (u8 *) p_dom_info);
+	if (rc)
+		goto out;
+
+	memset(info, 0, sizeof(*info));
+	info->cur_wk_state = '0';
+	info->new_wk_state = '0';
+	if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) {
+		if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) {
+			info->cur_wk_state = '1';
+			memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32);
+		}
+		if (p_dom_info->dom_flags & 0x04 /* new wk present */
+		    || p_dom_info->dom_flags & 0x08 /* new wk committed */) {
+			info->new_wk_state =
+				p_dom_info->dom_flags & 0x08 ? '2' : '1';
+			memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32);
+		}
+	}
+	info->op_mode = p_dom_info->op_mode;
+
+out:
+	kfree(p_dom_info);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_get_domain_info);
+
+/*
+ * Default EP11 AES key generate attributes, used when no keygenflags given:
+ * XCP_BLOB_ENCRYPT | XCP_BLOB_DECRYPT | XCP_BLOB_PROTKEY_EXTRACTABLE
+ */
+#define KEY_ATTR_DEFAULTS 0x00200c00
+
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+		   u8 *keybuf, size_t *keybufsize)
+{
+	struct keygen_req_pl {
+		struct pl_head head;
+		u8  var_tag;
+		u8  var_len;
+		u32 var;
+		u8  keybytes_tag;
+		u8  keybytes_len;
+		u32 keybytes;
+		u8  mech_tag;
+		u8  mech_len;
+		u32 mech;
+		u8  attr_tag;
+		u8  attr_len;
+		u32 attr_header;
+		u32 attr_bool_mask;
+		u32 attr_bool_bits;
+		u32 attr_val_len_type;
+		u32 attr_val_len_value;
+		u8  pin_tag;
+		u8  pin_len;
+	} __packed * req_pl;
+	struct keygen_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		u16 data_len;
+		u8  data[512];
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	struct ep11keyblob *kb;
+	int api, rc = -ENOMEM;
+
+	switch (keybitsize) {
+	case 128:
+	case 192:
+	case 256:
+		break;
+	default:
+		DEBUG_ERR(
+			"%s unknown/unsupported keybitsize %d\n",
+			__func__, keybitsize);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* request cprb and payload */
+	req = alloc_cprb(sizeof(struct keygen_req_pl));
+	if (!req)
+		goto out;
+	req_pl = (struct keygen_req_pl *) (((u8 *) req) + sizeof(*req));
+	api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+	prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */
+	req_pl->var_tag = 0x04;
+	req_pl->var_len = sizeof(u32);
+	req_pl->keybytes_tag = 0x04;
+	req_pl->keybytes_len = sizeof(u32);
+	req_pl->keybytes = keybitsize / 8;
+	req_pl->mech_tag = 0x04;
+	req_pl->mech_len = sizeof(u32);
+	req_pl->mech = 0x00001080; /* CKM_AES_KEY_GEN */
+	req_pl->attr_tag = 0x04;
+	req_pl->attr_len = 5 * sizeof(u32);
+	req_pl->attr_header = 0x10010000;
+	req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+	req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+	req_pl->attr_val_len_type = 0x00000161; /* CKA_VALUE_LEN */
+	req_pl->attr_val_len_value = keybitsize / 8;
+	req_pl->pin_tag = 0x04;
+
+	/* reply cprb and payload */
+	rep = alloc_cprb(sizeof(struct keygen_rep_pl));
+	if (!rep)
+		goto out;
+	rep_pl = (struct keygen_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = card;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + sizeof(*req_pl),
+		 rep, sizeof(*rep) + sizeof(*rep_pl));
+
+	rc = _zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) card, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	if (rep_pl->data_len > *keybufsize) {
+		DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+			  __func__);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	/* copy key blob and set header values */
+	memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+	*keybufsize = rep_pl->data_len;
+	kb = (struct ep11keyblob *) keybuf;
+	kb->head.type = TOKTYPE_NON_CCA;
+	kb->head.len = rep_pl->data_len;
+	kb->head.version = TOKVER_EP11_AES;
+	kb->head.keybitlen = keybitsize;
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_genaeskey);
+
+static int ep11_cryptsingle(u16 card, u16 domain,
+			    u16 mode, u32 mech, const u8 *iv,
+			    const u8 *key, size_t keysize,
+			    const u8 *inbuf, size_t inbufsize,
+			    u8 *outbuf, size_t *outbufsize)
+{
+	struct crypt_req_pl {
+		struct pl_head head;
+		u8  var_tag;
+		u8  var_len;
+		u32 var;
+		u8  mech_tag;
+		u8  mech_len;
+		u32 mech;
+		/*
+		 * maybe followed by iv data
+		 * followed by key tag + key blob
+		 * followed by plaintext tag + plaintext
+		 */
+	} __packed * req_pl;
+	struct crypt_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		/* data follows */
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	size_t req_pl_size, rep_pl_size;
+	int n, api = 1, rc = -ENOMEM;
+	u8 *p;
+
+	/* the simple asn1 coding used has length limits */
+	if (keysize > 0xFFFF || inbufsize > 0xFFFF)
+		return -EINVAL;
+
+	/* request cprb and payload */
+	req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 16 : 0)
+		+ ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize);
+	req = alloc_cprb(req_pl_size);
+	if (!req)
+		goto out;
+	req_pl = (struct crypt_req_pl *) (((u8 *) req) + sizeof(*req));
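+	/* mode == 0 gives EP11 function 19 (encrypt), else 20 (decrypt) */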
+	prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19));
+	req_pl->var_tag = 0x04;
+	req_pl->var_len = sizeof(u32);
+	/* mech is mech + mech params (iv here) */
+	req_pl->mech_tag = 0x04;
+	req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+	req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+	p = ((u8 *) req_pl) + sizeof(*req_pl);
+	if (iv) {
+		memcpy(p, iv, 16);
+		p += 16;
+	}
+	/* key and input data */
+	p += asn1tag_write(p, 0x04, key, keysize);
+	p += asn1tag_write(p, 0x04, inbuf, inbufsize);
+
+	/* reply cprb and payload, assume out data size <= in data size + 32 */
+	rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32);
+	rep = alloc_cprb(rep_pl_size);
+	if (!rep)
+		goto out;
+	rep_pl = (struct crypt_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = card;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + req_pl_size,
+		 rep, sizeof(*rep) + rep_pl_size);
+
+	rc = _zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) card, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	p = ((u8 *) rep_pl) + sizeof(*rep_pl);
+	if (rep_pl->data_lenfmt <= 127)
+		n = rep_pl->data_lenfmt;
+	else if (rep_pl->data_lenfmt == 0x81)
+		n = *p++;
+	else if (rep_pl->data_lenfmt == 0x82) {
+		n = *((u16 *) p);
+		p += 2;
+	} else {
+		DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n",
+			  __func__, rep_pl->data_lenfmt);
+		rc = -EIO;
+		goto out;
+	}
+	if (n > *outbufsize) {
+		DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n",
+			  __func__, n, *outbufsize);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	memcpy(outbuf, p, n);
+	*outbufsize = n;
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+
+static int ep11_unwrapkey(u16 card, u16 domain,
+			  const u8 *kek, size_t keksize,
+			  const u8 *enckey, size_t enckeysize,
+			  u32 mech, const u8 *iv,
+			  u32 keybitsize, u32 keygenflags,
+			  u8 *keybuf, size_t *keybufsize)
+{
+	struct uw_req_pl {
+		struct pl_head head;
+		u8  attr_tag;
+		u8  attr_len;
+		u32 attr_header;
+		u32 attr_bool_mask;
+		u32 attr_bool_bits;
+		u32 attr_key_type;
+		u32 attr_key_type_value;
+		u32 attr_val_len;
+		u32 attr_val_len_value;
+		u8  mech_tag;
+		u8  mech_len;
+		u32 mech;
+		/*
+		 * maybe followed by iv data
+		 * followed by kek tag + kek blob
+		 * followed by empty mac tag
+		 * followed by empty pin tag
+		 * followed by encrypted key tag + bytes
+		 */
+	} __packed * req_pl;
+	struct uw_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		u16 data_len;
+		u8  data[512];
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	struct ep11keyblob *kb;
+	size_t req_pl_size;
+	int api, rc = -ENOMEM;
+	u8 *p;
+
+	/* request cprb and payload */
+	req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0)
+		+ ASN1TAGLEN(keksize) + 4 + ASN1TAGLEN(enckeysize);
+	req = alloc_cprb(req_pl_size);
+	if (!req)
+		goto out;
+	req_pl = (struct uw_req_pl *) (((u8 *) req) + sizeof(*req));
+	api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+	prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */
+	req_pl->attr_tag = 0x04;
+	req_pl->attr_len = 7 * sizeof(u32);
+	req_pl->attr_header = 0x10020000;
+	req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+	req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+	req_pl->attr_key_type = 0x00000100; /* CKA_KEY_TYPE */
+	req_pl->attr_key_type_value = 0x0000001f; /* CKK_AES */
+	req_pl->attr_val_len = 0x00000161; /* CKA_VALUE_LEN */
+	req_pl->attr_val_len_value = keybitsize / 8;
+	/* mech is mech + mech params (iv here) */
+	req_pl->mech_tag = 0x04;
+	req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+	req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+	p = ((u8 *) req_pl) + sizeof(*req_pl);
+	if (iv) {
+		memcpy(p, iv, 16);
+		p += 16;
+	}
+	/* kek */
+	p += asn1tag_write(p, 0x04, kek, keksize);
+	/* empty mac key tag */
+	*p++ = 0x04;
+	*p++ = 0;
+	/* empty pin tag */
+	*p++ = 0x04;
+	*p++ = 0;
+	/* encrypted key value tag and bytes */
+	p += asn1tag_write(p, 0x04, enckey, enckeysize);
+
+	/* reply cprb and payload */
+	rep = alloc_cprb(sizeof(struct uw_rep_pl));
+	if (!rep)
+		goto out;
+	rep_pl = (struct uw_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = card;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + req_pl_size,
+		 rep, sizeof(*rep) + sizeof(*rep_pl));
+
+	rc = _zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) card, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	if (rep_pl->data_len > *keybufsize) {
+		DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+			  __func__);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	/* copy key blob and set header values */
+	memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+	*keybufsize = rep_pl->data_len;
+	kb = (struct ep11keyblob *) keybuf;
+	kb->head.type = TOKTYPE_NON_CCA;
+	kb->head.len = rep_pl->data_len;
+	kb->head.version = TOKVER_EP11_AES;
+	kb->head.keybitlen = keybitsize;
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+
+static int ep11_wrapkey(u16 card, u16 domain,
+			const u8 *key, size_t keysize,
+			u32 mech, const u8 *iv,
+			u8 *databuf, size_t *datasize)
+{
+	struct wk_req_pl {
+		struct pl_head head;
+		u8  var_tag;
+		u8  var_len;
+		u32 var;
+		u8  mech_tag;
+		u8  mech_len;
+		u32 mech;
+		/*
+		 * followed by iv data
+		 * followed by key tag + key blob
+		 * followed by dummy kek param
+		 * followed by dummy mac param
+		 */
+	} __packed * req_pl;
+	struct wk_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		u16 data_len;
+		u8  data[512];
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	struct ep11keyblob *kb;
+	size_t req_pl_size;
+	int api, rc = -ENOMEM;
+	u8 *p;
+
+	/* request cprb and payload */
+	req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
+		+ ASN1TAGLEN(keysize) + 4;
+	req = alloc_cprb(req_pl_size);
+	if (!req)
+		goto out;
+	if (!mech || mech == 0x80060001)
+		req->flags |= 0x20; /* CPACF_WRAP needs special bit */
+	req_pl = (struct wk_req_pl *) (((u8 *) req) + sizeof(*req));
+	api = (!mech || mech == 0x80060001) ? 4 : 1; /* CKM_IBM_CPACF_WRAP */
+	prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */
+	req_pl->var_tag = 0x04;
+	req_pl->var_len = sizeof(u32);
+	/* mech is mech + mech params (iv here) */
+	req_pl->mech_tag = 0x04;
+	req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+	req_pl->mech = (mech ? mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */
+	p = ((u8 *) req_pl) + sizeof(*req_pl);
+	if (iv) {
+		memcpy(p, iv, 16);
+		p += 16;
+	}
+	/* key blob */
+	p += asn1tag_write(p, 0x04, key, keysize);
+	/* maybe the key argument needs the head data cleaned out */
+	kb = (struct ep11keyblob *)(p - keysize);
+	if (kb->head.version == TOKVER_EP11_AES)
+		memset(&kb->head, 0, sizeof(kb->head));
+	/* empty kek tag */
+	*p++ = 0x04;
+	*p++ = 0;
+	/* empty mac tag */
+	*p++ = 0x04;
+	*p++ = 0;
+
+	/* reply cprb and payload */
+	rep = alloc_cprb(sizeof(struct wk_rep_pl));
+	if (!rep)
+		goto out;
+	rep_pl = (struct wk_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = card;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + req_pl_size,
+		 rep, sizeof(*rep) + sizeof(*rep_pl));
+
+	rc = _zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) card, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	if (rep_pl->data_len > *datasize) {
+		DEBUG_ERR("%s mismatch reply data len / data buffer len\n",
+			  __func__);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	/* copy the data from the cprb to the data buffer */
+	memcpy(databuf, rep_pl->data, rep_pl->data_len);
+	*datasize = rep_pl->data_len;
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+
+int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+{
+	int rc;
+	struct ep11keyblob *kb;
+	u8 encbuf[64], *kek = NULL;
+	size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
+
+	if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256)
+		clrkeylen = keybitsize / 8;
+	else {
+		DEBUG_ERR(
+			"%s unknown/unsupported keybitsize %d\n",
+			__func__, keybitsize);
+		return -EINVAL;
+	}
+
+	/* allocate memory for the temp kek */
+	keklen = MAXEP11AESKEYBLOBSIZE;
+	kek = kmalloc(keklen, GFP_ATOMIC);
+	if (!kek) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Step 1: generate AES 256 bit random kek key */
+	rc = ep11_genaeskey(card, domain, 256,
+			    0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
+			    kek, &keklen);
+	if (rc) {
+		DEBUG_ERR(
+			"%s generate kek key failed, rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+	kb = (struct ep11keyblob *) kek;
+	memset(&kb->head, 0, sizeof(kb->head));
+
+	/* Step 2: encrypt clear key value with the kek key */
+	rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
+			      clrkey, clrkeylen, encbuf, &encbuflen);
+	if (rc) {
+		DEBUG_ERR(
+			"%s encrypting key value with kek key failed, rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+
+	/* Step 3: import the encrypted key value as a new key */
+	rc = ep11_unwrapkey(card, domain, kek, keklen,
+			    encbuf, encbuflen, 0, def_iv,
+			    keybitsize, 0, keybuf, keybufsize);
+	if (rc) {
+		DEBUG_ERR(
+			"%s importing key value as new key failed,, rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+
+out:
+	kfree(kek);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_clr2keyblob);
+
+int ep11_key2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
+		     u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+	int rc = -EIO;
+	u8 *wkbuf = NULL;
+	size_t wkbuflen = 256;
+	struct wk_info {
+		u16 version;
+		u8  res1[16];
+		u32 pkeytype;
+		u32 pkeybitsize;
+		u64 pkeysize;
+		u8  res2[8];
+		u8  pkey[0];
+	} __packed * wki;
+
+	/* alloc temp working buffer */
+	wkbuf = kmalloc(wkbuflen, GFP_ATOMIC);
+	if (!wkbuf)
+		return -ENOMEM;
+
+	/* ep11 secure key -> protected key + info */
+	rc = ep11_wrapkey(card, dom, key, keylen,
+			  0, def_iv, wkbuf, &wkbuflen);
+	if (rc) {
+		DEBUG_ERR(
+			"%s rewrapping ep11 key to pkey failed, rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+	wki = (struct wk_info *) wkbuf;
+
+	/* check struct version and pkey type */
+	if (wki->version != 1 || wki->pkeytype != 1) {
+		DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
+			  __func__, (int) wki->version, (int) wki->pkeytype);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* copy the translated protected key */
+	switch (wki->pkeysize) {
+	case 16+32:
+		/* AES 128 protected key */
+		if (protkeytype)
+			*protkeytype = PKEY_KEYTYPE_AES_128;
+		break;
+	case 24+32:
+		/* AES 192 protected key */
+		if (protkeytype)
+			*protkeytype = PKEY_KEYTYPE_AES_192;
+		break;
+	case 32+32:
+		/* AES 256 protected key */
+		if (protkeytype)
+			*protkeytype = PKEY_KEYTYPE_AES_256;
+		break;
+	default:
+		DEBUG_ERR("%s unknown/unsupported pkeysize %d\n",
+			  __func__, (int) wki->pkeysize);
+		rc = -EIO;
+		goto out;
+	}
+	memcpy(protkey, wki->pkey, wki->pkeysize);
+	if (protkeylen)
+		*protkeylen = (u32) wki->pkeysize;
+	rc = 0;
+
+out:
+	kfree(wkbuf);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_key2protkey);
+
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+		   int minhwtype, int minapi, const u8 *wkvp)
+{
+	struct zcrypt_device_status_ext *device_status;
+	u32 *_apqns = NULL, _nr_apqns = 0;
+	int i, card, dom, rc = -ENOMEM;
+	struct ep11_domain_info edi;
+	struct ep11_card_info eci;
+
+	/* fetch status of all crypto cards */
+	device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+				      sizeof(struct zcrypt_device_status_ext),
+				      GFP_KERNEL);
+	if (!device_status)
+		return -ENOMEM;
+	zcrypt_device_status_mask_ext(device_status);
+
+	/* allocate 1k space for up to 256 apqns */
+	_apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+	if (!_apqns) {
+		kfree(device_status);
+		return -ENOMEM;
+	}
+
+	/* walk through all the crypto apqns */
+	for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+		card = AP_QID_CARD(device_status[i].qid);
+		dom = AP_QID_QUEUE(device_status[i].qid);
+		/* check online state */
+		if (!device_status[i].online)
+			continue;
+		/* check for ep11 functions */
+		if (!(device_status[i].functions & 0x01))
+			continue;
+		/* check cardnr */
+		if (cardnr != 0xFFFF && card != cardnr)
+			continue;
+		/* check domain */
+		if (domain != 0xFFFF && dom != domain)
+			continue;
+		/* check min hardware type */
+		if (minhwtype && device_status[i].hwtype < minhwtype)
+			continue;
+		/* check min api version if given */
+		if (minapi > 0) {
+			if (ep11_get_card_info(card, &eci, 0))
+				continue;
+			if (minapi > eci.API_ord_nr)
+				continue;
+		}
+		/* check wkvp if given */
+		if (wkvp) {
+			if (ep11_get_domain_info(card, dom, &edi))
+				continue;
+			if (edi.cur_wk_state != '1')
+				continue;
+			if (memcmp(wkvp, edi.cur_wkvp, 16))
+				continue;
+		}
+		/* apqn passed all filtering criteria, add to the array */
+		if (_nr_apqns < 256)
+			_apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom);
+	}
+
+	/* nothing found ? */
+	if (!_nr_apqns) {
+		kfree(_apqns);
+		rc = -ENODEV;
+	} else {
+		/* no re-allocation, simply return the _apqns array */
+		*apqns = _apqns;
+		*nr_apqns = _nr_apqns;
+		rc = 0;
+	}
+
+	kfree(device_status);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_findcard2);
+
+void __exit zcrypt_ep11misc_exit(void)
+{
+	card_cache_free();
+}
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
new file mode 100644
index 0000000..e3ed5ed
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ *  Copyright IBM Corp. 2019
+ *  Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ *  Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#ifndef _ZCRYPT_EP11MISC_H_
+#define _ZCRYPT_EP11MISC_H_
+
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#define TOKVER_EP11_AES  0x03  /* EP11 AES key blob */
+
+#define EP11_API_V 4  /* highest known and supported EP11 API version */
+
+#define EP11_STRUCT_MAGIC 0x1234
+#define EP11_BLOB_PKEY_EXTRACTABLE 0x200000
+
+/* inside view of an EP11 secure key blob */
+struct ep11keyblob {
+	union {
+		u8 session[32];
+		struct {
+			u8  type;      /* 0x00 (TOKTYPE_NON_CCA) */
+			u8  res0;      /* unused */
+			u16 len;       /* total length in bytes of this blob */
+			u8  version;   /* 0x03 (TOKVER_EP11_AES) */
+			u8  res1;      /* unused */
+			u16 keybitlen; /* clear key bit len, 0 for unknown */
+		} head;
+	};
+	u8  wkvp[16];  /* wrapping key verification pattern */
+	u64 attr;      /* boolean key attributes */
+	u64 mode;      /* mode bits */
+	u16 version;   /* 0x1234, EP11_STRUCT_MAGIC */
+	u8  iv[14];
+	u8  encrypted_key_data[144];
+	u8  mac[32];
+} __packed;
+
+/*
+ * Simple check if the key blob is a valid EP11 secure AES key.
+ * If keybitsize is given, the bitsize of the key is also checked.
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl,
+			  const u8 *key, int keybitsize,
+			  int checkcpacfexport);
+
+/* EP11 card info struct */
+struct ep11_card_info {
+	u32  API_ord_nr;    /* API ordinal number */
+	u16  FW_version;    /* Firmware major and minor version */
+	char serial[16];    /* serial number string (16 ascii, no 0x00 !) */
+	u64  op_mode;	    /* card operational mode(s) */
+};
+
+/* EP11 domain info struct */
+struct ep11_domain_info {
+	char cur_wk_state;  /* '0' invalid, '1' valid */
+	char new_wk_state;  /* '0' empty, '1' uncommitted, '2' committed */
+	u8   cur_wkvp[32];  /* current wrapping key verification pattern */
+	u8   new_wkvp[32];  /* new wrapping key verification pattern */
+	u64  op_mode;	    /* domain operational mode(s) */
+};
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
+
+/*
+ * Generate (random) EP11 AES secure key.
+ */
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+		   u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Generate EP11 AES secure key with given clear key value.
+ */
+int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Derive protected key from EP11 AES secure key blob.
+ */
+int ep11_key2protkey(u16 cardnr, u16 domain, const u8 *key, size_t keylen,
+		     u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+/*
+ * Build a list of ep11 apqns meeting the following constraints:
+ * - apqn is online and is in fact an EP11 apqn
+ * - if cardnr is not FFFF only apqns with this cardnr
+ * - if domain is not FFFF only apqns with this domainnr
+ * - if minhwtype > 0 only apqns with hwtype >= minhwtype
+ * - if minapi > 0 only apqns with API_ord_nr >= minapi
+ * - if wkvp != NULL only apqns where the wkvp (EP11_WKVPLEN bytes) matches
+ *   the first EP11_WKVPLEN bytes of the wkvp of the current wrapping
+ *   key for this domain. When a wkvp is given there will always be a
+ *   re-fetch of the domain info for the potential apqn - so this triggers
+ *   a request and reply to each eligible apqn.
+ * The array of apqn entries is allocated with kmalloc and returned in *apqns;
+ * the number of apqns stored into the list is returned in *nr_apqns. One apqn
+ * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and
+ * may be cast to struct pkey_apqn. The return value is either 0 for success
+ * or a negative errno value. If no apqn meeting the criteria is found,
+ * -ENODEV is returned.
+ */
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+		   int minhwtype, int minapi, const u8 *wkvp);
+
+void zcrypt_ep11misc_exit(void);
+
+#endif /* _ZCRYPT_EP11MISC_H_ */
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index c25e8a5..3170b29 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -134,7 +134,7 @@ static char *blogic_cmd_failure_reason;
 static void blogic_announce_drvr(struct blogic_adapter *adapter)
 {
 	blogic_announce("***** BusLogic SCSI Driver Version " blogic_drvr_version " of " blogic_drvr_date " *****\n", adapter);
-	blogic_announce("Copyright 1995-1998 by Leonard N. Zubkoff " "<lnz@dandelion.com>\n", adapter);
+	blogic_announce("Copyright 1995-1998 by Leonard N. Zubkoff <lnz@dandelion.com>\n", adapter);
 }
 
 
@@ -440,7 +440,7 @@ static int blogic_cmd(struct blogic_adapter *adapter, enum blogic_opcode opcode,
 			goto done;
 		}
 		if (blogic_global_options.trace_config)
-			blogic_notice("blogic_cmd(%02X) Status = %02X: " "(Modify I/O Address)\n", adapter, opcode, statusreg.all);
+			blogic_notice("blogic_cmd(%02X) Status = %02X: (Modify I/O Address)\n", adapter, opcode, statusreg.all);
 		result = 0;
 		goto done;
 	}
@@ -716,23 +716,23 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 		pci_addr = base_addr1 = pci_resource_start(pci_device, 1);
 
 		if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) {
-			blogic_err("BusLogic: Base Address0 0x%X not I/O for " "MultiMaster Host Adapter\n", NULL, base_addr0);
-			blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+			blogic_err("BusLogic: Base Address0 0x%lX not I/O for MultiMaster Host Adapter\n", NULL, base_addr0);
+			blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
 			continue;
 		}
 		if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) {
-			blogic_err("BusLogic: Base Address1 0x%X not Memory for " "MultiMaster Host Adapter\n", NULL, base_addr1);
-			blogic_err("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, bus, device, pci_addr);
+			blogic_err("BusLogic: Base Address1 0x%lX not Memory for MultiMaster Host Adapter\n", NULL, base_addr1);
+			blogic_err("at PCI Bus %d Device %d PCI Address 0x%lX\n", NULL, bus, device, pci_addr);
 			continue;
 		}
 		if (irq_ch == 0) {
-			blogic_err("BusLogic: IRQ Channel %d invalid for " "MultiMaster Host Adapter\n", NULL, irq_ch);
-			blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+			blogic_err("BusLogic: IRQ Channel %d invalid for MultiMaster Host Adapter\n", NULL, irq_ch);
+			blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
 			continue;
 		}
 		if (blogic_global_options.trace_probe) {
-			blogic_notice("BusLogic: PCI MultiMaster Host Adapter " "detected at\n", NULL);
-			blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, bus, device, io_addr, pci_addr);
+			blogic_notice("BusLogic: PCI MultiMaster Host Adapter detected at\n", NULL);
+			blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX\n", NULL, bus, device, io_addr, pci_addr);
 		}
 		/*
 		   Issue the Inquire PCI Host Adapter Information command to determine
@@ -818,7 +818,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 			nonpr_mmcount++;
 			mmcount++;
 		} else
-			blogic_warn("BusLogic: Too many Host Adapters " "detected\n", NULL);
+			blogic_warn("BusLogic: Too many Host Adapters detected\n", NULL);
 	}
 	/*
 	   If the AutoSCSI "Use Bus And Device # For PCI Scanning Seq."
@@ -956,23 +956,23 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
 		pci_addr = base_addr1 = pci_resource_start(pci_device, 1);
 #ifdef CONFIG_SCSI_FLASHPOINT
 		if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) {
-			blogic_err("BusLogic: Base Address0 0x%X not I/O for " "FlashPoint Host Adapter\n", NULL, base_addr0);
-			blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+			blogic_err("BusLogic: Base Address0 0x%lX not I/O for FlashPoint Host Adapter\n", NULL, base_addr0);
+			blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
 			continue;
 		}
 		if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) {
-			blogic_err("BusLogic: Base Address1 0x%X not Memory for " "FlashPoint Host Adapter\n", NULL, base_addr1);
-			blogic_err("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, bus, device, pci_addr);
+			blogic_err("BusLogic: Base Address1 0x%lX not Memory for FlashPoint Host Adapter\n", NULL, base_addr1);
+			blogic_err("at PCI Bus %d Device %d PCI Address 0x%lX\n", NULL, bus, device, pci_addr);
 			continue;
 		}
 		if (irq_ch == 0) {
-			blogic_err("BusLogic: IRQ Channel %d invalid for " "FlashPoint Host Adapter\n", NULL, irq_ch);
-			blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+			blogic_err("BusLogic: IRQ Channel %d invalid for FlashPoint Host Adapter\n", NULL, irq_ch);
+			blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
 			continue;
 		}
 		if (blogic_global_options.trace_probe) {
-			blogic_notice("BusLogic: FlashPoint Host Adapter " "detected at\n", NULL);
-			blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, bus, device, io_addr, pci_addr);
+			blogic_notice("BusLogic: FlashPoint Host Adapter detected at\n", NULL);
+			blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX\n", NULL, bus, device, io_addr, pci_addr);
 		}
 		if (blogic_probeinfo_count < BLOGIC_MAX_ADAPTERS) {
 			struct blogic_probeinfo *probeinfo =
@@ -987,11 +987,11 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
 			probeinfo->pci_device = pci_dev_get(pci_device);
 			fpcount++;
 		} else
-			blogic_warn("BusLogic: Too many Host Adapters " "detected\n", NULL);
+			blogic_warn("BusLogic: Too many Host Adapters detected\n", NULL);
 #else
-		blogic_err("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", NULL, bus, device);
-		blogic_err("BusLogic: I/O Address 0x%X PCI Address 0x%X, irq %d, " "but FlashPoint\n", NULL, io_addr, pci_addr, irq_ch);
-		blogic_err("BusLogic: support was omitted in this kernel " "configuration.\n", NULL);
+		blogic_err("BusLogic: FlashPoint Host Adapter detected at PCI Bus %d Device %d\n", NULL, bus, device);
+		blogic_err("BusLogic: I/O Address 0x%lX PCI Address 0x%lX, irq %d, but FlashPoint\n", NULL, io_addr, pci_addr, irq_ch);
+		blogic_err("BusLogic: support was omitted in this kernel configuration.\n", NULL);
 #endif
 	}
 	/*
@@ -1099,9 +1099,9 @@ static bool blogic_failure(struct blogic_adapter *adapter, char *msg)
 	if (adapter->adapter_bus_type == BLOGIC_PCI_BUS) {
 		blogic_err("While configuring BusLogic PCI Host Adapter at\n",
 				adapter);
-		blogic_err("Bus %d Device %d I/O Address 0x%X PCI Address 0x%X:\n", adapter, adapter->bus, adapter->dev, adapter->io_addr, adapter->pci_addr);
+		blogic_err("Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX:\n", adapter, adapter->bus, adapter->dev, adapter->io_addr, adapter->pci_addr);
 	} else
-		blogic_err("While configuring BusLogic Host Adapter at " "I/O Address 0x%X:\n", adapter, adapter->io_addr);
+		blogic_err("While configuring BusLogic Host Adapter at I/O Address 0x%lX:\n", adapter, adapter->io_addr);
 	blogic_err("%s FAILED - DETACHING\n", adapter, msg);
 	if (blogic_cmd_failure_reason != NULL)
 		blogic_err("ADDITIONAL FAILURE INFO - %s\n", adapter,
@@ -1129,13 +1129,13 @@ static bool __init blogic_probe(struct blogic_adapter *adapter)
 		fpinfo->present = false;
 		if (!(FlashPoint_ProbeHostAdapter(fpinfo) == 0 &&
 					fpinfo->present)) {
-			blogic_err("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", adapter, adapter->bus, adapter->dev);
-			blogic_err("BusLogic: I/O Address 0x%X PCI Address 0x%X, " "but FlashPoint\n", adapter, adapter->io_addr, adapter->pci_addr);
+			blogic_err("BusLogic: FlashPoint Host Adapter detected at PCI Bus %d Device %d\n", adapter, adapter->bus, adapter->dev);
+			blogic_err("BusLogic: I/O Address 0x%lX PCI Address 0x%lX, but FlashPoint\n", adapter, adapter->io_addr, adapter->pci_addr);
 			blogic_err("BusLogic: Probe Function failed to validate it.\n", adapter);
 			return false;
 		}
 		if (blogic_global_options.trace_probe)
-			blogic_notice("BusLogic_Probe(0x%X): FlashPoint Found\n", adapter, adapter->io_addr);
+			blogic_notice("BusLogic_Probe(0x%lX): FlashPoint Found\n", adapter, adapter->io_addr);
 		/*
 		   Indicate the Host Adapter Probe completed successfully.
 		 */
@@ -1152,7 +1152,7 @@ static bool __init blogic_probe(struct blogic_adapter *adapter)
 	intreg.all = blogic_rdint(adapter);
 	georeg.all = blogic_rdgeom(adapter);
 	if (blogic_global_options.trace_probe)
-		blogic_notice("BusLogic_Probe(0x%X): Status 0x%02X, Interrupt 0x%02X, " "Geometry 0x%02X\n", adapter, adapter->io_addr, statusreg.all, intreg.all, georeg.all);
+		blogic_notice("BusLogic_Probe(0x%lX): Status 0x%02X, Interrupt 0x%02X, Geometry 0x%02X\n", adapter, adapter->io_addr, statusreg.all, intreg.all, georeg.all);
 	if (statusreg.all == 0 || statusreg.sr.diag_active ||
 			statusreg.sr.cmd_param_busy || statusreg.sr.rsvd ||
 			statusreg.sr.cmd_invalid || intreg.ir.rsvd != 0)
@@ -1231,7 +1231,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset)
 		udelay(100);
 	}
 	if (blogic_global_options.trace_hw_reset)
-		blogic_notice("BusLogic_HardwareReset(0x%X): Diagnostic Active, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+		blogic_notice("BusLogic_HardwareReset(0x%lX): Diagnostic Active, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
 	if (timeout < 0)
 		return false;
 	/*
@@ -1251,7 +1251,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset)
 		udelay(100);
 	}
 	if (blogic_global_options.trace_hw_reset)
-		blogic_notice("BusLogic_HardwareReset(0x%X): Diagnostic Completed, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+		blogic_notice("BusLogic_HardwareReset(0x%lX): Diagnostic Completed, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
 	if (timeout < 0)
 		return false;
 	/*
@@ -1267,7 +1267,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset)
 		udelay(100);
 	}
 	if (blogic_global_options.trace_hw_reset)
-		blogic_notice("BusLogic_HardwareReset(0x%X): Host Adapter Ready, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+		blogic_notice("BusLogic_HardwareReset(0x%lX): Host Adapter Ready, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
 	if (timeout < 0)
 		return false;
 	/*
@@ -1323,7 +1323,7 @@ static bool __init blogic_checkadapter(struct blogic_adapter *adapter)
 	   Provide tracing information if requested and return.
 	 */
 	if (blogic_global_options.trace_probe)
-		blogic_notice("BusLogic_Check(0x%X): MultiMaster %s\n", adapter,
+		blogic_notice("BusLogic_Check(0x%lX): MultiMaster %s\n", adapter,
 				adapter->io_addr,
 				(result ? "Found" : "Not Found"));
 	return result;
@@ -1836,7 +1836,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
 	int tgt_id;
 
 	blogic_info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", adapter, adapter->model, blogic_adapter_busnames[adapter->adapter_bus_type], (adapter->wide ? " Wide" : ""), (adapter->differential ? " Differential" : ""), (adapter->ultra ? " Ultra" : ""));
-	blogic_info("  Firmware Version: %s, I/O Address: 0x%X, " "IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge"));
+	blogic_info("  Firmware Version: %s, I/O Address: 0x%lX, IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge"));
 	if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
 		blogic_info("  DMA Channel: ", adapter);
 		if (adapter->dma_ch > 0)
@@ -1844,7 +1844,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
 		else
 			blogic_info("None, ", adapter);
 		if (adapter->bios_addr > 0)
-			blogic_info("BIOS Address: 0x%X, ", adapter,
+			blogic_info("BIOS Address: 0x%lX, ", adapter,
 					adapter->bios_addr);
 		else
 			blogic_info("BIOS Address: None, ", adapter);
@@ -1852,7 +1852,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
 		blogic_info("  PCI Bus: %d, Device: %d, Address: ", adapter,
 				adapter->bus, adapter->dev);
 		if (adapter->pci_addr > 0)
-			blogic_info("0x%X, ", adapter, adapter->pci_addr);
+			blogic_info("0x%lX, ", adapter, adapter->pci_addr);
 		else
 			blogic_info("Unassigned, ", adapter);
 	}
@@ -1932,10 +1932,10 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
 	blogic_info("  Disconnect/Reconnect: %s, Tagged Queuing: %s\n", adapter,
 			discon_msg, tagq_msg);
 	if (blogic_multimaster_type(adapter)) {
-		blogic_info("  Scatter/Gather Limit: %d of %d segments, " "Mailboxes: %d\n", adapter, adapter->drvr_sglimit, adapter->adapter_sglimit, adapter->mbox_count);
-		blogic_info("  Driver Queue Depth: %d, " "Host Adapter Queue Depth: %d\n", adapter, adapter->drvr_qdepth, adapter->adapter_qdepth);
+		blogic_info("  Scatter/Gather Limit: %d of %d segments, Mailboxes: %d\n", adapter, adapter->drvr_sglimit, adapter->adapter_sglimit, adapter->mbox_count);
+		blogic_info("  Driver Queue Depth: %d, Host Adapter Queue Depth: %d\n", adapter, adapter->drvr_qdepth, adapter->adapter_qdepth);
 	} else
-		blogic_info("  Driver Queue Depth: %d, " "Scatter/Gather Limit: %d segments\n", adapter, adapter->drvr_qdepth, adapter->drvr_sglimit);
+		blogic_info("  Driver Queue Depth: %d, Scatter/Gather Limit: %d segments\n", adapter, adapter->drvr_qdepth, adapter->drvr_sglimit);
 	blogic_info("  Tagged Queue Depth: ", adapter);
 	common_tagq_depth = true;
 	for (tgt_id = 1; tgt_id < adapter->maxdev; tgt_id++)
@@ -2717,7 +2717,7 @@ static void blogic_scan_inbox(struct blogic_adapter *adapter)
 				   then there is most likely a bug in
 				   the Host Adapter firmware.
 				 */
-				blogic_warn("Illegal CCB #%ld status %d in " "Incoming Mailbox\n", adapter, ccb->serial, ccb->status);
+				blogic_warn("Illegal CCB #%ld status %d in Incoming Mailbox\n", adapter, ccb->serial, ccb->status);
 			}
 		}
 		next_inbox->comp_code = BLOGIC_INBOX_FREE;
@@ -2752,7 +2752,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
 		if (ccb->opcode == BLOGIC_BDR) {
 			int tgt_id = ccb->tgt_id;
 
-			blogic_warn("Bus Device Reset CCB #%ld to Target " "%d Completed\n", adapter, ccb->serial, tgt_id);
+			blogic_warn("Bus Device Reset CCB #%ld to Target %d Completed\n", adapter, ccb->serial, tgt_id);
 			blogic_inc_count(&adapter->tgt_stats[tgt_id].bdr_done);
 			adapter->tgt_flags[tgt_id].tagq_active = false;
 			adapter->cmds_since_rst[tgt_id] = 0;
@@ -2829,7 +2829,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
 					if (blogic_global_options.trace_err) {
 						int i;
 						blogic_notice("CCB #%ld Target %d: Result %X Host "
-								"Adapter Status %02X " "Target Status %02X\n", adapter, ccb->serial, ccb->tgt_id, command->result, ccb->adapter_status, ccb->tgt_status);
+								"Adapter Status %02X Target Status %02X\n", adapter, ccb->serial, ccb->tgt_id, command->result, ccb->adapter_status, ccb->tgt_status);
 						blogic_notice("CDB   ", adapter);
 						for (i = 0; i < ccb->cdblen; i++)
 							blogic_notice(" %02X", adapter, ccb->cdb[i]);
@@ -3203,12 +3203,12 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
 		 */
 		if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, ccb)) {
 			spin_unlock_irq(adapter->scsi_host->host_lock);
-			blogic_warn("Unable to write Outgoing Mailbox - " "Pausing for 1 second\n", adapter);
+			blogic_warn("Unable to write Outgoing Mailbox - Pausing for 1 second\n", adapter);
 			blogic_delay(1);
 			spin_lock_irq(adapter->scsi_host->host_lock);
 			if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START,
 						ccb)) {
-				blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter);
+				blogic_warn("Still unable to write Outgoing Mailbox - Host Adapter Dead?\n", adapter);
 				blogic_dealloc_ccb(ccb, 1);
 				command->result = DID_ERROR << 16;
 				command->scsi_done(command);
@@ -3443,8 +3443,8 @@ static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev,
 			if (diskparam->cylinders != saved_cyl)
 				blogic_warn("Adopting Geometry %d/%d from Partition Table\n", adapter, diskparam->heads, diskparam->sectors);
 		} else if (part_end_head > 0 || part_end_sector > 0) {
-			blogic_warn("Warning: Partition Table appears to " "have Geometry %d/%d which is\n", adapter, part_end_head + 1, part_end_sector);
-			blogic_warn("not compatible with current BusLogic " "Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors);
+			blogic_warn("Warning: Partition Table appears to have Geometry %d/%d which is\n", adapter, part_end_head + 1, part_end_sector);
+			blogic_warn("not compatible with current BusLogic Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors);
 		}
 	}
 	kfree(buf);
@@ -3689,7 +3689,7 @@ static int __init blogic_parseopts(char *options)
 					blogic_probe_options.probe134 = true;
 					break;
 				default:
-					blogic_err("BusLogic: Invalid Driver Options " "(invalid I/O Address 0x%X)\n", NULL, io_addr);
+					blogic_err("BusLogic: Invalid Driver Options (invalid I/O Address 0x%lX)\n", NULL, io_addr);
 					return 0;
 				}
 			} else if (blogic_parse(&options, "NoProbeISA"))
@@ -3710,7 +3710,7 @@ static int __init blogic_parseopts(char *options)
 				for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) {
 					unsigned short qdepth = simple_strtoul(options, &options, 0);
 					if (qdepth > BLOGIC_MAX_TAG_DEPTH) {
-						blogic_err("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, qdepth);
+						blogic_err("BusLogic: Invalid Driver Options (invalid Queue Depth %d)\n", NULL, qdepth);
 						return 0;
 					}
 					drvr_opts->qdepth[tgt_id] = qdepth;
@@ -3719,12 +3719,12 @@ static int __init blogic_parseopts(char *options)
 					else if (*options == ']')
 						break;
 					else {
-						blogic_err("BusLogic: Invalid Driver Options " "(',' or ']' expected at '%s')\n", NULL, options);
+						blogic_err("BusLogic: Invalid Driver Options (',' or ']' expected at '%s')\n", NULL, options);
 						return 0;
 					}
 				}
 				if (*options != ']') {
-					blogic_err("BusLogic: Invalid Driver Options " "(']' expected at '%s')\n", NULL, options);
+					blogic_err("BusLogic: Invalid Driver Options (']' expected at '%s')\n", NULL, options);
 					return 0;
 				} else
 					options++;
@@ -3732,7 +3732,7 @@ static int __init blogic_parseopts(char *options)
 				unsigned short qdepth = simple_strtoul(options, &options, 0);
 				if (qdepth == 0 ||
 						qdepth > BLOGIC_MAX_TAG_DEPTH) {
-					blogic_err("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, qdepth);
+					blogic_err("BusLogic: Invalid Driver Options (invalid Queue Depth %d)\n", NULL, qdepth);
 					return 0;
 				}
 				drvr_opts->common_qdepth = qdepth;
@@ -3778,7 +3778,7 @@ static int __init blogic_parseopts(char *options)
 				unsigned short bus_settle_time =
 					simple_strtoul(options, &options, 0);
 				if (bus_settle_time > 5 * 60) {
-					blogic_err("BusLogic: Invalid Driver Options " "(invalid Bus Settle Time %d)\n", NULL, bus_settle_time);
+					blogic_err("BusLogic: Invalid Driver Options (invalid Bus Settle Time %d)\n", NULL, bus_settle_time);
 					return 0;
 				}
 				drvr_opts->bus_settle_time = bus_settle_time;
@@ -3803,14 +3803,14 @@ static int __init blogic_parseopts(char *options)
 			if (*options == ',')
 				options++;
 			else if (*options != ';' && *options != '\0') {
-				blogic_err("BusLogic: Unexpected Driver Option '%s' " "ignored\n", NULL, options);
+				blogic_err("BusLogic: Unexpected Driver Option '%s' ignored\n", NULL, options);
 				*options = '\0';
 			}
 		}
 		if (!(blogic_drvr_options_count == 0 ||
 			blogic_probeinfo_count == 0 ||
 			blogic_drvr_options_count == blogic_probeinfo_count)) {
-			blogic_err("BusLogic: Invalid Driver Options " "(all or no I/O Addresses must be specified)\n", NULL);
+			blogic_err("BusLogic: Invalid Driver Options (all or no I/O Addresses must be specified)\n", NULL);
 			return 0;
 		}
 		/*
@@ -3864,7 +3864,7 @@ static int __init blogic_setup(char *str)
 	(void) get_options(str, ARRAY_SIZE(ints), ints);
 
 	if (ints[0] != 0) {
-		blogic_err("BusLogic: Obsolete Command Line Entry " "Format Ignored\n", NULL);
+		blogic_err("BusLogic: Obsolete Command Line Entry Format Ignored\n", NULL);
 		return 0;
 	}
 	if (str == NULL || *str == '\0')
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index a9d40d3..4190a02 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2314,7 +2314,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
 			 * At some speeds, we only support
 			 * ST transfers.
 			 */
-		 	if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
+			if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
 				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
 			break;
 		}
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index f5781e3..d022407 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -54,6 +54,9 @@ static struct scsi_host_template aic94xx_sht = {
 	.eh_target_reset_handler	= sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= sas_ioctl,
+#endif
 	.track_queue_depth	= 1,
 };
 
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 76751d6..ed5f4a6 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -872,6 +872,10 @@ static long ch_ioctl_compat(struct file * file,
 			    unsigned int cmd, unsigned long arg)
 {
 	scsi_changer *ch = file->private_data;
+	int retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd,
+							file->f_flags & O_NDELAY);
+	if (retval)
+		return retval;
 
 	switch (cmd) {
 	case CHIOGPARAMS:
@@ -883,7 +887,7 @@ static long ch_ioctl_compat(struct file * file,
 	case CHIOINITELEM:
 	case CHIOSVOLTAG:
 		/* compatible */
-		return ch_ioctl(file, cmd, arg);
+		return ch_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
 	case CHIOGSTATUS32:
 	{
 		struct changer_element_status32 ces32;
@@ -898,8 +902,7 @@ static long ch_ioctl_compat(struct file * file,
 		return ch_gstatus(ch, ces32.ces_type, data);
 	}
 	default:
-		// return scsi_ioctl_compat(ch->device, cmd, (void*)arg);
-		return -ENOIOCTLCMD;
+		return scsi_compat_ioctl(ch->device, cmd, compat_ptr(arg));
 
 	}
 }
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 469d0bc..00cf335 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1383,7 +1383,7 @@ csio_device_reset(struct device *dev,
 		return -EINVAL;
 
 	/* Delete NPIV lnodes */
-	 csio_lnodes_exit(hw, 1);
+	csio_lnodes_exit(hw, 1);
 
 	/* Block upper IOs */
 	csio_lnodes_block_request(hw);
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 80c5a235..7b49e2e 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -617,6 +617,13 @@ static const struct file_operations esas2r_proc_fops = {
 	.unlocked_ioctl = esas2r_proc_ioctl,
 };
 
+static const struct proc_ops esas2r_proc_ops = {
+	.proc_ioctl		= esas2r_proc_ioctl,
+#ifdef CONFIG_COMPAT
+	.proc_compat_ioctl	= compat_ptr_ioctl,
+#endif
+};
+
 static struct Scsi_Host *esas2r_proc_host;
 static int esas2r_proc_major;
 
@@ -728,7 +735,7 @@ const char *esas2r_info(struct Scsi_Host *sh)
 
 			pde = proc_create(ATTONODE_NAME, 0,
 					  sh->hostt->proc_dir,
-					  &esas2r_proc_fops);
+					  &esas2r_proc_ops);
 
 			if (!pde) {
 				esas2r_log_dev(ESAS2R_LOG_WARN,
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index bb88995..89afa31 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -243,8 +243,6 @@ static void esp_set_all_config3(struct esp *esp, u8 val)
 /* Reset the ESP chip, _not_ the SCSI bus. */
 static void esp_reset_esp(struct esp *esp)
 {
-	u8 family_code, version;
-
 	/* Now reset the ESP chip */
 	scsi_esp_cmd(esp, ESP_CMD_RC);
 	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
@@ -257,14 +255,19 @@ static void esp_reset_esp(struct esp *esp)
 	 */
 	esp->max_period = ((35 * esp->ccycle) / 1000);
 	if (esp->rev == FAST) {
-		version = esp_read8(ESP_UID);
-		family_code = (version & 0xf8) >> 3;
-		if (family_code == 0x02)
+		u8 family_code = ESP_FAMILY(esp_read8(ESP_UID));
+
+		if (family_code == ESP_UID_F236) {
 			esp->rev = FAS236;
-		else if (family_code == 0x0a)
+		} else if (family_code == ESP_UID_HME) {
 			esp->rev = FASHME; /* Version is usually '5'. */
-		else
+		} else if (family_code == ESP_UID_FSC) {
+			esp->rev = FSC;
+			/* Enable Active Negation */
+			esp_write8(ESP_CONFIG4_RADE, ESP_CFG4);
+		} else {
 			esp->rev = FAS100A;
+		}
 		esp->min_period = ((4 * esp->ccycle) / 1000);
 	} else {
 		esp->min_period = ((5 * esp->ccycle) / 1000);
@@ -308,7 +311,7 @@ static void esp_reset_esp(struct esp *esp)
 
 	case FAS236:
 	case PCSCSI:
-		/* Fast 236, AM53c974 or HME */
+	case FSC:
 		esp_write8(esp->config2, ESP_CFG2);
 		if (esp->rev == FASHME) {
 			u8 cfg3 = esp->target[0].esp_config3;
@@ -2373,10 +2376,11 @@ static const char *esp_chip_names[] = {
 	"ESP100A",
 	"ESP236",
 	"FAS236",
+	"AM53C974",
+	"53CF9x-2",
 	"FAS100A",
 	"FAST",
 	"FASHME",
-	"AM53C974",
 };
 
 static struct scsi_transport_template *esp_transport_template;
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 91b32f2..446a3d1 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -78,12 +78,14 @@
 #define ESP_CONFIG3_IMS       0x80     /* ID msg chk'ng        (esp/fas236)  */
 #define ESP_CONFIG3_OBPUSH    0x80     /* Push odd-byte to dma (hme)         */
 
-/* ESP config register 4 read-write, found only on am53c974 chips */
-#define ESP_CONFIG4_RADE      0x04     /* Active negation */
-#define ESP_CONFIG4_RAE       0x08     /* Active negation on REQ and ACK */
-#define ESP_CONFIG4_PWD       0x20     /* Reduced power feature */
-#define ESP_CONFIG4_GE0       0x40     /* Glitch eater bit 0 */
-#define ESP_CONFIG4_GE1       0x80     /* Glitch eater bit 1 */
+/* ESP config register 4 read-write */
+#define ESP_CONFIG4_BBTE      0x01     /* Back-to-back transfers     (fsc)   */
+#define ESP_CONGIG4_TEST      0x02     /* Transfer counter test mode (fsc)   */
+#define ESP_CONFIG4_RADE      0x04     /* Active negation   (am53c974/fsc)   */
+#define ESP_CONFIG4_RAE       0x08     /* Act. negation REQ/ACK (am53c974)   */
+#define ESP_CONFIG4_PWD       0x20     /* Reduced power feature (am53c974)   */
+#define ESP_CONFIG4_GE0       0x40     /* Glitch eater bit 0    (am53c974)   */
+#define ESP_CONFIG4_GE1       0x80     /* Glitch eater bit 1    (am53c974)   */
 
 #define ESP_CONFIG_GE_12NS    (0)
 #define ESP_CONFIG_GE_25NS    (ESP_CONFIG_GE1)
@@ -209,10 +211,15 @@
 #define ESP_TEST_TS           0x04     /* Tristate test mode */
 
 /* ESP unique ID register read-only, found on fas236+fas100a only */
+#define ESP_UID_FAM           0xf8     /* ESP family bitmask */
+
+#define ESP_FAMILY(uid) (((uid) & ESP_UID_FAM) >> 3)
+
+/* Values for the ESP family bits */
 #define ESP_UID_F100A         0x00     /* ESP FAS100A  */
 #define ESP_UID_F236          0x02     /* ESP FAS236   */
-#define ESP_UID_REV           0x07     /* ESP revision */
-#define ESP_UID_FAM           0xf8     /* ESP family   */
+#define ESP_UID_HME           0x0a     /* FAS HME      */
+#define ESP_UID_FSC           0x14     /* NCR/Symbios Logic 53CF9x-2 */
 
 /* ESP fifo flags register read-only */
 /* Note that the following implies a 16 byte FIFO on the ESP. */
@@ -257,15 +264,17 @@ struct esp_cmd_priv {
 };
 #define ESP_CMD_PRIV(CMD)	((struct esp_cmd_priv *)(&(CMD)->SCp))
 
+/* NOTE: this enum is ordered based on chip features! */
 enum esp_rev {
-	ESP100     = 0x00,  /* NCR53C90 - very broken */
-	ESP100A    = 0x01,  /* NCR53C90A */
-	ESP236     = 0x02,
-	FAS236     = 0x03,
-	FAS100A    = 0x04,
-	FAST       = 0x05,
-	FASHME     = 0x06,
-	PCSCSI     = 0x07,  /* AM53c974 */
+	ESP100,  /* NCR53C90 - very broken */
+	ESP100A, /* NCR53C90A */
+	ESP236,
+	FAS236,
+	PCSCSI,  /* AM53c974 */
+	FSC,     /* NCR/Symbios Logic 53CF9x-2 */
+	FAS100A,
+	FAST,
+	FASHME,
 };
 
 struct esp_cmd_entry {
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 233c73e..2bdd646 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -180,10 +180,10 @@ struct hisi_sas_port {
 
 struct hisi_sas_cq {
 	struct hisi_hba *hisi_hba;
-	const struct cpumask *pci_irq_mask;
-	struct tasklet_struct tasklet;
+	const struct cpumask *irq_mask;
 	int	rd_point;
 	int	id;
+	int	irq_no;
 };
 
 struct hisi_sas_dq {
@@ -627,7 +627,7 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
 extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
 extern void hisi_sas_rst_work_handler(struct work_struct *work);
 extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
-extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
+extern void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba);
 extern void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no);
 extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
 				enum hisi_sas_phy_event event);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 03588ec..9a6deb2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -163,13 +163,11 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
 
 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
 {
-	unsigned long flags;
-
 	if (hisi_hba->hw->slot_index_alloc ||
 	    slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
-		spin_lock_irqsave(&hisi_hba->lock, flags);
+		spin_lock(&hisi_hba->lock);
 		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
-		spin_unlock_irqrestore(&hisi_hba->lock, flags);
+		spin_unlock(&hisi_hba->lock);
 	}
 }
 
@@ -185,12 +183,11 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
 {
 	int index;
 	void *bitmap = hisi_hba->slot_index_tags;
-	unsigned long flags;
 
 	if (scsi_cmnd)
 		return scsi_cmnd->request->tag;
 
-	spin_lock_irqsave(&hisi_hba->lock, flags);
+	spin_lock(&hisi_hba->lock);
 	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
 				   hisi_hba->last_slot_index + 1);
 	if (index >= hisi_hba->slot_index_count) {
@@ -198,13 +195,13 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
 				hisi_hba->slot_index_count,
 				HISI_SAS_UNRESERVED_IPTT);
 		if (index >= hisi_hba->slot_index_count) {
-			spin_unlock_irqrestore(&hisi_hba->lock, flags);
+			spin_unlock(&hisi_hba->lock);
 			return -SAS_QUEUE_FULL;
 		}
 	}
 	hisi_sas_slot_index_set(hisi_hba, index);
 	hisi_hba->last_slot_index = index;
-	spin_unlock_irqrestore(&hisi_hba->lock, flags);
+	spin_unlock(&hisi_hba->lock);
 
 	return index;
 }
@@ -220,7 +217,6 @@ static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
 			     struct hisi_sas_slot *slot)
 {
-	unsigned long flags;
 	int device_id = slot->device_id;
 	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];
 
@@ -247,9 +243,9 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
 		}
 	}
 
-	spin_lock_irqsave(&sas_dev->lock, flags);
+	spin_lock(&sas_dev->lock);
 	list_del_init(&slot->entry);
-	spin_unlock_irqrestore(&sas_dev->lock, flags);
+	spin_unlock(&sas_dev->lock);
 
 	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
 
@@ -489,14 +485,14 @@ static int hisi_sas_task_prep(struct sas_task *task,
 	slot_idx = rc;
 	slot = &hisi_hba->slot_info[slot_idx];
 
-	spin_lock_irqsave(&dq->lock, flags);
+	spin_lock(&dq->lock);
 	wr_q_index = dq->wr_point;
 	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
 	list_add_tail(&slot->delivery, &dq->list);
-	spin_unlock_irqrestore(&dq->lock, flags);
-	spin_lock_irqsave(&sas_dev->lock, flags);
+	spin_unlock(&dq->lock);
+	spin_lock(&sas_dev->lock);
 	list_add_tail(&slot->entry, &sas_dev->list);
-	spin_unlock_irqrestore(&sas_dev->lock, flags);
+	spin_unlock(&sas_dev->lock);
 
 	dlvry_queue = dq->id;
 	dlvry_queue_slot = wr_q_index;
@@ -562,7 +558,6 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
 {
 	u32 rc;
 	u32 pass = 0;
-	unsigned long flags;
 	struct hisi_hba *hisi_hba;
 	struct device *dev;
 	struct domain_device *device = task->dev;
@@ -606,9 +601,9 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
 		dev_err(dev, "task exec: failed[%d]!\n", rc);
 
 	if (likely(pass)) {
-		spin_lock_irqsave(&dq->lock, flags);
+		spin_lock(&dq->lock);
 		hisi_hba->hw->start_delivery(dq);
-		spin_unlock_irqrestore(&dq->lock, flags);
+		spin_unlock(&dq->lock);
 	}
 
 	return rc;
@@ -659,12 +654,11 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
 {
 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
 	struct hisi_sas_device *sas_dev = NULL;
-	unsigned long flags;
 	int last = hisi_hba->last_dev_id;
 	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
 	int i;
 
-	spin_lock_irqsave(&hisi_hba->lock, flags);
+	spin_lock(&hisi_hba->lock);
 	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
 		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
 			int queue = i % hisi_hba->queue_count;
@@ -684,7 +678,7 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
 		i++;
 	}
 	hisi_hba->last_dev_id = i;
-	spin_unlock_irqrestore(&hisi_hba->lock, flags);
+	spin_unlock(&hisi_hba->lock);
 
 	return sas_dev;
 }
@@ -1233,10 +1227,10 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
 					struct hisi_sas_cq *cq =
 					       &hisi_hba->cq[slot->dlvry_queue];
 					/*
-					 * flush tasklet to avoid free'ing task
+					 * sync irq to avoid free'ing task
 					 * before using task in IO completion
 					 */
-					tasklet_kill(&cq->tasklet);
+					synchronize_irq(cq->irq_no);
 					slot->task = NULL;
 				}
 
@@ -1626,11 +1620,11 @@ static int hisi_sas_abort_task(struct sas_task *task)
 
 		if (slot) {
 			/*
-			 * flush tasklet to avoid free'ing task
+			 * sync irq to avoid free'ing task
 			 * before using task in IO completion
 			 */
 			cq = &hisi_hba->cq[slot->dlvry_queue];
-			tasklet_kill(&cq->tasklet);
+			synchronize_irq(cq->irq_no);
 		}
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
 		rc = TMF_RESP_FUNC_COMPLETE;
@@ -1694,10 +1688,10 @@ static int hisi_sas_abort_task(struct sas_task *task)
 		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
 					task->lldd_task) {
 			/*
-			 * flush tasklet to avoid free'ing task
+			 * sync irq to avoid free'ing task
 			 * before using task in IO completion
 			 */
-			tasklet_kill(&cq->tasklet);
+			synchronize_irq(cq->irq_no);
 			slot->task = NULL;
 		}
 	}
@@ -1965,14 +1959,14 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
 	slot_idx = rc;
 	slot = &hisi_hba->slot_info[slot_idx];
 
-	spin_lock_irqsave(&dq->lock, flags);
+	spin_lock(&dq->lock);
 	wr_q_index = dq->wr_point;
 	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
 	list_add_tail(&slot->delivery, &dq->list);
-	spin_unlock_irqrestore(&dq->lock, flags);
-	spin_lock_irqsave(&sas_dev->lock, flags);
+	spin_unlock(&dq->lock);
+	spin_lock(&sas_dev->lock);
 	list_add_tail(&slot->entry, &sas_dev->list);
-	spin_unlock_irqrestore(&sas_dev->lock, flags);
+	spin_unlock(&sas_dev->lock);
 
 	dlvry_queue = dq->id;
 	dlvry_queue_slot = wr_q_index;
@@ -2001,9 +1995,9 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
 	WRITE_ONCE(slot->ready, 1);
 	/* send abort command to the chip */
-	spin_lock_irqsave(&dq->lock, flags);
+	spin_lock(&dq->lock);
 	hisi_hba->hw->start_delivery(dq);
-	spin_unlock_irqrestore(&dq->lock, flags);
+	spin_unlock(&dq->lock);
 
 	return 0;
 
@@ -2076,10 +2070,10 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
 				struct hisi_sas_cq *cq =
 					&hisi_hba->cq[slot->dlvry_queue];
 				/*
-				 * flush tasklet to avoid free'ing task
+				 * sync irq to avoid free'ing task
 				 * before using task in IO completion
 				 */
-				tasklet_kill(&cq->tasklet);
+				synchronize_irq(cq->irq_no);
 				slot->task = NULL;
 			}
 			dev_err(dev, "internal task abort: timeout and not done.\n");
@@ -2131,7 +2125,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
 	case HISI_SAS_INT_ABT_DEV:
 		for (i = 0; i < hisi_hba->cq_nvecs; i++) {
 			struct hisi_sas_cq *cq = &hisi_hba->cq[i];
-			const struct cpumask *mask = cq->pci_irq_mask;
+			const struct cpumask *mask = cq->irq_mask;
 
 			if (mask && !cpumask_intersects(cpu_online_mask, mask))
 				continue;
@@ -2225,17 +2219,17 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
 }
 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
 
-void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
+void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
 {
 	int i;
 
 	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
 
-		tasklet_kill(&cq->tasklet);
+		synchronize_irq(cq->irq_no);
 	}
 }
-EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
+EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs);
 
 int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
 {
@@ -3936,7 +3930,7 @@ void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
 
 	hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
 						   hisi_sas_debugfs_dir);
-	debugfs_create_file("trigger_dump", 0600,
+	debugfs_create_file("trigger_dump", 0200,
 			    hisi_hba->debugfs_dir,
 			    hisi_hba,
 			    &hisi_sas_debugfs_trigger_dump_fops);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 3af53cc..fa25766 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1772,6 +1772,9 @@ static struct scsi_host_template sht_v1_hw = {
 	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= sas_ioctl,
+#endif
 	.shost_attrs		= host_attrs_v1_hw,
 	.host_reset             = hisi_sas_host_reset,
 };
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 61b1e26..e05faf3 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -773,7 +773,6 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
 	struct hisi_sas_device *sas_dev = device->lldd_dev;
 	int sata_idx = sas_dev->sata_idx;
 	int start, end;
-	unsigned long flags;
 
 	if (!sata_dev) {
 		/*
@@ -797,12 +796,12 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
 		end = 64 * (sata_idx + 2);
 	}
 
-	spin_lock_irqsave(&hisi_hba->lock, flags);
+	spin_lock(&hisi_hba->lock);
 	while (1) {
 		start = find_next_zero_bit(bitmap,
 					hisi_hba->slot_index_count, start);
 		if (start >= end) {
-			spin_unlock_irqrestore(&hisi_hba->lock, flags);
+			spin_unlock(&hisi_hba->lock);
 			return -SAS_QUEUE_FULL;
 		}
 		/*
@@ -814,7 +813,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
 	}
 
 	set_bit(start, bitmap);
-	spin_unlock_irqrestore(&hisi_hba->lock, flags);
+	spin_unlock(&hisi_hba->lock);
 	return start;
 }
 
@@ -843,9 +842,8 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
 	struct hisi_sas_device *sas_dev = NULL;
 	int i, sata_dev = dev_is_sata(device);
 	int sata_idx = -1;
-	unsigned long flags;
 
-	spin_lock_irqsave(&hisi_hba->lock, flags);
+	spin_lock(&hisi_hba->lock);
 
 	if (sata_dev)
 		if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx))
@@ -876,7 +874,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
 	}
 
 out:
-	spin_unlock_irqrestore(&hisi_hba->lock, flags);
+	spin_unlock(&hisi_hba->lock);
 
 	return sas_dev;
 }
@@ -3111,9 +3109,9 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
 	return IRQ_HANDLED;
 }
 
-static void cq_tasklet_v2_hw(unsigned long val)
+static irqreturn_t  cq_thread_v2_hw(int irq_no, void *p)
 {
-	struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
+	struct hisi_sas_cq *cq = p;
 	struct hisi_hba *hisi_hba = cq->hisi_hba;
 	struct hisi_sas_slot *slot;
 	struct hisi_sas_itct *itct;
@@ -3181,6 +3179,8 @@ static void cq_tasklet_v2_hw(unsigned long val)
 	/* update rd_point */
 	cq->rd_point = rd_point;
 	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
@@ -3191,9 +3191,7 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
 
 	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
 
-	tasklet_schedule(&cq->tasklet);
-
-	return IRQ_HANDLED;
+	return IRQ_WAKE_THREAD;
 }
 
 static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
@@ -3360,18 +3358,18 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 
 	for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) {
 		struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
-		struct tasklet_struct *t = &cq->tasklet;
 
-		irq = irq_map[queue_no + 96];
-		rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0,
-				      DRV_NAME " cq", cq);
+		cq->irq_no = irq_map[queue_no + 96];
+		rc = devm_request_threaded_irq(dev, cq->irq_no,
+					       cq_interrupt_v2_hw,
+					       cq_thread_v2_hw, IRQF_ONESHOT,
+					       DRV_NAME " cq", cq);
 		if (rc) {
 			dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
 				irq, rc);
 			rc = -ENOENT;
 			goto err_out;
 		}
-		tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
 	}
 
 	hisi_hba->cq_nvecs = hisi_hba->queue_count;
@@ -3432,7 +3430,6 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
 
 	interrupt_disable_v2_hw(hisi_hba);
 	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
-	hisi_sas_kill_tasklets(hisi_hba);
 
 	hisi_sas_stop_phys(hisi_hba);
 
@@ -3551,6 +3548,9 @@ static struct scsi_host_template sht_v2_hw = {
 	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= sas_ioctl,
+#endif
 	.shost_attrs		= host_attrs_v2_hw,
 	.host_reset		= hisi_sas_host_reset,
 };
@@ -3603,11 +3603,6 @@ static int hisi_sas_v2_probe(struct platform_device *pdev)
 
 static int hisi_sas_v2_remove(struct platform_device *pdev)
 {
-	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
-	struct hisi_hba *hisi_hba = sha->lldd_ha;
-
-	hisi_sas_kill_tasklets(hisi_hba);
-
 	return hisi_sas_remove(pdev);
 }
 
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index bf5d5f1..a2debe0 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -495,6 +495,13 @@ struct hisi_sas_err_record_v3 {
 #define BASE_VECTORS_V3_HW  16
 #define MIN_AFFINE_VECTORS_V3_HW  (BASE_VECTORS_V3_HW + 1)
 
+#define CHNL_INT_STS_MSK	0xeeeeeeee
+#define CHNL_INT_STS_PHY_MSK	0xe
+#define CHNL_INT_STS_INT0_MSK BIT(1)
+#define CHNL_INT_STS_INT1_MSK BIT(2)
+#define CHNL_INT_STS_INT2_MSK BIT(3)
+#define CHNL_WIDTH 4
+
 enum {
 	DSM_FUNC_ERR_HANDLE_MSI = 0,
 };
@@ -1819,19 +1826,19 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
 	int phy_no = 0;
 
 	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
-				& 0xeeeeeeee;
+		  & CHNL_INT_STS_MSK;
 
 	while (irq_msk) {
-		if (irq_msk & (2 << (phy_no * 4)))
+		if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH)))
 			handle_chl_int0_v3_hw(hisi_hba, phy_no);
 
-		if (irq_msk & (4 << (phy_no * 4)))
+		if (irq_msk & (CHNL_INT_STS_INT1_MSK << (phy_no * CHNL_WIDTH)))
 			handle_chl_int1_v3_hw(hisi_hba, phy_no);
 
-		if (irq_msk & (8 << (phy_no * 4)))
+		if (irq_msk & (CHNL_INT_STS_INT2_MSK << (phy_no * CHNL_WIDTH)))
 			handle_chl_int2_v3_hw(hisi_hba, phy_no);
 
-		irq_msk &= ~(0xe << (phy_no * 4));
+		irq_msk &= ~(CHNL_INT_STS_PHY_MSK << (phy_no * CHNL_WIDTH));
 		phy_no++;
 	}
 
@@ -2299,9 +2306,9 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 	return sts;
 }
 
-static void cq_tasklet_v3_hw(unsigned long val)
+static irqreturn_t  cq_thread_v3_hw(int irq_no, void *p)
 {
-	struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
+	struct hisi_sas_cq *cq = p;
 	struct hisi_hba *hisi_hba = cq->hisi_hba;
 	struct hisi_sas_slot *slot;
 	struct hisi_sas_complete_v3_hdr *complete_queue;
@@ -2338,6 +2345,8 @@ static void cq_tasklet_v3_hw(unsigned long val)
 	/* update rd_point */
 	cq->rd_point = rd_point;
 	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
@@ -2348,9 +2357,7 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
 
 	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
 
-	tasklet_schedule(&cq->tasklet);
-
-	return IRQ_HANDLED;
+	return IRQ_WAKE_THREAD;
 }
 
 static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
@@ -2365,7 +2372,7 @@ static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
 					    BASE_VECTORS_V3_HW);
 		if (!mask)
 			goto fallback;
-		cq->pci_irq_mask = mask;
+		cq->irq_mask = mask;
 		for_each_cpu(cpu, mask)
 			hisi_hba->reply_map[cpu] = queue;
 	}
@@ -2389,6 +2396,8 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
 			.pre_vectors = BASE_VECTORS_V3_HW,
 		};
 
+		dev_info(dev, "Enable MSI auto-affinity\n");
+
 		min_msi = MIN_AFFINE_VECTORS_V3_HW;
 
 		hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids,
@@ -2441,15 +2450,20 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
 		goto free_irq_vectors;
 	}
 
-	/* Init tasklets for cq only */
+	if (hisi_sas_intr_conv)
+		dev_info(dev, "Enable interrupt converge\n");
+
 	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
-		struct tasklet_struct *t = &cq->tasklet;
 		int nr = hisi_sas_intr_conv ? 16 : 16 + i;
-		unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 0;
+		unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED :
+							      IRQF_ONESHOT;
 
-		rc = devm_request_irq(dev, pci_irq_vector(pdev, nr),
-				      cq_interrupt_v3_hw, irqflags,
+		cq->irq_no = pci_irq_vector(pdev, nr);
+		rc = devm_request_threaded_irq(dev, cq->irq_no,
+				      cq_interrupt_v3_hw,
+				      cq_thread_v3_hw,
+				      irqflags,
 				      DRV_NAME " cq", cq);
 		if (rc) {
 			dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
@@ -2457,8 +2471,6 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
 			rc = -ENOENT;
 			goto free_irq_vectors;
 		}
-
-		tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
 	}
 
 	return 0;
@@ -2534,7 +2546,6 @@ static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
 
 	interrupt_disable_v3_hw(hisi_hba);
 	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
-	hisi_sas_kill_tasklets(hisi_hba);
 
 	hisi_sas_stop_phys(hisi_hba);
 
@@ -2910,7 +2921,7 @@ static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
 
 	wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);
 
-	hisi_sas_kill_tasklets(hisi_hba);
+	hisi_sas_sync_irqs(hisi_hba);
 }
 
 static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
@@ -3075,6 +3086,9 @@ static struct scsi_host_template sht_v3_hw = {
 	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= sas_ioctl,
+#endif
 	.shost_attrs		= host_attrs_v3_hw,
 	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
 	.host_reset             = hisi_sas_host_reset,
@@ -3309,7 +3323,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
 	sas_remove_host(sha->core.shost);
 
 	hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
-	hisi_sas_kill_tasklets(hisi_hba);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	hisi_sas_free(hisi_hba);
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 54b8c6f..d9e94e8 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1877,7 +1877,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
 	 */
 	struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
 	struct ibmvscsis_cmd *cmd, *nxt;
-	struct iu_entry *iue;
 	long rc = ADAPT_SUCCESS;
 	bool retry = false;
 
@@ -1931,8 +1930,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
 					 */
 					vscsi->credit += 1;
 				} else {
-					iue = cmd->iue;
-
 					crq->valid = VALID_CMD_RESP_EL;
 					crq->format = cmd->rsp.format;
 
@@ -3796,7 +3793,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
 						 se_cmd);
 	struct iu_entry *iue = cmd->iue;
 	struct scsi_info *vscsi = cmd->adapter;
-	char *sd;
 	uint len = 0;
 	int rc;
 
@@ -3804,7 +3800,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
 			       1);
 	if (rc) {
 		dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
-		sd = se_cmd->sense_buffer;
 		se_cmd->scsi_sense_length = 18;
 		memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
 		/* Logical Unit Communication Time-out asc/ascq = 0x0801 */
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 41fd64c..1d39628 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -1640,7 +1640,7 @@ static int initio_state_6(struct initio_host * host)
  *
  */
 
-int initio_state_7(struct initio_host * host)
+static int initio_state_7(struct initio_host * host)
 {
 	int cnt, i;
 
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 079c04b..ae45cbe 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6727,6 +6727,9 @@ static struct scsi_host_template driver_template = {
 	.name = "IPR",
 	.info = ipr_ioa_info,
 	.ioctl = ipr_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = ipr_ioctl,
+#endif
 	.queuecommand = ipr_queuecommand,
 	.eh_abort_handler = ipr_eh_abort,
 	.eh_device_reset_handler = ipr_eh_dev_reset,
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 1727d0c..b48aac8 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -168,6 +168,9 @@ static struct scsi_host_template isci_sht = {
 	.eh_target_reset_handler        = sas_eh_target_reset_handler,
 	.target_destroy			= sas_target_destroy,
 	.ioctl				= sas_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl			= sas_ioctl,
+#endif
 	.shost_attrs			= isci_host_attrs,
 	.track_queue_depth		= 1,
 };
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0bc63a7..b5dd1ca 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -887,6 +887,10 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
 static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct iscsi_session *session = cls_session->dd_data;
+
+	if (WARN_ON_ONCE(session->leadconn))
+		return;
 
 	iscsi_tcp_r2tpool_free(cls_session->dd_data);
 	iscsi_session_teardown(cls_session);
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index e9e0074..c5a828a 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -137,7 +137,7 @@ static void sas_ata_task_done(struct sas_task *task)
 	} else {
 		ac = sas_to_ata_err(stat);
 		if (ac) {
-			pr_warn("%s: SAS error %x\n", __func__, stat->stat);
+			pr_warn("%s: SAS error 0x%x\n", __func__, stat->stat);
 			/* We saw a SAS error. Send a vague error. */
 			if (!link->sactive) {
 				qc->err_mask = ac;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index d7302c2..daf951b 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -179,7 +179,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
 
 	res = i->dft->lldd_dev_found(dev);
 	if (res) {
-		pr_warn("driver on host %s cannot handle device %llx, error:%d\n",
+		pr_warn("driver on host %s cannot handle device %016llx, error:%d\n",
 			dev_name(sas_ha->dev),
 			SAS_ADDR(dev->sas_addr), res);
 	}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 9fdb9c9..ab671cd 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -500,7 +500,7 @@ static int sas_ex_general(struct domain_device *dev)
 		ex_assign_report_general(dev, rg_resp);
 
 		if (dev->ex_dev.configuring) {
-			pr_debug("RG: ex %llx self-configuring...\n",
+			pr_debug("RG: ex %016llx self-configuring...\n",
 				 SAS_ADDR(dev->sas_addr));
 			schedule_timeout_interruptible(5*HZ);
 		} else
@@ -881,7 +881,7 @@ static struct domain_device *sas_ex_discover_end_dev(
 
 		res = sas_discover_end_dev(child);
 		if (res) {
-			pr_notice("sas_discover_end_dev() for device %16llx at %016llx:%02d returned 0x%x\n",
+			pr_notice("sas_discover_end_dev() for device %016llx at %016llx:%02d returned 0x%x\n",
 				  SAS_ADDR(child->sas_addr),
 				  SAS_ADDR(parent->sas_addr), phy_id, res);
 			goto out_list_del;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 01f1738..1f1d019 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -107,7 +107,7 @@ static inline void sas_smp_host_handler(struct bsg_job *job,
 
 static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err)
 {
-	pr_warn("%s: for %s device %16llx returned %d\n",
+	pr_warn("%s: for %s device %016llx returned %d\n",
 		func, dev->parent ? "exp-attached" :
 		"direct-attached",
 		SAS_ADDR(dev->sas_addr), err);
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 7c86fd2..19cf418 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -165,7 +165,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
 	}
 	sas_port_add_phy(port->port, phy->phy);
 
-	pr_debug("%s added to %s, phy_mask:0x%x (%16llx)\n",
+	pr_debug("%s added to %s, phy_mask:0x%x (%016llx)\n",
 		 dev_name(&phy->phy->dev), dev_name(&port->port->dev),
 		 port->phy_mask,
 		 SAS_ADDR(port->attached_sas_addr));
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index bec83eb..9e0975e 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -330,7 +330,7 @@ static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
 
 	int_to_scsilun(cmd->device->lun, &lun);
 
-	pr_notice("eh: device %llx LUN %llx has the task\n",
+	pr_notice("eh: device %016llx LUN 0x%llx has the task\n",
 		  SAS_ADDR(dev->sas_addr),
 		  cmd->device->lun);
 
@@ -615,7 +615,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
  reset:
 			tmf_resp = sas_recover_lu(task->dev, cmd);
 			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
-				pr_notice("dev %016llx LU %llx is recovered\n",
+				pr_notice("dev %016llx LU 0x%llx is recovered\n",
 					  SAS_ADDR(task->dev),
 					  cmd->device->lun);
 				sas_eh_finish_cmd(cmd);
@@ -666,7 +666,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
 			 * of effort could recover from errors.  Quite
 			 * possibly the HA just disappeared.
 			 */
-			pr_err("error from  device %llx, LUN %llx couldn't be recovered in any way\n",
+			pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n",
 			       SAS_ADDR(task->dev->sas_addr),
 			       cmd->device->lun);
 
@@ -851,7 +851,7 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
 	if (scsi_dev->tagged_supported) {
 		scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
 	} else {
-		pr_notice("device %llx, LUN %llx doesn't support TCQ\n",
+		pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n",
 			  SAS_ADDR(dev->sas_addr), scsi_dev->lun);
 		scsi_change_queue_depth(scsi_dev, 1);
 	}
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
index 1ded7d8..e2d4259 100644
--- a/drivers/scsi/libsas/sas_task.c
+++ b/drivers/scsi/libsas/sas_task.c
@@ -27,7 +27,7 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
 		memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size);
 
 		if (iu->status != SAM_STAT_CHECK_CONDITION)
-			dev_warn(dev, "dev %llx sent sense data, but stat(%x) is not CHECK CONDITION\n",
+			dev_warn(dev, "dev %016llx sent sense data, but stat(0x%x) is not CHECK CONDITION\n",
 				 SAS_ADDR(task->dev->sas_addr), iu->status);
 	}
 	else
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 935f988..04d73e2 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1223,6 +1223,8 @@ struct lpfc_hba {
 #define LPFC_POLL_HB	1		/* slowpath heartbeat */
 #define LPFC_POLL_FASTPATH	0	/* called from fastpath */
 #define LPFC_POLL_SLOWPATH	1	/* called from slowpath */
+
+	char os_host_name[MAXHOSTNAMELEN];
 };
 
 static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4ff82b3..46f56f30 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -4123,14 +4123,13 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
 		/*
 		 * The 'topology' is not a configurable parameter if :
 		 *   - persistent topology enabled
-		 *   - G7 adapters
-		 *   - G6 with no private loop support
+		 *   - G7/G6 with no private loop support
 		 */
 
-		if (((phba->hba_flag & HBA_PERSISTENT_TOPO) ||
+		if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
 		     (!phba->sli4_hba.pc_sli4_params.pls &&
-		     phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC) ||
-		     phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+		     (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+		     phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
 		    val == 4) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 				"3114 Loop mode not supported\n");
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index ee353c8..25d3dd3 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -180,7 +180,7 @@ int lpfc_issue_gidft(struct lpfc_vport *vport);
 int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
 int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
 int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t);
-void lpfc_fdmi_num_disc_check(struct lpfc_vport *);
+void lpfc_fdmi_change_check(struct lpfc_vport *vport);
 void lpfc_delayed_disc_tmo(struct timer_list *);
 void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
 
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 99c9bb2..58b35a1 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1493,33 +1493,35 @@ int
 lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
 	size_t size)
 {
-	char fwrev[FW_REV_STR_SIZE];
-	int n;
+	char fwrev[FW_REV_STR_SIZE] = {0};
+	char tmp[MAXHOSTNAMELEN] = {0};
+
+	memset(symbol, 0, size);
+
+	scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName);
+	if (strlcat(symbol, tmp, size) >= size)
+		goto buffer_done;
 
 	lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+	scnprintf(tmp, sizeof(tmp), " FV%s", fwrev);
+	if (strlcat(symbol, tmp, size) >= size)
+		goto buffer_done;
 
-	n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
-	if (size < n)
-		return n;
+	scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version);
+	if (strlcat(symbol, tmp, size) >= size)
+		goto buffer_done;
 
-	n += scnprintf(symbol + n, size - n, " FV%s", fwrev);
-	if (size < n)
-		return n;
-
-	n += scnprintf(symbol + n, size - n, " DV%s.",
-		      lpfc_release_version);
-	if (size < n)
-		return n;
-
-	n += scnprintf(symbol + n, size - n, " HN:%s.",
-		      init_utsname()->nodename);
-	if (size < n)
-		return n;
+	scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name);
+	if (strlcat(symbol, tmp, size) >= size)
+		goto buffer_done;
 
 	/* Note :- OS name is "Linux" */
-	n += scnprintf(symbol + n, size - n, " OS:%s",
-		      init_utsname()->sysname);
-	return n;
+	scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname);
+	strlcat(symbol, tmp, size);
+
+buffer_done:
+	return strnlen(symbol, size);
+
 }
 
 static uint32_t
@@ -1998,14 +2000,16 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 
 /**
- * lpfc_fdmi_num_disc_check - Check how many mapped NPorts we are connected to
+ * lpfc_fdmi_change_check - Check for changed FDMI parameters
  * @vport: pointer to a host virtual N_Port data structure.
  *
- * Called from hbeat timeout routine to check if the number of discovered
- * ports has changed. If so, re-register thar port Attribute.
+ * Check how many mapped NPorts we are connected to
+ * Check if our hostname changed
+ * Called from hbeat timeout routine to check if any FDMI parameters
+ * changed. If so, re-register those Attributes.
  */
 void
-lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
+lpfc_fdmi_change_check(struct lpfc_vport *vport)
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_nodelist *ndlp;
@@ -2018,17 +2022,41 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
 	if (!(vport->fc_flag & FC_FABRIC))
 		return;
 
+	ndlp = lpfc_findnode_did(vport, FDMI_DID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		return;
+
+	/* Check if system hostname changed */
+	if (strcmp(phba->os_host_name, init_utsname()->nodename)) {
+		memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
+		scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
+			  init_utsname()->nodename);
+		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
+
+		/* Since this affects multiple HBA and PORT attributes, we need
+		 * de-register and go thru the whole FDMI registration cycle.
+		 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
+		 * DPRT -> RPRT (vports)
+		 */
+		if (vport->port_type == LPFC_PHYSICAL_PORT)
+			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
+		else
+			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
+
+		/* Since this code path registers all the port attributes
+		 * we can just return without further checking.
+		 */
+		return;
+	}
+
 	if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
 		return;
 
+	/* Check if the number of mapped NPorts changed */
 	cnt = lpfc_find_map_node(vport);
 	if (cnt == vport->fdmi_num_disc)
 		return;
 
-	ndlp = lpfc_findnode_did(vport, FDMI_DID);
-	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
-		return;
-
 	if (vport->port_type == LPFC_PHYSICAL_PORT) {
 		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
 			      LPFC_FDMI_PORT_ATTR_num_disc);
@@ -2616,8 +2644,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
 	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
 	memset(ae, 0, 256);
 
-	snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
-		 init_utsname()->nodename);
+	scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
+		  vport->phba->os_host_name);
 
 	len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
 	len += (len & 3) ? (4 - (len & 3)) : 4;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index a5ecbce..819335b 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2085,6 +2085,8 @@ static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
 	int copied = 0;
 	struct lpfc_dmabuf *dmabuf, *next;
 
+	memset(buffer, 0, size);
+
 	spin_lock_irq(&phba->hbalock);
 	if (phba->ras_fwlog.state != ACTIVE) {
 		spin_unlock_irq(&phba->hbalock);
@@ -2094,10 +2096,15 @@ static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
 
 	list_for_each_entry_safe(dmabuf, next,
 				 &phba->ras_fwlog.fwlog_buff_list, list) {
+		/* Check if copying will go over size and a '\0' char */
+		if ((copied + LPFC_RAS_MAX_ENTRY_SIZE) >= (size - 1)) {
+			memcpy(buffer + copied, dmabuf->virt,
+			       size - copied - 1);
+			copied += size - copied - 1;
+			break;
+		}
 		memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE);
 		copied += LPFC_RAS_MAX_ENTRY_SIZE;
-		if (size > copied)
-			break;
 	}
 	return copied;
 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 85ada3d..dcc8999 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -28,6 +28,7 @@
 #include <linux/kthread.h>
 #include <linux/interrupt.h>
 #include <linux/lockdep.h>
+#include <linux/utsname.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -3315,6 +3316,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 		lpfc_sli4_clear_fcf_rr_bmask(phba);
 	}
 
+	/* Prepare for LINK up registrations */
+	memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
+	scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
+		  init_utsname()->nodename);
 	return;
 out:
 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 25cdcbc..9a064b9 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3925,6 +3925,9 @@ struct lpfc_mbx_wr_object {
 #define LPFC_CHANGE_STATUS_FW_RESET		0x02
 #define LPFC_CHANGE_STATUS_PORT_MIGRATION	0x04
 #define LPFC_CHANGE_STATUS_PCI_RESET		0x05
+#define lpfc_wr_object_csf_SHIFT		8
+#define lpfc_wr_object_csf_MASK			0x00000001
+#define lpfc_wr_object_csf_WORD			word5
 		} response;
 	} u;
 };
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6a04fdb..5a60577 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1362,7 +1362,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 	if (vports != NULL)
 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			lpfc_rcv_seq_check_edtov(vports[i]);
-			lpfc_fdmi_num_disc_check(vports[i]);
+			lpfc_fdmi_change_check(vports[i]);
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
 
@@ -8320,14 +8320,6 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
 	phba->hba_flag |= HBA_PERSISTENT_TOPO;
 	switch (phba->pcidev->device) {
 	case PCI_DEVICE_ID_LANCER_G7_FC:
-		if (tf || (pt == LINK_FLAGS_LOOP)) {
-			/* Invalid values from FW - use driver params */
-			phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
-		} else {
-			/* Prism only supports PT2PT topology */
-			phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
-		}
-		break;
 	case PCI_DEVICE_ID_LANCER_G6_FC:
 		if (!tf) {
 			phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
@@ -10449,6 +10441,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
 	case LPFC_SLI_INTF_IF_TYPE_6:
 		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
 		iounmap(phba->sli4_hba.conf_regs_memmap_p);
+		if (phba->sli4_hba.dpp_regs_memmap_p)
+			iounmap(phba->sli4_hba.dpp_regs_memmap_p);
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_1:
 	default:
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index ae43590..a024e5a 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -308,7 +308,7 @@ lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
 				mb->mbxStatus);
 		mempool_free(login_mbox, phba->mbox_mem_pool);
 		mempool_free(link_mbox, phba->mbox_mem_pool);
-		lpfc_sli_release_iocbq(phba, save_iocb);
+		kfree(save_iocb);
 		return;
 	}
 
@@ -325,7 +325,61 @@ lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
 	}
 
 	mempool_free(link_mbox, phba->mbox_mem_pool);
-	lpfc_sli_release_iocbq(phba, save_iocb);
+	kfree(save_iocb);
+}
+
+/**
+ * lpfc_defer_tgt_acc - Progress SLI4 target rcv PLOGI handler
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function provides the unreg rpi mailbox completion handler for a tgt.
+ * The routine frees the memory resources associated with the completed
+ * mailbox command and transmits the ELS ACC.
+ *
+ * This routine is only called if we are SLI4, acting in target
+ * mode and the remote NPort issues the PLOGI after link up.
+ **/
+static void
+lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport *vport = pmb->vport;
+	struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
+	LPFC_MBOXQ_t *mbox = pmb->context3;
+	struct lpfc_iocbq *piocb = NULL;
+	int rc;
+
+	if (mbox) {
+		pmb->context3 = NULL;
+		piocb = mbox->context3;
+		mbox->context3 = NULL;
+	}
+
+	/*
+	 * Complete the unreg rpi mbx request, and update flags.
+	 * This will also restart any deferred events.
+	 */
+	lpfc_nlp_get(ndlp);
+	lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
+
+	if (!piocb) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
+				 "4578 PLOGI ACC fail\n");
+		if (mbox)
+			mempool_free(mbox, phba->mbox_mem_pool);
+		goto out;
+	}
+
+	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
+	if (rc) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
+				 "4579 PLOGI ACC fail %x\n", rc);
+		if (mbox)
+			mempool_free(mbox, phba->mbox_mem_pool);
+	}
+	kfree(piocb);
+out:
+	lpfc_nlp_put(ndlp);
 }
 
 static int
@@ -345,6 +399,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct lpfc_iocbq *save_iocb;
 	struct ls_rjt stat;
 	uint32_t vid, flag;
+	u16 rpi;
 	int rc, defer_acc;
 
 	memset(&stat, 0, sizeof (struct ls_rjt));
@@ -488,7 +543,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			link_mbox->vport = vport;
 			link_mbox->ctx_ndlp = ndlp;
 
-			save_iocb = lpfc_sli_get_iocbq(phba);
+			save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
 			if (!save_iocb)
 				goto out;
 			/* Save info from cmd IOCB used in rsp */
@@ -513,7 +568,36 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		goto out;
 
 	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
-	if (phba->sli_rev == LPFC_SLI_REV4)
+	if (phba->nvmet_support && !defer_acc) {
+		link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!link_mbox)
+			goto out;
+
+		/* As unique identifiers such as iotag would be overwritten
+		 * with those from the cmdiocb, allocate separate temporary
+		 * storage for the copy.
+		 */
+		save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
+		if (!save_iocb)
+			goto out;
+
+		/* Unreg RPI is required for SLI4. */
+		rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+		lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
+		link_mbox->vport = vport;
+		link_mbox->ctx_ndlp = ndlp;
+		link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
+
+		if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+		    (!(vport->fc_flag & FC_OFFLINE_MODE)))
+			ndlp->nlp_flag |= NLP_UNREG_INP;
+
+		/* Save info from cmd IOCB used in rsp */
+		memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
+
+		/* Delay sending ACC till unreg RPI completes. */
+		defer_acc = 1;
+	} else if (phba->sli_rev == LPFC_SLI_REV4)
 		lpfc_unreg_rpi(vport, ndlp);
 
 	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
@@ -553,6 +637,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if ((vport->port_type == LPFC_NPIV_PORT &&
 	     vport->cfg_restrict_login)) {
 
+		/* no deferred ACC */
+		kfree(save_iocb);
+
 		/* In order to preserve RPIs, we want to cleanup
 		 * the default RPI the firmware created to rcv
 		 * this ELS request. The only way to do this is
@@ -571,8 +658,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	}
 	if (defer_acc) {
 		/* So the order here should be:
-		 * Issue CONFIG_LINK mbox
-		 * CONFIG_LINK cmpl
+		 * SLI3 pt2pt
+		 *   Issue CONFIG_LINK mbox
+		 *   CONFIG_LINK cmpl
+		 * SLI4 tgt
+		 *   Issue UNREG RPI mbx
+		 *   UNREG RPI cmpl
 		 * Issue PLOGI ACC
 		 * PLOGI ACC cmpl
 		 * Issue REG_LOGIN mbox
@@ -596,10 +687,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 out:
 	if (defer_acc)
 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-				"4577 pt2pt discovery failure: %p %p %p\n",
+				"4577 discovery failure: %p %p %p\n",
 				save_iocb, link_mbox, login_mbox);
-	if (save_iocb)
-		lpfc_sli_release_iocbq(phba, save_iocb);
+	kfree(save_iocb);
 	if (link_mbox)
 		mempool_free(link_mbox, phba->mbox_mem_pool);
 	if (login_mbox)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b138d9f..2c7e0b2 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -481,7 +481,7 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
 		spin_lock(&qp->abts_io_buf_list_lock);
 		list_for_each_entry_safe(psb, next_psb,
 					 &qp->lpfc_abts_io_buf_list, list) {
-			if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME)
+			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
 				continue;
 
 			if (psb->rdata && psb->rdata->pnode &&
@@ -528,7 +528,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
 			list_del_init(&psb->list);
 			psb->flags &= ~LPFC_SBUF_XBUSY;
 			psb->status = IOSTAT_SUCCESS;
-			if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
+			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
 				qp->abts_nvme_io_bufs--;
 				spin_unlock(&qp->abts_io_buf_list_lock);
 				spin_unlock_irqrestore(&phba->hbalock, iflag);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 625c046..64002b0 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4918,8 +4918,17 @@ static int
 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
 {
 	phba->hbq_in_use = 1;
-	phba->hbqs[LPFC_ELS_HBQ].entry_count =
-		lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
+	/*
+	 * Specific case when MDS diagnostics are enabled and supported.
+	 * The receive buffer count is truncated to manage the incoming
+	 * traffic.
+	 */
+	if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
+		phba->hbqs[LPFC_ELS_HBQ].entry_count =
+			lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
+	else
+		phba->hbqs[LPFC_ELS_HBQ].entry_count =
+			lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
 	phba->hbq_count = 1;
 	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
 	/* Initially populate or replenish the HBQs */
@@ -19449,7 +19458,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
 	struct lpfc_mbx_wr_object *wr_object;
 	LPFC_MBOXQ_t *mbox;
 	int rc = 0, i = 0;
-	uint32_t shdr_status, shdr_add_status, shdr_change_status;
+	uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
 	uint32_t mbox_tmo;
 	struct lpfc_dmabuf *dmabuf;
 	uint32_t written = 0;
@@ -19506,6 +19515,16 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
 	if (check_change_status) {
 		shdr_change_status = bf_get(lpfc_wr_object_change_status,
 					    &wr_object->u.response);
+
+		if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
+		    shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
+			shdr_csf = bf_get(lpfc_wr_object_csf,
+					  &wr_object->u.response);
+			if (shdr_csf)
+				shdr_change_status =
+						   LPFC_CHANGE_STATUS_PCI_RESET;
+		}
+
 		switch (shdr_change_status) {
 		case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 9e5ff58..9563c49 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "12.6.0.2"
+#define LPFC_DRIVER_VERSION "12.6.0.3"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index bd81840..83d8c4c 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -21,8 +21,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"07.710.50.00-rc1"
-#define MEGASAS_RELDATE				"June 28, 2019"
+#define MEGASAS_VERSION				"07.713.01.00-rc1"
+#define MEGASAS_RELDATE				"Dec 27, 2019"
 
 #define MEGASAS_MSIX_NAME_LEN			32
 
@@ -2233,9 +2233,9 @@ enum MR_PD_TYPE {
 
 /* JBOD Queue depth definitions */
 #define MEGASAS_SATA_QD	32
-#define MEGASAS_SAS_QD	64
+#define MEGASAS_SAS_QD 256
 #define MEGASAS_DEFAULT_PD_QD	64
-#define MEGASAS_NVME_QD		32
+#define MEGASAS_NVME_QD        64
 
 #define MR_DEFAULT_NVME_PAGE_SIZE	4096
 #define MR_DEFAULT_NVME_PAGE_SHIFT	12
@@ -2640,10 +2640,11 @@ enum MEGASAS_OCR_CAUSE {
 };
 
 enum DCMD_RETURN_STATUS {
-	DCMD_SUCCESS		= 0,
-	DCMD_TIMEOUT		= 1,
-	DCMD_FAILED		= 2,
-	DCMD_NOT_FIRED		= 3,
+	DCMD_SUCCESS    = 0x00,
+	DCMD_TIMEOUT    = 0x01,
+	DCMD_FAILED     = 0x02,
+	DCMD_BUSY       = 0x03,
+	DCMD_INIT       = 0xff,
 };
 
 u8
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index c60cd9f..fd4b5ac 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1099,7 +1099,7 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
 			__func__, __LINE__);
-		return DCMD_NOT_FIRED;
+		return DCMD_INIT;
 	}
 
 	instance->instancet->issue_dcmd(instance, cmd);
@@ -1123,19 +1123,19 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
 			  struct megasas_cmd *cmd, int timeout)
 {
 	int ret = 0;
-	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+	cmd->cmd_status_drv = DCMD_INIT;
 
 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
 			__func__, __LINE__);
-		return DCMD_NOT_FIRED;
+		return DCMD_INIT;
 	}
 
 	instance->instancet->issue_dcmd(instance, cmd);
 
 	if (timeout) {
 		ret = wait_event_timeout(instance->int_cmd_wait_q,
-				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
+		cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
 		if (!ret) {
 			dev_err(&instance->pdev->dev,
 				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
@@ -1144,10 +1144,9 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
 		}
 	} else
 		wait_event(instance->int_cmd_wait_q,
-				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
+				cmd->cmd_status_drv != DCMD_INIT);
 
-	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
-		DCMD_SUCCESS : DCMD_FAILED;
+	return cmd->cmd_status_drv;
 }
 
 /**
@@ -1190,19 +1189,19 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
 		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
 
 	cmd->sync_cmd = 1;
-	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+	cmd->cmd_status_drv = DCMD_INIT;
 
 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
 			__func__, __LINE__);
-		return DCMD_NOT_FIRED;
+		return DCMD_INIT;
 	}
 
 	instance->instancet->issue_dcmd(instance, cmd);
 
 	if (timeout) {
 		ret = wait_event_timeout(instance->abort_cmd_wait_q,
-				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
+		cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
 		if (!ret) {
 			opcode = cmd_to_abort->frame->dcmd.opcode;
 			dev_err(&instance->pdev->dev,
@@ -1212,13 +1211,12 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
 		}
 	} else
 		wait_event(instance->abort_cmd_wait_q,
-				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
+		cmd->cmd_status_drv != DCMD_INIT);
 
 	cmd->sync_cmd = 0;
 
 	megasas_return_cmd(instance, cmd);
-	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
-		DCMD_SUCCESS : DCMD_FAILED;
+	return cmd->cmd_status_drv;
 }
 
 /**
@@ -1887,6 +1885,10 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
 
 		mr_device_priv_data->is_tm_capable =
 			raid->capability.tmCapable;
+
+		if (!raid->flags.isEPD)
+			sdev->no_write_same = 1;
+
 	} else if (instance->use_seqnum_jbod_fp) {
 		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
 			sdev->id;
@@ -2150,6 +2152,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
 
 void megaraid_sas_kill_hba(struct megasas_instance *instance)
 {
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+		dev_warn(&instance->pdev->dev,
+			 "Adapter already dead, skipping kill HBA\n");
+		return;
+	}
+
 	/* Set critical error to block I/O & ioctls in case caller didn't */
 	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
 	/* Wait 1 second to ensure IO or ioctls in build have posted */
@@ -2726,7 +2734,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
 						"reset queue\n",
 						reset_cmd);
 
-				reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+				reset_cmd->cmd_status_drv = DCMD_INIT;
 				instance->instancet->fire_cmd(instance,
 						reset_cmd->frame_phys_addr,
 						0, instance->reg_set);
@@ -3416,7 +3424,6 @@ static struct scsi_host_template megasas_template = {
 	.bios_param = megasas_bios_param,
 	.change_queue_depth = scsi_change_queue_depth,
 	.max_segment_size = 0xffffffff,
-	.no_write_same = 1,
 };
 
 /**
@@ -3432,7 +3439,11 @@ static void
 megasas_complete_int_cmd(struct megasas_instance *instance,
 			 struct megasas_cmd *cmd)
 {
-	cmd->cmd_status_drv = cmd->frame->io.cmd_status;
+	if (cmd->cmd_status_drv == DCMD_INIT)
+		cmd->cmd_status_drv =
+		(cmd->frame->io.cmd_status == MFI_STAT_OK) ?
+		DCMD_SUCCESS : DCMD_FAILED;
+
 	wake_up(&instance->int_cmd_wait_q);
 }
 
@@ -3451,7 +3462,7 @@ megasas_complete_abort(struct megasas_instance *instance,
 {
 	if (cmd->sync_cmd) {
 		cmd->sync_cmd = 0;
-		cmd->cmd_status_drv = 0;
+		cmd->cmd_status_drv = DCMD_SUCCESS;
 		wake_up(&instance->abort_cmd_wait_q);
 	}
 }
@@ -3727,7 +3738,7 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
 			dev_notice(&instance->pdev->dev, "%p synchronous cmd"
 						"on the internal reset queue,"
 						"issue it again.\n", cmd);
-			cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+			cmd->cmd_status_drv = DCMD_INIT;
 			instance->instancet->fire_cmd(instance,
 							cmd->frame_phys_addr,
 							0, instance->reg_set);
@@ -4392,7 +4403,8 @@ dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
 	if (instance->adapter_type == MFI_SERIES)
 		return KILL_ADAPTER;
 	else if (instance->unload ||
-			test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
+			test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
+				 &instance->reset_flags))
 		return IGNORE_TIMEOUT;
 	else
 		return INITIATE_OCR;
@@ -7592,7 +7604,7 @@ megasas_resume(struct pci_dev *pdev)
 	int rval;
 	struct Scsi_Host *host;
 	struct megasas_instance *instance;
-	int irq_flags = PCI_IRQ_LEGACY;
+	u32 status_reg;
 
 	instance = pci_get_drvdata(pdev);
 
@@ -7620,9 +7632,35 @@ megasas_resume(struct pci_dev *pdev)
 	/*
 	 * We expect the FW state to be READY
 	 */
-	if (megasas_transition_to_ready(instance, 0))
-		goto fail_ready_state;
 
+	if (megasas_transition_to_ready(instance, 0)) {
+		dev_info(&instance->pdev->dev,
+			 "Failed to transition controller to ready from %s!\n",
+			 __func__);
+		if (instance->adapter_type != MFI_SERIES) {
+			status_reg =
+				instance->instancet->read_fw_status_reg(instance);
+			if (!(status_reg & MFI_RESET_ADAPTER) ||
+				((megasas_adp_reset_wait_for_ready
+				(instance, true, 0)) == FAILED))
+				goto fail_ready_state;
+		} else {
+			atomic_set(&instance->fw_reset_no_pci_access, 1);
+			instance->instancet->adp_reset
+				(instance, instance->reg_set);
+			atomic_set(&instance->fw_reset_no_pci_access, 0);
+
+			/* waiting for about 30 seconds before retry */
+			ssleep(30);
+
+			if (megasas_transition_to_ready(instance, 0))
+				goto fail_ready_state;
+		}
+
+		dev_info(&instance->pdev->dev,
+			 "FW restarted successfully from %s!\n",
+			 __func__);
+	}
 	if (megasas_set_dma_mask(instance))
 		goto fail_set_dma_mask;
 
@@ -7634,16 +7672,15 @@ megasas_resume(struct pci_dev *pdev)
 	atomic_set(&instance->ldio_outstanding, 0);
 
 	/* Now re-enable MSI-X */
-	if (instance->msix_vectors) {
-		irq_flags = PCI_IRQ_MSIX;
-		if (instance->smp_affinity_enable)
-			irq_flags |= PCI_IRQ_AFFINITY;
+	if (instance->msix_vectors)
+		megasas_alloc_irq_vectors(instance);
+
+	if (!instance->msix_vectors) {
+		rval = pci_alloc_irq_vectors(instance->pdev, 1, 1,
+					     PCI_IRQ_LEGACY);
+		if (rval < 0)
+			goto fail_reenable_msix;
 	}
-	rval = pci_alloc_irq_vectors(instance->pdev, 1,
-				     instance->msix_vectors ?
-				     instance->msix_vectors : 1, irq_flags);
-	if (rval < 0)
-		goto fail_reenable_msix;
 
 	megasas_setup_reply_map(instance);
 
@@ -8036,6 +8073,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 	dma_addr_t sense_handle;
 	unsigned long *sense_ptr;
 	u32 opcode = 0;
+	int ret = DCMD_SUCCESS;
 
 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
 
@@ -8176,13 +8214,18 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 	 * cmd to the SCSI mid-layer
 	 */
 	cmd->sync_cmd = 1;
-	if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
+
+	ret = megasas_issue_blocked_cmd(instance, cmd, 0);
+	switch (ret) {
+	case DCMD_INIT:
+	case DCMD_BUSY:
 		cmd->sync_cmd = 0;
 		dev_err(&instance->pdev->dev,
 			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
-			__func__, __LINE__, cmd->frame->hdr.cmd, opcode,
-			cmd->cmd_status_drv);
-		return -EBUSY;
+			 __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
+			 cmd->cmd_status_drv);
+		error = -EBUSY;
+		goto out;
 	}
 
 	cmd->sync_cmd = 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index e301458..f3b36fd 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -364,6 +364,35 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
 		instance->max_fw_cmds = instance->max_fw_cmds-1;
 	}
 }
+
+static inline void
+megasas_get_msix_index(struct megasas_instance *instance,
+		       struct scsi_cmnd *scmd,
+		       struct megasas_cmd_fusion *cmd,
+		       u8 data_arms)
+{
+	int sdev_busy;
+
+	/* nr_hw_queue = 1 for MegaRAID */
+	struct blk_mq_hw_ctx *hctx =
+		scmd->device->request_queue->queue_hw_ctx[0];
+
+	sdev_busy = atomic_read(&hctx->nr_active);
+
+	if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
+	    sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+		cmd->request_desc->SCSIIO.MSIxIndex =
+			mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
+					MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
+	else if (instance->msix_load_balance)
+		cmd->request_desc->SCSIIO.MSIxIndex =
+			(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
+				instance->msix_vectors));
+	else
+		cmd->request_desc->SCSIIO.MSIxIndex =
+			instance->reply_map[raw_smp_processor_id()];
+}
+
 /**
  * megasas_free_cmds_fusion -	Free all the cmds in the free cmd pool
  * @instance:		Adapter soft state
@@ -1312,7 +1341,9 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
 	}
 
 	if (ret == DCMD_TIMEOUT)
-		megaraid_sas_kill_hba(instance);
+		dev_warn(&instance->pdev->dev,
+			 "%s DCMD timed out, continue without JBOD sequence map\n",
+			 __func__);
 
 	if (ret == DCMD_SUCCESS)
 		instance->pd_seq_map_id++;
@@ -1394,7 +1425,9 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
 		ret = megasas_issue_polled(instance, cmd);
 
 	if (ret == DCMD_TIMEOUT)
-		megaraid_sas_kill_hba(instance);
+		dev_warn(&instance->pdev->dev,
+			 "%s DCMD timed out, RAID map is disabled\n",
+			 __func__);
 
 	megasas_return_cmd(instance, cmd);
 
@@ -2825,19 +2858,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 			fp_possible = (io_info.fpOkForIo > 0) ? true : false;
 	}
 
-	if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
-		atomic_read(&scp->device->device_busy) >
-		(io_info.data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
-		cmd->request_desc->SCSIIO.MSIxIndex =
-			mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
-				MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
-	else if (instance->msix_load_balance)
-		cmd->request_desc->SCSIIO.MSIxIndex =
-			(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
-				    instance->msix_vectors));
-	else
-		cmd->request_desc->SCSIIO.MSIxIndex =
-			instance->reply_map[raw_smp_processor_id()];
+	megasas_get_msix_index(instance, scp, cmd, io_info.data_arms);
 
 	if (instance->adapter_type >= VENTURA_SERIES) {
 		/* FP for Optimal raid level 1.
@@ -3158,18 +3179,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
 
 	cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
 
-	if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
-		atomic_read(&scmd->device->device_busy) > MR_DEVICE_HIGH_IOPS_DEPTH)
-		cmd->request_desc->SCSIIO.MSIxIndex =
-			mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
-				MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
-	else if (instance->msix_load_balance)
-		cmd->request_desc->SCSIIO.MSIxIndex =
-			(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
-				    instance->msix_vectors));
-	else
-		cmd->request_desc->SCSIIO.MSIxIndex =
-			instance->reply_map[raw_smp_processor_id()];
+	megasas_get_msix_index(instance, scmd, cmd, 1);
 
 	if (!fp_possible) {
 		/* system pd firmware path */
@@ -4219,7 +4229,8 @@ void  megasas_reset_reply_desc(struct megasas_instance *instance)
  * megasas_refire_mgmt_cmd :	Re-fire management commands
  * @instance:				Controller's soft instance
 */
-static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
+void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
+			     bool return_ioctl)
 {
 	int j;
 	struct megasas_cmd_fusion *cmd_fusion;
@@ -4283,6 +4294,16 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
 			break;
 		}
 
+		if (return_ioctl && cmd_mfi->sync_cmd &&
+		    cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
+			dev_err(&instance->pdev->dev,
+				"return -EBUSY from %s %d cmd 0x%x opcode 0x%x\n",
+				__func__, __LINE__, cmd_mfi->frame->hdr.cmd,
+				le32_to_cpu(cmd_mfi->frame->dcmd.opcode));
+			cmd_mfi->cmd_status_drv = DCMD_BUSY;
+			result = COMPLETE_CMD;
+		}
+
 		switch (result) {
 		case REFIRE_CMD:
 			megasas_fire_cmd_fusion(instance, req_desc);
@@ -4298,6 +4319,37 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
 }
 
 /*
+ * megasas_return_polled_cmds: Return polled mode commands back to the pool
+ *			       before initiating an OCR.
+ * @instance:                  Controller's soft instance
+ */
+static void
+megasas_return_polled_cmds(struct megasas_instance *instance)
+{
+	int i;
+	struct megasas_cmd_fusion *cmd_fusion;
+	struct fusion_context *fusion;
+	struct megasas_cmd *cmd_mfi;
+
+	fusion = instance->ctrl_context;
+
+	for (i = instance->max_scsi_cmds; i < instance->max_fw_cmds; i++) {
+		cmd_fusion = fusion->cmd_list[i];
+		cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+
+		if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
+			if (megasas_dbg_lvl & OCR_DEBUG)
+				dev_info(&instance->pdev->dev,
+					 "%s %d return cmd 0x%x opcode 0x%x\n",
+					 __func__, __LINE__, cmd_mfi->frame->hdr.cmd,
+					 le32_to_cpu(cmd_mfi->frame->dcmd.opcode));
+			cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
+			megasas_return_cmd(instance, cmd_mfi);
+		}
+	}
+}
+
+/*
  * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
  * @instance: per adapter struct
  * @channel: the channel assigned by the OS
@@ -4847,6 +4899,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
 		del_timer_sync(&instance->sriov_heartbeat_timer);
 	set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+	set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
 	instance->instancet->disable_intr(instance);
 	megasas_sync_irqs((unsigned long)instance);
@@ -4951,7 +5004,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
 				goto kill_hba;
 			}
 
-			megasas_refire_mgmt_cmd(instance);
+			megasas_refire_mgmt_cmd(instance,
+						(i == (MEGASAS_FUSION_MAX_RESET_TRIES - 1)
+							? 1 : 0));
 
 			/* Reset load balance info */
 			if (fusion->load_balance_info)
@@ -4959,8 +5014,16 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
 				       (sizeof(struct LD_LOAD_BALANCE_INFO) *
 				       MAX_LOGICAL_DRIVES_EXT));
 
-			if (!megasas_get_map_info(instance))
+			if (!megasas_get_map_info(instance)) {
 				megasas_sync_map_info(instance);
+			} else {
+				/*
+				 * Return pending polled mode cmds before
+				 * retrying OCR
+				 */
+				megasas_return_polled_cmds(instance);
+				continue;
+			}
 
 			megasas_setup_jbod_map(instance);
 
@@ -4987,6 +5050,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
 				megasas_set_dynamic_target_properties(sdev, is_target_prop);
 			}
 
+			status_reg = instance->instancet->read_fw_status_reg
+					(instance);
+			abs_state = status_reg & MFI_STATE_MASK;
+			if (abs_state != MFI_STATE_OPERATIONAL) {
+				dev_info(&instance->pdev->dev,
+					 "Adapter is not OPERATIONAL, state 0x%x for scsi:%d\n",
+					 abs_state, instance->host->host_no);
+				goto out;
+			}
 			atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
 
 			dev_info(&instance->pdev->dev,
@@ -5046,7 +5118,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
 	instance->skip_heartbeat_timer_del = 1;
 	retval = FAILED;
 out:
-	clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+	clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
 	mutex_unlock(&instance->reset_mutex);
 	return retval;
 }
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index c013c80..d57ecc7 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -89,6 +89,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
 
 #define MEGASAS_FP_CMD_LEN	16
 #define MEGASAS_FUSION_IN_RESET 0
+#define MEGASAS_FUSION_OCR_NOT_POSSIBLE 1
 #define RAID_1_PEER_CMDS 2
 #define JBOD_MAPS_COUNT	2
 #define MEGASAS_REDUCE_QD_COUNT 64
@@ -864,9 +865,20 @@ struct MR_LD_RAID {
 	u8	regTypeReqOnRead;
 	__le16     seqNum;
 
-	struct {
-		u32 ldSyncRequired:1;
-		u32 reserved:31;
+	struct {
+#ifndef MFI_BIG_ENDIAN
+		u32 ldSyncRequired:1;
+		u32 regTypeReqOnReadIsValid:1;
+		u32 isEPD:1;
+		u32 enableSLDOnAllRWIOs:1;
+		u32 reserved:28;
+#else
+		u32 reserved:28;
+		u32 enableSLDOnAllRWIOs:1;
+		u32 isEPD:1;
+		u32 regTypeReqOnReadIsValid:1;
+		u32 ldSyncRequired:1;
+#endif
 	} flags;
 
 	u8	LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 18b1e31..ed3923f 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -122,6 +122,9 @@
  * 08-28-18  02.00.53  Bumped MPI2_HEADER_VERSION_UNIT.
  *                     Added MPI2_IOCSTATUS_FAILURE
  * 12-17-18  02.00.54  Bumped MPI2_HEADER_VERSION_UNIT
+ * 06-24-19  02.00.55  Bumped MPI2_HEADER_VERSION_UNIT
+ * 08-01-19  02.00.56  Bumped MPI2_HEADER_VERSION_UNIT
+ * 10-02-19  02.00.57  Bumped MPI2_HEADER_VERSION_UNIT
  *  --------------------------------------------------------------------------
  */
 
@@ -162,7 +165,7 @@
 
 
 /* Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x36)
+#define MPI2_HEADER_VERSION_UNIT            (0x39)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
@@ -181,6 +184,7 @@
 #define MPI2_IOC_STATE_READY               (0x10000000)
 #define MPI2_IOC_STATE_OPERATIONAL         (0x20000000)
 #define MPI2_IOC_STATE_FAULT               (0x40000000)
+#define MPI2_IOC_STATE_COREDUMP            (0x50000000)
 
 #define MPI2_IOC_STATE_MASK                (0xF0000000)
 #define MPI2_IOC_STATE_SHIFT               (28)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 3a6871a..43a3bf8 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -249,6 +249,8 @@
  * 08-28-18  02.00.46  Added NVMs Write Cache flag to IOUnitPage1
  *                     Added DMDReport Delay Time defines to PCIeIOUnitPage1
  * 12-17-18  02.00.47  Swap locations of Slotx2 and Slotx4 in ManPage 7.
+ * 08-01-19  02.00.49  Add MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID
+ *                     Add MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT
  */
 
 #ifndef MPI2_CNFG_H
@@ -891,6 +893,8 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 {
 #define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER       (0x00000002)
 #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO                (0x00000001)
 
+#define MPI26_MANPAGE7_FLAG_CONN_LANE_USE_PINOUT        (0x00000020)
+#define MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID       (0x00000010)
 
 /*
  *Generic structure to use for product-specific manufacturing pages
@@ -962,9 +966,10 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
 
 /* IO Unit Page 1 Flags defines */
 #define MPI26_IOUNITPAGE1_NVME_WRCACHE_MASK             (0x00030000)
-#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE           (0x00000000)
-#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE          (0x00010000)
-#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE        (0x00020000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT            (16)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE        (0x00000000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE           (0x00010000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE          (0x00020000)
 #define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK       (0x00004000)
 #define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE  (0x00002000)
 #define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH             (0x00001000)
@@ -3931,7 +3936,13 @@ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 {
 	U32	MaximumDataTransferSize;	/*0x0C */
 	U32	Capabilities;		/*0x10 */
 	U16	NOIOB;		/* 0x14 */
-	U16	Reserved2;		/* 0x16 */
+	U16     ShutdownLatency;        /* 0x16 */
+	U16     VendorID;               /* 0x18 */
+	U16     DeviceID;               /* 0x1A */
+	U16     SubsystemVendorID;      /* 0x1C */
+	U16     SubsystemID;            /* 0x1E */
+	U8      RevisionID;             /* 0x20 */
+	U8      Reserved21[3];          /* 0x21 */
 } MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2,
 	Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t;
 
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
index a3f6778..33b9c3a 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_image.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
@@ -19,6 +19,10 @@
  * 09-07-18  02.06.03  Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
  * 12-17-18  02.06.04  Addd MPI2_EXT_IMAGE_TYPE_PBLP
  *			Shorten some defines to be compatible with DOS
+ * 06-24-19  02.06.05  Whitespace adjustments to help with identifier
+ *			checking tool.
+ * 10-02-19  02.06.06  Added MPI26_IMAGE_HEADER_SIG1_COREDUMP
+ *                     Added MPI2_FLASH_REGION_COREDUMP
  */
 #ifndef MPI2_IMAGE_H
 #define MPI2_IMAGE_H
@@ -213,6 +217,8 @@ typedef struct _MPI26_COMPONENT_IMAGE_HEADER {
 #define MPI26_IMAGE_HEADER_SIG1_NVDATA                   (0x5444564E)
 #define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE                (0x20534147)
 #define MPI26_IMAGE_HEADER_SIG1_PBLP                     (0x504C4250)
+/* little-endian "DUMP" */
+#define MPI26_IMAGE_HEADER_SIG1_COREDUMP                 (0x504D5544)
 
 /**** Definitions for Signature2 field ****/
 #define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE                    (0x50584546)
@@ -359,6 +365,7 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA {
 #define MPI2_FLASH_REGION_MR_NVDATA             (0x14)
 #define MPI2_FLASH_REGION_CPLD                  (0x15)
 #define MPI2_FLASH_REGION_PSOC                  (0x16)
+#define MPI2_FLASH_REGION_COREDUMP              (0x17)
 
 /*ImageRevision */
 #define MPI2_FLASH_LAYOUT_IMAGE_REVISION        (0x00)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index 68ea408..e83c7c5 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -175,6 +175,10 @@
  *                     Moved FW image definitions ionto new mpi2_image,h
  * 08-14-18   02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16)
  * 09-07-18   02.00.37 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
+ * 10-02-19   02.00.38 Added MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE
+ *                     Added MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED
+ *                     Added MPI2_FW_DOWNLOAD_ITYPE_COREDUMP
+ *                     Added MPI2_FW_UPLOAD_ITYPE_COREDUMP
  * --------------------------------------------------------------------------
  */
 
@@ -248,6 +252,7 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
 
 /*ConfigurationFlags */
 #define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT  (0x0001)
+#define MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE  (0x0002)
 
 /*minimum depth for a Reply Descriptor Post Queue */
 #define MPI2_RDPQ_DEPTH_MIN                     (16)
@@ -377,6 +382,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
 /*ProductID field uses MPI2_FW_HEADER_PID_ */
 
 /*IOCCapabilities */
+#define MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED      (0x00200000)
 #define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV            (0x00100000)
 #define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ            (0x00080000)
 #define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE     (0x00040000)
@@ -1458,8 +1464,8 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
 /*MPI v2.6 and newer */
 #define MPI2_FW_DOWNLOAD_ITYPE_CPLD                 (0x15)
 #define MPI2_FW_DOWNLOAD_ITYPE_PSOC                 (0x16)
+#define MPI2_FW_DOWNLOAD_ITYPE_COREDUMP             (0x17)
 #define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
-#define MPI2_FW_DOWNLOAD_ITYPE_TERMINATE            (0xFF)
 
 /*MPI v2.0 FWDownload TransactionContext Element */
 typedef struct _MPI2_FW_DOWNLOAD_TCSGE {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 45fd8df..663782b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -124,7 +124,14 @@ enum mpt3sas_perf_mode {
 };
 
 static int
+_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
+		u32 ioc_state, int timeout);
+static int
 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
+static void
+_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
+static void
+_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
 
 /**
  * mpt3sas_base_check_cmd_timeout - Function
@@ -609,7 +616,8 @@ _base_fault_reset_work(struct work_struct *work)
 
 
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-	if (ioc->shost_recovery || ioc->pci_error_recovery)
+	if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
+			ioc->pci_error_recovery)
 		goto rearm_timer;
 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
@@ -656,20 +664,64 @@ _base_fault_reset_work(struct work_struct *work)
 		return; /* don't rearm timer */
 	}
 
-	ioc->non_operational_loop = 0;
+	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
+		u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
+		    ioc->manu_pg11.CoreDumpTOSec :
+		    MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
 
+		timeout /= (FAULT_POLLING_INTERVAL/1000);
+
+		if (ioc->ioc_coredump_loop == 0) {
+			mpt3sas_print_coredump_info(ioc,
+			    doorbell & MPI2_DOORBELL_DATA_MASK);
+			/* do not accept any IOs and disable the interrupts */
+			spin_lock_irqsave(
+			    &ioc->ioc_reset_in_progress_lock, flags);
+			ioc->shost_recovery = 1;
+			spin_unlock_irqrestore(
+			    &ioc->ioc_reset_in_progress_lock, flags);
+			_base_mask_interrupts(ioc);
+			_base_clear_outstanding_commands(ioc);
+		}
+
+		ioc_info(ioc, "%s: CoreDump loop %d.\n",
+		    __func__, ioc->ioc_coredump_loop);
+
+		/* Wait until CoreDump completes or times out */
+		if (ioc->ioc_coredump_loop++ < timeout) {
+			spin_lock_irqsave(
+			    &ioc->ioc_reset_in_progress_lock, flags);
+			goto rearm_timer;
+		}
+	}
+
+	if (ioc->ioc_coredump_loop) {
+		if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
+			ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d\n",
+			    __func__, ioc->ioc_coredump_loop);
+		else
+			ioc_err(ioc, "%s: CoreDump timed out. LoopCount: %d\n",
+			    __func__, ioc->ioc_coredump_loop);
+		ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
+	}
+	ioc->non_operational_loop = 0;
 	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 		ioc_warn(ioc, "%s: hard reset: %s\n",
 			 __func__, rc == 0 ? "success" : "failed");
 		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
-		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
-			mpt3sas_base_fault_info(ioc, doorbell &
+		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+			mpt3sas_print_fault_code(ioc, doorbell &
+			    MPI2_DOORBELL_DATA_MASK);
+		} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+		    MPI2_IOC_STATE_COREDUMP)
+			mpt3sas_print_coredump_info(ioc, doorbell &
 			    MPI2_DOORBELL_DATA_MASK);
 		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
 		    MPI2_IOC_STATE_OPERATIONAL)
 			return; /* don't rearm timer */
 	}
+	ioc->ioc_coredump_loop = 0;
 
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
  rearm_timer:
@@ -749,6 +801,49 @@ mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
 }
 
 /**
+ * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
+ * @ioc: per adapter object
+ * @fault_code: fault code
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
+{
+	ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
+}
+
+/**
+ * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
+ * completes or times out
+ * @ioc: per adapter object
+ * @caller: caller function name
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
+		const char *caller)
+{
+	u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
+			ioc->manu_pg11.CoreDumpTOSec :
+			MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
+
+	int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
+					timeout);
+
+	if (ioc_state)
+		ioc_err(ioc,
+		    "%s: CoreDump timed out. (ioc_state=0x%x)\n",
+		    caller, ioc_state);
+	else
+		ioc_info(ioc,
+		    "%s: CoreDump completed. (ioc_state=0x%x)\n",
+		    caller, ioc_state);
+
+	return ioc_state;
+}
+
+/**
  * mpt3sas_halt_firmware - halt's mpt controller firmware
  * @ioc: per adapter object
  *
@@ -768,9 +863,14 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
 	dump_stack();
 
 	doorbell = ioc->base_readl(&ioc->chip->Doorbell);
-	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
-		mpt3sas_base_fault_info(ioc , doorbell);
-	else {
+	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+		mpt3sas_print_fault_code(ioc, doorbell &
+		    MPI2_DOORBELL_DATA_MASK);
+	} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+	    MPI2_IOC_STATE_COREDUMP) {
+		mpt3sas_print_coredump_info(ioc, doorbell &
+		    MPI2_DOORBELL_DATA_MASK);
+	} else {
 		writel(0xC0FFEE00, &ioc->chip->Doorbell);
 		ioc_err(ioc, "Firmware is halted due to command timeout\n");
 	}
@@ -3103,6 +3203,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 	 */
 	if (!ioc->combined_reply_queue &&
 	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+		ioc_info(ioc,
+		    "combined ReplyQueue is off, Enabling msix load balance\n");
 		ioc->msix_load_balance = true;
 	}
 
@@ -3115,9 +3217,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 
 	r = _base_alloc_irq_vectors(ioc);
 	if (r < 0) {
-		dfailprintk(ioc,
-			    ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
-				     r));
+		ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
 		goto try_ioapic;
 	}
 
@@ -3206,9 +3306,15 @@ _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
 	dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
 
 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
-		mpt3sas_base_fault_info(ioc, ioc_state &
+		mpt3sas_print_fault_code(ioc, ioc_state &
 		    MPI2_DOORBELL_DATA_MASK);
 		rc = _base_diag_reset(ioc);
+	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+	    MPI2_IOC_STATE_COREDUMP) {
+		mpt3sas_print_coredump_info(ioc, ioc_state &
+		     MPI2_DOORBELL_DATA_MASK);
+		mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
+		rc = _base_diag_reset(ioc);
 	}
 
 	return rc;
@@ -3279,7 +3385,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 	}
 
 	if (ioc->chip == NULL) {
-		ioc_err(ioc, "unable to map adapter memory! or resource not found\n");
+		ioc_err(ioc,
+		    "unable to map adapter memory! or resource not found\n");
 		r = -EINVAL;
 		goto out_fail;
 	}
@@ -3318,8 +3425,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 		     ioc->combined_reply_index_count,
 		     sizeof(resource_size_t *), GFP_KERNEL);
 		if (!ioc->replyPostRegisterIndex) {
-			dfailprintk(ioc,
-				    ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
+			ioc_err(ioc,
+			    "allocation for replyPostRegisterIndex failed!\n");
 			r = -ENOMEM;
 			goto out_fail;
 		}
@@ -3467,6 +3574,22 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
 }
 
 /**
+ * _base_sdev_nr_inflight_request - get number of inflight requests
+ *				   of a request queue.
+ * @q: request_queue object
+ *
+ * Returns the number of inflight requests of a request queue.
+ */
+inline unsigned long
+_base_sdev_nr_inflight_request(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0];
+
+	return atomic_read(&hctx->nr_active);
+}
+
+
+/**
  * _base_get_high_iops_msix_index - get the msix index of
  *				high iops queues
  * @ioc: per adapter object
@@ -3485,7 +3608,7 @@ _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
 	 * reply queues in terms of batch count 16 when outstanding
 	 * IOs on the target device is >=8.
 	 */
-	if (atomic_read(&scmd->device->device_busy) >
+	if (_base_sdev_nr_inflight_request(scmd->device->request_queue) >
 	    MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
 		return base_mod64((
 		    atomic64_add_return(1, &ioc->high_iops_outstanding) /
@@ -4264,7 +4387,8 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
 	fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
 			&fwpkg_data_dma, GFP_KERNEL);
 	if (!fwpkg_data) {
-		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+		ioc_err(ioc,
+		    "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
 			__FILE__, __LINE__, __func__);
 		return -ENOMEM;
 	}
@@ -4994,12 +5118,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
 	}
 
-	dinitprintk(ioc,
-		    ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
-			     ioc->max_sges_in_main_message,
-			     ioc->max_sges_in_chain_message,
-			     ioc->shost->sg_tablesize,
-			     ioc->chains_needed_per_io));
+	ioc_info(ioc,
+	    "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
+	    "sge_per_io(%d), chains_per_io(%d)\n",
+	    ioc->max_sges_in_main_message,
+	    ioc->max_sges_in_chain_message,
+	    ioc->shost->sg_tablesize,
+	    ioc->chains_needed_per_io);
 
 	/* reply post queue, 16 byte align */
 	reply_post_free_sz = ioc->reply_post_queue_depth *
@@ -5109,15 +5234,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
 	    ioc->request_sz);
 
-	dinitprintk(ioc,
-		    ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
-			     ioc->request, ioc->hba_queue_depth,
-			     ioc->request_sz,
-			     (ioc->hba_queue_depth * ioc->request_sz) / 1024));
+	ioc_info(ioc,
+	    "request pool(0x%p) - dma(0x%llx): "
+	    "depth(%d), frame_size(%d), pool_size(%d kB)\n",
+	    ioc->request, (unsigned long long) ioc->request_dma,
+	    ioc->hba_queue_depth, ioc->request_sz,
+	    (ioc->hba_queue_depth * ioc->request_sz) / 1024);
 
-	dinitprintk(ioc,
-		    ioc_info(ioc, "request pool: dma(0x%llx)\n",
-			     (unsigned long long)ioc->request_dma));
 	total_sz += sz;
 
 	dinitprintk(ioc,
@@ -5302,13 +5425,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 			goto out;
 		}
 	}
-	dinitprintk(ioc,
-		    ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
-			     ioc->sense, ioc->scsiio_depth,
-			     SCSI_SENSE_BUFFERSIZE, sz / 1024));
-	dinitprintk(ioc,
-		    ioc_info(ioc, "sense_dma(0x%llx)\n",
-			     (unsigned long long)ioc->sense_dma));
+	ioc_info(ioc,
+	    "sense pool(0x%p) - dma(0x%llx): depth(%d), "
+	    "element_size(%d), pool_size(%d kB)\n",
+	    ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
+	    SCSI_SENSE_BUFFERSIZE, sz / 1024);
+
 	total_sz += sz;
 
 	/* reply pool, 4 byte align */
@@ -5386,12 +5508,10 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		ioc_err(ioc, "config page: dma_pool_alloc failed\n");
 		goto out;
 	}
-	dinitprintk(ioc,
-		    ioc_info(ioc, "config page(0x%p): size(%d)\n",
-			     ioc->config_page, ioc->config_page_sz));
-	dinitprintk(ioc,
-		    ioc_info(ioc, "config_page_dma(0x%llx)\n",
-			     (unsigned long long)ioc->config_page_dma));
+
+	ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
+	    ioc->config_page, (unsigned long long)ioc->config_page_dma,
+	    ioc->config_page_sz);
 	total_sz += ioc->config_page_sz;
 
 	ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
@@ -5446,6 +5566,8 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
 			return 0;
 		if (count && current_state == MPI2_IOC_STATE_FAULT)
 			break;
+		if (count && current_state == MPI2_IOC_STATE_COREDUMP)
+			break;
 
 		usleep_range(1000, 1500);
 		count++;
@@ -5547,7 +5669,12 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
 			doorbell = ioc->base_readl(&ioc->chip->Doorbell);
 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
 			    MPI2_IOC_STATE_FAULT) {
-				mpt3sas_base_fault_info(ioc , doorbell);
+				mpt3sas_print_fault_code(ioc, doorbell);
+				return -EFAULT;
+			}
+			if ((doorbell & MPI2_IOC_STATE_MASK) ==
+			    MPI2_IOC_STATE_COREDUMP) {
+				mpt3sas_print_coredump_info(ioc, doorbell);
 				return -EFAULT;
 			}
 		} else if (int_status == 0xFFFFFFFF)
@@ -5609,6 +5736,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
 {
 	u32 ioc_state;
 	int r = 0;
+	unsigned long flags;
 
 	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
 		ioc_err(ioc, "%s: unknown reset_type\n", __func__);
@@ -5627,6 +5755,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
 		r = -EFAULT;
 		goto out;
 	}
+
 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
 	if (ioc_state) {
 		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -5635,6 +5764,26 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
 		goto out;
 	}
  out:
+	if (r != 0) {
+		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+		spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+		/*
+		 * Wait for IOC state CoreDump to clear only during
+		 * HBA initialization & release time.
+		 */
+		if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+		    MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
+		    ioc->fault_reset_work_q == NULL)) {
+			spin_unlock_irqrestore(
+			    &ioc->ioc_reset_in_progress_lock, flags);
+			mpt3sas_print_coredump_info(ioc, ioc_state);
+			mpt3sas_base_wait_for_coredump_completion(ioc,
+			    __func__);
+			spin_lock_irqsave(
+			    &ioc->ioc_reset_in_progress_lock, flags);
+		}
+		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+	}
 	ioc_info(ioc, "message unit reset: %s\n",
 		 r == 0 ? "SUCCESS" : "FAILED");
 	return r;
@@ -5782,7 +5931,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 		mfp = (__le32 *)reply;
 		pr_info("\toffset:data\n");
 		for (i = 0; i < reply_bytes/4; i++)
-			pr_info("\t[0x%02x]:%08x\n", i*4,
+			ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
 			    le32_to_cpu(mfp[i]));
 	}
 	return 0;
@@ -5850,10 +5999,9 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
 	    ioc->ioc_link_reset_in_progress)
 		ioc->ioc_link_reset_in_progress = 0;
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
-		issue_reset =
-			mpt3sas_base_check_cmd_timeout(ioc,
-				ioc->base_cmds.status, mpi_request,
-				sizeof(Mpi2SasIoUnitControlRequest_t)/4);
+		mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
+		    mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
+		    issue_reset);
 		goto issue_host_reset;
 	}
 	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -5926,10 +6074,9 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
 	wait_for_completion_timeout(&ioc->base_cmds.done,
 	    msecs_to_jiffies(10000));
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
-		issue_reset =
-			mpt3sas_base_check_cmd_timeout(ioc,
-				ioc->base_cmds.status, mpi_request,
-				sizeof(Mpi2SepRequest_t)/4);
+		mpt3sas_check_cmd_timeout(ioc,
+		    ioc->base_cmds.status, mpi_request,
+		    sizeof(Mpi2SepRequest_t)/4, issue_reset);
 		goto issue_host_reset;
 	}
 	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -6028,9 +6175,15 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
 	}
 
 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
-		mpt3sas_base_fault_info(ioc, ioc_state &
+		mpt3sas_print_fault_code(ioc, ioc_state &
 		    MPI2_DOORBELL_DATA_MASK);
 		goto issue_diag_reset;
+	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+	    MPI2_IOC_STATE_COREDUMP) {
+		ioc_info(ioc,
+		    "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
+		    __func__, ioc_state);
+		return -EFAULT;
 	}
 
 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
@@ -6209,6 +6362,12 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
 		    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
 	}
 
+	/*
+	 * Set the flag to enable CoreDump state feature in IOC firmware.
+	 */
+	mpi_request.ConfigurationFlags |=
+	    cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);
+
 	/* This time stamp specifies number of milliseconds
 	 * since epoch ~ midnight January 1, 1970.
 	 */
@@ -6220,9 +6379,9 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
 		int i;
 
 		mfp = (__le32 *)&mpi_request;
-		pr_info("\toffset:data\n");
+		ioc_info(ioc, "\toffset:data\n");
 		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
-			pr_info("\t[0x%02x]:%08x\n", i*4,
+			ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
 			    le32_to_cpu(mfp[i]));
 	}
 
@@ -6592,8 +6751,11 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 		/* wait 100 msec */
 		msleep(100);
 
-		if (count++ > 20)
+		if (count++ > 20) {
+			ioc_info(ioc,
+			    "Stop writing magic sequence after 20 retries\n");
 			goto out;
+		}
 
 		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
 		drsprintk(ioc,
@@ -6617,8 +6779,11 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 
 		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
 
-		if (host_diagnostic == 0xFFFFFFFF)
+		if (host_diagnostic == 0xFFFFFFFF) {
+			ioc_info(ioc,
+			    "Invalid host diagnostic register value\n");
 			goto out;
+		}
 		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
 			break;
 
@@ -6705,16 +6870,33 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
 		return 0;
 
 	if (ioc_state & MPI2_DOORBELL_USED) {
-		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
+		ioc_info(ioc, "unexpected doorbell active!\n");
 		goto issue_diag_reset;
 	}
 
 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
-		mpt3sas_base_fault_info(ioc, ioc_state &
+		mpt3sas_print_fault_code(ioc, ioc_state &
 		    MPI2_DOORBELL_DATA_MASK);
 		goto issue_diag_reset;
 	}
 
+	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
+		/*
+		 * If a host reset is invoked while the watchdog thread is
+		 * waiting for the IOC state to change to Fault, the driver
+		 * has to wait here for the CoreDump state to clear; otherwise
+		 * the reset would be issued to the FW and the FW would move
+		 * the IOC to reset state without copying the FW logs to the
+		 * coredump region.
+		 */
+		if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
+			mpt3sas_print_coredump_info(ioc, ioc_state &
+			    MPI2_DOORBELL_DATA_MASK);
+			mpt3sas_base_wait_for_coredump_completion(ioc,
+			    __func__);
+		}
+		goto issue_diag_reset;
+	}
+
 	if (type == FORCE_BIG_HAMMER)
 		goto issue_diag_reset;
 
@@ -6958,8 +7140,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
 	ioc->reply_queue_count = 1;
 	if (!ioc->cpu_msix_table) {
-		dfailprintk(ioc,
-			    ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
+		ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
 		r = -ENOMEM;
 		goto out_free_resources;
 	}
@@ -6968,8 +7149,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
 		    sizeof(resource_size_t *), GFP_KERNEL);
 		if (!ioc->reply_post_host_index) {
-			dfailprintk(ioc,
-				    ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
+			ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
 			r = -ENOMEM;
 			goto out_free_resources;
 		}
@@ -7195,6 +7375,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	    sizeof(struct mpt3sas_facts));
 
 	ioc->non_operational_loop = 0;
+	ioc->ioc_coredump_loop = 0;
 	ioc->got_task_abort_from_ioctl = 0;
 	return 0;
 
@@ -7276,14 +7457,14 @@ static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 }
 
 /**
- * _base_after_reset_handler - after reset handler
+ * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
  * @ioc: per adapter object
  */
-static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+static void
+_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
 {
-	mpt3sas_scsih_after_reset_handler(ioc);
-	mpt3sas_ctl_after_reset_handler(ioc);
-	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
+	dtmprintk(ioc,
+	    ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
 	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
 		ioc->transport_cmds.status |= MPT3_CMD_RESET;
 		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
@@ -7317,6 +7498,17 @@ static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 }
 
 /**
+ * _base_clear_outstanding_commands - clear all outstanding commands
+ * @ioc: per adapter object
+ */
+static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
+{
+	mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
+	mpt3sas_ctl_clear_outstanding_ioctls(ioc);
+	_base_clear_outstanding_mpt_commands(ioc);
+}
+
+/**
  * _base_reset_done_handler - reset done handler
  * @ioc: per adapter object
  */
@@ -7474,7 +7666,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
 	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
 		is_trigger = 1;
 		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
-		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
+		    (ioc_state & MPI2_IOC_STATE_MASK) ==
+		    MPI2_IOC_STATE_COREDUMP)
 			is_fault = 1;
 	}
 	_base_pre_reset_handler(ioc);
@@ -7483,7 +7677,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
 	r = _base_make_ioc_ready(ioc, type);
 	if (r)
 		goto out;
-	_base_after_reset_handler(ioc);
+	_base_clear_outstanding_commands(ioc);
 
 	/* If this hard reset is called while port enable is active, then
 	 * there is no reason to call make_ioc_operational
@@ -7514,9 +7708,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
 		_base_reset_done_handler(ioc);
 
  out:
-	dtmprintk(ioc,
-		  ioc_info(ioc, "%s: %s\n",
-			   __func__, r == 0 ? "SUCCESS" : "FAILED"));
+	ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");
 
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 	ioc->shost_recovery = 0;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 4ebf81e..e719715 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
 #define MPT3SAS_DRIVER_NAME		"mpt3sas"
 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION	"LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION		"32.100.00.00"
-#define MPT3SAS_MAJOR_VERSION		32
+#define MPT3SAS_DRIVER_VERSION		"33.100.00.00"
+#define MPT3SAS_MAJOR_VERSION		33
 #define MPT3SAS_MINOR_VERSION		100
 #define MPT3SAS_BUILD_VERSION		0
 #define MPT3SAS_RELEASE_VERSION	00
@@ -90,6 +90,10 @@
 #define MPT2SAS_BUILD_VERSION		0
 #define MPT2SAS_RELEASE_VERSION	00
 
+/* CoreDump: Default timeout */
+#define MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS	(15) /*15 seconds*/
+#define MPT3SAS_COREDUMP_LOOP_DONE                     (0xFF)
+
 /*
  * Set MPT3SAS_SG_DEPTH value based on user input.
  */
@@ -140,6 +144,7 @@
 #define MAX_CHAIN_ELEMT_SZ		16
 #define DEFAULT_NUM_FWCHAIN_ELEMTS	8
 
+#define IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT 6
 #define FW_IMG_HDR_READ_TIMEOUT	15
 
 #define IOC_OPERATIONAL_WAIT_COUNT	10
@@ -398,7 +403,10 @@ struct Mpi2ManufacturingPage11_t {
 	u8	HostTraceBufferFlags;		/* 4Fh */
 	u16	HostTraceBufferMaxSizeKB;	/* 50h */
 	u16	HostTraceBufferMinSizeKB;	/* 52h */
-	__le32	Reserved10[2];			/* 54h - 5Bh */
+	u8	CoreDumpTOSec;			/* 54h */
+	u8	Reserved8;			/* 55h */
+	u16	Reserved9;			/* 56h */
+	__le32	Reserved10;			/* 58h */
 };
 
 /**
@@ -589,6 +597,7 @@ static inline void sas_device_put(struct _sas_device *s)
  * @connector_name: ASCII value of the Connector's name
  * @serial_number: pointer of serial number string allocated runtime
  * @access_status: Device's Access Status
+ * @shutdown_latency: NVMe device's RTD3 Entry Latency
  * @refcount: reference count for deletion
  */
 struct _pcie_device {
@@ -611,6 +620,7 @@ struct _pcie_device {
 	u8	*serial_number;
 	u8	reset_timeout;
 	u8	access_status;
+	u16	shutdown_latency;
 	struct kref refcount;
 };
 /**
@@ -1045,6 +1055,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
  * @cpu_msix_table: table for mapping cpus to msix index
  * @cpu_msix_table_sz: table size
  * @total_io_cnt: Gives total IO count, used to load balance the interrupts
+ * @ioc_coredump_loop: will have non-zero value when FW is in CoreDump state
  * @high_iops_outstanding: used to load balance the interrupts
  *				within high iops reply queues
  * @msix_load_balance: Enables load balancing of interrupts across
@@ -1073,6 +1084,10 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
  * @event_context: unique id for each logged event
  * @event_log: event log pointer
  * @event_masks: events that are masked
+ * @max_shutdown_latency: timeout value for the NVMe shutdown operation,
+ *			equal to the maximum RTD3 Entry Latency reported
+ *			among the attached NVMe drives.
  * @facts: static facts data
  * @prev_fw_facts: previous fw facts data
  * @pfacts: static port facts data
@@ -1231,6 +1246,7 @@ struct MPT3SAS_ADAPTER {
 	u32		ioc_reset_count;
 	MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
 	u32             non_operational_loop;
+	u8              ioc_coredump_loop;
 	atomic64_t      total_io_cnt;
 	atomic64_t	high_iops_outstanding;
 	bool            msix_load_balance;
@@ -1283,7 +1299,7 @@ struct MPT3SAS_ADAPTER {
 
 	u8		tm_custom_handling;
 	u8		nvme_abort_timeout;
-
+	u16		max_shutdown_latency;
 
 	/* static config pages */
 	struct mpt3sas_facts facts;
@@ -1531,6 +1547,17 @@ void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc,
 u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked);
 
 void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code);
+#define mpt3sas_print_fault_code(ioc, fault_code) \
+do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \
+	mpt3sas_base_fault_info(ioc, fault_code); } while (0)
+
+void mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code);
+#define mpt3sas_print_coredump_info(ioc, fault_code) \
+do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \
+	mpt3sas_base_coredump_info(ioc, fault_code); } while (0)
+
+int mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
+		const char *caller);
 int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
 	Mpi2SasIoUnitControlReply_t *mpi_reply,
 	Mpi2SasIoUnitControlRequest_t *mpi_request);
@@ -1552,6 +1579,11 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc);
 
 u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
 	u8 status, void *mpi_request, int sz);
+#define mpt3sas_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \
+do {	ioc_err(ioc, "In func: %s\n", __func__); \
+	issue_reset = mpt3sas_base_check_cmd_timeout(ioc, \
+	status, mpi_request, sz); } while (0)
+
 int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count);
 
 /* scsih shared API */
@@ -1560,7 +1592,8 @@ struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
 u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
 	u32 reply);
 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
-void mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_scsih_clear_outstanding_scsi_tm_commands(
+	struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
 
 int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
@@ -1694,7 +1727,7 @@ void mpt3sas_ctl_exit(ushort hbas_to_enumerate);
 u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 	u32 reply);
 void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
-void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
 u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc,
 	u8 msix_index, u32 reply);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 14a1a27..62ddf53 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -101,9 +101,6 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	Mpi2ConfigRequest_t *mpi_request;
 	char *desc = NULL;
 
-	if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
-		return;
-
 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 	switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) {
 	case MPI2_CONFIG_PAGETYPE_IO_UNIT:
@@ -269,7 +266,8 @@ mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 		    mpi_reply->MsgLength*4);
 	}
 	ioc->config_cmds.status &= ~MPT3_CMD_PENDING;
-	_config_display_some_debug(ioc, smid, "config_done", mpi_reply);
+	if (ioc->logging_level & MPT_DEBUG_CONFIG)
+		_config_display_some_debug(ioc, smid, "config_done", mpi_reply);
 	ioc->config_cmds.smid = USHRT_MAX;
 	complete(&ioc->config_cmds.done);
 	return 1;
@@ -305,6 +303,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 	u8 retry_count, issue_host_reset = 0;
 	struct config_request mem;
 	u32 ioc_status = UINT_MAX;
+	u8 issue_reset = 0;
 
 	mutex_lock(&ioc->config_cmds.mutex);
 	if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
@@ -378,14 +377,18 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 	config_request = mpt3sas_base_get_msg_frame(ioc, smid);
 	ioc->config_cmds.smid = smid;
 	memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
-	_config_display_some_debug(ioc, smid, "config_request", NULL);
+	if (ioc->logging_level & MPT_DEBUG_CONFIG)
+		_config_display_some_debug(ioc, smid, "config_request", NULL);
 	init_completion(&ioc->config_cmds.done);
 	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
 	if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
-		mpt3sas_base_check_cmd_timeout(ioc,
-			ioc->config_cmds.status, mpi_request,
-			sizeof(Mpi2ConfigRequest_t)/4);
+		if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+			_config_display_some_debug(ioc,
+			    smid, "config_request", NULL);
+		mpt3sas_check_cmd_timeout(ioc,
+		    ioc->config_cmds.status, mpi_request,
+		    sizeof(Mpi2ConfigRequest_t)/4, issue_reset);
 		retry_count++;
 		if (ioc->config_cmds.smid == smid)
 			mpt3sas_base_free_smid(ioc, smid);
@@ -404,8 +407,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 		/* Reply Frame Sanity Checks to workaround FW issues */
 		if ((mpi_request->Header.PageType & 0xF) !=
 		    (mpi_reply->Header.PageType & 0xF)) {
+			if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+				_config_display_some_debug(ioc,
+				    smid, "config_request", NULL);
 			_debug_dump_mf(mpi_request, ioc->request_sz/4);
-			_debug_dump_reply(mpi_reply, ioc->request_sz/4);
+			_debug_dump_reply(mpi_reply, ioc->reply_sz/4);
 			panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
 			      ioc->name, __func__,
 			      mpi_request->Header.PageType & 0xF,
@@ -415,8 +421,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 		if (((mpi_request->Header.PageType & 0xF) ==
 		    MPI2_CONFIG_PAGETYPE_EXTENDED) &&
 		    mpi_request->ExtPageType != mpi_reply->ExtPageType) {
+			if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+				_config_display_some_debug(ioc,
+				    smid, "config_request", NULL);
 			_debug_dump_mf(mpi_request, ioc->request_sz/4);
-			_debug_dump_reply(mpi_reply, ioc->request_sz/4);
+			_debug_dump_reply(mpi_reply, ioc->reply_sz/4);
 			panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
 			      ioc->name, __func__,
 			      mpi_request->ExtPageType,
@@ -439,8 +448,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 		if (p) {
 			if ((mpi_request->Header.PageType & 0xF) !=
 			    (p[3] & 0xF)) {
+				if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+					_config_display_some_debug(ioc,
+					    smid, "config_request", NULL);
 				_debug_dump_mf(mpi_request, ioc->request_sz/4);
-				_debug_dump_reply(mpi_reply, ioc->request_sz/4);
+				_debug_dump_reply(mpi_reply, ioc->reply_sz/4);
 				_debug_dump_config(p, min_t(u16, mem.sz,
 				    config_page_sz)/4);
 				panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
@@ -452,8 +464,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 			if (((mpi_request->Header.PageType & 0xF) ==
 			    MPI2_CONFIG_PAGETYPE_EXTENDED) &&
 			    (mpi_request->ExtPageType != p[6])) {
+				if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+					_config_display_some_debug(ioc,
+					    smid, "config_request", NULL);
 				_debug_dump_mf(mpi_request, ioc->request_sz/4);
-				_debug_dump_reply(mpi_reply, ioc->request_sz/4);
+				_debug_dump_reply(mpi_reply, ioc->reply_sz/4);
 				_debug_dump_config(p, min_t(u16, mem.sz,
 				    config_page_sz)/4);
 				panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 6874cf0..62e5528 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -180,6 +180,12 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	case MPI2_FUNCTION_SMP_PASSTHROUGH:
 		desc = "smp_passthrough";
 		break;
+	case MPI2_FUNCTION_TOOLBOX:
+		desc = "toolbox";
+		break;
+	case MPI2_FUNCTION_NVME_ENCAPSULATED:
+		desc = "nvme_encapsulated";
+		break;
 	}
 
 	if (!desc)
@@ -478,14 +484,15 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 }
 
 /**
- * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * mpt3sas_ctl_clear_outstanding_ioctls - clears outstanding ioctl cmds.
  * @ioc: per adapter object
  *
  * The handler for doing any required cleanup or initialization.
  */
-void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc)
 {
-	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
+	dtmprintk(ioc,
+	    ioc_info(ioc, "%s: clear outstanding ioctl cmd\n", __func__));
 	if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
 		ioc->ctl_cmds.status |= MPT3_CMD_RESET;
 		mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
@@ -1021,10 +1028,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 		ioc->ignore_loginfos = 0;
 	}
 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
-		issue_reset =
-			mpt3sas_base_check_cmd_timeout(ioc,
-				ioc->ctl_cmds.status, mpi_request,
-				karg.data_sge_offset);
+		mpt3sas_check_cmd_timeout(ioc,
+		    ioc->ctl_cmds.status, mpi_request,
+		    karg.data_sge_offset, issue_reset);
 		goto issue_host_reset;
 	}
 
@@ -1325,7 +1331,8 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 				 __func__));
 
 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
-	ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
+	ioc_info(ioc,
+	    "Ioctl: host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
 	return 0;
 }
 
@@ -1733,10 +1740,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
-		issue_reset =
-			mpt3sas_base_check_cmd_timeout(ioc,
-				ioc->ctl_cmds.status, mpi_request,
-				sizeof(Mpi2DiagBufferPostRequest_t)/4);
+		mpt3sas_check_cmd_timeout(ioc,
+		    ioc->ctl_cmds.status, mpi_request,
+		    sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
 		goto issue_host_reset;
 	}
 
@@ -2108,6 +2114,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
 	u16 ioc_status;
 	u32 ioc_state;
 	int rc;
+	u8 reset_needed = 0;
 
 	dctlprintk(ioc, ioc_info(ioc, "%s\n",
 				 __func__));
@@ -2115,6 +2122,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
 	rc = 0;
 	*issue_reset = 0;
 
+
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
 		if (ioc->diag_buffer_status[buffer_type] &
@@ -2157,9 +2165,10 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
-		*issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
-				ioc->ctl_cmds.status, mpi_request,
-				sizeof(Mpi2DiagReleaseRequest_t)/4);
+		mpt3sas_check_cmd_timeout(ioc,
+		    ioc->ctl_cmds.status, mpi_request,
+		    sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
+		 *issue_reset = reset_needed;
 		rc = -EFAULT;
 		goto out;
 	}
@@ -2417,10 +2426,9 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
-		issue_reset =
-			mpt3sas_base_check_cmd_timeout(ioc,
-				ioc->ctl_cmds.status, mpi_request,
-				sizeof(Mpi2DiagBufferPostRequest_t)/4);
+		mpt3sas_check_cmd_timeout(ioc,
+		    ioc->ctl_cmds.status, mpi_request,
+		    sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
 		goto issue_host_reset;
 	}
 
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index a038be8..c597d54 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1050,6 +1050,34 @@ mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 }
 
 /**
+ * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
+ * @ioc: per adapter object
+ * Context: This function will acquire ioc->pcie_device_lock
+ *
+ * Update ioc->max_shutdown_latency to the highest RTD3 Entry Latency
+ * reported among all available NVMe drives.
+ * The minimum max_shutdown_latency is six seconds.
+ */
+static void
+_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
+{
+	struct _pcie_device *pcie_device;
+	unsigned long flags;
+	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
+
+	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
+		if (pcie_device->shutdown_latency) {
+			if (shutdown_latency < pcie_device->shutdown_latency)
+				shutdown_latency =
+					pcie_device->shutdown_latency;
+		}
+	}
+	ioc->max_shutdown_latency = shutdown_latency;
+	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
+
+/**
  * _scsih_pcie_device_remove - remove pcie_device from list.
  * @ioc: per adapter object
  * @pcie_device: the pcie_device object
@@ -1063,6 +1091,7 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
 {
 	unsigned long flags;
 	int was_on_pcie_device_list = 0;
+	u8 update_latency = 0;
 
 	if (!pcie_device)
 		return;
@@ -1082,11 +1111,21 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
 		list_del_init(&pcie_device->list);
 		was_on_pcie_device_list = 1;
 	}
+	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
+		update_latency = 1;
 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 	if (was_on_pcie_device_list) {
 		kfree(pcie_device->serial_number);
 		pcie_device_put(pcie_device);
 	}
+
+	/*
+	 * This device's RTD3 Entry Latency matches IOC's
+	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
+	 * from the available drives as the current drive is being removed.
+	 */
+	if (update_latency)
+		_scsih_set_nvme_max_shutdown_latency(ioc);
 }
 
 
@@ -1101,6 +1140,7 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	struct _pcie_device *pcie_device;
 	unsigned long flags;
 	int was_on_pcie_device_list = 0;
+	u8 update_latency = 0;
 
 	if (ioc->shost_recovery)
 		return;
@@ -1113,12 +1153,22 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 			was_on_pcie_device_list = 1;
 			pcie_device_put(pcie_device);
 		}
+		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
+			update_latency = 1;
 	}
 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 	if (was_on_pcie_device_list) {
 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
 		pcie_device_put(pcie_device);
 	}
+
+	/*
+	 * This device's RTD3 Entry Latency matches IOC's
+	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
+	 * from the available drives as the current drive is being removed.
+	 */
+	if (update_latency)
+		_scsih_set_nvme_max_shutdown_latency(ioc);
 }
 
 /**
@@ -1554,7 +1604,12 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
 		max_depth = 1;
 	if (qdepth > max_depth)
 		qdepth = max_depth;
-	return scsi_change_queue_depth(sdev, qdepth);
+	scsi_change_queue_depth(sdev, qdepth);
+	sdev_printk(KERN_INFO, sdev,
+	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
+	    sdev->queue_depth, sdev->tagged_supported,
+	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
+	return sdev->queue_depth;
 }
 
 /**
@@ -2673,6 +2728,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
 	u16 smid = 0;
 	u32 ioc_state;
 	int rc;
+	u8 issue_reset = 0;
 
 	lockdep_assert_held(&ioc->tm_cmds.mutex);
 
@@ -2695,7 +2751,13 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
 	}
 
 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
-		mpt3sas_base_fault_info(ioc, ioc_state &
+		mpt3sas_print_fault_code(ioc, ioc_state &
+		    MPI2_DOORBELL_DATA_MASK);
+		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+		return (!rc) ? SUCCESS : FAILED;
+	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+	    MPI2_IOC_STATE_COREDUMP) {
+		mpt3sas_print_coredump_info(ioc, ioc_state &
 		    MPI2_DOORBELL_DATA_MASK);
 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 		return (!rc) ? SUCCESS : FAILED;
@@ -2726,9 +2788,10 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
 	ioc->put_smid_hi_priority(ioc, smid, msix_task);
 	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
 	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
-		if (mpt3sas_base_check_cmd_timeout(ioc,
-			ioc->tm_cmds.status, mpi_request,
-			sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
+		mpt3sas_check_cmd_timeout(ioc,
+		    ioc->tm_cmds.status, mpi_request,
+		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
+		if (issue_reset) {
 			rc = mpt3sas_base_hard_reset_handler(ioc,
 					FORCE_BIG_HAMMER);
 			rc = (!rc) ? SUCCESS : FAILED;
@@ -2875,15 +2938,17 @@ scsih_abort(struct scsi_cmnd *scmd)
 
 	u8 timeout = 30;
 	struct _pcie_device *pcie_device = NULL;
-	sdev_printk(KERN_INFO, scmd->device,
-		"attempting task abort! scmd(%p)\n", scmd);
+	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
+	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
+	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
+	    (scmd->request->timeout / HZ) * 1000);
 	_scsih_tm_display_info(ioc, scmd);
 
 	sas_device_priv_data = scmd->device->hostdata;
 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
 	    ioc->remove_host) {
 		sdev_printk(KERN_INFO, scmd->device,
-			"device been deleted! scmd(%p)\n", scmd);
+		    "device been deleted! scmd(0x%p)\n", scmd);
 		scmd->result = DID_NO_CONNECT << 16;
 		scmd->scsi_done(scmd);
 		r = SUCCESS;
@@ -2892,6 +2957,8 @@ scsih_abort(struct scsi_cmnd *scmd)
 
 	/* check for completed command */
 	if (st == NULL || st->cb_idx == 0xFF) {
+		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
+		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
 		scmd->result = DID_RESET << 16;
 		r = SUCCESS;
 		goto out;
@@ -2920,7 +2987,7 @@ scsih_abort(struct scsi_cmnd *scmd)
 	if (r == SUCCESS && st->cb_idx != 0xFF)
 		r = FAILED;
  out:
-	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
 	if (pcie_device)
 		pcie_device_put(pcie_device);
@@ -2949,14 +3016,14 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
 
 	sdev_printk(KERN_INFO, scmd->device,
-		"attempting device reset! scmd(%p)\n", scmd);
+	    "attempting device reset! scmd(0x%p)\n", scmd);
 	_scsih_tm_display_info(ioc, scmd);
 
 	sas_device_priv_data = scmd->device->hostdata;
 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
 	    ioc->remove_host) {
 		sdev_printk(KERN_INFO, scmd->device,
-			"device been deleted! scmd(%p)\n", scmd);
+		    "device been deleted! scmd(0x%p)\n", scmd);
 		scmd->result = DID_NO_CONNECT << 16;
 		scmd->scsi_done(scmd);
 		r = SUCCESS;
@@ -2996,7 +3063,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
 	if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
 		r = FAILED;
  out:
-	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
+	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
 
 	if (sas_device)
@@ -3027,15 +3094,15 @@ scsih_target_reset(struct scsi_cmnd *scmd)
 	struct scsi_target *starget = scmd->device->sdev_target;
 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
 
-	starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
-		scmd);
+	starget_printk(KERN_INFO, starget,
+	    "attempting target reset! scmd(0x%p)\n", scmd);
 	_scsih_tm_display_info(ioc, scmd);
 
 	sas_device_priv_data = scmd->device->hostdata;
 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
 	    ioc->remove_host) {
-		starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
-			scmd);
+		starget_printk(KERN_INFO, starget,
+		    "target been deleted! scmd(0x%p)\n", scmd);
 		scmd->result = DID_NO_CONNECT << 16;
 		scmd->scsi_done(scmd);
 		r = SUCCESS;
@@ -3074,7 +3141,7 @@ scsih_target_reset(struct scsi_cmnd *scmd)
 	if (r == SUCCESS && atomic_read(&starget->target_busy))
 		r = FAILED;
  out:
-	starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
+	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
 
 	if (sas_device)
@@ -3097,7 +3164,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
 	int r, retval;
 
-	ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
+	ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
 	scsi_print_command(scmd);
 
 	if (ioc->is_driver_loading || ioc->remove_host) {
@@ -3109,7 +3176,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 	r = (retval < 0) ? FAILED : SUCCESS;
 out:
-	ioc_info(ioc, "host reset: %s scmd(%p)\n",
+	ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
 		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
 
 	return r;
@@ -4475,6 +4542,7 @@ static void
 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
 	Mpi2EventDataTemperature_t *event_data)
 {
+	u32 doorbell;
 	if (ioc->temp_sensors_count >= event_data->SensorNum) {
 		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
 			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
@@ -4484,6 +4552,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
 			event_data->SensorNum);
 		ioc_err(ioc, "Current Temp In Celsius: %d\n",
 			event_data->CurrentTemperature);
+		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+			doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+			if ((doorbell & MPI2_IOC_STATE_MASK) ==
+			    MPI2_IOC_STATE_FAULT) {
+				mpt3sas_print_fault_code(ioc,
+				    doorbell & MPI2_DOORBELL_DATA_MASK);
+			} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+			    MPI2_IOC_STATE_COREDUMP) {
+				mpt3sas_print_coredump_info(ioc,
+				    doorbell & MPI2_DOORBELL_DATA_MASK);
+			}
+		}
 	}
 }
 
@@ -6933,6 +7013,16 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	    le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
 		pcie_device->nvme_mdts =
 		    le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
+		pcie_device->shutdown_latency =
+			le16_to_cpu(pcie_device_pg2.ShutdownLatency);
+		/*
+		 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
+		 * if drive's RTD3 Entry Latency is greater than IOC's
+		 * max_shutdown_latency.
+		 */
+		if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
+			ioc->max_shutdown_latency =
+				pcie_device->shutdown_latency;
 		if (pcie_device_pg2.ControllerResetTO)
 			pcie_device->reset_timeout =
 			    pcie_device_pg2.ControllerResetTO;
@@ -7669,10 +7759,9 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
 
 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
-		issue_reset =
-			mpt3sas_base_check_cmd_timeout(ioc,
-				ioc->scsih_cmds.status, mpi_request,
-				sizeof(Mpi2RaidActionRequest_t)/4);
+		mpt3sas_check_cmd_timeout(ioc,
+		    ioc->scsih_cmds.status, mpi_request,
+		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
 		rc = -EFAULT;
 		goto out;
 	}
@@ -9272,15 +9361,17 @@ void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 }
 
 /**
- * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
+ *							scsi & tm cmds.
  * @ioc: per adapter object
  *
  * The handler for doing any required cleanup or initialization.
  */
 void
-mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
 {
-	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
+	dtmprintk(ioc,
+	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
 	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
 		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
 		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
@@ -9357,6 +9448,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
 		}
 		_scsih_remove_unresponding_devices(ioc);
 		_scsih_scan_for_devices_after_reset(ioc);
+		_scsih_set_nvme_max_shutdown_latency(ioc);
 		break;
 	case MPT3SAS_PORT_ENABLE_COMPLETE:
 		ioc->start_scan = 0;
@@ -9660,6 +9752,75 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
 }
 
 /**
+ * _scsih_nvme_shutdown - NVMe shutdown notification
+ * @ioc: per adapter object
+ *
+ * Send an IoUnitControl request with the shutdown operation code to alert the
+ * IOC that the host system is shutting down, so that the IOC can issue an NVMe
+ * shutdown to the NVMe drives attached to it.
+ */
+static void
+_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi26IoUnitControlRequest_t *mpi_request;
+	Mpi26IoUnitControlReply_t *mpi_reply;
+	u16 smid;
+
+	/* are there any NVMe devices ? */
+	if (list_empty(&ioc->pcie_device_list))
+		return;
+
+	mutex_lock(&ioc->scsih_cmds.mutex);
+
+	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
+		goto out;
+	}
+
+	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+	if (!smid) {
+		ioc_err(ioc,
+		    "%s: failed obtaining a smid\n", __func__);
+		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+		goto out;
+	}
+
+	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+	ioc->scsih_cmds.smid = smid;
+	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
+	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
+	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
+
+	init_completion(&ioc->scsih_cmds.done);
+	ioc->put_smid_default(ioc, smid);
+	/* Wait for max_shutdown_latency seconds */
+	ioc_info(ioc,
+		"Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
+		ioc->max_shutdown_latency);
+	wait_for_completion_timeout(&ioc->scsih_cmds.done,
+			ioc->max_shutdown_latency*HZ);
+
+	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+		ioc_err(ioc, "%s: timeout\n", __func__);
+		goto out;
+	}
+
+	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+		mpi_reply = ioc->scsih_cmds.reply;
+		ioc_info(ioc, "Io Unit Control shutdown (complete):"
+			"ioc_status(0x%04x), loginfo(0x%08x)\n",
+			le16_to_cpu(mpi_reply->IOCStatus),
+			le32_to_cpu(mpi_reply->IOCLogInfo));
+	}
+ out:
+	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+	mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+
+/**
  * _scsih_ir_shutdown - IR shutdown notification
  * @ioc: per adapter object
  *
@@ -9851,6 +10012,7 @@ scsih_shutdown(struct pci_dev *pdev)
 				&ioc->ioc_pg1_copy);
 
 	_scsih_ir_shutdown(ioc);
+	_scsih_nvme_shutdown(ioc);
 	mpt3sas_base_detach(ioc);
 }
 
@@ -10533,6 +10695,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
 	ioc->logging_level = logging_level;
 	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
+	/* Host waits for minimum of six seconds */
+	ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
 	/*
 	 * Enable MEMORY MOVE support flag.
 	 */
@@ -10681,6 +10845,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 	mpt3sas_base_stop_watchdog(ioc);
 	flush_scheduled_work();
 	scsi_block_requests(shost);
+	_scsih_nvme_shutdown(ioc);
 	device_state = pci_choose_state(pdev, state);
 	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
 		 pdev, pci_name(pdev), device_state);
@@ -10715,7 +10880,7 @@ scsih_resume(struct pci_dev *pdev)
 	r = mpt3sas_base_map_resources(ioc);
 	if (r)
 		return r;
-
+	ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
 	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
 	scsi_unblock_requests(shost);
 	mpt3sas_base_start_watchdog(ioc);
@@ -10784,6 +10949,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
 	if (rc)
 		return PCI_ERS_RESULT_DISCONNECT;
 
+	ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
 	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 
 	ioc_warn(ioc, "hard reset: %s\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 5324662..6ec5b7f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -719,11 +719,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
 		sas_device_put(sas_device);
 	}
 
-	if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
-		dev_printk(KERN_INFO, &rphy->dev,
-			"add: handle(0x%04x), sas_addr(0x%016llx)\n",
-			handle, (unsigned long long)
-		    mpt3sas_port->remote_identify.sas_address);
+	dev_info(&rphy->dev,
+	    "add: handle(0x%04x), sas_addr(0x%016llx)\n", handle,
+	    (unsigned long long)mpt3sas_port->remote_identify.sas_address);
+
 	mpt3sas_port->rphy = rphy;
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
 	list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list);
@@ -813,6 +812,8 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
 	}
 	if (!ioc->remove_host)
 		sas_port_delete(mpt3sas_port->port);
+	ioc_info(ioc, "%s: removed: sas_addr(0x%016llx)\n",
+	    __func__, (unsigned long long)sas_address);
 	kfree(mpt3sas_port);
 }
 
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index da719b0..7af9173 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -47,6 +47,9 @@ static struct scsi_host_template mvs_sht = {
 	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= sas_ioctl,
+#endif
 	.shost_attrs		= mvst_host_attrs,
 	.track_queue_depth	= 1,
 };
diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h
index 9289c19..fb8eacf 100644
--- a/drivers/scsi/myrb.h
+++ b/drivers/scsi/myrb.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
  * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
  *
  * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h
index e6702ee8..9f6696d 100644
--- a/drivers/scsi/myrs.h
+++ b/drivers/scsi/myrs.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
  * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
  *
  * This driver supports the newer, SCSI-based firmware interface only.
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index ff618ad..3c6076e 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -101,6 +101,9 @@ static struct scsi_host_template pm8001_sht = {
 	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= sas_ioctl,
+#endif
 	.shost_attrs		= pm8001_host_attrs,
 	.track_queue_depth	= 1,
 };
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 98dcdbd..d1d95f1 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2377,7 +2377,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 				ts->buf_valid_size = sizeof(*resp);
 			} else
 				PM8001_IO_DBG(pm8001_ha,
-					pm8001_printk("response to large\n"));
+					pm8001_printk("response too large\n"));
 		}
 		if (pm8001_dev)
 			pm8001_dev->running_req--;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 832af42..3337cd34 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1699,6 +1699,16 @@ qla1280_load_firmware_pio(struct scsi_qla_host *ha)
 	return err;
 }
 
+#ifdef QLA_64BIT_PTR
+#define LOAD_CMD	MBC_LOAD_RAM_A64_ROM
+#define DUMP_CMD	MBC_DUMP_RAM_A64_ROM
+#define CMD_ARGS	(BIT_7 | BIT_6 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0)
+#else
+#define LOAD_CMD	MBC_LOAD_RAM
+#define DUMP_CMD	MBC_DUMP_RAM
+#define CMD_ARGS	(BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0)
+#endif
+
 #define DUMP_IT_BACK 0		/* for debug of RISC loading */
 static int
 qla1280_load_firmware_dma(struct scsi_qla_host *ha)
@@ -1748,7 +1758,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
 		for(i = 0; i < cnt; i++)
 			((__le16 *)ha->request_ring)[i] = fw_data[i];
 
-		mb[0] = MBC_LOAD_RAM;
+		mb[0] = LOAD_CMD;
 		mb[1] = risc_address;
 		mb[4] = cnt;
 		mb[3] = ha->request_dma & 0xffff;
@@ -1759,8 +1769,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
 				__func__, mb[0],
 				(void *)(long)ha->request_dma,
 				mb[6], mb[7], mb[2], mb[3]);
-		err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
-				BIT_1 | BIT_0, mb);
+		err = qla1280_mailbox_command(ha, CMD_ARGS, mb);
 		if (err) {
 			printk(KERN_ERR "scsi(%li): Failed to load partial "
 			       "segment of f\n", ha->host_no);
@@ -1768,7 +1777,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
 		}
 
 #if DUMP_IT_BACK
-		mb[0] = MBC_DUMP_RAM;
+		mb[0] = DUMP_CMD;
 		mb[1] = risc_address;
 		mb[4] = cnt;
 		mb[3] = p_tbuf & 0xffff;
@@ -1776,8 +1785,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
 		mb[7] = upper_32_bits(p_tbuf) & 0xffff;
 		mb[6] = upper_32_bits(p_tbuf) >> 16;
 
-		err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
-				BIT_1 | BIT_0, mb);
+		err = qla1280_mailbox_command(ha, CMD_ARGS, mb);
 		if (err) {
 			printk(KERN_ERR
 			       "Failed to dump partial segment of f/w\n");
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index a1a8aef..e7820b5 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -277,6 +277,8 @@ struct device_reg {
 #define MBC_MAILBOX_REGISTER_TEST	6	/* Wrap incoming mailboxes */
 #define MBC_VERIFY_CHECKSUM		7	/* Verify checksum */
 #define MBC_ABOUT_FIRMWARE		8	/* Get firmware revision */
+#define MBC_LOAD_RAM_A64_ROM		9	/* Load RAM 64bit ROM version */
+#define MBC_DUMP_RAM_A64_ROM		0x0a	/* Dump RAM 64bit ROM version */
 #define MBC_INIT_REQUEST_QUEUE		0x10	/* Initialize request queue */
 #define MBC_INIT_RESPONSE_QUEUE		0x11	/* Initialize response queue */
 #define MBC_EXECUTE_IOCB		0x12	/* Execute IOCB command */
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index cbaf178..d7169e4 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -54,7 +54,8 @@ void qla2x00_bsg_sp_free(srb_t *sp)
 	if (sp->type == SRB_CT_CMD ||
 	    sp->type == SRB_FXIOCB_BCMD ||
 	    sp->type == SRB_ELS_CMD_HST)
-		kfree(sp->fcport);
+		qla2x00_free_fcport(sp->fcport);
+
 	qla2x00_rel_sp(sp);
 }
 
@@ -405,7 +406,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
 
 done_free_fcport:
 	if (bsg_request->msgcode == FC_BSG_RPT_ELS)
-		kfree(fcport);
+		qla2x00_free_fcport(fcport);
 done:
 	return rval;
 }
@@ -545,7 +546,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
 	return rval;
 
 done_free_fcport:
-	kfree(fcport);
+	qla2x00_free_fcport(fcport);
 done_unmap_sg:
 	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
@@ -796,7 +797,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
 
 	if (atomic_read(&vha->loop_state) == LOOP_READY &&
 	    (ha->current_topology == ISP_CFG_F ||
-	    (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
+	    (get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
 	     req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
 	    elreq.options == EXTERNAL_LOOPBACK) {
 		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
@@ -2049,7 +2050,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
 	return rval;
 
 done_free_fcport:
-	kfree(fcport);
+	qla2x00_free_fcport(fcport);
 
 done_unmap_rsp_sg:
 	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 30afc59..88a56e8 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -18,7 +18,7 @@
  * | Device Discovery             |       0x2134       | 0x210e-0x2116  |
  * |				  | 		       | 0x211a         |
  * |                              |                    | 0x211c-0x2128  |
- * |                              |                    | 0x212a-0x2130  |
+ * |                              |                    | 0x212a-0x2134  |
  * | Queue Command and IO tracing |       0x3074       | 0x300b         |
  * |                              |                    | 0x3027-0x3028  |
  * |                              |                    | 0x303d-0x3041  |
@@ -2519,12 +2519,6 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 /*                         Driver Debug Functions.                          */
 /****************************************************************************/
 
-static inline int
-ql_mask_match(uint level)
-{
-	return (level & ql2xextended_error_logging) == level;
-}
-
 /*
  * This function is for formatting and logging debug information.
  * It is to be used when vha is available. It formats the message
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index bb01b68..433e955 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -374,3 +374,9 @@ extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
 extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
 	struct qla_hw_data *);
 extern int qla24xx_soft_reset(struct qla_hw_data *);
+
+static inline int
+ql_mask_match(uint level)
+{
+	return (level & ql2xextended_error_logging) == level;
+}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 2edd9f7..ed32e97 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2402,6 +2402,7 @@ typedef struct fc_port {
 	unsigned int scan_needed:1;
 	unsigned int n2n_flag:1;
 	unsigned int explicit_logout:1;
+	unsigned int prli_pend_timer:1;
 
 	struct completion nvme_del_done;
 	uint32_t nvme_prli_service_param;
@@ -2428,6 +2429,7 @@ typedef struct fc_port {
 	struct work_struct free_work;
 	struct work_struct reg_work;
 	uint64_t jiffies_at_registration;
+	unsigned long prli_expired;
 	struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
 
 	uint16_t tgt_id;
@@ -2464,6 +2466,7 @@ typedef struct fc_port {
 	struct qla_tgt_sess *tgt_session;
 	struct ct_sns_desc ct_desc;
 	enum discovery_state disc_state;
+	atomic_t shadow_disc_state;
 	enum discovery_state next_disc_state;
 	enum login_state fw_login_state;
 	unsigned long dm_login_expire;
@@ -2510,6 +2513,19 @@ struct event_arg {
 
 extern const char *const port_state_str[5];
 
+static const char * const port_dstate_str[] = {
+	"DELETED",
+	"GNN_ID",
+	"GNL",
+	"LOGIN_PEND",
+	"LOGIN_FAILED",
+	"GPDB",
+	"UPD_FCPORT",
+	"LOGIN_COMPLETE",
+	"ADISC",
+	"DELETE_PEND"
+};
+
 /*
  * FC port flags.
  */
@@ -3263,7 +3279,6 @@ enum qla_work_type {
 	QLA_EVT_IDC_ACK,
 	QLA_EVT_ASYNC_LOGIN,
 	QLA_EVT_ASYNC_LOGOUT,
-	QLA_EVT_ASYNC_LOGOUT_DONE,
 	QLA_EVT_ASYNC_ADISC,
 	QLA_EVT_UEVENT,
 	QLA_EVT_AENFX,
@@ -3953,7 +3968,7 @@ struct qla_hw_data {
 	void		*sfp_data;
 	dma_addr_t	sfp_data_dma;
 
-	void		*flt;
+	struct qla_flt_header *flt;
 	dma_addr_t	flt_dma;
 
 #define XGMAC_DATA_SIZE	4096
@@ -4845,6 +4860,9 @@ struct sff_8247_a0 {
 	(ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \
 	NVME_ONLY_TARGET(fcport)) \
 
+#define PRLI_PHASE(_cls) \
+	((_cls == DSC_LS_PRLI_PEND) || (_cls == DSC_LS_PRLI_COMP))
+
 #include "qla_target.h"
 #include "qla_gbl.h"
 #include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 9dc09c1..d641918 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1354,12 +1354,12 @@ struct vp_rpt_id_entry_24xx {
 	uint8_t port_id[3];
 	uint8_t format;
 	union {
-		struct {
+		struct _f0 {
 			/* format 0 loop */
 			uint8_t vp_idx_map[16];
 			uint8_t reserved_4[32];
 		} f0;
-		struct {
+		struct _f1 {
 			/* format 1 fabric */
 			uint8_t vpstat1_subcode; /* vp_status=1 subcode */
 			uint8_t flags;
@@ -1381,21 +1381,22 @@ struct vp_rpt_id_entry_24xx {
 			uint16_t bbcr;
 			uint8_t reserved_5[6];
 		} f1;
-		struct { /* format 2: N2N direct connect */
-		    uint8_t vpstat1_subcode;
-		    uint8_t flags;
-		    uint16_t rsv6;
-		    uint8_t rsv2[12];
+		struct _f2 { /* format 2: N2N direct connect */
+			uint8_t vpstat1_subcode;
+			uint8_t flags;
+			uint16_t fip_flags;
+			uint8_t rsv2[12];
 
-		    uint8_t ls_rjt_vendor;
-		    uint8_t ls_rjt_explanation;
-		    uint8_t ls_rjt_reason;
-		    uint8_t rsv3[5];
+			uint8_t ls_rjt_vendor;
+			uint8_t ls_rjt_explanation;
+			uint8_t ls_rjt_reason;
+			uint8_t rsv3[5];
 
-		    uint8_t port_name[8];
-		    uint8_t node_name[8];
-		    uint8_t remote_nport_id[4];
-		    uint32_t reserved_5;
+			uint8_t port_name[8];
+			uint8_t node_name[8];
+			uint16_t bbcr;
+			uint8_t reserved_5[2];
+			uint8_t remote_nport_id[4];
 		} f2;
 	} u;
 };
@@ -1470,13 +1471,6 @@ struct qla_flt_location {
 	uint16_t checksum;
 };
 
-struct qla_flt_header {
-	uint16_t version;
-	uint16_t length;
-	uint16_t checksum;
-	uint16_t unused;
-};
-
 #define FLT_REG_FW		0x01
 #define FLT_REG_BOOT_CODE	0x07
 #define FLT_REG_VPD_0		0x14
@@ -1537,6 +1531,14 @@ struct qla_flt_region {
 	uint32_t end;
 };
 
+struct qla_flt_header {
+	uint16_t version;
+	uint16_t length;
+	uint16_t checksum;
+	uint16_t unused;
+	struct qla_flt_region region[0];
+};
+
 #define FLT_REGION_SIZE		16
 #define FLT_MAX_REGIONS		0xFF
 #define FLT_REGIONS_SIZE	(FLT_REGION_SIZE * FLT_MAX_REGIONS)
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 5b163ad..2a64729 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -72,14 +72,13 @@ extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
 extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
 extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
-extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
-    uint16_t *);
 struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
     enum qla_work_type);
 extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
 int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e);
 extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *);
 extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
+extern int qla24xx_async_abort_cmd(srb_t *, bool);
 
 extern void qla2x00_set_fcport_state(fc_port_t *fcport, int state);
 extern fc_port_t *
@@ -182,8 +181,6 @@ extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
-extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
-    fc_port_t *, uint16_t *);
 extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
@@ -201,6 +198,7 @@ extern void qla2x00_free_host(struct scsi_qla_host *);
 extern void qla2x00_relogin(struct scsi_qla_host *);
 extern void qla2x00_do_work(struct scsi_qla_host *);
 extern void qla2x00_free_fcports(struct scsi_qla_host *);
+extern void qla2x00_free_fcport(fc_port_t *);
 
 extern void qla83xx_schedule_work(scsi_qla_host_t *, int);
 extern void qla83xx_service_idc_aen(struct work_struct *);
@@ -253,8 +251,9 @@ extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *);
 extern void qla2x00_sp_free_dma(srb_t *sp);
 extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
 
-extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
-extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
+extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int);
+extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *);
+extern int qla24xx_async_abort_cmd(srb_t *, bool);
 
 extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
 
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 446a9d6..aaa4a5b 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -2963,7 +2963,6 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
 		return QLA_FUNCTION_FAILED;
 
 	e->u.fcport.fcport = fcport;
-	fcport->flags |= FCF_ASYNC_ACTIVE;
 	return qla2x00_post_work(vha, e);
 }
 
@@ -3097,9 +3096,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
 
 done_free_sp:
 	sp->free(sp);
-	fcport->flags &= ~FCF_ASYNC_SENT;
 done:
-	fcport->flags &= ~FCF_ASYNC_ACTIVE;
 	return rval;
 }
 
@@ -4290,7 +4287,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
 	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
 		return rval;
 
-	fcport->disc_state = DSC_GNN_ID;
+	qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
 	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
 	if (!sp)
 		goto done;
@@ -4464,7 +4461,6 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
 
 done_free_sp:
 	sp->free(sp);
-	fcport->flags &= ~FCF_ASYNC_SENT;
 done:
 	return rval;
 }
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index aa52041..9e6b565 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -49,16 +49,9 @@ qla2x00_sp_timeout(struct timer_list *t)
 {
 	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
 	struct srb_iocb *iocb;
-	struct req_que *req;
-	unsigned long flags;
-	struct qla_hw_data *ha = sp->vha->hw;
 
-	WARN_ON_ONCE(irqs_disabled());
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	req = sp->qpair->req;
-	req->outstanding_cmds[sp->handle] = NULL;
+	WARN_ON(irqs_disabled());
 	iocb = &sp->u.iocb_cmd;
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	iocb->timeout(sp);
 }
 
@@ -142,7 +135,7 @@ static void qla24xx_abort_sp_done(srb_t *sp, int res)
 		sp->free(sp);
 }
 
-static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
+int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
 {
 	scsi_qla_host_t *vha = cmd_sp->vha;
 	struct srb_iocb *abt_iocb;
@@ -242,6 +235,7 @@ qla2x00_async_iocb_timeout(void *data)
 	case SRB_NACK_PRLI:
 	case SRB_NACK_LOGO:
 	case SRB_CTRL_VP:
+	default:
 		rc = qla24xx_async_abort_cmd(sp, false);
 		if (rc) {
 			spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
@@ -258,10 +252,6 @@ qla2x00_async_iocb_timeout(void *data)
 			sp->done(sp, QLA_FUNCTION_TIMEOUT);
 		}
 		break;
-	default:
-		WARN_ON_ONCE(true);
-		sp->done(sp, QLA_FUNCTION_TIMEOUT);
-		break;
 	}
 }
 
@@ -326,10 +316,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 	if (!sp)
 		goto done;
 
+	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
 	fcport->flags |= FCF_ASYNC_SENT;
 	fcport->logout_completed = 0;
 
-	fcport->disc_state = DSC_LOGIN_PEND;
 	sp->type = SRB_LOGIN_CMD;
 	sp->name = "login";
 	sp->gen1 = fcport->rscn_gen;
@@ -425,7 +415,7 @@ qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
 	fcport->flags &= ~FCF_ASYNC_ACTIVE;
 	/* Don't re-login in target mode */
 	if (!fcport->tgt_session)
-		qla2x00_mark_device_lost(vha, fcport, 1, 0);
+		qla2x00_mark_device_lost(vha, fcport, 1);
 	qlt_logo_completion_handler(fcport, data[0]);
 }
 
@@ -533,7 +523,7 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
 
 	e->u.fcport.fcport = fcport;
 	fcport->flags |= FCF_ASYNC_ACTIVE;
-	fcport->disc_state = DSC_LOGIN_PEND;
+	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
 	return qla2x00_post_work(vha, e);
 }
 
@@ -685,7 +675,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
 	port_id_t id;
 	u64 wwn;
 	u16 data[2];
-	u8 current_login_state;
+	u8 current_login_state, nvme_cls;
 
 	fcport = ea->fcport;
 	ql_dbg(ql_dbg_disc, vha, 0xffff,
@@ -744,10 +734,17 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
 
 		loop_id = le16_to_cpu(e->nport_handle);
 		loop_id = (loop_id & 0x7fff);
-		if (NVME_TARGET(vha->hw, fcport))
-			current_login_state = e->current_login_state >> 4;
-		else
-			current_login_state = e->current_login_state & 0xf;
+		nvme_cls = e->current_login_state >> 4;
+		current_login_state = e->current_login_state & 0xf;
+
+		if (PRLI_PHASE(nvme_cls)) {
+			current_login_state = nvme_cls;
+			fcport->fc4_type &= ~FS_FC4TYPE_FCP;
+			fcport->fc4_type |= FS_FC4TYPE_NVME;
+		} else if (PRLI_PHASE(current_login_state)) {
+			fcport->fc4_type |= FS_FC4TYPE_FCP;
+			fcport->fc4_type &= ~FS_FC4TYPE_NVME;
+		}
 
 		ql_dbg(ql_dbg_disc, vha, 0x20e2,
 		    "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
@@ -836,7 +833,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
 				 * with GNL. Push disc_state back to DELETED
 				 * so GNL can go out again
 				 */
-				fcport->disc_state = DSC_DELETED;
+				qla2x00_set_fcport_disc_state(fcport,
+				    DSC_DELETED);
 				break;
 			case DSC_LS_PRLI_COMP:
 				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
@@ -912,7 +910,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
 			qla24xx_fcport_handle_login(vha, fcport);
 			break;
 		case ISP_CFG_N:
-			fcport->disc_state = DSC_DELETED;
+			qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
 			if (time_after_eq(jiffies, fcport->dm_login_expire)) {
 				if (fcport->n2n_link_reset_cnt < 2) {
 					fcport->n2n_link_reset_cnt++;
@@ -992,7 +990,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
 		set_bit(loop_id, vha->hw->loop_id_map);
 		wwn = wwn_to_u64(e->port_name);
 
-		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
+		ql_dbg(ql_dbg_disc, vha, 0x20e8,
 		    "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
 		    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
 		    e->port_id[0], e->current_login_state, e->last_login_state,
@@ -1051,6 +1049,16 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
 
 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 	vha->gnl.sent = 0;
+	if (!list_empty(&vha->gnl.fcports)) {
+		/* retrigger gnl */
+		list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
+		    gnl_entry) {
+			list_del_init(&fcport->gnl_entry);
+			fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+			if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
+				break;
+		}
+	}
 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
 	sp->free(sp);
@@ -1072,7 +1080,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
 
 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 	fcport->flags |= FCF_ASYNC_SENT;
-	fcport->disc_state = DSC_GNL;
+	qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
 	fcport->last_rscn_gen = fcport->rscn_gen;
 	fcport->last_login_gen = fcport->login_gen;
 
@@ -1121,8 +1129,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
 
 done_free_sp:
 	sp->free(sp);
-	fcport->flags &= ~FCF_ASYNC_SENT;
 done:
+	fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
 	return rval;
 }
 
@@ -1216,12 +1224,19 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
 	struct srb_iocb *lio;
 	int rval = QLA_FUNCTION_FAILED;
 
-	if (!vha->flags.online)
+	if (!vha->flags.online) {
+		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
+		    __func__, __LINE__, fcport->port_name);
 		return rval;
+	}
 
-	if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
-	    fcport->fw_login_state == DSC_LS_PRLI_PEND)
+	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
+	    fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
+	    qla_dual_mode_enabled(vha)) {
+		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
+		    __func__, __LINE__, fcport->port_name);
 		return rval;
+	}
 
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
@@ -1295,12 +1310,12 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 		return rval;
 	}
 
-	fcport->disc_state = DSC_GPDB;
-
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
+	qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);
+
 	fcport->flags |= FCF_ASYNC_SENT;
 	sp->type = SRB_MB_IOCB;
 	sp->name = "gpdb";
@@ -1349,6 +1364,7 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 	sp->free(sp);
 	fcport->flags &= ~FCF_ASYNC_SENT;
 done:
+	fcport->flags &= ~FCF_ASYNC_ACTIVE;
 	qla24xx_post_gpdb_work(vha, fcport, opt);
 	return rval;
 }
@@ -1379,7 +1395,7 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
 		ql_dbg(ql_dbg_disc, vha, 0x20d6,
 		    "%s %d %8phC session revalidate success\n",
 		    __func__, __LINE__, ea->fcport->port_name);
-		ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
+		qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
 	}
 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 }
@@ -1433,7 +1449,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
 		/* Set discovery state back to GNL to Relogin attempt */
 		if (qla_dual_mode_enabled(vha) ||
 		    qla_ini_mode_enabled(vha)) {
-			fcport->disc_state = DSC_GNL;
+			qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
 			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 		}
 		return;
@@ -1600,6 +1616,10 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
 			break;
 		default:
 			if (fcport->login_pause) {
+				ql_dbg(ql_dbg_disc, vha, 0x20d8,
+				    "%s %d %8phC exit\n",
+				    __func__, __LINE__,
+				    fcport->port_name);
 				fcport->last_rscn_gen = fcport->rscn_gen;
 				fcport->last_login_gen = fcport->login_gen;
 				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -1758,9 +1778,23 @@ qla2x00_tmf_iocb_timeout(void *data)
 {
 	srb_t *sp = data;
 	struct srb_iocb *tmf = &sp->u.iocb_cmd;
+	int rc, h;
+	unsigned long flags;
 
-	tmf->u.tmf.comp_status = CS_TIMEOUT;
-	complete(&tmf->u.tmf.comp);
+	rc = qla24xx_async_abort_cmd(sp, false);
+	if (rc) {
+		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+			if (sp->qpair->req->outstanding_cmds[h] == sp) {
+				sp->qpair->req->outstanding_cmds[h] = NULL;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+		tmf->u.tmf.comp_status = CS_TIMEOUT;
+		tmf->u.tmf.data = QLA_FUNCTION_FAILED;
+		complete(&tmf->u.tmf.comp);
+	}
 }
 
 static void qla2x00_tmf_sp_done(srb_t *sp, int res)
@@ -1976,7 +2010,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 			qla24xx_post_prli_work(vha, ea->fcport);
 		} else {
 			ql_dbg(ql_dbg_disc, vha, 0x20ea,
-			    "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
+			    "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
 			    __func__, __LINE__, ea->fcport->port_name,
 			    ea->fcport->loop_id, ea->fcport->d_id.b24);
 
@@ -1996,11 +2030,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
 
 		ea->fcport->flags &= ~FCF_ASYNC_SENT;
-		ea->fcport->disc_state = DSC_LOGIN_FAILED;
+		qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED);
 		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
 			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 		else
-			qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
+			qla2x00_mark_device_lost(vha, ea->fcport, 1);
 		break;
 	case MBS_LOOP_ID_USED:
 		/* data[1] = IO PARAM 1 = nport ID  */
@@ -2047,6 +2081,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 			set_bit(lid, vha->hw->loop_id_map);
 			ea->fcport->loop_id = lid;
 			ea->fcport->keep_nport_handle = 0;
+			ea->fcport->logout_on_delete = 1;
 			qlt_schedule_sess_for_deletion(ea->fcport);
 		}
 		break;
@@ -2054,16 +2089,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 	return;
 }
 
-void
-qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
-    uint16_t *data)
-{
-	qlt_logo_completion_handler(fcport, data[0]);
-	fcport->login_gen++;
-	fcport->flags &= ~FCF_ASYNC_ACTIVE;
-	return;
-}
-
 /****************************************************************************/
 /*                QLogic ISP2x00 Hardware Support Functions.                */
 /****************************************************************************/
@@ -4925,12 +4950,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
 	qla2x00_get_data_rate(vha);
 
 	/* Determine what we need to do */
-	if (ha->current_topology == ISP_CFG_FL &&
-	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
-
-		set_bit(RSCN_UPDATE, &flags);
-
-	} else if (ha->current_topology == ISP_CFG_F &&
+	if ((ha->current_topology == ISP_CFG_FL ||
+	    ha->current_topology == ISP_CFG_F) &&
 	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
 
 		set_bit(RSCN_UPDATE, &flags);
@@ -5087,7 +5108,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 	    &entries);
 	if (rval != QLA_SUCCESS)
-		goto cleanup_allocation;
+		goto err;
 
 	ql_dbg(ql_dbg_disc, vha, 0x2011,
 	    "Entries in ID list (%d).\n", entries);
@@ -5117,7 +5138,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 		ql_log(ql_log_warn, vha, 0x2012,
 		    "Memory allocation failed for fcport.\n");
 		rval = QLA_MEMORY_ALLOC_FAILED;
-		goto cleanup_allocation;
+		goto err;
 	}
 	new_fcport->flags &= ~FCF_FABRIC_DEVICE;
 
@@ -5207,7 +5228,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 				ql_log(ql_log_warn, vha, 0xd031,
 				    "Failed to allocate memory for fcport.\n");
 				rval = QLA_MEMORY_ALLOC_FAILED;
-				goto cleanup_allocation;
+				goto err;
 			}
 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
@@ -5230,7 +5251,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 			    qla_ini_mode_enabled(vha)) &&
 			    atomic_read(&fcport->state) == FCS_ONLINE) {
 				qla2x00_mark_device_lost(vha, fcport,
-					ql2xplogiabsentdevice, 0);
+					ql2xplogiabsentdevice);
 				if (fcport->loop_id != FC_NO_LOOP_ID &&
 				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
 				    fcport->port_type != FCT_INITIATOR &&
@@ -5250,15 +5271,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 			qla24xx_fcport_handle_login(vha, fcport);
 	}
 
-cleanup_allocation:
-	kfree(new_fcport);
+	qla2x00_free_fcport(new_fcport);
 
-	if (rval != QLA_SUCCESS) {
-		ql_dbg(ql_dbg_disc, vha, 0x2098,
-		    "Configure local loop error exit: rval=%x.\n", rval);
-	}
+	return rval;
 
-	return (rval);
+err:
+	ql_dbg(ql_dbg_disc, vha, 0x2098,
+	       "Configure local loop error exit: rval=%x.\n", rval);
+	return rval;
 }
 
 static void
@@ -5385,11 +5405,14 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
 	    __func__, fcport->port_name);
 
-	fcport->disc_state = DSC_UPD_FCPORT;
+	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
 	fcport->login_retry = vha->hw->login_retry_count;
 	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
 	fcport->deleted = 0;
-	fcport->logout_on_delete = 1;
+	if (vha->hw->current_topology == ISP_CFG_NL)
+		fcport->logout_on_delete = 0;
+	else
+		fcport->logout_on_delete = 1;
 	fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
 
 	switch (vha->hw->current_topology) {
@@ -5405,7 +5428,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 
 	if (NVME_TARGET(vha->hw, fcport)) {
 		qla_nvme_register_remote(vha, fcport);
-		fcport->disc_state = DSC_LOGIN_COMPLETE;
+		qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
 		qla2x00_set_fcport_state(fcport, FCS_ONLINE);
 		return;
 	}
@@ -5450,7 +5473,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 		}
 	}
 
-	fcport->disc_state = DSC_LOGIN_COMPLETE;
+	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
 }
 
 void qla_register_fcport_fn(struct work_struct *work)
@@ -5859,7 +5882,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
 
 		if (NVME_TARGET(vha->hw, fcport)) {
 			if (fcport->disc_state == DSC_DELETE_PEND) {
-				fcport->disc_state = DSC_GNL;
+				qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
 				vha->fcport_count--;
 				fcport->login_succ = 0;
 			}
@@ -5905,7 +5928,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
 			    qla_ini_mode_enabled(vha)) &&
 			    atomic_read(&fcport->state) == FCS_ONLINE) {
 				qla2x00_mark_device_lost(vha, fcport,
-					ql2xplogiabsentdevice, 0);
+					ql2xplogiabsentdevice);
 				if (fcport->loop_id != FC_NO_LOOP_ID &&
 				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
 				    fcport->port_type != FCT_INITIATOR &&
@@ -6071,7 +6094,7 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
 			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
 			    fcport->d_id.b.domain, fcport->d_id.b.area,
 			    fcport->d_id.b.al_pa);
-			qla2x00_mark_device_lost(vha, fcport, 1, 0);
+			qla2x00_mark_device_lost(vha, fcport, 1);
 
 			rval = 1;
 			break;
@@ -6585,9 +6608,9 @@ qla2x00_quiesce_io(scsi_qla_host_t *vha)
 	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
 	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 		atomic_set(&vha->loop_state, LOOP_DOWN);
-		qla2x00_mark_all_devices_lost(vha, 0);
+		qla2x00_mark_all_devices_lost(vha);
 		list_for_each_entry(vp, &ha->vp_list, list)
-			qla2x00_mark_all_devices_lost(vp, 0);
+			qla2x00_mark_all_devices_lost(vp);
 	} else {
 		if (!atomic_read(&vha->loop_down_timer))
 			atomic_set(&vha->loop_down_timer,
@@ -6663,14 +6686,14 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 		atomic_set(&vha->loop_state, LOOP_DOWN);
-		qla2x00_mark_all_devices_lost(vha, 0);
+		qla2x00_mark_all_devices_lost(vha);
 
 		spin_lock_irqsave(&ha->vport_slock, flags);
 		list_for_each_entry(vp, &ha->vp_list, list) {
 			atomic_inc(&vp->vref_count);
 			spin_unlock_irqrestore(&ha->vport_slock, flags);
 
-			qla2x00_mark_all_devices_lost(vp, 0);
+			qla2x00_mark_all_devices_lost(vp);
 
 			spin_lock_irqsave(&ha->vport_slock, flags);
 			atomic_dec(&vp->vref_count);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 352aba4..364b3db 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -105,6 +105,30 @@ qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
 	INIT_LIST_HEAD(&ctx->dsd_list);
 }
 
+static inline void
+qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
+{
+	int old_val;
+	uint8_t shiftbits, mask;
+
+	/* This will have to change when the max no. of states > 16 */
+	shiftbits = 4;
+	mask = (1 << shiftbits) - 1;
+
+	fcport->disc_state = state;
+	while (1) {
+		old_val = atomic_read(&fcport->shadow_disc_state);
+		if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
+		    old_val, (old_val << shiftbits) | state)) {
+			ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
+			    "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
+			    fcport->port_name, port_dstate_str[old_val & mask],
+			    port_dstate_str[state], fcport->d_id.b24);
+			return;
+		}
+	}
+}
+
 static inline int
 qla2x00_hba_err_chk_enabled(srb_t *sp)
 {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 8b050f0..47bf60a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2537,13 +2537,32 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
 	fc_port_t *fcport = sp->fcport;
 	struct scsi_qla_host *vha = sp->vha;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	unsigned long flags = 0;
+	int res, h;
 
 	ql_dbg(ql_dbg_io, vha, 0x3069,
 	    "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
 	    sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
 	    fcport->d_id.b.al_pa);
 
-	complete(&lio->u.els_logo.comp);
+	/* Abort the exchange */
+	res = qla24xx_async_abort_cmd(sp, false);
+	if (res) {
+		ql_dbg(ql_dbg_io, vha, 0x3070,
+		    "mbx abort_command failed.\n");
+		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+			if (sp->qpair->req->outstanding_cmds[h] == sp) {
+				sp->qpair->req->outstanding_cmds[h] = NULL;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+		complete(&lio->u.els_logo.comp);
+	} else {
+		ql_dbg(ql_dbg_io, vha, 0x3071,
+		    "mbx abort_command success.\n");
+	}
 }
 
 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
@@ -2717,23 +2736,29 @@ qla2x00_els_dcmd2_iocb_timeout(void *data)
 	srb_t *sp = data;
 	fc_port_t *fcport = sp->fcport;
 	struct scsi_qla_host *vha = sp->vha;
-	struct qla_hw_data *ha = vha->hw;
 	unsigned long flags = 0;
-	int res;
+	int res, h;
 
 	ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
 	    "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
 	    sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
 
 	/* Abort the exchange */
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	res = ha->isp_ops->abort_command(sp);
+	res = qla24xx_async_abort_cmd(sp, false);
 	ql_dbg(ql_dbg_io, vha, 0x3070,
 	    "mbx abort_command %s\n",
 	    (res == QLA_SUCCESS) ? "successful" : "failed");
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-	sp->done(sp, QLA_FUNCTION_TIMEOUT);
+	if (res) {
+		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+			if (sp->qpair->req->outstanding_cmds[h] == sp) {
+				sp->qpair->req->outstanding_cmds[h] = NULL;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+		sp->done(sp, QLA_FUNCTION_TIMEOUT);
+	}
 }
 
 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
@@ -2852,7 +2877,8 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
 				    fw_status[0], fw_status[1], fw_status[2]);
 
 				fcport->flags &= ~FCF_ASYNC_SENT;
-				fcport->disc_state = DSC_LOGIN_FAILED;
+				qla2x00_set_fcport_disc_state(fcport,
+				    DSC_LOGIN_FAILED);
 				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 				break;
 			}
@@ -2865,7 +2891,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
 			    fw_status[0], fw_status[1], fw_status[2]);
 
 			sp->fcport->flags &= ~FCF_ASYNC_SENT;
-			sp->fcport->disc_state = DSC_LOGIN_FAILED;
+			qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
 			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 			break;
 		}
@@ -2898,11 +2924,12 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
 	if (!sp) {
 		ql_log(ql_log_info, vha, 0x70e6,
 		 "SRB allocation failed\n");
+		fcport->flags &= ~FCF_ASYNC_ACTIVE;
 		return -ENOMEM;
 	}
 
 	fcport->flags |= FCF_ASYNC_SENT;
-	fcport->disc_state = DSC_LOGIN_PEND;
+	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
 	elsio = &sp->u.iocb_cmd;
 	ql_dbg(ql_dbg_io, vha, 0x3073,
 	    "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
@@ -2975,7 +3002,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
 	}
 
 out:
-	fcport->flags &= ~(FCF_ASYNC_SENT);
+	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
 	qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
 	sp->free(sp);
 done:
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 7b8a6bf..e40705d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -788,7 +788,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 			atomic_set(&vha->loop_state, LOOP_DOWN);
 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
-			qla2x00_mark_all_devices_lost(vha, 1);
+			qla2x00_mark_all_devices_lost(vha);
 		}
 
 		if (vha->vp_idx) {
@@ -861,7 +861,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 			}
 
 			vha->device_flags |= DFLG_NO_CABLE;
-			qla2x00_mark_all_devices_lost(vha, 1);
+			qla2x00_mark_all_devices_lost(vha);
 		}
 
 		if (vha->vp_idx) {
@@ -881,7 +881,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 			atomic_set(&vha->loop_state, LOOP_DOWN);
 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
-			qla2x00_mark_all_devices_lost(vha, 1);
+			qla2x00_mark_all_devices_lost(vha);
 		}
 
 		if (vha->vp_idx) {
@@ -924,7 +924,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 				atomic_set(&vha->loop_down_timer,
 				    LOOP_DOWN_TIME);
 			if (!N2N_TOPO(ha))
-				qla2x00_mark_all_devices_lost(vha, 1);
+				qla2x00_mark_all_devices_lost(vha);
 		}
 
 		if (vha->vp_idx) {
@@ -953,7 +953,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 			if (!atomic_read(&vha->loop_down_timer))
 				atomic_set(&vha->loop_down_timer,
 				    LOOP_DOWN_TIME);
-			qla2x00_mark_all_devices_lost(vha, 1);
+			qla2x00_mark_all_devices_lost(vha);
 		}
 
 		if (vha->vp_idx) {
@@ -1022,7 +1022,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 			    "Marking port lost loopid=%04x portid=%06x.\n",
 			    fcport->loop_id, fcport->d_id.b24);
 			if (qla_ini_mode_enabled(vha)) {
-				qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
 				fcport->logout_on_delete = 0;
 				qlt_schedule_sess_for_deletion(fcport);
 			}
@@ -1034,14 +1033,14 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 				atomic_set(&vha->loop_down_timer,
 				    LOOP_DOWN_TIME);
 				vha->device_flags |= DFLG_NO_CABLE;
-				qla2x00_mark_all_devices_lost(vha, 1);
+				qla2x00_mark_all_devices_lost(vha);
 			}
 
 			if (vha->vp_idx) {
 				atomic_set(&vha->vp_state, VP_FAILED);
 				fc_vport_set_state(vha->fc_vport,
 				    FC_VPORT_FAILED);
-				qla2x00_mark_all_devices_lost(vha, 1);
+				qla2x00_mark_all_devices_lost(vha);
 			}
 
 			vha->flags.management_server_logged_in = 0;
@@ -1253,11 +1252,33 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 
 	case MBA_DPORT_DIAGNOSTICS:
 		ql_dbg(ql_dbg_async, vha, 0x5052,
-		    "D-Port Diagnostics: %04x result=%s\n",
-		    mb[0],
-		    mb[1] == 0 ? "start" :
-		    mb[1] == 1 ? "done (pass)" :
-		    mb[1] == 2 ? "done (error)" : "other");
+		    "D-Port Diagnostics: %04x %04x %04x %04x\n",
+		    mb[0], mb[1], mb[2], mb[3]);
+		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+			static char *results[] = {
+			    "start", "done(pass)", "done(error)", "undefined" };
+			static char *types[] = {
+			    "none", "dynamic", "static", "other" };
+			uint result = mb[1] >> 0 & 0x3;
+			uint type = mb[1] >> 6 & 0x3;
+			uint sw = mb[1] >> 15 & 0x1;
+			ql_dbg(ql_dbg_async, vha, 0x5052,
+			    "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
+			    results[result], types[type], sw);
+			if (result == 2) {
+				static char *reasons[] = {
+				    "reserved", "unexpected reject",
+				    "unexpected phase", "retry exceeded",
+				    "timed out", "not supported",
+				    "user stopped" };
+				uint reason = mb[2] >> 0 & 0xf;
+				uint phase = mb[2] >> 12 & 0xf;
+				ql_dbg(ql_dbg_async, vha, 0x5052,
+				    "D-Port Diagnostics: reason=%s phase=%u \n",
+				    reason < 7 ? reasons[reason] : "other",
+				    phase >> 1);
+			}
+		}
 		break;
 
 	case MBA_TEMPERATURE_ALERT:
@@ -1918,6 +1939,18 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 		inbuf = (uint32_t *)&sts->nvme_ersp_data;
 		outbuf = (uint32_t *)fd->rspaddr;
 		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+		if (unlikely(iocb->u.nvme.rsp_pyld_len >
+		    sizeof(struct nvme_fc_ersp_iu))) {
+			if (ql_mask_match(ql_dbg_io)) {
+				WARN_ONCE(1, "Unexpected response payload length %u.\n",
+				    iocb->u.nvme.rsp_pyld_len);
+				ql_log(ql_log_warn, fcport->vha, 0x5100,
+				    "Unexpected response payload length %u.\n",
+				    iocb->u.nvme.rsp_pyld_len);
+			}
+			iocb->u.nvme.rsp_pyld_len =
+			    sizeof(struct nvme_fc_ersp_iu);
+		}
 		iter = iocb->u.nvme.rsp_pyld_len >> 2;
 		for (; iter; iter--)
 			*outbuf++ = swab32(*inbuf++);
@@ -2152,12 +2185,12 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
 	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
 	 * would make guard field appear at offset 2
 	 */
-	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
-	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
-	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
-	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
-	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
-	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
+	a_guard   = get_unaligned_le16(ap + 2);
+	a_app_tag = get_unaligned_le16(ap + 0);
+	a_ref_tag = get_unaligned_le32(ap + 4);
+	e_guard   = get_unaligned_le16(ep + 2);
+	e_app_tag = get_unaligned_le16(ep + 0);
+	e_ref_tag = get_unaligned_le32(ep + 4);
 
 	ql_dbg(ql_dbg_io, vha, 0x3023,
 	    "iocb(s) %p Returned STATUS.\n", sts24);
@@ -2745,7 +2778,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 				port_state_str[FCS_ONLINE],
 				comp_status);
 
-			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
 			qlt_schedule_sess_for_deletion(fcport);
 		}
 
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b7c1108..9e09964 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -6152,9 +6152,8 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
 	mcp->mb[7] = LSW(MSD(req_dma));
 	mcp->mb[8] = MSW(addr);
 	/* Setting RAM ID to valid */
-	mcp->mb[10] |= BIT_7;
 	/* For MCTP RAM ID is 0x40 */
-	mcp->mb[10] |= 0x40;
+	mcp->mb[10] = BIT_7 | 0x40;
 
 	mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
 	    MBX_0;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index eabc512..8ae639d 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -147,7 +147,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
 		    "Marking port dead, loop_id=0x%04x : %x.\n",
 		    fcport->loop_id, fcport->vha->vp_idx);
 
-		qla2x00_mark_device_lost(vha, fcport, 0, 0);
+		qla2x00_mark_device_lost(vha, fcport, 0);
 		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
 	}
 }
@@ -167,7 +167,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
 	list_for_each_entry(fcport, &vha->vp_fcports, list)
 		fcport->logout_on_delete = 0;
 
-	qla2x00_mark_all_devices_lost(vha, 0);
+	qla2x00_mark_all_devices_lost(vha);
 
 	/* Remove port id from vp target map */
 	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
@@ -327,7 +327,7 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
 	 */
 	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 		atomic_set(&vha->loop_state, LOOP_DOWN);
-		qla2x00_mark_all_devices_lost(vha, 0);
+		qla2x00_mark_all_devices_lost(vha);
 	} else {
 		if (!atomic_read(&vha->loop_down_timer))
 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index a3a44d4..cad1fc2 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -1210,9 +1210,9 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha,
 				    " Existing TGT-ID %x did not get "
 				    " offline event from firmware.\n",
 				    fcport->old_tgt_id);
-				qla2x00_mark_device_lost(vha, fcport, 0, 0);
+				qla2x00_mark_device_lost(vha, fcport, 0);
 				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-				kfree(new_fcport);
+				qla2x00_free_fcport(new_fcport);
 				return rval;
 			}
 			break;
@@ -1230,7 +1230,7 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha,
 			return QLA_MEMORY_ALLOC_FAILED;
 	}
 
-	kfree(new_fcport);
+	qla2x00_free_fcport(new_fcport);
 	return rval;
 }
 
@@ -1274,7 +1274,7 @@ qlafx00_configure_all_targets(scsi_qla_host_t *vha)
 
 		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
 			if (fcport->port_type != FCT_INITIATOR)
-				qla2x00_mark_device_lost(vha, fcport, 0, 0);
+				qla2x00_mark_device_lost(vha, fcport, 0);
 		}
 	}
 
@@ -1298,7 +1298,7 @@ qlafx00_configure_all_targets(scsi_qla_host_t *vha)
 	/* Free all new device structures not processed. */
 	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
 		list_del(&fcport->list);
-		kfree(fcport);
+		qla2x00_free_fcport(fcport);
 	}
 
 	return rval;
@@ -1706,7 +1706,7 @@ qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
 	if (!fcport)
 		return;
 
-	qla2x00_mark_device_lost(vha, fcport, 0, 0);
+	qla2x00_mark_device_lost(vha, fcport, 0);
 
 	return;
 }
@@ -1740,7 +1740,7 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
 				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 			} else if (evt->u.aenfx.mbx[2] == 2) {
 				vha->device_flags |= DFLG_NO_CABLE;
-				qla2x00_mark_all_devices_lost(vha, 1);
+				qla2x00_mark_all_devices_lost(vha);
 			}
 		}
 		break;
@@ -2513,7 +2513,7 @@ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		    atomic_read(&fcport->state));
 
 		if (atomic_read(&fcport->state) == FCS_ONLINE)
-			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+			qla2x00_mark_device_lost(fcport->vha, fcport, 1);
 		break;
 
 	case CS_ABORTED:
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 2b2028f2..185c5f3 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1612,8 +1612,7 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha)
 	return (u8 *)&ha->hablob->fw->data[offset];
 }
 
-static __le32
-qla82xx_get_fw_size(struct qla_hw_data *ha)
+static u32 qla82xx_get_fw_size(struct qla_hw_data *ha)
 {
 	struct qla82xx_uri_data_desc *uri_desc = NULL;
 
@@ -1624,7 +1623,7 @@ qla82xx_get_fw_size(struct qla_hw_data *ha)
 			return cpu_to_le32(uri_desc->size);
 	}
 
-	return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
+	return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]);
 }
 
 static u8 *
@@ -1816,7 +1815,7 @@ qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
 	}
 
 	flashaddr = FLASH_ADDR_START;
-	size = (__force u32)qla82xx_get_fw_size(ha) / 8;
+	size = qla82xx_get_fw_size(ha) / 8;
 	ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
 
 	for (i = 0; i < size; i++) {
@@ -1883,7 +1882,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
 static int
 qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
 {
-	__le32 val;
+	uint32_t val;
 	uint32_t min_size;
 	struct qla_hw_data *ha = vha->hw;
 	const struct firmware *fw = ha->hablob->fw;
@@ -1896,8 +1895,8 @@ qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
 
 		min_size = QLA82XX_URI_FW_MIN_SIZE;
 	} else {
-		val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
-		if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
+		val = get_unaligned_le32(&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
+		if (val != QLA82XX_BDINFO_MAGIC)
 			return -EINVAL;
 
 		min_size = QLA82XX_FW_MIN_SIZE;
@@ -3030,7 +3029,7 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
 	/* Set DEV_FAILED flag to disable timer */
 	vha->device_flags |= DFLG_DEV_FAILED;
 	qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
-	qla2x00_mark_all_devices_lost(vha, 0);
+	qla2x00_mark_all_devices_lost(vha);
 	vha->flags.online = 0;
 	vha->flags.init_done = 0;
 }
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8b84bc4..b520a98 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1110,7 +1110,7 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
 {
 	u8 i;
 
-	qla2x00_mark_all_devices_lost(vha, 0);
+	qla2x00_mark_all_devices_lost(vha);
 
 	for (i = 0; i < 10; i++) {
 		if (wait_event_timeout(vha->fcport_waitQ,
@@ -1667,7 +1667,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
 	if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
 		atomic_set(&vha->loop_state, LOOP_DOWN);
 		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
-		qla2x00_mark_all_devices_lost(vha, 0);
+		qla2x00_mark_all_devices_lost(vha);
 		ret = qla2x00_full_login_lip(vha);
 		if (ret != QLA_SUCCESS) {
 			ql_dbg(ql_dbg_taskm, vha, 0x802d,
@@ -3854,37 +3854,21 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha)
 }
 
 static inline void
-qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
-    int defer)
+qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
-	struct fc_rport *rport;
-	scsi_qla_host_t *base_vha;
-	unsigned long flags;
+	int now;
 
 	if (!fcport->rport)
 		return;
 
-	rport = fcport->rport;
-	if (defer) {
-		base_vha = pci_get_drvdata(vha->hw->pdev);
-		spin_lock_irqsave(vha->host->host_lock, flags);
-		fcport->drport = rport;
-		spin_unlock_irqrestore(vha->host->host_lock, flags);
-		qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
-		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
-		qla2xxx_wake_dpc(base_vha);
-	} else {
-		int now;
-
-		if (rport) {
-			ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
-			    "%s %8phN. rport %p roles %x\n",
-			    __func__, fcport->port_name, rport,
-			    rport->roles);
-			fc_remote_port_delete(rport);
-		}
-		qlt_do_generation_tick(vha, &now);
+	if (fcport->rport) {
+		ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
+		    "%s %8phN. rport %p roles %x\n",
+		    __func__, fcport->port_name, fcport->rport,
+		    fcport->rport->roles);
+		fc_remote_port_delete(fcport->rport);
 	}
+	qlt_do_generation_tick(vha, &now);
 }
 
 /*
@@ -3897,18 +3881,18 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
  * Context:
  */
 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
-    int do_login, int defer)
+    int do_login)
 {
 	if (IS_QLAFX00(vha->hw)) {
 		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
-		qla2x00_schedule_rport_del(vha, fcport, defer);
+		qla2x00_schedule_rport_del(vha, fcport);
 		return;
 	}
 
 	if (atomic_read(&fcport->state) == FCS_ONLINE &&
 	    vha->vp_idx == fcport->vha->vp_idx) {
 		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
-		qla2x00_schedule_rport_del(vha, fcport, defer);
+		qla2x00_schedule_rport_del(vha, fcport);
 	}
 	/*
 	 * We may need to retry the login, so don't change the state of the
@@ -3937,7 +3921,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
  * Context:
  */
 void
-qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
+qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
 {
 	fc_port_t *fcport;
 
@@ -3957,13 +3941,6 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
 		 */
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
 			continue;
-		if (atomic_read(&fcport->state) == FCS_ONLINE) {
-			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
-			if (defer)
-				qla2x00_schedule_rport_del(vha, fcport, defer);
-			else if (vha->vp_idx == fcport->vha->vp_idx)
-				qla2x00_schedule_rport_del(vha, fcport, defer);
-		}
 	}
 }
 
@@ -4965,7 +4942,6 @@ int qla2x00_post_async_##name##_work(		\
 
 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
-qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
 qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
 qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
@@ -5032,7 +5008,7 @@ void qla24xx_sched_upd_fcport(fc_port_t *fcport)
 	fcport->jiffies_at_registration = jiffies;
 	fcport->sec_since_registration = 0;
 	fcport->next_disc_state = DSC_DELETED;
-	fcport->disc_state = DSC_UPD_FCPORT;
+	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
 	spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
 
 	queue_work(system_unbound_wq, &fcport->reg_work);
@@ -5253,10 +5229,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
 		case QLA_EVT_ASYNC_LOGOUT:
 			rc = qla2x00_async_logout(vha, e->u.logio.fcport);
 			break;
-		case QLA_EVT_ASYNC_LOGOUT_DONE:
-			qla2x00_async_logout_done(vha, e->u.logio.fcport,
-			    e->u.logio.data);
-			break;
 		case QLA_EVT_ASYNC_ADISC:
 			qla2x00_async_adisc(vha, e->u.logio.fcport,
 			    e->u.logio.data);
@@ -6899,13 +6871,13 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
 		qpair->online = 0;
 	mutex_unlock(&ha->mq_lock);
 
-	qla2x00_mark_all_devices_lost(vha, 0);
+	qla2x00_mark_all_devices_lost(vha);
 
 	spin_lock_irqsave(&ha->vport_slock, flags);
 	list_for_each_entry(vp, &ha->vp_list, list) {
 		atomic_inc(&vp->vref_count);
 		spin_unlock_irqrestore(&ha->vport_slock, flags);
-		qla2x00_mark_all_devices_lost(vp, 0);
+		qla2x00_mark_all_devices_lost(vp);
 		spin_lock_irqsave(&ha->vport_slock, flags);
 		atomic_dec(&vp->vref_count);
 	}
@@ -7270,6 +7242,8 @@ qla2x00_module_init(void)
 	BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
 	BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
 	BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
+	BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
+	BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
 
 	/* Allocate cache for SRBs. */
 	srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index bbe9035..76a38bf 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -669,8 +669,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
 
 	struct qla_hw_data *ha = vha->hw;
 	uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0;
-	struct qla_flt_header *flt = (void *)ha->flt;
-	struct qla_flt_region *region = (void *)&flt[1];
+	struct qla_flt_header *flt = ha->flt;
+	struct qla_flt_region *region = &flt->region[0];
 	uint16_t *wptr, cnt, chksum;
 	uint32_t start;
 
@@ -2652,18 +2652,15 @@ qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start,
     struct qla_flt_region *region)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_flt_header *flt;
-	struct qla_flt_region *flt_reg;
+	struct qla_flt_header *flt = ha->flt;
+	struct qla_flt_region *flt_reg = &flt->region[0];
 	uint16_t cnt;
 	int rval = QLA_FUNCTION_FAILED;
 
 	if (!ha->flt)
 		return QLA_FUNCTION_FAILED;
 
-	flt = (struct qla_flt_header *)ha->flt;
-	flt_reg = (struct qla_flt_region *)&flt[1];
 	cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
-
 	for (; cnt; cnt--, flt_reg++) {
 		if (flt_reg->start == start) {
 			memcpy((uint8_t *)region, flt_reg,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 68c1414..70081b3 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -596,7 +596,8 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 		} else {
 			sp->fcport->login_retry = 0;
-			sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
+			qla2x00_set_fcport_disc_state(sp->fcport,
+			    DSC_LOGIN_COMPLETE);
 			sp->fcport->deleted = 0;
 			sp->fcport->logout_on_delete = 1;
 		}
@@ -957,7 +958,7 @@ void qlt_free_session_done(struct work_struct *work)
 	struct qlt_plogi_ack_t *own =
 		sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
 
-	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
+	ql_dbg(ql_dbg_disc, vha, 0xf084,
 		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
 		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
 		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
@@ -966,7 +967,7 @@ void qlt_free_session_done(struct work_struct *work)
 		sess->send_els_logo);
 
 	if (!IS_SW_RESV_ADDR(sess->d_id)) {
-		qla2x00_mark_device_lost(vha, sess, 0, 0);
+		qla2x00_mark_device_lost(vha, sess, 0);
 
 		if (sess->send_els_logo) {
 			qlt_port_logo_t logo;
@@ -1024,7 +1025,7 @@ void qlt_free_session_done(struct work_struct *work)
 
 		while (!READ_ONCE(sess->logout_completed)) {
 			if (!traced) {
-				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
+				ql_dbg(ql_dbg_disc, vha, 0xf086,
 					"%s: waiting for sess %p logout\n",
 					__func__, sess);
 				traced = true;
@@ -1045,6 +1046,10 @@ void qlt_free_session_done(struct work_struct *work)
 			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
 	}
 
+	spin_lock_irqsave(&vha->work_lock, flags);
+	sess->flags &= ~FCF_ASYNC_SENT;
+	spin_unlock_irqrestore(&vha->work_lock, flags);
+
 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 	if (sess->se_sess) {
 		sess->se_sess = NULL;
@@ -1052,7 +1057,7 @@ void qlt_free_session_done(struct work_struct *work)
 			tgt->sess_count--;
 	}
 
-	sess->disc_state = DSC_DELETED;
+	qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
 	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
 	sess->deleted = QLA_SESS_DELETED;
 
@@ -1108,7 +1113,7 @@ void qlt_free_session_done(struct work_struct *work)
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 	sess->free_pending = 0;
 
-	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+	ql_dbg(ql_dbg_disc, vha, 0xf001,
 	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
 		sess, sess->port_name, vha->fcport_count);
 
@@ -1151,13 +1156,18 @@ void qlt_unreg_sess(struct fc_port *sess)
 		return;
 	}
 	sess->free_pending = 1;
+	/*
+	 * Use FCF_ASYNC_SENT flag to block other cmds used in sess
+	 * management from being sent.
+	 */
+	sess->flags |= FCF_ASYNC_SENT;
 	spin_unlock_irqrestore(&sess->vha->work_lock, flags);
 
 	if (sess->se_sess)
 		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
 
 	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
-	sess->disc_state = DSC_DELETE_PEND;
+	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
 	sess->last_rscn_gen = sess->rscn_gen;
 	sess->last_login_gen = sess->login_gen;
 
@@ -1257,7 +1267,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
 	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
 	spin_unlock_irqrestore(&sess->vha->work_lock, flags);
 
-	sess->disc_state = DSC_DELETE_PEND;
+	sess->prli_pend_timer = 0;
+	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
 
 	qla24xx_chk_fcp_state(sess);
 
@@ -3446,13 +3457,13 @@ qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
 
 	cmd->trc_flags |= TRC_DIF_ERR;
 
-	cmd->a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
-	cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
-	cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
+	cmd->a_guard   = get_unaligned_be16(ap + 0);
+	cmd->a_app_tag = get_unaligned_be16(ap + 2);
+	cmd->a_ref_tag = get_unaligned_be32(ap + 4);
 
-	cmd->e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
-	cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
-	cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
+	cmd->e_guard   = get_unaligned_be16(ep + 0);
+	cmd->e_app_tag = get_unaligned_be16(ep + 2);
+	cmd->e_ref_tag = get_unaligned_be32(ep + 4);
 
 	ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
 	    "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
@@ -4579,7 +4590,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
 		/* find other sess with nport_id collision */
 		if (port_id.b24 == other_sess->d_id.b24) {
 			if (loop_id != other_sess->loop_id) {
-				ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
+				ql_dbg(ql_dbg_disc, vha, 0x1000c,
 				    "Invalidating sess %p loop_id %d wwn %llx.\n",
 				    other_sess, other_sess->loop_id, other_wwn);
 
@@ -4595,7 +4606,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
 				 * Another wwn used to have our s_id/loop_id
 				 * kill the session, but don't free the loop_id
 				 */
-				ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
+				ql_dbg(ql_dbg_disc, vha, 0xf01b,
 				    "Invalidating sess %p loop_id %d wwn %llx.\n",
 				    other_sess, other_sess->loop_id, other_wwn);
 
@@ -4610,7 +4621,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
 		/* find other sess with nport handle collision */
 		if ((loop_id == other_sess->loop_id) &&
 			(loop_id != FC_NO_LOOP_ID)) {
-			ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
+			ql_dbg(ql_dbg_disc, vha, 0x1000d,
 			       "Invalidating sess %p loop_id %d wwn %llx.\n",
 			       other_sess, other_sess->loop_id, other_wwn);
 
@@ -6053,7 +6064,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
 		if (!IS_SW_RESV_ADDR(fcport->d_id))
 		   vha->fcport_count++;
 		fcport->login_gen++;
-		fcport->disc_state = DSC_LOGIN_COMPLETE;
+		qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
 		fcport->login_succ = 1;
 		newfcport = 1;
 	}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index d006f0a..6539499 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -379,8 +379,7 @@ static inline int get_datalen_for_atio(struct atio_from_isp *atio)
 {
 	int len = atio->u.isp24.fcp_cmnd.add_cdb_len;
 
-	return (be32_to_cpu(get_unaligned((uint32_t *)
-	    &atio->u.isp24.fcp_cmnd.add_cdb[len * 4])));
+	return get_unaligned_be32(&atio->u.isp24.fcp_cmnd.add_cdb[len * 4]);
 }
 
 #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 03bd3b7..bb03c02 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "10.01.00.21-k"
+#define QLA2XXX_VERSION      "10.01.00.22-k"
 
 #define QLA_DRIVER_MAJOR_VER	10
 #define QLA_DRIVER_MINOR_VER	1
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2323432..5504ab1 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -4145,7 +4145,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
 		dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
 				  ha->queues_dma);
 
-	 if (ha->fw_dump)
+	if (ha->fw_dump)
 		vfree(ha->fw_dump);
 
 	ha->queues_len = 0;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index df14597..eed3102 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -736,13 +736,12 @@ static ssize_t proc_scsi_devinfo_write(struct file *file,
 	return err;
 }
 
-static const struct file_operations scsi_devinfo_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= proc_scsi_devinfo_open,
-	.read		= seq_read,
-	.write		= proc_scsi_devinfo_write,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
+static const struct proc_ops scsi_devinfo_proc_ops = {
+	.proc_open	= proc_scsi_devinfo_open,
+	.proc_read	= seq_read,
+	.proc_write	= proc_scsi_devinfo_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
 };
 #endif /* CONFIG_SCSI_PROC_FS */
 
@@ -867,7 +866,7 @@ int __init scsi_init_devinfo(void)
 	}
 
 #ifdef CONFIG_SCSI_PROC_FS
-	p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_fops);
+	p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_ops);
 	if (!p) {
 		error = -ENOMEM;
 		goto out;
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 57bcd05..8f3af87 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -189,17 +189,7 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
 }
 
 
-/**
- * scsi_ioctl - Dispatch ioctl to scsi device
- * @sdev: scsi device receiving ioctl
- * @cmd: which ioctl is it
- * @arg: data associated with ioctl
- *
- * Description: The scsi_ioctl() function differs from most ioctls in that it
- * does not take a major/minor number as the dev field.  Rather, it takes
- * a pointer to a &struct scsi_device.
- */
-int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg)
 {
 	char scsi_cmd[MAX_COMMAND_SIZE];
 	struct scsi_sense_hdr sense_hdr;
@@ -266,14 +256,50 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
                 return scsi_ioctl_get_pci(sdev, arg);
 	case SG_SCSI_RESET:
 		return scsi_ioctl_reset(sdev, arg);
-	default:
-		if (sdev->host->hostt->ioctl)
-			return sdev->host->hostt->ioctl(sdev, cmd, arg);
 	}
+	return -ENOIOCTLCMD;
+}
+
+/**
+ * scsi_ioctl - Dispatch ioctl to scsi device
+ * @sdev: scsi device receiving ioctl
+ * @cmd: which ioctl is it
+ * @arg: data associated with ioctl
+ *
+ * Description: The scsi_ioctl() function differs from most ioctls in that it
+ * does not take a major/minor number as the dev field.  Rather, it takes
+ * a pointer to a &struct scsi_device.
+ */
+int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+	int ret = scsi_ioctl_common(sdev, cmd, arg);
+
+	if (ret != -ENOIOCTLCMD)
+		return ret;
+
+	if (sdev->host->hostt->ioctl)
+		return sdev->host->hostt->ioctl(sdev, cmd, arg);
+
 	return -EINVAL;
 }
 EXPORT_SYMBOL(scsi_ioctl);
 
+#ifdef CONFIG_COMPAT
+int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+	int ret = scsi_ioctl_common(sdev, cmd, arg);
+
+	if (ret != -ENOIOCTLCMD)
+		return ret;
+
+	if (sdev->host->hostt->compat_ioctl)
+		return sdev->host->hostt->compat_ioctl(sdev, cmd, arg);
+
+	return ret;
+}
+EXPORT_SYMBOL(scsi_compat_ioctl);
+#endif
+
 /*
  * We can process a reset even when a device isn't fully operable.
  */
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3e7a45d..610ee41 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2108,6 +2108,8 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
 
 	memset(data, 0, sizeof(*data));
 	memset(&cmd[0], 0, 12);
+
+	dbd = sdev->set_dbd_for_ms ? 8 : dbd;
 	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
 	cmd[2] = modepage;
 
diff --git a/drivers/scsi/scsi_logging.h b/drivers/scsi/scsi_logging.h
index 836185d..3df8778 100644
--- a/drivers/scsi/scsi_logging.h
+++ b/drivers/scsi/scsi_logging.h
@@ -53,7 +53,7 @@ do {								\
 } while (0)
 #else
 #define SCSI_LOG_LEVEL(SHIFT, BITS) 0
-#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD)
+#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) do { } while (0)
 #endif /* CONFIG_SCSI_LOGGING */
 
 /*
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 5b31322..d6982d3 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -83,12 +83,12 @@ static int proc_scsi_host_open(struct inode *inode, struct file *file)
 				4 * PAGE_SIZE);
 }
 
-static const struct file_operations proc_scsi_fops = {
-	.open = proc_scsi_host_open,
-	.release = single_release,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.write = proc_scsi_host_write
+static const struct proc_ops proc_scsi_ops = {
+	.proc_open	= proc_scsi_host_open,
+	.proc_release	= single_release,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= proc_scsi_host_write
 };
 
 /**
@@ -146,7 +146,7 @@ void scsi_proc_host_add(struct Scsi_Host *shost)
 
 	sprintf(name,"%d", shost->host_no);
 	p = proc_create_data(name, S_IRUGO | S_IWUSR,
-		sht->proc_dir, &proc_scsi_fops, shost);
+		sht->proc_dir, &proc_scsi_ops, shost);
 	if (!p)
 		printk(KERN_ERR "%s: Failed to register host %d in"
 		       "%s\n", __func__, shost->host_no,
@@ -436,13 +436,12 @@ static int proc_scsi_open(struct inode *inode, struct file *file)
 	return seq_open(file, &scsi_seq_ops);
 }
 
-static const struct file_operations proc_scsi_operations = {
-	.owner		= THIS_MODULE,
-	.open		= proc_scsi_open,
-	.read		= seq_read,
-	.write		= proc_scsi_write,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
+static const struct proc_ops scsi_scsi_proc_ops = {
+	.proc_open	= proc_scsi_open,
+	.proc_read	= seq_read,
+	.proc_write	= proc_scsi_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
 };
 
 /**
@@ -456,7 +455,7 @@ int __init scsi_init_procfs(void)
 	if (!proc_scsi)
 		goto err1;
 
-	pde = proc_create("scsi/scsi", 0, NULL, &proc_scsi_operations);
+	pde = proc_create("scsi/scsi", 0, NULL, &scsi_scsi_proc_ops);
 	if (!pde)
 		goto err2;
 
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index ed8d970..dfc726f 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2093,7 +2093,12 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 					 "could not register session's dev\n");
 		goto release_ida;
 	}
-	transport_register_device(&session->dev);
+	err = transport_register_device(&session->dev);
+	if (err) {
+		iscsi_cls_session_printk(KERN_ERR, session,
+					 "could not register transport's dev\n");
+		goto release_dev;
+	}
 
 	spin_lock_irqsave(&sesslock, flags);
 	list_add(&session->sess_list, &sesslist);
@@ -2103,6 +2108,8 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 	ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n");
 	return 0;
 
+release_dev:
+	device_del(&session->dev);
 release_ida:
 	if (session->ida_used)
 		ida_simple_remove(&iscsi_sess_ida, session->target_id);
@@ -2263,7 +2270,12 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
 					 "register connection's dev\n");
 		goto release_parent_ref;
 	}
-	transport_register_device(&conn->dev);
+	err = transport_register_device(&conn->dev);
+	if (err) {
+		iscsi_cls_session_printk(KERN_ERR, session, "could not "
+					 "register transport's dev\n");
+		goto release_conn_ref;
+	}
 
 	spin_lock_irqsave(&connlock, flags);
 	list_add(&conn->conn_list, &connlist);
@@ -2272,6 +2284,8 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
 	ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
 	return conn;
 
+release_conn_ref:
+	put_device(&conn->dev);
 release_parent_ref:
 	put_device(&session->dev);
 free_conn:
@@ -2947,6 +2961,24 @@ iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
 	return err;
 }
 
+static int iscsi_session_has_conns(int sid)
+{
+	struct iscsi_cls_conn *conn;
+	unsigned long flags;
+	int found = 0;
+
+	spin_lock_irqsave(&connlock, flags);
+	list_for_each_entry(conn, &connlist, conn_list) {
+		if (iscsi_conn_get_sid(conn) == sid) {
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&connlock, flags);
+
+	return found;
+}
+
 static int
 iscsi_set_iface_params(struct iscsi_transport *transport,
 		       struct iscsi_uevent *ev, uint32_t len)
@@ -3524,10 +3556,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 		break;
 	case ISCSI_UEVENT_DESTROY_SESSION:
 		session = iscsi_session_lookup(ev->u.d_session.sid);
-		if (session)
-			transport->destroy_session(session);
-		else
+		if (!session)
 			err = -EINVAL;
+		else if (iscsi_session_has_conns(ev->u.d_session.sid))
+			err = -EBUSY;
+		else
+			transport->destroy_session(session);
 		break;
 	case ISCSI_UEVENT_UNBIND_SESSION:
 		session = iscsi_session_lookup(ev->u.d_session.sid);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 902b649..8ca9299 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1465,13 +1465,12 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  *	Note: most ioctls are forward onto the block subsystem or further
  *	down in the scsi subsystem.
  **/
-static int sd_ioctl(struct block_device *bdev, fmode_t mode,
-		    unsigned int cmd, unsigned long arg)
+static int sd_ioctl_common(struct block_device *bdev, fmode_t mode,
+			   unsigned int cmd, void __user *p)
 {
 	struct gendisk *disk = bdev->bd_disk;
 	struct scsi_disk *sdkp = scsi_disk(disk);
 	struct scsi_device *sdp = sdkp->device;
-	void __user *p = (void __user *)arg;
 	int error;
     
 	SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
@@ -1507,9 +1506,6 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
 			break;
 		default:
 			error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
-			if (error != -ENOTTY)
-				break;
-			error = scsi_ioctl(sdp, cmd, p);
 			break;
 	}
 out:
@@ -1691,39 +1687,31 @@ static void sd_rescan(struct device *dev)
 	revalidate_disk(sdkp->disk);
 }
 
+static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+		    unsigned int cmd, unsigned long arg)
+{
+	void __user *p = (void __user *)arg;
+	int ret;
+
+	ret = sd_ioctl_common(bdev, mode, cmd, p);
+	if (ret != -ENOTTY)
+		return ret;
+
+	return scsi_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
+}
 
 #ifdef CONFIG_COMPAT
-/* 
- * This gets directly called from VFS. When the ioctl 
- * is not recognized we go back to the other translation paths. 
- */
 static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
 			   unsigned int cmd, unsigned long arg)
 {
-	struct gendisk *disk = bdev->bd_disk;
-	struct scsi_disk *sdkp = scsi_disk(disk);
-	struct scsi_device *sdev = sdkp->device;
 	void __user *p = compat_ptr(arg);
-	int error;
+	int ret;
 
-	error = scsi_verify_blk_ioctl(bdev, cmd);
-	if (error < 0)
-		return error;
+	ret = sd_ioctl_common(bdev, mode, cmd, p);
+	if (ret != -ENOTTY)
+		return ret;
 
-	error = scsi_ioctl_block_when_processing_errors(sdev, cmd,
-			(mode & FMODE_NDELAY) != 0);
-	if (error)
-		return error;
-
-	if (is_sed_ioctl(cmd))
-		return sed_ioctl(sdkp->opal_dev, cmd, p);
-	       
-	/* 
-	 * Let the static ioctl translation table take care of it.
-	 */
-	if (!sdev->host->hostt->compat_ioctl)
-		return -ENOIOCTLCMD; 
-	return sdev->host->hostt->compat_ioctl(sdev, cmd, p);
+	return scsi_compat_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
 }
 #endif
 
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index e0bd4cf..e4282bc 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -325,22 +325,21 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
 }
 
 /**
- * sd_zbc_check_zones - Check the device capacity and zone sizes
+ * sd_zbc_check_capacity - Check the device capacity
  * @sdkp: Target disk
+ * @buf: command buffer
+ * @zblocks: zone size in number of blocks
  *
- * Check that the device capacity as reported by READ CAPACITY matches the
- * max_lba value (plus one)of the report zones command reply. Also check that
- * all zones of the device have an equal size, only allowing the last zone of
- * the disk to have a smaller size (runt zone). The zone size must also be a
- * power of two.
+ * Get the device zone size and check that the device capacity as reported
+ * by READ CAPACITY matches the max_lba value (plus one) of the report zones
+ * command reply for devices with RC_BASIS == 0.
  *
- * Returns the zone size in number of blocks upon success or an error code
- * upon failure.
+ * Returns 0 upon success or an error code upon failure.
  */
-static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf,
-			      u32 *zblocks)
+static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
+				 u32 *zblocks)
 {
-	u64 zone_blocks = 0;
+	u64 zone_blocks;
 	sector_t max_lba;
 	unsigned char *rec;
 	int ret;
@@ -363,17 +362,9 @@ static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf,
 		}
 	}
 
-	/* Parse REPORT ZONES header */
+	/* Get the size of the first reported zone */
 	rec = buf + 64;
 	zone_blocks = get_unaligned_be64(&rec[8]);
-	if (!zone_blocks || !is_power_of_2(zone_blocks)) {
-		if (sdkp->first_scan)
-			sd_printk(KERN_NOTICE, sdkp,
-				  "Devices with non power of 2 zone "
-				  "size are not supported\n");
-		return -ENODEV;
-	}
-
 	if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
 		if (sdkp->first_scan)
 			sd_printk(KERN_NOTICE, sdkp,
@@ -405,11 +396,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
 	if (ret)
 		goto err;
 
-	/*
-	 * Check zone size: only devices with a constant zone size (except
-	 * an eventual last runt zone) that is a power of 2 are supported.
-	 */
-	ret = sd_zbc_check_zones(sdkp, buf, &zone_blocks);
+	/* Check the device capacity reported by report zones */
+	ret = sd_zbc_check_capacity(sdkp, buf, &zone_blocks);
 	if (ret != 0)
 		goto err;
 
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 160748a..4e6af59 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -405,6 +405,38 @@ sg_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+static int get_sg_io_pack_id(int *pack_id, void __user *buf, size_t count)
+{
+	struct sg_header __user *old_hdr = buf;
+	int reply_len;
+
+	if (count >= SZ_SG_HEADER) {
+		/* negative reply_len means v3 format, otherwise v1/v2 */
+		if (get_user(reply_len, &old_hdr->reply_len))
+			return -EFAULT;
+
+		if (reply_len >= 0)
+			return get_user(*pack_id, &old_hdr->pack_id);
+
+		if (in_compat_syscall() &&
+		    count >= sizeof(struct compat_sg_io_hdr)) {
+			struct compat_sg_io_hdr __user *hp = buf;
+
+			return get_user(*pack_id, &hp->pack_id);
+		}
+
+		if (count >= sizeof(struct sg_io_hdr)) {
+			struct sg_io_hdr __user *hp = buf;
+
+			return get_user(*pack_id, &hp->pack_id);
+		}
+	}
+
+	/* no valid header was passed, so ignore the pack_id */
+	*pack_id = -1;
+	return 0;
+}
+
 static ssize_t
 sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
 {
@@ -413,8 +445,8 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
 	Sg_request *srp;
 	int req_pack_id = -1;
 	sg_io_hdr_t *hp;
-	struct sg_header *old_hdr = NULL;
-	int retval = 0;
+	struct sg_header *old_hdr;
+	int retval = 0;
 
 	/*
 	 * This could cause a response to be stranded. Close the associated
@@ -429,79 +461,34 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
 				      "sg_read: count=%d\n", (int) count));
 
-	if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
-		old_hdr = memdup_user(buf, SZ_SG_HEADER);
-		if (IS_ERR(old_hdr))
-			return PTR_ERR(old_hdr);
-		if (old_hdr->reply_len < 0) {
-			if (count >= SZ_SG_IO_HDR) {
-				/*
-				 * This is stupid.
-				 *
-				 * We're copying the whole sg_io_hdr_t from user
-				 * space just to get the 'pack_id' field. But the
-				 * field is at different offsets for the compat
-				 * case, so we'll use "get_sg_io_hdr()" to copy
-				 * the whole thing and convert it.
-				 *
-				 * We could do something like just calculating the
-				 * offset based of 'in_compat_syscall()', but the
-				 * 'compat_sg_io_hdr' definition is in the wrong
-				 * place for that.
-				 */
-				sg_io_hdr_t *new_hdr;
-				new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
-				if (!new_hdr) {
-					retval = -ENOMEM;
-					goto free_old_hdr;
-				}
-				retval = get_sg_io_hdr(new_hdr, buf);
-				req_pack_id = new_hdr->pack_id;
-				kfree(new_hdr);
-				if (retval) {
-					retval = -EFAULT;
-					goto free_old_hdr;
-				}
-			}
-		} else
-			req_pack_id = old_hdr->pack_id;
-	}
+	if (sfp->force_packid)
+		retval = get_sg_io_pack_id(&req_pack_id, buf, count);
+	if (retval)
+		return retval;
+
 	srp = sg_get_rq_mark(sfp, req_pack_id);
 	if (!srp) {		/* now wait on packet to arrive */
-		if (atomic_read(&sdp->detaching)) {
-			retval = -ENODEV;
-			goto free_old_hdr;
-		}
-		if (filp->f_flags & O_NONBLOCK) {
-			retval = -EAGAIN;
-			goto free_old_hdr;
-		}
+		if (atomic_read(&sdp->detaching))
+			return -ENODEV;
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
 		retval = wait_event_interruptible(sfp->read_wait,
 			(atomic_read(&sdp->detaching) ||
 			(srp = sg_get_rq_mark(sfp, req_pack_id))));
-		if (atomic_read(&sdp->detaching)) {
-			retval = -ENODEV;
-			goto free_old_hdr;
-		}
-		if (retval) {
+		if (atomic_read(&sdp->detaching))
+			return -ENODEV;
+		if (retval)
 			/* -ERESTARTSYS as signal hit process */
-			goto free_old_hdr;
-		}
+			return retval;
 	}
-	if (srp->header.interface_id != '\0') {
-		retval = sg_new_read(sfp, buf, count, srp);
-		goto free_old_hdr;
-	}
+	if (srp->header.interface_id != '\0')
+		return sg_new_read(sfp, buf, count, srp);
 
 	hp = &srp->header;
-	if (old_hdr == NULL) {
-		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
-		if (! old_hdr) {
-			retval = -ENOMEM;
-			goto free_old_hdr;
-		}
-	}
-	memset(old_hdr, 0, SZ_SG_HEADER);
+	old_hdr = kzalloc(SZ_SG_HEADER, GFP_KERNEL);
+	if (!old_hdr)
+		return -ENOMEM;
+
 	old_hdr->reply_len = (int) hp->timeout;
 	old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
 	old_hdr->pack_id = hp->pack_id;
@@ -575,7 +562,12 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
 	int err = 0, err2;
 	int len;
 
-	if (count < SZ_SG_IO_HDR) {
+	if (in_compat_syscall()) {
+		if (count < sizeof(struct compat_sg_io_hdr)) {
+			err = -EINVAL;
+			goto err_out;
+		}
+	} else if (count < SZ_SG_IO_HDR) {
 		err = -EINVAL;
 		goto err_out;
 	}
@@ -919,19 +911,14 @@ static int put_compat_request_table(struct compat_sg_req_info __user *o,
 #endif
 
 static long
-sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
+		unsigned int cmd_in, void __user *p)
 {
-	void __user *p = (void __user *)arg;
 	int __user *ip = p;
 	int result, val, read_only;
-	Sg_device *sdp;
-	Sg_fd *sfp;
 	Sg_request *srp;
 	unsigned long iflags;
 
-	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
-		return -ENXIO;
-
 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
 				   "sg_ioctl: cmd=0x%x\n", (int) cmd_in));
 	read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
@@ -1154,29 +1141,44 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 			cmd_in, filp->f_flags & O_NDELAY);
 	if (result)
 		return result;
+
+	return -ENOIOCTLCMD;
+}
+
+static long
+sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+{
+	void __user *p = (void __user *)arg;
+	Sg_device *sdp;
+	Sg_fd *sfp;
+	int ret;
+
+	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+		return -ENXIO;
+
+	ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
+	if (ret != -ENOIOCTLCMD)
+		return ret;
+
 	return scsi_ioctl(sdp->device, cmd_in, p);
 }
 
 #ifdef CONFIG_COMPAT
 static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 {
+	void __user *p = compat_ptr(arg);
 	Sg_device *sdp;
 	Sg_fd *sfp;
-	struct scsi_device *sdev;
+	int ret;
 
 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
 		return -ENXIO;
 
-	sdev = sdp->device;
-	if (sdev->host->hostt->compat_ioctl) { 
-		int ret;
-
-		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
-
+	ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
+	if (ret != -ENOIOCTLCMD)
 		return ret;
-	}
-	
-	return -ENOIOCTLCMD;
+
+	return scsi_compat_ioctl(sdp->device, cmd_in, p);
 }
 #endif
 
@@ -2320,25 +2322,23 @@ static int sg_proc_seq_show_int(struct seq_file *s, void *v);
 static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
 static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
 			          size_t count, loff_t *off);
-static const struct file_operations adio_fops = {
-	.owner = THIS_MODULE,
-	.open = sg_proc_single_open_adio,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.write = sg_proc_write_adio,
-	.release = single_release,
+static const struct proc_ops adio_proc_ops = {
+	.proc_open	= sg_proc_single_open_adio,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= sg_proc_write_adio,
+	.proc_release	= single_release,
 };
 
 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
 static ssize_t sg_proc_write_dressz(struct file *filp, 
 		const char __user *buffer, size_t count, loff_t *off);
-static const struct file_operations dressz_fops = {
-	.owner = THIS_MODULE,
-	.open = sg_proc_single_open_dressz,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.write = sg_proc_write_dressz,
-	.release = single_release,
+static const struct proc_ops dressz_proc_ops = {
+	.proc_open	= sg_proc_single_open_dressz,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= sg_proc_write_dressz,
+	.proc_release	= single_release,
 };
 
 static int sg_proc_seq_show_version(struct seq_file *s, void *v);
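The adio/dressz conversion above is the procfs side of this change: /proc entries now register a struct proc_ops instead of a struct file_operations, so the callbacks gain a proc_ prefix and the .owner field disappears, while the proc_create() call sites (updated in the next hunk) only change the type of their last argument. A minimal sketch of one read/write entry under the new interface; the example_* names are hypothetical, everything else is the regular seq_file/procfs API:

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct proc_ops example_proc_ops = {
	.proc_open	= example_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= example_proc_write,	/* only needed for writable entries */
	.proc_release	= single_release,
};

/* registration looks the same, only the ops type changed */
proc_create("example", S_IRUGO | S_IWUSR, parent_dir, &example_proc_ops);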
@@ -2379,9 +2379,9 @@ sg_proc_init(void)
 	if (!p)
 		return 1;
 
-	proc_create("allow_dio", S_IRUGO | S_IWUSR, p, &adio_fops);
+	proc_create("allow_dio", S_IRUGO | S_IWUSR, p, &adio_proc_ops);
 	proc_create_seq("debug", S_IRUGO, p, &debug_seq_ops);
-	proc_create("def_reserved_size", S_IRUGO | S_IWUSR, p, &dressz_fops);
+	proc_create("def_reserved_size", S_IRUGO | S_IWUSR, p, &dressz_proc_ops);
 	proc_create_single("device_hdr", S_IRUGO, p, sg_proc_seq_show_devhdr);
 	proc_create_seq("devices", S_IRUGO, p, &dev_seq_ops);
 	proc_create_seq("device_strs", S_IRUGO, p, &devstrs_seq_ops);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 4664fdf..0fbb8fe 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -38,6 +38,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/bio.h>
+#include <linux/compat.h>
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/cdrom.h>
@@ -598,6 +599,51 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	return ret;
 }
 
+#ifdef CONFIG_COMPAT
+static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+			  unsigned long arg)
+{
+	struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
+	struct scsi_device *sdev = cd->device;
+	void __user *argp = compat_ptr(arg);
+	int ret;
+
+	mutex_lock(&sr_mutex);
+
+	ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
+			(mode & FMODE_NDELAY) != 0);
+	if (ret)
+		goto out;
+
+	scsi_autopm_get_device(sdev);
+
+	/*
+	 * Send SCSI addressing ioctls directly to mid level, send other
+	 * ioctls to cdrom/block level.
+	 */
+	switch (cmd) {
+	case SCSI_IOCTL_GET_IDLUN:
+	case SCSI_IOCTL_GET_BUS_NUMBER:
+		ret = scsi_compat_ioctl(sdev, cmd, argp);
+		goto put;
+	}
+
+	ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, (unsigned long)argp);
+	if (ret != -ENOSYS)
+		goto put;
+
+	ret = scsi_compat_ioctl(sdev, cmd, argp);
+
+put:
+	scsi_autopm_put_device(sdev);
+
+out:
+	mutex_unlock(&sr_mutex);
+	return ret;
+
+}
+#endif
+
 static unsigned int sr_block_check_events(struct gendisk *disk,
 					  unsigned int clearing)
 {
@@ -641,12 +687,11 @@ static const struct block_device_operations sr_bdops =
 	.open		= sr_block_open,
 	.release	= sr_block_release,
 	.ioctl		= sr_block_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= sr_block_compat_ioctl,
+#endif
 	.check_events	= sr_block_check_events,
 	.revalidate_disk = sr_block_revalidate_disk,
-	/* 
-	 * No compat_ioctl for now because sr_block_ioctl never
-	 * seems to pass arbitrary ioctls down to host drivers.
-	 */
 };
 
 static int sr_open(struct cdrom_device_info *cdi, int purpose)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 9e3fff2..393f301 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3501,7 +3501,7 @@ static int partition_tape(struct scsi_tape *STp, int size)
 
 
 /* The ioctl command */
-static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
+static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user *p)
 {
 	int i, cmd_nr, cmd_type, bt;
 	int retval = 0;
@@ -3509,7 +3509,6 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
 	struct scsi_tape *STp = file->private_data;
 	struct st_modedef *STm;
 	struct st_partstat *STps;
-	void __user *p = (void __user *)arg;
 
 	if (mutex_lock_interruptible(&STp->lock))
 		return -ERESTARTSYS;
@@ -3824,9 +3823,19 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
 	}
 	mutex_unlock(&STp->lock);
 	switch (cmd_in) {
+		case SCSI_IOCTL_STOP_UNIT:
+			/* unload */
+			retval = scsi_ioctl(STp->device, cmd_in, p);
+			if (!retval) {
+				STp->rew_at_close = 0;
+				STp->ready = ST_NO_TAPE;
+			}
+			return retval;
+
 		case SCSI_IOCTL_GET_IDLUN:
 		case SCSI_IOCTL_GET_BUS_NUMBER:
 			break;
+
 		default:
 			if ((cmd_in == SG_IO ||
 			     cmd_in == SCSI_IOCTL_SEND_COMMAND ||
@@ -3840,42 +3849,46 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
 				return i;
 			break;
 	}
-	retval = scsi_ioctl(STp->device, cmd_in, p);
-	if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) { /* unload */
-		STp->rew_at_close = 0;
-		STp->ready = ST_NO_TAPE;
-	}
-	return retval;
+	return -ENOTTY;
 
  out:
 	mutex_unlock(&STp->lock);
 	return retval;
 }
 
+static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
+{
+	void __user *p = (void __user *)arg;
+	struct scsi_tape *STp = file->private_data;
+	int ret;
+
+	ret = st_ioctl_common(file, cmd_in, p);
+	if (ret != -ENOTTY)
+		return ret;
+
+	return scsi_ioctl(STp->device, cmd_in, p);
+}
+
 #ifdef CONFIG_COMPAT
 static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
 {
 	void __user *p = compat_ptr(arg);
 	struct scsi_tape *STp = file->private_data;
-	struct scsi_device *sdev = STp->device;
-	int ret = -ENOIOCTLCMD;
+	int ret;
 
 	/* argument conversion is handled using put_user_mtpos/put_user_mtget */
 	switch (cmd_in) {
-	case MTIOCTOP:
-		return st_ioctl(file, MTIOCTOP, (unsigned long)p);
 	case MTIOCPOS32:
-		return st_ioctl(file, MTIOCPOS, (unsigned long)p);
+		return st_ioctl_common(file, MTIOCPOS, p);
 	case MTIOCGET32:
-		return st_ioctl(file, MTIOCGET, (unsigned long)p);
+		return st_ioctl_common(file, MTIOCGET, p);
 	}
 
-	if (sdev->host->hostt->compat_ioctl) { 
+	ret = st_ioctl_common(file, cmd_in, p);
+	if (ret != -ENOTTY)
+		return ret;
 
-		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
-
-	}
-	return ret;
+	return scsi_compat_ioctl(STp->device, cmd_in, p);
 }
 #endif
 
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c
index 9dc17f1..d37e2a6 100644
--- a/drivers/scsi/sym53c8xx_2/sym_nvram.c
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c
@@ -227,7 +227,7 @@ static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram)
 /*
  *  24C16 EEPROM reading.
  *
- *  GPOI0 - data in/data out
+ *  GPIO0 - data in/data out
  *  GPIO1 - clock
  *  Symbios NVRAM wiring now also used by Tekram.
  */
@@ -524,7 +524,7 @@ static int sym_read_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram)
 /*
  *  93C46 EEPROM reading.
  *
- *  GPOI0 - data in
+ *  GPIO0 - data in
  *  GPIO1 - data out
  *  GPIO2 - clock
  *  GPIO4 - chip select
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index 6feeb0f..56a6a1e 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -19,6 +19,85 @@
 
 #define CDNS_UFS_REG_HCLKDIV	0xFC
 #define CDNS_UFS_REG_PHY_XCFGD1	0x113C
+#define CDNS_UFS_MAX_L4_ATTRS 12
+
+struct cdns_ufs_host {
+	/**
+	 * cdns_ufs_dme_attr_val - for storing L4 attributes
+	 */
+	u32 cdns_ufs_dme_attr_val[CDNS_UFS_MAX_L4_ATTRS];
+};
+
+/**
+ * cdns_ufs_get_l4_attr - get L4 attributes on local side
+ * @hba: per adapter instance
+ *
+ */
+static void cdns_ufs_get_l4_attr(struct ufs_hba *hba)
+{
+	struct cdns_ufs_host *host = ufshcd_get_variant(hba);
+
+	ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERDEVICEID),
+		       &host->cdns_ufs_dme_attr_val[0]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERCPORTID),
+		       &host->cdns_ufs_dme_attr_val[1]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
+		       &host->cdns_ufs_dme_attr_val[2]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID),
+		       &host->cdns_ufs_dme_attr_val[3]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS),
+		       &host->cdns_ufs_dme_attr_val[4]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
+		       &host->cdns_ufs_dme_attr_val[5]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
+		       &host->cdns_ufs_dme_attr_val[6]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
+		       &host->cdns_ufs_dme_attr_val[7]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
+		       &host->cdns_ufs_dme_attr_val[8]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
+		       &host->cdns_ufs_dme_attr_val[9]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTMODE),
+		       &host->cdns_ufs_dme_attr_val[10]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
+		       &host->cdns_ufs_dme_attr_val[11]);
+}
+
+/**
+ * cdns_ufs_set_l4_attr - set L4 attributes on local side
+ * @hba: per adapter instance
+ *
+ */
+static void cdns_ufs_set_l4_attr(struct ufs_hba *hba)
+{
+	struct cdns_ufs_host *host = ufshcd_get_variant(hba);
+
+	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID),
+		       host->cdns_ufs_dme_attr_val[0]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID),
+		       host->cdns_ufs_dme_attr_val[1]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
+		       host->cdns_ufs_dme_attr_val[2]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PROTOCOLID),
+		       host->cdns_ufs_dme_attr_val[3]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS),
+		       host->cdns_ufs_dme_attr_val[4]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
+		       host->cdns_ufs_dme_attr_val[5]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
+		       host->cdns_ufs_dme_attr_val[6]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
+		       host->cdns_ufs_dme_attr_val[7]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
+		       host->cdns_ufs_dme_attr_val[8]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
+		       host->cdns_ufs_dme_attr_val[9]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTMODE),
+		       host->cdns_ufs_dme_attr_val[10]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
+		       host->cdns_ufs_dme_attr_val[11]);
+}
 
 /**
  * Sets HCLKDIV register value based on the core_clk
@@ -78,6 +157,22 @@ static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
 }
 
 /**
+ * Called around hibern8 enter/exit.
+ * @hba: host controller instance
+ * @cmd: UIC Command
+ * @status: notify stage (pre, post change)
+ *
+ */
+static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
+				    enum ufs_notify_change_status status)
+{
+	if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER)
+		cdns_ufs_get_l4_attr(hba);
+	if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT)
+		cdns_ufs_set_l4_attr(hba);
+}
+
+/**
  * Called before and after Link startup is carried out.
  * @hba: host controller instance
  * @status: notify stage (pre, post change)
@@ -117,6 +212,14 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
 static int cdns_ufs_init(struct ufs_hba *hba)
 {
 	int status = 0;
+	struct cdns_ufs_host *host;
+	struct device *dev = hba->dev;
+
+	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+
+	if (!host)
+		return -ENOMEM;
+	ufshcd_set_variant(hba, host);
 
 	if (hba->vops && hba->vops->phy_initialization)
 		status = hba->vops->phy_initialization(hba);
@@ -144,8 +247,10 @@ static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
 
 static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = {
 	.name = "cdns-ufs-pltfm",
+	.init = cdns_ufs_init,
 	.hce_enable_notify = cdns_ufs_hce_enable_notify,
 	.link_startup_notify = cdns_ufs_link_startup_notify,
+	.hibern8_notify = cdns_ufs_hibern8_notify,
 };
 
 static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
@@ -154,6 +259,7 @@ static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
 	.hce_enable_notify = cdns_ufs_hce_enable_notify,
 	.link_startup_notify = cdns_ufs_link_startup_notify,
 	.phy_initialization = cdns_ufs_m31_16nm_phy_initialization,
+	.hibern8_notify = cdns_ufs_hibern8_notify,
 };
 
 static const struct of_device_id cdns_ufs_of_match[] = {
@@ -219,6 +325,7 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
 static struct platform_driver cdns_ufs_pltfrm_driver = {
 	.probe	= cdns_ufs_pltfrm_probe,
 	.remove	= cdns_ufs_pltfrm_remove,
+	.shutdown = ufshcd_pltfrm_shutdown,
 	.driver	= {
 		.name   = "cdns-ufshcd",
 		.pm     = &cdns_ufs_dev_pm_ops,
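cdns_ufs_get_l4_attr() and cdns_ufs_set_l4_attr() above save a dozen UniPro transport (T_*) attributes before hibern8 entry and write them back after exit, clearing T_CONNECTIONSTATE first so the connection is re-established last. The twelve hand-written ufshcd_dme_get()/ufshcd_dme_set() pairs could equally be driven from a table; a hedged sketch of that alternative, using the same attributes in the same order (not the driver's actual implementation):

static const u32 cdns_ufs_l4_attrs[CDNS_UFS_MAX_L4_ATTRS] = {
	T_PEERDEVICEID, T_PEERCPORTID, T_TRAFFICCLASS, T_PROTOCOLID,
	T_CPORTFLAGS, T_TXTOKENVALUE, T_RXTOKENVALUE, T_LOCALBUFFERSPACE,
	T_PEERBUFFERSPACE, T_CREDITSTOSEND, T_CPORTMODE, T_CONNECTIONSTATE,
};

static void cdns_ufs_get_l4_attr(struct ufs_hba *hba)
{
	struct cdns_ufs_host *host = ufshcd_get_variant(hba);
	int i;

	for (i = 0; i < CDNS_UFS_MAX_L4_ATTRS; i++)
		ufshcd_dme_get(hba, UIC_ARG_MIB(cdns_ufs_l4_attrs[i]),
			       &host->cdns_ufs_dme_attr_val[i]);
}

static void cdns_ufs_set_l4_attr(struct ufs_hba *hba)
{
	struct cdns_ufs_host *host = ufshcd_get_variant(hba);
	int i;

	/* drop the connection before reprogramming, as the driver does */
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0);
	for (i = 0; i < CDNS_UFS_MAX_L4_ATTRS; i++)
		ufshcd_dme_set(hba, UIC_ARG_MIB(cdns_ufs_l4_attrs[i]),
			       host->cdns_ufs_dme_attr_val[i]);
}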
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 83e28ed..53eae5fe 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -6,16 +6,30 @@
  *	Peter Wang <peter.wang@mediatek.com>
  */
 
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
 
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
+#include "ufs_quirks.h"
 #include "unipro.h"
 #include "ufs-mediatek.h"
 
+#define ufs_mtk_smc(cmd, val, res) \
+	arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
+		      cmd, val, 0, 0, 0, 0, 0, &(res))
+
+#define ufs_mtk_ref_clk_notify(on, res) \
+	ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
+
+#define ufs_mtk_device_reset_ctrl(high, res) \
+	ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
+
 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
 {
 	u32 tmp;
@@ -81,6 +95,49 @@ static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
 	return err;
 }
 
+static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+	struct arm_smccc_res res;
+	unsigned long timeout;
+	u32 value;
+
+	if (host->ref_clk_enabled == on)
+		return 0;
+
+	if (on) {
+		ufs_mtk_ref_clk_notify(on, res);
+		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
+	} else {
+		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
+	}
+
+	/* Wait for ack */
+	timeout = jiffies + msecs_to_jiffies(REFCLK_REQ_TIMEOUT_MS);
+	do {
+		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
+
+		/* Wait until the ack bit equals the req bit */
+		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
+			goto out;
+
+		usleep_range(100, 200);
+	} while (time_before(jiffies, timeout));
+
+	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
+
+	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);
+
+	return -ETIMEDOUT;
+
+out:
+	host->ref_clk_enabled = on;
+	if (!on)
+		ufs_mtk_ref_clk_notify(on, res);
+
+	return 0;
+}
+
 /**
  * ufs_mtk_setup_clocks - enables/disable clocks
  * @hba: host controller instance
@@ -105,12 +162,16 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
 
 	switch (status) {
 	case PRE_CHANGE:
-		if (!on)
+		if (!on) {
+			ufs_mtk_setup_ref_clk(hba, on);
 			ret = phy_power_off(host->mphy);
+		}
 		break;
 	case POST_CHANGE:
-		if (on)
+		if (on) {
 			ret = phy_power_on(host->mphy);
+			ufs_mtk_setup_ref_clk(hba, on);
+		}
 		break;
 	}
 
@@ -150,6 +211,9 @@ static int ufs_mtk_init(struct ufs_hba *hba)
 	/* Enable runtime autosuspend */
 	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
 
+	/* Enable clock-gating */
+	hba->caps |= UFSHCD_CAP_CLK_GATING;
+
 	/*
 	 * ufshcd_vops_init() is invoked after
 	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
@@ -238,6 +302,23 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
 	return ret;
 }
 
+static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+{
+	unsigned long flags;
+	u32 ah_ms;
+
+	if (ufshcd_is_clkgating_allowed(hba)) {
+		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
+			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
+					  hba->ahit);
+		else
+			ah_ms = 10;
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		hba->clk_gating.delay_ms = ah_ms + 5;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	}
+}
+
 static int ufs_mtk_post_link(struct ufs_hba *hba)
 {
 	/* disable device LCC */
@@ -246,6 +327,15 @@ static int ufs_mtk_post_link(struct ufs_hba *hba)
 	/* enable unipro clock gating feature */
 	ufs_mtk_cfg_unipro_cg(hba, true);
 
+	/* configure auto-hibern8 timer to 10ms */
+	if (ufshcd_is_auto_hibern8_supported(hba)) {
+		ufshcd_auto_hibern8_update(hba,
+			FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
+			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
+	}
+
+	ufs_mtk_setup_clk_gating(hba);
+
 	return 0;
 }
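The 10 ms figure above comes from how the Auto-Hibernate Idle Timer (AHIT) register value is put together: the low field carries the count and the scale field selects the unit, and per UFSHCI a scale of 3 means 1 ms steps, so FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) | FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3) encodes ten steps of 1 ms. This is also why ufs_mtk_setup_clk_gating() can read the timer field back with FIELD_GET() and treat it directly as milliseconds. A small hedged helper in the same spirit (name hypothetical, millisecond granularity only):

/* encode an auto-hibern8 idle time given in milliseconds */
static u32 ufs_example_ahit_from_ms(u32 ms)
{
	/*
	 * Scale value 3 selects 1 ms units, so the count field is the
	 * millisecond value itself (the count field is 10 bits wide per
	 * UFSHCI, so this only covers values up to 1023 ms).
	 */
	return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, ms) |
	       FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
}

/* equivalent to the update done in ufs_mtk_post_link() above */
ufshcd_auto_hibern8_update(hba, ufs_example_ahit_from_ms(10));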
 
@@ -269,12 +359,86 @@ static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
 	return ret;
 }
 
+static void ufs_mtk_device_reset(struct ufs_hba *hba)
+{
+	struct arm_smccc_res res;
+
+	ufs_mtk_device_reset_ctrl(0, res);
+
+	/*
+	 * The reset signal is active low. UFS devices shall detect
+	 * more than or equal to 1us of positive or negative RST_n
+	 * pulse width.
+	 *
+	 * To be on safe side, keep the reset low for at least 10us.
+	 */
+	usleep_range(10, 15);
+
+	ufs_mtk_device_reset_ctrl(1, res);
+
+	/* Some devices may need time to respond to rst_n */
+	usleep_range(10000, 15000);
+
+	dev_info(hba->dev, "device reset done\n");
+}
+
+static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
+{
+	int err;
+
+	err = ufshcd_hba_enable(hba);
+	if (err)
+		return err;
+
+	err = ufshcd_dme_set(hba,
+			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+			     0);
+	if (err)
+		return err;
+
+	err = ufshcd_uic_hibern8_exit(hba);
+	if (!err)
+		ufshcd_set_link_active(hba);
+	else
+		return err;
+
+	err = ufshcd_make_hba_operational(hba);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
+{
+	int err;
+
+	err = ufshcd_dme_set(hba,
+			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+			     1);
+	if (err) {
+		/* Resume UniPro state for following error recovery */
+		ufshcd_dme_set(hba,
+			       UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+			       0);
+		return err;
+	}
+
+	return 0;
+}
+
 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 {
+	int err;
 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
-	if (ufshcd_is_link_hibern8(hba))
+	if (ufshcd_is_link_hibern8(hba)) {
+		err = ufs_mtk_link_set_lpm(hba);
+		if (err)
+			return -EAGAIN;
 		phy_power_off(host->mphy);
+		ufs_mtk_setup_ref_clk(hba, false);
+	}
 
 	return 0;
 }
@@ -282,9 +446,40 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 {
 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+	int err;
 
-	if (ufshcd_is_link_hibern8(hba))
+	if (ufshcd_is_link_hibern8(hba)) {
+		ufs_mtk_setup_ref_clk(hba, true);
 		phy_power_on(host->mphy);
+		err = ufs_mtk_link_set_hpm(hba);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
+{
+	ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");
+
+	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
+
+	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
+			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
+			 "MPHY Ctrl ");
+
+	/* Direct debugging information to REG_MTK_PROBE */
+	ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
+	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
+}
+
+static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
+{
+	struct ufs_dev_info *dev_info = &hba->dev_info;
+
+	if (dev_info->wmanufacturerid == UFS_VENDOR_SAMSUNG)
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
 
 	return 0;
 }
@@ -301,8 +496,11 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
 	.setup_clocks        = ufs_mtk_setup_clocks,
 	.link_startup_notify = ufs_mtk_link_startup_notify,
 	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
+	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
 	.suspend             = ufs_mtk_suspend,
 	.resume              = ufs_mtk_resume,
+	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
+	.device_reset        = ufs_mtk_device_reset,
 };
 
 /**
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
index 19f8c42..fccdd97 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -6,6 +6,30 @@
 #ifndef _UFS_MEDIATEK_H
 #define _UFS_MEDIATEK_H
 
+#include <linux/bitops.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
+
+/*
+ * Vendor specific UFSHCI Registers
+ */
+#define REG_UFS_REFCLK_CTRL         0x144
+#define REG_UFS_EXTREG              0x2100
+#define REG_UFS_MPHYCTRL            0x2200
+#define REG_UFS_REJECT_MON          0x22AC
+#define REG_UFS_DEBUG_SEL           0x22C0
+#define REG_UFS_PROBE               0x22C8
+
+/*
+ * Ref-clk control
+ *
+ * Values for register REG_UFS_REFCLK_CTRL
+ */
+#define REFCLK_RELEASE              0x0
+#define REFCLK_REQUEST              BIT(0)
+#define REFCLK_ACK                  BIT(1)
+
+#define REFCLK_REQ_TIMEOUT_MS       3
+
 /*
  * Vendor specific pre-defined parameters
  */
@@ -30,6 +54,13 @@
 #define VS_UNIPROPOWERDOWNCONTROL   0xD0A8
 
 /*
+ * SiP commands
+ */
+#define MTK_SIP_UFS_CONTROL               MTK_SIP_SMC_CMD(0x276)
+#define UFS_MTK_SIP_DEVICE_RESET          BIT(1)
+#define UFS_MTK_SIP_REF_CLK_NOTIFICATION  BIT(3)
+
+/*
  * VS_DEBUGCLOCKENABLE
  */
 enum {
@@ -48,6 +79,7 @@ enum {
 struct ufs_mtk_host {
 	struct ufs_hba *hba;
 	struct phy *mphy;
+	bool ref_clk_enabled;
 };
 
 #endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index ad2abc9..dbdf8b0 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -118,26 +118,6 @@ static ssize_t spm_target_link_state_show(struct device *dev,
 				ufs_pm_lvl_states[hba->spm_lvl].link_state));
 }
 
-static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
-{
-	unsigned long flags;
-
-	if (!ufshcd_is_auto_hibern8_supported(hba))
-		return;
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->ahit != ahit)
-		hba->ahit = ahit;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	if (!pm_runtime_suspended(hba->dev)) {
-		pm_runtime_get_sync(hba->dev);
-		ufshcd_hold(hba, false);
-		ufshcd_auto_hibern8_enable(hba);
-		ufshcd_release(hba);
-		pm_runtime_put(hba->dev);
-	}
-}
-
 /* Convert Auto-Hibernate Idle Timer register value to microseconds */
 static int ufshcd_ahit_to_us(u32 ahit)
 {
@@ -733,7 +713,7 @@ static ssize_t _pname##_show(struct device *dev,			\
 	struct scsi_device *sdev = to_scsi_device(dev);			\
 	struct ufs_hba *hba = shost_priv(sdev->host);			\
 	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);			\
-	if (!ufs_is_valid_unit_desc_lun(lun))				\
+	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))		\
 		return -EINVAL;						\
 	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname,	\
 		lun, _duname##_DESC_PARAM##_puname, buf, _size);	\
diff --git a/drivers/scsi/ufs/ufs-sysfs.h b/drivers/scsi/ufs/ufs-sysfs.h
index e5621e5..0f4e750 100644
--- a/drivers/scsi/ufs/ufs-sysfs.h
+++ b/drivers/scsi/ufs/ufs-sysfs.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (C) 2018 Western Digital Corporation
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Western Digital Corporation
  */
 
 #ifndef __UFS_SYSFS_H__
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 3327981..cfe3803 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -63,7 +63,6 @@
 #define UFS_UPIU_MAX_UNIT_NUM_ID	0x7F
 #define UFS_MAX_LUNS		(SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
 #define UFS_UPIU_WLUN_ID	(1 << 7)
-#define UFS_UPIU_MAX_GENERAL_LUN	8
 
 /* Well known logical unit id in LUN field of UPIU */
 enum {
@@ -499,9 +498,9 @@ struct ufs_query_res {
 #define UFS_VREG_VCC_MAX_UV	   3600000 /* uV */
 #define UFS_VREG_VCC_1P8_MIN_UV    1700000 /* uV */
 #define UFS_VREG_VCC_1P8_MAX_UV    1950000 /* uV */
-#define UFS_VREG_VCCQ_MIN_UV	   1100000 /* uV */
-#define UFS_VREG_VCCQ_MAX_UV	   1300000 /* uV */
-#define UFS_VREG_VCCQ2_MIN_UV	   1650000 /* uV */
+#define UFS_VREG_VCCQ_MIN_UV	   1140000 /* uV */
+#define UFS_VREG_VCCQ_MAX_UV	   1260000 /* uV */
+#define UFS_VREG_VCCQ2_MIN_UV	   1700000 /* uV */
 #define UFS_VREG_VCCQ2_MAX_UV	   1950000 /* uV */
 
 /*
@@ -530,28 +529,28 @@ struct ufs_dev_info {
 	bool f_power_on_wp_en;
 	/* Keeps information if any of the LU is power on write protected */
 	bool is_lu_power_on_wp;
-};
-
-#define MAX_MODEL_LEN 16
-/**
- * ufs_dev_desc - ufs device details from the device descriptor
- *
- * @wmanufacturerid: card details
- * @model: card model
- */
-struct ufs_dev_desc {
+	/* Maximum number of general LU supported by the UFS device */
+	u8 max_lu_supported;
 	u16 wmanufacturerid;
+	/* UFS device Product Name */
 	u8 *model;
 };
 
 /**
  * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor
+ * @dev_info: pointer of instance of struct ufs_dev_info
  * @lun: LU number to check
  * @return: true if the lun has a matching unit descriptor, false otherwise
  */
-static inline bool ufs_is_valid_unit_desc_lun(u8 lun)
+static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
+		u8 lun)
 {
-	return lun == UFS_UPIU_RPMB_WLUN || (lun < UFS_UPIU_MAX_GENERAL_LUN);
+	if (!dev_info || !dev_info->max_lu_supported) {
+		pr_err("Max General LU supported by UFS isn't initialized\n");
+		return false;
+	}
+
+	return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
 }
 
 #endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index fe6cad9..d0ab147 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -22,16 +22,17 @@
  * @quirk: device quirk
  */
 struct ufs_dev_fix {
-	struct ufs_dev_desc card;
+	u16 wmanufacturerid;
+	u8 *model;
 	unsigned int quirk;
 };
 
-#define END_FIX { { 0 }, 0 }
+#define END_FIX { }
 
 /* add specific device quirk */
 #define UFS_FIX(_vendor, _model, _quirk) { \
-	.card.wmanufacturerid = (_vendor),\
-	.card.model = (_model),		   \
+	.wmanufacturerid = (_vendor),\
+	.model = (_model),		   \
 	.quirk = (_quirk),		   \
 }
 
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b5966fa..abd0e6b 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -246,11 +246,10 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
 static void ufshcd_hba_exit(struct ufs_hba *hba);
-static int ufshcd_probe_hba(struct ufs_hba *hba);
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 				 bool skip_ref_clk);
 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
@@ -266,26 +265,18 @@ static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
 	return tag >= 0 && tag < hba->nutrs;
 }
 
-static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+static inline void ufshcd_enable_irq(struct ufs_hba *hba)
 {
-	int ret = 0;
-
 	if (!hba->is_irq_enabled) {
-		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
-				hba);
-		if (ret)
-			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
-				__func__, ret);
+		enable_irq(hba->irq);
 		hba->is_irq_enabled = true;
 	}
-
-	return ret;
 }
 
 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
 {
 	if (hba->is_irq_enabled) {
-		free_irq(hba->irq, hba);
+		disable_irq(hba->irq);
 		hba->is_irq_enabled = false;
 	}
 }
@@ -335,27 +326,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
 	u8 opcode = 0;
 	u32 intr, doorbell;
 	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+	struct scsi_cmnd *cmd = lrbp->cmd;
 	int transfer_len = -1;
 
 	if (!trace_ufshcd_command_enabled()) {
 		/* trace UPIU W/O tracing command */
-		if (lrbp->cmd)
+		if (cmd)
 			ufshcd_add_cmd_upiu_trace(hba, tag, str);
 		return;
 	}
 
-	if (lrbp->cmd) { /* data phase exists */
+	if (cmd) { /* data phase exists */
 		/* trace UPIU also */
 		ufshcd_add_cmd_upiu_trace(hba, tag, str);
-		opcode = (u8)(*lrbp->cmd->cmnd);
+		opcode = cmd->cmnd[0];
 		if ((opcode == READ_10) || (opcode == WRITE_10)) {
 			/*
 			 * Currently we only fully trace read(10) and write(10)
 			 * commands
 			 */
-			if (lrbp->cmd->request && lrbp->cmd->request->bio)
-				lba =
-				  lrbp->cmd->request->bio->bi_iter.bi_sector;
+			if (cmd->request && cmd->request->bio)
+				lba = cmd->request->bio->bi_iter.bi_sector;
 			transfer_len = be32_to_cpu(
 				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
 		}
@@ -393,7 +384,7 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba,
 	for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
 		int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
 
-		if (err_hist->reg[p] == 0)
+		if (err_hist->tstamp[p] == 0)
 			continue;
 		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
 			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
@@ -401,7 +392,7 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba,
 	}
 
 	if (!found)
-		dev_err(hba->dev, "No record of %s errors\n", err_name);
+		dev_err(hba->dev, "No record of %s\n", err_name);
 }
 
 static void ufshcd_print_host_regs(struct ufs_hba *hba)
@@ -436,8 +427,7 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba)
 
 	ufshcd_print_clk_freqs(hba);
 
-	if (hba->vops && hba->vops->dbg_register_dump)
-		hba->vops->dbg_register_dump(hba);
+	ufshcd_vops_dbg_register_dump(hba);
 }
 
 static
@@ -497,8 +487,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
 static void ufshcd_print_host_state(struct ufs_hba *hba)
 {
 	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
-	dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
-		hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
+	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
+		hba->outstanding_reqs, hba->outstanding_tasks);
 	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
 		hba->saved_err, hba->saved_uic_err);
 	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -646,40 +636,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 }
 
 /**
- * ufshcd_get_tm_free_slot - get a free slot for task management request
- * @hba: per adapter instance
- * @free_slot: pointer to variable with available slot value
- *
- * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
- * Returns 0 if free slot is not available, else return 1 with tag value
- * in @free_slot.
- */
-static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
-{
-	int tag;
-	bool ret = false;
-
-	if (!free_slot)
-		goto out;
-
-	do {
-		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
-		if (tag >= hba->nutmrs)
-			goto out;
-	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
-
-	*free_slot = tag;
-	ret = true;
-out:
-	return ret;
-}
-
-static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
-{
-	clear_bit_unlock(slot, &hba->tm_slots_in_use);
-}
-
-/**
  * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
  * @hba: per adapter instance
  * @pos: position of the bit to be cleared
@@ -1273,6 +1229,24 @@ static int ufshcd_devfreq_target(struct device *dev,
 	return ret;
 }
 
+static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
+{
+	int *busy = priv;
+
+	WARN_ON_ONCE(reserved);
+	(*busy)++;
+	return false;
+}
+
+/* Whether or not any tag is in use by a request that is in progress. */
+static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
+{
+	struct request_queue *q = hba->cmd_queue;
+	int busy = 0;
+
+	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
+	return busy;
+}
 
 static int ufshcd_devfreq_get_dev_status(struct device *dev,
 		struct devfreq_dev_status *stat)
@@ -1490,6 +1464,8 @@ static void ufshcd_ungate_work(struct work_struct *work)
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	ufshcd_setup_clocks(hba, true);
 
+	ufshcd_enable_irq(hba);
+
 	/* Exit from hibern8 */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
 		/* Prevent gating in this path */
@@ -1619,7 +1595,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 
 	if (hba->clk_gating.active_reqs
 		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
-		|| hba->lrb_in_use || hba->outstanding_tasks
+		|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
 		|| hba->active_uic_cmd || hba->uic_async_done)
 		goto rel_lock;
 
@@ -1636,6 +1612,8 @@ static void ufshcd_gate_work(struct work_struct *work)
 		ufshcd_set_link_hibern8(hba);
 	}
 
+	ufshcd_disable_irq(hba);
+
 	if (!ufshcd_is_link_active(hba))
 		ufshcd_setup_clocks(hba, false);
 	else
@@ -1673,7 +1651,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
 
 	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
 		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
-		|| hba->lrb_in_use || hba->outstanding_tasks
+		|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
 		|| hba->active_uic_cmd || hba->uic_async_done
 		|| ufshcd_eh_in_progress(hba))
 		return;
@@ -1881,12 +1859,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 {
 	hba->lrb[task_tag].issue_time_stamp = ktime_get();
 	hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
+	ufshcd_add_command_trace(hba, task_tag, "send");
 	ufshcd_clk_scaling_start_busy(hba);
 	__set_bit(task_tag, &hba->outstanding_reqs);
 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	/* Make sure that doorbell is committed immediately */
 	wmb();
-	ufshcd_add_command_trace(hba, task_tag, "send");
 }
 
 /**
@@ -2239,6 +2217,7 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
 static
 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
 {
+	struct scsi_cmnd *cmd = lrbp->cmd;
 	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
 	unsigned short cdb_len;
 
@@ -2252,12 +2231,11 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
 	/* Total EHS length and Data segment length will be zero */
 	ucd_req_ptr->header.dword_2 = 0;
 
-	ucd_req_ptr->sc.exp_data_transfer_len =
-		cpu_to_be32(lrbp->cmd->sdb.length);
+	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
 
-	cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
+	cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
 	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
-	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
+	memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
 
 	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
 }
@@ -2443,22 +2421,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
 	hba->req_abort_count = 0;
 
-	/* acquire the tag to make sure device cmds don't use it */
-	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
-		/*
-		 * Dev manage command in progress, requeue the command.
-		 * Requeuing the command helps in cases where the request *may*
-		 * find different tag instead of waiting for dev manage command
-		 * completion.
-		 */
-		err = SCSI_MLQUEUE_HOST_BUSY;
-		goto out;
-	}
-
 	err = ufshcd_hold(hba, true);
 	if (err) {
 		err = SCSI_MLQUEUE_HOST_BUSY;
-		clear_bit_unlock(tag, &hba->lrb_in_use);
 		goto out;
 	}
 	WARN_ON(hba->clk_gating.state != CLKS_ON);
@@ -2479,7 +2444,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	err = ufshcd_map_sg(hba, lrbp);
 	if (err) {
 		lrbp->cmd = NULL;
-		clear_bit_unlock(tag, &hba->lrb_in_use);
+		ufshcd_release(hba);
 		goto out;
 	}
 	/* Make sure descriptors are ready before ringing the doorbell */
@@ -2627,44 +2592,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
 }
 
 /**
- * ufshcd_get_dev_cmd_tag - Get device management command tag
- * @hba: per-adapter instance
- * @tag_out: pointer to variable with available slot value
- *
- * Get a free slot and lock it until device management command
- * completes.
- *
- * Returns false if free slot is unavailable for locking, else
- * return true with tag value in @tag.
- */
-static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
-{
-	int tag;
-	bool ret = false;
-	unsigned long tmp;
-
-	if (!tag_out)
-		goto out;
-
-	do {
-		tmp = ~hba->lrb_in_use;
-		tag = find_last_bit(&tmp, hba->nutrs);
-		if (tag >= hba->nutrs)
-			goto out;
-	} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
-
-	*tag_out = tag;
-	ret = true;
-out:
-	return ret;
-}
-
-static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
-{
-	clear_bit_unlock(tag, &hba->lrb_in_use);
-}
-
-/**
  * ufshcd_exec_dev_cmd - API for sending device management requests
  * @hba: UFS hba
  * @cmd_type: specifies the type (NOP, Query...)
@@ -2676,6 +2603,8 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 		enum dev_cmd_type cmd_type, int timeout)
 {
+	struct request_queue *q = hba->cmd_queue;
+	struct request *req;
 	struct ufshcd_lrb *lrbp;
 	int err;
 	int tag;
@@ -2689,7 +2618,13 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 	 * Even though we use wait_event() which sleeps indefinitely,
 	 * the maximum wait time is bounded by SCSI request timeout.
 	 */
-	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto out_unlock;
+	}
+	tag = req->tag;
+	WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
 
 	init_completion(&wait);
 	lrbp = &hba->lrb[tag];
@@ -2714,8 +2649,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 			err ? "query_complete_err" : "query_complete");
 
 out_put_tag:
-	ufshcd_put_dev_cmd_tag(hba, tag);
-	wake_up(&hba->dev_cmd.tag_wq);
+	blk_put_request(req);
+out_unlock:
 	up_read(&hba->clk_scaling_lock);
 	return err;
 }
@@ -2918,7 +2853,7 @@ static int ufshcd_query_attr_retry(struct ufs_hba *hba,
 	int ret = 0;
 	u32 retries;
 
-	 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
 		ret = ufshcd_query_attr(hba, opcode, idn, index,
 						selector, attr_val);
 		if (ret)
@@ -3210,17 +3145,6 @@ static inline int ufshcd_read_desc(struct ufs_hba *hba,
 	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
 }
 
-static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
-					 u8 *buf,
-					 u32 size)
-{
-	return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
-}
-
-static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
-{
-	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
-}
 
 /**
  * struct uc_string_id - unicode string
@@ -3345,7 +3269,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
 	 * Unit descriptors are only available for general purpose LUs (LUN id
 	 * from 0 to 7) and RPMB Well known LU.
 	 */
-	if (!ufs_is_valid_unit_desc_lun(lun))
+	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
 		return -EOPNOTSUPP;
 
 	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@ -3929,7 +3853,7 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 	return ret;
 }
 
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 {
 	struct uic_command uic_cmd = {0};
 	int ret;
@@ -3955,6 +3879,25 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
+
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
+{
+	unsigned long flags;
+
+	if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
+		return;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->ahit == ahit)
+		goto out_unlock;
+	hba->ahit = ahit;
+	if (!pm_runtime_suspended(hba->dev))
+		ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+out_unlock:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
 
 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
 {
@@ -4095,6 +4038,26 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
 						pwr_mode->hs_rate);
 
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+			DL_FC0ProtectionTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+			DL_TC0ReplayTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+			DL_AFC0ReqTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+			DL_FC1ProtectionTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+			DL_TC1ReplayTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+			DL_AFC1ReqTimeOutVal_Default);
+
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+			DL_FC0ProtectionTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+			DL_TC0ReplayTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+			DL_AFC0ReqTimeOutVal_Default);
+
 	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
 			| pwr_mode->pwr_tx);
 
@@ -4188,7 +4151,7 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
  *
  * Returns 0 on success, non-zero value on failure
  */
-static int ufshcd_make_hba_operational(struct ufs_hba *hba)
+int ufshcd_make_hba_operational(struct ufs_hba *hba)
 {
 	int err = 0;
 	u32 reg;
@@ -4234,6 +4197,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
 out:
 	return err;
 }
+EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
 
 /**
  * ufshcd_hba_stop - Send controller to reset state
@@ -4311,7 +4275,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
 	return 0;
 }
 
-static int ufshcd_hba_enable(struct ufs_hba *hba)
+int ufshcd_hba_enable(struct ufs_hba *hba)
 {
 	int ret;
 
@@ -4336,6 +4300,8 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
+
 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
 {
 	int tx_lanes, i, err = 0;
@@ -4372,13 +4338,14 @@ static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
 	return ufshcd_disable_tx_lcc(hba, true);
 }
 
-static void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
-				   u32 reg)
+void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
+			    u32 reg)
 {
 	reg_hist->reg[reg_hist->pos] = reg;
 	reg_hist->tstamp[reg_hist->pos] = ktime_get();
 	reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
 }
+EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist);
 
 /**
  * ufshcd_link_startup - Initialize unipro link startup
@@ -4561,7 +4528,7 @@ static int ufshcd_get_lu_wp(struct ufs_hba *hba,
 	 * protected so skip reading bLUWriteProtect parameter for
 	 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
 	 */
-	else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
+	else if (lun >= hba->dev_info.max_lu_supported)
 		ret = -ENOTSUPP;
 	else
 		ret = ufshcd_read_unit_desc_param(hba,
@@ -4608,6 +4575,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
 	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
 	sdev->use_10_for_ms = 1;
 
+	/* DBD field should be set to 1 in mode sense(10) */
+	sdev->set_dbd_for_ms = 1;
+
 	/* allow SCSI layer to restart the device in case of errors */
 	sdev->allow_restart = 1;
 
@@ -4799,7 +4769,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 		break;
 	} /* end of switch */
 
-	if (host_byte(result) != DID_OK)
+	if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
 		ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
 	return result;
 }
@@ -4856,12 +4826,13 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 			cmd->result = result;
 			/* Mark completed command as NULL in LRB */
 			lrbp->cmd = NULL;
-			clear_bit_unlock(index, &hba->lrb_in_use);
+			lrbp->compl_time_stamp = ktime_get();
 			/* Do not touch lrbp after scsi done */
 			cmd->scsi_done(cmd);
 			__ufshcd_release(hba);
 		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
 			lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
+			lrbp->compl_time_stamp = ktime_get();
 			if (hba->dev_cmd.complete) {
 				ufshcd_add_command_trace(hba, index,
 						"dev_complete");
@@ -4870,17 +4841,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 		}
 		if (ufshcd_is_clkscaling_supported(hba))
 			hba->clk_scaling.active_reqs--;
-
-		lrbp->compl_time_stamp = ktime_get();
 	}
 
 	/* clear corresponding bits of completed commands */
 	hba->outstanding_reqs ^= completed_reqs;
 
 	ufshcd_clk_scaling_update_busy(hba);
-
-	/* we might have free'd some tags above */
-	wake_up(&hba->dev_cmd.tag_wq);
 }
 
 /**
@@ -5053,6 +5019,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
 
 	hba->auto_bkops_enabled = false;
 	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+	hba->is_urgent_bkops_lvl_checked = false;
 out:
 	return err;
 }
@@ -5077,6 +5044,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
 		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
 		ufshcd_disable_auto_bkops(hba);
 	}
+	hba->is_urgent_bkops_lvl_checked = false;
 }
 
 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
@@ -5123,6 +5091,7 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
 		err = ufshcd_enable_auto_bkops(hba);
 	else
 		err = ufshcd_disable_auto_bkops(hba);
+	hba->urgent_bkops_lvl = curr_status;
 out:
 	return err;
 }
@@ -5200,7 +5169,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 	hba = container_of(work, struct ufs_hba, eeh_work);
 
 	pm_runtime_get_sync(hba->dev);
-	scsi_block_requests(hba->host);
+	ufshcd_scsi_block_requests(hba);
 	err = ufshcd_get_ee_status(hba, &status);
 	if (err) {
 		dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -5214,7 +5183,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 		ufshcd_bkops_exception_event_handler(hba);
 
 out:
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 	pm_runtime_put_sync(hba->dev);
 	return;
 }
@@ -5348,8 +5317,8 @@ static void ufshcd_err_handler(struct work_struct *work)
 
 	/*
 	 * if host reset is required then skip clearing the pending
-	 * transfers forcefully because they will automatically get
-	 * cleared after link startup.
+	 * transfers forcefully because they will get cleared during
+	 * host reset and restore
 	 */
 	if (needs_reset)
 		goto skip_pending_xfer_clear;
@@ -5603,6 +5572,27 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
 	return retval;
 }
 
+struct ctm_info {
+	struct ufs_hba	*hba;
+	unsigned long	pending;
+	unsigned int	ncpl;
+};
+
+static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
+{
+	struct ctm_info *const ci = priv;
+	struct completion *c;
+
+	WARN_ON_ONCE(reserved);
+	if (test_bit(req->tag, &ci->pending))
+		return true;
+	ci->ncpl++;
+	c = req->end_io_data;
+	if (c)
+		complete(c);
+	return true;
+}
+
 /**
  * ufshcd_tmc_handler - handle task management function completion
  * @hba: per adapter instance
@@ -5613,16 +5603,14 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
  */
 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
-	u32 tm_doorbell;
+	struct request_queue *q = hba->tmf_queue;
+	struct ctm_info ci = {
+		.hba	 = hba,
+		.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
+	};
 
-	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
-	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
-	if (hba->tm_condition) {
-		wake_up(&hba->tm_wq);
-		return IRQ_HANDLED;
-	} else {
-		return IRQ_NONE;
-	}
+	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+	return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
 }
 
 /**
@@ -5728,7 +5716,10 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 		struct utp_task_req_desc *treq, u8 tm_function)
 {
+	struct request_queue *q = hba->tmf_queue;
 	struct Scsi_Host *host = hba->host;
+	DECLARE_COMPLETION_ONSTACK(wait);
+	struct request *req;
 	unsigned long flags;
 	int free_slot, task_tag, err;
 
@@ -5737,7 +5728,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 	 * Even though we use wait_event() which sleeps indefinitely,
 	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
 	 */
-	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+	req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+	req->end_io_data = &wait;
+	free_slot = req->tag;
+	WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
 	ufshcd_hold(hba, false);
 
 	spin_lock_irqsave(host->host_lock, flags);
@@ -5763,10 +5757,14 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 	ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
 
 	/* wait until the task management command is completed */
-	err = wait_event_timeout(hba->tm_wq,
-			test_bit(free_slot, &hba->tm_condition),
+	err = wait_for_completion_io_timeout(&wait,
 			msecs_to_jiffies(TM_CMD_TIMEOUT));
 	if (!err) {
+		/*
+		 * Make sure that ufshcd_compl_tm() does not trigger a
+		 * use-after-free.
+		 */
+		req->end_io_data = NULL;
 		ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
 		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
 				__func__, tm_function);
@@ -5785,9 +5783,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 	__clear_bit(free_slot, &hba->outstanding_tasks);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	clear_bit(free_slot, &hba->tm_condition);
-	ufshcd_put_tm_slot(hba, free_slot);
-	wake_up(&hba->tm_tag_wq);
+	blk_put_request(req);
 
 	ufshcd_release(hba);
 	return err;
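Task management functions get the same conversion, but against the dedicated hba->tmf_queue: the request is allocated with BLK_MQ_REQ_RESERVED so the call blocks until one of the controller's TMF slots is free, the on-stack completion is parked in req->end_io_data, and ufshcd_tmc_handler() later walks the busy tags with blk_mq_tagset_busy_iter() (see ufshcd_compl_tm() above) to complete whichever slots the doorbell register says have finished. The wait/complete handshake reduced to a sketch (locking, descriptor setup and the doorbell write elided):

DECLARE_COMPLETION_ONSTACK(wait);
struct request *req;

req = blk_get_request(hba->tmf_queue, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
req->end_io_data = &wait;	/* completed from ufshcd_compl_tm() */

/* ... ring the task management doorbell for req->tag ... */

if (!wait_for_completion_io_timeout(&wait,
				    msecs_to_jiffies(TM_CMD_TIMEOUT)))
	req->end_io_data = NULL;	/* timed out: block a late completion */

blk_put_request(req);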
@@ -5863,6 +5859,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 					enum dev_cmd_type cmd_type,
 					enum query_opcode desc_op)
 {
+	struct request_queue *q = hba->cmd_queue;
+	struct request *req;
 	struct ufshcd_lrb *lrbp;
 	int err = 0;
 	int tag;
@@ -5872,7 +5870,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 
 	down_read(&hba->clk_scaling_lock);
 
-	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto out_unlock;
+	}
+	tag = req->tag;
+	WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
 
 	init_completion(&wait);
 	lrbp = &hba->lrb[tag];
@@ -5948,8 +5952,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 		}
 	}
 
-	ufshcd_put_dev_cmd_tag(hba, tag);
-	wake_up(&hba->dev_cmd.tag_wq);
+	blk_put_request(req);
+out_unlock:
 	up_read(&hba->clk_scaling_lock);
 	return err;
 }
@@ -6244,9 +6248,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	hba->lrb[tag].cmd = NULL;
 	spin_unlock_irqrestore(host->host_lock, flags);
 
-	clear_bit_unlock(tag, &hba->lrb_in_use);
-	wake_up(&hba->dev_cmd.tag_wq);
-
 out:
 	if (!err) {
 		err = SUCCESS;
@@ -6279,9 +6280,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 	int err;
 	unsigned long flags;
 
-	/* Reset the host controller */
+	/*
+	 * Stop the host controller and complete the requests
+	 * cleared by h/w
+	 */
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufshcd_hba_stop(hba, false);
+	hba->silence_err_logs = true;
+	ufshcd_complete_requests(hba);
+	hba->silence_err_logs = false;
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	/* scale up clocks to max frequency before full reinitialization */
@@ -6292,7 +6299,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 		goto out;
 
 	/* Establish the link again and restore the device */
-	err = ufshcd_probe_hba(hba);
+	err = ufshcd_probe_hba(hba, false);
 
 	if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
 		err = -EIO;
@@ -6315,7 +6322,6 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
 {
 	int err = 0;
-	unsigned long flags;
 	int retries = MAX_HOST_RESET_RETRIES;
 
 	do {
@@ -6325,15 +6331,6 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
 		err = ufshcd_host_reset_and_restore(hba);
 	} while (err && --retries);
 
-	/*
-	 * After reset the door-bell might be cleared, complete
-	 * outstanding requests in s/w here.
-	 */
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	ufshcd_transfer_req_compl(hba);
-	ufshcd_tmc_handler(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
 	return err;
 }
 
@@ -6488,7 +6485,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
 	if (!desc_buf)
 		return;
 
-	ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
+	ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0,
+			desc_buf, buff_len);
 	if (ret) {
 		dev_err(hba->dev,
 			"%s: Failed reading power descriptor.len = %d ret = %d",
@@ -6578,16 +6576,13 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
 	return ret;
 }
 
-static int ufs_get_device_desc(struct ufs_hba *hba,
-			       struct ufs_dev_desc *dev_desc)
+static int ufs_get_device_desc(struct ufs_hba *hba)
 {
 	int err;
 	size_t buff_len;
 	u8 model_index;
 	u8 *desc_buf;
-
-	if (!dev_desc)
-		return -EINVAL;
+	struct ufs_dev_info *dev_info = &hba->dev_info;
 
 	buff_len = max_t(size_t, hba->desc_size.dev_desc,
 			 QUERY_DESC_MAX_SIZE + 1);
@@ -6597,7 +6592,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
 		goto out;
 	}
 
-	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
+	err = ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf,
+			hba->desc_size.dev_desc);
 	if (err) {
 		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
 			__func__, err);
@@ -6608,12 +6604,12 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
 	 * getting vendor (manufacturerID) and Bank Index in big endian
 	 * format
 	 */
-	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+	dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
 				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
 
 	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
 	err = ufshcd_read_string_desc(hba, model_index,
-				      &dev_desc->model, SD_ASCII_STD);
+				      &dev_info->model, SD_ASCII_STD);
 	if (err < 0) {
 		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
 			__func__, err);
@@ -6631,23 +6627,25 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
 	return err;
 }
 
-static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc)
+static void ufs_put_device_desc(struct ufs_hba *hba)
 {
-	kfree(dev_desc->model);
-	dev_desc->model = NULL;
+	struct ufs_dev_info *dev_info = &hba->dev_info;
+
+	kfree(dev_info->model);
+	dev_info->model = NULL;
 }
 
-static void ufs_fixup_device_setup(struct ufs_hba *hba,
-				   struct ufs_dev_desc *dev_desc)
+static void ufs_fixup_device_setup(struct ufs_hba *hba)
 {
 	struct ufs_dev_fix *f;
+	struct ufs_dev_info *dev_info = &hba->dev_info;
 
 	for (f = ufs_fixups; f->quirk; f++) {
-		if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
-		     f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
-		     ((dev_desc->model &&
-		       STR_PRFX_EQUAL(f->card.model, dev_desc->model)) ||
-		      !strcmp(f->card.model, UFS_ANY_MODEL)))
+		if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
+		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
+		     ((dev_info->model &&
+		       STR_PRFX_EQUAL(f->model, dev_info->model)) ||
+		      !strcmp(f->model, UFS_ANY_MODEL)))
 			hba->dev_quirks |= f->quirk;
 	}
 }
@@ -6863,6 +6861,37 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
 		hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
 }
 
+static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
+{
+	int err;
+	size_t buff_len;
+	u8 *desc_buf;
+
+	buff_len = hba->desc_size.geom_desc;
+	desc_buf = kmalloc(buff_len, GFP_KERNEL);
+	if (!desc_buf) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = ufshcd_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+			desc_buf, buff_len);
+	if (err) {
+		dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
+				__func__, err);
+		goto out;
+	}
+
+	if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
+		hba->dev_info.max_lu_supported = 32;
+	else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
+		hba->dev_info.max_lu_supported = 8;
+
+out:
+	kfree(desc_buf);
+	return err;
+}
+
 static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
 	{19200000, REF_CLK_FREQ_19_2_MHZ},
 	{26000000, REF_CLK_FREQ_26_MHZ},
@@ -6931,15 +6960,92 @@ static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
 	return err;
 }
 
+static int ufshcd_device_params_init(struct ufs_hba *hba)
+{
+	bool flag;
+	int ret;
+
+	/* Clear any previous UFS device information */
+	memset(&hba->dev_info, 0, sizeof(hba->dev_info));
+
+	/* Init check for device descriptor sizes */
+	ufshcd_init_desc_sizes(hba);
+
+	/* Init UFS geometry descriptor related parameters */
+	ret = ufshcd_device_geo_params_init(hba);
+	if (ret)
+		goto out;
+
+	/* Check and apply UFS device quirks */
+	ret = ufs_get_device_desc(hba);
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	ufs_fixup_device_setup(hba);
+
+	if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+			QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+		hba->dev_info.f_power_on_wp_en = flag;
+
+	/* Probe maximum power mode co-supported by both UFS host and device */
+	if (ufshcd_get_max_pwr_mode(hba))
+		dev_err(hba->dev,
+			"%s: Failed getting max supported power mode\n",
+			__func__);
+out:
+	return ret;
+}
+
+/**
+ * ufshcd_add_lus - probe and add UFS logical units
+ * @hba: per-adapter instance
+ */
+static int ufshcd_add_lus(struct ufs_hba *hba)
+{
+	int ret;
+
+	ufshcd_init_icc_levels(hba);
+
+	/* Add required well known logical units to scsi mid layer */
+	ret = ufshcd_scsi_add_wlus(hba);
+	if (ret)
+		goto out;
+
+	/* Initialize devfreq after UFS device is detected */
+	if (ufshcd_is_clkscaling_supported(hba)) {
+		memcpy(&hba->clk_scaling.saved_pwr_info.info,
+			&hba->pwr_info,
+			sizeof(struct ufs_pa_layer_attr));
+		hba->clk_scaling.saved_pwr_info.is_valid = true;
+		if (!hba->devfreq) {
+			ret = ufshcd_devfreq_init(hba);
+			if (ret)
+				goto out;
+		}
+
+		hba->clk_scaling.is_allowed = true;
+	}
+
+	ufs_bsg_probe(hba);
+	scsi_scan_host(hba->host);
+	pm_runtime_put_sync(hba->dev);
+
+out:
+	return ret;
+}
+
 /**
  * ufshcd_probe_hba - probe hba to detect device and initialize
  * @hba: per-adapter instance
+ * @async: asynchronous execution or not
  *
  * Execute link-startup and verify device initialization
  */
-static int ufshcd_probe_hba(struct ufs_hba *hba)
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
 {
-	struct ufs_dev_desc card = {0};
 	int ret;
 	ktime_t start = ktime_get();
 
@@ -6957,27 +7063,26 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 	/* UniPro link is active now */
 	ufshcd_set_link_active(hba);
 
+	/* Verify device initialization by sending NOP OUT UPIU */
 	ret = ufshcd_verify_dev_init(hba);
 	if (ret)
 		goto out;
 
+	/* Initiate UFS initialization, and wait until completion */
 	ret = ufshcd_complete_dev_init(hba);
 	if (ret)
 		goto out;
 
-	/* Init check for device descriptor sizes */
-	ufshcd_init_desc_sizes(hba);
-
-	ret = ufs_get_device_desc(hba, &card);
-	if (ret) {
-		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
-			__func__, ret);
-		goto out;
+	/*
+	 * Initialize UFS device parameters used by the driver; these
+	 * parameters are associated with UFS descriptors.
+	 */
+	if (async) {
+		ret = ufshcd_device_params_init(hba);
+		if (ret)
+			goto out;
 	}
 
-	ufs_fixup_device_setup(hba, &card);
-	ufs_put_device_desc(&card);
-
 	ufshcd_tune_unipro_params(hba);
 
 	/* UFS device is also active now */
@@ -6985,11 +7090,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 	ufshcd_force_reset_auto_bkops(hba);
 	hba->wlun_dev_clr_ua = true;
 
-	if (ufshcd_get_max_pwr_mode(hba)) {
-		dev_err(hba->dev,
-			"%s: Failed getting max supported power mode\n",
-			__func__);
-	} else {
+	/* Gear up to HS gear if supported */
+	if (hba->max_pwr_info.is_valid) {
 		/*
 		 * Set the right value to bRefClkFreq before attempting to
 		 * switch to HS gears.
@@ -7010,59 +7112,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 	/* Enable Auto-Hibernate if configured */
 	ufshcd_auto_hibern8_enable(hba);
 
-	/*
-	 * If we are in error handling context or in power management callbacks
-	 * context, no need to scan the host
-	 */
-	if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
-		bool flag;
-
-		/* clear any previous UFS device information */
-		memset(&hba->dev_info, 0, sizeof(hba->dev_info));
-		if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
-				QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
-			hba->dev_info.f_power_on_wp_en = flag;
-
-		if (!hba->is_init_prefetch)
-			ufshcd_init_icc_levels(hba);
-
-		/* Add required well known logical units to scsi mid layer */
-		if (ufshcd_scsi_add_wlus(hba))
-			goto out;
-
-		/* Initialize devfreq after UFS device is detected */
-		if (ufshcd_is_clkscaling_supported(hba)) {
-			memcpy(&hba->clk_scaling.saved_pwr_info.info,
-				&hba->pwr_info,
-				sizeof(struct ufs_pa_layer_attr));
-			hba->clk_scaling.saved_pwr_info.is_valid = true;
-			if (!hba->devfreq) {
-				ret = ufshcd_devfreq_init(hba);
-				if (ret)
-					goto out;
-			}
-			hba->clk_scaling.is_allowed = true;
-		}
-
-		ufs_bsg_probe(hba);
-
-		scsi_scan_host(hba->host);
-		pm_runtime_put_sync(hba->dev);
-	}
-
-	if (!hba->is_init_prefetch)
-		hba->is_init_prefetch = true;
-
 out:
-	/*
-	 * If we failed to initialize the device or the device is not
-	 * present, turn off the power/clocks etc.
-	 */
-	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
-		pm_runtime_put_sync(hba->dev);
-		ufshcd_exit_clk_scaling(hba);
-		ufshcd_hba_exit(hba);
-	}
 
 	trace_ufshcd_init(dev_name(hba->dev), ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
@@ -7078,43 +7128,25 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 {
 	struct ufs_hba *hba = (struct ufs_hba *)data;
+	int ret;
 
-	ufshcd_probe_hba(hba);
-}
+	/* Initialize hba, detect and initialize UFS device */
+	ret = ufshcd_probe_hba(hba, true);
+	if (ret)
+		goto out;
 
-static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
-{
-	unsigned long flags;
-	struct Scsi_Host *host;
-	struct ufs_hba *hba;
-	int index;
-	bool found = false;
-
-	if (!scmd || !scmd->device || !scmd->device->host)
-		return BLK_EH_DONE;
-
-	host = scmd->device->host;
-	hba = shost_priv(host);
-	if (!hba)
-		return BLK_EH_DONE;
-
-	spin_lock_irqsave(host->host_lock, flags);
-
-	for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
-		if (hba->lrb[index].cmd == scmd) {
-			found = true;
-			break;
-		}
-	}
-
-	spin_unlock_irqrestore(host->host_lock, flags);
-
+	/* Probe and add UFS logical units  */
+	ret = ufshcd_add_lus(hba);
+out:
 	/*
-	 * Bypass SCSI error handling and reset the block layer timer if this
-	 * SCSI command was not actually dispatched to UFS driver, otherwise
-	 * let SCSI layer handle the error as usual.
+	 * If we failed to initialize the device or the device is not
+	 * present, turn off the power/clocks etc.
 	 */
-	return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
+	if (ret) {
+		pm_runtime_put_sync(hba->dev);
+		ufshcd_exit_clk_scaling(hba);
+		ufshcd_hba_exit(hba);
+	}
 }
 
 static const struct attribute_group *ufshcd_driver_groups[] = {
@@ -7135,7 +7167,6 @@ static struct scsi_host_template ufshcd_driver_template = {
 	.eh_abort_handler	= ufshcd_abort,
 	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
 	.eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
-	.eh_timed_out		= ufshcd_eh_timed_out,
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
@@ -7574,6 +7605,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
 		ufshcd_setup_clocks(hba, false);
 		ufshcd_setup_hba_vreg(hba, false);
 		hba->is_powered = false;
+		ufs_put_device_desc(hba);
 	}
 }
 
@@ -7701,8 +7733,7 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
 	 * turning off the link would also turn off the device.
 	 */
 	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
-		   (!check_for_bkops || (check_for_bkops &&
-		    !hba->auto_bkops_enabled))) {
+		 (!check_for_bkops || !hba->auto_bkops_enabled)) {
 		/*
 		 * Let's make sure that link is in low power mode, we are doing
 		 * this currently by putting the link in Hibern8. Otherway to
@@ -7908,6 +7939,11 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ret = ufshcd_vops_suspend(hba, pm_op);
 	if (ret)
 		goto set_link_active;
+	/*
+	 * Disable the host irq as there won't be any host controller
+	 * transaction expected till resume.
+	 */
+	ufshcd_disable_irq(hba);
 
 	if (!ufshcd_is_link_active(hba))
 		ufshcd_setup_clocks(hba, false);
@@ -7917,11 +7953,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
 	hba->clk_gating.state = CLKS_OFF;
 	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
-	/*
-	 * Disable the host irq as host controller as there won't be any
-	 * host controller transaction expected till resume.
-	 */
-	ufshcd_disable_irq(hba);
+
 	/* Put the host controller in low power mode if possible */
 	ufshcd_hba_vreg_set_lpm(hba);
 	goto out;
@@ -7974,9 +8006,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		goto out;
 
 	/* enable the host irq as host controller would be active soon */
-	ret = ufshcd_enable_irq(hba);
-	if (ret)
-		goto disable_irq_and_vops_clks;
+	ufshcd_enable_irq(hba);
 
 	ret = ufshcd_vreg_set_hpm(hba);
 	if (ret)
@@ -8250,6 +8280,9 @@ void ufshcd_remove(struct ufs_hba *hba)
 {
 	ufs_bsg_remove(hba);
 	ufs_sysfs_remove_nodes(hba->dev);
+	blk_cleanup_queue(hba->tmf_queue);
+	blk_mq_free_tag_set(&hba->tmf_tag_set);
+	blk_cleanup_queue(hba->cmd_queue);
 	scsi_remove_host(hba->host);
 	/* disable interrupts */
 	ufshcd_disable_intr(hba, hba->intr_mask);
@@ -8328,6 +8361,18 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
 }
 EXPORT_SYMBOL(ufshcd_alloc_host);
 
+/* This function exists because blk_mq_alloc_tag_set() requires this. */
+static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
+				     const struct blk_mq_queue_data *qd)
+{
+	WARN_ON_ONCE(true);
+	return BLK_STS_NOTSUPP;
+}
+
+static const struct blk_mq_ops ufshcd_tmf_ops = {
+	.queue_rq = ufshcd_queue_tmf,
+};
+
 /**
  * ufshcd_init - Driver initialization routine
  * @hba: per-adapter instance
@@ -8397,10 +8442,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
 	hba->max_pwr_info.is_valid = false;
 
-	/* Initailize wait queue for task management */
-	init_waitqueue_head(&hba->tm_wq);
-	init_waitqueue_head(&hba->tm_tag_wq);
-
 	/* Initialize work queues */
 	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
 	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
@@ -8413,9 +8454,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
 	init_rwsem(&hba->clk_scaling_lock);
 
-	/* Initialize device management tag acquire wait queue */
-	init_waitqueue_head(&hba->dev_cmd.tag_wq);
-
 	ufshcd_init_clk_gating(hba);
 
 	ufshcd_init_clk_scaling(hba);
@@ -8449,6 +8487,27 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 		goto exit_gating;
 	}
 
+	hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
+	if (IS_ERR(hba->cmd_queue)) {
+		err = PTR_ERR(hba->cmd_queue);
+		goto out_remove_scsi_host;
+	}
+
+	hba->tmf_tag_set = (struct blk_mq_tag_set) {
+		.nr_hw_queues	= 1,
+		.queue_depth	= hba->nutmrs,
+		.ops		= &ufshcd_tmf_ops,
+		.flags		= BLK_MQ_F_NO_SCHED,
+	};
+	err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
+	if (err < 0)
+		goto free_cmd_queue;
+	hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
+	if (IS_ERR(hba->tmf_queue)) {
+		err = PTR_ERR(hba->tmf_queue);
+		goto free_tmf_tag_set;
+	}
+
 	/* Reset the attached device */
 	ufshcd_vops_device_reset(hba);
 
@@ -8458,7 +8517,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 		dev_err(hba->dev, "Host controller enable failed\n");
 		ufshcd_print_host_regs(hba);
 		ufshcd_print_host_state(hba);
-		goto out_remove_scsi_host;
+		goto free_tmf_queue;
 	}
 
 	/*
@@ -8495,6 +8554,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
 	return 0;
 
+free_tmf_queue:
+	blk_cleanup_queue(hba->tmf_queue);
+free_tmf_tag_set:
+	blk_mq_free_tag_set(&hba->tmf_tag_set);
+free_cmd_queue:
+	blk_cleanup_queue(hba->cmd_queue);
 out_remove_scsi_host:
 	scsi_remove_host(hba->host);
 exit_gating:
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 2740f69..2ae6c7c 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -212,13 +212,11 @@ struct ufs_query {
  * @type: device management command type - Query, NOP OUT
  * @lock: lock to allow one command at a time
  * @complete: internal commands completion
- * @tag_wq: wait queue until free command slot is available
  */
 struct ufs_dev_cmd {
 	enum dev_cmd_type type;
 	struct mutex lock;
 	struct completion *complete;
-	wait_queue_head_t tag_wq;
 	struct ufs_query query;
 };
 
@@ -322,7 +320,7 @@ struct ufs_hba_variant_ops {
 	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
 	void    (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
 					enum ufs_notify_change_status);
-	int	(*apply_dev_quirks)(struct ufs_hba *);
+	int	(*apply_dev_quirks)(struct ufs_hba *hba);
 	int     (*suspend)(struct ufs_hba *, enum ufs_pm_op);
 	int     (*resume)(struct ufs_hba *, enum ufs_pm_op);
 	void	(*dbg_register_dump)(struct ufs_hba *hba);
@@ -483,7 +481,7 @@ struct ufs_stats {
  * @host: Scsi_Host instance of the driver
  * @dev: device handle
  * @lrb: local reference block
- * @lrb_in_use: lrb in use
+ * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
  * @outstanding_tasks: Bits representing outstanding task requests
  * @outstanding_reqs: Bits representing outstanding transfer requests
  * @capabilities: UFS Controller Capabilities
@@ -495,17 +493,14 @@ struct ufs_stats {
  * @irq: Irq number of the controller
  * @active_uic_cmd: handle of active UIC command
  * @uic_cmd_mutex: mutex for uic command
- * @tm_wq: wait queue for task management
- * @tm_tag_wq: wait queue for free task management slots
- * @tm_slots_in_use: bit map of task management request slots in use
+ * @tmf_tag_set: TMF tag set.
+ * @tmf_queue: Used to allocate TMF tags.
  * @pwr_done: completion for power mode change
- * @tm_condition: condition variable for task management
  * @ufshcd_state: UFSHCD states
  * @eh_flags: Error handling flags
  * @intr_mask: Interrupt Mask Bits
  * @ee_ctrl_mask: Exception event control mask
  * @is_powered: flag to check if HBA is powered
- * @is_init_prefetch: flag to check if data was pre-fetched in initialization
  * @init_prefetch_data: data pre-fetched during initialization
  * @eh_work: Worker to handle UFS errors that require s/w attention
  * @eeh_work: Worker to handle exception events
@@ -513,6 +508,7 @@ struct ufs_stats {
  * @uic_error: UFS interconnect layer error status
  * @saved_err: sticky error mask
  * @saved_uic_err: sticky UIC error mask
+ * @silence_err_logs: flag to silence error logs
  * @dev_cmd: ufs device management command information
  * @last_dme_cmd_tstamp: time stamp of the last completed DME command
  * @auto_bkops_enabled: to track whether bkops is enabled in device
@@ -541,6 +537,7 @@ struct ufs_hba {
 
 	struct Scsi_Host *host;
 	struct device *dev;
+	struct request_queue *cmd_queue;
 	/*
 	 * This field is to keep a reference to "scsi_device" corresponding to
 	 * "UFS device" W-LU.
@@ -561,7 +558,6 @@ struct ufs_hba {
 	u32 ahit;
 
 	struct ufshcd_lrb *lrb;
-	unsigned long lrb_in_use;
 
 	unsigned long outstanding_tasks;
 	unsigned long outstanding_reqs;
@@ -643,10 +639,8 @@ struct ufs_hba {
 	/* Device deviations from standard UFS device spec. */
 	unsigned int dev_quirks;
 
-	wait_queue_head_t tm_wq;
-	wait_queue_head_t tm_tag_wq;
-	unsigned long tm_condition;
-	unsigned long tm_slots_in_use;
+	struct blk_mq_tag_set tmf_tag_set;
+	struct request_queue *tmf_queue;
 
 	struct uic_command *active_uic_cmd;
 	struct mutex uic_cmd_mutex;
@@ -657,7 +651,6 @@ struct ufs_hba {
 	u32 intr_mask;
 	u16 ee_ctrl_mask;
 	bool is_powered;
-	bool is_init_prefetch;
 	struct ufs_init_prefetch init_prefetch_data;
 
 	/* Work Queues */
@@ -670,6 +663,7 @@ struct ufs_hba {
 	u32 saved_err;
 	u32 saved_uic_err;
 	struct ufs_stats ufs_stats;
+	bool silence_err_logs;
 
 	/* Device management request data */
 	struct ufs_dev_cmd dev_cmd;
@@ -803,12 +797,17 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
 
 int ufshcd_alloc_host(struct device *, struct ufs_hba **);
 void ufshcd_dealloc_host(struct ufs_hba *);
+int ufshcd_hba_enable(struct ufs_hba *hba);
 int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
+int ufshcd_make_hba_operational(struct ufs_hba *hba);
 void ufshcd_remove(struct ufs_hba *);
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
 				u32 val, unsigned long interval_us,
 				unsigned long timeout_ms, bool can_sleep);
 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
+void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
+			    u32 reg);
 
 static inline void check_upiu_size(void)
 {
@@ -927,6 +926,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 	enum flag_idn idn, bool *flag_res);
 
 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
 
 #define SD_ASCII_STD true
 #define SD_RAW false
@@ -1086,8 +1086,10 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
 
 static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
 {
-	if (hba->vops && hba->vops->device_reset)
+	if (hba->vops && hba->vops->device_reset) {
 		hba->vops->device_reset(hba);
+		ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0);
+	}
 }
 
 extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index f539f87..3dc4d8b 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -161,6 +161,17 @@
 /* PHY Adapter Protocol Constants */
 #define PA_MAXDATALANES	4
 
+#define DL_FC0ProtectionTimeOutVal_Default	8191
+#define DL_TC0ReplayTimeOutVal_Default		65535
+#define DL_AFC0ReqTimeOutVal_Default		32767
+#define DL_FC1ProtectionTimeOutVal_Default	8191
+#define DL_TC1ReplayTimeOutVal_Default		65535
+#define DL_AFC1ReqTimeOutVal_Default		32767
+
+#define DME_LocalFC0ProtectionTimeOutVal	0xD041
+#define DME_LocalTC0ReplayTimeOutVal		0xD042
+#define DME_LocalAFC0ReqTimeOutVal		0xD043
+
 /* PA power modes */
 enum {
 	FAST_MODE	= 1,
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 7000881..c3f010d 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -365,7 +365,7 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
 		int segs = scsi_dma_map(cmd);
 
 		if (segs == -ENOMEM) {
-			scmd_printk(KERN_ERR, cmd,
+			scmd_printk(KERN_DEBUG, cmd,
 				    "vmw_pvscsi: Failed to map cmd sglist for DMA.\n");
 			return -ENOMEM;
 		} else if (segs > 1) {
@@ -392,7 +392,7 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
 		ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
 					     cmd->sc_data_direction);
 		if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
-			scmd_printk(KERN_ERR, cmd,
+			scmd_printk(KERN_DEBUG, cmd,
 				    "vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
 			return -ENOMEM;
 		}
@@ -402,6 +402,17 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
 	return 0;
 }
 
+/*
+ * The device incorrectly doesn't clear the first byte of the sense
+ * buffer in some cases. We have to do it ourselves.
+ * Otherwise we run into trouble when SWIOTLB is forced.
+ */
+static void pvscsi_patch_sense(struct scsi_cmnd *cmd)
+{
+	if (cmd->sense_buffer)
+		cmd->sense_buffer[0] = 0;
+}
+
 static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
 				 struct pvscsi_ctx *ctx)
 {
@@ -544,6 +555,8 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
 	cmd = ctx->cmd;
 	abort_cmp = ctx->abort_cmp;
 	pvscsi_unmap_buffers(adapter, ctx);
+	if (sdstat != SAM_STAT_CHECK_CONDITION)
+		pvscsi_patch_sense(cmd);
 	pvscsi_release_context(adapter, ctx);
 	if (abort_cmp) {
 		/*
@@ -712,7 +725,7 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
 				cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
 				DMA_FROM_DEVICE);
 		if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
-			scmd_printk(KERN_ERR, cmd,
+			scmd_printk(KERN_DEBUG, cmd,
 				    "vmw_pvscsi: Failed to map sense buffer for DMA.\n");
 			ctx->sensePA = 0;
 			return -ENOMEM;
@@ -873,6 +886,7 @@ static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
 			scmd_printk(KERN_ERR, cmd,
 				    "Forced reset on cmd %p\n", cmd);
 			pvscsi_unmap_buffers(adapter, ctx);
+			pvscsi_patch_sense(cmd);
 			pvscsi_release_context(adapter, ctx);
 			cmd->result = (DID_RESET << 16);
 			cmd->scsi_done(cmd);
diff --git a/drivers/siox/siox.h b/drivers/siox/siox.h
index c674bf6..f08b43b 100644
--- a/drivers/siox/siox.h
+++ b/drivers/siox/siox.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2015-2017 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de>
  */
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
index a444badd..4aad256 100644
--- a/drivers/slimbus/qcom-ctrl.c
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -641,6 +641,8 @@ static int qcom_slim_remove(struct platform_device *pdev)
 
 	pm_runtime_disable(&pdev->dev);
 	slim_unregister_controller(&ctrl->ctrl);
+	clk_disable_unprepare(ctrl->rclk);
+	clk_disable_unprepare(ctrl->hclk);
 	destroy_workqueue(ctrl->rxwq);
 	return 0;
 }
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 29fbab5..e3f5ebc 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -666,10 +666,12 @@ static int qcom_slim_ngd_init_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
 	struct device *dev = ctrl->dev;
 	int ret, size;
 
-	ctrl->dma_rx_channel = dma_request_slave_channel(dev, "rx");
-	if (!ctrl->dma_rx_channel) {
-		dev_err(dev, "Failed to request dma channels");
-		return -EINVAL;
+	ctrl->dma_rx_channel = dma_request_chan(dev, "rx");
+	if (IS_ERR(ctrl->dma_rx_channel)) {
+		dev_err(dev, "Failed to request RX dma channel");
+		ret = PTR_ERR(ctrl->dma_rx_channel);
+		ctrl->dma_rx_channel = NULL;
+		return ret;
 	}
 
 	size = QCOM_SLIM_NGD_DESC_NUM * SLIM_MSGQ_BUF_LEN;
@@ -703,10 +705,12 @@ static int qcom_slim_ngd_init_tx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
 	int ret = 0;
 	int size;
 
-	ctrl->dma_tx_channel = dma_request_slave_channel(dev, "tx");
-	if (!ctrl->dma_tx_channel) {
-		dev_err(dev, "Failed to request dma channels");
-		return -EINVAL;
+	ctrl->dma_tx_channel = dma_request_chan(dev, "tx");
+	if (IS_ERR(ctrl->dma_tx_channel)) {
+		dev_err(dev, "Failed to request TX dma channel");
+		ret = PTR_ERR(ctrl->dma_tx_channel);
+		ctrl->dma_tx_channel = NULL;
+		return ret;
 	}
 
 	size = ((QCOM_SLIM_NGD_DESC_NUM + 1) * SLIM_MSGQ_BUF_LEN);
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
index b2f013b..c730359 100644
--- a/drivers/slimbus/slimbus.h
+++ b/drivers/slimbus/slimbus.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2011-2017, The Linux Foundation
  */
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index 096a83c..55a1f57 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -66,8 +66,9 @@ static const struct at91_soc __initconst socs[] = {
 	AT91_SOC(AT91SAM9XE128_CIDR_MATCH, 0, "at91sam9xe128", "at91sam9xe128"),
 	AT91_SOC(AT91SAM9XE256_CIDR_MATCH, 0, "at91sam9xe256", "at91sam9xe256"),
 	AT91_SOC(AT91SAM9XE512_CIDR_MATCH, 0, "at91sam9xe512", "at91sam9xe512"),
-	AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_EXID_MATCH,
-		 "sam9x60", "sam9x60"),
+#endif
+#ifdef CONFIG_SOC_SAM9X60
+	AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_EXID_MATCH, "sam9x60", "sam9x60"),
 #endif
 #ifdef CONFIG_SOC_SAMA5
 	AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D21CU_EXID_MATCH,
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index d326915..61731e0 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -63,7 +63,7 @@ static const int b15_cpubiuctrl_regs[] = {
 	[CPU_WRITEBACK_CTRL_REG] = -1,
 };
 
-/* Odd cases, e.g: 7260 */
+/* Odd cases, e.g: 7260A0 */
 static const int b53_cpubiuctrl_no_wb_regs[] = {
 	[CPU_CREDIT_REG] = 0x0b0,
 	[CPU_MCP_FLOW_REG] = 0x0b4,
@@ -76,6 +76,12 @@ static const int b53_cpubiuctrl_regs[] = {
 	[CPU_WRITEBACK_CTRL_REG] = 0x22c,
 };
 
+static const int a72_cpubiuctrl_regs[] = {
+	[CPU_CREDIT_REG] = 0x18,
+	[CPU_MCP_FLOW_REG] = 0x1c,
+	[CPU_WRITEBACK_CTRL_REG] = 0x20,
+};
+
 #define NUM_CPU_BIUCTRL_REGS	3
 
 static int __init mcp_write_pairing_set(void)
@@ -101,25 +107,29 @@ static int __init mcp_write_pairing_set(void)
 	return 0;
 }
 
-static const u32 b53_mach_compat[] = {
+static const u32 a72_b53_mach_compat[] = {
+	0x7211,
+	0x7216,
+	0x7255,
+	0x7260,
 	0x7268,
 	0x7271,
 	0x7278,
 };
 
-static void __init mcp_b53_set(void)
+static void __init mcp_a72_b53_set(void)
 {
 	unsigned int i;
 	u32 reg;
 
 	reg = brcmstb_get_family_id();
 
-	for (i = 0; i < ARRAY_SIZE(b53_mach_compat); i++) {
-		if (BRCM_ID(reg) == b53_mach_compat[i])
+	for (i = 0; i < ARRAY_SIZE(a72_b53_mach_compat); i++) {
+		if (BRCM_ID(reg) == a72_b53_mach_compat[i])
 			break;
 	}
 
-	if (i == ARRAY_SIZE(b53_mach_compat))
+	if (i == ARRAY_SIZE(a72_b53_mach_compat))
 		return;
 
 	/* Set all 3 MCP interfaces to 8 credits */
@@ -157,6 +167,7 @@ static void __init mcp_b53_set(void)
 static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
 {
 	struct device_node *cpu_dn;
+	u32 family_id;
 	int ret = 0;
 
 	cpubiuctrl_base = of_iomap(np, 0);
@@ -179,13 +190,16 @@ static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
 		cpubiuctrl_regs = b15_cpubiuctrl_regs;
 	else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
 		cpubiuctrl_regs = b53_cpubiuctrl_regs;
+	else if (of_device_is_compatible(cpu_dn, "arm,cortex-a72"))
+		cpubiuctrl_regs = a72_cpubiuctrl_regs;
 	else {
 		pr_err("unsupported CPU\n");
 		ret = -EINVAL;
 	}
 	of_node_put(cpu_dn);
 
-	if (BRCM_ID(brcmstb_get_family_id()) == 0x7260)
+	family_id = brcmstb_get_family_id();
+	if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
 		cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
 out:
 	of_node_put(np);
@@ -248,7 +262,7 @@ static int __init brcmstb_biuctrl_init(void)
 		return ret;
 	}
 
-	mcp_b53_set();
+	mcp_a72_b53_set();
 #ifdef CONFIG_PM_SLEEP
 	register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
 #endif
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
index cfa4b29..357c580 100644
--- a/drivers/soc/fsl/qe/Kconfig
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -5,7 +5,8 @@
 
 config QUICC_ENGINE
 	bool "QUICC Engine (QE) framework support"
-	depends on FSL_SOC && PPC32
+	depends on OF && HAS_IOMEM
+	depends on PPC || ARM || ARM64 || COMPILE_TEST
 	select GENERIC_ALLOCATOR
 	select CRC32
 	help
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index f0c29ed..ed75198 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -41,13 +41,13 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 		container_of(mm_gc, struct qe_gpio_chip, mm_gc);
 	struct qe_pio_regs __iomem *regs = mm_gc->regs;
 
-	qe_gc->cpdata = in_be32(&regs->cpdata);
+	qe_gc->cpdata = qe_ioread32be(&regs->cpdata);
 	qe_gc->saved_regs.cpdata = qe_gc->cpdata;
-	qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
-	qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
-	qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
-	qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
-	qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
+	qe_gc->saved_regs.cpdir1 = qe_ioread32be(&regs->cpdir1);
+	qe_gc->saved_regs.cpdir2 = qe_ioread32be(&regs->cpdir2);
+	qe_gc->saved_regs.cppar1 = qe_ioread32be(&regs->cppar1);
+	qe_gc->saved_regs.cppar2 = qe_ioread32be(&regs->cppar2);
+	qe_gc->saved_regs.cpodr = qe_ioread32be(&regs->cpodr);
 }
 
 static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
@@ -56,7 +56,7 @@ static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
 	struct qe_pio_regs __iomem *regs = mm_gc->regs;
 	u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
 
-	return !!(in_be32(&regs->cpdata) & pin_mask);
+	return !!(qe_ioread32be(&regs->cpdata) & pin_mask);
 }
 
 static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -74,7 +74,7 @@ static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 	else
 		qe_gc->cpdata &= ~pin_mask;
 
-	out_be32(&regs->cpdata, qe_gc->cpdata);
+	qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
 
 	spin_unlock_irqrestore(&qe_gc->lock, flags);
 }
@@ -101,7 +101,7 @@ static void qe_gpio_set_multiple(struct gpio_chip *gc,
 		}
 	}
 
-	out_be32(&regs->cpdata, qe_gc->cpdata);
+	qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
 
 	spin_unlock_irqrestore(&qe_gc->lock, flags);
 }
@@ -160,7 +160,6 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index)
 {
 	struct qe_pin *qe_pin;
 	struct gpio_chip *gc;
-	struct of_mm_gpio_chip *mm_gc;
 	struct qe_gpio_chip *qe_gc;
 	int err;
 	unsigned long flags;
@@ -186,7 +185,6 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index)
 		goto err0;
 	}
 
-	mm_gc = to_of_mm_gpio_chip(gc);
 	qe_gc = gpiochip_get_data(gc);
 
 	spin_lock_irqsave(&qe_gc->lock, flags);
@@ -255,11 +253,15 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin)
 	spin_lock_irqsave(&qe_gc->lock, flags);
 
 	if (second_reg) {
-		clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
-		clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
+		qe_clrsetbits_be32(&regs->cpdir2, mask2,
+				   sregs->cpdir2 & mask2);
+		qe_clrsetbits_be32(&regs->cppar2, mask2,
+				   sregs->cppar2 & mask2);
 	} else {
-		clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
-		clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
+		qe_clrsetbits_be32(&regs->cpdir1, mask2,
+				   sregs->cpdir1 & mask2);
+		qe_clrsetbits_be32(&regs->cppar1, mask2,
+				   sregs->cppar1 & mask2);
 	}
 
 	if (sregs->cpdata & mask1)
@@ -267,8 +269,8 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin)
 	else
 		qe_gc->cpdata &= ~mask1;
 
-	out_be32(&regs->cpdata, qe_gc->cpdata);
-	clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
+	qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
+	qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
 
 	spin_unlock_irqrestore(&qe_gc->lock, flags);
 }
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 417df7e..96c2057 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -22,16 +22,12 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/ioport.h>
+#include <linux/iopoll.h>
 #include <linux/crc32.h>
 #include <linux/mod_devicetable.h>
 #include <linux/of_platform.h>
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
 #include <soc/fsl/qe/immap_qe.h>
 #include <soc/fsl/qe/qe.h>
-#include <asm/prom.h>
-#include <asm/rheap.h>
 
 static void qe_snums_init(void);
 static int qe_sdma_init(void);
@@ -108,11 +104,12 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
 {
 	unsigned long flags;
 	u8 mcn_shift = 0, dev_shift = 0;
-	u32 ret;
+	u32 val;
+	int ret;
 
 	spin_lock_irqsave(&qe_lock, flags);
 	if (cmd == QE_RESET) {
-		out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
+		qe_iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr);
 	} else {
 		if (cmd == QE_ASSIGN_PAGE) {
 			/* Here device is the SNUM, not sub-block */
@@ -129,20 +126,18 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
 				mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
 		}
 
-		out_be32(&qe_immr->cp.cecdr, cmd_input);
-		out_be32(&qe_immr->cp.cecr,
-			 (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
-			  mcn_protocol << mcn_shift));
+		qe_iowrite32be(cmd_input, &qe_immr->cp.cecdr);
+		qe_iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift),
+			       &qe_immr->cp.cecr);
 	}
 
 	/* wait for the QE_CR_FLG to clear */
-	ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
-			   100, 0);
-	/* On timeout (e.g. failure), the expression will be false (ret == 0),
-	   otherwise it will be true (ret == 1). */
+	ret = readx_poll_timeout_atomic(qe_ioread32be, &qe_immr->cp.cecr, val,
+					(val & QE_CR_FLG) == 0, 0, 100);
+	/* On timeout, ret is -ETIMEDOUT, otherwise it will be 0. */
 	spin_unlock_irqrestore(&qe_lock, flags);
 
-	return ret == 1;
+	return ret == 0;
 }
 EXPORT_SYMBOL(qe_issue_cmd);
 
@@ -164,8 +159,7 @@ static unsigned int brg_clk = 0;
 unsigned int qe_get_brg_clk(void)
 {
 	struct device_node *qe;
-	int size;
-	const u32 *prop;
+	u32 brg;
 	unsigned int mod;
 
 	if (brg_clk)
@@ -175,9 +169,8 @@ unsigned int qe_get_brg_clk(void)
 	if (!qe)
 		return brg_clk;
 
-	prop = of_get_property(qe, "brg-frequency", &size);
-	if (prop && size == sizeof(*prop))
-		brg_clk = *prop;
+	if (!of_property_read_u32(qe, "brg-frequency", &brg))
+		brg_clk = brg;
 
 	of_node_put(qe);
 
@@ -197,6 +190,14 @@ EXPORT_SYMBOL(qe_get_brg_clk);
 #define PVR_VER_836x	0x8083
 #define PVR_VER_832x	0x8084
 
+static bool qe_general4_errata(void)
+{
+#ifdef CONFIG_PPC32
+	return pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x);
+#endif
+	return false;
+}
+
 /* Program the BRG to the given sampling rate and multiplier
  *
  * @brg: the BRG, QE_BRG1 - QE_BRG16
@@ -223,14 +224,14 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
 	/* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
 	   that the BRG divisor must be even if you're not using divide-by-16
 	   mode. */
-	if (pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x))
+	if (qe_general4_errata())
 		if (!div16 && (divisor & 1) && (divisor > 3))
 			divisor++;
 
 	tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
 		QE_BRGC_ENABLE | div16;
 
-	out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
+	qe_iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
 
 	return 0;
 }
@@ -364,22 +365,20 @@ EXPORT_SYMBOL(qe_put_snum);
 static int qe_sdma_init(void)
 {
 	struct sdma __iomem *sdma = &qe_immr->sdma;
-	static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM;
-
-	if (!sdma)
-		return -ENODEV;
+	static s32 sdma_buf_offset = -ENOMEM;
 
 	/* allocate 2 internal temporary buffers (512 bytes size each) for
 	 * the SDMA */
-	if (IS_ERR_VALUE(sdma_buf_offset)) {
+	if (sdma_buf_offset < 0) {
 		sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
-		if (IS_ERR_VALUE(sdma_buf_offset))
+		if (sdma_buf_offset < 0)
 			return -ENOMEM;
 	}
 
-	out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
- 	out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
- 					(0x1 << QE_SDMR_CEN_SHIFT)));
+	qe_iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK,
+		       &sdma->sdebcr);
+	qe_iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
+		       &sdma->sdmr);
 
 	return 0;
 }
@@ -417,14 +416,14 @@ static void qe_upload_microcode(const void *base,
 			"uploading microcode '%s'\n", ucode->id);
 
 	/* Use auto-increment */
-	out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
-		QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
+	qe_iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR,
+		       &qe_immr->iram.iadd);
 
 	for (i = 0; i < be32_to_cpu(ucode->count); i++)
-		out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
+		qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
 	
 	/* Set I-RAM Ready Register */
-	out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
+	qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
 }
 
 /*
@@ -509,7 +508,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
 	 * If the microcode calls for it, split the I-RAM.
 	 */
 	if (!firmware->split)
-		setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
+		qe_setbits_be16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
 
 	if (firmware->soc.model)
 		printk(KERN_INFO
@@ -543,11 +542,13 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
 			u32 trap = be32_to_cpu(ucode->traps[j]);
 
 			if (trap)
-				out_be32(&qe_immr->rsp[i].tibcr[j], trap);
+				qe_iowrite32be(trap,
+					       &qe_immr->rsp[i].tibcr[j]);
 		}
 
 		/* Enable traps */
-		out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
+		qe_iowrite32be(be32_to_cpu(ucode->eccr),
+			       &qe_immr->rsp[i].eccr);
 	}
 
 	qe_firmware_uploaded = 1;
@@ -565,11 +566,9 @@ EXPORT_SYMBOL(qe_upload_firmware);
 struct qe_firmware_info *qe_get_firmware_info(void)
 {
 	static int initialized;
-	struct property *prop;
 	struct device_node *qe;
 	struct device_node *fw = NULL;
 	const char *sprop;
-	unsigned int i;
 
 	/*
 	 * If we haven't checked yet, and a driver hasn't uploaded a firmware
@@ -603,20 +602,11 @@ struct qe_firmware_info *qe_get_firmware_info(void)
 		strlcpy(qe_firmware_info.id, sprop,
 			sizeof(qe_firmware_info.id));
 
-	prop = of_find_property(fw, "extended-modes", NULL);
-	if (prop && (prop->length == sizeof(u64))) {
-		const u64 *iprop = prop->value;
+	of_property_read_u64(fw, "extended-modes",
+			     &qe_firmware_info.extended_modes);
 
-		qe_firmware_info.extended_modes = *iprop;
-	}
-
-	prop = of_find_property(fw, "virtual-traps", NULL);
-	if (prop && (prop->length == 32)) {
-		const u32 *iprop = prop->value;
-
-		for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++)
-			qe_firmware_info.vtraps[i] = iprop[i];
-	}
+	of_property_read_u32_array(fw, "virtual-traps", qe_firmware_info.vtraps,
+				   ARRAY_SIZE(qe_firmware_info.vtraps));
 
 	of_node_put(fw);
 
@@ -627,17 +617,13 @@ EXPORT_SYMBOL(qe_get_firmware_info);
 unsigned int qe_get_num_of_risc(void)
 {
 	struct device_node *qe;
-	int size;
 	unsigned int num_of_risc = 0;
-	const u32 *prop;
 
 	qe = qe_get_device_node();
 	if (!qe)
 		return num_of_risc;
 
-	prop = of_get_property(qe, "fsl,qe-num-riscs", &size);
-	if (prop && size == sizeof(*prop))
-		num_of_risc = *prop;
+	of_property_read_u32(qe, "fsl,qe-num-riscs", &num_of_risc);
 
 	of_node_put(qe);
 
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 83e85e6..a81a1a7 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -32,7 +32,7 @@ static phys_addr_t muram_pbase;
 
 struct muram_block {
 	struct list_head head;
-	unsigned long start;
+	s32 start;
 	int size;
 };
 
@@ -110,34 +110,30 @@ int cpm_muram_init(void)
  * @algo: algorithm for alloc.
  * @data: data for genalloc's algorithm.
  *
- * This function returns an offset into the muram area.
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure.
  */
-static unsigned long cpm_muram_alloc_common(unsigned long size,
-		genpool_algo_t algo, void *data)
+static s32 cpm_muram_alloc_common(unsigned long size,
+				  genpool_algo_t algo, void *data)
 {
 	struct muram_block *entry;
-	unsigned long start;
+	s32 start;
 
-	if (!muram_pool && cpm_muram_init())
-		goto out2;
-
-	start = gen_pool_alloc_algo(muram_pool, size, algo, data);
-	if (!start)
-		goto out2;
-	start = start - GENPOOL_OFFSET;
-	memset_io(cpm_muram_addr(start), 0, size);
 	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 	if (!entry)
-		goto out1;
+		return -ENOMEM;
+	start = gen_pool_alloc_algo(muram_pool, size, algo, data);
+	if (!start) {
+		kfree(entry);
+		return -ENOMEM;
+	}
+	start = start - GENPOOL_OFFSET;
+	memset_io(cpm_muram_addr(start), 0, size);
 	entry->start = start;
 	entry->size = size;
 	list_add(&entry->head, &muram_block_list);
 
 	return start;
-out1:
-	gen_pool_free(muram_pool, start, size);
-out2:
-	return (unsigned long)-ENOMEM;
 }
 
 /*
@@ -145,13 +141,14 @@ static unsigned long cpm_muram_alloc_common(unsigned long size,
  * @size: number of bytes to allocate
  * @align: requested alignment, in bytes
  *
- * This function returns an offset into the muram area.
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure.
  * Use cpm_dpram_addr() to get the virtual address of the area.
  * Use cpm_muram_free() to free the allocation.
  */
-unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
+s32 cpm_muram_alloc(unsigned long size, unsigned long align)
 {
-	unsigned long start;
+	s32 start;
 	unsigned long flags;
 	struct genpool_data_align muram_pool_data;
 
@@ -168,12 +165,15 @@ EXPORT_SYMBOL(cpm_muram_alloc);
  * cpm_muram_free - free a chunk of multi-user ram
  * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
  */
-int cpm_muram_free(unsigned long offset)
+void cpm_muram_free(s32 offset)
 {
 	unsigned long flags;
 	int size;
 	struct muram_block *tmp;
 
+	if (offset < 0)
+		return;
+
 	size = 0;
 	spin_lock_irqsave(&cpm_muram_lock, flags);
 	list_for_each_entry(tmp, &muram_block_list, head) {
@@ -186,7 +186,6 @@ int cpm_muram_free(unsigned long offset)
 	}
 	gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
 	spin_unlock_irqrestore(&cpm_muram_lock, flags);
-	return size;
 }
 EXPORT_SYMBOL(cpm_muram_free);
 
@@ -194,13 +193,14 @@ EXPORT_SYMBOL(cpm_muram_free);
  * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
  * @offset: offset of allocation start address
  * @size: number of bytes to allocate
- * This function returns an offset into the muram area
+ * This function returns @offset if the area was available, a negative
+ * errno otherwise.
  * Use cpm_dpram_addr() to get the virtual address of the area.
  * Use cpm_muram_free() to free the allocation.
  */
-unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
+s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
 {
-	unsigned long start;
+	s32 start;
 	unsigned long flags;
 	struct genpool_data_fixed muram_pool_data_fixed;
 
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
index 9bac546..0dd5bdb 100644
--- a/drivers/soc/fsl/qe/qe_ic.c
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/errno.h>
+#include <linux/irq.h>
 #include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/stddef.h>
@@ -24,9 +25,57 @@
 #include <linux/spinlock.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <soc/fsl/qe/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
 
-#include "qe_ic.h"
+#define NR_QE_IC_INTS		64
+
+/* QE IC registers offset */
+#define QEIC_CICR		0x00
+#define QEIC_CIVEC		0x04
+#define QEIC_CIPXCC		0x10
+#define QEIC_CIPYCC		0x14
+#define QEIC_CIPWCC		0x18
+#define QEIC_CIPZCC		0x1c
+#define QEIC_CIMR		0x20
+#define QEIC_CRIMR		0x24
+#define QEIC_CIPRTA		0x30
+#define QEIC_CIPRTB		0x34
+#define QEIC_CHIVEC		0x60
+
+struct qe_ic {
+	/* Control registers offset */
+	u32 __iomem *regs;
+
+	/* The remapper for this QEIC */
+	struct irq_domain *irqhost;
+
+	/* The "linux" controller struct */
+	struct irq_chip hc_irq;
+
+	/* VIRQ numbers of QE high/low irqs */
+	unsigned int virq_high;
+	unsigned int virq_low;
+};
+
+/*
+ * QE interrupt controller internal structure
+ */
+struct qe_ic_info {
+	/* Location of this source at the QIMR register */
+	u32	mask;
+
+	/* Mask register offset */
+	u32	mask_reg;
+
+	/*
+	 * For grouped interrupts sources - the interrupt code as
+	 * appears at the group priority register
+	 */
+	u8	pri_code;
+
+	/* Group priority register offset */
+	u32	pri_reg;
+};
 
 static DEFINE_RAW_SPINLOCK(qe_ic_lock);
 
@@ -171,15 +220,15 @@ static struct qe_ic_info qe_ic_info[] = {
 		},
 };
 
-static inline u32 qe_ic_read(volatile __be32  __iomem * base, unsigned int reg)
+static inline u32 qe_ic_read(__be32  __iomem *base, unsigned int reg)
 {
-	return in_be32(base + (reg >> 2));
+	return qe_ioread32be(base + (reg >> 2));
 }
 
-static inline void qe_ic_write(volatile __be32  __iomem * base, unsigned int reg,
+static inline void qe_ic_write(__be32  __iomem *base, unsigned int reg,
 			       u32 value)
 {
-	out_be32(base + (reg >> 2), value);
+	qe_iowrite32be(value, base + (reg >> 2));
 }
 
 static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
@@ -281,8 +330,8 @@ static const struct irq_domain_ops qe_ic_host_ops = {
 	.xlate = irq_domain_xlate_onetwocell,
 };
 
-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+/* Return an interrupt vector or 0 if no interrupt is pending. */
+static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
 {
 	int irq;
 
@@ -292,13 +341,13 @@ unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
 	irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
 
 	if (irq == 0)
-		return NO_IRQ;
+		return 0;
 
 	return irq_linear_revmap(qe_ic->irqhost, irq);
 }
 
-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+/* Return an interrupt vector or 0 if no interrupt is pending. */
+static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
 {
 	int irq;
 
@@ -308,18 +357,60 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
 	irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
 
 	if (irq == 0)
-		return NO_IRQ;
+		return 0;
 
 	return irq_linear_revmap(qe_ic->irqhost, irq);
 }
 
-void __init qe_ic_init(struct device_node *node, unsigned int flags,
-		       void (*low_handler)(struct irq_desc *desc),
-		       void (*high_handler)(struct irq_desc *desc))
+static void qe_ic_cascade_low(struct irq_desc *desc)
 {
+	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	if (cascade_irq != 0)
+		generic_handle_irq(cascade_irq);
+
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+}
+
+static void qe_ic_cascade_high(struct irq_desc *desc)
+{
+	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+	unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	if (cascade_irq != 0)
+		generic_handle_irq(cascade_irq);
+
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+}
+
+static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
+{
+	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+	unsigned int cascade_irq;
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	cascade_irq = qe_ic_get_high_irq(qe_ic);
+	if (cascade_irq == 0)
+		cascade_irq = qe_ic_get_low_irq(qe_ic);
+
+	if (cascade_irq != 0)
+		generic_handle_irq(cascade_irq);
+
+	chip->irq_eoi(&desc->irq_data);
+}
+
+static void __init qe_ic_init(struct device_node *node)
+{
+	void (*low_handler)(struct irq_desc *desc);
+	void (*high_handler)(struct irq_desc *desc);
 	struct qe_ic *qe_ic;
 	struct resource res;
-	u32 temp = 0, ret, high_active = 0;
+	u32 ret;
 
 	ret = of_address_to_resource(node, 0, &res);
 	if (ret)
@@ -343,166 +434,42 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
 	qe_ic->virq_high = irq_of_parse_and_map(node, 0);
 	qe_ic->virq_low = irq_of_parse_and_map(node, 1);
 
-	if (qe_ic->virq_low == NO_IRQ) {
+	if (!qe_ic->virq_low) {
 		printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
 		kfree(qe_ic);
 		return;
 	}
-
-	/* default priority scheme is grouped. If spread mode is    */
-	/* required, configure cicr accordingly.                    */
-	if (flags & QE_IC_SPREADMODE_GRP_W)
-		temp |= CICR_GWCC;
-	if (flags & QE_IC_SPREADMODE_GRP_X)
-		temp |= CICR_GXCC;
-	if (flags & QE_IC_SPREADMODE_GRP_Y)
-		temp |= CICR_GYCC;
-	if (flags & QE_IC_SPREADMODE_GRP_Z)
-		temp |= CICR_GZCC;
-	if (flags & QE_IC_SPREADMODE_GRP_RISCA)
-		temp |= CICR_GRTA;
-	if (flags & QE_IC_SPREADMODE_GRP_RISCB)
-		temp |= CICR_GRTB;
-
-	/* choose destination signal for highest priority interrupt */
-	if (flags & QE_IC_HIGH_SIGNAL) {
-		temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
-		high_active = 1;
+	if (qe_ic->virq_high != qe_ic->virq_low) {
+		low_handler = qe_ic_cascade_low;
+		high_handler = qe_ic_cascade_high;
+	} else {
+		low_handler = qe_ic_cascade_muxed_mpic;
+		high_handler = NULL;
 	}
 
-	qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
+	qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
 
 	irq_set_handler_data(qe_ic->virq_low, qe_ic);
 	irq_set_chained_handler(qe_ic->virq_low, low_handler);
 
-	if (qe_ic->virq_high != NO_IRQ &&
-			qe_ic->virq_high != qe_ic->virq_low) {
+	if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) {
 		irq_set_handler_data(qe_ic->virq_high, qe_ic);
 		irq_set_chained_handler(qe_ic->virq_high, high_handler);
 	}
 }
 
-void qe_ic_set_highest_priority(unsigned int virq, int high)
+static int __init qe_ic_of_init(void)
 {
-	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
-	unsigned int src = virq_to_hw(virq);
-	u32 temp = 0;
+	struct device_node *np;
 
-	temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
-
-	temp &= ~CICR_HP_MASK;
-	temp |= src << CICR_HP_SHIFT;
-
-	temp &= ~CICR_HPIT_MASK;
-	temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
-
-	qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
-}
-
-/* Set Priority level within its group, from 1 to 8 */
-int qe_ic_set_priority(unsigned int virq, unsigned int priority)
-{
-	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
-	unsigned int src = virq_to_hw(virq);
-	u32 temp;
-
-	if (priority > 8 || priority == 0)
-		return -EINVAL;
-	if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
-		      "%s: Invalid hw irq number for QEIC\n", __func__))
-		return -EINVAL;
-	if (qe_ic_info[src].pri_reg == 0)
-		return -EINVAL;
-
-	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
-
-	if (priority < 4) {
-		temp &= ~(0x7 << (32 - priority * 3));
-		temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
-	} else {
-		temp &= ~(0x7 << (24 - priority * 3));
-		temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
+	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
+	if (!np) {
+		np = of_find_node_by_type(NULL, "qeic");
+		if (!np)
+			return -ENODEV;
 	}
-
-	qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
-
+	qe_ic_init(np);
+	of_node_put(np);
 	return 0;
 }
-
-/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
-int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
-{
-	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
-	unsigned int src = virq_to_hw(virq);
-	u32 temp, control_reg = QEIC_CICNR, shift = 0;
-
-	if (priority > 2 || priority == 0)
-		return -EINVAL;
-	if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
-		      "%s: Invalid hw irq number for QEIC\n", __func__))
-		return -EINVAL;
-
-	switch (qe_ic_info[src].pri_reg) {
-	case QEIC_CIPZCC:
-		shift = CICNR_ZCC1T_SHIFT;
-		break;
-	case QEIC_CIPWCC:
-		shift = CICNR_WCC1T_SHIFT;
-		break;
-	case QEIC_CIPYCC:
-		shift = CICNR_YCC1T_SHIFT;
-		break;
-	case QEIC_CIPXCC:
-		shift = CICNR_XCC1T_SHIFT;
-		break;
-	case QEIC_CIPRTA:
-		shift = CRICR_RTA1T_SHIFT;
-		control_reg = QEIC_CRICR;
-		break;
-	case QEIC_CIPRTB:
-		shift = CRICR_RTB1T_SHIFT;
-		control_reg = QEIC_CRICR;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	shift += (2 - priority) * 2;
-	temp = qe_ic_read(qe_ic->regs, control_reg);
-	temp &= ~(SIGNAL_MASK << shift);
-	temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
-	qe_ic_write(qe_ic->regs, control_reg, temp);
-
-	return 0;
-}
-
-static struct bus_type qe_ic_subsys = {
-	.name = "qe_ic",
-	.dev_name = "qe_ic",
-};
-
-static struct device device_qe_ic = {
-	.id = 0,
-	.bus = &qe_ic_subsys,
-};
-
-static int __init init_qe_ic_sysfs(void)
-{
-	int rc;
-
-	printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
-
-	rc = subsys_system_register(&qe_ic_subsys, NULL);
-	if (rc) {
-		printk(KERN_ERR "Failed registering qe_ic sys class\n");
-		return -ENODEV;
-	}
-	rc = device_register(&device_qe_ic);
-	if (rc) {
-		printk(KERN_ERR "Failed registering qe_ic sys device\n");
-		return -ENODEV;
-	}
-	return 0;
-}
-
-subsys_initcall(init_qe_ic_sysfs);
+subsys_initcall(qe_ic_of_init);
diff --git a/drivers/soc/fsl/qe/qe_ic.h b/drivers/soc/fsl/qe/qe_ic.h
deleted file mode 100644
index 08c6956..0000000
--- a/drivers/soc/fsl/qe/qe_ic.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * drivers/soc/fsl/qe/qe_ic.h
- *
- * QUICC ENGINE Interrupt Controller Header
- *
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: Li Yang <leoli@freescale.com>
- * Based on code from Shlomi Gridish <gridish@freescale.com>
- */
-#ifndef _POWERPC_SYSDEV_QE_IC_H
-#define _POWERPC_SYSDEV_QE_IC_H
-
-#include <soc/fsl/qe/qe_ic.h>
-
-#define NR_QE_IC_INTS		64
-
-/* QE IC registers offset */
-#define QEIC_CICR		0x00
-#define QEIC_CIVEC		0x04
-#define QEIC_CRIPNR		0x08
-#define QEIC_CIPNR		0x0c
-#define QEIC_CIPXCC		0x10
-#define QEIC_CIPYCC		0x14
-#define QEIC_CIPWCC		0x18
-#define QEIC_CIPZCC		0x1c
-#define QEIC_CIMR		0x20
-#define QEIC_CRIMR		0x24
-#define QEIC_CICNR		0x28
-#define QEIC_CIPRTA		0x30
-#define QEIC_CIPRTB		0x34
-#define QEIC_CRICR		0x3c
-#define QEIC_CHIVEC		0x60
-
-/* Interrupt priority registers */
-#define CIPCC_SHIFT_PRI0	29
-#define CIPCC_SHIFT_PRI1	26
-#define CIPCC_SHIFT_PRI2	23
-#define CIPCC_SHIFT_PRI3	20
-#define CIPCC_SHIFT_PRI4	13
-#define CIPCC_SHIFT_PRI5	10
-#define CIPCC_SHIFT_PRI6	7
-#define CIPCC_SHIFT_PRI7	4
-
-/* CICR priority modes */
-#define CICR_GWCC		0x00040000
-#define CICR_GXCC		0x00020000
-#define CICR_GYCC		0x00010000
-#define CICR_GZCC		0x00080000
-#define CICR_GRTA		0x00200000
-#define CICR_GRTB		0x00400000
-#define CICR_HPIT_SHIFT		8
-#define CICR_HPIT_MASK		0x00000300
-#define CICR_HP_SHIFT		24
-#define CICR_HP_MASK		0x3f000000
-
-/* CICNR */
-#define CICNR_WCC1T_SHIFT	20
-#define CICNR_ZCC1T_SHIFT	28
-#define CICNR_YCC1T_SHIFT	12
-#define CICNR_XCC1T_SHIFT	4
-
-/* CRICR */
-#define CRICR_RTA1T_SHIFT	20
-#define CRICR_RTB1T_SHIFT	28
-
-/* Signal indicator */
-#define SIGNAL_MASK		3
-#define SIGNAL_HIGH		2
-#define SIGNAL_LOW		0
-
-struct qe_ic {
-	/* Control registers offset */
-	volatile u32 __iomem *regs;
-
-	/* The remapper for this QEIC */
-	struct irq_domain *irqhost;
-
-	/* The "linux" controller struct */
-	struct irq_chip hc_irq;
-
-	/* VIRQ numbers of QE high/low irqs */
-	unsigned int virq_high;
-	unsigned int virq_low;
-};
-
-/*
- * QE interrupt controller internal structure
- */
-struct qe_ic_info {
-	u32	mask;	  /* location of this source at the QIMR register. */
-	u32	mask_reg; /* Mask register offset */
-	u8	pri_code; /* for grouped interrupts sources - the interrupt
-			     code as appears at the group priority register */
-	u32	pri_reg;  /* Group priority register offset */
-};
-
-#endif /* _POWERPC_SYSDEV_QE_IC_H */
diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
index 3657e29..11ea08e 100644
--- a/drivers/soc/fsl/qe/qe_io.c
+++ b/drivers/soc/fsl/qe/qe_io.c
@@ -18,8 +18,6 @@
 
 #include <asm/io.h>
 #include <soc/fsl/qe/qe.h>
-#include <asm/prom.h>
-#include <sysdev/fsl_soc.h>
 
 #undef DEBUG
 
@@ -30,7 +28,7 @@ int par_io_init(struct device_node *np)
 {
 	struct resource res;
 	int ret;
-	const u32 *num_ports;
+	u32 num_ports;
 
 	/* Map Parallel I/O ports registers */
 	ret = of_address_to_resource(np, 0, &res);
@@ -38,9 +36,8 @@ int par_io_init(struct device_node *np)
 		return ret;
 	par_io = ioremap(res.start, resource_size(&res));
 
-	num_ports = of_get_property(np, "num-ports", NULL);
-	if (num_ports)
-		num_par_io_ports = *num_ports;
+	if (!of_property_read_u32(np, "num-ports", &num_ports))
+		num_par_io_ports = num_ports;
 
 	return 0;
 }
@@ -57,16 +54,16 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
 	pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
 
 	/* Set open drain, if required */
-	tmp_val = in_be32(&par_io->cpodr);
+	tmp_val = qe_ioread32be(&par_io->cpodr);
 	if (open_drain)
-		out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
+		qe_iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
 	else
-		out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
+		qe_iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
 
 	/* define direction */
 	tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
-		in_be32(&par_io->cpdir2) :
-		in_be32(&par_io->cpdir1);
+		qe_ioread32be(&par_io->cpdir2) :
+		qe_ioread32be(&par_io->cpdir1);
 
 	/* get all bits mask for 2 bit per port */
 	pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
@@ -78,34 +75,30 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
 
 	/* clear and set 2 bits mask */
 	if (pin > (QE_PIO_PINS / 2) - 1) {
-		out_be32(&par_io->cpdir2,
-			 ~pin_mask2bits & tmp_val);
+		qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
 		tmp_val &= ~pin_mask2bits;
-		out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
+		qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
 	} else {
-		out_be32(&par_io->cpdir1,
-			 ~pin_mask2bits & tmp_val);
+		qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
 		tmp_val &= ~pin_mask2bits;
-		out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
+		qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
 	}
 	/* define pin assignment */
 	tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
-		in_be32(&par_io->cppar2) :
-		in_be32(&par_io->cppar1);
+		qe_ioread32be(&par_io->cppar2) :
+		qe_ioread32be(&par_io->cppar1);
 
 	new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
 			(pin % (QE_PIO_PINS / 2) + 1) * 2));
 	/* clear and set 2 bits mask */
 	if (pin > (QE_PIO_PINS / 2) - 1) {
-		out_be32(&par_io->cppar2,
-			 ~pin_mask2bits & tmp_val);
+		qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
 		tmp_val &= ~pin_mask2bits;
-		out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
+		qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
 	} else {
-		out_be32(&par_io->cppar1,
-			 ~pin_mask2bits & tmp_val);
+		qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
 		tmp_val &= ~pin_mask2bits;
-		out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
+		qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
 	}
 }
 EXPORT_SYMBOL(__par_io_config_pin);
@@ -133,12 +126,12 @@ int par_io_data_set(u8 port, u8 pin, u8 val)
 	/* calculate pin location */
 	pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
 
-	tmp_val = in_be32(&par_io[port].cpdata);
+	tmp_val = qe_ioread32be(&par_io[port].cpdata);
 
 	if (val == 0)		/* clear */
-		out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
+		qe_iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
 	else			/* set */
-		out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
+		qe_iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
 
 	return 0;
 }
@@ -147,23 +140,20 @@ EXPORT_SYMBOL(par_io_data_set);
 int par_io_of_config(struct device_node *np)
 {
 	struct device_node *pio;
-	const phandle *ph;
 	int pio_map_len;
-	const unsigned int *pio_map;
+	const __be32 *pio_map;
 
 	if (par_io == NULL) {
 		printk(KERN_ERR "par_io not initialized\n");
 		return -1;
 	}
 
-	ph = of_get_property(np, "pio-handle", NULL);
-	if (ph == NULL) {
+	pio = of_parse_phandle(np, "pio-handle", 0);
+	if (pio == NULL) {
 		printk(KERN_ERR "pio-handle not available\n");
 		return -1;
 	}
 
-	pio = of_find_node_by_phandle(*ph);
-
 	pio_map = of_get_property(pio, "pio-map", &pio_map_len);
 	if (pio_map == NULL) {
 		printk(KERN_ERR "pio-map is not set!\n");
@@ -176,9 +166,15 @@ int par_io_of_config(struct device_node *np)
 	}
 
 	while (pio_map_len > 0) {
-		par_io_config_pin((u8) pio_map[0], (u8) pio_map[1],
-				(int) pio_map[2], (int) pio_map[3],
-				(int) pio_map[4], (int) pio_map[5]);
+		u8 port        = be32_to_cpu(pio_map[0]);
+		u8 pin         = be32_to_cpu(pio_map[1]);
+		int dir        = be32_to_cpu(pio_map[2]);
+		int open_drain = be32_to_cpu(pio_map[3]);
+		int assignment = be32_to_cpu(pio_map[4]);
+		int has_irq    = be32_to_cpu(pio_map[5]);
+
+		par_io_config_pin(port, pin, dir, open_drain,
+				  assignment, has_irq);
 		pio_map += 6;
 		pio_map_len -= 6;
 	}
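
The par_io hunks above replace raw of_get_property() pointer reads with of_property_read_u32() and of_parse_phandle(), which handle the big-endian property conversion and the phandle lookup. Below is a minimal sketch of that pattern; the helper name is hypothetical, and the of_node_put() is the usual counterpart to of_parse_phandle(), an assumption here rather than something the patch adds.

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/printk.h>

/* Illustrative only: shows the OF accessors used in the qe_io.c conversion. */
static int example_parse_ports(struct device_node *np)
{
	struct device_node *pio;
	u32 num_ports = 0;

	/* Returns 0 on success and stores the value in CPU endianness. */
	if (of_property_read_u32(np, "num-ports", &num_ports))
		pr_warn("num-ports property missing, assuming 0\n");

	/* Resolves the phandle and takes a reference on the target node. */
	pio = of_parse_phandle(np, "pio-handle", 0);
	if (!pio)
		return -ENODEV;

	pr_info("%u ports, pio node %pOF\n", num_ports, pio);
	of_node_put(pio);	/* drop the reference taken above */

	return 0;
}
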
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index e37ebc3..7d7d78d 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -169,10 +169,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
 				    &siram[siram_entry_id * 32 + 0x200 +  i]);
 	}
 
-	setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
-		  SIR_LAST);
-	setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
-		  SIR_LAST);
+	qe_setbits_be16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+			SIR_LAST);
+	qe_setbits_be16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
+			SIR_LAST);
 
 	/* Set SIxMR register */
 	sixmr = SIMR_SAD(siram_entry_id);
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index 024d239..90157ac 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -15,7 +15,6 @@
 #include <linux/spinlock.h>
 #include <linux/export.h>
 
-#include <asm/irq.h>
 #include <asm/io.h>
 #include <soc/fsl/qe/immap_qe.h>
 #include <soc/fsl/qe/qe.h>
@@ -35,8 +34,8 @@ int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
 		return -EINVAL;
 
 	spin_lock_irqsave(&cmxgcr_lock, flags);
-	clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
-		ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+	qe_clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+			   ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
 	spin_unlock_irqrestore(&cmxgcr_lock, flags);
 
 	return 0;
@@ -80,8 +79,8 @@ int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
 		return -EINVAL;
 	}
 
-	clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
-		UCC_GUEMR_SET_RESERVED3 | speed);
+	qe_clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+			UCC_GUEMR_SET_RESERVED3 | speed);
 
 	return 0;
 }
@@ -109,9 +108,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
 	get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
 
 	if (set)
-		setbits32(cmxucr, mask << shift);
+		qe_setbits_be32(cmxucr, mask << shift);
 	else
-		clrbits32(cmxucr, mask << shift);
+		qe_clrbits_be32(cmxucr, mask << shift);
 
 	return 0;
 }
@@ -207,8 +206,8 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
 	if (mode == COMM_DIR_RX)
 		shift += 4;
 
-	clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
-		clock_bits << shift);
+	qe_clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+			   clock_bits << shift);
 
 	return 0;
 }
@@ -540,8 +539,8 @@ int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
 	cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l :
 				  &qe_mux_reg->cmxsi1cr_h;
 
-	qe_clrsetbits32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
-			clock_bits << shift);
+	qe_clrsetbits_be32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+			   clock_bits << shift);
 
 	return 0;
 }
@@ -650,9 +649,9 @@ int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
 
 	shift = ucc_get_tdm_sync_shift(mode, tdm_num);
 
-	qe_clrsetbits32(&qe_mux_reg->cmxsi1syr,
-			QE_CMXUCR_TX_CLK_SRC_MASK << shift,
-			source << shift);
+	qe_clrsetbits_be32(&qe_mux_reg->cmxsi1syr,
+			   QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+			   source << shift);
 
 	return 0;
 }
diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
index af4d80e..ad6193e 100644
--- a/drivers/soc/fsl/qe/ucc_fast.c
+++ b/drivers/soc/fsl/qe/ucc_fast.c
@@ -29,41 +29,42 @@ void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
 	printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
 
 	printk(KERN_INFO "gumr  : addr=0x%p, val=0x%08x\n",
-		  &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
+		  &uccf->uf_regs->gumr, qe_ioread32be(&uccf->uf_regs->gumr));
 	printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
-		  &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
+		  &uccf->uf_regs->upsmr, qe_ioread32be(&uccf->uf_regs->upsmr));
 	printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
-		  &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
+		  &uccf->uf_regs->utodr, qe_ioread16be(&uccf->uf_regs->utodr));
 	printk(KERN_INFO "udsr  : addr=0x%p, val=0x%04x\n",
-		  &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
+		  &uccf->uf_regs->udsr, qe_ioread16be(&uccf->uf_regs->udsr));
 	printk(KERN_INFO "ucce  : addr=0x%p, val=0x%08x\n",
-		  &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
+		  &uccf->uf_regs->ucce, qe_ioread32be(&uccf->uf_regs->ucce));
 	printk(KERN_INFO "uccm  : addr=0x%p, val=0x%08x\n",
-		  &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
+		  &uccf->uf_regs->uccm, qe_ioread32be(&uccf->uf_regs->uccm));
 	printk(KERN_INFO "uccs  : addr=0x%p, val=0x%02x\n",
-		  &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
+		  &uccf->uf_regs->uccs, qe_ioread8(&uccf->uf_regs->uccs));
 	printk(KERN_INFO "urfb  : addr=0x%p, val=0x%08x\n",
-		  &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
+		  &uccf->uf_regs->urfb, qe_ioread32be(&uccf->uf_regs->urfb));
 	printk(KERN_INFO "urfs  : addr=0x%p, val=0x%04x\n",
-		  &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
+		  &uccf->uf_regs->urfs, qe_ioread16be(&uccf->uf_regs->urfs));
 	printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
-		  &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
+		  &uccf->uf_regs->urfet, qe_ioread16be(&uccf->uf_regs->urfet));
 	printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
-		  &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
+		  &uccf->uf_regs->urfset,
+		  qe_ioread16be(&uccf->uf_regs->urfset));
 	printk(KERN_INFO "utfb  : addr=0x%p, val=0x%08x\n",
-		  &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
+		  &uccf->uf_regs->utfb, qe_ioread32be(&uccf->uf_regs->utfb));
 	printk(KERN_INFO "utfs  : addr=0x%p, val=0x%04x\n",
-		  &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
+		  &uccf->uf_regs->utfs, qe_ioread16be(&uccf->uf_regs->utfs));
 	printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
-		  &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
+		  &uccf->uf_regs->utfet, qe_ioread16be(&uccf->uf_regs->utfet));
 	printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
-		  &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
+		  &uccf->uf_regs->utftt, qe_ioread16be(&uccf->uf_regs->utftt));
 	printk(KERN_INFO "utpt  : addr=0x%p, val=0x%04x\n",
-		  &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
+		  &uccf->uf_regs->utpt, qe_ioread16be(&uccf->uf_regs->utpt));
 	printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
-		  &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
+		  &uccf->uf_regs->urtry, qe_ioread32be(&uccf->uf_regs->urtry));
 	printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
-		  &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
+		  &uccf->uf_regs->guemr, qe_ioread8(&uccf->uf_regs->guemr));
 }
 EXPORT_SYMBOL(ucc_fast_dump_regs);
 
@@ -85,7 +86,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
 
 void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
 {
-	out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
+	qe_iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
 }
 EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
 
@@ -97,7 +98,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
 	uf_regs = uccf->uf_regs;
 
 	/* Enable reception and/or transmission on this UCC. */
-	gumr = in_be32(&uf_regs->gumr);
+	gumr = qe_ioread32be(&uf_regs->gumr);
 	if (mode & COMM_DIR_TX) {
 		gumr |= UCC_FAST_GUMR_ENT;
 		uccf->enabled_tx = 1;
@@ -106,7 +107,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
 		gumr |= UCC_FAST_GUMR_ENR;
 		uccf->enabled_rx = 1;
 	}
-	out_be32(&uf_regs->gumr, gumr);
+	qe_iowrite32be(gumr, &uf_regs->gumr);
 }
 EXPORT_SYMBOL(ucc_fast_enable);
 
@@ -118,7 +119,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
 	uf_regs = uccf->uf_regs;
 
 	/* Disable reception and/or transmission on this UCC. */
-	gumr = in_be32(&uf_regs->gumr);
+	gumr = qe_ioread32be(&uf_regs->gumr);
 	if (mode & COMM_DIR_TX) {
 		gumr &= ~UCC_FAST_GUMR_ENT;
 		uccf->enabled_tx = 0;
@@ -127,7 +128,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
 		gumr &= ~UCC_FAST_GUMR_ENR;
 		uccf->enabled_rx = 0;
 	}
-	out_be32(&uf_regs->gumr, gumr);
+	qe_iowrite32be(gumr, &uf_regs->gumr);
 }
 EXPORT_SYMBOL(ucc_fast_disable);
 
@@ -196,6 +197,8 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
 			__func__);
 		return -ENOMEM;
 	}
+	uccf->ucc_fast_tx_virtual_fifo_base_offset = -1;
+	uccf->ucc_fast_rx_virtual_fifo_base_offset = -1;
 
 	/* Fill fast UCC structure */
 	uccf->uf_info = uf_info;
@@ -259,15 +262,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
 	gumr |= uf_info->tenc;
 	gumr |= uf_info->tcrc;
 	gumr |= uf_info->mode;
-	out_be32(&uf_regs->gumr, gumr);
+	qe_iowrite32be(gumr, &uf_regs->gumr);
 
 	/* Allocate memory for Tx Virtual Fifo */
 	uccf->ucc_fast_tx_virtual_fifo_base_offset =
 	    qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
-	if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
+	if (uccf->ucc_fast_tx_virtual_fifo_base_offset < 0) {
 		printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
 			__func__);
-		uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
 		ucc_fast_free(uccf);
 		return -ENOMEM;
 	}
@@ -277,24 +279,25 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
 		qe_muram_alloc(uf_info->urfs +
 			   UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
 			   UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
-	if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
+	if (uccf->ucc_fast_rx_virtual_fifo_base_offset < 0) {
 		printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
 			__func__);
-		uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
 		ucc_fast_free(uccf);
 		return -ENOMEM;
 	}
 
 	/* Set Virtual Fifo registers */
-	out_be16(&uf_regs->urfs, uf_info->urfs);
-	out_be16(&uf_regs->urfet, uf_info->urfet);
-	out_be16(&uf_regs->urfset, uf_info->urfset);
-	out_be16(&uf_regs->utfs, uf_info->utfs);
-	out_be16(&uf_regs->utfet, uf_info->utfet);
-	out_be16(&uf_regs->utftt, uf_info->utftt);
+	qe_iowrite16be(uf_info->urfs, &uf_regs->urfs);
+	qe_iowrite16be(uf_info->urfet, &uf_regs->urfet);
+	qe_iowrite16be(uf_info->urfset, &uf_regs->urfset);
+	qe_iowrite16be(uf_info->utfs, &uf_regs->utfs);
+	qe_iowrite16be(uf_info->utfet, &uf_regs->utfet);
+	qe_iowrite16be(uf_info->utftt, &uf_regs->utftt);
 	/* utfb, urfb are offsets from MURAM base */
-	out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
-	out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
+	qe_iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
+		       &uf_regs->utfb);
+	qe_iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
+		       &uf_regs->urfb);
 
 	/* Mux clocking */
 	/* Grant Support */
@@ -362,14 +365,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
 	}
 
 	/* Set interrupt mask register at UCC level. */
-	out_be32(&uf_regs->uccm, uf_info->uccm_mask);
+	qe_iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
 
 	/* First, clear anything pending at UCC level,
 	 * otherwise, old garbage may come through
 	 * as soon as the dam is opened. */
 
 	/* Writing '1' clears */
-	out_be32(&uf_regs->ucce, 0xffffffff);
+	qe_iowrite32be(0xffffffff, &uf_regs->ucce);
 
 	*uccf_ret = uccf;
 	return 0;
@@ -381,11 +384,8 @@ void ucc_fast_free(struct ucc_fast_private * uccf)
 	if (!uccf)
 		return;
 
-	if (uccf->ucc_fast_tx_virtual_fifo_base_offset)
-		qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
-
-	if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
-		qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
+	qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
+	qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
 
 	if (uccf->uf_regs)
 		iounmap(uccf->uf_regs);
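
The ucc_fast.c and ucc_slow.c changes move the MURAM offsets to a signed convention: the fields are preset to -1, allocation failures are caught with a plain negative test instead of IS_ERR_VALUE(), and the free path calls qe_muram_free() unconditionally, relying on the allocator ignoring negative offsets (implied by the hunks, not verified here). A minimal sketch of that shape with hypothetical qe_example_* names:

#include <linux/slab.h>
#include <soc/fsl/qe/qe.h>

struct qe_example_priv {
	s32 tx_fifo_offset;	/* -1 means "never allocated" */
	s32 rx_fifo_offset;
};

static void qe_example_free(struct qe_example_priv *priv)
{
	/* No per-field checks: negative offsets are assumed to be no-ops. */
	qe_muram_free(priv->tx_fifo_offset);
	qe_muram_free(priv->rx_fifo_offset);
	kfree(priv);
}

static struct qe_example_priv *qe_example_alloc(u32 tx_size, u32 rx_size)
{
	struct qe_example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return NULL;
	priv->tx_fifo_offset = -1;
	priv->rx_fifo_offset = -1;

	priv->tx_fifo_offset = qe_muram_alloc(tx_size, 8);
	if (priv->tx_fifo_offset < 0)
		goto err;
	priv->rx_fifo_offset = qe_muram_alloc(rx_size, 8);
	if (priv->rx_fifo_offset < 0)
		goto err;

	return priv;
err:
	qe_example_free(priv);
	return NULL;
}
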
diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c
index 34f0ec3..274d344 100644
--- a/drivers/soc/fsl/qe/ucc_slow.c
+++ b/drivers/soc/fsl/qe/ucc_slow.c
@@ -78,7 +78,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
 	us_regs = uccs->us_regs;
 
 	/* Enable reception and/or transmission on this UCC. */
-	gumr_l = in_be32(&us_regs->gumr_l);
+	gumr_l = qe_ioread32be(&us_regs->gumr_l);
 	if (mode & COMM_DIR_TX) {
 		gumr_l |= UCC_SLOW_GUMR_L_ENT;
 		uccs->enabled_tx = 1;
@@ -87,7 +87,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
 		gumr_l |= UCC_SLOW_GUMR_L_ENR;
 		uccs->enabled_rx = 1;
 	}
-	out_be32(&us_regs->gumr_l, gumr_l);
+	qe_iowrite32be(gumr_l, &us_regs->gumr_l);
 }
 EXPORT_SYMBOL(ucc_slow_enable);
 
@@ -99,7 +99,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
 	us_regs = uccs->us_regs;
 
 	/* Disable reception and/or transmission on this UCC. */
-	gumr_l = in_be32(&us_regs->gumr_l);
+	gumr_l = qe_ioread32be(&us_regs->gumr_l);
 	if (mode & COMM_DIR_TX) {
 		gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
 		uccs->enabled_tx = 0;
@@ -108,7 +108,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
 		gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
 		uccs->enabled_rx = 0;
 	}
-	out_be32(&us_regs->gumr_l, gumr_l);
+	qe_iowrite32be(gumr_l, &us_regs->gumr_l);
 }
 EXPORT_SYMBOL(ucc_slow_disable);
 
@@ -154,6 +154,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 			__func__);
 		return -ENOMEM;
 	}
+	uccs->rx_base_offset = -1;
+	uccs->tx_base_offset = -1;
+	uccs->us_pram_offset = -1;
 
 	/* Fill slow UCC structure */
 	uccs->us_info = us_info;
@@ -179,7 +182,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	/* Get PRAM base */
 	uccs->us_pram_offset =
 		qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
-	if (IS_ERR_VALUE(uccs->us_pram_offset)) {
+	if (uccs->us_pram_offset < 0) {
 		printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
 		ucc_slow_free(uccs);
 		return -ENOMEM;
@@ -198,7 +201,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 		return ret;
 	}
 
-	out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
+	qe_iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);
 
 	INIT_LIST_HEAD(&uccs->confQ);
 
@@ -206,10 +209,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	uccs->rx_base_offset =
 		qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
 				QE_ALIGNMENT_OF_BD);
-	if (IS_ERR_VALUE(uccs->rx_base_offset)) {
+	if (uccs->rx_base_offset < 0) {
 		printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
 			us_info->rx_bd_ring_len);
-		uccs->rx_base_offset = 0;
 		ucc_slow_free(uccs);
 		return -ENOMEM;
 	}
@@ -217,9 +219,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	uccs->tx_base_offset =
 		qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
 			QE_ALIGNMENT_OF_BD);
-	if (IS_ERR_VALUE(uccs->tx_base_offset)) {
+	if (uccs->tx_base_offset < 0) {
 		printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
-		uccs->tx_base_offset = 0;
 		ucc_slow_free(uccs);
 		return -ENOMEM;
 	}
@@ -228,27 +229,27 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
 	for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
 		/* clear bd buffer */
-		out_be32(&bd->buf, 0);
+		qe_iowrite32be(0, &bd->buf);
 		/* set bd status and length */
-		out_be32((u32 *) bd, 0);
+		qe_iowrite32be(0, (u32 *)bd);
 		bd++;
 	}
 	/* for last BD set Wrap bit */
-	out_be32(&bd->buf, 0);
-	out_be32((u32 *) bd, cpu_to_be32(T_W));
+	qe_iowrite32be(0, &bd->buf);
+	qe_iowrite32be(cpu_to_be32(T_W), (u32 *)bd);
 
 	/* Init Rx bds */
 	bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
 	for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
 		/* set bd status and length */
-		out_be32((u32*)bd, 0);
+		qe_iowrite32be(0, (u32 *)bd);
 		/* clear bd buffer */
-		out_be32(&bd->buf, 0);
+		qe_iowrite32be(0, &bd->buf);
 		bd++;
 	}
 	/* for last BD set Wrap bit */
-	out_be32((u32*)bd, cpu_to_be32(R_W));
-	out_be32(&bd->buf, 0);
+	qe_iowrite32be(cpu_to_be32(R_W), (u32 *)bd);
+	qe_iowrite32be(0, &bd->buf);
 
 	/* Set GUMR (For more details see the hardware spec.). */
 	/* gumr_h */
@@ -269,7 +270,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 		gumr |= UCC_SLOW_GUMR_H_TXSY;
 	if (us_info->rtsm)
 		gumr |= UCC_SLOW_GUMR_H_RTSM;
-	out_be32(&us_regs->gumr_h, gumr);
+	qe_iowrite32be(gumr, &us_regs->gumr_h);
 
 	/* gumr_l */
 	gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
@@ -282,7 +283,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 		gumr |= UCC_SLOW_GUMR_L_TINV;
 	if (us_info->tend)
 		gumr |= UCC_SLOW_GUMR_L_TEND;
-	out_be32(&us_regs->gumr_l, gumr);
+	qe_iowrite32be(gumr, &us_regs->gumr_l);
 
 	/* Function code registers */
 
@@ -292,8 +293,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	uccs->us_pram->rbmr = UCC_BMR_BO_BE;
 
 	/* rbase, tbase are offsets from MURAM base */
-	out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
-	out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
+	qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
+	qe_iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);
 
 	/* Mux clocking */
 	/* Grant Support */
@@ -323,14 +324,14 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	}
 
 	/* Set interrupt mask register at UCC level. */
-	out_be16(&us_regs->uccm, us_info->uccm_mask);
+	qe_iowrite16be(us_info->uccm_mask, &us_regs->uccm);
 
 	/* First, clear anything pending at UCC level,
 	 * otherwise, old garbage may come through
 	 * as soon as the dam is opened. */
 
 	/* Writing '1' clears */
-	out_be16(&us_regs->ucce, 0xffff);
+	qe_iowrite16be(0xffff, &us_regs->ucce);
 
 	/* Issue QE Init command */
 	if (us_info->init_tx && us_info->init_rx)
@@ -352,14 +353,9 @@ void ucc_slow_free(struct ucc_slow_private * uccs)
 	if (!uccs)
 		return;
 
-	if (uccs->rx_base_offset)
-		qe_muram_free(uccs->rx_base_offset);
-
-	if (uccs->tx_base_offset)
-		qe_muram_free(uccs->tx_base_offset);
-
-	if (uccs->us_pram)
-		qe_muram_free(uccs->us_pram_offset);
+	qe_muram_free(uccs->rx_base_offset);
+	qe_muram_free(uccs->tx_base_offset);
+	qe_muram_free(uccs->us_pram_offset);
 
 	if (uccs->us_regs)
 		iounmap(uccs->us_regs);
diff --git a/drivers/soc/fsl/qe/usb.c b/drivers/soc/fsl/qe/usb.c
index 32d8269..890f236 100644
--- a/drivers/soc/fsl/qe/usb.c
+++ b/drivers/soc/fsl/qe/usb.c
@@ -43,7 +43,7 @@ int qe_usb_clock_set(enum qe_clock clk, int rate)
 
 	spin_lock_irqsave(&cmxgcr_lock, flags);
 
-	clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
+	qe_clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
 
 	spin_unlock_irqrestore(&cmxgcr_lock, flags);
 
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 8aaebf1..0281ef9 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -10,7 +10,7 @@
 
 config IMX_SCU_SOC
 	bool "i.MX System Controller Unit SoC info support"
-	depends on IMX_SCU
+	depends on IMX_SCU || COMPILE_TEST
 	select SOC_BUS
 	help
 	  If you say yes here you get support for the NXP i.MX System
diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8.c
index d84ed73..719e1f18 100644
--- a/drivers/soc/imx/soc-imx8.c
+++ b/drivers/soc/imx/soc-imx8.c
@@ -142,10 +142,16 @@ static const struct imx8_soc_data imx8mn_soc_data = {
 	.soc_revision = imx8mm_soc_revision,
 };
 
+static const struct imx8_soc_data imx8mp_soc_data = {
+	.name = "i.MX8MP",
+	.soc_revision = imx8mm_soc_revision,
+};
+
 static const struct of_device_id imx8_soc_match[] = {
 	{ .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
 	{ .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
 	{ .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
+	{ .compatible = "fsl,imx8mp", .data = &imx8mp_soc_data, },
 	{ }
 };
 
@@ -204,6 +210,9 @@ static int __init imx8_soc_init(void)
 		goto free_serial_number;
 	}
 
+	pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id,
+		soc_dev_attr->revision);
+
 	if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
 		platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
 
diff --git a/drivers/soc/lantiq/fpi-bus.c b/drivers/soc/lantiq/fpi-bus.c
index cb0303a0..dff1375 100644
--- a/drivers/soc/lantiq/fpi-bus.c
+++ b/drivers/soc/lantiq/fpi-bus.c
@@ -28,14 +28,12 @@ static int ltq_fpi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
-	struct resource *res_xbar;
 	struct regmap *rcu_regmap;
 	void __iomem *xbar_membase;
 	u32 rcu_ahb_endianness_reg_offset;
 	int ret;
 
-	res_xbar = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	xbar_membase = devm_ioremap_resource(dev, res_xbar);
+	xbar_membase = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(xbar_membase))
 		return PTR_ERR(xbar_membase);
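
The fpi-bus.c hunk (and the exynos-pmu.c one further down) folds platform_get_resource() plus devm_ioremap_resource() into a single devm_platform_ioremap_resource() call. A minimal probe sketch of that idiom; example_probe and the register write are hypothetical:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* One call replaces platform_get_resource(IORESOURCE_MEM, 0)
	 * followed by devm_ioremap_resource(). */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	writel(0, base);	/* device-specific setup would go here */

	return 0;
}
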
 
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 3c82de5..de20e6c 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -9,11 +9,51 @@
 #include <linux/mailbox_controller.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 
-#define CMDQ_ARG_A_WRITE_MASK	0xffff
 #define CMDQ_WRITE_ENABLE_MASK	BIT(0)
+#define CMDQ_POLL_ENABLE_MASK	BIT(0)
 #define CMDQ_EOC_IRQ_EN		BIT(0)
-#define CMDQ_EOC_CMD		((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
-				<< 32 | CMDQ_EOC_IRQ_EN)
+
+struct cmdq_instruction {
+	union {
+		u32 value;
+		u32 mask;
+	};
+	union {
+		u16 offset;
+		u16 event;
+	};
+	u8 subsys;
+	u8 op;
+};
+
+int cmdq_dev_get_client_reg(struct device *dev,
+			    struct cmdq_client_reg *client_reg, int idx)
+{
+	struct of_phandle_args spec;
+	int err;
+
+	if (!client_reg)
+		return -ENOENT;
+
+	err = of_parse_phandle_with_fixed_args(dev->of_node,
+					       "mediatek,gce-client-reg",
+					       3, idx, &spec);
+	if (err < 0) {
+		dev_err(dev,
+			"error %d can't parse gce-client-reg property (%d)",
+			err, idx);
+
+		return err;
+	}
+
+	client_reg->subsys = (u8)spec.args[0];
+	client_reg->offset = (u16)spec.args[1];
+	client_reg->size = (u16)spec.args[2];
+	of_node_put(spec.np);
+
+	return 0;
+}
+EXPORT_SYMBOL(cmdq_dev_get_client_reg);
 
 static void cmdq_client_timeout(struct timer_list *t)
 {
@@ -110,10 +150,10 @@ void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
 }
 EXPORT_SYMBOL(cmdq_pkt_destroy);
 
-static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
-				   u32 arg_a, u32 arg_b)
+static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
+				   struct cmdq_instruction inst)
 {
-	u64 *cmd_ptr;
+	struct cmdq_instruction *cmd_ptr;
 
 	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
 		/*
@@ -129,8 +169,9 @@ static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
 			__func__, (u32)pkt->buf_size);
 		return -ENOMEM;
 	}
+
 	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
-	(*cmd_ptr) = (u64)((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b;
+	*cmd_ptr = inst;
 	pkt->cmd_buf_size += CMDQ_INST_SIZE;
 
 	return 0;
@@ -138,24 +179,34 @@ static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
 
 int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
 {
-	u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) |
-		    (subsys << CMDQ_SUBSYS_SHIFT);
+	struct cmdq_instruction inst;
 
-	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WRITE, arg_a, value);
+	inst.op = CMDQ_CODE_WRITE;
+	inst.value = value;
+	inst.offset = offset;
+	inst.subsys = subsys;
+
+	return cmdq_pkt_append_command(pkt, inst);
 }
 EXPORT_SYMBOL(cmdq_pkt_write);
 
 int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
 			u16 offset, u32 value, u32 mask)
 {
-	u32 offset_mask = offset;
-	int err = 0;
+	struct cmdq_instruction inst = { {0} };
+	u16 offset_mask = offset;
+	int err;
 
 	if (mask != 0xffffffff) {
-		err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask);
+		inst.op = CMDQ_CODE_MASK;
+		inst.mask = ~mask;
+		err = cmdq_pkt_append_command(pkt, inst);
+		if (err < 0)
+			return err;
+
 		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
 	}
-	err |= cmdq_pkt_write(pkt, subsys, offset_mask, value);
+	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);
 
 	return err;
 }
@@ -163,43 +214,85 @@ EXPORT_SYMBOL(cmdq_pkt_write_mask);
 
 int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
 {
-	u32 arg_b;
+	struct cmdq_instruction inst = { {0} };
 
 	if (event >= CMDQ_MAX_EVENT)
 		return -EINVAL;
 
-	/*
-	 * WFE arg_b
-	 * bit 0-11: wait value
-	 * bit 15: 1 - wait, 0 - no wait
-	 * bit 16-27: update value
-	 * bit 31: 1 - update, 0 - no update
-	 */
-	arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
+	inst.op = CMDQ_CODE_WFE;
+	inst.value = CMDQ_WFE_OPTION;
+	inst.event = event;
 
-	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b);
+	return cmdq_pkt_append_command(pkt, inst);
 }
 EXPORT_SYMBOL(cmdq_pkt_wfe);
 
 int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
 {
+	struct cmdq_instruction inst = { {0} };
+
 	if (event >= CMDQ_MAX_EVENT)
 		return -EINVAL;
 
-	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event,
-				       CMDQ_WFE_UPDATE);
+	inst.op = CMDQ_CODE_WFE;
+	inst.value = CMDQ_WFE_UPDATE;
+	inst.event = event;
+
+	return cmdq_pkt_append_command(pkt, inst);
 }
 EXPORT_SYMBOL(cmdq_pkt_clear_event);
 
+int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
+		  u16 offset, u32 value)
+{
+	struct cmdq_instruction inst = { {0} };
+	int err;
+
+	inst.op = CMDQ_CODE_POLL;
+	inst.value = value;
+	inst.offset = offset;
+	inst.subsys = subsys;
+	err = cmdq_pkt_append_command(pkt, inst);
+
+	return err;
+}
+EXPORT_SYMBOL(cmdq_pkt_poll);
+
+int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
+		       u16 offset, u32 value, u32 mask)
+{
+	struct cmdq_instruction inst = { {0} };
+	int err;
+
+	inst.op = CMDQ_CODE_MASK;
+	inst.mask = ~mask;
+	err = cmdq_pkt_append_command(pkt, inst);
+	if (err < 0)
+		return err;
+
+	offset = offset | CMDQ_POLL_ENABLE_MASK;
+	err = cmdq_pkt_poll(pkt, subsys, offset, value);
+
+	return err;
+}
+EXPORT_SYMBOL(cmdq_pkt_poll_mask);
+
 static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
 {
+	struct cmdq_instruction inst = { {0} };
 	int err;
 
 	/* insert EOC and generate IRQ for each command iteration */
-	err = cmdq_pkt_append_command(pkt, CMDQ_CODE_EOC, 0, CMDQ_EOC_IRQ_EN);
+	inst.op = CMDQ_CODE_EOC;
+	inst.value = CMDQ_EOC_IRQ_EN;
+	err = cmdq_pkt_append_command(pkt, inst);
+	if (err < 0)
+		return err;
 
 	/* JUMP to end */
-	err |= cmdq_pkt_append_command(pkt, CMDQ_CODE_JUMP, 0, CMDQ_JUMP_PASS);
+	inst.op = CMDQ_CODE_JUMP;
+	inst.value = CMDQ_JUMP_PASS;
+	err = cmdq_pkt_append_command(pkt, inst);
 
 	return err;
 }
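
The cmdq helper refactor replaces the open-coded 64-bit shifts with struct cmdq_instruction, which on a little-endian build lands value/mask in the low word and op in the top byte, the same positions the old shift-based encoding used. Below is a hypothetical caller of the resulting API; the subsys/offset/value numbers are made up, only the helper names come from the code above.

#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

static struct cmdq_pkt *example_build_pkt(struct cmdq_client *client)
{
	struct cmdq_pkt *pkt;
	int err;

	pkt = cmdq_pkt_create(client, SZ_4K);
	if (IS_ERR(pkt))
		return pkt;

	/* Masked write: a CMDQ_CODE_MASK instruction followed by a
	 * CMDQ_CODE_WRITE with the "use mask" bit set in the offset. */
	err = cmdq_pkt_write_mask(pkt, 0x14, 0x0040, 0x1, 0x1);
	if (err)
		goto err_destroy;

	/* Have the GCE poll the register until (reg & mask) == value. */
	err = cmdq_pkt_poll_mask(pkt, 0x14, 0x0044, 0x0, 0xffff);
	if (err)
		goto err_destroy;

	return pkt;	/* the caller flushes and later destroys the packet */

err_destroy:
	cmdq_pkt_destroy(pkt);
	return ERR_PTR(err);
}
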
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 79d8265..d0a73e7 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -45,13 +45,13 @@
 	  neighboring subsystems going up or down.
 
 config QCOM_GSBI
-        tristate "QCOM General Serial Bus Interface"
-        depends on ARCH_QCOM || COMPILE_TEST
-        select MFD_SYSCON
-        help
-          Say y here to enable GSBI support.  The GSBI provides control
-          functions for connecting the underlying serial UART, SPI, and I2C
-          devices to the output pins.
+	tristate "QCOM General Serial Bus Interface"
+	depends on ARCH_QCOM || COMPILE_TEST
+	select MFD_SYSCON
+	help
+	  Say y here to enable GSBI support.  The GSBI provides control
+	  functions for connecting the underlying serial UART, SPI, and I2C
+	  devices to the output pins.
 
 config QCOM_LLCC
 	tristate "Qualcomm Technologies, Inc. LLCC driver"
@@ -71,10 +71,10 @@
 	depends on ARCH_QCOM
 	select QCOM_SCM
 	help
-          The On Chip Memory (OCMEM) allocator allows various clients to
-          allocate memory from OCMEM based on performance, latency and power
-          requirements. This is typically used by the GPU, camera/video, and
-          audio components on some Snapdragon SoCs.
+	  The On Chip Memory (OCMEM) allocator allows various clients to
+	  allocate memory from OCMEM based on performance, latency and power
+	  requirements. This is typically used by the GPU, camera/video, and
+	  audio components on some Snapdragon SoCs.
 
 config QCOM_PM
 	bool "Qualcomm Power Management"
@@ -198,8 +198,8 @@
 	depends on ARCH_QCOM || COMPILE_TEST
 	depends on RPMSG
 	help
-          Enable APR IPC protocol support between
-          application processor and QDSP6. APR is
-          used by audio driver to configure QDSP6
-          ASM, ADM and AFE modules.
+	  Enable APR IPC protocol support between
+	  application processor and QDSP6. APR is
+	  used by audio driver to configure QDSP6
+	  ASM, ADM and AFE modules.
 endmenu
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index f9e309f..1a03eaa 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -655,8 +655,12 @@ int qmi_handle_init(struct qmi_handle *qmi, size_t recv_buf_size,
 
 	qmi->sock = qmi_sock_create(qmi, &qmi->sq);
 	if (IS_ERR(qmi->sock)) {
-		pr_err("failed to create QMI socket\n");
-		ret = PTR_ERR(qmi->sock);
+		if (PTR_ERR(qmi->sock) == -EAFNOSUPPORT) {
+			ret = -EPROBE_DEFER;
+		} else {
+			pr_err("failed to create QMI socket\n");
+			ret = PTR_ERR(qmi->sock);
+		}
 		goto err_destroy_wq;
 	}
 
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index 5741ec3..4d264d0 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -93,6 +93,7 @@ static struct rpmhpd sdm845_mx = {
 
 static struct rpmhpd sdm845_mx_ao = {
 	.pd = { .name = "mx_ao", },
+	.active_only = true,
 	.peer = &sdm845_mx,
 	.res_name = "mx.lvl",
 };
@@ -107,6 +108,7 @@ static struct rpmhpd sdm845_cx = {
 
 static struct rpmhpd sdm845_cx_ao = {
 	.pd = { .name = "cx_ao", },
+	.active_only = true,
 	.peer = &sdm845_cx,
 	.parent = &sdm845_mx_ao.pd,
 	.res_name = "cx.lvl",
@@ -129,8 +131,62 @@ static const struct rpmhpd_desc sdm845_desc = {
 	.num_pds = ARRAY_SIZE(sdm845_rpmhpds),
 };
 
+/* SM8150 RPMH powerdomains */
+
+static struct rpmhpd sm8150_mmcx_ao;
+static struct rpmhpd sm8150_mmcx = {
+	.pd = { .name = "mmcx", },
+	.peer = &sm8150_mmcx_ao,
+	.res_name = "mmcx.lvl",
+};
+
+static struct rpmhpd sm8150_mmcx_ao = {
+	.pd = { .name = "mmcx_ao", },
+	.active_only = true,
+	.peer = &sm8150_mmcx,
+	.res_name = "mmcx.lvl",
+};
+
+static struct rpmhpd *sm8150_rpmhpds[] = {
+	[SM8150_MSS] = &sdm845_mss,
+	[SM8150_EBI] = &sdm845_ebi,
+	[SM8150_LMX] = &sdm845_lmx,
+	[SM8150_LCX] = &sdm845_lcx,
+	[SM8150_GFX] = &sdm845_gfx,
+	[SM8150_MX] = &sdm845_mx,
+	[SM8150_MX_AO] = &sdm845_mx_ao,
+	[SM8150_CX] = &sdm845_cx,
+	[SM8150_CX_AO] = &sdm845_cx_ao,
+	[SM8150_MMCX] = &sm8150_mmcx,
+	[SM8150_MMCX_AO] = &sm8150_mmcx_ao,
+};
+
+static const struct rpmhpd_desc sm8150_desc = {
+	.rpmhpds = sm8150_rpmhpds,
+	.num_pds = ARRAY_SIZE(sm8150_rpmhpds),
+};
+
+/* SC7180 RPMH powerdomains */
+static struct rpmhpd *sc7180_rpmhpds[] = {
+	[SC7180_CX] = &sdm845_cx,
+	[SC7180_CX_AO] = &sdm845_cx_ao,
+	[SC7180_GFX] = &sdm845_gfx,
+	[SC7180_MX] = &sdm845_mx,
+	[SC7180_MX_AO] = &sdm845_mx_ao,
+	[SC7180_LMX] = &sdm845_lmx,
+	[SC7180_LCX] = &sdm845_lcx,
+	[SC7180_MSS] = &sdm845_mss,
+};
+
+static const struct rpmhpd_desc sc7180_desc = {
+	.rpmhpds = sc7180_rpmhpds,
+	.num_pds = ARRAY_SIZE(sc7180_rpmhpds),
+};
+
 static const struct of_device_id rpmhpd_match_table[] = {
+	{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
 	{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
+	{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
 	{ }
 };
 
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index f93492b..ba2b8b5 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -192,21 +192,25 @@
 	help
 	  This enables support for the Renesas RZ/G2E SoC.
 
+config ARCH_R8A77950
+	bool
+
+config ARCH_R8A77951
+	bool
+
 config ARCH_R8A7795
 	bool "Renesas R-Car H3 SoC Platform"
+	select ARCH_R8A77950
+	select ARCH_R8A77951
 	select ARCH_RCAR_GEN3
 	select SYSC_R8A7795
 	help
 	  This enables support for the Renesas R-Car H3 SoC.
 
 config ARCH_R8A77960
-	bool
+	bool "Renesas R-Car M3-W SoC Platform"
 	select ARCH_RCAR_GEN3
 	select SYSC_R8A77960
-
-config ARCH_R8A7796
-	bool "Renesas R-Car M3-W SoC Platform"
-	select ARCH_R8A77960
 	help
 	  This enables support for the Renesas R-Car M3-W SoC.
 
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 14d05a0..2af2e0d 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -21,7 +21,7 @@ static int rcar_rst_enable_wdt_reset(void __iomem *base)
 
 struct rst_config {
 	unsigned int modemr;		/* Mode Monitoring Register Offset */
-	int (*configure)(void *base);	/* Platform specific configuration */
+	int (*configure)(void __iomem *base);	/* Platform specific config */
 };
 
 static const struct rst_config rcar_rst_gen1 __initconst = {
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 27fc59b..c7a2003 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 #
-# SAMSUNG SoC drivers
+# Samsung SoC drivers
 #
 menuconfig SOC_SAMSUNG
 	bool "Samsung SoC driver support" if COMPILE_TEST
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index b89c26a..2dad496 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -3,7 +3,7 @@
  * Copyright (c) 2019 Samsung Electronics Co., Ltd.
  *	      http://www.samsung.com/
  *
- * EXYNOS - CHIP ID support
+ * Exynos - CHIP ID support
  * Author: Pankaj Dubey <pankaj.dubey@samsung.com>
  * Author: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
  */
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index d34ca20..17304fa 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -3,7 +3,7 @@
 // Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
 //		http://www.samsung.com/
 //
-// EXYNOS - CPU PMU(Power Management Unit) support
+// Exynos - CPU PMU(Power Management Unit) support
 
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -110,10 +110,8 @@ EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
 static int exynos_pmu_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct resource *res;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	pmu_base_addr = devm_ioremap_resource(dev, res);
+	pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(pmu_base_addr))
 		return PTR_ERR(pmu_base_addr);
 
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
index 977e4da..5e851f3 100644
--- a/drivers/soc/samsung/exynos-pmu.h
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -3,7 +3,7 @@
  * Copyright (c) 2015 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
- * Header for EXYNOS PMU Driver support
+ * Header for Exynos PMU Driver support
  */
 
 #ifndef __EXYNOS_PMU_H
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
index 275d348..30f230e 100644
--- a/drivers/soc/samsung/exynos3250-pmu.c
+++ b/drivers/soc/samsung/exynos3250-pmu.c
@@ -3,7 +3,7 @@
 // Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
 //		http://www.samsung.com/
 //
-// EXYNOS3250 - CPU PMU (Power Management Unit) support
+// Exynos3250 - CPU PMU (Power Management Unit) support
 
 #include <linux/soc/samsung/exynos-regs-pmu.h>
 #include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos4-pmu.c b/drivers/soc/samsung/exynos4-pmu.c
index a7cdbf1..cb35103 100644
--- a/drivers/soc/samsung/exynos4-pmu.c
+++ b/drivers/soc/samsung/exynos4-pmu.c
@@ -3,7 +3,7 @@
 // Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
 //		http://www.samsung.com/
 //
-// EXYNOS4 - CPU PMU(Power Management Unit) support
+// Exynos4 - CPU PMU(Power Management Unit) support
 
 #include <linux/soc/samsung/exynos-regs-pmu.h>
 #include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
index 19b38e0..7a2d50b 100644
--- a/drivers/soc/samsung/exynos5250-pmu.c
+++ b/drivers/soc/samsung/exynos5250-pmu.c
@@ -3,7 +3,7 @@
 // Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
 //		http://www.samsung.com/
 //
-// EXYNOS5250 - CPU PMU (Power Management Unit) support
+// Exynos5250 - CPU PMU (Power Management Unit) support
 
 #include <linux/soc/samsung/exynos-regs-pmu.h>
 #include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
index b236d3b..6fedcd7 100644
--- a/drivers/soc/samsung/exynos5420-pmu.c
+++ b/drivers/soc/samsung/exynos5420-pmu.c
@@ -3,7 +3,7 @@
 // Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
 //		http://www.samsung.com/
 //
-// EXYNOS5420 - CPU PMU (Power Management Unit) support
+// Exynos5420 - CPU PMU (Power Management Unit) support
 
 #include <linux/pm.h>
 #include <linux/soc/samsung/exynos-regs-pmu.h>
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 84bd615..3693532 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -126,6 +126,7 @@
 	def_bool y
 	depends on ARCH_TEGRA
 	select SOC_BUS
+	select TEGRA20_APB_DMA if ARCH_TEGRA_2x_SOC
 
 config SOC_TEGRA_FLOWCTRL
 	bool
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 606abbe5..802717b 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -49,6 +49,9 @@ static struct tegra_fuse *fuse = &(struct tegra_fuse) {
 };
 
 static const struct of_device_id tegra_fuse_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+	{ .compatible = "nvidia,tegra194-efuse", .data = &tegra194_fuse_soc },
+#endif
 #ifdef CONFIG_ARCH_TEGRA_186_SOC
 	{ .compatible = "nvidia,tegra186-efuse", .data = &tegra186_fuse_soc },
 #endif
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index b8daaf5..f68f4e1 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -320,3 +320,32 @@ const struct tegra_fuse_soc tegra186_fuse_soc = {
 	.num_lookups = ARRAY_SIZE(tegra186_fuse_lookups),
 };
 #endif
+
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+static const struct nvmem_cell_lookup tegra194_fuse_lookups[] = {
+	{
+		.nvmem_name = "fuse",
+		.cell_name = "xusb-pad-calibration",
+		.dev_id = "3520000.padctl",
+		.con_id = "calibration",
+	}, {
+		.nvmem_name = "fuse",
+		.cell_name = "xusb-pad-calibration-ext",
+		.dev_id = "3520000.padctl",
+		.con_id = "calibration-ext",
+	},
+};
+
+static const struct tegra_fuse_info tegra194_fuse_info = {
+	.read = tegra30_fuse_read,
+	.size = 0x300,
+	.spare = 0x280,
+};
+
+const struct tegra_fuse_soc tegra194_fuse_soc = {
+	.init = tegra30_fuse_init,
+	.info = &tegra194_fuse_info,
+	.lookups = tegra194_fuse_lookups,
+	.num_lookups = ARRAY_SIZE(tegra194_fuse_lookups),
+};
+#endif
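
The Tegra194 additions follow the existing pattern: a fuse_info describing the register window plus an nvmem_cell_lookup table that lets the XUSB pad controller device resolve its calibration fuses by con_id. A hypothetical consumer-side sketch of how such a lookup is used; example_read_calib and the fixed-size read are assumptions, not padctl code.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int example_read_calib(struct device *dev, u32 *calib)
{
	struct nvmem_cell *cell;
	size_t len;
	void *value;

	/* "calibration" matches the con_id in the lookup table above. */
	cell = nvmem_cell_get(dev, "calibration");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	value = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(value))
		return PTR_ERR(value);

	if (len >= sizeof(*calib))
		*calib = *(u32 *)value;

	kfree(value);
	return 0;
}
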
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index 0f74c2c..94a059e 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -108,4 +108,8 @@ extern const struct tegra_fuse_soc tegra210_fuse_soc;
 extern const struct tegra_fuse_soc tegra186_fuse_soc;
 #endif
 
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+extern const struct tegra_fuse_soc tegra194_fuse_soc;
+#endif
+
 #endif
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index a2fd6cc..089d934 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -21,18 +21,15 @@
 #define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT	\
 	(0x3 << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT)
 
-static void __iomem *apbmisc_base;
-static void __iomem *strapping_base;
 static bool long_ram_code;
+static u32 strapping;
+static u32 chipid;
 
 u32 tegra_read_chipid(void)
 {
-	if (!apbmisc_base) {
-		WARN(1, "Tegra Chip ID not yet available\n");
-		return 0;
-	}
+	WARN(!chipid, "Tegra APB MISC not yet available\n");
 
-	return readl_relaxed(apbmisc_base + 4);
+	return chipid;
 }
 
 u8 tegra_get_chip_id(void)
@@ -42,10 +39,9 @@ u8 tegra_get_chip_id(void)
 
 u32 tegra_read_straps(void)
 {
-	if (strapping_base)
-		return readl_relaxed(strapping_base);
-	else
-		return 0;
+	WARN(!chipid, "Tegra APB MISC not yet available\n");
+
+	return strapping;
 }
 
 u32 tegra_read_ram_code(void)
@@ -63,6 +59,7 @@ u32 tegra_read_ram_code(void)
 static const struct of_device_id apbmisc_match[] __initconst = {
 	{ .compatible = "nvidia,tegra20-apbmisc", },
 	{ .compatible = "nvidia,tegra186-misc", },
+	{ .compatible = "nvidia,tegra194-misc", },
 	{},
 };
 
@@ -103,6 +100,7 @@ void __init tegra_init_revision(void)
 
 void __init tegra_init_apbmisc(void)
 {
+	void __iomem *apbmisc_base, *strapping_base;
 	struct resource apbmisc, straps;
 	struct device_node *np;
 
@@ -123,7 +121,7 @@ void __init tegra_init_apbmisc(void)
 			apbmisc.flags = IORESOURCE_MEM;
 
 			/* strapping options */
-			if (tegra_get_chip_id() == TEGRA124) {
+			if (of_machine_is_compatible("nvidia,tegra124")) {
 				straps.start = 0x7000e864;
 				straps.end = 0x7000e867;
 			} else {
@@ -160,12 +158,20 @@ void __init tegra_init_apbmisc(void)
 	}
 
 	apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc));
-	if (!apbmisc_base)
+	if (!apbmisc_base) {
 		pr_err("failed to map APBMISC registers\n");
+	} else {
+		chipid = readl_relaxed(apbmisc_base + 4);
+		iounmap(apbmisc_base);
+	}
 
 	strapping_base = ioremap(straps.start, resource_size(&straps));
-	if (!strapping_base)
+	if (!strapping_base) {
 		pr_err("failed to map strapping options registers\n");
+	} else {
+		strapping = readl_relaxed(strapping_base);
+		iounmap(strapping_base);
+	}
 
 	long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
 }
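
The tegra-apbmisc.c change reads the chip ID and strapping registers once during early init, caches them in plain variables and unmaps the I/O regions again, so the later accessors become simple loads that only WARN when called before init. The shape of that pattern with hypothetical example_* names:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>

static u32 example_chipid;	/* cached at early init, 0 = not read yet */

static void __init example_cache_chipid(phys_addr_t start, size_t size)
{
	void __iomem *regs = ioremap(start, size);

	if (!regs) {
		pr_err("failed to map example chip ID registers\n");
		return;
	}

	/* Read once, then drop the mapping for good. */
	example_chipid = readl_relaxed(regs + 4);
	iounmap(regs);
}

static u32 example_read_chipid(void)
{
	WARN(!example_chipid, "example chip ID not yet available\n");

	return example_chipid;
}
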
diff --git a/drivers/soc/tegra/regulators-tegra20.c b/drivers/soc/tegra/regulators-tegra20.c
index ea0eede..367a71a 100644
--- a/drivers/soc/tegra/regulators-tegra20.c
+++ b/drivers/soc/tegra/regulators-tegra20.c
@@ -162,6 +162,9 @@ static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
 			core_target_uV = max(rtc_uV - max_spread, core_target_uV);
 		}
 
+		if (core_uV == core_target_uV)
+			goto update_rtc;
+
 		err = regulator_set_voltage_rdev(core_rdev,
 						 core_target_uV,
 						 core_max_uV,
@@ -170,7 +173,7 @@ static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
 			return err;
 
 		core_uV = core_target_uV;
-
+update_rtc:
 		if (rtc_uV < rtc_min_uV) {
 			rtc_target_uV = min(rtc_uV + max_spread, rtc_min_uV);
 			rtc_target_uV = min(core_uV + max_spread, rtc_target_uV);
@@ -179,6 +182,9 @@ static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
 			rtc_target_uV = max(core_uV - max_spread, rtc_target_uV);
 		}
 
+		if (rtc_uV == rtc_target_uV)
+			continue;
+
 		err = regulator_set_voltage_rdev(rtc_rdev,
 						 rtc_target_uV,
 						 rtc_max_uV,
diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
index 8e623ff..7f21f31 100644
--- a/drivers/soc/tegra/regulators-tegra30.c
+++ b/drivers/soc/tegra/regulators-tegra30.c
@@ -209,6 +209,9 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
 			cpu_target_uV = max(core_uV - max_spread, cpu_target_uV);
 		}
 
+		if (cpu_uV == cpu_target_uV)
+			goto update_core;
+
 		err = regulator_set_voltage_rdev(cpu_rdev,
 						 cpu_target_uV,
 						 cpu_max_uV,
@@ -231,6 +234,9 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
 			core_target_uV = max(core_target_uV, core_uV - core_max_step);
 		}
 
+		if (core_uV == core_target_uV)
+			continue;
+
 		err = regulator_set_voltage_rdev(core_rdev,
 						 core_target_uV,
 						 core_max_uV,
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 1ccc906..37f3db6 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -25,6 +25,8 @@
 
 static struct knav_device *kdev;
 static DEFINE_MUTEX(knav_dev_lock);
+#define knav_dev_lock_held() \
+	lockdep_is_held(&knav_dev_lock)
 
 /* Queue manager register indices in DTS */
 #define KNAV_QUEUE_PEEK_REG_INDEX	0
@@ -52,8 +54,9 @@ static DEFINE_MUTEX(knav_dev_lock);
 #define knav_queue_idx_to_inst(kdev, idx)			\
 	(kdev->instances + (idx << kdev->inst_shift))
 
-#define for_each_handle_rcu(qh, inst)			\
-	list_for_each_entry_rcu(qh, &inst->handles, list)
+#define for_each_handle_rcu(qh, inst)				\
+	list_for_each_entry_rcu(qh, &inst->handles, list,	\
+				knav_dev_lock_held())
 
 #define for_each_instance(idx, inst, kdev)		\
 	for (idx = 0, inst = kdev->instances;		\
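
The for_each_handle_rcu() change uses the optional fourth argument of list_for_each_entry_rcu(), a lockdep expression declaring that holding knav_dev_lock is a legitimate alternative to rcu_read_lock() for walking the list. A self-contained sketch of that idiom with hypothetical example_* names:

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/rculist.h>

struct example_item {
	struct list_head list;
	int id;
};

static LIST_HEAD(example_items);
static DEFINE_MUTEX(example_lock);

static struct example_item *example_find(int id)
{
	struct example_item *item;

	mutex_lock(&example_lock);
	/* The extra condition tells RCU lockdep that example_lock also
	 * protects this traversal, avoiding a false-positive splat. */
	list_for_each_entry_rcu(item, &example_items, list,
				lockdep_is_held(&example_lock)) {
		if (item->id == id) {
			mutex_unlock(&example_lock);
			return item;
		}
	}
	mutex_unlock(&example_lock);

	return NULL;
}
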
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 01e76b5..223f1f9 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -21,11 +21,15 @@
 	bool "Enable Xilinx Zynq MPSoC Power Management driver"
 	depends on PM && ARCH_ZYNQMP
 	default y
+	select MAILBOX
+	select ZYNQMP_IPI_MBOX
 	help
 	  Say yes to enable power management support for ZynqMP SoC.
 	  This driver uses firmware driver as an interface for power
 	  management request to firmware. It registers isr to handle
-	  power management callbacks from firmware.
+	  power management callbacks from firmware. It registers a mailbox client
+	  to handle power management callbacks from firmware.
+
 	  If in doubt, say N.
 
 config ZYNQMP_PM_DOMAINS
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 1b9d144..0922789 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -2,7 +2,7 @@
 /*
  * Xilinx Zynq MPSoC Power Management
  *
- *  Copyright (C) 2014-2018 Xilinx, Inc.
+ *  Copyright (C) 2014-2019 Xilinx, Inc.
  *
  *  Davorin Mista <davorin.mista@aggios.com>
  *  Jolly Shah <jollys@xilinx.com>
@@ -16,6 +16,21 @@
 #include <linux/suspend.h>
 
 #include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/mailbox/zynqmp-ipi-message.h>
+
+/**
+ * struct zynqmp_pm_work_struct - Wrapper for struct work_struct
+ * @callback_work:	Work structure
+ * @args:		Callback arguments
+ */
+struct zynqmp_pm_work_struct {
+	struct work_struct callback_work;
+	u32 args[CB_ARG_CNT];
+};
+
+static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work;
+static struct mbox_chan *rx_chan;
+static const struct zynqmp_eemi_ops *eemi_ops;
 
 enum pm_suspend_mode {
 	PM_SUSPEND_MODE_FIRST = 0,
@@ -31,7 +46,6 @@ static const char *const suspend_modes[] = {
 };
 
 static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
-static const struct zynqmp_eemi_ops *eemi_ops;
 
 enum pm_api_cb_id {
 	PM_INIT_SUSPEND_CB = 30,
@@ -68,6 +82,53 @@ static irqreturn_t zynqmp_pm_isr(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static void ipi_receive_callback(struct mbox_client *cl, void *data)
+{
+	struct zynqmp_ipi_message *msg = (struct zynqmp_ipi_message *)data;
+	u32 payload[CB_PAYLOAD_SIZE];
+	int ret;
+
+	memcpy(payload, msg->data, sizeof(msg->len));
+	/* First element is callback API ID, others are callback arguments */
+	if (payload[0] == PM_INIT_SUSPEND_CB) {
+		if (work_pending(&zynqmp_pm_init_suspend_work->callback_work))
+			return;
+
+		/* Copy callback arguments into work's structure */
+		memcpy(zynqmp_pm_init_suspend_work->args, &payload[1],
+		       sizeof(zynqmp_pm_init_suspend_work->args));
+
+		queue_work(system_unbound_wq,
+			   &zynqmp_pm_init_suspend_work->callback_work);
+
+		/* Send NULL message to mbox controller to ack the message */
+		ret = mbox_send_message(rx_chan, NULL);
+		if (ret)
+			pr_err("IPI ack failed. Error %d\n", ret);
+	}
+}
+
+/**
+ * zynqmp_pm_init_suspend_work_fn - Initialize suspend
+ * @work:	Pointer to work_struct
+ *
+ * Bottom-half of PM callback IRQ handler.
+ */
+static void zynqmp_pm_init_suspend_work_fn(struct work_struct *work)
+{
+	struct zynqmp_pm_work_struct *pm_work =
+		container_of(work, struct zynqmp_pm_work_struct, callback_work);
+
+	if (pm_work->args[0] == SUSPEND_SYSTEM_SHUTDOWN) {
+		orderly_poweroff(true);
+	} else if (pm_work->args[0] == SUSPEND_POWER_REQUEST) {
+		pm_suspend(PM_SUSPEND_MEM);
+	} else {
+		pr_err("%s Unsupported InitSuspendCb reason code %d.\n",
+		       __func__, pm_work->args[0]);
+	}
+}
+
 static ssize_t suspend_mode_show(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
@@ -119,6 +180,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
 {
 	int ret, irq;
 	u32 pm_api_version;
+	struct mbox_client *client;
 
 	eemi_ops = zynqmp_pm_get_eemi_ops();
 	if (IS_ERR(eemi_ops))
@@ -134,17 +196,46 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
 	if (pm_api_version < ZYNQMP_PM_VERSION)
 		return -ENODEV;
 
-	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0)
-		return -ENXIO;
+	if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) {
+		zynqmp_pm_init_suspend_work =
+			devm_kzalloc(&pdev->dev,
+				     sizeof(struct zynqmp_pm_work_struct),
+				     GFP_KERNEL);
+		if (!zynqmp_pm_init_suspend_work)
+			return -ENOMEM;
 
-	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, zynqmp_pm_isr,
-					IRQF_NO_SUSPEND | IRQF_ONESHOT,
-					dev_name(&pdev->dev), &pdev->dev);
-	if (ret) {
-		dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed "
-			"with %d\n", irq, ret);
-		return ret;
+		INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work,
+			  zynqmp_pm_init_suspend_work_fn);
+		client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
+		if (!client)
+			return -ENOMEM;
+
+		client->dev = &pdev->dev;
+		client->rx_callback = ipi_receive_callback;
+
+		rx_chan = mbox_request_channel_byname(client, "rx");
+		if (IS_ERR(rx_chan)) {
+			dev_err(&pdev->dev, "Failed to request rx channel\n");
+			return PTR_ERR(rx_chan);
+		}
+	} else if (of_find_property(pdev->dev.of_node, "interrupts", NULL)) {
+		irq = platform_get_irq(pdev, 0);
+		if (irq <= 0)
+			return -ENXIO;
+
+		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+						zynqmp_pm_isr,
+						IRQF_NO_SUSPEND | IRQF_ONESHOT,
+						dev_name(&pdev->dev),
+						&pdev->dev);
+		if (ret) {
+			dev_err(&pdev->dev, "devm_request_threaded_irq '%d' "
+					    "failed with %d\n", irq, ret);
+			return ret;
+		}
+	} else {
+		dev_err(&pdev->dev, "Required property not found in DT node\n");
+		return -ENOENT;
 	}
 
 	ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
@@ -160,6 +251,9 @@ static int zynqmp_pm_remove(struct platform_device *pdev)
 {
 	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
 
+	if (rx_chan)
+		mbox_free_channel(rx_chan);
+
 	return 0;
 }
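
The zynqmp_power.c rework registers a mailbox client whose rx callback only copies the payload and queues a work item, with the actual suspend or poweroff decision taken in process context. A stripped-down sketch of that client plus bottom-half pairing; the example_* names are hypothetical, while the "rx" channel name and NULL ack message mirror the code above.

#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_pm {
	struct mbox_client client;
	struct mbox_chan *rx_chan;
	struct work_struct work;
};

static void example_work_fn(struct work_struct *work)
{
	/* Heavy work (e.g. orderly_poweroff()) runs in process context. */
}

static void example_rx_callback(struct mbox_client *cl, void *data)
{
	struct example_pm *pm = container_of(cl, struct example_pm, client);

	queue_work(system_unbound_wq, &pm->work);
	mbox_send_message(pm->rx_chan, NULL);	/* ack the notification */
}

static int example_pm_probe(struct platform_device *pdev)
{
	struct example_pm *pm;

	pm = devm_kzalloc(&pdev->dev, sizeof(*pm), GFP_KERNEL);
	if (!pm)
		return -ENOMEM;

	INIT_WORK(&pm->work, example_work_fn);
	pm->client.dev = &pdev->dev;
	pm->client.rx_callback = example_rx_callback;

	pm->rx_chan = mbox_request_channel_byname(&pm->client, "rx");
	if (IS_ERR(pm->rx_chan))
		return PTR_ERR(pm->rx_chan);

	platform_set_drvdata(pdev, pm);

	return 0;
}
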
 
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index c725d0a..fa2b4ab 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -31,4 +31,13 @@
 	  enable this config option to get the SoundWire support for that
 	  device.
 
+config SOUNDWIRE_QCOM
+	tristate "Qualcomm SoundWire Master driver"
+	depends on SLIMBUS
+	depends on SND_SOC
+	help
+	  SoundWire Qualcomm Master driver.
+	  If you have a Qualcomm platform which has a SoundWire Master then
+	  enable this config option to get the SoundWire support for that
+	  device.
 endif
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index 563894e..e2cdff9 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -21,3 +21,7 @@
 
 soundwire-intel-init-objs := intel_init.o
 obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel-init.o
+
+# Qualcomm driver
+soundwire-qcom-objs := qcom.o
+obj-$(CONFIG_SOUNDWIRE_QCOM) += soundwire-qcom.o
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index be5d437..6106577 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -456,26 +456,35 @@ static int sdw_get_device_num(struct sdw_slave *slave)
 static int sdw_assign_device_num(struct sdw_slave *slave)
 {
 	int ret, dev_num;
+	bool new_device = false;
 
 	/* check first if device number is assigned, if so reuse that */
 	if (!slave->dev_num) {
-		mutex_lock(&slave->bus->bus_lock);
-		dev_num = sdw_get_device_num(slave);
-		mutex_unlock(&slave->bus->bus_lock);
-		if (dev_num < 0) {
-			dev_err(slave->bus->dev, "Get dev_num failed: %d\n",
-				dev_num);
-			return dev_num;
+		if (!slave->dev_num_sticky) {
+			mutex_lock(&slave->bus->bus_lock);
+			dev_num = sdw_get_device_num(slave);
+			mutex_unlock(&slave->bus->bus_lock);
+			if (dev_num < 0) {
+				dev_err(slave->bus->dev, "Get dev_num failed: %d\n",
+					dev_num);
+				return dev_num;
+			}
+			slave->dev_num = dev_num;
+			slave->dev_num_sticky = dev_num;
+			new_device = true;
+		} else {
+			slave->dev_num = slave->dev_num_sticky;
 		}
-	} else {
+	}
+
+	if (!new_device)
 		dev_info(slave->bus->dev,
-			 "Slave already registered dev_num:%d\n",
+			 "Slave already registered, reusing dev_num:%d\n",
 			 slave->dev_num);
 
-		/* Clear the slave->dev_num to transfer message on device 0 */
-		dev_num = slave->dev_num;
-		slave->dev_num = 0;
-	}
+	/* Clear the slave->dev_num to transfer message on device 0 */
+	dev_num = slave->dev_num;
+	slave->dev_num = 0;
 
 	ret = sdw_write(slave, SDW_SCP_DEVNUMBER, dev_num);
 	if (ret < 0) {
@@ -485,7 +494,7 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
 	}
 
 	/* After xfer of msg, restore dev_num */
-	slave->dev_num = dev_num;
+	slave->dev_num = slave->dev_num_sticky;
 
 	return 0;
 }
@@ -979,6 +988,24 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
 	struct sdw_slave *slave;
 	int i, ret = 0;
 
+	/* first check if any Slaves fell off the bus */
+	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
+		mutex_lock(&bus->bus_lock);
+		if (test_bit(i, bus->assigned) == false) {
+			mutex_unlock(&bus->bus_lock);
+			continue;
+		}
+		mutex_unlock(&bus->bus_lock);
+
+		slave = sdw_get_slave(bus, i);
+		if (!slave)
+			continue;
+
+		if (status[i] == SDW_SLAVE_UNATTACHED &&
+		    slave->status != SDW_SLAVE_UNATTACHED)
+			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
+	}
+
 	if (status[0] == SDW_SLAVE_ATTACHED) {
 		dev_dbg(bus->dev, "Slave attached, programming device number\n");
 		ret = sdw_program_device_num(bus);
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index fed21e2..9bec270 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -74,6 +74,7 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
 #define CDNS_MCP_INTMASK			0x48
 
 #define CDNS_MCP_INT_IRQ			BIT(31)
+#define CDNS_MCP_INT_RESERVED1			GENMASK(30, 17)
 #define CDNS_MCP_INT_WAKEUP			BIT(16)
 #define CDNS_MCP_INT_SLAVE_RSVD			BIT(15)
 #define CDNS_MCP_INT_SLAVE_ALERT		BIT(14)
@@ -85,10 +86,12 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
 #define CDNS_MCP_INT_DATA_CLASH			BIT(9)
 #define CDNS_MCP_INT_PARITY			BIT(8)
 #define CDNS_MCP_INT_CMD_ERR			BIT(7)
+#define CDNS_MCP_INT_RESERVED2			GENMASK(6, 4)
 #define CDNS_MCP_INT_RX_NE			BIT(3)
 #define CDNS_MCP_INT_RX_WL			BIT(2)
 #define CDNS_MCP_INT_TXE			BIT(1)
 #define CDNS_MCP_INT_TXF			BIT(0)
+#define CDNS_MCP_INT_RESERVED (CDNS_MCP_INT_RESERVED1 | CDNS_MCP_INT_RESERVED2)
 
 #define CDNS_MCP_INTSET				0x4C
 
@@ -444,7 +447,8 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
 	time = wait_for_completion_timeout(&cdns->tx_complete,
 					   msecs_to_jiffies(CDNS_TX_TIMEOUT));
 	if (!time) {
-		dev_err(cdns->dev, "IO transfer timed out\n");
+		dev_err(cdns->dev, "IO transfer timed out, cmd %d device %d addr %x len %d\n",
+			cmd, msg->dev_num, msg->addr, msg->len);
 		msg->len = 0;
 		return SDW_CMD_TIMEOUT;
 	}
@@ -672,13 +676,36 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
 
 		/* first check if Slave reported multiple status */
 		if (set_status > 1) {
+			u32 val;
+
 			dev_warn_ratelimited(cdns->dev,
-					     "Slave reported multiple Status: %d\n",
-					     mask);
-			/*
-			 * TODO: we need to reread the status here by
-			 * issuing a PING cmd
-			 */
+					     "Slave %d reported multiple Status: %d\n",
+					     i, mask);
+
+			/* check latest status extracted from PING commands */
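+			/* MCP_SLAVE_STAT holds two status bits per device */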
+			val = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
+			val >>= (i * 2);
+
+			switch (val & 0x3) {
+			case 0:
+				status[i] = SDW_SLAVE_UNATTACHED;
+				break;
+			case 1:
+				status[i] = SDW_SLAVE_ATTACHED;
+				break;
+			case 2:
+				status[i] = SDW_SLAVE_ALERT;
+				break;
+			case 3:
+			default:
+				status[i] = SDW_SLAVE_RESERVED;
+				break;
+			}
+
+			dev_warn_ratelimited(cdns->dev,
+					     "Slave %d status updated to %d\n",
+					     i, status[i]);
+
 		}
 	}
 
@@ -705,6 +732,10 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
 
 	int_status = cdns_readl(cdns, CDNS_MCP_INTSTAT);
 
+	/* reserved bits are expected to read as zero; bail out if any are set */
+	if (int_status & CDNS_MCP_INT_RESERVED)
+		return IRQ_NONE;
+
 	if (!(int_status & CDNS_MCP_INT_IRQ))
 		return IRQ_NONE;
 
@@ -812,8 +843,9 @@ int sdw_cdns_exit_reset(struct sdw_cdns *cdns)
 EXPORT_SYMBOL(sdw_cdns_exit_reset);
 
 /**
- * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config
+ * sdw_cdns_enable_interrupt() - Enable SDW interrupts
  * @cdns: Cadence instance
+ * @state: true to enable interrupts, false to disable them
  */
 int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state)
 {
@@ -849,12 +881,21 @@ int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state)
 		mask = interrupt_mask;
 
 update_masks:
+	/* clear slave interrupt status before enabling interrupt */
+	if (state) {
+		u32 slave_state;
+
+		slave_state = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
+		cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave_state);
+		slave_state = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
+		cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave_state);
+	}
+
 	cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0, slave_intmask0);
 	cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1, slave_intmask1);
 	cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
 
-	/* commit changes */
-	return cdns_update_config(cdns);
+	return 0;
 }
 EXPORT_SYMBOL(sdw_cdns_enable_interrupt);
 
@@ -948,8 +989,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
 	ret = cdns_allocate_pdi(cdns, &stream->out,
 				stream->num_out, offset);
 
-	offset += stream->num_out;
-
 	if (ret)
 		return ret;
 
@@ -1224,8 +1263,10 @@ EXPORT_SYMBOL(cdns_set_sdw_stream);
  * cdns_find_pdi() - Find a free PDI
  *
  * @cdns: Cadence instance
+ * @offset: Starting offset
  * @num: Number of PDIs
  * @pdi: PDI instances
+ * @dai_id: DAI id
  *
  * Find a PDI for a given PDI array. The PDI num and dai_id are
  * expected to match, return NULL otherwise.
@@ -1277,6 +1318,7 @@ EXPORT_SYMBOL(sdw_cdns_config_stream);
  * @stream: Stream to be allocated
  * @ch: Channel count
  * @dir: Data direction
+ * @dai_id: DAI id
  */
 struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
 					struct sdw_cdns_streams *stream,
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 0371d3d..06ef3a3 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -880,6 +880,9 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus)
 				 "intel-sdw-ip-clock",
 				 &prop->mclk_freq);
 
+	/* the values reported by BIOS are the 2x clock, not the bus clock */
+	prop->mclk_freq /= 2;
+
 	fwnode_property_read_u32(link,
 				 "intel-quirk-mask",
 				 &quirk_mask);
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
new file mode 100644
index 0000000..1c6c6a2
--- /dev/null
+++ b/drivers/soundwire/qcom.c
@@ -0,0 +1,861 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019, Linaro Limited
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/slimbus.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_registers.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "bus.h"
+
+#define SWRM_COMP_HW_VERSION					0x00
+#define SWRM_COMP_CFG_ADDR					0x04
+#define SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK			BIT(1)
+#define SWRM_COMP_CFG_ENABLE_MSK				BIT(0)
+#define SWRM_COMP_PARAMS					0x100
+#define SWRM_COMP_PARAMS_DOUT_PORTS_MASK			GENMASK(4, 0)
+#define SWRM_COMP_PARAMS_DIN_PORTS_MASK				GENMASK(9, 5)
+#define SWRM_INTERRUPT_STATUS					0x200
+#define SWRM_INTERRUPT_STATUS_RMSK				GENMASK(16, 0)
+#define SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED		BIT(1)
+#define SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS		BIT(2)
+#define SWRM_INTERRUPT_STATUS_CMD_ERROR				BIT(7)
+#define SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED		BIT(10)
+#define SWRM_INTERRUPT_MASK_ADDR				0x204
+#define SWRM_INTERRUPT_CLEAR					0x208
+#define SWRM_CMD_FIFO_WR_CMD					0x300
+#define SWRM_CMD_FIFO_RD_CMD					0x304
+#define SWRM_CMD_FIFO_CMD					0x308
+#define SWRM_CMD_FIFO_STATUS					0x30C
+#define SWRM_CMD_FIFO_CFG_ADDR					0x314
+#define SWRM_RD_WR_CMD_RETRIES					0x7
+#define SWRM_CMD_FIFO_RD_FIFO_ADDR				0x318
+#define SWRM_ENUMERATOR_CFG_ADDR				0x500
+#define SWRM_MCP_FRAME_CTRL_BANK_ADDR(m)		(0x101C + 0x40 * (m))
+#define SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT			3
+#define SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK			GENMASK(2, 0)
+#define SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK			GENMASK(7, 3)
+#define SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT			0
+#define SWRM_MCP_CFG_ADDR					0x1048
+#define SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK		GENMASK(21, 17)
+#define SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_SHFT		0x11
+#define SWRM_DEF_CMD_NO_PINGS					0x1f
+#define SWRM_MCP_STATUS						0x104C
+#define SWRM_MCP_STATUS_BANK_NUM_MASK				BIT(0)
+#define SWRM_MCP_SLV_STATUS					0x1090
+#define SWRM_MCP_SLV_STATUS_MASK				GENMASK(1, 0)
+#define SWRM_DP_PORT_CTRL_BANK(n, m)	(0x1124 + 0x100 * (n - 1) + 0x40 * m)
+#define SWRM_DP_PORT_CTRL_EN_CHAN_SHFT				0x18
+#define SWRM_DP_PORT_CTRL_OFFSET2_SHFT				0x10
+#define SWRM_DP_PORT_CTRL_OFFSET1_SHFT				0x08
+#define SWRM_AHB_BRIDGE_WR_DATA_0				0xc85
+#define SWRM_AHB_BRIDGE_WR_ADDR_0				0xc89
+#define SWRM_AHB_BRIDGE_RD_ADDR_0				0xc8d
+#define SWRM_AHB_BRIDGE_RD_DATA_0				0xc91
+
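+/* command FIFO word layout: data[31:24] | dev addr[23:20] | cmd id[19:16] | reg addr[15:0] */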
+#define SWRM_REG_VAL_PACK(data, dev, id, reg)	\
+			((reg) | ((id) << 16) | ((dev) << 20) | ((data) << 24))
+
+#define SWRM_MAX_ROW_VAL	0 /* Rows = 48 */
+#define SWRM_DEFAULT_ROWS	48
+#define SWRM_MIN_COL_VAL	0 /* Cols = 2 */
+#define SWRM_DEFAULT_COL	16
+#define SWRM_MAX_COL_VAL	7
+#define SWRM_SPECIAL_CMD_ID	0xF
+#define MAX_FREQ_NUM		1
+#define TIMEOUT_MS		(2 * HZ)
+#define QCOM_SWRM_MAX_RD_LEN	0xf
+#define QCOM_SDW_MAX_PORTS	14
+#define DEFAULT_CLK_FREQ	9600000
+#define SWRM_MAX_DAIS		0xF
+
+struct qcom_swrm_port_config {
+	u8 si;
+	u8 off1;
+	u8 off2;
+};
+
+struct qcom_swrm_ctrl {
+	struct sdw_bus bus;
+	struct device *dev;
+	struct regmap *regmap;
+	struct completion *comp;
+	struct work_struct slave_work;
+	/* read/write lock */
+	spinlock_t comp_lock;
+	/* Port alloc/free lock */
+	struct mutex port_lock;
+	struct clk *hclk;
+	u8 wr_cmd_id;
+	u8 rd_cmd_id;
+	int irq;
+	unsigned int version;
+	int num_din_ports;
+	int num_dout_ports;
+	unsigned long dout_port_mask;
+	unsigned long din_port_mask;
+	struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS];
+	struct sdw_stream_runtime *sruntime[SWRM_MAX_DAIS];
+	enum sdw_slave_status status[SDW_MAX_DEVICES];
+	int (*reg_read)(struct qcom_swrm_ctrl *ctrl, int reg, u32 *val);
+	int (*reg_write)(struct qcom_swrm_ctrl *ctrl, int reg, int val);
+};
+
+#define to_qcom_sdw(b)	container_of(b, struct qcom_swrm_ctrl, bus)
+
+static int qcom_swrm_ahb_reg_read(struct qcom_swrm_ctrl *ctrl, int reg,
+				  u32 *val)
+{
+	struct regmap *wcd_regmap = ctrl->regmap;
+	int ret;
+
+	/* pg register + offset */
+	ret = regmap_bulk_write(wcd_regmap, SWRM_AHB_BRIDGE_RD_ADDR_0,
+			  (u8 *)&reg, 4);
+	if (ret < 0)
+		return SDW_CMD_FAIL;
+
+	ret = regmap_bulk_read(wcd_regmap, SWRM_AHB_BRIDGE_RD_DATA_0,
+			       val, 4);
+	if (ret < 0)
+		return SDW_CMD_FAIL;
+
+	return SDW_CMD_OK;
+}
+
+static int qcom_swrm_ahb_reg_write(struct qcom_swrm_ctrl *ctrl,
+				   int reg, int val)
+{
+	struct regmap *wcd_regmap = ctrl->regmap;
+	int ret;
+	/* pg register + offset */
+	ret = regmap_bulk_write(wcd_regmap, SWRM_AHB_BRIDGE_WR_DATA_0,
+			  (u8 *)&val, 4);
+	if (ret)
+		return SDW_CMD_FAIL;
+
+	/* write address register */
+	ret = regmap_bulk_write(wcd_regmap, SWRM_AHB_BRIDGE_WR_ADDR_0,
+			  (u8 *)&reg, 4);
+	if (ret)
+		return SDW_CMD_FAIL;
+
+	return SDW_CMD_OK;
+}
+
+static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *ctrl, u8 cmd_data,
+				     u8 dev_addr, u16 reg_addr)
+{
+	DECLARE_COMPLETION_ONSTACK(comp);
+	unsigned long flags;
+	u32 val;
+	int ret;
+
+	spin_lock_irqsave(&ctrl->comp_lock, flags);
+	ctrl->comp = &comp;
+	spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+	val = SWRM_REG_VAL_PACK(cmd_data, dev_addr,
+				SWRM_SPECIAL_CMD_ID, reg_addr);
+	ret = ctrl->reg_write(ctrl, SWRM_CMD_FIFO_WR_CMD, val);
+	if (ret)
+		goto err;
+
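+	/* the IRQ handler completes ctrl->comp when SPECIAL_CMD_ID_FINISHED fires */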
+	ret = wait_for_completion_timeout(ctrl->comp,
+					  msecs_to_jiffies(TIMEOUT_MS));
+
+	if (!ret)
+		ret = SDW_CMD_IGNORED;
+	else
+		ret = SDW_CMD_OK;
+err:
+	spin_lock_irqsave(&ctrl->comp_lock, flags);
+	ctrl->comp = NULL;
+	spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+
+	return ret;
+}
+
+static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *ctrl,
+				     u8 dev_addr, u16 reg_addr,
+				     u32 len, u8 *rval)
+{
+	int i, ret;
+	u32 val;
+	DECLARE_COMPLETION_ONSTACK(comp);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->comp_lock, flags);
+	ctrl->comp = &comp;
+	spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+
+	val = SWRM_REG_VAL_PACK(len, dev_addr, SWRM_SPECIAL_CMD_ID, reg_addr);
+	ret = ctrl->reg_write(ctrl, SWRM_CMD_FIFO_RD_CMD, val);
+	if (ret)
+		goto err;
+
+	ret = wait_for_completion_timeout(ctrl->comp,
+					  msecs_to_jiffies(TIMEOUT_MS));
+
+	if (!ret) {
+		ret = SDW_CMD_IGNORED;
+		goto err;
+	} else {
+		ret = SDW_CMD_OK;
+	}
+
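+	/* drain the read FIFO, one data byte per FIFO word */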
+	for (i = 0; i < len; i++) {
+		ctrl->reg_read(ctrl, SWRM_CMD_FIFO_RD_FIFO_ADDR, &val);
+		rval[i] = val & 0xFF;
+	}
+
+err:
+	spin_lock_irqsave(&ctrl->comp_lock, flags);
+	ctrl->comp = NULL;
+	spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+
+	return ret;
+}
+
+static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
+{
+	u32 val;
+	int i;
+
+	ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
+
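+	/* each device reports a 2-bit enumeration status in SWRM_MCP_SLV_STATUS */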
+	for (i = 0; i < SDW_MAX_DEVICES; i++) {
+		u32 s;
+
+		s = (val >> (i * 2));
+		s &= SWRM_MCP_SLV_STATUS_MASK;
+		ctrl->status[i] = s;
+	}
+}
+
+static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
+{
+	struct qcom_swrm_ctrl *ctrl = dev_id;
+	u32 sts, value;
+	unsigned long flags;
+
+	ctrl->reg_read(ctrl, SWRM_INTERRUPT_STATUS, &sts);
+
+	if (sts & SWRM_INTERRUPT_STATUS_CMD_ERROR) {
+		ctrl->reg_read(ctrl, SWRM_CMD_FIFO_STATUS, &value);
+		dev_err_ratelimited(ctrl->dev,
+				    "CMD error, fifo status 0x%x\n",
+				     value);
+		ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CMD, 0x1);
+	}
+
+	if ((sts & SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED) ||
+	    sts & SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS)
+		schedule_work(&ctrl->slave_work);
+
+	/*
+	 * Clear the interrupt before calling complete(): the completion can
+	 * kick off new reads/writes that rely on further interrupts, so
+	 * clearing first avoids missing them.
+	 */
+	ctrl->reg_write(ctrl, SWRM_INTERRUPT_CLEAR, sts);
+
+	if (sts & SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED) {
+		spin_lock_irqsave(&ctrl->comp_lock, flags);
+		if (ctrl->comp)
+			complete(ctrl->comp);
+		spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+	}
+
+	return IRQ_HANDLED;
+}
+static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
+{
+	u32 val;
+
+	/* Clear Rows and Cols */
+	val = (SWRM_MAX_ROW_VAL << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT |
+	       SWRM_MIN_COL_VAL << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT);
+
+	ctrl->reg_write(ctrl, SWRM_MCP_FRAME_CTRL_BANK_ADDR(0), val);
+
+	/* Disable Auto enumeration */
+	ctrl->reg_write(ctrl, SWRM_ENUMERATOR_CFG_ADDR, 0);
+
+	/* Mask soundwire interrupts */
+	ctrl->reg_write(ctrl, SWRM_INTERRUPT_MASK_ADDR,
+			SWRM_INTERRUPT_STATUS_RMSK);
+
+	/* Configure No pings */
+	ctrl->reg_read(ctrl, SWRM_MCP_CFG_ADDR, &val);
+	val &= ~SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK;
+	val |= (SWRM_DEF_CMD_NO_PINGS <<
+		SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_SHFT);
+	ctrl->reg_write(ctrl, SWRM_MCP_CFG_ADDR, val);
+
+	/* Configure number of retries of a read/write cmd */
+	ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CFG_ADDR, SWRM_RD_WR_CMD_RETRIES);
+
+	/* Set IRQ to PULSE */
+	ctrl->reg_write(ctrl, SWRM_COMP_CFG_ADDR,
+			SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK |
+			SWRM_COMP_CFG_ENABLE_MSK);
+	return 0;
+}
+
+static enum sdw_command_response qcom_swrm_xfer_msg(struct sdw_bus *bus,
+						    struct sdw_msg *msg)
+{
+	struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+	int ret, i, len;
+
+	if (msg->flags == SDW_MSG_FLAG_READ) {
+		for (i = 0; i < msg->len;) {
+			if ((msg->len - i) < QCOM_SWRM_MAX_RD_LEN)
+				len = msg->len - i;
+			else
+				len = QCOM_SWRM_MAX_RD_LEN;
+
+			ret = qcom_swrm_cmd_fifo_rd_cmd(ctrl, msg->dev_num,
+							msg->addr + i, len,
+						       &msg->buf[i]);
+			if (ret)
+				return ret;
+
+			i = i + len;
+		}
+	} else if (msg->flags == SDW_MSG_FLAG_WRITE) {
+		for (i = 0; i < msg->len; i++) {
+			ret = qcom_swrm_cmd_fifo_wr_cmd(ctrl, msg->buf[i],
+							msg->dev_num,
+						       msg->addr + i);
+			if (ret)
+				return SDW_CMD_IGNORED;
+		}
+	}
+
+	return SDW_CMD_OK;
+}
+
+static int qcom_swrm_pre_bank_switch(struct sdw_bus *bus)
+{
+	u32 reg = SWRM_MCP_FRAME_CTRL_BANK_ADDR(bus->params.next_bank);
+	struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+	u32 val;
+
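+	/* update the frame shape in the bank that becomes active after the switch */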
+	ctrl->reg_read(ctrl, reg, &val);
+
+	val &= ~SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK;
+	val &= ~SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK;
+
+	val |= (SWRM_MAX_ROW_VAL << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT |
+		SWRM_MAX_COL_VAL << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT);
+
+	return ctrl->reg_write(ctrl, reg, val);
+}
+
+static int qcom_swrm_port_params(struct sdw_bus *bus,
+				 struct sdw_port_params *p_params,
+				 unsigned int bank)
+{
+	/* TBD */
+	return 0;
+}
+
+static int qcom_swrm_transport_params(struct sdw_bus *bus,
+				      struct sdw_transport_params *params,
+				      enum sdw_reg_bank bank)
+{
+	struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+	u32 value;
+
+	value = params->offset1 << SWRM_DP_PORT_CTRL_OFFSET1_SHFT;
+	value |= params->offset2 << SWRM_DP_PORT_CTRL_OFFSET2_SHFT;
+	value |= params->sample_interval - 1;
+
+	return ctrl->reg_write(ctrl,
+			       SWRM_DP_PORT_CTRL_BANK((params->port_num), bank),
+			       value);
+}
+
+static int qcom_swrm_port_enable(struct sdw_bus *bus,
+				 struct sdw_enable_ch *enable_ch,
+				 unsigned int bank)
+{
+	u32 reg = SWRM_DP_PORT_CTRL_BANK(enable_ch->port_num, bank);
+	struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+	u32 val;
+
+	ctrl->reg_read(ctrl, reg, &val);
+
+	if (enable_ch->enable)
+		val |= (enable_ch->ch_mask << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
+	else
+		val &= ~(0xff << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
+
+	return ctrl->reg_write(ctrl, reg, val);
+}
+
+static struct sdw_master_port_ops qcom_swrm_port_ops = {
+	.dpn_set_port_params = qcom_swrm_port_params,
+	.dpn_set_port_transport_params = qcom_swrm_transport_params,
+	.dpn_port_enable_ch = qcom_swrm_port_enable,
+};
+
+static struct sdw_master_ops qcom_swrm_ops = {
+	.xfer_msg = qcom_swrm_xfer_msg,
+	.pre_bank_switch = qcom_swrm_pre_bank_switch,
+};
+
+static int qcom_swrm_compute_params(struct sdw_bus *bus)
+{
+	struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+	struct sdw_master_runtime *m_rt;
+	struct sdw_slave_runtime *s_rt;
+	struct sdw_port_runtime *p_rt;
+	struct qcom_swrm_port_config *pcfg;
+	int i = 0;
+
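+	/* transport parameters come from the static per-port config parsed from DT */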
+	list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+		list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
+			pcfg = &ctrl->pconfig[p_rt->num - 1];
+			p_rt->transport_params.port_num = p_rt->num;
+			p_rt->transport_params.sample_interval = pcfg->si + 1;
+			p_rt->transport_params.offset1 = pcfg->off1;
+			p_rt->transport_params.offset2 = pcfg->off2;
+		}
+
+		list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+			list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
+				pcfg = &ctrl->pconfig[i];
+				p_rt->transport_params.port_num = p_rt->num;
+				p_rt->transport_params.sample_interval =
+					pcfg->si + 1;
+				p_rt->transport_params.offset1 = pcfg->off1;
+				p_rt->transport_params.offset2 = pcfg->off2;
+				i++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static u32 qcom_swrm_freq_tbl[MAX_FREQ_NUM] = {
+	DEFAULT_CLK_FREQ,
+};
+
+static void qcom_swrm_slave_wq(struct work_struct *work)
+{
+	struct qcom_swrm_ctrl *ctrl =
+			container_of(work, struct qcom_swrm_ctrl, slave_work);
+
+	qcom_swrm_get_device_status(ctrl);
+	sdw_handle_slave_status(&ctrl->bus, ctrl->status);
+}
+
+
+static void qcom_swrm_stream_free_ports(struct qcom_swrm_ctrl *ctrl,
+					struct sdw_stream_runtime *stream)
+{
+	struct sdw_master_runtime *m_rt;
+	struct sdw_port_runtime *p_rt;
+	unsigned long *port_mask;
+
+	mutex_lock(&ctrl->port_lock);
+
+	list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+		if (m_rt->direction == SDW_DATA_DIR_RX)
+			port_mask = &ctrl->dout_port_mask;
+		else
+			port_mask = &ctrl->din_port_mask;
+
+		list_for_each_entry(p_rt, &m_rt->port_list, port_node)
+			clear_bit(p_rt->num - 1, port_mask);
+	}
+
+	mutex_unlock(&ctrl->port_lock);
+}
+
+static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
+					struct sdw_stream_runtime *stream,
+				       struct snd_pcm_hw_params *params,
+				       int direction)
+{
+	struct sdw_port_config pconfig[QCOM_SDW_MAX_PORTS];
+	struct sdw_stream_config sconfig;
+	struct sdw_master_runtime *m_rt;
+	struct sdw_slave_runtime *s_rt;
+	struct sdw_port_runtime *p_rt;
+	unsigned long *port_mask;
+	int i, maxport, pn, nports = 0, ret = 0;
+
+	mutex_lock(&ctrl->port_lock);
+	list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+		if (m_rt->direction == SDW_DATA_DIR_RX) {
+			maxport = ctrl->num_dout_ports;
+			port_mask = &ctrl->dout_port_mask;
+		} else {
+			maxport = ctrl->num_din_ports;
+			port_mask = &ctrl->din_port_mask;
+		}
+
+		list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+			list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
+				/* Port numbers range from 1 to 14 */
+				pn = find_first_zero_bit(port_mask, maxport);
+				if (pn > (maxport - 1)) {
+					dev_err(ctrl->dev, "All ports busy\n");
+					ret = -EBUSY;
+					goto err;
+				}
+				set_bit(pn, port_mask);
+				pconfig[nports].num = pn + 1;
+				pconfig[nports].ch_mask = p_rt->ch_mask;
+				nports++;
+			}
+		}
+	}
+
+	if (direction == SNDRV_PCM_STREAM_CAPTURE)
+		sconfig.direction = SDW_DATA_DIR_TX;
+	else
+		sconfig.direction = SDW_DATA_DIR_RX;
+
+	/* hw parameters will be ignored as we only support PDM */
+	sconfig.ch_count = 1;
+	sconfig.frame_rate = params_rate(params);
+	sconfig.type = stream->type;
+	sconfig.bps = 1;
+	ret = sdw_stream_add_master(&ctrl->bus, &sconfig, pconfig,
+				    nports, stream);
+err:
+	if (ret) {
+		for (i = 0; i < nports; i++)
+			clear_bit(pconfig[i].num - 1, port_mask);
+	}
+
+	mutex_unlock(&ctrl->port_lock);
+
+	return ret;
+}
+
+static int qcom_swrm_hw_params(struct snd_pcm_substream *substream,
+			       struct snd_pcm_hw_params *params,
+			      struct snd_soc_dai *dai)
+{
+	struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+	struct sdw_stream_runtime *sruntime = ctrl->sruntime[dai->id];
+	int ret;
+
+	ret = qcom_swrm_stream_alloc_ports(ctrl, sruntime, params,
+					   substream->stream);
+	if (ret)
+		qcom_swrm_stream_free_ports(ctrl, sruntime);
+
+	return ret;
+}
+
+static int qcom_swrm_hw_free(struct snd_pcm_substream *substream,
+			     struct snd_soc_dai *dai)
+{
+	struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+	struct sdw_stream_runtime *sruntime = ctrl->sruntime[dai->id];
+
+	qcom_swrm_stream_free_ports(ctrl, sruntime);
+	sdw_stream_remove_master(&ctrl->bus, sruntime);
+
+	return 0;
+}
+
+static int qcom_swrm_set_sdw_stream(struct snd_soc_dai *dai,
+				    void *stream, int direction)
+{
+	struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+
+	ctrl->sruntime[dai->id] = stream;
+
+	return 0;
+}
+
+static int qcom_swrm_startup(struct snd_pcm_substream *substream,
+			     struct snd_soc_dai *dai)
+{
+	struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct sdw_stream_runtime *sruntime;
+	int ret, i;
+
+	sruntime = sdw_alloc_stream(dai->name);
+	if (!sruntime)
+		return -ENOMEM;
+
+	ctrl->sruntime[dai->id] = sruntime;
+
+	for (i = 0; i < rtd->num_codecs; i++) {
+		ret = snd_soc_dai_set_sdw_stream(rtd->codec_dais[i], sruntime,
+						 substream->stream);
+		if (ret < 0 && ret != -ENOTSUPP) {
+			dev_err(dai->dev, "Failed to set sdw stream on %s\n",
+				rtd->codec_dais[i]->name);
+			sdw_release_stream(sruntime);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void qcom_swrm_shutdown(struct snd_pcm_substream *substream,
+			       struct snd_soc_dai *dai)
+{
+	struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+
+	sdw_release_stream(ctrl->sruntime[dai->id]);
+	ctrl->sruntime[dai->id] = NULL;
+}
+
+static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = {
+	.hw_params = qcom_swrm_hw_params,
+	.hw_free = qcom_swrm_hw_free,
+	.startup = qcom_swrm_startup,
+	.shutdown = qcom_swrm_shutdown,
+	.set_sdw_stream = qcom_swrm_set_sdw_stream,
+};
+
+static const struct snd_soc_component_driver qcom_swrm_dai_component = {
+	.name = "soundwire",
+};
+
+static int qcom_swrm_register_dais(struct qcom_swrm_ctrl *ctrl)
+{
+	int num_dais = ctrl->num_dout_ports + ctrl->num_din_ports;
+	struct snd_soc_dai_driver *dais;
+	struct snd_soc_pcm_stream *stream;
+	struct device *dev = ctrl->dev;
+	int i;
+
+	/* only PDM DAIs have been tested so far */
+	dais = devm_kcalloc(dev, num_dais, sizeof(*dais), GFP_KERNEL);
+	if (!dais)
+		return -ENOMEM;
+
+	for (i = 0; i < num_dais; i++) {
+		dais[i].name = devm_kasprintf(dev, GFP_KERNEL, "SDW Pin%d", i);
+		if (!dais[i].name)
+			return -ENOMEM;
+
+		if (i < ctrl->num_dout_ports)
+			stream = &dais[i].playback;
+		else
+			stream = &dais[i].capture;
+
+		stream->channels_min = 1;
+		stream->channels_max = 1;
+		stream->rates = SNDRV_PCM_RATE_48000;
+		stream->formats = SNDRV_PCM_FMTBIT_S16_LE;
+
+		dais[i].ops = &qcom_swrm_pdm_dai_ops;
+		dais[i].id = i;
+	}
+
+	return devm_snd_soc_register_component(ctrl->dev,
+						&qcom_swrm_dai_component,
+						dais, num_dais);
+}
+
+static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
+{
+	struct device_node *np = ctrl->dev->of_node;
+	u8 off1[QCOM_SDW_MAX_PORTS];
+	u8 off2[QCOM_SDW_MAX_PORTS];
+	u8 si[QCOM_SDW_MAX_PORTS];
+	int i, ret, nports, val;
+
+	ctrl->reg_read(ctrl, SWRM_COMP_PARAMS, &val);
+
+	ctrl->num_dout_ports = val & SWRM_COMP_PARAMS_DOUT_PORTS_MASK;
+	ctrl->num_din_ports = (val & SWRM_COMP_PARAMS_DIN_PORTS_MASK) >> 5;
+
+	ret = of_property_read_u32(np, "qcom,din-ports", &val);
+	if (ret)
+		return ret;
+
+	if (val > ctrl->num_din_ports)
+		return -EINVAL;
+
+	ctrl->num_din_ports = val;
+
+	ret = of_property_read_u32(np, "qcom,dout-ports", &val);
+	if (ret)
+		return ret;
+
+	if (val > ctrl->num_dout_ports)
+		return -EINVAL;
+
+	ctrl->num_dout_ports = val;
+
+	nports = ctrl->num_dout_ports + ctrl->num_din_ports;
+
+	ret = of_property_read_u8_array(np, "qcom,ports-offset1",
+					off1, nports);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u8_array(np, "qcom,ports-offset2",
+					off2, nports);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u8_array(np, "qcom,ports-sinterval-low",
+					si, nports);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < nports; i++) {
+		ctrl->pconfig[i].si = si[i];
+		ctrl->pconfig[i].off1 = off1[i];
+		ctrl->pconfig[i].off2 = off2[i];
+	}
+
+	return 0;
+}
+
+static int qcom_swrm_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct sdw_master_prop *prop;
+	struct sdw_bus_params *params;
+	struct qcom_swrm_ctrl *ctrl;
+	int ret;
+	u32 val;
+
+	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return -ENOMEM;
+
+	if (dev->parent->bus == &slimbus_bus) {
+		ctrl->reg_read = qcom_swrm_ahb_reg_read;
+		ctrl->reg_write = qcom_swrm_ahb_reg_write;
+		ctrl->regmap = dev_get_regmap(dev->parent, NULL);
+		if (!ctrl->regmap)
+			return -EINVAL;
+	} else {
+		/* Only WCD based SoundWire controller is supported */
+		return -ENOTSUPP;
+	}
+
+	ctrl->irq = of_irq_get(dev->of_node, 0);
+	if (ctrl->irq < 0)
+		return ctrl->irq;
+
+	ctrl->hclk = devm_clk_get(dev, "iface");
+	if (IS_ERR(ctrl->hclk))
+		return PTR_ERR(ctrl->hclk);
+
+	clk_prepare_enable(ctrl->hclk);
+
+	ctrl->dev = dev;
+	dev_set_drvdata(&pdev->dev, ctrl);
+	spin_lock_init(&ctrl->comp_lock);
+	mutex_init(&ctrl->port_lock);
+	INIT_WORK(&ctrl->slave_work, qcom_swrm_slave_wq);
+
+	ctrl->bus.dev = dev;
+	ctrl->bus.ops = &qcom_swrm_ops;
+	ctrl->bus.port_ops = &qcom_swrm_port_ops;
+	ctrl->bus.compute_params = &qcom_swrm_compute_params;
+
+	ret = qcom_swrm_get_port_config(ctrl);
+	if (ret)
+		return ret;
+
+	params = &ctrl->bus.params;
+	params->max_dr_freq = DEFAULT_CLK_FREQ;
+	params->curr_dr_freq = DEFAULT_CLK_FREQ;
+	params->col = SWRM_DEFAULT_COL;
+	params->row = SWRM_DEFAULT_ROWS;
+	ctrl->reg_read(ctrl, SWRM_MCP_STATUS, &val);
+	params->curr_bank = val & SWRM_MCP_STATUS_BANK_NUM_MASK;
+	params->next_bank = !params->curr_bank;
+
+	prop = &ctrl->bus.prop;
+	prop->max_clk_freq = DEFAULT_CLK_FREQ;
+	prop->num_clk_gears = 0;
+	prop->num_clk_freq = MAX_FREQ_NUM;
+	prop->clk_freq = &qcom_swrm_freq_tbl[0];
+	prop->default_col = SWRM_DEFAULT_COL;
+	prop->default_row = SWRM_DEFAULT_ROWS;
+
+	ctrl->reg_read(ctrl, SWRM_COMP_HW_VERSION, &ctrl->version);
+
+	ret = devm_request_threaded_irq(dev, ctrl->irq, NULL,
+					qcom_swrm_irq_handler,
+					IRQF_TRIGGER_RISING,
+					"soundwire", ctrl);
+	if (ret) {
+		dev_err(dev, "Failed to request soundwire irq\n");
+		goto err;
+	}
+
+	ret = sdw_add_bus_master(&ctrl->bus);
+	if (ret) {
+		dev_err(dev, "Failed to register Soundwire controller (%d)\n",
+			ret);
+		goto err;
+	}
+
+	qcom_swrm_init(ctrl);
+	ret = qcom_swrm_register_dais(ctrl);
+	if (ret)
+		goto err;
+
+	dev_info(dev, "Qualcomm Soundwire controller v%x.%x.%x Registered\n",
+		 (ctrl->version >> 24) & 0xff, (ctrl->version >> 16) & 0xff,
+		 ctrl->version & 0xffff);
+
+	return 0;
+err:
+	clk_disable_unprepare(ctrl->hclk);
+	return ret;
+}
+
+static int qcom_swrm_remove(struct platform_device *pdev)
+{
+	struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(&pdev->dev);
+
+	sdw_delete_bus_master(&ctrl->bus);
+	clk_disable_unprepare(ctrl->hclk);
+
+	return 0;
+}
+
+static const struct of_device_id qcom_swrm_of_match[] = {
+	{ .compatible = "qcom,soundwire-v1.3.0", },
+	{/* sentinel */},
+};
+
+MODULE_DEVICE_TABLE(of, qcom_swrm_of_match);
+
+static struct platform_driver qcom_swrm_driver = {
+	.probe	= &qcom_swrm_probe,
+	.remove = &qcom_swrm_remove,
+	.driver = {
+		.name	= "qcom-soundwire",
+		.of_match_table = qcom_swrm_of_match,
+	}
+};
+module_platform_driver(qcom_swrm_driver);
+
+MODULE_DESCRIPTION("Qualcomm soundwire driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index e69f94a..178ae92 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -1554,8 +1554,6 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream)
 	sdw_acquire_bus_lock(stream);
 
 	ret = _sdw_prepare_stream(stream);
-	if (ret < 0)
-		pr_err("Prepare for stream:%s failed: %d\n", stream->name, ret);
 
 	sdw_release_bus_lock(stream);
 	return ret;
@@ -1622,8 +1620,6 @@ int sdw_enable_stream(struct sdw_stream_runtime *stream)
 	sdw_acquire_bus_lock(stream);
 
 	ret = _sdw_enable_stream(stream);
-	if (ret < 0)
-		pr_err("Enable for stream:%s failed: %d\n", stream->name, ret);
 
 	sdw_release_bus_lock(stream);
 	return ret;
@@ -1698,8 +1694,6 @@ int sdw_disable_stream(struct sdw_stream_runtime *stream)
 	sdw_acquire_bus_lock(stream);
 
 	ret = _sdw_disable_stream(stream);
-	if (ret < 0)
-		pr_err("Disable for stream:%s failed: %d\n", stream->name, ret);
 
 	sdw_release_bus_lock(stream);
 	return ret;
@@ -1756,8 +1750,6 @@ int sdw_deprepare_stream(struct sdw_stream_runtime *stream)
 
 	sdw_acquire_bus_lock(stream);
 	ret = _sdw_deprepare_stream(stream);
-	if (ret < 0)
-		pr_err("De-prepare for stream:%d failed: %d\n", ret, ret);
 
 	sdw_release_bus_lock(stream);
 	return ret;
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index c7266ef..1f59beb 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -646,8 +646,7 @@ static int orion_spi_probe(struct platform_device *pdev)
 
 	/* The following clock is only used by some SoCs */
 	spi->axi_clk = devm_clk_get(&pdev->dev, "axi");
-	if (IS_ERR(spi->axi_clk) &&
-	    PTR_ERR(spi->axi_clk) == -EPROBE_DEFER) {
+	if (PTR_ERR(spi->axi_clk) == -EPROBE_DEFER) {
 		status = -EPROBE_DEFER;
 		goto out_rel_clk;
 	}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index eaf753b..baccd7c 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -42,10 +42,6 @@
 
 source "drivers/staging/rts5208/Kconfig"
 
-source "drivers/staging/octeon/Kconfig"
-
-source "drivers/staging/octeon-usb/Kconfig"
-
 source "drivers/staging/vt6655/Kconfig"
 
 source "drivers/staging/vt6656/Kconfig"
@@ -116,8 +112,6 @@
 
 source "drivers/staging/kpc2000/Kconfig"
 
-source "drivers/staging/isdn/Kconfig"
-
 source "drivers/staging/wusbcore/Kconfig"
 source "drivers/staging/uwb/Kconfig"
 
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 0a4396c..fdd03fd 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -12,8 +12,6 @@
 obj-$(CONFIG_R8188EU)		+= rtl8188eu/
 obj-$(CONFIG_RTS5208)		+= rts5208/
 obj-$(CONFIG_NETLOGIC_XLR_NET)	+= netlogic/
-obj-$(CONFIG_OCTEON_ETHERNET)	+= octeon/
-obj-$(CONFIG_OCTEON_USB)	+= octeon-usb/
 obj-$(CONFIG_VT6655)		+= vt6655/
 obj-$(CONFIG_VT6656)		+= vt6656/
 obj-$(CONFIG_VME_BUS)		+= vme/
@@ -48,10 +46,9 @@
 obj-$(CONFIG_XIL_AXIS_FIFO)	+= axis-fifo/
 obj-$(CONFIG_FIELDBUS_DEV)     += fieldbus/
 obj-$(CONFIG_KPC2000)		+= kpc2000/
-obj-$(CONFIG_ISDN_CAPI)		+= isdn/
 obj-$(CONFIG_UWB)		+= uwb/
 obj-$(CONFIG_USB_WUSB)		+= wusbcore/
-obj-$(CONFIG_EXFAT_FS)		+= exfat/
+obj-$(CONFIG_STAGING_EXFAT_FS)	+= exfat/
 obj-$(CONFIG_QLGE)		+= qlge/
 obj-$(CONFIG_NET_VENDOR_HP)	+= hp/
 obj-$(CONFIG_WFX)		+= wfx/
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 74d497d..5891d07 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -537,14 +537,14 @@ static int set_name(struct ashmem_area *asma, void __user *name)
 	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
 	if (len < 0)
 		return len;
-	if (len == ASHMEM_NAME_LEN)
-		local_name[ASHMEM_NAME_LEN - 1] = '\0';
+
 	mutex_lock(&ashmem_mutex);
 	/* cannot change an existing mapping's name */
 	if (asma->file)
 		ret = -EINVAL;
 	else
-		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
+		strscpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name,
+			ASHMEM_NAME_LEN);
 
 	mutex_unlock(&ashmem_mutex);
 	return ret;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index c394686..38b51ea 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -274,18 +274,6 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
 	_ion_buffer_destroy(buffer);
 }
 
-static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
-{
-	struct ion_buffer *buffer = dmabuf->priv;
-
-	return buffer->vaddr + offset * PAGE_SIZE;
-}
-
-static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
-			       void *ptr)
-{
-}
-
 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 					enum dma_data_direction direction)
 {
@@ -349,8 +337,6 @@ static const struct dma_buf_ops dma_buf_ops = {
 	.detach = ion_dma_buf_detatch,
 	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
 	.end_cpu_access = ion_dma_buf_end_cpu_access,
-	.map = ion_dma_buf_kmap,
-	.unmap = ion_dma_buf_kunmap,
 };
 
 static int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 39e6c59..5801067 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -16,7 +16,7 @@
 
 #include <linux/kernel.h>
 #include <linux/wait.h>
-#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
 #include <linux/device.h>
 #include <linux/cdev.h>
 #include <linux/init.h>
@@ -133,9 +133,9 @@ struct axis_fifo {
 	int has_tx_fifo; /* whether the IP has the tx fifo enabled */
 
 	wait_queue_head_t read_queue; /* wait queue for asynchronos read */
-	spinlock_t read_queue_lock; /* lock for reading waitqueue */
+	struct mutex read_lock; /* lock for reading */
 	wait_queue_head_t write_queue; /* wait queue for asynchronos write */
-	spinlock_t write_queue_lock; /* lock for writing waitqueue */
+	struct mutex write_lock; /* lock for writing */
 	unsigned int write_flags; /* write file flags */
 	unsigned int read_flags; /* read file flags */
 
@@ -336,7 +336,21 @@ static void reset_ip_core(struct axis_fifo *fifo)
 	iowrite32(XLLF_INT_ALL_MASK, fifo->base_addr + XLLF_ISR_OFFSET);
 }
 
-/* reads a single packet from the fifo as dictated by the tlast signal */
+/**
+ * axis_fifo_read() - Read a packet from AXIS-FIFO character device.
+ * @f: Open file.
+ * @buf: User space buffer to read to.
+ * @len: User space buffer length.
+ * @off: Buffer offset.
+ *
+ * As defined by the device's documentation, we need to check the device's
+ * occupancy before reading the length register and then the data. These
+ * operations must be executed atomically and in order, with no other
+ * access interleaved between them.
+ *
+ * Returns the number of bytes read from the device or negative error code
+ *	on failure.
+ */
 static ssize_t axis_fifo_read(struct file *f, char __user *buf,
 			      size_t len, loff_t *off)
 {
@@ -350,36 +364,37 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
 	u32 tmp_buf[READ_BUF_SIZE];
 
 	if (fifo->read_flags & O_NONBLOCK) {
-		/* opened in non-blocking mode
-		 * return if there are no packets available
+		/*
+		 * Device opened in non-blocking mode. Try to lock it and then
+		 * check if any packet is available.
 		 */
-		if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET))
+		if (!mutex_trylock(&fifo->read_lock))
 			return -EAGAIN;
+
+		if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET)) {
+			ret = -EAGAIN;
+			goto end_unlock;
+		}
 	} else {
 		/* opened in blocking mode
 		 * wait for a packet available interrupt (or timeout)
 		 * if nothing is currently available
 		 */
-		spin_lock_irq(&fifo->read_queue_lock);
-		ret = wait_event_interruptible_lock_irq_timeout
-			(fifo->read_queue,
-			 ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
-			 fifo->read_queue_lock,
-			 (read_timeout >= 0) ? msecs_to_jiffies(read_timeout) :
+		mutex_lock(&fifo->read_lock);
+		ret = wait_event_interruptible_timeout(fifo->read_queue,
+			ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
+			(read_timeout >= 0) ? msecs_to_jiffies(read_timeout) :
 				MAX_SCHEDULE_TIMEOUT);
-		spin_unlock_irq(&fifo->read_queue_lock);
 
-		if (ret == 0) {
-			/* timeout occurred */
-			dev_dbg(fifo->dt_device, "read timeout");
-			return -EAGAIN;
-		} else if (ret == -ERESTARTSYS) {
-			/* signal received */
-			return -ERESTARTSYS;
-		} else if (ret < 0) {
-			dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in read (ret=%i)\n",
-				ret);
-			return ret;
+		if (ret <= 0) {
+			if (ret == 0) {
+				ret = -EAGAIN;
+			} else if (ret != -ERESTARTSYS) {
+				dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in read (ret=%i)\n",
+					ret);
+			}
+
+			goto end_unlock;
 		}
 	}
 
@@ -387,14 +402,16 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
 	if (!bytes_available) {
 		dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n");
 		reset_ip_core(fifo);
-		return -EIO;
+		ret = -EIO;
+		goto end_unlock;
 	}
 
 	if (bytes_available > len) {
 		dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n",
 			bytes_available, len);
 		reset_ip_core(fifo);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto end_unlock;
 	}
 
 	if (bytes_available % sizeof(u32)) {
@@ -403,7 +420,8 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
 		 */
 		dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n");
 		reset_ip_core(fifo);
-		return -EIO;
+		ret = -EIO;
+		goto end_unlock;
 	}
 
 	words_available = bytes_available / sizeof(u32);
@@ -423,16 +441,37 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
 		if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
 				 copy * sizeof(u32))) {
 			reset_ip_core(fifo);
-			return -EFAULT;
+			ret = -EFAULT;
+			goto end_unlock;
 		}
 
 		copied += copy;
 		words_available -= copy;
 	}
 
-	return bytes_available;
+	ret = bytes_available;
+
+end_unlock:
+	mutex_unlock(&fifo->read_lock);
+
+	return ret;
 }
 
+/**
+ * axis_fifo_write() - Write buffer to AXIS-FIFO character device.
+ * @f: Open file.
+ * @buf: User space buffer to write to the device.
+ * @len: User space buffer length.
+ * @off: Buffer offset.
+ *
+ * As defined by the device's documentation, we need to write to the device's
+ * data buffer then to the device's packet length register atomically. Also,
+ * we need to lock before checking if the device has available space to avoid
+ * any concurrency issue.
+ *
+ * Returns the number of bytes written to the device or negative error code
+ *	on failure.
+ */
 static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
 			       size_t len, loff_t *off)
 {
@@ -465,12 +504,17 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
 	}
 
 	if (fifo->write_flags & O_NONBLOCK) {
-		/* opened in non-blocking mode
-		 * return if there is not enough room available in the fifo
+		/*
+		 * Device opened in non-blocking mode. Try to lock it and then
+		 * check if there is any room to write the given buffer.
 		 */
+		if (!mutex_trylock(&fifo->write_lock))
+			return -EAGAIN;
+
 		if (words_to_write > ioread32(fifo->base_addr +
 					      XLLF_TDFV_OFFSET)) {
-			return -EAGAIN;
+			ret = -EAGAIN;
+			goto end_unlock;
 		}
 	} else {
 		/* opened in blocking mode */
@@ -478,30 +522,22 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
 		/* wait for an interrupt (or timeout) if there isn't
 		 * currently enough room in the fifo
 		 */
-		spin_lock_irq(&fifo->write_queue_lock);
-		ret = wait_event_interruptible_lock_irq_timeout
-			(fifo->write_queue,
-			 ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
+		mutex_lock(&fifo->write_lock);
+		ret = wait_event_interruptible_timeout(fifo->write_queue,
+			ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
 				>= words_to_write,
-			 fifo->write_queue_lock,
-			 (write_timeout >= 0) ?
-				msecs_to_jiffies(write_timeout) :
+			(write_timeout >= 0) ? msecs_to_jiffies(write_timeout) :
 				MAX_SCHEDULE_TIMEOUT);
-		spin_unlock_irq(&fifo->write_queue_lock);
 
-		if (ret == 0) {
-			/* timeout occurred */
-			dev_dbg(fifo->dt_device, "write timeout\n");
-			return -EAGAIN;
-		} else if (ret == -ERESTARTSYS) {
-			/* signal received */
-			return -ERESTARTSYS;
-		} else if (ret < 0) {
-			/* unknown error */
-			dev_err(fifo->dt_device,
-				"wait_event_interruptible_timeout() error in write (ret=%i)\n",
-				ret);
-			return ret;
+		if (ret <= 0) {
+			if (ret == 0) {
+				ret = -EAGAIN;
+			} else if (ret != -ERESTARTSYS) {
+				dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in write (ret=%i)\n",
+					ret);
+			}
+
+			goto end_unlock;
 		}
 	}
 
@@ -515,7 +551,8 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
 		if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
 				   copy * sizeof(u32))) {
 			reset_ip_core(fifo);
-			return -EFAULT;
+			ret = -EFAULT;
+			goto end_unlock;
 		}
 
 		for (i = 0; i < copy; i++)
@@ -526,10 +563,15 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
 		words_to_write -= copy;
 	}
 
-	/* write packet size to fifo */
-	iowrite32(copied * sizeof(u32), fifo->base_addr + XLLF_TLR_OFFSET);
+	ret = copied * sizeof(u32);
 
-	return (ssize_t)copied * sizeof(u32);
+	/* write packet size to fifo */
+	iowrite32(ret, fifo->base_addr + XLLF_TLR_OFFSET);
+
+end_unlock:
+	mutex_unlock(&fifo->write_lock);
+
+	return ret;
 }
 
 static irqreturn_t axis_fifo_irq(int irq, void *dw)
@@ -789,8 +831,8 @@ static int axis_fifo_probe(struct platform_device *pdev)
 	init_waitqueue_head(&fifo->read_queue);
 	init_waitqueue_head(&fifo->write_queue);
 
-	spin_lock_init(&fifo->read_queue_lock);
-	spin_lock_init(&fifo->write_queue_lock);
+	mutex_init(&fifo->read_lock);
+	mutex_init(&fifo->write_lock);
 
 	/* ----------------------------
 	 *   init device memory space
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index f99211e..04e224f 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -279,7 +279,7 @@ static int das6402_ai_check_chanlist(struct comedi_device *dev,
 
 		if (aref0 == AREF_DIFF && chan > (s->n_chan / 2)) {
 			dev_dbg(dev->class_dev,
-				"chanlist differential channel to large\n");
+				"chanlist differential channel too large\n");
 			return -EINVAL;
 		}
 	}
diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig
index 0130019..292a19d 100644
--- a/drivers/staging/exfat/Kconfig
+++ b/drivers/staging/exfat/Kconfig
@@ -1,41 +1,41 @@
 # SPDX-License-Identifier: GPL-2.0
-config EXFAT_FS
+config STAGING_EXFAT_FS
 	tristate "exFAT fs support"
 	depends on BLOCK
 	select NLS
 	help
 	  This adds support for the exFAT file system.
 
-config EXFAT_DISCARD
+config STAGING_EXFAT_DISCARD
 	bool "enable discard support"
-	depends on EXFAT_FS
+	depends on STAGING_EXFAT_FS
 	default y
 
-config EXFAT_DELAYED_SYNC
+config STAGING_EXFAT_DELAYED_SYNC
 	bool "enable delayed sync"
-	depends on EXFAT_FS
+	depends on STAGING_EXFAT_FS
 	default n
 
-config EXFAT_KERNEL_DEBUG
+config STAGING_EXFAT_KERNEL_DEBUG
 	bool "enable kernel debug features via ioctl"
-	depends on EXFAT_FS
+	depends on STAGING_EXFAT_FS
 	default n
 
-config EXFAT_DEBUG_MSG
+config STAGING_EXFAT_DEBUG_MSG
 	bool "print debug messages"
-	depends on EXFAT_FS
+	depends on STAGING_EXFAT_FS
 	default n
 
-config EXFAT_DEFAULT_CODEPAGE
+config STAGING_EXFAT_DEFAULT_CODEPAGE
 	int "Default codepage for exFAT"
 	default 437
-	depends on EXFAT_FS
+	depends on STAGING_EXFAT_FS
 	help
 	  This option should be set to the codepage of your exFAT filesystems.
 
-config EXFAT_DEFAULT_IOCHARSET
+config STAGING_EXFAT_DEFAULT_IOCHARSET
 	string "Default iocharset for exFAT"
 	default "utf8"
-	depends on EXFAT_FS
+	depends on STAGING_EXFAT_FS
 	help
 	  Set this to the default input/output character set you'd like exFAT to use.
diff --git a/drivers/staging/exfat/Makefile b/drivers/staging/exfat/Makefile
index 6c90aec..057556e 100644
--- a/drivers/staging/exfat/Makefile
+++ b/drivers/staging/exfat/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-or-later
 
-obj-$(CONFIG_EXFAT_FS) += exfat.o
+obj-$(CONFIG_STAGING_EXFAT_FS) += exfat.o
 
 exfat-y :=	exfat_core.o	\
 		exfat_super.o	\
diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h
index 51c665a..4d87360 100644
--- a/drivers/staging/exfat/exfat.h
+++ b/drivers/staging/exfat/exfat.h
@@ -9,7 +9,7 @@
 #include <linux/types.h>
 #include <linux/buffer_head.h>
 
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
   /* For Debugging Purpose */
 	/* IOCTL code 'f' used by
 	 *   - file systems typically #0~0x1F
@@ -22,9 +22,9 @@
 
 #define EXFAT_DEBUGFLAGS_INVALID_UMOUNT	0x01
 #define EXFAT_DEBUGFLAGS_ERROR_RW	0x02
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
 
-#ifdef CONFIG_EXFAT_DEBUG_MSG
+#ifdef CONFIG_STAGING_EXFAT_DEBUG_MSG
 #define DEBUG	1
 #else
 #undef DEBUG
@@ -516,49 +516,6 @@ struct buf_cache_t {
 	struct buffer_head   *buf_bh;
 };
 
-struct fs_func {
-	s32	(*alloc_cluster)(struct super_block *sb, s32 num_alloc,
-				 struct chain_t *p_chain);
-	void	(*free_cluster)(struct super_block *sb, struct chain_t *p_chain,
-				s32 do_relse);
-	s32	(*count_used_clusters)(struct super_block *sb);
-
-	s32	(*init_dir_entry)(struct super_block *sb, struct chain_t *p_dir,
-				  s32 entry, u32 type, u32 start_clu, u64 size);
-	s32	(*init_ext_entry)(struct super_block *sb, struct chain_t *p_dir,
-				  s32 entry, s32 num_entries,
-				  struct uni_name_t *p_uniname,
-				  struct dos_name_t *p_dosname);
-	s32	(*find_dir_entry)(struct super_block *sb, struct chain_t *p_dir,
-				  struct uni_name_t *p_uniname, s32 num_entries,
-				  struct dos_name_t *p_dosname, u32 type);
-	void	(*delete_dir_entry)(struct super_block *sb,
-				    struct chain_t *p_dir, s32 entry,
-				    s32 offset, s32 num_entries);
-	void	(*get_uni_name_from_ext_entry)(struct super_block *sb,
-					       struct chain_t *p_dir, s32 entry,
-					       u16 *uniname);
-	s32	(*count_ext_entries)(struct super_block *sb,
-				     struct chain_t *p_dir, s32 entry,
-				     struct dentry_t *p_entry);
-	s32	(*calc_num_entries)(struct uni_name_t *p_uniname);
-
-	u32	(*get_entry_type)(struct dentry_t *p_entry);
-	void	(*set_entry_type)(struct dentry_t *p_entry, u32 type);
-	u32	(*get_entry_attr)(struct dentry_t *p_entry);
-	void	(*set_entry_attr)(struct dentry_t *p_entry, u32 attr);
-	u8	(*get_entry_flag)(struct dentry_t *p_entry);
-	void	(*set_entry_flag)(struct dentry_t *p_entry, u8 flag);
-	u32	(*get_entry_clu0)(struct dentry_t *p_entry);
-	void	(*set_entry_clu0)(struct dentry_t *p_entry, u32 clu0);
-	u64	(*get_entry_size)(struct dentry_t *p_entry);
-	void	(*set_entry_size)(struct dentry_t *p_entry, u64 size);
-	void	(*get_entry_time)(struct dentry_t *p_entry,
-				  struct timestamp_t *tp, u8 mode);
-	void	(*set_entry_time)(struct dentry_t *p_entry,
-				  struct timestamp_t *tp, u8 mode);
-};
-
 struct fs_info_t {
 	u32      drv;                    /* drive ID */
 	u32      vol_type;               /* volume FAT type */
@@ -597,7 +554,6 @@ struct fs_info_t {
 
 	u32 dev_ejected;	/* block device operation error flag */
 
-	struct fs_func *fs_func;
 	struct mutex v_mutex;
 
 	/* FAT cache */
@@ -661,10 +617,10 @@ struct exfat_mount_options {
 
 	/* on error: continue, panic, remount-ro */
 	unsigned char errors;
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
 	/* flag on if -o dicard specified and device support discard() */
 	unsigned char discard;
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
 };
 
 #define EXFAT_HASH_BITS		8
@@ -700,9 +656,9 @@ struct exfat_sb_info {
 
 	spinlock_t inode_hash_lock;
 	struct hlist_head inode_hashtable[EXFAT_HASH_SIZE];
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
 	long debug_flags;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
 };
 
 /*
@@ -829,5 +785,40 @@ int exfat_bdev_write(struct super_block *sb, sector_t secno,
 	       struct buffer_head *bh, u32 num_secs, bool sync);
 int exfat_bdev_sync(struct super_block *sb);
 
+/* cluster operation functions */
+s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+			struct chain_t *p_chain);
+void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+			s32 do_relse);
+s32 exfat_count_used_clusters(struct super_block *sb);
+
+/* dir operation functions */
+s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+			 struct uni_name_t *p_uniname, s32 num_entries,
+			 struct dos_name_t *p_dosname, u32 type);
+void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+			    s32 entry, s32 order, s32 num_entries);
+void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
+				       struct chain_t *p_dir, s32 entry,
+				       u16 *uniname);
+s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+			    s32 entry, struct dentry_t *p_entry);
+s32 exfat_calc_num_entries(struct uni_name_t *p_uniname);
+
+/* dir entry getter/setter */
+u32 exfat_get_entry_type(struct dentry_t *p_entry);
+u32 exfat_get_entry_attr(struct dentry_t *p_entry);
+void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr);
+u8 exfat_get_entry_flag(struct dentry_t *p_entry);
+void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags);
+u32 exfat_get_entry_clu0(struct dentry_t *p_entry);
+void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu);
+u64 exfat_get_entry_size(struct dentry_t *p_entry);
+void exfat_set_entry_size(struct dentry_t *p_entry, u64 size);
+void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+			  u8 mode);
+void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+			  u8 mode);
+
 extern const u8 uni_upcase[];
 #endif /* _EXFAT_H */
diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c
index 7bcd98b..0a3dc35 100644
--- a/drivers/staging/exfat/exfat_blkdev.c
+++ b/drivers/staging/exfat/exfat_blkdev.c
@@ -31,17 +31,17 @@ void exfat_bdev_close(struct super_block *sb)
 }
 
 int exfat_bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
-	      u32 num_secs, bool read)
+		    u32 num_secs, bool read)
 {
 	struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
 	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
 	struct exfat_sb_info *sbi = EXFAT_SB(sb);
 	long flags = sbi->debug_flags;
 
 	if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
 		return -EIO;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
 
 	if (!p_bd->opened)
 		return -ENODEV;
@@ -66,19 +66,19 @@ int exfat_bdev_read(struct super_block *sb, sector_t secno, struct buffer_head *
 }
 
 int exfat_bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
-	       u32 num_secs, bool sync)
+		     u32 num_secs, bool sync)
 {
 	s32 count;
 	struct buffer_head *bh2;
 	struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
 	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
 	struct exfat_sb_info *sbi = EXFAT_SB(sb);
 	long flags = sbi->debug_flags;
 
 	if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
 		return -EIO;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
 
 	if (!p_bd->opened)
 		return -ENODEV;
@@ -121,13 +121,13 @@ int exfat_bdev_write(struct super_block *sb, sector_t secno, struct buffer_head
 int exfat_bdev_sync(struct super_block *sb)
 {
 	struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
 	struct exfat_sb_info *sbi = EXFAT_SB(sb);
 	long flags = sbi->debug_flags;
 
 	if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
 		return -EIO;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
 
 	if (!p_bd->opened)
 		return -ENODEV;
diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c
index 794000e..07b460d0 100644
--- a/drivers/staging/exfat/exfat_core.c
+++ b/drivers/staging/exfat/exfat_core.c
@@ -177,11 +177,11 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
 {
 	int i, b;
 	sector_t sector;
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
 	struct exfat_sb_info *sbi = EXFAT_SB(sb);
 	struct exfat_mount_options *opts = &sbi->options;
 	int ret;
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
 	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
 	struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
 
@@ -192,7 +192,7 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
 
 	exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b);
 
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
 	if (opts->discard) {
 		ret = sb_issue_discard(sb, START_SECTOR(clu),
 				       (1 << p_fs->sectors_per_clu_bits),
@@ -204,7 +204,7 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
 			return ret;
 		}
 	}
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
 
 	return sector_write(sb, sector, p_fs->vol_amap[i], 0);
 }
@@ -249,7 +249,7 @@ static u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
 	return CLUSTER_32(~0);
 }
 
-static s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
 			struct chain_t *p_chain)
 {
 	s32 num_clusters = 0;
@@ -328,7 +328,7 @@ static s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
 	return num_clusters;
 }
 
-static void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
 			s32 do_relse)
 {
 	s32 num_clusters = 0;
@@ -434,7 +434,7 @@ s32 count_num_clusters(struct super_block *sb, struct chain_t *p_chain)
 	return count;
 }
 
-static s32 exfat_count_used_clusters(struct super_block *sb)
+s32 exfat_count_used_clusters(struct super_block *sb)
 {
 	int i, map_i, map_b, count = 0;
 	u8 k;
@@ -499,7 +499,7 @@ s32 load_alloc_bitmap(struct super_block *sb)
 			if (!ep)
 				return -ENOENT;
 
-			type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
+			type = exfat_get_entry_type((struct dentry_t *)ep);
 
 			if (type == TYPE_UNUSED)
 				break;
@@ -745,7 +745,7 @@ s32 load_upcase_table(struct super_block *sb)
 			if (!ep)
 				return -ENOENT;
 
-			type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
+			type = exfat_get_entry_type((struct dentry_t *)ep);
 
 			if (type == TYPE_UNUSED)
 				break;
@@ -787,7 +787,7 @@ void free_upcase_table(struct super_block *sb)
  *  Directory Entry Management Functions
  */
 
-static u32 exfat_get_entry_type(struct dentry_t *p_entry)
+u32 exfat_get_entry_type(struct dentry_t *p_entry)
 {
 	struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
 
@@ -862,56 +862,56 @@ static void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
 	}
 }
 
-static u32 exfat_get_entry_attr(struct dentry_t *p_entry)
+u32 exfat_get_entry_attr(struct dentry_t *p_entry)
 {
 	struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
 
 	return (u32)GET16_A(ep->attr);
 }
 
-static void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
+void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
 {
 	struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
 
 	SET16_A(ep->attr, (u16)attr);
 }
 
-static u8 exfat_get_entry_flag(struct dentry_t *p_entry)
+u8 exfat_get_entry_flag(struct dentry_t *p_entry)
 {
 	struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
 
 	return ep->flags;
 }
 
-static void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
+void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
 {
 	struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
 
 	ep->flags = flags;
 }
 
-static u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
+u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
 {
 	struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
 
 	return GET32_A(ep->start_clu);
 }
 
-static void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
+void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
 {
 	struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
 
 	SET32_A(ep->start_clu, start_clu);
 }
 
-static u64 exfat_get_entry_size(struct dentry_t *p_entry)
+u64 exfat_get_entry_size(struct dentry_t *p_entry)
 {
 	struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
 
 	return GET64_A(ep->valid_size);
 }
 
-static void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
+void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
 {
 	struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
 
@@ -919,7 +919,7 @@ static void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
 	SET64_A(ep->size, size);
 }
 
-static void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
 			  u8 mode)
 {
 	u16 t = 0x00, d = 0x21;
@@ -948,7 +948,7 @@ static void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *t
 	tp->year = (d >> 9);
 }
 
-static void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
 			  u8 mode)
 {
 	u16 t, d;
@@ -1013,7 +1013,7 @@ static void init_name_entry(struct name_dentry_t *ep, u16 *uniname)
 }
 
 static s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
-			 s32 entry, u32 type, u32 start_clu, u64 size)
+				s32 entry, u32 type, u32 start_clu, u64 size)
 {
 	sector_t sector;
 	u8 flags;
@@ -1088,20 +1088,19 @@ static s32 exfat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
 	return 0;
 }
 
-static void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
 			    s32 entry, s32 order, s32 num_entries)
 {
 	int i;
 	sector_t sector;
 	struct dentry_t *ep;
-	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
 
 	for (i = order; i < num_entries; i++) {
 		ep = get_entry_in_dir(sb, p_dir, entry + i, &sector);
 		if (!ep)
 			return;
 
-		p_fs->fs_func->set_entry_type(ep, TYPE_DELETED);
+		exfat_set_entry_type(ep, TYPE_DELETED);
 		exfat_buf_modify(sb, sector);
 	}
 }
@@ -1256,7 +1255,7 @@ static s32 _walk_fat_chain(struct super_block *sb, struct chain_t *p_dir,
 }
 
 static s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
-		  sector_t *sector, s32 *offset)
+			 sector_t *sector, s32 *offset)
 {
 	s32 off, ret;
 	u32 clu = 0;
@@ -1366,7 +1365,7 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
 		goto err_out;
 
 	ep = (struct dentry_t *)(buf + off);
-	entry_type = p_fs->fs_func->get_entry_type(ep);
+	entry_type = exfat_get_entry_type(ep);
 
 	if ((entry_type != TYPE_FILE) && (entry_type != TYPE_DIR))
 		goto err_out;
@@ -1396,7 +1395,7 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
 		 * instead of copying whole sector, we will check every entry.
 		 * this will provide minimum stablity and consistency.
 		 */
-		entry_type = p_fs->fs_func->get_entry_type(ep);
+		entry_type = exfat_get_entry_type(ep);
 
 		if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED))
 			goto err_out;
@@ -1492,7 +1491,8 @@ void release_entry_set(struct entry_set_cache_t *es)
 
 /* search EMPTY CONTINUOUS "num_entries" entries */
 static s32 search_deleted_or_unused_entry(struct super_block *sb,
-				   struct chain_t *p_dir, s32 num_entries)
+					  struct chain_t *p_dir,
+					  s32 num_entries)
 {
 	int i, dentry, num_empty = 0;
 	s32 dentries_per_clu;
@@ -1539,7 +1539,7 @@ static s32 search_deleted_or_unused_entry(struct super_block *sb,
 			if (!ep)
 				return -1;
 
-			type = p_fs->fs_func->get_entry_type(ep);
+			type = exfat_get_entry_type(ep);
 
 			if (type == TYPE_UNUSED) {
 				num_empty++;
@@ -1613,7 +1613,7 @@ static s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_
 		clu.flags = p_dir->flags;
 
 		/* (1) allocate a cluster */
-		ret = p_fs->fs_func->alloc_cluster(sb, 1, &clu);
+		ret = exfat_alloc_cluster(sb, 1, &clu);
 		if (ret < 1)
 			return -EIO;
 
@@ -1649,8 +1649,8 @@ static s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_
 					      fid->entry + 1, &sector);
 			if (!ep)
 				return -ENOENT;
-			p_fs->fs_func->set_entry_size(ep, size);
-			p_fs->fs_func->set_entry_flag(ep, p_dir->flags);
+			exfat_set_entry_size(ep, size);
+			exfat_set_entry_flag(ep, p_dir->flags);
 			exfat_buf_modify(sb, sector);
 
 			update_dir_checksum(sb, &fid->dir,
@@ -1668,7 +1668,7 @@ static s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_
 }
 
 static s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *uniname,
-				     s32 order)
+					    s32 order)
 {
 	int i, len = 0;
 
@@ -1689,7 +1689,7 @@ static s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *unina
  * -1 : (root dir, ".") it is the root dir itself
  * -2 : entry with the name does not exist
  */
-static s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
 			 struct uni_name_t *p_uniname, s32 num_entries,
 			 struct dos_name_t *p_dosname, u32 type)
 {
@@ -1735,7 +1735,7 @@ static s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
 			if (!ep)
 				return -2;
 
-			entry_type = p_fs->fs_func->get_entry_type(ep);
+			entry_type = exfat_get_entry_type(ep);
 			step = 1;
 
 			if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED)) {
@@ -1832,21 +1832,20 @@ static s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
 	return -2;
 }
 
-static s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
 			    s32 entry, struct dentry_t *p_entry)
 {
 	int i, count = 0;
 	u32 type;
 	struct file_dentry_t *file_ep = (struct file_dentry_t *)p_entry;
 	struct dentry_t *ext_ep;
-	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
 
 	for (i = 0, entry++; i < file_ep->num_ext; i++, entry++) {
 		ext_ep = get_entry_in_dir(sb, p_dir, entry, NULL);
 		if (!ext_ep)
 			return -1;
 
-		type = p_fs->fs_func->get_entry_type(ext_ep);
+		type = exfat_get_entry_type(ext_ep);
 		if ((type == TYPE_EXTEND) || (type == TYPE_STREAM))
 			count++;
 		else
@@ -1884,7 +1883,7 @@ s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
 			if (!ep)
 				return -ENOENT;
 
-			entry_type = p_fs->fs_func->get_entry_type(ep);
+			entry_type = exfat_get_entry_type(ep);
 
 			if (entry_type == TYPE_UNUSED)
 				return count;
@@ -1940,7 +1939,7 @@ bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir)
 			if (!ep)
 				break;
 
-			type = p_fs->fs_func->get_entry_type(ep);
+			type = exfat_get_entry_type(ep);
 
 			if (type == TYPE_UNUSED)
 				return true;
@@ -1984,9 +1983,8 @@ s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
 				 struct dos_name_t *p_dosname)
 {
 	s32 num_entries;
-	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
 
-	num_entries = p_fs->fs_func->calc_num_entries(p_uniname);
+	num_entries = exfat_calc_num_entries(p_uniname);
 	if (num_entries == 0)
 		return -EINVAL;
 
@@ -1995,14 +1993,13 @@ s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
 	return 0;
 }
 
-static void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
+void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
 				       struct chain_t *p_dir, s32 entry,
 				       u16 *uniname)
 {
 	int i;
 	struct dentry_t *ep;
 	struct entry_set_cache_t *es;
-	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
 
 	es = get_entry_set_in_dir(sb, p_dir, entry, ES_ALL_ENTRIES, &ep);
 	if (!es || es->num_entries < 3) {
@@ -2020,7 +2017,7 @@ static void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
 	 * So, the index of first file-name dentry should start from 2.
 	 */
 	for (i = 2; i < es->num_entries; i++, ep++) {
-		if (p_fs->fs_func->get_entry_type(ep) == TYPE_EXTEND)
+		if (exfat_get_entry_type(ep) == TYPE_EXTEND)
 			extract_uni_name_from_name_entry((struct name_dentry_t *)
 							 ep, uniname, i);
 		else
@@ -2032,7 +2029,7 @@ static void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
 	release_entry_set(es);
 }
 
-static s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
+s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
 {
 	s32 len;
 
@@ -2100,36 +2097,6 @@ s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
 	return 0;
 }
 
-/*
- *  File Operation Functions
- */
-static struct fs_func exfat_fs_func = {
-	.alloc_cluster = exfat_alloc_cluster,
-	.free_cluster = exfat_free_cluster,
-	.count_used_clusters = exfat_count_used_clusters,
-
-	.init_dir_entry = exfat_init_dir_entry,
-	.init_ext_entry = exfat_init_ext_entry,
-	.find_dir_entry = exfat_find_dir_entry,
-	.delete_dir_entry = exfat_delete_dir_entry,
-	.get_uni_name_from_ext_entry = exfat_get_uni_name_from_ext_entry,
-	.count_ext_entries = exfat_count_ext_entries,
-	.calc_num_entries = exfat_calc_num_entries,
-
-	.get_entry_type = exfat_get_entry_type,
-	.set_entry_type = exfat_set_entry_type,
-	.get_entry_attr = exfat_get_entry_attr,
-	.set_entry_attr = exfat_set_entry_attr,
-	.get_entry_flag = exfat_get_entry_flag,
-	.set_entry_flag = exfat_set_entry_flag,
-	.get_entry_clu0 = exfat_get_entry_clu0,
-	.set_entry_clu0 = exfat_set_entry_clu0,
-	.get_entry_size = exfat_get_entry_size,
-	.set_entry_size = exfat_set_entry_size,
-	.get_entry_time = exfat_get_entry_time,
-	.set_entry_time = exfat_set_entry_time,
-};
-
 s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
 {
 	struct bpbex_t *p_bpb = (struct bpbex_t *)p_pbr->bpb;
@@ -2173,8 +2140,6 @@ s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
 	p_fs->clu_srch_ptr = 2;
 	p_fs->used_clusters = UINT_MAX;
 
-	p_fs->fs_func = &exfat_fs_func;
-
 	return 0;
 }
 
@@ -2187,7 +2152,6 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
 	struct dos_name_t dos_name;
 	struct super_block *sb = inode->i_sb;
 	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-	struct fs_func *fs_func = p_fs->fs_func;
 
 	ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries,
 					   &dos_name);
@@ -2204,7 +2168,7 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
 	clu.flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
 
 	/* (1) allocate a cluster */
-	ret = fs_func->alloc_cluster(sb, 1, &clu);
+	ret = exfat_alloc_cluster(sb, 1, &clu);
 	if (ret < 0)
 		return ret;
 	else if (ret == 0)
@@ -2218,13 +2182,13 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
 
 	/* (2) update the directory entry */
 	/* make sub-dir entry in parent directory */
-	ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir,
-				      size);
+	ret = exfat_init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir,
+				   size);
 	if (ret != 0)
 		return ret;
 
-	ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
-				      &dos_name);
+	ret = exfat_init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
+				   &dos_name);
 	if (ret != 0)
 		return ret;
 
@@ -2252,7 +2216,6 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
 	struct dos_name_t dos_name;
 	struct super_block *sb = inode->i_sb;
 	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-	struct fs_func *fs_func = p_fs->fs_func;
 
 	ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries,
 					   &dos_name);
@@ -2268,13 +2231,13 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
 	/* fill the dos name directory entry information of the created file.
 	 * the first cluster is not determined yet. (0)
 	 */
-	ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode,
-				      CLUSTER_32(0), 0);
+	ret = exfat_init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode,
+				   CLUSTER_32(0), 0);
 	if (ret != 0)
 		return ret;
 
-	ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
-				      &dos_name);
+	ret = exfat_init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
+				   &dos_name);
 	if (ret != 0)
 		return ret;
 
@@ -2301,8 +2264,6 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
 	sector_t sector;
 	struct dentry_t *ep;
 	struct super_block *sb = inode->i_sb;
-	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-	struct fs_func *fs_func = p_fs->fs_func;
 
 	ep = get_entry_in_dir(sb, p_dir, entry, &sector);
 	if (!ep)
@@ -2311,7 +2272,7 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
 	exfat_buf_lock(sb, sector);
 
 	/* exfat_buf_lock() before call count_ext_entries() */
-	num_entries = fs_func->count_ext_entries(sb, p_dir, entry, ep);
+	num_entries = exfat_count_ext_entries(sb, p_dir, entry, ep);
 	if (num_entries < 0) {
 		exfat_buf_unlock(sb, sector);
 		return;
@@ -2321,7 +2282,7 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
 	exfat_buf_unlock(sb, sector);
 
 	/* (1) update the directory entry */
-	fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries);
+	exfat_delete_dir_entry(sb, p_dir, entry, 0, num_entries);
 }
 
 s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
@@ -2332,8 +2293,6 @@ s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
 	struct dos_name_t dos_name;
 	struct dentry_t *epold, *epnew;
 	struct super_block *sb = inode->i_sb;
-	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-	struct fs_func *fs_func = p_fs->fs_func;
 
 	epold = get_entry_in_dir(sb, p_dir, oldentry, &sector_old);
 	if (!epold)
@@ -2342,8 +2301,8 @@ s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
 	exfat_buf_lock(sb, sector_old);
 
 	/* exfat_buf_lock() before call count_ext_entries() */
-	num_old_entries = fs_func->count_ext_entries(sb, p_dir, oldentry,
-						     epold);
+	num_old_entries = exfat_count_ext_entries(sb, p_dir, oldentry,
+						  epold);
 	if (num_old_entries < 0) {
 		exfat_buf_unlock(sb, sector_old);
 		return -ENOENT;
@@ -2371,10 +2330,10 @@ s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
 		}
 
 		memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
-		if (fs_func->get_entry_type(epnew) == TYPE_FILE) {
-			fs_func->set_entry_attr(epnew,
-						fs_func->get_entry_attr(epnew) |
-						ATTR_ARCHIVE);
+		if (exfat_get_entry_type(epnew) == TYPE_FILE) {
+			exfat_set_entry_attr(epnew,
+					     exfat_get_entry_attr(epnew) |
+					     ATTR_ARCHIVE);
 			fid->attr |= ATTR_ARCHIVE;
 		}
 		exfat_buf_modify(sb, sector_new);
@@ -2395,33 +2354,33 @@ s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
 		exfat_buf_modify(sb, sector_new);
 		exfat_buf_unlock(sb, sector_old);
 
-		ret = fs_func->init_ext_entry(sb, p_dir, newentry,
-					      num_new_entries, p_uniname,
-					      &dos_name);
+		ret = exfat_init_ext_entry(sb, p_dir, newentry,
+					   num_new_entries, p_uniname,
+					   &dos_name);
 		if (ret != 0)
 			return ret;
 
-		fs_func->delete_dir_entry(sb, p_dir, oldentry, 0,
-					  num_old_entries);
+		exfat_delete_dir_entry(sb, p_dir, oldentry, 0,
+				       num_old_entries);
 		fid->entry = newentry;
 	} else {
-		if (fs_func->get_entry_type(epold) == TYPE_FILE) {
-			fs_func->set_entry_attr(epold,
-						fs_func->get_entry_attr(epold) |
-						ATTR_ARCHIVE);
+		if (exfat_get_entry_type(epold) == TYPE_FILE) {
+			exfat_set_entry_attr(epold,
+					     exfat_get_entry_attr(epold) |
+					     ATTR_ARCHIVE);
 			fid->attr |= ATTR_ARCHIVE;
 		}
 		exfat_buf_modify(sb, sector_old);
 		exfat_buf_unlock(sb, sector_old);
 
-		ret = fs_func->init_ext_entry(sb, p_dir, oldentry,
-					      num_new_entries, p_uniname,
-					      &dos_name);
+		ret = exfat_init_ext_entry(sb, p_dir, oldentry,
+					   num_new_entries, p_uniname,
+					   &dos_name);
 		if (ret != 0)
 			return ret;
 
-		fs_func->delete_dir_entry(sb, p_dir, oldentry, num_new_entries,
-					  num_old_entries);
+		exfat_delete_dir_entry(sb, p_dir, oldentry, num_new_entries,
+				       num_old_entries);
 	}
 
 	return 0;
@@ -2436,23 +2395,21 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
 	struct dos_name_t dos_name;
 	struct dentry_t *epmov, *epnew;
 	struct super_block *sb = inode->i_sb;
-	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-	struct fs_func *fs_func = p_fs->fs_func;
 
 	epmov = get_entry_in_dir(sb, p_olddir, oldentry, &sector_mov);
 	if (!epmov)
 		return -ENOENT;
 
 	/* check if the source and target directory is the same */
-	if (fs_func->get_entry_type(epmov) == TYPE_DIR &&
-	    fs_func->get_entry_clu0(epmov) == p_newdir->dir)
+	if (exfat_get_entry_type(epmov) == TYPE_DIR &&
+	    exfat_get_entry_clu0(epmov) == p_newdir->dir)
 		return -EINVAL;
 
 	exfat_buf_lock(sb, sector_mov);
 
 	/* exfat_buf_lock() before call count_ext_entries() */
-	num_old_entries = fs_func->count_ext_entries(sb, p_olddir, oldentry,
-						     epmov);
+	num_old_entries = exfat_count_ext_entries(sb, p_olddir, oldentry,
+						  epmov);
 	if (num_old_entries < 0) {
 		exfat_buf_unlock(sb, sector_mov);
 		return -ENOENT;
@@ -2479,9 +2436,9 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
 	}
 
 	memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
-	if (fs_func->get_entry_type(epnew) == TYPE_FILE) {
-		fs_func->set_entry_attr(epnew, fs_func->get_entry_attr(epnew) |
-					ATTR_ARCHIVE);
+	if (exfat_get_entry_type(epnew) == TYPE_FILE) {
+		exfat_set_entry_attr(epnew, exfat_get_entry_attr(epnew) |
+				     ATTR_ARCHIVE);
 		fid->attr |= ATTR_ARCHIVE;
 	}
 	exfat_buf_modify(sb, sector_new);
@@ -2501,12 +2458,12 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
 	exfat_buf_modify(sb, sector_new);
 	exfat_buf_unlock(sb, sector_mov);
 
-	ret = fs_func->init_ext_entry(sb, p_newdir, newentry, num_new_entries,
-				      p_uniname, &dos_name);
+	ret = exfat_init_ext_entry(sb, p_newdir, newentry, num_new_entries,
+				   p_uniname, &dos_name);
 	if (ret != 0)
 		return ret;
 
-	fs_func->delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);
+	exfat_delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);
 
 	fid->dir.dir = p_newdir->dir;
 	fid->dir.size = p_newdir->size;
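The exfat_core.c hunks above drop the per-filesystem fs_func operations table and call the exfat_* helpers directly (the helpers lose their static qualifier and gain prototypes in exfat.h). A minimal before/after sketch of that calling pattern, with the structures trimmed down purely for illustration:

	/* Illustrative sketch only -- not the full exfat definitions. */
	struct dentry_t;

	u32 exfat_get_entry_type(struct dentry_t *p_entry);

	/* Before: every helper was reached through a function-pointer table. */
	struct fs_func {
		u32 (*get_entry_type)(struct dentry_t *p_entry);
		/* ... one pointer per helper ... */
	};

	struct fs_info_t {
		struct fs_func *fs_func;
	};

	static u32 old_style(struct fs_info_t *p_fs, struct dentry_t *ep)
	{
		return p_fs->fs_func->get_entry_type(ep);
	}

	/* After: only one implementation (exfat_fs_func) ever existed in this
	 * driver, so the indirection is removed and the helper is called directly.
	 */
	static u32 new_style(struct dentry_t *ep)
	{
		return exfat_get_entry_type(ep);
	}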
diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c
index 9f91853..b81d2a8 100644
--- a/drivers/staging/exfat/exfat_super.c
+++ b/drivers/staging/exfat/exfat_super.c
@@ -38,8 +38,8 @@
 
 static struct kmem_cache *exfat_inode_cachep;
 
-static int exfat_default_codepage = CONFIG_EXFAT_DEFAULT_CODEPAGE;
-static char exfat_default_iocharset[] = CONFIG_EXFAT_DEFAULT_IOCHARSET;
+static int exfat_default_codepage = CONFIG_STAGING_EXFAT_DEFAULT_CODEPAGE;
+static char exfat_default_iocharset[] = CONFIG_STAGING_EXFAT_DEFAULT_IOCHARSET;
 
 #define INC_IVERSION(x) (inode_inc_iversion(x))
 #define GET_IVERSION(x) (inode_peek_iversion_raw(x))
@@ -365,7 +365,7 @@ static int ffsMountVol(struct super_block *sb)
 
 	if (p_bd->sector_size < sb->s_blocksize) {
 		printk(KERN_INFO "EXFAT: mount failed - sector size %d less than blocksize %ld\n",
-			p_bd->sector_size,  sb->s_blocksize);
+		       p_bd->sector_size,  sb->s_blocksize);
 		ret = -EINVAL;
 		goto out;
 	}
@@ -492,7 +492,7 @@ static int ffsGetVolInfo(struct super_block *sb, struct vol_info_t *info)
 	mutex_lock(&p_fs->v_mutex);
 
 	if (p_fs->used_clusters == UINT_MAX)
-		p_fs->used_clusters = p_fs->fs_func->count_used_clusters(sb);
+		p_fs->used_clusters = exfat_count_used_clusters(sb);
 
 	info->FatType = p_fs->vol_type;
 	info->ClusterSize = p_fs->cluster_size;
@@ -565,8 +565,8 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
 		goto out;
 
 	/* search the file name for directories */
-	dentry = p_fs->fs_func->find_dir_entry(sb, &dir, &uni_name, num_entries,
-					       &dos_name, TYPE_ALL);
+	dentry = exfat_find_dir_entry(sb, &dir, &uni_name, num_entries,
+				      &dos_name, TYPE_ALL);
 	if (dentry < -1) {
 		ret = -ENOENT;
 		goto out;
@@ -595,18 +595,18 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
 		}
 		ep2 = ep + 1;
 
-		fid->type = p_fs->fs_func->get_entry_type(ep);
+		fid->type = exfat_get_entry_type(ep);
 		fid->rwoffset = 0;
 		fid->hint_last_off = -1;
-		fid->attr = p_fs->fs_func->get_entry_attr(ep);
+		fid->attr = exfat_get_entry_attr(ep);
 
-		fid->size = p_fs->fs_func->get_entry_size(ep2);
+		fid->size = exfat_get_entry_size(ep2);
 		if ((fid->type == TYPE_FILE) && (fid->size == 0)) {
 			fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
 			fid->start_clu = CLUSTER_32(~0);
 		} else {
-			fid->flags = p_fs->fs_func->get_entry_flag(ep2);
-			fid->start_clu = p_fs->fs_func->get_entry_clu0(ep2);
+			fid->flags = exfat_get_entry_flag(ep2);
+			fid->start_clu = exfat_get_entry_clu0(ep2);
 		}
 
 		release_entry_set(es);
@@ -647,7 +647,7 @@ static int ffsCreateFile(struct inode *inode, char *path, u8 mode,
 	/* create a new file */
 	ret = create_file(inode, &dir, &uni_name, mode, fid);
 
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
 	fs_sync(sb, true);
 	fs_set_vol_flags(sb, VOL_CLEAN);
 #endif
@@ -886,9 +886,9 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
 			new_clu.flags = fid->flags;
 
 			/* (1) allocate a chain of clusters */
-			num_alloced = p_fs->fs_func->alloc_cluster(sb,
-								   num_alloc,
-								   &new_clu);
+			num_alloced = exfat_alloc_cluster(sb,
+							  num_alloc,
+							  &new_clu);
 			if (num_alloced == 0)
 				break;
 			if (num_alloced < 0) {
@@ -991,24 +991,24 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
 		goto err_out;
 	ep2 = ep + 1;
 
-	p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
-	p_fs->fs_func->set_entry_attr(ep, fid->attr);
+	exfat_set_entry_time(ep, tm_current(&tm), TM_MODIFY);
+	exfat_set_entry_attr(ep, fid->attr);
 
 	if (modified) {
-		if (p_fs->fs_func->get_entry_flag(ep2) != fid->flags)
-			p_fs->fs_func->set_entry_flag(ep2, fid->flags);
+		if (exfat_get_entry_flag(ep2) != fid->flags)
+			exfat_set_entry_flag(ep2, fid->flags);
 
-		if (p_fs->fs_func->get_entry_size(ep2) != fid->size)
-			p_fs->fs_func->set_entry_size(ep2, fid->size);
+		if (exfat_get_entry_size(ep2) != fid->size)
+			exfat_set_entry_size(ep2, fid->size);
 
-		if (p_fs->fs_func->get_entry_clu0(ep2) != fid->start_clu)
-			p_fs->fs_func->set_entry_clu0(ep2, fid->start_clu);
+		if (exfat_get_entry_clu0(ep2) != fid->start_clu)
+			exfat_set_entry_clu0(ep2, fid->start_clu);
 	}
 
 	update_dir_checksum_with_entry_set(sb, es);
 	release_entry_set(es);
 
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
 	fs_sync(sb, true);
 	fs_set_vol_flags(sb, VOL_CLEAN);
 #endif
@@ -1108,13 +1108,13 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
 		}
 	ep2 = ep + 1;
 
-	p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
-	p_fs->fs_func->set_entry_attr(ep, fid->attr);
+	exfat_set_entry_time(ep, tm_current(&tm), TM_MODIFY);
+	exfat_set_entry_attr(ep, fid->attr);
 
-	p_fs->fs_func->set_entry_size(ep2, new_size);
+	exfat_set_entry_size(ep2, new_size);
 	if (new_size == 0) {
-		p_fs->fs_func->set_entry_flag(ep2, 0x01);
-		p_fs->fs_func->set_entry_clu0(ep2, CLUSTER_32(0));
+		exfat_set_entry_flag(ep2, 0x01);
+		exfat_set_entry_clu0(ep2, CLUSTER_32(0));
 	}
 
 	update_dir_checksum_with_entry_set(sb, es);
@@ -1127,14 +1127,14 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
 	}
 
 	/* (3) free the clusters */
-	p_fs->fs_func->free_cluster(sb, &clu, 0);
+	exfat_free_cluster(sb, &clu, 0);
 
 	/* hint information */
 	fid->hint_last_off = -1;
 	if (fid->rwoffset > fid->size)
 		fid->rwoffset = fid->size;
 
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
 	fs_sync(sb, true);
 	fs_set_vol_flags(sb, VOL_CLEAN);
 #endif
@@ -1217,7 +1217,7 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
 		goto out2;
 	}
 
-	if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
+	if (exfat_get_entry_attr(ep) & ATTR_READONLY) {
 		ret = -EPERM;
 		goto out2;
 	}
@@ -1237,7 +1237,7 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
 		if (!ep)
 			goto out;
 
-		entry_type = p_fs->fs_func->get_entry_type(ep);
+		entry_type = exfat_get_entry_type(ep);
 
 		if (entry_type == TYPE_DIR) {
 			struct chain_t new_clu;
@@ -1274,15 +1274,15 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
 		if (!ep)
 			goto out;
 
-		num_entries = p_fs->fs_func->count_ext_entries(sb, p_dir,
-							       new_entry, ep);
+		num_entries = exfat_count_ext_entries(sb, p_dir,
+						      new_entry, ep);
 		if (num_entries < 0)
 			goto out;
-		p_fs->fs_func->delete_dir_entry(sb, p_dir, new_entry, 0,
-						num_entries + 1);
+		exfat_delete_dir_entry(sb, p_dir, new_entry, 0,
+				       num_entries + 1);
 	}
 out:
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
 	fs_sync(sb, true);
 	fs_set_vol_flags(sb, VOL_CLEAN);
 #endif
@@ -1324,7 +1324,7 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
 		goto out;
 	}
 
-	if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
+	if (exfat_get_entry_attr(ep) & ATTR_READONLY) {
 		ret = -EPERM;
 		goto out;
 	}
@@ -1338,13 +1338,13 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
 	clu_to_free.flags = fid->flags;
 
 	/* (2) free the clusters */
-	p_fs->fs_func->free_cluster(sb, &clu_to_free, 0);
+	exfat_free_cluster(sb, &clu_to_free, 0);
 
 	fid->size = 0;
 	fid->start_clu = CLUSTER_32(~0);
 	fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
 
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
 	fs_sync(sb, true);
 	fs_set_vol_flags(sb, VOL_CLEAN);
 #endif
@@ -1398,7 +1398,7 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
 		goto out;
 	}
 
-	type = p_fs->fs_func->get_entry_type(ep);
+	type = exfat_get_entry_type(ep);
 
 	if (((type == TYPE_FILE) && (attr & ATTR_SUBDIR)) ||
 	    ((type == TYPE_DIR) && (!(attr & ATTR_SUBDIR)))) {
@@ -1415,12 +1415,12 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
 
 	/* set the file attribute */
 	fid->attr = attr;
-	p_fs->fs_func->set_entry_attr(ep, attr);
+	exfat_set_entry_attr(ep, attr);
 
 	update_dir_checksum_with_entry_set(sb, es);
 	release_entry_set(es);
 
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
 	fs_sync(sb, true);
 	fs_set_vol_flags(sb, VOL_CLEAN);
 #endif
@@ -1481,7 +1481,7 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
 
 			count = count_dos_name_entries(sb, &dir, TYPE_DIR);
 			if (count < 0) {
-				ret = count; /* propogate error upward */
+				ret = count; /* propagate error upward */
 				goto out;
 			}
 			info->NumSubdirs = count;
@@ -1502,9 +1502,9 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
 	ep2 = ep + 1;
 
 	/* set FILE_INFO structure using the acquired struct dentry_t */
-	info->Attr = p_fs->fs_func->get_entry_attr(ep);
+	info->Attr = exfat_get_entry_attr(ep);
 
-	p_fs->fs_func->get_entry_time(ep, &tm, TM_CREATE);
+	exfat_get_entry_time(ep, &tm, TM_CREATE);
 	info->CreateTimestamp.Year = tm.year;
 	info->CreateTimestamp.Month = tm.mon;
 	info->CreateTimestamp.Day = tm.day;
@@ -1513,7 +1513,7 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
 	info->CreateTimestamp.Second = tm.sec;
 	info->CreateTimestamp.MilliSecond = 0;
 
-	p_fs->fs_func->get_entry_time(ep, &tm, TM_MODIFY);
+	exfat_get_entry_time(ep, &tm, TM_MODIFY);
 	info->ModifyTimestamp.Year = tm.year;
 	info->ModifyTimestamp.Month = tm.mon;
 	info->ModifyTimestamp.Day = tm.day;
@@ -1528,13 +1528,13 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
 	/* XXX this is very bad for exfat cuz name is already included in es.
 	 * API should be revised
 	 */
-	p_fs->fs_func->get_uni_name_from_ext_entry(sb, &fid->dir, fid->entry,
-						   uni_name.name);
+	exfat_get_uni_name_from_ext_entry(sb, &fid->dir, fid->entry,
+					  uni_name.name);
 	nls_uniname_to_cstring(sb, info->Name, &uni_name);
 
 	info->NumSubdirs = 2;
 
-	info->Size = p_fs->fs_func->get_entry_size(ep2);
+	info->Size = exfat_get_entry_size(ep2);
 
 	release_entry_set(es);
 
@@ -1548,7 +1548,7 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
 
 		count = count_dos_name_entries(sb, &dir, TYPE_DIR);
 		if (count < 0) {
-			ret = count; /* propogate error upward */
+			ret = count; /* propagate error upward */
 			goto out;
 		}
 		info->NumSubdirs += count;
@@ -1602,7 +1602,7 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
 	}
 	ep2 = ep + 1;
 
-	p_fs->fs_func->set_entry_attr(ep, info->Attr);
+	exfat_set_entry_attr(ep, info->Attr);
 
 	/* set FILE_INFO structure using the acquired struct dentry_t */
 	tm.sec  = info->CreateTimestamp.Second;
@@ -1611,7 +1611,7 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
 	tm.day  = info->CreateTimestamp.Day;
 	tm.mon  = info->CreateTimestamp.Month;
 	tm.year = info->CreateTimestamp.Year;
-	p_fs->fs_func->set_entry_time(ep, &tm, TM_CREATE);
+	exfat_set_entry_time(ep, &tm, TM_CREATE);
 
 	tm.sec  = info->ModifyTimestamp.Second;
 	tm.min  = info->ModifyTimestamp.Minute;
@@ -1619,9 +1619,9 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
 	tm.day  = info->ModifyTimestamp.Day;
 	tm.mon  = info->ModifyTimestamp.Month;
 	tm.year = info->ModifyTimestamp.Year;
-	p_fs->fs_func->set_entry_time(ep, &tm, TM_MODIFY);
+	exfat_set_entry_time(ep, &tm, TM_MODIFY);
 
-	p_fs->fs_func->set_entry_size(ep2, info->Size);
+	exfat_set_entry_size(ep2, info->Size);
 
 	update_dir_checksum_with_entry_set(sb, es);
 	release_entry_set(es);
@@ -1704,7 +1704,7 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
 		new_clu.flags = fid->flags;
 
 		/* (1) allocate a cluster */
-		num_alloced = p_fs->fs_func->alloc_cluster(sb, 1, &new_clu);
+		num_alloced = exfat_alloc_cluster(sb, 1, &new_clu);
 		if (num_alloced < 0) {
 			ret = -EIO;
 			goto out;
@@ -1744,13 +1744,11 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
 
 		/* (3) update directory entry */
 		if (modified) {
-			if (p_fs->fs_func->get_entry_flag(ep) != fid->flags)
-				p_fs->fs_func->set_entry_flag(ep, fid->flags);
+			if (exfat_get_entry_flag(ep) != fid->flags)
+				exfat_set_entry_flag(ep, fid->flags);
 
-			if (p_fs->fs_func->get_entry_clu0(ep) != fid->start_clu)
-				p_fs->fs_func->set_entry_clu0(ep,
-							      fid->start_clu);
-
+			if (exfat_get_entry_clu0(ep) != fid->start_clu)
+				exfat_set_entry_clu0(ep, fid->start_clu);
 		}
 
 		update_dir_checksum_with_entry_set(sb, es);
@@ -1804,7 +1802,7 @@ static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
 
 	ret = create_dir(inode, &dir, &uni_name, fid);
 
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
 	fs_sync(sb, true);
 	fs_set_vol_flags(sb, VOL_CLEAN);
 #endif
@@ -1831,7 +1829,6 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
 	struct dentry_t *ep;
 	struct super_block *sb = inode->i_sb;
 	struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-	struct fs_func *fs_func = p_fs->fs_func;
 	struct file_id_t *fid = &(EXFAT_I(inode)->fid);
 
 	/* check the validity of pointer parameters */
@@ -1913,7 +1910,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
 				ret = -ENOENT;
 				goto out;
 			}
-			type = fs_func->get_entry_type(ep);
+			type = exfat_get_entry_type(ep);
 
 			if (type == TYPE_UNUSED)
 				break;
@@ -1922,9 +1919,9 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
 				continue;
 
 			exfat_buf_lock(sb, sector);
-			dir_entry->Attr = fs_func->get_entry_attr(ep);
+			dir_entry->Attr = exfat_get_entry_attr(ep);
 
-			fs_func->get_entry_time(ep, &tm, TM_CREATE);
+			exfat_get_entry_time(ep, &tm, TM_CREATE);
 			dir_entry->CreateTimestamp.Year = tm.year;
 			dir_entry->CreateTimestamp.Month = tm.mon;
 			dir_entry->CreateTimestamp.Day = tm.day;
@@ -1933,7 +1930,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
 			dir_entry->CreateTimestamp.Second = tm.sec;
 			dir_entry->CreateTimestamp.MilliSecond = 0;
 
-			fs_func->get_entry_time(ep, &tm, TM_MODIFY);
+			exfat_get_entry_time(ep, &tm, TM_MODIFY);
 			dir_entry->ModifyTimestamp.Year = tm.year;
 			dir_entry->ModifyTimestamp.Month = tm.mon;
 			dir_entry->ModifyTimestamp.Day = tm.day;
@@ -1946,8 +1943,8 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
 			       sizeof(struct date_time_t));
 
 			*uni_name.name = 0x0;
-			fs_func->get_uni_name_from_ext_entry(sb, &dir, dentry,
-							     uni_name.name);
+			exfat_get_uni_name_from_ext_entry(sb, &dir, dentry,
+							  uni_name.name);
 			nls_uniname_to_cstring(sb, dir_entry->Name, &uni_name);
 			exfat_buf_unlock(sb, sector);
 
@@ -1957,7 +1954,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
 				goto out;
 			}
 
-			dir_entry->Size = fs_func->get_entry_size(ep);
+			dir_entry->Size = exfat_get_entry_size(ep);
 
 			/* hint information */
 			if (dir.dir == CLUSTER_32(0)) { /* FAT16 root_dir */
@@ -2047,13 +2044,13 @@ static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
 	remove_file(inode, &dir, dentry);
 
 	/* (2) free the clusters */
-	p_fs->fs_func->free_cluster(sb, &clu_to_free, 1);
+	exfat_free_cluster(sb, &clu_to_free, 1);
 
 	fid->size = 0;
 	fid->start_clu = CLUSTER_32(~0);
 	fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
 
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
 	fs_sync(sb, true);
 	fs_set_vol_flags(sb, VOL_CLEAN);
 #endif
@@ -2176,14 +2173,14 @@ static long exfat_generic_ioctl(struct file *filp, unsigned int cmd,
 				unsigned long arg)
 {
 	struct inode *inode = filp->f_path.dentry->d_inode;
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
 	unsigned int flags;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
 
 	switch (cmd) {
 	case EXFAT_IOCTL_GET_VOLUME_ID:
 		return exfat_ioctl_volume_id(inode);
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
 	case EXFAT_IOC_GET_DEBUGFLAGS: {
 		struct super_block *sb = inode->i_sb;
 		struct exfat_sb_info *sbi = EXFAT_SB(sb);
@@ -2207,7 +2204,7 @@ static long exfat_generic_ioctl(struct file *filp, unsigned int cmd,
 
 		return 0;
 	}
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
 	default:
 		return -ENOTTY; /* Inappropriate ioctl for device */
 	}
@@ -3400,7 +3397,7 @@ static int exfat_show_options(struct seq_file *m, struct dentry *root)
 		seq_puts(m, ",errors=panic");
 	else
 		seq_puts(m, ",errors=remount-ro");
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
 	if (opts->discard)
 		seq_puts(m, ",discard");
 #endif
@@ -3481,7 +3478,7 @@ enum {
 	Opt_err_ro,
 	Opt_utf8_hack,
 	Opt_err,
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
 	Opt_discard,
 #endif /* EXFAT_CONFIG_DISCARD */
 };
@@ -3501,9 +3498,9 @@ static const match_table_t exfat_tokens = {
 	{Opt_err_panic, "errors=panic"},
 	{Opt_err_ro, "errors=remount-ro"},
 	{Opt_utf8_hack, "utf8"},
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
 	{Opt_discard, "discard"},
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
 	{Opt_err, NULL}
 };
 
@@ -3524,7 +3521,7 @@ static int parse_options(char *options, int silent, int *debug,
 	opts->iocharset = exfat_default_iocharset;
 	opts->casesensitive = 0;
 	opts->errors = EXFAT_ERRORS_RO;
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
 	opts->discard = 0;
 #endif
 	*debug = 0;
@@ -3595,11 +3592,11 @@ static int parse_options(char *options, int silent, int *debug,
 		case Opt_debug:
 			*debug = 1;
 			break;
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
 		case Opt_discard:
 			opts->discard = 1;
 			break;
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
 		case Opt_utf8_hack:
 			break;
 		default:
@@ -3803,7 +3800,7 @@ static void __exit exfat_destroy_inodecache(void)
 	kmem_cache_destroy(exfat_inode_cachep);
 }
 
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
 static void exfat_debug_kill_sb(struct super_block *sb)
 {
 	struct exfat_sb_info *sbi = EXFAT_SB(sb);
@@ -3831,17 +3828,17 @@ static void exfat_debug_kill_sb(struct super_block *sb)
 
 	kill_block_super(sb);
 }
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
 
 static struct file_system_type exfat_fs_type = {
 	.owner       = THIS_MODULE,
 	.name        = "exfat",
 	.mount       = exfat_fs_mount,
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
 	.kill_sb    = exfat_debug_kill_sb,
 #else
 	.kill_sb    = kill_block_super,
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
 	.fs_flags    = FS_REQUIRES_DEV,
 };
 
diff --git a/drivers/staging/hp/hp100.c b/drivers/staging/hp/hp100.c
index 6ec78f5..e2f0b58 100644
--- a/drivers/staging/hp/hp100.c
+++ b/drivers/staging/hp/hp100.c
@@ -339,14 +339,11 @@ static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
 	if (sig == NULL)
 		goto err;
 
-	for (i = 0; i < ARRAY_SIZE(hp100_isa_tbl); i++) {
-		if (!strcmp(hp100_isa_tbl[i], sig))
-			break;
+	i = match_string(hp100_isa_tbl, ARRAY_SIZE(hp100_isa_tbl), sig);
+	if (i < 0)
+		goto err;
 
-	}
-
-	if (i < ARRAY_SIZE(hp100_isa_tbl))
-		return hp100_probe1(dev, ioaddr, HP100_BUS_ISA, NULL);
+	return hp100_probe1(dev, ioaddr, HP100_BUS_ISA, NULL);
  err:
 	return -ENODEV;
 
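The hp100 hunk above replaces an open-coded strcmp() loop with match_string(), which returns the index of the matching entry or a negative errno. A stand-alone sketch of the same pattern (the table contents here are made up, not the real hp100 signatures):

	#include <linux/kernel.h>	/* ARRAY_SIZE() */
	#include <linux/string.h>	/* match_string() */
	#include <linux/errno.h>

	static const char * const sig_tbl[] = { "SIG_A", "SIG_B" };	/* example values */

	static int lookup_sig(const char *sig)
	{
		int i;

		/* match_string() returns the array index, or -EINVAL if not found. */
		i = match_string(sig_tbl, ARRAY_SIZE(sig_tbl), sig);
		if (i < 0)
			return -ENODEV;

		return i;
	}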
diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c
index 3968713..39dfe3f 100644
--- a/drivers/staging/iio/accel/adis16203.c
+++ b/drivers/staging/iio/accel/adis16203.c
@@ -237,6 +237,12 @@ static const char * const adis16203_status_error_msgs[] = {
 	[ADIS16203_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
 };
 
+static const struct adis_timeout adis16203_timeouts = {
+	.reset_ms = ADIS16203_STARTUP_DELAY,
+	.sw_reset_ms = ADIS16203_STARTUP_DELAY,
+	.self_test_ms = ADIS16203_STARTUP_DELAY
+};
+
 static const struct adis_data adis16203_data = {
 	.read_delay = 20,
 	.msc_ctrl_reg = ADIS16203_MSC_CTRL,
@@ -245,7 +251,7 @@ static const struct adis_data adis16203_data = {
 
 	.self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN,
 	.self_test_no_autoclear = true,
-	.startup_delay = ADIS16203_STARTUP_DELAY,
+	.timeouts = &adis16203_timeouts,
 
 	.status_error_msgs = adis16203_status_error_msgs,
 	.status_error_mask = BIT(ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT) |
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
index a480409..39eb836 100644
--- a/drivers/staging/iio/accel/adis16240.c
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -359,6 +359,12 @@ static const char * const adis16240_status_error_msgs[] = {
 	[ADIS16240_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.225V",
 };
 
+static const struct adis_timeout adis16240_timeouts = {
+	.reset_ms = ADIS16240_STARTUP_DELAY,
+	.sw_reset_ms = ADIS16240_STARTUP_DELAY,
+	.self_test_ms = ADIS16240_STARTUP_DELAY,
+};
+
 static const struct adis_data adis16240_data = {
 	.write_delay = 35,
 	.read_delay = 35,
@@ -368,7 +374,7 @@ static const struct adis_data adis16240_data = {
 
 	.self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN,
 	.self_test_no_autoclear = true,
-	.startup_delay = ADIS16240_STARTUP_DELAY,
+	.timeouts = &adis16240_timeouts,
 
 	.status_error_msgs = adis16240_status_error_msgs,
 	.status_error_mask = BIT(ADIS16240_DIAG_STAT_PWRON_FAIL_BIT) |
@@ -399,6 +405,13 @@ static int adis16240_probe(struct spi_device *spi)
 	indio_dev->num_channels = ARRAY_SIZE(adis16240_channels);
 	indio_dev->modes = INDIO_DIRECT_MODE;
 
+	spi->mode = SPI_MODE_3;
+	ret = spi_setup(spi);
+	if (ret) {
+		dev_err(&spi->dev, "spi_setup failed!\n");
+		return ret;
+	}
+
 	ret = adis_init(st, indio_dev, spi, &adis16240_data);
 	if (ret)
 		return ret;
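Both ADIS hunks follow the same conversion: the single startup_delay value is replaced by a struct adis_timeout carrying separate reset, software-reset and self-test delays, with all three reusing the old startup value. A hedged sketch of the pattern for a hypothetical driver (device name and delay value are made up; the field names come from the hunks above):

	#include <linux/iio/imu/adis.h>

	#define MYDEV_STARTUP_DELAY	220	/* ms, hypothetical */

	static const struct adis_timeout mydev_timeouts = {
		.reset_ms = MYDEV_STARTUP_DELAY,
		.sw_reset_ms = MYDEV_STARTUP_DELAY,
		.self_test_ms = MYDEV_STARTUP_DELAY,
	};

	static const struct adis_data mydev_data = {
		/* ... register layout, masks, status error strings ... */
		.timeouts = &mydev_timeouts,	/* replaces the old .startup_delay */
	};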
diff --git a/drivers/staging/isdn/Kconfig b/drivers/staging/isdn/Kconfig
deleted file mode 100644
index faaf638..0000000
--- a/drivers/staging/isdn/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "ISDN CAPI drivers"
-	depends on ISDN_CAPI
-
-source "drivers/staging/isdn/avm/Kconfig"
-
-source "drivers/staging/isdn/gigaset/Kconfig"
-
-source "drivers/staging/isdn/hysdn/Kconfig"
-
-endmenu
-
diff --git a/drivers/staging/isdn/Makefile b/drivers/staging/isdn/Makefile
deleted file mode 100644
index 025504b..0000000
--- a/drivers/staging/isdn/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Makefile for the kernel ISDN subsystem and device drivers.
-
-# Object files in subdirectories
-
-obj-$(CONFIG_CAPI_AVM)			+= avm/
-obj-$(CONFIG_HYSDN)			+= hysdn/
-obj-$(CONFIG_ISDN_DRV_GIGASET)		+= gigaset/
diff --git a/drivers/staging/isdn/TODO b/drivers/staging/isdn/TODO
deleted file mode 100644
index 9210d11..0000000
--- a/drivers/staging/isdn/TODO
+++ /dev/null
@@ -1,22 +0,0 @@
-TODO: Remove in late 2019 unless there are users
-
-
-I tried to find any indication of whether the capi drivers are
-still in use, and have not found anything  from a long time ago.
-
-With public ISDN networks almost completely shut down over the past 12
-months, there is very little you can actually do with this hardware. The
-main remaining use case would be to connect ISDN voice phones to an
-in-house installation with Asterisk or LCR, but anyone trying this in
-turn seems to be using either the mISDN driver stack, or out-of-tree
-drivers from the hardware vendors.
-
-I may of course have missed something, so I would suggest moving
-these into drivers/staging/ just in case someone still uses one
-of the three remaining in-kernel drivers (avm, hysdn, gigaset).
-
-If nobody complains, we can remove them entirely in six months,
-or otherwise move the core code and any drivers that are still
-needed back into drivers/isdn.
-
-  Arnd Bergmann <arnd@arndb.de>
diff --git a/drivers/staging/isdn/avm/Kconfig b/drivers/staging/isdn/avm/Kconfig
deleted file mode 100644
index 81483db..0000000
--- a/drivers/staging/isdn/avm/Kconfig
+++ /dev/null
@@ -1,65 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# ISDN AVM drivers
-#
-
-menuconfig CAPI_AVM
-	bool "Active AVM cards"
-	help
-	  Enable support for AVM active ISDN cards.
-
-if CAPI_AVM
-
-config ISDN_DRV_AVMB1_B1ISA
-	tristate "AVM B1 ISA support"
-	depends on ISA
-	help
-	  Enable support for the ISA version of the AVM B1 card.
-
-config ISDN_DRV_AVMB1_B1PCI
-	tristate "AVM B1 PCI support"
-	depends on PCI
-	help
-	  Enable support for the PCI version of the AVM B1 card.
-
-config ISDN_DRV_AVMB1_B1PCIV4
-	bool "AVM B1 PCI V4 support"
-	depends on ISDN_DRV_AVMB1_B1PCI
-	help
-	  Enable support for the V4 version of AVM B1 PCI card.
-
-config ISDN_DRV_AVMB1_T1ISA
-	tristate "AVM T1/T1-B ISA support"
-	depends on ISA
-	help
-	  Enable support for the AVM T1 T1B card.
-	  Note: This is a PRI card and handle 30 B-channels.
-
-config ISDN_DRV_AVMB1_B1PCMCIA
-	tristate "AVM B1/M1/M2 PCMCIA support"
-	depends on PCMCIA
-	help
-	  Enable support for the PCMCIA version of the AVM B1 card.
-
-config ISDN_DRV_AVMB1_AVM_CS
-	tristate "AVM B1/M1/M2 PCMCIA cs module"
-	depends on ISDN_DRV_AVMB1_B1PCMCIA
-	help
-	  Enable the PCMCIA client driver for the AVM B1/M1/M2
-	  PCMCIA cards.
-
-config ISDN_DRV_AVMB1_T1PCI
-	tristate "AVM T1/T1-B PCI support"
-	depends on PCI
-	help
-	  Enable support for the AVM T1 T1B card.
-	  Note: This is a PRI card and handle 30 B-channels.
-
-config ISDN_DRV_AVMB1_C4
-	tristate "AVM C4/C2 support"
-	depends on PCI
-	help
-	  Enable support for the AVM C4/C2 PCI cards.
-	  These cards handle 4/2 BRI ISDN lines (8/4 channels).
-
-endif # CAPI_AVM
diff --git a/drivers/staging/isdn/avm/Makefile b/drivers/staging/isdn/avm/Makefile
deleted file mode 100644
index 3830a05..0000000
--- a/drivers/staging/isdn/avm/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Makefile for the AVM ISDN device drivers
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_ISDN_DRV_AVMB1_B1ISA)	+= b1isa.o b1.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_B1PCI)	+= b1pci.o b1.o b1dma.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_B1PCMCIA)	+= b1pcmcia.o b1.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_AVM_CS)	+= avm_cs.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_T1ISA)	+= t1isa.o b1.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_T1PCI)	+= t1pci.o b1.o b1dma.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_C4)		+= c4.o b1.o
diff --git a/drivers/staging/isdn/avm/avm_cs.c b/drivers/staging/isdn/avm/avm_cs.c
deleted file mode 100644
index 62b8030..0000000
--- a/drivers/staging/isdn/avm/avm_cs.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/* $Id: avm_cs.c,v 1.4.6.3 2001/09/23 22:24:33 kai Exp $
- *
- * A PCMCIA client driver for AVM B1/M1/M2
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/major.h>
-#include <asm/io.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ciscode.h>
-#include <pcmcia/ds.h>
-#include <pcmcia/cisreg.h>
-
-#include <linux/skbuff.h>
-#include <linux/capi.h>
-#include <linux/b1lli.h>
-#include <linux/b1pcmcia.h>
-
-/*====================================================================*/
-
-MODULE_DESCRIPTION("CAPI4Linux: PCMCIA client driver for AVM B1/M1/M2");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/*====================================================================*/
-
-static int avmcs_config(struct pcmcia_device *link);
-static void avmcs_release(struct pcmcia_device *link);
-static void avmcs_detach(struct pcmcia_device *p_dev);
-
-static int avmcs_probe(struct pcmcia_device *p_dev)
-{
-	/* General socket configuration */
-	p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
-	p_dev->config_index = 1;
-	p_dev->config_regs = PRESENT_OPTION;
-
-	return avmcs_config(p_dev);
-} /* avmcs_attach */
-
-
-static void avmcs_detach(struct pcmcia_device *link)
-{
-	avmcs_release(link);
-} /* avmcs_detach */
-
-static int avmcs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
-{
-	p_dev->resource[0]->end = 16;
-	p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
-	p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
-
-	return pcmcia_request_io(p_dev);
-}
-
-static int avmcs_config(struct pcmcia_device *link)
-{
-	int i = -1;
-	char devname[128];
-	int cardtype;
-	int (*addcard)(unsigned int port, unsigned irq);
-
-	devname[0] = 0;
-	if (link->prod_id[1])
-		strlcpy(devname, link->prod_id[1], sizeof(devname));
-
-	/*
-	 * find IO port
-	 */
-	if (pcmcia_loop_config(link, avmcs_configcheck, NULL))
-		return -ENODEV;
-
-	do {
-		if (!link->irq) {
-			/* undo */
-			pcmcia_disable_device(link);
-			break;
-		}
-
-		/*
-		 * configure the PCMCIA socket
-		 */
-		i = pcmcia_enable_device(link);
-		if (i != 0) {
-			pcmcia_disable_device(link);
-			break;
-		}
-
-	} while (0);
-
-	if (devname[0]) {
-		char *s = strrchr(devname, ' ');
-		if (!s)
-			s = devname;
-		else s++;
-		if (strcmp("M1", s) == 0) {
-			cardtype = AVM_CARDTYPE_M1;
-		} else if (strcmp("M2", s) == 0) {
-			cardtype = AVM_CARDTYPE_M2;
-		} else {
-			cardtype = AVM_CARDTYPE_B1;
-		}
-	} else
-		cardtype = AVM_CARDTYPE_B1;
-
-	/* If any step failed, release any partially configured state */
-	if (i != 0) {
-		avmcs_release(link);
-		return -ENODEV;
-	}
-
-
-	switch (cardtype) {
-	case AVM_CARDTYPE_M1: addcard = b1pcmcia_addcard_m1; break;
-	case AVM_CARDTYPE_M2: addcard = b1pcmcia_addcard_m2; break;
-	default:
-	case AVM_CARDTYPE_B1: addcard = b1pcmcia_addcard_b1; break;
-	}
-	if ((i = (*addcard)(link->resource[0]->start, link->irq)) < 0) {
-		dev_err(&link->dev,
-			"avm_cs: failed to add AVM-Controller at i/o %#x, irq %d\n",
-			(unsigned int) link->resource[0]->start, link->irq);
-		avmcs_release(link);
-		return -ENODEV;
-	}
-	return 0;
-
-} /* avmcs_config */
-
-
-static void avmcs_release(struct pcmcia_device *link)
-{
-	b1pcmcia_delcard(link->resource[0]->start, link->irq);
-	pcmcia_disable_device(link);
-} /* avmcs_release */
-
-
-static const struct pcmcia_device_id avmcs_ids[] = {
-	PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN-Controller B1", 0x95d42008, 0x845dc335),
-	PCMCIA_DEVICE_PROD_ID12("AVM", "Mobile ISDN-Controller M1", 0x95d42008, 0x81e10430),
-	PCMCIA_DEVICE_PROD_ID12("AVM", "Mobile ISDN-Controller M2", 0x95d42008, 0x18e8558a),
-	PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, avmcs_ids);
-
-static struct pcmcia_driver avmcs_driver = {
-	.owner	= THIS_MODULE,
-	.name		= "avm_cs",
-	.probe = avmcs_probe,
-	.remove	= avmcs_detach,
-	.id_table = avmcs_ids,
-};
-module_pcmcia_driver(avmcs_driver);
diff --git a/drivers/staging/isdn/avm/avmcard.h b/drivers/staging/isdn/avm/avmcard.h
deleted file mode 100644
index cdfa89c..0000000
--- a/drivers/staging/isdn/avm/avmcard.h
+++ /dev/null
@@ -1,581 +0,0 @@
-/* $Id: avmcard.h,v 1.1.4.1.2.1 2001/12/21 15:00:17 kai Exp $
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef _AVMCARD_H_
-#define _AVMCARD_H_
-
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-
-#define	AVMB1_PORTLEN		0x1f
-#define AVM_MAXVERSION		8
-#define AVM_NCCI_PER_CHANNEL	4
-
-/*
- * Versions
- */
-
-#define	VER_DRIVER	0
-#define	VER_CARDTYPE	1
-#define	VER_HWID	2
-#define	VER_SERIAL	3
-#define	VER_OPTION	4
-#define	VER_PROTO	5
-#define	VER_PROFILE	6
-#define	VER_CAPI	7
-
-enum avmcardtype {
-	avm_b1isa,
-	avm_b1pci,
-	avm_b1pcmcia,
-	avm_m1,
-	avm_m2,
-	avm_t1isa,
-	avm_t1pci,
-	avm_c4,
-	avm_c2
-};
-
-typedef struct avmcard_dmabuf {
-	long        size;
-	u8       *dmabuf;
-	dma_addr_t  dmaaddr;
-} avmcard_dmabuf;
-
-typedef struct avmcard_dmainfo {
-	u32                recvlen;
-	avmcard_dmabuf       recvbuf;
-
-	avmcard_dmabuf       sendbuf;
-	struct sk_buff_head  send_queue;
-
-	struct pci_dev      *pcidev;
-} avmcard_dmainfo;
-
-typedef	struct avmctrl_info {
-	char cardname[32];
-
-	int versionlen;
-	char versionbuf[1024];
-	char *version[AVM_MAXVERSION];
-
-	char infobuf[128];	/* for function procinfo */
-
-	struct avmcard  *card;
-	struct capi_ctr  capi_ctrl;
-
-	struct list_head ncci_head;
-} avmctrl_info;
-
-typedef struct avmcard {
-	char name[32];
-
-	spinlock_t lock;
-	unsigned int port;
-	unsigned irq;
-	unsigned long membase;
-	enum avmcardtype cardtype;
-	unsigned char revision;
-	unsigned char class;
-	int cardnr; /* for t1isa */
-
-	char msgbuf[128];	/* capimsg msg part */
-	char databuf[2048];	/* capimsg data part */
-
-	void __iomem *mbase;
-	volatile u32 csr;
-	avmcard_dmainfo *dma;
-
-	struct avmctrl_info *ctrlinfo;
-
-	u_int nr_controllers;
-	u_int nlogcontr;
-	struct list_head list;
-} avmcard;
-
-extern int b1_irq_table[16];
-
-/*
- * LLI Messages to the ISDN-ControllerISDN Controller
- */
-
-#define	SEND_POLL		0x72	/*
-					 * after load <- RECEIVE_POLL
-					 */
-#define SEND_INIT		0x11	/*
-					 * first message <- RECEIVE_INIT
-					 * int32 NumApplications  int32
-					 * NumNCCIs int32 BoardNumber
-					 */
-#define SEND_REGISTER		0x12	/*
-					 * register an application int32
-					 * ApplIDId int32 NumMessages
-					 * int32 NumB3Connections int32
-					 * NumB3Blocks int32 B3Size
-					 *
-					 * AnzB3Connection != 0 &&
-					 * AnzB3Blocks >= 1 && B3Size >= 1
-					 */
-#define SEND_RELEASE		0x14	/*
-					 * deregister an application int32
-					 * ApplID
-					 */
-#define SEND_MESSAGE		0x15	/*
-					 * send capi-message int32 length
-					 * capi-data ...
-					 */
-#define SEND_DATA_B3_REQ	0x13	/*
-					 * send capi-data-message int32
-					 * MsgLength capi-data ... int32
-					 * B3Length data ....
-					 */
-
-#define SEND_CONFIG		0x21    /*
-					 */
-
-#define SEND_POLLACK		0x73    /* T1 Watchdog */
-
-/*
- * LLI Messages from the ISDN-Controller
- */
-
-#define RECEIVE_POLL		0x32	/*
-					 * <- after SEND_POLL
-					 */
-#define RECEIVE_INIT		0x27	/*
-					 * <- after SEND_INIT int32 length
-					 * byte total length b1struct board
-					 * driver revision b1struct card
-					 * type b1struct reserved b1struct
-					 * serial number b1struct driver
-					 * capability b1struct d-channel
-					 * protocol b1struct CAPI-2.0
-					 * profile b1struct capi version
-					 */
-#define RECEIVE_MESSAGE		0x21	/*
-					 * <- after SEND_MESSAGE int32
-					 * AppllID int32 Length capi-data
-					 * ....
-					 */
-#define RECEIVE_DATA_B3_IND	0x22	/*
-					 * received data int32 AppllID
-					 * int32 Length capi-data ...
-					 * int32 B3Length data ...
-					 */
-#define RECEIVE_START		0x23	/*
-					 * Handshake
-					 */
-#define RECEIVE_STOP		0x24	/*
-					 * Handshake
-					 */
-#define RECEIVE_NEW_NCCI	0x25	/*
-					 * int32 AppllID int32 NCCI int32
-					 * WindowSize
-					 */
-#define RECEIVE_FREE_NCCI	0x26	/*
-					 * int32 AppllID int32 NCCI
-					 */
-#define RECEIVE_RELEASE		0x26	/*
-					 * int32 AppllID int32 0xffffffff
-					 */
-#define RECEIVE_TASK_READY	0x31	/*
-					 * int32 tasknr
-					 * int32 Length Taskname ...
-					 */
-#define RECEIVE_DEBUGMSG	0x71	/*
-					 * int32 Length message
-					 *
-					 */
-#define RECEIVE_POLLDWORD	0x75	/* t1pci in dword mode */
-
-#define WRITE_REGISTER		0x00
-#define READ_REGISTER		0x01
-
-/*
- * port offsets
- */
-
-#define B1_READ			0x00
-#define B1_WRITE		0x01
-#define B1_INSTAT		0x02
-#define B1_OUTSTAT		0x03
-#define B1_ANALYSE		0x04
-#define B1_REVISION		0x05
-#define B1_RESET		0x10
-
-
-#define B1_STAT0(cardtype)  ((cardtype) == avm_m1 ? 0x81200000l : 0x80A00000l)
-#define B1_STAT1(cardtype)  (0x80E00000l)
-
-/* ---------------------------------------------------------------- */
-
-static inline unsigned char b1outp(unsigned int base,
-				   unsigned short offset,
-				   unsigned char value)
-{
-	outb(value, base + offset);
-	return inb(base + B1_ANALYSE);
-}
-
-
-static inline int b1_rx_full(unsigned int base)
-{
-	return inb(base + B1_INSTAT) & 0x1;
-}
-
-static inline unsigned char b1_get_byte(unsigned int base)
-{
-	unsigned long stop = jiffies + 1 * HZ;	/* maximum wait time 1 sec */
-	while (!b1_rx_full(base) && time_before(jiffies, stop));
-	if (b1_rx_full(base))
-		return inb(base + B1_READ);
-	printk(KERN_CRIT "b1lli(0x%x): rx not full after 1 second\n", base);
-	return 0;
-}
-
-static inline unsigned int b1_get_word(unsigned int base)
-{
-	unsigned int val = 0;
-	val |= b1_get_byte(base);
-	val |= (b1_get_byte(base) << 8);
-	val |= (b1_get_byte(base) << 16);
-	val |= (b1_get_byte(base) << 24);
-	return val;
-}
-
-static inline int b1_tx_empty(unsigned int base)
-{
-	return inb(base + B1_OUTSTAT) & 0x1;
-}
-
-static inline void b1_put_byte(unsigned int base, unsigned char val)
-{
-	while (!b1_tx_empty(base));
-	b1outp(base, B1_WRITE, val);
-}
-
-static inline int b1_save_put_byte(unsigned int base, unsigned char val)
-{
-	unsigned long stop = jiffies + 2 * HZ;
-	while (!b1_tx_empty(base) && time_before(jiffies, stop));
-	if (!b1_tx_empty(base)) return -1;
-	b1outp(base, B1_WRITE, val);
-	return 0;
-}
-
-static inline void b1_put_word(unsigned int base, unsigned int val)
-{
-	b1_put_byte(base, val & 0xff);
-	b1_put_byte(base, (val >> 8) & 0xff);
-	b1_put_byte(base, (val >> 16) & 0xff);
-	b1_put_byte(base, (val >> 24) & 0xff);
-}
-
-static inline unsigned int b1_get_slice(unsigned int base,
-					unsigned char *dp)
-{
-	unsigned int len, i;
-
-	len = i = b1_get_word(base);
-	while (i-- > 0) *dp++ = b1_get_byte(base);
-	return len;
-}
-
-static inline void b1_put_slice(unsigned int base,
-				unsigned char *dp, unsigned int len)
-{
-	unsigned i = len;
-	b1_put_word(base, i);
-	while (i-- > 0)
-		b1_put_byte(base, *dp++);
-}
-
-static void b1_wr_reg(unsigned int base,
-		      unsigned int reg,
-		      unsigned int value)
-{
-	b1_put_byte(base, WRITE_REGISTER);
-	b1_put_word(base, reg);
-	b1_put_word(base, value);
-}
-
-static inline unsigned int b1_rd_reg(unsigned int base,
-				     unsigned int reg)
-{
-	b1_put_byte(base, READ_REGISTER);
-	b1_put_word(base, reg);
-	return b1_get_word(base);
-
-}
-
-static inline void b1_reset(unsigned int base)
-{
-	b1outp(base, B1_RESET, 0);
-	mdelay(55 * 2);	/* 2 TIC's */
-
-	b1outp(base, B1_RESET, 1);
-	mdelay(55 * 2);	/* 2 TIC's */
-
-	b1outp(base, B1_RESET, 0);
-	mdelay(55 * 2);	/* 2 TIC's */
-}
-
-static inline unsigned char b1_disable_irq(unsigned int base)
-{
-	return b1outp(base, B1_INSTAT, 0x00);
-}
-
-/* ---------------------------------------------------------------- */
-
-static inline void b1_set_test_bit(unsigned int base,
-				   enum avmcardtype cardtype,
-				   int onoff)
-{
-	b1_wr_reg(base, B1_STAT0(cardtype), onoff ? 0x21 : 0x20);
-}
-
-static inline int b1_get_test_bit(unsigned int base,
-				  enum avmcardtype cardtype)
-{
-	return (b1_rd_reg(base, B1_STAT0(cardtype)) & 0x01) != 0;
-}
-
-/* ---------------------------------------------------------------- */
-
-#define T1_FASTLINK		0x00
-#define T1_SLOWLINK		0x08
-
-#define T1_READ			B1_READ
-#define T1_WRITE		B1_WRITE
-#define T1_INSTAT		B1_INSTAT
-#define T1_OUTSTAT		B1_OUTSTAT
-#define T1_IRQENABLE		0x05
-#define T1_FIFOSTAT		0x06
-#define T1_RESETLINK		0x10
-#define T1_ANALYSE		0x11
-#define T1_IRQMASTER		0x12
-#define T1_IDENT		0x17
-#define T1_RESETBOARD		0x1f
-
-#define	T1F_IREADY		0x01
-#define	T1F_IHALF		0x02
-#define	T1F_IFULL		0x04
-#define	T1F_IEMPTY		0x08
-#define	T1F_IFLAGS		0xF0
-
-#define	T1F_OREADY		0x10
-#define	T1F_OHALF		0x20
-#define	T1F_OEMPTY		0x40
-#define	T1F_OFULL		0x80
-#define	T1F_OFLAGS		0xF0
-
-/* there are HEMA cards with 1k and 4k FIFO out */
-#define FIFO_OUTBSIZE		256
-#define FIFO_INPBSIZE		512
-
-#define HEMA_VERSION_ID		0
-#define HEMA_PAL_ID		0
-
-static inline void t1outp(unsigned int base,
-			  unsigned short offset,
-			  unsigned char value)
-{
-	outb(value, base + offset);
-}
-
-static inline unsigned char t1inp(unsigned int base,
-				  unsigned short offset)
-{
-	return inb(base + offset);
-}
-
-static inline int t1_isfastlink(unsigned int base)
-{
-	return (inb(base + T1_IDENT) & ~0x82) == 1;
-}
-
-static inline unsigned char t1_fifostatus(unsigned int base)
-{
-	return inb(base + T1_FIFOSTAT);
-}
-
-static inline unsigned int t1_get_slice(unsigned int base,
-					unsigned char *dp)
-{
-	unsigned int len, i;
-#ifdef FASTLINK_DEBUG
-	unsigned wcnt = 0, bcnt = 0;
-#endif
-
-	len = i = b1_get_word(base);
-	if (t1_isfastlink(base)) {
-		int status;
-		while (i > 0) {
-			status = t1_fifostatus(base) & (T1F_IREADY | T1F_IHALF);
-			if (i >= FIFO_INPBSIZE) status |= T1F_IFULL;
-
-			switch (status) {
-			case T1F_IREADY | T1F_IHALF | T1F_IFULL:
-				insb(base + B1_READ, dp, FIFO_INPBSIZE);
-				dp += FIFO_INPBSIZE;
-				i -= FIFO_INPBSIZE;
-#ifdef FASTLINK_DEBUG
-				wcnt += FIFO_INPBSIZE;
-#endif
-				break;
-			case T1F_IREADY | T1F_IHALF:
-				insb(base + B1_READ, dp, i);
-#ifdef FASTLINK_DEBUG
-				wcnt += i;
-#endif
-				dp += i;
-				i = 0;
-				break;
-			default:
-				*dp++ = b1_get_byte(base);
-				i--;
-#ifdef FASTLINK_DEBUG
-				bcnt++;
-#endif
-				break;
-			}
-		}
-#ifdef FASTLINK_DEBUG
-		if (wcnt)
-			printk(KERN_DEBUG "b1lli(0x%x): get_slice l=%d w=%d b=%d\n",
-			       base, len, wcnt, bcnt);
-#endif
-	} else {
-		while (i-- > 0)
-			*dp++ = b1_get_byte(base);
-	}
-	return len;
-}
-
-static inline void t1_put_slice(unsigned int base,
-				unsigned char *dp, unsigned int len)
-{
-	unsigned i = len;
-	b1_put_word(base, i);
-	if (t1_isfastlink(base)) {
-		int status;
-		while (i > 0) {
-			status = t1_fifostatus(base) & (T1F_OREADY | T1F_OHALF);
-			if (i >= FIFO_OUTBSIZE) status |= T1F_OEMPTY;
-			switch (status) {
-			case T1F_OREADY | T1F_OHALF | T1F_OEMPTY:
-				outsb(base + B1_WRITE, dp, FIFO_OUTBSIZE);
-				dp += FIFO_OUTBSIZE;
-				i -= FIFO_OUTBSIZE;
-				break;
-			case T1F_OREADY | T1F_OHALF:
-				outsb(base + B1_WRITE, dp, i);
-				dp += i;
-				i = 0;
-				break;
-			default:
-				b1_put_byte(base, *dp++);
-				i--;
-				break;
-			}
-		}
-	} else {
-		while (i-- > 0)
-			b1_put_byte(base, *dp++);
-	}
-}
-
-static inline void t1_disable_irq(unsigned int base)
-{
-	t1outp(base, T1_IRQMASTER, 0x00);
-}
-
-static inline void t1_reset(unsigned int base)
-{
-	/* reset T1 Controller */
-	b1_reset(base);
-	/* disable irq on HEMA */
-	t1outp(base, B1_INSTAT, 0x00);
-	t1outp(base, B1_OUTSTAT, 0x00);
-	t1outp(base, T1_IRQMASTER, 0x00);
-	/* reset HEMA board configuration */
-	t1outp(base, T1_RESETBOARD, 0xf);
-}
-
-static inline void b1_setinterrupt(unsigned int base, unsigned irq,
-				   enum avmcardtype cardtype)
-{
-	switch (cardtype) {
-	case avm_t1isa:
-		t1outp(base, B1_INSTAT, 0x00);
-		t1outp(base, B1_INSTAT, 0x02);
-		t1outp(base, T1_IRQMASTER, 0x08);
-		break;
-	case avm_b1isa:
-		b1outp(base, B1_INSTAT, 0x00);
-		b1outp(base, B1_RESET, b1_irq_table[irq]);
-		b1outp(base, B1_INSTAT, 0x02);
-		break;
-	default:
-	case avm_m1:
-	case avm_m2:
-	case avm_b1pci:
-		b1outp(base, B1_INSTAT, 0x00);
-		b1outp(base, B1_RESET, 0xf0);
-		b1outp(base, B1_INSTAT, 0x02);
-		break;
-	case avm_c4:
-	case avm_t1pci:
-		b1outp(base, B1_RESET, 0xf0);
-		break;
-	}
-}
-
-/* b1.c */
-avmcard *b1_alloc_card(int nr_controllers);
-void b1_free_card(avmcard *card);
-int b1_detect(unsigned int base, enum avmcardtype cardtype);
-void b1_getrevision(avmcard *card);
-int b1_load_t4file(avmcard *card, capiloaddatapart *t4file);
-int b1_load_config(avmcard *card, capiloaddatapart *config);
-int b1_loaded(avmcard *card);
-
-int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data);
-void b1_reset_ctr(struct capi_ctr *ctrl);
-void b1_register_appl(struct capi_ctr *ctrl, u16 appl,
-		      capi_register_params *rp);
-void b1_release_appl(struct capi_ctr *ctrl, u16 appl);
-u16  b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
-void b1_parse_version(avmctrl_info *card);
-irqreturn_t b1_interrupt(int interrupt, void *devptr);
-
-int b1_proc_show(struct seq_file *m, void *v);
-
-avmcard_dmainfo *avmcard_dma_alloc(char *name, struct pci_dev *,
-				   long rsize, long ssize);
-void avmcard_dma_free(avmcard_dmainfo *);
-
-/* b1dma.c */
-int b1pciv4_detect(avmcard *card);
-int t1pci_detect(avmcard *card);
-void b1dma_reset(avmcard *card);
-irqreturn_t b1dma_interrupt(int interrupt, void *devptr);
-
-int b1dma_load_firmware(struct capi_ctr *ctrl, capiloaddata *data);
-void b1dma_reset_ctr(struct capi_ctr *ctrl);
-void b1dma_remove_ctr(struct capi_ctr *ctrl);
-void b1dma_register_appl(struct capi_ctr *ctrl,
-			 u16 appl,
-			 capi_register_params *rp);
-void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl);
-u16  b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
-int b1dma_proc_show(struct seq_file *m, void *v);
-
-#endif /* _AVMCARD_H_ */
diff --git a/drivers/staging/isdn/avm/b1.c b/drivers/staging/isdn/avm/b1.c
deleted file mode 100644
index 32ec8cf..0000000
--- a/drivers/staging/isdn/avm/b1.c
+++ /dev/null
@@ -1,819 +0,0 @@
-/* $Id: b1.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Common module for AVM B1 cards.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/capi.h>
-#include <linux/kernelcapi.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include <linux/init.h>
-#include <linux/uaccess.h>
-#include <linux/netdevice.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-
-static char *revision = "$Revision: 1.1.2.2 $";
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: Common support for active AVM cards");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-int b1_irq_table[16] =
-{0,
- 0,
- 0,
- 192,				/* irq 3 */
- 32,				/* irq 4 */
- 160,				/* irq 5 */
- 96,				/* irq 6 */
- 224,				/* irq 7 */
- 0,
- 64,				/* irq 9 */
- 80,				/* irq 10 */
- 208,				/* irq 11 */
- 48,				/* irq 12 */
- 0,
- 0,
- 112,				/* irq 15 */
-};
-
-/* ------------------------------------------------------------- */
-
-avmcard *b1_alloc_card(int nr_controllers)
-{
-	avmcard *card;
-	avmctrl_info *cinfo;
-	int i;
-
-	card = kzalloc(sizeof(*card), GFP_KERNEL);
-	if (!card)
-		return NULL;
-
-	cinfo = kcalloc(nr_controllers, sizeof(*cinfo), GFP_KERNEL);
-	if (!cinfo) {
-		kfree(card);
-		return NULL;
-	}
-
-	card->ctrlinfo = cinfo;
-	for (i = 0; i < nr_controllers; i++) {
-		INIT_LIST_HEAD(&cinfo[i].ncci_head);
-		cinfo[i].card = card;
-	}
-	spin_lock_init(&card->lock);
-	card->nr_controllers = nr_controllers;
-
-	return card;
-}
-
-/* ------------------------------------------------------------- */
-
-void b1_free_card(avmcard *card)
-{
-	kfree(card->ctrlinfo);
-	kfree(card);
-}
-
-/* ------------------------------------------------------------- */
-
-int b1_detect(unsigned int base, enum avmcardtype cardtype)
-{
-	int onoff, i;
-
-	/*
-	 * Statusregister 0000 00xx
-	 */
-	if ((inb(base + B1_INSTAT) & 0xfc)
-	    || (inb(base + B1_OUTSTAT) & 0xfc))
-		return 1;
-	/*
-	 * Statusregister 0000 001x
-	 */
-	b1outp(base, B1_INSTAT, 0x2);	/* enable irq */
-	/* b1outp(base, B1_OUTSTAT, 0x2); */
-	if ((inb(base + B1_INSTAT) & 0xfe) != 0x2
-	    /* || (inb(base + B1_OUTSTAT) & 0xfe) != 0x2 */)
-		return 2;
-	/*
-	 * Statusregister 0000 000x
-	 */
-	b1outp(base, B1_INSTAT, 0x0);	/* disable irq */
-	b1outp(base, B1_OUTSTAT, 0x0);
-	if ((inb(base + B1_INSTAT) & 0xfe)
-	    || (inb(base + B1_OUTSTAT) & 0xfe))
-		return 3;
-
-	for (onoff = !0, i = 0; i < 10; i++) {
-		b1_set_test_bit(base, cardtype, onoff);
-		if (b1_get_test_bit(base, cardtype) != onoff)
-			return 4;
-		onoff = !onoff;
-	}
-
-	if (cardtype == avm_m1)
-		return 0;
-
-	if ((b1_rd_reg(base, B1_STAT1(cardtype)) & 0x0f) != 0x01)
-		return 5;
-
-	return 0;
-}
-
-void b1_getrevision(avmcard *card)
-{
-	card->class = inb(card->port + B1_ANALYSE);
-	card->revision = inb(card->port + B1_REVISION);
-}
-
-#define FWBUF_SIZE	256
-int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
-{
-	unsigned char buf[FWBUF_SIZE];
-	unsigned char *dp;
-	int i, left;
-	unsigned int base = card->port;
-
-	dp = t4file->data;
-	left = t4file->len;
-	while (left > FWBUF_SIZE) {
-		if (t4file->user) {
-			if (copy_from_user(buf, dp, FWBUF_SIZE))
-				return -EFAULT;
-		} else {
-			memcpy(buf, dp, FWBUF_SIZE);
-		}
-		for (i = 0; i < FWBUF_SIZE; i++)
-			if (b1_save_put_byte(base, buf[i]) < 0) {
-				printk(KERN_ERR "%s: corrupted firmware file ?\n",
-				       card->name);
-				return -EIO;
-			}
-		left -= FWBUF_SIZE;
-		dp += FWBUF_SIZE;
-	}
-	if (left) {
-		if (t4file->user) {
-			if (copy_from_user(buf, dp, left))
-				return -EFAULT;
-		} else {
-			memcpy(buf, dp, left);
-		}
-		for (i = 0; i < left; i++)
-			if (b1_save_put_byte(base, buf[i]) < 0) {
-				printk(KERN_ERR "%s: corrupted firmware file ?\n",
-				       card->name);
-				return -EIO;
-			}
-	}
-	return 0;
-}
-
-int b1_load_config(avmcard *card, capiloaddatapart *config)
-{
-	unsigned char buf[FWBUF_SIZE];
-	unsigned char *dp;
-	unsigned int base = card->port;
-	int i, j, left;
-
-	dp = config->data;
-	left = config->len;
-	if (left) {
-		b1_put_byte(base, SEND_CONFIG);
-		b1_put_word(base, 1);
-		b1_put_byte(base, SEND_CONFIG);
-		b1_put_word(base, left);
-	}
-	while (left > FWBUF_SIZE) {
-		if (config->user) {
-			if (copy_from_user(buf, dp, FWBUF_SIZE))
-				return -EFAULT;
-		} else {
-			memcpy(buf, dp, FWBUF_SIZE);
-		}
-		for (i = 0; i < FWBUF_SIZE; ) {
-			b1_put_byte(base, SEND_CONFIG);
-			for (j = 0; j < 4; j++) {
-				b1_put_byte(base, buf[i++]);
-			}
-		}
-		left -= FWBUF_SIZE;
-		dp += FWBUF_SIZE;
-	}
-	if (left) {
-		if (config->user) {
-			if (copy_from_user(buf, dp, left))
-				return -EFAULT;
-		} else {
-			memcpy(buf, dp, left);
-		}
-		for (i = 0; i < left; ) {
-			b1_put_byte(base, SEND_CONFIG);
-			for (j = 0; j < 4; j++) {
-				if (i < left)
-					b1_put_byte(base, buf[i++]);
-				else
-					b1_put_byte(base, 0);
-			}
-		}
-	}
-	return 0;
-}
-
-int b1_loaded(avmcard *card)
-{
-	unsigned int base = card->port;
-	unsigned long stop;
-	unsigned char ans;
-	unsigned long tout = 2;
-
-	for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
-		if (b1_tx_empty(base))
-			break;
-	}
-	if (!b1_tx_empty(base)) {
-		printk(KERN_ERR "%s: b1_loaded: tx err, corrupted t4 file ?\n",
-		       card->name);
-		return 0;
-	}
-	b1_put_byte(base, SEND_POLL);
-	for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
-		if (b1_rx_full(base)) {
-			ans = b1_get_byte(base);
-			if (ans == RECEIVE_POLL)
-				return 1;
-
-			printk(KERN_ERR "%s: b1_loaded: got 0x%x, firmware not running\n",
-			       card->name, ans);
-			return 0;
-		}
-	}
-	printk(KERN_ERR "%s: b1_loaded: firmware not running\n", card->name);
-	return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	unsigned int port = card->port;
-	unsigned long flags;
-	int retval;
-
-	b1_reset(port);
-	retval = b1_load_t4file(card, &data->firmware);
-
-	if (retval) {
-		b1_reset(port);
-		printk(KERN_ERR "%s: failed to load t4file!!\n",
-		       card->name);
-		return retval;
-	}
-
-	b1_disable_irq(port);
-
-	if (data->configuration.len > 0 && data->configuration.data) {
-		retval = b1_load_config(card, &data->configuration);
-		if (retval) {
-			b1_reset(port);
-			printk(KERN_ERR "%s: failed to load config!!\n",
-			       card->name);
-			return retval;
-		}
-	}
-
-	if (!b1_loaded(card)) {
-		printk(KERN_ERR "%s: failed to load t4file.\n", card->name);
-		return -EIO;
-	}
-
-	spin_lock_irqsave(&card->lock, flags);
-	b1_setinterrupt(port, card->irq, card->cardtype);
-	b1_put_byte(port, SEND_INIT);
-	b1_put_word(port, CAPI_MAXAPPL);
-	b1_put_word(port, AVM_NCCI_PER_CHANNEL * 2);
-	b1_put_word(port, ctrl->cnr - 1);
-	spin_unlock_irqrestore(&card->lock, flags);
-
-	return 0;
-}
-
-void b1_reset_ctr(struct capi_ctr *ctrl)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	unsigned int port = card->port;
-	unsigned long flags;
-
-	b1_reset(port);
-	b1_reset(port);
-
-	memset(cinfo->version, 0, sizeof(cinfo->version));
-	spin_lock_irqsave(&card->lock, flags);
-	capilib_release(&cinfo->ncci_head);
-	spin_unlock_irqrestore(&card->lock, flags);
-	capi_ctr_down(ctrl);
-}
-
-void b1_register_appl(struct capi_ctr *ctrl,
-		      u16 appl,
-		      capi_register_params *rp)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	unsigned int port = card->port;
-	unsigned long flags;
-	int nconn, want = rp->level3cnt;
-
-	if (want > 0) nconn = want;
-	else nconn = ctrl->profile.nbchannel * -want;
-	if (nconn == 0) nconn = ctrl->profile.nbchannel;
-
-	spin_lock_irqsave(&card->lock, flags);
-	b1_put_byte(port, SEND_REGISTER);
-	b1_put_word(port, appl);
-	b1_put_word(port, 1024 * (nconn + 1));
-	b1_put_word(port, nconn);
-	b1_put_word(port, rp->datablkcnt);
-	b1_put_word(port, rp->datablklen);
-	spin_unlock_irqrestore(&card->lock, flags);
-}
-
-void b1_release_appl(struct capi_ctr *ctrl, u16 appl)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	unsigned int port = card->port;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
-	capilib_release_appl(&cinfo->ncci_head, appl);
-	b1_put_byte(port, SEND_RELEASE);
-	b1_put_word(port, appl);
-	spin_unlock_irqrestore(&card->lock, flags);
-}
-
-u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	unsigned int port = card->port;
-	unsigned long flags;
-	u16 len = CAPIMSG_LEN(skb->data);
-	u8 cmd = CAPIMSG_COMMAND(skb->data);
-	u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data);
-	u16 dlen, retval;
-
-	spin_lock_irqsave(&card->lock, flags);
-	if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
-		retval = capilib_data_b3_req(&cinfo->ncci_head,
-					     CAPIMSG_APPID(skb->data),
-					     CAPIMSG_NCCI(skb->data),
-					     CAPIMSG_MSGID(skb->data));
-		if (retval != CAPI_NOERROR) {
-			spin_unlock_irqrestore(&card->lock, flags);
-			return retval;
-		}
-
-		dlen = CAPIMSG_DATALEN(skb->data);
-
-		b1_put_byte(port, SEND_DATA_B3_REQ);
-		b1_put_slice(port, skb->data, len);
-		b1_put_slice(port, skb->data + len, dlen);
-	} else {
-		b1_put_byte(port, SEND_MESSAGE);
-		b1_put_slice(port, skb->data, len);
-	}
-	spin_unlock_irqrestore(&card->lock, flags);
-
-	dev_kfree_skb_any(skb);
-	return CAPI_NOERROR;
-}
-
-/* ------------------------------------------------------------- */
-
-void b1_parse_version(avmctrl_info *cinfo)
-{
-	struct capi_ctr *ctrl = &cinfo->capi_ctrl;
-	avmcard *card = cinfo->card;
-	capi_profile *profp;
-	u8 *dversion;
-	u8 flag;
-	int i, j;
-
-	for (j = 0; j < AVM_MAXVERSION; j++)
-		cinfo->version[j] = "";
-	for (i = 0, j = 0;
-	     j < AVM_MAXVERSION && i < cinfo->versionlen;
-	     j++, i += cinfo->versionbuf[i] + 1)
-		cinfo->version[j] = &cinfo->versionbuf[i + 1];
-
-	strlcpy(ctrl->serial, cinfo->version[VER_SERIAL], sizeof(ctrl->serial));
-	memcpy(&ctrl->profile, cinfo->version[VER_PROFILE], sizeof(capi_profile));
-	strlcpy(ctrl->manu, "AVM GmbH", sizeof(ctrl->manu));
-	dversion = cinfo->version[VER_DRIVER];
-	ctrl->version.majorversion = 2;
-	ctrl->version.minorversion = 0;
-	ctrl->version.majormanuversion = (((dversion[0] - '0') & 0xf) << 4);
-	ctrl->version.majormanuversion |= ((dversion[2] - '0') & 0xf);
-	ctrl->version.minormanuversion = (dversion[3] - '0') << 4;
-	ctrl->version.minormanuversion |=
-		(dversion[5] - '0') * 10 + ((dversion[6] - '0') & 0xf);
-
-	profp = &ctrl->profile;
-
-	flag = ((u8 *)(profp->manu))[1];
-	switch (flag) {
-	case 0: if (cinfo->version[VER_CARDTYPE])
-			strcpy(cinfo->cardname, cinfo->version[VER_CARDTYPE]);
-		else strcpy(cinfo->cardname, "B1");
-		break;
-	case 3: strcpy(cinfo->cardname, "PCMCIA B"); break;
-	case 4: strcpy(cinfo->cardname, "PCMCIA M1"); break;
-	case 5: strcpy(cinfo->cardname, "PCMCIA M2"); break;
-	case 6: strcpy(cinfo->cardname, "B1 V3.0"); break;
-	case 7: strcpy(cinfo->cardname, "B1 PCI"); break;
-	default: sprintf(cinfo->cardname, "AVM?%u", (unsigned int)flag); break;
-	}
-	printk(KERN_NOTICE "%s: card %d \"%s\" ready.\n",
-	       card->name, ctrl->cnr, cinfo->cardname);
-
-	flag = ((u8 *)(profp->manu))[3];
-	if (flag)
-		printk(KERN_NOTICE "%s: card %d Protocol:%s%s%s%s%s%s%s\n",
-		       card->name,
-		       ctrl->cnr,
-		       (flag & 0x01) ? " DSS1" : "",
-		       (flag & 0x02) ? " CT1" : "",
-		       (flag & 0x04) ? " VN3" : "",
-		       (flag & 0x08) ? " NI1" : "",
-		       (flag & 0x10) ? " AUSTEL" : "",
-		       (flag & 0x20) ? " ESS" : "",
-		       (flag & 0x40) ? " 1TR6" : ""
-			);
-
-	flag = ((u8 *)(profp->manu))[5];
-	if (flag)
-		printk(KERN_NOTICE "%s: card %d Linetype:%s%s%s%s\n",
-		       card->name,
-		       ctrl->cnr,
-		       (flag & 0x01) ? " point to point" : "",
-		       (flag & 0x02) ? " point to multipoint" : "",
-		       (flag & 0x08) ? " leased line without D-channel" : "",
-		       (flag & 0x04) ? " leased line with D-channel" : ""
-			);
-}
-
-/* ------------------------------------------------------------- */
-
-irqreturn_t b1_interrupt(int interrupt, void *devptr)
-{
-	avmcard *card = devptr;
-	avmctrl_info *cinfo = &card->ctrlinfo[0];
-	struct capi_ctr *ctrl = &cinfo->capi_ctrl;
-	unsigned char b1cmd;
-	struct sk_buff *skb;
-
-	unsigned ApplId;
-	unsigned MsgLen;
-	unsigned DataB3Len;
-	unsigned NCCI;
-	unsigned WindowSize;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
-
-	if (!b1_rx_full(card->port)) {
-		spin_unlock_irqrestore(&card->lock, flags);
-		return IRQ_NONE;
-	}
-
-	b1cmd = b1_get_byte(card->port);
-
-	switch (b1cmd) {
-
-	case RECEIVE_DATA_B3_IND:
-
-		ApplId = (unsigned) b1_get_word(card->port);
-		MsgLen = b1_get_slice(card->port, card->msgbuf);
-		DataB3Len = b1_get_slice(card->port, card->databuf);
-		spin_unlock_irqrestore(&card->lock, flags);
-
-		if (MsgLen < 30) { /* not CAPI 64Bit */
-			memset(card->msgbuf + MsgLen, 0, 30-MsgLen);
-			MsgLen = 30;
-			CAPIMSG_SETLEN(card->msgbuf, 30);
-		}
-
-		skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC);
-		if (!skb) {
-			printk(KERN_ERR "%s: incoming packet dropped\n",
-			       card->name);
-		} else {
-			skb_put_data(skb, card->msgbuf, MsgLen);
-			skb_put_data(skb, card->databuf, DataB3Len);
-			capi_ctr_handle_message(ctrl, ApplId, skb);
-		}
-		break;
-
-	case RECEIVE_MESSAGE:
-
-		ApplId = (unsigned) b1_get_word(card->port);
-		MsgLen = b1_get_slice(card->port, card->msgbuf);
-		skb = alloc_skb(MsgLen, GFP_ATOMIC);
-
-		if (!skb) {
-			printk(KERN_ERR "%s: incoming packet dropped\n",
-			       card->name);
-			spin_unlock_irqrestore(&card->lock, flags);
-		} else {
-			skb_put_data(skb, card->msgbuf, MsgLen);
-			if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
-				capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
-						     CAPIMSG_NCCI(skb->data),
-						     CAPIMSG_MSGID(skb->data));
-			spin_unlock_irqrestore(&card->lock, flags);
-			capi_ctr_handle_message(ctrl, ApplId, skb);
-		}
-		break;
-
-	case RECEIVE_NEW_NCCI:
-
-		ApplId = b1_get_word(card->port);
-		NCCI = b1_get_word(card->port);
-		WindowSize = b1_get_word(card->port);
-		capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
-		spin_unlock_irqrestore(&card->lock, flags);
-		break;
-
-	case RECEIVE_FREE_NCCI:
-
-		ApplId = b1_get_word(card->port);
-		NCCI = b1_get_word(card->port);
-		if (NCCI != 0xffffffff)
-			capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
-		spin_unlock_irqrestore(&card->lock, flags);
-		break;
-
-	case RECEIVE_START:
-		/* b1_put_byte(card->port, SEND_POLLACK); */
-		spin_unlock_irqrestore(&card->lock, flags);
-		capi_ctr_resume_output(ctrl);
-		break;
-
-	case RECEIVE_STOP:
-		spin_unlock_irqrestore(&card->lock, flags);
-		capi_ctr_suspend_output(ctrl);
-		break;
-
-	case RECEIVE_INIT:
-
-		cinfo->versionlen = b1_get_slice(card->port, cinfo->versionbuf);
-		spin_unlock_irqrestore(&card->lock, flags);
-		b1_parse_version(cinfo);
-		printk(KERN_INFO "%s: %s-card (%s) now active\n",
-		       card->name,
-		       cinfo->version[VER_CARDTYPE],
-		       cinfo->version[VER_DRIVER]);
-		capi_ctr_ready(ctrl);
-		break;
-
-	case RECEIVE_TASK_READY:
-		ApplId = (unsigned) b1_get_word(card->port);
-		MsgLen = b1_get_slice(card->port, card->msgbuf);
-		spin_unlock_irqrestore(&card->lock, flags);
-		card->msgbuf[MsgLen] = 0;
-		while (MsgLen > 0
-		       && (card->msgbuf[MsgLen - 1] == '\n'
-			   || card->msgbuf[MsgLen - 1] == '\r')) {
-			card->msgbuf[MsgLen - 1] = 0;
-			MsgLen--;
-		}
-		printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
-		       card->name, ApplId, card->msgbuf);
-		break;
-
-	case RECEIVE_DEBUGMSG:
-		MsgLen = b1_get_slice(card->port, card->msgbuf);
-		spin_unlock_irqrestore(&card->lock, flags);
-		card->msgbuf[MsgLen] = 0;
-		while (MsgLen > 0
-		       && (card->msgbuf[MsgLen - 1] == '\n'
-			   || card->msgbuf[MsgLen - 1] == '\r')) {
-			card->msgbuf[MsgLen - 1] = 0;
-			MsgLen--;
-		}
-		printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
-		break;
-
-	case 0xff:
-		spin_unlock_irqrestore(&card->lock, flags);
-		printk(KERN_ERR "%s: card removed ?\n", card->name);
-		return IRQ_NONE;
-	default:
-		spin_unlock_irqrestore(&card->lock, flags);
-		printk(KERN_ERR "%s: b1_interrupt: 0x%x ???\n",
-		       card->name, b1cmd);
-		return IRQ_HANDLED;
-	}
-	return IRQ_HANDLED;
-}
-
-/* ------------------------------------------------------------- */
-int b1_proc_show(struct seq_file *m, void *v)
-{
-	struct capi_ctr *ctrl = m->private;
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	u8 flag;
-	char *s;
-
-	seq_printf(m, "%-16s %s\n", "name", card->name);
-	seq_printf(m, "%-16s 0x%x\n", "io", card->port);
-	seq_printf(m, "%-16s %d\n", "irq", card->irq);
-	switch (card->cardtype) {
-	case avm_b1isa: s = "B1 ISA"; break;
-	case avm_b1pci: s = "B1 PCI"; break;
-	case avm_b1pcmcia: s = "B1 PCMCIA"; break;
-	case avm_m1: s = "M1"; break;
-	case avm_m2: s = "M2"; break;
-	case avm_t1isa: s = "T1 ISA (HEMA)"; break;
-	case avm_t1pci: s = "T1 PCI"; break;
-	case avm_c4: s = "C4"; break;
-	case avm_c2: s = "C2"; break;
-	default: s = "???"; break;
-	}
-	seq_printf(m, "%-16s %s\n", "type", s);
-	if (card->cardtype == avm_t1isa)
-		seq_printf(m, "%-16s %d\n", "cardnr", card->cardnr);
-
-	s = cinfo->version[VER_DRIVER];
-	if (s)
-		seq_printf(m, "%-16s %s\n", "ver_driver", s);
-
-	s = cinfo->version[VER_CARDTYPE];
-	if (s)
-		seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
-
-	s = cinfo->version[VER_SERIAL];
-	if (s)
-		seq_printf(m, "%-16s %s\n", "ver_serial", s);
-
-	if (card->cardtype != avm_m1) {
-		flag = ((u8 *)(ctrl->profile.manu))[3];
-		if (flag)
-			seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
-				   "protocol",
-				   (flag & 0x01) ? " DSS1" : "",
-				   (flag & 0x02) ? " CT1" : "",
-				   (flag & 0x04) ? " VN3" : "",
-				   (flag & 0x08) ? " NI1" : "",
-				   (flag & 0x10) ? " AUSTEL" : "",
-				   (flag & 0x20) ? " ESS" : "",
-				   (flag & 0x40) ? " 1TR6" : ""
-				);
-	}
-	if (card->cardtype != avm_m1) {
-		flag = ((u8 *)(ctrl->profile.manu))[5];
-		if (flag)
-			seq_printf(m, "%-16s%s%s%s%s\n",
-				   "linetype",
-				   (flag & 0x01) ? " point to point" : "",
-				   (flag & 0x02) ? " point to multipoint" : "",
-				   (flag & 0x08) ? " leased line without D-channel" : "",
-				   (flag & 0x04) ? " leased line with D-channel" : ""
-				);
-	}
-	seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
-
-	return 0;
-}
-EXPORT_SYMBOL(b1_proc_show);
-
-/* ------------------------------------------------------------- */
-
-#ifdef CONFIG_PCI
-
-avmcard_dmainfo *
-avmcard_dma_alloc(char *name, struct pci_dev *pdev, long rsize, long ssize)
-{
-	avmcard_dmainfo *p;
-	void *buf;
-
-	p = kzalloc(sizeof(avmcard_dmainfo), GFP_KERNEL);
-	if (!p) {
-		printk(KERN_WARNING "%s: no memory.\n", name);
-		goto err;
-	}
-
-	p->recvbuf.size = rsize;
-	buf = pci_alloc_consistent(pdev, rsize, &p->recvbuf.dmaaddr);
-	if (!buf) {
-		printk(KERN_WARNING "%s: allocation of receive dma buffer failed.\n", name);
-		goto err_kfree;
-	}
-	p->recvbuf.dmabuf = buf;
-
-	p->sendbuf.size = ssize;
-	buf = pci_alloc_consistent(pdev, ssize, &p->sendbuf.dmaaddr);
-	if (!buf) {
-		printk(KERN_WARNING "%s: allocation of send dma buffer failed.\n", name);
-		goto err_free_consistent;
-	}
-
-	p->sendbuf.dmabuf = buf;
-	skb_queue_head_init(&p->send_queue);
-
-	return p;
-
-err_free_consistent:
-	pci_free_consistent(p->pcidev, p->recvbuf.size,
-			    p->recvbuf.dmabuf, p->recvbuf.dmaaddr);
-err_kfree:
-	kfree(p);
-err:
-	return NULL;
-}
-
-void avmcard_dma_free(avmcard_dmainfo *p)
-{
-	pci_free_consistent(p->pcidev, p->recvbuf.size,
-			    p->recvbuf.dmabuf, p->recvbuf.dmaaddr);
-	pci_free_consistent(p->pcidev, p->sendbuf.size,
-			    p->sendbuf.dmabuf, p->sendbuf.dmaaddr);
-	skb_queue_purge(&p->send_queue);
-	kfree(p);
-}
-
-EXPORT_SYMBOL(avmcard_dma_alloc);
-EXPORT_SYMBOL(avmcard_dma_free);
-
-#endif
-
-EXPORT_SYMBOL(b1_irq_table);
-
-EXPORT_SYMBOL(b1_alloc_card);
-EXPORT_SYMBOL(b1_free_card);
-EXPORT_SYMBOL(b1_detect);
-EXPORT_SYMBOL(b1_getrevision);
-EXPORT_SYMBOL(b1_load_t4file);
-EXPORT_SYMBOL(b1_load_config);
-EXPORT_SYMBOL(b1_loaded);
-EXPORT_SYMBOL(b1_load_firmware);
-EXPORT_SYMBOL(b1_reset_ctr);
-EXPORT_SYMBOL(b1_register_appl);
-EXPORT_SYMBOL(b1_release_appl);
-EXPORT_SYMBOL(b1_send_message);
-
-EXPORT_SYMBOL(b1_parse_version);
-EXPORT_SYMBOL(b1_interrupt);
-
-static int __init b1_init(void)
-{
-	char *p;
-	char rev[32];
-
-	p = strchr(revision, ':');
-	if (p && p[1]) {
-		strlcpy(rev, p + 2, 32);
-		p = strchr(rev, '$');
-		if (p && p > rev)
-			*(p - 1) = 0;
-	} else {
-		strcpy(rev, "1.0");
-	}
-	printk(KERN_INFO "b1: revision %s\n", rev);
-
-	return 0;
-}
-
-static void __exit b1_exit(void)
-{
-}
-
-module_init(b1_init);
-module_exit(b1_exit);
diff --git a/drivers/staging/isdn/avm/b1dma.c b/drivers/staging/isdn/avm/b1dma.c
deleted file mode 100644
index 6a3dc99..0000000
--- a/drivers/staging/isdn/avm/b1dma.c
+++ /dev/null
@@ -1,981 +0,0 @@
-/* $Id: b1dma.c,v 1.1.2.3 2004/02/10 01:07:12 keil Exp $
- *
- * Common module for AVM B1 cards that support dma with AMCC
- *
- * Copyright 2000 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/capi.h>
-#include <linux/kernelcapi.h>
-#include <linux/gfp.h>
-#include <asm/io.h>
-#include <linux/init.h>
-#include <linux/uaccess.h>
-#include <linux/netdevice.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-
-static char *revision = "$Revision: 1.1.2.3 $";
-
-#undef AVM_B1DMA_DEBUG
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: DMA support for active AVM cards");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-static bool suppress_pollack = 0;
-module_param(suppress_pollack, bool, 0);
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_dispatch_tx(avmcard *card);
-
-/* ------------------------------------------------------------- */
-
-/* S5933 */
-
-#define	AMCC_RXPTR	0x24
-#define	AMCC_RXLEN	0x28
-#define	AMCC_TXPTR	0x2c
-#define	AMCC_TXLEN	0x30
-
-#define	AMCC_INTCSR	0x38
-#	define EN_READ_TC_INT		0x00008000L
-#	define EN_WRITE_TC_INT		0x00004000L
-#	define EN_TX_TC_INT		EN_READ_TC_INT
-#	define EN_RX_TC_INT		EN_WRITE_TC_INT
-#	define AVM_FLAG			0x30000000L
-
-#	define ANY_S5933_INT		0x00800000L
-#	define READ_TC_INT		0x00080000L
-#	define WRITE_TC_INT		0x00040000L
-#	define	TX_TC_INT		READ_TC_INT
-#	define	RX_TC_INT		WRITE_TC_INT
-#	define MASTER_ABORT_INT		0x00100000L
-#	define TARGET_ABORT_INT		0x00200000L
-#	define BUS_MASTER_INT		0x00200000L
-#	define ALL_INT			0x000C0000L
-
-#define	AMCC_MCSR	0x3c
-#	define A2P_HI_PRIORITY		0x00000100L
-#	define EN_A2P_TRANSFERS		0x00000400L
-#	define P2A_HI_PRIORITY		0x00001000L
-#	define EN_P2A_TRANSFERS		0x00004000L
-#	define RESET_A2P_FLAGS		0x04000000L
-#	define RESET_P2A_FLAGS		0x02000000L
-
-/* ------------------------------------------------------------- */
-
-static inline void b1dma_writel(avmcard *card, u32 value, int off)
-{
-	writel(value, card->mbase + off);
-}
-
-static inline u32 b1dma_readl(avmcard *card, int off)
-{
-	return readl(card->mbase + off);
-}
-
-/* ------------------------------------------------------------- */
-
-static inline int b1dma_tx_empty(unsigned int port)
-{
-	return inb(port + 0x03) & 0x1;
-}
-
-static inline int b1dma_rx_full(unsigned int port)
-{
-	return inb(port + 0x02) & 0x1;
-}
-
-static int b1dma_tolink(avmcard *card, void *buf, unsigned int len)
-{
-	unsigned long stop = jiffies + 1 * HZ;	/* maximum wait time 1 sec */
-	unsigned char *s = (unsigned char *)buf;
-	while (len--) {
-		while (!b1dma_tx_empty(card->port)
-		       && time_before(jiffies, stop));
-		if (!b1dma_tx_empty(card->port))
-			return -1;
-		t1outp(card->port, 0x01, *s++);
-	}
-	return 0;
-}
-
-static int b1dma_fromlink(avmcard *card, void *buf, unsigned int len)
-{
-	unsigned long stop = jiffies + 1 * HZ;	/* maximum wait time 1 sec */
-	unsigned char *s = (unsigned char *)buf;
-	while (len--) {
-		while (!b1dma_rx_full(card->port)
-		       && time_before(jiffies, stop));
-		if (!b1dma_rx_full(card->port))
-			return -1;
-		*s++ = t1inp(card->port, 0x00);
-	}
-	return 0;
-}
-
-static int WriteReg(avmcard *card, u32 reg, u8 val)
-{
-	u8 cmd = 0x00;
-	if (b1dma_tolink(card, &cmd, 1) == 0
-	    && b1dma_tolink(card, &reg, 4) == 0) {
-		u32 tmp = val;
-		return b1dma_tolink(card, &tmp, 4);
-	}
-	return -1;
-}
-
-static u8 ReadReg(avmcard *card, u32 reg)
-{
-	u8 cmd = 0x01;
-	if (b1dma_tolink(card, &cmd, 1) == 0
-	    && b1dma_tolink(card, &reg, 4) == 0) {
-		u32 tmp;
-		if (b1dma_fromlink(card, &tmp, 4) == 0)
-			return (u8)tmp;
-	}
-	return 0xff;
-}
-
-/* ------------------------------------------------------------- */
-
-static inline void _put_byte(void **pp, u8 val)
-{
-	u8 *s = *pp;
-	*s++ = val;
-	*pp = s;
-}
-
-static inline void _put_word(void **pp, u32 val)
-{
-	u8 *s = *pp;
-	*s++ = val & 0xff;
-	*s++ = (val >> 8) & 0xff;
-	*s++ = (val >> 16) & 0xff;
-	*s++ = (val >> 24) & 0xff;
-	*pp = s;
-}
-
-static inline void _put_slice(void **pp, unsigned char *dp, unsigned int len)
-{
-	unsigned i = len;
-	_put_word(pp, i);
-	while (i-- > 0)
-		_put_byte(pp, *dp++);
-}
-
-static inline u8 _get_byte(void **pp)
-{
-	u8 *s = *pp;
-	u8 val;
-	val = *s++;
-	*pp = s;
-	return val;
-}
-
-static inline u32 _get_word(void **pp)
-{
-	u8 *s = *pp;
-	u32 val;
-	val = *s++;
-	val |= (*s++ << 8);
-	val |= (*s++ << 16);
-	val |= (*s++ << 24);
-	*pp = s;
-	return val;
-}
-
-static inline u32 _get_slice(void **pp, unsigned char *dp)
-{
-	unsigned int len, i;
-
-	len = i = _get_word(pp);
-	while (i-- > 0) *dp++ = _get_byte(pp);
-	return len;
-}
-
-/* ------------------------------------------------------------- */
-
-void b1dma_reset(avmcard *card)
-{
-	card->csr = 0x0;
-	b1dma_writel(card, card->csr, AMCC_INTCSR);
-	b1dma_writel(card, 0, AMCC_MCSR);
-	b1dma_writel(card, 0, AMCC_RXLEN);
-	b1dma_writel(card, 0, AMCC_TXLEN);
-
-	t1outp(card->port, 0x10, 0x00);
-	t1outp(card->port, 0x07, 0x00);
-
-	b1dma_writel(card, 0, AMCC_MCSR);
-	mdelay(10);
-	b1dma_writel(card, 0x0f000000, AMCC_MCSR); /* reset all */
-	mdelay(10);
-	b1dma_writel(card, 0, AMCC_MCSR);
-	if (card->cardtype == avm_t1pci)
-		mdelay(42);
-	else
-		mdelay(10);
-}
-
-/* ------------------------------------------------------------- */
-
-static int b1dma_detect(avmcard *card)
-{
-	b1dma_writel(card, 0, AMCC_MCSR);
-	mdelay(10);
-	b1dma_writel(card, 0x0f000000, AMCC_MCSR); /* reset all */
-	mdelay(10);
-	b1dma_writel(card, 0, AMCC_MCSR);
-	mdelay(42);
-
-	b1dma_writel(card, 0, AMCC_RXLEN);
-	b1dma_writel(card, 0, AMCC_TXLEN);
-	card->csr = 0x0;
-	b1dma_writel(card, card->csr, AMCC_INTCSR);
-
-	if (b1dma_readl(card, AMCC_MCSR) != 0x000000E6)
-		return 1;
-
-	b1dma_writel(card, 0xffffffff, AMCC_RXPTR);
-	b1dma_writel(card, 0xffffffff, AMCC_TXPTR);
-	if (b1dma_readl(card, AMCC_RXPTR) != 0xfffffffc
-	    || b1dma_readl(card, AMCC_TXPTR) != 0xfffffffc)
-		return 2;
-
-	b1dma_writel(card, 0x0, AMCC_RXPTR);
-	b1dma_writel(card, 0x0, AMCC_TXPTR);
-	if (b1dma_readl(card, AMCC_RXPTR) != 0x0
-	    || b1dma_readl(card, AMCC_TXPTR) != 0x0)
-		return 3;
-
-	t1outp(card->port, 0x10, 0x00);
-	t1outp(card->port, 0x07, 0x00);
-
-	t1outp(card->port, 0x02, 0x02);
-	t1outp(card->port, 0x03, 0x02);
-
-	if ((t1inp(card->port, 0x02) & 0xFE) != 0x02
-	    || t1inp(card->port, 0x3) != 0x03)
-		return 4;
-
-	t1outp(card->port, 0x02, 0x00);
-	t1outp(card->port, 0x03, 0x00);
-
-	if ((t1inp(card->port, 0x02) & 0xFE) != 0x00
-	    || t1inp(card->port, 0x3) != 0x01)
-		return 5;
-
-	return 0;
-}
-
-int t1pci_detect(avmcard *card)
-{
-	int ret;
-
-	if ((ret = b1dma_detect(card)) != 0)
-		return ret;
-
-	/* Transputer test */
-
-	if (WriteReg(card, 0x80001000, 0x11) != 0
-	    || WriteReg(card, 0x80101000, 0x22) != 0
-	    || WriteReg(card, 0x80201000, 0x33) != 0
-	    || WriteReg(card, 0x80301000, 0x44) != 0)
-		return 6;
-
-	if (ReadReg(card, 0x80001000) != 0x11
-	    || ReadReg(card, 0x80101000) != 0x22
-	    || ReadReg(card, 0x80201000) != 0x33
-	    || ReadReg(card, 0x80301000) != 0x44)
-		return 7;
-
-	if (WriteReg(card, 0x80001000, 0x55) != 0
-	    || WriteReg(card, 0x80101000, 0x66) != 0
-	    || WriteReg(card, 0x80201000, 0x77) != 0
-	    || WriteReg(card, 0x80301000, 0x88) != 0)
-		return 8;
-
-	if (ReadReg(card, 0x80001000) != 0x55
-	    || ReadReg(card, 0x80101000) != 0x66
-	    || ReadReg(card, 0x80201000) != 0x77
-	    || ReadReg(card, 0x80301000) != 0x88)
-		return 9;
-
-	return 0;
-}
-
-int b1pciv4_detect(avmcard *card)
-{
-	int ret, i;
-
-	if ((ret = b1dma_detect(card)) != 0)
-		return ret;
-
-	for (i = 0; i < 5; i++) {
-		if (WriteReg(card, 0x80A00000, 0x21) != 0)
-			return 6;
-		if ((ReadReg(card, 0x80A00000) & 0x01) != 0x01)
-			return 7;
-	}
-	for (i = 0; i < 5; i++) {
-		if (WriteReg(card, 0x80A00000, 0x20) != 0)
-			return 8;
-		if ((ReadReg(card, 0x80A00000) & 0x01) != 0x00)
-			return 9;
-	}
-
-	return 0;
-}
-
-static void b1dma_queue_tx(avmcard *card, struct sk_buff *skb)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
-
-	skb_queue_tail(&card->dma->send_queue, skb);
-
-	if (!(card->csr & EN_TX_TC_INT)) {
-		b1dma_dispatch_tx(card);
-		b1dma_writel(card, card->csr, AMCC_INTCSR);
-	}
-
-	spin_unlock_irqrestore(&card->lock, flags);
-}
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_dispatch_tx(avmcard *card)
-{
-	avmcard_dmainfo *dma = card->dma;
-	struct sk_buff *skb;
-	u8 cmd, subcmd;
-	u16 len;
-	u32 txlen;
-	void *p;
-
-	skb = skb_dequeue(&dma->send_queue);
-
-	len = CAPIMSG_LEN(skb->data);
-
-	if (len) {
-		cmd = CAPIMSG_COMMAND(skb->data);
-		subcmd = CAPIMSG_SUBCOMMAND(skb->data);
-
-		p = dma->sendbuf.dmabuf;
-
-		if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
-			u16 dlen = CAPIMSG_DATALEN(skb->data);
-			_put_byte(&p, SEND_DATA_B3_REQ);
-			_put_slice(&p, skb->data, len);
-			_put_slice(&p, skb->data + len, dlen);
-		} else {
-			_put_byte(&p, SEND_MESSAGE);
-			_put_slice(&p, skb->data, len);
-		}
-		txlen = (u8 *)p - (u8 *)dma->sendbuf.dmabuf;
-#ifdef AVM_B1DMA_DEBUG
-		printk(KERN_DEBUG "tx: put msg len=%d\n", txlen);
-#endif
-	} else {
-		txlen = skb->len - 2;
-#ifdef AVM_B1DMA_POLLDEBUG
-		if (skb->data[2] == SEND_POLLACK)
-			printk(KERN_INFO "%s: send ack\n", card->name);
-#endif
-#ifdef AVM_B1DMA_DEBUG
-		printk(KERN_DEBUG "tx: put 0x%x len=%d\n",
-		       skb->data[2], txlen);
-#endif
-		skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
-						 skb->len - 2);
-	}
-	txlen = (txlen + 3) & ~3;
-
-	b1dma_writel(card, dma->sendbuf.dmaaddr, AMCC_TXPTR);
-	b1dma_writel(card, txlen, AMCC_TXLEN);
-
-	card->csr |= EN_TX_TC_INT;
-
-	dev_kfree_skb_any(skb);
-}
-
-/* ------------------------------------------------------------- */
-
-static void queue_pollack(avmcard *card)
-{
-	struct sk_buff *skb;
-	void *p;
-
-	skb = alloc_skb(3, GFP_ATOMIC);
-	if (!skb) {
-		printk(KERN_CRIT "%s: no memory, lost poll ack\n",
-		       card->name);
-		return;
-	}
-	p = skb->data;
-	_put_byte(&p, 0);
-	_put_byte(&p, 0);
-	_put_byte(&p, SEND_POLLACK);
-	skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
-	b1dma_queue_tx(card, skb);
-}
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_handle_rx(avmcard *card)
-{
-	avmctrl_info *cinfo = &card->ctrlinfo[0];
-	avmcard_dmainfo *dma = card->dma;
-	struct capi_ctr *ctrl = &cinfo->capi_ctrl;
-	struct sk_buff *skb;
-	void *p = dma->recvbuf.dmabuf + 4;
-	u32 ApplId, MsgLen, DataB3Len, NCCI, WindowSize;
-	u8 b1cmd =  _get_byte(&p);
-
-#ifdef AVM_B1DMA_DEBUG
-	printk(KERN_DEBUG "rx: 0x%x %lu\n", b1cmd, (unsigned long)dma->recvlen);
-#endif
-
-	switch (b1cmd) {
-	case RECEIVE_DATA_B3_IND:
-
-		ApplId = (unsigned) _get_word(&p);
-		MsgLen = _get_slice(&p, card->msgbuf);
-		DataB3Len = _get_slice(&p, card->databuf);
-
-		if (MsgLen < 30) { /* not CAPI 64Bit */
-			memset(card->msgbuf + MsgLen, 0, 30 - MsgLen);
-			MsgLen = 30;
-			CAPIMSG_SETLEN(card->msgbuf, 30);
-		}
-		if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
-			printk(KERN_ERR "%s: incoming packet dropped\n",
-			       card->name);
-		} else {
-			skb_put_data(skb, card->msgbuf, MsgLen);
-			skb_put_data(skb, card->databuf, DataB3Len);
-			capi_ctr_handle_message(ctrl, ApplId, skb);
-		}
-		break;
-
-	case RECEIVE_MESSAGE:
-
-		ApplId = (unsigned) _get_word(&p);
-		MsgLen = _get_slice(&p, card->msgbuf);
-		if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
-			printk(KERN_ERR "%s: incoming packet dropped\n",
-			       card->name);
-		} else {
-			skb_put_data(skb, card->msgbuf, MsgLen);
-			if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF) {
-				spin_lock(&card->lock);
-				capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
-						     CAPIMSG_NCCI(skb->data),
-						     CAPIMSG_MSGID(skb->data));
-				spin_unlock(&card->lock);
-			}
-			capi_ctr_handle_message(ctrl, ApplId, skb);
-		}
-		break;
-
-	case RECEIVE_NEW_NCCI:
-
-		ApplId = _get_word(&p);
-		NCCI = _get_word(&p);
-		WindowSize = _get_word(&p);
-		spin_lock(&card->lock);
-		capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
-		spin_unlock(&card->lock);
-		break;
-
-	case RECEIVE_FREE_NCCI:
-
-		ApplId = _get_word(&p);
-		NCCI = _get_word(&p);
-
-		if (NCCI != 0xffffffff) {
-			spin_lock(&card->lock);
-			capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
-			spin_unlock(&card->lock);
-		}
-		break;
-
-	case RECEIVE_START:
-#ifdef AVM_B1DMA_POLLDEBUG
-		printk(KERN_INFO "%s: receive poll\n", card->name);
-#endif
-		if (!suppress_pollack)
-			queue_pollack(card);
-		capi_ctr_resume_output(ctrl);
-		break;
-
-	case RECEIVE_STOP:
-		capi_ctr_suspend_output(ctrl);
-		break;
-
-	case RECEIVE_INIT:
-
-		cinfo->versionlen = _get_slice(&p, cinfo->versionbuf);
-		b1_parse_version(cinfo);
-		printk(KERN_INFO "%s: %s-card (%s) now active\n",
-		       card->name,
-		       cinfo->version[VER_CARDTYPE],
-		       cinfo->version[VER_DRIVER]);
-		capi_ctr_ready(ctrl);
-		break;
-
-	case RECEIVE_TASK_READY:
-		ApplId = (unsigned) _get_word(&p);
-		MsgLen = _get_slice(&p, card->msgbuf);
-		card->msgbuf[MsgLen] = 0;
-		while (MsgLen > 0
-		       && (card->msgbuf[MsgLen - 1] == '\n'
-			   || card->msgbuf[MsgLen - 1] == '\r')) {
-			card->msgbuf[MsgLen - 1] = 0;
-			MsgLen--;
-		}
-		printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
-		       card->name, ApplId, card->msgbuf);
-		break;
-
-	case RECEIVE_DEBUGMSG:
-		MsgLen = _get_slice(&p, card->msgbuf);
-		card->msgbuf[MsgLen] = 0;
-		while (MsgLen > 0
-		       && (card->msgbuf[MsgLen - 1] == '\n'
-			   || card->msgbuf[MsgLen - 1] == '\r')) {
-			card->msgbuf[MsgLen - 1] = 0;
-			MsgLen--;
-		}
-		printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
-		break;
-
-	default:
-		printk(KERN_ERR "%s: b1dma_interrupt: 0x%x ???\n",
-		       card->name, b1cmd);
-		return;
-	}
-}
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_handle_interrupt(avmcard *card)
-{
-	u32 status;
-	u32 newcsr;
-
-	spin_lock(&card->lock);
-
-	status = b1dma_readl(card, AMCC_INTCSR);
-	if ((status & ANY_S5933_INT) == 0) {
-		spin_unlock(&card->lock);
-		return;
-	}
-
-	newcsr = card->csr | (status & ALL_INT);
-	if (status & TX_TC_INT) newcsr &= ~EN_TX_TC_INT;
-	if (status & RX_TC_INT) newcsr &= ~EN_RX_TC_INT;
-	b1dma_writel(card, newcsr, AMCC_INTCSR);
-
-	if ((status & RX_TC_INT) != 0) {
-		struct avmcard_dmainfo *dma = card->dma;
-		u32 rxlen;
-		if (card->dma->recvlen == 0) {
-			rxlen = b1dma_readl(card, AMCC_RXLEN);
-			if (rxlen == 0) {
-				dma->recvlen = *((u32 *)dma->recvbuf.dmabuf);
-				rxlen = (dma->recvlen + 3) & ~3;
-				b1dma_writel(card, dma->recvbuf.dmaaddr + 4, AMCC_RXPTR);
-				b1dma_writel(card, rxlen, AMCC_RXLEN);
-#ifdef AVM_B1DMA_DEBUG
-			} else {
-				printk(KERN_ERR "%s: rx not complete (%d).\n",
-				       card->name, rxlen);
-#endif
-			}
-		} else {
-			spin_unlock(&card->lock);
-			b1dma_handle_rx(card);
-			dma->recvlen = 0;
-			spin_lock(&card->lock);
-			b1dma_writel(card, dma->recvbuf.dmaaddr, AMCC_RXPTR);
-			b1dma_writel(card, 4, AMCC_RXLEN);
-		}
-	}
-
-	if ((status & TX_TC_INT) != 0) {
-		if (skb_queue_empty(&card->dma->send_queue))
-			card->csr &= ~EN_TX_TC_INT;
-		else
-			b1dma_dispatch_tx(card);
-	}
-	b1dma_writel(card, card->csr, AMCC_INTCSR);
-
-	spin_unlock(&card->lock);
-}
-
-irqreturn_t b1dma_interrupt(int interrupt, void *devptr)
-{
-	avmcard *card = devptr;
-
-	b1dma_handle_interrupt(card);
-	return IRQ_HANDLED;
-}
-
-/* ------------------------------------------------------------- */
-
-static int b1dma_loaded(avmcard *card)
-{
-	unsigned long stop;
-	unsigned char ans;
-	unsigned long tout = 2;
-	unsigned int base = card->port;
-
-	for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
-		if (b1_tx_empty(base))
-			break;
-	}
-	if (!b1_tx_empty(base)) {
-		printk(KERN_ERR "%s: b1dma_loaded: tx err, corrupted t4 file ?\n",
-		       card->name);
-		return 0;
-	}
-	b1_put_byte(base, SEND_POLLACK);
-	for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
-		if (b1_rx_full(base)) {
-			if ((ans = b1_get_byte(base)) == RECEIVE_POLLDWORD) {
-				return 1;
-			}
-			printk(KERN_ERR "%s: b1dma_loaded: got 0x%x, firmware not running in dword mode\n", card->name, ans);
-			return 0;
-		}
-	}
-	printk(KERN_ERR "%s: b1dma_loaded: firmware not running\n", card->name);
-	return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_send_init(avmcard *card)
-{
-	struct sk_buff *skb;
-	void *p;
-
-	skb = alloc_skb(15, GFP_ATOMIC);
-	if (!skb) {
-		printk(KERN_CRIT "%s: no memory, lost register appl.\n",
-		       card->name);
-		return;
-	}
-	p = skb->data;
-	_put_byte(&p, 0);
-	_put_byte(&p, 0);
-	_put_byte(&p, SEND_INIT);
-	_put_word(&p, CAPI_MAXAPPL);
-	_put_word(&p, AVM_NCCI_PER_CHANNEL * 30);
-	_put_word(&p, card->cardnr - 1);
-	skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
-	b1dma_queue_tx(card, skb);
-}
-
-int b1dma_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	int retval;
-
-	b1dma_reset(card);
-
-	if ((retval = b1_load_t4file(card, &data->firmware))) {
-		b1dma_reset(card);
-		printk(KERN_ERR "%s: failed to load t4file!!\n",
-		       card->name);
-		return retval;
-	}
-
-	if (data->configuration.len > 0 && data->configuration.data) {
-		if ((retval = b1_load_config(card, &data->configuration))) {
-			b1dma_reset(card);
-			printk(KERN_ERR "%s: failed to load config!!\n",
-			       card->name);
-			return retval;
-		}
-	}
-
-	if (!b1dma_loaded(card)) {
-		b1dma_reset(card);
-		printk(KERN_ERR "%s: failed to load t4file.\n", card->name);
-		return -EIO;
-	}
-
-	card->csr = AVM_FLAG;
-	b1dma_writel(card, card->csr, AMCC_INTCSR);
-	b1dma_writel(card, EN_A2P_TRANSFERS | EN_P2A_TRANSFERS | A2P_HI_PRIORITY |
-		     P2A_HI_PRIORITY | RESET_A2P_FLAGS | RESET_P2A_FLAGS,
-		     AMCC_MCSR);
-	t1outp(card->port, 0x07, 0x30);
-	t1outp(card->port, 0x10, 0xF0);
-
-	card->dma->recvlen = 0;
-	b1dma_writel(card, card->dma->recvbuf.dmaaddr, AMCC_RXPTR);
-	b1dma_writel(card, 4, AMCC_RXLEN);
-	card->csr |= EN_RX_TC_INT;
-	b1dma_writel(card, card->csr, AMCC_INTCSR);
-
-	b1dma_send_init(card);
-
-	return 0;
-}
-
-void b1dma_reset_ctr(struct capi_ctr *ctrl)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
-	b1dma_reset(card);
-
-	memset(cinfo->version, 0, sizeof(cinfo->version));
-	capilib_release(&cinfo->ncci_head);
-	spin_unlock_irqrestore(&card->lock, flags);
-	capi_ctr_down(ctrl);
-}
-
-/* ------------------------------------------------------------- */
-
-void b1dma_register_appl(struct capi_ctr *ctrl,
-			 u16 appl,
-			 capi_register_params *rp)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	struct sk_buff *skb;
-	int want = rp->level3cnt;
-	int nconn;
-	void *p;
-
-	if (want > 0) nconn = want;
-	else nconn = ctrl->profile.nbchannel * -want;
-	if (nconn == 0) nconn = ctrl->profile.nbchannel;
-
-	skb = alloc_skb(23, GFP_ATOMIC);
-	if (!skb) {
-		printk(KERN_CRIT "%s: no memory, lost register appl.\n",
-		       card->name);
-		return;
-	}
-	p = skb->data;
-	_put_byte(&p, 0);
-	_put_byte(&p, 0);
-	_put_byte(&p, SEND_REGISTER);
-	_put_word(&p, appl);
-	_put_word(&p, 1024 * (nconn + 1));
-	_put_word(&p, nconn);
-	_put_word(&p, rp->datablkcnt);
-	_put_word(&p, rp->datablklen);
-	skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
-	b1dma_queue_tx(card, skb);
-}
-
-/* ------------------------------------------------------------- */
-
-void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	struct sk_buff *skb;
-	void *p;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
-	capilib_release_appl(&cinfo->ncci_head, appl);
-	spin_unlock_irqrestore(&card->lock, flags);
-
-	skb = alloc_skb(7, GFP_ATOMIC);
-	if (!skb) {
-		printk(KERN_CRIT "%s: no memory, lost release appl.\n",
-		       card->name);
-		return;
-	}
-	p = skb->data;
-	_put_byte(&p, 0);
-	_put_byte(&p, 0);
-	_put_byte(&p, SEND_RELEASE);
-	_put_word(&p, appl);
-
-	skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
-	b1dma_queue_tx(card, skb);
-}
-
-/* ------------------------------------------------------------- */
-
-u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	u16 retval = CAPI_NOERROR;
-
-	if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
-		unsigned long flags;
-		spin_lock_irqsave(&card->lock, flags);
-		retval = capilib_data_b3_req(&cinfo->ncci_head,
-					     CAPIMSG_APPID(skb->data),
-					     CAPIMSG_NCCI(skb->data),
-					     CAPIMSG_MSGID(skb->data));
-		spin_unlock_irqrestore(&card->lock, flags);
-	}
-	if (retval == CAPI_NOERROR)
-		b1dma_queue_tx(card, skb);
-
-	return retval;
-}
-
-/* ------------------------------------------------------------- */
-
-int b1dma_proc_show(struct seq_file *m, void *v)
-{
-	struct capi_ctr *ctrl = m->private;
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	u8 flag;
-	char *s;
-	u32 txoff, txlen, rxoff, rxlen, csr;
-	unsigned long flags;
-
-	seq_printf(m, "%-16s %s\n", "name", card->name);
-	seq_printf(m, "%-16s 0x%x\n", "io", card->port);
-	seq_printf(m, "%-16s %d\n", "irq", card->irq);
-	seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
-	switch (card->cardtype) {
-	case avm_b1isa: s = "B1 ISA"; break;
-	case avm_b1pci: s = "B1 PCI"; break;
-	case avm_b1pcmcia: s = "B1 PCMCIA"; break;
-	case avm_m1: s = "M1"; break;
-	case avm_m2: s = "M2"; break;
-	case avm_t1isa: s = "T1 ISA (HEMA)"; break;
-	case avm_t1pci: s = "T1 PCI"; break;
-	case avm_c4: s = "C4"; break;
-	case avm_c2: s = "C2"; break;
-	default: s = "???"; break;
-	}
-	seq_printf(m, "%-16s %s\n", "type", s);
-	if ((s = cinfo->version[VER_DRIVER]) != NULL)
-		seq_printf(m, "%-16s %s\n", "ver_driver", s);
-	if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
-		seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
-	if ((s = cinfo->version[VER_SERIAL]) != NULL)
-		seq_printf(m, "%-16s %s\n", "ver_serial", s);
-
-	if (card->cardtype != avm_m1) {
-		flag = ((u8 *)(ctrl->profile.manu))[3];
-		if (flag)
-			seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
-				   "protocol",
-				   (flag & 0x01) ? " DSS1" : "",
-				   (flag & 0x02) ? " CT1" : "",
-				   (flag & 0x04) ? " VN3" : "",
-				   (flag & 0x08) ? " NI1" : "",
-				   (flag & 0x10) ? " AUSTEL" : "",
-				   (flag & 0x20) ? " ESS" : "",
-				   (flag & 0x40) ? " 1TR6" : ""
-				);
-	}
-	if (card->cardtype != avm_m1) {
-		flag = ((u8 *)(ctrl->profile.manu))[5];
-		if (flag)
-			seq_printf(m, "%-16s%s%s%s%s\n",
-				   "linetype",
-				   (flag & 0x01) ? " point to point" : "",
-				   (flag & 0x02) ? " point to multipoint" : "",
-				   (flag & 0x08) ? " leased line without D-channel" : "",
-				   (flag & 0x04) ? " leased line with D-channel" : ""
-				);
-	}
-	seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
-
-
-	spin_lock_irqsave(&card->lock, flags);
-
-	txoff = (dma_addr_t)b1dma_readl(card, AMCC_TXPTR)-card->dma->sendbuf.dmaaddr;
-	txlen = b1dma_readl(card, AMCC_TXLEN);
-
-	rxoff = (dma_addr_t)b1dma_readl(card, AMCC_RXPTR)-card->dma->recvbuf.dmaaddr;
-	rxlen = b1dma_readl(card, AMCC_RXLEN);
-
-	csr  = b1dma_readl(card, AMCC_INTCSR);
-
-	spin_unlock_irqrestore(&card->lock, flags);
-
-	seq_printf(m, "%-16s 0x%lx\n", "csr (cached)", (unsigned long)card->csr);
-	seq_printf(m, "%-16s 0x%lx\n", "csr", (unsigned long)csr);
-	seq_printf(m, "%-16s %lu\n", "txoff", (unsigned long)txoff);
-	seq_printf(m, "%-16s %lu\n", "txlen", (unsigned long)txlen);
-	seq_printf(m, "%-16s %lu\n", "rxoff", (unsigned long)rxoff);
-	seq_printf(m, "%-16s %lu\n", "rxlen", (unsigned long)rxlen);
-
-	return 0;
-}
-EXPORT_SYMBOL(b1dma_proc_show);
-
-/* ------------------------------------------------------------- */
-
-EXPORT_SYMBOL(b1dma_reset);
-EXPORT_SYMBOL(t1pci_detect);
-EXPORT_SYMBOL(b1pciv4_detect);
-EXPORT_SYMBOL(b1dma_interrupt);
-
-EXPORT_SYMBOL(b1dma_load_firmware);
-EXPORT_SYMBOL(b1dma_reset_ctr);
-EXPORT_SYMBOL(b1dma_register_appl);
-EXPORT_SYMBOL(b1dma_release_appl);
-EXPORT_SYMBOL(b1dma_send_message);
-
-static int __init b1dma_init(void)
-{
-	char *p;
-	char rev[32];
-
-	if ((p = strchr(revision, ':')) != NULL && p[1]) {
-		strlcpy(rev, p + 2, sizeof(rev));
-		if ((p = strchr(rev, '$')) != NULL && p > rev)
-			*(p - 1) = 0;
-	} else
-		strcpy(rev, "1.0");
-
-	printk(KERN_INFO "b1dma: revision %s\n", rev);
-
-	return 0;
-}
-
-static void __exit b1dma_exit(void)
-{
-}
-
-module_init(b1dma_init);
-module_exit(b1dma_exit);
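
The command frames queued above (for instance the SEND_RELEASE message built in b1dma_release_appl()) are packed little-endian by the drivers' _put_byte()/_put_word() helpers, whose definitions appear in the c4.c hunk further down. A minimal userspace sketch of that packing convention — a plain buffer instead of an skb, and a placeholder opcode value since the real SEND_RELEASE constant is not shown here — looks like this:

#include <stdint.h>
#include <stdio.h>

/* Little-endian packers in the style of the drivers' _put_byte()/_put_word(). */
static void put_byte(uint8_t **pp, uint8_t val)
{
	*(*pp)++ = val;
}

static void put_word(uint8_t **pp, uint32_t val)
{
	put_byte(pp, val & 0xff);
	put_byte(pp, (val >> 8) & 0xff);
	put_byte(pp, (val >> 16) & 0xff);
	put_byte(pp, (val >> 24) & 0xff);
}

int main(void)
{
	uint8_t buf[7], *p = buf;
	uint16_t appl = 42;		/* example application id */
	uint8_t send_release = 0x25;	/* placeholder for the driver's SEND_RELEASE opcode */

	put_byte(&p, 0);
	put_byte(&p, 0);
	put_byte(&p, send_release);
	put_word(&p, appl);		/* _put_word() always writes 32 bits, hence the 7-byte frame */

	printf("packed %zu bytes\n", (size_t)(p - buf));
	return 0;
}
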
diff --git a/drivers/staging/isdn/avm/b1isa.c b/drivers/staging/isdn/avm/b1isa.c
deleted file mode 100644
index cdfea72..0000000
--- a/drivers/staging/isdn/avm/b1isa.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/* $Id: b1isa.c,v 1.1.2.3 2004/02/10 01:07:12 keil Exp $
- *
- * Module for AVM B1 ISA-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/capi.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.3 $";
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM B1 ISA card");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static void b1isa_remove(struct pci_dev *pdev)
-{
-	avmctrl_info *cinfo = pci_get_drvdata(pdev);
-	avmcard *card;
-
-	if (!cinfo)
-		return;
-
-	card = cinfo->card;
-
-	b1_reset(card->port);
-	b1_reset(card->port);
-
-	detach_capi_ctr(&cinfo->capi_ctrl);
-	free_irq(card->irq, card);
-	release_region(card->port, AVMB1_PORTLEN);
-	b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static char *b1isa_procinfo(struct capi_ctr *ctrl);
-
-static int b1isa_probe(struct pci_dev *pdev)
-{
-	avmctrl_info *cinfo;
-	avmcard *card;
-	int retval;
-
-	card = b1_alloc_card(1);
-	if (!card) {
-		printk(KERN_WARNING "b1isa: no memory.\n");
-		retval = -ENOMEM;
-		goto err;
-	}
-
-	cinfo = card->ctrlinfo;
-
-	card->port = pci_resource_start(pdev, 0);
-	card->irq = pdev->irq;
-	card->cardtype = avm_b1isa;
-	sprintf(card->name, "b1isa-%x", card->port);
-
-	if (card->port != 0x150 && card->port != 0x250
-	    && card->port != 0x300 && card->port != 0x340) {
-		printk(KERN_WARNING "b1isa: invalid port 0x%x.\n", card->port);
-		retval = -EINVAL;
-		goto err_free;
-	}
-	if (b1_irq_table[card->irq & 0xf] == 0) {
-		printk(KERN_WARNING "b1isa: irq %d not valid.\n", card->irq);
-		retval = -EINVAL;
-		goto err_free;
-	}
-	if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
-		printk(KERN_WARNING "b1isa: ports 0x%03x-0x%03x in use.\n",
-		       card->port, card->port + AVMB1_PORTLEN);
-		retval = -EBUSY;
-		goto err_free;
-	}
-	retval = request_irq(card->irq, b1_interrupt, 0, card->name, card);
-	if (retval) {
-		printk(KERN_ERR "b1isa: unable to get IRQ %d.\n", card->irq);
-		goto err_release_region;
-	}
-	b1_reset(card->port);
-	if ((retval = b1_detect(card->port, card->cardtype)) != 0) {
-		printk(KERN_NOTICE "b1isa: NO card at 0x%x (%d)\n",
-		       card->port, retval);
-		retval = -ENODEV;
-		goto err_free_irq;
-	}
-	b1_reset(card->port);
-	b1_getrevision(card);
-
-	cinfo->capi_ctrl.owner = THIS_MODULE;
-	cinfo->capi_ctrl.driver_name   = "b1isa";
-	cinfo->capi_ctrl.driverdata    = cinfo;
-	cinfo->capi_ctrl.register_appl = b1_register_appl;
-	cinfo->capi_ctrl.release_appl  = b1_release_appl;
-	cinfo->capi_ctrl.send_message  = b1_send_message;
-	cinfo->capi_ctrl.load_firmware = b1_load_firmware;
-	cinfo->capi_ctrl.reset_ctr     = b1_reset_ctr;
-	cinfo->capi_ctrl.procinfo      = b1isa_procinfo;
-	cinfo->capi_ctrl.proc_show     = b1_proc_show;
-	strcpy(cinfo->capi_ctrl.name, card->name);
-
-	retval = attach_capi_ctr(&cinfo->capi_ctrl);
-	if (retval) {
-		printk(KERN_ERR "b1isa: attach controller failed.\n");
-		goto err_free_irq;
-	}
-
-	printk(KERN_INFO "b1isa: AVM B1 ISA at i/o %#x, irq %d, revision %d\n",
-	       card->port, card->irq, card->revision);
-
-	pci_set_drvdata(pdev, cinfo);
-	return 0;
-
-err_free_irq:
-	free_irq(card->irq, card);
-err_release_region:
-	release_region(card->port, AVMB1_PORTLEN);
-err_free:
-	b1_free_card(card);
-err:
-	return retval;
-}
-
-static char *b1isa_procinfo(struct capi_ctr *ctrl)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
-	if (!cinfo)
-		return "";
-	sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
-		cinfo->cardname[0] ? cinfo->cardname : "-",
-		cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
-		cinfo->card ? cinfo->card->port : 0x0,
-		cinfo->card ? cinfo->card->irq : 0,
-		cinfo->card ? cinfo->card->revision : 0
-		);
-	return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-#define MAX_CARDS 4
-static struct pci_dev isa_dev[MAX_CARDS];
-static int io[MAX_CARDS];
-static int irq[MAX_CARDS];
-
-module_param_hw_array(io, int, ioport, NULL, 0);
-module_param_hw_array(irq, int, irq, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
-
-static int b1isa_add_card(struct capi_driver *driver, capicardparams *data)
-{
-	int i;
-
-	for (i = 0; i < MAX_CARDS; i++) {
-		if (isa_dev[i].resource[0].start)
-			continue;
-
-		isa_dev[i].resource[0].start = data->port;
-		isa_dev[i].irq = data->irq;
-
-		if (b1isa_probe(&isa_dev[i]) == 0)
-			return 0;
-	}
-	return -ENODEV;
-}
-
-static struct capi_driver capi_driver_b1isa = {
-	.name		= "b1isa",
-	.revision	= "1.0",
-	.add_card       = b1isa_add_card,
-};
-
-static int __init b1isa_init(void)
-{
-	char *p;
-	char rev[32];
-	int i;
-
-	if ((p = strchr(revision, ':')) != NULL && p[1]) {
-		strlcpy(rev, p + 2, 32);
-		if ((p = strchr(rev, '$')) != NULL && p > rev)
-			*(p - 1) = 0;
-	} else
-		strcpy(rev, "1.0");
-
-	for (i = 0; i < MAX_CARDS; i++) {
-		if (!io[i])
-			break;
-
-		isa_dev[i].resource[0].start = io[i];
-		isa_dev[i].irq = irq[i];
-
-		if (b1isa_probe(&isa_dev[i]) != 0)
-			return -ENODEV;
-	}
-
-	strlcpy(capi_driver_b1isa.revision, rev, 32);
-	register_capi_driver(&capi_driver_b1isa);
-	printk(KERN_INFO "b1isa: revision %s\n", rev);
-
-	return 0;
-}
-
-static void __exit b1isa_exit(void)
-{
-	int i;
-
-	for (i = 0; i < MAX_CARDS; i++) {
-		if (isa_dev[i].resource[0].start)
-			b1isa_remove(&isa_dev[i]);
-	}
-	unregister_capi_driver(&capi_driver_b1isa);
-}
-
-module_init(b1isa_init);
-module_exit(b1isa_exit);
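
Every module init function in these drivers repeats the same CVS-keyword parsing, turning a string such as "$Revision: 1.1.2.3 $" into "1.1.2.3" with a "1.0" fallback. A standalone sketch of that logic (snprintf stands in for the kernel's strlcpy):

#include <stdio.h>
#include <string.h>

/* Mirrors the revision parsing repeated in b1dma_init(), b1isa_init() etc. */
static void parse_revision(const char *revision, char *rev, size_t revsize)
{
	const char *p = strchr(revision, ':');
	char *q;

	if (p && p[1]) {
		snprintf(rev, revsize, "%s", p + 2);	/* skip ": " */
		q = strchr(rev, '$');
		if (q && q > rev)
			*(q - 1) = 0;			/* drop the space before '$' too */
	} else {
		snprintf(rev, revsize, "%s", "1.0");
	}
}

int main(void)
{
	char rev[32];

	parse_revision("$Revision: 1.1.2.3 $", rev, sizeof(rev));
	printf("revision %s\n", rev);	/* prints "revision 1.1.2.3" */
	return 0;
}
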
diff --git a/drivers/staging/isdn/avm/b1pci.c b/drivers/staging/isdn/avm/b1pci.c
deleted file mode 100644
index b76b57a..0000000
--- a/drivers/staging/isdn/avm/b1pci.c
+++ /dev/null
@@ -1,416 +0,0 @@
-/* $Id: b1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Module for AVM B1 PCI-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/capi.h>
-#include <asm/io.h>
-#include <linux/init.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.2 $";
-
-/* ------------------------------------------------------------- */
-
-static struct pci_device_id b1pci_pci_tbl[] = {
-	{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_B1, PCI_ANY_ID, PCI_ANY_ID },
-	{ }				/* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(pci, b1pci_pci_tbl);
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM B1 PCI card");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static char *b1pci_procinfo(struct capi_ctr *ctrl)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
-	if (!cinfo)
-		return "";
-	sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
-		cinfo->cardname[0] ? cinfo->cardname : "-",
-		cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
-		cinfo->card ? cinfo->card->port : 0x0,
-		cinfo->card ? cinfo->card->irq : 0,
-		cinfo->card ? cinfo->card->revision : 0
-		);
-	return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-static int b1pci_probe(struct capicardparams *p, struct pci_dev *pdev)
-{
-	avmcard *card;
-	avmctrl_info *cinfo;
-	int retval;
-
-	card = b1_alloc_card(1);
-	if (!card) {
-		printk(KERN_WARNING "b1pci: no memory.\n");
-		retval = -ENOMEM;
-		goto err;
-	}
-
-	cinfo = card->ctrlinfo;
-	sprintf(card->name, "b1pci-%x", p->port);
-	card->port = p->port;
-	card->irq = p->irq;
-	card->cardtype = avm_b1pci;
-
-	if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
-		printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n",
-		       card->port, card->port + AVMB1_PORTLEN);
-		retval = -EBUSY;
-		goto err_free;
-	}
-	b1_reset(card->port);
-	retval = b1_detect(card->port, card->cardtype);
-	if (retval) {
-		printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n",
-		       card->port, retval);
-		retval = -ENODEV;
-		goto err_release_region;
-	}
-	b1_reset(card->port);
-	b1_getrevision(card);
-
-	retval = request_irq(card->irq, b1_interrupt, IRQF_SHARED, card->name, card);
-	if (retval) {
-		printk(KERN_ERR "b1pci: unable to get IRQ %d.\n", card->irq);
-		retval = -EBUSY;
-		goto err_release_region;
-	}
-
-	cinfo->capi_ctrl.driver_name   = "b1pci";
-	cinfo->capi_ctrl.driverdata    = cinfo;
-	cinfo->capi_ctrl.register_appl = b1_register_appl;
-	cinfo->capi_ctrl.release_appl  = b1_release_appl;
-	cinfo->capi_ctrl.send_message  = b1_send_message;
-	cinfo->capi_ctrl.load_firmware = b1_load_firmware;
-	cinfo->capi_ctrl.reset_ctr     = b1_reset_ctr;
-	cinfo->capi_ctrl.procinfo      = b1pci_procinfo;
-	cinfo->capi_ctrl.proc_show     = b1_proc_show;
-	strcpy(cinfo->capi_ctrl.name, card->name);
-	cinfo->capi_ctrl.owner         = THIS_MODULE;
-
-	retval = attach_capi_ctr(&cinfo->capi_ctrl);
-	if (retval) {
-		printk(KERN_ERR "b1pci: attach controller failed.\n");
-		goto err_free_irq;
-	}
-
-	if (card->revision >= 4) {
-		printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, revision %d (no dma)\n",
-		       card->port, card->irq, card->revision);
-	} else {
-		printk(KERN_INFO "b1pci: AVM B1 PCI at i/o %#x, irq %d, revision %d\n",
-		       card->port, card->irq, card->revision);
-	}
-
-	pci_set_drvdata(pdev, card);
-	return 0;
-
-err_free_irq:
-	free_irq(card->irq, card);
-err_release_region:
-	release_region(card->port, AVMB1_PORTLEN);
-err_free:
-	b1_free_card(card);
-err:
-	return retval;
-}
-
-static void b1pci_remove(struct pci_dev *pdev)
-{
-	avmcard *card = pci_get_drvdata(pdev);
-	avmctrl_info *cinfo = card->ctrlinfo;
-	unsigned int port = card->port;
-
-	b1_reset(port);
-	b1_reset(port);
-
-	detach_capi_ctr(&cinfo->capi_ctrl);
-	free_irq(card->irq, card);
-	release_region(card->port, AVMB1_PORTLEN);
-	b1_free_card(card);
-}
-
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
-/* ------------------------------------------------------------- */
-
-static char *b1pciv4_procinfo(struct capi_ctr *ctrl)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
-	if (!cinfo)
-		return "";
-	sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx r%d",
-		cinfo->cardname[0] ? cinfo->cardname : "-",
-		cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
-		cinfo->card ? cinfo->card->port : 0x0,
-		cinfo->card ? cinfo->card->irq : 0,
-		cinfo->card ? cinfo->card->membase : 0,
-		cinfo->card ? cinfo->card->revision : 0
-		);
-	return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-static int b1pciv4_probe(struct capicardparams *p, struct pci_dev *pdev)
-{
-	avmcard *card;
-	avmctrl_info *cinfo;
-	int retval;
-
-	card = b1_alloc_card(1);
-	if (!card) {
-		printk(KERN_WARNING "b1pci: no memory.\n");
-		retval = -ENOMEM;
-		goto err;
-	}
-
-	card->dma = avmcard_dma_alloc("b1pci", pdev, 2048 + 128, 2048 + 128);
-	if (!card->dma) {
-		printk(KERN_WARNING "b1pci: dma alloc.\n");
-		retval = -ENOMEM;
-		goto err_free;
-	}
-
-	cinfo = card->ctrlinfo;
-	sprintf(card->name, "b1pciv4-%x", p->port);
-	card->port = p->port;
-	card->irq = p->irq;
-	card->membase = p->membase;
-	card->cardtype = avm_b1pci;
-
-	if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
-		printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n",
-		       card->port, card->port + AVMB1_PORTLEN);
-		retval = -EBUSY;
-		goto err_free_dma;
-	}
-
-	card->mbase = ioremap(card->membase, 64);
-	if (!card->mbase) {
-		printk(KERN_NOTICE "b1pci: can't remap memory at 0x%lx\n",
-		       card->membase);
-		retval = -ENOMEM;
-		goto err_release_region;
-	}
-
-	b1dma_reset(card);
-
-	retval = b1pciv4_detect(card);
-	if (retval) {
-		printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n",
-		       card->port, retval);
-		retval = -ENODEV;
-		goto err_unmap;
-	}
-	b1dma_reset(card);
-	b1_getrevision(card);
-
-	retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card);
-	if (retval) {
-		printk(KERN_ERR "b1pci: unable to get IRQ %d.\n",
-		       card->irq);
-		retval = -EBUSY;
-		goto err_unmap;
-	}
-
-	cinfo->capi_ctrl.owner         = THIS_MODULE;
-	cinfo->capi_ctrl.driver_name   = "b1pciv4";
-	cinfo->capi_ctrl.driverdata    = cinfo;
-	cinfo->capi_ctrl.register_appl = b1dma_register_appl;
-	cinfo->capi_ctrl.release_appl  = b1dma_release_appl;
-	cinfo->capi_ctrl.send_message  = b1dma_send_message;
-	cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
-	cinfo->capi_ctrl.reset_ctr     = b1dma_reset_ctr;
-	cinfo->capi_ctrl.procinfo      = b1pciv4_procinfo;
-	cinfo->capi_ctrl.proc_show     = b1dma_proc_show;
-	strcpy(cinfo->capi_ctrl.name, card->name);
-
-	retval = attach_capi_ctr(&cinfo->capi_ctrl);
-	if (retval) {
-		printk(KERN_ERR "b1pci: attach controller failed.\n");
-		goto err_free_irq;
-	}
-	card->cardnr = cinfo->capi_ctrl.cnr;
-
-	printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, mem %#lx, revision %d (dma)\n",
-	       card->port, card->irq, card->membase, card->revision);
-
-	pci_set_drvdata(pdev, card);
-	return 0;
-
-err_free_irq:
-	free_irq(card->irq, card);
-err_unmap:
-	iounmap(card->mbase);
-err_release_region:
-	release_region(card->port, AVMB1_PORTLEN);
-err_free_dma:
-	avmcard_dma_free(card->dma);
-err_free:
-	b1_free_card(card);
-err:
-	return retval;
-
-}
-
-static void b1pciv4_remove(struct pci_dev *pdev)
-{
-	avmcard *card = pci_get_drvdata(pdev);
-	avmctrl_info *cinfo = card->ctrlinfo;
-
-	b1dma_reset(card);
-
-	detach_capi_ctr(&cinfo->capi_ctrl);
-	free_irq(card->irq, card);
-	iounmap(card->mbase);
-	release_region(card->port, AVMB1_PORTLEN);
-	avmcard_dma_free(card->dma);
-	b1_free_card(card);
-}
-
-#endif /* CONFIG_ISDN_DRV_AVMB1_B1PCIV4 */
-
-static int b1pci_pci_probe(struct pci_dev *pdev,
-			   const struct pci_device_id *ent)
-{
-	struct capicardparams param;
-	int retval;
-
-	if (pci_enable_device(pdev) < 0) {
-		printk(KERN_ERR "b1pci: failed to enable AVM-B1\n");
-		return -ENODEV;
-	}
-	param.irq = pdev->irq;
-
-	if (pci_resource_start(pdev, 2)) { /* B1 PCI V4 */
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
-		pci_set_master(pdev);
-#endif
-		param.membase = pci_resource_start(pdev, 0);
-		param.port = pci_resource_start(pdev, 2);
-
-		printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 V4 at i/o %#x, irq %d, mem %#x\n",
-		       param.port, param.irq, param.membase);
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
-		retval = b1pciv4_probe(&param, pdev);
-#else
-		retval = b1pci_probe(&param, pdev);
-#endif
-		if (retval != 0) {
-			printk(KERN_ERR "b1pci: no AVM-B1 V4 at i/o %#x, irq %d, mem %#x detected\n",
-			       param.port, param.irq, param.membase);
-		}
-	} else {
-		param.membase = 0;
-		param.port = pci_resource_start(pdev, 1);
-
-		printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 at i/o %#x, irq %d\n",
-		       param.port, param.irq);
-		retval = b1pci_probe(&param, pdev);
-		if (retval != 0) {
-			printk(KERN_ERR "b1pci: no AVM-B1 at i/o %#x, irq %d detected\n",
-			       param.port, param.irq);
-		}
-	}
-	return retval;
-}
-
-static void b1pci_pci_remove(struct pci_dev *pdev)
-{
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
-	avmcard *card = pci_get_drvdata(pdev);
-
-	if (card->dma)
-		b1pciv4_remove(pdev);
-	else
-		b1pci_remove(pdev);
-#else
-	b1pci_remove(pdev);
-#endif
-}
-
-static struct pci_driver b1pci_pci_driver = {
-	.name		= "b1pci",
-	.id_table	= b1pci_pci_tbl,
-	.probe		= b1pci_pci_probe,
-	.remove		= b1pci_pci_remove,
-};
-
-static struct capi_driver capi_driver_b1pci = {
-	.name		= "b1pci",
-	.revision	= "1.0",
-};
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
-static struct capi_driver capi_driver_b1pciv4 = {
-	.name		= "b1pciv4",
-	.revision	= "1.0",
-};
-#endif
-
-static int __init b1pci_init(void)
-{
-	char *p;
-	char rev[32];
-	int err;
-
-	if ((p = strchr(revision, ':')) != NULL && p[1]) {
-		strlcpy(rev, p + 2, 32);
-		if ((p = strchr(rev, '$')) != NULL && p > rev)
-			*(p - 1) = 0;
-	} else
-		strcpy(rev, "1.0");
-
-
-	err = pci_register_driver(&b1pci_pci_driver);
-	if (!err) {
-		strlcpy(capi_driver_b1pci.revision, rev, 32);
-		register_capi_driver(&capi_driver_b1pci);
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
-		strlcpy(capi_driver_b1pciv4.revision, rev, 32);
-		register_capi_driver(&capi_driver_b1pciv4);
-#endif
-		printk(KERN_INFO "b1pci: revision %s\n", rev);
-	}
-	return err;
-}
-
-static void __exit b1pci_exit(void)
-{
-	unregister_capi_driver(&capi_driver_b1pci);
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
-	unregister_capi_driver(&capi_driver_b1pciv4);
-#endif
-	pci_unregister_driver(&b1pci_pci_driver);
-}
-
-module_init(b1pci_init);
-module_exit(b1pci_exit);
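
The probe routines above follow the kernel's goto-unwind idiom: each acquired resource adds one error label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A generic sketch of that shape, using ordinary userspace resources rather than the driver's I/O region, IRQ and card allocations:

#include <stdio.h>
#include <stdlib.h>

/* Goto-unwind error handling in the shape of b1pci_probe(): acquire in order,
 * release in reverse order on failure. */
static int probe_example(const char *path)
{
	char *buf;
	FILE *f;
	int retval;

	buf = malloc(4096);
	if (!buf) {
		retval = -1;		/* stands in for -ENOMEM */
		goto err;
	}

	f = fopen(path, "rb");
	if (!f) {
		retval = -2;		/* stands in for -ENODEV/-EBUSY */
		goto err_free;
	}

	if (fread(buf, 1, 4096, f) == 0) {
		retval = -3;
		goto err_close;
	}

	fclose(f);
	free(buf);
	return 0;

err_close:
	fclose(f);
err_free:
	free(buf);
err:
	return retval;
}

int main(void)
{
	/* "/dev/zero" assumed available; any readable file works. */
	printf("probe_example: %d\n", probe_example("/dev/zero"));
	return 0;
}
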
diff --git a/drivers/staging/isdn/avm/b1pcmcia.c b/drivers/staging/isdn/avm/b1pcmcia.c
deleted file mode 100644
index 3aca16e..0000000
--- a/drivers/staging/isdn/avm/b1pcmcia.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/* $Id: b1pcmcia.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Module for AVM B1/M1/M2 PCMCIA-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/capi.h>
-#include <linux/b1pcmcia.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.2 $";
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM PCMCIA cards");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static void b1pcmcia_remove_ctr(struct capi_ctr *ctrl)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	unsigned int port = card->port;
-
-	b1_reset(port);
-	b1_reset(port);
-
-	detach_capi_ctr(ctrl);
-	free_irq(card->irq, card);
-	b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static LIST_HEAD(cards);
-
-static char *b1pcmcia_procinfo(struct capi_ctr *ctrl);
-
-static int b1pcmcia_add_card(unsigned int port, unsigned irq,
-			     enum avmcardtype cardtype)
-{
-	avmctrl_info *cinfo;
-	avmcard *card;
-	char *cardname;
-	int retval;
-
-	card = b1_alloc_card(1);
-	if (!card) {
-		printk(KERN_WARNING "b1pcmcia: no memory.\n");
-		retval = -ENOMEM;
-		goto err;
-	}
-	cinfo = card->ctrlinfo;
-
-	switch (cardtype) {
-	case avm_m1: sprintf(card->name, "m1-%x", port); break;
-	case avm_m2: sprintf(card->name, "m2-%x", port); break;
-	default: sprintf(card->name, "b1pcmcia-%x", port); break;
-	}
-	card->port = port;
-	card->irq = irq;
-	card->cardtype = cardtype;
-
-	retval = request_irq(card->irq, b1_interrupt, IRQF_SHARED, card->name, card);
-	if (retval) {
-		printk(KERN_ERR "b1pcmcia: unable to get IRQ %d.\n",
-		       card->irq);
-		retval = -EBUSY;
-		goto err_free;
-	}
-	b1_reset(card->port);
-	if ((retval = b1_detect(card->port, card->cardtype)) != 0) {
-		printk(KERN_NOTICE "b1pcmcia: NO card at 0x%x (%d)\n",
-		       card->port, retval);
-		retval = -ENODEV;
-		goto err_free_irq;
-	}
-	b1_reset(card->port);
-	b1_getrevision(card);
-
-	cinfo->capi_ctrl.owner         = THIS_MODULE;
-	cinfo->capi_ctrl.driver_name   = "b1pcmcia";
-	cinfo->capi_ctrl.driverdata    = cinfo;
-	cinfo->capi_ctrl.register_appl = b1_register_appl;
-	cinfo->capi_ctrl.release_appl  = b1_release_appl;
-	cinfo->capi_ctrl.send_message  = b1_send_message;
-	cinfo->capi_ctrl.load_firmware = b1_load_firmware;
-	cinfo->capi_ctrl.reset_ctr     = b1_reset_ctr;
-	cinfo->capi_ctrl.procinfo      = b1pcmcia_procinfo;
-	cinfo->capi_ctrl.proc_show     = b1_proc_show;
-	strcpy(cinfo->capi_ctrl.name, card->name);
-
-	retval = attach_capi_ctr(&cinfo->capi_ctrl);
-	if (retval) {
-		printk(KERN_ERR "b1pcmcia: attach controller failed.\n");
-		goto err_free_irq;
-	}
-	switch (cardtype) {
-	case avm_m1: cardname = "M1"; break;
-	case avm_m2: cardname = "M2"; break;
-	default: cardname = "B1 PCMCIA"; break;
-	}
-
-	printk(KERN_INFO "b1pcmcia: AVM %s at i/o %#x, irq %d, revision %d\n",
-	       cardname, card->port, card->irq, card->revision);
-
-	list_add(&card->list, &cards);
-	return cinfo->capi_ctrl.cnr;
-
-err_free_irq:
-	free_irq(card->irq, card);
-err_free:
-	b1_free_card(card);
-err:
-	return retval;
-}
-
-/* ------------------------------------------------------------- */
-
-static char *b1pcmcia_procinfo(struct capi_ctr *ctrl)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
-	if (!cinfo)
-		return "";
-	sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
-		cinfo->cardname[0] ? cinfo->cardname : "-",
-		cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
-		cinfo->card ? cinfo->card->port : 0x0,
-		cinfo->card ? cinfo->card->irq : 0,
-		cinfo->card ? cinfo->card->revision : 0
-		);
-	return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-int b1pcmcia_addcard_b1(unsigned int port, unsigned irq)
-{
-	return b1pcmcia_add_card(port, irq, avm_b1pcmcia);
-}
-
-int b1pcmcia_addcard_m1(unsigned int port, unsigned irq)
-{
-	return b1pcmcia_add_card(port, irq, avm_m1);
-}
-
-int b1pcmcia_addcard_m2(unsigned int port, unsigned irq)
-{
-	return b1pcmcia_add_card(port, irq, avm_m2);
-}
-
-int b1pcmcia_delcard(unsigned int port, unsigned irq)
-{
-	struct list_head *l;
-	avmcard *card;
-
-	list_for_each(l, &cards) {
-		card = list_entry(l, avmcard, list);
-		if (card->port == port && card->irq == irq) {
-			b1pcmcia_remove_ctr(&card->ctrlinfo[0].capi_ctrl);
-			return 0;
-		}
-	}
-	return -ESRCH;
-}
-
-EXPORT_SYMBOL(b1pcmcia_addcard_b1);
-EXPORT_SYMBOL(b1pcmcia_addcard_m1);
-EXPORT_SYMBOL(b1pcmcia_addcard_m2);
-EXPORT_SYMBOL(b1pcmcia_delcard);
-
-static struct capi_driver capi_driver_b1pcmcia = {
-	.name		= "b1pcmcia",
-	.revision	= "1.0",
-};
-
-static int __init b1pcmcia_init(void)
-{
-	char *p;
-	char rev[32];
-
-	if ((p = strchr(revision, ':')) != NULL && p[1]) {
-		strlcpy(rev, p + 2, 32);
-		if ((p = strchr(rev, '$')) != NULL && p > rev)
-			*(p - 1) = 0;
-	} else
-		strcpy(rev, "1.0");
-
-	strlcpy(capi_driver_b1pcmcia.revision, rev, 32);
-	register_capi_driver(&capi_driver_b1pcmcia);
-	printk(KERN_INFO "b1pci: revision %s\n", rev);
-
-	return 0;
-}
-
-static void __exit b1pcmcia_exit(void)
-{
-	unregister_capi_driver(&capi_driver_b1pcmcia);
-}
-
-module_init(b1pcmcia_init);
-module_exit(b1pcmcia_exit);
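
b1pcmcia keeps its controllers on a driver-global list, and b1pcmcia_delcard() walks that list to find the entry matching a given (port, irq) pair. A simplified userspace sketch of that lookup-and-remove pattern, using a plain singly linked list in place of the kernel's list_head machinery:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the b1pcmcia card registry. */
struct card {
	unsigned int port;
	unsigned int irq;
	struct card *next;
};

static struct card *cards;

static int add_card(unsigned int port, unsigned int irq)
{
	struct card *c = malloc(sizeof(*c));

	if (!c)
		return -1;
	c->port = port;
	c->irq = irq;
	c->next = cards;
	cards = c;
	return 0;
}

/* Returns 0 on success, -1 if no card matches -- like -ESRCH in the driver. */
static int del_card(unsigned int port, unsigned int irq)
{
	struct card **pp;

	for (pp = &cards; *pp; pp = &(*pp)->next) {
		if ((*pp)->port == port && (*pp)->irq == irq) {
			struct card *c = *pp;

			*pp = c->next;
			free(c);
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	add_card(0x150, 5);
	add_card(0x300, 7);
	printf("del 0x300/7: %d\n", del_card(0x300, 7));	/* 0  */
	printf("del 0x300/7: %d\n", del_card(0x300, 7));	/* -1 */
	return 0;
}
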
diff --git a/drivers/staging/isdn/avm/c4.c b/drivers/staging/isdn/avm/c4.c
deleted file mode 100644
index ac72cd2..0000000
--- a/drivers/staging/isdn/avm/c4.c
+++ /dev/null
@@ -1,1317 +0,0 @@
-/* $Id: c4.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Module for AVM C4 & C2 card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/capi.h>
-#include <linux/kernelcapi.h>
-#include <linux/init.h>
-#include <linux/gfp.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <linux/netdevice.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-#undef AVM_C4_DEBUG
-#undef AVM_C4_POLLDEBUG
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.2 $";
-
-/* ------------------------------------------------------------- */
-
-static bool suppress_pollack;
-
-static const struct pci_device_id c4_pci_tbl[] = {
-	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 0, 0, (unsigned long)4 },
-	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2, 0, 0, (unsigned long)2 },
-	{ }			/* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(pci, c4_pci_tbl);
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM C2/C4 cards");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-module_param(suppress_pollack, bool, 0);
-
-/* ------------------------------------------------------------- */
-
-static void c4_dispatch_tx(avmcard *card);
-
-/* ------------------------------------------------------------- */
-
-#define DC21285_DRAM_A0MR	0x40000000
-#define DC21285_DRAM_A1MR	0x40004000
-#define DC21285_DRAM_A2MR	0x40008000
-#define DC21285_DRAM_A3MR	0x4000C000
-
-#define	CAS_OFFSET	0x88
-
-#define DC21285_ARMCSR_BASE	0x42000000
-
-#define	PCI_OUT_INT_STATUS	0x30
-#define	PCI_OUT_INT_MASK	0x34
-#define	MAILBOX_0		0x50
-#define	MAILBOX_1		0x54
-#define	MAILBOX_2		0x58
-#define	MAILBOX_3		0x5C
-#define	DOORBELL		0x60
-#define	DOORBELL_SETUP		0x64
-
-#define CHAN_1_CONTROL		0x90
-#define CHAN_2_CONTROL		0xB0
-#define DRAM_TIMING		0x10C
-#define DRAM_ADDR_SIZE_0	0x110
-#define DRAM_ADDR_SIZE_1	0x114
-#define DRAM_ADDR_SIZE_2	0x118
-#define DRAM_ADDR_SIZE_3	0x11C
-#define	SA_CONTROL		0x13C
-#define	XBUS_CYCLE		0x148
-#define	XBUS_STROBE		0x14C
-#define	DBELL_PCI_MASK		0x150
-#define DBELL_SA_MASK		0x154
-
-#define SDRAM_SIZE		0x1000000
-
-/* ------------------------------------------------------------- */
-
-#define	MBOX_PEEK_POKE		MAILBOX_0
-
-#define DBELL_ADDR		0x01
-#define DBELL_DATA		0x02
-#define DBELL_RNWR		0x40
-#define DBELL_INIT		0x80
-
-/* ------------------------------------------------------------- */
-
-#define	MBOX_UP_ADDR		MAILBOX_0
-#define	MBOX_UP_LEN		MAILBOX_1
-#define	MBOX_DOWN_ADDR		MAILBOX_2
-#define	MBOX_DOWN_LEN		MAILBOX_3
-
-#define	DBELL_UP_HOST		0x00000100
-#define	DBELL_UP_ARM		0x00000200
-#define	DBELL_DOWN_HOST		0x00000400
-#define	DBELL_DOWN_ARM		0x00000800
-#define	DBELL_RESET_HOST	0x40000000
-#define	DBELL_RESET_ARM		0x80000000
-
-/* ------------------------------------------------------------- */
-
-#define	DRAM_TIMING_DEF		0x001A01A5
-#define DRAM_AD_SZ_DEF0		0x00000045
-#define DRAM_AD_SZ_NULL		0x00000000
-
-#define SA_CTL_ALLRIGHT		0x64AA0271
-
-#define	INIT_XBUS_CYCLE		0x100016DB
-#define	INIT_XBUS_STROBE	0xF1F1F1F1
-
-/* ------------------------------------------------------------- */
-
-#define	RESET_TIMEOUT		(15 * HZ)	/* 15 sec */
-#define	PEEK_POKE_TIMEOUT	(HZ / 10)	/* 0.1 sec */
-
-/* ------------------------------------------------------------- */
-
-#define c4outmeml(addr, value)	writel(value, addr)
-#define c4inmeml(addr)	readl(addr)
-#define c4outmemw(addr, value)	writew(value, addr)
-#define c4inmemw(addr)	readw(addr)
-#define c4outmemb(addr, value)	writeb(value, addr)
-#define c4inmemb(addr)	readb(addr)
-
-/* ------------------------------------------------------------- */
-
-static inline int wait_for_doorbell(avmcard *card, unsigned long t)
-{
-	unsigned long stop;
-
-	stop = jiffies + t;
-	while (c4inmeml(card->mbase + DOORBELL) != 0xffffffff) {
-		if (!time_before(jiffies, stop))
-			return -1;
-		mb();
-	}
-	return 0;
-}
-
-static int c4_poke(avmcard *card,  unsigned long off, unsigned long value)
-{
-
-	if (wait_for_doorbell(card, HZ / 10) < 0)
-		return -1;
-
-	c4outmeml(card->mbase + MBOX_PEEK_POKE, off);
-	c4outmeml(card->mbase + DOORBELL, DBELL_ADDR);
-
-	if (wait_for_doorbell(card, HZ / 10) < 0)
-		return -1;
-
-	c4outmeml(card->mbase + MBOX_PEEK_POKE, value);
-	c4outmeml(card->mbase + DOORBELL, DBELL_DATA | DBELL_ADDR);
-
-	return 0;
-}
-
-static int c4_peek(avmcard *card,  unsigned long off, unsigned long *valuep)
-{
-	if (wait_for_doorbell(card, HZ / 10) < 0)
-		return -1;
-
-	c4outmeml(card->mbase + MBOX_PEEK_POKE, off);
-	c4outmeml(card->mbase + DOORBELL, DBELL_RNWR | DBELL_ADDR);
-
-	if (wait_for_doorbell(card, HZ / 10) < 0)
-		return -1;
-
-	*valuep = c4inmeml(card->mbase + MBOX_PEEK_POKE);
-
-	return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-static int c4_load_t4file(avmcard *card, capiloaddatapart *t4file)
-{
-	u32 val;
-	unsigned char *dp;
-	u_int left;
-	u32 loadoff = 0;
-
-	dp = t4file->data;
-	left = t4file->len;
-	while (left >= sizeof(u32)) {
-		if (t4file->user) {
-			if (copy_from_user(&val, dp, sizeof(val)))
-				return -EFAULT;
-		} else {
-			memcpy(&val, dp, sizeof(val));
-		}
-		if (c4_poke(card, loadoff, val)) {
-			printk(KERN_ERR "%s: corrupted firmware file ?\n",
-			       card->name);
-			return -EIO;
-		}
-		left -= sizeof(u32);
-		dp += sizeof(u32);
-		loadoff += sizeof(u32);
-	}
-	if (left) {
-		val = 0;
-		if (t4file->user) {
-			if (copy_from_user(&val, dp, left))
-				return -EFAULT;
-		} else {
-			memcpy(&val, dp, left);
-		}
-		if (c4_poke(card, loadoff, val)) {
-			printk(KERN_ERR "%s: corrupted firmware file ?\n",
-			       card->name);
-			return -EIO;
-		}
-	}
-	return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-static inline void _put_byte(void **pp, u8 val)
-{
-	u8 *s = *pp;
-	*s++ = val;
-	*pp = s;
-}
-
-static inline void _put_word(void **pp, u32 val)
-{
-	u8 *s = *pp;
-	*s++ = val & 0xff;
-	*s++ = (val >> 8) & 0xff;
-	*s++ = (val >> 16) & 0xff;
-	*s++ = (val >> 24) & 0xff;
-	*pp = s;
-}
-
-static inline void _put_slice(void **pp, unsigned char *dp, unsigned int len)
-{
-	unsigned i = len;
-	_put_word(pp, i);
-	while (i-- > 0)
-		_put_byte(pp, *dp++);
-}
-
-static inline u8 _get_byte(void **pp)
-{
-	u8 *s = *pp;
-	u8 val;
-	val = *s++;
-	*pp = s;
-	return val;
-}
-
-static inline u32 _get_word(void **pp)
-{
-	u8 *s = *pp;
-	u32 val;
-	val = *s++;
-	val |= (*s++ << 8);
-	val |= (*s++ << 16);
-	val |= (*s++ << 24);
-	*pp = s;
-	return val;
-}
-
-static inline u32 _get_slice(void **pp, unsigned char *dp)
-{
-	unsigned int len, i;
-
-	len = i = _get_word(pp);
-	while (i-- > 0) *dp++ = _get_byte(pp);
-	return len;
-}
-
-/* ------------------------------------------------------------- */
-
-static void c4_reset(avmcard *card)
-{
-	unsigned long stop;
-
-	c4outmeml(card->mbase + DOORBELL, DBELL_RESET_ARM);
-
-	stop = jiffies + HZ * 10;
-	while (c4inmeml(card->mbase + DOORBELL) != 0xffffffff) {
-		if (!time_before(jiffies, stop))
-			return;
-		c4outmeml(card->mbase + DOORBELL, DBELL_ADDR);
-		mb();
-	}
-
-	c4_poke(card, DC21285_ARMCSR_BASE + CHAN_1_CONTROL, 0);
-	c4_poke(card, DC21285_ARMCSR_BASE + CHAN_2_CONTROL, 0);
-}
-
-/* ------------------------------------------------------------- */
-
-static int c4_detect(avmcard *card)
-{
-	unsigned long stop, dummy;
-
-	c4outmeml(card->mbase + PCI_OUT_INT_MASK, 0x0c);
-	if (c4inmeml(card->mbase + PCI_OUT_INT_MASK) != 0x0c)
-		return	1;
-
-	c4outmeml(card->mbase + DOORBELL, DBELL_RESET_ARM);
-
-	stop = jiffies + HZ * 10;
-	while (c4inmeml(card->mbase + DOORBELL) != 0xffffffff) {
-		if (!time_before(jiffies, stop))
-			return 2;
-		c4outmeml(card->mbase + DOORBELL, DBELL_ADDR);
-		mb();
-	}
-
-	c4_poke(card, DC21285_ARMCSR_BASE + CHAN_1_CONTROL, 0);
-	c4_poke(card, DC21285_ARMCSR_BASE + CHAN_2_CONTROL, 0);
-
-	c4outmeml(card->mbase + MAILBOX_0, 0x55aa55aa);
-	if (c4inmeml(card->mbase + MAILBOX_0) != 0x55aa55aa) return 3;
-
-	c4outmeml(card->mbase + MAILBOX_0, 0xaa55aa55);
-	if (c4inmeml(card->mbase + MAILBOX_0) != 0xaa55aa55) return 4;
-
-	if (c4_poke(card, DC21285_ARMCSR_BASE + DBELL_SA_MASK, 0)) return 5;
-	if (c4_poke(card, DC21285_ARMCSR_BASE + DBELL_PCI_MASK, 0)) return 6;
-	if (c4_poke(card, DC21285_ARMCSR_BASE + SA_CONTROL, SA_CTL_ALLRIGHT))
-		return 7;
-	if (c4_poke(card, DC21285_ARMCSR_BASE + XBUS_CYCLE, INIT_XBUS_CYCLE))
-		return 8;
-	if (c4_poke(card, DC21285_ARMCSR_BASE + XBUS_STROBE, INIT_XBUS_STROBE))
-		return 8;
-	if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_TIMING, 0)) return 9;
-
-	mdelay(1);
-
-	if (c4_peek(card, DC21285_DRAM_A0MR, &dummy)) return 10;
-	if (c4_peek(card, DC21285_DRAM_A1MR, &dummy)) return 11;
-	if (c4_peek(card, DC21285_DRAM_A2MR, &dummy)) return 12;
-	if (c4_peek(card, DC21285_DRAM_A3MR, &dummy)) return 13;
-
-	if (c4_poke(card, DC21285_DRAM_A0MR + CAS_OFFSET, 0)) return 14;
-	if (c4_poke(card, DC21285_DRAM_A1MR + CAS_OFFSET, 0)) return 15;
-	if (c4_poke(card, DC21285_DRAM_A2MR + CAS_OFFSET, 0)) return 16;
-	if (c4_poke(card, DC21285_DRAM_A3MR + CAS_OFFSET, 0)) return 17;
-
-	mdelay(1);
-
-	if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_TIMING, DRAM_TIMING_DEF))
-		return 18;
-
-	if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_0, DRAM_AD_SZ_DEF0))
-		return 19;
-	if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_1, DRAM_AD_SZ_NULL))
-		return 20;
-	if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_2, DRAM_AD_SZ_NULL))
-		return 21;
-	if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_3, DRAM_AD_SZ_NULL))
-		return 22;
-
-	/* Transputer test */
-
-	if (c4_poke(card, 0x000000, 0x11111111)
-	    || c4_poke(card, 0x400000, 0x22222222)
-	       || c4_poke(card, 0x800000, 0x33333333)
-	       || c4_poke(card, 0xC00000, 0x44444444))
-		return 23;
-
-	if (c4_peek(card, 0x000000, &dummy) || dummy != 0x11111111
-	    || c4_peek(card, 0x400000, &dummy) || dummy != 0x22222222
-	       || c4_peek(card, 0x800000, &dummy) || dummy != 0x33333333
-	       || c4_peek(card, 0xC00000, &dummy) || dummy != 0x44444444)
-		return 24;
-
-	if (c4_poke(card, 0x000000, 0x55555555)
-	    || c4_poke(card, 0x400000, 0x66666666)
-	       || c4_poke(card, 0x800000, 0x77777777)
-	       || c4_poke(card, 0xC00000, 0x88888888))
-		return 25;
-
-	if (c4_peek(card, 0x000000, &dummy) || dummy != 0x55555555
-	    || c4_peek(card, 0x400000, &dummy) || dummy != 0x66666666
-	       || c4_peek(card, 0x800000, &dummy) || dummy != 0x77777777
-	       || c4_peek(card, 0xC00000, &dummy) || dummy != 0x88888888)
-		return 26;
-
-	return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-static void c4_dispatch_tx(avmcard *card)
-{
-	avmcard_dmainfo *dma = card->dma;
-	struct sk_buff *skb;
-	u8 cmd, subcmd;
-	u16 len;
-	u32 txlen;
-	void *p;
-
-
-	if (card->csr & DBELL_DOWN_ARM) { /* tx busy */
-		return;
-	}
-
-	skb = skb_dequeue(&dma->send_queue);
-	if (!skb) {
-#ifdef AVM_C4_DEBUG
-		printk(KERN_DEBUG "%s: tx underrun\n", card->name);
-#endif
-		return;
-	}
-
-	len = CAPIMSG_LEN(skb->data);
-
-	if (len) {
-		cmd = CAPIMSG_COMMAND(skb->data);
-		subcmd = CAPIMSG_SUBCOMMAND(skb->data);
-
-		p = dma->sendbuf.dmabuf;
-
-		if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
-			u16 dlen = CAPIMSG_DATALEN(skb->data);
-			_put_byte(&p, SEND_DATA_B3_REQ);
-			_put_slice(&p, skb->data, len);
-			_put_slice(&p, skb->data + len, dlen);
-		} else {
-			_put_byte(&p, SEND_MESSAGE);
-			_put_slice(&p, skb->data, len);
-		}
-		txlen = (u8 *)p - (u8 *)dma->sendbuf.dmabuf;
-#ifdef AVM_C4_DEBUG
-		printk(KERN_DEBUG "%s: tx put msg len=%d\n", card->name, txlen);
-#endif
-	} else {
-		txlen = skb->len - 2;
-#ifdef AVM_C4_POLLDEBUG
-		if (skb->data[2] == SEND_POLLACK)
-			printk(KERN_INFO "%s: ack to c4\n", card->name);
-#endif
-#ifdef AVM_C4_DEBUG
-		printk(KERN_DEBUG "%s: tx put 0x%x len=%d\n",
-		       card->name, skb->data[2], txlen);
-#endif
-		skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
-						 skb->len - 2);
-	}
-	txlen = (txlen + 3) & ~3;
-
-	c4outmeml(card->mbase + MBOX_DOWN_ADDR, dma->sendbuf.dmaaddr);
-	c4outmeml(card->mbase + MBOX_DOWN_LEN, txlen);
-
-	card->csr |= DBELL_DOWN_ARM;
-
-	c4outmeml(card->mbase + DOORBELL, DBELL_DOWN_ARM);
-
-	dev_kfree_skb_any(skb);
-}
-
-/* ------------------------------------------------------------- */
-
-static void queue_pollack(avmcard *card)
-{
-	struct sk_buff *skb;
-	void *p;
-
-	skb = alloc_skb(3, GFP_ATOMIC);
-	if (!skb) {
-		printk(KERN_CRIT "%s: no memory, lost poll ack\n",
-		       card->name);
-		return;
-	}
-	p = skb->data;
-	_put_byte(&p, 0);
-	_put_byte(&p, 0);
-	_put_byte(&p, SEND_POLLACK);
-	skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
-	skb_queue_tail(&card->dma->send_queue, skb);
-	c4_dispatch_tx(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static void c4_handle_rx(avmcard *card)
-{
-	avmcard_dmainfo *dma = card->dma;
-	struct capi_ctr *ctrl;
-	avmctrl_info *cinfo;
-	struct sk_buff *skb;
-	void *p = dma->recvbuf.dmabuf;
-	u32 ApplId, MsgLen, DataB3Len, NCCI, WindowSize;
-	u8 b1cmd =  _get_byte(&p);
-	u32 cidx;
-
-
-#ifdef AVM_C4_DEBUG
-	printk(KERN_DEBUG "%s: rx 0x%x len=%lu\n", card->name,
-	       b1cmd, (unsigned long)dma->recvlen);
-#endif
-
-	switch (b1cmd) {
-	case RECEIVE_DATA_B3_IND:
-
-		ApplId = (unsigned) _get_word(&p);
-		MsgLen = _get_slice(&p, card->msgbuf);
-		DataB3Len = _get_slice(&p, card->databuf);
-		cidx = CAPIMSG_CONTROLLER(card->msgbuf)-card->cardnr;
-		if (cidx >= card->nlogcontr) cidx = 0;
-		ctrl = &card->ctrlinfo[cidx].capi_ctrl;
-
-		if (MsgLen < 30) { /* not CAPI 64Bit */
-			memset(card->msgbuf + MsgLen, 0, 30 - MsgLen);
-			MsgLen = 30;
-			CAPIMSG_SETLEN(card->msgbuf, 30);
-		}
-		if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
-			printk(KERN_ERR "%s: incoming packet dropped\n",
-			       card->name);
-		} else {
-			skb_put_data(skb, card->msgbuf, MsgLen);
-			skb_put_data(skb, card->databuf, DataB3Len);
-			capi_ctr_handle_message(ctrl, ApplId, skb);
-		}
-		break;
-
-	case RECEIVE_MESSAGE:
-
-		ApplId = (unsigned) _get_word(&p);
-		MsgLen = _get_slice(&p, card->msgbuf);
-		cidx = CAPIMSG_CONTROLLER(card->msgbuf)-card->cardnr;
-		if (cidx >= card->nlogcontr) cidx = 0;
-		cinfo = &card->ctrlinfo[cidx];
-		ctrl = &card->ctrlinfo[cidx].capi_ctrl;
-
-		if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
-			printk(KERN_ERR "%s: incoming packet dropped\n",
-			       card->name);
-		} else {
-			skb_put_data(skb, card->msgbuf, MsgLen);
-			if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
-				capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
-						     CAPIMSG_NCCI(skb->data),
-						     CAPIMSG_MSGID(skb->data));
-
-			capi_ctr_handle_message(ctrl, ApplId, skb);
-		}
-		break;
-
-	case RECEIVE_NEW_NCCI:
-
-		ApplId = _get_word(&p);
-		NCCI = _get_word(&p);
-		WindowSize = _get_word(&p);
-		cidx = (NCCI & 0x7f) - card->cardnr;
-		if (cidx >= card->nlogcontr) cidx = 0;
-
-		capilib_new_ncci(&card->ctrlinfo[cidx].ncci_head, ApplId, NCCI, WindowSize);
-
-		break;
-
-	case RECEIVE_FREE_NCCI:
-
-		ApplId = _get_word(&p);
-		NCCI = _get_word(&p);
-
-		if (NCCI != 0xffffffff) {
-			cidx = (NCCI & 0x7f) - card->cardnr;
-			if (cidx >= card->nlogcontr) cidx = 0;
-			capilib_free_ncci(&card->ctrlinfo[cidx].ncci_head, ApplId, NCCI);
-		}
-		break;
-
-	case RECEIVE_START:
-#ifdef AVM_C4_POLLDEBUG
-		printk(KERN_INFO "%s: poll from c4\n", card->name);
-#endif
-		if (!suppress_pollack)
-			queue_pollack(card);
-		for (cidx = 0; cidx < card->nr_controllers; cidx++) {
-			ctrl = &card->ctrlinfo[cidx].capi_ctrl;
-			capi_ctr_resume_output(ctrl);
-		}
-		break;
-
-	case RECEIVE_STOP:
-		for (cidx = 0; cidx < card->nr_controllers; cidx++) {
-			ctrl = &card->ctrlinfo[cidx].capi_ctrl;
-			capi_ctr_suspend_output(ctrl);
-		}
-		break;
-
-	case RECEIVE_INIT:
-
-		cidx = card->nlogcontr;
-		if (cidx >= card->nr_controllers) {
-			printk(KERN_ERR "%s: card with %d controllers ??\n",
-			       card->name, cidx + 1);
-			break;
-		}
-		card->nlogcontr++;
-		cinfo = &card->ctrlinfo[cidx];
-		ctrl = &cinfo->capi_ctrl;
-		cinfo->versionlen = _get_slice(&p, cinfo->versionbuf);
-		b1_parse_version(cinfo);
-		printk(KERN_INFO "%s: %s-card (%s) now active\n",
-		       card->name,
-		       cinfo->version[VER_CARDTYPE],
-		       cinfo->version[VER_DRIVER]);
-		capi_ctr_ready(&cinfo->capi_ctrl);
-		break;
-
-	case RECEIVE_TASK_READY:
-		ApplId = (unsigned) _get_word(&p);
-		MsgLen = _get_slice(&p, card->msgbuf);
-		card->msgbuf[MsgLen] = 0;
-		while (MsgLen > 0
-		       && (card->msgbuf[MsgLen - 1] == '\n'
-			   || card->msgbuf[MsgLen - 1] == '\r')) {
-			card->msgbuf[MsgLen - 1] = 0;
-			MsgLen--;
-		}
-		printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
-		       card->name, ApplId, card->msgbuf);
-		break;
-
-	case RECEIVE_DEBUGMSG:
-		MsgLen = _get_slice(&p, card->msgbuf);
-		card->msgbuf[MsgLen] = 0;
-		while (MsgLen > 0
-		       && (card->msgbuf[MsgLen - 1] == '\n'
-			   || card->msgbuf[MsgLen - 1] == '\r')) {
-			card->msgbuf[MsgLen - 1] = 0;
-			MsgLen--;
-		}
-		printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
-		break;
-
-	default:
-		printk(KERN_ERR "%s: c4_interrupt: 0x%x ???\n",
-		       card->name, b1cmd);
-		return;
-	}
-}
-
-/* ------------------------------------------------------------- */
-
-static irqreturn_t c4_handle_interrupt(avmcard *card)
-{
-	unsigned long flags;
-	u32 status;
-
-	spin_lock_irqsave(&card->lock, flags);
-	status = c4inmeml(card->mbase + DOORBELL);
-
-	if (status & DBELL_RESET_HOST) {
-		u_int i;
-		c4outmeml(card->mbase + PCI_OUT_INT_MASK, 0x0c);
-		spin_unlock_irqrestore(&card->lock, flags);
-		if (card->nlogcontr == 0)
-			return IRQ_HANDLED;
-		printk(KERN_ERR "%s: unexpected reset\n", card->name);
-		for (i = 0; i < card->nr_controllers; i++) {
-			avmctrl_info *cinfo = &card->ctrlinfo[i];
-			memset(cinfo->version, 0, sizeof(cinfo->version));
-			spin_lock_irqsave(&card->lock, flags);
-			capilib_release(&cinfo->ncci_head);
-			spin_unlock_irqrestore(&card->lock, flags);
-			capi_ctr_down(&cinfo->capi_ctrl);
-		}
-		card->nlogcontr = 0;
-		return IRQ_HANDLED;
-	}
-
-	status &= (DBELL_UP_HOST | DBELL_DOWN_HOST);
-	if (!status) {
-		spin_unlock_irqrestore(&card->lock, flags);
-		return IRQ_HANDLED;
-	}
-	c4outmeml(card->mbase + DOORBELL, status);
-
-	if ((status & DBELL_UP_HOST) != 0) {
-		card->dma->recvlen = c4inmeml(card->mbase + MBOX_UP_LEN);
-		c4outmeml(card->mbase + MBOX_UP_LEN, 0);
-		c4_handle_rx(card);
-		card->dma->recvlen = 0;
-		c4outmeml(card->mbase + MBOX_UP_LEN, card->dma->recvbuf.size);
-		c4outmeml(card->mbase + DOORBELL, DBELL_UP_ARM);
-	}
-
-	if ((status & DBELL_DOWN_HOST) != 0) {
-		card->csr &= ~DBELL_DOWN_ARM;
-		c4_dispatch_tx(card);
-	} else if (card->csr & DBELL_DOWN_HOST) {
-		if (c4inmeml(card->mbase + MBOX_DOWN_LEN) == 0) {
-			card->csr &= ~DBELL_DOWN_ARM;
-			c4_dispatch_tx(card);
-		}
-	}
-	spin_unlock_irqrestore(&card->lock, flags);
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t c4_interrupt(int interrupt, void *devptr)
-{
-	avmcard *card = devptr;
-
-	return c4_handle_interrupt(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static void c4_send_init(avmcard *card)
-{
-	struct sk_buff *skb;
-	void *p;
-	unsigned long flags;
-
-	skb = alloc_skb(15, GFP_ATOMIC);
-	if (!skb) {
-		printk(KERN_CRIT "%s: no memory, lost register appl.\n",
-		       card->name);
-		return;
-	}
-	p = skb->data;
-	_put_byte(&p, 0);
-	_put_byte(&p, 0);
-	_put_byte(&p, SEND_INIT);
-	_put_word(&p, CAPI_MAXAPPL);
-	_put_word(&p, AVM_NCCI_PER_CHANNEL * 30);
-	_put_word(&p, card->cardnr - 1);
-	skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
-	skb_queue_tail(&card->dma->send_queue, skb);
-	spin_lock_irqsave(&card->lock, flags);
-	c4_dispatch_tx(card);
-	spin_unlock_irqrestore(&card->lock, flags);
-}
-
-static int queue_sendconfigword(avmcard *card, u32 val)
-{
-	struct sk_buff *skb;
-	unsigned long flags;
-	void *p;
-
-	skb = alloc_skb(3 + 4, GFP_ATOMIC);
-	if (!skb) {
-		printk(KERN_CRIT "%s: no memory, send config\n",
-		       card->name);
-		return -ENOMEM;
-	}
-	p = skb->data;
-	_put_byte(&p, 0);
-	_put_byte(&p, 0);
-	_put_byte(&p, SEND_CONFIG);
-	_put_word(&p, val);
-	skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
-	skb_queue_tail(&card->dma->send_queue, skb);
-	spin_lock_irqsave(&card->lock, flags);
-	c4_dispatch_tx(card);
-	spin_unlock_irqrestore(&card->lock, flags);
-	return 0;
-}
-
-static int queue_sendconfig(avmcard *card, char cval[4])
-{
-	struct sk_buff *skb;
-	unsigned long flags;
-	void *p;
-
-	skb = alloc_skb(3 + 4, GFP_ATOMIC);
-	if (!skb) {
-		printk(KERN_CRIT "%s: no memory, send config\n",
-		       card->name);
-		return -ENOMEM;
-	}
-	p = skb->data;
-	_put_byte(&p, 0);
-	_put_byte(&p, 0);
-	_put_byte(&p, SEND_CONFIG);
-	_put_byte(&p, cval[0]);
-	_put_byte(&p, cval[1]);
-	_put_byte(&p, cval[2]);
-	_put_byte(&p, cval[3]);
-	skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
-	skb_queue_tail(&card->dma->send_queue, skb);
-
-	spin_lock_irqsave(&card->lock, flags);
-	c4_dispatch_tx(card);
-	spin_unlock_irqrestore(&card->lock, flags);
-	return 0;
-}
-
-static int c4_send_config(avmcard *card, capiloaddatapart *config)
-{
-	u8 val[4];
-	unsigned char *dp;
-	u_int left;
-	int retval;
-
-	if ((retval = queue_sendconfigword(card, 1)) != 0)
-		return retval;
-	if ((retval = queue_sendconfigword(card, config->len)) != 0)
-		return retval;
-
-	dp = config->data;
-	left = config->len;
-	while (left >= sizeof(u32)) {
-		if (config->user) {
-			if (copy_from_user(val, dp, sizeof(val)))
-				return -EFAULT;
-		} else {
-			memcpy(val, dp, sizeof(val));
-		}
-		if ((retval = queue_sendconfig(card, val)) != 0)
-			return retval;
-		left -= sizeof(val);
-		dp += sizeof(val);
-	}
-	if (left) {
-		memset(val, 0, sizeof(val));
-		if (config->user) {
-			if (copy_from_user(&val, dp, left))
-				return -EFAULT;
-		} else {
-			memcpy(&val, dp, left);
-		}
-		if ((retval = queue_sendconfig(card, val)) != 0)
-			return retval;
-	}
-
-	return 0;
-}
-
-static int c4_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	int retval;
-
-	if ((retval = c4_load_t4file(card, &data->firmware))) {
-		printk(KERN_ERR "%s: failed to load t4file!!\n",
-		       card->name);
-		c4_reset(card);
-		return retval;
-	}
-
-	card->csr = 0;
-	c4outmeml(card->mbase + MBOX_UP_LEN, 0);
-	c4outmeml(card->mbase + MBOX_DOWN_LEN, 0);
-	c4outmeml(card->mbase + DOORBELL, DBELL_INIT);
-	mdelay(1);
-	c4outmeml(card->mbase + DOORBELL,
-		  DBELL_UP_HOST | DBELL_DOWN_HOST | DBELL_RESET_HOST);
-
-	c4outmeml(card->mbase + PCI_OUT_INT_MASK, 0x08);
-
-	card->dma->recvlen = 0;
-	c4outmeml(card->mbase + MBOX_UP_ADDR, card->dma->recvbuf.dmaaddr);
-	c4outmeml(card->mbase + MBOX_UP_LEN, card->dma->recvbuf.size);
-	c4outmeml(card->mbase + DOORBELL, DBELL_UP_ARM);
-
-	if (data->configuration.len > 0 && data->configuration.data) {
-		retval = c4_send_config(card, &data->configuration);
-		if (retval) {
-			printk(KERN_ERR "%s: failed to set config!!\n",
-			       card->name);
-			c4_reset(card);
-			return retval;
-		}
-	}
-
-	c4_send_init(card);
-
-	return 0;
-}
-
-
-static void c4_reset_ctr(struct capi_ctr *ctrl)
-{
-	avmcard *card = ((avmctrl_info *)(ctrl->driverdata))->card;
-	avmctrl_info *cinfo;
-	u_int i;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
-
-	c4_reset(card);
-
-	spin_unlock_irqrestore(&card->lock, flags);
-
-	for (i = 0; i < card->nr_controllers; i++) {
-		cinfo = &card->ctrlinfo[i];
-		memset(cinfo->version, 0, sizeof(cinfo->version));
-		capi_ctr_down(&cinfo->capi_ctrl);
-	}
-	card->nlogcontr = 0;
-}
-
-static void c4_remove(struct pci_dev *pdev)
-{
-	avmcard *card = pci_get_drvdata(pdev);
-	avmctrl_info *cinfo;
-	u_int i;
-
-	if (!card)
-		return;
-
-	c4_reset(card);
-
-	for (i = 0; i < card->nr_controllers; i++) {
-		cinfo = &card->ctrlinfo[i];
-		detach_capi_ctr(&cinfo->capi_ctrl);
-	}
-
-	free_irq(card->irq, card);
-	iounmap(card->mbase);
-	release_region(card->port, AVMB1_PORTLEN);
-	avmcard_dma_free(card->dma);
-	pci_set_drvdata(pdev, NULL);
-	b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-
-static void c4_register_appl(struct capi_ctr *ctrl,
-			     u16 appl,
-			     capi_register_params *rp)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	struct sk_buff *skb;
-	int want = rp->level3cnt;
-	unsigned long flags;
-	int nconn;
-	void *p;
-
-	if (ctrl->cnr == card->cardnr) {
-
-		if (want > 0) nconn = want;
-		else nconn = ctrl->profile.nbchannel * 4 * -want;
-		if (nconn == 0) nconn = ctrl->profile.nbchannel * 4;
-
-		skb = alloc_skb(23, GFP_ATOMIC);
-		if (!skb) {
-			printk(KERN_CRIT "%s: no memory, lost register appl.\n",
-			       card->name);
-			return;
-		}
-		p = skb->data;
-		_put_byte(&p, 0);
-		_put_byte(&p, 0);
-		_put_byte(&p, SEND_REGISTER);
-		_put_word(&p, appl);
-		_put_word(&p, 1024 * (nconn + 1));
-		_put_word(&p, nconn);
-		_put_word(&p, rp->datablkcnt);
-		_put_word(&p, rp->datablklen);
-		skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
-		skb_queue_tail(&card->dma->send_queue, skb);
-
-		spin_lock_irqsave(&card->lock, flags);
-		c4_dispatch_tx(card);
-		spin_unlock_irqrestore(&card->lock, flags);
-	}
-}
-
-/* ------------------------------------------------------------- */
-
-static void c4_release_appl(struct capi_ctr *ctrl, u16 appl)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	unsigned long flags;
-	struct sk_buff *skb;
-	void *p;
-
-	spin_lock_irqsave(&card->lock, flags);
-	capilib_release_appl(&cinfo->ncci_head, appl);
-	spin_unlock_irqrestore(&card->lock, flags);
-
-	if (ctrl->cnr == card->cardnr) {
-		skb = alloc_skb(7, GFP_ATOMIC);
-		if (!skb) {
-			printk(KERN_CRIT "%s: no memory, lost release appl.\n",
-			       card->name);
-			return;
-		}
-		p = skb->data;
-		_put_byte(&p, 0);
-		_put_byte(&p, 0);
-		_put_byte(&p, SEND_RELEASE);
-		_put_word(&p, appl);
-
-		skb_put(skb, (u8 *)p - (u8 *)skb->data);
-		skb_queue_tail(&card->dma->send_queue, skb);
-		spin_lock_irqsave(&card->lock, flags);
-		c4_dispatch_tx(card);
-		spin_unlock_irqrestore(&card->lock, flags);
-	}
-}
-
-/* ------------------------------------------------------------- */
-
-
-static u16 c4_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	u16 retval = CAPI_NOERROR;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
-	if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
-		retval = capilib_data_b3_req(&cinfo->ncci_head,
-					     CAPIMSG_APPID(skb->data),
-					     CAPIMSG_NCCI(skb->data),
-					     CAPIMSG_MSGID(skb->data));
-	}
-	if (retval == CAPI_NOERROR) {
-		skb_queue_tail(&card->dma->send_queue, skb);
-		c4_dispatch_tx(card);
-	}
-	spin_unlock_irqrestore(&card->lock, flags);
-	return retval;
-}
-
-/* ------------------------------------------------------------- */
-
-static char *c4_procinfo(struct capi_ctr *ctrl)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
-	if (!cinfo)
-		return "";
-	sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx",
-		cinfo->cardname[0] ? cinfo->cardname : "-",
-		cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
-		cinfo->card ? cinfo->card->port : 0x0,
-		cinfo->card ? cinfo->card->irq : 0,
-		cinfo->card ? cinfo->card->membase : 0
-		);
-	return cinfo->infobuf;
-}
-
-static int c4_proc_show(struct seq_file *m, void *v)
-{
-	struct capi_ctr *ctrl = m->private;
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	u8 flag;
-	char *s;
-
-	seq_printf(m, "%-16s %s\n", "name", card->name);
-	seq_printf(m, "%-16s 0x%x\n", "io", card->port);
-	seq_printf(m, "%-16s %d\n", "irq", card->irq);
-	seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
-	switch (card->cardtype) {
-	case avm_b1isa: s = "B1 ISA"; break;
-	case avm_b1pci: s = "B1 PCI"; break;
-	case avm_b1pcmcia: s = "B1 PCMCIA"; break;
-	case avm_m1: s = "M1"; break;
-	case avm_m2: s = "M2"; break;
-	case avm_t1isa: s = "T1 ISA (HEMA)"; break;
-	case avm_t1pci: s = "T1 PCI"; break;
-	case avm_c4: s = "C4"; break;
-	case avm_c2: s = "C2"; break;
-	default: s = "???"; break;
-	}
-	seq_printf(m, "%-16s %s\n", "type", s);
-	if ((s = cinfo->version[VER_DRIVER]) != NULL)
-		seq_printf(m, "%-16s %s\n", "ver_driver", s);
-	if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
-		seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
-	if ((s = cinfo->version[VER_SERIAL]) != NULL)
-		seq_printf(m, "%-16s %s\n", "ver_serial", s);
-
-	if (card->cardtype != avm_m1) {
-		flag = ((u8 *)(ctrl->profile.manu))[3];
-		if (flag)
-			seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
-				   "protocol",
-				   (flag & 0x01) ? " DSS1" : "",
-				   (flag & 0x02) ? " CT1" : "",
-				   (flag & 0x04) ? " VN3" : "",
-				   (flag & 0x08) ? " NI1" : "",
-				   (flag & 0x10) ? " AUSTEL" : "",
-				   (flag & 0x20) ? " ESS" : "",
-				   (flag & 0x40) ? " 1TR6" : ""
-				);
-	}
-	if (card->cardtype != avm_m1) {
-		flag = ((u8 *)(ctrl->profile.manu))[5];
-		if (flag)
-			seq_printf(m, "%-16s%s%s%s%s\n",
-				   "linetype",
-				   (flag & 0x01) ? " point to point" : "",
-				   (flag & 0x02) ? " point to multipoint" : "",
-				   (flag & 0x08) ? " leased line without D-channel" : "",
-				   (flag & 0x04) ? " leased line with D-channel" : ""
-				);
-	}
-	seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
-
-	return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
-		       int nr_controllers)
-{
-	avmcard *card;
-	avmctrl_info *cinfo;
-	int retval;
-	int i;
-
-	card = b1_alloc_card(nr_controllers);
-	if (!card) {
-		printk(KERN_WARNING "c4: no memory.\n");
-		retval = -ENOMEM;
-		goto err;
-	}
-	card->dma = avmcard_dma_alloc("c4", dev, 2048 + 128, 2048 + 128);
-	if (!card->dma) {
-		printk(KERN_WARNING "c4: no memory.\n");
-		retval = -ENOMEM;
-		goto err_free;
-	}
-
-	sprintf(card->name, "c%d-%x", nr_controllers, p->port);
-	card->port = p->port;
-	card->irq = p->irq;
-	card->membase = p->membase;
-	card->cardtype = (nr_controllers == 4) ? avm_c4 : avm_c2;
-
-	if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
-		printk(KERN_WARNING "c4: ports 0x%03x-0x%03x in use.\n",
-		       card->port, card->port + AVMB1_PORTLEN);
-		retval = -EBUSY;
-		goto err_free_dma;
-	}
-
-	card->mbase = ioremap(card->membase, 128);
-	if (card->mbase == NULL) {
-		printk(KERN_NOTICE "c4: can't remap memory at 0x%lx\n",
-		       card->membase);
-		retval = -EIO;
-		goto err_release_region;
-	}
-
-	retval = c4_detect(card);
-	if (retval != 0) {
-		printk(KERN_NOTICE "c4: NO card at 0x%x error(%d)\n",
-		       card->port, retval);
-		retval = -EIO;
-		goto err_unmap;
-	}
-	c4_reset(card);
-
-	retval = request_irq(card->irq, c4_interrupt, IRQF_SHARED, card->name, card);
-	if (retval) {
-		printk(KERN_ERR "c4: unable to get IRQ %d.\n", card->irq);
-		retval = -EBUSY;
-		goto err_unmap;
-	}
-
-	for (i = 0; i < nr_controllers; i++) {
-		cinfo = &card->ctrlinfo[i];
-		cinfo->capi_ctrl.owner = THIS_MODULE;
-		cinfo->capi_ctrl.driver_name   = "c4";
-		cinfo->capi_ctrl.driverdata    = cinfo;
-		cinfo->capi_ctrl.register_appl = c4_register_appl;
-		cinfo->capi_ctrl.release_appl  = c4_release_appl;
-		cinfo->capi_ctrl.send_message  = c4_send_message;
-		cinfo->capi_ctrl.load_firmware = c4_load_firmware;
-		cinfo->capi_ctrl.reset_ctr     = c4_reset_ctr;
-		cinfo->capi_ctrl.procinfo      = c4_procinfo;
-		cinfo->capi_ctrl.proc_show     = c4_proc_show;
-		strcpy(cinfo->capi_ctrl.name, card->name);
-
-		retval = attach_capi_ctr(&cinfo->capi_ctrl);
-		if (retval) {
-			printk(KERN_ERR "c4: attach controller failed (%d).\n", i);
-			for (i--; i >= 0; i--) {
-				cinfo = &card->ctrlinfo[i];
-				detach_capi_ctr(&cinfo->capi_ctrl);
-			}
-			goto err_free_irq;
-		}
-		if (i == 0)
-			card->cardnr = cinfo->capi_ctrl.cnr;
-	}
-
-	printk(KERN_INFO "c4: AVM C%d at i/o %#x, irq %d, mem %#lx\n",
-	       nr_controllers, card->port, card->irq,
-	       card->membase);
-	pci_set_drvdata(dev, card);
-	return 0;
-
-err_free_irq:
-	free_irq(card->irq, card);
-err_unmap:
-	iounmap(card->mbase);
-err_release_region:
-	release_region(card->port, AVMB1_PORTLEN);
-err_free_dma:
-	avmcard_dma_free(card->dma);
-err_free:
-	b1_free_card(card);
-err:
-	return retval;
-}
-
-/* ------------------------------------------------------------- */
-
-static int c4_probe(struct pci_dev *dev, const struct pci_device_id *ent)
-{
-	int nr = ent->driver_data;
-	int retval = 0;
-	struct capicardparams param;
-
-	if (pci_enable_device(dev) < 0) {
-		printk(KERN_ERR "c4: failed to enable AVM-C%d\n", nr);
-		return -ENODEV;
-	}
-	pci_set_master(dev);
-
-	param.port = pci_resource_start(dev, 1);
-	param.irq = dev->irq;
-	param.membase = pci_resource_start(dev, 0);
-
-	printk(KERN_INFO "c4: PCI BIOS reports AVM-C%d at i/o %#x, irq %d, mem %#x\n",
-	       nr, param.port, param.irq, param.membase);
-
-	retval = c4_add_card(&param, dev, nr);
-	if (retval != 0) {
-		printk(KERN_ERR "c4: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n",
-		       nr, param.port, param.irq, param.membase);
-		pci_disable_device(dev);
-		return -ENODEV;
-	}
-	return 0;
-}
-
-static struct pci_driver c4_pci_driver = {
-	.name           = "c4",
-	.id_table       = c4_pci_tbl,
-	.probe          = c4_probe,
-	.remove         = c4_remove,
-};
-
-static struct capi_driver capi_driver_c2 = {
-	.name		= "c2",
-	.revision	= "1.0",
-};
-
-static struct capi_driver capi_driver_c4 = {
-	.name		= "c4",
-	.revision	= "1.0",
-};
-
-static int __init c4_init(void)
-{
-	char *p;
-	char rev[32];
-	int err;
-
-	if ((p = strchr(revision, ':')) != NULL && p[1]) {
-		strlcpy(rev, p + 2, 32);
-		if ((p = strchr(rev, '$')) != NULL && p > rev)
-			*(p - 1) = 0;
-	} else
-		strcpy(rev, "1.0");
-
-	err = pci_register_driver(&c4_pci_driver);
-	if (!err) {
-		strlcpy(capi_driver_c2.revision, rev, 32);
-		register_capi_driver(&capi_driver_c2);
-		strlcpy(capi_driver_c4.revision, rev, 32);
-		register_capi_driver(&capi_driver_c4);
-		printk(KERN_INFO "c4: revision %s\n", rev);
-	}
-	return err;
-}
-
-static void __exit c4_exit(void)
-{
-	unregister_capi_driver(&capi_driver_c2);
-	unregister_capi_driver(&capi_driver_c4);
-	pci_unregister_driver(&c4_pci_driver);
-}
-
-module_init(c4_init);
-module_exit(c4_exit);
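
Each of the deleted AVM drivers (c4 above, t1isa and t1pci below) derives its capi_driver revision string from a CVS "$Revision: ... $" keyword in its module_init() routine. The standalone sketch below reproduces just that parsing idiom for reference; the name parse_revision() is mine, and the kernel's strlcpy() is replaced by snprintf() so the example builds against plain libc.

#include <stdio.h>
#include <string.h>

/* Extract the version number from a CVS "$Revision: x.y $" keyword,
 * the same idiom c4_init(), t1isa_init() and t1pci_init() use to fill
 * the capi_driver revision field. Falls back to "1.0" if the keyword
 * was never expanded.
 */
static void parse_revision(const char *revision, char *rev, size_t size)
{
	const char *p = strchr(revision, ':');
	char *q;

	if (p && p[1]) {
		/* skip ": " and cut off the trailing " $" */
		snprintf(rev, size, "%s", p + 2);
		q = strchr(rev, '$');
		if (q && q > rev)
			*(q - 1) = '\0';
	} else {
		snprintf(rev, size, "1.0");
	}
}

int main(void)
{
	char rev[32];

	parse_revision("$Revision: 1.1.2.3 $", rev, sizeof(rev));
	printf("revision %s\n", rev);	/* prints "revision 1.1.2.3" */
	return 0;
}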
diff --git a/drivers/staging/isdn/avm/t1isa.c b/drivers/staging/isdn/avm/t1isa.c
deleted file mode 100644
index 2153619..0000000
--- a/drivers/staging/isdn/avm/t1isa.c
+++ /dev/null
@@ -1,594 +0,0 @@
-/* $Id: t1isa.c,v 1.1.2.3 2004/02/10 01:07:12 keil Exp $
- *
- * Module for AVM T1 HEMA-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/capi.h>
-#include <linux/netdevice.h>
-#include <linux/kernelcapi.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <asm/io.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.3 $";
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM T1 HEMA ISA card");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static int hema_irq_table[16] =
-{0,
- 0,
- 0,
- 0x80,				/* irq 3 */
- 0,
- 0x90,				/* irq 5 */
- 0,
- 0xA0,				/* irq 7 */
- 0,
- 0xB0,				/* irq 9 */
- 0xC0,				/* irq 10 */
- 0xD0,				/* irq 11 */
- 0xE0,				/* irq 12 */
- 0,
- 0,
- 0xF0,				/* irq 15 */
-};
-
-static int t1_detectandinit(unsigned int base, unsigned irq, int cardnr)
-{
-	unsigned char cregs[8];
-	unsigned char reverse_cardnr;
-	unsigned char dummy;
-	int i;
-
-	reverse_cardnr =   ((cardnr & 0x01) << 3) | ((cardnr & 0x02) << 1)
-		| ((cardnr & 0x04) >> 1) | ((cardnr & 0x08) >> 3);
-	cregs[0] = (HEMA_VERSION_ID << 4) | (reverse_cardnr & 0xf);
-	cregs[1] = 0x00; /* fast & slow link connected to CON1 */
-	cregs[2] = 0x05; /* fast link 20MBit, slow link 20 MBit */
-	cregs[3] = 0;
-	cregs[4] = 0x11; /* zero wait state */
-	cregs[5] = hema_irq_table[irq & 0xf];
-	cregs[6] = 0;
-	cregs[7] = 0;
-
-	/*
-	 * no one else should be using the ISA bus at this moment,
-	 * but there is no function to prevent this :-(
-	 * save_flags(flags); cli();
-	 */
-
-	/* board reset */
-	t1outp(base, T1_RESETBOARD, 0xf);
-	mdelay(100);
-	dummy = t1inp(base, T1_FASTLINK + T1_OUTSTAT); /* first read */
-
-	/* write config */
-	dummy = (base >> 4) & 0xff;
-	for (i = 1; i <= 0xf; i++) t1outp(base, i, dummy);
-	t1outp(base, HEMA_PAL_ID & 0xf, dummy);
-	t1outp(base, HEMA_PAL_ID >> 4, cregs[0]);
-	for (i = 1; i < 7; i++) t1outp(base, 0, cregs[i]);
-	t1outp(base, ((base >> 4)) & 0x3, cregs[7]);
-	/* restore_flags(flags); */
-
-	mdelay(100);
-	t1outp(base, T1_FASTLINK + T1_RESETLINK, 0);
-	t1outp(base, T1_SLOWLINK + T1_RESETLINK, 0);
-	mdelay(10);
-	t1outp(base, T1_FASTLINK + T1_RESETLINK, 1);
-	t1outp(base, T1_SLOWLINK + T1_RESETLINK, 1);
-	mdelay(100);
-	t1outp(base, T1_FASTLINK + T1_RESETLINK, 0);
-	t1outp(base, T1_SLOWLINK + T1_RESETLINK, 0);
-	mdelay(10);
-	t1outp(base, T1_FASTLINK + T1_ANALYSE, 0);
-	mdelay(5);
-	t1outp(base, T1_SLOWLINK + T1_ANALYSE, 0);
-
-	if (t1inp(base, T1_FASTLINK + T1_OUTSTAT) != 0x1) /* tx empty */
-		return 1;
-	if (t1inp(base, T1_FASTLINK + T1_INSTAT) != 0x0) /* rx empty */
-		return 2;
-	if (t1inp(base, T1_FASTLINK + T1_IRQENABLE) != 0x0)
-		return 3;
-	if ((t1inp(base, T1_FASTLINK + T1_FIFOSTAT) & 0xf0) != 0x70)
-		return 4;
-	if ((t1inp(base, T1_FASTLINK + T1_IRQMASTER) & 0x0e) != 0)
-		return 5;
-	if ((t1inp(base, T1_FASTLINK + T1_IDENT) & 0x7d) != 1)
-		return 6;
-	if (t1inp(base, T1_SLOWLINK + T1_OUTSTAT) != 0x1) /* tx empty */
-		return 7;
-	if ((t1inp(base, T1_SLOWLINK + T1_IRQMASTER) & 0x0e) != 0)
-		return 8;
-	if ((t1inp(base, T1_SLOWLINK + T1_IDENT) & 0x7d) != 0)
-		return 9;
-	return 0;
-}
-
-static irqreturn_t t1isa_interrupt(int interrupt, void *devptr)
-{
-	avmcard *card = devptr;
-	avmctrl_info *cinfo = &card->ctrlinfo[0];
-	struct capi_ctr *ctrl = &cinfo->capi_ctrl;
-	unsigned char b1cmd;
-	struct sk_buff *skb;
-
-	unsigned ApplId;
-	unsigned MsgLen;
-	unsigned DataB3Len;
-	unsigned NCCI;
-	unsigned WindowSize;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
-
-	while (b1_rx_full(card->port)) {
-
-		b1cmd = b1_get_byte(card->port);
-
-		switch (b1cmd) {
-
-		case RECEIVE_DATA_B3_IND:
-
-			ApplId = (unsigned) b1_get_word(card->port);
-			MsgLen = t1_get_slice(card->port, card->msgbuf);
-			DataB3Len = t1_get_slice(card->port, card->databuf);
-			spin_unlock_irqrestore(&card->lock, flags);
-
-			if (MsgLen < 30) { /* not CAPI 64Bit */
-				memset(card->msgbuf + MsgLen, 0, 30 - MsgLen);
-				MsgLen = 30;
-				CAPIMSG_SETLEN(card->msgbuf, 30);
-			}
-			if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
-				printk(KERN_ERR "%s: incoming packet dropped\n",
-				       card->name);
-			} else {
-				skb_put_data(skb, card->msgbuf, MsgLen);
-				skb_put_data(skb, card->databuf, DataB3Len);
-				capi_ctr_handle_message(ctrl, ApplId, skb);
-			}
-			break;
-
-		case RECEIVE_MESSAGE:
-
-			ApplId = (unsigned) b1_get_word(card->port);
-			MsgLen = t1_get_slice(card->port, card->msgbuf);
-			if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
-				spin_unlock_irqrestore(&card->lock, flags);
-				printk(KERN_ERR "%s: incoming packet dropped\n",
-				       card->name);
-			} else {
-				skb_put_data(skb, card->msgbuf, MsgLen);
-				if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3)
-					capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
-							     CAPIMSG_NCCI(skb->data),
-							     CAPIMSG_MSGID(skb->data));
-				spin_unlock_irqrestore(&card->lock, flags);
-				capi_ctr_handle_message(ctrl, ApplId, skb);
-			}
-			break;
-
-		case RECEIVE_NEW_NCCI:
-
-			ApplId = b1_get_word(card->port);
-			NCCI = b1_get_word(card->port);
-			WindowSize = b1_get_word(card->port);
-			capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
-			spin_unlock_irqrestore(&card->lock, flags);
-			break;
-
-		case RECEIVE_FREE_NCCI:
-
-			ApplId = b1_get_word(card->port);
-			NCCI = b1_get_word(card->port);
-			if (NCCI != 0xffffffff)
-				capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
-			spin_unlock_irqrestore(&card->lock, flags);
-			break;
-
-		case RECEIVE_START:
-			b1_put_byte(card->port, SEND_POLLACK);
-			spin_unlock_irqrestore(&card->lock, flags);
-			capi_ctr_resume_output(ctrl);
-			break;
-
-		case RECEIVE_STOP:
-			spin_unlock_irqrestore(&card->lock, flags);
-			capi_ctr_suspend_output(ctrl);
-			break;
-
-		case RECEIVE_INIT:
-
-			cinfo->versionlen = t1_get_slice(card->port, cinfo->versionbuf);
-			spin_unlock_irqrestore(&card->lock, flags);
-			b1_parse_version(cinfo);
-			printk(KERN_INFO "%s: %s-card (%s) now active\n",
-			       card->name,
-			       cinfo->version[VER_CARDTYPE],
-			       cinfo->version[VER_DRIVER]);
-			capi_ctr_ready(ctrl);
-			break;
-
-		case RECEIVE_TASK_READY:
-			ApplId = (unsigned) b1_get_word(card->port);
-			MsgLen = t1_get_slice(card->port, card->msgbuf);
-			spin_unlock_irqrestore(&card->lock, flags);
-			card->msgbuf[MsgLen] = 0;
-			while (MsgLen > 0
-			       && (card->msgbuf[MsgLen - 1] == '\n'
-				   || card->msgbuf[MsgLen - 1] == '\r')) {
-				card->msgbuf[MsgLen - 1] = 0;
-				MsgLen--;
-			}
-			printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
-			       card->name, ApplId, card->msgbuf);
-			break;
-
-		case RECEIVE_DEBUGMSG:
-			MsgLen = t1_get_slice(card->port, card->msgbuf);
-			spin_unlock_irqrestore(&card->lock, flags);
-			card->msgbuf[MsgLen] = 0;
-			while (MsgLen > 0
-			       && (card->msgbuf[MsgLen - 1] == '\n'
-				   || card->msgbuf[MsgLen - 1] == '\r')) {
-				card->msgbuf[MsgLen - 1] = 0;
-				MsgLen--;
-			}
-			printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
-			break;
-
-
-		case 0xff:
-			spin_unlock_irqrestore(&card->lock, flags);
-			printk(KERN_ERR "%s: card reset?\n", card->name);
-			return IRQ_HANDLED;
-		default:
-			spin_unlock_irqrestore(&card->lock, flags);
-			printk(KERN_ERR "%s: b1_interrupt: 0x%x ???\n",
-			       card->name, b1cmd);
-			return IRQ_NONE;
-		}
-	}
-	return IRQ_HANDLED;
-}
-
-/* ------------------------------------------------------------- */
-
-static int t1isa_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	unsigned int port = card->port;
-	unsigned long flags;
-	int retval;
-
-	t1_disable_irq(port);
-	b1_reset(port);
-
-	if ((retval = b1_load_t4file(card, &data->firmware))) {
-		b1_reset(port);
-		printk(KERN_ERR "%s: failed to load t4file!!\n",
-		       card->name);
-		return retval;
-	}
-
-	if (data->configuration.len > 0 && data->configuration.data) {
-		if ((retval = b1_load_config(card, &data->configuration))) {
-			b1_reset(port);
-			printk(KERN_ERR "%s: failed to load config!!\n",
-			       card->name);
-			return retval;
-		}
-	}
-
-	if (!b1_loaded(card)) {
-		printk(KERN_ERR "%s: failed to load t4file.\n", card->name);
-		return -EIO;
-	}
-
-	spin_lock_irqsave(&card->lock, flags);
-	b1_setinterrupt(port, card->irq, card->cardtype);
-	b1_put_byte(port, SEND_INIT);
-	b1_put_word(port, CAPI_MAXAPPL);
-	b1_put_word(port, AVM_NCCI_PER_CHANNEL * 30);
-	b1_put_word(port, ctrl->cnr - 1);
-	spin_unlock_irqrestore(&card->lock, flags);
-
-	return 0;
-}
-
-static void t1isa_reset_ctr(struct capi_ctr *ctrl)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	unsigned int port = card->port;
-	unsigned long flags;
-
-	t1_disable_irq(port);
-	b1_reset(port);
-	b1_reset(port);
-
-	memset(cinfo->version, 0, sizeof(cinfo->version));
-	spin_lock_irqsave(&card->lock, flags);
-	capilib_release(&cinfo->ncci_head);
-	spin_unlock_irqrestore(&card->lock, flags);
-	capi_ctr_down(ctrl);
-}
-
-static void t1isa_remove(struct pci_dev *pdev)
-{
-	avmctrl_info *cinfo = pci_get_drvdata(pdev);
-	avmcard *card;
-
-	if (!cinfo)
-		return;
-
-	card = cinfo->card;
-
-	t1_disable_irq(card->port);
-	b1_reset(card->port);
-	b1_reset(card->port);
-	t1_reset(card->port);
-
-	detach_capi_ctr(&cinfo->capi_ctrl);
-	free_irq(card->irq, card);
-	release_region(card->port, AVMB1_PORTLEN);
-	b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static u16 t1isa_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
-static char *t1isa_procinfo(struct capi_ctr *ctrl);
-
-static int t1isa_probe(struct pci_dev *pdev, int cardnr)
-{
-	avmctrl_info *cinfo;
-	avmcard *card;
-	int retval;
-
-	card = b1_alloc_card(1);
-	if (!card) {
-		printk(KERN_WARNING "t1isa: no memory.\n");
-		retval = -ENOMEM;
-		goto err;
-	}
-
-	cinfo = card->ctrlinfo;
-	card->port = pci_resource_start(pdev, 0);
-	card->irq = pdev->irq;
-	card->cardtype = avm_t1isa;
-	card->cardnr = cardnr;
-	sprintf(card->name, "t1isa-%x", card->port);
-
-	if (!(((card->port & 0x7) == 0) && ((card->port & 0x30) != 0x30))) {
-		printk(KERN_WARNING "t1isa: invalid port 0x%x.\n", card->port);
-		retval = -EINVAL;
-		goto err_free;
-	}
-	if (hema_irq_table[card->irq & 0xf] == 0) {
-		printk(KERN_WARNING "t1isa: irq %d not valid.\n", card->irq);
-		retval = -EINVAL;
-		goto err_free;
-	}
-	if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
-		printk(KERN_INFO "t1isa: ports 0x%03x-0x%03x in use.\n",
-		       card->port, card->port + AVMB1_PORTLEN);
-		retval = -EBUSY;
-		goto err_free;
-	}
-	retval = request_irq(card->irq, t1isa_interrupt, 0, card->name, card);
-	if (retval) {
-		printk(KERN_INFO "t1isa: unable to get IRQ %d.\n", card->irq);
-		retval = -EBUSY;
-		goto err_release_region;
-	}
-
-	if ((retval = t1_detectandinit(card->port, card->irq, card->cardnr)) != 0) {
-		printk(KERN_INFO "t1isa: NO card at 0x%x (%d)\n",
-		       card->port, retval);
-		retval = -ENODEV;
-		goto err_free_irq;
-	}
-	t1_disable_irq(card->port);
-	b1_reset(card->port);
-
-	cinfo->capi_ctrl.owner = THIS_MODULE;
-	cinfo->capi_ctrl.driver_name   = "t1isa";
-	cinfo->capi_ctrl.driverdata    = cinfo;
-	cinfo->capi_ctrl.register_appl = b1_register_appl;
-	cinfo->capi_ctrl.release_appl  = b1_release_appl;
-	cinfo->capi_ctrl.send_message  = t1isa_send_message;
-	cinfo->capi_ctrl.load_firmware = t1isa_load_firmware;
-	cinfo->capi_ctrl.reset_ctr     = t1isa_reset_ctr;
-	cinfo->capi_ctrl.procinfo      = t1isa_procinfo;
-	cinfo->capi_ctrl.proc_show     = b1_proc_show;
-	strcpy(cinfo->capi_ctrl.name, card->name);
-
-	retval = attach_capi_ctr(&cinfo->capi_ctrl);
-	if (retval) {
-		printk(KERN_INFO "t1isa: attach controller failed.\n");
-		goto err_free_irq;
-	}
-
-	printk(KERN_INFO "t1isa: AVM T1 ISA at i/o %#x, irq %d, card %d\n",
-	       card->port, card->irq, card->cardnr);
-
-	pci_set_drvdata(pdev, cinfo);
-	return 0;
-
-err_free_irq:
-	free_irq(card->irq, card);
-err_release_region:
-	release_region(card->port, AVMB1_PORTLEN);
-err_free:
-	b1_free_card(card);
-err:
-	return retval;
-}
-
-static u16 t1isa_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-	avmcard *card = cinfo->card;
-	unsigned int port = card->port;
-	unsigned long flags;
-	u16 len = CAPIMSG_LEN(skb->data);
-	u8 cmd = CAPIMSG_COMMAND(skb->data);
-	u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data);
-	u16 dlen, retval;
-
-	spin_lock_irqsave(&card->lock, flags);
-	if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
-		retval = capilib_data_b3_req(&cinfo->ncci_head,
-					     CAPIMSG_APPID(skb->data),
-					     CAPIMSG_NCCI(skb->data),
-					     CAPIMSG_MSGID(skb->data));
-		if (retval != CAPI_NOERROR) {
-			spin_unlock_irqrestore(&card->lock, flags);
-			return retval;
-		}
-		dlen = CAPIMSG_DATALEN(skb->data);
-
-		b1_put_byte(port, SEND_DATA_B3_REQ);
-		t1_put_slice(port, skb->data, len);
-		t1_put_slice(port, skb->data + len, dlen);
-	} else {
-		b1_put_byte(port, SEND_MESSAGE);
-		t1_put_slice(port, skb->data, len);
-	}
-	spin_unlock_irqrestore(&card->lock, flags);
-	dev_kfree_skb_any(skb);
-	return CAPI_NOERROR;
-}
-/* ------------------------------------------------------------- */
-
-static char *t1isa_procinfo(struct capi_ctr *ctrl)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
-	if (!cinfo)
-		return "";
-	sprintf(cinfo->infobuf, "%s %s 0x%x %d %d",
-		cinfo->cardname[0] ? cinfo->cardname : "-",
-		cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
-		cinfo->card ? cinfo->card->port : 0x0,
-		cinfo->card ? cinfo->card->irq : 0,
-		cinfo->card ? cinfo->card->cardnr : 0
-		);
-	return cinfo->infobuf;
-}
-
-
-/* ------------------------------------------------------------- */
-
-#define MAX_CARDS 4
-static struct pci_dev isa_dev[MAX_CARDS];
-static int io[MAX_CARDS];
-static int irq[MAX_CARDS];
-static int cardnr[MAX_CARDS];
-
-module_param_hw_array(io, int, ioport, NULL, 0);
-module_param_hw_array(irq, int, irq, NULL, 0);
-module_param_array(cardnr, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
-MODULE_PARM_DESC(cardnr, "Card number(s) (as jumpered)");
-
-static int t1isa_add_card(struct capi_driver *driver, capicardparams *data)
-{
-	int i;
-
-	for (i = 0; i < MAX_CARDS; i++) {
-		if (isa_dev[i].resource[0].start)
-			continue;
-
-		isa_dev[i].resource[0].start = data->port;
-		isa_dev[i].irq = data->irq;
-
-		if (t1isa_probe(&isa_dev[i], data->cardnr) == 0)
-			return 0;
-	}
-	return -ENODEV;
-}
-
-static struct capi_driver capi_driver_t1isa = {
-	.name		= "t1isa",
-	.revision	= "1.0",
-	.add_card       = t1isa_add_card,
-};
-
-static int __init t1isa_init(void)
-{
-	char rev[32];
-	char *p;
-	int i;
-
-	if ((p = strchr(revision, ':')) != NULL && p[1]) {
-		strlcpy(rev, p + 2, 32);
-		if ((p = strchr(rev, '$')) != NULL && p > rev)
-			*(p - 1) = 0;
-	} else
-		strcpy(rev, "1.0");
-
-	for (i = 0; i < MAX_CARDS; i++) {
-		if (!io[i])
-			break;
-
-		isa_dev[i].resource[0].start = io[i];
-		isa_dev[i].irq = irq[i];
-
-		if (t1isa_probe(&isa_dev[i], cardnr[i]) != 0)
-			return -ENODEV;
-	}
-
-	strlcpy(capi_driver_t1isa.revision, rev, 32);
-	register_capi_driver(&capi_driver_t1isa);
-	printk(KERN_INFO "t1isa: revision %s\n", rev);
-
-	return 0;
-}
-
-static void __exit t1isa_exit(void)
-{
-	int i;
-
-	unregister_capi_driver(&capi_driver_t1isa);
-	for (i = 0; i < MAX_CARDS; i++) {
-		if (!io[i])
-			break;
-
-		t1isa_remove(&isa_dev[i]);
-	}
-}
-
-module_init(t1isa_init);
-module_exit(t1isa_exit);
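
A side note on t1_detectandinit() above: the card number is written into the first HEMA configuration register with its low nibble bit-reversed before being combined with HEMA_VERSION_ID. The minimal sketch below reproduces only that bit shuffling; reverse_nibble() is a name of my own, not something the driver defines.

#include <assert.h>
#include <stdio.h>

/* Reverse the four low-order bits of a card number, mirroring the
 * shuffling done in t1_detectandinit() when building cregs[0].
 */
static unsigned char reverse_nibble(unsigned char cardnr)
{
	return ((cardnr & 0x01) << 3) | ((cardnr & 0x02) << 1) |
	       ((cardnr & 0x04) >> 1) | ((cardnr & 0x08) >> 3);
}

int main(void)
{
	/* 0b0001 becomes 0b1000, the palindrome 0b0110 stays itself */
	assert(reverse_nibble(0x1) == 0x8);
	assert(reverse_nibble(0x6) == 0x6);
	printf("cardnr 3 -> 0x%x\n", reverse_nibble(0x3));	/* prints 0xc */
	return 0;
}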
diff --git a/drivers/staging/isdn/avm/t1pci.c b/drivers/staging/isdn/avm/t1pci.c
deleted file mode 100644
index f5ed1d5..0000000
--- a/drivers/staging/isdn/avm/t1pci.c
+++ /dev/null
@@ -1,259 +0,0 @@
-/* $Id: t1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Module for AVM T1 PCI-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/capi.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-#undef CONFIG_T1PCI_DEBUG
-#undef CONFIG_T1PCI_POLLDEBUG
-
-/* ------------------------------------------------------------- */
-static char *revision = "$Revision: 1.1.2.2 $";
-/* ------------------------------------------------------------- */
-
-static struct pci_device_id t1pci_pci_tbl[] = {
-	{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_T1, PCI_ANY_ID, PCI_ANY_ID },
-	{ }				/* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(pci, t1pci_pci_tbl);
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM T1 PCI card");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static char *t1pci_procinfo(struct capi_ctr *ctrl);
-
-static int t1pci_add_card(struct capicardparams *p, struct pci_dev *pdev)
-{
-	avmcard *card;
-	avmctrl_info *cinfo;
-	int retval;
-
-	card = b1_alloc_card(1);
-	if (!card) {
-		printk(KERN_WARNING "t1pci: no memory.\n");
-		retval = -ENOMEM;
-		goto err;
-	}
-
-	card->dma = avmcard_dma_alloc("t1pci", pdev, 2048 + 128, 2048 + 128);
-	if (!card->dma) {
-		printk(KERN_WARNING "t1pci: no memory.\n");
-		retval = -ENOMEM;
-		goto err_free;
-	}
-
-	cinfo = card->ctrlinfo;
-	sprintf(card->name, "t1pci-%x", p->port);
-	card->port = p->port;
-	card->irq = p->irq;
-	card->membase = p->membase;
-	card->cardtype = avm_t1pci;
-
-	if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
-		printk(KERN_WARNING "t1pci: ports 0x%03x-0x%03x in use.\n",
-		       card->port, card->port + AVMB1_PORTLEN);
-		retval = -EBUSY;
-		goto err_free_dma;
-	}
-
-	card->mbase = ioremap(card->membase, 64);
-	if (!card->mbase) {
-		printk(KERN_NOTICE "t1pci: can't remap memory at 0x%lx\n",
-		       card->membase);
-		retval = -EIO;
-		goto err_release_region;
-	}
-
-	b1dma_reset(card);
-
-	retval = t1pci_detect(card);
-	if (retval != 0) {
-		if (retval < 6)
-			printk(KERN_NOTICE "t1pci: NO card at 0x%x (%d)\n",
-			       card->port, retval);
-		else
-			printk(KERN_NOTICE "t1pci: card at 0x%x, but cable not connected or T1 has no power (%d)\n",
-			       card->port, retval);
-		retval = -EIO;
-		goto err_unmap;
-	}
-	b1dma_reset(card);
-
-	retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card);
-	if (retval) {
-		printk(KERN_ERR "t1pci: unable to get IRQ %d.\n", card->irq);
-		retval = -EBUSY;
-		goto err_unmap;
-	}
-
-	cinfo->capi_ctrl.owner         = THIS_MODULE;
-	cinfo->capi_ctrl.driver_name   = "t1pci";
-	cinfo->capi_ctrl.driverdata    = cinfo;
-	cinfo->capi_ctrl.register_appl = b1dma_register_appl;
-	cinfo->capi_ctrl.release_appl  = b1dma_release_appl;
-	cinfo->capi_ctrl.send_message  = b1dma_send_message;
-	cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
-	cinfo->capi_ctrl.reset_ctr     = b1dma_reset_ctr;
-	cinfo->capi_ctrl.procinfo      = t1pci_procinfo;
-	cinfo->capi_ctrl.proc_show     = b1dma_proc_show;
-	strcpy(cinfo->capi_ctrl.name, card->name);
-
-	retval = attach_capi_ctr(&cinfo->capi_ctrl);
-	if (retval) {
-		printk(KERN_ERR "t1pci: attach controller failed.\n");
-		retval = -EBUSY;
-		goto err_free_irq;
-	}
-	card->cardnr = cinfo->capi_ctrl.cnr;
-
-	printk(KERN_INFO "t1pci: AVM T1 PCI at i/o %#x, irq %d, mem %#lx\n",
-	       card->port, card->irq, card->membase);
-
-	pci_set_drvdata(pdev, card);
-	return 0;
-
-err_free_irq:
-	free_irq(card->irq, card);
-err_unmap:
-	iounmap(card->mbase);
-err_release_region:
-	release_region(card->port, AVMB1_PORTLEN);
-err_free_dma:
-	avmcard_dma_free(card->dma);
-err_free:
-	b1_free_card(card);
-err:
-	return retval;
-}
-
-/* ------------------------------------------------------------- */
-
-static void t1pci_remove(struct pci_dev *pdev)
-{
-	avmcard *card = pci_get_drvdata(pdev);
-	avmctrl_info *cinfo = card->ctrlinfo;
-
-	b1dma_reset(card);
-
-	detach_capi_ctr(&cinfo->capi_ctrl);
-	free_irq(card->irq, card);
-	iounmap(card->mbase);
-	release_region(card->port, AVMB1_PORTLEN);
-	avmcard_dma_free(card->dma);
-	b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static char *t1pci_procinfo(struct capi_ctr *ctrl)
-{
-	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
-	if (!cinfo)
-		return "";
-	sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx",
-		cinfo->cardname[0] ? cinfo->cardname : "-",
-		cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
-		cinfo->card ? cinfo->card->port : 0x0,
-		cinfo->card ? cinfo->card->irq : 0,
-		cinfo->card ? cinfo->card->membase : 0
-		);
-	return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-static int t1pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
-{
-	struct capicardparams param;
-	int retval;
-
-	if (pci_enable_device(dev) < 0) {
-		printk(KERN_ERR	"t1pci: failed to enable AVM-T1-PCI\n");
-		return -ENODEV;
-	}
-	pci_set_master(dev);
-
-	param.port = pci_resource_start(dev, 1);
-	param.irq = dev->irq;
-	param.membase = pci_resource_start(dev, 0);
-
-	printk(KERN_INFO "t1pci: PCI BIOS reports AVM-T1-PCI at i/o %#x, irq %d, mem %#x\n",
-	       param.port, param.irq, param.membase);
-
-	retval = t1pci_add_card(&param, dev);
-	if (retval != 0) {
-		printk(KERN_ERR "t1pci: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
-		       param.port, param.irq, param.membase);
-		pci_disable_device(dev);
-		return -ENODEV;
-	}
-	return 0;
-}
-
-static struct pci_driver t1pci_pci_driver = {
-	.name           = "t1pci",
-	.id_table       = t1pci_pci_tbl,
-	.probe          = t1pci_probe,
-	.remove         = t1pci_remove,
-};
-
-static struct capi_driver capi_driver_t1pci = {
-	.name		= "t1pci",
-	.revision	= "1.0",
-};
-
-static int __init t1pci_init(void)
-{
-	char *p;
-	char rev[32];
-	int err;
-
-	if ((p = strchr(revision, ':')) != NULL && p[1]) {
-		strlcpy(rev, p + 2, 32);
-		if ((p = strchr(rev, '$')) != NULL && p > rev)
-			*(p - 1) = 0;
-	} else
-		strcpy(rev, "1.0");
-
-	err = pci_register_driver(&t1pci_pci_driver);
-	if (!err) {
-		strlcpy(capi_driver_t1pci.revision, rev, 32);
-		register_capi_driver(&capi_driver_t1pci);
-		printk(KERN_INFO "t1pci: revision %s\n", rev);
-	}
-	return err;
-}
-
-static void __exit t1pci_exit(void)
-{
-	unregister_capi_driver(&capi_driver_t1pci);
-	pci_unregister_driver(&t1pci_pci_driver);
-}
-
-module_init(t1pci_init);
-module_exit(t1pci_exit);
diff --git a/drivers/staging/isdn/gigaset/Kconfig b/drivers/staging/isdn/gigaset/Kconfig
deleted file mode 100644
index c593105..0000000
--- a/drivers/staging/isdn/gigaset/Kconfig
+++ /dev/null
@@ -1,62 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menuconfig ISDN_DRV_GIGASET
-	tristate "Siemens Gigaset support"
-	depends on TTY
-	select CRC_CCITT
-	select BITREVERSE
-	help
-	  This driver supports the Siemens Gigaset SX205/255 family of
-	  ISDN DECT bases, including the predecessors Gigaset 3070/3075
-	  and 4170/4175 and their T-Com versions Sinus 45isdn and Sinus
-	  721X.
-	  If you have one of these devices, say M here and for at least
-	  one of the connection specific parts that follow.
-	  This will build a module called "gigaset".
-	  Note: If you build your ISDN subsystem (ISDN_CAPI or ISDN_I4L)
-	  as a module, you have to build this driver as a module too,
-	  otherwise the Gigaset device won't show up as an ISDN device.
-
-if ISDN_DRV_GIGASET
-
-config GIGASET_CAPI
-	bool "Gigaset CAPI support"
-	depends on ISDN_CAPI='y'||(ISDN_CAPI='m'&&ISDN_DRV_GIGASET='m')
-	default 'y'
-	help
-	  Build the Gigaset driver as a CAPI 2.0 driver interfacing with
-	  the Kernel CAPI subsystem. To use it with the old ISDN4Linux
-	  subsystem you'll have to enable the capidrv glue driver.
-	  (select ISDN_CAPI_CAPIDRV.)
-	  Say N to build the old native ISDN4Linux variant.
-	  If unsure, say Y.
-
-config GIGASET_BASE
-	tristate "Gigaset base station support"
-	depends on USB
-	help
-	  Say M here if you want to use the USB interface of the Gigaset
-	  base for connection to your system.
-	  This will build a module called "bas_gigaset".
-
-config GIGASET_M105
-	tristate "Gigaset M105 support"
-	depends on USB
-	help
-	  Say M here if you want to connect to the Gigaset base via DECT
-	  using a Gigaset M105 (Sinus 45 Data 2) USB DECT device.
-	  This will build a module called "usb_gigaset".
-
-config GIGASET_M101
-	tristate "Gigaset M101 support"
-	help
-	  Say M here if you want to connect to the Gigaset base via DECT
-	  using a Gigaset M101 (Sinus 45 Data 1) RS232 DECT device.
-	  This will build a module called "ser_gigaset".
-
-config GIGASET_DEBUG
-	bool "Gigaset debugging"
-	help
-	  This enables debugging code in the Gigaset drivers.
-	  If in doubt, say yes.
-
-endif # ISDN_DRV_GIGASET
diff --git a/drivers/staging/isdn/gigaset/Makefile b/drivers/staging/isdn/gigaset/Makefile
deleted file mode 100644
index 9c01089..0000000
--- a/drivers/staging/isdn/gigaset/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-gigaset-y := common.o interface.o proc.o ev-layer.o asyncdata.o
-
-ifdef CONFIG_GIGASET_CAPI
-gigaset-y += capi.o
-else
-gigaset-y += dummyll.o
-endif
-
-usb_gigaset-y := usb-gigaset.o
-ser_gigaset-y := ser-gigaset.o
-bas_gigaset-y := bas-gigaset.o isocdata.o
-
-obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset.o
-obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o
-obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o
-obj-$(CONFIG_GIGASET_M101) += ser_gigaset.o
diff --git a/drivers/staging/isdn/gigaset/asyncdata.c b/drivers/staging/isdn/gigaset/asyncdata.c
deleted file mode 100644
index a34b3c9..0000000
--- a/drivers/staging/isdn/gigaset/asyncdata.c
+++ /dev/null
@@ -1,606 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Common data handling layer for ser_gigaset and usb_gigaset
- *
- * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
- *                       Hansjoerg Lipp <hjlipp@web.de>,
- *                       Stefan Eilers.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/crc-ccitt.h>
-#include <linux/bitrev.h>
-#include <linux/export.h>
-
-/* check if byte must be stuffed/escaped
- * I'm not sure which data should be encoded.
- * Therefore I will go the hard way and encode every value
- * less than 0x20, the flag sequence and the control escape char.
- */
-static inline int muststuff(unsigned char c)
-{
-	if (c < PPP_TRANS) return 1;
-	if (c == PPP_FLAG) return 1;
-	if (c == PPP_ESCAPE) return 1;
-	/* other possible candidates: */
-	/* 0x91: XON with parity set */
-	/* 0x93: XOFF with parity set */
-	return 0;
-}
-
-/* == data input =========================================================== */
-
-/* process a block of received bytes in command mode
- * (mstate != MS_LOCKED && (inputstate & INS_command))
- * Append received bytes to the command response buffer and forward them
- * line by line to the response handler. Exit whenever a mode/state change
- * might have occurred.
- * Note: Received lines may be terminated by CR, LF, or CR LF, which will be
- * removed before passing the line to the response handler.
- * Return value:
- *	number of processed bytes
- */
-static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
-{
-	unsigned char *src = inbuf->data + inbuf->head;
-	struct cardstate *cs = inbuf->cs;
-	unsigned cbytes = cs->cbytes;
-	unsigned procbytes = 0;
-	unsigned char c;
-
-	while (procbytes < numbytes) {
-		c = *src++;
-		procbytes++;
-
-		switch (c) {
-		case '\n':
-			if (cbytes == 0 && cs->respdata[0] == '\r') {
-				/* collapse LF with preceding CR */
-				cs->respdata[0] = 0;
-				break;
-			}
-			/* fall through */
-		case '\r':
-			/* end of message line, pass to response handler */
-			if (cbytes >= MAX_RESP_SIZE) {
-				dev_warn(cs->dev, "response too large (%d)\n",
-					 cbytes);
-				cbytes = MAX_RESP_SIZE;
-			}
-			cs->cbytes = cbytes;
-			gigaset_dbg_buffer(DEBUG_TRANSCMD, "received response",
-					   cbytes, cs->respdata);
-			gigaset_handle_modem_response(cs);
-			cbytes = 0;
-
-			/* store EOL byte for CRLF collapsing */
-			cs->respdata[0] = c;
-
-			/* cs->dle may have changed */
-			if (cs->dle && !(inbuf->inputstate & INS_DLE_command))
-				inbuf->inputstate &= ~INS_command;
-
-			/* return for reevaluating state */
-			goto exit;
-
-		case DLE_FLAG:
-			if (inbuf->inputstate & INS_DLE_char) {
-				/* quoted DLE: clear quote flag */
-				inbuf->inputstate &= ~INS_DLE_char;
-			} else if (cs->dle ||
-				   (inbuf->inputstate & INS_DLE_command)) {
-				/* DLE escape, pass up for handling */
-				inbuf->inputstate |= INS_DLE_char;
-				goto exit;
-			}
-			/* quoted or not in DLE mode: treat as regular data */
-			/* fall through */
-		default:
-			/* append to line buffer if possible */
-			if (cbytes < MAX_RESP_SIZE)
-				cs->respdata[cbytes] = c;
-			cbytes++;
-		}
-	}
-exit:
-	cs->cbytes = cbytes;
-	return procbytes;
-}
-
-/* process a block of received bytes in lock mode
- * All received bytes are passed unmodified to the tty i/f.
- * Return value:
- *	number of processed bytes
- */
-static unsigned lock_loop(unsigned numbytes, struct inbuf_t *inbuf)
-{
-	unsigned char *src = inbuf->data + inbuf->head;
-
-	gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src);
-	gigaset_if_receive(inbuf->cs, src, numbytes);
-	return numbytes;
-}
-
-/* process a block of received bytes in HDLC data mode
- * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 == L2_HDLC)
- * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
- * When a frame is complete, check the FCS and pass valid frames to the LL.
- * If DLE is encountered, return immediately to let the caller handle it.
- * Return value:
- *	number of processed bytes
- */
-static unsigned hdlc_loop(unsigned numbytes, struct inbuf_t *inbuf)
-{
-	struct cardstate *cs = inbuf->cs;
-	struct bc_state *bcs = cs->bcs;
-	int inputstate = bcs->inputstate;
-	__u16 fcs = bcs->rx_fcs;
-	struct sk_buff *skb = bcs->rx_skb;
-	unsigned char *src = inbuf->data + inbuf->head;
-	unsigned procbytes = 0;
-	unsigned char c;
-
-	if (inputstate & INS_byte_stuff) {
-		if (!numbytes)
-			return 0;
-		inputstate &= ~INS_byte_stuff;
-		goto byte_stuff;
-	}
-
-	while (procbytes < numbytes) {
-		c = *src++;
-		procbytes++;
-		if (c == DLE_FLAG) {
-			if (inputstate & INS_DLE_char) {
-				/* quoted DLE: clear quote flag */
-				inputstate &= ~INS_DLE_char;
-			} else if (cs->dle || (inputstate & INS_DLE_command)) {
-				/* DLE escape, pass up for handling */
-				inputstate |= INS_DLE_char;
-				break;
-			}
-		}
-
-		if (c == PPP_ESCAPE) {
-			/* byte stuffing indicator: pull in next byte */
-			if (procbytes >= numbytes) {
-				/* end of buffer, save for later processing */
-				inputstate |= INS_byte_stuff;
-				break;
-			}
-byte_stuff:
-			c = *src++;
-			procbytes++;
-			if (c == DLE_FLAG) {
-				if (inputstate & INS_DLE_char) {
-					/* quoted DLE: clear quote flag */
-					inputstate &= ~INS_DLE_char;
-				} else if (cs->dle ||
-					   (inputstate & INS_DLE_command)) {
-					/* DLE escape, pass up for handling */
-					inputstate |=
-						INS_DLE_char | INS_byte_stuff;
-					break;
-				}
-			}
-			c ^= PPP_TRANS;
-#ifdef CONFIG_GIGASET_DEBUG
-			if (!muststuff(c))
-				gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
-#endif
-		} else if (c == PPP_FLAG) {
-			/* end of frame: process content if any */
-			if (inputstate & INS_have_data) {
-				gig_dbg(DEBUG_HDLC,
-					"7e----------------------------");
-
-				/* check and pass received frame */
-				if (!skb) {
-					/* skipped frame */
-					gigaset_isdn_rcv_err(bcs);
-				} else if (skb->len < 2) {
-					/* frame too short for FCS */
-					dev_warn(cs->dev,
-						 "short frame (%d)\n",
-						 skb->len);
-					gigaset_isdn_rcv_err(bcs);
-					dev_kfree_skb_any(skb);
-				} else if (fcs != PPP_GOODFCS) {
-					/* frame check error */
-					dev_err(cs->dev,
-						"Checksum failed, %u bytes corrupted!\n",
-						skb->len);
-					gigaset_isdn_rcv_err(bcs);
-					dev_kfree_skb_any(skb);
-				} else {
-					/* good frame */
-					__skb_trim(skb, skb->len - 2);
-					gigaset_skb_rcvd(bcs, skb);
-				}
-
-				/* prepare reception of next frame */
-				inputstate &= ~INS_have_data;
-				skb = gigaset_new_rx_skb(bcs);
-			} else {
-				/* empty frame (7E 7E) */
-#ifdef CONFIG_GIGASET_DEBUG
-				++bcs->emptycount;
-#endif
-				if (!skb) {
-					/* skipped (?) */
-					gigaset_isdn_rcv_err(bcs);
-					skb = gigaset_new_rx_skb(bcs);
-				}
-			}
-
-			fcs = PPP_INITFCS;
-			continue;
-#ifdef CONFIG_GIGASET_DEBUG
-		} else if (muststuff(c)) {
-			/* Should not happen. Possible after ZDLE=1<CR><LF>. */
-			gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c);
-#endif
-		}
-
-		/* regular data byte, append to skb */
-#ifdef CONFIG_GIGASET_DEBUG
-		if (!(inputstate & INS_have_data)) {
-			gig_dbg(DEBUG_HDLC, "7e (%d x) ================",
-				bcs->emptycount);
-			bcs->emptycount = 0;
-		}
-#endif
-		inputstate |= INS_have_data;
-		if (skb) {
-			if (skb->len >= bcs->rx_bufsize) {
-				dev_warn(cs->dev, "received packet too long\n");
-				dev_kfree_skb_any(skb);
-				/* skip remainder of packet */
-				bcs->rx_skb = skb = NULL;
-			} else {
-				__skb_put_u8(skb, c);
-				fcs = crc_ccitt_byte(fcs, c);
-			}
-		}
-	}
-
-	bcs->inputstate = inputstate;
-	bcs->rx_fcs = fcs;
-	return procbytes;
-}
-
-/* process a block of received bytes in transparent data mode
- * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 != L2_HDLC)
- * Invert bytes, undoing byte stuffing and watching for DLE escapes.
- * If DLE is encountered, return immediately to let the caller handle it.
- * Return value:
- *	number of processed bytes
- */
-static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
-{
-	struct cardstate *cs = inbuf->cs;
-	struct bc_state *bcs = cs->bcs;
-	int inputstate = bcs->inputstate;
-	struct sk_buff *skb = bcs->rx_skb;
-	unsigned char *src = inbuf->data + inbuf->head;
-	unsigned procbytes = 0;
-	unsigned char c;
-
-	if (!skb) {
-		/* skip this block */
-		gigaset_new_rx_skb(bcs);
-		return numbytes;
-	}
-
-	while (procbytes < numbytes && skb->len < bcs->rx_bufsize) {
-		c = *src++;
-		procbytes++;
-
-		if (c == DLE_FLAG) {
-			if (inputstate & INS_DLE_char) {
-				/* quoted DLE: clear quote flag */
-				inputstate &= ~INS_DLE_char;
-			} else if (cs->dle || (inputstate & INS_DLE_command)) {
-				/* DLE escape, pass up for handling */
-				inputstate |= INS_DLE_char;
-				break;
-			}
-		}
-
-		/* regular data byte: append to current skb */
-		inputstate |= INS_have_data;
-		__skb_put_u8(skb, bitrev8(c));
-	}
-
-	/* pass data up */
-	if (inputstate & INS_have_data) {
-		gigaset_skb_rcvd(bcs, skb);
-		inputstate &= ~INS_have_data;
-		gigaset_new_rx_skb(bcs);
-	}
-
-	bcs->inputstate = inputstate;
-	return procbytes;
-}
-
-/* process DLE escapes
- * Called whenever a DLE sequence might be encountered in the input stream.
- * Either processes the entire DLE sequence or, if that isn't possible,
- * notes the fact that an initial DLE has been received in the INS_DLE_char
- * inputstate flag and resumes processing of the sequence on the next call.
- */
-static void handle_dle(struct inbuf_t *inbuf)
-{
-	struct cardstate *cs = inbuf->cs;
-
-	if (cs->mstate == MS_LOCKED)
-		return;		/* no DLE processing in lock mode */
-
-	if (!(inbuf->inputstate & INS_DLE_char)) {
-		/* no DLE pending */
-		if (inbuf->data[inbuf->head] == DLE_FLAG &&
-		    (cs->dle || inbuf->inputstate & INS_DLE_command)) {
-			/* start of DLE sequence */
-			inbuf->head++;
-			if (inbuf->head == inbuf->tail ||
-			    inbuf->head == RBUFSIZE) {
-				/* end of buffer, save for later processing */
-				inbuf->inputstate |= INS_DLE_char;
-				return;
-			}
-		} else {
-			/* regular data byte */
-			return;
-		}
-	}
-
-	/* consume pending DLE */
-	inbuf->inputstate &= ~INS_DLE_char;
-
-	switch (inbuf->data[inbuf->head]) {
-	case 'X':	/* start of event message */
-		if (inbuf->inputstate & INS_command)
-			dev_notice(cs->dev,
-				   "received <DLE>X in command mode\n");
-		inbuf->inputstate |= INS_command | INS_DLE_command;
-		inbuf->head++;	/* byte consumed */
-		break;
-	case '.':	/* end of event message */
-		if (!(inbuf->inputstate & INS_DLE_command))
-			dev_notice(cs->dev,
-				   "received <DLE>. without <DLE>X\n");
-		inbuf->inputstate &= ~INS_DLE_command;
-		/* return to data mode if in DLE mode */
-		if (cs->dle)
-			inbuf->inputstate &= ~INS_command;
-		inbuf->head++;	/* byte consumed */
-		break;
-	case DLE_FLAG:	/* DLE in data stream */
-		/* mark as quoted */
-		inbuf->inputstate |= INS_DLE_char;
-		if (!(cs->dle || inbuf->inputstate & INS_DLE_command))
-			dev_notice(cs->dev,
-				   "received <DLE><DLE> not in DLE mode\n");
-		break;	/* quoted byte left in buffer */
-	default:
-		dev_notice(cs->dev, "received <DLE><%02x>\n",
-			   inbuf->data[inbuf->head]);
-		/* quoted byte left in buffer */
-	}
-}
-
-/**
- * gigaset_m10x_input() - process a block of data received from the device
- * @inbuf:	received data and device descriptor structure.
- *
- * Called by hardware module {ser,usb}_gigaset with a block of received
- * bytes. Separates the bytes received over the serial data channel into
- * user data and command replies (locked/unlocked) according to the
- * current state of the interface.
- */
-void gigaset_m10x_input(struct inbuf_t *inbuf)
-{
-	struct cardstate *cs = inbuf->cs;
-	unsigned numbytes, procbytes;
-
-	gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", inbuf->head, inbuf->tail);
-
-	while (inbuf->head != inbuf->tail) {
-		/* check for DLE escape */
-		handle_dle(inbuf);
-
-		/* process a contiguous block of bytes */
-		numbytes = (inbuf->head > inbuf->tail ?
-			    RBUFSIZE : inbuf->tail) - inbuf->head;
-		gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
-		/*
-		 * numbytes may be 0 if handle_dle() ate the last byte.
-		 * This does no harm, *_loop() will just return 0 immediately.
-		 */
-
-		if (cs->mstate == MS_LOCKED)
-			procbytes = lock_loop(numbytes, inbuf);
-		else if (inbuf->inputstate & INS_command)
-			procbytes = cmd_loop(numbytes, inbuf);
-		else if (cs->bcs->proto2 == L2_HDLC)
-			procbytes = hdlc_loop(numbytes, inbuf);
-		else
-			procbytes = iraw_loop(numbytes, inbuf);
-		inbuf->head += procbytes;
-
-		/* check for buffer wraparound */
-		if (inbuf->head >= RBUFSIZE)
-			inbuf->head = 0;
-
-		gig_dbg(DEBUG_INTR, "head set to %u", inbuf->head);
-	}
-}
-EXPORT_SYMBOL_GPL(gigaset_m10x_input);
-
-
-/* == data output ========================================================== */
-
-/*
- * Encode a data packet into an octet stuffed HDLC frame with FCS,
- * opening and closing flags, preserving headroom data.
- * parameters:
- *	skb		skb containing original packet (freed upon return)
- * Return value:
- *	pointer to newly allocated skb containing the result frame
- *	and the original link layer header, NULL on error
- */
-static struct sk_buff *HDLC_Encode(struct sk_buff *skb)
-{
-	struct sk_buff *hdlc_skb;
-	__u16 fcs;
-	unsigned char c;
-	unsigned char *cp;
-	int len;
-	unsigned int stuf_cnt;
-
-	stuf_cnt = 0;
-	fcs = PPP_INITFCS;
-	cp = skb->data;
-	len = skb->len;
-	while (len--) {
-		if (muststuff(*cp))
-			stuf_cnt++;
-		fcs = crc_ccitt_byte(fcs, *cp++);
-	}
-	fcs ^= 0xffff;			/* complement */
-
-	/* size of new buffer: original size + number of stuffing bytes
-	 * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes
-	 * + room for link layer header
-	 */
-	hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + skb->mac_len);
-	if (!hdlc_skb) {
-		dev_kfree_skb_any(skb);
-		return NULL;
-	}
-
-	/* Copy link layer header into new skb */
-	skb_reset_mac_header(hdlc_skb);
-	skb_reserve(hdlc_skb, skb->mac_len);
-	memcpy(skb_mac_header(hdlc_skb), skb_mac_header(skb), skb->mac_len);
-	hdlc_skb->mac_len = skb->mac_len;
-
-	/* Add flag sequence in front of everything.. */
-	skb_put_u8(hdlc_skb, PPP_FLAG);
-
-	/* Perform byte stuffing while copying data. */
-	while (skb->len--) {
-		if (muststuff(*skb->data)) {
-			skb_put_u8(hdlc_skb, PPP_ESCAPE);
-			skb_put_u8(hdlc_skb, (*skb->data++) ^ PPP_TRANS);
-		} else
-			skb_put_u8(hdlc_skb, *skb->data++);
-	}
-
-	/* Finally add FCS (byte stuffed) and flag sequence */
-	c = (fcs & 0x00ff);	/* least significant byte first */
-	if (muststuff(c)) {
-		skb_put_u8(hdlc_skb, PPP_ESCAPE);
-		c ^= PPP_TRANS;
-	}
-	skb_put_u8(hdlc_skb, c);
-
-	c = ((fcs >> 8) & 0x00ff);
-	if (muststuff(c)) {
-		skb_put_u8(hdlc_skb, PPP_ESCAPE);
-		c ^= PPP_TRANS;
-	}
-	skb_put_u8(hdlc_skb, c);
-
-	skb_put_u8(hdlc_skb, PPP_FLAG);
-
-	dev_kfree_skb_any(skb);
-	return hdlc_skb;
-}
-
-/*
- * Encode a data packet into an octet stuffed raw bit inverted frame,
- * preserving headroom data.
- * parameters:
- *	skb		skb containing original packet (freed upon return)
- * Return value:
- *	pointer to newly allocated skb containing the result frame
- *	and the original link layer header, NULL on error
- */
-static struct sk_buff *iraw_encode(struct sk_buff *skb)
-{
-	struct sk_buff *iraw_skb;
-	unsigned char c;
-	unsigned char *cp;
-	int len;
-
-	/* size of new buffer (worst case = every byte must be stuffed):
-	 * 2 * original size + room for link layer header
-	 */
-	iraw_skb = dev_alloc_skb(2 * skb->len + skb->mac_len);
-	if (!iraw_skb) {
-		dev_kfree_skb_any(skb);
-		return NULL;
-	}
-
-	/* copy link layer header into new skb */
-	skb_reset_mac_header(iraw_skb);
-	skb_reserve(iraw_skb, skb->mac_len);
-	memcpy(skb_mac_header(iraw_skb), skb_mac_header(skb), skb->mac_len);
-	iraw_skb->mac_len = skb->mac_len;
-
-	/* copy and stuff data */
-	cp = skb->data;
-	len = skb->len;
-	while (len--) {
-		c = bitrev8(*cp++);
-		if (c == DLE_FLAG)
-			skb_put_u8(iraw_skb, c);
-		skb_put_u8(iraw_skb, c);
-	}
-	dev_kfree_skb_any(skb);
-	return iraw_skb;
-}
-
-/**
- * gigaset_m10x_send_skb() - queue an skb for sending
- * @bcs:	B channel descriptor structure.
- * @skb:	data to send.
- *
- * Called by LL to encode and queue an skb for sending, and start
- * transmission if necessary.
- * Once the payload data has been transmitted completely, gigaset_skb_sent()
- * will be called with the skb's link layer header preserved.
- *
- * Return value:
- *	number of bytes accepted for sending (skb->len) if ok,
- *	error code < 0 (eg. -ENOMEM) on error
- */
-int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
-{
-	struct cardstate *cs = bcs->cs;
-	unsigned len = skb->len;
-	unsigned long flags;
-
-	if (bcs->proto2 == L2_HDLC)
-		skb = HDLC_Encode(skb);
-	else
-		skb = iraw_encode(skb);
-	if (!skb) {
-		dev_err(cs->dev,
-			"unable to allocate memory for encoding!\n");
-		return -ENOMEM;
-	}
-
-	skb_queue_tail(&bcs->squeue, skb);
-	spin_lock_irqsave(&cs->lock, flags);
-	if (cs->connected)
-		tasklet_schedule(&cs->write_tasklet);
-	spin_unlock_irqrestore(&cs->lock, flags);
-
-	return len;	/* ok so far */
-}
-EXPORT_SYMBOL_GPL(gigaset_m10x_send_skb);
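
HDLC_Encode() and hdlc_loop() above implement RFC 1662 style octet stuffing: control characters, the flag byte 0x7e and the escape byte 0x7d are transmitted as 0x7d followed by the byte XORed with 0x20, and each frame carries a complemented FCS-16, least significant byte first, between opening and closing flags. The userspace sketch below mirrors that framing under those assumptions; the helper names and the bitwise CRC loop are mine, whereas the driver itself relies on the kernel's crc_ccitt_byte() and the skb API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG	0x7e	/* PPP_FLAG */
#define ESCAPE	0x7d	/* PPP_ESCAPE */
#define TRANS	0x20	/* PPP_TRANS */
#define INITFCS	0xffff	/* PPP_INITFCS */

/* Bitwise FCS-16 update (reflected polynomial 0x8408), equivalent to
 * the table-driven crc_ccitt_byte() used by the driver.
 */
static uint16_t fcs_byte(uint16_t fcs, uint8_t c)
{
	int i;

	fcs ^= c;
	for (i = 0; i < 8; i++)
		fcs = (fcs & 1) ? (fcs >> 1) ^ 0x8408 : fcs >> 1;
	return fcs;
}

/* A byte is stuffed if it is a control character, the flag or the
 * escape byte -- the same rule as muststuff() in asyncdata.c.
 */
static int muststuff(uint8_t c)
{
	return c < TRANS || c == FLAG || c == ESCAPE;
}

static size_t put_stuffed(uint8_t *out, size_t pos, uint8_t c)
{
	if (muststuff(c)) {
		out[pos++] = ESCAPE;
		c ^= TRANS;
	}
	out[pos++] = c;
	return pos;
}

/* Encode a payload as flag / stuffed data / stuffed FCS / flag.
 * 'out' must hold at least 2 * len + 6 bytes (worst case).
 */
static size_t hdlc_frame(const uint8_t *data, size_t len, uint8_t *out)
{
	uint16_t fcs = INITFCS;
	size_t i, pos = 0;

	out[pos++] = FLAG;
	for (i = 0; i < len; i++) {
		fcs = fcs_byte(fcs, data[i]);
		pos = put_stuffed(out, pos, data[i]);
	}
	fcs ^= 0xffff;					/* complement */
	pos = put_stuffed(out, pos, fcs & 0xff);	/* LSB first */
	pos = put_stuffed(out, pos, fcs >> 8);
	out[pos++] = FLAG;
	return pos;
}

int main(void)
{
	const uint8_t payload[] = { 0x01, 0x7e, 0x41, 0x42 };
	uint8_t frame[2 * sizeof(payload) + 6];
	size_t i, n = hdlc_frame(payload, sizeof(payload), frame);

	for (i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}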
diff --git a/drivers/staging/isdn/gigaset/bas-gigaset.c b/drivers/staging/isdn/gigaset/bas-gigaset.c
deleted file mode 100644
index c334525..0000000
--- a/drivers/staging/isdn/gigaset/bas-gigaset.c
+++ /dev/null
@@ -1,2672 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * USB driver for Gigaset 307x base via direct USB connection.
- *
- * Copyright (c) 2001 by Hansjoerg Lipp <hjlipp@web.de>,
- *                       Tilman Schmidt <tilman@imap.cc>,
- *                       Stefan Eilers.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-/* Version Information */
-#define DRIVER_AUTHOR "Tilman Schmidt <tilman@imap.cc>, Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers"
-#define DRIVER_DESC "USB Driver for Gigaset 307x"
-
-
-/* Module parameters */
-
-static int startmode = SM_ISDN;
-static int cidmode = 1;
-
-module_param(startmode, int, S_IRUGO);
-module_param(cidmode, int, S_IRUGO);
-MODULE_PARM_DESC(startmode, "start in isdn4linux mode");
-MODULE_PARM_DESC(cidmode, "Call-ID mode");
-
-#define GIGASET_MINORS     1
-#define GIGASET_MINOR      16
-#define GIGASET_MODULENAME "bas_gigaset"
-#define GIGASET_DEVNAME    "ttyGB"
-
-/* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
-#define IF_WRITEBUF 264
-
-/* interrupt pipe message size according to ibid. ch. 2.2 */
-#define IP_MSGSIZE 3
-
-/* Values for the Gigaset 307x */
-#define USB_GIGA_VENDOR_ID      0x0681
-#define USB_3070_PRODUCT_ID     0x0001
-#define USB_3075_PRODUCT_ID     0x0002
-#define USB_SX303_PRODUCT_ID    0x0021
-#define USB_SX353_PRODUCT_ID    0x0022
-
-/* table of devices that work with this driver */
-static const struct usb_device_id gigaset_table[] = {
-	{ USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3070_PRODUCT_ID) },
-	{ USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3075_PRODUCT_ID) },
-	{ USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) },
-	{ USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX353_PRODUCT_ID) },
-	{ } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(usb, gigaset_table);
-
-/*======================= local function prototypes ==========================*/
-
-/* function called if a new device belonging to this driver is connected */
-static int gigaset_probe(struct usb_interface *interface,
-			 const struct usb_device_id *id);
-
-/* Function will be called if the device is unplugged */
-static void gigaset_disconnect(struct usb_interface *interface);
-
-/* functions called before/after suspend */
-static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
-static int gigaset_resume(struct usb_interface *intf);
-
-/* functions called before/after device reset */
-static int gigaset_pre_reset(struct usb_interface *intf);
-static int gigaset_post_reset(struct usb_interface *intf);
-
-static int atread_submit(struct cardstate *, int);
-static void stopurbs(struct bas_bc_state *);
-static int req_submit(struct bc_state *, int, int, int);
-static int atwrite_submit(struct cardstate *, unsigned char *, int);
-static int start_cbsend(struct cardstate *);
-
-/*============================================================================*/
-
-struct bas_cardstate {
-	struct usb_device	*udev;		/* USB device pointer */
-	struct cardstate	*cs;
-	struct usb_interface	*interface;	/* interface for this device */
-	unsigned char		minor;		/* starting minor number */
-
-	struct urb		*urb_ctrl;	/* control pipe default URB */
-	struct usb_ctrlrequest	dr_ctrl;
-	struct timer_list	timer_ctrl;	/* control request timeout */
-	int			retry_ctrl;
-
-	struct timer_list	timer_atrdy;	/* AT command ready timeout */
-	struct urb		*urb_cmd_out;	/* for sending AT commands */
-	struct usb_ctrlrequest	dr_cmd_out;
-	int			retry_cmd_out;
-
-	struct urb		*urb_cmd_in;	/* for receiving AT replies */
-	struct usb_ctrlrequest	dr_cmd_in;
-	struct timer_list	timer_cmd_in;	/* receive request timeout */
-	unsigned char		*rcvbuf;	/* AT reply receive buffer */
-
-	struct urb		*urb_int_in;	/* URB for interrupt pipe */
-	unsigned char		*int_in_buf;
-	struct work_struct	int_in_wq;	/* for usb_clear_halt() */
-	struct timer_list	timer_int_in;	/* int read retry delay */
-	int			retry_int_in;
-
-	spinlock_t		lock;		/* locks all following */
-	int			basstate;	/* bitmap (BS_*) */
-	int			pending;	/* uncompleted base request */
-	wait_queue_head_t	waitqueue;
-	int			rcvbuf_size;	/* size of AT receive buffer */
-						/* 0: no receive in progress */
-	int			retry_cmd_in;	/* receive req retry count */
-};
-
-/* status of direct USB connection to 307x base (bits in basstate) */
-#define BS_ATOPEN	0x001	/* AT channel open */
-#define BS_B1OPEN	0x002	/* B channel 1 open */
-#define BS_B2OPEN	0x004	/* B channel 2 open */
-#define BS_ATREADY	0x008	/* base ready for AT command */
-#define BS_INIT		0x010	/* base has signalled INIT_OK */
-#define BS_ATTIMER	0x020	/* waiting for HD_READY_SEND_ATDATA */
-#define BS_ATRDPEND	0x040	/* urb_cmd_in in use */
-#define BS_ATWRPEND	0x080	/* urb_cmd_out in use */
-#define BS_SUSPEND	0x100	/* USB port suspended */
-#define BS_RESETTING	0x200	/* waiting for HD_RESET_INTERRUPT_PIPE_ACK */
-
-
-static struct gigaset_driver *driver;
-
-/* usb specific object needed to register this driver with the usb subsystem */
-static struct usb_driver gigaset_usb_driver = {
-	.name =         GIGASET_MODULENAME,
-	.probe =        gigaset_probe,
-	.disconnect =   gigaset_disconnect,
-	.id_table =     gigaset_table,
-	.suspend =	gigaset_suspend,
-	.resume =	gigaset_resume,
-	.reset_resume =	gigaset_post_reset,
-	.pre_reset =	gigaset_pre_reset,
-	.post_reset =	gigaset_post_reset,
-	.disable_hub_initiated_lpm = 1,
-};
-
-/* get message text for usb_submit_urb return code
- */
-static char *get_usb_rcmsg(int rc)
-{
-	static char unkmsg[28];
-
-	switch (rc) {
-	case 0:
-		return "success";
-	case -ENOMEM:
-		return "out of memory";
-	case -ENODEV:
-		return "device not present";
-	case -ENOENT:
-		return "endpoint not present";
-	case -ENXIO:
-		return "URB type not supported";
-	case -EINVAL:
-		return "invalid argument";
-	case -EAGAIN:
-		return "start frame too early or too much scheduled";
-	case -EFBIG:
-		return "too many isoc frames requested";
-	case -EPIPE:
-		return "endpoint stalled";
-	case -EMSGSIZE:
-		return "invalid packet size";
-	case -ENOSPC:
-		return "would overcommit USB bandwidth";
-	case -ESHUTDOWN:
-		return "device shut down";
-	case -EPERM:
-		return "reject flag set";
-	case -EHOSTUNREACH:
-		return "device suspended";
-	default:
-		snprintf(unkmsg, sizeof(unkmsg), "unknown error %d", rc);
-		return unkmsg;
-	}
-}
-
-/* get message text for USB status code
- */
-static char *get_usb_statmsg(int status)
-{
-	static char unkmsg[28];
-
-	switch (status) {
-	case 0:
-		return "success";
-	case -ENOENT:
-		return "unlinked (sync)";
-	case -EINPROGRESS:
-		return "URB still pending";
-	case -EPROTO:
-		return "bitstuff error, timeout, or unknown USB error";
-	case -EILSEQ:
-		return "CRC mismatch, timeout, or unknown USB error";
-	case -ETIME:
-		return "USB response timeout";
-	case -EPIPE:
-		return "endpoint stalled";
-	case -ECOMM:
-		return "IN buffer overrun";
-	case -ENOSR:
-		return "OUT buffer underrun";
-	case -EOVERFLOW:
-		return "endpoint babble";
-	case -EREMOTEIO:
-		return "short packet";
-	case -ENODEV:
-		return "device removed";
-	case -EXDEV:
-		return "partial isoc transfer";
-	case -EINVAL:
-		return "ISO madness";
-	case -ECONNRESET:
-		return "unlinked (async)";
-	case -ESHUTDOWN:
-		return "device shut down";
-	default:
-		snprintf(unkmsg, sizeof(unkmsg), "unknown status %d", status);
-		return unkmsg;
-	}
-}
-
-/* usb_pipetype_str
- * retrieve string representation of USB pipe type
- */
-static inline char *usb_pipetype_str(int pipe)
-{
-	if (usb_pipeisoc(pipe))
-		return "Isoc";
-	if (usb_pipeint(pipe))
-		return "Int";
-	if (usb_pipecontrol(pipe))
-		return "Ctrl";
-	if (usb_pipebulk(pipe))
-		return "Bulk";
-	return "?";
-}
-
-/* dump_urb
- * write content of URB to syslog for debugging
- */
-static inline void dump_urb(enum debuglevel level, const char *tag,
-			    struct urb *urb)
-{
-#ifdef CONFIG_GIGASET_DEBUG
-	int i;
-	gig_dbg(level, "%s urb(0x%08lx)->{", tag, (unsigned long) urb);
-	if (urb) {
-		gig_dbg(level,
-			"  dev=0x%08lx, pipe=%s:EP%d/DV%d:%s, "
-			"hcpriv=0x%08lx, transfer_flags=0x%x,",
-			(unsigned long) urb->dev,
-			usb_pipetype_str(urb->pipe),
-			usb_pipeendpoint(urb->pipe), usb_pipedevice(urb->pipe),
-			usb_pipein(urb->pipe) ? "in" : "out",
-			(unsigned long) urb->hcpriv,
-			urb->transfer_flags);
-		gig_dbg(level,
-			"  transfer_buffer=0x%08lx[%d], actual_length=%d, "
-			"setup_packet=0x%08lx,",
-			(unsigned long) urb->transfer_buffer,
-			urb->transfer_buffer_length, urb->actual_length,
-			(unsigned long) urb->setup_packet);
-		gig_dbg(level,
-			"  start_frame=%d, number_of_packets=%d, interval=%d, "
-			"error_count=%d,",
-			urb->start_frame, urb->number_of_packets, urb->interval,
-			urb->error_count);
-		gig_dbg(level,
-			"  context=0x%08lx, complete=0x%08lx, "
-			"iso_frame_desc[]={",
-			(unsigned long) urb->context,
-			(unsigned long) urb->complete);
-		for (i = 0; i < urb->number_of_packets; i++) {
-			struct usb_iso_packet_descriptor *pifd
-				= &urb->iso_frame_desc[i];
-			gig_dbg(level,
-				"    {offset=%u, length=%u, actual_length=%u, "
-				"status=%u}",
-				pifd->offset, pifd->length, pifd->actual_length,
-				pifd->status);
-		}
-	}
-	gig_dbg(level, "}}");
-#endif
-}
-
-/* read/set modem control bits etc. (m10x only) */
-static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
-				  unsigned new_state)
-{
-	return -EINVAL;
-}
-
-static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
-{
-	return -EINVAL;
-}
-
-static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
-{
-	return -EINVAL;
-}
-
-/* set/clear bits in base connection state, return previous state
- */
-static inline int update_basstate(struct bas_cardstate *ucs,
-				  int set, int clear)
-{
-	unsigned long flags;
-	int state;
-
-	spin_lock_irqsave(&ucs->lock, flags);
-	state = ucs->basstate;
-	ucs->basstate = (state & ~clear) | set;
-	spin_unlock_irqrestore(&ucs->lock, flags);
-	return state;
-}
-
-/* error_hangup
- * hang up any existing connection because of an unrecoverable error
- * This function may be called from any context and takes care of scheduling
- * the necessary actions for execution outside of interrupt context.
- * cs->lock must not be held.
- * argument:
- *	B channel control structure
- */
-static inline void error_hangup(struct bc_state *bcs)
-{
-	struct cardstate *cs = bcs->cs;
-
-	gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL);
-	gigaset_schedule_event(cs);
-}
-
-/* error_reset
- * reset Gigaset device because of an unrecoverable error
- * This function may be called from any context, and takes care of
- * scheduling the necessary actions for execution outside of interrupt context.
- * cs->hw.bas->lock must not be held.
- * argument:
- *	controller state structure
- */
-static inline void error_reset(struct cardstate *cs)
-{
-	/* reset interrupt pipe to recover (ignore errors) */
-	update_basstate(cs->hw.bas, BS_RESETTING, 0);
-	if (req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT))
-		/* submission failed, escalate to USB port reset */
-		usb_queue_reset_device(cs->hw.bas->interface);
-}
-
-/* check_pending
- * check for completion of pending control request
- * parameter:
- *	ucs	hardware specific controller state structure
- */
-static void check_pending(struct bas_cardstate *ucs)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ucs->lock, flags);
-	switch (ucs->pending) {
-	case 0:
-		break;
-	case HD_OPEN_ATCHANNEL:
-		if (ucs->basstate & BS_ATOPEN)
-			ucs->pending = 0;
-		break;
-	case HD_OPEN_B1CHANNEL:
-		if (ucs->basstate & BS_B1OPEN)
-			ucs->pending = 0;
-		break;
-	case HD_OPEN_B2CHANNEL:
-		if (ucs->basstate & BS_B2OPEN)
-			ucs->pending = 0;
-		break;
-	case HD_CLOSE_ATCHANNEL:
-		if (!(ucs->basstate & BS_ATOPEN))
-			ucs->pending = 0;
-		break;
-	case HD_CLOSE_B1CHANNEL:
-		if (!(ucs->basstate & BS_B1OPEN))
-			ucs->pending = 0;
-		break;
-	case HD_CLOSE_B2CHANNEL:
-		if (!(ucs->basstate & BS_B2OPEN))
-			ucs->pending = 0;
-		break;
-	case HD_DEVICE_INIT_ACK:		/* no reply expected */
-		ucs->pending = 0;
-		break;
-	case HD_RESET_INTERRUPT_PIPE:
-		if (!(ucs->basstate & BS_RESETTING))
-			ucs->pending = 0;
-		break;
-	/*
-	 * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately
-	 * and should never end up here
-	 */
-	default:
-		dev_warn(&ucs->interface->dev,
-			 "unknown pending request 0x%02x cleared\n",
-			 ucs->pending);
-		ucs->pending = 0;
-	}
-
-	if (!ucs->pending)
-		del_timer(&ucs->timer_ctrl);
-
-	spin_unlock_irqrestore(&ucs->lock, flags);
-}
-
-/* cmd_in_timeout
- * timeout routine for command input request
- * argument:
- *	controller state structure
- */
-static void cmd_in_timeout(struct timer_list *t)
-{
-	struct bas_cardstate *ucs = from_timer(ucs, t, timer_cmd_in);
-	struct cardstate *cs = ucs->cs;
-	int rc;
-
-	if (!ucs->rcvbuf_size) {
-		gig_dbg(DEBUG_USBREQ, "%s: no receive in progress", __func__);
-		return;
-	}
-
-	if (ucs->retry_cmd_in++ >= BAS_RETRY) {
-		dev_err(cs->dev,
-			"control read: timeout, giving up after %d tries\n",
-			ucs->retry_cmd_in);
-		kfree(ucs->rcvbuf);
-		ucs->rcvbuf = NULL;
-		ucs->rcvbuf_size = 0;
-		error_reset(cs);
-		return;
-	}
-
-	gig_dbg(DEBUG_USBREQ, "%s: timeout, retry %d",
-		__func__, ucs->retry_cmd_in);
-	rc = atread_submit(cs, BAS_TIMEOUT);
-	if (rc < 0) {
-		kfree(ucs->rcvbuf);
-		ucs->rcvbuf = NULL;
-		ucs->rcvbuf_size = 0;
-		if (rc != -ENODEV)
-			error_reset(cs);
-	}
-}
-
-/* read_ctrl_callback
- * USB completion handler for control pipe input
- * called by the USB subsystem in interrupt context
- * parameter:
- *	urb	USB request block
- *		urb->context = inbuf structure for controller state
- */
-static void read_ctrl_callback(struct urb *urb)
-{
-	struct inbuf_t *inbuf = urb->context;
-	struct cardstate *cs = inbuf->cs;
-	struct bas_cardstate *ucs = cs->hw.bas;
-	int status = urb->status;
-	unsigned numbytes;
-	int rc;
-
-	update_basstate(ucs, 0, BS_ATRDPEND);
-	wake_up(&ucs->waitqueue);
-	del_timer(&ucs->timer_cmd_in);
-
-	switch (status) {
-	case 0:				/* normal completion */
-		numbytes = urb->actual_length;
-		if (unlikely(numbytes != ucs->rcvbuf_size)) {
-			dev_warn(cs->dev,
-				 "control read: received %d chars, expected %d\n",
-				 numbytes, ucs->rcvbuf_size);
-			if (numbytes > ucs->rcvbuf_size)
-				numbytes = ucs->rcvbuf_size;
-		}
-
-		/* copy received bytes to inbuf, notify event layer */
-		if (gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes)) {
-			gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
-			gigaset_schedule_event(cs);
-		}
-		break;
-
-	case -ENOENT:			/* cancelled */
-	case -ECONNRESET:		/* cancelled (async) */
-	case -EINPROGRESS:		/* pending */
-	case -ENODEV:			/* device removed */
-	case -ESHUTDOWN:		/* device shut down */
-		/* no further action necessary */
-		gig_dbg(DEBUG_USBREQ, "%s: %s",
-			__func__, get_usb_statmsg(status));
-		break;
-
-	default:			/* other errors: retry */
-		if (ucs->retry_cmd_in++ < BAS_RETRY) {
-			gig_dbg(DEBUG_USBREQ, "%s: %s, retry %d", __func__,
-				get_usb_statmsg(status), ucs->retry_cmd_in);
-			rc = atread_submit(cs, BAS_TIMEOUT);
-			if (rc >= 0)
-				/* successfully resubmitted, skip freeing */
-				return;
-			if (rc == -ENODEV)
-				/* disconnect, no further action necessary */
-				break;
-		}
-		dev_err(cs->dev, "control read: %s, giving up after %d tries\n",
-			get_usb_statmsg(status), ucs->retry_cmd_in);
-		error_reset(cs);
-	}
-
-	/* read finished, free buffer */
-	kfree(ucs->rcvbuf);
-	ucs->rcvbuf = NULL;
-	ucs->rcvbuf_size = 0;
-}
-
-/* atread_submit
- * submit an HD_READ_ATMESSAGE command URB and optionally start a timeout
- * parameters:
- *	cs	controller state structure
- *	timeout	timeout in 1/10 sec., 0: none
- * return value:
- *	0 on success
- *	-EBUSY if another request is pending
- *	any URB submission error code
- */
-static int atread_submit(struct cardstate *cs, int timeout)
-{
-	struct bas_cardstate *ucs = cs->hw.bas;
-	int basstate;
-	int ret;
-
-	gig_dbg(DEBUG_USBREQ, "-------> HD_READ_ATMESSAGE (%d)",
-		ucs->rcvbuf_size);
-
-	basstate = update_basstate(ucs, BS_ATRDPEND, 0);
-	if (basstate & BS_ATRDPEND) {
-		dev_err(cs->dev,
-			"could not submit HD_READ_ATMESSAGE: URB busy\n");
-		return -EBUSY;
-	}
-
-	if (basstate & BS_SUSPEND) {
-		dev_notice(cs->dev,
-			   "HD_READ_ATMESSAGE not submitted, "
-			   "suspend in progress\n");
-		update_basstate(ucs, 0, BS_ATRDPEND);
-		/* treat like disconnect */
-		return -ENODEV;
-	}
-
-	ucs->dr_cmd_in.bRequestType = IN_VENDOR_REQ;
-	ucs->dr_cmd_in.bRequest = HD_READ_ATMESSAGE;
-	ucs->dr_cmd_in.wValue = 0;
-	ucs->dr_cmd_in.wIndex = 0;
-	ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size);
-	usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev,
-			     usb_rcvctrlpipe(ucs->udev, 0),
-			     (unsigned char *) &ucs->dr_cmd_in,
-			     ucs->rcvbuf, ucs->rcvbuf_size,
-			     read_ctrl_callback, cs->inbuf);
-
-	ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC);
-	if (ret != 0) {
-		update_basstate(ucs, 0, BS_ATRDPEND);
-		dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n",
-			get_usb_rcmsg(ret));
-		return ret;
-	}
-
-	if (timeout > 0) {
-		gig_dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
-		mod_timer(&ucs->timer_cmd_in, jiffies + timeout * HZ / 10);
-	}
-	return 0;
-}
-
-/* int_in_work
- * workqueue routine to clear halt on interrupt in endpoint
- */
-
-static void int_in_work(struct work_struct *work)
-{
-	struct bas_cardstate *ucs =
-		container_of(work, struct bas_cardstate, int_in_wq);
-	struct urb *urb = ucs->urb_int_in;
-	struct cardstate *cs = urb->context;
-	int rc;
-
-	/* clear halt condition */
-	rc = usb_clear_halt(ucs->udev, urb->pipe);
-	gig_dbg(DEBUG_USBREQ, "clear_halt: %s", get_usb_rcmsg(rc));
-	if (rc == 0)
-		/* success, resubmit interrupt read URB */
-		rc = usb_submit_urb(urb, GFP_ATOMIC);
-
-	switch (rc) {
-	case 0:		/* success */
-	case -ENODEV:	/* device gone */
-	case -EINVAL:	/* URB already resubmitted, or terminal badness */
-		break;
-	default:	/* failure: try to recover by resetting the device */
-		dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc));
-		rc = usb_lock_device_for_reset(ucs->udev, ucs->interface);
-		if (rc == 0) {
-			rc = usb_reset_device(ucs->udev);
-			usb_unlock_device(ucs->udev);
-		}
-	}
-	ucs->retry_int_in = 0;
-}
-
-/* int_in_resubmit
- * timer routine for interrupt read delayed resubmit
- * argument:
- *	controller state structure
- */
-static void int_in_resubmit(struct timer_list *t)
-{
-	struct bas_cardstate *ucs = from_timer(ucs, t, timer_int_in);
-	struct cardstate *cs = ucs->cs;
-	int rc;
-
-	if (ucs->retry_int_in++ >= BAS_RETRY) {
-		dev_err(cs->dev, "interrupt read: giving up after %d tries\n",
-			ucs->retry_int_in);
-		usb_queue_reset_device(ucs->interface);
-		return;
-	}
-
-	gig_dbg(DEBUG_USBREQ, "%s: retry %d", __func__, ucs->retry_int_in);
-	rc = usb_submit_urb(ucs->urb_int_in, GFP_ATOMIC);
-	if (rc != 0 && rc != -ENODEV) {
-		dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
-			get_usb_rcmsg(rc));
-		usb_queue_reset_device(ucs->interface);
-	}
-}
-
-/* read_int_callback
- * USB completion handler for interrupt pipe input
- * called by the USB subsystem in interrupt context
- * parameter:
- *	urb	USB request block
- *		urb->context = controller state structure
- */
-static void read_int_callback(struct urb *urb)
-{
-	struct cardstate *cs = urb->context;
-	struct bas_cardstate *ucs = cs->hw.bas;
-	struct bc_state *bcs;
-	int status = urb->status;
-	unsigned long flags;
-	int rc;
-	unsigned l;
-	int channel;
-
-	switch (status) {
-	case 0:			/* success */
-		ucs->retry_int_in = 0;
-		break;
-	case -EPIPE:			/* endpoint stalled */
-		schedule_work(&ucs->int_in_wq);
-		/* fall through */
-	case -ENOENT:			/* cancelled */
-	case -ECONNRESET:		/* cancelled (async) */
-	case -EINPROGRESS:		/* pending */
-	case -ENODEV:			/* device removed */
-	case -ESHUTDOWN:		/* device shut down */
-		/* no further action necessary */
-		gig_dbg(DEBUG_USBREQ, "%s: %s",
-			__func__, get_usb_statmsg(status));
-		return;
-	case -EPROTO:			/* protocol error or unplug */
-	case -EILSEQ:
-	case -ETIME:
-		/* resubmit after delay */
-		gig_dbg(DEBUG_USBREQ, "%s: %s",
-			__func__, get_usb_statmsg(status));
-		mod_timer(&ucs->timer_int_in, jiffies + HZ / 10);
-		return;
-	default:		/* other errors: just resubmit */
-		dev_warn(cs->dev, "interrupt read: %s\n",
-			 get_usb_statmsg(status));
-		goto resubmit;
-	}
-
-	/* drop incomplete packets even if the missing bytes wouldn't matter */
-	if (unlikely(urb->actual_length < IP_MSGSIZE)) {
-		dev_warn(cs->dev, "incomplete interrupt packet (%d bytes)\n",
-			 urb->actual_length);
-		goto resubmit;
-	}
-
-	l = (unsigned) ucs->int_in_buf[1] +
-		(((unsigned) ucs->int_in_buf[2]) << 8);
-
-	gig_dbg(DEBUG_USBREQ, "<-------%d: 0x%02x (%u [0x%02x 0x%02x])",
-		urb->actual_length, (int)ucs->int_in_buf[0], l,
-		(int)ucs->int_in_buf[1], (int)ucs->int_in_buf[2]);
-
-	channel = 0;
-
-	switch (ucs->int_in_buf[0]) {
-	case HD_DEVICE_INIT_OK:
-		update_basstate(ucs, BS_INIT, 0);
-		break;
-
-	case HD_READY_SEND_ATDATA:
-		del_timer(&ucs->timer_atrdy);
-		update_basstate(ucs, BS_ATREADY, BS_ATTIMER);
-		start_cbsend(cs);
-		break;
-
-	case HD_OPEN_B2CHANNEL_ACK:
-		++channel;
-		/* fall through */
-	case HD_OPEN_B1CHANNEL_ACK:
-		bcs = cs->bcs + channel;
-		update_basstate(ucs, BS_B1OPEN << channel, 0);
-		gigaset_bchannel_up(bcs);
-		break;
-
-	case HD_OPEN_ATCHANNEL_ACK:
-		update_basstate(ucs, BS_ATOPEN, 0);
-		start_cbsend(cs);
-		break;
-
-	case HD_CLOSE_B2CHANNEL_ACK:
-		++channel;
-		/* fall through */
-	case HD_CLOSE_B1CHANNEL_ACK:
-		bcs = cs->bcs + channel;
-		update_basstate(ucs, 0, BS_B1OPEN << channel);
-		stopurbs(bcs->hw.bas);
-		gigaset_bchannel_down(bcs);
-		break;
-
-	case HD_CLOSE_ATCHANNEL_ACK:
-		update_basstate(ucs, 0, BS_ATOPEN);
-		break;
-
-	case HD_B2_FLOW_CONTROL:
-		++channel;
-		/* fall through */
-	case HD_B1_FLOW_CONTROL:
-		bcs = cs->bcs + channel;
-		atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES,
-			   &bcs->hw.bas->corrbytes);
-		gig_dbg(DEBUG_ISO,
-			"Flow control (channel %d, sub %d): 0x%02x => %d",
-			channel, bcs->hw.bas->numsub, l,
-			atomic_read(&bcs->hw.bas->corrbytes));
-		break;
-
-	case HD_RECEIVEATDATA_ACK:	/* AT response ready to be received */
-		if (!l) {
-			dev_warn(cs->dev,
-				 "HD_RECEIVEATDATA_ACK with length 0 ignored\n");
-			break;
-		}
-		spin_lock_irqsave(&cs->lock, flags);
-		if (ucs->basstate & BS_ATRDPEND) {
-			spin_unlock_irqrestore(&cs->lock, flags);
-			dev_warn(cs->dev,
-				 "HD_RECEIVEATDATA_ACK(%d) during HD_READ_ATMESSAGE(%d) ignored\n",
-				 l, ucs->rcvbuf_size);
-			break;
-		}
-		if (ucs->rcvbuf_size) {
-			/* throw away previous buffer - we have no queue */
-			dev_err(cs->dev,
-				"receive AT data overrun, %d bytes lost\n",
-				ucs->rcvbuf_size);
-			kfree(ucs->rcvbuf);
-			ucs->rcvbuf_size = 0;
-		}
-		ucs->rcvbuf = kmalloc(l, GFP_ATOMIC);
-		if (ucs->rcvbuf == NULL) {
-			spin_unlock_irqrestore(&cs->lock, flags);
-			dev_err(cs->dev, "out of memory receiving AT data\n");
-			break;
-		}
-		ucs->rcvbuf_size = l;
-		ucs->retry_cmd_in = 0;
-		rc = atread_submit(cs, BAS_TIMEOUT);
-		if (rc < 0) {
-			kfree(ucs->rcvbuf);
-			ucs->rcvbuf = NULL;
-			ucs->rcvbuf_size = 0;
-		}
-		spin_unlock_irqrestore(&cs->lock, flags);
-		if (rc < 0 && rc != -ENODEV)
-			error_reset(cs);
-		break;
-
-	case HD_RESET_INTERRUPT_PIPE_ACK:
-		update_basstate(ucs, 0, BS_RESETTING);
-		dev_notice(cs->dev, "interrupt pipe reset\n");
-		break;
-
-	case HD_SUSPEND_END:
-		gig_dbg(DEBUG_USBREQ, "HD_SUSPEND_END");
-		break;
-
-	default:
-		dev_warn(cs->dev,
-			 "unknown Gigaset signal 0x%02x (%u) ignored\n",
-			 (int) ucs->int_in_buf[0], l);
-	}
-
-	check_pending(ucs);
-	wake_up(&ucs->waitqueue);
-
-resubmit:
-	rc = usb_submit_urb(urb, GFP_ATOMIC);
-	if (unlikely(rc != 0 && rc != -ENODEV)) {
-		dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
-			get_usb_rcmsg(rc));
-		error_reset(cs);
-	}
-}
-
-/* read_iso_callback
- * USB completion handler for B channel isochronous input
- * called by the USB subsystem in interrupt context
- * parameter:
- *	urb	USB request block of completed request
- *		urb->context = bc_state structure
- */
-static void read_iso_callback(struct urb *urb)
-{
-	struct bc_state *bcs;
-	struct bas_bc_state *ubc;
-	int status = urb->status;
-	unsigned long flags;
-	int i, rc;
-
-	/* status codes not worth bothering the tasklet with */
-	if (unlikely(status == -ENOENT ||
-		     status == -ECONNRESET ||
-		     status == -EINPROGRESS ||
-		     status == -ENODEV ||
-		     status == -ESHUTDOWN)) {
-		gig_dbg(DEBUG_ISO, "%s: %s",
-			__func__, get_usb_statmsg(status));
-		return;
-	}
-
-	bcs = urb->context;
-	ubc = bcs->hw.bas;
-
-	spin_lock_irqsave(&ubc->isoinlock, flags);
-	if (likely(ubc->isoindone == NULL)) {
-		/* pass URB to tasklet */
-		ubc->isoindone = urb;
-		ubc->isoinstatus = status;
-		tasklet_hi_schedule(&ubc->rcvd_tasklet);
-	} else {
-		/* tasklet still busy, drop data and resubmit URB */
-		gig_dbg(DEBUG_ISO, "%s: overrun", __func__);
-		ubc->loststatus = status;
-		for (i = 0; i < BAS_NUMFRAMES; i++) {
-			ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
-			if (unlikely(urb->iso_frame_desc[i].status != 0 &&
-				     urb->iso_frame_desc[i].status != -EINPROGRESS))
-				ubc->loststatus = urb->iso_frame_desc[i].status;
-			urb->iso_frame_desc[i].status = 0;
-			urb->iso_frame_desc[i].actual_length = 0;
-		}
-		if (likely(ubc->running)) {
-			/* urb->dev is clobbered by USB subsystem */
-			urb->dev = bcs->cs->hw.bas->udev;
-			urb->transfer_flags = URB_ISO_ASAP;
-			urb->number_of_packets = BAS_NUMFRAMES;
-			rc = usb_submit_urb(urb, GFP_ATOMIC);
-			if (unlikely(rc != 0 && rc != -ENODEV)) {
-				dev_err(bcs->cs->dev,
-					"could not resubmit isoc read URB: %s\n",
-					get_usb_rcmsg(rc));
-				dump_urb(DEBUG_ISO, "isoc read", urb);
-				error_hangup(bcs);
-			}
-		}
-	}
-	spin_unlock_irqrestore(&ubc->isoinlock, flags);
-}
-
-/* write_iso_callback
- * USB completion handler for B channel isochronous output
- * called by the USB subsystem in interrupt context
- * parameter:
- *	urb	USB request block of completed request
- *		urb->context = isow_urbctx_t structure
- */
-static void write_iso_callback(struct urb *urb)
-{
-	struct isow_urbctx_t *ucx;
-	struct bas_bc_state *ubc;
-	int status = urb->status;
-	unsigned long flags;
-
-	/* status codes not worth bothering the tasklet with */
-	if (unlikely(status == -ENOENT ||
-		     status == -ECONNRESET ||
-		     status == -EINPROGRESS ||
-		     status == -ENODEV ||
-		     status == -ESHUTDOWN)) {
-		gig_dbg(DEBUG_ISO, "%s: %s",
-			__func__, get_usb_statmsg(status));
-		return;
-	}
-
-	/* pass URB context to tasklet */
-	ucx = urb->context;
-	ubc = ucx->bcs->hw.bas;
-	ucx->status = status;
-
-	spin_lock_irqsave(&ubc->isooutlock, flags);
-	ubc->isooutovfl = ubc->isooutdone;
-	ubc->isooutdone = ucx;
-	spin_unlock_irqrestore(&ubc->isooutlock, flags);
-	tasklet_hi_schedule(&ubc->sent_tasklet);
-}
-
-/* starturbs
- * prepare and submit USB request blocks for isochronous input and output
- * argument:
- *	B channel control structure
- * return value:
- *	0 on success
- *	< 0 on error (no URBs submitted)
- */
-static int starturbs(struct bc_state *bcs)
-{
-	struct usb_device *udev = bcs->cs->hw.bas->udev;
-	struct bas_bc_state *ubc = bcs->hw.bas;
-	struct urb *urb;
-	int j, k;
-	int rc;
-
-	/* initialize L2 reception */
-	if (bcs->proto2 == L2_HDLC)
-		bcs->inputstate |= INS_flag_hunt;
-
-	/* submit all isochronous input URBs */
-	ubc->running = 1;
-	for (k = 0; k < BAS_INURBS; k++) {
-		urb = ubc->isoinurbs[k];
-		if (!urb) {
-			rc = -EFAULT;
-			goto error;
-		}
-		usb_fill_int_urb(urb, udev,
-				 usb_rcvisocpipe(udev, 3 + 2 * bcs->channel),
-				 ubc->isoinbuf + k * BAS_INBUFSIZE,
-				 BAS_INBUFSIZE, read_iso_callback, bcs,
-				 BAS_FRAMETIME);
-
-		urb->transfer_flags = URB_ISO_ASAP;
-		urb->number_of_packets = BAS_NUMFRAMES;
-		for (j = 0; j < BAS_NUMFRAMES; j++) {
-			urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME;
-			urb->iso_frame_desc[j].length = BAS_MAXFRAME;
-			urb->iso_frame_desc[j].status = 0;
-			urb->iso_frame_desc[j].actual_length = 0;
-		}
-
-		dump_urb(DEBUG_ISO, "Initial isoc read", urb);
-		rc = usb_submit_urb(urb, GFP_ATOMIC);
-		if (rc != 0)
-			goto error;
-	}
-
-	/* initialize L2 transmission */
-	gigaset_isowbuf_init(ubc->isooutbuf, PPP_FLAG);
-
-	/* set up isochronous output URBs for flag idling */
-	for (k = 0; k < BAS_OUTURBS; ++k) {
-		urb = ubc->isoouturbs[k].urb;
-		if (!urb) {
-			rc = -EFAULT;
-			goto error;
-		}
-		usb_fill_int_urb(urb, udev,
-				 usb_sndisocpipe(udev, 4 + 2 * bcs->channel),
-				 ubc->isooutbuf->data,
-				 sizeof(ubc->isooutbuf->data),
-				 write_iso_callback, &ubc->isoouturbs[k],
-				 BAS_FRAMETIME);
-
-		urb->transfer_flags = URB_ISO_ASAP;
-		urb->number_of_packets = BAS_NUMFRAMES;
-		for (j = 0; j < BAS_NUMFRAMES; ++j) {
-			urb->iso_frame_desc[j].offset = BAS_OUTBUFSIZE;
-			urb->iso_frame_desc[j].length = BAS_NORMFRAME;
-			urb->iso_frame_desc[j].status = 0;
-			urb->iso_frame_desc[j].actual_length = 0;
-		}
-		ubc->isoouturbs[k].limit = -1;
-	}
-
-	/* keep one URB free, submit the others */
-	for (k = 0; k < BAS_OUTURBS - 1; ++k) {
-		dump_urb(DEBUG_ISO, "Initial isoc write", urb);
-		rc = usb_submit_urb(ubc->isoouturbs[k].urb, GFP_ATOMIC);
-		if (rc != 0)
-			goto error;
-	}
-	dump_urb(DEBUG_ISO, "Initial isoc write (free)", urb);
-	ubc->isooutfree = &ubc->isoouturbs[BAS_OUTURBS - 1];
-	ubc->isooutdone = ubc->isooutovfl = NULL;
-	return 0;
-error:
-	stopurbs(ubc);
-	return rc;
-}
-
-/* stopurbs
- * cancel the USB request blocks for isochronous input and output
- * errors are silently ignored
- * argument:
- *	B channel control structure
- */
-static void stopurbs(struct bas_bc_state *ubc)
-{
-	int k, rc;
-
-	ubc->running = 0;
-
-	for (k = 0; k < BAS_INURBS; ++k) {
-		rc = usb_unlink_urb(ubc->isoinurbs[k]);
-		gig_dbg(DEBUG_ISO,
-			"%s: isoc input URB %d unlinked, result = %s",
-			__func__, k, get_usb_rcmsg(rc));
-	}
-
-	for (k = 0; k < BAS_OUTURBS; ++k) {
-		rc = usb_unlink_urb(ubc->isoouturbs[k].urb);
-		gig_dbg(DEBUG_ISO,
-			"%s: isoc output URB %d unlinked, result = %s",
-			__func__, k, get_usb_rcmsg(rc));
-	}
-}
-
-/* Isochronous Write - Bottom Half */
-/* =============================== */
-
-/* submit_iso_write_urb
- * fill and submit the next isochronous write URB
- * parameters:
- *	ucx	context structure containing URB
- * return value:
- *	number of frames submitted in URB
- *	0 if URB not submitted because no data available (isooutbuf busy)
- *	error code < 0 on error
- */
-static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
-{
-	struct urb *urb = ucx->urb;
-	struct bas_bc_state *ubc = ucx->bcs->hw.bas;
-	struct usb_iso_packet_descriptor *ifd;
-	int corrbytes, nframe, rc;
-
-	/* urb->dev is clobbered by USB subsystem */
-	urb->dev = ucx->bcs->cs->hw.bas->udev;
-	urb->transfer_flags = URB_ISO_ASAP;
-	urb->transfer_buffer = ubc->isooutbuf->data;
-	urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data);
-
-	for (nframe = 0; nframe < BAS_NUMFRAMES; nframe++) {
-		ifd = &urb->iso_frame_desc[nframe];
-
-		/* compute frame length according to flow control */
-		ifd->length = BAS_NORMFRAME;
-		corrbytes = atomic_read(&ubc->corrbytes);
-		if (corrbytes != 0) {
-			gig_dbg(DEBUG_ISO, "%s: corrbytes=%d",
-				__func__, corrbytes);
-			if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME)
-				corrbytes = BAS_HIGHFRAME - BAS_NORMFRAME;
-			else if (corrbytes < BAS_LOWFRAME - BAS_NORMFRAME)
-				corrbytes = BAS_LOWFRAME - BAS_NORMFRAME;
-			ifd->length += corrbytes;
-			atomic_add(-corrbytes, &ubc->corrbytes);
-		}
-
-		/* retrieve block of data to send */
-		rc = gigaset_isowbuf_getbytes(ubc->isooutbuf, ifd->length);
-		if (rc < 0) {
-			if (rc == -EBUSY) {
-				gig_dbg(DEBUG_ISO,
-					"%s: buffer busy at frame %d",
-					__func__, nframe);
-				/* tasklet will be restarted from
-				   gigaset_isoc_send_skb() */
-			} else {
-				dev_err(ucx->bcs->cs->dev,
-					"%s: buffer error %d at frame %d\n",
-					__func__, rc, nframe);
-				return rc;
-			}
-			break;
-		}
-		ifd->offset = rc;
-		ucx->limit = ubc->isooutbuf->nextread;
-		ifd->status = 0;
-		ifd->actual_length = 0;
-	}
-	if (unlikely(nframe == 0))
-		return 0;	/* no data to send */
-	urb->number_of_packets = nframe;
-
-	rc = usb_submit_urb(urb, GFP_ATOMIC);
-	if (unlikely(rc)) {
-		if (rc == -ENODEV)
-			/* device removed - give up silently */
-			gig_dbg(DEBUG_ISO, "%s: disconnected", __func__);
-		else
-			dev_err(ucx->bcs->cs->dev,
-				"could not submit isoc write URB: %s\n",
-				get_usb_rcmsg(rc));
-		return rc;
-	}
-	++ubc->numsub;
-	return nframe;
-}
-
-/* write_iso_tasklet
- * tasklet scheduled when an isochronous output URB from the Gigaset device
- * has completed
- * parameter:
- *	data	B channel state structure
- */
-static void write_iso_tasklet(unsigned long data)
-{
-	struct bc_state *bcs = (struct bc_state *) data;
-	struct bas_bc_state *ubc = bcs->hw.bas;
-	struct cardstate *cs = bcs->cs;
-	struct isow_urbctx_t *done, *next, *ovfl;
-	struct urb *urb;
-	int status;
-	struct usb_iso_packet_descriptor *ifd;
-	unsigned long flags;
-	int i;
-	struct sk_buff *skb;
-	int len;
-	int rc;
-
-	/* loop while completed URBs arrive in time */
-	for (;;) {
-		if (unlikely(!(ubc->running))) {
-			gig_dbg(DEBUG_ISO, "%s: not running", __func__);
-			return;
-		}
-
-		/* retrieve completed URBs */
-		spin_lock_irqsave(&ubc->isooutlock, flags);
-		done = ubc->isooutdone;
-		ubc->isooutdone = NULL;
-		ovfl = ubc->isooutovfl;
-		ubc->isooutovfl = NULL;
-		spin_unlock_irqrestore(&ubc->isooutlock, flags);
-		if (ovfl) {
-			dev_err(cs->dev, "isoc write underrun\n");
-			error_hangup(bcs);
-			break;
-		}
-		if (!done)
-			break;
-
-		/* submit free URB if available */
-		spin_lock_irqsave(&ubc->isooutlock, flags);
-		next = ubc->isooutfree;
-		ubc->isooutfree = NULL;
-		spin_unlock_irqrestore(&ubc->isooutlock, flags);
-		if (next) {
-			rc = submit_iso_write_urb(next);
-			if (unlikely(rc <= 0 && rc != -ENODEV)) {
-				/* could not submit URB, put it back */
-				spin_lock_irqsave(&ubc->isooutlock, flags);
-				if (ubc->isooutfree == NULL) {
-					ubc->isooutfree = next;
-					next = NULL;
-				}
-				spin_unlock_irqrestore(&ubc->isooutlock, flags);
-				if (next) {
-					/* couldn't put it back */
-					dev_err(cs->dev,
-						"losing isoc write URB\n");
-					error_hangup(bcs);
-				}
-			}
-		}
-
-		/* process completed URB */
-		urb = done->urb;
-		status = done->status;
-		switch (status) {
-		case -EXDEV:			/* partial completion */
-			gig_dbg(DEBUG_ISO, "%s: URB partially completed",
-				__func__);
-			/* fall through - what's the difference anyway? */
-		case 0:				/* normal completion */
-			/* inspect individual frames
-			 * assumptions (for lack of documentation):
-			 * - actual_length bytes of first frame in error are
-			 *   successfully sent
-			 * - all following frames are not sent at all
-			 */
-			for (i = 0; i < BAS_NUMFRAMES; i++) {
-				ifd = &urb->iso_frame_desc[i];
-				if (ifd->status ||
-				    ifd->actual_length != ifd->length) {
-					dev_warn(cs->dev,
-						 "isoc write: frame %d[%d/%d]: %s\n",
-						 i, ifd->actual_length,
-						 ifd->length,
-						 get_usb_statmsg(ifd->status));
-					break;
-				}
-			}
-			break;
-		case -EPIPE:			/* stall - probably underrun */
-			dev_err(cs->dev, "isoc write: stalled\n");
-			error_hangup(bcs);
-			break;
-		default:			/* other errors */
-			dev_warn(cs->dev, "isoc write: %s\n",
-				 get_usb_statmsg(status));
-		}
-
-		/* mark the write buffer area covered by this URB as free */
-		if (done->limit >= 0)
-			ubc->isooutbuf->read = done->limit;
-
-		/* mark URB as free */
-		spin_lock_irqsave(&ubc->isooutlock, flags);
-		next = ubc->isooutfree;
-		ubc->isooutfree = done;
-		spin_unlock_irqrestore(&ubc->isooutlock, flags);
-		if (next) {
-			/* only one URB still active - resubmit one */
-			rc = submit_iso_write_urb(next);
-			if (unlikely(rc <= 0 && rc != -ENODEV)) {
-				/* couldn't submit */
-				error_hangup(bcs);
-			}
-		}
-	}
-
-	/* process queued SKBs */
-	while ((skb = skb_dequeue(&bcs->squeue))) {
-		/* copy to output buffer, doing L2 encapsulation */
-		len = skb->len;
-		if (gigaset_isoc_buildframe(bcs, skb->data, len) == -EAGAIN) {
-			/* insufficient buffer space, push back onto queue */
-			skb_queue_head(&bcs->squeue, skb);
-			gig_dbg(DEBUG_ISO, "%s: skb requeued, qlen=%d",
-				__func__, skb_queue_len(&bcs->squeue));
-			break;
-		}
-		skb_pull(skb, len);
-		gigaset_skb_sent(bcs, skb);
-		dev_kfree_skb_any(skb);
-	}
-}
-
-/* Isochronous Read - Bottom Half */
-/* ============================== */
-
-/* read_iso_tasklet
- * tasklet scheduled when an isochronous input URB from the Gigaset device
- * has completed
- * parameter:
- *	data	B channel state structure
- */
-static void read_iso_tasklet(unsigned long data)
-{
-	struct bc_state *bcs = (struct bc_state *) data;
-	struct bas_bc_state *ubc = bcs->hw.bas;
-	struct cardstate *cs = bcs->cs;
-	struct urb *urb;
-	int status;
-	struct usb_iso_packet_descriptor *ifd;
-	char *rcvbuf;
-	unsigned long flags;
-	int totleft, numbytes, offset, frame, rc;
-
-	/* loop while more completed URBs arrive in the meantime */
-	for (;;) {
-		/* retrieve URB */
-		spin_lock_irqsave(&ubc->isoinlock, flags);
-		urb = ubc->isoindone;
-		if (!urb) {
-			spin_unlock_irqrestore(&ubc->isoinlock, flags);
-			return;
-		}
-		status = ubc->isoinstatus;
-		ubc->isoindone = NULL;
-		if (unlikely(ubc->loststatus != -EINPROGRESS)) {
-			dev_warn(cs->dev,
-				 "isoc read overrun, URB dropped (status: %s, %d bytes)\n",
-				 get_usb_statmsg(ubc->loststatus),
-				 ubc->isoinlost);
-			ubc->loststatus = -EINPROGRESS;
-		}
-		spin_unlock_irqrestore(&ubc->isoinlock, flags);
-
-		if (unlikely(!(ubc->running))) {
-			gig_dbg(DEBUG_ISO,
-				"%s: channel not running, "
-				"dropped URB with status: %s",
-				__func__, get_usb_statmsg(status));
-			return;
-		}
-
-		switch (status) {
-		case 0:				/* normal completion */
-			break;
-		case -EXDEV:			/* inspect individual frames
-						   (we do that anyway) */
-			gig_dbg(DEBUG_ISO, "%s: URB partially completed",
-				__func__);
-			break;
-		case -ENOENT:
-		case -ECONNRESET:
-		case -EINPROGRESS:
-			gig_dbg(DEBUG_ISO, "%s: %s",
-				__func__, get_usb_statmsg(status));
-			continue;		/* -> skip */
-		case -EPIPE:
-			dev_err(cs->dev, "isoc read: stalled\n");
-			error_hangup(bcs);
-			continue;		/* -> skip */
-		default:			/* other error */
-			dev_warn(cs->dev, "isoc read: %s\n",
-				 get_usb_statmsg(status));
-			goto error;
-		}
-
-		rcvbuf = urb->transfer_buffer;
-		totleft = urb->actual_length;
-		for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) {
-			ifd = &urb->iso_frame_desc[frame];
-			numbytes = ifd->actual_length;
-			switch (ifd->status) {
-			case 0:			/* success */
-				break;
-			case -EPROTO:		/* protocol error or unplug */
-			case -EILSEQ:
-			case -ETIME:
-				/* probably just disconnected, ignore */
-				gig_dbg(DEBUG_ISO,
-					"isoc read: frame %d[%d]: %s\n",
-					frame, numbytes,
-					get_usb_statmsg(ifd->status));
-				break;
-			default:		/* other error */
-				/* report, assume transferred bytes are ok */
-				dev_warn(cs->dev,
-					 "isoc read: frame %d[%d]: %s\n",
-					 frame, numbytes,
-					 get_usb_statmsg(ifd->status));
-			}
-			if (unlikely(numbytes > BAS_MAXFRAME))
-				dev_warn(cs->dev,
-					 "isoc read: frame %d[%d]: %s\n",
-					 frame, numbytes,
-					 "exceeds max frame size");
-			if (unlikely(numbytes > totleft)) {
-				dev_warn(cs->dev,
-					 "isoc read: frame %d[%d]: %s\n",
-					 frame, numbytes,
-					 "exceeds total transfer length");
-				numbytes = totleft;
-			}
-			offset = ifd->offset;
-			if (unlikely(offset + numbytes > BAS_INBUFSIZE)) {
-				dev_warn(cs->dev,
-					 "isoc read: frame %d[%d]: %s\n",
-					 frame, numbytes,
-					 "exceeds end of buffer");
-				numbytes = BAS_INBUFSIZE - offset;
-			}
-			gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs);
-			totleft -= numbytes;
-		}
-		if (unlikely(totleft > 0))
-			dev_warn(cs->dev, "isoc read: %d data bytes missing\n",
-				 totleft);
-
-error:
-		/* URB processed, resubmit */
-		for (frame = 0; frame < BAS_NUMFRAMES; frame++) {
-			urb->iso_frame_desc[frame].status = 0;
-			urb->iso_frame_desc[frame].actual_length = 0;
-		}
-		/* urb->dev is clobbered by USB subsystem */
-		urb->dev = bcs->cs->hw.bas->udev;
-		urb->transfer_flags = URB_ISO_ASAP;
-		urb->number_of_packets = BAS_NUMFRAMES;
-		rc = usb_submit_urb(urb, GFP_ATOMIC);
-		if (unlikely(rc != 0 && rc != -ENODEV)) {
-			dev_err(cs->dev,
-				"could not resubmit isoc read URB: %s\n",
-				get_usb_rcmsg(rc));
-			dump_urb(DEBUG_ISO, "resubmit isoc read", urb);
-			error_hangup(bcs);
-		}
-	}
-}
-
-/* Channel Operations */
-/* ================== */
-
-/* req_timeout
- * timeout routine for control output request
- * argument:
- *	controller state structure
- */
-static void req_timeout(struct timer_list *t)
-{
-	struct bas_cardstate *ucs = from_timer(ucs, t, timer_ctrl);
-	struct cardstate *cs = ucs->cs;
-	int pending;
-	unsigned long flags;
-
-	check_pending(ucs);
-
-	spin_lock_irqsave(&ucs->lock, flags);
-	pending = ucs->pending;
-	ucs->pending = 0;
-	spin_unlock_irqrestore(&ucs->lock, flags);
-
-	switch (pending) {
-	case 0:					/* no pending request */
-		gig_dbg(DEBUG_USBREQ, "%s: no request pending", __func__);
-		break;
-
-	case HD_OPEN_ATCHANNEL:
-		dev_err(cs->dev, "timeout opening AT channel\n");
-		error_reset(cs);
-		break;
-
-	case HD_OPEN_B1CHANNEL:
-		dev_err(cs->dev, "timeout opening channel 1\n");
-		error_hangup(&cs->bcs[0]);
-		break;
-
-	case HD_OPEN_B2CHANNEL:
-		dev_err(cs->dev, "timeout opening channel 2\n");
-		error_hangup(&cs->bcs[1]);
-		break;
-
-	case HD_CLOSE_ATCHANNEL:
-		dev_err(cs->dev, "timeout closing AT channel\n");
-		error_reset(cs);
-		break;
-
-	case HD_CLOSE_B1CHANNEL:
-		dev_err(cs->dev, "timeout closing channel 1\n");
-		error_reset(cs);
-		break;
-
-	case HD_CLOSE_B2CHANNEL:
-		dev_err(cs->dev, "timeout closing channel 2\n");
-		error_reset(cs);
-		break;
-
-	case HD_RESET_INTERRUPT_PIPE:
-		/* error recovery escalation */
-		dev_err(cs->dev,
-			"reset interrupt pipe timeout, attempting USB reset\n");
-		usb_queue_reset_device(ucs->interface);
-		break;
-
-	default:
-		dev_warn(cs->dev, "request 0x%02x timed out, clearing\n",
-			 pending);
-	}
-
-	wake_up(&ucs->waitqueue);
-}
-
-/* write_ctrl_callback
- * USB completion handler for control pipe output
- * called by the USB subsystem in interrupt context
- * parameter:
- *	urb	USB request block of completed request
- *		urb->context = hardware specific controller state structure
- */
-static void write_ctrl_callback(struct urb *urb)
-{
-	struct bas_cardstate *ucs = urb->context;
-	int status = urb->status;
-	int rc;
-	unsigned long flags;
-
-	/* check status */
-	switch (status) {
-	case 0:					/* normal completion */
-		spin_lock_irqsave(&ucs->lock, flags);
-		switch (ucs->pending) {
-		case HD_DEVICE_INIT_ACK:	/* no reply expected */
-			del_timer(&ucs->timer_ctrl);
-			ucs->pending = 0;
-			break;
-		}
-		spin_unlock_irqrestore(&ucs->lock, flags);
-		return;
-
-	case -ENOENT:			/* cancelled */
-	case -ECONNRESET:		/* cancelled (async) */
-	case -EINPROGRESS:		/* pending */
-	case -ENODEV:			/* device removed */
-	case -ESHUTDOWN:		/* device shut down */
-		/* ignore silently */
-		gig_dbg(DEBUG_USBREQ, "%s: %s",
-			__func__, get_usb_statmsg(status));
-		break;
-
-	default:				/* any failure */
-		/* don't retry if suspend requested */
-		if (++ucs->retry_ctrl > BAS_RETRY ||
-		    (ucs->basstate & BS_SUSPEND)) {
-			dev_err(&ucs->interface->dev,
-				"control request 0x%02x failed: %s\n",
-				ucs->dr_ctrl.bRequest,
-				get_usb_statmsg(status));
-			break;		/* give up */
-		}
-		dev_notice(&ucs->interface->dev,
-			   "control request 0x%02x: %s, retry %d\n",
-			   ucs->dr_ctrl.bRequest, get_usb_statmsg(status),
-			   ucs->retry_ctrl);
-		/* urb->dev is clobbered by USB subsystem */
-		urb->dev = ucs->udev;
-		rc = usb_submit_urb(urb, GFP_ATOMIC);
-		if (unlikely(rc)) {
-			dev_err(&ucs->interface->dev,
-				"could not resubmit request 0x%02x: %s\n",
-				ucs->dr_ctrl.bRequest, get_usb_rcmsg(rc));
-			break;
-		}
-		/* resubmitted */
-		return;
-	}
-
-	/* failed, clear pending request */
-	spin_lock_irqsave(&ucs->lock, flags);
-	del_timer(&ucs->timer_ctrl);
-	ucs->pending = 0;
-	spin_unlock_irqrestore(&ucs->lock, flags);
-	wake_up(&ucs->waitqueue);
-}
-
-/* req_submit
- * submit a control output request without message buffer to the Gigaset base
- * and optionally start a timeout
- * parameters:
- *	bcs	B channel control structure
- *	req	control request code (HD_*)
- *	val	control request parameter value (set to 0 if unused)
- *	timeout	timeout in seconds (0: no timeout)
- * return value:
- *	0 on success
- *	-EBUSY if another request is pending
- *	any URB submission error code
- */
-static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
-{
-	struct bas_cardstate *ucs = bcs->cs->hw.bas;
-	int ret;
-	unsigned long flags;
-
-	gig_dbg(DEBUG_USBREQ, "-------> 0x%02x (%d)", req, val);
-
-	spin_lock_irqsave(&ucs->lock, flags);
-	if (ucs->pending) {
-		spin_unlock_irqrestore(&ucs->lock, flags);
-		dev_err(bcs->cs->dev,
-			"submission of request 0x%02x failed: "
-			"request 0x%02x still pending\n",
-			req, ucs->pending);
-		return -EBUSY;
-	}
-
-	ucs->dr_ctrl.bRequestType = OUT_VENDOR_REQ;
-	ucs->dr_ctrl.bRequest = req;
-	ucs->dr_ctrl.wValue = cpu_to_le16(val);
-	ucs->dr_ctrl.wIndex = 0;
-	ucs->dr_ctrl.wLength = 0;
-	usb_fill_control_urb(ucs->urb_ctrl, ucs->udev,
-			     usb_sndctrlpipe(ucs->udev, 0),
-			     (unsigned char *) &ucs->dr_ctrl, NULL, 0,
-			     write_ctrl_callback, ucs);
-	ucs->retry_ctrl = 0;
-	ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC);
-	if (unlikely(ret)) {
-		dev_err(bcs->cs->dev, "could not submit request 0x%02x: %s\n",
-			req, get_usb_rcmsg(ret));
-		spin_unlock_irqrestore(&ucs->lock, flags);
-		return ret;
-	}
-	ucs->pending = req;
-
-	if (timeout > 0) {
-		gig_dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
-		mod_timer(&ucs->timer_ctrl, jiffies + timeout * HZ / 10);
-	}
-
-	spin_unlock_irqrestore(&ucs->lock, flags);
-	return 0;
-}
-
-/* gigaset_init_bchannel
- * called by common.c to connect a B channel
- * initialize isochronous I/O and tell the Gigaset base to open the channel
- * argument:
- *	B channel control structure
- * return value:
- *	0 on success, error code < 0 on error
- */
-static int gigaset_init_bchannel(struct bc_state *bcs)
-{
-	struct cardstate *cs = bcs->cs;
-	int req, ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cs->lock, flags);
-	if (unlikely(!cs->connected)) {
-		gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
-		spin_unlock_irqrestore(&cs->lock, flags);
-		return -ENODEV;
-	}
-
-	if (cs->hw.bas->basstate & BS_SUSPEND) {
-		dev_notice(cs->dev,
-			   "not starting isoc I/O, suspend in progress\n");
-		spin_unlock_irqrestore(&cs->lock, flags);
-		return -EHOSTUNREACH;
-	}
-
-	ret = starturbs(bcs);
-	if (ret < 0) {
-		spin_unlock_irqrestore(&cs->lock, flags);
-		dev_err(cs->dev,
-			"could not start isoc I/O for channel B%d: %s\n",
-			bcs->channel + 1,
-			ret == -EFAULT ? "null URB" : get_usb_rcmsg(ret));
-		if (ret != -ENODEV)
-			error_hangup(bcs);
-		return ret;
-	}
-
-	req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
-	ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
-	if (ret < 0) {
-		dev_err(cs->dev, "could not open channel B%d\n",
-			bcs->channel + 1);
-		stopurbs(bcs->hw.bas);
-	}
-
-	spin_unlock_irqrestore(&cs->lock, flags);
-	if (ret < 0 && ret != -ENODEV)
-		error_hangup(bcs);
-	return ret;
-}
-
-/* gigaset_close_bchannel
- * called by common.c to disconnect a B channel
- * tell the Gigaset base to close the channel
- * stopping isochronous I/O and LL notification will be done when the
- * acknowledgement for the close arrives
- * argument:
- *	B channel control structure
- * return value:
- *	0 on success, error code < 0 on error
- */
-static int gigaset_close_bchannel(struct bc_state *bcs)
-{
-	struct cardstate *cs = bcs->cs;
-	int req, ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cs->lock, flags);
-	if (unlikely(!cs->connected)) {
-		spin_unlock_irqrestore(&cs->lock, flags);
-		gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
-		return -ENODEV;
-	}
-
-	if (!(cs->hw.bas->basstate & (bcs->channel ? BS_B2OPEN : BS_B1OPEN))) {
-		/* channel not running: just signal common.c */
-		spin_unlock_irqrestore(&cs->lock, flags);
-		gigaset_bchannel_down(bcs);
-		return 0;
-	}
-
-	/* channel running: tell device to close it */
-	req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
-	ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
-	if (ret < 0)
-		dev_err(cs->dev, "closing channel B%d failed\n",
-			bcs->channel + 1);
-
-	spin_unlock_irqrestore(&cs->lock, flags);
-	return ret;
-}
-
-/* Device Operations */
-/* ================= */
-
-/* complete_cb
- * unqueue first command buffer from queue, waking any sleepers
- * must be called with cs->cmdlock held
- * parameter:
- *	cs	controller state structure
- */
-static void complete_cb(struct cardstate *cs)
-{
-	struct cmdbuf_t *cb = cs->cmdbuf;
-
-	/* unqueue completed buffer */
-	cs->cmdbytes -= cs->curlen;
-	gig_dbg(DEBUG_OUTPUT, "write_command: sent %u bytes, %u left",
-		cs->curlen, cs->cmdbytes);
-	if (cb->next != NULL) {
-		cs->cmdbuf = cb->next;
-		cs->cmdbuf->prev = NULL;
-		cs->curlen = cs->cmdbuf->len;
-	} else {
-		cs->cmdbuf = NULL;
-		cs->lastcmdbuf = NULL;
-		cs->curlen = 0;
-	}
-
-	if (cb->wake_tasklet)
-		tasklet_schedule(cb->wake_tasklet);
-
-	kfree(cb);
-}
-
-/* write_command_callback
- * USB completion handler for AT command transmission
- * called by the USB subsystem in interrupt context
- * parameter:
- *	urb	USB request block of completed request
- *		urb->context = controller state structure
- */
-static void write_command_callback(struct urb *urb)
-{
-	struct cardstate *cs = urb->context;
-	struct bas_cardstate *ucs = cs->hw.bas;
-	int status = urb->status;
-	unsigned long flags;
-
-	update_basstate(ucs, 0, BS_ATWRPEND);
-	wake_up(&ucs->waitqueue);
-
-	/* check status */
-	switch (status) {
-	case 0:					/* normal completion */
-		break;
-	case -ENOENT:			/* cancelled */
-	case -ECONNRESET:		/* cancelled (async) */
-	case -EINPROGRESS:		/* pending */
-	case -ENODEV:			/* device removed */
-	case -ESHUTDOWN:		/* device shut down */
-		/* ignore silently */
-		gig_dbg(DEBUG_USBREQ, "%s: %s",
-			__func__, get_usb_statmsg(status));
-		return;
-	default:				/* any failure */
-		if (++ucs->retry_cmd_out > BAS_RETRY) {
-			dev_warn(cs->dev,
-				 "command write: %s, "
-				 "giving up after %d retries\n",
-				 get_usb_statmsg(status),
-				 ucs->retry_cmd_out);
-			break;
-		}
-		if (ucs->basstate & BS_SUSPEND) {
-			dev_warn(cs->dev,
-				 "command write: %s, "
-				 "won't retry - suspend requested\n",
-				 get_usb_statmsg(status));
-			break;
-		}
-		if (cs->cmdbuf == NULL) {
-			dev_warn(cs->dev,
-				 "command write: %s, "
-				 "cannot retry - cmdbuf gone\n",
-				 get_usb_statmsg(status));
-			break;
-		}
-		dev_notice(cs->dev, "command write: %s, retry %d\n",
-			   get_usb_statmsg(status), ucs->retry_cmd_out);
-		if (atwrite_submit(cs, cs->cmdbuf->buf, cs->cmdbuf->len) >= 0)
-			/* resubmitted - bypass regular exit block */
-			return;
-		/* command send failed, assume base still waiting */
-		update_basstate(ucs, BS_ATREADY, 0);
-	}
-
-	spin_lock_irqsave(&cs->cmdlock, flags);
-	if (cs->cmdbuf != NULL)
-		complete_cb(cs);
-	spin_unlock_irqrestore(&cs->cmdlock, flags);
-}
-
-/* atrdy_timeout
- * timeout routine for AT command transmission
- * argument:
- *	controller state structure
- */
-static void atrdy_timeout(struct timer_list *t)
-{
-	struct bas_cardstate *ucs = from_timer(ucs, t, timer_atrdy);
-	struct cardstate *cs = ucs->cs;
-
-	dev_warn(cs->dev, "timeout waiting for HD_READY_SEND_ATDATA\n");
-
-	/* fake the missing signal - what else can I do? */
-	update_basstate(ucs, BS_ATREADY, BS_ATTIMER);
-	start_cbsend(cs);
-}
-
-/* atwrite_submit
- * submit an HD_WRITE_ATMESSAGE command URB
- * parameters:
- *	cs	controller state structure
- *	buf	buffer containing command to send
- *	len	length of command to send
- * return value:
- *	0 on success
- *	-EBUSY if another request is pending
- *	any URB submission error code
- */
-static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len)
-{
-	struct bas_cardstate *ucs = cs->hw.bas;
-	int rc;
-
-	gig_dbg(DEBUG_USBREQ, "-------> HD_WRITE_ATMESSAGE (%d)", len);
-
-	if (update_basstate(ucs, BS_ATWRPEND, 0) & BS_ATWRPEND) {
-		dev_err(cs->dev,
-			"could not submit HD_WRITE_ATMESSAGE: URB busy\n");
-		return -EBUSY;
-	}
-
-	ucs->dr_cmd_out.bRequestType = OUT_VENDOR_REQ;
-	ucs->dr_cmd_out.bRequest = HD_WRITE_ATMESSAGE;
-	ucs->dr_cmd_out.wValue = 0;
-	ucs->dr_cmd_out.wIndex = 0;
-	ucs->dr_cmd_out.wLength = cpu_to_le16(len);
-	usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev,
-			     usb_sndctrlpipe(ucs->udev, 0),
-			     (unsigned char *) &ucs->dr_cmd_out, buf, len,
-			     write_command_callback, cs);
-	rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC);
-	if (unlikely(rc)) {
-		update_basstate(ucs, 0, BS_ATWRPEND);
-		dev_err(cs->dev, "could not submit HD_WRITE_ATMESSAGE: %s\n",
-			get_usb_rcmsg(rc));
-		return rc;
-	}
-
-	/* submitted successfully, start timeout if necessary */
-	if (!(update_basstate(ucs, BS_ATTIMER, BS_ATREADY) & BS_ATTIMER)) {
-		gig_dbg(DEBUG_OUTPUT, "setting ATREADY timeout of %d/10 secs",
-			ATRDY_TIMEOUT);
-		mod_timer(&ucs->timer_atrdy, jiffies + ATRDY_TIMEOUT * HZ / 10);
-	}
-	return 0;
-}
-
-/* start_cbsend
- * start transmission of AT command queue if necessary
- * parameter:
- *	cs		controller state structure
- * return value:
- *	0 on success
- *	error code < 0 on error
- */
-static int start_cbsend(struct cardstate *cs)
-{
-	struct cmdbuf_t *cb;
-	struct bas_cardstate *ucs = cs->hw.bas;
-	unsigned long flags;
-	int rc;
-	int retval = 0;
-
-	/* check if suspend requested */
-	if (ucs->basstate & BS_SUSPEND) {
-		gig_dbg(DEBUG_OUTPUT, "suspending");
-		return -EHOSTUNREACH;
-	}
-
-	/* check if AT channel is open */
-	if (!(ucs->basstate & BS_ATOPEN)) {
-		gig_dbg(DEBUG_OUTPUT, "AT channel not open");
-		rc = req_submit(cs->bcs, HD_OPEN_ATCHANNEL, 0, BAS_TIMEOUT);
-		if (rc < 0) {
-			/* flush command queue */
-			spin_lock_irqsave(&cs->cmdlock, flags);
-			while (cs->cmdbuf != NULL)
-				complete_cb(cs);
-			spin_unlock_irqrestore(&cs->cmdlock, flags);
-		}
-		return rc;
-	}
-
-	/* try to send first command in queue */
-	spin_lock_irqsave(&cs->cmdlock, flags);
-
-	while ((cb = cs->cmdbuf) != NULL && (ucs->basstate & BS_ATREADY)) {
-		ucs->retry_cmd_out = 0;
-		rc = atwrite_submit(cs, cb->buf, cb->len);
-		if (unlikely(rc)) {
-			retval = rc;
-			complete_cb(cs);
-		}
-	}
-
-	spin_unlock_irqrestore(&cs->cmdlock, flags);
-	return retval;
-}
-
-/* gigaset_write_cmd
- * This function is called by the device independent part of the driver
- * to transmit an AT command string to the Gigaset device.
- * It encapsulates the device specific method for transmission over the
- * direct USB connection to the base.
- * The command string is added to the queue of commands to send, and
- * USB transmission is started if necessary.
- * parameters:
- *	cs		controller state structure
- *	cb		command buffer structure
- * return value:
- *	number of bytes queued on success
- *	error code < 0 on error
- */
-static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
-{
-	unsigned long flags;
-	int rc;
-
-	gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
-			   DEBUG_TRANSCMD : DEBUG_LOCKCMD,
-			   "CMD Transmit", cb->len, cb->buf);
-
-	/* translate "+++" escape sequence sent as a single separate command
-	 * into "close AT channel" command for error recovery
-	 * The next command will reopen the AT channel automatically.
-	 */
-	if (cb->len == 3 && !memcmp(cb->buf, "+++", 3)) {
-		/* If an HD_RECEIVEATDATA_ACK message remains unhandled
-		 * because of an error, the base never sends another one.
-		 * The response channel is thus effectively blocked.
-		 * Closing and reopening the AT channel does *not* clear
-		 * this condition.
-		 * As a stopgap measure, submit a zero-length AT read
-		 * before closing the AT channel. This has the undocumented
-		 * effect of triggering a new HD_RECEIVEATDATA_ACK message
-		 * from the base if necessary.
-		 * The subsequent AT channel close then discards any pending
-		 * messages.
-		 */
-		spin_lock_irqsave(&cs->lock, flags);
-		if (!(cs->hw.bas->basstate & BS_ATRDPEND)) {
-			kfree(cs->hw.bas->rcvbuf);
-			cs->hw.bas->rcvbuf = NULL;
-			cs->hw.bas->rcvbuf_size = 0;
-			cs->hw.bas->retry_cmd_in = 0;
-			atread_submit(cs, 0);
-		}
-		spin_unlock_irqrestore(&cs->lock, flags);
-
-		rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT);
-		if (cb->wake_tasklet)
-			tasklet_schedule(cb->wake_tasklet);
-		if (!rc)
-			rc = cb->len;
-		kfree(cb);
-		return rc;
-	}
-
-	spin_lock_irqsave(&cs->cmdlock, flags);
-	cb->prev = cs->lastcmdbuf;
-	if (cs->lastcmdbuf)
-		cs->lastcmdbuf->next = cb;
-	else {
-		cs->cmdbuf = cb;
-		cs->curlen = cb->len;
-	}
-	cs->cmdbytes += cb->len;
-	cs->lastcmdbuf = cb;
-	spin_unlock_irqrestore(&cs->cmdlock, flags);
-
-	spin_lock_irqsave(&cs->lock, flags);
-	if (unlikely(!cs->connected)) {
-		spin_unlock_irqrestore(&cs->lock, flags);
-		gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
-		/* flush command queue */
-		spin_lock_irqsave(&cs->cmdlock, flags);
-		while (cs->cmdbuf != NULL)
-			complete_cb(cs);
-		spin_unlock_irqrestore(&cs->cmdlock, flags);
-		return -ENODEV;
-	}
-	rc = start_cbsend(cs);
-	spin_unlock_irqrestore(&cs->lock, flags);
-	return rc < 0 ? rc : cb->len;
-}
-
-/* gigaset_write_room
- * tty_driver.write_room interface routine
- * return number of characters the driver will accept to be written via
- * gigaset_write_cmd
- * parameter:
- *	controller state structure
- * return value:
- *	number of characters
- */
-static int gigaset_write_room(struct cardstate *cs)
-{
-	return IF_WRITEBUF;
-}
-
-/* gigaset_chars_in_buffer
- * tty_driver.chars_in_buffer interface routine
- * return number of characters waiting to be sent
- * parameter:
- *	controller state structure
- * return value:
- *	number of characters
- */
-static int gigaset_chars_in_buffer(struct cardstate *cs)
-{
-	return cs->cmdbytes;
-}
-
-/* gigaset_brkchars
- * implementation of ioctl(GIGASET_BRKCHARS)
- * parameter:
- *	controller state structure
- * return value:
- *	-EINVAL (unimplemented function)
- */
-static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
-{
-	return -EINVAL;
-}
-
-
-/* Device Initialization/Shutdown */
-/* ============================== */
-
-/* Free hardware dependent part of the B channel structure
- * parameter:
- *	bcs	B channel structure
- */
-static void gigaset_freebcshw(struct bc_state *bcs)
-{
-	struct bas_bc_state *ubc = bcs->hw.bas;
-	int i;
-
-	if (!ubc)
-		return;
-
-	/* kill URBs and tasklets before freeing - better safe than sorry */
-	ubc->running = 0;
-	gig_dbg(DEBUG_INIT, "%s: killing isoc URBs", __func__);
-	for (i = 0; i < BAS_OUTURBS; ++i) {
-		usb_kill_urb(ubc->isoouturbs[i].urb);
-		usb_free_urb(ubc->isoouturbs[i].urb);
-	}
-	for (i = 0; i < BAS_INURBS; ++i) {
-		usb_kill_urb(ubc->isoinurbs[i]);
-		usb_free_urb(ubc->isoinurbs[i]);
-	}
-	tasklet_kill(&ubc->sent_tasklet);
-	tasklet_kill(&ubc->rcvd_tasklet);
-	kfree(ubc->isooutbuf);
-	kfree(ubc);
-	bcs->hw.bas = NULL;
-}
-
-/* Initialize hardware dependent part of the B channel structure
- * parameter:
- *	bcs	B channel structure
- * return value:
- *	0 on success, error code < 0 on failure
- */
-static int gigaset_initbcshw(struct bc_state *bcs)
-{
-	int i;
-	struct bas_bc_state *ubc;
-
-	bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL);
-	if (!ubc) {
-		pr_err("out of memory\n");
-		return -ENOMEM;
-	}
-
-	ubc->running = 0;
-	atomic_set(&ubc->corrbytes, 0);
-	spin_lock_init(&ubc->isooutlock);
-	for (i = 0; i < BAS_OUTURBS; ++i) {
-		ubc->isoouturbs[i].urb = NULL;
-		ubc->isoouturbs[i].bcs = bcs;
-	}
-	ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL;
-	ubc->numsub = 0;
-	ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL);
-	if (!ubc->isooutbuf) {
-		pr_err("out of memory\n");
-		kfree(ubc);
-		bcs->hw.bas = NULL;
-		return -ENOMEM;
-	}
-	tasklet_init(&ubc->sent_tasklet,
-		     write_iso_tasklet, (unsigned long) bcs);
-
-	spin_lock_init(&ubc->isoinlock);
-	for (i = 0; i < BAS_INURBS; ++i)
-		ubc->isoinurbs[i] = NULL;
-	ubc->isoindone = NULL;
-	ubc->loststatus = -EINPROGRESS;
-	ubc->isoinlost = 0;
-	ubc->seqlen = 0;
-	ubc->inbyte = 0;
-	ubc->inbits = 0;
-	ubc->goodbytes = 0;
-	ubc->alignerrs = 0;
-	ubc->fcserrs = 0;
-	ubc->frameerrs = 0;
-	ubc->giants = 0;
-	ubc->runts = 0;
-	ubc->aborts = 0;
-	ubc->shared0s = 0;
-	ubc->stolen0s = 0;
-	tasklet_init(&ubc->rcvd_tasklet,
-		     read_iso_tasklet, (unsigned long) bcs);
-	return 0;
-}
-
-static void gigaset_reinitbcshw(struct bc_state *bcs)
-{
-	struct bas_bc_state *ubc = bcs->hw.bas;
-
-	bcs->hw.bas->running = 0;
-	atomic_set(&bcs->hw.bas->corrbytes, 0);
-	bcs->hw.bas->numsub = 0;
-	spin_lock_init(&ubc->isooutlock);
-	spin_lock_init(&ubc->isoinlock);
-	ubc->loststatus = -EINPROGRESS;
-}
-
-static void gigaset_freecshw(struct cardstate *cs)
-{
-	/* timers, URBs and rcvbuf are disposed of in disconnect */
-	kfree(cs->hw.bas->int_in_buf);
-	kfree(cs->hw.bas);
-	cs->hw.bas = NULL;
-}
-
-/* Initialize hardware dependent part of the cardstate structure
- * parameter:
- *	cs	cardstate structure
- * return value:
- *	0 on success, error code < 0 on failure
- */
-static int gigaset_initcshw(struct cardstate *cs)
-{
-	struct bas_cardstate *ucs;
-
-	cs->hw.bas = ucs = kzalloc(sizeof(*ucs), GFP_KERNEL);
-	if (!ucs) {
-		pr_err("out of memory\n");
-		return -ENOMEM;
-	}
-	ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL);
-	if (!ucs->int_in_buf) {
-		kfree(ucs);
-		pr_err("out of memory\n");
-		return -ENOMEM;
-	}
-
-	spin_lock_init(&ucs->lock);
-	ucs->cs = cs;
-	timer_setup(&ucs->timer_ctrl, req_timeout, 0);
-	timer_setup(&ucs->timer_atrdy, atrdy_timeout, 0);
-	timer_setup(&ucs->timer_cmd_in, cmd_in_timeout, 0);
-	timer_setup(&ucs->timer_int_in, int_in_resubmit, 0);
-	init_waitqueue_head(&ucs->waitqueue);
-	INIT_WORK(&ucs->int_in_wq, int_in_work);
-
-	return 0;
-}
-
-/* freeurbs
- * unlink and deallocate all URBs unconditionally
- * caller must make sure that no commands are still in progress
- * parameter:
- *	cs	controller state structure
- */
-static void freeurbs(struct cardstate *cs)
-{
-	struct bas_cardstate *ucs = cs->hw.bas;
-	struct bas_bc_state *ubc;
-	int i, j;
-
-	gig_dbg(DEBUG_INIT, "%s: killing URBs", __func__);
-	for (j = 0; j < BAS_CHANNELS; ++j) {
-		ubc = cs->bcs[j].hw.bas;
-		for (i = 0; i < BAS_OUTURBS; ++i) {
-			usb_kill_urb(ubc->isoouturbs[i].urb);
-			usb_free_urb(ubc->isoouturbs[i].urb);
-			ubc->isoouturbs[i].urb = NULL;
-		}
-		for (i = 0; i < BAS_INURBS; ++i) {
-			usb_kill_urb(ubc->isoinurbs[i]);
-			usb_free_urb(ubc->isoinurbs[i]);
-			ubc->isoinurbs[i] = NULL;
-		}
-	}
-	usb_kill_urb(ucs->urb_int_in);
-	usb_free_urb(ucs->urb_int_in);
-	ucs->urb_int_in = NULL;
-	usb_kill_urb(ucs->urb_cmd_out);
-	usb_free_urb(ucs->urb_cmd_out);
-	ucs->urb_cmd_out = NULL;
-	usb_kill_urb(ucs->urb_cmd_in);
-	usb_free_urb(ucs->urb_cmd_in);
-	ucs->urb_cmd_in = NULL;
-	usb_kill_urb(ucs->urb_ctrl);
-	usb_free_urb(ucs->urb_ctrl);
-	ucs->urb_ctrl = NULL;
-}
-
-/* gigaset_probe
- * This function is called when a new USB device is connected.
- * It checks whether the new device is handled by this driver.
- */
-static int gigaset_probe(struct usb_interface *interface,
-			 const struct usb_device_id *id)
-{
-	struct usb_host_interface *hostif;
-	struct usb_device *udev = interface_to_usbdev(interface);
-	struct cardstate *cs = NULL;
-	struct bas_cardstate *ucs = NULL;
-	struct bas_bc_state *ubc;
-	struct usb_endpoint_descriptor *endpoint;
-	int i, j;
-	int rc;
-
-	gig_dbg(DEBUG_INIT,
-		"%s: Check if device matches .. (Vendor: 0x%x, Product: 0x%x)",
-		__func__, le16_to_cpu(udev->descriptor.idVendor),
-		le16_to_cpu(udev->descriptor.idProduct));
-
-	/* set required alternate setting */
-	hostif = interface->cur_altsetting;
-	if (hostif->desc.bAlternateSetting != 3) {
-		gig_dbg(DEBUG_INIT,
-			"%s: wrong alternate setting %d - trying to switch",
-			__func__, hostif->desc.bAlternateSetting);
-		if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3)
-		    < 0) {
-			dev_warn(&udev->dev, "usb_set_interface failed, "
-				 "device %d interface %d altsetting %d\n",
-				 udev->devnum, hostif->desc.bInterfaceNumber,
-				 hostif->desc.bAlternateSetting);
-			return -ENODEV;
-		}
-		hostif = interface->cur_altsetting;
-	}
-
-	/* Reject application specific interfaces
-	 */
-	if (hostif->desc.bInterfaceClass != 255) {
-		dev_warn(&udev->dev, "%s: bInterfaceClass == %d\n",
-			 __func__, hostif->desc.bInterfaceClass);
-		return -ENODEV;
-	}
-
-	if (hostif->desc.bNumEndpoints < 1)
-		return -ENODEV;
-
-	dev_info(&udev->dev,
-		 "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
-		 __func__, le16_to_cpu(udev->descriptor.idVendor),
-		 le16_to_cpu(udev->descriptor.idProduct));
-
-	/* allocate memory for our device state and initialize it */
-	cs = gigaset_initcs(driver, BAS_CHANNELS, 0, 0, cidmode,
-			    GIGASET_MODULENAME);
-	if (!cs)
-		return -ENODEV;
-	ucs = cs->hw.bas;
-
-	/* save off device structure ptrs for later use */
-	usb_get_dev(udev);
-	ucs->udev = udev;
-	ucs->interface = interface;
-	cs->dev = &interface->dev;
-
-	/* allocate URBs:
-	 * - one for the interrupt pipe
-	 * - three for the different uses of the default control pipe
-	 * - three for each isochronous pipe
-	 */
-	if (!(ucs->urb_int_in = usb_alloc_urb(0, GFP_KERNEL)) ||
-	    !(ucs->urb_cmd_in = usb_alloc_urb(0, GFP_KERNEL)) ||
-	    !(ucs->urb_cmd_out = usb_alloc_urb(0, GFP_KERNEL)) ||
-	    !(ucs->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL)))
-		goto allocerr;
-
-	for (j = 0; j < BAS_CHANNELS; ++j) {
-		ubc = cs->bcs[j].hw.bas;
-		for (i = 0; i < BAS_OUTURBS; ++i)
-			if (!(ubc->isoouturbs[i].urb =
-			      usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
-				goto allocerr;
-		for (i = 0; i < BAS_INURBS; ++i)
-			if (!(ubc->isoinurbs[i] =
-			      usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
-				goto allocerr;
-	}
-
-	ucs->rcvbuf = NULL;
-	ucs->rcvbuf_size = 0;
-
-	/* Fill the interrupt urb and send it to the core */
-	endpoint = &hostif->endpoint[0].desc;
-	usb_fill_int_urb(ucs->urb_int_in, udev,
-			 usb_rcvintpipe(udev,
-					usb_endpoint_num(endpoint)),
-			 ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
-			 endpoint->bInterval);
-	rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
-	if (rc != 0) {
-		dev_err(cs->dev, "could not submit interrupt URB: %s\n",
-			get_usb_rcmsg(rc));
-		goto error;
-	}
-	ucs->retry_int_in = 0;
-
-	/* tell the device that the driver is ready */
-	rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0);
-	if (rc != 0)
-		goto error;
-
-	/* tell common part that the device is ready */
-	if (startmode == SM_LOCKED)
-		cs->mstate = MS_LOCKED;
-
-	/* save address of controller structure */
-	usb_set_intfdata(interface, cs);
-
-	rc = gigaset_start(cs);
-	if (rc < 0)
-		goto error;
-
-	return 0;
-
-allocerr:
-	dev_err(cs->dev, "could not allocate URBs\n");
-	rc = -ENOMEM;
-error:
-	freeurbs(cs);
-	usb_set_intfdata(interface, NULL);
-	usb_put_dev(udev);
-	gigaset_freecs(cs);
-	return rc;
-}
-
-/* gigaset_disconnect
- * This function is called when the Gigaset base is unplugged.
- */
-static void gigaset_disconnect(struct usb_interface *interface)
-{
-	struct cardstate *cs;
-	struct bas_cardstate *ucs;
-	int j;
-
-	cs = usb_get_intfdata(interface);
-
-	ucs = cs->hw.bas;
-
-	dev_info(cs->dev, "disconnecting Gigaset base\n");
-
-	/* mark base as not ready, all channels disconnected */
-	ucs->basstate = 0;
-
-	/* tell LL all channels are down */
-	for (j = 0; j < BAS_CHANNELS; ++j)
-		gigaset_bchannel_down(cs->bcs + j);
-
-	/* stop driver (common part) */
-	gigaset_stop(cs);
-
-	/* stop delayed work and URBs, free resources */
-	del_timer_sync(&ucs->timer_ctrl);
-	del_timer_sync(&ucs->timer_atrdy);
-	del_timer_sync(&ucs->timer_cmd_in);
-	del_timer_sync(&ucs->timer_int_in);
-	cancel_work_sync(&ucs->int_in_wq);
-	freeurbs(cs);
-	usb_set_intfdata(interface, NULL);
-	kfree(ucs->rcvbuf);
-	ucs->rcvbuf = NULL;
-	ucs->rcvbuf_size = 0;
-	usb_put_dev(ucs->udev);
-	ucs->interface = NULL;
-	ucs->udev = NULL;
-	cs->dev = NULL;
-	gigaset_freecs(cs);
-}
-
-/* gigaset_suspend
- * This function is called before the USB connection is suspended
- * or before the USB device is reset.
- * In the latter case, message == PMSG_ON.
- */
-static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
-{
-	struct cardstate *cs = usb_get_intfdata(intf);
-	struct bas_cardstate *ucs = cs->hw.bas;
-	int rc;
-
-	/* set suspend flag; this stops AT command/response traffic */
-	if (update_basstate(ucs, BS_SUSPEND, 0) & BS_SUSPEND) {
-		gig_dbg(DEBUG_SUSPEND, "already suspended");
-		return 0;
-	}
-
-	/* wait a bit for blocking conditions to go away */
-	rc = wait_event_timeout(ucs->waitqueue,
-				!(ucs->basstate &
-				  (BS_B1OPEN | BS_B2OPEN | BS_ATRDPEND | BS_ATWRPEND)),
-				BAS_TIMEOUT * HZ / 10);
-	gig_dbg(DEBUG_SUSPEND, "wait_event_timeout() -> %d", rc);
-
-	/* check for conditions preventing suspend */
-	if (ucs->basstate & (BS_B1OPEN | BS_B2OPEN | BS_ATRDPEND | BS_ATWRPEND)) {
-		dev_warn(cs->dev, "cannot suspend:\n");
-		if (ucs->basstate & BS_B1OPEN)
-			dev_warn(cs->dev, " B channel 1 open\n");
-		if (ucs->basstate & BS_B2OPEN)
-			dev_warn(cs->dev, " B channel 2 open\n");
-		if (ucs->basstate & BS_ATRDPEND)
-			dev_warn(cs->dev, " receiving AT reply\n");
-		if (ucs->basstate & BS_ATWRPEND)
-			dev_warn(cs->dev, " sending AT command\n");
-		update_basstate(ucs, 0, BS_SUSPEND);
-		return -EBUSY;
-	}
-
-	/* close AT channel if open */
-	if (ucs->basstate & BS_ATOPEN) {
-		gig_dbg(DEBUG_SUSPEND, "closing AT channel");
-		rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, 0);
-		if (rc) {
-			update_basstate(ucs, 0, BS_SUSPEND);
-			return rc;
-		}
-		wait_event_timeout(ucs->waitqueue, !ucs->pending,
-				   BAS_TIMEOUT * HZ / 10);
-		/* in case of timeout, proceed anyway */
-	}
-
-	/* kill all URBs and delayed work that might still be pending */
-	usb_kill_urb(ucs->urb_ctrl);
-	usb_kill_urb(ucs->urb_int_in);
-	del_timer_sync(&ucs->timer_ctrl);
-	del_timer_sync(&ucs->timer_atrdy);
-	del_timer_sync(&ucs->timer_cmd_in);
-	del_timer_sync(&ucs->timer_int_in);
-
-	/* don't try to cancel int_in_wq from within reset as it
-	 * might be the one requesting the reset
-	 */
-	if (message.event != PM_EVENT_ON)
-		cancel_work_sync(&ucs->int_in_wq);
-
-	gig_dbg(DEBUG_SUSPEND, "suspend complete");
-	return 0;
-}
-
-/* gigaset_resume
- * This function is called after the USB connection has been resumed.
- */
-static int gigaset_resume(struct usb_interface *intf)
-{
-	struct cardstate *cs = usb_get_intfdata(intf);
-	struct bas_cardstate *ucs = cs->hw.bas;
-	int rc;
-
-	/* resubmit interrupt URB for spontaneous messages from base */
-	rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
-	if (rc) {
-		dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
-			get_usb_rcmsg(rc));
-		return rc;
-	}
-	ucs->retry_int_in = 0;
-
-	/* clear suspend flag to reallow activity */
-	update_basstate(ucs, 0, BS_SUSPEND);
-
-	gig_dbg(DEBUG_SUSPEND, "resume complete");
-	return 0;
-}
-
-/* gigaset_pre_reset
- * This function is called before the USB connection is reset.
- */
-static int gigaset_pre_reset(struct usb_interface *intf)
-{
-	/* handle just like suspend */
-	return gigaset_suspend(intf, PMSG_ON);
-}
-
-/* gigaset_post_reset
- * This function is called after the USB connection has been reset.
- */
-static int gigaset_post_reset(struct usb_interface *intf)
-{
-	/* FIXME: send HD_DEVICE_INIT_ACK? */
-
-	/* resume operations */
-	return gigaset_resume(intf);
-}
-
-
-static const struct gigaset_ops gigops = {
-	.write_cmd = gigaset_write_cmd,
-	.write_room = gigaset_write_room,
-	.chars_in_buffer = gigaset_chars_in_buffer,
-	.brkchars = gigaset_brkchars,
-	.init_bchannel = gigaset_init_bchannel,
-	.close_bchannel = gigaset_close_bchannel,
-	.initbcshw = gigaset_initbcshw,
-	.freebcshw = gigaset_freebcshw,
-	.reinitbcshw = gigaset_reinitbcshw,
-	.initcshw = gigaset_initcshw,
-	.freecshw = gigaset_freecshw,
-	.set_modem_ctrl = gigaset_set_modem_ctrl,
-	.baud_rate = gigaset_baud_rate,
-	.set_line_ctrl = gigaset_set_line_ctrl,
-	.send_skb = gigaset_isoc_send_skb,
-	.handle_input = gigaset_isoc_input,
-};
-
-/* bas_gigaset_init
- * This function is called after the kernel module is loaded.
- */
-static int __init bas_gigaset_init(void)
-{
-	int result;
-
-	/* allocate memory for our driver state and initialize it */
-	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
-				    GIGASET_MODULENAME, GIGASET_DEVNAME,
-				    &gigops, THIS_MODULE);
-	if (driver == NULL)
-		goto error;
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&gigaset_usb_driver);
-	if (result < 0) {
-		pr_err("error %d registering USB driver\n", -result);
-		goto error;
-	}
-
-	pr_info(DRIVER_DESC "\n");
-	return 0;
-
-error:
-	if (driver)
-		gigaset_freedriver(driver);
-	driver = NULL;
-	return -1;
-}
-
-/* bas_gigaset_exit
- * This function is called before the kernel module is unloaded.
- */
-static void __exit bas_gigaset_exit(void)
-{
-	struct bas_cardstate *ucs;
-	int i;
-
-	gigaset_blockdriver(driver); /* => probe will fail
-				      * => no gigaset_start any more
-				      */
-
-	/* stop all connected devices */
-	for (i = 0; i < driver->minors; i++) {
-		if (gigaset_shutdown(driver->cs + i) < 0)
-			continue;		/* no device */
-		/* from now on, no isdn callback should be possible */
-
-		/* close all still open channels */
-		ucs = driver->cs[i].hw.bas;
-		if (ucs->basstate & BS_B1OPEN) {
-			gig_dbg(DEBUG_INIT, "closing B1 channel");
-			usb_control_msg(ucs->udev,
-					usb_sndctrlpipe(ucs->udev, 0),
-					HD_CLOSE_B1CHANNEL, OUT_VENDOR_REQ,
-					0, 0, NULL, 0, BAS_TIMEOUT);
-		}
-		if (ucs->basstate & BS_B2OPEN) {
-			gig_dbg(DEBUG_INIT, "closing B2 channel");
-			usb_control_msg(ucs->udev,
-					usb_sndctrlpipe(ucs->udev, 0),
-					HD_CLOSE_B2CHANNEL, OUT_VENDOR_REQ,
-					0, 0, NULL, 0, BAS_TIMEOUT);
-		}
-		if (ucs->basstate & BS_ATOPEN) {
-			gig_dbg(DEBUG_INIT, "closing AT channel");
-			usb_control_msg(ucs->udev,
-					usb_sndctrlpipe(ucs->udev, 0),
-					HD_CLOSE_ATCHANNEL, OUT_VENDOR_REQ,
-					0, 0, NULL, 0, BAS_TIMEOUT);
-		}
-		ucs->basstate = 0;
-	}
-
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&gigaset_usb_driver);
-	/* this will call the disconnect-callback */
-	/* from now on, no disconnect/probe callback should be running */
-
-	gigaset_freedriver(driver);
-	driver = NULL;
-}
-
-
-module_init(bas_gigaset_init);
-module_exit(bas_gigaset_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/isdn/gigaset/capi.c b/drivers/staging/isdn/gigaset/capi.c
deleted file mode 100644
index 83d7dd4..0000000
--- a/drivers/staging/isdn/gigaset/capi.c
+++ /dev/null
@@ -1,2517 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Kernel CAPI interface for the Gigaset driver
- *
- * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/ratelimit.h>
-#include <linux/isdn/capilli.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/export.h>
-
-/* missing from kernelcapi.h */
-#define CapiNcpiNotSupportedByProtocol	0x0001
-#define CapiFlagsNotSupportedByProtocol	0x0002
-#define CapiAlertAlreadySent		0x0003
-#define CapiFacilitySpecificFunctionNotSupported	0x3011
-
-/* missing from capicmd.h */
-#define CAPI_CONNECT_IND_BASELEN	(CAPI_MSG_BASELEN + 4 + 2 + 8 * 1)
-#define CAPI_CONNECT_ACTIVE_IND_BASELEN	(CAPI_MSG_BASELEN + 4 + 3 * 1)
-#define CAPI_CONNECT_B3_IND_BASELEN	(CAPI_MSG_BASELEN + 4 + 1)
-#define CAPI_CONNECT_B3_ACTIVE_IND_BASELEN	(CAPI_MSG_BASELEN + 4 + 1)
-#define CAPI_DATA_B3_REQ_LEN64		(CAPI_MSG_BASELEN + 4 + 4 + 2 + 2 + 2 + 8)
-#define CAPI_DATA_B3_CONF_LEN		(CAPI_MSG_BASELEN + 4 + 2 + 2)
-#define CAPI_DISCONNECT_IND_LEN		(CAPI_MSG_BASELEN + 4 + 2)
-#define CAPI_DISCONNECT_B3_IND_BASELEN	(CAPI_MSG_BASELEN + 4 + 2 + 1)
-#define CAPI_FACILITY_CONF_BASELEN	(CAPI_MSG_BASELEN + 4 + 2 + 2 + 1)
-/* most _CONF messages contain only Controller/PLCI/NCCI and Info parameters */
-#define CAPI_STDCONF_LEN		(CAPI_MSG_BASELEN + 4 + 2)
-
-#define CAPI_FACILITY_HANDSET	0x0000
-#define CAPI_FACILITY_DTMF	0x0001
-#define CAPI_FACILITY_V42BIS	0x0002
-#define CAPI_FACILITY_SUPPSVC	0x0003
-#define CAPI_FACILITY_WAKEUP	0x0004
-#define CAPI_FACILITY_LI	0x0005
-
-#define CAPI_SUPPSVC_GETSUPPORTED	0x0000
-#define CAPI_SUPPSVC_LISTEN		0x0001
-
-/* missing from capiutil.h */
-#define CAPIMSG_PLCI_PART(m)	CAPIMSG_U8(m, 9)
-#define CAPIMSG_NCCI_PART(m)	CAPIMSG_U16(m, 10)
-#define CAPIMSG_HANDLE_REQ(m)	CAPIMSG_U16(m, 18) /* DATA_B3_REQ/_IND only! */
-#define CAPIMSG_FLAGS(m)	CAPIMSG_U16(m, 20)
-#define CAPIMSG_SETCONTROLLER(m, contr)	capimsg_setu8(m, 8, contr)
-#define CAPIMSG_SETPLCI_PART(m, plci)	capimsg_setu8(m, 9, plci)
-#define CAPIMSG_SETNCCI_PART(m, ncci)	capimsg_setu16(m, 10, ncci)
-#define CAPIMSG_SETFLAGS(m, flags)	capimsg_setu16(m, 20, flags)
-
-/* parameters with differing location in DATA_B3_CONF/_RESP: */
-#define CAPIMSG_SETHANDLE_CONF(m, handle)	capimsg_setu16(m, 12, handle)
-#define	CAPIMSG_SETINFO_CONF(m, info)		capimsg_setu16(m, 14, info)
-
-/* Flags (DATA_B3_REQ/_IND) */
-#define CAPI_FLAGS_DELIVERY_CONFIRMATION	0x04
-#define CAPI_FLAGS_RESERVED			(~0x1f)
-
-/* buffer sizes */
-#define MAX_BC_OCTETS 11
-#define MAX_HLC_OCTETS 3
-#define MAX_NUMBER_DIGITS 20
-#define MAX_FMT_IE_LEN 20
-
-/* values for bcs->apconnstate */
-#define APCONN_NONE	0	/* inactive/listening */
-#define APCONN_SETUP	1	/* connecting */
-#define APCONN_ACTIVE	2	/* B channel up */
-
-/* registered application data structure */
-struct gigaset_capi_appl {
-	struct list_head ctrlist;
-	struct gigaset_capi_appl *bcnext;
-	u16 id;
-	struct capi_register_params rp;
-	u16 nextMessageNumber;
-	u32 listenInfoMask;
-	u32 listenCIPmask;
-};
-
-/* CAPI specific controller data structure */
-struct gigaset_capi_ctr {
-	struct capi_ctr ctr;
-	struct list_head appls;
-	struct sk_buff_head sendqueue;
-	atomic_t sendqlen;
-	/* two _cmsg structures possibly used concurrently: */
-	_cmsg hcmsg;	/* for message composition triggered from hardware */
-	_cmsg acmsg;	/* for dissection of messages sent from application */
-	u8 bc_buf[MAX_BC_OCTETS + 1];
-	u8 hlc_buf[MAX_HLC_OCTETS + 1];
-	u8 cgpty_buf[MAX_NUMBER_DIGITS + 3];
-	u8 cdpty_buf[MAX_NUMBER_DIGITS + 2];
-};
-
-/* CIP Value table (from CAPI 2.0 standard, ch. 6.1) */
-static struct {
-	u8 *bc;
-	u8 *hlc;
-} cip2bchlc[] = {
-	[1] = { "8090A3", NULL },	/* Speech (A-law) */
-	[2] = { "8890", NULL },		/* Unrestricted digital information */
-	[3] = { "8990", NULL },		/* Restricted digital information */
-	[4] = { "9090A3", NULL },	/* 3,1 kHz audio (A-law) */
-	[5] = { "9190", NULL },		/* 7 kHz audio */
-	[6] = { "9890", NULL },		/* Video */
-	[7] = { "88C0C6E6", NULL },	/* Packet mode */
-	[8] = { "8890218F", NULL },	/* 56 kbit/s rate adaptation */
-	[9] = { "9190A5", NULL },	/* Unrestricted digital information
-					 * with tones/announcements */
-	[16] = { "8090A3", "9181" },	/* Telephony */
-	[17] = { "9090A3", "9184" },	/* Group 2/3 facsimile */
-	[18] = { "8890", "91A1" },	/* Group 4 facsimile Class 1 */
-	[19] = { "8890", "91A4" },	/* Teletex service basic and mixed mode
-					 * and Group 4 facsimile service
-					 * Classes II and III */
-	[20] = { "8890", "91A8" },	/* Teletex service basic and
-					 * processable mode */
-	[21] = { "8890", "91B1" },	/* Teletex service basic mode */
-	[22] = { "8890", "91B2" },	/* International interworking for
-					 * Videotex */
-	[23] = { "8890", "91B5" },	/* Telex */
-	[24] = { "8890", "91B8" },	/* Message Handling Systems
-					 * in accordance with X.400 */
-	[25] = { "8890", "91C1" },	/* OSI application
-					 * in accordance with X.200 */
-	[26] = { "9190A5", "9181" },	/* 7 kHz telephony */
-	[27] = { "9190A5", "916001" },	/* Video telephony, first connection */
-	[28] = { "8890", "916002" },	/* Video telephony, second connection */
-};
-
-/*
- * helper functions
- * ================
- */
-
-/*
- * emit unsupported parameter warning
- */
-static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param,
-					char *msgname, char *paramname)
-{
-	if (param && *param)
-		dev_warn(cs->dev, "%s: ignoring unsupported parameter: %s\n",
-			 msgname, paramname);
-}
-
-/*
- * convert an IE from Gigaset hex string to ETSI binary representation
- * including length byte
- * return value: result length, -1 on error
- */
-static int encode_ie(char *in, u8 *out, int maxlen)
-{
-	int l = 0;
-	while (*in) {
-		if (!isxdigit(in[0]) || !isxdigit(in[1]) || l >= maxlen)
-			return -1;
-		out[++l] = (hex_to_bin(in[0]) << 4) + hex_to_bin(in[1]);
-		in += 2;
-	}
-	out[0] = l;
-	return l;
-}
-
-/*
- * convert an IE from ETSI binary representation including length byte
- * to Gigaset hex string
- */
-static void decode_ie(u8 *in, char *out)
-{
-	int i = *in;
-	while (i-- > 0) {
-		/* ToDo: conversion to upper case necessary? */
-		*out++ = toupper(hex_asc_hi(*++in));
-		*out++ = toupper(hex_asc_lo(*in));
-	}
-}
-
-/*
- * retrieve application data structure for an application ID
- */
-static inline struct gigaset_capi_appl *
-get_appl(struct gigaset_capi_ctr *iif, u16 appl)
-{
-	struct gigaset_capi_appl *ap;
-
-	list_for_each_entry(ap, &iif->appls, ctrlist)
-		if (ap->id == appl)
-			return ap;
-	return NULL;
-}
-
-/*
- * dump CAPI message to kernel messages for debugging
- */
-static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p)
-{
-#ifdef CONFIG_GIGASET_DEBUG
-	/* dump at most 20 messages in 20 secs */
-	static DEFINE_RATELIMIT_STATE(msg_dump_ratelimit, 20 * HZ, 20);
-	_cdebbuf *cdb;
-
-	if (!(gigaset_debuglevel & level))
-		return;
-	if (!___ratelimit(&msg_dump_ratelimit, tag))
-		return;
-
-	cdb = capi_cmsg2str(p);
-	if (cdb) {
-		gig_dbg(level, "%s: [%d] %s", tag, p->ApplId, cdb->buf);
-		cdebbuf_free(cdb);
-	} else {
-		gig_dbg(level, "%s: [%d] %s", tag, p->ApplId,
-			capi_cmd2str(p->Command, p->Subcommand));
-	}
-#endif
-}
-
-static inline void dump_rawmsg(enum debuglevel level, const char *tag,
-			       unsigned char *data)
-{
-#ifdef CONFIG_GIGASET_DEBUG
-	char *dbgline;
-	int i, l;
-
-	if (!(gigaset_debuglevel & level))
-		return;
-
-	l = CAPIMSG_LEN(data);
-	if (l < 12) {
-		gig_dbg(level, "%s: ??? LEN=%04d", tag, l);
-		return;
-	}
-	gig_dbg(level, "%s: 0x%02x:0x%02x: ID=%03d #0x%04x LEN=%04d NCCI=0x%x",
-		tag, CAPIMSG_COMMAND(data), CAPIMSG_SUBCOMMAND(data),
-		CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
-		CAPIMSG_CONTROL(data));
-	l -= 12;
-	if (l <= 0)
-		return;
-	if (l > 64)
-		l = 64; /* arbitrary limit */
-	dbgline = kmalloc_array(3, l, GFP_ATOMIC);
-	if (!dbgline)
-		return;
-	for (i = 0; i < l; i++) {
-		dbgline[3 * i] = hex_asc_hi(data[12 + i]);
-		dbgline[3 * i + 1] = hex_asc_lo(data[12 + i]);
-		dbgline[3 * i + 2] = ' ';
-	}
-	dbgline[3 * l - 1] = '\0';
-	gig_dbg(level, "  %s", dbgline);
-	kfree(dbgline);
-	if (CAPIMSG_COMMAND(data) == CAPI_DATA_B3 &&
-	    (CAPIMSG_SUBCOMMAND(data) == CAPI_REQ ||
-	     CAPIMSG_SUBCOMMAND(data) == CAPI_IND)) {
-		l = CAPIMSG_DATALEN(data);
-		gig_dbg(level, "   DataLength=%d", l);
-		if (l <= 0 || !(gigaset_debuglevel & DEBUG_LLDATA))
-			return;
-		if (l > 64)
-			l = 64; /* arbitrary limit */
-		dbgline = kmalloc_array(3, l, GFP_ATOMIC);
-		if (!dbgline)
-			return;
-		data += CAPIMSG_LEN(data);
-		for (i = 0; i < l; i++) {
-			dbgline[3 * i] = hex_asc_hi(data[i]);
-			dbgline[3 * i + 1] = hex_asc_lo(data[i]);
-			dbgline[3 * i + 2] = ' ';
-		}
-		dbgline[3 * l - 1] = '\0';
-		gig_dbg(level, "  %s", dbgline);
-		kfree(dbgline);
-	}
-#endif
-}
-
-/*
- * format CAPI IE as string
- */
-
-#ifdef CONFIG_GIGASET_DEBUG
-static const char *format_ie(const char *ie)
-{
-	static char result[3 * MAX_FMT_IE_LEN];
-	int len, count;
-	char *pout = result;
-
-	if (!ie)
-		return "NULL";
-
-	count = len = ie[0];
-	if (count > MAX_FMT_IE_LEN)
-		count = MAX_FMT_IE_LEN - 1;
-	while (count--) {
-		*pout++ = hex_asc_hi(*++ie);
-		*pout++ = hex_asc_lo(*ie);
-		*pout++ = ' ';
-	}
-	if (len > MAX_FMT_IE_LEN) {
-		*pout++ = '.';
-		*pout++ = '.';
-		*pout++ = '.';
-	}
-	*--pout = 0;
-	return result;
-}
-#endif
-
-/*
- * emit DATA_B3_CONF message
- */
-static void send_data_b3_conf(struct cardstate *cs, struct capi_ctr *ctr,
-			      u16 appl, u16 msgid, int channel,
-			      u16 handle, u16 info)
-{
-	struct sk_buff *cskb;
-	u8 *msg;
-
-	cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC);
-	if (!cskb) {
-		dev_err(cs->dev, "%s: out of memory\n", __func__);
-		return;
-	}
-	/* frequent message, avoid _cmsg overhead */
-	msg = __skb_put(cskb, CAPI_DATA_B3_CONF_LEN);
-	CAPIMSG_SETLEN(msg, CAPI_DATA_B3_CONF_LEN);
-	CAPIMSG_SETAPPID(msg, appl);
-	CAPIMSG_SETCOMMAND(msg, CAPI_DATA_B3);
-	CAPIMSG_SETSUBCOMMAND(msg,  CAPI_CONF);
-	CAPIMSG_SETMSGID(msg, msgid);
-	CAPIMSG_SETCONTROLLER(msg, ctr->cnr);
-	CAPIMSG_SETPLCI_PART(msg, channel);
-	CAPIMSG_SETNCCI_PART(msg, 1);
-	CAPIMSG_SETHANDLE_CONF(msg, handle);
-	CAPIMSG_SETINFO_CONF(msg, info);
-
-	/* emit message */
-	dump_rawmsg(DEBUG_MCMD, __func__, msg);
-	capi_ctr_handle_message(ctr, appl, cskb);
-}
-
-
-/*
- * driver interface functions
- * ==========================
- */
-
-/**
- * gigaset_skb_sent() - acknowledge transmission of outgoing skb
- * @bcs:	B channel descriptor structure.
- * @dskb:	sent data.
- *
- * Called by hardware module {bas,ser,usb}_gigaset when the data in a
- * skb has been successfully sent, for signalling completion to the LL.
- */
-void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
-{
-	struct cardstate *cs = bcs->cs;
-	struct gigaset_capi_ctr *iif = cs->iif;
-	struct gigaset_capi_appl *ap = bcs->ap;
-	unsigned char *req = skb_mac_header(dskb);
-	u16 flags;
-
-	/* update statistics */
-	++bcs->trans_up;
-
-	if (!ap) {
-		gig_dbg(DEBUG_MCMD, "%s: application gone", __func__);
-		return;
-	}
-
-	/* don't send further B3 messages if disconnected */
-	if (bcs->apconnstate < APCONN_ACTIVE) {
-		gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__);
-		return;
-	}
-
-	/*
-	 * send DATA_B3_CONF if "delivery confirmation" bit was set in request;
-	 * otherwise it has already been sent by do_data_b3_req()
-	 */
-	flags = CAPIMSG_FLAGS(req);
-	if (flags & CAPI_FLAGS_DELIVERY_CONFIRMATION)
-		send_data_b3_conf(cs, &iif->ctr, ap->id, CAPIMSG_MSGID(req),
-				  bcs->channel + 1, CAPIMSG_HANDLE_REQ(req),
-				  (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION) ?
-				  CapiFlagsNotSupportedByProtocol :
-				  CAPI_NOERROR);
-}
-EXPORT_SYMBOL_GPL(gigaset_skb_sent);
-
-/**
- * gigaset_skb_rcvd() - pass received skb to LL
- * @bcs:	B channel descriptor structure.
- * @skb:	received data.
- *
- * Called by hardware module {bas,ser,usb}_gigaset when user data has
- * been successfully received, for passing to the LL.
- * Warning: skb must not be accessed anymore!
- */
-void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
-{
-	struct cardstate *cs = bcs->cs;
-	struct gigaset_capi_ctr *iif = cs->iif;
-	struct gigaset_capi_appl *ap = bcs->ap;
-	int len = skb->len;
-
-	/* update statistics */
-	bcs->trans_down++;
-
-	if (!ap) {
-		gig_dbg(DEBUG_MCMD, "%s: application gone", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-
-	/* don't send further B3 messages if disconnected */
-	if (bcs->apconnstate < APCONN_ACTIVE) {
-		gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-
-	/*
-	 * prepend DATA_B3_IND message to payload
-	 * Parameters: NCCI = 1, all others 0/unused
-	 * frequent message, avoid _cmsg overhead
-	 */
-	skb_push(skb, CAPI_DATA_B3_REQ_LEN);
-	CAPIMSG_SETLEN(skb->data, CAPI_DATA_B3_REQ_LEN);
-	CAPIMSG_SETAPPID(skb->data, ap->id);
-	CAPIMSG_SETCOMMAND(skb->data, CAPI_DATA_B3);
-	CAPIMSG_SETSUBCOMMAND(skb->data,  CAPI_IND);
-	CAPIMSG_SETMSGID(skb->data, ap->nextMessageNumber++);
-	CAPIMSG_SETCONTROLLER(skb->data, iif->ctr.cnr);
-	CAPIMSG_SETPLCI_PART(skb->data, bcs->channel + 1);
-	CAPIMSG_SETNCCI_PART(skb->data, 1);
-	/* Data parameter not used */
-	CAPIMSG_SETDATALEN(skb->data, len);
-	/* Data handle parameter not used */
-	CAPIMSG_SETFLAGS(skb->data, 0);
-	/* Data64 parameter not present */
-
-	/* emit message */
-	dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
-	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
-
-/**
- * gigaset_isdn_rcv_err() - signal receive error
- * @bcs:	B channel descriptor structure.
- *
- * Called by hardware module {bas,ser,usb}_gigaset when a receive error
- * has occurred, for signalling to the LL.
- */
-void gigaset_isdn_rcv_err(struct bc_state *bcs)
-{
-	/* if currently ignoring packets, just count down */
-	if (bcs->ignore) {
-		bcs->ignore--;
-		return;
-	}
-
-	/* update statistics */
-	bcs->corrupted++;
-
-	/* ToDo: signal error -> LL */
-}
-EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
-
-/**
- * gigaset_isdn_icall() - signal incoming call
- * @at_state:	connection state structure.
- *
- * Called by main module at tasklet level to notify the LL that an incoming
- * call has been received. @at_state contains the parameters of the call.
- *
- * Return value: call disposition (ICALL_*)
- */
-int gigaset_isdn_icall(struct at_state_t *at_state)
-{
-	struct cardstate *cs = at_state->cs;
-	struct bc_state *bcs = at_state->bcs;
-	struct gigaset_capi_ctr *iif = cs->iif;
-	struct gigaset_capi_appl *ap;
-	u32 actCIPmask;
-	struct sk_buff *skb;
-	unsigned int msgsize;
-	unsigned long flags;
-	int i;
-
-	/*
-	 * ToDo: signal calls without a free B channel, too
-	 * (requires a u8 handle for the at_state structure that can
-	 * be stored in the PLCI and used in the CONNECT_RESP message
-	 * handler to retrieve it)
-	 */
-	if (!bcs)
-		return ICALL_IGNORE;
-
-	/* prepare CONNECT_IND message, using B channel number as PLCI */
-	capi_cmsg_header(&iif->hcmsg, 0, CAPI_CONNECT, CAPI_IND, 0,
-			 iif->ctr.cnr | ((bcs->channel + 1) << 8));
-
-	/* minimum size, all structs empty */
-	msgsize = CAPI_CONNECT_IND_BASELEN;
-
-	/* Bearer Capability (mandatory) */
-	if (at_state->str_var[STR_ZBC]) {
-		/* pass on BC from Gigaset */
-		if (encode_ie(at_state->str_var[STR_ZBC], iif->bc_buf,
-			      MAX_BC_OCTETS) < 0) {
-			dev_warn(cs->dev, "RING ignored - bad BC %s\n",
-				 at_state->str_var[STR_ZBC]);
-			return ICALL_IGNORE;
-		}
-
-		/* look up corresponding CIP value */
-		iif->hcmsg.CIPValue = 0;	/* default if nothing found */
-		for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++)
-			if (cip2bchlc[i].bc != NULL &&
-			    cip2bchlc[i].hlc == NULL &&
-			    !strcmp(cip2bchlc[i].bc,
-				    at_state->str_var[STR_ZBC])) {
-				iif->hcmsg.CIPValue = i;
-				break;
-			}
-	} else {
-		/* no BC (internal call): assume CIP 1 (speech, A-law) */
-		iif->hcmsg.CIPValue = 1;
-		encode_ie(cip2bchlc[1].bc, iif->bc_buf, MAX_BC_OCTETS);
-	}
-	iif->hcmsg.BC = iif->bc_buf;
-	msgsize += iif->hcmsg.BC[0];
-
-	/* High Layer Compatibility (optional) */
-	if (at_state->str_var[STR_ZHLC]) {
-		/* pass on HLC from Gigaset */
-		if (encode_ie(at_state->str_var[STR_ZHLC], iif->hlc_buf,
-			      MAX_HLC_OCTETS) < 0) {
-			dev_warn(cs->dev, "RING ignored - bad HLC %s\n",
-				 at_state->str_var[STR_ZHLC]);
-			return ICALL_IGNORE;
-		}
-		iif->hcmsg.HLC = iif->hlc_buf;
-		msgsize += iif->hcmsg.HLC[0];
-
-		/* look up corresponding CIP value */
-		/* keep BC based CIP value if none found */
-		if (at_state->str_var[STR_ZBC])
-			for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++)
-				if (cip2bchlc[i].hlc != NULL &&
-				    !strcmp(cip2bchlc[i].hlc,
-					    at_state->str_var[STR_ZHLC]) &&
-				    !strcmp(cip2bchlc[i].bc,
-					    at_state->str_var[STR_ZBC])) {
-					iif->hcmsg.CIPValue = i;
-					break;
-				}
-	}
-
-	/* Called Party Number (optional) */
-	if (at_state->str_var[STR_ZCPN]) {
-		i = strlen(at_state->str_var[STR_ZCPN]);
-		if (i > MAX_NUMBER_DIGITS) {
-			dev_warn(cs->dev, "RING ignored - bad number %s\n",
-				 at_state->str_var[STR_ZBC]);
-			return ICALL_IGNORE;
-		}
-		iif->cdpty_buf[0] = i + 1;
-		iif->cdpty_buf[1] = 0x80; /* type / numbering plan unknown */
-		memcpy(iif->cdpty_buf + 2, at_state->str_var[STR_ZCPN], i);
-		iif->hcmsg.CalledPartyNumber = iif->cdpty_buf;
-		msgsize += iif->hcmsg.CalledPartyNumber[0];
-	}
-
-	/* Calling Party Number (optional) */
-	if (at_state->str_var[STR_NMBR]) {
-		i = strlen(at_state->str_var[STR_NMBR]);
-		if (i > MAX_NUMBER_DIGITS) {
-			dev_warn(cs->dev, "RING ignored - bad number %s\n",
-				 at_state->str_var[STR_ZBC]);
-			return ICALL_IGNORE;
-		}
-		iif->cgpty_buf[0] = i + 2;
-		iif->cgpty_buf[1] = 0x00; /* type / numbering plan unknown */
-		iif->cgpty_buf[2] = 0x80; /* pres. allowed, not screened */
-		memcpy(iif->cgpty_buf + 3, at_state->str_var[STR_NMBR], i);
-		iif->hcmsg.CallingPartyNumber = iif->cgpty_buf;
-		msgsize += iif->hcmsg.CallingPartyNumber[0];
-	}
-
-	/* remaining parameters (not supported, always left NULL):
-	 * - CalledPartySubaddress
-	 * - CallingPartySubaddress
-	 * - AdditionalInfo
-	 *   - BChannelinformation
-	 *   - Keypadfacility
-	 *   - Useruserdata
-	 *   - Facilitydataarray
-	 */
-
-	gig_dbg(DEBUG_CMD, "icall: PLCI %x CIP %d BC %s",
-		iif->hcmsg.adr.adrPLCI, iif->hcmsg.CIPValue,
-		format_ie(iif->hcmsg.BC));
-	gig_dbg(DEBUG_CMD, "icall: HLC %s",
-		format_ie(iif->hcmsg.HLC));
-	gig_dbg(DEBUG_CMD, "icall: CgPty %s",
-		format_ie(iif->hcmsg.CallingPartyNumber));
-	gig_dbg(DEBUG_CMD, "icall: CdPty %s",
-		format_ie(iif->hcmsg.CalledPartyNumber));
-
-	/* scan application list for matching listeners */
-	spin_lock_irqsave(&bcs->aplock, flags);
-	if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE) {
-		dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n",
-			 __func__, bcs->ap, bcs->apconnstate);
-		bcs->ap = NULL;
-		bcs->apconnstate = APCONN_NONE;
-	}
-	spin_unlock_irqrestore(&bcs->aplock, flags);
-	actCIPmask = 1 | (1 << iif->hcmsg.CIPValue);
-	list_for_each_entry(ap, &iif->appls, ctrlist)
-		if (actCIPmask & ap->listenCIPmask) {
-			/* build CONNECT_IND message for this application */
-			iif->hcmsg.ApplId = ap->id;
-			iif->hcmsg.Messagenumber = ap->nextMessageNumber++;
-
-			skb = alloc_skb(msgsize, GFP_ATOMIC);
-			if (!skb) {
-				dev_err(cs->dev, "%s: out of memory\n",
-					__func__);
-				break;
-			}
-			if (capi_cmsg2message(&iif->hcmsg,
-					      __skb_put(skb, msgsize))) {
-				dev_err(cs->dev, "%s: message parser failure\n",
-					__func__);
-				dev_kfree_skb_any(skb);
-				break;
-			}
-			dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
-
-			/* add to listeners on this B channel, update state */
-			spin_lock_irqsave(&bcs->aplock, flags);
-			ap->bcnext = bcs->ap;
-			bcs->ap = ap;
-			bcs->chstate |= CHS_NOTIFY_LL;
-			bcs->apconnstate = APCONN_SETUP;
-			spin_unlock_irqrestore(&bcs->aplock, flags);
-
-			/* emit message */
-			capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-		}
-
-	/*
-	 * Return "accept" if any listeners.
-	 * Gigaset will send ALERTING.
-	 * There doesn't seem to be a way to avoid this.
-	 */
-	return bcs->ap ? ICALL_ACCEPT : ICALL_IGNORE;
-}
-
-/*
- * send a DISCONNECT_IND message to an application
- * does not sleep, clobbers the controller's hcmsg structure
- */
-static void send_disconnect_ind(struct bc_state *bcs,
-				struct gigaset_capi_appl *ap, u16 reason)
-{
-	struct cardstate *cs = bcs->cs;
-	struct gigaset_capi_ctr *iif = cs->iif;
-	struct sk_buff *skb;
-
-	if (bcs->apconnstate == APCONN_NONE)
-		return;
-
-	capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT, CAPI_IND,
-			 ap->nextMessageNumber++,
-			 iif->ctr.cnr | ((bcs->channel + 1) << 8));
-	iif->hcmsg.Reason = reason;
-	skb = alloc_skb(CAPI_DISCONNECT_IND_LEN, GFP_ATOMIC);
-	if (!skb) {
-		dev_err(cs->dev, "%s: out of memory\n", __func__);
-		return;
-	}
-	if (capi_cmsg2message(&iif->hcmsg,
-			      __skb_put(skb, CAPI_DISCONNECT_IND_LEN))) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
-	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/*
- * send a DISCONNECT_B3_IND message to an application
- * Parameters: NCCI = 1, NCPI empty, Reason_B3 = 0
- * does not sleep, clobbers the controller's hcmsg structure
- */
-static void send_disconnect_b3_ind(struct bc_state *bcs,
-				   struct gigaset_capi_appl *ap)
-{
-	struct cardstate *cs = bcs->cs;
-	struct gigaset_capi_ctr *iif = cs->iif;
-	struct sk_buff *skb;
-
-	/* nothing to do if no logical connection active */
-	if (bcs->apconnstate < APCONN_ACTIVE)
-		return;
-	bcs->apconnstate = APCONN_SETUP;
-
-	capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
-			 ap->nextMessageNumber++,
-			 iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
-	skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_ATOMIC);
-	if (!skb) {
-		dev_err(cs->dev, "%s: out of memory\n", __func__);
-		return;
-	}
-	if (capi_cmsg2message(&iif->hcmsg,
-			  __skb_put(skb, CAPI_DISCONNECT_B3_IND_BASELEN))) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
-	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/**
- * gigaset_isdn_connD() - signal D channel connect
- * @bcs:	B channel descriptor structure.
- *
- * Called by main module at tasklet level to notify the LL that the D channel
- * connection has been established.
- */
-void gigaset_isdn_connD(struct bc_state *bcs)
-{
-	struct cardstate *cs = bcs->cs;
-	struct gigaset_capi_ctr *iif = cs->iif;
-	struct gigaset_capi_appl *ap;
-	struct sk_buff *skb;
-	unsigned int msgsize;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bcs->aplock, flags);
-	ap = bcs->ap;
-	if (!ap) {
-		spin_unlock_irqrestore(&bcs->aplock, flags);
-		gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
-		return;
-	}
-	if (bcs->apconnstate == APCONN_NONE) {
-		spin_unlock_irqrestore(&bcs->aplock, flags);
-		dev_warn(cs->dev, "%s: application %u not connected\n",
-			 __func__, ap->id);
-		return;
-	}
-	spin_unlock_irqrestore(&bcs->aplock, flags);
-	while (ap->bcnext) {
-		/* this should never happen */
-		dev_warn(cs->dev, "%s: dropping extra application %u\n",
-			 __func__, ap->bcnext->id);
-		send_disconnect_ind(bcs, ap->bcnext,
-				    CapiCallGivenToOtherApplication);
-		ap->bcnext = ap->bcnext->bcnext;
-	}
-
-	/* prepare CONNECT_ACTIVE_IND message
-	 * Note: LLC not supported by device
-	 */
-	capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_CONNECT_ACTIVE, CAPI_IND,
-			 ap->nextMessageNumber++,
-			 iif->ctr.cnr | ((bcs->channel + 1) << 8));
-
-	/* minimum size, all structs empty */
-	msgsize = CAPI_CONNECT_ACTIVE_IND_BASELEN;
-
-	/* ToDo: set parameter: Connected number
-	 * (requires ev-layer state machine extension to collect
-	 * ZCON device reply)
-	 */
-
-	/* build and emit CONNECT_ACTIVE_IND message */
-	skb = alloc_skb(msgsize, GFP_ATOMIC);
-	if (!skb) {
-		dev_err(cs->dev, "%s: out of memory\n", __func__);
-		return;
-	}
-	if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
-	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/**
- * gigaset_isdn_hupD() - signal D channel hangup
- * @bcs:	B channel descriptor structure.
- *
- * Called by main module at tasklet level to notify the LL that the D channel
- * connection has been shut down.
- */
-void gigaset_isdn_hupD(struct bc_state *bcs)
-{
-	struct gigaset_capi_appl *ap;
-	unsigned long flags;
-
-	/*
-	 * ToDo: pass on reason code reported by device
-	 * (requires ev-layer state machine extension to collect
-	 * ZCAU device reply)
-	 */
-	spin_lock_irqsave(&bcs->aplock, flags);
-	while (bcs->ap != NULL) {
-		ap = bcs->ap;
-		bcs->ap = ap->bcnext;
-		spin_unlock_irqrestore(&bcs->aplock, flags);
-		send_disconnect_b3_ind(bcs, ap);
-		send_disconnect_ind(bcs, ap, 0);
-		spin_lock_irqsave(&bcs->aplock, flags);
-	}
-	bcs->apconnstate = APCONN_NONE;
-	spin_unlock_irqrestore(&bcs->aplock, flags);
-}
-
-/**
- * gigaset_isdn_connB() - signal B channel connect
- * @bcs:	B channel descriptor structure.
- *
- * Called by main module at tasklet level to notify the LL that the B channel
- * connection has been established.
- */
-void gigaset_isdn_connB(struct bc_state *bcs)
-{
-	struct cardstate *cs = bcs->cs;
-	struct gigaset_capi_ctr *iif = cs->iif;
-	struct gigaset_capi_appl *ap;
-	struct sk_buff *skb;
-	unsigned long flags;
-	unsigned int msgsize;
-	u8 command;
-
-	spin_lock_irqsave(&bcs->aplock, flags);
-	ap = bcs->ap;
-	if (!ap) {
-		spin_unlock_irqrestore(&bcs->aplock, flags);
-		gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
-		return;
-	}
-	if (!bcs->apconnstate) {
-		spin_unlock_irqrestore(&bcs->aplock, flags);
-		dev_warn(cs->dev, "%s: application %u not connected\n",
-			 __func__, ap->id);
-		return;
-	}
-
-	/*
-	 * emit CONNECT_B3_ACTIVE_IND if we already got CONNECT_B3_REQ;
-	 * otherwise we have to emit CONNECT_B3_IND first, and follow up with
-	 * CONNECT_B3_ACTIVE_IND in reply to CONNECT_B3_RESP
-	 * Parameters in both cases always: NCCI = 1, NCPI empty
-	 */
-	if (bcs->apconnstate >= APCONN_ACTIVE) {
-		command = CAPI_CONNECT_B3_ACTIVE;
-		msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
-	} else {
-		command = CAPI_CONNECT_B3;
-		msgsize = CAPI_CONNECT_B3_IND_BASELEN;
-	}
-	bcs->apconnstate = APCONN_ACTIVE;
-
-	spin_unlock_irqrestore(&bcs->aplock, flags);
-
-	while (ap->bcnext) {
-		/* this should never happen */
-		dev_warn(cs->dev, "%s: dropping extra application %u\n",
-			 __func__, ap->bcnext->id);
-		send_disconnect_ind(bcs, ap->bcnext,
-				    CapiCallGivenToOtherApplication);
-		ap->bcnext = ap->bcnext->bcnext;
-	}
-
-	capi_cmsg_header(&iif->hcmsg, ap->id, command, CAPI_IND,
-			 ap->nextMessageNumber++,
-			 iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
-	skb = alloc_skb(msgsize, GFP_ATOMIC);
-	if (!skb) {
-		dev_err(cs->dev, "%s: out of memory\n", __func__);
-		return;
-	}
-	if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
-	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/**
- * gigaset_isdn_hupB() - signal B channel hangup
- * @bcs:	B channel descriptor structure.
- *
- * Called by main module to notify the LL that the B channel connection has
- * been shut down.
- */
-void gigaset_isdn_hupB(struct bc_state *bcs)
-{
-	struct gigaset_capi_appl *ap = bcs->ap;
-
-	/* ToDo: assure order of DISCONNECT_B3_IND and DISCONNECT_IND ? */
-
-	if (!ap) {
-		gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
-		return;
-	}
-
-	send_disconnect_b3_ind(bcs, ap);
-}
-
-/**
- * gigaset_isdn_start() - signal device availability
- * @cs:		device descriptor structure.
- *
- * Called by main module to notify the LL that the device is available for
- * use.
- */
-void gigaset_isdn_start(struct cardstate *cs)
-{
-	struct gigaset_capi_ctr *iif = cs->iif;
-
-	/* fill profile data: manufacturer name */
-	strcpy(iif->ctr.manu, "Siemens");
-	/* CAPI and device version */
-	iif->ctr.version.majorversion = 2;		/* CAPI 2.0 */
-	iif->ctr.version.minorversion = 0;
-	/* ToDo: check/assert cs->gotfwver? */
-	iif->ctr.version.majormanuversion = cs->fwver[0];
-	iif->ctr.version.minormanuversion = cs->fwver[1];
-	/* number of B channels supported */
-	iif->ctr.profile.nbchannel = cs->channels;
-	/* global options: internal controller, supplementary services */
-	iif->ctr.profile.goptions = 0x11;
-	/* B1 protocols: 64 kbit/s HDLC or transparent */
-	iif->ctr.profile.support1 =  0x03;
-	/* B2 protocols: transparent only */
-	/* ToDo: X.75 SLP ? */
-	iif->ctr.profile.support2 =  0x02;
-	/* B3 protocols: transparent only */
-	iif->ctr.profile.support3 =  0x01;
-	/* no serial number */
-	strcpy(iif->ctr.serial, "0");
-	capi_ctr_ready(&iif->ctr);
-}
-
-/**
- * gigaset_isdn_stop() - signal device unavailability
- * @cs:		device descriptor structure.
- *
- * Called by main module to notify the LL that the device is no longer
- * available for use.
- */
-void gigaset_isdn_stop(struct cardstate *cs)
-{
-	struct gigaset_capi_ctr *iif = cs->iif;
-	capi_ctr_down(&iif->ctr);
-}
-
-/*
- * kernel CAPI callback methods
- * ============================
- */
-
-/*
- * register CAPI application
- */
-static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl,
-				  capi_register_params *rp)
-{
-	struct gigaset_capi_ctr *iif
-		= container_of(ctr, struct gigaset_capi_ctr, ctr);
-	struct cardstate *cs = ctr->driverdata;
-	struct gigaset_capi_appl *ap;
-
-	gig_dbg(DEBUG_CMD, "%s [%u] l3cnt=%u blkcnt=%u blklen=%u",
-		__func__, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen);
-
-	list_for_each_entry(ap, &iif->appls, ctrlist)
-		if (ap->id == appl) {
-			dev_notice(cs->dev,
-				   "application %u already registered\n", appl);
-			return;
-		}
-
-	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
-	if (!ap) {
-		dev_err(cs->dev, "%s: out of memory\n", __func__);
-		return;
-	}
-	ap->id = appl;
-	ap->rp = *rp;
-
-	list_add(&ap->ctrlist, &iif->appls);
-	dev_info(cs->dev, "application %u registered\n", ap->id);
-}
-
-/*
- * remove CAPI application from channel
- * helper function to keep indentation levels down and stay in 80 columns
- */
-
-static inline void remove_appl_from_channel(struct bc_state *bcs,
-					    struct gigaset_capi_appl *ap)
-{
-	struct cardstate *cs = bcs->cs;
-	struct gigaset_capi_appl *bcap;
-	unsigned long flags;
-	int prevconnstate;
-
-	spin_lock_irqsave(&bcs->aplock, flags);
-	bcap = bcs->ap;
-	if (bcap == NULL) {
-		spin_unlock_irqrestore(&bcs->aplock, flags);
-		return;
-	}
-
-	/* check first application on channel */
-	if (bcap == ap) {
-		bcs->ap = ap->bcnext;
-		if (bcs->ap != NULL) {
-			spin_unlock_irqrestore(&bcs->aplock, flags);
-			return;
-		}
-
-		/* none left, clear channel state */
-		prevconnstate = bcs->apconnstate;
-		bcs->apconnstate = APCONN_NONE;
-		spin_unlock_irqrestore(&bcs->aplock, flags);
-
-		if (prevconnstate == APCONN_ACTIVE) {
-			dev_notice(cs->dev, "%s: hanging up channel %u\n",
-				   __func__, bcs->channel);
-			gigaset_add_event(cs, &bcs->at_state,
-					  EV_HUP, NULL, 0, NULL);
-			gigaset_schedule_event(cs);
-		}
-		return;
-	}
-
-	/* check remaining list */
-	do {
-		if (bcap->bcnext == ap) {
-			bcap->bcnext = bcap->bcnext->bcnext;
-			spin_unlock_irqrestore(&bcs->aplock, flags);
-			return;
-		}
-		bcap = bcap->bcnext;
-	} while (bcap != NULL);
-	spin_unlock_irqrestore(&bcs->aplock, flags);
-}
-
-/*
- * release CAPI application
- */
-static void gigaset_release_appl(struct capi_ctr *ctr, u16 appl)
-{
-	struct gigaset_capi_ctr *iif
-		= container_of(ctr, struct gigaset_capi_ctr, ctr);
-	struct cardstate *cs = iif->ctr.driverdata;
-	struct gigaset_capi_appl *ap, *tmp;
-	unsigned ch;
-
-	gig_dbg(DEBUG_CMD, "%s [%u]", __func__, appl);
-
-	list_for_each_entry_safe(ap, tmp, &iif->appls, ctrlist)
-		if (ap->id == appl) {
-			/* remove from any channels */
-			for (ch = 0; ch < cs->channels; ch++)
-				remove_appl_from_channel(&cs->bcs[ch], ap);
-
-			/* remove from registration list */
-			list_del(&ap->ctrlist);
-			kfree(ap);
-			dev_info(cs->dev, "application %u released\n", appl);
-		}
-}
-
-/*
- * =====================================================================
- * outgoing CAPI message handler
- * =====================================================================
- */
-
-/*
- * helper function: emit reply message with given Info value
- */
-static void send_conf(struct gigaset_capi_ctr *iif,
-		      struct gigaset_capi_appl *ap,
-		      struct sk_buff *skb,
-		      u16 info)
-{
-	struct cardstate *cs = iif->ctr.driverdata;
-
-	/*
-	 * _CONF replies always only have NCCI and Info parameters
-	 * so they'll fit into the _REQ message skb
-	 */
-	capi_cmsg_answer(&iif->acmsg);
-	iif->acmsg.Info = info;
-	if (capi_cmsg2message(&iif->acmsg, skb->data)) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	__skb_trim(skb, CAPI_STDCONF_LEN);
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
-	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/*
- * process FACILITY_REQ message
- */
-static void do_facility_req(struct gigaset_capi_ctr *iif,
-			    struct gigaset_capi_appl *ap,
-			    struct sk_buff *skb)
-{
-	struct cardstate *cs = iif->ctr.driverdata;
-	_cmsg *cmsg = &iif->acmsg;
-	struct sk_buff *cskb;
-	u8 *pparam;
-	unsigned int msgsize = CAPI_FACILITY_CONF_BASELEN;
-	u16 function, info;
-	static u8 confparam[10];	/* max. 9 octets + length byte */
-
-	/* decode message */
-	if (capi_message2cmsg(cmsg, skb->data)) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
-	/*
-	 * Facility Request Parameter is not decoded by capi_message2cmsg()
-	 * encoding depends on Facility Selector
-	 */
-	switch (cmsg->FacilitySelector) {
-	case CAPI_FACILITY_DTMF:	/* ToDo */
-		info = CapiFacilityNotSupported;
-		confparam[0] = 2;	/* length */
-		/* DTMF information: Unknown DTMF request */
-		capimsg_setu16(confparam, 1, 2);
-		break;
-
-	case CAPI_FACILITY_V42BIS:	/* not supported */
-		info = CapiFacilityNotSupported;
-		confparam[0] = 2;	/* length */
-		/* V.42 bis information: not available */
-		capimsg_setu16(confparam, 1, 1);
-		break;
-
-	case CAPI_FACILITY_SUPPSVC:
-		/* decode Function parameter */
-		pparam = cmsg->FacilityRequestParameter;
-		if (pparam == NULL || pparam[0] < 2) {
-			dev_notice(cs->dev, "%s: %s missing\n", "FACILITY_REQ",
-				   "Facility Request Parameter");
-			send_conf(iif, ap, skb, CapiIllMessageParmCoding);
-			return;
-		}
-		function = CAPIMSG_U16(pparam, 1);
-		switch (function) {
-		case CAPI_SUPPSVC_GETSUPPORTED:
-			info = CapiSuccess;
-			/* Supplementary Service specific parameter */
-			confparam[3] = 6;	/* length */
-			/* Supplementary services info: Success */
-			capimsg_setu16(confparam, 4, CapiSuccess);
-			/* Supported Services: none */
-			capimsg_setu32(confparam, 6, 0);
-			break;
-		case CAPI_SUPPSVC_LISTEN:
-			if (pparam[0] < 7 || pparam[3] < 4) {
-				dev_notice(cs->dev, "%s: %s missing\n",
-					   "FACILITY_REQ", "Notification Mask");
-				send_conf(iif, ap, skb,
-					  CapiIllMessageParmCoding);
-				return;
-			}
-			if (CAPIMSG_U32(pparam, 4) != 0) {
-				dev_notice(cs->dev,
-					   "%s: unsupported supplementary service notification mask 0x%x\n",
-					   "FACILITY_REQ", CAPIMSG_U32(pparam, 4));
-				info = CapiFacilitySpecificFunctionNotSupported;
-				confparam[3] = 2;	/* length */
-				capimsg_setu16(confparam, 4,
-					       CapiSupplementaryServiceNotSupported);
-				break;
-			}
-			info = CapiSuccess;
-			confparam[3] = 2;	/* length */
-			capimsg_setu16(confparam, 4, CapiSuccess);
-			break;
-
-		/* ToDo: add supported services */
-
-		default:
-			dev_notice(cs->dev,
-				   "%s: unsupported supplementary service function 0x%04x\n",
-				   "FACILITY_REQ", function);
-			info = CapiFacilitySpecificFunctionNotSupported;
-			/* Supplementary Service specific parameter */
-			confparam[3] = 2;	/* length */
-			/* Supplementary services info: not supported */
-			capimsg_setu16(confparam, 4,
-				       CapiSupplementaryServiceNotSupported);
-		}
-
-		/* Facility confirmation parameter */
-		confparam[0] = confparam[3] + 3;	/* total length */
-		/* Function: copy from _REQ message */
-		capimsg_setu16(confparam, 1, function);
-		/* Supplementary Service specific parameter already set above */
-		break;
-
-	case CAPI_FACILITY_WAKEUP:	/* ToDo */
-		info = CapiFacilityNotSupported;
-		confparam[0] = 2;	/* length */
-		/* Number of accepted awake request parameters: 0 */
-		capimsg_setu16(confparam, 1, 0);
-		break;
-
-	default:
-		info = CapiFacilityNotSupported;
-		confparam[0] = 0;	/* empty struct */
-	}
-
-	/* send FACILITY_CONF with given Info and confirmation parameter */
-	dev_kfree_skb_any(skb);
-	capi_cmsg_answer(cmsg);
-	cmsg->Info = info;
-	cmsg->FacilityConfirmationParameter = confparam;
-	msgsize += confparam[0];	/* length */
-	cskb = alloc_skb(msgsize, GFP_ATOMIC);
-	if (!cskb) {
-		dev_err(cs->dev, "%s: out of memory\n", __func__);
-		return;
-	}
-	if (capi_cmsg2message(cmsg, __skb_put(cskb, msgsize))) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(cskb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, cmsg);
-	capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
-}
-
-
-/*
- * process LISTEN_REQ message
- * just store the masks in the application data structure
- */
-static void do_listen_req(struct gigaset_capi_ctr *iif,
-			  struct gigaset_capi_appl *ap,
-			  struct sk_buff *skb)
-{
-	struct cardstate *cs = iif->ctr.driverdata;
-
-	/* decode message */
-	if (capi_message2cmsg(&iif->acmsg, skb->data)) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
-
-	/* store listening parameters */
-	ap->listenInfoMask = iif->acmsg.InfoMask;
-	ap->listenCIPmask = iif->acmsg.CIPmask;
-	send_conf(iif, ap, skb, CapiSuccess);
-}
-
-/*
- * process ALERT_REQ message
- * nothing to do, Gigaset always alerts anyway
- */
-static void do_alert_req(struct gigaset_capi_ctr *iif,
-			 struct gigaset_capi_appl *ap,
-			 struct sk_buff *skb)
-{
-	struct cardstate *cs = iif->ctr.driverdata;
-
-	/* decode message */
-	if (capi_message2cmsg(&iif->acmsg, skb->data)) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
-	send_conf(iif, ap, skb, CapiAlertAlreadySent);
-}
-
-/*
- * process CONNECT_REQ message
- * allocate a B channel, prepare dial commands, queue a DIAL event,
- * emit CONNECT_CONF reply
- */
-static void do_connect_req(struct gigaset_capi_ctr *iif,
-			   struct gigaset_capi_appl *ap,
-			   struct sk_buff *skb)
-{
-	struct cardstate *cs = iif->ctr.driverdata;
-	_cmsg *cmsg = &iif->acmsg;
-	struct bc_state *bcs;
-	char **commands;
-	char *s;
-	u8 *pp;
-	unsigned long flags;
-	int i, l, lbc, lhlc;
-	u16 info;
-
-	/* decode message */
-	if (capi_message2cmsg(cmsg, skb->data)) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
-	/* get free B channel & construct PLCI */
-	bcs = gigaset_get_free_channel(cs);
-	if (!bcs) {
-		dev_notice(cs->dev, "%s: no B channel available\n",
-			   "CONNECT_REQ");
-		send_conf(iif, ap, skb, CapiNoPlciAvailable);
-		return;
-	}
-	spin_lock_irqsave(&bcs->aplock, flags);
-	if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE)
-		dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n",
-			 __func__, bcs->ap, bcs->apconnstate);
-	ap->bcnext = NULL;
-	bcs->ap = ap;
-	bcs->apconnstate = APCONN_SETUP;
-	spin_unlock_irqrestore(&bcs->aplock, flags);
-
-	bcs->rx_bufsize = ap->rp.datablklen;
-	dev_kfree_skb(bcs->rx_skb);
-	gigaset_new_rx_skb(bcs);
-	cmsg->adr.adrPLCI |= (bcs->channel + 1) << 8;
-
-	/* build command table */
-	commands = kcalloc(AT_NUM, sizeof(*commands), GFP_KERNEL);
-	if (!commands)
-		goto oom;
-
-	/* encode parameter: Called party number */
-	pp = cmsg->CalledPartyNumber;
-	if (pp == NULL || *pp == 0) {
-		dev_notice(cs->dev, "%s: %s missing\n",
-			   "CONNECT_REQ", "Called party number");
-		info = CapiIllMessageParmCoding;
-		goto error;
-	}
-	l = *pp++;
-	/* check type of number/numbering plan byte */
-	switch (*pp) {
-	case 0x80:	/* unknown type / unknown numbering plan */
-	case 0x81:	/* unknown type / ISDN/Telephony numbering plan */
-		break;
-	default:	/* others: warn about potential misinterpretation */
-		dev_notice(cs->dev, "%s: %s type/plan 0x%02x unsupported\n",
-			   "CONNECT_REQ", "Called party number", *pp);
-	}
-	pp++;
-	l--;
-	/* translate "**" internal call prefix to CTP value */
-	if (l >= 2 && pp[0] == '*' && pp[1] == '*') {
-		s = "^SCTP=0\r";
-		pp += 2;
-		l -= 2;
-	} else {
-		s = "^SCTP=1\r";
-	}
-	commands[AT_TYPE] = kstrdup(s, GFP_KERNEL);
-	if (!commands[AT_TYPE])
-		goto oom;
-	commands[AT_DIAL] = kmalloc(l + 3, GFP_KERNEL);
-	if (!commands[AT_DIAL])
-		goto oom;
-	snprintf(commands[AT_DIAL], l + 3, "D%.*s\r", l, pp);
-
-	/* encode parameter: Calling party number */
-	pp = cmsg->CallingPartyNumber;
-	if (pp != NULL && *pp > 0) {
-		l = *pp++;
-
-		/* check type of number/numbering plan byte */
-		/* ToDo: allow for/handle Ext=1? */
-		switch (*pp) {
-		case 0x00:	/* unknown type / unknown numbering plan */
-		case 0x01:	/* unknown type / ISDN/Telephony num. plan */
-			break;
-		default:
-			dev_notice(cs->dev,
-				   "%s: %s type/plan 0x%02x unsupported\n",
-				   "CONNECT_REQ", "Calling party number", *pp);
-		}
-		pp++;
-		l--;
-
-		/* check presentation indicator */
-		if (!l) {
-			dev_notice(cs->dev, "%s: %s IE truncated\n",
-				   "CONNECT_REQ", "Calling party number");
-			info = CapiIllMessageParmCoding;
-			goto error;
-		}
-		switch (*pp & 0xfc) { /* ignore Screening indicator */
-		case 0x80:	/* Presentation allowed */
-			s = "^SCLIP=1\r";
-			break;
-		case 0xa0:	/* Presentation restricted */
-			s = "^SCLIP=0\r";
-			break;
-		default:
-			dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
-				   "CONNECT_REQ",
-				   "Presentation/Screening indicator",
-				   *pp);
-			s = "^SCLIP=1\r";
-		}
-		commands[AT_CLIP] = kstrdup(s, GFP_KERNEL);
-		if (!commands[AT_CLIP])
-			goto oom;
-		pp++;
-		l--;
-
-		if (l) {
-			/* number */
-			commands[AT_MSN] = kmalloc(l + 8, GFP_KERNEL);
-			if (!commands[AT_MSN])
-				goto oom;
-			snprintf(commands[AT_MSN], l + 8, "^SMSN=%*s\r", l, pp);
-		}
-	}
-
-	/* check parameter: CIP Value */
-	if (cmsg->CIPValue >= ARRAY_SIZE(cip2bchlc) ||
-	    (cmsg->CIPValue > 0 && cip2bchlc[cmsg->CIPValue].bc == NULL)) {
-		dev_notice(cs->dev, "%s: unknown CIP value %d\n",
-			   "CONNECT_REQ", cmsg->CIPValue);
-		info = CapiCipValueUnknown;
-		goto error;
-	}
-
-	/*
-	 * check/encode parameters: BC & HLC
-	 * must be encoded together as device doesn't accept HLC separately
-	 * explicit parameters override values derived from CIP
-	 */
-
-	/* determine lengths */
-	if (cmsg->BC && cmsg->BC[0])		/* BC specified explicitly */
-		lbc = 2 * cmsg->BC[0];
-	else if (cip2bchlc[cmsg->CIPValue].bc)	/* BC derived from CIP */
-		lbc = strlen(cip2bchlc[cmsg->CIPValue].bc);
-	else					/* no BC */
-		lbc = 0;
-	if (cmsg->HLC && cmsg->HLC[0])		/* HLC specified explicitly */
-		lhlc = 2 * cmsg->HLC[0];
-	else if (cip2bchlc[cmsg->CIPValue].hlc)	/* HLC derived from CIP */
-		lhlc = strlen(cip2bchlc[cmsg->CIPValue].hlc);
-	else					/* no HLC */
-		lhlc = 0;
-
-	if (lbc) {
-		/* have BC: allocate and assemble command string */
-		l = lbc + 7;		/* "^SBC=" + value + "\r" + null byte */
-		if (lhlc)
-			l += lhlc + 7;	/* ";^SHLC=" + value */
-		commands[AT_BC] = kmalloc(l, GFP_KERNEL);
-		if (!commands[AT_BC])
-			goto oom;
-		strcpy(commands[AT_BC], "^SBC=");
-		if (cmsg->BC && cmsg->BC[0])	/* BC specified explicitly */
-			decode_ie(cmsg->BC, commands[AT_BC] + 5);
-		else				/* BC derived from CIP */
-			strcpy(commands[AT_BC] + 5,
-			       cip2bchlc[cmsg->CIPValue].bc);
-		if (lhlc) {
-			strcpy(commands[AT_BC] + lbc + 5, ";^SHLC=");
-			if (cmsg->HLC && cmsg->HLC[0])
-				/* HLC specified explicitly */
-				decode_ie(cmsg->HLC,
-					  commands[AT_BC] + lbc + 12);
-			else	/* HLC derived from CIP */
-				strcpy(commands[AT_BC] + lbc + 12,
-				       cip2bchlc[cmsg->CIPValue].hlc);
-		}
-		strcpy(commands[AT_BC] + l - 2, "\r");
-	} else {
-		/* no BC */
-		if (lhlc) {
-			dev_notice(cs->dev, "%s: cannot set HLC without BC\n",
-				   "CONNECT_REQ");
-			info = CapiIllMessageParmCoding; /* ? */
-			goto error;
-		}
-	}
-
-	/* check/encode parameter: B Protocol */
-	if (cmsg->BProtocol == CAPI_DEFAULT) {
-		bcs->proto2 = L2_HDLC;
-		dev_warn(cs->dev,
-			 "B2 Protocol X.75 SLP unsupported, using Transparent\n");
-	} else {
-		switch (cmsg->B1protocol) {
-		case 0:
-			bcs->proto2 = L2_HDLC;
-			break;
-		case 1:
-			bcs->proto2 = L2_VOICE;
-			break;
-		default:
-			dev_warn(cs->dev,
-				 "B1 Protocol %u unsupported, using Transparent\n",
-				 cmsg->B1protocol);
-			bcs->proto2 = L2_VOICE;
-		}
-		if (cmsg->B2protocol != 1)
-			dev_warn(cs->dev,
-				 "B2 Protocol %u unsupported, using Transparent\n",
-				 cmsg->B2protocol);
-		if (cmsg->B3protocol != 0)
-			dev_warn(cs->dev,
-				 "B3 Protocol %u unsupported, using Transparent\n",
-				 cmsg->B3protocol);
-		ignore_cstruct_param(cs, cmsg->B1configuration,
-				     "CONNECT_REQ", "B1 Configuration");
-		ignore_cstruct_param(cs, cmsg->B2configuration,
-				     "CONNECT_REQ", "B2 Configuration");
-		ignore_cstruct_param(cs, cmsg->B3configuration,
-				     "CONNECT_REQ", "B3 Configuration");
-	}
-	commands[AT_PROTO] = kmalloc(9, GFP_KERNEL);
-	if (!commands[AT_PROTO])
-		goto oom;
-	snprintf(commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
-
-	/* ToDo: check/encode remaining parameters */
-	ignore_cstruct_param(cs, cmsg->CalledPartySubaddress,
-			     "CONNECT_REQ", "Called pty subaddr");
-	ignore_cstruct_param(cs, cmsg->CallingPartySubaddress,
-			     "CONNECT_REQ", "Calling pty subaddr");
-	ignore_cstruct_param(cs, cmsg->LLC,
-			     "CONNECT_REQ", "LLC");
-	if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
-		ignore_cstruct_param(cs, cmsg->BChannelinformation,
-				     "CONNECT_REQ", "B Channel Information");
-		ignore_cstruct_param(cs, cmsg->Keypadfacility,
-				     "CONNECT_REQ", "Keypad Facility");
-		ignore_cstruct_param(cs, cmsg->Useruserdata,
-				     "CONNECT_REQ", "User-User Data");
-		ignore_cstruct_param(cs, cmsg->Facilitydataarray,
-				     "CONNECT_REQ", "Facility Data Array");
-	}
-
-	/* encode parameter: B channel to use */
-	commands[AT_ISO] = kmalloc(9, GFP_KERNEL);
-	if (!commands[AT_ISO])
-		goto oom;
-	snprintf(commands[AT_ISO], 9, "^SISO=%u\r",
-		 (unsigned) bcs->channel + 1);
-
-	/* queue & schedule EV_DIAL event */
-	if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, commands,
-			       bcs->at_state.seq_index, NULL)) {
-		info = CAPI_MSGOSRESOURCEERR;
-		goto error;
-	}
-	gigaset_schedule_event(cs);
-	send_conf(iif, ap, skb, CapiSuccess);
-	return;
-
-oom:
-	dev_err(cs->dev, "%s: out of memory\n", __func__);
-	info = CAPI_MSGOSRESOURCEERR;
-error:
-	if (commands)
-		for (i = 0; i < AT_NUM; i++)
-			kfree(commands[i]);
-	kfree(commands);
-	gigaset_free_channel(bcs);
-	send_conf(iif, ap, skb, info);
-}
-
-/*
- * process CONNECT_RESP message
- * checks protocol parameters and queues an ACCEPT or HUP event
- */
-static void do_connect_resp(struct gigaset_capi_ctr *iif,
-			    struct gigaset_capi_appl *ap,
-			    struct sk_buff *skb)
-{
-	struct cardstate *cs = iif->ctr.driverdata;
-	_cmsg *cmsg = &iif->acmsg;
-	struct bc_state *bcs;
-	struct gigaset_capi_appl *oap;
-	unsigned long flags;
-	int channel;
-
-	/* decode message */
-	if (capi_message2cmsg(cmsg, skb->data)) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, cmsg);
-	dev_kfree_skb_any(skb);
-
-	/* extract and check channel number from PLCI */
-	channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
-	if (!channel || channel > cs->channels) {
-		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
-			   "CONNECT_RESP", "PLCI", cmsg->adr.adrPLCI);
-		return;
-	}
-	bcs = cs->bcs + channel - 1;
-
-	switch (cmsg->Reject) {
-	case 0:		/* Accept */
-		/* drop all competing applications, keep only this one */
-		spin_lock_irqsave(&bcs->aplock, flags);
-		while (bcs->ap != NULL) {
-			oap = bcs->ap;
-			bcs->ap = oap->bcnext;
-			if (oap != ap) {
-				spin_unlock_irqrestore(&bcs->aplock, flags);
-				send_disconnect_ind(bcs, oap,
-						    CapiCallGivenToOtherApplication);
-				spin_lock_irqsave(&bcs->aplock, flags);
-			}
-		}
-		ap->bcnext = NULL;
-		bcs->ap = ap;
-		spin_unlock_irqrestore(&bcs->aplock, flags);
-
-		bcs->rx_bufsize = ap->rp.datablklen;
-		dev_kfree_skb(bcs->rx_skb);
-		gigaset_new_rx_skb(bcs);
-		bcs->chstate |= CHS_NOTIFY_LL;
-
-		/* check/encode B channel protocol */
-		if (cmsg->BProtocol == CAPI_DEFAULT) {
-			bcs->proto2 = L2_HDLC;
-			dev_warn(cs->dev,
-				 "B2 Protocol X.75 SLP unsupported, using Transparent\n");
-		} else {
-			switch (cmsg->B1protocol) {
-			case 0:
-				bcs->proto2 = L2_HDLC;
-				break;
-			case 1:
-				bcs->proto2 = L2_VOICE;
-				break;
-			default:
-				dev_warn(cs->dev,
-					 "B1 Protocol %u unsupported, using Transparent\n",
-					 cmsg->B1protocol);
-				bcs->proto2 = L2_VOICE;
-			}
-			if (cmsg->B2protocol != 1)
-				dev_warn(cs->dev,
-					 "B2 Protocol %u unsupported, using Transparent\n",
-					 cmsg->B2protocol);
-			if (cmsg->B3protocol != 0)
-				dev_warn(cs->dev,
-					 "B3 Protocol %u unsupported, using Transparent\n",
-					 cmsg->B3protocol);
-			ignore_cstruct_param(cs, cmsg->B1configuration,
-					     "CONNECT_RESP", "B1 Configuration");
-			ignore_cstruct_param(cs, cmsg->B2configuration,
-					     "CONNECT_RESP", "B2 Configuration");
-			ignore_cstruct_param(cs, cmsg->B3configuration,
-					     "CONNECT_RESP", "B3 Configuration");
-		}
-
-		/* ToDo: check/encode remaining parameters */
-		ignore_cstruct_param(cs, cmsg->ConnectedNumber,
-				     "CONNECT_RESP", "Connected Number");
-		ignore_cstruct_param(cs, cmsg->ConnectedSubaddress,
-				     "CONNECT_RESP", "Connected Subaddress");
-		ignore_cstruct_param(cs, cmsg->LLC,
-				     "CONNECT_RESP", "LLC");
-		if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
-			ignore_cstruct_param(cs, cmsg->BChannelinformation,
-					     "CONNECT_RESP", "BChannel Information");
-			ignore_cstruct_param(cs, cmsg->Keypadfacility,
-					     "CONNECT_RESP", "Keypad Facility");
-			ignore_cstruct_param(cs, cmsg->Useruserdata,
-					     "CONNECT_RESP", "User-User Data");
-			ignore_cstruct_param(cs, cmsg->Facilitydataarray,
-					     "CONNECT_RESP", "Facility Data Array");
-		}
-
-		/* Accept call */
-		if (!gigaset_add_event(cs, &cs->bcs[channel - 1].at_state,
-				       EV_ACCEPT, NULL, 0, NULL))
-			return;
-		gigaset_schedule_event(cs);
-		return;
-
-	case 1:			/* Ignore */
-		/* send DISCONNECT_IND to this application */
-		send_disconnect_ind(bcs, ap, 0);
-
-		/* remove it from the list of listening apps */
-		spin_lock_irqsave(&bcs->aplock, flags);
-		if (bcs->ap == ap) {
-			bcs->ap = ap->bcnext;
-			if (bcs->ap == NULL) {
-				/* last one: stop ev-layer hupD notifications */
-				bcs->apconnstate = APCONN_NONE;
-				bcs->chstate &= ~CHS_NOTIFY_LL;
-			}
-			spin_unlock_irqrestore(&bcs->aplock, flags);
-			return;
-		}
-		for (oap = bcs->ap; oap != NULL; oap = oap->bcnext) {
-			if (oap->bcnext == ap) {
-				oap->bcnext = oap->bcnext->bcnext;
-				spin_unlock_irqrestore(&bcs->aplock, flags);
-				return;
-			}
-		}
-		spin_unlock_irqrestore(&bcs->aplock, flags);
-		dev_err(cs->dev, "%s: application %u not found\n",
-			__func__, ap->id);
-		return;
-
-	default:		/* Reject */
-		/* drop all competing applications, keep only this one */
-		spin_lock_irqsave(&bcs->aplock, flags);
-		while (bcs->ap != NULL) {
-			oap = bcs->ap;
-			bcs->ap = oap->bcnext;
-			if (oap != ap) {
-				spin_unlock_irqrestore(&bcs->aplock, flags);
-				send_disconnect_ind(bcs, oap,
-						    CapiCallGivenToOtherApplication);
-				spin_lock_irqsave(&bcs->aplock, flags);
-			}
-		}
-		ap->bcnext = NULL;
-		bcs->ap = ap;
-		spin_unlock_irqrestore(&bcs->aplock, flags);
-
-		/* reject call - will trigger DISCONNECT_IND for this app */
-		dev_info(cs->dev, "%s: Reject=%x\n",
-			 "CONNECT_RESP", cmsg->Reject);
-		if (!gigaset_add_event(cs, &cs->bcs[channel - 1].at_state,
-				       EV_HUP, NULL, 0, NULL))
-			return;
-		gigaset_schedule_event(cs);
-		return;
-	}
-}
-
-/*
- * process CONNECT_B3_REQ message
- * build NCCI and emit CONNECT_B3_CONF reply
- */
-static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
-			      struct gigaset_capi_appl *ap,
-			      struct sk_buff *skb)
-{
-	struct cardstate *cs = iif->ctr.driverdata;
-	_cmsg *cmsg = &iif->acmsg;
-	struct bc_state *bcs;
-	int channel;
-
-	/* decode message */
-	if (capi_message2cmsg(cmsg, skb->data)) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
-	/* extract and check channel number from PLCI */
-	channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
-	if (!channel || channel > cs->channels) {
-		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
-			   "CONNECT_B3_REQ", "PLCI", cmsg->adr.adrPLCI);
-		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
-		return;
-	}
-	bcs = &cs->bcs[channel - 1];
-
-	/* mark logical connection active */
-	bcs->apconnstate = APCONN_ACTIVE;
-
-	/* build NCCI: always 1 (one B3 connection only) */
-	cmsg->adr.adrNCCI |= 1 << 16;
-
-	/* NCPI parameter: not applicable for B3 Transparent */
-	ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI");
-	send_conf(iif, ap, skb,
-		  (cmsg->NCPI && cmsg->NCPI[0]) ?
-		  CapiNcpiNotSupportedByProtocol : CapiSuccess);
-}
-
-/*
- * process CONNECT_B3_RESP message
- * Depending on the Reject parameter, either emit CONNECT_B3_ACTIVE_IND
- * or queue EV_HUP and emit DISCONNECT_B3_IND.
- * The emitted message is always shorter than the received one,
- * so the skb can be reused.
- */
-static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
-			       struct gigaset_capi_appl *ap,
-			       struct sk_buff *skb)
-{
-	struct cardstate *cs = iif->ctr.driverdata;
-	_cmsg *cmsg = &iif->acmsg;
-	struct bc_state *bcs;
-	int channel;
-	unsigned int msgsize;
-	u8 command;
-
-	/* decode message */
-	if (capi_message2cmsg(cmsg, skb->data)) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
-	/* extract and check channel number and NCCI */
-	channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
-	if (!channel || channel > cs->channels ||
-	    ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
-		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
-			   "CONNECT_B3_RESP", "NCCI", cmsg->adr.adrNCCI);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	bcs = &cs->bcs[channel - 1];
-
-	if (cmsg->Reject) {
-		/* Reject: clear B3 connect received flag */
-		bcs->apconnstate = APCONN_SETUP;
-
-		/* trigger hangup, causing eventual DISCONNECT_IND */
-		if (!gigaset_add_event(cs, &bcs->at_state,
-				       EV_HUP, NULL, 0, NULL)) {
-			dev_kfree_skb_any(skb);
-			return;
-		}
-		gigaset_schedule_event(cs);
-
-		/* emit DISCONNECT_B3_IND */
-		command = CAPI_DISCONNECT_B3;
-		msgsize = CAPI_DISCONNECT_B3_IND_BASELEN;
-	} else {
-		/*
-		 * Accept: emit CONNECT_B3_ACTIVE_IND immediately, as
-		 * we only send CONNECT_B3_IND if the B channel is up
-		 */
-		command = CAPI_CONNECT_B3_ACTIVE;
-		msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
-	}
-	capi_cmsg_header(cmsg, ap->id, command, CAPI_IND,
-			 ap->nextMessageNumber++, cmsg->adr.adrNCCI);
-	__skb_trim(skb, msgsize);
-	if (capi_cmsg2message(cmsg, skb->data)) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, cmsg);
-	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/*
- * process DISCONNECT_REQ message
- * schedule EV_HUP and emit DISCONNECT_B3_IND if necessary,
- * emit DISCONNECT_CONF reply
- */
-static void do_disconnect_req(struct gigaset_capi_ctr *iif,
-			      struct gigaset_capi_appl *ap,
-			      struct sk_buff *skb)
-{
-	struct cardstate *cs = iif->ctr.driverdata;
-	_cmsg *cmsg = &iif->acmsg;
-	struct bc_state *bcs;
-	_cmsg *b3cmsg;
-	struct sk_buff *b3skb;
-	int channel;
-
-	/* decode message */
-	if (capi_message2cmsg(cmsg, skb->data)) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
-	/* extract and check channel number from PLCI */
-	channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
-	if (!channel || channel > cs->channels) {
-		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
-			   "DISCONNECT_REQ", "PLCI", cmsg->adr.adrPLCI);
-		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
-		return;
-	}
-	bcs = cs->bcs + channel - 1;
-
-	/* ToDo: process parameter: Additional info */
-	if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
-		ignore_cstruct_param(cs, cmsg->BChannelinformation,
-				     "DISCONNECT_REQ", "B Channel Information");
-		ignore_cstruct_param(cs, cmsg->Keypadfacility,
-				     "DISCONNECT_REQ", "Keypad Facility");
-		ignore_cstruct_param(cs, cmsg->Useruserdata,
-				     "DISCONNECT_REQ", "User-User Data");
-		ignore_cstruct_param(cs, cmsg->Facilitydataarray,
-				     "DISCONNECT_REQ", "Facility Data Array");
-	}
-
-	/* skip if DISCONNECT_IND already sent */
-	if (!bcs->apconnstate)
-		return;
-
-	/* check for active logical connection */
-	if (bcs->apconnstate >= APCONN_ACTIVE) {
-		/* clear it */
-		bcs->apconnstate = APCONN_SETUP;
-
-		/*
-		 * emit DISCONNECT_B3_IND with cause 0x3301
-		 * use separate cmsg structure, as the content of iif->acmsg
-		 * is still needed for creating the _CONF message
-		 */
-		b3cmsg = kmalloc(sizeof(*b3cmsg), GFP_KERNEL);
-		if (!b3cmsg) {
-			dev_err(cs->dev, "%s: out of memory\n", __func__);
-			send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
-			return;
-		}
-		capi_cmsg_header(b3cmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
-				 ap->nextMessageNumber++,
-				 cmsg->adr.adrPLCI | (1 << 16));
-		b3cmsg->Reason_B3 = CapiProtocolErrorLayer1;
-		b3skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_KERNEL);
-		if (b3skb == NULL) {
-			dev_err(cs->dev, "%s: out of memory\n", __func__);
-			send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
-			kfree(b3cmsg);
-			return;
-		}
-		if (capi_cmsg2message(b3cmsg,
-				      __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN))) {
-			dev_err(cs->dev, "%s: message parser failure\n",
-				__func__);
-			kfree(b3cmsg);
-			dev_kfree_skb_any(b3skb);
-			return;
-		}
-		dump_cmsg(DEBUG_CMD, __func__, b3cmsg);
-		kfree(b3cmsg);
-		capi_ctr_handle_message(&iif->ctr, ap->id, b3skb);
-	}
-
-	/* trigger hangup, causing eventual DISCONNECT_IND */
-	if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
-		send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
-		return;
-	}
-	gigaset_schedule_event(cs);
-
-	/* emit reply */
-	send_conf(iif, ap, skb, CapiSuccess);
-}
-
-/*
- * process DISCONNECT_B3_REQ message
- * schedule EV_HUP and emit DISCONNECT_B3_CONF reply
- */
-static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
-				 struct gigaset_capi_appl *ap,
-				 struct sk_buff *skb)
-{
-	struct cardstate *cs = iif->ctr.driverdata;
-	_cmsg *cmsg = &iif->acmsg;
-	struct bc_state *bcs;
-	int channel;
-
-	/* decode message */
-	if (capi_message2cmsg(cmsg, skb->data)) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
-	/* extract and check channel number and NCCI */
-	channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
-	if (!channel || channel > cs->channels ||
-	    ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
-		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
-			   "DISCONNECT_B3_REQ", "NCCI", cmsg->adr.adrNCCI);
-		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
-		return;
-	}
-	bcs = &cs->bcs[channel - 1];
-
-	/* reject if logical connection not active */
-	if (bcs->apconnstate < APCONN_ACTIVE) {
-		send_conf(iif, ap, skb,
-			  CapiMessageNotSupportedInCurrentState);
-		return;
-	}
-
-	/* trigger hangup, causing eventual DISCONNECT_B3_IND */
-	if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
-		send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
-		return;
-	}
-	gigaset_schedule_event(cs);
-
-	/* NCPI parameter: not applicable for B3 Transparent */
-	ignore_cstruct_param(cs, cmsg->NCPI,
-			     "DISCONNECT_B3_REQ", "NCPI");
-	send_conf(iif, ap, skb,
-		  (cmsg->NCPI && cmsg->NCPI[0]) ?
-		  CapiNcpiNotSupportedByProtocol : CapiSuccess);
-}
-
-/*
- * process DATA_B3_REQ message
- */
-static void do_data_b3_req(struct gigaset_capi_ctr *iif,
-			   struct gigaset_capi_appl *ap,
-			   struct sk_buff *skb)
-{
-	struct cardstate *cs = iif->ctr.driverdata;
-	struct bc_state *bcs;
-	int channel = CAPIMSG_PLCI_PART(skb->data);
-	u16 ncci = CAPIMSG_NCCI_PART(skb->data);
-	u16 msglen = CAPIMSG_LEN(skb->data);
-	u16 datalen = CAPIMSG_DATALEN(skb->data);
-	u16 flags = CAPIMSG_FLAGS(skb->data);
-	u16 msgid = CAPIMSG_MSGID(skb->data);
-	u16 handle = CAPIMSG_HANDLE_REQ(skb->data);
-
-	/* frequent message, avoid _cmsg overhead */
-	dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
-
-	/* check parameters */
-	if (channel == 0 || channel > cs->channels || ncci != 1) {
-		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
-			   "DATA_B3_REQ", "NCCI", CAPIMSG_NCCI(skb->data));
-		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
-		return;
-	}
-	bcs = &cs->bcs[channel - 1];
-	if (msglen != CAPI_DATA_B3_REQ_LEN && msglen != CAPI_DATA_B3_REQ_LEN64)
-		dev_notice(cs->dev, "%s: unexpected length %d\n",
-			   "DATA_B3_REQ", msglen);
-	if (msglen + datalen != skb->len)
-		dev_notice(cs->dev, "%s: length mismatch (%d+%d!=%d)\n",
-			   "DATA_B3_REQ", msglen, datalen, skb->len);
-	if (msglen + datalen > skb->len) {
-		/* message too short for announced data length */
-		send_conf(iif, ap, skb, CapiIllMessageParmCoding); /* ? */
-		return;
-	}
-	if (flags & CAPI_FLAGS_RESERVED) {
-		dev_notice(cs->dev, "%s: reserved flags set (%x)\n",
-			   "DATA_B3_REQ", flags);
-		send_conf(iif, ap, skb, CapiIllMessageParmCoding);
-		return;
-	}
-
-	/* reject if logical connection not active */
-	if (bcs->apconnstate < APCONN_ACTIVE) {
-		send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
-		return;
-	}
-
-	/* pull CAPI message into link layer header */
-	skb_reset_mac_header(skb);
-	skb->mac_len = msglen;
-	skb_pull(skb, msglen);
-
-	/* pass to device-specific module */
-	if (cs->ops->send_skb(bcs, skb) < 0) {
-		send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
-		return;
-	}
-
-	/*
-	 * DATA_B3_CONF will be sent by gigaset_skb_sent() only if "delivery
-	 * confirmation" bit is set; otherwise we have to send it now
-	 */
-	if (!(flags & CAPI_FLAGS_DELIVERY_CONFIRMATION))
-		send_data_b3_conf(cs, &iif->ctr, ap->id, msgid, channel, handle,
-				  flags ? CapiFlagsNotSupportedByProtocol
-				  : CAPI_NOERROR);
-}
-
-/*
- * process RESET_B3_REQ message
- * just always reply "not supported by current protocol"
- */
-static void do_reset_b3_req(struct gigaset_capi_ctr *iif,
-			    struct gigaset_capi_appl *ap,
-			    struct sk_buff *skb)
-{
-	struct cardstate *cs = iif->ctr.driverdata;
-
-	/* decode message */
-	if (capi_message2cmsg(&iif->acmsg, skb->data)) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
-	send_conf(iif, ap, skb,
-		  CapiResetProcedureNotSupportedByCurrentProtocol);
-}
-
-/*
- * unsupported CAPI message handler
- */
-static void do_unsupported(struct gigaset_capi_ctr *iif,
-			   struct gigaset_capi_appl *ap,
-			   struct sk_buff *skb)
-{
-	struct cardstate *cs = iif->ctr.driverdata;
-
-	/* decode message */
-	if (capi_message2cmsg(&iif->acmsg, skb->data)) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
-	send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
-}
-
-/*
- * CAPI message handler: no-op
- */
-static void do_nothing(struct gigaset_capi_ctr *iif,
-		       struct gigaset_capi_appl *ap,
-		       struct sk_buff *skb)
-{
-	struct cardstate *cs = iif->ctr.driverdata;
-
-	/* decode message */
-	if (capi_message2cmsg(&iif->acmsg, skb->data)) {
-		dev_err(cs->dev, "%s: message parser failure\n", __func__);
-		dev_kfree_skb_any(skb);
-		return;
-	}
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
-	dev_kfree_skb_any(skb);
-}
-
-static void do_data_b3_resp(struct gigaset_capi_ctr *iif,
-			    struct gigaset_capi_appl *ap,
-			    struct sk_buff *skb)
-{
-	dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
-	dev_kfree_skb_any(skb);
-}
-
-/* table of outgoing CAPI message handlers with lookup function */
-typedef void (*capi_send_handler_t)(struct gigaset_capi_ctr *,
-				    struct gigaset_capi_appl *,
-				    struct sk_buff *);
-
-static struct {
-	u16 cmd;
-	capi_send_handler_t handler;
-} capi_send_handler_table[] = {
-	/* most frequent messages first for faster lookup */
-	{ CAPI_DATA_B3_REQ, do_data_b3_req },
-	{ CAPI_DATA_B3_RESP, do_data_b3_resp },
-
-	{ CAPI_ALERT_REQ, do_alert_req },
-	{ CAPI_CONNECT_ACTIVE_RESP, do_nothing },
-	{ CAPI_CONNECT_B3_ACTIVE_RESP, do_nothing },
-	{ CAPI_CONNECT_B3_REQ, do_connect_b3_req },
-	{ CAPI_CONNECT_B3_RESP, do_connect_b3_resp },
-	{ CAPI_CONNECT_B3_T90_ACTIVE_RESP, do_nothing },
-	{ CAPI_CONNECT_REQ, do_connect_req },
-	{ CAPI_CONNECT_RESP, do_connect_resp },
-	{ CAPI_DISCONNECT_B3_REQ, do_disconnect_b3_req },
-	{ CAPI_DISCONNECT_B3_RESP, do_nothing },
-	{ CAPI_DISCONNECT_REQ, do_disconnect_req },
-	{ CAPI_DISCONNECT_RESP, do_nothing },
-	{ CAPI_FACILITY_REQ, do_facility_req },
-	{ CAPI_FACILITY_RESP, do_nothing },
-	{ CAPI_LISTEN_REQ, do_listen_req },
-	{ CAPI_SELECT_B_PROTOCOL_REQ, do_unsupported },
-	{ CAPI_RESET_B3_REQ, do_reset_b3_req },
-	{ CAPI_RESET_B3_RESP, do_nothing },
-
-	/*
-	 * ToDo: support overlap sending (requires ev-layer state
-	 * machine extension to generate additional ATD commands)
-	 */
-	{ CAPI_INFO_REQ, do_unsupported },
-	{ CAPI_INFO_RESP, do_nothing },
-
-	/*
-	 * ToDo: what's the proper response for these?
-	 */
-	{ CAPI_MANUFACTURER_REQ, do_nothing },
-	{ CAPI_MANUFACTURER_RESP, do_nothing },
-};
-
-/* look up handler */
-static inline capi_send_handler_t lookup_capi_send_handler(const u16 cmd)
-{
-	size_t i;
-
-	for (i = 0; i < ARRAY_SIZE(capi_send_handler_table); i++)
-		if (capi_send_handler_table[i].cmd == cmd)
-			return capi_send_handler_table[i].handler;
-	return NULL;
-}
-
-
-/**
- * gigaset_send_message() - accept a CAPI message from an application
- * @ctr:	controller descriptor structure.
- * @skb:	CAPI message.
- *
- * Return value: CAPI error code
- * Note: capidrv (and probably others, too) only uses the return value to
- * decide whether it has to free the skb (only if result != CAPI_NOERROR (0))
- */
-static u16 gigaset_send_message(struct capi_ctr *ctr, struct sk_buff *skb)
-{
-	struct gigaset_capi_ctr *iif
-		= container_of(ctr, struct gigaset_capi_ctr, ctr);
-	struct cardstate *cs = ctr->driverdata;
-	struct gigaset_capi_appl *ap;
-	capi_send_handler_t handler;
-
-	/* can only handle linear sk_buffs */
-	if (skb_linearize(skb) < 0) {
-		dev_warn(cs->dev, "%s: skb_linearize failed\n", __func__);
-		return CAPI_MSGOSRESOURCEERR;
-	}
-
-	/* retrieve application data structure */
-	ap = get_appl(iif, CAPIMSG_APPID(skb->data));
-	if (!ap) {
-		dev_notice(cs->dev, "%s: application %u not registered\n",
-			   __func__, CAPIMSG_APPID(skb->data));
-		return CAPI_ILLAPPNR;
-	}
-
-	/* look up command */
-	handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
-	if (!handler) {
-		/* unknown/unsupported message type */
-		if (printk_ratelimit())
-			dev_notice(cs->dev, "%s: unsupported message %u\n",
-				   __func__, CAPIMSG_CMD(skb->data));
-		return CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
-	}
-
-	/* serialize */
-	if (atomic_add_return(1, &iif->sendqlen) > 1) {
-		/* queue behind other messages */
-		skb_queue_tail(&iif->sendqueue, skb);
-		return CAPI_NOERROR;
-	}
-
-	/* process message */
-	handler(iif, ap, skb);
-
-	/* process other messages arrived in the meantime */
-	while (atomic_sub_return(1, &iif->sendqlen) > 0) {
-		skb = skb_dequeue(&iif->sendqueue);
-		if (!skb) {
-			/* should never happen */
-			dev_err(cs->dev, "%s: send queue empty\n", __func__);
-			continue;
-		}
-		ap = get_appl(iif, CAPIMSG_APPID(skb->data));
-		if (!ap) {
-			/* could that happen? */
-			dev_warn(cs->dev, "%s: application %u vanished\n",
-				 __func__, CAPIMSG_APPID(skb->data));
-			continue;
-		}
-		handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
-		if (!handler) {
-			/* should never happen */
-			dev_err(cs->dev, "%s: handler %x vanished\n",
-				__func__, CAPIMSG_CMD(skb->data));
-			continue;
-		}
-		handler(iif, ap, skb);
-	}
-
-	return CAPI_NOERROR;
-}
-
-/**
- * gigaset_procinfo() - build single line description for controller
- * @ctr:	controller descriptor structure.
- *
- * Return value: pointer to generated string (null terminated)
- */
-static char *gigaset_procinfo(struct capi_ctr *ctr)
-{
-	return ctr->name;	/* ToDo: more? */
-}
-
-static int gigaset_proc_show(struct seq_file *m, void *v)
-{
-	struct capi_ctr *ctr = m->private;
-	struct cardstate *cs = ctr->driverdata;
-	char *s;
-	int i;
-
-	seq_printf(m, "%-16s %s\n", "name", ctr->name);
-	seq_printf(m, "%-16s %s %s\n", "dev",
-		   dev_driver_string(cs->dev), dev_name(cs->dev));
-	seq_printf(m, "%-16s %d\n", "id", cs->myid);
-	if (cs->gotfwver)
-		seq_printf(m, "%-16s %d.%d.%d.%d\n", "firmware",
-			   cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]);
-	seq_printf(m, "%-16s %d\n", "channels", cs->channels);
-	seq_printf(m, "%-16s %s\n", "onechannel", cs->onechannel ? "yes" : "no");
-
-	switch (cs->mode) {
-	case M_UNKNOWN:
-		s = "unknown";
-		break;
-	case M_CONFIG:
-		s = "config";
-		break;
-	case M_UNIMODEM:
-		s = "Unimodem";
-		break;
-	case M_CID:
-		s = "CID";
-		break;
-	default:
-		s = "??";
-	}
-	seq_printf(m, "%-16s %s\n", "mode", s);
-
-	switch (cs->mstate) {
-	case MS_UNINITIALIZED:
-		s = "uninitialized";
-		break;
-	case MS_INIT:
-		s = "init";
-		break;
-	case MS_LOCKED:
-		s = "locked";
-		break;
-	case MS_SHUTDOWN:
-		s = "shutdown";
-		break;
-	case MS_RECOVER:
-		s = "recover";
-		break;
-	case MS_READY:
-		s = "ready";
-		break;
-	default:
-		s = "??";
-	}
-	seq_printf(m, "%-16s %s\n", "mstate", s);
-
-	seq_printf(m, "%-16s %s\n", "running", cs->running ? "yes" : "no");
-	seq_printf(m, "%-16s %s\n", "connected", cs->connected ? "yes" : "no");
-	seq_printf(m, "%-16s %s\n", "isdn_up", cs->isdn_up ? "yes" : "no");
-	seq_printf(m, "%-16s %s\n", "cidmode", cs->cidmode ? "yes" : "no");
-
-	for (i = 0; i < cs->channels; i++) {
-		seq_printf(m, "[%d]%-13s %d\n", i, "corrupted",
-			   cs->bcs[i].corrupted);
-		seq_printf(m, "[%d]%-13s %d\n", i, "trans_down",
-			   cs->bcs[i].trans_down);
-		seq_printf(m, "[%d]%-13s %d\n", i, "trans_up",
-			   cs->bcs[i].trans_up);
-		seq_printf(m, "[%d]%-13s %d\n", i, "chstate",
-			   cs->bcs[i].chstate);
-		switch (cs->bcs[i].proto2) {
-		case L2_BITSYNC:
-			s = "bitsync";
-			break;
-		case L2_HDLC:
-			s = "HDLC";
-			break;
-		case L2_VOICE:
-			s = "voice";
-			break;
-		default:
-			s = "??";
-		}
-		seq_printf(m, "[%d]%-13s %s\n", i, "proto2", s);
-	}
-	return 0;
-}
-
-/**
- * gigaset_isdn_regdev() - register device to LL
- * @cs:		device descriptor structure.
- * @isdnid:	device name.
- *
- * Return value: 0 on success, error code < 0 on failure
- */
-int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
-{
-	struct gigaset_capi_ctr *iif;
-	int rc;
-
-	iif = kzalloc(sizeof(*iif), GFP_KERNEL);
-	if (!iif) {
-		pr_err("%s: out of memory\n", __func__);
-		return -ENOMEM;
-	}
-
-	/* prepare controller structure */
-	iif->ctr.owner         = THIS_MODULE;
-	iif->ctr.driverdata    = cs;
-	strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name) - 1);
-	iif->ctr.driver_name   = "gigaset";
-	iif->ctr.load_firmware = NULL;
-	iif->ctr.reset_ctr     = NULL;
-	iif->ctr.register_appl = gigaset_register_appl;
-	iif->ctr.release_appl  = gigaset_release_appl;
-	iif->ctr.send_message  = gigaset_send_message;
-	iif->ctr.procinfo      = gigaset_procinfo;
-	iif->ctr.proc_show     = gigaset_proc_show;
-	INIT_LIST_HEAD(&iif->appls);
-	skb_queue_head_init(&iif->sendqueue);
-	atomic_set(&iif->sendqlen, 0);
-
-	/* register controller with CAPI */
-	rc = attach_capi_ctr(&iif->ctr);
-	if (rc) {
-		pr_err("attach_capi_ctr failed (%d)\n", rc);
-		kfree(iif);
-		return rc;
-	}
-
-	cs->iif = iif;
-	cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN;
-	return 0;
-}
-
-/**
- * gigaset_isdn_unregdev() - unregister device from LL
- * @cs:		device descriptor structure.
- */
-void gigaset_isdn_unregdev(struct cardstate *cs)
-{
-	struct gigaset_capi_ctr *iif = cs->iif;
-
-	detach_capi_ctr(&iif->ctr);
-	kfree(iif);
-	cs->iif = NULL;
-}
-
-static struct capi_driver capi_driver_gigaset = {
-	.name		= "gigaset",
-	.revision	= "1.0",
-};
-
-/**
- * gigaset_isdn_regdrv() - register driver to LL
- */
-void gigaset_isdn_regdrv(void)
-{
-	pr_info("Kernel CAPI interface\n");
-	register_capi_driver(&capi_driver_gigaset);
-}
-
-/**
- * gigaset_isdn_unregdrv() - unregister driver from LL
- */
-void gigaset_isdn_unregdrv(void)
-{
-	unregister_capi_driver(&capi_driver_gigaset);
-}
diff --git a/drivers/staging/isdn/gigaset/common.c b/drivers/staging/isdn/gigaset/common.c
deleted file mode 100644
index 3bb8092..0000000
--- a/drivers/staging/isdn/gigaset/common.c
+++ /dev/null
@@ -1,1153 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Stuff used by all variants of the driver
- *
- * Copyright (c) 2001 by Stefan Eilers,
- *                       Hansjoerg Lipp <hjlipp@web.de>,
- *                       Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-/* Version Information */
-#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers"
-#define DRIVER_DESC "Driver for Gigaset 307x"
-
-#ifdef CONFIG_GIGASET_DEBUG
-#define DRIVER_DESC_DEBUG " (debug build)"
-#else
-#define DRIVER_DESC_DEBUG ""
-#endif
-
-/* Module parameters */
-int gigaset_debuglevel;
-EXPORT_SYMBOL_GPL(gigaset_debuglevel);
-module_param_named(debug, gigaset_debuglevel, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug level");
-
-/* driver state flags */
-#define VALID_MINOR	0x01
-#define VALID_ID	0x02
-
-/**
- * gigaset_dbg_buffer() - dump data in ASCII and hex for debugging
- * @level:	debugging level.
- * @msg:	message prefix.
- * @len:	number of bytes to dump.
- * @buf:	data to dump.
- *
- * If the current debugging level includes one of the bits set in @level,
- * @len bytes starting at @buf are logged to dmesg at KERN_DEBUG prio,
- * prefixed by the text @msg.
- */
-void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
-			size_t len, const unsigned char *buf)
-{
-	unsigned char outbuf[80];
-	unsigned char c;
-	size_t space = sizeof outbuf - 1;
-	unsigned char *out = outbuf;
-	size_t numin = len;
-
-	while (numin--) {
-		c = *buf++;
-		if (c == '~' || c == '^' || c == '\\') {
-			if (!space--)
-				break;
-			*out++ = '\\';
-		}
-		if (c & 0x80) {
-			if (!space--)
-				break;
-			*out++ = '~';
-			c ^= 0x80;
-		}
-		if (c < 0x20 || c == 0x7f) {
-			if (!space--)
-				break;
-			*out++ = '^';
-			c ^= 0x40;
-		}
-		if (!space--)
-			break;
-		*out++ = c;
-	}
-	*out = 0;
-
-	gig_dbg(level, "%s (%u bytes): %s", msg, (unsigned) len, outbuf);
-}
-EXPORT_SYMBOL_GPL(gigaset_dbg_buffer);
-
-static int setflags(struct cardstate *cs, unsigned flags, unsigned delay)
-{
-	int r;
-
-	r = cs->ops->set_modem_ctrl(cs, cs->control_state, flags);
-	cs->control_state = flags;
-	if (r < 0)
-		return r;
-
-	if (delay) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(delay * HZ / 1000);
-	}
-
-	return 0;
-}
-
-int gigaset_enterconfigmode(struct cardstate *cs)
-{
-	int i, r;
-
-	cs->control_state = TIOCM_RTS;
-
-	r = setflags(cs, TIOCM_DTR, 200);
-	if (r < 0)
-		goto error;
-	r = setflags(cs, 0, 200);
-	if (r < 0)
-		goto error;
-	for (i = 0; i < 5; ++i) {
-		r = setflags(cs, TIOCM_RTS, 100);
-		if (r < 0)
-			goto error;
-		r = setflags(cs, 0, 100);
-		if (r < 0)
-			goto error;
-	}
-	r = setflags(cs, TIOCM_RTS | TIOCM_DTR, 800);
-	if (r < 0)
-		goto error;
-
-	return 0;
-
-error:
-	dev_err(cs->dev, "error %d on setuartbits\n", -r);
-	cs->control_state = TIOCM_RTS | TIOCM_DTR;
-	cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS | TIOCM_DTR);
-
-	return -1;
-}
-
-static int test_timeout(struct at_state_t *at_state)
-{
-	if (!at_state->timer_expires)
-		return 0;
-
-	if (--at_state->timer_expires) {
-		gig_dbg(DEBUG_MCMD, "decreased timer of %p to %lu",
-			at_state, at_state->timer_expires);
-		return 0;
-	}
-
-	gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL,
-			  at_state->timer_index, NULL);
-	return 1;
-}
-
-static void timer_tick(struct timer_list *t)
-{
-	struct cardstate *cs = from_timer(cs, t, timer);
-	unsigned long flags;
-	unsigned channel;
-	struct at_state_t *at_state;
-	int timeout = 0;
-
-	spin_lock_irqsave(&cs->lock, flags);
-
-	for (channel = 0; channel < cs->channels; ++channel)
-		if (test_timeout(&cs->bcs[channel].at_state))
-			timeout = 1;
-
-	if (test_timeout(&cs->at_state))
-		timeout = 1;
-
-	list_for_each_entry(at_state, &cs->temp_at_states, list)
-		if (test_timeout(at_state))
-			timeout = 1;
-
-	if (cs->running) {
-		mod_timer(&cs->timer, jiffies + msecs_to_jiffies(GIG_TICK));
-		if (timeout) {
-			gig_dbg(DEBUG_EVENT, "scheduling timeout");
-			tasklet_schedule(&cs->event_tasklet);
-		}
-	}
-
-	spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-int gigaset_get_channel(struct bc_state *bcs)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&bcs->cs->lock, flags);
-	if (bcs->use_count || !try_module_get(bcs->cs->driver->owner)) {
-		gig_dbg(DEBUG_CHANNEL, "could not allocate channel %d",
-			bcs->channel);
-		spin_unlock_irqrestore(&bcs->cs->lock, flags);
-		return -EBUSY;
-	}
-	++bcs->use_count;
-	bcs->busy = 1;
-	gig_dbg(DEBUG_CHANNEL, "allocated channel %d", bcs->channel);
-	spin_unlock_irqrestore(&bcs->cs->lock, flags);
-	return 0;
-}
-
-struct bc_state *gigaset_get_free_channel(struct cardstate *cs)
-{
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&cs->lock, flags);
-	if (!try_module_get(cs->driver->owner)) {
-		gig_dbg(DEBUG_CHANNEL,
-			"could not get module for allocating channel");
-		spin_unlock_irqrestore(&cs->lock, flags);
-		return NULL;
-	}
-	for (i = 0; i < cs->channels; ++i)
-		if (!cs->bcs[i].use_count) {
-			++cs->bcs[i].use_count;
-			cs->bcs[i].busy = 1;
-			spin_unlock_irqrestore(&cs->lock, flags);
-			gig_dbg(DEBUG_CHANNEL, "allocated channel %d", i);
-			return cs->bcs + i;
-		}
-	module_put(cs->driver->owner);
-	spin_unlock_irqrestore(&cs->lock, flags);
-	gig_dbg(DEBUG_CHANNEL, "no free channel");
-	return NULL;
-}
-
-void gigaset_free_channel(struct bc_state *bcs)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&bcs->cs->lock, flags);
-	if (!bcs->busy) {
-		gig_dbg(DEBUG_CHANNEL, "could not free channel %d",
-			bcs->channel);
-		spin_unlock_irqrestore(&bcs->cs->lock, flags);
-		return;
-	}
-	--bcs->use_count;
-	bcs->busy = 0;
-	module_put(bcs->cs->driver->owner);
-	gig_dbg(DEBUG_CHANNEL, "freed channel %d", bcs->channel);
-	spin_unlock_irqrestore(&bcs->cs->lock, flags);
-}
-
-int gigaset_get_channels(struct cardstate *cs)
-{
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&cs->lock, flags);
-	for (i = 0; i < cs->channels; ++i)
-		if (cs->bcs[i].use_count) {
-			spin_unlock_irqrestore(&cs->lock, flags);
-			gig_dbg(DEBUG_CHANNEL,
-				"could not allocate all channels");
-			return -EBUSY;
-		}
-	for (i = 0; i < cs->channels; ++i)
-		++cs->bcs[i].use_count;
-	spin_unlock_irqrestore(&cs->lock, flags);
-
-	gig_dbg(DEBUG_CHANNEL, "allocated all channels");
-
-	return 0;
-}
-
-void gigaset_free_channels(struct cardstate *cs)
-{
-	unsigned long flags;
-	int i;
-
-	gig_dbg(DEBUG_CHANNEL, "unblocking all channels");
-	spin_lock_irqsave(&cs->lock, flags);
-	for (i = 0; i < cs->channels; ++i)
-		--cs->bcs[i].use_count;
-	spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-void gigaset_block_channels(struct cardstate *cs)
-{
-	unsigned long flags;
-	int i;
-
-	gig_dbg(DEBUG_CHANNEL, "blocking all channels");
-	spin_lock_irqsave(&cs->lock, flags);
-	for (i = 0; i < cs->channels; ++i)
-		++cs->bcs[i].use_count;
-	spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-static void clear_events(struct cardstate *cs)
-{
-	struct event_t *ev;
-	unsigned head, tail;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cs->ev_lock, flags);
-
-	head = cs->ev_head;
-	tail = cs->ev_tail;
-
-	while (tail != head) {
-		ev = cs->events + head;
-		kfree(ev->ptr);
-		head = (head + 1) % MAX_EVENTS;
-	}
-
-	cs->ev_head = tail;
-
-	spin_unlock_irqrestore(&cs->ev_lock, flags);
-}
-
-/**
- * gigaset_add_event() - add event to device event queue
- * @cs:		device descriptor structure.
- * @at_state:	connection state structure.
- * @type:	event type.
- * @ptr:	pointer parameter for event.
- * @parameter:	integer parameter for event.
- * @arg:	pointer parameter for event.
- *
- * Allocate an event queue entry from the device's event queue, and set it up
- * with the parameters given.
- *
- * Return value: added event
- */
-struct event_t *gigaset_add_event(struct cardstate *cs,
-				  struct at_state_t *at_state, int type,
-				  void *ptr, int parameter, void *arg)
-{
-	unsigned long flags;
-	unsigned next, tail;
-	struct event_t *event = NULL;
-
-	gig_dbg(DEBUG_EVENT, "queueing event %d", type);
-
-	spin_lock_irqsave(&cs->ev_lock, flags);
-
-	tail = cs->ev_tail;
-	next = (tail + 1) % MAX_EVENTS;
-	if (unlikely(next == cs->ev_head))
-		dev_err(cs->dev, "event queue full\n");
-	else {
-		event = cs->events + tail;
-		event->type = type;
-		event->at_state = at_state;
-		event->cid = -1;
-		event->ptr = ptr;
-		event->arg = arg;
-		event->parameter = parameter;
-		cs->ev_tail = next;
-	}
-
-	spin_unlock_irqrestore(&cs->ev_lock, flags);
-
-	return event;
-}
-EXPORT_SYMBOL_GPL(gigaset_add_event);
-
-static void clear_at_state(struct at_state_t *at_state)
-{
-	int i;
-
-	for (i = 0; i < STR_NUM; ++i) {
-		kfree(at_state->str_var[i]);
-		at_state->str_var[i] = NULL;
-	}
-}
-
-static void dealloc_temp_at_states(struct cardstate *cs)
-{
-	struct at_state_t *cur, *next;
-
-	list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) {
-		list_del(&cur->list);
-		clear_at_state(cur);
-		kfree(cur);
-	}
-}
-
-static void gigaset_freebcs(struct bc_state *bcs)
-{
-	int i;
-
-	gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
-	bcs->cs->ops->freebcshw(bcs);
-
-	gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
-	clear_at_state(&bcs->at_state);
-	gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel);
-	dev_kfree_skb(bcs->rx_skb);
-	bcs->rx_skb = NULL;
-
-	for (i = 0; i < AT_NUM; ++i) {
-		kfree(bcs->commands[i]);
-		bcs->commands[i] = NULL;
-	}
-}
-
-static struct cardstate *alloc_cs(struct gigaset_driver *drv)
-{
-	unsigned long flags;
-	unsigned i;
-	struct cardstate *cs;
-	struct cardstate *ret = NULL;
-
-	spin_lock_irqsave(&drv->lock, flags);
-	if (drv->blocked)
-		goto exit;
-	for (i = 0; i < drv->minors; ++i) {
-		cs = drv->cs + i;
-		if (!(cs->flags & VALID_MINOR)) {
-			cs->flags = VALID_MINOR;
-			ret = cs;
-			break;
-		}
-	}
-exit:
-	spin_unlock_irqrestore(&drv->lock, flags);
-	return ret;
-}
-
-static void free_cs(struct cardstate *cs)
-{
-	cs->flags = 0;
-}
-
-static void make_valid(struct cardstate *cs, unsigned mask)
-{
-	unsigned long flags;
-	struct gigaset_driver *drv = cs->driver;
-	spin_lock_irqsave(&drv->lock, flags);
-	cs->flags |= mask;
-	spin_unlock_irqrestore(&drv->lock, flags);
-}
-
-static void make_invalid(struct cardstate *cs, unsigned mask)
-{
-	unsigned long flags;
-	struct gigaset_driver *drv = cs->driver;
-	spin_lock_irqsave(&drv->lock, flags);
-	cs->flags &= ~mask;
-	spin_unlock_irqrestore(&drv->lock, flags);
-}
-
-/**
- * gigaset_freecs() - free all associated resources of a device
- * @cs:		device descriptor structure.
- *
- * Stops all tasklets and timers, unregisters the device from all
- * subsystems it was registered to, deallocates the device structure
- * @cs and all structures referenced from it.
- * Operations on the device should be stopped before calling this.
- */
-void gigaset_freecs(struct cardstate *cs)
-{
-	int i;
-	unsigned long flags;
-
-	if (!cs)
-		return;
-
-	mutex_lock(&cs->mutex);
-
-	spin_lock_irqsave(&cs->lock, flags);
-	cs->running = 0;
-	spin_unlock_irqrestore(&cs->lock, flags); /* event handler and timer are
-						     not rescheduled below */
-
-	tasklet_kill(&cs->event_tasklet);
-	del_timer_sync(&cs->timer);
-
-	switch (cs->cs_init) {
-	default:
-		/* clear B channel structures */
-		for (i = 0; i < cs->channels; ++i) {
-			gig_dbg(DEBUG_INIT, "clearing bcs[%d]", i);
-			gigaset_freebcs(cs->bcs + i);
-		}
-
-		/* clear device sysfs */
-		gigaset_free_dev_sysfs(cs);
-
-		gigaset_if_free(cs);
-
-		gig_dbg(DEBUG_INIT, "clearing hw");
-		cs->ops->freecshw(cs);
-
-		/* fall through */
-	case 2: /* error in initcshw */
-		/* Deregister from LL */
-		make_invalid(cs, VALID_ID);
-		gigaset_isdn_unregdev(cs);
-
-		/* fall through */
-	case 1: /* error when registering to LL */
-		gig_dbg(DEBUG_INIT, "clearing at_state");
-		clear_at_state(&cs->at_state);
-		dealloc_temp_at_states(cs);
-		clear_events(cs);
-		tty_port_destroy(&cs->port);
-
-		/* fall through */
-	case 0:	/* error in basic setup */
-		gig_dbg(DEBUG_INIT, "freeing inbuf");
-		kfree(cs->inbuf);
-		kfree(cs->bcs);
-	}
-
-	mutex_unlock(&cs->mutex);
-	free_cs(cs);
-}
-EXPORT_SYMBOL_GPL(gigaset_freecs);
-
-void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
-		     struct cardstate *cs, int cid)
-{
-	int i;
-
-	INIT_LIST_HEAD(&at_state->list);
-	at_state->waiting = 0;
-	at_state->getstring = 0;
-	at_state->pending_commands = 0;
-	at_state->timer_expires = 0;
-	at_state->timer_active = 0;
-	at_state->timer_index = 0;
-	at_state->seq_index = 0;
-	at_state->ConState = 0;
-	for (i = 0; i < STR_NUM; ++i)
-		at_state->str_var[i] = NULL;
-	at_state->int_var[VAR_ZDLE] = 0;
-	at_state->int_var[VAR_ZCTP] = -1;
-	at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
-	at_state->cs = cs;
-	at_state->bcs = bcs;
-	at_state->cid = cid;
-	if (!cid)
-		at_state->replystruct = cs->tabnocid;
-	else
-		at_state->replystruct = cs->tabcid;
-}
-
-
-static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs)
-/* inbuf->read must be allocated before! */
-{
-	inbuf->head = 0;
-	inbuf->tail = 0;
-	inbuf->cs = cs;
-	inbuf->inputstate = INS_command;
-}
-
-/**
- * gigaset_fill_inbuf() - append received data to input buffer
- * @inbuf:	buffer structure.
- * @src:	received data.
- * @numbytes:	number of bytes received.
- *
- * Return value: !=0 if some data was appended
- */
-int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
-		       unsigned numbytes)
-{
-	unsigned n, head, tail, bytesleft;
-
-	gig_dbg(DEBUG_INTR, "received %u bytes", numbytes);
-
-	if (!numbytes)
-		return 0;
-
-	bytesleft = numbytes;
-	tail = inbuf->tail;
-	head = inbuf->head;
-	gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
-
-	while (bytesleft) {
-		if (head > tail)
-			n = head - 1 - tail;
-		else if (head == 0)
-			n = (RBUFSIZE - 1) - tail;
-		else
-			n = RBUFSIZE - tail;
-		if (!n) {
-			dev_err(inbuf->cs->dev,
-				"buffer overflow (%u bytes lost)\n",
-				bytesleft);
-			break;
-		}
-		if (n > bytesleft)
-			n = bytesleft;
-		memcpy(inbuf->data + tail, src, n);
-		bytesleft -= n;
-		tail = (tail + n) % RBUFSIZE;
-		src += n;
-	}
-	gig_dbg(DEBUG_INTR, "setting tail to %u", tail);
-	inbuf->tail = tail;
-	return numbytes != bytesleft;
-}
-EXPORT_SYMBOL_GPL(gigaset_fill_inbuf);
-
-/* Initialize the b-channel structure */
-static int gigaset_initbcs(struct bc_state *bcs, struct cardstate *cs,
-			   int channel)
-{
-	int i;
-
-	bcs->tx_skb = NULL;
-
-	skb_queue_head_init(&bcs->squeue);
-
-	bcs->corrupted = 0;
-	bcs->trans_down = 0;
-	bcs->trans_up = 0;
-
-	gig_dbg(DEBUG_INIT, "setting up bcs[%d]->at_state", channel);
-	gigaset_at_init(&bcs->at_state, bcs, cs, -1);
-
-#ifdef CONFIG_GIGASET_DEBUG
-	bcs->emptycount = 0;
-#endif
-
-	bcs->rx_bufsize = 0;
-	bcs->rx_skb = NULL;
-	bcs->rx_fcs = PPP_INITFCS;
-	bcs->inputstate = 0;
-	bcs->channel = channel;
-	bcs->cs = cs;
-
-	bcs->chstate = 0;
-	bcs->use_count = 1;
-	bcs->busy = 0;
-	bcs->ignore = cs->ignoreframes;
-
-	for (i = 0; i < AT_NUM; ++i)
-		bcs->commands[i] = NULL;
-
-	spin_lock_init(&bcs->aplock);
-	bcs->ap = NULL;
-	bcs->apconnstate = 0;
-
-	gig_dbg(DEBUG_INIT, "  setting up bcs[%d]->hw", channel);
-	return cs->ops->initbcshw(bcs);
-}
-
-/**
- * gigaset_initcs() - initialize device structure
- * @drv:	hardware driver the device belongs to
- * @channels:	number of B channels supported by device
- * @onechannel:	!=0 if B channel data and AT commands share one
- *		    communication channel (M10x),
- *		==0 if B channels have separate communication channels (base)
- * @ignoreframes:	number of frames to ignore after setting up B channel
- * @cidmode:	!=0: start in CallID mode
- * @modulename:	name of driver module for LL registration
- *
- * Allocate and initialize cardstate structure for Gigaset driver
- * Calls hardware dependent gigaset_initcshw() function
- * Calls B channel initialization function gigaset_initbcs() for each B channel
- *
- * Return value:
- *	pointer to cardstate structure
- */
-struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
-				 int onechannel, int ignoreframes,
-				 int cidmode, const char *modulename)
-{
-	struct cardstate *cs;
-	unsigned long flags;
-	int i;
-
-	gig_dbg(DEBUG_INIT, "allocating cs");
-	cs = alloc_cs(drv);
-	if (!cs) {
-		pr_err("maximum number of devices exceeded\n");
-		return NULL;
-	}
-
-	cs->cs_init = 0;
-	cs->channels = channels;
-	cs->onechannel = onechannel;
-	cs->ignoreframes = ignoreframes;
-	INIT_LIST_HEAD(&cs->temp_at_states);
-	cs->running = 0;
-	timer_setup(&cs->timer, timer_tick, 0);
-	spin_lock_init(&cs->ev_lock);
-	cs->ev_tail = 0;
-	cs->ev_head = 0;
-
-	tasklet_init(&cs->event_tasklet, gigaset_handle_event,
-		     (unsigned long) cs);
-	tty_port_init(&cs->port);
-	cs->commands_pending = 0;
-	cs->cur_at_seq = 0;
-	cs->gotfwver = -1;
-	cs->dev = NULL;
-	cs->tty_dev = NULL;
-	cs->cidmode = cidmode != 0;
-	cs->tabnocid = gigaset_tab_nocid;
-	cs->tabcid = gigaset_tab_cid;
-
-	init_waitqueue_head(&cs->waitqueue);
-	cs->waiting = 0;
-
-	cs->mode = M_UNKNOWN;
-	cs->mstate = MS_UNINITIALIZED;
-
-	cs->bcs = kmalloc_array(channels, sizeof(struct bc_state), GFP_KERNEL);
-	cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL);
-	if (!cs->bcs || !cs->inbuf) {
-		pr_err("out of memory\n");
-		goto error;
-	}
-	++cs->cs_init;
-
-	gig_dbg(DEBUG_INIT, "setting up at_state");
-	spin_lock_init(&cs->lock);
-	gigaset_at_init(&cs->at_state, NULL, cs, 0);
-	cs->dle = 0;
-	cs->cbytes = 0;
-
-	gig_dbg(DEBUG_INIT, "setting up inbuf");
-	gigaset_inbuf_init(cs->inbuf, cs);
-
-	cs->connected = 0;
-	cs->isdn_up = 0;
-
-	gig_dbg(DEBUG_INIT, "setting up cmdbuf");
-	cs->cmdbuf = cs->lastcmdbuf = NULL;
-	spin_lock_init(&cs->cmdlock);
-	cs->curlen = 0;
-	cs->cmdbytes = 0;
-
-	gig_dbg(DEBUG_INIT, "setting up iif");
-	if (gigaset_isdn_regdev(cs, modulename) < 0) {
-		pr_err("error registering ISDN device\n");
-		goto error;
-	}
-
-	make_valid(cs, VALID_ID);
-	++cs->cs_init;
-	gig_dbg(DEBUG_INIT, "setting up hw");
-	if (cs->ops->initcshw(cs) < 0)
-		goto error;
-
-	++cs->cs_init;
-
-	/* set up character device */
-	gigaset_if_init(cs);
-
-	/* set up device sysfs */
-	gigaset_init_dev_sysfs(cs);
-
-	/* set up channel data structures */
-	for (i = 0; i < channels; ++i) {
-		gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i);
-		if (gigaset_initbcs(cs->bcs + i, cs, i) < 0) {
-			pr_err("could not allocate channel %d data\n", i);
-			goto error;
-		}
-	}
-
-	spin_lock_irqsave(&cs->lock, flags);
-	cs->running = 1;
-	spin_unlock_irqrestore(&cs->lock, flags);
-	cs->timer.expires = jiffies + msecs_to_jiffies(GIG_TICK);
-	add_timer(&cs->timer);
-
-	gig_dbg(DEBUG_INIT, "cs initialized");
-	return cs;
-
-error:
-	gig_dbg(DEBUG_INIT, "failed");
-	gigaset_freecs(cs);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(gigaset_initcs);
-
-/* Reinitialize the b-channel structure on hangup */
-void gigaset_bcs_reinit(struct bc_state *bcs)
-{
-	struct sk_buff *skb;
-	struct cardstate *cs = bcs->cs;
-	unsigned long flags;
-
-	while ((skb = skb_dequeue(&bcs->squeue)) != NULL)
-		dev_kfree_skb(skb);
-
-	spin_lock_irqsave(&cs->lock, flags);
-	clear_at_state(&bcs->at_state);
-	bcs->at_state.ConState = 0;
-	bcs->at_state.timer_active = 0;
-	bcs->at_state.timer_expires = 0;
-	bcs->at_state.cid = -1;			/* No CID defined */
-	spin_unlock_irqrestore(&cs->lock, flags);
-
-	bcs->inputstate = 0;
-
-#ifdef CONFIG_GIGASET_DEBUG
-	bcs->emptycount = 0;
-#endif
-
-	bcs->rx_fcs = PPP_INITFCS;
-	bcs->chstate = 0;
-
-	bcs->ignore = cs->ignoreframes;
-	dev_kfree_skb(bcs->rx_skb);
-	bcs->rx_skb = NULL;
-
-	cs->ops->reinitbcshw(bcs);
-}
-
-static void cleanup_cs(struct cardstate *cs)
-{
-	struct cmdbuf_t *cb, *tcb;
-	int i;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cs->lock, flags);
-
-	cs->mode = M_UNKNOWN;
-	cs->mstate = MS_UNINITIALIZED;
-
-	clear_at_state(&cs->at_state);
-	dealloc_temp_at_states(cs);
-	gigaset_at_init(&cs->at_state, NULL, cs, 0);
-
-	cs->inbuf->inputstate = INS_command;
-	cs->inbuf->head = 0;
-	cs->inbuf->tail = 0;
-
-	cb = cs->cmdbuf;
-	while (cb) {
-		tcb = cb;
-		cb = cb->next;
-		kfree(tcb);
-	}
-	cs->cmdbuf = cs->lastcmdbuf = NULL;
-	cs->curlen = 0;
-	cs->cmdbytes = 0;
-	cs->gotfwver = -1;
-	cs->dle = 0;
-	cs->cur_at_seq = 0;
-	cs->commands_pending = 0;
-	cs->cbytes = 0;
-
-	spin_unlock_irqrestore(&cs->lock, flags);
-
-	for (i = 0; i < cs->channels; ++i) {
-		gigaset_freebcs(cs->bcs + i);
-		if (gigaset_initbcs(cs->bcs + i, cs, i) < 0)
-			pr_err("could not allocate channel %d data\n", i);
-	}
-
-	if (cs->waiting) {
-		cs->cmd_result = -ENODEV;
-		cs->waiting = 0;
-		wake_up_interruptible(&cs->waitqueue);
-	}
-}
-
-
-/**
- * gigaset_start() - start device operations
- * @cs:		device descriptor structure.
- *
- * Prepares the device for use by setting up communication parameters,
- * scheduling an EV_START event to initiate device initialization, and
- * waiting for completion of the initialization.
- *
- * Return value:
- *	0 on success, error code < 0 on failure
- */
-int gigaset_start(struct cardstate *cs)
-{
-	unsigned long flags;
-
-	if (mutex_lock_interruptible(&cs->mutex))
-		return -EBUSY;
-
-	spin_lock_irqsave(&cs->lock, flags);
-	cs->connected = 1;
-	spin_unlock_irqrestore(&cs->lock, flags);
-
-	if (cs->mstate != MS_LOCKED) {
-		cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR | TIOCM_RTS);
-		cs->ops->baud_rate(cs, B115200);
-		cs->ops->set_line_ctrl(cs, CS8);
-		cs->control_state = TIOCM_DTR | TIOCM_RTS;
-	}
-
-	cs->waiting = 1;
-
-	if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) {
-		cs->waiting = 0;
-		goto error;
-	}
-	gigaset_schedule_event(cs);
-
-	wait_event(cs->waitqueue, !cs->waiting);
-
-	mutex_unlock(&cs->mutex);
-	return 0;
-
-error:
-	mutex_unlock(&cs->mutex);
-	return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(gigaset_start);
-
-/**
- * gigaset_shutdown() - shut down device operations
- * @cs:		device descriptor structure.
- *
- * Deactivates the device by scheduling an EV_SHUTDOWN event and
- * waiting for completion of the shutdown.
- *
- * Return value:
- *	0 - success, -ENODEV - error (no device associated)
- */
-int gigaset_shutdown(struct cardstate *cs)
-{
-	mutex_lock(&cs->mutex);
-
-	if (!(cs->flags & VALID_MINOR)) {
-		mutex_unlock(&cs->mutex);
-		return -ENODEV;
-	}
-
-	cs->waiting = 1;
-
-	if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL))
-		goto exit;
-	gigaset_schedule_event(cs);
-
-	wait_event(cs->waitqueue, !cs->waiting);
-
-	cleanup_cs(cs);
-
-exit:
-	mutex_unlock(&cs->mutex);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(gigaset_shutdown);
-
-/**
- * gigaset_stop() - stop device operations
- * @cs:		device descriptor structure.
- *
- * Stops operations on the device by scheduling an EV_STOP event and
- * waiting for completion of the shutdown.
- */
-void gigaset_stop(struct cardstate *cs)
-{
-	mutex_lock(&cs->mutex);
-
-	cs->waiting = 1;
-
-	if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL))
-		goto exit;
-	gigaset_schedule_event(cs);
-
-	wait_event(cs->waitqueue, !cs->waiting);
-
-	cleanup_cs(cs);
-
-exit:
-	mutex_unlock(&cs->mutex);
-}
-EXPORT_SYMBOL_GPL(gigaset_stop);
-
-static LIST_HEAD(drivers);
-static DEFINE_SPINLOCK(driver_lock);
-
-struct cardstate *gigaset_get_cs_by_id(int id)
-{
-	unsigned long flags;
-	struct cardstate *ret = NULL;
-	struct cardstate *cs;
-	struct gigaset_driver *drv;
-	unsigned i;
-
-	spin_lock_irqsave(&driver_lock, flags);
-	list_for_each_entry(drv, &drivers, list) {
-		spin_lock(&drv->lock);
-		for (i = 0; i < drv->minors; ++i) {
-			cs = drv->cs + i;
-			if ((cs->flags & VALID_ID) && cs->myid == id) {
-				ret = cs;
-				break;
-			}
-		}
-		spin_unlock(&drv->lock);
-		if (ret)
-			break;
-	}
-	spin_unlock_irqrestore(&driver_lock, flags);
-	return ret;
-}
-
-static struct cardstate *gigaset_get_cs_by_minor(unsigned minor)
-{
-	unsigned long flags;
-	struct cardstate *ret = NULL;
-	struct gigaset_driver *drv;
-	unsigned index;
-
-	spin_lock_irqsave(&driver_lock, flags);
-	list_for_each_entry(drv, &drivers, list) {
-		if (minor < drv->minor || minor >= drv->minor + drv->minors)
-			continue;
-		index = minor - drv->minor;
-		spin_lock(&drv->lock);
-		if (drv->cs[index].flags & VALID_MINOR)
-			ret = drv->cs + index;
-		spin_unlock(&drv->lock);
-		if (ret)
-			break;
-	}
-	spin_unlock_irqrestore(&driver_lock, flags);
-	return ret;
-}
-
-struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty)
-{
-	return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start);
-}
-
-/**
- * gigaset_freedriver() - free all associated resources of a driver
- * @drv:	driver descriptor structure.
- *
- * Unregisters the driver from the system and deallocates the driver
- * structure @drv and all structures referenced from it.
- * All devices should be shut down before calling this.
- */
-void gigaset_freedriver(struct gigaset_driver *drv)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&driver_lock, flags);
-	list_del(&drv->list);
-	spin_unlock_irqrestore(&driver_lock, flags);
-
-	gigaset_if_freedriver(drv);
-
-	kfree(drv->cs);
-	kfree(drv);
-}
-EXPORT_SYMBOL_GPL(gigaset_freedriver);
-
-/**
- * gigaset_initdriver() - initialize driver structure
- * @minor:	First minor number
- * @minors:	Number of minors this driver can handle
- * @procname:	Name of the driver
- * @devname:	Name of the device files (prefix without minor number)
- *
- * Allocate and initialize gigaset_driver structure. Initialize interface.
- *
- * Return value:
- *	Pointer to the gigaset_driver structure on success, NULL on failure.
- */
-struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
-					  const char *procname,
-					  const char *devname,
-					  const struct gigaset_ops *ops,
-					  struct module *owner)
-{
-	struct gigaset_driver *drv;
-	unsigned long flags;
-	unsigned i;
-
-	drv = kmalloc(sizeof *drv, GFP_KERNEL);
-	if (!drv)
-		return NULL;
-
-	drv->have_tty = 0;
-	drv->minor = minor;
-	drv->minors = minors;
-	spin_lock_init(&drv->lock);
-	drv->blocked = 0;
-	drv->ops = ops;
-	drv->owner = owner;
-	INIT_LIST_HEAD(&drv->list);
-
-	drv->cs = kmalloc_array(minors, sizeof(*drv->cs), GFP_KERNEL);
-	if (!drv->cs)
-		goto error;
-
-	for (i = 0; i < minors; ++i) {
-		drv->cs[i].flags = 0;
-		drv->cs[i].driver = drv;
-		drv->cs[i].ops = drv->ops;
-		drv->cs[i].minor_index = i;
-		mutex_init(&drv->cs[i].mutex);
-	}
-
-	gigaset_if_initdriver(drv, procname, devname);
-
-	spin_lock_irqsave(&driver_lock, flags);
-	list_add(&drv->list, &drivers);
-	spin_unlock_irqrestore(&driver_lock, flags);
-
-	return drv;
-
-error:
-	kfree(drv);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(gigaset_initdriver);
-
-/**
- * gigaset_blockdriver() - block driver
- * @drv:	driver descriptor structure.
- *
- * Prevents the driver from attaching new devices, in preparation for
- * deregistration.
- */
-void gigaset_blockdriver(struct gigaset_driver *drv)
-{
-	drv->blocked = 1;
-}
-EXPORT_SYMBOL_GPL(gigaset_blockdriver);
-
-static int __init gigaset_init_module(void)
-{
-	/* in accordance with the principle of least astonishment,
-	 * setting the 'debug' parameter to 1 activates a sensible
-	 * set of default debug levels
-	 */
-	if (gigaset_debuglevel == 1)
-		gigaset_debuglevel = DEBUG_DEFAULT;
-
-	pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n");
-	gigaset_isdn_regdrv();
-	return 0;
-}
-
-static void __exit gigaset_exit_module(void)
-{
-	gigaset_isdn_unregdrv();
-}
-
-module_init(gigaset_init_module);
-module_exit(gigaset_exit_module);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/isdn/gigaset/dummyll.c b/drivers/staging/isdn/gigaset/dummyll.c
deleted file mode 100644
index 4b9637e..0000000
--- a/drivers/staging/isdn/gigaset/dummyll.c
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Dummy LL interface for the Gigaset driver
- *
- * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include <linux/export.h>
-#include "gigaset.h"
-
-void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
-{
-}
-EXPORT_SYMBOL_GPL(gigaset_skb_sent);
-
-void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
-{
-}
-EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
-
-void gigaset_isdn_rcv_err(struct bc_state *bcs)
-{
-}
-EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
-
-int gigaset_isdn_icall(struct at_state_t *at_state)
-{
-	return ICALL_IGNORE;
-}
-
-void gigaset_isdn_connD(struct bc_state *bcs)
-{
-}
-
-void gigaset_isdn_hupD(struct bc_state *bcs)
-{
-}
-
-void gigaset_isdn_connB(struct bc_state *bcs)
-{
-}
-
-void gigaset_isdn_hupB(struct bc_state *bcs)
-{
-}
-
-void gigaset_isdn_start(struct cardstate *cs)
-{
-}
-
-void gigaset_isdn_stop(struct cardstate *cs)
-{
-}
-
-int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
-{
-	return 0;
-}
-
-void gigaset_isdn_unregdev(struct cardstate *cs)
-{
-}
-
-void gigaset_isdn_regdrv(void)
-{
-	pr_info("no ISDN subsystem interface\n");
-}
-
-void gigaset_isdn_unregdrv(void)
-{
-}
diff --git a/drivers/staging/isdn/gigaset/ev-layer.c b/drivers/staging/isdn/gigaset/ev-layer.c
deleted file mode 100644
index f8bb186..0000000
--- a/drivers/staging/isdn/gigaset/ev-layer.c
+++ /dev/null
@@ -1,1910 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Stuff used by all variants of the driver
- *
- * Copyright (c) 2001 by Stefan Eilers,
- *                       Hansjoerg Lipp <hjlipp@web.de>,
- *                       Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include <linux/export.h>
-#include "gigaset.h"
-
-/* ========================================================== */
-/* bit masks for pending commands */
-#define PC_DIAL		0x001
-#define PC_HUP		0x002
-#define PC_INIT		0x004
-#define PC_DLE0		0x008
-#define PC_DLE1		0x010
-#define PC_SHUTDOWN	0x020
-#define PC_ACCEPT	0x040
-#define PC_CID		0x080
-#define PC_NOCID	0x100
-#define PC_CIDMODE	0x200
-#define PC_UMMODE	0x400
-
-/* types of modem responses */
-#define RT_NOTHING	0
-#define RT_ZSAU		1
-#define RT_RING		2
-#define RT_NUMBER	3
-#define RT_STRING	4
-#define RT_ZCAU		6
-
-/* Possible ASCII responses */
-#define RSP_OK		0
-#define RSP_ERROR	1
-#define RSP_ZGCI	3
-#define RSP_RING	4
-#define RSP_ZVLS	5
-#define RSP_ZCAU	6
-
-/* responses with values to store in at_state */
-/* - numeric */
-#define RSP_VAR		100
-#define RSP_ZSAU	(RSP_VAR + VAR_ZSAU)
-#define RSP_ZDLE	(RSP_VAR + VAR_ZDLE)
-#define RSP_ZCTP	(RSP_VAR + VAR_ZCTP)
-/* - string */
-#define RSP_STR		(RSP_VAR + VAR_NUM)
-#define RSP_NMBR	(RSP_STR + STR_NMBR)
-#define RSP_ZCPN	(RSP_STR + STR_ZCPN)
-#define RSP_ZCON	(RSP_STR + STR_ZCON)
-#define RSP_ZBC		(RSP_STR + STR_ZBC)
-#define RSP_ZHLC	(RSP_STR + STR_ZHLC)
-
-#define RSP_WRONG_CID	-2	/* unknown cid in cmd */
-#define RSP_INVAL	-6	/* invalid response   */
-#define RSP_NODEV	-9	/* device not connected */
-
-#define RSP_NONE	-19
-#define RSP_STRING	-20
-#define RSP_NULL	-21
-#define RSP_INIT	-27
-#define RSP_ANY		-26
-#define RSP_LAST	-28
-
-/* actions for process_response */
-#define ACT_NOTHING		0
-#define ACT_SETDLE1		1
-#define ACT_SETDLE0		2
-#define ACT_FAILINIT		3
-#define ACT_HUPMODEM		4
-#define ACT_CONFIGMODE		5
-#define ACT_INIT		6
-#define ACT_DLE0		7
-#define ACT_DLE1		8
-#define ACT_FAILDLE0		9
-#define ACT_FAILDLE1		10
-#define ACT_RING		11
-#define ACT_CID			12
-#define ACT_FAILCID		13
-#define ACT_SDOWN		14
-#define ACT_FAILSDOWN		15
-#define ACT_DEBUG		16
-#define ACT_WARN		17
-#define ACT_DIALING		18
-#define ACT_ABORTDIAL		19
-#define ACT_DISCONNECT		20
-#define ACT_CONNECT		21
-#define ACT_REMOTEREJECT	22
-#define ACT_CONNTIMEOUT		23
-#define ACT_REMOTEHUP		24
-#define ACT_ABORTHUP		25
-#define ACT_ICALL		26
-#define ACT_ACCEPTED		27
-#define ACT_ABORTACCEPT		28
-#define ACT_TIMEOUT		29
-#define ACT_GETSTRING		30
-#define ACT_SETVER		31
-#define ACT_FAILVER		32
-#define ACT_GOTVER		33
-#define ACT_TEST		34
-#define ACT_ERROR		35
-#define ACT_ABORTCID		36
-#define ACT_ZCAU		37
-#define ACT_NOTIFY_BC_DOWN	38
-#define ACT_NOTIFY_BC_UP	39
-#define ACT_DIAL		40
-#define ACT_ACCEPT		41
-#define ACT_HUP			43
-#define ACT_IF_LOCK		44
-#define ACT_START		45
-#define ACT_STOP		46
-#define ACT_FAKEDLE0		47
-#define ACT_FAKEHUP		48
-#define ACT_FAKESDOWN		49
-#define ACT_SHUTDOWN		50
-#define ACT_PROC_CIDMODE	51
-#define ACT_UMODESET		52
-#define ACT_FAILUMODE		53
-#define ACT_CMODESET		54
-#define ACT_FAILCMODE		55
-#define ACT_IF_VER		56
-#define ACT_CMD			100
-
-/* at command sequences */
-#define SEQ_NONE	0
-#define SEQ_INIT	100
-#define SEQ_DLE0	200
-#define SEQ_DLE1	250
-#define SEQ_CID		300
-#define SEQ_NOCID	350
-#define SEQ_HUP		400
-#define SEQ_DIAL	600
-#define SEQ_ACCEPT	720
-#define SEQ_SHUTDOWN	500
-#define SEQ_CIDMODE	10
-#define SEQ_UMMODE	11
-
-
-/* 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid),
- * 400: hup, 500: reset, 600: dial, 700: ring */
-struct reply_t gigaset_tab_nocid[] =
-{
-/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
- * action, command */
-
-/* initialize device, set cid mode if possible */
-	{RSP_INIT,	 -1,  -1, SEQ_INIT,	100,  1, {ACT_TIMEOUT} },
-
-	{EV_TIMEOUT,	100, 100, -1,		101,  3, {0},	"Z\r"},
-	{RSP_OK,	101, 103, -1,		120,  5, {ACT_GETSTRING},
-								"+GMR\r"},
-
-	{EV_TIMEOUT,	101, 101, -1,		102,  5, {0},	"Z\r"},
-	{RSP_ERROR,	101, 101, -1,		102,  5, {0},	"Z\r"},
-
-	{EV_TIMEOUT,	102, 102, -1,		108,  5, {ACT_SETDLE1},
-								"^SDLE=0\r"},
-	{RSP_OK,	108, 108, -1,		104, -1},
-	{RSP_ZDLE,	104, 104,  0,		103,  5, {0},	"Z\r"},
-	{EV_TIMEOUT,	104, 104, -1,		  0,  0, {ACT_FAILINIT} },
-	{RSP_ERROR,	108, 108, -1,		  0,  0, {ACT_FAILINIT} },
-
-	{EV_TIMEOUT,	108, 108, -1,		105,  2, {ACT_SETDLE0,
-							  ACT_HUPMODEM,
-							  ACT_TIMEOUT} },
-	{EV_TIMEOUT,	105, 105, -1,		103,  5, {0},	"Z\r"},
-
-	{RSP_ERROR,	102, 102, -1,		107,  5, {0},	"^GETPRE\r"},
-	{RSP_OK,	107, 107, -1,		  0,  0, {ACT_CONFIGMODE} },
-	{RSP_ERROR,	107, 107, -1,		  0,  0, {ACT_FAILINIT} },
-	{EV_TIMEOUT,	107, 107, -1,		  0,  0, {ACT_FAILINIT} },
-
-	{RSP_ERROR,	103, 103, -1,		  0,  0, {ACT_FAILINIT} },
-	{EV_TIMEOUT,	103, 103, -1,		  0,  0, {ACT_FAILINIT} },
-
-	{RSP_STRING,	120, 120, -1,		121, -1, {ACT_SETVER} },
-
-	{EV_TIMEOUT,	120, 121, -1,		  0,  0, {ACT_FAILVER,
-							  ACT_INIT} },
-	{RSP_ERROR,	120, 121, -1,		  0,  0, {ACT_FAILVER,
-							  ACT_INIT} },
-	{RSP_OK,	121, 121, -1,		  0,  0, {ACT_GOTVER,
-							  ACT_INIT} },
-	{RSP_NONE,	121, 121, -1,		120,  0, {ACT_GETSTRING} },
-
-/* leave dle mode */
-	{RSP_INIT,	  0,   0, SEQ_DLE0,	201,  5, {0},	"^SDLE=0\r"},
-	{RSP_OK,	201, 201, -1,		202, -1},
-	{RSP_ZDLE,	202, 202,  0,		  0,  0, {ACT_DLE0} },
-	{RSP_NODEV,	200, 249, -1,		  0,  0, {ACT_FAKEDLE0} },
-	{RSP_ERROR,	200, 249, -1,		  0,  0, {ACT_FAILDLE0} },
-	{EV_TIMEOUT,	200, 249, -1,		  0,  0, {ACT_FAILDLE0} },
-
-/* enter dle mode */
-	{RSP_INIT,	  0,   0, SEQ_DLE1,	251,  5, {0},	"^SDLE=1\r"},
-	{RSP_OK,	251, 251, -1,		252, -1},
-	{RSP_ZDLE,	252, 252,  1,		  0,  0, {ACT_DLE1} },
-	{RSP_ERROR,	250, 299, -1,		  0,  0, {ACT_FAILDLE1} },
-	{EV_TIMEOUT,	250, 299, -1,		  0,  0, {ACT_FAILDLE1} },
-
-/* incoming call */
-	{RSP_RING,	 -1,  -1, -1,		 -1, -1, {ACT_RING} },
-
-/* get cid */
-	{RSP_INIT,	  0,   0, SEQ_CID,	301,  5, {0},	"^SGCI?\r"},
-	{RSP_OK,	301, 301, -1,		302, -1},
-	{RSP_ZGCI,	302, 302, -1,		  0,  0, {ACT_CID} },
-	{RSP_ERROR,	301, 349, -1,		  0,  0, {ACT_FAILCID} },
-	{EV_TIMEOUT,	301, 349, -1,		  0,  0, {ACT_FAILCID} },
-
-/* enter cid mode */
-	{RSP_INIT,	  0,   0, SEQ_CIDMODE,	150,  5, {0},	"^SGCI=1\r"},
-	{RSP_OK,	150, 150, -1,		  0,  0, {ACT_CMODESET} },
-	{RSP_ERROR,	150, 150, -1,		  0,  0, {ACT_FAILCMODE} },
-	{EV_TIMEOUT,	150, 150, -1,		  0,  0, {ACT_FAILCMODE} },
-
-/* leave cid mode */
-	{RSP_INIT,	  0,   0, SEQ_UMMODE,	160,  5, {0},	"Z\r"},
-	{RSP_OK,	160, 160, -1,		  0,  0, {ACT_UMODESET} },
-	{RSP_ERROR,	160, 160, -1,		  0,  0, {ACT_FAILUMODE} },
-	{EV_TIMEOUT,	160, 160, -1,		  0,  0, {ACT_FAILUMODE} },
-
-/* abort getting cid */
-	{RSP_INIT,	  0,   0, SEQ_NOCID,	  0,  0, {ACT_ABORTCID} },
-
-/* reset */
-	{RSP_INIT,	  0,   0, SEQ_SHUTDOWN,	504,  5, {0},	"Z\r"},
-	{RSP_OK,	504, 504, -1,		  0,  0, {ACT_SDOWN} },
-	{RSP_ERROR,	501, 599, -1,		  0,  0, {ACT_FAILSDOWN} },
-	{EV_TIMEOUT,	501, 599, -1,		  0,  0, {ACT_FAILSDOWN} },
-	{RSP_NODEV,	501, 599, -1,		  0,  0, {ACT_FAKESDOWN} },
-
-	{EV_PROC_CIDMODE, -1, -1, -1,		 -1, -1, {ACT_PROC_CIDMODE} },
-	{EV_IF_LOCK,	 -1,  -1, -1,		 -1, -1, {ACT_IF_LOCK} },
-	{EV_IF_VER,	 -1,  -1, -1,		 -1, -1, {ACT_IF_VER} },
-	{EV_START,	 -1,  -1, -1,		 -1, -1, {ACT_START} },
-	{EV_STOP,	 -1,  -1, -1,		 -1, -1, {ACT_STOP} },
-	{EV_SHUTDOWN,	 -1,  -1, -1,		 -1, -1, {ACT_SHUTDOWN} },
-
-/* misc. */
-	{RSP_ERROR,	 -1,  -1, -1,		 -1, -1, {ACT_ERROR} },
-	{RSP_ZCAU,	 -1,  -1, -1,		 -1, -1, {ACT_ZCAU} },
-	{RSP_NONE,	 -1,  -1, -1,		 -1, -1, {ACT_DEBUG} },
-	{RSP_ANY,	 -1,  -1, -1,		 -1, -1, {ACT_WARN} },
-	{RSP_LAST}
-};
-
-/* 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring,
- * 400: hup, 750: accepted icall */
-struct reply_t gigaset_tab_cid[] =
-{
-/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
- * action, command */
-
-/* dial */
-	{EV_DIAL,	 -1,  -1, -1,		 -1, -1, {ACT_DIAL} },
-	{RSP_INIT,	  0,   0, SEQ_DIAL,	601,  5, {ACT_CMD + AT_BC} },
-	{RSP_OK,	601, 601, -1,		603,  5, {ACT_CMD + AT_PROTO} },
-	{RSP_OK,	603, 603, -1,		604,  5, {ACT_CMD + AT_TYPE} },
-	{RSP_OK,	604, 604, -1,		605,  5, {ACT_CMD + AT_MSN} },
-	{RSP_NULL,	605, 605, -1,		606,  5, {ACT_CMD + AT_CLIP} },
-	{RSP_OK,	605, 605, -1,		606,  5, {ACT_CMD + AT_CLIP} },
-	{RSP_NULL,	606, 606, -1,		607,  5, {ACT_CMD + AT_ISO} },
-	{RSP_OK,	606, 606, -1,		607,  5, {ACT_CMD + AT_ISO} },
-	{RSP_OK,	607, 607, -1,		608,  5, {0},	"+VLS=17\r"},
-	{RSP_OK,	608, 608, -1,		609, -1},
-	{RSP_ZSAU,	609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD + AT_DIAL} },
-	{RSP_OK,	610, 610, -1,		650,  0, {ACT_DIALING} },
-
-	{RSP_ERROR,	601, 610, -1,		  0,  0, {ACT_ABORTDIAL} },
-	{EV_TIMEOUT,	601, 610, -1,		  0,  0, {ACT_ABORTDIAL} },
-
-/* optional dialing responses */
-	{EV_BC_OPEN,	650, 650, -1,		651, -1},
-	{RSP_ZVLS,	609, 651, 17,		 -1, -1, {ACT_DEBUG} },
-	{RSP_ZCTP,	610, 651, -1,		 -1, -1, {ACT_DEBUG} },
-	{RSP_ZCPN,	610, 651, -1,		 -1, -1, {ACT_DEBUG} },
-	{RSP_ZSAU,	650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} },
-
-/* connect */
-	{RSP_ZSAU,	650, 650, ZSAU_ACTIVE,	800, -1, {ACT_CONNECT} },
-	{RSP_ZSAU,	651, 651, ZSAU_ACTIVE,	800, -1, {ACT_CONNECT,
-							  ACT_NOTIFY_BC_UP} },
-	{RSP_ZSAU,	750, 750, ZSAU_ACTIVE,	800, -1, {ACT_CONNECT} },
-	{RSP_ZSAU,	751, 751, ZSAU_ACTIVE,	800, -1, {ACT_CONNECT,
-							  ACT_NOTIFY_BC_UP} },
-	{EV_BC_OPEN,	800, 800, -1,		800, -1, {ACT_NOTIFY_BC_UP} },
-
-/* remote hangup */
-	{RSP_ZSAU,	650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} },
-	{RSP_ZSAU,	750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
-	{RSP_ZSAU,	800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
-
-/* hangup */
-	{EV_HUP,	 -1,  -1, -1,		 -1, -1, {ACT_HUP} },
-	{RSP_INIT,	 -1,  -1, SEQ_HUP,	401,  5, {0},	"+VLS=0\r"},
-	{RSP_OK,	401, 401, -1,		402,  5},
-	{RSP_ZVLS,	402, 402,  0,		403,  5},
-	{RSP_ZSAU,	403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
-	{RSP_ZSAU,	403, 403, ZSAU_NULL,	  0,  0, {ACT_DISCONNECT} },
-	{RSP_NODEV,	401, 403, -1,		  0,  0, {ACT_FAKEHUP} },
-	{RSP_ERROR,	401, 401, -1,		  0,  0, {ACT_ABORTHUP} },
-	{EV_TIMEOUT,	401, 403, -1,		  0,  0, {ACT_ABORTHUP} },
-
-	{EV_BC_CLOSED,	  0,   0, -1,		  0, -1, {ACT_NOTIFY_BC_DOWN} },
-
-/* ring */
-	{RSP_ZBC,	700, 700, -1,		 -1, -1, {0} },
-	{RSP_ZHLC,	700, 700, -1,		 -1, -1, {0} },
-	{RSP_NMBR,	700, 700, -1,		 -1, -1, {0} },
-	{RSP_ZCPN,	700, 700, -1,		 -1, -1, {0} },
-	{RSP_ZCTP,	700, 700, -1,		 -1, -1, {0} },
-	{EV_TIMEOUT,	700, 700, -1,		720, 720, {ACT_ICALL} },
-	{EV_BC_CLOSED,	720, 720, -1,		  0, -1, {ACT_NOTIFY_BC_DOWN} },
-
-/*accept icall*/
-	{EV_ACCEPT,	 -1,  -1, -1,		 -1, -1, {ACT_ACCEPT} },
-	{RSP_INIT,	720, 720, SEQ_ACCEPT,	721,  5, {ACT_CMD + AT_PROTO} },
-	{RSP_OK,	721, 721, -1,		722,  5, {ACT_CMD + AT_ISO} },
-	{RSP_OK,	722, 722, -1,		723,  5, {0},	"+VLS=17\r"},
-	{RSP_OK,	723, 723, -1,		724,  5, {0} },
-	{RSP_ZVLS,	724, 724, 17,		750, 50, {ACT_ACCEPTED} },
-	{RSP_ERROR,	721, 729, -1,		  0,  0, {ACT_ABORTACCEPT} },
-	{EV_TIMEOUT,	721, 729, -1,		  0,  0, {ACT_ABORTACCEPT} },
-	{RSP_ZSAU,	700, 729, ZSAU_NULL,	  0,  0, {ACT_ABORTACCEPT} },
-	{RSP_ZSAU,	700, 729, ZSAU_ACTIVE,	  0,  0, {ACT_ABORTACCEPT} },
-	{RSP_ZSAU,	700, 729, ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT} },
-
-	{EV_BC_OPEN,	750, 750, -1,		751, -1},
-	{EV_TIMEOUT,	750, 751, -1,		  0,  0, {ACT_CONNTIMEOUT} },
-
-/* B channel closed (general case) */
-	{EV_BC_CLOSED,	 -1,  -1, -1,		 -1, -1, {ACT_NOTIFY_BC_DOWN} },
-
-/* misc. */
-	{RSP_ZCON,	 -1,  -1, -1,		 -1, -1, {ACT_DEBUG} },
-	{RSP_ZCAU,	 -1,  -1, -1,		 -1, -1, {ACT_ZCAU} },
-	{RSP_NONE,	 -1,  -1, -1,		 -1, -1, {ACT_DEBUG} },
-	{RSP_ANY,	 -1,  -1, -1,		 -1, -1, {ACT_WARN} },
-	{RSP_LAST}
-};
-
-
-static const struct resp_type_t {
-	char	*response;
-	int	resp_code;
-	int	type;
-}
-resp_type[] =
-{
-	{"OK",		RSP_OK,		RT_NOTHING},
-	{"ERROR",	RSP_ERROR,	RT_NOTHING},
-	{"ZSAU",	RSP_ZSAU,	RT_ZSAU},
-	{"ZCAU",	RSP_ZCAU,	RT_ZCAU},
-	{"RING",	RSP_RING,	RT_RING},
-	{"ZGCI",	RSP_ZGCI,	RT_NUMBER},
-	{"ZVLS",	RSP_ZVLS,	RT_NUMBER},
-	{"ZCTP",	RSP_ZCTP,	RT_NUMBER},
-	{"ZDLE",	RSP_ZDLE,	RT_NUMBER},
-	{"ZHLC",	RSP_ZHLC,	RT_STRING},
-	{"ZBC",		RSP_ZBC,	RT_STRING},
-	{"NMBR",	RSP_NMBR,	RT_STRING},
-	{"ZCPN",	RSP_ZCPN,	RT_STRING},
-	{"ZCON",	RSP_ZCON,	RT_STRING},
-	{NULL,		0,		0}
-};
-
-static const struct zsau_resp_t {
-	char	*str;
-	int	code;
-}
-zsau_resp[] =
-{
-	{"OUTGOING_CALL_PROCEEDING",	ZSAU_PROCEEDING},
-	{"CALL_DELIVERED",		ZSAU_CALL_DELIVERED},
-	{"ACTIVE",			ZSAU_ACTIVE},
-	{"DISCONNECT_IND",		ZSAU_DISCONNECT_IND},
-	{"NULL",			ZSAU_NULL},
-	{"DISCONNECT_REQ",		ZSAU_DISCONNECT_REQ},
-	{NULL,				ZSAU_UNKNOWN}
-};
-
-/* check for and remove fixed string prefix
- * If s starts with prefix terminated by a non-alphanumeric character,
- * return pointer to the first character after that, otherwise return NULL.
- */
-static char *skip_prefix(char *s, const char *prefix)
-{
-	while (*prefix)
-		if (*s++ != *prefix++)
-			return NULL;
-	if (isalnum(*s))
-		return NULL;
-	return s;
-}
-
-/* queue event with CID */
-static void add_cid_event(struct cardstate *cs, int cid, int type,
-			  void *ptr, int parameter)
-{
-	unsigned long flags;
-	unsigned next, tail;
-	struct event_t *event;
-
-	gig_dbg(DEBUG_EVENT, "queueing event %d for cid %d", type, cid);
-
-	spin_lock_irqsave(&cs->ev_lock, flags);
-
-	tail = cs->ev_tail;
-	next = (tail + 1) % MAX_EVENTS;
-	if (unlikely(next == cs->ev_head)) {
-		dev_err(cs->dev, "event queue full\n");
-		kfree(ptr);
-	} else {
-		event = cs->events + tail;
-		event->type = type;
-		event->cid = cid;
-		event->ptr = ptr;
-		event->arg = NULL;
-		event->parameter = parameter;
-		event->at_state = NULL;
-		cs->ev_tail = next;
-	}
-
-	spin_unlock_irqrestore(&cs->ev_lock, flags);
-}
-
-/**
- * gigaset_handle_modem_response() - process received modem response
- * @cs:		device descriptor structure.
- *
- * Called by asyncdata/isocdata if a block of data received from the
- * device must be processed as a modem command response. The data is
- * already in the cs structure.
- */
-void gigaset_handle_modem_response(struct cardstate *cs)
-{
-	char *eoc, *psep, *ptr;
-	const struct resp_type_t *rt;
-	const struct zsau_resp_t *zr;
-	int cid, parameter;
-	u8 type, value;
-
-	if (!cs->cbytes) {
-		/* ignore additional LFs/CRs (M10x config mode or cx100) */
-		gig_dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[0]);
-		return;
-	}
-	cs->respdata[cs->cbytes] = 0;
-
-	if (cs->at_state.getstring) {
-		/* state machine wants next line verbatim */
-		cs->at_state.getstring = 0;
-		ptr = kstrdup(cs->respdata, GFP_ATOMIC);
-		gig_dbg(DEBUG_EVENT, "string==%s", ptr ? ptr : "NULL");
-		add_cid_event(cs, 0, RSP_STRING, ptr, 0);
-		return;
-	}
-
-	/* look up response type */
-	for (rt = resp_type; rt->response; ++rt) {
-		eoc = skip_prefix(cs->respdata, rt->response);
-		if (eoc)
-			break;
-	}
-	if (!rt->response) {
-		add_cid_event(cs, 0, RSP_NONE, NULL, 0);
-		gig_dbg(DEBUG_EVENT, "unknown modem response: '%s'\n",
-			cs->respdata);
-		return;
-	}
-
-	/* check for CID */
-	psep = strrchr(cs->respdata, ';');
-	if (psep &&
-	    !kstrtoint(psep + 1, 10, &cid) &&
-	    cid >= 1 && cid <= 65535) {
-		/* valid CID: chop it off */
-		*psep = 0;
-	} else {
-		/* no valid CID: leave unchanged */
-		cid = 0;
-	}
-
-	gig_dbg(DEBUG_EVENT, "CMD received: %s", cs->respdata);
-	if (cid)
-		gig_dbg(DEBUG_EVENT, "CID: %d", cid);
-
-	switch (rt->type) {
-	case RT_NOTHING:
-		/* check parameter separator */
-		if (*eoc)
-			goto bad_param;	/* extra parameter */
-
-		add_cid_event(cs, cid, rt->resp_code, NULL, 0);
-		break;
-
-	case RT_RING:
-		/* check parameter separator */
-		if (!*eoc)
-			eoc = NULL;	/* no parameter */
-		else if (*eoc++ != ',')
-			goto bad_param;
-
-		add_cid_event(cs, 0, rt->resp_code, NULL, cid);
-
-		/* process parameters as individual responses */
-		while (eoc) {
-			/* look up parameter type */
-			psep = NULL;
-			for (rt = resp_type; rt->response; ++rt) {
-				psep = skip_prefix(eoc, rt->response);
-				if (psep)
-					break;
-			}
-
-			/* all legal parameters are of type RT_STRING */
-			if (!psep || rt->type != RT_STRING) {
-				dev_warn(cs->dev,
-					 "illegal RING parameter: '%s'\n",
-					 eoc);
-				return;
-			}
-
-			/* skip parameter value separator */
-			if (*psep++ != '=')
-				goto bad_param;
-
-			/* look up end of parameter */
-			eoc = strchr(psep, ',');
-			if (eoc)
-				*eoc++ = 0;
-
-			/* retrieve parameter value */
-			ptr = kstrdup(psep, GFP_ATOMIC);
-
-			/* queue event */
-			add_cid_event(cs, cid, rt->resp_code, ptr, 0);
-		}
-		break;
-
-	case RT_ZSAU:
-		/* check parameter separator */
-		if (!*eoc) {
-			/* no parameter */
-			add_cid_event(cs, cid, rt->resp_code, NULL, ZSAU_NONE);
-			break;
-		}
-		if (*eoc++ != '=')
-			goto bad_param;
-
-		/* look up parameter value */
-		for (zr = zsau_resp; zr->str; ++zr)
-			if (!strcmp(eoc, zr->str))
-				break;
-		if (!zr->str)
-			goto bad_param;
-
-		add_cid_event(cs, cid, rt->resp_code, NULL, zr->code);
-		break;
-
-	case RT_STRING:
-		/* check parameter separator */
-		if (*eoc++ != '=')
-			goto bad_param;
-
-		/* retrieve parameter value */
-		ptr = kstrdup(eoc, GFP_ATOMIC);
-
-		/* queue event */
-		add_cid_event(cs, cid, rt->resp_code, ptr, 0);
-		break;
-
-	case RT_ZCAU:
-		/* check parameter separators */
-		if (*eoc++ != '=')
-			goto bad_param;
-		psep = strchr(eoc, ',');
-		if (!psep)
-			goto bad_param;
-		*psep++ = 0;
-
-		/* decode parameter values */
-		if (kstrtou8(eoc, 16, &type) || kstrtou8(psep, 16, &value)) {
-			*--psep = ',';
-			goto bad_param;
-		}
-		parameter = (type << 8) | value;
-
-		add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
-		break;
-
-	case RT_NUMBER:
-		/* check parameter separator */
-		if (*eoc++ != '=')
-			goto bad_param;
-
-		/* decode parameter value */
-		if (kstrtoint(eoc, 10, &parameter))
-			goto bad_param;
-
-		/* special case ZDLE: set flag before queueing event */
-		if (rt->resp_code == RSP_ZDLE)
-			cs->dle = parameter;
-
-		add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
-		break;
-
-bad_param:
-		/* parameter unexpected, incomplete or malformed */
-		dev_warn(cs->dev, "bad parameter in response '%s'\n",
-			 cs->respdata);
-		add_cid_event(cs, cid, rt->resp_code, NULL, -1);
-		break;
-
-	default:
-		dev_err(cs->dev, "%s: internal error on '%s'\n",
-			__func__, cs->respdata);
-	}
-}
-EXPORT_SYMBOL_GPL(gigaset_handle_modem_response);
-
-/* disconnect_nobc
- * process closing of connection associated with given AT state structure
- * without B channel
- */
-static void disconnect_nobc(struct at_state_t **at_state_p,
-			    struct cardstate *cs)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cs->lock, flags);
-	++(*at_state_p)->seq_index;
-
-	/* revert to selected idle mode */
-	if (!cs->cidmode) {
-		cs->at_state.pending_commands |= PC_UMMODE;
-		gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
-		cs->commands_pending = 1;
-	}
-
-	/* check for and deallocate temporary AT state */
-	if (!list_empty(&(*at_state_p)->list)) {
-		list_del(&(*at_state_p)->list);
-		kfree(*at_state_p);
-		*at_state_p = NULL;
-	}
-
-	spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-/* disconnect_bc
- * process closing of connection associated with given AT state structure
- * and B channel
- */
-static void disconnect_bc(struct at_state_t *at_state,
-			  struct cardstate *cs, struct bc_state *bcs)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cs->lock, flags);
-	++at_state->seq_index;
-
-	/* revert to selected idle mode */
-	if (!cs->cidmode) {
-		cs->at_state.pending_commands |= PC_UMMODE;
-		gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
-		cs->commands_pending = 1;
-	}
-	spin_unlock_irqrestore(&cs->lock, flags);
-
-	/* invoke hardware specific handler */
-	cs->ops->close_bchannel(bcs);
-
-	/* notify LL */
-	if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
-		bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
-		gigaset_isdn_hupD(bcs);
-	}
-}
-
-/* get_free_channel
- * get a free AT state structure: either one of those associated with the
- * B channels of the Gigaset device, or if none of those is available,
- * a newly allocated one with bcs=NULL
- * The structure should be freed by calling disconnect_nobc() after use.
- */
-static inline struct at_state_t *get_free_channel(struct cardstate *cs,
-						  int cid)
-/* cids: >0: siemens-cid
- *        0: without cid
- *       -1: no cid assigned yet
- */
-{
-	unsigned long flags;
-	int i;
-	struct at_state_t *ret;
-
-	for (i = 0; i < cs->channels; ++i)
-		if (gigaset_get_channel(cs->bcs + i) >= 0) {
-			ret = &cs->bcs[i].at_state;
-			ret->cid = cid;
-			return ret;
-		}
-
-	spin_lock_irqsave(&cs->lock, flags);
-	ret = kmalloc(sizeof(struct at_state_t), GFP_ATOMIC);
-	if (ret) {
-		gigaset_at_init(ret, NULL, cs, cid);
-		list_add(&ret->list, &cs->temp_at_states);
-	}
-	spin_unlock_irqrestore(&cs->lock, flags);
-	return ret;
-}
-
-static void init_failed(struct cardstate *cs, int mode)
-{
-	int i;
-	struct at_state_t *at_state;
-
-	cs->at_state.pending_commands &= ~PC_INIT;
-	cs->mode = mode;
-	cs->mstate = MS_UNINITIALIZED;
-	gigaset_free_channels(cs);
-	for (i = 0; i < cs->channels; ++i) {
-		at_state = &cs->bcs[i].at_state;
-		if (at_state->pending_commands & PC_CID) {
-			at_state->pending_commands &= ~PC_CID;
-			at_state->pending_commands |= PC_NOCID;
-			cs->commands_pending = 1;
-		}
-	}
-}
-
-static void schedule_init(struct cardstate *cs, int state)
-{
-	if (cs->at_state.pending_commands & PC_INIT) {
-		gig_dbg(DEBUG_EVENT, "not scheduling PC_INIT again");
-		return;
-	}
-	cs->mstate = state;
-	cs->mode = M_UNKNOWN;
-	gigaset_block_channels(cs);
-	cs->at_state.pending_commands |= PC_INIT;
-	gig_dbg(DEBUG_EVENT, "Scheduling PC_INIT");
-	cs->commands_pending = 1;
-}
-
-/* send an AT command
- * adding the "AT" prefix, cid and DLE encapsulation as appropriate
- */
-static void send_command(struct cardstate *cs, const char *cmd,
-			 struct at_state_t *at_state)
-{
-	int cid = at_state->cid;
-	struct cmdbuf_t *cb;
-	size_t buflen;
-
-	buflen = strlen(cmd) + 12; /* DLE ( A T 1 2 3 4 5 <cmd> DLE ) \0 */
-	cb = kmalloc(sizeof(struct cmdbuf_t) + buflen, GFP_ATOMIC);
-	if (!cb) {
-		dev_err(cs->dev, "%s: out of memory\n", __func__);
-		return;
-	}
-	if (cid > 0 && cid <= 65535)
-		cb->len = snprintf(cb->buf, buflen,
-				   cs->dle ? "\020(AT%d%s\020)" : "AT%d%s",
-				   cid, cmd);
-	else
-		cb->len = snprintf(cb->buf, buflen,
-				   cs->dle ? "\020(AT%s\020)" : "AT%s",
-				   cmd);
-	cb->offset = 0;
-	cb->next = NULL;
-	cb->wake_tasklet = NULL;
-	cs->ops->write_cmd(cs, cb);
-}
-
-static struct at_state_t *at_state_from_cid(struct cardstate *cs, int cid)
-{
-	struct at_state_t *at_state;
-	int i;
-	unsigned long flags;
-
-	if (cid == 0)
-		return &cs->at_state;
-
-	for (i = 0; i < cs->channels; ++i)
-		if (cid == cs->bcs[i].at_state.cid)
-			return &cs->bcs[i].at_state;
-
-	spin_lock_irqsave(&cs->lock, flags);
-
-	list_for_each_entry(at_state, &cs->temp_at_states, list)
-		if (cid == at_state->cid) {
-			spin_unlock_irqrestore(&cs->lock, flags);
-			return at_state;
-		}
-
-	spin_unlock_irqrestore(&cs->lock, flags);
-
-	return NULL;
-}
-
-static void bchannel_down(struct bc_state *bcs)
-{
-	if (bcs->chstate & CHS_B_UP) {
-		bcs->chstate &= ~CHS_B_UP;
-		gigaset_isdn_hupB(bcs);
-	}
-
-	if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
-		bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
-		gigaset_isdn_hupD(bcs);
-	}
-
-	gigaset_free_channel(bcs);
-
-	gigaset_bcs_reinit(bcs);
-}
-
-static void bchannel_up(struct bc_state *bcs)
-{
-	if (bcs->chstate & CHS_B_UP) {
-		dev_notice(bcs->cs->dev, "%s: B channel already up\n",
-			   __func__);
-		return;
-	}
-
-	bcs->chstate |= CHS_B_UP;
-	gigaset_isdn_connB(bcs);
-}
-
-static void start_dial(struct at_state_t *at_state, void *data,
-		       unsigned seq_index)
-{
-	struct bc_state *bcs = at_state->bcs;
-	struct cardstate *cs = at_state->cs;
-	char **commands = data;
-	unsigned long flags;
-	int i;
-
-	bcs->chstate |= CHS_NOTIFY_LL;
-
-	spin_lock_irqsave(&cs->lock, flags);
-	if (at_state->seq_index != seq_index) {
-		spin_unlock_irqrestore(&cs->lock, flags);
-		goto error;
-	}
-	spin_unlock_irqrestore(&cs->lock, flags);
-
-	for (i = 0; i < AT_NUM; ++i) {
-		kfree(bcs->commands[i]);
-		bcs->commands[i] = commands[i];
-	}
-
-	at_state->pending_commands |= PC_CID;
-	gig_dbg(DEBUG_EVENT, "Scheduling PC_CID");
-	cs->commands_pending = 1;
-	return;
-
-error:
-	for (i = 0; i < AT_NUM; ++i) {
-		kfree(commands[i]);
-		commands[i] = NULL;
-	}
-	at_state->pending_commands |= PC_NOCID;
-	gig_dbg(DEBUG_EVENT, "Scheduling PC_NOCID");
-	cs->commands_pending = 1;
-	return;
-}
-
-static void start_accept(struct at_state_t *at_state)
-{
-	struct cardstate *cs = at_state->cs;
-	struct bc_state *bcs = at_state->bcs;
-	int i;
-
-	for (i = 0; i < AT_NUM; ++i) {
-		kfree(bcs->commands[i]);
-		bcs->commands[i] = NULL;
-	}
-
-	bcs->commands[AT_PROTO] = kmalloc(9, GFP_ATOMIC);
-	bcs->commands[AT_ISO] = kmalloc(9, GFP_ATOMIC);
-	if (!bcs->commands[AT_PROTO] || !bcs->commands[AT_ISO]) {
-		dev_err(at_state->cs->dev, "out of memory\n");
-		/* error reset */
-		at_state->pending_commands |= PC_HUP;
-		gig_dbg(DEBUG_EVENT, "Scheduling PC_HUP");
-		cs->commands_pending = 1;
-		return;
-	}
-
-	snprintf(bcs->commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
-	snprintf(bcs->commands[AT_ISO], 9, "^SISO=%u\r", bcs->channel + 1);
-
-	at_state->pending_commands |= PC_ACCEPT;
-	gig_dbg(DEBUG_EVENT, "Scheduling PC_ACCEPT");
-	cs->commands_pending = 1;
-}
-
-static void do_start(struct cardstate *cs)
-{
-	gigaset_free_channels(cs);
-
-	if (cs->mstate != MS_LOCKED)
-		schedule_init(cs, MS_INIT);
-
-	cs->isdn_up = 1;
-	gigaset_isdn_start(cs);
-
-	cs->waiting = 0;
-	wake_up(&cs->waitqueue);
-}
-
-static void finish_shutdown(struct cardstate *cs)
-{
-	if (cs->mstate != MS_LOCKED) {
-		cs->mstate = MS_UNINITIALIZED;
-		cs->mode = M_UNKNOWN;
-	}
-
-	/* Tell the LL that the device is not available .. */
-	if (cs->isdn_up) {
-		cs->isdn_up = 0;
-		gigaset_isdn_stop(cs);
-	}
-
-	/* The rest is done by cleanup_cs() in process context. */
-
-	cs->cmd_result = -ENODEV;
-	cs->waiting = 0;
-	wake_up(&cs->waitqueue);
-}
-
-static void do_shutdown(struct cardstate *cs)
-{
-	gigaset_block_channels(cs);
-
-	if (cs->mstate == MS_READY) {
-		cs->mstate = MS_SHUTDOWN;
-		cs->at_state.pending_commands |= PC_SHUTDOWN;
-		gig_dbg(DEBUG_EVENT, "Scheduling PC_SHUTDOWN");
-		cs->commands_pending = 1;
-	} else
-		finish_shutdown(cs);
-}
-
-static void do_stop(struct cardstate *cs)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cs->lock, flags);
-	cs->connected = 0;
-	spin_unlock_irqrestore(&cs->lock, flags);
-
-	do_shutdown(cs);
-}
-
-/* Entering cid mode or getting a cid failed:
- * try to initialize the device and try again.
- *
- * channel >= 0: getting cid for the channel failed
- * channel < 0:  entering cid mode failed
- *
- * returns 0 on success, <0 on failure
- */
-static int reinit_and_retry(struct cardstate *cs, int channel)
-{
-	int i;
-
-	if (--cs->retry_count <= 0)
-		return -EFAULT;
-
-	for (i = 0; i < cs->channels; ++i)
-		if (cs->bcs[i].at_state.cid > 0)
-			return -EBUSY;
-
-	if (channel < 0)
-		dev_warn(cs->dev,
-			 "Could not enter cid mode. Reinit device and try again.\n");
-	else {
-		dev_warn(cs->dev,
-			 "Could not get a call id. Reinit device and try again.\n");
-		cs->bcs[channel].at_state.pending_commands |= PC_CID;
-	}
-	schedule_init(cs, MS_INIT);
-	return 0;
-}
-
-static int at_state_invalid(struct cardstate *cs,
-			    struct at_state_t *test_ptr)
-{
-	unsigned long flags;
-	unsigned channel;
-	struct at_state_t *at_state;
-	int retval = 0;
-
-	spin_lock_irqsave(&cs->lock, flags);
-
-	if (test_ptr == &cs->at_state)
-		goto exit;
-
-	list_for_each_entry(at_state, &cs->temp_at_states, list)
-		if (at_state == test_ptr)
-			goto exit;
-
-	for (channel = 0; channel < cs->channels; ++channel)
-		if (&cs->bcs[channel].at_state == test_ptr)
-			goto exit;
-
-	retval = 1;
-exit:
-	spin_unlock_irqrestore(&cs->lock, flags);
-	return retval;
-}
-
-static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
-			 struct at_state_t *at_state)
-{
-	int retval;
-
-	retval = gigaset_isdn_icall(at_state);
-	switch (retval) {
-	case ICALL_ACCEPT:
-		break;
-	default:
-		dev_err(cs->dev, "internal error: disposition=%d\n", retval);
-		/* fall through */
-	case ICALL_IGNORE:
-	case ICALL_REJECT:
-		/* hang up actively
-		 * Device doc says that would reject the call.
-		 * In fact it doesn't.
-		 */
-		at_state->pending_commands |= PC_HUP;
-		cs->commands_pending = 1;
-		break;
-	}
-}
-
-static int do_lock(struct cardstate *cs)
-{
-	int mode;
-	int i;
-
-	switch (cs->mstate) {
-	case MS_UNINITIALIZED:
-	case MS_READY:
-		if (cs->cur_at_seq || !list_empty(&cs->temp_at_states) ||
-		    cs->at_state.pending_commands)
-			return -EBUSY;
-
-		for (i = 0; i < cs->channels; ++i)
-			if (cs->bcs[i].at_state.pending_commands)
-				return -EBUSY;
-
-		if (gigaset_get_channels(cs) < 0)
-			return -EBUSY;
-
-		break;
-	case MS_LOCKED:
-		break;
-	default:
-		return -EBUSY;
-	}
-
-	mode = cs->mode;
-	cs->mstate = MS_LOCKED;
-	cs->mode = M_UNKNOWN;
-
-	return mode;
-}
-
-static int do_unlock(struct cardstate *cs)
-{
-	if (cs->mstate != MS_LOCKED)
-		return -EINVAL;
-
-	cs->mstate = MS_UNINITIALIZED;
-	cs->mode = M_UNKNOWN;
-	gigaset_free_channels(cs);
-	if (cs->connected)
-		schedule_init(cs, MS_INIT);
-
-	return 0;
-}
-
-static void do_action(int action, struct cardstate *cs,
-		      struct bc_state *bcs,
-		      struct at_state_t **p_at_state, char **pp_command,
-		      int *p_genresp, int *p_resp_code,
-		      struct event_t *ev)
-{
-	struct at_state_t *at_state = *p_at_state;
-	struct bc_state *bcs2;
-	unsigned long flags;
-
-	int channel;
-
-	unsigned char *s, *e;
-	int i;
-	unsigned long val;
-
-	switch (action) {
-	case ACT_NOTHING:
-		break;
-	case ACT_TIMEOUT:
-		at_state->waiting = 1;
-		break;
-	case ACT_INIT:
-		cs->at_state.pending_commands &= ~PC_INIT;
-		cs->cur_at_seq = SEQ_NONE;
-		cs->mode = M_UNIMODEM;
-		spin_lock_irqsave(&cs->lock, flags);
-		if (!cs->cidmode) {
-			spin_unlock_irqrestore(&cs->lock, flags);
-			gigaset_free_channels(cs);
-			cs->mstate = MS_READY;
-			break;
-		}
-		spin_unlock_irqrestore(&cs->lock, flags);
-		cs->at_state.pending_commands |= PC_CIDMODE;
-		gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
-		cs->commands_pending = 1;
-		break;
-	case ACT_FAILINIT:
-		dev_warn(cs->dev, "Could not initialize the device.\n");
-		cs->dle = 0;
-		init_failed(cs, M_UNKNOWN);
-		cs->cur_at_seq = SEQ_NONE;
-		break;
-	case ACT_CONFIGMODE:
-		init_failed(cs, M_CONFIG);
-		cs->cur_at_seq = SEQ_NONE;
-		break;
-	case ACT_SETDLE1:
-		cs->dle = 1;
-		/* cs->inbuf[0].inputstate |= INS_command | INS_DLE_command; */
-		cs->inbuf[0].inputstate &=
-			~(INS_command | INS_DLE_command);
-		break;
-	case ACT_SETDLE0:
-		cs->dle = 0;
-		cs->inbuf[0].inputstate =
-			(cs->inbuf[0].inputstate & ~INS_DLE_command)
-			| INS_command;
-		break;
-	case ACT_CMODESET:
-		if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
-			gigaset_free_channels(cs);
-			cs->mstate = MS_READY;
-		}
-		cs->mode = M_CID;
-		cs->cur_at_seq = SEQ_NONE;
-		break;
-	case ACT_UMODESET:
-		cs->mode = M_UNIMODEM;
-		cs->cur_at_seq = SEQ_NONE;
-		break;
-	case ACT_FAILCMODE:
-		cs->cur_at_seq = SEQ_NONE;
-		if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
-			init_failed(cs, M_UNKNOWN);
-			break;
-		}
-		if (reinit_and_retry(cs, -1) < 0)
-			schedule_init(cs, MS_RECOVER);
-		break;
-	case ACT_FAILUMODE:
-		cs->cur_at_seq = SEQ_NONE;
-		schedule_init(cs, MS_RECOVER);
-		break;
-	case ACT_HUPMODEM:
-		/* send "+++" (hangup in unimodem mode) */
-		if (cs->connected) {
-			struct cmdbuf_t *cb;
-
-			cb = kmalloc(sizeof(struct cmdbuf_t) + 3, GFP_ATOMIC);
-			if (!cb) {
-				dev_err(cs->dev, "%s: out of memory\n",
-					__func__);
-				return;
-			}
-			memcpy(cb->buf, "+++", 3);
-			cb->len = 3;
-			cb->offset = 0;
-			cb->next = NULL;
-			cb->wake_tasklet = NULL;
-			cs->ops->write_cmd(cs, cb);
-		}
-		break;
-	case ACT_RING:
-		/* get fresh AT state structure for new CID */
-		at_state = get_free_channel(cs, ev->parameter);
-		if (!at_state) {
-			dev_warn(cs->dev,
-				 "RING ignored: could not allocate channel structure\n");
-			break;
-		}
-
-		/* initialize AT state structure
-		 * note that bcs may be NULL if no B channel is free
-		 */
-		at_state->ConState = 700;
-		for (i = 0; i < STR_NUM; ++i) {
-			kfree(at_state->str_var[i]);
-			at_state->str_var[i] = NULL;
-		}
-		at_state->int_var[VAR_ZCTP] = -1;
-
-		spin_lock_irqsave(&cs->lock, flags);
-		at_state->timer_expires = RING_TIMEOUT;
-		at_state->timer_active = 1;
-		spin_unlock_irqrestore(&cs->lock, flags);
-		break;
-	case ACT_ICALL:
-		handle_icall(cs, bcs, at_state);
-		break;
-	case ACT_FAILSDOWN:
-		dev_warn(cs->dev, "Could not shut down the device.\n");
-		/* fall through */
-	case ACT_FAKESDOWN:
-	case ACT_SDOWN:
-		cs->cur_at_seq = SEQ_NONE;
-		finish_shutdown(cs);
-		break;
-	case ACT_CONNECT:
-		if (cs->onechannel) {
-			at_state->pending_commands |= PC_DLE1;
-			cs->commands_pending = 1;
-			break;
-		}
-		bcs->chstate |= CHS_D_UP;
-		gigaset_isdn_connD(bcs);
-		cs->ops->init_bchannel(bcs);
-		break;
-	case ACT_DLE1:
-		cs->cur_at_seq = SEQ_NONE;
-		bcs = cs->bcs + cs->curchannel;
-
-		bcs->chstate |= CHS_D_UP;
-		gigaset_isdn_connD(bcs);
-		cs->ops->init_bchannel(bcs);
-		break;
-	case ACT_FAKEHUP:
-		at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
-		/* fall through */
-	case ACT_DISCONNECT:
-		cs->cur_at_seq = SEQ_NONE;
-		at_state->cid = -1;
-		if (!bcs) {
-			disconnect_nobc(p_at_state, cs);
-		} else if (cs->onechannel && cs->dle) {
-			/* Check for other open channels not needed:
-			 * DLE only used for M10x with one B channel.
-			 */
-			at_state->pending_commands |= PC_DLE0;
-			cs->commands_pending = 1;
-		} else {
-			disconnect_bc(at_state, cs, bcs);
-		}
-		break;
-	case ACT_FAKEDLE0:
-		at_state->int_var[VAR_ZDLE] = 0;
-		cs->dle = 0;
-		/* fall through */
-	case ACT_DLE0:
-		cs->cur_at_seq = SEQ_NONE;
-		bcs2 = cs->bcs + cs->curchannel;
-		disconnect_bc(&bcs2->at_state, cs, bcs2);
-		break;
-	case ACT_ABORTHUP:
-		cs->cur_at_seq = SEQ_NONE;
-		dev_warn(cs->dev, "Could not hang up.\n");
-		at_state->cid = -1;
-		if (!bcs)
-			disconnect_nobc(p_at_state, cs);
-		else if (cs->onechannel)
-			at_state->pending_commands |= PC_DLE0;
-		else
-			disconnect_bc(at_state, cs, bcs);
-		schedule_init(cs, MS_RECOVER);
-		break;
-	case ACT_FAILDLE0:
-		cs->cur_at_seq = SEQ_NONE;
-		dev_warn(cs->dev, "Error leaving DLE mode.\n");
-		cs->dle = 0;
-		bcs2 = cs->bcs + cs->curchannel;
-		disconnect_bc(&bcs2->at_state, cs, bcs2);
-		schedule_init(cs, MS_RECOVER);
-		break;
-	case ACT_FAILDLE1:
-		cs->cur_at_seq = SEQ_NONE;
-		dev_warn(cs->dev,
-			 "Could not enter DLE mode. Trying to hang up.\n");
-		channel = cs->curchannel;
-		cs->bcs[channel].at_state.pending_commands |= PC_HUP;
-		cs->commands_pending = 1;
-		break;
-
-	case ACT_CID: /* got cid; start dialing */
-		cs->cur_at_seq = SEQ_NONE;
-		channel = cs->curchannel;
-		if (ev->parameter > 0 && ev->parameter <= 65535) {
-			cs->bcs[channel].at_state.cid = ev->parameter;
-			cs->bcs[channel].at_state.pending_commands |=
-				PC_DIAL;
-			cs->commands_pending = 1;
-			break;
-		}
-		/* fall through - bad cid */
-	case ACT_FAILCID:
-		cs->cur_at_seq = SEQ_NONE;
-		channel = cs->curchannel;
-		if (reinit_and_retry(cs, channel) < 0) {
-			dev_warn(cs->dev,
-				 "Could not get a call ID. Cannot dial.\n");
-			bcs2 = cs->bcs + channel;
-			disconnect_bc(&bcs2->at_state, cs, bcs2);
-		}
-		break;
-	case ACT_ABORTCID:
-		cs->cur_at_seq = SEQ_NONE;
-		bcs2 = cs->bcs + cs->curchannel;
-		disconnect_bc(&bcs2->at_state, cs, bcs2);
-		break;
-
-	case ACT_DIALING:
-	case ACT_ACCEPTED:
-		cs->cur_at_seq = SEQ_NONE;
-		break;
-
-	case ACT_ABORTACCEPT:	/* hangup/error/timeout during ICALL procssng */
-		if (bcs)
-			disconnect_bc(at_state, cs, bcs);
-		else
-			disconnect_nobc(p_at_state, cs);
-		break;
-
-	case ACT_ABORTDIAL:	/* error/timeout during dial preparation */
-		cs->cur_at_seq = SEQ_NONE;
-		at_state->pending_commands |= PC_HUP;
-		cs->commands_pending = 1;
-		break;
-
-	case ACT_REMOTEREJECT:	/* DISCONNECT_IND after dialling */
-	case ACT_CONNTIMEOUT:	/* timeout waiting for ZSAU=ACTIVE */
-	case ACT_REMOTEHUP:	/* DISCONNECT_IND with established connection */
-		at_state->pending_commands |= PC_HUP;
-		cs->commands_pending = 1;
-		break;
-	case ACT_GETSTRING: /* warning: RING, ZDLE, ...
-			       are not handled properly anymore */
-		at_state->getstring = 1;
-		break;
-	case ACT_SETVER:
-		if (!ev->ptr) {
-			*p_genresp = 1;
-			*p_resp_code = RSP_ERROR;
-			break;
-		}
-		s = ev->ptr;
-
-		if (!strcmp(s, "OK")) {
-			/* OK without version string: assume old response */
-			*p_genresp = 1;
-			*p_resp_code = RSP_NONE;
-			break;
-		}
-
-		for (i = 0; i < 4; ++i) {
-			val = simple_strtoul(s, (char **) &e, 10);
-			if (val > INT_MAX || e == s)
-				break;
-			if (i == 3) {
-				if (*e)
-					break;
-			} else if (*e != '.')
-				break;
-			else
-				s = e + 1;
-			cs->fwver[i] = val;
-		}
-		if (i != 4) {
-			*p_genresp = 1;
-			*p_resp_code = RSP_ERROR;
-			break;
-		}
-		cs->gotfwver = 0;
-		break;
-	case ACT_GOTVER:
-		if (cs->gotfwver == 0) {
-			cs->gotfwver = 1;
-			gig_dbg(DEBUG_EVENT,
-				"firmware version %02d.%03d.%02d.%02d",
-				cs->fwver[0], cs->fwver[1],
-				cs->fwver[2], cs->fwver[3]);
-			break;
-		}
-		/* fall through */
-	case ACT_FAILVER:
-		cs->gotfwver = -1;
-		dev_err(cs->dev, "could not read firmware version.\n");
-		break;
-	case ACT_ERROR:
-		gig_dbg(DEBUG_ANY, "%s: ERROR response in ConState %d",
-			__func__, at_state->ConState);
-		cs->cur_at_seq = SEQ_NONE;
-		break;
-	case ACT_DEBUG:
-		gig_dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d",
-			__func__, ev->type, at_state->ConState);
-		break;
-	case ACT_WARN:
-		dev_warn(cs->dev, "%s: resp_code %d in ConState %d!\n",
-			 __func__, ev->type, at_state->ConState);
-		break;
-	case ACT_ZCAU:
-		dev_warn(cs->dev, "cause code %04x in connection state %d.\n",
-			 ev->parameter, at_state->ConState);
-		break;
-
-	/* events from the LL */
-
-	case ACT_DIAL:
-		if (!ev->ptr) {
-			*p_genresp = 1;
-			*p_resp_code = RSP_ERROR;
-			break;
-		}
-		start_dial(at_state, ev->ptr, ev->parameter);
-		break;
-	case ACT_ACCEPT:
-		start_accept(at_state);
-		break;
-	case ACT_HUP:
-		at_state->pending_commands |= PC_HUP;
-		gig_dbg(DEBUG_EVENT, "Scheduling PC_HUP");
-		cs->commands_pending = 1;
-		break;
-
-	/* hotplug events */
-
-	case ACT_STOP:
-		do_stop(cs);
-		break;
-	case ACT_START:
-		do_start(cs);
-		break;
-
-	/* events from the interface */
-
-	case ACT_IF_LOCK:
-		cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
-		cs->waiting = 0;
-		wake_up(&cs->waitqueue);
-		break;
-	case ACT_IF_VER:
-		if (ev->parameter != 0)
-			cs->cmd_result = -EINVAL;
-		else if (cs->gotfwver != 1) {
-			cs->cmd_result = -ENOENT;
-		} else {
-			memcpy(ev->arg, cs->fwver, sizeof cs->fwver);
-			cs->cmd_result = 0;
-		}
-		cs->waiting = 0;
-		wake_up(&cs->waitqueue);
-		break;
-
-	/* events from the proc file system */
-
-	case ACT_PROC_CIDMODE:
-		spin_lock_irqsave(&cs->lock, flags);
-		if (ev->parameter != cs->cidmode) {
-			cs->cidmode = ev->parameter;
-			if (ev->parameter) {
-				cs->at_state.pending_commands |= PC_CIDMODE;
-				gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
-			} else {
-				cs->at_state.pending_commands |= PC_UMMODE;
-				gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
-			}
-			cs->commands_pending = 1;
-		}
-		spin_unlock_irqrestore(&cs->lock, flags);
-		cs->waiting = 0;
-		wake_up(&cs->waitqueue);
-		break;
-
-	/* events from the hardware drivers */
-
-	case ACT_NOTIFY_BC_DOWN:
-		bchannel_down(bcs);
-		break;
-	case ACT_NOTIFY_BC_UP:
-		bchannel_up(bcs);
-		break;
-	case ACT_SHUTDOWN:
-		do_shutdown(cs);
-		break;
-
-
-	default:
-		if (action >= ACT_CMD && action < ACT_CMD + AT_NUM) {
-			*pp_command = at_state->bcs->commands[action - ACT_CMD];
-			if (!*pp_command) {
-				*p_genresp = 1;
-				*p_resp_code = RSP_NULL;
-			}
-		} else
-			dev_err(cs->dev, "%s: action==%d!\n", __func__, action);
-	}
-}
-
-/* State machine to do the calling and hangup procedure */
-static void process_event(struct cardstate *cs, struct event_t *ev)
-{
-	struct bc_state *bcs;
-	char *p_command = NULL;
-	struct reply_t *rep;
-	int rcode;
-	int genresp = 0;
-	int resp_code = RSP_ERROR;
-	struct at_state_t *at_state;
-	int index;
-	int curact;
-	unsigned long flags;
-
-	if (ev->cid >= 0) {
-		at_state = at_state_from_cid(cs, ev->cid);
-		if (!at_state) {
-			gig_dbg(DEBUG_EVENT, "event %d for invalid cid %d",
-				ev->type, ev->cid);
-			gigaset_add_event(cs, &cs->at_state, RSP_WRONG_CID,
-					  NULL, 0, NULL);
-			return;
-		}
-	} else {
-		at_state = ev->at_state;
-		if (at_state_invalid(cs, at_state)) {
-			gig_dbg(DEBUG_EVENT, "event for invalid at_state %p",
-				at_state);
-			return;
-		}
-	}
-
-	gig_dbg(DEBUG_EVENT, "connection state %d, event %d",
-		at_state->ConState, ev->type);
-
-	bcs = at_state->bcs;
-
-	/* Setting the pointer to the dial array */
-	rep = at_state->replystruct;
-
-	spin_lock_irqsave(&cs->lock, flags);
-	if (ev->type == EV_TIMEOUT) {
-		if (ev->parameter != at_state->timer_index
-		    || !at_state->timer_active) {
-			ev->type = RSP_NONE; /* old timeout */
-			gig_dbg(DEBUG_EVENT, "old timeout");
-		} else {
-			if (at_state->waiting)
-				gig_dbg(DEBUG_EVENT, "stopped waiting");
-			else
-				gig_dbg(DEBUG_EVENT, "timeout occurred");
-		}
-	}
-	spin_unlock_irqrestore(&cs->lock, flags);
-
-	/* if the response belongs to a variable in at_state->int_var[VAR_XXXX]
-	   or at_state->str_var[STR_XXXX], set it */
-	if (ev->type >= RSP_VAR && ev->type < RSP_VAR + VAR_NUM) {
-		index = ev->type - RSP_VAR;
-		at_state->int_var[index] = ev->parameter;
-	} else if (ev->type >= RSP_STR && ev->type < RSP_STR + STR_NUM) {
-		index = ev->type - RSP_STR;
-		kfree(at_state->str_var[index]);
-		at_state->str_var[index] = ev->ptr;
-		ev->ptr = NULL; /* prevent process_events() from
-				   deallocating ptr */
-	}
-
-	if (ev->type == EV_TIMEOUT || ev->type == RSP_STRING)
-		at_state->getstring = 0;
-
-	/* Search row in dial array which matches modem response and current
-	   constate */
-	for (;; rep++) {
-		rcode = rep->resp_code;
-		if (rcode == RSP_LAST) {
-			/* found nothing...*/
-			dev_warn(cs->dev, "%s: rcode=RSP_LAST: "
-				 "resp_code %d in ConState %d!\n",
-				 __func__, ev->type, at_state->ConState);
-			return;
-		}
-		if ((rcode == RSP_ANY || rcode == ev->type)
-		    && ((int) at_state->ConState >= rep->min_ConState)
-		    && (rep->max_ConState < 0
-			|| (int) at_state->ConState <= rep->max_ConState)
-		    && (rep->parameter < 0 || rep->parameter == ev->parameter))
-			break;
-	}
-
-	p_command = rep->command;
-
-	at_state->waiting = 0;
-	for (curact = 0; curact < MAXACT; ++curact) {
-		/* The row tells us what we should do  ..
-		 */
-		do_action(rep->action[curact], cs, bcs, &at_state, &p_command,
-			  &genresp, &resp_code, ev);
-		if (!at_state)
-			/* at_state destroyed by disconnect */
-			return;
-	}
-
-	/* Jump to the next con-state regarding the array */
-	if (rep->new_ConState >= 0)
-		at_state->ConState = rep->new_ConState;
-
-	if (genresp) {
-		spin_lock_irqsave(&cs->lock, flags);
-		at_state->timer_expires = 0;
-		at_state->timer_active = 0;
-		spin_unlock_irqrestore(&cs->lock, flags);
-		gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL);
-	} else {
-		/* Send command to modem if not NULL... */
-		if (p_command) {
-			if (cs->connected)
-				send_command(cs, p_command, at_state);
-			else
-				gigaset_add_event(cs, at_state, RSP_NODEV,
-						  NULL, 0, NULL);
-		}
-
-		spin_lock_irqsave(&cs->lock, flags);
-		if (!rep->timeout) {
-			at_state->timer_expires = 0;
-			at_state->timer_active = 0;
-		} else if (rep->timeout > 0) { /* new timeout */
-			at_state->timer_expires = rep->timeout * 10;
-			at_state->timer_active = 1;
-			++at_state->timer_index;
-		}
-		spin_unlock_irqrestore(&cs->lock, flags);
-	}
-}
-
-static void schedule_sequence(struct cardstate *cs,
-			      struct at_state_t *at_state, int sequence)
-{
-	cs->cur_at_seq = sequence;
-	gigaset_add_event(cs, at_state, RSP_INIT, NULL, sequence, NULL);
-}
-
-static void process_command_flags(struct cardstate *cs)
-{
-	struct at_state_t *at_state = NULL;
-	struct bc_state *bcs;
-	int i;
-	int sequence;
-	unsigned long flags;
-
-	cs->commands_pending = 0;
-
-	if (cs->cur_at_seq) {
-		gig_dbg(DEBUG_EVENT, "not searching scheduled commands: busy");
-		return;
-	}
-
-	gig_dbg(DEBUG_EVENT, "searching scheduled commands");
-
-	sequence = SEQ_NONE;
-
-	/* clear pending_commands and hangup channels on shutdown */
-	if (cs->at_state.pending_commands & PC_SHUTDOWN) {
-		cs->at_state.pending_commands &= ~PC_CIDMODE;
-		for (i = 0; i < cs->channels; ++i) {
-			bcs = cs->bcs + i;
-			at_state = &bcs->at_state;
-			at_state->pending_commands &=
-				~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
-			if (at_state->cid > 0)
-				at_state->pending_commands |= PC_HUP;
-			if (at_state->pending_commands & PC_CID) {
-				at_state->pending_commands |= PC_NOCID;
-				at_state->pending_commands &= ~PC_CID;
-			}
-		}
-	}
-
-	/* clear pending_commands and hangup channels on reset */
-	if (cs->at_state.pending_commands & PC_INIT) {
-		cs->at_state.pending_commands &= ~PC_CIDMODE;
-		for (i = 0; i < cs->channels; ++i) {
-			bcs = cs->bcs + i;
-			at_state = &bcs->at_state;
-			at_state->pending_commands &=
-				~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
-			if (at_state->cid > 0)
-				at_state->pending_commands |= PC_HUP;
-			if (cs->mstate == MS_RECOVER) {
-				if (at_state->pending_commands & PC_CID) {
-					at_state->pending_commands |= PC_NOCID;
-					at_state->pending_commands &= ~PC_CID;
-				}
-			}
-		}
-	}
-
-	/* only switch back to unimodem mode if no commands are pending and
-	 * no channels are up */
-	spin_lock_irqsave(&cs->lock, flags);
-	if (cs->at_state.pending_commands == PC_UMMODE
-	    && !cs->cidmode
-	    && list_empty(&cs->temp_at_states)
-	    && cs->mode == M_CID) {
-		sequence = SEQ_UMMODE;
-		at_state = &cs->at_state;
-		for (i = 0; i < cs->channels; ++i) {
-			bcs = cs->bcs + i;
-			if (bcs->at_state.pending_commands ||
-			    bcs->at_state.cid > 0) {
-				sequence = SEQ_NONE;
-				break;
-			}
-		}
-	}
-	spin_unlock_irqrestore(&cs->lock, flags);
-	cs->at_state.pending_commands &= ~PC_UMMODE;
-	if (sequence != SEQ_NONE) {
-		schedule_sequence(cs, at_state, sequence);
-		return;
-	}
-
-	for (i = 0; i < cs->channels; ++i) {
-		bcs = cs->bcs + i;
-		if (bcs->at_state.pending_commands & PC_HUP) {
-			if (cs->dle) {
-				cs->curchannel = bcs->channel;
-				schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
-				return;
-			}
-			bcs->at_state.pending_commands &= ~PC_HUP;
-			if (bcs->at_state.pending_commands & PC_CID) {
-				/* not yet dialing: PC_NOCID is sufficient */
-				bcs->at_state.pending_commands |= PC_NOCID;
-				bcs->at_state.pending_commands &= ~PC_CID;
-			} else {
-				schedule_sequence(cs, &bcs->at_state, SEQ_HUP);
-				return;
-			}
-		}
-		if (bcs->at_state.pending_commands & PC_NOCID) {
-			bcs->at_state.pending_commands &= ~PC_NOCID;
-			cs->curchannel = bcs->channel;
-			schedule_sequence(cs, &cs->at_state, SEQ_NOCID);
-			return;
-		} else if (bcs->at_state.pending_commands & PC_DLE0) {
-			bcs->at_state.pending_commands &= ~PC_DLE0;
-			cs->curchannel = bcs->channel;
-			schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
-			return;
-		}
-	}
-
-	list_for_each_entry(at_state, &cs->temp_at_states, list)
-		if (at_state->pending_commands & PC_HUP) {
-			at_state->pending_commands &= ~PC_HUP;
-			schedule_sequence(cs, at_state, SEQ_HUP);
-			return;
-		}
-
-	if (cs->at_state.pending_commands & PC_INIT) {
-		cs->at_state.pending_commands &= ~PC_INIT;
-		cs->dle = 0;
-		cs->inbuf->inputstate = INS_command;
-		schedule_sequence(cs, &cs->at_state, SEQ_INIT);
-		return;
-	}
-	if (cs->at_state.pending_commands & PC_SHUTDOWN) {
-		cs->at_state.pending_commands &= ~PC_SHUTDOWN;
-		schedule_sequence(cs, &cs->at_state, SEQ_SHUTDOWN);
-		return;
-	}
-	if (cs->at_state.pending_commands & PC_CIDMODE) {
-		cs->at_state.pending_commands &= ~PC_CIDMODE;
-		if (cs->mode == M_UNIMODEM) {
-			cs->retry_count = 1;
-			schedule_sequence(cs, &cs->at_state, SEQ_CIDMODE);
-			return;
-		}
-	}
-
-	for (i = 0; i < cs->channels; ++i) {
-		bcs = cs->bcs + i;
-		if (bcs->at_state.pending_commands & PC_DLE1) {
-			bcs->at_state.pending_commands &= ~PC_DLE1;
-			cs->curchannel = bcs->channel;
-			schedule_sequence(cs, &cs->at_state, SEQ_DLE1);
-			return;
-		}
-		if (bcs->at_state.pending_commands & PC_ACCEPT) {
-			bcs->at_state.pending_commands &= ~PC_ACCEPT;
-			schedule_sequence(cs, &bcs->at_state, SEQ_ACCEPT);
-			return;
-		}
-		if (bcs->at_state.pending_commands & PC_DIAL) {
-			bcs->at_state.pending_commands &= ~PC_DIAL;
-			schedule_sequence(cs, &bcs->at_state, SEQ_DIAL);
-			return;
-		}
-		if (bcs->at_state.pending_commands & PC_CID) {
-			switch (cs->mode) {
-			case M_UNIMODEM:
-				cs->at_state.pending_commands |= PC_CIDMODE;
-				gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
-				cs->commands_pending = 1;
-				return;
-			case M_UNKNOWN:
-				schedule_init(cs, MS_INIT);
-				return;
-			}
-			bcs->at_state.pending_commands &= ~PC_CID;
-			cs->curchannel = bcs->channel;
-			cs->retry_count = 2;
-			schedule_sequence(cs, &cs->at_state, SEQ_CID);
-			return;
-		}
-	}
-}
-
-static void process_events(struct cardstate *cs)
-{
-	struct event_t *ev;
-	unsigned head, tail;
-	int i;
-	int check_flags = 0;
-	int was_busy;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cs->ev_lock, flags);
-	head = cs->ev_head;
-
-	for (i = 0; i < 2 * MAX_EVENTS; ++i) {
-		tail = cs->ev_tail;
-		if (tail == head) {
-			if (!check_flags && !cs->commands_pending)
-				break;
-			check_flags = 0;
-			spin_unlock_irqrestore(&cs->ev_lock, flags);
-			process_command_flags(cs);
-			spin_lock_irqsave(&cs->ev_lock, flags);
-			tail = cs->ev_tail;
-			if (tail == head) {
-				if (!cs->commands_pending)
-					break;
-				continue;
-			}
-		}
-
-		ev = cs->events + head;
-		was_busy = cs->cur_at_seq != SEQ_NONE;
-		spin_unlock_irqrestore(&cs->ev_lock, flags);
-		process_event(cs, ev);
-		spin_lock_irqsave(&cs->ev_lock, flags);
-		kfree(ev->ptr);
-		ev->ptr = NULL;
-		if (was_busy && cs->cur_at_seq == SEQ_NONE)
-			check_flags = 1;
-
-		head = (head + 1) % MAX_EVENTS;
-		cs->ev_head = head;
-	}
-
-	spin_unlock_irqrestore(&cs->ev_lock, flags);
-
-	if (i == 2 * MAX_EVENTS) {
-		dev_err(cs->dev,
-			"infinite loop in process_events; aborting.\n");
-	}
-}
-
-/* tasklet scheduled on any event received from the Gigaset device
- * parameter:
- *	data	ISDN controller state structure
- */
-void gigaset_handle_event(unsigned long data)
-{
-	struct cardstate *cs = (struct cardstate *) data;
-
-	/* handle incoming data on control/common channel */
-	if (cs->inbuf->head != cs->inbuf->tail) {
-		gig_dbg(DEBUG_INTR, "processing new data");
-		cs->ops->handle_input(cs->inbuf);
-	}
-
-	process_events(cs);
-}
diff --git a/drivers/staging/isdn/gigaset/gigaset.h b/drivers/staging/isdn/gigaset/gigaset.h
deleted file mode 100644
index 0ecc2b5..0000000
--- a/drivers/staging/isdn/gigaset/gigaset.h
+++ /dev/null
@@ -1,827 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Siemens Gigaset 307x driver
- * Common header file for all connection variants
- *
- * Written by Stefan Eilers
- *        and Hansjoerg Lipp <hjlipp@web.de>
- *
- * =====================================================================
- * =====================================================================
- */
-
-#ifndef GIGASET_H
-#define GIGASET_H
-
-/* define global prefix for pr_ macros in linux/kernel.h */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/ppp_defs.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/list.h>
-#include <linux/atomic.h>
-
-#define GIG_VERSION {0, 5, 0, 0}
-#define GIG_COMPAT  {0, 4, 0, 0}
-
-#define MAX_REC_PARAMS 10	/* Max. number of params in response string */
-#define MAX_RESP_SIZE 511	/* Max. size of a response string */
-
-#define MAX_EVENTS 64		/* size of event queue */
-
-#define RBUFSIZE 8192
-
-#define GIG_TICK 100		/* in milliseconds */
-
-/* timeout values (unit: 1 sec) */
-#define INIT_TIMEOUT 1
-
-/* timeout values (unit: 0.1 sec) */
-#define RING_TIMEOUT 3		/* for additional parameters to RING */
-#define BAS_TIMEOUT 20		/* for response to Base USB ops */
-#define ATRDY_TIMEOUT 3		/* for HD_READY_SEND_ATDATA */
-
-#define BAS_RETRY 3		/* max. retries for base USB ops */
-
-#define MAXACT 3
-
-extern int gigaset_debuglevel;	/* "needs" cast to (enum debuglevel) */
-
-/* debug flags, combine by adding/bitwise OR */
-enum debuglevel {
-	DEBUG_INTR	  = 0x00008, /* interrupt processing */
-	DEBUG_CMD	  = 0x00020, /* sent/received LL commands */
-	DEBUG_STREAM	  = 0x00040, /* application data stream I/O events */
-	DEBUG_STREAM_DUMP = 0x00080, /* application data stream content */
-	DEBUG_LLDATA	  = 0x00100, /* sent/received LL data */
-	DEBUG_EVENT	  = 0x00200, /* event processing */
-	DEBUG_HDLC	  = 0x00800, /* M10x HDLC processing */
-	DEBUG_CHANNEL	  = 0x01000, /* channel allocation/deallocation */
-	DEBUG_TRANSCMD	  = 0x02000, /* AT-COMMANDS+RESPONSES */
-	DEBUG_MCMD	  = 0x04000, /* COMMANDS THAT ARE SENT VERY OFTEN */
-	DEBUG_INIT	  = 0x08000, /* (de)allocation+initialization of data
-					structures */
-	DEBUG_SUSPEND	  = 0x10000, /* suspend/resume processing */
-	DEBUG_OUTPUT	  = 0x20000, /* output to device */
-	DEBUG_ISO	  = 0x40000, /* isochronous transfers */
-	DEBUG_IF	  = 0x80000, /* character device operations */
-	DEBUG_USBREQ	  = 0x100000, /* USB communication (except payload
-					 data) */
-	DEBUG_LOCKCMD	  = 0x200000, /* AT commands and responses when
-					 MS_LOCKED */
-
-	DEBUG_ANY	  = 0x3fffff, /* print message if any of the others is
-					 activated */
-};
-
-#ifdef CONFIG_GIGASET_DEBUG
-
-#define gig_dbg(level, format, arg...)					\
-	do {								\
-		if (unlikely(((enum debuglevel)gigaset_debuglevel) & (level))) \
-			printk(KERN_DEBUG KBUILD_MODNAME ": " format "\n", \
-			       ## arg);					\
-	} while (0)
-#define DEBUG_DEFAULT (DEBUG_TRANSCMD | DEBUG_CMD | DEBUG_USBREQ)
-
-#else
-
-#define gig_dbg(level, format, arg...) do {} while (0)
-#define DEBUG_DEFAULT 0
-
-#endif
-
-void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
-			size_t len, const unsigned char *buf);
-
-/* connection state */
-#define ZSAU_NONE			0
-#define ZSAU_PROCEEDING			1
-#define ZSAU_CALL_DELIVERED		2
-#define ZSAU_ACTIVE			3
-#define ZSAU_DISCONNECT_IND		4
-#define ZSAU_NULL			5
-#define ZSAU_DISCONNECT_REQ		6
-#define ZSAU_UNKNOWN			-1
-
-/* USB control transfer requests */
-#define OUT_VENDOR_REQ	(USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
-#define IN_VENDOR_REQ	(USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
-
-/* interrupt pipe messages */
-#define HD_B1_FLOW_CONTROL		0x80
-#define HD_B2_FLOW_CONTROL		0x81
-#define HD_RECEIVEATDATA_ACK		(0x35)		/* 3070 */
-#define HD_READY_SEND_ATDATA		(0x36)		/* 3070 */
-#define HD_OPEN_ATCHANNEL_ACK		(0x37)		/* 3070 */
-#define HD_CLOSE_ATCHANNEL_ACK		(0x38)		/* 3070 */
-#define HD_DEVICE_INIT_OK		(0x11)		/* ISurf USB + 3070 */
-#define HD_OPEN_B1CHANNEL_ACK		(0x51)		/* ISurf USB + 3070 */
-#define HD_OPEN_B2CHANNEL_ACK		(0x52)		/* ISurf USB + 3070 */
-#define HD_CLOSE_B1CHANNEL_ACK		(0x53)		/* ISurf USB + 3070 */
-#define HD_CLOSE_B2CHANNEL_ACK		(0x54)		/* ISurf USB + 3070 */
-#define HD_SUSPEND_END			(0x61)		/* ISurf USB */
-#define HD_RESET_INTERRUPT_PIPE_ACK	(0xFF)		/* ISurf USB + 3070 */
-
-/* control requests */
-#define	HD_OPEN_B1CHANNEL		(0x23)		/* ISurf USB + 3070 */
-#define	HD_CLOSE_B1CHANNEL		(0x24)		/* ISurf USB + 3070 */
-#define	HD_OPEN_B2CHANNEL		(0x25)		/* ISurf USB + 3070 */
-#define	HD_CLOSE_B2CHANNEL		(0x26)		/* ISurf USB + 3070 */
-#define HD_RESET_INTERRUPT_PIPE		(0x27)		/* ISurf USB + 3070 */
-#define	HD_DEVICE_INIT_ACK		(0x34)		/* ISurf USB + 3070 */
-#define	HD_WRITE_ATMESSAGE		(0x12)		/* 3070 */
-#define	HD_READ_ATMESSAGE		(0x13)		/* 3070 */
-#define	HD_OPEN_ATCHANNEL		(0x28)		/* 3070 */
-#define	HD_CLOSE_ATCHANNEL		(0x29)		/* 3070 */
-
-/* number of B channels supported by base driver */
-#define BAS_CHANNELS	2
-
-/* USB frames for isochronous transfer */
-#define BAS_FRAMETIME	1	/* number of milliseconds between frames */
-#define BAS_NUMFRAMES	8	/* number of frames per URB */
-#define BAS_MAXFRAME	16	/* allocated bytes per frame */
-#define BAS_NORMFRAME	8	/* send size without flow control */
-#define BAS_HIGHFRAME	10	/* "    "    with positive flow control */
-#define BAS_LOWFRAME	5	/* "    "    with negative flow control */
-#define BAS_CORRFRAMES	4	/* flow control multiplier */
-
-#define BAS_INBUFSIZE	(BAS_MAXFRAME * BAS_NUMFRAMES)	/* size of isoc in buf
-							 * per URB */
-#define BAS_OUTBUFSIZE	4096		/* size of common isoc out buffer */
-#define BAS_OUTBUFPAD	BAS_MAXFRAME	/* size of pad area for isoc out buf */
-
-#define BAS_INURBS	3
-#define BAS_OUTURBS	3
-
-/* variable commands in struct bc_state */
-#define AT_ISO		0
-#define AT_DIAL		1
-#define AT_MSN		2
-#define AT_BC		3
-#define AT_PROTO	4
-#define AT_TYPE		5
-#define AT_CLIP		6
-/* total number */
-#define AT_NUM		7
-
-/* variables in struct at_state_t */
-/* - numeric */
-#define VAR_ZSAU	0
-#define VAR_ZDLE	1
-#define VAR_ZCTP	2
-/* total number */
-#define VAR_NUM		3
-/* - string */
-#define STR_NMBR	0
-#define STR_ZCPN	1
-#define STR_ZCON	2
-#define STR_ZBC		3
-#define STR_ZHLC	4
-/* total number */
-#define STR_NUM		5
-
-/* event types */
-#define EV_TIMEOUT	-105
-#define EV_IF_VER	-106
-#define EV_PROC_CIDMODE	-107
-#define EV_SHUTDOWN	-108
-#define EV_START	-110
-#define EV_STOP		-111
-#define EV_IF_LOCK	-112
-#define EV_ACCEPT	-114
-#define EV_DIAL		-115
-#define EV_HUP		-116
-#define EV_BC_OPEN	-117
-#define EV_BC_CLOSED	-118
-
-/* input state */
-#define INS_command	0x0001	/* receiving messages (not payload data) */
-#define INS_DLE_char	0x0002	/* DLE flag received (in DLE mode) */
-#define INS_byte_stuff	0x0004
-#define INS_have_data	0x0008
-#define INS_DLE_command	0x0020	/* DLE message start (<DLE> X) received */
-#define INS_flag_hunt	0x0040
-
-/* channel state */
-#define CHS_D_UP	0x01
-#define CHS_B_UP	0x02
-#define CHS_NOTIFY_LL	0x04
-
-#define ICALL_REJECT	0
-#define ICALL_ACCEPT	1
-#define ICALL_IGNORE	2
-
-/* device state */
-#define MS_UNINITIALIZED	0
-#define MS_INIT			1
-#define MS_LOCKED		2
-#define MS_SHUTDOWN		3
-#define MS_RECOVER		4
-#define MS_READY		5
-
-/* mode */
-#define M_UNKNOWN	0
-#define M_CONFIG	1
-#define M_UNIMODEM	2
-#define M_CID		3
-
-/* start mode */
-#define SM_LOCKED	0
-#define SM_ISDN		1 /* default */
-
-/* layer 2 protocols (AT^SBPR=...) */
-#define L2_BITSYNC	0
-#define L2_HDLC		1
-#define L2_VOICE	2
-
-struct gigaset_ops;
-struct gigaset_driver;
-
-struct usb_cardstate;
-struct ser_cardstate;
-struct bas_cardstate;
-
-struct bc_state;
-struct usb_bc_state;
-struct ser_bc_state;
-struct bas_bc_state;
-
-struct reply_t {
-	int	resp_code;	/* RSP_XXXX */
-	int	min_ConState;	/* <0 => ignore */
-	int	max_ConState;	/* <0 => ignore */
-	int	parameter;	/* e.g. ZSAU_XXXX <0: ignore*/
-	int	new_ConState;	/* <0 => ignore */
-	int	timeout;	/* >0 => *HZ; <=0 => TOUT_XXXX*/
-	int	action[MAXACT];	/* ACT_XXXX */
-	char	*command;	/* NULL==none */
-};
-
-extern struct reply_t gigaset_tab_cid[];
-extern struct reply_t gigaset_tab_nocid[];
-
-struct inbuf_t {
-	struct cardstate	*cs;
-	int			inputstate;
-	int			head, tail;
-	unsigned char		data[RBUFSIZE];
-};
-
-/* isochronous write buffer structure
- * circular buffer with pad area for extraction of complete USB frames
- * - data[read..nextread-1] is valid data already submitted to the USB subsystem
- * - data[nextread..write-1] is valid data yet to be sent
- * - data[write] is the next byte to write to
- *   - in byte-oriented L2 protocols, it is completely free
- *   - in bit-oriented L2 protocols, it may contain a partial byte of valid data
- * - data[write+1..read-1] is free
- * - wbits is the number of valid data bits in data[write], starting at the LSB
- * - writesem is the semaphore for writing to the buffer:
- *   if writesem <= 0, data[write..read-1] is currently being written to
- * - idle contains the byte value to repeat when the end of valid data is
- *   reached; if nextread==write (buffer contains no data to send), either the
- *   BAS_OUTBUFPAD bytes immediately before data[write] (if
- *   write>=BAS_OUTBUFPAD) or those of the pad area (if write<BAS_OUTBUFPAD)
- *   are also filled with that value
- */
-struct isowbuf_t {
-	int		read;
-	int		nextread;
-	int		write;
-	atomic_t	writesem;
-	int		wbits;
-	unsigned char	data[BAS_OUTBUFSIZE + BAS_OUTBUFPAD];
-	unsigned char	idle;
-};
-
-/* isochronous write URB context structure
- * data to be stored along with the URB and retrieved when it is returned
- * as completed by the USB subsystem
- * - urb: pointer to the URB itself
- * - bcs: pointer to the B Channel control structure
- * - limit: end of write buffer area covered by this URB
- * - status: URB completion status
- */
-struct isow_urbctx_t {
-	struct urb *urb;
-	struct bc_state *bcs;
-	int limit;
-	int status;
-};
-
-/* AT state structure
- * data associated with the state of an ISDN connection, whether or not
- * it is currently assigned a B channel
- */
-struct at_state_t {
-	struct list_head	list;
-	int			waiting;
-	int			getstring;
-	unsigned		timer_index;
-	unsigned long		timer_expires;
-	int			timer_active;
-	unsigned int		ConState;	/* State of connection */
-	struct reply_t		*replystruct;
-	int			cid;
-	int			int_var[VAR_NUM];	/* see VAR_XXXX */
-	char			*str_var[STR_NUM];	/* see STR_XXXX */
-	unsigned		pending_commands;	/* see PC_XXXX */
-	unsigned		seq_index;
-
-	struct cardstate	*cs;
-	struct bc_state		*bcs;
-};
-
-struct event_t {
-	int type;
-	void *ptr, *arg;
-	int parameter;
-	int cid;
-	struct at_state_t *at_state;
-};
-
-/* This structure holds all information about the B channel in use */
-struct bc_state {
-	struct sk_buff *tx_skb;		/* Current transfer buffer to modem */
-	struct sk_buff_head squeue;	/* B-Channel send Queue */
-
-	/* Counters for debugging */
-	int corrupted;			/* counter for corrupted packets */
-	int trans_down;			/* packet counter (downstream) */
-	int trans_up;			/* packet counter (upstream) */
-
-	struct at_state_t at_state;
-
-	/* receive buffer */
-	unsigned rx_bufsize;		/* max size accepted by application */
-	struct sk_buff *rx_skb;
-	__u16 rx_fcs;
-	int inputstate;			/* see INS_XXXX */
-
-	int channel;
-
-	struct cardstate *cs;
-
-	unsigned chstate;		/* bitmap (CHS_*) */
-	int ignore;
-	unsigned proto2;		/* layer 2 protocol (L2_*) */
-	char *commands[AT_NUM];		/* see AT_XXXX */
-
-#ifdef CONFIG_GIGASET_DEBUG
-	int emptycount;
-#endif
-	int busy;
-	int use_count;
-
-	/* private data of hardware drivers */
-	union {
-		struct ser_bc_state *ser;	/* serial hardware driver */
-		struct usb_bc_state *usb;	/* usb hardware driver (m105) */
-		struct bas_bc_state *bas;	/* usb hardware driver (base) */
-	} hw;
-
-	void *ap;			/* associated LL application */
-	int apconnstate;		/* LL application connection state */
-	spinlock_t aplock;
-};
-
-struct cardstate {
-	struct gigaset_driver *driver;
-	unsigned minor_index;
-	struct device *dev;
-	struct device *tty_dev;
-	unsigned flags;
-
-	const struct gigaset_ops *ops;
-
-	/* Stuff to handle communication */
-	wait_queue_head_t waitqueue;
-	int waiting;
-	int mode;			/* see M_XXXX */
-	int mstate;			/* Modem state: see MS_XXXX */
-					/* only changed by the event layer */
-	int cmd_result;
-
-	int channels;
-	struct bc_state *bcs;		/* Array of struct bc_state */
-
-	int onechannel;			/* data and commands transmitted in one
-					   stream (M10x) */
-
-	spinlock_t lock;
-	struct at_state_t at_state;	/* at_state_t for cid == 0 */
-	struct list_head temp_at_states;/* list of temporary "struct
-					   at_state_t"s without B channel */
-
-	struct inbuf_t *inbuf;
-
-	struct cmdbuf_t *cmdbuf, *lastcmdbuf;
-	spinlock_t cmdlock;
-	unsigned curlen, cmdbytes;
-
-	struct tty_port port;
-	struct tasklet_struct if_wake_tasklet;
-	unsigned control_state;
-
-	unsigned fwver[4];
-	int gotfwver;
-
-	unsigned running;		/* !=0 if events are handled */
-	unsigned connected;		/* !=0 if hardware is connected */
-	unsigned isdn_up;		/* !=0 after gigaset_isdn_start() */
-
-	unsigned cidmode;
-
-	int myid;			/* id for communication with LL */
-	void *iif;			/* LL interface structure */
-	unsigned short hw_hdr_len;	/* headroom needed in data skbs */
-
-	struct reply_t *tabnocid;
-	struct reply_t *tabcid;
-	int cs_init;
-	int ignoreframes;		/* frames to ignore after setting up the
-					   B channel */
-	struct mutex mutex;		/* locks this structure:
-					 *   connected is not changed,
-					 *   hardware_up is not changed,
-					 *   MState is not changed to or from
-					 *   MS_LOCKED */
-
-	struct timer_list timer;
-	int retry_count;
-	int dle;			/* !=0 if DLE mode is active
-					   (ZDLE=1 received -- M10x only) */
-	int cur_at_seq;			/* sequence of AT commands being
-					   processed */
-	int curchannel;			/* channel those commands are meant
-					   for */
-	int commands_pending;		/* flag(s) in xxx.commands_pending have
-					   been set */
-	struct tasklet_struct
-		event_tasklet;		/* tasklet for serializing AT commands.
-					 * Scheduled
-					 *   -> for modem responses (and
-					 *      incoming data for M10x)
-					 *   -> on timeout
-					 *   -> after setting bits in
-					 *      xxx.at_state.pending_commands
-					 *      (e.g. command from LL) */
-	struct tasklet_struct
-		write_tasklet;		/* tasklet for serial output
-					 * (not used in base driver) */
-
-	/* event queue */
-	struct event_t events[MAX_EVENTS];
-	unsigned ev_tail, ev_head;
-	spinlock_t ev_lock;
-
-	/* current modem response */
-	unsigned char respdata[MAX_RESP_SIZE + 1];
-	unsigned cbytes;
-
-	/* private data of hardware drivers */
-	union {
-		struct usb_cardstate *usb; /* USB hardware driver (m105) */
-		struct ser_cardstate *ser; /* serial hardware driver */
-		struct bas_cardstate *bas; /* USB hardware driver (base) */
-	} hw;
-};
-
-struct gigaset_driver {
-	struct list_head list;
-	spinlock_t lock;		/* locks minor tables and blocked */
-	struct tty_driver *tty;
-	unsigned have_tty;
-	unsigned minor;
-	unsigned minors;
-	struct cardstate *cs;
-	int blocked;
-
-	const struct gigaset_ops *ops;
-	struct module *owner;
-};
-
-struct cmdbuf_t {
-	struct cmdbuf_t *next, *prev;
-	int len, offset;
-	struct tasklet_struct *wake_tasklet;
-	unsigned char buf[0];
-};
-
-struct bas_bc_state {
-	/* isochronous output state */
-	int		running;
-	atomic_t	corrbytes;
-	spinlock_t	isooutlock;
-	struct isow_urbctx_t	isoouturbs[BAS_OUTURBS];
-	struct isow_urbctx_t	*isooutdone, *isooutfree, *isooutovfl;
-	struct isowbuf_t	*isooutbuf;
-	unsigned numsub;		/* submitted URB counter
-					   (for diagnostic messages only) */
-	struct tasklet_struct	sent_tasklet;
-
-	/* isochronous input state */
-	spinlock_t isoinlock;
-	struct urb *isoinurbs[BAS_INURBS];
-	unsigned char isoinbuf[BAS_INBUFSIZE * BAS_INURBS];
-	struct urb *isoindone;		/* completed isoc read URB */
-	int isoinstatus;		/* status of completed URB */
-	int loststatus;			/* status of dropped URB */
-	unsigned isoinlost;		/* number of bytes lost */
-	/* state of bit unstuffing algorithm
-	   (in addition to BC_state.inputstate) */
-	unsigned seqlen;		/* number of '1' bits not yet
-					   unstuffed */
-	unsigned inbyte, inbits;	/* collected bits for next byte */
-	/* statistics */
-	unsigned goodbytes;		/* bytes correctly received */
-	unsigned alignerrs;		/* frames with incomplete byte at end */
-	unsigned fcserrs;		/* FCS errors */
-	unsigned frameerrs;		/* framing errors */
-	unsigned giants;		/* long frames */
-	unsigned runts;			/* short frames */
-	unsigned aborts;		/* HDLC aborts */
-	unsigned shared0s;		/* '0' bits shared between flags */
-	unsigned stolen0s;		/* '0' stuff bits also serving as
-					   leading flag bits */
-	struct tasklet_struct rcvd_tasklet;
-};
-
-struct gigaset_ops {
-	/* Called from ev-layer.c/interface.c for sending AT commands to the
-	   device */
-	int (*write_cmd)(struct cardstate *cs, struct cmdbuf_t *cb);
-
-	/* Called from interface.c for additional device control */
-	int (*write_room)(struct cardstate *cs);
-	int (*chars_in_buffer)(struct cardstate *cs);
-	int (*brkchars)(struct cardstate *cs, const unsigned char buf[6]);
-
-	/* Called from ev-layer.c after setting up connection
-	 * Should call gigaset_bchannel_up(), when finished. */
-	int (*init_bchannel)(struct bc_state *bcs);
-
-	/* Called from ev-layer.c after hanging up
-	 * Should call gigaset_bchannel_down(), when finished. */
-	int (*close_bchannel)(struct bc_state *bcs);
-
-	/* Called by gigaset_initcs() for setting up bcs->hw.xxx */
-	int (*initbcshw)(struct bc_state *bcs);
-
-	/* Called by gigaset_freecs() for freeing bcs->hw.xxx */
-	void (*freebcshw)(struct bc_state *bcs);
-
-	/* Called by gigaset_bchannel_down() for resetting bcs->hw.xxx */
-	void (*reinitbcshw)(struct bc_state *bcs);
-
-	/* Called by gigaset_initcs() for setting up cs->hw.xxx */
-	int (*initcshw)(struct cardstate *cs);
-
-	/* Called by gigaset_freecs() for freeing cs->hw.xxx */
-	void (*freecshw)(struct cardstate *cs);
-
-	/* Called from common.c/interface.c for additional serial port
-	   control */
-	int (*set_modem_ctrl)(struct cardstate *cs, unsigned old_state,
-			      unsigned new_state);
-	int (*baud_rate)(struct cardstate *cs, unsigned cflag);
-	int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag);
-
-	/* Called from LL interface to put an skb into the send-queue.
-	 * After sending is completed, gigaset_skb_sent() must be called
-	 * with the skb's link layer header preserved. */
-	int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb);
-
-	/* Called from ev-layer.c to process a block of data
-	 * received through the common/control channel. */
-	void (*handle_input)(struct inbuf_t *inbuf);
-
-};
-
-/* = Common structures and definitions =======================================
- */
-
-/* Parser states for DLE-Event:
- * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "."
- * <DLE_FLAG>:  0x10
- * <EVENT>:     ((a-z)* | (A-Z)* | (0-10)*)+
- */
-#define DLE_FLAG	0x10
-
-/* ===========================================================================
- *  Functions implemented in asyncdata.c
- */
-
-/* Called from LL interface to put an skb into the send queue. */
-int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb);
-
-/* Called from ev-layer.c to process a block of data
- * received through the common/control channel. */
-void gigaset_m10x_input(struct inbuf_t *inbuf);
-
-/* ===========================================================================
- *  Functions implemented in isocdata.c
- */
-
-/* Called from LL interface to put an skb into the send queue. */
-int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb);
-
-/* Called from ev-layer.c to process a block of data
- * received through the common/control channel. */
-void gigaset_isoc_input(struct inbuf_t *inbuf);
-
-/* Called from bas-gigaset.c to process a block of data
- * received through the isochronous channel */
-void gigaset_isoc_receive(unsigned char *src, unsigned count,
-			  struct bc_state *bcs);
-
-/* Called from bas-gigaset.c to put a block of data
- * into the isochronous output buffer */
-int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len);
-
-/* Called from bas-gigaset.c to initialize the isochronous output buffer */
-void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle);
-
-/* Called from bas-gigaset.c to retrieve a block of bytes for sending */
-int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size);
-
-/* ===========================================================================
- *  Functions implemented in LL interface
- */
-
-/* Called from common.c for setting up/shutting down with the ISDN subsystem */
-void gigaset_isdn_regdrv(void);
-void gigaset_isdn_unregdrv(void);
-int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid);
-void gigaset_isdn_unregdev(struct cardstate *cs);
-
-/* Called from hardware module to indicate completion of an skb */
-void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
-void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb);
-void gigaset_isdn_rcv_err(struct bc_state *bcs);
-
-/* Called from common.c/ev-layer.c to indicate events relevant to the LL */
-void gigaset_isdn_start(struct cardstate *cs);
-void gigaset_isdn_stop(struct cardstate *cs);
-int gigaset_isdn_icall(struct at_state_t *at_state);
-void gigaset_isdn_connD(struct bc_state *bcs);
-void gigaset_isdn_hupD(struct bc_state *bcs);
-void gigaset_isdn_connB(struct bc_state *bcs);
-void gigaset_isdn_hupB(struct bc_state *bcs);
-
-/* ===========================================================================
- *  Functions implemented in ev-layer.c
- */
-
-/* tasklet called from common.c to process queued events */
-void gigaset_handle_event(unsigned long data);
-
-/* called from isocdata.c / asyncdata.c
- * when a complete modem response line has been received */
-void gigaset_handle_modem_response(struct cardstate *cs);
-
-/* ===========================================================================
- *  Functions implemented in proc.c
- */
-
-/* initialize sysfs for device */
-void gigaset_init_dev_sysfs(struct cardstate *cs);
-void gigaset_free_dev_sysfs(struct cardstate *cs);
-
-/* ===========================================================================
- *  Functions implemented in common.c/gigaset.h
- */
-
-void gigaset_bcs_reinit(struct bc_state *bcs);
-void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
-		     struct cardstate *cs, int cid);
-int gigaset_get_channel(struct bc_state *bcs);
-struct bc_state *gigaset_get_free_channel(struct cardstate *cs);
-void gigaset_free_channel(struct bc_state *bcs);
-int gigaset_get_channels(struct cardstate *cs);
-void gigaset_free_channels(struct cardstate *cs);
-void gigaset_block_channels(struct cardstate *cs);
-
-/* Allocate and initialize driver structure. */
-struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
-					  const char *procname,
-					  const char *devname,
-					  const struct gigaset_ops *ops,
-					  struct module *owner);
-
-/* Deallocate driver structure. */
-void gigaset_freedriver(struct gigaset_driver *drv);
-
-struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty);
-struct cardstate *gigaset_get_cs_by_id(int id);
-void gigaset_blockdriver(struct gigaset_driver *drv);
-
-/* Allocate and initialize card state. Calls hardware dependent
-   gigaset_init[b]cs(). */
-struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
-				 int onechannel, int ignoreframes,
-				 int cidmode, const char *modulename);
-
-/* Free card state. Calls hardware dependent gigaset_free[b]cs(). */
-void gigaset_freecs(struct cardstate *cs);
-
-/* Tell common.c that hardware and driver are ready. */
-int gigaset_start(struct cardstate *cs);
-
-/* Tell common.c that the device is not present any more. */
-void gigaset_stop(struct cardstate *cs);
-
-/* Tell common.c that the driver is being unloaded. */
-int gigaset_shutdown(struct cardstate *cs);
-
-/* Append event to the queue.
- * Returns NULL on failure or a pointer to the event on success.
- * ptr must be kmalloc()ed (and not be freed by the caller).
- */
-struct event_t *gigaset_add_event(struct cardstate *cs,
-				  struct at_state_t *at_state, int type,
-				  void *ptr, int parameter, void *arg);
-
-/* Called on CONFIG1 command from frontend. */
-int gigaset_enterconfigmode(struct cardstate *cs);
-
-/* cs->lock must not be locked */
-static inline void gigaset_schedule_event(struct cardstate *cs)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&cs->lock, flags);
-	if (cs->running)
-		tasklet_schedule(&cs->event_tasklet);
-	spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-/* Tell common.c that B channel has been closed. */
-/* cs->lock must not be locked */
-static inline void gigaset_bchannel_down(struct bc_state *bcs)
-{
-	gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_CLOSED, NULL, 0, NULL);
-	gigaset_schedule_event(bcs->cs);
-}
-
-/* Tell common.c that B channel has been opened. */
-/* cs->lock must not be locked */
-static inline void gigaset_bchannel_up(struct bc_state *bcs)
-{
-	gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_OPEN, NULL, 0, NULL);
-	gigaset_schedule_event(bcs->cs);
-}
-
-/* set up next receive skb for data mode */
-static inline struct sk_buff *gigaset_new_rx_skb(struct bc_state *bcs)
-{
-	struct cardstate *cs = bcs->cs;
-	unsigned short hw_hdr_len = cs->hw_hdr_len;
-
-	if (bcs->ignore) {
-		bcs->rx_skb = NULL;
-	} else {
-		bcs->rx_skb = dev_alloc_skb(bcs->rx_bufsize + hw_hdr_len);
-		if (bcs->rx_skb == NULL)
-			dev_warn(cs->dev, "could not allocate skb\n");
-		else
-			skb_reserve(bcs->rx_skb, hw_hdr_len);
-	}
-	return bcs->rx_skb;
-}
-
-/* append received bytes to inbuf */
-int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
-		       unsigned numbytes);
-
-/* ===========================================================================
- *  Functions implemented in interface.c
- */
-
-/* initialize interface */
-void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
-			   const char *devname);
-/* release interface */
-void gigaset_if_freedriver(struct gigaset_driver *drv);
-/* add minor */
-void gigaset_if_init(struct cardstate *cs);
-/* remove minor */
-void gigaset_if_free(struct cardstate *cs);
-/* device received data */
-void gigaset_if_receive(struct cardstate *cs,
-			unsigned char *buffer, size_t len);
-
-#endif
diff --git a/drivers/staging/isdn/gigaset/interface.c b/drivers/staging/isdn/gigaset/interface.c
deleted file mode 100644
index 9ddadd0..0000000
--- a/drivers/staging/isdn/gigaset/interface.c
+++ /dev/null
@@ -1,613 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * interface to user space for the gigaset driver
- *
- * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de>
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/gigaset_dev.h>
-#include <linux/tty_flip.h>
-#include <linux/module.h>
-
-/*** our ioctls ***/
-
-static int if_lock(struct cardstate *cs, int *arg)
-{
-	int cmd = *arg;
-
-	gig_dbg(DEBUG_IF, "%u: if_lock (%d)", cs->minor_index, cmd);
-
-	if (cmd > 1)
-		return -EINVAL;
-
-	if (cmd < 0) {
-		*arg = cs->mstate == MS_LOCKED;
-		return 0;
-	}
-
-	if (!cmd && cs->mstate == MS_LOCKED && cs->connected) {
-		cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR | TIOCM_RTS);
-		cs->ops->baud_rate(cs, B115200);
-		cs->ops->set_line_ctrl(cs, CS8);
-		cs->control_state = TIOCM_DTR | TIOCM_RTS;
-	}
-
-	cs->waiting = 1;
-	if (!gigaset_add_event(cs, &cs->at_state, EV_IF_LOCK,
-			       NULL, cmd, NULL)) {
-		cs->waiting = 0;
-		return -ENOMEM;
-	}
-	gigaset_schedule_event(cs);
-
-	wait_event(cs->waitqueue, !cs->waiting);
-
-	if (cs->cmd_result >= 0) {
-		*arg = cs->cmd_result;
-		return 0;
-	}
-
-	return cs->cmd_result;
-}
-
-static int if_version(struct cardstate *cs, unsigned arg[4])
-{
-	static const unsigned version[4] = GIG_VERSION;
-	static const unsigned compat[4] = GIG_COMPAT;
-	unsigned cmd = arg[0];
-
-	gig_dbg(DEBUG_IF, "%u: if_version (%d)", cs->minor_index, cmd);
-
-	switch (cmd) {
-	case GIGVER_DRIVER:
-		memcpy(arg, version, sizeof version);
-		return 0;
-	case GIGVER_COMPAT:
-		memcpy(arg, compat, sizeof compat);
-		return 0;
-	case GIGVER_FWBASE:
-		cs->waiting = 1;
-		if (!gigaset_add_event(cs, &cs->at_state, EV_IF_VER,
-				       NULL, 0, arg)) {
-			cs->waiting = 0;
-			return -ENOMEM;
-		}
-		gigaset_schedule_event(cs);
-
-		wait_event(cs->waitqueue, !cs->waiting);
-
-		if (cs->cmd_result >= 0)
-			return 0;
-
-		return cs->cmd_result;
-	default:
-		return -EINVAL;
-	}
-}
-
-static int if_config(struct cardstate *cs, int *arg)
-{
-	gig_dbg(DEBUG_IF, "%u: if_config (%d)", cs->minor_index, *arg);
-
-	if (*arg != 1)
-		return -EINVAL;
-
-	if (cs->mstate != MS_LOCKED)
-		return -EBUSY;
-
-	if (!cs->connected) {
-		pr_err("%s: not connected\n", __func__);
-		return -ENODEV;
-	}
-
-	*arg = 0;
-	return gigaset_enterconfigmode(cs);
-}
-
-/*** the terminal driver ***/
-
-static int if_open(struct tty_struct *tty, struct file *filp)
-{
-	struct cardstate *cs;
-
-	gig_dbg(DEBUG_IF, "%d+%d: %s()",
-		tty->driver->minor_start, tty->index, __func__);
-
-	cs = gigaset_get_cs_by_tty(tty);
-	if (!cs || !try_module_get(cs->driver->owner))
-		return -ENODEV;
-
-	if (mutex_lock_interruptible(&cs->mutex)) {
-		module_put(cs->driver->owner);
-		return -ERESTARTSYS;
-	}
-	tty->driver_data = cs;
-
-	++cs->port.count;
-
-	if (cs->port.count == 1) {
-		tty_port_tty_set(&cs->port, tty);
-		cs->port.low_latency = 1;
-	}
-
-	mutex_unlock(&cs->mutex);
-	return 0;
-}
-
-static void if_close(struct tty_struct *tty, struct file *filp)
-{
-	struct cardstate *cs = tty->driver_data;
-
-	if (!cs) { /* happens if we didn't find cs in open */
-		gig_dbg(DEBUG_IF, "%s: no cardstate", __func__);
-		return;
-	}
-
-	gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
-	mutex_lock(&cs->mutex);
-
-	if (!cs->connected)
-		gig_dbg(DEBUG_IF, "not connected");	/* nothing to do */
-	else if (!cs->port.count)
-		dev_warn(cs->dev, "%s: device not opened\n", __func__);
-	else if (!--cs->port.count)
-		tty_port_tty_set(&cs->port, NULL);
-
-	mutex_unlock(&cs->mutex);
-
-	module_put(cs->driver->owner);
-}
-
-static int if_ioctl(struct tty_struct *tty,
-		    unsigned int cmd, unsigned long arg)
-{
-	struct cardstate *cs = tty->driver_data;
-	int retval = -ENODEV;
-	int int_arg;
-	unsigned char buf[6];
-	unsigned version[4];
-
-	gig_dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __func__, cmd);
-
-	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS;
-
-	if (!cs->connected) {
-		gig_dbg(DEBUG_IF, "not connected");
-		retval = -ENODEV;
-	} else {
-		retval = 0;
-		switch (cmd) {
-		case GIGASET_REDIR:
-			retval = get_user(int_arg, (int __user *) arg);
-			if (retval >= 0)
-				retval = if_lock(cs, &int_arg);
-			if (retval >= 0)
-				retval = put_user(int_arg, (int __user *) arg);
-			break;
-		case GIGASET_CONFIG:
-			retval = get_user(int_arg, (int __user *) arg);
-			if (retval >= 0)
-				retval = if_config(cs, &int_arg);
-			if (retval >= 0)
-				retval = put_user(int_arg, (int __user *) arg);
-			break;
-		case GIGASET_BRKCHARS:
-			retval = copy_from_user(&buf,
-						(const unsigned char __user *) arg, 6)
-				? -EFAULT : 0;
-			if (retval >= 0) {
-				gigaset_dbg_buffer(DEBUG_IF, "GIGASET_BRKCHARS",
-						   6, buf);
-				retval = cs->ops->brkchars(cs, buf);
-			}
-			break;
-		case GIGASET_VERSION:
-			retval = copy_from_user(version,
-						(unsigned __user *) arg, sizeof version)
-				? -EFAULT : 0;
-			if (retval >= 0)
-				retval = if_version(cs, version);
-			if (retval >= 0)
-				retval = copy_to_user((unsigned __user *) arg,
-						      version, sizeof version)
-					? -EFAULT : 0;
-			break;
-		default:
-			gig_dbg(DEBUG_IF, "%s: arg not supported - 0x%04x",
-				__func__, cmd);
-			retval = -ENOIOCTLCMD;
-		}
-	}
-
-	mutex_unlock(&cs->mutex);
-
-	return retval;
-}
-
-#ifdef CONFIG_COMPAT
-static long if_compat_ioctl(struct tty_struct *tty,
-		    unsigned int cmd, unsigned long arg)
-{
-	return if_ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
-static int if_tiocmget(struct tty_struct *tty)
-{
-	struct cardstate *cs = tty->driver_data;
-	int retval;
-
-	gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
-	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS;
-
-	retval = cs->control_state & (TIOCM_RTS | TIOCM_DTR);
-
-	mutex_unlock(&cs->mutex);
-
-	return retval;
-}
-
-static int if_tiocmset(struct tty_struct *tty,
-		       unsigned int set, unsigned int clear)
-{
-	struct cardstate *cs = tty->driver_data;
-	int retval;
-	unsigned mc;
-
-	gig_dbg(DEBUG_IF, "%u: %s(0x%x, 0x%x)",
-		cs->minor_index, __func__, set, clear);
-
-	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS;
-
-	if (!cs->connected) {
-		gig_dbg(DEBUG_IF, "not connected");
-		retval = -ENODEV;
-	} else {
-		mc = (cs->control_state | set) & ~clear & (TIOCM_RTS | TIOCM_DTR);
-		retval = cs->ops->set_modem_ctrl(cs, cs->control_state, mc);
-		cs->control_state = mc;
-	}
-
-	mutex_unlock(&cs->mutex);
-
-	return retval;
-}
-
-static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
-	struct cardstate *cs = tty->driver_data;
-	struct cmdbuf_t *cb;
-	int retval;
-
-	gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
-	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS;
-
-	if (!cs->connected) {
-		gig_dbg(DEBUG_IF, "not connected");
-		retval = -ENODEV;
-		goto done;
-	}
-	if (cs->mstate != MS_LOCKED) {
-		dev_warn(cs->dev, "can't write to unlocked device\n");
-		retval = -EBUSY;
-		goto done;
-	}
-	if (count <= 0) {
-		/* nothing to do */
-		retval = 0;
-		goto done;
-	}
-
-	cb = kmalloc(sizeof(struct cmdbuf_t) + count, GFP_KERNEL);
-	if (!cb) {
-		dev_err(cs->dev, "%s: out of memory\n", __func__);
-		retval = -ENOMEM;
-		goto done;
-	}
-
-	memcpy(cb->buf, buf, count);
-	cb->len = count;
-	cb->offset = 0;
-	cb->next = NULL;
-	cb->wake_tasklet = &cs->if_wake_tasklet;
-	retval = cs->ops->write_cmd(cs, cb);
-done:
-	mutex_unlock(&cs->mutex);
-	return retval;
-}
-
-static int if_write_room(struct tty_struct *tty)
-{
-	struct cardstate *cs = tty->driver_data;
-	int retval;
-
-	gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
-	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS;
-
-	if (!cs->connected) {
-		gig_dbg(DEBUG_IF, "not connected");
-		retval = -ENODEV;
-	} else if (cs->mstate != MS_LOCKED) {
-		dev_warn(cs->dev, "can't write to unlocked device\n");
-		retval = -EBUSY;
-	} else
-		retval = cs->ops->write_room(cs);
-
-	mutex_unlock(&cs->mutex);
-
-	return retval;
-}
-
-static int if_chars_in_buffer(struct tty_struct *tty)
-{
-	struct cardstate *cs = tty->driver_data;
-	int retval = 0;
-
-	gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
-	mutex_lock(&cs->mutex);
-
-	if (!cs->connected)
-		gig_dbg(DEBUG_IF, "not connected");
-	else if (cs->mstate != MS_LOCKED)
-		dev_warn(cs->dev, "can't write to unlocked device\n");
-	else
-		retval = cs->ops->chars_in_buffer(cs);
-
-	mutex_unlock(&cs->mutex);
-
-	return retval;
-}
-
-static void if_throttle(struct tty_struct *tty)
-{
-	struct cardstate *cs = tty->driver_data;
-
-	gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
-	mutex_lock(&cs->mutex);
-
-	if (!cs->connected)
-		gig_dbg(DEBUG_IF, "not connected");	/* nothing to do */
-	else
-		gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
-
-	mutex_unlock(&cs->mutex);
-}
-
-static void if_unthrottle(struct tty_struct *tty)
-{
-	struct cardstate *cs = tty->driver_data;
-
-	gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
-	mutex_lock(&cs->mutex);
-
-	if (!cs->connected)
-		gig_dbg(DEBUG_IF, "not connected");	/* nothing to do */
-	else
-		gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
-
-	mutex_unlock(&cs->mutex);
-}
-
-static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
-{
-	struct cardstate *cs = tty->driver_data;
-	unsigned int iflag;
-	unsigned int cflag;
-	unsigned int old_cflag;
-	unsigned int control_state, new_state;
-
-	gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
-	mutex_lock(&cs->mutex);
-
-	if (!cs->connected) {
-		gig_dbg(DEBUG_IF, "not connected");
-		goto out;
-	}
-
-	iflag = tty->termios.c_iflag;
-	cflag = tty->termios.c_cflag;
-	old_cflag = old ? old->c_cflag : cflag;
-	gig_dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x",
-		cs->minor_index, iflag, cflag, old_cflag);
-
-	/* get a local copy of the current port settings */
-	control_state = cs->control_state;
-
-	/*
-	 * Update baud rate.
-	 * Do not attempt to cache old rates and skip settings,
-	 * disconnects screw such tricks up completely.
-	 * Premature optimization is the root of all evil.
-	 */
-
-	/* reassert DTR and (maybe) RTS on transition from B0 */
-	if ((old_cflag & CBAUD) == B0) {
-		new_state = control_state | TIOCM_DTR;
-		/* don't set RTS if using hardware flow control */
-		if (!(old_cflag & CRTSCTS))
-			new_state |= TIOCM_RTS;
-		gig_dbg(DEBUG_IF, "%u: from B0 - set DTR%s",
-			cs->minor_index,
-			(new_state & TIOCM_RTS) ? " only" : "/RTS");
-		cs->ops->set_modem_ctrl(cs, control_state, new_state);
-		control_state = new_state;
-	}
-
-	cs->ops->baud_rate(cs, cflag & CBAUD);
-
-	if ((cflag & CBAUD) == B0) {
-		/* Drop RTS and DTR */
-		gig_dbg(DEBUG_IF, "%u: to B0 - drop DTR/RTS", cs->minor_index);
-		new_state = control_state & ~(TIOCM_DTR | TIOCM_RTS);
-		cs->ops->set_modem_ctrl(cs, control_state, new_state);
-		control_state = new_state;
-	}
-
-	/*
-	 * Update line control register (LCR)
-	 */
-
-	cs->ops->set_line_ctrl(cs, cflag);
-
-	/* save off the modified port settings */
-	cs->control_state = control_state;
-
-out:
-	mutex_unlock(&cs->mutex);
-}
-
-static const struct tty_operations if_ops = {
-	.open =			if_open,
-	.close =		if_close,
-	.ioctl =		if_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl =		if_compat_ioctl,
-#endif
-	.write =		if_write,
-	.write_room =		if_write_room,
-	.chars_in_buffer =	if_chars_in_buffer,
-	.set_termios =		if_set_termios,
-	.throttle =		if_throttle,
-	.unthrottle =		if_unthrottle,
-	.tiocmget =		if_tiocmget,
-	.tiocmset =		if_tiocmset,
-};
-
-
-/* wakeup tasklet for the write operation */
-static void if_wake(unsigned long data)
-{
-	struct cardstate *cs = (struct cardstate *)data;
-
-	tty_port_tty_wakeup(&cs->port);
-}
-
-/*** interface to common ***/
-
-void gigaset_if_init(struct cardstate *cs)
-{
-	struct gigaset_driver *drv;
-
-	drv = cs->driver;
-	if (!drv->have_tty)
-		return;
-
-	tasklet_init(&cs->if_wake_tasklet, if_wake, (unsigned long) cs);
-
-	mutex_lock(&cs->mutex);
-	cs->tty_dev = tty_port_register_device(&cs->port, drv->tty,
-			cs->minor_index, NULL);
-
-	if (!IS_ERR(cs->tty_dev))
-		dev_set_drvdata(cs->tty_dev, cs);
-	else {
-		pr_warn("could not register device to the tty subsystem\n");
-		cs->tty_dev = NULL;
-	}
-	mutex_unlock(&cs->mutex);
-}
-
-void gigaset_if_free(struct cardstate *cs)
-{
-	struct gigaset_driver *drv;
-
-	drv = cs->driver;
-	if (!drv->have_tty)
-		return;
-
-	tasklet_disable(&cs->if_wake_tasklet);
-	tasklet_kill(&cs->if_wake_tasklet);
-	cs->tty_dev = NULL;
-	tty_unregister_device(drv->tty, cs->minor_index);
-}
-
-/**
- * gigaset_if_receive() - pass a received block of data to the tty device
- * @cs:		device descriptor structure.
- * @buffer:	received data.
- * @len:	number of bytes received.
- *
- * Called by asyncdata/isocdata if a block of data received from the
- * device must be sent to userspace through the ttyG* device.
- */
-void gigaset_if_receive(struct cardstate *cs,
-			unsigned char *buffer, size_t len)
-{
-	tty_insert_flip_string(&cs->port, buffer, len);
-	tty_flip_buffer_push(&cs->port);
-}
-EXPORT_SYMBOL_GPL(gigaset_if_receive);
-
-/* gigaset_if_initdriver
- * Initialize tty interface.
- * parameters:
- *	drv		Driver
- *	procname	Name of the driver (e.g. for /proc/tty/drivers)
- *	devname		Name of the device files (prefix without minor number)
- */
-void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
-			   const char *devname)
-{
-	int ret;
-	struct tty_driver *tty;
-
-	drv->have_tty = 0;
-
-	drv->tty = tty = alloc_tty_driver(drv->minors);
-	if (tty == NULL)
-		goto enomem;
-
-	tty->type =		TTY_DRIVER_TYPE_SERIAL;
-	tty->subtype =		SERIAL_TYPE_NORMAL;
-	tty->flags =		TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
-
-	tty->driver_name =	procname;
-	tty->name =		devname;
-	tty->minor_start =	drv->minor;
-
-	tty->init_termios          = tty_std_termios;
-	tty->init_termios.c_cflag  = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
-	tty_set_operations(tty, &if_ops);
-
-	ret = tty_register_driver(tty);
-	if (ret < 0) {
-		pr_err("error %d registering tty driver\n", ret);
-		goto error;
-	}
-	gig_dbg(DEBUG_IF, "tty driver initialized");
-	drv->have_tty = 1;
-	return;
-
-enomem:
-	pr_err("out of memory\n");
-error:
-	if (drv->tty)
-		put_tty_driver(drv->tty);
-}
-
-void gigaset_if_freedriver(struct gigaset_driver *drv)
-{
-	if (!drv->have_tty)
-		return;
-
-	drv->have_tty = 0;
-	tty_unregister_driver(drv->tty);
-	put_tty_driver(drv->tty);
-}
diff --git a/drivers/staging/isdn/gigaset/isocdata.c b/drivers/staging/isdn/gigaset/isocdata.c
deleted file mode 100644
index 3ecf6e3..0000000
--- a/drivers/staging/isdn/gigaset/isocdata.c
+++ /dev/null
@@ -1,1006 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Common data handling layer for bas_gigaset
- *
- * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
- *                       Hansjoerg Lipp <hjlipp@web.de>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/crc-ccitt.h>
-#include <linux/bitrev.h>
-
-/* access methods for isowbuf_t */
-/* ============================ */
-
-/* initialize buffer structure
- */
-void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle)
-{
-	iwb->read = 0;
-	iwb->nextread = 0;
-	iwb->write = 0;
-	atomic_set(&iwb->writesem, 1);
-	iwb->wbits = 0;
-	iwb->idle = idle;
-	memset(iwb->data + BAS_OUTBUFSIZE, idle, BAS_OUTBUFPAD);
-}
-
-/* compute number of bytes which can be appended to buffer
- * so that there is still room to append a maximum frame of flags
- */
-static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
-{
-	int read, write, freebytes;
-
-	read = iwb->read;
-	write = iwb->write;
-	freebytes = read - write;
-	if (freebytes > 0) {
-		/* no wraparound: need padding space within regular area */
-		return freebytes - BAS_OUTBUFPAD;
-	} else if (read < BAS_OUTBUFPAD) {
-		/* wraparound: can use space up to end of regular area */
-		return BAS_OUTBUFSIZE - write;
-	} else {
-		/* following the wraparound yields more space */
-		return freebytes + BAS_OUTBUFSIZE - BAS_OUTBUFPAD;
-	}
-}
-
-/* start writing
- * acquire the write semaphore
- * return 0 if acquired, <0 if busy
- */
-static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
-{
-	if (!atomic_dec_and_test(&iwb->writesem)) {
-		atomic_inc(&iwb->writesem);
-		gig_dbg(DEBUG_ISO, "%s: couldn't acquire iso write semaphore",
-			__func__);
-		return -EBUSY;
-	}
-	gig_dbg(DEBUG_ISO,
-		"%s: acquired iso write semaphore, data[write]=%02x, nbits=%d",
-		__func__, iwb->data[iwb->write], iwb->wbits);
-	return 0;
-}
-
-/* finish writing
- * release the write semaphore
- * returns the current write position
- */
-static inline int isowbuf_donewrite(struct isowbuf_t *iwb)
-{
-	int write = iwb->write;
-	atomic_inc(&iwb->writesem);
-	return write;
-}
-
-/* append bits to buffer without any checks
- * - data contains bits to append, starting at LSB
- * - nbits is number of bits to append (0..24)
- * must be called with the write semaphore held
- * If more than nbits bits are set in data, the extraneous bits are set in the
- * buffer too, but the write position is only advanced by nbits.
- */
-static inline void isowbuf_putbits(struct isowbuf_t *iwb, u32 data, int nbits)
-{
-	int write = iwb->write;
-	data <<= iwb->wbits;
-	data |= iwb->data[write];
-	nbits += iwb->wbits;
-	while (nbits >= 8) {
-		iwb->data[write++] = data & 0xff;
-		write %= BAS_OUTBUFSIZE;
-		data >>= 8;
-		nbits -= 8;
-	}
-	iwb->wbits = nbits;
-	iwb->data[write] = data & 0xff;
-	iwb->write = write;
-}
-
-/* put final flag on HDLC bitstream
- * also sets the idle fill byte to the correspondingly shifted flag pattern
- * must be called with the write semaphore held
- */
-static inline void isowbuf_putflag(struct isowbuf_t *iwb)
-{
-	int write;
-
-	/* add two flags, thus reliably covering one byte */
-	isowbuf_putbits(iwb, 0x7e7e, 8);
-	/* recover the idle flag byte */
-	write = iwb->write;
-	iwb->idle = iwb->data[write];
-	gig_dbg(DEBUG_ISO, "idle fill byte %02x", iwb->idle);
-	/* mask extraneous bits in buffer */
-	iwb->data[write] &= (1 << iwb->wbits) - 1;
-}
-
-/* retrieve a block of bytes for sending
- * The requested number of bytes is provided as a contiguous block.
- * If necessary, the frame is filled to the requested number of bytes
- * with the idle value.
- * returns offset to frame, < 0 on busy or error
- */
-int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
-{
-	int read, write, limit, src, dst;
-	unsigned char pbyte;
-
-	read = iwb->nextread;
-	write = iwb->write;
-	if (likely(read == write)) {
-		/* return idle frame */
-		return read < BAS_OUTBUFPAD ?
-			BAS_OUTBUFSIZE : read - BAS_OUTBUFPAD;
-	}
-
-	limit = read + size;
-	gig_dbg(DEBUG_STREAM, "%s: read=%d write=%d limit=%d",
-		__func__, read, write, limit);
-#ifdef CONFIG_GIGASET_DEBUG
-	if (unlikely(size < 0 || size > BAS_OUTBUFPAD)) {
-		pr_err("invalid size %d\n", size);
-		return -EINVAL;
-	}
-#endif
-
-	if (read < write) {
-		/* no wraparound in valid data */
-		if (limit >= write) {
-			/* append idle frame */
-			if (isowbuf_startwrite(iwb) < 0)
-				return -EBUSY;
-			/* write position could have changed */
-			write = iwb->write;
-			if (limit >= write) {
-				pbyte = iwb->data[write]; /* save
-							     partial byte */
-				limit = write + BAS_OUTBUFPAD;
-				gig_dbg(DEBUG_STREAM,
-					"%s: filling %d->%d with %02x",
-					__func__, write, limit, iwb->idle);
-				if (write + BAS_OUTBUFPAD < BAS_OUTBUFSIZE)
-					memset(iwb->data + write, iwb->idle,
-					       BAS_OUTBUFPAD);
-				else {
-					/* wraparound, fill entire pad area */
-					memset(iwb->data + write, iwb->idle,
-					       BAS_OUTBUFSIZE + BAS_OUTBUFPAD
-					       - write);
-					limit = 0;
-				}
-				gig_dbg(DEBUG_STREAM,
-					"%s: restoring %02x at %d",
-					__func__, pbyte, limit);
-				iwb->data[limit] = pbyte; /* restore
-							     partial byte */
-				iwb->write = limit;
-			}
-			isowbuf_donewrite(iwb);
-		}
-	} else {
-		/* valid data wraparound */
-		if (limit >= BAS_OUTBUFSIZE) {
-			/* copy wrapped part into pad area */
-			src = 0;
-			dst = BAS_OUTBUFSIZE;
-			while (dst < limit && src < write)
-				iwb->data[dst++] = iwb->data[src++];
-			if (dst <= limit) {
-				/* fill pad area with idle byte */
-				memset(iwb->data + dst, iwb->idle,
-				       BAS_OUTBUFSIZE + BAS_OUTBUFPAD - dst);
-			}
-			limit = src;
-		}
-	}
-	iwb->nextread = limit;
-	return read;
-}
-
-/* dump_bytes
- * write hex bytes to syslog for debugging
- */
-static inline void dump_bytes(enum debuglevel level, const char *tag,
-			      unsigned char *bytes, int count)
-{
-#ifdef CONFIG_GIGASET_DEBUG
-	unsigned char c;
-	static char dbgline[3 * 32 + 1];
-	int i = 0;
-
-	if (!(gigaset_debuglevel & level))
-		return;
-
-	while (count-- > 0) {
-		if (i > sizeof(dbgline) - 4) {
-			dbgline[i] = '\0';
-			gig_dbg(level, "%s:%s", tag, dbgline);
-			i = 0;
-		}
-		c = *bytes++;
-		dbgline[i] = (i && !(i % 12)) ? '-' : ' ';
-		i++;
-		dbgline[i++] = hex_asc_hi(c);
-		dbgline[i++] = hex_asc_lo(c);
-	}
-	dbgline[i] = '\0';
-	gig_dbg(level, "%s:%s", tag, dbgline);
-#endif
-}
-
-/*============================================================================*/
-
-/* bytewise HDLC bitstuffing via table lookup
- * lookup table: 5 subtables for 0..4 preceding consecutive '1' bits
- * index: 256*(number of preceding '1' bits) + (next byte to stuff)
- * value: bit  9.. 0 = result bits
- *        bit 12..10 = number of trailing '1' bits in result
- *        bit 14..13 = number of bits added by stuffing
- */
-static const u16 stufftab[5 * 256] = {
-/* previous 1s = 0: */
-	0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
-	0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f,
-	0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
-	0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x205f,
-	0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
-	0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x209f,
-	0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
-	0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20df,
-	0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x048f,
-	0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x251f,
-	0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x04af,
-	0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x255f,
-	0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x08cf,
-	0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x299f,
-	0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef,
-	0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf,
-
-/* previous 1s = 1: */
-	0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f,
-	0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f,
-	0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f,
-	0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x206f,
-	0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x208f,
-	0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20af,
-	0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20cf,
-	0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20ef,
-	0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x250f,
-	0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x252f,
-	0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x254f,
-	0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x256f,
-	0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x298f,
-	0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29af,
-	0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf,
-	0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef,
-
-/* previous 1s = 2: */
-	0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017,
-	0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037,
-	0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057,
-	0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x2067, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x2077,
-	0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x2087, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x2097,
-	0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x20a7, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20b7,
-	0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x20c7, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20d7,
-	0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x20e7, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20f7,
-	0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x2507, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x2517,
-	0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x2527, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x2537,
-	0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x2547, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x2557,
-	0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x2567, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x2577,
-	0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x2987, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x2997,
-	0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x29a7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29b7,
-	0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7,
-	0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7,
-
-/* previous 1s = 3: */
-	0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b,
-	0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b,
-	0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b,
-	0x0030, 0x0031, 0x0032, 0x2063, 0x0034, 0x0035, 0x0036, 0x206b, 0x0038, 0x0039, 0x003a, 0x2073, 0x003c, 0x003d, 0x203e, 0x207b,
-	0x0040, 0x0041, 0x0042, 0x2083, 0x0044, 0x0045, 0x0046, 0x208b, 0x0048, 0x0049, 0x004a, 0x2093, 0x004c, 0x004d, 0x004e, 0x209b,
-	0x0050, 0x0051, 0x0052, 0x20a3, 0x0054, 0x0055, 0x0056, 0x20ab, 0x0058, 0x0059, 0x005a, 0x20b3, 0x005c, 0x005d, 0x005e, 0x20bb,
-	0x0060, 0x0061, 0x0062, 0x20c3, 0x0064, 0x0065, 0x0066, 0x20cb, 0x0068, 0x0069, 0x006a, 0x20d3, 0x006c, 0x006d, 0x006e, 0x20db,
-	0x0070, 0x0071, 0x0072, 0x20e3, 0x0074, 0x0075, 0x0076, 0x20eb, 0x0078, 0x0079, 0x007a, 0x20f3, 0x207c, 0x207d, 0x20be, 0x40fb,
-	0x0480, 0x0481, 0x0482, 0x2503, 0x0484, 0x0485, 0x0486, 0x250b, 0x0488, 0x0489, 0x048a, 0x2513, 0x048c, 0x048d, 0x048e, 0x251b,
-	0x0490, 0x0491, 0x0492, 0x2523, 0x0494, 0x0495, 0x0496, 0x252b, 0x0498, 0x0499, 0x049a, 0x2533, 0x049c, 0x049d, 0x049e, 0x253b,
-	0x04a0, 0x04a1, 0x04a2, 0x2543, 0x04a4, 0x04a5, 0x04a6, 0x254b, 0x04a8, 0x04a9, 0x04aa, 0x2553, 0x04ac, 0x04ad, 0x04ae, 0x255b,
-	0x04b0, 0x04b1, 0x04b2, 0x2563, 0x04b4, 0x04b5, 0x04b6, 0x256b, 0x04b8, 0x04b9, 0x04ba, 0x2573, 0x04bc, 0x04bd, 0x253e, 0x257b,
-	0x08c0, 0x08c1, 0x08c2, 0x2983, 0x08c4, 0x08c5, 0x08c6, 0x298b, 0x08c8, 0x08c9, 0x08ca, 0x2993, 0x08cc, 0x08cd, 0x08ce, 0x299b,
-	0x08d0, 0x08d1, 0x08d2, 0x29a3, 0x08d4, 0x08d5, 0x08d6, 0x29ab, 0x08d8, 0x08d9, 0x08da, 0x29b3, 0x08dc, 0x08dd, 0x08de, 0x29bb,
-	0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb,
-	0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb,
-
-/* previous 1s = 4: */
-	0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d,
-	0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d,
-	0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d,
-	0x0030, 0x2061, 0x0032, 0x2065, 0x0034, 0x2069, 0x0036, 0x206d, 0x0038, 0x2071, 0x003a, 0x2075, 0x003c, 0x2079, 0x203e, 0x407d,
-	0x0040, 0x2081, 0x0042, 0x2085, 0x0044, 0x2089, 0x0046, 0x208d, 0x0048, 0x2091, 0x004a, 0x2095, 0x004c, 0x2099, 0x004e, 0x209d,
-	0x0050, 0x20a1, 0x0052, 0x20a5, 0x0054, 0x20a9, 0x0056, 0x20ad, 0x0058, 0x20b1, 0x005a, 0x20b5, 0x005c, 0x20b9, 0x005e, 0x20bd,
-	0x0060, 0x20c1, 0x0062, 0x20c5, 0x0064, 0x20c9, 0x0066, 0x20cd, 0x0068, 0x20d1, 0x006a, 0x20d5, 0x006c, 0x20d9, 0x006e, 0x20dd,
-	0x0070, 0x20e1, 0x0072, 0x20e5, 0x0074, 0x20e9, 0x0076, 0x20ed, 0x0078, 0x20f1, 0x007a, 0x20f5, 0x207c, 0x40f9, 0x20be, 0x417d,
-	0x0480, 0x2501, 0x0482, 0x2505, 0x0484, 0x2509, 0x0486, 0x250d, 0x0488, 0x2511, 0x048a, 0x2515, 0x048c, 0x2519, 0x048e, 0x251d,
-	0x0490, 0x2521, 0x0492, 0x2525, 0x0494, 0x2529, 0x0496, 0x252d, 0x0498, 0x2531, 0x049a, 0x2535, 0x049c, 0x2539, 0x049e, 0x253d,
-	0x04a0, 0x2541, 0x04a2, 0x2545, 0x04a4, 0x2549, 0x04a6, 0x254d, 0x04a8, 0x2551, 0x04aa, 0x2555, 0x04ac, 0x2559, 0x04ae, 0x255d,
-	0x04b0, 0x2561, 0x04b2, 0x2565, 0x04b4, 0x2569, 0x04b6, 0x256d, 0x04b8, 0x2571, 0x04ba, 0x2575, 0x04bc, 0x2579, 0x253e, 0x467d,
-	0x08c0, 0x2981, 0x08c2, 0x2985, 0x08c4, 0x2989, 0x08c6, 0x298d, 0x08c8, 0x2991, 0x08ca, 0x2995, 0x08cc, 0x2999, 0x08ce, 0x299d,
-	0x08d0, 0x29a1, 0x08d2, 0x29a5, 0x08d4, 0x29a9, 0x08d6, 0x29ad, 0x08d8, 0x29b1, 0x08da, 0x29b5, 0x08dc, 0x29b9, 0x08de, 0x29bd,
-	0x0ce0, 0x2dc1, 0x0ce2, 0x2dc5, 0x0ce4, 0x2dc9, 0x0ce6, 0x2dcd, 0x0ce8, 0x2dd1, 0x0cea, 0x2dd5, 0x0cec, 0x2dd9, 0x0cee, 0x2ddd,
-	0x10f0, 0x31e1, 0x10f2, 0x31e5, 0x10f4, 0x31e9, 0x10f6, 0x31ed, 0x20f8, 0x41f1, 0x20fa, 0x41f5, 0x257c, 0x46f9, 0x29be, 0x4b7d
-};
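To make the stufftab encoding above concrete, here is a minimal standalone sketch (an illustration, not driver code) that splits one entry into the three fields described in the table comment; the example value 0x201f is the entry for index 0x1f, i.e. an input byte of five consecutive '1' bits with no preceding '1' bits:

#include <stdio.h>
#include <stdint.h>

/* Standalone sketch: decode one stufftab entry into the fields documented
 * in the table comment above. */
int main(void)
{
	uint16_t entry = 0x201f;	/* stufftab[0*256 + 0x1f] */

	unsigned result = entry & 0x3ff;	/* bits  9.. 0: result bits   */
	unsigned ones   = (entry >> 10) & 7;	/* bits 12..10: trailing '1's */
	unsigned added  = (entry >> 13) & 3;	/* bits 14..13: stuffed bits  */

	printf("result=0x%03x trailing_ones=%u stuffed_bits=%u\n",
	       result, ones, added);
	return 0;
}

Running it prints result=0x01f, trailing_ones=0, stuffed_bits=1: a single '0' bit was inserted after the run of five '1' bits, which is exactly what hdlc_bitstuff_byte() below relies on.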
-
-/* hdlc_bitstuff_byte
- * perform HDLC bitstuffing for one input byte (8 bits, LSB first)
- * parameters:
- *	cin	input byte
- *	ones	number of trailing '1' bits in result before this step
- *	iwb	pointer to output buffer structure
- *		(write semaphore must be held)
- * return value:
- *	number of trailing '1' bits in result after this step
- */
-
-static inline int hdlc_bitstuff_byte(struct isowbuf_t *iwb, unsigned char cin,
-				     int ones)
-{
-	u16 stuff;
-	int shiftinc, newones;
-
-	/* get stuffing information for input byte
-	 * value: bit  9.. 0 = result bits
-	 *        bit 12..10 = number of trailing '1' bits in result
-	 *        bit 14..13 = number of bits added by stuffing
-	 */
-	stuff = stufftab[256 * ones + cin];
-	shiftinc = (stuff >> 13) & 3;
-	newones = (stuff >> 10) & 7;
-	stuff &= 0x3ff;
-
-	/* append stuffed byte to output stream */
-	isowbuf_putbits(iwb, stuff, 8 + shiftinc);
-	return newones;
-}
-
-/* hdlc_buildframe
- * Perform HDLC framing with bitstuffing on a byte buffer
- * The input buffer is regarded as a sequence of bits, starting with the least
- * significant bit of the first byte and ending with the most significant bit
- * of the last byte. A 16 bit FCS is appended as defined by RFC 1662.
- * Whenever five consecutive '1' bits appear in the resulting bit sequence, a
- * '0' bit is inserted after them.
- * The resulting bit string and a closing flag pattern (PPP_FLAG, '01111110')
- * are appended to the output buffer starting at the given bit position, which
- * is assumed to already contain a leading flag.
- * The output buffer must have room for count + count/5 + 6 bytes; this is
- * checked before any data is written.
- * parameters:
- *	in	input buffer
- *	count	number of bytes in input buffer
- *	iwb	pointer to output buffer structure
- *		(write semaphore must be held)
- * return value:
- *	position of end of packet in output buffer on success,
- *	-EAGAIN if write semaphore busy or buffer full
- */
-
-static inline int hdlc_buildframe(struct isowbuf_t *iwb,
-				  unsigned char *in, int count)
-{
-	int ones;
-	u16 fcs;
-	int end;
-	unsigned char c;
-
-	if (isowbuf_freebytes(iwb) < count + count / 5 + 6 ||
-	    isowbuf_startwrite(iwb) < 0) {
-		gig_dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN",
-			__func__, isowbuf_freebytes(iwb));
-		return -EAGAIN;
-	}
-
-	dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count);
-
-	/* bitstuff and checksum input data */
-	fcs = PPP_INITFCS;
-	ones = 0;
-	while (count-- > 0) {
-		c = *in++;
-		ones = hdlc_bitstuff_byte(iwb, c, ones);
-		fcs = crc_ccitt_byte(fcs, c);
-	}
-
-	/* bitstuff and append FCS
-	 * (complemented, least significant byte first) */
-	fcs ^= 0xffff;
-	ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones);
-	ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones);
-
-	/* put closing flag and repeat byte for flag idle */
-	isowbuf_putflag(iwb);
-	end = isowbuf_donewrite(iwb);
-	return end;
-}
-
-/* trans_buildframe
- * Append a block of 'transparent' data to the output buffer,
- * inverting the bytes.
- * The output buffer must have sufficient length; count bytes
- * starting at *out are safe and are verified to be present.
- * parameters:
- *	in	input buffer
- *	count	number of bytes in input buffer
- *	iwb	pointer to output buffer structure
- *		(write semaphore must be held)
- * return value:
- *	position of end of packet in output buffer on success,
- *	-EAGAIN if write semaphore busy or buffer full
- */
-
-static inline int trans_buildframe(struct isowbuf_t *iwb,
-				   unsigned char *in, int count)
-{
-	int write;
-	unsigned char c;
-
-	if (unlikely(count <= 0))
-		return iwb->write;
-
-	if (isowbuf_freebytes(iwb) < count ||
-	    isowbuf_startwrite(iwb) < 0) {
-		gig_dbg(DEBUG_ISO, "can't put %d bytes", count);
-		return -EAGAIN;
-	}
-
-	gig_dbg(DEBUG_STREAM, "put %d bytes", count);
-	dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count);
-
-	write = iwb->write;
-	do {
-		c = bitrev8(*in++);
-		iwb->data[write++] = c;
-		write %= BAS_OUTBUFSIZE;
-	} while (--count > 0);
-	iwb->write = write;
-	iwb->idle = c;
-
-	return isowbuf_donewrite(iwb);
-}
-
-int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len)
-{
-	int result;
-
-	switch (bcs->proto2) {
-	case L2_HDLC:
-		result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len);
-		gig_dbg(DEBUG_ISO, "%s: %d bytes HDLC -> %d",
-			__func__, len, result);
-		break;
-	default:			/* assume transparent */
-		result = trans_buildframe(bcs->hw.bas->isooutbuf, in, len);
-		gig_dbg(DEBUG_ISO, "%s: %d bytes trans -> %d",
-			__func__, len, result);
-	}
-	return result;
-}
-
-/* hdlc_putbyte
- * append byte c to current skb of B channel structure *bcs, updating fcs
- */
-static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs)
-{
-	bcs->rx_fcs = crc_ccitt_byte(bcs->rx_fcs, c);
-	if (bcs->rx_skb == NULL)
-		/* skipping */
-		return;
-	if (bcs->rx_skb->len >= bcs->rx_bufsize) {
-		dev_warn(bcs->cs->dev, "received oversized packet discarded\n");
-		bcs->hw.bas->giants++;
-		dev_kfree_skb_any(bcs->rx_skb);
-		bcs->rx_skb = NULL;
-		return;
-	}
-	__skb_put_u8(bcs->rx_skb, c);
-}
-
-/* hdlc_flush
- * drop partial HDLC data packet
- */
-static inline void hdlc_flush(struct bc_state *bcs)
-{
-	/* clear skb or allocate new if not skipping */
-	if (bcs->rx_skb != NULL)
-		skb_trim(bcs->rx_skb, 0);
-	else
-		gigaset_new_rx_skb(bcs);
-
-	/* reset packet state */
-	bcs->rx_fcs = PPP_INITFCS;
-}
-
-/* hdlc_done
- * process completed HDLC data packet
- */
-static inline void hdlc_done(struct bc_state *bcs)
-{
-	struct cardstate *cs = bcs->cs;
-	struct sk_buff *procskb;
-	unsigned int len;
-
-	if (unlikely(bcs->ignore)) {
-		bcs->ignore--;
-		hdlc_flush(bcs);
-		return;
-	}
-	procskb = bcs->rx_skb;
-	if (procskb == NULL) {
-		/* previous error */
-		gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__);
-		gigaset_isdn_rcv_err(bcs);
-	} else if (procskb->len < 2) {
-		dev_notice(cs->dev, "received short frame (%d octets)\n",
-			   procskb->len);
-		bcs->hw.bas->runts++;
-		dev_kfree_skb_any(procskb);
-		gigaset_isdn_rcv_err(bcs);
-	} else if (bcs->rx_fcs != PPP_GOODFCS) {
-		dev_notice(cs->dev, "frame check error\n");
-		bcs->hw.bas->fcserrs++;
-		dev_kfree_skb_any(procskb);
-		gigaset_isdn_rcv_err(bcs);
-	} else {
-		len = procskb->len;
-		__skb_trim(procskb, len -= 2);	/* subtract FCS */
-		gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", __func__, len);
-		dump_bytes(DEBUG_STREAM_DUMP,
-			   "rcv data", procskb->data, len);
-		bcs->hw.bas->goodbytes += len;
-		gigaset_skb_rcvd(bcs, procskb);
-	}
-	gigaset_new_rx_skb(bcs);
-	bcs->rx_fcs = PPP_INITFCS;
-}
-
-/* hdlc_frag
- * drop HDLC data packet with non-integral last byte
- */
-static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits)
-{
-	if (unlikely(bcs->ignore)) {
-		bcs->ignore--;
-		hdlc_flush(bcs);
-		return;
-	}
-
-	dev_notice(bcs->cs->dev, "received partial byte (%d bits)\n", inbits);
-	bcs->hw.bas->alignerrs++;
-	gigaset_isdn_rcv_err(bcs);
-	__skb_trim(bcs->rx_skb, 0);
-	bcs->rx_fcs = PPP_INITFCS;
-}
-
-/* bit counts lookup table for HDLC bit unstuffing
- * index: input byte
- * value: bit 0..3 = number of consecutive '1' bits starting from LSB
- *        bit 4..6 = number of consecutive '1' bits starting from MSB
- *		     (replacing 8 by 7 to make it fit; the algorithm won't care)
- *        bit 7 set if there are 5 or more "interior" consecutive '1' bits
- */
-static const unsigned char bitcounts[256] = {
-	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
-	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05,
-	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
-	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x80, 0x06,
-	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
-	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05,
-	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
-	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x80, 0x81, 0x80, 0x07,
-	0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14,
-	0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x15,
-	0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14,
-	0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x90, 0x16,
-	0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x24,
-	0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x25,
-	0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x33, 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x34,
-	0x40, 0x41, 0x40, 0x42, 0x40, 0x41, 0x40, 0x43, 0x50, 0x51, 0x50, 0x52, 0x60, 0x61, 0x70, 0x78
-};
-
-/* hdlc_unpack
- * perform HDLC frame processing (bit unstuffing, flag detection, FCS
- * calculation) on a sequence of received data bytes (8 bits each, LSB first)
- * pass on successfully received, complete frames as SKBs via gigaset_skb_rcvd
- * notify of errors via gigaset_isdn_rcv_err
- * tally frames, errors etc. in BC structure counters
- * parameters:
- *	src	received data
- *	count	number of received bytes
- *	bcs	receiving B channel structure
- */
-static inline void hdlc_unpack(unsigned char *src, unsigned count,
-			       struct bc_state *bcs)
-{
-	struct bas_bc_state *ubc = bcs->hw.bas;
-	int inputstate;
-	unsigned seqlen, inbyte, inbits;
-
-	/* load previous state:
-	 * inputstate = set of flag bits:
-	 * - INS_flag_hunt: no complete opening flag received since connection
-	 *                  setup or last abort
-	 * - INS_have_data: at least one complete data byte received since last
-	 *                  flag
-	 * seqlen = number of consecutive '1' bits in last 7 input stream bits
-	 *          (0..7)
-	 * inbyte = accumulated partial data byte (if !INS_flag_hunt)
-	 * inbits = number of valid bits in inbyte, starting at LSB (0..6)
-	 */
-	inputstate = bcs->inputstate;
-	seqlen = ubc->seqlen;
-	inbyte = ubc->inbyte;
-	inbits = ubc->inbits;
-
-	/* bit unstuffing a byte a time
-	 * Take your time to understand this; it's straightforward but tedious.
-	 * The "bitcounts" lookup table is used to speed up the counting of
-	 * leading and trailing '1' bits.
-	 */
-	while (count--) {
-		unsigned char c = *src++;
-		unsigned char tabentry = bitcounts[c];
-		unsigned lead1 = tabentry & 0x0f;
-		unsigned trail1 = (tabentry >> 4) & 0x0f;
-
-		seqlen += lead1;
-
-		if (unlikely(inputstate & INS_flag_hunt)) {
-			if (c == PPP_FLAG) {
-				/* flag-in-one */
-				inputstate &= ~(INS_flag_hunt | INS_have_data);
-				inbyte = 0;
-				inbits = 0;
-			} else if (seqlen == 6 && trail1 != 7) {
-				/* flag completed & not followed by abort */
-				inputstate &= ~(INS_flag_hunt | INS_have_data);
-				inbyte = c >> (lead1 + 1);
-				inbits = 7 - lead1;
-				if (trail1 >= 8) {
-					/* interior stuffing:
-					 * omitting the MSB handles most cases,
-					 * correct the incorrectly handled
-					 * cases individually */
-					inbits--;
-					switch (c) {
-					case 0xbe:
-						inbyte = 0x3f;
-						break;
-					}
-				}
-			}
-			/* else: continue flag-hunting */
-		} else if (likely(seqlen < 5 && trail1 < 7)) {
-			/* streamlined case: 8 data bits, no stuffing */
-			inbyte |= c << inbits;
-			hdlc_putbyte(inbyte & 0xff, bcs);
-			inputstate |= INS_have_data;
-			inbyte >>= 8;
-			/* inbits unchanged */
-		} else if (likely(seqlen == 6 && inbits == 7 - lead1 &&
-				  trail1 + 1 == inbits &&
-				  !(inputstate & INS_have_data))) {
-			/* streamlined case: flag idle - state unchanged */
-		} else if (unlikely(seqlen > 6)) {
-			/* abort sequence */
-			ubc->aborts++;
-			hdlc_flush(bcs);
-			inputstate |= INS_flag_hunt;
-		} else if (seqlen == 6) {
-			/* closing flag, including (6 - lead1) '1's
-			 * and one '0' from inbits */
-			if (inbits > 7 - lead1) {
-				hdlc_frag(bcs, inbits + lead1 - 7);
-				inputstate &= ~INS_have_data;
-			} else {
-				if (inbits < 7 - lead1)
-					ubc->stolen0s++;
-				if (inputstate & INS_have_data) {
-					hdlc_done(bcs);
-					inputstate &= ~INS_have_data;
-				}
-			}
-
-			if (c == PPP_FLAG) {
-				/* complete flag, LSB overlaps preceding flag */
-				ubc->shared0s++;
-				inbits = 0;
-				inbyte = 0;
-			} else if (trail1 != 7) {
-				/* remaining bits */
-				inbyte = c >> (lead1 + 1);
-				inbits = 7 - lead1;
-				if (trail1 >= 8) {
-					/* interior stuffing:
-					 * omitting the MSB handles most cases,
-					 * correct the incorrectly handled
-					 * cases individually */
-					inbits--;
-					switch (c) {
-					case 0xbe:
-						inbyte = 0x3f;
-						break;
-					}
-				}
-			} else {
-				/* abort sequence follows,
-				 * skb already empty anyway */
-				ubc->aborts++;
-				inputstate |= INS_flag_hunt;
-			}
-		} else { /* (seqlen < 6) && (seqlen == 5 || trail1 >= 7) */
-
-			if (c == PPP_FLAG) {
-				/* complete flag */
-				if (seqlen == 5)
-					ubc->stolen0s++;
-				if (inbits) {
-					hdlc_frag(bcs, inbits);
-					inbits = 0;
-					inbyte = 0;
-				} else if (inputstate & INS_have_data)
-					hdlc_done(bcs);
-				inputstate &= ~INS_have_data;
-			} else if (trail1 == 7) {
-				/* abort sequence */
-				ubc->aborts++;
-				hdlc_flush(bcs);
-				inputstate |= INS_flag_hunt;
-			} else {
-				/* stuffed data */
-				if (trail1 < 7) { /* => seqlen == 5 */
-					/* stuff bit at position lead1,
-					 * no interior stuffing */
-					unsigned char mask = (1 << lead1) - 1;
-					c = (c & mask) | ((c & ~mask) >> 1);
-					inbyte |= c << inbits;
-					inbits += 7;
-				} else if (seqlen < 5) { /* trail1 >= 8 */
-					/* interior stuffing:
-					 * omitting the MSB handles most cases,
-					 * correct the incorrectly handled
-					 * cases individually */
-					switch (c) {
-					case 0xbe:
-						c = 0x7e;
-						break;
-					}
-					inbyte |= c << inbits;
-					inbits += 7;
-				} else { /* seqlen == 5 && trail1 >= 8 */
-
-					/* stuff bit at lead1 *and* interior
-					 * stuffing -- unstuff individually */
-					switch (c) {
-					case 0x7d:
-						c = 0x3f;
-						break;
-					case 0xbe:
-						c = 0x3f;
-						break;
-					case 0x3e:
-						c = 0x1f;
-						break;
-					case 0x7c:
-						c = 0x3e;
-						break;
-					}
-					inbyte |= c << inbits;
-					inbits += 6;
-				}
-				if (inbits >= 8) {
-					inbits -= 8;
-					hdlc_putbyte(inbyte & 0xff, bcs);
-					inputstate |= INS_have_data;
-					inbyte >>= 8;
-				}
-			}
-		}
-		seqlen = trail1 & 7;
-	}
-
-	/* save new state */
-	bcs->inputstate = inputstate;
-	ubc->seqlen = seqlen;
-	ubc->inbyte = inbyte;
-	ubc->inbits = inbits;
-}
-
-/* trans_receive
- * pass on received USB frame transparently as SKB via gigaset_skb_rcvd
- * invert bytes
- * tally frames, errors etc. in BC structure counters
- * parameters:
- *	src	received data
- *	count	number of received bytes
- *	bcs	receiving B channel structure
- */
-static inline void trans_receive(unsigned char *src, unsigned count,
-				 struct bc_state *bcs)
-{
-	struct sk_buff *skb;
-	int dobytes;
-	unsigned char *dst;
-
-	if (unlikely(bcs->ignore)) {
-		bcs->ignore--;
-		return;
-	}
-	skb = bcs->rx_skb;
-	if (skb == NULL) {
-		skb = gigaset_new_rx_skb(bcs);
-		if (skb == NULL)
-			return;
-	}
-	dobytes = bcs->rx_bufsize - skb->len;
-	while (count > 0) {
-		dst = skb_put(skb, count < dobytes ? count : dobytes);
-		while (count > 0 && dobytes > 0) {
-			*dst++ = bitrev8(*src++);
-			count--;
-			dobytes--;
-		}
-		if (dobytes == 0) {
-			dump_bytes(DEBUG_STREAM_DUMP,
-				   "rcv data", skb->data, skb->len);
-			bcs->hw.bas->goodbytes += skb->len;
-			gigaset_skb_rcvd(bcs, skb);
-			skb = gigaset_new_rx_skb(bcs);
-			if (skb == NULL)
-				return;
-			dobytes = bcs->rx_bufsize;
-		}
-	}
-}
-
-void gigaset_isoc_receive(unsigned char *src, unsigned count,
-			  struct bc_state *bcs)
-{
-	switch (bcs->proto2) {
-	case L2_HDLC:
-		hdlc_unpack(src, count, bcs);
-		break;
-	default:		/* assume transparent */
-		trans_receive(src, count, bcs);
-	}
-}
-
-/* == data input =========================================================== */
-
-/* process a block of received bytes in command mode (mstate != MS_LOCKED)
- * Append received bytes to the command response buffer and forward them
- * line by line to the response handler.
- * Note: Received lines may be terminated by CR, LF, or CR LF, which will be
- * removed before passing the line to the response handler.
- */
-static void cmd_loop(unsigned char *src, int numbytes, struct inbuf_t *inbuf)
-{
-	struct cardstate *cs = inbuf->cs;
-	unsigned cbytes      = cs->cbytes;
-	unsigned char c;
-
-	while (numbytes--) {
-		c = *src++;
-		switch (c) {
-		case '\n':
-			if (cbytes == 0 && cs->respdata[0] == '\r') {
-				/* collapse LF with preceding CR */
-				cs->respdata[0] = 0;
-				break;
-			}
-			/* fall through */
-		case '\r':
-			/* end of message line, pass to response handler */
-			if (cbytes >= MAX_RESP_SIZE) {
-				dev_warn(cs->dev, "response too large (%d)\n",
-					 cbytes);
-				cbytes = MAX_RESP_SIZE;
-			}
-			cs->cbytes = cbytes;
-			gigaset_dbg_buffer(DEBUG_TRANSCMD, "received response",
-					   cbytes, cs->respdata);
-			gigaset_handle_modem_response(cs);
-			cbytes = 0;
-
-			/* store EOL byte for CRLF collapsing */
-			cs->respdata[0] = c;
-			break;
-		default:
-			/* append to line buffer if possible */
-			if (cbytes < MAX_RESP_SIZE)
-				cs->respdata[cbytes] = c;
-			cbytes++;
-		}
-	}
-
-	/* save state */
-	cs->cbytes = cbytes;
-}
-
-
-/* process a block of data received through the control channel
- */
-void gigaset_isoc_input(struct inbuf_t *inbuf)
-{
-	struct cardstate *cs = inbuf->cs;
-	unsigned tail, head, numbytes;
-	unsigned char *src;
-
-	head = inbuf->head;
-	while (head != (tail = inbuf->tail)) {
-		gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
-		if (head > tail)
-			tail = RBUFSIZE;
-		src = inbuf->data + head;
-		numbytes = tail - head;
-		gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
-
-		if (cs->mstate == MS_LOCKED) {
-			gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
-					   numbytes, src);
-			gigaset_if_receive(inbuf->cs, src, numbytes);
-		} else {
-			cmd_loop(src, numbytes, inbuf);
-		}
-
-		head += numbytes;
-		if (head == RBUFSIZE)
-			head = 0;
-		gig_dbg(DEBUG_INTR, "setting head to %u", head);
-		inbuf->head = head;
-	}
-}
-
-
-/* == data output ========================================================== */
-
-/**
- * gigaset_isoc_send_skb() - queue an skb for sending
- * @bcs:	B channel descriptor structure.
- * @skb:	data to send.
- *
- * Called by LL to queue an skb for sending, and start transmission if
- * necessary.
- * Once the payload data has been transmitted completely, gigaset_skb_sent()
- * will be called with the skb's link layer header preserved.
- *
- * Return value:
- *	number of bytes accepted for sending (skb->len) if ok,
- *	error code < 0 (eg. -ENODEV) on error
- */
-int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb)
-{
-	int len = skb->len;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bcs->cs->lock, flags);
-	if (!bcs->cs->connected) {
-		spin_unlock_irqrestore(&bcs->cs->lock, flags);
-		return -ENODEV;
-	}
-
-	skb_queue_tail(&bcs->squeue, skb);
-	gig_dbg(DEBUG_ISO, "%s: skb queued, qlen=%d",
-		__func__, skb_queue_len(&bcs->squeue));
-
-	/* tasklet submits URB if necessary */
-	tasklet_schedule(&bcs->hw.bas->sent_tasklet);
-	spin_unlock_irqrestore(&bcs->cs->lock, flags);
-
-	return len;	/* ok so far */
-}
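
The hdlc_buildframe() path above does two things per payload byte: it folds the byte into the RFC 1662 FCS-16 via crc_ccitt_byte() and bitstuffs it through the stufftab[] lookup. A minimal, table-free sketch of both steps (stand-alone helpers with assumed names, not part of the driver) shows what each table entry encodes:

#include <stdint.h>

/* Bitwise form of the RFC 1662 FCS-16 (reflected CRC-CCITT, polynomial
 * 0x8408, initial value 0xffff); one call corresponds to one
 * crc_ccitt_byte() invocation in hdlc_buildframe() above. */
static uint16_t fcs16_byte(uint16_t fcs, uint8_t c)
{
	fcs ^= c;
	for (int i = 0; i < 8; i++)
		fcs = (fcs & 1) ? (fcs >> 1) ^ 0x8408 : fcs >> 1;
	return fcs;
}

/* Bit-at-a-time HDLC stuffing of one byte, LSB first: a '0' bit is emitted
 * after every run of five consecutive '1' bits.  stufftab[] collapses this
 * whole loop into a single lookup per byte; 'ones' carries the run length
 * across bytes exactly like the parameter of the same name in
 * hdlc_bitstuff_byte().  Illustrative only. */
static int stuff_byte(uint8_t c, int ones, void (*putbit)(int bit))
{
	for (int i = 0; i < 8; i++) {
		int bit = (c >> i) & 1;

		putbit(bit);
		if (!bit) {
			ones = 0;
		} else if (++ones == 5) {
			putbit(0);	/* stuffing bit */
			ones = 0;
		}
	}
	return ones;	/* number of trailing '1' bits, 0..4 */
}

Framing a buffer then amounts to: start fcs at 0xffff, run every payload byte through both helpers, append the complemented FCS (low byte first, also stuffed), and close with the 0x7e flag, which is what hdlc_buildframe() does with the table-driven equivalents.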
diff --git a/drivers/staging/isdn/gigaset/proc.c b/drivers/staging/isdn/gigaset/proc.c
deleted file mode 100644
index 8914439..0000000
--- a/drivers/staging/isdn/gigaset/proc.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Stuff used by all variants of the driver
- *
- * Copyright (c) 2001 by Stefan Eilers,
- *                       Hansjoerg Lipp <hjlipp@web.de>,
- *                       Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-
-static ssize_t show_cidmode(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	struct cardstate *cs = dev_get_drvdata(dev);
-
-	return sprintf(buf, "%u\n", cs->cidmode);
-}
-
-static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr,
-			   const char *buf, size_t count)
-{
-	struct cardstate *cs = dev_get_drvdata(dev);
-	long int value;
-	char *end;
-
-	value = simple_strtol(buf, &end, 0);
-	while (*end)
-		if (!isspace(*end++))
-			return -EINVAL;
-	if (value < 0 || value > 1)
-		return -EINVAL;
-
-	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS;
-
-	cs->waiting = 1;
-	if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE,
-			       NULL, value, NULL)) {
-		cs->waiting = 0;
-		mutex_unlock(&cs->mutex);
-		return -ENOMEM;
-	}
-	gigaset_schedule_event(cs);
-
-	wait_event(cs->waitqueue, !cs->waiting);
-
-	mutex_unlock(&cs->mutex);
-
-	return count;
-}
-
-static DEVICE_ATTR(cidmode, S_IRUGO | S_IWUSR, show_cidmode, set_cidmode);
-
-/* free sysfs for device */
-void gigaset_free_dev_sysfs(struct cardstate *cs)
-{
-	if (!cs->tty_dev)
-		return;
-
-	gig_dbg(DEBUG_INIT, "removing sysfs entries");
-	device_remove_file(cs->tty_dev, &dev_attr_cidmode);
-}
-
-/* initialize sysfs for device */
-void gigaset_init_dev_sysfs(struct cardstate *cs)
-{
-	if (!cs->tty_dev)
-		return;
-
-	gig_dbg(DEBUG_INIT, "setting up sysfs");
-	if (device_create_file(cs->tty_dev, &dev_attr_cidmode))
-		pr_err("could not create sysfs attribute\n");
-}
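
The set_cidmode() store handler above accepts exactly "0" or "1", tolerating trailing whitespace such as the newline appended by `echo 1 > .../cidmode`. A stand-alone sketch of that parse step (hypothetical helper, not driver code):

#include <ctype.h>
#include <errno.h>
#include <stdlib.h>

/* Accept "0" or "1", optionally followed by whitespace only; mirrors the
 * simple_strtol() call plus trailing-whitespace scan in set_cidmode(). */
static int parse_cidmode(const char *buf, long *value)
{
	char *end;

	*value = strtol(buf, &end, 0);
	while (*end)
		if (!isspace((unsigned char)*end++))
			return -EINVAL;
	if (*value < 0 || *value > 1)
		return -EINVAL;
	return 0;
}

In current kernel code, kstrtol(), which already tolerates a single trailing newline, would be the usual replacement for this hand-rolled scan.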
diff --git a/drivers/staging/isdn/gigaset/ser-gigaset.c b/drivers/staging/isdn/gigaset/ser-gigaset.c
deleted file mode 100644
index 5587e9e7..0000000
--- a/drivers/staging/isdn/gigaset/ser-gigaset.c
+++ /dev/null
@@ -1,796 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* This is the serial hardware link layer (HLL) for the Gigaset 307x isdn
- * DECT base (aka Sinus 45 isdn) using the RS232 DECT data module M101,
- * written as a line discipline.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/completion.h>
-
-/* Version Information */
-#define DRIVER_AUTHOR "Tilman Schmidt"
-#define DRIVER_DESC "Serial Driver for Gigaset 307x using Siemens M101"
-
-#define GIGASET_MINORS     1
-#define GIGASET_MINOR      0
-#define GIGASET_MODULENAME "ser_gigaset"
-#define GIGASET_DEVNAME    "ttyGS"
-
-/* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
-#define IF_WRITEBUF 264
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_LDISC(N_GIGASET_M101);
-
-static int startmode = SM_ISDN;
-module_param(startmode, int, S_IRUGO);
-MODULE_PARM_DESC(startmode, "initial operation mode");
-static int cidmode = 1;
-module_param(cidmode, int, S_IRUGO);
-MODULE_PARM_DESC(cidmode, "stay in CID mode when idle");
-
-static struct gigaset_driver *driver;
-
-struct ser_cardstate {
-	struct platform_device	dev;
-	struct tty_struct	*tty;
-	atomic_t		refcnt;
-	struct completion	dead_cmp;
-};
-
-static struct platform_driver device_driver = {
-	.driver = {
-		.name = GIGASET_MODULENAME,
-	},
-};
-
-static void flush_send_queue(struct cardstate *);
-
-/* transmit data from current open skb
- * result: number of bytes sent or error code < 0
- */
-static int write_modem(struct cardstate *cs)
-{
-	struct tty_struct *tty = cs->hw.ser->tty;
-	struct bc_state *bcs = &cs->bcs[0];	/* only one channel */
-	struct sk_buff *skb = bcs->tx_skb;
-	int sent = -EOPNOTSUPP;
-
-	WARN_ON(!tty || !tty->ops || !skb);
-
-	if (!skb->len) {
-		dev_kfree_skb_any(skb);
-		bcs->tx_skb = NULL;
-		return -EINVAL;
-	}
-
-	set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-	if (tty->ops->write)
-		sent = tty->ops->write(tty, skb->data, skb->len);
-	gig_dbg(DEBUG_OUTPUT, "write_modem: sent %d", sent);
-	if (sent < 0) {
-		/* error */
-		flush_send_queue(cs);
-		return sent;
-	}
-	skb_pull(skb, sent);
-	if (!skb->len) {
-		/* skb sent completely */
-		gigaset_skb_sent(bcs, skb);
-
-		gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!",
-			(unsigned long) skb);
-		dev_kfree_skb_any(skb);
-		bcs->tx_skb = NULL;
-	}
-	return sent;
-}
-
-/*
- * transmit first queued command buffer
- * result: number of bytes sent or error code < 0
- */
-static int send_cb(struct cardstate *cs)
-{
-	struct tty_struct *tty = cs->hw.ser->tty;
-	struct cmdbuf_t *cb, *tcb;
-	unsigned long flags;
-	int sent = 0;
-
-	WARN_ON(!tty || !tty->ops);
-
-	cb = cs->cmdbuf;
-	if (!cb)
-		return 0;	/* nothing to do */
-
-	if (cb->len) {
-		set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-		sent = tty->ops->write(tty, cb->buf + cb->offset, cb->len);
-		if (sent < 0) {
-			/* error */
-			gig_dbg(DEBUG_OUTPUT, "send_cb: write error %d", sent);
-			flush_send_queue(cs);
-			return sent;
-		}
-		cb->offset += sent;
-		cb->len -= sent;
-		gig_dbg(DEBUG_OUTPUT, "send_cb: sent %d, left %u, queued %u",
-			sent, cb->len, cs->cmdbytes);
-	}
-
-	while (cb && !cb->len) {
-		spin_lock_irqsave(&cs->cmdlock, flags);
-		cs->cmdbytes -= cs->curlen;
-		tcb = cb;
-		cs->cmdbuf = cb = cb->next;
-		if (cb) {
-			cb->prev = NULL;
-			cs->curlen = cb->len;
-		} else {
-			cs->lastcmdbuf = NULL;
-			cs->curlen = 0;
-		}
-		spin_unlock_irqrestore(&cs->cmdlock, flags);
-
-		if (tcb->wake_tasklet)
-			tasklet_schedule(tcb->wake_tasklet);
-		kfree(tcb);
-	}
-	return sent;
-}
-
-/*
- * send queue tasklet
- * If there is already a skb opened, put data to the transfer buffer
- * by calling "write_modem".
- * Otherwise take a new skb out of the queue.
- */
-static void gigaset_modem_fill(unsigned long data)
-{
-	struct cardstate *cs = (struct cardstate *) data;
-	struct bc_state *bcs;
-	struct sk_buff *nextskb;
-	int sent = 0;
-
-	if (!cs) {
-		gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__);
-		return;
-	}
-	bcs = cs->bcs;
-	if (!bcs) {
-		gig_dbg(DEBUG_OUTPUT, "%s: no B channel state", __func__);
-		return;
-	}
-	if (!bcs->tx_skb) {
-		/* no skb is being sent; send command if any */
-		sent = send_cb(cs);
-		gig_dbg(DEBUG_OUTPUT, "%s: send_cb -> %d", __func__, sent);
-		if (sent)
-			/* something sent or error */
-			return;
-
-		/* no command to send; get skb */
-		nextskb = skb_dequeue(&bcs->squeue);
-		if (!nextskb)
-			/* no skb either, nothing to do */
-			return;
-		bcs->tx_skb = nextskb;
-
-		gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)",
-			(unsigned long) bcs->tx_skb);
-	}
-
-	/* send skb */
-	gig_dbg(DEBUG_OUTPUT, "%s: tx_skb", __func__);
-	if (write_modem(cs) < 0)
-		gig_dbg(DEBUG_OUTPUT, "%s: write_modem failed", __func__);
-}
-
-/*
- * throw away all data queued for sending
- */
-static void flush_send_queue(struct cardstate *cs)
-{
-	struct sk_buff *skb;
-	struct cmdbuf_t *cb;
-	unsigned long flags;
-
-	/* command queue */
-	spin_lock_irqsave(&cs->cmdlock, flags);
-	while ((cb = cs->cmdbuf) != NULL) {
-		cs->cmdbuf = cb->next;
-		if (cb->wake_tasklet)
-			tasklet_schedule(cb->wake_tasklet);
-		kfree(cb);
-	}
-	cs->cmdbuf = cs->lastcmdbuf = NULL;
-	cs->cmdbytes = cs->curlen = 0;
-	spin_unlock_irqrestore(&cs->cmdlock, flags);
-
-	/* data queue */
-	if (cs->bcs->tx_skb)
-		dev_kfree_skb_any(cs->bcs->tx_skb);
-	while ((skb = skb_dequeue(&cs->bcs->squeue)) != NULL)
-		dev_kfree_skb_any(skb);
-}
-
-
-/* Gigaset Driver Interface */
-/* ======================== */
-
-/*
- * queue an AT command string for transmission to the Gigaset device
- * parameters:
- *	cs		controller state structure
- *	buf		buffer containing the string to send
- *	len		number of characters to send
- *	wake_tasklet	tasklet to run when transmission is complete, or NULL
- * return value:
- *	number of bytes queued, or error code < 0
- */
-static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
-{
-	unsigned long flags;
-
-	gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
-			   DEBUG_TRANSCMD : DEBUG_LOCKCMD,
-			   "CMD Transmit", cb->len, cb->buf);
-
-	spin_lock_irqsave(&cs->cmdlock, flags);
-	cb->prev = cs->lastcmdbuf;
-	if (cs->lastcmdbuf)
-		cs->lastcmdbuf->next = cb;
-	else {
-		cs->cmdbuf = cb;
-		cs->curlen = cb->len;
-	}
-	cs->cmdbytes += cb->len;
-	cs->lastcmdbuf = cb;
-	spin_unlock_irqrestore(&cs->cmdlock, flags);
-
-	spin_lock_irqsave(&cs->lock, flags);
-	if (cs->connected)
-		tasklet_schedule(&cs->write_tasklet);
-	spin_unlock_irqrestore(&cs->lock, flags);
-	return cb->len;
-}
-
-/*
- * tty_driver.write_room interface routine
- * return number of characters the driver will accept to be written
- * parameter:
- *	controller state structure
- * return value:
- *	number of characters
- */
-static int gigaset_write_room(struct cardstate *cs)
-{
-	unsigned bytes;
-
-	bytes = cs->cmdbytes;
-	return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0;
-}
-
-/*
- * tty_driver.chars_in_buffer interface routine
- * return number of characters waiting to be sent
- * parameter:
- *	controller state structure
- * return value:
- *	number of characters
- */
-static int gigaset_chars_in_buffer(struct cardstate *cs)
-{
-	return cs->cmdbytes;
-}
-
-/*
- * implementation of ioctl(GIGASET_BRKCHARS)
- * parameter:
- *	controller state structure
- * return value:
- *	-EINVAL (unimplemented function)
- */
-static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
-{
-	/* not implemented */
-	return -EINVAL;
-}
-
-/*
- * Open B channel
- * Called by "do_action" in ev-layer.c
- */
-static int gigaset_init_bchannel(struct bc_state *bcs)
-{
-	/* nothing to do for M10x */
-	gigaset_bchannel_up(bcs);
-	return 0;
-}
-
-/*
- * Close B channel
- * Called by "do_action" in ev-layer.c
- */
-static int gigaset_close_bchannel(struct bc_state *bcs)
-{
-	/* nothing to do for M10x */
-	gigaset_bchannel_down(bcs);
-	return 0;
-}
-
-/*
- * Set up B channel structure
- * This is called by "gigaset_initcs" in common.c
- */
-static int gigaset_initbcshw(struct bc_state *bcs)
-{
-	/* unused */
-	bcs->hw.ser = NULL;
-	return 0;
-}
-
-/*
- * Free B channel structure
- * Called by "gigaset_freebcs" in common.c
- */
-static void gigaset_freebcshw(struct bc_state *bcs)
-{
-	/* unused */
-}
-
-/*
- * Reinitialize B channel structure
- * This is called by "bcs_reinit" in common.c
- */
-static void gigaset_reinitbcshw(struct bc_state *bcs)
-{
-	/* nothing to do for M10x */
-}
-
-/*
- * Free hardware specific device data
- * This will be called by "gigaset_freecs" in common.c
- */
-static void gigaset_freecshw(struct cardstate *cs)
-{
-	tasklet_kill(&cs->write_tasklet);
-	if (!cs->hw.ser)
-		return;
-	platform_device_unregister(&cs->hw.ser->dev);
-}
-
-static void gigaset_device_release(struct device *dev)
-{
-	kfree(container_of(dev, struct ser_cardstate, dev.dev));
-}
-
-/*
- * Set up hardware specific device data
- * This is called by "gigaset_initcs" in common.c
- */
-static int gigaset_initcshw(struct cardstate *cs)
-{
-	int rc;
-	struct ser_cardstate *scs;
-
-	scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL);
-	if (!scs) {
-		pr_err("out of memory\n");
-		return -ENOMEM;
-	}
-	cs->hw.ser = scs;
-
-	cs->hw.ser->dev.name = GIGASET_MODULENAME;
-	cs->hw.ser->dev.id = cs->minor_index;
-	cs->hw.ser->dev.dev.release = gigaset_device_release;
-	rc = platform_device_register(&cs->hw.ser->dev);
-	if (rc != 0) {
-		pr_err("error %d registering platform device\n", rc);
-		kfree(cs->hw.ser);
-		cs->hw.ser = NULL;
-		return rc;
-	}
-
-	tasklet_init(&cs->write_tasklet,
-		     gigaset_modem_fill, (unsigned long) cs);
-	return 0;
-}
-
-/*
- * set modem control lines
- * Parameters:
- *	card state structure
- *	modem control line state ([TIOCM_DTR]|[TIOCM_RTS])
- * Called by "gigaset_start" and "gigaset_enterconfigmode" in common.c
- * and by "if_lock" and "if_termios" in interface.c
- */
-static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
-				  unsigned new_state)
-{
-	struct tty_struct *tty = cs->hw.ser->tty;
-	unsigned int set, clear;
-
-	WARN_ON(!tty || !tty->ops);
-	/* tiocmset is an optional tty driver method */
-	if (!tty->ops->tiocmset)
-		return -EINVAL;
-	set = new_state & ~old_state;
-	clear = old_state & ~new_state;
-	if (!set && !clear)
-		return 0;
-	gig_dbg(DEBUG_IF, "tiocmset set %x clear %x", set, clear);
-	return tty->ops->tiocmset(tty, set, clear);
-}
-
-static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
-{
-	return -EINVAL;
-}
-
-static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
-{
-	return -EINVAL;
-}
-
-static const struct gigaset_ops ops = {
-	.write_cmd = gigaset_write_cmd,
-	.write_room = gigaset_write_room,
-	.chars_in_buffer = gigaset_chars_in_buffer,
-	.brkchars = gigaset_brkchars,
-	.init_bchannel = gigaset_init_bchannel,
-	.close_bchannel = gigaset_close_bchannel,
-	.initbcshw = gigaset_initbcshw,
-	.freebcshw = gigaset_freebcshw,
-	.reinitbcshw = gigaset_reinitbcshw,
-	.initcshw = gigaset_initcshw,
-	.freecshw = gigaset_freecshw,
-	.set_modem_ctrl = gigaset_set_modem_ctrl,
-	.baud_rate = gigaset_baud_rate,
-	.set_line_ctrl = gigaset_set_line_ctrl,
-	.send_skb = gigaset_m10x_send_skb,	/* asyncdata.c */
-	.handle_input = gigaset_m10x_input,	/* asyncdata.c */
-};
-
-
-/* Line Discipline Interface */
-/* ========================= */
-
-/* helper functions for cardstate refcounting */
-static struct cardstate *cs_get(struct tty_struct *tty)
-{
-	struct cardstate *cs = tty->disc_data;
-
-	if (!cs || !cs->hw.ser) {
-		gig_dbg(DEBUG_ANY, "%s: no cardstate", __func__);
-		return NULL;
-	}
-	atomic_inc(&cs->hw.ser->refcnt);
-	return cs;
-}
-
-static void cs_put(struct cardstate *cs)
-{
-	if (atomic_dec_and_test(&cs->hw.ser->refcnt))
-		complete(&cs->hw.ser->dead_cmp);
-}
-
-/*
- * Called by the tty driver when the line discipline is pushed onto the tty.
- * Called in process context.
- */
-static int
-gigaset_tty_open(struct tty_struct *tty)
-{
-	struct cardstate *cs;
-	int rc;
-
-	gig_dbg(DEBUG_INIT, "Starting HLL for Gigaset M101");
-
-	pr_info(DRIVER_DESC "\n");
-
-	if (!driver) {
-		pr_err("%s: no driver structure\n", __func__);
-		return -ENODEV;
-	}
-
-	/* allocate memory for our device state and initialize it */
-	cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
-	if (!cs) {
-		rc = -ENODEV;
-		goto error;
-	}
-
-	cs->dev = &cs->hw.ser->dev.dev;
-	cs->hw.ser->tty = tty;
-	atomic_set(&cs->hw.ser->refcnt, 1);
-	init_completion(&cs->hw.ser->dead_cmp);
-	tty->disc_data = cs;
-
-	/* Set the amount of data we're willing to receive per call
-	 * from the hardware driver to half of the input buffer size
-	 * to leave some reserve.
-	 * Note: We don't do flow control towards the hardware driver.
-	 * If more data is received than will fit into the input buffer,
-	 * it will be dropped and an error will be logged. This should
-	 * never happen as the device is slow and the buffer size ample.
-	 */
-	tty->receive_room = RBUFSIZE/2;
-
-	/* OK.. Initialization of the datastructures and the HW is done.. Now
-	 * startup system and notify the LL that we are ready to run
-	 */
-	if (startmode == SM_LOCKED)
-		cs->mstate = MS_LOCKED;
-	rc = gigaset_start(cs);
-	if (rc < 0) {
-		tasklet_kill(&cs->write_tasklet);
-		goto error;
-	}
-
-	gig_dbg(DEBUG_INIT, "Startup of HLL done");
-	return 0;
-
-error:
-	gig_dbg(DEBUG_INIT, "Startup of HLL failed");
-	tty->disc_data = NULL;
-	gigaset_freecs(cs);
-	return rc;
-}
-
-/*
- * Called by the tty driver when the line discipline is removed.
- * Called from process context.
- */
-static void
-gigaset_tty_close(struct tty_struct *tty)
-{
-	struct cardstate *cs = tty->disc_data;
-
-	gig_dbg(DEBUG_INIT, "Stopping HLL for Gigaset M101");
-
-	if (!cs) {
-		gig_dbg(DEBUG_INIT, "%s: no cardstate", __func__);
-		return;
-	}
-
-	/* prevent other callers from entering ldisc methods */
-	tty->disc_data = NULL;
-
-	if (!cs->hw.ser)
-		pr_err("%s: no hw cardstate\n", __func__);
-	else {
-		/* wait for running methods to finish */
-		if (!atomic_dec_and_test(&cs->hw.ser->refcnt))
-			wait_for_completion(&cs->hw.ser->dead_cmp);
-	}
-
-	/* stop operations */
-	gigaset_stop(cs);
-	tasklet_kill(&cs->write_tasklet);
-	flush_send_queue(cs);
-	cs->dev = NULL;
-	gigaset_freecs(cs);
-
-	gig_dbg(DEBUG_INIT, "Shutdown of HLL done");
-}
-
-/*
- * Called by the tty driver when the tty line is hung up.
- * Wait for I/O to driver to complete and unregister ISDN device.
- * This is already done by the close routine, so just call that.
- * Called from process context.
- */
-static int gigaset_tty_hangup(struct tty_struct *tty)
-{
-	gigaset_tty_close(tty);
-	return 0;
-}
-
-/*
- * Ioctl on the tty.
- * Called in process context only.
- * May be re-entered by multiple ioctl calling threads.
- */
-static int
-gigaset_tty_ioctl(struct tty_struct *tty, struct file *file,
-		  unsigned int cmd, unsigned long arg)
-{
-	struct cardstate *cs = cs_get(tty);
-	int rc, val;
-	int __user *p = (int __user *)arg;
-
-	if (!cs)
-		return -ENXIO;
-
-	switch (cmd) {
-
-	case FIONREAD:
-		/* unused, always return zero */
-		val = 0;
-		rc = put_user(val, p);
-		break;
-
-	case TCFLSH:
-		/* flush our buffers and the serial port's buffer */
-		switch (arg) {
-		case TCIFLUSH:
-			/* no own input buffer to flush */
-			break;
-		case TCIOFLUSH:
-		case TCOFLUSH:
-			flush_send_queue(cs);
-			break;
-		}
-		/* fall through */
-
-	default:
-		/* pass through to underlying serial device */
-		rc = n_tty_ioctl_helper(tty, file, cmd, arg);
-		break;
-	}
-	cs_put(cs);
-	return rc;
-}
-
-/*
- * Called by the tty driver when a block of data has been received.
- * Will not be re-entered while running but other ldisc functions
- * may be called in parallel.
- * Can be called from hard interrupt level as well as soft interrupt
- * level or mainline.
- * Parameters:
- *	tty	tty structure
- *	buf	buffer containing received characters
- *	cflags	buffer containing error flags for received characters (ignored)
- *	count	number of received characters
- */
-static void
-gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
-		    char *cflags, int count)
-{
-	struct cardstate *cs = cs_get(tty);
-	unsigned tail, head, n;
-	struct inbuf_t *inbuf;
-
-	if (!cs)
-		return;
-	inbuf = cs->inbuf;
-	if (!inbuf) {
-		dev_err(cs->dev, "%s: no inbuf\n", __func__);
-		cs_put(cs);
-		return;
-	}
-
-	tail = inbuf->tail;
-	head = inbuf->head;
-	gig_dbg(DEBUG_INTR, "buffer state: %u -> %u, receive %u bytes",
-		head, tail, count);
-
-	if (head <= tail) {
-		/* possible buffer wraparound */
-		n = min_t(unsigned, count, RBUFSIZE - tail);
-		memcpy(inbuf->data + tail, buf, n);
-		tail = (tail + n) % RBUFSIZE;
-		buf += n;
-		count -= n;
-	}
-
-	if (count > 0) {
-		/* tail < head and some data left */
-		n = head - tail - 1;
-		if (count > n) {
-			dev_err(cs->dev,
-				"inbuf overflow, discarding %d bytes\n",
-				count - n);
-			count = n;
-		}
-		memcpy(inbuf->data + tail, buf, count);
-		tail += count;
-	}
-
-	gig_dbg(DEBUG_INTR, "setting tail to %u", tail);
-	inbuf->tail = tail;
-
-	/* Everything was received .. Push data into handler */
-	gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
-	gigaset_schedule_event(cs);
-	cs_put(cs);
-}
-
-/*
- * Called by the tty driver when there's room for more data to send.
- */
-static void
-gigaset_tty_wakeup(struct tty_struct *tty)
-{
-	struct cardstate *cs = cs_get(tty);
-
-	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-	if (!cs)
-		return;
-	tasklet_schedule(&cs->write_tasklet);
-	cs_put(cs);
-}
-
-static struct tty_ldisc_ops gigaset_ldisc = {
-	.owner		= THIS_MODULE,
-	.magic		= TTY_LDISC_MAGIC,
-	.name		= "ser_gigaset",
-	.open		= gigaset_tty_open,
-	.close		= gigaset_tty_close,
-	.hangup		= gigaset_tty_hangup,
-	.ioctl		= gigaset_tty_ioctl,
-	.receive_buf	= gigaset_tty_receive,
-	.write_wakeup	= gigaset_tty_wakeup,
-};
-
-
-/* Initialization / Shutdown */
-/* ========================= */
-
-static int __init ser_gigaset_init(void)
-{
-	int rc;
-
-	gig_dbg(DEBUG_INIT, "%s", __func__);
-	rc = platform_driver_register(&device_driver);
-	if (rc != 0) {
-		pr_err("error %d registering platform driver\n", rc);
-		return rc;
-	}
-
-	/* allocate memory for our driver state and initialize it */
-	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
-				    GIGASET_MODULENAME, GIGASET_DEVNAME,
-				    &ops, THIS_MODULE);
-	if (!driver) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
-	if (rc != 0) {
-		pr_err("error %d registering line discipline\n", rc);
-		goto error;
-	}
-
-	return 0;
-
-error:
-	if (driver) {
-		gigaset_freedriver(driver);
-		driver = NULL;
-	}
-	platform_driver_unregister(&device_driver);
-	return rc;
-}
-
-static void __exit ser_gigaset_exit(void)
-{
-	int rc;
-
-	gig_dbg(DEBUG_INIT, "%s", __func__);
-
-	if (driver) {
-		gigaset_freedriver(driver);
-		driver = NULL;
-	}
-
-	rc = tty_unregister_ldisc(N_GIGASET_M101);
-	if (rc != 0)
-		pr_err("error %d unregistering line discipline\n", rc);
-
-	platform_driver_unregister(&device_driver);
-}
-
-module_init(ser_gigaset_init);
-module_exit(ser_gigaset_exit);
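
gigaset_tty_receive() above copies incoming characters into the shared inbuf ring buffer: the line discipline advances tail, the event-handler side advances head, and one slot is always left free so that head == tail unambiguously means "empty". A minimal byte-at-a-time sketch of that wrap-around write (assumed structure and names, not driver code):

#define RBUF_SIZE 4096		/* stands in for RBUFSIZE */

struct ring {
	unsigned char data[RBUF_SIZE];
	unsigned int head;	/* next byte the reader will consume */
	unsigned int tail;	/* next free slot for the writer */
};

/* Store up to 'count' bytes and return how many fit; bytes that do not fit
 * are dropped, matching the "inbuf overflow" handling above. */
static unsigned int ring_write(struct ring *rb, const unsigned char *buf,
			       unsigned int count)
{
	unsigned int stored = 0;

	while (count-- && (rb->tail + 1) % RBUF_SIZE != rb->head) {
		rb->data[rb->tail] = *buf++;
		rb->tail = (rb->tail + 1) % RBUF_SIZE;
		stored++;
	}
	return stored;
}

The driver's version does the same thing in at most two memcpy() chunks rather than byte by byte, and then schedules the event handler via gigaset_schedule_event() to consume from head.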
diff --git a/drivers/staging/isdn/gigaset/usb-gigaset.c b/drivers/staging/isdn/gigaset/usb-gigaset.c
deleted file mode 100644
index a20c0bf..0000000
--- a/drivers/staging/isdn/gigaset/usb-gigaset.c
+++ /dev/null
@@ -1,959 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * USB driver for Gigaset 307x directly or using M105 Data.
- *
- * Copyright (c) 2001 by Stefan Eilers
- *                   and Hansjoerg Lipp <hjlipp@web.de>.
- *
- * This driver was derived from the USB skeleton driver by
- * Greg Kroah-Hartman <greg@kroah.com>
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-/* Version Information */
-#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers"
-#define DRIVER_DESC "USB Driver for Gigaset 307x using M105"
-
-/* Module parameters */
-
-static int startmode = SM_ISDN;
-static int cidmode = 1;
-
-module_param(startmode, int, S_IRUGO);
-module_param(cidmode, int, S_IRUGO);
-MODULE_PARM_DESC(startmode, "start in isdn4linux mode");
-MODULE_PARM_DESC(cidmode, "Call-ID mode");
-
-#define GIGASET_MINORS     1
-#define GIGASET_MINOR      8
-#define GIGASET_MODULENAME "usb_gigaset"
-#define GIGASET_DEVNAME    "ttyGU"
-
-/* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
-#define IF_WRITEBUF 264
-
-/* Values for the Gigaset M105 Data */
-#define USB_M105_VENDOR_ID	0x0681
-#define USB_M105_PRODUCT_ID	0x0009
-
-/* table of devices that work with this driver */
-static const struct usb_device_id gigaset_table[] = {
-	{ USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) },
-	{ }					/* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(usb, gigaset_table);
-
-/*
- * Control requests (empty fields: 00)
- *
- *       RT|RQ|VALUE|INDEX|LEN  |DATA
- * In:
- *       C1 08             01
- *            Get flags (1 byte). Bits: 0=dtr,1=rts,3-7:?
- *       C1 0F             ll ll
- *            Get device information/status (llll: 0x200 and 0x40 seen).
- *            Real size: I only saw MIN(llll,0x64).
- *            Contents: seems to be always the same...
- *              offset 0x00: Length of this structure (0x64) (len: 1,2,3 bytes)
- *              offset 0x3c: String (16 bit chars): "MCCI USB Serial V2.0"
- *              rest:        ?
- * Out:
- *       41 11
- *            Initialize/reset device ?
- *       41 00 xx 00
- *            ? (xx=00 or 01; 01 on start, 00 on close)
- *       41 07 vv mm
- *            Set/clear flags vv=value, mm=mask (see RQ 08)
- *       41 12 xx
- *            Used before the following configuration requests are issued
- *            (with xx=0x0f). I've seen other values<0xf, though.
- *       41 01 xx xx
- *            Set baud rate. xxxx=ceil(0x384000/rate)=trunc(0x383fff/rate)+1.
- *       41 03 ps bb
- *            Set byte size and parity. p:  0x20=even,0x10=odd,0x00=no parity
- *                                     [    0x30: m, 0x40: s           ]
- *                                     [s:  0: 1 stop bit; 1: 1.5; 2: 2]
- *                                      bb: bits/byte (seen 7 and 8)
- *       41 13 -- -- -- -- 10 00 ww 00 00 00 xx 00 00 00 yy 00 00 00 zz 00 00 00
- *            ??
- *            Initialization: 01, 40, 00, 00
- *            Open device:    00  40, 00, 00
- *            yy and zz seem to be equal, either 0x00 or 0x0a
- *            (ww,xx) pairs seen: (00,00), (00,40), (01,40), (09,80), (19,80)
- *       41 19 -- -- -- -- 06 00 00 00 00 xx 11 13
- *            Used after every "configuration sequence" (RQ 12, RQs 01/03/13).
- *            xx is usually 0x00 but was 0x7e before starting data transfer
- *            in unimodem mode. So, this might be an array of characters that
- *            need special treatment ("commit all buffered data"?), 11=^Q, 13=^S.
- *
- * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two
- * flags per packet.
- */
-
-/* functions called if a device of this driver is connected/disconnected */
-static int gigaset_probe(struct usb_interface *interface,
-			 const struct usb_device_id *id);
-static void gigaset_disconnect(struct usb_interface *interface);
-
-/* functions called before/after suspend */
-static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
-static int gigaset_resume(struct usb_interface *intf);
-static int gigaset_pre_reset(struct usb_interface *intf);
-
-static struct gigaset_driver *driver;
-
-/* usb specific object needed to register this driver with the usb subsystem */
-static struct usb_driver gigaset_usb_driver = {
-	.name =		GIGASET_MODULENAME,
-	.probe =	gigaset_probe,
-	.disconnect =	gigaset_disconnect,
-	.id_table =	gigaset_table,
-	.suspend =	gigaset_suspend,
-	.resume =	gigaset_resume,
-	.reset_resume =	gigaset_resume,
-	.pre_reset =	gigaset_pre_reset,
-	.post_reset =	gigaset_resume,
-	.disable_hub_initiated_lpm = 1,
-};
-
-struct usb_cardstate {
-	struct usb_device	*udev;		/* usb device pointer */
-	struct usb_interface	*interface;	/* interface for this device */
-	int			busy;		/* bulk output in progress */
-
-	/* Output buffer */
-	unsigned char		*bulk_out_buffer;
-	int			bulk_out_size;
-	int			bulk_out_epnum;
-	struct urb		*bulk_out_urb;
-
-	/* Input buffer */
-	unsigned char		*rcvbuf;
-	int			rcvbuf_size;
-	struct urb		*read_urb;
-
-	char			bchars[6];		/* for request 0x19 */
-};
-
-static inline unsigned tiocm_to_gigaset(unsigned state)
-{
-	return ((state & TIOCM_DTR) ? 1 : 0) | ((state & TIOCM_RTS) ? 2 : 0);
-}
-
-static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
-				  unsigned new_state)
-{
-	struct usb_device *udev = cs->hw.usb->udev;
-	unsigned mask, val;
-	int r;
-
-	mask = tiocm_to_gigaset(old_state ^ new_state);
-	val = tiocm_to_gigaset(new_state);
-
-	gig_dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask);
-	r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41,
-			    (val & 0xff) | ((mask & 0xff) << 8), 0,
-			    NULL, 0, 2000 /* timeout? */);
-	if (r < 0)
-		return r;
-	return 0;
-}
-
-/*
- * Set M105 configuration value
- * using undocumented device commands reverse engineered from USB traces
- * of the Siemens Windows driver
- */
-static int set_value(struct cardstate *cs, u8 req, u16 val)
-{
-	struct usb_device *udev = cs->hw.usb->udev;
-	int r, r2;
-
-	gig_dbg(DEBUG_USBREQ, "request %02x (%04x)",
-		(unsigned)req, (unsigned)val);
-	r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x12, 0x41,
-			    0xf /*?*/, 0, NULL, 0, 2000 /*?*/);
-	/* no idea what this does */
-	if (r < 0) {
-		dev_err(&udev->dev, "error %d on request 0x12\n", -r);
-		return r;
-	}
-
-	r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, 0x41,
-			    val, 0, NULL, 0, 2000 /*?*/);
-	if (r < 0)
-		dev_err(&udev->dev, "error %d on request 0x%02x\n",
-			-r, (unsigned)req);
-
-	r2 = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
-			     0, 0, cs->hw.usb->bchars, 6, 2000 /*?*/);
-	if (r2 < 0)
-		dev_err(&udev->dev, "error %d on request 0x19\n", -r2);
-
-	return r < 0 ? r : (r2 < 0 ? r2 : 0);
-}
-
-/*
- * set the baud rate on the internal serial adapter
- * using the undocumented parameter setting command
- */
-static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
-{
-	u16 val;
-	u32 rate;
-
-	cflag &= CBAUD;
-
-	switch (cflag) {
-	case    B300: rate =     300; break;
-	case    B600: rate =     600; break;
-	case   B1200: rate =    1200; break;
-	case   B2400: rate =    2400; break;
-	case   B4800: rate =    4800; break;
-	case   B9600: rate =    9600; break;
-	case  B19200: rate =   19200; break;
-	case  B38400: rate =   38400; break;
-	case  B57600: rate =   57600; break;
-	case B115200: rate =  115200; break;
-	default:
-		rate =  9600;
-		dev_err(cs->dev, "unsupported baudrate request 0x%x,"
-			" using default of B9600\n", cflag);
-	}
-
-	val = 0x383fff / rate + 1;
-
-	return set_value(cs, 1, val);
-}
-
-/*
- * set the line format on the internal serial adapter
- * using the undocumented parameter setting command
- */
-static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
-{
-	u16 val = 0;
-
-	/* set the parity */
-	if (cflag & PARENB)
-		val |= (cflag & PARODD) ? 0x10 : 0x20;
-
-	/* set the number of data bits */
-	switch (cflag & CSIZE) {
-	case CS5:
-		val |= 5 << 8; break;
-	case CS6:
-		val |= 6 << 8; break;
-	case CS7:
-		val |= 7 << 8; break;
-	case CS8:
-		val |= 8 << 8; break;
-	default:
-		dev_err(cs->dev, "CSIZE was not CS5-CS8, using default of 8\n");
-		val |= 8 << 8;
-		break;
-	}
-
-	/* set the number of stop bits */
-	if (cflag & CSTOPB) {
-		if ((cflag & CSIZE) == CS5)
-			val |= 1; /* 1.5 stop bits */
-		else
-			val |= 2; /* 2 stop bits */
-	}
-
-	return set_value(cs, 3, val);
-}
-
-
-/*============================================================================*/
-static int gigaset_init_bchannel(struct bc_state *bcs)
-{
-	/* nothing to do for M10x */
-	gigaset_bchannel_up(bcs);
-	return 0;
-}
-
-static int gigaset_close_bchannel(struct bc_state *bcs)
-{
-	/* nothing to do for M10x */
-	gigaset_bchannel_down(bcs);
-	return 0;
-}
-
-static int write_modem(struct cardstate *cs);
-static int send_cb(struct cardstate *cs);
-
-
-/* Write tasklet handler: Continue sending current skb, or send command, or
- * start sending an skb from the send queue.
- */
-static void gigaset_modem_fill(unsigned long data)
-{
-	struct cardstate *cs = (struct cardstate *) data;
-	struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
-
-	gig_dbg(DEBUG_OUTPUT, "modem_fill");
-
-	if (cs->hw.usb->busy) {
-		gig_dbg(DEBUG_OUTPUT, "modem_fill: busy");
-		return;
-	}
-
-again:
-	if (!bcs->tx_skb) {	/* no skb is being sent */
-		if (cs->cmdbuf) {	/* commands to send? */
-			gig_dbg(DEBUG_OUTPUT, "modem_fill: cb");
-			if (send_cb(cs) < 0) {
-				gig_dbg(DEBUG_OUTPUT,
-					"modem_fill: send_cb failed");
-				goto again; /* no callback will be called! */
-			}
-			return;
-		}
-
-		/* skbs to send? */
-		bcs->tx_skb = skb_dequeue(&bcs->squeue);
-		if (!bcs->tx_skb)
-			return;
-
-		gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)!",
-			(unsigned long) bcs->tx_skb);
-	}
-
-	gig_dbg(DEBUG_OUTPUT, "modem_fill: tx_skb");
-	if (write_modem(cs) < 0) {
-		gig_dbg(DEBUG_OUTPUT, "modem_fill: write_modem failed");
-		goto again;	/* no callback will be called! */
-	}
-}
-
-/*
- * Interrupt Input URB completion routine
- */
-static void gigaset_read_int_callback(struct urb *urb)
-{
-	struct cardstate *cs = urb->context;
-	struct inbuf_t *inbuf = cs->inbuf;
-	int status = urb->status;
-	int r;
-	unsigned numbytes;
-	unsigned char *src;
-	unsigned long flags;
-
-	if (!status) {
-		numbytes = urb->actual_length;
-
-		if (numbytes) {
-			src = cs->hw.usb->rcvbuf;
-			if (unlikely(*src))
-				dev_warn(cs->dev,
-					 "%s: There was no leading 0, but 0x%02x!\n",
-					 __func__, (unsigned) *src);
-			++src; /* skip leading 0x00 */
-			--numbytes;
-			if (gigaset_fill_inbuf(inbuf, src, numbytes)) {
-				gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
-				gigaset_schedule_event(inbuf->cs);
-			}
-		} else
-			gig_dbg(DEBUG_INTR, "Received zero block length");
-	} else {
-		/* The urb might have been killed. */
-		gig_dbg(DEBUG_ANY, "%s - nonzero status received: %d",
-			__func__, status);
-		if (status == -ENOENT || status == -ESHUTDOWN)
-			/* killed or endpoint shutdown: don't resubmit */
-			return;
-	}
-
-	/* resubmit URB */
-	spin_lock_irqsave(&cs->lock, flags);
-	if (!cs->connected) {
-		spin_unlock_irqrestore(&cs->lock, flags);
-		pr_err("%s: disconnected\n", __func__);
-		return;
-	}
-	r = usb_submit_urb(urb, GFP_ATOMIC);
-	spin_unlock_irqrestore(&cs->lock, flags);
-	if (r)
-		dev_err(cs->dev, "error %d resubmitting URB\n", -r);
-}
-
-
-/* This callback routine is called when data was transmitted to the device. */
-static void gigaset_write_bulk_callback(struct urb *urb)
-{
-	struct cardstate *cs = urb->context;
-	int status = urb->status;
-	unsigned long flags;
-
-	switch (status) {
-	case 0:			/* normal completion */
-		break;
-	case -ENOENT:		/* killed */
-		gig_dbg(DEBUG_ANY, "%s: killed", __func__);
-		cs->hw.usb->busy = 0;
-		return;
-	default:
-		dev_err(cs->dev, "bulk transfer failed (status %d)\n",
-			-status);
-		/* That's all we can do. Communication problems
-		   are handled by timeouts or network protocols. */
-	}
-
-	spin_lock_irqsave(&cs->lock, flags);
-	if (!cs->connected) {
-		pr_err("%s: disconnected\n", __func__);
-	} else {
-		cs->hw.usb->busy = 0;
-		tasklet_schedule(&cs->write_tasklet);
-	}
-	spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-static int send_cb(struct cardstate *cs)
-{
-	struct cmdbuf_t *cb = cs->cmdbuf;
-	unsigned long flags;
-	int count;
-	int status = -ENOENT;
-	struct usb_cardstate *ucs = cs->hw.usb;
-
-	do {
-		if (!cb->len) {
-			spin_lock_irqsave(&cs->cmdlock, flags);
-			cs->cmdbytes -= cs->curlen;
-			gig_dbg(DEBUG_OUTPUT, "send_cb: sent %u bytes, %u left",
-				cs->curlen, cs->cmdbytes);
-			cs->cmdbuf = cb->next;
-			if (cs->cmdbuf) {
-				cs->cmdbuf->prev = NULL;
-				cs->curlen = cs->cmdbuf->len;
-			} else {
-				cs->lastcmdbuf = NULL;
-				cs->curlen = 0;
-			}
-			spin_unlock_irqrestore(&cs->cmdlock, flags);
-
-			if (cb->wake_tasklet)
-				tasklet_schedule(cb->wake_tasklet);
-			kfree(cb);
-
-			cb = cs->cmdbuf;
-		}
-
-		if (cb) {
-			count = min(cb->len, ucs->bulk_out_size);
-			gig_dbg(DEBUG_OUTPUT, "send_cb: send %d bytes", count);
-
-			usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
-					  usb_sndbulkpipe(ucs->udev,
-							  ucs->bulk_out_epnum),
-					  cb->buf + cb->offset, count,
-					  gigaset_write_bulk_callback, cs);
-
-			cb->offset += count;
-			cb->len -= count;
-			ucs->busy = 1;
-
-			spin_lock_irqsave(&cs->lock, flags);
-			status = cs->connected ?
-				usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) :
-				-ENODEV;
-			spin_unlock_irqrestore(&cs->lock, flags);
-
-			if (status) {
-				ucs->busy = 0;
-				dev_err(cs->dev,
-					"could not submit urb (error %d)\n",
-					-status);
-				cb->len = 0; /* skip urb => remove cb+wakeup
-						in next loop cycle */
-			}
-		}
-	} while (cb && status); /* next command on error */
-
-	return status;
-}
-
-/* Send command to device. */
-static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
-{
-	unsigned long flags;
-	int len;
-
-	gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
-			   DEBUG_TRANSCMD : DEBUG_LOCKCMD,
-			   "CMD Transmit", cb->len, cb->buf);
-
-	spin_lock_irqsave(&cs->cmdlock, flags);
-	cb->prev = cs->lastcmdbuf;
-	if (cs->lastcmdbuf)
-		cs->lastcmdbuf->next = cb;
-	else {
-		cs->cmdbuf = cb;
-		cs->curlen = cb->len;
-	}
-	cs->cmdbytes += cb->len;
-	cs->lastcmdbuf = cb;
-	spin_unlock_irqrestore(&cs->cmdlock, flags);
-
-	spin_lock_irqsave(&cs->lock, flags);
-	len = cb->len;
-	if (cs->connected)
-		tasklet_schedule(&cs->write_tasklet);
-	spin_unlock_irqrestore(&cs->lock, flags);
-	return len;
-}
-
-static int gigaset_write_room(struct cardstate *cs)
-{
-	unsigned bytes;
-
-	bytes = cs->cmdbytes;
-	return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0;
-}
-
-static int gigaset_chars_in_buffer(struct cardstate *cs)
-{
-	return cs->cmdbytes;
-}
-
-/*
- * set the break characters on the internal serial adapter
- * using undocumented device commands reverse engineered from USB traces
- * of the Siemens Windows driver
- */
-static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
-{
-	struct usb_device *udev = cs->hw.usb->udev;
-
-	gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
-	memcpy(cs->hw.usb->bchars, buf, 6);
-	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
-			       0, 0, cs->hw.usb->bchars, 6, 2000);
-}
-
-static void gigaset_freebcshw(struct bc_state *bcs)
-{
-	/* unused */
-}
-
-/* Initialize the b-channel structure */
-static int gigaset_initbcshw(struct bc_state *bcs)
-{
-	/* unused */
-	bcs->hw.usb = NULL;
-	return 0;
-}
-
-static void gigaset_reinitbcshw(struct bc_state *bcs)
-{
-	/* nothing to do for M10x */
-}
-
-static void gigaset_freecshw(struct cardstate *cs)
-{
-	tasklet_kill(&cs->write_tasklet);
-	kfree(cs->hw.usb);
-}
-
-static int gigaset_initcshw(struct cardstate *cs)
-{
-	struct usb_cardstate *ucs;
-
-	cs->hw.usb = ucs = kzalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
-	if (!ucs) {
-		pr_err("out of memory\n");
-		return -ENOMEM;
-	}
-
-	ucs->bchars[0] = 0;
-	ucs->bchars[1] = 0;
-	ucs->bchars[2] = 0;
-	ucs->bchars[3] = 0;
-	ucs->bchars[4] = 0x11;
-	ucs->bchars[5] = 0x13;
-	tasklet_init(&cs->write_tasklet,
-		     gigaset_modem_fill, (unsigned long) cs);
-
-	return 0;
-}
-
-/* Send data from current skb to the device. */
-static int write_modem(struct cardstate *cs)
-{
-	int ret = 0;
-	int count;
-	struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
-	struct usb_cardstate *ucs = cs->hw.usb;
-	unsigned long flags;
-
-	gig_dbg(DEBUG_OUTPUT, "len: %d...", bcs->tx_skb->len);
-
-	if (!bcs->tx_skb->len) {
-		dev_kfree_skb_any(bcs->tx_skb);
-		bcs->tx_skb = NULL;
-		return -EINVAL;
-	}
-
-	/* Copy data to bulk out buffer and transmit data */
-	count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
-	skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
-	skb_pull(bcs->tx_skb, count);
-	ucs->busy = 1;
-	gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count);
-
-	spin_lock_irqsave(&cs->lock, flags);
-	if (cs->connected) {
-		usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
-				  usb_sndbulkpipe(ucs->udev,
-						  ucs->bulk_out_epnum),
-				  ucs->bulk_out_buffer, count,
-				  gigaset_write_bulk_callback, cs);
-		ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC);
-	} else {
-		ret = -ENODEV;
-	}
-	spin_unlock_irqrestore(&cs->lock, flags);
-
-	if (ret) {
-		dev_err(cs->dev, "could not submit urb (error %d)\n", -ret);
-		ucs->busy = 0;
-	}
-
-	if (!bcs->tx_skb->len) {
-		/* skb sent completely */
-		gigaset_skb_sent(bcs, bcs->tx_skb);
-
-		gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!",
-			(unsigned long) bcs->tx_skb);
-		dev_kfree_skb_any(bcs->tx_skb);
-		bcs->tx_skb = NULL;
-	}
-
-	return ret;
-}
-
-static int gigaset_probe(struct usb_interface *interface,
-			 const struct usb_device_id *id)
-{
-	int retval;
-	struct usb_device *udev = interface_to_usbdev(interface);
-	struct usb_host_interface *hostif = interface->cur_altsetting;
-	struct cardstate *cs = NULL;
-	struct usb_cardstate *ucs = NULL;
-	struct usb_endpoint_descriptor *endpoint;
-	int buffer_size;
-
-	gig_dbg(DEBUG_ANY, "%s: Check if device matches ...", __func__);
-
-	/* See if the device offered us matches what we can accept */
-	if ((le16_to_cpu(udev->descriptor.idVendor)  != USB_M105_VENDOR_ID) ||
-	    (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID)) {
-		gig_dbg(DEBUG_ANY, "device ID (0x%x, 0x%x) not for me - skip",
-			le16_to_cpu(udev->descriptor.idVendor),
-			le16_to_cpu(udev->descriptor.idProduct));
-		return -ENODEV;
-	}
-	if (hostif->desc.bInterfaceNumber != 0) {
-		gig_dbg(DEBUG_ANY, "interface %d not for me - skip",
-			hostif->desc.bInterfaceNumber);
-		return -ENODEV;
-	}
-	if (hostif->desc.bAlternateSetting != 0) {
-		dev_notice(&udev->dev, "unsupported altsetting %d - skip",
-			   hostif->desc.bAlternateSetting);
-		return -ENODEV;
-	}
-	if (hostif->desc.bInterfaceClass != 255) {
-		dev_notice(&udev->dev, "unsupported interface class %d - skip",
-			   hostif->desc.bInterfaceClass);
-		return -ENODEV;
-	}
-
-	if (hostif->desc.bNumEndpoints < 2) {
-		dev_err(&interface->dev, "missing endpoints\n");
-		return -ENODEV;
-	}
-
-	dev_info(&udev->dev, "%s: Device matched ... !\n", __func__);
-
-	/* allocate memory for our device state and initialize it */
-	cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
-	if (!cs)
-		return -ENODEV;
-	ucs = cs->hw.usb;
-
-	/* save off device structure ptrs for later use */
-	usb_get_dev(udev);
-	ucs->udev = udev;
-	ucs->interface = interface;
-	cs->dev = &interface->dev;
-
-	/* save address of controller structure */
-	usb_set_intfdata(interface, cs);
-
-	endpoint = &hostif->endpoint[0].desc;
-
-	if (!usb_endpoint_is_bulk_out(endpoint)) {
-		dev_err(&interface->dev, "missing bulk-out endpoint\n");
-		retval = -ENODEV;
-		goto error;
-	}
-
-	buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
-	ucs->bulk_out_size = buffer_size;
-	ucs->bulk_out_epnum = usb_endpoint_num(endpoint);
-	ucs->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
-	if (!ucs->bulk_out_buffer) {
-		dev_err(cs->dev, "Couldn't allocate bulk_out_buffer\n");
-		retval = -ENOMEM;
-		goto error;
-	}
-
-	ucs->bulk_out_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!ucs->bulk_out_urb) {
-		dev_err(cs->dev, "Couldn't allocate bulk_out_urb\n");
-		retval = -ENOMEM;
-		goto error;
-	}
-
-	endpoint = &hostif->endpoint[1].desc;
-
-	if (!usb_endpoint_is_int_in(endpoint)) {
-		dev_err(&interface->dev, "missing int-in endpoint\n");
-		retval = -ENODEV;
-		goto error;
-	}
-
-	ucs->busy = 0;
-
-	ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!ucs->read_urb) {
-		dev_err(cs->dev, "No free urbs available\n");
-		retval = -ENOMEM;
-		goto error;
-	}
-	buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
-	ucs->rcvbuf_size = buffer_size;
-	ucs->rcvbuf = kmalloc(buffer_size, GFP_KERNEL);
-	if (!ucs->rcvbuf) {
-		dev_err(cs->dev, "Couldn't allocate rcvbuf\n");
-		retval = -ENOMEM;
-		goto error;
-	}
-	/* Fill the interrupt urb and send it to the core */
-	usb_fill_int_urb(ucs->read_urb, udev,
-			 usb_rcvintpipe(udev, usb_endpoint_num(endpoint)),
-			 ucs->rcvbuf, buffer_size,
-			 gigaset_read_int_callback,
-			 cs, endpoint->bInterval);
-
-	retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL);
-	if (retval) {
-		dev_err(cs->dev, "Could not submit URB (error %d)\n", -retval);
-		goto error;
-	}
-
-	/* tell common part that the device is ready */
-	if (startmode == SM_LOCKED)
-		cs->mstate = MS_LOCKED;
-
-	retval = gigaset_start(cs);
-	if (retval < 0) {
-		tasklet_kill(&cs->write_tasklet);
-		goto error;
-	}
-	return 0;
-
-error:
-	usb_kill_urb(ucs->read_urb);
-	kfree(ucs->bulk_out_buffer);
-	usb_free_urb(ucs->bulk_out_urb);
-	kfree(ucs->rcvbuf);
-	usb_free_urb(ucs->read_urb);
-	usb_set_intfdata(interface, NULL);
-	ucs->read_urb = ucs->bulk_out_urb = NULL;
-	ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
-	usb_put_dev(ucs->udev);
-	ucs->udev = NULL;
-	ucs->interface = NULL;
-	gigaset_freecs(cs);
-	return retval;
-}
-
-static void gigaset_disconnect(struct usb_interface *interface)
-{
-	struct cardstate *cs;
-	struct usb_cardstate *ucs;
-
-	cs = usb_get_intfdata(interface);
-	ucs = cs->hw.usb;
-
-	dev_info(cs->dev, "disconnecting Gigaset USB adapter\n");
-
-	usb_kill_urb(ucs->read_urb);
-
-	gigaset_stop(cs);
-
-	usb_set_intfdata(interface, NULL);
-	tasklet_kill(&cs->write_tasklet);
-
-	usb_kill_urb(ucs->bulk_out_urb);
-
-	kfree(ucs->bulk_out_buffer);
-	usb_free_urb(ucs->bulk_out_urb);
-	kfree(ucs->rcvbuf);
-	usb_free_urb(ucs->read_urb);
-	ucs->read_urb = ucs->bulk_out_urb = NULL;
-	ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
-
-	usb_put_dev(ucs->udev);
-	ucs->interface = NULL;
-	ucs->udev = NULL;
-	cs->dev = NULL;
-	gigaset_freecs(cs);
-}
-
-/* gigaset_suspend
- * This function is called before the USB connection is suspended or reset.
- */
-static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
-{
-	struct cardstate *cs = usb_get_intfdata(intf);
-
-	/* stop activity */
-	cs->connected = 0;	/* prevent rescheduling */
-	usb_kill_urb(cs->hw.usb->read_urb);
-	tasklet_kill(&cs->write_tasklet);
-	usb_kill_urb(cs->hw.usb->bulk_out_urb);
-
-	gig_dbg(DEBUG_SUSPEND, "suspend complete");
-	return 0;
-}
-
-/* gigaset_resume
- * This function is called after the USB connection has been resumed or reset.
- */
-static int gigaset_resume(struct usb_interface *intf)
-{
-	struct cardstate *cs = usb_get_intfdata(intf);
-	int rc;
-
-	/* resubmit interrupt URB */
-	cs->connected = 1;
-	rc = usb_submit_urb(cs->hw.usb->read_urb, GFP_KERNEL);
-	if (rc) {
-		dev_err(cs->dev, "Could not submit read URB (error %d)\n", -rc);
-		return rc;
-	}
-
-	gig_dbg(DEBUG_SUSPEND, "resume complete");
-	return 0;
-}
-
-/* gigaset_pre_reset
- * This function is called before the USB connection is reset.
- */
-static int gigaset_pre_reset(struct usb_interface *intf)
-{
-	/* same as suspend */
-	return gigaset_suspend(intf, PMSG_ON);
-}
-
-static const struct gigaset_ops ops = {
-	.write_cmd = gigaset_write_cmd,
-	.write_room = gigaset_write_room,
-	.chars_in_buffer = gigaset_chars_in_buffer,
-	.brkchars = gigaset_brkchars,
-	.init_bchannel = gigaset_init_bchannel,
-	.close_bchannel = gigaset_close_bchannel,
-	.initbcshw = gigaset_initbcshw,
-	.freebcshw = gigaset_freebcshw,
-	.reinitbcshw = gigaset_reinitbcshw,
-	.initcshw = gigaset_initcshw,
-	.freecshw = gigaset_freecshw,
-	.set_modem_ctrl = gigaset_set_modem_ctrl,
-	.baud_rate = gigaset_baud_rate,
-	.set_line_ctrl = gigaset_set_line_ctrl,
-	.send_skb = gigaset_m10x_send_skb,
-	.handle_input = gigaset_m10x_input,
-};
-
-/*
- * This function is called while kernel-module is loaded
- */
-static int __init usb_gigaset_init(void)
-{
-	int result;
-
-	/* allocate memory for our driver state and initialize it */
-	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
-				    GIGASET_MODULENAME, GIGASET_DEVNAME,
-				    &ops, THIS_MODULE);
-	if (driver == NULL) {
-		result = -ENOMEM;
-		goto error;
-	}
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&gigaset_usb_driver);
-	if (result < 0) {
-		pr_err("error %d registering USB driver\n", -result);
-		goto error;
-	}
-
-	pr_info(DRIVER_DESC "\n");
-	return 0;
-
-error:
-	if (driver)
-		gigaset_freedriver(driver);
-	driver = NULL;
-	return result;
-}
-
-/*
- * This function is called while unloading the kernel-module
- */
-static void __exit usb_gigaset_exit(void)
-{
-	int i;
-
-	gigaset_blockdriver(driver); /* => probe will fail
-				      * => no gigaset_start any more
-				      */
-
-	/* stop all connected devices */
-	for (i = 0; i < driver->minors; i++)
-		gigaset_shutdown(driver->cs + i);
-
-	/* from now on, no isdn callback should be possible */
-
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&gigaset_usb_driver);
-	/* this will call the disconnect-callback */
-	/* from now on, no disconnect/probe callback should be running */
-
-	gigaset_freedriver(driver);
-	driver = NULL;
-}
-
-
-module_init(usb_gigaset_init);
-module_exit(usb_gigaset_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/isdn/hysdn/Kconfig b/drivers/staging/isdn/hysdn/Kconfig
deleted file mode 100644
index 4c8a928..0000000
--- a/drivers/staging/isdn/hysdn/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config HYSDN
-	tristate "Hypercope HYSDN cards (Champ, Ergo, Metro) support (module only)"
-	depends on m && PROC_FS && PCI
-	help
-	  Say Y here if you have one of Hypercope's active PCI ISDN cards
-	  Champ, Ergo and Metro. You will then get a module called hysdn.
-	  Please read the file <file:Documentation/isdn/hysdn.rst> for more
-	  information.
-
-config HYSDN_CAPI
-	bool "HYSDN CAPI 2.0 support"
-	depends on HYSDN && ISDN_CAPI
-	help
-	  Say Y here if you like to use Hypercope's CAPI 2.0 interface.
diff --git a/drivers/staging/isdn/hysdn/Makefile b/drivers/staging/isdn/hysdn/Makefile
deleted file mode 100644
index e01f17f..0000000
--- a/drivers/staging/isdn/hysdn/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-# Makefile for the hysdn ISDN device driver
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_HYSDN)		+= hysdn.o
-
-# Multipart objects.
-
-hysdn-y				:= hysdn_procconf.o hysdn_proclog.o boardergo.o \
-				   hysdn_boot.o hysdn_sched.o hysdn_net.o hysdn_init.o
-hysdn-$(CONFIG_HYSDN_CAPI)	+= hycapi.o
diff --git a/drivers/staging/isdn/hysdn/boardergo.c b/drivers/staging/isdn/hysdn/boardergo.c
deleted file mode 100644
index 2aa2a0e..0000000
--- a/drivers/staging/isdn/hysdn/boardergo.c
+++ /dev/null
@@ -1,445 +0,0 @@
-/* $Id: boardergo.c,v 1.5.6.7 2001/11/06 21:58:19 kai Exp $
- *
- * Linux driver for HYSDN cards, specific routines for ergo type boards.
- *
- * Author    Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * As all Linux supported cards Champ2, Ergo and Metro2/4 use the same
- * DPRAM interface and layout with only minor differences all related
- * stuff is done here, not in separate modules.
- *
- */
-
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#include "hysdn_defs.h"
-#include "boardergo.h"
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-/***************************************************/
-/* The cards interrupt handler. Called from system */
-/***************************************************/
-static irqreturn_t
-ergo_interrupt(int intno, void *dev_id)
-{
-	hysdn_card *card = dev_id;	/* parameter from irq */
-	tErgDpram *dpr;
-	unsigned long flags;
-	unsigned char volatile b;
-
-	if (!card)
-		return IRQ_NONE;		/* error -> spurious interrupt */
-	if (!card->irq_enabled)
-		return IRQ_NONE;		/* other device interrupting or irq switched off */
-
-	spin_lock_irqsave(&card->hysdn_lock, flags); /* no further irqs allowed */
-
-	if (!(bytein(card->iobase + PCI9050_INTR_REG) & PCI9050_INTR_REG_STAT1)) {
-		spin_unlock_irqrestore(&card->hysdn_lock, flags);	/* restore old state */
-		return IRQ_NONE;		/* no interrupt requested by E1 */
-	}
-	/* clear any pending ints on the board */
-	dpr = card->dpram;
-	b = dpr->ToPcInt;	/* clear for ergo */
-	b |= dpr->ToPcIntMetro;	/* same for metro */
-	b |= dpr->ToHyInt;	/* and for champ */
-
-	/* start kernel task immediately after leaving all interrupts */
-	if (!card->hw_lock)
-		schedule_work(&card->irq_queue);
-	spin_unlock_irqrestore(&card->hysdn_lock, flags);
-	return IRQ_HANDLED;
-}				/* ergo_interrupt */
-
-/******************************************************************************/
-/* ergo_irq_bh will be called as part of the kernel clearing its shared work  */
-/* queue sometime after a call to schedule_work has been made passing our     */
-/* work_struct. This task is the only one handling data transfer from or to   */
-/* the card after booting. The task may be queued from everywhere             */
-/* (interrupts included).                                                     */
-/******************************************************************************/
-static void
-ergo_irq_bh(struct work_struct *ugli_api)
-{
-	hysdn_card *card = container_of(ugli_api, hysdn_card, irq_queue);
-	tErgDpram *dpr;
-	int again;
-	unsigned long flags;
-
-	if (card->state != CARD_STATE_RUN)
-		return;		/* invalid call */
-
-	dpr = card->dpram;	/* point to DPRAM */
-
-	spin_lock_irqsave(&card->hysdn_lock, flags);
-	if (card->hw_lock) {
-		spin_unlock_irqrestore(&card->hysdn_lock, flags);	/* hardware currently unavailable */
-		return;
-	}
-	card->hw_lock = 1;	/* we now lock the hardware */
-
-	do {
-		again = 0;	/* assume loop not to be repeated */
-
-		if (!dpr->ToHyFlag) {
-			/* we are able to send a buffer */
-
-			if (hysdn_sched_tx(card, dpr->ToHyBuf, &dpr->ToHySize, &dpr->ToHyChannel,
-					   ERG_TO_HY_BUF_SIZE)) {
-				dpr->ToHyFlag = 1;	/* enable tx */
-				again = 1;	/* restart loop */
-			}
-		}		/* we are able to send a buffer */
-		if (dpr->ToPcFlag) {
-			/* a message has arrived for us, handle it */
-
-			if (hysdn_sched_rx(card, dpr->ToPcBuf, dpr->ToPcSize, dpr->ToPcChannel)) {
-				dpr->ToPcFlag = 0;	/* we worked the data */
-				again = 1;	/* restart loop */
-			}
-		}		/* a message has arrived for us */
-		if (again) {
-			dpr->ToHyInt = 1;
-			dpr->ToPcInt = 1;	/* interrupt to E1 for all cards */
-		} else
-			card->hw_lock = 0;	/* free hardware again */
-	} while (again);	/* until nothing more to do */
-
-	spin_unlock_irqrestore(&card->hysdn_lock, flags);
-}				/* ergo_irq_bh */
-
-
-/*********************************************************/
-/* stop the card (hardware reset) and disable interrupts */
-/*********************************************************/
-static void
-ergo_stopcard(hysdn_card *card)
-{
-	unsigned long flags;
-	unsigned char val;
-
-	hysdn_net_release(card);	/* first release the net device if existing */
-#ifdef CONFIG_HYSDN_CAPI
-	hycapi_capi_stop(card);
-#endif /* CONFIG_HYSDN_CAPI */
-	spin_lock_irqsave(&card->hysdn_lock, flags);
-	val = bytein(card->iobase + PCI9050_INTR_REG);	/* get actual value */
-	val &= ~(PCI9050_INTR_REG_ENPCI | PCI9050_INTR_REG_EN1);	/* mask irq */
-	byteout(card->iobase + PCI9050_INTR_REG, val);
-	card->irq_enabled = 0;
-	byteout(card->iobase + PCI9050_USER_IO, PCI9050_E1_RESET);	/* reset E1 processor */
-	card->state = CARD_STATE_UNUSED;
-	card->err_log_state = ERRLOG_STATE_OFF;		/* currently no log active */
-
-	spin_unlock_irqrestore(&card->hysdn_lock, flags);
-}				/* ergo_stopcard */
-
-/**************************************************************************/
-/* enable or disable the cards error log. The event is queued if possible */
-/**************************************************************************/
-static void
-ergo_set_errlog_state(hysdn_card *card, int on)
-{
-	unsigned long flags;
-
-	if (card->state != CARD_STATE_RUN) {
-		card->err_log_state = ERRLOG_STATE_OFF;		/* must be off */
-		return;
-	}
-	spin_lock_irqsave(&card->hysdn_lock, flags);
-
-	if (((card->err_log_state == ERRLOG_STATE_OFF) && !on) ||
-	    ((card->err_log_state == ERRLOG_STATE_ON) && on)) {
-		spin_unlock_irqrestore(&card->hysdn_lock, flags);
-		return;		/* nothing to do */
-	}
-	if (on)
-		card->err_log_state = ERRLOG_STATE_START;	/* request start */
-	else
-		card->err_log_state = ERRLOG_STATE_STOP;	/* request stop */
-
-	spin_unlock_irqrestore(&card->hysdn_lock, flags);
-	schedule_work(&card->irq_queue);
-}				/* ergo_set_errlog_state */
-
-/******************************************/
-/* test the cards RAM and return 0 if ok. */
-/******************************************/
-static const char TestText[36] = "This Message is filler, why read it";
-
-static int
-ergo_testram(hysdn_card *card)
-{
-	tErgDpram *dpr = card->dpram;
-
-	memset(dpr->TrapTable, 0, sizeof(dpr->TrapTable));	/* clear all Traps */
-	dpr->ToHyInt = 1;	/* E1 INTR state forced */
-
-	memcpy(&dpr->ToHyBuf[ERG_TO_HY_BUF_SIZE - sizeof(TestText)], TestText,
-	       sizeof(TestText));
-	if (memcmp(&dpr->ToHyBuf[ERG_TO_HY_BUF_SIZE - sizeof(TestText)], TestText,
-		   sizeof(TestText)))
-		return (-1);
-
-	memcpy(&dpr->ToPcBuf[ERG_TO_PC_BUF_SIZE - sizeof(TestText)], TestText,
-	       sizeof(TestText));
-	if (memcmp(&dpr->ToPcBuf[ERG_TO_PC_BUF_SIZE - sizeof(TestText)], TestText,
-		   sizeof(TestText)))
-		return (-1);
-
-	return (0);
-}				/* ergo_testram */
-
-/*****************************************************************************/
-/* this function is intended to write stage 1 boot image to the cards buffer */
-/* this is done in two steps. First the 1024 hi-words are written (offs=0),  */
-/* then the 1024 lo-bytes are written. The remaining DPRAM is cleared, the   */
-/* PCI-write-buffers flushed and the card is taken out of reset.             */
-/* The function then waits for a reaction of the E1 processor or a timeout.  */
-/* Negative return values are interpreted as errors.                         */
-/*****************************************************************************/
-static int
-ergo_writebootimg(struct HYSDN_CARD *card, unsigned char *buf,
-		  unsigned long offs)
-{
-	unsigned char *dst;
-	tErgDpram *dpram;
-	int cnt = (BOOT_IMG_SIZE >> 2);		/* number of words to move and swap (byte order!) */
-
-	if (card->debug_flags & LOG_POF_CARD)
-		hysdn_addlog(card, "ERGO: write bootldr offs=0x%lx ", offs);
-
-	dst = card->dpram;	/* pointer to start of DPRAM */
-	dst += (offs + ERG_DPRAM_FILL_SIZE);	/* offset in the DPRAM */
-	while (cnt--) {
-		*dst++ = *(buf + 1);	/* high byte */
-		*dst++ = *buf;	/* low byte */
-		dst += 2;	/* point to next longword */
-		buf += 2;	/* buffer only filled with words */
-	}
-
-	/* if low words (offs = 2) have been written, clear the rest of the DPRAM, */
-	/* flush the PCI-write-buffer and take the E1 out of reset */
-	if (offs) {
-		memset(card->dpram, 0, ERG_DPRAM_FILL_SIZE);	/* fill the DPRAM still not cleared */
-		dpram = card->dpram;	/* get pointer to dpram structure */
-		dpram->ToHyNoDpramErrLog = 0xFF;	/* write a dpram register */
-		while (!dpram->ToHyNoDpramErrLog);	/* reread volatile register to flush PCI */
-
-		byteout(card->iobase + PCI9050_USER_IO, PCI9050_E1_RUN);	/* start E1 processor */
-		/* the interrupts are still masked */
-
-		msleep_interruptible(20);		/* Timeout 20ms */
-
-		if (((tDpramBootSpooler *) card->dpram)->Len != DPRAM_SPOOLER_DATA_SIZE) {
-			if (card->debug_flags & LOG_POF_CARD)
-				hysdn_addlog(card, "ERGO: write bootldr no answer");
-			return (-ERR_BOOTIMG_FAIL);
-		}
-	}			/* start_boot_img */
-	return (0);		/* successful */
-}				/* ergo_writebootimg */
-
-/********************************************************************************/
-/* ergo_writebootseq writes the buffer containing len bytes to the E1 processor */
-/* using the boot spool mechanism. If everything works fine 0 is returned. In   */
-/* case of errors a negative error value is returned.                           */
-/********************************************************************************/
-static int
-ergo_writebootseq(struct HYSDN_CARD *card, unsigned char *buf, int len)
-{
-	tDpramBootSpooler *sp = (tDpramBootSpooler *) card->dpram;
-	unsigned char *dst;
-	unsigned char buflen;
-	int nr_write;
-	unsigned char tmp_rdptr;
-	unsigned char wr_mirror;
-	int i;
-
-	if (card->debug_flags & LOG_POF_CARD)
-		hysdn_addlog(card, "ERGO: write boot seq len=%d ", len);
-
-	dst = sp->Data;		/* point to data in spool structure */
-	buflen = sp->Len;	/* maximum len of spooled data */
-	wr_mirror = sp->WrPtr;	/* only once read */
-
-	/* try until all bytes written or error */
-	i = 0x1000;		/* timeout value */
-	while (len) {
-
-		/* first determine the number of bytes that may be buffered */
-		do {
-			tmp_rdptr = sp->RdPtr;	/* first read the pointer */
-			i--;	/* decrement timeout */
-		} while (i && (tmp_rdptr != sp->RdPtr));	/* wait for stable pointer */
-
-		if (!i) {
-			if (card->debug_flags & LOG_POF_CARD)
-				hysdn_addlog(card, "ERGO: write boot seq timeout");
-			return (-ERR_BOOTSEQ_FAIL);	/* value not stable -> timeout */
-		}
-		if ((nr_write = tmp_rdptr - wr_mirror - 1) < 0)
-			nr_write += buflen;	/* now we got number of free bytes - 1 in buffer */
-
-		if (!nr_write)
-			continue;	/* no free bytes in buffer */
-
-		if (nr_write > len)
-			nr_write = len;		/* limit if last few bytes */
-		i = 0x1000;	/* reset timeout value */
-
-		/* now we know how much bytes we may put in the puffer */
-		len -= nr_write;	/* we savely could adjust len before output */
-		while (nr_write--) {
-			*(dst + wr_mirror) = *buf++;	/* output one byte */
-			if (++wr_mirror >= buflen)
-				wr_mirror = 0;
-			sp->WrPtr = wr_mirror;	/* announce the next byte to E1 */
-		}		/* while (nr_write) */
-
-	}			/* while (len) */
-	return (0);
-}				/* ergo_writebootseq */
-
-/***********************************************************************************/
-/* ergo_waitpofready waits for a maximum of 10 seconds for the completition of the */
-/* boot process. If the process has been successful 0 is returned otherwise a     */
-/* negative error code is returned.                                                */
-/***********************************************************************************/
-static int
-ergo_waitpofready(struct HYSDN_CARD *card)
-{
-	tErgDpram *dpr = card->dpram;	/* pointer to DPRAM structure */
-	int timecnt = 10000 / 50;	/* timeout is 10 secs max. */
-	unsigned long flags;
-	int msg_size;
-	int i;
-
-	if (card->debug_flags & LOG_POF_CARD)
-		hysdn_addlog(card, "ERGO: waiting for pof ready");
-	while (timecnt--) {
-		/* wait until timeout  */
-
-		if (dpr->ToPcFlag) {
-			/* data has arrived */
-
-			if ((dpr->ToPcChannel != CHAN_SYSTEM) ||
-			    (dpr->ToPcSize < MIN_RDY_MSG_SIZE) ||
-			    (dpr->ToPcSize > MAX_RDY_MSG_SIZE) ||
-			    ((*(unsigned long *) dpr->ToPcBuf) != RDY_MAGIC))
-				break;	/* an error occurred */
-
-			/* Check for additional data delivered during SysReady */
-			msg_size = dpr->ToPcSize - RDY_MAGIC_SIZE;
-			if (msg_size > 0)
-				if (EvalSysrTokData(card, dpr->ToPcBuf + RDY_MAGIC_SIZE, msg_size))
-					break;
-
-			if (card->debug_flags & LOG_POF_RECORD)
-				hysdn_addlog(card, "ERGO: pof boot success");
-			spin_lock_irqsave(&card->hysdn_lock, flags);
-
-			card->state = CARD_STATE_RUN;	/* now card is running */
-			/* enable the cards interrupt */
-			byteout(card->iobase + PCI9050_INTR_REG,
-				bytein(card->iobase + PCI9050_INTR_REG) |
-				(PCI9050_INTR_REG_ENPCI | PCI9050_INTR_REG_EN1));
-			card->irq_enabled = 1;	/* we are ready to receive interrupts */
-
-			dpr->ToPcFlag = 0;	/* reset data indicator */
-			dpr->ToHyInt = 1;
-			dpr->ToPcInt = 1;	/* interrupt to E1 for all cards */
-
-			spin_unlock_irqrestore(&card->hysdn_lock, flags);
-			if ((hynet_enable & (1 << card->myid))
-			    && (i = hysdn_net_create(card)))
-			{
-				ergo_stopcard(card);
-				card->state = CARD_STATE_BOOTERR;
-				return (i);
-			}
-#ifdef CONFIG_HYSDN_CAPI
-			if ((i = hycapi_capi_create(card))) {
-				printk(KERN_WARNING "HYSDN: failed to create capi-interface.\n");
-			}
-#endif /* CONFIG_HYSDN_CAPI */
-			return (0);	/* success */
-		}		/* data has arrived */
-		msleep_interruptible(50);		/* Timeout 50ms */
-	}			/* wait until timeout */
-
-	if (card->debug_flags & LOG_POF_CARD)
-		hysdn_addlog(card, "ERGO: pof boot ready timeout");
-	return (-ERR_POF_TIMEOUT);
-}				/* ergo_waitpofready */
-
-
-
-/************************************************************************************/
-/* release the cards hardware. Before releasing do a interrupt disable and hardware */
-/* reset. Also unmap dpram.                                                         */
-/* Use only during module release.                                                  */
-/************************************************************************************/
-static void
-ergo_releasehardware(hysdn_card *card)
-{
-	ergo_stopcard(card);	/* first stop the card if not already done */
-	free_irq(card->irq, card);	/* release interrupt */
-	release_region(card->iobase + PCI9050_INTR_REG, 1);	/* release all io ports */
-	release_region(card->iobase + PCI9050_USER_IO, 1);
-	iounmap(card->dpram);
-	card->dpram = NULL;	/* release shared mem */
-}				/* ergo_releasehardware */
-
-
-/*********************************************************************************/
-/* acquire the needed hardware ports and map dpram. If an error occurs a nonzero */
-/* value is returned.                                                            */
-/* Use only during module init.                                                  */
-/*********************************************************************************/
-int
-ergo_inithardware(hysdn_card *card)
-{
-	if (!request_region(card->iobase + PCI9050_INTR_REG, 1, "HYSDN"))
-		return (-1);
-	if (!request_region(card->iobase + PCI9050_USER_IO, 1, "HYSDN")) {
-		release_region(card->iobase + PCI9050_INTR_REG, 1);
-		return (-1);	/* ports already in use */
-	}
-	card->memend = card->membase + ERG_DPRAM_PAGE_SIZE - 1;
-	if (!(card->dpram = ioremap(card->membase, ERG_DPRAM_PAGE_SIZE))) {
-		release_region(card->iobase + PCI9050_INTR_REG, 1);
-		release_region(card->iobase + PCI9050_USER_IO, 1);
-		return (-1);
-	}
-
-	ergo_stopcard(card);	/* disable interrupts */
-	if (request_irq(card->irq, ergo_interrupt, IRQF_SHARED, "HYSDN", card)) {
-		ergo_releasehardware(card); /* return the acquired hardware */
-		return (-1);
-	}
-	/* success, now setup the function pointers */
-	card->stopcard = ergo_stopcard;
-	card->releasehardware = ergo_releasehardware;
-	card->testram = ergo_testram;
-	card->writebootimg = ergo_writebootimg;
-	card->writebootseq = ergo_writebootseq;
-	card->waitpofready = ergo_waitpofready;
-	card->set_errlog_state = ergo_set_errlog_state;
-	INIT_WORK(&card->irq_queue, ergo_irq_bh);
-	spin_lock_init(&card->hysdn_lock);
-
-	return (0);
-}				/* ergo_inithardware */
diff --git a/drivers/staging/isdn/hysdn/boardergo.h b/drivers/staging/isdn/hysdn/boardergo.h
deleted file mode 100644
index e99bd81..0000000
--- a/drivers/staging/isdn/hysdn/boardergo.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* $Id: boardergo.h,v 1.2.6.1 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, definitions for ergo type boards (buffers..).
- *
- * Author    Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-
-/************************************************/
-/* defines for the dual port memory of the card */
-/************************************************/
-#define ERG_DPRAM_PAGE_SIZE 0x2000	/* DPRAM occupies a 8K page */
-#define BOOT_IMG_SIZE 4096
-#define ERG_DPRAM_FILL_SIZE (ERG_DPRAM_PAGE_SIZE - BOOT_IMG_SIZE)
-
-#define ERG_TO_HY_BUF_SIZE  0x0E00	/* 3072 bytes buffer size to card */
-#define ERG_TO_PC_BUF_SIZE  0x0E00	/* 3072 bytes to PC, too */
-
-/* following DPRAM layout copied from OS2-driver boarderg.h */
-typedef struct ErgDpram_tag {
-	/*0000 */ unsigned char ToHyBuf[ERG_TO_HY_BUF_SIZE];
-	/*0E00 */ unsigned char ToPcBuf[ERG_TO_PC_BUF_SIZE];
-
-	/*1C00 */ unsigned char bSoftUart[SIZE_RSV_SOFT_UART];
-	/* size 0x1B0 */
-
-	/*1DB0 *//* tErrLogEntry */ unsigned char volatile ErrLogMsg[64];
-	/* size 64 bytes */
-	/*1DB0  unsigned long ulErrType;               */
-	/*1DB4  unsigned long ulErrSubtype;            */
-	/*1DB8  unsigned long ucTextSize;              */
-	/*1DB9  unsigned long ucText[ERRLOG_TEXT_SIZE]; *//* ASCIIZ of len ucTextSize-1 */
-	/*1DF0 */
-
-	/*1DF0 */ unsigned short volatile ToHyChannel;
-	/*1DF2 */ unsigned short volatile ToHySize;
-	/*1DF4 */ unsigned char volatile ToHyFlag;
-	/* !=0: msg for Hy waiting */
-	/*1DF5 */ unsigned char volatile ToPcFlag;
-	/* !=0: msg for PC waiting */
-	/*1DF6 */ unsigned short volatile ToPcChannel;
-	/*1DF8 */ unsigned short volatile ToPcSize;
-	/*1DFA */ unsigned char bRes1DBA[0x1E00 - 0x1DFA];
-	/* 6 bytes */
-
-	/*1E00 */ unsigned char bRestOfEntryTbl[0x1F00 - 0x1E00];
-	/*1F00 */ unsigned long TrapTable[62];
-	/*1FF8 */ unsigned char bRes1FF8[0x1FFB - 0x1FF8];
-	/* low part of reset vetor */
-	/*1FFB */ unsigned char ToPcIntMetro;
-	/* notes:
-	 * - metro has 32-bit boot ram - accessing
-	 *   ToPcInt and ToHyInt would be the same;
-	 *   so we moved ToPcInt to 1FFB.
-	 *   Because on the PC side both vars are
-	 *   readonly (reseting on int from E1 to PC),
-	 *   we can read both vars on both cards
-	 *   without destroying anything.
-	 * - 1FFB is the high byte of the reset vector,
-	 *   so E1 side should NOT change this byte
-	 *   when writing!
-	 */
-	/*1FFC */ unsigned char volatile ToHyNoDpramErrLog;
-	/* note: ToHyNoDpramErrLog is used to inform
-	 *       boot loader, not to use DPRAM based
-	 *       ErrLog; when DOS driver is rewritten
-	 *       this becomes obsolete
-	 */
-	/*1FFD */ unsigned char bRes1FFD;
-	/*1FFE */ unsigned char ToPcInt;
-	/* E1_intclear; on CHAMP2: E1_intset   */
-	/*1FFF */ unsigned char ToHyInt;
-	/* E1_intset;   on CHAMP2: E1_intclear */
-} tErgDpram;
-
-/**********************************************/
-/* PCI9050 controller local register offsets: */
-/* copied from boarderg.c                     */
-/**********************************************/
-#define PCI9050_INTR_REG    0x4C	/* Interrupt register */
-#define PCI9050_USER_IO     0x51	/* User I/O  register */
-
-/* bitmask for PCI9050_INTR_REG: */
-#define PCI9050_INTR_REG_EN1    0x01	/* 1= enable (def.), 0= disable */
-#define PCI9050_INTR_REG_POL1   0x02	/* 1= active high (def.), 0= active low */
-#define PCI9050_INTR_REG_STAT1  0x04	/* 1= intr. active, 0= intr. not active (def.) */
-#define PCI9050_INTR_REG_ENPCI  0x40	/* 1= PCI interrupts enable (def.) */
-
-/* bitmask for PCI9050_USER_IO: */
-#define PCI9050_USER_IO_EN3     0x02	/* 1= disable      , 0= enable (def.) */
-#define PCI9050_USER_IO_DIR3    0x04	/* 1= output (def.), 0= input         */
-#define PCI9050_USER_IO_DAT3    0x08	/* 1= high (def.)  , 0= low           */
-
-#define PCI9050_E1_RESET    (PCI9050_USER_IO_DIR3)		/* 0x04 */
-#define PCI9050_E1_RUN      (PCI9050_USER_IO_DAT3 | PCI9050_USER_IO_DIR3)		/* 0x0C */
diff --git a/drivers/staging/isdn/hysdn/hycapi.c b/drivers/staging/isdn/hysdn/hycapi.c
deleted file mode 100644
index a2c15cd..0000000
--- a/drivers/staging/isdn/hysdn/hycapi.c
+++ /dev/null
@@ -1,785 +0,0 @@
-/* $Id: hycapi.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, CAPI2.0-Interface.
- *
- * Author    Ulrich Albrecht <u.albrecht@hypercope.de> for Hypercope GmbH
- * Copyright 2000 by Hypercope GmbH
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-
-#define	VER_DRIVER	0
-#define	VER_CARDTYPE	1
-#define	VER_HWID	2
-#define	VER_SERIAL	3
-#define	VER_OPTION	4
-#define	VER_PROTO	5
-#define	VER_PROFILE	6
-#define	VER_CAPI	7
-
-#include "hysdn_defs.h"
-#include <linux/kernelcapi.h>
-
-static char hycapi_revision[] = "$Revision: 1.8.6.4 $";
-
-unsigned int hycapi_enable = 0xffffffff;
-module_param(hycapi_enable, uint, 0);
-
-typedef struct _hycapi_appl {
-	unsigned int ctrl_mask;
-	capi_register_params rp;
-	struct sk_buff *listen_req[CAPI_MAXCONTR];
-} hycapi_appl;
-
-static hycapi_appl hycapi_applications[CAPI_MAXAPPL];
-
-static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
-
-static inline int _hycapi_appCheck(int app_id, int ctrl_no)
-{
-	if ((ctrl_no <= 0) || (ctrl_no > CAPI_MAXCONTR) || (app_id <= 0) ||
-	   (app_id > CAPI_MAXAPPL))
-	{
-		printk(KERN_ERR "HYCAPI: Invalid request app_id %d for controller %d", app_id, ctrl_no);
-		return -1;
-	}
-	return ((hycapi_applications[app_id - 1].ctrl_mask & (1 << (ctrl_no-1))) != 0);
-}
-
-/******************************
-Kernel-Capi callback reset_ctr
-******************************/
-
-static void
-hycapi_reset_ctr(struct capi_ctr *ctrl)
-{
-	hycapictrl_info *cinfo = ctrl->driverdata;
-
-#ifdef HYCAPI_PRINTFNAMES
-	printk(KERN_NOTICE "HYCAPI hycapi_reset_ctr\n");
-#endif
-	capilib_release(&cinfo->ncci_head);
-	capi_ctr_down(ctrl);
-}
-
-/******************************
-Kernel-Capi callback remove_ctr
-******************************/
-
-static void
-hycapi_remove_ctr(struct capi_ctr *ctrl)
-{
-	int i;
-	hycapictrl_info *cinfo = NULL;
-	hysdn_card *card = NULL;
-#ifdef HYCAPI_PRINTFNAMES
-	printk(KERN_NOTICE "HYCAPI hycapi_remove_ctr\n");
-#endif
-	cinfo = (hycapictrl_info *)(ctrl->driverdata);
-	if (!cinfo) {
-		printk(KERN_ERR "No hycapictrl_info set!");
-		return;
-	}
-	card = cinfo->card;
-	capi_ctr_suspend_output(ctrl);
-	for (i = 0; i < CAPI_MAXAPPL; i++) {
-		if (hycapi_applications[i].listen_req[ctrl->cnr - 1]) {
-			kfree_skb(hycapi_applications[i].listen_req[ctrl->cnr - 1]);
-			hycapi_applications[i].listen_req[ctrl->cnr - 1] = NULL;
-		}
-	}
-	detach_capi_ctr(ctrl);
-	ctrl->driverdata = NULL;
-	kfree(card->hyctrlinfo);
-
-
-	card->hyctrlinfo = NULL;
-}
-
-/***********************************************************
-
-Queue a CAPI-message to the controller.
-
-***********************************************************/
-
-static void
-hycapi_sendmsg_internal(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
-	hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
-	hysdn_card *card = cinfo->card;
-
-	spin_lock_irq(&cinfo->lock);
-#ifdef HYCAPI_PRINTFNAMES
-	printk(KERN_NOTICE "hycapi_send_message\n");
-#endif
-	cinfo->skbs[cinfo->in_idx++] = skb;	/* add to buffer list */
-	if (cinfo->in_idx >= HYSDN_MAX_CAPI_SKB)
-		cinfo->in_idx = 0;	/* wrap around */
-	cinfo->sk_count++;		/* adjust counter */
-	if (cinfo->sk_count >= HYSDN_MAX_CAPI_SKB) {
-		/* inform upper layers we're full */
-		printk(KERN_ERR "HYSDN Card%d: CAPI-buffer overrun!\n",
-		       card->myid);
-		capi_ctr_suspend_output(ctrl);
-	}
-	cinfo->tx_skb = skb;
-	spin_unlock_irq(&cinfo->lock);
-	schedule_work(&card->irq_queue);
-}
-
-/***********************************************************
-hycapi_register_internal
-
-Send down the CAPI_REGISTER-Command to the controller.
-This functions will also be used if the adapter has been rebooted to
-re-register any applications in the private list.
-
-************************************************************/
-
-static void
-hycapi_register_internal(struct capi_ctr *ctrl, __u16 appl,
-			 capi_register_params *rp)
-{
-	char ExtFeatureDefaults[] = "49  /0/0/0/0,*/1,*/2,*/3,*/4,*/5,*/6,*/7,*/8,*/9,*";
-	hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
-	hysdn_card *card = cinfo->card;
-	struct sk_buff *skb;
-	__u16 len;
-	__u8 _command = 0xa0, _subcommand = 0x80;
-	__u16 MessageNumber = 0x0000;
-	__u16 MessageBufferSize = 0;
-	int slen = strlen(ExtFeatureDefaults);
-#ifdef HYCAPI_PRINTFNAMES
-	printk(KERN_NOTICE "hycapi_register_appl\n");
-#endif
-	MessageBufferSize = rp->level3cnt * rp->datablkcnt * rp->datablklen;
-
-	len = CAPI_MSG_BASELEN + 8 + slen + 1;
-	if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
-		printk(KERN_ERR "HYSDN card%d: memory squeeze in hycapi_register_appl\n",
-		       card->myid);
-		return;
-	}
-	skb_put_data(skb, &len, sizeof(__u16));
-	skb_put_data(skb, &appl, sizeof(__u16));
-	skb_put_data(skb, &_command, sizeof(__u8));
-	skb_put_data(skb, &_subcommand, sizeof(__u8));
-	skb_put_data(skb, &MessageNumber, sizeof(__u16));
-	skb_put_data(skb, &MessageBufferSize, sizeof(__u16));
-	skb_put_data(skb, &(rp->level3cnt), sizeof(__u16));
-	skb_put_data(skb, &(rp->datablkcnt), sizeof(__u16));
-	skb_put_data(skb, &(rp->datablklen), sizeof(__u16));
-	skb_put_data(skb, ExtFeatureDefaults, slen);
-	hycapi_applications[appl - 1].ctrl_mask |= (1 << (ctrl->cnr - 1));
-	hycapi_send_message(ctrl, skb);
-}
-
-/************************************************************
-hycapi_restart_internal
-
-After an adapter has been rebootet, re-register all applications and
-send a LISTEN_REQ (if there has been such a thing )
-
-*************************************************************/
-
-static void hycapi_restart_internal(struct capi_ctr *ctrl)
-{
-	int i;
-	struct sk_buff *skb;
-#ifdef HYCAPI_PRINTFNAMES
-	printk(KERN_WARNING "HYSDN: hycapi_restart_internal");
-#endif
-	for (i = 0; i < CAPI_MAXAPPL; i++) {
-		if (_hycapi_appCheck(i + 1, ctrl->cnr) == 1) {
-			hycapi_register_internal(ctrl, i + 1,
-						 &hycapi_applications[i].rp);
-			if (hycapi_applications[i].listen_req[ctrl->cnr - 1]) {
-				skb = skb_copy(hycapi_applications[i].listen_req[ctrl->cnr - 1], GFP_ATOMIC);
-				hycapi_sendmsg_internal(ctrl, skb);
-			}
-		}
-	}
-}
-
-/*************************************************************
-Register an application.
-Error-checking is done for CAPI-compliance.
-
-The application is recorded in the internal list.
-*************************************************************/
-
-static void
-hycapi_register_appl(struct capi_ctr *ctrl, __u16 appl,
-		     capi_register_params *rp)
-{
-	int MaxLogicalConnections = 0, MaxBDataBlocks = 0, MaxBDataLen = 0;
-	hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
-	hysdn_card *card = cinfo->card;
-	int chk = _hycapi_appCheck(appl, ctrl->cnr);
-	if (chk < 0) {
-		return;
-	}
-	if (chk == 1) {
-		printk(KERN_INFO "HYSDN: apl %d already registered\n", appl);
-		return;
-	}
-	MaxBDataBlocks = rp->datablkcnt > CAPI_MAXDATAWINDOW ? CAPI_MAXDATAWINDOW : rp->datablkcnt;
-	rp->datablkcnt = MaxBDataBlocks;
-	MaxBDataLen = rp->datablklen < 1024 ? 1024 : rp->datablklen;
-	rp->datablklen = MaxBDataLen;
-
-	MaxLogicalConnections = rp->level3cnt;
-	if (MaxLogicalConnections < 0) {
-		MaxLogicalConnections = card->bchans * -MaxLogicalConnections;
-	}
-	if (MaxLogicalConnections == 0) {
-		MaxLogicalConnections = card->bchans;
-	}
-
-	rp->level3cnt = MaxLogicalConnections;
-	memcpy(&hycapi_applications[appl - 1].rp,
-	       rp, sizeof(capi_register_params));
-}
-
-/*********************************************************************
-
-hycapi_release_internal
-
-Send down a CAPI_RELEASE to the controller.
-*********************************************************************/
-
-static void hycapi_release_internal(struct capi_ctr *ctrl, __u16 appl)
-{
-	hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
-	hysdn_card *card = cinfo->card;
-	struct sk_buff *skb;
-	__u16 len;
-	__u8 _command = 0xa1, _subcommand = 0x80;
-	__u16 MessageNumber = 0x0000;
-
-	capilib_release_appl(&cinfo->ncci_head, appl);
-
-#ifdef HYCAPI_PRINTFNAMES
-	printk(KERN_NOTICE "hycapi_release_appl\n");
-#endif
-	len = CAPI_MSG_BASELEN;
-	if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
-		printk(KERN_ERR "HYSDN card%d: memory squeeze in hycapi_register_appl\n",
-		       card->myid);
-		return;
-	}
-	skb_put_data(skb, &len, sizeof(__u16));
-	skb_put_data(skb, &appl, sizeof(__u16));
-	skb_put_data(skb, &_command, sizeof(__u8));
-	skb_put_data(skb, &_subcommand, sizeof(__u8));
-	skb_put_data(skb, &MessageNumber, sizeof(__u16));
-	hycapi_send_message(ctrl, skb);
-	hycapi_applications[appl - 1].ctrl_mask &= ~(1 << (ctrl->cnr - 1));
-}
-
-/******************************************************************
-hycapi_release_appl
-
-Release the application from the internal list an remove it's
-registration at controller-level
-******************************************************************/
-
-static void
-hycapi_release_appl(struct capi_ctr *ctrl, __u16 appl)
-{
-	int chk;
-
-	chk = _hycapi_appCheck(appl, ctrl->cnr);
-	if (chk < 0) {
-		printk(KERN_ERR "HYCAPI: Releasing invalid appl %d on controller %d\n", appl, ctrl->cnr);
-		return;
-	}
-	if (hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1]) {
-		kfree_skb(hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1]);
-		hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1] = NULL;
-	}
-	if (chk == 1)
-	{
-		hycapi_release_internal(ctrl, appl);
-	}
-}
-
-
-/**************************************************************
-Kill a single controller.
-**************************************************************/
-
-int hycapi_capi_release(hysdn_card *card)
-{
-	hycapictrl_info *cinfo = card->hyctrlinfo;
-	struct capi_ctr *ctrl;
-#ifdef HYCAPI_PRINTFNAMES
-	printk(KERN_NOTICE "hycapi_capi_release\n");
-#endif
-	if (cinfo) {
-		ctrl = &cinfo->capi_ctrl;
-		hycapi_remove_ctr(ctrl);
-	}
-	return 0;
-}
-
-/**************************************************************
-hycapi_capi_stop
-
-Stop CAPI-Output on a card. (e.g. during reboot)
-***************************************************************/
-
-int hycapi_capi_stop(hysdn_card *card)
-{
-	hycapictrl_info *cinfo = card->hyctrlinfo;
-	struct capi_ctr *ctrl;
-#ifdef HYCAPI_PRINTFNAMES
-	printk(KERN_NOTICE "hycapi_capi_stop\n");
-#endif
-	if (cinfo) {
-		ctrl = &cinfo->capi_ctrl;
-/*		ctrl->suspend_output(ctrl); */
-		capi_ctr_down(ctrl);
-	}
-	return 0;
-}
-
-/***************************************************************
-hycapi_send_message
-
-Send a message to the controller.
-
-Messages are parsed for their Command/Subcommand-type, and appropriate
-action's are performed.
-
-Note that we have to muck around with a 64Bit-DATA_REQ as there are
-firmware-releases that do not check the MsgLen-Indication!
-
-***************************************************************/
-
-static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
-	__u16 appl_id;
-	int _len, _len2;
-	__u8 msghead[64];
-	hycapictrl_info *cinfo = ctrl->driverdata;
-	u16 retval = CAPI_NOERROR;
-
-	appl_id = CAPIMSG_APPID(skb->data);
-	switch (_hycapi_appCheck(appl_id, ctrl->cnr))
-	{
-	case 0:
-/*			printk(KERN_INFO "Need to register\n"); */
-		hycapi_register_internal(ctrl,
-					 appl_id,
-					 &(hycapi_applications[appl_id - 1].rp));
-		break;
-	case 1:
-		break;
-	default:
-		printk(KERN_ERR "HYCAPI: Controller mixup!\n");
-		retval = CAPI_ILLAPPNR;
-		goto out;
-	}
-	switch (CAPIMSG_CMD(skb->data)) {
-	case CAPI_DISCONNECT_B3_RESP:
-		capilib_free_ncci(&cinfo->ncci_head, appl_id,
-				  CAPIMSG_NCCI(skb->data));
-		break;
-	case CAPI_DATA_B3_REQ:
-		_len = CAPIMSG_LEN(skb->data);
-		if (_len > 22) {
-			_len2 = _len - 22;
-			skb_copy_from_linear_data(skb, msghead, 22);
-			skb_copy_to_linear_data_offset(skb, _len2,
-						       msghead, 22);
-			skb_pull(skb, _len2);
-			CAPIMSG_SETLEN(skb->data, 22);
-			retval = capilib_data_b3_req(&cinfo->ncci_head,
-						     CAPIMSG_APPID(skb->data),
-						     CAPIMSG_NCCI(skb->data),
-						     CAPIMSG_MSGID(skb->data));
-		}
-		break;
-	case CAPI_LISTEN_REQ:
-		if (hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1])
-		{
-			kfree_skb(hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1]);
-			hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1] = NULL;
-		}
-		if (!(hycapi_applications[appl_id  -1].listen_req[ctrl->cnr - 1] = skb_copy(skb, GFP_ATOMIC)))
-		{
-			printk(KERN_ERR "HYSDN: memory squeeze in private_listen\n");
-		}
-		break;
-	default:
-		break;
-	}
-out:
-	if (retval == CAPI_NOERROR)
-		hycapi_sendmsg_internal(ctrl, skb);
-	else
-		dev_kfree_skb_any(skb);
-
-	return retval;
-}
-
-static int hycapi_proc_show(struct seq_file *m, void *v)
-{
-	struct capi_ctr *ctrl = m->private;
-	hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
-	hysdn_card *card = cinfo->card;
-	char *s;
-
-	seq_printf(m, "%-16s %s\n", "name", cinfo->cardname);
-	seq_printf(m, "%-16s 0x%x\n", "io", card->iobase);
-	seq_printf(m, "%-16s %d\n", "irq", card->irq);
-
-	switch (card->brdtype) {
-	case BD_PCCARD:  s = "HYSDN Hycard"; break;
-	case BD_ERGO: s = "HYSDN Ergo2"; break;
-	case BD_METRO: s = "HYSDN Metro4"; break;
-	case BD_CHAMP2: s = "HYSDN Champ2";	break;
-	case BD_PLEXUS: s = "HYSDN Plexus30"; break;
-	default: s = "???"; break;
-	}
-	seq_printf(m, "%-16s %s\n", "type", s);
-	if ((s = cinfo->version[VER_DRIVER]) != NULL)
-		seq_printf(m, "%-16s %s\n", "ver_driver", s);
-	if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
-		seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
-	if ((s = cinfo->version[VER_SERIAL]) != NULL)
-		seq_printf(m, "%-16s %s\n", "ver_serial", s);
-
-	seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
-
-	return 0;
-}
-
-/**************************************************************
-hycapi_load_firmware
-
-This does NOT load any firmware, but the callback somehow is needed
-on capi-interface registration.
-
-**************************************************************/
-
-static int hycapi_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
-#ifdef HYCAPI_PRINTFNAMES
-	printk(KERN_NOTICE "hycapi_load_firmware\n");
-#endif
-	return 0;
-}
-
-
-static char *hycapi_procinfo(struct capi_ctr *ctrl)
-{
-	hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
-#ifdef HYCAPI_PRINTFNAMES
-	printk(KERN_NOTICE "%s\n", __func__);
-#endif
-	if (!cinfo)
-		return "";
-	sprintf(cinfo->infobuf, "%s %s 0x%x %d %s",
-		cinfo->cardname[0] ? cinfo->cardname : "-",
-		cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
-		cinfo->card ? cinfo->card->iobase : 0x0,
-		cinfo->card ? cinfo->card->irq : 0,
-		hycapi_revision
-		);
-	return cinfo->infobuf;
-}
-
-/******************************************************************
-hycapi_rx_capipkt
-
-Receive a capi-message.
-
-All B3_DATA_IND are converted to 64K-extension compatible format.
-New nccis are created if necessary.
-*******************************************************************/
-
-void
-hycapi_rx_capipkt(hysdn_card *card, unsigned char *buf, unsigned short len)
-{
-	struct sk_buff *skb;
-	hycapictrl_info *cinfo = card->hyctrlinfo;
-	struct capi_ctr *ctrl;
-	__u16 ApplId;
-	__u16 MsgLen, info;
-	__u16 len2, CapiCmd;
-	__u32 CP64[2] = {0, 0};
-#ifdef HYCAPI_PRINTFNAMES
-	printk(KERN_NOTICE "hycapi_rx_capipkt\n");
-#endif
-	if (!cinfo) {
-		return;
-	}
-	ctrl = &cinfo->capi_ctrl;
-	if (len < CAPI_MSG_BASELEN) {
-		printk(KERN_ERR "HYSDN Card%d: invalid CAPI-message, length %d!\n",
-		       card->myid, len);
-		return;
-	}
-	MsgLen = CAPIMSG_LEN(buf);
-	ApplId = CAPIMSG_APPID(buf);
-	CapiCmd = CAPIMSG_CMD(buf);
-
-	if ((CapiCmd == CAPI_DATA_B3_IND) && (MsgLen < 30)) {
-		len2 = len + (30 - MsgLen);
-		if (!(skb = alloc_skb(len2, GFP_ATOMIC))) {
-			printk(KERN_ERR "HYSDN Card%d: incoming packet dropped\n",
-			       card->myid);
-			return;
-		}
-		skb_put_data(skb, buf, MsgLen);
-		skb_put_data(skb, CP64, 2 * sizeof(__u32));
-		skb_put_data(skb, buf + MsgLen, len - MsgLen);
-		CAPIMSG_SETLEN(skb->data, 30);
-	} else {
-		if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
-			printk(KERN_ERR "HYSDN Card%d: incoming packet dropped\n",
-			       card->myid);
-			return;
-		}
-		skb_put_data(skb, buf, len);
-	}
-	switch (CAPIMSG_CMD(skb->data))
-	{
-	case CAPI_CONNECT_B3_CONF:
-/* Check info-field for error-indication: */
-		info = CAPIMSG_U16(skb->data, 12);
-		switch (info)
-		{
-		case 0:
-			capilib_new_ncci(&cinfo->ncci_head, ApplId, CAPIMSG_NCCI(skb->data),
-					 hycapi_applications[ApplId - 1].rp.datablkcnt);
-
-			break;
-		case 0x0001:
-			printk(KERN_ERR "HYSDN Card%d: NCPI not supported by current "
-			       "protocol. NCPI ignored.\n", card->myid);
-			break;
-		case 0x2001:
-			printk(KERN_ERR "HYSDN Card%d: Message not supported in"
-			       " current state\n", card->myid);
-			break;
-		case 0x2002:
-			printk(KERN_ERR "HYSDN Card%d: invalid PLCI\n", card->myid);
-			break;
-		case 0x2004:
-			printk(KERN_ERR "HYSDN Card%d: out of NCCI\n", card->myid);
-			break;
-		case 0x3008:
-			printk(KERN_ERR "HYSDN Card%d: NCPI not supported\n",
-			       card->myid);
-			break;
-		default:
-			printk(KERN_ERR "HYSDN Card%d: Info in CONNECT_B3_CONF: %d\n",
-			       card->myid, info);
-			break;
-		}
-		break;
-	case CAPI_CONNECT_B3_IND:
-		capilib_new_ncci(&cinfo->ncci_head, ApplId,
-				 CAPIMSG_NCCI(skb->data),
-				 hycapi_applications[ApplId - 1].rp.datablkcnt);
-		break;
-	case CAPI_DATA_B3_CONF:
-		capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
-				     CAPIMSG_NCCI(skb->data),
-				     CAPIMSG_MSGID(skb->data));
-		break;
-	default:
-		break;
-	}
-	capi_ctr_handle_message(ctrl, ApplId, skb);
-}
-
-/******************************************************************
-hycapi_tx_capiack
-
-Internally acknowledge a msg sent. This will remove the msg from the
-internal queue.
-
-*******************************************************************/
-
-void hycapi_tx_capiack(hysdn_card *card)
-{
-	hycapictrl_info *cinfo = card->hyctrlinfo;
-#ifdef HYCAPI_PRINTFNAMES
-	printk(KERN_NOTICE "hycapi_tx_capiack\n");
-#endif
-	if (!cinfo) {
-		return;
-	}
-	spin_lock_irq(&cinfo->lock);
-	kfree_skb(cinfo->skbs[cinfo->out_idx]);		/* free skb */
-	cinfo->skbs[cinfo->out_idx++] = NULL;
-	if (cinfo->out_idx >= HYSDN_MAX_CAPI_SKB)
-		cinfo->out_idx = 0;	/* wrap around */
-
-	if (cinfo->sk_count-- == HYSDN_MAX_CAPI_SKB)	/* dec usage count */
-		capi_ctr_resume_output(&cinfo->capi_ctrl);
-	spin_unlock_irq(&cinfo->lock);
-}
-
-/***************************************************************
-hycapi_tx_capiget(hysdn_card *card)
-
-This is called when polling for messages to SEND.
-
-****************************************************************/
-
-struct sk_buff *
-hycapi_tx_capiget(hysdn_card *card)
-{
-	hycapictrl_info *cinfo = card->hyctrlinfo;
-	if (!cinfo) {
-		return (struct sk_buff *)NULL;
-	}
-	if (!cinfo->sk_count)
-		return (struct sk_buff *)NULL;	/* nothing available */
-
-	return (cinfo->skbs[cinfo->out_idx]);		/* next packet to send */
-}
-
-
-/**********************************************************
-int hycapi_init()
-
-attach the capi-driver to the kernel-capi.
-
-***********************************************************/
-
-int hycapi_init(void)
-{
-	int i;
-	for (i = 0; i < CAPI_MAXAPPL; i++) {
-		memset(&(hycapi_applications[i]), 0, sizeof(hycapi_appl));
-	}
-	return (0);
-}
-
-/**************************************************************
-hycapi_cleanup(void)
-
-detach the capi-driver to the kernel-capi. Actually this should
-free some more ressources. Do that later.
-**************************************************************/
-
-void
-hycapi_cleanup(void)
-{
-}
-
-/********************************************************************
-hycapi_capi_create(hysdn_card *card)
-
-Attach the card with its capi-ctrl.
-*********************************************************************/
-
-static void hycapi_fill_profile(hysdn_card *card)
-{
-	hycapictrl_info *cinfo = NULL;
-	struct capi_ctr *ctrl = NULL;
-	cinfo = card->hyctrlinfo;
-	if (!cinfo) return;
-	ctrl = &cinfo->capi_ctrl;
-	strcpy(ctrl->manu, "Hypercope");
-	ctrl->version.majorversion = 2;
-	ctrl->version.minorversion = 0;
-	ctrl->version.majormanuversion = 3;
-	ctrl->version.minormanuversion = 2;
-	ctrl->profile.ncontroller = card->myid;
-	ctrl->profile.nbchannel = card->bchans;
-	ctrl->profile.goptions = GLOBAL_OPTION_INTERNAL_CONTROLLER |
-		GLOBAL_OPTION_B_CHANNEL_OPERATION;
-	ctrl->profile.support1 =  B1_PROT_64KBIT_HDLC |
-		(card->faxchans ? B1_PROT_T30 : 0) |
-		B1_PROT_64KBIT_TRANSPARENT;
-	ctrl->profile.support2 = B2_PROT_ISO7776 |
-		(card->faxchans ? B2_PROT_T30 : 0) |
-		B2_PROT_TRANSPARENT;
-	ctrl->profile.support3 = B3_PROT_TRANSPARENT |
-		B3_PROT_T90NL |
-		(card->faxchans ? B3_PROT_T30 : 0) |
-		(card->faxchans ? B3_PROT_T30EXT : 0) |
-		B3_PROT_ISO8208;
-}
-
-int
-hycapi_capi_create(hysdn_card *card)
-{
-	hycapictrl_info *cinfo = NULL;
-	struct capi_ctr *ctrl = NULL;
-	int retval;
-#ifdef HYCAPI_PRINTFNAMES
-	printk(KERN_NOTICE "hycapi_capi_create\n");
-#endif
-	if ((hycapi_enable & (1 << card->myid)) == 0) {
-		return 1;
-	}
-	if (!card->hyctrlinfo) {
-		cinfo = kzalloc(sizeof(hycapictrl_info), GFP_ATOMIC);
-		if (!cinfo) {
-			printk(KERN_WARNING "HYSDN: no memory for capi-ctrl.\n");
-			return -ENOMEM;
-		}
-		card->hyctrlinfo = cinfo;
-		cinfo->card = card;
-		spin_lock_init(&cinfo->lock);
-		INIT_LIST_HEAD(&cinfo->ncci_head);
-
-		switch (card->brdtype) {
-		case BD_PCCARD:  strcpy(cinfo->cardname, "HYSDN Hycard"); break;
-		case BD_ERGO: strcpy(cinfo->cardname, "HYSDN Ergo2"); break;
-		case BD_METRO: strcpy(cinfo->cardname, "HYSDN Metro4"); break;
-		case BD_CHAMP2: strcpy(cinfo->cardname, "HYSDN Champ2"); break;
-		case BD_PLEXUS: strcpy(cinfo->cardname, "HYSDN Plexus30"); break;
-		default: strcpy(cinfo->cardname, "HYSDN ???"); break;
-		}
-
-		ctrl = &cinfo->capi_ctrl;
-		ctrl->driver_name   = "hycapi";
-		ctrl->driverdata    = cinfo;
-		ctrl->register_appl = hycapi_register_appl;
-		ctrl->release_appl  = hycapi_release_appl;
-		ctrl->send_message  = hycapi_send_message;
-		ctrl->load_firmware = hycapi_load_firmware;
-		ctrl->reset_ctr     = hycapi_reset_ctr;
-		ctrl->procinfo      = hycapi_procinfo;
-		ctrl->proc_show     = hycapi_proc_show;
-		strcpy(ctrl->name, cinfo->cardname);
-		ctrl->owner = THIS_MODULE;
-
-		retval = attach_capi_ctr(ctrl);
-		if (retval) {
-			printk(KERN_ERR "hycapi: attach controller failed.\n");
-			return -EBUSY;
-		}
-		/* fill in the blanks: */
-		hycapi_fill_profile(card);
-		capi_ctr_ready(ctrl);
-	} else {
-		/* resume output on stopped ctrl */
-		ctrl = &card->hyctrlinfo->capi_ctrl;
-		hycapi_fill_profile(card);
-		capi_ctr_ready(ctrl);
-		hycapi_restart_internal(ctrl);
-/*		ctrl->resume_output(ctrl); */
-	}
-	return 0;
-}
diff --git a/drivers/staging/isdn/hysdn/hysdn_boot.c b/drivers/staging/isdn/hysdn/hysdn_boot.c
deleted file mode 100644
index ba177c3..0000000
--- a/drivers/staging/isdn/hysdn/hysdn_boot.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/* $Id: hysdn_boot.c,v 1.4.6.4 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards
- * specific routines for booting and pof handling
- *
- * Author    Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include "hysdn_defs.h"
-#include "hysdn_pof.h"
-
-/********************************/
-/* defines for pof read handler */
-/********************************/
-#define POF_READ_FILE_HEAD  0
-#define POF_READ_TAG_HEAD   1
-#define POF_READ_TAG_DATA   2
-
-/************************************************************/
-/* definition of boot specific data area. This data is only */
-/* needed during boot and so allocated dynamically.         */
-/************************************************************/
-struct boot_data {
-	unsigned short Cryptor;	/* for use with Decrypt function */
-	unsigned short Nrecs;	/* records remaining in file */
-	unsigned char pof_state;/* actual state of read handler */
-	unsigned char is_crypted;/* card data is crypted */
-	int BufSize;		/* actual number of bytes bufferd */
-	int last_error;		/* last occurred error */
-	unsigned short pof_recid;/* actual pof recid */
-	unsigned long pof_reclen;/* total length of pof record data */
-	unsigned long pof_recoffset;/* actual offset inside pof record */
-	union {
-		unsigned char BootBuf[BOOT_BUF_SIZE];/* buffer as byte count */
-		tPofRecHdr PofRecHdr;	/* header for actual record/chunk */
-		tPofFileHdr PofFileHdr;		/* header from POF file */
-		tPofTimeStamp PofTime;	/* time information */
-	} buf;
-};
-
-/*****************************************************/
-/*  start decryption of successive POF file chuncks.  */
-/*                                                   */
-/*  to be called at start of POF file reading,       */
-/*  before starting any decryption on any POF record. */
-/*****************************************************/
-static void
-StartDecryption(struct boot_data *boot)
-{
-	boot->Cryptor = CRYPT_STARTTERM;
-}				/* StartDecryption */
-
-
-/***************************************************************/
-/* decrypt complete BootBuf                                    */
-/* NOTE: decryption must be applied to all or none boot tags - */
-/*       to HI and LO boot loader and (all) seq tags, because  */
-/*       global Cryptor is started for whole POF.              */
-/***************************************************************/
-static void
-DecryptBuf(struct boot_data *boot, int cnt)
-{
-	unsigned char *bufp = boot->buf.BootBuf;
-
-	while (cnt--) {
-		boot->Cryptor = (boot->Cryptor >> 1) ^ ((boot->Cryptor & 1U) ? CRYPT_FEEDTERM : 0);
-		*bufp++ ^= (unsigned char)boot->Cryptor;
-	}
-}				/* DecryptBuf */
-
-/********************************************************************************/
-/* pof_handle_data executes the required actions dependent on the active record */
-/* id. If successful 0 is returned, a negative value shows an error.           */
-/********************************************************************************/
-static int
-pof_handle_data(hysdn_card *card, int datlen)
-{
-	struct boot_data *boot = card->boot;	/* pointer to boot specific data */
-	long l;
-	unsigned char *imgp;
-	int img_len;
-
-	/* handle the different record types */
-	switch (boot->pof_recid) {
-
-	case TAG_TIMESTMP:
-		if (card->debug_flags & LOG_POF_RECORD)
-			hysdn_addlog(card, "POF created %s", boot->buf.PofTime.DateTimeText);
-		break;
-
-	case TAG_CBOOTDTA:
-		DecryptBuf(boot, datlen);	/* we need to encrypt the buffer */
-		/* fall through */
-	case TAG_BOOTDTA:
-		if (card->debug_flags & LOG_POF_RECORD)
-			hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
-				     (boot->pof_recid == TAG_CBOOTDTA) ? "CBOOTDATA" : "BOOTDTA",
-				     datlen, boot->pof_recoffset);
-
-		if (boot->pof_reclen != POF_BOOT_LOADER_TOTAL_SIZE) {
-			boot->last_error = EPOF_BAD_IMG_SIZE;	/* invalid length */
-			return (boot->last_error);
-		}
-		imgp = boot->buf.BootBuf;	/* start of buffer */
-		img_len = datlen;	/* maximum length to transfer */
-
-		l = POF_BOOT_LOADER_OFF_IN_PAGE -
-			(boot->pof_recoffset & (POF_BOOT_LOADER_PAGE_SIZE - 1));
-		if (l > 0) {
-			/* buffer needs to be truncated */
-			imgp += l;	/* advance pointer */
-			img_len -= l;	/* adjust len */
-		}
-		/* at this point no special handling for data wrapping over buffer */
-		/* is necessary, because the boot image always will be adjusted to */
-		/* match a page boundary inside the buffer.                        */
-		/* The buffer for the boot image on the card is filled in 2 cycles */
-		/* first the 1024 hi-words are put in the buffer, then the low 1024 */
-		/* word are handled in the same way with different offset.         */
-
-		if (img_len > 0) {
-			/* data available for copy */
-			if ((boot->last_error =
-			     card->writebootimg(card, imgp,
-						(boot->pof_recoffset > POF_BOOT_LOADER_PAGE_SIZE) ? 2 : 0)) < 0)
-				return (boot->last_error);
-		}
-		break;	/* end of case boot image hi/lo */
-
-	case TAG_CABSDATA:
-		DecryptBuf(boot, datlen);	/* we need to encrypt the buffer */
-		/* fall through */
-	case TAG_ABSDATA:
-		if (card->debug_flags & LOG_POF_RECORD)
-			hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
-				     (boot->pof_recid == TAG_CABSDATA) ? "CABSDATA" : "ABSDATA",
-				     datlen, boot->pof_recoffset);
-
-		if ((boot->last_error = card->writebootseq(card, boot->buf.BootBuf, datlen)) < 0)
-			return (boot->last_error);	/* error writing data */
-
-		if (boot->pof_recoffset + datlen >= boot->pof_reclen)
-			return (card->waitpofready(card));	/* data completely spooled, wait for ready */
-
-		break;	/* end of case boot seq data */
-
-	default:
-		if (card->debug_flags & LOG_POF_RECORD)
-			hysdn_addlog(card, "POF got data(id=0x%lx) len=%d offs=0x%lx", boot->pof_recid,
-				     datlen, boot->pof_recoffset);
-
-		break;	/* simply skip record */
-	}			/* switch boot->pof_recid */
-
-	return (0);
-}				/* pof_handle_data */
-
-
-/******************************************************************************/
-/* pof_write_buffer is called when the buffer has been filled with the needed */
-/* number of data bytes. The number delivered is additionally supplied for    */
-/* verification. The functions handles the data and returns the needed number */
-/* of bytes for the next action. If the returned value is 0 or less an error  */
-/* occurred and booting must be aborted.                                       */
-/******************************************************************************/
-int
-pof_write_buffer(hysdn_card *card, int datlen)
-{
-	struct boot_data *boot = card->boot;	/* pointer to boot specific data */
-
-	if (!boot)
-		return (-EFAULT);	/* invalid call */
-	if (boot->last_error < 0)
-		return (boot->last_error);	/* repeated error */
-
-	if (card->debug_flags & LOG_POF_WRITE)
-		hysdn_addlog(card, "POF write: got %d bytes ", datlen);
-
-	switch (boot->pof_state) {
-	case POF_READ_FILE_HEAD:
-		if (card->debug_flags & LOG_POF_WRITE)
-			hysdn_addlog(card, "POF write: checking file header");
-
-		if (datlen != sizeof(tPofFileHdr)) {
-			boot->last_error = -EPOF_INTERNAL;
-			break;
-		}
-		if (boot->buf.PofFileHdr.Magic != TAGFILEMAGIC) {
-			boot->last_error = -EPOF_BAD_MAGIC;
-			break;
-		}
-		/* Setup the new state and vars */
-		boot->Nrecs = (unsigned short)(boot->buf.PofFileHdr.N_PofRecs);	/* limited to 65535 */
-		boot->pof_state = POF_READ_TAG_HEAD;	/* now start with single tags */
-		boot->last_error = sizeof(tPofRecHdr);	/* new length */
-		break;
-
-	case POF_READ_TAG_HEAD:
-		if (card->debug_flags & LOG_POF_WRITE)
-			hysdn_addlog(card, "POF write: checking tag header");
-
-		if (datlen != sizeof(tPofRecHdr)) {
-			boot->last_error = -EPOF_INTERNAL;
-			break;
-		}
-		boot->pof_recid = boot->buf.PofRecHdr.PofRecId;		/* actual pof recid */
-		boot->pof_reclen = boot->buf.PofRecHdr.PofRecDataLen;	/* total length */
-		boot->pof_recoffset = 0;	/* no starting offset */
-
-		if (card->debug_flags & LOG_POF_RECORD)
-			hysdn_addlog(card, "POF: got record id=0x%lx length=%ld ",
-				     boot->pof_recid, boot->pof_reclen);
-
-		boot->pof_state = POF_READ_TAG_DATA;	/* now start with tag data */
-		if (boot->pof_reclen < BOOT_BUF_SIZE)
-			boot->last_error = boot->pof_reclen;	/* limit size */
-		else
-			boot->last_error = BOOT_BUF_SIZE;	/* maximum */
-
-		if (!boot->last_error) {	/* no data inside record */
-			boot->pof_state = POF_READ_TAG_HEAD;	/* now start with single tags */
-			boot->last_error = sizeof(tPofRecHdr);	/* new length */
-		}
-		break;
-
-	case POF_READ_TAG_DATA:
-		if (card->debug_flags & LOG_POF_WRITE)
-			hysdn_addlog(card, "POF write: getting tag data");
-
-		if (datlen != boot->last_error) {
-			boot->last_error = -EPOF_INTERNAL;
-			break;
-		}
-		if ((boot->last_error = pof_handle_data(card, datlen)) < 0)
-			return (boot->last_error);	/* an error occurred */
-		boot->pof_recoffset += datlen;
-		if (boot->pof_recoffset >= boot->pof_reclen) {
-			boot->pof_state = POF_READ_TAG_HEAD;	/* now start with single tags */
-			boot->last_error = sizeof(tPofRecHdr);	/* new length */
-		} else {
-			if (boot->pof_reclen - boot->pof_recoffset < BOOT_BUF_SIZE)
-				boot->last_error = boot->pof_reclen - boot->pof_recoffset;	/* limit size */
-			else
-				boot->last_error = BOOT_BUF_SIZE;	/* maximum */
-		}
-		break;
-
-	default:
-		boot->last_error = -EPOF_INTERNAL;	/* unknown state */
-		break;
-	}			/* switch (boot->pof_state) */
-
-	return (boot->last_error);
-}				/* pof_write_buffer */
-
-
-/*******************************************************************************/
-/* pof_write_open is called when an open for boot on the cardlog device occurs. */
-/* The function returns the needed number of bytes for the next operation. If  */
-/* the returned number is less or equal 0 an error specified by this code      */
-/* occurred. Additionally the pointer to the buffer data area is set on success */
-/*******************************************************************************/
-int
-pof_write_open(hysdn_card *card, unsigned char **bufp)
-{
-	struct boot_data *boot;	/* pointer to boot specific data */
-
-	if (card->boot) {
-		if (card->debug_flags & LOG_POF_OPEN)
-			hysdn_addlog(card, "POF open: already opened for boot");
-		return (-ERR_ALREADY_BOOT);	/* boot already active */
-	}
-	/* error no mem available */
-	if (!(boot = kzalloc(sizeof(struct boot_data), GFP_KERNEL))) {
-		if (card->debug_flags & LOG_MEM_ERR)
-			hysdn_addlog(card, "POF open: unable to allocate mem");
-		return (-EFAULT);
-	}
-	card->boot = boot;
-	card->state = CARD_STATE_BOOTING;
-
-	card->stopcard(card);	/* first stop the card */
-	if (card->testram(card)) {
-		if (card->debug_flags & LOG_POF_OPEN)
-			hysdn_addlog(card, "POF open: DPRAM test failure");
-		boot->last_error = -ERR_BOARD_DPRAM;
-		card->state = CARD_STATE_BOOTERR;	/* show boot error */
-		return (boot->last_error);
-	}
-	boot->BufSize = 0;	/* Buffer is empty */
-	boot->pof_state = POF_READ_FILE_HEAD;	/* read file header */
-	StartDecryption(boot);	/* if POF File should be encrypted */
-
-	if (card->debug_flags & LOG_POF_OPEN)
-		hysdn_addlog(card, "POF open: success");
-
-	*bufp = boot->buf.BootBuf;	/* point to buffer */
-	return (sizeof(tPofFileHdr));
-}				/* pof_write_open */
-
-/********************************************************************************/
-/* pof_write_close is called when an close of boot on the cardlog device occurs. */
-/* The return value must be 0 if everything has happened as desired.            */
-/********************************************************************************/
-int
-pof_write_close(hysdn_card *card)
-{
-	struct boot_data *boot = card->boot;	/* pointer to boot specific data */
-
-	if (!boot)
-		return (-EFAULT);	/* invalid call */
-
-	card->boot = NULL;	/* no boot active */
-	kfree(boot);
-
-	if (card->state == CARD_STATE_RUN)
-		card->set_errlog_state(card, 1);	/* activate error log */
-
-	if (card->debug_flags & LOG_POF_OPEN)
-		hysdn_addlog(card, "POF close: success");
-
-	return (0);
-}				/* pof_write_close */
-
-/*********************************************************************************/
-/* EvalSysrTokData checks additional records delivered with the Sysready Message */
-/* when POF has been booted. A return value of 0 is used if no error occurred.    */
-/*********************************************************************************/
-int
-EvalSysrTokData(hysdn_card *card, unsigned char *cp, int len)
-{
-	u_char *p;
-	u_char crc;
-
-	if (card->debug_flags & LOG_POF_RECORD)
-		hysdn_addlog(card, "SysReady Token data length %d", len);
-
-	if (len < 2) {
-		hysdn_addlog(card, "SysReady Token Data to short");
-		return (1);
-	}
-	for (p = cp, crc = 0; p < (cp + len - 2); p++)
-		if ((crc & 0x80))
-			crc = (((u_char) (crc << 1)) + 1) + *p;
-		else
-			crc = ((u_char) (crc << 1)) + *p;
-	crc = ~crc;
-	if (crc != *(cp + len - 1)) {
-		hysdn_addlog(card, "SysReady Token Data invalid CRC");
-		return (1);
-	}
-	len--;			/* don't check CRC byte */
-	while (len > 0) {
-
-		if (*cp == SYSR_TOK_END)
-			return (0);	/* End of Token stream */
-
-		if (len < (*(cp + 1) + 2)) {
-			hysdn_addlog(card, "token 0x%x invalid length %d", *cp, *(cp + 1));
-			return (1);
-		}
-		switch (*cp) {
-		case SYSR_TOK_B_CHAN:	/* 1 */
-			if (*(cp + 1) != 1)
-				return (1);	/* length invalid */
-			card->bchans = *(cp + 2);
-			break;
-
-		case SYSR_TOK_FAX_CHAN:	/* 2 */
-			if (*(cp + 1) != 1)
-				return (1);	/* length invalid */
-			card->faxchans = *(cp + 2);
-			break;
-
-		case SYSR_TOK_MAC_ADDR:	/* 3 */
-			if (*(cp + 1) != 6)
-				return (1);	/* length invalid */
-			memcpy(card->mac_addr, cp + 2, 6);
-			break;
-
-		default:
-			hysdn_addlog(card, "unknown token 0x%02x length %d", *cp, *(cp + 1));
-			break;
-		}
-		len -= (*(cp + 1) + 2);		/* adjust len */
-		cp += (*(cp + 1) + 2);	/* and pointer */
-	}
-
-	hysdn_addlog(card, "no end token found");
-	return (1);
-}				/* EvalSysrTokData */
diff --git a/drivers/staging/isdn/hysdn/hysdn_defs.h b/drivers/staging/isdn/hysdn/hysdn_defs.h
deleted file mode 100644
index cdac46a..0000000
--- a/drivers/staging/isdn/hysdn/hysdn_defs.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/* $Id: hysdn_defs.h,v 1.5.6.3 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards
- * global definitions and exported vars and functions.
- *
- * Author    Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef HYSDN_DEFS_H
-#define HYSDN_DEFS_H
-
-#include <linux/hysdn_if.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/skbuff.h>
-
-#include "ince1pc.h"
-
-#ifdef CONFIG_HYSDN_CAPI
-#include <linux/capi.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-
-/***************************/
-/*   CAPI-Profile values.  */
-/***************************/
-
-#define GLOBAL_OPTION_INTERNAL_CONTROLLER 0x0001
-#define GLOBAL_OPTION_EXTERNAL_CONTROLLER 0x0002
-#define GLOBAL_OPTION_HANDSET             0x0004
-#define GLOBAL_OPTION_DTMF                0x0008
-#define GLOBAL_OPTION_SUPPL_SERVICES      0x0010
-#define GLOBAL_OPTION_CHANNEL_ALLOCATION  0x0020
-#define GLOBAL_OPTION_B_CHANNEL_OPERATION 0x0040
-
-#define B1_PROT_64KBIT_HDLC        0x0001
-#define B1_PROT_64KBIT_TRANSPARENT 0x0002
-#define B1_PROT_V110_ASYNCH        0x0004
-#define B1_PROT_V110_SYNCH         0x0008
-#define B1_PROT_T30                0x0010
-#define B1_PROT_64KBIT_INV_HDLC    0x0020
-#define B1_PROT_56KBIT_TRANSPARENT 0x0040
-
-#define B2_PROT_ISO7776            0x0001
-#define B2_PROT_TRANSPARENT        0x0002
-#define B2_PROT_SDLC               0x0004
-#define B2_PROT_LAPD               0x0008
-#define B2_PROT_T30                0x0010
-#define B2_PROT_PPP                0x0020
-#define B2_PROT_TRANSPARENT_IGNORE_B1_FRAMING_ERRORS 0x0040
-
-#define B3_PROT_TRANSPARENT        0x0001
-#define B3_PROT_T90NL              0x0002
-#define B3_PROT_ISO8208            0x0004
-#define B3_PROT_X25_DCE            0x0008
-#define B3_PROT_T30                0x0010
-#define B3_PROT_T30EXT             0x0020
-
-#define HYSDN_MAXVERSION		8
-
-/* Number of sendbuffers in CAPI-queue */
-#define HYSDN_MAX_CAPI_SKB             20
-
-#endif /* CONFIG_HYSDN_CAPI*/
-
-/************************************************/
-/* constants and bits for debugging/log outputs */
-/************************************************/
-#define LOG_MAX_LINELEN 120
-#define DEB_OUT_SYSLOG  0x80000000	/* output to syslog instead of proc fs */
-#define LOG_MEM_ERR     0x00000001	/* log memory errors like kmalloc failure */
-#define LOG_POF_OPEN    0x00000010	/* log pof open and close activities */
-#define LOG_POF_RECORD  0x00000020	/* log pof record parser */
-#define LOG_POF_WRITE   0x00000040	/* log detailed pof write operation */
-#define LOG_POF_CARD    0x00000080	/* log pof related card functions */
-#define LOG_CNF_LINE    0x00000100	/* all conf lines are put to procfs */
-#define LOG_CNF_DATA    0x00000200	/* non comment conf lines are shown with channel */
-#define LOG_CNF_MISC    0x00000400	/* additional conf line debug outputs */
-#define LOG_SCHED_ASYN  0x00001000	/* debug schedulers async tx routines */
-#define LOG_PROC_OPEN   0x00100000	/* open and close from procfs are logged */
-#define LOG_PROC_ALL    0x00200000	/* all actions from procfs are logged */
-#define LOG_NET_INIT    0x00010000	/* network init and deinit logging */
-
-#define DEF_DEB_FLAGS   0x7fff000f	/* everything is logged to procfs */
-
-/**********************************/
-/* proc filesystem name constants */
-/**********************************/
-#define PROC_SUBDIR_NAME "hysdn"
-#define PROC_CONF_BASENAME "cardconf"
-#define PROC_LOG_BASENAME "cardlog"
-
-/***********************************/
-/* PCI 32 bit parms for IO and MEM */
-/***********************************/
-#define PCI_REG_PLX_MEM_BASE    0
-#define PCI_REG_PLX_IO_BASE     1
-#define PCI_REG_MEMORY_BASE     3
-
-/**************/
-/* card types */
-/**************/
-#define BD_NONE         0U
-#define BD_PERFORMANCE  1U
-#define BD_VALUE        2U
-#define BD_PCCARD       3U
-#define BD_ERGO         4U
-#define BD_METRO        5U
-#define BD_CHAMP2       6U
-#define BD_PLEXUS       7U
-
-/******************************************************/
-/* defined states for cards shown by reading cardconf */
-/******************************************************/
-#define CARD_STATE_UNUSED   0	/* never been used or booted */
-#define CARD_STATE_BOOTING  1	/* booting is in progress */
-#define CARD_STATE_BOOTERR  2	/* a previous boot was aborted */
-#define CARD_STATE_RUN      3	/* card is active */
-
-/*******************************/
-/* defines for error_log_state */
-/*******************************/
-#define ERRLOG_STATE_OFF   0	/* error log is switched off, nothing to do */
-#define ERRLOG_STATE_ON    1	/* error log is switched on, wait for data */
-#define ERRLOG_STATE_START 2	/* start error logging */
-#define ERRLOG_STATE_STOP  3	/* stop error logging */
-
-/*******************************/
-/* data structure for one card */
-/*******************************/
-typedef struct HYSDN_CARD {
-
-	/* general variables for the cards */
-	int myid;		/* own driver card id */
-	unsigned char bus;	/* pci bus the card is connected to */
-	unsigned char devfn;	/* slot+function bit encoded */
-	unsigned short subsysid;/* PCI subsystem id */
-	unsigned char brdtype;	/* type of card */
-	unsigned int bchans;	/* number of available B-channels */
-	unsigned int faxchans;	/* number of available fax-channels */
-	unsigned char mac_addr[6];/* MAC Address read from card */
-	unsigned int irq;	/* interrupt number */
-	unsigned int iobase;	/* IO-port base address */
-	unsigned long plxbase;	/* PLX memory base */
-	unsigned long membase;	/* DPRAM memory base */
-	unsigned long memend;	/* DPRAM memory end */
-	void *dpram;		/* mapped dpram */
-	int state;		/* actual state of card -> CARD_STATE_** */
-	struct HYSDN_CARD *next;	/* pointer to next card */
-
-	/* data areas for the /proc file system */
-	void *proclog;		/* pointer to proclog filesystem specific data */
-	void *procconf;		/* pointer to procconf filesystem specific data */
-
-	/* debugging and logging */
-	unsigned char err_log_state;/* actual error log state of the card */
-	unsigned long debug_flags;/* tells what should be debugged and where */
-	void (*set_errlog_state) (struct HYSDN_CARD *, int);
-
-	/* interrupt handler + interrupt synchronisation */
-	struct work_struct irq_queue;	/* interrupt task queue */
-	unsigned char volatile irq_enabled;/* interrupt enabled if != 0 */
-	unsigned char volatile hw_lock;/* hardware is currently locked -> no access */
-
-	/* boot process */
-	void *boot;		/* pointer to boot private data */
-	int (*writebootimg) (struct HYSDN_CARD *, unsigned char *, unsigned long);
-	int (*writebootseq) (struct HYSDN_CARD *, unsigned char *, int);
-	int (*waitpofready) (struct HYSDN_CARD *);
-	int (*testram) (struct HYSDN_CARD *);
-
-	/* scheduler for data transfer (only async parts) */
-	unsigned char async_data[256];/* async data to be sent (normally for config) */
-	unsigned short volatile async_len;/* length of data to sent */
-	unsigned short volatile async_channel;/* channel number for async transfer */
-	int volatile async_busy;	/* flag != 0 sending in progress */
-	int volatile net_tx_busy;	/* a network packet tx is in progress */
-
-	/* network interface */
-	void *netif;		/* pointer to network structure */
-
-	/* init and deinit stopcard for booting, too */
-	void (*stopcard) (struct HYSDN_CARD *);
-	void (*releasehardware) (struct HYSDN_CARD *);
-
-	spinlock_t hysdn_lock;
-#ifdef CONFIG_HYSDN_CAPI
-	struct hycapictrl_info {
-		char cardname[32];
-		spinlock_t lock;
-		int versionlen;
-		char versionbuf[1024];
-		char *version[HYSDN_MAXVERSION];
-
-		char infobuf[128];	/* for function procinfo */
-
-		struct HYSDN_CARD  *card;
-		struct capi_ctr capi_ctrl;
-		struct sk_buff *skbs[HYSDN_MAX_CAPI_SKB];
-		int in_idx, out_idx;	/* indexes to buffer ring */
-		int sk_count;		/* number of buffers currently in ring */
-		struct sk_buff *tx_skb;	/* buffer for tx operation */
-
-		struct list_head ncci_head;
-	} *hyctrlinfo;
-#endif /* CONFIG_HYSDN_CAPI */
-} hysdn_card;
-
-#ifdef CONFIG_HYSDN_CAPI
-typedef struct hycapictrl_info hycapictrl_info;
-#endif /* CONFIG_HYSDN_CAPI */
-
-
-/*****************/
-/* exported vars */
-/*****************/
-extern hysdn_card *card_root;	/* pointer to first card */
-
-
-
-/*************************/
-/* im/exported functions */
-/*************************/
-
-/* hysdn_procconf.c */
-extern int hysdn_procconf_init(void);	/* init proc config filesys */
-extern void hysdn_procconf_release(void);	/* deinit proc config filesys */
-
-/* hysdn_proclog.c */
-extern int hysdn_proclog_init(hysdn_card *);	/* init proc log entry */
-extern void hysdn_proclog_release(hysdn_card *);	/* deinit proc log entry */
-extern void hysdn_addlog(hysdn_card *, char *, ...);	/* output data to log */
-extern void hysdn_card_errlog(hysdn_card *, tErrLogEntry *, int);	/* output card log */
-
-/* boardergo.c */
-extern int ergo_inithardware(hysdn_card *card);	/* get hardware -> module init */
-
-/* hysdn_boot.c */
-extern int pof_write_close(hysdn_card *);	/* close proc file after writing pof */
-extern int pof_write_open(hysdn_card *, unsigned char **);	/* open proc file for writing pof */
-extern int pof_write_buffer(hysdn_card *, int);		/* write boot data to card */
-extern int EvalSysrTokData(hysdn_card *, unsigned char *, int);		/* Check Sysready Token Data */
-
-/* hysdn_sched.c */
-extern int hysdn_sched_tx(hysdn_card *, unsigned char *,
-			  unsigned short volatile *, unsigned short volatile *,
-			  unsigned short);
-extern int hysdn_sched_rx(hysdn_card *, unsigned char *, unsigned short,
-			  unsigned short);
-extern int hysdn_tx_cfgline(hysdn_card *, unsigned char *,
-			    unsigned short);	/* send one cfg line */
-
-/* hysdn_net.c */
-extern unsigned int hynet_enable;
-extern int hysdn_net_create(hysdn_card *);	/* create a new net device */
-extern int hysdn_net_release(hysdn_card *);	/* delete the device */
-extern char *hysdn_net_getname(hysdn_card *);	/* get name of net interface */
-extern void hysdn_tx_netack(hysdn_card *);	/* acknowledge a packet tx */
-extern struct sk_buff *hysdn_tx_netget(hysdn_card *);	/* get next network packet */
-extern void hysdn_rx_netpkt(hysdn_card *, unsigned char *,
-			    unsigned short);	/* rxed packet from network */
-
-#ifdef CONFIG_HYSDN_CAPI
-extern unsigned int hycapi_enable;
-extern int hycapi_capi_create(hysdn_card *);	/* create a new capi device */
-extern int hycapi_capi_release(hysdn_card *);	/* delete the device */
-extern int hycapi_capi_stop(hysdn_card *card);   /* suspend */
-extern void hycapi_rx_capipkt(hysdn_card *card, unsigned char *buf,
-			      unsigned short len);
-extern void hycapi_tx_capiack(hysdn_card *card);
-extern struct sk_buff *hycapi_tx_capiget(hysdn_card *card);
-extern int hycapi_init(void);
-extern void hycapi_cleanup(void);
-#endif /* CONFIG_HYSDN_CAPI */
-
-#endif /* HYSDN_DEFS_H */
diff --git a/drivers/staging/isdn/hysdn/hysdn_init.c b/drivers/staging/isdn/hysdn/hysdn_init.c
deleted file mode 100644
index 0db2f75..0000000
--- a/drivers/staging/isdn/hysdn/hysdn_init.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/* $Id: hysdn_init.c,v 1.6.6.6 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, init functions.
- *
- * Author    Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-
-#include "hysdn_defs.h"
-
-static struct pci_device_id hysdn_pci_tbl[] = {
-	{ PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
-	  PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_METRO, 0, 0, BD_METRO },
-	{ PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
-	  PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2, 0, 0, BD_CHAMP2 },
-	{ PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
-	  PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_ERGO, 0, 0, BD_ERGO },
-	{ PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
-	  PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO, 0, 0, BD_ERGO },
-
-	{ }				/* Terminating entry */
-};
-MODULE_DEVICE_TABLE(pci, hysdn_pci_tbl);
-MODULE_DESCRIPTION("ISDN4Linux: Driver for HYSDN cards");
-MODULE_AUTHOR("Werner Cornelius");
-MODULE_LICENSE("GPL");
-
-static int cardmax;		/* number of found cards */
-hysdn_card *card_root = NULL;	/* pointer to first card */
-static hysdn_card *card_last = NULL;	/* pointer to first card */
-
-
-/****************************************************************************/
-/* The module startup and shutdown code. Only compiled when used as module. */
-/* Using the driver as module is always advisable, because the booting      */
-/* image becomes smaller and the driver code is only loaded when needed.    */
-/* Additionally newer versions may be activated without rebooting.          */
-/****************************************************************************/
-
-/****************************************************************************/
-/* init_module is called once when the module is loaded to do all necessary */
-/* things like autodetect...                                                */
-/* If the return value of this function is 0 the init has been successful   */
-/* and the module is added to the list in /proc/modules, otherwise an error */
-/* is assumed and the module will not be kept in memory.                    */
-/****************************************************************************/
-
-static int hysdn_pci_init_one(struct pci_dev *akt_pcidev,
-			      const struct pci_device_id *ent)
-{
-	hysdn_card *card;
-	int rc;
-
-	rc = pci_enable_device(akt_pcidev);
-	if (rc)
-		return rc;
-
-	if (!(card = kzalloc(sizeof(hysdn_card), GFP_KERNEL))) {
-		printk(KERN_ERR "HYSDN: unable to alloc device mem \n");
-		rc = -ENOMEM;
-		goto err_out;
-	}
-	card->myid = cardmax;	/* set own id */
-	card->bus = akt_pcidev->bus->number;
-	card->devfn = akt_pcidev->devfn;	/* slot + function */
-	card->subsysid = akt_pcidev->subsystem_device;
-	card->irq = akt_pcidev->irq;
-	card->iobase = pci_resource_start(akt_pcidev, PCI_REG_PLX_IO_BASE);
-	card->plxbase = pci_resource_start(akt_pcidev, PCI_REG_PLX_MEM_BASE);
-	card->membase = pci_resource_start(akt_pcidev, PCI_REG_MEMORY_BASE);
-	card->brdtype = BD_NONE;	/* unknown */
-	card->debug_flags = DEF_DEB_FLAGS;	/* set default debug */
-	card->faxchans = 0;	/* default no fax channels */
-	card->bchans = 2;	/* and 2 b-channels */
-	card->brdtype = ent->driver_data;
-
-	if (ergo_inithardware(card)) {
-		printk(KERN_WARNING "HYSDN: card at io 0x%04x already in use\n", card->iobase);
-		rc = -EBUSY;
-		goto err_out_card;
-	}
-
-	cardmax++;
-	card->next = NULL;	/*end of chain */
-	if (card_last)
-		card_last->next = card;		/* pointer to next card */
-	else
-		card_root = card;
-	card_last = card;	/* new chain end */
-
-	pci_set_drvdata(akt_pcidev, card);
-	return 0;
-
-err_out_card:
-	kfree(card);
-err_out:
-	pci_disable_device(akt_pcidev);
-	return rc;
-}
-
-static void hysdn_pci_remove_one(struct pci_dev *akt_pcidev)
-{
-	hysdn_card *card = pci_get_drvdata(akt_pcidev);
-
-	pci_set_drvdata(akt_pcidev, NULL);
-
-	if (card->stopcard)
-		card->stopcard(card);
-
-#ifdef CONFIG_HYSDN_CAPI
-	hycapi_capi_release(card);
-#endif
-
-	if (card->releasehardware)
-		card->releasehardware(card);   /* free all hardware resources */
-
-	if (card == card_root) {
-		card_root = card_root->next;
-		if (!card_root)
-			card_last = NULL;
-	} else {
-		hysdn_card *tmp = card_root;
-		while (tmp) {
-			if (tmp->next == card)
-				tmp->next = card->next;
-			card_last = tmp;
-			tmp = tmp->next;
-		}
-	}
-
-	kfree(card);
-	pci_disable_device(akt_pcidev);
-}
-
-static struct pci_driver hysdn_pci_driver = {
-	.name		= "hysdn",
-	.id_table	= hysdn_pci_tbl,
-	.probe		= hysdn_pci_init_one,
-	.remove		= hysdn_pci_remove_one,
-};
-
-static int hysdn_have_procfs;
-
-static int __init
-hysdn_init(void)
-{
-	int rc;
-
-	printk(KERN_NOTICE "HYSDN: module loaded\n");
-
-	rc = pci_register_driver(&hysdn_pci_driver);
-	if (rc)
-		return rc;
-
-	printk(KERN_INFO "HYSDN: %d card(s) found.\n", cardmax);
-
-	if (!hysdn_procconf_init())
-		hysdn_have_procfs = 1;
-
-#ifdef CONFIG_HYSDN_CAPI
-	if (cardmax > 0) {
-		if (hycapi_init()) {
-			printk(KERN_ERR "HYCAPI: init failed\n");
-
-			if (hysdn_have_procfs)
-				hysdn_procconf_release();
-
-			pci_unregister_driver(&hysdn_pci_driver);
-			return -ESPIPE;
-		}
-	}
-#endif /* CONFIG_HYSDN_CAPI */
-
-	return 0;		/* no error */
-}				/* init_module */
-
-
-/***********************************************************************/
-/* cleanup_module is called when the module is released by the kernel. */
-/* The routine is only called if init_module has been successful and   */
-/* the module counter has a value of 0. Otherwise this function will   */
-/* not be called. This function must release all resources still allo- */
-/* cated as after the return from this function the module code will   */
-/* be removed from memory.                                             */
-/***********************************************************************/
-static void __exit
-hysdn_exit(void)
-{
-	if (hysdn_have_procfs)
-		hysdn_procconf_release();
-
-	pci_unregister_driver(&hysdn_pci_driver);
-
-#ifdef CONFIG_HYSDN_CAPI
-	hycapi_cleanup();
-#endif /* CONFIG_HYSDN_CAPI */
-
-	printk(KERN_NOTICE "HYSDN: module unloaded\n");
-}				/* cleanup_module */
-
-module_init(hysdn_init);
-module_exit(hysdn_exit);
diff --git a/drivers/staging/isdn/hysdn/hysdn_net.c b/drivers/staging/isdn/hysdn/hysdn_net.c
deleted file mode 100644
index dcb9ef7..0000000
--- a/drivers/staging/isdn/hysdn/hysdn_net.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/* $Id: hysdn_net.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, net (ethernet type) handling routines.
- *
- * Author    Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * This net module has been inspired by the skeleton driver from
- * Donald Becker (becker@CESDIS.gsfc.nasa.gov)
- *
- */
-
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inetdevice.h>
-
-#include "hysdn_defs.h"
-
-unsigned int hynet_enable = 0xffffffff;
-module_param(hynet_enable, uint, 0);
-
-#define MAX_SKB_BUFFERS 20	/* number of buffers for keeping TX-data */
-
-/****************************************************************************/
-/* structure containing the complete network data. The structure is aligned */
-/* in a way that both, the device and statistics are kept inside it.        */
-/* for proper access, the device structure MUST be the first var/struct     */
-/* inside the definition.                                                   */
-/****************************************************************************/
-struct net_local {
-	/* Tx control lock.  This protects the transmit buffer ring
-	 * state along with the "tx full" state of the driver.  This
-	 * means all netif_queue flow control actions are protected
-	 * by this lock as well.
-	 */
-	struct net_device *dev;
-	spinlock_t lock;
-	struct sk_buff *skbs[MAX_SKB_BUFFERS];	/* pointers to tx-skbs */
-	int in_idx, out_idx;	/* indexes to buffer ring */
-	int sk_count;		/* number of buffers currently in ring */
-};				/* net_local */
-
-
-
-/*********************************************************************/
-/* Open/initialize the board. This is called (in the current kernel) */
-/* sometime after booting when the 'ifconfig' program is run.        */
-/* This routine should set everything up anew at each open, even     */
-/* registers that "should" only need to be set once at boot, so that */
-/* there is non-reboot way to recover if something goes wrong.       */
-/*********************************************************************/
-static int
-net_open(struct net_device *dev)
-{
-	struct in_device *in_dev;
-	hysdn_card *card = dev->ml_priv;
-	int i;
-
-	netif_start_queue(dev);	/* start tx-queueing */
-
-	/* Fill in the MAC-level header (if not already set) */
-	if (!card->mac_addr[0]) {
-		for (i = 0; i < ETH_ALEN; i++)
-			dev->dev_addr[i] = 0xfc;
-		if ((in_dev = dev->ip_ptr) != NULL) {
-			const struct in_ifaddr *ifa;
-
-			rcu_read_lock();
-			ifa = rcu_dereference(in_dev->ifa_list);
-			if (ifa != NULL)
-				memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local));
-			rcu_read_unlock();
-		}
-	} else
-		memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN);
-
-	return (0);
-}				/* net_open */
-
-/*******************************************/
-/* flush the currently occupied tx-buffers */
-/* must only be called when device closed  */
-/*******************************************/
-static void
-flush_tx_buffers(struct net_local *nl)
-{
-
-	while (nl->sk_count) {
-		dev_kfree_skb(nl->skbs[nl->out_idx++]);		/* free skb */
-		if (nl->out_idx >= MAX_SKB_BUFFERS)
-			nl->out_idx = 0;	/* wrap around */
-		nl->sk_count--;
-	}
-}				/* flush_tx_buffers */
-
-
-/*********************************************************************/
-/* close/decativate the device. The device is not removed, but only  */
-/* deactivated.                                                      */
-/*********************************************************************/
-static int
-net_close(struct net_device *dev)
-{
-
-	netif_stop_queue(dev);	/* disable queueing */
-
-	flush_tx_buffers((struct net_local *) dev);
-
-	return (0);		/* success */
-}				/* net_close */
-
-/************************************/
-/* send a packet on this interface. */
-/* new style for kernel >= 2.3.33   */
-/************************************/
-static netdev_tx_t
-net_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
-	struct net_local *lp = (struct net_local *) dev;
-
-	spin_lock_irq(&lp->lock);
-
-	lp->skbs[lp->in_idx++] = skb;	/* add to buffer list */
-	if (lp->in_idx >= MAX_SKB_BUFFERS)
-		lp->in_idx = 0;	/* wrap around */
-	lp->sk_count++;		/* adjust counter */
-	netif_trans_update(dev);
-
-	/* If we just used up the very last entry in the
-	 * TX ring on this device, tell the queueing
-	 * layer to send no more.
-	 */
-	if (lp->sk_count >= MAX_SKB_BUFFERS)
-		netif_stop_queue(dev);
-
-	/* When the TX completion hw interrupt arrives, this
-	 * is when the transmit statistics are updated.
-	 */
-
-	spin_unlock_irq(&lp->lock);
-
-	if (lp->sk_count <= 3) {
-		schedule_work(&((hysdn_card *) dev->ml_priv)->irq_queue);
-	}
-	return NETDEV_TX_OK;	/* success */
-}				/* net_send_packet */
-
-
-
-/***********************************************************************/
-/* acknowlegde a packet send. The network layer will be informed about */
-/* completion                                                          */
-/***********************************************************************/
-void
-hysdn_tx_netack(hysdn_card *card)
-{
-	struct net_local *lp = card->netif;
-
-	if (!lp)
-		return;		/* non existing device */
-
-
-	if (!lp->sk_count)
-		return;		/* error condition */
-
-	lp->dev->stats.tx_packets++;
-	lp->dev->stats.tx_bytes += lp->skbs[lp->out_idx]->len;
-
-	dev_kfree_skb(lp->skbs[lp->out_idx++]);		/* free skb */
-	if (lp->out_idx >= MAX_SKB_BUFFERS)
-		lp->out_idx = 0;	/* wrap around */
-
-	if (lp->sk_count-- == MAX_SKB_BUFFERS)	/* dec usage count */
-		netif_start_queue((struct net_device *) lp);
-}				/* hysdn_tx_netack */
-
-/*****************************************************/
-/* we got a packet from the network, go and queue it */
-/*****************************************************/
-void
-hysdn_rx_netpkt(hysdn_card *card, unsigned char *buf, unsigned short len)
-{
-	struct net_local *lp = card->netif;
-	struct net_device *dev;
-	struct sk_buff *skb;
-
-	if (!lp)
-		return;		/* non existing device */
-
-	dev = lp->dev;
-	dev->stats.rx_bytes += len;
-
-	skb = dev_alloc_skb(len);
-	if (skb == NULL) {
-		printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
-		       dev->name);
-		dev->stats.rx_dropped++;
-		return;
-	}
-	/* copy the data */
-	skb_put_data(skb, buf, len);
-
-	/* determine the used protocol */
-	skb->protocol = eth_type_trans(skb, dev);
-
-	dev->stats.rx_packets++;	/* adjust packet count */
-
-	netif_rx(skb);
-}				/* hysdn_rx_netpkt */
-
-/*****************************************************/
-/* return the pointer to a network packet to be send */
-/*****************************************************/
-struct sk_buff *
-hysdn_tx_netget(hysdn_card *card)
-{
-	struct net_local *lp = card->netif;
-
-	if (!lp)
-		return (NULL);	/* non existing device */
-
-	if (!lp->sk_count)
-		return (NULL);	/* nothing available */
-
-	return (lp->skbs[lp->out_idx]);		/* next packet to send */
-}				/* hysdn_tx_netget */
-
-static const struct net_device_ops hysdn_netdev_ops = {
-	.ndo_open		= net_open,
-	.ndo_stop		= net_close,
-	.ndo_start_xmit		= net_send_packet,
-	.ndo_set_mac_address	= eth_mac_addr,
-	.ndo_validate_addr	= eth_validate_addr,
-};
-
-
-/*****************************************************************************/
-/* hysdn_net_create creates a new net device for the given card. If a device */
-/* already exists, it will be deleted and created a new one. The return value */
-/* 0 announces success, else a negative error code will be returned.         */
-/*****************************************************************************/
-int
-hysdn_net_create(hysdn_card *card)
-{
-	struct net_device *dev;
-	int i;
-	struct net_local *lp;
-
-	if (!card) {
-		printk(KERN_WARNING "No card-pt in hysdn_net_create!\n");
-		return (-ENOMEM);
-	}
-	hysdn_net_release(card);	/* release an existing net device */
-
-	dev = alloc_etherdev(sizeof(struct net_local));
-	if (!dev) {
-		printk(KERN_WARNING "HYSDN: unable to allocate mem\n");
-		return (-ENOMEM);
-	}
-
-	lp = netdev_priv(dev);
-	lp->dev = dev;
-
-	dev->netdev_ops = &hysdn_netdev_ops;
-	spin_lock_init(&((struct net_local *) dev)->lock);
-
-	/* initialise necessary or informing fields */
-	dev->base_addr = card->iobase;	/* IO address */
-	dev->irq = card->irq;	/* irq */
-
-	dev->netdev_ops = &hysdn_netdev_ops;
-	if ((i = register_netdev(dev))) {
-		printk(KERN_WARNING "HYSDN: unable to create network device\n");
-		free_netdev(dev);
-		return (i);
-	}
-	dev->ml_priv = card;	/* remember pointer to own data structure */
-	card->netif = dev;	/* setup the local pointer */
-
-	if (card->debug_flags & LOG_NET_INIT)
-		hysdn_addlog(card, "network device created");
-	return 0;		/* and return success */
-}				/* hysdn_net_create */
-
-/***************************************************************************/
-/* hysdn_net_release deletes the net device for the given card. The return */
-/* value 0 announces success, else a negative error code will be returned. */
-/***************************************************************************/
-int
-hysdn_net_release(hysdn_card *card)
-{
-	struct net_device *dev = card->netif;
-
-	if (!dev)
-		return (0);	/* non existing */
-
-	card->netif = NULL;	/* clear out pointer */
-	net_close(dev);
-
-	flush_tx_buffers((struct net_local *) dev);	/* empty buffers */
-
-	unregister_netdev(dev);	/* release the device */
-	free_netdev(dev);	/* release the memory allocated */
-	if (card->debug_flags & LOG_NET_INIT)
-		hysdn_addlog(card, "network device deleted");
-
-	return (0);		/* always successful */
-}				/* hysdn_net_release */
-
-/*****************************************************************************/
-/* hysdn_net_getname returns a pointer to the name of the network interface. */
-/* if the interface is not existing, a "-" is returned.                      */
-/*****************************************************************************/
-char *
-hysdn_net_getname(hysdn_card *card)
-{
-	struct net_device *dev = card->netif;
-
-	if (!dev)
-		return ("-");	/* non existing */
-
-	return (dev->name);
-}				/* hysdn_net_getname */
diff --git a/drivers/staging/isdn/hysdn/hysdn_pof.h b/drivers/staging/isdn/hysdn/hysdn_pof.h
deleted file mode 100644
index f63f5fa..0000000
--- a/drivers/staging/isdn/hysdn/hysdn_pof.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* $Id: hysdn_pof.h,v 1.2.6.1 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, definitions used for handling pof-files.
- *
- * Author    Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/************************/
-/* POF specific defines */
-/************************/
-#define BOOT_BUF_SIZE   0x1000	/* =4096, maybe moved to other h file */
-#define CRYPT_FEEDTERM  0x8142
-#define CRYPT_STARTTERM 0x81a5
-/*  max. timeout time in seconds
- *  from end of booting to POF is ready
- */
-#define POF_READY_TIME_OUT_SEC  10
-
-/**********************************/
-/* defines for 1.stage boot image */
-/**********************************/
-
-/*  the POF file record containing the boot loader image
- *  has 2 pages a 16KB:
- *  1. page contains the high 16-bit part of the 32-bit E1 words
- *  2. page contains the low  16-bit part of the 32-bit E1 words
- *
- *  In each 16KB page we assume the start of the boot loader code
- *  in the highest 2KB part (at offset 0x3800);
- *  the rest (0x0000..0x37FF) is assumed to contain 0 bytes.
- */
-
-#define POF_BOOT_LOADER_PAGE_SIZE   0x4000	/* =16384U */
-#define POF_BOOT_LOADER_TOTAL_SIZE  (2U * POF_BOOT_LOADER_PAGE_SIZE)
-
-#define POF_BOOT_LOADER_CODE_SIZE   0x0800	/* =2KB =2048U */
-
-/* offset in boot page, where loader code may start */
-/* =0x3800= 14336U */
-#define POF_BOOT_LOADER_OFF_IN_PAGE (POF_BOOT_LOADER_PAGE_SIZE-POF_BOOT_LOADER_CODE_SIZE)
-
-
-/*--------------------------------------POF file record structs------------*/
-typedef struct PofFileHdr_tag {	/* Pof file header */
-	/*00 */ unsigned long Magic __attribute__((packed));
-	/*04 */ unsigned long N_PofRecs __attribute__((packed));
-/*08 */
-} tPofFileHdr;
-
-typedef struct PofRecHdr_tag {	/* Pof record header */
-	/*00 */ unsigned short PofRecId __attribute__((packed));
-	/*02 */ unsigned long PofRecDataLen __attribute__((packed));
-/*06 */
-} tPofRecHdr;
-
-typedef struct PofTimeStamp_tag {
-	/*00 */ unsigned long UnixTime __attribute__((packed));
-	/*04 */ unsigned char DateTimeText[0x28];
-	/* =40 */
-/*2C */
-} tPofTimeStamp;
-
-/* tPofFileHdr.Magic value: */
-#define TAGFILEMAGIC 0x464F501AUL
-/* tPofRecHdr.PofRecId values: */
-#define TAG_ABSDATA  0x1000	/* abs. data */
-#define TAG_BOOTDTA  0x1001	/* boot data */
-#define TAG_COMMENT  0x0020
-#define TAG_SYSCALL  0x0021
-#define TAG_FLOWCTRL 0x0022
-#define TAG_TIMESTMP 0x0010	/* date/time stamp of version */
-#define TAG_CABSDATA 0x1100	/* crypted abs. data */
-#define TAG_CBOOTDTA 0x1101	/* crypted boot data */
diff --git a/drivers/staging/isdn/hysdn/hysdn_procconf.c b/drivers/staging/isdn/hysdn/hysdn_procconf.c
deleted file mode 100644
index 48afd9f..0000000
--- a/drivers/staging/isdn/hysdn/hysdn_procconf.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/* $Id: hysdn_procconf.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, /proc/net filesystem dir and conf functions.
- *
- * written by Werner Cornelius (werner@titro.de) for Hypercope GmbH
- *
- * Copyright 1999  by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/cred.h>
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <net/net_namespace.h>
-
-#include "hysdn_defs.h"
-
-static DEFINE_MUTEX(hysdn_conf_mutex);
-
-#define INFO_OUT_LEN 80		/* length of info line including lf */
-
-/********************************************************/
-/* defines and data structure for conf write operations */
-/********************************************************/
-#define CONF_STATE_DETECT 0	/* waiting for detect */
-#define CONF_STATE_CONF   1	/* writing config data */
-#define CONF_STATE_POF    2	/* writing pof data */
-#define CONF_LINE_LEN   255	/* 255 chars max */
-
-struct conf_writedata {
-	hysdn_card *card;	/* card the device is connected to */
-	int buf_size;		/* actual number of bytes in the buffer */
-	int needed_size;	/* needed size when reading pof */
-	int state;		/* actual interface states from above constants */
-	unsigned char conf_line[CONF_LINE_LEN];	/* buffered conf line */
-	unsigned short channel;		/* active channel number */
-	unsigned char *pof_buffer;	/* buffer when writing pof */
-};
-
-/***********************************************************************/
-/* process_line parses one config line and transfers it to the card if */
-/* necessary.                                                          */
-/* if the return value is negative an error occurred.                   */
-/***********************************************************************/
-static int
-process_line(struct conf_writedata *cnf)
-{
-	unsigned char *cp = cnf->conf_line;
-	int i;
-
-	if (cnf->card->debug_flags & LOG_CNF_LINE)
-		hysdn_addlog(cnf->card, "conf line: %s", cp);
-
-	if (*cp == '-') {	/* option */
-		cp++;		/* point to option char */
-
-		if (*cp++ != 'c')
-			return (0);	/* option unknown or used */
-		i = 0;		/* start value for channel */
-		while ((*cp <= '9') && (*cp >= '0'))
-			i = i * 10 + *cp++ - '0';	/* get decimal number */
-		if (i > 65535) {
-			if (cnf->card->debug_flags & LOG_CNF_MISC)
-				hysdn_addlog(cnf->card, "conf channel invalid  %d", i);
-			return (-ERR_INV_CHAN);		/* invalid channel */
-		}
-		cnf->channel = i & 0xFFFF;	/* set new channel number */
-		return (0);	/* success */
-	}			/* option */
-	if (*cp == '*') {	/* line to send */
-		if (cnf->card->debug_flags & LOG_CNF_DATA)
-			hysdn_addlog(cnf->card, "conf chan=%d %s", cnf->channel, cp);
-		return (hysdn_tx_cfgline(cnf->card, cnf->conf_line + 1,
-					 cnf->channel));	/* send the line without * */
-	}			/* line to send */
-	return (0);
-}				/* process_line */
-
-/***********************************/
-/* conf file operations and tables */
-/***********************************/
-
-/****************************************************/
-/* write conf file -> boot or send cfg line to card */
-/****************************************************/
-static ssize_t
-hysdn_conf_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
-{
-	struct conf_writedata *cnf;
-	int i;
-	unsigned char ch, *cp;
-
-	if (!count)
-		return (0);	/* nothing to handle */
-
-	if (!(cnf = file->private_data))
-		return (-EFAULT);	/* should never happen */
-
-	if (cnf->state == CONF_STATE_DETECT) {	/* auto detect cnf or pof data */
-		if (copy_from_user(&ch, buf, 1))	/* get first char for detect */
-			return (-EFAULT);
-
-		if (ch == 0x1A) {
-			/* we detected a pof file */
-			if ((cnf->needed_size = pof_write_open(cnf->card, &cnf->pof_buffer)) <= 0)
-				return (cnf->needed_size);	/* an error occurred -> exit */
-			cnf->buf_size = 0;	/* buffer is empty */
-			cnf->state = CONF_STATE_POF;	/* new state */
-		} else {
-			/* conf data has been detected */
-			cnf->buf_size = 0;	/* buffer is empty */
-			cnf->state = CONF_STATE_CONF;	/* requested conf data write */
-			if (cnf->card->state != CARD_STATE_RUN)
-				return (-ERR_NOT_BOOTED);
-			cnf->conf_line[CONF_LINE_LEN - 1] = 0;	/* limit string length */
-			cnf->channel = 4098;	/* default channel for output */
-		}
-	}			/* state was auto detect */
-	if (cnf->state == CONF_STATE_POF) {	/* pof write active */
-		i = cnf->needed_size - cnf->buf_size;	/* bytes still missing for write */
-		if (i <= 0)
-			return (-EINVAL);	/* size error handling pof */
-
-		if (i < count)
-			count = i;	/* limit requested number of bytes */
-		if (copy_from_user(cnf->pof_buffer + cnf->buf_size, buf, count))
-			return (-EFAULT);	/* error while copying */
-		cnf->buf_size += count;
-
-		if (cnf->needed_size == cnf->buf_size) {
-			cnf->needed_size = pof_write_buffer(cnf->card, cnf->buf_size);	/* write data */
-			if (cnf->needed_size <= 0) {
-				cnf->card->state = CARD_STATE_BOOTERR;	/* show boot error */
-				return (cnf->needed_size);	/* an error occurred */
-			}
-			cnf->buf_size = 0;	/* buffer is empty again */
-		}
-	}
-	/* pof write active */
-	else {			/* conf write active */
-
-		if (cnf->card->state != CARD_STATE_RUN) {
-			if (cnf->card->debug_flags & LOG_CNF_MISC)
-				hysdn_addlog(cnf->card, "cnf write denied -> not booted");
-			return (-ERR_NOT_BOOTED);
-		}
-		i = (CONF_LINE_LEN - 1) - cnf->buf_size;	/* bytes available in buffer */
-		if (i > 0) {
-			/* copy remaining bytes into buffer */
-
-			if (count > i)
-				count = i;	/* limit transfer */
-			if (copy_from_user(cnf->conf_line + cnf->buf_size, buf, count))
-				return (-EFAULT);	/* error while copying */
-
-			i = count;	/* number of chars in buffer */
-			cp = cnf->conf_line + cnf->buf_size;
-			while (i) {
-				/* search for end of line */
-				if ((*cp < ' ') && (*cp != 9))
-					break;	/* end of line found */
-				cp++;
-				i--;
-			}	/* search for end of line */
-
-			if (i) {
-				/* delimiter found */
-				*cp++ = 0;	/* string termination */
-				count -= (i - 1);	/* subtract remaining bytes from count */
-				while ((i) && (*cp < ' ') && (*cp != 9)) {
-					i--;	/* discard next char */
-					count++;	/* mark as read */
-					cp++;	/* next char */
-				}
-				cnf->buf_size = 0;	/* buffer is empty after transfer */
-				if ((i = process_line(cnf)) < 0)	/* handle the line */
-					count = i;	/* return the error */
-			}
-			/* delimiter found */
-			else {
-				cnf->buf_size += count;		/* add chars to string */
-				if (cnf->buf_size >= CONF_LINE_LEN - 1) {
-					if (cnf->card->debug_flags & LOG_CNF_MISC)
-						hysdn_addlog(cnf->card, "cnf line too long %d chars pos %d", cnf->buf_size, count);
-					return (-ERR_CONF_LONG);
-				}
-			}	/* not delimited */
-
-		}
-		/* copy remaining bytes into buffer */
-		else {
-			if (cnf->card->debug_flags & LOG_CNF_MISC)
-				hysdn_addlog(cnf->card, "cnf line too long");
-			return (-ERR_CONF_LONG);
-		}
-	}			/* conf write active */
-
-	return (count);
-}				/* hysdn_conf_write */
-
-/*******************************************/
-/* read conf file -> output card info data */
-/*******************************************/
-static ssize_t
-hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t *off)
-{
-	char *cp;
-
-	if (!(file->f_mode & FMODE_READ))
-		return -EPERM;	/* no permission to read */
-
-	if (!(cp = file->private_data))
-		return -EFAULT;	/* should never happen */
-
-	return simple_read_from_buffer(buf, count, off, cp, strlen(cp));
-}				/* hysdn_conf_read */
-
-/******************/
-/* open conf file */
-/******************/
-static int
-hysdn_conf_open(struct inode *ino, struct file *filep)
-{
-	hysdn_card *card;
-	struct conf_writedata *cnf;
-	char *cp, *tmp;
-
-	/* now search the addressed card */
-	mutex_lock(&hysdn_conf_mutex);
-	card = PDE_DATA(ino);
-	if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
-		hysdn_addlog(card, "config open for uid=%d gid=%d mode=0x%x",
-			     filep->f_cred->fsuid, filep->f_cred->fsgid,
-			     filep->f_mode);
-
-	if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
-		/* write only access -> write boot file or conf line */
-
-		if (!(cnf = kmalloc(sizeof(struct conf_writedata), GFP_KERNEL))) {
-			mutex_unlock(&hysdn_conf_mutex);
-			return (-EFAULT);
-		}
-		cnf->card = card;
-		cnf->buf_size = 0;	/* nothing buffered */
-		cnf->state = CONF_STATE_DETECT;		/* start auto detect */
-		filep->private_data = cnf;
-
-	} else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
-		/* read access -> output card info data */
-
-		if (!(tmp = kmalloc(INFO_OUT_LEN * 2 + 2, GFP_KERNEL))) {
-			mutex_unlock(&hysdn_conf_mutex);
-			return (-EFAULT);	/* out of memory */
-		}
-		filep->private_data = tmp;	/* start of string */
-
-		/* first output a headline */
-		sprintf(tmp, "id bus slot type irq iobase dp-mem     b-chans fax-chans state device");
-		cp = tmp;	/* start of string */
-		while (*cp)
-			cp++;
-		while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
-			*cp++ = ' ';
-		*cp++ = '\n';
-
-		/* and now the data */
-		sprintf(cp, "%d  %3d %4d %4d %3d 0x%04x 0x%08lx %7d %9d %3d   %s",
-			card->myid,
-			card->bus,
-			PCI_SLOT(card->devfn),
-			card->brdtype,
-			card->irq,
-			card->iobase,
-			card->membase,
-			card->bchans,
-			card->faxchans,
-			card->state,
-			hysdn_net_getname(card));
-		while (*cp)
-			cp++;
-		while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
-			*cp++ = ' ';
-		*cp++ = '\n';
-		*cp = 0;	/* end of string */
-	} else {		/* simultaneous read/write access forbidden ! */
-		mutex_unlock(&hysdn_conf_mutex);
-		return (-EPERM);	/* no permission this time */
-	}
-	mutex_unlock(&hysdn_conf_mutex);
-	return nonseekable_open(ino, filep);
-}				/* hysdn_conf_open */
-
-/***************************/
-/* close a config file.    */
-/***************************/
-static int
-hysdn_conf_close(struct inode *ino, struct file *filep)
-{
-	hysdn_card *card;
-	struct conf_writedata *cnf;
-	int retval = 0;
-
-	mutex_lock(&hysdn_conf_mutex);
-	card = PDE_DATA(ino);
-	if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
-		hysdn_addlog(card, "config close for uid=%d gid=%d mode=0x%x",
-			     filep->f_cred->fsuid, filep->f_cred->fsgid,
-			     filep->f_mode);
-
-	if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
-		/* write only access -> write boot file or conf line */
-		if (filep->private_data) {
-			cnf = filep->private_data;
-
-			if (cnf->state == CONF_STATE_POF)
-				retval = pof_write_close(cnf->card);	/* close the pof write */
-			kfree(filep->private_data);	/* free allocated memory for buffer */
-
-		}		/* handle write private data */
-	} else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
-		/* read access -> output card info data */
-
-		kfree(filep->private_data);	/* release memory */
-	}
-	mutex_unlock(&hysdn_conf_mutex);
-	return (retval);
-}				/* hysdn_conf_close */
-
-/******************************************************/
-/* table for conf filesystem functions defined above. */
-/******************************************************/
-static const struct file_operations conf_fops =
-{
-	.owner		= THIS_MODULE,
-	.llseek         = no_llseek,
-	.read           = hysdn_conf_read,
-	.write          = hysdn_conf_write,
-	.open           = hysdn_conf_open,
-	.release        = hysdn_conf_close,
-};
-
-/*****************************/
-/* hysdn subdir in /proc/net */
-/*****************************/
-struct proc_dir_entry *hysdn_proc_entry = NULL;
-
-/*******************************************************************************/
-/* hysdn_procconf_init is called when the module is loaded and after the cards */
-/* have been detected. The needed proc dir and card config files are created.  */
-/* The log init is called at last.                                             */
-/*******************************************************************************/
-int
-hysdn_procconf_init(void)
-{
-	hysdn_card *card;
-	unsigned char conf_name[20];
-
-	hysdn_proc_entry = proc_mkdir(PROC_SUBDIR_NAME, init_net.proc_net);
-	if (!hysdn_proc_entry) {
-		printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n");
-		return (-1);
-	}
-	card = card_root;	/* point to first card */
-	while (card) {
-
-		sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
-		if ((card->procconf = (void *) proc_create_data(conf_name,
-							   S_IFREG | S_IRUGO | S_IWUSR,
-							   hysdn_proc_entry,
-							   &conf_fops,
-							   card)) != NULL) {
-			hysdn_proclog_init(card);	/* init the log file entry */
-		}
-		card = card->next;	/* next entry */
-	}
-
-	printk(KERN_NOTICE "HYSDN: procfs initialised\n");
-	return 0;
-}				/* hysdn_procconf_init */
-
-/*************************************************************************************/
-/* hysdn_procconf_release is called when the module is unloaded and before the cards */
-/* resources are released. The module counter is assumed to be 0 !                   */
-/*************************************************************************************/
-void
-hysdn_procconf_release(void)
-{
-	hysdn_card *card;
-	unsigned char conf_name[20];
-
-	card = card_root;	/* start with first card */
-	while (card) {
-
-		sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
-		if (card->procconf)
-			remove_proc_entry(conf_name, hysdn_proc_entry);
-
-		hysdn_proclog_release(card);	/* release the log file entry */
-
-		card = card->next;	/* point to next card */
-	}
-
-	remove_proc_entry(PROC_SUBDIR_NAME, init_net.proc_net);
-}
diff --git a/drivers/staging/isdn/hysdn/hysdn_proclog.c b/drivers/staging/isdn/hysdn/hysdn_proclog.c
deleted file mode 100644
index 6e898b9..0000000
--- a/drivers/staging/isdn/hysdn/hysdn_proclog.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/* $Id: hysdn_proclog.c,v 1.9.6.3 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, /proc/net filesystem log functions.
- *
- * Author    Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/kernel.h>
-
-#include "hysdn_defs.h"
-
-/* the proc subdir for the interface is defined in the procconf module */
-extern struct proc_dir_entry *hysdn_proc_entry;
-
-static DEFINE_MUTEX(hysdn_log_mutex);
-static void put_log_buffer(hysdn_card *card, char *cp);
-
-/*************************************************/
-/* structure keeping ascii log for device output */
-/*************************************************/
-struct log_data {
-	struct log_data *next;
-	unsigned long usage_cnt;/* number of files still to work */
-	void *proc_ctrl;	/* pointer to own control procdata structure */
-	char log_start[2];	/* log string start (final len aligned by size) */
-};
-
-/**********************************************/
-/* structure holding proc entrys for one card */
-/**********************************************/
-struct procdata {
-	struct proc_dir_entry *log;	/* log entry */
-	char log_name[15];	/* log filename */
-	struct log_data *log_head, *log_tail;	/* head and tail for queue */
-	int if_used;		/* open count for interface */
-	unsigned char logtmp[LOG_MAX_LINELEN];
-	wait_queue_head_t rd_queue;
-};
-
-
-/**********************************************/
-/* log function for cards error log interface */
-/**********************************************/
-void
-hysdn_card_errlog(hysdn_card *card, tErrLogEntry *logp, int maxsize)
-{
-	char buf[ERRLOG_TEXT_SIZE + 40];
-
-	sprintf(buf, "LOG 0x%08lX 0x%08lX : %s\n", logp->ulErrType, logp->ulErrSubtype, logp->ucText);
-	put_log_buffer(card, buf);	/* output the string */
-}				/* hysdn_card_errlog */
-
-/***************************************************/
-/* Log function using format specifiers for output */
-/***************************************************/
-void
-hysdn_addlog(hysdn_card *card, char *fmt, ...)
-{
-	struct procdata *pd = card->proclog;
-	char *cp;
-	va_list args;
-
-	if (!pd)
-		return;		/* log structure non existent */
-
-	cp = pd->logtmp;
-	cp += sprintf(cp, "HYSDN: card %d ", card->myid);
-
-	va_start(args, fmt);
-	cp += vsprintf(cp, fmt, args);
-	va_end(args);
-	*cp++ = '\n';
-	*cp = 0;
-
-	if (card->debug_flags & DEB_OUT_SYSLOG)
-		printk(KERN_INFO "%s", pd->logtmp);
-	else
-		put_log_buffer(card, pd->logtmp);
-
-}				/* hysdn_addlog */
-
-/********************************************/
-/* put an log buffer into the log queue.    */
-/* This buffer will be kept until all files */
-/* opened for read got the contents.        */
-/* Flushes buffers not longer in use.       */
-/********************************************/
-static void
-put_log_buffer(hysdn_card *card, char *cp)
-{
-	struct log_data *ib;
-	struct procdata *pd = card->proclog;
-	unsigned long flags;
-
-	if (!pd)
-		return;
-	if (!cp)
-		return;
-	if (!*cp)
-		return;
-	if (pd->if_used <= 0)
-		return;		/* no open file for read */
-
-	if (!(ib = kmalloc(sizeof(struct log_data) + strlen(cp), GFP_ATOMIC)))
-		return;	/* no memory */
-	strcpy(ib->log_start, cp);	/* set output string */
-	ib->next = NULL;
-	ib->proc_ctrl = pd;	/* point to own control structure */
-	spin_lock_irqsave(&card->hysdn_lock, flags);
-	ib->usage_cnt = pd->if_used;
-	if (!pd->log_head)
-		pd->log_head = ib;	/* new head */
-	else
-		pd->log_tail->next = ib;	/* follows existing messages */
-	pd->log_tail = ib;	/* new tail */
-
-	/* delete old entries */
-	while (pd->log_head->next) {
-		if ((pd->log_head->usage_cnt <= 0) &&
-		    (pd->log_head->next->usage_cnt <= 0)) {
-			ib = pd->log_head;
-			pd->log_head = pd->log_head->next;
-			kfree(ib);
-		} else {
-			break;
-		}
-	}		/* pd->log_head->next */
-
-	spin_unlock_irqrestore(&card->hysdn_lock, flags);
-
-	wake_up_interruptible(&(pd->rd_queue));		/* announce new entry */
-}				/* put_log_buffer */
-
-
-/******************************/
-/* file operations and tables */
-/******************************/
-
-/****************************************/
-/* write log file -> set log level bits */
-/****************************************/
-static ssize_t
-hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
-{
-	int rc;
-	hysdn_card *card = file->private_data;
-
-	rc = kstrtoul_from_user(buf, count, 0, &card->debug_flags);
-	if (rc < 0)
-		return rc;
-	hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags);
-	return (count);
-}				/* hysdn_log_write */
-
-/******************/
-/* read log file */
-/******************/
-static ssize_t
-hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off)
-{
-	struct log_data *inf;
-	int len;
-	hysdn_card *card = PDE_DATA(file_inode(file));
-
-	if (!(inf = *((struct log_data **) file->private_data))) {
-		struct procdata *pd = card->proclog;
-		if (file->f_flags & O_NONBLOCK)
-			return (-EAGAIN);
-
-		wait_event_interruptible(pd->rd_queue, (inf =
-				*((struct log_data **) file->private_data)));
-	}
-	if (!inf)
-		return (0);
-
-	inf->usage_cnt--;	/* new usage count */
-	file->private_data = &inf->next;	/* next structure */
-	if ((len = strlen(inf->log_start)) <= count) {
-		if (copy_to_user(buf, inf->log_start, len))
-			return -EFAULT;
-		*off += len;
-		return (len);
-	}
-	return (0);
-}				/* hysdn_log_read */
-
-/******************/
-/* open log file */
-/******************/
-static int
-hysdn_log_open(struct inode *ino, struct file *filep)
-{
-	hysdn_card *card = PDE_DATA(ino);
-
-	mutex_lock(&hysdn_log_mutex);
-	if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
-		/* write only access -> write log level only */
-		filep->private_data = card;	/* remember our own card */
-	} else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
-		struct procdata *pd = card->proclog;
-		unsigned long flags;
-
-		/* read access -> log/debug read */
-		spin_lock_irqsave(&card->hysdn_lock, flags);
-		pd->if_used++;
-		if (pd->log_head)
-			filep->private_data = &pd->log_tail->next;
-		else
-			filep->private_data = &pd->log_head;
-		spin_unlock_irqrestore(&card->hysdn_lock, flags);
-	} else {		/* simultaneous read/write access forbidden ! */
-		mutex_unlock(&hysdn_log_mutex);
-		return (-EPERM);	/* no permission this time */
-	}
-	mutex_unlock(&hysdn_log_mutex);
-	return nonseekable_open(ino, filep);
-}				/* hysdn_log_open */
-
-/*******************************************************************************/
-/* close a cardlog file. If the file has been opened for exclusive write it is */
-/* assumed as pof data input and the pof loader is noticed about.              */
-/* Otherwise file is handled as log output. In this case the interface usage   */
-/* count is decremented and all buffers are noticed of closing. If this file   */
-/* was the last one to be closed, all buffers are freed.                       */
-/*******************************************************************************/
-static int
-hysdn_log_close(struct inode *ino, struct file *filep)
-{
-	struct log_data *inf;
-	struct procdata *pd;
-	hysdn_card *card;
-	int retval = 0;
-
-	mutex_lock(&hysdn_log_mutex);
-	if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
-		/* write only access -> write debug level written */
-		retval = 0;	/* success */
-	} else {
-		/* read access -> log/debug read, mark one further file as closed */
-
-		inf = *((struct log_data **) filep->private_data);	/* get first log entry */
-		if (inf)
-			pd = (struct procdata *) inf->proc_ctrl;	/* still entries there */
-		else {
-			/* no info available -> search card */
-			card = PDE_DATA(file_inode(filep));
-			pd = card->proclog;	/* pointer to procfs log */
-		}
-		if (pd)
-			pd->if_used--;	/* decrement interface usage count by one */
-
-		while (inf) {
-			inf->usage_cnt--;	/* decrement usage count for buffers */
-			inf = inf->next;
-		}
-
-		if (pd)
-			if (pd->if_used <= 0)	/* delete buffers if last file closed */
-				while (pd->log_head) {
-					inf = pd->log_head;
-					pd->log_head = pd->log_head->next;
-					kfree(inf);
-				}
-	}			/* read access */
-	mutex_unlock(&hysdn_log_mutex);
-
-	return (retval);
-}				/* hysdn_log_close */
-
-/*************************************************/
-/* select/poll routine to be able using select() */
-/*************************************************/
-static __poll_t
-hysdn_log_poll(struct file *file, poll_table *wait)
-{
-	__poll_t mask = 0;
-	hysdn_card *card = PDE_DATA(file_inode(file));
-	struct procdata *pd = card->proclog;
-
-	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE)
-		return (mask);	/* no polling for write supported */
-
-	poll_wait(file, &(pd->rd_queue), wait);
-
-	if (*((struct log_data **) file->private_data))
-		mask |= EPOLLIN | EPOLLRDNORM;
-
-	return mask;
-}				/* hysdn_log_poll */
-
-/**************************************************/
-/* table for log filesystem functions defined above. */
-/**************************************************/
-static const struct file_operations log_fops =
-{
-	.owner		= THIS_MODULE,
-	.llseek         = no_llseek,
-	.read           = hysdn_log_read,
-	.write          = hysdn_log_write,
-	.poll           = hysdn_log_poll,
-	.open           = hysdn_log_open,
-	.release        = hysdn_log_close,
-};
-
-
-/***********************************************************************************/
-/* hysdn_proclog_init is called when the module is loaded after creating the cards */
-/* conf files.                                                                     */
-/***********************************************************************************/
-int
-hysdn_proclog_init(hysdn_card *card)
-{
-	struct procdata *pd;
-
-	/* create a cardlog proc entry */
-
-	if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) {
-		sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid);
-		pd->log = proc_create_data(pd->log_name,
-				      S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry,
-				      &log_fops, card);
-
-		init_waitqueue_head(&(pd->rd_queue));
-
-		card->proclog = (void *) pd;	/* remember procfs structure */
-	}
-	return (0);
-}				/* hysdn_proclog_init */
-
-/************************************************************************************/
-/* hysdn_proclog_release is called when the module is unloaded and before the cards */
-/* conf file is released                                                            */
-/* The module counter is assumed to be 0 !                                          */
-/************************************************************************************/
-void
-hysdn_proclog_release(hysdn_card *card)
-{
-	struct procdata *pd;
-
-	if ((pd = (struct procdata *) card->proclog) != NULL) {
-		if (pd->log)
-			remove_proc_entry(pd->log_name, hysdn_proc_entry);
-		kfree(pd);	/* release memory */
-		card->proclog = NULL;
-	}
-}				/* hysdn_proclog_release */
diff --git a/drivers/staging/isdn/hysdn/hysdn_sched.c b/drivers/staging/isdn/hysdn/hysdn_sched.c
deleted file mode 100644
index 31d7c14..0000000
--- a/drivers/staging/isdn/hysdn/hysdn_sched.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/* $Id: hysdn_sched.c,v 1.5.6.4 2001/11/06 21:58:19 kai Exp $
- *
- * Linux driver for HYSDN cards
- * scheduler routines for handling exchange card <-> pc.
- *
- * Author    Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#include "hysdn_defs.h"
-
-/*****************************************************************************/
-/* hysdn_sched_rx is called from the cards handler to announce new data is   */
-/* available from the card. The routine has to handle the data and return    */
-/* with a nonzero code if the data could be worked (or even thrown away), if */
-/* no room to buffer the data is available a zero return tells the card      */
-/* to keep the data until later.                                             */
-/*****************************************************************************/
-int
-hysdn_sched_rx(hysdn_card *card, unsigned char *buf, unsigned short len,
-	       unsigned short chan)
-{
-
-	switch (chan) {
-	case CHAN_NDIS_DATA:
-		if (hynet_enable & (1 << card->myid)) {
-			/* give packet to network handler */
-			hysdn_rx_netpkt(card, buf, len);
-		}
-		break;
-
-	case CHAN_ERRLOG:
-		hysdn_card_errlog(card, (tErrLogEntry *) buf, len);
-		if (card->err_log_state == ERRLOG_STATE_ON)
-			card->err_log_state = ERRLOG_STATE_START;	/* start new fetch */
-		break;
-#ifdef CONFIG_HYSDN_CAPI
-	case CHAN_CAPI:
-/* give packet to CAPI handler */
-		if (hycapi_enable & (1 << card->myid)) {
-			hycapi_rx_capipkt(card, buf, len);
-		}
-		break;
-#endif /* CONFIG_HYSDN_CAPI */
-	default:
-		printk(KERN_INFO "irq message channel %d len %d unhandled \n", chan, len);
-		break;
-
-	}			/* switch rx channel */
-
-	return (1);		/* always handled */
-}				/* hysdn_sched_rx */
-
-/*****************************************************************************/
-/* hysdn_sched_tx is called from the cards handler to announce that there is */
-/* room in the tx-buffer to the card and data may be sent if needed.         */
-/* If the routine wants to send data it must fill buf, len and chan with the */
-/* appropriate data and return a nonzero value. With a zero return no new    */
-/* data to send is assumed. maxlen specifies the buffer size available for   */
-/* sending.                                                                  */
-/*****************************************************************************/
-int
-hysdn_sched_tx(hysdn_card *card, unsigned char *buf,
-	       unsigned short volatile *len, unsigned short volatile *chan,
-	       unsigned short maxlen)
-{
-	struct sk_buff *skb;
-
-	if (card->net_tx_busy) {
-		card->net_tx_busy = 0;	/* reset flag */
-		hysdn_tx_netack(card);	/* acknowledge packet send */
-	}			/* a network packet has completely been transferred */
-	/* first of all async requests are handled */
-	if (card->async_busy) {
-		if (card->async_len <= maxlen) {
-			memcpy(buf, card->async_data, card->async_len);
-			*len = card->async_len;
-			*chan = card->async_channel;
-			card->async_busy = 0;	/* reset request */
-			return (1);
-		}
-		card->async_busy = 0;	/* in case of length error */
-	}			/* async request */
-	if ((card->err_log_state == ERRLOG_STATE_START) &&
-	    (maxlen >= ERRLOG_CMD_REQ_SIZE)) {
-		strcpy(buf, ERRLOG_CMD_REQ);	/* copy the command */
-		*len = ERRLOG_CMD_REQ_SIZE;	/* buffer length */
-		*chan = CHAN_ERRLOG;	/* and channel */
-		card->err_log_state = ERRLOG_STATE_ON;	/* new state is on */
-		return (1);	/* tell that data should be sent */
-	}			/* error log start and able to send */
-	if ((card->err_log_state == ERRLOG_STATE_STOP) &&
-	    (maxlen >= ERRLOG_CMD_STOP_SIZE)) {
-		strcpy(buf, ERRLOG_CMD_STOP);	/* copy the command */
-		*len = ERRLOG_CMD_STOP_SIZE;	/* buffer length */
-		*chan = CHAN_ERRLOG;	/* and channel */
-		card->err_log_state = ERRLOG_STATE_OFF;		/* new state is off */
-		return (1);	/* tell that data should be sent */
-	}			/* error log start and able to send */
-	/* now handle network interface packets */
-	if ((hynet_enable & (1 << card->myid)) &&
-	    (skb = hysdn_tx_netget(card)) != NULL)
-	{
-		if (skb->len <= maxlen) {
-			/* copy the packet to the buffer */
-			skb_copy_from_linear_data(skb, buf, skb->len);
-			*len = skb->len;
-			*chan = CHAN_NDIS_DATA;
-			card->net_tx_busy = 1;	/* we are busy sending network data */
-			return (1);	/* go and send the data */
-		} else
-			hysdn_tx_netack(card);	/* acknowledge packet -> throw away */
-	}			/* send a network packet if available */
-#ifdef CONFIG_HYSDN_CAPI
-	if (((hycapi_enable & (1 << card->myid))) &&
-	    ((skb = hycapi_tx_capiget(card)) != NULL))
-	{
-		if (skb->len <= maxlen) {
-			skb_copy_from_linear_data(skb, buf, skb->len);
-			*len = skb->len;
-			*chan = CHAN_CAPI;
-			hycapi_tx_capiack(card);
-			return (1);	/* go and send the data */
-		}
-	}
-#endif /* CONFIG_HYSDN_CAPI */
-	return (0);		/* nothing to send */
-}				/* hysdn_sched_tx */
-
-
-/*****************************************************************************/
-/* send one config line to the card and return 0 if successful, otherwise a */
-/* negative error code.                                                      */
-/* The function works with timeouts perhaps not giving the greatest speed    */
-/* sending the line, but this should be meaningless because only some lines  */
-/* are to be sent and this happens very seldom.                              */
-/*****************************************************************************/
-int
-hysdn_tx_cfgline(hysdn_card *card, unsigned char *line, unsigned short chan)
-{
-	int cnt = 50;		/* timeout intervals */
-	unsigned long flags;
-
-	if (card->debug_flags & LOG_SCHED_ASYN)
-		hysdn_addlog(card, "async tx-cfg chan=%d len=%d", chan, strlen(line) + 1);
-
-	while (card->async_busy) {
-
-		if (card->debug_flags & LOG_SCHED_ASYN)
-			hysdn_addlog(card, "async tx-cfg delayed");
-
-		msleep_interruptible(20);		/* Timeout 20ms */
-		if (!--cnt)
-			return (-ERR_ASYNC_TIME);	/* timed out */
-	}			/* wait for buffer to become free */
-
-	spin_lock_irqsave(&card->hysdn_lock, flags);
-	strcpy(card->async_data, line);
-	card->async_len = strlen(line) + 1;
-	card->async_channel = chan;
-	card->async_busy = 1;	/* request transfer */
-
-	/* now queue the task */
-	schedule_work(&card->irq_queue);
-	spin_unlock_irqrestore(&card->hysdn_lock, flags);
-
-	if (card->debug_flags & LOG_SCHED_ASYN)
-		hysdn_addlog(card, "async tx-cfg data queued");
-
-	cnt++;			/* short delay */
-
-	while (card->async_busy) {
-
-		if (card->debug_flags & LOG_SCHED_ASYN)
-			hysdn_addlog(card, "async tx-cfg waiting for tx-ready");
-
-		msleep_interruptible(20);		/* Timeout 20ms */
-		if (!--cnt)
-			return (-ERR_ASYNC_TIME);	/* timed out */
-	}			/* wait for buffer to become free again */
-
-	if (card->debug_flags & LOG_SCHED_ASYN)
-		hysdn_addlog(card, "async tx-cfg data send");
-
-	return (0);		/* line sent correctly */
-}				/* hysdn_tx_cfgline */
diff --git a/drivers/staging/isdn/hysdn/ince1pc.h b/drivers/staging/isdn/hysdn/ince1pc.h
deleted file mode 100644
index cab6836..0000000
--- a/drivers/staging/isdn/hysdn/ince1pc.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Linux driver for HYSDN cards
- * common definitions for both sides of the bus:
- * - conventions both spoolers must know
- * - channel numbers agreed upon
- *
- * Author    M. Steinkopf
- * Copyright 1999 by M. Steinkopf
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef __INCE1PC_H__
-#define __INCE1PC_H__
-
-/*  basic scalar definitions have the same meaning,
- *  but their declaration location depends on environment
- */
-
-/*--------------------------------------channel numbers---------------------*/
-#define CHAN_SYSTEM     0x0001      /* system channel (spooler to spooler) */
-#define CHAN_ERRLOG     0x0005      /* error logger */
-#define CHAN_CAPI       0x0064      /* CAPI interface */
-#define CHAN_NDIS_DATA  0x1001      /* NDIS data transfer */
-
-/*--------------------------------------POF ready msg-----------------------*/
-/* NOTE: after booting POF sends system ready message to PC: */
-#define RDY_MAGIC       0x52535953UL    /* 'SYSR' reversed */
-#define RDY_MAGIC_SIZE  4               /* size in bytes */
-
-#define MAX_N_TOK_BYTES 255
-
-#define MIN_RDY_MSG_SIZE    RDY_MAGIC_SIZE
-#define MAX_RDY_MSG_SIZE    (RDY_MAGIC_SIZE + MAX_N_TOK_BYTES)
-
-#define SYSR_TOK_END            0
-#define SYSR_TOK_B_CHAN         1   /* nr. of B-Channels;   DataLen=1; def: 2 */
-#define SYSR_TOK_FAX_CHAN       2   /* nr. of FAX Channels; DataLen=1; def: 0 */
-#define SYSR_TOK_MAC_ADDR       3   /* MAC-Address; DataLen=6; def: auto */
-#define SYSR_TOK_ESC            255 /* undefined data size yet */
-/* default values, if not corrected by token: */
-#define SYSR_TOK_B_CHAN_DEF     2   /* assume 2 B-Channels */
-#define SYSR_TOK_FAX_CHAN_DEF   1   /* assume 1 FAX Channel */
-
-/*  syntax of new SYSR token stream:
- *  channel: CHAN_SYSTEM
- *  msgsize: MIN_RDY_MSG_SIZE <= x <= MAX_RDY_MSG_SIZE
- *           RDY_MAGIC_SIZE   <= x <= (RDY_MAGIC_SIZE+MAX_N_TOK_BYTES)
- *  msg    : 0 1 2 3 {4 5 6 ..}
- *           S Y S R  MAX_N_TOK_BYTES bytes of TokenStream
- *
- *  TokenStream     :=   empty
- *                     | {NonEndTokenChunk} EndToken RotlCRC
- *  NonEndTokenChunk:= NonEndTokenId DataLen [Data]
- *  NonEndTokenId   := 0x01 .. 0xFE                 1 BYTE
- *  DataLen         := 0x00 .. 0xFF                 1 BYTE
- *  Data            := DataLen bytes
- *  EndToken        := 0x00
- *  RotlCRC         := special 1 byte CRC over all NonEndTokenChunk bytes
- *                     s. RotlCRC algorithm
- *
- *  RotlCRC algorithm:
- *      ucSum= 0                        1 unsigned char
- *      for all NonEndTokenChunk bytes:
- *          ROTL(ucSum,1)               rotate left by 1
- *          ucSum += Char;              add current byte with swap around
- *      RotlCRC= ~ucSum;                invert all bits for result
- *
- *  note:
- *  - for 16-bit FIFO add padding 0 byte to achieve even token data bytes!
- */
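The RotlCRC description above is sufficient to reimplement the check; here is a minimal C sketch (the function name and buffer handling are illustrative, not part of the driver):

	/* Sketch only: compute the RotlCRC over the NonEndTokenChunk bytes as
	 * described above - rotate an 8-bit accumulator left by one, add the
	 * current byte (wrapping), and finally invert all bits.
	 */
	static unsigned char rotl_crc(const unsigned char *chunk, unsigned int len)
	{
		unsigned char sum = 0;
		unsigned int i;

		for (i = 0; i < len; i++) {
			sum = (sum << 1) | (sum >> 7);	/* ROTL(sum, 1) */
			sum += chunk[i];		/* add with wrap-around */
		}
		return ~sum;				/* invert for the result */
	}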
-
-/*--------------------------------------error logger------------------------*/
-/* note: pof needs final 0 ! */
-#define ERRLOG_CMD_REQ          "ERRLOG ON"
-#define ERRLOG_CMD_REQ_SIZE     10              /* with final 0 byte ! */
-#define ERRLOG_CMD_STOP         "ERRLOG OFF"
-#define ERRLOG_CMD_STOP_SIZE    11              /* with final 0 byte ! */
-
-#define ERRLOG_ENTRY_SIZE       64      /* sizeof(tErrLogEntry) */
-					/* remaining text size = 55 */
-#define ERRLOG_TEXT_SIZE    (ERRLOG_ENTRY_SIZE - 2 * 4 - 1)
-
-typedef struct ErrLogEntry_tag {
-
-	/*00 */ unsigned long ulErrType;
-
-	/*04 */ unsigned long ulErrSubtype;
-
-	/*08 */ unsigned char ucTextSize;
-
-	/*09 */ unsigned char ucText[ERRLOG_TEXT_SIZE];
-	/* ASCIIZ of len ucTextSize-1 */
-
-/*40 */
-} tErrLogEntry;
-
-
-#if defined(__TURBOC__)
-#if sizeof(tErrLogEntry) != ERRLOG_ENTRY_SIZE
-#error size of tErrLogEntry != ERRLOG_ENTRY_SIZE
-#endif				/*  */
-#endif				/*  */
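A standard C preprocessor cannot evaluate sizeof() inside an #if, so the __TURBOC__ guard above mostly documents the intent. A hedged modern equivalent would be a compile-time assertion (sketch only, not part of the original header):

	/* Keep the on-wire layout fixed: the card firmware expects exactly
	 * ERRLOG_ENTRY_SIZE (64) bytes per entry. In kernel code, BUILD_BUG_ON()
	 * inside an init function serves the same purpose.
	 */
	_Static_assert(sizeof(tErrLogEntry) == ERRLOG_ENTRY_SIZE,
		       "tErrLogEntry must match ERRLOG_ENTRY_SIZE");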
-
-/*--------------------------------------DPRAM boot spooler------------------*/
-/*  this is the structure used between pc and
- *  hyperstone to exchange boot data
- */
-#define DPRAM_SPOOLER_DATA_SIZE 0x20
-typedef struct DpramBootSpooler_tag {
-
-	/*00 */ unsigned char Len;
-
-	/*01 */ volatile unsigned char RdPtr;
-
-	/*02 */ unsigned char WrPtr;
-
-	/*03 */ unsigned char Data[DPRAM_SPOOLER_DATA_SIZE];
-
-/*23 */
-} tDpramBootSpooler;
-
-
-#define DPRAM_SPOOLER_MIN_SIZE  5       /* Len+RdPtr+Wrptr+2*data */
-#define DPRAM_SPOOLER_DEF_SIZE  0x23    /* current default size   */
-
-/*--------------------------------------HYCARD/ERGO DPRAM SoftUart----------*/
-/* at DPRAM offset 0x1C00: */
-#define SIZE_RSV_SOFT_UART  0x1B0   /* 432 bytes reserved for SoftUart */
-
-
-#endif	/* __INCE1PC_H__ */
diff --git a/drivers/staging/kpc2000/kpc2000_i2c.c b/drivers/staging/kpc2000/kpc2000_i2c.c
index 592099a..25bb5c9 100644
--- a/drivers/staging/kpc2000/kpc2000_i2c.c
+++ b/drivers/staging/kpc2000/kpc2000_i2c.c
@@ -32,7 +32,7 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Matt.Sickler@Daktronics.com");
 
-struct i2c_device {
+struct kpc_i2c {
 	unsigned long           smba;
 	struct i2c_adapter      adapter;
 	unsigned int            features;
@@ -131,7 +131,7 @@ struct i2c_device {
 /* Make sure the SMBus host is ready to start transmitting.
  * Return 0 if it is, -EBUSY if it is not.
  */
-static int i801_check_pre(struct i2c_device *priv)
+static int i801_check_pre(struct kpc_i2c *priv)
 {
 	int status;
 
@@ -156,7 +156,7 @@ static int i801_check_pre(struct i2c_device *priv)
 }
 
 /* Convert the status register to an error code, and clear it. */
-static int i801_check_post(struct i2c_device *priv, int status, int timeout)
+static int i801_check_post(struct kpc_i2c *priv, int status, int timeout)
 {
 	int result = 0;
 
@@ -206,7 +206,7 @@ static int i801_check_post(struct i2c_device *priv, int status, int timeout)
 	return result;
 }
 
-static int i801_transaction(struct i2c_device *priv, int xact)
+static int i801_transaction(struct kpc_i2c *priv, int xact)
 {
 	int status;
 	int result;
@@ -235,7 +235,7 @@ static int i801_transaction(struct i2c_device *priv, int xact)
 }
 
 /* wait for INTR bit as advised by Intel */
-static void i801_wait_hwpec(struct i2c_device *priv)
+static void i801_wait_hwpec(struct kpc_i2c *priv)
 {
 	int timeout = 0;
 	int status;
@@ -251,7 +251,7 @@ static void i801_wait_hwpec(struct i2c_device *priv)
 	outb_p(status, SMBHSTSTS(priv));
 }
 
-static int i801_block_transaction_by_block(struct i2c_device *priv,
+static int i801_block_transaction_by_block(struct kpc_i2c *priv,
 					   union i2c_smbus_data *data,
 					   char read_write, int hwpec)
 {
@@ -285,7 +285,7 @@ static int i801_block_transaction_by_block(struct i2c_device *priv,
 	return 0;
 }
 
-static int i801_block_transaction_byte_by_byte(struct i2c_device *priv,
+static int i801_block_transaction_byte_by_byte(struct kpc_i2c *priv,
 					       union i2c_smbus_data *data,
 					       char read_write, int command,
 					       int hwpec)
@@ -367,7 +367,7 @@ static int i801_block_transaction_byte_by_byte(struct i2c_device *priv,
 	return 0;
 }
 
-static int i801_set_block_buffer_mode(struct i2c_device *priv)
+static int i801_set_block_buffer_mode(struct kpc_i2c *priv)
 {
 	outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
 	if ((inb_p(SMBAUXCTL(priv)) & SMBAUXCTL_E32B) == 0)
@@ -376,7 +376,7 @@ static int i801_set_block_buffer_mode(struct i2c_device *priv)
 }
 
 /* Block transaction function */
-static int i801_block_transaction(struct i2c_device *priv,
+static int i801_block_transaction(struct kpc_i2c *priv,
 				  union i2c_smbus_data *data, char read_write,
 				  int command, int hwpec)
 {
@@ -440,7 +440,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
 	int hwpec;
 	int block = 0;
 	int ret, xact = 0;
-	struct i2c_device *priv = i2c_get_adapdata(adap);
+	struct kpc_i2c *priv = i2c_get_adapdata(adap);
 
 	hwpec = (priv->features & FEATURE_SMBUS_PEC) &&
 		(flags & I2C_CLIENT_PEC) &&
@@ -572,9 +572,13 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
 	return 0;
 }
 
+#define enable_flag(x) (x)
+#define disable_flag(x) 0
+#define enable_flag_if(x, cond) ((cond) ? (x) : 0)
+
 static u32 i801_func(struct i2c_adapter *adapter)
 {
-	struct i2c_device *priv = i2c_get_adapdata(adapter);
+	struct kpc_i2c *priv = i2c_get_adapdata(adapter);
 
 	/* original settings
 	 * u32 f = I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
@@ -588,42 +592,42 @@ static u32 i801_func(struct i2c_adapter *adapter)
 	// http://lxr.free-electrons.com/source/include/uapi/linux/i2c.h#L85
 
 	u32 f =
-		I2C_FUNC_I2C                     | /* 0x00000001(I enabled this
-						    * one)
-						    */
-		!I2C_FUNC_10BIT_ADDR             |		/* 0x00000002 */
-		!I2C_FUNC_PROTOCOL_MANGLING      |		/* 0x00000004 */
-		((priv->features & FEATURE_SMBUS_PEC) ?
-			I2C_FUNC_SMBUS_PEC : 0)  |		/* 0x00000008 */
-		!I2C_FUNC_SMBUS_BLOCK_PROC_CALL  |		/* 0x00008000 */
-		I2C_FUNC_SMBUS_QUICK             |		/* 0x00010000 */
-		!I2C_FUNC_SMBUS_READ_BYTE	 |		/* 0x00020000 */
-		!I2C_FUNC_SMBUS_WRITE_BYTE       |		/* 0x00040000 */
-		!I2C_FUNC_SMBUS_READ_BYTE_DATA   |		/* 0x00080000 */
-		!I2C_FUNC_SMBUS_WRITE_BYTE_DATA  |		/* 0x00100000 */
-		!I2C_FUNC_SMBUS_READ_WORD_DATA   |		/* 0x00200000 */
-		!I2C_FUNC_SMBUS_WRITE_WORD_DATA  |		/* 0x00400000 */
-		!I2C_FUNC_SMBUS_PROC_CALL        |		/* 0x00800000 */
-		!I2C_FUNC_SMBUS_READ_BLOCK_DATA  |		/* 0x01000000 */
-		!I2C_FUNC_SMBUS_WRITE_BLOCK_DATA |		/* 0x02000000 */
-		((priv->features & FEATURE_I2C_BLOCK_READ) ?
-			I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0) |	/* 0x04000000 */
-		I2C_FUNC_SMBUS_WRITE_I2C_BLOCK   |		/* 0x08000000 */
+		enable_flag(I2C_FUNC_I2C) | /* 0x00000001(I enabled this one) */
+		disable_flag(I2C_FUNC_10BIT_ADDR)             | /* 0x00000002 */
+		disable_flag(I2C_FUNC_PROTOCOL_MANGLING)      | /* 0x00000004 */
+		enable_flag_if(I2C_FUNC_SMBUS_PEC,
+			       priv->features & FEATURE_SMBUS_PEC) |
+								/* 0x00000008 */
+		disable_flag(I2C_FUNC_SMBUS_BLOCK_PROC_CALL)  | /* 0x00008000 */
+		enable_flag(I2C_FUNC_SMBUS_QUICK)             | /* 0x00010000 */
+		disable_flag(I2C_FUNC_SMBUS_READ_BYTE)	      |	/* 0x00020000 */
+		disable_flag(I2C_FUNC_SMBUS_WRITE_BYTE)       |	/* 0x00040000 */
+		disable_flag(I2C_FUNC_SMBUS_READ_BYTE_DATA)   |	/* 0x00080000 */
+		disable_flag(I2C_FUNC_SMBUS_WRITE_BYTE_DATA)  |	/* 0x00100000 */
+		disable_flag(I2C_FUNC_SMBUS_READ_WORD_DATA)   |	/* 0x00200000 */
+		disable_flag(I2C_FUNC_SMBUS_WRITE_WORD_DATA)  |	/* 0x00400000 */
+		disable_flag(I2C_FUNC_SMBUS_PROC_CALL)        |	/* 0x00800000 */
+		disable_flag(I2C_FUNC_SMBUS_READ_BLOCK_DATA)  |	/* 0x01000000 */
+		disable_flag(I2C_FUNC_SMBUS_WRITE_BLOCK_DATA) |	/* 0x02000000 */
+		enable_flag_if(I2C_FUNC_SMBUS_READ_I2C_BLOCK,
+			       priv->features & FEATURE_I2C_BLOCK_READ) |
+								/* 0x04000000 */
+		enable_flag(I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)   |	/* 0x08000000 */
 
-		I2C_FUNC_SMBUS_BYTE              | /* _READ_BYTE  _WRITE_BYTE */
-		I2C_FUNC_SMBUS_BYTE_DATA         | /* _READ_BYTE_DATA
-						    * _WRITE_BYTE_DATA
-						    */
-		I2C_FUNC_SMBUS_WORD_DATA         | /* _READ_WORD_DATA
-						    * _WRITE_WORD_DATA
-						    */
-		I2C_FUNC_SMBUS_BLOCK_DATA        | /* _READ_BLOCK_DATA
-						    * _WRITE_BLOCK_DATA
-						    */
-		!I2C_FUNC_SMBUS_I2C_BLOCK        | /* _READ_I2C_BLOCK
-						    * _WRITE_I2C_BLOCK
-						    */
-		!I2C_FUNC_SMBUS_EMUL;              /* _QUICK  _BYTE
+		enable_flag(I2C_FUNC_SMBUS_BYTE) | /* _READ_BYTE  _WRITE_BYTE */
+		enable_flag(I2C_FUNC_SMBUS_BYTE_DATA)  | /* _READ_BYTE_DATA
+							  * _WRITE_BYTE_DATA
+							  */
+		enable_flag(I2C_FUNC_SMBUS_WORD_DATA)  | /* _READ_WORD_DATA
+							  * _WRITE_WORD_DATA
+							  */
+		enable_flag(I2C_FUNC_SMBUS_BLOCK_DATA) | /* _READ_BLOCK_DATA
+							  * _WRITE_BLOCK_DATA
+							  */
+		disable_flag(I2C_FUNC_SMBUS_I2C_BLOCK) | /* _READ_I2C_BLOCK
+							  * _WRITE_I2C_BLOCK
+							  */
+		disable_flag(I2C_FUNC_SMBUS_EMUL); /* _QUICK  _BYTE
 						    * _BYTE_DATA  _WORD_DATA
 						    * _PROC_CALL
 						    * _WRITE_BLOCK_DATA
@@ -632,6 +636,10 @@ static u32 i801_func(struct i2c_adapter *adapter)
 	return f;
 }
 
+#undef enable_flag
+#undef disable_flag
+#undef enable_flag_if
+
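The old expression OR-ed in terms like !I2C_FUNC_10BIT_ADDR, which always evaluate to 0 and merely documented which capabilities were deliberately left out; the three macros make that intent explicit without changing the computed mask. A stand-alone illustration of the pattern (the flag values here are invented for the example):

	#define enable_flag(x)		(x)
	#define disable_flag(x)		0
	#define enable_flag_if(x, cond)	((cond) ? (x) : 0)

	/* Disabled flags contribute nothing to the mask but stay visible in the
	 * source, so a reader can see every capability that was considered.
	 */
	static unsigned int build_caps(int have_pec)
	{
		return enable_flag(0x01) |		/* always advertised */
		       disable_flag(0x02) |		/* intentionally off */
		       enable_flag_if(0x08, have_pec);	/* hardware dependent */
	}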
 static const struct i2c_algorithm smbus_algorithm = {
 	.smbus_xfer     = i801_access,
 	.functionality  = i801_func,
@@ -640,10 +648,10 @@ static const struct i2c_algorithm smbus_algorithm = {
 /********************************
  *** Part 2 - Driver Handlers ***
  ********************************/
-static int pi2c_probe(struct platform_device *pldev)
+static int kpc_i2c_probe(struct platform_device *pldev)
 {
 	int err;
-	struct i2c_device *priv;
+	struct kpc_i2c *priv;
 	struct resource *res;
 
 	priv = devm_kzalloc(&pldev->dev, sizeof(*priv), GFP_KERNEL);
@@ -692,11 +700,11 @@ static int pi2c_probe(struct platform_device *pldev)
 	return 0;
 }
 
-static int pi2c_remove(struct platform_device *pldev)
+static int kpc_i2c_remove(struct platform_device *pldev)
 {
-	struct i2c_device *lddev;
+	struct kpc_i2c *lddev;
 
-	lddev = (struct i2c_device *)platform_get_drvdata(pldev);
+	lddev = (struct kpc_i2c *)platform_get_drvdata(pldev);
 
 	i2c_del_adapter(&lddev->adapter);
 
@@ -710,12 +718,12 @@ static int pi2c_remove(struct platform_device *pldev)
 	return 0;
 }
 
-static struct platform_driver i2c_plat_driver_i = {
-	.probe      = pi2c_probe,
-	.remove     = pi2c_remove,
+static struct platform_driver kpc_i2c_driver = {
+	.probe      = kpc_i2c_probe,
+	.remove     = kpc_i2c_remove,
 	.driver     = {
 		.name   = KP_DRIVER_NAME_I2C,
 	},
 };
 
-module_platform_driver(i2c_plat_driver_i);
+module_platform_driver(kpc_i2c_driver);
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
index cb52bd9..4052554 100644
--- a/drivers/staging/kpc2000/kpc_dma/fileops.c
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -49,9 +49,7 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
 	u64 dma_addr;
 	u64 user_ctl;
 
-	BUG_ON(priv == NULL);
 	ldev = priv->ldev;
-	BUG_ON(ldev == NULL);
 
 	acd = kzalloc(sizeof(*acd), GFP_KERNEL);
 	if (!acd) {
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 642adc4..c394abf 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -38,4 +38,8 @@
 
 source "drivers/staging/media/soc_camera/Kconfig"
 
+source "drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig"
+
+source "drivers/staging/media/rkisp1/Kconfig"
+
 endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 2f1711a..ea9fce8 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -8,3 +8,5 @@
 obj-$(CONFIG_VIDEO_HANTRO)	+= hantro/
 obj-$(CONFIG_VIDEO_IPU3_IMGU)	+= ipu3/
 obj-$(CONFIG_SOC_CAMERA)	+= soc_camera/
+obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0)	+= phy-rockchip-dphy-rx0/
+obj-$(CONFIG_VIDEO_ROCKCHIP_ISP1)	+= rkisp1/
diff --git a/drivers/staging/media/hantro/Makefile b/drivers/staging/media/hantro/Makefile
index 5d6b038..496b30c 100644
--- a/drivers/staging/media/hantro/Makefile
+++ b/drivers/staging/media/hantro/Makefile
@@ -3,6 +3,7 @@
 hantro-vpu-y += \
 		hantro_drv.o \
 		hantro_v4l2.o \
+		hantro_postproc.o \
 		hantro_h1_jpeg_enc.o \
 		hantro_g1_h264_dec.o \
 		hantro_g1_mpeg2_dec.o \
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
index deb90ae..b0faa43 100644
--- a/drivers/staging/media/hantro/hantro.h
+++ b/drivers/staging/media/hantro/hantro.h
@@ -60,6 +60,8 @@ struct hantro_irq {
  * @num_enc_fmts:		Number of encoder formats.
  * @dec_fmts:			Decoder formats.
  * @num_dec_fmts:		Number of decoder formats.
+ * @postproc_fmts:		Post-processor formats.
+ * @num_postproc_fmts:		Number of post-processor formats.
  * @codec:			Supported codecs
  * @codec_ops:			Codec ops.
  * @init:			Initialize hardware.
@@ -70,6 +72,7 @@ struct hantro_irq {
  * @num_clocks:			number of clocks in the array
  * @reg_names:			array of register range names
  * @num_regs:			number of register range names in the array
+ * @postproc_regs:		&struct hantro_postproc_regs pointer
  */
 struct hantro_variant {
 	unsigned int enc_offset;
@@ -78,6 +81,8 @@ struct hantro_variant {
 	unsigned int num_enc_fmts;
 	const struct hantro_fmt *dec_fmts;
 	unsigned int num_dec_fmts;
+	const struct hantro_fmt *postproc_fmts;
+	unsigned int num_postproc_fmts;
 	unsigned int codec;
 	const struct hantro_codec_ops *codec_ops;
 	int (*init)(struct hantro_dev *vpu);
@@ -88,6 +93,7 @@ struct hantro_variant {
 	int num_clocks;
 	const char * const *reg_names;
 	int num_regs;
+	const struct hantro_postproc_regs *postproc_regs;
 };
 
 /**
@@ -213,6 +219,7 @@ struct hantro_dev {
  *			context, and it's called right before
  *			calling v4l2_m2m_job_finish.
  * @codec_ops:		Set of operations related to codec mode.
+ * @postproc:		Post-processing context.
  * @jpeg_enc:		JPEG-encoding context.
  * @mpeg2_dec:		MPEG-2-decoding context.
  * @vp8_dec:		VP8-decoding context.
@@ -237,6 +244,7 @@ struct hantro_ctx {
 			  unsigned int bytesused);
 
 	const struct hantro_codec_ops *codec_ops;
+	struct hantro_postproc_ctx postproc;
 
 	/* Specific for particular codec modes. */
 	union {
@@ -274,6 +282,23 @@ struct hantro_reg {
 	u32 mask;
 };
 
+struct hantro_postproc_regs {
+	struct hantro_reg pipeline_en;
+	struct hantro_reg max_burst;
+	struct hantro_reg clk_gate;
+	struct hantro_reg out_swap32;
+	struct hantro_reg out_endian;
+	struct hantro_reg out_luma_base;
+	struct hantro_reg input_width;
+	struct hantro_reg input_height;
+	struct hantro_reg output_width;
+	struct hantro_reg output_height;
+	struct hantro_reg input_fmt;
+	struct hantro_reg output_fmt;
+	struct hantro_reg orig_width;
+	struct hantro_reg display_width;
+};
+
 /* Logging helpers */
 
 /**
@@ -352,16 +377,30 @@ static inline u32 vdpu_read(struct hantro_dev *vpu, u32 reg)
 	return val;
 }
 
-static inline void hantro_reg_write(struct hantro_dev *vpu,
-				    const struct hantro_reg *reg,
-				    u32 val)
+static inline u32 vdpu_read_mask(struct hantro_dev *vpu,
+				 const struct hantro_reg *reg,
+				 u32 val)
 {
 	u32 v;
 
 	v = vdpu_read(vpu, reg->base);
 	v &= ~(reg->mask << reg->shift);
 	v |= ((val & reg->mask) << reg->shift);
-	vdpu_write_relaxed(vpu, v, reg->base);
+	return v;
+}
+
+static inline void hantro_reg_write(struct hantro_dev *vpu,
+				    const struct hantro_reg *reg,
+				    u32 val)
+{
+	vdpu_write_relaxed(vpu, vdpu_read_mask(vpu, reg, val), reg->base);
+}
+
+static inline void hantro_reg_write_s(struct hantro_dev *vpu,
+				      const struct hantro_reg *reg,
+				      u32 val)
+{
+	vdpu_write(vpu, vdpu_read_mask(vpu, reg, val), reg->base);
 }
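vdpu_read_mask() factors the read-modify-write step out of both writers above; the only difference between hantro_reg_write() and hantro_reg_write_s() is whether the result goes back through the relaxed or the ordered MMIO write. A stand-alone sketch of the field update it performs (names invented for the example):

	/* For a descriptor with shift and mask, clear the field in the current
	 * register value and insert the new one, as vdpu_read_mask() does.
	 */
	struct field_desc { unsigned int shift, mask; };

	static unsigned int insert_field(unsigned int regval,
					 const struct field_desc *f,
					 unsigned int val)
	{
		regval &= ~(f->mask << f->shift);	/* clear the old field */
		regval |= (val & f->mask) << f->shift;	/* place the new value */
		return regval;
	}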
 
 bool hantro_is_encoder_ctx(const struct hantro_ctx *ctx);
@@ -381,4 +420,23 @@ hantro_get_dst_buf(struct hantro_ctx *ctx)
 	return v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 }
 
+static inline bool
+hantro_needs_postproc(struct hantro_ctx *ctx, const struct hantro_fmt *fmt)
+{
+	return fmt->fourcc != V4L2_PIX_FMT_NV12;
+}
+
+static inline dma_addr_t
+hantro_get_dec_buf_addr(struct hantro_ctx *ctx, struct vb2_buffer *vb)
+{
+	if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
+		return ctx->postproc.dec_q[vb->index].dma;
+	return vb2_dma_contig_plane_dma_addr(vb, 0);
+}
+
+void hantro_postproc_disable(struct hantro_ctx *ctx);
+void hantro_postproc_enable(struct hantro_ctx *ctx);
+void hantro_postproc_free(struct hantro_ctx *ctx);
+int hantro_postproc_alloc(struct hantro_ctx *ctx);
+
 #endif /* HANTRO_H_ */
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 26108c9..97c615a 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -53,7 +53,7 @@ dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
 	if (index < 0)
 		return 0;
 	buf = vb2_get_buffer(q, index);
-	return vb2_dma_contig_plane_dma_addr(buf, 0);
+	return hantro_get_dec_buf_addr(ctx, buf);
 }
 
 static int
@@ -152,16 +152,21 @@ void hantro_watchdog(struct work_struct *work)
 	}
 }
 
-void hantro_prepare_run(struct hantro_ctx *ctx)
+void hantro_start_prepare_run(struct hantro_ctx *ctx)
 {
 	struct vb2_v4l2_buffer *src_buf;
 
 	src_buf = hantro_get_src_buf(ctx);
 	v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
 				&ctx->ctrl_handler);
+
+	if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
+		hantro_postproc_enable(ctx);
+	else
+		hantro_postproc_disable(ctx);
 }
 
-void hantro_finish_run(struct hantro_ctx *ctx)
+void hantro_end_prepare_run(struct hantro_ctx *ctx)
 {
 	struct vb2_v4l2_buffer *src_buf;
 
diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
index 3cd40a8..424c648 100644
--- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
@@ -244,7 +244,7 @@ static void set_buffers(struct hantro_ctx *ctx)
 	vdpu_write_relaxed(vpu, src_dma, G1_REG_ADDR_STR);
 
 	/* Destination (decoded frame) buffer. */
-	dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+	dst_dma = hantro_get_dec_buf_addr(ctx, &dst_buf->vb2_buf);
 	/* Adjust dma addr to start at second line for bottom field */
 	if (ctrls->slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
 		offset = ALIGN(ctx->src_fmt.width, MB_DIM);
@@ -288,7 +288,7 @@ void hantro_g1_h264_dec_run(struct hantro_ctx *ctx)
 	set_ref(ctx);
 	set_buffers(ctx);
 
-	hantro_finish_run(ctx);
+	hantro_end_prepare_run(ctx);
 
 	/* Start decoding! */
 	vdpu_write_relaxed(vpu,
diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
index f3bf67d..2404184 100644
--- a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
@@ -121,7 +121,7 @@ hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
 	vdpu_write_relaxed(vpu, addr, G1_REG_RLC_VLC_BASE);
 
 	/* Destination frame buffer */
-	addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+	addr = hantro_get_dec_buf_addr(ctx, dst_buf);
 	current_addr = addr;
 
 	if (picture->picture_structure == PICT_BOTTOM_FIELD)
@@ -168,7 +168,7 @@ void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
 	dst_buf = hantro_get_dst_buf(ctx);
 
 	/* Apply request controls if any */
-	hantro_prepare_run(ctx);
+	hantro_start_prepare_run(ctx);
 
 	slice_params = hantro_get_ctrl(ctx,
 				       V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
@@ -244,7 +244,7 @@ void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
 					&dst_buf->vb2_buf,
 					sequence, picture, slice_params);
 
-	hantro_finish_run(ctx);
+	hantro_end_prepare_run(ctx);
 
 	reg = G1_REG_DEC_E(1);
 	vdpu_write(vpu, reg, G1_SWREG(1));
diff --git a/drivers/staging/media/hantro/hantro_g1_regs.h b/drivers/staging/media/hantro/hantro_g1_regs.h
index 5c0ea79..c1756e3 100644
--- a/drivers/staging/media/hantro/hantro_g1_regs.h
+++ b/drivers/staging/media/hantro/hantro_g1_regs.h
@@ -9,6 +9,8 @@
 #ifndef HANTRO_G1_REGS_H_
 #define HANTRO_G1_REGS_H_
 
+#define G1_SWREG(nr)                 ((nr) * 4)
+
 /* Decoder registers. */
 #define G1_REG_INTERRUPT				0x004
 #define     G1_REG_INTERRUPT_DEC_PIC_INF		BIT(24)
@@ -298,4 +300,55 @@
 #define     G1_REG_REF_BUF_CTRL2_APF_THRESHOLD(x)	(((x) & 0x3fff) << 0)
 #define G1_REG_SOFT_RESET				0x194
 
+/* Post-processor registers. */
+#define G1_REG_PP_INTERRUPT		G1_SWREG(60)
+#define    G1_REG_PP_READY_IRQ		BIT(12)
+#define    G1_REG_PP_IRQ		BIT(8)
+#define    G1_REG_PP_IRQ_DIS		BIT(4)
+#define    G1_REG_PP_PIPELINE_EN	BIT(1)
+#define    G1_REG_PP_EXTERNAL_TRIGGER	BIT(0)
+#define G1_REG_PP_DEV_CONFIG		G1_SWREG(61)
+#define     G1_REG_PP_AXI_RD_ID(v)	(((v) << 24) & GENMASK(31, 24))
+#define     G1_REG_PP_AXI_WR_ID(v)	(((v) << 16) & GENMASK(23, 16))
+#define     G1_REG_PP_INSWAP32_E(v)	((v) ? BIT(10) : 0)
+#define     G1_REG_PP_DATA_DISC_E(v)	((v) ? BIT(9) : 0)
+#define     G1_REG_PP_CLK_GATE_E(v)	((v) ? BIT(8) : 0)
+#define     G1_REG_PP_IN_ENDIAN(v)	((v) ? BIT(7) : 0)
+#define     G1_REG_PP_OUT_ENDIAN(v)	((v) ? BIT(6) : 0)
+#define     G1_REG_PP_OUTSWAP32_E(v)	((v) ? BIT(5) : 0)
+#define     G1_REG_PP_MAX_BURST(v)	(((v) << 0) & GENMASK(4, 0))
+#define G1_REG_PP_IN_LUMA_BASE		G1_SWREG(63)
+#define G1_REG_PP_IN_CB_BASE		G1_SWREG(64)
+#define G1_REG_PP_IN_CR_BASE		G1_SWREG(65)
+#define G1_REG_PP_OUT_LUMA_BASE		G1_SWREG(66)
+#define G1_REG_PP_OUT_CHROMA_BASE	G1_SWREG(67)
+#define G1_REG_PP_CONTRAST_ADJUST	G1_SWREG(68)
+#define G1_REG_PP_COLOR_CONVERSION	G1_SWREG(69)
+#define G1_REG_PP_COLOR_CONVERSION0	G1_SWREG(70)
+#define G1_REG_PP_COLOR_CONVERSION1	G1_SWREG(71)
+#define G1_REG_PP_INPUT_SIZE		G1_SWREG(72)
+#define    G1_REG_PP_INPUT_SIZE_HEIGHT(v) (((v) << 9) & GENMASK(16, 9))
+#define    G1_REG_PP_INPUT_SIZE_WIDTH(v)  (((v) << 0) & GENMASK(8, 0))
+#define G1_REG_PP_SCALING0		G1_SWREG(79)
+#define     G1_REG_PP_PADD_R(v)	(((v) << 23) & GENMASK(27, 23))
+#define     G1_REG_PP_PADD_G(v)	(((v) << 18) & GENMASK(22, 18))
+#define     G1_REG_PP_RANGEMAP_Y(v) ((v) ? BIT(31) : 0)
+#define     G1_REG_PP_RANGEMAP_C(v) ((v) ? BIT(30) : 0)
+#define     G1_REG_PP_YCBCR_RANGE(v) ((v) ? BIT(29) : 0)
+#define     G1_REG_PP_RGB_16(v) ((v) ? BIT(28) : 0)
+#define G1_REG_PP_SCALING1		G1_SWREG(80)
+#define     G1_REG_PP_PADD_B(v)	(((v) << 18) & GENMASK(22, 18))
+#define G1_REG_PP_MASK_R		G1_SWREG(82)
+#define G1_REG_PP_MASK_G		G1_SWREG(83)
+#define G1_REG_PP_MASK_B		G1_SWREG(84)
+#define G1_REG_PP_CONTROL		G1_SWREG(85)
+#define     G1_REG_PP_CONTROL_IN_FMT(v)	(((v) << 29) & GENMASK(31, 29))
+#define     G1_REG_PP_CONTROL_OUT_FMT(v) (((v) << 26) & GENMASK(28, 26))
+#define     G1_REG_PP_CONTROL_OUT_HEIGHT(v) (((v) << 15) & GENMASK(25, 15))
+#define     G1_REG_PP_CONTROL_OUT_WIDTH(v) (((v) << 4) & GENMASK(14, 4))
+#define G1_REG_PP_MASK1_ORIG_WIDTH	G1_SWREG(88)
+#define     G1_REG_PP_ORIG_WIDTH(v)	(((v) << 23) & GENMASK(31, 23))
+#define G1_REG_PP_DISPLAY_WIDTH		G1_SWREG(92)
+#define G1_REG_PP_FUSE			G1_SWREG(99)
+
 #endif /* HANTRO_G1_REGS_H_ */
diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
index cad1809..a5cdf15 100644
--- a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
@@ -422,7 +422,7 @@ static void cfg_buffers(struct hantro_ctx *ctx,
 	}
 	vdpu_write_relaxed(vpu, reg, G1_REG_FWD_PIC(0));
 
-	dst_dma = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+	dst_dma = hantro_get_dec_buf_addr(ctx, &vb2_dst->vb2_buf);
 	vdpu_write_relaxed(vpu, dst_dma, G1_REG_ADDR_DST);
 }
 
@@ -435,7 +435,7 @@ void hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
 	u32 mb_width, mb_height;
 	u32 reg;
 
-	hantro_prepare_run(ctx);
+	hantro_start_prepare_run(ctx);
 
 	hdr = hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER);
 	if (WARN_ON(!hdr))
@@ -496,7 +496,7 @@ void hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
 	cfg_ref(ctx, hdr);
 	cfg_buffers(ctx, hdr);
 
-	hantro_finish_run(ctx);
+	hantro_end_prepare_run(ctx);
 
 	vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
 }
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
index 938b48d..0d8afc3 100644
--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
+++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
@@ -87,7 +87,7 @@ void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
 	src_buf = hantro_get_src_buf(ctx);
 	dst_buf = hantro_get_dst_buf(ctx);
 
-	hantro_prepare_run(ctx);
+	hantro_start_prepare_run(ctx);
 
 	memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
 	jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
@@ -122,7 +122,7 @@ void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
 		| H1_REG_ENC_PIC_INTRA
 		| H1_REG_ENC_CTRL_EN_BIT;
 
-	hantro_finish_run(ctx);
+	hantro_end_prepare_run(ctx);
 
 	vepu_write(vpu, reg, H1_REG_ENC_CTRL);
 }
diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
index 568640ea..f2d3e81 100644
--- a/drivers/staging/media/hantro/hantro_h264.c
+++ b/drivers/staging/media/hantro/hantro_h264.c
@@ -562,7 +562,7 @@ int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx)
 	struct hantro_h264_dec_ctrls *ctrls = &h264_ctx->ctrls;
 	struct hantro_h264_reflist_builder reflist_builder;
 
-	hantro_prepare_run(ctx);
+	hantro_start_prepare_run(ctx);
 
 	ctrls->scaling =
 		hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX);
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index fa91dd1..2398d4c 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -28,11 +28,13 @@ struct hantro_variant;
  * @cpu:	CPU pointer to the buffer.
  * @dma:	DMA address of the buffer.
  * @size:	Size of the buffer.
+ * @attrs:	Attributes of the DMA mapping.
  */
 struct hantro_aux_buf {
 	void *cpu;
 	dma_addr_t dma;
 	size_t size;
+	unsigned long attrs;
 };
 
 /**
@@ -107,6 +109,15 @@ struct hantro_vp8_dec_hw_ctx {
 };
 
 /**
+ * struct hantro_postproc_ctx
+ *
+ * @dec_q:		References buffers, in decoder format.
+ */
+struct hantro_postproc_ctx {
+	struct hantro_aux_buf dec_q[VB2_MAX_FRAME];
+};
+
+/**
  * struct hantro_codec_ops - codec mode specific operations
  *
  * @init:	If needed, can be used for initialization.
@@ -141,14 +152,16 @@ extern const struct hantro_variant rk3399_vpu_variant;
 extern const struct hantro_variant rk3328_vpu_variant;
 extern const struct hantro_variant rk3288_vpu_variant;
 
+extern const struct hantro_postproc_regs hantro_g1_postproc_regs;
+
 extern const u32 hantro_vp8_dec_mc_filter[8][6];
 
 void hantro_watchdog(struct work_struct *work);
 void hantro_run(struct hantro_ctx *ctx);
 void hantro_irq_done(struct hantro_dev *vpu, unsigned int bytesused,
 		     enum vb2_buffer_state result);
-void hantro_prepare_run(struct hantro_ctx *ctx);
-void hantro_finish_run(struct hantro_ctx *ctx);
+void hantro_start_prepare_run(struct hantro_ctx *ctx);
+void hantro_end_prepare_run(struct hantro_ctx *ctx);
 
 void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx);
 void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx);
diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c
new file mode 100644
index 0000000..28a85d3
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_postproc.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro G1 post-processor support
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+#include "hantro_g1_regs.h"
+
+#define HANTRO_PP_REG_WRITE(vpu, reg_name, val) \
+{ \
+	hantro_reg_write((vpu), \
+			 &((vpu)->variant->postproc_regs->reg_name), \
+			 (val)); \
+}
+
+#define HANTRO_PP_REG_WRITE_S(vpu, reg_name, val) \
+{ \
+	hantro_reg_write_s((vpu), \
+			   &((vpu)->variant->postproc_regs->reg_name), \
+			   (val)); \
+}
+
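Statement-like macros are commonly wrapped in do { } while (0) rather than bare braces so they behave as a single statement, for example inside an unbraced if/else. An illustrative variant only, not a suggested change to the patch:

	#define PP_REG_WRITE(vpu, reg_name, val) \
	do { \
		hantro_reg_write((vpu), \
				 &((vpu)->variant->postproc_regs->reg_name), \
				 (val)); \
	} while (0)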
+#define VPU_PP_IN_YUYV			0x0
+#define VPU_PP_IN_NV12			0x1
+#define VPU_PP_IN_YUV420		0x2
+#define VPU_PP_IN_YUV240_TILED		0x5
+#define VPU_PP_OUT_RGB			0x0
+#define VPU_PP_OUT_YUYV			0x3
+
+const struct hantro_postproc_regs hantro_g1_postproc_regs = {
+	.pipeline_en = {G1_REG_PP_INTERRUPT, 1, 0x1},
+	.max_burst = {G1_REG_PP_DEV_CONFIG, 0, 0x1f},
+	.clk_gate = {G1_REG_PP_DEV_CONFIG, 1, 0x1},
+	.out_swap32 = {G1_REG_PP_DEV_CONFIG, 5, 0x1},
+	.out_endian = {G1_REG_PP_DEV_CONFIG, 6, 0x1},
+	.out_luma_base = {G1_REG_PP_OUT_LUMA_BASE, 0, 0xffffffff},
+	.input_width = {G1_REG_PP_INPUT_SIZE, 0, 0x1ff},
+	.input_height = {G1_REG_PP_INPUT_SIZE, 9, 0x1ff},
+	.output_width = {G1_REG_PP_CONTROL, 4, 0x7ff},
+	.output_height = {G1_REG_PP_CONTROL, 15, 0x7ff},
+	.input_fmt = {G1_REG_PP_CONTROL, 29, 0x7},
+	.output_fmt = {G1_REG_PP_CONTROL, 26, 0x7},
+	.orig_width = {G1_REG_PP_MASK1_ORIG_WIDTH, 23, 0x1ff},
+	.display_width = {G1_REG_PP_DISPLAY_WIDTH, 0, 0xfff},
+};
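Each entry above is a {register, shift, mask} triple describing one bit field, so the generic HANTRO_PP_REG_WRITE() helpers can stay variant-agnostic while the field layout lives in this table. As an illustrative cross-check (not driver code), the output_width entry {G1_REG_PP_CONTROL, 4, 0x7ff} addresses bits [14:4], the same field the G1_REG_PP_CONTROL_OUT_WIDTH() macro in hantro_g1_regs.h writes:

	#include <linux/bits.h>		/* GENMASK() */

	/* 0x7ff shifted left by 4 covers bits 14..4. */
	_Static_assert((0x7ffUL << 4) == GENMASK(14, 4),
		       "output_width descriptor matches G1_REG_PP_CONTROL_OUT_WIDTH");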
+
+void hantro_postproc_enable(struct hantro_ctx *ctx)
+{
+	struct hantro_dev *vpu = ctx->dev;
+	struct vb2_v4l2_buffer *dst_buf;
+	u32 src_pp_fmt, dst_pp_fmt;
+	dma_addr_t dst_dma;
+
+	if (!vpu->variant->postproc_regs)
+		return;
+
+	/* Turn on pipeline mode. Must be done first. */
+	HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x1);
+
+	src_pp_fmt = VPU_PP_IN_NV12;
+
+	switch (ctx->vpu_dst_fmt->fourcc) {
+	case V4L2_PIX_FMT_YUYV:
+		dst_pp_fmt = VPU_PP_OUT_YUYV;
+		break;
+	default:
+		WARN(1, "output format %d not supported by the post-processor, this wasn't expected.",
+		     ctx->vpu_dst_fmt->fourcc);
+		dst_pp_fmt = 0;
+		break;
+	}
+
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+	dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+
+	HANTRO_PP_REG_WRITE(vpu, clk_gate, 0x1);
+	HANTRO_PP_REG_WRITE(vpu, out_endian, 0x1);
+	HANTRO_PP_REG_WRITE(vpu, out_swap32, 0x1);
+	HANTRO_PP_REG_WRITE(vpu, max_burst, 16);
+	HANTRO_PP_REG_WRITE(vpu, out_luma_base, dst_dma);
+	HANTRO_PP_REG_WRITE(vpu, input_width, MB_WIDTH(ctx->dst_fmt.width));
+	HANTRO_PP_REG_WRITE(vpu, input_height, MB_HEIGHT(ctx->dst_fmt.height));
+	HANTRO_PP_REG_WRITE(vpu, input_fmt, src_pp_fmt);
+	HANTRO_PP_REG_WRITE(vpu, output_fmt, dst_pp_fmt);
+	HANTRO_PP_REG_WRITE(vpu, output_width, ctx->dst_fmt.width);
+	HANTRO_PP_REG_WRITE(vpu, output_height, ctx->dst_fmt.height);
+	HANTRO_PP_REG_WRITE(vpu, orig_width, MB_WIDTH(ctx->dst_fmt.width));
+	HANTRO_PP_REG_WRITE(vpu, display_width, ctx->dst_fmt.width);
+}
+
+void hantro_postproc_free(struct hantro_ctx *ctx)
+{
+	struct hantro_dev *vpu = ctx->dev;
+	unsigned int i;
+
+	for (i = 0; i < VB2_MAX_FRAME; ++i) {
+		struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
+
+		if (priv->cpu) {
+			dma_free_attrs(vpu->dev, priv->size, priv->cpu,
+				       priv->dma, priv->attrs);
+			priv->cpu = NULL;
+		}
+	}
+}
+
+int hantro_postproc_alloc(struct hantro_ctx *ctx)
+{
+	struct hantro_dev *vpu = ctx->dev;
+	struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+	struct vb2_queue *cap_queue = &m2m_ctx->cap_q_ctx.q;
+	unsigned int num_buffers = cap_queue->num_buffers;
+	unsigned int i, buf_size;
+
+	buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage;
+
+	for (i = 0; i < num_buffers; ++i) {
+		struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
+
+		/*
+		 * The buffers on this queue are meant as intermediate
+		 * buffers for the decoder, so no mapping is needed.
+		 */
+		priv->attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+		priv->cpu = dma_alloc_attrs(vpu->dev, buf_size, &priv->dma,
+					    GFP_KERNEL, priv->attrs);
+		if (!priv->cpu)
+			return -ENOMEM;
+		priv->size = buf_size;
+	}
+	return 0;
+}
+
+void hantro_postproc_disable(struct hantro_ctx *ctx)
+{
+	struct hantro_dev *vpu = ctx->dev;
+
+	if (!vpu->variant->postproc_regs)
+		return;
+
+	HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x0);
+}
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index 1dae76f..0198bcd 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -47,11 +47,30 @@ hantro_get_formats(const struct hantro_ctx *ctx, unsigned int *num_fmts)
 }
 
 static const struct hantro_fmt *
-hantro_find_format(const struct hantro_fmt *formats, unsigned int num_fmts,
-		   u32 fourcc)
+hantro_get_postproc_formats(const struct hantro_ctx *ctx,
+			    unsigned int *num_fmts)
 {
-	unsigned int i;
+	if (hantro_is_encoder_ctx(ctx)) {
+		*num_fmts = 0;
+		return NULL;
+	}
 
+	*num_fmts = ctx->dev->variant->num_postproc_fmts;
+	return ctx->dev->variant->postproc_fmts;
+}
+
+static const struct hantro_fmt *
+hantro_find_format(const struct hantro_ctx *ctx, u32 fourcc)
+{
+	const struct hantro_fmt *formats;
+	unsigned int i, num_fmts;
+
+	formats = hantro_get_formats(ctx, &num_fmts);
+	for (i = 0; i < num_fmts; i++)
+		if (formats[i].fourcc == fourcc)
+			return &formats[i];
+
+	formats = hantro_get_postproc_formats(ctx, &num_fmts);
 	for (i = 0; i < num_fmts; i++)
 		if (formats[i].fourcc == fourcc)
 			return &formats[i];
@@ -59,11 +78,12 @@ hantro_find_format(const struct hantro_fmt *formats, unsigned int num_fmts,
 }
 
 static const struct hantro_fmt *
-hantro_get_default_fmt(const struct hantro_fmt *formats, unsigned int num_fmts,
-		       bool bitstream)
+hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream)
 {
-	unsigned int i;
+	const struct hantro_fmt *formats;
+	unsigned int i, num_fmts;
 
+	formats = hantro_get_formats(ctx, &num_fmts);
 	for (i = 0; i < num_fmts; i++) {
 		if (bitstream == (formats[i].codec_mode !=
 				  HANTRO_MODE_NONE))
@@ -89,8 +109,7 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
 				  struct v4l2_frmsizeenum *fsize)
 {
 	struct hantro_ctx *ctx = fh_to_ctx(priv);
-	const struct hantro_fmt *formats, *fmt;
-	unsigned int num_fmts;
+	const struct hantro_fmt *fmt;
 
 	if (fsize->index != 0) {
 		vpu_debug(0, "invalid frame size index (expected 0, got %d)\n",
@@ -98,8 +117,7 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
 		return -EINVAL;
 	}
 
-	formats = hantro_get_formats(ctx, &num_fmts);
-	fmt = hantro_find_format(formats, num_fmts, fsize->pixel_format);
+	fmt = hantro_find_format(ctx, fsize->pixel_format);
 	if (!fmt) {
 		vpu_debug(0, "unsupported bitstream format (%08x)\n",
 			  fsize->pixel_format);
@@ -150,6 +168,24 @@ static int vidioc_enum_fmt(struct file *file, void *priv,
 		}
 		++j;
 	}
+
+	/*
+	 * Enumerate post-processed formats. As per the specification,
+	 * these formats are enumerated after the natively decoded formats
+	 * as a hint to applications about the preferred format.
+	 */
+	if (!capture)
+		return -EINVAL;
+	formats = hantro_get_postproc_formats(ctx, &num_fmts);
+	for (i = 0; i < num_fmts; i++) {
+		if (j == f->index) {
+			fmt = &formats[i];
+			f->pixelformat = fmt->fourcc;
+			return 0;
+		}
+		++j;
+	}
+
 	return -EINVAL;
 }
 
@@ -196,8 +232,7 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
 {
 	struct hantro_ctx *ctx = fh_to_ctx(priv);
 	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
-	const struct hantro_fmt *formats, *fmt, *vpu_fmt;
-	unsigned int num_fmts;
+	const struct hantro_fmt *fmt, *vpu_fmt;
 	bool coded;
 
 	coded = capture == hantro_is_encoder_ctx(ctx);
@@ -208,10 +243,9 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
 		  (pix_mp->pixelformat >> 16) & 0x7f,
 		  (pix_mp->pixelformat >> 24) & 0x7f);
 
-	formats = hantro_get_formats(ctx, &num_fmts);
-	fmt = hantro_find_format(formats, num_fmts, pix_mp->pixelformat);
+	fmt = hantro_find_format(ctx, pix_mp->pixelformat);
 	if (!fmt) {
-		fmt = hantro_get_default_fmt(formats, num_fmts, coded);
+		fmt = hantro_get_default_fmt(ctx, coded);
 		f->fmt.pix_mp.pixelformat = fmt->fourcc;
 	}
 
@@ -246,7 +280,7 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
 		 *
 		 * The H264 decoder needs extra space on the output buffers
 		 * to store motion vectors. This is needed for reference
-		 * frames.
+		 * frames and only if the format is non-post-processed NV12.
 		 *
 		 * Memory layout is as follow:
 		 *
@@ -260,7 +294,8 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
 		 * | MC sync          32 bytes |
 		 * +---------------------------+
 		 */
-		if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
+		if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE &&
+		    !hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
 			pix_mp->plane_fmt[0].sizeimage +=
 				64 * MB_WIDTH(pix_mp->width) *
 				     MB_WIDTH(pix_mp->height) + 32;
@@ -306,12 +341,10 @@ hantro_reset_fmt(struct v4l2_pix_format_mplane *fmt,
 static void
 hantro_reset_encoded_fmt(struct hantro_ctx *ctx)
 {
-	const struct hantro_fmt *vpu_fmt, *formats;
+	const struct hantro_fmt *vpu_fmt;
 	struct v4l2_pix_format_mplane *fmt;
-	unsigned int num_fmts;
 
-	formats = hantro_get_formats(ctx, &num_fmts);
-	vpu_fmt = hantro_get_default_fmt(formats, num_fmts, true);
+	vpu_fmt = hantro_get_default_fmt(ctx, true);
 
 	if (hantro_is_encoder_ctx(ctx)) {
 		ctx->vpu_dst_fmt = vpu_fmt;
@@ -332,12 +365,10 @@ hantro_reset_encoded_fmt(struct hantro_ctx *ctx)
 static void
 hantro_reset_raw_fmt(struct hantro_ctx *ctx)
 {
-	const struct hantro_fmt *raw_vpu_fmt, *formats;
+	const struct hantro_fmt *raw_vpu_fmt;
 	struct v4l2_pix_format_mplane *raw_fmt, *encoded_fmt;
-	unsigned int num_fmts;
 
-	formats = hantro_get_formats(ctx, &num_fmts);
-	raw_vpu_fmt = hantro_get_default_fmt(formats, num_fmts, false);
+	raw_vpu_fmt = hantro_get_default_fmt(ctx, false);
 
 	if (hantro_is_encoder_ctx(ctx)) {
 		ctx->vpu_src_fmt = raw_vpu_fmt;
@@ -384,8 +415,6 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
 	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
 	struct hantro_ctx *ctx = fh_to_ctx(priv);
 	struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
-	const struct hantro_fmt *formats;
-	unsigned int num_fmts;
 	int ret;
 
 	ret = vidioc_try_fmt_out_mplane(file, priv, f);
@@ -421,9 +450,7 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
 			return -EBUSY;
 	}
 
-	formats = hantro_get_formats(ctx, &num_fmts);
-	ctx->vpu_src_fmt = hantro_find_format(formats, num_fmts,
-					      pix_mp->pixelformat);
+	ctx->vpu_src_fmt = hantro_find_format(ctx, pix_mp->pixelformat);
 	ctx->src_fmt = *pix_mp;
 
 	/*
@@ -457,9 +484,7 @@ static int vidioc_s_fmt_cap_mplane(struct file *file, void *priv,
 {
 	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
 	struct hantro_ctx *ctx = fh_to_ctx(priv);
-	const struct hantro_fmt *formats;
 	struct vb2_queue *vq;
-	unsigned int num_fmts;
 	int ret;
 
 	/* Change not allowed if queue is busy. */
@@ -488,9 +513,7 @@ static int vidioc_s_fmt_cap_mplane(struct file *file, void *priv,
 	if (ret)
 		return ret;
 
-	formats = hantro_get_formats(ctx, &num_fmts);
-	ctx->vpu_dst_fmt = hantro_find_format(formats, num_fmts,
-					      pix_mp->pixelformat);
+	ctx->vpu_dst_fmt = hantro_find_format(ctx, pix_mp->pixelformat);
 	ctx->dst_fmt = *pix_mp;
 
 	/*
@@ -650,10 +673,23 @@ static int hantro_start_streaming(struct vb2_queue *q, unsigned int count)
 
 		vpu_debug(4, "Codec mode = %d\n", codec_mode);
 		ctx->codec_ops = &ctx->dev->variant->codec_ops[codec_mode];
-		if (ctx->codec_ops->init)
+		if (ctx->codec_ops->init) {
 			ret = ctx->codec_ops->init(ctx);
-	}
+			if (ret)
+				return ret;
+		}
 
+		if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt)) {
+			ret = hantro_postproc_alloc(ctx);
+			if (ret)
+				goto err_codec_exit;
+		}
+	}
+	return ret;
+
+err_codec_exit:
+	if (ctx->codec_ops->exit)
+		ctx->codec_ops->exit(ctx);
 	return ret;
 }
 
@@ -680,6 +716,7 @@ static void hantro_stop_streaming(struct vb2_queue *q)
 	struct hantro_ctx *ctx = vb2_get_drv_priv(q);
 
 	if (hantro_vq_is_coded(q)) {
+		hantro_postproc_free(ctx);
 		if (ctx->codec_ops && ctx->codec_ops->exit)
 			ctx->codec_ops->exit(ctx);
 	}
diff --git a/drivers/staging/media/hantro/rk3288_vpu_hw.c b/drivers/staging/media/hantro/rk3288_vpu_hw.c
index f8db6fc..2f914b3 100644
--- a/drivers/staging/media/hantro/rk3288_vpu_hw.c
+++ b/drivers/staging/media/hantro/rk3288_vpu_hw.c
@@ -56,6 +56,13 @@ static const struct hantro_fmt rk3288_vpu_enc_fmts[] = {
 	},
 };
 
+static const struct hantro_fmt rk3288_vpu_postproc_fmts[] = {
+	{
+		.fourcc = V4L2_PIX_FMT_YUYV,
+		.codec_mode = HANTRO_MODE_NONE,
+	},
+};
+
 static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
 	{
 		.fourcc = V4L2_PIX_FMT_NV12,
@@ -215,6 +222,9 @@ const struct hantro_variant rk3288_vpu_variant = {
 	.dec_offset = 0x400,
 	.dec_fmts = rk3288_vpu_dec_fmts,
 	.num_dec_fmts = ARRAY_SIZE(rk3288_vpu_dec_fmts),
+	.postproc_fmts = rk3288_vpu_postproc_fmts,
+	.num_postproc_fmts = ARRAY_SIZE(rk3288_vpu_postproc_fmts),
+	.postproc_regs = &hantro_g1_postproc_regs,
 	.codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
 		 HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
 	.codec_ops = rk3288_vpu_codec_ops,
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
index 0678923..4c2d43f 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
@@ -118,7 +118,7 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
 	src_buf = hantro_get_src_buf(ctx);
 	dst_buf = hantro_get_dst_buf(ctx);
 
-	hantro_prepare_run(ctx);
+	hantro_start_prepare_run(ctx);
 
 	memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
 	jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
@@ -156,6 +156,6 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
 		| VEPU_REG_ENCODE_ENABLE;
 
 	/* Kick the watchdog and start encoding */
-	hantro_finish_run(ctx);
+	hantro_end_prepare_run(ctx);
 	vepu_write(vpu, reg, VEPU_REG_ENCODE_START);
 }
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
index b40d2cd..7e9aad6 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
@@ -169,7 +169,7 @@ void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
 	src_buf = hantro_get_src_buf(ctx);
 	dst_buf = hantro_get_dst_buf(ctx);
 
-	hantro_prepare_run(ctx);
+	hantro_start_prepare_run(ctx);
 
 	slice_params = hantro_get_ctrl(ctx,
 				       V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
@@ -250,7 +250,7 @@ void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
 					 sequence, picture, slice_params);
 
 	/* Kick the watchdog and start decoding */
-	hantro_finish_run(ctx);
+	hantro_end_prepare_run(ctx);
 
 	reg = vdpu_read(vpu, VDPU_SWREG(57)) | VDPU_REG_DEC_E(1);
 	vdpu_write(vpu, reg, VDPU_SWREG(57));
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
index 76d7ed3..a4a792f 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
@@ -513,7 +513,7 @@ void rk3399_vpu_vp8_dec_run(struct hantro_ctx *ctx)
 	u32 mb_width, mb_height;
 	u32 reg;
 
-	hantro_prepare_run(ctx);
+	hantro_start_prepare_run(ctx);
 
 	hdr = hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER);
 	if (WARN_ON(!hdr))
@@ -587,7 +587,7 @@ void rk3399_vpu_vp8_dec_run(struct hantro_ctx *ctx)
 	cfg_ref(ctx, hdr);
 	cfg_buffers(ctx, hdr);
 
-	hantro_finish_run(ctx);
+	hantro_end_prepare_run(ctx);
 
 	hantro_reg_write(vpu, &vp8_dec_start_dec, 1);
 }
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 99166af..383abec 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -251,8 +251,6 @@ struct csi_state {
 
 	struct mipi_csis_event events[MIPI_CSIS_NUM_EVENTS];
 
-	struct v4l2_async_notifier subdev_notifier;
-
 	struct csis_hw_reset hw_reset;
 	struct regulator *mipi_phy_regulator;
 	bool sink_linked;
@@ -1104,7 +1102,6 @@ static int mipi_csis_remove(struct platform_device *pdev)
 
 	mipi_csis_debugfs_exit(state);
 	v4l2_async_unregister_subdev(&state->mipi_sd);
-	v4l2_async_notifier_unregister(&state->subdev_notifier);
 
 	pm_runtime_disable(&pdev->dev);
 	mipi_csis_pm_suspend(&pdev->dev, true);
diff --git a/drivers/staging/media/ipu3/TODO b/drivers/staging/media/ipu3/TODO
index b44bb4a..dc356d6 100644
--- a/drivers/staging/media/ipu3/TODO
+++ b/drivers/staging/media/ipu3/TODO
@@ -5,15 +5,9 @@
   as well as formats and the binary used to a request. Remove the
   opportunistic buffer management. (Sakari)
 
-- Using ENABLED and IMMUTABLE link flags for the links where those are
-  relevant. (Sakari)
-
 - IPU3 driver documentation (Laurent)
   Comments on configuring v4l2 subdevs for CIO2 and ImgU.
 
-- uAPI documentation:
-  Move acronyms to doc-rst file. (Mauro)
-
 - Switch to yavta from v4l2n in driver docs.
 
 - Elaborate the functionality of different selection rectangles in driver
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
index fd1ed84..f36de50 100644
--- a/drivers/staging/media/ipu3/ipu3-css.c
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -1450,7 +1450,7 @@ bool imgu_css_pipe_queue_empty(struct imgu_css *css, unsigned int pipe)
 bool imgu_css_queue_empty(struct imgu_css *css)
 {
 	unsigned int pipe;
-	bool ret = 0;
+	bool ret = false;
 
 	for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++)
 		ret &= imgu_css_pipe_queue_empty(css, pipe);
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 3c7ad1e..569e27b 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -1260,6 +1260,11 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
 		r = media_create_pad_link(&vdev->entity, 0, &sd->entity,
 					  node_num, flags);
 	} else {
+		if (node->id == IMGU_NODE_OUT) {
+			flags |= MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
+			node->enabled = true;
+		}
+
 		r = media_create_pad_link(&sd->entity, node_num, &vdev->entity,
 					  0, flags);
 	}
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index 0a1a04f..5c5dabe 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -133,6 +133,8 @@ vdec_queue_recycle(struct amvdec_session *sess, struct vb2_buffer *vb)
 	struct amvdec_buffer *new_buf;
 
 	new_buf = kmalloc(sizeof(*new_buf), GFP_KERNEL);
+	if (!new_buf)
+		return;
 	new_buf->vb = vb;
 
 	mutex_lock(&sess->bufs_recycle_lock);
@@ -956,6 +958,10 @@ static const struct of_device_id vdec_dt_match[] = {
 	  .data = &vdec_platform_gxm },
 	{ .compatible = "amlogic,gxl-vdec",
 	  .data = &vdec_platform_gxl },
+	{ .compatible = "amlogic,g12a-vdec",
+	  .data = &vdec_platform_g12a },
+	{ .compatible = "amlogic,sm1-vdec",
+	  .data = &vdec_platform_sm1 },
 	{}
 };
 MODULE_DEVICE_TABLE(of, vdec_dt_match);
@@ -1003,6 +1009,16 @@ static int vdec_probe(struct platform_device *pdev)
 	if (IS_ERR(core->canvas))
 		return PTR_ERR(core->canvas);
 
+	of_id = of_match_node(vdec_dt_match, dev->of_node);
+	core->platform = of_id->data;
+
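+	/* The G12A and SM1 variants need an additional "vdec_hevcf" clock */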
+	if (core->platform->revision == VDEC_REVISION_G12A ||
+	    core->platform->revision == VDEC_REVISION_SM1) {
+		core->vdec_hevcf_clk = devm_clk_get(dev, "vdec_hevcf");
+		if (IS_ERR(core->vdec_hevcf_clk))
+			return -EPROBE_DEFER;
+	}
+
 	core->dos_parser_clk = devm_clk_get(dev, "dos_parser");
 	if (IS_ERR(core->dos_parser_clk))
 		return -EPROBE_DEFER;
@@ -1045,8 +1061,6 @@ static int vdec_probe(struct platform_device *pdev)
 		goto err_vdev_release;
 	}
 
-	of_id = of_match_node(vdec_dt_match, dev->of_node);
-	core->platform = of_id->data;
 	core->vdev_dec = vdev;
 	core->dev_dec = dev;
 	mutex_init(&core->lock);
diff --git a/drivers/staging/media/meson/vdec/vdec.h b/drivers/staging/media/meson/vdec/vdec.h
index d811e79..0faa1ec 100644
--- a/drivers/staging/media/meson/vdec/vdec.h
+++ b/drivers/staging/media/meson/vdec/vdec.h
@@ -74,6 +74,7 @@ struct amvdec_core {
 	struct clk *dos_clk;
 	struct clk *vdec_1_clk;
 	struct clk *vdec_hevc_clk;
+	struct clk *vdec_hevcf_clk;
 
 	struct reset_control *esparser_reset;
 
diff --git a/drivers/staging/media/meson/vdec/vdec_1.c b/drivers/staging/media/meson/vdec/vdec_1.c
index 3a15c6f..3fe2de0 100644
--- a/drivers/staging/media/meson/vdec/vdec_1.c
+++ b/drivers/staging/media/meson/vdec/vdec_1.c
@@ -18,6 +18,7 @@
 #define AO_RTI_GEN_PWR_SLEEP0	0xe8
 #define AO_RTI_GEN_PWR_ISO0	0xec
 	#define GEN_PWR_VDEC_1 (BIT(3) | BIT(2))
+	#define GEN_PWR_VDEC_1_SM1 (BIT(1))
 
 #define MC_SIZE			(4096 * 4)
 
@@ -142,12 +143,20 @@ static int vdec_1_stop(struct amvdec_session *sess)
 	amvdec_read_dos(core, DOS_SW_RESET0);
 
 	/* enable vdec1 isolation */
-	regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0xc0);
+	if (core->platform->revision == VDEC_REVISION_SM1)
+		regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
+				   GEN_PWR_VDEC_1_SM1, GEN_PWR_VDEC_1_SM1);
+	else
+		regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0xc0);
 	/* power off vdec1 memories */
 	amvdec_write_dos(core, DOS_MEM_PD_VDEC, 0xffffffff);
 	/* power off vdec1 */
-	regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
-			   GEN_PWR_VDEC_1, GEN_PWR_VDEC_1);
+	if (core->platform->revision == VDEC_REVISION_SM1)
+		regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+				   GEN_PWR_VDEC_1_SM1, GEN_PWR_VDEC_1_SM1);
+	else
+		regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+				   GEN_PWR_VDEC_1, GEN_PWR_VDEC_1);
 
 	clk_disable_unprepare(core->vdec_1_clk);
 
@@ -170,8 +179,12 @@ static int vdec_1_start(struct amvdec_session *sess)
 		return ret;
 
 	/* Enable power for VDEC_1 */
-	regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
-			   GEN_PWR_VDEC_1, 0);
+	if (core->platform->revision == VDEC_REVISION_SM1)
+		regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+				   GEN_PWR_VDEC_1_SM1, 0);
+	else
+		regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+				   GEN_PWR_VDEC_1, 0);
 	usleep_range(10, 20);
 
 	/* Reset VDEC1 */
@@ -183,7 +196,11 @@ static int vdec_1_start(struct amvdec_session *sess)
 	/* enable VDEC Memories */
 	amvdec_write_dos(core, DOS_MEM_PD_VDEC, 0);
 	/* Remove VDEC1 Isolation */
-	regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0);
+	if (core->platform->revision == VDEC_REVISION_SM1)
+		regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
+				   GEN_PWR_VDEC_1_SM1, 0);
+	else
+		regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0);
 	/* Reset DOS top registers */
 	amvdec_write_dos(core, DOS_VDEC_MCRCC_STALL_CTRL, 0);
 
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c
index 824dbc7..ea39f82 100644
--- a/drivers/staging/media/meson/vdec/vdec_platform.c
+++ b/drivers/staging/media/meson/vdec/vdec_platform.c
@@ -82,6 +82,54 @@ static const struct amvdec_format vdec_formats_gxm[] = {
 	},
 };
 
+static const struct amvdec_format vdec_formats_g12a[] = {
+	{
+		.pixfmt = V4L2_PIX_FMT_MPEG1,
+		.min_buffers = 8,
+		.max_buffers = 8,
+		.max_width = 1920,
+		.max_height = 1080,
+		.vdec_ops = &vdec_1_ops,
+		.codec_ops = &codec_mpeg12_ops,
+		.firmware_path = "meson/vdec/gxl_mpeg12.bin",
+		.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+	}, {
+		.pixfmt = V4L2_PIX_FMT_MPEG2,
+		.min_buffers = 8,
+		.max_buffers = 8,
+		.max_width = 1920,
+		.max_height = 1080,
+		.vdec_ops = &vdec_1_ops,
+		.codec_ops = &codec_mpeg12_ops,
+		.firmware_path = "meson/vdec/gxl_mpeg12.bin",
+		.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+	},
+};
+
+static const struct amvdec_format vdec_formats_sm1[] = {
+	{
+		.pixfmt = V4L2_PIX_FMT_MPEG1,
+		.min_buffers = 8,
+		.max_buffers = 8,
+		.max_width = 1920,
+		.max_height = 1080,
+		.vdec_ops = &vdec_1_ops,
+		.codec_ops = &codec_mpeg12_ops,
+		.firmware_path = "meson/vdec/gxl_mpeg12.bin",
+		.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+	}, {
+		.pixfmt = V4L2_PIX_FMT_MPEG2,
+		.min_buffers = 8,
+		.max_buffers = 8,
+		.max_width = 1920,
+		.max_height = 1080,
+		.vdec_ops = &vdec_1_ops,
+		.codec_ops = &codec_mpeg12_ops,
+		.firmware_path = "meson/vdec/gxl_mpeg12.bin",
+		.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+	},
+};
+
 const struct vdec_platform vdec_platform_gxbb = {
 	.formats = vdec_formats_gxbb,
 	.num_formats = ARRAY_SIZE(vdec_formats_gxbb),
@@ -99,3 +147,15 @@ const struct vdec_platform vdec_platform_gxm = {
 	.num_formats = ARRAY_SIZE(vdec_formats_gxm),
 	.revision = VDEC_REVISION_GXM,
 };
+
+const struct vdec_platform vdec_platform_g12a = {
+	.formats = vdec_formats_g12a,
+	.num_formats = ARRAY_SIZE(vdec_formats_g12a),
+	.revision = VDEC_REVISION_G12A,
+};
+
+const struct vdec_platform vdec_platform_sm1 = {
+	.formats = vdec_formats_sm1,
+	.num_formats = ARRAY_SIZE(vdec_formats_sm1),
+	.revision = VDEC_REVISION_SM1,
+};
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.h b/drivers/staging/media/meson/vdec/vdec_platform.h
index f602532..731877a 100644
--- a/drivers/staging/media/meson/vdec/vdec_platform.h
+++ b/drivers/staging/media/meson/vdec/vdec_platform.h
@@ -15,6 +15,8 @@ enum vdec_revision {
 	VDEC_REVISION_GXBB,
 	VDEC_REVISION_GXL,
 	VDEC_REVISION_GXM,
+	VDEC_REVISION_G12A,
+	VDEC_REVISION_SM1,
 };
 
 struct vdec_platform {
@@ -26,5 +28,7 @@ struct vdec_platform {
 extern const struct vdec_platform vdec_platform_gxbb;
 extern const struct vdec_platform vdec_platform_gxm;
 extern const struct vdec_platform vdec_platform_gxl;
+extern const struct vdec_platform vdec_platform_g12a;
+extern const struct vdec_platform vdec_platform_sm1;
 
 #endif
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/Documentation/devicetree/bindings/phy/rockchip-mipi-dphy-rx0.yaml b/drivers/staging/media/phy-rockchip-dphy-rx0/Documentation/devicetree/bindings/phy/rockchip-mipi-dphy-rx0.yaml
new file mode 100644
index 0000000..5dacece
--- /dev/null
+++ b/drivers/staging/media/phy-rockchip-dphy-rx0/Documentation/devicetree/bindings/phy/rockchip-mipi-dphy-rx0.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/rockchip-mipi-dphy-rx0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip SoC MIPI RX0 D-PHY Device Tree Bindings
+
+maintainers:
+  - Helen Koike <helen.koike@collabora.com>
+  - Ezequiel Garcia <ezequiel@collabora.com>
+
+description: |
+  The Rockchip SoC has a MIPI D-PHY bus with an RX0 entry which connects to
+  the ISP1 (Image Signal Processing unit v1.0) for CSI cameras.
+
+properties:
+  compatible:
+    const: rockchip,rk3399-mipi-dphy-rx0
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: MIPI D-PHY ref clock
+      - description: MIPI D-PHY RX0 cfg clock
+      - description: Video in/out general register file clock
+
+  clock-names:
+    items:
+      - const: dphy-ref
+      - const: dphy-cfg
+      - const: grf
+
+  '#phy-cells':
+    const: 0
+
+  power-domains:
+    description: Video in/out power domain.
+    maxItems: 1
+
+required:
+  - compatible
+  - clocks
+  - clock-names
+  - '#phy-cells'
+  - power-domains
+
+additionalProperties: false
+
+examples:
+  - |
+
+    /*
+     * The MIPI D-PHY RX0 uses registers in the "general register files"
+     * (GRF), so it should be a child of the GRF node.
+     *
+     * grf: syscon@ff770000 {
+     *  compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
+     *  ...
+     * };
+     */
+
+    #include <dt-bindings/clock/rk3399-cru.h>
+    #include <dt-bindings/power/rk3399-power.h>
+
+    mipi_dphy_rx0: mipi-dphy-rx0 {
+        compatible = "rockchip,rk3399-mipi-dphy-rx0";
+        clocks = <&cru SCLK_MIPIDPHY_REF>,
+                 <&cru SCLK_DPHY_RX0_CFG>,
+                 <&cru PCLK_VIO_GRF>;
+        clock-names = "dphy-ref", "dphy-cfg", "grf";
+        power-domains = <&power RK3399_PD_VIO>;
+        #phy-cells = <0>;
+    };
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig b/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig
new file mode 100644
index 0000000..bd01476
--- /dev/null
+++ b/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config PHY_ROCKCHIP_DPHY_RX0
+	tristate "Rockchip MIPI Synopsys DPHY RX0 driver"
+	depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+	select GENERIC_PHY_MIPI_DPHY
+	select GENERIC_PHY
+	help
+	  Enable this to support the Rockchip MIPI Synopsys DPHY RX0
+	  associated with the Rockchip ISP module present in RK3399 SoCs.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called phy-rockchip-dphy-rx0.
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/Makefile b/drivers/staging/media/phy-rockchip-dphy-rx0/Makefile
new file mode 100644
index 0000000..507e5d0
--- /dev/null
+++ b/drivers/staging/media/phy-rockchip-dphy-rx0/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0)		+= phy-rockchip-dphy-rx0.o
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/TODO b/drivers/staging/media/phy-rockchip-dphy-rx0/TODO
new file mode 100644
index 0000000..ab612e5
--- /dev/null
+++ b/drivers/staging/media/phy-rockchip-dphy-rx0/TODO
@@ -0,0 +1,6 @@
+The main reason for keeping this in staging is that the only driver
+that uses it is rkisp1, which is also in staging. It should be moved together
+with rkisp1.
+
+Please CC patches to Linux Media <linux-media@vger.kernel.org> and
+Helen Koike <helen.koike@collabora.com>.
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
new file mode 100644
index 0000000..7c4df6d
--- /dev/null
+++ b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip MIPI Synopsys DPHY RX0 driver
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on:
+ *
+ * drivers/media/platform/rockchip/isp1/mipi_dphy_sy.c
+ * in https://chromium.googlesource.com/chromiumos/third_party/kernel,
+ * chromeos-4.4 branch.
+ *
+ * Copyright (C) 2017 Fuzhou Rockchip Electronics Co., Ltd.
+ *   Jacob Chen <jacob2.chen@rock-chips.com>
+ *   Shunqian Zheng <zhengsq@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define RK3399_GRF_SOC_CON9		0x6224
+#define RK3399_GRF_SOC_CON21		0x6254
+#define RK3399_GRF_SOC_CON22		0x6258
+#define RK3399_GRF_SOC_CON23		0x625c
+#define RK3399_GRF_SOC_CON24		0x6260
+#define RK3399_GRF_SOC_CON25		0x6264
+#define RK3399_GRF_SOC_STATUS1		0xe2a4
+
+#define CLOCK_LANE_HS_RX_CONTROL	0x34
+#define LANE0_HS_RX_CONTROL		0x44
+#define LANE1_HS_RX_CONTROL		0x54
+#define LANE2_HS_RX_CONTROL		0x84
+#define LANE3_HS_RX_CONTROL		0x94
+#define LANES_THS_SETTLE_CONTROL	0x75
+#define THS_SETTLE_COUNTER_THRESHOLD	0x04
+
+struct hsfreq_range {
+	u16 range_h;
+	u8 cfg_bit;
+};
+
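+/*
+ * Each entry gives the upper bound of the HS data rate (in Mbps) and the
+ * matching hsfreq_range value programmed into the lane HS RX control
+ * registers.
+ */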
+static const struct hsfreq_range rk3399_mipidphy_hsfreq_ranges[] = {
+	{   89, 0x00 }, {   99, 0x10 }, {  109, 0x20 }, {  129, 0x01 },
+	{  139, 0x11 }, {  149, 0x21 }, {  169, 0x02 }, {  179, 0x12 },
+	{  199, 0x22 }, {  219, 0x03 }, {  239, 0x13 }, {  249, 0x23 },
+	{  269, 0x04 }, {  299, 0x14 }, {  329, 0x05 }, {  359, 0x15 },
+	{  399, 0x25 }, {  449, 0x06 }, {  499, 0x16 }, {  549, 0x07 },
+	{  599, 0x17 }, {  649, 0x08 }, {  699, 0x18 }, {  749, 0x09 },
+	{  799, 0x19 }, {  849, 0x29 }, {  899, 0x39 }, {  949, 0x0a },
+	{  999, 0x1a }, { 1049, 0x2a }, { 1099, 0x3a }, { 1149, 0x0b },
+	{ 1199, 0x1b }, { 1249, 0x2b }, { 1299, 0x3b }, { 1349, 0x0c },
+	{ 1399, 0x1c }, { 1449, 0x2c }, { 1500, 0x3c }
+};
+
+static const char * const rk3399_mipidphy_clks[] = {
+	"dphy-ref",
+	"dphy-cfg",
+	"grf",
+};
+
+enum dphy_reg_id {
+	GRF_DPHY_RX0_TURNDISABLE = 0,
+	GRF_DPHY_RX0_FORCERXMODE,
+	GRF_DPHY_RX0_FORCETXSTOPMODE,
+	GRF_DPHY_RX0_ENABLE,
+	GRF_DPHY_RX0_TESTCLR,
+	GRF_DPHY_RX0_TESTCLK,
+	GRF_DPHY_RX0_TESTEN,
+	GRF_DPHY_RX0_TESTDIN,
+	GRF_DPHY_RX0_TURNREQUEST,
+	GRF_DPHY_RX0_TESTDOUT,
+	GRF_DPHY_TX0_TURNDISABLE,
+	GRF_DPHY_TX0_FORCERXMODE,
+	GRF_DPHY_TX0_FORCETXSTOPMODE,
+	GRF_DPHY_TX0_TURNREQUEST,
+	GRF_DPHY_TX1RX1_TURNDISABLE,
+	GRF_DPHY_TX1RX1_FORCERXMODE,
+	GRF_DPHY_TX1RX1_FORCETXSTOPMODE,
+	GRF_DPHY_TX1RX1_ENABLE,
+	GRF_DPHY_TX1RX1_MASTERSLAVEZ,
+	GRF_DPHY_TX1RX1_BASEDIR,
+	GRF_DPHY_TX1RX1_ENABLECLK,
+	GRF_DPHY_TX1RX1_TURNREQUEST,
+	GRF_DPHY_RX1_SRC_SEL,
+	/* rk3288 only */
+	GRF_CON_DISABLE_ISP,
+	GRF_CON_ISP_DPHY_SEL,
+	GRF_DSI_CSI_TESTBUS_SEL,
+	GRF_DVP_V18SEL,
+	/* below is for rk3399 only */
+	GRF_DPHY_RX0_CLK_INV_SEL,
+	GRF_DPHY_RX1_CLK_INV_SEL,
+};
+
+struct dphy_reg {
+	u16 offset;
+	u8 mask;
+	u8 shift;
+};
+
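+/* A bitfield inside a GRF register, described by offset, width and shift */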
+#define PHY_REG(_offset, _width, _shift) \
+	{ .offset = _offset, .mask = BIT(_width) - 1, .shift = _shift, }
+
+static const struct dphy_reg rk3399_grf_dphy_regs[] = {
+	[GRF_DPHY_RX0_TURNREQUEST] = PHY_REG(RK3399_GRF_SOC_CON9, 4, 0),
+	[GRF_DPHY_RX0_CLK_INV_SEL] = PHY_REG(RK3399_GRF_SOC_CON9, 1, 10),
+	[GRF_DPHY_RX1_CLK_INV_SEL] = PHY_REG(RK3399_GRF_SOC_CON9, 1, 11),
+	[GRF_DPHY_RX0_ENABLE] = PHY_REG(RK3399_GRF_SOC_CON21, 4, 0),
+	[GRF_DPHY_RX0_FORCERXMODE] = PHY_REG(RK3399_GRF_SOC_CON21, 4, 4),
+	[GRF_DPHY_RX0_FORCETXSTOPMODE] = PHY_REG(RK3399_GRF_SOC_CON21, 4, 8),
+	[GRF_DPHY_RX0_TURNDISABLE] = PHY_REG(RK3399_GRF_SOC_CON21, 4, 12),
+	[GRF_DPHY_TX0_FORCERXMODE] = PHY_REG(RK3399_GRF_SOC_CON22, 4, 0),
+	[GRF_DPHY_TX0_FORCETXSTOPMODE] = PHY_REG(RK3399_GRF_SOC_CON22, 4, 4),
+	[GRF_DPHY_TX0_TURNDISABLE] = PHY_REG(RK3399_GRF_SOC_CON22, 4, 8),
+	[GRF_DPHY_TX0_TURNREQUEST] = PHY_REG(RK3399_GRF_SOC_CON22, 4, 12),
+	[GRF_DPHY_TX1RX1_ENABLE] = PHY_REG(RK3399_GRF_SOC_CON23, 4, 0),
+	[GRF_DPHY_TX1RX1_FORCERXMODE] = PHY_REG(RK3399_GRF_SOC_CON23, 4, 4),
+	[GRF_DPHY_TX1RX1_FORCETXSTOPMODE] = PHY_REG(RK3399_GRF_SOC_CON23, 4, 8),
+	[GRF_DPHY_TX1RX1_TURNDISABLE] = PHY_REG(RK3399_GRF_SOC_CON23, 4, 12),
+	[GRF_DPHY_TX1RX1_TURNREQUEST] = PHY_REG(RK3399_GRF_SOC_CON24, 4, 0),
+	[GRF_DPHY_RX1_SRC_SEL] = PHY_REG(RK3399_GRF_SOC_CON24, 1, 4),
+	[GRF_DPHY_TX1RX1_BASEDIR] = PHY_REG(RK3399_GRF_SOC_CON24, 1, 5),
+	[GRF_DPHY_TX1RX1_ENABLECLK] = PHY_REG(RK3399_GRF_SOC_CON24, 1, 6),
+	[GRF_DPHY_TX1RX1_MASTERSLAVEZ] = PHY_REG(RK3399_GRF_SOC_CON24, 1, 7),
+	[GRF_DPHY_RX0_TESTDIN] = PHY_REG(RK3399_GRF_SOC_CON25, 8, 0),
+	[GRF_DPHY_RX0_TESTEN] = PHY_REG(RK3399_GRF_SOC_CON25, 1, 8),
+	[GRF_DPHY_RX0_TESTCLK] = PHY_REG(RK3399_GRF_SOC_CON25, 1, 9),
+	[GRF_DPHY_RX0_TESTCLR] = PHY_REG(RK3399_GRF_SOC_CON25, 1, 10),
+	[GRF_DPHY_RX0_TESTDOUT] = PHY_REG(RK3399_GRF_SOC_STATUS1, 8, 0),
+};
+
+struct rk_dphy_drv_data {
+	const char * const *clks;
+	unsigned int num_clks;
+	const struct hsfreq_range *hsfreq_ranges;
+	unsigned int num_hsfreq_ranges;
+	const struct dphy_reg *regs;
+};
+
+struct rk_dphy {
+	struct device *dev;
+	struct regmap *grf;
+	struct clk_bulk_data *clks;
+
+	const struct rk_dphy_drv_data *drv_data;
+	struct phy_configure_opts_mipi_dphy config;
+
+	u8 hsfreq;
+};
+
+static inline void rk_dphy_write_grf(struct rk_dphy *priv,
+				     unsigned int index, u8 value)
+{
+	const struct dphy_reg *reg = &priv->drv_data->regs[index];
+	/* Update high word */
+	unsigned int val = (value << reg->shift) |
+			   (reg->mask << (reg->shift + 16));
+
+	if (WARN_ON(!reg->offset))
+		return;
+	regmap_write(priv->grf, reg->offset, val);
+}
+
+static void rk_dphy_write(struct rk_dphy *priv, u8 test_code, u8 test_data)
+{
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTDIN, test_code);
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTEN, 1);
+	/*
+	 * With the falling edge on TESTCLK, the TESTDIN[7:0] signal content
+	 * is latched internally as the current test code. Test data is
+	 * programmed internally by rising edge on TESTCLK.
+	 * This code assumes that TESTCLK is already 1.
+	 */
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTCLK, 0);
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTEN, 0);
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTDIN, test_data);
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTCLK, 1);
+}
+
+static void rk_dphy_enable(struct rk_dphy *priv)
+{
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_FORCERXMODE, 0);
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_FORCETXSTOPMODE, 0);
+
+	/* Disable lane turn around, which is ignored in receive mode */
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_TURNREQUEST, 0);
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_TURNDISABLE, 0xf);
+
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_ENABLE,
+			  GENMASK(priv->config.lanes - 1, 0));
+
+	/* dphy start */
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTCLK, 1);
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTCLR, 1);
+	usleep_range(100, 150);
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTCLR, 0);
+	usleep_range(100, 150);
+
+	/* set clock lane */
+	/* HS hsfreq_range & lane 0  settle bypass */
+	rk_dphy_write(priv, CLOCK_LANE_HS_RX_CONTROL, 0);
+	/* HS RX Control of lane0 */
+	rk_dphy_write(priv, LANE0_HS_RX_CONTROL, priv->hsfreq << 1);
+	/* HS RX Control of lane1 */
+	rk_dphy_write(priv, LANE1_HS_RX_CONTROL, priv->hsfreq << 1);
+	/* HS RX Control of lane2 */
+	rk_dphy_write(priv, LANE2_HS_RX_CONTROL, priv->hsfreq << 1);
+	/* HS RX Control of lane3 */
+	rk_dphy_write(priv, LANE3_HS_RX_CONTROL, priv->hsfreq << 1);
+	/* HS RX Data Lanes Settle State Time Control */
+	rk_dphy_write(priv, LANES_THS_SETTLE_CONTROL,
+		      THS_SETTLE_COUNTER_THRESHOLD);
+
+	/* Normal operation */
+	rk_dphy_write(priv, 0x0, 0);
+}
+
+static int rk_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+	struct rk_dphy *priv = phy_get_drvdata(phy);
+	const struct rk_dphy_drv_data *drv_data = priv->drv_data;
+	struct phy_configure_opts_mipi_dphy *config = &opts->mipi_dphy;
+	unsigned int hsfreq = 0;
+	unsigned int i;
+	u64 data_rate_mbps;
+	int ret;
+
+	/* pass with phy_mipi_dphy_get_default_config (with pixel rate?) */
+	ret = phy_mipi_dphy_config_validate(config);
+	if (ret)
+		return ret;
+
+	data_rate_mbps = div_u64(config->hs_clk_rate, 1000 * 1000);
+
+	dev_dbg(priv->dev, "lanes %d - data_rate_mbps %llu\n",
+		config->lanes, data_rate_mbps);
+	for (i = 0; i < drv_data->num_hsfreq_ranges; i++) {
+		if (drv_data->hsfreq_ranges[i].range_h >= data_rate_mbps) {
+			hsfreq = drv_data->hsfreq_ranges[i].cfg_bit;
+			break;
+		}
+	}
+	if (!hsfreq)
+		return -EINVAL;
+
+	priv->hsfreq = hsfreq;
+	priv->config = *config;
+	return 0;
+}
+
+static int rk_dphy_power_on(struct phy *phy)
+{
+	struct rk_dphy *priv = phy_get_drvdata(phy);
+	int ret;
+
+	ret = clk_bulk_enable(priv->drv_data->num_clks, priv->clks);
+	if (ret)
+		return ret;
+
+	rk_dphy_enable(priv);
+
+	return 0;
+}
+
+static int rk_dphy_power_off(struct phy *phy)
+{
+	struct rk_dphy *priv = phy_get_drvdata(phy);
+
+	rk_dphy_write_grf(priv, GRF_DPHY_RX0_ENABLE, 0);
+	clk_bulk_disable(priv->drv_data->num_clks, priv->clks);
+	return 0;
+}
+
+static int rk_dphy_init(struct phy *phy)
+{
+	struct rk_dphy *priv = phy_get_drvdata(phy);
+
+	return clk_bulk_prepare(priv->drv_data->num_clks, priv->clks);
+}
+
+static int rk_dphy_exit(struct phy *phy)
+{
+	struct rk_dphy *priv = phy_get_drvdata(phy);
+
+	clk_bulk_unprepare(priv->drv_data->num_clks, priv->clks);
+	return 0;
+}
+
+static const struct phy_ops rk_dphy_ops = {
+	.power_on	= rk_dphy_power_on,
+	.power_off	= rk_dphy_power_off,
+	.init		= rk_dphy_init,
+	.exit		= rk_dphy_exit,
+	.configure	= rk_dphy_configure,
+	.owner		= THIS_MODULE,
+};
+
+static const struct rk_dphy_drv_data rk3399_mipidphy_drv_data = {
+	.clks = rk3399_mipidphy_clks,
+	.num_clks = ARRAY_SIZE(rk3399_mipidphy_clks),
+	.hsfreq_ranges = rk3399_mipidphy_hsfreq_ranges,
+	.num_hsfreq_ranges = ARRAY_SIZE(rk3399_mipidphy_hsfreq_ranges),
+	.regs = rk3399_grf_dphy_regs,
+};
+
+static const struct of_device_id rk_dphy_dt_ids[] = {
+	{
+		.compatible = "rockchip,rk3399-mipi-dphy-rx0",
+		.data = &rk3399_mipidphy_drv_data,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, rk_dphy_dt_ids);
+
+static int rk_dphy_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	const struct rk_dphy_drv_data *drv_data;
+	struct phy_provider *phy_provider;
+	const struct of_device_id *of_id;
+	struct rk_dphy *priv;
+	struct phy *phy;
+	unsigned int i;
+	int ret;
+
+	if (!dev->parent || !dev->parent->of_node)
+		return -ENODEV;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	priv->dev = dev;
+
+	priv->grf = syscon_node_to_regmap(dev->parent->of_node);
+	if (IS_ERR(priv->grf)) {
+		dev_err(dev, "Can't find GRF syscon\n");
+		return -ENODEV;
+	}
+
+	of_id = of_match_device(rk_dphy_dt_ids, dev);
+	if (!of_id)
+		return -EINVAL;
+
+	drv_data = of_id->data;
+	priv->drv_data = drv_data;
+	priv->clks = devm_kcalloc(&pdev->dev, drv_data->num_clks,
+				  sizeof(*priv->clks), GFP_KERNEL);
+	if (!priv->clks)
+		return -ENOMEM;
+	for (i = 0; i < drv_data->num_clks; i++)
+		priv->clks[i].id = drv_data->clks[i];
+	ret = devm_clk_bulk_get(&pdev->dev, drv_data->num_clks, priv->clks);
+	if (ret)
+		return ret;
+
+	phy = devm_phy_create(dev, np, &rk_dphy_ops);
+	if (IS_ERR(phy)) {
+		dev_err(dev, "failed to create phy\n");
+		return PTR_ERR(phy);
+	}
+	phy_set_drvdata(phy, priv);
+
+	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+	return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver rk_dphy_driver = {
+	.probe = rk_dphy_probe,
+	.driver = {
+		.name	= "rockchip-mipi-dphy-rx0",
+		.of_match_table = rk_dphy_dt_ids,
+	},
+};
+module_platform_driver(rk_dphy_driver);
+
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
+MODULE_DESCRIPTION("Rockchip MIPI Synopsys DPHY RX0 driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/staging/media/rkisp1/Documentation/devicetree/bindings/media/rockchip-isp1.yaml b/drivers/staging/media/rkisp1/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
new file mode 100644
index 0000000..af246b7
--- /dev/null
+++ b/drivers/staging/media/rkisp1/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
@@ -0,0 +1,192 @@
+# SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/rockchip-isp1.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip SoC Image Signal Processing unit v1
+
+maintainers:
+  - Helen Koike <helen.koike@collabora.com>
+
+description: |
+  Rockchip ISP1 is the Camera interface for the Rockchip series of SoCs
+  which contains image processing, scaling, and compression functions.
+
+properties:
+  compatible:
+    const: rockchip,rk3399-cif-isp
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  iommus:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+  phys:
+    maxItems: 1
+    description: phandle for the PHY port
+
+  phy-names:
+    const: dphy
+
+  clocks:
+    items:
+      - description: ISP clock
+      - description: ISP AXI clock
+      - description: ISP AXI wrapper clock
+      - description: ISP AHB clock
+      - description: ISP AHB wrapper clock
+
+  clock-names:
+    items:
+      - const: clk_isp
+      - const: aclk_isp
+      - const: aclk_isp_wrap
+      - const: hclk_isp
+      - const: hclk_isp_wrap
+
+  # See ./video-interfaces.txt for details
+  ports:
+    type: object
+    additionalProperties: false
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        description: connection point for sensors at MIPI-DPHY RX0
+        additionalProperties: false
+
+        properties:
+          "#address-cells":
+            const: 1
+
+          "#size-cells":
+            const: 0
+
+          reg:
+            const: 0
+
+        patternProperties:
+          endpoint:
+            type: object
+            additionalProperties: false
+
+            properties:
+              reg:
+                maxItems: 1
+
+              data-lanes:
+                minItems: 1
+                maxItems: 4
+
+              remote-endpoint: true
+
+    required:
+      - port@0
+
+required:
+  - compatible
+  - interrupts
+  - clocks
+  - clock-names
+  - power-domains
+  - iommus
+  - phys
+  - phy-names
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+
+    #include <dt-bindings/clock/rk3399-cru.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/power/rk3399-power.h>
+
+    parent0: parent@0 {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        isp0: isp0@ff910000 {
+            compatible = "rockchip,rk3399-cif-isp";
+            reg = <0x0 0xff910000 0x0 0x4000>;
+            interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH 0>;
+            clocks = <&cru SCLK_ISP0>,
+                     <&cru ACLK_ISP0>, <&cru ACLK_ISP0_WRAPPER>,
+                     <&cru HCLK_ISP0>, <&cru HCLK_ISP0_WRAPPER>;
+            clock-names = "clk_isp",
+                          "aclk_isp", "aclk_isp_wrap",
+                          "hclk_isp", "hclk_isp_wrap";
+            power-domains = <&power RK3399_PD_ISP0>;
+            iommus = <&isp0_mmu>;
+            phys = <&dphy>;
+            phy-names = "dphy";
+
+            ports {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                port@0 {
+                    #address-cells = <1>;
+                    #size-cells = <0>;
+                    reg = <0>;
+
+                    mipi_in_wcam: endpoint@0 {
+                        reg = <0>;
+                        remote-endpoint = <&wcam_out>;
+                        data-lanes = <1 2>;
+                    };
+
+                    mipi_in_ucam: endpoint@1 {
+                        reg = <1>;
+                        remote-endpoint = <&ucam_out>;
+                        data-lanes = <1>;
+                    };
+                };
+            };
+        };
+
+        i2c7: i2c@ff160000 {
+            clock-frequency = <400000>;
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            wcam: camera@36 {
+                compatible = "ovti,ov5695";
+                reg = <0x36>;
+
+                port {
+                    wcam_out: endpoint {
+                        remote-endpoint = <&mipi_in_wcam>;
+                        data-lanes = <1 2>;
+                    };
+                };
+            };
+
+            ucam: camera@3c {
+                compatible = "ovti,ov2685";
+                reg = <0x3c>;
+
+                port {
+                    ucam_out: endpoint {
+                        remote-endpoint = <&mipi_in_ucam>;
+                        data-lanes = <1>;
+                    };
+                };
+            };
+        };
+    };
diff --git a/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-params.rst b/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-params.rst
new file mode 100644
index 0000000..32034e4
--- /dev/null
+++ b/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-params.rst
@@ -0,0 +1,23 @@
+.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+.. _v4l2-meta-fmt-rkisp1-params:
+
+============================
+V4L2_META_FMT_RK_ISP1_PARAMS
+============================
+
+Rockchip ISP1 Parameters Data
+
+Description
+===========
+
+This format describes input parameters for the Rockchip ISP1.
+
+It uses c-struct :c:type:`rkisp1_params_cfg`, which is defined in
+the ``linux/rkisp1-config.h`` header file.
+
+The parameters are organized into multiple modules.
+A module is only updated if its corresponding bit is set in module_*_update.
+
+.. kernel-doc:: include/uapi/linux/rkisp1-config.h
+   :functions: rkisp1_params_cfg
diff --git a/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-stat.rst b/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-stat.rst
new file mode 100644
index 0000000..4ad303f
--- /dev/null
+++ b/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-stat.rst
@@ -0,0 +1,22 @@
+.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+.. _v4l2-meta-fmt-rkisp1-stat:
+
+=============================
+V4L2_META_FMT_RK_ISP1_STAT_3A
+=============================
+
+
+Rockchip ISP1 Statistics Data
+
+Description
+===========
+
+This format describes image color statistics information generated by the Rockchip
+ISP1.
+
+It uses c-struct :c:type:`rkisp1_stat_buffer`, which is defined in
+the ``linux/rkisp1-config.h`` header file.
+
+.. kernel-doc:: include/uapi/linux/rkisp1-config.h
+   :functions: rkisp1_stat_buffer
diff --git a/drivers/staging/media/rkisp1/Kconfig b/drivers/staging/media/rkisp1/Kconfig
new file mode 100644
index 0000000..b859a49
--- /dev/null
+++ b/drivers/staging/media/rkisp1/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_ROCKCHIP_ISP1
+	tristate "Rockchip Image Signal Processing v1 Unit driver"
+	depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+	depends on ARCH_ROCKCHIP || COMPILE_TEST
+	select VIDEOBUF2_DMA_CONTIG
+	select VIDEOBUF2_VMALLOC
+	select V4L2_FWNODE
+	select PHY_ROCKCHIP_DPHY_RX0
+	default n
+	help
+	  Enable this to support the Image Signal Processing (ISP) module
+	  present in RK3399 SoCs.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called rockchip-isp1.
diff --git a/drivers/staging/media/rkisp1/Makefile b/drivers/staging/media/rkisp1/Makefile
new file mode 100644
index 0000000..69ca59c
--- /dev/null
+++ b/drivers/staging/media/rkisp1/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_VIDEO_ROCKCHIP_ISP1) += rockchip-isp1.o
+rockchip-isp1-objs += 	rkisp1-capture.o \
+			rkisp1-common.o \
+			rkisp1-dev.o \
+			rkisp1-isp.o \
+			rkisp1-resizer.o \
+			rkisp1-stats.o \
+			rkisp1-params.o
diff --git a/drivers/staging/media/rkisp1/TODO b/drivers/staging/media/rkisp1/TODO
new file mode 100644
index 0000000..03cd9a4
--- /dev/null
+++ b/drivers/staging/media/rkisp1/TODO
@@ -0,0 +1,23 @@
+* Fix serialization on subdev ops.
+* Don't use v4l2_async_notifier_parse_fwnode_endpoints_by_port().
+See, e.g., isp_parse_of_endpoints in drivers/media/platform/omap3isp/isp.c
+and cio2_parse_firmware in drivers/media/pci/intel/ipu3/ipu3-cio2.c.
+* Fix pad format size for statistics and parameters entities.
+* Use threaded interrupt for rkisp1_stats_isr(), remove work queue.
+* Fix checkpatch errors.
+* Make sure uapi structs have the same size and layout in 32-bit and 64-bit,
+and that there are no holes in the structures (pahole is a utility that
+can be used to test this).
+* Review and comment every lock
+* Handle quantization
+* Document rkisp1-common.h
+* The streaming paths (mainpath and selfpath) check whether the other path is
+streaming in several places in the code. Review this, especially since it does
+not seem that streaming from both paths at the same time is supported.
+
+NOTES:
+* All v4l2-compliance tests must pass.
+* Stats and params can be tested with libcamera and the ChromiumOS stack.
+
+Please CC patches to Linux Media <linux-media@vger.kernel.org> and
+Helen Koike <helen.koike@collabora.com>.
diff --git a/drivers/staging/media/rkisp1/rkisp1-capture.c b/drivers/staging/media/rkisp1/rkisp1-capture.c
new file mode 100644
index 0000000..524e0dd
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-capture.c
@@ -0,0 +1,1437 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - V4l capture device
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "rkisp1-common.h"
+
+/*
+ * NOTE: There are two capture video devices in rkisp1, selfpath and mainpath.
+ *
+ * Differences between selfpath and mainpath:
+ * available mp sink input: isp
+ * available sp sink input: isp, dma (TODO)
+ * available mp sink pad fmts: yuv422, raw
+ * available sp sink pad fmts: yuv422, yuv420, ...
+ * available mp source fmts: yuv, raw, jpeg (TODO)
+ * available sp source fmts: yuv, rgb
+ */
+
+#define RKISP1_SP_DEV_NAME	RKISP1_DRIVER_NAME "_selfpath"
+#define RKISP1_MP_DEV_NAME	RKISP1_DRIVER_NAME "_mainpath"
+
+#define RKISP1_MIN_BUFFERS_NEEDED 3
+
+enum rkisp1_plane {
+	RKISP1_PLANE_Y	= 0,
+	RKISP1_PLANE_CB	= 1,
+	RKISP1_PLANE_CR	= 2
+};
+
+/*
+ * @fourcc: pixel format
+ * @fmt_type: helper field for the pixel format
+ * @uv_swap: set if Cb and Cr are swapped (for YUV formats)
+ * @write_format: defines how YCbCr self picture data is written to memory
+ * @output_format: defines sp output format
+ */
+struct rkisp1_capture_fmt_cfg {
+	u32 fourcc;
+	u8 fmt_type;
+	u8 uv_swap;
+	u32 write_format;
+	u32 output_format;
+};
+
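+/*
+ * Stream operations that are implemented differently by the main picture
+ * and self picture paths.
+ */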
+struct rkisp1_capture_ops {
+	void (*config)(struct rkisp1_capture *cap);
+	void (*stop)(struct rkisp1_capture *cap);
+	void (*enable)(struct rkisp1_capture *cap);
+	void (*disable)(struct rkisp1_capture *cap);
+	void (*set_data_path)(struct rkisp1_capture *cap);
+	bool (*is_stopped)(struct rkisp1_capture *cap);
+};
+
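+/*
+ * Per-path capture configuration: the supported pixel formats and the
+ * memory interface (MI) registers used to program buffer sizes, base
+ * addresses and offsets.
+ */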
+struct rkisp1_capture_config {
+	const struct rkisp1_capture_fmt_cfg *fmts;
+	int fmt_size;
+	struct {
+		u32 y_size_init;
+		u32 cb_size_init;
+		u32 cr_size_init;
+		u32 y_base_ad_init;
+		u32 cb_base_ad_init;
+		u32 cr_base_ad_init;
+		u32 y_offs_cnt_init;
+		u32 cb_offs_cnt_init;
+		u32 cr_offs_cnt_init;
+	} mi;
+};
+
+static const struct rkisp1_capture_fmt_cfg rkisp1_mp_fmts[] = {
+	/* yuv422 */
+	{
+		.fourcc = V4L2_PIX_FMT_YUYV,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YVYU,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 1,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
+	}, {
+		.fourcc = V4L2_PIX_FMT_VYUY,
+		.fmt_type = RKISP1_FMT_YUV,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YUV422P,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+	}, {
+		.fourcc = V4L2_PIX_FMT_NV16,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+	}, {
+		.fourcc = V4L2_PIX_FMT_NV61,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 1,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YVU422M,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 1,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+	},
+	/* yuv420 */
+	{
+		.fourcc = V4L2_PIX_FMT_NV21,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 1,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+	}, {
+		.fourcc = V4L2_PIX_FMT_NV12,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+	}, {
+		.fourcc = V4L2_PIX_FMT_NV21M,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 1,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+	}, {
+		.fourcc = V4L2_PIX_FMT_NV12M,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YUV420,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YVU420,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 1,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+	},
+	/* yuv444 */
+	{
+		.fourcc = V4L2_PIX_FMT_YUV444M,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+	},
+	/* yuv400 */
+	{
+		.fourcc = V4L2_PIX_FMT_GREY,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
+	},
+	/* raw */
+	{
+		.fourcc = V4L2_PIX_FMT_SRGGB8,
+		.fmt_type = RKISP1_FMT_BAYER,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+	}, {
+		.fourcc = V4L2_PIX_FMT_SGRBG8,
+		.fmt_type = RKISP1_FMT_BAYER,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+	}, {
+		.fourcc = V4L2_PIX_FMT_SGBRG8,
+		.fmt_type = RKISP1_FMT_BAYER,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+	}, {
+		.fourcc = V4L2_PIX_FMT_SBGGR8,
+		.fmt_type = RKISP1_FMT_BAYER,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+	}, {
+		.fourcc = V4L2_PIX_FMT_SRGGB10,
+		.fmt_type = RKISP1_FMT_BAYER,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+	}, {
+		.fourcc = V4L2_PIX_FMT_SGRBG10,
+		.fmt_type = RKISP1_FMT_BAYER,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+	}, {
+		.fourcc = V4L2_PIX_FMT_SGBRG10,
+		.fmt_type = RKISP1_FMT_BAYER,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+	}, {
+		.fourcc = V4L2_PIX_FMT_SBGGR10,
+		.fmt_type = RKISP1_FMT_BAYER,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+	}, {
+		.fourcc = V4L2_PIX_FMT_SRGGB12,
+		.fmt_type = RKISP1_FMT_BAYER,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+	}, {
+		.fourcc = V4L2_PIX_FMT_SGRBG12,
+		.fmt_type = RKISP1_FMT_BAYER,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+	}, {
+		.fourcc = V4L2_PIX_FMT_SGBRG12,
+		.fmt_type = RKISP1_FMT_BAYER,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+	}, {
+		.fourcc = V4L2_PIX_FMT_SBGGR12,
+		.fmt_type = RKISP1_FMT_BAYER,
+		.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+	},
+};
+
+static const struct rkisp1_capture_fmt_cfg rkisp1_sp_fmts[] = {
+	/* yuv422 */
+	{
+		.fourcc = V4L2_PIX_FMT_YUYV,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YVYU,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 1,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+	}, {
+		.fourcc = V4L2_PIX_FMT_VYUY,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 1,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YUV422P,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+	}, {
+		.fourcc = V4L2_PIX_FMT_NV16,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+	}, {
+		.fourcc = V4L2_PIX_FMT_NV61,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 1,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YVU422M,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 1,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+	},
+	/* yuv420 */
+	{
+		.fourcc = V4L2_PIX_FMT_NV21,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 1,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+	}, {
+		.fourcc = V4L2_PIX_FMT_NV12,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+	}, {
+		.fourcc = V4L2_PIX_FMT_NV21M,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 1,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+	}, {
+		.fourcc = V4L2_PIX_FMT_NV12M,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YUV420,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YVU420,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 1,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+	},
+	/* yuv444 */
+	{
+		.fourcc = V4L2_PIX_FMT_YUV444M,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV444,
+	},
+	/* yuv400 */
+	{
+		.fourcc = V4L2_PIX_FMT_GREY,
+		.fmt_type = RKISP1_FMT_YUV,
+		.uv_swap = 0,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV400,
+	},
+	/* rgb */
+	{
+		.fourcc = V4L2_PIX_FMT_RGB24,
+		.fmt_type = RKISP1_FMT_RGB,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB888,
+	}, {
+		.fourcc = V4L2_PIX_FMT_RGB565,
+		.fmt_type = RKISP1_FMT_RGB,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB565,
+	}, {
+		.fourcc = V4L2_PIX_FMT_BGR666,
+		.fmt_type = RKISP1_FMT_RGB,
+		.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+		.output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB666,
+	},
+};
+
+static const struct rkisp1_capture_config rkisp1_capture_config_mp = {
+	.fmts = rkisp1_mp_fmts,
+	.fmt_size = ARRAY_SIZE(rkisp1_mp_fmts),
+	.mi = {
+		.y_size_init =		RKISP1_CIF_MI_MP_Y_SIZE_INIT,
+		.cb_size_init =		RKISP1_CIF_MI_MP_CB_SIZE_INIT,
+		.cr_size_init =		RKISP1_CIF_MI_MP_CR_SIZE_INIT,
+		.y_base_ad_init =	RKISP1_CIF_MI_MP_Y_BASE_AD_INIT,
+		.cb_base_ad_init =	RKISP1_CIF_MI_MP_CB_BASE_AD_INIT,
+		.cr_base_ad_init =	RKISP1_CIF_MI_MP_CR_BASE_AD_INIT,
+		.y_offs_cnt_init =	RKISP1_CIF_MI_MP_Y_OFFS_CNT_INIT,
+		.cb_offs_cnt_init =	RKISP1_CIF_MI_MP_CB_OFFS_CNT_INIT,
+		.cr_offs_cnt_init =	RKISP1_CIF_MI_MP_CR_OFFS_CNT_INIT,
+	},
+};
+
+static const struct rkisp1_capture_config rkisp1_capture_config_sp = {
+	.fmts = rkisp1_sp_fmts,
+	.fmt_size = ARRAY_SIZE(rkisp1_sp_fmts),
+	.mi = {
+		.y_size_init =		RKISP1_CIF_MI_SP_Y_SIZE_INIT,
+		.cb_size_init =		RKISP1_CIF_MI_SP_CB_SIZE_INIT,
+		.cr_size_init =		RKISP1_CIF_MI_SP_CR_SIZE_INIT,
+		.y_base_ad_init =	RKISP1_CIF_MI_SP_Y_BASE_AD_INIT,
+		.cb_base_ad_init =	RKISP1_CIF_MI_SP_CB_BASE_AD_INIT,
+		.cr_base_ad_init =	RKISP1_CIF_MI_SP_CR_BASE_AD_INIT,
+		.y_offs_cnt_init =	RKISP1_CIF_MI_SP_Y_OFFS_CNT_INIT,
+		.cb_offs_cnt_init =	RKISP1_CIF_MI_SP_CB_OFFS_CNT_INIT,
+		.cr_offs_cnt_init =	RKISP1_CIF_MI_SP_CR_OFFS_CNT_INIT,
+	},
+};
+
+static inline struct rkisp1_vdev_node *
+rkisp1_vdev_to_node(struct video_device *vdev)
+{
+	return container_of(vdev, struct rkisp1_vdev_node, vdev);
+}
+
+/* ----------------------------------------------------------------------------
+ * Stream operations for self-picture path (sp) and main-picture path (mp)
+ */
+
+static void rkisp1_mi_config_ctrl(struct rkisp1_capture *cap)
+{
+	u32 mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
+
+	mi_ctrl &= ~GENMASK(17, 16);
+	mi_ctrl |= RKISP1_CIF_MI_CTRL_BURST_LEN_LUM_64;
+
+	mi_ctrl &= ~GENMASK(19, 18);
+	mi_ctrl |= RKISP1_CIF_MI_CTRL_BURST_LEN_CHROM_64;
+
+	mi_ctrl |= RKISP1_CIF_MI_CTRL_INIT_BASE_EN |
+		   RKISP1_CIF_MI_CTRL_INIT_OFFSET_EN;
+
+	rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+}
+
+static u32 rkisp1_pixfmt_comp_size(const struct v4l2_pix_format_mplane *pixm,
+				   unsigned int component)
+{
+	/*
+	 * For packed formats, plane_fmt[0].sizeimage is the sum of all
+	 * components, so only the size of the Y component is computed here.
+	 * See rkisp1_fill_pixfmt().
+	 */
+	if (!component && pixm->num_planes == 1)
+		return pixm->plane_fmt[0].bytesperline * pixm->height;
+	return pixm->plane_fmt[component].sizeimage;
+}
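+
+/*
+ * Worked example (assumed values): for a packed, single-memory-plane
+ * 800x600 V4L2_PIX_FMT_YUV420 buffer with plane_fmt[0].bytesperline = 800,
+ * the Y component size returned here is 800 * 600 = 480000 bytes, while
+ * plane_fmt[0].sizeimage (720000 bytes) covers all three components. For
+ * multi-planar formats the per-plane sizeimage is returned directly.
+ */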
+
+static void rkisp1_irq_frame_end_enable(struct rkisp1_capture *cap)
+{
+	u32 mi_imsc = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_IMSC);
+
+	mi_imsc |= RKISP1_CIF_MI_FRAME(cap);
+	rkisp1_write(cap->rkisp1, mi_imsc, RKISP1_CIF_MI_IMSC);
+}
+
+static void rkisp1_mp_config(struct rkisp1_capture *cap)
+{
+	const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
+	struct rkisp1_device *rkisp1 = cap->rkisp1;
+	u32 reg;
+
+	rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y),
+		     cap->config->mi.y_size_init);
+	rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB),
+		     cap->config->mi.cb_size_init);
+	rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR),
+		     cap->config->mi.cr_size_init);
+
+	rkisp1_irq_frame_end_enable(cap);
+	if (cap->pix.cfg->uv_swap) {
+		reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
+
+		reg = (reg & ~BIT(0)) |
+		      RKISP1_CIF_MI_XTD_FMT_CTRL_MP_CB_CR_SWAP;
+		rkisp1_write(rkisp1, reg, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
+	}
+
+	rkisp1_mi_config_ctrl(cap);
+
+	reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_CTRL);
+	reg &= ~RKISP1_MI_CTRL_MP_FMT_MASK;
+	reg |= cap->pix.cfg->write_format;
+	rkisp1_write(rkisp1, reg, RKISP1_CIF_MI_CTRL);
+
+	reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_CTRL);
+	reg |= RKISP1_CIF_MI_MP_AUTOUPDATE_ENABLE;
+	rkisp1_write(rkisp1, reg, RKISP1_CIF_MI_CTRL);
+}
+
+static void rkisp1_sp_config(struct rkisp1_capture *cap)
+{
+	const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
+	struct rkisp1_device *rkisp1 = cap->rkisp1;
+	u32 mi_ctrl;
+
+	rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y),
+		     cap->config->mi.y_size_init);
+	rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB),
+		     cap->config->mi.cb_size_init);
+	rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR),
+		     cap->config->mi.cr_size_init);
+
+	rkisp1_write(rkisp1, pixm->width, RKISP1_CIF_MI_SP_Y_PIC_WIDTH);
+	rkisp1_write(rkisp1, pixm->height, RKISP1_CIF_MI_SP_Y_PIC_HEIGHT);
+	rkisp1_write(rkisp1, cap->sp_y_stride, RKISP1_CIF_MI_SP_Y_LLENGTH);
+
+	rkisp1_irq_frame_end_enable(cap);
+	if (cap->pix.cfg->uv_swap) {
+		u32 reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
+
+		rkisp1_write(rkisp1, reg & ~BIT(1),
+			     RKISP1_CIF_MI_XTD_FORMAT_CTRL);
+	}
+
+	rkisp1_mi_config_ctrl(cap);
+
+	mi_ctrl = rkisp1_read(rkisp1, RKISP1_CIF_MI_CTRL);
+	mi_ctrl &= ~RKISP1_MI_CTRL_SP_FMT_MASK;
+	mi_ctrl |= cap->pix.cfg->write_format |
+		   RKISP1_MI_CTRL_SP_INPUT_YUV422 |
+		   cap->pix.cfg->output_format |
+		   RKISP1_CIF_MI_SP_AUTOUPDATE_ENABLE;
+	rkisp1_write(rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+}
+
+static void rkisp1_mp_disable(struct rkisp1_capture *cap)
+{
+	u32 mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
+
+	mi_ctrl &= ~(RKISP1_CIF_MI_CTRL_MP_ENABLE |
+		     RKISP1_CIF_MI_CTRL_RAW_ENABLE);
+	rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+}
+
+static void rkisp1_sp_disable(struct rkisp1_capture *cap)
+{
+	u32 mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
+
+	mi_ctrl &= ~RKISP1_CIF_MI_CTRL_SP_ENABLE;
+	rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+}
+
+static void rkisp1_mp_enable(struct rkisp1_capture *cap)
+{
+	const struct rkisp1_capture_fmt_cfg *isp_fmt = cap->pix.cfg;
+	u32 mi_ctrl;
+
+	rkisp1_mp_disable(cap);
+
+	mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
+	if (isp_fmt->fmt_type == RKISP1_FMT_BAYER)
+		mi_ctrl |= RKISP1_CIF_MI_CTRL_RAW_ENABLE;
+	/* YUV */
+	else
+		mi_ctrl |= RKISP1_CIF_MI_CTRL_MP_ENABLE;
+
+	rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+}
+
+static void rkisp1_sp_enable(struct rkisp1_capture *cap)
+{
+	u32 mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
+
+	mi_ctrl |= RKISP1_CIF_MI_CTRL_SP_ENABLE;
+	rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+}
+
+static void rkisp1_mp_sp_stop(struct rkisp1_capture *cap)
+{
+	if (!cap->is_streaming)
+		return;
+	rkisp1_write(cap->rkisp1,
+		     RKISP1_CIF_MI_FRAME(cap), RKISP1_CIF_MI_ICR);
+	cap->ops->disable(cap);
+}
+
+static bool rkisp1_mp_is_stopped(struct rkisp1_capture *cap)
+{
+	u32 en = RKISP1_CIF_MI_CTRL_SHD_MP_IN_ENABLED |
+		 RKISP1_CIF_MI_CTRL_SHD_RAW_OUT_ENABLED;
+
+	return !(rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL_SHD) & en);
+}
+
+static bool rkisp1_sp_is_stopped(struct rkisp1_capture *cap)
+{
+	return !(rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL_SHD) &
+		 RKISP1_CIF_MI_CTRL_SHD_SP_IN_ENABLED);
+}
+
+static void rkisp1_mp_set_data_path(struct rkisp1_capture *cap)
+{
+	u32 dpcl = rkisp1_read(cap->rkisp1, RKISP1_CIF_VI_DPCL);
+
+	dpcl = dpcl | RKISP1_CIF_VI_DPCL_CHAN_MODE_MP |
+	       RKISP1_CIF_VI_DPCL_MP_MUX_MRSZ_MI;
+	rkisp1_write(cap->rkisp1, dpcl, RKISP1_CIF_VI_DPCL);
+}
+
+static void rkisp1_sp_set_data_path(struct rkisp1_capture *cap)
+{
+	u32 dpcl = rkisp1_read(cap->rkisp1, RKISP1_CIF_VI_DPCL);
+
+	dpcl |= RKISP1_CIF_VI_DPCL_CHAN_MODE_SP;
+	rkisp1_write(cap->rkisp1, dpcl, RKISP1_CIF_VI_DPCL);
+}
+
+static struct rkisp1_capture_ops rkisp1_capture_ops_mp = {
+	.config = rkisp1_mp_config,
+	.enable = rkisp1_mp_enable,
+	.disable = rkisp1_mp_disable,
+	.stop = rkisp1_mp_sp_stop,
+	.set_data_path = rkisp1_mp_set_data_path,
+	.is_stopped = rkisp1_mp_is_stopped,
+};
+
+static struct rkisp1_capture_ops rkisp1_capture_ops_sp = {
+	.config = rkisp1_sp_config,
+	.enable = rkisp1_sp_enable,
+	.disable = rkisp1_sp_disable,
+	.stop = rkisp1_mp_sp_stop,
+	.set_data_path = rkisp1_sp_set_data_path,
+	.is_stopped = rkisp1_sp_is_stopped,
+};
+
+/* ----------------------------------------------------------------------------
+ * Frame buffer operations
+ */
+
+static int rkisp1_dummy_buf_create(struct rkisp1_capture *cap)
+{
+	const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
+	struct rkisp1_dummy_buffer *dummy_buf = &cap->buf.dummy;
+
+	dummy_buf->size = max3(rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y),
+			       rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB),
+			       rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR));
+
+	/* The driver never accesses the vaddr, so no kernel mapping is required. */
+	dummy_buf->vaddr = dma_alloc_attrs(cap->rkisp1->dev,
+					   dummy_buf->size,
+					   &dummy_buf->dma_addr,
+					   GFP_KERNEL,
+					   DMA_ATTR_NO_KERNEL_MAPPING);
+	if (!dummy_buf->vaddr)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void rkisp1_dummy_buf_destroy(struct rkisp1_capture *cap)
+{
+	dma_free_attrs(cap->rkisp1->dev,
+		       cap->buf.dummy.size, cap->buf.dummy.vaddr,
+		       cap->buf.dummy.dma_addr, DMA_ATTR_NO_KERNEL_MAPPING);
+}
+
+static void rkisp1_set_next_buf(struct rkisp1_capture *cap)
+{
+	/*
+	 * Use the dummy buffer allocated with dma_alloc_attrs() to
+	 * discard the data when no buffer is available.
+	 */
+	if (cap->buf.next) {
+		u32 *buff_addr = cap->buf.next->buff_addr;
+
+		rkisp1_write(cap->rkisp1,
+			     buff_addr[RKISP1_PLANE_Y],
+			     cap->config->mi.y_base_ad_init);
+		rkisp1_write(cap->rkisp1,
+			     buff_addr[RKISP1_PLANE_CB],
+			     cap->config->mi.cb_base_ad_init);
+		rkisp1_write(cap->rkisp1,
+			     buff_addr[RKISP1_PLANE_CR],
+			     cap->config->mi.cr_base_ad_init);
+	} else {
+		rkisp1_write(cap->rkisp1,
+			     cap->buf.dummy.dma_addr,
+			     cap->config->mi.y_base_ad_init);
+		rkisp1_write(cap->rkisp1,
+			     cap->buf.dummy.dma_addr,
+			     cap->config->mi.cb_base_ad_init);
+		rkisp1_write(cap->rkisp1,
+			     cap->buf.dummy.dma_addr,
+			     cap->config->mi.cr_base_ad_init);
+	}
+
+	/* Set plane offsets */
+	rkisp1_write(cap->rkisp1, 0, cap->config->mi.y_offs_cnt_init);
+	rkisp1_write(cap->rkisp1, 0, cap->config->mi.cb_offs_cnt_init);
+	rkisp1_write(cap->rkisp1, 0, cap->config->mi.cr_offs_cnt_init);
+}
+
+/*
+ * This function is called at frame end. The next frame is already being
+ * processed, so a buffer must be set up for the frame after that one,
+ * otherwise the memory interface overflows.
+ */
+static void rkisp1_handle_buffer(struct rkisp1_capture *cap)
+{
+	struct rkisp1_isp *isp = &cap->rkisp1->isp;
+	struct rkisp1_buffer *curr_buf = cap->buf.curr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cap->buf.lock, flags);
+
+	if (curr_buf) {
+		curr_buf->vb.sequence = atomic_read(&isp->frame_sequence);
+		curr_buf->vb.vb2_buf.timestamp = ktime_get_boottime_ns();
+		curr_buf->vb.field = V4L2_FIELD_NONE;
+		vb2_buffer_done(&curr_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+	} else {
+		cap->rkisp1->debug.frame_drop[cap->id]++;
+	}
+
+	cap->buf.curr = cap->buf.next;
+	cap->buf.next = NULL;
+
+	if (!list_empty(&cap->buf.queue)) {
+		cap->buf.next = list_first_entry(&cap->buf.queue,
+						 struct rkisp1_buffer,
+						 queue);
+		list_del(&cap->buf.next->queue);
+	}
+	spin_unlock_irqrestore(&cap->buf.lock, flags);
+
+	rkisp1_set_next_buf(cap);
+}
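+
+/*
+ * In short: at each frame end the buffer that has just been written
+ * (buf.curr) is returned to userspace, buf.next becomes buf.curr, and the
+ * head of buf.queue (if any) becomes the new buf.next, which is programmed
+ * into the memory interface for the frame after the one currently being
+ * captured.
+ */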
+
+void rkisp1_capture_isr(struct rkisp1_device *rkisp1)
+{
+	unsigned int i;
+	u32 status;
+
+	status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS);
+	rkisp1_write(rkisp1, status, RKISP1_CIF_MI_ICR);
+
+	for (i = 0; i < ARRAY_SIZE(rkisp1->capture_devs); ++i) {
+		struct rkisp1_capture *cap = &rkisp1->capture_devs[i];
+
+		if (!(status & RKISP1_CIF_MI_FRAME(cap)))
+			continue;
+		if (!cap->is_stopping) {
+			rkisp1_handle_buffer(cap);
+			continue;
+		}
+		/*
+		 * Make sure the stream is actually stopped, as reported by
+		 * the shadow register, before waking up the thread that
+		 * would immediately free all frame buffers. stop() only
+		 * takes effect at the next frame end, when the
+		 * configuration is synced to the shadow registers.
+		 */
+		if (!cap->ops->is_stopped(cap)) {
+			cap->ops->stop(cap);
+			continue;
+		}
+		cap->is_stopping = false;
+		cap->is_streaming = false;
+		wake_up(&cap->done);
+	}
+}
+
+/* ----------------------------------------------------------------------------
+ * Vb2 operations
+ */
+
+static int rkisp1_vb2_queue_setup(struct vb2_queue *queue,
+				  unsigned int *num_buffers,
+				  unsigned int *num_planes,
+				  unsigned int sizes[],
+				  struct device *alloc_devs[])
+{
+	struct rkisp1_capture *cap = queue->drv_priv;
+	const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
+	unsigned int i;
+
+	if (*num_planes) {
+		if (*num_planes != pixm->num_planes)
+			return -EINVAL;
+
+		for (i = 0; i < pixm->num_planes; i++)
+			if (sizes[i] < pixm->plane_fmt[i].sizeimage)
+				return -EINVAL;
+	} else {
+		*num_planes = pixm->num_planes;
+		for (i = 0; i < pixm->num_planes; i++)
+			sizes[i] = pixm->plane_fmt[i].sizeimage;
+	}
+
+	return 0;
+}
+
+static void rkisp1_vb2_buf_queue(struct vb2_buffer *vb)
+{
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct rkisp1_buffer *ispbuf =
+		container_of(vbuf, struct rkisp1_buffer, vb);
+	struct rkisp1_capture *cap = vb->vb2_queue->drv_priv;
+	const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
+	unsigned long flags;
+	unsigned int i;
+
+	memset(ispbuf->buff_addr, 0, sizeof(ispbuf->buff_addr));
+	for (i = 0; i < pixm->num_planes; i++)
+		ispbuf->buff_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+
+	/* Convert to non-MPLANE */
+	if (pixm->num_planes == 1) {
+		ispbuf->buff_addr[RKISP1_PLANE_CB] =
+			ispbuf->buff_addr[RKISP1_PLANE_Y] +
+			rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y);
+		ispbuf->buff_addr[RKISP1_PLANE_CR] =
+			ispbuf->buff_addr[RKISP1_PLANE_CB] +
+			rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB);
+	}
+
+	spin_lock_irqsave(&cap->buf.lock, flags);
+
+	/*
+	 * If streaming has started but no frame has been captured yet and
+	 * no next buffer is assigned, use this buffer directly as the next
+	 * buffer and update the memory interface.
+	 */
+	if (cap->is_streaming && !cap->buf.next &&
+	    atomic_read(&cap->rkisp1->isp.frame_sequence) == -1) {
+		cap->buf.next = ispbuf;
+		rkisp1_set_next_buf(cap);
+	} else {
+		list_add_tail(&ispbuf->queue, &cap->buf.queue);
+	}
+	spin_unlock_irqrestore(&cap->buf.lock, flags);
+}
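+
+/*
+ * For packed (single memory plane) formats the CB and CR addresses derived
+ * above are offsets into the single DMA buffer. Example (assumed values):
+ * for an 800x600 V4L2_PIX_FMT_YUV420 buffer starting at address A, Y is at
+ * A, CB at A + 480000 and CR at A + 600000.
+ */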
+
+static int rkisp1_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+	struct rkisp1_capture *cap = vb->vb2_queue->drv_priv;
+	unsigned int i;
+
+	for (i = 0; i < cap->pix.fmt.num_planes; i++) {
+		unsigned long size = cap->pix.fmt.plane_fmt[i].sizeimage;
+
+		if (vb2_plane_size(vb, i) < size) {
+			dev_err(cap->rkisp1->dev,
+				"User buffer too small (%ld < %ld)\n",
+				vb2_plane_size(vb, i), size);
+			return -EINVAL;
+		}
+		vb2_set_plane_payload(vb, i, size);
+	}
+
+	return 0;
+}
+
+static void rkisp1_return_all_buffers(struct rkisp1_capture *cap,
+				      enum vb2_buffer_state state)
+{
+	unsigned long flags;
+	struct rkisp1_buffer *buf;
+
+	spin_lock_irqsave(&cap->buf.lock, flags);
+	if (cap->buf.curr) {
+		vb2_buffer_done(&cap->buf.curr->vb.vb2_buf, state);
+		cap->buf.curr = NULL;
+	}
+	if (cap->buf.next) {
+		vb2_buffer_done(&cap->buf.next->vb.vb2_buf, state);
+		cap->buf.next = NULL;
+	}
+	while (!list_empty(&cap->buf.queue)) {
+		buf = list_first_entry(&cap->buf.queue,
+				       struct rkisp1_buffer, queue);
+		list_del(&buf->queue);
+		vb2_buffer_done(&buf->vb.vb2_buf, state);
+	}
+	spin_unlock_irqrestore(&cap->buf.lock, flags);
+}
+
+/*
+ * rkisp1_pipeline_sink_walk - Walk through the pipeline and call cb
+ * @from: entity at which to start the pipeline walk
+ * @until: entity at which to stop the pipeline walk
+ * @cb: callback invoked for each subdevice found in the chain
+ *
+ * Walk the entity chain towards the sensor, starting at the pipeline video
+ * node, and call cb for each subdevice in the chain.
+ *
+ * If the until argument isn't NULL, stop the pipeline walk when reaching the
+ * until entity. This is used to disable a partially started pipeline due to a
+ * subdev start error.
+ */
+static int rkisp1_pipeline_sink_walk(struct media_entity *from,
+				     struct media_entity *until,
+				     int (*cb)(struct media_entity *from,
+					       struct media_entity *curr))
+{
+	struct media_entity *entity = from;
+	struct media_pad *pad;
+	unsigned int i;
+	int ret;
+
+	while (1) {
+		pad = NULL;
+		/* Find remote source pad */
+		for (i = 0; i < entity->num_pads; i++) {
+			struct media_pad *spad = &entity->pads[i];
+
+			if (!(spad->flags & MEDIA_PAD_FL_SINK))
+				continue;
+			pad = media_entity_remote_pad(spad);
+			if (pad && is_media_entity_v4l2_subdev(pad->entity))
+				break;
+		}
+		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+			break;
+
+		entity = pad->entity;
+		if (entity == until)
+			break;
+
+		ret = cb(from, entity);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
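+
+/*
+ * rkisp1_pipeline_sink_walk() is used below with the enable and disable
+ * callbacks to start or stop every subdevice upstream of a capture video
+ * node, for example:
+ *
+ *	rkisp1_pipeline_sink_walk(entity, NULL, rkisp1_pipeline_enable_cb);
+ */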
+
+static int rkisp1_pipeline_disable_cb(struct media_entity *from,
+				      struct media_entity *curr)
+{
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(curr);
+
+	return v4l2_subdev_call(sd, video, s_stream, false);
+}
+
+static int rkisp1_pipeline_enable_cb(struct media_entity *from,
+				     struct media_entity *curr)
+{
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(curr);
+
+	return v4l2_subdev_call(sd, video, s_stream, true);
+}
+
+static void rkisp1_stream_stop(struct rkisp1_capture *cap)
+{
+	int ret;
+
+	/*
+	 * The stream should stop from the interrupt handler. If it doesn't,
+	 * stop it by force.
+	 */
+	cap->is_stopping = true;
+	ret = wait_event_timeout(cap->done,
+				 !cap->is_streaming,
+				 msecs_to_jiffies(1000));
+	if (!ret) {
+		cap->rkisp1->debug.stop_timeout[cap->id]++;
+		cap->ops->stop(cap);
+		cap->is_stopping = false;
+		cap->is_streaming = false;
+	}
+}
+
+static void rkisp1_vb2_stop_streaming(struct vb2_queue *queue)
+{
+	struct rkisp1_capture *cap = queue->drv_priv;
+	struct rkisp1_vdev_node *node = &cap->vnode;
+	struct rkisp1_device *rkisp1 = cap->rkisp1;
+	int ret;
+
+	rkisp1_stream_stop(cap);
+	media_pipeline_stop(&node->vdev.entity);
+	ret = rkisp1_pipeline_sink_walk(&node->vdev.entity, NULL,
+					rkisp1_pipeline_disable_cb);
+	if (ret)
+		dev_err(rkisp1->dev,
+			"pipeline stream-off failed error:%d\n", ret);
+
+	rkisp1_return_all_buffers(cap, VB2_BUF_STATE_ERROR);
+
+	ret = v4l2_pipeline_pm_use(&node->vdev.entity, 0);
+	if (ret)
+		dev_err(rkisp1->dev, "pipeline close failed error:%d\n", ret);
+
+	ret = pm_runtime_put(rkisp1->dev);
+	if (ret)
+		dev_err(rkisp1->dev, "power down failed error:%d\n", ret);
+
+	rkisp1_dummy_buf_destroy(cap);
+}
+
+/*
+ * Most of the registers inside the Rockchip ISP1 have shadow registers,
+ * since they must not be changed while a frame is being processed.
+ * Usually, each sub-module updates its shadow register after
+ * processing the last pixel of a frame.
+ */
+static void rkisp1_stream_start(struct rkisp1_capture *cap)
+{
+	struct rkisp1_device *rkisp1 = cap->rkisp1;
+	struct rkisp1_capture *other = &rkisp1->capture_devs[cap->id ^ 1];
+
+	cap->ops->set_data_path(cap);
+	cap->ops->config(cap);
+
+	/* Set up a buffer for the next frame */
+	rkisp1_handle_buffer(cap);
+	cap->ops->enable(cap);
+	/*
+	 * It is safe to configure the ACTIVE and SHADOW registers for the
+	 * first stream. When the second stream starts, do NOT force an
+	 * update, because that would also update the first stream.
+	 *
+	 * Forcing an update in that case would drop one more buffer (two in
+	 * total), since no buffer is in the shadow registers when the second
+	 * frame end is received. Not forcing it is also required because the
+	 * second frame end may otherwise be corrupted, especially when
+	 * running at 120fps.
+	 */
+	if (!other->is_streaming) {
+		/* force cfg update */
+		rkisp1_write(rkisp1,
+			     RKISP1_CIF_MI_INIT_SOFT_UPD, RKISP1_CIF_MI_INIT);
+		rkisp1_handle_buffer(cap);
+	}
+	cap->is_streaming = true;
+}
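+
+/*
+ * Note that when no other stream is running, rkisp1_handle_buffer() runs
+ * twice: once to program a buffer before the path is enabled, and once
+ * more after the forced soft update, so that a buffer is also queued for
+ * the following frame.
+ */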
+
+static int
+rkisp1_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
+{
+	struct rkisp1_capture *cap = queue->drv_priv;
+	struct media_entity *entity = &cap->vnode.vdev.entity;
+	int ret;
+
+	ret = rkisp1_dummy_buf_create(cap);
+	if (ret)
+		goto err_ret_buffers;
+
+	ret = pm_runtime_get_sync(cap->rkisp1->dev);
+	if (ret) {
+		dev_err(cap->rkisp1->dev, "power up failed %d\n", ret);
+		goto err_destroy_dummy;
+	}
+	ret = v4l2_pipeline_pm_use(entity, 1);
+	if (ret) {
+		dev_err(cap->rkisp1->dev, "open cif pipeline failed %d\n", ret);
+		goto err_pipe_pm_put;
+	}
+
+	rkisp1_stream_start(cap);
+
+	/* start sub-devices */
+	ret = rkisp1_pipeline_sink_walk(entity, NULL,
+					rkisp1_pipeline_enable_cb);
+	if (ret)
+		goto err_stop_stream;
+
+	ret = media_pipeline_start(entity, &cap->rkisp1->pipe);
+	if (ret) {
+		dev_err(cap->rkisp1->dev, "start pipeline failed %d\n", ret);
+		goto err_pipe_disable;
+	}
+
+	return 0;
+
+err_pipe_disable:
+	rkisp1_pipeline_sink_walk(entity, NULL, rkisp1_pipeline_disable_cb);
+err_stop_stream:
+	rkisp1_stream_stop(cap);
+	v4l2_pipeline_pm_use(entity, 0);
+err_pipe_pm_put:
+	pm_runtime_put(cap->rkisp1->dev);
+err_destroy_dummy:
+	rkisp1_dummy_buf_destroy(cap);
+err_ret_buffers:
+	rkisp1_return_all_buffers(cap, VB2_BUF_STATE_QUEUED);
+
+	return ret;
+}
+
+static struct vb2_ops rkisp1_vb2_ops = {
+	.queue_setup = rkisp1_vb2_queue_setup,
+	.buf_queue = rkisp1_vb2_buf_queue,
+	.buf_prepare = rkisp1_vb2_buf_prepare,
+	.wait_prepare = vb2_ops_wait_prepare,
+	.wait_finish = vb2_ops_wait_finish,
+	.stop_streaming = rkisp1_vb2_stop_streaming,
+	.start_streaming = rkisp1_vb2_start_streaming,
+};
+
+/* ----------------------------------------------------------------------------
+ * IOCTLs operations
+ */
+
+static const struct v4l2_format_info *
+rkisp1_fill_pixfmt(struct v4l2_pix_format_mplane *pixm,
+		   enum rkisp1_stream_id id)
+{
+	struct v4l2_plane_pix_format *plane_y = &pixm->plane_fmt[0];
+	const struct v4l2_format_info *info;
+	unsigned int i;
+	u32 stride;
+
+	info = v4l2_format_info(pixm->pixelformat);
+	pixm->num_planes = info->mem_planes;
+	stride = info->bpp[0] * pixm->width;
+	/* Self path supports custom stride but Main path doesn't */
+	if (id == RKISP1_MAINPATH || plane_y->bytesperline < stride)
+		plane_y->bytesperline = stride;
+	plane_y->sizeimage = plane_y->bytesperline * pixm->height;
+
+	/* normalize stride to pixels per line */
+	stride = DIV_ROUND_UP(plane_y->bytesperline, info->bpp[0]);
+
+	for (i = 1; i < info->comp_planes; i++) {
+		struct v4l2_plane_pix_format *plane = &pixm->plane_fmt[i];
+
+		/* bytesperline for the other components derives from the Y component */
+		plane->bytesperline = DIV_ROUND_UP(stride, info->hdiv) *
+				      info->bpp[i];
+		plane->sizeimage = plane->bytesperline *
+				   DIV_ROUND_UP(pixm->height, info->vdiv);
+	}
+
+	/*
+	 * If pixfmt is packed, then plane_fmt[0] should contain the total size
+	 * considering all components. plane_fmt[i] for i > 0 should be ignored
+	 * by userspace as mem_planes == 1, but we are keeping information there
+	 * for convenience.
+	 */
+	if (info->mem_planes == 1)
+		for (i = 1; i < info->comp_planes; i++)
+			plane_y->sizeimage += pixm->plane_fmt[i].sizeimage;
+
+	return info;
+}
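+
+/*
+ * Worked example (assumed values): for an 800x600 V4L2_PIX_FMT_YUV420
+ * request, bpp[0] = 1, hdiv = vdiv = 2 and mem_planes = 1, so
+ * plane_fmt[0].bytesperline is 800 and the per-component sizes are
+ * 480000 (Y), 120000 (CB) and 120000 (CR). As the format is packed,
+ * plane_fmt[0].sizeimage accumulates to 720000 bytes and num_planes is 1.
+ */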
+
+static const struct rkisp1_capture_fmt_cfg *
+rkisp1_find_fmt_cfg(const struct rkisp1_capture *cap, const u32 pixelfmt)
+{
+	unsigned int i;
+
+	for (i = 0; i < cap->config->fmt_size; i++) {
+		if (cap->config->fmts[i].fourcc == pixelfmt)
+			return &cap->config->fmts[i];
+	}
+	return NULL;
+}
+
+static void rkisp1_try_fmt(const struct rkisp1_capture *cap,
+			   struct v4l2_pix_format_mplane *pixm,
+			   const struct rkisp1_capture_fmt_cfg **fmt_cfg,
+			   const struct v4l2_format_info **fmt_info)
+{
+	const struct rkisp1_capture_config *config = cap->config;
+	struct rkisp1_capture *other_cap =
+			&cap->rkisp1->capture_devs[cap->id ^ 1];
+	const struct rkisp1_capture_fmt_cfg *fmt;
+	const struct v4l2_format_info *info;
+	const unsigned int max_widths[] = { RKISP1_RSZ_MP_SRC_MAX_WIDTH,
+					    RKISP1_RSZ_SP_SRC_MAX_WIDTH };
+	const unsigned int max_heights[] = { RKISP1_RSZ_MP_SRC_MAX_HEIGHT,
+					     RKISP1_RSZ_SP_SRC_MAX_HEIGHT};
+
+	fmt = rkisp1_find_fmt_cfg(cap, pixm->pixelformat);
+	if (!fmt) {
+		fmt = config->fmts;
+		pixm->pixelformat = fmt->fourcc;
+	}
+
+	pixm->width = clamp_t(u32, pixm->width,
+			      RKISP1_RSZ_SRC_MIN_WIDTH, max_widths[cap->id]);
+	pixm->height = clamp_t(u32, pixm->height,
+			       RKISP1_RSZ_SRC_MIN_HEIGHT, max_heights[cap->id]);
+
+	pixm->field = V4L2_FIELD_NONE;
+	pixm->colorspace = V4L2_COLORSPACE_DEFAULT;
+	pixm->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+
+	info = rkisp1_fill_pixfmt(pixm, cap->id);
+
+	/* The quantization cannot change while the other capture is streaming. */
+	if (other_cap->is_streaming)
+		pixm->quantization = other_cap->pix.fmt.quantization;
+	/* Output full range by default; this takes effect via the params. */
+	else if (!pixm->quantization ||
+		 pixm->quantization > V4L2_QUANTIZATION_LIM_RANGE)
+		pixm->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+	if (fmt_cfg)
+		*fmt_cfg = fmt;
+	if (fmt_info)
+		*fmt_info = info;
+}
+
+static void rkisp1_set_fmt(struct rkisp1_capture *cap,
+			   struct v4l2_pix_format_mplane *pixm)
+{
+	rkisp1_try_fmt(cap, pixm, &cap->pix.cfg, &cap->pix.info);
+	cap->pix.fmt = *pixm;
+
+	/* SP supports a custom stride, expressed in pixels of the Y plane */
+	if (cap->id == RKISP1_SELFPATH)
+		cap->sp_y_stride = pixm->plane_fmt[0].bytesperline /
+				   cap->pix.info->bpp[0];
+}
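+
+/*
+ * Example (assumed values): a self path configured for V4L2_PIX_FMT_RGB565
+ * at 800x600 with the default stride has bytesperline = 1600 and
+ * bpp[0] = 2, so sp_y_stride is 800 pixels, the value later written to
+ * RKISP1_CIF_MI_SP_Y_LLENGTH.
+ */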
+
+static int rkisp1_try_fmt_vid_cap_mplane(struct file *file, void *fh,
+					 struct v4l2_format *f)
+{
+	struct rkisp1_capture *cap = video_drvdata(file);
+
+	rkisp1_try_fmt(cap, &f->fmt.pix_mp, NULL, NULL);
+
+	return 0;
+}
+
+static int rkisp1_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+					  struct v4l2_fmtdesc *f)
+{
+	struct rkisp1_capture *cap = video_drvdata(file);
+	const struct rkisp1_capture_fmt_cfg *fmt = NULL;
+
+	if (f->index >= cap->config->fmt_size)
+		return -EINVAL;
+
+	fmt = &cap->config->fmts[f->index];
+	f->pixelformat = fmt->fourcc;
+
+	return 0;
+}
+
+static int rkisp1_s_fmt_vid_cap_mplane(struct file *file,
+				       void *priv, struct v4l2_format *f)
+{
+	struct rkisp1_capture *cap = video_drvdata(file);
+	struct rkisp1_vdev_node *node =
+				rkisp1_vdev_to_node(&cap->vnode.vdev);
+
+	if (vb2_is_busy(&node->buf_queue))
+		return -EBUSY;
+
+	rkisp1_set_fmt(cap, &f->fmt.pix_mp);
+
+	return 0;
+}
+
+static int rkisp1_g_fmt_vid_cap_mplane(struct file *file, void *fh,
+				       struct v4l2_format *f)
+{
+	struct rkisp1_capture *cap = video_drvdata(file);
+
+	f->fmt.pix_mp = cap->pix.fmt;
+
+	return 0;
+}
+
+static int
+rkisp1_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
+{
+	struct rkisp1_capture *cap_dev = video_drvdata(file);
+	struct rkisp1_device *rkisp1 = cap_dev->rkisp1;
+
+	strscpy(cap->driver, rkisp1->dev->driver->name, sizeof(cap->driver));
+	strscpy(cap->card, rkisp1->dev->driver->name, sizeof(cap->card));
+	strscpy(cap->bus_info, RKISP1_BUS_INFO, sizeof(cap->bus_info));
+
+	return 0;
+}
+
+static const struct v4l2_ioctl_ops rkisp1_v4l2_ioctl_ops = {
+	.vidioc_reqbufs = vb2_ioctl_reqbufs,
+	.vidioc_querybuf = vb2_ioctl_querybuf,
+	.vidioc_create_bufs = vb2_ioctl_create_bufs,
+	.vidioc_qbuf = vb2_ioctl_qbuf,
+	.vidioc_expbuf = vb2_ioctl_expbuf,
+	.vidioc_dqbuf = vb2_ioctl_dqbuf,
+	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+	.vidioc_streamon = vb2_ioctl_streamon,
+	.vidioc_streamoff = vb2_ioctl_streamoff,
+	.vidioc_try_fmt_vid_cap_mplane = rkisp1_try_fmt_vid_cap_mplane,
+	.vidioc_s_fmt_vid_cap_mplane = rkisp1_s_fmt_vid_cap_mplane,
+	.vidioc_g_fmt_vid_cap_mplane = rkisp1_g_fmt_vid_cap_mplane,
+	.vidioc_enum_fmt_vid_cap = rkisp1_enum_fmt_vid_cap_mplane,
+	.vidioc_querycap = rkisp1_querycap,
+	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int rkisp1_capture_link_validate(struct media_link *link)
+{
+	struct video_device *vdev =
+		media_entity_to_video_device(link->sink->entity);
+	struct v4l2_subdev *sd =
+		media_entity_to_v4l2_subdev(link->source->entity);
+	struct rkisp1_capture *cap = video_get_drvdata(vdev);
+	struct rkisp1_isp *isp = &cap->rkisp1->isp;
+	struct v4l2_subdev_format sd_fmt;
+	int ret;
+
+	if (cap->id == RKISP1_SELFPATH &&
+	    isp->src_fmt->mbus_code != MEDIA_BUS_FMT_YUYV8_2X8) {
+		dev_err(cap->rkisp1->dev,
+			"selfpath only supports MEDIA_BUS_FMT_YUYV8_2X8\n");
+		return -EPIPE;
+	}
+
+	if (cap->pix.cfg->fmt_type != isp->src_fmt->fmt_type) {
+		dev_err(cap->rkisp1->dev,
+			"format type mismatch in link '%s:%d->%s:%d'\n",
+			link->source->entity->name, link->source->index,
+			link->sink->entity->name, link->sink->index);
+		return -EPIPE;
+	}
+
+	sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	sd_fmt.pad = link->source->index;
+	ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
+	if (ret)
+		return ret;
+
+	if (sd_fmt.format.height != cap->pix.fmt.height ||
+	    sd_fmt.format.width != cap->pix.fmt.width)
+		return -EPIPE;
+
+	return 0;
+}
+
+/* ----------------------------------------------------------------------------
+ * core functions
+ */
+
+static const struct media_entity_operations rkisp1_media_ops = {
+	.link_validate = rkisp1_capture_link_validate,
+};
+
+static const struct v4l2_file_operations rkisp1_fops = {
+	.open = v4l2_fh_open,
+	.release = vb2_fop_release,
+	.unlocked_ioctl = video_ioctl2,
+	.poll = vb2_fop_poll,
+	.mmap = vb2_fop_mmap,
+};
+
+static void rkisp1_unregister_capture(struct rkisp1_capture *cap)
+{
+	media_entity_cleanup(&cap->vnode.vdev.entity);
+	video_unregister_device(&cap->vnode.vdev);
+}
+
+void rkisp1_capture_devs_unregister(struct rkisp1_device *rkisp1)
+{
+	struct rkisp1_capture *mp = &rkisp1->capture_devs[RKISP1_MAINPATH];
+	struct rkisp1_capture *sp = &rkisp1->capture_devs[RKISP1_SELFPATH];
+
+	rkisp1_unregister_capture(mp);
+	rkisp1_unregister_capture(sp);
+}
+
+static int rkisp1_register_capture(struct rkisp1_capture *cap)
+{
+	const char * const dev_names[] = {RKISP1_MP_DEV_NAME,
+					  RKISP1_SP_DEV_NAME};
+	struct v4l2_device *v4l2_dev = &cap->rkisp1->v4l2_dev;
+	struct video_device *vdev = &cap->vnode.vdev;
+	struct rkisp1_vdev_node *node;
+	struct vb2_queue *q;
+	int ret;
+
+	strscpy(vdev->name, dev_names[cap->id], sizeof(vdev->name));
+	node = rkisp1_vdev_to_node(vdev);
+	mutex_init(&node->vlock);
+
+	vdev->ioctl_ops = &rkisp1_v4l2_ioctl_ops;
+	vdev->release = video_device_release_empty;
+	vdev->fops = &rkisp1_fops;
+	vdev->minor = -1;
+	vdev->v4l2_dev = v4l2_dev;
+	vdev->lock = &node->vlock;
+	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+			    V4L2_CAP_STREAMING;
+	vdev->entity.ops = &rkisp1_media_ops;
+	video_set_drvdata(vdev, cap);
+	vdev->vfl_dir = VFL_DIR_RX;
+	node->pad.flags = MEDIA_PAD_FL_SINK;
+
+	q = &node->buf_queue;
+	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR;
+	q->drv_priv = cap;
+	q->ops = &rkisp1_vb2_ops;
+	q->mem_ops = &vb2_dma_contig_memops;
+	q->buf_struct_size = sizeof(struct rkisp1_buffer);
+	q->min_buffers_needed = RKISP1_MIN_BUFFERS_NEEDED;
+	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+	q->lock = &node->vlock;
+	q->dev = cap->rkisp1->dev;
+	ret = vb2_queue_init(q);
+	if (ret) {
+		dev_err(cap->rkisp1->dev,
+			"vb2 queue init failed (err=%d)\n", ret);
+		return ret;
+	}
+
+	vdev->queue = q;
+
+	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+	if (ret) {
+		dev_err(cap->rkisp1->dev,
+			"failed to register %s, ret=%d\n", vdev->name, ret);
+		return ret;
+	}
+	v4l2_info(v4l2_dev, "registered %s as /dev/video%d\n", vdev->name,
+		  vdev->num);
+
+	ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
+	if (ret) {
+		video_unregister_device(vdev);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void
+rkisp1_capture_init(struct rkisp1_device *rkisp1, enum rkisp1_stream_id id)
+{
+	struct rkisp1_capture *cap = &rkisp1->capture_devs[id];
+	struct v4l2_pix_format_mplane pixm;
+
+	memset(cap, 0, sizeof(*cap));
+	cap->id = id;
+	cap->rkisp1 = rkisp1;
+
+	INIT_LIST_HEAD(&cap->buf.queue);
+	init_waitqueue_head(&cap->done);
+	spin_lock_init(&cap->buf.lock);
+	if (cap->id == RKISP1_SELFPATH) {
+		cap->ops = &rkisp1_capture_ops_sp;
+		cap->config = &rkisp1_capture_config_sp;
+	} else {
+		cap->ops = &rkisp1_capture_ops_mp;
+		cap->config = &rkisp1_capture_config_mp;
+	}
+
+	cap->is_streaming = false;
+
+	memset(&pixm, 0, sizeof(pixm));
+	pixm.pixelformat = V4L2_PIX_FMT_YUYV;
+	pixm.width = RKISP1_DEFAULT_WIDTH;
+	pixm.height = RKISP1_DEFAULT_HEIGHT;
+	rkisp1_set_fmt(cap, &pixm);
+}
+
+int rkisp1_capture_devs_register(struct rkisp1_device *rkisp1)
+{
+	struct rkisp1_capture *cap;
+	unsigned int i, j;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(rkisp1->capture_devs); i++) {
+		rkisp1_capture_init(rkisp1, i);
+		cap = &rkisp1->capture_devs[i];
+		cap->rkisp1 = rkisp1;
+		ret = rkisp1_register_capture(cap);
+		if (ret)
+			goto err_unreg_capture_devs;
+	}
+
+	return 0;
+
+err_unreg_capture_devs:
+	for (j = 0; j < i; j++) {
+		cap = &rkisp1->capture_devs[j];
+		rkisp1_unregister_capture(cap);
+	}
+
+	return ret;
+}
diff --git a/drivers/staging/media/rkisp1/rkisp1-common.c b/drivers/staging/media/rkisp1/rkisp1-common.c
new file mode 100644
index 0000000..cf88966
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-common.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - Common definitions
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ */
+
+#include <media/v4l2-rect.h>
+
+#include "rkisp1-common.h"
+
+static const struct v4l2_rect rkisp1_sd_min_crop = {
+	.width = RKISP1_ISP_MIN_WIDTH,
+	.height = RKISP1_ISP_MIN_HEIGHT,
+	.top = 0,
+	.left = 0,
+};
+
+void rkisp1_sd_adjust_crop_rect(struct v4l2_rect *crop,
+				const struct v4l2_rect *bounds)
+{
+	v4l2_rect_set_min_size(crop, &rkisp1_sd_min_crop);
+	v4l2_rect_map_inside(crop, bounds);
+}
+
+void rkisp1_sd_adjust_crop(struct v4l2_rect *crop,
+			   const struct v4l2_mbus_framefmt *bounds)
+{
+	struct v4l2_rect crop_bounds = {
+		.left = 0,
+		.top = 0,
+		.width = bounds->width,
+		.height = bounds->height,
+	};
+
+	rkisp1_sd_adjust_crop_rect(crop, &crop_bounds);
+}
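+
+/*
+ * Example (assumed values): a requested crop of 16x8 at (4000, 10) on a
+ * 1920x1080 source is first grown to the 32x32 minimum and then moved so
+ * that it lies entirely inside the 1920x1080 bounds.
+ */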
diff --git a/drivers/staging/media/rkisp1/rkisp1-common.h b/drivers/staging/media/rkisp1/rkisp1-common.h
new file mode 100644
index 0000000..369a401
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-common.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Rockchip ISP1 Driver - Common definitions
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef _RKISP1_COMMON_H
+#define _RKISP1_COMMON_H
+
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "rkisp1-regs.h"
+#include "uapi/rkisp1-config.h"
+
+#define RKISP1_ISP_MAX_WIDTH		4032
+#define RKISP1_ISP_MAX_HEIGHT		3024
+#define RKISP1_ISP_MIN_WIDTH		32
+#define RKISP1_ISP_MIN_HEIGHT		32
+
+#define RKISP1_RSZ_MP_SRC_MAX_WIDTH		4416
+#define RKISP1_RSZ_MP_SRC_MAX_HEIGHT		3312
+#define RKISP1_RSZ_SP_SRC_MAX_WIDTH		1920
+#define RKISP1_RSZ_SP_SRC_MAX_HEIGHT		1920
+#define RKISP1_RSZ_SRC_MIN_WIDTH		32
+#define RKISP1_RSZ_SRC_MIN_HEIGHT		16
+
+#define RKISP1_DEFAULT_WIDTH		800
+#define RKISP1_DEFAULT_HEIGHT		600
+
+#define RKISP1_DRIVER_NAME	"rkisp1"
+#define RKISP1_BUS_INFO		"platform:" RKISP1_DRIVER_NAME
+
+#define RKISP1_MAX_BUS_CLK	8
+
+enum rkisp1_rsz_pad {
+	RKISP1_RSZ_PAD_SINK,
+	RKISP1_RSZ_PAD_SRC,
+};
+
+enum rkisp1_stream_id {
+	RKISP1_MAINPATH,
+	RKISP1_SELFPATH,
+};
+
+enum rkisp1_fmt_pix_type {
+	RKISP1_FMT_YUV,
+	RKISP1_FMT_RGB,
+	RKISP1_FMT_BAYER,
+	RKISP1_FMT_JPEG,
+};
+
+enum rkisp1_fmt_raw_pat_type {
+	RKISP1_RAW_RGGB = 0,
+	RKISP1_RAW_GRBG,
+	RKISP1_RAW_GBRG,
+	RKISP1_RAW_BGGR,
+};
+
+enum rkisp1_isp_pad {
+	RKISP1_ISP_PAD_SINK_VIDEO,
+	RKISP1_ISP_PAD_SINK_PARAMS,
+	RKISP1_ISP_PAD_SOURCE_VIDEO,
+	RKISP1_ISP_PAD_SOURCE_STATS,
+	RKISP1_ISP_PAD_MAX
+};
+
+/*
+ * struct rkisp1_sensor_async - Sensor information
+ * @mbus: media bus configuration
+ */
+struct rkisp1_sensor_async {
+	struct v4l2_async_subdev asd;
+	struct v4l2_mbus_config mbus;
+	unsigned int lanes;
+	struct v4l2_subdev *sd;
+	struct v4l2_ctrl *pixel_rate_ctrl;
+	struct phy *dphy;
+};
+
+/*
+ * struct rkisp1_isp - ISP sub-device
+ *
+ * See Cropping regions of ISP in rkisp1.c for details
+ * @sink_frm: input size, does not have to be equal to the sensor size
+ * @sink_fmt: input format
+ * @sink_crop: crop for sink pad
+ * @src_fmt: output format
+ * @src_crop: output size
+ *
+ * @is_dphy_errctrl_disabled: if dphy errctrl is disabled (avoids an endless interrupt)
+ * @frame_sequence: used to synchronize frame_id between video devices.
+ * @quantization: output quantization
+ */
+struct rkisp1_isp {
+	struct v4l2_subdev sd;
+	struct media_pad pads[RKISP1_ISP_PAD_MAX];
+	struct v4l2_subdev_pad_config pad_cfg[RKISP1_ISP_PAD_MAX];
+	const struct rkisp1_isp_mbus_info *sink_fmt;
+	const struct rkisp1_isp_mbus_info *src_fmt;
+	bool is_dphy_errctrl_disabled;
+	atomic_t frame_sequence;
+};
+
+struct rkisp1_vdev_node {
+	struct vb2_queue buf_queue;
+	struct mutex vlock; /* ioctl serialization mutex */
+	struct video_device vdev;
+	struct media_pad pad;
+};
+
+struct rkisp1_buffer {
+	struct vb2_v4l2_buffer vb;
+	struct list_head queue;
+	union {
+		u32 buff_addr[VIDEO_MAX_PLANES];
+		void *vaddr[VIDEO_MAX_PLANES];
+	};
+};
+
+struct rkisp1_dummy_buffer {
+	void *vaddr;
+	dma_addr_t dma_addr;
+	u32 size;
+};
+
+struct rkisp1_device;
+
+/*
+ * struct rkisp1_capture - ISP capture video device
+ *
+ * @pix.fmt: buffer format
+ * @pix.info: pixel information
+ * @pix.cfg: pixel configuration
+ *
+ * @buf.lock: lock to protect buf_queue
+ * @buf.queue: queued buffer list
+ * @buf.dummy: dummy space to store dropped data
+ *
+ * rkisp1 uses shadow registers, so it needs two buffers at a time
+ * @buf.curr: the buffer used for current frame
+ * @buf.next: the buffer used for next frame
+ */
+struct rkisp1_capture {
+	struct rkisp1_vdev_node vnode;
+	struct rkisp1_device *rkisp1;
+	enum rkisp1_stream_id id;
+	struct rkisp1_capture_ops *ops;
+	const struct rkisp1_capture_config *config;
+	bool is_streaming;
+	bool is_stopping;
+	wait_queue_head_t done;
+	unsigned int sp_y_stride;
+	struct {
+		/* protects queue, curr and next */
+		spinlock_t lock;
+		struct list_head queue;
+		struct rkisp1_dummy_buffer dummy;
+		struct rkisp1_buffer *curr;
+		struct rkisp1_buffer *next;
+	} buf;
+	struct {
+		const struct rkisp1_capture_fmt_cfg *cfg;
+		const struct v4l2_format_info *info;
+		struct v4l2_pix_format_mplane fmt;
+	} pix;
+};
+
+/*
+ * struct rkisp1_stats - ISP Statistics device
+ *
+ * @irq_lock: buffer queue lock
+ * @stat: stats buffer list
+ * @readout_wq: workqueue for statistics information read
+ */
+struct rkisp1_stats {
+	struct rkisp1_vdev_node vnode;
+	struct rkisp1_device *rkisp1;
+
+	spinlock_t irq_lock;
+	struct list_head stat;
+	struct v4l2_format vdev_fmt;
+	bool is_streaming;
+
+	struct workqueue_struct *readout_wq;
+	struct mutex wq_lock;
+};
+
+/*
+ * struct rkisp1_params - ISP input parameters device
+ *
+ * @cur_params: Current ISP parameters
+ * @is_first_params: the first params should take effect immediately
+ */
+struct rkisp1_params {
+	struct rkisp1_vdev_node vnode;
+	struct rkisp1_device *rkisp1;
+
+	spinlock_t config_lock;
+	struct list_head params;
+	struct rkisp1_params_cfg cur_params;
+	struct v4l2_format vdev_fmt;
+	bool is_streaming;
+	bool is_first_params;
+
+	enum v4l2_quantization quantization;
+	enum rkisp1_fmt_raw_pat_type raw_type;
+};
+
+struct rkisp1_resizer {
+	struct v4l2_subdev sd;
+	enum rkisp1_stream_id id;
+	struct rkisp1_device *rkisp1;
+	struct media_pad pads[RKISP1_ISP_PAD_MAX];
+	struct v4l2_subdev_pad_config pad_cfg[RKISP1_ISP_PAD_MAX];
+	const struct rkisp1_rsz_config *config;
+	enum rkisp1_fmt_pix_type fmt_type;
+};
+
+struct rkisp1_debug {
+	struct dentry *debugfs_dir;
+	unsigned long data_loss;
+	unsigned long pic_size_error;
+	unsigned long mipi_error;
+	unsigned long stats_error;
+	unsigned long stop_timeout[2];
+	unsigned long frame_drop[2];
+};
+
+/*
+ * struct rkisp1_device - ISP platform device
+ * @base_addr: base register address
+ * @active_sensor: sensor in-use, set when streaming on
+ * @isp: ISP sub-device
+ * @rkisp1_capture: capture video device
+ * @stats: ISP statistics output device
+ * @params: ISP input parameters device
+ */
+struct rkisp1_device {
+	void __iomem *base_addr;
+	int irq;
+	struct device *dev;
+	unsigned int clk_size;
+	struct clk_bulk_data clks[RKISP1_MAX_BUS_CLK];
+	struct v4l2_device v4l2_dev;
+	struct v4l2_ctrl_handler ctrl_handler;
+	struct media_device media_dev;
+	struct v4l2_async_notifier notifier;
+	struct rkisp1_sensor_async *active_sensor;
+	struct rkisp1_isp isp;
+	struct rkisp1_resizer resizer_devs[2];
+	struct rkisp1_capture capture_devs[2];
+	struct rkisp1_stats stats;
+	struct rkisp1_params params;
+	struct media_pipeline pipe;
+	struct vb2_alloc_ctx *alloc_ctx;
+	struct rkisp1_debug debug;
+};
+
+/*
+ * struct rkisp1_isp_mbus_info - ISP pad format info
+ *
+ * Translate mbus_code to hardware format values
+ *
+ * @bus_width: used for the parallel interface
+ */
+struct rkisp1_isp_mbus_info {
+	u32 mbus_code;
+	enum rkisp1_fmt_pix_type fmt_type;
+	u32 mipi_dt;
+	u32 yuv_seq;
+	u8 bus_width;
+	enum rkisp1_fmt_raw_pat_type bayer_pat;
+	unsigned int direction;
+};
+
+static inline void
+rkisp1_write(struct rkisp1_device *rkisp1, u32 val, unsigned int addr)
+{
+	writel(val, rkisp1->base_addr + addr);
+}
+
+static inline u32 rkisp1_read(struct rkisp1_device *rkisp1, unsigned int addr)
+{
+	return readl(rkisp1->base_addr + addr);
+}
+
+void rkisp1_sd_adjust_crop_rect(struct v4l2_rect *crop,
+				const struct v4l2_rect *bounds);
+
+void rkisp1_sd_adjust_crop(struct v4l2_rect *crop,
+			   const struct v4l2_mbus_framefmt *bounds);
+
+int rkisp1_isp_register(struct rkisp1_device *rkisp1,
+			struct v4l2_device *v4l2_dev);
+void rkisp1_isp_unregister(struct rkisp1_device *rkisp1);
+
+const struct rkisp1_isp_mbus_info *rkisp1_isp_mbus_info_get(u32 mbus_code);
+
+void rkisp1_isp_isr(struct rkisp1_device *rkisp1);
+void rkisp1_mipi_isr(struct rkisp1_device *rkisp1);
+void rkisp1_capture_isr(struct rkisp1_device *rkisp1);
+void rkisp1_stats_isr(struct rkisp1_stats *stats, u32 isp_ris);
+void rkisp1_params_isr(struct rkisp1_device *rkisp1, u32 isp_mis);
+
+int rkisp1_capture_devs_register(struct rkisp1_device *rkisp1);
+void rkisp1_capture_devs_unregister(struct rkisp1_device *rkisp1);
+
+int rkisp1_resizer_devs_register(struct rkisp1_device *rkisp1);
+void rkisp1_resizer_devs_unregister(struct rkisp1_device *rkisp1);
+
+int rkisp1_stats_register(struct rkisp1_stats *stats,
+			  struct v4l2_device *v4l2_dev,
+			  struct rkisp1_device *rkisp1);
+void rkisp1_stats_unregister(struct rkisp1_stats *stats);
+
+void rkisp1_params_configure(struct rkisp1_params *params,
+			     enum rkisp1_fmt_raw_pat_type bayer_pat,
+			     enum v4l2_quantization quantization);
+void rkisp1_params_disable(struct rkisp1_params *params);
+int rkisp1_params_register(struct rkisp1_params *params,
+			   struct v4l2_device *v4l2_dev,
+			   struct rkisp1_device *rkisp1);
+void rkisp1_params_unregister(struct rkisp1_params *params);
+
+void rkisp1_params_isr_handler(struct rkisp1_device *rkisp1, u32 isp_mis);
+
+#endif /* _RKISP1_COMMON_H */
diff --git a/drivers/staging/media/rkisp1/rkisp1-dev.c b/drivers/staging/media/rkisp1/rkisp1-dev.c
new file mode 100644
index 0000000..558126e
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-dev.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - Base driver
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
+#include <media/v4l2-fwnode.h>
+
+#include "rkisp1-common.h"
+
+/*
+ * ISP Details
+ * -----------
+ *
+ * The ISP comprises:
+ *	MIPI serial camera interface
+ *	Image Signal Processing
+ *	Many Image Enhancement Blocks
+ *	Crop
+ *	Resizer
+ *	RGB display-ready image
+ *	Image Rotation
+ *
+ * ISP Block Diagram
+ * -----------------
+ *                                                             rkisp1-resizer.c          rkisp1-capture.c
+ *                                                          |====================|  |=======================|
+ *                                rkisp1-isp.c                              Main Picture Path
+ *                        |==========================|      |===============================================|
+ *                        +-----------+  +--+--+--+--+      +--------+  +--------+              +-----------+
+ *                        |           |  |  |  |  |  |      |        |  |        |              |           |
+ * +--------+    |\       |           |  |  |  |  |  |   -->|  Crop  |->|  RSZ   |------------->|           |
+ * |  MIPI  |--->|  \     |           |  |  |  |  |  |   |  |        |  |        |              |           |
+ * +--------+    |   |    |           |  |IE|IE|IE|IE|   |  +--------+  +--------+              |  Memory   |
+ *               |MUX|--->|    ISP    |->|0 |1 |2 |3 |---+                                      | Interface |
+ * +--------+    |   |    |           |  |  |  |  |  |   |  +--------+  +--------+  +--------+  |           |
+ * |Parallel|--->|  /     |           |  |  |  |  |  |   |  |        |  |        |  |        |  |           |
+ * +--------+    |/       |           |  |  |  |  |  |   -->|  Crop  |->|  RSZ   |->|  RGB   |->|           |
+ *                        |           |  |  |  |  |  |      |        |  |        |  | Rotate |  |           |
+ *                        +-----------+  +--+--+--+--+      +--------+  +--------+  +--------+  +-----------+
+ *                                               ^
+ * +--------+                                    |          |===============================================|
+ * |  DMA   |------------------------------------+                          Self Picture Path
+ * +--------+
+ *
+ *         rkisp1-stats.c        rkisp1-params.c
+ *       |===============|      |===============|
+ *       +---------------+      +---------------+
+ *       |               |      |               |
+ *       |      ISP      |      |      ISP      |
+ *       |               |      |               |
+ *       +---------------+      +---------------+
+ *
+ *
+ * Media Topology
+ * --------------
+ *      +----------+     +----------+
+ *      | Sensor 2 |     | Sensor X |
+ *      ------------ ... ------------
+ *      |    0     |     |    0     |
+ *      +----------+     +----------+      +-----------+
+ *                  \      |               |  params   |
+ *                   \     |               | (output)  |
+ *    +----------+    \    |               +-----------+
+ *    | Sensor 1 |     v   v                     |
+ *    ------------      +------+------+          |
+ *    |    0     |----->|  0   |  1   |<---------+
+ *    +----------+      |------+------|
+ *                      |     ISP     |
+ *                      |------+------|
+ *        +-------------|  2   |  3   |----------+
+ *        |             +------+------+          |
+ *        |                |                     |
+ *        v                v                     v
+ *  +- ---------+    +-----------+         +-----------+
+ *  |     0     |    |     0     |         |   stats   |
+ *  -------------    -------------         | (capture) |
+ *  |  Resizer  |    |  Resizer  |         +-----------+
+ *  ------------|    ------------|
+ *  |     1     |    |     1     |
+ *  +-----------+    +-----------+
+ *        |                |
+ *        v                v
+ *  +-----------+    +-----------+
+ *  | selfpath  |    | mainpath  |
+ *  | (capture) |    | (capture) |
+ *  +-----------+    +-----------+
+ */
+
+struct rkisp1_match_data {
+	const char * const *clks;
+	unsigned int size;
+};
+
+/* ----------------------------------------------------------------------------
+ * Sensor DT bindings
+ */
+
+static int rkisp1_create_links(struct rkisp1_device *rkisp1)
+{
+	struct media_entity *source, *sink;
+	unsigned int flags, source_pad;
+	struct v4l2_subdev *sd;
+	unsigned int i;
+	int ret;
+
+	/* sensor links */
+	flags = MEDIA_LNK_FL_ENABLED;
+	list_for_each_entry(sd, &rkisp1->v4l2_dev.subdevs, list) {
+		if (sd == &rkisp1->isp.sd ||
+		    sd == &rkisp1->resizer_devs[RKISP1_MAINPATH].sd ||
+		    sd == &rkisp1->resizer_devs[RKISP1_SELFPATH].sd)
+			continue;
+
+		ret = media_entity_get_fwnode_pad(&sd->entity, sd->fwnode,
+						  MEDIA_PAD_FL_SOURCE);
+		if (ret) {
+			dev_err(sd->dev, "failed to find src pad for %s\n",
+				sd->name);
+			return ret;
+		}
+		source_pad = ret;
+
+		ret = media_create_pad_link(&sd->entity, source_pad,
+					    &rkisp1->isp.sd.entity,
+					    RKISP1_ISP_PAD_SINK_VIDEO,
+					    flags);
+		if (ret)
+			return ret;
+
+		flags = 0;
+	}
+
+	flags = MEDIA_LNK_FL_ENABLED;
+
+	/* create ISP->RSZ->CAP links */
+	for (i = 0; i < 2; i++) {
+		source = &rkisp1->isp.sd.entity;
+		sink = &rkisp1->resizer_devs[i].sd.entity;
+		ret = media_create_pad_link(source, RKISP1_ISP_PAD_SOURCE_VIDEO,
+					    sink, RKISP1_RSZ_PAD_SINK, flags);
+		if (ret)
+			return ret;
+
+		source = sink;
+		sink = &rkisp1->capture_devs[i].vnode.vdev.entity;
+		ret = media_create_pad_link(source, RKISP1_RSZ_PAD_SRC,
+					    sink, 0, flags);
+		if (ret)
+			return ret;
+	}
+
+	/* params links */
+	source = &rkisp1->params.vnode.vdev.entity;
+	sink = &rkisp1->isp.sd.entity;
+	ret = media_create_pad_link(source, 0, sink,
+				    RKISP1_ISP_PAD_SINK_PARAMS, flags);
+	if (ret)
+		return ret;
+
+	/* 3A stats links */
+	source = &rkisp1->isp.sd.entity;
+	sink = &rkisp1->stats.vnode.vdev.entity;
+	return media_create_pad_link(source, RKISP1_ISP_PAD_SOURCE_STATS,
+				     sink, 0, flags);
+}
+
+static int rkisp1_subdev_notifier_bound(struct v4l2_async_notifier *notifier,
+					struct v4l2_subdev *sd,
+					struct v4l2_async_subdev *asd)
+{
+	struct rkisp1_device *rkisp1 =
+		container_of(notifier, struct rkisp1_device, notifier);
+	struct rkisp1_sensor_async *s_asd =
+		container_of(asd, struct rkisp1_sensor_async, asd);
+
+	s_asd->pixel_rate_ctrl = v4l2_ctrl_find(sd->ctrl_handler,
+						V4L2_CID_PIXEL_RATE);
+	s_asd->sd = sd;
+	s_asd->dphy = devm_phy_get(rkisp1->dev, "dphy");
+	if (IS_ERR(s_asd->dphy)) {
+		if (PTR_ERR(s_asd->dphy) != -EPROBE_DEFER)
+			dev_err(rkisp1->dev, "Couldn't get the MIPI D-PHY\n");
+		return PTR_ERR(s_asd->dphy);
+	}
+
+	phy_init(s_asd->dphy);
+
+	return 0;
+}
+
+static void rkisp1_subdev_notifier_unbind(struct v4l2_async_notifier *notifier,
+					  struct v4l2_subdev *sd,
+					  struct v4l2_async_subdev *asd)
+{
+	struct rkisp1_sensor_async *s_asd =
+		container_of(asd, struct rkisp1_sensor_async, asd);
+
+	phy_exit(s_asd->dphy);
+}
+
+static int rkisp1_subdev_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+	struct rkisp1_device *rkisp1 =
+		container_of(notifier, struct rkisp1_device, notifier);
+	int ret;
+
+	mutex_lock(&rkisp1->media_dev.graph_mutex);
+	ret = rkisp1_create_links(rkisp1);
+	if (ret)
+		goto unlock;
+	ret = v4l2_device_register_subdev_nodes(&rkisp1->v4l2_dev);
+	if (ret)
+		goto unlock;
+
+	dev_dbg(rkisp1->dev, "Async subdev notifier completed\n");
+
+unlock:
+	mutex_unlock(&rkisp1->media_dev.graph_mutex);
+	return ret;
+}
+
+static int rkisp1_fwnode_parse(struct device *dev,
+			       struct v4l2_fwnode_endpoint *vep,
+			       struct v4l2_async_subdev *asd)
+{
+	struct rkisp1_sensor_async *s_asd =
+			container_of(asd, struct rkisp1_sensor_async, asd);
+
+	if (vep->bus_type != V4L2_MBUS_CSI2_DPHY) {
+		dev_err(dev, "Only CSI2 bus type is currently supported\n");
+		return -EINVAL;
+	}
+
+	if (vep->base.port != 0) {
+		dev_err(dev, "The ISP has only port 0\n");
+		return -EINVAL;
+	}
+
+	s_asd->mbus.type = vep->bus_type;
+	s_asd->mbus.flags = vep->bus.mipi_csi2.flags;
+	s_asd->lanes = vep->bus.mipi_csi2.num_data_lanes;
+
+	switch (vep->bus.mipi_csi2.num_data_lanes) {
+	case 1:
+		s_asd->mbus.flags |= V4L2_MBUS_CSI2_1_LANE;
+		break;
+	case 2:
+		s_asd->mbus.flags |= V4L2_MBUS_CSI2_2_LANE;
+		break;
+	case 3:
+		s_asd->mbus.flags |= V4L2_MBUS_CSI2_3_LANE;
+		break;
+	case 4:
+		s_asd->mbus.flags |= V4L2_MBUS_CSI2_4_LANE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct v4l2_async_notifier_operations rkisp1_subdev_notifier_ops = {
+	.bound = rkisp1_subdev_notifier_bound,
+	.unbind = rkisp1_subdev_notifier_unbind,
+	.complete = rkisp1_subdev_notifier_complete,
+};
+
+static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
+{
+	struct v4l2_async_notifier *ntf = &rkisp1->notifier;
+	struct device *dev = rkisp1->dev;
+	int ret;
+
+	v4l2_async_notifier_init(ntf);
+
+	ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(dev, ntf,
+					sizeof(struct rkisp1_sensor_async),
+					0, rkisp1_fwnode_parse);
+	if (ret)
+		return ret;
+
+	if (list_empty(&ntf->asd_list))
+		return -ENODEV;
+
+	ntf->ops = &rkisp1_subdev_notifier_ops;
+
+	return v4l2_async_notifier_register(&rkisp1->v4l2_dev, ntf);
+}
+
+/* ----------------------------------------------------------------------------
+ * Power
+ */
+
+static int __maybe_unused rkisp1_runtime_suspend(struct device *dev)
+{
+	struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+
+	clk_bulk_disable_unprepare(rkisp1->clk_size, rkisp1->clks);
+	return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int __maybe_unused rkisp1_runtime_resume(struct device *dev)
+{
+	struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+	int ret;
+
+	ret = pinctrl_pm_select_default_state(dev);
+	if (ret)
+		return ret;
+	ret = clk_bulk_prepare_enable(rkisp1->clk_size, rkisp1->clks);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static const struct dev_pm_ops rkisp1_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(rkisp1_runtime_suspend, rkisp1_runtime_resume, NULL)
+};
+
+/* ----------------------------------------------------------------------------
+ * Core
+ */
+
+static int rkisp1_entities_register(struct rkisp1_device *rkisp1)
+{
+	int ret;
+
+	ret = rkisp1_isp_register(rkisp1, &rkisp1->v4l2_dev);
+	if (ret)
+		return ret;
+
+	ret = rkisp1_resizer_devs_register(rkisp1);
+	if (ret)
+		goto err_unreg_isp_subdev;
+
+	ret = rkisp1_capture_devs_register(rkisp1);
+	if (ret)
+		goto err_unreg_resizer_devs;
+
+	ret = rkisp1_stats_register(&rkisp1->stats, &rkisp1->v4l2_dev, rkisp1);
+	if (ret)
+		goto err_unreg_capture_devs;
+
+	ret = rkisp1_params_register(&rkisp1->params,
+				     &rkisp1->v4l2_dev, rkisp1);
+	if (ret)
+		goto err_unreg_stats;
+
+	ret = rkisp1_subdev_notifier(rkisp1);
+	if (ret) {
+		dev_err(rkisp1->dev,
+			"Failed to register subdev notifier(%d)\n", ret);
+		goto err_unreg_params;
+	}
+
+	return 0;
+err_unreg_params:
+	rkisp1_params_unregister(&rkisp1->params);
+err_unreg_stats:
+	rkisp1_stats_unregister(&rkisp1->stats);
+err_unreg_capture_devs:
+	rkisp1_capture_devs_unregister(rkisp1);
+err_unreg_resizer_devs:
+	rkisp1_resizer_devs_unregister(rkisp1);
+err_unreg_isp_subdev:
+	rkisp1_isp_unregister(rkisp1);
+	return ret;
+}
+
+static irqreturn_t rkisp1_isr(int irq, void *ctx)
+{
+	struct device *dev = ctx;
+	struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+
+	/*
+	 * Call rkisp1_capture_isr() first to handle the frame that
+	 * potentially completed using the current frame_sequence number before
+	 * it is potentially incremented by rkisp1_isp_isr() in the vertical
+	 * sync.
+	 */
+	rkisp1_capture_isr(rkisp1);
+	rkisp1_isp_isr(rkisp1);
+	rkisp1_mipi_isr(rkisp1);
+
+	return IRQ_HANDLED;
+}
+
+static const char * const rk3399_isp_clks[] = {
+	"clk_isp",
+	"aclk_isp",
+	"hclk_isp",
+	"aclk_isp_wrap",
+	"hclk_isp_wrap",
+};
+
+static const struct rkisp1_match_data rk3399_isp_clk_data = {
+	.clks = rk3399_isp_clks,
+	.size = ARRAY_SIZE(rk3399_isp_clks),
+};
+
+static const struct of_device_id rkisp1_of_match[] = {
+	{
+		.compatible = "rockchip,rk3399-cif-isp",
+		.data = &rk3399_isp_clk_data,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, rkisp1_of_match);
+
+static void rkisp1_debug_init(struct rkisp1_device *rkisp1)
+{
+	struct rkisp1_debug *debug = &rkisp1->debug;
+
+	debug->debugfs_dir = debugfs_create_dir(RKISP1_DRIVER_NAME, NULL);
+	if (!debug->debugfs_dir) {
+		dev_dbg(rkisp1->dev, "failed to create debugfs directory\n");
+		return;
+	}
+	debugfs_create_ulong("data_loss", 0444, debug->debugfs_dir,
+			     &debug->data_loss);
+	debugfs_create_ulong("pic_size_error", 0444,  debug->debugfs_dir,
+			     &debug->pic_size_error);
+	debugfs_create_ulong("mipi_error", 0444, debug->debugfs_dir,
+			     &debug->mipi_error);
+	debugfs_create_ulong("stats_error", 0444, debug->debugfs_dir,
+			     &debug->stats_error);
+	debugfs_create_ulong("mp_stop_timeout", 0444, debug->debugfs_dir,
+			     &debug->stop_timeout[RKISP1_MAINPATH]);
+	debugfs_create_ulong("sp_stop_timeout", 0444, debug->debugfs_dir,
+			     &debug->stop_timeout[RKISP1_SELFPATH]);
+	debugfs_create_ulong("mp_frame_drop", 0444, debug->debugfs_dir,
+			     &debug->frame_drop[RKISP1_MAINPATH]);
+	debugfs_create_ulong("sp_frame_drop", 0444, debug->debugfs_dir,
+			     &debug->frame_drop[RKISP1_SELFPATH]);
+}
+
+static int rkisp1_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	const struct rkisp1_match_data *clk_data;
+	const struct of_device_id *match;
+	struct device *dev = &pdev->dev;
+	struct rkisp1_device *rkisp1;
+	struct v4l2_device *v4l2_dev;
+	unsigned int i;
+	int ret, irq;
+
+	match = of_match_node(rkisp1_of_match, node);
+	rkisp1 = devm_kzalloc(dev, sizeof(*rkisp1), GFP_KERNEL);
+	if (!rkisp1)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, rkisp1);
+	rkisp1->dev = dev;
+
+	rkisp1->base_addr = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(rkisp1->base_addr))
+		return PTR_ERR(rkisp1->base_addr);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	ret = devm_request_irq(dev, irq, rkisp1_isr, IRQF_SHARED,
+			       dev_driver_string(dev), dev);
+	if (ret) {
+		dev_err(dev, "request irq failed: %d\n", ret);
+		return ret;
+	}
+
+	rkisp1->irq = irq;
+	clk_data = match->data;
+
+	for (i = 0; i < clk_data->size; i++)
+		rkisp1->clks[i].id = clk_data->clks[i];
+	ret = devm_clk_bulk_get(dev, clk_data->size, rkisp1->clks);
+	if (ret)
+		return ret;
+	rkisp1->clk_size = clk_data->size;
+
+	pm_runtime_enable(&pdev->dev);
+
+	strscpy(rkisp1->media_dev.model, RKISP1_DRIVER_NAME,
+		sizeof(rkisp1->media_dev.model));
+	rkisp1->media_dev.dev = &pdev->dev;
+	strscpy(rkisp1->media_dev.bus_info,
+		"platform: " RKISP1_DRIVER_NAME,
+		sizeof(rkisp1->media_dev.bus_info));
+	media_device_init(&rkisp1->media_dev);
+
+	v4l2_dev = &rkisp1->v4l2_dev;
+	v4l2_dev->mdev = &rkisp1->media_dev;
+	strscpy(v4l2_dev->name, RKISP1_DRIVER_NAME, sizeof(v4l2_dev->name));
+
+	ret = v4l2_device_register(rkisp1->dev, &rkisp1->v4l2_dev);
+	if (ret)
+		return ret;
+
+	ret = media_device_register(&rkisp1->media_dev);
+	if (ret) {
+		dev_err(dev, "Failed to register media device: %d\n", ret);
+		goto err_unreg_v4l2_dev;
+	}
+
+	ret = rkisp1_entities_register(rkisp1);
+	if (ret)
+		goto err_unreg_media_dev;
+
+	rkisp1_debug_init(rkisp1);
+
+	return 0;
+
+err_unreg_media_dev:
+	media_device_unregister(&rkisp1->media_dev);
+err_unreg_v4l2_dev:
+	v4l2_device_unregister(&rkisp1->v4l2_dev);
+	pm_runtime_disable(&pdev->dev);
+	return ret;
+}
+
+static int rkisp1_remove(struct platform_device *pdev)
+{
+	struct rkisp1_device *rkisp1 = platform_get_drvdata(pdev);
+
+	v4l2_async_notifier_unregister(&rkisp1->notifier);
+	v4l2_async_notifier_cleanup(&rkisp1->notifier);
+
+	rkisp1_params_unregister(&rkisp1->params);
+	rkisp1_stats_unregister(&rkisp1->stats);
+	rkisp1_capture_devs_unregister(rkisp1);
+	rkisp1_resizer_devs_unregister(rkisp1);
+	rkisp1_isp_unregister(rkisp1);
+
+	media_device_unregister(&rkisp1->media_dev);
+	v4l2_device_unregister(&rkisp1->v4l2_dev);
+
+	pm_runtime_disable(&pdev->dev);
+
+	debugfs_remove_recursive(rkisp1->debug.debugfs_dir);
+	return 0;
+}
+
+static struct platform_driver rkisp1_drv = {
+	.driver = {
+		.name = RKISP1_DRIVER_NAME,
+		.of_match_table = of_match_ptr(rkisp1_of_match),
+		.pm = &rkisp1_pm_ops,
+	},
+	.probe = rkisp1_probe,
+	.remove = rkisp1_remove,
+};
+
+module_platform_driver(rkisp1_drv);
+MODULE_DESCRIPTION("Rockchip ISP1 platform driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/staging/media/rkisp1/rkisp1-isp.c b/drivers/staging/media/rkisp1/rkisp1-isp.c
new file mode 100644
index 0000000..328c7ea
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-isp.c
@@ -0,0 +1,1164 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - ISP Subdevice
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+#include <linux/vmalloc.h>
+#include <media/v4l2-event.h>
+
+#include "rkisp1-common.h"
+
+#define RKISP1_DEF_SINK_PAD_FMT MEDIA_BUS_FMT_SRGGB10_1X10
+#define RKISP1_DEF_SRC_PAD_FMT MEDIA_BUS_FMT_YUYV8_2X8
+
+#define RKISP1_ISP_DEV_NAME	RKISP1_DRIVER_NAME "_isp"
+
+#define RKISP1_DIR_SRC BIT(0)
+#define RKISP1_DIR_SINK BIT(1)
+#define RKISP1_DIR_SINK_SRC (RKISP1_DIR_SINK | RKISP1_DIR_SRC)
+
+/*
+ * NOTE: The MIPI controller and the input MUX are also configured in this
+ * file, because the ISP subdev does not only describe the ISP submodule
+ * (input size and format, output size and format), but also acts as a
+ * virtual routing device.
+ */
+
+/*
+ * Many variables in the code below are named after formats and frames; the
+ * diagram below explains their meaning.
+ * Cropping on the sink pad defines the image region coming from the sensor.
+ * Cropping on the source pad defines the region for the Image Stabilizer (IS).
+ *
+ * Cropping regions of ISP
+ *
+ * +---------------------------------------------------------+
+ * | Sensor image                                            |
+ * | +---------------------------------------------------+   |
+ * | | CIF_ISP_ACQ (for black level)                     |   |
+ * | | sink pad format                                   |   |
+ * | | +--------------------------------------------+    |   |
+ * | | |    CIF_ISP_OUT                             |    |   |
+ * | | |    sink pad crop                           |    |   |
+ * | | |    +---------------------------------+     |    |   |
+ * | | |    |   CIF_ISP_IS                    |     |    |   |
+ * | | |    |   source pad crop and format    |     |    |   |
+ * | | |    +---------------------------------+     |    |   |
+ * | | +--------------------------------------------+    |   |
+ * | +---------------------------------------------------+   |
+ * +---------------------------------------------------------+
+ */
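+
+/*
+ * As a userspace illustration of the sink pad crop above, a minimal sketch
+ * using the V4L2 subdev selection API could look roughly like this (the pad
+ * index and the /dev/v4l-subdev0 node are assumptions that depend on the
+ * media topology; includes such as <fcntl.h>, <sys/ioctl.h> and
+ * <linux/v4l2-subdev.h> and error handling are omitted):
+ *
+ *	struct v4l2_subdev_selection sel = {
+ *		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ *		.pad = 0,		// sink (video) pad index
+ *		.target = V4L2_SEL_TGT_CROP,
+ *		.r = { .left = 0, .top = 0, .width = 1920, .height = 1080 },
+ *	};
+ *	int fd = open("/dev/v4l-subdev0", O_RDWR);
+ *
+ *	ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &sel);
+ *
+ * The driver propagates the sink crop to the source crop and format, see
+ * rkisp1_isp_set_sink_crop() below.
+ */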
+
+static const struct rkisp1_isp_mbus_info rkisp1_isp_formats[] = {
+	{
+		.mbus_code	= MEDIA_BUS_FMT_YUYV8_2X8,
+		.fmt_type	= RKISP1_FMT_YUV,
+		.direction	= RKISP1_DIR_SRC,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
+		.fmt_type	= RKISP1_FMT_BAYER,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_RAW10,
+		.bayer_pat	= RKISP1_RAW_RGGB,
+		.bus_width	= 10,
+		.direction	= RKISP1_DIR_SINK_SRC,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
+		.fmt_type	= RKISP1_FMT_BAYER,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_RAW10,
+		.bayer_pat	= RKISP1_RAW_BGGR,
+		.bus_width	= 10,
+		.direction	= RKISP1_DIR_SINK_SRC,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
+		.fmt_type	= RKISP1_FMT_BAYER,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_RAW10,
+		.bayer_pat	= RKISP1_RAW_GBRG,
+		.bus_width	= 10,
+		.direction	= RKISP1_DIR_SINK_SRC,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
+		.fmt_type	= RKISP1_FMT_BAYER,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_RAW10,
+		.bayer_pat	= RKISP1_RAW_GRBG,
+		.bus_width	= 10,
+		.direction	= RKISP1_DIR_SINK_SRC,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_SRGGB12_1X12,
+		.fmt_type	= RKISP1_FMT_BAYER,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_RAW12,
+		.bayer_pat	= RKISP1_RAW_RGGB,
+		.bus_width	= 12,
+		.direction	= RKISP1_DIR_SINK_SRC,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_SBGGR12_1X12,
+		.fmt_type	= RKISP1_FMT_BAYER,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_RAW12,
+		.bayer_pat	= RKISP1_RAW_BGGR,
+		.bus_width	= 12,
+		.direction	= RKISP1_DIR_SINK_SRC,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_SGBRG12_1X12,
+		.fmt_type	= RKISP1_FMT_BAYER,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_RAW12,
+		.bayer_pat	= RKISP1_RAW_GBRG,
+		.bus_width	= 12,
+		.direction	= RKISP1_DIR_SINK_SRC,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_SGRBG12_1X12,
+		.fmt_type	= RKISP1_FMT_BAYER,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_RAW12,
+		.bayer_pat	= RKISP1_RAW_GRBG,
+		.bus_width	= 12,
+		.direction	= RKISP1_DIR_SINK_SRC,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_SRGGB8_1X8,
+		.fmt_type	= RKISP1_FMT_BAYER,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_RAW8,
+		.bayer_pat	= RKISP1_RAW_RGGB,
+		.bus_width	= 8,
+		.direction	= RKISP1_DIR_SINK_SRC,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_SBGGR8_1X8,
+		.fmt_type	= RKISP1_FMT_BAYER,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_RAW8,
+		.bayer_pat	= RKISP1_RAW_BGGR,
+		.bus_width	= 8,
+		.direction	= RKISP1_DIR_SINK_SRC,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_SGBRG8_1X8,
+		.fmt_type	= RKISP1_FMT_BAYER,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_RAW8,
+		.bayer_pat	= RKISP1_RAW_GBRG,
+		.bus_width	= 8,
+		.direction	= RKISP1_DIR_SINK_SRC,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_SGRBG8_1X8,
+		.fmt_type	= RKISP1_FMT_BAYER,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_RAW8,
+		.bayer_pat	= RKISP1_RAW_GRBG,
+		.bus_width	= 8,
+		.direction	= RKISP1_DIR_SINK_SRC,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_YUYV8_1X16,
+		.fmt_type	= RKISP1_FMT_YUV,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_YUV422_8b,
+		.yuv_seq	= RKISP1_CIF_ISP_ACQ_PROP_YCBYCR,
+		.bus_width	= 16,
+		.direction	= RKISP1_DIR_SINK,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_YVYU8_1X16,
+		.fmt_type	= RKISP1_FMT_YUV,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_YUV422_8b,
+		.yuv_seq	= RKISP1_CIF_ISP_ACQ_PROP_YCRYCB,
+		.bus_width	= 16,
+		.direction	= RKISP1_DIR_SINK,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_UYVY8_1X16,
+		.fmt_type	= RKISP1_FMT_YUV,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_YUV422_8b,
+		.yuv_seq	= RKISP1_CIF_ISP_ACQ_PROP_CBYCRY,
+		.bus_width	= 16,
+		.direction	= RKISP1_DIR_SINK,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_VYUY8_1X16,
+		.fmt_type	= RKISP1_FMT_YUV,
+		.mipi_dt	= RKISP1_CIF_CSI2_DT_YUV422_8b,
+		.yuv_seq	= RKISP1_CIF_ISP_ACQ_PROP_CRYCBY,
+		.bus_width	= 16,
+		.direction	= RKISP1_DIR_SINK,
+	},
+};
+
+/* ----------------------------------------------------------------------------
+ * Helpers
+ */
+
+const struct rkisp1_isp_mbus_info *rkisp1_isp_mbus_info_get(u32 mbus_code)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(rkisp1_isp_formats); i++) {
+		const struct rkisp1_isp_mbus_info *fmt = &rkisp1_isp_formats[i];
+
+		if (fmt->mbus_code == mbus_code)
+			return fmt;
+	}
+
+	return NULL;
+}
+
+static struct v4l2_subdev *rkisp1_get_remote_sensor(struct v4l2_subdev *sd)
+{
+	struct media_pad *local, *remote;
+	struct media_entity *sensor_me;
+
+	local = &sd->entity.pads[RKISP1_ISP_PAD_SINK_VIDEO];
+	remote = media_entity_remote_pad(local);
+	if (!remote) {
+		dev_warn(sd->dev, "No link between isp and sensor\n");
+		return NULL;
+	}
+
+	sensor_me = remote->entity;
+	return media_entity_to_v4l2_subdev(sensor_me);
+}
+
+static struct v4l2_mbus_framefmt *
+rkisp1_isp_get_pad_fmt(struct rkisp1_isp *isp,
+		       struct v4l2_subdev_pad_config *cfg,
+		       unsigned int pad, u32 which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(&isp->sd, cfg, pad);
+	else
+		return v4l2_subdev_get_try_format(&isp->sd, isp->pad_cfg, pad);
+}
+
+static struct v4l2_rect *
+rkisp1_isp_get_pad_crop(struct rkisp1_isp *isp,
+			struct v4l2_subdev_pad_config *cfg,
+			unsigned int pad, u32 which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_crop(&isp->sd, cfg, pad);
+	else
+		return v4l2_subdev_get_try_crop(&isp->sd, isp->pad_cfg, pad);
+}
+
+/* ----------------------------------------------------------------------------
+ * Camera Interface registers configurations
+ */
+
+/*
+ * Image Stabilization.
+ * This should only be called when configuring CIF
+ * or at the frame end interrupt
+ */
+static void rkisp1_config_ism(struct rkisp1_device *rkisp1)
+{
+	struct v4l2_rect *src_crop =
+		rkisp1_isp_get_pad_crop(&rkisp1->isp, NULL,
+					RKISP1_ISP_PAD_SOURCE_VIDEO,
+					V4L2_SUBDEV_FORMAT_ACTIVE);
+	u32 val;
+
+	rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IS_RECENTER);
+	rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IS_MAX_DX);
+	rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IS_MAX_DY);
+	rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IS_DISPLACE);
+	rkisp1_write(rkisp1, src_crop->left, RKISP1_CIF_ISP_IS_H_OFFS);
+	rkisp1_write(rkisp1, src_crop->top, RKISP1_CIF_ISP_IS_V_OFFS);
+	rkisp1_write(rkisp1, src_crop->width, RKISP1_CIF_ISP_IS_H_SIZE);
+	rkisp1_write(rkisp1, src_crop->height, RKISP1_CIF_ISP_IS_V_SIZE);
+
+	/* IS (Image Stabilization) is always on, acting as an output crop. */
+	rkisp1_write(rkisp1, 1, RKISP1_CIF_ISP_IS_CTRL);
+	val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
+	val |= RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD;
+	rkisp1_write(rkisp1, val, RKISP1_CIF_ISP_CTRL);
+}
+
+/*
+ * Configure the ISP blocks according to the input format, size, etc.
+ */
+static int rkisp1_config_isp(struct rkisp1_device *rkisp1)
+{
+	u32 isp_ctrl = 0, irq_mask = 0, acq_mult = 0, signal = 0;
+	const struct rkisp1_isp_mbus_info *src_fmt, *sink_fmt;
+	struct rkisp1_sensor_async *sensor;
+	struct v4l2_mbus_framefmt *sink_frm;
+	struct v4l2_rect *sink_crop;
+
+	sensor = rkisp1->active_sensor;
+	sink_fmt = rkisp1->isp.sink_fmt;
+	src_fmt = rkisp1->isp.src_fmt;
+	sink_frm = rkisp1_isp_get_pad_fmt(&rkisp1->isp, NULL,
+					  RKISP1_ISP_PAD_SINK_VIDEO,
+					  V4L2_SUBDEV_FORMAT_ACTIVE);
+	sink_crop = rkisp1_isp_get_pad_crop(&rkisp1->isp, NULL,
+					    RKISP1_ISP_PAD_SINK_VIDEO,
+					    V4L2_SUBDEV_FORMAT_ACTIVE);
+
+	if (sink_fmt->fmt_type == RKISP1_FMT_BAYER) {
+		acq_mult = 1;
+		if (src_fmt->fmt_type == RKISP1_FMT_BAYER) {
+			if (sensor->mbus.type == V4L2_MBUS_BT656)
+				isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT_ITU656;
+			else
+				isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT;
+		} else {
+			rkisp1_write(rkisp1, RKISP1_CIF_ISP_DEMOSAIC_TH(0xc),
+				     RKISP1_CIF_ISP_DEMOSAIC);
+
+			if (sensor->mbus.type == V4L2_MBUS_BT656)
+				isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU656;
+			else
+				isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU601;
+		}
+	} else if (sink_fmt->fmt_type == RKISP1_FMT_YUV) {
+		acq_mult = 2;
+		if (sensor->mbus.type == V4L2_MBUS_CSI2_DPHY) {
+			isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU601;
+		} else {
+			if (sensor->mbus.type == V4L2_MBUS_BT656)
+				isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU656;
+			else
+				isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU601;
+		}
+
+		irq_mask |= RKISP1_CIF_ISP_DATA_LOSS;
+	}
+
+	/* Set up input acquisition properties */
+	if (sensor->mbus.type == V4L2_MBUS_BT656 ||
+	    sensor->mbus.type == V4L2_MBUS_PARALLEL) {
+		if (sensor->mbus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+			signal = RKISP1_CIF_ISP_ACQ_PROP_POS_EDGE;
+	}
+
+	if (sensor->mbus.type == V4L2_MBUS_PARALLEL) {
+		if (sensor->mbus.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+			signal |= RKISP1_CIF_ISP_ACQ_PROP_VSYNC_LOW;
+
+		if (sensor->mbus.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+			signal |= RKISP1_CIF_ISP_ACQ_PROP_HSYNC_LOW;
+	}
+
+	rkisp1_write(rkisp1, isp_ctrl, RKISP1_CIF_ISP_CTRL);
+	rkisp1_write(rkisp1, signal | sink_fmt->yuv_seq |
+		     RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT(sink_fmt->bayer_pat) |
+		     RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_ALL,
+		     RKISP1_CIF_ISP_ACQ_PROP);
+	rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_ACQ_NR_FRAMES);
+
+	/* Acquisition Size */
+	rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_ACQ_H_OFFS);
+	rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_ACQ_V_OFFS);
+	rkisp1_write(rkisp1,
+		     acq_mult * sink_frm->width, RKISP1_CIF_ISP_ACQ_H_SIZE);
+	rkisp1_write(rkisp1, sink_frm->height, RKISP1_CIF_ISP_ACQ_V_SIZE);
+
+	/* ISP Out Area */
+	rkisp1_write(rkisp1, sink_crop->left, RKISP1_CIF_ISP_OUT_H_OFFS);
+	rkisp1_write(rkisp1, sink_crop->top, RKISP1_CIF_ISP_OUT_V_OFFS);
+	rkisp1_write(rkisp1, sink_crop->width, RKISP1_CIF_ISP_OUT_H_SIZE);
+	rkisp1_write(rkisp1, sink_crop->height, RKISP1_CIF_ISP_OUT_V_SIZE);
+
+	irq_mask |= RKISP1_CIF_ISP_FRAME | RKISP1_CIF_ISP_V_START |
+		    RKISP1_CIF_ISP_PIC_SIZE_ERROR | RKISP1_CIF_ISP_FRAME_IN;
+	rkisp1_write(rkisp1, irq_mask, RKISP1_CIF_ISP_IMSC);
+
+	if (src_fmt->fmt_type == RKISP1_FMT_BAYER) {
+		rkisp1_params_disable(&rkisp1->params);
+	} else {
+		struct v4l2_mbus_framefmt *src_frm;
+
+		src_frm = rkisp1_isp_get_pad_fmt(&rkisp1->isp, NULL,
+						 RKISP1_ISP_PAD_SINK_VIDEO,
+						 V4L2_SUBDEV_FORMAT_ACTIVE);
+		rkisp1_params_configure(&rkisp1->params, sink_fmt->bayer_pat,
+					src_frm->quantization);
+	}
+
+	return 0;
+}
+
+static int rkisp1_config_dvp(struct rkisp1_device *rkisp1)
+{
+	const struct rkisp1_isp_mbus_info *sink_fmt = rkisp1->isp.sink_fmt;
+	u32 val, input_sel;
+
+	switch (sink_fmt->bus_width) {
+	case 8:
+		input_sel = RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_8B_ZERO;
+		break;
+	case 10:
+		input_sel = RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_10B_ZERO;
+		break;
+	case 12:
+		input_sel = RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_12B;
+		break;
+	default:
+		dev_err(rkisp1->dev, "Invalid bus width\n");
+		return -EINVAL;
+	}
+
+	val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_ACQ_PROP);
+	rkisp1_write(rkisp1, val | input_sel, RKISP1_CIF_ISP_ACQ_PROP);
+
+	return 0;
+}
+
+static int rkisp1_config_mipi(struct rkisp1_device *rkisp1)
+{
+	const struct rkisp1_isp_mbus_info *sink_fmt = rkisp1->isp.sink_fmt;
+	unsigned int lanes;
+	u32 mipi_ctrl;
+
+	/*
+	 * rkisp1->active_sensor->mbus is set in the ISP or D-PHY
+	 * notifier_bound function.
+	 */
+	switch (rkisp1->active_sensor->mbus.flags & V4L2_MBUS_CSI2_LANES) {
+	case V4L2_MBUS_CSI2_4_LANE:
+		lanes = 4;
+		break;
+	case V4L2_MBUS_CSI2_3_LANE:
+		lanes = 3;
+		break;
+	case V4L2_MBUS_CSI2_2_LANE:
+		lanes = 2;
+		break;
+	case V4L2_MBUS_CSI2_1_LANE:
+		lanes = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mipi_ctrl = RKISP1_CIF_MIPI_CTRL_NUM_LANES(lanes - 1) |
+		    RKISP1_CIF_MIPI_CTRL_SHUTDOWNLANES(0xf) |
+		    RKISP1_CIF_MIPI_CTRL_ERR_SOT_SYNC_HS_SKIP |
+		    RKISP1_CIF_MIPI_CTRL_CLOCKLANE_ENA;
+
+	rkisp1_write(rkisp1, mipi_ctrl, RKISP1_CIF_MIPI_CTRL);
+
+	/* Configure Data Type and Virtual Channel */
+	rkisp1_write(rkisp1,
+		     RKISP1_CIF_MIPI_DATA_SEL_DT(sink_fmt->mipi_dt) |
+		     RKISP1_CIF_MIPI_DATA_SEL_VC(0),
+		     RKISP1_CIF_MIPI_IMG_DATA_SEL);
+
+	/* Clear MIPI interrupts */
+	rkisp1_write(rkisp1, ~0, RKISP1_CIF_MIPI_ICR);
+	/*
+	 * Temporarily disable the RKISP1_CIF_MIPI_ERR_DPHY interrupt here,
+	 * as the ISP bus may be dead when the ISP is switched.
+	 */
+	rkisp1_write(rkisp1,
+		     RKISP1_CIF_MIPI_FRAME_END | RKISP1_CIF_MIPI_ERR_CSI |
+		     RKISP1_CIF_MIPI_ERR_DPHY |
+		     RKISP1_CIF_MIPI_SYNC_FIFO_OVFLW(0x03) |
+		     RKISP1_CIF_MIPI_ADD_DATA_OVFLW,
+		     RKISP1_CIF_MIPI_IMSC);
+
+	dev_dbg(rkisp1->dev, "\n  MIPI_CTRL 0x%08x\n"
+		"  MIPI_IMG_DATA_SEL 0x%08x\n"
+		"  MIPI_STATUS 0x%08x\n"
+		"  MIPI_IMSC 0x%08x\n",
+		rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL),
+		rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMG_DATA_SEL),
+		rkisp1_read(rkisp1, RKISP1_CIF_MIPI_STATUS),
+		rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC));
+
+	return 0;
+}
+
+/* Configure MUX */
+static int rkisp1_config_path(struct rkisp1_device *rkisp1)
+{
+	struct rkisp1_sensor_async *sensor = rkisp1->active_sensor;
+	u32 dpcl = rkisp1_read(rkisp1, RKISP1_CIF_VI_DPCL);
+	int ret = 0;
+
+	if (sensor->mbus.type == V4L2_MBUS_BT656 ||
+	    sensor->mbus.type == V4L2_MBUS_PARALLEL) {
+		ret = rkisp1_config_dvp(rkisp1);
+		dpcl |= RKISP1_CIF_VI_DPCL_IF_SEL_PARALLEL;
+	} else if (sensor->mbus.type == V4L2_MBUS_CSI2_DPHY) {
+		ret = rkisp1_config_mipi(rkisp1);
+		dpcl |= RKISP1_CIF_VI_DPCL_IF_SEL_MIPI;
+	}
+
+	rkisp1_write(rkisp1, dpcl, RKISP1_CIF_VI_DPCL);
+
+	return ret;
+}
+
+/* Hardware configuration entry point */
+static int rkisp1_config_cif(struct rkisp1_device *rkisp1)
+{
+	u32 cif_id;
+	int ret;
+
+	cif_id = rkisp1_read(rkisp1, RKISP1_CIF_VI_ID);
+	dev_dbg(rkisp1->dev, "CIF_ID 0x%08x\n", cif_id);
+
+	ret = rkisp1_config_isp(rkisp1);
+	if (ret)
+		return ret;
+	ret = rkisp1_config_path(rkisp1);
+	if (ret)
+		return ret;
+	rkisp1_config_ism(rkisp1);
+
+	return 0;
+}
+
+static int rkisp1_isp_stop(struct rkisp1_device *rkisp1)
+{
+	u32 val;
+
+	/*
+	 * Stop the ISP memory interface (MI) at the MI frame end, then stop
+	 * the MIPI interface, then stop the ISP core and wait for the ISP to
+	 * be off.
+	 */
+	/* stop and clear MI, MIPI, and ISP interrupts */
+	rkisp1_write(rkisp1, 0, RKISP1_CIF_MIPI_IMSC);
+	rkisp1_write(rkisp1, ~0, RKISP1_CIF_MIPI_ICR);
+
+	rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IMSC);
+	rkisp1_write(rkisp1, ~0, RKISP1_CIF_ISP_ICR);
+
+	rkisp1_write(rkisp1, 0, RKISP1_CIF_MI_IMSC);
+	rkisp1_write(rkisp1, ~0, RKISP1_CIF_MI_ICR);
+	val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);
+	rkisp1_write(rkisp1, val & (~RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA),
+		     RKISP1_CIF_MIPI_CTRL);
+	/* stop ISP */
+	val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
+	val &= ~(RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE |
+		 RKISP1_CIF_ISP_CTRL_ISP_ENABLE);
+	rkisp1_write(rkisp1, val, RKISP1_CIF_ISP_CTRL);
+
+	val = rkisp1_read(rkisp1,	RKISP1_CIF_ISP_CTRL);
+	rkisp1_write(rkisp1, val | RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD,
+		     RKISP1_CIF_ISP_CTRL);
+
+	readx_poll_timeout(readl, rkisp1->base_addr + RKISP1_CIF_ISP_RIS,
+			   val, val & RKISP1_CIF_ISP_OFF, 20, 100);
+	rkisp1_write(rkisp1,
+		     RKISP1_CIF_IRCL_MIPI_SW_RST | RKISP1_CIF_IRCL_ISP_SW_RST,
+		     RKISP1_CIF_IRCL);
+	rkisp1_write(rkisp1, 0x0, RKISP1_CIF_IRCL);
+
+	return 0;
+}
+
+static void rkisp1_config_clk(struct rkisp1_device *rkisp1)
+{
+	u32 val = RKISP1_CIF_ICCL_ISP_CLK | RKISP1_CIF_ICCL_CP_CLK |
+		  RKISP1_CIF_ICCL_MRSZ_CLK | RKISP1_CIF_ICCL_SRSZ_CLK |
+		  RKISP1_CIF_ICCL_JPEG_CLK | RKISP1_CIF_ICCL_MI_CLK |
+		  RKISP1_CIF_ICCL_IE_CLK | RKISP1_CIF_ICCL_MIPI_CLK |
+		  RKISP1_CIF_ICCL_DCROP_CLK;
+
+	rkisp1_write(rkisp1, val, RKISP1_CIF_ICCL);
+}
+
+static int rkisp1_isp_start(struct rkisp1_device *rkisp1)
+{
+	struct rkisp1_sensor_async *sensor = rkisp1->active_sensor;
+	u32 val;
+
+	rkisp1_config_clk(rkisp1);
+
+	/* Activate MIPI */
+	if (sensor->mbus.type == V4L2_MBUS_CSI2_DPHY) {
+		val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);
+		rkisp1_write(rkisp1, val | RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA,
+			     RKISP1_CIF_MIPI_CTRL);
+	}
+	/* Activate ISP */
+	val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
+	val |= RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD |
+	       RKISP1_CIF_ISP_CTRL_ISP_ENABLE |
+	       RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE;
+	rkisp1_write(rkisp1, val, RKISP1_CIF_ISP_CTRL);
+
+	/*
+	 * CIF spec says to wait for sufficient time after enabling
+	 * the MIPI interface and before starting the sensor output.
+	 */
+	usleep_range(1000, 1200);
+
+	return 0;
+}
+
+/* ----------------------------------------------------------------------------
+ * Subdev pad operations
+ */
+
+static int rkisp1_isp_enum_mbus_code(struct v4l2_subdev *sd,
+				     struct v4l2_subdev_pad_config *cfg,
+				     struct v4l2_subdev_mbus_code_enum *code)
+{
+	unsigned int i, dir;
+	int pos = 0;
+
+	if (code->pad == RKISP1_ISP_PAD_SINK_VIDEO) {
+		dir = RKISP1_DIR_SINK;
+	} else if (code->pad == RKISP1_ISP_PAD_SOURCE_VIDEO) {
+		dir = RKISP1_DIR_SRC;
+	} else {
+		if (code->index > 0)
+			return -EINVAL;
+		code->code = MEDIA_BUS_FMT_FIXED;
+		return 0;
+	}
+
+	if (code->index >= ARRAY_SIZE(rkisp1_isp_formats))
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(rkisp1_isp_formats); i++) {
+		const struct rkisp1_isp_mbus_info *fmt = &rkisp1_isp_formats[i];
+
+		if (fmt->direction & dir)
+			pos++;
+
+		if (code->index == pos - 1) {
+			code->code = fmt->mbus_code;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_pad_config *cfg)
+{
+	struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
+	struct v4l2_rect *sink_crop, *src_crop;
+
+	sink_fmt = v4l2_subdev_get_try_format(sd, cfg,
+					      RKISP1_ISP_PAD_SINK_VIDEO);
+	sink_fmt->width = RKISP1_DEFAULT_WIDTH;
+	sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
+	sink_fmt->field = V4L2_FIELD_NONE;
+	sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT;
+
+	sink_crop = v4l2_subdev_get_try_crop(sd, cfg,
+					     RKISP1_ISP_PAD_SINK_VIDEO);
+	sink_crop->width = RKISP1_DEFAULT_WIDTH;
+	sink_crop->height = RKISP1_DEFAULT_HEIGHT;
+	sink_crop->left = 0;
+	sink_crop->top = 0;
+
+	src_fmt = v4l2_subdev_get_try_format(sd, cfg,
+					     RKISP1_ISP_PAD_SOURCE_VIDEO);
+	*src_fmt = *sink_fmt;
+	src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
+	src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+	src_crop = v4l2_subdev_get_try_crop(sd, cfg,
+					    RKISP1_ISP_PAD_SOURCE_VIDEO);
+	*src_crop = *sink_crop;
+
+	sink_fmt = v4l2_subdev_get_try_format(sd, cfg,
+					      RKISP1_ISP_PAD_SINK_PARAMS);
+	src_fmt = v4l2_subdev_get_try_format(sd, cfg,
+					     RKISP1_ISP_PAD_SOURCE_STATS);
+	sink_fmt->width = RKISP1_DEFAULT_WIDTH;
+	sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
+	sink_fmt->field = V4L2_FIELD_NONE;
+	sink_fmt->code = MEDIA_BUS_FMT_FIXED;
+	*src_fmt = *sink_fmt;
+
+	return 0;
+}
+
+static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
+				   struct v4l2_subdev_pad_config *cfg,
+				   struct v4l2_mbus_framefmt *format,
+				   unsigned int which)
+{
+	const struct rkisp1_isp_mbus_info *mbus_info;
+	struct v4l2_mbus_framefmt *src_fmt;
+	const struct v4l2_rect *src_crop;
+
+	src_fmt = rkisp1_isp_get_pad_fmt(isp, cfg,
+					 RKISP1_ISP_PAD_SOURCE_VIDEO, which);
+	src_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+					   RKISP1_ISP_PAD_SOURCE_VIDEO, which);
+
+	src_fmt->code = format->code;
+	mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
+	if (!mbus_info) {
+		src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
+		mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
+	}
+	if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+		isp->src_fmt = mbus_info;
+	src_fmt->width  = src_crop->width;
+	src_fmt->height = src_crop->height;
+	src_fmt->quantization = format->quantization;
+	/* full range by default */
+	if (!src_fmt->quantization)
+		src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+	*format = *src_fmt;
+}
+
+static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp,
+				    struct v4l2_subdev_pad_config *cfg,
+				    struct v4l2_rect *r, unsigned int which)
+{
+	struct v4l2_mbus_framefmt *src_fmt;
+	const struct v4l2_rect *sink_crop;
+	struct v4l2_rect *src_crop;
+
+	src_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+					   RKISP1_ISP_PAD_SOURCE_VIDEO,
+					   which);
+	sink_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+					    RKISP1_ISP_PAD_SINK_VIDEO,
+					    which);
+
+	src_crop->left = ALIGN(r->left, 2);
+	src_crop->width = ALIGN(r->width, 2);
+	src_crop->top = r->top;
+	src_crop->height = r->height;
+	rkisp1_sd_adjust_crop_rect(src_crop, sink_crop);
+
+	*r = *src_crop;
+
+	/* Propagate to out format */
+	src_fmt = rkisp1_isp_get_pad_fmt(isp, cfg,
+					 RKISP1_ISP_PAD_SOURCE_VIDEO, which);
+	rkisp1_isp_set_src_fmt(isp, cfg, src_fmt, which);
+}
+
+static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp,
+				     struct v4l2_subdev_pad_config *cfg,
+				     struct v4l2_rect *r, unsigned int which)
+{
+	struct v4l2_rect *sink_crop, *src_crop;
+	struct v4l2_mbus_framefmt *sink_fmt;
+
+	sink_crop = rkisp1_isp_get_pad_crop(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+					    which);
+	sink_fmt = rkisp1_isp_get_pad_fmt(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+					  which);
+
+	sink_crop->left = ALIGN(r->left, 2);
+	sink_crop->width = ALIGN(r->width, 2);
+	sink_crop->top = r->top;
+	sink_crop->height = r->height;
+	rkisp1_sd_adjust_crop(sink_crop, sink_fmt);
+
+	*r = *sink_crop;
+
+	/* Propagate to out crop */
+	src_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+					   RKISP1_ISP_PAD_SOURCE_VIDEO, which);
+	rkisp1_isp_set_src_crop(isp, cfg, src_crop, which);
+}
+
+static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
+				    struct v4l2_subdev_pad_config *cfg,
+				    struct v4l2_mbus_framefmt *format,
+				    unsigned int which)
+{
+	const struct rkisp1_isp_mbus_info *mbus_info;
+	struct v4l2_mbus_framefmt *sink_fmt;
+	struct v4l2_rect *sink_crop;
+
+	sink_fmt = rkisp1_isp_get_pad_fmt(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+					  which);
+	sink_fmt->code = format->code;
+	mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+	if (!mbus_info) {
+		sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT;
+		mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+	}
+	if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+		isp->sink_fmt = mbus_info;
+
+	sink_fmt->width = clamp_t(u32, format->width,
+				  RKISP1_ISP_MIN_WIDTH,
+				  RKISP1_ISP_MAX_WIDTH);
+	sink_fmt->height = clamp_t(u32, format->height,
+				   RKISP1_ISP_MIN_HEIGHT,
+				   RKISP1_ISP_MAX_HEIGHT);
+
+	*format = *sink_fmt;
+
+	/* Propagate to in crop */
+	sink_crop = rkisp1_isp_get_pad_crop(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+					    which);
+	rkisp1_isp_set_sink_crop(isp, cfg, sink_crop, which);
+}
+
+static int rkisp1_isp_get_fmt(struct v4l2_subdev *sd,
+			      struct v4l2_subdev_pad_config *cfg,
+			      struct v4l2_subdev_format *fmt)
+{
+	struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+
+	fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad, fmt->which);
+	return 0;
+}
+
+static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
+			      struct v4l2_subdev_pad_config *cfg,
+			      struct v4l2_subdev_format *fmt)
+{
+	struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+
+	if (fmt->pad == RKISP1_ISP_PAD_SINK_VIDEO)
+		rkisp1_isp_set_sink_fmt(isp, cfg, &fmt->format, fmt->which);
+	else if (fmt->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
+		rkisp1_isp_set_src_fmt(isp, cfg, &fmt->format, fmt->which);
+	else
+		fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad,
+						      fmt->which);
+
+	return 0;
+}
+
+static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_pad_config *cfg,
+				    struct v4l2_subdev_selection *sel)
+{
+	struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+
+	if (sel->pad != RKISP1_ISP_PAD_SOURCE_VIDEO &&
+	    sel->pad != RKISP1_ISP_PAD_SINK_VIDEO)
+		return -EINVAL;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO) {
+			struct v4l2_mbus_framefmt *fmt;
+
+			fmt = rkisp1_isp_get_pad_fmt(isp, cfg, sel->pad,
+						     sel->which);
+			sel->r.height = fmt->height;
+			sel->r.width = fmt->width;
+			sel->r.left = 0;
+			sel->r.top = 0;
+		} else {
+			sel->r = *rkisp1_isp_get_pad_crop(isp, cfg,
+						RKISP1_ISP_PAD_SINK_VIDEO,
+						sel->which);
+		}
+		break;
+	case V4L2_SEL_TGT_CROP:
+		sel->r = *rkisp1_isp_get_pad_crop(isp, cfg, sel->pad,
+						  sel->which);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rkisp1_isp_set_selection(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_pad_config *cfg,
+				    struct v4l2_subdev_selection *sel)
+{
+	struct rkisp1_device *rkisp1 =
+		container_of(sd->v4l2_dev, struct rkisp1_device, v4l2_dev);
+	struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+
+	if (sel->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
+
+	dev_dbg(rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
+		sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
+
+	if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO)
+		rkisp1_isp_set_sink_crop(isp, cfg, &sel->r, sel->which);
+	else if (sel->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
+		rkisp1_isp_set_src_crop(isp, cfg, &sel->r, sel->which);
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+static int rkisp1_subdev_link_validate(struct media_link *link)
+{
+	if (link->sink->index == RKISP1_ISP_PAD_SINK_PARAMS)
+		return 0;
+
+	return v4l2_subdev_link_validate(link);
+}
+
+static const struct v4l2_subdev_pad_ops rkisp1_isp_pad_ops = {
+	.enum_mbus_code = rkisp1_isp_enum_mbus_code,
+	.get_selection = rkisp1_isp_get_selection,
+	.set_selection = rkisp1_isp_set_selection,
+	.init_cfg = rkisp1_isp_init_config,
+	.get_fmt = rkisp1_isp_get_fmt,
+	.set_fmt = rkisp1_isp_set_fmt,
+	.link_validate = v4l2_subdev_link_validate_default,
+};
+
+/* ----------------------------------------------------------------------------
+ * Stream operations
+ */
+
+static int rkisp1_mipi_csi2_start(struct rkisp1_isp *isp,
+				  struct rkisp1_sensor_async *sensor)
+{
+	union phy_configure_opts opts;
+	struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
+	s64 pixel_clock;
+
+	if (!sensor->pixel_rate_ctrl) {
+		dev_warn(sensor->sd->dev, "No pixel rate control in subdev\n");
+		return -EPIPE;
+	}
+
+	pixel_clock = v4l2_ctrl_g_ctrl_int64(sensor->pixel_rate_ctrl);
+	if (!pixel_clock) {
+		dev_err(sensor->sd->dev, "Invalid pixel rate value\n");
+		return -EINVAL;
+	}
+
+	phy_mipi_dphy_get_default_config(pixel_clock, isp->sink_fmt->bus_width,
+					 sensor->lanes, cfg);
+	phy_set_mode(sensor->dphy, PHY_MODE_MIPI_DPHY);
+	phy_configure(sensor->dphy, &opts);
+	phy_power_on(sensor->dphy);
+
+	return 0;
+}
+
+static void rkisp1_mipi_csi2_stop(struct rkisp1_sensor_async *sensor)
+{
+	phy_power_off(sensor->dphy);
+}
+
+static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct rkisp1_device *rkisp1 =
+		container_of(sd->v4l2_dev, struct rkisp1_device, v4l2_dev);
+	struct v4l2_subdev *sensor_sd;
+	int ret = 0;
+
+	if (!enable) {
+		ret = rkisp1_isp_stop(rkisp1);
+		if (ret)
+			return ret;
+		rkisp1_mipi_csi2_stop(rkisp1->active_sensor);
+		return 0;
+	}
+
+	sensor_sd = rkisp1_get_remote_sensor(sd);
+	if (!sensor_sd)
+		return -ENODEV;
+	rkisp1->active_sensor = container_of(sensor_sd->asd,
+					     struct rkisp1_sensor_async, asd);
+
+	atomic_set(&rkisp1->isp.frame_sequence, -1);
+	ret = rkisp1_config_cif(rkisp1);
+	if (ret)
+		return ret;
+
+	if (rkisp1->active_sensor->mbus.type != V4L2_MBUS_CSI2_DPHY)
+		return -EINVAL;
+
+	ret = rkisp1_mipi_csi2_start(&rkisp1->isp, rkisp1->active_sensor);
+	if (ret)
+		return ret;
+
+	ret = rkisp1_isp_start(rkisp1);
+	if (ret)
+		rkisp1_mipi_csi2_stop(rkisp1->active_sensor);
+
+	return ret;
+}
+
+static int rkisp1_isp_subs_evt(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+			       struct v4l2_event_subscription *sub)
+{
+	if (sub->type != V4L2_EVENT_FRAME_SYNC)
+		return -EINVAL;
+
+	/* V4L2_EVENT_FRAME_SYNC does not use the id field, so it must be zero. */
+	if (sub->id != 0)
+		return -EINVAL;
+
+	return v4l2_event_subscribe(fh, sub, 0, NULL);
+}
+
+static const struct media_entity_operations rkisp1_isp_media_ops = {
+	.link_validate = rkisp1_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_video_ops rkisp1_isp_video_ops = {
+	.s_stream = rkisp1_isp_s_stream,
+};
+
+static const struct v4l2_subdev_core_ops rkisp1_isp_core_ops = {
+	.subscribe_event = rkisp1_isp_subs_evt,
+	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops rkisp1_isp_ops = {
+	.core = &rkisp1_isp_core_ops,
+	.video = &rkisp1_isp_video_ops,
+	.pad = &rkisp1_isp_pad_ops,
+};
+
+int rkisp1_isp_register(struct rkisp1_device *rkisp1,
+			struct v4l2_device *v4l2_dev)
+{
+	struct rkisp1_isp *isp = &rkisp1->isp;
+	struct media_pad *pads = isp->pads;
+	struct v4l2_subdev *sd = &isp->sd;
+	int ret;
+
+	v4l2_subdev_init(sd, &rkisp1_isp_ops);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	sd->entity.ops = &rkisp1_isp_media_ops;
+	sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+	sd->owner = THIS_MODULE;
+	strscpy(sd->name, RKISP1_ISP_DEV_NAME, sizeof(sd->name));
+
+	pads[RKISP1_ISP_PAD_SINK_VIDEO].flags = MEDIA_PAD_FL_SINK |
+						MEDIA_PAD_FL_MUST_CONNECT;
+	pads[RKISP1_ISP_PAD_SINK_PARAMS].flags = MEDIA_PAD_FL_SINK;
+	pads[RKISP1_ISP_PAD_SOURCE_VIDEO].flags = MEDIA_PAD_FL_SOURCE;
+	pads[RKISP1_ISP_PAD_SOURCE_STATS].flags = MEDIA_PAD_FL_SOURCE;
+
+	isp->sink_fmt = rkisp1_isp_mbus_info_get(RKISP1_DEF_SINK_PAD_FMT);
+	isp->src_fmt = rkisp1_isp_mbus_info_get(RKISP1_DEF_SRC_PAD_FMT);
+
+	ret = media_entity_pads_init(&sd->entity, RKISP1_ISP_PAD_MAX, pads);
+	if (ret)
+		return ret;
+
+	ret = v4l2_device_register_subdev(v4l2_dev, sd);
+	if (ret) {
+		dev_err(sd->dev, "Failed to register isp subdev\n");
+		goto err_cleanup_media_entity;
+	}
+
+	rkisp1_isp_init_config(sd, rkisp1->isp.pad_cfg);
+	return 0;
+
+err_cleanup_media_entity:
+	media_entity_cleanup(&sd->entity);
+
+	return ret;
+}
+
+void rkisp1_isp_unregister(struct rkisp1_device *rkisp1)
+{
+	struct v4l2_subdev *sd = &rkisp1->isp.sd;
+
+	v4l2_device_unregister_subdev(sd);
+	media_entity_cleanup(&sd->entity);
+}
+
+/* ----------------------------------------------------------------------------
+ * Interrupt handlers
+ */
+
+void rkisp1_mipi_isr(struct rkisp1_device *rkisp1)
+{
+	u32 val, status;
+
+	status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
+	if (!status)
+		return;
+
+	rkisp1_write(rkisp1, status, RKISP1_CIF_MIPI_ICR);
+
+	/*
+	 * Disable the DPHY errctrl interrupt, because the errctrl signal
+	 * stays asserted until the line state changes again. This may take
+	 * too long and would keep the CPU stuck in this interrupt.
+	 */
+	if (status & RKISP1_CIF_MIPI_ERR_CTRL(0x0f)) {
+		val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC);
+		rkisp1_write(rkisp1, val & ~RKISP1_CIF_MIPI_ERR_CTRL(0x0f),
+			     RKISP1_CIF_MIPI_IMSC);
+		rkisp1->isp.is_dphy_errctrl_disabled = true;
+	}
+
+	/*
+	 * Re-enable the DPHY errctrl interrupt if MIPI has received a whole
+	 * frame without any error.
+	 */
+	if (status == RKISP1_CIF_MIPI_FRAME_END) {
+		if (rkisp1->isp.is_dphy_errctrl_disabled) {
+			val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC);
+			val |= RKISP1_CIF_MIPI_ERR_CTRL(0x0f);
+			rkisp1_write(rkisp1, val, RKISP1_CIF_MIPI_IMSC);
+			rkisp1->isp.is_dphy_errctrl_disabled = false;
+		}
+	} else {
+		rkisp1->debug.mipi_error++;
+	}
+}
+
+static void rkisp1_isp_queue_event_sof(struct rkisp1_isp *isp)
+{
+	struct v4l2_event event = {
+		.type = V4L2_EVENT_FRAME_SYNC,
+	};
+
+	/*
+	 * Increment the frame sequence on the vsync signal.
+	 * This allows applications to detect dropped frames.
+	 * Note that there is a debugfs counter for dropped
+	 * frames, but using this event is more accurate.
+	 */
+	event.u.frame_sync.frame_sequence =
+		atomic_inc_return(&isp->frame_sequence);
+	v4l2_event_queue(isp->sd.devnode, &event);
+}
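+
+/*
+ * A minimal userspace sketch for consuming this event (the subdev node path
+ * is an assumption; includes such as <fcntl.h>, <sys/ioctl.h> and
+ * <linux/videodev2.h> and error handling are omitted):
+ *
+ *	struct v4l2_event_subscription sub = {
+ *		.type = V4L2_EVENT_FRAME_SYNC,
+ *	};
+ *	struct v4l2_event ev;
+ *	int fd = open("/dev/v4l-subdev0", O_RDWR);
+ *
+ *	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
+ *	ioctl(fd, VIDIOC_DQEVENT, &ev);	// on a blocking fd, waits for vsync
+ *	// ev.u.frame_sync.frame_sequence now holds the frame number
+ */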
+
+void rkisp1_isp_isr(struct rkisp1_device *rkisp1)
+{
+	u32 status, isp_err;
+
+	status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
+	if (!status)
+		return;
+
+	rkisp1_write(rkisp1, status, RKISP1_CIF_ISP_ICR);
+
+	/* Vertical sync signal: the sensor starts generating a new frame. */
+	if (status & RKISP1_CIF_ISP_V_START)
+		rkisp1_isp_queue_event_sof(&rkisp1->isp);
+
+	if (status & RKISP1_CIF_ISP_PIC_SIZE_ERROR) {
+		/* Clear pic_size_error */
+		isp_err = rkisp1_read(rkisp1, RKISP1_CIF_ISP_ERR);
+		rkisp1_write(rkisp1, isp_err, RKISP1_CIF_ISP_ERR_CLR);
+		rkisp1->debug.pic_size_error++;
+	} else if (status & RKISP1_CIF_ISP_DATA_LOSS) {
+		/* keep track of data_loss in debugfs */
+		rkisp1->debug.data_loss++;
+	}
+
+	if (status & RKISP1_CIF_ISP_FRAME) {
+		u32 isp_ris;
+
+		/* New frame from the sensor received */
+		isp_ris = rkisp1_read(rkisp1, RKISP1_CIF_ISP_RIS);
+		if (isp_ris & (RKISP1_CIF_ISP_AWB_DONE |
+			       RKISP1_CIF_ISP_AFM_FIN |
+			       RKISP1_CIF_ISP_EXP_END |
+			       RKISP1_CIF_ISP_HIST_MEASURE_RDY))
+			rkisp1_stats_isr(&rkisp1->stats, isp_ris);
+	}
+
+	/*
+	 * Then update the changed configs. Some of them involve
+	 * a lot of register writes. Do those only once per frame.
+	 * Do the updates in the order of the processing flow.
+	 */
+	rkisp1_params_isr(rkisp1, status);
+}
diff --git a/drivers/staging/media/rkisp1/rkisp1-params.c b/drivers/staging/media/rkisp1/rkisp1-params.c
new file mode 100644
index 0000000..781f0ca
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-params.c
@@ -0,0 +1,1630 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - Params subdevice
+ *
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-vmalloc.h>	/* for ISP params */
+
+#include "rkisp1-common.h"
+
+#define RKISP1_PARAMS_DEV_NAME	RKISP1_DRIVER_NAME "_params"
+
+#define RKISP1_ISP_PARAMS_REQ_BUFS_MIN	2
+#define RKISP1_ISP_PARAMS_REQ_BUFS_MAX	8
+
+#define RKISP1_ISP_DPCC_LINE_THRESH(n) \
+			(RKISP1_CIF_ISP_DPCC_LINE_THRESH_1 + 0x14 * (n))
+#define RKISP1_ISP_DPCC_LINE_MAD_FAC(n) \
+			(RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_1 + 0x14 * (n))
+#define RKISP1_ISP_DPCC_PG_FAC(n) \
+			(RKISP1_CIF_ISP_DPCC_PG_FAC_1 + 0x14 * (n))
+#define RKISP1_ISP_DPCC_RND_THRESH(n) \
+			(RKISP1_CIF_ISP_DPCC_RND_THRESH_1 + 0x14 * (n))
+#define RKISP1_ISP_DPCC_RG_FAC(n) \
+			(RKISP1_CIF_ISP_DPCC_RG_FAC_1 + 0x14 * (n))
+#define RKISP1_ISP_CC_COEFF(n) \
+			(RKISP1_CIF_ISP_CC_COEFF_0 + (n) * 4)
+
+static inline void
+rkisp1_param_set_bits(struct rkisp1_params *params, u32 reg, u32 bit_mask)
+{
+	u32 val;
+
+	val = rkisp1_read(params->rkisp1, reg);
+	rkisp1_write(params->rkisp1, val | bit_mask, reg);
+}
+
+static inline void
+rkisp1_param_clear_bits(struct rkisp1_params *params, u32 reg, u32 bit_mask)
+{
+	u32 val;
+
+	val = rkisp1_read(params->rkisp1, reg);
+	rkisp1_write(params->rkisp1, val & ~bit_mask, reg);
+}
+
+/* ISP defect pixel cluster correction (DPCC) interface function */
+static void rkisp1_dpcc_config(struct rkisp1_params *params,
+			       const struct rkisp1_cif_isp_dpcc_config *arg)
+{
+	unsigned int i;
+	u32 mode;
+
+	/* Avoid overriding the old enable value. */
+	mode = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_DPCC_MODE);
+	mode &= RKISP1_CIF_ISP_DPCC_ENA;
+	mode |= arg->mode & ~RKISP1_CIF_ISP_DPCC_ENA;
+	rkisp1_write(params->rkisp1, mode, RKISP1_CIF_ISP_DPCC_MODE);
+	rkisp1_write(params->rkisp1, arg->output_mode,
+		     RKISP1_CIF_ISP_DPCC_OUTPUT_MODE);
+	rkisp1_write(params->rkisp1, arg->set_use,
+		     RKISP1_CIF_ISP_DPCC_SET_USE);
+
+	rkisp1_write(params->rkisp1, arg->methods[0].method,
+		     RKISP1_CIF_ISP_DPCC_METHODS_SET_1);
+	rkisp1_write(params->rkisp1, arg->methods[1].method,
+		     RKISP1_CIF_ISP_DPCC_METHODS_SET_2);
+	rkisp1_write(params->rkisp1, arg->methods[2].method,
+		     RKISP1_CIF_ISP_DPCC_METHODS_SET_3);
+	for (i = 0; i < RKISP1_CIF_ISP_DPCC_METHODS_MAX; i++) {
+		rkisp1_write(params->rkisp1, arg->methods[i].line_thresh,
+			     RKISP1_ISP_DPCC_LINE_THRESH(i));
+		rkisp1_write(params->rkisp1, arg->methods[i].line_mad_fac,
+			     RKISP1_ISP_DPCC_LINE_MAD_FAC(i));
+		rkisp1_write(params->rkisp1, arg->methods[i].pg_fac,
+			     RKISP1_ISP_DPCC_PG_FAC(i));
+		rkisp1_write(params->rkisp1, arg->methods[i].rnd_thresh,
+			     RKISP1_ISP_DPCC_RND_THRESH(i));
+		rkisp1_write(params->rkisp1, arg->methods[i].rg_fac,
+			     RKISP1_ISP_DPCC_RG_FAC(i));
+	}
+
+	rkisp1_write(params->rkisp1, arg->rnd_offs,
+		     RKISP1_CIF_ISP_DPCC_RND_OFFS);
+	rkisp1_write(params->rkisp1, arg->ro_limits,
+		     RKISP1_CIF_ISP_DPCC_RO_LIMITS);
+}
+
+/* ISP black level subtraction interface function */
+static void rkisp1_bls_config(struct rkisp1_params *params,
+			      const struct rkisp1_cif_isp_bls_config *arg)
+{
+	/* Avoid overriding the old enable value. */
+	u32 new_control;
+
+	new_control = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_BLS_CTRL);
+	new_control &= RKISP1_CIF_ISP_BLS_ENA;
+	/* fixed subtraction values */
+	if (!arg->enable_auto) {
+		const struct rkisp1_cif_isp_bls_fixed_val *pval =
+								&arg->fixed_val;
+
+		switch (params->raw_type) {
+		case RKISP1_RAW_BGGR:
+			rkisp1_write(params->rkisp1,
+				     pval->r, RKISP1_CIF_ISP_BLS_D_FIXED);
+			rkisp1_write(params->rkisp1,
+				     pval->gr, RKISP1_CIF_ISP_BLS_C_FIXED);
+			rkisp1_write(params->rkisp1,
+				     pval->gb, RKISP1_CIF_ISP_BLS_B_FIXED);
+			rkisp1_write(params->rkisp1,
+				     pval->b, RKISP1_CIF_ISP_BLS_A_FIXED);
+			break;
+		case RKISP1_RAW_GBRG:
+			rkisp1_write(params->rkisp1,
+				     pval->r, RKISP1_CIF_ISP_BLS_C_FIXED);
+			rkisp1_write(params->rkisp1,
+				     pval->gr, RKISP1_CIF_ISP_BLS_D_FIXED);
+			rkisp1_write(params->rkisp1,
+				     pval->gb, RKISP1_CIF_ISP_BLS_A_FIXED);
+			rkisp1_write(params->rkisp1,
+				     pval->b, RKISP1_CIF_ISP_BLS_B_FIXED);
+			break;
+		case RKISP1_RAW_GRBG:
+			rkisp1_write(params->rkisp1,
+				     pval->r, RKISP1_CIF_ISP_BLS_B_FIXED);
+			rkisp1_write(params->rkisp1,
+				     pval->gr, RKISP1_CIF_ISP_BLS_A_FIXED);
+			rkisp1_write(params->rkisp1,
+				     pval->gb, RKISP1_CIF_ISP_BLS_D_FIXED);
+			rkisp1_write(params->rkisp1,
+				     pval->b, RKISP1_CIF_ISP_BLS_C_FIXED);
+			break;
+		case RKISP1_RAW_RGGB:
+			rkisp1_write(params->rkisp1,
+				     pval->r, RKISP1_CIF_ISP_BLS_A_FIXED);
+			rkisp1_write(params->rkisp1,
+				     pval->gr, RKISP1_CIF_ISP_BLS_B_FIXED);
+			rkisp1_write(params->rkisp1,
+				     pval->gb, RKISP1_CIF_ISP_BLS_C_FIXED);
+			rkisp1_write(params->rkisp1,
+				     pval->b, RKISP1_CIF_ISP_BLS_D_FIXED);
+			break;
+		default:
+			break;
+		}
+
+	} else {
+		if (arg->en_windows & BIT(1)) {
+			rkisp1_write(params->rkisp1, arg->bls_window2.h_offs,
+				     RKISP1_CIF_ISP_BLS_H2_START);
+			rkisp1_write(params->rkisp1, arg->bls_window2.h_size,
+				     RKISP1_CIF_ISP_BLS_H2_STOP);
+			rkisp1_write(params->rkisp1, arg->bls_window2.v_offs,
+				     RKISP1_CIF_ISP_BLS_V2_START);
+			rkisp1_write(params->rkisp1, arg->bls_window2.v_size,
+				     RKISP1_CIF_ISP_BLS_V2_STOP);
+			new_control |= RKISP1_CIF_ISP_BLS_WINDOW_2;
+		}
+
+		if (arg->en_windows & BIT(0)) {
+			rkisp1_write(params->rkisp1, arg->bls_window1.h_offs,
+				     RKISP1_CIF_ISP_BLS_H1_START);
+			rkisp1_write(params->rkisp1, arg->bls_window1.h_size,
+				     RKISP1_CIF_ISP_BLS_H1_STOP);
+			rkisp1_write(params->rkisp1, arg->bls_window1.v_offs,
+				     RKISP1_CIF_ISP_BLS_V1_START);
+			rkisp1_write(params->rkisp1, arg->bls_window1.v_size,
+				     RKISP1_CIF_ISP_BLS_V1_STOP);
+			new_control |= RKISP1_CIF_ISP_BLS_WINDOW_1;
+		}
+
+		rkisp1_write(params->rkisp1, arg->bls_samples,
+			     RKISP1_CIF_ISP_BLS_SAMPLES);
+
+		new_control |= RKISP1_CIF_ISP_BLS_MODE_MEASURED;
+	}
+	rkisp1_write(params->rkisp1, new_control, RKISP1_CIF_ISP_BLS_CTRL);
+}
+
+/* ISP lens shading correction (LSC) interface function */
+static void
+rkisp1_lsc_correct_matrix_config(struct rkisp1_params *params,
+				const struct rkisp1_cif_isp_lsc_config *pconfig)
+{
+	unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data;
+
+	isp_lsc_status = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_STATUS);
+
+	/* RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153 = ( 17 * 18 ) >> 1 */
+	sram_addr = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
+		    RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 :
+		    RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153;
+	rkisp1_write(params->rkisp1, sram_addr,
+		     RKISP1_CIF_ISP_LSC_R_TABLE_ADDR);
+	rkisp1_write(params->rkisp1, sram_addr,
+		     RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR);
+	rkisp1_write(params->rkisp1, sram_addr,
+		     RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR);
+	rkisp1_write(params->rkisp1, sram_addr,
+		     RKISP1_CIF_ISP_LSC_B_TABLE_ADDR);
+
+	/* program data tables (table size is 9 * 17 = 153) */
+	for (i = 0;
+	     i < RKISP1_CIF_ISP_LSC_SECTORS_MAX * RKISP1_CIF_ISP_LSC_SECTORS_MAX;
+	     i += RKISP1_CIF_ISP_LSC_SECTORS_MAX) {
+		/*
+		 * 17 sectors with 2 values in one DWORD = 9
+		 * DWORDs (2nd value of last DWORD unused)
+		 */
+		for (j = 0; j < RKISP1_CIF_ISP_LSC_SECTORS_MAX - 1; j += 2) {
+			data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->r_data_tbl[i + j],
+							     pconfig->r_data_tbl[i + j + 1]);
+			rkisp1_write(params->rkisp1, data,
+				     RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
+
+			data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gr_data_tbl[i + j],
+							     pconfig->gr_data_tbl[i + j + 1]);
+			rkisp1_write(params->rkisp1, data,
+				     RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
+
+			data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gb_data_tbl[i + j],
+							     pconfig->gb_data_tbl[i + j + 1]);
+			rkisp1_write(params->rkisp1, data,
+				     RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
+
+			data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->b_data_tbl[i + j],
+							     pconfig->b_data_tbl[i + j + 1]);
+			rkisp1_write(params->rkisp1, data,
+				     RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
+		}
+		data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->r_data_tbl[i + j], 0);
+		rkisp1_write(params->rkisp1, data,
+			     RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
+
+		data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gr_data_tbl[i + j], 0);
+		rkisp1_write(params->rkisp1, data,
+			     RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
+
+		data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gb_data_tbl[i + j], 0);
+		rkisp1_write(params->rkisp1, data,
+			     RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
+
+		data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->b_data_tbl[i + j], 0);
+		rkisp1_write(params->rkisp1, data,
+			     RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
+	}
+	isp_lsc_table_sel = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
+			    RKISP1_CIF_ISP_LSC_TABLE_0 :
+			    RKISP1_CIF_ISP_LSC_TABLE_1;
+	rkisp1_write(params->rkisp1, isp_lsc_table_sel,
+		     RKISP1_CIF_ISP_LSC_TABLE_SEL);
+}
+
+static void rkisp1_lsc_config(struct rkisp1_params *params,
+			      const struct rkisp1_cif_isp_lsc_config *arg)
+{
+	unsigned int i, data;
+	u32 lsc_ctrl;
+
+	/* LSC must be disabled while being configured, so store the current state first. */
+	lsc_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_CTRL);
+	rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
+				RKISP1_CIF_ISP_LSC_CTRL_ENA);
+	rkisp1_lsc_correct_matrix_config(params, arg);
+
+	for (i = 0; i < 4; i++) {
+		/* program x size tables */
+		data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_size_tbl[i * 2],
+						    arg->x_size_tbl[i * 2 + 1]);
+		rkisp1_write(params->rkisp1, data,
+			     RKISP1_CIF_ISP_LSC_XSIZE_01 + i * 4);
+
+		/* program x grad tables */
+		data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_grad_tbl[i * 2],
+						    arg->x_grad_tbl[i * 2 + 1]);
+		rkisp1_write(params->rkisp1, data,
+			     RKISP1_CIF_ISP_LSC_XGRAD_01 + i * 4);
+
+		/* program y size tables */
+		data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->y_size_tbl[i * 2],
+						    arg->y_size_tbl[i * 2 + 1]);
+		rkisp1_write(params->rkisp1, data,
+			     RKISP1_CIF_ISP_LSC_YSIZE_01 + i * 4);
+
+		/* program y grad tables */
+		data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->y_grad_tbl[i * 2],
+						    arg->y_grad_tbl[i * 2 + 1]);
+		rkisp1_write(params->rkisp1, data,
+			     RKISP1_CIF_ISP_LSC_YGRAD_01 + i * 4);
+	}
+
+	/* restore the lsc ctrl status */
+	if (lsc_ctrl & RKISP1_CIF_ISP_LSC_CTRL_ENA) {
+		rkisp1_param_set_bits(params,
+				      RKISP1_CIF_ISP_LSC_CTRL,
+				      RKISP1_CIF_ISP_LSC_CTRL_ENA);
+	} else {
+		rkisp1_param_clear_bits(params,
+					RKISP1_CIF_ISP_LSC_CTRL,
+					RKISP1_CIF_ISP_LSC_CTRL_ENA);
+	}
+}
+
+/* ISP Filtering function */
+static void rkisp1_flt_config(struct rkisp1_params *params,
+			      const struct rkisp1_cif_isp_flt_config *arg)
+{
+	u32 filt_mode;
+
+	rkisp1_write(params->rkisp1,
+		     arg->thresh_bl0, RKISP1_CIF_ISP_FILT_THRESH_BL0);
+	rkisp1_write(params->rkisp1,
+		     arg->thresh_bl1, RKISP1_CIF_ISP_FILT_THRESH_BL1);
+	rkisp1_write(params->rkisp1,
+		     arg->thresh_sh0, RKISP1_CIF_ISP_FILT_THRESH_SH0);
+	rkisp1_write(params->rkisp1,
+		     arg->thresh_sh1, RKISP1_CIF_ISP_FILT_THRESH_SH1);
+	rkisp1_write(params->rkisp1, arg->fac_bl0, RKISP1_CIF_ISP_FILT_FAC_BL0);
+	rkisp1_write(params->rkisp1, arg->fac_bl1, RKISP1_CIF_ISP_FILT_FAC_BL1);
+	rkisp1_write(params->rkisp1, arg->fac_mid, RKISP1_CIF_ISP_FILT_FAC_MID);
+	rkisp1_write(params->rkisp1, arg->fac_sh0, RKISP1_CIF_ISP_FILT_FAC_SH0);
+	rkisp1_write(params->rkisp1, arg->fac_sh1, RKISP1_CIF_ISP_FILT_FAC_SH1);
+	rkisp1_write(params->rkisp1,
+		     arg->lum_weight, RKISP1_CIF_ISP_FILT_LUM_WEIGHT);
+
+	rkisp1_write(params->rkisp1,
+		     (arg->mode ? RKISP1_CIF_ISP_FLT_MODE_DNR : 0) |
+		     RKISP1_CIF_ISP_FLT_CHROMA_V_MODE(arg->chr_v_mode) |
+		     RKISP1_CIF_ISP_FLT_CHROMA_H_MODE(arg->chr_h_mode) |
+		     RKISP1_CIF_ISP_FLT_GREEN_STAGE1(arg->grn_stage1),
+		     RKISP1_CIF_ISP_FILT_MODE);
+
+	/* Avoid overriding the old enable value. */
+	filt_mode = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_FILT_MODE);
+	filt_mode &= RKISP1_CIF_ISP_FLT_ENA;
+	if (arg->mode)
+		filt_mode |= RKISP1_CIF_ISP_FLT_MODE_DNR;
+	filt_mode |= RKISP1_CIF_ISP_FLT_CHROMA_V_MODE(arg->chr_v_mode) |
+		     RKISP1_CIF_ISP_FLT_CHROMA_H_MODE(arg->chr_h_mode) |
+		     RKISP1_CIF_ISP_FLT_GREEN_STAGE1(arg->grn_stage1);
+	rkisp1_write(params->rkisp1, filt_mode, RKISP1_CIF_ISP_FILT_MODE);
+}
+
+/* ISP demosaic interface function */
+static int rkisp1_bdm_config(struct rkisp1_params *params,
+			     const struct rkisp1_cif_isp_bdm_config *arg)
+{
+	u32 bdm_th;
+
+	/* Avoid overriding the old enable value. */
+	bdm_th = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_DEMOSAIC);
+	bdm_th &= RKISP1_CIF_ISP_DEMOSAIC_BYPASS;
+	bdm_th |= arg->demosaic_th & ~RKISP1_CIF_ISP_DEMOSAIC_BYPASS;
+	/* set demosaic threshold */
+	rkisp1_write(params->rkisp1, bdm_th, RKISP1_CIF_ISP_DEMOSAIC);
+	return 0;
+}
+
+/* ISP GAMMA correction interface function */
+static void rkisp1_sdg_config(struct rkisp1_params *params,
+			      const struct rkisp1_cif_isp_sdg_config *arg)
+{
+	unsigned int i;
+
+	rkisp1_write(params->rkisp1,
+		     arg->xa_pnts.gamma_dx0, RKISP1_CIF_ISP_GAMMA_DX_LO);
+	rkisp1_write(params->rkisp1,
+		     arg->xa_pnts.gamma_dx1, RKISP1_CIF_ISP_GAMMA_DX_HI);
+
+	for (i = 0; i < RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE; i++) {
+		rkisp1_write(params->rkisp1, arg->curve_r.gamma_y[i],
+			     RKISP1_CIF_ISP_GAMMA_R_Y0 + i * 4);
+		rkisp1_write(params->rkisp1, arg->curve_g.gamma_y[i],
+			     RKISP1_CIF_ISP_GAMMA_G_Y0 + i * 4);
+		rkisp1_write(params->rkisp1, arg->curve_b.gamma_y[i],
+			     RKISP1_CIF_ISP_GAMMA_B_Y0 + i * 4);
+	}
+}
+
+/* ISP GAMMA correction interface function */
+static void rkisp1_goc_config(struct rkisp1_params *params,
+			      const struct rkisp1_cif_isp_goc_config *arg)
+{
+	unsigned int i;
+
+	rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+				RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+	rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE);
+
+	for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES; i++)
+		rkisp1_write(params->rkisp1, arg->gamma_y[i],
+			     RKISP1_CIF_ISP_GAMMA_OUT_Y_0 + i * 4);
+}
+
+/* ISP Cross Talk */
+static void rkisp1_ctk_config(struct rkisp1_params *params,
+			      const struct rkisp1_cif_isp_ctk_config *arg)
+{
+	rkisp1_write(params->rkisp1, arg->coeff0, RKISP1_CIF_ISP_CT_COEFF_0);
+	rkisp1_write(params->rkisp1, arg->coeff1, RKISP1_CIF_ISP_CT_COEFF_1);
+	rkisp1_write(params->rkisp1, arg->coeff2, RKISP1_CIF_ISP_CT_COEFF_2);
+	rkisp1_write(params->rkisp1, arg->coeff3, RKISP1_CIF_ISP_CT_COEFF_3);
+	rkisp1_write(params->rkisp1, arg->coeff4, RKISP1_CIF_ISP_CT_COEFF_4);
+	rkisp1_write(params->rkisp1, arg->coeff5, RKISP1_CIF_ISP_CT_COEFF_5);
+	rkisp1_write(params->rkisp1, arg->coeff6, RKISP1_CIF_ISP_CT_COEFF_6);
+	rkisp1_write(params->rkisp1, arg->coeff7, RKISP1_CIF_ISP_CT_COEFF_7);
+	rkisp1_write(params->rkisp1, arg->coeff8, RKISP1_CIF_ISP_CT_COEFF_8);
+	rkisp1_write(params->rkisp1, arg->ct_offset_r,
+		     RKISP1_CIF_ISP_CT_OFFSET_R);
+	rkisp1_write(params->rkisp1, arg->ct_offset_g,
+		     RKISP1_CIF_ISP_CT_OFFSET_G);
+	rkisp1_write(params->rkisp1, arg->ct_offset_b,
+		     RKISP1_CIF_ISP_CT_OFFSET_B);
+}
+
+static void rkisp1_ctk_enable(struct rkisp1_params *params, bool en)
+{
+	if (en)
+		return;
+
+	/* Write back the default values. */
+	rkisp1_write(params->rkisp1, 0x80, RKISP1_CIF_ISP_CT_COEFF_0);
+	rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_1);
+	rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_2);
+	rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_3);
+	rkisp1_write(params->rkisp1, 0x80, RKISP1_CIF_ISP_CT_COEFF_4);
+	rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_5);
+	rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_6);
+	rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_7);
+	rkisp1_write(params->rkisp1, 0x80, RKISP1_CIF_ISP_CT_COEFF_8);
+
+	rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_OFFSET_R);
+	rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_OFFSET_G);
+	rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_OFFSET_B);
+}
+
+/* ISP White Balance Mode */
+static void rkisp1_awb_meas_config(struct rkisp1_params *params,
+			const struct rkisp1_cif_isp_awb_meas_config *arg)
+{
+	u32 reg_val = 0;
+	/* Configure the AWB module based on the mode. */
+	if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_YCBCR) {
+		/* Reference Cb and Cr */
+		rkisp1_write(params->rkisp1,
+			     RKISP1_CIF_ISP_AWB_REF_CR_SET(arg->awb_ref_cr) |
+			     arg->awb_ref_cb, RKISP1_CIF_ISP_AWB_REF);
+		/* Yc Threshold */
+		rkisp1_write(params->rkisp1,
+			     RKISP1_CIF_ISP_AWB_MAX_Y_SET(arg->max_y) |
+			     RKISP1_CIF_ISP_AWB_MIN_Y_SET(arg->min_y) |
+			     RKISP1_CIF_ISP_AWB_MAX_CS_SET(arg->max_csum) |
+			     arg->min_c, RKISP1_CIF_ISP_AWB_THRESH);
+	}
+
+	reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP);
+	if (arg->enable_ymax_cmp)
+		reg_val |= RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
+	else
+		reg_val &= ~RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
+	rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP);
+
+	/* window offset */
+	rkisp1_write(params->rkisp1,
+		     arg->awb_wnd.v_offs, RKISP1_CIF_ISP_AWB_WND_V_OFFS);
+	rkisp1_write(params->rkisp1,
+		     arg->awb_wnd.h_offs, RKISP1_CIF_ISP_AWB_WND_H_OFFS);
+	/* AWB window size */
+	rkisp1_write(params->rkisp1,
+		     arg->awb_wnd.v_size, RKISP1_CIF_ISP_AWB_WND_V_SIZE);
+	rkisp1_write(params->rkisp1,
+		     arg->awb_wnd.h_size, RKISP1_CIF_ISP_AWB_WND_H_SIZE);
+	/* Number of frames */
+	rkisp1_write(params->rkisp1,
+		     arg->frames, RKISP1_CIF_ISP_AWB_FRAMES);
+}
+
+static void
+rkisp1_awb_meas_enable(struct rkisp1_params *params,
+		       const struct rkisp1_cif_isp_awb_meas_config *arg,
+		       bool en)
+{
+	u32 reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP);
+
+	/* switch off */
+	reg_val &= RKISP1_CIF_ISP_AWB_MODE_MASK_NONE;
+
+	if (en) {
+		if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_RGB)
+			reg_val |= RKISP1_CIF_ISP_AWB_MODE_RGB_EN;
+		else
+			reg_val |= RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN;
+
+		rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP);
+
+		/* Measurements require the AWB block to be active. */
+		rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+				      RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+	} else {
+		rkisp1_write(params->rkisp1,
+			     reg_val, RKISP1_CIF_ISP_AWB_PROP);
+		rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+					RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+	}
+}
+
+static void
+rkisp1_awb_gain_config(struct rkisp1_params *params,
+		       const struct rkisp1_cif_isp_awb_gain_config *arg)
+{
+	rkisp1_write(params->rkisp1,
+		     RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_green_r) |
+		     arg->gain_green_b, RKISP1_CIF_ISP_AWB_GAIN_G);
+
+	rkisp1_write(params->rkisp1,
+		     RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_red) |
+		     arg->gain_blue, RKISP1_CIF_ISP_AWB_GAIN_RB);
+}
+
+static void rkisp1_aec_config(struct rkisp1_params *params,
+			      const struct rkisp1_cif_isp_aec_config *arg)
+{
+	unsigned int block_hsize, block_vsize;
+	u32 exp_ctrl;
+
+	/* avoid overriding the old enable value */
+	exp_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_EXP_CTRL);
+	exp_ctrl &= RKISP1_CIF_ISP_EXP_ENA;
+	if (arg->autostop)
+		exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP;
+	if (arg->mode == RKISP1_CIF_ISP_EXP_MEASURING_MODE_1)
+		exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1;
+	rkisp1_write(params->rkisp1, exp_ctrl, RKISP1_CIF_ISP_EXP_CTRL);
+
+	rkisp1_write(params->rkisp1,
+		     arg->meas_window.h_offs, RKISP1_CIF_ISP_EXP_H_OFFSET);
+	rkisp1_write(params->rkisp1,
+		     arg->meas_window.v_offs, RKISP1_CIF_ISP_EXP_V_OFFSET);
+
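+	/*
+	 * The measurement window is split into a grid of
+	 * RKISP1_CIF_ISP_EXP_COLUMN_NUM x RKISP1_CIF_ISP_EXP_ROW_NUM blocks;
+	 * the size registers are programmed with the per-block size minus 1.
+	 */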
+	block_hsize = arg->meas_window.h_size /
+		      RKISP1_CIF_ISP_EXP_COLUMN_NUM - 1;
+	block_vsize = arg->meas_window.v_size /
+		      RKISP1_CIF_ISP_EXP_ROW_NUM - 1;
+
+	rkisp1_write(params->rkisp1,
+		     RKISP1_CIF_ISP_EXP_H_SIZE_SET(block_hsize),
+		     RKISP1_CIF_ISP_EXP_H_SIZE);
+	rkisp1_write(params->rkisp1,
+		     RKISP1_CIF_ISP_EXP_V_SIZE_SET(block_vsize),
+		     RKISP1_CIF_ISP_EXP_V_SIZE);
+}
+
+static void rkisp1_cproc_config(struct rkisp1_params *params,
+				const struct rkisp1_cif_isp_cproc_config *arg)
+{
+	struct rkisp1_cif_isp_isp_other_cfg *cur_other_cfg =
+						&params->cur_params.others;
+	struct rkisp1_cif_isp_ie_config *cur_ie_config =
+						&cur_other_cfg->ie_config;
+	u32 effect = cur_ie_config->effect;
+	u32 quantization = params->quantization;
+
+	rkisp1_write(params->rkisp1, arg->contrast, RKISP1_CIF_C_PROC_CONTRAST);
+	rkisp1_write(params->rkisp1, arg->hue, RKISP1_CIF_C_PROC_HUE);
+	rkisp1_write(params->rkisp1, arg->sat, RKISP1_CIF_C_PROC_SATURATION);
+	rkisp1_write(params->rkisp1, arg->brightness,
+		     RKISP1_CIF_C_PROC_BRIGHTNESS);
+
+	if (quantization != V4L2_QUANTIZATION_FULL_RANGE ||
+	    effect != V4L2_COLORFX_NONE) {
+		rkisp1_param_clear_bits(params, RKISP1_CIF_C_PROC_CTRL,
+					RKISP1_CIF_C_PROC_YOUT_FULL |
+					RKISP1_CIF_C_PROC_YIN_FULL |
+					RKISP1_CIF_C_PROC_COUT_FULL);
+	} else {
+		rkisp1_param_set_bits(params, RKISP1_CIF_C_PROC_CTRL,
+				      RKISP1_CIF_C_PROC_YOUT_FULL |
+				      RKISP1_CIF_C_PROC_YIN_FULL |
+				      RKISP1_CIF_C_PROC_COUT_FULL);
+	}
+}
+
+static void rkisp1_hst_config(struct rkisp1_params *params,
+			      const struct rkisp1_cif_isp_hst_config *arg)
+{
+	unsigned int block_hsize, block_vsize;
+	static const u32 hist_weight_regs[] = {
+		RKISP1_CIF_ISP_HIST_WEIGHT_00TO30,
+		RKISP1_CIF_ISP_HIST_WEIGHT_40TO21,
+		RKISP1_CIF_ISP_HIST_WEIGHT_31TO12,
+		RKISP1_CIF_ISP_HIST_WEIGHT_22TO03,
+		RKISP1_CIF_ISP_HIST_WEIGHT_13TO43,
+		RKISP1_CIF_ISP_HIST_WEIGHT_04TO34,
+		RKISP1_CIF_ISP_HIST_WEIGHT_44,
+	};
+	const u8 *weight;
+	unsigned int i;
+	u32 hist_prop;
+
+	/* avoid overriding the old enable value */
+	hist_prop = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_HIST_PROP);
+	hist_prop &= RKISP1_CIF_ISP_HIST_PROP_MODE_MASK;
+	hist_prop |= RKISP1_CIF_ISP_HIST_PREDIV_SET(arg->histogram_predivider);
+	rkisp1_write(params->rkisp1, hist_prop, RKISP1_CIF_ISP_HIST_PROP);
+	rkisp1_write(params->rkisp1,
+		     arg->meas_window.h_offs,
+		     RKISP1_CIF_ISP_HIST_H_OFFS);
+	rkisp1_write(params->rkisp1,
+		     arg->meas_window.v_offs,
+		     RKISP1_CIF_ISP_HIST_V_OFFS);
+
+	block_hsize = arg->meas_window.h_size /
+		      RKISP1_CIF_ISP_HIST_COLUMN_NUM - 1;
+	block_vsize = arg->meas_window.v_size / RKISP1_CIF_ISP_HIST_ROW_NUM - 1;
+
+	rkisp1_write(params->rkisp1, block_hsize, RKISP1_CIF_ISP_HIST_H_SIZE);
+	rkisp1_write(params->rkisp1, block_vsize, RKISP1_CIF_ISP_HIST_V_SIZE);
+
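+	/* each weight register packs four 5-bit window weights */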
+	weight = arg->hist_weight;
+	for (i = 0; i < ARRAY_SIZE(hist_weight_regs); ++i, weight += 4)
+		rkisp1_write(params->rkisp1,
+			     RKISP1_CIF_ISP_HIST_WEIGHT_SET(weight[0],
+							    weight[1],
+							    weight[2],
+							    weight[3]),
+				 hist_weight_regs[i]);
+}
+
+static void
+rkisp1_hst_enable(struct rkisp1_params *params,
+		  const struct rkisp1_cif_isp_hst_config *arg, bool en)
+{
+	if (en) {
+		u32 hist_prop = rkisp1_read(params->rkisp1,
+					    RKISP1_CIF_ISP_HIST_PROP);
+
+		hist_prop &= ~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK;
+		hist_prop |= arg->mode;
+		rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP,
+				      hist_prop);
+	} else {
+		rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_HIST_PROP,
+					RKISP1_CIF_ISP_HIST_PROP_MODE_MASK);
+	}
+}
+
+static void rkisp1_afm_config(struct rkisp1_params *params,
+			      const struct rkisp1_cif_isp_afc_config *arg)
+{
+	size_t num_of_win = min_t(size_t, ARRAY_SIZE(arg->afm_win),
+				  arg->num_afm_win);
+	u32 afm_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AFM_CTRL);
+	unsigned int i;
+
+	/* Switch off to configure. */
+	rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
+				RKISP1_CIF_ISP_AFM_ENA);
+
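+	/*
+	 * Each window is defined by its top-left (LT) and bottom-right (RB)
+	 * corners, written to a per-window pair of registers 8 bytes apart.
+	 */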
+	for (i = 0; i < num_of_win; i++) {
+		rkisp1_write(params->rkisp1,
+			     RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_offs) |
+			     RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_offs),
+			     RKISP1_CIF_ISP_AFM_LT_A + i * 8);
+		rkisp1_write(params->rkisp1,
+			     RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_size +
+							 arg->afm_win[i].h_offs) |
+			     RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_size +
+							 arg->afm_win[i].v_offs),
+			     RKISP1_CIF_ISP_AFM_RB_A + i * 8);
+	}
+	rkisp1_write(params->rkisp1, arg->thres, RKISP1_CIF_ISP_AFM_THRES);
+	rkisp1_write(params->rkisp1, arg->var_shift,
+		     RKISP1_CIF_ISP_AFM_VAR_SHIFT);
+	/* restore afm status */
+	rkisp1_write(params->rkisp1, afm_ctrl, RKISP1_CIF_ISP_AFM_CTRL);
+}
+
+static void rkisp1_ie_config(struct rkisp1_params *params,
+			     const struct rkisp1_cif_isp_ie_config *arg)
+{
+	u32 eff_ctrl;
+
+	eff_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_IMG_EFF_CTRL);
+	eff_ctrl &= ~RKISP1_CIF_IMG_EFF_CTRL_MODE_MASK;
+
+	if (params->quantization == V4L2_QUANTIZATION_FULL_RANGE)
+		eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_YCBCR_FULL;
+
+	switch (arg->effect) {
+	case V4L2_COLORFX_SEPIA:
+		eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA;
+		break;
+	case V4L2_COLORFX_SET_CBCR:
+		rkisp1_write(params->rkisp1, arg->eff_tint,
+			     RKISP1_CIF_IMG_EFF_TINT);
+		eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA;
+		break;
+		/*
+		 * Color selection is similar to water color (AQUA):
+		 * grayscale + selected color with threshold
+		 */
+	case V4L2_COLORFX_AQUA:
+		eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_COLOR_SEL;
+		rkisp1_write(params->rkisp1, arg->color_sel,
+			     RKISP1_CIF_IMG_EFF_COLOR_SEL);
+		break;
+	case V4L2_COLORFX_EMBOSS:
+		eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_EMBOSS;
+		rkisp1_write(params->rkisp1, arg->eff_mat_1,
+			     RKISP1_CIF_IMG_EFF_MAT_1);
+		rkisp1_write(params->rkisp1, arg->eff_mat_2,
+			     RKISP1_CIF_IMG_EFF_MAT_2);
+		rkisp1_write(params->rkisp1, arg->eff_mat_3,
+			     RKISP1_CIF_IMG_EFF_MAT_3);
+		break;
+	case V4L2_COLORFX_SKETCH:
+		eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_SKETCH;
+		rkisp1_write(params->rkisp1, arg->eff_mat_3,
+			     RKISP1_CIF_IMG_EFF_MAT_3);
+		rkisp1_write(params->rkisp1, arg->eff_mat_4,
+			     RKISP1_CIF_IMG_EFF_MAT_4);
+		rkisp1_write(params->rkisp1, arg->eff_mat_5,
+			     RKISP1_CIF_IMG_EFF_MAT_5);
+		break;
+	case V4L2_COLORFX_BW:
+		eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_BLACKWHITE;
+		break;
+	case V4L2_COLORFX_NEGATIVE:
+		eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_NEGATIVE;
+		break;
+	default:
+		break;
+	}
+
+	rkisp1_write(params->rkisp1, eff_ctrl, RKISP1_CIF_IMG_EFF_CTRL);
+}
+
+static void rkisp1_ie_enable(struct rkisp1_params *params, bool en)
+{
+	if (en) {
+		rkisp1_param_set_bits(params, RKISP1_CIF_ICCL,
+				      RKISP1_CIF_ICCL_IE_CLK);
+		rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_CTRL_ENABLE,
+			     RKISP1_CIF_IMG_EFF_CTRL);
+		rkisp1_param_set_bits(params, RKISP1_CIF_IMG_EFF_CTRL,
+				      RKISP1_CIF_IMG_EFF_CTRL_CFG_UPD);
+	} else {
+		rkisp1_param_clear_bits(params, RKISP1_CIF_IMG_EFF_CTRL,
+					RKISP1_CIF_IMG_EFF_CTRL_ENABLE);
+		rkisp1_param_clear_bits(params, RKISP1_CIF_ICCL,
+					RKISP1_CIF_ICCL_IE_CLK);
+	}
+}
+
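+/*
+ * Program the color space matrix used for the RGB to YUV conversion and
+ * select between full range and limited range output.
+ */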
+static void rkisp1_csm_config(struct rkisp1_params *params, bool full_range)
+{
+	static const u16 full_range_coeff[] = {
+		0x0026, 0x004b, 0x000f,
+		0x01ea, 0x01d6, 0x0040,
+		0x0040, 0x01ca, 0x01f6
+	};
+	static const u16 limited_range_coeff[] = {
+		0x0021, 0x0040, 0x000d,
+		0x01ed, 0x01db, 0x0038,
+		0x0038, 0x01d1, 0x01f7,
+	};
+	unsigned int i;
+
+	if (full_range) {
+		for (i = 0; i < ARRAY_SIZE(full_range_coeff); i++)
+			rkisp1_write(params->rkisp1, full_range_coeff[i],
+				     RKISP1_CIF_ISP_CC_COEFF_0 + i * 4);
+
+		rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+				      RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA |
+				      RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA);
+	} else {
+		for (i = 0; i < ARRAY_SIZE(limited_range_coeff); i++)
+			rkisp1_write(params->rkisp1, limited_range_coeff[i],
+				     RKISP1_CIF_ISP_CC_COEFF_0 + i * 4);
+
+		rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+					RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA |
+					RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA);
+	}
+}
+
+/* ISP De-noise Pre-Filter(DPF) function */
+static void rkisp1_dpf_config(struct rkisp1_params *params,
+			      const struct rkisp1_cif_isp_dpf_config *arg)
+{
+	unsigned int isp_dpf_mode, spatial_coeff, i;
+
+	switch (arg->gain.mode) {
+	case RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS:
+		isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_USE_NF_GAIN |
+			       RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP;
+		break;
+	case RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS:
+		isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_LSC_GAIN_COMP;
+		break;
+	case RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS:
+		isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_USE_NF_GAIN |
+			       RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP |
+			       RKISP1_CIF_ISP_DPF_MODE_LSC_GAIN_COMP;
+		break;
+	case RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS:
+		isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP;
+		break;
+	case RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS:
+		isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_LSC_GAIN_COMP |
+			       RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP;
+		break;
+	case RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED:
+	default:
+		isp_dpf_mode = 0;
+		break;
+	}
+
+	if (arg->nll.scale_mode == RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC)
+		isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_NLL_SEGMENTATION;
+	if (arg->rb_flt.fltsize == RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9)
+		isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_RB_FLTSIZE_9x9;
+	if (!arg->rb_flt.r_enable)
+		isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_R_FLT_DIS;
+	if (!arg->rb_flt.b_enable)
+		isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_B_FLT_DIS;
+	if (!arg->g_flt.gb_enable)
+		isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_GB_FLT_DIS;
+	if (!arg->g_flt.gr_enable)
+		isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_GR_FLT_DIS;
+
+	rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DPF_MODE,
+			      isp_dpf_mode);
+	rkisp1_write(params->rkisp1, arg->gain.nf_b_gain,
+		     RKISP1_CIF_ISP_DPF_NF_GAIN_B);
+	rkisp1_write(params->rkisp1, arg->gain.nf_r_gain,
+		     RKISP1_CIF_ISP_DPF_NF_GAIN_R);
+	rkisp1_write(params->rkisp1, arg->gain.nf_gb_gain,
+		     RKISP1_CIF_ISP_DPF_NF_GAIN_GB);
+	rkisp1_write(params->rkisp1, arg->gain.nf_gr_gain,
+		     RKISP1_CIF_ISP_DPF_NF_GAIN_GR);
+
+	for (i = 0; i < RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS; i++) {
+		rkisp1_write(params->rkisp1, arg->nll.coeff[i],
+			     RKISP1_CIF_ISP_DPF_NULL_COEFF_0 + i * 4);
+	}
+
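+	/* up to four 8-bit spatial coefficients are packed per register */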
+	spatial_coeff = arg->g_flt.spatial_coeff[0] |
+			(arg->g_flt.spatial_coeff[1] << 8) |
+			(arg->g_flt.spatial_coeff[2] << 16) |
+			(arg->g_flt.spatial_coeff[3] << 24);
+	rkisp1_write(params->rkisp1, spatial_coeff,
+		     RKISP1_CIF_ISP_DPF_S_WEIGHT_G_1_4);
+
+	spatial_coeff = arg->g_flt.spatial_coeff[4] |
+			(arg->g_flt.spatial_coeff[5] << 8);
+	rkisp1_write(params->rkisp1, spatial_coeff,
+		     RKISP1_CIF_ISP_DPF_S_WEIGHT_G_5_6);
+
+	spatial_coeff = arg->rb_flt.spatial_coeff[0] |
+			(arg->rb_flt.spatial_coeff[1] << 8) |
+			(arg->rb_flt.spatial_coeff[2] << 16) |
+			(arg->rb_flt.spatial_coeff[3] << 24);
+	rkisp1_write(params->rkisp1, spatial_coeff,
+		     RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_1_4);
+
+	spatial_coeff = arg->rb_flt.spatial_coeff[4] |
+			(arg->rb_flt.spatial_coeff[5] << 8);
+	rkisp1_write(params->rkisp1, spatial_coeff,
+		     RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_5_6);
+}
+
+static void
+rkisp1_dpf_strength_config(struct rkisp1_params *params,
+			   const struct rkisp1_cif_isp_dpf_strength_config *arg)
+{
+	rkisp1_write(params->rkisp1, arg->b, RKISP1_CIF_ISP_DPF_STRENGTH_B);
+	rkisp1_write(params->rkisp1, arg->g, RKISP1_CIF_ISP_DPF_STRENGTH_G);
+	rkisp1_write(params->rkisp1, arg->r, RKISP1_CIF_ISP_DPF_STRENGTH_R);
+}
+
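+/*
+ * A module is reconfigured when its bit is set in module_cfg_update, and
+ * enabled or disabled (according to module_ens) when its bit is set in
+ * module_en_update.
+ */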
+static void
+rkisp1_isp_isr_other_config(struct rkisp1_params *params,
+			    const struct rkisp1_params_cfg *new_params)
+{
+	unsigned int module_en_update, module_cfg_update, module_ens;
+
+	module_en_update = new_params->module_en_update;
+	module_cfg_update = new_params->module_cfg_update;
+	module_ens = new_params->module_ens;
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_DPCC) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_DPCC)) {
+		/* update dpcc config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_DPCC))
+			rkisp1_dpcc_config(params,
+					   &new_params->others.dpcc_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_DPCC) {
+			if (!!(module_ens & RKISP1_CIF_ISP_MODULE_DPCC))
+				rkisp1_param_set_bits(params,
+						      RKISP1_CIF_ISP_DPCC_MODE,
+						      RKISP1_CIF_ISP_DPCC_ENA);
+			else
+				rkisp1_param_clear_bits(params,
+						RKISP1_CIF_ISP_DPCC_MODE,
+						RKISP1_CIF_ISP_DPCC_ENA);
+		}
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_BLS) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_BLS)) {
+		/* update bls config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_BLS))
+			rkisp1_bls_config(params,
+					  &new_params->others.bls_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_BLS) {
+			if (!!(module_ens & RKISP1_CIF_ISP_MODULE_BLS))
+				rkisp1_param_set_bits(params,
+						      RKISP1_CIF_ISP_BLS_CTRL,
+						      RKISP1_CIF_ISP_BLS_ENA);
+			else
+				rkisp1_param_clear_bits(params,
+							RKISP1_CIF_ISP_BLS_CTRL,
+							RKISP1_CIF_ISP_BLS_ENA);
+		}
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_SDG) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_SDG)) {
+		/* update sdg config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_SDG))
+			rkisp1_sdg_config(params,
+					  &new_params->others.sdg_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_SDG) {
+			if (!!(module_ens & RKISP1_CIF_ISP_MODULE_SDG))
+				rkisp1_param_set_bits(params,
+					RKISP1_CIF_ISP_CTRL,
+					RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA);
+			else
+				rkisp1_param_clear_bits(params,
+					RKISP1_CIF_ISP_CTRL,
+					RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA);
+		}
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_LSC) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_LSC)) {
+		/* update lsc config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_LSC))
+			rkisp1_lsc_config(params,
+					  &new_params->others.lsc_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_LSC) {
+			if (!!(module_ens & RKISP1_CIF_ISP_MODULE_LSC))
+				rkisp1_param_set_bits(params,
+						RKISP1_CIF_ISP_LSC_CTRL,
+						RKISP1_CIF_ISP_LSC_CTRL_ENA);
+			else
+				rkisp1_param_clear_bits(params,
+						RKISP1_CIF_ISP_LSC_CTRL,
+						RKISP1_CIF_ISP_LSC_CTRL_ENA);
+		}
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN)) {
+		/* update awb gains */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN))
+			rkisp1_awb_gain_config(params,
+					&new_params->others.awb_gain_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN) {
+			if (!!(module_ens & RKISP1_CIF_ISP_MODULE_AWB_GAIN))
+				rkisp1_param_set_bits(params,
+					RKISP1_CIF_ISP_CTRL,
+					RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+			else
+				rkisp1_param_clear_bits(params,
+					RKISP1_CIF_ISP_CTRL,
+					RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+		}
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_BDM) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_BDM)) {
+		/* update bdm config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_BDM))
+			rkisp1_bdm_config(params,
+					  &new_params->others.bdm_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_BDM) {
+			if (!!(module_ens & RKISP1_CIF_ISP_MODULE_BDM))
+				rkisp1_param_set_bits(params,
+						RKISP1_CIF_ISP_DEMOSAIC,
+						RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
+			else
+				rkisp1_param_clear_bits(params,
+						RKISP1_CIF_ISP_DEMOSAIC,
+						RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
+		}
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_FLT) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_FLT)) {
+		/* update filter config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_FLT))
+			rkisp1_flt_config(params,
+					  &new_params->others.flt_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_FLT) {
+			if (!!(module_ens & RKISP1_CIF_ISP_MODULE_FLT))
+				rkisp1_param_set_bits(params,
+						      RKISP1_CIF_ISP_FILT_MODE,
+						      RKISP1_CIF_ISP_FLT_ENA);
+			else
+				rkisp1_param_clear_bits(params,
+						RKISP1_CIF_ISP_FILT_MODE,
+						RKISP1_CIF_ISP_FLT_ENA);
+		}
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_CTK) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_CTK)) {
+		/* update ctk config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_CTK))
+			rkisp1_ctk_config(params,
+					  &new_params->others.ctk_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_CTK)
+			rkisp1_ctk_enable(params,
+				!!(module_ens & RKISP1_CIF_ISP_MODULE_CTK));
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_GOC) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_GOC)) {
+		/* update goc config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_GOC))
+			rkisp1_goc_config(params,
+					  &new_params->others.goc_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_GOC) {
+			if (!!(module_ens & RKISP1_CIF_ISP_MODULE_GOC))
+				rkisp1_param_set_bits(params,
+					RKISP1_CIF_ISP_CTRL,
+					RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+			else
+				rkisp1_param_clear_bits(params,
+					RKISP1_CIF_ISP_CTRL,
+					RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+		}
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_CPROC) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_CPROC)) {
+		/* update cproc config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_CPROC)) {
+			rkisp1_cproc_config(params,
+					    &new_params->others.cproc_config);
+		}
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_CPROC) {
+			if (!!(module_ens & RKISP1_CIF_ISP_MODULE_CPROC))
+				rkisp1_param_set_bits(params,
+						RKISP1_CIF_C_PROC_CTRL,
+						RKISP1_CIF_C_PROC_CTR_ENABLE);
+			else
+				rkisp1_param_clear_bits(params,
+						RKISP1_CIF_C_PROC_CTRL,
+						RKISP1_CIF_C_PROC_CTR_ENABLE);
+		}
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_IE) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_IE)) {
+		/* update ie config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_IE))
+			rkisp1_ie_config(params,
+					 &new_params->others.ie_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_IE)
+			rkisp1_ie_enable(params,
+				!!(module_ens & RKISP1_CIF_ISP_MODULE_IE));
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_DPF) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_DPF)) {
+		/* update dpf config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_DPF))
+			rkisp1_dpf_config(params,
+					  &new_params->others.dpf_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_DPF) {
+			if (!!(module_ens & RKISP1_CIF_ISP_MODULE_DPF))
+				rkisp1_param_set_bits(params,
+						   RKISP1_CIF_ISP_DPF_MODE,
+						   RKISP1_CIF_ISP_DPF_MODE_EN);
+			else
+				rkisp1_param_clear_bits(params,
+						RKISP1_CIF_ISP_DPF_MODE,
+						RKISP1_CIF_ISP_DPF_MODE_EN);
+		}
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_DPF_STRENGTH) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_DPF_STRENGTH)) {
+		/* update dpf strength config */
+		rkisp1_dpf_strength_config(params,
+				&new_params->others.dpf_strength_config);
+	}
+}
+
+static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params,
+				       struct rkisp1_params_cfg *new_params)
+{
+	unsigned int module_en_update, module_cfg_update, module_ens;
+
+	module_en_update = new_params->module_en_update;
+	module_cfg_update = new_params->module_cfg_update;
+	module_ens = new_params->module_ens;
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_AWB) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB)) {
+		/* update awb config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB))
+			rkisp1_awb_meas_config(params,
+					&new_params->meas.awb_meas_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_AWB)
+			rkisp1_awb_meas_enable(params,
+				&new_params->meas.awb_meas_config,
+				!!(module_ens & RKISP1_CIF_ISP_MODULE_AWB));
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_AFC) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_AFC)) {
+		/* update afc config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_AFC))
+			rkisp1_afm_config(params,
+					  &new_params->meas.afc_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_AFC) {
+			if (!!(module_ens & RKISP1_CIF_ISP_MODULE_AFC))
+				rkisp1_param_set_bits(params,
+						      RKISP1_CIF_ISP_AFM_CTRL,
+						      RKISP1_CIF_ISP_AFM_ENA);
+			else
+				rkisp1_param_clear_bits(params,
+							RKISP1_CIF_ISP_AFM_CTRL,
+							RKISP1_CIF_ISP_AFM_ENA);
+		}
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_HST) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_HST)) {
+		/* update hst config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_HST))
+			rkisp1_hst_config(params,
+					  &new_params->meas.hst_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_HST)
+			rkisp1_hst_enable(params,
+				&new_params->meas.hst_config,
+				!!(module_ens & RKISP1_CIF_ISP_MODULE_HST));
+	}
+
+	if ((module_en_update & RKISP1_CIF_ISP_MODULE_AEC) ||
+	    (module_cfg_update & RKISP1_CIF_ISP_MODULE_AEC)) {
+		/* update aec config */
+		if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_AEC))
+			rkisp1_aec_config(params,
+					  &new_params->meas.aec_config);
+
+		if (module_en_update & RKISP1_CIF_ISP_MODULE_AEC) {
+			if (!!(module_ens & RKISP1_CIF_ISP_MODULE_AEC))
+				rkisp1_param_set_bits(params,
+						      RKISP1_CIF_ISP_EXP_CTRL,
+						      RKISP1_CIF_ISP_EXP_ENA);
+			else
+				rkisp1_param_clear_bits(params,
+							RKISP1_CIF_ISP_EXP_CTRL,
+							RKISP1_CIF_ISP_EXP_ENA);
+		}
+	}
+}
+
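+/*
+ * Called from the ISP interrupt handler: on the frame interrupt, apply the
+ * next queued parameters buffer, force a shadow register update and return
+ * the buffer to userspace.
+ */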
+void rkisp1_params_isr(struct rkisp1_device *rkisp1, u32 isp_mis)
+{
+	unsigned int frame_sequence = atomic_read(&rkisp1->isp.frame_sequence);
+	struct rkisp1_params *params = &rkisp1->params;
+	struct rkisp1_params_cfg *new_params;
+	struct rkisp1_buffer *cur_buf = NULL;
+
+	spin_lock(&params->config_lock);
+	if (!params->is_streaming) {
+		spin_unlock(&params->config_lock);
+		return;
+	}
+
+	/* get the first queued buffer with new parameters */
+	if (!list_empty(&params->params))
+		cur_buf = list_first_entry(&params->params,
+					   struct rkisp1_buffer, queue);
+	spin_unlock(&params->config_lock);
+
+	if (!cur_buf)
+		return;
+
+	new_params = (struct rkisp1_params_cfg *)(cur_buf->vaddr[0]);
+
+	if (isp_mis & RKISP1_CIF_ISP_FRAME) {
+		u32 isp_ctrl;
+
+		rkisp1_isp_isr_other_config(params, new_params);
+		rkisp1_isp_isr_meas_config(params, new_params);
+
+		/* update shadow register immediately */
+		isp_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_CTRL);
+		isp_ctrl |= RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD;
+		rkisp1_write(params->rkisp1, isp_ctrl, RKISP1_CIF_ISP_CTRL);
+
+		spin_lock(&params->config_lock);
+		list_del(&cur_buf->queue);
+		spin_unlock(&params->config_lock);
+
+		cur_buf->vb.sequence = frame_sequence;
+		vb2_buffer_done(&cur_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+	}
+}
+
+static const struct rkisp1_cif_isp_awb_meas_config rkisp1_awb_params_default_config = {
+	{
+		0, 0, RKISP1_DEFAULT_WIDTH, RKISP1_DEFAULT_HEIGHT
+	},
+	RKISP1_CIF_ISP_AWB_MODE_YCBCR, 200, 30, 20, 20, 0, 128, 128
+};
+
+static const struct rkisp1_cif_isp_aec_config rkisp1_aec_params_default_config = {
+	RKISP1_CIF_ISP_EXP_MEASURING_MODE_0,
+	RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0,
+	{
+		RKISP1_DEFAULT_WIDTH >> 2, RKISP1_DEFAULT_HEIGHT >> 2,
+		RKISP1_DEFAULT_WIDTH >> 1, RKISP1_DEFAULT_HEIGHT >> 1
+	}
+};
+
+static const struct rkisp1_cif_isp_hst_config rkisp1_hst_params_default_config = {
+	RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED,
+	3,
+	{
+		RKISP1_DEFAULT_WIDTH >> 2, RKISP1_DEFAULT_HEIGHT >> 2,
+		RKISP1_DEFAULT_WIDTH >> 1, RKISP1_DEFAULT_HEIGHT >> 1
+	},
+	{
+		0, /* To be filled in with 0x01 at runtime. */
+	}
+};
+
+static const struct rkisp1_cif_isp_afc_config rkisp1_afc_params_default_config = {
+	1,
+	{
+		{
+			300, 225, 200, 150
+		}
+	},
+	4,
+	14
+};
+
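+/*
+ * Program the default measurement configuration, then apply the current
+ * parameters on top of it.
+ */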
+static void rkisp1_params_config_parameter(struct rkisp1_params *params)
+{
+	struct rkisp1_cif_isp_hst_config hst = rkisp1_hst_params_default_config;
+
+	spin_lock(&params->config_lock);
+
+	rkisp1_awb_meas_config(params, &rkisp1_awb_params_default_config);
+	rkisp1_awb_meas_enable(params, &rkisp1_awb_params_default_config,
+			       true);
+
+	rkisp1_aec_config(params, &rkisp1_aec_params_default_config);
+	rkisp1_param_set_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
+			      RKISP1_CIF_ISP_EXP_ENA);
+
+	rkisp1_afm_config(params, &rkisp1_afc_params_default_config);
+	rkisp1_param_set_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
+			      RKISP1_CIF_ISP_AFM_ENA);
+
+	memset(hst.hist_weight, 0x01, sizeof(hst.hist_weight));
+	rkisp1_hst_config(params, &hst);
+	rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP,
+			      ~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK |
+			      rkisp1_hst_params_default_config.mode);
+
+	/* set the color conversion range (full or limited) */
+	if (params->quantization == V4L2_QUANTIZATION_FULL_RANGE)
+		rkisp1_csm_config(params, true);
+	else
+		rkisp1_csm_config(params, false);
+
+	/* apply the current parameters on top of the defaults */
+	rkisp1_isp_isr_other_config(params, &params->cur_params);
+	rkisp1_isp_isr_meas_config(params, &params->cur_params);
+
+	spin_unlock(&params->config_lock);
+}
+
+/* Not called when the camera is active, thus no ISR protection is needed. */
+void rkisp1_params_configure(struct rkisp1_params *params,
+			     enum rkisp1_fmt_raw_pat_type bayer_pat,
+			     enum v4l2_quantization quantization)
+{
+	params->quantization = quantization;
+	params->raw_type = bayer_pat;
+	rkisp1_params_config_parameter(params);
+}
+
+/* Not called when the camera is active, thus no ISR protection is needed. */
+void rkisp1_params_disable(struct rkisp1_params *params)
+{
+	rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPCC_MODE,
+				RKISP1_CIF_ISP_DPCC_ENA);
+	rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
+				RKISP1_CIF_ISP_LSC_CTRL_ENA);
+	rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_BLS_CTRL,
+				RKISP1_CIF_ISP_BLS_ENA);
+	rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+				RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA);
+	rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+				RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+	rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DEMOSAIC,
+				RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
+	rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_FILT_MODE,
+				RKISP1_CIF_ISP_FLT_ENA);
+	rkisp1_awb_meas_enable(params, NULL, false);
+	rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+				RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+	rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
+				RKISP1_CIF_ISP_EXP_ENA);
+	rkisp1_ctk_enable(params, false);
+	rkisp1_param_clear_bits(params, RKISP1_CIF_C_PROC_CTRL,
+				RKISP1_CIF_C_PROC_CTR_ENABLE);
+	rkisp1_hst_enable(params, NULL, false);
+	rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
+				RKISP1_CIF_ISP_AFM_ENA);
+	rkisp1_ie_enable(params, false);
+	rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPF_MODE,
+				RKISP1_CIF_ISP_DPF_MODE_EN);
+}
+
+static int rkisp1_params_enum_fmt_meta_out(struct file *file, void *priv,
+					   struct v4l2_fmtdesc *f)
+{
+	struct video_device *video = video_devdata(file);
+	struct rkisp1_params *params = video_get_drvdata(video);
+
+	if (f->index > 0 || f->type != video->queue->type)
+		return -EINVAL;
+
+	f->pixelformat = params->vdev_fmt.fmt.meta.dataformat;
+
+	return 0;
+}
+
+static int rkisp1_params_g_fmt_meta_out(struct file *file, void *fh,
+					struct v4l2_format *f)
+{
+	struct video_device *video = video_devdata(file);
+	struct rkisp1_params *params = video_get_drvdata(video);
+	struct v4l2_meta_format *meta = &f->fmt.meta;
+
+	if (f->type != video->queue->type)
+		return -EINVAL;
+
+	memset(meta, 0, sizeof(*meta));
+	meta->dataformat = params->vdev_fmt.fmt.meta.dataformat;
+	meta->buffersize = params->vdev_fmt.fmt.meta.buffersize;
+
+	return 0;
+}
+
+static int rkisp1_params_querycap(struct file *file,
+				  void *priv, struct v4l2_capability *cap)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	strscpy(cap->driver, RKISP1_DRIVER_NAME, sizeof(cap->driver));
+	strscpy(cap->card, vdev->name, sizeof(cap->card));
+	strscpy(cap->bus_info, RKISP1_BUS_INFO, sizeof(cap->bus_info));
+
+	return 0;
+}
+
+/* ISP params video device IOCTLs */
+static const struct v4l2_ioctl_ops rkisp1_params_ioctl = {
+	.vidioc_reqbufs = vb2_ioctl_reqbufs,
+	.vidioc_querybuf = vb2_ioctl_querybuf,
+	.vidioc_create_bufs = vb2_ioctl_create_bufs,
+	.vidioc_qbuf = vb2_ioctl_qbuf,
+	.vidioc_dqbuf = vb2_ioctl_dqbuf,
+	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+	.vidioc_expbuf = vb2_ioctl_expbuf,
+	.vidioc_streamon = vb2_ioctl_streamon,
+	.vidioc_streamoff = vb2_ioctl_streamoff,
+	.vidioc_enum_fmt_meta_out = rkisp1_params_enum_fmt_meta_out,
+	.vidioc_g_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
+	.vidioc_s_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
+	.vidioc_try_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
+	.vidioc_querycap = rkisp1_params_querycap,
+	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int rkisp1_params_vb2_queue_setup(struct vb2_queue *vq,
+					 unsigned int *num_buffers,
+					 unsigned int *num_planes,
+					 unsigned int sizes[],
+					 struct device *alloc_devs[])
+{
+	struct rkisp1_params *params = vq->drv_priv;
+
+	*num_buffers = clamp_t(u32, *num_buffers,
+			       RKISP1_ISP_PARAMS_REQ_BUFS_MIN,
+			       RKISP1_ISP_PARAMS_REQ_BUFS_MAX);
+
+	*num_planes = 1;
+
+	sizes[0] = sizeof(struct rkisp1_params_cfg);
+
+	INIT_LIST_HEAD(&params->params);
+	params->is_first_params = true;
+
+	return 0;
+}
+
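+/*
+ * The first buffer queued after queue setup is not added to the list: its
+ * contents are saved as the current parameters and the buffer is completed
+ * immediately. Subsequent buffers are queued and applied from the ISP frame
+ * interrupt.
+ */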
+static void rkisp1_params_vb2_buf_queue(struct vb2_buffer *vb)
+{
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct rkisp1_buffer *params_buf =
+		container_of(vbuf, struct rkisp1_buffer, vb);
+	struct vb2_queue *vq = vb->vb2_queue;
+	struct rkisp1_params *params = vq->drv_priv;
+	struct rkisp1_params_cfg *new_params;
+	unsigned long flags;
+	unsigned int frame_sequence =
+		atomic_read(&params->rkisp1->isp.frame_sequence);
+
+	if (params->is_first_params) {
+		new_params = (struct rkisp1_params_cfg *)
+			(vb2_plane_vaddr(vb, 0));
+		vbuf->sequence = frame_sequence;
+		vb2_buffer_done(&params_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+		params->is_first_params = false;
+		params->cur_params = *new_params;
+		return;
+	}
+
+	params_buf->vaddr[0] = vb2_plane_vaddr(vb, 0);
+	spin_lock_irqsave(&params->config_lock, flags);
+	list_add_tail(&params_buf->queue, &params->params);
+	spin_unlock_irqrestore(&params->config_lock, flags);
+}
+
+static int rkisp1_params_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+	if (vb2_plane_size(vb, 0) < sizeof(struct rkisp1_params_cfg))
+		return -EINVAL;
+
+	vb2_set_plane_payload(vb, 0, sizeof(struct rkisp1_params_cfg));
+
+	return 0;
+}
+
+static void rkisp1_params_vb2_stop_streaming(struct vb2_queue *vq)
+{
+	struct rkisp1_params *params = vq->drv_priv;
+	struct rkisp1_buffer *buf;
+	unsigned long flags;
+	unsigned int i;
+
+	/* stop params input first */
+	spin_lock_irqsave(&params->config_lock, flags);
+	params->is_streaming = false;
+	spin_unlock_irqrestore(&params->config_lock, flags);
+
+	for (i = 0; i < RKISP1_ISP_PARAMS_REQ_BUFS_MAX; i++) {
+		spin_lock_irqsave(&params->config_lock, flags);
+		if (!list_empty(&params->params)) {
+			buf = list_first_entry(&params->params,
+					       struct rkisp1_buffer, queue);
+			list_del(&buf->queue);
+			spin_unlock_irqrestore(&params->config_lock,
+					       flags);
+		} else {
+			spin_unlock_irqrestore(&params->config_lock,
+					       flags);
+			break;
+		}
+
+		if (buf)
+			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+		buf = NULL;
+	}
+}
+
+static int
+rkisp1_params_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
+{
+	struct rkisp1_params *params = queue->drv_priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&params->config_lock, flags);
+	params->is_streaming = true;
+	spin_unlock_irqrestore(&params->config_lock, flags);
+
+	return 0;
+}
+
+static const struct vb2_ops rkisp1_params_vb2_ops = {
+	.queue_setup = rkisp1_params_vb2_queue_setup,
+	.wait_prepare = vb2_ops_wait_prepare,
+	.wait_finish = vb2_ops_wait_finish,
+	.buf_queue = rkisp1_params_vb2_buf_queue,
+	.buf_prepare = rkisp1_params_vb2_buf_prepare,
+	.start_streaming = rkisp1_params_vb2_start_streaming,
+	.stop_streaming = rkisp1_params_vb2_stop_streaming,
+};
+
+static const struct v4l2_file_operations rkisp1_params_fops = {
+	.mmap = vb2_fop_mmap,
+	.unlocked_ioctl = video_ioctl2,
+	.poll = vb2_fop_poll,
+	.open = v4l2_fh_open,
+	.release = vb2_fop_release
+};
+
+static int rkisp1_params_init_vb2_queue(struct vb2_queue *q,
+					struct rkisp1_params *params)
+{
+	struct rkisp1_vdev_node *node;
+
+	node = container_of(q, struct rkisp1_vdev_node, buf_queue);
+
+	q->type = V4L2_BUF_TYPE_META_OUTPUT;
+	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+	q->drv_priv = params;
+	q->ops = &rkisp1_params_vb2_ops;
+	q->mem_ops = &vb2_vmalloc_memops;
+	q->buf_struct_size = sizeof(struct rkisp1_buffer);
+	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+	q->lock = &node->vlock;
+
+	return vb2_queue_init(q);
+}
+
+static void rkisp1_init_params(struct rkisp1_params *params)
+{
+	params->vdev_fmt.fmt.meta.dataformat =
+		V4L2_META_FMT_RK_ISP1_PARAMS;
+	params->vdev_fmt.fmt.meta.buffersize =
+		sizeof(struct rkisp1_params_cfg);
+}
+
+int rkisp1_params_register(struct rkisp1_params *params,
+			   struct v4l2_device *v4l2_dev,
+			   struct rkisp1_device *rkisp1)
+{
+	struct rkisp1_vdev_node *node = &params->vnode;
+	struct video_device *vdev = &node->vdev;
+	int ret;
+
+	params->rkisp1 = rkisp1;
+	mutex_init(&node->vlock);
+	spin_lock_init(&params->config_lock);
+
+	strscpy(vdev->name, RKISP1_PARAMS_DEV_NAME, sizeof(vdev->name));
+
+	video_set_drvdata(vdev, params);
+	vdev->ioctl_ops = &rkisp1_params_ioctl;
+	vdev->fops = &rkisp1_params_fops;
+	vdev->release = video_device_release_empty;
+	/*
+	 * Provide a mutex to v4l2 core. It will be used
+	 * to protect all fops and v4l2 ioctls.
+	 */
+	vdev->lock = &node->vlock;
+	vdev->v4l2_dev = v4l2_dev;
+	vdev->queue = &node->buf_queue;
+	vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_META_OUTPUT;
+	vdev->vfl_dir = VFL_DIR_TX;
+	rkisp1_params_init_vb2_queue(vdev->queue, params);
+	rkisp1_init_params(params);
+
+	node->pad.flags = MEDIA_PAD_FL_SOURCE;
+	ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
+	if (ret)
+		goto err_release_queue;
+	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+	if (ret) {
+		dev_err(&vdev->dev,
+			"failed to register %s, ret=%d\n", vdev->name, ret);
+		goto err_cleanup_media_entity;
+	}
+	return 0;
+err_cleanup_media_entity:
+	media_entity_cleanup(&vdev->entity);
+err_release_queue:
+	vb2_queue_release(vdev->queue);
+	return ret;
+}
+
+void rkisp1_params_unregister(struct rkisp1_params *params)
+{
+	struct rkisp1_vdev_node *node = &params->vnode;
+	struct video_device *vdev = &node->vdev;
+
+	video_unregister_device(vdev);
+	media_entity_cleanup(&vdev->entity);
+	vb2_queue_release(vdev->queue);
+}
diff --git a/drivers/staging/media/rkisp1/rkisp1-regs.h b/drivers/staging/media/rkisp1/rkisp1-regs.h
new file mode 100644
index 0000000..46018f4
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-regs.h
@@ -0,0 +1,1264 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Rockchip ISP1 Driver - Registers header
+ *
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef _RKISP1_REGS_H
+#define _RKISP1_REGS_H
+
+/* ISP_CTRL */
+#define RKISP1_CIF_ISP_CTRL_ISP_ENABLE			BIT(0)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT		(0 << 1)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU656		BIT(1)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU601		(2 << 1)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU601	(3 << 1)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_DATA_MODE		(4 << 1)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU656	(5 << 1)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT_ITU656	(6 << 1)
+#define RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE		BIT(4)
+#define RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA		BIT(6)
+#define RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA			BIT(7)
+#define RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD_PERMANENT	BIT(8)
+#define RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD			BIT(9)
+#define RKISP1_CIF_ISP_CTRL_ISP_GEN_CFG_UPD		BIT(10)
+#define RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA		BIT(11)
+#define RKISP1_CIF_ISP_CTRL_ISP_FLASH_MODE_ENA		BIT(12)
+#define RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA		BIT(13)
+#define RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA		BIT(14)
+
+/* ISP_ACQ_PROP */
+#define RKISP1_CIF_ISP_ACQ_PROP_POS_EDGE		BIT(0)
+#define RKISP1_CIF_ISP_ACQ_PROP_HSYNC_LOW		BIT(1)
+#define RKISP1_CIF_ISP_ACQ_PROP_VSYNC_LOW		BIT(2)
+#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT_RGGB		(0 << 3)
+#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT_GRBG		BIT(3)
+#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT_GBRG		(2 << 3)
+#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT_BGGR		(3 << 3)
+#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT(pat)		((pat) << 3)
+#define RKISP1_CIF_ISP_ACQ_PROP_YCBYCR			(0 << 7)
+#define RKISP1_CIF_ISP_ACQ_PROP_YCRYCB			BIT(7)
+#define RKISP1_CIF_ISP_ACQ_PROP_CBYCRY			(2 << 7)
+#define RKISP1_CIF_ISP_ACQ_PROP_CRYCBY			(3 << 7)
+#define RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_ALL		(0 << 9)
+#define RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_EVEN		BIT(9)
+#define RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_ODD		(2 << 9)
+#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_12B		(0 << 12)
+#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_10B_ZERO		BIT(12)
+#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_10B_MSB		(2 << 12)
+#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_8B_ZERO		(3 << 12)
+#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_8B_MSB		(4 << 12)
+
+/* VI_DPCL */
+#define RKISP1_CIF_VI_DPCL_DMA_JPEG			(0 << 0)
+#define RKISP1_CIF_VI_DPCL_MP_MUX_MRSZ_MI		BIT(0)
+#define RKISP1_CIF_VI_DPCL_MP_MUX_MRSZ_JPEG		(2 << 0)
+#define RKISP1_CIF_VI_DPCL_CHAN_MODE_MP			BIT(2)
+#define RKISP1_CIF_VI_DPCL_CHAN_MODE_SP			(2 << 2)
+#define RKISP1_CIF_VI_DPCL_CHAN_MODE_MPSP		(3 << 2)
+#define RKISP1_CIF_VI_DPCL_DMA_SW_SPMUX			(0 << 4)
+#define RKISP1_CIF_VI_DPCL_DMA_SW_SI			BIT(4)
+#define RKISP1_CIF_VI_DPCL_DMA_SW_IE			(2 << 4)
+#define RKISP1_CIF_VI_DPCL_DMA_SW_JPEG			(3 << 4)
+#define RKISP1_CIF_VI_DPCL_DMA_SW_ISP			(4 << 4)
+#define RKISP1_CIF_VI_DPCL_IF_SEL_PARALLEL		(0 << 8)
+#define RKISP1_CIF_VI_DPCL_IF_SEL_SMIA			BIT(8)
+#define RKISP1_CIF_VI_DPCL_IF_SEL_MIPI			(2 << 8)
+#define RKISP1_CIF_VI_DPCL_DMA_IE_MUX_DMA		BIT(10)
+#define RKISP1_CIF_VI_DPCL_DMA_SP_MUX_DMA		BIT(11)
+
+/* ISP_IMSC - ISP_MIS - ISP_RIS - ISP_ICR - ISP_ISR */
+#define RKISP1_CIF_ISP_OFF				BIT(0)
+#define RKISP1_CIF_ISP_FRAME				BIT(1)
+#define RKISP1_CIF_ISP_DATA_LOSS			BIT(2)
+#define RKISP1_CIF_ISP_PIC_SIZE_ERROR			BIT(3)
+#define RKISP1_CIF_ISP_AWB_DONE				BIT(4)
+#define RKISP1_CIF_ISP_FRAME_IN				BIT(5)
+#define RKISP1_CIF_ISP_V_START				BIT(6)
+#define RKISP1_CIF_ISP_H_START				BIT(7)
+#define RKISP1_CIF_ISP_FLASH_ON				BIT(8)
+#define RKISP1_CIF_ISP_FLASH_OFF			BIT(9)
+#define RKISP1_CIF_ISP_SHUTTER_ON			BIT(10)
+#define RKISP1_CIF_ISP_SHUTTER_OFF			BIT(11)
+#define RKISP1_CIF_ISP_AFM_SUM_OF			BIT(12)
+#define RKISP1_CIF_ISP_AFM_LUM_OF			BIT(13)
+#define RKISP1_CIF_ISP_AFM_FIN				BIT(14)
+#define RKISP1_CIF_ISP_HIST_MEASURE_RDY			BIT(15)
+#define RKISP1_CIF_ISP_FLASH_CAP			BIT(17)
+#define RKISP1_CIF_ISP_EXP_END				BIT(18)
+#define RKISP1_CIF_ISP_VSM_END				BIT(19)
+
+/* ISP_ERR */
+#define RKISP1_CIF_ISP_ERR_INFORM_SIZE			BIT(0)
+#define RKISP1_CIF_ISP_ERR_IS_SIZE			BIT(1)
+#define RKISP1_CIF_ISP_ERR_OUTFORM_SIZE			BIT(2)
+
+/* MI_CTRL */
+#define RKISP1_CIF_MI_CTRL_MP_ENABLE			BIT(0)
+#define RKISP1_CIF_MI_CTRL_SP_ENABLE			(2 << 0)
+#define RKISP1_CIF_MI_CTRL_JPEG_ENABLE			(4 << 0)
+#define RKISP1_CIF_MI_CTRL_RAW_ENABLE			(8 << 0)
+#define RKISP1_CIF_MI_CTRL_HFLIP			BIT(4)
+#define RKISP1_CIF_MI_CTRL_VFLIP			BIT(5)
+#define RKISP1_CIF_MI_CTRL_ROT				BIT(6)
+#define RKISP1_CIF_MI_BYTE_SWAP				BIT(7)
+#define RKISP1_CIF_MI_SP_Y_FULL_YUV2RGB			BIT(8)
+#define RKISP1_CIF_MI_SP_CBCR_FULL_YUV2RGB		BIT(9)
+#define RKISP1_CIF_MI_SP_422NONCOSITEED			BIT(10)
+#define RKISP1_CIF_MI_MP_PINGPONG_ENABEL		BIT(11)
+#define RKISP1_CIF_MI_SP_PINGPONG_ENABEL		BIT(12)
+#define RKISP1_CIF_MI_MP_AUTOUPDATE_ENABLE		BIT(13)
+#define RKISP1_CIF_MI_SP_AUTOUPDATE_ENABLE		BIT(14)
+#define RKISP1_CIF_MI_LAST_PIXEL_SIG_ENABLE		BIT(15)
+#define RKISP1_CIF_MI_CTRL_BURST_LEN_LUM_16		(0 << 16)
+#define RKISP1_CIF_MI_CTRL_BURST_LEN_LUM_32		BIT(16)
+#define RKISP1_CIF_MI_CTRL_BURST_LEN_LUM_64		(2 << 16)
+#define RKISP1_CIF_MI_CTRL_BURST_LEN_CHROM_16		(0 << 18)
+#define RKISP1_CIF_MI_CTRL_BURST_LEN_CHROM_32		BIT(18)
+#define RKISP1_CIF_MI_CTRL_BURST_LEN_CHROM_64		(2 << 18)
+#define RKISP1_CIF_MI_CTRL_INIT_BASE_EN			BIT(20)
+#define RKISP1_CIF_MI_CTRL_INIT_OFFSET_EN		BIT(21)
+#define RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8		(0 << 22)
+#define RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA		BIT(22)
+#define RKISP1_MI_CTRL_MP_WRITE_YUVINT			(2 << 22)
+#define RKISP1_MI_CTRL_MP_WRITE_RAW12			(2 << 22)
+#define RKISP1_MI_CTRL_SP_WRITE_PLA			(0 << 24)
+#define RKISP1_MI_CTRL_SP_WRITE_SPLA			BIT(24)
+#define RKISP1_MI_CTRL_SP_WRITE_INT			(2 << 24)
+#define RKISP1_MI_CTRL_SP_INPUT_YUV400			(0 << 26)
+#define RKISP1_MI_CTRL_SP_INPUT_YUV420			BIT(26)
+#define RKISP1_MI_CTRL_SP_INPUT_YUV422			(2 << 26)
+#define RKISP1_MI_CTRL_SP_INPUT_YUV444			(3 << 26)
+#define RKISP1_MI_CTRL_SP_OUTPUT_YUV400			(0 << 28)
+#define RKISP1_MI_CTRL_SP_OUTPUT_YUV420			BIT(28)
+#define RKISP1_MI_CTRL_SP_OUTPUT_YUV422			(2 << 28)
+#define RKISP1_MI_CTRL_SP_OUTPUT_YUV444			(3 << 28)
+#define RKISP1_MI_CTRL_SP_OUTPUT_RGB565			(4 << 28)
+#define RKISP1_MI_CTRL_SP_OUTPUT_RGB666			(5 << 28)
+#define RKISP1_MI_CTRL_SP_OUTPUT_RGB888			(6 << 28)
+
+#define RKISP1_MI_CTRL_MP_FMT_MASK			GENMASK(23, 22)
+#define RKISP1_MI_CTRL_SP_FMT_MASK			GENMASK(30, 24)
+
+/* MI_INIT */
+#define RKISP1_CIF_MI_INIT_SKIP				BIT(2)
+#define RKISP1_CIF_MI_INIT_SOFT_UPD			BIT(4)
+
+/* MI_CTRL_SHD */
+#define RKISP1_CIF_MI_CTRL_SHD_MP_IN_ENABLED		BIT(0)
+#define RKISP1_CIF_MI_CTRL_SHD_SP_IN_ENABLED		BIT(1)
+#define RKISP1_CIF_MI_CTRL_SHD_JPEG_IN_ENABLED		BIT(2)
+#define RKISP1_CIF_MI_CTRL_SHD_RAW_IN_ENABLED		BIT(3)
+#define RKISP1_CIF_MI_CTRL_SHD_MP_OUT_ENABLED		BIT(16)
+#define RKISP1_CIF_MI_CTRL_SHD_SP_OUT_ENABLED		BIT(17)
+#define RKISP1_CIF_MI_CTRL_SHD_JPEG_OUT_ENABLED		BIT(18)
+#define RKISP1_CIF_MI_CTRL_SHD_RAW_OUT_ENABLED		BIT(19)
+
+/* RSZ_CTRL */
+#define RKISP1_CIF_RSZ_CTRL_SCALE_HY_ENABLE		BIT(0)
+#define RKISP1_CIF_RSZ_CTRL_SCALE_HC_ENABLE		BIT(1)
+#define RKISP1_CIF_RSZ_CTRL_SCALE_VY_ENABLE		BIT(2)
+#define RKISP1_CIF_RSZ_CTRL_SCALE_VC_ENABLE		BIT(3)
+#define RKISP1_CIF_RSZ_CTRL_SCALE_HY_UP			BIT(4)
+#define RKISP1_CIF_RSZ_CTRL_SCALE_HC_UP			BIT(5)
+#define RKISP1_CIF_RSZ_CTRL_SCALE_VY_UP			BIT(6)
+#define RKISP1_CIF_RSZ_CTRL_SCALE_VC_UP			BIT(7)
+#define RKISP1_CIF_RSZ_CTRL_CFG_UPD			BIT(8)
+#define RKISP1_CIF_RSZ_CTRL_CFG_UPD_AUTO		BIT(9)
+#define RKISP1_CIF_RSZ_SCALER_FACTOR			BIT(16)
+
+/* MI_IMSC - MI_MIS - MI_RIS - MI_ICR - MI_ISR */
+#define RKISP1_CIF_MI_FRAME(stream)			BIT((stream)->id)
+#define RKISP1_CIF_MI_MBLK_LINE				BIT(2)
+#define RKISP1_CIF_MI_FILL_MP_Y				BIT(3)
+#define RKISP1_CIF_MI_WRAP_MP_Y				BIT(4)
+#define RKISP1_CIF_MI_WRAP_MP_CB			BIT(5)
+#define RKISP1_CIF_MI_WRAP_MP_CR			BIT(6)
+#define RKISP1_CIF_MI_WRAP_SP_Y				BIT(7)
+#define RKISP1_CIF_MI_WRAP_SP_CB			BIT(8)
+#define RKISP1_CIF_MI_WRAP_SP_CR			BIT(9)
+#define RKISP1_CIF_MI_DMA_READY				BIT(11)
+
+/* MI_STATUS */
+#define RKISP1_CIF_MI_STATUS_MP_Y_FIFO_FULL		BIT(0)
+#define RKISP1_CIF_MI_STATUS_SP_Y_FIFO_FULL		BIT(4)
+
+/* MI_DMA_CTRL */
+#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_LUM_16		(0 << 0)
+#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_LUM_32		BIT(0)
+#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_LUM_64		(2 << 0)
+#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_CHROM_16	(0 << 2)
+#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_CHROM_32	BIT(2)
+#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_CHROM_64	(2 << 2)
+#define RKISP1_CIF_MI_DMA_CTRL_READ_FMT_PLANAR		(0 << 4)
+#define RKISP1_CIF_MI_DMA_CTRL_READ_FMT_SPLANAR		BIT(4)
+#define RKISP1_CIF_MI_DMA_CTRL_FMT_YUV400		(0 << 6)
+#define RKISP1_CIF_MI_DMA_CTRL_FMT_YUV420		BIT(6)
+#define RKISP1_CIF_MI_DMA_CTRL_READ_FMT_PACKED		(2 << 4)
+#define RKISP1_CIF_MI_DMA_CTRL_FMT_YUV422		(2 << 6)
+#define RKISP1_CIF_MI_DMA_CTRL_FMT_YUV444		(3 << 6)
+#define RKISP1_CIF_MI_DMA_CTRL_BYTE_SWAP		BIT(8)
+#define RKISP1_CIF_MI_DMA_CTRL_CONTINUOUS_ENA		BIT(9)
+#define RKISP1_CIF_MI_DMA_CTRL_RGB_BAYER_NO		(0 << 12)
+#define RKISP1_CIF_MI_DMA_CTRL_RGB_BAYER_8BIT		BIT(12)
+#define RKISP1_CIF_MI_DMA_CTRL_RGB_BAYER_16BIT		(2 << 12)
+/* MI_DMA_START */
+#define RKISP1_CIF_MI_DMA_START_ENABLE			BIT(0)
+/* MI_XTD_FORMAT_CTRL  */
+#define RKISP1_CIF_MI_XTD_FMT_CTRL_MP_CB_CR_SWAP	BIT(0)
+#define RKISP1_CIF_MI_XTD_FMT_CTRL_SP_CB_CR_SWAP	BIT(1)
+#define RKISP1_CIF_MI_XTD_FMT_CTRL_DMA_CB_CR_SWAP	BIT(2)
+
+/* CCL */
+#define RKISP1_CIF_CCL_CIF_CLK_DIS			BIT(2)
+/* ICCL */
+#define RKISP1_CIF_ICCL_ISP_CLK				BIT(0)
+#define RKISP1_CIF_ICCL_CP_CLK				BIT(1)
+#define RKISP1_CIF_ICCL_RES_2				BIT(2)
+#define RKISP1_CIF_ICCL_MRSZ_CLK			BIT(3)
+#define RKISP1_CIF_ICCL_SRSZ_CLK			BIT(4)
+#define RKISP1_CIF_ICCL_JPEG_CLK			BIT(5)
+#define RKISP1_CIF_ICCL_MI_CLK				BIT(6)
+#define RKISP1_CIF_ICCL_RES_7				BIT(7)
+#define RKISP1_CIF_ICCL_IE_CLK				BIT(8)
+#define RKISP1_CIF_ICCL_SIMP_CLK			BIT(9)
+#define RKISP1_CIF_ICCL_SMIA_CLK			BIT(10)
+#define RKISP1_CIF_ICCL_MIPI_CLK			BIT(11)
+#define RKISP1_CIF_ICCL_DCROP_CLK			BIT(12)
+/* IRCL */
+#define RKISP1_CIF_IRCL_ISP_SW_RST			BIT(0)
+#define RKISP1_CIF_IRCL_CP_SW_RST			BIT(1)
+#define RKISP1_CIF_IRCL_YCS_SW_RST			BIT(2)
+#define RKISP1_CIF_IRCL_MRSZ_SW_RST			BIT(3)
+#define RKISP1_CIF_IRCL_SRSZ_SW_RST			BIT(4)
+#define RKISP1_CIF_IRCL_JPEG_SW_RST			BIT(5)
+#define RKISP1_CIF_IRCL_MI_SW_RST			BIT(6)
+#define RKISP1_CIF_IRCL_CIF_SW_RST			BIT(7)
+#define RKISP1_CIF_IRCL_IE_SW_RST			BIT(8)
+#define RKISP1_CIF_IRCL_SI_SW_RST			BIT(9)
+#define RKISP1_CIF_IRCL_MIPI_SW_RST			BIT(11)
+
+/* C_PROC_CTR */
+#define RKISP1_CIF_C_PROC_CTR_ENABLE			BIT(0)
+#define RKISP1_CIF_C_PROC_YOUT_FULL			BIT(1)
+#define RKISP1_CIF_C_PROC_YIN_FULL			BIT(2)
+#define RKISP1_CIF_C_PROC_COUT_FULL			BIT(3)
+#define RKISP1_CIF_C_PROC_CTRL_RESERVED			0xFFFFFFFE
+#define RKISP1_CIF_C_PROC_CONTRAST_RESERVED		0xFFFFFF00
+#define RKISP1_CIF_C_PROC_BRIGHTNESS_RESERVED		0xFFFFFF00
+#define RKISP1_CIF_C_PROC_HUE_RESERVED			0xFFFFFF00
+#define RKISP1_CIF_C_PROC_SATURATION_RESERVED		0xFFFFFF00
+#define RKISP1_CIF_C_PROC_MACC_RESERVED			0xE000E000
+#define RKISP1_CIF_C_PROC_TONE_RESERVED			0xF000
+/* DUAL_CROP_CTRL */
+#define RKISP1_CIF_DUAL_CROP_MP_MODE_BYPASS		(0 << 0)
+#define RKISP1_CIF_DUAL_CROP_MP_MODE_YUV		BIT(0)
+#define RKISP1_CIF_DUAL_CROP_MP_MODE_RAW		(2 << 0)
+#define RKISP1_CIF_DUAL_CROP_SP_MODE_BYPASS		(0 << 2)
+#define RKISP1_CIF_DUAL_CROP_SP_MODE_YUV		BIT(2)
+#define RKISP1_CIF_DUAL_CROP_SP_MODE_RAW		(2 << 2)
+#define RKISP1_CIF_DUAL_CROP_CFG_UPD_PERMANENT		BIT(4)
+#define RKISP1_CIF_DUAL_CROP_CFG_UPD			BIT(5)
+#define RKISP1_CIF_DUAL_CROP_GEN_CFG_UPD		BIT(6)
+
+/* IMG_EFF_CTRL */
+#define RKISP1_CIF_IMG_EFF_CTRL_ENABLE			BIT(0)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_BLACKWHITE		(0 << 1)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_NEGATIVE		BIT(1)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA		(2 << 1)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_COLOR_SEL		(3 << 1)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_EMBOSS		(4 << 1)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SKETCH		(5 << 1)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SHARPEN		(6 << 1)
+#define RKISP1_CIF_IMG_EFF_CTRL_CFG_UPD			BIT(4)
+#define RKISP1_CIF_IMG_EFF_CTRL_YCBCR_FULL		BIT(5)
+
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_BLACKWHITE_SHIFT	0
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_NEGATIVE_SHIFT	1
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA_SHIFT	2
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_COLOR_SEL_SHIFT	3
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_EMBOSS_SHIFT	4
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SKETCH_SHIFT	5
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SHARPEN_SHIFT	6
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_MASK		0xE
+
+/* IMG_EFF_COLOR_SEL */
+#define RKISP1_CIF_IMG_EFF_COLOR_RGB			0
+#define RKISP1_CIF_IMG_EFF_COLOR_B			BIT(0)
+#define RKISP1_CIF_IMG_EFF_COLOR_G			(2 << 0)
+#define RKISP1_CIF_IMG_EFF_COLOR_GB			(3 << 0)
+#define RKISP1_CIF_IMG_EFF_COLOR_R			(4 << 0)
+#define RKISP1_CIF_IMG_EFF_COLOR_RB			(5 << 0)
+#define RKISP1_CIF_IMG_EFF_COLOR_RG			(6 << 0)
+#define RKISP1_CIF_IMG_EFF_COLOR_RGB2			(7 << 0)
+
+/* MIPI_CTRL */
+#define RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA			BIT(0)
+#define RKISP1_CIF_MIPI_CTRL_SHUTDOWNLANES(a)		(((a) & 0xF) << 8)
+#define RKISP1_CIF_MIPI_CTRL_NUM_LANES(a)		(((a) & 0x3) << 12)
+#define RKISP1_CIF_MIPI_CTRL_ERR_SOT_HS_SKIP		BIT(16)
+#define RKISP1_CIF_MIPI_CTRL_ERR_SOT_SYNC_HS_SKIP	BIT(17)
+#define RKISP1_CIF_MIPI_CTRL_CLOCKLANE_ENA		BIT(18)
+
+/* MIPI_DATA_SEL */
+#define RKISP1_CIF_MIPI_DATA_SEL_VC(a)			(((a) & 0x3) << 6)
+#define RKISP1_CIF_MIPI_DATA_SEL_DT(a)			(((a) & 0x3F) << 0)
+/* MIPI DATA_TYPE */
+#define RKISP1_CIF_CSI2_DT_YUV420_8b			0x18
+#define RKISP1_CIF_CSI2_DT_YUV420_10b			0x19
+#define RKISP1_CIF_CSI2_DT_YUV422_8b			0x1E
+#define RKISP1_CIF_CSI2_DT_YUV422_10b			0x1F
+#define RKISP1_CIF_CSI2_DT_RGB565			0x22
+#define RKISP1_CIF_CSI2_DT_RGB666			0x23
+#define RKISP1_CIF_CSI2_DT_RGB888			0x24
+#define RKISP1_CIF_CSI2_DT_RAW8				0x2A
+#define RKISP1_CIF_CSI2_DT_RAW10			0x2B
+#define RKISP1_CIF_CSI2_DT_RAW12			0x2C
+
+/* MIPI_IMSC, MIPI_RIS, MIPI_MIS, MIPI_ICR, MIPI_ISR */
+#define RKISP1_CIF_MIPI_SYNC_FIFO_OVFLW(a)		(((a) & 0xF) << 0)
+#define RKISP1_CIF_MIPI_ERR_SOT(a)			(((a) & 0xF) << 4)
+#define RKISP1_CIF_MIPI_ERR_SOT_SYNC(a)			(((a) & 0xF) << 8)
+#define RKISP1_CIF_MIPI_ERR_EOT_SYNC(a)			(((a) & 0xF) << 12)
+#define RKISP1_CIF_MIPI_ERR_CTRL(a)			(((a) & 0xF) << 16)
+#define RKISP1_CIF_MIPI_ERR_PROTOCOL			BIT(20)
+#define RKISP1_CIF_MIPI_ERR_ECC1			BIT(21)
+#define RKISP1_CIF_MIPI_ERR_ECC2			BIT(22)
+#define RKISP1_CIF_MIPI_ERR_CS				BIT(23)
+#define RKISP1_CIF_MIPI_FRAME_END			BIT(24)
+#define RKISP1_CIF_MIPI_ADD_DATA_OVFLW			BIT(25)
+#define RKISP1_CIF_MIPI_ADD_DATA_WATER_MARK		BIT(26)
+
+#define RKISP1_CIF_MIPI_ERR_CSI  (RKISP1_CIF_MIPI_ERR_PROTOCOL | \
+	RKISP1_CIF_MIPI_ERR_ECC1 | \
+	RKISP1_CIF_MIPI_ERR_ECC2 | \
+	RKISP1_CIF_MIPI_ERR_CS)
+
+#define RKISP1_CIF_MIPI_ERR_DPHY  (RKISP1_CIF_MIPI_ERR_SOT(3) | \
+	RKISP1_CIF_MIPI_ERR_SOT_SYNC(3) | \
+	RKISP1_CIF_MIPI_ERR_EOT_SYNC(3) | \
+	RKISP1_CIF_MIPI_ERR_CTRL(3))
+
+/* SUPER_IMPOSE */
+#define RKISP1_CIF_SUPER_IMP_CTRL_NORMAL_MODE		BIT(0)
+#define RKISP1_CIF_SUPER_IMP_CTRL_REF_IMG_MEM		BIT(1)
+#define RKISP1_CIF_SUPER_IMP_CTRL_TRANSP_DIS		BIT(2)
+
+/* ISP HISTOGRAM CALCULATION : ISP_HIST_PROP */
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_DIS		(0 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_RGB		BIT(0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_RED		(2 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_GREEN		(3 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_BLUE		(4 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_LUM		(5 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_MASK		0x7
+#define RKISP1_CIF_ISP_HIST_PREDIV_SET(x)		(((x) & 0x7F) << 3)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_SET(v0, v1, v2, v3)	\
+				     (((v0) & 0x1F) | (((v1) & 0x1F) << 8)  |\
+				     (((v2) & 0x1F) << 16) | \
+				     (((v3) & 0x1F) << 24))
+
+#define RKISP1_CIF_ISP_HIST_WINDOW_OFFSET_RESERVED	0xFFFFF000
+#define RKISP1_CIF_ISP_HIST_WINDOW_SIZE_RESERVED	0xFFFFF800
+#define RKISP1_CIF_ISP_HIST_WEIGHT_RESERVED		0xE0E0E0E0
+#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER		0x0000007F
+#define RKISP1_CIF_ISP_HIST_ROW_NUM			5
+#define RKISP1_CIF_ISP_HIST_COLUMN_NUM			5
+
+/* AUTO FOCUS MEASUREMENT:  ISP_AFM_CTRL */
+#define RKISP1_ISP_AFM_CTRL_ENABLE			BIT(0)
+
+/* SHUTTER CONTROL */
+#define RKISP1_CIF_ISP_SH_CTRL_SH_ENA			BIT(0)
+#define RKISP1_CIF_ISP_SH_CTRL_REP_EN			BIT(1)
+#define RKISP1_CIF_ISP_SH_CTRL_SRC_SH_TRIG		BIT(2)
+#define RKISP1_CIF_ISP_SH_CTRL_EDGE_POS			BIT(3)
+#define RKISP1_CIF_ISP_SH_CTRL_POL_LOW			BIT(4)
+
+/* FLASH MODULE */
+/* ISP_FLASH_CMD */
+#define RKISP1_CIFFLASH_CMD_PRELIGHT_ON			BIT(0)
+#define RKISP1_CIFFLASH_CMD_FLASH_ON			BIT(1)
+#define RKISP1_CIFFLASH_CMD_PRE_FLASH_ON		BIT(2)
+/* ISP_FLASH_CONFIG */
+#define RKISP1_CIFFLASH_CONFIG_PRELIGHT_END		BIT(0)
+#define RKISP1_CIFFLASH_CONFIG_VSYNC_POS		BIT(1)
+#define RKISP1_CIFFLASH_CONFIG_PRELIGHT_LOW		BIT(2)
+#define RKISP1_CIFFLASH_CONFIG_SRC_FL_TRIG		BIT(3)
+#define RKISP1_CIFFLASH_CONFIG_DELAY(a)			(((a) & 0xF) << 4)
+
+/* Demosaic:  ISP_DEMOSAIC */
+#define RKISP1_CIF_ISP_DEMOSAIC_BYPASS			BIT(10)
+#define RKISP1_CIF_ISP_DEMOSAIC_TH(x)			((x) & 0xFF)
+
+/* AWB */
+/* ISP_AWB_PROP */
+#define RKISP1_CIF_ISP_AWB_YMAX_CMP_EN			BIT(2)
+#define RKISP1_CIF_ISP_AWB_YMAX_READ(x)			(((x) >> 2) & 1)
+#define RKISP1_CIF_ISP_AWB_MODE_RGB_EN			((1 << 31) | (0x2 << 0))
+#define RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN		((0 << 31) | (0x2 << 0))
+#define RKISP1_CIF_ISP_AWB_MODE_MASK_NONE		0xFFFFFFFC
+#define RKISP1_CIF_ISP_AWB_MODE_READ(x)			((x) & 3)
+/* ISP_AWB_GAIN_RB, ISP_AWB_GAIN_G  */
+#define RKISP1_CIF_ISP_AWB_GAIN_R_SET(x)		(((x) & 0x3FF) << 16)
+#define RKISP1_CIF_ISP_AWB_GAIN_R_READ(x)		(((x) >> 16) & 0x3FF)
+#define RKISP1_CIF_ISP_AWB_GAIN_B_SET(x)		((x) & 0x3FFF)
+#define RKISP1_CIF_ISP_AWB_GAIN_B_READ(x)		((x) & 0x3FFF)
+/* ISP_AWB_REF */
+#define RKISP1_CIF_ISP_AWB_REF_CR_SET(x)		(((x) & 0xFF) << 8)
+#define RKISP1_CIF_ISP_AWB_REF_CR_READ(x)		(((x) >> 8) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_REF_CB_READ(x)		((x) & 0xFF)
+/* ISP_AWB_THRESH */
+#define RKISP1_CIF_ISP_AWB_MAX_CS_SET(x)		(((x) & 0xFF) << 8)
+#define RKISP1_CIF_ISP_AWB_MAX_CS_READ(x)		(((x) >> 8) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_MIN_C_READ(x)		((x) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_MIN_Y_SET(x)			(((x) & 0xFF) << 16)
+#define RKISP1_CIF_ISP_AWB_MIN_Y_READ(x)		(((x) >> 16) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_MAX_Y_SET(x)			(((x) & 0xFF) << 24)
+#define RKISP1_CIF_ISP_AWB_MAX_Y_READ(x)			(((x) >> 24) & 0xFF)
+/* ISP_AWB_MEAN */
+#define RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(x)		((x) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_GET_MEAN_CB_B(x)		(((x) >> 8) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(x)		(((x) >> 16) & 0xFF)
+/* ISP_AWB_WHITE_CNT */
+#define RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(x)		((x) & 0x3FFFFFF)
+
+#define RKISP1_CIF_ISP_AWB_GAINS_MAX_VAL		0x000003FF
+#define RKISP1_CIF_ISP_AWB_WINDOW_OFFSET_MAX		0x00000FFF
+#define RKISP1_CIF_ISP_AWB_WINDOW_MAX_SIZE		0x00001FFF
+#define RKISP1_CIF_ISP_AWB_CBCR_MAX_REF			0x000000FF
+#define RKISP1_CIF_ISP_AWB_THRES_MAX_YC			0x000000FF
+
+/* AE */
+/* ISP_EXP_CTRL */
+#define RKISP1_CIF_ISP_EXP_ENA				BIT(0)
+#define RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP		BIT(1)
+/*
+ * '1': luminance calculation according to Y = (R + G + B) * 0.332 (85/256)
+ * '0': luminance calculation according to Y = 16 + 0.25*R + 0.5*G + 0.1094*B
+ */
+#define RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1		BIT(31)
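+/*
+ * Worked example (illustrative only): for a mid-grey pixel with
+ * R = G = B = 128, measurement mode '1' gives
+ * Y = (128 + 128 + 128) * 85 / 256 ~= 127, i.e. close to the per-channel
+ * value, whereas mode '0' weights green most heavily.
+ */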
+
+/* ISP_EXP_H_SIZE */
+#define RKISP1_CIF_ISP_EXP_H_SIZE_SET(x)		((x) & 0x7FF)
+#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK			0x000007FF
+/* ISP_EXP_V_SIZE: vertical size must be a multiple of 2. */
+#define RKISP1_CIF_ISP_EXP_V_SIZE_SET(x)		((x) & 0x7FE)
+
+/* ISP_EXP_H_OFFSET */
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET(x)		((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_MAX_HOFFS			2424
+/* ISP_EXP_V_OFFSET */
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET(x)		((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_MAX_VOFFS			1806
+
+#define RKISP1_CIF_ISP_EXP_ROW_NUM			5
+#define RKISP1_CIF_ISP_EXP_COLUMN_NUM			5
+#define RKISP1_CIF_ISP_EXP_NUM_LUMA_REGS \
+	(RKISP1_CIF_ISP_EXP_ROW_NUM * RKISP1_CIF_ISP_EXP_COLUMN_NUM)
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE		516
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE		35
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE		390
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE		28
+#define RKISP1_CIF_ISP_EXP_MAX_HSIZE	\
+	(RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE * RKISP1_CIF_ISP_EXP_COLUMN_NUM + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_HSIZE	\
+	(RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE * RKISP1_CIF_ISP_EXP_COLUMN_NUM + 1)
+#define RKISP1_CIF_ISP_EXP_MAX_VSIZE	\
+	(RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE * RKISP1_CIF_ISP_EXP_ROW_NUM + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_VSIZE	\
+	(RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE * RKISP1_CIF_ISP_EXP_ROW_NUM + 1)
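+/*
+ * With the 5x5 grid of measurement blocks above, these limits evaluate to
+ * a measurement window of 176..2581 pixels horizontally and 141..1951
+ * pixels vertically.
+ */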
+
+/* LSC: ISP_LSC_CTRL */
+#define RKISP1_CIF_ISP_LSC_CTRL_ENA			BIT(0)
+#define RKISP1_CIF_ISP_LSC_SECT_SIZE_RESERVED		0xFC00FC00
+#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED		0xF000F000
+#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED		0xF000F000
+#define RKISP1_CIF_ISP_LSC_SECTORS_MAX			17
+#define RKISP1_CIF_ISP_LSC_TABLE_DATA(v0, v1)     \
+	(((v0) & 0xFFF) | (((v1) & 0xFFF) << 12))
+#define RKISP1_CIF_ISP_LSC_SECT_SIZE(v0, v1)      \
+	(((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
+#define RKISP1_CIF_ISP_LSC_GRAD_SIZE(v0, v1)      \
+	(((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
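+/*
+ * The macros above pack two 12-bit values into a single 32-bit register
+ * write, e.g. RKISP1_CIF_ISP_LSC_TABLE_DATA(0x123, 0x456) == 0x00456123;
+ * the SECT_SIZE/GRAD_SIZE variants place the second value at bit 16 instead.
+ */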
+
+/* LSC: ISP_LSC_TABLE_SEL */
+#define RKISP1_CIF_ISP_LSC_TABLE_0			0
+#define RKISP1_CIF_ISP_LSC_TABLE_1			1
+
+/* LSC: ISP_LSC_STATUS */
+#define RKISP1_CIF_ISP_LSC_ACTIVE_TABLE			BIT(1)
+#define RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0		0
+#define RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153		153
+
+/* FLT */
+/* ISP_FILT_MODE */
+#define RKISP1_CIF_ISP_FLT_ENA				BIT(0)
+
+/*
+ * 0: green filter static mode (active filter factor = FILT_FAC_MID)
+ * 1: dynamic noise reduction/sharpen (default)
+ */
+#define RKISP1_CIF_ISP_FLT_MODE_DNR			BIT(1)
+#define RKISP1_CIF_ISP_FLT_MODE_MAX			1
+#define RKISP1_CIF_ISP_FLT_CHROMA_V_MODE(x)		(((x) & 0x3) << 4)
+#define RKISP1_CIF_ISP_FLT_CHROMA_H_MODE(x)		(((x) & 0x3) << 6)
+#define RKISP1_CIF_ISP_FLT_CHROMA_MODE_MAX		3
+#define RKISP1_CIF_ISP_FLT_GREEN_STAGE1(x)		(((x) & 0xF) << 8)
+#define RKISP1_CIF_ISP_FLT_GREEN_STAGE1_MAX		8
+#define RKISP1_CIF_ISP_FLT_THREAD_RESERVED		0xFFFFFC00
+#define RKISP1_CIF_ISP_FLT_FAC_RESERVED			0xFFFFFFC0
+#define RKISP1_CIF_ISP_FLT_LUM_WEIGHT_RESERVED		0xFFF80000
+
+#define RKISP1_CIF_ISP_CTK_COEFF_RESERVED		0xFFFFF800
+#define RKISP1_CIF_ISP_XTALK_OFFSET_RESERVED		0xFFFFF000
+
+/* GOC */
+#define RKISP1_CIF_ISP_GAMMA_OUT_MODE_EQU		BIT(0)
+#define RKISP1_CIF_ISP_GOC_MODE_MAX			1
+#define RKISP1_CIF_ISP_GOC_RESERVED			0xFFFFF800
+/* ISP_CTRL BIT 11 */
+#define RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA_READ(x)	(((x) >> 11) & 1)
+
+/* DPCC */
+/* ISP_DPCC_MODE */
+#define RKISP1_CIF_ISP_DPCC_ENA				BIT(0)
+#define RKISP1_CIF_ISP_DPCC_MODE_MAX			0x07
+#define RKISP1_CIF_ISP_DPCC_OUTPUTMODE_MAX		0x0F
+#define RKISP1_CIF_ISP_DPCC_SETUSE_MAX			0x0F
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RESERVED	0xFFFFE000
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_RESERVED	0xFFFF0000
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RESERVED	0xFFFFC0C0
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_RESERVED		0xFFFFC0C0
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_RESERVED		0xFFFF0000
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_RESERVED		0xFFFFC0C0
+#define RKISP1_CIF_ISP_DPCC_RO_LIMIT_RESERVED		0xFFFFF000
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS_RESERVED		0xFFFFF000
+
+/* BLS */
+/* ISP_BLS_CTRL */
+#define RKISP1_CIF_ISP_BLS_ENA				BIT(0)
+#define RKISP1_CIF_ISP_BLS_MODE_MEASURED		BIT(1)
+#define RKISP1_CIF_ISP_BLS_MODE_FIXED			0
+#define RKISP1_CIF_ISP_BLS_WINDOW_1			BIT(2)
+#define RKISP1_CIF_ISP_BLS_WINDOW_2			(2 << 2)
+
+/* GAMMA-IN */
+#define RKISP1_CIFISP_DEGAMMA_X_RESERVED	\
+	((1 << 31) | (1 << 27) | (1 << 23) | (1 << 19) |\
+	(1 << 15) | (1 << 11) | (1 << 7) | (1 << 3))
+#define RKISP1_CIFISP_DEGAMMA_Y_RESERVED		0xFFFFF000
+
+/* AFM */
+#define RKISP1_CIF_ISP_AFM_ENA				BIT(0)
+#define RKISP1_CIF_ISP_AFM_THRES_RESERVED		0xFFFF0000
+#define RKISP1_CIF_ISP_AFM_VAR_SHIFT_RESERVED		0xFFF8FFF8
+#define RKISP1_CIF_ISP_AFM_WINDOW_X_RESERVED		0xE000
+#define RKISP1_CIF_ISP_AFM_WINDOW_Y_RESERVED		0xF000
+#define RKISP1_CIF_ISP_AFM_WINDOW_X_MIN			0x5
+#define RKISP1_CIF_ISP_AFM_WINDOW_Y_MIN			0x2
+#define RKISP1_CIF_ISP_AFM_WINDOW_X(x)			(((x) & 0x1FFF) << 16)
+#define RKISP1_CIF_ISP_AFM_WINDOW_Y(x)			((x) & 0x1FFF)
+
+/* DPF */
+#define RKISP1_CIF_ISP_DPF_MODE_EN			BIT(0)
+#define RKISP1_CIF_ISP_DPF_MODE_B_FLT_DIS		BIT(1)
+#define RKISP1_CIF_ISP_DPF_MODE_GB_FLT_DIS		BIT(2)
+#define RKISP1_CIF_ISP_DPF_MODE_GR_FLT_DIS		BIT(3)
+#define RKISP1_CIF_ISP_DPF_MODE_R_FLT_DIS		BIT(4)
+#define RKISP1_CIF_ISP_DPF_MODE_RB_FLTSIZE_9x9		BIT(5)
+#define RKISP1_CIF_ISP_DPF_MODE_NLL_SEGMENTATION	BIT(6)
+#define RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP		BIT(7)
+#define RKISP1_CIF_ISP_DPF_MODE_LSC_GAIN_COMP		BIT(8)
+#define RKISP1_CIF_ISP_DPF_MODE_USE_NF_GAIN		BIT(9)
+#define RKISP1_CIF_ISP_DPF_NF_GAIN_RESERVED		0xFFFFF000
+#define RKISP1_CIF_ISP_DPF_SPATIAL_COEFF_MAX		0x1F
+#define RKISP1_CIF_ISP_DPF_NLL_COEFF_N_MAX		0x3FF
+
+/* =================================================================== */
+/*                            CIF Registers                            */
+/* =================================================================== */
+#define RKISP1_CIF_CTRL_BASE			0x00000000
+#define RKISP1_CIF_CCL				(RKISP1_CIF_CTRL_BASE + 0x00000000)
+#define RKISP1_CIF_VI_ID			(RKISP1_CIF_CTRL_BASE + 0x00000008)
+#define RKISP1_CIF_ICCL				(RKISP1_CIF_CTRL_BASE + 0x00000010)
+#define RKISP1_CIF_IRCL				(RKISP1_CIF_CTRL_BASE + 0x00000014)
+#define RKISP1_CIF_VI_DPCL			(RKISP1_CIF_CTRL_BASE + 0x00000018)
+
+#define RKISP1_CIF_IMG_EFF_BASE			0x00000200
+#define RKISP1_CIF_IMG_EFF_CTRL			(RKISP1_CIF_IMG_EFF_BASE + 0x00000000)
+#define RKISP1_CIF_IMG_EFF_COLOR_SEL		(RKISP1_CIF_IMG_EFF_BASE + 0x00000004)
+#define RKISP1_CIF_IMG_EFF_MAT_1		(RKISP1_CIF_IMG_EFF_BASE + 0x00000008)
+#define RKISP1_CIF_IMG_EFF_MAT_2		(RKISP1_CIF_IMG_EFF_BASE + 0x0000000C)
+#define RKISP1_CIF_IMG_EFF_MAT_3		(RKISP1_CIF_IMG_EFF_BASE + 0x00000010)
+#define RKISP1_CIF_IMG_EFF_MAT_4		(RKISP1_CIF_IMG_EFF_BASE + 0x00000014)
+#define RKISP1_CIF_IMG_EFF_MAT_5		(RKISP1_CIF_IMG_EFF_BASE + 0x00000018)
+#define RKISP1_CIF_IMG_EFF_TINT			(RKISP1_CIF_IMG_EFF_BASE + 0x0000001C)
+#define RKISP1_CIF_IMG_EFF_CTRL_SHD		(RKISP1_CIF_IMG_EFF_BASE + 0x00000020)
+#define RKISP1_CIF_IMG_EFF_SHARPEN		(RKISP1_CIF_IMG_EFF_BASE + 0x00000024)
+
+#define RKISP1_CIF_SUPER_IMP_BASE		0x00000300
+#define RKISP1_CIF_SUPER_IMP_CTRL		(RKISP1_CIF_SUPER_IMP_BASE + 0x00000000)
+#define RKISP1_CIF_SUPER_IMP_OFFSET_X		(RKISP1_CIF_SUPER_IMP_BASE + 0x00000004)
+#define RKISP1_CIF_SUPER_IMP_OFFSET_Y		(RKISP1_CIF_SUPER_IMP_BASE + 0x00000008)
+#define RKISP1_CIF_SUPER_IMP_COLOR_Y		(RKISP1_CIF_SUPER_IMP_BASE + 0x0000000C)
+#define RKISP1_CIF_SUPER_IMP_COLOR_CB		(RKISP1_CIF_SUPER_IMP_BASE + 0x00000010)
+#define RKISP1_CIF_SUPER_IMP_COLOR_CR		(RKISP1_CIF_SUPER_IMP_BASE + 0x00000014)
+
+#define RKISP1_CIF_ISP_BASE			0x00000400
+#define RKISP1_CIF_ISP_CTRL			(RKISP1_CIF_ISP_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_ACQ_PROP			(RKISP1_CIF_ISP_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_ACQ_H_OFFS		(RKISP1_CIF_ISP_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_ACQ_V_OFFS		(RKISP1_CIF_ISP_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_ACQ_H_SIZE		(RKISP1_CIF_ISP_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_ACQ_V_SIZE		(RKISP1_CIF_ISP_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_ACQ_NR_FRAMES		(RKISP1_CIF_ISP_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_GAMMA_DX_LO		(RKISP1_CIF_ISP_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_GAMMA_DX_HI		(RKISP1_CIF_ISP_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_GAMMA_R_Y0		(RKISP1_CIF_ISP_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_GAMMA_R_Y1		(RKISP1_CIF_ISP_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_GAMMA_R_Y2		(RKISP1_CIF_ISP_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y3		(RKISP1_CIF_ISP_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_GAMMA_R_Y4		(RKISP1_CIF_ISP_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_GAMMA_R_Y5		(RKISP1_CIF_ISP_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_GAMMA_R_Y6		(RKISP1_CIF_ISP_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y7		(RKISP1_CIF_ISP_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_GAMMA_R_Y8		(RKISP1_CIF_ISP_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_GAMMA_R_Y9		(RKISP1_CIF_ISP_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_GAMMA_R_Y10		(RKISP1_CIF_ISP_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y11		(RKISP1_CIF_ISP_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_GAMMA_R_Y12		(RKISP1_CIF_ISP_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_GAMMA_R_Y13		(RKISP1_CIF_ISP_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_GAMMA_R_Y14		(RKISP1_CIF_ISP_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y15		(RKISP1_CIF_ISP_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_GAMMA_R_Y16		(RKISP1_CIF_ISP_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_GAMMA_G_Y0		(RKISP1_CIF_ISP_BASE + 0x00000068)
+#define RKISP1_CIF_ISP_GAMMA_G_Y1		(RKISP1_CIF_ISP_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_GAMMA_G_Y2		(RKISP1_CIF_ISP_BASE + 0x00000070)
+#define RKISP1_CIF_ISP_GAMMA_G_Y3		(RKISP1_CIF_ISP_BASE + 0x00000074)
+#define RKISP1_CIF_ISP_GAMMA_G_Y4		(RKISP1_CIF_ISP_BASE + 0x00000078)
+#define RKISP1_CIF_ISP_GAMMA_G_Y5		(RKISP1_CIF_ISP_BASE + 0x0000007C)
+#define RKISP1_CIF_ISP_GAMMA_G_Y6		(RKISP1_CIF_ISP_BASE + 0x00000080)
+#define RKISP1_CIF_ISP_GAMMA_G_Y7		(RKISP1_CIF_ISP_BASE + 0x00000084)
+#define RKISP1_CIF_ISP_GAMMA_G_Y8		(RKISP1_CIF_ISP_BASE + 0x00000088)
+#define RKISP1_CIF_ISP_GAMMA_G_Y9		(RKISP1_CIF_ISP_BASE + 0x0000008C)
+#define RKISP1_CIF_ISP_GAMMA_G_Y10		(RKISP1_CIF_ISP_BASE + 0x00000090)
+#define RKISP1_CIF_ISP_GAMMA_G_Y11		(RKISP1_CIF_ISP_BASE + 0x00000094)
+#define RKISP1_CIF_ISP_GAMMA_G_Y12		(RKISP1_CIF_ISP_BASE + 0x00000098)
+#define RKISP1_CIF_ISP_GAMMA_G_Y13		(RKISP1_CIF_ISP_BASE + 0x0000009C)
+#define RKISP1_CIF_ISP_GAMMA_G_Y14		(RKISP1_CIF_ISP_BASE + 0x000000A0)
+#define RKISP1_CIF_ISP_GAMMA_G_Y15		(RKISP1_CIF_ISP_BASE + 0x000000A4)
+#define RKISP1_CIF_ISP_GAMMA_G_Y16		(RKISP1_CIF_ISP_BASE + 0x000000A8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y0		(RKISP1_CIF_ISP_BASE + 0x000000AC)
+#define RKISP1_CIF_ISP_GAMMA_B_Y1		(RKISP1_CIF_ISP_BASE + 0x000000B0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y2		(RKISP1_CIF_ISP_BASE + 0x000000B4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y3		(RKISP1_CIF_ISP_BASE + 0x000000B8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y4		(RKISP1_CIF_ISP_BASE + 0x000000BC)
+#define RKISP1_CIF_ISP_GAMMA_B_Y5		(RKISP1_CIF_ISP_BASE + 0x000000C0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y6		(RKISP1_CIF_ISP_BASE + 0x000000C4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y7		(RKISP1_CIF_ISP_BASE + 0x000000C8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y8		(RKISP1_CIF_ISP_BASE + 0x000000CC)
+#define RKISP1_CIF_ISP_GAMMA_B_Y9		(RKISP1_CIF_ISP_BASE + 0x000000D0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y10		(RKISP1_CIF_ISP_BASE + 0x000000D4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y11		(RKISP1_CIF_ISP_BASE + 0x000000D8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y12		(RKISP1_CIF_ISP_BASE + 0x000000DC)
+#define RKISP1_CIF_ISP_GAMMA_B_Y13		(RKISP1_CIF_ISP_BASE + 0x000000E0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y14		(RKISP1_CIF_ISP_BASE + 0x000000E4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y15		(RKISP1_CIF_ISP_BASE + 0x000000E8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y16		(RKISP1_CIF_ISP_BASE + 0x000000EC)
+#define RKISP1_CIF_ISP_AWB_PROP			(RKISP1_CIF_ISP_BASE + 0x00000110)
+#define RKISP1_CIF_ISP_AWB_WND_H_OFFS		(RKISP1_CIF_ISP_BASE + 0x00000114)
+#define RKISP1_CIF_ISP_AWB_WND_V_OFFS		(RKISP1_CIF_ISP_BASE + 0x00000118)
+#define RKISP1_CIF_ISP_AWB_WND_H_SIZE		(RKISP1_CIF_ISP_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_AWB_WND_V_SIZE		(RKISP1_CIF_ISP_BASE + 0x00000120)
+#define RKISP1_CIF_ISP_AWB_FRAMES		(RKISP1_CIF_ISP_BASE + 0x00000124)
+#define RKISP1_CIF_ISP_AWB_REF			(RKISP1_CIF_ISP_BASE + 0x00000128)
+#define RKISP1_CIF_ISP_AWB_THRESH		(RKISP1_CIF_ISP_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_AWB_GAIN_G		(RKISP1_CIF_ISP_BASE + 0x00000138)
+#define RKISP1_CIF_ISP_AWB_GAIN_RB		(RKISP1_CIF_ISP_BASE + 0x0000013C)
+#define RKISP1_CIF_ISP_AWB_WHITE_CNT		(RKISP1_CIF_ISP_BASE + 0x00000140)
+#define RKISP1_CIF_ISP_AWB_MEAN			(RKISP1_CIF_ISP_BASE + 0x00000144)
+#define RKISP1_CIF_ISP_CC_COEFF_0		(RKISP1_CIF_ISP_BASE + 0x00000170)
+#define RKISP1_CIF_ISP_CC_COEFF_1		(RKISP1_CIF_ISP_BASE + 0x00000174)
+#define RKISP1_CIF_ISP_CC_COEFF_2		(RKISP1_CIF_ISP_BASE + 0x00000178)
+#define RKISP1_CIF_ISP_CC_COEFF_3		(RKISP1_CIF_ISP_BASE + 0x0000017C)
+#define RKISP1_CIF_ISP_CC_COEFF_4		(RKISP1_CIF_ISP_BASE + 0x00000180)
+#define RKISP1_CIF_ISP_CC_COEFF_5		(RKISP1_CIF_ISP_BASE + 0x00000184)
+#define RKISP1_CIF_ISP_CC_COEFF_6		(RKISP1_CIF_ISP_BASE + 0x00000188)
+#define RKISP1_CIF_ISP_CC_COEFF_7		(RKISP1_CIF_ISP_BASE + 0x0000018C)
+#define RKISP1_CIF_ISP_CC_COEFF_8		(RKISP1_CIF_ISP_BASE + 0x00000190)
+#define RKISP1_CIF_ISP_OUT_H_OFFS		(RKISP1_CIF_ISP_BASE + 0x00000194)
+#define RKISP1_CIF_ISP_OUT_V_OFFS		(RKISP1_CIF_ISP_BASE + 0x00000198)
+#define RKISP1_CIF_ISP_OUT_H_SIZE		(RKISP1_CIF_ISP_BASE + 0x0000019C)
+#define RKISP1_CIF_ISP_OUT_V_SIZE		(RKISP1_CIF_ISP_BASE + 0x000001A0)
+#define RKISP1_CIF_ISP_DEMOSAIC			(RKISP1_CIF_ISP_BASE + 0x000001A4)
+#define RKISP1_CIF_ISP_FLAGS_SHD		(RKISP1_CIF_ISP_BASE + 0x000001A8)
+#define RKISP1_CIF_ISP_OUT_H_OFFS_SHD		(RKISP1_CIF_ISP_BASE + 0x000001AC)
+#define RKISP1_CIF_ISP_OUT_V_OFFS_SHD		(RKISP1_CIF_ISP_BASE + 0x000001B0)
+#define RKISP1_CIF_ISP_OUT_H_SIZE_SHD		(RKISP1_CIF_ISP_BASE + 0x000001B4)
+#define RKISP1_CIF_ISP_OUT_V_SIZE_SHD		(RKISP1_CIF_ISP_BASE + 0x000001B8)
+#define RKISP1_CIF_ISP_IMSC			(RKISP1_CIF_ISP_BASE + 0x000001BC)
+#define RKISP1_CIF_ISP_RIS			(RKISP1_CIF_ISP_BASE + 0x000001C0)
+#define RKISP1_CIF_ISP_MIS			(RKISP1_CIF_ISP_BASE + 0x000001C4)
+#define RKISP1_CIF_ISP_ICR			(RKISP1_CIF_ISP_BASE + 0x000001C8)
+#define RKISP1_CIF_ISP_ISR			(RKISP1_CIF_ISP_BASE + 0x000001CC)
+#define RKISP1_CIF_ISP_CT_COEFF_0		(RKISP1_CIF_ISP_BASE + 0x000001D0)
+#define RKISP1_CIF_ISP_CT_COEFF_1		(RKISP1_CIF_ISP_BASE + 0x000001D4)
+#define RKISP1_CIF_ISP_CT_COEFF_2		(RKISP1_CIF_ISP_BASE + 0x000001D8)
+#define RKISP1_CIF_ISP_CT_COEFF_3		(RKISP1_CIF_ISP_BASE + 0x000001DC)
+#define RKISP1_CIF_ISP_CT_COEFF_4		(RKISP1_CIF_ISP_BASE + 0x000001E0)
+#define RKISP1_CIF_ISP_CT_COEFF_5		(RKISP1_CIF_ISP_BASE + 0x000001E4)
+#define RKISP1_CIF_ISP_CT_COEFF_6		(RKISP1_CIF_ISP_BASE + 0x000001E8)
+#define RKISP1_CIF_ISP_CT_COEFF_7		(RKISP1_CIF_ISP_BASE + 0x000001EC)
+#define RKISP1_CIF_ISP_CT_COEFF_8		(RKISP1_CIF_ISP_BASE + 0x000001F0)
+#define RKISP1_CIF_ISP_GAMMA_OUT_MODE		(RKISP1_CIF_ISP_BASE + 0x000001F4)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0		(RKISP1_CIF_ISP_BASE + 0x000001F8)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_1		(RKISP1_CIF_ISP_BASE + 0x000001FC)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_2		(RKISP1_CIF_ISP_BASE + 0x00000200)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_3		(RKISP1_CIF_ISP_BASE + 0x00000204)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_4		(RKISP1_CIF_ISP_BASE + 0x00000208)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_5		(RKISP1_CIF_ISP_BASE + 0x0000020C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_6		(RKISP1_CIF_ISP_BASE + 0x00000210)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_7		(RKISP1_CIF_ISP_BASE + 0x00000214)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_8		(RKISP1_CIF_ISP_BASE + 0x00000218)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_9		(RKISP1_CIF_ISP_BASE + 0x0000021C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_10		(RKISP1_CIF_ISP_BASE + 0x00000220)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_11		(RKISP1_CIF_ISP_BASE + 0x00000224)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_12		(RKISP1_CIF_ISP_BASE + 0x00000228)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_13		(RKISP1_CIF_ISP_BASE + 0x0000022C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_14		(RKISP1_CIF_ISP_BASE + 0x00000230)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_15		(RKISP1_CIF_ISP_BASE + 0x00000234)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_16		(RKISP1_CIF_ISP_BASE + 0x00000238)
+#define RKISP1_CIF_ISP_ERR			(RKISP1_CIF_ISP_BASE + 0x0000023C)
+#define RKISP1_CIF_ISP_ERR_CLR			(RKISP1_CIF_ISP_BASE + 0x00000240)
+#define RKISP1_CIF_ISP_FRAME_COUNT		(RKISP1_CIF_ISP_BASE + 0x00000244)
+#define RKISP1_CIF_ISP_CT_OFFSET_R		(RKISP1_CIF_ISP_BASE + 0x00000248)
+#define RKISP1_CIF_ISP_CT_OFFSET_G		(RKISP1_CIF_ISP_BASE + 0x0000024C)
+#define RKISP1_CIF_ISP_CT_OFFSET_B		(RKISP1_CIF_ISP_BASE + 0x00000250)
+
+#define RKISP1_CIF_ISP_FLASH_BASE		0x00000660
+#define RKISP1_CIF_ISP_FLASH_CMD		(RKISP1_CIF_ISP_FLASH_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_FLASH_CONFIG		(RKISP1_CIF_ISP_FLASH_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_FLASH_PREDIV		(RKISP1_CIF_ISP_FLASH_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_FLASH_DELAY		(RKISP1_CIF_ISP_FLASH_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_FLASH_TIME		(RKISP1_CIF_ISP_FLASH_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_FLASH_MAXP		(RKISP1_CIF_ISP_FLASH_BASE + 0x00000014)
+
+#define RKISP1_CIF_ISP_SH_BASE			0x00000680
+#define RKISP1_CIF_ISP_SH_CTRL			(RKISP1_CIF_ISP_SH_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_SH_PREDIV		(RKISP1_CIF_ISP_SH_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_SH_DELAY			(RKISP1_CIF_ISP_SH_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_SH_TIME			(RKISP1_CIF_ISP_SH_BASE + 0x0000000C)
+
+#define RKISP1_CIF_C_PROC_BASE			0x00000800
+#define RKISP1_CIF_C_PROC_CTRL			(RKISP1_CIF_C_PROC_BASE + 0x00000000)
+#define RKISP1_CIF_C_PROC_CONTRAST		(RKISP1_CIF_C_PROC_BASE + 0x00000004)
+#define RKISP1_CIF_C_PROC_BRIGHTNESS		(RKISP1_CIF_C_PROC_BASE + 0x00000008)
+#define RKISP1_CIF_C_PROC_SATURATION		(RKISP1_CIF_C_PROC_BASE + 0x0000000C)
+#define RKISP1_CIF_C_PROC_HUE			(RKISP1_CIF_C_PROC_BASE + 0x00000010)
+
+#define RKISP1_CIF_DUAL_CROP_BASE		0x00000880
+#define RKISP1_CIF_DUAL_CROP_CTRL		(RKISP1_CIF_DUAL_CROP_BASE + 0x00000000)
+#define RKISP1_CIF_DUAL_CROP_M_H_OFFS		(RKISP1_CIF_DUAL_CROP_BASE + 0x00000004)
+#define RKISP1_CIF_DUAL_CROP_M_V_OFFS		(RKISP1_CIF_DUAL_CROP_BASE + 0x00000008)
+#define RKISP1_CIF_DUAL_CROP_M_H_SIZE		(RKISP1_CIF_DUAL_CROP_BASE + 0x0000000C)
+#define RKISP1_CIF_DUAL_CROP_M_V_SIZE		(RKISP1_CIF_DUAL_CROP_BASE + 0x00000010)
+#define RKISP1_CIF_DUAL_CROP_S_H_OFFS		(RKISP1_CIF_DUAL_CROP_BASE + 0x00000014)
+#define RKISP1_CIF_DUAL_CROP_S_V_OFFS		(RKISP1_CIF_DUAL_CROP_BASE + 0x00000018)
+#define RKISP1_CIF_DUAL_CROP_S_H_SIZE		(RKISP1_CIF_DUAL_CROP_BASE + 0x0000001C)
+#define RKISP1_CIF_DUAL_CROP_S_V_SIZE		(RKISP1_CIF_DUAL_CROP_BASE + 0x00000020)
+#define RKISP1_CIF_DUAL_CROP_M_H_OFFS_SHD	(RKISP1_CIF_DUAL_CROP_BASE + 0x00000024)
+#define RKISP1_CIF_DUAL_CROP_M_V_OFFS_SHD	(RKISP1_CIF_DUAL_CROP_BASE + 0x00000028)
+#define RKISP1_CIF_DUAL_CROP_M_H_SIZE_SHD	(RKISP1_CIF_DUAL_CROP_BASE + 0x0000002C)
+#define RKISP1_CIF_DUAL_CROP_M_V_SIZE_SHD	(RKISP1_CIF_DUAL_CROP_BASE + 0x00000030)
+#define RKISP1_CIF_DUAL_CROP_S_H_OFFS_SHD	(RKISP1_CIF_DUAL_CROP_BASE + 0x00000034)
+#define RKISP1_CIF_DUAL_CROP_S_V_OFFS_SHD	(RKISP1_CIF_DUAL_CROP_BASE + 0x00000038)
+#define RKISP1_CIF_DUAL_CROP_S_H_SIZE_SHD	(RKISP1_CIF_DUAL_CROP_BASE + 0x0000003C)
+#define RKISP1_CIF_DUAL_CROP_S_V_SIZE_SHD	(RKISP1_CIF_DUAL_CROP_BASE + 0x00000040)
+
+#define RKISP1_CIF_MRSZ_BASE			0x00000C00
+#define RKISP1_CIF_MRSZ_CTRL			(RKISP1_CIF_MRSZ_BASE + 0x00000000)
+#define RKISP1_CIF_MRSZ_SCALE_HY		(RKISP1_CIF_MRSZ_BASE + 0x00000004)
+#define RKISP1_CIF_MRSZ_SCALE_HCB		(RKISP1_CIF_MRSZ_BASE + 0x00000008)
+#define RKISP1_CIF_MRSZ_SCALE_HCR		(RKISP1_CIF_MRSZ_BASE + 0x0000000C)
+#define RKISP1_CIF_MRSZ_SCALE_VY		(RKISP1_CIF_MRSZ_BASE + 0x00000010)
+#define RKISP1_CIF_MRSZ_SCALE_VC		(RKISP1_CIF_MRSZ_BASE + 0x00000014)
+#define RKISP1_CIF_MRSZ_PHASE_HY		(RKISP1_CIF_MRSZ_BASE + 0x00000018)
+#define RKISP1_CIF_MRSZ_PHASE_HC		(RKISP1_CIF_MRSZ_BASE + 0x0000001C)
+#define RKISP1_CIF_MRSZ_PHASE_VY		(RKISP1_CIF_MRSZ_BASE + 0x00000020)
+#define RKISP1_CIF_MRSZ_PHASE_VC		(RKISP1_CIF_MRSZ_BASE + 0x00000024)
+#define RKISP1_CIF_MRSZ_SCALE_LUT_ADDR		(RKISP1_CIF_MRSZ_BASE + 0x00000028)
+#define RKISP1_CIF_MRSZ_SCALE_LUT		(RKISP1_CIF_MRSZ_BASE + 0x0000002C)
+#define RKISP1_CIF_MRSZ_CTRL_SHD		(RKISP1_CIF_MRSZ_BASE + 0x00000030)
+#define RKISP1_CIF_MRSZ_SCALE_HY_SHD		(RKISP1_CIF_MRSZ_BASE + 0x00000034)
+#define RKISP1_CIF_MRSZ_SCALE_HCB_SHD		(RKISP1_CIF_MRSZ_BASE + 0x00000038)
+#define RKISP1_CIF_MRSZ_SCALE_HCR_SHD		(RKISP1_CIF_MRSZ_BASE + 0x0000003C)
+#define RKISP1_CIF_MRSZ_SCALE_VY_SHD		(RKISP1_CIF_MRSZ_BASE + 0x00000040)
+#define RKISP1_CIF_MRSZ_SCALE_VC_SHD		(RKISP1_CIF_MRSZ_BASE + 0x00000044)
+#define RKISP1_CIF_MRSZ_PHASE_HY_SHD		(RKISP1_CIF_MRSZ_BASE + 0x00000048)
+#define RKISP1_CIF_MRSZ_PHASE_HC_SHD		(RKISP1_CIF_MRSZ_BASE + 0x0000004C)
+#define RKISP1_CIF_MRSZ_PHASE_VY_SHD		(RKISP1_CIF_MRSZ_BASE + 0x00000050)
+#define RKISP1_CIF_MRSZ_PHASE_VC_SHD		(RKISP1_CIF_MRSZ_BASE + 0x00000054)
+
+#define RKISP1_CIF_SRSZ_BASE			0x00001000
+#define RKISP1_CIF_SRSZ_CTRL			(RKISP1_CIF_SRSZ_BASE + 0x00000000)
+#define RKISP1_CIF_SRSZ_SCALE_HY		(RKISP1_CIF_SRSZ_BASE + 0x00000004)
+#define RKISP1_CIF_SRSZ_SCALE_HCB		(RKISP1_CIF_SRSZ_BASE + 0x00000008)
+#define RKISP1_CIF_SRSZ_SCALE_HCR		(RKISP1_CIF_SRSZ_BASE + 0x0000000C)
+#define RKISP1_CIF_SRSZ_SCALE_VY		(RKISP1_CIF_SRSZ_BASE + 0x00000010)
+#define RKISP1_CIF_SRSZ_SCALE_VC		(RKISP1_CIF_SRSZ_BASE + 0x00000014)
+#define RKISP1_CIF_SRSZ_PHASE_HY		(RKISP1_CIF_SRSZ_BASE + 0x00000018)
+#define RKISP1_CIF_SRSZ_PHASE_HC		(RKISP1_CIF_SRSZ_BASE + 0x0000001C)
+#define RKISP1_CIF_SRSZ_PHASE_VY		(RKISP1_CIF_SRSZ_BASE + 0x00000020)
+#define RKISP1_CIF_SRSZ_PHASE_VC		(RKISP1_CIF_SRSZ_BASE + 0x00000024)
+#define RKISP1_CIF_SRSZ_SCALE_LUT_ADDR		(RKISP1_CIF_SRSZ_BASE + 0x00000028)
+#define RKISP1_CIF_SRSZ_SCALE_LUT		(RKISP1_CIF_SRSZ_BASE + 0x0000002C)
+#define RKISP1_CIF_SRSZ_CTRL_SHD		(RKISP1_CIF_SRSZ_BASE + 0x00000030)
+#define RKISP1_CIF_SRSZ_SCALE_HY_SHD		(RKISP1_CIF_SRSZ_BASE + 0x00000034)
+#define RKISP1_CIF_SRSZ_SCALE_HCB_SHD		(RKISP1_CIF_SRSZ_BASE + 0x00000038)
+#define RKISP1_CIF_SRSZ_SCALE_HCR_SHD		(RKISP1_CIF_SRSZ_BASE + 0x0000003C)
+#define RKISP1_CIF_SRSZ_SCALE_VY_SHD		(RKISP1_CIF_SRSZ_BASE + 0x00000040)
+#define RKISP1_CIF_SRSZ_SCALE_VC_SHD		(RKISP1_CIF_SRSZ_BASE + 0x00000044)
+#define RKISP1_CIF_SRSZ_PHASE_HY_SHD		(RKISP1_CIF_SRSZ_BASE + 0x00000048)
+#define RKISP1_CIF_SRSZ_PHASE_HC_SHD		(RKISP1_CIF_SRSZ_BASE + 0x0000004C)
+#define RKISP1_CIF_SRSZ_PHASE_VY_SHD		(RKISP1_CIF_SRSZ_BASE + 0x00000050)
+#define RKISP1_CIF_SRSZ_PHASE_VC_SHD		(RKISP1_CIF_SRSZ_BASE + 0x00000054)
+
+#define RKISP1_CIF_MI_BASE			0x00001400
+#define RKISP1_CIF_MI_CTRL			(RKISP1_CIF_MI_BASE + 0x00000000)
+#define RKISP1_CIF_MI_INIT			(RKISP1_CIF_MI_BASE + 0x00000004)
+#define RKISP1_CIF_MI_MP_Y_BASE_AD_INIT		(RKISP1_CIF_MI_BASE + 0x00000008)
+#define RKISP1_CIF_MI_MP_Y_SIZE_INIT		(RKISP1_CIF_MI_BASE + 0x0000000C)
+#define RKISP1_CIF_MI_MP_Y_OFFS_CNT_INIT	(RKISP1_CIF_MI_BASE + 0x00000010)
+#define RKISP1_CIF_MI_MP_Y_OFFS_CNT_START	(RKISP1_CIF_MI_BASE + 0x00000014)
+#define RKISP1_CIF_MI_MP_Y_IRQ_OFFS_INIT	(RKISP1_CIF_MI_BASE + 0x00000018)
+#define RKISP1_CIF_MI_MP_CB_BASE_AD_INIT	(RKISP1_CIF_MI_BASE + 0x0000001C)
+#define RKISP1_CIF_MI_MP_CB_SIZE_INIT		(RKISP1_CIF_MI_BASE + 0x00000020)
+#define RKISP1_CIF_MI_MP_CB_OFFS_CNT_INIT	(RKISP1_CIF_MI_BASE + 0x00000024)
+#define RKISP1_CIF_MI_MP_CB_OFFS_CNT_START	(RKISP1_CIF_MI_BASE + 0x00000028)
+#define RKISP1_CIF_MI_MP_CR_BASE_AD_INIT	(RKISP1_CIF_MI_BASE + 0x0000002C)
+#define RKISP1_CIF_MI_MP_CR_SIZE_INIT		(RKISP1_CIF_MI_BASE + 0x00000030)
+#define RKISP1_CIF_MI_MP_CR_OFFS_CNT_INIT	(RKISP1_CIF_MI_BASE + 0x00000034)
+#define RKISP1_CIF_MI_MP_CR_OFFS_CNT_START	(RKISP1_CIF_MI_BASE + 0x00000038)
+#define RKISP1_CIF_MI_SP_Y_BASE_AD_INIT		(RKISP1_CIF_MI_BASE + 0x0000003C)
+#define RKISP1_CIF_MI_SP_Y_SIZE_INIT		(RKISP1_CIF_MI_BASE + 0x00000040)
+#define RKISP1_CIF_MI_SP_Y_OFFS_CNT_INIT	(RKISP1_CIF_MI_BASE + 0x00000044)
+#define RKISP1_CIF_MI_SP_Y_OFFS_CNT_START	(RKISP1_CIF_MI_BASE + 0x00000048)
+#define RKISP1_CIF_MI_SP_Y_LLENGTH		(RKISP1_CIF_MI_BASE + 0x0000004C)
+#define RKISP1_CIF_MI_SP_CB_BASE_AD_INIT	(RKISP1_CIF_MI_BASE + 0x00000050)
+#define RKISP1_CIF_MI_SP_CB_SIZE_INIT		(RKISP1_CIF_MI_BASE + 0x00000054)
+#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_INIT	(RKISP1_CIF_MI_BASE + 0x00000058)
+#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_START	(RKISP1_CIF_MI_BASE + 0x0000005C)
+#define RKISP1_CIF_MI_SP_CR_BASE_AD_INIT	(RKISP1_CIF_MI_BASE + 0x00000060)
+#define RKISP1_CIF_MI_SP_CR_SIZE_INIT		(RKISP1_CIF_MI_BASE + 0x00000064)
+#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_INIT	(RKISP1_CIF_MI_BASE + 0x00000068)
+#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_START	(RKISP1_CIF_MI_BASE + 0x0000006C)
+#define RKISP1_CIF_MI_BYTE_CNT			(RKISP1_CIF_MI_BASE + 0x00000070)
+#define RKISP1_CIF_MI_CTRL_SHD			(RKISP1_CIF_MI_BASE + 0x00000074)
+#define RKISP1_CIF_MI_MP_Y_BASE_AD_SHD		(RKISP1_CIF_MI_BASE + 0x00000078)
+#define RKISP1_CIF_MI_MP_Y_SIZE_SHD		(RKISP1_CIF_MI_BASE + 0x0000007C)
+#define RKISP1_CIF_MI_MP_Y_OFFS_CNT_SHD		(RKISP1_CIF_MI_BASE + 0x00000080)
+#define RKISP1_CIF_MI_MP_Y_IRQ_OFFS_SHD		(RKISP1_CIF_MI_BASE + 0x00000084)
+#define RKISP1_CIF_MI_MP_CB_BASE_AD_SHD		(RKISP1_CIF_MI_BASE + 0x00000088)
+#define RKISP1_CIF_MI_MP_CB_SIZE_SHD		(RKISP1_CIF_MI_BASE + 0x0000008C)
+#define RKISP1_CIF_MI_MP_CB_OFFS_CNT_SHD	(RKISP1_CIF_MI_BASE + 0x00000090)
+#define RKISP1_CIF_MI_MP_CR_BASE_AD_SHD		(RKISP1_CIF_MI_BASE + 0x00000094)
+#define RKISP1_CIF_MI_MP_CR_SIZE_SHD		(RKISP1_CIF_MI_BASE + 0x00000098)
+#define RKISP1_CIF_MI_MP_CR_OFFS_CNT_SHD	(RKISP1_CIF_MI_BASE + 0x0000009C)
+#define RKISP1_CIF_MI_SP_Y_BASE_AD_SHD		(RKISP1_CIF_MI_BASE + 0x000000A0)
+#define RKISP1_CIF_MI_SP_Y_SIZE_SHD		(RKISP1_CIF_MI_BASE + 0x000000A4)
+#define RKISP1_CIF_MI_SP_Y_OFFS_CNT_SHD		(RKISP1_CIF_MI_BASE + 0x000000A8)
+#define RKISP1_CIF_MI_SP_CB_BASE_AD_SHD		(RKISP1_CIF_MI_BASE + 0x000000B0)
+#define RKISP1_CIF_MI_SP_CB_SIZE_SHD		(RKISP1_CIF_MI_BASE + 0x000000B4)
+#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_SHD	(RKISP1_CIF_MI_BASE + 0x000000B8)
+#define RKISP1_CIF_MI_SP_CR_BASE_AD_SHD		(RKISP1_CIF_MI_BASE + 0x000000BC)
+#define RKISP1_CIF_MI_SP_CR_SIZE_SHD		(RKISP1_CIF_MI_BASE + 0x000000C0)
+#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_SHD	(RKISP1_CIF_MI_BASE + 0x000000C4)
+#define RKISP1_CIF_MI_DMA_Y_PIC_START_AD	(RKISP1_CIF_MI_BASE + 0x000000C8)
+#define RKISP1_CIF_MI_DMA_Y_PIC_WIDTH		(RKISP1_CIF_MI_BASE + 0x000000CC)
+#define RKISP1_CIF_MI_DMA_Y_LLENGTH		(RKISP1_CIF_MI_BASE + 0x000000D0)
+#define RKISP1_CIF_MI_DMA_Y_PIC_SIZE		(RKISP1_CIF_MI_BASE + 0x000000D4)
+#define RKISP1_CIF_MI_DMA_CB_PIC_START_AD	(RKISP1_CIF_MI_BASE + 0x000000D8)
+#define RKISP1_CIF_MI_DMA_CR_PIC_START_AD	(RKISP1_CIF_MI_BASE + 0x000000E8)
+#define RKISP1_CIF_MI_IMSC			(RKISP1_CIF_MI_BASE + 0x000000F8)
+#define RKISP1_CIF_MI_RIS			(RKISP1_CIF_MI_BASE + 0x000000FC)
+#define RKISP1_CIF_MI_MIS			(RKISP1_CIF_MI_BASE + 0x00000100)
+#define RKISP1_CIF_MI_ICR			(RKISP1_CIF_MI_BASE + 0x00000104)
+#define RKISP1_CIF_MI_ISR			(RKISP1_CIF_MI_BASE + 0x00000108)
+#define RKISP1_CIF_MI_STATUS			(RKISP1_CIF_MI_BASE + 0x0000010C)
+#define RKISP1_CIF_MI_STATUS_CLR		(RKISP1_CIF_MI_BASE + 0x00000110)
+#define RKISP1_CIF_MI_SP_Y_PIC_WIDTH		(RKISP1_CIF_MI_BASE + 0x00000114)
+#define RKISP1_CIF_MI_SP_Y_PIC_HEIGHT		(RKISP1_CIF_MI_BASE + 0x00000118)
+#define RKISP1_CIF_MI_SP_Y_PIC_SIZE		(RKISP1_CIF_MI_BASE + 0x0000011C)
+#define RKISP1_CIF_MI_DMA_CTRL			(RKISP1_CIF_MI_BASE + 0x00000120)
+#define RKISP1_CIF_MI_DMA_START			(RKISP1_CIF_MI_BASE + 0x00000124)
+#define RKISP1_CIF_MI_DMA_STATUS		(RKISP1_CIF_MI_BASE + 0x00000128)
+#define RKISP1_CIF_MI_PIXEL_COUNT		(RKISP1_CIF_MI_BASE + 0x0000012C)
+#define RKISP1_CIF_MI_MP_Y_BASE_AD_INIT2	(RKISP1_CIF_MI_BASE + 0x00000130)
+#define RKISP1_CIF_MI_MP_CB_BASE_AD_INIT2	(RKISP1_CIF_MI_BASE + 0x00000134)
+#define RKISP1_CIF_MI_MP_CR_BASE_AD_INIT2	(RKISP1_CIF_MI_BASE + 0x00000138)
+#define RKISP1_CIF_MI_SP_Y_BASE_AD_INIT2	(RKISP1_CIF_MI_BASE + 0x0000013C)
+#define RKISP1_CIF_MI_SP_CB_BASE_AD_INIT2	(RKISP1_CIF_MI_BASE + 0x00000140)
+#define RKISP1_CIF_MI_SP_CR_BASE_AD_INIT2	(RKISP1_CIF_MI_BASE + 0x00000144)
+#define RKISP1_CIF_MI_XTD_FORMAT_CTRL		(RKISP1_CIF_MI_BASE + 0x00000148)
+
+#define RKISP1_CIF_SMIA_BASE			0x00001A00
+#define RKISP1_CIF_SMIA_CTRL			(RKISP1_CIF_SMIA_BASE + 0x00000000)
+#define RKISP1_CIF_SMIA_STATUS			(RKISP1_CIF_SMIA_BASE + 0x00000004)
+#define RKISP1_CIF_SMIA_IMSC			(RKISP1_CIF_SMIA_BASE + 0x00000008)
+#define RKISP1_CIF_SMIA_RIS			(RKISP1_CIF_SMIA_BASE + 0x0000000C)
+#define RKISP1_CIF_SMIA_MIS			(RKISP1_CIF_SMIA_BASE + 0x00000010)
+#define RKISP1_CIF_SMIA_ICR			(RKISP1_CIF_SMIA_BASE + 0x00000014)
+#define RKISP1_CIF_SMIA_ISR			(RKISP1_CIF_SMIA_BASE + 0x00000018)
+#define RKISP1_CIF_SMIA_DATA_FORMAT_SEL		(RKISP1_CIF_SMIA_BASE + 0x0000001C)
+#define RKISP1_CIF_SMIA_SOF_EMB_DATA_LINES	(RKISP1_CIF_SMIA_BASE + 0x00000020)
+#define RKISP1_CIF_SMIA_EMB_HSTART		(RKISP1_CIF_SMIA_BASE + 0x00000024)
+#define RKISP1_CIF_SMIA_EMB_HSIZE		(RKISP1_CIF_SMIA_BASE + 0x00000028)
+#define RKISP1_CIF_SMIA_EMB_VSTART		(RKISP1_CIF_SMIA_BASE + 0x0000002c)
+#define RKISP1_CIF_SMIA_NUM_LINES		(RKISP1_CIF_SMIA_BASE + 0x00000030)
+#define RKISP1_CIF_SMIA_EMB_DATA_FIFO		(RKISP1_CIF_SMIA_BASE + 0x00000034)
+#define RKISP1_CIF_SMIA_EMB_DATA_WATERMARK	(RKISP1_CIF_SMIA_BASE + 0x00000038)
+
+#define RKISP1_CIF_MIPI_BASE			0x00001C00
+#define RKISP1_CIF_MIPI_CTRL			(RKISP1_CIF_MIPI_BASE + 0x00000000)
+#define RKISP1_CIF_MIPI_STATUS			(RKISP1_CIF_MIPI_BASE + 0x00000004)
+#define RKISP1_CIF_MIPI_IMSC			(RKISP1_CIF_MIPI_BASE + 0x00000008)
+#define RKISP1_CIF_MIPI_RIS			(RKISP1_CIF_MIPI_BASE + 0x0000000C)
+#define RKISP1_CIF_MIPI_MIS			(RKISP1_CIF_MIPI_BASE + 0x00000010)
+#define RKISP1_CIF_MIPI_ICR			(RKISP1_CIF_MIPI_BASE + 0x00000014)
+#define RKISP1_CIF_MIPI_ISR			(RKISP1_CIF_MIPI_BASE + 0x00000018)
+#define RKISP1_CIF_MIPI_CUR_DATA_ID		(RKISP1_CIF_MIPI_BASE + 0x0000001C)
+#define RKISP1_CIF_MIPI_IMG_DATA_SEL		(RKISP1_CIF_MIPI_BASE + 0x00000020)
+#define RKISP1_CIF_MIPI_ADD_DATA_SEL_1		(RKISP1_CIF_MIPI_BASE + 0x00000024)
+#define RKISP1_CIF_MIPI_ADD_DATA_SEL_2		(RKISP1_CIF_MIPI_BASE + 0x00000028)
+#define RKISP1_CIF_MIPI_ADD_DATA_SEL_3		(RKISP1_CIF_MIPI_BASE + 0x0000002C)
+#define RKISP1_CIF_MIPI_ADD_DATA_SEL_4		(RKISP1_CIF_MIPI_BASE + 0x00000030)
+#define RKISP1_CIF_MIPI_ADD_DATA_FIFO		(RKISP1_CIF_MIPI_BASE + 0x00000034)
+#define RKISP1_CIF_MIPI_FIFO_FILL_LEVEL		(RKISP1_CIF_MIPI_BASE + 0x00000038)
+#define RKISP1_CIF_MIPI_COMPRESSED_MODE		(RKISP1_CIF_MIPI_BASE + 0x0000003C)
+#define RKISP1_CIF_MIPI_FRAME			(RKISP1_CIF_MIPI_BASE + 0x00000040)
+#define RKISP1_CIF_MIPI_GEN_SHORT_DT		(RKISP1_CIF_MIPI_BASE + 0x00000044)
+#define RKISP1_CIF_MIPI_GEN_SHORT_8_9		(RKISP1_CIF_MIPI_BASE + 0x00000048)
+#define RKISP1_CIF_MIPI_GEN_SHORT_A_B		(RKISP1_CIF_MIPI_BASE + 0x0000004C)
+#define RKISP1_CIF_MIPI_GEN_SHORT_C_D		(RKISP1_CIF_MIPI_BASE + 0x00000050)
+#define RKISP1_CIF_MIPI_GEN_SHORT_E_F		(RKISP1_CIF_MIPI_BASE + 0x00000054)
+
+#define RKISP1_CIF_ISP_AFM_BASE			0x00002000
+#define RKISP1_CIF_ISP_AFM_CTRL			(RKISP1_CIF_ISP_AFM_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_AFM_LT_A			(RKISP1_CIF_ISP_AFM_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_AFM_RB_A			(RKISP1_CIF_ISP_AFM_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_AFM_LT_B			(RKISP1_CIF_ISP_AFM_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_AFM_RB_B			(RKISP1_CIF_ISP_AFM_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_AFM_LT_C			(RKISP1_CIF_ISP_AFM_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_AFM_RB_C			(RKISP1_CIF_ISP_AFM_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_AFM_THRES		(RKISP1_CIF_ISP_AFM_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_AFM_VAR_SHIFT		(RKISP1_CIF_ISP_AFM_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_AFM_SUM_A		(RKISP1_CIF_ISP_AFM_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_AFM_SUM_B		(RKISP1_CIF_ISP_AFM_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_AFM_SUM_C		(RKISP1_CIF_ISP_AFM_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_AFM_LUM_A		(RKISP1_CIF_ISP_AFM_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_AFM_LUM_B		(RKISP1_CIF_ISP_AFM_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_AFM_LUM_C		(RKISP1_CIF_ISP_AFM_BASE + 0x00000038)
+
+#define RKISP1_CIF_ISP_LSC_BASE			0x00002200
+#define RKISP1_CIF_ISP_LSC_CTRL			(RKISP1_CIF_ISP_LSC_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_LSC_R_TABLE_ADDR		(RKISP1_CIF_ISP_LSC_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR	(RKISP1_CIF_ISP_LSC_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_LSC_B_TABLE_ADDR		(RKISP1_CIF_ISP_LSC_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR	(RKISP1_CIF_ISP_LSC_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_LSC_R_TABLE_DATA		(RKISP1_CIF_ISP_LSC_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_LSC_GR_TABLE_DATA	(RKISP1_CIF_ISP_LSC_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_LSC_B_TABLE_DATA		(RKISP1_CIF_ISP_LSC_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_LSC_GB_TABLE_DATA	(RKISP1_CIF_ISP_LSC_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_LSC_XGRAD_01		(RKISP1_CIF_ISP_LSC_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_LSC_XGRAD_23		(RKISP1_CIF_ISP_LSC_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_LSC_XGRAD_45		(RKISP1_CIF_ISP_LSC_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_LSC_XGRAD_67		(RKISP1_CIF_ISP_LSC_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_LSC_YGRAD_01		(RKISP1_CIF_ISP_LSC_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_LSC_YGRAD_23		(RKISP1_CIF_ISP_LSC_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_LSC_YGRAD_45		(RKISP1_CIF_ISP_LSC_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_LSC_YGRAD_67		(RKISP1_CIF_ISP_LSC_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_LSC_XSIZE_01		(RKISP1_CIF_ISP_LSC_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_LSC_XSIZE_23		(RKISP1_CIF_ISP_LSC_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_LSC_XSIZE_45		(RKISP1_CIF_ISP_LSC_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_LSC_XSIZE_67		(RKISP1_CIF_ISP_LSC_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_LSC_YSIZE_01		(RKISP1_CIF_ISP_LSC_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_LSC_YSIZE_23		(RKISP1_CIF_ISP_LSC_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_LSC_YSIZE_45		(RKISP1_CIF_ISP_LSC_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_LSC_YSIZE_67		(RKISP1_CIF_ISP_LSC_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_LSC_TABLE_SEL		(RKISP1_CIF_ISP_LSC_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_LSC_STATUS		(RKISP1_CIF_ISP_LSC_BASE + 0x00000068)
+
+#define RKISP1_CIF_ISP_IS_BASE			0x00002300
+#define RKISP1_CIF_ISP_IS_CTRL			(RKISP1_CIF_ISP_IS_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_IS_RECENTER		(RKISP1_CIF_ISP_IS_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_IS_H_OFFS		(RKISP1_CIF_ISP_IS_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_IS_V_OFFS		(RKISP1_CIF_ISP_IS_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_IS_H_SIZE		(RKISP1_CIF_ISP_IS_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_IS_V_SIZE		(RKISP1_CIF_ISP_IS_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_IS_MAX_DX		(RKISP1_CIF_ISP_IS_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_IS_MAX_DY		(RKISP1_CIF_ISP_IS_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_IS_DISPLACE		(RKISP1_CIF_ISP_IS_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_IS_H_OFFS_SHD		(RKISP1_CIF_ISP_IS_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_IS_V_OFFS_SHD		(RKISP1_CIF_ISP_IS_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_IS_H_SIZE_SHD		(RKISP1_CIF_ISP_IS_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_IS_V_SIZE_SHD		(RKISP1_CIF_ISP_IS_BASE + 0x00000030)
+
+#define RKISP1_CIF_ISP_HIST_BASE		0x00002400
+
+#define RKISP1_CIF_ISP_HIST_PROP		(RKISP1_CIF_ISP_HIST_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_HIST_H_OFFS		(RKISP1_CIF_ISP_HIST_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_HIST_V_OFFS		(RKISP1_CIF_ISP_HIST_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_HIST_H_SIZE		(RKISP1_CIF_ISP_HIST_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_HIST_V_SIZE		(RKISP1_CIF_ISP_HIST_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_HIST_BIN_0		(RKISP1_CIF_ISP_HIST_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_HIST_BIN_1		(RKISP1_CIF_ISP_HIST_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_HIST_BIN_2		(RKISP1_CIF_ISP_HIST_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_HIST_BIN_3		(RKISP1_CIF_ISP_HIST_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_HIST_BIN_4		(RKISP1_CIF_ISP_HIST_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_HIST_BIN_5		(RKISP1_CIF_ISP_HIST_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_HIST_BIN_6		(RKISP1_CIF_ISP_HIST_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_HIST_BIN_7		(RKISP1_CIF_ISP_HIST_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_HIST_BIN_8		(RKISP1_CIF_ISP_HIST_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_HIST_BIN_9		(RKISP1_CIF_ISP_HIST_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_HIST_BIN_10		(RKISP1_CIF_ISP_HIST_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_HIST_BIN_11		(RKISP1_CIF_ISP_HIST_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_HIST_BIN_12		(RKISP1_CIF_ISP_HIST_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_HIST_BIN_13		(RKISP1_CIF_ISP_HIST_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_HIST_BIN_14		(RKISP1_CIF_ISP_HIST_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_HIST_BIN_15		(RKISP1_CIF_ISP_HIST_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_00TO30	(RKISP1_CIF_ISP_HIST_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_40TO21	(RKISP1_CIF_ISP_HIST_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_31TO12	(RKISP1_CIF_ISP_HIST_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_22TO03	(RKISP1_CIF_ISP_HIST_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_13TO43	(RKISP1_CIF_ISP_HIST_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_04TO34	(RKISP1_CIF_ISP_HIST_BASE + 0x00000068)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_44		(RKISP1_CIF_ISP_HIST_BASE + 0x0000006C)
+
+#define RKISP1_CIF_ISP_FILT_BASE		0x00002500
+#define RKISP1_CIF_ISP_FILT_MODE		(RKISP1_CIF_ISP_FILT_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_FILT_THRESH_BL0		(RKISP1_CIF_ISP_FILT_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_FILT_THRESH_BL1		(RKISP1_CIF_ISP_FILT_BASE + 0x0000002c)
+#define RKISP1_CIF_ISP_FILT_THRESH_SH0		(RKISP1_CIF_ISP_FILT_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_FILT_THRESH_SH1		(RKISP1_CIF_ISP_FILT_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_FILT_LUM_WEIGHT		(RKISP1_CIF_ISP_FILT_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_FILT_FAC_SH1		(RKISP1_CIF_ISP_FILT_BASE + 0x0000003c)
+#define RKISP1_CIF_ISP_FILT_FAC_SH0		(RKISP1_CIF_ISP_FILT_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_FILT_FAC_MID		(RKISP1_CIF_ISP_FILT_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_FILT_FAC_BL0		(RKISP1_CIF_ISP_FILT_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_FILT_FAC_BL1		(RKISP1_CIF_ISP_FILT_BASE + 0x0000004C)
+
+#define RKISP1_CIF_ISP_CAC_BASE			0x00002580
+#define RKISP1_CIF_ISP_CAC_CTRL			(RKISP1_CIF_ISP_CAC_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_CAC_COUNT_START		(RKISP1_CIF_ISP_CAC_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_CAC_A			(RKISP1_CIF_ISP_CAC_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_CAC_B			(RKISP1_CIF_ISP_CAC_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_CAC_C			(RKISP1_CIF_ISP_CAC_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_X_NORM			(RKISP1_CIF_ISP_CAC_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_Y_NORM			(RKISP1_CIF_ISP_CAC_BASE + 0x00000018)
+
+#define RKISP1_CIF_ISP_EXP_BASE			0x00002600
+#define RKISP1_CIF_ISP_EXP_CTRL			(RKISP1_CIF_ISP_EXP_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_EXP_H_OFFSET		(RKISP1_CIF_ISP_EXP_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_EXP_V_OFFSET		(RKISP1_CIF_ISP_EXP_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_EXP_H_SIZE		(RKISP1_CIF_ISP_EXP_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_EXP_V_SIZE		(RKISP1_CIF_ISP_EXP_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_EXP_MEAN_00		(RKISP1_CIF_ISP_EXP_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_EXP_MEAN_10		(RKISP1_CIF_ISP_EXP_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_EXP_MEAN_20		(RKISP1_CIF_ISP_EXP_BASE + 0x0000001c)
+#define RKISP1_CIF_ISP_EXP_MEAN_30		(RKISP1_CIF_ISP_EXP_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_EXP_MEAN_40		(RKISP1_CIF_ISP_EXP_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_EXP_MEAN_01		(RKISP1_CIF_ISP_EXP_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_EXP_MEAN_11		(RKISP1_CIF_ISP_EXP_BASE + 0x0000002c)
+#define RKISP1_CIF_ISP_EXP_MEAN_21		(RKISP1_CIF_ISP_EXP_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_EXP_MEAN_31		(RKISP1_CIF_ISP_EXP_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_EXP_MEAN_41		(RKISP1_CIF_ISP_EXP_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_EXP_MEAN_02		(RKISP1_CIF_ISP_EXP_BASE + 0x0000003c)
+#define RKISP1_CIF_ISP_EXP_MEAN_12		(RKISP1_CIF_ISP_EXP_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_EXP_MEAN_22		(RKISP1_CIF_ISP_EXP_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_EXP_MEAN_32		(RKISP1_CIF_ISP_EXP_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_EXP_MEAN_42		(RKISP1_CIF_ISP_EXP_BASE + 0x0000004c)
+#define RKISP1_CIF_ISP_EXP_MEAN_03		(RKISP1_CIF_ISP_EXP_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_EXP_MEAN_13		(RKISP1_CIF_ISP_EXP_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_EXP_MEAN_23		(RKISP1_CIF_ISP_EXP_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_EXP_MEAN_33		(RKISP1_CIF_ISP_EXP_BASE + 0x0000005c)
+#define RKISP1_CIF_ISP_EXP_MEAN_43		(RKISP1_CIF_ISP_EXP_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_EXP_MEAN_04		(RKISP1_CIF_ISP_EXP_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_EXP_MEAN_14		(RKISP1_CIF_ISP_EXP_BASE + 0x00000068)
+#define RKISP1_CIF_ISP_EXP_MEAN_24		(RKISP1_CIF_ISP_EXP_BASE + 0x0000006c)
+#define RKISP1_CIF_ISP_EXP_MEAN_34		(RKISP1_CIF_ISP_EXP_BASE + 0x00000070)
+#define RKISP1_CIF_ISP_EXP_MEAN_44		(RKISP1_CIF_ISP_EXP_BASE + 0x00000074)
+
+#define RKISP1_CIF_ISP_BLS_BASE			0x00002700
+#define RKISP1_CIF_ISP_BLS_CTRL			(RKISP1_CIF_ISP_BLS_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_BLS_SAMPLES		(RKISP1_CIF_ISP_BLS_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_BLS_H1_START		(RKISP1_CIF_ISP_BLS_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_BLS_H1_STOP		(RKISP1_CIF_ISP_BLS_BASE + 0x0000000c)
+#define RKISP1_CIF_ISP_BLS_V1_START		(RKISP1_CIF_ISP_BLS_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_BLS_V1_STOP		(RKISP1_CIF_ISP_BLS_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_BLS_H2_START		(RKISP1_CIF_ISP_BLS_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_BLS_H2_STOP		(RKISP1_CIF_ISP_BLS_BASE + 0x0000001c)
+#define RKISP1_CIF_ISP_BLS_V2_START		(RKISP1_CIF_ISP_BLS_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_BLS_V2_STOP		(RKISP1_CIF_ISP_BLS_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_BLS_A_FIXED		(RKISP1_CIF_ISP_BLS_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_BLS_B_FIXED		(RKISP1_CIF_ISP_BLS_BASE + 0x0000002c)
+#define RKISP1_CIF_ISP_BLS_C_FIXED		(RKISP1_CIF_ISP_BLS_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_BLS_D_FIXED		(RKISP1_CIF_ISP_BLS_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_BLS_A_MEASURED		(RKISP1_CIF_ISP_BLS_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_BLS_B_MEASURED		(RKISP1_CIF_ISP_BLS_BASE + 0x0000003c)
+#define RKISP1_CIF_ISP_BLS_C_MEASURED		(RKISP1_CIF_ISP_BLS_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_BLS_D_MEASURED		(RKISP1_CIF_ISP_BLS_BASE + 0x00000044)
+
+#define RKISP1_CIF_ISP_DPF_BASE			0x00002800
+#define RKISP1_CIF_ISP_DPF_MODE			(RKISP1_CIF_ISP_DPF_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_DPF_STRENGTH_R		(RKISP1_CIF_ISP_DPF_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_DPF_STRENGTH_G		(RKISP1_CIF_ISP_DPF_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_DPF_STRENGTH_B		(RKISP1_CIF_ISP_DPF_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_DPF_S_WEIGHT_G_1_4	(RKISP1_CIF_ISP_DPF_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_DPF_S_WEIGHT_G_5_6	(RKISP1_CIF_ISP_DPF_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_1_4	(RKISP1_CIF_ISP_DPF_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_5_6	(RKISP1_CIF_ISP_DPF_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_0		(RKISP1_CIF_ISP_DPF_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_1		(RKISP1_CIF_ISP_DPF_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_2		(RKISP1_CIF_ISP_DPF_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_3		(RKISP1_CIF_ISP_DPF_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_4		(RKISP1_CIF_ISP_DPF_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_5		(RKISP1_CIF_ISP_DPF_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_6		(RKISP1_CIF_ISP_DPF_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_7		(RKISP1_CIF_ISP_DPF_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_8		(RKISP1_CIF_ISP_DPF_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_9		(RKISP1_CIF_ISP_DPF_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_10	(RKISP1_CIF_ISP_DPF_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_11	(RKISP1_CIF_ISP_DPF_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_12	(RKISP1_CIF_ISP_DPF_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_13	(RKISP1_CIF_ISP_DPF_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_14	(RKISP1_CIF_ISP_DPF_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_15	(RKISP1_CIF_ISP_DPF_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_16	(RKISP1_CIF_ISP_DPF_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_DPF_NF_GAIN_R		(RKISP1_CIF_ISP_DPF_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_DPF_NF_GAIN_GR		(RKISP1_CIF_ISP_DPF_BASE + 0x00000068)
+#define RKISP1_CIF_ISP_DPF_NF_GAIN_GB		(RKISP1_CIF_ISP_DPF_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_DPF_NF_GAIN_B		(RKISP1_CIF_ISP_DPF_BASE + 0x00000070)
+
+#define RKISP1_CIF_ISP_DPCC_BASE		0x00002900
+#define RKISP1_CIF_ISP_DPCC_MODE		(RKISP1_CIF_ISP_DPCC_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE		(RKISP1_CIF_ISP_DPCC_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_DPCC_SET_USE		(RKISP1_CIF_ISP_DPCC_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_1	(RKISP1_CIF_ISP_DPCC_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_2	(RKISP1_CIF_ISP_DPCC_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_3	(RKISP1_CIF_ISP_DPCC_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_1	(RKISP1_CIF_ISP_DPCC_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_1	(RKISP1_CIF_ISP_DPCC_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_1		(RKISP1_CIF_ISP_DPCC_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_1	(RKISP1_CIF_ISP_DPCC_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_1		(RKISP1_CIF_ISP_DPCC_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_2	(RKISP1_CIF_ISP_DPCC_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_2	(RKISP1_CIF_ISP_DPCC_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_2		(RKISP1_CIF_ISP_DPCC_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_2	(RKISP1_CIF_ISP_DPCC_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_2		(RKISP1_CIF_ISP_DPCC_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_3	(RKISP1_CIF_ISP_DPCC_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_3	(RKISP1_CIF_ISP_DPCC_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_3		(RKISP1_CIF_ISP_DPCC_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_3	(RKISP1_CIF_ISP_DPCC_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_3		(RKISP1_CIF_ISP_DPCC_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_DPCC_RO_LIMITS		(RKISP1_CIF_ISP_DPCC_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS		(RKISP1_CIF_ISP_DPCC_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_DPCC_BPT_CTRL		(RKISP1_CIF_ISP_DPCC_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_DPCC_BPT_NUMBER		(RKISP1_CIF_ISP_DPCC_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_DPCC_BPT_ADDR		(RKISP1_CIF_ISP_DPCC_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_DPCC_BPT_DATA		(RKISP1_CIF_ISP_DPCC_BASE + 0x00000068)
+
+#define RKISP1_CIF_ISP_WDR_BASE			0x00002A00
+#define RKISP1_CIF_ISP_WDR_CTRL			(RKISP1_CIF_ISP_WDR_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_1		(RKISP1_CIF_ISP_WDR_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_2		(RKISP1_CIF_ISP_WDR_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_3		(RKISP1_CIF_ISP_WDR_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_4		(RKISP1_CIF_ISP_WDR_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_0	(RKISP1_CIF_ISP_WDR_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_1	(RKISP1_CIF_ISP_WDR_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_2	(RKISP1_CIF_ISP_WDR_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_3	(RKISP1_CIF_ISP_WDR_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_4	(RKISP1_CIF_ISP_WDR_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_5	(RKISP1_CIF_ISP_WDR_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_6	(RKISP1_CIF_ISP_WDR_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_7	(RKISP1_CIF_ISP_WDR_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_8	(RKISP1_CIF_ISP_WDR_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_9	(RKISP1_CIF_ISP_WDR_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_10	(RKISP1_CIF_ISP_WDR_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_11	(RKISP1_CIF_ISP_WDR_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_12	(RKISP1_CIF_ISP_WDR_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_13	(RKISP1_CIF_ISP_WDR_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_14	(RKISP1_CIF_ISP_WDR_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_15	(RKISP1_CIF_ISP_WDR_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_16	(RKISP1_CIF_ISP_WDR_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_17	(RKISP1_CIF_ISP_WDR_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_18	(RKISP1_CIF_ISP_WDR_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_19	(RKISP1_CIF_ISP_WDR_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_20	(RKISP1_CIF_ISP_WDR_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_21	(RKISP1_CIF_ISP_WDR_BASE + 0x00000068)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_22	(RKISP1_CIF_ISP_WDR_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_23	(RKISP1_CIF_ISP_WDR_BASE + 0x00000070)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_24	(RKISP1_CIF_ISP_WDR_BASE + 0x00000074)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_25	(RKISP1_CIF_ISP_WDR_BASE + 0x00000078)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_26	(RKISP1_CIF_ISP_WDR_BASE + 0x0000007C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_27	(RKISP1_CIF_ISP_WDR_BASE + 0x00000080)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_28	(RKISP1_CIF_ISP_WDR_BASE + 0x00000084)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_29	(RKISP1_CIF_ISP_WDR_BASE + 0x00000088)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_30	(RKISP1_CIF_ISP_WDR_BASE + 0x0000008C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31	(RKISP1_CIF_ISP_WDR_BASE + 0x00000090)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_32	(RKISP1_CIF_ISP_WDR_BASE + 0x00000094)
+#define RKISP1_CIF_ISP_WDR_OFFSET		(RKISP1_CIF_ISP_WDR_BASE + 0x00000098)
+#define RKISP1_CIF_ISP_WDR_DELTAMIN		(RKISP1_CIF_ISP_WDR_BASE + 0x0000009C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_1_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000A0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_2_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000A4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_3_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000A8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_4_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000AC)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_0_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000B0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_1_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000B4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_2_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000B8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_3_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000BC)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_4_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000C0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_5_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000C4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_6_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000C8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_7_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000CC)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_8_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000D0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_9_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000D4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_10_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000D8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_11_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000DC)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_12_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000E0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_13_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000E4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_14_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000E8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_15_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000EC)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_16_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000F0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_17_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000F4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_18_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000F8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_19_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x000000FC)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_20_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x00000100)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_21_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x00000104)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_22_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x00000108)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_23_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x0000010C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_24_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x00000110)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_25_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x00000114)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_26_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x00000118)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_27_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_28_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x00000120)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_29_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x00000124)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_30_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x00000128)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_32_SHD	(RKISP1_CIF_ISP_WDR_BASE + 0x00000130)
+
+#define RKISP1_CIF_ISP_VSM_BASE			0x00002F00
+#define RKISP1_CIF_ISP_VSM_MODE			(RKISP1_CIF_ISP_VSM_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_VSM_H_OFFS		(RKISP1_CIF_ISP_VSM_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_VSM_V_OFFS		(RKISP1_CIF_ISP_VSM_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_VSM_H_SIZE		(RKISP1_CIF_ISP_VSM_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_VSM_V_SIZE		(RKISP1_CIF_ISP_VSM_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_VSM_H_SEGMENTS		(RKISP1_CIF_ISP_VSM_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_VSM_V_SEGMENTS		(RKISP1_CIF_ISP_VSM_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_VSM_DELTA_H		(RKISP1_CIF_ISP_VSM_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_VSM_DELTA_V		(RKISP1_CIF_ISP_VSM_BASE + 0x00000020)
+
+#endif /* _RKISP1_REGS_H */
diff --git a/drivers/staging/media/rkisp1/rkisp1-resizer.c b/drivers/staging/media/rkisp1/rkisp1-resizer.c
new file mode 100644
index 0000000..8cdc29c
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-resizer.c
@@ -0,0 +1,775 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - V4l resizer device
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#include "rkisp1-common.h"
+
+#define RKISP1_RSZ_SP_DEV_NAME	RKISP1_DRIVER_NAME "_resizer_selfpath"
+#define RKISP1_RSZ_MP_DEV_NAME	RKISP1_DRIVER_NAME "_resizer_mainpath"
+
+#define RKISP1_DEF_FMT MEDIA_BUS_FMT_YUYV8_2X8
+#define RKISP1_DEF_FMT_TYPE RKISP1_FMT_YUV
+
+#define RKISP1_MBUS_FMT_HDIV 2
+#define RKISP1_MBUS_FMT_VDIV 1
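+/*
+ * Note: these divisors describe the chroma subsampling of the default
+ * YUYV8_2X8 (YUV 4:2:2) bus format above: chroma is halved horizontally
+ * and not subsampled vertically.
+ */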
+
+enum rkisp1_shadow_regs_when {
+	RKISP1_SHADOW_REGS_SYNC,
+	RKISP1_SHADOW_REGS_ASYNC,
+};
+
+struct rkisp1_rsz_config {
+	/* constraints */
+	const int max_rsz_width;
+	const int max_rsz_height;
+	const int min_rsz_width;
+	const int min_rsz_height;
+	/* registers */
+	struct {
+		u32 ctrl;
+		u32 ctrl_shd;
+		u32 scale_hy;
+		u32 scale_hcr;
+		u32 scale_hcb;
+		u32 scale_vy;
+		u32 scale_vc;
+		u32 scale_lut;
+		u32 scale_lut_addr;
+		u32 scale_hy_shd;
+		u32 scale_hcr_shd;
+		u32 scale_hcb_shd;
+		u32 scale_vy_shd;
+		u32 scale_vc_shd;
+		u32 phase_hy;
+		u32 phase_hc;
+		u32 phase_vy;
+		u32 phase_vc;
+		u32 phase_hy_shd;
+		u32 phase_hc_shd;
+		u32 phase_vy_shd;
+		u32 phase_vc_shd;
+	} rsz;
+	struct {
+		u32 ctrl;
+		u32 yuvmode_mask;
+		u32 rawmode_mask;
+		u32 h_offset;
+		u32 v_offset;
+		u32 h_size;
+		u32 v_size;
+	} dual_crop;
+};
+
+static const struct rkisp1_rsz_config rkisp1_rsz_config_mp = {
+	/* constraints */
+	.max_rsz_width = RKISP1_RSZ_MP_SRC_MAX_WIDTH,
+	.max_rsz_height = RKISP1_RSZ_MP_SRC_MAX_HEIGHT,
+	.min_rsz_width = RKISP1_RSZ_SRC_MIN_WIDTH,
+	.min_rsz_height = RKISP1_RSZ_SRC_MIN_HEIGHT,
+	/* registers */
+	.rsz = {
+		.ctrl =			RKISP1_CIF_MRSZ_CTRL,
+		.scale_hy =		RKISP1_CIF_MRSZ_SCALE_HY,
+		.scale_hcr =		RKISP1_CIF_MRSZ_SCALE_HCR,
+		.scale_hcb =		RKISP1_CIF_MRSZ_SCALE_HCB,
+		.scale_vy =		RKISP1_CIF_MRSZ_SCALE_VY,
+		.scale_vc =		RKISP1_CIF_MRSZ_SCALE_VC,
+		.scale_lut =		RKISP1_CIF_MRSZ_SCALE_LUT,
+		.scale_lut_addr =	RKISP1_CIF_MRSZ_SCALE_LUT_ADDR,
+		.scale_hy_shd =		RKISP1_CIF_MRSZ_SCALE_HY_SHD,
+		.scale_hcr_shd =	RKISP1_CIF_MRSZ_SCALE_HCR_SHD,
+		.scale_hcb_shd =	RKISP1_CIF_MRSZ_SCALE_HCB_SHD,
+		.scale_vy_shd =		RKISP1_CIF_MRSZ_SCALE_VY_SHD,
+		.scale_vc_shd =		RKISP1_CIF_MRSZ_SCALE_VC_SHD,
+		.phase_hy =		RKISP1_CIF_MRSZ_PHASE_HY,
+		.phase_hc =		RKISP1_CIF_MRSZ_PHASE_HC,
+		.phase_vy =		RKISP1_CIF_MRSZ_PHASE_VY,
+		.phase_vc =		RKISP1_CIF_MRSZ_PHASE_VC,
+		.ctrl_shd =		RKISP1_CIF_MRSZ_CTRL_SHD,
+		.phase_hy_shd =		RKISP1_CIF_MRSZ_PHASE_HY_SHD,
+		.phase_hc_shd =		RKISP1_CIF_MRSZ_PHASE_HC_SHD,
+		.phase_vy_shd =		RKISP1_CIF_MRSZ_PHASE_VY_SHD,
+		.phase_vc_shd =		RKISP1_CIF_MRSZ_PHASE_VC_SHD,
+	},
+	.dual_crop = {
+		.ctrl =			RKISP1_CIF_DUAL_CROP_CTRL,
+		.yuvmode_mask =		RKISP1_CIF_DUAL_CROP_MP_MODE_YUV,
+		.rawmode_mask =		RKISP1_CIF_DUAL_CROP_MP_MODE_RAW,
+		.h_offset =		RKISP1_CIF_DUAL_CROP_M_H_OFFS,
+		.v_offset =		RKISP1_CIF_DUAL_CROP_M_V_OFFS,
+		.h_size =		RKISP1_CIF_DUAL_CROP_M_H_SIZE,
+		.v_size =		RKISP1_CIF_DUAL_CROP_M_V_SIZE,
+	},
+};
+
+static const struct rkisp1_rsz_config rkisp1_rsz_config_sp = {
+	/* constraints */
+	.max_rsz_width = RKISP1_RSZ_SP_SRC_MAX_WIDTH,
+	.max_rsz_height = RKISP1_RSZ_SP_SRC_MAX_HEIGHT,
+	.min_rsz_width = RKISP1_RSZ_SRC_MIN_WIDTH,
+	.min_rsz_height = RKISP1_RSZ_SRC_MIN_HEIGHT,
+	/* registers */
+	.rsz = {
+		.ctrl =			RKISP1_CIF_SRSZ_CTRL,
+		.scale_hy =		RKISP1_CIF_SRSZ_SCALE_HY,
+		.scale_hcr =		RKISP1_CIF_SRSZ_SCALE_HCR,
+		.scale_hcb =		RKISP1_CIF_SRSZ_SCALE_HCB,
+		.scale_vy =		RKISP1_CIF_SRSZ_SCALE_VY,
+		.scale_vc =		RKISP1_CIF_SRSZ_SCALE_VC,
+		.scale_lut =		RKISP1_CIF_SRSZ_SCALE_LUT,
+		.scale_lut_addr =	RKISP1_CIF_SRSZ_SCALE_LUT_ADDR,
+		.scale_hy_shd =		RKISP1_CIF_SRSZ_SCALE_HY_SHD,
+		.scale_hcr_shd =	RKISP1_CIF_SRSZ_SCALE_HCR_SHD,
+		.scale_hcb_shd =	RKISP1_CIF_SRSZ_SCALE_HCB_SHD,
+		.scale_vy_shd =		RKISP1_CIF_SRSZ_SCALE_VY_SHD,
+		.scale_vc_shd =		RKISP1_CIF_SRSZ_SCALE_VC_SHD,
+		.phase_hy =		RKISP1_CIF_SRSZ_PHASE_HY,
+		.phase_hc =		RKISP1_CIF_SRSZ_PHASE_HC,
+		.phase_vy =		RKISP1_CIF_SRSZ_PHASE_VY,
+		.phase_vc =		RKISP1_CIF_SRSZ_PHASE_VC,
+		.ctrl_shd =		RKISP1_CIF_SRSZ_CTRL_SHD,
+		.phase_hy_shd =		RKISP1_CIF_SRSZ_PHASE_HY_SHD,
+		.phase_hc_shd =		RKISP1_CIF_SRSZ_PHASE_HC_SHD,
+		.phase_vy_shd =		RKISP1_CIF_SRSZ_PHASE_VY_SHD,
+		.phase_vc_shd =		RKISP1_CIF_SRSZ_PHASE_VC_SHD,
+	},
+	.dual_crop = {
+		.ctrl =			RKISP1_CIF_DUAL_CROP_CTRL,
+		.yuvmode_mask =		RKISP1_CIF_DUAL_CROP_SP_MODE_YUV,
+		.rawmode_mask =		RKISP1_CIF_DUAL_CROP_SP_MODE_RAW,
+		.h_offset =		RKISP1_CIF_DUAL_CROP_S_H_OFFS,
+		.v_offset =		RKISP1_CIF_DUAL_CROP_S_V_OFFS,
+		.h_size =		RKISP1_CIF_DUAL_CROP_S_H_SIZE,
+		.v_size =		RKISP1_CIF_DUAL_CROP_S_V_SIZE,
+	},
+};
+
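+/*
+ * Both the TRY and the ACTIVE cases go through the try helpers: the active
+ * formats and crop rectangles are kept in rsz->pad_cfg.
+ */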
+static struct v4l2_mbus_framefmt *
+rkisp1_rsz_get_pad_fmt(struct rkisp1_resizer *rsz,
+		       struct v4l2_subdev_pad_config *cfg,
+		       unsigned int pad, u32 which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(&rsz->sd, cfg, pad);
+	else
+		return v4l2_subdev_get_try_format(&rsz->sd, rsz->pad_cfg, pad);
+}
+
+static struct v4l2_rect *
+rkisp1_rsz_get_pad_crop(struct rkisp1_resizer *rsz,
+			struct v4l2_subdev_pad_config *cfg,
+			unsigned int pad, u32 which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_crop(&rsz->sd, cfg, pad);
+	else
+		return v4l2_subdev_get_try_crop(&rsz->sd, rsz->pad_cfg, pad);
+}
+
+/* ----------------------------------------------------------------------------
+ * Dual crop hw configs
+ */
+
+static void rkisp1_dcrop_disable(struct rkisp1_resizer *rsz,
+				 enum rkisp1_shadow_regs_when when)
+{
+	u32 dc_ctrl = rkisp1_read(rsz->rkisp1, rsz->config->dual_crop.ctrl);
+	u32 mask = ~(rsz->config->dual_crop.yuvmode_mask |
+		     rsz->config->dual_crop.rawmode_mask);
+
+	dc_ctrl &= mask;
+	if (when == RKISP1_SHADOW_REGS_ASYNC)
+		dc_ctrl |= RKISP1_CIF_DUAL_CROP_GEN_CFG_UPD;
+	else
+		dc_ctrl |= RKISP1_CIF_DUAL_CROP_CFG_UPD;
+	rkisp1_write(rsz->rkisp1, dc_ctrl, rsz->config->dual_crop.ctrl);
+}
+
+/* configure dual-crop unit */
+static void rkisp1_dcrop_config(struct rkisp1_resizer *rsz)
+{
+	struct rkisp1_device *rkisp1 = rsz->rkisp1;
+	struct v4l2_mbus_framefmt *sink_fmt;
+	struct v4l2_rect *sink_crop;
+	u32 dc_ctrl;
+
+	sink_crop = rkisp1_rsz_get_pad_crop(rsz, NULL, RKISP1_RSZ_PAD_SINK,
+					    V4L2_SUBDEV_FORMAT_ACTIVE);
+	sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SINK,
+					  V4L2_SUBDEV_FORMAT_ACTIVE);
+
+	if (sink_crop->width == sink_fmt->width &&
+	    sink_crop->height == sink_fmt->height &&
+	    sink_crop->left == 0 && sink_crop->top == 0) {
+		rkisp1_dcrop_disable(rsz, RKISP1_SHADOW_REGS_SYNC);
+		dev_dbg(rkisp1->dev, "capture %d crop disabled\n", rsz->id);
+		return;
+	}
+
+	dc_ctrl = rkisp1_read(rkisp1, rsz->config->dual_crop.ctrl);
+	rkisp1_write(rkisp1, sink_crop->left, rsz->config->dual_crop.h_offset);
+	rkisp1_write(rkisp1, sink_crop->top, rsz->config->dual_crop.v_offset);
+	rkisp1_write(rkisp1, sink_crop->width, rsz->config->dual_crop.h_size);
+	rkisp1_write(rkisp1, sink_crop->height, rsz->config->dual_crop.v_size);
+	dc_ctrl |= rsz->config->dual_crop.yuvmode_mask;
+	dc_ctrl |= RKISP1_CIF_DUAL_CROP_CFG_UPD;
+	rkisp1_write(rkisp1, dc_ctrl, rsz->config->dual_crop.ctrl);
+
+	dev_dbg(rkisp1->dev, "stream %d crop: %dx%d -> %dx%d\n", rsz->id,
+		sink_fmt->width, sink_fmt->height,
+		sink_crop->width, sink_crop->height);
+}
+
+/* ----------------------------------------------------------------------------
+ * Resizer hw configs
+ */
+
+static void rkisp1_rsz_dump_regs(struct rkisp1_resizer *rsz)
+{
+	dev_dbg(rsz->rkisp1->dev,
+		"RSZ_CTRL 0x%08x/0x%08x\n"
+		"RSZ_SCALE_HY %d/%d\n"
+		"RSZ_SCALE_HCB %d/%d\n"
+		"RSZ_SCALE_HCR %d/%d\n"
+		"RSZ_SCALE_VY %d/%d\n"
+		"RSZ_SCALE_VC %d/%d\n"
+		"RSZ_PHASE_HY %d/%d\n"
+		"RSZ_PHASE_HC %d/%d\n"
+		"RSZ_PHASE_VY %d/%d\n"
+		"RSZ_PHASE_VC %d/%d\n",
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.ctrl),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.ctrl_shd),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hy),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hy_shd),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcb),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcb_shd),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcr),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcr_shd),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vy),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vy_shd),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vc),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vc_shd),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hy),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hy_shd),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hc),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hc_shd),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vy),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vy_shd),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vc),
+		rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vc_shd));
+}
+
+static void rkisp1_rsz_update_shadow(struct rkisp1_resizer *rsz,
+				     enum rkisp1_shadow_regs_when when)
+{
+	u32 ctrl_cfg = rkisp1_read(rsz->rkisp1, rsz->config->rsz.ctrl);
+
+	if (when == RKISP1_SHADOW_REGS_ASYNC)
+		ctrl_cfg |= RKISP1_CIF_RSZ_CTRL_CFG_UPD_AUTO;
+	else
+		ctrl_cfg |= RKISP1_CIF_RSZ_CTRL_CFG_UPD;
+
+	rkisp1_write(rsz->rkisp1, ctrl_cfg, rsz->config->rsz.ctrl);
+}
+
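+/*
+ * The scaling ratio is expressed in RKISP1_CIF_RSZ_SCALER_FACTOR units and is
+ * computed from the number of steps between samples, hence the "- 1" on both
+ * the sink and source lengths.
+ */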
+static u32 rkisp1_rsz_calc_ratio(u32 len_sink, u32 len_src)
+{
+	if (len_sink < len_src)
+		return ((len_sink - 1) * RKISP1_CIF_RSZ_SCALER_FACTOR) /
+		       (len_src - 1);
+
+	return ((len_src - 1) * RKISP1_CIF_RSZ_SCALER_FACTOR) /
+	       (len_sink - 1) + 1;
+}
+
+static void rkisp1_rsz_disable(struct rkisp1_resizer *rsz,
+			       enum rkisp1_shadow_regs_when when)
+{
+	rkisp1_write(rsz->rkisp1, 0, rsz->config->rsz.ctrl);
+
+	if (when == RKISP1_SHADOW_REGS_SYNC)
+		rkisp1_rsz_update_shadow(rsz, when);
+}
+
+static void rkisp1_rsz_config_regs(struct rkisp1_resizer *rsz,
+				   struct v4l2_rect *sink_y,
+				   struct v4l2_rect *sink_c,
+				   struct v4l2_rect *src_y,
+				   struct v4l2_rect *src_c,
+				   enum rkisp1_shadow_regs_when when)
+{
+	struct rkisp1_device *rkisp1 = rsz->rkisp1;
+	u32 ratio, rsz_ctrl = 0;
+	unsigned int i;
+
+	/* No phase offset */
+	rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_hy);
+	rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_hc);
+	rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_vy);
+	rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_vc);
+
+	/* Linear interpolation */
+	for (i = 0; i < 64; i++) {
+		rkisp1_write(rkisp1, i, rsz->config->rsz.scale_lut_addr);
+		rkisp1_write(rkisp1, i, rsz->config->rsz.scale_lut);
+	}
+
+	if (sink_y->width != src_y->width) {
+		rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_HY_ENABLE;
+		if (sink_y->width < src_y->width)
+			rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_HY_UP;
+		ratio = rkisp1_rsz_calc_ratio(sink_y->width, src_y->width);
+		rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_hy);
+	}
+
+	if (sink_c->width != src_c->width) {
+		rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_HC_ENABLE;
+		if (sink_c->width < src_c->width)
+			rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_HC_UP;
+		ratio = rkisp1_rsz_calc_ratio(sink_c->width, src_c->width);
+		rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_hcb);
+		rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_hcr);
+	}
+
+	if (sink_y->height != src_y->height) {
+		rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_VY_ENABLE;
+		if (sink_y->height < src_y->height)
+			rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_VY_UP;
+		ratio = rkisp1_rsz_calc_ratio(sink_y->height, src_y->height);
+		rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_vy);
+	}
+
+	if (sink_c->height != src_c->height) {
+		rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_VC_ENABLE;
+		if (sink_c->height < src_c->height)
+			rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_VC_UP;
+		ratio = rkisp1_rsz_calc_ratio(sink_c->height, src_c->height);
+		rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_vc);
+	}
+
+	rkisp1_write(rkisp1, rsz_ctrl, rsz->config->rsz.ctrl);
+
+	rkisp1_rsz_update_shadow(rsz, when);
+}
+
+static void rkisp1_rsz_config(struct rkisp1_resizer *rsz,
+			      enum rkisp1_shadow_regs_when when)
+{
+	u8 hdiv = RKISP1_MBUS_FMT_HDIV, vdiv = RKISP1_MBUS_FMT_VDIV;
+	struct v4l2_rect sink_y, sink_c, src_y, src_c;
+	struct v4l2_mbus_framefmt *src_fmt;
+	struct v4l2_rect *sink_crop;
+
+	sink_crop = rkisp1_rsz_get_pad_crop(rsz, NULL, RKISP1_RSZ_PAD_SINK,
+					    V4L2_SUBDEV_FORMAT_ACTIVE);
+	src_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SRC,
+					 V4L2_SUBDEV_FORMAT_ACTIVE);
+
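+	/* The resizer is bypassed for Bayer raw formats */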
+	if (rsz->fmt_type == RKISP1_FMT_BAYER) {
+		rkisp1_rsz_disable(rsz, when);
+		return;
+	}
+
+	sink_y.width = sink_crop->width;
+	sink_y.height = sink_crop->height;
+	src_y.width = src_fmt->width;
+	src_y.height = src_fmt->height;
+
+	sink_c.width = sink_y.width / RKISP1_MBUS_FMT_HDIV;
+	sink_c.height = sink_y.height / RKISP1_MBUS_FMT_VDIV;
+
+	if (rsz->fmt_type == RKISP1_FMT_YUV) {
+		struct rkisp1_capture *cap =
+			&rsz->rkisp1->capture_devs[rsz->id];
+		const struct v4l2_format_info *pixfmt_info =
+			v4l2_format_info(cap->pix.fmt.pixelformat);
+
+		hdiv = pixfmt_info->hdiv;
+		vdiv = pixfmt_info->vdiv;
+	}
+	src_c.width = src_y.width / hdiv;
+	src_c.height = src_y.height / vdiv;
+
+	if (sink_c.width == src_c.width && sink_c.height == src_c.height) {
+		rkisp1_rsz_disable(rsz, when);
+		return;
+	}
+
+	dev_dbg(rsz->rkisp1->dev, "stream %d rsz/scale: %dx%d -> %dx%d\n",
+		rsz->id, sink_crop->width, sink_crop->height,
+		src_fmt->width, src_fmt->height);
+	dev_dbg(rsz->rkisp1->dev, "chroma scaling %dx%d -> %dx%d\n",
+		sink_c.width, sink_c.height, src_c.width, src_c.height);
+
+	/* set values in the hw */
+	rkisp1_rsz_config_regs(rsz, &sink_y, &sink_c, &src_y, &src_c, when);
+
+	rkisp1_rsz_dump_regs(rsz);
+}
+
+/* ----------------------------------------------------------------------------
+ * Subdev pad operations
+ */
+
+static int rkisp1_rsz_enum_mbus_code(struct v4l2_subdev *sd,
+				     struct v4l2_subdev_pad_config *cfg,
+				     struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct rkisp1_resizer *rsz =
+		container_of(sd, struct rkisp1_resizer, sd);
+	struct v4l2_subdev_pad_config dummy_cfg;
+	u32 pad = code->pad;
+	int ret;
+
+	/* the supported mbus codes are the same as on the isp sink pad */
+	code->pad = RKISP1_ISP_PAD_SINK_VIDEO;
+	ret = v4l2_subdev_call(&rsz->rkisp1->isp.sd, pad, enum_mbus_code,
+			       &dummy_cfg, code);
+
+	/* restore pad */
+	code->pad = pad;
+	return ret;
+}
+
+static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_pad_config *cfg)
+{
+	struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
+	struct v4l2_rect *sink_crop;
+
+	sink_fmt = v4l2_subdev_get_try_format(sd, cfg, RKISP1_RSZ_PAD_SINK);
+	sink_fmt->width = RKISP1_DEFAULT_WIDTH;
+	sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
+	sink_fmt->field = V4L2_FIELD_NONE;
+	sink_fmt->code = RKISP1_DEF_FMT;
+
+	sink_crop = v4l2_subdev_get_try_crop(sd, cfg, RKISP1_RSZ_PAD_SINK);
+	sink_crop->width = RKISP1_DEFAULT_WIDTH;
+	sink_crop->height = RKISP1_DEFAULT_HEIGHT;
+	sink_crop->left = 0;
+	sink_crop->top = 0;
+
+	src_fmt = v4l2_subdev_get_try_format(sd, cfg, RKISP1_RSZ_PAD_SRC);
+	*src_fmt = *sink_fmt;
+
+	/* NOTE: there is no crop in the source pad, only in the sink */
+
+	return 0;
+}
+
+static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
+				   struct v4l2_subdev_pad_config *cfg,
+				   struct v4l2_mbus_framefmt *format,
+				   unsigned int which)
+{
+	struct v4l2_mbus_framefmt *src_fmt;
+
+	src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
+	src_fmt->width = clamp_t(u32, format->width,
+				 rsz->config->min_rsz_width,
+				 rsz->config->max_rsz_width);
+	src_fmt->height = clamp_t(u32, format->height,
+				  rsz->config->min_rsz_height,
+				  rsz->config->max_rsz_height);
+
+	*format = *src_fmt;
+}
+
+static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
+				     struct v4l2_subdev_pad_config *cfg,
+				     struct v4l2_rect *r,
+				     unsigned int which)
+{
+	const struct rkisp1_isp_mbus_info *mbus_info;
+	struct v4l2_mbus_framefmt *sink_fmt;
+	struct v4l2_rect *sink_crop;
+
+	sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
+	sink_crop = rkisp1_rsz_get_pad_crop(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+					    which);
+
+	/* No cropping for Bayer raw data on the mainpath */
+	mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+
+	if (rsz->id == RKISP1_MAINPATH &&
+	    mbus_info->fmt_type == RKISP1_FMT_BAYER) {
+		sink_crop->left = 0;
+		sink_crop->top = 0;
+		sink_crop->width = sink_fmt->width;
+		sink_crop->height = sink_fmt->height;
+		return;
+	}
+
+	sink_crop->left = ALIGN(r->left, 2);
+	sink_crop->width = ALIGN(r->width, 2);
+	sink_crop->top = r->top;
+	sink_crop->height = r->height;
+	rkisp1_sd_adjust_crop(sink_crop, sink_fmt);
+
+	*r = *sink_crop;
+}
+
+static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
+				    struct v4l2_subdev_pad_config *cfg,
+				    struct v4l2_mbus_framefmt *format,
+				    unsigned int which)
+{
+	const struct rkisp1_isp_mbus_info *mbus_info;
+	struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
+	struct v4l2_rect *sink_crop;
+
+	sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
+	src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
+	sink_crop = rkisp1_rsz_get_pad_crop(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+					    which);
+	sink_fmt->code = format->code;
+	mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+	if (!mbus_info) {
+		sink_fmt->code = RKISP1_DEF_FMT;
+		mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+	}
+	if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+		rsz->fmt_type = mbus_info->fmt_type;
+
+	if (rsz->id == RKISP1_MAINPATH &&
+	    mbus_info->fmt_type == RKISP1_FMT_BAYER) {
+		sink_crop->left = 0;
+		sink_crop->top = 0;
+		sink_crop->width = sink_fmt->width;
+		sink_crop->height = sink_fmt->height;
+		return;
+	}
+
+	/* Propagate to source pad */
+	src_fmt->code = sink_fmt->code;
+
+	sink_fmt->width = clamp_t(u32, format->width,
+				  rsz->config->min_rsz_width,
+				  rsz->config->max_rsz_width);
+	sink_fmt->height = clamp_t(u32, format->height,
+				   rsz->config->min_rsz_height,
+				   rsz->config->max_rsz_height);
+
+	*format = *sink_fmt;
+
+	/* Update sink crop */
+	rkisp1_rsz_set_sink_crop(rsz, cfg, sink_crop, which);
+}
+
+static int rkisp1_rsz_get_fmt(struct v4l2_subdev *sd,
+			      struct v4l2_subdev_pad_config *cfg,
+			      struct v4l2_subdev_format *fmt)
+{
+	struct rkisp1_resizer *rsz =
+		container_of(sd, struct rkisp1_resizer, sd);
+
+	fmt->format = *rkisp1_rsz_get_pad_fmt(rsz, cfg, fmt->pad, fmt->which);
+	return 0;
+}
+
+static int rkisp1_rsz_set_fmt(struct v4l2_subdev *sd,
+			      struct v4l2_subdev_pad_config *cfg,
+			      struct v4l2_subdev_format *fmt)
+{
+	struct rkisp1_resizer *rsz =
+		container_of(sd, struct rkisp1_resizer, sd);
+
+	if (fmt->pad == RKISP1_RSZ_PAD_SINK)
+		rkisp1_rsz_set_sink_fmt(rsz, cfg, &fmt->format, fmt->which);
+	else
+		rkisp1_rsz_set_src_fmt(rsz, cfg, &fmt->format, fmt->which);
+
+	return 0;
+}
+
+static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_pad_config *cfg,
+				    struct v4l2_subdev_selection *sel)
+{
+	struct rkisp1_resizer *rsz =
+		container_of(sd, struct rkisp1_resizer, sd);
+	struct v4l2_mbus_framefmt *mf_sink;
+
+	if (sel->pad == RKISP1_RSZ_PAD_SRC)
+		return -EINVAL;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		mf_sink = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+						 sel->which);
+		sel->r.height = mf_sink->height;
+		sel->r.width = mf_sink->width;
+		sel->r.left = 0;
+		sel->r.top = 0;
+		break;
+	case V4L2_SEL_TGT_CROP:
+		sel->r = *rkisp1_rsz_get_pad_crop(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+						  sel->which);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_pad_config *cfg,
+				    struct v4l2_subdev_selection *sel)
+{
+	struct rkisp1_resizer *rsz =
+		container_of(sd, struct rkisp1_resizer, sd);
+
+	if (sel->target != V4L2_SEL_TGT_CROP || sel->pad == RKISP1_RSZ_PAD_SRC)
+		return -EINVAL;
+
+	dev_dbg(sd->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
+		sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
+
+	rkisp1_rsz_set_sink_crop(rsz, cfg, &sel->r, sel->which);
+
+	return 0;
+}
+
+static const struct media_entity_operations rkisp1_rsz_media_ops = {
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_pad_ops rkisp1_rsz_pad_ops = {
+	.enum_mbus_code = rkisp1_rsz_enum_mbus_code,
+	.get_selection = rkisp1_rsz_get_selection,
+	.set_selection = rkisp1_rsz_set_selection,
+	.init_cfg = rkisp1_rsz_init_config,
+	.get_fmt = rkisp1_rsz_get_fmt,
+	.set_fmt = rkisp1_rsz_set_fmt,
+	.link_validate = v4l2_subdev_link_validate_default,
+};
+
+/* ----------------------------------------------------------------------------
+ * Stream operations
+ */
+
+static int rkisp1_rsz_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct rkisp1_resizer *rsz =
+		container_of(sd, struct rkisp1_resizer, sd);
+	struct rkisp1_device *rkisp1 = rsz->rkisp1;
+	struct rkisp1_capture *other = &rkisp1->capture_devs[rsz->id ^ 1];
+	enum rkisp1_shadow_regs_when when = RKISP1_SHADOW_REGS_SYNC;
+
+	if (!enable) {
+		rkisp1_dcrop_disable(rsz, RKISP1_SHADOW_REGS_ASYNC);
+		rkisp1_rsz_disable(rsz, RKISP1_SHADOW_REGS_ASYNC);
+		return 0;
+	}
+
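+	/*
+	 * If the other capture path is already streaming, apply the new
+	 * configuration asynchronously (automatic shadow register update)
+	 * instead of forcing an immediate update.
+	 */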
+	if (other->is_streaming)
+		when = RKISP1_SHADOW_REGS_ASYNC;
+
+	rkisp1_rsz_config(rsz, when);
+	rkisp1_dcrop_config(rsz);
+
+	return 0;
+}
+
+static const struct v4l2_subdev_video_ops rkisp1_rsz_video_ops = {
+	.s_stream = rkisp1_rsz_s_stream,
+};
+
+static const struct v4l2_subdev_ops rkisp1_rsz_ops = {
+	.video = &rkisp1_rsz_video_ops,
+	.pad = &rkisp1_rsz_pad_ops,
+};
+
+static void rkisp1_rsz_unregister(struct rkisp1_resizer *rsz)
+{
+	v4l2_device_unregister_subdev(&rsz->sd);
+	media_entity_cleanup(&rsz->sd.entity);
+}
+
+static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
+{
+	const char * const dev_names[] = {RKISP1_RSZ_MP_DEV_NAME,
+					  RKISP1_RSZ_SP_DEV_NAME};
+	struct media_pad *pads = rsz->pads;
+	struct v4l2_subdev *sd = &rsz->sd;
+	int ret;
+
+	if (rsz->id == RKISP1_SELFPATH)
+		rsz->config = &rkisp1_rsz_config_sp;
+	else
+		rsz->config = &rkisp1_rsz_config_mp;
+
+	v4l2_subdev_init(sd, &rkisp1_rsz_ops);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	sd->entity.ops = &rkisp1_rsz_media_ops;
+	sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
+	sd->owner = THIS_MODULE;
+	strscpy(sd->name, dev_names[rsz->id], sizeof(sd->name));
+
+	pads[RKISP1_RSZ_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
+					  MEDIA_PAD_FL_MUST_CONNECT;
+	pads[RKISP1_RSZ_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE |
+					 MEDIA_PAD_FL_MUST_CONNECT;
+
+	rsz->fmt_type = RKISP1_DEF_FMT_TYPE;
+
+	ret = media_entity_pads_init(&sd->entity, 2, pads);
+	if (ret)
+		return ret;
+
+	ret = v4l2_device_register_subdev(&rsz->rkisp1->v4l2_dev, sd);
+	if (ret) {
+		dev_err(sd->dev, "Failed to register resizer subdev\n");
+		goto err_cleanup_media_entity;
+	}
+
+	rkisp1_rsz_init_config(sd, rsz->pad_cfg);
+	return 0;
+
+err_cleanup_media_entity:
+	media_entity_cleanup(&sd->entity);
+
+	return ret;
+}
+
+int rkisp1_resizer_devs_register(struct rkisp1_device *rkisp1)
+{
+	struct rkisp1_resizer *rsz;
+	unsigned int i, j;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(rkisp1->resizer_devs); i++) {
+		rsz = &rkisp1->resizer_devs[i];
+		rsz->rkisp1 = rkisp1;
+		rsz->id = i;
+		ret = rkisp1_rsz_register(rsz);
+		if (ret)
+			goto err_unreg_resizer_devs;
+	}
+
+	return 0;
+
+err_unreg_resizer_devs:
+	for (j = 0; j < i; j++) {
+		rsz = &rkisp1->resizer_devs[j];
+		rkisp1_rsz_unregister(rsz);
+	}
+
+	return ret;
+}
+
+void rkisp1_resizer_devs_unregister(struct rkisp1_device *rkisp1)
+{
+	struct rkisp1_resizer *mp = &rkisp1->resizer_devs[RKISP1_MAINPATH];
+	struct rkisp1_resizer *sp = &rkisp1->resizer_devs[RKISP1_SELFPATH];
+
+	rkisp1_rsz_unregister(mp);
+	rkisp1_rsz_unregister(sp);
+}
diff --git a/drivers/staging/media/rkisp1/rkisp1-stats.c b/drivers/staging/media/rkisp1/rkisp1-stats.c
new file mode 100644
index 0000000..d98ea15
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-stats.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - Stats subdevice
+ *
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-vmalloc.h>	/* for ISP statistics */
+
+#include "rkisp1-common.h"
+
+#define RKISP1_STATS_DEV_NAME	RKISP1_DRIVER_NAME "_stats"
+
+#define RKISP1_ISP_STATS_REQ_BUFS_MIN 2
+#define RKISP1_ISP_STATS_REQ_BUFS_MAX 8
+
+enum rkisp1_isp_readout_cmd {
+	RKISP1_ISP_READOUT_MEAS,
+	RKISP1_ISP_READOUT_META,
+};
+
+struct rkisp1_isp_readout_work {
+	struct work_struct work;
+	struct rkisp1_stats *stats;
+
+	unsigned int frame_id;
+	unsigned int isp_ris;
+	enum rkisp1_isp_readout_cmd readout;
+	struct vb2_buffer *vb;
+};
+
+static int rkisp1_stats_enum_fmt_meta_cap(struct file *file, void *priv,
+					  struct v4l2_fmtdesc *f)
+{
+	struct video_device *video = video_devdata(file);
+	struct rkisp1_stats *stats = video_get_drvdata(video);
+
+	if (f->index > 0 || f->type != video->queue->type)
+		return -EINVAL;
+
+	f->pixelformat = stats->vdev_fmt.fmt.meta.dataformat;
+	return 0;
+}
+
+static int rkisp1_stats_g_fmt_meta_cap(struct file *file, void *priv,
+				       struct v4l2_format *f)
+{
+	struct video_device *video = video_devdata(file);
+	struct rkisp1_stats *stats = video_get_drvdata(video);
+	struct v4l2_meta_format *meta = &f->fmt.meta;
+
+	if (f->type != video->queue->type)
+		return -EINVAL;
+
+	memset(meta, 0, sizeof(*meta));
+	meta->dataformat = stats->vdev_fmt.fmt.meta.dataformat;
+	meta->buffersize = stats->vdev_fmt.fmt.meta.buffersize;
+
+	return 0;
+}
+
+static int rkisp1_stats_querycap(struct file *file,
+				 void *priv, struct v4l2_capability *cap)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	strscpy(cap->driver, RKISP1_DRIVER_NAME, sizeof(cap->driver));
+	strscpy(cap->card, vdev->name, sizeof(cap->card));
+	strscpy(cap->bus_info, "platform: " RKISP1_DRIVER_NAME,
+		sizeof(cap->bus_info));
+
+	return 0;
+}
+
+/* ISP video device IOCTLs */
+static const struct v4l2_ioctl_ops rkisp1_stats_ioctl = {
+	.vidioc_reqbufs = vb2_ioctl_reqbufs,
+	.vidioc_querybuf = vb2_ioctl_querybuf,
+	.vidioc_create_bufs = vb2_ioctl_create_bufs,
+	.vidioc_qbuf = vb2_ioctl_qbuf,
+	.vidioc_dqbuf = vb2_ioctl_dqbuf,
+	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+	.vidioc_expbuf = vb2_ioctl_expbuf,
+	.vidioc_streamon = vb2_ioctl_streamon,
+	.vidioc_streamoff = vb2_ioctl_streamoff,
+	.vidioc_enum_fmt_meta_cap = rkisp1_stats_enum_fmt_meta_cap,
+	.vidioc_g_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap,
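+	/* the statistics meta format is fixed, so set/try simply return it */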
+	.vidioc_s_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap,
+	.vidioc_try_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap,
+	.vidioc_querycap = rkisp1_stats_querycap,
+	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations rkisp1_stats_fops = {
+	.mmap = vb2_fop_mmap,
+	.unlocked_ioctl = video_ioctl2,
+	.poll = vb2_fop_poll,
+	.open = v4l2_fh_open,
+	.release = vb2_fop_release
+};
+
+static int rkisp1_stats_vb2_queue_setup(struct vb2_queue *vq,
+					unsigned int *num_buffers,
+					unsigned int *num_planes,
+					unsigned int sizes[],
+					struct device *alloc_devs[])
+{
+	struct rkisp1_stats *stats = vq->drv_priv;
+
+	*num_planes = 1;
+
+	*num_buffers = clamp_t(u32, *num_buffers, RKISP1_ISP_STATS_REQ_BUFS_MIN,
+			       RKISP1_ISP_STATS_REQ_BUFS_MAX);
+
+	sizes[0] = sizeof(struct rkisp1_stat_buffer);
+
+	INIT_LIST_HEAD(&stats->stat);
+
+	return 0;
+}
+
+static void rkisp1_stats_vb2_buf_queue(struct vb2_buffer *vb)
+{
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct rkisp1_buffer *stats_buf =
+		container_of(vbuf, struct rkisp1_buffer, vb);
+	struct vb2_queue *vq = vb->vb2_queue;
+	struct rkisp1_stats *stats_dev = vq->drv_priv;
+
+	stats_buf->vaddr[0] = vb2_plane_vaddr(vb, 0);
+
+	mutex_lock(&stats_dev->wq_lock);
+	list_add_tail(&stats_buf->queue, &stats_dev->stat);
+	mutex_unlock(&stats_dev->wq_lock);
+}
+
+static int rkisp1_stats_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+	if (vb2_plane_size(vb, 0) < sizeof(struct rkisp1_stat_buffer))
+		return -EINVAL;
+
+	vb2_set_plane_payload(vb, 0, sizeof(struct rkisp1_stat_buffer));
+
+	return 0;
+}
+
+static void rkisp1_stats_vb2_stop_streaming(struct vb2_queue *vq)
+{
+	struct rkisp1_stats *stats = vq->drv_priv;
+	struct rkisp1_buffer *buf;
+	unsigned long flags;
+	unsigned int i;
+
+	/* Make sure no new work is queued from the ISR before draining the wq */
+	spin_lock_irqsave(&stats->irq_lock, flags);
+	stats->is_streaming = false;
+	spin_unlock_irqrestore(&stats->irq_lock, flags);
+
+	drain_workqueue(stats->readout_wq);
+
+	mutex_lock(&stats->wq_lock);
+	for (i = 0; i < RKISP1_ISP_STATS_REQ_BUFS_MAX; i++) {
+		if (list_empty(&stats->stat))
+			break;
+		buf = list_first_entry(&stats->stat,
+				       struct rkisp1_buffer, queue);
+		list_del(&buf->queue);
+		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+	}
+	mutex_unlock(&stats->wq_lock);
+}
+
+static int
+rkisp1_stats_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
+{
+	struct rkisp1_stats *stats = queue->drv_priv;
+
+	stats->is_streaming = true;
+
+	return 0;
+}
+
+static const struct vb2_ops rkisp1_stats_vb2_ops = {
+	.queue_setup = rkisp1_stats_vb2_queue_setup,
+	.buf_queue = rkisp1_stats_vb2_buf_queue,
+	.buf_prepare = rkisp1_stats_vb2_buf_prepare,
+	.wait_prepare = vb2_ops_wait_prepare,
+	.wait_finish = vb2_ops_wait_finish,
+	.stop_streaming = rkisp1_stats_vb2_stop_streaming,
+	.start_streaming = rkisp1_stats_vb2_start_streaming,
+};
+
+static int
+rkisp1_stats_init_vb2_queue(struct vb2_queue *q, struct rkisp1_stats *stats)
+{
+	struct rkisp1_vdev_node *node;
+
+	node = container_of(q, struct rkisp1_vdev_node, buf_queue);
+
+	q->type = V4L2_BUF_TYPE_META_CAPTURE;
+	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+	q->drv_priv = stats;
+	q->ops = &rkisp1_stats_vb2_ops;
+	q->mem_ops = &vb2_vmalloc_memops;
+	q->buf_struct_size = sizeof(struct rkisp1_buffer);
+	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+	q->lock = &node->vlock;
+
+	return vb2_queue_init(q);
+}
+
+static void rkisp1_stats_get_awb_meas(struct rkisp1_stats *stats,
+				      struct rkisp1_stat_buffer *pbuf)
+{
+	/* Protect against concurrent access from ISR? */
+	struct rkisp1_device *rkisp1 = stats->rkisp1;
+	u32 reg_val;
+
+	pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AWB;
+	reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_WHITE_CNT);
+	pbuf->params.awb.awb_mean[0].cnt =
+				RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(reg_val);
+	reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_MEAN);
+
+	pbuf->params.awb.awb_mean[0].mean_cr_or_r =
+				RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(reg_val);
+	pbuf->params.awb.awb_mean[0].mean_cb_or_b =
+				RKISP1_CIF_ISP_AWB_GET_MEAN_CB_B(reg_val);
+	pbuf->params.awb.awb_mean[0].mean_y_or_g =
+				RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(reg_val);
+}
+
+static void rkisp1_stats_get_aec_meas(struct rkisp1_stats *stats,
+				      struct rkisp1_stat_buffer *pbuf)
+{
+	struct rkisp1_device *rkisp1 = stats->rkisp1;
+	unsigned int i;
+
+	pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP;
+	for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX; i++)
+		pbuf->params.ae.exp_mean[i] =
+			(u8)rkisp1_read(rkisp1,
+					RKISP1_CIF_ISP_EXP_MEAN_00 + i * 4);
+}
+
+static void rkisp1_stats_get_afc_meas(struct rkisp1_stats *stats,
+				      struct rkisp1_stat_buffer *pbuf)
+{
+	struct rkisp1_device *rkisp1 = stats->rkisp1;
+	struct rkisp1_cif_isp_af_stat *af;
+
+	pbuf->meas_type = RKISP1_CIF_ISP_STAT_AFM_FIN;
+
+	af = &pbuf->params.af;
+	af->window[0].sum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_SUM_A);
+	af->window[0].lum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_LUM_A);
+	af->window[1].sum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_SUM_B);
+	af->window[1].lum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_LUM_B);
+	af->window[2].sum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_SUM_C);
+	af->window[2].lum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_LUM_C);
+}
+
+static void rkisp1_stats_get_hst_meas(struct rkisp1_stats *stats,
+				      struct rkisp1_stat_buffer *pbuf)
+{
+	struct rkisp1_device *rkisp1 = stats->rkisp1;
+	unsigned int i;
+
+	pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
+	for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX; i++)
+		pbuf->params.hist.hist_bins[i] =
+			(u8)rkisp1_read(rkisp1,
+					RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+}
+
+static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats,
+				      struct rkisp1_stat_buffer *pbuf)
+{
+	struct rkisp1_device *rkisp1 = stats->rkisp1;
+	const struct rkisp1_isp_mbus_info *in_fmt = rkisp1->isp.sink_fmt;
+	struct rkisp1_cif_isp_bls_meas_val *bls_val;
+
+	bls_val = &pbuf->params.ae.bls_val;
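+	/*
+	 * The A..D measurement registers follow the CFA component order, so
+	 * map them to the r/gr/gb/b fields according to the sensor bayer
+	 * pattern.
+	 */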
+	if (in_fmt->bayer_pat == RKISP1_RAW_BGGR) {
+		bls_val->meas_b =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
+		bls_val->meas_gb =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
+		bls_val->meas_gr =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
+		bls_val->meas_r =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
+	} else if (in_fmt->bayer_pat == RKISP1_RAW_GBRG) {
+		bls_val->meas_gb =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
+		bls_val->meas_b =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
+		bls_val->meas_r =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
+		bls_val->meas_gr =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
+	} else if (in_fmt->bayer_pat == RKISP1_RAW_GRBG) {
+		bls_val->meas_gr =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
+		bls_val->meas_r =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
+		bls_val->meas_b =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
+		bls_val->meas_gb =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
+	} else if (in_fmt->bayer_pat == RKISP1_RAW_RGGB) {
+		bls_val->meas_r =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
+		bls_val->meas_gr =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
+		bls_val->meas_gb =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
+		bls_val->meas_b =
+			rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
+	}
+}
+
+static void
+rkisp1_stats_send_measurement(struct rkisp1_stats *stats,
+			      struct rkisp1_isp_readout_work *meas_work)
+{
+	struct rkisp1_stat_buffer *cur_stat_buf;
+	struct rkisp1_buffer *cur_buf = NULL;
+	unsigned int frame_sequence =
+		atomic_read(&stats->rkisp1->isp.frame_sequence);
+	u64 timestamp = ktime_get_ns();
+
+	if (frame_sequence != meas_work->frame_id) {
+		dev_warn(stats->rkisp1->dev,
+			 "Measurement late (%d, %d)\n",
+			 frame_sequence, meas_work->frame_id);
+		frame_sequence = meas_work->frame_id;
+	}
+
+	mutex_lock(&stats->wq_lock);
+	/* get one empty buffer */
+	if (!list_empty(&stats->stat)) {
+		cur_buf = list_first_entry(&stats->stat,
+					   struct rkisp1_buffer, queue);
+		list_del(&cur_buf->queue);
+	}
+	mutex_unlock(&stats->wq_lock);
+
+	if (!cur_buf)
+		return;
+
+	cur_stat_buf =
+		(struct rkisp1_stat_buffer *)(cur_buf->vaddr[0]);
+
+	if (meas_work->isp_ris & RKISP1_CIF_ISP_AWB_DONE) {
+		rkisp1_stats_get_awb_meas(stats, cur_stat_buf);
+		cur_stat_buf->meas_type |= RKISP1_CIF_ISP_STAT_AWB;
+	}
+
+	if (meas_work->isp_ris & RKISP1_CIF_ISP_AFM_FIN) {
+		rkisp1_stats_get_afc_meas(stats, cur_stat_buf);
+		cur_stat_buf->meas_type |= RKISP1_CIF_ISP_STAT_AFM_FIN;
+	}
+
+	if (meas_work->isp_ris & RKISP1_CIF_ISP_EXP_END) {
+		rkisp1_stats_get_aec_meas(stats, cur_stat_buf);
+		rkisp1_stats_get_bls_meas(stats, cur_stat_buf);
+		cur_stat_buf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP;
+	}
+
+	if (meas_work->isp_ris & RKISP1_CIF_ISP_HIST_MEASURE_RDY) {
+		rkisp1_stats_get_hst_meas(stats, cur_stat_buf);
+		cur_stat_buf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
+	}
+
+	vb2_set_plane_payload(&cur_buf->vb.vb2_buf, 0,
+			      sizeof(struct rkisp1_stat_buffer));
+	cur_buf->vb.sequence = frame_sequence;
+	cur_buf->vb.vb2_buf.timestamp = timestamp;
+	vb2_buffer_done(&cur_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+}
+
+static void rkisp1_stats_readout_work(struct work_struct *work)
+{
+	struct rkisp1_isp_readout_work *readout_work =
+		container_of(work, struct rkisp1_isp_readout_work, work);
+	struct rkisp1_stats *stats = readout_work->stats;
+
+	if (readout_work->readout == RKISP1_ISP_READOUT_MEAS)
+		rkisp1_stats_send_measurement(stats, readout_work);
+
+	kfree(readout_work);
+}
+
+void rkisp1_stats_isr(struct rkisp1_stats *stats, u32 isp_ris)
+{
+	unsigned int frame_sequence =
+		atomic_read(&stats->rkisp1->isp.frame_sequence);
+	struct rkisp1_device *rkisp1 = stats->rkisp1;
+	struct rkisp1_isp_readout_work *work;
+	unsigned int isp_mis_tmp = 0;
+	u32 val;
+
+	spin_lock(&stats->irq_lock);
+
+	val = RKISP1_CIF_ISP_AWB_DONE | RKISP1_CIF_ISP_AFM_FIN |
+	      RKISP1_CIF_ISP_EXP_END | RKISP1_CIF_ISP_HIST_MEASURE_RDY;
+	rkisp1_write(rkisp1, val, RKISP1_CIF_ISP_ICR);
+
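+	/*
+	 * If any of the measurement interrupts is still pending after the ICR
+	 * write, count it as a statistics error in the debug counters.
+	 */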
+	isp_mis_tmp = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
+	if (isp_mis_tmp &
+	    (RKISP1_CIF_ISP_AWB_DONE | RKISP1_CIF_ISP_AFM_FIN |
+	     RKISP1_CIF_ISP_EXP_END | RKISP1_CIF_ISP_HIST_MEASURE_RDY))
+		rkisp1->debug.stats_error++;
+
+	if (!stats->is_streaming)
+		goto unlock;
+	if (isp_ris & (RKISP1_CIF_ISP_AWB_DONE |
+		       RKISP1_CIF_ISP_AFM_FIN |
+		       RKISP1_CIF_ISP_EXP_END |
+		       RKISP1_CIF_ISP_HIST_MEASURE_RDY)) {
+		work = kzalloc(sizeof(*work), GFP_ATOMIC);
+		if (work) {
+			INIT_WORK(&work->work,
+				  rkisp1_stats_readout_work);
+			work->readout = RKISP1_ISP_READOUT_MEAS;
+			work->stats = stats;
+			work->frame_id = frame_sequence;
+			work->isp_ris = isp_ris;
+			if (!queue_work(stats->readout_wq,
+					&work->work))
+				kfree(work);
+		} else {
+			dev_err(stats->rkisp1->dev,
+				"Could not allocate work\n");
+		}
+	}
+
+unlock:
+	spin_unlock(&stats->irq_lock);
+}
+
+static void rkisp1_init_stats(struct rkisp1_stats *stats)
+{
+	stats->vdev_fmt.fmt.meta.dataformat =
+		V4L2_META_FMT_RK_ISP1_STAT_3A;
+	stats->vdev_fmt.fmt.meta.buffersize =
+		sizeof(struct rkisp1_stat_buffer);
+}
+
+int rkisp1_stats_register(struct rkisp1_stats *stats,
+			  struct v4l2_device *v4l2_dev,
+			  struct rkisp1_device *rkisp1)
+{
+	struct rkisp1_vdev_node *node = &stats->vnode;
+	struct video_device *vdev = &node->vdev;
+	int ret;
+
+	stats->rkisp1 = rkisp1;
+	mutex_init(&stats->wq_lock);
+	mutex_init(&node->vlock);
+	INIT_LIST_HEAD(&stats->stat);
+	spin_lock_init(&stats->irq_lock);
+
+	strscpy(vdev->name, RKISP1_STATS_DEV_NAME, sizeof(vdev->name));
+
+	video_set_drvdata(vdev, stats);
+	vdev->ioctl_ops = &rkisp1_stats_ioctl;
+	vdev->fops = &rkisp1_stats_fops;
+	vdev->release = video_device_release_empty;
+	vdev->lock = &node->vlock;
+	vdev->v4l2_dev = v4l2_dev;
+	vdev->queue = &node->buf_queue;
+	vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
+	vdev->vfl_dir = VFL_DIR_RX;
+	rkisp1_stats_init_vb2_queue(vdev->queue, stats);
+	rkisp1_init_stats(stats);
+	video_set_drvdata(vdev, stats);
+
+	node->pad.flags = MEDIA_PAD_FL_SINK;
+	ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
+	if (ret)
+		goto err_release_queue;
+
+	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+	if (ret) {
+		dev_err(&vdev->dev,
+			"failed to register %s, ret=%d\n", vdev->name, ret);
+		goto err_cleanup_media_entity;
+	}
+
+	stats->readout_wq = alloc_workqueue("measurement_queue",
+					    WQ_UNBOUND | WQ_MEM_RECLAIM,
+					    1);
+
+	if (!stats->readout_wq) {
+		ret = -ENOMEM;
+		goto err_unreg_vdev;
+	}
+
+	return 0;
+
+err_unreg_vdev:
+	video_unregister_device(vdev);
+err_cleanup_media_entity:
+	media_entity_cleanup(&vdev->entity);
+err_release_queue:
+	vb2_queue_release(vdev->queue);
+	mutex_destroy(&node->vlock);
+	mutex_destroy(&stats->wq_lock);
+	return ret;
+}
+
+void rkisp1_stats_unregister(struct rkisp1_stats *stats)
+{
+	struct rkisp1_vdev_node *node = &stats->vnode;
+	struct video_device *vdev = &node->vdev;
+
+	destroy_workqueue(stats->readout_wq);
+	video_unregister_device(vdev);
+	media_entity_cleanup(&vdev->entity);
+	vb2_queue_release(vdev->queue);
+	mutex_destroy(&node->vlock);
+	mutex_destroy(&stats->wq_lock);
+}
diff --git a/drivers/staging/media/rkisp1/uapi/rkisp1-config.h b/drivers/staging/media/rkisp1/uapi/rkisp1-config.h
new file mode 100644
index 0000000..ca0d031
--- /dev/null
+++ b/drivers/staging/media/rkisp1/uapi/rkisp1-config.h
@@ -0,0 +1,819 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Rockchip ISP1 userspace API
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+/*
+ * TODO: Improve documentation, mostly regarding abbreviation and hardware
+ * specificities. Reference: "REF_01 - ISP_user_manual, Rev 2.57" (not public)
+ */
+
+#ifndef _UAPI_RKISP1_CONFIG_H
+#define _UAPI_RKISP1_CONFIG_H
+
+#include <linux/types.h>
+
+/* Vendor specific - used for RK_ISP1 camera sub-system */
+#define V4L2_META_FMT_RK_ISP1_PARAMS   v4l2_fourcc('R', 'K', '1', 'P') /* Rockchip ISP1 params */
+#define V4L2_META_FMT_RK_ISP1_STAT_3A  v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A statistics */
+
+#define RKISP1_CIF_ISP_MODULE_DPCC		BIT(0)
+#define RKISP1_CIF_ISP_MODULE_BLS		BIT(1)
+#define RKISP1_CIF_ISP_MODULE_SDG		BIT(2)
+#define RKISP1_CIF_ISP_MODULE_HST		BIT(3)
+#define RKISP1_CIF_ISP_MODULE_LSC		BIT(4)
+#define RKISP1_CIF_ISP_MODULE_AWB_GAIN		BIT(5)
+#define RKISP1_CIF_ISP_MODULE_FLT		BIT(6)
+#define RKISP1_CIF_ISP_MODULE_BDM		BIT(7)
+#define RKISP1_CIF_ISP_MODULE_CTK		BIT(8)
+#define RKISP1_CIF_ISP_MODULE_GOC		BIT(9)
+#define RKISP1_CIF_ISP_MODULE_CPROC		BIT(10)
+#define RKISP1_CIF_ISP_MODULE_AFC		BIT(11)
+#define RKISP1_CIF_ISP_MODULE_AWB		BIT(12)
+#define RKISP1_CIF_ISP_MODULE_IE		BIT(13)
+#define RKISP1_CIF_ISP_MODULE_AEC		BIT(14)
+#define RKISP1_CIF_ISP_MODULE_WDR		BIT(15)
+#define RKISP1_CIF_ISP_MODULE_DPF		BIT(16)
+#define RKISP1_CIF_ISP_MODULE_DPF_STRENGTH	BIT(17)
+
+#define RKISP1_CIF_ISP_CTK_COEFF_MAX            0x100
+#define RKISP1_CIF_ISP_CTK_OFFSET_MAX           0x800
+
+#define RKISP1_CIF_ISP_AE_MEAN_MAX              25
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX           16
+#define RKISP1_CIF_ISP_AFM_MAX_WINDOWS          3
+#define RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE       17
+
+#define RKISP1_CIF_ISP_BDM_MAX_TH               0xff
+
+/*
+ * Black level compensation
+ */
+/* maximum value for horizontal start address */
+#define RKISP1_CIF_ISP_BLS_START_H_MAX             0x00000fff
+/* maximum value for horizontal stop address */
+#define RKISP1_CIF_ISP_BLS_STOP_H_MAX              0x00000fff
+/* maximum value for vertical start address */
+#define RKISP1_CIF_ISP_BLS_START_V_MAX             0x00000fff
+/* maximum value for vertical stop address */
+#define RKISP1_CIF_ISP_BLS_STOP_V_MAX              0x00000fff
+/* maximum is 2^18 = 262144 */
+#define RKISP1_CIF_ISP_BLS_SAMPLES_MAX             0x00000012
+/* maximum value for fixed black level */
+#define RKISP1_CIF_ISP_BLS_FIX_SUB_MAX             0x00000fff
+/* minimum value for fixed black level */
+#define RKISP1_CIF_ISP_BLS_FIX_SUB_MIN             0xfffff000
+/* 13 bit range (signed) */
+#define RKISP1_CIF_ISP_BLS_FIX_MASK                0x00001fff
+
+/*
+ * Automatic white balance measurements
+ */
+#define RKISP1_CIF_ISP_AWB_MAX_GRID                1
+#define RKISP1_CIF_ISP_AWB_MAX_FRAMES              7
+
+/*
+ * Gamma out
+ */
+/* Maximum number of color samples supported */
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES       17
+
+/*
+ * Lens shade correction
+ */
+#define RKISP1_CIF_ISP_LSC_GRAD_TBL_SIZE           8
+#define RKISP1_CIF_ISP_LSC_SIZE_TBL_SIZE           8
+/*
+ * The following matches the tuning process,
+ * not the max capabilities of the chip.
+ * Last value unused.
+ */
+#define RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE           290
+
+/*
+ * Histogram calculation
+ */
+/* Last 3 values unused. */
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE 28
+
+/*
+ * Defect Pixel Cluster Correction
+ */
+#define RKISP1_CIF_ISP_DPCC_METHODS_MAX       3
+
+/*
+ * Denoising pre filter
+ */
+#define RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS      17
+#define RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS  6
+
+/*
+ * Measurement types
+ */
+#define RKISP1_CIF_ISP_STAT_AWB           BIT(0)
+#define RKISP1_CIF_ISP_STAT_AUTOEXP       BIT(1)
+#define RKISP1_CIF_ISP_STAT_AFM_FIN       BIT(2)
+#define RKISP1_CIF_ISP_STAT_HIST          BIT(3)
+
+enum rkisp1_cif_isp_histogram_mode {
+	RKISP1_CIF_ISP_HISTOGRAM_MODE_DISABLE,
+	RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED,
+	RKISP1_CIF_ISP_HISTOGRAM_MODE_R_HISTOGRAM,
+	RKISP1_CIF_ISP_HISTOGRAM_MODE_G_HISTOGRAM,
+	RKISP1_CIF_ISP_HISTOGRAM_MODE_B_HISTOGRAM,
+	RKISP1_CIF_ISP_HISTOGRAM_MODE_Y_HISTOGRAM
+};
+
+enum rkisp1_cif_isp_awb_mode_type {
+	RKISP1_CIF_ISP_AWB_MODE_MANUAL,
+	RKISP1_CIF_ISP_AWB_MODE_RGB,
+	RKISP1_CIF_ISP_AWB_MODE_YCBCR
+};
+
+enum rkisp1_cif_isp_flt_mode {
+	RKISP1_CIF_ISP_FLT_STATIC_MODE,
+	RKISP1_CIF_ISP_FLT_DYNAMIC_MODE
+};
+
+/**
+ * enum rkisp1_cif_isp_exp_ctrl_autostop - stop modes
+ * @RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0: continuous measurement
+ * @RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_1: stop measuring after a complete frame
+ */
+enum rkisp1_cif_isp_exp_ctrl_autostop {
+	RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0 = 0,
+	RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_1 = 1,
+};
+
+/**
+ * enum rkisp1_cif_isp_exp_meas_mode - Exposure measure mode
+ * @RKISP1_CIF_ISP_EXP_MEASURING_MODE_0: Y = 16 + 0.25R + 0.5G + 0.1094B
+ * @RKISP1_CIF_ISP_EXP_MEASURING_MODE_1: Y = (R + G + B) x (85/256)
+ */
+enum rkisp1_cif_isp_exp_meas_mode {
+	RKISP1_CIF_ISP_EXP_MEASURING_MODE_0,
+	RKISP1_CIF_ISP_EXP_MEASURING_MODE_1,
+};
+
+/*---------- PART1: Input Parameters ------------*/
+
+struct rkisp1_cif_isp_window {
+	__u16 h_offs;
+	__u16 v_offs;
+	__u16 h_size;
+	__u16 v_size;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_bls_fixed_val - BLS fixed subtraction values
+ *
+ * The values will be subtracted from the sensor
+ * values. Therefore a negative value means addition instead of subtraction!
+ *
+ * @r: Fixed (signed!) subtraction value for Bayer pattern R
+ * @gr: Fixed (signed!) subtraction value for Bayer pattern Gr
+ * @gb: Fixed (signed!) subtraction value for Bayer pattern Gb
+ * @b: Fixed (signed!) subtraction value for Bayer pattern B
+ */
+struct rkisp1_cif_isp_bls_fixed_val {
+	__s16 r;
+	__s16 gr;
+	__s16 gb;
+	__s16 b;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_bls_config - Configuration used by black level subtraction
+ *
+ * @enable_auto: In automatic mode the measured black level values are
+ *		 subtracted; otherwise the fixed subtraction values are used.
+ * @en_windows: enabled window
+ * @bls_window1: Measurement window 1 size
+ * @bls_window2: Measurement window 2 size
+ * @bls_samples: Set amount of measured pixels for each Bayer position
+ *		 (A, B, C and D) to 2^bls_samples.
+ * @fixed_val: Fixed subtraction values
+ */
+struct rkisp1_cif_isp_bls_config {
+	__u8 enable_auto;
+	__u8 en_windows;
+	struct rkisp1_cif_isp_window bls_window1;
+	struct rkisp1_cif_isp_window bls_window2;
+	__u8 bls_samples;
+	struct rkisp1_cif_isp_bls_fixed_val fixed_val;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_dpcc_methods_config - Methods Configuration used by DPCC
+ *
+ * Methods Configuration used by Defect Pixel Cluster Correction
+ *
+ * @method: Method enable bits
+ * @line_thresh: Line threshold
+ * @line_mad_fac: Line MAD factor
+ * @pg_fac: Peak gradient factor
+ * @rnd_thresh: Rank Neighbor Difference threshold
+ * @rg_fac: Rank gradient factor
+ */
+struct rkisp1_cif_isp_dpcc_methods_config {
+	__u32 method;
+	__u32 line_thresh;
+	__u32 line_mad_fac;
+	__u32 pg_fac;
+	__u32 rnd_thresh;
+	__u32 rg_fac;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_dpcc_config - Configuration used by DPCC
+ *
+ * Configuration used by Defect Pixel Cluster Correction
+ *
+ * @mode: dpcc output mode
+ * @output_mode: whether use hard coded methods
+ * @set_use: stage1 methods set
+ * @methods: methods config
+ * @ro_limits: rank order limits
+ * @rnd_offs: differential rank offsets for rank neighbor difference
+ */
+struct rkisp1_cif_isp_dpcc_config {
+	__u32 mode;
+	__u32 output_mode;
+	__u32 set_use;
+	struct rkisp1_cif_isp_dpcc_methods_config methods[RKISP1_CIF_ISP_DPCC_METHODS_MAX];
+	__u32 ro_limits;
+	__u32 rnd_offs;
+} __packed;
+
+struct rkisp1_cif_isp_gamma_corr_curve {
+	__u16 gamma_y[RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE];
+} __packed;
+
+struct rkisp1_cif_isp_gamma_curve_x_axis_pnts {
+	__u32 gamma_dx0;
+	__u32 gamma_dx1;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_sdg_config - Configuration used by sensor degamma
+ *
+ * @curve_r: gamma curve point definition for the red channel
+ * @curve_g: gamma curve point definition for the green channel
+ * @curve_b: gamma curve point definition for the blue channel
+ * @xa_pnts: x axis increments
+ */
+struct rkisp1_cif_isp_sdg_config {
+	struct rkisp1_cif_isp_gamma_corr_curve curve_r;
+	struct rkisp1_cif_isp_gamma_corr_curve curve_g;
+	struct rkisp1_cif_isp_gamma_corr_curve curve_b;
+	struct rkisp1_cif_isp_gamma_curve_x_axis_pnts xa_pnts;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_lsc_config - Configuration used by Lens shading correction
+ *
+ * refer to REF_01 for details
+ */
+struct rkisp1_cif_isp_lsc_config {
+	__u32 r_data_tbl[RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE];
+	__u32 gr_data_tbl[RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE];
+	__u32 gb_data_tbl[RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE];
+	__u32 b_data_tbl[RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE];
+
+	__u32 x_grad_tbl[RKISP1_CIF_ISP_LSC_GRAD_TBL_SIZE];
+	__u32 y_grad_tbl[RKISP1_CIF_ISP_LSC_GRAD_TBL_SIZE];
+
+	__u32 x_size_tbl[RKISP1_CIF_ISP_LSC_SIZE_TBL_SIZE];
+	__u32 y_size_tbl[RKISP1_CIF_ISP_LSC_SIZE_TBL_SIZE];
+	__u16 config_width;
+	__u16 config_height;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_ie_config - Configuration used by image effects
+ *
+ * @eff_mat_1: 3x3 Matrix Coefficients for Emboss Effect 1
+ * @eff_mat_2: 3x3 Matrix Coefficients for Emboss Effect 2
+ * @eff_mat_3: 3x3 Matrix Coefficients for Emboss 3/Sketch 1
+ * @eff_mat_4: 3x3 Matrix Coefficients for Sketch Effect 2
+ * @eff_mat_5: 3x3 Matrix Coefficients for Sketch Effect 3
+ * @eff_tint: Chrominance increment values of tint (used for sepia effect)
+ */
+struct rkisp1_cif_isp_ie_config {
+	__u16 effect;
+	__u16 color_sel;
+	__u16 eff_mat_1;
+	__u16 eff_mat_2;
+	__u16 eff_mat_3;
+	__u16 eff_mat_4;
+	__u16 eff_mat_5;
+	__u16 eff_tint;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_cproc_config - Configuration used by Color Processing
+ *
+ * @c_out_range: Chrominance pixel clipping range at output.
+ *		 (0 for limit, 1 for full)
+ * @y_in_range: Luminance pixel clipping range at input.
+ * @y_out_range: Luminance pixel clipping range at output.
+ * @contrast: 00~ff, 0.0~1.992
+ * @brightness: 80~7F, -128~+127
+ * @sat: saturation, 00~FF, 0.0~1.992
+ * @hue: 80~7F, -90~+87.188
+ */
+struct rkisp1_cif_isp_cproc_config {
+	__u8 c_out_range;
+	__u8 y_in_range;
+	__u8 y_out_range;
+	__u8 contrast;
+	__u8 brightness;
+	__u8 sat;
+	__u8 hue;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_awb_meas_config - Configuration used by auto white balance
+ *
+ * @awb_wnd: white balance measurement window (in pixels)
+ * @awb_mode: the awb measurement mode (from enum rkisp1_cif_isp_awb_mode_type)
+ * @max_y: only pixel values < max_y contribute to the awb measurement, set to 0
+ *	   to disable this feature
+ * @min_y: only pixel values > min_y contribute to the awb measurement
+ * @max_csum: Chrominance sum maximum value, only consider pixels with Cb+Cr
+ *	      smaller than the threshold for awb measurements
+ * @min_c: Chrominance minimum value, only consider pixels with Cb/Cr
+ *	   each greater than threshold value for awb measurements
+ * @frames: number of frames - 1 used for mean value calculation
+ *	    (ucFrames=0 means 1 Frame)
+ * @awb_ref_cr: reference Cr value for AWB regulation, target for AWB
+ * @awb_ref_cb: reference Cb value for AWB regulation, target for AWB
+ */
+struct rkisp1_cif_isp_awb_meas_config {
+	/*
+	 * Note: currently the h and v offsets are mapped to grid offsets
+	 */
+	struct rkisp1_cif_isp_window awb_wnd;
+	__u32 awb_mode;
+	__u8 max_y;
+	__u8 min_y;
+	__u8 max_csum;
+	__u8 min_c;
+	__u8 frames;
+	__u8 awb_ref_cr;
+	__u8 awb_ref_cb;
+	__u8 enable_ymax_cmp;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_awb_gain_config - Configuration used by auto white balance gain
+ *
+ * out_data_x = (AWB_GAIN_X * in_data + 128) >> 8
+ */
+struct rkisp1_cif_isp_awb_gain_config {
+	__u16 gain_red;
+	__u16 gain_green_r;
+	__u16 gain_blue;
+	__u16 gain_green_b;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_flt_config - Configuration used by ISP filtering
+ *
+ * @mode: ISP_FILT_MODE register fields (from enum rkisp1_cif_isp_flt_mode)
+ * @grn_stage1: ISP_FILT_MODE register fields
+ * @chr_h_mode: ISP_FILT_MODE register fields
+ * @chr_v_mode: ISP_FILT_MODE register fields
+ *
+ * refer to REF_01 for details.
+ */
+
+struct rkisp1_cif_isp_flt_config {
+	__u32 mode;
+	__u8 grn_stage1;
+	__u8 chr_h_mode;
+	__u8 chr_v_mode;
+	__u32 thresh_bl0;
+	__u32 thresh_bl1;
+	__u32 thresh_sh0;
+	__u32 thresh_sh1;
+	__u32 lum_weight;
+	__u32 fac_sh1;
+	__u32 fac_sh0;
+	__u32 fac_mid;
+	__u32 fac_bl0;
+	__u32 fac_bl1;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_bdm_config - Configuration used by Bayer DeMosaic
+ *
+ * @demosaic_th: threshold for bayer demosaicing texture detection
+ */
+struct rkisp1_cif_isp_bdm_config {
+	__u8 demosaic_th;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_ctk_config - Configuration used by Cross Talk correction
+ *
+ * @coeff0 .. @coeff8: color correction matrix coefficients
+ * @ct_offset_r: red offset for the crosstalk correction matrix
+ * @ct_offset_g: green offset for the crosstalk correction matrix
+ * @ct_offset_b: blue offset for the crosstalk correction matrix
+ */
+struct rkisp1_cif_isp_ctk_config {
+	__u16 coeff0;
+	__u16 coeff1;
+	__u16 coeff2;
+	__u16 coeff3;
+	__u16 coeff4;
+	__u16 coeff5;
+	__u16 coeff6;
+	__u16 coeff7;
+	__u16 coeff8;
+	__u16 ct_offset_r;
+	__u16 ct_offset_g;
+	__u16 ct_offset_b;
+} __packed;
+
+enum rkisp1_cif_isp_goc_mode {
+	RKISP1_CIF_ISP_GOC_MODE_LOGARITHMIC,
+	RKISP1_CIF_ISP_GOC_MODE_EQUIDISTANT
+};
+
+/**
+ * struct rkisp1_cif_isp_goc_config - Configuration used by Gamma Out correction
+ *
+ * @mode: goc mode (from enum rkisp1_cif_isp_goc_mode)
+ * @gamma_y: gamma out curve y-axis for all color components
+ */
+struct rkisp1_cif_isp_goc_config {
+	__u32 mode;
+	__u16 gamma_y[RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES];
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_hst_config - Configuration used by Histogram
+ *
+ * @mode: histogram mode (from enum rkisp1_cif_isp_histogram_mode)
+ * @histogram_predivider: process every stepsize pixel, all other pixels are
+ *			  skipped
+ * @meas_window: coordinates of the measure window
+ * @hist_weight: weighting factor for sub-windows
+ */
+struct rkisp1_cif_isp_hst_config {
+	__u32 mode;
+	__u8 histogram_predivider;
+	struct rkisp1_cif_isp_window meas_window;
+	__u8 hist_weight[RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE];
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_aec_config - Configuration used by Auto Exposure Control
+ *
+ * @mode: Exposure measure mode (from enum rkisp1_cif_isp_exp_meas_mode)
+ * @autostop: stop mode (from enum rkisp1_cif_isp_exp_ctrl_autostop)
+ * @meas_window: coordinates of the measure window
+ */
+struct rkisp1_cif_isp_aec_config {
+	__u32 mode;
+	__u32 autostop;
+	struct rkisp1_cif_isp_window meas_window;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_afc_config - Configuration used by Auto Focus Control
+ *
+ * @num_afm_win: max RKISP1_CIF_ISP_AFM_MAX_WINDOWS
+ * @afm_win: coordinates of the meas window
+ * @thres: threshold used for minimizing the influence of noise
+ * @var_shift: the number of bits for the shift operation at the end of the
+ *	       calculation chain.
+ */
+struct rkisp1_cif_isp_afc_config {
+	__u8 num_afm_win;
+	struct rkisp1_cif_isp_window afm_win[RKISP1_CIF_ISP_AFM_MAX_WINDOWS];
+	__u32 thres;
+	__u32 var_shift;
+} __packed;
+
+/**
+ * enum rkisp1_cif_isp_dpf_gain_usage - dpf gain usage
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED: don't use any gains in preprocessing stage
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS: use only the noise function gains from
+ *				    registers DPF_NF_GAIN_R, ...
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS:  use only the gains from LSC module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS: use the noise function gains and the
+ *					gains from LSC module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS: use only the gains from AWB module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS: use the gains from AWB and LSC module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_MAX: upper border (only for an internal evaluation)
+ */
+enum rkisp1_cif_isp_dpf_gain_usage {
+	RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED,
+	RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS,
+	RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS,
+	RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS,
+	RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS,
+	RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS,
+	RKISP1_CIF_ISP_DPF_GAIN_USAGE_MAX
+};
+
+/**
+ * enum rkisp1_cif_isp_dpf_rb_filtersize - Red and blue filter sizes
+ * @RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9: red and blue filter kernel size 13x9
+ *				   (means 7x5 active pixel)
+ * @RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9: red and blue filter kernel size 9x9
+ *				   (means 5x5 active pixel)
+ */
+enum rkisp1_cif_isp_dpf_rb_filtersize {
+	RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9,
+	RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9,
+};
+
+/**
+ * enum rkisp1_cif_isp_dpf_nll_scale_mode - dpf noise level scale mode
+ * @RKISP1_CIF_ISP_NLL_SCALE_LINEAR: use a linear scaling
+ * @RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC: use a logarithmic scaling
+ */
+enum rkisp1_cif_isp_dpf_nll_scale_mode {
+	RKISP1_CIF_ISP_NLL_SCALE_LINEAR,
+	RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC,
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_nll - Noise level lookup
+ *
+ * @coeff: Noise level Lookup coefficient
+ * @scale_mode: dpf noise level scale mode (from enum rkisp1_cif_isp_dpf_nll_scale_mode)
+ */
+struct rkisp1_cif_isp_dpf_nll {
+	__u16 coeff[RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS];
+	__u32 scale_mode;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_dpf_rb_flt - Red blue filter config
+ *
+ * @fltsize: The filter size for the red and blue pixels
+ *	     (from enum rkisp1_cif_isp_dpf_rb_filtersize)
+ * @spatial_coeff: Spatial weights
+ * @r_enable: enable filter processing for red pixels
+ * @b_enable: enable filter processing for blue pixels
+ */
+struct rkisp1_cif_isp_dpf_rb_flt {
+	__u32 fltsize;
+	__u8 spatial_coeff[RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS];
+	__u8 r_enable;
+	__u8 b_enable;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_dpf_g_flt - Green filter Configuration
+ *
+ * @spatial_coeff: Spatial weights
+ * @gr_enable: enable filter processing for green pixels in green/red lines
+ * @gb_enable: enable filter processing for green pixels in green/blue lines
+ */
+struct rkisp1_cif_isp_dpf_g_flt {
+	__u8 spatial_coeff[RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS];
+	__u8 gr_enable;
+	__u8 gb_enable;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_dpf_gain - Noise function Configuration
+ *
+ * @mode: dpf gain usage  (from enum rkisp1_cif_isp_dpf_gain_usage)
+ * @nf_r_gain: Noise function Gain that replaces the AWB gain for red pixels
+ * @nf_b_gain: Noise function Gain that replaces the AWB gain for blue pixels
+ * @nf_gr_gain: Noise function Gain that replaces the AWB gain
+ *		for green pixels in a red line
+ * @nf_gb_gain: Noise function Gain that replaces the AWB gain
+ *		for green pixels in a blue line
+ */
+struct rkisp1_cif_isp_dpf_gain {
+	__u32 mode;
+	__u16 nf_r_gain;
+	__u16 nf_b_gain;
+	__u16 nf_gr_gain;
+	__u16 nf_gb_gain;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_dpf_config - Configuration used by De-noising pre-filter
+ *
+ * @gain: noise function gain
+ * @g_flt: green filter config
+ * @rb_flt: red blue filter config
+ * @nll: noise level lookup
+ */
+struct rkisp1_cif_isp_dpf_config {
+	struct rkisp1_cif_isp_dpf_gain gain;
+	struct rkisp1_cif_isp_dpf_g_flt g_flt;
+	struct rkisp1_cif_isp_dpf_rb_flt rb_flt;
+	struct rkisp1_cif_isp_dpf_nll nll;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_dpf_strength_config - strength of the filter
+ *
+ * @r: filter strength of the RED filter
+ * @g: filter strength of the GREEN filter
+ * @b: filter strength of the BLUE filter
+ */
+struct rkisp1_cif_isp_dpf_strength_config {
+	__u8 r;
+	__u8 g;
+	__u8 b;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_isp_other_cfg - Parameters for some blocks in rockchip isp1
+ *
+ * @dpcc_config: Defect Pixel Cluster Correction config
+ * @bls_config: Black Level Subtraction config
+ * @sdg_config: sensor degamma config
+ * @lsc_config: Lens Shade config
+ * @awb_gain_config: Auto White balance gain config
+ * @flt_config: filter config
+ * @bdm_config: demosaic config
+ * @ctk_config: cross talk config
+ * @goc_config: gamma out config
+ * @dpf_config: De-noising pre-filter config
+ * @dpf_strength_config: dpf strength config
+ * @cproc_config: color process config
+ * @ie_config: image effects config
+ */
+struct rkisp1_cif_isp_isp_other_cfg {
+	struct rkisp1_cif_isp_dpcc_config dpcc_config;
+	struct rkisp1_cif_isp_bls_config bls_config;
+	struct rkisp1_cif_isp_sdg_config sdg_config;
+	struct rkisp1_cif_isp_lsc_config lsc_config;
+	struct rkisp1_cif_isp_awb_gain_config awb_gain_config;
+	struct rkisp1_cif_isp_flt_config flt_config;
+	struct rkisp1_cif_isp_bdm_config bdm_config;
+	struct rkisp1_cif_isp_ctk_config ctk_config;
+	struct rkisp1_cif_isp_goc_config goc_config;
+	struct rkisp1_cif_isp_dpf_config dpf_config;
+	struct rkisp1_cif_isp_dpf_strength_config dpf_strength_config;
+	struct rkisp1_cif_isp_cproc_config cproc_config;
+	struct rkisp1_cif_isp_ie_config ie_config;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_isp_meas_cfg - Rockchip ISP1 Measure Parameters
+ *
+ * @awb_meas_config: auto white balance config
+ * @hst_config: histogram config
+ * @aec_config: auto exposure config
+ * @afc_config: auto focus config
+ */
+struct rkisp1_cif_isp_isp_meas_cfg {
+	struct rkisp1_cif_isp_awb_meas_config awb_meas_config;
+	struct rkisp1_cif_isp_hst_config hst_config;
+	struct rkisp1_cif_isp_aec_config aec_config;
+	struct rkisp1_cif_isp_afc_config afc_config;
+} __packed;
+
+/**
+ * struct rkisp1_params_cfg - Rockchip ISP1 Input Parameters Meta Data
+ *
+ * @module_en_update: mask of the module enable bits that should be updated
+ * @module_ens: mask with the enable value for each module; only the modules
+ *		whose corresponding bit is set in module_en_update are updated
+ * @module_cfg_update: mask of the module config bits that should be updated
+ * @meas: measurement config
+ * @others: other config
+ */
+struct rkisp1_params_cfg {
+	__u32 module_en_update;
+	__u32 module_ens;
+	__u32 module_cfg_update;
+
+	struct rkisp1_cif_isp_isp_meas_cfg meas;
+	struct rkisp1_cif_isp_isp_other_cfg others;
+} __packed;
+
+/*---------- PART2: Measurement Statistics ------------*/
+
+/**
+ * struct rkisp1_cif_isp_awb_meas - AWB measured values
+ *
+ * @cnt: White pixel count, number of "white pixels" found during last
+ *	 measurement
+ * @mean_y_or_g: Mean value of Y within window and frames,
+ *		 Green if RGB is selected.
+ * @mean_cb_or_b: Mean value of Cb within window and frames,
+ *		  Blue if RGB is selected.
+ * @mean_cr_or_r: Mean value of Cr within window and frames,
+ *		  Red if RGB is selected.
+ */
+struct rkisp1_cif_isp_awb_meas {
+	__u32 cnt;
+	__u8 mean_y_or_g;
+	__u8 mean_cb_or_b;
+	__u8 mean_cr_or_r;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_awb_stat - statistics automatic white balance data
+ *
+ * @awb_mean: Mean measured data
+ */
+struct rkisp1_cif_isp_awb_stat {
+	struct rkisp1_cif_isp_awb_meas awb_mean[RKISP1_CIF_ISP_AWB_MAX_GRID];
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_bls_meas_val - BLS measured values
+ *
+ * @meas_r: Mean measured value for Bayer pattern R
+ * @meas_gr: Mean measured value for Bayer pattern Gr
+ * @meas_gb: Mean measured value for Bayer pattern Gb
+ * @meas_b: Mean measured value for Bayer pattern B
+ */
+struct rkisp1_cif_isp_bls_meas_val {
+	__u16 meas_r;
+	__u16 meas_gr;
+	__u16 meas_gb;
+	__u16 meas_b;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_ae_stat - statistics auto exposure data
+ *
+ * @exp_mean: Mean luminance value of each image block
+ * @bls_val:  BLS measured values
+ *
+ * Image is divided into 5x5 blocks.
+ */
+struct rkisp1_cif_isp_ae_stat {
+	__u8 exp_mean[RKISP1_CIF_ISP_AE_MEAN_MAX];
+	struct rkisp1_cif_isp_bls_meas_val bls_val;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_af_meas_val - AF measured values
+ *
+ * @sum: sharpness, refer to REF_01 for definition
+ * @lum: luminance, refer to REF_01 for definition
+ */
+struct rkisp1_cif_isp_af_meas_val {
+	__u32 sum;
+	__u32 lum;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_af_stat - statistics auto focus data
+ *
+ * @window: AF measured value of window x
+ *
+ * The module measures the sharpness in 3 windows of selectable size via
+ * register settings (ISP_AFM_*_A/B/C)
+ */
+struct rkisp1_cif_isp_af_stat {
+	struct rkisp1_cif_isp_af_meas_val window[RKISP1_CIF_ISP_AFM_MAX_WINDOWS];
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_hist_stat - statistics histogram data
+ *
+ * @hist_bins: measured bin counters
+ *
+ * Measurement window divided into 25 sub-windows, set
+ * with ISP_HIST_XXX
+ */
+struct rkisp1_cif_isp_hist_stat {
+	__u16 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX];
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_stat - Rockchip ISP1 Statistics Data
+ *
+ * @awb: statistics data for automatic white balance
+ * @ae: statistics data for auto exposure
+ * @af: statistics data for auto focus
+ * @hist: statistics histogram data
+ */
+struct rkisp1_cif_isp_stat {
+	struct rkisp1_cif_isp_awb_stat awb;
+	struct rkisp1_cif_isp_ae_stat ae;
+	struct rkisp1_cif_isp_af_stat af;
+	struct rkisp1_cif_isp_hist_stat hist;
+} __packed;
+
+/**
+ * struct rkisp1_stat_buffer - Rockchip ISP1 Statistics Meta Data
+ *
+ * @meas_type: measurement types (RKISP1_CIF_ISP_STAT_ definitions)
+ * @frame_id: frame ID for sync
+ * @params: statistics data
+ */
+struct rkisp1_stat_buffer {
+	__u32 meas_type;
+	__u32 frame_id;
+	struct rkisp1_cif_isp_stat params;
+} __packed;
+
+#endif /* _UAPI_RKISP1_CONFIG_H */
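[ Reviewer note, not part of the patch: a minimal userspace sketch of how the
  parameters buffer above is meant to be filled.  The module bit name
  RKISP1_CIF_ISP_MODULE_DPF_STRENGTH is assumed to be defined earlier in this
  header; only the blocks whose bits are set in the three masks are touched. ]

	struct rkisp1_params_cfg cfg = { 0 };

	/* Update only the DPF strength block: enable it and give it a new config. */
	cfg.module_en_update = RKISP1_CIF_ISP_MODULE_DPF_STRENGTH;
	cfg.module_ens = RKISP1_CIF_ISP_MODULE_DPF_STRENGTH;
	cfg.module_cfg_update = RKISP1_CIF_ISP_MODULE_DPF_STRENGTH;
	cfg.others.dpf_strength_config.r = 64;
	cfg.others.dpf_strength_config.g = 64;
	cfg.others.dpf_strength_config.b = 64;

	/* The filled buffer is then queued on the params video node (VIDIOC_QBUF). */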
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
index 6945dc7..ce497d0 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -7,6 +7,7 @@
  * Copyright (C) 2018 Bootlin
  */
 
+#include <linux/delay.h>
 #include <linux/types.h>
 
 #include <media/videobuf2-dma-contig.h>
@@ -220,6 +221,23 @@ static void cedrus_h265_pred_weight_write(struct cedrus_dev *dev,
 	}
 }
 
+static void cedrus_h265_skip_bits(struct cedrus_dev *dev, int num)
+{
+	int count = 0;
+
+	while (count < num) {
+		int tmp = min(num - count, 32);
+
+		cedrus_write(dev, VE_DEC_H265_TRIGGER,
+			     VE_DEC_H265_TRIGGER_FLUSH_BITS |
+			     VE_DEC_H265_TRIGGER_TYPE_N_BITS(tmp));
+		while (cedrus_read(dev, VE_DEC_H265_STATUS) & VE_DEC_H265_STATUS_VLD_BUSY)
+			udelay(1);
+
+		count += tmp;
+	}
+}
+
 static void cedrus_h265_setup(struct cedrus_ctx *ctx,
 			      struct cedrus_run *run)
 {
@@ -280,10 +298,9 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
 
 	/* Source offset and length in bits. */
 
-	reg = slice_params->data_bit_offset;
-	cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, reg);
+	cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, 0);
 
-	reg = slice_params->bit_size - slice_params->data_bit_offset;
+	reg = slice_params->bit_size;
 	cedrus_write(dev, VE_DEC_H265_BITS_LEN, reg);
 
 	/* Source beginning and end addresses. */
@@ -316,6 +333,8 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
 	/* Initialize bitstream access. */
 	cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_INIT_SWDEC);
 
+	cedrus_h265_skip_bits(dev, slice_params->data_bit_offset);
+
 	/* Bitstream parameters. */
 
 	reg = VE_DEC_H265_DEC_NAL_HDR_NAL_UNIT_TYPE(slice_params->nal_unit_type) |
@@ -332,6 +351,7 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
 	      VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE(sps->log2_diff_max_min_luma_coding_block_size) |
 	      VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3(sps->log2_min_luma_coding_block_size_minus3) |
 	      VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_CHROMA_MINUS8(sps->bit_depth_chroma_minus8) |
+	      VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_LUMA_MINUS8(sps->bit_depth_luma_minus8) |
 	      VE_DEC_H265_DEC_SPS_HDR_CHROMA_FORMAT_IDC(sps->chroma_format_idc);
 
 	reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_STRONG_INTRA_SMOOTHING_ENABLE,
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
index 7beb03d..66b152f 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -424,6 +424,7 @@
 
 #define VE_DEC_H265_TRIGGER			(VE_ENGINE_DEC_H265 + 0x34)
 
+#define VE_DEC_H265_TRIGGER_TYPE_N_BITS(x)	(((x) & 0x3f) << 8)
 #define VE_DEC_H265_TRIGGER_STCD_VC1		(0x02 << 4)
 #define VE_DEC_H265_TRIGGER_STCD_AVS		(0x01 << 4)
 #define VE_DEC_H265_TRIGGER_STCD_HEVC		(0x00 << 4)
diff --git a/drivers/staging/media/tegra-vde/Kconfig b/drivers/staging/media/tegra-vde/Kconfig
index ba49ea5..0dc78af 100644
--- a/drivers/staging/media/tegra-vde/Kconfig
+++ b/drivers/staging/media/tegra-vde/Kconfig
@@ -3,7 +3,7 @@
 	tristate "NVIDIA Tegra Video Decoder Engine driver"
 	depends on ARCH_TEGRA || COMPILE_TEST
 	select DMA_SHARED_BUFFER
-	select IOMMU_IOVA if (IOMMU_SUPPORT || COMPILE_TEST)
+	select IOMMU_IOVA
 	select SRAM
 	help
 	    Say Y here to enable support for the NVIDIA Tegra video decoder
diff --git a/drivers/staging/media/tegra-vde/vde.c b/drivers/staging/media/tegra-vde/vde.c
index 3466dad..e18fd48 100644
--- a/drivers/staging/media/tegra-vde/vde.c
+++ b/drivers/staging/media/tegra-vde/vde.c
@@ -1150,8 +1150,7 @@ static int tegra_vde_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int tegra_vde_pm_suspend(struct device *dev)
+static __maybe_unused int tegra_vde_pm_suspend(struct device *dev)
 {
 	struct tegra_vde *vde = dev_get_drvdata(dev);
 	int err;
@@ -1165,7 +1164,7 @@ static int tegra_vde_pm_suspend(struct device *dev)
 	return 0;
 }
 
-static int tegra_vde_pm_resume(struct device *dev)
+static __maybe_unused int tegra_vde_pm_resume(struct device *dev)
 {
 	struct tegra_vde *vde = dev_get_drvdata(dev);
 	int err;
@@ -1178,7 +1177,6 @@ static int tegra_vde_pm_resume(struct device *dev)
 
 	return 0;
 }
-#endif
 
 static const struct dev_pm_ops tegra_vde_pm_ops = {
 	SET_RUNTIME_PM_OPS(tegra_vde_runtime_suspend,
diff --git a/drivers/staging/media/tegra-vde/vde.h b/drivers/staging/media/tegra-vde/vde.h
index d369f14..5561291 100644
--- a/drivers/staging/media/tegra-vde/vde.h
+++ b/drivers/staging/media/tegra-vde/vde.h
@@ -10,11 +10,11 @@
 
 #include <linux/completion.h>
 #include <linux/dma-direction.h>
+#include <linux/iova.h>
 #include <linux/list.h>
 #include <linux/miscdevice.h>
 #include <linux/mutex.h>
 #include <linux/types.h>
-#include <linux/iova.h>
 
 struct clk;
 struct dma_buf;
diff --git a/drivers/staging/most/Makefile b/drivers/staging/most/Makefile
index 85ea5a4..20a99ec 100644
--- a/drivers/staging/most/Makefile
+++ b/drivers/staging/most/Makefile
@@ -2,7 +2,6 @@
 obj-$(CONFIG_MOST) += most_core.o
 most_core-y := core.o
 most_core-y += configfs.o
-ccflags-y += -I $(srctree)/drivers/staging/
 
 obj-$(CONFIG_MOST_CDEV)	+= cdev/
 obj-$(CONFIG_MOST_NET)	+= net/
diff --git a/drivers/staging/most/cdev/Makefile b/drivers/staging/most/cdev/Makefile
index 9f4a8b8..ef90cd7 100644
--- a/drivers/staging/most/cdev/Makefile
+++ b/drivers/staging/most/cdev/Makefile
@@ -2,4 +2,3 @@
 obj-$(CONFIG_MOST_CDEV) += most_cdev.o
 
 most_cdev-objs := cdev.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
index f880147..71943d1 100644
--- a/drivers/staging/most/cdev/cdev.c
+++ b/drivers/staging/most/cdev/cdev.c
@@ -16,7 +16,8 @@
 #include <linux/kfifo.h>
 #include <linux/uaccess.h>
 #include <linux/idr.h>
-#include "most/core.h"
+
+#include "../most.h"
 
 #define CHRDEV_REGION_SIZE 50
 
@@ -25,7 +26,7 @@ static struct cdev_component {
 	struct ida minor_id;
 	unsigned int major;
 	struct class *class;
-	struct core_component cc;
+	struct most_component cc;
 } comp;
 
 struct comp_channel {
diff --git a/drivers/staging/most/configfs.c b/drivers/staging/most/configfs.c
index 34a9fb5..9a96122 100644
--- a/drivers/staging/most/configfs.c
+++ b/drivers/staging/most/configfs.c
@@ -10,7 +10,10 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/configfs.h>
-#include <most/core.h>
+
+#include "most.h"
+
+#define MAX_STRING_SIZE 80
 
 struct mdev_link {
 	struct config_item item;
@@ -22,13 +25,13 @@ struct mdev_link {
 	u16 subbuffer_size;
 	u16 packets_per_xact;
 	u16 dbr_size;
-	char datatype[PAGE_SIZE];
-	char direction[PAGE_SIZE];
-	char name[PAGE_SIZE];
-	char device[PAGE_SIZE];
-	char channel[PAGE_SIZE];
-	char comp[PAGE_SIZE];
-	char comp_params[PAGE_SIZE];
+	char datatype[MAX_STRING_SIZE];
+	char direction[MAX_STRING_SIZE];
+	char name[MAX_STRING_SIZE];
+	char device[MAX_STRING_SIZE];
+	char channel[MAX_STRING_SIZE];
+	char comp[MAX_STRING_SIZE];
+	char comp_params[MAX_STRING_SIZE];
 };
 
 static struct list_head mdev_link_list;
@@ -125,6 +128,8 @@ static ssize_t mdev_link_create_link_store(struct config_item *item,
 		return ret;
 	list_add_tail(&mdev_link->list, &mdev_link_list);
 	mdev_link->create_link = tmp;
+	mdev_link->destroy_link = false;
+
 	return count;
 }
 
@@ -140,13 +145,16 @@ static ssize_t mdev_link_destroy_link_store(struct config_item *item,
 		return ret;
 	if (!tmp)
 		return count;
-	mdev_link->destroy_link = tmp;
+
 	ret = most_remove_link(mdev_link->device, mdev_link->channel,
 			       mdev_link->comp);
 	if (ret)
 		return ret;
 	if (!list_empty(&mdev_link_list))
 		list_del(&mdev_link->list);
+
+	mdev_link->destroy_link = tmp;
+
 	return count;
 }
 
@@ -197,7 +205,7 @@ static ssize_t mdev_link_device_store(struct config_item *item,
 {
 	struct mdev_link *mdev_link = to_mdev_link(item);
 
-	strcpy(mdev_link->device, page);
+	strlcpy(mdev_link->device, page, sizeof(mdev_link->device));
 	strim(mdev_link->device);
 	return count;
 }
@@ -212,7 +220,7 @@ static ssize_t mdev_link_channel_store(struct config_item *item,
 {
 	struct mdev_link *mdev_link = to_mdev_link(item);
 
-	strcpy(mdev_link->channel, page);
+	strlcpy(mdev_link->channel, page, sizeof(mdev_link->channel));
 	strim(mdev_link->channel);
 	return count;
 }
@@ -227,7 +235,8 @@ static ssize_t mdev_link_comp_store(struct config_item *item,
 {
 	struct mdev_link *mdev_link = to_mdev_link(item);
 
-	strcpy(mdev_link->comp, page);
+	strlcpy(mdev_link->comp, page, sizeof(mdev_link->comp));
+	strim(mdev_link->comp);
 	return count;
 }
 
@@ -242,7 +251,8 @@ static ssize_t mdev_link_comp_params_store(struct config_item *item,
 {
 	struct mdev_link *mdev_link = to_mdev_link(item);
 
-	strcpy(mdev_link->comp_params, page);
+	strlcpy(mdev_link->comp_params, page, sizeof(mdev_link->comp_params));
+	strim(mdev_link->comp_params);
 	return count;
 }
 
@@ -373,13 +383,20 @@ static void mdev_link_release(struct config_item *item)
 	struct mdev_link *mdev_link = to_mdev_link(item);
 	int ret;
 
-	if (!list_empty(&mdev_link_list)) {
-		ret = most_remove_link(mdev_link->device, mdev_link->channel,
-				       mdev_link->comp);
-		if (ret && (ret != -ENODEV))
-			pr_err("Removing link failed.\n");
-		list_del(&mdev_link->list);
+	if (mdev_link->destroy_link)
+		goto free_item;
+
+	ret = most_remove_link(mdev_link->device, mdev_link->channel,
+			       mdev_link->comp);
+	if (ret) {
+		pr_err("Removing link failed.\n");
+		goto free_item;
 	}
+
+	if (!list_empty(&mdev_link_list))
+		list_del(&mdev_link->list);
+
+free_item:
 	kfree(to_mdev_link(item));
 }
 
@@ -630,7 +647,7 @@ static struct most_sound most_sound_subsys = {
 	},
 };
 
-int most_register_configfs_subsys(struct core_component *c)
+int most_register_configfs_subsys(struct most_component *c)
 {
 	int ret;
 
@@ -674,7 +691,7 @@ void most_interface_register_notify(const char *mdev)
 		most_cfg_complete("sound");
 }
 
-void most_deregister_configfs_subsys(struct core_component *c)
+void most_deregister_configfs_subsys(struct most_component *c)
 {
 	if (!strcmp(c->name, "cdev"))
 		configfs_unregister_subsystem(&most_cdev.subsys);
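[ Reviewer note, not part of the patch: the strcpy() to strlcpy() conversions
  above all follow the same pattern.  A configfs page can be up to PAGE_SIZE
  bytes, so an unbounded copy into the new 80-byte fields could overflow;
  strlcpy() truncates at the destination size and strim() drops the trailing
  newline.  A hypothetical helper showing the idea: ]

	static ssize_t store_bounded(char *dst, size_t dst_size,
				     const char *page, size_t count)
	{
		strlcpy(dst, page, dst_size);	/* copies at most dst_size - 1 bytes */
		strim(dst);			/* trims trailing whitespace in place */
		return count;
	}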
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 51a6b41..0c4ae69 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -2,10 +2,9 @@
 /*
  * core.c - Implementation of core module of MOST Linux driver stack
  *
- * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ * Copyright (C) 2013-2020 Microchip Technology Germany II GmbH & Co. KG
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
@@ -21,25 +20,18 @@
 #include <linux/kthread.h>
 #include <linux/dma-mapping.h>
 #include <linux/idr.h>
-#include <most/core.h>
+
+#include "most.h"
 
 #define MAX_CHANNELS	64
 #define STRING_SIZE	80
 
 static struct ida mdev_id;
 static int dummy_num_buffers;
-
-static struct mostcore {
-	struct device dev;
-	struct device_driver drv;
-	struct bus_type bus;
-	struct list_head comp_list;
-} mc;
-
-#define to_driver(d) container_of(d, struct mostcore, drv)
+static struct list_head comp_list;
 
 struct pipe {
-	struct core_component *comp;
+	struct most_component *comp;
 	int refs;
 	int num_buffers;
 };
@@ -151,7 +143,7 @@ static void flush_channel_fifos(struct most_channel *c)
 	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
 
 	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
-		pr_info("WARN: fifo | trash fifo not empty\n");
+		dev_warn(&c->dev, "Channel or trash fifo not empty\n");
 }
 
 /**
@@ -402,7 +394,7 @@ static ssize_t description_show(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
-	struct most_interface *iface = to_most_interface(dev);
+	struct most_interface *iface = dev_get_drvdata(dev);
 
 	return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
 }
@@ -411,7 +403,7 @@ static ssize_t interface_show(struct device *dev,
 			      struct device_attribute *attr,
 			      char *buf)
 {
-	struct most_interface *iface = to_most_interface(dev);
+	struct most_interface *iface = dev_get_drvdata(dev);
 
 	switch (iface->interface) {
 	case ITYPE_LOOPBACK:
@@ -454,11 +446,11 @@ static const struct attribute_group *interface_attr_groups[] = {
 	NULL,
 };
 
-static struct core_component *match_component(char *name)
+static struct most_component *match_component(char *name)
 {
-	struct core_component *comp;
+	struct most_component *comp;
 
-	list_for_each_entry(comp, &mc.comp_list, list) {
+	list_for_each_entry(comp, &comp_list, list) {
 		if (!strcmp(comp->name, name))
 			return comp;
 	}
@@ -476,7 +468,7 @@ static int print_links(struct device *dev, void *data)
 	int offs = d->offs;
 	char *buf = d->buf;
 	struct most_channel *c;
-	struct most_interface *iface = to_most_interface(dev);
+	struct most_interface *iface = dev_get_drvdata(dev);
 
 	list_for_each_entry(c, &iface->p->channel_list, list) {
 		if (c->pipe0.comp) {
@@ -484,7 +476,7 @@ static int print_links(struct device *dev, void *data)
 					 PAGE_SIZE - offs,
 					 "%s:%s:%s\n",
 					 c->pipe0.comp->name,
-					 dev_name(&iface->dev),
+					 dev_name(iface->dev),
 					 dev_name(&c->dev));
 		}
 		if (c->pipe1.comp) {
@@ -492,7 +484,7 @@ static int print_links(struct device *dev, void *data)
 					 PAGE_SIZE - offs,
 					 "%s:%s:%s\n",
 					 c->pipe1.comp->name,
-					 dev_name(&iface->dev),
+					 dev_name(iface->dev),
 					 dev_name(&c->dev));
 		}
 	}
@@ -500,20 +492,33 @@ static int print_links(struct device *dev, void *data)
 	return 0;
 }
 
+static int most_match(struct device *dev, struct device_driver *drv)
+{
+	if (!strcmp(dev_name(dev), "most"))
+		return 0;
+	else
+		return 1;
+}
+
+static struct bus_type mostbus = {
+	.name = "most",
+	.match = most_match,
+};
+
 static ssize_t links_show(struct device_driver *drv, char *buf)
 {
 	struct show_links_data d = { .buf = buf };
 
-	bus_for_each_dev(&mc.bus, NULL, &d, print_links);
+	bus_for_each_dev(&mostbus, NULL, &d, print_links);
 	return d.offs;
 }
 
 static ssize_t components_show(struct device_driver *drv, char *buf)
 {
-	struct core_component *comp;
+	struct most_component *comp;
 	int offs = 0;
 
-	list_for_each_entry(comp, &mc.comp_list, list) {
+	list_for_each_entry(comp, &comp_list, list) {
 		offs += snprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
 				 comp->name);
 	}
@@ -531,10 +536,11 @@ static struct most_channel *get_channel(char *mdev, char *mdev_ch)
 	struct most_interface *iface;
 	struct most_channel *c, *tmp;
 
-	dev = bus_find_device_by_name(&mc.bus, NULL, mdev);
+	dev = bus_find_device_by_name(&mostbus, NULL, mdev);
 	if (!dev)
 		return NULL;
-	iface = to_most_interface(dev);
+	put_device(dev);
+	iface = dev_get_drvdata(dev);
 	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
 		if (!strcmp(dev_name(&c->dev), mdev_ch))
 			return c;
@@ -544,12 +550,12 @@ static struct most_channel *get_channel(char *mdev, char *mdev_ch)
 
 static
 inline int link_channel_to_component(struct most_channel *c,
-				     struct core_component *comp,
+				     struct most_component *comp,
 				     char *name,
 				     char *comp_param)
 {
 	int ret;
-	struct core_component **comp_ptr;
+	struct most_component **comp_ptr;
 
 	if (!c->pipe0.comp)
 		comp_ptr = &c->pipe0.comp;
@@ -623,7 +629,7 @@ int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf)
 	}
 
 	if (i == ARRAY_SIZE(ch_data_type))
-		pr_info("WARN: invalid attribute settings\n");
+		dev_warn(&c->dev, "Invalid attribute settings\n");
 	return 0;
 }
 
@@ -642,7 +648,7 @@ int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
 	} else if (!strcmp(buf, "tx")) {
 		c->cfg.direction = MOST_CH_TX;
 	} else {
-		pr_info("Invalid direction\n");
+		dev_err(&c->dev, "Invalid direction\n");
 		return -ENODATA;
 	}
 	return 0;
@@ -660,7 +666,7 @@ int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val)
 
 int most_cfg_complete(char *comp_name)
 {
-	struct core_component *comp;
+	struct most_component *comp;
 
 	comp = match_component(comp_name);
 	if (!comp)
@@ -673,7 +679,7 @@ int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
 		  char *comp_param)
 {
 	struct most_channel *c = get_channel(mdev, mdev_ch);
-	struct core_component *comp = match_component(comp_name);
+	struct most_component *comp = match_component(comp_name);
 
 	if (!c || !comp)
 		return -ENODEV;
@@ -684,7 +690,7 @@ int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
 int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
 {
 	struct most_channel *c;
-	struct core_component *comp;
+	struct most_component *comp;
 
 	comp = match_component(comp_name);
 	if (!comp)
@@ -722,13 +728,11 @@ static const struct attribute_group *mc_attr_groups[] = {
 	NULL,
 };
 
-static int most_match(struct device *dev, struct device_driver *drv)
-{
-	if (!strcmp(dev_name(dev), "most"))
-		return 0;
-	else
-		return 1;
-}
+static struct device_driver mostbus_driver = {
+	.name = "most_core",
+	.bus = &mostbus,
+	.groups = mc_attr_groups,
+};
 
 static inline void trash_mbo(struct mbo *mbo)
 {
@@ -795,7 +799,7 @@ static int hdm_enqueue_thread(void *data)
 		mutex_unlock(&c->nq_mutex);
 
 		if (unlikely(ret)) {
-			pr_err("hdm enqueue failed\n");
+			dev_err(&c->dev, "Buffer enqueue failed\n");
 			nq_hdm_mbo(mbo);
 			c->hdm_enqueue_task = NULL;
 			return 0;
@@ -922,7 +926,7 @@ static int arm_mbo_chain(struct most_channel *c, int dir,
 void most_submit_mbo(struct mbo *mbo)
 {
 	if (WARN_ONCE(!mbo || !mbo->context,
-		      "bad mbo or missing channel reference\n"))
+		      "Bad buffer or missing channel reference\n"))
 		return;
 
 	nq_hdm_mbo(mbo);
@@ -941,8 +945,6 @@ static void most_write_completion(struct mbo *mbo)
 	struct most_channel *c;
 
 	c = mbo->context;
-	if (mbo->status == MBO_E_INVAL)
-		pr_info("WARN: Tx MBO status: invalid\n");
 	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
 		trash_mbo(mbo);
 	else
@@ -950,7 +952,7 @@ static void most_write_completion(struct mbo *mbo)
 }
 
 int channel_has_mbo(struct most_interface *iface, int id,
-		    struct core_component *comp)
+		    struct most_component *comp)
 {
 	struct most_channel *c = iface->p->channel[id];
 	unsigned long flags;
@@ -981,7 +983,7 @@ EXPORT_SYMBOL_GPL(channel_has_mbo);
  * Returns a pointer to MBO on success or NULL otherwise.
  */
 struct mbo *most_get_mbo(struct most_interface *iface, int id,
-			 struct core_component *comp)
+			 struct most_component *comp)
 {
 	struct mbo *mbo;
 	struct most_channel *c;
@@ -1087,7 +1089,7 @@ static void most_read_completion(struct mbo *mbo)
  * Returns 0 on success or error code otherwise.
  */
 int most_start_channel(struct most_interface *iface, int id,
-		       struct core_component *comp)
+		       struct most_component *comp)
 {
 	int num_buffer;
 	int ret;
@@ -1101,14 +1103,14 @@ int most_start_channel(struct most_interface *iface, int id,
 		goto out; /* already started by another component */
 
 	if (!try_module_get(iface->mod)) {
-		pr_info("failed to acquire HDM lock\n");
+		dev_err(&c->dev, "Failed to acquire HDM lock\n");
 		mutex_unlock(&c->start_mutex);
 		return -ENOLCK;
 	}
 
 	c->cfg.extra_len = 0;
 	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
-		pr_info("channel configuration failed. Go check settings...\n");
+		dev_err(&c->dev, "Channel configuration failed. Go check settings...\n");
 		ret = -EINVAL;
 		goto err_put_module;
 	}
@@ -1157,7 +1159,7 @@ EXPORT_SYMBOL_GPL(most_start_channel);
  * @comp: driver component
  */
 int most_stop_channel(struct most_interface *iface, int id,
-		      struct core_component *comp)
+		      struct most_component *comp)
 {
 	struct most_channel *c;
 
@@ -1182,8 +1184,8 @@ int most_stop_channel(struct most_interface *iface, int id,
 
 	c->is_poisoned = true;
 	if (c->iface->poison_channel(c->iface, c->channel_id)) {
-		pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
-		       c->iface->description);
+		dev_err(&c->dev, "Failed to stop channel %d of interface %s\n", c->channel_id,
+			c->iface->description);
 		mutex_unlock(&c->start_mutex);
 		return -EAGAIN;
 	}
@@ -1192,7 +1194,7 @@ int most_stop_channel(struct most_interface *iface, int id,
 
 #ifdef CMPL_INTERRUPTIBLE
 	if (wait_for_completion_interruptible(&c->cleanup)) {
-		pr_info("Interrupted while clean up ch %d\n", c->channel_id);
+		dev_err(&c->dev, "Interrupted while cleaning up channel %d\n", c->channel_id);
 		mutex_unlock(&c->start_mutex);
 		return -EINTR;
 	}
@@ -1215,14 +1217,13 @@ EXPORT_SYMBOL_GPL(most_stop_channel);
  * most_register_component - registers a driver component with the core
  * @comp: driver component
  */
-int most_register_component(struct core_component *comp)
+int most_register_component(struct most_component *comp)
 {
 	if (!comp) {
 		pr_err("Bad component\n");
 		return -EINVAL;
 	}
-	list_add_tail(&comp->list, &mc.comp_list);
-	pr_info("registered new core component %s\n", comp->name);
+	list_add_tail(&comp->list, &comp_list);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(most_register_component);
@@ -1231,9 +1232,9 @@ static int disconnect_channels(struct device *dev, void *data)
 {
 	struct most_interface *iface;
 	struct most_channel *c, *tmp;
-	struct core_component *comp = data;
+	struct most_component *comp = data;
 
-	iface = to_most_interface(dev);
+	iface = dev_get_drvdata(dev);
 	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
 		if (c->pipe0.comp == comp || c->pipe1.comp == comp)
 			comp->disconnect_channel(c->iface, c->channel_id);
@@ -1249,28 +1250,24 @@ static int disconnect_channels(struct device *dev, void *data)
  * most_deregister_component - deregisters a driver component with the core
  * @comp: driver component
  */
-int most_deregister_component(struct core_component *comp)
+int most_deregister_component(struct most_component *comp)
 {
 	if (!comp) {
 		pr_err("Bad component\n");
 		return -EINVAL;
 	}
 
-	bus_for_each_dev(&mc.bus, NULL, comp, disconnect_channels);
+	bus_for_each_dev(&mostbus, NULL, comp, disconnect_channels);
 	list_del(&comp->list);
-	pr_info("deregistering component %s\n", comp->name);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(most_deregister_component);
 
-static void release_interface(struct device *dev)
-{
-	pr_info("releasing interface dev %s...\n", dev_name(dev));
-}
-
 static void release_channel(struct device *dev)
 {
-	pr_info("releasing channel dev %s...\n", dev_name(dev));
+	struct most_channel *c = to_channel(dev);
+
+	kfree(c);
 }
 
 /**
@@ -1288,13 +1285,13 @@ int most_register_interface(struct most_interface *iface)
 
 	if (!iface || !iface->enqueue || !iface->configure ||
 	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
-		pr_err("Bad interface or channel overflow\n");
+		dev_err(iface->dev, "Bad interface or channel overflow\n");
 		return -EINVAL;
 	}
 
 	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
 	if (id < 0) {
-		pr_info("Failed to alloc mdev ID\n");
+		dev_err(iface->dev, "Failed to allocate device ID\n");
 		return id;
 	}
 
@@ -1307,14 +1304,13 @@ int most_register_interface(struct most_interface *iface)
 	INIT_LIST_HEAD(&iface->p->channel_list);
 	iface->p->dev_id = id;
 	strscpy(iface->p->name, iface->description, sizeof(iface->p->name));
-	iface->dev.init_name = iface->p->name;
-	iface->dev.bus = &mc.bus;
-	iface->dev.parent = &mc.dev;
-	iface->dev.groups = interface_attr_groups;
-	iface->dev.release = release_interface;
-	if (device_register(&iface->dev)) {
-		pr_err("registering iface->dev failed\n");
+	iface->dev->bus = &mostbus;
+	iface->dev->groups = interface_attr_groups;
+	dev_set_drvdata(iface->dev, iface);
+	if (device_register(iface->dev)) {
+		dev_err(iface->dev, "Failed to register interface device\n");
 		kfree(iface->p);
+		put_device(iface->dev);
 		ida_simple_remove(&mdev_id, id);
 		return -ENOMEM;
 	}
@@ -1330,7 +1326,7 @@ int most_register_interface(struct most_interface *iface)
 		else
 			snprintf(c->name, STRING_SIZE, "%s", name_suffix);
 		c->dev.init_name = c->name;
-		c->dev.parent = &iface->dev;
+		c->dev.parent = iface->dev;
 		c->dev.groups = channel_attr_groups;
 		c->dev.release = release_channel;
 		iface->p->channel[i] = c;
@@ -1356,26 +1352,23 @@ int most_register_interface(struct most_interface *iface)
 		mutex_init(&c->nq_mutex);
 		list_add_tail(&c->list, &iface->p->channel_list);
 		if (device_register(&c->dev)) {
-			pr_err("registering c->dev failed\n");
+			dev_err(&c->dev, "Failed to register channel device\n");
 			goto err_free_most_channel;
 		}
 	}
-	pr_info("registered new device mdev%d (%s)\n",
-		id, iface->description);
 	most_interface_register_notify(iface->description);
 	return 0;
 
 err_free_most_channel:
-	kfree(c);
+	put_device(&c->dev);
 
 err_free_resources:
 	while (i > 0) {
 		c = iface->p->channel[--i];
 		device_unregister(&c->dev);
-		kfree(c);
 	}
 	kfree(iface->p);
-	device_unregister(&iface->dev);
+	device_unregister(iface->dev);
 	ida_simple_remove(&mdev_id, id);
 	return -ENOMEM;
 }
@@ -1393,8 +1386,6 @@ void most_deregister_interface(struct most_interface *iface)
 	int i;
 	struct most_channel *c;
 
-	pr_info("deregistering device %s (%s)\n", dev_name(&iface->dev),
-		iface->description);
 	for (i = 0; i < iface->num_channels; i++) {
 		c = iface->p->channel[i];
 		if (c->pipe0.comp)
@@ -1407,12 +1398,11 @@ void most_deregister_interface(struct most_interface *iface)
 		c->pipe1.comp = NULL;
 		list_del(&c->list);
 		device_unregister(&c->dev);
-		kfree(c);
 	}
 
 	ida_simple_remove(&mdev_id, iface->p->dev_id);
 	kfree(iface->p);
-	device_unregister(&iface->dev);
+	device_unregister(iface->dev);
 }
 EXPORT_SYMBOL_GPL(most_deregister_interface);
 
@@ -1462,57 +1452,35 @@ void most_resume_enqueue(struct most_interface *iface, int id)
 }
 EXPORT_SYMBOL_GPL(most_resume_enqueue);
 
-static void release_most_sub(struct device *dev)
-{
-	pr_info("releasing most_subsystem\n");
-}
-
 static int __init most_init(void)
 {
 	int err;
 
-	pr_info("init()\n");
-	INIT_LIST_HEAD(&mc.comp_list);
+	INIT_LIST_HEAD(&comp_list);
 	ida_init(&mdev_id);
 
-	mc.bus.name = "most",
-	mc.bus.match = most_match,
-	mc.drv.name = "most_core",
-	mc.drv.bus = &mc.bus,
-	mc.drv.groups = mc_attr_groups;
-
-	err = bus_register(&mc.bus);
+	err = bus_register(&mostbus);
 	if (err) {
-		pr_info("Cannot register most bus\n");
+		pr_err("Failed to register most bus\n");
 		return err;
 	}
-	err = driver_register(&mc.drv);
+	err = driver_register(&mostbus_driver);
 	if (err) {
-		pr_info("Cannot register core driver\n");
+		pr_err("Failed to register core driver\n");
 		goto err_unregister_bus;
 	}
-	mc.dev.init_name = "most_bus";
-	mc.dev.release = release_most_sub;
-	if (device_register(&mc.dev)) {
-		err = -ENOMEM;
-		goto err_unregister_driver;
-	}
 	configfs_init();
 	return 0;
 
-err_unregister_driver:
-	driver_unregister(&mc.drv);
 err_unregister_bus:
-	bus_unregister(&mc.bus);
+	bus_unregister(&mostbus);
 	return err;
 }
 
 static void __exit most_exit(void)
 {
-	pr_info("exit core module\n");
-	device_unregister(&mc.dev);
-	driver_unregister(&mc.drv);
-	bus_unregister(&mc.bus);
+	driver_unregister(&mostbus_driver);
+	bus_unregister(&mostbus);
 	ida_destroy(&mdev_id);
 }
 
diff --git a/drivers/staging/most/core.h b/drivers/staging/most/core.h
deleted file mode 100644
index 49859ae..0000000
--- a/drivers/staging/most/core.h
+++ /dev/null
@@ -1,339 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * most.h - API for component and adapter drivers
- *
- * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
- */
-
-#ifndef __MOST_CORE_H__
-#define __MOST_CORE_H__
-
-#include <linux/types.h>
-#include <linux/device.h>
-
-struct module;
-struct interface_private;
-
-/**
- * Interface type
- */
-enum most_interface_type {
-	ITYPE_LOOPBACK = 1,
-	ITYPE_I2C,
-	ITYPE_I2S,
-	ITYPE_TSI,
-	ITYPE_HBI,
-	ITYPE_MEDIALB_DIM,
-	ITYPE_MEDIALB_DIM2,
-	ITYPE_USB,
-	ITYPE_PCIE
-};
-
-/**
- * Channel direction.
- */
-enum most_channel_direction {
-	MOST_CH_RX = 1 << 0,
-	MOST_CH_TX = 1 << 1,
-};
-
-/**
- * Channel data type.
- */
-enum most_channel_data_type {
-	MOST_CH_CONTROL = 1 << 0,
-	MOST_CH_ASYNC = 1 << 1,
-	MOST_CH_ISOC = 1 << 2,
-	MOST_CH_SYNC = 1 << 5,
-};
-
-enum mbo_status_flags {
-	/* MBO was processed successfully (data was send or received )*/
-	MBO_SUCCESS = 0,
-	/* The MBO contains wrong or missing information.  */
-	MBO_E_INVAL,
-	/* MBO was completed as HDM Channel will be closed */
-	MBO_E_CLOSE,
-};
-
-/**
- * struct most_channel_capability - Channel capability
- * @direction: Supported channel directions.
- * The value is bitwise OR-combination of the values from the
- * enumeration most_channel_direction. Zero is allowed value and means
- * "channel may not be used".
- * @data_type: Supported channel data types.
- * The value is bitwise OR-combination of the values from the
- * enumeration most_channel_data_type. Zero is allowed value and means
- * "channel may not be used".
- * @num_buffers_packet: Maximum number of buffers supported by this channel
- * for packet data types (Async,Control,QoS)
- * @buffer_size_packet: Maximum buffer size supported by this channel
- * for packet data types (Async,Control,QoS)
- * @num_buffers_streaming: Maximum number of buffers supported by this channel
- * for streaming data types (Sync,AV Packetized)
- * @buffer_size_streaming: Maximum buffer size supported by this channel
- * for streaming data types (Sync,AV Packetized)
- * @name_suffix: Optional suffix providean by an HDM that is attached to the
- * regular channel name.
- *
- * Describes the capabilities of a MOST channel like supported Data Types
- * and directions. This information is provided by an HDM for the MostCore.
- *
- * The Core creates read only sysfs attribute files in
- * /sys/devices/most/mdev#/<channel>/ with the
- * following attributes:
- *	-available_directions
- *	-available_datatypes
- *	-number_of_packet_buffers
- *	-number_of_stream_buffers
- *	-size_of_packet_buffer
- *	-size_of_stream_buffer
- * where content of each file is a string with all supported properties of this
- * very channel attribute.
- */
-struct most_channel_capability {
-	u16 direction;
-	u16 data_type;
-	u16 num_buffers_packet;
-	u16 buffer_size_packet;
-	u16 num_buffers_streaming;
-	u16 buffer_size_streaming;
-	const char *name_suffix;
-};
-
-/**
- * struct most_channel_config - stores channel configuration
- * @direction: direction of the channel
- * @data_type: data type travelling over this channel
- * @num_buffers: number of buffers
- * @buffer_size: size of a buffer for AIM.
- * Buffer size may be cutted down by HDM in a configure callback
- * to match to a given interface and channel type.
- * @extra_len: additional buffer space for internal HDM purposes like padding.
- * May be set by HDM in a configure callback if needed.
- * @subbuffer_size: size of a subbuffer
- * @packets_per_xact: number of MOST frames that are packet inside one USB
- *		      packet. This is USB specific
- *
- * Describes the configuration for a MOST channel. This information is
- * provided from the MostCore to a HDM (like the Medusa PCIe Interface) as a
- * parameter of the "configure" function call.
- */
-struct most_channel_config {
-	enum most_channel_direction direction;
-	enum most_channel_data_type data_type;
-	u16 num_buffers;
-	u16 buffer_size;
-	u16 extra_len;
-	u16 subbuffer_size;
-	u16 packets_per_xact;
-	u16 dbr_size;
-};
-
-/*
- * struct mbo - MOST Buffer Object.
- * @context: context for core completion handler
- * @priv: private data for HDM
- *
- *	public: documented fields that are used for the communications
- *	between MostCore and HDMs
- *
- * @list: list head for use by the mbo's current owner
- * @ifp: (in) associated interface instance
- * @num_buffers_ptr: amount of pool buffers
- * @hdm_channel_id: (in) HDM channel instance
- * @virt_address: (in) kernel virtual address of the buffer
- * @bus_address: (in) bus address of the buffer
- * @buffer_length: (in) buffer payload length
- * @processed_length: (out) processed length
- * @status: (out) transfer status
- * @complete: (in) completion routine
- *
- * The core allocates and initializes the MBO.
- *
- * The HDM receives MBO for transfer from the core with the call to enqueue().
- * The HDM copies the data to- or from the buffer depending on configured
- * channel direction, set "processed_length" and "status" and completes
- * the transfer procedure by calling the completion routine.
- *
- * Finally, the MBO is being deallocated or recycled for further
- * transfers of the same or a different HDM.
- *
- * Directions of usage:
- * The core driver should never access any MBO fields (even if marked
- * as "public") while the MBO is owned by an HDM. The ownership starts with
- * the call of enqueue() and ends with the call of its complete() routine.
- *
- *					II.
- * Every HDM attached to the core driver _must_ ensure that it returns any MBO
- * it owns (due to a previous call to enqueue() by the core driver) before it
- * de-registers an interface or gets unloaded from the kernel. If this direction
- * is violated memory leaks will occur, since the core driver does _not_ track
- * MBOs it is currently not in control of.
- *
- */
-struct mbo {
-	void *context;
-	void *priv;
-	struct list_head list;
-	struct most_interface *ifp;
-	int *num_buffers_ptr;
-	u16 hdm_channel_id;
-	void *virt_address;
-	dma_addr_t bus_address;
-	u16 buffer_length;
-	u16 processed_length;
-	enum mbo_status_flags status;
-	void (*complete)(struct mbo *mbo);
-};
-
-/**
- * Interface instance description.
- *
- * Describes an interface of a MOST device the core driver is bound to.
- * This structure is allocated and initialized in the HDM. MostCore may not
- * modify this structure.
- *
- * @dev: the actual device
- * @mod: module
- * @interface Interface type. \sa most_interface_type.
- * @description PRELIMINARY.
- *   Unique description of the device instance from point of view of the
- *   interface in free text form (ASCII).
- *   It may be a hexadecimal presentation of the memory address for the MediaLB
- *   IP or USB device ID with USB properties for USB interface, etc.
- * @num_channels Number of channels and size of the channel_vector.
- * @channel_vector Properties of the channels.
- *   Array index represents channel ID by the driver.
- * @configure Callback to change data type for the channel of the
- *   interface instance. May be zero if the instance of the interface is not
- *   configurable. Parameter channel_config describes direction and data
- *   type for the channel, configured by the higher level. The content of
- * @enqueue Delivers MBO to the HDM for processing.
- *   After HDM completes Rx- or Tx- operation the processed MBO shall
- *   be returned back to the MostCore using completion routine.
- *   The reason to get the MBO delivered from the MostCore after the channel
- *   is poisoned is the re-opening of the channel by the application.
- *   In this case the HDM shall hold MBOs and service the channel as usual.
- *   The HDM must be able to hold at least one MBO for each channel.
- *   The callback returns a negative value on error, otherwise 0.
- * @poison_channel Informs HDM about closing the channel. The HDM shall
- *   cancel all transfers and synchronously or asynchronously return
- *   all enqueued for this channel MBOs using the completion routine.
- *   The callback returns a negative value on error, otherwise 0.
- * @request_netinfo: triggers retrieving of network info from the HDM by
- *   means of "Message exchange over MDP/MEP"
- *   The call of the function request_netinfo with the parameter on_netinfo as
- *   NULL prohibits use of the previously obtained function pointer.
- * @priv Private field used by mostcore to store context information.
- */
-struct most_interface {
-	struct device dev;
-	struct device *driver_dev;
-	struct module *mod;
-	enum most_interface_type interface;
-	const char *description;
-	unsigned int num_channels;
-	struct most_channel_capability *channel_vector;
-	void *(*dma_alloc)(struct mbo *mbo, u32 size);
-	void (*dma_free)(struct mbo *mbo, u32 size);
-	int (*configure)(struct most_interface *iface, int channel_idx,
-			 struct most_channel_config *channel_config);
-	int (*enqueue)(struct most_interface *iface, int channel_idx,
-		       struct mbo *mbo);
-	int (*poison_channel)(struct most_interface *iface, int channel_idx);
-	void (*request_netinfo)(struct most_interface *iface, int channel_idx,
-				void (*on_netinfo)(struct most_interface *iface,
-						   unsigned char link_stat,
-						   unsigned char *mac_addr));
-	void *priv;
-	struct interface_private *p;
-};
-
-#define to_most_interface(d) container_of(d, struct most_interface, dev)
-
-/**
- * struct core_component - identifies a loadable component for the mostcore
- * @list: list_head
- * @name: component name
- * @probe_channel: function for core to notify driver about channel connection
- * @disconnect_channel: callback function to disconnect a certain channel
- * @rx_completion: completion handler for received packets
- * @tx_completion: completion handler for transmitted packets
- */
-struct core_component {
-	struct list_head list;
-	const char *name;
-	struct module *mod;
-	int (*probe_channel)(struct most_interface *iface, int channel_idx,
-			     struct most_channel_config *cfg, char *name,
-			     char *param);
-	int (*disconnect_channel)(struct most_interface *iface,
-				  int channel_idx);
-	int (*rx_completion)(struct mbo *mbo);
-	int (*tx_completion)(struct most_interface *iface, int channel_idx);
-	int (*cfg_complete)(void);
-};
-
-/**
- * most_register_interface - Registers instance of the interface.
- * @iface: Pointer to the interface instance description.
- *
- * Returns a pointer to the kobject of the generated instance.
- *
- * Note: HDM has to ensure that any reference held on the kobj is
- * released before deregistering the interface.
- */
-int most_register_interface(struct most_interface *iface);
-
-/**
- * Deregisters instance of the interface.
- * @intf_instance Pointer to the interface instance description.
- */
-void most_deregister_interface(struct most_interface *iface);
-void most_submit_mbo(struct mbo *mbo);
-
-/**
- * most_stop_enqueue - prevents core from enqueing MBOs
- * @iface: pointer to interface
- * @channel_idx: channel index
- */
-void most_stop_enqueue(struct most_interface *iface, int channel_idx);
-
-/**
- * most_resume_enqueue - allow core to enqueue MBOs again
- * @iface: pointer to interface
- * @channel_idx: channel index
- *
- * This clears the enqueue halt flag and enqueues all MBOs currently
- * in wait fifo.
- */
-void most_resume_enqueue(struct most_interface *iface, int channel_idx);
-int most_register_component(struct core_component *comp);
-int most_deregister_component(struct core_component *comp);
-struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
-			 struct core_component *comp);
-void most_put_mbo(struct mbo *mbo);
-int channel_has_mbo(struct most_interface *iface, int channel_idx,
-		    struct core_component *comp);
-int most_start_channel(struct most_interface *iface, int channel_idx,
-		       struct core_component *comp);
-int most_stop_channel(struct most_interface *iface, int channel_idx,
-		      struct core_component *comp);
-int __init configfs_init(void);
-int most_register_configfs_subsys(struct core_component *comp);
-void most_deregister_configfs_subsys(struct core_component *comp);
-int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
-		  char *comp_param);
-int most_remove_link(char *mdev, char *mdev_ch, char *comp_name);
-int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val);
-int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val);
-int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val);
-int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val);
-int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf);
-int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf);
-int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val);
-int most_cfg_complete(char *comp_name);
-void most_interface_register_notify(const char *mdev_name);
-#endif /* MOST_CORE_H_ */
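[ Reviewer note, not part of the patch: the header removed above is replaced
  by drivers/staging/most/most.h further down, which renames struct
  core_component to struct most_component.  A minimal sketch, with hypothetical
  callbacks, of a component registering against the renamed API: ]

	static int my_probe_channel(struct most_interface *iface, int channel_idx,
				    struct most_channel_config *cfg, char *name,
				    char *param)
	{
		return 0;	/* hypothetical: accept every channel */
	}

	static int my_disconnect_channel(struct most_interface *iface,
					 int channel_idx)
	{
		return 0;
	}

	static struct most_component my_comp = {
		.mod = THIS_MODULE,
		.name = "my_comp",
		.probe_channel = my_probe_channel,
		.disconnect_channel = my_disconnect_channel,
	};

	static int __init my_comp_init(void)
	{
		return most_register_component(&my_comp);
	}

	static void __exit my_comp_exit(void)
	{
		most_deregister_component(&my_comp);
	}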
diff --git a/drivers/staging/most/dim2/Makefile b/drivers/staging/most/dim2/Makefile
index 116f04d..861adac 100644
--- a/drivers/staging/most/dim2/Makefile
+++ b/drivers/staging/most/dim2/Makefile
@@ -2,4 +2,3 @@
 obj-$(CONFIG_MOST_DIM2) += most_dim2.o
 
 most_dim2-objs := dim2.o hal.o sysfs.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index 64c9791..1659328 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -21,7 +21,7 @@
 #include <linux/sched.h>
 #include <linux/kthread.h>
 
-#include "most/core.h"
+#include "../most.h"
 #include "hal.h"
 #include "errors.h"
 #include "sysfs.h"
@@ -854,8 +854,9 @@ static int dim2_probe(struct platform_device *pdev)
 	dev->most_iface.poison_channel = poison_channel;
 	dev->most_iface.request_netinfo = request_netinfo;
 	dev->most_iface.driver_dev = &pdev->dev;
+	dev->most_iface.dev = &dev->dev;
 	dev->dev.init_name = "dim2_state";
-	dev->dev.parent = &dev->most_iface.dev;
+	dev->dev.parent = &pdev->dev;
 
 	ret = most_register_interface(&dev->most_iface);
 	if (ret) {
diff --git a/drivers/staging/most/i2c/Makefile b/drivers/staging/most/i2c/Makefile
index 2b3769d..71099dd 100644
--- a/drivers/staging/most/i2c/Makefile
+++ b/drivers/staging/most/i2c/Makefile
@@ -2,4 +2,3 @@
 obj-$(CONFIG_MOST_I2C) += most_i2c.o
 
 most_i2c-objs := i2c.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/i2c/i2c.c b/drivers/staging/most/i2c/i2c.c
index 4a4fc10..2980f70 100644
--- a/drivers/staging/most/i2c/i2c.c
+++ b/drivers/staging/most/i2c/i2c.c
@@ -14,7 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/err.h>
 
-#include "most/core.h"
+#include "../most.h"
 
 enum { CH_RX, CH_TX, NUM_CHANNELS };
 
diff --git a/drivers/staging/most/most.h b/drivers/staging/most/most.h
new file mode 100644
index 0000000..232e01b
--- /dev/null
+++ b/drivers/staging/most/most.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * most.h - API for component and adapter drivers
+ *
+ * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
+ */
+
+#ifndef __MOST_CORE_H__
+#define __MOST_CORE_H__
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct module;
+struct interface_private;
+
+/**
+ * Interface type
+ */
+enum most_interface_type {
+	ITYPE_LOOPBACK = 1,
+	ITYPE_I2C,
+	ITYPE_I2S,
+	ITYPE_TSI,
+	ITYPE_HBI,
+	ITYPE_MEDIALB_DIM,
+	ITYPE_MEDIALB_DIM2,
+	ITYPE_USB,
+	ITYPE_PCIE
+};
+
+/**
+ * Channel direction.
+ */
+enum most_channel_direction {
+	MOST_CH_RX = 1 << 0,
+	MOST_CH_TX = 1 << 1,
+};
+
+/**
+ * Channel data type.
+ */
+enum most_channel_data_type {
+	MOST_CH_CONTROL = 1 << 0,
+	MOST_CH_ASYNC = 1 << 1,
+	MOST_CH_ISOC = 1 << 2,
+	MOST_CH_SYNC = 1 << 5,
+};
+
+enum most_status_flags {
+	/* MBO was processed successfully (data was sent or received) */
+	MBO_SUCCESS = 0,
+	/* The MBO contains wrong or missing information.  */
+	MBO_E_INVAL,
+	/* MBO was completed as HDM Channel will be closed */
+	MBO_E_CLOSE,
+};
+
+/**
+ * struct most_channel_capability - Channel capability
+ * @direction: Supported channel directions.
+ * The value is bitwise OR-combination of the values from the
+ * enumeration most_channel_direction. Zero is an allowed value and means
+ * "channel may not be used".
+ * @data_type: Supported channel data types.
+ * The value is bitwise OR-combination of the values from the
+ * enumeration most_channel_data_type. Zero is an allowed value and means
+ * "channel may not be used".
+ * @num_buffers_packet: Maximum number of buffers supported by this channel
+ * for packet data types (Async,Control,QoS)
+ * @buffer_size_packet: Maximum buffer size supported by this channel
+ * for packet data types (Async,Control,QoS)
+ * @num_buffers_streaming: Maximum number of buffers supported by this channel
+ * for streaming data types (Sync,AV Packetized)
+ * @buffer_size_streaming: Maximum buffer size supported by this channel
+ * for streaming data types (Sync,AV Packetized)
+ * @name_suffix: Optional suffix provided by an HDM that is attached to the
+ * regular channel name.
+ *
+ * Describes the capabilities of a MOST channel like supported Data Types
+ * and directions. This information is provided by an HDM for the MostCore.
+ *
+ * The Core creates read only sysfs attribute files in
+ * /sys/devices/most/mdev#/<channel>/ with the
+ * following attributes:
+ *	-available_directions
+ *	-available_datatypes
+ *	-number_of_packet_buffers
+ *	-number_of_stream_buffers
+ *	-size_of_packet_buffer
+ *	-size_of_stream_buffer
+ * where content of each file is a string with all supported properties of this
+ * very channel attribute.
+ */
+struct most_channel_capability {
+	u16 direction;
+	u16 data_type;
+	u16 num_buffers_packet;
+	u16 buffer_size_packet;
+	u16 num_buffers_streaming;
+	u16 buffer_size_streaming;
+	const char *name_suffix;
+};
+
+/**
+ * struct most_channel_config - stores channel configuration
+ * @direction: direction of the channel
+ * @data_type: data type travelling over this channel
+ * @num_buffers: number of buffers
+ * @buffer_size: size of a buffer for AIM.
+ * Buffer size may be cut down by the HDM in a configure callback
+ * to match to a given interface and channel type.
+ * @extra_len: additional buffer space for internal HDM purposes like padding.
+ * May be set by HDM in a configure callback if needed.
+ * @subbuffer_size: size of a subbuffer
+ * @packets_per_xact: number of MOST frames that are packed inside one USB
+ *		      packet. This is USB specific
+ *
+ * Describes the configuration for a MOST channel. This information is
+ * provided from the MostCore to a HDM (like the Medusa PCIe Interface) as a
+ * parameter of the "configure" function call.
+ */
+struct most_channel_config {
+	enum most_channel_direction direction;
+	enum most_channel_data_type data_type;
+	u16 num_buffers;
+	u16 buffer_size;
+	u16 extra_len;
+	u16 subbuffer_size;
+	u16 packets_per_xact;
+	u16 dbr_size;
+};
+
+/*
+ * struct mbo - MOST Buffer Object.
+ * @context: context for core completion handler
+ * @priv: private data for HDM
+ *
+ *	public: documented fields that are used for the communications
+ *	between MostCore and HDMs
+ *
+ * @list: list head for use by the mbo's current owner
+ * @ifp: (in) associated interface instance
+ * @num_buffers_ptr: amount of pool buffers
+ * @hdm_channel_id: (in) HDM channel instance
+ * @virt_address: (in) kernel virtual address of the buffer
+ * @bus_address: (in) bus address of the buffer
+ * @buffer_length: (in) buffer payload length
+ * @processed_length: (out) processed length
+ * @status: (out) transfer status
+ * @complete: (in) completion routine
+ *
+ * The core allocates and initializes the MBO.
+ *
+ * The HDM receives MBO for transfer from the core with the call to enqueue().
+ * The HDM copies the data to- or from the buffer depending on configured
+ * channel direction, set "processed_length" and "status" and completes
+ * the transfer procedure by calling the completion routine.
+ *
+ * Finally, the MBO is being deallocated or recycled for further
+ * transfers of the same or a different HDM.
+ *
+ * Directions of usage:
+ * The core driver should never access any MBO fields (even if marked
+ * as "public") while the MBO is owned by an HDM. The ownership starts with
+ * the call of enqueue() and ends with the call of its complete() routine.
+ *
+ *					II.
+ * Every HDM attached to the core driver _must_ ensure that it returns any MBO
+ * it owns (due to a previous call to enqueue() by the core driver) before it
+ * de-registers an interface or gets unloaded from the kernel. If this direction
+ * is violated memory leaks will occur, since the core driver does _not_ track
+ * MBOs it is currently not in control of.
+ *
+ */
+struct mbo {
+	void *context;
+	void *priv;
+	struct list_head list;
+	struct most_interface *ifp;
+	int *num_buffers_ptr;
+	u16 hdm_channel_id;
+	void *virt_address;
+	dma_addr_t bus_address;
+	u16 buffer_length;
+	u16 processed_length;
+	enum most_status_flags status;
+	void (*complete)(struct mbo *mbo);
+};
+
+/**
+ * struct most_interface - interface instance description
+ *
+ * Describes an interface of a MOST device the core driver is bound to.
+ * This structure is allocated and initialized in the HDM. MostCore may not
+ * modify this structure.
+ *
+ * @dev: the actual device
+ * @driver_dev: device of the driver this interface is bound to
+ * @mod: module
+ * @interface: interface type (see enum most_interface_type)
+ * @description: PRELIMINARY.
+ *   Unique description of the device instance from point of view of the
+ *   interface in free text form (ASCII).
+ *   It may be a hexadecimal presentation of the memory address for the MediaLB
+ *   IP or USB device ID with USB properties for USB interface, etc.
+ * @num_channels: number of channels and size of the channel_vector
+ * @channel_vector: properties of the channels.
+ *   The array index is used as the channel ID by the driver.
+ * @dma_alloc: callback to allocate a buffer of the given size for an MBO
+ * @dma_free: callback to free a buffer allocated via @dma_alloc
+ * @configure: callback to change data type for the channel of the
+ *   interface instance. May be zero if the instance of the interface is not
+ *   configurable. Parameter channel_config describes direction and data
+ *   type for the channel, configured by the higher level.
+ * @enqueue: delivers an MBO to the HDM for processing.
+ *   After the HDM completes the Rx or Tx operation, the processed MBO shall
+ *   be returned to the MostCore using the completion routine.
+ *   An MBO may still be delivered by the MostCore after the channel has been
+ *   poisoned; this happens when the application re-opens the channel.
+ *   In this case the HDM shall hold the MBOs and service the channel as usual.
+ *   The HDM must be able to hold at least one MBO for each channel.
+ *   The callback returns a negative value on error, otherwise 0.
+ * @poison_channel: informs the HDM about closing the channel. The HDM shall
+ *   cancel all transfers and synchronously or asynchronously return all
+ *   MBOs enqueued for this channel using the completion routine.
+ *   The callback returns a negative value on error, otherwise 0.
+ * @request_netinfo: triggers retrieval of network info from the HDM by
+ *   means of "Message exchange over MDP/MEP".
+ *   Calling request_netinfo with the parameter on_netinfo set to NULL
+ *   prohibits further use of the previously obtained function pointer.
+ * @priv: private field used by the mostcore to store context information
+ * @p: private data used by the core driver
+ */
+struct most_interface {
+	struct device *dev;
+	struct device *driver_dev;
+	struct module *mod;
+	enum most_interface_type interface;
+	const char *description;
+	unsigned int num_channels;
+	struct most_channel_capability *channel_vector;
+	void *(*dma_alloc)(struct mbo *mbo, u32 size);
+	void (*dma_free)(struct mbo *mbo, u32 size);
+	int (*configure)(struct most_interface *iface, int channel_idx,
+			 struct most_channel_config *channel_config);
+	int (*enqueue)(struct most_interface *iface, int channel_idx,
+		       struct mbo *mbo);
+	int (*poison_channel)(struct most_interface *iface, int channel_idx);
+	void (*request_netinfo)(struct most_interface *iface, int channel_idx,
+				void (*on_netinfo)(struct most_interface *iface,
+						   unsigned char link_stat,
+						   unsigned char *mac_addr));
+	void *priv;
+	struct interface_private *p;
+};
+
+/**
+ * struct most_component - identifies a loadable component for the mostcore
+ * @list: list_head
+ * @name: component name
+ * @mod: module
+ * @probe_channel: function for core to notify driver about channel connection
+ * @disconnect_channel: callback function to disconnect a certain channel
+ * @rx_completion: completion handler for received packets
+ * @tx_completion: completion handler for transmitted packets
+ * @cfg_complete: callback to notify the component that its configuration
+ *   is complete
+ */
+struct most_component {
+	struct list_head list;
+	const char *name;
+	struct module *mod;
+	int (*probe_channel)(struct most_interface *iface, int channel_idx,
+			     struct most_channel_config *cfg, char *name,
+			     char *param);
+	int (*disconnect_channel)(struct most_interface *iface,
+				  int channel_idx);
+	int (*rx_completion)(struct mbo *mbo);
+	int (*tx_completion)(struct most_interface *iface, int channel_idx);
+	int (*cfg_complete)(void);
+};
+
+/**
+ * most_register_interface - Registers instance of the interface.
+ * @iface: Pointer to the interface instance description.
+ *
+ * Returns 0 on success or a negative error code otherwise.
+ *
+ * Note: HDM has to ensure that any reference held on the kobj is
+ * released before deregistering the interface.
+ */
+int most_register_interface(struct most_interface *iface);
+
+/**
+ * most_deregister_interface - deregisters an instance of the interface
+ * @iface: pointer to the interface instance description
+ */
+void most_deregister_interface(struct most_interface *iface);
+void most_submit_mbo(struct mbo *mbo);
+
+/**
+ * most_stop_enqueue - prevents the core from enqueueing MBOs
+ * @iface: pointer to interface
+ * @channel_idx: channel index
+ */
+void most_stop_enqueue(struct most_interface *iface, int channel_idx);
+
+/**
+ * most_resume_enqueue - allow core to enqueue MBOs again
+ * @iface: pointer to interface
+ * @channel_idx: channel index
+ *
+ * This clears the enqueue halt flag and enqueues all MBOs currently
+ * in the wait fifo.
+ */
+void most_resume_enqueue(struct most_interface *iface, int channel_idx);
+int most_register_component(struct most_component *comp);
+int most_deregister_component(struct most_component *comp);
+struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
+			 struct most_component *comp);
+void most_put_mbo(struct mbo *mbo);
+int channel_has_mbo(struct most_interface *iface, int channel_idx,
+		    struct most_component *comp);
+int most_start_channel(struct most_interface *iface, int channel_idx,
+		       struct most_component *comp);
+int most_stop_channel(struct most_interface *iface, int channel_idx,
+		      struct most_component *comp);
+int __init configfs_init(void);
+int most_register_configfs_subsys(struct most_component *comp);
+void most_deregister_configfs_subsys(struct most_component *comp);
+int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
+		  char *comp_param);
+int most_remove_link(char *mdev, char *mdev_ch, char *comp_name);
+int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf);
+int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf);
+int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val);
+int most_cfg_complete(char *comp_name);
+void most_interface_register_notify(const char *mdev_name);
+#endif /* MOST_CORE_H_ */
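For readers coming to this header fresh, here is a minimal, hypothetical sketch
of how a hardware-dependent module (HDM) might fill in struct most_interface and
complete an MBO using the declarations above. The my_hdm_* identifiers are
invented for illustration, the MBO status constant is assumed rather than quoted
from the tree, and a real HDM would also supply iface.dev and start transfers
asynchronously, completing the MBO from its interrupt path as the USB HDM below
does.

#include <linux/module.h>
#include <linux/kernel.h>

#include "most.h"

static struct most_channel_capability my_hdm_caps[1];	/* one channel */

static int my_hdm_configure(struct most_interface *iface, int ch_idx,
			    struct most_channel_config *cfg)
{
	/*
	 * Inspect cfg->direction and cfg->data_type and, if the hardware
	 * requires it, cut cfg->buffer_size down to a supported value.
	 */
	return 0;
}

static int my_hdm_enqueue(struct most_interface *iface, int ch_idx,
			  struct mbo *mbo)
{
	/*
	 * Transfer mbo->buffer_length bytes at mbo->virt_address, then
	 * report the result back to the core via the completion routine.
	 */
	mbo->processed_length = mbo->buffer_length;
	mbo->status = MBO_SUCCESS;	/* assumed name of the success flag */
	mbo->complete(mbo);
	return 0;
}

static int my_hdm_poison_channel(struct most_interface *iface, int ch_idx)
{
	/* Cancel pending transfers and return every owned MBO via complete(). */
	return 0;
}

static struct most_interface my_hdm_iface = {
	.mod		= THIS_MODULE,
	.interface	= ITYPE_USB,	/* type constant as used by the USB HDM */
	.description	= "my-hdm-0",
	.num_channels	= ARRAY_SIZE(my_hdm_caps),
	.channel_vector	= my_hdm_caps,
	.configure	= my_hdm_configure,
	.enqueue	= my_hdm_enqueue,
	.poison_channel	= my_hdm_poison_channel,
};

/* Typically called from the bus driver's probe()/remove() callbacks. */
static int my_hdm_register(void)
{
	return most_register_interface(&my_hdm_iface);
}

static void my_hdm_unregister(void)
{
	most_deregister_interface(&my_hdm_iface);
}

Components built on struct most_component sit on the other side of this API:
they attach to such an interface through most_register_component() and the
configfs links established with most_add_link().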
diff --git a/drivers/staging/most/net/Makefile b/drivers/staging/most/net/Makefile
index f0ac64d..1582c97 100644
--- a/drivers/staging/most/net/Makefile
+++ b/drivers/staging/most/net/Makefile
@@ -2,4 +2,3 @@
 obj-$(CONFIG_MOST_NET) += most_net.o
 
 most_net-objs := net.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c
index 6cab1bb..5547e36 100644
--- a/drivers/staging/most/net/net.c
+++ b/drivers/staging/most/net/net.c
@@ -15,7 +15,8 @@
 #include <linux/list.h>
 #include <linux/wait.h>
 #include <linux/kobject.h>
-#include "most/core.h"
+
+#include "../most.h"
 
 #define MEP_HDR_LEN 8
 #define MDP_HDR_LEN 16
@@ -70,7 +71,7 @@ struct net_dev_context {
 static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
 static struct mutex probe_disc_mt; /* ch->linked = true, most_nd_open */
 static DEFINE_SPINLOCK(list_lock); /* list_head, ch->linked = false, dev_hold */
-static struct core_component comp;
+static struct most_component comp;
 
 static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
 {
@@ -81,6 +82,11 @@ static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
 	unsigned int payload_len = skb->len - ETH_HLEN;
 	unsigned int mdp_len = payload_len + MDP_HDR_LEN;
 
+	if (mdp_len < skb->len) {
+		pr_err("drop: too large packet! (%u)\n", skb->len);
+		return -EINVAL;
+	}
+
 	if (mbo->buffer_length < mdp_len) {
 		pr_err("drop: too small buffer! (%d for %d)\n",
 		       mbo->buffer_length, mdp_len);
@@ -128,6 +134,11 @@ static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
 	u8 *buff = mbo->virt_address;
 	unsigned int mep_len = skb->len + MEP_HDR_LEN;
 
+	if (mep_len < skb->len) {
+		pr_err("drop: too large packet! (%u)\n", skb->len);
+		return -EINVAL;
+	}
+
 	if (mbo->buffer_length < mep_len) {
 		pr_err("drop: too small buffer! (%d for %d)\n",
 		       mbo->buffer_length, mep_len);
@@ -497,7 +508,7 @@ static int comp_rx_data(struct mbo *mbo)
 	return ret;
 }
 
-static struct core_component comp = {
+static struct most_component comp = {
 	.mod = THIS_MODULE,
 	.name = "net",
 	.probe_channel = comp_probe_channel,
diff --git a/drivers/staging/most/sound/Makefile b/drivers/staging/most/sound/Makefile
index a3d086c..f0cd9d8 100644
--- a/drivers/staging/most/sound/Makefile
+++ b/drivers/staging/most/sound/Makefile
@@ -2,4 +2,3 @@
 obj-$(CONFIG_MOST_SOUND) += most_sound.o
 
 most_sound-objs := sound.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 723d0bd..44cf233 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -17,12 +17,13 @@
 #include <sound/pcm_params.h>
 #include <linux/sched.h>
 #include <linux/kthread.h>
-#include <most/core.h>
+
+#include "../most.h"
 
 #define DRIVER_NAME "sound"
 #define STRING_SIZE	80
 
-static struct core_component comp;
+static struct most_component comp;
 
 /**
  * struct channel - private structure to keep channel specific data
@@ -323,45 +324,6 @@ static int pcm_close(struct snd_pcm_substream *substream)
 }
 
 /**
- * pcm_hw_params - implements hw_params callback function for PCM middle layer
- * @substream: sub-stream pointer
- * @hw_params: contains the hardware parameters set by the application
- *
- * This is called when the hardware parameters is set by the application, that
- * is, once when the buffer size, the period size, the format, etc. are defined
- * for the PCM substream. Many hardware setups should be done is this callback,
- * including the allocation of buffers.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_hw_params(struct snd_pcm_substream *substream,
-			 struct snd_pcm_hw_params *hw_params)
-{
-	struct channel *channel = substream->private_data;
-
-	if ((params_channels(hw_params) > channel->pcm_hardware.channels_max) ||
-	    (params_channels(hw_params) < channel->pcm_hardware.channels_min)) {
-		pr_err("Requested number of channels not supported.\n");
-		return -EINVAL;
-	}
-	return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
-}
-
-/**
- * pcm_hw_free - implements hw_free callback function for PCM middle layer
- * @substream: substream pointer
- *
- * This is called to release the resources allocated via hw_params.
- * This function will be always called before the close callback is called.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_hw_free(struct snd_pcm_substream *substream)
-{
-	return snd_pcm_lib_free_pages(substream);
-}
-
-/**
  * pcm_prepare - implements prepare callback function for PCM middle layer
  * @substream: substream pointer
  *
@@ -462,9 +424,6 @@ static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
 static const struct snd_pcm_ops pcm_ops = {
 	.open       = pcm_open,
 	.close      = pcm_close,
-	.ioctl      = snd_pcm_lib_ioctl,
-	.hw_params  = pcm_hw_params,
-	.hw_free    = pcm_hw_free,
 	.prepare    = pcm_prepare,
 	.trigger    = pcm_trigger,
 	.pointer    = pcm_pointer,
@@ -661,8 +620,7 @@ static int audio_probe_channel(struct most_interface *iface, int channel_id,
 	pcm->private_data = channel;
 	strscpy(pcm->name, device_name, sizeof(pcm->name));
 	snd_pcm_set_ops(pcm, direction, &pcm_ops);
-	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
-					      NULL, 0, 0);
+	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
 
 	return 0;
 
@@ -779,9 +737,9 @@ static int audio_tx_completion(struct most_interface *iface, int channel_id)
 }
 
 /**
- * Initialization of the struct core_component
+ * Initialization of the struct most_component
  */
-static struct core_component comp = {
+static struct most_component comp = {
 	.mod = THIS_MODULE,
 	.name = DRIVER_NAME,
 	.probe_channel = audio_probe_channel,
diff --git a/drivers/staging/most/usb/Makefile b/drivers/staging/most/usb/Makefile
index 83cf2ea..c2b2073 100644
--- a/drivers/staging/most/usb/Makefile
+++ b/drivers/staging/most/usb/Makefile
@@ -2,4 +2,3 @@
 obj-$(CONFIG_MOST_USB) += most_usb.o
 
 most_usb-objs := usb.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/usb/usb.c b/drivers/staging/most/usb/usb.c
index 360cb5b..0bda88c 100644
--- a/drivers/staging/most/usb/usb.c
+++ b/drivers/staging/most/usb/usb.c
@@ -23,7 +23,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/etherdevice.h>
 #include <linux/uaccess.h>
-#include "most/core.h"
+
+#include "../most.h"
 
 #define USB_MTU			512
 #define NO_ISOCHRONOUS_URB	0
@@ -101,6 +102,7 @@ struct clear_hold_work {
  * @poll_work_obj: work for polling link status
  */
 struct most_dev {
+	struct device dev;
 	struct usb_device *usb_device;
 	struct most_interface iface;
 	struct most_channel_capability *cap;
@@ -122,6 +124,7 @@ struct most_dev {
 };
 
 #define to_mdev(d) container_of(d, struct most_dev, iface)
+#define to_mdev_from_dev(d) container_of(d, struct most_dev, dev)
 #define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
 
 static void wq_clear_halt(struct work_struct *wq_obj);
@@ -1022,6 +1025,12 @@ static void release_dci(struct device *dev)
 	kfree(dci);
 }
 
+static void release_mdev(struct device *dev)
+{
+	struct most_dev *mdev = to_mdev_from_dev(dev);
+
+	kfree(mdev);
+}
 /**
  * hdm_probe - probe function of USB device driver
  * @interface: Interface of the attached USB device
@@ -1060,6 +1069,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
 	mdev->link_stat_timer.expires = jiffies + (2 * HZ);
 
 	mdev->iface.mod = hdm_usb_fops.owner;
+	mdev->iface.dev = &mdev->dev;
 	mdev->iface.driver_dev = &interface->dev;
 	mdev->iface.interface = ITYPE_USB;
 	mdev->iface.configure = hdm_configure_channel;
@@ -1078,6 +1088,9 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
 		 usb_dev->config->desc.bConfigurationValue,
 		 usb_iface_desc->desc.bInterfaceNumber);
 
+	mdev->dev.init_name = mdev->description;
+	mdev->dev.parent = &interface->dev;
+	mdev->dev.release = release_mdev;
 	mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
 	if (!mdev->conf)
 		goto err_free_mdev;
@@ -1151,7 +1164,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
 		}
 
 		mdev->dci->dev.init_name = "dci";
-		mdev->dci->dev.parent = &mdev->iface.dev;
+		mdev->dci->dev.parent = get_device(mdev->iface.dev);
 		mdev->dci->dev.groups = dci_attr_groups;
 		mdev->dci->dev.release = release_dci;
 		if (device_register(&mdev->dci->dev)) {
@@ -1165,7 +1178,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
 	mutex_unlock(&mdev->io_mutex);
 	return 0;
 err_free_dci:
-	kfree(mdev->dci);
+	put_device(&mdev->dci->dev);
 err_free_busy_urbs:
 	kfree(mdev->busy_urbs);
 err_free_ep_address:
@@ -1175,7 +1188,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
 err_free_conf:
 	kfree(mdev->conf);
 err_free_mdev:
-	kfree(mdev);
+	put_device(&mdev->dev);
 err_out_of_memory:
 	if (ret == 0 || ret == -ENOMEM) {
 		ret = -ENOMEM;
@@ -1205,14 +1218,15 @@ static void hdm_disconnect(struct usb_interface *interface)
 	del_timer_sync(&mdev->link_stat_timer);
 	cancel_work_sync(&mdev->poll_work_obj);
 
-	device_unregister(&mdev->dci->dev);
+	if (mdev->dci)
+		device_unregister(&mdev->dci->dev);
 	most_deregister_interface(&mdev->iface);
 
 	kfree(mdev->busy_urbs);
 	kfree(mdev->cap);
 	kfree(mdev->conf);
 	kfree(mdev->ep_address);
-	kfree(mdev);
+	put_device(&mdev->dev);
 }
 
 static struct usb_driver hdm_usb = {
diff --git a/drivers/staging/most/video/Makefile b/drivers/staging/most/video/Makefile
index 2d857d3..856175f 100644
--- a/drivers/staging/most/video/Makefile
+++ b/drivers/staging/most/video/Makefile
@@ -2,4 +2,3 @@
 obj-$(CONFIG_MOST_VIDEO) += most_video.o
 
 most_video-objs := video.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
index 10c1ef7..d32ae49 100644
--- a/drivers/staging/most/video/video.c
+++ b/drivers/staging/most/video/video.c
@@ -21,11 +21,11 @@
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-fh.h>
 
-#include "most/core.h"
+#include "../most.h"
 
 #define V4L2_CMP_MAX_INPUT  1
 
-static struct core_component comp;
+static struct most_component comp;
 
 struct most_video_dev {
 	struct most_interface *iface;
@@ -527,7 +527,7 @@ static int comp_disconnect_channel(struct most_interface *iface,
 	return 0;
 }
 
-static struct core_component comp = {
+static struct most_component comp = {
 	.mod = THIS_MODULE,
 	.name = "video",
 	.probe_channel = comp_probe_channel,
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index a4c0811..d89d68f 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -138,7 +138,7 @@ i2s: i2s@a00 {
 
 		memc: memc@5000 {
 			compatible = "mtk,mt7621-memc";
-			reg = <0x300 0x100>;
+			reg = <0x5000 0x1000>;
 		};
 
 		cpc: cpc@1fbf0000 {
diff --git a/drivers/staging/nvec/nvec_kbd.c b/drivers/staging/nvec/nvec_kbd.c
index 01dbb66..386d619 100644
--- a/drivers/staging/nvec/nvec_kbd.c
+++ b/drivers/staging/nvec/nvec_kbd.c
@@ -123,6 +123,8 @@ static int nvec_kbd_probe(struct platform_device *pdev)
 		keycodes[j++] = extcode_tab_us102[i];
 
 	idev = devm_input_allocate_device(&pdev->dev);
+	if (!idev)
+		return -ENOMEM;
 	idev->name = "nvec keyboard";
 	idev->phys = "nvec";
 	idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_LED);
diff --git a/drivers/staging/octeon-usb/Kconfig b/drivers/staging/octeon-usb/Kconfig
deleted file mode 100644
index 6a5d842..0000000
--- a/drivers/staging/octeon-usb/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config OCTEON_USB
-	tristate "Cavium Networks Octeon USB support"
-	depends on CAVIUM_OCTEON_SOC && USB
-	help
-	  This driver supports USB host controller on some Cavium
-	  Networks' products in the Octeon family.
-
-	  To compile this driver as a module, choose M here. The module
-	  will be called octeon-hcd.
-
diff --git a/drivers/staging/octeon-usb/Makefile b/drivers/staging/octeon-usb/Makefile
deleted file mode 100644
index 9873a01..0000000
--- a/drivers/staging/octeon-usb/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-${CONFIG_OCTEON_USB} := octeon-hcd.o
diff --git a/drivers/staging/octeon-usb/TODO b/drivers/staging/octeon-usb/TODO
deleted file mode 100644
index 2b29acc..0000000
--- a/drivers/staging/octeon-usb/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-This driver is functional and has been tested on EdgeRouter Lite,
-D-Link DSR-1000N and EBH5600 evaluation board with USB mass storage.
-
-TODO:
-	- kernel coding style
-	- checkpatch warnings
-
-Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
deleted file mode 100644
index 582c918..0000000
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ /dev/null
@@ -1,3737 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2008 Cavium Networks
- *
- * Some parts of the code were originally released under BSD license:
- *
- * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *
- *   * Redistributions in binary form must reproduce the above
- *     copyright notice, this list of conditions and the following
- *     disclaimer in the documentation and/or other materials provided
- *     with the distribution.
- *
- *   * Neither the name of Cavium Networks nor the names of
- *     its contributors may be used to endorse or promote products
- *     derived from this software without specific prior written
- *     permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its associated
- * regulations, and may be subject to export or import regulations in other
- * countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
- * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
- * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
- * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
- * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
- * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
- * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
- * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
- * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- */
-
-#include <linux/usb.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/usb/hcd.h>
-#include <linux/prefetch.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-
-#include <asm/octeon/octeon.h>
-
-#include "octeon-hcd.h"
-
-/**
- * enum cvmx_usb_speed - the possible USB device speeds
- *
- * @CVMX_USB_SPEED_HIGH: Device is operation at 480Mbps
- * @CVMX_USB_SPEED_FULL: Device is operation at 12Mbps
- * @CVMX_USB_SPEED_LOW:  Device is operation at 1.5Mbps
- */
-enum cvmx_usb_speed {
-	CVMX_USB_SPEED_HIGH = 0,
-	CVMX_USB_SPEED_FULL = 1,
-	CVMX_USB_SPEED_LOW = 2,
-};
-
-/**
- * enum cvmx_usb_transfer - the possible USB transfer types
- *
- * @CVMX_USB_TRANSFER_CONTROL:	   USB transfer type control for hub and status
- *				   transfers
- * @CVMX_USB_TRANSFER_ISOCHRONOUS: USB transfer type isochronous for low
- *				   priority periodic transfers
- * @CVMX_USB_TRANSFER_BULK:	   USB transfer type bulk for large low priority
- *				   transfers
- * @CVMX_USB_TRANSFER_INTERRUPT:   USB transfer type interrupt for high priority
- *				   periodic transfers
- */
-enum cvmx_usb_transfer {
-	CVMX_USB_TRANSFER_CONTROL = 0,
-	CVMX_USB_TRANSFER_ISOCHRONOUS = 1,
-	CVMX_USB_TRANSFER_BULK = 2,
-	CVMX_USB_TRANSFER_INTERRUPT = 3,
-};
-
-/**
- * enum cvmx_usb_direction - the transfer directions
- *
- * @CVMX_USB_DIRECTION_OUT: Data is transferring from Octeon to the device/host
- * @CVMX_USB_DIRECTION_IN:  Data is transferring from the device/host to Octeon
- */
-enum cvmx_usb_direction {
-	CVMX_USB_DIRECTION_OUT,
-	CVMX_USB_DIRECTION_IN,
-};
-
-/**
- * enum cvmx_usb_status - possible callback function status codes
- *
- * @CVMX_USB_STATUS_OK:		  The transaction / operation finished without
- *				  any errors
- * @CVMX_USB_STATUS_SHORT:	  FIXME: This is currently not implemented
- * @CVMX_USB_STATUS_CANCEL:	  The transaction was canceled while in flight
- *				  by a user call to cvmx_usb_cancel
- * @CVMX_USB_STATUS_ERROR:	  The transaction aborted with an unexpected
- *				  error status
- * @CVMX_USB_STATUS_STALL:	  The transaction received a USB STALL response
- *				  from the device
- * @CVMX_USB_STATUS_XACTERR:	  The transaction failed with an error from the
- *				  device even after a number of retries
- * @CVMX_USB_STATUS_DATATGLERR:	  The transaction failed with a data toggle
- *				  error even after a number of retries
- * @CVMX_USB_STATUS_BABBLEERR:	  The transaction failed with a babble error
- * @CVMX_USB_STATUS_FRAMEERR:	  The transaction failed with a frame error
- *				  even after a number of retries
- */
-enum cvmx_usb_status {
-	CVMX_USB_STATUS_OK,
-	CVMX_USB_STATUS_SHORT,
-	CVMX_USB_STATUS_CANCEL,
-	CVMX_USB_STATUS_ERROR,
-	CVMX_USB_STATUS_STALL,
-	CVMX_USB_STATUS_XACTERR,
-	CVMX_USB_STATUS_DATATGLERR,
-	CVMX_USB_STATUS_BABBLEERR,
-	CVMX_USB_STATUS_FRAMEERR,
-};
-
-/**
- * struct cvmx_usb_port_status - the USB port status information
- *
- * @port_enabled:	1 = Usb port is enabled, 0 = disabled
- * @port_over_current:	1 = Over current detected, 0 = Over current not
- *			detected. Octeon doesn't support over current detection.
- * @port_powered:	1 = Port power is being supplied to the device, 0 =
- *			power is off. Octeon doesn't support turning port power
- *			off.
- * @port_speed:		Current port speed.
- * @connected:		1 = A device is connected to the port, 0 = No device is
- *			connected.
- * @connect_change:	1 = Device connected state changed since the last set
- *			status call.
- */
-struct cvmx_usb_port_status {
-	u32 reserved			: 25;
-	u32 port_enabled		: 1;
-	u32 port_over_current		: 1;
-	u32 port_powered		: 1;
-	enum cvmx_usb_speed port_speed	: 2;
-	u32 connected			: 1;
-	u32 connect_change		: 1;
-};
-
-/**
- * struct cvmx_usb_iso_packet - descriptor for Isochronous packets
- *
- * @offset:	This is the offset in bytes into the main buffer where this data
- *		is stored.
- * @length:	This is the length in bytes of the data.
- * @status:	This is the status of this individual packet transfer.
- */
-struct cvmx_usb_iso_packet {
-	int offset;
-	int length;
-	enum cvmx_usb_status status;
-};
-
-/**
- * enum cvmx_usb_initialize_flags - flags used by the initialization function
- *
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI:    The USB port uses a 12MHz crystal
- *					      as clock source at USB_XO and
- *					      USB_XI.
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND:   The USB port uses 12/24/48MHz 2.5V
- *					      board clock source at USB_XO.
- *					      USB_XI should be tied to GND.
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK: Mask for clock speed field
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:    Speed of reference clock or
- *					      crystal
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:    Speed of reference clock
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:    Speed of reference clock
- * @CVMX_USB_INITIALIZE_FLAGS_NO_DMA:	      Disable DMA and used polled IO for
- *					      data transfer use for the USB
- */
-enum cvmx_usb_initialize_flags {
-	CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI		= 1 << 0,
-	CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND		= 1 << 1,
-	CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK	= 3 << 3,
-	CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ		= 1 << 3,
-	CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ		= 2 << 3,
-	CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ		= 3 << 3,
-	/* Bits 3-4 used to encode the clock frequency */
-	CVMX_USB_INITIALIZE_FLAGS_NO_DMA		= 1 << 5,
-};
-
-/**
- * enum cvmx_usb_pipe_flags - internal flags for a pipe.
- *
- * @CVMX_USB_PIPE_FLAGS_SCHEDULED: Used internally to determine if a pipe is
- *				   actively using hardware.
- * @CVMX_USB_PIPE_FLAGS_NEED_PING: Used internally to determine if a high speed
- *				   pipe is in the ping state.
- */
-enum cvmx_usb_pipe_flags {
-	CVMX_USB_PIPE_FLAGS_SCHEDULED	= 1 << 17,
-	CVMX_USB_PIPE_FLAGS_NEED_PING	= 1 << 18,
-};
-
-/* Maximum number of times to retry failed transactions */
-#define MAX_RETRIES		3
-
-/* Maximum number of hardware channels supported by the USB block */
-#define MAX_CHANNELS		8
-
-/*
- * The low level hardware can transfer a maximum of this number of bytes in each
- * transfer. The field is 19 bits wide
- */
-#define MAX_TRANSFER_BYTES	((1 << 19) - 1)
-
-/*
- * The low level hardware can transfer a maximum of this number of packets in
- * each transfer. The field is 10 bits wide
- */
-#define MAX_TRANSFER_PACKETS	((1 << 10) - 1)
-
-/**
- * Logical transactions may take numerous low level
- * transactions, especially when splits are concerned. This
- * enum represents all of the possible stages a transaction can
- * be in. Note that split completes are always even. This is so
- * the NAK handler can backup to the previous low level
- * transaction with a simple clearing of bit 0.
- */
-enum cvmx_usb_stage {
-	CVMX_USB_STAGE_NON_CONTROL,
-	CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE,
-	CVMX_USB_STAGE_SETUP,
-	CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE,
-	CVMX_USB_STAGE_DATA,
-	CVMX_USB_STAGE_DATA_SPLIT_COMPLETE,
-	CVMX_USB_STAGE_STATUS,
-	CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE,
-};
-
-/**
- * struct cvmx_usb_transaction - describes each pending USB transaction
- *				 regardless of type. These are linked together
- *				 to form a list of pending requests for a pipe.
- *
- * @node:		List node for transactions in the pipe.
- * @type:		Type of transaction, duplicated of the pipe.
- * @flags:		State flags for this transaction.
- * @buffer:		User's physical buffer address to read/write.
- * @buffer_length:	Size of the user's buffer in bytes.
- * @control_header:	For control transactions, physical address of the 8
- *			byte standard header.
- * @iso_start_frame:	For ISO transactions, the starting frame number.
- * @iso_number_packets:	For ISO transactions, the number of packets in the
- *			request.
- * @iso_packets:	For ISO transactions, the sub packets in the request.
- * @actual_bytes:	Actual bytes transfer for this transaction.
- * @stage:		For control transactions, the current stage.
- * @urb:		URB.
- */
-struct cvmx_usb_transaction {
-	struct list_head node;
-	enum cvmx_usb_transfer type;
-	u64 buffer;
-	int buffer_length;
-	u64 control_header;
-	int iso_start_frame;
-	int iso_number_packets;
-	struct cvmx_usb_iso_packet *iso_packets;
-	int xfersize;
-	int pktcnt;
-	int retries;
-	int actual_bytes;
-	enum cvmx_usb_stage stage;
-	struct urb *urb;
-};
-
-/**
- * struct cvmx_usb_pipe - a pipe represents a virtual connection between Octeon
- *			  and some USB device. It contains a list of pending
- *			  request to the device.
- *
- * @node:		List node for pipe list
- * @next:		Pipe after this one in the list
- * @transactions:	List of pending transactions
- * @interval:		For periodic pipes, the interval between packets in
- *			frames
- * @next_tx_frame:	The next frame this pipe is allowed to transmit on
- * @flags:		State flags for this pipe
- * @device_speed:	Speed of device connected to this pipe
- * @transfer_type:	Type of transaction supported by this pipe
- * @transfer_dir:	IN or OUT. Ignored for Control
- * @multi_count:	Max packet in a row for the device
- * @max_packet:		The device's maximum packet size in bytes
- * @device_addr:	USB device address at other end of pipe
- * @endpoint_num:	USB endpoint number at other end of pipe
- * @hub_device_addr:	Hub address this device is connected to
- * @hub_port:		Hub port this device is connected to
- * @pid_toggle:		This toggles between 0/1 on every packet send to track
- *			the data pid needed
- * @channel:		Hardware DMA channel for this pipe
- * @split_sc_frame:	The low order bits of the frame number the split
- *			complete should be sent on
- */
-struct cvmx_usb_pipe {
-	struct list_head node;
-	struct list_head transactions;
-	u64 interval;
-	u64 next_tx_frame;
-	enum cvmx_usb_pipe_flags flags;
-	enum cvmx_usb_speed device_speed;
-	enum cvmx_usb_transfer transfer_type;
-	enum cvmx_usb_direction transfer_dir;
-	int multi_count;
-	u16 max_packet;
-	u8 device_addr;
-	u8 endpoint_num;
-	u8 hub_device_addr;
-	u8 hub_port;
-	u8 pid_toggle;
-	u8 channel;
-	s8 split_sc_frame;
-};
-
-struct cvmx_usb_tx_fifo {
-	struct {
-		int channel;
-		int size;
-		u64 address;
-	} entry[MAX_CHANNELS + 1];
-	int head;
-	int tail;
-};
-
-/**
- * struct octeon_hcd - the state of the USB block
- *
- * lock:		   Serialization lock.
- * init_flags:		   Flags passed to initialize.
- * index:		   Which USB block this is for.
- * idle_hardware_channels: Bit set for every idle hardware channel.
- * usbcx_hprt:		   Stored port status so we don't need to read a CSR to
- *			   determine splits.
- * pipe_for_channel:	   Map channels to pipes.
- * pipe:		   Storage for pipes.
- * indent:		   Used by debug output to indent functions.
- * port_status:		   Last port status used for change notification.
- * idle_pipes:		   List of open pipes that have no transactions.
- * active_pipes:	   Active pipes indexed by transfer type.
- * frame_number:	   Increments every SOF interrupt for time keeping.
- * active_split:	   Points to the current active split, or NULL.
- */
-struct octeon_hcd {
-	spinlock_t lock; /* serialization lock */
-	int init_flags;
-	int index;
-	int idle_hardware_channels;
-	union cvmx_usbcx_hprt usbcx_hprt;
-	struct cvmx_usb_pipe *pipe_for_channel[MAX_CHANNELS];
-	int indent;
-	struct cvmx_usb_port_status port_status;
-	struct list_head idle_pipes;
-	struct list_head active_pipes[4];
-	u64 frame_number;
-	struct cvmx_usb_transaction *active_split;
-	struct cvmx_usb_tx_fifo periodic;
-	struct cvmx_usb_tx_fifo nonperiodic;
-};
-
-/*
- * This macro logically sets a single field in a CSR. It does the sequence
- * read, modify, and write
- */
-#define USB_SET_FIELD32(address, _union, field, value)		\
-	do {							\
-		union _union c;					\
-								\
-		c.u32 = cvmx_usb_read_csr32(usb, address);	\
-		c.s.field = value;				\
-		cvmx_usb_write_csr32(usb, address, c.u32);	\
-	} while (0)
-
-/* Returns the IO address to push/pop stuff data from the FIFOs */
-#define USB_FIFO_ADDRESS(channel, usb_index) \
-	(CVMX_USBCX_GOTGCTL(usb_index) + ((channel) + 1) * 0x1000)
-
-/**
- * struct octeon_temp_buffer - a bounce buffer for USB transfers
- * @orig_buffer: the original buffer passed by the USB stack
- * @data:	 the newly allocated temporary buffer (excluding meta-data)
- *
- * Both the DMA engine and FIFO mode will always transfer full 32-bit words. If
- * the buffer is too short, we need to allocate a temporary one, and this struct
- * represents it.
- */
-struct octeon_temp_buffer {
-	void *orig_buffer;
-	u8 data[0];
-};
-
-static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
-{
-	return container_of((void *)p, struct usb_hcd, hcd_priv);
-}
-
-/**
- * octeon_alloc_temp_buffer - allocate a temporary buffer for USB transfer
- *                            (if needed)
- * @urb:	URB.
- * @mem_flags:	Memory allocation flags.
- *
- * This function allocates a temporary bounce buffer whenever it's needed
- * due to HW limitations.
- */
-static int octeon_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
-{
-	struct octeon_temp_buffer *temp;
-
-	if (urb->num_sgs || urb->sg ||
-	    (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) ||
-	    !(urb->transfer_buffer_length % sizeof(u32)))
-		return 0;
-
-	temp = kmalloc(ALIGN(urb->transfer_buffer_length, sizeof(u32)) +
-		       sizeof(*temp), mem_flags);
-	if (!temp)
-		return -ENOMEM;
-
-	temp->orig_buffer = urb->transfer_buffer;
-	if (usb_urb_dir_out(urb))
-		memcpy(temp->data, urb->transfer_buffer,
-		       urb->transfer_buffer_length);
-	urb->transfer_buffer = temp->data;
-	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
-
-	return 0;
-}
-
-/**
- * octeon_free_temp_buffer - free a temporary buffer used by USB transfers.
- * @urb: URB.
- *
- * Frees a buffer allocated by octeon_alloc_temp_buffer().
- */
-static void octeon_free_temp_buffer(struct urb *urb)
-{
-	struct octeon_temp_buffer *temp;
-	size_t length;
-
-	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
-		return;
-
-	temp = container_of(urb->transfer_buffer, struct octeon_temp_buffer,
-			    data);
-	if (usb_urb_dir_in(urb)) {
-		if (usb_pipeisoc(urb->pipe))
-			length = urb->transfer_buffer_length;
-		else
-			length = urb->actual_length;
-
-		memcpy(temp->orig_buffer, urb->transfer_buffer, length);
-	}
-	urb->transfer_buffer = temp->orig_buffer;
-	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
-	kfree(temp);
-}
-
-/**
- * octeon_map_urb_for_dma - Octeon-specific map_urb_for_dma().
- * @hcd:	USB HCD structure.
- * @urb:	URB.
- * @mem_flags:	Memory allocation flags.
- */
-static int octeon_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
-				  gfp_t mem_flags)
-{
-	int ret;
-
-	ret = octeon_alloc_temp_buffer(urb, mem_flags);
-	if (ret)
-		return ret;
-
-	ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
-	if (ret)
-		octeon_free_temp_buffer(urb);
-
-	return ret;
-}
-
-/**
- * octeon_unmap_urb_for_dma - Octeon-specific unmap_urb_for_dma()
- * @hcd:	USB HCD structure.
- * @urb:	URB.
- */
-static void octeon_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
-{
-	usb_hcd_unmap_urb_for_dma(hcd, urb);
-	octeon_free_temp_buffer(urb);
-}
-
-/**
- * Read a USB 32bit CSR. It performs the necessary address swizzle
- * for 32bit CSRs and logs the value in a readable format if
- * debugging is on.
- *
- * @usb:     USB block this access is for
- * @address: 64bit address to read
- *
- * Returns: Result of the read
- */
-static inline u32 cvmx_usb_read_csr32(struct octeon_hcd *usb, u64 address)
-{
-	return cvmx_read64_uint32(address ^ 4);
-}
-
-/**
- * Write a USB 32bit CSR. It performs the necessary address
- * swizzle for 32bit CSRs and logs the value in a readable format
- * if debugging is on.
- *
- * @usb:     USB block this access is for
- * @address: 64bit address to write
- * @value:   Value to write
- */
-static inline void cvmx_usb_write_csr32(struct octeon_hcd *usb,
-					u64 address, u32 value)
-{
-	cvmx_write64_uint32(address ^ 4, value);
-	cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
-}
-
-/**
- * Return non zero if this pipe connects to a non HIGH speed
- * device through a high speed hub.
- *
- * @usb:    USB block this access is for
- * @pipe:   Pipe to check
- *
- * Returns: Non zero if we need to do split transactions
- */
-static inline int cvmx_usb_pipe_needs_split(struct octeon_hcd *usb,
-					    struct cvmx_usb_pipe *pipe)
-{
-	return pipe->device_speed != CVMX_USB_SPEED_HIGH &&
-	       usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH;
-}
-
-/**
- * Trivial utility function to return the correct PID for a pipe
- *
- * @pipe:   pipe to check
- *
- * Returns: PID for pipe
- */
-static inline int cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
-{
-	if (pipe->pid_toggle)
-		return 2; /* Data1 */
-	return 0; /* Data0 */
-}
-
-/* Loops through register until txfflsh or rxfflsh become zero.*/
-static int cvmx_wait_tx_rx(struct octeon_hcd *usb, int fflsh_type)
-{
-	int result;
-	u64 address = CVMX_USBCX_GRSTCTL(usb->index);
-	u64 done = cvmx_get_cycle() + 100 *
-		   (u64)octeon_get_clock_rate / 1000000;
-	union cvmx_usbcx_grstctl c;
-
-	while (1) {
-		c.u32 = cvmx_usb_read_csr32(usb, address);
-		if (fflsh_type == 0 && c.s.txfflsh == 0) {
-			result = 0;
-			break;
-		} else if (fflsh_type == 1 && c.s.rxfflsh == 0) {
-			result = 0;
-			break;
-		} else if (cvmx_get_cycle() > done) {
-			result = -1;
-			break;
-		}
-
-		__delay(100);
-	}
-	return result;
-}
-
-static void cvmx_fifo_setup(struct octeon_hcd *usb)
-{
-	union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3;
-	union cvmx_usbcx_gnptxfsiz npsiz;
-	union cvmx_usbcx_hptxfsiz psiz;
-
-	usbcx_ghwcfg3.u32 = cvmx_usb_read_csr32(usb,
-						CVMX_USBCX_GHWCFG3(usb->index));
-
-	/*
-	 * Program the USBC_GRXFSIZ register to select the size of the receive
-	 * FIFO (25%).
-	 */
-	USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), cvmx_usbcx_grxfsiz,
-			rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4);
-
-	/*
-	 * Program the USBC_GNPTXFSIZ register to select the size and the start
-	 * address of the non-periodic transmit FIFO for nonperiodic
-	 * transactions (50%).
-	 */
-	npsiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
-	npsiz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2;
-	npsiz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4;
-	cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), npsiz.u32);
-
-	/*
-	 * Program the USBC_HPTXFSIZ register to select the size and start
-	 * address of the periodic transmit FIFO for periodic transactions
-	 * (25%).
-	 */
-	psiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index));
-	psiz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4;
-	psiz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4;
-	cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), psiz.u32);
-
-	/* Flush all FIFOs */
-	USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
-			cvmx_usbcx_grstctl, txfnum, 0x10);
-	USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
-			cvmx_usbcx_grstctl, txfflsh, 1);
-	cvmx_wait_tx_rx(usb, 0);
-	USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
-			cvmx_usbcx_grstctl, rxfflsh, 1);
-	cvmx_wait_tx_rx(usb, 1);
-}
-
-/**
- * Shutdown a USB port after a call to cvmx_usb_initialize().
- * The port should be disabled with all pipes closed when this
- * function is called.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_shutdown(struct octeon_hcd *usb)
-{
-	union cvmx_usbnx_clk_ctl usbn_clk_ctl;
-
-	/* Make sure all pipes are closed */
-	if (!list_empty(&usb->idle_pipes) ||
-	    !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS]) ||
-	    !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT]) ||
-	    !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_CONTROL]) ||
-	    !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_BULK]))
-		return -EBUSY;
-
-	/* Disable the clocks and put them in power on reset */
-	usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index));
-	usbn_clk_ctl.s.enable = 1;
-	usbn_clk_ctl.s.por = 1;
-	usbn_clk_ctl.s.hclk_rst = 1;
-	usbn_clk_ctl.s.prst = 0;
-	usbn_clk_ctl.s.hrst = 0;
-	cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
-	return 0;
-}
-
-/**
- * Initialize a USB port for use. This must be called before any
- * other access to the Octeon USB port is made. The port starts
- * off in the disabled state.
- *
- * @dev:	 Pointer to struct device for logging purposes.
- * @usb:	 Pointer to struct octeon_hcd.
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_initialize(struct device *dev,
-			       struct octeon_hcd *usb)
-{
-	int channel;
-	int divisor;
-	int retries = 0;
-	union cvmx_usbcx_hcfg usbcx_hcfg;
-	union cvmx_usbnx_clk_ctl usbn_clk_ctl;
-	union cvmx_usbcx_gintsts usbc_gintsts;
-	union cvmx_usbcx_gahbcfg usbcx_gahbcfg;
-	union cvmx_usbcx_gintmsk usbcx_gintmsk;
-	union cvmx_usbcx_gusbcfg usbcx_gusbcfg;
-	union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
-
-retry:
-	/*
-	 * Power On Reset and PHY Initialization
-	 *
-	 * 1. Wait for DCOK to assert (nothing to do)
-	 *
-	 * 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
-	 *     USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0
-	 */
-	usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index));
-	usbn_clk_ctl.s.por = 1;
-	usbn_clk_ctl.s.hrst = 0;
-	usbn_clk_ctl.s.prst = 0;
-	usbn_clk_ctl.s.hclk_rst = 0;
-	usbn_clk_ctl.s.enable = 0;
-	/*
-	 * 2b. Select the USB reference clock/crystal parameters by writing
-	 *     appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON]
-	 */
-	if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
-		/*
-		 * The USB port uses 12/24/48MHz 2.5V board clock
-		 * source at USB_XO. USB_XI should be tied to GND.
-		 * Most Octeon evaluation boards require this setting
-		 */
-		if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
-		    OCTEON_IS_MODEL(OCTEON_CN56XX) ||
-		    OCTEON_IS_MODEL(OCTEON_CN50XX))
-			/* From CN56XX,CN50XX,CN31XX,CN30XX manuals */
-			usbn_clk_ctl.s.p_rtype = 2; /* p_rclk=1 & p_xenbn=0 */
-		else
-			/* From CN52XX manual */
-			usbn_clk_ctl.s.p_rtype = 1;
-
-		switch (usb->init_flags &
-			CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) {
-		case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:
-			usbn_clk_ctl.s.p_c_sel = 0;
-			break;
-		case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:
-			usbn_clk_ctl.s.p_c_sel = 1;
-			break;
-		case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:
-			usbn_clk_ctl.s.p_c_sel = 2;
-			break;
-		}
-	} else {
-		/*
-		 * The USB port uses a 12MHz crystal as clock source
-		 * at USB_XO and USB_XI
-		 */
-		if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
-			/* From CN31XX,CN30XX manual */
-			usbn_clk_ctl.s.p_rtype = 3; /* p_rclk=1 & p_xenbn=1 */
-		else
-			/* From CN56XX,CN52XX,CN50XX manuals. */
-			usbn_clk_ctl.s.p_rtype = 0;
-
-		usbn_clk_ctl.s.p_c_sel = 0;
-	}
-	/*
-	 * 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
-	 *     setting USBN0/1_CLK_CTL[ENABLE] = 1. Divide the core clock down
-	 *     such that USB is as close as possible to 125Mhz
-	 */
-	divisor = DIV_ROUND_UP(octeon_get_clock_rate(), 125000000);
-	/* Lower than 4 doesn't seem to work properly */
-	if (divisor < 4)
-		divisor = 4;
-	usbn_clk_ctl.s.divide = divisor;
-	usbn_clk_ctl.s.divide2 = 0;
-	cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
-
-	/* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
-	usbn_clk_ctl.s.hclk_rst = 1;
-	cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
-	/* 2e.  Wait 64 core-clock cycles for HCLK to stabilize */
-	__delay(64);
-	/*
-	 * 3. Program the power-on reset field in the USBN clock-control
-	 *    register:
-	 *    USBN_CLK_CTL[POR] = 0
-	 */
-	usbn_clk_ctl.s.por = 0;
-	cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
-	/* 4. Wait 1 ms for PHY clock to start */
-	mdelay(1);
-	/*
-	 * 5. Program the Reset input from automatic test equipment field in the
-	 *    USBP control and status register:
-	 *    USBN_USBP_CTL_STATUS[ATE_RESET] = 1
-	 */
-	usbn_usbp_ctl_status.u64 =
-		cvmx_read64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index));
-	usbn_usbp_ctl_status.s.ate_reset = 1;
-	cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
-			    usbn_usbp_ctl_status.u64);
-	/* 6. Wait 10 cycles */
-	__delay(10);
-	/*
-	 * 7. Clear ATE_RESET field in the USBN clock-control register:
-	 *    USBN_USBP_CTL_STATUS[ATE_RESET] = 0
-	 */
-	usbn_usbp_ctl_status.s.ate_reset = 0;
-	cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
-			    usbn_usbp_ctl_status.u64);
-	/*
-	 * 8. Program the PHY reset field in the USBN clock-control register:
-	 *    USBN_CLK_CTL[PRST] = 1
-	 */
-	usbn_clk_ctl.s.prst = 1;
-	cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
-	/*
-	 * 9. Program the USBP control and status register to select host or
-	 *    device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
-	 *    device
-	 */
-	usbn_usbp_ctl_status.s.hst_mode = 0;
-	cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
-			    usbn_usbp_ctl_status.u64);
-	/* 10. Wait 1 us */
-	udelay(1);
-	/*
-	 * 11. Program the hreset_n field in the USBN clock-control register:
-	 *     USBN_CLK_CTL[HRST] = 1
-	 */
-	usbn_clk_ctl.s.hrst = 1;
-	cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
-	/* 12. Proceed to USB core initialization */
-	usbn_clk_ctl.s.enable = 1;
-	cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
-	udelay(1);
-
-	/*
-	 * USB Core Initialization
-	 *
-	 * 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to
-	 *    determine USB core configuration parameters.
-	 *
-	 *    Nothing needed
-	 *
-	 * 2. Program the following fields in the global AHB configuration
-	 *    register (USBC_GAHBCFG)
-	 *    DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
-	 *    Burst length, USBC_GAHBCFG[HBSTLEN] = 0
-	 *    Nonperiodic TxFIFO empty level (slave mode only),
-	 *    USBC_GAHBCFG[NPTXFEMPLVL]
-	 *    Periodic TxFIFO empty level (slave mode only),
-	 *    USBC_GAHBCFG[PTXFEMPLVL]
-	 *    Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1
-	 */
-	usbcx_gahbcfg.u32 = 0;
-	usbcx_gahbcfg.s.dmaen = !(usb->init_flags &
-				  CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
-	usbcx_gahbcfg.s.hbstlen = 0;
-	usbcx_gahbcfg.s.nptxfemplvl = 1;
-	usbcx_gahbcfg.s.ptxfemplvl = 1;
-	usbcx_gahbcfg.s.glblintrmsk = 1;
-	cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index),
-			     usbcx_gahbcfg.u32);
-
-	/*
-	 * 3. Program the following fields in USBC_GUSBCFG register.
-	 *    HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
-	 *    ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
-	 *    USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
-	 *    PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0
-	 */
-	usbcx_gusbcfg.u32 = cvmx_usb_read_csr32(usb,
-						CVMX_USBCX_GUSBCFG(usb->index));
-	usbcx_gusbcfg.s.toutcal = 0;
-	usbcx_gusbcfg.s.ddrsel = 0;
-	usbcx_gusbcfg.s.usbtrdtim = 0x5;
-	usbcx_gusbcfg.s.phylpwrclksel = 0;
-	cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index),
-			     usbcx_gusbcfg.u32);
-
-	/*
-	 * 4. The software must unmask the following bits in the USBC_GINTMSK
-	 *    register.
-	 *    OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1
-	 *    Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1
-	 */
-	usbcx_gintmsk.u32 = cvmx_usb_read_csr32(usb,
-						CVMX_USBCX_GINTMSK(usb->index));
-	usbcx_gintmsk.s.otgintmsk = 1;
-	usbcx_gintmsk.s.modemismsk = 1;
-	usbcx_gintmsk.s.hchintmsk = 1;
-	usbcx_gintmsk.s.sofmsk = 0;
-	/* We need RX FIFO interrupts if we don't have DMA */
-	if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
-		usbcx_gintmsk.s.rxflvlmsk = 1;
-	cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index),
-			     usbcx_gintmsk.u32);
-
-	/*
-	 * Disable all channel interrupts. We'll enable them per channel later.
-	 */
-	for (channel = 0; channel < 8; channel++)
-		cvmx_usb_write_csr32(usb,
-				     CVMX_USBCX_HCINTMSKX(channel, usb->index),
-				     0);
-
-	/*
-	 * Host Port Initialization
-	 *
-	 * 1. Program the host-port interrupt-mask field to unmask,
-	 *    USBC_GINTMSK[PRTINT] = 1
-	 */
-	USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
-			cvmx_usbcx_gintmsk, prtintmsk, 1);
-	USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
-			cvmx_usbcx_gintmsk, disconnintmsk, 1);
-
-	/*
-	 * 2. Program the USBC_HCFG register to select full-speed host
-	 *    or high-speed host.
-	 */
-	usbcx_hcfg.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index));
-	usbcx_hcfg.s.fslssupp = 0;
-	usbcx_hcfg.s.fslspclksel = 0;
-	cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32);
-
-	cvmx_fifo_setup(usb);
-
-	/*
-	 * If the controller is getting port events right after the reset, it
-	 * means the initialization failed. Try resetting the controller again
-	 * in such case. This is seen to happen after cold boot on DSR-1000N.
-	 */
-	usbc_gintsts.u32 = cvmx_usb_read_csr32(usb,
-					       CVMX_USBCX_GINTSTS(usb->index));
-	cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index),
-			     usbc_gintsts.u32);
-	dev_dbg(dev, "gintsts after reset: 0x%x\n", (int)usbc_gintsts.u32);
-	if (!usbc_gintsts.s.disconnint && !usbc_gintsts.s.prtint)
-		return 0;
-	if (retries++ >= 5)
-		return -EAGAIN;
-	dev_info(dev, "controller reset failed (gintsts=0x%x) - retrying\n",
-		 (int)usbc_gintsts.u32);
-	msleep(50);
-	cvmx_usb_shutdown(usb);
-	msleep(50);
-	goto retry;
-}
-
-/**
- * Reset a USB port. After this call succeeds, the USB port is
- * online and servicing requests.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- */
-static void cvmx_usb_reset_port(struct octeon_hcd *usb)
-{
-	usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
-						  CVMX_USBCX_HPRT(usb->index));
-
-	/* Program the port reset bit to start the reset process */
-	USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
-			prtrst, 1);
-
-	/*
-	 * Wait at least 50ms (high speed), or 10ms (full speed) for the reset
-	 * process to complete.
-	 */
-	mdelay(50);
-
-	/* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
-	USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
-			prtrst, 0);
-
-	/*
-	 * Read the port speed field to get the enumerated speed,
-	 * USBC_HPRT[PRTSPD].
-	 */
-	usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
-						  CVMX_USBCX_HPRT(usb->index));
-}
-
-/**
- * Disable a USB port. After this call the USB port will not
- * generate data transfers and will not generate events.
- * Transactions in process will fail and call their
- * associated callbacks.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_disable(struct octeon_hcd *usb)
-{
-	/* Disable the port */
-	USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
-			prtena, 1);
-	return 0;
-}
-
-/**
- * Get the current state of the USB port. Use this call to
- * determine if the usb port has anything connected, is enabled,
- * or has some sort of error condition. The return value of this
- * call has "changed" bits to signal of the value of some fields
- * have changed between calls.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: Port status information
- */
-static struct cvmx_usb_port_status cvmx_usb_get_status(struct octeon_hcd *usb)
-{
-	union cvmx_usbcx_hprt usbc_hprt;
-	struct cvmx_usb_port_status result;
-
-	memset(&result, 0, sizeof(result));
-
-	usbc_hprt.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
-	result.port_enabled = usbc_hprt.s.prtena;
-	result.port_over_current = usbc_hprt.s.prtovrcurract;
-	result.port_powered = usbc_hprt.s.prtpwr;
-	result.port_speed = usbc_hprt.s.prtspd;
-	result.connected = usbc_hprt.s.prtconnsts;
-	result.connect_change =
-		result.connected != usb->port_status.connected;
-
-	return result;
-}
-
-/**
- * Open a virtual pipe between the host and a USB device. A pipe
- * must be opened before data can be transferred between a device
- * and Octeon.
- *
- * @usb:	     USB device state populated by cvmx_usb_initialize().
- * @device_addr:
- *		     USB device address to open the pipe to
- *		     (0-127).
- * @endpoint_num:
- *		     USB endpoint number to open the pipe to
- *		     (0-15).
- * @device_speed:
- *		     The speed of the device the pipe is going
- *		     to. This must match the device's speed,
- *		     which may be different than the port speed.
- * @max_packet:	     The maximum packet length the device can
- *		     transmit/receive (low speed=0-8, full
- *		     speed=0-1023, high speed=0-1024). This value
- *		     comes from the standard endpoint descriptor
- *		     field wMaxPacketSize bits <10:0>.
- * @transfer_type:
- *		     The type of transfer this pipe is for.
- * @transfer_dir:
- *		     The direction the pipe is in. This is not
- *		     used for control pipes.
- * @interval:	     For ISOCHRONOUS and INTERRUPT transfers,
- *		     this is how often the transfer is scheduled
- *		     for. All other transfers should specify
- *		     zero. The units are in frames (8000/sec at
- *		     high speed, 1000/sec for full speed).
- * @multi_count:
- *		     For high speed devices, this is the maximum
- *		     allowed number of packet per microframe.
- *		     Specify zero for non high speed devices. This
- *		     value comes from the standard endpoint descriptor
- *		     field wMaxPacketSize bits <12:11>.
- * @hub_device_addr:
- *		     Hub device address this device is connected
- *		     to. Devices connected directly to Octeon
- *		     use zero. This is only used when the device
- *		     is full/low speed behind a high speed hub.
- *		     The address will be of the high speed hub,
- *		     not and full speed hubs after it.
- * @hub_port:	     Which port on the hub the device is
- *		     connected. Use zero for devices connected
- *		     directly to Octeon. Like hub_device_addr,
- *		     this is only used for full/low speed
- *		     devices behind a high speed hub.
- *
- * Returns: A non-NULL value is a pipe. NULL means an error.
- */
-static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct octeon_hcd *usb,
-						int device_addr,
-						int endpoint_num,
-						enum cvmx_usb_speed
-							device_speed,
-						int max_packet,
-						enum cvmx_usb_transfer
-							transfer_type,
-						enum cvmx_usb_direction
-							transfer_dir,
-						int interval, int multi_count,
-						int hub_device_addr,
-						int hub_port)
-{
-	struct cvmx_usb_pipe *pipe;
-
-	pipe = kzalloc(sizeof(*pipe), GFP_ATOMIC);
-	if (!pipe)
-		return NULL;
-	if ((device_speed == CVMX_USB_SPEED_HIGH) &&
-	    (transfer_dir == CVMX_USB_DIRECTION_OUT) &&
-	    (transfer_type == CVMX_USB_TRANSFER_BULK))
-		pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
-	pipe->device_addr = device_addr;
-	pipe->endpoint_num = endpoint_num;
-	pipe->device_speed = device_speed;
-	pipe->max_packet = max_packet;
-	pipe->transfer_type = transfer_type;
-	pipe->transfer_dir = transfer_dir;
-	INIT_LIST_HEAD(&pipe->transactions);
-
-	/*
-	 * All pipes use interval to rate limit NAK processing. Force an
-	 * interval if one wasn't supplied
-	 */
-	if (!interval)
-		interval = 1;
-	if (cvmx_usb_pipe_needs_split(usb, pipe)) {
-		pipe->interval = interval * 8;
-		/* Force start splits to be scheduled on uFrame 0 */
-		pipe->next_tx_frame = ((usb->frame_number + 7) & ~7) +
-					pipe->interval;
-	} else {
-		pipe->interval = interval;
-		pipe->next_tx_frame = usb->frame_number + pipe->interval;
-	}
-	pipe->multi_count = multi_count;
-	pipe->hub_device_addr = hub_device_addr;
-	pipe->hub_port = hub_port;
-	pipe->pid_toggle = 0;
-	pipe->split_sc_frame = -1;
-	list_add_tail(&pipe->node, &usb->idle_pipes);
-
-	/*
-	 * We don't need to tell the hardware about this pipe yet since
-	 * it doesn't have any submitted requests
-	 */
-
-	return pipe;
-}
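
As a usage illustration only (not code from the original driver), the HCD glue could open a pipe for a high speed interrupt IN endpoint attached directly to Octeon roughly as below; the device address, endpoint number, max_packet and interval values are made up for the sketch:

	struct cvmx_usb_pipe *pipe;

	pipe = cvmx_usb_open_pipe(usb, /* device_addr */ 2,
				  /* endpoint_num */ 1,
				  CVMX_USB_SPEED_HIGH,
				  /* max_packet */ 64,
				  CVMX_USB_TRANSFER_INTERRUPT,
				  CVMX_USB_DIRECTION_IN,
				  /* interval, in frames */ 8,
				  /* multi_count */ 1,
				  /* hub_device_addr */ 0,
				  /* hub_port */ 0);
	if (!pipe)
		return -ENOMEM;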
-
-/**
- * Poll the RX FIFOs and remove data as needed. This function is only used
- * in non DMA mode. It is very important that this function be called quickly
- * enough to prevent FIFO overflow.
- *
- * @usb:	USB device state populated by cvmx_usb_initialize().
- */
-static void cvmx_usb_poll_rx_fifo(struct octeon_hcd *usb)
-{
-	union cvmx_usbcx_grxstsph rx_status;
-	int channel;
-	int bytes;
-	u64 address;
-	u32 *ptr;
-
-	rx_status.u32 = cvmx_usb_read_csr32(usb,
-					    CVMX_USBCX_GRXSTSPH(usb->index));
-	/* Only read data if IN data is there */
-	if (rx_status.s.pktsts != 2)
-		return;
-	/* Check if no data is available */
-	if (!rx_status.s.bcnt)
-		return;
-
-	channel = rx_status.s.chnum;
-	bytes = rx_status.s.bcnt;
-	if (!bytes)
-		return;
-
-	/* Get where the DMA engine would have written this data */
-	address = cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) +
-				     channel * 8);
-
-	ptr = cvmx_phys_to_ptr(address);
-	cvmx_write64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel * 8,
-			    address + bytes);
-
-	/* Loop writing the FIFO data for this packet into memory */
-	while (bytes > 0) {
-		*ptr++ = cvmx_usb_read_csr32(usb,
-					USB_FIFO_ADDRESS(channel, usb->index));
-		bytes -= 4;
-	}
-	CVMX_SYNCW;
-}
-
-/**
- * Fill the TX hardware fifo with data out of the software
- * fifos
- *
- * @usb:	    USB device state populated by cvmx_usb_initialize().
- * @fifo:	    Software fifo to use
- * @available:	    Amount of space in the hardware fifo
- *
- * Returns: Non zero if the hardware fifo was too small and needs
- *	    to be serviced again.
- */
-static int cvmx_usb_fill_tx_hw(struct octeon_hcd *usb,
-			       struct cvmx_usb_tx_fifo *fifo, int available)
-{
-	/*
-	 * We're done either when there isn't any more space or the software FIFO
-	 * is empty
-	 */
-	while (available && (fifo->head != fifo->tail)) {
-		int i = fifo->tail;
-		const u32 *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
-		u64 csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel,
-						   usb->index) ^ 4;
-		int words = available;
-
-		/* Limit the amount of data to what the SW fifo has */
-		if (fifo->entry[i].size <= available) {
-			words = fifo->entry[i].size;
-			fifo->tail++;
-			if (fifo->tail > MAX_CHANNELS)
-				fifo->tail = 0;
-		}
-
-		/* Update the next locations and counts */
-		available -= words;
-		fifo->entry[i].address += words * 4;
-		fifo->entry[i].size -= words;
-
-		/*
-		 * Write the HW fifo data. The read every three writes is due
-		 * to an erratum on CN3XXX chips
-		 */
-		while (words > 3) {
-			cvmx_write64_uint32(csr_address, *ptr++);
-			cvmx_write64_uint32(csr_address, *ptr++);
-			cvmx_write64_uint32(csr_address, *ptr++);
-			cvmx_read64_uint64(
-					CVMX_USBNX_DMA0_INB_CHN0(usb->index));
-			words -= 3;
-		}
-		cvmx_write64_uint32(csr_address, *ptr++);
-		if (--words) {
-			cvmx_write64_uint32(csr_address, *ptr++);
-			if (--words)
-				cvmx_write64_uint32(csr_address, *ptr++);
-		}
-		cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
-	}
-	return fifo->head != fifo->tail;
-}
-
-/**
- * Check the hardware FIFOs and fill them as needed
- *
- * @usb:	USB device state populated by cvmx_usb_initialize().
- */
-static void cvmx_usb_poll_tx_fifo(struct octeon_hcd *usb)
-{
-	if (usb->periodic.head != usb->periodic.tail) {
-		union cvmx_usbcx_hptxsts tx_status;
-
-		tx_status.u32 = cvmx_usb_read_csr32(usb,
-					CVMX_USBCX_HPTXSTS(usb->index));
-		if (cvmx_usb_fill_tx_hw(usb, &usb->periodic,
-					tx_status.s.ptxfspcavail))
-			USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
-					cvmx_usbcx_gintmsk, ptxfempmsk, 1);
-		else
-			USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
-					cvmx_usbcx_gintmsk, ptxfempmsk, 0);
-	}
-
-	if (usb->nonperiodic.head != usb->nonperiodic.tail) {
-		union cvmx_usbcx_gnptxsts tx_status;
-
-		tx_status.u32 = cvmx_usb_read_csr32(usb,
-					CVMX_USBCX_GNPTXSTS(usb->index));
-		if (cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic,
-					tx_status.s.nptxfspcavail))
-			USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
-					cvmx_usbcx_gintmsk, nptxfempmsk, 1);
-		else
-			USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
-					cvmx_usbcx_gintmsk, nptxfempmsk, 0);
-	}
-}
-
-/**
- * Fill the TX FIFO with an outgoing packet
- *
- * @usb:	  USB device state populated by cvmx_usb_initialize().
- * @channel:	  Channel number to get packet from
- */
-static void cvmx_usb_fill_tx_fifo(struct octeon_hcd *usb, int channel)
-{
-	union cvmx_usbcx_hccharx hcchar;
-	union cvmx_usbcx_hcspltx usbc_hcsplt;
-	union cvmx_usbcx_hctsizx usbc_hctsiz;
-	struct cvmx_usb_tx_fifo *fifo;
-
-	/* We only need to fill data on outbound channels */
-	hcchar.u32 = cvmx_usb_read_csr32(usb,
-			CVMX_USBCX_HCCHARX(channel, usb->index));
-	if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT)
-		return;
-
-	/* OUT splits only have data on the start split, not the complete */
-	usbc_hcsplt.u32 = cvmx_usb_read_csr32(usb,
-				CVMX_USBCX_HCSPLTX(channel, usb->index));
-	if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt)
-		return;
-
-	/*
-	 * Find out how many bytes we need to fill and convert it into 32bit
-	 * words.
-	 */
-	usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
-				CVMX_USBCX_HCTSIZX(channel, usb->index));
-	if (!usbc_hctsiz.s.xfersize)
-		return;
-
-	if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) ||
-	    (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
-		fifo = &usb->periodic;
-	else
-		fifo = &usb->nonperiodic;
-
-	fifo->entry[fifo->head].channel = channel;
-	fifo->entry[fifo->head].address =
-		cvmx_read64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) +
-				   channel * 8);
-	fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize + 3) >> 2;
-	fifo->head++;
-	if (fifo->head > MAX_CHANNELS)
-		fifo->head = 0;
-
-	cvmx_usb_poll_tx_fifo(usb);
-}
-
-/**
- * Perform channel specific setup for Control transactions. All
- * the generic stuff will already have been done in cvmx_usb_start_channel().
- *
- * @usb:	  USB device state populated by cvmx_usb_initialize().
- * @channel:	  Channel to setup
- * @pipe:	  Pipe for control transaction
- */
-static void cvmx_usb_start_channel_control(struct octeon_hcd *usb,
-					   int channel,
-					   struct cvmx_usb_pipe *pipe)
-{
-	struct usb_hcd *hcd = octeon_to_hcd(usb);
-	struct device *dev = hcd->self.controller;
-	struct cvmx_usb_transaction *transaction =
-		list_first_entry(&pipe->transactions, typeof(*transaction),
-				 node);
-	struct usb_ctrlrequest *header =
-		cvmx_phys_to_ptr(transaction->control_header);
-	int bytes_to_transfer = transaction->buffer_length -
-		transaction->actual_bytes;
-	int packets_to_transfer;
-	union cvmx_usbcx_hctsizx usbc_hctsiz;
-
-	usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
-				CVMX_USBCX_HCTSIZX(channel, usb->index));
-
-	switch (transaction->stage) {
-	case CVMX_USB_STAGE_NON_CONTROL:
-	case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
-		dev_err(dev, "%s: ERROR - Non control stage\n", __func__);
-		break;
-	case CVMX_USB_STAGE_SETUP:
-		usbc_hctsiz.s.pid = 3; /* Setup */
-		bytes_to_transfer = sizeof(*header);
-		/* All Control operations start with a setup going OUT */
-		USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
-				cvmx_usbcx_hccharx, epdir,
-				CVMX_USB_DIRECTION_OUT);
-		/*
-		 * Setup sends the control header instead of the buffer data. The
-		 * buffer data will be used in the next stage
-		 */
-		cvmx_write64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) +
-					channel * 8,
-				    transaction->control_header);
-		break;
-	case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
-		usbc_hctsiz.s.pid = 3; /* Setup */
-		bytes_to_transfer = 0;
-		/* All Control operations start with a setup going OUT */
-		USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
-				cvmx_usbcx_hccharx, epdir,
-				CVMX_USB_DIRECTION_OUT);
-
-		USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
-				cvmx_usbcx_hcspltx, compsplt, 1);
-		break;
-	case CVMX_USB_STAGE_DATA:
-		usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
-		if (cvmx_usb_pipe_needs_split(usb, pipe)) {
-			if (header->bRequestType & USB_DIR_IN)
-				bytes_to_transfer = 0;
-			else if (bytes_to_transfer > pipe->max_packet)
-				bytes_to_transfer = pipe->max_packet;
-		}
-		USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
-				cvmx_usbcx_hccharx, epdir,
-				((header->bRequestType & USB_DIR_IN) ?
-					CVMX_USB_DIRECTION_IN :
-					CVMX_USB_DIRECTION_OUT));
-		break;
-	case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
-		usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
-		if (!(header->bRequestType & USB_DIR_IN))
-			bytes_to_transfer = 0;
-		USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
-				cvmx_usbcx_hccharx, epdir,
-				((header->bRequestType & USB_DIR_IN) ?
-					CVMX_USB_DIRECTION_IN :
-					CVMX_USB_DIRECTION_OUT));
-		USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
-				cvmx_usbcx_hcspltx, compsplt, 1);
-		break;
-	case CVMX_USB_STAGE_STATUS:
-		usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
-		bytes_to_transfer = 0;
-		USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
-				cvmx_usbcx_hccharx, epdir,
-				((header->bRequestType & USB_DIR_IN) ?
-					CVMX_USB_DIRECTION_OUT :
-					CVMX_USB_DIRECTION_IN));
-		break;
-	case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
-		usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
-		bytes_to_transfer = 0;
-		USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
-				cvmx_usbcx_hccharx, epdir,
-				((header->bRequestType & USB_DIR_IN) ?
-					CVMX_USB_DIRECTION_OUT :
-					CVMX_USB_DIRECTION_IN));
-		USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
-				cvmx_usbcx_hcspltx, compsplt, 1);
-		break;
-	}
-
-	/*
-	 * Make sure the transfer never exceeds the byte limit of the hardware.
-	 * Further bytes will be sent as continued transactions
-	 */
-	if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
-		/* Round MAX_TRANSFER_BYTES to a multiple of out packet size */
-		bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
-		bytes_to_transfer *= pipe->max_packet;
-	}
-
-	/*
-	 * Calculate the number of packets to transfer. If the length is zero
-	 * we still need to transfer one packet
-	 */
-	packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer,
-					   pipe->max_packet);
-	if (packets_to_transfer == 0) {
-		packets_to_transfer = 1;
-	} else if ((packets_to_transfer > 1) &&
-			(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
-		/*
-		 * Limit to one packet when not using DMA. Channels must be
-		 * restarted between every packet for IN transactions, so there
-		 * is no reason to do multiple packets in a row
-		 */
-		packets_to_transfer = 1;
-		bytes_to_transfer = packets_to_transfer * pipe->max_packet;
-	} else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
-		/*
-		 * Limit the number of packets and data transferred to what the
-		 * hardware can handle
-		 */
-		packets_to_transfer = MAX_TRANSFER_PACKETS;
-		bytes_to_transfer = packets_to_transfer * pipe->max_packet;
-	}
-
-	usbc_hctsiz.s.xfersize = bytes_to_transfer;
-	usbc_hctsiz.s.pktcnt = packets_to_transfer;
-
-	cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index),
-			     usbc_hctsiz.u32);
-}
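
A worked example of the clamping above, using made-up numbers (the MAX_TRANSFER_* limits themselves are defined elsewhere in the driver):

	int max_packet = 64;				/* illustrative value */
	int bytes_to_transfer = 1000;			/* illustrative value */
	int packets_to_transfer =
		DIV_ROUND_UP(bytes_to_transfer, max_packet);	/* = 16 */

	/*
	 * Nothing exceeds the hardware limits here, so the channel would be
	 * programmed with xfersize = 1000 and pktcnt = 16.  Had the request
	 * been larger than MAX_TRANSFER_BYTES, it would first have been
	 * rounded down to a whole number of 64-byte packets so no packet is
	 * split across transfers.
	 */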
-
-/**
- * Start a channel to perform the pipe's head transaction
- *
- * @usb:	  USB device state populated by cvmx_usb_initialize().
- * @channel:	  Channel to setup
- * @pipe:	  Pipe to start
- */
-static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel,
-				   struct cvmx_usb_pipe *pipe)
-{
-	struct cvmx_usb_transaction *transaction =
-		list_first_entry(&pipe->transactions, typeof(*transaction),
-				 node);
-
-	/* Make sure all writes to the DMA region get flushed */
-	CVMX_SYNCW;
-
-	/* Attach the channel to the pipe */
-	usb->pipe_for_channel[channel] = pipe;
-	pipe->channel = channel;
-	pipe->flags |= CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
-	/* Mark this channel as in use */
-	usb->idle_hardware_channels &= ~(1 << channel);
-
-	/* Enable the channel interrupt bits */
-	{
-		union cvmx_usbcx_hcintx usbc_hcint;
-		union cvmx_usbcx_hcintmskx usbc_hcintmsk;
-		union cvmx_usbcx_haintmsk usbc_haintmsk;
-
-		/* Clear all channel status bits */
-		usbc_hcint.u32 = cvmx_usb_read_csr32(usb,
-					CVMX_USBCX_HCINTX(channel, usb->index));
-
-		cvmx_usb_write_csr32(usb,
-				     CVMX_USBCX_HCINTX(channel, usb->index),
-				     usbc_hcint.u32);
-
-		usbc_hcintmsk.u32 = 0;
-		usbc_hcintmsk.s.chhltdmsk = 1;
-		if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
-			/*
-			 * Channels need these extra interrupts when we aren't
-			 * in DMA mode.
-			 */
-			usbc_hcintmsk.s.datatglerrmsk = 1;
-			usbc_hcintmsk.s.frmovrunmsk = 1;
-			usbc_hcintmsk.s.bblerrmsk = 1;
-			usbc_hcintmsk.s.xacterrmsk = 1;
-			if (cvmx_usb_pipe_needs_split(usb, pipe)) {
-				/*
-				 * Splits don't generate xfercompl, so we need
-				 * ACK and NYET.
-				 */
-				usbc_hcintmsk.s.nyetmsk = 1;
-				usbc_hcintmsk.s.ackmsk = 1;
-			}
-			usbc_hcintmsk.s.nakmsk = 1;
-			usbc_hcintmsk.s.stallmsk = 1;
-			usbc_hcintmsk.s.xfercomplmsk = 1;
-		}
-		cvmx_usb_write_csr32(usb,
-				     CVMX_USBCX_HCINTMSKX(channel, usb->index),
-				     usbc_hcintmsk.u32);
-
-		/* Enable the channel interrupt to propagate */
-		usbc_haintmsk.u32 = cvmx_usb_read_csr32(usb,
-					CVMX_USBCX_HAINTMSK(usb->index));
-		usbc_haintmsk.s.haintmsk |= 1 << channel;
-		cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index),
-				     usbc_haintmsk.u32);
-	}
-
-	/* Setup the location the DMA engine uses. */
-	{
-		u64 reg;
-		u64 dma_address = transaction->buffer +
-				  transaction->actual_bytes;
-
-		if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
-			dma_address = transaction->buffer +
-					transaction->iso_packets[0].offset +
-					transaction->actual_bytes;
-
-		if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT)
-			reg = CVMX_USBNX_DMA0_OUTB_CHN0(usb->index);
-		else
-			reg = CVMX_USBNX_DMA0_INB_CHN0(usb->index);
-		cvmx_write64_uint64(reg + channel * 8, dma_address);
-	}
-
-	/* Setup both the size of the transfer and the SPLIT characteristics */
-	{
-		union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0};
-		union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0};
-		int packets_to_transfer;
-		int bytes_to_transfer = transaction->buffer_length -
-			transaction->actual_bytes;
-
-		/*
-		 * ISOCHRONOUS transactions store each individual transfer size
-		 * in the packet structure, not the global buffer_length
-		 */
-		if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
-			bytes_to_transfer =
-				transaction->iso_packets[0].length -
-				transaction->actual_bytes;
-
-		/*
-		 * We need to do split transactions when we are talking to non
-		 * high speed devices that are behind a high speed hub
-		 */
-		if (cvmx_usb_pipe_needs_split(usb, pipe)) {
-			/*
-			 * On the start split phase (stage is even) record the
-			 * frame number we will need to send the split complete.
-			 * We only store the lower two bits since the time ahead
-			 * can only be two frames
-			 */
-			if ((transaction->stage & 1) == 0) {
-				if (transaction->type == CVMX_USB_TRANSFER_BULK)
-					pipe->split_sc_frame =
-						(usb->frame_number + 1) & 0x7f;
-				else
-					pipe->split_sc_frame =
-						(usb->frame_number + 2) & 0x7f;
-			} else {
-				pipe->split_sc_frame = -1;
-			}
-
-			usbc_hcsplt.s.spltena = 1;
-			usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
-			usbc_hcsplt.s.prtaddr = pipe->hub_port;
-			usbc_hcsplt.s.compsplt = (transaction->stage ==
-				CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
-
-			/*
-			 * SPLIT transactions can only ever transmit one data
-			 * packet so limit the transfer size to the max packet
-			 * size
-			 */
-			if (bytes_to_transfer > pipe->max_packet)
-				bytes_to_transfer = pipe->max_packet;
-
-			/*
-			 * ISOCHRONOUS OUT splits are unique in that they limit
-			 * data transfers to 188 byte chunks representing the
-			 * beginning/middle/end of the data, or all of it
-			 */
-			if (!usbc_hcsplt.s.compsplt &&
-			    (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
-			    (pipe->transfer_type ==
-			     CVMX_USB_TRANSFER_ISOCHRONOUS)) {
-				/*
-				 * Clear the split complete frame number as
-				 * there isn't going to be a split complete
-				 */
-				pipe->split_sc_frame = -1;
-				/*
-				 * See if we've started this transfer and sent
-				 * data
-				 */
-				if (transaction->actual_bytes == 0) {
-					/*
-					 * Nothing sent yet, this is either a
-					 * begin or the entire payload
-					 */
-					if (bytes_to_transfer <= 188)
-						/* Entire payload in one go */
-						usbc_hcsplt.s.xactpos = 3;
-					else
-						/* First part of payload */
-						usbc_hcsplt.s.xactpos = 2;
-				} else {
-					/*
-					 * Continuing the previous data, we must
-					 * either be in the middle or at the end
-					 */
-					if (bytes_to_transfer <= 188)
-						/* End of payload */
-						usbc_hcsplt.s.xactpos = 1;
-					else
-						/* Middle of payload */
-						usbc_hcsplt.s.xactpos = 0;
-				}
-				/*
-				 * Again, the transfer size is limited to 188
-				 * bytes
-				 */
-				if (bytes_to_transfer > 188)
-					bytes_to_transfer = 188;
-			}
-		}
-
-		/*
-		 * Make sure the transfer never exceeds the byte limit of the
-		 * hardware. Further bytes will be sent as continued
-		 * transactions
-		 */
-		if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
-			/*
-			 * Round MAX_TRANSFER_BYTES to a multiple of out packet
-			 * size
-			 */
-			bytes_to_transfer = MAX_TRANSFER_BYTES /
-				pipe->max_packet;
-			bytes_to_transfer *= pipe->max_packet;
-		}
-
-		/*
-		 * Calculate the number of packets to transfer. If the length is
-		 * zero we still need to transfer one packet
-		 */
-		packets_to_transfer =
-			DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet);
-		if (packets_to_transfer == 0) {
-			packets_to_transfer = 1;
-		} else if ((packets_to_transfer > 1) &&
-			   (usb->init_flags &
-			    CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
-			/*
-			 * Limit to one packet when not using DMA. Channels must
-			 * be restarted between every packet for IN
-			 * transactions, so there is no reason to do multiple
-			 * packets in a row
-			 */
-			packets_to_transfer = 1;
-			bytes_to_transfer = packets_to_transfer *
-				pipe->max_packet;
-		} else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
-			/*
-			 * Limit the number of packets and data transferred to
-			 * what the hardware can handle
-			 */
-			packets_to_transfer = MAX_TRANSFER_PACKETS;
-			bytes_to_transfer = packets_to_transfer *
-				pipe->max_packet;
-		}
-
-		usbc_hctsiz.s.xfersize = bytes_to_transfer;
-		usbc_hctsiz.s.pktcnt = packets_to_transfer;
-
-		/* Update the DATA0/DATA1 toggle */
-		usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
-		/*
-		 * High speed pipes may need a hardware ping before they start
-		 */
-		if (pipe->flags & CVMX_USB_PIPE_FLAGS_NEED_PING)
-			usbc_hctsiz.s.dopng = 1;
-
-		cvmx_usb_write_csr32(usb,
-				     CVMX_USBCX_HCSPLTX(channel, usb->index),
-				     usbc_hcsplt.u32);
-		cvmx_usb_write_csr32(usb,
-				     CVMX_USBCX_HCTSIZX(channel, usb->index),
-				     usbc_hctsiz.u32);
-	}
-
-	/* Setup the Host Channel Characteristics Register */
-	{
-		union cvmx_usbcx_hccharx usbc_hcchar = {.u32 = 0};
-
-		/*
-		 * Set the startframe odd/even properly. This is only used for
-		 * periodic
-		 * periodic transfers
-		usbc_hcchar.s.oddfrm = usb->frame_number & 1;
-
-		/*
-		 * Set the number of back-to-back packets allowed by this
-		 * endpoint. Split transactions interpret "ec" as the number of
-		 * immediate retries on failure. These retries happen too
-		 * quickly, so we disable them entirely for splits
-		 */
-		if (cvmx_usb_pipe_needs_split(usb, pipe))
-			usbc_hcchar.s.ec = 1;
-		else if (pipe->multi_count < 1)
-			usbc_hcchar.s.ec = 1;
-		else if (pipe->multi_count > 3)
-			usbc_hcchar.s.ec = 3;
-		else
-			usbc_hcchar.s.ec = pipe->multi_count;
-
-		/* Set the rest of the endpoint specific settings */
-		usbc_hcchar.s.devaddr = pipe->device_addr;
-		usbc_hcchar.s.eptype = transaction->type;
-		usbc_hcchar.s.lspddev =
-			(pipe->device_speed == CVMX_USB_SPEED_LOW);
-		usbc_hcchar.s.epdir = pipe->transfer_dir;
-		usbc_hcchar.s.epnum = pipe->endpoint_num;
-		usbc_hcchar.s.mps = pipe->max_packet;
-		cvmx_usb_write_csr32(usb,
-				     CVMX_USBCX_HCCHARX(channel, usb->index),
-				     usbc_hcchar.u32);
-	}
-
-	/* Do transaction type specific fixups as needed */
-	switch (transaction->type) {
-	case CVMX_USB_TRANSFER_CONTROL:
-		cvmx_usb_start_channel_control(usb, channel, pipe);
-		break;
-	case CVMX_USB_TRANSFER_BULK:
-	case CVMX_USB_TRANSFER_INTERRUPT:
-		break;
-	case CVMX_USB_TRANSFER_ISOCHRONOUS:
-		if (!cvmx_usb_pipe_needs_split(usb, pipe)) {
-			/*
-			 * ISO transactions require different PIDs depending on
-			 * direction and how many packets are needed
-			 */
-			if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
-				if (pipe->multi_count < 2) /* Need DATA0 */
-					USB_SET_FIELD32(
-						CVMX_USBCX_HCTSIZX(channel,
-								   usb->index),
-						cvmx_usbcx_hctsizx, pid, 0);
-				else /* Need MDATA */
-					USB_SET_FIELD32(
-						CVMX_USBCX_HCTSIZX(channel,
-								   usb->index),
-						cvmx_usbcx_hctsizx, pid, 3);
-			}
-		}
-		break;
-	}
-	{
-		union cvmx_usbcx_hctsizx usbc_hctsiz = { .u32 =
-			cvmx_usb_read_csr32(usb,
-					    CVMX_USBCX_HCTSIZX(channel,
-							       usb->index))
-		};
-		transaction->xfersize = usbc_hctsiz.s.xfersize;
-		transaction->pktcnt = usbc_hctsiz.s.pktcnt;
-	}
-	/* Remember when we start a split transaction */
-	if (cvmx_usb_pipe_needs_split(usb, pipe))
-		usb->active_split = transaction;
-	USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
-			cvmx_usbcx_hccharx, chena, 1);
-	if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
-		cvmx_usb_fill_tx_fifo(usb, channel);
-}
-
-/**
- * Find a pipe that is ready to be scheduled to hardware.
- * @usb:	 USB device state populated by cvmx_usb_initialize().
- * @xfer_type:	 Transfer type
- *
- * Returns: Pipe or NULL if none are ready
- */
-static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(struct octeon_hcd *usb,
-		enum cvmx_usb_transfer xfer_type)
-{
-	struct list_head *list = usb->active_pipes + xfer_type;
-	u64 current_frame = usb->frame_number;
-	struct cvmx_usb_pipe *pipe;
-
-	list_for_each_entry(pipe, list, node) {
-		struct cvmx_usb_transaction *t =
-			list_first_entry(&pipe->transactions, typeof(*t),
-					 node);
-		if (!(pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED) && t &&
-		    (pipe->next_tx_frame <= current_frame) &&
-		    ((pipe->split_sc_frame == -1) ||
-		     ((((int)current_frame - pipe->split_sc_frame) & 0x7f) <
-		      0x40)) &&
-		    (!usb->active_split || (usb->active_split == t))) {
-			prefetch(t);
-			return pipe;
-		}
-	}
-	return NULL;
-}
-
-static struct cvmx_usb_pipe *cvmx_usb_next_pipe(struct octeon_hcd *usb,
-						int is_sof)
-{
-	struct cvmx_usb_pipe *pipe;
-
-	/* Find a pipe needing service. */
-	if (is_sof) {
-		/*
-		 * Only process periodic pipes on SOF interrupts. This way we
-		 * are sure that the periodic data is sent in the beginning of
-		 * the frame.
-		 */
-		pipe = cvmx_usb_find_ready_pipe(usb,
-						CVMX_USB_TRANSFER_ISOCHRONOUS);
-		if (pipe)
-			return pipe;
-		pipe = cvmx_usb_find_ready_pipe(usb,
-						CVMX_USB_TRANSFER_INTERRUPT);
-		if (pipe)
-			return pipe;
-	}
-	pipe = cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_CONTROL);
-	if (pipe)
-		return pipe;
-	return cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_BULK);
-}
-
-/**
- * Called whenever a pipe might need to be scheduled to the
- * hardware.
- *
- * @usb:	 USB device state populated by cvmx_usb_initialize().
- * @is_sof:	 True if this schedule was called on a SOF interrupt.
- */
-static void cvmx_usb_schedule(struct octeon_hcd *usb, int is_sof)
-{
-	int channel;
-	struct cvmx_usb_pipe *pipe;
-	int need_sof;
-	enum cvmx_usb_transfer ttype;
-
-	if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
-		/*
-		 * Without DMA we need to be careful to not schedule something
-		 * at the end of a frame and cause an overrun.
-		 */
-		union cvmx_usbcx_hfnum hfnum = {
-			.u32 = cvmx_usb_read_csr32(usb,
-						CVMX_USBCX_HFNUM(usb->index))
-		};
-
-		union cvmx_usbcx_hfir hfir = {
-			.u32 = cvmx_usb_read_csr32(usb,
-						CVMX_USBCX_HFIR(usb->index))
-		};
-
-		if (hfnum.s.frrem < hfir.s.frint / 4)
-			goto done;
-	}
-
-	while (usb->idle_hardware_channels) {
-		/* Find an idle channel */
-		channel = __fls(usb->idle_hardware_channels);
-		if (unlikely(channel > 7))
-			break;
-
-		pipe = cvmx_usb_next_pipe(usb, is_sof);
-		if (!pipe)
-			break;
-
-		cvmx_usb_start_channel(usb, channel, pipe);
-	}
-
-done:
-	/*
-	 * Only enable SOF interrupts when we have transactions pending in the
-	 * future that might need to be scheduled
-	 */
-	need_sof = 0;
-	for (ttype = CVMX_USB_TRANSFER_CONTROL;
-	     ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
-		list_for_each_entry(pipe, &usb->active_pipes[ttype], node) {
-			if (pipe->next_tx_frame > usb->frame_number) {
-				need_sof = 1;
-				break;
-			}
-		}
-	}
-	USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
-			cvmx_usbcx_gintmsk, sofmsk, need_sof);
-}
-
-static void octeon_usb_urb_complete_callback(struct octeon_hcd *usb,
-					     enum cvmx_usb_status status,
-					     struct cvmx_usb_pipe *pipe,
-					     struct cvmx_usb_transaction
-						*transaction,
-					     int bytes_transferred,
-					     struct urb *urb)
-{
-	struct usb_hcd *hcd = octeon_to_hcd(usb);
-	struct device *dev = hcd->self.controller;
-
-	if (likely(status == CVMX_USB_STATUS_OK))
-		urb->actual_length = bytes_transferred;
-	else
-		urb->actual_length = 0;
-
-	urb->hcpriv = NULL;
-
-	/* For Isochronous transactions we need to update the URB packet status
-	 * list from data in our private copy
-	 */
-	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
-		int i;
-		/*
-		 * The pointer to the private list is stored in the setup_packet
-		 * field.
-		 */
-		struct cvmx_usb_iso_packet *iso_packet =
-			(struct cvmx_usb_iso_packet *)urb->setup_packet;
-		/* Recalculate the transfer size by adding up each packet */
-		urb->actual_length = 0;
-		for (i = 0; i < urb->number_of_packets; i++) {
-			if (iso_packet[i].status == CVMX_USB_STATUS_OK) {
-				urb->iso_frame_desc[i].status = 0;
-				urb->iso_frame_desc[i].actual_length =
-					iso_packet[i].length;
-				urb->actual_length +=
-					urb->iso_frame_desc[i].actual_length;
-			} else {
-				dev_dbg(dev, "ISOCHRONOUS packet=%d of %d status=%d pipe=%p transaction=%p size=%d\n",
-					i, urb->number_of_packets,
-					iso_packet[i].status, pipe,
-					transaction, iso_packet[i].length);
-				urb->iso_frame_desc[i].status = -EREMOTEIO;
-			}
-		}
-		/* Free the private list now that we don't need it anymore */
-		kfree(iso_packet);
-		urb->setup_packet = NULL;
-	}
-
-	switch (status) {
-	case CVMX_USB_STATUS_OK:
-		urb->status = 0;
-		break;
-	case CVMX_USB_STATUS_CANCEL:
-		if (urb->status == 0)
-			urb->status = -ENOENT;
-		break;
-	case CVMX_USB_STATUS_STALL:
-		dev_dbg(dev, "status=stall pipe=%p transaction=%p size=%d\n",
-			pipe, transaction, bytes_transferred);
-		urb->status = -EPIPE;
-		break;
-	case CVMX_USB_STATUS_BABBLEERR:
-		dev_dbg(dev, "status=babble pipe=%p transaction=%p size=%d\n",
-			pipe, transaction, bytes_transferred);
-		urb->status = -EPIPE;
-		break;
-	case CVMX_USB_STATUS_SHORT:
-		dev_dbg(dev, "status=short pipe=%p transaction=%p size=%d\n",
-			pipe, transaction, bytes_transferred);
-		urb->status = -EREMOTEIO;
-		break;
-	case CVMX_USB_STATUS_ERROR:
-	case CVMX_USB_STATUS_XACTERR:
-	case CVMX_USB_STATUS_DATATGLERR:
-	case CVMX_USB_STATUS_FRAMEERR:
-		dev_dbg(dev, "status=%d pipe=%p transaction=%p size=%d\n",
-			status, pipe, transaction, bytes_transferred);
-		urb->status = -EPROTO;
-		break;
-	}
-	usb_hcd_unlink_urb_from_ep(octeon_to_hcd(usb), urb);
-	spin_unlock(&usb->lock);
-	usb_hcd_giveback_urb(octeon_to_hcd(usb), urb, urb->status);
-	spin_lock(&usb->lock);
-}
-
-/**
- * Signal the completion of a transaction and free it. The
- * transaction will be removed from the pipe transaction list.
- *
- * @usb:	 USB device state populated by cvmx_usb_initialize().
- * @pipe:	 Pipe the transaction is on
- * @transaction:
- *		 Transaction that completed
- * @complete_code:
- *		 Completion code
- */
-static void cvmx_usb_complete(struct octeon_hcd *usb,
-			      struct cvmx_usb_pipe *pipe,
-			      struct cvmx_usb_transaction *transaction,
-			      enum cvmx_usb_status complete_code)
-{
-	/* If this was a split then clear our split in progress marker */
-	if (usb->active_split == transaction)
-		usb->active_split = NULL;
-
-	/*
-	 * Isochronous transactions need extra processing as they might not be
-	 * done after a single data transfer
-	 */
-	if (unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
-		/* Update the number of bytes transferred in this ISO packet */
-		transaction->iso_packets[0].length = transaction->actual_bytes;
-		transaction->iso_packets[0].status = complete_code;
-
-		/*
-		 * If there are more ISOs pending and we succeeded, schedule the
-		 * next one
-		 */
-		if ((transaction->iso_number_packets > 1) &&
-		    (complete_code == CVMX_USB_STATUS_OK)) {
-			/* No bytes transferred for this packet as of yet */
-			transaction->actual_bytes = 0;
-			/* One less ISO waiting to transfer */
-			transaction->iso_number_packets--;
-			/* Increment to the next location in our packet array */
-			transaction->iso_packets++;
-			transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
-			return;
-		}
-	}
-
-	/* Remove the transaction from the pipe list */
-	list_del(&transaction->node);
-	if (list_empty(&pipe->transactions))
-		list_move_tail(&pipe->node, &usb->idle_pipes);
-	octeon_usb_urb_complete_callback(usb, complete_code, pipe,
-					 transaction,
-					 transaction->actual_bytes,
-					 transaction->urb);
-	kfree(transaction);
-}
-
-/**
- * Submit a USB transaction to a pipe. Called for all types
- * of transactions.
- *
- * @usb:	     USB device state populated by cvmx_usb_initialize().
- * @pipe:	    Which pipe to submit to.
- * @type:	    Transaction type
- * @buffer:	    User buffer for the transaction
- * @buffer_length:
- *		    User buffer's length in bytes
- * @control_header:
- *		    For control transactions, the 8 byte standard header
- * @iso_start_frame:
- *		    For ISO transactions, the start frame
- * @iso_number_packets:
- *		    For ISO, the number of packets in the transaction.
- * @iso_packets:
- *		    A description of each ISO packet
- * @urb:	    URB for the callback
- *
- * Returns: Transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_transaction(
-				struct octeon_hcd *usb,
-				struct cvmx_usb_pipe *pipe,
-				enum cvmx_usb_transfer type,
-				u64 buffer,
-				int buffer_length,
-				u64 control_header,
-				int iso_start_frame,
-				int iso_number_packets,
-				struct cvmx_usb_iso_packet *iso_packets,
-				struct urb *urb)
-{
-	struct cvmx_usb_transaction *transaction;
-
-	if (unlikely(pipe->transfer_type != type))
-		return NULL;
-
-	transaction = kzalloc(sizeof(*transaction), GFP_ATOMIC);
-	if (unlikely(!transaction))
-		return NULL;
-
-	transaction->type = type;
-	transaction->buffer = buffer;
-	transaction->buffer_length = buffer_length;
-	transaction->control_header = control_header;
-	/* FIXME: This is not used, implement it. */
-	transaction->iso_start_frame = iso_start_frame;
-	transaction->iso_number_packets = iso_number_packets;
-	transaction->iso_packets = iso_packets;
-	transaction->urb = urb;
-	if (transaction->type == CVMX_USB_TRANSFER_CONTROL)
-		transaction->stage = CVMX_USB_STAGE_SETUP;
-	else
-		transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
-
-	if (!list_empty(&pipe->transactions)) {
-		list_add_tail(&transaction->node, &pipe->transactions);
-	} else {
-		list_add_tail(&transaction->node, &pipe->transactions);
-		list_move_tail(&pipe->node,
-			       &usb->active_pipes[pipe->transfer_type]);
-
-		/*
-		 * The pipe was idle, so this transaction is now at its head
-		 * and the pipe may need to be scheduled.
-		 */
-		cvmx_usb_schedule(usb, 0);
-	}
-
-	return transaction;
-}
-
-/**
- * Call to submit a USB Bulk transfer to a pipe.
- *
- * @usb:	    USB device state populated by cvmx_usb_initialize().
- * @pipe:	    Handle to the pipe for the transfer.
- * @urb:	    URB.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(
-						struct octeon_hcd *usb,
-						struct cvmx_usb_pipe *pipe,
-						struct urb *urb)
-{
-	return cvmx_usb_submit_transaction(usb, pipe, CVMX_USB_TRANSFER_BULK,
-					   urb->transfer_dma,
-					   urb->transfer_buffer_length,
-					   0, /* control_header */
-					   0, /* iso_start_frame */
-					   0, /* iso_number_packets */
-					   NULL, /* iso_packets */
-					   urb);
-}
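
As an illustration of how these wrappers might be driven from the URB enqueue path (a sketch only; the use of urb->hcpriv mirrors the completion callback above, and the surrounding locking is assumed to be handled by the caller):

	struct cvmx_usb_transaction *transaction;

	transaction = cvmx_usb_submit_bulk(usb, pipe, urb);
	if (!transaction)
		return -ENOMEM;
	/* The completion callback clears hcpriv, so stash the handle here. */
	urb->hcpriv = transaction;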
-
-/**
- * Call to submit a USB Interrupt transfer to a pipe.
- *
- * @usb:	    USB device state populated by cvmx_usb_initialize().
- * @pipe:	    Handle to the pipe for the transfer.
- * @urb:	    URB returned when the callback is called.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(
-						struct octeon_hcd *usb,
-						struct cvmx_usb_pipe *pipe,
-						struct urb *urb)
-{
-	return cvmx_usb_submit_transaction(usb, pipe,
-					   CVMX_USB_TRANSFER_INTERRUPT,
-					   urb->transfer_dma,
-					   urb->transfer_buffer_length,
-					   0, /* control_header */
-					   0, /* iso_start_frame */
-					   0, /* iso_number_packets */
-					   NULL, /* iso_packets */
-					   urb);
-}
-
-/**
- * Call to submit a USB Control transfer to a pipe.
- *
- * @usb:	    USB device state populated by cvmx_usb_initialize().
- * @pipe:	    Handle to the pipe for the transfer.
- * @urb:	    URB.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_control(
-						struct octeon_hcd *usb,
-						struct cvmx_usb_pipe *pipe,
-						struct urb *urb)
-{
-	int buffer_length = urb->transfer_buffer_length;
-	u64 control_header = urb->setup_dma;
-	struct usb_ctrlrequest *header = cvmx_phys_to_ptr(control_header);
-
-	if ((header->bRequestType & USB_DIR_IN) == 0)
-		buffer_length = le16_to_cpu(header->wLength);
-
-	return cvmx_usb_submit_transaction(usb, pipe,
-					   CVMX_USB_TRANSFER_CONTROL,
-					   urb->transfer_dma, buffer_length,
-					   control_header,
-					   0, /* iso_start_frame */
-					   0, /* iso_number_packets */
-					   NULL, /* iso_packets */
-					   urb);
-}
-
-/**
- * Call to submit a USB Isochronous transfer to a pipe.
- *
- * @usb:	    USB device state populated by cvmx_usb_initialize().
- * @pipe:	    Handle to the pipe for the transfer.
- * @urb:	    URB returned when the callback is called.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(
-						struct octeon_hcd *usb,
-						struct cvmx_usb_pipe *pipe,
-						struct urb *urb)
-{
-	struct cvmx_usb_iso_packet *packets;
-
-	packets = (struct cvmx_usb_iso_packet *)urb->setup_packet;
-	return cvmx_usb_submit_transaction(usb, pipe,
-					   CVMX_USB_TRANSFER_ISOCHRONOUS,
-					   urb->transfer_dma,
-					   urb->transfer_buffer_length,
-					   0, /* control_header */
-					   urb->start_frame,
-					   urb->number_of_packets,
-					   packets, urb);
-}
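
The completion callback above expects urb->setup_packet to carry a private array of struct cvmx_usb_iso_packet. A minimal sketch of building that array from the URB's frame descriptors before submission (the GFP flag and the initial status value are assumptions for this sketch):

	struct cvmx_usb_iso_packet *iso_packet;
	int i;

	iso_packet = kmalloc_array(urb->number_of_packets,
				   sizeof(*iso_packet), GFP_ATOMIC);
	if (!iso_packet)
		return -ENOMEM;
	for (i = 0; i < urb->number_of_packets; i++) {
		iso_packet[i].offset = urb->iso_frame_desc[i].offset;
		iso_packet[i].length = urb->iso_frame_desc[i].length;
		iso_packet[i].status = CVMX_USB_STATUS_ERROR;
	}
	/* Reuse setup_packet to carry the private list, as the callback expects. */
	urb->setup_packet = (unsigned char *)iso_packet;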
-
-/**
- * Cancel one outstanding request in a pipe. Canceling a request
- * can fail if the transaction has already completed before cancel
- * is called. Even after a successful cancel call, it may take
- * a frame or two for the cvmx_usb_poll() function to call the
- * associated callback.
- *
- * @usb:	 USB device state populated by cvmx_usb_initialize().
- * @pipe:	 Pipe to cancel requests in.
- * @transaction: Transaction to cancel, returned by the submit function.
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_cancel(struct octeon_hcd *usb,
-			   struct cvmx_usb_pipe *pipe,
-			   struct cvmx_usb_transaction *transaction)
-{
-	/*
-	 * If the transaction is at the head of the queue and already
-	 * scheduled, we need to treat it specially
-	 */
-	if (list_first_entry(&pipe->transactions, typeof(*transaction), node) ==
-	    transaction && (pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED)) {
-		union cvmx_usbcx_hccharx usbc_hcchar;
-
-		usb->pipe_for_channel[pipe->channel] = NULL;
-		pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
-		CVMX_SYNCW;
-
-		usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
-				CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
-		/*
-		 * If the channel isn't enabled then the transaction already
-		 * completed.
-		 */
-		if (usbc_hcchar.s.chena) {
-			usbc_hcchar.s.chdis = 1;
-			cvmx_usb_write_csr32(usb,
-					     CVMX_USBCX_HCCHARX(pipe->channel,
-								usb->index),
-					     usbc_hcchar.u32);
-		}
-	}
-	cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_CANCEL);
-	return 0;
-}
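
A minimal sketch of how an URB unlink path could use this helper; it assumes the transaction pointer was stashed in urb->hcpriv at submit time and that usb->lock is already held, as in the IRQ handler below:

	struct cvmx_usb_transaction *transaction = urb->hcpriv;

	if (transaction)
		cvmx_usb_cancel(usb, pipe, transaction);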
-
-/**
- * Cancel all outstanding requests in a pipe. Logically all this
- * does is call cvmx_usb_cancel() in a loop.
- *
- * @usb:	 USB device state populated by cvmx_usb_initialize().
- * @pipe:	 Pipe to cancel requests in.
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_cancel_all(struct octeon_hcd *usb,
-			       struct cvmx_usb_pipe *pipe)
-{
-	struct cvmx_usb_transaction *transaction, *next;
-
-	/* Simply loop through and attempt to cancel each transaction */
-	list_for_each_entry_safe(transaction, next, &pipe->transactions, node) {
-		int result = cvmx_usb_cancel(usb, pipe, transaction);
-
-		if (unlikely(result != 0))
-			return result;
-	}
-	return 0;
-}
-
-/**
- * Close a pipe created with cvmx_usb_open_pipe().
- *
- * @usb:	 USB device state populated by cvmx_usb_initialize().
- * @pipe:	 Pipe to close.
- *
- * Returns: 0 or a negative error code. EBUSY is returned if the pipe has
- *	    outstanding transfers.
- */
-static int cvmx_usb_close_pipe(struct octeon_hcd *usb,
-			       struct cvmx_usb_pipe *pipe)
-{
-	/* Fail if the pipe has pending transactions */
-	if (!list_empty(&pipe->transactions))
-		return -EBUSY;
-
-	list_del(&pipe->node);
-	kfree(pipe);
-
-	return 0;
-}
-
-/**
- * Get the current USB protocol level frame number. The frame
- * number is always in the range of 0-0x7ff.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: USB frame number
- */
-static int cvmx_usb_get_frame_number(struct octeon_hcd *usb)
-{
-	union cvmx_usbcx_hfnum usbc_hfnum;
-
-	usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
-
-	return usbc_hfnum.s.frnum;
-}
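
One plausible way the hc_driver .get_frame_number hook could wrap this helper (a sketch only; the actual glue lives elsewhere in this file):

static int octeon_usb_get_frame_number(struct usb_hcd *hcd)
{
	return cvmx_usb_get_frame_number(hcd_to_octeon(hcd));
}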
-
-static void cvmx_usb_transfer_control(struct octeon_hcd *usb,
-				      struct cvmx_usb_pipe *pipe,
-				      struct cvmx_usb_transaction *transaction,
-				      union cvmx_usbcx_hccharx usbc_hcchar,
-				      int buffer_space_left,
-				      int bytes_in_last_packet)
-{
-	switch (transaction->stage) {
-	case CVMX_USB_STAGE_NON_CONTROL:
-	case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
-		/* This should be impossible */
-		cvmx_usb_complete(usb, pipe, transaction,
-				  CVMX_USB_STATUS_ERROR);
-		break;
-	case CVMX_USB_STAGE_SETUP:
-		pipe->pid_toggle = 1;
-		if (cvmx_usb_pipe_needs_split(usb, pipe)) {
-			transaction->stage =
-				CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
-		} else {
-			struct usb_ctrlrequest *header =
-				cvmx_phys_to_ptr(transaction->control_header);
-			if (header->wLength)
-				transaction->stage = CVMX_USB_STAGE_DATA;
-			else
-				transaction->stage = CVMX_USB_STAGE_STATUS;
-		}
-		break;
-	case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
-		{
-			struct usb_ctrlrequest *header =
-				cvmx_phys_to_ptr(transaction->control_header);
-			if (header->wLength)
-				transaction->stage = CVMX_USB_STAGE_DATA;
-			else
-				transaction->stage = CVMX_USB_STAGE_STATUS;
-		}
-		break;
-	case CVMX_USB_STAGE_DATA:
-		if (cvmx_usb_pipe_needs_split(usb, pipe)) {
-			transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
-			/*
-			 * For setup OUT data that is split,
-			 * the hardware doesn't appear to count
-			 * transferred data. Here we manually
-			 * update the data transferred
-			 */
-			if (!usbc_hcchar.s.epdir) {
-				if (buffer_space_left < pipe->max_packet)
-					transaction->actual_bytes +=
-						buffer_space_left;
-				else
-					transaction->actual_bytes +=
-						pipe->max_packet;
-			}
-		} else if ((buffer_space_left == 0) ||
-			   (bytes_in_last_packet < pipe->max_packet)) {
-			pipe->pid_toggle = 1;
-			transaction->stage = CVMX_USB_STAGE_STATUS;
-		}
-		break;
-	case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
-		if ((buffer_space_left == 0) ||
-		    (bytes_in_last_packet < pipe->max_packet)) {
-			pipe->pid_toggle = 1;
-			transaction->stage = CVMX_USB_STAGE_STATUS;
-		} else {
-			transaction->stage = CVMX_USB_STAGE_DATA;
-		}
-		break;
-	case CVMX_USB_STAGE_STATUS:
-		if (cvmx_usb_pipe_needs_split(usb, pipe))
-			transaction->stage =
-				CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
-		else
-			cvmx_usb_complete(usb, pipe, transaction,
-					  CVMX_USB_STATUS_OK);
-		break;
-	case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
-		cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
-		break;
-	}
-}
-
-static void cvmx_usb_transfer_bulk(struct octeon_hcd *usb,
-				   struct cvmx_usb_pipe *pipe,
-				   struct cvmx_usb_transaction *transaction,
-				   union cvmx_usbcx_hcintx usbc_hcint,
-				   int buffer_space_left,
-				   int bytes_in_last_packet)
-{
-	/*
-	 * The only time a bulk transfer isn't complete when it finishes with
-	 * an ACK is during a split transaction. For splits we need to continue
-	 * the transfer if more data is needed.
-	 */
-	if (cvmx_usb_pipe_needs_split(usb, pipe)) {
-		if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
-			transaction->stage =
-				CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
-		else if (buffer_space_left &&
-			 (bytes_in_last_packet == pipe->max_packet))
-			transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
-		else
-			cvmx_usb_complete(usb, pipe, transaction,
-					  CVMX_USB_STATUS_OK);
-	} else {
-		if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
-		    (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
-		    (usbc_hcint.s.nak))
-			pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
-		if (!buffer_space_left ||
-		    (bytes_in_last_packet < pipe->max_packet))
-			cvmx_usb_complete(usb, pipe, transaction,
-					  CVMX_USB_STATUS_OK);
-	}
-}
-
-static void cvmx_usb_transfer_intr(struct octeon_hcd *usb,
-				   struct cvmx_usb_pipe *pipe,
-				   struct cvmx_usb_transaction *transaction,
-				   int buffer_space_left,
-				   int bytes_in_last_packet)
-{
-	if (cvmx_usb_pipe_needs_split(usb, pipe)) {
-		if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) {
-			transaction->stage =
-				CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
-		} else if (buffer_space_left &&
-			   (bytes_in_last_packet == pipe->max_packet)) {
-			transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
-		} else {
-			pipe->next_tx_frame += pipe->interval;
-			cvmx_usb_complete(usb, pipe, transaction,
-					  CVMX_USB_STATUS_OK);
-		}
-	} else if (!buffer_space_left ||
-		   (bytes_in_last_packet < pipe->max_packet)) {
-		pipe->next_tx_frame += pipe->interval;
-		cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
-	}
-}
-
-static void cvmx_usb_transfer_isoc(struct octeon_hcd *usb,
-				   struct cvmx_usb_pipe *pipe,
-				   struct cvmx_usb_transaction *transaction,
-				   int buffer_space_left,
-				   int bytes_in_last_packet,
-				   int bytes_this_transfer)
-{
-	if (cvmx_usb_pipe_needs_split(usb, pipe)) {
-		/*
-		 * ISOCHRONOUS OUT splits don't require a complete split stage.
-		 * Instead they use a sequence of begin OUT splits to transfer
-		 * the data 188 bytes at a time. Once the transfer is complete,
-		 * the pipe sleeps until the next schedule interval.
-		 */
-		if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
-			/*
-			 * If no space left or this wasn't a max size packet
-			 * then this transfer is complete. Otherwise start it
-			 * again to send the next 188 bytes
-			 */
-			if (!buffer_space_left || (bytes_this_transfer < 188)) {
-				pipe->next_tx_frame += pipe->interval;
-				cvmx_usb_complete(usb, pipe, transaction,
-						  CVMX_USB_STATUS_OK);
-			}
-			return;
-		}
-		if (transaction->stage ==
-		    CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
-			/*
-			 * We are in the incoming data phase. Keep getting data
-			 * until we run out of space or get a small packet
-			 */
-			if ((buffer_space_left == 0) ||
-			    (bytes_in_last_packet < pipe->max_packet)) {
-				pipe->next_tx_frame += pipe->interval;
-				cvmx_usb_complete(usb, pipe, transaction,
-						  CVMX_USB_STATUS_OK);
-			}
-		} else {
-			transaction->stage =
-				CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
-		}
-	} else {
-		pipe->next_tx_frame += pipe->interval;
-		cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
-	}
-}
-
-/**
- * Poll a channel for status
- *
- * @usb:     USB device
- * @channel: Channel to poll
- *
- * Returns: Zero on success
- */
-static int cvmx_usb_poll_channel(struct octeon_hcd *usb, int channel)
-{
-	struct usb_hcd *hcd = octeon_to_hcd(usb);
-	struct device *dev = hcd->self.controller;
-	union cvmx_usbcx_hcintx usbc_hcint;
-	union cvmx_usbcx_hctsizx usbc_hctsiz;
-	union cvmx_usbcx_hccharx usbc_hcchar;
-	struct cvmx_usb_pipe *pipe;
-	struct cvmx_usb_transaction *transaction;
-	int bytes_this_transfer;
-	int bytes_in_last_packet;
-	int packets_processed;
-	int buffer_space_left;
-
-	/* Read the interrupt status bits for the channel */
-	usbc_hcint.u32 = cvmx_usb_read_csr32(usb,
-				CVMX_USBCX_HCINTX(channel, usb->index));
-
-	if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
-		usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
-				CVMX_USBCX_HCCHARX(channel, usb->index));
-
-		if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) {
-			/*
-			 * There seems to be a bug in CN31XX which can cause
-			 * interrupt IN transfers to get stuck until we do a
-			 * write of HCCHARX without changing things
-			 */
-			cvmx_usb_write_csr32(usb,
-					     CVMX_USBCX_HCCHARX(channel,
-								usb->index),
-					     usbc_hcchar.u32);
-			return 0;
-		}
-
-		/*
-		 * In non DMA mode the channels don't halt themselves. We need
-		 * to manually disable channels that are left running
-		 */
-		if (!usbc_hcint.s.chhltd) {
-			if (usbc_hcchar.s.chena) {
-				union cvmx_usbcx_hcintmskx hcintmsk;
-				/* Disable all interrupts except CHHLTD */
-				hcintmsk.u32 = 0;
-				hcintmsk.s.chhltdmsk = 1;
-				cvmx_usb_write_csr32(usb,
-						     CVMX_USBCX_HCINTMSKX(channel, usb->index),
-						     hcintmsk.u32);
-				usbc_hcchar.s.chdis = 1;
-				cvmx_usb_write_csr32(usb,
-						     CVMX_USBCX_HCCHARX(channel, usb->index),
-						     usbc_hcchar.u32);
-				return 0;
-			} else if (usbc_hcint.s.xfercompl) {
-				/*
-				 * Successful IN/OUT with transfer complete.
-				 * Channel halt isn't needed.
-				 */
-			} else {
-				dev_err(dev, "USB%d: Channel %d interrupt without halt\n",
-					usb->index, channel);
-				return 0;
-			}
-		}
-	} else {
-		/*
-		 * There are no interrupts that we need to process when the
-		 * channel is still running
-		 */
-		if (!usbc_hcint.s.chhltd)
-			return 0;
-	}
-
-	/* Disable the channel interrupts now that it is done */
-	cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
-	usb->idle_hardware_channels |= (1 << channel);
-
-	/* Make sure this channel is tied to a valid pipe */
-	pipe = usb->pipe_for_channel[channel];
-	prefetch(pipe);
-	if (!pipe)
-		return 0;
-	transaction = list_first_entry(&pipe->transactions,
-				       typeof(*transaction),
-				       node);
-	prefetch(transaction);
-
-	/*
-	 * Disconnect this pipe from the HW channel. Later the schedule
-	 * function will figure out which pipe needs to go
-	 */
-	usb->pipe_for_channel[channel] = NULL;
-	pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
-	/*
-	 * Read the channel config info so we can figure out how much data
-	 * transferred
-	 */
-	usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
-			CVMX_USBCX_HCCHARX(channel, usb->index));
-	usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
-			CVMX_USBCX_HCTSIZX(channel, usb->index));
-
-	/*
-	 * Calculating the number of bytes successfully transferred is dependent
-	 * on the transfer direction
-	 */
-	packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt;
-	if (usbc_hcchar.s.epdir) {
-		/*
-		 * IN transactions are easy. For every byte received the
-		 * hardware decrements xfersize. All we need to do is subtract
-		 * the current value of xfersize from its starting value and we
-		 * know how many bytes were written to the buffer
-		 */
-		bytes_this_transfer = transaction->xfersize -
-			usbc_hctsiz.s.xfersize;
-	} else {
-		/*
-		 * OUT transactions don't decrement xfersize. Instead pktcnt is
-		 * decremented on every successful packet send. The hardware
-		 * does this when it receives an ACK, or NYET. If it doesn't
-		 * receive one of these responses pktcnt doesn't change
-		 */
-		bytes_this_transfer = packets_processed * usbc_hcchar.s.mps;
-		/*
-		 * The last packet may not be a full transfer if we didn't have
-		 * enough data
-		 */
-		if (bytes_this_transfer > transaction->xfersize)
-			bytes_this_transfer = transaction->xfersize;
-	}
-	/* Figure out how many bytes were in the last packet of the transfer */
-	if (packets_processed)
-		bytes_in_last_packet = bytes_this_transfer -
-			(packets_processed - 1) * usbc_hcchar.s.mps;
-	else
-		bytes_in_last_packet = bytes_this_transfer;
-
-	/*
-	 * As a special case, setup transactions output the setup header, not
-	 * the user's data. For this reason we don't count setup data as bytes
-	 * transferred
-	 */
-	if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
-	    (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
-		bytes_this_transfer = 0;
-
-	/*
-	 * Add the bytes transferred to the running total. It is important that
-	 * bytes_this_transfer doesn't count any data that needs to be
-	 * retransmitted
-	 */
-	transaction->actual_bytes += bytes_this_transfer;
-	if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
-		buffer_space_left = transaction->iso_packets[0].length -
-			transaction->actual_bytes;
-	else
-		buffer_space_left = transaction->buffer_length -
-			transaction->actual_bytes;
-
-	/*
-	 * We need to remember the PID toggle state for the next transaction.
-	 * The hardware already updated it for the next transaction
-	 */
-	pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0);
-
-	/*
-	 * For high speed bulk out, assume the next transaction will need to do
-	 * a ping before proceeding. If this isn't true the ACK processing below
-	 * will clear this flag
-	 */
-	if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
-	    (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
-	    (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
-		pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
-
-	if (WARN_ON_ONCE(bytes_this_transfer < 0)) {
-		/*
-		 * In some rare cases the DMA engine seems to get stuck and
-		 * keeps subtracting the same byte count over and over again.
-		 * In such cases we just need to fail every transaction.
-		 */
-		cvmx_usb_complete(usb, pipe, transaction,
-				  CVMX_USB_STATUS_ERROR);
-		return 0;
-	}
-
-	if (usbc_hcint.s.stall) {
-		/*
-		 * STALL as a response means this transaction cannot be
-		 * completed because the device can't process transactions. Tell
-		 * the user. Any data that was transferred will be counted in
-		 * the actual bytes transferred
-		 */
-		pipe->pid_toggle = 0;
-		cvmx_usb_complete(usb, pipe, transaction,
-				  CVMX_USB_STATUS_STALL);
-	} else if (usbc_hcint.s.xacterr) {
-		/*
-		 * XactErr as a response means the device signaled
-		 * something wrong with the transfer. For example, PID
-		 * toggle errors cause these.
-		 */
-		cvmx_usb_complete(usb, pipe, transaction,
-				  CVMX_USB_STATUS_XACTERR);
-	} else if (usbc_hcint.s.bblerr) {
-		/* Babble Error (BblErr) */
-		cvmx_usb_complete(usb, pipe, transaction,
-				  CVMX_USB_STATUS_BABBLEERR);
-	} else if (usbc_hcint.s.datatglerr) {
-		/* Data toggle error */
-		cvmx_usb_complete(usb, pipe, transaction,
-				  CVMX_USB_STATUS_DATATGLERR);
-	} else if (usbc_hcint.s.nyet) {
-		/*
-		 * NYET as a response is only allowed in three cases: as a
-		 * response to a ping, as a response to a split transaction, and
-		 * as a response to a bulk out. The ping case is handled by
-		 * hardware, so we only have splits and bulk out
-		 */
-		if (!cvmx_usb_pipe_needs_split(usb, pipe)) {
-			transaction->retries = 0;
-			/*
-			 * If there is more data to go then we need to try
-			 * again. Otherwise this transaction is complete
-			 */
-			if ((buffer_space_left == 0) ||
-			    (bytes_in_last_packet < pipe->max_packet))
-				cvmx_usb_complete(usb, pipe,
-						  transaction,
-						  CVMX_USB_STATUS_OK);
-		} else {
-			/*
-			 * Split transactions retry the split complete 4 times
-			 * then rewind to the start split and do the entire
-			 * transactions again
-			 * transaction again
-			transaction->retries++;
-			if ((transaction->retries & 0x3) == 0) {
-				/*
-				 * Rewind to the beginning of the transaction by
-				 * anding off the split complete bit
-				 */
-				transaction->stage &= ~1;
-				pipe->split_sc_frame = -1;
-			}
-		}
-	} else if (usbc_hcint.s.ack) {
-		transaction->retries = 0;
-		/*
-		 * The ACK bit can only be checked after the other error bits.
-		 * This is because a multi packet transfer may succeed in a
-		 * number of packets and then get a different response on the
-		 * last packet. In this case both ACK and the last response bit
-		 * will be set. If none of the other response bits is set, then
-		 * the last packet must have been an ACK
-		 *
-		 * Since we got an ACK, we know we don't need to do a ping on
-		 * this pipe
-		 */
-		pipe->flags &= ~CVMX_USB_PIPE_FLAGS_NEED_PING;
-
-		switch (transaction->type) {
-		case CVMX_USB_TRANSFER_CONTROL:
-			cvmx_usb_transfer_control(usb, pipe, transaction,
-						  usbc_hcchar,
-						  buffer_space_left,
-						  bytes_in_last_packet);
-			break;
-		case CVMX_USB_TRANSFER_BULK:
-			cvmx_usb_transfer_bulk(usb, pipe, transaction,
-					       usbc_hcint, buffer_space_left,
-					       bytes_in_last_packet);
-			break;
-		case CVMX_USB_TRANSFER_INTERRUPT:
-			cvmx_usb_transfer_intr(usb, pipe, transaction,
-					       buffer_space_left,
-					       bytes_in_last_packet);
-			break;
-		case CVMX_USB_TRANSFER_ISOCHRONOUS:
-			cvmx_usb_transfer_isoc(usb, pipe, transaction,
-					       buffer_space_left,
-					       bytes_in_last_packet,
-					       bytes_this_transfer);
-			break;
-		}
-	} else if (usbc_hcint.s.nak) {
-		/*
-		 * If this was a split then clear our split in progress marker.
-		 */
-		if (usb->active_split == transaction)
-			usb->active_split = NULL;
-		/*
-		 * NAK as a response means the device couldn't accept the
-		 * transaction, but it should be retried in the future. Rewind
-		 * to the beginning of the transaction by anding off the split
-		 * complete bit. Retry in the next interval
-		 */
-		transaction->retries = 0;
-		transaction->stage &= ~1;
-		pipe->next_tx_frame += pipe->interval;
-		if (pipe->next_tx_frame < usb->frame_number)
-			pipe->next_tx_frame = usb->frame_number +
-				pipe->interval -
-				(usb->frame_number - pipe->next_tx_frame) %
-				pipe->interval;
-	} else {
-		struct cvmx_usb_port_status port;
-
-		port = cvmx_usb_get_status(usb);
-		if (port.port_enabled) {
-			/* We'll retry the exact same transaction again */
-			transaction->retries++;
-		} else {
-			/*
-			 * We get channel halted interrupts with no result bits
-			 * sets when the cable is unplugged
-			 * set when the cable is unplugged
-			cvmx_usb_complete(usb, pipe, transaction,
-					  CVMX_USB_STATUS_ERROR);
-		}
-	}
-	return 0;
-}
-
-static void octeon_usb_port_callback(struct octeon_hcd *usb)
-{
-	spin_unlock(&usb->lock);
-	usb_hcd_poll_rh_status(octeon_to_hcd(usb));
-	spin_lock(&usb->lock);
-}
-
-/**
- * Poll the USB block for status and call all needed callback
- * handlers. This function is meant to be called in the interrupt
- * handler for the USB controller. It can also be called
- * periodically in a loop for non-interrupt based operation.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_poll(struct octeon_hcd *usb)
-{
-	union cvmx_usbcx_hfnum usbc_hfnum;
-	union cvmx_usbcx_gintsts usbc_gintsts;
-
-	prefetch_range(usb, sizeof(*usb));
-
-	/* Update the frame counter */
-	usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
-	if ((usb->frame_number & 0x3fff) > usbc_hfnum.s.frnum)
-		usb->frame_number += 0x4000;
-	usb->frame_number &= ~0x3fffull;
-	usb->frame_number |= usbc_hfnum.s.frnum;
-
-	/* Read the pending interrupts */
-	usbc_gintsts.u32 = cvmx_usb_read_csr32(usb,
-					       CVMX_USBCX_GINTSTS(usb->index));
-
-	/* Clear the interrupts now that we know about them */
-	cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index),
-			     usbc_gintsts.u32);
-
-	if (usbc_gintsts.s.rxflvl) {
-		/*
-		 * RxFIFO Non-Empty (RxFLvl)
-		 * Indicates that there is at least one packet pending to be
-		 * read from the RxFIFO.
-		 *
-		 * In DMA mode this is handled by hardware
-		 */
-		if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
-			cvmx_usb_poll_rx_fifo(usb);
-	}
-	if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) {
-		/* Fill the Tx FIFOs when not in DMA mode */
-		if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
-			cvmx_usb_poll_tx_fifo(usb);
-	}
-	if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) {
-		union cvmx_usbcx_hprt usbc_hprt;
-		/*
-		 * Disconnect Detected Interrupt (DisconnInt)
-		 * Asserted when a device disconnect is detected.
-		 *
-		 * Host Port Interrupt (PrtInt)
-		 * The core sets this bit to indicate a change in port status of
-		 * one of the O2P USB core ports in Host mode. The application
-		 * must read the Host Port Control and Status (HPRT) register to
-		 * determine the exact event that caused this interrupt. The
-		 * application must clear the appropriate status bit in the Host
-		 * Port Control and Status register to clear this bit.
-		 *
-		 * Call the user's port callback
-		 */
-		octeon_usb_port_callback(usb);
-		/* Clear the port change bits */
-		usbc_hprt.u32 =
-			cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
-		usbc_hprt.s.prtena = 0;
-		cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index),
-				     usbc_hprt.u32);
-	}
-	if (usbc_gintsts.s.hchint) {
-		/*
-		 * Host Channels Interrupt (HChInt)
-		 * The core sets this bit to indicate that an interrupt is
-		 * pending on one of the channels of the core (in Host mode).
-		 * The application must read the Host All Channels Interrupt
-		 * (HAINT) register to determine the exact number of the channel
-		 * on which the interrupt occurred, and then read the
-		 * corresponding Host Channel-n Interrupt (HCINTn) register to
-		 * determine the exact cause of the interrupt. The application
-		 * must clear the appropriate status bit in the HCINTn register
-		 * to clear this bit.
-		 */
-		union cvmx_usbcx_haint usbc_haint;
-
-		usbc_haint.u32 = cvmx_usb_read_csr32(usb,
-					CVMX_USBCX_HAINT(usb->index));
-		while (usbc_haint.u32) {
-			int channel;
-
-			channel = __fls(usbc_haint.u32);
-			cvmx_usb_poll_channel(usb, channel);
-			usbc_haint.u32 ^= 1 << channel;
-		}
-	}
-
-	cvmx_usb_schedule(usb, usbc_gintsts.s.sof);
-
-	return 0;
-}
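The frame-counter update at the top of cvmx_usb_poll() extends the hardware's wrapping frame number (the low 14 bits read from HFNUM) into the driver's monotonically increasing 64-bit usb->frame_number. A minimal sketch of the same idiom as a standalone helper; the helper name and free-standing form are illustrative and not part of the driver:

/*
 * Illustrative helper (not from the driver): fold a wrapping 14-bit
 * hardware frame number into a monotonically increasing 64-bit count,
 * exactly as cvmx_usb_poll() does inline above.
 */
static u64 extend_frame_number(u64 soft_frame, u32 hw_frnum)
{
	/* hw_frnum is the raw HFNUM.FrNum field, 0..0x3fff */
	if ((soft_frame & 0x3fff) > hw_frnum)
		soft_frame += 0x4000;	/* the hardware counter wrapped */
	soft_frame &= ~0x3fffull;
	soft_frame |= hw_frnum;
	return soft_frame;
}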
-
-/* convert between an HCD pointer and the corresponding struct octeon_hcd */
-static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd)
-{
-	return (struct octeon_hcd *)(hcd->hcd_priv);
-}
-
-static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd)
-{
-	struct octeon_hcd *usb = hcd_to_octeon(hcd);
-	unsigned long flags;
-
-	spin_lock_irqsave(&usb->lock, flags);
-	cvmx_usb_poll(usb);
-	spin_unlock_irqrestore(&usb->lock, flags);
-	return IRQ_HANDLED;
-}
-
-static int octeon_usb_start(struct usb_hcd *hcd)
-{
-	hcd->state = HC_STATE_RUNNING;
-	return 0;
-}
-
-static void octeon_usb_stop(struct usb_hcd *hcd)
-{
-	hcd->state = HC_STATE_HALT;
-}
-
-static int octeon_usb_get_frame_number(struct usb_hcd *hcd)
-{
-	struct octeon_hcd *usb = hcd_to_octeon(hcd);
-
-	return cvmx_usb_get_frame_number(usb);
-}
-
-static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
-				  struct urb *urb,
-				  gfp_t mem_flags)
-{
-	struct octeon_hcd *usb = hcd_to_octeon(hcd);
-	struct device *dev = hcd->self.controller;
-	struct cvmx_usb_transaction *transaction = NULL;
-	struct cvmx_usb_pipe *pipe;
-	unsigned long flags;
-	struct cvmx_usb_iso_packet *iso_packet;
-	struct usb_host_endpoint *ep = urb->ep;
-	int rc;
-
-	urb->status = 0;
-	spin_lock_irqsave(&usb->lock, flags);
-
-	rc = usb_hcd_link_urb_to_ep(hcd, urb);
-	if (rc) {
-		spin_unlock_irqrestore(&usb->lock, flags);
-		return rc;
-	}
-
-	if (!ep->hcpriv) {
-		enum cvmx_usb_transfer transfer_type;
-		enum cvmx_usb_speed speed;
-		int split_device = 0;
-		int split_port = 0;
-
-		switch (usb_pipetype(urb->pipe)) {
-		case PIPE_ISOCHRONOUS:
-			transfer_type = CVMX_USB_TRANSFER_ISOCHRONOUS;
-			break;
-		case PIPE_INTERRUPT:
-			transfer_type = CVMX_USB_TRANSFER_INTERRUPT;
-			break;
-		case PIPE_CONTROL:
-			transfer_type = CVMX_USB_TRANSFER_CONTROL;
-			break;
-		default:
-			transfer_type = CVMX_USB_TRANSFER_BULK;
-			break;
-		}
-		switch (urb->dev->speed) {
-		case USB_SPEED_LOW:
-			speed = CVMX_USB_SPEED_LOW;
-			break;
-		case USB_SPEED_FULL:
-			speed = CVMX_USB_SPEED_FULL;
-			break;
-		default:
-			speed = CVMX_USB_SPEED_HIGH;
-			break;
-		}
-		/*
-		 * For slow devices on high speed ports we need to find the hub
-		 * that does the speed translation so we know where to send the
-		 * split transactions.
-		 */
-		if (speed != CVMX_USB_SPEED_HIGH) {
-			/*
-			 * Start at this device and work our way up the usb
-			 * tree.
-			 */
-			struct usb_device *dev = urb->dev;
-
-			while (dev->parent) {
-				/*
-				 * If our parent is high speed then it will
-				 * receive the splits.
-				 */
-				if (dev->parent->speed == USB_SPEED_HIGH) {
-					split_device = dev->parent->devnum;
-					split_port = dev->portnum;
-					break;
-				}
-				/*
-				 * Move up the tree one level. If we make it all
-				 * the way up the tree, then the port must not
-				 * be in high speed mode and we don't need a
-				 * split.
-				 */
-				dev = dev->parent;
-			}
-		}
-		pipe = cvmx_usb_open_pipe(usb, usb_pipedevice(urb->pipe),
-					  usb_pipeendpoint(urb->pipe), speed,
-					  le16_to_cpu(ep->desc.wMaxPacketSize)
-					  & 0x7ff,
-					  transfer_type,
-					  usb_pipein(urb->pipe) ?
-						CVMX_USB_DIRECTION_IN :
-						CVMX_USB_DIRECTION_OUT,
-					  urb->interval,
-					  (le16_to_cpu(ep->desc.wMaxPacketSize)
-					   >> 11) & 0x3,
-					  split_device, split_port);
-		if (!pipe) {
-			usb_hcd_unlink_urb_from_ep(hcd, urb);
-			spin_unlock_irqrestore(&usb->lock, flags);
-			dev_dbg(dev, "Failed to create pipe\n");
-			return -ENOMEM;
-		}
-		ep->hcpriv = pipe;
-	} else {
-		pipe = ep->hcpriv;
-	}
-
-	switch (usb_pipetype(urb->pipe)) {
-	case PIPE_ISOCHRONOUS:
-		dev_dbg(dev, "Submit isochronous to %d.%d\n",
-			usb_pipedevice(urb->pipe),
-			usb_pipeendpoint(urb->pipe));
-		/*
-		 * Allocate a structure to use for our private list of
-		 * isochronous packets.
-		 */
-		iso_packet = kmalloc_array(urb->number_of_packets,
-					   sizeof(struct cvmx_usb_iso_packet),
-					   GFP_ATOMIC);
-		if (iso_packet) {
-			int i;
-			/* Fill the list with the data from the URB */
-			for (i = 0; i < urb->number_of_packets; i++) {
-				iso_packet[i].offset =
-					urb->iso_frame_desc[i].offset;
-				iso_packet[i].length =
-					urb->iso_frame_desc[i].length;
-				iso_packet[i].status = CVMX_USB_STATUS_ERROR;
-			}
-			/*
-			 * Store a pointer to the list in the URB setup_packet
-			 * field. We know this currently isn't being used and
-			 * this saves us a bunch of logic.
-			 */
-			urb->setup_packet = (char *)iso_packet;
-			transaction = cvmx_usb_submit_isochronous(usb,
-								  pipe, urb);
-			/*
-			 * If submit failed we need to free our private packet
-			 * list.
-			 */
-			if (!transaction) {
-				urb->setup_packet = NULL;
-				kfree(iso_packet);
-			}
-		}
-		break;
-	case PIPE_INTERRUPT:
-		dev_dbg(dev, "Submit interrupt to %d.%d\n",
-			usb_pipedevice(urb->pipe),
-			usb_pipeendpoint(urb->pipe));
-		transaction = cvmx_usb_submit_interrupt(usb, pipe, urb);
-		break;
-	case PIPE_CONTROL:
-		dev_dbg(dev, "Submit control to %d.%d\n",
-			usb_pipedevice(urb->pipe),
-			usb_pipeendpoint(urb->pipe));
-		transaction = cvmx_usb_submit_control(usb, pipe, urb);
-		break;
-	case PIPE_BULK:
-		dev_dbg(dev, "Submit bulk to %d.%d\n",
-			usb_pipedevice(urb->pipe),
-			usb_pipeendpoint(urb->pipe));
-		transaction = cvmx_usb_submit_bulk(usb, pipe, urb);
-		break;
-	}
-	if (!transaction) {
-		usb_hcd_unlink_urb_from_ep(hcd, urb);
-		spin_unlock_irqrestore(&usb->lock, flags);
-		dev_dbg(dev, "Failed to submit\n");
-		return -ENOMEM;
-	}
-	urb->hcpriv = transaction;
-	spin_unlock_irqrestore(&usb->lock, flags);
-	return 0;
-}
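The cvmx_usb_open_pipe() call in octeon_usb_urb_enqueue() above splits wMaxPacketSize according to the USB 2.0 endpoint descriptor layout: bits 10:0 carry the maximum packet size and bits 12:11 the number of additional transactions per microframe for high-bandwidth endpoints. A hypothetical helper spelling out that decode (not part of the driver):

/* Illustrative only; mirrors the masking done in the enqueue path above. */
static void decode_wmaxpacketsize(__le16 wMaxPacketSize,
				  unsigned int *max_packet,
				  unsigned int *extra_per_uframe)
{
	u16 value = le16_to_cpu(wMaxPacketSize);

	*max_packet = value & 0x7ff;			/* bits 10:0 */
	*extra_per_uframe = (value >> 11) & 0x3;	/* bits 12:11 */
}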
-
-static int octeon_usb_urb_dequeue(struct usb_hcd *hcd,
-				  struct urb *urb,
-				  int status)
-{
-	struct octeon_hcd *usb = hcd_to_octeon(hcd);
-	unsigned long flags;
-	int rc;
-
-	if (!urb->dev)
-		return -EINVAL;
-
-	spin_lock_irqsave(&usb->lock, flags);
-
-	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
-	if (rc)
-		goto out;
-
-	urb->status = status;
-	cvmx_usb_cancel(usb, urb->ep->hcpriv, urb->hcpriv);
-
-out:
-	spin_unlock_irqrestore(&usb->lock, flags);
-
-	return rc;
-}
-
-static void octeon_usb_endpoint_disable(struct usb_hcd *hcd,
-					struct usb_host_endpoint *ep)
-{
-	struct device *dev = hcd->self.controller;
-
-	if (ep->hcpriv) {
-		struct octeon_hcd *usb = hcd_to_octeon(hcd);
-		struct cvmx_usb_pipe *pipe = ep->hcpriv;
-		unsigned long flags;
-
-		spin_lock_irqsave(&usb->lock, flags);
-		cvmx_usb_cancel_all(usb, pipe);
-		if (cvmx_usb_close_pipe(usb, pipe))
-			dev_dbg(dev, "Closing pipe %p failed\n", pipe);
-		spin_unlock_irqrestore(&usb->lock, flags);
-		ep->hcpriv = NULL;
-	}
-}
-
-static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf)
-{
-	struct octeon_hcd *usb = hcd_to_octeon(hcd);
-	struct cvmx_usb_port_status port_status;
-	unsigned long flags;
-
-	spin_lock_irqsave(&usb->lock, flags);
-	port_status = cvmx_usb_get_status(usb);
-	spin_unlock_irqrestore(&usb->lock, flags);
-	buf[0] = port_status.connect_change << 1;
-
-	return buf[0] != 0;
-}
-
-static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
-				  u16 wIndex, char *buf, u16 wLength)
-{
-	struct octeon_hcd *usb = hcd_to_octeon(hcd);
-	struct device *dev = hcd->self.controller;
-	struct cvmx_usb_port_status usb_port_status;
-	int port_status;
-	struct usb_hub_descriptor *desc;
-	unsigned long flags;
-
-	switch (typeReq) {
-	case ClearHubFeature:
-		dev_dbg(dev, "ClearHubFeature\n");
-		switch (wValue) {
-		case C_HUB_LOCAL_POWER:
-		case C_HUB_OVER_CURRENT:
-			/* Nothing required here */
-			break;
-		default:
-			return -EINVAL;
-		}
-		break;
-	case ClearPortFeature:
-		dev_dbg(dev, "ClearPortFeature\n");
-		if (wIndex != 1) {
-			dev_dbg(dev, " INVALID\n");
-			return -EINVAL;
-		}
-
-		switch (wValue) {
-		case USB_PORT_FEAT_ENABLE:
-			dev_dbg(dev, " ENABLE\n");
-			spin_lock_irqsave(&usb->lock, flags);
-			cvmx_usb_disable(usb);
-			spin_unlock_irqrestore(&usb->lock, flags);
-			break;
-		case USB_PORT_FEAT_SUSPEND:
-			dev_dbg(dev, " SUSPEND\n");
-			/* Not supported on Octeon */
-			break;
-		case USB_PORT_FEAT_POWER:
-			dev_dbg(dev, " POWER\n");
-			/* Not supported on Octeon */
-			break;
-		case USB_PORT_FEAT_INDICATOR:
-			dev_dbg(dev, " INDICATOR\n");
-			/* Port indicator not supported */
-			break;
-		case USB_PORT_FEAT_C_CONNECTION:
-			dev_dbg(dev, " C_CONNECTION\n");
-			/* Clears the driver's internal connect status change flag */
-			spin_lock_irqsave(&usb->lock, flags);
-			usb->port_status = cvmx_usb_get_status(usb);
-			spin_unlock_irqrestore(&usb->lock, flags);
-			break;
-		case USB_PORT_FEAT_C_RESET:
-			dev_dbg(dev, " C_RESET\n");
-			/*
-			 * Clears the driver's internal Port Reset Change flag.
-			 */
-			spin_lock_irqsave(&usb->lock, flags);
-			usb->port_status = cvmx_usb_get_status(usb);
-			spin_unlock_irqrestore(&usb->lock, flags);
-			break;
-		case USB_PORT_FEAT_C_ENABLE:
-			dev_dbg(dev, " C_ENABLE\n");
-			/*
-			 * Clears the driver's internal Port Enable/Disable
-			 * Change flag.
-			 */
-			spin_lock_irqsave(&usb->lock, flags);
-			usb->port_status = cvmx_usb_get_status(usb);
-			spin_unlock_irqrestore(&usb->lock, flags);
-			break;
-		case USB_PORT_FEAT_C_SUSPEND:
-			dev_dbg(dev, " C_SUSPEND\n");
-			/*
-			 * Clears the driver's internal Port Suspend Change
-			 * flag, which is set when resume signaling on the host
-			 * port is complete.
-			 */
-			break;
-		case USB_PORT_FEAT_C_OVER_CURRENT:
-			dev_dbg(dev, " C_OVER_CURRENT\n");
-			/* Clears the driver's overcurrent Change flag */
-			spin_lock_irqsave(&usb->lock, flags);
-			usb->port_status = cvmx_usb_get_status(usb);
-			spin_unlock_irqrestore(&usb->lock, flags);
-			break;
-		default:
-			dev_dbg(dev, " UNKNOWN\n");
-			return -EINVAL;
-		}
-		break;
-	case GetHubDescriptor:
-		dev_dbg(dev, "GetHubDescriptor\n");
-		desc = (struct usb_hub_descriptor *)buf;
-		desc->bDescLength = 9;
-		desc->bDescriptorType = 0x29;
-		desc->bNbrPorts = 1;
-		desc->wHubCharacteristics = cpu_to_le16(0x08);
-		desc->bPwrOn2PwrGood = 1;
-		desc->bHubContrCurrent = 0;
-		desc->u.hs.DeviceRemovable[0] = 0;
-		desc->u.hs.DeviceRemovable[1] = 0xff;
-		break;
-	case GetHubStatus:
-		dev_dbg(dev, "GetHubStatus\n");
-		*(__le32 *)buf = 0;
-		break;
-	case GetPortStatus:
-		dev_dbg(dev, "GetPortStatus\n");
-		if (wIndex != 1) {
-			dev_dbg(dev, " INVALID\n");
-			return -EINVAL;
-		}
-
-		spin_lock_irqsave(&usb->lock, flags);
-		usb_port_status = cvmx_usb_get_status(usb);
-		spin_unlock_irqrestore(&usb->lock, flags);
-		port_status = 0;
-
-		if (usb_port_status.connect_change) {
-			port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
-			dev_dbg(dev, " C_CONNECTION\n");
-		}
-
-		if (usb_port_status.port_enabled) {
-			port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
-			dev_dbg(dev, " C_ENABLE\n");
-		}
-
-		if (usb_port_status.connected) {
-			port_status |= (1 << USB_PORT_FEAT_CONNECTION);
-			dev_dbg(dev, " CONNECTION\n");
-		}
-
-		if (usb_port_status.port_enabled) {
-			port_status |= (1 << USB_PORT_FEAT_ENABLE);
-			dev_dbg(dev, " ENABLE\n");
-		}
-
-		if (usb_port_status.port_over_current) {
-			port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
-			dev_dbg(dev, " OVER_CURRENT\n");
-		}
-
-		if (usb_port_status.port_powered) {
-			port_status |= (1 << USB_PORT_FEAT_POWER);
-			dev_dbg(dev, " POWER\n");
-		}
-
-		if (usb_port_status.port_speed == CVMX_USB_SPEED_HIGH) {
-			port_status |= USB_PORT_STAT_HIGH_SPEED;
-			dev_dbg(dev, " HIGHSPEED\n");
-		} else if (usb_port_status.port_speed == CVMX_USB_SPEED_LOW) {
-			port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
-			dev_dbg(dev, " LOWSPEED\n");
-		}
-
-		*((__le32 *)buf) = cpu_to_le32(port_status);
-		break;
-	case SetHubFeature:
-		dev_dbg(dev, "SetHubFeature\n");
-		/* No HUB features supported */
-		break;
-	case SetPortFeature:
-		dev_dbg(dev, "SetPortFeature\n");
-		if (wIndex != 1) {
-			dev_dbg(dev, " INVALID\n");
-			return -EINVAL;
-		}
-
-		switch (wValue) {
-		case USB_PORT_FEAT_SUSPEND:
-			dev_dbg(dev, " SUSPEND\n");
-			return -EINVAL;
-		case USB_PORT_FEAT_POWER:
-			dev_dbg(dev, " POWER\n");
-			/*
-			 * Program the port power bit to drive VBUS on the USB.
-			 */
-			spin_lock_irqsave(&usb->lock, flags);
-			USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index),
-					cvmx_usbcx_hprt, prtpwr, 1);
-			spin_unlock_irqrestore(&usb->lock, flags);
-			return 0;
-		case USB_PORT_FEAT_RESET:
-			dev_dbg(dev, " RESET\n");
-			spin_lock_irqsave(&usb->lock, flags);
-			cvmx_usb_reset_port(usb);
-			spin_unlock_irqrestore(&usb->lock, flags);
-			return 0;
-		case USB_PORT_FEAT_INDICATOR:
-			dev_dbg(dev, " INDICATOR\n");
-			/* Not supported */
-			break;
-		default:
-			dev_dbg(dev, " UNKNOWN\n");
-			return -EINVAL;
-		}
-		break;
-	default:
-		dev_dbg(dev, "Unknown root hub request\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static const struct hc_driver octeon_hc_driver = {
-	.description		= "Octeon USB",
-	.product_desc		= "Octeon Host Controller",
-	.hcd_priv_size		= sizeof(struct octeon_hcd),
-	.irq			= octeon_usb_irq,
-	.flags			= HCD_MEMORY | HCD_DMA | HCD_USB2,
-	.start			= octeon_usb_start,
-	.stop			= octeon_usb_stop,
-	.urb_enqueue		= octeon_usb_urb_enqueue,
-	.urb_dequeue		= octeon_usb_urb_dequeue,
-	.endpoint_disable	= octeon_usb_endpoint_disable,
-	.get_frame_number	= octeon_usb_get_frame_number,
-	.hub_status_data	= octeon_usb_hub_status_data,
-	.hub_control		= octeon_usb_hub_control,
-	.map_urb_for_dma	= octeon_map_urb_for_dma,
-	.unmap_urb_for_dma	= octeon_unmap_urb_for_dma,
-};
-
-static int octeon_usb_probe(struct platform_device *pdev)
-{
-	int status;
-	int initialize_flags;
-	int usb_num;
-	struct resource *res_mem;
-	struct device_node *usbn_node;
-	int irq = platform_get_irq(pdev, 0);
-	struct device *dev = &pdev->dev;
-	struct octeon_hcd *usb;
-	struct usb_hcd *hcd;
-	u32 clock_rate = 48000000;
-	bool is_crystal_clock = false;
-	const char *clock_type;
-	int i;
-
-	if (!dev->of_node) {
-		dev_err(dev, "Error: empty of_node\n");
-		return -ENXIO;
-	}
-	usbn_node = dev->of_node->parent;
-
-	i = of_property_read_u32(usbn_node,
-				 "clock-frequency", &clock_rate);
-	if (i)
-		i = of_property_read_u32(usbn_node,
-					 "refclk-frequency", &clock_rate);
-	if (i) {
-		dev_err(dev, "No USBN \"clock-frequency\"\n");
-		return -ENXIO;
-	}
-	switch (clock_rate) {
-	case 12000000:
-		initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
-		break;
-	case 24000000:
-		initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
-		break;
-	case 48000000:
-		initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
-		break;
-	default:
-		dev_err(dev, "Illegal USBN \"clock-frequency\" %u\n",
-			clock_rate);
-		return -ENXIO;
-	}
-
-	i = of_property_read_string(usbn_node,
-				    "cavium,refclk-type", &clock_type);
-	if (i)
-		i = of_property_read_string(usbn_node,
-					    "refclk-type", &clock_type);
-
-	if (!i && strcmp("crystal", clock_type) == 0)
-		is_crystal_clock = true;
-
-	if (is_crystal_clock)
-		initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
-	else
-		initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
-
-	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res_mem) {
-		dev_err(dev, "found no memory resource\n");
-		return -ENXIO;
-	}
-	usb_num = (res_mem->start >> 44) & 1;
-
-	if (irq < 0) {
-		/* Defective device tree, but we know how to fix it. */
-		irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;
-
-		irq = irq_create_mapping(NULL, hwirq);
-	}
-
-	/*
-	 * Set the DMA mask to 64bits so we get buffers already translated for
-	 * DMA.
-	 */
-	i = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
-	if (i)
-		return i;
-
-	/*
-	 * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
-	 * IOB priority registers.  Under heavy network load USB
-	 * hardware can be starved by the IOB causing a crash.  Give
-	 * it a priority boost if it has been waiting more than 400
-	 * cycles to avoid this situation.
-	 *
-	 * Testing indicates that a cnt_val of 8192 is not sufficient,
-	 * but no failures are seen with 4096.  We choose a value of
-	 * 400 to give a safety factor of 10.
-	 */
-	if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
-		union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
-
-		pri_cnt.u64 = 0;
-		pri_cnt.s.cnt_enb = 1;
-		pri_cnt.s.cnt_val = 400;
-		cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
-	}
-
-	hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
-	if (!hcd) {
-		dev_dbg(dev, "Failed to allocate memory for HCD\n");
-		return -1;
-	}
-	hcd->uses_new_polling = 1;
-	usb = (struct octeon_hcd *)hcd->hcd_priv;
-
-	spin_lock_init(&usb->lock);
-
-	usb->init_flags = initialize_flags;
-
-	/* Initialize the USB state structure */
-	usb->index = usb_num;
-	INIT_LIST_HEAD(&usb->idle_pipes);
-	for (i = 0; i < ARRAY_SIZE(usb->active_pipes); i++)
-		INIT_LIST_HEAD(&usb->active_pipes[i]);
-
-	/* Due to an errata, CN31XX doesn't support DMA */
-	if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
-		usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
-		/* Only use one channel with non DMA */
-		usb->idle_hardware_channels = 0x1;
-	} else if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
-		/* CN5XXX have an errata with channel 3 */
-		usb->idle_hardware_channels = 0xf7;
-	} else {
-		usb->idle_hardware_channels = 0xff;
-	}
-
-	status = cvmx_usb_initialize(dev, usb);
-	if (status) {
-		dev_dbg(dev, "USB initialization failed with %d\n", status);
-		usb_put_hcd(hcd);
-		return -1;
-	}
-
-	status = usb_add_hcd(hcd, irq, 0);
-	if (status) {
-		dev_dbg(dev, "USB add HCD failed with %d\n", status);
-		usb_put_hcd(hcd);
-		return -1;
-	}
-	device_wakeup_enable(hcd->self.controller);
-
-	dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
-
-	return 0;
-}
-
-static int octeon_usb_remove(struct platform_device *pdev)
-{
-	int status;
-	struct device *dev = &pdev->dev;
-	struct usb_hcd *hcd = dev_get_drvdata(dev);
-	struct octeon_hcd *usb = hcd_to_octeon(hcd);
-	unsigned long flags;
-
-	usb_remove_hcd(hcd);
-	spin_lock_irqsave(&usb->lock, flags);
-	status = cvmx_usb_shutdown(usb);
-	spin_unlock_irqrestore(&usb->lock, flags);
-	if (status)
-		dev_dbg(dev, "USB shutdown failed with %d\n", status);
-
-	usb_put_hcd(hcd);
-
-	return 0;
-}
-
-static const struct of_device_id octeon_usb_match[] = {
-	{
-		.compatible = "cavium,octeon-5750-usbc",
-	},
-	{},
-};
-MODULE_DEVICE_TABLE(of, octeon_usb_match);
-
-static struct platform_driver octeon_usb_driver = {
-	.driver = {
-		.name		= "octeon-hcd",
-		.of_match_table = octeon_usb_match,
-	},
-	.probe      = octeon_usb_probe,
-	.remove     = octeon_usb_remove,
-};
-
-static int __init octeon_usb_driver_init(void)
-{
-	if (usb_disabled())
-		return 0;
-
-	return platform_driver_register(&octeon_usb_driver);
-}
-module_init(octeon_usb_driver_init);
-
-static void __exit octeon_usb_driver_exit(void)
-{
-	if (usb_disabled())
-		return;
-
-	platform_driver_unregister(&octeon_usb_driver);
-}
-module_exit(octeon_usb_driver_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
-MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver.");
diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/staging/octeon-usb/octeon-hcd.h
deleted file mode 100644
index 9ed619c..0000000
--- a/drivers/staging/octeon-usb/octeon-hcd.h
+++ /dev/null
@@ -1,1847 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Octeon HCD hardware register definitions.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Some parts of the code were originally released under BSD license:
- *
- * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *
- *   * Redistributions in binary form must reproduce the above
- *     copyright notice, this list of conditions and the following
- *     disclaimer in the documentation and/or other materials provided
- *     with the distribution.
- *
- *   * Neither the name of Cavium Networks nor the names of
- *     its contributors may be used to endorse or promote products
- *     derived from this software without specific prior written
- *     permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its associated
- * regulations, and may be subject to export or import regulations in other
- * countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
- * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
- * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
- * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
- * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
- * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
- * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
- * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
- * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- */
-
-#ifndef __OCTEON_HCD_H__
-#define __OCTEON_HCD_H__
-
-#include <asm/bitfield.h>
-
-#define CVMX_USBCXBASE 0x00016F0010000000ull
-#define CVMX_USBCXREG1(reg, bid) \
-	(CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \
-	 ((bid) & 1) * 0x100000000000ull)
-#define CVMX_USBCXREG2(reg, bid, off) \
-	(CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \
-	 (((off) & 7) + ((bid) & 1) * 0x8000000000ull) * 32)
-
-#define CVMX_USBCX_GAHBCFG(bid)		CVMX_USBCXREG1(0x008, bid)
-#define CVMX_USBCX_GHWCFG3(bid)		CVMX_USBCXREG1(0x04c, bid)
-#define CVMX_USBCX_GINTMSK(bid)		CVMX_USBCXREG1(0x018, bid)
-#define CVMX_USBCX_GINTSTS(bid)		CVMX_USBCXREG1(0x014, bid)
-#define CVMX_USBCX_GNPTXFSIZ(bid)	CVMX_USBCXREG1(0x028, bid)
-#define CVMX_USBCX_GNPTXSTS(bid)	CVMX_USBCXREG1(0x02c, bid)
-#define CVMX_USBCX_GOTGCTL(bid)		CVMX_USBCXREG1(0x000, bid)
-#define CVMX_USBCX_GRSTCTL(bid)		CVMX_USBCXREG1(0x010, bid)
-#define CVMX_USBCX_GRXFSIZ(bid)		CVMX_USBCXREG1(0x024, bid)
-#define CVMX_USBCX_GRXSTSPH(bid)	CVMX_USBCXREG1(0x020, bid)
-#define CVMX_USBCX_GUSBCFG(bid)		CVMX_USBCXREG1(0x00c, bid)
-#define CVMX_USBCX_HAINT(bid)		CVMX_USBCXREG1(0x414, bid)
-#define CVMX_USBCX_HAINTMSK(bid)	CVMX_USBCXREG1(0x418, bid)
-#define CVMX_USBCX_HCCHARX(off, bid)	CVMX_USBCXREG2(0x500, bid, off)
-#define CVMX_USBCX_HCFG(bid)		CVMX_USBCXREG1(0x400, bid)
-#define CVMX_USBCX_HCINTMSKX(off, bid)	CVMX_USBCXREG2(0x50c, bid, off)
-#define CVMX_USBCX_HCINTX(off, bid)	CVMX_USBCXREG2(0x508, bid, off)
-#define CVMX_USBCX_HCSPLTX(off, bid)	CVMX_USBCXREG2(0x504, bid, off)
-#define CVMX_USBCX_HCTSIZX(off, bid)	CVMX_USBCXREG2(0x510, bid, off)
-#define CVMX_USBCX_HFIR(bid)		CVMX_USBCXREG1(0x404, bid)
-#define CVMX_USBCX_HFNUM(bid)		CVMX_USBCXREG1(0x408, bid)
-#define CVMX_USBCX_HPRT(bid)		CVMX_USBCXREG1(0x440, bid)
-#define CVMX_USBCX_HPTXFSIZ(bid)	CVMX_USBCXREG1(0x100, bid)
-#define CVMX_USBCX_HPTXSTS(bid)		CVMX_USBCXREG1(0x410, bid)
-
-#define CVMX_USBNXBID1(bid) (((bid) & 1) * 0x10000000ull)
-#define CVMX_USBNXBID2(bid) (((bid) & 1) * 0x100000000000ull)
-
-#define CVMX_USBNXREG1(reg, bid) \
-	(CVMX_ADD_IO_SEG(0x0001180068000000ull | reg) + CVMX_USBNXBID1(bid))
-#define CVMX_USBNXREG2(reg, bid) \
-	(CVMX_ADD_IO_SEG(0x00016F0000000000ull | reg) + CVMX_USBNXBID2(bid))
-
-#define CVMX_USBNX_CLK_CTL(bid)		CVMX_USBNXREG1(0x10, bid)
-#define CVMX_USBNX_DMA0_INB_CHN0(bid)	CVMX_USBNXREG2(0x818, bid)
-#define CVMX_USBNX_DMA0_OUTB_CHN0(bid)	CVMX_USBNXREG2(0x858, bid)
-#define CVMX_USBNX_USBP_CTL_STATUS(bid)	CVMX_USBNXREG1(0x18, bid)
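The two macro families above encode the register layout directly in the address arithmetic: per-channel host registers (HCCHAR, HCINT, ...) are packed 32 bytes apart, and selecting the second USB block (bid = 1) adds the same 0x100000000000-byte stride in both families, since 0x8000000000 * 32 == 0x100000000000. A hypothetical compile-time check making that explicit (not part of the original header):

#include <linux/build_bug.h>

static inline void octeon_usb_reg_layout_check(void)
{
	/* adjacent channels of one block are 32 bytes apart */
	BUILD_BUG_ON(CVMX_USBCX_HCINTX(1, 0) - CVMX_USBCX_HCINTX(0, 0) != 32);
	/* both macro families place block 1 at the same stride */
	BUILD_BUG_ON(CVMX_USBCX_HCINTX(0, 1) - CVMX_USBCX_HCINTX(0, 0) !=
		     CVMX_USBCX_GINTSTS(1) - CVMX_USBCX_GINTSTS(0));
}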
-
-/**
- * cvmx_usbc#_gahbcfg
- *
- * Core AHB Configuration Register (GAHBCFG)
- *
- * This register can be used to configure the core after power-on or a change in
- * mode of operation. This register mainly contains AHB system-related
- * configuration parameters. The AHB is the processor interface to the O2P USB
- * core. In general, software need not know about this interface except to
- * program the values as specified.
- *
- * The application must program this register as part of the O2P USB core
- * initialization. Do not change this register after the initial programming.
- */
-union cvmx_usbcx_gahbcfg {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_gahbcfg_s
-	 * @ptxfemplvl: Periodic TxFIFO Empty Level (PTxFEmpLvl)
-	 *	Software should set this bit to 0x1.
-	 *	Indicates when the Periodic TxFIFO Empty Interrupt bit in the
-	 *	Core Interrupt register (GINTSTS.PTxFEmp) is triggered. This
-	 *	bit is used only in Slave mode.
-	 *	* 1'b0: GINTSTS.PTxFEmp interrupt indicates that the Periodic
-	 *	TxFIFO is half empty
-	 *	* 1'b1: GINTSTS.PTxFEmp interrupt indicates that the Periodic
-	 *	TxFIFO is completely empty
-	 * @nptxfemplvl: Non-Periodic TxFIFO Empty Level (NPTxFEmpLvl)
-	 *	Software should set this bit to 0x1.
-	 *	Indicates when the Non-Periodic TxFIFO Empty Interrupt bit in
-	 *	the Core Interrupt register (GINTSTS.NPTxFEmp) is triggered.
-	 *	This bit is used only in Slave mode.
-	 *	* 1'b0: GINTSTS.NPTxFEmp interrupt indicates that the Non-
-	 *	Periodic TxFIFO is half empty
-	 *	* 1'b1: GINTSTS.NPTxFEmp interrupt indicates that the Non-
-	 *	Periodic TxFIFO is completely empty
-	 * @dmaen: DMA Enable (DMAEn)
-	 *	* 1'b0: Core operates in Slave mode
-	 *	* 1'b1: Core operates in a DMA mode
-	 * @hbstlen: Burst Length/Type (HBstLen)
-	 *	This field has no effect and should be left as 0x0.
-	 * @glblintrmsk: Global Interrupt Mask (GlblIntrMsk)
-	 *	Software should set this field to 0x1.
-	 *	The application uses this bit to mask or unmask the interrupt
-	 *	line assertion to itself. Irrespective of this bit's setting,
-	 *	the interrupt status registers are updated by the core.
-	 *	* 1'b0: Mask the interrupt assertion to the application.
-	 *	* 1'b1: Unmask the interrupt assertion to the application.
-	 */
-	struct cvmx_usbcx_gahbcfg_s {
-		__BITFIELD_FIELD(u32 reserved_9_31	: 23,
-		__BITFIELD_FIELD(u32 ptxfemplvl		: 1,
-		__BITFIELD_FIELD(u32 nptxfemplvl	: 1,
-		__BITFIELD_FIELD(u32 reserved_6_6	: 1,
-		__BITFIELD_FIELD(u32 dmaen		: 1,
-		__BITFIELD_FIELD(u32 hbstlen		: 4,
-		__BITFIELD_FIELD(u32 glblintrmsk	: 1,
-		;)))))))
-	} s;
-};
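All register unions in this header follow the same pattern: read the raw 32-bit value into .u32, manipulate named fields through the endian-safe __BITFIELD_FIELD layout in .s, and write .u32 back. A minimal usage sketch, assuming the cvmx_usb_read_csr32()/cvmx_usb_write_csr32() helpers from the deleted octeon-hcd.c; the function itself is illustrative, not driver code:

static void octeon_usb_enable_dma(struct octeon_hcd *usb)
{
	union cvmx_usbcx_gahbcfg gahbcfg;

	gahbcfg.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index));
	gahbcfg.s.dmaen = 1;		/* take the core out of Slave mode */
	gahbcfg.s.glblintrmsk = 1;	/* unmask the interrupt line */
	cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index), gahbcfg.u32);
}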
-
-/**
- * cvmx_usbc#_ghwcfg3
- *
- * User HW Config3 Register (GHWCFG3)
- *
- * This register contains the configuration options of the O2P USB core.
- */
-union cvmx_usbcx_ghwcfg3 {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_ghwcfg3_s
-	 * @dfifodepth: DFIFO Depth (DfifoDepth)
-	 *	This value is in terms of 32-bit words.
-	 *	* Minimum value is 32
-	 *	* Maximum value is 32768
-	 * @ahbphysync: AHB and PHY Synchronous (AhbPhySync)
-	 *	Indicates whether AHB and PHY clocks are synchronous to
-	 *	each other.
-	 *	* 1'b0: No
-	 *	* 1'b1: Yes
-	 *	This bit is tied to 1.
-	 * @rsttype: Reset Style for Clocked always Blocks in RTL (RstType)
-	 *	* 1'b0: Asynchronous reset is used in the core
-	 *	* 1'b1: Synchronous reset is used in the core
-	 * @optfeature: Optional Features Removed (OptFeature)
-	 *	Indicates whether the User ID register, GPIO interface ports,
-	 *	and SOF toggle and counter ports were removed for gate count
-	 *	optimization.
-	 * @vendor_control_interface_support: Vendor Control Interface Support
-	 *	* 1'b0: Vendor Control Interface is not available on the core.
-	 *	* 1'b1: Vendor Control Interface is available.
-	 * @i2c_selection: I2C Selection
-	 *	* 1'b0: I2C Interface is not available on the core.
-	 *	* 1'b1: I2C Interface is available on the core.
-	 * @otgen: OTG Function Enabled (OtgEn)
-	 *	The application uses this bit to indicate the O2P USB core's
-	 *	OTG capabilities.
-	 *	* 1'b0: Not OTG capable
-	 *	* 1'b1: OTG Capable
-	 * @pktsizewidth: Width of Packet Size Counters (PktSizeWidth)
-	 *	* 3'b000: 4 bits
-	 *	* 3'b001: 5 bits
-	 *	* 3'b010: 6 bits
-	 *	* 3'b011: 7 bits
-	 *	* 3'b100: 8 bits
-	 *	* 3'b101: 9 bits
-	 *	* 3'b110: 10 bits
-	 *	* Others: Reserved
-	 * @xfersizewidth: Width of Transfer Size Counters (XferSizeWidth)
-	 *	* 4'b0000: 11 bits
-	 *	* 4'b0001: 12 bits
-	 *	- ...
-	 *	* 4'b1000: 19 bits
-	 *	* Others: Reserved
-	 */
-	struct cvmx_usbcx_ghwcfg3_s {
-		__BITFIELD_FIELD(u32 dfifodepth				: 16,
-		__BITFIELD_FIELD(u32 reserved_13_15			: 3,
-		__BITFIELD_FIELD(u32 ahbphysync				: 1,
-		__BITFIELD_FIELD(u32 rsttype				: 1,
-		__BITFIELD_FIELD(u32 optfeature				: 1,
-		__BITFIELD_FIELD(u32 vendor_control_interface_support	: 1,
-		__BITFIELD_FIELD(u32 i2c_selection			: 1,
-		__BITFIELD_FIELD(u32 otgen				: 1,
-		__BITFIELD_FIELD(u32 pktsizewidth			: 3,
-		__BITFIELD_FIELD(u32 xfersizewidth			: 4,
-		;))))))))))
-	} s;
-};
-
-/**
- * cvmx_usbc#_gintmsk
- *
- * Core Interrupt Mask Register (GINTMSK)
- *
- * This register works with the Core Interrupt register to interrupt the
- * application. When an interrupt bit is masked, the interrupt associated with
- * that bit will not be generated. However, the Core Interrupt (GINTSTS)
- * register bit corresponding to that interrupt will still be set.
- * Mask interrupt: 1'b0, Unmask interrupt: 1'b1
- */
-union cvmx_usbcx_gintmsk {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_gintmsk_s
-	 * @wkupintmsk: Resume/Remote Wakeup Detected Interrupt Mask
-	 *	(WkUpIntMsk)
-	 * @sessreqintmsk: Session Request/New Session Detected Interrupt Mask
-	 *	(SessReqIntMsk)
-	 * @disconnintmsk: Disconnect Detected Interrupt Mask (DisconnIntMsk)
-	 * @conidstschngmsk: Connector ID Status Change Mask (ConIDStsChngMsk)
-	 * @ptxfempmsk: Periodic TxFIFO Empty Mask (PTxFEmpMsk)
-	 * @hchintmsk: Host Channels Interrupt Mask (HChIntMsk)
-	 * @prtintmsk: Host Port Interrupt Mask (PrtIntMsk)
-	 * @fetsuspmsk: Data Fetch Suspended Mask (FetSuspMsk)
-	 * @incomplpmsk: Incomplete Periodic Transfer Mask (incomplPMsk)
-	 *	Incomplete Isochronous OUT Transfer Mask
-	 *	(incompISOOUTMsk)
-	 * @incompisoinmsk: Incomplete Isochronous IN Transfer Mask
-	 *		    (incompISOINMsk)
-	 * @oepintmsk: OUT Endpoints Interrupt Mask (OEPIntMsk)
-	 * @inepintmsk: IN Endpoints Interrupt Mask (INEPIntMsk)
-	 * @epmismsk: Endpoint Mismatch Interrupt Mask (EPMisMsk)
-	 * @eopfmsk: End of Periodic Frame Interrupt Mask (EOPFMsk)
-	 * @isooutdropmsk: Isochronous OUT Packet Dropped Interrupt Mask
-	 *	(ISOOutDropMsk)
-	 * @enumdonemsk: Enumeration Done Mask (EnumDoneMsk)
-	 * @usbrstmsk: USB Reset Mask (USBRstMsk)
-	 * @usbsuspmsk: USB Suspend Mask (USBSuspMsk)
-	 * @erlysuspmsk: Early Suspend Mask (ErlySuspMsk)
-	 * @i2cint: I2C Interrupt Mask (I2CINT)
-	 * @ulpickintmsk: ULPI Carkit Interrupt Mask (ULPICKINTMsk)
-	 *	I2C Carkit Interrupt Mask (I2CCKINTMsk)
-	 * @goutnakeffmsk: Global OUT NAK Effective Mask (GOUTNakEffMsk)
-	 * @ginnakeffmsk: Global Non-Periodic IN NAK Effective Mask
-	 *		  (GINNakEffMsk)
-	 * @nptxfempmsk: Non-Periodic TxFIFO Empty Mask (NPTxFEmpMsk)
-	 * @rxflvlmsk: Receive FIFO Non-Empty Mask (RxFLvlMsk)
-	 * @sofmsk: Start of (micro)Frame Mask (SofMsk)
-	 * @otgintmsk: OTG Interrupt Mask (OTGIntMsk)
-	 * @modemismsk: Mode Mismatch Interrupt Mask (ModeMisMsk)
-	 */
-	struct cvmx_usbcx_gintmsk_s {
-		__BITFIELD_FIELD(u32 wkupintmsk		: 1,
-		__BITFIELD_FIELD(u32 sessreqintmsk	: 1,
-		__BITFIELD_FIELD(u32 disconnintmsk	: 1,
-		__BITFIELD_FIELD(u32 conidstschngmsk	: 1,
-		__BITFIELD_FIELD(u32 reserved_27_27	: 1,
-		__BITFIELD_FIELD(u32 ptxfempmsk		: 1,
-		__BITFIELD_FIELD(u32 hchintmsk		: 1,
-		__BITFIELD_FIELD(u32 prtintmsk		: 1,
-		__BITFIELD_FIELD(u32 reserved_23_23	: 1,
-		__BITFIELD_FIELD(u32 fetsuspmsk		: 1,
-		__BITFIELD_FIELD(u32 incomplpmsk	: 1,
-		__BITFIELD_FIELD(u32 incompisoinmsk	: 1,
-		__BITFIELD_FIELD(u32 oepintmsk		: 1,
-		__BITFIELD_FIELD(u32 inepintmsk		: 1,
-		__BITFIELD_FIELD(u32 epmismsk		: 1,
-		__BITFIELD_FIELD(u32 reserved_16_16	: 1,
-		__BITFIELD_FIELD(u32 eopfmsk		: 1,
-		__BITFIELD_FIELD(u32 isooutdropmsk	: 1,
-		__BITFIELD_FIELD(u32 enumdonemsk	: 1,
-		__BITFIELD_FIELD(u32 usbrstmsk		: 1,
-		__BITFIELD_FIELD(u32 usbsuspmsk		: 1,
-		__BITFIELD_FIELD(u32 erlysuspmsk	: 1,
-		__BITFIELD_FIELD(u32 i2cint		: 1,
-		__BITFIELD_FIELD(u32 ulpickintmsk	: 1,
-		__BITFIELD_FIELD(u32 goutnakeffmsk	: 1,
-		__BITFIELD_FIELD(u32 ginnakeffmsk	: 1,
-		__BITFIELD_FIELD(u32 nptxfempmsk	: 1,
-		__BITFIELD_FIELD(u32 rxflvlmsk		: 1,
-		__BITFIELD_FIELD(u32 sofmsk		: 1,
-		__BITFIELD_FIELD(u32 otgintmsk		: 1,
-		__BITFIELD_FIELD(u32 modemismsk		: 1,
-		__BITFIELD_FIELD(u32 reserved_0_0	: 1,
-		;))))))))))))))))))))))))))))))))
-	} s;
-};
-
-/**
- * cvmx_usbc#_gintsts
- *
- * Core Interrupt Register (GINTSTS)
- *
- * This register interrupts the application for system-level events in the
- * current mode of operation (Device mode or Host mode). It is shown in
- * Interrupt. Some of the bits in this register are valid only in Host mode,
- * while others are valid in Device mode only. This register also indicates the
- * current mode of operation. In order to clear the interrupt status bits of
- * type R_SS_WC, the application must write 1'b1 into the bit. The FIFO status
- * interrupts are read only; once software reads from or writes to the FIFO
- * while servicing these interrupts, FIFO interrupt conditions are cleared
- * automatically.
- */
-union cvmx_usbcx_gintsts {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_gintsts_s
-	 * @wkupint: Resume/Remote Wakeup Detected Interrupt (WkUpInt)
-	 *	In Device mode, this interrupt is asserted when a resume is
-	 *	detected on the USB. In Host mode, this interrupt is asserted
-	 *	when a remote wakeup is detected on the USB.
-	 *	For more information on how to use this interrupt, see "Partial
-	 *	Power-Down and Clock Gating Programming Model" on
-	 *	page 353.
-	 * @sessreqint: Session Request/New Session Detected Interrupt
-	 *		(SessReqInt)
-	 *	In Host mode, this interrupt is asserted when a session request
-	 *	is detected from the device. In Device mode, this interrupt is
-	 *	asserted when the utmiotg_bvalid signal goes high.
-	 *	For more information on how to use this interrupt, see "Partial
-	 *	Power-Down and Clock Gating Programming Model" on
-	 *	page 353.
-	 * @disconnint: Disconnect Detected Interrupt (DisconnInt)
-	 *	Asserted when a device disconnect is detected.
-	 * @conidstschng: Connector ID Status Change (ConIDStsChng)
-	 *	The core sets this bit when there is a change in connector ID
-	 *	status.
-	 * @ptxfemp: Periodic TxFIFO Empty (PTxFEmp)
-	 *	Asserted when the Periodic Transmit FIFO is either half or
-	 *	completely empty and there is space for at least one entry to be
-	 *	written in the Periodic Request Queue. The half or completely
-	 *	empty status is determined by the Periodic TxFIFO Empty Level
-	 *	bit in the Core AHB Configuration register
-	 *	(GAHBCFG.PTxFEmpLvl).
-	 * @hchint: Host Channels Interrupt (HChInt)
-	 *	The core sets this bit to indicate that an interrupt is pending
-	 *	on one of the channels of the core (in Host mode). The
-	 *	application must read the Host All Channels Interrupt (HAINT)
-	 *	register to determine the exact number of the channel on which
-	 *	the interrupt occurred, and then read the corresponding Host
-	 *	Channel-n Interrupt (HCINTn) register to determine the exact
-	 *	cause of the interrupt. The application must clear the
-	 *	appropriate status bit in the HCINTn register to clear this bit.
-	 * @prtint: Host Port Interrupt (PrtInt)
-	 *	The core sets this bit to indicate a change in port status of
-	 *	one of the O2P USB core ports in Host mode. The application must
-	 *	read the Host Port Control and Status (HPRT) register to
-	 *	determine the exact event that caused this interrupt. The
-	 *	application must clear the appropriate status bit in the Host
-	 *	Port Control and Status register to clear this bit.
-	 * @fetsusp: Data Fetch Suspended (FetSusp)
-	 *	This interrupt is valid only in DMA mode. This interrupt
-	 *	indicates that the core has stopped fetching data for IN
-	 *	endpoints due to the unavailability of TxFIFO space or Request
-	 *	Queue space. This interrupt is used by the application for an
-	 *	endpoint mismatch algorithm.
-	 * @incomplp: Incomplete Periodic Transfer (incomplP)
-	 *	In Host mode, the core sets this interrupt bit when there are
-	 *	incomplete periodic transactions still pending which are
-	 *	scheduled for the current microframe.
-	 *	Incomplete Isochronous OUT Transfer (incompISOOUT)
-	 *	In Device mode, the core sets this interrupt to indicate that
-	 *	there is at least one isochronous OUT endpoint on which the
-	 *	transfer is not completed in the current microframe. This
-	 *	interrupt is asserted along with the End of Periodic Frame
-	 *	Interrupt (EOPF) bit in this register.
-	 * @incompisoin: Incomplete Isochronous IN Transfer (incompISOIN)
-	 *	The core sets this interrupt to indicate that there is at least
-	 *	one isochronous IN endpoint on which the transfer is not
-	 *	completed in the current microframe. This interrupt is asserted
-	 *	along with the End of Periodic Frame Interrupt (EOPF) bit in
-	 *	this register.
-	 * @oepint: OUT Endpoints Interrupt (OEPInt)
-	 *	The core sets this bit to indicate that an interrupt is pending
-	 *	on one of the OUT endpoints of the core (in Device mode). The
-	 *	application must read the Device All Endpoints Interrupt
-	 *	(DAINT) register to determine the exact number of the OUT
-	 *	endpoint on which the interrupt occurred, and then read the
-	 *	corresponding Device OUT Endpoint-n Interrupt (DOEPINTn)
-	 *	register to determine the exact cause of the interrupt. The
-	 *	application must clear the appropriate status bit in the
-	 *	corresponding DOEPINTn register to clear this bit.
-	 * @iepint: IN Endpoints Interrupt (IEPInt)
-	 *	The core sets this bit to indicate that an interrupt is pending
-	 *	on one of the IN endpoints of the core (in Device mode). The
-	 *	application must read the Device All Endpoints Interrupt
-	 *	(DAINT) register to determine the exact number of the IN
-	 *	endpoint on which the interrupt occurred, and then read the
-	 *	corresponding Device IN Endpoint-n Interrupt (DIEPINTn)
-	 *	register to determine the exact cause of the interrupt. The
-	 *	application must clear the appropriate status bit in the
-	 *	corresponding DIEPINTn register to clear this bit.
-	 * @epmis: Endpoint Mismatch Interrupt (EPMis)
-	 *	Indicates that an IN token has been received for a non-periodic
-	 *	endpoint, but the data for another endpoint is present in the
-	 *	top of the Non-Periodic Transmit FIFO and the IN endpoint
-	 *	mismatch count programmed by the application has expired.
-	 * @eopf: End of Periodic Frame Interrupt (EOPF)
-	 *	Indicates that the period specified in the Periodic Frame
-	 *	Interval field of the Device Configuration register
-	 *	(DCFG.PerFrInt) has been reached in the current microframe.
-	 * @isooutdrop: Isochronous OUT Packet Dropped Interrupt (ISOOutDrop)
-	 *	The core sets this bit when it fails to write an isochronous OUT
-	 *	packet into the RxFIFO because the RxFIFO doesn't have
-	 *	enough space to accommodate a maximum packet size packet
-	 *	for the isochronous OUT endpoint.
-	 * @enumdone: Enumeration Done (EnumDone)
-	 *	The core sets this bit to indicate that speed enumeration is
-	 *	complete. The application must read the Device Status (DSTS)
-	 *	register to obtain the enumerated speed.
-	 * @usbrst: USB Reset (USBRst)
-	 *	The core sets this bit to indicate that a reset is detected on
-	 *	the USB.
-	 * @usbsusp: USB Suspend (USBSusp)
-	 *	The core sets this bit to indicate that a suspend was detected
-	 *	on the USB. The core enters the Suspended state when there
-	 *	is no activity on the phy_line_state_i signal for an extended
-	 *	period of time.
-	 * @erlysusp: Early Suspend (ErlySusp)
-	 *	The core sets this bit to indicate that an Idle state has been
-	 *	detected on the USB for 3 ms.
-	 * @i2cint: I2C Interrupt (I2CINT)
-	 *	This bit is always 0x0.
-	 * @ulpickint: ULPI Carkit Interrupt (ULPICKINT)
-	 *	This bit is always 0x0.
-	 * @goutnakeff: Global OUT NAK Effective (GOUTNakEff)
-	 *	Indicates that the Set Global OUT NAK bit in the Device Control
-	 *	register (DCTL.SGOUTNak), set by the application, has taken
-	 *	effect in the core. This bit can be cleared by writing the Clear
-	 *	Global OUT NAK bit in the Device Control register
-	 *	(DCTL.CGOUTNak).
-	 * @ginnakeff: Global IN Non-Periodic NAK Effective (GINNakEff)
-	 *	Indicates that the Set Global Non-Periodic IN NAK bit in the
-	 *	Device Control register (DCTL.SGNPInNak), set by the
-	 *	application, has taken effect in the core. That is, the core has
-	 *	sampled the Global IN NAK bit set by the application. This bit
-	 *	can be cleared by clearing the Clear Global Non-Periodic IN
-	 *	NAK bit in the Device Control register (DCTL.CGNPInNak).
-	 *	This interrupt does not necessarily mean that a NAK handshake
-	 *	is sent out on the USB. The STALL bit takes precedence over
-	 *	the NAK bit.
-	 * @nptxfemp: Non-Periodic TxFIFO Empty (NPTxFEmp)
-	 *	This interrupt is asserted when the Non-Periodic TxFIFO is
-	 *	either half or completely empty, and there is space for at least
-	 *	one entry to be written to the Non-Periodic Transmit Request
-	 *	Queue. The half or completely empty status is determined by
-	 *	the Non-Periodic TxFIFO Empty Level bit in the Core AHB
-	 *	Configuration register (GAHBCFG.NPTxFEmpLvl).
-	 * @rxflvl: RxFIFO Non-Empty (RxFLvl)
-	 *	Indicates that there is at least one packet pending to be read
-	 *	from the RxFIFO.
-	 * @sof: Start of (micro)Frame (Sof)
-	 *	In Host mode, the core sets this bit to indicate that an SOF
-	 *	(FS), micro-SOF (HS), or Keep-Alive (LS) is transmitted on the
-	 *	USB. The application must write a 1 to this bit to clear the
-	 *	interrupt.
-	 *	In Device mode, the core sets this bit to indicate that an
-	 *	SOF token has been received on the USB. The application can read
-	 *	the Device Status register to get the current (micro)frame
-	 *	number. This interrupt is seen only when the core is operating
-	 *	at either HS or FS.
-	 * @otgint: OTG Interrupt (OTGInt)
-	 *	The core sets this bit to indicate an OTG protocol event. The
-	 *	application must read the OTG Interrupt Status (GOTGINT)
-	 *	register to determine the exact event that caused this
-	 *	interrupt. The application must clear the appropriate status bit
-	 *	in the GOTGINT register to clear this bit.
-	 * @modemis: Mode Mismatch Interrupt (ModeMis)
-	 *	The core sets this bit when the application is trying to access:
-	 *	* A Host mode register, when the core is operating in Device
-	 *	mode
-	 *	* A Device mode register, when the core is operating in Host
-	 *	mode
-	 *	The register access is completed on the AHB with an OKAY
-	 *	response, but is ignored by the core internally and doesn't
-	 *	affect the operation of the core.
-	 * @curmod: Current Mode of Operation (CurMod)
-	 *	Indicates the current mode of operation.
-	 *	* 1'b0: Device mode
-	 *	* 1'b1: Host mode
-	 */
-	struct cvmx_usbcx_gintsts_s {
-		__BITFIELD_FIELD(u32 wkupint		: 1,
-		__BITFIELD_FIELD(u32 sessreqint		: 1,
-		__BITFIELD_FIELD(u32 disconnint		: 1,
-		__BITFIELD_FIELD(u32 conidstschng	: 1,
-		__BITFIELD_FIELD(u32 reserved_27_27	: 1,
-		__BITFIELD_FIELD(u32 ptxfemp		: 1,
-		__BITFIELD_FIELD(u32 hchint		: 1,
-		__BITFIELD_FIELD(u32 prtint		: 1,
-		__BITFIELD_FIELD(u32 reserved_23_23	: 1,
-		__BITFIELD_FIELD(u32 fetsusp		: 1,
-		__BITFIELD_FIELD(u32 incomplp		: 1,
-		__BITFIELD_FIELD(u32 incompisoin	: 1,
-		__BITFIELD_FIELD(u32 oepint		: 1,
-		__BITFIELD_FIELD(u32 iepint		: 1,
-		__BITFIELD_FIELD(u32 epmis		: 1,
-		__BITFIELD_FIELD(u32 reserved_16_16	: 1,
-		__BITFIELD_FIELD(u32 eopf		: 1,
-		__BITFIELD_FIELD(u32 isooutdrop		: 1,
-		__BITFIELD_FIELD(u32 enumdone		: 1,
-		__BITFIELD_FIELD(u32 usbrst		: 1,
-		__BITFIELD_FIELD(u32 usbsusp		: 1,
-		__BITFIELD_FIELD(u32 erlysusp		: 1,
-		__BITFIELD_FIELD(u32 i2cint		: 1,
-		__BITFIELD_FIELD(u32 ulpickint		: 1,
-		__BITFIELD_FIELD(u32 goutnakeff		: 1,
-		__BITFIELD_FIELD(u32 ginnakeff		: 1,
-		__BITFIELD_FIELD(u32 nptxfemp		: 1,
-		__BITFIELD_FIELD(u32 rxflvl		: 1,
-		__BITFIELD_FIELD(u32 sof		: 1,
-		__BITFIELD_FIELD(u32 otgint		: 1,
-		__BITFIELD_FIELD(u32 modemis		: 1,
-		__BITFIELD_FIELD(u32 curmod		: 1,
-		;))))))))))))))))))))))))))))))))
-	} s;
-};
-
-/**
- * cvmx_usbc#_gnptxfsiz
- *
- * Non-Periodic Transmit FIFO Size Register (GNPTXFSIZ)
- *
- * The application can program the RAM size and the memory start address for the
- * Non-Periodic TxFIFO.
- */
-union cvmx_usbcx_gnptxfsiz {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_gnptxfsiz_s
-	 * @nptxfdep: Non-Periodic TxFIFO Depth (NPTxFDep)
-	 *	This value is in terms of 32-bit words.
-	 *	Minimum value is 16
-	 *	Maximum value is 32768
-	 * @nptxfstaddr: Non-Periodic Transmit RAM Start Address (NPTxFStAddr)
-	 *	This field contains the memory start address for Non-Periodic
-	 *	Transmit FIFO RAM.
-	 */
-	struct cvmx_usbcx_gnptxfsiz_s {
-		__BITFIELD_FIELD(u32 nptxfdep		: 16,
-		__BITFIELD_FIELD(u32 nptxfstaddr	: 16,
-		;))
-	} s;
-};
-
-/**
- * cvmx_usbc#_gnptxsts
- *
- * Non-Periodic Transmit FIFO/Queue Status Register (GNPTXSTS)
- *
- * This read-only register contains the free space information for the
- * Non-Periodic TxFIFO and the Non-Periodic Transmit Request Queue.
- */
-union cvmx_usbcx_gnptxsts {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_gnptxsts_s
-	 * @nptxqtop: Top of the Non-Periodic Transmit Request Queue (NPTxQTop)
-	 *	Entry in the Non-Periodic Tx Request Queue that is currently
-	 *	being processed by the MAC.
-	 *	* Bits [30:27]: Channel/endpoint number
-	 *	* Bits [26:25]:
-	 *	- 2'b00: IN/OUT token
-	 *	- 2'b01: Zero-length transmit packet (device IN/host OUT)
-	 *	- 2'b10: PING/CSPLIT token
-	 *	- 2'b11: Channel halt command
-	 *	* Bit [24]: Terminate (last entry for selected channel/endpoint)
-	 * @nptxqspcavail: Non-Periodic Transmit Request Queue Space Available
-	 *	(NPTxQSpcAvail)
-	 *	Indicates the amount of free space available in the Non-
-	 *	Periodic Transmit Request Queue. This queue holds both IN
-	 *	and OUT requests in Host mode. Device mode has only IN
-	 *	requests.
-	 *	* 8'h0: Non-Periodic Transmit Request Queue is full
-	 *	* 8'h1: 1 location available
-	 *	* 8'h2: 2 locations available
-	 *	* n: n locations available (0..8)
-	 *	* Others: Reserved
-	 * @nptxfspcavail: Non-Periodic TxFIFO Space Avail (NPTxFSpcAvail)
-	 *	Indicates the amount of free space available in the Non-
-	 *	Periodic TxFIFO.
-	 *	Values are in terms of 32-bit words.
-	 *	* 16'h0: Non-Periodic TxFIFO is full
-	 *	* 16'h1: 1 word available
-	 *	* 16'h2: 2 words available
-	 *	* 16'hn: n words available (where 0..32768)
-	 *	* 16'h8000: 32768 words available
-	 *	* Others: Reserved
-	 */
-	struct cvmx_usbcx_gnptxsts_s {
-		__BITFIELD_FIELD(u32 reserved_31_31	: 1,
-		__BITFIELD_FIELD(u32 nptxqtop		: 7,
-		__BITFIELD_FIELD(u32 nptxqspcavail	: 8,
-		__BITFIELD_FIELD(u32 nptxfspcavail	: 16,
-		;))))
-	} s;
-};
-
-/**
- * cvmx_usbc#_grstctl
- *
- * Core Reset Register (GRSTCTL)
- *
- * The application uses this register to reset various hardware features inside
- * the core.
- */
-union cvmx_usbcx_grstctl {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_grstctl_s
-	 * @ahbidle: AHB Master Idle (AHBIdle)
-	 *	Indicates that the AHB Master State Machine is in the IDLE
-	 *	condition.
-	 * @dmareq: DMA Request Signal (DMAReq)
-	 *	Indicates that the DMA request is in progress. Used for debug.
-	 * @txfnum: TxFIFO Number (TxFNum)
-	 *	This is the FIFO number that must be flushed using the TxFIFO
-	 *	Flush bit. This field must not be changed until the core clears
-	 *	the TxFIFO Flush bit.
-	 *	* 5'h0: Non-Periodic TxFIFO flush
-	 *	* 5'h1: Periodic TxFIFO 1 flush in Device mode or Periodic
-	 *	TxFIFO flush in Host mode
-	 *	* 5'h2: Periodic TxFIFO 2 flush in Device mode
-	 *	- ...
-	 *	* 5'hF: Periodic TxFIFO 15 flush in Device mode
-	 *	* 5'h10: Flush all the Periodic and Non-Periodic TxFIFOs in the
-	 *	core
-	 * @txfflsh: TxFIFO Flush (TxFFlsh)
-	 *	This bit selectively flushes a single or all transmit FIFOs, but
-	 *	cannot do so if the core is in the midst of a transaction.
-	 *	The application must only write this bit after checking that the
-	 *	core is neither writing to the TxFIFO nor reading from the
-	 *	TxFIFO.
-	 *	The application must wait until the core clears this bit before
-	 *	performing any operations. This bit takes 8 clocks (of phy_clk
-	 *	or hclk, whichever is slower) to clear.
-	 * @rxfflsh: RxFIFO Flush (RxFFlsh)
-	 *	The application can flush the entire RxFIFO using this bit, but
-	 *	must first ensure that the core is not in the middle of a
-	 *	transaction.
-	 *	The application must only write to this bit after checking that
-	 *	the core is neither reading from the RxFIFO nor writing to the
-	 *	RxFIFO.
-	 *	The application must wait until the bit is cleared before
-	 *	performing any other operations. This bit will take 8 clocks
-	 *	(slowest of PHY or AHB clock) to clear.
-	 * @intknqflsh: IN Token Sequence Learning Queue Flush (INTknQFlsh)
-	 *	The application writes this bit to flush the IN Token Sequence
-	 *	Learning Queue.
-	 * @frmcntrrst: Host Frame Counter Reset (FrmCntrRst)
-	 *	The application writes this bit to reset the (micro)frame number
-	 *	counter inside the core. When the (micro)frame counter is reset,
-	 *	the subsequent SOF sent out by the core will have a
-	 *	(micro)frame number of 0.
-	 * @hsftrst: HClk Soft Reset (HSftRst)
-	 *	The application uses this bit to flush the control logic in the
-	 *	AHB Clock domain. Only AHB Clock Domain pipelines are reset.
-	 *	* FIFOs are not flushed with this bit.
-	 *	* All state machines in the AHB clock domain are reset to the
-	 *	Idle state after terminating the transactions on the AHB,
-	 *	following the protocol.
-	 *	* CSR control bits used by the AHB clock domain state
-	 *	machines are cleared.
-	 *	* To clear this interrupt, status mask bits that control the
-	 *	interrupt status and are generated by the AHB clock domain
-	 *	state machine are cleared.
-	 *	* Because interrupt status bits are not cleared, the application
-	 *	can get the status of any core events that occurred after it set
-	 *	this bit.
-	 *	This is a self-clearing bit that the core clears after all
-	 *	necessary logic is reset in the core. This may take several
-	 *	clocks, depending on the core's current state.
-	 * @csftrst: Core Soft Reset (CSftRst)
-	 *	Resets the hclk and phy_clock domains as follows:
-	 *	* Clears the interrupts and all the CSR registers except the
-	 *	following register bits:
-	 *	- PCGCCTL.RstPdwnModule
-	 *	- PCGCCTL.GateHclk
-	 *	- PCGCCTL.PwrClmp
-	 *	- PCGCCTL.StopPPhyLPwrClkSelclk
-	 *	- GUSBCFG.PhyLPwrClkSel
-	 *	- GUSBCFG.DDRSel
-	 *	- GUSBCFG.PHYSel
-	 *	- GUSBCFG.FSIntf
-	 *	- GUSBCFG.ULPI_UTMI_Sel
-	 *	- GUSBCFG.PHYIf
-	 *	- HCFG.FSLSPclkSel
-	 *	- DCFG.DevSpd
-	 *	* All module state machines (except the AHB Slave Unit) are
-	 *	reset to the IDLE state, and all the transmit FIFOs and the
-	 *	receive FIFO are flushed.
-	 *	* Any transactions on the AHB Master are terminated as soon
-	 *	as possible, after gracefully completing the last data phase of
-	 *	an AHB transfer. Any transactions on the USB are terminated
-	 *	immediately.
-	 *	The application can write to this bit any time it wants to reset
-	 *	the core. This is a self-clearing bit and the core clears this
-	 *	bit after all the necessary logic is reset in the core, which
-	 *	may take several clocks, depending on the current state of the
-	 *	core. Once this bit is cleared software should wait at least 3
-	 *	PHY clocks before doing any access to the PHY domain
-	 *	(synchronization delay). Software should also check that
-	 *	bit 31 of this register is 1 (AHB Master is IDLE) before
-	 *	starting any operation.
-	 *	Typically software reset is used during software development
-	 *	and also when you dynamically change the PHY selection bits
-	 *	in the USB configuration registers listed above. When you
-	 *	change the PHY, the corresponding clock for the PHY is
-	 *	selected and used in the PHY domain. Once a new clock is
-	 *	selected, the PHY domain has to be reset for proper operation.
-	 */
-	struct cvmx_usbcx_grstctl_s {
-		__BITFIELD_FIELD(u32 ahbidle		: 1,
-		__BITFIELD_FIELD(u32 dmareq		: 1,
-		__BITFIELD_FIELD(u32 reserved_11_29	: 19,
-		__BITFIELD_FIELD(u32 txfnum		: 5,
-		__BITFIELD_FIELD(u32 txfflsh		: 1,
-		__BITFIELD_FIELD(u32 rxfflsh		: 1,
-		__BITFIELD_FIELD(u32 intknqflsh		: 1,
-		__BITFIELD_FIELD(u32 frmcntrrst		: 1,
-		__BITFIELD_FIELD(u32 hsftrst		: 1,
-		__BITFIELD_FIELD(u32 csftrst		: 1,
-		;))))))))))
-	} s;
-};
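The CSftRst description above amounts to a small polling sequence: set the bit, wait for the core to clear it, wait for AHBIdle, then allow a few PHY clocks before touching the PHY domain. A sketch of that sequence, again assuming the csr helpers from the deleted octeon-hcd.c; this illustrates the documented procedure rather than the driver's own initialization path, and real code should bound the loops with a timeout:

static void octeon_usb_core_soft_reset(struct octeon_hcd *usb)
{
	union cvmx_usbcx_grstctl grstctl;

	/* request a reset of the hclk and phy_clock domains */
	grstctl.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_GRSTCTL(usb->index));
	grstctl.s.csftrst = 1;
	cvmx_usb_write_csr32(usb, CVMX_USBCX_GRSTCTL(usb->index), grstctl.u32);

	/* the bit self-clears once the core logic has been reset */
	do {
		grstctl.u32 = cvmx_usb_read_csr32(usb,
						  CVMX_USBCX_GRSTCTL(usb->index));
	} while (grstctl.s.csftrst);

	/* wait for the AHB master to go idle before any further access */
	do {
		grstctl.u32 = cvmx_usb_read_csr32(usb,
						  CVMX_USBCX_GRSTCTL(usb->index));
	} while (!grstctl.s.ahbidle);

	/* per the register description, allow at least 3 PHY clocks here */
	udelay(1);
}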
-
-/**
- * cvmx_usbc#_grxfsiz
- *
- * Receive FIFO Size Register (GRXFSIZ)
- *
- * The application can program the RAM size that must be allocated to the
- * RxFIFO.
- */
-union cvmx_usbcx_grxfsiz {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_grxfsiz_s
-	 * @rxfdep: RxFIFO Depth (RxFDep)
-	 *	This value is in terms of 32-bit words.
-	 *	* Minimum value is 16
-	 *	* Maximum value is 32768
-	 */
-	struct cvmx_usbcx_grxfsiz_s {
-		__BITFIELD_FIELD(u32 reserved_16_31	: 16,
-		__BITFIELD_FIELD(u32 rxfdep		: 16,
-		;))
-	} s;
-};
-
-/**
- * cvmx_usbc#_grxstsph
- *
- * Receive Status Read and Pop Register, Host Mode (GRXSTSPH)
- *
- * A read of the Receive Status Read and Pop register returns the top data
- * entry of the RxFIFO and additionally pops that entry out of the FIFO.
- * This Description is only valid when the core is in Host Mode. For Device Mode
- * use USBC_GRXSTSPD instead.
- * NOTE: GRXSTSPH and GRXSTSPD are physically the same register and share the
- *	 same offset in the O2P USB core. The offset difference shown in this
- *	 document is for software clarity and is actually ignored by the
- *       hardware.
- */
-union cvmx_usbcx_grxstsph {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_grxstsph_s
-	 * @pktsts: Packet Status (PktSts)
-	 *	Indicates the status of the received packet
-	 *	* 4'b0010: IN data packet received
-	 *	* 4'b0011: IN transfer completed (triggers an interrupt)
-	 *	* 4'b0101: Data toggle error (triggers an interrupt)
-	 *	* 4'b0111: Channel halted (triggers an interrupt)
-	 *	* Others: Reserved
-	 * @dpid: Data PID (DPID)
-	 *	* 2'b00: DATA0
-	 *	* 2'b10: DATA1
-	 *	* 2'b01: DATA2
-	 *	* 2'b11: MDATA
-	 * @bcnt: Byte Count (BCnt)
-	 *	Indicates the byte count of the received IN data packet
-	 * @chnum: Channel Number (ChNum)
-	 *	Indicates the channel number to which the current received
-	 *	packet belongs.
-	 */
-	struct cvmx_usbcx_grxstsph_s {
-		__BITFIELD_FIELD(u32 reserved_21_31	: 11,
-		__BITFIELD_FIELD(u32 pktsts		: 4,
-		__BITFIELD_FIELD(u32 dpid		: 2,
-		__BITFIELD_FIELD(u32 bcnt		: 11,
-		__BITFIELD_FIELD(u32 chnum		: 4,
-		;)))))
-	} s;
-};
-
-/**
- * cvmx_usbc#_gusbcfg
- *
- * Core USB Configuration Register (GUSBCFG)
- *
- * This register can be used to configure the core after power-on or after a
- * change to Host mode or Device mode. It contains USB and USB-PHY related
- * configuration parameters. The application must program this register before
- * starting any transactions on either the AHB or the USB. Do not make changes
- * to this register after the initial programming.
- */
-union cvmx_usbcx_gusbcfg {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_gusbcfg_s
-	 * @otgi2csel: UTMIFS or I2C Interface Select (OtgI2CSel)
-	 *	This bit is always 0x0.
-	 * @phylpwrclksel: PHY Low-Power Clock Select (PhyLPwrClkSel)
-	 *	Software should set this bit to 0x0.
-	 *	Selects either 480-MHz or 48-MHz (low-power) PHY mode. In
-	 *	FS and LS modes, the PHY can usually operate on a 48-MHz
-	 *	clock to save power.
-	 *	* 1'b0: 480-MHz Internal PLL clock
-	 *	* 1'b1: 48-MHz External Clock
-	 *	In 480 MHz mode, the UTMI interface operates at either 60 or
-	 *	30-MHz, depending upon whether 8- or 16-bit data width is
-	 *	selected. In 48-MHz mode, the UTMI interface operates at 48
-	 *	MHz in FS mode and at either 48 or 6 MHz in LS mode
-	 *	(depending on the PHY vendor).
-	 *	This bit drives the utmi_fsls_low_power core output signal, and
-	 *	is valid only for UTMI+ PHYs.
-	 * @usbtrdtim: USB Turnaround Time (USBTrdTim)
-	 *	Sets the turnaround time in PHY clocks.
-	 *	Specifies the response time for a MAC request to the Packet
-	 *	FIFO Controller (PFC) to fetch data from the DFIFO (SPRAM).
-	 *	This must be programmed to 0x5.
-	 * @hnpcap: HNP-Capable (HNPCap)
-	 *	This bit is always 0x0.
-	 * @srpcap: SRP-Capable (SRPCap)
-	 *	This bit is always 0x0.
-	 * @ddrsel: ULPI DDR Select (DDRSel)
-	 *	Software should set this bit to 0x0.
-	 * @physel: USB 2.0 High-Speed PHY or USB 1.1 Full-Speed Serial
-	 *	Software should set this bit to 0x0.
-	 * @fsintf: Full-Speed Serial Interface Select (FSIntf)
-	 *	Software should set this bit to 0x0.
-	 * @ulpi_utmi_sel: ULPI or UTMI+ Select (ULPI_UTMI_Sel)
-	 *	This bit is always 0x0.
-	 * @phyif: PHY Interface (PHYIf)
-	 *	This bit is always 0x1.
-	 * @toutcal: HS/FS Timeout Calibration (TOutCal)
-	 *	The number of PHY clocks that the application programs in this
-	 *	field is added to the high-speed/full-speed interpacket timeout
-	 *	duration in the core to account for any additional delays
-	 *	introduced by the PHY. This may be required, since the delay
-	 *	introduced by the PHY in generating the linestate condition may
-	 *	vary from one PHY to another.
-	 *	The USB standard timeout value for high-speed operation is
-	 *	736 to 816 (inclusive) bit times. The USB standard timeout
-	 *	value for full-speed operation is 16 to 18 (inclusive) bit
-	 *	times. The application must program this field based on the
-	 *	speed of enumeration. The number of bit times added per PHY
-	 *	clock are:
-	 *	High-speed operation:
-	 *	* One 30-MHz PHY clock = 16 bit times
-	 *	* One 60-MHz PHY clock = 8 bit times
-	 *	Full-speed operation:
-	 *	* One 30-MHz PHY clock = 0.4 bit times
-	 *	* One 60-MHz PHY clock = 0.2 bit times
-	 *	* One 48-MHz PHY clock = 0.25 bit times
-	 */
-	struct cvmx_usbcx_gusbcfg_s {
-		__BITFIELD_FIELD(u32 reserved_17_31	: 15,
-		__BITFIELD_FIELD(u32 otgi2csel		: 1,
-		__BITFIELD_FIELD(u32 phylpwrclksel	: 1,
-		__BITFIELD_FIELD(u32 reserved_14_14	: 1,
-		__BITFIELD_FIELD(u32 usbtrdtim		: 4,
-		__BITFIELD_FIELD(u32 hnpcap		: 1,
-		__BITFIELD_FIELD(u32 srpcap		: 1,
-		__BITFIELD_FIELD(u32 ddrsel		: 1,
-		__BITFIELD_FIELD(u32 physel		: 1,
-		__BITFIELD_FIELD(u32 fsintf		: 1,
-		__BITFIELD_FIELD(u32 ulpi_utmi_sel	: 1,
-		__BITFIELD_FIELD(u32 phyif		: 1,
-		__BITFIELD_FIELD(u32 toutcal		: 3,
-		;)))))))))))))
-	} s;
-};
-
-/**
- * cvmx_usbc#_haint
- *
- * Host All Channels Interrupt Register (HAINT)
- *
- * When a significant event occurs on a channel, the Host All Channels Interrupt
- * register interrupts the application using the Host Channels Interrupt bit of
- * the Core Interrupt register (GINTSTS.HChInt). This is shown in Interrupt.
- * There is one interrupt bit per channel, up to a maximum of 16 bits. Bits in
- * this register are set and cleared when the application sets and clears bits
- * in the corresponding Host Channel-n Interrupt register.
- */
-union cvmx_usbcx_haint {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_haint_s
-	 * @haint: Channel Interrupts (HAINT)
-	 *	One bit per channel: Bit 0 for Channel 0, bit 15 for Channel 15
-	 */
-	struct cvmx_usbcx_haint_s {
-		__BITFIELD_FIELD(u32 reserved_16_31	: 16,
-		__BITFIELD_FIELD(u32 haint		: 16,
-		;))
-	} s;
-};
-
-/**
- * cvmx_usbc#_haintmsk
- *
- * Host All Channels Interrupt Mask Register (HAINTMSK)
- *
- * The Host All Channel Interrupt Mask register works with the Host All Channel
- * Interrupt register to interrupt the application when an event occurs on a
- * channel. There is one interrupt mask bit per channel, up to a maximum of 16
- * bits.
- * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
- */
-union cvmx_usbcx_haintmsk {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_haintmsk_s
-	 * @haintmsk: Channel Interrupt Mask (HAINTMsk)
-	 *	One bit per channel: Bit 0 for channel 0, bit 15 for channel 15
-	 */
-	struct cvmx_usbcx_haintmsk_s {
-		__BITFIELD_FIELD(u32 reserved_16_31	: 16,
-		__BITFIELD_FIELD(u32 haintmsk		: 16,
-		;))
-	} s;
-};
-
-/**
- * cvmx_usbc#_hcchar#
- *
- * Host Channel-n Characteristics Register (HCCHAR)
- *
- */
-union cvmx_usbcx_hccharx {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_hccharx_s
-	 * @chena: Channel Enable (ChEna)
-	 *	This field is set by the application and cleared by the OTG
-	 *	host.
-	 *	* 1'b0: Channel disabled
-	 *	* 1'b1: Channel enabled
-	 * @chdis: Channel Disable (ChDis)
-	 *	The application sets this bit to stop transmitting/receiving
-	 *	data on a channel, even before the transfer for that channel is
-	 *	complete. The application must wait for the Channel Disabled
-	 *	interrupt before treating the channel as disabled.
-	 * @oddfrm: Odd Frame (OddFrm)
-	 *	This field is set (reset) by the application to indicate that
-	 *	the OTG host must perform a transfer in an odd (micro)frame.
-	 *	This field is applicable for only periodic (isochronous and
-	 *	interrupt) transactions.
-	 *	* 1'b0: Even (micro)frame
-	 *	* 1'b1: Odd (micro)frame
-	 * @devaddr: Device Address (DevAddr)
-	 *	This field selects the specific device serving as the data
-	 *	source or sink.
-	 * @ec: Multi Count (MC) / Error Count (EC)
-	 *	When the Split Enable bit of the Host Channel-n Split Control
-	 *	register (HCSPLTn.SpltEna) is reset (1'b0), this field indicates
-	 *	to the host the number of transactions that should be executed
-	 *	per microframe for this endpoint.
-	 *	* 2'b00: Reserved. This field yields undefined results.
-	 *	* 2'b01: 1 transaction
-	 *	* 2'b10: 2 transactions to be issued for this endpoint per
-	 *	microframe
-	 *	* 2'b11: 3 transactions to be issued for this endpoint per
-	 *	microframe
-	 *	When HCSPLTn.SpltEna is set (1'b1), this field indicates the
-	 *	number of immediate retries to be performed for periodic split
-	 *	transactions on transaction errors. This field must be set to at
-	 *	least 2'b01.
-	 * @eptype: Endpoint Type (EPType)
-	 *	Indicates the transfer type selected.
-	 *	* 2'b00: Control
-	 *	* 2'b01: Isochronous
-	 *	* 2'b10: Bulk
-	 *	* 2'b11: Interrupt
-	 * @lspddev: Low-Speed Device (LSpdDev)
-	 *	This field is set by the application to indicate that this
-	 *	channel is communicating to a low-speed device.
-	 * @epdir: Endpoint Direction (EPDir)
-	 *	Indicates whether the transaction is IN or OUT.
-	 *	* 1'b0: OUT
-	 *	* 1'b1: IN
-	 * @epnum: Endpoint Number (EPNum)
-	 *	Indicates the endpoint number on the device serving as the
-	 *	data source or sink.
-	 * @mps: Maximum Packet Size (MPS)
-	 *	Indicates the maximum packet size of the associated endpoint.
-	 */
-	struct cvmx_usbcx_hccharx_s {
-		__BITFIELD_FIELD(u32 chena		: 1,
-		__BITFIELD_FIELD(u32 chdis		: 1,
-		__BITFIELD_FIELD(u32 oddfrm		: 1,
-		__BITFIELD_FIELD(u32 devaddr		: 7,
-		__BITFIELD_FIELD(u32 ec			: 2,
-		__BITFIELD_FIELD(u32 eptype		: 2,
-		__BITFIELD_FIELD(u32 lspddev		: 1,
-		__BITFIELD_FIELD(u32 reserved_16_16	: 1,
-		__BITFIELD_FIELD(u32 epdir		: 1,
-		__BITFIELD_FIELD(u32 epnum		: 4,
-		__BITFIELD_FIELD(u32 mps		: 11,
-		;)))))))))))
-	} s;
-};
-
-/**
- * cvmx_usbc#_hcfg
- *
- * Host Configuration Register (HCFG)
- *
- * This register configures the core after power-on. Do not make changes to this
- * register after initializing the host.
- */
-union cvmx_usbcx_hcfg {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_hcfg_s
-	 * @fslssupp: FS- and LS-Only Support (FSLSSupp)
-	 *	The application uses this bit to control the core's enumeration
-	 *	speed. Using this bit, the application can make the core
-	 *	enumerate as a FS host, even if the connected device supports
-	 *	HS traffic. Do not make changes to this field after initial
-	 *	programming.
-	 *	* 1'b0: HS/FS/LS, based on the maximum speed supported by
-	 *	the connected device
-	 *	* 1'b1: FS/LS-only, even if the connected device can support HS
-	 * @fslspclksel: FS/LS PHY Clock Select (FSLSPclkSel)
-	 *	When the core is in FS Host mode
-	 *	* 2'b00: PHY clock is running at 30/60 MHz
-	 *	* 2'b01: PHY clock is running at 48 MHz
-	 *	* Others: Reserved
-	 *	When the core is in LS Host mode
-	 *	* 2'b00: PHY clock is running at 30/60 MHz. When the
-	 *	UTMI+/ULPI PHY Low Power mode is not selected, use
-	 *	30/60 MHz.
-	 *	* 2'b01: PHY clock is running at 48 MHz. When the UTMI+
-	 *	PHY Low Power mode is selected, use 48MHz if the PHY
-	 *	supplies a 48 MHz clock during LS mode.
-	 *	* 2'b10: PHY clock is running at 6 MHz. In USB 1.1 FS mode,
-	 *	use 6 MHz when the UTMI+ PHY Low Power mode is
-	 *	selected and the PHY supplies a 6 MHz clock during LS
-	 *	mode. If you select a 6 MHz clock during LS mode, you must
-	 *	do a soft reset.
-	 *	* 2'b11: Reserved
-	 */
-	struct cvmx_usbcx_hcfg_s {
-		__BITFIELD_FIELD(u32 reserved_3_31	: 29,
-		__BITFIELD_FIELD(u32 fslssupp		: 1,
-		__BITFIELD_FIELD(u32 fslspclksel	: 2,
-		;)))
-	} s;
-};
-
-/**
- * cvmx_usbc#_hcint#
- *
- * Host Channel-n Interrupt Register (HCINT)
- *
- * This register indicates the status of a channel with respect to USB- and
- * AHB-related events. The application must read this register when the Host
- * Channels Interrupt bit of the Core Interrupt register (GINTSTS.HChInt) is
- * set. Before the application can read this register, it must first read
- * the Host All Channels Interrupt (HAINT) register to get the exact channel
- * number for the Host Channel-n Interrupt register. The application must clear
- * the appropriate bit in this register to clear the corresponding bits in the
- * HAINT and GINTSTS registers.
- */
-union cvmx_usbcx_hcintx {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_hcintx_s
-	 * @datatglerr: Data Toggle Error (DataTglErr)
-	 * @frmovrun: Frame Overrun (FrmOvrun)
-	 * @bblerr: Babble Error (BblErr)
-	 * @xacterr: Transaction Error (XactErr)
-	 * @nyet: NYET Response Received Interrupt (NYET)
-	 * @ack: ACK Response Received Interrupt (ACK)
-	 * @nak: NAK Response Received Interrupt (NAK)
-	 * @stall: STALL Response Received Interrupt (STALL)
-	 * @ahberr: This bit is always 0x0.
-	 * @chhltd: Channel Halted (ChHltd)
-	 *	Indicates the transfer completed abnormally either because of
-	 *	any USB transaction error or in response to a disable request by
-	 *	the application.
-	 * @xfercompl: Transfer Completed (XferCompl)
-	 *	Transfer completed normally without any errors.
-	 */
-	struct cvmx_usbcx_hcintx_s {
-		__BITFIELD_FIELD(u32 reserved_11_31	: 21,
-		__BITFIELD_FIELD(u32 datatglerr		: 1,
-		__BITFIELD_FIELD(u32 frmovrun		: 1,
-		__BITFIELD_FIELD(u32 bblerr		: 1,
-		__BITFIELD_FIELD(u32 xacterr		: 1,
-		__BITFIELD_FIELD(u32 nyet		: 1,
-		__BITFIELD_FIELD(u32 ack		: 1,
-		__BITFIELD_FIELD(u32 nak		: 1,
-		__BITFIELD_FIELD(u32 stall		: 1,
-		__BITFIELD_FIELD(u32 ahberr		: 1,
-		__BITFIELD_FIELD(u32 chhltd		: 1,
-		__BITFIELD_FIELD(u32 xfercompl		: 1,
-		;))))))))))))
-	} s;
-};
-
-/**
- * cvmx_usbc#_hcintmsk#
- *
- * Host Channel-n Interrupt Mask Register (HCINTMSKn)
- *
- * This register reflects the mask for each channel status described in the
- * previous section.
- * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
- */
-union cvmx_usbcx_hcintmskx {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_hcintmskx_s
-	 * @datatglerrmsk: Data Toggle Error Mask (DataTglErrMsk)
-	 * @frmovrunmsk: Frame Overrun Mask (FrmOvrunMsk)
-	 * @bblerrmsk: Babble Error Mask (BblErrMsk)
-	 * @xacterrmsk: Transaction Error Mask (XactErrMsk)
-	 * @nyetmsk: NYET Response Received Interrupt Mask (NyetMsk)
-	 * @ackmsk: ACK Response Received Interrupt Mask (AckMsk)
-	 * @nakmsk: NAK Response Received Interrupt Mask (NakMsk)
-	 * @stallmsk: STALL Response Received Interrupt Mask (StallMsk)
-	 * @ahberrmsk: AHB Error Mask (AHBErrMsk)
-	 * @chhltdmsk: Channel Halted Mask (ChHltdMsk)
-	 * @xfercomplmsk: Transfer Completed Mask (XferComplMsk)
-	 */
-	struct cvmx_usbcx_hcintmskx_s {
-		__BITFIELD_FIELD(u32 reserved_11_31		: 21,
-		__BITFIELD_FIELD(u32 datatglerrmsk		: 1,
-		__BITFIELD_FIELD(u32 frmovrunmsk		: 1,
-		__BITFIELD_FIELD(u32 bblerrmsk			: 1,
-		__BITFIELD_FIELD(u32 xacterrmsk			: 1,
-		__BITFIELD_FIELD(u32 nyetmsk			: 1,
-		__BITFIELD_FIELD(u32 ackmsk			: 1,
-		__BITFIELD_FIELD(u32 nakmsk			: 1,
-		__BITFIELD_FIELD(u32 stallmsk			: 1,
-		__BITFIELD_FIELD(u32 ahberrmsk			: 1,
-		__BITFIELD_FIELD(u32 chhltdmsk			: 1,
-		__BITFIELD_FIELD(u32 xfercomplmsk		: 1,
-		;))))))))))))
-	} s;
-};
-
-/**
- * cvmx_usbc#_hcsplt#
- *
- * Host Channel-n Split Control Register (HCSPLT)
- *
- */
-union cvmx_usbcx_hcspltx {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_hcspltx_s
-	 * @spltena: Split Enable (SpltEna)
-	 *	The application sets this field to indicate that this channel is
-	 *	enabled to perform split transactions.
-	 * @compsplt: Do Complete Split (CompSplt)
-	 *	The application sets this field to request the OTG host to
-	 *	perform a complete split transaction.
-	 * @xactpos: Transaction Position (XactPos)
-	 *	This field is used to determine whether to send all, first,
-	 *	middle, or last payloads with each OUT transaction.
-	 *	* 2'b11: All. This is the entire data payload of this
-	 *	transaction (which is less than or equal to 188 bytes).
-	 *	* 2'b10: Begin. This is the first data payload of this
-	 *	transaction (which is larger than 188 bytes).
-	 *	* 2'b00: Mid. This is the middle payload of this transaction
-	 *	(which is larger than 188 bytes).
-	 *	* 2'b01: End. This is the last payload of this transaction
-	 *	(which is larger than 188 bytes).
-	 * @hubaddr: Hub Address (HubAddr)
-	 *	This field holds the device address of the transaction
-	 *	translator's hub.
-	 * @prtaddr: Port Address (PrtAddr)
-	 *	This field is the port number of the recipient transaction
-	 *	translator.
-	 */
-	struct cvmx_usbcx_hcspltx_s {
-		__BITFIELD_FIELD(u32 spltena			: 1,
-		__BITFIELD_FIELD(u32 reserved_17_30		: 14,
-		__BITFIELD_FIELD(u32 compsplt			: 1,
-		__BITFIELD_FIELD(u32 xactpos			: 2,
-		__BITFIELD_FIELD(u32 hubaddr			: 7,
-		__BITFIELD_FIELD(u32 prtaddr			: 7,
-		;))))))
-	} s;
-};
-
-/**
- * cvmx_usbc#_hctsiz#
- *
- * Host Channel-n Transfer Size Register (HCTSIZ)
- *
- */
-union cvmx_usbcx_hctsizx {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_hctsizx_s
-	 * @dopng: Do Ping (DoPng)
-	 *	Setting this field to 1 directs the host to do PING protocol.
-	 * @pid: PID (Pid)
-	 *	The application programs this field with the type of PID to use
-	 *	for the initial transaction. The host will maintain this field
-	 *	for the rest of the transfer.
-	 *	* 2'b00: DATA0
-	 *	* 2'b01: DATA2
-	 *	* 2'b10: DATA1
-	 *	* 2'b11: MDATA (non-control)/SETUP (control)
-	 * @pktcnt: Packet Count (PktCnt)
-	 *	This field is programmed by the application with the expected
-	 *	number of packets to be transmitted (OUT) or received (IN).
-	 *	The host decrements this count on every successful
-	 *	transmission or reception of an OUT/IN packet. Once this count
-	 *	reaches zero, the application is interrupted to indicate normal
-	 *	completion.
-	 * @xfersize: Transfer Size (XferSize)
-	 *	For an OUT, this field is the number of data bytes the host will
-	 *	send during the transfer.
-	 *	For an IN, this field is the buffer size that the application
-	 *	has reserved for the transfer. The application is expected to
-	 *	program this field as an integer multiple of the maximum packet
-	 *	size for IN transactions (periodic and non-periodic).
-	 */
-	struct cvmx_usbcx_hctsizx_s {
-		__BITFIELD_FIELD(u32 dopng		: 1,
-		__BITFIELD_FIELD(u32 pid		: 2,
-		__BITFIELD_FIELD(u32 pktcnt		: 10,
-		__BITFIELD_FIELD(u32 xfersize		: 19,
-		;))))
-	} s;
-};
-
-/**
- * cvmx_usbc#_hfir
- *
- * Host Frame Interval Register (HFIR)
- *
- * This register stores the frame interval information for the current speed to
- * which the O2P USB core has enumerated.
- */
-union cvmx_usbcx_hfir {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_hfir_s
-	 * @frint: Frame Interval (FrInt)
-	 *	The value that the application programs to this field specifies
-	 *	the interval between two consecutive SOFs (FS) or micro-
-	 *	SOFs (HS) or Keep-Alive tokens (HS). This field contains the
-	 *	number of PHY clocks that constitute the required frame
-	 *	interval. The default value set in this field is for FS operation
-	 *	when the PHY clock frequency is 60 MHz. The application can
-	 *	write a value to this register only after the Port Enable bit of
-	 *	the Host Port Control and Status register (HPRT.PrtEnaPort)
-	 *	has been set. If no value is programmed, the core calculates
-	 *	the value based on the PHY clock specified in the FS/LS PHY
-	 *	Clock Select field of the Host Configuration register
-	 *	(HCFG.FSLSPclkSel). Do not change the value of this field
-	 *	after the initial configuration.
-	 *	* 125 us (PHY clock frequency for HS)
-	 *	* 1 ms (PHY clock frequency for FS/LS)
-	 */
-	struct cvmx_usbcx_hfir_s {
-		__BITFIELD_FIELD(u32 reserved_16_31		: 16,
-		__BITFIELD_FIELD(u32 frint			: 16,
-		;))
-	} s;
-};
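As a worked example of the FrInt arithmetic: the field holds the number of PHY clocks per (micro)frame, so it is simply the PHY clock rate multiplied by the 125 us (HS) or 1 ms (FS/LS) frame period listed above. The helper below is illustrative only and is not part of this header:

static u32 usbc_hfir_frint(u32 phy_clk_hz, bool high_speed)
{
	/* 125 us microframe in HS, 1 ms frame in FS/LS. */
	u32 frame_ns = high_speed ? 125000 : 1000000;

	/* e.g. 60 MHz in HS -> 7500 clocks, 48 MHz in FS -> 48000 clocks. */
	return (u32)(((u64)phy_clk_hz * frame_ns) / 1000000000ULL);
}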
-
-/**
- * cvmx_usbc#_hfnum
- *
- * Host Frame Number/Frame Time Remaining Register (HFNUM)
- *
- * This register indicates the current frame number.
- * It also indicates the time remaining (in terms of the number of PHY clocks)
- * in the current (micro)frame.
- */
-union cvmx_usbcx_hfnum {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_hfnum_s
-	 * @frrem: Frame Time Remaining (FrRem)
-	 *	Indicates the amount of time remaining in the current
-	 *	microframe (HS) or frame (FS/LS), in terms of PHY clocks.
-	 *	This field decrements on each PHY clock. When it reaches
-	 *	zero, this field is reloaded with the value in the Frame
-	 *	Interval register and a new SOF is transmitted on the USB.
-	 * @frnum: Frame Number (FrNum)
-	 *	This field increments when a new SOF is transmitted on the
-	 *	USB, and is reset to 0 when it reaches 16'h3FFF.
-	 */
-	struct cvmx_usbcx_hfnum_s {
-		__BITFIELD_FIELD(u32 frrem		: 16,
-		__BITFIELD_FIELD(u32 frnum		: 16,
-		;))
-	} s;
-};
-
-/**
- * cvmx_usbc#_hprt
- *
- * Host Port Control and Status Register (HPRT)
- *
- * This register is available in both Host and Device modes.
- * Currently, the OTG Host supports only one port.
- * A single register holds USB port-related information such as USB reset,
- * enable, suspend, resume, connect status, and test mode for each port. The
- * R_SS_WC bits in this register can trigger an interrupt to the application
- * through the Host Port Interrupt bit of the Core Interrupt register
- * (GINTSTS.PrtInt). On a Port Interrupt, the application must read this
- * register and clear the bit that caused the interrupt. For the R_SS_WC bits,
- * the application must write a 1 to the bit to clear the interrupt.
- */
-union cvmx_usbcx_hprt {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_hprt_s
-	 * @prtspd: Port Speed (PrtSpd)
-	 *	Indicates the speed of the device attached to this port.
-	 *	* 2'b00: High speed
-	 *	* 2'b01: Full speed
-	 *	* 2'b10: Low speed
-	 *	* 2'b11: Reserved
-	 * @prttstctl: Port Test Control (PrtTstCtl)
-	 *	The application writes a nonzero value to this field to put
-	 *	the port into a Test mode, and the corresponding pattern is
-	 *	signaled on the port.
-	 *	* 4'b0000: Test mode disabled
-	 *	* 4'b0001: Test_J mode
-	 *	* 4'b0010: Test_K mode
-	 *	* 4'b0011: Test_SE0_NAK mode
-	 *	* 4'b0100: Test_Packet mode
-	 *	* 4'b0101: Test_Force_Enable
-	 *	* Others: Reserved
-	 *	PrtSpd must be zero (i.e. the interface must be in high-speed
-	 *	mode) to use the PrtTstCtl test modes.
-	 * @prtpwr: Port Power (PrtPwr)
-	 *	The application uses this field to control power to this port,
-	 *	and the core clears this bit on an overcurrent condition.
-	 *	* 1'b0: Power off
-	 *	* 1'b1: Power on
-	 * @prtlnsts: Port Line Status (PrtLnSts)
-	 *	Indicates the current logic level of the USB data lines.
-	 *	* Bit [10]: Logic level of D-
-	 *	* Bit [11]: Logic level of D+
-	 * @prtrst: Port Reset (PrtRst)
-	 *	When the application sets this bit, a reset sequence is
-	 *	started on this port. The application must time the reset
-	 *	period and clear this bit after the reset sequence is
-	 *	complete.
-	 *	* 1'b0: Port not in reset
-	 *	* 1'b1: Port in reset
-	 *	The application must leave this bit set for at least the
-	 *	minimum duration mentioned below to start a reset on the
-	 *	port. The application can leave it set for another 10 ms in
-	 *	addition to the required minimum duration, before clearing
-	 *	the bit, even though there is no maximum limit set by the
-	 *	USB standard.
-	 *	* High speed: 50 ms
-	 *	* Full speed/Low speed: 10 ms
-	 * @prtsusp: Port Suspend (PrtSusp)
-	 *	The application sets this bit to put this port in Suspend
-	 *	mode. The core only stops sending SOFs when this is set.
-	 *	To stop the PHY clock, the application must set the Port
-	 *	Clock Stop bit, which will assert the suspend input pin of
-	 *	the PHY.
-	 *	The read value of this bit reflects the current suspend
-	 *	status of the port. This bit is cleared by the core after a
-	 *	remote wakeup signal is detected or the application sets
-	 *	the Port Reset bit or Port Resume bit in this register or the
-	 *	Resume/Remote Wakeup Detected Interrupt bit or
-	 *	Disconnect Detected Interrupt bit in the Core Interrupt
-	 *	register (GINTSTS.WkUpInt or GINTSTS.DisconnInt,
-	 *	respectively).
-	 *	* 1'b0: Port not in Suspend mode
-	 *	* 1'b1: Port in Suspend mode
-	 * @prtres: Port Resume (PrtRes)
-	 *	The application sets this bit to drive resume signaling on
-	 *	the port. The core continues to drive the resume signal
-	 *	until the application clears this bit.
-	 *	If the core detects a USB remote wakeup sequence, as
-	 *	indicated by the Port Resume/Remote Wakeup Detected
-	 *	Interrupt bit of the Core Interrupt register
-	 *	(GINTSTS.WkUpInt), the core starts driving resume
-	 *	signaling without application intervention and clears this bit
-	 *	when it detects a disconnect condition. The read value of
-	 *	this bit indicates whether the core is currently driving
-	 *	resume signaling.
-	 *	* 1'b0: No resume driven
-	 *	* 1'b1: Resume driven
-	 * @prtovrcurrchng: Port Overcurrent Change (PrtOvrCurrChng)
-	 *	The core sets this bit when the status of the Port
-	 *	Overcurrent Active bit (bit 4) in this register changes.
-	 * @prtovrcurract: Port Overcurrent Active (PrtOvrCurrAct)
-	 *	Indicates the overcurrent condition of the port.
-	 *	* 1'b0: No overcurrent condition
-	 *	* 1'b1: Overcurrent condition
-	 * @prtenchng: Port Enable/Disable Change (PrtEnChng)
-	 *	The core sets this bit when the status of the Port Enable bit
-	 *	[2] of this register changes.
-	 * @prtena: Port Enable (PrtEna)
-	 *	A port is enabled only by the core after a reset sequence,
-	 *	and is disabled by an overcurrent condition, a disconnect
-	 *	condition, or by the application clearing this bit. The
-	 *	application cannot set this bit by a register write. It can only
-	 *	clear it to disable the port. This bit does not trigger any
-	 *	interrupt to the application.
-	 *	* 1'b0: Port disabled
-	 *	* 1'b1: Port enabled
-	 * @prtconndet: Port Connect Detected (PrtConnDet)
-	 *	The core sets this bit when a device connection is detected
-	 *	to trigger an interrupt to the application using the Host Port
-	 *	Interrupt bit of the Core Interrupt register (GINTSTS.PrtInt).
-	 *	The application must write a 1 to this bit to clear the
-	 *	interrupt.
-	 * @prtconnsts: Port Connect Status (PrtConnSts)
-	 *	* 0: No device is attached to the port.
-	 *	* 1: A device is attached to the port.
-	 */
-	struct cvmx_usbcx_hprt_s {
-		__BITFIELD_FIELD(u32 reserved_19_31	: 13,
-		__BITFIELD_FIELD(u32 prtspd		: 2,
-		__BITFIELD_FIELD(u32 prttstctl		: 4,
-		__BITFIELD_FIELD(u32 prtpwr		: 1,
-		__BITFIELD_FIELD(u32 prtlnsts		: 2,
-		__BITFIELD_FIELD(u32 reserved_9_9	: 1,
-		__BITFIELD_FIELD(u32 prtrst		: 1,
-		__BITFIELD_FIELD(u32 prtsusp		: 1,
-		__BITFIELD_FIELD(u32 prtres		: 1,
-		__BITFIELD_FIELD(u32 prtovrcurrchng	: 1,
-		__BITFIELD_FIELD(u32 prtovrcurract	: 1,
-		__BITFIELD_FIELD(u32 prtenchng		: 1,
-		__BITFIELD_FIELD(u32 prtena		: 1,
-		__BITFIELD_FIELD(u32 prtconndet		: 1,
-		__BITFIELD_FIELD(u32 prtconnsts		: 1,
-		;)))))))))))))))
-	} s;
-};
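A minimal sketch of the PrtRst timing described above, reusing the hypothetical usbc_read32()/usbc_write32() accessors. A real driver must additionally take care not to re-write the write-1-to-clear (R_SS_WC) status bits when it updates this register; that handling is omitted here:

static void usbc_port_reset(u64 hprt_addr, bool high_speed)
{
	union cvmx_usbcx_hprt hprt;

	hprt.u32 = usbc_read32(hprt_addr);
	hprt.s.prtrst = 1;
	usbc_write32(hprt_addr, hprt.u32);

	/* Hold the reset for at least 50 ms (HS) or 10 ms (FS/LS). */
	msleep(high_speed ? 50 : 10);

	hprt.u32 = usbc_read32(hprt_addr);
	hprt.s.prtrst = 0;
	usbc_write32(hprt_addr, hprt.u32);
}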
-
-/**
- * cvmx_usbc#_hptxfsiz
- *
- * Host Periodic Transmit FIFO Size Register (HPTXFSIZ)
- *
- * This register holds the size and the memory start address of the Periodic
- * TxFIFO, as shown in Figures 310 and 311.
- */
-union cvmx_usbcx_hptxfsiz {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_hptxfsiz_s
-	 * @ptxfsize: Host Periodic TxFIFO Depth (PTxFSize)
-	 *	This value is in terms of 32-bit words.
-	 *	* Minimum value is 16
-	 *	* Maximum value is 32768
-	 * @ptxfstaddr: Host Periodic TxFIFO Start Address (PTxFStAddr)
-	 */
-	struct cvmx_usbcx_hptxfsiz_s {
-		__BITFIELD_FIELD(u32 ptxfsize	: 16,
-		__BITFIELD_FIELD(u32 ptxfstaddr	: 16,
-		;))
-	} s;
-};
-
-/**
- * cvmx_usbc#_hptxsts
- *
- * Host Periodic Transmit FIFO/Queue Status Register (HPTXSTS)
- *
- * This read-only register contains the free space information for the Periodic
- * TxFIFO and the Periodic Transmit Request Queue
- */
-union cvmx_usbcx_hptxsts {
-	u32 u32;
-	/**
-	 * struct cvmx_usbcx_hptxsts_s
-	 * @ptxqtop: Top of the Periodic Transmit Request Queue (PTxQTop)
-	 *	This indicates the entry in the Periodic Tx Request Queue that
-	 *	is currently being processed by the MAC.
-	 *	This register is used for debugging.
-	 *	* Bit [31]: Odd/Even (micro)frame
-	 *	- 1'b0: send in even (micro)frame
-	 *	- 1'b1: send in odd (micro)frame
-	 *	* Bits [30:27]: Channel/endpoint number
-	 *	* Bits [26:25]: Type
-	 *	- 2'b00: IN/OUT
-	 *	- 2'b01: Zero-length packet
-	 *	- 2'b10: CSPLIT
-	 *	- 2'b11: Disable channel command
-	 *	* Bit [24]: Terminate (last entry for the selected
-	 *	channel/endpoint)
-	 * @ptxqspcavail: Periodic Transmit Request Queue Space Available
-	 *	(PTxQSpcAvail)
-	 *	Indicates the number of free locations available to be written
-	 *	in the Periodic Transmit Request Queue. This queue holds both
-	 *	IN and OUT requests.
-	 *	* 8'h0: Periodic Transmit Request Queue is full
-	 *	* 8'h1: 1 location available
-	 *	* 8'h2: 2 locations available
-	 *	* n: n locations available (0..8)
-	 *	* Others: Reserved
-	 * @ptxfspcavail: Periodic Transmit Data FIFO Space Available
-	 *		  (PTxFSpcAvail)
-	 *	Indicates the number of free locations available to be written
-	 *	to in the Periodic TxFIFO.
-	 *	Values are in terms of 32-bit words
-	 *	* 16'h0: Periodic TxFIFO is full
-	 *	* 16'h1: 1 word available
-	 *	* 16'h2: 2 words available
-	 *	* 16'hn: n words available (where 0..32768)
-	 *	* 16'h8000: 32768 words available
-	 *	* Others: Reserved
-	 */
-	struct cvmx_usbcx_hptxsts_s {
-		__BITFIELD_FIELD(u32 ptxqtop		: 8,
-		__BITFIELD_FIELD(u32 ptxqspcavail	: 8,
-		__BITFIELD_FIELD(u32 ptxfspcavail	: 16,
-		;)))
-	} s;
-};
-
-/**
- * cvmx_usbn#_clk_ctl
- *
- * USBN_CLK_CTL = USBN's Clock Control
- *
- * This register is used to control the frequency of the hclk and the
- * hreset and phy_rst signals.
- */
-union cvmx_usbnx_clk_ctl {
-	u64 u64;
-	/**
-	 * struct cvmx_usbnx_clk_ctl_s
-	 * @divide2: The 'hclk' used by the USB subsystem is derived
-	 *	from the eclk.
-	 *	Also see the field DIVIDE. DIVIDE2<1> must currently
-	 *	be zero because it is not implemented, so the maximum
-	 *	ratio of eclk/hclk is currently 16.
-	 *	The actual divide number for hclk is:
-	 *	(DIVIDE2 + 1) * (DIVIDE + 1)
-	 * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to
-	 *	generate the hclk in the USB Subsystem is held
-	 *	in reset. This bit must be set to '0' before
-	 *	changing the value of DIVIDE in this register.
-	 *	The reset to the HCLK_DIVIDER is also asserted
-	 *	when core reset is asserted.
-	 * @p_x_on: Force USB-PHY on during suspend.
-	 *	'1' USB-PHY XO block is powered-down during
-	 *	suspend.
-	 *	'0' USB-PHY XO block is powered-up during
-	 *	suspend.
-	 *	The value of this field must be set while POR is
-	 *	active.
-	 * @p_rtype: PHY reference clock type
-	 *	On CN50XX/CN52XX/CN56XX the values are:
-	 *		'0' The USB-PHY uses a 12MHz crystal as a clock source
-	 *		    at the USB_XO and USB_XI pins.
-	 *		'1' Reserved.
-	 *		'2' The USB_PHY uses 12/24/48MHz 2.5V board clock at the
-	 *		    USB_XO pin. USB_XI should be tied to ground in this
-	 *		    case.
-	 *		'3' Reserved.
-	 *	On CN3xxx bits 14 and 15 are p_xenbn and p_rclk and values are:
-	 *		'0' Reserved.
-	 *		'1' Reserved.
-	 *		'2' The PHY PLL uses the XO block output as a reference.
-	 *		    The XO block uses an external clock supplied on the
-	 *		    XO pin. USB_XI should be tied to ground for this
-	 *		    usage.
-	 *		'3' The XO block uses the clock from a crystal.
-	 * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to
-	 *	remain powered in Suspend Mode.
-	 *	'1' The USB-PHY XO Bias, Bandgap and PLL are
-	 *	powered down in suspend mode.
-	 *	The value of this field must be set while POR is
-	 *	active.
-	 * @p_c_sel: Phy clock speed select.
-	 *	Selects the reference clock / crystal frequency.
-	 *	'11': Reserved
-	 *	'10': 48 MHz (reserved when a crystal is used)
-	 *	'01': 24 MHz (reserved when a crystal is used)
-	 *	'00': 12 MHz
-	 *	The value of this field must be set while POR is
-	 *	active.
-	 *	NOTE: if a crystal is used as a reference clock,
-	 *	this field must be set to 12 MHz.
-	 * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV.
-	 * @sd_mode: Scaledown mode for the USBC. Controls timing events
-	 *	in the USBC; for normal operation this must be '0'.
-	 * @s_bist: Starts bist on the hclk memories, during the '0'
-	 *	to '1' transition.
-	 * @por: Power On Reset for the PHY.
-	 *	Resets all the PHY's registers and state machines.
-	 * @enable: When '1' allows the generation of the hclk. When
-	 *	'0' the hclk will not be generated. SEE DIVIDE
-	 *	field of this register.
-	 * @prst: When this field is '0' the reset associated with
-	 *	the phy_clk functionality in the USB Subsystem is
-	 *	held in reset. This bit should not be set to '1'
-	 *	until at least 6 clocks (hclk or phy_clk,
-	 *	whichever is slower) have passed. Under normal
-	 *	operation once this bit is set to '1' it should not
-	 *	be set to '0'.
-	 * @hrst: When this field is '0' the reset associated with
-	 *	the hclk functionality in the USB Subsystem is
-	 *	held in reset. This bit should not be set to '1'
-	 *	until 12 ms after phy_clk is stable. Under normal
-	 *	operation, once this bit is set to '1' it should
-	 *	not be set to '0'.
-	 * @divide: The frequency of 'hclk' used by the USB subsystem
-	 *	is the eclk frequency divided by the value of
-	 *	(DIVIDE2 + 1) * (DIVIDE + 1), also see the field
-	 *	DIVIDE2 of this register.
-	 *	The hclk frequency should be less than 125 MHz.
-	 *	After writing a value to this field, software should
-	 *	read the field back to confirm the value written.
-	 *	The ENABLE field of this register should not be set
-	 *	until AFTER this field is set and then read.
-	 */
-	struct cvmx_usbnx_clk_ctl_s {
-		__BITFIELD_FIELD(u64 reserved_20_63	: 44,
-		__BITFIELD_FIELD(u64 divide2		: 2,
-		__BITFIELD_FIELD(u64 hclk_rst		: 1,
-		__BITFIELD_FIELD(u64 p_x_on		: 1,
-		__BITFIELD_FIELD(u64 p_rtype		: 2,
-		__BITFIELD_FIELD(u64 p_com_on		: 1,
-		__BITFIELD_FIELD(u64 p_c_sel		: 2,
-		__BITFIELD_FIELD(u64 cdiv_byp		: 1,
-		__BITFIELD_FIELD(u64 sd_mode		: 2,
-		__BITFIELD_FIELD(u64 s_bist		: 1,
-		__BITFIELD_FIELD(u64 por		: 1,
-		__BITFIELD_FIELD(u64 enable		: 1,
-		__BITFIELD_FIELD(u64 prst		: 1,
-		__BITFIELD_FIELD(u64 hrst		: 1,
-		__BITFIELD_FIELD(u64 divide		: 3,
-		;)))))))))))))))
-	} s;
-};
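A small helper illustrating the DIVIDE/DIVIDE2 arithmetic and the 125 MHz ceiling described above (illustrative only, not part of this header):

static u32 usbn_hclk_hz(u32 eclk_hz, u32 divide, u32 divide2)
{
	/* hclk = eclk / ((DIVIDE2 + 1) * (DIVIDE + 1)) */
	u32 hclk_hz = eclk_hz / ((divide2 + 1) * (divide + 1));

	/* The description above requires hclk to stay below 125 MHz. */
	WARN_ON(hclk_hz >= 125000000);
	return hclk_hz;
}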
-
-/**
- * cvmx_usbn#_usbp_ctl_status
- *
- * USBN_USBP_CTL_STATUS = USBP Control And Status Register
- *
- * Contains general control and status information for the USBN block.
- */
-union cvmx_usbnx_usbp_ctl_status {
-	u64 u64;
-	/**
-	 * struct cvmx_usbnx_usbp_ctl_status_s
-	 * @txrisetune: HS Transmitter Rise/Fall Time Adjustment
-	 * @txvreftune: HS DC Voltage Level Adjustment
-	 * @txfslstune: FS/LS Source Impedance Adjustment
-	 * @txhsxvtune: Transmitter High-Speed Crossover Adjustment
-	 * @sqrxtune: Squelch Threshold Adjustment
-	 * @compdistune: Disconnect Threshold Adjustment
-	 * @otgtune: VBUS Valid Threshold Adjustment
-	 * @otgdisable: OTG Block Disable
-	 * @portreset: Per_Port Reset
-	 * @drvvbus: Drive VBUS
-	 * @lsbist: Low-Speed BIST Enable.
-	 * @fsbist: Full-Speed BIST Enable.
-	 * @hsbist: High-Speed BIST Enable.
-	 * @bist_done: PHY Bist Done.
-	 *	Asserted at the end of the PHY BIST sequence.
-	 * @bist_err: PHY Bist Error.
-	 *	Indicates an internal error was detected during
-	 *	the BIST sequence.
-	 * @tdata_out: PHY Test Data Out.
-	 *	Presents either internally generated signals or
-	 *	test register contents, based upon the value of
-	 *	test_data_out_sel.
-	 * @siddq: Drives the USBP (USB-PHY) SIDDQ input.
-	 *	Normally should be set to zero.
-	 *	When customers have no intent to use USB PHY
-	 *	interface, they should:
-	 *	- still provide 3.3V to USB_VDD33, and
-	 *	- tie USB_REXT to 3.3V supply, and
-	 *	- set USBN*_USBP_CTL_STATUS[SIDDQ]=1
-	 * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable
-	 * @dma_bmode: When set to 1 the L2C DMA address will be updated
-	 *	with byte-counts between packets. When set to 0
-	 *	the L2C DMA address is incremented to the next
-	 *	4-byte aligned address after adding byte-count.
-	 * @usbc_end: Bigendian input to the USB Core. This should be
-	 *	set to '0' for operation.
-	 * @usbp_bist: PHY BIST control. Cleared to '0' to run BIST on the USBP.
-	 * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP.
-	 * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY.
-	 *	This signal enables the pull-down resistance on
-	 *	the D+ line. '1' pull down-resistance is connected
-	 *	to D+. '0' pull down resistance is not connected
-	 *	to D+. When an A/B device is acting as a host
-	 *	(downstream-facing port), dp_pulldown and
-	 *	dm_pulldown are enabled. This must not toggle
-	 *	during normal operation.
-	 * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
-	 *	This signal enables the pull-down resistance on
-	 *	the D- line. '1' pull down-resistance is connected
-	 *	to D-. '0' pull down resistance is not connected
-	 *	to D-. When an A/B device is acting as a host
-	 *	(downstream-facing port), dp_pulldown and
-	 *	dm_pulldown are enabled. This must not toggle
-	 *	during normal operation.
-	 * @hst_mode: When '0' the USB is acting as HOST, when '1'
-	 *	USB is acting as device. This field needs to be
-	 *	set while the USB is in reset.
-	 * @tuning: Transmitter Tuning for High-Speed Operation.
-	 *	Tunes the current supply and rise/fall output
-	 *	times for high-speed operation.
-	 *	[20:19] == 11: Current supply increased
-	 *	approximately 9%
-	 *	[20:19] == 10: Current supply increased
-	 *	approximately 4.5%
-	 *	[20:19] == 01: Design default.
-	 *	[20:19] == 00: Current supply decreased
-	 *	approximately 4.5%
-	 *	[22:21] == 11: Rise and fall times are increased.
-	 *	[22:21] == 10: Design default.
-	 *	[22:21] == 01: Rise and fall times are decreased.
-	 *	[22:21] == 00: Rise and fall times are decreased
-	 *	further as compared to the 01 setting.
-	 * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
-	 *	Enables or disables bit stuffing on data[15:8]
-	 *	when bit-stuffing is enabled.
-	 * @tx_bs_en: Transmit Bit Stuffing on [7:0].
-	 *	Enables or disables bit stuffing on data[7:0]
-	 *	when bit-stuffing is enabled.
-	 * @loop_enb: PHY Loopback Test Enable.
-	 *	'1': During data transmission the receive is
-	 *	enabled.
-	 *	'0': During data transmission the receive is
-	 *	disabled.
-	 *	Must be '0' for normal operation.
-	 * @vtest_enb: Analog Test Pin Enable.
-	 *	'1' The PHY's analog_test pin is enabled for the
-	 *	input and output of applicable analog test signals.
-	 *	'0' The analog_test pin is disabled.
-	 * @bist_enb: Built-In Self Test Enable.
-	 *	Used to activate BIST in the PHY.
-	 * @tdata_sel: Test Data Out Select.
-	 *	'1' test_data_out[3:0] (PHY) register contents
-	 *	are output. '0' internally generated signals are
-	 *	output.
-	 * @taddr_in: Mode Address for Test Interface.
-	 *	Specifies the register address for writing to or
-	 *	reading from the PHY test interface register.
-	 * @tdata_in: Internal Testing Register Input Data and Select
-	 *	This is a test bus. Data is present on [3:0],
-	 *	and its corresponding select (enable) is present
-	 *	on bits [7:4].
-	 * @ate_reset: Reset input from automatic test equipment.
-	 *	This is a test signal. When the USB Core is
-	 *	powered up (not in Suspend Mode), an automatic
-	 *	tester can use this to disable phy_clock and
-	 *	free_clk, then re-enable them with an aligned
-	 *	phase.
-	 *	'1': The phy_clk and free_clk outputs are
-	 *	disabled. "0": The phy_clock and free_clk outputs
-	 *	are available within a specific period after the
-	 *	de-assertion.
-	 */
-	struct cvmx_usbnx_usbp_ctl_status_s {
-		__BITFIELD_FIELD(u64 txrisetune		: 1,
-		__BITFIELD_FIELD(u64 txvreftune		: 4,
-		__BITFIELD_FIELD(u64 txfslstune		: 4,
-		__BITFIELD_FIELD(u64 txhsxvtune		: 2,
-		__BITFIELD_FIELD(u64 sqrxtune		: 3,
-		__BITFIELD_FIELD(u64 compdistune	: 3,
-		__BITFIELD_FIELD(u64 otgtune		: 3,
-		__BITFIELD_FIELD(u64 otgdisable		: 1,
-		__BITFIELD_FIELD(u64 portreset		: 1,
-		__BITFIELD_FIELD(u64 drvvbus		: 1,
-		__BITFIELD_FIELD(u64 lsbist		: 1,
-		__BITFIELD_FIELD(u64 fsbist		: 1,
-		__BITFIELD_FIELD(u64 hsbist		: 1,
-		__BITFIELD_FIELD(u64 bist_done		: 1,
-		__BITFIELD_FIELD(u64 bist_err		: 1,
-		__BITFIELD_FIELD(u64 tdata_out		: 4,
-		__BITFIELD_FIELD(u64 siddq		: 1,
-		__BITFIELD_FIELD(u64 txpreemphasistune	: 1,
-		__BITFIELD_FIELD(u64 dma_bmode		: 1,
-		__BITFIELD_FIELD(u64 usbc_end		: 1,
-		__BITFIELD_FIELD(u64 usbp_bist		: 1,
-		__BITFIELD_FIELD(u64 tclk		: 1,
-		__BITFIELD_FIELD(u64 dp_pulld		: 1,
-		__BITFIELD_FIELD(u64 dm_pulld		: 1,
-		__BITFIELD_FIELD(u64 hst_mode		: 1,
-		__BITFIELD_FIELD(u64 tuning		: 4,
-		__BITFIELD_FIELD(u64 tx_bs_enh		: 1,
-		__BITFIELD_FIELD(u64 tx_bs_en		: 1,
-		__BITFIELD_FIELD(u64 loop_enb		: 1,
-		__BITFIELD_FIELD(u64 vtest_enb		: 1,
-		__BITFIELD_FIELD(u64 bist_enb		: 1,
-		__BITFIELD_FIELD(u64 tdata_sel		: 1,
-		__BITFIELD_FIELD(u64 taddr_in		: 4,
-		__BITFIELD_FIELD(u64 tdata_in		: 8,
-		__BITFIELD_FIELD(u64 ate_reset		: 1,
-		;)))))))))))))))))))))))))))))))))))
-	} s;
-};
-
-#endif /* __OCTEON_HCD_H__ */
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
deleted file mode 100644
index e7f4ddc..0000000
--- a/drivers/staging/octeon/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config OCTEON_ETHERNET
-	tristate "Cavium Networks Octeon Ethernet support"
-	depends on CAVIUM_OCTEON_SOC || COMPILE_TEST
-	depends on NETDEVICES
-	depends on BROKEN
-	select PHYLIB
-	select MDIO_OCTEON
-	help
-	  This driver supports the builtin ethernet ports on Cavium
-	  Networks' products in the Octeon family. This driver supports the
-	  CN3XXX and CN5XXX Octeon processors.
-
-	  To compile this driver as a module, choose M here.  The module
-	  will be called octeon-ethernet.
-
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
deleted file mode 100644
index 3887cf5..0000000
--- a/drivers/staging/octeon/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Copyright (C) 2005-2009 Cavium Networks
-#
-
-#
-# Makefile for Cavium OCTEON on-board ethernet driver
-#
-
-obj-${CONFIG_OCTEON_ETHERNET} :=  octeon-ethernet.o
-
-octeon-ethernet-y := ethernet.o
-octeon-ethernet-y += ethernet-mdio.o
-octeon-ethernet-y += ethernet-mem.o
-octeon-ethernet-y += ethernet-rgmii.o
-octeon-ethernet-y += ethernet-rx.o
-octeon-ethernet-y += ethernet-sgmii.o
-octeon-ethernet-y += ethernet-spi.o
-octeon-ethernet-y += ethernet-tx.o
diff --git a/drivers/staging/octeon/TODO b/drivers/staging/octeon/TODO
deleted file mode 100644
index 67a0a1f6..0000000
--- a/drivers/staging/octeon/TODO
+++ /dev/null
@@ -1,9 +0,0 @@
-This driver is functional and supports Ethernet on OCTEON+/OCTEON2/OCTEON3
-chips at least up to CN7030.
-
-TODO:
-	- general code review and clean up
-	- make driver self-contained instead of being split between staging and
-	  arch/mips/cavium-octeon.
-
-Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
deleted file mode 100644
index ef9e767..0000000
--- a/drivers/staging/octeon/ethernet-defines.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-/*
- * A few defines are used to control the operation of this driver:
- *  USE_ASYNC_IOBDMA
- *      Use asynchronous IO access to hardware. This uses Octeon's asynchronous
- *      IOBDMAs to issue IO accesses without stalling. Set this to zero
- *      to disable this. Note that IOBDMAs require CVMSEG.
- *  REUSE_SKBUFFS_WITHOUT_FREE
- *      Allows the TX path to free an skbuff into the FPA hardware pool. This
- *      can significantly improve performance for forwarding and bridging, but
- *      may be somewhat dangerous. Checks are made, but if any buffer is reused
- *      without the proper Linux cleanup, the networking stack may have very
- *      bizarre bugs.
- */
-#ifndef __ETHERNET_DEFINES_H__
-#define __ETHERNET_DEFINES_H__
-
-#ifdef CONFIG_NETFILTER
-#define REUSE_SKBUFFS_WITHOUT_FREE  0
-#else
-#define REUSE_SKBUFFS_WITHOUT_FREE  1
-#endif
-
-#define USE_ASYNC_IOBDMA            (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
-
-/* Maximum number of SKBs to try to free per xmit packet. */
-#define MAX_OUT_QUEUE_DEPTH 1000
-
-#define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(u32))
-#define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(u32))
-
-#define TOTAL_NUMBER_OF_PORTS       (CVMX_PIP_NUM_INPUT_PORTS + 1)
-
-#endif /* __ETHERNET_DEFINES_H__ */
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
deleted file mode 100644
index c798672..0000000
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ /dev/null
@@ -1,178 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/kernel.h>
-#include <linux/ethtool.h>
-#include <linux/phy.h>
-#include <linux/ratelimit.h>
-#include <linux/of_mdio.h>
-#include <generated/utsrelease.h>
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-mdio.h"
-#include "ethernet-util.h"
-
-static void cvm_oct_get_drvinfo(struct net_device *dev,
-				struct ethtool_drvinfo *info)
-{
-	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
-	strlcpy(info->version, UTS_RELEASE, sizeof(info->version));
-	strlcpy(info->bus_info, "Builtin", sizeof(info->bus_info));
-}
-
-static int cvm_oct_nway_reset(struct net_device *dev)
-{
-	if (!capable(CAP_NET_ADMIN))
-		return -EPERM;
-
-	if (dev->phydev)
-		return phy_start_aneg(dev->phydev);
-
-	return -EINVAL;
-}
-
-const struct ethtool_ops cvm_oct_ethtool_ops = {
-	.get_drvinfo = cvm_oct_get_drvinfo,
-	.nway_reset = cvm_oct_nway_reset,
-	.get_link = ethtool_op_get_link,
-	.get_link_ksettings = phy_ethtool_get_link_ksettings,
-	.set_link_ksettings = phy_ethtool_set_link_ksettings,
-};
-
-/**
- * cvm_oct_ioctl - IOCTL support for PHY control
- * @dev:    Device to change
- * @rq:     the request
- * @cmd:    the command
- *
- * Returns Zero on success
- */
-int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
-	if (!netif_running(dev))
-		return -EINVAL;
-
-	if (!dev->phydev)
-		return -EINVAL;
-
-	return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
-void cvm_oct_note_carrier(struct octeon_ethernet *priv,
-			  union cvmx_helper_link_info li)
-{
-	if (li.s.link_up) {
-		pr_notice_ratelimited("%s: %u Mbps %s duplex, port %d, queue %d\n",
-				      netdev_name(priv->netdev), li.s.speed,
-				      (li.s.full_duplex) ? "Full" : "Half",
-				      priv->port, priv->queue);
-	} else {
-		pr_notice_ratelimited("%s: Link down\n",
-				      netdev_name(priv->netdev));
-	}
-}
-
-void cvm_oct_adjust_link(struct net_device *dev)
-{
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	union cvmx_helper_link_info link_info;
-
-	link_info.u64		= 0;
-	link_info.s.link_up	= dev->phydev->link ? 1 : 0;
-	link_info.s.full_duplex = dev->phydev->duplex ? 1 : 0;
-	link_info.s.speed	= dev->phydev->speed;
-	priv->link_info		= link_info.u64;
-
-	/*
-	 * The polling task needs to know about link status changes.
-	 */
-	if (priv->poll)
-		priv->poll(dev);
-
-	if (priv->last_link != dev->phydev->link) {
-		priv->last_link = dev->phydev->link;
-		cvmx_helper_link_set(priv->port, link_info);
-		cvm_oct_note_carrier(priv, link_info);
-	}
-}
-
-int cvm_oct_common_stop(struct net_device *dev)
-{
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	int interface = INTERFACE(priv->port);
-	union cvmx_helper_link_info link_info;
-	union cvmx_gmxx_prtx_cfg gmx_cfg;
-	int index = INDEX(priv->port);
-
-	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
-	gmx_cfg.s.en = 0;
-	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
-
-	priv->poll = NULL;
-
-	if (dev->phydev)
-		phy_disconnect(dev->phydev);
-
-	if (priv->last_link) {
-		link_info.u64 = 0;
-		priv->last_link = 0;
-
-		cvmx_helper_link_set(priv->port, link_info);
-		cvm_oct_note_carrier(priv, link_info);
-	}
-	return 0;
-}
-
-/**
- * cvm_oct_phy_setup_device - setup the PHY
- *
- * @dev:    Device to setup
- *
- * Returns Zero on success, negative on failure
- */
-int cvm_oct_phy_setup_device(struct net_device *dev)
-{
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	struct device_node *phy_node;
-	struct phy_device *phydev = NULL;
-
-	if (!priv->of_node)
-		goto no_phy;
-
-	phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
-	if (!phy_node && of_phy_is_fixed_link(priv->of_node)) {
-		int rc;
-
-		rc = of_phy_register_fixed_link(priv->of_node);
-		if (rc)
-			return rc;
-
-		phy_node = of_node_get(priv->of_node);
-	}
-	if (!phy_node)
-		goto no_phy;
-
-	phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
-				priv->phy_mode);
-	of_node_put(phy_node);
-
-	if (!phydev)
-		return -ENODEV;
-
-	priv->last_link = 0;
-	phy_start(phydev);
-
-	return 0;
-no_phy:
-	/* If there is no phy, assume a direct MAC connection and that
-	 * the link is up.
-	 */
-	netif_carrier_on(dev);
-	return 0;
-}
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
deleted file mode 100644
index e3771d4..0000000
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/string.h>
-#include <linux/ethtool.h>
-#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
-#include <net/dst.h>
-#ifdef CONFIG_XFRM
-#include <linux/xfrm.h>
-#include <net/xfrm.h>
-#endif /* CONFIG_XFRM */
-
-extern const struct ethtool_ops cvm_oct_ethtool_ops;
-
-void octeon_mdiobus_force_mod_depencency(void);
-
-int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-int cvm_oct_phy_setup_device(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
deleted file mode 100644
index 5325949..0000000
--- a/drivers/staging/octeon/ethernet-mem.c
+++ /dev/null
@@ -1,154 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2010 Cavium Networks
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-mem.h"
-#include "ethernet-defines.h"
-
-/**
- * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
- * @pool:     Pool to allocate an skbuff for
- * @size:     Size of the buffer needed for the pool
- * @elements: Number of buffers to allocate
- *
- * Returns the actual number of buffers allocated.
- */
-static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
-{
-	int freed = elements;
-
-	while (freed) {
-		struct sk_buff *skb = dev_alloc_skb(size + 256);
-
-		if (unlikely(!skb))
-			break;
-		skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
-		*(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
-		cvmx_fpa_free(skb->data, pool, size / 128);
-		freed--;
-	}
-	return elements - freed;
-}
-
-/**
- * cvm_oct_free_hw_skbuff - free hardware pool skbuffs
- * @pool:     Pool to free skbuffs from
- * @size:     Size of each buffer in the pool
- * @elements: Number of buffers that should be in the pool
- */
-static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
-{
-	char *memory;
-
-	do {
-		memory = cvmx_fpa_alloc(pool);
-		if (memory) {
-			struct sk_buff *skb =
-			    *(struct sk_buff **)(memory - sizeof(void *));
-			elements--;
-			dev_kfree_skb(skb);
-		}
-	} while (memory);
-
-	if (elements < 0)
-		pr_warn("Freeing of pool %u had too many skbuffs (%d)\n",
-			pool, elements);
-	else if (elements > 0)
-		pr_warn("Freeing of pool %u is missing %d skbuffs\n",
-			pool, elements);
-}
-
-/**
- * cvm_oct_fill_hw_memory - fill a hardware pool with memory.
- * @pool:     Pool to populate
- * @size:     Size of each buffer in the pool
- * @elements: Number of buffers to allocate
- *
- * Returns the actual number of buffers allocated.
- */
-static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
-{
-	char *memory;
-	char *fpa;
-	int freed = elements;
-
-	while (freed) {
-		/*
-		 * FPA memory must be 128 byte aligned.  Since we are
-		 * aligning we need to save the original pointer so we
-		 * can feed it to kfree when the memory is returned to
-		 * the kernel.
-		 *
-		 * We allocate an extra 256 bytes to allow for
-		 * alignment and space for the original pointer saved
-		 * just before the block.
-		 */
-		memory = kmalloc(size + 256, GFP_ATOMIC);
-		if (unlikely(!memory)) {
-			pr_warn("Unable to allocate %u bytes for FPA pool %d\n",
-				elements * size, pool);
-			break;
-		}
-		fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL);
-		*((char **)fpa - 1) = memory;
-		cvmx_fpa_free(fpa, pool, 0);
-		freed--;
-	}
-	return elements - freed;
-}
-
-/**
- * cvm_oct_free_hw_memory - Free memory allocated by cvm_oct_fill_hw_memory
- * @pool:     FPA pool to free
- * @size:     Size of each buffer in the pool
- * @elements: Number of buffers that should be in the pool
- */
-static void cvm_oct_free_hw_memory(int pool, int size, int elements)
-{
-	char *memory;
-	char *fpa;
-
-	do {
-		fpa = cvmx_fpa_alloc(pool);
-		if (fpa) {
-			elements--;
-			fpa = (char *)phys_to_virt(cvmx_ptr_to_phys(fpa));
-			memory = *((char **)fpa - 1);
-			kfree(memory);
-		}
-	} while (fpa);
-
-	if (elements < 0)
-		pr_warn("Freeing of pool %u had too many buffers (%d)\n",
-			pool, elements);
-	else if (elements > 0)
-		pr_warn("Warning: Freeing of pool %u is missing %d buffers\n",
-			pool, elements);
-}
-
-int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
-{
-	int freed;
-
-	if (pool == CVMX_FPA_PACKET_POOL)
-		freed = cvm_oct_fill_hw_skbuff(pool, size, elements);
-	else
-		freed = cvm_oct_fill_hw_memory(pool, size, elements);
-	return freed;
-}
-
-void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
-{
-	if (pool == CVMX_FPA_PACKET_POOL)
-		cvm_oct_free_hw_skbuff(pool, size, elements);
-	else
-		cvm_oct_free_hw_memory(pool, size, elements);
-}
diff --git a/drivers/staging/octeon/ethernet-mem.h b/drivers/staging/octeon/ethernet-mem.h
deleted file mode 100644
index 692dcdb..0000000
--- a/drivers/staging/octeon/ethernet-mem.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-int cvm_oct_mem_fill_fpa(int pool, int size, int elements);
-void cvm_oct_mem_empty_fpa(int pool, int size, int elements);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
deleted file mode 100644
index 0c4fac3..0000000
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ /dev/null
@@ -1,158 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/interrupt.h>
-#include <linux/phy.h>
-#include <linux/ratelimit.h>
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-util.h"
-#include "ethernet-mdio.h"
-
-static DEFINE_SPINLOCK(global_register_lock);
-
-static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable)
-{
-	union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
-	union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
-	union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
-	int interface = INTERFACE(priv->port);
-	int index = INDEX(priv->port);
-
-	/* Set preamble checking. */
-	gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index,
-								   interface));
-	gmxx_rxx_frm_ctl.s.pre_chk = enable;
-	cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface),
-		       gmxx_rxx_frm_ctl.u64);
-
-	/* Set FCS stripping. */
-	ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
-	if (enable)
-		ipd_sub_port_fcs.s.port_bit |= 1ull << priv->port;
-	else
-		ipd_sub_port_fcs.s.port_bit &=
-					0xffffffffull ^ (1ull << priv->port);
-	cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
-
-	/* Clear any error bits. */
-	gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index,
-								   interface));
-	cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
-		       gmxx_rxx_int_reg.u64);
-}
-
-static void cvm_oct_check_preamble_errors(struct net_device *dev)
-{
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	union cvmx_helper_link_info link_info;
-	unsigned long flags;
-
-	link_info.u64 = priv->link_info;
-
-	/*
-	 * Take the global register lock since we are going to
-	 * touch registers that affect more than one port.
-	 */
-	spin_lock_irqsave(&global_register_lock, flags);
-
-	if (link_info.s.speed == 10 && priv->last_speed == 10) {
-		/*
-		 * Read the GMXX_RXX_INT_REG[PCTERR] bit and see if we are
-		 * getting preamble errors.
-		 */
-		int interface = INTERFACE(priv->port);
-		int index = INDEX(priv->port);
-		union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
-
-		gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
-							(index, interface));
-		if (gmxx_rxx_int_reg.s.pcterr) {
-			/*
-			 * We are getting preamble errors at 10Mbps. Most
-			 * likely the PHY is giving us packets with misaligned
-			 * preambles. In order to get these packets we need to
-			 * disable preamble checking and do it in software.
-			 */
-			cvm_oct_set_hw_preamble(priv, false);
-			printk_ratelimited("%s: Using 10Mbps with software preamble removal\n",
-					   dev->name);
-		}
-	} else {
-		/*
-		 * Since the 10Mbps preamble workaround is allowed we need to
-		 * enable preamble checking, FCS stripping, and clear error
-		 * bits on every speed change. If errors occur during 10Mbps
-		 * operation the above code will change this stuff
-		 */
-		if (priv->last_speed != link_info.s.speed)
-			cvm_oct_set_hw_preamble(priv, true);
-		priv->last_speed = link_info.s.speed;
-	}
-	spin_unlock_irqrestore(&global_register_lock, flags);
-}
-
-static void cvm_oct_rgmii_poll(struct net_device *dev)
-{
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	union cvmx_helper_link_info link_info;
-	bool status_change;
-
-	link_info = cvmx_helper_link_get(priv->port);
-	if (priv->link_info != link_info.u64 &&
-	    cvmx_helper_link_set(priv->port, link_info))
-		link_info.u64 = priv->link_info;
-	status_change = priv->link_info != link_info.u64;
-	priv->link_info = link_info.u64;
-
-	cvm_oct_check_preamble_errors(dev);
-
-	if (likely(!status_change))
-		return;
-
-	/* Tell core. */
-	if (link_info.s.link_up) {
-		if (!netif_carrier_ok(dev))
-			netif_carrier_on(dev);
-	} else if (netif_carrier_ok(dev)) {
-		netif_carrier_off(dev);
-	}
-	cvm_oct_note_carrier(priv, link_info);
-}
-
-int cvm_oct_rgmii_open(struct net_device *dev)
-{
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	int ret;
-
-	ret = cvm_oct_common_open(dev, cvm_oct_rgmii_poll);
-	if (ret)
-		return ret;
-
-	if (dev->phydev) {
-		/*
-		 * In phydev mode, we need still periodic polling for the
-		 * preamble error checking, and we also need to call this
-		 * function on every link state change.
-		 *
-		 * Only true RGMII ports need to be polled. In GMII mode, port
-		 * 0 is really a RGMII port.
-		 */
-		if ((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII &&
-		     priv->port  == 0) ||
-		    (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
-			priv->poll = cvm_oct_check_preamble_errors;
-			cvm_oct_check_preamble_errors(dev);
-		}
-	}
-
-	return 0;
-}
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
deleted file mode 100644
index 2c16230..0000000
--- a/drivers/staging/octeon/ethernet-rx.c
+++ /dev/null
@@ -1,538 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2010 Cavium Networks
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/cache.h>
-#include <linux/cpumask.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/string.h>
-#include <linux/prefetch.h>
-#include <linux/ratelimit.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <net/dst.h>
-#ifdef CONFIG_XFRM
-#include <linux/xfrm.h>
-#include <net/xfrm.h>
-#endif /* CONFIG_XFRM */
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-mem.h"
-#include "ethernet-rx.h"
-#include "ethernet-util.h"
-
-static atomic_t oct_rx_ready = ATOMIC_INIT(0);
-
-static struct oct_rx_group {
-	int irq;
-	int group;
-	struct napi_struct napi;
-} oct_rx_group[16];
-
-/**
- * cvm_oct_do_interrupt - interrupt handler.
- * @irq: Interrupt number.
- * @napi_id: Cookie to identify the NAPI instance.
- *
- * The interrupt occurs whenever the POW has packets in our group.
- *
- */
-static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
-{
-	/* Disable the IRQ and start napi_poll. */
-	disable_irq_nosync(irq);
-	napi_schedule(napi_id);
-
-	return IRQ_HANDLED;
-}
-
-/**
- * cvm_oct_check_rcv_error - process receive errors
- * @work: Work queue entry pointing to the packet.
- *
- * Returns Non-zero if the packet can be dropped, zero otherwise.
- */
-static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
-{
-	int port;
-
-	if (octeon_has_feature(OCTEON_FEATURE_PKND))
-		port = work->word0.pip.cn68xx.pknd;
-	else
-		port = work->word1.cn38xx.ipprt;
-
-	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
-		/*
-		 * Ignore length errors on min size packets. Some
-		 * equipment incorrectly pads packets to 64+4FCS
-		 * instead of 60+4FCS.  Note these packets still get
-		 * counted as frame errors.
-		 */
-	} else if (work->word2.snoip.err_code == 5 ||
-		   work->word2.snoip.err_code == 7) {
-		/*
-		 * We received a packet with either an alignment error
-		 * or a FCS error. This may be signalling that we are
-		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
-		 * off. If this is the case we need to parse the
-		 * packet to determine if we can remove a non spec
-		 * preamble and generate a correct packet.
-		 */
-		int interface = cvmx_helper_get_interface_num(port);
-		int index = cvmx_helper_get_interface_index_num(port);
-		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
-
-		gmxx_rxx_frm_ctl.u64 =
-		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
-		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
-			u8 *ptr =
-			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
-			int i = 0;
-
-			while (i < work->word1.len - 1) {
-				if (*ptr != 0x55)
-					break;
-				ptr++;
-				i++;
-			}
-
-			if (*ptr == 0xd5) {
-				/* Port received 0xd5 preamble */
-				work->packet_ptr.s.addr += i + 1;
-				work->word1.len -= i + 5;
-			} else if ((*ptr & 0xf) == 0xd) {
-				/* Port received 0xd preamble */
-				work->packet_ptr.s.addr += i;
-				work->word1.len -= i + 4;
-				for (i = 0; i < work->word1.len; i++) {
-					*ptr =
-					    ((*ptr & 0xf0) >> 4) |
-					    ((*(ptr + 1) & 0xf) << 4);
-					ptr++;
-				}
-			} else {
-				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
-						   port);
-				cvm_oct_free_work(work);
-				return 1;
-			}
-		}
-	} else {
-		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
-				   port, work->word2.snoip.err_code);
-		cvm_oct_free_work(work);
-		return 1;
-	}
-
-	return 0;
-}
-
-static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
-{
-	int segments = work->word2.s.bufs;
-	union cvmx_buf_ptr segment_ptr = work->packet_ptr;
-	int len = work->word1.len;
-	int segment_size;
-
-	while (segments--) {
-		union cvmx_buf_ptr next_ptr;
-
-		next_ptr = *(union cvmx_buf_ptr *)
-			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
-
-		/*
-		 * Octeon Errata PKI-100: The segment size is wrong.
-		 *
-		 * Until it is fixed, calculate the segment size based on
-		 * the packet pool buffer size.
-		 * When it is fixed, the following line should be replaced
-		 * with this one:
-		 * int segment_size = segment_ptr.s.size;
-		 */
-		segment_size =
-			CVMX_FPA_PACKET_POOL_SIZE -
-			(segment_ptr.s.addr -
-			 (((segment_ptr.s.addr >> 7) -
-			   segment_ptr.s.back) << 7));
-
-		/* Don't copy more than what is left in the packet */
-		if (segment_size > len)
-			segment_size = len;
-
-		/* Copy the data into the packet */
-		skb_put_data(skb, cvmx_phys_to_ptr(segment_ptr.s.addr),
-			     segment_size);
-		len -= segment_size;
-		segment_ptr = next_ptr;
-	}
-}
-
-static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
-{
-	const int	coreid = cvmx_get_core_num();
-	u64	old_group_mask;
-	u64	old_scratch;
-	int		rx_count = 0;
-	int		did_work_request = 0;
-	int		packet_not_copied;
-
-	/* Prefetch cvm_oct_device since we know we need it soon */
-	prefetch(cvm_oct_device);
-
-	if (USE_ASYNC_IOBDMA) {
-		/* Save scratch in case userspace is using it */
-		CVMX_SYNCIOBDMA;
-		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
-	}
-
-	/* Only allow work for our group (and preserve priorities) */
-	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
-		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
-		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
-			       BIT(rx_group->group));
-		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
-	} else {
-		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
-		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
-			       (old_group_mask & ~0xFFFFull) |
-			       BIT(rx_group->group));
-	}
-
-	if (USE_ASYNC_IOBDMA) {
-		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
-		did_work_request = 1;
-	}
-
-	while (rx_count < budget) {
-		struct sk_buff *skb = NULL;
-		struct sk_buff **pskb = NULL;
-		int skb_in_hw;
-		struct cvmx_wqe *work;
-		int port;
-
-		if (USE_ASYNC_IOBDMA && did_work_request)
-			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
-		else
-			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
-
-		prefetch(work);
-		did_work_request = 0;
-		if (!work) {
-			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
-				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
-					       BIT(rx_group->group));
-				cvmx_write_csr(CVMX_SSO_WQ_INT,
-					       BIT(rx_group->group));
-			} else {
-				union cvmx_pow_wq_int wq_int;
-
-				wq_int.u64 = 0;
-				wq_int.s.iq_dis = BIT(rx_group->group);
-				wq_int.s.wq_int = BIT(rx_group->group);
-				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
-			}
-			break;
-		}
-		pskb = (struct sk_buff **)
-			(cvm_oct_get_buffer_ptr(work->packet_ptr) -
-			sizeof(void *));
-		prefetch(pskb);
-
-		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
-			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
-							    CVMX_POW_NO_WAIT);
-			did_work_request = 1;
-		}
-		rx_count++;
-
-		skb_in_hw = work->word2.s.bufs == 1;
-		if (likely(skb_in_hw)) {
-			skb = *pskb;
-			prefetch(&skb->head);
-			prefetch(&skb->len);
-		}
-
-		if (octeon_has_feature(OCTEON_FEATURE_PKND))
-			port = work->word0.pip.cn68xx.pknd;
-		else
-			port = work->word1.cn38xx.ipprt;
-
-		prefetch(cvm_oct_device[port]);
-
-		/* Immediately throw away all packets with receive errors */
-		if (unlikely(work->word2.snoip.rcv_error)) {
-			if (cvm_oct_check_rcv_error(work))
-				continue;
-		}
-
-		/*
-		 * We can only use the zero copy path if skbuffs are
-		 * in the FPA pool and the packet fits in a single
-		 * buffer.
-		 */
-		if (likely(skb_in_hw)) {
-			skb->data = skb->head + work->packet_ptr.s.addr -
-				cvmx_ptr_to_phys(skb->head);
-			prefetch(skb->data);
-			skb->len = work->word1.len;
-			skb_set_tail_pointer(skb, skb->len);
-			packet_not_copied = 1;
-		} else {
-			/*
-			 * We have to copy the packet. First allocate
-			 * an skbuff for it.
-			 */
-			skb = dev_alloc_skb(work->word1.len);
-			if (!skb) {
-				cvm_oct_free_work(work);
-				continue;
-			}
-
-			/*
-			 * Check if we've received a packet that was
-			 * entirely stored in the work entry.
-			 */
-			if (unlikely(work->word2.s.bufs == 0)) {
-				u8 *ptr = work->packet_data;
-
-				if (likely(!work->word2.s.not_IP)) {
-					/*
-					 * The beginning of the packet
-					 * moves for IP packets.
-					 */
-					if (work->word2.s.is_v6)
-						ptr += 2;
-					else
-						ptr += 6;
-				}
-				skb_put_data(skb, ptr, work->word1.len);
-				/* No packet buffers to free */
-			} else {
-				copy_segments_to_skb(work, skb);
-			}
-			packet_not_copied = 0;
-		}
-		if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
-			   cvm_oct_device[port])) {
-			struct net_device *dev = cvm_oct_device[port];
-
-			/*
-			 * Only accept packets for devices that are
-			 * currently up.
-			 */
-			if (likely(dev->flags & IFF_UP)) {
-				skb->protocol = eth_type_trans(skb, dev);
-				skb->dev = dev;
-
-				if (unlikely(work->word2.s.not_IP ||
-					     work->word2.s.IP_exc ||
-					     work->word2.s.L4_error ||
-					     !work->word2.s.tcp_or_udp))
-					skb->ip_summed = CHECKSUM_NONE;
-				else
-					skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-				/* Increment RX stats for virtual ports */
-				if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
-					dev->stats.rx_packets++;
-					dev->stats.rx_bytes += skb->len;
-				}
-				netif_receive_skb(skb);
-			} else {
-				/*
-				 * Drop any packet received for a device that
-				 * isn't up.
-				 */
-				dev->stats.rx_dropped++;
-				dev_kfree_skb_irq(skb);
-			}
-		} else {
-			/*
-			 * Drop any packet received for a device that
-			 * doesn't exist.
-			 */
-			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
-					   port);
-			dev_kfree_skb_irq(skb);
-		}
-		/*
-		 * Check to see if the skbuff and work share the same
-		 * packet buffer.
-		 */
-		if (likely(packet_not_copied)) {
-			/*
-			 * This buffer needs to be replaced, increment
-			 * the number of buffers we need to free by
-			 * one.
-			 */
-			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
-					      1);
-
-			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
-		} else {
-			cvm_oct_free_work(work);
-		}
-	}
-	/* Restore the original POW group mask */
-	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
-		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
-		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
-	} else {
-		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
-	}
-
-	if (USE_ASYNC_IOBDMA) {
-		/* Restore the scratch area */
-		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
-	}
-	cvm_oct_rx_refill_pool(0);
-
-	return rx_count;
-}
-
-/**
- * cvm_oct_napi_poll - the NAPI poll function.
- * @napi: The NAPI instance.
- * @budget: Maximum number of packets to receive.
- *
- * Returns the number of packets processed.
- */
-static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
-{
-	struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
-						     napi);
-	int rx_count;
-
-	rx_count = cvm_oct_poll(rx_group, budget);
-
-	if (rx_count < budget) {
-		/* No more work */
-		napi_complete_done(napi, rx_count);
-		enable_irq(rx_group->irq);
-	}
-	return rx_count;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * cvm_oct_poll_controller - poll for receive packets
- * device.
- *
- * @dev:    Device to poll. Unused
- */
-void cvm_oct_poll_controller(struct net_device *dev)
-{
-	int i;
-
-	if (!atomic_read(&oct_rx_ready))
-		return;
-
-	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
-		if (!(pow_receive_groups & BIT(i)))
-			continue;
-
-		cvm_oct_poll(&oct_rx_group[i], 16);
-	}
-}
-#endif
-
-void cvm_oct_rx_initialize(void)
-{
-	int i;
-	struct net_device *dev_for_napi = NULL;
-
-	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
-		if (cvm_oct_device[i]) {
-			dev_for_napi = cvm_oct_device[i];
-			break;
-		}
-	}
-
-	if (!dev_for_napi)
-		panic("No net_devices were allocated.");
-
-	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
-		int ret;
-
-		if (!(pow_receive_groups & BIT(i)))
-			continue;
-
-		netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
-			       cvm_oct_napi_poll, rx_napi_weight);
-		napi_enable(&oct_rx_group[i].napi);
-
-		oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
-		oct_rx_group[i].group = i;
-
-		/* Register an IRQ handler to receive POW interrupts */
-		ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
-				  "Ethernet", &oct_rx_group[i].napi);
-		if (ret)
-			panic("Could not acquire Ethernet IRQ %d\n",
-			      oct_rx_group[i].irq);
-
-		disable_irq_nosync(oct_rx_group[i].irq);
-
-		/* Enable POW interrupt when our port has at least one packet */
-		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
-			union cvmx_sso_wq_int_thrx int_thr;
-			union cvmx_pow_wq_int_pc int_pc;
-
-			int_thr.u64 = 0;
-			int_thr.s.tc_en = 1;
-			int_thr.s.tc_thr = 1;
-			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);
-
-			int_pc.u64 = 0;
-			int_pc.s.pc_thr = 5;
-			cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
-		} else {
-			union cvmx_pow_wq_int_thrx int_thr;
-			union cvmx_pow_wq_int_pc int_pc;
-
-			int_thr.u64 = 0;
-			int_thr.s.tc_en = 1;
-			int_thr.s.tc_thr = 1;
-			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);
-
-			int_pc.u64 = 0;
-			int_pc.s.pc_thr = 5;
-			cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
-		}
-
-		/* Schedule NAPI now. This will indirectly enable the
-		 * interrupt.
-		 */
-		napi_schedule(&oct_rx_group[i].napi);
-	}
-	atomic_inc(&oct_rx_ready);
-}
-
-void cvm_oct_rx_shutdown(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
-		if (!(pow_receive_groups & BIT(i)))
-			continue;
-
-		/* Disable POW interrupt */
-		if (OCTEON_IS_MODEL(OCTEON_CN68XX))
-			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
-		else
-			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);
-
-		/* Free the interrupt handler */
-		free_irq(oct_rx_group[i].irq, cvm_oct_device);
-
-		netif_napi_del(&oct_rx_group[i].napi);
-	}
-}
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
deleted file mode 100644
index ff6482f..0000000
--- a/drivers/staging/octeon/ethernet-rx.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-void cvm_oct_poll_controller(struct net_device *dev);
-void cvm_oct_rx_initialize(void);
-void cvm_oct_rx_shutdown(void);
-
-static inline void cvm_oct_rx_refill_pool(int fill_threshold)
-{
-	int number_to_free;
-	int num_freed;
-	/* Refill the packet buffer pool */
-	number_to_free =
-		cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
-
-	if (number_to_free > fill_threshold) {
-		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
-				      -number_to_free);
-		num_freed = cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
-						 CVMX_FPA_PACKET_POOL_SIZE,
-						 number_to_free);
-		if (num_freed != number_to_free) {
-			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
-					      number_to_free - num_freed);
-		}
-	}
-}
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
deleted file mode 100644
index d7fbd91..0000000
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ /dev/null
@@ -1,30 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/phy.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/ratelimit.h>
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-util.h"
-#include "ethernet-mdio.h"
-
-int cvm_oct_sgmii_open(struct net_device *dev)
-{
-	return cvm_oct_common_open(dev, cvm_oct_link_poll);
-}
-
-int cvm_oct_sgmii_init(struct net_device *dev)
-{
-	cvm_oct_common_init(dev);
-
-	/* FIXME: Need autoneg logic */
-	return 0;
-}
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
deleted file mode 100644
index c582403e..0000000
--- a/drivers/staging/octeon/ethernet-spi.c
+++ /dev/null
@@ -1,226 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/interrupt.h>
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-util.h"
-
-static int number_spi_ports;
-static int need_retrain[2] = { 0, 0 };
-
-static void cvm_oct_spxx_int_pr(union cvmx_spxx_int_reg spx_int_reg, int index)
-{
-	if (spx_int_reg.s.spf)
-		pr_err("SPI%d: SRX Spi4 interface down\n", index);
-	if (spx_int_reg.s.calerr)
-		pr_err("SPI%d: SRX Spi4 Calendar table parity error\n", index);
-	if (spx_int_reg.s.syncerr)
-		pr_err("SPI%d: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n",
-		       index);
-	if (spx_int_reg.s.diperr)
-		pr_err("SPI%d: SRX Spi4 DIP4 error\n", index);
-	if (spx_int_reg.s.tpaovr)
-		pr_err("SPI%d: SRX Selected port has hit TPA overflow\n",
-		       index);
-	if (spx_int_reg.s.rsverr)
-		pr_err("SPI%d: SRX Spi4 reserved control word detected\n",
-		       index);
-	if (spx_int_reg.s.drwnng)
-		pr_err("SPI%d: SRX Spi4 receive FIFO drowning/overflow\n",
-		       index);
-	if (spx_int_reg.s.clserr)
-		pr_err("SPI%d: SRX Spi4 packet closed on non-16B alignment without EOP\n",
-		       index);
-	if (spx_int_reg.s.spiovr)
-		pr_err("SPI%d: SRX Spi4 async FIFO overflow\n", index);
-	if (spx_int_reg.s.abnorm)
-		pr_err("SPI%d: SRX Abnormal packet termination (ERR bit)\n",
-		       index);
-	if (spx_int_reg.s.prtnxa)
-		pr_err("SPI%d: SRX Port out of range\n", index);
-}
-
-static void cvm_oct_stxx_int_pr(union cvmx_stxx_int_reg stx_int_reg, int index)
-{
-	if (stx_int_reg.s.syncerr)
-		pr_err("SPI%d: STX Interface encountered a fatal error\n",
-		       index);
-	if (stx_int_reg.s.frmerr)
-		pr_err("SPI%d: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n",
-		       index);
-	if (stx_int_reg.s.unxfrm)
-		pr_err("SPI%d: STX Unexpected framing sequence\n", index);
-	if (stx_int_reg.s.nosync)
-		pr_err("SPI%d: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n",
-		       index);
-	if (stx_int_reg.s.diperr)
-		pr_err("SPI%d: STX DIP2 error on the Spi4 Status channel\n",
-		       index);
-	if (stx_int_reg.s.datovr)
-		pr_err("SPI%d: STX Spi4 FIFO overflow error\n", index);
-	if (stx_int_reg.s.ovrbst)
-		pr_err("SPI%d: STX Transmit packet burst too big\n", index);
-	if (stx_int_reg.s.calpar1)
-		pr_err("SPI%d: STX Calendar Table Parity Error Bank%d\n",
-		       index, 1);
-	if (stx_int_reg.s.calpar0)
-		pr_err("SPI%d: STX Calendar Table Parity Error Bank%d\n",
-		       index, 0);
-}
-
-static irqreturn_t cvm_oct_spi_spx_int(int index)
-{
-	union cvmx_spxx_int_reg spx_int_reg;
-	union cvmx_stxx_int_reg stx_int_reg;
-
-	spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(index));
-	cvmx_write_csr(CVMX_SPXX_INT_REG(index), spx_int_reg.u64);
-	if (!need_retrain[index]) {
-		spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(index));
-		cvm_oct_spxx_int_pr(spx_int_reg, index);
-	}
-
-	stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(index));
-	cvmx_write_csr(CVMX_STXX_INT_REG(index), stx_int_reg.u64);
-	if (!need_retrain[index]) {
-		stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(index));
-		cvm_oct_stxx_int_pr(stx_int_reg, index);
-	}
-
-	cvmx_write_csr(CVMX_SPXX_INT_MSK(index), 0);
-	cvmx_write_csr(CVMX_STXX_INT_MSK(index), 0);
-	need_retrain[index] = 1;
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
-{
-	irqreturn_t return_status = IRQ_NONE;
-	union cvmx_npi_rsl_int_blocks rsl_int_blocks;
-
-	/* Check and see if this interrupt was caused by the GMX block */
-	rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
-	if (rsl_int_blocks.s.spx1) /* 19 - SPX1_INT_REG & STX1_INT_REG */
-		return_status = cvm_oct_spi_spx_int(1);
-
-	if (rsl_int_blocks.s.spx0) /* 18 - SPX0_INT_REG & STX0_INT_REG */
-		return_status = cvm_oct_spi_spx_int(0);
-
-	return return_status;
-}
-
-static void cvm_oct_spi_enable_error_reporting(int interface)
-{
-	union cvmx_spxx_int_msk spxx_int_msk;
-	union cvmx_stxx_int_msk stxx_int_msk;
-
-	spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
-	spxx_int_msk.s.calerr = 1;
-	spxx_int_msk.s.syncerr = 1;
-	spxx_int_msk.s.diperr = 1;
-	spxx_int_msk.s.tpaovr = 1;
-	spxx_int_msk.s.rsverr = 1;
-	spxx_int_msk.s.drwnng = 1;
-	spxx_int_msk.s.clserr = 1;
-	spxx_int_msk.s.spiovr = 1;
-	spxx_int_msk.s.abnorm = 1;
-	spxx_int_msk.s.prtnxa = 1;
-	cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
-
-	stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
-	stxx_int_msk.s.frmerr = 1;
-	stxx_int_msk.s.unxfrm = 1;
-	stxx_int_msk.s.nosync = 1;
-	stxx_int_msk.s.diperr = 1;
-	stxx_int_msk.s.datovr = 1;
-	stxx_int_msk.s.ovrbst = 1;
-	stxx_int_msk.s.calpar1 = 1;
-	stxx_int_msk.s.calpar0 = 1;
-	cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
-}
-
-static void cvm_oct_spi_poll(struct net_device *dev)
-{
-	static int spi4000_port;
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	int interface;
-
-	for (interface = 0; interface < 2; interface++) {
-		if ((priv->port == interface * 16) && need_retrain[interface]) {
-			if (cvmx_spi_restart_interface
-			    (interface, CVMX_SPI_MODE_DUPLEX, 10) == 0) {
-				need_retrain[interface] = 0;
-				cvm_oct_spi_enable_error_reporting(interface);
-			}
-		}
-
-		/*
-		 * The SPI4000 TWSI interface is very slow. In order
-		 * not to bring the system to a crawl, we only poll a
-		 * single port every second. This means negotiation
-		 * speed changes take up to 10 seconds, but at least
-		 * we don't waste absurd amounts of time waiting for
-		 * TWSI.
-		 */
-		if (priv->port == spi4000_port) {
-			/*
-			 * This function does nothing if it is called on an
-			 * interface without a SPI4000.
-			 */
-			cvmx_spi4000_check_speed(interface, priv->port);
-			/*
-			 * Normal ordering increments. By decrementing
-			 * we only match once per iteration.
-			 */
-			spi4000_port--;
-			if (spi4000_port < 0)
-				spi4000_port = 10;
-		}
-	}
-}
-
-int cvm_oct_spi_init(struct net_device *dev)
-{
-	int r;
-	struct octeon_ethernet *priv = netdev_priv(dev);
-
-	if (number_spi_ports == 0) {
-		r = request_irq(OCTEON_IRQ_RML, cvm_oct_spi_rml_interrupt,
-				IRQF_SHARED, "SPI", &number_spi_ports);
-		if (r)
-			return r;
-	}
-	number_spi_ports++;
-
-	if ((priv->port == 0) || (priv->port == 16)) {
-		cvm_oct_spi_enable_error_reporting(INTERFACE(priv->port));
-		priv->poll = cvm_oct_spi_poll;
-	}
-	cvm_oct_common_init(dev);
-	return 0;
-}
-
-void cvm_oct_spi_uninit(struct net_device *dev)
-{
-	int interface;
-
-	cvm_oct_common_uninit(dev);
-	number_spi_ports--;
-	if (number_spi_ports == 0) {
-		for (interface = 0; interface < 2; interface++) {
-			cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
-			cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
-		}
-		free_irq(OCTEON_IRQ_RML, &number_spi_ports);
-	}
-}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
deleted file mode 100644
index b334cf8..0000000
--- a/drivers/staging/octeon/ethernet-tx.c
+++ /dev/null
@@ -1,717 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2010 Cavium Networks
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/ratelimit.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <net/dst.h>
-#ifdef CONFIG_XFRM
-#include <linux/xfrm.h>
-#include <net/xfrm.h>
-#endif /* CONFIG_XFRM */
-
-#include <linux/atomic.h>
-#include <net/sch_generic.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-tx.h"
-#include "ethernet-util.h"
-
-#define CVM_OCT_SKB_CB(skb)	((u64 *)((skb)->cb))
-
-/*
- * You can define GET_SKBUFF_QOS() to override how the skbuff output
- * function determines which output queue is used. The default
- * implementation always uses the base queue for the port. If, for
- * example, you wanted to use the skb->priority field, define
- * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
- */
-#ifndef GET_SKBUFF_QOS
-#define GET_SKBUFF_QOS(skb) 0
-#endif
-
-static void cvm_oct_tx_do_cleanup(unsigned long arg);
-static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
-
-/* Maximum number of SKBs to try to free per xmit packet. */
-#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
-
-static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau)
-{
-	int undo;
-
-	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
-						   MAX_SKB_TO_FREE;
-	if (undo > 0)
-		cvmx_fau_atomic_add32(fau, -undo);
-	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
-						       -skb_to_free;
-	return skb_to_free;
-}
-
-static void cvm_oct_kick_tx_poll_watchdog(void)
-{
-	union cvmx_ciu_timx ciu_timx;
-
-	ciu_timx.u64 = 0;
-	ciu_timx.s.one_shot = 1;
-	ciu_timx.s.len = cvm_oct_tx_poll_interval;
-	cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
-}
-
-static void cvm_oct_free_tx_skbs(struct net_device *dev)
-{
-	int skb_to_free;
-	int qos, queues_per_port;
-	int total_freed = 0;
-	int total_remaining = 0;
-	unsigned long flags;
-	struct octeon_ethernet *priv = netdev_priv(dev);
-
-	queues_per_port = cvmx_pko_get_num_queues(priv->port);
-	/* Drain any pending packets in the free list */
-	for (qos = 0; qos < queues_per_port; qos++) {
-		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
-			continue;
-		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
-						       MAX_SKB_TO_FREE);
-		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
-							 priv->fau + qos * 4);
-		total_freed += skb_to_free;
-		if (skb_to_free > 0) {
-			struct sk_buff *to_free_list = NULL;
-
-			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
-			while (skb_to_free > 0) {
-				struct sk_buff *t;
-
-				t = __skb_dequeue(&priv->tx_free_list[qos]);
-				t->next = to_free_list;
-				to_free_list = t;
-				skb_to_free--;
-			}
-			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
-					       flags);
-			/* Do the actual freeing outside of the lock. */
-			while (to_free_list) {
-				struct sk_buff *t = to_free_list;
-
-				to_free_list = to_free_list->next;
-				dev_kfree_skb_any(t);
-			}
-		}
-		total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
-	}
-	if (total_remaining < MAX_OUT_QUEUE_DEPTH && netif_queue_stopped(dev))
-		netif_wake_queue(dev);
-	if (total_remaining)
-		cvm_oct_kick_tx_poll_watchdog();
-}
-
-/**
- * cvm_oct_xmit - transmit a packet
- * @skb:    Packet to send
- * @dev:    Device info structure
- *
- * Returns Always returns NETDEV_TX_OK
- */
-int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	union cvmx_pko_command_word0 pko_command;
-	union cvmx_buf_ptr hw_buffer;
-	u64 old_scratch;
-	u64 old_scratch2;
-	int qos;
-	int i;
-	enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	struct sk_buff *to_free_list;
-	int skb_to_free;
-	int buffers_to_free;
-	u32 total_to_clean;
-	unsigned long flags;
-#if REUSE_SKBUFFS_WITHOUT_FREE
-	unsigned char *fpa_head;
-#endif
-
-	/*
-	 * Prefetch the private data structure.  It is larger than the
-	 * one cache line.
-	 */
-	prefetch(priv);
-
-	/*
-	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
-	 * completely remove "qos" in the event neither interface
-	 * supports multiple queues per port.
-	 */
-	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
-	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
-		qos = GET_SKBUFF_QOS(skb);
-		if (qos <= 0)
-			qos = 0;
-		else if (qos >= cvmx_pko_get_num_queues(priv->port))
-			qos = 0;
-	} else {
-		qos = 0;
-	}
-
-	if (USE_ASYNC_IOBDMA) {
-		/* Save scratch in case userspace is using it */
-		CVMX_SYNCIOBDMA;
-		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
-		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
-
-		/*
-		 * Fetch and increment the number of packets to be
-		 * freed.
-		 */
-		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
-					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
-					       0);
-		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
-					       priv->fau + qos * 4,
-					       MAX_SKB_TO_FREE);
-	}
-
-	/*
-	 * We have space for 6 segment pointers, If there will be more
-	 * than that, we must linearize.
-	 */
-	if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
-		if (unlikely(__skb_linearize(skb))) {
-			queue_type = QUEUE_DROP;
-			if (USE_ASYNC_IOBDMA) {
-				/*
-				 * Get the number of skbuffs in use
-				 * by the hardware
-				 */
-				CVMX_SYNCIOBDMA;
-				skb_to_free =
-					cvmx_scratch_read64(CVMX_SCR_SCRATCH);
-			} else {
-				/*
-				 * Get the number of skbuffs in use
-				 * by the hardware
-				 */
-				skb_to_free =
-				     cvmx_fau_fetch_and_add32(priv->fau +
-							      qos * 4,
-							      MAX_SKB_TO_FREE);
-			}
-			skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
-								 priv->fau +
-								 qos * 4);
-			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
-			goto skip_xmit;
-		}
-	}
-
-	/*
-	 * The CN3XXX series of parts has an errata (GMX-401) which
-	 * causes the GMX block to hang if a collision occurs towards
-	 * the end of a <68 byte packet. As a workaround for this, we
-	 * pad packets to be 68 bytes whenever we are in half duplex
-	 * mode. We don't handle the case of having a small packet but
-	 * no room to add the padding.  The kernel should always give
-	 * us at least a cache line
-	 */
-	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
-		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
-		int interface = INTERFACE(priv->port);
-		int index = INDEX(priv->port);
-
-		if (interface < 2) {
-			/* We only need to pad packet in half duplex mode */
-			gmx_prt_cfg.u64 =
-			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
-			if (gmx_prt_cfg.s.duplex == 0) {
-				int add_bytes = 64 - skb->len;
-
-				if ((skb_tail_pointer(skb) + add_bytes) <=
-				    skb_end_pointer(skb))
-					__skb_put_zero(skb, add_bytes);
-			}
-		}
-	}
-
-	/* Build the PKO command */
-	pko_command.u64 = 0;
-#ifdef __LITTLE_ENDIAN
-	pko_command.s.le = 1;
-#endif
-	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
-	pko_command.s.segs = 1;
-	pko_command.s.total_bytes = skb->len;
-	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
-	pko_command.s.subone0 = 1;
-
-	pko_command.s.dontfree = 1;
-
-	/* Build the PKO buffer pointer */
-	hw_buffer.u64 = 0;
-	if (skb_shinfo(skb)->nr_frags == 0) {
-		hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
-		hw_buffer.s.pool = 0;
-		hw_buffer.s.size = skb->len;
-	} else {
-		hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
-		hw_buffer.s.pool = 0;
-		hw_buffer.s.size = skb_headlen(skb);
-		CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			skb_frag_t *fs = skb_shinfo(skb)->frags + i;
-
-			hw_buffer.s.addr =
-				XKPHYS_TO_PHYS((uintptr_t)skb_frag_address(fs));
-			hw_buffer.s.size = skb_frag_size(fs);
-			CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
-		}
-		hw_buffer.s.addr =
-			XKPHYS_TO_PHYS((uintptr_t)CVM_OCT_SKB_CB(skb));
-		hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
-		pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
-		pko_command.s.gather = 1;
-		goto dont_put_skbuff_in_hw;
-	}
-
-	/*
-	 * See if we can put this skb in the FPA pool. Any strange
-	 * behavior from the Linux networking stack will most likely
-	 * be caused by a bug in the following code. If some field is
-	 * in use by the network stack and gets carried over when a
-	 * buffer is reused, bad things may happen.  If in doubt and
-	 * you dont need the absolute best performance, disable the
-	 * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
-	 * shown a 25% increase in performance under some loads.
-	 */
-#if REUSE_SKBUFFS_WITHOUT_FREE
-	fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
-	if (unlikely(skb->data < fpa_head)) {
-		/* TX buffer beginning can't meet FPA alignment constraints */
-		goto dont_put_skbuff_in_hw;
-	}
-	if (unlikely
-	    ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
-		/* TX buffer isn't large enough for the FPA */
-		goto dont_put_skbuff_in_hw;
-	}
-	if (unlikely(skb_shared(skb))) {
-		/* TX buffer sharing data with someone else */
-		goto dont_put_skbuff_in_hw;
-	}
-	if (unlikely(skb_cloned(skb))) {
-		/* TX buffer has been cloned */
-		goto dont_put_skbuff_in_hw;
-	}
-	if (unlikely(skb_header_cloned(skb))) {
-		/* TX buffer header has been cloned */
-		goto dont_put_skbuff_in_hw;
-	}
-	if (unlikely(skb->destructor)) {
-		/* TX buffer has a destructor */
-		goto dont_put_skbuff_in_hw;
-	}
-	if (unlikely(skb_shinfo(skb)->nr_frags)) {
-		/* TX buffer has fragments */
-		goto dont_put_skbuff_in_hw;
-	}
-	if (unlikely
-	    (skb->truesize !=
-	     sizeof(*skb) + skb_end_offset(skb))) {
-		/* TX buffer truesize has been changed */
-		goto dont_put_skbuff_in_hw;
-	}
-
-	/*
-	 * We can use this buffer in the FPA.  We don't need the FAU
-	 * update anymore
-	 */
-	pko_command.s.dontfree = 0;
-
-	hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
-			   ((unsigned long)fpa_head >> 7);
-
-	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
-
-	/*
-	 * The skbuff will be reused without ever being freed. We must
-	 * cleanup a bunch of core things.
-	 */
-	dst_release(skb_dst(skb));
-	skb_dst_set(skb, NULL);
-	skb_ext_reset(skb);
-	nf_reset_ct(skb);
-
-#ifdef CONFIG_NET_SCHED
-	skb->tc_index = 0;
-	skb_reset_tc(skb);
-#endif /* CONFIG_NET_SCHED */
-#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
-
-dont_put_skbuff_in_hw:
-
-	/* Check if we can use the hardware checksumming */
-	if ((skb->protocol == htons(ETH_P_IP)) &&
-	    (ip_hdr(skb)->version == 4) &&
-	    (ip_hdr(skb)->ihl == 5) &&
-	    ((ip_hdr(skb)->frag_off == 0) ||
-	     (ip_hdr(skb)->frag_off == htons(1 << 14))) &&
-	    ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
-	     (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
-		/* Use hardware checksum calc */
-		pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
-	}
-
-	if (USE_ASYNC_IOBDMA) {
-		/* Get the number of skbuffs in use by the hardware */
-		CVMX_SYNCIOBDMA;
-		skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
-		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
-	} else {
-		/* Get the number of skbuffs in use by the hardware */
-		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
-						       MAX_SKB_TO_FREE);
-		buffers_to_free =
-		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
-	}
-
-	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
-						 priv->fau + qos * 4);
-
-	/*
-	 * If we're sending faster than the receive can free them then
-	 * don't do the HW free.
-	 */
-	if ((buffers_to_free < -100) && !pko_command.s.dontfree)
-		pko_command.s.dontfree = 1;
-
-	if (pko_command.s.dontfree) {
-		queue_type = QUEUE_CORE;
-		pko_command.s.reg0 = priv->fau + qos * 4;
-	} else {
-		queue_type = QUEUE_HW;
-	}
-	if (USE_ASYNC_IOBDMA)
-		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
-					       FAU_TOTAL_TX_TO_CLEAN, 1);
-
-	spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
-
-	/* Drop this packet if we have too many already queued to the HW */
-	if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
-		     MAX_OUT_QUEUE_DEPTH)) {
-		if (dev->tx_queue_len != 0) {
-			/* Drop the lock when notifying the core.  */
-			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
-					       flags);
-			netif_stop_queue(dev);
-			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
-					  flags);
-		} else {
-			/* If not using normal queueing.  */
-			queue_type = QUEUE_DROP;
-			goto skip_xmit;
-		}
-	}
-
-	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
-				     CVMX_PKO_LOCK_NONE);
-
-	/* Send the packet to the output queue */
-	if (unlikely(cvmx_pko_send_packet_finish(priv->port,
-						 priv->queue + qos,
-						 pko_command, hw_buffer,
-						 CVMX_PKO_LOCK_NONE))) {
-		printk_ratelimited("%s: Failed to send the packet\n",
-				   dev->name);
-		queue_type = QUEUE_DROP;
-	}
-skip_xmit:
-	to_free_list = NULL;
-
-	switch (queue_type) {
-	case QUEUE_DROP:
-		skb->next = to_free_list;
-		to_free_list = skb;
-		dev->stats.tx_dropped++;
-		break;
-	case QUEUE_HW:
-		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
-		break;
-	case QUEUE_CORE:
-		__skb_queue_tail(&priv->tx_free_list[qos], skb);
-		break;
-	default:
-		BUG();
-	}
-
-	while (skb_to_free > 0) {
-		struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
-
-		t->next = to_free_list;
-		to_free_list = t;
-		skb_to_free--;
-	}
-
-	spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
-
-	/* Do the actual freeing outside of the lock. */
-	while (to_free_list) {
-		struct sk_buff *t = to_free_list;
-
-		to_free_list = to_free_list->next;
-		dev_kfree_skb_any(t);
-	}
-
-	if (USE_ASYNC_IOBDMA) {
-		CVMX_SYNCIOBDMA;
-		total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
-		/* Restore the scratch area */
-		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
-		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
-	} else {
-		total_to_clean =
-			cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
-	}
-
-	if (total_to_clean & 0x3ff) {
-		/*
-		 * Schedule the cleanup tasklet every 1024 packets for
-		 * the pathological case of high traffic on one port
-		 * delaying clean up of packets on a different port
-		 * that is blocked waiting for the cleanup.
-		 */
-		tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
-	}
-
-	cvm_oct_kick_tx_poll_watchdog();
-
-	return NETDEV_TX_OK;
-}
-
-/**
- * cvm_oct_xmit_pow - transmit a packet to the POW
- * @skb:    Packet to send
- * @dev:    Device info structure
-
- * Returns Always returns zero
- */
-int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
-{
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	void *packet_buffer;
-	void *copy_location;
-
-	/* Get a work queue entry */
-	struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
-
-	if (unlikely(!work)) {
-		printk_ratelimited("%s: Failed to allocate a work queue entry\n",
-				   dev->name);
-		dev->stats.tx_dropped++;
-		dev_kfree_skb_any(skb);
-		return 0;
-	}
-
-	/* Get a packet buffer */
-	packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
-	if (unlikely(!packet_buffer)) {
-		printk_ratelimited("%s: Failed to allocate a packet buffer\n",
-				   dev->name);
-		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
-		dev->stats.tx_dropped++;
-		dev_kfree_skb_any(skb);
-		return 0;
-	}
-
-	/*
-	 * Calculate where we need to copy the data to. We need to
-	 * leave 8 bytes for a next pointer (unused). We also need to
-	 * include any configure skip. Then we need to align the IP
-	 * packet src and dest into the same 64bit word. The below
-	 * calculation may add a little extra, but that doesn't
-	 * hurt.
-	 */
-	copy_location = packet_buffer + sizeof(u64);
-	copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;
-
-	/*
-	 * We have to copy the packet since whoever processes this
-	 * packet will free it to a hardware pool. We can't use the
-	 * trick of counting outstanding packets like in
-	 * cvm_oct_xmit.
-	 */
-	memcpy(copy_location, skb->data, skb->len);
-
-	/*
-	 * Fill in some of the work queue fields. We may need to add
-	 * more if the software at the other end needs them.
-	 */
-	if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
-		work->word0.pip.cn38xx.hw_chksum = skb->csum;
-	work->word1.len = skb->len;
-	cvmx_wqe_set_port(work, priv->port);
-	cvmx_wqe_set_qos(work, priv->port & 0x7);
-	cvmx_wqe_set_grp(work, pow_send_group);
-	work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
-	work->word1.tag = pow_send_group;	/* FIXME */
-	/* Default to zero. Sets of zero later are commented out */
-	work->word2.u64 = 0;
-	work->word2.s.bufs = 1;
-	work->packet_ptr.u64 = 0;
-	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
-	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
-	work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
-	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
-
-	if (skb->protocol == htons(ETH_P_IP)) {
-		work->word2.s.ip_offset = 14;
-#if 0
-		work->word2.s.vlan_valid = 0;	/* FIXME */
-		work->word2.s.vlan_cfi = 0;	/* FIXME */
-		work->word2.s.vlan_id = 0;	/* FIXME */
-		work->word2.s.dec_ipcomp = 0;	/* FIXME */
-#endif
-		work->word2.s.tcp_or_udp =
-		    (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
-		    (ip_hdr(skb)->protocol == IPPROTO_UDP);
-#if 0
-		/* FIXME */
-		work->word2.s.dec_ipsec = 0;
-		/* We only support IPv4 right now */
-		work->word2.s.is_v6 = 0;
-		/* Hardware would set to zero */
-		work->word2.s.software = 0;
-		/* No error, packet is internal */
-		work->word2.s.L4_error = 0;
-#endif
-		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
-					  (ip_hdr(skb)->frag_off ==
-					      cpu_to_be16(1 << 14)));
-#if 0
-		/* Assume Linux is sending a good packet */
-		work->word2.s.IP_exc = 0;
-#endif
-		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
-		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
-#if 0
-		/* This is an IP packet */
-		work->word2.s.not_IP = 0;
-		/* No error, packet is internal */
-		work->word2.s.rcv_error = 0;
-		/* No error, packet is internal */
-		work->word2.s.err_code = 0;
-#endif
-
-		/*
-		 * When copying the data, include 4 bytes of the
-		 * ethernet header to align the same way hardware
-		 * does.
-		 */
-		memcpy(work->packet_data, skb->data + 10,
-		       sizeof(work->packet_data));
-	} else {
-#if 0
-		work->word2.snoip.vlan_valid = 0;	/* FIXME */
-		work->word2.snoip.vlan_cfi = 0;	/* FIXME */
-		work->word2.snoip.vlan_id = 0;	/* FIXME */
-		work->word2.snoip.software = 0;	/* Hardware would set to zero */
-#endif
-		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
-		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
-		work->word2.snoip.is_bcast =
-		    (skb->pkt_type == PACKET_BROADCAST);
-		work->word2.snoip.is_mcast =
-		    (skb->pkt_type == PACKET_MULTICAST);
-		work->word2.snoip.not_IP = 1;	/* IP was done up above */
-#if 0
-		/* No error, packet is internal */
-		work->word2.snoip.rcv_error = 0;
-		/* No error, packet is internal */
-		work->word2.snoip.err_code = 0;
-#endif
-		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
-	}
-
-	/* Submit the packet to the POW */
-	cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
-			     cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
-	dev_consume_skb_any(skb);
-	return 0;
-}
-
-/**
- * cvm_oct_tx_shutdown_dev - free all skb that are currently queued for TX.
- * @dev:    Device being shutdown
- *
- */
-void cvm_oct_tx_shutdown_dev(struct net_device *dev)
-{
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	unsigned long flags;
-	int qos;
-
-	for (qos = 0; qos < 16; qos++) {
-		spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
-		while (skb_queue_len(&priv->tx_free_list[qos]))
-			dev_kfree_skb_any(__skb_dequeue
-					  (&priv->tx_free_list[qos]));
-		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
-	}
-}
-
-static void cvm_oct_tx_do_cleanup(unsigned long arg)
-{
-	int port;
-
-	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
-		if (cvm_oct_device[port]) {
-			struct net_device *dev = cvm_oct_device[port];
-
-			cvm_oct_free_tx_skbs(dev);
-		}
-	}
-}
-
-static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
-{
-	/* Disable the interrupt.  */
-	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
-	/* Do the work in the tasklet.  */
-	tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
-	return IRQ_HANDLED;
-}
-
-void cvm_oct_tx_initialize(void)
-{
-	int i;
-
-	/* Disable the interrupt.  */
-	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
-	/* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
-	i = request_irq(OCTEON_IRQ_TIMER1,
-			cvm_oct_tx_cleanup_watchdog, 0,
-			"Ethernet", cvm_oct_device);
-
-	if (i)
-		panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
-}
-
-void cvm_oct_tx_shutdown(void)
-{
-	/* Free the interrupt handler */
-	free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
-}
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
deleted file mode 100644
index 78936e9..0000000
--- a/drivers/staging/octeon/ethernet-tx.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
-int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
-int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
-			 int do_free, int qos);
-void cvm_oct_tx_initialize(void);
-void cvm_oct_tx_shutdown(void);
-void cvm_oct_tx_shutdown_dev(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
deleted file mode 100644
index 2af83a1..0000000
--- a/drivers/staging/octeon/ethernet-util.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-/**
- * cvm_oct_get_buffer_ptr - convert packet data address to pointer
- * @packet_ptr: Packet data hardware address
- *
- * Returns Packet buffer pointer
- */
-static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
-{
-	return cvmx_phys_to_ptr(((packet_ptr.s.addr >> 7) - packet_ptr.s.back)
-				<< 7);
-}
-
-/**
- * INTERFACE - convert IPD port to logical interface
- * @ipd_port: Port to check
- *
- * Returns Logical interface
- */
-static inline int INTERFACE(int ipd_port)
-{
-	int interface;
-
-	if (ipd_port == CVMX_PIP_NUM_INPUT_PORTS)
-		return 10;
-	interface = cvmx_helper_get_interface_num(ipd_port);
-	if (interface >= 0)
-		return interface;
-	panic("Illegal ipd_port %d passed to %s\n", ipd_port, __func__);
-}
-
-/**
- * INDEX - convert IPD/PKO port number to the port's interface index
- * @ipd_port: Port to check
- *
- * Returns Index into interface port list
- */
-static inline int INDEX(int ipd_port)
-{
-	return cvmx_helper_get_interface_index_num(ipd_port);
-}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
deleted file mode 100644
index f42c381..0000000
--- a/drivers/staging/octeon/ethernet.c
+++ /dev/null
@@ -1,992 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/platform_device.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/phy.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/of_net.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-mem.h"
-#include "ethernet-rx.h"
-#include "ethernet-tx.h"
-#include "ethernet-mdio.h"
-#include "ethernet-util.h"
-
-#define OCTEON_MAX_MTU 65392
-
-static int num_packet_buffers = 1024;
-module_param(num_packet_buffers, int, 0444);
-MODULE_PARM_DESC(num_packet_buffers, "\n"
-	"\tNumber of packet buffers to allocate and store in the\n"
-	"\tFPA. By default, 1024 packet buffers are used.\n");
-
-static int pow_receive_group = 15;
-module_param(pow_receive_group, int, 0444);
-MODULE_PARM_DESC(pow_receive_group, "\n"
-	"\tPOW group to receive packets from. All ethernet hardware\n"
-	"\twill be configured to send incoming packets to this POW\n"
-	"\tgroup. Also any other software can submit packets to this\n"
-	"\tgroup for the kernel to process.");
-
-static int receive_group_order;
-module_param(receive_group_order, int, 0444);
-MODULE_PARM_DESC(receive_group_order, "\n"
-	"\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
-	"\twill be configured to send incoming packets to multiple POW\n"
-	"\tgroups. pow_receive_group parameter is ignored when multiple\n"
-	"\tgroups are taken into use and groups are allocated starting\n"
-	"\tfrom 0. By default, a single group is used.\n");
-
-int pow_send_group = -1;
-module_param(pow_send_group, int, 0644);
-MODULE_PARM_DESC(pow_send_group, "\n"
-	"\tPOW group to send packets to other software on. This\n"
-	"\tcontrols the creation of the virtual device pow0.\n"
-	"\talways_use_pow also depends on this value.");
-
-int always_use_pow;
-module_param(always_use_pow, int, 0444);
-MODULE_PARM_DESC(always_use_pow, "\n"
-	"\tWhen set, always send to the pow group. This will cause\n"
-	"\tpackets sent to real ethernet devices to be sent to the\n"
-	"\tPOW group instead of the hardware. Unless some other\n"
-	"\tapplication changes the config, packets will still be\n"
-	"\treceived from the low level hardware. Use this option\n"
-	"\tto allow a CVMX app to intercept all packets from the\n"
-	"\tlinux kernel. You must specify pow_send_group along with\n"
-	"\tthis option.");
-
-char pow_send_list[128] = "";
-module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
-MODULE_PARM_DESC(pow_send_list, "\n"
-	"\tComma separated list of ethernet devices that should use the\n"
-	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
-	"\tis a per port version of always_use_pow. always_use_pow takes\n"
-	"\tprecedence over this list. For example, setting this to\n"
-	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
-	"\tusing the pow_send_group.");
-
-int rx_napi_weight = 32;
-module_param(rx_napi_weight, int, 0444);
-MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
-
-/* Mask indicating which receive groups are in use. */
-int pow_receive_groups;
-
-/*
- * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
- *
- * Set to one right before cvm_oct_poll_queue is destroyed.
- */
-atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
-
-/*
- * Array of every ethernet device owned by this driver indexed by
- * the ipd input port number.
- */
-struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
-
-u64 cvm_oct_tx_poll_interval;
-
-static void cvm_oct_rx_refill_worker(struct work_struct *work);
-static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
-
-static void cvm_oct_rx_refill_worker(struct work_struct *work)
-{
-	/*
-	 * FPA 0 may have been drained, try to refill it if we need
-	 * more than num_packet_buffers / 2, otherwise normal receive
-	 * processing will refill it.  If it were drained, no packets
-	 * could be received so cvm_oct_napi_poll would never be
-	 * invoked to do the refill.
-	 */
-	cvm_oct_rx_refill_pool(num_packet_buffers / 2);
-
-	if (!atomic_read(&cvm_oct_poll_queue_stopping))
-		schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
-}
-
-static void cvm_oct_periodic_worker(struct work_struct *work)
-{
-	struct octeon_ethernet *priv = container_of(work,
-						    struct octeon_ethernet,
-						    port_periodic_work.work);
-
-	if (priv->poll)
-		priv->poll(cvm_oct_device[priv->port]);
-
-	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats
-						(cvm_oct_device[priv->port]);
-
-	if (!atomic_read(&cvm_oct_poll_queue_stopping))
-		schedule_delayed_work(&priv->port_periodic_work, HZ);
-}
-
-static void cvm_oct_configure_common_hw(void)
-{
-	/* Setup the FPA */
-	cvmx_fpa_enable();
-	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
-			     num_packet_buffers);
-	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
-			     num_packet_buffers);
-	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
-		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
-				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);
-
-#ifdef __LITTLE_ENDIAN
-	{
-		union cvmx_ipd_ctl_status ipd_ctl_status;
-
-		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
-		ipd_ctl_status.s.pkt_lend = 1;
-		ipd_ctl_status.s.wqe_lend = 1;
-		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
-	}
-#endif
-
-	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
-}
-
-/**
- * cvm_oct_free_work- Free a work queue entry
- *
- * @work_queue_entry: Work queue entry to free
- *
- * Returns Zero on success, Negative on failure.
- */
-int cvm_oct_free_work(void *work_queue_entry)
-{
-	struct cvmx_wqe *work = work_queue_entry;
-
-	int segments = work->word2.s.bufs;
-	union cvmx_buf_ptr segment_ptr = work->packet_ptr;
-
-	while (segments--) {
-		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
-			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
-		if (unlikely(!segment_ptr.s.i))
-			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
-				      segment_ptr.s.pool,
-				      CVMX_FPA_PACKET_POOL_SIZE / 128);
-		segment_ptr = next_ptr;
-	}
-	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
-
-	return 0;
-}
-EXPORT_SYMBOL(cvm_oct_free_work);
-
-/**
- * cvm_oct_common_get_stats - get the low level ethernet statistics
- * @dev:    Device to get the statistics from
- *
- * Returns Pointer to the statistics
- */
-static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
-{
-	cvmx_pip_port_status_t rx_status;
-	cvmx_pko_port_status_t tx_status;
-	struct octeon_ethernet *priv = netdev_priv(dev);
-
-	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
-		if (octeon_is_simulation()) {
-			/* The simulator doesn't support statistics */
-			memset(&rx_status, 0, sizeof(rx_status));
-			memset(&tx_status, 0, sizeof(tx_status));
-		} else {
-			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
-			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
-		}
-
-		dev->stats.rx_packets += rx_status.inb_packets;
-		dev->stats.tx_packets += tx_status.packets;
-		dev->stats.rx_bytes += rx_status.inb_octets;
-		dev->stats.tx_bytes += tx_status.octets;
-		dev->stats.multicast += rx_status.multicast_packets;
-		dev->stats.rx_crc_errors += rx_status.inb_errors;
-		dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
-		dev->stats.rx_dropped += rx_status.dropped_packets;
-	}
-
-	return &dev->stats;
-}
-
-/**
- * cvm_oct_common_change_mtu - change the link MTU
- * @dev:     Device to change
- * @new_mtu: The new MTU
- *
- * Returns Zero on success
- */
-static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
-{
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	int interface = INTERFACE(priv->port);
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
-	int vlan_bytes = VLAN_HLEN;
-#else
-	int vlan_bytes = 0;
-#endif
-	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;
-
-	dev->mtu = new_mtu;
-
-	if ((interface < 2) &&
-	    (cvmx_helper_interface_get_mode(interface) !=
-		CVMX_HELPER_INTERFACE_MODE_SPI)) {
-		int index = INDEX(priv->port);
-		/* Add ethernet header and FCS, and VLAN if configured. */
-		int max_packet = new_mtu + mtu_overhead;
-
-		if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
-		    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
-			/* Signal errors on packets larger than the MTU */
-			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
-				       max_packet);
-		} else {
-			/*
-			 * Set the hardware to truncate packets larger
-			 * than the MTU and smaller than 64 bytes.
-			 */
-			union cvmx_pip_frm_len_chkx frm_len_chk;
-
-			frm_len_chk.u64 = 0;
-			frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
-			frm_len_chk.s.maxlen = max_packet;
-			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
-				       frm_len_chk.u64);
-		}
-		/*
-		 * Set the hardware to truncate packets larger than
-		 * the MTU. The jabber register must be set to a
-		 * multiple of 8 bytes, so round up.
-		 */
-		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
-			       (max_packet + 7) & ~7u);
-	}
-	return 0;
-}
-
-/**
- * cvm_oct_common_set_multicast_list - set the multicast list
- * @dev:    Device to work on
- */
-static void cvm_oct_common_set_multicast_list(struct net_device *dev)
-{
-	union cvmx_gmxx_prtx_cfg gmx_cfg;
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	int interface = INTERFACE(priv->port);
-
-	if ((interface < 2) &&
-	    (cvmx_helper_interface_get_mode(interface) !=
-		CVMX_HELPER_INTERFACE_MODE_SPI)) {
-		union cvmx_gmxx_rxx_adr_ctl control;
-		int index = INDEX(priv->port);
-
-		control.u64 = 0;
-		control.s.bcst = 1;	/* Allow broadcast MAC addresses */
-
-		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
-		    (dev->flags & IFF_PROMISC))
-			/* Force accept multicast packets */
-			control.s.mcst = 2;
-		else
-			/* Force reject multicast packets */
-			control.s.mcst = 1;
-
-		if (dev->flags & IFF_PROMISC)
-			/*
-			 * Reject matches if promisc. Since CAM is
-			 * shut off, should accept everything.
-			 */
-			control.s.cam_mode = 0;
-		else
-			/* Filter packets based on the CAM */
-			control.s.cam_mode = 1;
-
-		gmx_cfg.u64 =
-		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
-		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
-			       gmx_cfg.u64 & ~1ull);
-
-		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
-			       control.u64);
-		if (dev->flags & IFF_PROMISC)
-			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
-				       (index, interface), 0);
-		else
-			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
-				       (index, interface), 1);
-
-		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
-			       gmx_cfg.u64);
-	}
-}
-
-static int cvm_oct_set_mac_filter(struct net_device *dev)
-{
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	union cvmx_gmxx_prtx_cfg gmx_cfg;
-	int interface = INTERFACE(priv->port);
-
-	if ((interface < 2) &&
-	    (cvmx_helper_interface_get_mode(interface) !=
-		CVMX_HELPER_INTERFACE_MODE_SPI)) {
-		int i;
-		u8 *ptr = dev->dev_addr;
-		u64 mac = 0;
-		int index = INDEX(priv->port);
-
-		for (i = 0; i < 6; i++)
-			mac = (mac << 8) | (u64)ptr[i];
-
-		gmx_cfg.u64 =
-		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
-		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
-			       gmx_cfg.u64 & ~1ull);
-
-		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
-		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
-			       ptr[0]);
-		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
-			       ptr[1]);
-		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
-			       ptr[2]);
-		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
-			       ptr[3]);
-		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
-			       ptr[4]);
-		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
-			       ptr[5]);
-		cvm_oct_common_set_multicast_list(dev);
-		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
-			       gmx_cfg.u64);
-	}
-	return 0;
-}
-
-/**
- * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
- * @dev:    The device in question.
- * @addr:   Socket address.
- *
- * Returns Zero on success
- */
-static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
-{
-	int r = eth_mac_addr(dev, addr);
-
-	if (r)
-		return r;
-	return cvm_oct_set_mac_filter(dev);
-}
-
-/**
- * cvm_oct_common_init - per network device initialization
- * @dev:    Device to initialize
- *
- * Returns Zero on success
- */
-int cvm_oct_common_init(struct net_device *dev)
-{
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	const u8 *mac = NULL;
-
-	if (priv->of_node)
-		mac = of_get_mac_address(priv->of_node);
-
-	if (!IS_ERR_OR_NULL(mac))
-		ether_addr_copy(dev->dev_addr, mac);
-	else
-		eth_hw_addr_random(dev);
-
-	/*
-	 * Force the interface to use the POW send if always_use_pow
-	 * was specified or it is in the pow send list.
-	 */
-	if ((pow_send_group != -1) &&
-	    (always_use_pow || strstr(pow_send_list, dev->name)))
-		priv->queue = -1;
-
-	if (priv->queue != -1)
-		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
-
-	/* We do our own locking, Linux doesn't need to */
-	dev->features |= NETIF_F_LLTX;
-	dev->ethtool_ops = &cvm_oct_ethtool_ops;
-
-	cvm_oct_set_mac_filter(dev);
-	dev_set_mtu(dev, dev->mtu);
-
-	/*
-	 * Zero out stats for port so we won't mistakenly show
-	 * counters from the bootloader.
-	 */
-	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
-	       sizeof(struct net_device_stats));
-
-	if (dev->netdev_ops->ndo_stop)
-		dev->netdev_ops->ndo_stop(dev);
-
-	return 0;
-}
-
-void cvm_oct_common_uninit(struct net_device *dev)
-{
-	if (dev->phydev)
-		phy_disconnect(dev->phydev);
-}
-
-int cvm_oct_common_open(struct net_device *dev,
-			void (*link_poll)(struct net_device *))
-{
-	union cvmx_gmxx_prtx_cfg gmx_cfg;
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	int interface = INTERFACE(priv->port);
-	int index = INDEX(priv->port);
-	union cvmx_helper_link_info link_info;
-	int rv;
-
-	rv = cvm_oct_phy_setup_device(dev);
-	if (rv)
-		return rv;
-
-	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
-	gmx_cfg.s.en = 1;
-	if (octeon_has_feature(OCTEON_FEATURE_PKND))
-		gmx_cfg.s.pknd = priv->port;
-	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
-
-	if (octeon_is_simulation())
-		return 0;
-
-	if (dev->phydev) {
-		int r = phy_read_status(dev->phydev);
-
-		if (r == 0 && dev->phydev->link == 0)
-			netif_carrier_off(dev);
-		cvm_oct_adjust_link(dev);
-	} else {
-		link_info = cvmx_helper_link_get(priv->port);
-		if (!link_info.s.link_up)
-			netif_carrier_off(dev);
-		priv->poll = link_poll;
-		link_poll(dev);
-	}
-
-	return 0;
-}
-
-void cvm_oct_link_poll(struct net_device *dev)
-{
-	struct octeon_ethernet *priv = netdev_priv(dev);
-	union cvmx_helper_link_info link_info;
-
-	link_info = cvmx_helper_link_get(priv->port);
-	if (link_info.u64 == priv->link_info)
-		return;
-
-	if (cvmx_helper_link_set(priv->port, link_info))
-		link_info.u64 = priv->link_info;
-	else
-		priv->link_info = link_info.u64;
-
-	if (link_info.s.link_up) {
-		if (!netif_carrier_ok(dev))
-			netif_carrier_on(dev);
-	} else if (netif_carrier_ok(dev)) {
-		netif_carrier_off(dev);
-	}
-	cvm_oct_note_carrier(priv, link_info);
-}
-
-static int cvm_oct_xaui_open(struct net_device *dev)
-{
-	return cvm_oct_common_open(dev, cvm_oct_link_poll);
-}
-
-static const struct net_device_ops cvm_oct_npi_netdev_ops = {
-	.ndo_init		= cvm_oct_common_init,
-	.ndo_uninit		= cvm_oct_common_uninit,
-	.ndo_start_xmit		= cvm_oct_xmit,
-	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
-	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
-	.ndo_do_ioctl		= cvm_oct_ioctl,
-	.ndo_change_mtu		= cvm_oct_common_change_mtu,
-	.ndo_get_stats		= cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
-	.ndo_init		= cvm_oct_common_init,
-	.ndo_uninit		= cvm_oct_common_uninit,
-	.ndo_open		= cvm_oct_xaui_open,
-	.ndo_stop		= cvm_oct_common_stop,
-	.ndo_start_xmit		= cvm_oct_xmit,
-	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
-	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
-	.ndo_do_ioctl		= cvm_oct_ioctl,
-	.ndo_change_mtu		= cvm_oct_common_change_mtu,
-	.ndo_get_stats		= cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
-	.ndo_init		= cvm_oct_sgmii_init,
-	.ndo_uninit		= cvm_oct_common_uninit,
-	.ndo_open		= cvm_oct_sgmii_open,
-	.ndo_stop		= cvm_oct_common_stop,
-	.ndo_start_xmit		= cvm_oct_xmit,
-	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
-	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
-	.ndo_do_ioctl		= cvm_oct_ioctl,
-	.ndo_change_mtu		= cvm_oct_common_change_mtu,
-	.ndo_get_stats		= cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_spi_netdev_ops = {
-	.ndo_init		= cvm_oct_spi_init,
-	.ndo_uninit		= cvm_oct_spi_uninit,
-	.ndo_start_xmit		= cvm_oct_xmit,
-	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
-	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
-	.ndo_do_ioctl		= cvm_oct_ioctl,
-	.ndo_change_mtu		= cvm_oct_common_change_mtu,
-	.ndo_get_stats		= cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
-	.ndo_init		= cvm_oct_common_init,
-	.ndo_uninit		= cvm_oct_common_uninit,
-	.ndo_open		= cvm_oct_rgmii_open,
-	.ndo_stop		= cvm_oct_common_stop,
-	.ndo_start_xmit		= cvm_oct_xmit,
-	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
-	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
-	.ndo_do_ioctl		= cvm_oct_ioctl,
-	.ndo_change_mtu		= cvm_oct_common_change_mtu,
-	.ndo_get_stats		= cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_pow_netdev_ops = {
-	.ndo_init		= cvm_oct_common_init,
-	.ndo_start_xmit		= cvm_oct_xmit_pow,
-	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
-	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
-	.ndo_do_ioctl		= cvm_oct_ioctl,
-	.ndo_change_mtu		= cvm_oct_common_change_mtu,
-	.ndo_get_stats		= cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= cvm_oct_poll_controller,
-#endif
-};
-
-static struct device_node *cvm_oct_of_get_child
-				(const struct device_node *parent, int reg_val)
-{
-	struct device_node *node = NULL;
-	int size;
-	const __be32 *addr;
-
-	for (;;) {
-		node = of_get_next_child(parent, node);
-		if (!node)
-			break;
-		addr = of_get_property(node, "reg", &size);
-		if (addr && (be32_to_cpu(*addr) == reg_val))
-			break;
-	}
-	return node;
-}
-
-static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
-						 int interface, int port)
-{
-	struct device_node *ni, *np;
-
-	ni = cvm_oct_of_get_child(pip, interface);
-	if (!ni)
-		return NULL;
-
-	np = cvm_oct_of_get_child(ni, port);
-	of_node_put(ni);
-
-	return np;
-}
-
-static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
-				int port)
-{
-	struct device_node *np = priv->of_node;
-	u32 delay_value;
-	bool rx_delay;
-	bool tx_delay;
-
-	/* By default, both RX and TX delays are enabled in
-	 * __cvmx_helper_rgmii_enable().
-	 */
-	rx_delay = true;
-	tx_delay = true;
-
-	if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
-		cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
-		rx_delay = delay_value > 0;
-	}
-	if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
-		cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
-		tx_delay = delay_value > 0;
-	}
-
-	if (!rx_delay && !tx_delay)
-		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
-	else if (!rx_delay)
-		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
-	else if (!tx_delay)
-		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
-	else
-		priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
-}
-
-static int cvm_oct_probe(struct platform_device *pdev)
-{
-	int num_interfaces;
-	int interface;
-	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
-	int qos;
-	struct device_node *pip;
-	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;
-
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
-	mtu_overhead += VLAN_HLEN;
-#endif
-
-	octeon_mdiobus_force_mod_depencency();
-
-	pip = pdev->dev.of_node;
-	if (!pip) {
-		pr_err("Error: No 'pip' in /aliases\n");
-		return -EINVAL;
-	}
-
-	cvm_oct_configure_common_hw();
-
-	cvmx_helper_initialize_packet_io_global();
-
-	if (receive_group_order) {
-		if (receive_group_order > 4)
-			receive_group_order = 4;
-		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
-	} else {
-		pow_receive_groups = BIT(pow_receive_group);
-	}
-
-	/* Change the input group for all ports before input is enabled */
-	num_interfaces = cvmx_helper_get_number_of_interfaces();
-	for (interface = 0; interface < num_interfaces; interface++) {
-		int num_ports = cvmx_helper_ports_on_interface(interface);
-		int port;
-
-		for (port = cvmx_helper_get_ipd_port(interface, 0);
-		     port < cvmx_helper_get_ipd_port(interface, num_ports);
-		     port++) {
-			union cvmx_pip_prt_tagx pip_prt_tagx;
-
-			pip_prt_tagx.u64 =
-			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
-
-			if (receive_group_order) {
-				int tag_mask;
-
-				/* We support only 16 groups at the moment, so
-				 * always disable the two additional "hidden"
-				 * tag_mask bits on CN68XX.
-				 */
-				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
-					pip_prt_tagx.u64 |= 0x3ull << 44;
-
-				tag_mask = ~((1 << receive_group_order) - 1);
-				pip_prt_tagx.s.grptagbase	= 0;
-				pip_prt_tagx.s.grptagmask	= tag_mask;
-				pip_prt_tagx.s.grptag		= 1;
-				pip_prt_tagx.s.tag_mode		= 0;
-				pip_prt_tagx.s.inc_prt_flag	= 1;
-				pip_prt_tagx.s.ip6_dprt_flag	= 1;
-				pip_prt_tagx.s.ip4_dprt_flag	= 1;
-				pip_prt_tagx.s.ip6_sprt_flag	= 1;
-				pip_prt_tagx.s.ip4_sprt_flag	= 1;
-				pip_prt_tagx.s.ip6_dst_flag	= 1;
-				pip_prt_tagx.s.ip4_dst_flag	= 1;
-				pip_prt_tagx.s.ip6_src_flag	= 1;
-				pip_prt_tagx.s.ip4_src_flag	= 1;
-				pip_prt_tagx.s.grp		= 0;
-			} else {
-				pip_prt_tagx.s.grptag	= 0;
-				pip_prt_tagx.s.grp	= pow_receive_group;
-			}
-
-			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
-				       pip_prt_tagx.u64);
-		}
-	}
-
-	cvmx_helper_ipd_and_packet_input_enable();
-
-	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
-
-	/*
-	 * Initialize the FAU used for counting packet buffers that
-	 * need to be freed.
-	 */
-	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
-
-	/* Initialize the FAU used for counting tx SKBs that need to be freed */
-	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
-
-	if ((pow_send_group != -1)) {
-		struct net_device *dev;
-
-		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
-		if (dev) {
-			/* Initialize the device private structure. */
-			struct octeon_ethernet *priv = netdev_priv(dev);
-
-			SET_NETDEV_DEV(dev, &pdev->dev);
-			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
-			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
-			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
-			priv->queue = -1;
-			strscpy(dev->name, "pow%d", sizeof(dev->name));
-			for (qos = 0; qos < 16; qos++)
-				skb_queue_head_init(&priv->tx_free_list[qos]);
-			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
-			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
-
-			if (register_netdev(dev) < 0) {
-				pr_err("Failed to register ethernet device for POW\n");
-				free_netdev(dev);
-			} else {
-				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
-				pr_info("%s: POW send group %d, receive group %d\n",
-					dev->name, pow_send_group,
-					pow_receive_group);
-			}
-		} else {
-			pr_err("Failed to allocate ethernet device for POW\n");
-		}
-	}
-
-	num_interfaces = cvmx_helper_get_number_of_interfaces();
-	for (interface = 0; interface < num_interfaces; interface++) {
-		cvmx_helper_interface_mode_t imode =
-		    cvmx_helper_interface_get_mode(interface);
-		int num_ports = cvmx_helper_ports_on_interface(interface);
-		int port;
-		int port_index;
-
-		for (port_index = 0,
-		     port = cvmx_helper_get_ipd_port(interface, 0);
-		     port < cvmx_helper_get_ipd_port(interface, num_ports);
-		     port_index++, port++) {
-			struct octeon_ethernet *priv;
-			struct net_device *dev =
-			    alloc_etherdev(sizeof(struct octeon_ethernet));
-			if (!dev) {
-				pr_err("Failed to allocate ethernet device for port %d\n",
-				       port);
-				continue;
-			}
-
-			/* Initialize the device private structure. */
-			SET_NETDEV_DEV(dev, &pdev->dev);
-			priv = netdev_priv(dev);
-			priv->netdev = dev;
-			priv->of_node = cvm_oct_node_for_port(pip, interface,
-							      port_index);
-
-			INIT_DELAYED_WORK(&priv->port_periodic_work,
-					  cvm_oct_periodic_worker);
-			priv->imode = imode;
-			priv->port = port;
-			priv->queue = cvmx_pko_get_base_queue(priv->port);
-			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
-			priv->phy_mode = PHY_INTERFACE_MODE_NA;
-			for (qos = 0; qos < 16; qos++)
-				skb_queue_head_init(&priv->tx_free_list[qos]);
-			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
-			     qos++)
-				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
-			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
-			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
-
-			switch (priv->imode) {
-			/* These types don't support ports to IPD/PKO */
-			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
-			case CVMX_HELPER_INTERFACE_MODE_PCIE:
-			case CVMX_HELPER_INTERFACE_MODE_PICMG:
-				break;
-
-			case CVMX_HELPER_INTERFACE_MODE_NPI:
-				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
-				strscpy(dev->name, "npi%d", sizeof(dev->name));
-				break;
-
-			case CVMX_HELPER_INTERFACE_MODE_XAUI:
-				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
-				strscpy(dev->name, "xaui%d", sizeof(dev->name));
-				break;
-
-			case CVMX_HELPER_INTERFACE_MODE_LOOP:
-				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
-				strscpy(dev->name, "loop%d", sizeof(dev->name));
-				break;
-
-			case CVMX_HELPER_INTERFACE_MODE_SGMII:
-				priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
-				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
-				strscpy(dev->name, "eth%d", sizeof(dev->name));
-				break;
-
-			case CVMX_HELPER_INTERFACE_MODE_SPI:
-				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
-				strscpy(dev->name, "spi%d", sizeof(dev->name));
-				break;
-
-			case CVMX_HELPER_INTERFACE_MODE_GMII:
-				priv->phy_mode = PHY_INTERFACE_MODE_GMII;
-				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
-				strscpy(dev->name, "eth%d", sizeof(dev->name));
-				break;
-
-			case CVMX_HELPER_INTERFACE_MODE_RGMII:
-				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
-				strscpy(dev->name, "eth%d", sizeof(dev->name));
-				cvm_set_rgmii_delay(priv, interface,
-						    port_index);
-				break;
-			}
-
-			if (!dev->netdev_ops) {
-				free_netdev(dev);
-			} else if (register_netdev(dev) < 0) {
-				pr_err("Failed to register ethernet device for interface %d, port %d\n",
-				       interface, priv->port);
-				free_netdev(dev);
-			} else {
-				cvm_oct_device[priv->port] = dev;
-				fau -=
-				    cvmx_pko_get_num_queues(priv->port) *
-				    sizeof(u32);
-				schedule_delayed_work(&priv->port_periodic_work,
-						      HZ);
-			}
-		}
-	}
-
-	cvm_oct_tx_initialize();
-	cvm_oct_rx_initialize();
-
-	/*
-	 * 150 uS: about 10 1500-byte packets at 1GE.
-	 */
-	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
-
-	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
-
-	return 0;
-}
-
-static int cvm_oct_remove(struct platform_device *pdev)
-{
-	int port;
-
-	cvmx_ipd_disable();
-
-	atomic_inc_return(&cvm_oct_poll_queue_stopping);
-	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
-
-	cvm_oct_rx_shutdown();
-	cvm_oct_tx_shutdown();
-
-	cvmx_pko_disable();
-
-	/* Free the ethernet devices */
-	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
-		if (cvm_oct_device[port]) {
-			struct net_device *dev = cvm_oct_device[port];
-			struct octeon_ethernet *priv = netdev_priv(dev);
-
-			cancel_delayed_work_sync(&priv->port_periodic_work);
-
-			cvm_oct_tx_shutdown_dev(dev);
-			unregister_netdev(dev);
-			free_netdev(dev);
-			cvm_oct_device[port] = NULL;
-		}
-	}
-
-	cvmx_pko_shutdown();
-
-	cvmx_ipd_free_ptr();
-
-	/* Free the HW pools */
-	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
-			      num_packet_buffers);
-	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
-			      num_packet_buffers);
-	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
-		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
-				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
-	return 0;
-}
-
-static const struct of_device_id cvm_oct_match[] = {
-	{
-		.compatible = "cavium,octeon-3860-pip",
-	},
-	{},
-};
-MODULE_DEVICE_TABLE(of, cvm_oct_match);
-
-static struct platform_driver cvm_oct_driver = {
-	.probe		= cvm_oct_probe,
-	.remove		= cvm_oct_remove,
-	.driver		= {
-		.name	= KBUILD_MODNAME,
-		.of_match_table = cvm_oct_match,
-	},
-};
-
-module_platform_driver(cvm_oct_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
-MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
deleted file mode 100644
index a614070..0000000
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2010 Cavium Networks
- */
-
-/*
- * External interface for the Cavium Octeon ethernet driver.
- */
-#ifndef OCTEON_ETHERNET_H
-#define OCTEON_ETHERNET_H
-
-#include <linux/of.h>
-#include <linux/phy.h>
-
-#ifdef CONFIG_CAVIUM_OCTEON_SOC
-
-#include <asm/octeon/octeon.h>
-
-#include <asm/octeon/cvmx-asxx-defs.h>
-#include <asm/octeon/cvmx-config.h>
-#include <asm/octeon/cvmx-fau.h>
-#include <asm/octeon/cvmx-gmxx-defs.h>
-#include <asm/octeon/cvmx-helper.h>
-#include <asm/octeon/cvmx-helper-util.h>
-#include <asm/octeon/cvmx-ipd.h>
-#include <asm/octeon/cvmx-ipd-defs.h>
-#include <asm/octeon/cvmx-npi-defs.h>
-#include <asm/octeon/cvmx-pip.h>
-#include <asm/octeon/cvmx-pko.h>
-#include <asm/octeon/cvmx-pow.h>
-#include <asm/octeon/cvmx-scratch.h>
-#include <asm/octeon/cvmx-spi.h>
-#include <asm/octeon/cvmx-spxx-defs.h>
-#include <asm/octeon/cvmx-stxx-defs.h>
-#include <asm/octeon/cvmx-wqe.h>
-
-#else
-
-#include "octeon-stubs.h"
-
-#endif
-
-/**
- * This is the definition of the Ethernet driver's private
- * driver state stored in netdev_priv(dev).
- */
-struct octeon_ethernet {
-	/* PKO hardware output port */
-	int port;
-	/* PKO hardware queue for the port */
-	int queue;
-	/* Hardware fetch and add to count outstanding tx buffers */
-	int fau;
-	/* My netdev. */
-	struct net_device *netdev;
-	/*
-	 * Type of port. This is one of the enums in
-	 * cvmx_helper_interface_mode_t
-	 */
-	int imode;
-	/* PHY mode */
-	phy_interface_t phy_mode;
-	/* List of outstanding tx buffers per queue */
-	struct sk_buff_head tx_free_list[16];
-	unsigned int last_speed;
-	unsigned int last_link;
-	/* Last negotiated link state */
-	u64 link_info;
-	/* Called periodically to check link status */
-	void (*poll)(struct net_device *dev);
-	struct delayed_work	port_periodic_work;
-	struct device_node	*of_node;
-};
-
-int cvm_oct_free_work(void *work_queue_entry);
-
-int cvm_oct_rgmii_open(struct net_device *dev);
-
-int cvm_oct_sgmii_init(struct net_device *dev);
-int cvm_oct_sgmii_open(struct net_device *dev);
-
-int cvm_oct_spi_init(struct net_device *dev);
-void cvm_oct_spi_uninit(struct net_device *dev);
-
-int cvm_oct_common_init(struct net_device *dev);
-void cvm_oct_common_uninit(struct net_device *dev);
-void cvm_oct_adjust_link(struct net_device *dev);
-int cvm_oct_common_stop(struct net_device *dev);
-int cvm_oct_common_open(struct net_device *dev,
-			void (*link_poll)(struct net_device *));
-void cvm_oct_note_carrier(struct octeon_ethernet *priv,
-			  union cvmx_helper_link_info li);
-void cvm_oct_link_poll(struct net_device *dev);
-
-extern int always_use_pow;
-extern int pow_send_group;
-extern int pow_receive_groups;
-extern char pow_send_list[];
-extern struct net_device *cvm_oct_device[];
-extern atomic_t cvm_oct_poll_queue_stopping;
-extern u64 cvm_oct_tx_poll_interval;
-
-extern int rx_napi_weight;
-
-#endif
diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
deleted file mode 100644
index 79213c04..0000000
--- a/drivers/staging/octeon/octeon-stubs.h
+++ /dev/null
@@ -1,1433 +0,0 @@
-#define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE	512
-
-#ifndef XKPHYS_TO_PHYS
-# define XKPHYS_TO_PHYS(p)			(p)
-#endif
-
-#define OCTEON_IRQ_WORKQ0 0
-#define OCTEON_IRQ_RML 0
-#define OCTEON_IRQ_TIMER1 0
-#define OCTEON_IS_MODEL(x) 0
-#define octeon_has_feature(x)	0
-#define octeon_get_clock_rate()	0
-
-#define CVMX_SYNCIOBDMA		do { } while(0)
-
-#define CVMX_HELPER_INPUT_TAG_TYPE	0
-#define CVMX_HELPER_FIRST_MBUFF_SKIP	7
-#define CVMX_FAU_REG_END		(2048)
-#define CVMX_FPA_OUTPUT_BUFFER_POOL	    (2)
-#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE    16
-#define CVMX_FPA_PACKET_POOL		    (0)
-#define CVMX_FPA_PACKET_POOL_SIZE	    16
-#define CVMX_FPA_WQE_POOL		    (1)
-#define CVMX_FPA_WQE_POOL_SIZE		    16
-#define CVMX_GMXX_RXX_ADR_CAM_EN(a, b)	((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CTL(a, b)	((a)+(b))
-#define CVMX_GMXX_PRTX_CFG(a, b)	((a)+(b))
-#define CVMX_GMXX_RXX_FRM_MAX(a, b)	((a)+(b))
-#define CVMX_GMXX_RXX_JABBER(a, b)	((a)+(b))
-#define CVMX_IPD_CTL_STATUS		0
-#define CVMX_PIP_FRM_LEN_CHKX(a)	(a)
-#define CVMX_PIP_NUM_INPUT_PORTS	1
-#define CVMX_SCR_SCRATCH		0
-#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0	2
-#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1	2
-#define CVMX_IPD_SUB_PORT_FCS		0
-#define CVMX_SSO_WQ_IQ_DIS		0
-#define CVMX_SSO_WQ_INT			0
-#define CVMX_POW_WQ_INT			0
-#define CVMX_SSO_WQ_INT_PC		0
-#define CVMX_NPI_RSL_INT_BLOCKS		0
-#define CVMX_POW_WQ_INT_PC		0
-
-union cvmx_pip_wqe_word2 {
-	uint64_t u64;
-	struct {
-		uint64_t bufs:8;
-		uint64_t ip_offset:8;
-		uint64_t vlan_valid:1;
-		uint64_t vlan_stacked:1;
-		uint64_t unassigned:1;
-		uint64_t vlan_cfi:1;
-		uint64_t vlan_id:12;
-		uint64_t pr:4;
-		uint64_t unassigned2:8;
-		uint64_t dec_ipcomp:1;
-		uint64_t tcp_or_udp:1;
-		uint64_t dec_ipsec:1;
-		uint64_t is_v6:1;
-		uint64_t software:1;
-		uint64_t L4_error:1;
-		uint64_t is_frag:1;
-		uint64_t IP_exc:1;
-		uint64_t is_bcast:1;
-		uint64_t is_mcast:1;
-		uint64_t not_IP:1;
-		uint64_t rcv_error:1;
-		uint64_t err_code:8;
-	} s;
-	struct {
-		uint64_t bufs:8;
-		uint64_t ip_offset:8;
-		uint64_t vlan_valid:1;
-		uint64_t vlan_stacked:1;
-		uint64_t unassigned:1;
-		uint64_t vlan_cfi:1;
-		uint64_t vlan_id:12;
-		uint64_t port:12;
-		uint64_t dec_ipcomp:1;
-		uint64_t tcp_or_udp:1;
-		uint64_t dec_ipsec:1;
-		uint64_t is_v6:1;
-		uint64_t software:1;
-		uint64_t L4_error:1;
-		uint64_t is_frag:1;
-		uint64_t IP_exc:1;
-		uint64_t is_bcast:1;
-		uint64_t is_mcast:1;
-		uint64_t not_IP:1;
-		uint64_t rcv_error:1;
-		uint64_t err_code:8;
-	} s_cn68xx;
-
-	struct {
-		uint64_t unused1:16;
-		uint64_t vlan:16;
-		uint64_t unused2:32;
-	} svlan;
-	struct {
-		uint64_t bufs:8;
-		uint64_t unused:8;
-		uint64_t vlan_valid:1;
-		uint64_t vlan_stacked:1;
-		uint64_t unassigned:1;
-		uint64_t vlan_cfi:1;
-		uint64_t vlan_id:12;
-		uint64_t pr:4;
-		uint64_t unassigned2:12;
-		uint64_t software:1;
-		uint64_t unassigned3:1;
-		uint64_t is_rarp:1;
-		uint64_t is_arp:1;
-		uint64_t is_bcast:1;
-		uint64_t is_mcast:1;
-		uint64_t not_IP:1;
-		uint64_t rcv_error:1;
-		uint64_t err_code:8;
-	} snoip;
-
-};
-
-union cvmx_pip_wqe_word0 {
-	struct {
-		uint64_t next_ptr:40;
-		uint8_t unused;
-		__wsum hw_chksum;
-	} cn38xx;
-	struct {
-		uint64_t pknd:6;        /* 0..5 */
-		uint64_t unused2:2;     /* 6..7 */
-		uint64_t bpid:6;        /* 8..13 */
-		uint64_t unused1:18;    /* 14..31 */
-		uint64_t l2ptr:8;       /* 32..39 */
-		uint64_t l3ptr:8;       /* 40..47 */
-		uint64_t unused0:8;     /* 48..55 */
-		uint64_t l4ptr:8;       /* 56..63 */
-	} cn68xx;
-};
-
-union cvmx_wqe_word0 {
-	uint64_t u64;
-	union cvmx_pip_wqe_word0 pip;
-};
-
-union cvmx_wqe_word1 {
-	uint64_t u64;
-	struct {
-		uint64_t tag:32;
-		uint64_t tag_type:2;
-		uint64_t varies:14;
-		uint64_t len:16;
-	};
-	struct {
-		uint64_t tag:32;
-		uint64_t tag_type:2;
-		uint64_t zero_2:3;
-		uint64_t grp:6;
-		uint64_t zero_1:1;
-		uint64_t qos:3;
-		uint64_t zero_0:1;
-		uint64_t len:16;
-	} cn68xx;
-	struct {
-		uint64_t tag:32;
-		uint64_t tag_type:2;
-		uint64_t zero_2:1;
-		uint64_t grp:4;
-		uint64_t qos:3;
-		uint64_t ipprt:6;
-		uint64_t len:16;
-	} cn38xx;
-};
-
-union cvmx_buf_ptr {
-	void *ptr;
-	uint64_t u64;
-	struct {
-		uint64_t i:1;
-		uint64_t back:4;
-		uint64_t pool:3;
-		uint64_t size:16;
-		uint64_t addr:40;
-	} s;
-};
-
-struct cvmx_wqe {
-	union cvmx_wqe_word0 word0;
-	union cvmx_wqe_word1 word1;
-	union cvmx_pip_wqe_word2 word2;
-	union cvmx_buf_ptr packet_ptr;
-	uint8_t packet_data[96];
-};
-
-union cvmx_helper_link_info {
-	uint64_t u64;
-	struct {
-		uint64_t reserved_20_63:44;
-		uint64_t link_up:1;	    /**< Is the physical link up? */
-		uint64_t full_duplex:1;	    /**< 1 if the link is full duplex */
-		uint64_t speed:18;	    /**< Speed of the link in Mbps */
-	} s;
-};
-
-enum cvmx_fau_reg_32 {
-	CVMX_FAU_REG_32_START	= 0,
-};
-
-enum cvmx_fau_op_size {
-	CVMX_FAU_OP_SIZE_8 = 0,
-	CVMX_FAU_OP_SIZE_16 = 1,
-	CVMX_FAU_OP_SIZE_32 = 2,
-	CVMX_FAU_OP_SIZE_64 = 3
-};
-
-typedef enum {
-	CVMX_SPI_MODE_UNKNOWN = 0,
-	CVMX_SPI_MODE_TX_HALFPLEX = 1,
-	CVMX_SPI_MODE_RX_HALFPLEX = 2,
-	CVMX_SPI_MODE_DUPLEX = 3
-} cvmx_spi_mode_t;
-
-typedef enum {
-	CVMX_HELPER_INTERFACE_MODE_DISABLED,
-	CVMX_HELPER_INTERFACE_MODE_RGMII,
-	CVMX_HELPER_INTERFACE_MODE_GMII,
-	CVMX_HELPER_INTERFACE_MODE_SPI,
-	CVMX_HELPER_INTERFACE_MODE_PCIE,
-	CVMX_HELPER_INTERFACE_MODE_XAUI,
-	CVMX_HELPER_INTERFACE_MODE_SGMII,
-	CVMX_HELPER_INTERFACE_MODE_PICMG,
-	CVMX_HELPER_INTERFACE_MODE_NPI,
-	CVMX_HELPER_INTERFACE_MODE_LOOP,
-} cvmx_helper_interface_mode_t;
-
-typedef enum {
-	CVMX_POW_WAIT = 1,
-	CVMX_POW_NO_WAIT = 0,
-} cvmx_pow_wait_t;
-
-typedef enum {
-	CVMX_PKO_LOCK_NONE = 0,
-	CVMX_PKO_LOCK_ATOMIC_TAG = 1,
-	CVMX_PKO_LOCK_CMD_QUEUE = 2,
-} cvmx_pko_lock_t;
-
-typedef enum {
-	CVMX_PKO_SUCCESS,
-	CVMX_PKO_INVALID_PORT,
-	CVMX_PKO_INVALID_QUEUE,
-	CVMX_PKO_INVALID_PRIORITY,
-	CVMX_PKO_NO_MEMORY,
-	CVMX_PKO_PORT_ALREADY_SETUP,
-	CVMX_PKO_CMD_QUEUE_INIT_ERROR
-} cvmx_pko_status_t;
-
-enum cvmx_pow_tag_type {
-	CVMX_POW_TAG_TYPE_ORDERED   = 0L,
-	CVMX_POW_TAG_TYPE_ATOMIC    = 1L,
-	CVMX_POW_TAG_TYPE_NULL	    = 2L,
-	CVMX_POW_TAG_TYPE_NULL_NULL = 3L
-};
-
-union cvmx_ipd_ctl_status {
-	uint64_t u64;
-	struct cvmx_ipd_ctl_status_s {
-		uint64_t reserved_18_63:46;
-		uint64_t use_sop:1;
-		uint64_t rst_done:1;
-		uint64_t clken:1;
-		uint64_t no_wptr:1;
-		uint64_t pq_apkt:1;
-		uint64_t pq_nabuf:1;
-		uint64_t ipd_full:1;
-		uint64_t pkt_off:1;
-		uint64_t len_m8:1;
-		uint64_t reset:1;
-		uint64_t addpkt:1;
-		uint64_t naddbuf:1;
-		uint64_t pkt_lend:1;
-		uint64_t wqe_lend:1;
-		uint64_t pbp_en:1;
-		uint64_t opc_mode:2;
-		uint64_t ipd_en:1;
-	} s;
-	struct cvmx_ipd_ctl_status_cn30xx {
-		uint64_t reserved_10_63:54;
-		uint64_t len_m8:1;
-		uint64_t reset:1;
-		uint64_t addpkt:1;
-		uint64_t naddbuf:1;
-		uint64_t pkt_lend:1;
-		uint64_t wqe_lend:1;
-		uint64_t pbp_en:1;
-		uint64_t opc_mode:2;
-		uint64_t ipd_en:1;
-	} cn30xx;
-	struct cvmx_ipd_ctl_status_cn38xxp2 {
-		uint64_t reserved_9_63:55;
-		uint64_t reset:1;
-		uint64_t addpkt:1;
-		uint64_t naddbuf:1;
-		uint64_t pkt_lend:1;
-		uint64_t wqe_lend:1;
-		uint64_t pbp_en:1;
-		uint64_t opc_mode:2;
-		uint64_t ipd_en:1;
-	} cn38xxp2;
-	struct cvmx_ipd_ctl_status_cn50xx {
-		uint64_t reserved_15_63:49;
-		uint64_t no_wptr:1;
-		uint64_t pq_apkt:1;
-		uint64_t pq_nabuf:1;
-		uint64_t ipd_full:1;
-		uint64_t pkt_off:1;
-		uint64_t len_m8:1;
-		uint64_t reset:1;
-		uint64_t addpkt:1;
-		uint64_t naddbuf:1;
-		uint64_t pkt_lend:1;
-		uint64_t wqe_lend:1;
-		uint64_t pbp_en:1;
-		uint64_t opc_mode:2;
-		uint64_t ipd_en:1;
-	} cn50xx;
-	struct cvmx_ipd_ctl_status_cn58xx {
-		uint64_t reserved_12_63:52;
-		uint64_t ipd_full:1;
-		uint64_t pkt_off:1;
-		uint64_t len_m8:1;
-		uint64_t reset:1;
-		uint64_t addpkt:1;
-		uint64_t naddbuf:1;
-		uint64_t pkt_lend:1;
-		uint64_t wqe_lend:1;
-		uint64_t pbp_en:1;
-		uint64_t opc_mode:2;
-		uint64_t ipd_en:1;
-	} cn58xx;
-	struct cvmx_ipd_ctl_status_cn63xxp1 {
-		uint64_t reserved_16_63:48;
-		uint64_t clken:1;
-		uint64_t no_wptr:1;
-		uint64_t pq_apkt:1;
-		uint64_t pq_nabuf:1;
-		uint64_t ipd_full:1;
-		uint64_t pkt_off:1;
-		uint64_t len_m8:1;
-		uint64_t reset:1;
-		uint64_t addpkt:1;
-		uint64_t naddbuf:1;
-		uint64_t pkt_lend:1;
-		uint64_t wqe_lend:1;
-		uint64_t pbp_en:1;
-		uint64_t opc_mode:2;
-		uint64_t ipd_en:1;
-	} cn63xxp1;
-};
-
-union cvmx_ipd_sub_port_fcs {
-	uint64_t u64;
-	struct cvmx_ipd_sub_port_fcs_s {
-		uint64_t port_bit:32;
-		uint64_t reserved_32_35:4;
-		uint64_t port_bit2:4;
-		uint64_t reserved_40_63:24;
-	} s;
-	struct cvmx_ipd_sub_port_fcs_cn30xx {
-		uint64_t port_bit:3;
-		uint64_t reserved_3_63:61;
-	} cn30xx;
-	struct cvmx_ipd_sub_port_fcs_cn38xx {
-		uint64_t port_bit:32;
-		uint64_t reserved_32_63:32;
-	} cn38xx;
-};
-
-union cvmx_ipd_sub_port_qos_cnt {
-	uint64_t u64;
-	struct cvmx_ipd_sub_port_qos_cnt_s {
-		uint64_t cnt:32;
-		uint64_t port_qos:9;
-		uint64_t reserved_41_63:23;
-	} s;
-};
-typedef struct {
-	uint32_t dropped_octets;
-	uint32_t dropped_packets;
-	uint32_t pci_raw_packets;
-	uint32_t octets;
-	uint32_t packets;
-	uint32_t multicast_packets;
-	uint32_t broadcast_packets;
-	uint32_t len_64_packets;
-	uint32_t len_65_127_packets;
-	uint32_t len_128_255_packets;
-	uint32_t len_256_511_packets;
-	uint32_t len_512_1023_packets;
-	uint32_t len_1024_1518_packets;
-	uint32_t len_1519_max_packets;
-	uint32_t fcs_align_err_packets;
-	uint32_t runt_packets;
-	uint32_t runt_crc_packets;
-	uint32_t oversize_packets;
-	uint32_t oversize_crc_packets;
-	uint32_t inb_packets;
-	uint64_t inb_octets;
-	uint16_t inb_errors;
-} cvmx_pip_port_status_t;
-
-typedef struct {
-	uint32_t packets;
-	uint64_t octets;
-	uint64_t doorbell;
-} cvmx_pko_port_status_t;
-
-union cvmx_pip_frm_len_chkx {
-	uint64_t u64;
-	struct cvmx_pip_frm_len_chkx_s {
-		uint64_t reserved_32_63:32;
-		uint64_t maxlen:16;
-		uint64_t minlen:16;
-	} s;
-};
-
-union cvmx_gmxx_rxx_frm_ctl {
-	uint64_t u64;
-	struct cvmx_gmxx_rxx_frm_ctl_s {
-		uint64_t pre_chk:1;
-		uint64_t pre_strp:1;
-		uint64_t ctl_drp:1;
-		uint64_t ctl_bck:1;
-		uint64_t ctl_mcst:1;
-		uint64_t ctl_smac:1;
-		uint64_t pre_free:1;
-		uint64_t vlan_len:1;
-		uint64_t pad_len:1;
-		uint64_t pre_align:1;
-		uint64_t null_dis:1;
-		uint64_t reserved_11_11:1;
-		uint64_t ptp_mode:1;
-		uint64_t reserved_13_63:51;
-	} s;
-	struct cvmx_gmxx_rxx_frm_ctl_cn30xx {
-		uint64_t pre_chk:1;
-		uint64_t pre_strp:1;
-		uint64_t ctl_drp:1;
-		uint64_t ctl_bck:1;
-		uint64_t ctl_mcst:1;
-		uint64_t ctl_smac:1;
-		uint64_t pre_free:1;
-		uint64_t vlan_len:1;
-		uint64_t pad_len:1;
-		uint64_t reserved_9_63:55;
-	} cn30xx;
-	struct cvmx_gmxx_rxx_frm_ctl_cn31xx {
-		uint64_t pre_chk:1;
-		uint64_t pre_strp:1;
-		uint64_t ctl_drp:1;
-		uint64_t ctl_bck:1;
-		uint64_t ctl_mcst:1;
-		uint64_t ctl_smac:1;
-		uint64_t pre_free:1;
-		uint64_t vlan_len:1;
-		uint64_t reserved_8_63:56;
-	} cn31xx;
-	struct cvmx_gmxx_rxx_frm_ctl_cn50xx {
-		uint64_t pre_chk:1;
-		uint64_t pre_strp:1;
-		uint64_t ctl_drp:1;
-		uint64_t ctl_bck:1;
-		uint64_t ctl_mcst:1;
-		uint64_t ctl_smac:1;
-		uint64_t pre_free:1;
-		uint64_t reserved_7_8:2;
-		uint64_t pre_align:1;
-		uint64_t null_dis:1;
-		uint64_t reserved_11_63:53;
-	} cn50xx;
-	struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 {
-		uint64_t pre_chk:1;
-		uint64_t pre_strp:1;
-		uint64_t ctl_drp:1;
-		uint64_t ctl_bck:1;
-		uint64_t ctl_mcst:1;
-		uint64_t ctl_smac:1;
-		uint64_t pre_free:1;
-		uint64_t reserved_7_8:2;
-		uint64_t pre_align:1;
-		uint64_t reserved_10_63:54;
-	} cn56xxp1;
-	struct cvmx_gmxx_rxx_frm_ctl_cn58xx {
-		uint64_t pre_chk:1;
-		uint64_t pre_strp:1;
-		uint64_t ctl_drp:1;
-		uint64_t ctl_bck:1;
-		uint64_t ctl_mcst:1;
-		uint64_t ctl_smac:1;
-		uint64_t pre_free:1;
-		uint64_t vlan_len:1;
-		uint64_t pad_len:1;
-		uint64_t pre_align:1;
-		uint64_t null_dis:1;
-		uint64_t reserved_11_63:53;
-	} cn58xx;
-	struct cvmx_gmxx_rxx_frm_ctl_cn61xx {
-		uint64_t pre_chk:1;
-		uint64_t pre_strp:1;
-		uint64_t ctl_drp:1;
-		uint64_t ctl_bck:1;
-		uint64_t ctl_mcst:1;
-		uint64_t ctl_smac:1;
-		uint64_t pre_free:1;
-		uint64_t reserved_7_8:2;
-		uint64_t pre_align:1;
-		uint64_t null_dis:1;
-		uint64_t reserved_11_11:1;
-		uint64_t ptp_mode:1;
-		uint64_t reserved_13_63:51;
-	} cn61xx;
-};
-
-union cvmx_gmxx_rxx_int_reg {
-	uint64_t u64;
-	struct cvmx_gmxx_rxx_int_reg_s {
-		uint64_t minerr:1;
-		uint64_t carext:1;
-		uint64_t maxerr:1;
-		uint64_t jabber:1;
-		uint64_t fcserr:1;
-		uint64_t alnerr:1;
-		uint64_t lenerr:1;
-		uint64_t rcverr:1;
-		uint64_t skperr:1;
-		uint64_t niberr:1;
-		uint64_t ovrerr:1;
-		uint64_t pcterr:1;
-		uint64_t rsverr:1;
-		uint64_t falerr:1;
-		uint64_t coldet:1;
-		uint64_t ifgerr:1;
-		uint64_t phy_link:1;
-		uint64_t phy_spd:1;
-		uint64_t phy_dupx:1;
-		uint64_t pause_drp:1;
-		uint64_t loc_fault:1;
-		uint64_t rem_fault:1;
-		uint64_t bad_seq:1;
-		uint64_t bad_term:1;
-		uint64_t unsop:1;
-		uint64_t uneop:1;
-		uint64_t undat:1;
-		uint64_t hg2fld:1;
-		uint64_t hg2cc:1;
-		uint64_t reserved_29_63:35;
-	} s;
-	struct cvmx_gmxx_rxx_int_reg_cn30xx {
-		uint64_t minerr:1;
-		uint64_t carext:1;
-		uint64_t maxerr:1;
-		uint64_t jabber:1;
-		uint64_t fcserr:1;
-		uint64_t alnerr:1;
-		uint64_t lenerr:1;
-		uint64_t rcverr:1;
-		uint64_t skperr:1;
-		uint64_t niberr:1;
-		uint64_t ovrerr:1;
-		uint64_t pcterr:1;
-		uint64_t rsverr:1;
-		uint64_t falerr:1;
-		uint64_t coldet:1;
-		uint64_t ifgerr:1;
-		uint64_t phy_link:1;
-		uint64_t phy_spd:1;
-		uint64_t phy_dupx:1;
-		uint64_t reserved_19_63:45;
-	} cn30xx;
-	struct cvmx_gmxx_rxx_int_reg_cn50xx {
-		uint64_t reserved_0_0:1;
-		uint64_t carext:1;
-		uint64_t reserved_2_2:1;
-		uint64_t jabber:1;
-		uint64_t fcserr:1;
-		uint64_t alnerr:1;
-		uint64_t reserved_6_6:1;
-		uint64_t rcverr:1;
-		uint64_t skperr:1;
-		uint64_t niberr:1;
-		uint64_t ovrerr:1;
-		uint64_t pcterr:1;
-		uint64_t rsverr:1;
-		uint64_t falerr:1;
-		uint64_t coldet:1;
-		uint64_t ifgerr:1;
-		uint64_t phy_link:1;
-		uint64_t phy_spd:1;
-		uint64_t phy_dupx:1;
-		uint64_t pause_drp:1;
-		uint64_t reserved_20_63:44;
-	} cn50xx;
-	struct cvmx_gmxx_rxx_int_reg_cn52xx {
-		uint64_t reserved_0_0:1;
-		uint64_t carext:1;
-		uint64_t reserved_2_2:1;
-		uint64_t jabber:1;
-		uint64_t fcserr:1;
-		uint64_t reserved_5_6:2;
-		uint64_t rcverr:1;
-		uint64_t skperr:1;
-		uint64_t reserved_9_9:1;
-		uint64_t ovrerr:1;
-		uint64_t pcterr:1;
-		uint64_t rsverr:1;
-		uint64_t falerr:1;
-		uint64_t coldet:1;
-		uint64_t ifgerr:1;
-		uint64_t reserved_16_18:3;
-		uint64_t pause_drp:1;
-		uint64_t loc_fault:1;
-		uint64_t rem_fault:1;
-		uint64_t bad_seq:1;
-		uint64_t bad_term:1;
-		uint64_t unsop:1;
-		uint64_t uneop:1;
-		uint64_t undat:1;
-		uint64_t hg2fld:1;
-		uint64_t hg2cc:1;
-		uint64_t reserved_29_63:35;
-	} cn52xx;
-	struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
-		uint64_t reserved_0_0:1;
-		uint64_t carext:1;
-		uint64_t reserved_2_2:1;
-		uint64_t jabber:1;
-		uint64_t fcserr:1;
-		uint64_t reserved_5_6:2;
-		uint64_t rcverr:1;
-		uint64_t skperr:1;
-		uint64_t reserved_9_9:1;
-		uint64_t ovrerr:1;
-		uint64_t pcterr:1;
-		uint64_t rsverr:1;
-		uint64_t falerr:1;
-		uint64_t coldet:1;
-		uint64_t ifgerr:1;
-		uint64_t reserved_16_18:3;
-		uint64_t pause_drp:1;
-		uint64_t loc_fault:1;
-		uint64_t rem_fault:1;
-		uint64_t bad_seq:1;
-		uint64_t bad_term:1;
-		uint64_t unsop:1;
-		uint64_t uneop:1;
-		uint64_t undat:1;
-		uint64_t reserved_27_63:37;
-	} cn56xxp1;
-	struct cvmx_gmxx_rxx_int_reg_cn58xx {
-		uint64_t minerr:1;
-		uint64_t carext:1;
-		uint64_t maxerr:1;
-		uint64_t jabber:1;
-		uint64_t fcserr:1;
-		uint64_t alnerr:1;
-		uint64_t lenerr:1;
-		uint64_t rcverr:1;
-		uint64_t skperr:1;
-		uint64_t niberr:1;
-		uint64_t ovrerr:1;
-		uint64_t pcterr:1;
-		uint64_t rsverr:1;
-		uint64_t falerr:1;
-		uint64_t coldet:1;
-		uint64_t ifgerr:1;
-		uint64_t phy_link:1;
-		uint64_t phy_spd:1;
-		uint64_t phy_dupx:1;
-		uint64_t pause_drp:1;
-		uint64_t reserved_20_63:44;
-	} cn58xx;
-	struct cvmx_gmxx_rxx_int_reg_cn61xx {
-		uint64_t minerr:1;
-		uint64_t carext:1;
-		uint64_t reserved_2_2:1;
-		uint64_t jabber:1;
-		uint64_t fcserr:1;
-		uint64_t reserved_5_6:2;
-		uint64_t rcverr:1;
-		uint64_t skperr:1;
-		uint64_t reserved_9_9:1;
-		uint64_t ovrerr:1;
-		uint64_t pcterr:1;
-		uint64_t rsverr:1;
-		uint64_t falerr:1;
-		uint64_t coldet:1;
-		uint64_t ifgerr:1;
-		uint64_t reserved_16_18:3;
-		uint64_t pause_drp:1;
-		uint64_t loc_fault:1;
-		uint64_t rem_fault:1;
-		uint64_t bad_seq:1;
-		uint64_t bad_term:1;
-		uint64_t unsop:1;
-		uint64_t uneop:1;
-		uint64_t undat:1;
-		uint64_t hg2fld:1;
-		uint64_t hg2cc:1;
-		uint64_t reserved_29_63:35;
-	} cn61xx;
-};
-
-union cvmx_gmxx_prtx_cfg {
-	uint64_t u64;
-	struct cvmx_gmxx_prtx_cfg_s {
-		uint64_t reserved_22_63:42;
-		uint64_t pknd:6;
-		uint64_t reserved_14_15:2;
-		uint64_t tx_idle:1;
-		uint64_t rx_idle:1;
-		uint64_t reserved_9_11:3;
-		uint64_t speed_msb:1;
-		uint64_t reserved_4_7:4;
-		uint64_t slottime:1;
-		uint64_t duplex:1;
-		uint64_t speed:1;
-		uint64_t en:1;
-	} s;
-	struct cvmx_gmxx_prtx_cfg_cn30xx {
-		uint64_t reserved_4_63:60;
-		uint64_t slottime:1;
-		uint64_t duplex:1;
-		uint64_t speed:1;
-		uint64_t en:1;
-	} cn30xx;
-	struct cvmx_gmxx_prtx_cfg_cn52xx {
-		uint64_t reserved_14_63:50;
-		uint64_t tx_idle:1;
-		uint64_t rx_idle:1;
-		uint64_t reserved_9_11:3;
-		uint64_t speed_msb:1;
-		uint64_t reserved_4_7:4;
-		uint64_t slottime:1;
-		uint64_t duplex:1;
-		uint64_t speed:1;
-		uint64_t en:1;
-	} cn52xx;
-};
-
-union cvmx_gmxx_rxx_adr_ctl {
-	uint64_t u64;
-	struct cvmx_gmxx_rxx_adr_ctl_s {
-		uint64_t reserved_4_63:60;
-		uint64_t cam_mode:1;
-		uint64_t mcst:2;
-		uint64_t bcst:1;
-	} s;
-};
-
-union cvmx_pip_prt_tagx {
-	uint64_t u64;
-	struct cvmx_pip_prt_tagx_s {
-		uint64_t reserved_54_63:10;
-		uint64_t portadd_en:1;
-		uint64_t inc_hwchk:1;
-		uint64_t reserved_50_51:2;
-		uint64_t grptagbase_msb:2;
-		uint64_t reserved_46_47:2;
-		uint64_t grptagmask_msb:2;
-		uint64_t reserved_42_43:2;
-		uint64_t grp_msb:2;
-		uint64_t grptagbase:4;
-		uint64_t grptagmask:4;
-		uint64_t grptag:1;
-		uint64_t grptag_mskip:1;
-		uint64_t tag_mode:2;
-		uint64_t inc_vs:2;
-		uint64_t inc_vlan:1;
-		uint64_t inc_prt_flag:1;
-		uint64_t ip6_dprt_flag:1;
-		uint64_t ip4_dprt_flag:1;
-		uint64_t ip6_sprt_flag:1;
-		uint64_t ip4_sprt_flag:1;
-		uint64_t ip6_nxth_flag:1;
-		uint64_t ip4_pctl_flag:1;
-		uint64_t ip6_dst_flag:1;
-		uint64_t ip4_dst_flag:1;
-		uint64_t ip6_src_flag:1;
-		uint64_t ip4_src_flag:1;
-		uint64_t tcp6_tag_type:2;
-		uint64_t tcp4_tag_type:2;
-		uint64_t ip6_tag_type:2;
-		uint64_t ip4_tag_type:2;
-		uint64_t non_tag_type:2;
-		uint64_t grp:4;
-	} s;
-	struct cvmx_pip_prt_tagx_cn30xx {
-		uint64_t reserved_40_63:24;
-		uint64_t grptagbase:4;
-		uint64_t grptagmask:4;
-		uint64_t grptag:1;
-		uint64_t reserved_30_30:1;
-		uint64_t tag_mode:2;
-		uint64_t inc_vs:2;
-		uint64_t inc_vlan:1;
-		uint64_t inc_prt_flag:1;
-		uint64_t ip6_dprt_flag:1;
-		uint64_t ip4_dprt_flag:1;
-		uint64_t ip6_sprt_flag:1;
-		uint64_t ip4_sprt_flag:1;
-		uint64_t ip6_nxth_flag:1;
-		uint64_t ip4_pctl_flag:1;
-		uint64_t ip6_dst_flag:1;
-		uint64_t ip4_dst_flag:1;
-		uint64_t ip6_src_flag:1;
-		uint64_t ip4_src_flag:1;
-		uint64_t tcp6_tag_type:2;
-		uint64_t tcp4_tag_type:2;
-		uint64_t ip6_tag_type:2;
-		uint64_t ip4_tag_type:2;
-		uint64_t non_tag_type:2;
-		uint64_t grp:4;
-	} cn30xx;
-	struct cvmx_pip_prt_tagx_cn50xx {
-		uint64_t reserved_40_63:24;
-		uint64_t grptagbase:4;
-		uint64_t grptagmask:4;
-		uint64_t grptag:1;
-		uint64_t grptag_mskip:1;
-		uint64_t tag_mode:2;
-		uint64_t inc_vs:2;
-		uint64_t inc_vlan:1;
-		uint64_t inc_prt_flag:1;
-		uint64_t ip6_dprt_flag:1;
-		uint64_t ip4_dprt_flag:1;
-		uint64_t ip6_sprt_flag:1;
-		uint64_t ip4_sprt_flag:1;
-		uint64_t ip6_nxth_flag:1;
-		uint64_t ip4_pctl_flag:1;
-		uint64_t ip6_dst_flag:1;
-		uint64_t ip4_dst_flag:1;
-		uint64_t ip6_src_flag:1;
-		uint64_t ip4_src_flag:1;
-		uint64_t tcp6_tag_type:2;
-		uint64_t tcp4_tag_type:2;
-		uint64_t ip6_tag_type:2;
-		uint64_t ip4_tag_type:2;
-		uint64_t non_tag_type:2;
-		uint64_t grp:4;
-	} cn50xx;
-};
-
-union cvmx_spxx_int_reg {
-	uint64_t u64;
-	struct cvmx_spxx_int_reg_s {
-		uint64_t reserved_32_63:32;
-		uint64_t spf:1;
-		uint64_t reserved_12_30:19;
-		uint64_t calerr:1;
-		uint64_t syncerr:1;
-		uint64_t diperr:1;
-		uint64_t tpaovr:1;
-		uint64_t rsverr:1;
-		uint64_t drwnng:1;
-		uint64_t clserr:1;
-		uint64_t spiovr:1;
-		uint64_t reserved_2_3:2;
-		uint64_t abnorm:1;
-		uint64_t prtnxa:1;
-	} s;
-};
-
-union cvmx_spxx_int_msk {
-	uint64_t u64;
-	struct cvmx_spxx_int_msk_s {
-		uint64_t reserved_12_63:52;
-		uint64_t calerr:1;
-		uint64_t syncerr:1;
-		uint64_t diperr:1;
-		uint64_t tpaovr:1;
-		uint64_t rsverr:1;
-		uint64_t drwnng:1;
-		uint64_t clserr:1;
-		uint64_t spiovr:1;
-		uint64_t reserved_2_3:2;
-		uint64_t abnorm:1;
-		uint64_t prtnxa:1;
-	} s;
-};
-
-union cvmx_pow_wq_int {
-	uint64_t u64;
-	struct cvmx_pow_wq_int_s {
-		uint64_t wq_int:16;
-		uint64_t iq_dis:16;
-		uint64_t reserved_32_63:32;
-	} s;
-};
-
-union cvmx_sso_wq_int_thrx {
-	uint64_t u64;
-	struct {
-		uint64_t iq_thr:12;
-		uint64_t reserved_12_13:2;
-		uint64_t ds_thr:12;
-		uint64_t reserved_26_27:2;
-		uint64_t tc_thr:4;
-		uint64_t tc_en:1;
-		uint64_t reserved_33_63:31;
-	} s;
-};
-
-union cvmx_stxx_int_reg {
-	uint64_t u64;
-	struct cvmx_stxx_int_reg_s {
-		uint64_t reserved_9_63:55;
-		uint64_t syncerr:1;
-		uint64_t frmerr:1;
-		uint64_t unxfrm:1;
-		uint64_t nosync:1;
-		uint64_t diperr:1;
-		uint64_t datovr:1;
-		uint64_t ovrbst:1;
-		uint64_t calpar1:1;
-		uint64_t calpar0:1;
-	} s;
-};
-
-union cvmx_stxx_int_msk {
-	uint64_t u64;
-	struct cvmx_stxx_int_msk_s {
-		uint64_t reserved_8_63:56;
-		uint64_t frmerr:1;
-		uint64_t unxfrm:1;
-		uint64_t nosync:1;
-		uint64_t diperr:1;
-		uint64_t datovr:1;
-		uint64_t ovrbst:1;
-		uint64_t calpar1:1;
-		uint64_t calpar0:1;
-	} s;
-};
-
-union cvmx_pow_wq_int_pc {
-	uint64_t u64;
-	struct cvmx_pow_wq_int_pc_s {
-		uint64_t reserved_0_7:8;
-		uint64_t pc_thr:20;
-		uint64_t reserved_28_31:4;
-		uint64_t pc:28;
-		uint64_t reserved_60_63:4;
-	} s;
-};
-
-union cvmx_pow_wq_int_thrx {
-	uint64_t u64;
-	struct cvmx_pow_wq_int_thrx_s {
-		uint64_t reserved_29_63:35;
-		uint64_t tc_en:1;
-		uint64_t tc_thr:4;
-		uint64_t reserved_23_23:1;
-		uint64_t ds_thr:11;
-		uint64_t reserved_11_11:1;
-		uint64_t iq_thr:11;
-	} s;
-	struct cvmx_pow_wq_int_thrx_cn30xx {
-		uint64_t reserved_29_63:35;
-		uint64_t tc_en:1;
-		uint64_t tc_thr:4;
-		uint64_t reserved_18_23:6;
-		uint64_t ds_thr:6;
-		uint64_t reserved_6_11:6;
-		uint64_t iq_thr:6;
-	} cn30xx;
-	struct cvmx_pow_wq_int_thrx_cn31xx {
-		uint64_t reserved_29_63:35;
-		uint64_t tc_en:1;
-		uint64_t tc_thr:4;
-		uint64_t reserved_20_23:4;
-		uint64_t ds_thr:8;
-		uint64_t reserved_8_11:4;
-		uint64_t iq_thr:8;
-	} cn31xx;
-	struct cvmx_pow_wq_int_thrx_cn52xx {
-		uint64_t reserved_29_63:35;
-		uint64_t tc_en:1;
-		uint64_t tc_thr:4;
-		uint64_t reserved_21_23:3;
-		uint64_t ds_thr:9;
-		uint64_t reserved_9_11:3;
-		uint64_t iq_thr:9;
-	} cn52xx;
-	struct cvmx_pow_wq_int_thrx_cn63xx {
-		uint64_t reserved_29_63:35;
-		uint64_t tc_en:1;
-		uint64_t tc_thr:4;
-		uint64_t reserved_22_23:2;
-		uint64_t ds_thr:10;
-		uint64_t reserved_10_11:2;
-		uint64_t iq_thr:10;
-	} cn63xx;
-};
-
-union cvmx_npi_rsl_int_blocks {
-	uint64_t u64;
-	struct cvmx_npi_rsl_int_blocks_s {
-		uint64_t reserved_32_63:32;
-		uint64_t rint_31:1;
-		uint64_t iob:1;
-		uint64_t reserved_28_29:2;
-		uint64_t rint_27:1;
-		uint64_t rint_26:1;
-		uint64_t rint_25:1;
-		uint64_t rint_24:1;
-		uint64_t asx1:1;
-		uint64_t asx0:1;
-		uint64_t rint_21:1;
-		uint64_t pip:1;
-		uint64_t spx1:1;
-		uint64_t spx0:1;
-		uint64_t lmc:1;
-		uint64_t l2c:1;
-		uint64_t rint_15:1;
-		uint64_t reserved_13_14:2;
-		uint64_t pow:1;
-		uint64_t tim:1;
-		uint64_t pko:1;
-		uint64_t ipd:1;
-		uint64_t rint_8:1;
-		uint64_t zip:1;
-		uint64_t dfa:1;
-		uint64_t fpa:1;
-		uint64_t key:1;
-		uint64_t npi:1;
-		uint64_t gmx1:1;
-		uint64_t gmx0:1;
-		uint64_t mio:1;
-	} s;
-	struct cvmx_npi_rsl_int_blocks_cn30xx {
-		uint64_t reserved_32_63:32;
-		uint64_t rint_31:1;
-		uint64_t iob:1;
-		uint64_t rint_29:1;
-		uint64_t rint_28:1;
-		uint64_t rint_27:1;
-		uint64_t rint_26:1;
-		uint64_t rint_25:1;
-		uint64_t rint_24:1;
-		uint64_t asx1:1;
-		uint64_t asx0:1;
-		uint64_t rint_21:1;
-		uint64_t pip:1;
-		uint64_t spx1:1;
-		uint64_t spx0:1;
-		uint64_t lmc:1;
-		uint64_t l2c:1;
-		uint64_t rint_15:1;
-		uint64_t rint_14:1;
-		uint64_t usb:1;
-		uint64_t pow:1;
-		uint64_t tim:1;
-		uint64_t pko:1;
-		uint64_t ipd:1;
-		uint64_t rint_8:1;
-		uint64_t zip:1;
-		uint64_t dfa:1;
-		uint64_t fpa:1;
-		uint64_t key:1;
-		uint64_t npi:1;
-		uint64_t gmx1:1;
-		uint64_t gmx0:1;
-		uint64_t mio:1;
-	} cn30xx;
-	struct cvmx_npi_rsl_int_blocks_cn38xx {
-		uint64_t reserved_32_63:32;
-		uint64_t rint_31:1;
-		uint64_t iob:1;
-		uint64_t rint_29:1;
-		uint64_t rint_28:1;
-		uint64_t rint_27:1;
-		uint64_t rint_26:1;
-		uint64_t rint_25:1;
-		uint64_t rint_24:1;
-		uint64_t asx1:1;
-		uint64_t asx0:1;
-		uint64_t rint_21:1;
-		uint64_t pip:1;
-		uint64_t spx1:1;
-		uint64_t spx0:1;
-		uint64_t lmc:1;
-		uint64_t l2c:1;
-		uint64_t rint_15:1;
-		uint64_t rint_14:1;
-		uint64_t rint_13:1;
-		uint64_t pow:1;
-		uint64_t tim:1;
-		uint64_t pko:1;
-		uint64_t ipd:1;
-		uint64_t rint_8:1;
-		uint64_t zip:1;
-		uint64_t dfa:1;
-		uint64_t fpa:1;
-		uint64_t key:1;
-		uint64_t npi:1;
-		uint64_t gmx1:1;
-		uint64_t gmx0:1;
-		uint64_t mio:1;
-	} cn38xx;
-	struct cvmx_npi_rsl_int_blocks_cn50xx {
-		uint64_t reserved_31_63:33;
-		uint64_t iob:1;
-		uint64_t lmc1:1;
-		uint64_t agl:1;
-		uint64_t reserved_24_27:4;
-		uint64_t asx1:1;
-		uint64_t asx0:1;
-		uint64_t reserved_21_21:1;
-		uint64_t pip:1;
-		uint64_t spx1:1;
-		uint64_t spx0:1;
-		uint64_t lmc:1;
-		uint64_t l2c:1;
-		uint64_t reserved_15_15:1;
-		uint64_t rad:1;
-		uint64_t usb:1;
-		uint64_t pow:1;
-		uint64_t tim:1;
-		uint64_t pko:1;
-		uint64_t ipd:1;
-		uint64_t reserved_8_8:1;
-		uint64_t zip:1;
-		uint64_t dfa:1;
-		uint64_t fpa:1;
-		uint64_t key:1;
-		uint64_t npi:1;
-		uint64_t gmx1:1;
-		uint64_t gmx0:1;
-		uint64_t mio:1;
-	} cn50xx;
-};
-
-union cvmx_pko_command_word0 {
-	uint64_t u64;
-	struct {
-		uint64_t total_bytes:16;
-		uint64_t segs:6;
-		uint64_t dontfree:1;
-		uint64_t ignore_i:1;
-		uint64_t ipoffp1:7;
-		uint64_t gather:1;
-		uint64_t rsp:1;
-		uint64_t wqp:1;
-		uint64_t n2:1;
-		uint64_t le:1;
-		uint64_t reg0:11;
-		uint64_t subone0:1;
-		uint64_t reg1:11;
-		uint64_t subone1:1;
-		uint64_t size0:2;
-		uint64_t size1:2;
-	} s;
-};
-
-union cvmx_ciu_timx {
-	uint64_t u64;
-	struct cvmx_ciu_timx_s {
-		uint64_t reserved_37_63:27;
-		uint64_t one_shot:1;
-		uint64_t len:36;
-	} s;
-};
-
-union cvmx_gmxx_rxx_rx_inbnd {
-	uint64_t u64;
-	struct cvmx_gmxx_rxx_rx_inbnd_s {
-		uint64_t status:1;
-		uint64_t speed:2;
-		uint64_t duplex:1;
-		uint64_t reserved_4_63:60;
-	} s;
-};
-
-static inline int32_t cvmx_fau_fetch_and_add32(enum cvmx_fau_reg_32 reg,
-					       int32_t value)
-{
-	return value;
-}
-
-static inline void cvmx_fau_atomic_add32(enum cvmx_fau_reg_32 reg,
-					 int32_t value)
-{ }
-
-static inline void cvmx_fau_atomic_write32(enum cvmx_fau_reg_32 reg,
-					   int32_t value)
-{ }
-
-static inline uint64_t cvmx_scratch_read64(uint64_t address)
-{
-	return 0;
-}
-
-static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
-{ }
-
-static inline int cvmx_wqe_get_grp(struct cvmx_wqe *work)
-{
-	return 0;
-}
-
-static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
-{
-	return (void *)(uintptr_t)(physical_address);
-}
-
-static inline uint64_t cvmx_ptr_to_phys(void *ptr)
-{
-	return (unsigned long)ptr;
-}
-
-static inline int cvmx_helper_get_interface_num(int ipd_port)
-{
-	return ipd_port;
-}
-
-static inline int cvmx_helper_get_interface_index_num(int ipd_port)
-{
-	return ipd_port;
-}
-
-static inline void cvmx_fpa_enable(void)
-{ }
-
-static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
-{
-	return 0;
-}
-
-static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
-{ }
-
-static inline int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
-{
-	return 0;
-}
-
-static inline void *cvmx_fpa_alloc(uint64_t pool)
-{
-	return NULL;
-}
-
-static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
-				 uint64_t num_cache_lines)
-{ }
-
-static inline int octeon_is_simulation(void)
-{
-	return 1;
-}
-
-static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
-					    cvmx_pip_port_status_t *status)
-{ }
-
-static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
-					    cvmx_pko_port_status_t *status)
-{ }
-
-static inline cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
-								   interface)
-{
-	return 0;
-}
-
-static inline union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port)
-{
-	union cvmx_helper_link_info ret = { .u64 = 0 };
-
-	return ret;
-}
-
-static inline int cvmx_helper_link_set(int ipd_port,
-				       union cvmx_helper_link_info link_info)
-{
-	return 0;
-}
-
-static inline int cvmx_helper_initialize_packet_io_global(void)
-{
-	return 0;
-}
-
-static inline int cvmx_helper_get_number_of_interfaces(void)
-{
-	return 2;
-}
-
-static inline int cvmx_helper_ports_on_interface(int interface)
-{
-	return 1;
-}
-
-static inline int cvmx_helper_get_ipd_port(int interface, int port)
-{
-	return 0;
-}
-
-static inline int cvmx_helper_ipd_and_packet_input_enable(void)
-{
-	return 0;
-}
-
-static inline void cvmx_ipd_disable(void)
-{ }
-
-static inline void cvmx_ipd_free_ptr(void)
-{ }
-
-static inline void cvmx_pko_disable(void)
-{ }
-
-static inline void cvmx_pko_shutdown(void)
-{ }
-
-static inline int cvmx_pko_get_base_queue_per_core(int port, int core)
-{
-	return port;
-}
-
-static inline int cvmx_pko_get_base_queue(int port)
-{
-	return port;
-}
-
-static inline int cvmx_pko_get_num_queues(int port)
-{
-	return port;
-}
-
-static inline unsigned int cvmx_get_core_num(void)
-{
-	return 0;
-}
-
-static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
-						       cvmx_pow_wait_t wait)
-{ }
-
-static inline void cvmx_pow_work_request_async(int scr_addr,
-						       cvmx_pow_wait_t wait)
-{ }
-
-static inline struct cvmx_wqe *cvmx_pow_work_response_async(int scr_addr)
-{
-	struct cvmx_wqe *wqe = (void *)(unsigned long)scr_addr;
-
-	return wqe;
-}
-
-static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
-{
-	return (void *)(unsigned long)wait;
-}
-
-static inline int cvmx_spi_restart_interface(int interface,
-					cvmx_spi_mode_t mode, int timeout)
-{
-	return 0;
-}
-
-static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
-						  enum cvmx_fau_reg_32 reg,
-						  int32_t value)
-{ }
-
-static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
-	int interface,
-	int port)
-{
-	union cvmx_gmxx_rxx_rx_inbnd r;
-
-	r.u64 = 0;
-	return r;
-}
-
-static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
-						cvmx_pko_lock_t use_locking)
-{ }
-
-static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t port,
-		uint64_t queue, union cvmx_pko_command_word0 pko_command,
-		union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking)
-{
-	return 0;
-}
-
-static inline void cvmx_wqe_set_port(struct cvmx_wqe *work, int port)
-{ }
-
-static inline void cvmx_wqe_set_qos(struct cvmx_wqe *work, int qos)
-{ }
-
-static inline int cvmx_wqe_get_qos(struct cvmx_wqe *work)
-{
-	return 0;
-}
-
-static inline void cvmx_wqe_set_grp(struct cvmx_wqe *work, int grp)
-{ }
-
-static inline void cvmx_pow_work_submit(struct cvmx_wqe *wqp, uint32_t tag,
-					enum cvmx_pow_tag_type tag_type,
-					uint64_t qos, uint64_t grp)
-{ }
-
-#define CVMX_ASXX_RX_CLK_SETX(a, b)	((a)+(b))
-#define CVMX_ASXX_TX_CLK_SETX(a, b)	((a)+(b))
-#define CVMX_CIU_TIMX(a)		(a)
-#define CVMX_GMXX_RXX_ADR_CAM0(a, b)	((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM1(a, b)	((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM2(a, b)	((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM3(a, b)	((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM4(a, b)	((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM5(a, b)	((a)+(b))
-#define CVMX_GMXX_RXX_FRM_CTL(a, b)	((a)+(b))
-#define CVMX_GMXX_RXX_INT_REG(a, b)	((a)+(b))
-#define CVMX_GMXX_SMACX(a, b)		((a)+(b))
-#define CVMX_PIP_PRT_TAGX(a)		(a)
-#define CVMX_POW_PP_GRP_MSKX(a)		(a)
-#define CVMX_POW_WQ_INT_THRX(a)		(a)
-#define CVMX_SPXX_INT_MSK(a)		(a)
-#define CVMX_SPXX_INT_REG(a)		(a)
-#define CVMX_SSO_PPX_GRP_MSK(a)		(a)
-#define CVMX_SSO_WQ_INT_THRX(a)		(a)
-#define CVMX_STXX_INT_MSK(a)		(a)
-#define CVMX_STXX_INT_REG(a)		(a)
diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index 6ec7e3c..4bc5d5f 100644
--- a/drivers/staging/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
@@ -63,7 +63,6 @@
 #define UDELAY_COUNT 3
 #define UDELAY_DELAY 100
 
-
 #define TX_DESC_PER_IOCB 8
 
 #if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
@@ -1627,18 +1626,18 @@ enum {
 #define MPI_COREDUMP_COOKIE 0x5555aaaa
 struct mpi_coredump_global_header {
 	u32	cookie;
-	u8	idString[16];
-	u32	timeLo;
-	u32	timeHi;
-	u32	imageSize;
-	u32	headerSize;
+	u8	id_string[16];
+	u32	time_lo;
+	u32	time_hi;
+	u32	image_size;
+	u32	header_size;
 	u8	info[220];
 };
 
 struct mpi_coredump_segment_header {
 	u32	cookie;
-	u32	segNum;
-	u32	segSize;
+	u32	seg_num;
+	u32	seg_size;
 	u32	extra;
 	u8	description[16];
 };
diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
index 83f34ca..8cf3961 100644
--- a/drivers/staging/qlge/qlge_dbg.c
+++ b/drivers/staging/qlge/qlge_dbg.c
@@ -142,10 +142,10 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
 	u32 *direct_ptr, temp;
 	u32 *indirect_ptr;
 
-
 	/* The XAUI needs to be read out per port */
 	status = ql_read_other_func_serdes_reg(qdev,
-			XG_SERDES_XAUI_HSS_PCS_START, &temp);
+					       XG_SERDES_XAUI_HSS_PCS_START,
+					       &temp);
 	if (status)
 		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
 
@@ -297,7 +297,6 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
 		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
 				   xfi_direct_valid, xfi_indirect_valid);
 
-
 	/* Get XAUI_XFI_HSS_PLL register block. */
 	if (qdev->func & 1) {
 		direct_ptr =
@@ -482,7 +481,8 @@ static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
 	int status;
 
 	for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
-		status = ql_write_mpi_reg(qdev, RISC_124,
+		status = ql_write_mpi_reg(qdev,
+					  RISC_124,
 				(SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
 		if (status)
 			goto end;
@@ -702,8 +702,8 @@ static void ql_build_coredump_seg_header(
 {
 	memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
 	seg_hdr->cookie = MPI_COREDUMP_COOKIE;
-	seg_hdr->segNum = seg_number;
-	seg_hdr->segSize = seg_size;
+	seg_hdr->seg_num = seg_number;
+	seg_hdr->seg_size = seg_size;
 	strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
 }
 
@@ -741,12 +741,12 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
 	memset(&(mpi_coredump->mpi_global_header), 0,
 	       sizeof(struct mpi_coredump_global_header));
 	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
-	mpi_coredump->mpi_global_header.headerSize =
+	mpi_coredump->mpi_global_header.header_size =
 		sizeof(struct mpi_coredump_global_header);
-	mpi_coredump->mpi_global_header.imageSize =
+	mpi_coredump->mpi_global_header.image_size =
 		sizeof(struct ql_mpi_coredump);
-	strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
-		sizeof(mpi_coredump->mpi_global_header.idString));
+	strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
+		sizeof(mpi_coredump->mpi_global_header.id_string));
 
 	/* Get generic NIC reg dump */
 	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
@@ -1108,7 +1108,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
 				+ sizeof(mpi_coredump->nic_routing_words),
 				"Routing Words");
 	status = ql_get_routing_entries(qdev,
-			&mpi_coredump->nic_routing_words[0]);
+					&mpi_coredump->nic_routing_words[0]);
 	if (status)
 		goto err;
 
@@ -1227,17 +1227,15 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
 {
 	int i, status;
 
-
 	memset(&(mpi_coredump->mpi_global_header), 0,
 	       sizeof(struct mpi_coredump_global_header));
 	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
-	mpi_coredump->mpi_global_header.headerSize =
+	mpi_coredump->mpi_global_header.header_size =
 		sizeof(struct mpi_coredump_global_header);
-	mpi_coredump->mpi_global_header.imageSize =
+	mpi_coredump->mpi_global_header.image_size =
 		sizeof(struct ql_reg_dump);
-	strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
-		sizeof(mpi_coredump->mpi_global_header.idString));
-
+	strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
+		sizeof(mpi_coredump->mpi_global_header.id_string));
 
 	/* segment 16 */
 	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
diff --git a/drivers/staging/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c
index 56d116d..790997a 100644
--- a/drivers/staging/qlge/qlge_ethtool.c
+++ b/drivers/staging/qlge/qlge_ethtool.c
@@ -32,7 +32,6 @@
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 
-
 #include "qlge.h"
 
 struct ql_stats {
@@ -197,8 +196,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
 	 */
 	cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
 	if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
-		le16_to_cpu(cqicb->pkt_delay) !=
-				qdev->tx_max_coalesced_frames) {
+	    le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
 		for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
 			rx_ring = &qdev->rx_ring[i];
 			cqicb = (struct cqicb *)rx_ring;
@@ -207,7 +205,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
 			    cpu_to_le16(qdev->tx_max_coalesced_frames);
 			cqicb->flags = FLAGS_LI;
 			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
-						CFG_LCQ, rx_ring->cq_id);
+					      CFG_LCQ, rx_ring->cq_id);
 			if (status) {
 				netif_err(qdev, ifup, qdev->ndev,
 					  "Failed to load CQICB.\n");
@@ -219,8 +217,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
 	/* Update the inbound (RSS) handler queues if they changed. */
 	cqicb = (struct cqicb *)&qdev->rx_ring[0];
 	if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
-		le16_to_cpu(cqicb->pkt_delay) !=
-					qdev->rx_max_coalesced_frames) {
+	    le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
 		for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
 			rx_ring = &qdev->rx_ring[i];
 			cqicb = (struct cqicb *)rx_ring;
@@ -229,7 +226,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
 			    cpu_to_le16(qdev->rx_max_coalesced_frames);
 			cqicb->flags = FLAGS_LI;
 			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
-						CFG_LCQ, rx_ring->cq_id);
+					      CFG_LCQ, rx_ring->cq_id);
 			if (status) {
 				netif_err(qdev, ifup, qdev->ndev,
 					  "Failed to load CQICB.\n");
@@ -332,6 +329,7 @@ static void ql_update_stats(struct ql_adapter *qdev)
 static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
 	int index;
+
 	switch (stringset) {
 	case ETH_SS_TEST:
 		memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
@@ -339,8 +337,8 @@ static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 	case ETH_SS_STATS:
 		for (index = 0; index < QLGE_STATS_LEN; index++) {
 			memcpy(buf + index * ETH_GSTRING_LEN,
-				ql_gstrings_stats[index].stat_string,
-				ETH_GSTRING_LEN);
+			       ql_gstrings_stats[index].stat_string,
+			       ETH_GSTRING_LEN);
 		}
 		break;
 	}
@@ -412,6 +410,7 @@ static void ql_get_drvinfo(struct net_device *ndev,
 			   struct ethtool_drvinfo *drvinfo)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
+
 	strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
 	strlcpy(drvinfo->version, qlge_driver_version,
 		sizeof(drvinfo->version));
@@ -431,7 +430,7 @@ static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
 
 	/* WOL is only supported for mezz card. */
 	if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
-			ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
+	    ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
 		wol->supported = WAKE_MAGIC;
 		wol->wolopts = qdev->wol;
 	}
@@ -444,9 +443,9 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
 
 	/* WOL is only supported for mezz card. */
 	if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
-			ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
+	    ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
 		netif_info(qdev, drv, qdev->ndev,
-				"WOL is only supported for mezz card\n");
+			   "WOL is only supported for mezz card\n");
 		return -EOPNOTSUPP;
 	}
 	if (wol->wolopts & ~WAKE_MAGIC)
@@ -506,7 +505,7 @@ static void ql_stop_loopback(struct ql_adapter *qdev)
 }
 
 static void ql_create_lb_frame(struct sk_buff *skb,
-					unsigned int frame_size)
+			       unsigned int frame_size)
 {
 	memset(skb->data, 0xFF, frame_size);
 	frame_size &= ~1;
@@ -516,13 +515,13 @@ static void ql_create_lb_frame(struct sk_buff *skb,
 }
 
 void ql_check_lb_frame(struct ql_adapter *qdev,
-					struct sk_buff *skb)
+		       struct sk_buff *skb)
 {
 	unsigned int frame_size = skb->len;
 
 	if ((*(skb->data + 3) == 0xFF) &&
-		(*(skb->data + frame_size / 2 + 10) == 0xBE) &&
-		(*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+	    (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+	    (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
 			atomic_dec(&qdev->lb_count);
 			return;
 	}
@@ -566,7 +565,7 @@ static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
 }
 
 static void ql_self_test(struct net_device *ndev,
-				struct ethtool_test *eth_test, u64 *data)
+			 struct ethtool_test *eth_test, u64 *data)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
 
@@ -672,7 +671,7 @@ static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
 }
 
 static void ql_get_pauseparam(struct net_device *netdev,
-			struct ethtool_pauseparam *pause)
+			      struct ethtool_pauseparam *pause)
 {
 	struct ql_adapter *qdev = netdev_priv(netdev);
 
@@ -684,7 +683,7 @@ static void ql_get_pauseparam(struct net_device *netdev,
 }
 
 static int ql_set_pauseparam(struct net_device *netdev,
-			struct ethtool_pauseparam *pause)
+			     struct ethtool_pauseparam *pause)
 {
 	struct ql_adapter *qdev = netdev_priv(netdev);
 	int status = 0;
@@ -703,12 +702,14 @@ static int ql_set_pauseparam(struct net_device *netdev,
 static u32 ql_get_msglevel(struct net_device *ndev)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
+
 	return qdev->msg_enable;
 }
 
 static void ql_set_msglevel(struct net_device *ndev, u32 value)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
+
 	qdev->msg_enable = value;
 }
 
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index ed5440d..ef8037d 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -77,14 +77,12 @@ MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
 static int qlge_mpi_coredump;
 module_param(qlge_mpi_coredump, int, 0);
 MODULE_PARM_DESC(qlge_mpi_coredump,
-		"Option to enable MPI firmware dump. "
-		"Default is OFF - Do Not allocate memory. ");
+		 "Option to enable MPI firmware dump. Default is OFF - Do Not allocate memory. ");
 
 static int qlge_force_coredump;
 module_param(qlge_force_coredump, int, 0);
 MODULE_PARM_DESC(qlge_force_coredump,
-		"Option to allow force of firmware core dump. "
-		"Default is OFF - Do not allow.");
+		 "Option to allow force of firmware core dump. Default is OFF - Do not allow.");
 
 static const struct pci_device_id qlge_pci_tbl[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
@@ -178,8 +176,9 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
 				    "register 0x%.08x access error, value = 0x%.08x!.\n",
 				    reg, temp);
 			return -EIO;
-		} else if (temp & bit)
+		} else if (temp & bit) {
 			return 0;
+		}
 		udelay(UDELAY_DELAY);
 	}
 	netif_alert(qdev, probe, qdev->ndev,
@@ -206,7 +205,6 @@ static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
 	return -ETIMEDOUT;
 }
 
-
 /* Used to issue init control blocks to hw. Maps control block,
  * sets address, triggers download, waits for completion.
  */
@@ -270,36 +268,34 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
 		{
 			status =
 			    ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 			if (status)
 				goto exit;
 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 			status =
-			    ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
 			if (status)
 				goto exit;
 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
 			status =
-			    ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 			if (status)
 				goto exit;
 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 			status =
-			    ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
 			if (status)
 				goto exit;
 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
 			if (type == MAC_ADDR_TYPE_CAM_MAC) {
 				status =
 				    ql_wait_reg_rdy(qdev,
-					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+						    MAC_ADDR_IDX, MAC_ADDR_MW,
+						    0);
 				if (status)
 					goto exit;
 				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -343,7 +339,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 
 			status =
 				ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 			if (status)
 				goto exit;
 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
@@ -352,7 +348,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 			ql_write32(qdev, MAC_ADDR_DATA, lower);
 			status =
 				ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 			if (status)
 				goto exit;
 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
@@ -362,7 +358,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 			ql_write32(qdev, MAC_ADDR_DATA, upper);
 			status =
 				ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 			if (status)
 				goto exit;
 			break;
@@ -375,8 +371,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
 			    (addr[5]);
 			status =
-			    ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 			if (status)
 				goto exit;
 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -384,8 +379,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 				   type);	/* type */
 			ql_write32(qdev, MAC_ADDR_DATA, lower);
 			status =
-			    ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 			if (status)
 				goto exit;
 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -393,16 +387,15 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 				   type);	/* type */
 			ql_write32(qdev, MAC_ADDR_DATA, upper);
 			status =
-			    ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 			if (status)
 				goto exit;
 			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
 				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
 				   type);	/* type */
 			/* This field should also include the queue id
-			   and possibly the function id.  Right now we hardcode
-			   the route field to NIC core.
+			 * and possibly the function id.  Right now we hardcode
+			 * the route field to NIC core.
 			 */
 			cam_output = (CAM_OUT_ROUTE_NIC |
 				      (qdev->
@@ -423,8 +416,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 			 * That's bit-27 we're talking about.
 			 */
 			status =
-			    ql_wait_reg_rdy(qdev,
-				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 			if (status)
 				goto exit;
 			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
@@ -467,7 +459,8 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
 	if (status)
 		return status;
 	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
-			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+				     MAC_ADDR_TYPE_CAM_MAC,
+				     qdev->func * MAX_CQ);
 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 	if (status)
 		netif_err(qdev, ifup, qdev->ndev,
@@ -672,17 +665,17 @@ static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
 	int status = 0;
 	/* wait for reg to come ready */
 	status = ql_wait_reg_rdy(qdev,
-			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 	if (status)
 		goto exit;
 	/* set up for reg read */
 	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
 	/* wait for reg to come ready */
 	status = ql_wait_reg_rdy(qdev,
-			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 	if (status)
 		goto exit;
-	 /* This data is stored on flash as an array of
+	/* This data is stored on flash as an array of
 	 * __le32.  Since ql_read32() returns cpu endian
 	 * we need to swap it back.
 	 */
@@ -721,8 +714,9 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
 	}
 
 	status = ql_validate_flash(qdev,
-			sizeof(struct flash_params_8000) / sizeof(u16),
-			"8000");
+				   sizeof(struct flash_params_8000) /
+				   sizeof(u16),
+				   "8000");
 	if (status) {
 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
 		status = -EINVAL;
@@ -734,12 +728,12 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
 	 */
 	if (qdev->flash.flash_params_8000.data_type1 == 2)
 		memcpy(mac_addr,
-			qdev->flash.flash_params_8000.mac_addr1,
-			qdev->ndev->addr_len);
+		       qdev->flash.flash_params_8000.mac_addr1,
+		       qdev->ndev->addr_len);
 	else
 		memcpy(mac_addr,
-			qdev->flash.flash_params_8000.mac_addr,
-			qdev->ndev->addr_len);
+		       qdev->flash.flash_params_8000.mac_addr,
+		       qdev->ndev->addr_len);
 
 	if (!is_valid_ether_addr(mac_addr)) {
 		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
@@ -748,8 +742,8 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
 	}
 
 	memcpy(qdev->ndev->dev_addr,
-		mac_addr,
-		qdev->ndev->addr_len);
+	       mac_addr,
+	       qdev->ndev->addr_len);
 
 exit:
 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
@@ -784,8 +778,9 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
 	}
 
 	status = ql_validate_flash(qdev,
-			sizeof(struct flash_params_8012) / sizeof(u16),
-			"8012");
+				   sizeof(struct flash_params_8012) /
+				   sizeof(u16),
+				   "8012");
 	if (status) {
 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
 		status = -EINVAL;
@@ -798,8 +793,8 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
 	}
 
 	memcpy(qdev->ndev->dev_addr,
-		qdev->flash.flash_params_8012.mac_addr,
-		qdev->ndev->addr_len);
+	       qdev->flash.flash_params_8012.mac_addr,
+	       qdev->ndev->addr_len);
 
 exit:
 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
@@ -815,7 +810,7 @@ static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
 	int status;
 	/* wait for reg to come ready */
 	status = ql_wait_reg_rdy(qdev,
-			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 	if (status)
 		return status;
 	/* write the data to the data reg */
@@ -834,14 +829,14 @@ int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
 	int status = 0;
 	/* wait for reg to come ready */
 	status = ql_wait_reg_rdy(qdev,
-			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 	if (status)
 		goto exit;
 	/* set up for reg read */
 	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
 	/* wait for reg to come ready */
 	status = ql_wait_reg_rdy(qdev,
-			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 	if (status)
 		goto exit;
 	/* get the data */
@@ -1436,10 +1431,9 @@ static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
 
 /* Process an inbound completion from an rx ring. */
 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
-					struct rx_ring *rx_ring,
-					struct ib_mac_iocb_rsp *ib_mac_rsp,
-					u32 length,
-					u16 vlan_id)
+				       struct rx_ring *rx_ring,
+				       struct ib_mac_iocb_rsp *ib_mac_rsp,
+				       u32 length, u16 vlan_id)
 {
 	struct sk_buff *skb;
 	struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
@@ -1483,10 +1477,9 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
 
 /* Process an inbound completion from an rx ring. */
 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
-					struct rx_ring *rx_ring,
-					struct ib_mac_iocb_rsp *ib_mac_rsp,
-					u32 length,
-					u16 vlan_id)
+				   struct rx_ring *rx_ring,
+				   struct ib_mac_iocb_rsp *ib_mac_rsp,
+				   u32 length, u16 vlan_id)
 {
 	struct net_device *ndev = qdev->ndev;
 	struct sk_buff *skb = NULL;
@@ -1528,8 +1521,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
 		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
 		     length);
 	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
-				lbq_desc->p.pg_chunk.offset + hlen,
-				length - hlen);
+			   lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
 	skb->len += length - hlen;
 	skb->data_len += length - hlen;
 	skb->truesize += length - hlen;
@@ -1540,7 +1532,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
 	skb_checksum_none_assert(skb);
 
 	if ((ndev->features & NETIF_F_RXCSUM) &&
-		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
 		/* TCP frame. */
 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1576,10 +1568,9 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
 
 /* Process an inbound completion from an rx ring. */
 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
-					struct rx_ring *rx_ring,
-					struct ib_mac_iocb_rsp *ib_mac_rsp,
-					u32 length,
-					u16 vlan_id)
+				  struct rx_ring *rx_ring,
+				  struct ib_mac_iocb_rsp *ib_mac_rsp,
+				  u32 length, u16 vlan_id)
 {
 	struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
 	struct net_device *ndev = qdev->ndev;
@@ -1648,7 +1639,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
 	 * csum or frame errors.
 	 */
 	if ((ndev->features & NETIF_F_RXCSUM) &&
-		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
 		/* TCP frame. */
 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1779,8 +1770,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 				     "Chaining page at offset = %d, for %d bytes  to skb.\n",
 				     lbq_desc->p.pg_chunk.offset, length);
 			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
-						lbq_desc->p.pg_chunk.offset,
-						length);
+					   lbq_desc->p.pg_chunk.offset, length);
 			skb->len += length;
 			skb->data_len += length;
 			skb->truesize += length;
@@ -1804,10 +1794,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
 				     length);
-			skb_fill_page_desc(skb, 0,
-						lbq_desc->p.pg_chunk.page,
-						lbq_desc->p.pg_chunk.offset,
-						length);
+			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+					   lbq_desc->p.pg_chunk.offset,
+					   length);
 			skb->len += length;
 			skb->data_len += length;
 			skb->truesize += length;
@@ -1857,9 +1846,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 				     "Adding page %d to skb for %d bytes.\n",
 				     i, size);
 			skb_fill_page_desc(skb, i,
-						lbq_desc->p.pg_chunk.page,
-						lbq_desc->p.pg_chunk.offset,
-						size);
+					   lbq_desc->p.pg_chunk.page,
+					   lbq_desc->p.pg_chunk.offset, size);
 			skb->len += size;
 			skb->data_len += size;
 			skb->truesize += size;
@@ -1875,9 +1863,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 
 /* Process an inbound completion from an rx ring. */
 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
-				   struct rx_ring *rx_ring,
-				   struct ib_mac_iocb_rsp *ib_mac_rsp,
-				   u16 vlan_id)
+					 struct rx_ring *rx_ring,
+					 struct ib_mac_iocb_rsp *ib_mac_rsp,
+					 u16 vlan_id)
 {
 	struct net_device *ndev = qdev->ndev;
 	struct sk_buff *skb = NULL;
@@ -1938,7 +1926,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
 	 * csum or frame errors.
 	 */
 	if ((ndev->features & NETIF_F_RXCSUM) &&
-		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
 		/* TCP frame. */
 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1970,8 +1958,8 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
 
 /* Process an inbound completion from an rx ring. */
 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
-					struct rx_ring *rx_ring,
-					struct ib_mac_iocb_rsp *ib_mac_rsp)
+					    struct rx_ring *rx_ring,
+					    struct ib_mac_iocb_rsp *ib_mac_rsp)
 {
 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
 	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
@@ -1986,34 +1974,34 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
 		 * separate buffers.
 		 */
 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
-						vlan_id);
+					     vlan_id);
 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
 		/* The data fit in a single small buffer.
 		 * Allocate a new skb, copy the data and
 		 * return the buffer to the free pool.
 		 */
-		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
-						length, vlan_id);
+		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
+				      vlan_id);
 	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
 		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
 		/* TCP packet in a page chunk that's been checksummed.
 		 * Tack it on to our GRO skb and let it go.
 		 */
-		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
-						length, vlan_id);
+		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
+					   vlan_id);
 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
 		/* Non-TCP packet in a page chunk. Allocate an
 		 * skb, tack it on frags, and send it up.
 		 */
-		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
-						length, vlan_id);
+		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
+				       vlan_id);
 	} else {
 		/* Non-TCP/UDP large frames that span multiple buffers
 		 * can be processed corrrectly by the split frame logic.
 		 */
 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
-						vlan_id);
+					     vlan_id);
 	}
 
 	return (unsigned long)length;
@@ -2222,15 +2210,16 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
 		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
 
 	/* Service the TX rings first.  They start
-	 * right after the RSS rings. */
+	 * right after the RSS rings.
+	 */
 	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
 		trx_ring = &qdev->rx_ring[i];
 		/* If this TX completion ring belongs to this vector and
 		 * it's not empty then service it.
 		 */
 		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
-			(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
-					trx_ring->cnsmr_idx)) {
+		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
+		     trx_ring->cnsmr_idx)) {
 			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
 				     "%s: Servicing TX completion ring %d.\n",
 				     __func__, trx_ring->cq_id);
@@ -2304,7 +2293,7 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
 }
 
 static int qlge_set_features(struct net_device *ndev,
-	netdev_features_t features)
+			     netdev_features_t features)
 {
 	netdev_features_t changed = ndev->features ^ features;
 	int err;
@@ -2447,7 +2436,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 	 * Check MPI processor activity.
 	 */
 	if ((var & STS_PI) &&
-		(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
+	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
 		/*
 		 * We've got an async event or mailbox completion.
 		 * Handle it and clear the source of the interrupt.
@@ -2456,7 +2445,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 			  "Got MPI processor interrupt.\n");
 		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
 		queue_delayed_work_on(smp_processor_id(),
-				qdev->workqueue, &qdev->mpi_work, 0);
+				      qdev->workqueue, &qdev->mpi_work, 0);
 		work_done++;
 	}
 
@@ -2639,7 +2628,6 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 }
 
-
 static void ql_free_shadow_space(struct ql_adapter *qdev)
 {
 	if (qdev->rx_ring_shadow_reg_area) {
@@ -2887,7 +2875,8 @@ static void ql_free_rx_resources(struct ql_adapter *qdev,
 }
 
 /* Allocate queues and buffers for this completions queue based
- * on the values in the parameter structure. */
+ * on the values in the parameter structure.
+ */
 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
 				 struct rx_ring *rx_ring)
 {
@@ -3530,19 +3519,17 @@ static int ql_route_initialize(struct ql_adapter *qdev)
 		return status;
 
 	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
-						RT_IDX_IP_CSUM_ERR, 1);
+				    RT_IDX_IP_CSUM_ERR, 1);
 	if (status) {
 		netif_err(qdev, ifup, qdev->ndev,
-			"Failed to init routing register "
-			"for IP CSUM error packets.\n");
+			  "Failed to init routing register for IP CSUM error packets.\n");
 		goto exit;
 	}
 	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
-						RT_IDX_TU_CSUM_ERR, 1);
+				    RT_IDX_TU_CSUM_ERR, 1);
 	if (status) {
 		netif_err(qdev, ifup, qdev->ndev,
-			"Failed to init routing register "
-			"for TCP/UDP CSUM error packets.\n");
+			  "Failed to init routing register for TCP/UDP CSUM error packets.\n");
 		goto exit;
 	}
 	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
@@ -3556,7 +3543,7 @@ static int ql_route_initialize(struct ql_adapter *qdev)
 	 */
 	if (qdev->rss_ring_count > 1) {
 		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
-					RT_IDX_RSS_MATCH, 1);
+					    RT_IDX_RSS_MATCH, 1);
 		if (status) {
 			netif_err(qdev, ifup, qdev->ndev,
 				  "Failed to init routing register for MATCH RSS packets.\n");
@@ -3654,7 +3641,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
 
 	/* Default WOL is enable on Mezz cards */
 	if (qdev->pdev->subsystem_device == 0x0068 ||
-			qdev->pdev->subsystem_device == 0x0180)
+	    qdev->pdev->subsystem_device == 0x0180)
 		qdev->wol = WAKE_MAGIC;
 
 	/* Start up the rx queues. */
@@ -3731,8 +3718,9 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
 
 		/* Wait for the NIC and MGMNT FIFOs to empty. */
 		ql_wait_fifo_empty(qdev);
-	} else
+	} else {
 		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
+	}
 
 	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
 
@@ -3880,7 +3868,7 @@ static int ql_adapter_up(struct ql_adapter *qdev)
 	 * link is up the turn on the carrier.
 	 */
 	if ((ql_read32(qdev, STS) & qdev->port_init) &&
-			(ql_read32(qdev, STS) & qdev->port_link_up))
+	    (ql_read32(qdev, STS) & qdev->port_link_up))
 		ql_link_on(qdev);
 	/* Restore rx mode. */
 	clear_bit(QL_ALLMULTI, &qdev->flags);
@@ -4099,21 +4087,20 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
 	struct ql_adapter *qdev = netdev_priv(ndev);
 	int status;
 
-	if (ndev->mtu == 1500 && new_mtu == 9000) {
+	if (ndev->mtu == 1500 && new_mtu == 9000)
 		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
-	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
+	else if (ndev->mtu == 9000 && new_mtu == 1500)
 		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
-	} else
+	else
 		return -EINVAL;
 
 	queue_delayed_work(qdev->workqueue,
-			&qdev->mpi_port_cfg_work, 3*HZ);
+			   &qdev->mpi_port_cfg_work, 3 * HZ);
 
 	ndev->mtu = new_mtu;
 
-	if (!netif_running(qdev->ndev)) {
+	if (!netif_running(qdev->ndev))
 		return 0;
-	}
 
 	status = ql_change_rx_buffers(qdev);
 	if (status) {
@@ -4267,7 +4254,8 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
 	if (status)
 		return status;
 	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
-			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+				     MAC_ADDR_TYPE_CAM_MAC,
+				     qdev->func * MAX_CQ);
 	if (status)
 		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -4334,7 +4322,7 @@ static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
 	u32 nic_func1, nic_func2;
 
 	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
-			&temp);
+				 &temp);
 	if (status)
 		return status;
 
@@ -4578,11 +4566,12 @@ static int qlge_probe(struct pci_dev *pdev,
 {
 	struct net_device *ndev = NULL;
 	struct ql_adapter *qdev = NULL;
-	static int cards_found = 0;
+	static int cards_found;
 	int err = 0;
 
 	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
-			min(MAX_CPUS, netif_get_num_default_rss_queues()));
+				 min(MAX_CPUS,
+				     netif_get_num_default_rss_queues()));
 	if (!ndev)
 		return -ENOMEM;
 
diff --git a/drivers/staging/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c
index 9e422bb..bb03b2f 100644
--- a/drivers/staging/qlge/qlge_mpi.c
+++ b/drivers/staging/qlge/qlge_mpi.c
@@ -134,7 +134,7 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
 	for (i = 0; i < mbcp->out_count; i++) {
 		status =
 		    ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
-				     &mbcp->mbox_out[i]);
+				    &mbcp->mbox_out[i]);
 		if (status) {
 			netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
 			break;
@@ -184,7 +184,7 @@ static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
 	 */
 	for (i = 0; i < mbcp->in_count; i++) {
 		status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
-						mbcp->mbox_in[i]);
+					  mbcp->mbox_in[i]);
 		if (status)
 			goto end;
 	}
@@ -293,7 +293,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
 		 */
 		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
 		queue_delayed_work(qdev->workqueue,
-				&qdev->mpi_port_cfg_work, 0);
+				   &qdev->mpi_port_cfg_work, 0);
 	}
 
 	ql_link_on(qdev);
@@ -544,7 +544,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
 	if (status)
 		goto end;
 
-
 	/* If we're generating a system error, then there's nothing
 	 * to wait for.
 	 */
@@ -730,7 +729,6 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
 	mbcp->mbox_in[1] = qdev->link_config;
 	mbcp->mbox_in[2] = qdev->max_frame_size;
 
-
 	status = ql_mailbox_command(qdev, mbcp);
 	if (status)
 		return status;
@@ -747,7 +745,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
 }
 
 static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
-	u32 size)
+			  u32 size)
 {
 	int status = 0;
 	struct mbox_params mbc;
@@ -768,7 +766,6 @@ static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
 	mbcp->mbox_in[7] = LSW(MSD(req_dma));
 	mbcp->mbox_in[8] = MSW(addr);
 
-
 	status = ql_mailbox_command(qdev, mbcp);
 	if (status)
 		return status;
@@ -782,14 +779,14 @@ static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
 
 /* Issue a mailbox command to dump RISC RAM. */
 int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
-		u32 ram_addr, int word_count)
+			  u32 ram_addr, int word_count)
 {
 	int status;
 	char *my_buf;
 	dma_addr_t buf_dma;
 
 	my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
-					&buf_dma);
+				      &buf_dma);
 	if (!my_buf)
 		return -EIO;
 
@@ -798,7 +795,7 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
 		memcpy(buf, my_buf, word_count * sizeof(u32));
 
 	pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
-				buf_dma);
+			    buf_dma);
 	return status;
 }
 
@@ -850,7 +847,6 @@ int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
 	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
 	mbcp->mbox_in[1] = wol;
 
-
 	status = ql_mailbox_command(qdev, mbcp);
 	if (status)
 		return status;
@@ -922,7 +918,7 @@ static int ql_idc_wait(struct ql_adapter *qdev)
 		 */
 		wait_time =
 			wait_for_completion_timeout(&qdev->ide_completion,
-							wait_time);
+						    wait_time);
 		if (!wait_time) {
 			netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
 			break;
@@ -965,7 +961,6 @@ int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
 	mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
 	mbcp->mbox_in[1] = led_config;
 
-
 	status = ql_mailbox_command(qdev, mbcp);
 	if (status)
 		return status;
@@ -1130,8 +1125,7 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
 	}
 
 	if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
-			qdev->max_frame_size ==
-			CFG_DEFAULT_MAX_FRAME_SIZE)
+	    qdev->max_frame_size == CFG_DEFAULT_MAX_FRAME_SIZE)
 		goto end;
 
 	qdev->link_config |=	CFG_JUMBO_FRAME_SIZE;
@@ -1278,7 +1272,7 @@ void ql_mpi_reset_work(struct work_struct *work)
 		netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
 		qdev->core_is_dumped = 1;
 		queue_delayed_work(qdev->workqueue,
-			&qdev->mpi_core_to_log, 5 * HZ);
+				   &qdev->mpi_core_to_log, 5 * HZ);
 	}
 	ql_soft_reset_mpi_risc(qdev);
 }
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 88e42cc..93283c7 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -429,7 +429,7 @@ static void update_bmc_sta(struct adapter *padapter)
 
 		/* prepare for add_RATid */
 		supportRateNum = rtw_get_rateset_len((u8 *)&pcur_network->SupportedRates);
-		network_type = rtw_check_network_type((u8 *)&pcur_network->SupportedRates, supportRateNum, 1);
+		network_type = rtw_check_network_type((u8 *)&pcur_network->SupportedRates);
 
 		memcpy(psta->bssrateset, &pcur_network->SupportedRates, supportRateNum);
 		psta->bssratelen = supportRateNum;
@@ -802,7 +802,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
 		supportRateNum += ie_len;
 	}
 
-	network_type = rtw_check_network_type(supportRate, supportRateNum, channel);
+	network_type = rtw_check_network_type(supportRate);
 
 	rtw_set_supported_rate(pbss_network->SupportedRates, network_type);
 
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index d9b0f9e..c525682 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -402,7 +402,6 @@ static u16 Efuse_GetCurrentSize(struct adapter *pAdapter)
 int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
 {
 	u8 ReadState = PG_STATE_HEADER;
-	int	bContinual = true;
 	int	bDataEmpty = true;
 	u8 efuse_data, word_cnts = 0;
 	u16	efuse_addr = 0;
@@ -422,7 +421,7 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
 	/*  <Roger_TODO> Efuse has been pre-programmed dummy 5Bytes at the end of Efuse by CP. */
 	/*  Skip dummy parts to prevent unexpected data read from Efuse. */
 	/*  By pass right now. 2009.02.19. */
-	while (bContinual && AVAILABLE_EFUSE_ADDR(efuse_addr)) {
+	while (AVAILABLE_EFUSE_ADDR(efuse_addr)) {
 		/*   Header Read ------------- */
 		if (ReadState & PG_STATE_HEADER) {
 			if (efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data) && (efuse_data != 0xFF)) {
@@ -464,7 +463,7 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
 					ReadState = PG_STATE_HEADER;
 				}
 			} else {
-				bContinual = false;
+				break;
 			}
 		} else if (ReadState & PG_STATE_DATA) {
 			/*   Data section Read ------------- */
@@ -498,8 +497,8 @@ static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, st
 
 			if (!PgWriteSuccess)
 				return false;
-			else
-				efuse_addr = Efuse_GetCurrentSize(pAdapter);
+
+			efuse_addr = Efuse_GetCurrentSize(pAdapter);
 		} else {
 			efuse_addr = efuse_addr + (pFixPkt->word_cnts * 2) + 1;
 		}
@@ -714,10 +713,9 @@ static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u
 				if (ALL_WORDS_DISABLED(efuse_data)) {
 					ret = false;
 					break;
-				} else {
-					curPkt.offset = ((cur_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
-					curPkt.word_en = efuse_data & 0x0F;
 				}
+				curPkt.offset = ((cur_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
+				curPkt.word_en = efuse_data & 0x0F;
 			} else {
 				cur_header  =  efuse_data;
 				curPkt.offset = (cur_header >> 4) & 0x0F;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index cc1b543..29f6154 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -98,7 +98,7 @@ bool rtw_is_cckratesonly_included(u8 *rate)
 	return true;
 }
 
-int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
+int rtw_check_network_type(unsigned char *rate)
 {
 	/*  could be pure B, pure G, or B/G */
 	if (rtw_is_cckratesonly_included(rate))
@@ -157,11 +157,10 @@ u8 *rtw_get_ie(u8 *pbuf, int index, uint *len, int limit)
 		if (*p == index) {
 			*len = *(p + 1);
 			return p;
-		} else {
-			tmp = *(p + 1);
-			p += (tmp + 2);
-			i += (tmp + 2);
 		}
+		tmp = *(p + 1);
+		p += (tmp + 2);
+		i += (tmp + 2);
 		if (i >= limit)
 			break;
 	}
@@ -295,10 +294,9 @@ unsigned char *rtw_get_wpa_ie(unsigned char *pie, uint *wpa_ie_len, int limit)
 				goto check_next_ie;
 			*wpa_ie_len = *(pbuf + 1);
 			return pbuf;
-		} else {
-			*wpa_ie_len = 0;
-			return NULL;
 		}
+		*wpa_ie_len = 0;
+		return NULL;
 
 check_next_ie:
 		limit_new = limit - (pbuf - pie) - 2 - len;
@@ -596,9 +594,8 @@ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
 			cnt += in_ie[cnt + 1] + 2;
 
 			break;
-		} else {
-			cnt += in_ie[cnt + 1] + 2; /* goto next */
 		}
+		cnt += in_ie[cnt + 1] + 2; /* goto next */
 	}
 	return wpsie_ptr;
 }
@@ -642,9 +639,8 @@ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_att
 			if (len_attr)
 				*len_attr = attr_len;
 			break;
-		} else {
-			attr_ptr += attr_len; /* goto next */
 		}
+		attr_ptr += attr_len; /* goto next */
 	}
 	return target_attr_ptr;
 }
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index e984b46..36841d2 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -291,7 +291,7 @@ static int update_hidden_ssid(u8 *ies, u32 ies_len, u8 hidden_ssid_mode)
 			remain_len = ies_len - (next_ie - ies);
 
 			ssid_ie[1] = 0;
-			memcpy(ssid_ie+2, next_ie, remain_len);
+			memcpy(ssid_ie + 2, next_ie, remain_len);
 			len_diff -= ssid_len_ori;
 
 			break;
@@ -363,14 +363,14 @@ static void issue_beacon(struct adapter *padapter, int timeout_ms)
 
 		memcpy(pframe, cur_network->ies, cur_network->ie_length);
 		len_diff = update_hidden_ssid(
-			pframe+_BEACON_IE_OFFSET_
-			, cur_network->ie_length-_BEACON_IE_OFFSET_
+			pframe + _BEACON_IE_OFFSET_
+			, cur_network->ie_length - _BEACON_IE_OFFSET_
 			, pmlmeinfo->hidden_ssid_mode
 			);
-		pframe += (cur_network->ie_length+len_diff);
-		pattrib->pktlen += (cur_network->ie_length+len_diff);
-		wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
-			pattrib->pktlen-sizeof(struct ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
+		pframe += (cur_network->ie_length + len_diff);
+		pattrib->pktlen += (cur_network->ie_length + len_diff);
+		wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr + TXDESC_OFFSET + sizeof(struct ieee80211_hdr_3addr) + _BEACON_IE_OFFSET_,
+			pattrib->pktlen - sizeof(struct ieee80211_hdr_3addr) - _BEACON_IE_OFFSET_, NULL, &wps_ielen);
 		if (wps_ie && wps_ielen > 0)
 			rtw_get_wps_attr_content(wps_ie,  wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
 		if (sr != 0)
@@ -504,7 +504,7 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
 
 #if defined(CONFIG_88EU_AP_MODE)
 	if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
-		pwps_ie = rtw_get_wps_ie(cur_network->ies+_FIXED_IE_LENGTH_, cur_network->ie_length-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
+		pwps_ie = rtw_get_wps_ie(cur_network->ies + _FIXED_IE_LENGTH_, cur_network->ie_length - _FIXED_IE_LENGTH_, NULL, &wps_ielen);
 
 		/* inerset & update wps_probe_resp_ie */
 		if (pmlmepriv->wps_probe_resp_ie && pwps_ie && wps_ielen > 0) {
@@ -522,13 +522,13 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
 			pattrib->pktlen += wps_offset;
 
 			wps_ielen = (uint)pmlmepriv->wps_probe_resp_ie[1];/* to get ie data len */
-			if ((wps_offset+wps_ielen+2) <= MAX_IE_SZ) {
-				memcpy(pframe, pmlmepriv->wps_probe_resp_ie, wps_ielen+2);
-				pframe += wps_ielen+2;
-				pattrib->pktlen += wps_ielen+2;
+			if ((wps_offset + wps_ielen + 2) <= MAX_IE_SZ) {
+				memcpy(pframe, pmlmepriv->wps_probe_resp_ie, wps_ielen + 2);
+				pframe += wps_ielen + 2;
+				pattrib->pktlen += wps_ielen + 2;
 			}
 
-			if ((wps_offset+wps_ielen+2+remainder_ielen) <= MAX_IE_SZ) {
+			if ((wps_offset + wps_ielen + 2 + remainder_ielen) <= MAX_IE_SZ) {
 				memcpy(pframe, premainder_ie, remainder_ielen);
 				pframe += remainder_ielen;
 				pattrib->pktlen += remainder_ielen;
@@ -751,7 +751,7 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
 	struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
 
 	pmgntframe = alloc_mgtxmitframe(pxmitpriv);
-	if (pmgntframe == NULL)
+	if (!pmgntframe)
 		return;
 
 	/* update attribute */
@@ -943,7 +943,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
 		pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, pstat->bssratelen, pstat->bssrateset, &pattrib->pktlen);
 	} else {
 		pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, pstat->bssrateset, &pattrib->pktlen);
-		pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, pstat->bssratelen-8, pstat->bssrateset+8, &pattrib->pktlen);
+		pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, pstat->bssratelen - 8, pstat->bssrateset + 8, &pattrib->pktlen);
 	}
 
 	if ((pstat->flags & WLAN_STA_HT) && (pmlmepriv->htpriv.ht_option)) {
@@ -952,17 +952,17 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
 		/* FILL HT CAP INFO IE */
 		pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_));
 		if (pbuf && ie_len > 0) {
-			memcpy(pframe, pbuf, ie_len+2);
-			pframe += (ie_len+2);
-			pattrib->pktlen += (ie_len+2);
+			memcpy(pframe, pbuf, ie_len + 2);
+			pframe += (ie_len + 2);
+			pattrib->pktlen += (ie_len + 2);
 		}
 
 		/* FILL HT ADD INFO IE */
 		pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_));
 		if (pbuf && ie_len > 0) {
-			memcpy(pframe, pbuf, ie_len+2);
-			pframe += (ie_len+2);
-			pattrib->pktlen += (ie_len+2);
+			memcpy(pframe, pbuf, ie_len + 2);
+			pframe += (ie_len + 2);
+			pattrib->pktlen += (ie_len + 2);
 		}
 	}
 
@@ -973,14 +973,14 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
 
 		for (pbuf = ie + _BEACON_IE_OFFSET_;; pbuf += (ie_len + 2)) {
 			pbuf = rtw_get_ie(pbuf, _VENDOR_SPECIFIC_IE_, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2)));
-			if (pbuf && !memcmp(pbuf+2, WMM_PARA_IE, 6)) {
-				memcpy(pframe, pbuf, ie_len+2);
-				pframe += (ie_len+2);
-				pattrib->pktlen += (ie_len+2);
+			if (pbuf && !memcmp(pbuf + 2, WMM_PARA_IE, 6)) {
+				memcpy(pframe, pbuf, ie_len + 2);
+				pframe += (ie_len + 2);
+				pattrib->pktlen += (ie_len + 2);
 				break;
 			}
 
-			if ((pbuf == NULL) || (ie_len == 0))
+			if (!pbuf || ie_len == 0)
 				break;
 		}
 	}
@@ -1082,8 +1082,8 @@ static void issue_assocreq(struct adapter *padapter)
 		/*  Check if the AP's supported rates are also supported by STA. */
 		for (j = 0; j < sta_bssrate_len; j++) {
 			/*  Avoid the proprietary data rate (22Mbps) of Handlink WSG-4000 AP */
-			if ((pmlmeinfo->network.SupportedRates[i]|IEEE80211_BASIC_RATE_MASK)
-					== (sta_bssrate[j]|IEEE80211_BASIC_RATE_MASK))
+			if ((pmlmeinfo->network.SupportedRates[i] | IEEE80211_BASIC_RATE_MASK)
+					== (sta_bssrate[j] | IEEE80211_BASIC_RATE_MASK))
 				break;
 		}
 
@@ -1120,7 +1120,7 @@ static void issue_assocreq(struct adapter *padapter)
 	/* HT caps */
 	if (padapter->mlmepriv.htpriv.ht_option) {
 		p = rtw_get_ie((pmlmeinfo->network.ies + sizeof(struct ndis_802_11_fixed_ie)), _HT_CAPABILITY_IE_, &ie_len, (pmlmeinfo->network.ie_length - sizeof(struct ndis_802_11_fixed_ie)));
-		if ((p != NULL) && (!(is_ap_in_tkip(padapter)))) {
+		if (p && !is_ap_in_tkip(padapter)) {
 			memcpy(&pmlmeinfo->HT_caps, p + 2, sizeof(struct ieee80211_ht_cap));
 
 			/* to disable 40M Hz support while gd_bw_40MHz_en = 0 */
@@ -1263,7 +1263,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da,
 	struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
 
 	/* da == NULL, assume it's null data for sta to ap*/
-	if (da == NULL)
+	if (!da)
 		da = pnetwork->MacAddress;
 
 	do {
@@ -1392,7 +1392,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
 	struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
 
 	/* da == NULL, assume it's null data for sta to ap*/
-	if (da == NULL)
+	if (!da)
 		da = pnetwork->MacAddress;
 
 	do {
@@ -1444,7 +1444,7 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da,
 	__le16 le_tmp;
 
 	pmgntframe = alloc_mgtxmitframe(pxmitpriv);
-	if (pmgntframe == NULL)
+	if (!pmgntframe)
 		goto exit;
 
 	/* update attribute */
@@ -1780,7 +1780,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
 			pbss_network = (struct wlan_bssid_ex *)&pnetwork->network;
 
 			p = rtw_get_ie(pbss_network->ies + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->ie_length - _FIXED_IE_LENGTH_);
-			if ((p == NULL) || (len == 0)) { /* non-HT */
+			if (!p || len == 0) { /* non-HT */
 				if ((pbss_network->Configuration.DSConfig <= 0) || (pbss_network->Configuration.DSConfig > 14))
 					continue;
 
@@ -1833,7 +1833,7 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
 			return _SUCCESS;
 
 	psta = rtw_get_stainfo(pstapriv, addr);
-	if (psta == NULL)
+	if (!psta)
 		return _SUCCESS;
 
 	if (initiator == 0) { /*  recipient */
@@ -1865,6 +1865,7 @@ unsigned int send_beacon(struct adapter *padapter)
 	int issue = 0;
 	int poll = 0;
 	unsigned long start = jiffies;
+	u32 passing_time;
 
 	rtw_hal_set_hwreg(padapter, HW_VAR_BCN_VALID, NULL);
 	do {
@@ -1874,7 +1875,7 @@ unsigned int send_beacon(struct adapter *padapter)
 			yield();
 			rtw_hal_get_hwreg(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
 			poll++;
-		} while ((poll%10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
+		} while ((poll % 10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
 	} while (!bxmitok && issue < 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
 
 	if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
@@ -1883,15 +1884,14 @@ unsigned int send_beacon(struct adapter *padapter)
 		DBG_88E("%s fail! %u ms\n", __func__,
 			jiffies_to_msecs(jiffies - start));
 		return _FAIL;
-	} else {
-		u32 passing_time = jiffies_to_msecs(jiffies - start);
-
-		if (passing_time > 100 || issue > 3)
-			DBG_88E("%s success, issue:%d, poll:%d, %u ms\n",
-				__func__, issue, poll,
-				jiffies_to_msecs(jiffies - start));
-		return _SUCCESS;
 	}
+	passing_time = jiffies_to_msecs(jiffies - start);
+
+	if (passing_time > 100 || issue > 3)
+		DBG_88E("%s success, issue:%d, poll:%d, %u ms\n",
+			__func__, issue, poll,
+			jiffies_to_msecs(jiffies - start));
+	return _SUCCESS;
 }
 
 /****************************************************************************
@@ -2082,7 +2082,7 @@ static u8 collect_bss_info(struct adapter *padapter,
 	/* checking rate info... */
 	i = 0;
 	p = rtw_get_ie(bssid->ies + ie_offset, _SUPPORTEDRATES_IE_, &len, bssid->ie_length - ie_offset);
-	if (p != NULL) {
+	if (p) {
 		if (len > NDIS_802_11_LENGTH_RATES_EX) {
 			DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
 			return _FAIL;
@@ -2093,7 +2093,7 @@ static u8 collect_bss_info(struct adapter *padapter,
 
 	p = rtw_get_ie(bssid->ies + ie_offset, _EXT_SUPPORTEDRATES_IE_, &len, bssid->ie_length - ie_offset);
 	if (p) {
-		if (len > (NDIS_802_11_LENGTH_RATES_EX-i)) {
+		if (len > (NDIS_802_11_LENGTH_RATES_EX - i)) {
 			DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
 			return _FAIL;
 		}
@@ -2518,7 +2518,7 @@ static unsigned int OnProbeReq(struct adapter *padapter,
 		return _SUCCESS;
 
 	if (!check_fwstate(pmlmepriv, _FW_LINKED) &&
-	    !check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE))
+	    !check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE))
 		return _SUCCESS;
 
 	p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, &ielen,
@@ -2526,7 +2526,7 @@ static unsigned int OnProbeReq(struct adapter *padapter,
 
 	/* check (wildcard) SSID */
 	if (p) {
-		if ((ielen != 0 && memcmp((void *)(p+2), (void *)cur->ssid.ssid, cur->ssid.ssid_length)) ||
+		if ((ielen != 0 && memcmp((void *)(p + 2), (void *)cur->ssid.ssid, cur->ssid.ssid_length)) ||
 		    (ielen == 0 && pmlmeinfo->hidden_ssid_mode))
 			return _SUCCESS;
 
@@ -2583,7 +2583,7 @@ static unsigned int OnBeacon(struct adapter *padapter,
 			}
 
 			/* check the vendor of the assoc AP */
-			pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe+sizeof(struct ieee80211_hdr_3addr), len-sizeof(struct ieee80211_hdr_3addr));
+			pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe + sizeof(struct ieee80211_hdr_3addr), len - sizeof(struct ieee80211_hdr_3addr));
 
 			/* update TSF Value */
 			update_TSF(pmlmeext, pframe, len);
@@ -2596,7 +2596,7 @@ static unsigned int OnBeacon(struct adapter *padapter,
 
 		if (((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) && (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)) {
 			psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
-			if (psta != NULL) {
+			if (psta) {
 				ret = rtw_check_bcn_info(padapter, pframe, len);
 				if (!ret) {
 						DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n ");
@@ -2610,7 +2610,7 @@ static unsigned int OnBeacon(struct adapter *padapter,
 			}
 		} else if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
 			psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
-			if (psta != NULL) {
+			if (psta) {
 				/* update WMM, ERP in the beacon */
 				/* todo: the timer is used instead of the number of the beacon received */
 				if ((sta_rx_pkts(psta) & 0xf) == 0)
@@ -2729,7 +2729,7 @@ static unsigned int OnAuth(struct adapter *padapter,
 
 	if ((pstat->auth_seq + 1) != seq) {
 		DBG_88E("(1)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
-			seq, pstat->auth_seq+1);
+			seq, pstat->auth_seq + 1);
 		status = _STATS_OUT_OF_AUTH_SEQ_;
 		goto auth_fail;
 	}
@@ -2742,7 +2742,7 @@ static unsigned int OnAuth(struct adapter *padapter,
 			pstat->authalg = algorithm;
 		} else {
 			DBG_88E("(2)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
-				seq, pstat->auth_seq+1);
+				seq, pstat->auth_seq + 1);
 			status = _STATS_OUT_OF_AUTH_SEQ_;
 			goto auth_fail;
 		}
@@ -2761,7 +2761,7 @@ static unsigned int OnAuth(struct adapter *padapter,
 			p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, &ie_len,
 					len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);
 
-			if ((p == NULL) || (ie_len <= 0)) {
+			if (!p || ie_len <= 0) {
 				DBG_88E("auth rejected because challenge failure!(1)\n");
 				status = _STATS_CHALLENGE_FAIL_;
 				goto auth_fail;
@@ -2779,7 +2779,7 @@ static unsigned int OnAuth(struct adapter *padapter,
 			}
 		} else {
 			DBG_88E("(3)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
-				seq, pstat->auth_seq+1);
+				seq, pstat->auth_seq + 1);
 			status = _STATS_OUT_OF_AUTH_SEQ_;
 			goto auth_fail;
 		}
@@ -2855,7 +2855,7 @@ static unsigned int OnAuthClient(struct adapter *padapter,
 			p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, &len,
 				pkt_len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_);
 
-			if (p == NULL)
+			if (!p)
 				goto authclnt_fail;
 
 			memcpy((void *)(pmlmeinfo->chg_txt), (void *)(p + 2), len);
@@ -2864,10 +2864,9 @@ static unsigned int OnAuthClient(struct adapter *padapter,
 			set_link_timer(pmlmeext, REAUTH_TO);
 
 			return _SUCCESS;
-		} else {
-			/*  open system */
-			go2asoc = 1;
 		}
+		/*  open system */
+		go2asoc = 1;
 	} else if (seq == 4) {
 		if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
 			go2asoc = 1;
@@ -2975,7 +2974,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
 		goto OnAssocReqFail;
 	} else {
 		/*  check if ssid match */
-		if (memcmp((void *)(p+2), cur->ssid.ssid, cur->ssid.ssid_length))
+		if (memcmp((void *)(p + 2), cur->ssid.ssid, cur->ssid.ssid_length))
 			status = _STATS_FAILURE_;
 
 		if (ie_len != cur->ssid.ssid_length)
@@ -2987,7 +2986,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
 
 	/*  check if the supported rate is ok */
 	p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SUPPORTEDRATES_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
-	if (p == NULL) {
+	if (!p) {
 		DBG_88E("Rx a sta assoc-req which supported rate is empty!\n");
 		/*  use our own rate set as statoin used */
 		/* memcpy(supportRate, AP_BSSRATE, AP_BSSRATE_LEN); */
@@ -2996,14 +2995,15 @@ static unsigned int OnAssocReq(struct adapter *padapter,
 		status = _STATS_FAILURE_;
 		goto OnAssocReqFail;
 	} else {
-		memcpy(supportRate, p+2, ie_len);
+		memcpy(supportRate, p + 2, ie_len);
 		supportRateNum = ie_len;
 
 		p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_, &ie_len,
 				pkt_len - WLAN_HDR_A3_LEN - ie_offset);
-		if (p !=  NULL) {
+		if (p) {
 			if (supportRateNum <= sizeof(supportRate)) {
-				memcpy(supportRate+supportRateNum, p+2, ie_len);
+				memcpy(supportRate + supportRateNum,
+				       p + 2, ie_len);
 				supportRateNum += ie_len;
 			}
 		}
@@ -3031,7 +3031,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
 		wpa_ie = elems.rsn_ie;
 		wpa_ie_len = elems.rsn_ie_len;
 
-		if (rtw_parse_wpa2_ie(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+		if (rtw_parse_wpa2_ie(wpa_ie - 2, wpa_ie_len + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
 			pstat->dot8021xalg = 1;/* psk,  todo:802.1x */
 			pstat->wpa_psk |= BIT(1);
 
@@ -3052,7 +3052,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
 		wpa_ie = elems.wpa_ie;
 		wpa_ie_len = elems.wpa_ie_len;
 
-		if (rtw_parse_wpa_ie(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+		if (rtw_parse_wpa_ie(wpa_ie - 2, wpa_ie_len + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
 			pstat->dot8021xalg = 1;/* psk,  todo:802.1x */
 			pstat->wpa_psk |= BIT(0);
 
@@ -3094,7 +3094,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
 
 		/*  AP support WPA/RSN, and sta is going to do WPS, but AP is not ready */
 		/*  that the selected registrar of AP is _FLASE */
-		if ((psecuritypriv->wpa_psk > 0) && (pstat->flags & (WLAN_STA_WPS|WLAN_STA_MAYBE_WPS))) {
+		if ((psecuritypriv->wpa_psk > 0) && (pstat->flags & (WLAN_STA_WPS | WLAN_STA_MAYBE_WPS))) {
 			if (pmlmepriv->wps_beacon_ie) {
 				u8 selected_registrar = 0;
 
@@ -3131,7 +3131,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
 			copy_len = min_t(int, wpa_ie_len + 2, sizeof(pstat->wpa_ie));
 		}
 		if (copy_len > 0)
-			memcpy(pstat->wpa_ie, wpa_ie-2, copy_len);
+			memcpy(pstat->wpa_ie, wpa_ie - 2, copy_len);
 	}
 	/*  check if there is WMM IE & support WWM-PS */
 	pstat->flags &= ~WLAN_STA_WME;
@@ -3146,14 +3146,14 @@ static unsigned int OnAssocReq(struct adapter *padapter,
 		p = pframe + WLAN_HDR_A3_LEN + ie_offset; ie_len = 0;
 		for (;;) {
 			p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
-			if (p != NULL) {
-				if (!memcmp(p+2, WMM_IE, 6)) {
+			if (p) {
+				if (!memcmp(p + 2, WMM_IE, 6)) {
 					pstat->flags |= WLAN_STA_WME;
 
 					pstat->qos_option = 1;
-					pstat->qos_info = *(p+8);
+					pstat->qos_info = *(p + 8);
 
-					pstat->max_sp_len = (pstat->qos_info>>5) & 0x3;
+					pstat->max_sp_len = (pstat->qos_info >> 5) & 0x3;
 
 					if ((pstat->qos_info & 0xf) != 0xf)
 						pstat->has_legacy_ac = true;
@@ -3162,22 +3162,22 @@ static unsigned int OnAssocReq(struct adapter *padapter,
 
 					if (pstat->qos_info & 0xf) {
 						if (pstat->qos_info & BIT(0))
-							pstat->uapsd_vo = BIT(0)|BIT(1);
+							pstat->uapsd_vo = BIT(0) | BIT(1);
 						else
 							pstat->uapsd_vo = 0;
 
 						if (pstat->qos_info & BIT(1))
-							pstat->uapsd_vi = BIT(0)|BIT(1);
+							pstat->uapsd_vi = BIT(0) | BIT(1);
 						else
 							pstat->uapsd_vi = 0;
 
 						if (pstat->qos_info & BIT(2))
-							pstat->uapsd_bk = BIT(0)|BIT(1);
+							pstat->uapsd_bk = BIT(0) | BIT(1);
 						else
 							pstat->uapsd_bk = 0;
 
 						if (pstat->qos_info & BIT(3))
-							pstat->uapsd_be = BIT(0)|BIT(1);
+							pstat->uapsd_be = BIT(0) | BIT(1);
 						else
 							pstat->uapsd_be = 0;
 					}
@@ -3245,7 +3245,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
 		DBG_88E("  old AID %d\n", pstat->aid);
 	} else {
 		for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
-			if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
+			if (!pstapriv->sta_aid[pstat->aid - 1])
 				break;
 
 		/* if (pstat->aid > NUM_STA) { */
@@ -3452,14 +3452,13 @@ static unsigned int OnDeAuth(struct adapter *padapter,
 		}
 
 		return _SUCCESS;
-	} else
-#endif
-	{
-		DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n",
-			      reason, GetAddr3Ptr(pframe));
-
-		receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
 	}
+#endif
+	DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n",
+		      reason, GetAddr3Ptr(pframe));
+
+	receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+
 	pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
 	return _SUCCESS;
 }
@@ -3506,14 +3505,13 @@ static unsigned int OnDisassoc(struct adapter *padapter,
 		}
 
 		return _SUCCESS;
-	} else
-#endif
-	{
-		DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
-			      reason, GetAddr3Ptr(pframe));
-
-		receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
 	}
+#endif
+	DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
+		      reason, GetAddr3Ptr(pframe));
+
+	receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+
 	pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
 	return _SUCCESS;
 }
@@ -3958,7 +3956,7 @@ static void init_channel_list(struct adapter *padapter,
 			    ((o->bw == BW40MINUS) || (o->bw == BW40PLUS)))
 				continue;
 
-			if (reg == NULL) {
+			if (!reg) {
 				reg = &channel_list->reg_class[cla];
 				cla++;
 				reg->reg_class = o->op_class;
@@ -4515,7 +4513,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
 		/* set per sta rate after updating HT cap. */
 		set_sta_rate(padapter, psta);
 		rtw_hal_set_hwreg(padapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&psta->mac_id);
-		media_status = (psta->mac_id<<8)|1; /*   MACID|OPMODE: 1 means connect */
+		media_status = (psta->mac_id << 8) | 1; /*   MACID|OPMODE: 1 means connect */
 		rtw_hal_set_hwreg(padapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
 	}
 
@@ -4844,7 +4842,7 @@ u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf)
 		pmlmeinfo->state = WIFI_FW_AP_STATE;
 		type = _HW_STATE_AP_;
 	} else if (psetop->mode == Ndis802_11Infrastructure) {
-		pmlmeinfo->state &= ~(BIT(0)|BIT(1));/*  clear state */
+		pmlmeinfo->state &= ~(BIT(0) | BIT(1));/*  clear state */
 		pmlmeinfo->state |= WIFI_FW_STATION_STATE;/* set to	STATION_STATE */
 		type = _HW_STATE_STATION_;
 	} else if (psetop->mode == Ndis802_11IBSS) {
@@ -5040,7 +5038,7 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
 	u8 val8;
 
 	if (is_client_associated_to_ap(padapter))
-		issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms/100, 100);
+		issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms / 100, 100);
 
 	rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
 	rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
@@ -5084,7 +5082,7 @@ static int rtw_scan_ch_decision(struct adapter *padapter,
 	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
 
 	/* clear out first */
-	memset(out, 0, sizeof(struct rtw_ieee80211_channel)*out_num);
+	memset(out, 0, sizeof(struct rtw_ieee80211_channel) * out_num);
 
 	/* acquire channels from in */
 	j = 0;
@@ -5263,7 +5261,7 @@ u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
 
 			DBG_88E("r871x_set_stakey_hdl(): enc_algorithm=%d\n", pparm->algorithm);
 
-			if ((psta->mac_id < 1) || (psta->mac_id > (NUM_STA-4))) {
+			if ((psta->mac_id < 1) || (psta->mac_id > (NUM_STA - 4))) {
 				DBG_88E("r871x_set_stakey_hdl():set_stakey failed, mac_id(aid)=%d\n", psta->mac_id);
 				return H2C_REJECTED;
 			}
@@ -5276,10 +5274,10 @@ u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
 			write_cam(padapter, cam_id, ctrl, pparm->addr, pparm->key);
 
 			return H2C_SUCCESS_RSP;
-		} else {
-			DBG_88E("r871x_set_stakey_hdl(): sta has been free\n");
-			return H2C_REJECTED;
 		}
+
+		DBG_88E("r871x_set_stakey_hdl(): sta has been free\n");
+		return H2C_REJECTED;
 	}
 
 	/* below for sta mode */
@@ -5333,14 +5331,14 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
 
 	ptxBeacon_parm = kmemdup(&pmlmeinfo->network,
 				sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
-	if (ptxBeacon_parm == NULL) {
+	if (!ptxBeacon_parm) {
 		kfree(ph2c);
 		res = _FAIL;
 		goto exit;
 	}
 
-	len_diff = update_hidden_ssid(ptxBeacon_parm->ies+_BEACON_IE_OFFSET_,
-				      ptxBeacon_parm->ie_length-_BEACON_IE_OFFSET_,
+	len_diff = update_hidden_ssid(ptxBeacon_parm->ies + _BEACON_IE_OFFSET_,
+				      ptxBeacon_parm->ie_length - _BEACON_IE_OFFSET_,
 				      pmlmeinfo->hidden_ssid_mode);
 	ptxBeacon_parm->ie_length += len_diff;
 
@@ -5361,7 +5359,7 @@ u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
 
 	peventbuf = (uint *)pbuf;
 	evt_sz = (u16)(*peventbuf & 0xffff);
-	evt_code = (u8)((*peventbuf>>16) & 0xff);
+	evt_code = (u8)((*peventbuf >> 16) & 0xff);
 
 	/*  checking if event code is valid */
 	if (evt_code >= MAX_C2HEVT) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 03dc7e5..c4f5850 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -154,8 +154,8 @@ void ips_enter(struct adapter *padapter)
 int ips_leave(struct adapter *padapter)
 {
 	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-	struct security_priv *psecuritypriv = &(padapter->securitypriv);
-	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+	struct security_priv *psecuritypriv = &padapter->securitypriv;
+	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 	int result = _SUCCESS;
 	int keyid;
 
@@ -200,28 +200,24 @@ int ips_leave(struct adapter *padapter)
 
 static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
 {
-	struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
-	bool ret = false;
+	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
 
 	if (time_after_eq(adapter->pwrctrlpriv.ips_deny_time, jiffies))
-		goto exit;
+		return false;
 
 	if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR) ||
 	    check_fwstate(pmlmepriv, WIFI_UNDER_LINKING|WIFI_UNDER_WPS) ||
 	    check_fwstate(pmlmepriv, WIFI_AP_STATE) ||
 	    check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_ADHOC_STATE))
-		goto exit;
+		return false;
 
-	ret = true;
-
-exit:
-	return ret;
+	return true;
 }
 
 void rtw_ps_processor(struct adapter *padapter)
 {
 	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 	enum rt_rf_power_state rfpwrstate;
 
 	pwrpriv->ps_processing = true;
@@ -282,7 +278,7 @@ static void pwr_state_check_handler(struct timer_list *t)
  */
 void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
 {
-	u8	rpwm;
+	u8 rpwm;
 	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
 
 	pslv = PS_STATE(pslv);
@@ -335,8 +331,8 @@ void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
 static u8 PS_RDY_CHECK(struct adapter *padapter)
 {
 	unsigned long curr_time, delta_time;
-	struct pwrctrl_priv	*pwrpriv = &padapter->pwrctrlpriv;
-	struct mlme_priv	*pmlmepriv = &(padapter->mlmepriv);
+	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
 	curr_time = jiffies;
 	delta_time = curr_time - pwrpriv->DelayLPSLastTimeStamp;
@@ -437,7 +433,7 @@ s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
 /*  */
 void LPS_Enter(struct adapter *padapter)
 {
-	struct pwrctrl_priv	*pwrpriv = &padapter->pwrctrlpriv;
+	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
 
 	if (!PS_RDY_CHECK(padapter))
 		return;
@@ -463,7 +459,7 @@ void LPS_Enter(struct adapter *padapter)
 /*		Leave the leisure power save mode. */
 void LPS_Leave(struct adapter *padapter)
 {
-	struct pwrctrl_priv	*pwrpriv = &padapter->pwrctrlpriv;
+	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
 
 	if (pwrpriv->bLeisurePs) {
 		if (pwrpriv->pwr_mode != PS_MODE_ACTIVE) {
@@ -483,8 +479,8 @@ void LPS_Leave(struct adapter *padapter)
 /*  */
 void LeaveAllPowerSaveMode(struct adapter *Adapter)
 {
-	struct mlme_priv	*pmlmepriv = &(Adapter->mlmepriv);
-	u8	enqueue = 0;
+	struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+	u8 enqueue = 0;
 
 	if (check_fwstate(pmlmepriv, _FW_LINKED))
 		rtw_lps_ctrl_wk_cmd(Adapter, LPS_CTRL_LEAVE, enqueue);
@@ -611,7 +607,7 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
 
 int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
 {
-	int	ret = 0;
+	int ret = 0;
 	struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
 
 	if (mode < PS_MODE_NUM) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index af8a79c..6df873e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -1247,9 +1247,8 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
 				if (ralink_vendor_flag) {
 					DBG_88E("link to Tenda W311R AP\n");
 					return HT_IOT_PEER_TENDA;
-				} else {
-					DBG_88E("Capture EPIGRAM_OUI\n");
 				}
+				DBG_88E("Capture EPIGRAM_OUI\n");
 			} else {
 				break;
 			}
@@ -1266,10 +1265,9 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
 	} else if (ralink_vendor_flag && epigram_vendor_flag) {
 		DBG_88E("link to Tenda W311R AP\n");
 		return HT_IOT_PEER_TENDA;
-	} else {
-		DBG_88E("link to new AP\n");
-		return HT_IOT_PEER_UNKNOWN;
 	}
+	DBG_88E("link to new AP\n");
+	return HT_IOT_PEER_UNKNOWN;
 }
 
 void update_IOT_info(struct adapter *padapter)
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index c375916..258531b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -1022,10 +1022,10 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
 			ClearMFrag(mem_start);
 
 			break;
-		} else {
-			RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: There're still something in packet!\n", __func__));
 		}
 
+		RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: There're still something in packet!\n", __func__));
+
 		addr = (size_t)(pframe);
 
 		mem_start = (unsigned char *)round_up(addr, 4) + hw_hdr_offset;
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 4e2f6cb..7489491 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -967,10 +967,11 @@ void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
 
 		pDM_Odm->RFCalibrateInfo.TM_Trigger = 1;
 		return;
-	} else {
-		rtl88eu_dm_txpower_tracking_callback_thermalmeter(Adapter);
-		pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
 	}
+
+	rtl88eu_dm_txpower_tracking_callback_thermalmeter(Adapter);
+	pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
+
 }
 
 /* 3============================================================ */
diff --git a/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c b/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c
index 251bd8a..a55a0d8b 100644
--- a/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c
+++ b/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c
@@ -154,35 +154,37 @@ void rtl88eu_dm_update_rx_idle_ant(struct odm_dm_struct *dm_odm, u8 ant)
 	struct adapter *adapter = dm_odm->Adapter;
 	u32 default_ant, optional_ant;
 
-	if (dm_fat_tbl->RxIdleAnt != ant) {
-		if (ant == MAIN_ANT) {
-			default_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
-				       MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
-			optional_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
-					AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
-		} else {
-			default_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
-				       AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
-			optional_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
-					MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
-		}
+	if (dm_fat_tbl->RxIdleAnt == ant)
+		return;
 
-		if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
-			phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
-				       BIT(5) | BIT(4) | BIT(3), default_ant);
-			phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
-				       BIT(8) | BIT(7) | BIT(6), optional_ant);
-			phy_set_bb_reg(adapter, ODM_REG_ANTSEL_CTRL_11N,
-				       BIT(14) | BIT(13) | BIT(12), default_ant);
-			phy_set_bb_reg(adapter, ODM_REG_RESP_TX_11N,
-				       BIT(6) | BIT(7), default_ant);
-		} else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
-			phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
-				       BIT(5) | BIT(4) | BIT(3), default_ant);
-			phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
-				       BIT(8) | BIT(7) | BIT(6), optional_ant);
-		}
+	if (ant == MAIN_ANT) {
+		default_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
+			       MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
+		optional_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
+				AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
+	} else {
+		default_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
+			       AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
+		optional_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
+				MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
 	}
+
+	if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
+		phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
+			       BIT(5) | BIT(4) | BIT(3), default_ant);
+		phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
+			       BIT(8) | BIT(7) | BIT(6), optional_ant);
+		phy_set_bb_reg(adapter, ODM_REG_ANTSEL_CTRL_11N,
+			       BIT(14) | BIT(13) | BIT(12), default_ant);
+		phy_set_bb_reg(adapter, ODM_REG_RESP_TX_11N,
+			       BIT(6) | BIT(7), default_ant);
+	} else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
+		phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
+			       BIT(5) | BIT(4) | BIT(3), default_ant);
+		phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
+			       BIT(8) | BIT(7) | BIT(6), optional_ant);
+	}
+
 	dm_fat_tbl->RxIdleAnt = ant;
 }
 
@@ -303,6 +305,7 @@ void rtl88eu_dm_antenna_diversity(struct odm_dm_struct *dm_odm)
 
 	if (!(dm_odm->SupportAbility & ODM_BB_ANT_DIV))
 		return;
+
 	if (!dm_odm->bLinked) {
 		ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
 			     ("ODM_AntennaDiversity_88E(): No Link.\n"));
@@ -318,19 +321,20 @@ void rtl88eu_dm_antenna_diversity(struct odm_dm_struct *dm_odm)
 			dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
 		}
 		return;
-	} else {
-		if (!dm_fat_tbl->bBecomeLinked) {
-			ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
-				     ("Need to Turn on HW AntDiv\n"));
-			phy_set_bb_reg(adapter, ODM_REG_IGI_A_11N, BIT(7), 1);
-			phy_set_bb_reg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N,
-				       BIT(15), 1);
-			if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
-				phy_set_bb_reg(adapter, ODM_REG_TX_ANT_CTRL_11N,
-					       BIT(21), 1);
-			dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
-		}
 	}
+
+	if (!dm_fat_tbl->bBecomeLinked) {
+		ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+			     ("Need to Turn on HW AntDiv\n"));
+		phy_set_bb_reg(adapter, ODM_REG_IGI_A_11N, BIT(7), 1);
+		phy_set_bb_reg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N,
+			       BIT(15), 1);
+		if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
+			phy_set_bb_reg(adapter, ODM_REG_TX_ANT_CTRL_11N,
+				       BIT(21), 1);
+		dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
+	}
+
 	if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ||
 	    (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV))
 		rtl88eu_dm_hw_ant_div(dm_odm);
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index 51c40ab..afaf9e5 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -923,27 +923,27 @@ static bool simularity_compare(struct adapter *adapt, s32 resulta[][8],
 			}
 		}
 		return result;
-	} else {
-		if (!(sim_bitmap & 0x03)) {		   /* path A TX OK */
-			for (i = 0; i < 2; i++)
-				resulta[3][i] = resulta[c1][i];
-		}
-		if (!(sim_bitmap & 0x0c)) {		   /* path A RX OK */
-			for (i = 2; i < 4; i++)
-				resulta[3][i] = resulta[c1][i];
-		}
-
-		if (!(sim_bitmap & 0x30)) { /* path B TX OK */
-			for (i = 4; i < 6; i++)
-				resulta[3][i] = resulta[c1][i];
-		}
-
-		if (!(sim_bitmap & 0xc0)) { /* path B RX OK */
-			for (i = 6; i < 8; i++)
-				resulta[3][i] = resulta[c1][i];
-		}
-		return false;
 	}
+
+	if (!(sim_bitmap & 0x03)) {		   /* path A TX OK */
+		for (i = 0; i < 2; i++)
+			resulta[3][i] = resulta[c1][i];
+	}
+	if (!(sim_bitmap & 0x0c)) {		   /* path A RX OK */
+		for (i = 2; i < 4; i++)
+			resulta[3][i] = resulta[c1][i];
+	}
+
+	if (!(sim_bitmap & 0x30)) { /* path B TX OK */
+		for (i = 4; i < 6; i++)
+			resulta[3][i] = resulta[c1][i];
+	}
+
+	if (!(sim_bitmap & 0xc0)) { /* path B RX OK */
+		for (i = 6; i < 8; i++)
+			resulta[3][i] = resulta[c1][i];
+	}
+	return false;
 }
 
 static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
@@ -1053,10 +1053,9 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
 				result[t][3] = (phy_query_bb_reg(adapt, rRx_Power_After_IQK_A_2,
 								 bMaskDWord)&0x3FF0000)>>16;
 			break;
-		} else {
-			ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
-				     ("Path A Rx IQK Fail!!\n"));
 		}
+		ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+			     ("Path A Rx IQK Fail!!\n"));
 	}
 
 	if (path_a_ok == 0x00) {
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 545d6a6..241f55b 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -21,7 +21,7 @@
 /*  Initialize GPIO setting registers */
 static void dm_InitGPIOSetting(struct adapter *Adapter)
 {
-	u8	tmp1byte;
+	u8 tmp1byte;
 
 	tmp1byte = usb_read8(Adapter, REG_GPIO_MUXCFG);
 	tmp1byte &= (GPIOSEL_GPIO | ~GPIOSEL_ENBT);
@@ -35,8 +35,8 @@ static void dm_InitGPIOSetting(struct adapter *Adapter)
 static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
 {
 	struct hal_data_8188e *hal_data = Adapter->HalData;
-	struct dm_priv	*pdmpriv = &hal_data->dmpriv;
-	struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
+	struct dm_priv *pdmpriv = &hal_data->dmpriv;
+	struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
 
 	/*  Init Value */
 	memset(dm_odm, 0, sizeof(*dm_odm));
@@ -51,44 +51,46 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
 
 	dm_odm->AntDivType = hal_data->TRxAntDivType;
 
-	/*  Tx power tracking BB swing table. */
-	/*  The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB */
+	/* Tx power tracking BB swing table.
+	 * The base index = 12.
+	 * +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB
+	 */
 	dm_odm->BbSwingIdxOfdm = 12; /*  Set defalut value as index 12. */
 	dm_odm->BbSwingIdxOfdmCurrent = 12;
 	dm_odm->BbSwingFlagOfdm = false;
 
-	pdmpriv->InitODMFlag =	ODM_RF_CALIBRATION |
-				ODM_RF_TX_PWR_TRACK;
+	pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
+			       ODM_RF_TX_PWR_TRACK;
 
 	dm_odm->SupportAbility = pdmpriv->InitODMFlag;
 }
 
 static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
 {
-	struct mlme_ext_priv	*pmlmeext = &Adapter->mlmeextpriv;
-	struct mlme_priv	*pmlmepriv = &Adapter->mlmepriv;
+	struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+	struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
 	struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
 	struct hal_data_8188e *hal_data = Adapter->HalData;
-	struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
-	struct dm_priv	*pdmpriv = &hal_data->dmpriv;
+	struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
+	struct dm_priv *pdmpriv = &hal_data->dmpriv;
 	int i;
 
-	pdmpriv->InitODMFlag =	ODM_BB_DIG |
-				ODM_BB_RA_MASK |
-				ODM_BB_DYNAMIC_TXPWR |
-				ODM_BB_FA_CNT |
-				ODM_BB_RSSI_MONITOR |
-				ODM_BB_CCK_PD |
-				ODM_BB_PWR_SAVE |
-				ODM_MAC_EDCA_TURBO |
-				ODM_RF_CALIBRATION |
-				ODM_RF_TX_PWR_TRACK;
+	pdmpriv->InitODMFlag = ODM_BB_DIG |
+			       ODM_BB_RA_MASK |
+			       ODM_BB_DYNAMIC_TXPWR |
+			       ODM_BB_FA_CNT |
+			       ODM_BB_RSSI_MONITOR |
+			       ODM_BB_CCK_PD |
+			       ODM_BB_PWR_SAVE |
+			       ODM_MAC_EDCA_TURBO |
+			       ODM_RF_CALIBRATION |
+			       ODM_RF_TX_PWR_TRACK;
 	if (hal_data->AntDivCfg)
 		pdmpriv->InitODMFlag |= ODM_BB_ANT_DIV;
 
 	if (Adapter->registrypriv.mp_mode == 1) {
-		pdmpriv->InitODMFlag =	ODM_RF_CALIBRATION |
-					ODM_RF_TX_PWR_TRACK;
+		pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
+				       ODM_RF_TX_PWR_TRACK;
 	}
 
 	dm_odm->SupportAbility = pdmpriv->InitODMFlag;
@@ -106,20 +108,23 @@ static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
 	dm_odm->pbPowerSaving = (bool *)&pwrctrlpriv->bpower_saving;
 	dm_odm->AntDivType = hal_data->TRxAntDivType;
 
-	/*  Tx power tracking BB swing table. */
-	/*  The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB */
+	/* Tx power tracking BB swing table.
+	 * The base index = 12.
+	 * +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB
+	 */
 	dm_odm->BbSwingIdxOfdm = 12; /*  Set defalut value as index 12. */
 	dm_odm->BbSwingIdxOfdmCurrent = 12;
 	dm_odm->BbSwingFlagOfdm = false;
 
 	for (i = 0; i < NUM_STA; i++)
-		ODM_CmnInfoPtrArrayHook(dm_odm, ODM_CMNINFO_STA_STATUS, i, NULL);
+		ODM_CmnInfoPtrArrayHook(dm_odm, ODM_CMNINFO_STA_STATUS, i,
+					NULL);
 }
 
 void rtl8188e_InitHalDm(struct adapter *Adapter)
 {
-	struct dm_priv	*pdmpriv = &Adapter->HalData->dmpriv;
-	struct odm_dm_struct *dm_odm = &(Adapter->HalData->odmpriv);
+	struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
+	struct odm_dm_struct *dm_odm = &Adapter->HalData->odmpriv;
 
 	dm_InitGPIOSetting(Adapter);
 	pdmpriv->DM_Type = DM_Type_ByDriver;
@@ -162,7 +167,7 @@ void rtw_hal_dm_watchdog(struct adapter *Adapter)
 
 void rtw_hal_dm_init(struct adapter *Adapter)
 {
-	struct dm_priv	*pdmpriv = &Adapter->HalData->dmpriv;
+	struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
 	struct odm_dm_struct *podmpriv = &Adapter->HalData->odmpriv;
 
 	memset(pdmpriv, 0, sizeof(struct dm_priv));
@@ -172,23 +177,28 @@ void rtw_hal_dm_init(struct adapter *Adapter)
 
 /*  Add new function to reset the state of antenna diversity before link. */
 /*  Compare RSSI for deciding antenna */
-void rtw_hal_antdiv_rssi_compared(struct adapter *Adapter, struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src)
+void rtw_hal_antdiv_rssi_compared(struct adapter *Adapter,
+				  struct wlan_bssid_ex *dst,
+				  struct wlan_bssid_ex *src)
 {
 	if (Adapter->HalData->AntDivCfg != 0) {
-		/* select optimum_antenna for before linked =>For antenna diversity */
-		if (dst->Rssi >=  src->Rssi) {/* keep org parameter */
+		/* select optimum_antenna before linking => for antenna
+		 * diversity
+		 */
+		if (dst->Rssi >= src->Rssi) {/* keep org parameter */
 			src->Rssi = dst->Rssi;
-			src->PhyInfo.Optimum_antenna = dst->PhyInfo.Optimum_antenna;
+			src->PhyInfo.Optimum_antenna =
+				dst->PhyInfo.Optimum_antenna;
 		}
 	}
 }
 
 /*  Add new function to reset the state of antenna diversity before link. */
-u8 rtw_hal_antdiv_before_linked(struct adapter *Adapter)
+bool rtw_hal_antdiv_before_linked(struct adapter *Adapter)
 {
 	struct odm_dm_struct *dm_odm = &Adapter->HalData->odmpriv;
 	struct sw_ant_switch *dm_swat_tbl = &dm_odm->DM_SWAT_Table;
-	struct mlme_priv *pmlmepriv = &(Adapter->mlmepriv);
+	struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
 
 	/*  Condition that does not need to use antenna diversity. */
 	if (Adapter->HalData->AntDivCfg == 0)
@@ -197,15 +207,16 @@ u8 rtw_hal_antdiv_before_linked(struct adapter *Adapter)
 	if (check_fwstate(pmlmepriv, _FW_LINKED))
 		return false;
 
-	if (dm_swat_tbl->SWAS_NoLink_State == 0) {
-		/* switch channel */
-		dm_swat_tbl->SWAS_NoLink_State = 1;
-		dm_swat_tbl->CurAntenna = (dm_swat_tbl->CurAntenna == Antenna_A) ? Antenna_B : Antenna_A;
-
-		rtw_antenna_select_cmd(Adapter, dm_swat_tbl->CurAntenna, false);
-		return true;
-	} else {
+	if (dm_swat_tbl->SWAS_NoLink_State != 0) {
 		dm_swat_tbl->SWAS_NoLink_State = 0;
 		return false;
 	}
+
+	/* switch channel */
+	dm_swat_tbl->SWAS_NoLink_State = 1;
+	dm_swat_tbl->CurAntenna = (dm_swat_tbl->CurAntenna == Antenna_A) ?
+				  Antenna_B : Antenna_A;
+
+	rtw_antenna_select_cmd(Adapter, dm_swat_tbl->CurAntenna, false);
+	return true;
 }
diff --git a/drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h b/drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h
index da66695..0c5b2b0 100644
--- a/drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h
+++ b/drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h
@@ -15,11 +15,6 @@
 #define MAX_TXPWR_IDX_NMODE_92S		63
 #define Reset_Cnt_Limit			3
 
-#define IQK_MAC_REG_NUM			4
-#define IQK_ADDA_REG_NUM		16
-#define IQK_BB_REG_NUM			9
-#define HP_THERMAL_NUM			8
-
 #define MAX_AGGR_NUM			0x07
 
 
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 516a896..39df305 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -209,7 +209,7 @@ void	rtw_hal_set_bwmode(struct adapter *padapter,
 void	rtw_hal_set_chan(struct adapter *padapter, u8 channel);
 void	rtw_hal_dm_watchdog(struct adapter *padapter);
 
-u8	rtw_hal_antdiv_before_linked(struct adapter *padapter);
+bool rtw_hal_antdiv_before_linked(struct adapter *padapter);
 void	rtw_hal_antdiv_rssi_compared(struct adapter *padapter,
 				     struct wlan_bssid_ex *dst,
 				     struct wlan_bssid_ex *src);
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index d569fe5..75f0ebe 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -765,7 +765,7 @@ bool rtw_is_cckrates_included(u8 *rate);
 
 bool rtw_is_cckratesonly_included(u8 *rate);
 
-int rtw_check_network_type(unsigned char *rate, int ratelen, int channel);
+int rtw_check_network_type(unsigned char *rate);
 
 void rtw_get_bcn_info(struct wlan_network *pnetwork);
 
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 8245cea..9d39fe1 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -239,7 +239,6 @@ struct odm_rate_adapt {
 
 #define IQK_MAC_REG_NUM		4
 #define IQK_ADDA_REG_NUM	16
-#define IQK_BB_REG_NUM_MAX	10
 #define IQK_BB_REG_NUM		9
 #define HP_THERMAL_NUM		8
 
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
index 1920433..910b982 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
@@ -10,12 +10,7 @@ enum{
 	UP_LINK,
 	DOWN_LINK,
 };
-/*  duplicate code,will move to ODM ######### */
-#define IQK_MAC_REG_NUM		4
-#define IQK_ADDA_REG_NUM		16
-#define IQK_BB_REG_NUM			9
-#define HP_THERMAL_NUM		8
-/*  duplicate code,will move to ODM ######### */
+
 struct	dm_priv {
 	u8	DM_Type;
 	u8	DMFlag;
diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h
index b5dfb22..c3847cf 100644
--- a/drivers/staging/rtl8188eu/include/rtw_rf.h
+++ b/drivers/staging/rtl8188eu/include/rtw_rf.h
@@ -19,9 +19,6 @@
 #define SHORT_SLOT_TIME			9
 #define NON_SHORT_SLOT_TIME		20
 
-#define RTL8711_RF_MAX_SENS		6
-#define RTL8711_RF_DEF_SENS		4
-
 /*  We now define the following channels as the max channels in each
  * channel plan.
  */
@@ -30,8 +27,6 @@
 #define	MAX_CHANNEL_NUM_2G		14
 #define	MAX_CHANNEL_NUM			14	/* 2.4 GHz only */
 
-#define NUM_REGULATORYS	1
-
 /* Country codes */
 #define USA				0x555320
 #define EUROPE				0x1 /* temp, should be provided later */
@@ -74,17 +69,6 @@ enum	_REG_PREAMBLE_MODE {
 	PREAMBLE_SHORT	= 3,
 };
 
-enum _RTL8712_RF_MIMO_CONFIG_ {
-	RTL8712_RFCONFIG_1T = 0x10,
-	RTL8712_RFCONFIG_2T = 0x20,
-	RTL8712_RFCONFIG_1R = 0x01,
-	RTL8712_RFCONFIG_2R = 0x02,
-	RTL8712_RFCONFIG_1T1R = 0x11,
-	RTL8712_RFCONFIG_1T2R = 0x12,
-	RTL8712_RFCONFIG_TURBO = 0x92,
-	RTL8712_RFCONFIG_2T2R = 0x22
-};
-
 enum rf90_radio_path {
 	RF90_PATH_A = 0,		/* Radio Path A */
 	RF90_PATH_B = 1,		/* Radio Path B */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 710c33f..9b6ea86 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -222,18 +222,21 @@ static char *translate_scan(struct adapter *padapter,
 
 	/* parsing WPA/WPA2 IE */
 	{
-		u8 buf[MAX_WPA_IE_LEN];
+		u8 *buf;
 		u8 wpa_ie[255], rsn_ie[255];
 		u16 wpa_len = 0, rsn_len = 0;
 		u8 *p;
 
+		buf = kzalloc(MAX_WPA_IE_LEN, GFP_ATOMIC);
+		if (!buf)
+			return start;
+
 		rtw_get_sec_ie(pnetwork->network.ies, pnetwork->network.ie_length, rsn_ie, &rsn_len, wpa_ie, &wpa_len);
 		RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: ssid =%s\n", pnetwork->network.ssid.ssid));
 		RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
 
 		if (wpa_len > 0) {
 			p = buf;
-			memset(buf, 0, MAX_WPA_IE_LEN);
 			p += sprintf(p, "wpa_ie=");
 			for (i = 0; i < wpa_len; i++)
 				p += sprintf(p, "%02x", wpa_ie[i]);
@@ -250,7 +253,6 @@ static char *translate_scan(struct adapter *padapter,
 		}
 		if (rsn_len > 0) {
 			p = buf;
-			memset(buf, 0, MAX_WPA_IE_LEN);
 			p += sprintf(p, "rsn_ie=");
 			for (i = 0; i < rsn_len; i++)
 				p += sprintf(p, "%02x", rsn_ie[i]);
@@ -264,6 +266,7 @@ static char *translate_scan(struct adapter *padapter,
 			iwe.u.data.length = rsn_len;
 			start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie);
 		}
+		kfree(buf);
 	}
 
 	{/* parsing WPS IE */
@@ -593,9 +596,8 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
 					set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
 					cnt += buf[cnt+1]+2;
 					break;
-				} else {
-					cnt += buf[cnt+1]+2; /* goto next */
 				}
+				cnt += buf[cnt+1]+2; /* goto next */
 			}
 		}
 	}
@@ -770,8 +772,7 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
 		DBG_88E("[rtw_wx_set_pmkid] IW_PMKSA_ADD!\n");
 		if (!memcmp(strIssueBssid, strZeroMacAddress, ETH_ALEN))
 			return ret;
-		else
-			ret = true;
+		ret = true;
 		blInserted = false;
 
 		/* overwrite PMKID */
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 60c6527..11183b9 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -1973,18 +1973,11 @@ u8 rtl92e_rx_db_to_percent(s8 antpower)
 
 u8 rtl92e_evm_db_to_percent(s8 value)
 {
-	s8 ret_val;
+	s8 ret_val = clamp(-value, 0, 33) * 3;
 
-	ret_val = value;
-
-	if (ret_val >= 0)
-		ret_val = 0;
-	if (ret_val <= -33)
-		ret_val = -33;
-	ret_val = 0 - ret_val;
-	ret_val *= 3;
 	if (ret_val == 99)
 		ret_val = 100;
+
 	return ret_val;
 }
 
diff --git a/drivers/staging/rtl8192u/Makefile b/drivers/staging/rtl8192u/Makefile
index dcd51bf..0be7426 100644
--- a/drivers/staging/rtl8192u/Makefile
+++ b/drivers/staging/rtl8192u/Makefile
@@ -1,13 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
 NIC_SELECT = RTL8192U
 
-ccflags-y := -std=gnu89
-ccflags-y += -O2
-
 ccflags-y += -DCONFIG_FORCE_HARD_FLOAT=y
 ccflags-y += -DJACKSON_NEW_8187 -DJACKSON_NEW_RX
 ccflags-y += -DTHOMAS_BEACON -DTHOMAS_TASKLET -DTHOMAS_SKB -DTHOMAS_TURBO
-ccflags-y += -I $(srctree)/$(src)/ieee80211
 
 r8192u_usb-y := r8192U_core.o r8180_93cx6.o r8192U_wx.o		\
 		  r8190_rtl8256.o r819xU_phy.o r819xU_firmware.o	\
diff --git a/drivers/staging/rtl8192u/ieee80211/Makefile b/drivers/staging/rtl8192u/ieee80211/Makefile
deleted file mode 100644
index 0d4d648..0000000
--- a/drivers/staging/rtl8192u/ieee80211/Makefile
+++ /dev/null
@@ -1,27 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-NIC_SELECT = RTL8192U
-
-ccflags-y := -O2
-ccflags-y += -DJACKSON_NEW_8187 -DJACKSON_NEW_RX
-
-ieee80211-rsl-objs := ieee80211_rx.o \
-		      ieee80211_softmac.o \
-		      ieee80211_tx.o \
-		      ieee80211_wx.o \
-		      ieee80211_module.o \
-		      ieee80211_softmac_wx.o\
-		      rtl819x_HTProc.o\
-		      rtl819x_TSProc.o\
-		      rtl819x_BAProc.o\
-		      dot11d.o
-
-ieee80211_crypt-rsl-objs := ieee80211_crypt.o
-ieee80211_crypt_tkip-rsl-objs := ieee80211_crypt_tkip.o
-ieee80211_crypt_ccmp-rsl-objs := ieee80211_crypt_ccmp.o
-ieee80211_crypt_wep-rsl-objs := ieee80211_crypt_wep.o
-
-obj-m +=ieee80211-rsl.o
-obj-m +=ieee80211_crypt-rsl.o
-obj-m +=ieee80211_crypt_wep-rsl.o
-obj-m +=ieee80211_crypt_tkip-rsl.o
-obj-m +=ieee80211_crypt_ccmp-rsl.o
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index 4a6c3f6..aa9dab8 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -264,12 +264,12 @@ static int open_debug_level(struct inode *inode, struct file *file)
 	return single_open(file, show_debug_level, NULL);
 }
 
-static const struct file_operations fops = {
-	.open = open_debug_level,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.write = write_debug_level,
-	.release = single_release,
+static const struct proc_ops debug_level_proc_ops = {
+	.proc_open	= open_debug_level,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= write_debug_level,
+	.proc_release	= single_release,
 };
 
 int __init ieee80211_debug_init(void)
@@ -284,7 +284,7 @@ int __init ieee80211_debug_init(void)
 				" proc directory\n");
 		return -EIO;
 	}
-	e = proc_create("debug_level", 0644, ieee80211_proc, &fops);
+	e = proc_create("debug_level", 0644, ieee80211_proc, &debug_level_proc_ops);
 	if (!e) {
 		remove_proc_entry(DRV_NAME, init_net.proc_net);
 		ieee80211_proc = NULL;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 00fea12..e101f7b 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -232,8 +232,7 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
 
 	#ifdef NOT_YET
 	if (ieee->iw_mode == IW_MODE_MASTER) {
-		printk(KERN_DEBUG "%s: Master mode not yet supported.\n",
-		       ieee->dev->name);
+		netdev_dbg(ieee->dev, "Master mode not yet supported.\n");
 		return 0;
 /*
   hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr_4addr *)
@@ -261,9 +260,9 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
 
 	    if (ieee->iw_mode == IW_MODE_MASTER) {
 		if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) {
-			printk(KERN_DEBUG "%s: unknown management frame "
+			netdev_dbg(skb->dev, "unknown management frame "
 			       "(type=0x%02x, stype=0x%02x) dropped\n",
-			       skb->dev->name, type, stype);
+			       type, stype);
 			return -1;
 		}
 
@@ -271,8 +270,8 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
 		return 0;
 	}
 
-	printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: management frame "
-	       "received in non-Host AP mode\n", skb->dev->name);
+	netdev_dbg(skb->dev, "hostap_rx_frame_mgmt: management frame "
+	       "received in non-Host AP mode\n");
 	return -1;
 	#endif
 }
@@ -349,9 +348,9 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
 	if (ieee->tkip_countermeasures &&
 	    strcmp(crypt->ops->name, "TKIP") == 0) {
 		if (net_ratelimit()) {
-			printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
+			netdev_dbg(ieee->dev, "TKIP countermeasures: dropped "
 			       "received packet from %pM\n",
-			       ieee->dev->name, hdr->addr2);
+			       hdr->addr2);
 		}
 		return -1;
 	}
@@ -397,9 +396,9 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *s
 	res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
 	atomic_dec(&crypt->refcnt);
 	if (res < 0) {
-		printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
+		netdev_dbg(ieee->dev, "MSDU decryption/MIC verification failed"
 		       " (SA=%pM keyidx=%d)\n",
-		       ieee->dev->name, hdr->addr2, keyidx);
+		       hdr->addr2, keyidx);
 		return -1;
 	}
 
@@ -749,7 +748,8 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
 	kfree(prxbIndicateArray);
 }
 
-static u8 parse_subframe(struct sk_buff *skb,
+static u8 parse_subframe(struct ieee80211_device *ieee,
+			 struct sk_buff *skb,
 			 struct ieee80211_rx_stats *rx_stats,
 			 struct ieee80211_rxb *rxb, u8 *src, u8 *dst)
 {
@@ -810,11 +810,11 @@ static u8 parse_subframe(struct sk_buff *skb,
 			nSubframe_Length = (nSubframe_Length >> 8) + (nSubframe_Length << 8);
 
 			if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
-				printk("%s: A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",\
-						__func__, rxb->nr_subframes);
-				printk("%s: A-MSDU parse error!! Subframe Length: %d\n", __func__, nSubframe_Length);
-				printk("nRemain_Length is %d and nSubframe_Length is : %d\n", skb->len, nSubframe_Length);
-				printk("The Packet SeqNum is %d\n", SeqNum);
+				netdev_dbg(ieee->dev, "A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",
+					   rxb->nr_subframes);
+				netdev_dbg(ieee->dev, "A-MSDU parse error!! Subframe Length: %d\n", nSubframe_Length);
+				netdev_dbg(ieee->dev, "nRemain_Length is %d and nSubframe_Length is : %d\n", skb->len, nSubframe_Length);
+				netdev_dbg(ieee->dev, "The Packet SeqNum is %d\n", SeqNum);
 				return 0;
 			}
 
@@ -904,8 +904,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 	stats = &ieee->stats;
 
 	if (skb->len < 10) {
-		printk(KERN_INFO "%s: SKB length < 10\n",
-		       dev->name);
+		netdev_info(dev, "SKB length < 10\n");
 		goto rx_dropped;
 	}
 
@@ -919,7 +918,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 
 	if (HTCCheck(ieee, skb->data)) {
 		if (net_ratelimit())
-			printk("find HTCControl\n");
+			netdev_warn(dev, "find HTCControl\n");
 		hdrlen += 4;
 		rx_stats->bContainHTC = true;
 	}
@@ -1113,7 +1112,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 
 	if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
 	    (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) {
-		printk("decrypt frame error\n");
+		netdev_dbg(ieee->dev, "decrypt frame error\n");
 		goto rx_dropped;
 	}
 
@@ -1141,9 +1140,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 			flen -= hdrlen;
 
 		if (frag_skb->tail + flen > frag_skb->end) {
-			printk(KERN_WARNING "%s: host decrypted and "
-			       "reassembled frame did not fit skb\n",
-			       dev->name);
+			netdev_warn(dev, "host decrypted and "
+			       "reassembled frame did not fit skb\n");
 			ieee80211_frag_cache_invalidate(ieee, hdr);
 			goto rx_dropped;
 		}
@@ -1178,7 +1176,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 	 * encrypted/authenticated */
 	if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
 	    ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) {
-		printk("==>decrypt msdu error\n");
+		netdev_dbg(ieee->dev, "==>decrypt msdu error\n");
 		goto rx_dropped;
 	}
 
@@ -1250,7 +1248,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 		goto rx_dropped;
 	/* to parse amsdu packets */
 	/* qos data packets & reserved bit is 1 */
-	if (parse_subframe(skb, rx_stats, rxb, src, dst) == 0) {
+	if (parse_subframe(ieee, skb, rx_stats, rxb, src, dst) == 0) {
 		/* only to free rxb, and not submit the packets to upper layer */
 		for (i = 0; i < rxb->nr_subframes; i++) {
 			dev_kfree_skb(rxb->subframes[i]);
@@ -1863,7 +1861,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
 				info_element->data[0] == 0x00 &&
 				info_element->data[1] == 0x13 &&
 				info_element->data[2] == 0x74)) {
-				printk("========>%s(): athros AP is exist\n", __func__);
+				netdev_dbg(ieee->dev, "========> athros AP is exist\n");
 				network->atheros_cap_exist = true;
 			} else
 				network->atheros_cap_exist = false;
@@ -1980,8 +1978,8 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
 			}
 			break;
 		case MFIE_TYPE_QOS_PARAMETER:
-			printk(KERN_ERR
-			       "QoS Error need to parse QOS_PARAMETER IE\n");
+			netdev_err(ieee->dev,
+				   "QoS Error need to parse QOS_PARAMETER IE\n");
 			break;
 
 		case MFIE_TYPE_COUNTRY:
@@ -2357,14 +2355,14 @@ static inline void ieee80211_process_probe_response(
 			if (IS_COUNTRY_IE_VALID(ieee)) {
 				// Case 1: Country code
 				if (!is_legal_channel(ieee, network->channel)) {
-					printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network->channel);
+					netdev_warn(ieee->dev, "GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network->channel);
 					goto out;
 				}
 			} else {
 				// Case 2: No any country code.
 				// Filter over channel ch12~14
 				if (network->channel > 11) {
-					printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network->channel);
+					netdev_warn(ieee->dev, "GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network->channel);
 					goto out;
 				}
 			}
@@ -2372,14 +2370,14 @@ static inline void ieee80211_process_probe_response(
 			if (IS_COUNTRY_IE_VALID(ieee)) {
 				// Case 1: Country code
 				if (!is_legal_channel(ieee, network->channel)) {
-					printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n", network->channel);
+					netdev_warn(ieee->dev, "GetScanInfo(): For Country code, filter beacon at channel(%d).\n", network->channel);
 					goto out;
 				}
 			} else {
 				// Case 2: No any country code.
 				// Filter over channel ch12~14
 				if (network->channel > 14) {
-					printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n", network->channel);
+					netdev_warn(ieee->dev, "GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n", network->channel);
 					goto out;
 				}
 			}
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 482382a..89dd1fb 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -60,7 +60,7 @@ double __extendsfdf2(float a)
 #include <linux/seq_file.h>
 /* FIXME: check if 2.6.7 is ok */
 
-#include "dot11d.h"
+#include "ieee80211/dot11d.h"
 /* set here to open your trace code. */
 u32 rt_global_debug_component = COMP_DOWN	|
 				COMP_SEC	|
@@ -3954,18 +3954,11 @@ static u8 rtl819x_query_rxpwrpercentage(s8 antpower)
 
 static u8 rtl819x_evm_dbtopercentage(s8 value)
 {
-	s8 ret_val;
+	s8 ret_val = clamp(-value, 0, 33) * 3;
 
-	ret_val = value;
-
-	if (ret_val >= 0)
-		ret_val = 0;
-	if (ret_val <= -33)
-		ret_val = -33;
-	ret_val = 0 - ret_val;
-	ret_val *= 3;
 	if (ret_val == 99)
 		ret_val = 100;
+
 	return ret_val;
 }
 
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 5822bb7..0118edb 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -23,7 +23,7 @@
 #include "r8192U.h"
 #include "r8192U_hw.h"
 
-#include "dot11d.h"
+#include "ieee80211/dot11d.h"
 #include "r8192U_wx.h"
 
 #define RATE_COUNT 12
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index c04d8ec..555e525 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -7,7 +7,7 @@
 #include "r8192U_dm.h"
 #include "r819xU_firmware_img.h"
 
-#include "dot11d.h"
+#include "ieee80211/dot11d.h"
 #include <linux/bitops.h>
 
 static u32 RF_CHANNEL_TABLE_ZEBRA[] = {
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 00ea0beb..1167739 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -663,17 +663,11 @@ static u8 evm_db2percentage(s8 value)
 	/*
 	 * -33dB~0dB to 0%~99%
 	 */
-	s8 ret_val;
+	s8 ret_val = clamp(-value, 0, 33) * 3;
 
-	ret_val = value;
-	if (ret_val >= 0)
-		ret_val = 0;
-	if (ret_val <= -33)
-		ret_val = -33;
-	ret_val = -ret_val;
-	ret_val *= 3;
 	if (ret_val == 99)
 		ret_val = 100;
+
 	return ret_val;
 }
 
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 5e687f6..c642825 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -207,10 +207,10 @@ static RT_CHANNEL_PLAN_MAP	RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
 	{0x02, 0x1F},	/* 0x57, RT_CHANNEL_DOMAIN_FCC1_FCC10 */
 };
 
-static RT_CHANNEL_PLAN_MAP	RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03, 0x02}; /* use the conbination for max channel numbers */
+ /* use the combination for max channel numbers */
+static RT_CHANNEL_PLAN_MAP RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03, 0x02};
 
-/*
- * Search the @param ch in given @param ch_set
+/* Search the @param ch in given @param ch_set
  * @ch_set: the given channel set
  * @ch: the given channel number
  *
@@ -229,8 +229,7 @@ int rtw_ch_set_search_ch(RT_CHANNEL_INFO *ch_set, const u32 ch)
 	return i;
 }
 
-/*
- * Check the @param ch is fit with setband setting of @param adapter
+/* Check the @param ch is fit with setband setting of @param adapter
  * @adapter: the given adapter
  * @ch: the given channel number
  *
@@ -2265,7 +2264,7 @@ inline struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv)
 
 /****************************************************************************
 
-Following are some TX fuctions for WiFi MLME
+Following are some TX functions for WiFi MLME
 
 *****************************************************************************/
 
@@ -2956,7 +2955,7 @@ int issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid,
 	return ret;
 }
 
-/*  if psta == NULL, indiate we are station(client) now... */
+/*  if psta == NULL, indicate we are station(client) now... */
 void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short status)
 {
 	struct xmit_frame			*pmgntframe;
@@ -3356,9 +3355,11 @@ void issue_assocreq(struct adapter *padapter)
 					(!memcmp(pIE->data, WPS_OUI, 4))) {
 				vs_ie_length = pIE->Length;
 				if ((!padapter->registrypriv.wifi_spec) && (!memcmp(pIE->data, WPS_OUI, 4))) {
-					/* Commented by Kurt 20110629 */
-					/* In some older APs, WPS handshake */
-					/* would be fail if we append vender extensions informations to AP */
+					/* Commented by Kurt 20110629
+					 * In some older APs, WPS handshake
+					 * would fail if we append vendor
+					 * extensions information to AP
+					 */
 
 					vs_ie_length = 14;
 				}
@@ -3406,7 +3407,7 @@ void issue_assocreq(struct adapter *padapter)
 		rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
 }
 
-/* when wait_ack is ture, this function shoule be called at process context */
+/* when wait_ack is true, this function should be called at process context */
 static int _issue_nulldata(struct adapter *padapter, unsigned char *da,
 			   unsigned int power_mode, bool wait_ack)
 {
@@ -3481,7 +3482,7 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da,
 /*
  * [IMPORTANT] Don't call this function in interrupt context
  *
- * When wait_ms > 0, this function shoule be called at process context
+ * When wait_ms > 0, this function should be called at process context
  * da == NULL for station mode
  */
 int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms)
@@ -3493,7 +3494,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
 	struct sta_info *psta;
 
 
-	/* da == NULL, assum it's null data for sta to ap*/
+	/* da == NULL, assume it's null data for sta to ap*/
 	if (!da)
 		da = get_my_bssid(&(pmlmeinfo->network));
 
@@ -3558,14 +3559,14 @@ s32 issue_nulldata_in_interrupt(struct adapter *padapter, u8 *da)
 	pmlmeext = &padapter->mlmeextpriv;
 	pmlmeinfo = &pmlmeext->mlmext_info;
 
-	/* da == NULL, assum it's null data for sta to ap*/
+	/* da == NULL, assume it's null data for sta to ap*/
 	if (!da)
 		da = get_my_bssid(&(pmlmeinfo->network));
 
 	return _issue_nulldata(padapter, da, 0, false);
 }
 
-/* when wait_ack is ture, this function shoule be called at process context */
+/* when wait_ack is true, this function should be called at process context */
 static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
 			       u16 tid, bool wait_ack)
 {
@@ -3644,7 +3645,7 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
 	return ret;
 }
 
-/* when wait_ms >0 , this function shoule be called at process context */
+/* when wait_ms >0 , this function should be called at process context */
 /* da == NULL for station mode */
 int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int try_cnt, int wait_ms)
 {
@@ -3653,7 +3654,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
 	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
 	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
 
-	/* da == NULL, assum it's null data for sta to ap*/
+	/* da == NULL, assume it's null data for sta to ap*/
 	if (!da)
 		da = get_my_bssid(&(pmlmeinfo->network));
 
@@ -4264,7 +4265,7 @@ unsigned int send_beacon(struct adapter *padapter)
 
 /****************************************************************************
 
-Following are some utitity fuctions for WiFi MLME
+Following are some utility functions for WiFi MLME
 
 *****************************************************************************/
 
@@ -4568,7 +4569,7 @@ u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, str
 	}
 	#endif
 
-	/*  mark bss info receving from nearby channel as SignalQuality 101 */
+	/*  mark bss info receiving from nearby channel as SignalQuality 101 */
 	if (bssid->Configuration.DSConfig != rtw_get_oper_ch(padapter))
 		bssid->PhyInfo.SignalQuality = 101;
 
@@ -4589,7 +4590,7 @@ void start_create_ibss(struct adapter *padapter)
 	/* update wireless mode */
 	update_wireless_mode(padapter);
 
-	/* udpate capability */
+	/* update capability */
 	caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
 	update_capinfo(padapter, caps);
 	if (caps&cap_IBSS) {/* adhoc master */
@@ -4644,7 +4645,7 @@ void start_clnt_join(struct adapter *padapter)
 	/* update wireless mode */
 	update_wireless_mode(padapter);
 
-	/* udpate capability */
+	/* update capability */
 	caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
 	update_capinfo(padapter, caps);
 	if (caps&cap_ESS) {
@@ -5379,8 +5380,7 @@ static void rtw_mlmeext_disconnect(struct adapter *padapter)
 
 	/* set_opmode_cmd(padapter, infra_client_with_mlme); */
 
-	/*
-	 * For safety, prevent from keeping macid sleep.
+	/* For safety, prevent from keeping macid sleep.
 	 * If we can sure all power mode enter/leave are paired,
 	 * this check can be removed.
 	 * Lucas@20131113
@@ -5441,7 +5441,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
 	/* turn on dynamic functions */
 	Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
 
-	/*  update IOT-releated issue */
+	/*  update IOT-related issue */
 	update_IOT_info(padapter);
 
 	rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, cur_network->SupportedRates);
@@ -5449,7 +5449,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
 	/* BCN interval */
 	rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&pmlmeinfo->bcn_interval));
 
-	/* udpate capability */
+	/* update capability */
 	update_capinfo(padapter, pmlmeinfo->capability);
 
 	/* WMM, Update EDCA param */
@@ -6385,7 +6385,9 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
 		Save_DM_Func_Flag(padapter);
 		Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
 
-		/* config the initial gain under scaning, need to write the BB registers */
+		/* config the initial gain under scanning, need to write the BB
+		 * registers
+		 */
 		initialgain = 0x1e;
 
 		rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf.c b/drivers/staging/rtl8723bs/hal/HalPhyRf.c
index beb4002..357802d 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf.c
@@ -622,33 +622,3 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
 
 	pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
 }
-
-
-
-
-/* 3 ============================================================ */
-/* 3 IQ Calibration */
-/* 3 ============================================================ */
-
-u8 ODM_GetRightChnlPlaceforIQK(u8 chnl)
-{
-	u8 channel_all[ODM_TARGET_CHNL_NUM_2G_5G] = {
-		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
-		36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
-		60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
-		114, 116, 118, 120, 122, 124, 126, 128, 130, 132,
-		134, 136, 138, 140, 149, 151, 153, 155, 157, 159,
-		161, 163, 165
-	};
-	u8 place = chnl;
-
-
-	if (chnl > 14) {
-		for (place = 14; place < sizeof(channel_all); place++) {
-			if (channel_all[place] == chnl)
-				return place-13;
-		}
-	}
-	return 0;
-
-}
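The removed ODM_GetRightChnlPlaceforIQK() only returned a non-zero index for 5 GHz channels; on the 2.4 GHz-only RTL8723B every tuned channel (1-14) maps to index 0, which is why the HalPhyRf_8723B.c hunk further below can use IQKMatrixRegSetting[0] directly. A minimal, hypothetical sketch of that equivalence (assuming a 2.4 GHz-only chip; not driver code):

#include <linux/types.h>
#include <linux/bug.h>

/* Hypothetical stand-in for the removed lookup: chnl is always 1..14 on
 * this chip, so the 5 GHz table search never ran and the index was 0.
 */
static u8 iqk_index_for_channel(u8 chnl)
{
	WARN_ON(chnl > 14);	/* 5 GHz channels are not used by this chip */
	return 0;
}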
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf.h b/drivers/staging/rtl8723bs/hal/HalPhyRf.h
index 3d6f68b..643fcf3 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf.h
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf.h
@@ -44,12 +44,4 @@ void ODM_ClearTxPowerTrackingState(PDM_ODM_T pDM_Odm);
 
 void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter);
 
-
-
-#define ODM_TARGET_CHNL_NUM_2G_5G 59
-
-
-u8 ODM_GetRightChnlPlaceforIQK(u8 chnl);
-
-
 #endif	/*  #ifndef __HAL_PHY_RF_H__ */
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
index 1ca9063..85ea535 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
+/*****************************************************************************
  *
  * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
  *
@@ -82,7 +82,9 @@ static void setIqkMatrix_8723B(
 		/* if (RFPath == ODM_RF_PATH_A) */
 		switch (RFPath) {
 		case ODM_RF_PATH_A:
-			/* wirte new elements A, C, D to regC80 and regC94, element B is always 0 */
+			/* write new elements A, C, D to regC80 and regC94,
+			 * element B is always 0
+			 */
 			value32 = (ele_D<<22)|((ele_C&0x3F)<<16)|ele_A;
 			PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XATxIQImbalance, bMaskDWord, value32);
 
@@ -93,7 +95,9 @@ static void setIqkMatrix_8723B(
 			PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, BIT24, value32);
 			break;
 		case ODM_RF_PATH_B:
-			/* wirte new elements A, C, D to regC88 and regC9C, element B is always 0 */
+			/* write new elements A, C, D to regC88 and regC9C,
+			 * element B is always 0
+			 */
 			value32 = (ele_D<<22)|((ele_C&0x3F)<<16)|ele_A;
 			PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
 
@@ -166,7 +170,7 @@ void DoIQK_8723B(
 /*-----------------------------------------------------------------------------
  * Function:	odm_TxPwrTrackSetPwr88E()
  *
- * Overview:	88E change all channel tx power accordign to flag.
+ * Overview:	88E changes all channel tx power according to the flag.
  *			OFDM & CCK are all different.
  *
  * Input:		NONE
@@ -1788,7 +1792,7 @@ void PHY_IQCalibrate_8723B(
 	PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
 
 	s32 result[4][8];	/* last is final result */
-	u8 i, final_candidate, Indexforchannel;
+	u8 i, final_candidate;
 	bool bPathAOK, bPathBOK;
 	s32 RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC, RegTmp = 0;
 	bool is12simular, is13simular, is23simular;
@@ -1993,17 +1997,14 @@ void PHY_IQCalibrate_8723B(
 			_PHY_PathBFillIQKMatrix8723B(padapter, bPathBOK, result, final_candidate, (RegEC4 == 0));
 	}
 
-	Indexforchannel = ODM_GetRightChnlPlaceforIQK(pHalData->CurrentChannel);
-
 /* To Fix BSOD when final_candidate is 0xff */
 /* by sherry 20120321 */
 	if (final_candidate < 4) {
 		for (i = 0; i < IQK_Matrix_REG_NUM; i++)
-			pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][i] = result[final_candidate][i];
-		pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].bIQKDone = true;
+			pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[0].Value[0][i] = result[final_candidate][i];
+		pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[0].bIQKDone = true;
 	}
-	/* RT_DISP(FINIT, INIT_IQK, ("\nIQK OK Indexforchannel %d.\n", Indexforchannel)); */
-	ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,  ("\nIQK OK Indexforchannel %d.\n", Indexforchannel));
+	ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,  ("\nIQK OK Indexforchannel %d.\n", 0));
 
 	_PHY_SaveADDARegisters8723B(padapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
 
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index 66127f6..de8caa6 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -750,7 +750,7 @@ static void Hal_BT_EfusePowerSwitch(
 		rtw_write8(padapter, 0x6B, tempval);
 
 		/*  Attention!! Between 0x6A[14] and 0x6A[15] setting need 100us delay */
-		/*  So don't wirte 0x6A[14]= 1 and 0x6A[15]= 0 together! */
+		/*  So don't write 0x6A[14]= 1 and 0x6A[15]= 0 together! */
 		msleep(1);
 		/*  disable BT output isolation */
 		/*  0x6A[15] = 0 */
@@ -765,7 +765,7 @@ static void Hal_BT_EfusePowerSwitch(
 		rtw_write8(padapter, 0x6B, tempval);
 
 		/*  Attention!! Between 0x6A[14] and 0x6A[15] setting need 100us delay */
-		/*  So don't wirte 0x6A[14]= 1 and 0x6A[15]= 0 together! */
+		/*  So don't write 0x6A[14]= 1 and 0x6A[15]= 0 together! */
 
 		/*  disable BT power cut */
 		/*  0x6A[14] = 1 */
@@ -1231,7 +1231,7 @@ static u16 hal_EfuseGetCurrentSize_WiFi(
 	goto exit;
 
 error:
-	/*  report max size to prevent wirte efuse */
+	/*  report max size to prevent write efuse */
 	EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, &efuse_addr, bPseudoTest);
 
 exit:
@@ -2237,7 +2237,7 @@ void rtl8723b_InitAntenna_Selection(struct adapter *padapter)
 	u8 val;
 
 	val = rtw_read8(padapter, REG_LEDCFG2);
-	/*  Let 8051 take control antenna settting */
+	/*  Let 8051 take control antenna setting */
 	val |= BIT(7); /*  DPDT_SEL_EN, 0x4C[23] */
 	rtw_write8(padapter, REG_LEDCFG2, val);
 }
@@ -3191,22 +3191,26 @@ static void rtl8723b_fill_default_txdesc(
 	if (bmcst)
 		ptxdesc->bmc = 1;
 
-	/*  2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
-	/*  (1) The sequence number of each non-Qos frame / broadcast / multicast / */
-	/*  mgnt frame should be controled by Hw because Fw will also send null data */
-	/*  which we cannot control when Fw LPS enable. */
-	/*  --> default enable non-Qos data sequense number. 2010.06.23. by tynli. */
-	/*  (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. */
-	/*  (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos packets. */
-	/*  2010.06.23. Added by tynli. */
+	/* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS.
+	 * (1) The sequence number of each non-Qos frame / broadcast /
+	 * multicast / mgnt frame should be controlled by Hw because Fw
+	 * will also send null data which we cannot control when Fw LPS
+	 * is enabled.
+	 * --> default enable non-Qos data sequence number. 2010.06.23.
+	 * by tynli.
+	 * (2) Enable HW SEQ control for beacon packet, because we use
+	 * Hw beacon.
+	 * (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos
+	 * packets.
+	 * 2010.06.23. Added by tynli.
+	 */
 	if (!pattrib->qos_en) /*  Hw set sequence number */
 		ptxdesc->en_hwseq = 1; /*  HWSEQ_EN */
 }
 
-/*
- *Description:
+/* Description:
  *
- *Parameters:
+ * Parameters:
  *	pxmitframe	xmitframe
  *	pbuf		where to fill tx desc
  */
@@ -3543,7 +3547,7 @@ static void hw_var_set_mlme_sitesurvey(struct adapter *padapter, u8 variable, u8
 			rtw_write8(padapter, reg_bcn_ctl, val8);
 		}
 
-		/*  Save orignal RRSR setting. */
+		/*  Save original RRSR setting. */
 		pHalData->RegRRSR = rtw_read16(padapter, REG_RRSR);
 	} else {
 		/*  sitesurvey done */
@@ -3561,7 +3565,7 @@ static void hw_var_set_mlme_sitesurvey(struct adapter *padapter, u8 variable, u8
 		value_rcr |= rcr_clear_bit;
 		rtw_write32(padapter, REG_RCR, value_rcr);
 
-		/*  Restore orignal RRSR setting. */
+		/*  Restore original RRSR setting. */
 		rtw_write16(padapter, REG_RRSR, pHalData->RegRRSR);
 	}
 }
@@ -4329,8 +4333,7 @@ void GetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val)
 	}
 }
 
-/*
- *Description:
+/* Description:
  *	Change default setting of specified variable.
  */
 u8 SetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, void *pval)
@@ -4348,8 +4351,7 @@ u8 SetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, v
 	return bResult;
 }
 
-/*
- *Description:
+/* Description:
  *	Query setting of specified variable.
  */
 u8 GetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, void *pval)
diff --git a/drivers/staging/rts5208/Makefile b/drivers/staging/rts5208/Makefile
index 6a934c4..3c9e979 100644
--- a/drivers/staging/rts5208/Makefile
+++ b/drivers/staging/rts5208/Makefile
@@ -1,7 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_RTS5208) := rts5208.o
 
-ccflags-y := -Idrivers/scsi
-
 rts5208-y := rtsx.o rtsx_chip.o rtsx_transport.o rtsx_scsi.o \
 	rtsx_card.o general.o sd.o xd.o ms.o spi.o
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index fbb42e5..be0053c 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -831,7 +831,8 @@ static int rtsx_probe(struct pci_dev *pci,
 	host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
 	if (!host) {
 		dev_err(&pci->dev, "Unable to allocate the scsi host\n");
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto scsi_host_alloc_fail;
 	}
 
 	dev = host_to_rtsx(host);
@@ -971,7 +972,8 @@ static int rtsx_probe(struct pci_dev *pci,
 	kfree(dev->chip);
 chip_alloc_fail:
 	dev_err(&pci->dev, "%s failed\n", __func__);
-
+scsi_host_alloc_fail:
+	pci_release_regions(pci);
 	return err;
 }
 
@@ -983,6 +985,7 @@ static void rtsx_remove(struct pci_dev *pci)
 
 	quiesce_and_remove_host(dev);
 	release_everything(dev);
+	pci_release_regions(pci);
 }
 
 /* PCI IDs */
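The probe fix above adds a scsi_host_alloc_fail label so that a late allocation failure still releases the PCI regions claimed earlier, and rtsx_remove() now releases them on teardown as well. A hedged, self-contained sketch of the same goto-unwinding pattern (hypothetical driver, not the rts5208 code):

#include <linux/pci.h>
#include <linux/slab.h>

static int example_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	void *ctx;
	int err;

	err = pci_request_regions(pci, "example");
	if (err)
		return err;

	ctx = kzalloc(64, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto release_regions;	/* plays the role of scsi_host_alloc_fail */
	}

	pci_set_drvdata(pci, ctx);
	return 0;

release_regions:
	pci_release_regions(pci);
	return err;
}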
diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile
index afe43fa..54d9e2f 100644
--- a/drivers/staging/vc04_services/Makefile
+++ b/drivers/staging/vc04_services/Makefile
@@ -13,5 +13,5 @@
 obj-$(CONFIG_SND_BCM2835)	+= bcm2835-audio/
 obj-$(CONFIG_VIDEO_BCM2835)	+= bcm2835-camera/
 
-ccflags-y += -Idrivers/staging/vc04_services -D__VCCOREVER__=0x04000000
+ccflags-y += -D__VCCOREVER__=0x04000000
 
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
index 826016c..3348518 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -193,17 +193,6 @@ static int snd_bcm2835_playback_close(struct snd_pcm_substream *substream)
 	return 0;
 }
 
-static int snd_bcm2835_pcm_hw_params(struct snd_pcm_substream *substream,
-	struct snd_pcm_hw_params *params)
-{
-	return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
-}
-
-static int snd_bcm2835_pcm_hw_free(struct snd_pcm_substream *substream)
-{
-	return snd_pcm_lib_free_pages(substream);
-}
-
 static int snd_bcm2835_pcm_prepare(struct snd_pcm_substream *substream)
 {
 	struct bcm2835_chip *chip = snd_pcm_substream_chip(substream);
@@ -316,9 +305,6 @@ snd_bcm2835_pcm_pointer(struct snd_pcm_substream *substream)
 static const struct snd_pcm_ops snd_bcm2835_playback_ops = {
 	.open = snd_bcm2835_playback_open,
 	.close = snd_bcm2835_playback_close,
-	.ioctl = snd_pcm_lib_ioctl,
-	.hw_params = snd_bcm2835_pcm_hw_params,
-	.hw_free = snd_bcm2835_pcm_hw_free,
 	.prepare = snd_bcm2835_pcm_prepare,
 	.trigger = snd_bcm2835_pcm_trigger,
 	.pointer = snd_bcm2835_pcm_pointer,
@@ -328,9 +314,6 @@ static const struct snd_pcm_ops snd_bcm2835_playback_ops = {
 static const struct snd_pcm_ops snd_bcm2835_playback_spdif_ops = {
 	.open = snd_bcm2835_playback_spdif_open,
 	.close = snd_bcm2835_playback_close,
-	.ioctl = snd_pcm_lib_ioctl,
-	.hw_params = snd_bcm2835_pcm_hw_params,
-	.hw_free = snd_bcm2835_pcm_hw_free,
 	.prepare = snd_bcm2835_pcm_prepare,
 	.trigger = snd_bcm2835_pcm_trigger,
 	.pointer = snd_bcm2835_pcm_pointer,
@@ -362,7 +345,7 @@ int snd_bcm2835_new_pcm(struct bcm2835_chip *chip, const char *name,
 			spdif ? &snd_bcm2835_playback_spdif_ops :
 			&snd_bcm2835_playback_ops);
 
-	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
 		chip->card->dev, 128 * 1024, 128 * 1024);
 
 	if (spdif)
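With snd_pcm_set_managed_buffer_all() the ALSA core takes over buffer preallocation and sizing around hw_params, so the hw_params/hw_free callbacks become unnecessary (and recent kernels fall back to the default ioctl handler when .ioctl is left unset), which is why they are deleted above. A hedged sketch of the minimal managed-buffer setup (helper name hypothetical; the callbacks are the ones kept by this patch):

#include <sound/core.h>
#include <sound/pcm.h>

static void example_setup_pcm(struct bcm2835_chip *chip, struct snd_pcm *pcm,
			      const struct snd_pcm_ops *ops)
{
	/* Playback stream keeps only open/close/prepare/trigger/pointer ops. */
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, ops);

	/* Core-managed DMA buffer: 128 KiB default and maximum. */
	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
				       chip->card->dev, 128 * 1024, 128 * 1024);
}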
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index beb6a00..1ef31a9 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -60,6 +60,9 @@ MODULE_PARM_DESC(max_video_width, "Threshold for video mode");
 module_param(max_video_height, int, 0644);
 MODULE_PARM_DESC(max_video_height, "Threshold for video mode");
 
+/* camera instance counter */
+static atomic_t camera_instance = ATOMIC_INIT(0);
+
 /* global device data array */
 static struct bm2835_mmal_dev *gdev[MAX_BCM2835_CAMERAS];
 
@@ -1870,7 +1873,6 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
 
 		/* v4l2 core mutex used to protect all fops and v4l2 ioctls. */
 		mutex_init(&dev->mutex);
-		dev->camera_num = camera;
 		dev->max_width = resolutions[camera][0];
 		dev->max_height = resolutions[camera][1];
 
@@ -1886,8 +1888,9 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
 		dev->capture.fmt = &formats[3]; /* JPEG */
 
 		/* v4l device registration */
-		snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
-			 "%s", BM2835_MMAL_MODULE_NAME);
+		dev->camera_num = v4l2_device_set_name(&dev->v4l2_dev,
+						       BM2835_MMAL_MODULE_NAME,
+						       &camera_instance);
 		ret = v4l2_device_register(NULL, &dev->v4l2_dev);
 		if (ret) {
 			dev_err(&pdev->dev, "%s: could not register V4L2 device: %d\n",
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index 56b1037..ff2b960 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -4,8 +4,8 @@
 #ifndef VCHI_H_
 #define VCHI_H_
 
-#include "interface/vchi/vchi_cfg.h"
-#include "interface/vchi/vchi_common.h"
+#include "vchi_cfg.h"
+#include "vchi_common.h"
 
 /******************************************************************************
  * Global defs
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index 0ce3b08..efdd3b1 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -3,7 +3,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 
-#include "interface/vchi/vchi.h"
+#include "../vchi/vchi.h"
 #include "vchiq.h"
 #include "vchiq_core.h"
 
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index fb2855e..d6ca6e5 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -758,7 +758,7 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
  */
 bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH)
 {
-	bool ret = true;
+	bool ret;
 	unsigned char byPwr = 0;
 	unsigned char byDec = 0;
 
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 4e651b6..f18e059 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -381,8 +381,8 @@ int vnt_vt3184_init(struct vnt_private *priv)
 
 	dev_dbg(&priv->usb->dev, "RF Type %d\n", priv->rf_type);
 
-	if ((priv->rf_type == RF_AL2230) ||
-	    (priv->rf_type == RF_AL2230S)) {
+	if (priv->rf_type == RF_AL2230 ||
+	    priv->rf_type == RF_AL2230S) {
 		priv->bb_rx_conf = vnt_vt3184_al2230[10];
 		length = sizeof(vnt_vt3184_al2230);
 		addr = vnt_vt3184_al2230;
@@ -461,8 +461,8 @@ int vnt_vt3184_init(struct vnt_private *priv)
 	if (ret)
 		goto end;
 
-	if ((priv->rf_type == RF_VT3226) ||
-	    (priv->rf_type == RF_VT3342A0)) {
+	if (priv->rf_type == RF_VT3226 ||
+	    priv->rf_type == RF_VT3342A0) {
 		ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
 					 MAC_REG_ITRTMSET, 0x23);
 		if (ret)
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 50e1c89..fe6c112 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -52,6 +52,8 @@
 #define RATE_AUTO	12
 
 #define MAX_RATE			12
+#define VNT_B_RATES	(BIT(RATE_1M) | BIT(RATE_2M) |\
+			BIT(RATE_5M) | BIT(RATE_11M))
 
 /*
  * device specific
@@ -204,6 +206,22 @@ enum {
 	CONTEXT_BEACON_PACKET
 };
 
+struct vnt_rx_header {
+	u32 wbk_status;
+	u8 rx_sts;
+	u8 rx_rate;
+	u16 pay_load_len;
+} __packed;
+
+struct vnt_rx_tail {
+	__le64 tsf_time;
+	u8 sq;
+	u8 new_rsr;
+	u8 rssi;
+	u8 rsr;
+	u8 sq_3;
+} __packed;
+
 /* RCB (Receive Control Block) */
 struct vnt_rcb {
 	void *priv;
@@ -262,7 +280,6 @@ struct vnt_private {
 	struct usb_interface *intf;
 
 	u64 tsf_time;
-	u8 rx_rate;
 
 	u32 rx_buf_sz;
 	int mc_list_count;
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 3b94e80f..821aae8 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -29,27 +29,21 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
 	struct ieee80211_hw *hw = priv->hw;
 	struct ieee80211_supported_band *sband;
 	struct sk_buff *skb;
-	struct ieee80211_rx_status rx_status = { 0 };
-	struct ieee80211_hdr *hdr;
-	__le16 fc;
-	u8 *rsr, *new_rsr, *rssi;
-	__le64 *tsf_time;
+	struct ieee80211_rx_status *rx_status;
+	struct vnt_rx_header *head;
+	struct vnt_rx_tail *tail;
 	u32 frame_size;
-	int ii, r;
-	u8 *rx_rate, *sq, *sq_3;
-	u32 wbk_status;
-	u8 *skb_data;
-	u16 *pay_load_len;
-	u16 pay_load_with_padding;
+	int ii;
+	u16 rx_bitrate, pay_load_with_padding;
 	u8 rate_idx = 0;
-	u8 rate[MAX_RATE] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108};
 	long rx_dbm;
 
 	skb = ptr_rcb->skb;
+	rx_status = IEEE80211_SKB_RXCB(skb);
 
 	/* [31:16]RcvByteCount ( not include 4-byte Status ) */
-	wbk_status = *((u32 *)(skb->data));
-	frame_size = wbk_status >> 16;
+	head = (struct vnt_rx_header *)skb->data;
+	frame_size = head->wbk_status >> 16;
 	frame_size += 4;
 
 	if (bytes_received != frame_size) {
@@ -63,106 +57,66 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
 		return false;
 	}
 
-	skb_data = (u8 *)skb->data;
-
-	rx_rate = skb_data + 5;
-
 	/* real Frame Size = USBframe_size -4WbkStatus - 4RxStatus */
 	/* -8TSF - 4RSR - 4SQ3 - ?Padding */
 
 	/* if SQ3 the range is 24~27, if no SQ3 the range is 20~23 */
 
-	pay_load_len = (u16 *)(skb_data + 6);
-
 	/*Fix hardware bug => PLCP_Length error */
-	if (((bytes_received - (*pay_load_len)) > 27) ||
-	    ((bytes_received - (*pay_load_len)) < 24) ||
-	    (bytes_received < (*pay_load_len))) {
+	if (((bytes_received - head->pay_load_len) > 27) ||
+	    ((bytes_received - head->pay_load_len) < 24) ||
+	    (bytes_received < head->pay_load_len)) {
 		dev_dbg(&priv->usb->dev, "Wrong PLCP Length %x\n",
-			*pay_load_len);
+			head->pay_load_len);
 		return false;
 	}
 
 	sband = hw->wiphy->bands[hw->conf.chandef.chan->band];
-
-	for (r = RATE_1M; r < MAX_RATE; r++) {
-		if (*rx_rate == rate[r])
-			break;
-	}
-
-	priv->rx_rate = r;
+	rx_bitrate = head->rx_rate * 5; /* 500 kbps steps -> 100 kbps units */
 
 	for (ii = 0; ii < sband->n_bitrates; ii++) {
-		if (sband->bitrates[ii].hw_value == r) {
+		if (sband->bitrates[ii].bitrate == rx_bitrate) {
 			rate_idx = ii;
 				break;
 		}
 	}
 
 	if (ii == sband->n_bitrates) {
-		dev_dbg(&priv->usb->dev, "Wrong RxRate %x\n", *rx_rate);
+		dev_dbg(&priv->usb->dev, "Wrong Rx Bit Rate %d\n", rx_bitrate);
 		return false;
 	}
 
-	pay_load_with_padding = ((*pay_load_len / 4) +
-		((*pay_load_len % 4) ? 1 : 0)) * 4;
+	pay_load_with_padding = ((head->pay_load_len / 4) +
+		((head->pay_load_len % 4) ? 1 : 0)) * 4;
 
-	tsf_time = (__le64 *)(skb_data + 8 + pay_load_with_padding);
+	tail = (struct vnt_rx_tail *)(skb->data +
+				      sizeof(*head) + pay_load_with_padding);
+	priv->tsf_time = le64_to_cpu(tail->tsf_time);
 
-	priv->tsf_time = le64_to_cpu(*tsf_time);
-
-	if (priv->bb_type == BB_TYPE_11G) {
-		sq_3 = skb_data + 8 + pay_load_with_padding + 12;
-		sq = sq_3;
-	} else {
-		sq = skb_data + 8 + pay_load_with_padding + 8;
-		sq_3 = sq;
-	}
-
-	new_rsr = skb_data + 8 + pay_load_with_padding + 9;
-	rssi = skb_data + 8 + pay_load_with_padding + 10;
-
-	rsr = skb_data + 8 + pay_load_with_padding + 11;
-	if (*rsr & (RSR_IVLDTYP | RSR_IVLDLEN))
+	if (tail->rsr & (RSR_IVLDTYP | RSR_IVLDLEN))
 		return false;
 
-	frame_size = *pay_load_len;
-
-	vnt_rf_rssi_to_dbm(priv, *rssi, &rx_dbm);
+	vnt_rf_rssi_to_dbm(priv, tail->rssi, &rx_dbm);
 
 	priv->bb_pre_ed_rssi = (u8)rx_dbm + 1;
 	priv->current_rssi = priv->bb_pre_ed_rssi;
 
-	skb_pull(skb, 8);
-	skb_trim(skb, frame_size);
+	skb_pull(skb, sizeof(*head));
+	skb_trim(skb, head->pay_load_len);
 
-	rx_status.mactime = priv->tsf_time;
-	rx_status.band = hw->conf.chandef.chan->band;
-	rx_status.signal = rx_dbm;
-	rx_status.flag = 0;
-	rx_status.freq = hw->conf.chandef.chan->center_freq;
+	rx_status->mactime = priv->tsf_time;
+	rx_status->band = hw->conf.chandef.chan->band;
+	rx_status->signal = rx_dbm;
+	rx_status->flag = 0;
+	rx_status->freq = hw->conf.chandef.chan->center_freq;
 
-	if (!(*rsr & RSR_CRCOK))
-		rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
+	if (!(tail->rsr & RSR_CRCOK))
+		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
 
-	hdr = (struct ieee80211_hdr *)(skb->data);
-	fc = hdr->frame_control;
+	rx_status->rate_idx = rate_idx;
 
-	rx_status.rate_idx = rate_idx;
-
-	if (ieee80211_has_protected(fc)) {
-		if (priv->local_id > REV_ID_VT3253_A1) {
-			rx_status.flag |= RX_FLAG_DECRYPTED;
-
-			/* Drop packet */
-			if (!(*new_rsr & NEWRSR_DECRYPTOK)) {
-				dev_kfree_skb(skb);
-				return true;
-			}
-		}
-	}
-
-	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+	if (tail->new_rsr & NEWRSR_DECRYPTOK)
+		rx_status->flag |= RX_FLAG_DECRYPTED;
 
 	ieee80211_rx_irqsafe(priv->hw, skb);
 
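The rewritten vnt_rx_data() replaces open-coded offsets with the packed vnt_rx_header/vnt_rx_tail views added to device.h: the header sits at the start of the USB buffer and the tail follows the payload, which the hardware pads to a 4-byte boundary. The rate code in the header is in 500 kbps steps, so multiplying by 5 yields mac80211's 100 kbps bitrate units. A hedged sketch of that layout handling (helper name hypothetical; assumes the structures added by this patch):

#include <linux/kernel.h>
#include "device.h"

static void example_locate_rx_parts(const u8 *data)
{
	const struct vnt_rx_header *head = (const struct vnt_rx_header *)data;
	/* Payload is padded to a multiple of 4 bytes before the tail. */
	u16 padded_len = ALIGN(head->pay_load_len, 4);
	const struct vnt_rx_tail *tail =
		(const struct vnt_rx_tail *)(data + sizeof(*head) + padded_len);
	/* 500 kbps rate code -> 100 kbps units used by ieee80211_rate.bitrate */
	u16 rx_bitrate = head->rx_rate * 5;

	(void)tail;
	(void)rx_bitrate;
}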
diff --git a/drivers/staging/vt6656/firmware.c b/drivers/staging/vt6656/firmware.c
index 60a00af..70358d4 100644
--- a/drivers/staging/vt6656/firmware.c
+++ b/drivers/staging/vt6656/firmware.c
@@ -30,7 +30,6 @@ int vnt_download_firmware(struct vnt_private *priv)
 {
 	struct device *dev = &priv->usb->dev;
 	const struct firmware *fw;
-	void *buffer = NULL;
 	u16 length;
 	int ii;
 	int ret = 0;
@@ -44,26 +43,17 @@ int vnt_download_firmware(struct vnt_private *priv)
 		goto end;
 	}
 
-	buffer = kmalloc(FIRMWARE_CHUNK_SIZE, GFP_KERNEL);
-	if (!buffer) {
-		ret = -ENOMEM;
-		goto free_fw;
-	}
-
 	for (ii = 0; ii < fw->size; ii += FIRMWARE_CHUNK_SIZE) {
 		length = min_t(int, fw->size - ii, FIRMWARE_CHUNK_SIZE);
-		memcpy(buffer, fw->data + ii, length);
 
 		ret = vnt_control_out(priv, 0, 0x1200 + ii, 0x0000, length,
-				      buffer);
+				      fw->data + ii);
 		if (ret)
-			goto free_buffer;
+			goto free_fw;
 
 		dev_dbg(dev, "Download firmware...%d %zu\n", ii, fw->size);
 	}
 
-free_buffer:
-	kfree(buffer);
 free_fw:
 	release_firmware(fw);
 end:
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index f409479..af21586 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -99,9 +99,11 @@ static int vnt_int_report_rate(struct vnt_private *priv, u8 pkt_no, u8 tsr)
 
 	info->status.rates[0].count = tx_retry;
 
-	if (!(tsr & (TSR_TMO | TSR_RETRYTMO))) {
+	if (!(tsr & TSR_TMO)) {
 		info->status.rates[0].idx = idx;
-		info->flags |= IEEE80211_TX_STAT_ACK;
+
+		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+			info->flags |= IEEE80211_TX_STAT_ACK;
 	}
 
 	ieee80211_tx_status_irqsafe(priv->hw, context->skb);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 9cb924c..5e48b3dd 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1015,6 +1015,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
 	ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
 	ieee80211_hw_set(priv->hw, SUPPORTS_PS);
+	ieee80211_hw_set(priv->hw, PS_NULLFUNC_STACK);
 
 	priv->hw->max_signal = 100;
 
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index f9020a4..29caba7 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -278,11 +278,9 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
 			  PK_TYPE_11B, &buf->b);
 
 	/* Get Duration and TimeStamp */
-	if (ieee80211_is_pspoll(hdr->frame_control)) {
-		__le16 dur = cpu_to_le16(priv->current_aid | BIT(14) | BIT(15));
-
-		buf->duration_a = dur;
-		buf->duration_b = dur;
+	if (ieee80211_is_nullfunc(hdr->frame_control)) {
+		buf->duration_a = hdr->duration_id;
+		buf->duration_b = hdr->duration_id;
 	} else {
 		buf->duration_a = vnt_get_duration_le(priv,
 						tx_context->pkt_type, need_ack);
@@ -371,10 +369,8 @@ static u16 vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context,
 			  tx_context->pkt_type, &buf->ab);
 
 	/* Get Duration and TimeStampOff */
-	if (ieee80211_is_pspoll(hdr->frame_control)) {
-		__le16 dur = cpu_to_le16(priv->current_aid | BIT(14) | BIT(15));
-
-		buf->duration = dur;
+	if (ieee80211_is_nullfunc(hdr->frame_control)) {
+		buf->duration = hdr->duration_id;
 	} else {
 		buf->duration = vnt_get_duration_le(priv, tx_context->pkt_type,
 						    need_ack);
@@ -815,10 +811,14 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 		if (info->band == NL80211_BAND_5GHZ) {
 			pkt_type = PK_TYPE_11A;
 		} else {
-			if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
-				pkt_type = PK_TYPE_11GB;
-			else
-				pkt_type = PK_TYPE_11GA;
+			if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+				if (priv->basic_rates & VNT_B_RATES)
+					pkt_type = PK_TYPE_11GB;
+				else
+					pkt_type = PK_TYPE_11GA;
+			} else {
+				pkt_type = PK_TYPE_11A;
+			}
 		}
 	} else {
 		pkt_type = PK_TYPE_11B;
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index d977d47..7bfccc4 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -34,7 +34,7 @@
 #define USB_CTL_WAIT	500 /* ms */
 
 int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
-		    u16 index, u16 length, u8 *buffer)
+		    u16 index, u16 length, const u8 *buffer)
 {
 	int ret = 0;
 	u8 *usb_buffer;
diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h
index b65d9c0..4e3341b 100644
--- a/drivers/staging/vt6656/usbpipe.h
+++ b/drivers/staging/vt6656/usbpipe.h
@@ -21,7 +21,7 @@
 #define VNT_REG_BLOCK_SIZE	64
 
 int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
-		    u16 index, u16 length, u8 *buffer);
+		    u16 index, u16 length, const u8 *buffer);
 int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
 		   u16 index, u16 length,  u8 *buffer);
 
diff --git a/drivers/staging/wfx/TODO b/drivers/staging/wfx/TODO
index e447722..efcb7c6 100644
--- a/drivers/staging/wfx/TODO
+++ b/drivers/staging/wfx/TODO
@@ -1,17 +1,60 @@
 This is a list of things that need to be done to get this driver out of the
 staging directory.
 
-  - I have to take a decision about secure link support. I can:
-      - drop completely
-      - keep it in an external patch (my preferred option)
-      - replace call to mbedtls with kernel crypto API (necessitate a
-        bunch of work)
-      - pull mbedtls in kernel (non-realistic)
+  - All structures defined in hif_api_*.h are intended to be sent to/received
+    from the hardware. All their members should be declared __le32 or __le16
+    (see the sketch after this list). See:
+       https://lore.kernel.org/lkml/20191111202852.GX26530@ZenIV.linux.org.uk
 
-  - mac80211 interface does not (yet) have expected quality to be placed
-    outside of staging:
-      - Some processings are redundant with mac80211 ones
-      - Many members from wfx_dev/wfx_vif can be retrieved from mac80211
-        structures
-      - Some functions are too complex
-      - ...
+  - Once the previous item is done, it will be possible to audit the driver
+    with `sparse'. It will probably find plenty of problems on big endian
+    architectures.
+
+  - The hif_api_*.h files have been imported from the firmware code. Some of
+    the structures are never used in the driver.
+
+  - The driver tries to maintain the power save status of the stations.
+    However, this work is already done by mac80211. sta_asleep_mask and
+    pspoll_mask should be dropped.
+
+  - wfx_tx_queues_get() should be reworked. It currently tries to compute the
+    QoS policy itself. However, the firmware already does that job; it would
+    prefer to have a few packets in each queue and to choose itself which
+    queue to use.
+
+  - As suggested by Felix, rate control could be improved following this idea:
+        https://lore.kernel.org/lkml/3099559.gv3Q75KnN1@pc-42/
+
+  - When the driver is about to lose the BSS, it forges its own Null Func request
+    (see wfx_cqm_bssloss_sm()). It should use the mechanism provided by mac80211.
+
+  - The AP is actually set up after a call to wfx_bss_info_changed(). Yet,
+    ieee80211_ops provides a start_ap() callback.
+
+  - The current process for joining a network is incredibly complex and should
+    be reworked.
+
+  - Monitor mode is not implemented despite being mandated by mac80211.
+
+  - "compatible" values are not correct. They should be "vendor,chip". See:
+       https://lore.kernel.org/driverdev-devel/5226570.CMH5hVlZcI@pc-42
+
+  - The "state" field from wfx_vif should be replaced by "vif->type".
+
+  - It seems that wfx_upload_keys() is useless.
+
+  - "event_queue" from wfx_vif seems overkill. These events are rare and could
+    probably be handled in a simpler fashion.
+
+  - The "secure link" feature should be either developed (using the kernel
+    crypto API) or dropped.
+
+  - In wfx_cmd_send(), "async" allows sending a command without waiting for the
+    reply. It may help in some situations, but it is not yet used. In addition,
+    it may cause some trouble:
+      https://lore.kernel.org/driverdev-devel/alpine.DEB.2.21.1910041317381.2992@hadrien/
+    So, fix it (by replacing the mutex with a semaphore) or drop it.
+
+  - The chip supports P2P, but the driver does not implement it.
+
+  - The chip supports a kind of Mesh, but the driver does not implement it.
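As a hedged illustration of the endianness item at the top of this list: once hardware-bound fields carry __le types, sparse (run via make C=2) flags any access that bypasses the byte-order helpers. The structure and helper below are hypothetical, not taken from hif_api_*.h:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical HIF-style indication with explicit wire byte order. */
struct example_hif_ind {
	__le16 channel_number;
	__le32 status;
} __packed;

static u32 example_read_status(const struct example_hif_ind *ind)
{
	/* Convert from little-endian wire format to CPU order; reading
	 * ind->status directly would earn a sparse warning.
	 */
	return le32_to_cpu(ind->status);
}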
diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c
index 2432ba9..983c41d 100644
--- a/drivers/staging/wfx/bh.c
+++ b/drivers/staging/wfx/bh.c
@@ -271,8 +271,7 @@ static void bh_work(struct work_struct *work)
 
 	if (last_op_is_rx)
 		ack_sdio_data(wdev);
-	if (!wdev->hif.tx_buffers_used && !work_pending(work) &&
-	    !atomic_read(&wdev->scan_in_progress)) {
+	if (!wdev->hif.tx_buffers_used && !work_pending(work)) {
 		device_release(wdev);
 		release_chip = true;
 	}
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c
index ab0cda1..40bc330 100644
--- a/drivers/staging/wfx/bus_spi.c
+++ b/drivers/staging/wfx/bus_spi.c
@@ -107,6 +107,8 @@ static int wfx_spi_copy_to_io(void *priv, unsigned int addr,
 
 	cpu_to_le16s(&regaddr);
 
+	// Register address and CONFIG content always use 16bit big endian
+	// ("BADC" order)
 	if (bus->need_swab)
 		swab16s(&regaddr);
 	if (bus->need_swab && addr == WFX_REG_CONFIG)
@@ -183,7 +185,7 @@ static int wfx_spi_probe(struct spi_device *func)
 	if (func->bits_per_word != 16 && func->bits_per_word != 8)
 		dev_warn(&func->dev, "unusual bits/word value: %d\n",
 			 func->bits_per_word);
-	if (func->max_speed_hz > 49000000)
+	if (func->max_speed_hz > 50000000)
 		dev_warn(&func->dev, "%dHz is a very high speed\n",
 			 func->max_speed_hz);
 
@@ -223,8 +225,7 @@ static int wfx_spi_probe(struct spi_device *func)
 	return ret;
 }
 
-/* Disconnect Function to be called by SPI stack when device is disconnected */
-static int wfx_spi_disconnect(struct spi_device *func)
+static int wfx_spi_remove(struct spi_device *func)
 {
 	struct wfx_spi_priv *bus = spi_get_drvdata(func);
 
@@ -263,5 +264,5 @@ struct spi_driver wfx_spi_driver = {
 	},
 	.id_table = wfx_spi_id,
 	.probe = wfx_spi_probe,
-	.remove = wfx_spi_disconnect,
+	.remove = wfx_spi_remove,
 };
diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
index e7fcce8..5d19845 100644
--- a/drivers/staging/wfx/data_rx.c
+++ b/drivers/staging/wfx/data_rx.c
@@ -13,42 +13,9 @@
 #include "bh.h"
 #include "sta.h"
 
-static int wfx_handle_pspoll(struct wfx_vif *wvif, struct sk_buff *skb)
-{
-	struct ieee80211_sta *sta;
-	struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data;
-	int link_id = 0;
-	u32 pspoll_mask = 0;
-	int i;
-
-	if (wvif->state != WFX_STATE_AP)
-		return 1;
-	if (!ether_addr_equal(wvif->vif->addr, pspoll->bssid))
-		return 1;
-
-	rcu_read_lock();
-	sta = ieee80211_find_sta(wvif->vif, pspoll->ta);
-	if (sta)
-		link_id = ((struct wfx_sta_priv *)&sta->drv_priv)->link_id;
-	rcu_read_unlock();
-	if (link_id)
-		pspoll_mask = BIT(link_id);
-	else
-		return 1;
-
-	wvif->pspoll_mask |= pspoll_mask;
-	/* Do not report pspols if data for given link id is queued already. */
-	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
-		if (wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
-						pspoll_mask)) {
-			wfx_bh_request_tx(wvif->wdev);
-			return 1;
-		}
-	}
-	return 0;
-}
-
-static int wfx_drop_encrypt_data(struct wfx_dev *wdev, struct hif_ind_rx *arg, struct sk_buff *skb)
+static int wfx_drop_encrypt_data(struct wfx_dev *wdev,
+				 const struct hif_ind_rx *arg,
+				 struct sk_buff *skb)
 {
 	struct ieee80211_hdr *frame = (struct ieee80211_hdr *) skb->data;
 	size_t hdrlen = ieee80211_hdrlen(frame->frame_control);
@@ -98,15 +65,12 @@ static int wfx_drop_encrypt_data(struct wfx_dev *wdev, struct hif_ind_rx *arg, s
 
 }
 
-void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
-	       struct sk_buff *skb)
+void wfx_rx_cb(struct wfx_vif *wvif,
+	       const struct hif_ind_rx *arg, struct sk_buff *skb)
 {
-	int link_id = arg->rx_flags.peer_sta_id;
 	struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
-	struct wfx_link_entry *entry = NULL;
-	bool early_data = false;
 
 	memset(hdr, 0, sizeof(*hdr));
 
@@ -116,14 +80,6 @@ void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
 	     ieee80211_is_beacon(frame->frame_control)))
 		goto drop;
 
-	if (link_id && link_id <= WFX_MAX_STA_IN_AP_MODE) {
-		entry = &wvif->link_id_db[link_id - 1];
-		entry->timestamp = jiffies;
-		if (entry->status == WFX_LINK_SOFT &&
-		    ieee80211_is_data(frame->frame_control))
-			early_data = true;
-	}
-
 	if (arg->status == HIF_STATUS_MICFAILURE)
 		hdr->flag |= RX_FLAG_MMIC_ERROR;
 	else if (arg->status)
@@ -134,10 +90,6 @@ void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
 		goto drop;
 	}
 
-	if (ieee80211_is_pspoll(frame->frame_control))
-		if (wfx_handle_pspoll(wvif, skb))
-			goto drop;
-
 	hdr->band = NL80211_BAND_2GHZ;
 	hdr->freq = ieee80211_channel_to_frequency(arg->channel_number,
 						   hdr->band);
@@ -171,20 +123,6 @@ void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
 	    !arg->status && wvif->vif &&
 	    ether_addr_equal(ieee80211_get_SA(frame),
 			     wvif->vif->bss_conf.bssid)) {
-		const u8 *tim_ie;
-		u8 *ies = mgmt->u.beacon.variable;
-		size_t ies_len = skb->len - (ies - skb->data);
-
-		tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
-		if (tim_ie) {
-			struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *)&tim_ie[2];
-
-			if (wvif->dtim_period != tim->dtim_period) {
-				wvif->dtim_period = tim->dtim_period;
-				schedule_work(&wvif->set_beacon_wakeup_period_work);
-			}
-		}
-
 		/* Disable beacon filter once we're associated... */
 		if (wvif->disable_beacon_filter &&
 		    (wvif->vif->bss_conf.assoc ||
@@ -193,18 +131,7 @@ void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
 			schedule_work(&wvif->update_filtering_work);
 		}
 	}
-
-	if (early_data) {
-		spin_lock_bh(&wvif->ps_state_lock);
-		/* Double-check status with lock held */
-		if (entry->status == WFX_LINK_SOFT)
-			skb_queue_tail(&entry->rx_queue, skb);
-		else
-			ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
-		spin_unlock_bh(&wvif->ps_state_lock);
-	} else {
-		ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
-	}
+	ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
 
 	return;
 
diff --git a/drivers/staging/wfx/data_rx.h b/drivers/staging/wfx/data_rx.h
index a50ce35..61c28bf 100644
--- a/drivers/staging/wfx/data_rx.h
+++ b/drivers/staging/wfx/data_rx.h
@@ -13,7 +13,7 @@
 struct wfx_vif;
 struct sk_buff;
 
-void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
-	       struct sk_buff *skb);
+void wfx_rx_cb(struct wfx_vif *wvif,
+	       const struct hif_ind_rx *arg, struct sk_buff *skb);
 
 #endif /* WFX_DATA_RX_H */
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
index b13d734..20f4740 100644
--- a/drivers/staging/wfx/data_tx.c
+++ b/drivers/staging/wfx/data_tx.c
@@ -17,7 +17,6 @@
 #include "hif_tx_mib.h"
 
 #define WFX_INVALID_RATE_ID    15
-#define WFX_LINK_ID_NO_ASSOC   15
 #define WFX_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ))
 
 static int wfx_get_hw_rate(struct wfx_dev *wdev,
@@ -169,7 +168,8 @@ static int wfx_tx_policy_get(struct wfx_vif *wvif,
 	wfx_tx_policy_build(wvif, &wanted, rates);
 
 	spin_lock_bh(&cache->lock);
-	if (WARN_ON(list_empty(&cache->free))) {
+	if (list_empty(&cache->free)) {
+		WARN(1, "unable to get a valid Tx policy");
 		spin_unlock_bh(&cache->lock);
 		return WFX_INVALID_RATE_ID;
 	}
@@ -216,38 +216,25 @@ static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
 
 static int wfx_tx_policy_upload(struct wfx_vif *wvif)
 {
+	struct tx_policy *policies = wvif->tx_policy_cache.cache;
+	u8 tmp_rates[12];
 	int i;
-	struct tx_policy_cache *cache = &wvif->tx_policy_cache;
-	struct hif_mib_set_tx_rate_retry_policy *arg =
-		kzalloc(struct_size(arg,
-				    tx_rate_retry_policy,
-				    HIF_MIB_NUM_TX_RATE_RETRY_POLICIES),
-			GFP_KERNEL);
-	struct hif_mib_tx_rate_retry_policy *dst;
 
-	spin_lock_bh(&cache->lock);
-	/* Upload only modified entries. */
-	for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i) {
-		struct tx_policy *src = &cache->cache[i];
-
-		if (!src->uploaded && memzcmp(src->rates, sizeof(src->rates))) {
-			dst = arg->tx_rate_retry_policy +
-				arg->num_tx_rate_policies;
-
-			dst->policy_index = i;
-			dst->short_retry_count = 255;
-			dst->long_retry_count = 255;
-			dst->first_rate_sel = 1;
-			dst->terminate = 1;
-			dst->count_init = 1;
-			memcpy(&dst->rates, src->rates, sizeof(src->rates));
-			src->uploaded = true;
-			arg->num_tx_rate_policies++;
+	do {
+		spin_lock_bh(&wvif->tx_policy_cache.lock);
+		for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i)
+			if (!policies[i].uploaded &&
+			    memzcmp(policies[i].rates, sizeof(policies[i].rates)))
+				break;
+		if (i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES) {
+			policies[i].uploaded = 1;
+			memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates));
+			spin_unlock_bh(&wvif->tx_policy_cache.lock);
+			hif_set_tx_rate_retry_policy(wvif, i, tmp_rates);
+		} else {
+			spin_unlock_bh(&wvif->tx_policy_cache.lock);
 		}
-	}
-	spin_unlock_bh(&cache->lock);
-	hif_set_tx_rate_retry_policy(wvif, arg);
-	kfree(arg);
+	} while (i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES);
 	return 0;
 }
 
@@ -277,164 +264,6 @@ void wfx_tx_policy_init(struct wfx_vif *wvif)
 		list_add(&cache->cache[i].link, &cache->free);
 }
 
-/* Link ID related functions */
-
-static int wfx_alloc_link_id(struct wfx_vif *wvif, const u8 *mac)
-{
-	int i, ret = 0;
-	unsigned long max_inactivity = 0;
-	unsigned long now = jiffies;
-
-	spin_lock_bh(&wvif->ps_state_lock);
-	for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
-		if (!wvif->link_id_db[i].status) {
-			ret = i + 1;
-			break;
-		} else if (wvif->link_id_db[i].status != WFX_LINK_HARD &&
-			   !wvif->wdev->tx_queue_stats.link_map_cache[i + 1]) {
-			unsigned long inactivity =
-				now - wvif->link_id_db[i].timestamp;
-
-			if (inactivity < max_inactivity)
-				continue;
-			max_inactivity = inactivity;
-			ret = i + 1;
-		}
-	}
-
-	if (ret) {
-		struct wfx_link_entry *entry = &wvif->link_id_db[ret - 1];
-
-		entry->status = WFX_LINK_RESERVE;
-		ether_addr_copy(entry->mac, mac);
-		memset(&entry->buffered, 0, WFX_MAX_TID);
-		skb_queue_head_init(&entry->rx_queue);
-		wfx_tx_lock(wvif->wdev);
-
-		if (!schedule_work(&wvif->link_id_work))
-			wfx_tx_unlock(wvif->wdev);
-	} else {
-		dev_info(wvif->wdev->dev, "no more link-id available\n");
-	}
-	spin_unlock_bh(&wvif->ps_state_lock);
-	return ret;
-}
-
-int wfx_find_link_id(struct wfx_vif *wvif, const u8 *mac)
-{
-	int i, ret = 0;
-
-	spin_lock_bh(&wvif->ps_state_lock);
-	for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
-		if (ether_addr_equal(mac, wvif->link_id_db[i].mac) &&
-		    wvif->link_id_db[i].status) {
-			wvif->link_id_db[i].timestamp = jiffies;
-			ret = i + 1;
-			break;
-		}
-	}
-	spin_unlock_bh(&wvif->ps_state_lock);
-	return ret;
-}
-
-static int wfx_map_link(struct wfx_vif *wvif,
-			struct wfx_link_entry *link_entry, int sta_id)
-{
-	int ret;
-
-	ret = hif_map_link(wvif, link_entry->mac, 0, sta_id);
-
-	if (ret == 0)
-		/* Save the MAC address currently associated with the peer
-		 * for future unmap request
-		 */
-		ether_addr_copy(link_entry->old_mac, link_entry->mac);
-
-	return ret;
-}
-
-int wfx_unmap_link(struct wfx_vif *wvif, int sta_id)
-{
-	u8 *mac_addr = NULL;
-
-	if (sta_id)
-		mac_addr = wvif->link_id_db[sta_id - 1].old_mac;
-
-	return hif_map_link(wvif, mac_addr, 1, sta_id);
-}
-
-void wfx_link_id_gc_work(struct work_struct *work)
-{
-	struct wfx_vif *wvif =
-		container_of(work, struct wfx_vif, link_id_gc_work.work);
-	unsigned long now = jiffies;
-	unsigned long next_gc = -1;
-	long ttl;
-	u32 mask;
-	int i;
-
-	if (wvif->state != WFX_STATE_AP)
-		return;
-
-	wfx_tx_lock_flush(wvif->wdev);
-	spin_lock_bh(&wvif->ps_state_lock);
-	for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
-		bool need_reset = false;
-
-		mask = BIT(i + 1);
-		if (wvif->link_id_db[i].status == WFX_LINK_RESERVE ||
-		    (wvif->link_id_db[i].status == WFX_LINK_HARD &&
-		     !(wvif->link_id_map & mask))) {
-			if (wvif->link_id_map & mask) {
-				wvif->sta_asleep_mask &= ~mask;
-				wvif->pspoll_mask &= ~mask;
-				need_reset = true;
-			}
-			wvif->link_id_map |= mask;
-			if (wvif->link_id_db[i].status != WFX_LINK_HARD)
-				wvif->link_id_db[i].status = WFX_LINK_SOFT;
-
-			spin_unlock_bh(&wvif->ps_state_lock);
-			if (need_reset)
-				wfx_unmap_link(wvif, i + 1);
-			wfx_map_link(wvif, &wvif->link_id_db[i], i + 1);
-			next_gc = min(next_gc, WFX_LINK_ID_GC_TIMEOUT);
-			spin_lock_bh(&wvif->ps_state_lock);
-		} else if (wvif->link_id_db[i].status == WFX_LINK_SOFT) {
-			ttl = wvif->link_id_db[i].timestamp - now +
-					WFX_LINK_ID_GC_TIMEOUT;
-			if (ttl <= 0) {
-				need_reset = true;
-				wvif->link_id_db[i].status = WFX_LINK_OFF;
-				wvif->link_id_map &= ~mask;
-				wvif->sta_asleep_mask &= ~mask;
-				wvif->pspoll_mask &= ~mask;
-				spin_unlock_bh(&wvif->ps_state_lock);
-				wfx_unmap_link(wvif, i + 1);
-				spin_lock_bh(&wvif->ps_state_lock);
-			} else {
-				next_gc = min_t(unsigned long, next_gc, ttl);
-			}
-		}
-		if (need_reset)
-			skb_queue_purge(&wvif->link_id_db[i].rx_queue);
-	}
-	spin_unlock_bh(&wvif->ps_state_lock);
-	if (next_gc != -1)
-		schedule_delayed_work(&wvif->link_id_gc_work, next_gc);
-	wfx_tx_unlock(wvif->wdev);
-}
-
-void wfx_link_id_work(struct work_struct *work)
-{
-	struct wfx_vif *wvif =
-		container_of(work, struct wfx_vif, link_id_work);
-
-	wfx_tx_flush(wvif->wdev);
-	wfx_link_id_gc_work(&wvif->link_id_gc_work.work);
-	wfx_tx_unlock(wvif->wdev);
-}
-
 /* Tx implementation */
 
 static bool ieee80211_is_action_back(struct ieee80211_hdr *hdr)
@@ -453,29 +282,21 @@ static void wfx_tx_manage_pm(struct wfx_vif *wvif, struct ieee80211_hdr *hdr,
 			     struct ieee80211_sta *sta)
 {
 	u32 mask = ~BIT(tx_priv->raw_link_id);
+	struct wfx_sta_priv *sta_priv;
+	int tid = ieee80211_get_tid(hdr);
 
 	spin_lock_bh(&wvif->ps_state_lock);
-	if (ieee80211_is_auth(hdr->frame_control)) {
+	if (ieee80211_is_auth(hdr->frame_control))
 		wvif->sta_asleep_mask &= mask;
-		wvif->pspoll_mask &= mask;
-	}
-
-	if (tx_priv->link_id == WFX_LINK_ID_AFTER_DTIM &&
-	    !wvif->mcast_buffered) {
-		wvif->mcast_buffered = true;
-		if (wvif->sta_asleep_mask)
-			schedule_work(&wvif->mcast_start_work);
-	}
-
-	if (tx_priv->raw_link_id) {
-		wvif->link_id_db[tx_priv->raw_link_id - 1].timestamp = jiffies;
-		if (tx_priv->tid < WFX_MAX_TID)
-			wvif->link_id_db[tx_priv->raw_link_id - 1].buffered[tx_priv->tid]++;
-	}
 	spin_unlock_bh(&wvif->ps_state_lock);
 
-	if (sta)
-		ieee80211_sta_set_buffered(sta, tx_priv->tid, true);
+	if (sta) {
+		sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
+		spin_lock_bh(&sta_priv->lock);
+		sta_priv->buffered[tid]++;
+		ieee80211_sta_set_buffered(sta, tid, true);
+		spin_unlock_bh(&sta_priv->lock);
+	}
 }
 
 static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
@@ -485,7 +306,6 @@ static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
 	struct wfx_sta_priv *sta_priv =
 		sta ? (struct wfx_sta_priv *) &sta->drv_priv : NULL;
 	const u8 *da = ieee80211_get_DA(hdr);
-	int ret;
 
 	if (sta_priv && sta_priv->link_id)
 		return sta_priv->link_id;
@@ -493,14 +313,7 @@ static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
 		return 0;
 	if (is_multicast_ether_addr(da))
 		return 0;
-	ret = wfx_find_link_id(wvif, da);
-	if (!ret)
-		ret = wfx_alloc_link_id(wvif, da);
-	if (!ret) {
-		dev_err(wvif->wdev->dev, "no more link-id available\n");
-		return WFX_LINK_ID_NO_ASSOC;
-	}
-	return ret;
+	return WFX_LINK_ID_NO_ASSOC;
 }
 
 static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
@@ -597,17 +410,6 @@ static struct hif_ht_tx_parameters wfx_tx_get_tx_parms(struct wfx_dev *wdev, str
 	return ret;
 }
 
-static u8 wfx_tx_get_tid(struct ieee80211_hdr *hdr)
-{
-	// FIXME: ieee80211_get_tid(hdr) should be sufficient for all cases.
-	if (!ieee80211_is_data(hdr->frame_control))
-		return WFX_MAX_TID;
-	if (ieee80211_is_data_qos(hdr->frame_control))
-		return ieee80211_get_tid(hdr);
-	else
-		return 0;
-}
-
 static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key)
 {
 	int mic_space;
@@ -639,7 +441,6 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
 	memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
 	// Fill tx_priv
 	tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
-	tx_priv->tid = wfx_tx_get_tid(hdr);
 	tx_priv->raw_link_id = wfx_tx_get_raw_link_id(wvif, sta, hdr);
 	tx_priv->link_id = tx_priv->raw_link_id;
 	if (ieee80211_has_protected(hdr->frame_control))
@@ -668,9 +469,15 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
 
 	// Fill tx request
 	req = (struct hif_req_tx *)hif_msg->body;
-	req->packet_id = queue_id << 16 |
-			 IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+	// packet_id just needs to be unique on the device. 32 bits are more
+	// than necessary for that task, so we take advantage of them to add
+	// some extra data for debug.
+	req->packet_id = queue_id << 28 |
+			 IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16 |
+			 (atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF);
 	req->data_flags.fc_offset = offset;
+	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+		req->data_flags.after_dtim = 1;
 	req->queue_id.peer_sta_id = tx_priv->raw_link_id;
 	// Queue index are inverted between firmware and Linux
 	req->queue_id.queue_id = 3 - queue_id;
@@ -680,6 +487,8 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
 	// Auxiliary operations
 	wfx_tx_manage_pm(wvif, hdr, tx_priv, sta);
 	wfx_tx_queue_put(wvif->wdev, &wvif->wdev->tx_queue[queue_id], skb);
+	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+		schedule_work(&wvif->update_tim_work);
 	wfx_bh_request_tx(wvif->wdev);
 	return 0;
 }
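The new packet_id layout above packs debug-friendly fields into an identifier that only has to be unique per device: the queue in bits 31:28, the 802.11 sequence number in bits 27:16, and a wrapping counter in bits 15:0. A hedged sketch of the same packing (helper name hypothetical):

#include <linux/atomic.h>
#include <linux/ieee80211.h>

static u32 example_build_packet_id(atomic_t *counter, u8 queue_id,
				   __le16 seq_ctrl)
{
	/* The low 16-bit counter keeps ids distinct among in-flight frames;
	 * the upper fields just add context when reading traces.
	 */
	return queue_id << 28 |
	       IEEE80211_SEQ_TO_SN(le16_to_cpu(seq_ctrl)) << 16 |
	       (atomic_add_return(1, counter) & 0xFFFF);
}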
@@ -719,7 +528,7 @@ void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
 	ieee80211_tx_status_irqsafe(wdev->hw, skb);
 }
 
-void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
 {
 	int i;
 	int tx_count;
@@ -791,14 +600,11 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
 		else
 			tx_info->flags |= IEEE80211_TX_STAT_ACK;
 	} else if (arg->status == HIF_REQUEUE) {
-		/* "REQUEUE" means "implicit suspend" */
-		struct hif_ind_suspend_resume_tx suspend = {
-			.suspend_resume_flags.resume = 0,
-			.suspend_resume_flags.bc_mc_only = 1,
-		};
-
 		WARN(!arg->tx_result_flags.requeue, "incoherent status and result_flags");
-		wfx_suspend_resume(wvif, &suspend);
+		if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+			wvif->after_dtim_tx_allowed = false; // DTIM period elapsed
+			schedule_work(&wvif->update_tim_work);
+		}
 		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
 	} else {
 		if (wvif->bss_loss_state &&
@@ -808,31 +614,25 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
 	wfx_pending_remove(wvif->wdev, skb);
 }
 
-static void wfx_notify_buffered_tx(struct wfx_vif *wvif, struct sk_buff *skb,
-				   struct hif_req_tx *req)
+static void wfx_notify_buffered_tx(struct wfx_vif *wvif, struct sk_buff *skb)
 {
-	struct ieee80211_sta *sta;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	int tid = wfx_tx_get_tid(hdr);
-	int raw_link_id = req->queue_id.peer_sta_id;
-	u8 *buffered;
+	struct ieee80211_sta *sta;
+	struct wfx_sta_priv *sta_priv;
+	int tid = ieee80211_get_tid(hdr);
 
-	if (raw_link_id && tid < WFX_MAX_TID) {
-		buffered = wvif->link_id_db[raw_link_id - 1].buffered;
-
-		spin_lock_bh(&wvif->ps_state_lock);
-		WARN(!buffered[tid], "inconsistent notification");
-		buffered[tid]--;
-		spin_unlock_bh(&wvif->ps_state_lock);
-
-		if (!buffered[tid]) {
-			rcu_read_lock();
-			sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
-			if (sta)
-				ieee80211_sta_set_buffered(sta, tid, false);
-			rcu_read_unlock();
-		}
+	rcu_read_lock(); // protect sta
+	sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
+	if (sta) {
+		sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
+		spin_lock_bh(&sta_priv->lock);
+		WARN(!sta_priv->buffered[tid], "inconsistent notification");
+		sta_priv->buffered[tid]--;
+		if (!sta_priv->buffered[tid])
+			ieee80211_sta_set_buffered(sta, tid, false);
+		spin_unlock_bh(&sta_priv->lock);
 	}
+	rcu_read_unlock();
 }
 
 void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
@@ -846,7 +646,7 @@ void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
 
 	WARN_ON(!wvif);
 	skb_pull(skb, offset);
-	wfx_notify_buffered_tx(wvif, skb, req);
+	wfx_notify_buffered_tx(wvif, skb);
 	wfx_tx_policy_put(wvif, req->tx_flags.retry_policy_index);
 	ieee80211_tx_status_irqsafe(wdev->hw, skb);
 }
diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
index 0fc388d..04b2147 100644
--- a/drivers/staging/wfx/data_tx.h
+++ b/drivers/staging/wfx/data_tx.h
@@ -14,29 +14,10 @@
 #include "hif_api_cmd.h"
 #include "hif_api_mib.h"
 
-// FIXME: use IEEE80211_NUM_TIDS
-#define WFX_MAX_TID               8
-
 struct wfx_tx_priv;
 struct wfx_dev;
 struct wfx_vif;
 
-enum wfx_link_status {
-	WFX_LINK_OFF,
-	WFX_LINK_RESERVE,
-	WFX_LINK_SOFT,
-	WFX_LINK_HARD,
-};
-
-struct wfx_link_entry {
-	unsigned long		timestamp;
-	enum wfx_link_status	status;
-	u8			mac[ETH_ALEN];
-	u8			old_mac[ETH_ALEN];
-	u8			buffered[WFX_MAX_TID];
-	struct sk_buff_head	rx_queue;
-};
-
 struct tx_policy {
 	struct list_head link;
 	int usage_count;
@@ -57,7 +38,6 @@ struct wfx_tx_priv {
 	struct ieee80211_key_conf *hw_key;
 	u8 link_id;
 	u8 raw_link_id;
-	u8 tid;
 } __packed;
 
 void wfx_tx_policy_init(struct wfx_vif *wvif);
@@ -65,14 +45,9 @@ void wfx_tx_policy_upload_work(struct work_struct *work);
 
 void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
 	    struct sk_buff *skb);
-void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg);
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg);
 void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb);
 
-int wfx_unmap_link(struct wfx_vif *wvif, int link_id);
-void wfx_link_id_work(struct work_struct *work);
-void wfx_link_id_gc_work(struct work_struct *work);
-int wfx_find_link_id(struct wfx_vif *wvif, const u8 *mac);
-
 static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *tx_info;
diff --git a/drivers/staging/wfx/debug.c b/drivers/staging/wfx/debug.c
index d17a752..1164aba 100644
--- a/drivers/staging/wfx/debug.c
+++ b/drivers/staging/wfx/debug.c
@@ -145,7 +145,7 @@ static int wfx_rx_stats_show(struct seq_file *seq, void *v)
 		   st->pwr_clk_freq,
 		   st->is_ext_pwr_clk ? "yes" : "no");
 	seq_printf(seq,
-		   "N. of frames: %d, PER (x10e4): %d, Throughput: %dKbps/s\n",
+		   "Num. of frames: %d, PER (x10e4): %d, Throughput: %dKbps/s\n",
 		   st->nb_rx_frame, st->per_total, st->throughput);
 	seq_puts(seq, "       Num. of      PER     RSSI      SNR      CFO\n");
 	seq_puts(seq, "        frames  (x10e4)    (dBm)     (dB)    (kHz)\n");
diff --git a/drivers/staging/wfx/fwio.c b/drivers/staging/wfx/fwio.c
index dbf8bda..9d61082 100644
--- a/drivers/staging/wfx/fwio.c
+++ b/drivers/staging/wfx/fwio.c
@@ -61,7 +61,7 @@
 #define DCA_TIMEOUT  50 // milliseconds
 #define WAKEUP_TIMEOUT 200 // milliseconds
 
-static const char * const fwio_error_strings[] = {
+static const char * const fwio_errors[] = {
 	[ERR_INVALID_SEC_TYPE] = "Invalid section type or wrong encryption",
 	[ERR_SIG_VERIF_FAILED] = "Signature verification failed",
 	[ERR_AES_CTRL_KEY] = "AES control key not initialized",
@@ -220,22 +220,16 @@ static int upload_firmware(struct wfx_dev *wdev, const u8 *data, size_t len)
 
 static void print_boot_status(struct wfx_dev *wdev)
 {
-	u32 val32;
+	u32 reg;
 
-	sram_reg_read(wdev, WFX_STATUS_INFO, &val32);
-	if (val32 == 0x12345678) {
-		dev_info(wdev->dev, "no error reported by secure boot\n");
-	} else {
-		sram_reg_read(wdev, WFX_ERR_INFO, &val32);
-		if (val32 < ARRAY_SIZE(fwio_error_strings) &&
-		    fwio_error_strings[val32])
-			dev_info(wdev->dev, "secure boot error: %s\n",
-				 fwio_error_strings[val32]);
-		else
-			dev_info(wdev->dev,
-				 "secure boot error: Unknown (0x%02x)\n",
-				 val32);
-	}
+	sram_reg_read(wdev, WFX_STATUS_INFO, &reg);
+	if (reg == 0x12345678)
+		return;
+	sram_reg_read(wdev, WFX_ERR_INFO, &reg);
+	if (reg < ARRAY_SIZE(fwio_errors) && fwio_errors[reg])
+		dev_info(wdev->dev, "secure boot: %s\n", fwio_errors[reg]);
+	else
+		dev_info(wdev->dev, "secure boot: Error %#02x\n", reg);
 }
 
 static int load_firmware_secure(struct wfx_dev *wdev)
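
print_boot_status() above now uses a plain bounded lookup into a sparse string
table: the error code is used as an index only after a range check and a NULL
check, so holes in fwio_errors[] fall back to the numeric message. A minimal
standalone sketch of the same pattern, with made-up codes and strings:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Sparse table: index 2 is intentionally left as a hole. */
static const char * const errors[] = {
	[1] = "invalid section type",
	[3] = "signature verification failed",
};

static void print_error(unsigned int code)
{
	if (code < ARRAY_SIZE(errors) && errors[code])
		printf("secure boot: %s\n", errors[code]);
	else
		printf("secure boot: Error %#02x\n", code);
}

int main(void)
{
	print_error(1);		/* known code */
	print_error(2);		/* hole in the table */
	print_error(42);	/* out of range */
	return 0;
}
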
@@ -345,7 +339,7 @@ int wfx_init_device(struct wfx_dev *wdev)
 	ktime_t now, start;
 	u32 reg;
 
-	reg = CFG_DIRECT_ACCESS_MODE | CFG_CPU_RESET | CFG_WORD_MODE2;
+	reg = CFG_DIRECT_ACCESS_MODE | CFG_CPU_RESET | CFG_BYTE_ORDER_ABCD;
 	if (wdev->pdata.use_rising_clk)
 		reg |= CFG_CLK_RISE_EDGE;
 	ret = config_reg_write(wdev, reg);
diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h
index c15831d..5554d6e 100644
--- a/drivers/staging/wfx/hif_api_cmd.h
+++ b/drivers/staging/wfx/hif_api_cmd.h
@@ -137,7 +137,7 @@ struct hif_ie_tlv {
 
 struct hif_req_update_ie {
 	struct hif_ie_flags ie_flags;
-	u16   num_i_es;
+	u16   num_ies;
 	struct hif_ie_tlv ie[];
 } __packed;
 
@@ -180,7 +180,7 @@ struct hif_req_start_scan {
 	struct hif_auto_scan_param auto_scan_param;
 	u8    num_of_probe_requests;
 	u8    probe_delay;
-	u8    num_of_ssi_ds;
+	u8    num_of_ssids;
 	u8    num_of_channels;
 	u32   min_channel_time;
 	u32   max_channel_time;
@@ -188,7 +188,7 @@ struct hif_req_start_scan {
 	u8    ssid_and_channel_lists[];
 } __packed;
 
-struct hif_start_scan_req_cstnbssid_body {
+struct hif_req_start_scan_alt {
 	u8    band;
 	struct hif_scan_type scan_type;
 	struct hif_scan_flags scan_flags;
@@ -196,7 +196,7 @@ struct hif_start_scan_req_cstnbssid_body {
 	struct hif_auto_scan_param auto_scan_param;
 	u8    num_of_probe_requests;
 	u8    probe_delay;
-	u8    num_of_ssi_ds;
+	u8    num_of_ssids;
 	u8    num_of_channels;
 	u32   min_channel_time;
 	u32   max_channel_time;
@@ -253,7 +253,8 @@ struct hif_queue {
 struct hif_data_flags {
 	u8    more:1;
 	u8    fc_offset:3;
-	u8    reserved:4;
+	u8    after_dtim:1;
+	u8    reserved:3;
 } __packed;
 
 struct hif_tx_flags {
@@ -377,17 +378,6 @@ struct hif_cnf_edca_queue_params {
 	u32   status;
 } __packed;
 
-enum hif_ap_mode {
-	HIF_MODE_IBSS                              = 0x0,
-	HIF_MODE_BSS                               = 0x1
-};
-
-enum hif_preamble {
-	HIF_PREAMBLE_LONG                          = 0x0,
-	HIF_PREAMBLE_SHORT                         = 0x1,
-	HIF_PREAMBLE_SHORT_LONG12                  = 0x2
-};
-
 struct hif_join_flags {
 	u8    reserved1:2;
 	u8    force_no_beacon:1;
@@ -396,14 +386,16 @@ struct hif_join_flags {
 } __packed;
 
 struct hif_req_join {
-	u8    mode;
+	u8    infrastructure_bss_mode:1;
+	u8    reserved1:7;
 	u8    band;
 	u16   channel_number;
 	u8    bssid[ETH_ALEN];
 	u16   atim_window;
-	u8    preamble_type;
+	u8    short_preamble:1;
+	u8    reserved2:7;
 	u8    probe_for_join;
-	u8    reserved;
+	u8    reserved3;
 	struct hif_join_flags join_flags;
 	u32   ssid_length;
 	u8    ssid[HIF_API_SSID_SIZE];
@@ -466,8 +458,9 @@ struct hif_req_start {
 	u32   reserved1;
 	u32   beacon_interval;
 	u8    dtim_period;
-	u8    preamble_type;
-	u8    reserved2;
+	u8    short_preamble:1;
+	u8    reserved2:7;
+	u8    reserved3;
 	u8    ssid_length;
 	u8    ssid[HIF_API_SSID_SIZE];
 	u32   basic_rate_set;
diff --git a/drivers/staging/wfx/hif_api_mib.h b/drivers/staging/wfx/hif_api_mib.h
index 94b789c..0c67cd4 100644
--- a/drivers/staging/wfx/hif_api_mib.h
+++ b/drivers/staging/wfx/hif_api_mib.h
@@ -181,19 +181,13 @@ struct hif_mib_ipv6_addr_data_frame_condition {
 	u8    i_pv6_address[HIF_API_IPV6_ADDRESS_SIZE];
 } __packed;
 
-union hif_addr_type {
-	u8 value;
-	struct {
-		u8    type_unicast:1;
-		u8    type_multicast:1;
-		u8    type_broadcast:1;
-		u8    reserved:5;
-	} bits;
-};
+#define HIF_FILTER_UNICAST   0x1
+#define HIF_FILTER_MULTICAST 0x2
+#define HIF_FILTER_BROADCAST 0x4
 
 struct hif_mib_uc_mc_bc_data_frame_condition {
 	u8    condition_idx;
-	union hif_addr_type param;
+	u8    allowed_frames;
 	u8    reserved[2];
 } __packed;
 
@@ -212,9 +206,11 @@ struct hif_mib_config_data_filter {
 } __packed;
 
 struct hif_mib_set_data_filtering {
-	u8    default_filter;
-	u8    enable;
-	u8    reserved[2];
+	u8    invert_matching:1;
+	u8    reserved1:7;
+	u8    enable:1;
+	u8    reserved2:7;
+	u8    reserved3[2];
 } __packed;
 
 enum hif_arp_ns_frame_treatment {
@@ -395,11 +391,6 @@ struct hif_mib_non_erp_protection {
 	u8   reserved2[3];
 } __packed;
 
-enum hif_tx_mode {
-	HIF_TX_MODE_MIXED                        = 0x0,
-	HIF_TX_MODE_GREENFIELD                   = 0x1
-};
-
 enum hif_tmplt {
 	HIF_TMPLT_PRBREQ                           = 0x0,
 	HIF_TMPLT_BCN                              = 0x1,
@@ -471,9 +462,11 @@ struct hif_mib_set_association_mode {
 	u8    mode:1;
 	u8    rateset:1;
 	u8    spacing:1;
-	u8    reserved:4;
-	u8    preamble_type;
-	u8    mixed_or_greenfield_type;
+	u8    reserved1:4;
+	u8    short_preamble:1;
+	u8    reserved2:7;
+	u8    greenfield:1;
+	u8    reserved3:7;
 	u8    mpdu_start_spacing;
 	u32   basic_rate_set;
 } __packed;
diff --git a/drivers/staging/wfx/hif_rx.c b/drivers/staging/wfx/hif_rx.c
index 820de21..33c22c5 100644
--- a/drivers/staging/wfx/hif_rx.c
+++ b/drivers/staging/wfx/hif_rx.c
@@ -18,8 +18,8 @@
 #include "secure_link.h"
 #include "hif_api_cmd.h"
 
-static int hif_generic_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
-			       void *buf)
+static int hif_generic_confirm(struct wfx_dev *wdev,
+			       const struct hif_msg *hif, const void *buf)
 {
 	// All confirm messages start with status
 	int status = le32_to_cpu(*((__le32 *) buf));
@@ -59,9 +59,10 @@ static int hif_generic_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
 	return status;
 }
 
-static int hif_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif, void *buf)
+static int hif_tx_confirm(struct wfx_dev *wdev,
+			  const struct hif_msg *hif, const void *buf)
 {
-	struct hif_cnf_tx *body = buf;
+	const struct hif_cnf_tx *body = buf;
 	struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
 
 	WARN_ON(!wvif);
@@ -72,31 +73,27 @@ static int hif_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif, void *buf)
 	return 0;
 }
 
-static int hif_multi_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
-				void *buf)
+static int hif_multi_tx_confirm(struct wfx_dev *wdev,
+				const struct hif_msg *hif, const void *buf)
 {
-	struct hif_cnf_multi_transmit *body = buf;
-	struct hif_cnf_tx *buf_loc = (struct hif_cnf_tx *) &body->tx_conf_payload;
+	const struct hif_cnf_multi_transmit *body = buf;
 	struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
-	int count = body->num_tx_confs;
 	int i;
 
-	WARN(count <= 0, "corrupted message");
+	WARN(body->num_tx_confs <= 0, "corrupted message");
 	WARN_ON(!wvif);
 	if (!wvif)
 		return -EFAULT;
 
-	for (i = 0; i < count; ++i) {
-		wfx_tx_confirm_cb(wvif, buf_loc);
-		buf_loc++;
-	}
+	for (i = 0; i < body->num_tx_confs; i++)
+		wfx_tx_confirm_cb(wvif, &body->tx_conf_payload[i]);
 	return 0;
 }
 
-static int hif_startup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
-				  void *buf)
+static int hif_startup_indication(struct wfx_dev *wdev,
+				  const struct hif_msg *hif, const void *buf)
 {
-	struct hif_ind_startup *body = buf;
+	const struct hif_ind_startup *body = buf;
 
 	if (body->status || body->firmware_type > 4) {
 		dev_err(wdev->dev, "received invalid startup indication");
@@ -112,8 +109,8 @@ static int hif_startup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
 	return 0;
 }
 
-static int hif_wakeup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
-				 void *buf)
+static int hif_wakeup_indication(struct wfx_dev *wdev,
+				 const struct hif_msg *hif, const void *buf)
 {
 	if (!wdev->pdata.gpio_wakeup
 	    || !gpiod_get_value(wdev->pdata.gpio_wakeup)) {
@@ -123,25 +120,27 @@ static int hif_wakeup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
 	return 0;
 }
 
-static int hif_keys_indication(struct wfx_dev *wdev, struct hif_msg *hif,
-			       void *buf)
+static int hif_keys_indication(struct wfx_dev *wdev,
+			       const struct hif_msg *hif, const void *buf)
 {
-	struct hif_ind_sl_exchange_pub_keys *body = buf;
+	const struct hif_ind_sl_exchange_pub_keys *body = buf;
+	u8 pubkey[API_NCP_PUB_KEY_SIZE];
 
-	// Compatibility with legacy secure link
-	if (body->status == SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
-		body->status = 0;
-	if (body->status)
+	// SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS is used by legacy secure link
+	if (body->status && body->status != SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
 		dev_warn(wdev->dev, "secure link negociation error\n");
-	wfx_sl_check_pubkey(wdev, body->ncp_pub_key, body->ncp_pub_key_mac);
+	memcpy(pubkey, body->ncp_pub_key, sizeof(pubkey));
+	memreverse(pubkey, sizeof(pubkey));
+	wfx_sl_check_pubkey(wdev, pubkey, body->ncp_pub_key_mac);
 	return 0;
 }
 
-static int hif_receive_indication(struct wfx_dev *wdev, struct hif_msg *hif,
-				  void *buf, struct sk_buff *skb)
+static int hif_receive_indication(struct wfx_dev *wdev,
+				  const struct hif_msg *hif,
+				  const void *buf, struct sk_buff *skb)
 {
 	struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
-	struct hif_ind_rx *body = buf;
+	const struct hif_ind_rx *body = buf;
 
 	if (!wvif) {
 		dev_warn(wdev->dev, "ignore rx data for non-existent vif %d\n",
@@ -154,11 +153,11 @@ static int hif_receive_indication(struct wfx_dev *wdev, struct hif_msg *hif,
 	return 0;
 }
 
-static int hif_event_indication(struct wfx_dev *wdev, struct hif_msg *hif,
-				void *buf)
+static int hif_event_indication(struct wfx_dev *wdev,
+				const struct hif_msg *hif, const void *buf)
 {
 	struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
-	struct hif_ind_event *body = buf;
+	const struct hif_ind_event *body = buf;
 	struct wfx_hif_event *event;
 	int first;
 
@@ -183,7 +182,8 @@ static int hif_event_indication(struct wfx_dev *wdev, struct hif_msg *hif,
 }
 
 static int hif_pm_mode_complete_indication(struct wfx_dev *wdev,
-					   struct hif_msg *hif, void *buf)
+					   const struct hif_msg *hif,
+					   const void *buf)
 {
 	struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
 
@@ -194,19 +194,20 @@ static int hif_pm_mode_complete_indication(struct wfx_dev *wdev,
 }
 
 static int hif_scan_complete_indication(struct wfx_dev *wdev,
-					struct hif_msg *hif, void *buf)
+					const struct hif_msg *hif,
+					const void *buf)
 {
 	struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
-	struct hif_ind_scan_cmpl *body = buf;
 
 	WARN_ON(!wvif);
-	wfx_scan_complete_cb(wvif, body);
+	wfx_scan_complete(wvif);
 
 	return 0;
 }
 
 static int hif_join_complete_indication(struct wfx_dev *wdev,
-					struct hif_msg *hif, void *buf)
+					const struct hif_msg *hif,
+					const void *buf)
 {
 	struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
 
@@ -217,21 +218,26 @@ static int hif_join_complete_indication(struct wfx_dev *wdev,
 }
 
 static int hif_suspend_resume_indication(struct wfx_dev *wdev,
-					 struct hif_msg *hif, void *buf)
+					 const struct hif_msg *hif,
+					 const void *buf)
 {
 	struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
-	struct hif_ind_suspend_resume_tx *body = buf;
+	const struct hif_ind_suspend_resume_tx *body = buf;
 
 	WARN_ON(!wvif);
-	wfx_suspend_resume(wvif, body);
+	WARN(!body->suspend_resume_flags.bc_mc_only, "unsupported suspend/resume notification");
+	if (body->suspend_resume_flags.resume)
+		wfx_suspend_resume_mc(wvif, STA_NOTIFY_AWAKE);
+	else
+		wfx_suspend_resume_mc(wvif, STA_NOTIFY_SLEEP);
 
 	return 0;
 }
 
-static int hif_error_indication(struct wfx_dev *wdev, struct hif_msg *hif,
-				void *buf)
+static int hif_error_indication(struct wfx_dev *wdev,
+				const struct hif_msg *hif, const void *buf)
 {
-	struct hif_ind_error *body = buf;
+	const struct hif_ind_error *body = buf;
 	u8 *pRollback = (u8 *) body->data;
 	u32 *pStatus = (u32 *) body->data;
 
@@ -268,10 +274,10 @@ static int hif_error_indication(struct wfx_dev *wdev, struct hif_msg *hif,
 	return 0;
 }
 
-static int hif_generic_indication(struct wfx_dev *wdev, struct hif_msg *hif,
-				  void *buf)
+static int hif_generic_indication(struct wfx_dev *wdev,
+				  const struct hif_msg *hif, const void *buf)
 {
-	struct hif_ind_generic *body = buf;
+	const struct hif_ind_generic *body = buf;
 
 	switch (body->indication_type) {
 	case HIF_GENERIC_INDICATION_TYPE_RAW:
@@ -299,9 +305,10 @@ static int hif_generic_indication(struct wfx_dev *wdev, struct hif_msg *hif,
 }
 
 static int hif_exception_indication(struct wfx_dev *wdev,
-				    struct hif_msg *hif, void *buf)
+				    const struct hif_msg *hif, const void *buf)
 {
 	size_t len = hif->len - 4; // drop header
+
 	dev_err(wdev->dev, "firmware exception\n");
 	print_hex_dump_bytes("Dump: ", DUMP_PREFIX_NONE, buf, len);
 	wdev->chip_frozen = 1;
@@ -311,7 +318,8 @@ static int hif_exception_indication(struct wfx_dev *wdev,
 
 static const struct {
 	int msg_id;
-	int (*handler)(struct wfx_dev *wdev, struct hif_msg *hif, void *buf);
+	int (*handler)(struct wfx_dev *wdev,
+		       const struct hif_msg *hif, const void *buf);
 } hif_handlers[] = {
 	/* Confirmations */
 	{ HIF_CNF_ID_TX,                   hif_tx_confirm },
@@ -335,7 +343,7 @@ static const struct {
 void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb)
 {
 	int i;
-	struct hif_msg *hif = (struct hif_msg *) skb->data;
+	const struct hif_msg *hif = (const struct hif_msg *)skb->data;
 	int hif_id = hif->id;
 
 	if (hif_id == HIF_IND_ID_RX) {
@@ -358,7 +366,12 @@ void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb)
 			goto free;
 		}
 	}
-	dev_err(wdev->dev, "unsupported HIF ID %02x\n", hif_id);
+	if (hif_id & 0x80)
+		dev_err(wdev->dev, "unsupported HIF indication: ID %02x\n",
+			hif_id);
+	else
+		dev_err(wdev->dev, "unexpected HIF confirmation: ID %02x\n",
+			hif_id);
 free:
 	dev_kfree_skb(skb);
 }
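
The tail of wfx_handle_rx() above tells the two failure cases apart with the
high bit of the message ID, which is set for indications and clear for
confirmations. A minimal standalone sketch of that table-driven dispatch; the
message IDs and handler names below are invented for the example and are not
the real HIF values:

#include <stddef.h>
#include <stdio.h>

struct handler {
	int msg_id;
	void (*fn)(void);
};

static void on_tx_cnf(void)      { puts("tx confirmation"); }
static void on_startup_ind(void) { puts("startup indication"); }

static const struct handler handlers[] = {
	{ 0x04, on_tx_cnf },		/* confirmation: bit 7 clear */
	{ 0x81, on_startup_ind },	/* indication: bit 7 set */
};

static void dispatch(int msg_id)
{
	size_t i;

	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
		if (handlers[i].msg_id == msg_id) {
			handlers[i].fn();
			return;
		}
	}
	if (msg_id & 0x80)
		printf("unsupported indication: ID %02x\n", msg_id);
	else
		printf("unexpected confirmation: ID %02x\n", msg_id);
}

int main(void)
{
	dispatch(0x04);
	dispatch(0x81);
	dispatch(0x42);		/* unknown, reported as a confirmation */
	return 0;
}
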
diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c
index cb7cddc..2428363 100644
--- a/drivers/staging/wfx/hif_tx.c
+++ b/drivers/staging/wfx/hif_tx.c
@@ -6,7 +6,6 @@
  * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
  * Copyright (c) 2010, ST-Ericsson
  */
-#include <linux/skbuff.h>
 #include <linux/etherdevice.h>
 
 #include "hif_tx.h"
@@ -221,41 +220,59 @@ int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
 	return ret;
 }
 
-int hif_scan(struct wfx_vif *wvif, const struct wfx_scan_params *arg)
+int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
+	     int chan_start_idx, int chan_num)
 {
 	int ret, i;
 	struct hif_msg *hif;
-	struct hif_ssid_def *ssids;
-	size_t buf_len = sizeof(struct hif_req_start_scan) +
-		arg->scan_req.num_of_channels * sizeof(u8) +
-		arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def);
-	struct hif_req_start_scan *body = wfx_alloc_hif(buf_len, &hif);
-	u8 *ptr = (u8 *) body + sizeof(*body);
+	size_t buf_len =
+		sizeof(struct hif_req_start_scan_alt) + chan_num * sizeof(u8);
+	struct hif_req_start_scan_alt *body = wfx_alloc_hif(buf_len, &hif);
+	int tmo_chan_fg, tmo_chan_bg, tmo;
 
-	WARN(arg->scan_req.num_of_channels > HIF_API_MAX_NB_CHANNELS, "invalid params");
-	WARN(arg->scan_req.num_of_ssi_ds > 2, "invalid params");
-	WARN(arg->scan_req.band > 1, "invalid params");
+	WARN(chan_num > HIF_API_MAX_NB_CHANNELS, "invalid params");
+	WARN(req->n_ssids > HIF_API_MAX_NB_SSIDS, "invalid params");
 
-	// FIXME: This API is unnecessary complex, fixing NumOfChannels and
-	// adding a member SsidDef at end of struct hif_req_start_scan would
-	// simplify that a lot.
-	memcpy(body, &arg->scan_req, sizeof(*body));
-	cpu_to_le32s(&body->min_channel_time);
-	cpu_to_le32s(&body->max_channel_time);
-	cpu_to_le32s(&body->tx_power_level);
-	memcpy(ptr, arg->ssids,
-	       arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def));
-	ssids = (struct hif_ssid_def *) ptr;
-	for (i = 0; i < body->num_of_ssi_ds; ++i)
-		cpu_to_le32s(&ssids[i].ssid_length);
-	ptr += arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def);
-	memcpy(ptr, arg->ch, arg->scan_req.num_of_channels * sizeof(u8));
-	ptr += arg->scan_req.num_of_channels * sizeof(u8);
-	WARN(buf_len != ptr - (u8 *) body, "allocation size mismatch");
+	compiletime_assert(IEEE80211_MAX_SSID_LEN == HIF_API_SSID_SIZE,
+			   "API inconsistency");
+	for (i = 0; i < req->n_ssids; i++) {
+		memcpy(body->ssid_def[i].ssid, req->ssids[i].ssid,
+		       IEEE80211_MAX_SSID_LEN);
+		body->ssid_def[i].ssid_length =
+			cpu_to_le32(req->ssids[i].ssid_len);
+	}
+	body->num_of_ssids = HIF_API_MAX_NB_SSIDS;
+	// Background scan is always a good idea
+	body->scan_type.type = 1;
+	body->scan_flags.fbg = 1;
+	body->tx_power_level =
+		cpu_to_le32(req->channels[chan_start_idx]->max_power);
+	body->num_of_channels = chan_num;
+	for (i = 0; i < chan_num; i++)
+		body->channel_list[i] =
+			req->channels[i + chan_start_idx]->hw_value;
+	if (req->no_cck)
+		body->max_transmit_rate = API_RATE_INDEX_G_6MBPS;
+	else
+		body->max_transmit_rate = API_RATE_INDEX_B_1MBPS;
+	if (req->channels[chan_start_idx]->flags & IEEE80211_CHAN_NO_IR) {
+		body->min_channel_time = cpu_to_le32(50);
+		body->max_channel_time = cpu_to_le32(150);
+	} else {
+		body->min_channel_time = cpu_to_le32(10);
+		body->max_channel_time = cpu_to_le32(50);
+		body->num_of_probe_requests = 2;
+		body->probe_delay = 100;
+	}
+	tmo_chan_bg = le32_to_cpu(body->max_channel_time) * USEC_PER_TU;
+	tmo_chan_fg = 512 * USEC_PER_TU + body->probe_delay;
+	tmo_chan_fg *= body->num_of_probe_requests;
+	tmo = chan_num * max(tmo_chan_bg, tmo_chan_fg);
+
 	wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
 	ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
 	kfree(hif);
-	return ret;
+	return ret ? ret : usecs_to_jiffies(tmo);
 }
 
 int hif_stop_scan(struct wfx_vif *wvif)
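
hif_scan() above now derives its own completion timeout: each channel is
charged the larger of the background dwell (max_channel_time, in TU) and the
foreground probing budget (512 TU plus the probe delay, once per probe
request), and the result is scaled by the number of channels in the chunk. The
standalone check below reproduces that arithmetic, assuming USEC_PER_TU is the
usual 1024 us per 802.11 time unit:

#include <stdio.h>

#define USEC_PER_TU 1024	/* assumed value of one 802.11 time unit */

static long max_long(long a, long b)
{
	return a > b ? a : b;
}

static long scan_usecs(int chan_num, int passive)
{
	/* Values mirror the passive/active branches above. */
	int max_channel_time = passive ? 150 : 50;	/* TU */
	int probe_requests   = passive ? 0 : 2;
	int probe_delay      = passive ? 0 : 100;
	long tmo_chan_bg = (long)max_channel_time * USEC_PER_TU;
	long tmo_chan_fg = (512L * USEC_PER_TU + probe_delay) * probe_requests;

	return chan_num * max_long(tmo_chan_bg, tmo_chan_fg);
}

int main(void)
{
	/* 3 passive channels: 3 * 150 * 1024 us, about 461 ms */
	printf("passive: %ld us\n", scan_usecs(3, 1));
	/* 3 active channels: 3 * 2 * (512 * 1024 + 100) us, about 3.1 s */
	printf("active:  %ld us\n", scan_usecs(3, 0));
	return 0;
}
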
@@ -271,18 +288,29 @@ int hif_stop_scan(struct wfx_vif *wvif)
 	return ret;
 }
 
-int hif_join(struct wfx_vif *wvif, const struct hif_req_join *arg)
+int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+	     const struct ieee80211_channel *channel, const u8 *ssidie)
 {
 	int ret;
 	struct hif_msg *hif;
 	struct hif_req_join *body = wfx_alloc_hif(sizeof(*body), &hif);
 
-	memcpy(body, arg, sizeof(struct hif_req_join));
-	cpu_to_le16s(&body->channel_number);
-	cpu_to_le16s(&body->atim_window);
-	cpu_to_le32s(&body->ssid_length);
-	cpu_to_le32s(&body->beacon_interval);
-	cpu_to_le32s(&body->basic_rate_set);
+	WARN_ON(!conf->basic_rates);
+	body->infrastructure_bss_mode = !conf->ibss_joined;
+	body->short_preamble = conf->use_short_preamble;
+	if (channel && channel->flags & IEEE80211_CHAN_NO_IR)
+		body->probe_for_join = 0;
+	else
+		body->probe_for_join = 1;
+	body->channel_number = cpu_to_le16(channel->hw_value);
+	body->beacon_interval = cpu_to_le32(conf->beacon_int);
+	body->basic_rate_set =
+		cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
+	memcpy(body->bssid, conf->bssid, sizeof(body->bssid));
+	if (!conf->ibss_joined && ssidie) {
+		body->ssid_length = cpu_to_le32(ssidie[1]);
+		memcpy(body->ssid, &ssidie[2], ssidie[1]);
+	}
 	wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body));
 	ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
 	kfree(hif);
@@ -341,19 +369,28 @@ int hif_remove_key(struct wfx_dev *wdev, int idx)
 	return ret;
 }
 
-int hif_set_edca_queue_params(struct wfx_vif *wvif,
-			      const struct hif_req_edca_queue_params *arg)
+int hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
+			      const struct ieee80211_tx_queue_params *arg)
 {
 	int ret;
 	struct hif_msg *hif;
 	struct hif_req_edca_queue_params *body = wfx_alloc_hif(sizeof(*body),
 							       &hif);
 
-	// NOTE: queues numerotation are not the same between WFx and Linux
-	memcpy(body, arg, sizeof(*body));
-	cpu_to_le16s(&body->cw_min);
-	cpu_to_le16s(&body->cw_max);
-	cpu_to_le16s(&body->tx_op_limit);
+	if (!body)
+		return -ENOMEM;
+
+	WARN_ON(arg->aifs > 255);
+	body->aifsn = arg->aifs;
+	body->cw_min = cpu_to_le16(arg->cw_min);
+	body->cw_max = cpu_to_le16(arg->cw_max);
+	body->tx_op_limit = cpu_to_le16(arg->txop * USEC_PER_TXOP);
+	body->queue_id = 3 - queue;
+	// API 2.0 changed the queue ID values
+	if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BE)
+		body->queue_id = HIF_QUEUE_ID_BACKGROUND;
+	if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BK)
+		body->queue_id = HIF_QUEUE_ID_BESTEFFORT;
 	wfx_fill_header(hif, wvif->id, HIF_REQ_ID_EDCA_QUEUE_PARAMS,
 			sizeof(*body));
 	ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
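
hif_set_edca_queue_params() above maps the mac80211 access-category index onto
a HIF queue ID by reversing the order (queue_id = 3 - queue) and, for firmware
APIs older than 2.0, swapping best effort and background. A small standalone
illustration of that mapping; the numeric values assigned to the HIF queue IDs
here are assumptions made for the example:

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK };			/* mac80211 order */
enum { Q_BACKGROUND, Q_BESTEFFORT, Q_VIDEO, Q_VOICE };	/* assumed HIF order */

static int ac_to_hif_queue(int ac, int api_major)
{
	int queue_id = 3 - ac;		/* VO->3, VI->2, BE->1, BK->0 */

	if (api_major < 2) {		/* older firmware swaps BE and BK */
		if (ac == AC_BE)
			queue_id = Q_BACKGROUND;
		if (ac == AC_BK)
			queue_id = Q_BESTEFFORT;
	}
	return queue_id;
}

int main(void)
{
	int ac;

	for (ac = AC_VO; ac <= AC_BK; ac++)
		printf("AC %d -> queue %d (API >= 2.0), queue %d (API < 2.0)\n",
		       ac, ac_to_hif_queue(ac, 2), ac_to_hif_queue(ac, 1));
	return 0;
}
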
@@ -361,43 +398,57 @@ int hif_set_edca_queue_params(struct wfx_vif *wvif,
 	return ret;
 }
 
-int hif_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg)
+int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout)
 {
 	int ret;
 	struct hif_msg *hif;
 	struct hif_req_set_pm_mode *body = wfx_alloc_hif(sizeof(*body), &hif);
 
-	memcpy(body, arg, sizeof(*body));
+	if (!body)
+		return -ENOMEM;
+
+	if (ps) {
+		body->pm_mode.enter_psm = 1;
+		// Firmware does not support more than 128ms
+		body->fast_psm_idle_period = min(dynamic_ps_timeout * 2, 255);
+		if (body->fast_psm_idle_period)
+			body->pm_mode.fast_psm = 1;
+	}
 	wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_PM_MODE, sizeof(*body));
 	ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
 	kfree(hif);
 	return ret;
 }
 
-int hif_start(struct wfx_vif *wvif, const struct hif_req_start *arg)
+int hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+	      const struct ieee80211_channel *channel)
 {
 	int ret;
 	struct hif_msg *hif;
 	struct hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif);
 
-	memcpy(body, arg, sizeof(*body));
-	cpu_to_le16s(&body->channel_number);
-	cpu_to_le32s(&body->beacon_interval);
-	cpu_to_le32s(&body->basic_rate_set);
+	body->dtim_period = conf->dtim_period;
+	body->short_preamble = conf->use_short_preamble;
+	body->channel_number = cpu_to_le16(channel->hw_value);
+	body->beacon_interval = cpu_to_le32(conf->beacon_int);
+	body->basic_rate_set =
+		cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
+	body->ssid_length = conf->ssid_len;
+	memcpy(body->ssid, conf->ssid, conf->ssid_len);
 	wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START, sizeof(*body));
 	ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
 	kfree(hif);
 	return ret;
 }
 
-int hif_beacon_transmit(struct wfx_vif *wvif, bool enable_beaconing)
+int hif_beacon_transmit(struct wfx_vif *wvif, bool enable)
 {
 	int ret;
 	struct hif_msg *hif;
 	struct hif_req_beacon_transmit *body = wfx_alloc_hif(sizeof(*body),
 							     &hif);
 
-	body->enable_beaconing = enable_beaconing ? 1 : 0;
+	body->enable_beaconing = enable ? 1 : 0;
 	wfx_fill_header(hif, wvif->id, HIF_REQ_ID_BEACON_TRANSMIT,
 			sizeof(*body));
 	ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -421,16 +472,15 @@ int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id)
 	return ret;
 }
 
-int hif_update_ie(struct wfx_vif *wvif, const struct hif_ie_flags *target_frame,
-		  const u8 *ies, size_t ies_len)
+int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len)
 {
 	int ret;
 	struct hif_msg *hif;
 	int buf_len = sizeof(struct hif_req_update_ie) + ies_len;
 	struct hif_req_update_ie *body = wfx_alloc_hif(buf_len, &hif);
 
-	memcpy(&body->ie_flags, target_frame, sizeof(struct hif_ie_flags));
-	body->num_i_es = cpu_to_le16(1);
+	body->ie_flags.beacon = 1;
+	body->num_ies = cpu_to_le16(1);
 	memcpy(body->ie, ies, ies_len);
 	wfx_fill_header(hif, wvif->id, HIF_REQ_ID_UPDATE_IE, buf_len);
 	ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h
index f61ae7b..20977e4 100644
--- a/drivers/staging/wfx/hif_tx.h
+++ b/drivers/staging/wfx/hif_tx.h
@@ -12,15 +12,13 @@
 
 #include "hif_api_cmd.h"
 
+struct ieee80211_channel;
+struct ieee80211_bss_conf;
+struct ieee80211_tx_queue_params;
+struct cfg80211_scan_request;
 struct wfx_dev;
 struct wfx_vif;
 
-struct wfx_scan_params {
-	struct hif_req_start_scan scan_req;
-	struct hif_ssid_def *ssids;
-	u8 *ch;
-};
-
 struct wfx_hif_cmd {
 	struct mutex      lock;
 	struct mutex      key_renew_lock;
@@ -44,21 +42,23 @@ int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
 		 void *buf, size_t buf_size);
 int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
 		  void *buf, size_t buf_size);
-int hif_scan(struct wfx_vif *wvif, const struct wfx_scan_params *arg);
+int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211,
+	     int chan_start, int chan_num);
 int hif_stop_scan(struct wfx_vif *wvif);
-int hif_join(struct wfx_vif *wvif, const struct hif_req_join *arg);
-int hif_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg);
+int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+	     const struct ieee80211_channel *channel, const u8 *ssidie);
+int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout);
 int hif_set_bss_params(struct wfx_vif *wvif,
 		       const struct hif_req_set_bss_params *arg);
 int hif_add_key(struct wfx_dev *wdev, const struct hif_req_add_key *arg);
 int hif_remove_key(struct wfx_dev *wdev, int idx);
-int hif_set_edca_queue_params(struct wfx_vif *wvif,
-			      const struct hif_req_edca_queue_params *arg);
-int hif_start(struct wfx_vif *wvif, const struct hif_req_start *arg);
+int hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
+			      const struct ieee80211_tx_queue_params *arg);
+int hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+	      const struct ieee80211_channel *channel);
 int hif_beacon_transmit(struct wfx_vif *wvif, bool enable);
 int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id);
-int hif_update_ie(struct wfx_vif *wvif, const struct hif_ie_flags *target_frame,
-		  const u8 *ies, size_t ies_len);
+int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len);
 int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key,
 		       int destination);
 int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap);
diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/staging/wfx/hif_tx_mib.h
index 9be7488..bf3769c 100644
--- a/drivers/staging/wfx/hif_tx_mib.h
+++ b/drivers/staging/wfx/hif_tx_mib.h
@@ -15,13 +15,15 @@
 #include "hif_tx.h"
 #include "hif_api_mib.h"
 
-static inline int hif_set_output_power(struct wfx_vif *wvif, int power_level)
+static inline int hif_set_output_power(struct wfx_vif *wvif, int val)
 {
-	__le32 val = cpu_to_le32(power_level);
+	struct hif_mib_current_tx_power_level arg = {
+		.power_level = cpu_to_le32(val * 10),
+	};
 
 	return hif_write_mib(wvif->wdev, wvif->id,
 			     HIF_MIB_ID_CURRENT_TX_POWER_LEVEL,
-			     &val, sizeof(val));
+			     &arg, sizeof(arg));
 }
 
 static inline int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
@@ -42,10 +44,25 @@ static inline int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
 }
 
 static inline int hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif,
-					      struct hif_mib_rcpi_rssi_threshold *arg)
+					      int rssi_thold, int rssi_hyst)
 {
+	struct hif_mib_rcpi_rssi_threshold arg = {
+		.rolling_average_count = 8,
+		.detection = 1,
+	};
+
+	if (!rssi_thold && !rssi_hyst) {
+		arg.upperthresh = 1;
+		arg.lowerthresh = 1;
+	} else {
+		arg.upper_threshold = rssi_thold + rssi_hyst;
+		arg.upper_threshold = (arg.upper_threshold + 110) * 2;
+		arg.lower_threshold = rssi_thold;
+		arg.lower_threshold = (arg.lower_threshold + 110) * 2;
+	}
+
 	return hif_write_mib(wvif->wdev, wvif->id,
-			     HIF_MIB_ID_RCPI_RSSI_THRESHOLD, arg, sizeof(*arg));
+			     HIF_MIB_ID_RCPI_RSSI_THRESHOLD, &arg, sizeof(arg));
 }
 
 static inline int hif_get_counters_table(struct wfx_dev *wdev,
@@ -130,8 +147,17 @@ static inline int hif_set_operational_mode(struct wfx_dev *wdev,
 }
 
 static inline int hif_set_template_frame(struct wfx_vif *wvif,
-					 struct hif_mib_template_frame *arg)
+					 struct sk_buff *skb,
+					 u8 frame_type, int init_rate)
 {
+	struct hif_mib_template_frame *arg;
+
+	skb_push(skb, 4);
+	arg = (struct hif_mib_template_frame *)skb->data;
+	skb_pull(skb, 4);
+	arg->init_rate = init_rate;
+	arg->frame_type = frame_type;
+	arg->frame_length = cpu_to_le16(skb->len);
 	return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME,
 			     arg, sizeof(*arg));
 }
@@ -165,50 +191,105 @@ static inline int hif_set_block_ack_policy(struct wfx_vif *wvif,
 }
 
 static inline int hif_set_association_mode(struct wfx_vif *wvif,
-					   struct hif_mib_set_association_mode *arg)
+					   struct ieee80211_bss_conf *info,
+					   struct ieee80211_sta_ht_cap *ht_cap)
 {
+	int basic_rates = wfx_rate_mask_to_hw(wvif->wdev, info->basic_rates);
+	struct hif_mib_set_association_mode val = {
+		.preambtype_use = 1,
+		.mode = 1,
+		.rateset = 1,
+		.spacing = 1,
+		.short_preamble = info->use_short_preamble,
+		.basic_rate_set = cpu_to_le32(basic_rates)
+	};
+
+	// FIXME: it is strange not to retrieve all information from bss_info
+	if (ht_cap && ht_cap->ht_supported) {
+		val.mpdu_start_spacing = ht_cap->ampdu_density;
+		if (!(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
+			val.greenfield = !!(ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD);
+	}
+
 	return hif_write_mib(wvif->wdev, wvif->id,
-			     HIF_MIB_ID_SET_ASSOCIATION_MODE, arg, sizeof(*arg));
+			     HIF_MIB_ID_SET_ASSOCIATION_MODE, &val, sizeof(val));
 }
 
 static inline int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
-					       struct hif_mib_set_tx_rate_retry_policy *arg)
+					       int policy_index, uint8_t *rates)
 {
-	size_t size = struct_size(arg, tx_rate_retry_policy,
-				  arg->num_tx_rate_policies);
+	struct hif_mib_set_tx_rate_retry_policy *arg;
+	size_t size = struct_size(arg, tx_rate_retry_policy, 1);
+	int ret;
 
-	return hif_write_mib(wvif->wdev, wvif->id,
-			     HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, size);
+	arg = kzalloc(size, GFP_KERNEL);
+	if (!arg)
+		return -ENOMEM;
+	arg->num_tx_rate_policies = 1;
+	arg->tx_rate_retry_policy[0].policy_index = policy_index;
+	arg->tx_rate_retry_policy[0].short_retry_count = 255;
+	arg->tx_rate_retry_policy[0].long_retry_count = 255;
+	arg->tx_rate_retry_policy[0].first_rate_sel = 1;
+	arg->tx_rate_retry_policy[0].terminate = 1;
+	arg->tx_rate_retry_policy[0].count_init = 1;
+	memcpy(&arg->tx_rate_retry_policy[0].rates, rates,
+	       sizeof(arg->tx_rate_retry_policy[0].rates));
+	ret = hif_write_mib(wvif->wdev, wvif->id,
+			    HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, size);
+	kfree(arg);
+	return ret;
 }
 
 static inline int hif_set_mac_addr_condition(struct wfx_vif *wvif,
-					     struct hif_mib_mac_addr_data_frame_condition *arg)
+					     int idx, const u8 *mac_addr)
 {
+	struct hif_mib_mac_addr_data_frame_condition val = {
+		.condition_idx = idx,
+		.address_type = HIF_MAC_ADDR_A1,
+	};
+
+	ether_addr_copy(val.mac_address, mac_addr);
 	return hif_write_mib(wvif->wdev, wvif->id,
 			     HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION,
-			     arg, sizeof(*arg));
+			     &val, sizeof(val));
 }
 
 static inline int hif_set_uc_mc_bc_condition(struct wfx_vif *wvif,
-					     struct hif_mib_uc_mc_bc_data_frame_condition *arg)
+					     int idx, u8 allowed_frames)
 {
+	struct hif_mib_uc_mc_bc_data_frame_condition val = {
+		.condition_idx = idx,
+		.allowed_frames = allowed_frames,
+	};
+
 	return hif_write_mib(wvif->wdev, wvif->id,
 			     HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION,
-			     arg, sizeof(*arg));
+			     &val, sizeof(val));
 }
 
-static inline int hif_set_config_data_filter(struct wfx_vif *wvif,
-					     struct hif_mib_config_data_filter *arg)
+static inline int hif_set_config_data_filter(struct wfx_vif *wvif, bool enable,
+					     int idx, int mac_filters,
+					     int frames_types_filters)
 {
+	struct hif_mib_config_data_filter val = {
+		.enable = enable,
+		.filter_idx = idx,
+		.mac_cond = mac_filters,
+		.uc_mc_bc_cond = frames_types_filters,
+	};
+
 	return hif_write_mib(wvif->wdev, wvif->id,
-			     HIF_MIB_ID_CONFIG_DATA_FILTER, arg, sizeof(*arg));
+			     HIF_MIB_ID_CONFIG_DATA_FILTER, &val, sizeof(val));
 }
 
 static inline int hif_set_data_filtering(struct wfx_vif *wvif,
-					 struct hif_mib_set_data_filtering *arg)
+					 bool enable, bool invert)
 {
+	struct hif_mib_set_data_filtering val = {
+		.enable = enable,
+		.invert_matching = invert,
+	};
+
 	return hif_write_mib(wvif->wdev, wvif->id,
-			     HIF_MIB_ID_SET_DATA_FILTERING, arg, sizeof(*arg));
+			     HIF_MIB_ID_SET_DATA_FILTERING, &val, sizeof(val));
 }
 
 static inline int hif_keep_alive_period(struct wfx_vif *wvif, int period)
@@ -221,34 +302,56 @@ static inline int hif_keep_alive_period(struct wfx_vif *wvif, int period)
 			     &arg, sizeof(arg));
 };
 
-static inline int hif_set_arp_ipv4_filter(struct wfx_vif *wvif,
-					  struct hif_mib_arp_ip_addr_table *fp)
+static inline int hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx,
+					  __be32 *addr)
 {
+	struct hif_mib_arp_ip_addr_table arg = {
+		.condition_idx = idx,
+		.arp_enable = HIF_ARP_NS_FILTERING_DISABLE,
+	};
+
+	if (addr) {
+		// Caution: type of addr is __be32
+		memcpy(arg.ipv4_address, addr, sizeof(arg.ipv4_address));
+		arg.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
+	}
 	return hif_write_mib(wvif->wdev, wvif->id,
 			     HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE,
-			     fp, sizeof(*fp));
+			     &arg, sizeof(arg));
 }
 
-static inline int hif_use_multi_tx_conf(struct wfx_dev *wdev,
-					bool enabled)
+static inline int hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable)
 {
-	__le32 arg = enabled ? cpu_to_le32(1) : 0;
+	struct hif_mib_gl_set_multi_msg arg = {
+		.enable_multi_tx_conf = enable,
+	};
 
 	return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG,
 			     &arg, sizeof(arg));
 }
 
-static inline int hif_set_uapsd_info(struct wfx_vif *wvif,
-				     struct hif_mib_set_uapsd_information *arg)
+static inline int hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val)
 {
+	struct hif_mib_set_uapsd_information arg = { };
+
+	if (val & BIT(IEEE80211_AC_VO))
+		arg.trig_voice = 1;
+	if (val & BIT(IEEE80211_AC_VI))
+		arg.trig_video = 1;
+	if (val & BIT(IEEE80211_AC_BE))
+		arg.trig_be = 1;
+	if (val & BIT(IEEE80211_AC_BK))
+		arg.trig_bckgrnd = 1;
 	return hif_write_mib(wvif->wdev, wvif->id,
 			     HIF_MIB_ID_SET_UAPSD_INFORMATION,
-			     arg, sizeof(*arg));
+			     &arg, sizeof(arg));
 }
 
 static inline int hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
 {
-	__le32 arg = enable ? cpu_to_le32(1) : 0;
+	struct hif_mib_non_erp_protection arg = {
+		.use_cts_to_self = enable,
+	};
 
 	return hif_write_mib(wvif->wdev, wvif->id,
 			     HIF_MIB_ID_NON_ERP_PROTECTION, &arg, sizeof(arg));
@@ -256,16 +359,18 @@ static inline int hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
 
 static inline int hif_slot_time(struct wfx_vif *wvif, int val)
 {
-	__le32 arg = cpu_to_le32(val);
+	struct hif_mib_slot_time arg = {
+		.slot_time = cpu_to_le32(val),
+	};
 
 	return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME,
 			     &arg, sizeof(arg));
 }
 
-static inline int hif_dual_cts_protection(struct wfx_vif *wvif, bool val)
+static inline int hif_dual_cts_protection(struct wfx_vif *wvif, bool enable)
 {
 	struct hif_mib_set_ht_protection arg = {
-		.dual_cts_prot = val,
+		.dual_cts_prot = enable,
 	};
 
 	return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_HT_PROTECTION,
@@ -274,7 +379,9 @@ static inline int hif_dual_cts_protection(struct wfx_vif *wvif, bool val)
 
 static inline int hif_wep_default_key_id(struct wfx_vif *wvif, int val)
 {
-	__le32 arg = cpu_to_le32(val);
+	struct hif_mib_wep_default_key_id arg = {
+		.wep_default_key_id = val,
+	};
 
 	return hif_write_mib(wvif->wdev, wvif->id,
 			     HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
@@ -283,7 +390,9 @@ static inline int hif_wep_default_key_id(struct wfx_vif *wvif, int val)
 
 static inline int hif_rts_threshold(struct wfx_vif *wvif, int val)
 {
-	__le32 arg = cpu_to_le32(val > 0 ? val : 0xFFFF);
+	struct hif_mib_dot11_rts_threshold arg = {
+		.threshold = cpu_to_le32(val >= 0 ? val : 0xFFFF),
+	};
 
 	return hif_write_mib(wvif->wdev, wvif->id,
 			     HIF_MIB_ID_DOT11_RTS_THRESHOLD, &arg, sizeof(arg));
diff --git a/drivers/staging/wfx/hwio.h b/drivers/staging/wfx/hwio.h
index b2c1a66..4b6ef06 100644
--- a/drivers/staging/wfx/hwio.h
+++ b/drivers/staging/wfx/hwio.h
@@ -37,16 +37,11 @@ int ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val);
 #define CFG_ERR_HOST_NO_IN_QUEUE   0x00000040
 #define CFG_ERR_HOST_CRC_MISS      0x00000080 // only with SDIO
 #define CFG_SPI_IGNORE_CS          0x00000080 // only with SPI
-/* Bytes ordering (only writable in SPI): */
-#define CFG_WORD_MODE_MASK         0x00000300
-/*
- * B1,B0,B3,B2 (In SPI, register address and
- * CONFIG data always use this mode)
- */
-#define     CFG_WORD_MODE0         0x00000000
-#define     CFG_WORD_MODE1         0x00000100 //   B3,B2,B1,B0
-#define     CFG_WORD_MODE2         0x00000200 //   B0,B1,B2,B3 (SDIO)
-#define CFG_DIRECT_ACCESS_MODE     0x00000400 // Direct or queue access mode
+#define CFG_BYTE_ORDER_MASK        0x00000300 // only writable with SPI
+#define     CFG_BYTE_ORDER_BADC    0x00000000
+#define     CFG_BYTE_ORDER_DCBA    0x00000100
+#define     CFG_BYTE_ORDER_ABCD    0x00000200 // SDIO always use this value
+#define CFG_DIRECT_ACCESS_MODE     0x00000400
 #define CFG_PREFETCH_AHB           0x00000800
 #define CFG_DISABLE_CPU_CLK        0x00001000
 #define CFG_PREFETCH_SRAM          0x00002000
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
index 3b47b6c..84adad6 100644
--- a/drivers/staging/wfx/main.c
+++ b/drivers/staging/wfx/main.c
@@ -131,10 +131,11 @@ static const struct ieee80211_ops wfx_ops = {
 	.stop			= wfx_stop,
 	.add_interface		= wfx_add_interface,
 	.remove_interface	= wfx_remove_interface,
 	.config			= wfx_config,
 	.tx			= wfx_tx,
 	.conf_tx		= wfx_conf_tx,
 	.hw_scan		= wfx_hw_scan,
+	.cancel_hw_scan		= wfx_cancel_hw_scan,
 	.sta_add		= wfx_sta_add,
 	.sta_remove		= wfx_sta_remove,
 	.sta_notify		= wfx_sta_notify,
@@ -182,7 +183,7 @@ struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
 	} else {
 		ret = devm_gpiod_get(dev, label, GPIOD_OUT_LOW);
 	}
-	if (IS_ERR(ret) || !ret) {
+	if (IS_ERR_OR_NULL(ret)) {
 		if (!ret || PTR_ERR(ret) == -ENOENT)
 			dev_warn(dev, "gpio %s is not defined\n", label);
 		else
@@ -297,6 +298,11 @@ struct wfx_dev *wfx_init_common(struct device *dev,
 	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 				     BIT(NL80211_IFTYPE_ADHOC) |
 				     BIT(NL80211_IFTYPE_AP);
+	hw->wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+					NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+					NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
+					NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
+	hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
 	hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
 	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 	hw->wiphy->max_ap_assoc_sta = WFX_MAX_STA_IN_AP_MODE;
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
index 680fed3..0bcc61f 100644
--- a/drivers/staging/wfx/queue.c
+++ b/drivers/staging/wfx/queue.c
@@ -31,8 +31,6 @@ void wfx_tx_flush(struct wfx_dev *wdev)
 {
 	int ret;
 
-	WARN(!atomic_read(&wdev->tx_lock), "tx_lock is not locked");
-
 	// Do not wait for any reply if chip is frozen
 	if (wdev->chip_frozen)
 		return;
@@ -177,11 +175,9 @@ void wfx_tx_queues_deinit(struct wfx_dev *wdev)
 	wfx_tx_queues_clear(wdev);
 }
 
-size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
-				   u32 link_id_map)
+int wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map)
 {
-	size_t ret;
-	int i, bit;
+	int ret, i;
 
 	if (!link_id_map)
 		return 0;
@@ -191,11 +187,9 @@ size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
 		ret = skb_queue_len(&queue->queue);
 	} else {
 		ret = 0;
-		for (i = 0, bit = 1; i < ARRAY_SIZE(queue->link_map_cache);
-		     ++i, bit <<= 1) {
-			if (link_id_map & bit)
+		for (i = 0; i < ARRAY_SIZE(queue->link_map_cache); i++)
+			if (link_id_map & BIT(i))
 				ret += queue->link_map_cache[i];
-		}
 	}
 	spin_unlock_bh(&queue->queue.lock);
 	return ret;
@@ -237,7 +231,6 @@ static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
 			break;
 		}
 	}
-	WARN_ON(!skb);
 	if (skb) {
 		tx_priv = wfx_skb_tx_priv(skb);
 		tx_priv->xmit_timestamp = ktime_get();
@@ -362,80 +355,38 @@ bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
 static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
 			       struct wfx_queue *queue)
 {
-	bool handled = false;
-	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
 	struct hif_req_tx *req = wfx_skb_txreq(skb);
-	struct ieee80211_hdr *frame = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
+	struct ieee80211_key_conf *hw_key = wfx_skb_tx_priv(skb)->hw_key;
+	struct ieee80211_hdr *frame =
+		(struct ieee80211_hdr *)(req->frame + req->data_flags.fc_offset);
 
-	enum {
-		do_probe,
-		do_drop,
-		do_wep,
-		do_tx,
-	} action = do_tx;
-
-	switch (wvif->vif->type) {
-	case NL80211_IFTYPE_STATION:
-		if (wvif->state < WFX_STATE_PRE_STA)
-			action = do_drop;
-		break;
-	case NL80211_IFTYPE_AP:
-		if (!wvif->state) {
-			action = do_drop;
-		} else if (!(BIT(tx_priv->raw_link_id) &
-			     (BIT(0) | wvif->link_id_map))) {
-			dev_warn(wvif->wdev->dev, "a frame with expired link-id is dropped\n");
-			action = do_drop;
+	// FIXME: mac80211 is smart enough to handle BSS loss. Driver should not
+	// try to do anything about that.
+	if (ieee80211_is_nullfunc(frame->frame_control)) {
+		mutex_lock(&wvif->bss_loss_lock);
+		if (wvif->bss_loss_state) {
+			wvif->bss_loss_confirm_id = req->packet_id;
+			req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
 		}
-		break;
-	case NL80211_IFTYPE_ADHOC:
-		if (wvif->state != WFX_STATE_IBSS)
-			action = do_drop;
-		break;
-	case NL80211_IFTYPE_MONITOR:
-	default:
-		action = do_drop;
-		break;
+		mutex_unlock(&wvif->bss_loss_lock);
 	}
 
-	if (action == do_tx) {
-		if (ieee80211_is_nullfunc(frame->frame_control)) {
-			mutex_lock(&wvif->bss_loss_lock);
-			if (wvif->bss_loss_state) {
-				wvif->bss_loss_confirm_id = req->packet_id;
-				req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
-			}
-			mutex_unlock(&wvif->bss_loss_lock);
-		} else if (ieee80211_has_protected(frame->frame_control) &&
-			   tx_priv->hw_key &&
-			   tx_priv->hw_key->keyidx != wvif->wep_default_key_id &&
-			   (tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-			    tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
-			action = do_wep;
-		}
-	}
-
-	switch (action) {
-	case do_drop:
-		wfx_pending_remove(wvif->wdev, skb);
-		handled = true;
-		break;
-	case do_wep:
+	// FIXME: identify the exact scenario matched by this condition. Does it
+	// ever happen?
+	if (ieee80211_has_protected(frame->frame_control) &&
+	    hw_key && hw_key->keyidx != wvif->wep_default_key_id &&
+	    (hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+	     hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
 		wfx_tx_lock(wvif->wdev);
 		WARN_ON(wvif->wep_pending_skb);
-		wvif->wep_default_key_id = tx_priv->hw_key->keyidx;
+		wvif->wep_default_key_id = hw_key->keyidx;
 		wvif->wep_pending_skb = skb;
 		if (!schedule_work(&wvif->wep_key_work))
 			wfx_tx_unlock(wvif->wdev);
-		handled = true;
-		break;
-	case do_tx:
-		break;
-	default:
-		/* Do nothing */
-		break;
+		return true;
+	} else {
+		return false;
 	}
-	return handled;
 }
 
 static int wfx_get_prio_queue(struct wfx_vif *wvif,
@@ -443,7 +394,7 @@ static int wfx_get_prio_queue(struct wfx_vif *wvif,
 {
 	static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
 		BIT(WFX_LINK_ID_UAPSD);
-	struct hif_req_edca_queue_params *edca;
+	const struct ieee80211_tx_queue_params *edca;
 	unsigned int score, best = -1;
 	int winner = -1;
 	int i;
@@ -452,13 +403,13 @@ static int wfx_get_prio_queue(struct wfx_vif *wvif,
 	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
 		int queued;
 
-		edca = &wvif->edca.params[i];
+		edca = &wvif->edca_params[i];
 		queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
 				tx_allowed_mask);
 		if (!queued)
 			continue;
 		*total += queued;
-		score = ((edca->aifsn + edca->cw_min) << 16) +
+		score = ((edca->aifs + edca->cw_min) << 16) +
 			((edca->cw_max - edca->cw_min) *
 			 (get_random_int() & 0xFFFF));
 		if (score < best && (winner < 0 || i != 3)) {
@@ -480,94 +431,100 @@ static int wfx_get_prio_queue(struct wfx_vif *wvif,
 
 static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
 				     struct wfx_queue **queue_p,
-				     u32 *tx_allowed_mask_p,
-				     bool *more)
+				     u32 *tx_allowed_mask_p)
 {
 	int idx;
 	u32 tx_allowed_mask;
 	int total = 0;
 
-	/* Search for a queue with multicast frames buffered */
-	if (wvif->mcast_tx) {
-		tx_allowed_mask = BIT(WFX_LINK_ID_AFTER_DTIM);
-		idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
-		if (idx >= 0) {
-			*more = total > 1;
-			goto found;
-		}
-	}
-
 	/* Search for unicast traffic */
 	tx_allowed_mask = ~wvif->sta_asleep_mask;
 	tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
-	if (wvif->sta_asleep_mask) {
-		tx_allowed_mask |= wvif->pspoll_mask;
+	if (wvif->sta_asleep_mask)
 		tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
-	} else {
+	else
 		tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
-	}
 	idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
 	if (idx < 0)
 		return -ENOENT;
 
-found:
 	*queue_p = &wvif->wdev->tx_queue[idx];
 	*tx_allowed_mask_p = tx_allowed_mask;
 	return 0;
 }
 
+struct hif_msg *wfx_tx_queues_get_after_dtim(struct wfx_vif *wvif)
+{
+	struct wfx_dev *wdev = wvif->wdev;
+	struct ieee80211_tx_info *tx_info;
+	struct hif_msg *hif;
+	struct sk_buff *skb;
+	int i;
+
+	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+		skb_queue_walk(&wdev->tx_queue[i].queue, skb) {
+			tx_info = IEEE80211_SKB_CB(skb);
+			hif = (struct hif_msg *)skb->data;
+			if ((tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) &&
+			    (hif->interface == wvif->id))
+				return (struct hif_msg *)skb->data;
+		}
+	}
+	return NULL;
+}
+
 struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
 {
 	struct sk_buff *skb;
 	struct hif_msg *hif = NULL;
-	struct hif_req_tx *req = NULL;
 	struct wfx_queue *queue = NULL;
 	struct wfx_queue *vif_queue = NULL;
 	u32 tx_allowed_mask = 0;
 	u32 vif_tx_allowed_mask = 0;
 	const struct wfx_tx_priv *tx_priv = NULL;
 	struct wfx_vif *wvif;
-	/* More is used only for broadcasts. */
-	bool more = false;
-	bool vif_more = false;
 	int not_found;
 	int burst;
+	int i;
+
+	if (atomic_read(&wdev->tx_lock))
+		return NULL;
+
+	wvif = NULL;
+	while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+		if (wvif->after_dtim_tx_allowed) {
+			for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+				skb = wfx_tx_queue_get(wvif->wdev,
+						       &wdev->tx_queue[i],
+						       BIT(WFX_LINK_ID_AFTER_DTIM));
+				if (skb) {
+					hif = (struct hif_msg *)skb->data;
+					// Cannot happen since only one vif can
+					// be AP at a time
+					WARN_ON(wvif->id != hif->interface);
+					return hif;
+				}
+			}
+			// No more multicast to send
+			wvif->after_dtim_tx_allowed = false;
+			schedule_work(&wvif->update_tim_work);
+		}
+	}
 
 	for (;;) {
 		int ret = -ENOENT;
 		int queue_num;
-		struct ieee80211_hdr *hdr;
-
-		if (atomic_read(&wdev->tx_lock))
-			return NULL;
 
 		wvif = NULL;
 		while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
 			spin_lock_bh(&wvif->ps_state_lock);
 
 			not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
-							  &vif_tx_allowed_mask,
-							  &vif_more);
-
-			if (wvif->mcast_buffered && (not_found || !vif_more) &&
-					(wvif->mcast_tx ||
-					 !wvif->sta_asleep_mask)) {
-				wvif->mcast_buffered = false;
-				if (wvif->mcast_tx) {
-					wvif->mcast_tx = false;
-					schedule_work(&wvif->mcast_stop_work);
-				}
-			}
+							  &vif_tx_allowed_mask);
 
 			spin_unlock_bh(&wvif->ps_state_lock);
 
-			if (vif_more) {
-				more = true;
-				tx_allowed_mask = vif_tx_allowed_mask;
-				queue = vif_queue;
-				ret = 0;
-				break;
-			} else if (!not_found) {
+			if (!not_found) {
 				if (queue && queue != vif_queue)
 					dev_info(wdev->dev, "vifs disagree about queue priority\n");
 				tx_allowed_mask |= vif_tx_allowed_mask;
@@ -592,11 +549,9 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
 		if (hif_handle_tx_data(wvif, skb, queue))
 			continue;  /* Handled by WSM */
 
-		wvif->pspoll_mask &= ~BIT(tx_priv->raw_link_id);
-
 		/* allow bursting if txop is set */
-		if (wvif->edca.params[queue_num].tx_op_limit)
-			burst = (int)wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
+		if (wvif->edca_params[queue_num].txop)
+			burst = wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
 		else
 			burst = 1;
 
@@ -606,15 +561,6 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
 		else
 			wdev->tx_burst_idx = -1;
 
-		/* more buffered multicast/broadcast frames
-		 *  ==> set MoreData flag in IEEE 802.11 header
-		 *  to inform PS STAs
-		 */
-		if (more) {
-			req = (struct hif_req_tx *) hif->body;
-			hdr = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
-			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-		}
 		return hif;
 	}
 }
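
For reference, the wfx_get_prio_queue() scoring kept above picks the access
category with the smallest (aifs + cw_min) << 16 plus a random spread over the
contention window, so a higher-priority AC wins whenever it has traffic
pending. The standalone model below reproduces that arbitration over all four
ACs; the EDCA parameters are common WMM defaults assumed for the example, not
values read back from the driver:

#include <stdio.h>
#include <stdlib.h>

struct edca {
	int aifs, cw_min, cw_max;
};

static unsigned int score(const struct edca *p)
{
	return ((unsigned int)(p->aifs + p->cw_min) << 16) +
	       (unsigned int)(p->cw_max - p->cw_min) * (rand() & 0xFFFF);
}

int main(void)
{
	/* VO, VI, BE, BK */
	const struct edca params[4] = {
		{ 2, 3, 7 }, { 2, 7, 15 }, { 3, 15, 1023 }, { 7, 15, 1023 },
	};
	unsigned int best, s;
	int i, winner = 0;

	srand(1);
	best = score(&params[0]);
	for (i = 1; i < 4; i++) {
		s = score(&params[i]);
		if (s < best) {
			best = s;
			winner = i;
		}
	}
	printf("winner: AC %d (score %u)\n", winner, best);
	return 0;
}
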
diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h
index 21566e4..90bb060 100644
--- a/drivers/staging/wfx/queue.h
+++ b/drivers/staging/wfx/queue.h
@@ -13,9 +13,10 @@
 #include "hif_api_cmd.h"
 
 #define WFX_MAX_STA_IN_AP_MODE    14
-#define WFX_LINK_ID_AFTER_DTIM    (WFX_MAX_STA_IN_AP_MODE + 1)
-#define WFX_LINK_ID_UAPSD         (WFX_MAX_STA_IN_AP_MODE + 2)
-#define WFX_LINK_ID_MAX           (WFX_MAX_STA_IN_AP_MODE + 3)
+#define WFX_LINK_ID_NO_ASSOC      15
+#define WFX_LINK_ID_AFTER_DTIM    (WFX_LINK_ID_NO_ASSOC + 1)
+#define WFX_LINK_ID_UAPSD         (WFX_LINK_ID_NO_ASSOC + 2)
+#define WFX_LINK_ID_MAX           (WFX_LINK_ID_NO_ASSOC + 3)
 
 struct wfx_dev;
 struct wfx_vif;
@@ -46,10 +47,11 @@ void wfx_tx_queues_clear(struct wfx_dev *wdev);
 bool wfx_tx_queues_is_empty(struct wfx_dev *wdev);
 void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif);
 struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev);
+struct hif_msg *wfx_tx_queues_get_after_dtim(struct wfx_vif *wvif);
 
 void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
 		      struct sk_buff *skb);
-size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map);
+int wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map);
 
 struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id);
 int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb);
diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c
index 35fcf91..6e1e500 100644
--- a/drivers/staging/wfx/scan.c
+++ b/drivers/staging/wfx/scan.c
@@ -16,279 +16,118 @@ static void __ieee80211_scan_completed_compat(struct ieee80211_hw *hw,
 					      bool aborted)
 {
 	struct cfg80211_scan_info info = {
-		.aborted = aborted ? 1 : 0,
+		.aborted = aborted,
 	};
 
 	ieee80211_scan_completed(hw, &info);
 }
 
-static void wfx_scan_restart_delayed(struct wfx_vif *wvif)
+static int update_probe_tmpl(struct wfx_vif *wvif,
+			     struct cfg80211_scan_request *req)
 {
-	if (wvif->delayed_unjoin) {
-		wvif->delayed_unjoin = false;
-		if (!schedule_work(&wvif->unjoin_work))
-			wfx_tx_unlock(wvif->wdev);
-	} else if (wvif->delayed_link_loss) {
-		wvif->delayed_link_loss = 0;
-		wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
-	}
+	struct sk_buff *skb;
+
+	skb = ieee80211_probereq_get(wvif->wdev->hw, wvif->vif->addr,
+				     NULL, 0, req->ie_len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put_data(skb, req->ie, req->ie_len);
+	hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBREQ, 0);
+	dev_kfree_skb(skb);
+	return 0;
 }
 
-static int wfx_scan_start(struct wfx_vif *wvif, struct wfx_scan_params *scan)
+static int send_scan_req(struct wfx_vif *wvif,
+			 struct cfg80211_scan_request *req, int start_idx)
 {
-	int ret;
-	int tmo = 500;
+	int i, ret, timeout;
+	struct ieee80211_channel *ch_start, *ch_cur;
+
+	for (i = start_idx; i < req->n_channels; i++) {
+		ch_start = req->channels[start_idx];
+		ch_cur = req->channels[i];
+		WARN(ch_cur->band != NL80211_BAND_2GHZ, "band not supported");
+		if (ch_cur->max_power != ch_start->max_power)
+			break;
+		if ((ch_cur->flags ^ ch_start->flags) & IEEE80211_CHAN_NO_IR)
+			break;
+	}
+	wfx_tx_lock_flush(wvif->wdev);
+	wvif->scan_abort = false;
+	reinit_completion(&wvif->scan_complete);
+	timeout = hif_scan(wvif, req, start_idx, i - start_idx);
+	if (timeout < 0)
+		return timeout;
+	ret = wait_for_completion_timeout(&wvif->scan_complete, timeout);
+	if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower)
+		hif_set_output_power(wvif, wvif->vif->bss_conf.txpower);
+	wfx_tx_unlock(wvif->wdev);
+	if (!ret) {
+		dev_notice(wvif->wdev->dev, "scan timeout\n");
+		hif_stop_scan(wvif);
+		return -ETIMEDOUT;
+	}
+	if (wvif->scan_abort) {
+		dev_notice(wvif->wdev->dev, "scan abort\n");
+		return -ECONNABORTED;
+	}
+	return i - start_idx;
+}
+
+/*
+ * It is not really necessary to run the scan request asynchronously. However,
+ * there is a bug in "iw scan" when ieee80211_scan_completed() is called before
+ * wfx_hw_scan() returns.
+ */
+void wfx_hw_scan_work(struct work_struct *work)
+{
+	struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan_work);
+	struct ieee80211_scan_request *hw_req = wvif->scan_req;
+	int chan_cur, ret;
+
+	mutex_lock(&wvif->scan_lock);
+	mutex_lock(&wvif->wdev->conf_mutex);
+	update_probe_tmpl(wvif, &hw_req->req);
+	wfx_fwd_probe_req(wvif, true);
+	chan_cur = 0;
+	do {
+		ret = send_scan_req(wvif, &hw_req->req, chan_cur);
+		if (ret > 0)
+			chan_cur += ret;
+	} while (ret > 0 && chan_cur < hw_req->req.n_channels);
+	mutex_unlock(&wvif->wdev->conf_mutex);
+	mutex_unlock(&wvif->scan_lock);
+	__ieee80211_scan_completed_compat(wvif->wdev->hw, ret < 0);
+}
+
+int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		struct ieee80211_scan_request *hw_req)
+{
+	struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+
+	WARN_ON(hw_req->req.n_channels > HIF_API_MAX_NB_CHANNELS);
+
+	if (vif->type == NL80211_IFTYPE_AP)
+		return -EOPNOTSUPP;
 
 	if (wvif->state == WFX_STATE_PRE_STA)
 		return -EBUSY;
 
-	tmo += scan->scan_req.num_of_channels *
-	       ((20 * (scan->scan_req.max_channel_time)) + 10);
-	atomic_set(&wvif->scan.in_progress, 1);
-	atomic_set(&wvif->wdev->scan_in_progress, 1);
-
-	schedule_delayed_work(&wvif->scan.timeout, msecs_to_jiffies(tmo));
-	ret = hif_scan(wvif, scan);
-	if (ret) {
-		wfx_scan_failed_cb(wvif);
-		atomic_set(&wvif->scan.in_progress, 0);
-		atomic_set(&wvif->wdev->scan_in_progress, 0);
-		cancel_delayed_work_sync(&wvif->scan.timeout);
-		wfx_scan_restart_delayed(wvif);
-	}
-	return ret;
-}
-
-int wfx_hw_scan(struct ieee80211_hw *hw,
-		   struct ieee80211_vif *vif,
-		   struct ieee80211_scan_request *hw_req)
-{
-	struct wfx_dev *wdev = hw->priv;
-	struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
-	struct cfg80211_scan_request *req = &hw_req->req;
-	struct sk_buff *skb;
-	int i, ret;
-	struct hif_mib_template_frame *p;
-
-	if (!wvif)
-		return -EINVAL;
-
-	if (wvif->state == WFX_STATE_AP)
-		return -EOPNOTSUPP;
-
-	if (req->n_ssids == 1 && !req->ssids[0].ssid_len)
-		req->n_ssids = 0;
-
-	if (req->n_ssids > HIF_API_MAX_NB_SSIDS)
-		return -EINVAL;
-
-	skb = ieee80211_probereq_get(hw, wvif->vif->addr, NULL, 0, req->ie_len);
-	if (!skb)
-		return -ENOMEM;
-
-	if (req->ie_len)
-		memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
-
-	mutex_lock(&wdev->conf_mutex);
-
-	p = (struct hif_mib_template_frame *)skb_push(skb, 4);
-	p->frame_type = HIF_TMPLT_PRBREQ;
-	p->frame_length = cpu_to_le16(skb->len - 4);
-	ret = hif_set_template_frame(wvif, p);
-	skb_pull(skb, 4);
-
-	if (!ret)
-		/* Host want to be the probe responder. */
-		ret = wfx_fwd_probe_req(wvif, true);
-	if (ret) {
-		mutex_unlock(&wdev->conf_mutex);
-		dev_kfree_skb(skb);
-		return ret;
-	}
-
-	wfx_tx_lock_flush(wdev);
-
-	WARN(wvif->scan.req, "unexpected concurrent scan");
-	wvif->scan.req = req;
-	wvif->scan.n_ssids = 0;
-	wvif->scan.status = 0;
-	wvif->scan.begin = &req->channels[0];
-	wvif->scan.curr = wvif->scan.begin;
-	wvif->scan.end = &req->channels[req->n_channels];
-	wvif->scan.output_power = wdev->output_power;
-
-	for (i = 0; i < req->n_ssids; ++i) {
-		struct hif_ssid_def *dst = &wvif->scan.ssids[wvif->scan.n_ssids];
-
-		memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid));
-		dst->ssid_length = req->ssids[i].ssid_len;
-		++wvif->scan.n_ssids;
-	}
-
-	mutex_unlock(&wdev->conf_mutex);
-
-	if (skb)
-		dev_kfree_skb(skb);
-	schedule_work(&wvif->scan.work);
+	wvif->scan_req = hw_req;
+	schedule_work(&wvif->scan_work);
 	return 0;
 }
 
-void wfx_scan_work(struct work_struct *work)
+void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
-	struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan.work);
-	struct ieee80211_channel **it;
-	struct wfx_scan_params scan = {
-		.scan_req.scan_type.type = 0,    /* Foreground */
-	};
-	struct ieee80211_channel *first;
-	bool first_run = (wvif->scan.begin == wvif->scan.curr &&
-			  wvif->scan.begin != wvif->scan.end);
-	int i;
+	struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
 
-	down(&wvif->scan.lock);
-	mutex_lock(&wvif->wdev->conf_mutex);
-
-	if (first_run) {
-		if (wvif->state == WFX_STATE_STA &&
-		    !(wvif->powersave_mode.pm_mode.enter_psm)) {
-			struct hif_req_set_pm_mode pm = wvif->powersave_mode;
-
-			pm.pm_mode.enter_psm = 1;
-			wfx_set_pm(wvif, &pm);
-		}
-	}
-
-	if (!wvif->scan.req || wvif->scan.curr == wvif->scan.end) {
-		if (wvif->scan.output_power != wvif->wdev->output_power)
-			hif_set_output_power(wvif,
-					     wvif->wdev->output_power * 10);
-
-		if (wvif->scan.status < 0)
-			dev_warn(wvif->wdev->dev, "scan failed\n");
-		else if (wvif->scan.req)
-			dev_dbg(wvif->wdev->dev, "scan completed\n");
-		else
-			dev_dbg(wvif->wdev->dev, "scan canceled\n");
-
-		wvif->scan.req = NULL;
-		wfx_scan_restart_delayed(wvif);
-		wfx_tx_unlock(wvif->wdev);
-		mutex_unlock(&wvif->wdev->conf_mutex);
-		__ieee80211_scan_completed_compat(wvif->wdev->hw,
-						  wvif->scan.status ? 1 : 0);
-		up(&wvif->scan.lock);
-		if (wvif->state == WFX_STATE_STA &&
-		    !(wvif->powersave_mode.pm_mode.enter_psm))
-			wfx_set_pm(wvif, &wvif->powersave_mode);
-		return;
-	}
-	first = *wvif->scan.curr;
-
-	for (it = wvif->scan.curr + 1, i = 1;
-	     it != wvif->scan.end && i < HIF_API_MAX_NB_CHANNELS;
-	     ++it, ++i) {
-		if ((*it)->band != first->band)
-			break;
-		if (((*it)->flags ^ first->flags) &
-				IEEE80211_CHAN_NO_IR)
-			break;
-		if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
-		    (*it)->max_power != first->max_power)
-			break;
-	}
-	scan.scan_req.band = first->band;
-
-	if (wvif->scan.req->no_cck)
-		scan.scan_req.max_transmit_rate = API_RATE_INDEX_G_6MBPS;
-	else
-		scan.scan_req.max_transmit_rate = API_RATE_INDEX_B_1MBPS;
-	scan.scan_req.num_of_probe_requests =
-		(first->flags & IEEE80211_CHAN_NO_IR) ? 0 : 2;
-	scan.scan_req.num_of_ssi_ds = wvif->scan.n_ssids;
-	scan.ssids = &wvif->scan.ssids[0];
-	scan.scan_req.num_of_channels = it - wvif->scan.curr;
-	scan.scan_req.probe_delay = 100;
-	// FIXME: Check if FW can do active scan while joined.
-	if (wvif->state == WFX_STATE_STA) {
-		scan.scan_req.scan_type.type = 1;
-		scan.scan_req.scan_flags.fbg = 1;
-	}
-
-	scan.ch = kcalloc(scan.scan_req.num_of_channels,
-			  sizeof(u8), GFP_KERNEL);
-
-	if (!scan.ch) {
-		wvif->scan.status = -ENOMEM;
-		goto fail;
-	}
-	for (i = 0; i < scan.scan_req.num_of_channels; ++i)
-		scan.ch[i] = wvif->scan.curr[i]->hw_value;
-
-	if (wvif->scan.curr[0]->flags & IEEE80211_CHAN_NO_IR) {
-		scan.scan_req.min_channel_time = 50;
-		scan.scan_req.max_channel_time = 150;
-	} else {
-		scan.scan_req.min_channel_time = 10;
-		scan.scan_req.max_channel_time = 50;
-	}
-	if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
-	    wvif->scan.output_power != first->max_power) {
-		wvif->scan.output_power = first->max_power;
-		hif_set_output_power(wvif, wvif->scan.output_power * 10);
-	}
-	wvif->scan.status = wfx_scan_start(wvif, &scan);
-	kfree(scan.ch);
-	if (wvif->scan.status)
-		goto fail;
-	wvif->scan.curr = it;
-	mutex_unlock(&wvif->wdev->conf_mutex);
-	return;
-
-fail:
-	wvif->scan.curr = wvif->scan.end;
-	mutex_unlock(&wvif->wdev->conf_mutex);
-	up(&wvif->scan.lock);
-	schedule_work(&wvif->scan.work);
+	wvif->scan_abort = true;
+	hif_stop_scan(wvif);
 }
 
-static void wfx_scan_complete(struct wfx_vif *wvif)
+void wfx_scan_complete(struct wfx_vif *wvif)
 {
-	up(&wvif->scan.lock);
-	atomic_set(&wvif->wdev->scan_in_progress, 0);
-
-	wfx_scan_work(&wvif->scan.work);
-}
-
-void wfx_scan_failed_cb(struct wfx_vif *wvif)
-{
-	if (cancel_delayed_work_sync(&wvif->scan.timeout) > 0) {
-		wvif->scan.status = -EIO;
-		schedule_work(&wvif->scan.timeout.work);
-	}
-}
-
-void wfx_scan_complete_cb(struct wfx_vif *wvif, struct hif_ind_scan_cmpl *arg)
-{
-	if (cancel_delayed_work_sync(&wvif->scan.timeout) > 0) {
-		wvif->scan.status = 1;
-		schedule_work(&wvif->scan.timeout.work);
-	}
-}
-
-void wfx_scan_timeout(struct work_struct *work)
-{
-	struct wfx_vif *wvif = container_of(work, struct wfx_vif,
-					    scan.timeout.work);
-
-	if (atomic_xchg(&wvif->scan.in_progress, 0)) {
-		if (wvif->scan.status > 0) {
-			wvif->scan.status = 0;
-		} else if (!wvif->scan.status) {
-			dev_warn(wvif->wdev->dev, "timeout waiting for scan complete notification\n");
-			wvif->scan.status = -ETIMEDOUT;
-			wvif->scan.curr = wvif->scan.end;
-			hif_stop_scan(wvif);
-		}
-		wfx_scan_complete(wvif);
-	}
+	complete(&wvif->scan_complete);
 }
diff --git a/drivers/staging/wfx/scan.h b/drivers/staging/wfx/scan.h
index b4ddd07..2eb786c 100644
--- a/drivers/staging/wfx/scan.h
+++ b/drivers/staging/wfx/scan.h
@@ -8,35 +8,15 @@
 #ifndef WFX_SCAN_H
 #define WFX_SCAN_H
 
-#include <linux/semaphore.h>
-#include <linux/workqueue.h>
 #include <net/mac80211.h>
 
-#include "hif_api_cmd.h"
-
 struct wfx_dev;
 struct wfx_vif;
 
-struct wfx_scan {
-	struct semaphore lock;
-	struct work_struct work;
-	struct delayed_work timeout;
-	struct cfg80211_scan_request *req;
-	struct ieee80211_channel **begin;
-	struct ieee80211_channel **curr;
-	struct ieee80211_channel **end;
-	struct hif_ssid_def ssids[HIF_API_MAX_NB_SSIDS];
-	int output_power;
-	int n_ssids;
-	int status;
-	atomic_t in_progress;
-};
-
+void wfx_hw_scan_work(struct work_struct *work);
 int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		struct ieee80211_scan_request *req);
-void wfx_scan_work(struct work_struct *work);
-void wfx_scan_timeout(struct work_struct *work);
-void wfx_scan_complete_cb(struct wfx_vif *wvif, struct hif_ind_scan_cmpl *arg);
-void wfx_scan_failed_cb(struct wfx_vif *wvif);
+void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_scan_complete(struct wfx_vif *wvif);
 
 #endif /* WFX_SCAN_H */
diff --git a/drivers/staging/wfx/secure_link.h b/drivers/staging/wfx/secure_link.h
index 666b26e..c3d055b 100644
--- a/drivers/staging/wfx/secure_link.h
+++ b/drivers/staging/wfx/secure_link.h
@@ -25,14 +25,16 @@ static inline int wfx_sl_decode(struct wfx_dev *wdev, struct hif_sl_msg *m)
 	return -EIO;
 }
 
-static inline int wfx_sl_encode(struct wfx_dev *wdev, struct hif_msg *input,
+static inline int wfx_sl_encode(struct wfx_dev *wdev,
+				const struct hif_msg *input,
 				struct hif_sl_msg *output)
 {
 	return -EIO;
 }
 
-static inline int wfx_sl_check_pubkey(struct wfx_dev *wdev, u8 *ncp_pubkey,
-				      u8 *ncp_pubmac)
+static inline int wfx_sl_check_pubkey(struct wfx_dev *wdev,
+				      const u8 *ncp_pubkey,
+				      const u8 *ncp_pubmac)
 {
 	return -EIO;
 }
diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
index 471dd15..03d0f22 100644
--- a/drivers/staging/wfx/sta.c
+++ b/drivers/staging/wfx/sta.c
@@ -17,10 +17,9 @@
 #include "hif_tx.h"
 #include "hif_tx_mib.h"
 
-#define TXOP_UNIT 32
 #define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2
 
-static u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
+u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
 {
 	int i;
 	u32 ret = 0;
@@ -64,13 +63,8 @@ void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad)
 	int tx = 0;
 
 	mutex_lock(&wvif->bss_loss_lock);
-	wvif->delayed_link_loss = 0;
 	cancel_work_sync(&wvif->bss_params_work);
 
-	/* If we have a pending unjoin */
-	if (wvif->delayed_unjoin)
-		goto end;
-
 	if (init) {
 		schedule_delayed_work(&wvif->bss_loss_work, HZ);
 		wvif->bss_loss_state = 0;
@@ -94,62 +88,30 @@ void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad)
 	// FIXME: call ieee80211_beacon_loss/ieee80211_connection_loss instead
 	if (tx) {
 		struct sk_buff *skb;
+		struct ieee80211_hdr *hdr;
+		struct ieee80211_tx_control control = { };
 
 		wvif->bss_loss_state++;
 
 		skb = ieee80211_nullfunc_get(wvif->wdev->hw, wvif->vif, false);
 		if (!skb)
 			goto end;
+		hdr = (struct ieee80211_hdr *)skb->data;
 		memset(IEEE80211_SKB_CB(skb), 0,
 		       sizeof(*IEEE80211_SKB_CB(skb)));
 		IEEE80211_SKB_CB(skb)->control.vif = wvif->vif;
 		IEEE80211_SKB_CB(skb)->driver_rates[0].idx = 0;
 		IEEE80211_SKB_CB(skb)->driver_rates[0].count = 1;
 		IEEE80211_SKB_CB(skb)->driver_rates[1].idx = -1;
-		wfx_tx(wvif->wdev->hw, NULL, skb);
+		rcu_read_lock(); // protect control.sta
+		control.sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
+		wfx_tx(wvif->wdev->hw, &control, skb);
+		rcu_read_unlock();
 	}
 end:
 	mutex_unlock(&wvif->bss_loss_lock);
 }
 
-static int wfx_set_uapsd_param(struct wfx_vif *wvif,
-			   const struct wfx_edca_params *arg)
-{
-	/* Here's the mapping AC [queue, bit]
-	 *  VO [0,3], VI [1, 2], BE [2, 1], BK [3, 0]
-	 */
-
-	if (arg->uapsd_enable[IEEE80211_AC_VO])
-		wvif->uapsd_info.trig_voice = 1;
-	else
-		wvif->uapsd_info.trig_voice = 0;
-
-	if (arg->uapsd_enable[IEEE80211_AC_VI])
-		wvif->uapsd_info.trig_video = 1;
-	else
-		wvif->uapsd_info.trig_video = 0;
-
-	if (arg->uapsd_enable[IEEE80211_AC_BE])
-		wvif->uapsd_info.trig_be = 1;
-	else
-		wvif->uapsd_info.trig_be = 0;
-
-	if (arg->uapsd_enable[IEEE80211_AC_BK])
-		wvif->uapsd_info.trig_bckgrnd = 1;
-	else
-		wvif->uapsd_info.trig_bckgrnd = 0;
-
-	/* Currently pseudo U-APSD operation is not supported, so setting
-	 * MinAutoTriggerInterval, MaxAutoTriggerInterval and
-	 * AutoTriggerStep to 0
-	 */
-	wvif->uapsd_info.min_auto_trigger_interval = 0;
-	wvif->uapsd_info.max_auto_trigger_interval = 0;
-	wvif->uapsd_info.auto_trigger_step = 0;
-
-	return hif_set_uapsd_info(wvif, &wvif->uapsd_info);
-}
-
 int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable)
 {
 	wvif->fwd_probe_req = enable;
@@ -160,63 +122,31 @@ int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable)
 static int wfx_set_mcast_filter(struct wfx_vif *wvif,
 				    struct wfx_grp_addr_table *fp)
 {
-	int i, ret;
-	struct hif_mib_config_data_filter config = { };
-	struct hif_mib_set_data_filtering filter_data = { };
-	struct hif_mib_mac_addr_data_frame_condition filter_addr_val = { };
-	struct hif_mib_uc_mc_bc_data_frame_condition filter_addr_type = { };
+	int i;
 
 	// Temporary workaround for filters
-	return hif_set_data_filtering(wvif, &filter_data);
+	return hif_set_data_filtering(wvif, false, true);
 
-	if (!fp->enable) {
-		filter_data.enable = 0;
-		return hif_set_data_filtering(wvif, &filter_data);
-	}
+	if (!fp->enable)
+		return hif_set_data_filtering(wvif, false, true);
 
-	// A1 Address match on list
-	for (i = 0; i < fp->num_addresses; i++) {
-		filter_addr_val.condition_idx = i;
-		filter_addr_val.address_type = HIF_MAC_ADDR_A1;
-		ether_addr_copy(filter_addr_val.mac_address,
-				fp->address_list[i]);
-		ret = hif_set_mac_addr_condition(wvif,
-						 &filter_addr_val);
-		if (ret)
-			return ret;
-		config.mac_cond |= 1 << i;
-	}
+	for (i = 0; i < fp->num_addresses; i++)
+		hif_set_mac_addr_condition(wvif, i, fp->address_list[i]);
+	hif_set_uc_mc_bc_condition(wvif, 0,
+				   HIF_FILTER_UNICAST | HIF_FILTER_BROADCAST);
+	hif_set_config_data_filter(wvif, true, 0, BIT(1),
+				   BIT(fp->num_addresses) - 1);
+	hif_set_data_filtering(wvif, true, true);
 
-	// Accept unicast and broadcast
-	filter_addr_type.condition_idx = 0;
-	filter_addr_type.param.bits.type_unicast = 1;
-	filter_addr_type.param.bits.type_broadcast = 1;
-	ret = hif_set_uc_mc_bc_condition(wvif, &filter_addr_type);
-	if (ret)
-		return ret;
-
-	config.uc_mc_bc_cond = 1;
-	config.filter_idx = 0; // TODO #define MULTICAST_FILTERING 0
-	config.enable = 1;
-	ret = hif_set_config_data_filter(wvif, &config);
-	if (ret)
-		return ret;
-
-	// discard all data frames except match filter
-	filter_data.enable = 1;
-	filter_data.default_filter = 1; // discard all
-	ret = hif_set_data_filtering(wvif, &filter_data);
-
-	return ret;
+	return 0;
 }
 
 void wfx_update_filtering(struct wfx_vif *wvif)
 {
 	int ret;
-	bool is_sta = wvif->vif && NL80211_IFTYPE_STATION == wvif->vif->type;
-	bool filter_bssid = wvif->filter_bssid;
-	bool fwd_probe_req = wvif->fwd_probe_req;
-	struct hif_mib_bcn_filter_enable bf_ctrl;
+	int bf_enable;
+	int bf_count;
+	int n_filter_ies;
 	struct hif_ie_table_entry filter_ies[] = {
 		{
 			.ie_id        = WLAN_EID_VENDOR_SPECIFIC,
@@ -236,33 +166,29 @@ void wfx_update_filtering(struct wfx_vif *wvif)
 			.has_appeared = 1,
 		}
 	};
-	int n_filter_ies;
 
 	if (wvif->state == WFX_STATE_PASSIVE)
 		return;
 
 	if (wvif->disable_beacon_filter) {
-		bf_ctrl.enable = 0;
-		bf_ctrl.bcn_count = 1;
+		bf_enable = 0;
+		bf_count = 1;
 		n_filter_ies = 0;
-	} else if (!is_sta) {
-		bf_ctrl.enable = HIF_BEACON_FILTER_ENABLE |
-				 HIF_BEACON_FILTER_AUTO_ERP;
-		bf_ctrl.bcn_count = 0;
+	} else if (wvif->vif->type != NL80211_IFTYPE_STATION) {
+		bf_enable = HIF_BEACON_FILTER_ENABLE | HIF_BEACON_FILTER_AUTO_ERP;
+		bf_count = 0;
 		n_filter_ies = 2;
 	} else {
-		bf_ctrl.enable = HIF_BEACON_FILTER_ENABLE;
-		bf_ctrl.bcn_count = 0;
+		bf_enable = HIF_BEACON_FILTER_ENABLE;
+		bf_count = 0;
 		n_filter_ies = 3;
 	}
 
-	ret = hif_set_rx_filter(wvif, filter_bssid, fwd_probe_req);
+	ret = hif_set_rx_filter(wvif, wvif->filter_bssid, wvif->fwd_probe_req);
 	if (!ret)
-		ret = hif_set_beacon_filter_table(wvif, n_filter_ies,
-						  filter_ies);
+		ret = hif_set_beacon_filter_table(wvif, n_filter_ies, filter_ies);
 	if (!ret)
-		ret = hif_beacon_filter_control(wvif, bf_ctrl.enable,
-						bf_ctrl.bcn_count);
+		ret = hif_beacon_filter_control(wvif, bf_enable, bf_count);
 	if (!ret)
 		ret = wfx_set_mcast_filter(wvif, &wvif->mcast_filter);
 	if (ret)
@@ -316,93 +242,74 @@ void wfx_configure_filter(struct ieee80211_hw *hw,
 	*total_flags &= FIF_OTHER_BSS | FIF_FCSFAIL | FIF_PROBE_REQ;
 
 	while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
-		down(&wvif->scan.lock);
+		mutex_lock(&wvif->scan_lock);
 		wvif->filter_bssid = (*total_flags &
 				      (FIF_OTHER_BSS | FIF_PROBE_REQ)) ? 0 : 1;
 		wvif->disable_beacon_filter = !(*total_flags & FIF_PROBE_REQ);
 		wfx_fwd_probe_req(wvif, true);
 		wfx_update_filtering(wvif);
-		up(&wvif->scan.lock);
+		mutex_unlock(&wvif->scan_lock);
 	}
 }
 
+static int wfx_update_pm(struct wfx_vif *wvif)
+{
+	struct ieee80211_conf *conf = &wvif->wdev->hw->conf;
+	bool ps = conf->flags & IEEE80211_CONF_PS;
+	int ps_timeout = conf->dynamic_ps_timeout;
+	struct ieee80211_channel *chan0 = NULL, *chan1 = NULL;
+
+	WARN_ON(conf->dynamic_ps_timeout < 0);
+	if (wvif->state != WFX_STATE_STA || !wvif->bss_params.aid)
+		return 0;
+	if (!ps)
+		ps_timeout = 0;
+	if (wvif->uapsd_mask)
+		ps_timeout = 0;
+
+	// The kernel disables powersave when an AP is in use. On the contrary,
+	// it is absolutely necessary to enable legacy powersave for the WF200
+	// if the channels are different.
+	if (wdev_to_wvif(wvif->wdev, 0))
+		chan0 = wdev_to_wvif(wvif->wdev, 0)->vif->bss_conf.chandef.chan;
+	if (wdev_to_wvif(wvif->wdev, 1))
+		chan1 = wdev_to_wvif(wvif->wdev, 1)->vif->bss_conf.chandef.chan;
+	if (chan0 && chan1 && chan0->hw_value != chan1->hw_value &&
+	    wvif->vif->type != NL80211_IFTYPE_AP) {
+		ps = true;
+		ps_timeout = 0;
+	}
+
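+	// Wait for the previous PM request to complete before sending a new
+	// hif_set_pm().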
+	if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete,
+					 TU_TO_JIFFIES(512)))
+		dev_warn(wvif->wdev->dev,
+			 "timeout while waiting for set_pm_mode_complete\n");
+	return hif_set_pm(wvif, ps, ps_timeout);
+}
+
 int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		   u16 queue, const struct ieee80211_tx_queue_params *params)
 {
 	struct wfx_dev *wdev = hw->priv;
 	struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+	int old_uapsd = wvif->uapsd_mask;
 	int ret = 0;
-	/* To prevent re-applying PM request OID again and again*/
-	u16 old_uapsd_flags, new_uapsd_flags;
-	struct hif_req_edca_queue_params *edca;
+
+	WARN_ON(queue >= hw->queues);
 
 	mutex_lock(&wdev->conf_mutex);
-
-	if (queue < hw->queues) {
-		old_uapsd_flags = *((u16 *) &wvif->uapsd_info);
-		edca = &wvif->edca.params[queue];
-
-		wvif->edca.uapsd_enable[queue] = params->uapsd;
-		edca->aifsn = params->aifs;
-		edca->cw_min = params->cw_min;
-		edca->cw_max = params->cw_max;
-		edca->tx_op_limit = params->txop * TXOP_UNIT;
-		edca->allowed_medium_time = 0;
-		ret = hif_set_edca_queue_params(wvif, edca);
-		if (ret) {
-			ret = -EINVAL;
-			goto out;
-		}
-
-		if (wvif->vif->type == NL80211_IFTYPE_STATION) {
-			ret = wfx_set_uapsd_param(wvif, &wvif->edca);
-			new_uapsd_flags = *((u16 *) &wvif->uapsd_info);
-			if (!ret && wvif->setbssparams_done &&
-			    wvif->state == WFX_STATE_STA &&
-			    old_uapsd_flags != new_uapsd_flags)
-				ret = wfx_set_pm(wvif, &wvif->powersave_mode);
-		}
-	} else {
-		ret = -EINVAL;
+	assign_bit(queue, &wvif->uapsd_mask, params->uapsd);
+	memcpy(&wvif->edca_params[queue], params, sizeof(*params));
+	hif_set_edca_queue_params(wvif, queue, params);
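+	// A change of the U-APSD configuration also requires the firmware
+	// U-APSD info and the power management mode to be refreshed.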
+	if (wvif->vif->type == NL80211_IFTYPE_STATION &&
+	    old_uapsd != wvif->uapsd_mask) {
+		hif_set_uapsd_info(wvif, wvif->uapsd_mask);
+		wfx_update_pm(wvif);
 	}
-
-out:
 	mutex_unlock(&wdev->conf_mutex);
 	return ret;
 }
 
-int wfx_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg)
-{
-	struct hif_req_set_pm_mode pm = *arg;
-	u16 uapsd_flags;
-	int ret;
-
-	if (wvif->state != WFX_STATE_STA || !wvif->bss_params.aid)
-		return 0;
-
-	memcpy(&uapsd_flags, &wvif->uapsd_info, sizeof(uapsd_flags));
-
-	if (uapsd_flags != 0)
-		pm.pm_mode.fast_psm = 0;
-
-	// Kernel disable PowerSave when multiple vifs are in use. In contrary,
-	// it is absolutly necessary to enable PowerSave for WF200
-	if (wvif_count(wvif->wdev) > 1) {
-		pm.pm_mode.enter_psm = 1;
-		pm.pm_mode.fast_psm = 0;
-	}
-
-	if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete,
-					 msecs_to_jiffies(300)))
-		dev_warn(wvif->wdev->dev,
-			 "timeout while waiting of set_pm_mode_complete\n");
-	ret = hif_set_pm(wvif, &pm);
-	// FIXME: why ?
-	if (-ETIMEDOUT == wvif->scan.status)
-		wvif->scan.status = 1;
-	return ret;
-}
-
 int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 {
 	struct wfx_dev *wdev = hw->priv;
@@ -413,56 +320,27 @@ int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 	return 0;
 }
 
-/* If successful, LOCKS the TX queue! */
 static int __wfx_flush(struct wfx_dev *wdev, bool drop)
 {
-	int ret;
-
 	for (;;) {
-		if (drop) {
+		if (drop)
 			wfx_tx_queues_clear(wdev);
-		} else {
-			ret = wait_event_timeout(
-				wdev->tx_queue_stats.wait_link_id_empty,
-				wfx_tx_queues_is_empty(wdev),
-				2 * HZ);
-		}
-
-		if (!drop && ret <= 0) {
-			ret = -ETIMEDOUT;
-			break;
-		}
-		ret = 0;
-
-		wfx_tx_lock_flush(wdev);
-		if (!wfx_tx_queues_is_empty(wdev)) {
-			/* Highly unlikely: WSM requeued frames. */
-			wfx_tx_unlock(wdev);
-			continue;
-		}
-		break;
+		if (wait_event_timeout(wdev->tx_queue_stats.wait_link_id_empty,
+				       wfx_tx_queues_is_empty(wdev),
+				       2 * HZ) <= 0)
+			return -ETIMEDOUT;
+		wfx_tx_flush(wdev);
+		if (wfx_tx_queues_is_empty(wdev))
+			return 0;
+		dev_warn(wdev->dev, "frames queued while flushing tx queues");
 	}
-	return ret;
 }
 
 void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		  u32 queues, bool drop)
 {
-	struct wfx_dev *wdev = hw->priv;
-	struct wfx_vif *wvif;
-
-	if (vif) {
-		wvif = (struct wfx_vif *) vif->drv_priv;
-		if (wvif->vif->type == NL80211_IFTYPE_MONITOR)
-			drop = true;
-		if (wvif->vif->type == NL80211_IFTYPE_AP &&
-		    !wvif->enable_beacon)
-			drop = true;
-	}
-
-	// FIXME: only flush requested vif
-	if (!__wfx_flush(wdev, drop))
-		wfx_tx_unlock(wdev);
+	// FIXME: only flush requested vif and queues
+	__wfx_flush(hw->priv, drop);
 }
 
 /* WSM callbacks */
@@ -476,7 +354,7 @@ static void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
 	int cqm_evt;
 
 	rcpi_rssi = raw_rcpi_rssi / 2 - 110;
-	if (rcpi_rssi <= wvif->cqm_rssi_thold)
+	if (rcpi_rssi <= wvif->vif->bss_conf.cqm_rssi_thold)
 		cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
 	else
 		cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
@@ -499,18 +377,9 @@ static void wfx_event_handler_work(struct work_struct *work)
 		switch (event->evt.event_id) {
 		case HIF_EVENT_IND_BSSLOST:
 			cancel_work_sync(&wvif->unjoin_work);
-			if (!down_trylock(&wvif->scan.lock)) {
-				wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
-				up(&wvif->scan.lock);
-			} else {
-				/* Scan is in progress. Delay reporting.
-				 * Scan complete will trigger bss_loss_work
-				 */
-				wvif->delayed_link_loss = 1;
-				/* Also start a watchdog. */
-				schedule_delayed_work(&wvif->bss_loss_work,
-						      5 * HZ);
-			}
+			mutex_lock(&wvif->scan_lock);
+			wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
+			mutex_unlock(&wvif->scan_lock);
 			break;
 		case HIF_EVENT_IND_BSSREGAINED:
 			wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
@@ -554,30 +423,10 @@ static void wfx_bss_params_work(struct work_struct *work)
 	mutex_unlock(&wvif->wdev->conf_mutex);
 }
 
-static void wfx_set_beacon_wakeup_period_work(struct work_struct *work)
-{
-	struct wfx_vif *wvif = container_of(work, struct wfx_vif,
-					    set_beacon_wakeup_period_work);
-
-	hif_set_beacon_wakeup_period(wvif, wvif->dtim_period,
-				     wvif->dtim_period);
-}
-
 static void wfx_do_unjoin(struct wfx_vif *wvif)
 {
 	mutex_lock(&wvif->wdev->conf_mutex);
 
-	if (atomic_read(&wvif->scan.in_progress)) {
-		if (wvif->delayed_unjoin)
-			dev_dbg(wvif->wdev->dev,
-				"delayed unjoin is already scheduled\n");
-		else
-			wvif->delayed_unjoin = true;
-		goto done;
-	}
-
-	wvif->delayed_link_loss = false;
-
 	if (!wvif->state)
 		goto done;
 
@@ -585,7 +434,6 @@ static void wfx_do_unjoin(struct wfx_vif *wvif)
 		goto done;
 
 	cancel_work_sync(&wvif->update_filtering_work);
-	cancel_work_sync(&wvif->set_beacon_wakeup_period_work);
 	wvif->state = WFX_STATE_PASSIVE;
 
 	/* Unjoin is a reset. */
@@ -593,8 +441,6 @@ static void wfx_do_unjoin(struct wfx_vif *wvif)
 	hif_keep_alive_period(wvif, 0);
 	hif_reset(wvif, false);
 	wfx_tx_policy_init(wvif);
-	hif_set_output_power(wvif, wvif->wdev->output_power * 10);
-	wvif->dtim_period = 0;
 	hif_set_macaddr(wvif, wvif->vif->addr);
 	wfx_free_event_queue(wvif);
 	cancel_work_sync(&wvif->event_handler_work);
@@ -606,8 +452,6 @@ static void wfx_do_unjoin(struct wfx_vif *wvif)
 	wvif->disable_beacon_filter = false;
 	wfx_update_filtering(wvif);
 	memset(&wvif->bss_params, 0, sizeof(wvif->bss_params));
-	wvif->setbssparams_done = false;
-	memset(&wvif->ht_info, 0, sizeof(wvif->ht_info));
 
 done:
 	mutex_unlock(&wvif->wdev->conf_mutex);
@@ -644,33 +488,21 @@ static void wfx_set_mfp(struct wfx_vif *wvif,
 	hif_set_mfp(wvif, mfpc, mfpr);
 }
 
-/* MUST be called with tx_lock held!  It will be unlocked for us. */
 static void wfx_do_join(struct wfx_vif *wvif)
 {
-	const u8 *bssid;
+	int ret;
+	const u8 *ssidie;
 	struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
 	struct cfg80211_bss *bss = NULL;
-	struct hif_req_join join = {
-		.mode = conf->ibss_joined ? HIF_MODE_IBSS : HIF_MODE_BSS,
-		.preamble_type = conf->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG,
-		.probe_for_join = 1,
-		.atim_window = 0,
-		.basic_rate_set = wfx_rate_mask_to_hw(wvif->wdev,
-						      conf->basic_rates),
-	};
 
-	if (wvif->channel->flags & IEEE80211_CHAN_NO_IR)
-		join.probe_for_join = 0;
+	wfx_tx_lock_flush(wvif->wdev);
 
 	if (wvif->state)
 		wfx_do_unjoin(wvif);
 
-	bssid = wvif->vif->bss_conf.bssid;
-
 	bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel,
-			       bssid, NULL, 0,
+			       conf->bssid, NULL, 0,
 			       IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
-
 	if (!bss && !conf->ibss_joined) {
 		wfx_tx_unlock(wvif->wdev);
 		return;
@@ -678,41 +510,15 @@ static void wfx_do_join(struct wfx_vif *wvif)
 
 	mutex_lock(&wvif->wdev->conf_mutex);
 
-	/* Under the conf lock: check scan status and
-	 * bail out if it is in progress.
-	 */
-	if (atomic_read(&wvif->scan.in_progress)) {
-		wfx_tx_unlock(wvif->wdev);
-		goto done_put;
-	}
-
-	/* Sanity check basic rates */
-	if (!join.basic_rate_set)
-		join.basic_rate_set = 7;
-
 	/* Sanity check beacon interval */
 	if (!wvif->beacon_int)
 		wvif->beacon_int = 1;
 
-	join.beacon_interval = wvif->beacon_int;
-
-	// DTIM period will be set on first Beacon
-	wvif->dtim_period = 0;
-
-	join.channel_number = wvif->channel->hw_value;
-	memcpy(join.bssid, bssid, sizeof(join.bssid));
-
-	if (!conf->ibss_joined) {
-		const u8 *ssidie;
-
-		rcu_read_lock();
+	rcu_read_lock();
+	if (!conf->ibss_joined)
 		ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
-		if (ssidie) {
-			join.ssid_length = ssidie[1];
-			memcpy(join.ssid, &ssidie[2], join.ssid_length);
-		}
-		rcu_read_unlock();
-	}
+	else
+		ssidie = NULL;
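+	// ssidie (if any) points to RCU-protected BSS data, so the RCU read
+	// lock is held until hif_join() below has consumed it.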
 
 	wfx_tx_flush(wvif->wdev);
 
@@ -723,7 +529,9 @@ static void wfx_do_join(struct wfx_vif *wvif)
 
 	/* Perform actual join */
 	wvif->wdev->tx_burst_idx = -1;
-	if (hif_join(wvif, &join)) {
+	ret = hif_join(wvif, conf, wvif->channel, ssidie);
+	rcu_read_unlock();
+	if (ret) {
 		ieee80211_connection_loss(wvif->vif);
 		wvif->join_complete_status = -1;
 		/* Tx lock still held, unjoin will clear it. */
@@ -749,7 +557,6 @@ static void wfx_do_join(struct wfx_vif *wvif)
 	}
 	wfx_update_filtering(wvif);
 
-done_put:
 	mutex_unlock(&wvif->wdev->conf_mutex);
 	if (bss)
 		cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
@@ -766,30 +573,26 @@ static void wfx_unjoin_work(struct work_struct *work)
 int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		struct ieee80211_sta *sta)
 {
-	struct wfx_dev *wdev = hw->priv;
 	struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
 	struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
-	struct wfx_link_entry *entry;
-	struct sk_buff *skb;
 
-	if (wvif->vif->type != NL80211_IFTYPE_AP)
-		return 0;
-
+	spin_lock_init(&sta_priv->lock);
 	sta_priv->vif_id = wvif->id;
-	sta_priv->link_id = wfx_find_link_id(wvif, sta->addr);
-	if (!sta_priv->link_id) {
-		dev_warn(wdev->dev, "mo more link-id available\n");
-		return -ENOENT;
-	}
 
-	entry = &wvif->link_id_db[sta_priv->link_id - 1];
+	// FIXME: in station mode, the current API interprets a new link-id as
+	// a TDLS peer.
+	if (vif->type == NL80211_IFTYPE_STATION)
+		return 0;
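+	// Allocate the lowest free link-id; bit 0 is reserved for multicast
+	// (see wfx_add_interface()).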
+	sta_priv->link_id = ffz(wvif->link_id_map);
+	wvif->link_id_map |= BIT(sta_priv->link_id);
+	WARN_ON(!sta_priv->link_id);
+	WARN_ON(sta_priv->link_id >= WFX_MAX_STA_IN_AP_MODE);
+	hif_map_link(wvif, sta->addr, 0, sta_priv->link_id);
+
 	spin_lock_bh(&wvif->ps_state_lock);
 	if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) ==
 					IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
 		wvif->sta_asleep_mask |= BIT(sta_priv->link_id);
-	entry->status = WFX_LINK_HARD;
-	while ((skb = skb_dequeue(&entry->rx_queue)))
-		ieee80211_rx_irqsafe(wdev->hw, skb);
 	spin_unlock_bh(&wvif->ps_state_lock);
 	return 0;
 }
@@ -797,225 +600,116 @@ int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		   struct ieee80211_sta *sta)
 {
-	struct wfx_dev *wdev = hw->priv;
 	struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
 	struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
-	struct wfx_link_entry *entry;
+	int i;
 
-	if (wvif->vif->type != NL80211_IFTYPE_AP || !sta_priv->link_id)
+	for (i = 0; i < ARRAY_SIZE(sta_priv->buffered); i++)
+		WARN(sta_priv->buffered[i], "releasing station while Tx is in progress");
+	// FIXME: see note in wfx_sta_add()
+	if (vif->type == NL80211_IFTYPE_STATION)
 		return 0;
-
-	entry = &wvif->link_id_db[sta_priv->link_id - 1];
-	spin_lock_bh(&wvif->ps_state_lock);
-	entry->status = WFX_LINK_RESERVE;
-	entry->timestamp = jiffies;
-	wfx_tx_lock(wdev);
-	if (!schedule_work(&wvif->link_id_work))
-		wfx_tx_unlock(wdev);
-	spin_unlock_bh(&wvif->ps_state_lock);
-	flush_work(&wvif->link_id_work);
+	// FIXME add a mutex?
+	hif_map_link(wvif, sta->addr, 1, sta_priv->link_id);
+	wvif->link_id_map &= ~BIT(sta_priv->link_id);
 	return 0;
 }
 
-static void wfx_set_cts_work(struct work_struct *work)
-{
-	struct wfx_vif *wvif = container_of(work, struct wfx_vif, set_cts_work);
-	u8 erp_ie[3] = { WLAN_EID_ERP_INFO, 1, 0 };
-	struct hif_ie_flags target_frame = {
-		.beacon = 1,
-	};
-
-	mutex_lock(&wvif->wdev->conf_mutex);
-	erp_ie[2] = wvif->erp_info;
-	mutex_unlock(&wvif->wdev->conf_mutex);
-
-	hif_erp_use_protection(wvif, erp_ie[2] & WLAN_ERP_USE_PROTECTION);
-
-	if (wvif->vif->type != NL80211_IFTYPE_STATION)
-		hif_update_ie(wvif, &target_frame, erp_ie, sizeof(erp_ie));
-}
-
 static int wfx_start_ap(struct wfx_vif *wvif)
 {
 	int ret;
-	struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
-	struct hif_req_start start = {
-		.channel_number = wvif->channel->hw_value,
-		.beacon_interval = conf->beacon_int,
-		.dtim_period = conf->dtim_period,
-		.preamble_type = conf->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG,
-		.basic_rate_set = wfx_rate_mask_to_hw(wvif->wdev,
-						      conf->basic_rates),
-	};
 
-	memset(start.ssid, 0, sizeof(start.ssid));
-	if (!conf->hidden_ssid) {
-		start.ssid_length = conf->ssid_len;
-		memcpy(start.ssid, conf->ssid, start.ssid_length);
-	}
-
-	wvif->beacon_int = conf->beacon_int;
-	wvif->dtim_period = conf->dtim_period;
-
-	memset(&wvif->link_id_db, 0, sizeof(wvif->link_id_db));
-
+	wvif->beacon_int = wvif->vif->bss_conf.beacon_int;
 	wvif->wdev->tx_burst_idx = -1;
-	ret = hif_start(wvif, &start);
-	if (!ret)
-		ret = wfx_upload_keys(wvif);
-	if (!ret) {
-		if (wvif_count(wvif->wdev) <= 1)
-			hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
-		wvif->state = WFX_STATE_AP;
-		wfx_update_filtering(wvif);
-	}
-	return ret;
+	ret = hif_start(wvif, &wvif->vif->bss_conf, wvif->channel);
+	if (ret)
+		return ret;
+	ret = wfx_upload_keys(wvif);
+	if (ret)
+		return ret;
+	if (wvif_count(wvif->wdev) <= 1)
+		hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+	wvif->state = WFX_STATE_AP;
+	wfx_update_filtering(wvif);
+	return 0;
 }
 
 static int wfx_update_beaconing(struct wfx_vif *wvif)
 {
-	struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
-
-	if (wvif->vif->type == NL80211_IFTYPE_AP) {
-		/* TODO: check if changed channel, band */
-		if (wvif->state != WFX_STATE_AP ||
-		    wvif->beacon_int != conf->beacon_int) {
-			wfx_tx_lock_flush(wvif->wdev);
-			if (wvif->state != WFX_STATE_PASSIVE) {
-				hif_reset(wvif, false);
-				wfx_tx_policy_init(wvif);
-			}
-			wvif->state = WFX_STATE_PASSIVE;
-			wfx_start_ap(wvif);
-			wfx_tx_unlock(wvif->wdev);
-		} else {
-		}
-	}
+	if (wvif->vif->type != NL80211_IFTYPE_AP)
+		return 0;
+	if (wvif->state == WFX_STATE_AP &&
+	    wvif->beacon_int == wvif->vif->bss_conf.beacon_int)
+		return 0;
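+	// Either the AP is not started yet or the beacon interval has changed:
+	// reset the interface and restart the AP with the current parameters.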
+	wfx_tx_lock_flush(wvif->wdev);
+	hif_reset(wvif, false);
+	wfx_tx_policy_init(wvif);
+	wvif->state = WFX_STATE_PASSIVE;
+	wfx_start_ap(wvif);
+	wfx_tx_unlock(wvif->wdev);
 	return 0;
 }
 
-static int wfx_upload_beacon(struct wfx_vif *wvif)
+static int wfx_upload_ap_templates(struct wfx_vif *wvif)
 {
-	int ret = 0;
-	struct sk_buff *skb = NULL;
-	struct ieee80211_mgmt *mgmt;
-	struct hif_mib_template_frame *p;
+	struct sk_buff *skb;
 
 	if (wvif->vif->type == NL80211_IFTYPE_STATION ||
 	    wvif->vif->type == NL80211_IFTYPE_MONITOR ||
 	    wvif->vif->type == NL80211_IFTYPE_UNSPECIFIED)
-		goto done;
+		return 0;
 
 	skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif);
-
 	if (!skb)
 		return -ENOMEM;
-
-	p = (struct hif_mib_template_frame *) skb_push(skb, 4);
-	p->frame_type = HIF_TMPLT_BCN;
-	p->init_rate = API_RATE_INDEX_B_1MBPS; /* 1Mbps DSSS */
-	p->frame_length = cpu_to_le16(skb->len - 4);
-
-	ret = hif_set_template_frame(wvif, p);
-
-	skb_pull(skb, 4);
-
-	if (ret)
-		goto done;
-	/* TODO: Distill probe resp; remove TIM and any other beacon-specific
-	 * IEs
-	 */
-	mgmt = (void *)skb->data;
-	mgmt->frame_control =
-		cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
-
-	p->frame_type = HIF_TMPLT_PRBRES;
-
-	ret = hif_set_template_frame(wvif, p);
-	wfx_fwd_probe_req(wvif, false);
-
-done:
+	hif_set_template_frame(wvif, skb, HIF_TMPLT_BCN,
+			       API_RATE_INDEX_B_1MBPS);
 	dev_kfree_skb(skb);
-	return ret;
-}
 
-static int wfx_is_ht(const struct wfx_ht_info *ht_info)
-{
-	return ht_info->channel_type != NL80211_CHAN_NO_HT;
-}
-
-static int wfx_ht_greenfield(const struct wfx_ht_info *ht_info)
-{
-	return wfx_is_ht(ht_info) &&
-		(ht_info->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
-		!(ht_info->operation_mode &
-		  IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-}
-
-static int wfx_ht_ampdu_density(const struct wfx_ht_info *ht_info)
-{
-	if (!wfx_is_ht(ht_info))
-		return 0;
-	return ht_info->ht_cap.ampdu_density;
+	skb = ieee80211_proberesp_get(wvif->wdev->hw, wvif->vif);
+	if (!skb)
+		return -ENOMEM;
+	hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBRES,
+			       API_RATE_INDEX_B_1MBPS);
+	dev_kfree_skb(skb);
+	return 0;
 }
 
 static void wfx_join_finalize(struct wfx_vif *wvif,
 			      struct ieee80211_bss_conf *info)
 {
 	struct ieee80211_sta *sta = NULL;
-	struct hif_mib_set_association_mode association_mode = { };
 
-	if (info->dtim_period)
-		wvif->dtim_period = info->dtim_period;
 	wvif->beacon_int = info->beacon_int;
-
-	rcu_read_lock();
+	rcu_read_lock(); // protect sta
 	if (info->bssid && !info->ibss_joined)
 		sta = ieee80211_find_sta(wvif->vif, info->bssid);
-	if (sta) {
-		wvif->ht_info.ht_cap = sta->ht_cap;
+	if (sta)
 		wvif->bss_params.operational_rate_set =
 			wfx_rate_mask_to_hw(wvif->wdev, sta->supp_rates[wvif->channel->band]);
-		wvif->ht_info.operation_mode = info->ht_operation_mode;
-	} else {
-		memset(&wvif->ht_info, 0, sizeof(wvif->ht_info));
+	else
 		wvif->bss_params.operational_rate_set = -1;
-	}
-	rcu_read_unlock();
-
-	/* Non Greenfield stations present */
-	if (wvif->ht_info.operation_mode &
-	    IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
+	if (sta &&
+	    info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
 		hif_dual_cts_protection(wvif, true);
 	else
 		hif_dual_cts_protection(wvif, false);
 
-	association_mode.preambtype_use = 1;
-	association_mode.mode = 1;
-	association_mode.rateset = 1;
-	association_mode.spacing = 1;
-	association_mode.preamble_type = info->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG;
-	association_mode.basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, info->basic_rates));
-	association_mode.mixed_or_greenfield_type = wfx_ht_greenfield(&wvif->ht_info);
-	association_mode.mpdu_start_spacing = wfx_ht_ampdu_density(&wvif->ht_info);
-
 	wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
 	cancel_work_sync(&wvif->unjoin_work);
 
 	wvif->bss_params.beacon_lost_count = 20;
 	wvif->bss_params.aid = info->aid;
 
-	if (wvif->dtim_period < 1)
-		wvif->dtim_period = 1;
-
-	hif_set_association_mode(wvif, &association_mode);
+	hif_set_association_mode(wvif, info, sta ? &sta->ht_cap : NULL);
+	rcu_read_unlock();
 
 	if (!info->ibss_joined) {
 		hif_keep_alive_period(wvif, 30 /* sec */);
 		hif_set_bss_params(wvif, &wvif->bss_params);
-		wvif->setbssparams_done = true;
-		wfx_set_beacon_wakeup_period_work(&wvif->set_beacon_wakeup_period_work);
-		wfx_set_pm(wvif, &wvif->powersave_mode);
+		hif_set_beacon_wakeup_period(wvif, info->dtim_period,
+					     info->dtim_period);
+		wfx_update_pm(wvif);
 	}
 }
 
@@ -1028,48 +722,40 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw,
 	struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
 	bool do_join = false;
 	int i;
-	int nb_arp_addr;
 
 	mutex_lock(&wdev->conf_mutex);
 
 	/* TODO: BSS_CHANGED_QOS */
 	if (changed & BSS_CHANGED_ARP_FILTER) {
-		struct hif_mib_arp_ip_addr_table filter = { };
-
-		nb_arp_addr = info->arp_addr_cnt;
-		if (nb_arp_addr <= 0 || nb_arp_addr > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
-			nb_arp_addr = 0;
-
 		for (i = 0; i < HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES; i++) {
-			filter.condition_idx = i;
-			if (i < nb_arp_addr) {
-				// Caution: type of arp_addr_list[i] is __be32
-				memcpy(filter.ipv4_address,
-				       &info->arp_addr_list[i],
-				       sizeof(filter.ipv4_address));
-				filter.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
-			} else {
-				filter.arp_enable = HIF_ARP_NS_FILTERING_DISABLE;
-			}
-			hif_set_arp_ipv4_filter(wvif, &filter);
+			__be32 *arp_addr = &info->arp_addr_list[i];
+
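+			// Entries beyond arp_addr_cnt are passed as NULL so
+			// that the corresponding filter slots are disabled.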
+			if (info->arp_addr_cnt > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
+				arp_addr = NULL;
+			if (i >= info->arp_addr_cnt)
+				arp_addr = NULL;
+			hif_set_arp_ipv4_filter(wvif, i, arp_addr);
 		}
 	}
 
-	if (changed &
-	    (BSS_CHANGED_BEACON | BSS_CHANGED_AP_PROBE_RESP |
-	     BSS_CHANGED_BSSID | BSS_CHANGED_SSID | BSS_CHANGED_IBSS)) {
+	if (changed & BSS_CHANGED_BEACON ||
+	    changed & BSS_CHANGED_AP_PROBE_RESP ||
+	    changed & BSS_CHANGED_BSSID ||
+	    changed & BSS_CHANGED_SSID ||
+	    changed & BSS_CHANGED_IBSS) {
 		wvif->beacon_int = info->beacon_int;
 		wfx_update_beaconing(wvif);
-		wfx_upload_beacon(wvif);
+		wfx_upload_ap_templates(wvif);
+		wfx_fwd_probe_req(wvif, false);
 	}
 
 	if (changed & BSS_CHANGED_BEACON_ENABLED &&
-	    wvif->state != WFX_STATE_IBSS) {
-		if (wvif->enable_beacon != info->enable_beacon) {
-			hif_beacon_transmit(wvif, info->enable_beacon);
-			wvif->enable_beacon = info->enable_beacon;
-		}
-	}
+	    wvif->state != WFX_STATE_IBSS)
+		hif_beacon_transmit(wvif, info->enable_beacon);
+
+	if (changed & BSS_CHANGED_BEACON_INFO)
+		hif_set_beacon_wakeup_period(wvif, info->dtim_period,
+					     info->dtim_period);
 
 	/* assoc/disassoc, or maybe AID changed */
 	if (changed & BSS_CHANGED_ASSOC) {
@@ -1095,10 +781,11 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw,
 		if (changed & BSS_CHANGED_BSSID)
 			do_join = true;
 
-		if (changed &
-		    (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID |
-		     BSS_CHANGED_IBSS | BSS_CHANGED_BASIC_RATES |
-		     BSS_CHANGED_HT)) {
+		if (changed & BSS_CHANGED_ASSOC ||
+		    changed & BSS_CHANGED_BSSID ||
+		    changed & BSS_CHANGED_IBSS ||
+		    changed & BSS_CHANGED_BASIC_RATES ||
+		    changed & BSS_CHANGED_HT) {
 			if (info->assoc) {
 				if (wvif->state < WFX_STATE_PRE_STA) {
 					ieee80211_connection_loss(vif);
@@ -1119,106 +806,50 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw,
 		}
 	}
 
-	/* ERP Protection */
-	if (changed & (BSS_CHANGED_ASSOC |
-		       BSS_CHANGED_ERP_CTS_PROT |
-		       BSS_CHANGED_ERP_PREAMBLE)) {
-		u32 prev_erp_info = wvif->erp_info;
+	if (changed & BSS_CHANGED_ASSOC ||
+	    changed & BSS_CHANGED_ERP_CTS_PROT ||
+	    changed & BSS_CHANGED_ERP_PREAMBLE) {
+		u8 erp_ie[3] = { WLAN_EID_ERP_INFO, 1, 0 };
 
+		hif_erp_use_protection(wvif, info->use_cts_prot);
 		if (info->use_cts_prot)
-			wvif->erp_info |= WLAN_ERP_USE_PROTECTION;
-		else if (!(prev_erp_info & WLAN_ERP_NON_ERP_PRESENT))
-			wvif->erp_info &= ~WLAN_ERP_USE_PROTECTION;
-
+			erp_ie[2] |= WLAN_ERP_USE_PROTECTION;
 		if (info->use_short_preamble)
-			wvif->erp_info |= WLAN_ERP_BARKER_PREAMBLE;
-		else
-			wvif->erp_info &= ~WLAN_ERP_BARKER_PREAMBLE;
-
-		if (prev_erp_info != wvif->erp_info)
-			schedule_work(&wvif->set_cts_work);
+			erp_ie[2] |= WLAN_ERP_BARKER_PREAMBLE;
+		if (wvif->vif->type != NL80211_IFTYPE_STATION)
+			hif_update_ie_beacon(wvif, erp_ie, sizeof(erp_ie));
 	}
 
-	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_SLOT))
+	if (changed & BSS_CHANGED_ASSOC || changed & BSS_CHANGED_ERP_SLOT)
 		hif_slot_time(wvif, info->use_short_slot ? 9 : 20);
 
-	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_CQM)) {
-		struct hif_mib_rcpi_rssi_threshold th = {
-			.rolling_average_count = 8,
-			.detection = 1,
-		};
+	if (changed & BSS_CHANGED_ASSOC || changed & BSS_CHANGED_CQM)
+		hif_set_rcpi_rssi_threshold(wvif, info->cqm_rssi_thold,
+					    info->cqm_rssi_hyst);
 
-		wvif->cqm_rssi_thold = info->cqm_rssi_thold;
+	if (changed & BSS_CHANGED_TXPOWER)
+		hif_set_output_power(wvif, info->txpower);
 
-		if (!info->cqm_rssi_thold && !info->cqm_rssi_hyst) {
-			th.upperthresh = 1;
-			th.lowerthresh = 1;
-		} else {
-			/* FIXME It's not a correct way of setting threshold.
-			 * Upper and lower must be set equal here and adjusted
-			 * in callback. However current implementation is much
-			 * more reliable and stable.
-			 */
-			/* RSSI: signed Q8.0, RCPI: unsigned Q7.1
-			 * RSSI = RCPI / 2 - 110
-			 */
-			th.upper_threshold = info->cqm_rssi_thold + info->cqm_rssi_hyst;
-			th.upper_threshold = (th.upper_threshold + 110) * 2;
-			th.lower_threshold = info->cqm_rssi_thold;
-			th.lower_threshold = (th.lower_threshold + 110) * 2;
-		}
-		hif_set_rcpi_rssi_threshold(wvif, &th);
-	}
+	if (changed & BSS_CHANGED_PS)
+		wfx_update_pm(wvif);
 
-	if (changed & BSS_CHANGED_TXPOWER &&
-	    info->txpower != wdev->output_power) {
-		wdev->output_power = info->txpower;
-		hif_set_output_power(wvif, wdev->output_power * 10);
-	}
 	mutex_unlock(&wdev->conf_mutex);
 
-	if (do_join) {
-		wfx_tx_lock_flush(wdev);
-		wfx_do_join(wvif); /* Will unlock it for us */
-	}
+	if (do_join)
+		wfx_do_join(wvif);
 }
 
-static void wfx_ps_notify(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd,
-			  int link_id)
+static void wfx_ps_notify_sta(struct wfx_vif *wvif,
+			      enum sta_notify_cmd notify_cmd, int link_id)
 {
-	u32 bit, prev;
-
 	spin_lock_bh(&wvif->ps_state_lock);
-	/* Zero link id means "for all link IDs" */
-	if (link_id) {
-		bit = BIT(link_id);
-	} else if (notify_cmd != STA_NOTIFY_AWAKE) {
-		dev_warn(wvif->wdev->dev, "unsupported notify command\n");
-		bit = 0;
-	} else {
-		bit = wvif->link_id_map;
-	}
-	prev = wvif->sta_asleep_mask & bit;
-
-	switch (notify_cmd) {
-	case STA_NOTIFY_SLEEP:
-		if (!prev) {
-			if (wvif->mcast_buffered && !wvif->sta_asleep_mask)
-				schedule_work(&wvif->mcast_start_work);
-			wvif->sta_asleep_mask |= bit;
-		}
-		break;
-	case STA_NOTIFY_AWAKE:
-		if (prev) {
-			wvif->sta_asleep_mask &= ~bit;
-			wvif->pspoll_mask &= ~bit;
-			if (link_id && !wvif->sta_asleep_mask)
-				schedule_work(&wvif->mcast_stop_work);
-			wfx_bh_request_tx(wvif->wdev);
-		}
-		break;
-	}
+	if (notify_cmd == STA_NOTIFY_SLEEP)
+		wvif->sta_asleep_mask |= BIT(link_id);
+	else // notify_cmd == STA_NOTIFY_AWAKE
+		wvif->sta_asleep_mask &= ~BIT(link_id);
 	spin_unlock_bh(&wvif->ps_state_lock);
+	if (notify_cmd == STA_NOTIFY_AWAKE)
+		wfx_bh_request_tx(wvif->wdev);
 }
 
 void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -1227,23 +858,19 @@ void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
 	struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
 
-	wfx_ps_notify(wvif, notify_cmd, sta_priv->link_id);
+	wfx_ps_notify_sta(wvif, notify_cmd, sta_priv->link_id);
 }
 
-static int wfx_set_tim_impl(struct wfx_vif *wvif, bool aid0_bit_set)
+static int wfx_update_tim(struct wfx_vif *wvif)
 {
 	struct sk_buff *skb;
-	struct hif_ie_flags target_frame = {
-		.beacon = 1,
-	};
 	u16 tim_offset, tim_length;
 	u8 *tim_ptr;
 
 	skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif,
 				       &tim_offset, &tim_length);
 	if (!skb) {
-		if (!__wfx_flush(wvif->wdev, true))
-			wfx_tx_unlock(wvif->wdev);
+		__wfx_flush(wvif->wdev, true);
 		return -ENOENT;
 	}
 	tim_ptr = skb->data + tim_offset;
@@ -1255,23 +882,23 @@ static int wfx_set_tim_impl(struct wfx_vif *wvif, bool aid0_bit_set)
 		tim_ptr[2] = 0;
 
 		/* Set/reset aid0 bit */
-		if (aid0_bit_set)
+		if (wfx_tx_queues_get_after_dtim(wvif))
 			tim_ptr[4] |= 1;
 		else
 			tim_ptr[4] &= ~1;
 	}
 
-	hif_update_ie(wvif, &target_frame, tim_ptr, tim_length);
+	hif_update_ie_beacon(wvif, tim_ptr, tim_length);
 	dev_kfree_skb(skb);
 
 	return 0;
 }
 
-static void wfx_set_tim_work(struct work_struct *work)
+static void wfx_update_tim_work(struct work_struct *work)
 {
-	struct wfx_vif *wvif = container_of(work, struct wfx_vif, set_tim_work);
+	struct wfx_vif *wvif = container_of(work, struct wfx_vif, update_tim_work);
 
-	wfx_set_tim_impl(wvif, wvif->aid0_bit_set);
+	wfx_update_tim(wvif);
 }
 
 int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
@@ -1280,50 +907,16 @@ int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
 	struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *) &sta->drv_priv;
 	struct wfx_vif *wvif = wdev_to_wvif(wdev, sta_dev->vif_id);
 
-	schedule_work(&wvif->set_tim_work);
+	schedule_work(&wvif->update_tim_work);
 	return 0;
 }
 
-static void wfx_mcast_start_work(struct work_struct *work)
+void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd)
 {
-	struct wfx_vif *wvif = container_of(work, struct wfx_vif,
-					    mcast_start_work);
-	long tmo = wvif->dtim_period * TU_TO_JIFFIES(wvif->beacon_int + 20);
-
-	cancel_work_sync(&wvif->mcast_stop_work);
-	if (!wvif->aid0_bit_set) {
-		wfx_tx_lock_flush(wvif->wdev);
-		wfx_set_tim_impl(wvif, true);
-		wvif->aid0_bit_set = true;
-		mod_timer(&wvif->mcast_timeout, jiffies + tmo);
-		wfx_tx_unlock(wvif->wdev);
-	}
-}
-
-static void wfx_mcast_stop_work(struct work_struct *work)
-{
-	struct wfx_vif *wvif = container_of(work, struct wfx_vif,
-					    mcast_stop_work);
-
-	if (wvif->aid0_bit_set) {
-		del_timer_sync(&wvif->mcast_timeout);
-		wfx_tx_lock_flush(wvif->wdev);
-		wvif->aid0_bit_set = false;
-		wfx_set_tim_impl(wvif, false);
-		wfx_tx_unlock(wvif->wdev);
-	}
-}
-
-static void wfx_mcast_timeout(struct timer_list *t)
-{
-	struct wfx_vif *wvif = from_timer(wvif, t, mcast_timeout);
-
-	dev_warn(wvif->wdev->dev, "multicast delivery timeout\n");
-	spin_lock_bh(&wvif->ps_state_lock);
-	wvif->mcast_tx = wvif->aid0_bit_set && wvif->mcast_buffered;
-	if (wvif->mcast_tx)
-		wfx_bh_request_tx(wvif->wdev);
-	spin_unlock_bh(&wvif->ps_state_lock);
+	WARN(!wfx_tx_queues_get_after_dtim(wvif), "incorrect sequence");
+	WARN(wvif->after_dtim_tx_allowed, "incorrect sequence");
+	wvif->after_dtim_tx_allowed = true;
+	wfx_bh_request_tx(wvif->wdev);
 }
 
 int wfx_ampdu_action(struct ieee80211_hw *hw,
@@ -1341,35 +934,6 @@ int wfx_ampdu_action(struct ieee80211_hw *hw,
 	return -ENOTSUPP;
 }
 
-void wfx_suspend_resume(struct wfx_vif *wvif,
-			struct hif_ind_suspend_resume_tx *arg)
-{
-	if (arg->suspend_resume_flags.bc_mc_only) {
-		bool cancel_tmo = false;
-
-		spin_lock_bh(&wvif->ps_state_lock);
-		if (!arg->suspend_resume_flags.resume)
-			wvif->mcast_tx = false;
-		else
-			wvif->mcast_tx = wvif->aid0_bit_set &&
-					 wvif->mcast_buffered;
-		if (wvif->mcast_tx) {
-			cancel_tmo = true;
-			wfx_bh_request_tx(wvif->wdev);
-		}
-		spin_unlock_bh(&wvif->ps_state_lock);
-		if (cancel_tmo)
-			del_timer_sync(&wvif->mcast_timeout);
-	} else if (arg->suspend_resume_flags.resume) {
-		// FIXME: should change each station status independently
-		wfx_ps_notify(wvif, STA_NOTIFY_AWAKE, 0);
-		wfx_bh_request_tx(wvif->wdev);
-	} else {
-		// FIXME: should change each station status independently
-		wfx_ps_notify(wvif, STA_NOTIFY_SLEEP, 0);
-	}
-}
-
 int wfx_add_chanctx(struct ieee80211_hw *hw,
 		    struct ieee80211_chanctx_conf *conf)
 {
@@ -1395,7 +959,6 @@ int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 
 	WARN(wvif->channel, "channel overwrite");
 	wvif->channel = ch;
-	wvif->ht_info.channel_type = cfg80211_get_chandef_type(&conf->def);
 
 	return 0;
 }
@@ -1413,97 +976,14 @@ void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
 
 int wfx_config(struct ieee80211_hw *hw, u32 changed)
 {
-	int ret = 0;
-	struct wfx_dev *wdev = hw->priv;
-	struct ieee80211_conf *conf = &hw->conf;
-	struct wfx_vif *wvif;
-
-	// FIXME: Interface id should not been hardcoded
-	wvif = wdev_to_wvif(wdev, 0);
-	if (!wvif) {
-		WARN(1, "interface 0 does not exist anymore");
-		return 0;
-	}
-
-	down(&wvif->scan.lock);
-	mutex_lock(&wdev->conf_mutex);
-	if (changed & IEEE80211_CONF_CHANGE_POWER) {
-		wdev->output_power = conf->power_level;
-		hif_set_output_power(wvif, wdev->output_power * 10);
-	}
-
-	if (changed & IEEE80211_CONF_CHANGE_PS) {
-		wvif = NULL;
-		while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
-			memset(&wvif->powersave_mode, 0,
-			       sizeof(wvif->powersave_mode));
-			if (conf->flags & IEEE80211_CONF_PS) {
-				wvif->powersave_mode.pm_mode.enter_psm = 1;
-				if (conf->dynamic_ps_timeout > 0) {
-					wvif->powersave_mode.pm_mode.fast_psm = 1;
-					/*
-					 * Firmware does not support more than
-					 * 128ms
-					 */
-					wvif->powersave_mode.fast_psm_idle_period =
-						min(conf->dynamic_ps_timeout *
-						    2, 255);
-				}
-			}
-			if (wvif->state == WFX_STATE_STA && wvif->bss_params.aid)
-				wfx_set_pm(wvif, &wvif->powersave_mode);
-		}
-		wvif = wdev_to_wvif(wdev, 0);
-	}
-
-	mutex_unlock(&wdev->conf_mutex);
-	up(&wvif->scan.lock);
-	return ret;
+	return 0;
 }
 
 int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
-	int i;
+	int i, ret = 0;
 	struct wfx_dev *wdev = hw->priv;
 	struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
-	// FIXME: parameters are set by kernel juste after interface_add.
-	// Keep struct hif_req_edca_queue_params blank?
-	struct hif_req_edca_queue_params default_edca_params[] = {
-		[IEEE80211_AC_VO] = {
-			.queue_id = HIF_QUEUE_ID_VOICE,
-			.aifsn = 2,
-			.cw_min = 3,
-			.cw_max = 7,
-			.tx_op_limit = TXOP_UNIT * 47,
-		},
-		[IEEE80211_AC_VI] = {
-			.queue_id = HIF_QUEUE_ID_VIDEO,
-			.aifsn = 2,
-			.cw_min = 7,
-			.cw_max = 15,
-			.tx_op_limit = TXOP_UNIT * 94,
-		},
-		[IEEE80211_AC_BE] = {
-			.queue_id = HIF_QUEUE_ID_BESTEFFORT,
-			.aifsn = 3,
-			.cw_min = 15,
-			.cw_max = 1023,
-			.tx_op_limit = TXOP_UNIT * 0,
-		},
-		[IEEE80211_AC_BK] = {
-			.queue_id = HIF_QUEUE_ID_BACKGROUND,
-			.aifsn = 7,
-			.cw_min = 15,
-			.cw_max = 1023,
-			.tx_op_limit = TXOP_UNIT * 0,
-		},
-	};
-
-	BUILD_BUG_ON(ARRAY_SIZE(default_edca_params) != ARRAY_SIZE(wvif->edca.params));
-	if (wfx_api_older_than(wdev, 2, 0)) {
-		default_edca_params[IEEE80211_AC_BE].queue_id = HIF_QUEUE_ID_BACKGROUND;
-		default_edca_params[IEEE80211_AC_BK].queue_id = HIF_QUEUE_ID_BESTEFFORT;
-	}
 
 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
 			     IEEE80211_VIF_SUPPORTS_UAPSD |
@@ -1536,51 +1016,37 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 	wvif->vif = vif;
 	wvif->wdev = wdev;
 
-	INIT_WORK(&wvif->link_id_work, wfx_link_id_work);
-	INIT_DELAYED_WORK(&wvif->link_id_gc_work, wfx_link_id_gc_work);
-
+	wvif->link_id_map = 1; // link-id 0 is reserved for multicast
 	spin_lock_init(&wvif->ps_state_lock);
-	INIT_WORK(&wvif->set_tim_work, wfx_set_tim_work);
+	INIT_WORK(&wvif->update_tim_work, wfx_update_tim_work);
 
-	INIT_WORK(&wvif->mcast_start_work, wfx_mcast_start_work);
-	INIT_WORK(&wvif->mcast_stop_work, wfx_mcast_stop_work);
-	timer_setup(&wvif->mcast_timeout, wfx_mcast_timeout, 0);
+	memset(&wvif->bss_params, 0, sizeof(wvif->bss_params));
 
-	wvif->setbssparams_done = false;
 	mutex_init(&wvif->bss_loss_lock);
 	INIT_DELAYED_WORK(&wvif->bss_loss_work, wfx_bss_loss_work);
 
 	wvif->wep_default_key_id = -1;
 	INIT_WORK(&wvif->wep_key_work, wfx_wep_key_work);
 
-	sema_init(&wvif->scan.lock, 1);
-	INIT_WORK(&wvif->scan.work, wfx_scan_work);
-	INIT_DELAYED_WORK(&wvif->scan.timeout, wfx_scan_timeout);
-
 	spin_lock_init(&wvif->event_queue_lock);
 	INIT_LIST_HEAD(&wvif->event_queue);
 	INIT_WORK(&wvif->event_handler_work, wfx_event_handler_work);
 
 	init_completion(&wvif->set_pm_mode_complete);
 	complete(&wvif->set_pm_mode_complete);
-	INIT_WORK(&wvif->set_beacon_wakeup_period_work,
-		  wfx_set_beacon_wakeup_period_work);
 	INIT_WORK(&wvif->update_filtering_work, wfx_update_filtering_work);
 	INIT_WORK(&wvif->bss_params_work, wfx_bss_params_work);
-	INIT_WORK(&wvif->set_cts_work, wfx_set_cts_work);
 	INIT_WORK(&wvif->unjoin_work, wfx_unjoin_work);
+	INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
+
+	mutex_init(&wvif->scan_lock);
+	init_completion(&wvif->scan_complete);
+	INIT_WORK(&wvif->scan_work, wfx_hw_scan_work);
 
 	INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
 	mutex_unlock(&wdev->conf_mutex);
 
 	hif_set_macaddr(wvif, vif->addr);
-	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-		memcpy(&wvif->edca.params[i], &default_edca_params[i],
-		       sizeof(default_edca_params[i]));
-		wvif->edca.uapsd_enable[i] = false;
-		hif_set_edca_queue_params(wvif, &wvif->edca.params[i]);
-	}
-	wfx_set_uapsd_param(wvif, &wvif->edca);
 
 	wfx_tx_policy_init(wvif);
 	wvif = NULL;
@@ -1591,9 +1057,9 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 		else
 			hif_set_block_ack_policy(wvif, 0x00, 0x00);
 		// Combo force powersave mode. We can re-enable it now
-		wfx_set_pm(wvif, &wvif->powersave_mode);
+		ret = wfx_update_pm(wvif);
 	}
-	return 0;
+	return ret;
 }
 
 void wfx_remove_interface(struct ieee80211_hw *hw,
@@ -1601,15 +1067,11 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
 {
 	struct wfx_dev *wdev = hw->priv;
 	struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
-	int i;
 
-	// If scan is in progress, stop it
-	while (down_trylock(&wvif->scan.lock))
-		schedule();
-	up(&wvif->scan.lock);
 	wait_for_completion_timeout(&wvif->set_pm_mode_complete, msecs_to_jiffies(300));
 
 	mutex_lock(&wdev->conf_mutex);
+	WARN(wvif->link_id_map != 1, "corrupted state");
 	switch (wvif->state) {
 	case WFX_STATE_PRE_STA:
 	case WFX_STATE_STA:
@@ -1619,19 +1081,7 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
 			wfx_tx_unlock(wdev);
 		break;
 	case WFX_STATE_AP:
-		for (i = 0; wvif->link_id_map; ++i) {
-			if (wvif->link_id_map & BIT(i)) {
-				wfx_unmap_link(wvif, i);
-				wvif->link_id_map &= ~BIT(i);
-			}
-		}
-		memset(wvif->link_id_db, 0, sizeof(wvif->link_id_db));
 		wvif->sta_asleep_mask = 0;
-		wvif->enable_beacon = false;
-		wvif->mcast_tx = false;
-		wvif->aid0_bit_set = false;
-		wvif->mcast_buffered = false;
-		wvif->pspoll_mask = 0;
 		/* reset.link_id = 0; */
 		hif_reset(wvif, false);
 		break;
@@ -1646,12 +1096,8 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
 	/* FIXME: In add to reset MAC address, try to reset interface */
 	hif_set_macaddr(wvif, NULL);
 
-	cancel_delayed_work_sync(&wvif->scan.timeout);
-
 	wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
 	cancel_work_sync(&wvif->unjoin_work);
-	cancel_delayed_work_sync(&wvif->link_id_gc_work);
-	del_timer_sync(&wvif->mcast_timeout);
 	wfx_free_event_queue(wvif);
 
 	wdev->vif[wvif->id] = NULL;
@@ -1666,7 +1112,7 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
 		else
 			hif_set_block_ack_policy(wvif, 0x00, 0x00);
 		// Combo force powersave mode. We can re-enable it now
-		wfx_set_pm(wvif, &wvif->powersave_mode);
+		wfx_update_pm(wvif);
 	}
 }
 
diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h
index 4ccf1b1..cf99a8a 100644
--- a/drivers/staging/wfx/sta.h
+++ b/drivers/staging/wfx/sta.h
@@ -23,23 +23,11 @@ enum wfx_state {
 	WFX_STATE_AP,
 };
 
-struct wfx_ht_info {
-	struct ieee80211_sta_ht_cap ht_cap;
-	enum nl80211_channel_type channel_type;
-	u16 operation_mode;
-};
-
 struct wfx_hif_event {
 	struct list_head link;
 	struct hif_ind_event evt;
 };
 
-struct wfx_edca_params {
-	/* NOTE: index is a linux queue id. */
-	struct hif_req_edca_queue_params params[IEEE80211_NUM_ACS];
-	bool uapsd_enable[IEEE80211_NUM_ACS];
-};
-
 struct wfx_grp_addr_table {
 	bool enable;
 	int num_addresses;
@@ -49,6 +37,9 @@ struct wfx_grp_addr_table {
 struct wfx_sta_priv {
 	int link_id;
 	int vif_id;
+	u8 buffered[IEEE80211_NUM_TIDS];
+	// Ensure atomicity of "buffered" and calls to ieee80211_sta_set_buffered()
+	spinlock_t lock;
 };
 
 // mac80211 interface
@@ -91,13 +82,12 @@ void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
 			      struct ieee80211_chanctx_conf *conf);
 
 // WSM Callbacks
-void wfx_suspend_resume(struct wfx_vif *wvif,
-			struct hif_ind_suspend_resume_tx *arg);
+void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd cmd);
 
 // Other Helpers
 void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad);
 void wfx_update_filtering(struct wfx_vif *wvif);
-int wfx_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg);
 int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable);
+u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates);
 
 #endif /* WFX_STA_H */
diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h
index 3f6198a..30c6a13 100644
--- a/drivers/staging/wfx/traces.h
+++ b/drivers/staging/wfx/traces.h
@@ -153,7 +153,7 @@ hif_mib_list_enum
 #define hif_mib_list hif_mib_list_enum { -1, NULL }
 
 DECLARE_EVENT_CLASS(hif_data,
-	TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+	TP_PROTO(const struct hif_msg *hif, int tx_fill_level, bool is_recv),
 	TP_ARGS(hif, tx_fill_level, is_recv),
 	TP_STRUCT__entry(
 		__field(int, tx_fill_level)
@@ -203,12 +203,12 @@ DECLARE_EVENT_CLASS(hif_data,
 	)
 );
 DEFINE_EVENT(hif_data, hif_send,
-	TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+	TP_PROTO(const struct hif_msg *hif, int tx_fill_level, bool is_recv),
 	TP_ARGS(hif, tx_fill_level, is_recv));
 #define _trace_hif_send(hif, tx_fill_level)\
 	trace_hif_send(hif, tx_fill_level, false)
 DEFINE_EVENT(hif_data, hif_recv,
-	TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+	TP_PROTO(const struct hif_msg *hif, int tx_fill_level, bool is_recv),
 	TP_ARGS(hif, tx_fill_level, is_recv));
 #define _trace_hif_recv(hif, tx_fill_level)\
 	trace_hif_recv(hif, tx_fill_level, true)
@@ -359,7 +359,8 @@ TRACE_EVENT(bh_stats,
 	trace_bh_stats(ind, req, cnf, busy, release)
 
 TRACE_EVENT(tx_stats,
-	TP_PROTO(struct hif_cnf_tx *tx_cnf, struct sk_buff *skb, int delay),
+	TP_PROTO(const struct hif_cnf_tx *tx_cnf, const struct sk_buff *skb,
+		 int delay),
 	TP_ARGS(tx_cnf, skb, delay),
 	TP_STRUCT__entry(
 		__field(int, pkt_id)
@@ -375,8 +376,9 @@ TRACE_EVENT(tx_stats,
 		// Keep in sync with the wfx_rates definition in main.c
 		static const int hw_rate[] = { 0, 1, 2, 3, 6, 7, 8, 9,
 					       10, 11, 12, 13 };
-		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-		struct ieee80211_tx_rate *rates = tx_info->driver_rates;
+		const struct ieee80211_tx_info *tx_info =
+			(const struct ieee80211_tx_info *)skb->cb;
+		const struct ieee80211_tx_rate *rates = tx_info->driver_rates;
 		int i;
 
 		__entry->pkt_id = tx_cnf->packet_id;
diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h
index 781a8c8..8b85bb1 100644
--- a/drivers/staging/wfx/wfx.h
+++ b/drivers/staging/wfx/wfx.h
@@ -26,6 +26,9 @@
 #include "hif_tx.h"
 #include "hif_api_general.h"
 
+#define USEC_PER_TXOP 32 // see struct ieee80211_tx_queue_params
+#define USEC_PER_TU 1024
+
 struct hwbus_ops;
 
 struct wfx_dev {
@@ -51,14 +54,12 @@ struct wfx_dev {
 	int			tx_burst_idx;
 	atomic_t		tx_lock;
 
+	atomic_t		packet_id;
 	u32			key_map;
 	struct hif_req_add_key	keys[MAX_KEY_ENTRIES];
 
 	struct hif_rx_stats	rx_stats;
 	struct mutex		rx_stats_lock;
-
-	int			output_power;
-	atomic_t		scan_in_progress;
 };
 
 struct wfx_vif {
@@ -68,24 +69,15 @@ struct wfx_vif {
 	int			id;
 	enum wfx_state		state;
 
-	int			delayed_link_loss;
 	int			bss_loss_state;
 	u32			bss_loss_confirm_id;
 	struct mutex		bss_loss_lock;
 	struct delayed_work	bss_loss_work;
 
 	u32			link_id_map;
-	struct wfx_link_entry	link_id_db[WFX_MAX_STA_IN_AP_MODE];
-	struct delayed_work	link_id_gc_work;
-	struct work_struct	link_id_work;
 
-	bool			aid0_bit_set;
-	bool			mcast_tx;
-	bool			mcast_buffered;
+	bool			after_dtim_tx_allowed;
 	struct wfx_grp_addr_table mcast_filter;
-	struct timer_list	mcast_timeout;
-	struct work_struct	mcast_start_work;
-	struct work_struct	mcast_stop_work;
 
 	s8			wep_default_key_id;
 	struct sk_buff		*wep_pending_skb;
@@ -95,37 +87,30 @@ struct wfx_vif {
 	struct work_struct	tx_policy_upload_work;
 
 	u32			sta_asleep_mask;
-	u32			pspoll_mask;
 	spinlock_t		ps_state_lock;
-	struct work_struct	set_tim_work;
+	struct work_struct	update_tim_work;
 
-	int			dtim_period;
 	int			beacon_int;
-	bool			enable_beacon;
-	struct work_struct	set_beacon_wakeup_period_work;
-
 	bool			filter_bssid;
 	bool			fwd_probe_req;
 	bool			disable_beacon_filter;
 	struct work_struct	update_filtering_work;
 
-	u32			erp_info;
-	int			cqm_rssi_thold;
-	bool			setbssparams_done;
-	struct wfx_ht_info	ht_info;
-	struct wfx_edca_params	edca;
-	struct hif_mib_set_uapsd_information uapsd_info;
+	unsigned long		uapsd_mask;
+	struct ieee80211_tx_queue_params edca_params[IEEE80211_NUM_ACS];
 	struct hif_req_set_bss_params bss_params;
 	struct work_struct	bss_params_work;
-	struct work_struct	set_cts_work;
 
 	int			join_complete_status;
-	bool			delayed_unjoin;
 	struct work_struct	unjoin_work;
 
-	struct wfx_scan		scan;
+	/* avoid some operations in parallel with scan */
+	struct mutex		scan_lock;
+	struct work_struct	scan_work;
+	struct completion	scan_complete;
+	bool			scan_abort;
+	struct ieee80211_scan_request *scan_req;
 
-	struct hif_req_set_pm_mode powersave_mode;
 	struct completion	set_pm_mode_complete;
 
 	struct list_head	event_queue;
diff --git a/drivers/staging/wilc1000/fw.h b/drivers/staging/wilc1000/fw.h
new file mode 100644
index 0000000..a76e1de
--- /dev/null
+++ b/drivers/staging/wilc1000/fw.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
+
+#ifndef WILC_FW_H
+#define WILC_FW_H
+
+#include <linux/ieee80211.h>
+
+#define WILC_MAX_NUM_STA			9
+#define WILC_MAX_RATES_SUPPORTED		12
+#define WILC_MAX_NUM_PMKIDS			16
+#define WILC_MAX_NUM_SCANNED_CH			14
+
+struct wilc_assoc_resp {
+	__le16 capab_info;
+	__le16 status_code;
+	__le16 aid;
+} __packed;
+
+struct wilc_pmkid {
+	u8 bssid[ETH_ALEN];
+	u8 pmkid[WLAN_PMKID_LEN];
+} __packed;
+
+struct wilc_pmkid_attr {
+	u8 numpmkid;
+	struct wilc_pmkid pmkidlist[WILC_MAX_NUM_PMKIDS];
+} __packed;
+
+struct wilc_reg_frame {
+	u8 reg;
+	u8 reg_id;
+	__le16 frame_type;
+} __packed;
+
+struct wilc_drv_handler {
+	__le32 handler;
+	u8 mode;
+} __packed;
+
+struct wilc_wep_key {
+	u8 index;
+	u8 key_len;
+	u8 key[0];
+} __packed;
+
+struct wilc_sta_wpa_ptk {
+	u8 mac_addr[ETH_ALEN];
+	u8 key_len;
+	u8 key[0];
+} __packed;
+
+struct wilc_ap_wpa_ptk {
+	u8 mac_addr[ETH_ALEN];
+	u8 index;
+	u8 key_len;
+	u8 key[0];
+} __packed;
+
+struct wilc_gtk_key {
+	u8 mac_addr[ETH_ALEN];
+	u8 rsc[8];
+	u8 index;
+	u8 key_len;
+	u8 key[0];
+} __packed;
+
+struct wilc_op_mode {
+	__le32 mode;
+} __packed;
+
+struct wilc_noa_opp_enable {
+	u8 ct_window;
+	u8 cnt;
+	__le32 duration;
+	__le32 interval;
+	__le32 start_time;
+} __packed;
+
+struct wilc_noa_opp_disable {
+	u8 cnt;
+	__le32 duration;
+	__le32 interval;
+	__le32 start_time;
+} __packed;
+
+struct wilc_join_bss_param {
+	char ssid[IEEE80211_MAX_SSID_LEN];
+	u8 ssid_terminator;
+	u8 bss_type;
+	u8 ch;
+	__le16 cap_info;
+	u8 sa[ETH_ALEN];
+	u8 bssid[ETH_ALEN];
+	__le16 beacon_period;
+	u8 dtim_period;
+	u8 supp_rates[WILC_MAX_RATES_SUPPORTED + 1];
+	u8 wmm_cap;
+	u8 uapsd_cap;
+	u8 ht_capable;
+	u8 rsn_found;
+	u8 rsn_grp_policy;
+	u8 mode_802_11i;
+	u8 p_suites[3];
+	u8 akm_suites[3];
+	u8 rsn_cap[2];
+	u8 noa_enabled;
+	__le32 tsf_lo;
+	u8 idx;
+	u8 opp_enabled;
+	union {
+		struct wilc_noa_opp_disable opp_dis;
+		struct wilc_noa_opp_enable opp_en;
+	};
+} __packed;
+#endif
diff --git a/drivers/staging/wilc1000/hif.c b/drivers/staging/wilc1000/hif.c
index 349e45d..658790b 100644
--- a/drivers/staging/wilc1000/hif.c
+++ b/drivers/staging/wilc1000/hif.c
@@ -10,7 +10,6 @@
 #define WILC_HIF_CONNECT_TIMEOUT_MS             9500
 
 #define WILC_FALSE_FRMWR_CHANNEL		100
-#define WILC_MAX_RATES_SUPPORTED		12
 
 struct wilc_rcvd_mac_info {
 	u8 status;
@@ -27,48 +26,6 @@ struct wilc_del_all_sta {
 	u8 mac[WILC_MAX_NUM_STA][ETH_ALEN];
 };
 
-struct wilc_op_mode {
-	__le32 mode;
-};
-
-struct wilc_reg_frame {
-	u8 reg;
-	u8 reg_id;
-	__le16 frame_type;
-} __packed;
-
-struct wilc_drv_handler {
-	__le32 handler;
-	u8 mode;
-} __packed;
-
-struct wilc_wep_key {
-	u8 index;
-	u8 key_len;
-	u8 key[0];
-} __packed;
-
-struct wilc_sta_wpa_ptk {
-	u8 mac_addr[ETH_ALEN];
-	u8 key_len;
-	u8 key[0];
-} __packed;
-
-struct wilc_ap_wpa_ptk {
-	u8 mac_addr[ETH_ALEN];
-	u8 index;
-	u8 key_len;
-	u8 key[0];
-} __packed;
-
-struct wilc_gtk_key {
-	u8 mac_addr[ETH_ALEN];
-	u8 rsc[8];
-	u8 index;
-	u8 key_len;
-	u8 key[0];
-} __packed;
-
 union wilc_message_body {
 	struct wilc_rcvd_net_info net_info;
 	struct wilc_rcvd_mac_info mac_info;
@@ -86,51 +43,6 @@ struct host_if_msg {
 	bool is_sync;
 };
 
-struct wilc_noa_opp_enable {
-	u8 ct_window;
-	u8 cnt;
-	__le32 duration;
-	__le32 interval;
-	__le32 start_time;
-} __packed;
-
-struct wilc_noa_opp_disable {
-	u8 cnt;
-	__le32 duration;
-	__le32 interval;
-	__le32 start_time;
-} __packed;
-
-struct wilc_join_bss_param {
-	char ssid[IEEE80211_MAX_SSID_LEN];
-	u8 ssid_terminator;
-	u8 bss_type;
-	u8 ch;
-	__le16 cap_info;
-	u8 sa[ETH_ALEN];
-	u8 bssid[ETH_ALEN];
-	__le16 beacon_period;
-	u8 dtim_period;
-	u8 supp_rates[WILC_MAX_RATES_SUPPORTED + 1];
-	u8 wmm_cap;
-	u8 uapsd_cap;
-	u8 ht_capable;
-	u8 rsn_found;
-	u8 rsn_grp_policy;
-	u8 mode_802_11i;
-	u8 p_suites[3];
-	u8 akm_suites[3];
-	u8 rsn_cap[2];
-	u8 noa_enabled;
-	__le32 tsf_lo;
-	u8 idx;
-	u8 opp_enabled;
-	union {
-		struct wilc_noa_opp_disable opp_dis;
-		struct wilc_noa_opp_enable opp_en;
-	};
-} __packed;
-
 /* 'msg' should be freed by the caller for sync */
 static struct host_if_msg*
 wilc_alloc_work(struct wilc_vif *vif, void (*work_fun)(struct work_struct *),
@@ -640,7 +552,7 @@ static s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len,
 {
 	u8 *ies;
 	u16 ies_len;
-	struct assoc_resp *res = (struct assoc_resp *)buffer;
+	struct wilc_assoc_resp *res = (struct wilc_assoc_resp *)buffer;
 
 	ret_conn_info->status = le16_to_cpu(res->status_code);
 	if (ret_conn_info->status == WLAN_STATUS_SUCCESS) {
diff --git a/drivers/staging/wilc1000/hif.h b/drivers/staging/wilc1000/hif.h
index 22ee6ff..db91791 100644
--- a/drivers/staging/wilc1000/hif.h
+++ b/drivers/staging/wilc1000/hif.h
@@ -17,14 +17,11 @@ enum {
 	WILC_CLIENT_MODE = 0x4
 };
 
-#define WILC_MAX_NUM_STA			9
-#define WILC_MAX_NUM_SCANNED_CH			14
 #define WILC_MAX_NUM_PROBED_SSID		10
 
 #define WILC_TX_MIC_KEY_LEN			8
 #define WILC_RX_MIC_KEY_LEN			8
 
-#define WILC_MAX_NUM_PMKIDS			16
 #define WILC_ADD_STA_LENGTH			40
 #define WILC_NUM_CONCURRENT_IFC			2
 
@@ -35,12 +32,6 @@ enum {
 
 #define WILC_MAX_ASSOC_RESP_FRAME_SIZE   256
 
-struct assoc_resp {
-	__le16 capab_info;
-	__le16 status_code;
-	__le16 aid;
-} __packed;
-
 struct rf_info {
 	u8 link_speed;
 	s8 rssi;
@@ -59,16 +50,6 @@ enum host_if_state {
 	HOST_IF_FORCE_32BIT		= 0xFFFFFFFF
 };
 
-struct wilc_pmkid {
-	u8 bssid[ETH_ALEN];
-	u8 pmkid[WLAN_PMKID_LEN];
-} __packed;
-
-struct wilc_pmkid_attr {
-	u8 numpmkid;
-	struct wilc_pmkid pmkidlist[WILC_MAX_NUM_PMKIDS];
-} __packed;
-
 struct cfg_param_attr {
 	u32 flag;
 	u16 short_retry_limit;
diff --git a/drivers/staging/wilc1000/netdev.c b/drivers/staging/wilc1000/netdev.c
index d2c0b0f..fce5bf2 100644
--- a/drivers/staging/wilc1000/netdev.c
+++ b/drivers/staging/wilc1000/netdev.c
@@ -96,21 +96,18 @@ void wilc_mac_indicate(struct wilc *wilc)
 
 static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
 {
-	u8 *bssid, *bssid1;
 	struct net_device *ndev = NULL;
 	struct wilc_vif *vif;
-
-	bssid = mac_header + 10;
-	bssid1 = mac_header + 4;
+	struct ieee80211_hdr *h = (struct ieee80211_hdr *)mac_header;
 
 	list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
 		if (vif->mode == WILC_STATION_MODE)
-			if (ether_addr_equal_unaligned(bssid, vif->bssid)) {
+			if (ether_addr_equal_unaligned(h->addr2, vif->bssid)) {
 				ndev = vif->ndev;
 				goto out;
 			}
 		if (vif->mode == WILC_AP_MODE)
-			if (ether_addr_equal_unaligned(bssid1, vif->bssid)) {
+			if (ether_addr_equal_unaligned(h->addr1, vif->bssid)) {
 				ndev = vif->ndev;
 				goto out;
 			}
@@ -177,7 +174,7 @@ static int wilc_txq_task(void *vp)
 				}
 				srcu_read_unlock(&wl->srcu, srcu_idx);
 			}
-		} while (ret == -ENOBUFS && !wl->close);
+		} while (ret == WILC_VMM_ENTRY_FULL_RETRY && !wl->close);
 	}
 	return 0;
 }
@@ -186,7 +183,7 @@ static int wilc_wlan_get_firmware(struct net_device *dev)
 {
 	struct wilc_vif *vif = netdev_priv(dev);
 	struct wilc *wilc = vif->wilc;
-	int chip_id, ret = 0;
+	int chip_id;
 	const struct firmware *wilc_firmware;
 	char *firmware;
 
@@ -201,14 +198,11 @@ static int wilc_wlan_get_firmware(struct net_device *dev)
 
 	if (request_firmware(&wilc_firmware, firmware, wilc->dev) != 0) {
 		netdev_err(dev, "%s - firmware not available\n", firmware);
-		ret = -1;
-		goto fail;
+		return -EINVAL;
 	}
 	wilc->firmware = wilc_firmware;
 
-fail:
-
-	return ret;
+	return 0;
 }
 
 static int wilc_start_firmware(struct net_device *dev)
@@ -218,7 +212,7 @@ static int wilc_start_firmware(struct net_device *dev)
 	int ret = 0;
 
 	ret = wilc_wlan_start(wilc);
-	if (ret < 0)
+	if (ret)
 		return ret;
 
 	if (!wait_for_completion_timeout(&wilc->sync_event,
@@ -241,7 +235,7 @@ static int wilc1000_firmware_download(struct net_device *dev)
 
 	ret = wilc_wlan_firmware_download(wilc, wilc->firmware->data,
 					  wilc->firmware->size);
-	if (ret < 0)
+	if (ret)
 		return ret;
 
 	release_firmware(wilc->firmware);
@@ -420,7 +414,7 @@ static int wilc_init_fw_config(struct net_device *dev, struct wilc_vif *vif)
 	return 0;
 
 fail:
-	return -1;
+	return -EINVAL;
 }
 
 static void wlan_deinitialize_threads(struct net_device *dev)
@@ -500,14 +494,12 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
 		wl->close = 0;
 
 		ret = wilc_wlan_init(dev);
-		if (ret < 0)
-			return -EIO;
+		if (ret)
+			return ret;
 
 		ret = wlan_initialize_threads(dev);
-		if (ret < 0) {
-			ret = -EIO;
+		if (ret)
 			goto fail_wilc_wlan;
-		}
 
 		if (wl->gpio_irq && init_irq(dev)) {
 			ret = -EIO;
@@ -521,22 +513,17 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
 			goto fail_irq_init;
 		}
 
-		if (wilc_wlan_get_firmware(dev)) {
-			ret = -EIO;
+		ret = wilc_wlan_get_firmware(dev);
+		if (ret)
 			goto fail_irq_enable;
-		}
 
 		ret = wilc1000_firmware_download(dev);
-		if (ret < 0) {
-			ret = -EIO;
+		if (ret)
 			goto fail_irq_enable;
-		}
 
 		ret = wilc_start_firmware(dev);
-		if (ret < 0) {
-			ret = -EIO;
+		if (ret)
 			goto fail_irq_enable;
-		}
 
 		if (wilc_wlan_cfg_get(vif, 1, WID_FIRMWARE_VERSION, 1, 0)) {
 			int size;
@@ -548,11 +535,10 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
 			firmware_ver[size] = '\0';
 			netdev_dbg(dev, "Firmware Ver = %s\n", firmware_ver);
 		}
-		ret = wilc_init_fw_config(dev, vif);
 
-		if (ret < 0) {
+		ret = wilc_init_fw_config(dev, vif);
+		if (ret) {
 			netdev_err(dev, "Failed to configure firmware\n");
-			ret = -EIO;
 			goto fail_fw_start;
 		}
 		wl->initialized = true;
@@ -603,11 +589,11 @@ static int wilc_mac_open(struct net_device *ndev)
 	netdev_dbg(ndev, "MAC OPEN[%p]\n", ndev);
 
 	ret = wilc_init_host_int(ndev);
-	if (ret < 0)
+	if (ret)
 		return ret;
 
 	ret = wilc_wlan_initialize(ndev, vif);
-	if (ret < 0) {
+	if (ret) {
 		wilc_deinit_host_int(ndev);
 		return ret;
 	}
@@ -840,7 +826,7 @@ static const struct net_device_ops wilc_netdev_ops = {
 void wilc_netdev_cleanup(struct wilc *wilc)
 {
 	struct wilc_vif *vif;
-	int srcu_idx;
+	int srcu_idx, ifc_cnt = 0;
 
 	if (!wilc)
 		return;
@@ -861,7 +847,7 @@ void wilc_netdev_cleanup(struct wilc *wilc)
 	flush_workqueue(wilc->hif_workqueue);
 	destroy_workqueue(wilc->hif_workqueue);
 
-	do {
+	while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) {
 		mutex_lock(&wilc->vif_mutex);
 		if (wilc->vif_num <= 0) {
 			mutex_unlock(&wilc->vif_mutex);
@@ -874,7 +860,8 @@ void wilc_netdev_cleanup(struct wilc *wilc)
 		wilc->vif_num--;
 		mutex_unlock(&wilc->vif_mutex);
 		synchronize_srcu(&wilc->srcu);
-	} while (1);
+		ifc_cnt++;
+	}
 
 	wilc_wlan_cfg_deinit(wilc);
 	wlan_deinit_locks(wilc);
diff --git a/drivers/staging/wilc1000/netdev.h b/drivers/staging/wilc1000/netdev.h
index cd8f0d7..d5f7a60 100644
--- a/drivers/staging/wilc1000/netdev.h
+++ b/drivers/staging/wilc1000/netdev.h
@@ -21,7 +21,6 @@
 #define FLOW_CONTROL_LOWER_THRESHOLD		128
 #define FLOW_CONTROL_UPPER_THRESHOLD		256
 
-#define WILC_MAX_NUM_PMKIDS			16
 #define PMKID_FOUND				1
 #define NUM_STA_ASSOCIATED			8
 
diff --git a/drivers/staging/wilc1000/sdio.c b/drivers/staging/wilc1000/sdio.c
index 319e039..ca99335 100644
--- a/drivers/staging/wilc1000/sdio.c
+++ b/drivers/staging/wilc1000/sdio.c
@@ -273,7 +273,7 @@ static int wilc_sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
 	ret = wilc_sdio_cmd52(wilc, &cmd);
 	if (ret) {
 		dev_err(&func->dev, "Failed cmd52, set 0x10c data...\n");
-		goto fail;
+		return ret;
 	}
 
 	cmd.address = 0x10d;
@@ -281,7 +281,7 @@ static int wilc_sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
 	ret = wilc_sdio_cmd52(wilc, &cmd);
 	if (ret) {
 		dev_err(&func->dev, "Failed cmd52, set 0x10d data...\n");
-		goto fail;
+		return ret;
 	}
 
 	cmd.address = 0x10e;
@@ -289,11 +289,9 @@ static int wilc_sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
 	ret = wilc_sdio_cmd52(wilc, &cmd);
 	if (ret) {
 		dev_err(&func->dev, "Failed cmd52, set 0x10e data...\n");
-		goto fail;
+		return ret;
 	}
 
-	return 1;
-fail:
 	return 0;
 }
 
@@ -311,7 +309,7 @@ static int wilc_sdio_set_func0_block_size(struct wilc *wilc, u32 block_size)
 	ret = wilc_sdio_cmd52(wilc, &cmd);
 	if (ret) {
 		dev_err(&func->dev, "Failed cmd52, set 0x10 data...\n");
-		goto fail;
+		return ret;
 	}
 
 	cmd.address = 0x11;
@@ -319,11 +317,9 @@ static int wilc_sdio_set_func0_block_size(struct wilc *wilc, u32 block_size)
 	ret = wilc_sdio_cmd52(wilc, &cmd);
 	if (ret) {
 		dev_err(&func->dev, "Failed cmd52, set 0x11 data...\n");
-		goto fail;
+		return ret;
 	}
 
-	return 1;
-fail:
 	return 0;
 }
 
@@ -347,18 +343,16 @@ static int wilc_sdio_set_func1_block_size(struct wilc *wilc, u32 block_size)
 	ret = wilc_sdio_cmd52(wilc, &cmd);
 	if (ret) {
 		dev_err(&func->dev, "Failed cmd52, set 0x110 data...\n");
-		goto fail;
+		return ret;
 	}
 	cmd.address = 0x111;
 	cmd.data = (u8)(block_size >> 8);
 	ret = wilc_sdio_cmd52(wilc, &cmd);
 	if (ret) {
 		dev_err(&func->dev, "Failed cmd52, set 0x111 data...\n");
-		goto fail;
+		return ret;
 	}
 
-	return 1;
-fail:
 	return 0;
 }
 
@@ -384,19 +378,18 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
 		cmd.address = addr;
 		cmd.data = data;
 		ret = wilc_sdio_cmd52(wilc, &cmd);
-		if (ret) {
+		if (ret)
 			dev_err(&func->dev,
 				"Failed cmd 52, read reg (%08x) ...\n", addr);
-			goto fail;
-		}
 	} else {
 		struct sdio_cmd53 cmd;
 
 		/**
 		 *      set the AHB address
 		 **/
-		if (!wilc_sdio_set_func0_csa_address(wilc, addr))
-			goto fail;
+		ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+		if (ret)
+			return ret;
 
 		cmd.read_write = 1;
 		cmd.function = 0;
@@ -407,18 +400,12 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
 		cmd.buffer = (u8 *)&data;
 		cmd.block_size = sdio_priv->block_size;
 		ret = wilc_sdio_cmd53(wilc, &cmd);
-		if (ret) {
+		if (ret)
 			dev_err(&func->dev,
 				"Failed cmd53, write reg (%08x)...\n", addr);
-			goto fail;
-		}
 	}
 
-	return 1;
-
-fail:
-
-	return 0;
+	return ret;
 }
 
 static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
@@ -470,14 +457,15 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
 		cmd.buffer = buf;
 		cmd.block_size = block_size;
 		if (addr > 0) {
-			if (!wilc_sdio_set_func0_csa_address(wilc, addr))
-				goto fail;
+			ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+			if (ret)
+				return ret;
 		}
 		ret = wilc_sdio_cmd53(wilc, &cmd);
 		if (ret) {
 			dev_err(&func->dev,
 				"Failed cmd53 [%x], block send...\n", addr);
-			goto fail;
+			return ret;
 		}
 		if (addr > 0)
 			addr += nblk * block_size;
@@ -493,21 +481,18 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
 		cmd.block_size = block_size;
 
 		if (addr > 0) {
-			if (!wilc_sdio_set_func0_csa_address(wilc, addr))
-				goto fail;
+			ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+			if (ret)
+				return ret;
 		}
 		ret = wilc_sdio_cmd53(wilc, &cmd);
 		if (ret) {
 			dev_err(&func->dev,
 				"Failed cmd53 [%x], bytes send...\n", addr);
-			goto fail;
+			return ret;
 		}
 	}
 
-	return 1;
-
-fail:
-
 	return 0;
 }
 
@@ -528,14 +513,15 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
 		if (ret) {
 			dev_err(&func->dev,
 				"Failed cmd 52, read reg (%08x) ...\n", addr);
-			goto fail;
+			return ret;
 		}
 		*data = cmd.data;
 	} else {
 		struct sdio_cmd53 cmd;
 
-		if (!wilc_sdio_set_func0_csa_address(wilc, addr))
-			goto fail;
+		ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+		if (ret)
+			return ret;
 
 		cmd.read_write = 0;
 		cmd.function = 0;
@@ -550,16 +536,11 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
 		if (ret) {
 			dev_err(&func->dev,
 				"Failed cmd53, read reg (%08x)...\n", addr);
-			goto fail;
+			return ret;
 		}
 	}
 
 	le32_to_cpus(data);
-
-	return 1;
-
-fail:
-
 	return 0;
 }
 
@@ -612,14 +593,15 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
 		cmd.buffer = buf;
 		cmd.block_size = block_size;
 		if (addr > 0) {
-			if (!wilc_sdio_set_func0_csa_address(wilc, addr))
-				goto fail;
+			ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+			if (ret)
+				return ret;
 		}
 		ret = wilc_sdio_cmd53(wilc, &cmd);
 		if (ret) {
 			dev_err(&func->dev,
 				"Failed cmd53 [%x], block read...\n", addr);
-			goto fail;
+			return ret;
 		}
 		if (addr > 0)
 			addr += nblk * block_size;
@@ -635,21 +617,18 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
 		cmd.block_size = block_size;
 
 		if (addr > 0) {
-			if (!wilc_sdio_set_func0_csa_address(wilc, addr))
-				goto fail;
+			ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+			if (ret)
+				return ret;
 		}
 		ret = wilc_sdio_cmd53(wilc, &cmd);
 		if (ret) {
 			dev_err(&func->dev,
 				"Failed cmd53 [%x], bytes read...\n", addr);
-			goto fail;
+			return ret;
 		}
 	}
 
-	return 1;
-
-fail:
-
 	return 0;
 }
 
@@ -661,7 +640,7 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
 
 static int wilc_sdio_deinit(struct wilc *wilc)
 {
-	return 1;
+	return 0;
 }
 
 static int wilc_sdio_init(struct wilc *wilc, bool resume)
@@ -686,15 +665,16 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
 	ret = wilc_sdio_cmd52(wilc, &cmd);
 	if (ret) {
 		dev_err(&func->dev, "Fail cmd 52, enable csa...\n");
-		goto fail;
+		return ret;
 	}
 
 	/**
 	 *      function 0 block size
 	 **/
-	if (!wilc_sdio_set_func0_block_size(wilc, WILC_SDIO_BLOCK_SIZE)) {
+	ret = wilc_sdio_set_func0_block_size(wilc, WILC_SDIO_BLOCK_SIZE);
+	if (ret) {
 		dev_err(&func->dev, "Fail cmd 52, set func 0 block size...\n");
-		goto fail;
+		return ret;
 	}
 	sdio_priv->block_size = WILC_SDIO_BLOCK_SIZE;
 
@@ -710,7 +690,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
 	if (ret) {
 		dev_err(&func->dev,
 			"Fail cmd 52, set IOE register...\n");
-		goto fail;
+		return ret;
 	}
 
 	/**
@@ -727,7 +707,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
 		if (ret) {
 			dev_err(&func->dev,
 				"Fail cmd 52, get IOR register...\n");
-			goto fail;
+			return ret;
 		}
 		if (cmd.data == 0x2)
 			break;
@@ -735,15 +715,16 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
 
 	if (loop <= 0) {
 		dev_err(&func->dev, "Fail func 1 is not ready...\n");
-		goto fail;
+		return -EINVAL;
 	}
 
 	/**
 	 *      func 1 is ready, set func 1 block size
 	 **/
-	if (!wilc_sdio_set_func1_block_size(wilc, WILC_SDIO_BLOCK_SIZE)) {
+	ret = wilc_sdio_set_func1_block_size(wilc, WILC_SDIO_BLOCK_SIZE);
+	if (ret) {
 		dev_err(&func->dev, "Fail set func 1 block size...\n");
-		goto fail;
+		return ret;
 	}
 
 	/**
@@ -757,16 +738,17 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
 	ret = wilc_sdio_cmd52(wilc, &cmd);
 	if (ret) {
 		dev_err(&func->dev, "Fail cmd 52, set IEN register...\n");
-		goto fail;
+		return ret;
 	}
 
 	/**
 	 *      make sure we can read back the chip id correctly
 	 **/
 	if (!resume) {
-		if (!wilc_sdio_read_reg(wilc, 0x1000, &chipid)) {
+		ret = wilc_sdio_read_reg(wilc, 0x1000, &chipid);
+		if (ret) {
 			dev_err(&func->dev, "Fail cmd read chip id...\n");
-			goto fail;
+			return ret;
 		}
 		dev_err(&func->dev, "chipid (%08x)\n", chipid);
 		if ((chipid & 0xfff) > 0x2a0)
@@ -777,10 +759,6 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
 			 sdio_priv->has_thrpt_enh3);
 	}
 
-	return 1;
-
-fail:
-
 	return 0;
 }
 
@@ -806,7 +784,7 @@ static int wilc_sdio_read_size(struct wilc *wilc, u32 *size)
 	tmp |= (cmd.data << 8);
 
 	*size = tmp;
-	return 1;
+	return 0;
 }
 
 static int wilc_sdio_read_int(struct wilc *wilc, u32 *int_status)
@@ -865,7 +843,7 @@ static int wilc_sdio_read_int(struct wilc *wilc, u32 *int_status)
 
 	*int_status = tmp;
 
-	return 1;
+	return 0;
 }
 
 static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
@@ -909,10 +887,10 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
 				dev_err(&func->dev,
 					"Failed cmd52, set 0xf8 data (%d) ...\n",
 					__LINE__);
-				goto fail;
+				return ret;
 			}
 		}
-		return 1;
+		return 0;
 	}
 	if (sdio_priv->irq_gpio) {
 		/* has_thrpt_enh2 uses register 0xf8 to clear interrupts. */
@@ -926,7 +904,6 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
 		if (flags) {
 			int i;
 
-			ret = 1;
 			for (i = 0; i < sdio_priv->nint; i++) {
 				if (flags & 1) {
 					struct sdio_cmd52 cmd;
@@ -942,15 +919,12 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
 						dev_err(&func->dev,
 							"Failed cmd52, set 0xf8 data (%d) ...\n",
 							__LINE__);
-						goto fail;
+						return ret;
 					}
 				}
-				if (!ret)
-					break;
 				flags >>= 1;
 			}
-			if (!ret)
-				goto fail;
+
 			for (i = sdio_priv->nint; i < MAX_NUM_INT; i++) {
 				if (flags & 1)
 					dev_err(&func->dev,
@@ -985,11 +959,9 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
 			dev_err(&func->dev,
 				"Failed cmd52, set 0xf6 data (%d) ...\n",
 				__LINE__);
-			goto fail;
+			return ret;
 		}
 	}
-	return 1;
-fail:
 	return 0;
 }
 
@@ -1001,12 +973,12 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
 
 	if (nint > MAX_NUM_INT) {
 		dev_err(&func->dev, "Too many interrupts (%d)...\n", nint);
-		return 0;
+		return -EINVAL;
 	}
 	if (nint > MAX_NUN_INT_THRPT_ENH2) {
 		dev_err(&func->dev,
 			"Cannot support more than 5 interrupts when has_thrpt_enh2=1.\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	sdio_priv->nint = nint;
@@ -1014,15 +986,15 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
 	/**
 	 *      Disable power sequencer
 	 **/
-	if (!wilc_sdio_read_reg(wilc, WILC_MISC, &reg)) {
+	if (wilc_sdio_read_reg(wilc, WILC_MISC, &reg)) {
 		dev_err(&func->dev, "Failed read misc reg...\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	reg &= ~BIT(8);
-	if (!wilc_sdio_write_reg(wilc, WILC_MISC, reg)) {
+	if (wilc_sdio_write_reg(wilc, WILC_MISC, reg)) {
 		dev_err(&func->dev, "Failed write misc reg...\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	if (sdio_priv->irq_gpio) {
@@ -1033,59 +1005,59 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
 		 *      interrupt pin mux select
 		 **/
 		ret = wilc_sdio_read_reg(wilc, WILC_PIN_MUX_0, &reg);
-		if (!ret) {
+		if (ret) {
 			dev_err(&func->dev, "Failed read reg (%08x)...\n",
 				WILC_PIN_MUX_0);
-			return 0;
+			return ret;
 		}
 		reg |= BIT(8);
 		ret = wilc_sdio_write_reg(wilc, WILC_PIN_MUX_0, reg);
-		if (!ret) {
+		if (ret) {
 			dev_err(&func->dev, "Failed write reg (%08x)...\n",
 				WILC_PIN_MUX_0);
-			return 0;
+			return ret;
 		}
 
 		/**
 		 *      interrupt enable
 		 **/
 		ret = wilc_sdio_read_reg(wilc, WILC_INTR_ENABLE, &reg);
-		if (!ret) {
+		if (ret) {
 			dev_err(&func->dev, "Failed read reg (%08x)...\n",
 				WILC_INTR_ENABLE);
-			return 0;
+			return ret;
 		}
 
 		for (i = 0; (i < 5) && (nint > 0); i++, nint--)
 			reg |= BIT((27 + i));
 		ret = wilc_sdio_write_reg(wilc, WILC_INTR_ENABLE, reg);
-		if (!ret) {
+		if (ret) {
 			dev_err(&func->dev, "Failed write reg (%08x)...\n",
 				WILC_INTR_ENABLE);
-			return 0;
+			return ret;
 		}
 		if (nint) {
 			ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
-			if (!ret) {
+			if (ret) {
 				dev_err(&func->dev,
 					"Failed read reg (%08x)...\n",
 					WILC_INTR2_ENABLE);
-				return 0;
+				return ret;
 			}
 
 			for (i = 0; (i < 3) && (nint > 0); i++, nint--)
 				reg |= BIT(i);
 
 			ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
-			if (!ret) {
+			if (ret) {
 				dev_err(&func->dev,
 					"Failed write reg (%08x)...\n",
 					WILC_INTR2_ENABLE);
-				return 0;
+				return ret;
 			}
 		}
 	}
-	return 1;
+	return 0;
 }
 
 /* Global sdio HIF function table */
diff --git a/drivers/staging/wilc1000/spi.c b/drivers/staging/wilc1000/spi.c
index 55f8757..3ffc7b4 100644
--- a/drivers/staging/wilc1000/spi.c
+++ b/drivers/staging/wilc1000/spi.c
@@ -13,7 +13,6 @@
 struct wilc_spi {
 	int crc_off;
 	int nint;
-	int has_thrpt_enh;
 };
 
 static const struct wilc_hif_func wilc_hif_spi;
@@ -89,11 +88,6 @@ static u8 crc7(u8 crc, const u8 *buffer, u32 len)
 #define CMD_SINGLE_READ				0xca
 #define CMD_RESET				0xcf
 
-#define N_OK					1
-#define N_FAIL					0
-#define N_RESET					-1
-#define N_RETRY					-2
-
 #define DATA_PKT_SZ_256				256
 #define DATA_PKT_SZ_512				512
 #define DATA_PKT_SZ_1K				1024
@@ -300,7 +294,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
 	u32 len2;
 	u8 rsp;
 	int len = 0;
-	int result = N_OK;
+	int result = 0;
 	int retry;
 	u8 crc[2];
 
@@ -388,11 +382,11 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
 		break;
 
 	default:
-		result = N_FAIL;
+		result = -EINVAL;
 		break;
 	}
 
-	if (result != N_OK)
+	if (result)
 		return result;
 
 	if (!spi_priv->crc_off)
@@ -425,7 +419,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
 	if (len2 > ARRAY_SIZE(wb)) {
 		dev_err(&spi->dev, "spi buffer size too small (%d) (%zu)\n",
 			len2, ARRAY_SIZE(wb));
-		return N_FAIL;
+		return -EINVAL;
 	}
 	/* zero spi write buffers. */
 	for (wix = len; wix < len2; wix++)
@@ -434,7 +428,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
 
 	if (wilc_spi_tx_rx(wilc, wb, rb, len2)) {
 		dev_err(&spi->dev, "Failed cmd write, bus error...\n");
-		return N_FAIL;
+		return -EINVAL;
 	}
 
 	/*
@@ -449,7 +443,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
 		dev_err(&spi->dev,
 			"Failed cmd response, cmd (%02x), resp (%02x)\n",
 			cmd, rsp);
-		return N_FAIL;
+		return -EINVAL;
 	}
 
 	/*
@@ -459,7 +453,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
 	if (rsp != 0x00) {
 		dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
 			rsp);
-		return N_FAIL;
+		return -EINVAL;
 	}
 
 	if (cmd == CMD_INTERNAL_READ || cmd == CMD_SINGLE_READ ||
@@ -486,7 +480,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
 		if (retry <= 0) {
 			dev_err(&spi->dev,
 				"Error, data read response (%02x)\n", rsp);
-			return N_RESET;
+			return -EAGAIN;
 		}
 	}
 
@@ -502,7 +496,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
 		} else {
 			dev_err(&spi->dev,
 				"buffer overrun when reading data.\n");
-			return N_FAIL;
+			return -EINVAL;
 		}
 
 		if (!spi_priv->crc_off) {
@@ -515,7 +509,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
 			} else {
 				dev_err(&spi->dev,
 					"buffer overrun when reading crc.\n");
-				return N_FAIL;
+				return -EINVAL;
 			}
 		}
 	} else if ((cmd == CMD_DMA_READ) || (cmd == CMD_DMA_EXT_READ)) {
@@ -541,7 +535,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
 			if (wilc_spi_rx(wilc, &b[ix], nbytes)) {
 				dev_err(&spi->dev,
 					"Failed block read, bus err\n");
-				return N_FAIL;
+				return -EINVAL;
 			}
 
 			/*
@@ -550,7 +544,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
 			if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) {
 				dev_err(&spi->dev,
 					"Failed block crc read, bus err\n");
-				return N_FAIL;
+				return -EINVAL;
 			}
 
 			ix += nbytes;
@@ -582,14 +576,14 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
 				if (wilc_spi_rx(wilc, &rsp, 1)) {
 					dev_err(&spi->dev,
 						"Failed resp read, bus err\n");
-					result = N_FAIL;
+					result = -EINVAL;
 					break;
 				}
 				if (((rsp >> 4) & 0xf) == 0xf)
 					break;
 			} while (retry--);
 
-			if (result == N_FAIL)
+			if (result)
 				break;
 
 			/*
@@ -598,7 +592,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
 			if (wilc_spi_rx(wilc, &b[ix], nbytes)) {
 				dev_err(&spi->dev,
 					"Failed block read, bus err\n");
-				result = N_FAIL;
+				result = -EINVAL;
 				break;
 			}
 
@@ -608,7 +602,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
 			if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) {
 				dev_err(&spi->dev,
 					"Failed block crc read, bus err\n");
-				result = N_FAIL;
+				result = -EINVAL;
 				break;
 			}
 
@@ -624,7 +618,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
 	struct spi_device *spi = to_spi_device(wilc->dev);
 	struct wilc_spi *spi_priv = wilc->bus_data;
 	int ix, nbytes;
-	int result = 1;
+	int result = 0;
 	u8 cmd, order, crc[2] = {0};
 
 	/*
@@ -652,7 +646,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
 		if (wilc_spi_tx(wilc, &cmd, 1)) {
 			dev_err(&spi->dev,
 				"Failed data block cmd write, bus error...\n");
-			result = N_FAIL;
+			result = -EINVAL;
 			break;
 		}
 
@@ -662,7 +656,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
 		if (wilc_spi_tx(wilc, &b[ix], nbytes)) {
 			dev_err(&spi->dev,
 				"Failed data block write, bus error...\n");
-			result = N_FAIL;
+			result = -EINVAL;
 			break;
 		}
 
@@ -672,7 +666,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
 		if (!spi_priv->crc_off) {
 			if (wilc_spi_tx(wilc, crc, 2)) {
 				dev_err(&spi->dev, "Failed data block crc write, bus error...\n");
-				result = N_FAIL;
+				result = -EINVAL;
 				break;
 			}
 		}
@@ -701,7 +695,7 @@ static int spi_internal_write(struct wilc *wilc, u32 adr, u32 dat)
 	cpu_to_le32s(&dat);
 	result = spi_cmd_complete(wilc, CMD_INTERNAL_WRITE, adr, (u8 *)&dat, 4,
 				  0);
-	if (result != N_OK)
+	if (result)
 		dev_err(&spi->dev, "Failed internal write cmd...\n");
 
 	return result;
@@ -714,14 +708,14 @@ static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data)
 
 	result = spi_cmd_complete(wilc, CMD_INTERNAL_READ, adr, (u8 *)data, 4,
 				  0);
-	if (result != N_OK) {
+	if (result) {
 		dev_err(&spi->dev, "Failed internal read cmd...\n");
-		return 0;
+		return result;
 	}
 
 	le32_to_cpus(data);
 
-	return 1;
+	return result;
 }
 
 /********************************************
@@ -733,7 +727,7 @@ static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data)
 static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data)
 {
 	struct spi_device *spi = to_spi_device(wilc->dev);
-	int result = N_OK;
+	int result;
 	u8 cmd = CMD_SINGLE_WRITE;
 	u8 clockless = 0;
 
@@ -745,7 +739,7 @@ static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data)
 	}
 
 	result = spi_cmd_complete(wilc, cmd, addr, (u8 *)&data, 4, clockless);
-	if (result != N_OK)
+	if (result)
 		dev_err(&spi->dev, "Failed cmd, write reg (%08x)...\n", addr);
 
 	return result;
@@ -760,29 +754,29 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
 	 * has to be greater than 4
 	 */
 	if (size <= 4)
-		return 0;
+		return -EINVAL;
 
 	result = spi_cmd_complete(wilc, CMD_DMA_EXT_WRITE, addr, NULL, size, 0);
-	if (result != N_OK) {
+	if (result) {
 		dev_err(&spi->dev,
 			"Failed cmd, write block (%08x)...\n", addr);
-		return 0;
+		return result;
 	}
 
 	/*
 	 * Data
 	 */
 	result = spi_data_write(wilc, buf, size);
-	if (result != N_OK)
+	if (result)
 		dev_err(&spi->dev, "Failed block data write...\n");
 
-	return 1;
+	return result;
 }
 
 static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data)
 {
 	struct spi_device *spi = to_spi_device(wilc->dev);
-	int result = N_OK;
+	int result;
 	u8 cmd = CMD_SINGLE_READ;
 	u8 clockless = 0;
 
@@ -793,14 +787,14 @@ static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data)
 	}
 
 	result = spi_cmd_complete(wilc, cmd, addr, (u8 *)data, 4, clockless);
-	if (result != N_OK) {
+	if (result) {
 		dev_err(&spi->dev, "Failed cmd, read reg (%08x)...\n", addr);
-		return 0;
+		return result;
 	}
 
 	le32_to_cpus(data);
 
-	return 1;
+	return 0;
 }
 
 static int wilc_spi_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
@@ -809,15 +803,13 @@ static int wilc_spi_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
 	int result;
 
 	if (size <= 4)
-		return 0;
+		return -EINVAL;
 
 	result = spi_cmd_complete(wilc, CMD_DMA_EXT_READ, addr, buf, size, 0);
-	if (result != N_OK) {
+	if (result)
 		dev_err(&spi->dev, "Failed cmd, read block (%08x)...\n", addr);
-		return 0;
-	}
 
-	return 1;
+	return result;
 }
 
 /********************************************
@@ -831,7 +823,7 @@ static int wilc_spi_deinit(struct wilc *wilc)
 	/*
 	 * TODO:
 	 */
-	return 1;
+	return 0;
 }
 
 static int wilc_spi_init(struct wilc *wilc, bool resume)
@@ -841,13 +833,14 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
 	u32 reg;
 	u32 chipid;
 	static int isinit;
+	int ret;
 
 	if (isinit) {
-		if (!wilc_spi_read_reg(wilc, 0x1000, &chipid)) {
+		ret = wilc_spi_read_reg(wilc, 0x1000, &chipid);
+		if (ret)
 			dev_err(&spi->dev, "Fail cmd read chip id...\n");
-			return 0;
-		}
-		return 1;
+
+		return ret;
 	}
 
 	/*
@@ -859,7 +852,8 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
 	 * way to reset
 	 */
 	/* the SPI to its initial value. */
-	if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
+	ret = spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg);
+	if (ret) {
 		/*
 		 * Read failed. Try with CRC off. This might happen when module
 		 * is removed but chip isn't reset
@@ -867,24 +861,26 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
 		spi_priv->crc_off = 1;
 		dev_err(&spi->dev,
 			"Failed read with CRC on, retrying with CRC off\n");
-		if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
+		ret = spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg);
+		if (ret) {
 			/*
 			 * Read failed with both CRC on and off,
 			 * something went bad
 			 */
 			dev_err(&spi->dev, "Failed internal read protocol\n");
-			return 0;
+			return ret;
 		}
 	}
 	if (spi_priv->crc_off == 0) {
 		reg &= ~0xc; /* disable crc checking */
 		reg &= ~0x70;
 		reg |= (0x5 << 4);
-		if (!spi_internal_write(wilc, WILC_SPI_PROTOCOL_OFFSET, reg)) {
+		ret = spi_internal_write(wilc, WILC_SPI_PROTOCOL_OFFSET, reg);
+		if (ret) {
 			dev_err(&spi->dev,
 				"[wilc spi %d]: Failed internal write reg\n",
 				__LINE__);
-			return 0;
+			return ret;
 		}
 		spi_priv->crc_off = 1;
 	}
@@ -892,168 +888,35 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
 	/*
 	 * make sure we can read back the chip id correctly
 	 */
-	if (!wilc_spi_read_reg(wilc, 0x1000, &chipid)) {
+	ret = wilc_spi_read_reg(wilc, 0x1000, &chipid);
+	if (ret) {
 		dev_err(&spi->dev, "Fail cmd read chip id...\n");
-		return 0;
+		return ret;
 	}
 
-	spi_priv->has_thrpt_enh = 1;
-
 	isinit = 1;
 
-	return 1;
+	return 0;
 }
 
 static int wilc_spi_read_size(struct wilc *wilc, u32 *size)
 {
-	struct spi_device *spi = to_spi_device(wilc->dev);
-	struct wilc_spi *spi_priv = wilc->bus_data;
 	int ret;
 
-	if (spi_priv->has_thrpt_enh) {
-		ret = spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE,
-					size);
-		*size = *size  & IRQ_DMA_WD_CNT_MASK;
-	} else {
-		u32 tmp;
-		u32 byte_cnt;
-
-		ret = wilc_spi_read_reg(wilc, WILC_VMM_TO_HOST_SIZE,
-					&byte_cnt);
-		if (!ret) {
-			dev_err(&spi->dev,
-				"Failed read WILC_VMM_TO_HOST_SIZE ...\n");
-			return ret;
-		}
-		tmp = (byte_cnt >> 2) & IRQ_DMA_WD_CNT_MASK;
-		*size = tmp;
-	}
+	ret = spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE, size);
+	*size = *size & IRQ_DMA_WD_CNT_MASK;
 
 	return ret;
 }
 
 static int wilc_spi_read_int(struct wilc *wilc, u32 *int_status)
 {
-	struct spi_device *spi = to_spi_device(wilc->dev);
-	struct wilc_spi *spi_priv = wilc->bus_data;
-	int ret;
-	u32 tmp;
-	u32 byte_cnt;
-	bool unexpected_irq;
-	int j;
-	u32 unknown_mask;
-	u32 irq_flags;
-	int k = IRG_FLAGS_OFFSET + 5;
-
-	if (spi_priv->has_thrpt_enh)
-		return spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE,
-					 int_status);
-	ret = wilc_spi_read_reg(wilc, WILC_VMM_TO_HOST_SIZE, &byte_cnt);
-	if (!ret) {
-		dev_err(&spi->dev,
-			"Failed read WILC_VMM_TO_HOST_SIZE ...\n");
-		return ret;
-	}
-	tmp = (byte_cnt >> 2) & IRQ_DMA_WD_CNT_MASK;
-
-	j = 0;
-	do {
-		wilc_spi_read_reg(wilc, 0x1a90, &irq_flags);
-		tmp |= ((irq_flags >> 27) << IRG_FLAGS_OFFSET);
-
-		if (spi_priv->nint > 5) {
-			wilc_spi_read_reg(wilc, 0x1a94, &irq_flags);
-			tmp |= (((irq_flags >> 0) & 0x7) << k);
-		}
-
-		unknown_mask = ~((1ul << spi_priv->nint) - 1);
-
-		unexpected_irq = (tmp >> IRG_FLAGS_OFFSET) & unknown_mask;
-		if (unexpected_irq) {
-			dev_err(&spi->dev,
-				"Unexpected interrupt(2):j=%d,tmp=%x,mask=%x\n",
-				j, tmp, unknown_mask);
-		}
-
-		j++;
-	} while (unexpected_irq);
-
-	*int_status = tmp;
-
-	return ret;
+	return spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE, int_status);
 }
 
 static int wilc_spi_clear_int_ext(struct wilc *wilc, u32 val)
 {
-	struct spi_device *spi = to_spi_device(wilc->dev);
-	struct wilc_spi *spi_priv = wilc->bus_data;
-	int ret;
-	u32 flags;
-	u32 tbl_ctl;
-
-	if (spi_priv->has_thrpt_enh) {
-		return spi_internal_write(wilc, 0xe844 - WILC_SPI_REG_BASE,
-					  val);
-	}
-
-	flags = val & (BIT(MAX_NUM_INT) - 1);
-	if (flags) {
-		int i;
-
-		ret = 1;
-		for (i = 0; i < spi_priv->nint; i++) {
-			/*
-			 * No matter what you write 1 or 0,
-			 * it will clear interrupt.
-			 */
-			if (flags & 1)
-				ret = wilc_spi_write_reg(wilc,
-							 0x10c8 + i * 4, 1);
-			if (!ret)
-				break;
-			flags >>= 1;
-		}
-		if (!ret) {
-			dev_err(&spi->dev,
-				"Failed wilc_spi_write_reg, set reg %x ...\n",
-				0x10c8 + i * 4);
-			return ret;
-		}
-		for (i = spi_priv->nint; i < MAX_NUM_INT; i++) {
-			if (flags & 1)
-				dev_err(&spi->dev,
-					"Unexpected interrupt cleared %d...\n",
-					i);
-			flags >>= 1;
-		}
-	}
-
-	tbl_ctl = 0;
-	/* select VMM table 0 */
-	if (val & SEL_VMM_TBL0)
-		tbl_ctl |= BIT(0);
-	/* select VMM table 1 */
-	if (val & SEL_VMM_TBL1)
-		tbl_ctl |= BIT(1);
-
-	ret = wilc_spi_write_reg(wilc, WILC_VMM_TBL_CTL, tbl_ctl);
-	if (!ret) {
-		dev_err(&spi->dev, "fail write reg vmm_tbl_ctl...\n");
-		return ret;
-	}
-
-	if (val & EN_VMM) {
-		/*
-		 * enable vmm transfer.
-		 */
-		ret = wilc_spi_write_reg(wilc, WILC_VMM_CORE_CTL, 1);
-		if (!ret) {
-			dev_err(&spi->dev, "fail write reg vmm_core_ctl...\n");
-			return ret;
-		}
-	}
-
-	return ret;
+	return spi_internal_write(wilc, 0xe844 - WILC_SPI_REG_BASE, val);
 }
 
 static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
@@ -1065,7 +928,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
 
 	if (nint > MAX_NUM_INT) {
 		dev_err(&spi->dev, "Too many interrupts (%d)...\n", nint);
-		return 0;
+		return -EINVAL;
 	}
 
 	spi_priv->nint = nint;
@@ -1074,58 +937,58 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
 	 * interrupt pin mux select
 	 */
 	ret = wilc_spi_read_reg(wilc, WILC_PIN_MUX_0, &reg);
-	if (!ret) {
+	if (ret) {
 		dev_err(&spi->dev, "Failed read reg (%08x)...\n",
 			WILC_PIN_MUX_0);
-		return 0;
+		return ret;
 	}
 	reg |= BIT(8);
 	ret = wilc_spi_write_reg(wilc, WILC_PIN_MUX_0, reg);
-	if (!ret) {
+	if (ret) {
 		dev_err(&spi->dev, "Failed write reg (%08x)...\n",
 			WILC_PIN_MUX_0);
-		return 0;
+		return ret;
 	}
 
 	/*
 	 * interrupt enable
 	 */
 	ret = wilc_spi_read_reg(wilc, WILC_INTR_ENABLE, &reg);
-	if (!ret) {
+	if (ret) {
 		dev_err(&spi->dev, "Failed read reg (%08x)...\n",
 			WILC_INTR_ENABLE);
-		return 0;
+		return ret;
 	}
 
 	for (i = 0; (i < 5) && (nint > 0); i++, nint--)
 		reg |= (BIT((27 + i)));
 
 	ret = wilc_spi_write_reg(wilc, WILC_INTR_ENABLE, reg);
-	if (!ret) {
+	if (ret) {
 		dev_err(&spi->dev, "Failed write reg (%08x)...\n",
 			WILC_INTR_ENABLE);
-		return 0;
+		return ret;
 	}
 	if (nint) {
 		ret = wilc_spi_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
-		if (!ret) {
+		if (ret) {
 			dev_err(&spi->dev, "Failed read reg (%08x)...\n",
 				WILC_INTR2_ENABLE);
-			return 0;
+			return ret;
 		}
 
 		for (i = 0; (i < 3) && (nint > 0); i++, nint--)
 			reg |= BIT(i);
 
 		ret = wilc_spi_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
-		if (!ret) {
+		if (ret) {
 			dev_err(&spi->dev, "Failed write reg (%08x)...\n",
 				WILC_INTR2_ENABLE);
-			return 0;
+			return ret;
 		}
 	}
 
-	return 1;
+	return 0;
 }
 
 /* Global spi HIF function table */
diff --git a/drivers/staging/wilc1000/wlan.c b/drivers/staging/wilc1000/wlan.c
index d3de761..601e4d1 100644
--- a/drivers/staging/wilc1000/wlan.c
+++ b/drivers/staging/wilc1000/wlan.c
@@ -489,50 +489,44 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
 	struct wilc_vif *vif;
 
 	if (wilc->quit)
-		goto out;
+		goto out_update_cnt;
 
 	mutex_lock(&wilc->txq_add_to_head_cs);
 	tqe = wilc_wlan_txq_get_first(wilc);
 	if (!tqe)
-		goto out;
+		goto out_unlock;
 	dev = tqe->vif->ndev;
 	wilc_wlan_txq_filter_dup_tcp_ack(dev);
 	i = 0;
 	sum = 0;
-	do {
-		if (tqe && (i < (WILC_VMM_TBL_SIZE - 1))) {
-			if (tqe->type == WILC_CFG_PKT)
-				vmm_sz = ETH_CONFIG_PKT_HDR_OFFSET;
+	while (tqe && (i < (WILC_VMM_TBL_SIZE - 1))) {
+		if (tqe->type == WILC_CFG_PKT)
+			vmm_sz = ETH_CONFIG_PKT_HDR_OFFSET;
+		else if (tqe->type == WILC_NET_PKT)
+			vmm_sz = ETH_ETHERNET_HDR_OFFSET;
+		else
+			vmm_sz = HOST_HDR_OFFSET;
 
-			else if (tqe->type == WILC_NET_PKT)
-				vmm_sz = ETH_ETHERNET_HDR_OFFSET;
+		vmm_sz += tqe->buffer_size;
 
-			else
-				vmm_sz = HOST_HDR_OFFSET;
+		if (vmm_sz & 0x3)
+			vmm_sz = (vmm_sz + 4) & ~0x3;
 
-			vmm_sz += tqe->buffer_size;
-
-			if (vmm_sz & 0x3)
-				vmm_sz = (vmm_sz + 4) & ~0x3;
-
-			if ((sum + vmm_sz) > WILC_TX_BUFF_SIZE)
-				break;
-
-			vmm_table[i] = vmm_sz / 4;
-			if (tqe->type == WILC_CFG_PKT)
-				vmm_table[i] |= BIT(10);
-			cpu_to_le32s(&vmm_table[i]);
-
-			i++;
-			sum += vmm_sz;
-			tqe = wilc_wlan_txq_get_next(wilc, tqe);
-		} else {
+		if ((sum + vmm_sz) > WILC_TX_BUFF_SIZE)
 			break;
-		}
-	} while (1);
+
+		vmm_table[i] = vmm_sz / 4;
+		if (tqe->type == WILC_CFG_PKT)
+			vmm_table[i] |= BIT(10);
+		cpu_to_le32s(&vmm_table[i]);
+
+		i++;
+		sum += vmm_sz;
+		tqe = wilc_wlan_txq_get_next(wilc, tqe);
+	}
 
 	if (i == 0)
-		goto out;
+		goto out_unlock;
 	vmm_table[i] = 0x0;
 
 	acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
@@ -540,7 +534,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
 	func = wilc->hif_func;
 	do {
 		ret = func->hif_read_reg(wilc, WILC_HOST_TX_CTRL, &reg);
-		if (!ret)
+		if (ret)
 			break;
 
 		if ((reg & 0x1) == 0)
@@ -554,7 +548,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
 		}
 	} while (!wilc->quit);
 
-	if (!ret)
+	if (ret)
 		goto out_release_bus;
 
 	timeout = 200;
@@ -563,16 +557,16 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
 					 WILC_VMM_TBL_RX_SHADOW_BASE,
 					 (u8 *)vmm_table,
 					 ((i + 1) * 4));
-		if (!ret)
+		if (ret)
 			break;
 
 		ret = func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x2);
-		if (!ret)
+		if (ret)
 			break;
 
 		do {
 			ret = func->hif_read_reg(wilc, WILC_HOST_VMM_CTL, &reg);
-			if (!ret)
+			if (ret)
 				break;
 			if ((reg >> 2) & 0x1) {
 				entries = ((reg >> 3) & 0x3f);
@@ -585,27 +579,27 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
 			break;
 		}
 
-		if (!ret)
+		if (ret)
 			break;
 
 		if (entries == 0) {
 			ret = func->hif_read_reg(wilc, WILC_HOST_TX_CTRL, &reg);
-			if (!ret)
+			if (ret)
 				break;
 			reg &= ~BIT(0);
 			ret = func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, reg);
-			if (!ret)
-				break;
-			break;
 		}
-		break;
-	} while (1);
+	} while (0);
 
-	if (!ret)
+	if (ret)
 		goto out_release_bus;
 
 	if (entries == 0) {
-		ret = -ENOBUFS;
+		/*
+		 * No VMM space available in firmware, so retry transmitting
+		 * the packet from the tx queue.
+		 */
+		ret = WILC_VMM_ENTRY_FULL_RETRY;
 		goto out_release_bus;
 	}
 
@@ -664,7 +658,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
 	acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
 
 	ret = func->hif_clear_int_ext(wilc, ENABLE_TX_VMM);
-	if (!ret)
+	if (ret)
 		goto out_release_bus;
 
 	ret = func->hif_block_tx_ext(wilc, 0, txb, offset);
@@ -672,9 +666,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
 out_release_bus:
 	release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
 
-out:
+out_unlock:
 	mutex_unlock(&wilc->txq_add_to_head_cs);
 
+out_update_cnt:
 	*txq_count = wilc->txq_entries;
 	return ret;
 }
@@ -725,9 +720,7 @@ static void wilc_wlan_handle_rx_buff(struct wilc *wilc, u8 *buffer, int size)
 			}
 		}
 		offset += tp_len;
-		if (offset >= size)
-			break;
-	} while (1);
+	} while (offset < size);
 }
 
 static void wilc_wlan_handle_rxq(struct wilc *wilc)
@@ -736,11 +729,7 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
 	u8 *buffer;
 	struct rxq_entry_t *rqe;
 
-	do {
-		if (wilc->quit) {
-			complete(&wilc->cfg_event);
-			break;
-		}
+	while (!wilc->quit) {
 		rqe = wilc_wlan_rxq_remove(wilc);
 		if (!rqe)
 			break;
@@ -750,7 +739,9 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
 		wilc_wlan_handle_rx_buff(wilc, buffer, size);
 
 		kfree(rqe);
-	} while (1);
+	}
+	if (wilc->quit)
+		complete(&wilc->cfg_event);
 }
 
 static void wilc_unknown_isr_ext(struct wilc *wilc)
@@ -785,7 +776,7 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
 
 	wilc->hif_func->hif_clear_int_ext(wilc, DATA_INT_CLR | ENABLE_RX_VMM);
 	ret = wilc->hif_func->hif_block_rx_ext(wilc, 0, buffer, size);
-	if (!ret)
+	if (ret)
 		return;
 
 	offset += size;
@@ -846,7 +837,7 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
 			memcpy(dma_buffer, &buffer[offset], size2);
 			ret = wilc->hif_func->hif_block_tx(wilc, addr,
 							   dma_buffer, size2);
-			if (!ret)
+			if (ret)
 				break;
 
 			addr += size2;
@@ -855,17 +846,15 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
 		}
 		release_bus(wilc, WILC_BUS_RELEASE_ONLY);
 
-		if (!ret) {
-			ret = -EIO;
+		if (ret)
 			goto fail;
-		}
 	} while (offset < buffer_size);
 
 fail:
 
 	kfree(dma_buffer);
 
-	return (ret < 0) ? ret : 0;
+	return ret;
 }
 
 int wilc_wlan_start(struct wilc *wilc)
@@ -882,49 +871,26 @@ int wilc_wlan_start(struct wilc *wilc)
 	}
 	acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
 	ret = wilc->hif_func->hif_write_reg(wilc, WILC_VMM_CORE_CFG, reg);
-	if (!ret) {
+	if (ret) {
 		release_bus(wilc, WILC_BUS_RELEASE_ONLY);
-		return -EIO;
+		return ret;
 	}
 	reg = 0;
 	if (wilc->io_type == WILC_HIF_SDIO && wilc->dev_irq_num)
 		reg |= WILC_HAVE_SDIO_IRQ_GPIO;
 
-#ifdef WILC_DISABLE_PMU
-#else
-	reg |= WILC_HAVE_USE_PMU;
-#endif
-
-#ifdef WILC_SLEEP_CLK_SRC_XO
-	reg |= WILC_HAVE_SLEEP_CLK_SRC_XO;
-#elif defined WILC_SLEEP_CLK_SRC_RTC
-	reg |= WILC_HAVE_SLEEP_CLK_SRC_RTC;
-#endif
-
-#ifdef WILC_EXT_PA_INV_TX_RX
-	reg |= WILC_HAVE_EXT_PA_INV_TX_RX;
-#endif
-	reg |= WILC_HAVE_USE_IRQ_AS_HOST_WAKE;
-	reg |= WILC_HAVE_LEGACY_RF_SETTINGS;
-#ifdef XTAL_24
-	reg |= WILC_HAVE_XTAL_24;
-#endif
-#ifdef DISABLE_WILC_UART
-	reg |= WILC_HAVE_DISABLE_WILC_UART;
-#endif
-
 	ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_1, reg);
-	if (!ret) {
+	if (ret) {
 		release_bus(wilc, WILC_BUS_RELEASE_ONLY);
-		return -EIO;
+		return ret;
 	}
 
 	wilc->hif_func->hif_sync_ext(wilc, NUM_INT_EXT);
 
 	ret = wilc->hif_func->hif_read_reg(wilc, 0x1000, &chipid);
-	if (!ret) {
+	if (ret) {
 		release_bus(wilc, WILC_BUS_RELEASE_ONLY);
-		return -EIO;
+		return ret;
 	}
 
 	wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
@@ -939,7 +905,7 @@ int wilc_wlan_start(struct wilc *wilc)
 	wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
 	release_bus(wilc, WILC_BUS_RELEASE_ONLY);
 
-	return (ret < 0) ? ret : 0;
+	return ret;
 }
 
 int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif)
@@ -950,33 +916,33 @@ int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif)
 	acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
 
 	ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
-	if (!ret) {
+	if (ret) {
 		netdev_err(vif->ndev, "Error while reading reg\n");
 		release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
-		return -EIO;
+		return ret;
 	}
 
 	ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
 					(reg | WILC_ABORT_REQ_BIT));
-	if (!ret) {
+	if (ret) {
 		netdev_err(vif->ndev, "Error while writing reg\n");
 		release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
-		return -EIO;
+		return ret;
 	}
 
 	ret = wilc->hif_func->hif_read_reg(wilc, WILC_FW_HOST_COMM, &reg);
-	if (!ret) {
+	if (ret) {
 		netdev_err(vif->ndev, "Error while reading reg\n");
 		release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
-		return -EIO;
+		return ret;
 	}
 	reg = BIT(0);
 
 	ret = wilc->hif_func->hif_write_reg(wilc, WILC_FW_HOST_COMM, reg);
-	if (!ret) {
+	if (ret) {
 		netdev_err(vif->ndev, "Error while writing reg\n");
 		release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
-		return -EIO;
+		return ret;
 	}
 
 	release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
@@ -992,21 +958,14 @@ void wilc_wlan_cleanup(struct net_device *dev)
 	struct wilc *wilc = vif->wilc;
 
 	wilc->quit = 1;
-	do {
-		tqe = wilc_wlan_txq_remove_from_head(dev);
-		if (!tqe)
-			break;
+	while ((tqe = wilc_wlan_txq_remove_from_head(dev))) {
 		if (tqe->tx_complete_func)
 			tqe->tx_complete_func(tqe->priv, 0);
 		kfree(tqe);
-	} while (1);
+	}
 
-	do {
-		rqe = wilc_wlan_rxq_remove(wilc);
-		if (!rqe)
-			break;
+	while ((rqe = wilc_wlan_rxq_remove(wilc)))
 		kfree(rqe);
-	} while (1);
 
 	kfree(wilc->rx_buffer);
 	wilc->rx_buffer = NULL;
@@ -1156,10 +1115,11 @@ int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
 	return ret;
 }
 
-static u32 init_chip(struct net_device *dev)
+static int init_chip(struct net_device *dev)
 {
 	u32 chipid;
-	u32 reg, ret = 0;
+	u32 reg;
+	int ret = 0;
 	struct wilc_vif *vif = netdev_priv(dev);
 	struct wilc *wilc = vif->wilc;
 
@@ -1169,18 +1129,18 @@ static u32 init_chip(struct net_device *dev)
 
 	if ((chipid & 0xfff) != 0xa0) {
 		ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg);
-		if (!ret) {
+		if (ret) {
 			netdev_err(dev, "fail read reg 0x1118\n");
 			goto release;
 		}
 		reg |= BIT(0);
 		ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg);
-		if (!ret) {
+		if (ret) {
 			netdev_err(dev, "fail write reg 0x1118\n");
 			goto release;
 		}
 		ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71);
-		if (!ret) {
+		if (ret) {
 			netdev_err(dev, "fail write reg 0xc0000\n");
 			goto release;
 		}
@@ -1230,7 +1190,7 @@ int wilc_wlan_init(struct net_device *dev)
 
 	wilc->quit = 0;
 
-	if (!wilc->hif_func->hif_init(wilc, false)) {
+	if (wilc->hif_func->hif_init(wilc, false)) {
 		ret = -EIO;
 		goto fail;
 	}
@@ -1251,12 +1211,12 @@ int wilc_wlan_init(struct net_device *dev)
 		goto fail;
 	}
 
-	if (!init_chip(dev)) {
+	if (init_chip(dev)) {
 		ret = -EIO;
 		goto fail;
 	}
 
-	return 1;
+	return 0;
 
 fail:
 
diff --git a/drivers/staging/wilc1000/wlan.h b/drivers/staging/wilc1000/wlan.h
index 1f6957c..8c46342 100644
--- a/drivers/staging/wilc1000/wlan.h
+++ b/drivers/staging/wilc1000/wlan.h
@@ -197,6 +197,8 @@
 #define IS_MANAGMEMENT_CALLBACK	0x080
 #define IS_MGMT_STATUS_SUCCES	0x040
 
+#define WILC_WID_TYPE		GENMASK(15, 12)
+#define WILC_VMM_ENTRY_FULL_RETRY	1
 /********************************************
  *
  *      Tx/Rx Queue Structure
diff --git a/drivers/staging/wilc1000/wlan_cfg.c b/drivers/staging/wilc1000/wlan_cfg.c
index 6f6b286..fe2a7ed8 100644
--- a/drivers/staging/wilc1000/wlan_cfg.c
+++ b/drivers/staging/wilc1000/wlan_cfg.c
@@ -4,6 +4,7 @@
  * All rights reserved.
  */
 
+#include <linux/bitfield.h>
 #include "wlan_if.h"
 #include "wlan.h"
 #include "wlan_cfg.h"
@@ -132,75 +133,54 @@ static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size)
  *
  ********************************************/
 
-#define GET_WID_TYPE(wid)		(((wid) >> 12) & 0x7)
 static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
 {
 	u16 wid;
 	u32 len = 0, i = 0;
+	struct wilc_cfg *cfg = &wl->cfg;
 
 	while (size > 0) {
 		i = 0;
 		wid = get_unaligned_le16(info);
 
-		switch (GET_WID_TYPE(wid)) {
+		switch (FIELD_GET(WILC_WID_TYPE, wid)) {
 		case WID_CHAR:
-			do {
-				if (wl->cfg.b[i].id == WID_NIL)
-					break;
-
-				if (wl->cfg.b[i].id == wid) {
-					wl->cfg.b[i].val = info[4];
-					break;
-				}
+			while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
 				i++;
-			} while (1);
+
+			if (cfg->b[i].id == wid)
+				cfg->b[i].val = info[4];
+
 			len = 3;
 			break;
 
 		case WID_SHORT:
-			do {
-				struct wilc_cfg_hword *hw = &wl->cfg.hw[i];
-
-				if (hw->id == WID_NIL)
-					break;
-
-				if (hw->id == wid) {
-					hw->val = get_unaligned_le16(&info[4]);
-					break;
-				}
+			while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
 				i++;
-			} while (1);
+
+			if (cfg->hw[i].id == wid)
+				cfg->hw[i].val = get_unaligned_le16(&info[4]);
+
 			len = 4;
 			break;
 
 		case WID_INT:
-			do {
-				struct wilc_cfg_word *w = &wl->cfg.w[i];
-
-				if (w->id == WID_NIL)
-					break;
-
-				if (w->id == wid) {
-					w->val = get_unaligned_le32(&info[4]);
-					break;
-				}
+			while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
 				i++;
-			} while (1);
+
+			if (cfg->w[i].id == wid)
+				cfg->w[i].val = get_unaligned_le32(&info[4]);
+
 			len = 6;
 			break;
 
 		case WID_STR:
-			do {
-				if (wl->cfg.s[i].id == WID_NIL)
-					break;
-
-				if (wl->cfg.s[i].id == wid) {
-					memcpy(wl->cfg.s[i].str, &info[2],
-					       (info[2] + 2));
-					break;
-				}
+			while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
 				i++;
-			} while (1);
+
+			if (cfg->s[i].id == wid)
+				memcpy(cfg->s[i].str, &info[2], info[2] + 2);
+
 			len = 2 + info[2];
 			break;
 
@@ -223,16 +203,12 @@ static void wilc_wlan_parse_info_frame(struct wilc *wl, u8 *info)
 	if (len == 1 && wid == WID_STATUS) {
 		int i = 0;
 
-		do {
-			if (wl->cfg.b[i].id == WID_NIL)
-				break;
-
-			if (wl->cfg.b[i].id == wid) {
-				wl->cfg.b[i].val = info[3];
-				break;
-			}
+		while (wl->cfg.b[i].id != WID_NIL &&
+		       wl->cfg.b[i].id != wid)
 			i++;
-		} while (1);
+
+		if (wl->cfg.b[i].id == wid)
+			wl->cfg.b[i].val = info[3];
 	}
 }
 
@@ -244,7 +220,7 @@ static void wilc_wlan_parse_info_frame(struct wilc *wl, u8 *info)
 
 int wilc_wlan_cfg_set_wid(u8 *frame, u32 offset, u16 id, u8 *buf, int size)
 {
-	u8 type = (id >> 12) & 0xf;
+	u8 type = FIELD_GET(WILC_WID_TYPE, id);
 	int ret = 0;
 
 	switch (type) {
@@ -290,65 +266,47 @@ int wilc_wlan_cfg_get_wid(u8 *frame, u32 offset, u16 id)
 int wilc_wlan_cfg_get_val(struct wilc *wl, u16 wid, u8 *buffer,
 			  u32 buffer_size)
 {
-	u32 type = (wid >> 12) & 0xf;
+	u8 type = FIELD_GET(WILC_WID_TYPE, wid);
 	int i, ret = 0;
+	struct wilc_cfg *cfg = &wl->cfg;
 
 	i = 0;
 	if (type == CFG_BYTE_CMD) {
-		do {
-			if (wl->cfg.b[i].id == WID_NIL)
-				break;
-
-			if (wl->cfg.b[i].id == wid) {
-				memcpy(buffer, &wl->cfg.b[i].val, 1);
-				ret = 1;
-				break;
-			}
+		while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
 			i++;
-		} while (1);
+
+		if (cfg->b[i].id == wid) {
+			memcpy(buffer, &cfg->b[i].val, 1);
+			ret = 1;
+		}
 	} else if (type == CFG_HWORD_CMD) {
-		do {
-			if (wl->cfg.hw[i].id == WID_NIL)
-				break;
-
-			if (wl->cfg.hw[i].id == wid) {
-				memcpy(buffer, &wl->cfg.hw[i].val, 2);
-				ret = 2;
-				break;
-			}
+		while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
 			i++;
-		} while (1);
+
+		if (cfg->hw[i].id == wid) {
+			memcpy(buffer, &cfg->hw[i].val, 2);
+			ret = 2;
+		}
 	} else if (type == CFG_WORD_CMD) {
-		do {
-			if (wl->cfg.w[i].id == WID_NIL)
-				break;
-
-			if (wl->cfg.w[i].id == wid) {
-				memcpy(buffer, &wl->cfg.w[i].val, 4);
-				ret = 4;
-				break;
-			}
+		while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
 			i++;
-		} while (1);
+
+		if (cfg->w[i].id == wid) {
+			memcpy(buffer, &cfg->w[i].val, 4);
+			ret = 4;
+		}
 	} else if (type == CFG_STR_CMD) {
-		do {
-			u32 id = wl->cfg.s[i].id;
-
-			if (id == WID_NIL)
-				break;
-
-			if (id == wid) {
-				u16 size = get_unaligned_le16(wl->cfg.s[i].str);
-
-				if (buffer_size >= size) {
-					memcpy(buffer, &wl->cfg.s[i].str[2],
-					       size);
-					ret = size;
-				}
-				break;
-			}
+		while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
 			i++;
-		} while (1);
+
+		if (cfg->s[i].id == wid) {
+			u16 size = get_unaligned_le16(cfg->s[i].str);
+
+			if (buffer_size >= size) {
+				memcpy(buffer, &cfg->s[i].str[2], size);
+				ret = size;
+			}
+		}
 	}
 	return ret;
 }
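
For readers less familiar with the bitfield helpers used above: a minimal standalone sketch (plain C, not part of the patch) of what FIELD_GET(WILC_WID_TYPE, wid) computes for the GENMASK(15, 12) definition added in wlan.h. The WID value used here is made up for illustration.

  #include <stdio.h>
  #include <stdint.h>

  #define WILC_WID_TYPE_SHIFT	12
  #define WILC_WID_TYPE_MASK	(0xfu << WILC_WID_TYPE_SHIFT)	/* GENMASK(15, 12) == 0xf000 */

  /* FIELD_GET(mask, val): mask the field out and shift it down to bit 0 */
  static unsigned int wid_type(uint16_t wid)
  {
  	return (wid & WILC_WID_TYPE_MASK) >> WILC_WID_TYPE_SHIFT;
  }

  int main(void)
  {
  	uint16_t wid = 0x3001;			/* example WID word with type nibble 3 */
  	printf("type=%u\n", wid_type(wid));	/* prints "type=3" */
  	return 0;
  }
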
diff --git a/drivers/staging/wilc1000/wlan_if.h b/drivers/staging/wilc1000/wlan_if.h
index 7c7ee66..f85fd57 100644
--- a/drivers/staging/wilc1000/wlan_if.h
+++ b/drivers/staging/wilc1000/wlan_if.h
@@ -8,6 +8,7 @@
 #define WILC_WLAN_IF_H
 
 #include <linux/netdevice.h>
+#include "fw.h"
 
 /********************************************
  *
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 7350fe5..a8860d2 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -959,7 +959,7 @@ int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp)
 		}
 	}
 
-	return 0;
+	return result;
 }
 
 /*----------------------------------------------------------------
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 1354a15..6a38ff9 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -221,7 +221,6 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	ep = fc_seq_exch(seq);
 	lport = ep->lp;
 	if (cmd->was_ddp_setup) {
-		BUG_ON(!ep);
 		BUG_ON(!lport);
 		/*
 		 * Since DDP (Large Rx offload) was setup for this request,
diff --git a/drivers/tc/tc-driver.c b/drivers/tc/tc-driver.c
index 16b5bae..d45f2c1 100644
--- a/drivers/tc/tc-driver.c
+++ b/drivers/tc/tc-driver.c
@@ -56,8 +56,8 @@ EXPORT_SYMBOL(tc_unregister_driver);
  * system is in its list of supported devices.  Returns the matching
  * tc_device_id structure or %NULL if there is no match.
  */
-const struct tc_device_id *tc_match_device(struct tc_driver *tdrv,
-					   struct tc_dev *tdev)
+static const struct tc_device_id *tc_match_device(struct tc_driver *tdrv,
+						  struct tc_dev *tdev)
 {
 	const struct tc_device_id *id = tdrv->id_table;
 
@@ -71,7 +71,6 @@ const struct tc_device_id *tc_match_device(struct tc_driver *tdrv,
 	}
 	return NULL;
 }
-EXPORT_SYMBOL(tc_match_device);
 
 /**
  * tc_bus_match - Tell if a device structure has a matching
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index b830e0a..99698b8 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -534,13 +534,13 @@ static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
 	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
 }
 
-static optee_invoke_fn *get_invoke_func(struct device_node *np)
+static optee_invoke_fn *get_invoke_func(struct device *dev)
 {
 	const char *method;
 
-	pr_info("probing for conduit method from DT.\n");
+	pr_info("probing for conduit method.\n");
 
-	if (of_property_read_string(np, "method", &method)) {
+	if (device_property_read_string(dev, "method", &method)) {
 		pr_warn("missing \"method\" property\n");
 		return ERR_PTR(-ENXIO);
 	}
@@ -554,7 +554,37 @@ static optee_invoke_fn *get_invoke_func(struct device_node *np)
 	return ERR_PTR(-EINVAL);
 }
 
-static struct optee *optee_probe(struct device_node *np)
+static int optee_remove(struct platform_device *pdev)
+{
+	struct optee *optee = platform_get_drvdata(pdev);
+
+	/*
+	 * Ask OP-TEE to free all cached shared memory objects to decrease
+	 * reference counters and also avoid wild pointers in secure world
+	 * into the old shared memory range.
+	 */
+	optee_disable_shm_cache(optee);
+
+	/*
+	 * The two devices have to be unregistered before we can free the
+	 * other resources.
+	 */
+	tee_device_unregister(optee->supp_teedev);
+	tee_device_unregister(optee->teedev);
+
+	tee_shm_pool_free(optee->pool);
+	if (optee->memremaped_shm)
+		memunmap(optee->memremaped_shm);
+	optee_wait_queue_exit(&optee->wait_queue);
+	optee_supp_uninit(&optee->supp);
+	mutex_destroy(&optee->call_queue.mutex);
+
+	kfree(optee);
+
+	return 0;
+}
+
+static int optee_probe(struct platform_device *pdev)
 {
 	optee_invoke_fn *invoke_fn;
 	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
@@ -564,25 +594,25 @@ static struct optee *optee_probe(struct device_node *np)
 	u32 sec_caps;
 	int rc;
 
-	invoke_fn = get_invoke_func(np);
+	invoke_fn = get_invoke_func(&pdev->dev);
 	if (IS_ERR(invoke_fn))
-		return (void *)invoke_fn;
+		return PTR_ERR(invoke_fn);
 
 	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
 		pr_warn("api uid mismatch\n");
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	optee_msg_get_os_revision(invoke_fn);
 
 	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
 		pr_warn("api revision mismatch\n");
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
 		pr_warn("capabilities mismatch\n");
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	/*
@@ -598,7 +628,7 @@ static struct optee *optee_probe(struct device_node *np)
 		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
 
 	if (IS_ERR(pool))
-		return (void *)pool;
+		return PTR_ERR(pool);
 
 	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
 	if (!optee) {
@@ -643,7 +673,16 @@ static struct optee *optee_probe(struct device_node *np)
 	if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
 		pr_info("dynamic shared memory is enabled\n");
 
-	return optee;
+	platform_set_drvdata(pdev, optee);
+
+	rc = optee_enumerate_devices();
+	if (rc) {
+		optee_remove(pdev);
+		return rc;
+	}
+
+	pr_info("initialized driver\n");
+	return 0;
 err:
 	if (optee) {
 		/*
@@ -659,92 +698,28 @@ static struct optee *optee_probe(struct device_node *np)
 		tee_shm_pool_free(pool);
 	if (memremaped_shm)
 		memunmap(memremaped_shm);
-	return ERR_PTR(rc);
+	return rc;
 }
 
-static void optee_remove(struct optee *optee)
-{
-	/*
-	 * Ask OP-TEE to free all cached shared memory objects to decrease
-	 * reference counters and also avoid wild pointers in secure world
-	 * into the old shared memory range.
-	 */
-	optee_disable_shm_cache(optee);
-
-	/*
-	 * The two devices has to be unregistered before we can free the
-	 * other resources.
-	 */
-	tee_device_unregister(optee->supp_teedev);
-	tee_device_unregister(optee->teedev);
-
-	tee_shm_pool_free(optee->pool);
-	if (optee->memremaped_shm)
-		memunmap(optee->memremaped_shm);
-	optee_wait_queue_exit(&optee->wait_queue);
-	optee_supp_uninit(&optee->supp);
-	mutex_destroy(&optee->call_queue.mutex);
-
-	kfree(optee);
-}
-
-static const struct of_device_id optee_match[] = {
+static const struct of_device_id optee_dt_match[] = {
 	{ .compatible = "linaro,optee-tz" },
 	{},
 };
+MODULE_DEVICE_TABLE(of, optee_dt_match);
 
-static struct optee *optee_svc;
-
-static int __init optee_driver_init(void)
-{
-	struct device_node *fw_np = NULL;
-	struct device_node *np = NULL;
-	struct optee *optee = NULL;
-	int rc = 0;
-
-	/* Node is supposed to be below /firmware */
-	fw_np = of_find_node_by_name(NULL, "firmware");
-	if (!fw_np)
-		return -ENODEV;
-
-	np = of_find_matching_node(fw_np, optee_match);
-	if (!np || !of_device_is_available(np)) {
-		of_node_put(np);
-		return -ENODEV;
-	}
-
-	optee = optee_probe(np);
-	of_node_put(np);
-
-	if (IS_ERR(optee))
-		return PTR_ERR(optee);
-
-	rc = optee_enumerate_devices();
-	if (rc) {
-		optee_remove(optee);
-		return rc;
-	}
-
-	pr_info("initialized driver\n");
-
-	optee_svc = optee;
-
-	return 0;
-}
-module_init(optee_driver_init);
-
-static void __exit optee_driver_exit(void)
-{
-	struct optee *optee = optee_svc;
-
-	optee_svc = NULL;
-	if (optee)
-		optee_remove(optee);
-}
-module_exit(optee_driver_exit);
+static struct platform_driver optee_driver = {
+	.probe  = optee_probe,
+	.remove = optee_remove,
+	.driver = {
+		.name = "optee",
+		.of_match_table = optee_dt_match,
+	},
+};
+module_platform_driver(optee_driver);
 
 MODULE_AUTHOR("Linaro");
 MODULE_DESCRIPTION("OP-TEE driver");
 MODULE_SUPPORTED_DEVICE("");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:optee");
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 09ddcd0..937ac5a 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -71,11 +71,6 @@ static void tee_shm_op_release(struct dma_buf *dmabuf)
 	tee_shm_release(shm);
 }
 
-static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
-{
-	return NULL;
-}
-
 static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 {
 	struct tee_shm *shm = dmabuf->priv;
@@ -93,7 +88,6 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
 	.map_dma_buf = tee_shm_op_map_dma_buf,
 	.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
 	.release = tee_shm_op_release,
-	.map = tee_shm_op_map,
 	.mmap = tee_shm_op_mmap,
 };
 
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 8c4d124..7c447cd 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -21,8 +21,6 @@
 
 #include "thermal_core.h"
 
-#define TO_MCELSIUS(c)			((c) * 1000)
-
 /* Thermal Manager Control and Status Register */
 #define PMU_TDC0_SW_RST_MASK		(0x1 << 1)
 #define PMU_TM_DISABLE_OFFS		0
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index 75484d6..4322132 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -8,6 +8,7 @@
 #include <linux/init.h>
 #include <linux/acpi.h>
 #include <linux/thermal.h>
+#include <linux/units.h>
 #include "int340x_thermal_zone.h"
 
 static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
@@ -34,7 +35,7 @@ static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
 		*temp = (unsigned long)conv_temp * 10;
 	} else
 		/* _TMP returns the temperature in tenths of degrees Kelvin */
-		*temp = DECI_KELVIN_TO_MILLICELSIUS(tmp);
+		*temp = deci_kelvin_to_millicelsius(tmp);
 
 	return 0;
 }
@@ -116,7 +117,7 @@ static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
 
 	snprintf(name, sizeof(name), "PAT%d", trip);
 	status = acpi_execute_simple_method(d->adev->handle, name,
-			MILLICELSIUS_TO_DECI_KELVIN(temp));
+			millicelsius_to_deci_kelvin(temp));
 	if (ACPI_FAILURE(status))
 		return -EIO;
 
@@ -163,7 +164,7 @@ static int int340x_thermal_get_trip_config(acpi_handle handle, char *name,
 	if (ACPI_FAILURE(status))
 		return -EIO;
 
-	*temp = DECI_KELVIN_TO_MILLICELSIUS(r);
+	*temp = deci_kelvin_to_millicelsius(r);
 
 	return 0;
 }
diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c
index ed75a0c..56401fd 100644
--- a/drivers/thermal/intel/intel_pch_thermal.c
+++ b/drivers/thermal/intel/intel_pch_thermal.c
@@ -13,6 +13,7 @@
 #include <linux/pci.h>
 #include <linux/acpi.h>
 #include <linux/thermal.h>
+#include <linux/units.h>
 #include <linux/pm.h>
 
 /* Intel PCH thermal Device IDs */
@@ -93,7 +94,7 @@ static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
 		if (ACPI_SUCCESS(status)) {
 			unsigned long trip_temp;
 
-			trip_temp = DECI_KELVIN_TO_MILLICELSIUS(r);
+			trip_temp = deci_kelvin_to_millicelsius(r);
 			if (trip_temp) {
 				ptd->psv_temp = trip_temp;
 				ptd->psv_trip_id = *nr_trips;
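
The DECI_KELVIN_TO_MILLICELSIUS() macros above are replaced with the <linux/units.h> helpers; as a worked illustration of the arithmetic involved, here is a standalone sketch assuming the usual 273.15 K absolute-zero offset (not the kernel implementation itself):

  #include <stdio.h>

  /* deci-kelvin -> millidegree Celsius: 1 dK = 100 mK, and 0 degC = 273150 mK */
  static long deci_kelvin_to_mcelsius(long dk)
  {
  	return dk * 100 - 273150;
  }

  int main(void)
  {
  	/* e.g. an ACPI _TMP value of 3282 dK (328.2 K) -> 55050 mC, i.e. 55.05 C */
  	printf("%ld\n", deci_kelvin_to_mcelsius(3282));
  	return 0;
  }
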
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index 1cc5e6c..ad9e3bf 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -535,7 +535,7 @@ static int stm_thermal_probe(struct platform_device *pdev)
 	/* Configure and enable HW sensor */
 	ret = stm_thermal_prepare(sensor);
 	if (ret) {
-		dev_err(&pdev->dev, "Error preprare sensor: %d\n", ret);
+		dev_err(&pdev->dev, "Error preparing sensor: %d\n", ret);
 		return ret;
 	}
 
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index fd9adca..1eb757e 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
-menuconfig THUNDERBOLT
-	tristate "Thunderbolt support"
+menuconfig USB4
+	tristate "Unified support for USB4 and Thunderbolt"
 	depends on PCI
 	depends on X86 || COMPILE_TEST
 	select APPLE_PROPERTIES if EFI_STUB && X86
@@ -9,9 +9,10 @@
 	select CRYPTO_HASH
 	select NVMEM
 	help
-	  Thunderbolt Controller driver. This driver is required if you
-	  want to hotplug Thunderbolt devices on Apple hardware or on PCs
-	  with Intel Falcon Ridge or newer.
+	  USB4 and Thunderbolt driver. USB4 is the public specification
+	  based on the Thunderbolt 3 protocol. This driver is required if
+	  you want to hotplug Thunderbolt and USB4 compliant devices on
+	  Apple hardware or on PCs with Intel Falcon Ridge or newer.
 
 	  To compile this driver a module, choose M here. The module will be
 	  called thunderbolt.
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 001187c..eae28dd 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
+obj-${CONFIG_USB4} := thunderbolt.o
 thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
-thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o
+thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index fdd77bb..19db6cd 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -113,7 +113,16 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
 	return ret;
 }
 
-static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
+/**
+ * tb_switch_find_cap() - Find switch capability
+ * @sw: Switch to find the capability for
+ * @cap: Capability to look for
+ *
+ * Returns offset to start of capability or %-ENOENT if no such
+ * capability was found. Negative errno is returned if there was an
+ * error.
+ */
+int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
 {
 	int offset = sw->config.first_cap_offset;
 
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index d97813e..f77ceae 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -708,19 +708,26 @@ void tb_ctl_stop(struct tb_ctl *ctl)
 /* public interface, commands */
 
 /**
- * tb_cfg_error() - send error packet
+ * tb_cfg_ack_plug() - Ack hot plug/unplug event
+ * @ctl: Control channel to use
+ * @route: Router that originated the event
+ * @port: Port where the hot plug/unplug happened
+ * @unplug: Ack hot plug or unplug
  *
- * Return: Returns 0 on success or an error code on failure.
+ * Call this in response to a hot plug/unplug event to ack it.
+ * Returns %0 on success or an error code on failure.
  */
-int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
-		 enum tb_cfg_error error)
+int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
 {
 	struct cfg_error_pkg pkg = {
 		.header = tb_cfg_make_header(route),
 		.port = port,
-		.error = error,
+		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
+		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
+			     : TB_CFG_ERROR_PG_HOT_PLUG,
 	};
-	tb_ctl_dbg(ctl, "resetting error on %llx:%x.\n", route, port);
+	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
+		   unplug ? "un" : "", route, port);
 	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
 }
 
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index 2f1a1e1..97cb03b 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -123,8 +123,7 @@ static inline struct tb_cfg_header tb_cfg_make_header(u64 route)
 	return header;
 }
 
-int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
-		 enum tb_cfg_error error);
+int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug);
 struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
 				  int timeout_msec);
 struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 8dd7de0..921d164 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -131,12 +131,51 @@ static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
 }
 
 /**
+ * tb_eeprom_get_drom_offset - get drom offset within eeprom
+ */
+static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
+{
+	struct tb_cap_plug_events cap;
+	int res;
+
+	if (!sw->cap_plug_events) {
+		tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
+		return -ENODEV;
+	}
+	res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
+			     sizeof(cap) / 4);
+	if (res)
+		return res;
+
+	if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
+		tb_sw_warn(sw, "no NVM\n");
+		return -ENODEV;
+	}
+
+	if (cap.drom_offset > 0xffff) {
+		tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
+				cap.drom_offset);
+		return -ENXIO;
+	}
+	*offset = cap.drom_offset;
+	return 0;
+}
+
+/**
  * tb_eeprom_read_n - read count bytes from offset into val
  */
 static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
 		size_t count)
 {
+	u16 drom_offset;
 	int i, res;
+
+	res = tb_eeprom_get_drom_offset(sw, &drom_offset);
+	if (res)
+		return res;
+
+	offset += drom_offset;
+
 	res = tb_eeprom_active(sw, true);
 	if (res)
 		return res;
@@ -239,36 +278,6 @@ struct tb_drom_entry_port {
 
 
 /**
- * tb_eeprom_get_drom_offset - get drom offset within eeprom
- */
-static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
-{
-	struct tb_cap_plug_events cap;
-	int res;
-	if (!sw->cap_plug_events) {
-		tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
-		return -ENOSYS;
-	}
-	res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
-			     sizeof(cap) / 4);
-	if (res)
-		return res;
-
-	if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
-		tb_sw_warn(sw, "no NVM\n");
-		return -ENOSYS;
-	}
-
-	if (cap.drom_offset > 0xffff) {
-		tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
-				cap.drom_offset);
-		return -ENXIO;
-	}
-	*offset = cap.drom_offset;
-	return 0;
-}
-
-/**
  * tb_drom_read_uid_only - read uid directly from drom
  *
  * Does not use the cached copy in sw->drom. Used during resume to check switch
@@ -277,17 +286,11 @@ static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
 int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
 {
 	u8 data[9];
-	u16 drom_offset;
 	u8 crc;
-	int res = tb_eeprom_get_drom_offset(sw, &drom_offset);
-	if (res)
-		return res;
-
-	if (drom_offset == 0)
-		return -ENODEV;
+	int res;
 
 	/* read uid */
-	res = tb_eeprom_read_n(sw, drom_offset, data, 9);
+	res = tb_eeprom_read_n(sw, 0, data, 9);
 	if (res)
 		return res;
 
@@ -484,12 +487,42 @@ static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
 	return ret;
 }
 
+static int usb4_copy_host_drom(struct tb_switch *sw, u16 *size)
+{
+	int ret;
+
+	ret = usb4_switch_drom_read(sw, 14, size, sizeof(*size));
+	if (ret)
+		return ret;
+
+	/* Size includes CRC8 + UID + CRC32 */
+	*size += 1 + 8 + 4;
+	sw->drom = kzalloc(*size, GFP_KERNEL);
+	if (!sw->drom)
+		return -ENOMEM;
+
+	ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
+	if (ret) {
+		kfree(sw->drom);
+		sw->drom = NULL;
+	}
+
+	return ret;
+}
+
+static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
+			  size_t count)
+{
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_drom_read(sw, offset, val, count);
+	return tb_eeprom_read_n(sw, offset, val, count);
+}
+
 /**
  * tb_drom_read - copy drom to sw->drom and parse it
  */
 int tb_drom_read(struct tb_switch *sw)
 {
-	u16 drom_offset;
 	u16 size;
 	u32 crc;
 	struct tb_drom_header *header;
@@ -510,18 +543,26 @@ int tb_drom_read(struct tb_switch *sw)
 			goto parse;
 
 		/*
-		 * The root switch contains only a dummy drom (header only,
-		 * no entries). Hardcode the configuration here.
+		 * USB4 hosts may support reading DROM through router
+		 * operations.
 		 */
-		tb_drom_read_uid_only(sw, &sw->uid);
+		if (tb_switch_is_usb4(sw)) {
+			usb4_switch_read_uid(sw, &sw->uid);
+			if (!usb4_copy_host_drom(sw, &size))
+				goto parse;
+		} else {
+			/*
+			 * The root switch contains only a dummy drom
+			 * (header only, no entries). Hardcode the
+			 * configuration here.
+			 */
+			tb_drom_read_uid_only(sw, &sw->uid);
+		}
+
 		return 0;
 	}
 
-	res = tb_eeprom_get_drom_offset(sw, &drom_offset);
-	if (res)
-		return res;
-
-	res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2);
+	res = tb_drom_read_n(sw, 14, (u8 *) &size, 2);
 	if (res)
 		return res;
 	size &= 0x3ff;
@@ -535,7 +576,7 @@ int tb_drom_read(struct tb_switch *sw)
 	sw->drom = kzalloc(size, GFP_KERNEL);
 	if (!sw->drom)
 		return -ENOMEM;
-	res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size);
+	res = tb_drom_read_n(sw, 0, sw->drom, size);
 	if (res)
 		goto err;
 
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 641b21b..1be491e 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1271,6 +1271,9 @@ static struct pci_device_id nhi_ids[] = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
 
+	/* Any USB4 compliant host */
+	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
+
 	{ 0,}
 };
 
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index b7b9739..5d276ee 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -74,4 +74,6 @@ extern const struct tb_nhi_ops icl_nhi_ops;
 #define PCI_DEVICE_ID_INTEL_ICL_NHI1			0x8a0d
 #define PCI_DEVICE_ID_INTEL_ICL_NHI0			0x8a17
 
+#define PCI_CLASS_SERIAL_USB_USB4			0x0c0340
+
 #endif
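
As an aside on the new PCI_DEVICE_CLASS() match above: the 24-bit class value 0x0c0340 splits into base class, subclass and programming interface, where prog-if 0x40 denotes a USB4 host interface per the PCI class code convention. A tiny illustrative decomposition (plain C, not kernel code):

  #include <stdio.h>

  int main(void)
  {
  	unsigned int cls = 0x0c0340;		/* PCI_CLASS_SERIAL_USB_USB4 */

  	printf("base=%02x sub=%02x prog-if=%02x\n",
  	       (cls >> 16) & 0xff,		/* 0x0c: serial bus controller */
  	       (cls >> 8) & 0xff,		/* 0x03: USB controller */
  	       cls & 0xff);			/* 0x40: USB4 host interface */
  	return 0;
  }
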
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index ca86a8e..ad5479f 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -163,10 +163,12 @@ static int nvm_validate_and_write(struct tb_switch *sw)
 		image_size -= hdr_size;
 	}
 
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_nvm_write(sw, 0, buf, image_size);
 	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
 }
 
-static int nvm_authenticate_host(struct tb_switch *sw)
+static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
 {
 	int ret = 0;
 
@@ -206,7 +208,7 @@ static int nvm_authenticate_host(struct tb_switch *sw)
 	return ret;
 }
 
-static int nvm_authenticate_device(struct tb_switch *sw)
+static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
 {
 	int ret, retries = 10;
 
@@ -251,6 +253,78 @@ static int nvm_authenticate_device(struct tb_switch *sw)
 	return -ETIMEDOUT;
 }
 
+static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
+{
+	struct pci_dev *root_port;
+
+	/*
+	 * During host router NVM upgrade we should not allow root port to
+	 * go into D3cold because some root ports cannot trigger PME
+	 * itself. To be on the safe side keep the root port in D0 during
+	 * the whole upgrade process.
+	 */
+	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+	if (root_port)
+		pm_runtime_get_noresume(&root_port->dev);
+}
+
+static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
+{
+	struct pci_dev *root_port;
+
+	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+	if (root_port)
+		pm_runtime_put(&root_port->dev);
+}
+
+static inline bool nvm_readable(struct tb_switch *sw)
+{
+	if (tb_switch_is_usb4(sw)) {
+		/*
+		 * USB4 devices must support NVM operations but it is
+		 * optional for hosts. Therefore we query the NVM sector
+		 * size here and if it is supported assume NVM
+		 * operations are implemented.
+		 */
+		return usb4_switch_nvm_sector_size(sw) > 0;
+	}
+
+	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
+	return !!sw->dma_port;
+}
+
+static inline bool nvm_upgradeable(struct tb_switch *sw)
+{
+	if (sw->no_nvm_upgrade)
+		return false;
+	return nvm_readable(sw);
+}
+
+static inline int nvm_read(struct tb_switch *sw, unsigned int address,
+			   void *buf, size_t size)
+{
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_nvm_read(sw, address, buf, size);
+	return dma_port_flash_read(sw->dma_port, address, buf, size);
+}
+
+static int nvm_authenticate(struct tb_switch *sw)
+{
+	int ret;
+
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_nvm_authenticate(sw);
+
+	if (!tb_route(sw)) {
+		nvm_authenticate_start_dma_port(sw);
+		ret = nvm_authenticate_host_dma_port(sw);
+	} else {
+		ret = nvm_authenticate_device_dma_port(sw);
+	}
+
+	return ret;
+}
+
 static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
 			      size_t bytes)
 {
@@ -264,7 +338,7 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
 		goto out;
 	}
 
-	ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
+	ret = nvm_read(sw, offset, val, bytes);
 	mutex_unlock(&sw->tb->lock);
 
 out:
@@ -341,9 +415,21 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
 	u32 val;
 	int ret;
 
-	if (!sw->dma_port)
+	if (!nvm_readable(sw))
 		return 0;
 
+	/*
+	 * The NVM format of non-Intel hardware is not known so
+	 * currently restrict NVM upgrade for Intel hardware. We may
+	 * relax this in the future when we learn other NVM formats.
+	 */
+	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) {
+		dev_info(&sw->dev,
+			 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
+			 sw->config.vendor_id);
+		return 0;
+	}
+
 	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
 	if (!nvm)
 		return -ENOMEM;
@@ -358,8 +444,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
 	if (!sw->safe_mode) {
 		u32 nvm_size, hdr_size;
 
-		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
-					  sizeof(val));
+		ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
 		if (ret)
 			goto err_ida;
 
@@ -367,8 +452,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
 		nvm_size = (SZ_1M << (val & 7)) / 8;
 		nvm_size = (nvm_size - hdr_size) / 2;
 
-		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
-					  sizeof(val));
+		ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
 		if (ret)
 			goto err_ida;
 
@@ -620,6 +704,24 @@ int tb_port_clear_counter(struct tb_port *port, int counter)
 }
 
 /**
+ * tb_port_unlock() - Unlock downstream port
+ * @port: Port to unlock
+ *
+ * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
+ * downstream router accessible for CM.
+ */
+int tb_port_unlock(struct tb_port *port)
+{
+	if (tb_switch_is_icm(port->sw))
+		return 0;
+	if (!tb_port_is_null(port))
+		return -EINVAL;
+	if (tb_switch_is_usb4(port->sw))
+		return usb4_port_unlock(port);
+	return 0;
+}
+
+/**
  * tb_init_port() - initialize a port
  *
  * This is a helper method for tb_switch_alloc. Does not check or initialize
@@ -650,6 +752,10 @@ static int tb_init_port(struct tb_port *port)
 			port->cap_phy = cap;
 		else
 			tb_port_WARN(port, "non switch port without a PHY\n");
+
+		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
+		if (cap > 0)
+			port->cap_usb4 = cap;
 	} else if (port->port != 0) {
 		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
 		if (cap > 0)
@@ -936,12 +1042,47 @@ bool tb_port_is_enabled(struct tb_port *port)
 	case TB_TYPE_DP_HDMI_OUT:
 		return tb_dp_port_is_enabled(port);
 
+	case TB_TYPE_USB3_UP:
+	case TB_TYPE_USB3_DOWN:
+		return tb_usb3_port_is_enabled(port);
+
 	default:
 		return false;
 	}
 }
 
 /**
+ * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
+ * @port: USB3 adapter port to check
+ */
+bool tb_usb3_port_is_enabled(struct tb_port *port)
+{
+	u32 data;
+
+	if (tb_port_read(port, &data, TB_CFG_PORT,
+			 port->cap_adap + ADP_USB3_CS_0, 1))
+		return false;
+
+	return !!(data & ADP_USB3_CS_0_PE);
+}
+
+/**
+ * tb_usb3_port_enable() - Enable USB3 adapter port
+ * @port: USB3 adapter port to enable
+ * @enable: Enable/disable the USB3 adapter
+ */
+int tb_usb3_port_enable(struct tb_port *port, bool enable)
+{
+	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
+			  : ADP_USB3_CS_0_V;
+
+	if (!port->cap_adap)
+		return -ENXIO;
+	return tb_port_write(port, &word, TB_CFG_PORT,
+			     port->cap_adap + ADP_USB3_CS_0, 1);
+}
+
+/**
  * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
  * @port: PCIe port to check
  */
@@ -1088,20 +1229,38 @@ int tb_dp_port_enable(struct tb_port *port, bool enable)
 
 /* switch utility functions */
 
-static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
+static const char *tb_switch_generation_name(const struct tb_switch *sw)
 {
-	tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
-	       sw->vendor_id, sw->device_id, sw->revision,
-	       sw->thunderbolt_version);
-	tb_dbg(tb, "  Max Port Number: %d\n", sw->max_port_number);
+	switch (sw->generation) {
+	case 1:
+		return "Thunderbolt 1";
+	case 2:
+		return "Thunderbolt 2";
+	case 3:
+		return "Thunderbolt 3";
+	case 4:
+		return "USB4";
+	default:
+		return "Unknown";
+	}
+}
+
+static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
+{
+	const struct tb_regs_switch_header *regs = &sw->config;
+
+	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
+	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
+	       regs->revision, regs->thunderbolt_version);
+	tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
 	tb_dbg(tb, "  Config:\n");
 	tb_dbg(tb,
 		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
-	       sw->upstream_port_number, sw->depth,
-	       (((u64) sw->route_hi) << 32) | sw->route_lo,
-	       sw->enabled, sw->plug_events_delay);
+	       regs->upstream_port_number, regs->depth,
+	       (((u64) regs->route_hi) << 32) | regs->route_lo,
+	       regs->enabled, regs->plug_events_delay);
 	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
-	       sw->__unknown1, sw->__unknown4);
+	       regs->__unknown1, regs->__unknown4);
 }
 
 /**
@@ -1148,6 +1307,10 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
 	if (res)
 		return res;
 
+	/* Plug events are always enabled in USB4 */
+	if (tb_switch_is_usb4(sw))
+		return 0;
+
 	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
 	if (res)
 		return res;
@@ -1359,30 +1522,6 @@ static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
 static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
 
-static void nvm_authenticate_start(struct tb_switch *sw)
-{
-	struct pci_dev *root_port;
-
-	/*
-	 * During host router NVM upgrade we should not allow root port to
-	 * go into D3cold because some root ports cannot trigger PME
-	 * itself. To be on the safe side keep the root port in D0 during
-	 * the whole upgrade process.
-	 */
-	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
-	if (root_port)
-		pm_runtime_get_noresume(&root_port->dev);
-}
-
-static void nvm_authenticate_complete(struct tb_switch *sw)
-{
-	struct pci_dev *root_port;
-
-	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
-	if (root_port)
-		pm_runtime_put(&root_port->dev);
-}
-
 static ssize_t nvm_authenticate_show(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
@@ -1431,17 +1570,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
 			goto exit_unlock;
 
 		sw->nvm->authenticating = true;
-
-		if (!tb_route(sw)) {
-			/*
-			 * Keep root port from suspending as long as the
-			 * NVM upgrade process is running.
-			 */
-			nvm_authenticate_start(sw);
-			ret = nvm_authenticate_host(sw);
-		} else {
-			ret = nvm_authenticate_device(sw);
-		}
+		ret = nvm_authenticate(sw);
 	}
 
 exit_unlock:
@@ -1556,11 +1685,11 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
 			return attr->mode;
 		return 0;
 	} else if (attr == &dev_attr_nvm_authenticate.attr) {
-		if (sw->dma_port && !sw->no_nvm_upgrade)
+		if (nvm_upgradeable(sw))
 			return attr->mode;
 		return 0;
 	} else if (attr == &dev_attr_nvm_version.attr) {
-		if (sw->dma_port)
+		if (nvm_readable(sw))
 			return attr->mode;
 		return 0;
 	} else if (attr == &dev_attr_boot.attr) {
@@ -1672,6 +1801,9 @@ static int tb_switch_get_generation(struct tb_switch *sw)
 		return 3;
 
 	default:
+		if (tb_switch_is_usb4(sw))
+			return 4;
+
 		/*
 		 * For unknown switches assume generation to be 1 to be
 		 * on the safe side.
@@ -1682,6 +1814,19 @@ static int tb_switch_get_generation(struct tb_switch *sw)
 	}
 }
 
+static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
+{
+	int max_depth;
+
+	if (tb_switch_is_usb4(sw) ||
+	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
+		max_depth = USB4_SWITCH_MAX_DEPTH;
+	else
+		max_depth = TB_SWITCH_MAX_DEPTH;
+
+	return depth > max_depth;
+}
+
 /**
  * tb_switch_alloc() - allocate a switch
  * @tb: Pointer to the owning domain
@@ -1703,10 +1848,16 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
 	int upstream_port;
 	int i, ret, depth;
 
-	/* Make sure we do not exceed maximum topology limit */
+	/* Unlock the downstream port so we can access the switch below */
+	if (route) {
+		struct tb_switch *parent_sw = tb_to_switch(parent);
+		struct tb_port *down;
+
+		down = tb_port_at(route, parent_sw);
+		tb_port_unlock(down);
+	}
+
 	depth = tb_route_length(route);
-	if (depth > TB_SWITCH_MAX_DEPTH)
-		return ERR_PTR(-EADDRNOTAVAIL);
 
 	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
 	if (upstream_port < 0)
@@ -1721,8 +1872,10 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
 	if (ret)
 		goto err_free_sw_ports;
 
+	sw->generation = tb_switch_get_generation(sw);
+
 	tb_dbg(tb, "current switch config:\n");
-	tb_dump_switch(tb, &sw->config);
+	tb_dump_switch(tb, sw);
 
 	/* configure switch */
 	sw->config.upstream_port_number = upstream_port;
@@ -1731,6 +1884,12 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
 	sw->config.route_lo = lower_32_bits(route);
 	sw->config.enabled = 0;
 
+	/* Make sure we do not exceed maximum topology limit */
+	if (tb_switch_exceeds_max_depth(sw, depth)) {
+		ret = -EADDRNOTAVAIL;
+		goto err_free_sw_ports;
+	}
+
 	/* initialize ports */
 	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
 				GFP_KERNEL);
@@ -1745,14 +1904,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
 		sw->ports[i].port = i;
 	}
 
-	sw->generation = tb_switch_get_generation(sw);
-
 	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
-	if (ret < 0) {
-		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
-		goto err_free_sw_ports;
-	}
-	sw->cap_plug_events = ret;
+	if (ret > 0)
+		sw->cap_plug_events = ret;
 
 	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
 	if (ret > 0)
@@ -1823,7 +1977,8 @@ tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
  *
  * Call this function before the switch is added to the system. It will
  * upload configuration to the switch and makes it available for the
- * connection manager to use.
+ * connection manager to use. Can be called for the switch again after
+ * resume from low power states to re-initialize it.
  *
  * Return: %0 in case of success and negative errno in case of failure
  */
@@ -1834,21 +1989,50 @@ int tb_switch_configure(struct tb_switch *sw)
 	int ret;
 
 	route = tb_route(sw);
-	tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
-	       route, tb_route_length(route), sw->config.upstream_port_number);
 
-	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
-		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
-			   sw->config.vendor_id);
+	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
+	       sw->config.enabled ? "restoring " : "initializing", route,
+	       tb_route_length(route), sw->config.upstream_port_number);
 
 	sw->config.enabled = 1;
 
-	/* upload configuration */
-	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
-	if (ret)
-		return ret;
+	if (tb_switch_is_usb4(sw)) {
+		/*
+		 * For USB4 devices, we need to program the CM version
+		 * accordingly so that it knows to expose all the
+		 * additional capabilities.
+		 */
+		sw->config.cmuv = USB4_VERSION_1_0;
 
-	ret = tb_lc_configure_link(sw);
+		/* Enumerate the switch */
+		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
+				  ROUTER_CS_1, 4);
+		if (ret)
+			return ret;
+
+		ret = usb4_switch_setup(sw);
+		if (ret)
+			return ret;
+
+		ret = usb4_switch_configure_link(sw);
+	} else {
+		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
+			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
+				   sw->config.vendor_id);
+
+		if (!sw->cap_plug_events) {
+			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
+			return -ENODEV;
+		}
+
+		/* Enumerate the switch */
+		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
+				  ROUTER_CS_1, 3);
+		if (ret)
+			return ret;
+
+		ret = tb_lc_configure_link(sw);
+	}
 	if (ret)
 		return ret;
 
@@ -1857,18 +2041,32 @@ int tb_switch_configure(struct tb_switch *sw)
 
 static int tb_switch_set_uuid(struct tb_switch *sw)
 {
+	bool uid = false;
 	u32 uuid[4];
 	int ret;
 
 	if (sw->uuid)
 		return 0;
 
-	/*
-	 * The newer controllers include fused UUID as part of link
-	 * controller specific registers
-	 */
-	ret = tb_lc_read_uuid(sw, uuid);
-	if (ret) {
+	if (tb_switch_is_usb4(sw)) {
+		ret = usb4_switch_read_uid(sw, &sw->uid);
+		if (ret)
+			return ret;
+		uid = true;
+	} else {
+		/*
+		 * The newer controllers include fused UUID as part of
+		 * link controller specific registers
+		 */
+		ret = tb_lc_read_uuid(sw, uuid);
+		if (ret) {
+			if (ret != -EINVAL)
+				return ret;
+			uid = true;
+		}
+	}
+
+	if (uid) {
 		/*
 		 * ICM generates UUID based on UID and fills the upper
 		 * two words with ones. This is not strictly following
@@ -1935,7 +2133,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
 	nvm_get_auth_status(sw, &status);
 	if (status) {
 		if (!tb_route(sw))
-			nvm_authenticate_complete(sw);
+			nvm_authenticate_complete_dma_port(sw);
 		return 0;
 	}
 
@@ -1950,7 +2148,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
 
 	/* Now we can allow root port to suspend again */
 	if (!tb_route(sw))
-		nvm_authenticate_complete(sw);
+		nvm_authenticate_complete_dma_port(sw);
 
 	if (status) {
 		tb_sw_info(sw, "switch flash authentication failed\n");
@@ -2004,6 +2202,8 @@ static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
 	if (!up->dual_link_port || !up->dual_link_port->remote)
 		return false;
 
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_lane_bonding_possible(sw);
 	return tb_lc_lane_bonding_possible(sw);
 }
 
@@ -2175,6 +2375,10 @@ int tb_switch_add(struct tb_switch *sw)
 		ret = tb_switch_update_link_attributes(sw);
 		if (ret)
 			return ret;
+
+		ret = tb_switch_tmu_init(sw);
+		if (ret)
+			return ret;
 	}
 
 	ret = device_add(&sw->dev);
@@ -2240,7 +2444,11 @@ void tb_switch_remove(struct tb_switch *sw)
 
 	if (!sw->is_unplugged)
 		tb_plug_events_active(sw, false);
-	tb_lc_unconfigure_link(sw);
+
+	if (tb_switch_is_usb4(sw))
+		usb4_switch_unconfigure_link(sw);
+	else
+		tb_lc_unconfigure_link(sw);
 
 	tb_switch_nvm_remove(sw);
 
@@ -2298,7 +2506,10 @@ int tb_switch_resume(struct tb_switch *sw)
 			return err;
 		}
 
-		err = tb_drom_read_uid_only(sw, &uid);
+		if (tb_switch_is_usb4(sw))
+			err = usb4_switch_read_uid(sw, &uid);
+		else
+			err = tb_drom_read_uid_only(sw, &uid);
 		if (err) {
 			tb_sw_warn(sw, "uid read failed\n");
 			return err;
@@ -2311,16 +2522,7 @@ int tb_switch_resume(struct tb_switch *sw)
 		}
 	}
 
-	/* upload configuration */
-	err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
-	if (err)
-		return err;
-
-	err = tb_lc_configure_link(sw);
-	if (err)
-		return err;
-
-	err = tb_plug_events_active(sw, true);
+	err = tb_switch_configure(sw);
 	if (err)
 		return err;
 
@@ -2336,8 +2538,14 @@ int tb_switch_resume(struct tb_switch *sw)
 				tb_sw_set_unplugged(port->remote->sw);
 			else if (port->xdomain)
 				port->xdomain->is_unplugged = true;
-		} else if (tb_port_has_remote(port)) {
-			if (tb_switch_resume(port->remote->sw)) {
+		} else if (tb_port_has_remote(port) || port->xdomain) {
+			/*
+			 * Always unlock the port so the downstream
+			 * switch/domain is accessible.
+			 */
+			if (tb_port_unlock(port))
+				tb_port_warn(port, "failed to unlock port\n");
+			if (port->remote && tb_switch_resume(port->remote->sw)) {
 				tb_port_warn(port,
 					     "lost during suspend, disconnecting\n");
 				tb_sw_set_unplugged(port->remote->sw);
@@ -2361,7 +2569,10 @@ void tb_switch_suspend(struct tb_switch *sw)
 			tb_switch_suspend(port->remote->sw);
 	}
 
-	tb_lc_set_sleep(sw);
+	if (tb_switch_is_usb4(sw))
+		usb4_switch_set_sleep(sw);
+	else
+		tb_lc_set_sleep(sw);
 }
 
 /**
@@ -2374,6 +2585,8 @@ void tb_switch_suspend(struct tb_switch *sw)
  */
 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
 {
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_query_dp_resource(sw, in);
 	return tb_lc_dp_sink_query(sw, in);
 }
 
@@ -2388,6 +2601,8 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
  */
 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
 {
+	if (tb_switch_is_usb4(sw))
+		return usb4_switch_alloc_dp_resource(sw, in);
 	return tb_lc_dp_sink_alloc(sw, in);
 }
 
@@ -2401,10 +2616,16 @@ int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
  */
 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
 {
-	if (tb_lc_dp_sink_dealloc(sw, in)) {
+	int ret;
+
+	if (tb_switch_is_usb4(sw))
+		ret = usb4_switch_dealloc_dp_resource(sw, in);
+	else
+		ret = tb_lc_dp_sink_dealloc(sw, in);
+
+	if (ret)
 		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
 			   in->port);
-	}
 }
 
 struct tb_sw_lookup {
@@ -2517,6 +2738,24 @@ struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
 	return NULL;
 }
 
+/**
+ * tb_switch_find_port() - return the first port of @type on @sw or NULL
+ * @sw: Switch to find the port from
+ * @type: Port type to look for
+ */
+struct tb_port *tb_switch_find_port(struct tb_switch *sw,
+				    enum tb_port_type type)
+{
+	struct tb_port *port;
+
+	tb_switch_for_each_port(sw, port) {
+		if (port->config.type == type)
+			return port;
+	}
+
+	return NULL;
+}
+
 void tb_switch_exit(void)
 {
 	ida_destroy(&nvm_ida);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index ea8727f..107cd23 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -111,6 +111,10 @@ static void tb_discover_tunnels(struct tb_switch *sw)
 			tunnel = tb_tunnel_discover_pci(tb, port);
 			break;
 
+		case TB_TYPE_USB3_DOWN:
+			tunnel = tb_tunnel_discover_usb3(tb, port);
+			break;
+
 		default:
 			break;
 		}
@@ -158,6 +162,137 @@ static void tb_scan_xdomain(struct tb_port *port)
 	}
 }
 
+static int tb_enable_tmu(struct tb_switch *sw)
+{
+	int ret;
+
+	/* If it is already enabled in correct mode, don't touch it */
+	if (tb_switch_tmu_is_enabled(sw))
+		return 0;
+
+	ret = tb_switch_tmu_disable(sw);
+	if (ret)
+		return ret;
+
+	ret = tb_switch_tmu_post_time(sw);
+	if (ret)
+		return ret;
+
+	return tb_switch_tmu_enable(sw);
+}
+
+/**
+ * tb_find_unused_port() - return the first inactive port on @sw
+ * @sw: Switch to find the port on
+ * @type: Port type to look for
+ */
+static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
+					   enum tb_port_type type)
+{
+	struct tb_port *port;
+
+	tb_switch_for_each_port(sw, port) {
+		if (tb_is_upstream_port(port))
+			continue;
+		if (port->config.type != type)
+			continue;
+		if (!port->cap_adap)
+			continue;
+		if (tb_port_is_enabled(port))
+			continue;
+		return port;
+	}
+	return NULL;
+}
+
+static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
+					const struct tb_port *port)
+{
+	struct tb_port *down;
+
+	down = usb4_switch_map_usb3_down(sw, port);
+	if (down) {
+		if (WARN_ON(!tb_port_is_usb3_down(down)))
+			goto out;
+		if (WARN_ON(tb_usb3_port_is_enabled(down)))
+			goto out;
+
+		return down;
+	}
+
+out:
+	return tb_find_unused_port(sw, TB_TYPE_USB3_DOWN);
+}
+
+static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
+{
+	struct tb_switch *parent = tb_switch_parent(sw);
+	struct tb_port *up, *down, *port;
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel;
+
+	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
+	if (!up)
+		return 0;
+
+	/*
+	 * Look up available down port. Since we are chaining it should
+	 * be found right above this switch.
+	 */
+	port = tb_port_at(tb_route(sw), parent);
+	down = tb_find_usb3_down(parent, port);
+	if (!down)
+		return 0;
+
+	if (tb_route(parent)) {
+		struct tb_port *parent_up;
+		/*
+		 * Check first that the parent switch has its upstream USB3
+		 * port enabled. Otherwise the chain is not complete and
+		 * there is no point setting up a new tunnel.
+		 */
+		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
+		if (!parent_up || !tb_port_is_enabled(parent_up))
+			return 0;
+	}
+
+	tunnel = tb_tunnel_alloc_usb3(tb, up, down);
+	if (!tunnel)
+		return -ENOMEM;
+
+	if (tb_tunnel_activate(tunnel)) {
+		tb_port_info(up,
+			     "USB3 tunnel activation failed, aborting\n");
+		tb_tunnel_free(tunnel);
+		return -EIO;
+	}
+
+	list_add_tail(&tunnel->list, &tcm->tunnel_list);
+	return 0;
+}
+
+static int tb_create_usb3_tunnels(struct tb_switch *sw)
+{
+	struct tb_port *port;
+	int ret;
+
+	if (tb_route(sw)) {
+		ret = tb_tunnel_usb3(sw->tb, sw);
+		if (ret)
+			return ret;
+	}
+
+	tb_switch_for_each_port(sw, port) {
+		if (!tb_port_has_remote(port))
+			continue;
+		ret = tb_create_usb3_tunnels(port->remote->sw);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static void tb_scan_port(struct tb_port *port);
 
 /**
@@ -257,6 +392,18 @@ static void tb_scan_port(struct tb_port *port)
 	if (tb_switch_lane_bonding_enable(sw))
 		tb_sw_warn(sw, "failed to enable lane bonding\n");
 
+	if (tb_enable_tmu(sw))
+		tb_sw_warn(sw, "failed to enable TMU\n");
+
+	/*
+	 * Create USB 3.x tunnels only when the switch is plugged into the
+	 * domain. This is because we scan the domain also during discovery
+	 * and want to discover existing USB 3.x tunnels before we create
+	 * any new.
+	 */
+	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
+		tb_sw_warn(sw, "USB3 tunnel creation failed\n");
+
 	tb_scan_switch(sw);
 }
 
@@ -338,57 +485,18 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
 	}
 }
 
-/**
- * tb_find_port() - return the first port of @type on @sw or NULL
- * @sw: Switch to find the port from
- * @type: Port type to look for
- */
-static struct tb_port *tb_find_port(struct tb_switch *sw,
-				    enum tb_port_type type)
-{
-	struct tb_port *port;
-
-	tb_switch_for_each_port(sw, port) {
-		if (port->config.type == type)
-			return port;
-	}
-
-	return NULL;
-}
-
-/**
- * tb_find_unused_port() - return the first inactive port on @sw
- * @sw: Switch to find the port on
- * @type: Port type to look for
- */
-static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
-					   enum tb_port_type type)
-{
-	struct tb_port *port;
-
-	tb_switch_for_each_port(sw, port) {
-		if (tb_is_upstream_port(port))
-			continue;
-		if (port->config.type != type)
-			continue;
-		if (port->cap_adap)
-			continue;
-		if (tb_port_is_enabled(port))
-			continue;
-		return port;
-	}
-	return NULL;
-}
-
 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
 					 const struct tb_port *port)
 {
+	struct tb_port *down = NULL;
+
 	/*
 	 * To keep plugging devices consistently in the same PCIe
-	 * hierarchy, do mapping here for root switch downstream PCIe
-	 * ports.
+	 * hierarchy, do mapping here for switch downstream PCIe ports.
 	 */
-	if (!tb_route(sw)) {
+	if (tb_switch_is_usb4(sw)) {
+		down = usb4_switch_map_pcie_down(sw, port);
+	} else if (!tb_route(sw)) {
 		int phy_port = tb_phy_port_from_link(port->port);
 		int index;
 
@@ -409,12 +517,17 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
 		/* Validate the hard-coding */
 		if (WARN_ON(index > sw->config.max_port_number))
 			goto out;
-		if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
+
+		down = &sw->ports[index];
+	}
+
+	if (down) {
+		if (WARN_ON(!tb_port_is_pcie_down(down)))
 			goto out;
-		if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
+		if (WARN_ON(tb_pci_port_is_enabled(down)))
 			goto out;
 
-		return &sw->ports[index];
+		return down;
 	}
 
 out:
@@ -586,7 +699,7 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
 	struct tb_switch *parent_sw;
 	struct tb_tunnel *tunnel;
 
-	up = tb_find_port(sw, TB_TYPE_PCIE_UP);
+	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
 	if (!up)
 		return 0;
 
@@ -624,7 +737,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
 
 	sw = tb_to_switch(xd->dev.parent);
 	dst_port = tb_port_at(xd->route, sw);
-	nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
+	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
 
 	mutex_lock(&tb->lock);
 	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
@@ -719,6 +832,7 @@ static void tb_handle_hotplug(struct work_struct *work)
 			tb_sw_set_unplugged(port->remote->sw);
 			tb_free_invalid_tunnels(tb);
 			tb_remove_dp_resources(port->remote->sw);
+			tb_switch_tmu_disable(port->remote->sw);
 			tb_switch_lane_bonding_disable(port->remote->sw);
 			tb_switch_remove(port->remote->sw);
 			port->remote = NULL;
@@ -786,8 +900,7 @@ static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
 
 	route = tb_cfg_get_route(&pkg->header);
 
-	if (tb_cfg_error(tb->ctl, route, pkg->port,
-			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
+	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
 			pkg->port);
 	}
@@ -866,10 +979,17 @@ static int tb_start(struct tb *tb)
 		return ret;
 	}
 
+	/* Enable TMU if it is off */
+	tb_switch_tmu_enable(tb->root_switch);
 	/* Full scan to discover devices added before the driver was loaded. */
 	tb_scan_switch(tb->root_switch);
 	/* Find out tunnels created by the boot firmware */
 	tb_discover_tunnels(tb->root_switch);
+	/*
+	 * If the boot firmware did not create USB 3.x tunnels, create them
+	 * now for the whole topology.
+	 */
+	tb_create_usb3_tunnels(tb->root_switch);
 	/* Add DP IN resources for the root switch */
 	tb_add_dp_resources(tb->root_switch);
 	/* Make the discovered switches available to the userspace */
@@ -897,6 +1017,9 @@ static void tb_restore_children(struct tb_switch *sw)
 {
 	struct tb_port *port;
 
+	if (tb_enable_tmu(sw))
+		tb_sw_warn(sw, "failed to restore TMU configuration\n");
+
 	tb_switch_for_each_port(sw, port) {
 		if (!tb_port_has_remote(port))
 			continue;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index ec851f2..2eb2bcd 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -44,6 +44,39 @@ struct tb_switch_nvm {
 
 #define TB_SWITCH_KEY_SIZE		32
 #define TB_SWITCH_MAX_DEPTH		6
+#define USB4_SWITCH_MAX_DEPTH		5
+
+/**
+ * enum tb_switch_tmu_rate - TMU refresh rate
+ * @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake)
+ * @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive
+ *			     transmission of the Delay Request TSNOS
+ *			     (Time Sync Notification Ordered Set) on a Link
+ * @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive
+ *			       transmission of the Delay Request TSNOS on
+ *			       a Link
+ */
+enum tb_switch_tmu_rate {
+	TB_SWITCH_TMU_RATE_OFF = 0,
+	TB_SWITCH_TMU_RATE_HIFI = 16,
+	TB_SWITCH_TMU_RATE_NORMAL = 1000,
+};
+
+/**
+ * struct tb_switch_tmu - Structure holding switch TMU configuration
+ * @cap: Offset to the TMU capability (%0 if not found)
+ * @has_ucap: Does the switch support uni-directional mode
+ * @rate: TMU refresh rate related to upstream switch. In case of root
+ *	  switch this holds the domain rate.
+ * @unidirectional: Is the TMU in uni-directional or bi-directional mode
+ *		    related to upstream switch. Don't case for root switch.
+ */
+struct tb_switch_tmu {
+	int cap;
+	bool has_ucap;
+	enum tb_switch_tmu_rate rate;
+	bool unidirectional;
+};
 
 /**
  * struct tb_switch - a thunderbolt switch
@@ -54,6 +87,7 @@ struct tb_switch_nvm {
  *	      mailbox this will hold the pointer to that (%NULL
  *	      otherwise). If set it also means the switch has
  *	      upgradeable NVM.
+ * @tmu: The switch TMU configuration
  * @tb: Pointer to the domain the switch belongs to
  * @uid: Unique ID of the switch
  * @uuid: UUID of the switch (or %NULL if not supported)
@@ -92,6 +126,7 @@ struct tb_switch {
 	struct tb_regs_switch_header config;
 	struct tb_port *ports;
 	struct tb_dma_port *dma_port;
+	struct tb_switch_tmu tmu;
 	struct tb *tb;
 	u64 uid;
 	uuid_t *uuid;
@@ -128,7 +163,9 @@ struct tb_switch {
  * @remote: Remote port (%NULL if not connected)
  * @xdomain: Remote host (%NULL if not connected)
  * @cap_phy: Offset, zero if not found
+ * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present)
  * @cap_adap: Offset of the adapter specific capability (%0 if not present)
+ * @cap_usb4: Offset to the USB4 port capability (%0 if not present)
  * @port: Port number on switch
  * @disabled: Disabled by eeprom
  * @bonded: true if the port is bonded (two lanes combined as one)
@@ -145,7 +182,9 @@ struct tb_port {
 	struct tb_port *remote;
 	struct tb_xdomain *xdomain;
 	int cap_phy;
+	int cap_tmu;
 	int cap_adap;
+	int cap_usb4;
 	u8 port;
 	bool disabled;
 	bool bonded;
@@ -393,6 +432,16 @@ static inline bool tb_port_is_dpout(const struct tb_port *port)
 	return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
 }
 
+static inline bool tb_port_is_usb3_down(const struct tb_port *port)
+{
+	return port && port->config.type == TB_TYPE_USB3_DOWN;
+}
+
+static inline bool tb_port_is_usb3_up(const struct tb_port *port)
+{
+	return port && port->config.type == TB_TYPE_USB3_UP;
+}
+
 static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
 			     enum tb_cfg_space space, u32 offset, u32 length)
 {
@@ -533,6 +582,8 @@ void tb_switch_suspend(struct tb_switch *sw);
 int tb_switch_resume(struct tb_switch *sw);
 int tb_switch_reset(struct tb *tb, u64 route);
 void tb_sw_set_unplugged(struct tb_switch *sw);
+struct tb_port *tb_switch_find_port(struct tb_switch *sw,
+				    enum tb_port_type type);
 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
 					       u8 depth);
 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
@@ -636,6 +687,17 @@ static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
 }
 
 /**
+ * tb_switch_is_usb4() - Is the switch USB4 compliant
+ * @sw: Switch to check
+ *
+ * Returns true if the @sw is USB4 compliant router, false otherwise.
+ */
+static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
+{
+	return sw->config.thunderbolt_version == USB4_VERSION_1_0;
+}
+
+/**
  * tb_switch_is_icm() - Is the switch handled by ICM firmware
  * @sw: Switch to check
  *
@@ -656,10 +718,22 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
 
+int tb_switch_tmu_init(struct tb_switch *sw);
+int tb_switch_tmu_post_time(struct tb_switch *sw);
+int tb_switch_tmu_disable(struct tb_switch *sw);
+int tb_switch_tmu_enable(struct tb_switch *sw);
+
+static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
+{
+	return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI &&
+	       !sw->tmu.unidirectional;
+}
+
 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
 int tb_port_add_nfc_credits(struct tb_port *port, int credits);
 int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
 int tb_port_clear_counter(struct tb_port *port, int counter);
+int tb_port_unlock(struct tb_port *port);
 int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
 void tb_port_release_in_hopid(struct tb_port *port, int hopid);
 int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
@@ -668,9 +742,13 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
 				     struct tb_port *prev);
 
 int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
+int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
 int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
 bool tb_port_is_enabled(struct tb_port *port);
 
+bool tb_usb3_port_is_enabled(struct tb_port *port);
+int tb_usb3_port_enable(struct tb_port *port, bool enable);
+
 bool tb_pci_port_is_enabled(struct tb_port *port);
 int tb_pci_port_enable(struct tb_port *port, bool enable);
 
@@ -734,4 +812,27 @@ void tb_xdomain_remove(struct tb_xdomain *xd);
 struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
 						 u8 depth);
 
+int usb4_switch_setup(struct tb_switch *sw);
+int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
+int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+			  size_t size);
+int usb4_switch_configure_link(struct tb_switch *sw);
+void usb4_switch_unconfigure_link(struct tb_switch *sw);
+bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
+int usb4_switch_set_sleep(struct tb_switch *sw);
+int usb4_switch_nvm_sector_size(struct tb_switch *sw);
+int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+			 size_t size);
+int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
+			  const void *buf, size_t size);
+int usb4_switch_nvm_authenticate(struct tb_switch *sw);
+bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
+					  const struct tb_port *port);
+struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
+					  const struct tb_port *port);
+
+int usb4_port_unlock(struct tb_port *port);
 #endif
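
A rough sketch of how a connection manager might drive the TMU helpers declared above when a new router is added (illustrative only; the wrapper name and exact call order are assumptions, error handling is abbreviated):

/* Illustrative only: rough TMU bring-up for a newly added switch. */
static int example_switch_tmu_setup(struct tb_switch *sw)
{
	int ret;

	/* Fill in sw->tmu before calling any other TMU function. */
	ret = tb_switch_tmu_init(sw);
	if (ret)
		return ret;

	/* Propagate the grandmaster time to the new router first. */
	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	/* Switch to bi-directional HiFi mode so all tunneling works. */
	return tb_switch_tmu_enable(sw);
}
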
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 3705057..fc208c5 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -67,9 +67,13 @@ struct cfg_error_pkg {
 	u32 zero1:4;
 	u32 port:6;
 	u32 zero2:2; /* Both should be zero, still they are different fields. */
-	u32 zero3:16;
+	u32 zero3:14;
+	u32 pg:2;
 } __packed;
 
+#define TB_CFG_ERROR_PG_HOT_PLUG	0x2
+#define TB_CFG_ERROR_PG_HOT_UNPLUG	0x3
+
 /* TB_CFG_PKG_EVENT */
 struct cfg_event_pkg {
 	struct tb_cfg_header header;
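
The new pg field above is what tb_cfg_ack_plug() (used in the tb.c hunk earlier) fills in when acknowledging a plug event; a hypothetical sketch of that packing:

/* Hypothetical sketch: select the plug/unplug code for the ack packet. */
static void example_fill_plug_ack(struct cfg_error_pkg *pkg, bool unplug)
{
	pkg->pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			 : TB_CFG_ERROR_PG_HOT_PLUG;
}
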
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 7ee45b7..c29c507 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -26,6 +26,7 @@
 #define TB_MAX_CONFIG_RW_LENGTH 60
 
 enum tb_switch_cap {
+	TB_SWITCH_CAP_TMU		= 0x03,
 	TB_SWITCH_CAP_VSE		= 0x05,
 };
 
@@ -41,6 +42,7 @@ enum tb_port_cap {
 	TB_PORT_CAP_TIME1		= 0x03,
 	TB_PORT_CAP_ADAP		= 0x04,
 	TB_PORT_CAP_VSE			= 0x05,
+	TB_PORT_CAP_USB4		= 0x06,
 };
 
 enum tb_port_state {
@@ -164,10 +166,52 @@ struct tb_regs_switch_header {
 				  * milliseconds. Writing 0x00 is interpreted
 				  * as 255ms.
 				  */
-	u32 __unknown4:16;
+	u32 cmuv:8;
+	u32 __unknown4:8;
 	u32 thunderbolt_version:8;
 } __packed;
 
+/* USB4 version 1.0 */
+#define USB4_VERSION_1_0			0x20
+
+#define ROUTER_CS_1				0x01
+#define ROUTER_CS_4				0x04
+#define ROUTER_CS_5				0x05
+#define ROUTER_CS_5_SLP				BIT(0)
+#define ROUTER_CS_5_C3S				BIT(23)
+#define ROUTER_CS_5_PTO				BIT(24)
+#define ROUTER_CS_5_UTO				BIT(25)
+#define ROUTER_CS_5_HCO				BIT(26)
+#define ROUTER_CS_5_CV				BIT(31)
+#define ROUTER_CS_6				0x06
+#define ROUTER_CS_6_SLPR			BIT(0)
+#define ROUTER_CS_6_TNS				BIT(1)
+#define ROUTER_CS_6_HCI				BIT(18)
+#define ROUTER_CS_6_CR				BIT(25)
+#define ROUTER_CS_7				0x07
+#define ROUTER_CS_9				0x09
+#define ROUTER_CS_25				0x19
+#define ROUTER_CS_26				0x1a
+#define ROUTER_CS_26_STATUS_MASK		GENMASK(29, 24)
+#define ROUTER_CS_26_STATUS_SHIFT		24
+#define ROUTER_CS_26_ONS			BIT(30)
+#define ROUTER_CS_26_OV				BIT(31)
+
+/* Router TMU configuration */
+#define TMU_RTR_CS_0				0x00
+#define TMU_RTR_CS_0_TD				BIT(27)
+#define TMU_RTR_CS_0_UCAP			BIT(30)
+#define TMU_RTR_CS_1				0x01
+#define TMU_RTR_CS_1_LOCAL_TIME_NS_MASK		GENMASK(31, 16)
+#define TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT	16
+#define TMU_RTR_CS_2				0x02
+#define TMU_RTR_CS_3				0x03
+#define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK		GENMASK(15, 0)
+#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK	GENMASK(31, 16)
+#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT	16
+#define TMU_RTR_CS_22				0x16
+#define TMU_RTR_CS_24				0x18
+
 enum tb_port_type {
 	TB_TYPE_INACTIVE	= 0x000000,
 	TB_TYPE_PORT		= 0x000001,
@@ -178,7 +222,8 @@ enum tb_port_type {
 	TB_TYPE_DP_HDMI_OUT	= 0x0e0102,
 	TB_TYPE_PCIE_DOWN	= 0x100101,
 	TB_TYPE_PCIE_UP		= 0x100102,
-	/* TB_TYPE_USB		= 0x200000, lower order bits are not known */
+	TB_TYPE_USB3_DOWN	= 0x200101,
+	TB_TYPE_USB3_UP		= 0x200102,
 };
 
 /* Present on every port in TB_CF_PORT at address zero. */
@@ -216,10 +261,15 @@ struct tb_regs_port_header {
 #define ADP_CS_4_NFC_BUFFERS_MASK		GENMASK(9, 0)
 #define ADP_CS_4_TOTAL_BUFFERS_MASK		GENMASK(29, 20)
 #define ADP_CS_4_TOTAL_BUFFERS_SHIFT		20
+#define ADP_CS_4_LCK				BIT(31)
 #define ADP_CS_5				0x05
 #define ADP_CS_5_LCA_MASK			GENMASK(28, 22)
 #define ADP_CS_5_LCA_SHIFT			22
 
+/* TMU adapter registers */
+#define TMU_ADP_CS_3				0x03
+#define TMU_ADP_CS_3_UDM			BIT(29)
+
 /* Lane adapter registers */
 #define LANE_ADP_CS_0				0x00
 #define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK	GENMASK(25, 20)
@@ -237,6 +287,12 @@ struct tb_regs_port_header {
 #define LANE_ADP_CS_1_CURRENT_WIDTH_MASK	GENMASK(25, 20)
 #define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT	20
 
+/* USB4 port registers */
+#define PORT_CS_18				0x12
+#define PORT_CS_18_BE				BIT(8)
+#define PORT_CS_19				0x13
+#define PORT_CS_19_PC				BIT(3)
+
 /* Display Port adapter registers */
 #define ADP_DP_CS_0				0x00
 #define ADP_DP_CS_0_VIDEO_HOPID_MASK		GENMASK(26, 16)
@@ -277,6 +333,11 @@ struct tb_regs_port_header {
 #define ADP_PCIE_CS_0				0x00
 #define ADP_PCIE_CS_0_PE			BIT(31)
 
+/* USB adapter registers */
+#define ADP_USB3_CS_0				0x00
+#define ADP_USB3_CS_0_V				BIT(30)
+#define ADP_USB3_CS_0_PE			BIT(31)
+
 /* Hop register from TB_CFG_HOPS. 8 byte per entry. */
 struct tb_regs_hop {
 	/* DWORD 0 */
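
The register additions above follow the usual GENMASK/shift convention; a hypothetical accessor showing how such a field is extracted (the helper name is made up):

/* Hypothetical: pull the TS packet interval field out of TMU_RTR_CS_3. */
static inline u32 example_ts_packet_interval(u32 tmu_rtr_cs_3)
{
	return (tmu_rtr_cs_3 & TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK) >>
	       TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
}
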
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
new file mode 100644
index 0000000..039c42a
--- /dev/null
+++ b/drivers/thunderbolt/tmu.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt Time Management Unit (TMU) support
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *	    Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#include <linux/delay.h>
+
+#include "tb.h"
+
+static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
+{
+	bool root_switch = !tb_route(sw);
+
+	switch (sw->tmu.rate) {
+	case TB_SWITCH_TMU_RATE_OFF:
+		return "off";
+
+	case TB_SWITCH_TMU_RATE_HIFI:
+		/* Root switch does not have upstream directionality */
+		if (root_switch)
+			return "HiFi";
+		if (sw->tmu.unidirectional)
+			return "uni-directional, HiFi";
+		return "bi-directional, HiFi";
+
+	case TB_SWITCH_TMU_RATE_NORMAL:
+		if (root_switch)
+			return "normal";
+		return "uni-directional, normal";
+
+	default:
+		return "unknown";
+	}
+}
+
+static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
+{
+	int ret;
+	u32 val;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->tmu.cap + TMU_RTR_CS_0, 1);
+	if (ret)
+		return false;
+
+	return !!(val & TMU_RTR_CS_0_UCAP);
+}
+
+static int tb_switch_tmu_rate_read(struct tb_switch *sw)
+{
+	int ret;
+	u32 val;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->tmu.cap + TMU_RTR_CS_3, 1);
+	if (ret)
+		return ret;
+
+	val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
+	return val;
+}
+
+static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
+{
+	int ret;
+	u32 val;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->tmu.cap + TMU_RTR_CS_3, 1);
+	if (ret)
+		return ret;
+
+	val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
+	val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
+
+	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+			   sw->tmu.cap + TMU_RTR_CS_3, 1);
+}
+
+static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
+			     u32 value)
+{
+	u32 data;
+	int ret;
+
+	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
+	if (ret)
+		return ret;
+
+	data &= ~mask;
+	data |= value;
+
+	return tb_port_write(port, &data, TB_CFG_PORT,
+			     port->cap_tmu + offset, 1);
+}
+
+static int tb_port_tmu_set_unidirectional(struct tb_port *port,
+					  bool unidirectional)
+{
+	u32 val;
+
+	if (!port->sw->tmu.has_ucap)
+		return 0;
+
+	val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
+	return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
+}
+
+static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
+{
+	return tb_port_tmu_set_unidirectional(port, false);
+}
+
+static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
+{
+	int ret;
+	u32 val;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_tmu + TMU_ADP_CS_3, 1);
+	if (ret)
+		return false;
+
+	return val & TMU_ADP_CS_3_UDM;
+}
+
+static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
+{
+	int ret;
+	u32 val;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->tmu.cap + TMU_RTR_CS_0, 1);
+	if (ret)
+		return ret;
+
+	if (set)
+		val |= TMU_RTR_CS_0_TD;
+	else
+		val &= ~TMU_RTR_CS_0_TD;
+
+	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+			   sw->tmu.cap + TMU_RTR_CS_0, 1);
+}
+
+/**
+ * tb_switch_tmu_init() - Initialize switch TMU structures
+ * @sw: Switch to initialize
+ *
+ * This function must be called before other TMU related functions to
+ * make sure the internal structures are filled in correctly. Does not
+ * change any hardware configuration.
+ */
+int tb_switch_tmu_init(struct tb_switch *sw)
+{
+	struct tb_port *port;
+	int ret;
+
+	if (tb_switch_is_icm(sw))
+		return 0;
+
+	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
+	if (ret > 0)
+		sw->tmu.cap = ret;
+
+	tb_switch_for_each_port(sw, port) {
+		int cap;
+
+		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
+		if (cap > 0)
+			port->cap_tmu = cap;
+	}
+
+	ret = tb_switch_tmu_rate_read(sw);
+	if (ret < 0)
+		return ret;
+
+	sw->tmu.rate = ret;
+
+	sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
+	if (sw->tmu.has_ucap) {
+		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
+
+		if (tb_route(sw)) {
+			struct tb_port *up = tb_upstream_port(sw);
+
+			sw->tmu.unidirectional =
+				tb_port_tmu_is_unidirectional(up);
+		}
+	} else {
+		sw->tmu.unidirectional = false;
+	}
+
+	tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
+	return 0;
+}
+
+/**
+ * tb_switch_tmu_post_time() - Update switch local time
+ * @sw: Switch whose time to update
+ *
+ * Updates the switch local time using the time posting procedure.
+ */
+int tb_switch_tmu_post_time(struct tb_switch *sw)
+{
+	unsigned int  post_local_time_offset, post_time_offset;
+	struct tb_switch *root_switch = sw->tb->root_switch;
+	u64 hi, mid, lo, local_time, post_time;
+	int i, ret, retries = 100;
+	u32 gm_local_time[3];
+
+	if (!tb_route(sw))
+		return 0;
+
+	if (!tb_switch_is_usb4(sw))
+		return 0;
+
+	/* Need to be able to read the grand master time */
+	if (!root_switch->tmu.cap)
+		return 0;
+
+	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
+			 root_switch->tmu.cap + TMU_RTR_CS_1,
+			 ARRAY_SIZE(gm_local_time));
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
+		tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
+			  gm_local_time[i]);
+
+	/* Convert to nanoseconds (drop fractional part) */
+	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
+	mid = gm_local_time[1];
+	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
+		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
+	local_time = hi << 48 | mid << 16 | lo;
+
+	/* Tell the switch that time sync is disrupted for a while */
+	ret = tb_switch_tmu_set_time_disruption(sw, true);
+	if (ret)
+		return ret;
+
+	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
+	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
+
+	/*
+	 * Write the Grandmaster time to the Post Local Time registers
+	 * of the new switch.
+	 */
+	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
+			  post_local_time_offset, 2);
+	if (ret)
+		goto out;
+
+	/*
+	 * Have the new switch update its local time (by writing 1 to
+	 * the post_time registers) and wait for the completion of the
+	 * same (post_time register becomes 0). This means the time has
+	 * been converged properly.
+	 */
+	post_time = 1;
+
+	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
+	if (ret)
+		goto out;
+
+	do {
+		usleep_range(5, 10);
+		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
+				 post_time_offset, 2);
+		if (ret)
+			goto out;
+	} while (--retries && post_time);
+
+	if (!retries) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);
+
+out:
+	tb_switch_tmu_set_time_disruption(sw, false);
+	return ret;
+}
+
+/**
+ * tb_switch_tmu_disable() - Disable TMU of a switch
+ * @sw: Switch whose TMU to disable
+ *
+ * Turns off the TMU of @sw if it is enabled. If not enabled, does nothing.
+ */
+int tb_switch_tmu_disable(struct tb_switch *sw)
+{
+	int ret;
+
+	if (!tb_switch_is_usb4(sw))
+		return 0;
+
+	/* Already disabled? */
+	if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
+		return 0;
+
+	if (sw->tmu.unidirectional) {
+		struct tb_switch *parent = tb_switch_parent(sw);
+		struct tb_port *up, *down;
+
+		up = tb_upstream_port(sw);
+		down = tb_port_at(tb_route(sw), parent);
+
+		/* The switch may be unplugged so ignore any errors */
+		tb_port_tmu_unidirectional_disable(up);
+		ret = tb_port_tmu_unidirectional_disable(down);
+		if (ret)
+			return ret;
+	}
+
+	tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+
+	sw->tmu.unidirectional = false;
+	sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;
+
+	tb_sw_dbg(sw, "TMU: disabled\n");
+	return 0;
+}
+
+/**
+ * tb_switch_tmu_enable() - Enable TMU on a switch
+ * @sw: Switch whose TMU to enable
+ *
+ * Enables TMU of a switch to be in bi-directional, HiFi mode. In this mode
+ * all tunneling should work.
+ */
+int tb_switch_tmu_enable(struct tb_switch *sw)
+{
+	int ret;
+
+	if (!tb_switch_is_usb4(sw))
+		return 0;
+
+	if (tb_switch_tmu_is_enabled(sw))
+		return 0;
+
+	ret = tb_switch_tmu_set_time_disruption(sw, true);
+	if (ret)
+		return ret;
+
+	/* Change mode to bi-directional */
+	if (tb_route(sw) && sw->tmu.unidirectional) {
+		struct tb_switch *parent = tb_switch_parent(sw);
+		struct tb_port *up, *down;
+
+		up = tb_upstream_port(sw);
+		down = tb_port_at(tb_route(sw), parent);
+
+		ret = tb_port_tmu_unidirectional_disable(down);
+		if (ret)
+			return ret;
+
+		ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+		if (ret)
+			return ret;
+
+		ret = tb_port_tmu_unidirectional_disable(up);
+		if (ret)
+			return ret;
+	} else {
+		ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+		if (ret)
+			return ret;
+	}
+
+	sw->tmu.unidirectional = false;
+	sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI;
+	tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
+
+	return tb_switch_tmu_set_time_disruption(sw, false);
+}
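
The bit juggling in tb_switch_tmu_post_time() above can be hard to follow; a standalone restatement of the same 64-bit time reassembly (the helper is hypothetical, the math mirrors the function):

/* Hypothetical: rebuild the 64-bit ns time from the TMU_RTR_CS_1..3 dwords. */
static u64 example_tmu_local_time(u32 cs1, u32 cs2, u32 cs3)
{
	u64 hi = cs3 & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;	/* bits 63:48 */
	u64 mid = cs2;					/* bits 47:16 */
	u64 lo = (cs1 & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		 TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;	/* bits 15:0 */

	return hi << 48 | mid << 16 | lo;
}
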
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 0d3463c..dbe90bc 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -19,6 +19,12 @@
 #define TB_PCI_PATH_DOWN		0
 #define TB_PCI_PATH_UP			1
 
+/* USB3 adapters always use HopID 8 for both directions */
+#define TB_USB3_HOPID			8
+
+#define TB_USB3_PATH_DOWN		0
+#define TB_USB3_PATH_UP			1
+
 /* DP adapters use HopID 8 for AUX and 9 for Video */
 #define TB_DP_AUX_TX_HOPID		8
 #define TB_DP_AUX_RX_HOPID		8
@@ -31,7 +37,7 @@
 #define TB_DMA_PATH_OUT			0
 #define TB_DMA_PATH_IN			1
 
-static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA" };
+static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
 
 #define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
 	do {                                                            \
@@ -243,6 +249,12 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
 	return tunnel;
 }
 
+static bool tb_dp_is_usb4(const struct tb_switch *sw)
+{
+	/* Titan Ridge DP adapters need the same treatment as USB4 */
+	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+}
+
 static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
 {
 	int timeout = 10;
@@ -250,8 +262,7 @@ static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
 	int ret;
 
 	/* Both ends need to support this */
-	if (!tb_switch_is_titan_ridge(in->sw) ||
-	    !tb_switch_is_titan_ridge(out->sw))
+	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
 		return 0;
 
 	ret = tb_port_read(out, &val, TB_CFG_PORT,
@@ -531,7 +542,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel)
 	u32 val, rate = 0, lanes = 0;
 	int ret;
 
-	if (tb_switch_is_titan_ridge(sw)) {
+	if (tb_dp_is_usb4(sw)) {
 		int timeout = 10;
 
 		/*
@@ -843,6 +854,156 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 	return tunnel;
 }
 
+static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
+{
+	int res;
+
+	res = tb_usb3_port_enable(tunnel->src_port, activate);
+	if (res)
+		return res;
+
+	if (tb_port_is_usb3_up(tunnel->dst_port))
+		return tb_usb3_port_enable(tunnel->dst_port, activate);
+
+	return 0;
+}
+
+static void tb_usb3_init_path(struct tb_path *path)
+{
+	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+	path->egress_shared_buffer = TB_PATH_NONE;
+	path->ingress_fc_enable = TB_PATH_ALL;
+	path->ingress_shared_buffer = TB_PATH_NONE;
+	path->priority = 3;
+	path->weight = 3;
+	path->drop_packages = 0;
+	path->nfc_credits = 0;
+	path->hops[0].initial_credits = 7;
+	path->hops[1].initial_credits =
+		tb_initial_credits(path->hops[1].in_port->sw);
+}
+
+/**
+ * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
+ * @tb: Pointer to the domain structure
+ * @down: USB3 downstream adapter
+ *
+ * If @down adapter is active, follows the tunnel to the USB3 upstream
+ * adapter and back. Returns the discovered tunnel or %NULL if there was
+ * no tunnel.
+ */
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
+{
+	struct tb_tunnel *tunnel;
+	struct tb_path *path;
+
+	if (!tb_usb3_port_is_enabled(down))
+		return NULL;
+
+	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
+	if (!tunnel)
+		return NULL;
+
+	tunnel->activate = tb_usb3_activate;
+	tunnel->src_port = down;
+
+	/*
+	 * Discover both paths even if they are not complete. We will
+	 * clean them up by calling tb_tunnel_deactivate() below in that
+	 * case.
+	 */
+	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
+				&tunnel->dst_port, "USB3 Up");
+	if (!path) {
+		/* Just disable the downstream port */
+		tb_usb3_port_enable(down, false);
+		goto err_free;
+	}
+	tunnel->paths[TB_USB3_PATH_UP] = path;
+	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
+
+	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
+				"USB3 Down");
+	if (!path)
+		goto err_deactivate;
+	tunnel->paths[TB_USB3_PATH_DOWN] = path;
+	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
+
+	/* Validate that the tunnel is complete */
+	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
+		tb_port_warn(tunnel->dst_port,
+			     "path does not end on an USB3 adapter, cleaning up\n");
+		goto err_deactivate;
+	}
+
+	if (down != tunnel->src_port) {
+		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
+		goto err_deactivate;
+	}
+
+	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
+		tb_tunnel_warn(tunnel,
+			       "tunnel is not fully activated, cleaning up\n");
+		goto err_deactivate;
+	}
+
+	tb_tunnel_dbg(tunnel, "discovered\n");
+	return tunnel;
+
+err_deactivate:
+	tb_tunnel_deactivate(tunnel);
+err_free:
+	tb_tunnel_free(tunnel);
+
+	return NULL;
+}
+
+/**
+ * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
+ * @tb: Pointer to the domain structure
+ * @up: USB3 upstream adapter port
+ * @down: USB3 downstream adapter port
+ *
+ * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
+ * @TB_TYPE_USB3_DOWN.
+ *
+ * Return: Returns a tb_tunnel on success or %NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
+				       struct tb_port *down)
+{
+	struct tb_tunnel *tunnel;
+	struct tb_path *path;
+
+	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
+	if (!tunnel)
+		return NULL;
+
+	tunnel->activate = tb_usb3_activate;
+	tunnel->src_port = down;
+	tunnel->dst_port = up;
+
+	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
+			     "USB3 Down");
+	if (!path) {
+		tb_tunnel_free(tunnel);
+		return NULL;
+	}
+	tb_usb3_init_path(path);
+	tunnel->paths[TB_USB3_PATH_DOWN] = path;
+
+	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
+			     "USB3 Up");
+	if (!path) {
+		tb_tunnel_free(tunnel);
+		return NULL;
+	}
+	tb_usb3_init_path(path);
+	tunnel->paths[TB_USB3_PATH_UP] = path;
+
+	return tunnel;
+}
+
 /**
  * tb_tunnel_free() - free a tunnel
  * @tunnel: Tunnel to be freed
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index ba888da..3f5ba93 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -15,6 +15,7 @@ enum tb_tunnel_type {
 	TB_TUNNEL_PCI,
 	TB_TUNNEL_DP,
 	TB_TUNNEL_DMA,
+	TB_TUNNEL_USB3,
 };
 
 /**
@@ -57,6 +58,9 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 				      struct tb_port *dst, int transmit_ring,
 				      int transmit_path, int receive_ring,
 				      int receive_path);
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
+				       struct tb_port *down);
 
 void tb_tunnel_free(struct tb_tunnel *tunnel);
 int tb_tunnel_activate(struct tb_tunnel *tunnel);
@@ -82,5 +86,10 @@ static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
 	return tunnel->type == TB_TUNNEL_DMA;
 }
 
+static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
+{
+	return tunnel->type == TB_TUNNEL_USB3;
+}
+
 #endif
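
A hedged sketch of how the USB3 tunnel API declared above hangs together (the wrapper is hypothetical; error handling is abbreviated):

/* Illustrative only: create and activate a USB3 tunnel between adapters. */
static int example_tunnel_usb3(struct tb *tb, struct tb_port *up,
			       struct tb_port *down)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_tunnel_alloc_usb3(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		/* Release the allocated paths if programming failed. */
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	return 0;
}
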
 
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
new file mode 100644
index 0000000..b341fc6
--- /dev/null
+++ b/drivers/thunderbolt/usb4.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB4 specific functionality
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *	    Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/ktime.h>
+
+#include "tb.h"
+
+#define USB4_DATA_DWORDS		16
+#define USB4_DATA_RETRIES		3
+
+enum usb4_switch_op {
+	USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
+	USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
+	USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
+	USB4_SWITCH_OP_NVM_WRITE = 0x20,
+	USB4_SWITCH_OP_NVM_AUTH = 0x21,
+	USB4_SWITCH_OP_NVM_READ = 0x22,
+	USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
+	USB4_SWITCH_OP_DROM_READ = 0x24,
+	USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
+};
+
+#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
+#define USB4_NVM_READ_OFFSET_SHIFT	2
+#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
+#define USB4_NVM_READ_LENGTH_SHIFT	24
+
+#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
+#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT
+
+#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
+#define USB4_DROM_ADDRESS_SHIFT		2
+#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
+#define USB4_DROM_SIZE_SHIFT		15
+
+#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)
+
+typedef int (*read_block_fn)(struct tb_switch *, unsigned int, void *, size_t);
+typedef int (*write_block_fn)(struct tb_switch *, const void *, size_t);
+
+static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
+				    u32 value, int timeout_msec)
+{
+	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+
+	do {
+		u32 val;
+		int ret;
+
+		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
+		if (ret)
+			return ret;
+
+		if ((val & bit) == value)
+			return 0;
+
+		usleep_range(50, 100);
+	} while (ktime_before(ktime_get(), timeout));
+
+	return -ETIMEDOUT;
+}
+
+static int usb4_switch_op_read_data(struct tb_switch *sw, void *data,
+				    size_t dwords)
+{
+	if (dwords > USB4_DATA_DWORDS)
+		return -EINVAL;
+
+	return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
+}
+
+static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data,
+				     size_t dwords)
+{
+	if (dwords > USB4_DATA_DWORDS)
+		return -EINVAL;
+
+	return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
+}
+
+static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata)
+{
+	return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
+}
+
+static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata)
+{
+	return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
+}
+
+static int usb4_switch_do_read_data(struct tb_switch *sw, u16 address,
+	void *buf, size_t size, read_block_fn read_block)
+{
+	unsigned int retries = USB4_DATA_RETRIES;
+	unsigned int offset;
+
+	offset = address & 3;
+	address = address & ~3;
+
+	do {
+		size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
+		unsigned int dwaddress, dwords;
+		u8 data[USB4_DATA_DWORDS * 4];
+		int ret;
+
+		dwaddress = address / 4;
+		dwords = ALIGN(nbytes, 4) / 4;
+
+		ret = read_block(sw, dwaddress, data, dwords);
+		if (ret) {
+			if (ret == -ETIMEDOUT) {
+				if (retries--)
+					continue;
+				ret = -EIO;
+			}
+			return ret;
+		}
+
+		memcpy(buf, data + offset, nbytes);
+
+		size -= nbytes;
+		address += nbytes;
+		buf += nbytes;
+	} while (size > 0);
+
+	return 0;
+}
+
+static int usb4_switch_do_write_data(struct tb_switch *sw, u16 address,
+	const void *buf, size_t size, write_block_fn write_next_block)
+{
+	unsigned int retries = USB4_DATA_RETRIES;
+	unsigned int offset;
+
+	offset = address & 3;
+	address = address & ~3;
+
+	do {
+		u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4);
+		u8 data[USB4_DATA_DWORDS * 4];
+		int ret;
+
+		memcpy(data + offset, buf, nbytes);
+
+		ret = write_next_block(sw, data, nbytes / 4);
+		if (ret) {
+			if (ret == -ETIMEDOUT) {
+				if (retries--)
+					continue;
+				ret = -EIO;
+			}
+			return ret;
+		}
+
+		size -= nbytes;
+		address += nbytes;
+		buf += nbytes;
+	} while (size > 0);
+
+	return 0;
+}
+
+static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
+{
+	u32 val;
+	int ret;
+
+	val = opcode | ROUTER_CS_26_OV;
+	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+	if (ret)
+		return ret;
+
+	ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
+	if (ret)
+		return ret;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+	if (ret)
+		return ret;
+
+	if (val & ROUTER_CS_26_ONS)
+		return -EOPNOTSUPP;
+
+	*status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT;
+	return 0;
+}
+
+/**
+ * usb4_switch_setup() - Additional setup for USB4 device
+ * @sw: USB4 router to setup
+ *
+ * USB4 routers need additional settings in order to enable all the
+ * tunneling. This function enables USB and PCIe tunneling if they can
+ * be enabled (e.g. the parent switch also supports them). If USB
+ * tunneling is not available for some reason (for example when there
+ * is a Thunderbolt 3 switch upstream) then the internal xHCI
+ * controller is enabled instead.
+ */
+int usb4_switch_setup(struct tb_switch *sw)
+{
+	struct tb_switch *parent;
+	bool tbt3, xhci;
+	u32 val = 0;
+	int ret;
+
+	if (!tb_route(sw))
+		return 0;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
+	if (ret)
+		return ret;
+
+	xhci = val & ROUTER_CS_6_HCI;
+	tbt3 = !(val & ROUTER_CS_6_TNS);
+
+	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
+		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+	if (ret)
+		return ret;
+
+	parent = tb_switch_parent(sw);
+
+	if (tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
+		val |= ROUTER_CS_5_UTO;
+		xhci = false;
+	}
+
+	/* Only enable PCIe tunneling if the parent router supports it */
+	if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
+		val |= ROUTER_CS_5_PTO;
+		/*
+		 * xHCI can be enabled if PCIe tunneling is supported
+		 * and the parent does not have any USB3 downstream
+		 * adapters (so we cannot do USB 3.x tunneling).
+		 */
+		if (xhci)
+			val |= ROUTER_CS_5_HCO;
+	}
+
+	/* TBT3 supported by the CM */
+	val |= ROUTER_CS_5_C3S;
+	/* Tunneling configuration is ready now */
+	val |= ROUTER_CS_5_CV;
+
+	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+	if (ret)
+		return ret;
+
+	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
+					ROUTER_CS_6_CR, 50);
+}
+
+/**
+ * usb4_switch_read_uid() - Read UID from USB4 router
+ * @sw: USB4 router
+ * @uid: UID is stored here
+ *
+ * Reads 64-bit UID from USB4 router config space.
+ */
+int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
+{
+	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
+}
+
+static int usb4_switch_drom_read_block(struct tb_switch *sw,
+				       unsigned int dwaddress, void *buf,
+				       size_t dwords)
+{
+	u8 status = 0;
+	u32 metadata;
+	int ret;
+
+	metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
+	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
+		USB4_DROM_ADDRESS_MASK;
+
+	ret = usb4_switch_op_write_metadata(sw, metadata);
+	if (ret)
+		return ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);
+	if (ret)
+		return ret;
+
+	if (status)
+		return -EIO;
+
+	return usb4_switch_op_read_data(sw, buf, dwords);
+}
+
+/**
+ * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
+ * @sw: USB4 router
+ * @address: Byte address inside DROM to start reading
+ * @buf: Buffer where the read data is placed
+ * @size: How many bytes to read
+ *
+ * Uses USB4 router operations to read router DROM. For devices this
+ * should always work but for hosts it may return %-EOPNOTSUPP in which
+ * case the host router does not have DROM.
+ */
+int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+			  size_t size)
+{
+	return usb4_switch_do_read_data(sw, address, buf, size,
+					usb4_switch_drom_read_block);
+}
+
+static int usb4_set_port_configured(struct tb_port *port, bool configured)
+{
+	int ret;
+	u32 val;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_usb4 + PORT_CS_19, 1);
+	if (ret)
+		return ret;
+
+	if (configured)
+		val |= PORT_CS_19_PC;
+	else
+		val &= ~PORT_CS_19_PC;
+
+	return tb_port_write(port, &val, TB_CFG_PORT,
+			     port->cap_usb4 + PORT_CS_19, 1);
+}
+
+/**
+ * usb4_switch_configure_link() - Set upstream USB4 link configured
+ * @sw: USB4 router
+ *
+ * Sets the upstream USB4 link to be configured for power management
+ * purposes.
+ */
+int usb4_switch_configure_link(struct tb_switch *sw)
+{
+	struct tb_port *up;
+
+	if (!tb_route(sw))
+		return 0;
+
+	up = tb_upstream_port(sw);
+	return usb4_set_port_configured(up, true);
+}
+
+/**
+ * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration
+ * @sw: USB4 router
+ *
+ * Reverse of usb4_switch_configure_link().
+ */
+void usb4_switch_unconfigure_link(struct tb_switch *sw)
+{
+	struct tb_port *up;
+
+	if (sw->is_unplugged || !tb_route(sw))
+		return;
+
+	up = tb_upstream_port(sw);
+	usb4_set_port_configured(up, false);
+}
+
+/**
+ * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
+ * @sw: USB4 router
+ *
+ * Checks whether conditions are met so that lane bonding can be
+ * established with the upstream router. Call only for device routers.
+ */
+bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
+{
+	struct tb_port *up;
+	int ret;
+	u32 val;
+
+	up = tb_upstream_port(sw);
+	ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
+	if (ret)
+		return false;
+
+	return !!(val & PORT_CS_18_BE);
+}
+
+/**
+ * usb4_switch_set_sleep() - Prepare the router to enter sleep
+ * @sw: USB4 router
+ *
+ * Enables wakes and sets sleep bit for the router. Returns when the
+ * router sleep ready bit has been asserted.
+ */
+int usb4_switch_set_sleep(struct tb_switch *sw)
+{
+	int ret;
+	u32 val;
+
+	/* Set sleep bit and wait for sleep ready to be asserted */
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+	if (ret)
+		return ret;
+
+	val |= ROUTER_CS_5_SLP;
+
+	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+	if (ret)
+		return ret;
+
+	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
+					ROUTER_CS_6_SLPR, 500);
+}
+
+/**
+ * usb4_switch_nvm_sector_size() - Return router NVM sector size
+ * @sw: USB4 router
+ *
+ * If the router supports NVM operations this function returns the NVM
+ * sector size in bytes. If NVM operations are not supported returns
+ * %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_sector_size(struct tb_switch *sw)
+{
+	u32 metadata;
+	u8 status;
+	int ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status);
+	if (ret)
+		return ret;
+
+	if (status)
+		return status == 0x2 ? -EOPNOTSUPP : -EIO;
+
+	ret = usb4_switch_op_read_metadata(sw, &metadata);
+	if (ret)
+		return ret;
+
+	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
+}
+
+static int usb4_switch_nvm_read_block(struct tb_switch *sw,
+	unsigned int dwaddress, void *buf, size_t dwords)
+{
+	u8 status = 0;
+	u32 metadata;
+	int ret;
+
+	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
+		   USB4_NVM_READ_LENGTH_MASK;
+	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
+		   USB4_NVM_READ_OFFSET_MASK;
+
+	ret = usb4_switch_op_write_metadata(sw, metadata);
+	if (ret)
+		return ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status);
+	if (ret)
+		return ret;
+
+	if (status)
+		return -EIO;
+
+	return usb4_switch_op_read_data(sw, buf, dwords);
+}
+
+/**
+ * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
+ * @sw: USB4 router
+ * @address: Starting address in bytes
+ * @buf: Read data is placed here
+ * @size: How many bytes to read
+ *
+ * Reads NVM contents of the router. If NVM is not supported returns
+ * %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+			 size_t size)
+{
+	return usb4_switch_do_read_data(sw, address, buf, size,
+					usb4_switch_nvm_read_block);
+}
+
+static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
+				      unsigned int address)
+{
+	u32 metadata, dwaddress;
+	u8 status = 0;
+	int ret;
+
+	dwaddress = address / 4;
+	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
+		   USB4_NVM_SET_OFFSET_MASK;
+
+	ret = usb4_switch_op_write_metadata(sw, metadata);
+	if (ret)
+		return ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status);
+	if (ret)
+		return ret;
+
+	return status ? -EIO : 0;
+}
+
+static int usb4_switch_nvm_write_next_block(struct tb_switch *sw,
+					    const void *buf, size_t dwords)
+{
+	u8 status;
+	int ret;
+
+	ret = usb4_switch_op_write_data(sw, buf, dwords);
+	if (ret)
+		return ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status);
+	if (ret)
+		return ret;
+
+	return status ? -EIO : 0;
+}
+
+/**
+ * usb4_switch_nvm_write() - Write to the router NVM
+ * @sw: USB4 router
+ * @address: Start address where to write in bytes
+ * @buf: Pointer to the data to write
+ * @size: Size of @buf in bytes
+ *
+ * Writes @buf to the router NVM using USB4 router operations. If NVM
+ * write is not supported returns %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
+			  const void *buf, size_t size)
+{
+	int ret;
+
+	ret = usb4_switch_nvm_set_offset(sw, address);
+	if (ret)
+		return ret;
+
+	return usb4_switch_do_write_data(sw, address, buf, size,
+					 usb4_switch_nvm_write_next_block);
+}
+
+/**
+ * usb4_switch_nvm_authenticate() - Authenticate new NVM
+ * @sw: USB4 router
+ *
+ * After the new NVM has been written via usb4_switch_nvm_write(), this
+ * function triggers NVM authentication process. If the authentication
+ * is successful the router is power cycled and the new NVM starts
+ * running. In case of failure returns negative errno.
+ */
+int usb4_switch_nvm_authenticate(struct tb_switch *sw)
+{
+	u8 status = 0;
+	int ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
+	if (ret)
+		return ret;
+
+	switch (status) {
+	case 0x0:
+		tb_sw_dbg(sw, "NVM authentication successful\n");
+		return 0;
+	case 0x1:
+		return -EINVAL;
+	case 0x2:
+		return -EAGAIN;
+	case 0x3:
+		return -EOPNOTSUPP;
+	default:
+		return -EIO;
+	}
+}
+
+/**
+ * usb4_switch_query_dp_resource() - Query availability of DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * For DP tunneling this function can be used to query availability of
+ * DP IN resource. Returns true if the resource is available for DP
+ * tunneling, false otherwise.
+ */
+bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+	u8 status;
+	int ret;
+
+	ret = usb4_switch_op_write_metadata(sw, in->port);
+	if (ret)
+		return false;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status);
+	/*
+	 * If DP resource allocation is not supported assume it is
+	 * always available.
+	 */
+	if (ret == -EOPNOTSUPP)
+		return true;
+	else if (ret)
+		return false;
+
+	return !status;
+}
+
+/**
+ * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * Allocates DP IN resource for DP tunneling using USB4 router
+ * operations. If the resource was allocated returns %0. Otherwise
+ * returns negative errno, in particular %-EBUSY if the resource is
+ * already allocated.
+ */
+int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+	u8 status;
+	int ret;
+
+	ret = usb4_switch_op_write_metadata(sw, in->port);
+	if (ret)
+		return ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status);
+	if (ret == -EOPNOTSUPP)
+		return 0;
+	else if (ret)
+		return ret;
+
+	return status ? -EBUSY : 0;
+}
+
+/**
+ * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * Releases the previously allocated DP IN resource.
+ */
+int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+	u8 status;
+	int ret;
+
+	ret = usb4_switch_op_write_metadata(sw, in->port);
+	if (ret)
+		return ret;
+
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status);
+	if (ret == -EOPNOTSUPP)
+		return 0;
+	else if (ret)
+		return ret;
+
+	return status ? -EIO : 0;
+}
+
+static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
+{
+	struct tb_port *p;
+	int usb4_idx = 0;
+
+	/* Assume port is primary */
+	tb_switch_for_each_port(sw, p) {
+		if (!tb_port_is_null(p))
+			continue;
+		if (tb_is_upstream_port(p))
+			continue;
+		if (!p->link_nr) {
+			if (p == port)
+				break;
+			usb4_idx++;
+		}
+	}
+
+	return usb4_idx;
+}
+
+/**
+ * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
+ * @sw: USB4 router
+ * @port: USB4 port
+ *
+ * USB4 routers have direct mapping between USB4 ports and PCIe
+ * downstream adapters where the PCIe topology is extended. This
+ * function returns the corresponding downstream PCIe adapter or %NULL
+ * if no such mapping was possible.
+ */
+struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
+					  const struct tb_port *port)
+{
+	int usb4_idx = usb4_port_idx(sw, port);
+	struct tb_port *p;
+	int pcie_idx = 0;
+
+	/* Find PCIe down port matching usb4_port */
+	tb_switch_for_each_port(sw, p) {
+		if (!tb_port_is_pcie_down(p))
+			continue;
+
+		if (pcie_idx == usb4_idx && !tb_pci_port_is_enabled(p))
+			return p;
+
+		pcie_idx++;
+	}
+
+	return NULL;
+}
+
+/**
+ * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
+ * @sw: USB4 router
+ * @port: USB4 port
+ *
+ * USB4 routers have direct mapping between USB4 ports and USB 3.x
+ * downstream adapters where the USB 3.x topology is extended. This
+ * function returns the corresponding downstream USB 3.x adapter or
+ * %NULL if no such mapping was possible.
+ */
+struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
+					  const struct tb_port *port)
+{
+	int usb4_idx = usb4_port_idx(sw, port);
+	struct tb_port *p;
+	int usb_idx = 0;
+
+	/* Find USB3 down port matching usb4_port */
+	tb_switch_for_each_port(sw, p) {
+		if (!tb_port_is_usb3_down(p))
+			continue;
+
+		if (usb_idx == usb4_idx && !tb_usb3_port_is_enabled(p))
+			return p;
+
+		usb_idx++;
+	}
+
+	return NULL;
+}
+
+/**
+ * usb4_port_unlock() - Unlock USB4 downstream port
+ * @port: USB4 port to unlock
+ *
+ * Unlocks USB4 downstream port so that the connection manager can
+ * access the router below this port.
+ */
+int usb4_port_unlock(struct tb_port *port)
+{
+	int ret;
+	u32 val;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
+	if (ret)
+		return ret;
+
+	val &= ~ADP_CS_4_LCK;
+	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
+}
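
To make the router-operation metadata packing above concrete, a small worked example mirroring usb4_switch_nvm_read_block() (the helper and the numbers are made up for illustration):

/* Hypothetical: metadata for an 8-dword NVM read at dword address 0x40. */
static u32 example_pack_nvm_read_metadata(void)
{
	u32 metadata;

	metadata = (8 << USB4_NVM_READ_LENGTH_SHIFT) &
		   USB4_NVM_READ_LENGTH_MASK;		/* 0x08000000 */
	metadata |= (0x40 << USB4_NVM_READ_OFFSET_SHIFT) &
		    USB4_NVM_READ_OFFSET_MASK;		/* 0x00000100 */

	return metadata;				/* 0x08000100 */
}
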
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 880d784..053f918 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1220,7 +1220,13 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
 				    u64 route, const uuid_t *local_uuid,
 				    const uuid_t *remote_uuid)
 {
+	struct tb_switch *parent_sw = tb_to_switch(parent);
 	struct tb_xdomain *xd;
+	struct tb_port *down;
+
+	/* Make sure the downstream domain is accessible */
+	down = tb_port_at(route, parent_sw);
+	tb_port_unlock(down);
 
 	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
 	if (!xd)
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 98361ac..27b506bf 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -115,11 +115,9 @@
 struct n_hdlc_buf {
 	struct list_head  list_item;
 	int		  count;
-	char		  buf[1];
+	char		  buf[];
 };
 
-#define	N_HDLC_BUF_SIZE	(sizeof(struct n_hdlc_buf) + maxframe)
-
 struct n_hdlc_buf_list {
 	struct list_head  list;
 	int		  count;
@@ -524,7 +522,8 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
 		/* no buffers in free list, attempt to allocate another rx buffer */
 		/* unless the maximum count has been reached */
 		if (n_hdlc->rx_buf_list.count < MAX_RX_BUF_COUNT)
-			buf = kmalloc(N_HDLC_BUF_SIZE, GFP_ATOMIC);
+			buf = kmalloc(struct_size(buf, buf, maxframe),
+				      GFP_ATOMIC);
 	}
 	
 	if (!buf) {
@@ -853,7 +852,7 @@ static struct n_hdlc *n_hdlc_alloc(void)
 
 	/* allocate free rx buffer list */
 	for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
-		buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
+		buf = kmalloc(struct_size(buf, buf, maxframe), GFP_KERNEL);
 		if (buf)
 			n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,buf);
 		else if (debuglevel >= DEBUG_LEVEL_INFO)	
@@ -862,7 +861,7 @@ static struct n_hdlc *n_hdlc_alloc(void)
 	
 	/* allocate free tx buffer list */
 	for(i=0;i<DEFAULT_TX_BUF_COUNT;i++) {
-		buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
+		buf = kmalloc(struct_size(buf, buf, maxframe), GFP_KERNEL);
 		if (buf)
 			n_hdlc_buf_put(&n_hdlc->tx_free_buf_list,buf);
 		else if (debuglevel >= DEBUG_LEVEL_INFO)	
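
The n_hdlc change replaces the old one-element-array-plus-size-macro idiom with a C99 flexible array member sized via struct_size(); a minimal standalone sketch of the pattern (struct and function names are made up):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_frame {
	int	count;
	char	buf[];			/* flexible array member */
};

static struct example_frame *example_frame_alloc(size_t maxframe)
{
	struct example_frame *f;

	/* struct_size() computes sizeof(*f) + maxframe, with overflow checks */
	f = kmalloc(struct_size(f, buf, maxframe), GFP_KERNEL);
	return f;
}
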
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index ce5309d..42345e7 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -115,8 +115,8 @@ int serdev_device_add(struct serdev_device *serdev)
 
 	err = device_add(&serdev->dev);
 	if (err < 0) {
-		dev_err(&serdev->dev, "Can't add %s, status %d\n",
-			dev_name(&serdev->dev), err);
+		dev_err(&serdev->dev, "Can't add %s, status %pe\n",
+			dev_name(&serdev->dev), ERR_PTR(err));
 		goto err_clear_serdev;
 	}
 
@@ -540,7 +540,8 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
 		err = serdev_device_add(serdev);
 		if (err) {
 			dev_err(&serdev->dev,
-				"failure adding device. status %d\n", err);
+				"failure adding device. status %pe\n",
+				ERR_PTR(err));
 			serdev_device_put(serdev);
 		} else
 			found = true;
@@ -656,7 +657,8 @@ static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
 	err = serdev_device_add(serdev);
 	if (err) {
 		dev_err(&serdev->dev,
-			"failure adding ACPI serdev device. status %d\n", err);
+			"failure adding ACPI serdev device. status %pe\n",
+			ERR_PTR(err));
 		serdev_device_put(serdev);
 	}
 
@@ -741,8 +743,8 @@ int serdev_controller_add(struct serdev_controller *ctrl)
 	ret_of = of_serdev_register_devices(ctrl);
 	ret_acpi = acpi_serdev_register_devices(ctrl);
 	if (ret_of && ret_acpi) {
-		dev_dbg(&ctrl->dev, "no devices registered: of:%d acpi:%d\n",
-			ret_of, ret_acpi);
+		dev_dbg(&ctrl->dev, "no devices registered: of:%pe acpi:%pe\n",
+			ERR_PTR(ret_of), ERR_PTR(ret_acpi));
 		ret = -ENODEV;
 		goto err_rpm_disable;
 	}
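
The serdev hunks switch the error prints from raw integers to the %pe format specifier, which renders symbolic errno names via ERR_PTR(); a small hedged example of the same pattern (device and message are hypothetical):

#include <linux/device.h>
#include <linux/err.h>

static void example_report_error(struct device *dev, int err)
{
	/* With err == -ENODEV this logs "example op failed: -ENODEV" */
	if (err)
		dev_err(dev, "example op failed: %pe\n", ERR_PTR(err));
}
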
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index 32b3acf..718e010 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -41,8 +41,43 @@
 
 static const char serial21285_name[] = "Footbridge UART";
 
-#define tx_enabled(port)	((port)->unused[0])
-#define rx_enabled(port)	((port)->unused[1])
+/*
+ * We only need 2 bits of data, so instead of creating a whole structure for
+ * this, use bits of the private_data pointer of the uart port structure.
+ */
+#define tx_enabled_bit	0
+#define rx_enabled_bit	1
+
+static bool is_enabled(struct uart_port *port, int bit)
+{
+	/* Operate on the pointer-sized field itself so updates persist. */
+	unsigned long *private_data = (unsigned long *)&port->private_data;
+
+	if (test_bit(bit, private_data))
+		return true;
+	return false;
+}
+
+static void enable(struct uart_port *port, int bit)
+{
+	unsigned long *private_data = (unsigned long *)&port->private_data;
+
+	set_bit(bit, private_data);
+}
+
+static void disable(struct uart_port *port, int bit)
+{
+	unsigned long *private_data = (unsigned long *)&port->private_data;
+
+	clear_bit(bit, private_data);
+}
+
+#define is_tx_enabled(port)	is_enabled(port, tx_enabled_bit)
+#define tx_enable(port)		enable(port, tx_enabled_bit)
+#define tx_disable(port)	disable(port, tx_enabled_bit)
+
+#define is_rx_enabled(port)	is_enabled(port, rx_enabled_bit)
+#define rx_enable(port)		enable(port, rx_enabled_bit)
+#define rx_disable(port)	disable(port, rx_enabled_bit)
 
 /*
  * The documented expression for selecting the divisor is:
@@ -57,25 +92,25 @@ static const char serial21285_name[] = "Footbridge UART";
 
 static void serial21285_stop_tx(struct uart_port *port)
 {
-	if (tx_enabled(port)) {
+	if (is_tx_enabled(port)) {
 		disable_irq_nosync(IRQ_CONTX);
-		tx_enabled(port) = 0;
+		tx_disable(port);
 	}
 }
 
 static void serial21285_start_tx(struct uart_port *port)
 {
-	if (!tx_enabled(port)) {
+	if (!is_tx_enabled(port)) {
 		enable_irq(IRQ_CONTX);
-		tx_enabled(port) = 1;
+		tx_enable(port);
 	}
 }
 
 static void serial21285_stop_rx(struct uart_port *port)
 {
-	if (rx_enabled(port)) {
+	if (is_rx_enabled(port)) {
 		disable_irq_nosync(IRQ_CONRX);
-		rx_enabled(port) = 0;
+		rx_disable(port);
 	}
 }
 
@@ -185,8 +220,8 @@ static int serial21285_startup(struct uart_port *port)
 {
 	int ret;
 
-	tx_enabled(port) = 1;
-	rx_enabled(port) = 1;
+	tx_enable(port);
+	rx_enable(port);
 
 	ret = request_irq(IRQ_CONRX, serial21285_rx_chars, 0,
 			  serial21285_name, port);
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 6e67fd8..d657aa1 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -5,10 +5,6 @@
  *    Copyright (C) 2016 Jeremy Kerr <jk@ozlabs.org>, IBM Corp.
  *    Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>, IBM Corp.
  */
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
@@ -406,6 +402,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
 	port.port.unthrottle = aspeed_vuart_unthrottle;
 	port.port.status = UPSTAT_SYNC_FIFO;
 	port.port.dev = &pdev->dev;
+	port.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
 
 	rc = sysfs_create_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
 	if (rc < 0)
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index 8ce700c..e70e3cc3 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -16,14 +16,19 @@
 
 #include "8250.h"
 
+/**
+ * struct bcm2835aux_data - driver private data of BCM2835 auxiliary UART
+ * @clk: clock producer of the port's uartclk
+ * @line: index of the port's serial8250_ports[] entry
+ */
 struct bcm2835aux_data {
-	struct uart_8250_port uart;
 	struct clk *clk;
 	int line;
 };
 
 static int bcm2835aux_serial_probe(struct platform_device *pdev)
 {
+	struct uart_8250_port up = { };
 	struct bcm2835aux_data *data;
 	struct resource *res;
 	int ret;
@@ -34,23 +39,21 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	/* initialize data */
-	spin_lock_init(&data->uart.port.lock);
-	data->uart.capabilities = UART_CAP_FIFO | UART_CAP_MINI;
-	data->uart.port.dev = &pdev->dev;
-	data->uart.port.regshift = 2;
-	data->uart.port.type = PORT_16550;
-	data->uart.port.iotype = UPIO_MEM;
-	data->uart.port.fifosize = 8;
-	data->uart.port.flags = UPF_SHARE_IRQ |
-				UPF_FIXED_PORT |
-				UPF_FIXED_TYPE |
-				UPF_SKIP_TEST;
+	up.capabilities = UART_CAP_FIFO | UART_CAP_MINI;
+	up.port.dev = &pdev->dev;
+	up.port.regshift = 2;
+	up.port.type = PORT_16550;
+	up.port.iotype = UPIO_MEM;
+	up.port.fifosize = 8;
+	up.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE |
+			UPF_SKIP_TEST | UPF_IOREMAP;
 
 	/* get the clock - this also enables the HW */
 	data->clk = devm_clk_get(&pdev->dev, NULL);
 	ret = PTR_ERR_OR_ZERO(data->clk);
 	if (ret) {
-		dev_err(&pdev->dev, "could not get clk: %d\n", ret);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "could not get clk: %d\n", ret);
 		return ret;
 	}
 
@@ -58,7 +61,7 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
 	ret = platform_get_irq(pdev, 0);
 	if (ret < 0)
 		return ret;
-	data->uart.port.irq = ret;
+	up.port.irq = ret;
 
 	/* map the main registers */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -66,15 +69,13 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "memory resource not found");
 		return -EINVAL;
 	}
-	data->uart.port.membase = devm_ioremap_resource(&pdev->dev, res);
-	ret = PTR_ERR_OR_ZERO(data->uart.port.membase);
-	if (ret)
-		return ret;
+	up.port.mapbase = res->start;
+	up.port.mapsize = resource_size(res);
 
 	/* Check for a fixed line number */
 	ret = of_alias_get_id(pdev->dev.of_node, "serial");
 	if (ret >= 0)
-		data->uart.port.line = ret;
+		up.port.line = ret;
 
 	/* enable the clock as a last step */
 	ret = clk_prepare_enable(data->clk);
@@ -89,13 +90,14 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
 	 * so we have to multiply the actual clock by 2
 	 * to get identical baudrates.
 	 */
-	data->uart.port.uartclk = clk_get_rate(data->clk) * 2;
+	up.port.uartclk = clk_get_rate(data->clk) * 2;
 
 	/* register the port */
-	ret = serial8250_register_8250_port(&data->uart);
+	ret = serial8250_register_8250_port(&up);
 	if (ret < 0) {
-		dev_err(&pdev->dev, "unable to register 8250 port - %d\n",
-			ret);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"unable to register 8250 port - %d\n", ret);
 		goto dis_clk;
 	}
 	data->line = ret;
@@ -113,7 +115,7 @@ static int bcm2835aux_serial_remove(struct platform_device *pdev)
 {
 	struct bcm2835aux_data *data = platform_get_drvdata(pdev);
 
-	serial8250_unregister_port(data->uart.port.line);
+	serial8250_unregister_port(data->line);
 	clk_disable_unprepare(data->clk);
 
 	return 0;
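
    The bcm2835aux change above drops the embedded struct uart_8250_port from
    the driver's private data: the port description is now built in a
    stack-local variable, handed to serial8250_register_8250_port() (which
    copies what it needs), and only the returned line index is kept around for
    unregistration. A minimal sketch of that pattern, using hypothetical
    my_*() helpers and a my_data struct rather than the driver's actual code:

        #include <linux/platform_device.h>
        #include <linux/serial_8250.h>

        struct my_data {
                int line;                 /* index returned by the 8250 core */
        };

        static int my_register(struct platform_device *pdev, struct my_data *data)
        {
                struct uart_8250_port up = { };   /* only needed during probe */

                up.port.dev = &pdev->dev;
                up.port.iotype = UPIO_MEM;
                /* ... fill in mapbase/mapsize, irq, uartclk, flags ... */

                data->line = serial8250_register_8250_port(&up);
                return data->line < 0 ? data->line : 0;
        }

        static void my_unregister(struct my_data *data)
        {
                serial8250_unregister_port(data->line);
        }
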
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index e682390..0894a22 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -816,6 +816,7 @@ static int serial8250_probe(struct platform_device *dev)
 		uart.port.flags		= p->flags;
 		uart.port.mapbase	= p->mapbase;
 		uart.port.hub6		= p->hub6;
+		uart.port.has_sysrq	= p->has_sysrq;
 		uart.port.private_data	= p->private_data;
 		uart.port.type		= p->type;
 		uart.port.serial_in	= p->serial_in;
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 108cd55..91e9b07 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -186,7 +186,7 @@ static int xr17v35x_startup(struct uart_port *port)
 static void exar_shutdown(struct uart_port *port)
 {
 	unsigned char lsr;
-	bool tx_complete = 0;
+	bool tx_complete = false;
 	struct uart_8250_port *up = up_to_u8250p(port);
 	struct circ_buf *xmit = &port->state->xmit;
 	int i = 0;
@@ -194,9 +194,9 @@ static void exar_shutdown(struct uart_port *port)
 	do {
 		lsr = serial_in(up, UART_LSR);
 		if (lsr & (UART_LSR_TEMT | UART_LSR_THRE))
-			tx_complete = 1;
+			tx_complete = true;
 		else
-			tx_complete = 0;
+			tx_complete = false;
 		usleep_range(1000, 1100);
 	} while (!uart_circ_empty(xmit) && !tx_complete && i++ < 1000);
 
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index aa0e216..0d0c809 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -1,8 +1,4 @@
 // SPDX-License-Identifier: GPL-2.0
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/serial_reg.h>
 #include <linux/serial_8250.h>
 
diff --git a/drivers/tty/serial/8250/8250_ioc3.c b/drivers/tty/serial/8250/8250_ioc3.c
new file mode 100644
index 0000000..d5a39e1
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_ioc3.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SGI IOC3 8250 UART driver
+ *
+ * Copyright (C) 2019 Thomas Bogendoerfer <tbogendoerfer@suse.de>
+ *
+ * based on code Copyright (C) 2005 Stanislaw Skowronek <skylark@unaligned.org>
+ *               Copyright (C) 2014 Joshua Kinard <kumba@gentoo.org>
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include "8250.h"
+
+#define IOC3_UARTCLK (22000000 / 3)
+
+struct ioc3_8250_data {
+	int line;
+};
+
+static unsigned int ioc3_serial_in(struct uart_port *p, int offset)
+{
+	return readb(p->membase + (offset ^ 3));
+}
+
+static void ioc3_serial_out(struct uart_port *p, int offset, int value)
+{
+	writeb(value, p->membase + (offset ^ 3));
+}
+
+static int serial8250_ioc3_probe(struct platform_device *pdev)
+{
+	struct ioc3_8250_data *data;
+	struct uart_8250_port up;
+	struct resource *r;
+	void __iomem *membase;
+	int irq, line;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r)
+		return -ENODEV;
+
+	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	membase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+	if (!membase)
+		return -ENOMEM;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		irq = 0; /* no interrupt -> use polling */
+
+	/* Register serial ports with 8250.c */
+	memset(&up, 0, sizeof(struct uart_8250_port));
+	up.port.iotype = UPIO_MEM;
+	up.port.uartclk = IOC3_UARTCLK;
+	up.port.type = PORT_16550A;
+	up.port.irq = irq;
+	up.port.flags = (UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ);
+	up.port.dev = &pdev->dev;
+	up.port.membase = membase;
+	up.port.mapbase = r->start;
+	up.port.serial_in = ioc3_serial_in;
+	up.port.serial_out = ioc3_serial_out;
+	line = serial8250_register_8250_port(&up);
+	if (line < 0)
+		return line;
+
+	platform_set_drvdata(pdev, data);
+	return 0;
+}
+
+static int serial8250_ioc3_remove(struct platform_device *pdev)
+{
+	struct ioc3_8250_data *data = platform_get_drvdata(pdev);
+
+	serial8250_unregister_port(data->line);
+	return 0;
+}
+
+static struct platform_driver serial8250_ioc3_driver = {
+	.probe  = serial8250_ioc3_probe,
+	.remove = serial8250_ioc3_remove,
+	.driver = {
+		.name = "ioc3-serial8250",
+	}
+};
+
+module_platform_driver(serial8250_ioc3_driver);
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfer@suse.de>");
+MODULE_DESCRIPTION("SGI IOC3 8250 UART driver");
+MODULE_LICENSE("GPL");
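
    The ioc3_serial_in()/ioc3_serial_out() helpers above XOR the register
    offset with 3. On a big-endian bus this swaps the byte lanes within each
    aligned 32-bit word so the byte-wide 16550 registers land where the 8250
    core expects them; that is the usual reason for this idiom, stated here as
    an assumption since the IOC3 documentation is not part of this patch. The
    arithmetic, spelled out:

        /*
         * offset ^ 3 mirrors byte addresses within each aligned 4-byte word:
         *   0 <-> 3, 1 <-> 2, 4 <-> 7, 5 <-> 6, ...
         * so readb(base + (offset ^ 3)) reaches the register that a
         * little-endian byte layout would have placed at 'offset'.
         */
        static unsigned int byteswapped_readb(void __iomem *base, int offset)
        {
                return readb(base + (offset ^ 3));
        }
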
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 92fbf46..531ad67 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -222,8 +222,10 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
 
 	if (IS_ENABLED(CONFIG_SERIAL_8250_FSL) &&
 	    (of_device_is_compatible(np, "fsl,ns16550") ||
-	     of_device_is_compatible(np, "fsl,16550-FIFO64")))
+	     of_device_is_compatible(np, "fsl,16550-FIFO64"))) {
 		port->handle_irq = fsl8250_handle_irq;
+		port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+	}
 
 	return 0;
 err_unprepare:
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index e603c66..6f343ca 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -8,10 +8,6 @@
  *
  */
 
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/io.h>
@@ -1192,6 +1188,7 @@ static int omap8250_probe(struct platform_device *pdev)
 	up.port.throttle = omap_8250_throttle;
 	up.port.unthrottle = omap_8250_unthrottle;
 	up.port.rs485_config = omap_8250_rs485_config;
+	up.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
 
 	ret = of_alias_get_id(np, "serial");
 	if (ret < 0) {
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 9ff5dfa..430e346 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -11,10 +11,6 @@
  *  membase is an 'ioremapped' cookie.
  */
 
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/ioport.h>
@@ -1001,6 +997,9 @@ static void autoconfig_16550a(struct uart_8250_port *up)
 	up->port.type = PORT_16550A;
 	up->capabilities |= UART_CAP_FIFO;
 
+	if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS))
+		return;
+
 	/*
 	 * Check for presence of the EFR when DLAB is set.
 	 * Only ST16C650V1 UARTs pass this test.
@@ -3055,6 +3054,7 @@ void serial8250_init_port(struct uart_8250_port *up)
 
 	spin_lock_init(&port->lock);
 	port->ops = &serial8250_pops;
+	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
 
 	up->cur_iotype = 0xFF;
 }
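
    The 8250_port.c hunk above is the template for most files in this series:
    the compile-time SUPPORT_SYSRQ define (which had to be set before
    including serial_core.h) is removed, and console SysRq support is instead
    advertised at run time through the port's has_sysrq flag. Schematically,
    with a made-up CONFIG_FOO_CONSOLE standing in for each driver's console
    symbol:

        /* Before: per-driver opt-in, required ahead of <linux/serial_core.h> */
        #if defined(CONFIG_FOO_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
        #define SUPPORT_SYSRQ
        #endif

        /* After: one assignment in probe/init, evaluated at run time */
        port->has_sysrq = IS_ENABLED(CONFIG_FOO_CONSOLE);
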
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index fab3d4f..f16824bb 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -60,6 +60,16 @@
 	  This builds standard PNP serial support. You may be able to
 	  disable this feature if you only need legacy serial support.
 
+config SERIAL_8250_16550A_VARIANTS
+	bool "Support for variants of the 16550A serial port"
+	depends on SERIAL_8250
+	help
+	  The 8250 driver can probe for many variants of the venerable 16550A
+	  serial port. Doing so takes additional time at boot.
+
+	  On modern systems, especially those using serial only for a simple
+	  console, you can say N here.
+
 config SERIAL_8250_FINTEK
 	bool "Support for Fintek F81216A LPC to 4 UART RS485 API"
 	depends on SERIAL_8250
@@ -371,6 +381,17 @@
 	  port hardware found on the Emma Mobile line of processors.
 	  If unsure, say N.
 
+config SERIAL_8250_IOC3
+	tristate "SGI IOC3 8250 UART support"
+	depends on SGI_MFD_IOC3 && SERIAL_8250
+	select SERIAL_8250_EXTENDED
+	select SERIAL_8250_SHARE_IRQ
+	help
+	  Enable this if you have an SGI Origin or Octane machine. This module
+	  provides basic serial support by directly driving the UART chip
+	  behind the IOC3 device on those systems. The maximum baud rate
+	  supported by this driver is 38400 bps.
+
 config SERIAL_8250_RT288X
 	bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
 	depends on SERIAL_8250
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 08c1d81..51a6079 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -28,6 +28,7 @@
 obj-$(CONFIG_SERIAL_8250_MEN_MCB)	+= 8250_men_mcb.o
 obj-$(CONFIG_SERIAL_8250_DW)		+= 8250_dw.o
 obj-$(CONFIG_SERIAL_8250_EM)		+= 8250_em.o
+obj-$(CONFIG_SERIAL_8250_IOC3)		+= 8250_ioc3.o
 obj-$(CONFIG_SERIAL_8250_OMAP)		+= 8250_omap.o
 obj-$(CONFIG_SERIAL_8250_LPC18XX)	+= 8250_lpc18xx.o
 obj-$(CONFIG_SERIAL_8250_MT6577)	+= 8250_mtk.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 99f5da3..52eaac2 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -237,7 +237,7 @@
 
 config SERIAL_SAMSUNG
 	tristate "Samsung SoC serial support"
-	depends on PLAT_SAMSUNG || ARCH_EXYNOS
+	depends on PLAT_SAMSUNG || ARCH_EXYNOS || COMPILE_TEST
 	select SERIAL_CORE
 	help
 	  Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
@@ -975,7 +975,7 @@
 
 config SERIAL_QCOM_GENI_CONSOLE
 	bool "QCOM GENI Serial Console support"
-	depends on SERIAL_QCOM_GENI=y
+	depends on SERIAL_QCOM_GENI
 	select SERIAL_CORE_CONSOLE
 	select SERIAL_EARLYCON
 	help
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 2c37d11..3284f34 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -15,10 +15,6 @@
  * and hooked into this driver.
  */
 
-#if defined(CONFIG_SERIAL_AMBA_PL010_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
@@ -728,6 +724,7 @@ static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
 	uap->port.iotype = UPIO_MEM;
 	uap->port.irq = dev->irq[0];
 	uap->port.fifosize = 16;
+	uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL010_CONSOLE);
 	uap->port.ops = &amba_pl010_pops;
 	uap->port.flags = UPF_BOOT_AUTOCONF;
 	uap->port.line = i;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 4b28134..2296bb0 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -16,11 +16,6 @@
  * and hooked into this driver.
  */
 
-
-#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
@@ -1452,8 +1447,6 @@ static void pl011_modem_status(struct uart_amba_port *uap)
 
 static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
 {
-	unsigned int dummy_read;
-
 	if (!uap->vendor->cts_event_workaround)
 		return;
 
@@ -1465,8 +1458,8 @@ static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
 	 * single apb access will incur 2 pclk(133.12Mhz) delay,
 	 * so add 2 dummy reads
 	 */
-	dummy_read = pl011_read(uap, REG_ICR);
-	dummy_read = pl011_read(uap, REG_ICR);
+	pl011_read(uap, REG_ICR);
+	pl011_read(uap, REG_ICR);
 }
 
 static irqreturn_t pl011_int(int irq, void *dev_id)
@@ -2579,6 +2572,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
 	uap->port.mapbase = mmiobase->start;
 	uap->port.membase = base;
 	uap->port.fifosize = uap->fifosize;
+	uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
 	uap->port.flags = UPF_BOOT_AUTOCONF;
 	uap->port.line = index;
 
@@ -2769,6 +2763,7 @@ static struct platform_driver arm_sbsa_uart_platform_driver = {
 	.remove		= sbsa_uart_remove,
 	.driver	= {
 		.name	= "sbsa-uart",
+		.pm	= &pl011_dev_pm_ops,
 		.of_match_table = of_match_ptr(sbsa_uart_of_match),
 		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
 		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 60cd133..e8d56e8 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -11,10 +11,6 @@
  *  Copyright (C) 2009 Kristoffer Glembo <kristoffer@gaisler.com>, Aeroflex Gaisler AB
  */
 
-#if defined(CONFIG_SERIAL_GRLIB_GAISLER_APBUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/module.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
@@ -626,6 +622,7 @@ static int __init grlib_apbuart_configure(void)
 		port->irq = 0;
 		port->iotype = UPIO_MEM;
 		port->ops = &grlib_apbuart_ops;
+		port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_GRLIB_GAISLER_APBUART_CONSOLE);
 		port->flags = UPF_BOOT_AUTOCONF;
 		port->line = line;
 		port->uartclk = *freq_hz;
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index d904a3a..17c3fc3 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -21,10 +21,6 @@
  *  -check if sysreq works
  */
 
-#if defined(CONFIG_SERIAL_ARC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/module.h>
 #include <linux/serial.h>
 #include <linux/console.h>
@@ -625,6 +621,7 @@ static int arc_serial_probe(struct platform_device *pdev)
 	port->flags = UPF_BOOT_AUTOCONF;
 	port->line = dev_id;
 	port->ops = &arc_serial_pops;
+	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ARC_CONSOLE);
 
 	port->fifosize = ARC_UART_TX_FIFO_SIZE;
 
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 1ba9bc66..c15c398 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -51,10 +51,6 @@
 #define ATMEL_RTS_HIGH_OFFSET	16
 #define ATMEL_RTS_LOW_OFFSET	20
 
-#if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/serial_core.h>
 
 #include "serial_mctrl_gpio.h"
@@ -196,10 +192,6 @@ struct atmel_uart_port {
 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
 
-#ifdef SUPPORT_SYSRQ
-static struct console atmel_console;
-#endif
-
 #if defined(CONFIG_OF)
 static const struct of_device_id atmel_serial_dt_ids[] = {
 	{ .compatible = "atmel,at91rm9200-usart-serial" },
@@ -313,7 +305,11 @@ static int atmel_config_rs485(struct uart_port *port,
 
 	if (rs485conf->flags & SER_RS485_ENABLED) {
 		dev_dbg(port->dev, "Setting UART to RS485\n");
-		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+		if (port->rs485.flags & SER_RS485_RX_DURING_TX)
+			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
+		else
+			atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+
 		atmel_uart_writel(port, ATMEL_US_TTGR,
 				  rs485conf->delay_rts_after_send);
 		mode |= ATMEL_US_USMODE_RS485;
@@ -831,7 +827,7 @@ static void atmel_tx_chars(struct uart_port *port)
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
 	if (port->x_char &&
-	    (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
+	    (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) {
 		atmel_uart_write_char(port, port->x_char);
 		port->icount.tx++;
 		port->x_char = 0;
@@ -839,8 +835,7 @@ static void atmel_tx_chars(struct uart_port *port)
 	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
 		return;
 
-	while (atmel_uart_readl(port, ATMEL_US_CSR) &
-	       atmel_port->tx_done_mask) {
+	while (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY) {
 		atmel_uart_write_char(port, xmit->buf[xmit->tail]);
 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
 		port->icount.tx++;
@@ -851,10 +846,20 @@ static void atmel_tx_chars(struct uart_port *port)
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(port);
 
-	if (!uart_circ_empty(xmit))
+	if (!uart_circ_empty(xmit)) {
+		/* We still have characters to transmit, so we should continue
+		 * transmitting them when TX is ready, regardless of
+		 * mode or duplex setting.
+		 */
+		atmel_port->tx_done_mask |= ATMEL_US_TXRDY;
+
 		/* Enable interrupts */
 		atmel_uart_writel(port, ATMEL_US_IER,
 				  atmel_port->tx_done_mask);
+	} else {
+		if (atmel_uart_is_half_duplex(port))
+			atmel_port->tx_done_mask &= ~ATMEL_US_TXRDY;
+	}
 }
 
 static void atmel_complete_tx_dma(void *arg)
@@ -1067,7 +1072,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
 
 chan_err:
 	dev_err(port->dev, "TX channel not available, switch to pio\n");
-	atmel_port->use_dma_tx = 0;
+	atmel_port->use_dma_tx = false;
 	if (atmel_port->chan_tx)
 		atmel_release_tx_dma(port);
 	return -EINVAL;
@@ -1266,7 +1271,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
 
 chan_err:
 	dev_err(port->dev, "RX channel not available, switch to pio\n");
-	atmel_port->use_dma_rx = 0;
+	atmel_port->use_dma_rx = false;
 	if (atmel_port->chan_rx)
 		atmel_release_rx_dma(port);
 	return -EINVAL;
@@ -1693,7 +1698,7 @@ static int atmel_prepare_rx_pdc(struct uart_port *port)
 					DMA_FROM_DEVICE);
 				kfree(atmel_port->pdc_rx[0].buf);
 			}
-			atmel_port->use_pdc_rx = 0;
+			atmel_port->use_pdc_rx = false;
 			return -ENOMEM;
 		}
 		pdc->dma_addr = dma_map_single(port->dev,
@@ -2526,8 +2531,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
 	 * Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or
 	 * ENDTX|TXBUFE
 	 */
-	if (port->rs485.flags & SER_RS485_ENABLED ||
-	    port->iso7816.flags & SER_ISO7816_ENABLED)
+	if (atmel_uart_is_half_duplex(port))
 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
 	else if (atmel_use_pdc_tx(port)) {
 		port->fifosize = PDC_BUFFER_SIZE;
@@ -2878,6 +2882,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
 	atmel_port = &atmel_ports[ret];
 	atmel_port->backup_imr = 0;
 	atmel_port->uart.line = ret;
+	atmel_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_ATMEL_CONSOLE);
 	atmel_serial_probe_fifos(atmel_port, pdev);
 
 	atomic_set(&atmel_port->tasklet_shutdown, 0);
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index b7adc61..5674da2 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -10,10 +10,6 @@
  * my board.
  */
 
-#if defined(CONFIG_SERIAL_BCM63XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/init.h>
@@ -858,6 +854,7 @@ static int bcm_uart_probe(struct platform_device *pdev)
 	port->fifosize = 16;
 	port->uartclk = clk_get_rate(clk) / 2;
 	port->line = pdev->id;
+	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_BCM63XX_CONSOLE);
 	clk_put(clk);
 
 	ret = uart_add_one_port(&bcm_uart_driver, port);
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 0615907..95abc6f 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -8,10 +8,6 @@
  *  Copyright (C) 2000 Deep Blue Solutions Ltd.
  */
 
-#if defined(CONFIG_SERIAL_CLPS711X_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/console.h>
@@ -479,6 +475,7 @@ static int uart_clps711x_probe(struct platform_device *pdev)
 	s->port.mapbase		= res->start;
 	s->port.type		= PORT_CLPS711X;
 	s->port.fifosize	= 16;
+	s->port.has_sysrq	= IS_ENABLED(CONFIG_SERIAL_CLPS711X_CONSOLE);
 	s->port.flags		= UPF_SKIP_TEST | UPF_FIXED_TYPE;
 	s->port.uartclk		= clk_get_rate(uart_clk);
 	s->port.ops		= &uart_clps711x_ops;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index de6d02f..19d5a4c 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -40,10 +40,6 @@
 #include <asm/fs_pd.h>
 #include <asm/udbg.h>
 
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/serial_core.h>
 #include <linux/kernel.h>
 
@@ -347,9 +343,7 @@ static void cpm_uart_int_rx(struct uart_port *port)
 		/* ASSUMPTION: it contains nothing valid */
 		i = 0;
 	}
-#ifdef SUPPORT_SYSRQ
 	port->sysrq = 0;
-#endif
 	goto error_return;
 }
 
@@ -1204,7 +1198,8 @@ static int cpm_uart_init_port(struct device_node *np,
 	pinfo->port.uartclk = ppc_proc_freq;
 	pinfo->port.mapbase = (unsigned long)mem;
 	pinfo->port.type = PORT_CPM;
-	pinfo->port.ops = &cpm_uart_pops,
+	pinfo->port.ops = &cpm_uart_pops;
+	pinfo->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_CPM_CONSOLE);
 	pinfo->port.iotype = UPIO_MEM;
 	pinfo->port.fifosize = pinfo->tx_nrfifos * pinfo->tx_fifosize;
 	spin_lock_init(&pinfo->port.lock);
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 730da41..4552742 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -29,10 +29,6 @@
 
 #undef DEBUG_DZ
 
-#if defined(CONFIG_SERIAL_DZ_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/bitops.h>
 #include <linux/compiler.h>
 #include <linux/console.h>
@@ -787,6 +783,7 @@ static void __init dz_init_ports(void)
 		uport->ops	= &dz_ops;
 		uport->line	= line;
 		uport->mapbase	= base;
+		uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_DZ_CONSOLE);
 	}
 }
 
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index d6b5e54..2ac8712 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -1,8 +1,4 @@
 // SPDX-License-Identifier: GPL-2.0
-#if defined(CONFIG_SERIAL_EFM32_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/io.h>
@@ -748,6 +744,7 @@ static int efm32_uart_probe(struct platform_device *pdev)
 	efm_port->port.type = PORT_EFMUART;
 	efm_port->port.iotype = UPIO_MEM32;
 	efm_port->port.fifosize = 2;
+	efm_port->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_EFM32_UART_CONSOLE);
 	efm_port->port.ops = &efm32_uart_pops;
 	efm_port->port.flags = UPF_BOOT_AUTOCONF;
 
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index 205c31a..3e28be4 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -6,11 +6,6 @@
  * Copyright 2017-2019 NXP
  */
 
-#if defined(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE) && \
-	defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/console.h>
 #include <linux/io.h>
 #include <linux/irq.h>
@@ -279,10 +274,8 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
 		if (brk) {
 			uart_handle_break(sport);
 		} else {
-#ifdef SUPPORT_SYSRQ
 			if (uart_handle_sysrq_char(sport, (unsigned char)rx))
 				continue;
-#endif
 			tty_insert_flip_char(port, rx, flg);
 		}
 	}
@@ -863,6 +856,7 @@ static int linflex_probe(struct platform_device *pdev)
 	sport->irq = platform_get_irq(pdev, 0);
 	sport->ops = &linflex_pops;
 	sport->flags = UPF_BOOT_AUTOCONF;
+	sport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE);
 
 	linflex_ports[sport->line] = sport;
 
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 4e128d1..91e2805 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -5,10 +5,6 @@
  *  Copyright 2012-2014 Freescale Semiconductor, Inc.
  */
 
-#if defined(CONFIG_SERIAL_FSL_LPUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/clk.h>
 #include <linux/console.h>
 #include <linux/dma-mapping.h>
@@ -864,9 +860,7 @@ static void lpuart_rxint(struct lpuart_port *sport)
 			if (sr & UARTSR1_OR)
 				flg = TTY_OVERRUN;
 
-#ifdef SUPPORT_SYSRQ
 			sport->port.sysrq = 0;
-#endif
 		}
 
 		tty_insert_flip_char(port, rx, flg);
@@ -946,9 +940,7 @@ static void lpuart32_rxint(struct lpuart_port *sport)
 			if (sr & UARTSTAT_OR)
 				flg = TTY_OVERRUN;
 
-#ifdef SUPPORT_SYSRQ
 			sport->port.sysrq = 0;
-#endif
 		}
 
 		tty_insert_flip_char(port, rx, flg);
@@ -2376,7 +2368,9 @@ static int __init lpuart32_early_console_setup(struct earlycon_device *device,
 	if (!device->port.membase)
 		return -ENODEV;
 
-	device->port.iotype = UPIO_MEM32BE;
+	if (device->port.iotype != UPIO_MEM32)
+		device->port.iotype = UPIO_MEM32BE;
+
 	device->con->write = lpuart32_early_write;
 	return 0;
 }
@@ -2396,9 +2390,6 @@ static int __init lpuart32_imx_early_console_setup(struct earlycon_device *devic
 OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
 OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
 OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
-OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
-EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
-EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
 
 #define LPUART_CONSOLE	(&lpuart_console)
 #define LPUART32_CONSOLE	(&lpuart32_console)
@@ -2461,6 +2452,7 @@ static int lpuart_probe(struct platform_device *pdev)
 		sport->port.ops = &lpuart32_pops;
 	else
 		sport->port.ops = &lpuart_pops;
+	sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_FSL_LPUART_CONSOLE);
 	sport->port.flags = UPF_BOOT_AUTOCONF;
 
 	if (lpuart_is_32(sport))
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index a9e20e6..0c6c631 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -8,10 +8,6 @@
  * Copyright (C) 2004 Pengutronix
  */
 
-#if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
@@ -700,22 +696,33 @@ static void imx_uart_start_tx(struct uart_port *port)
 	}
 }
 
-static irqreturn_t imx_uart_rtsint(int irq, void *dev_id)
+static irqreturn_t __imx_uart_rtsint(int irq, void *dev_id)
 {
 	struct imx_port *sport = dev_id;
 	u32 usr1;
 
-	spin_lock(&sport->port.lock);
-
 	imx_uart_writel(sport, USR1_RTSD, USR1);
 	usr1 = imx_uart_readl(sport, USR1) & USR1_RTSS;
 	uart_handle_cts_change(&sport->port, !!usr1);
 	wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
 
-	spin_unlock(&sport->port.lock);
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t imx_uart_rtsint(int irq, void *dev_id)
+{
+	struct imx_port *sport = dev_id;
+	irqreturn_t ret;
+
+	spin_lock(&sport->port.lock);
+
+	ret = __imx_uart_rtsint(irq, dev_id);
+
+	spin_unlock(&sport->port.lock);
+
+	return ret;
+}
+
 static irqreturn_t imx_uart_txint(int irq, void *dev_id)
 {
 	struct imx_port *sport = dev_id;
@@ -726,14 +733,12 @@ static irqreturn_t imx_uart_txint(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
+static irqreturn_t __imx_uart_rxint(int irq, void *dev_id)
 {
 	struct imx_port *sport = dev_id;
 	unsigned int rx, flg, ignored = 0;
 	struct tty_port *port = &sport->port.state->port;
 
-	spin_lock(&sport->port.lock);
-
 	while (imx_uart_readl(sport, USR2) & USR2_RDR) {
 		u32 usr2;
 
@@ -779,9 +784,7 @@ static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
 			if (rx & URXD_OVRRUN)
 				flg = TTY_OVERRUN;
 
-#ifdef SUPPORT_SYSRQ
 			sport->port.sysrq = 0;
-#endif
 		}
 
 		if (sport->port.ignore_status_mask & URXD_DUMMY_READ)
@@ -792,11 +795,25 @@ static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
 	}
 
 out:
-	spin_unlock(&sport->port.lock);
 	tty_flip_buffer_push(port);
+
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
+{
+	struct imx_port *sport = dev_id;
+	irqreturn_t ret;
+
+	spin_lock(&sport->port.lock);
+
+	ret = __imx_uart_rxint(irq, dev_id);
+
+	spin_unlock(&sport->port.lock);
+
+	return ret;
+}
+
 static void imx_uart_clear_rx_errors(struct imx_port *sport);
 
 /*
@@ -855,6 +872,8 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
 	unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
 	irqreturn_t ret = IRQ_NONE;
 
+	spin_lock(&sport->port.lock);
+
 	usr1 = imx_uart_readl(sport, USR1);
 	usr2 = imx_uart_readl(sport, USR2);
 	ucr1 = imx_uart_readl(sport, UCR1);
@@ -888,27 +907,25 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
 		usr2 &= ~USR2_ORE;
 
 	if (usr1 & (USR1_RRDY | USR1_AGTIM)) {
-		imx_uart_rxint(irq, dev_id);
+		__imx_uart_rxint(irq, dev_id);
 		ret = IRQ_HANDLED;
 	}
 
 	if ((usr1 & USR1_TRDY) || (usr2 & USR2_TXDC)) {
-		imx_uart_txint(irq, dev_id);
+		imx_uart_transmit_buffer(sport);
 		ret = IRQ_HANDLED;
 	}
 
 	if (usr1 & USR1_DTRD) {
 		imx_uart_writel(sport, USR1_DTRD, USR1);
 
-		spin_lock(&sport->port.lock);
 		imx_uart_mctrl_check(sport);
-		spin_unlock(&sport->port.lock);
 
 		ret = IRQ_HANDLED;
 	}
 
 	if (usr1 & USR1_RTSD) {
-		imx_uart_rtsint(irq, dev_id);
+		__imx_uart_rtsint(irq, dev_id);
 		ret = IRQ_HANDLED;
 	}
 
@@ -923,6 +940,8 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
 		ret = IRQ_HANDLED;
 	}
 
+	spin_unlock(&sport->port.lock);
+
 	return ret;
 }
 
@@ -2231,6 +2250,7 @@ static int imx_uart_probe(struct platform_device *pdev)
 	sport->port.iotype = UPIO_MEM;
 	sport->port.irq = rxirq;
 	sport->port.fifosize = 32;
+	sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE);
 	sport->port.ops = &imx_uart_pops;
 	sport->port.rs485_config = imx_uart_rs485_config;
 	sport->port.flags = UPF_BOOT_AUTOCONF;
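
    The imx rework follows a lock-factoring pattern: each event handler is
    split into an unlocked __imx_uart_*() body and a thin wrapper that takes
    port.lock, so the aggregate imx_uart_int() can hold the lock once across
    all pending events while the standalone IRQ entry points still lock for
    themselves. A generic sketch of the shape (illustrative names, including a
    hypothetical my_event_pending() status check, not the driver's code):

        /* Body: assumes the caller already holds port->lock. */
        static irqreturn_t __my_event(struct uart_port *port)
        {
                /* ... service one interrupt source ... */
                return IRQ_HANDLED;
        }

        /* Standalone IRQ entry point: lock, run the body, unlock. */
        static irqreturn_t my_event_irq(int irq, void *dev_id)
        {
                struct uart_port *port = dev_id;
                irqreturn_t ret;

                spin_lock(&port->lock);
                ret = __my_event(port);
                spin_unlock(&port->lock);
                return ret;
        }

        /* Combined handler: one lock acquisition covers every source. */
        static irqreturn_t my_combined_irq(int irq, void *dev_id)
        {
                struct uart_port *port = dev_id;
                irqreturn_t ret = IRQ_NONE;

                spin_lock(&port->lock);
                if (my_event_pending(port))     /* hypothetical status check */
                        ret = __my_event(port);
                spin_unlock(&port->lock);
                return ret;
        }
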
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 8c81073..86fff69 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -38,10 +38,6 @@
 #include <asm/sgi/hpc3.h>
 #include <asm/sgi/ip22.h>
 
-#if defined(CONFIG_SERIAL_IP22_ZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/serial_core.h>
 
 #include "ip22zilog.h"
@@ -1080,6 +1076,7 @@ static struct uart_driver ip22zilog_reg = {
 
 static void __init ip22zilog_prepare(void)
 {
+	unsigned char sysrq_on = IS_ENABLED(CONFIG_SERIAL_IP22_ZILOG_CONSOLE);
 	struct uart_ip22zilog_port *up;
 	struct zilog_layout *rp;
 	int channel, chip;
@@ -1115,6 +1112,7 @@ static void __init ip22zilog_prepare(void)
 		up[(chip * 2) + 0].port.irq = zilog_irq;
 		up[(chip * 2) + 0].port.uartclk = ZS_CLOCK;
 		up[(chip * 2) + 0].port.fifosize = 1;
+		up[(chip * 2) + 0].port.has_sysrq = sysrq_on;
 		up[(chip * 2) + 0].port.ops = &ip22zilog_pops;
 		up[(chip * 2) + 0].port.type = PORT_IP22ZILOG;
 		up[(chip * 2) + 0].port.flags = 0;
@@ -1126,6 +1124,7 @@ static void __init ip22zilog_prepare(void)
 		up[(chip * 2) + 1].port.irq = zilog_irq;
 		up[(chip * 2) + 1].port.uartclk = ZS_CLOCK;
 		up[(chip * 2) + 1].port.fifosize = 1;
+		up[(chip * 2) + 1].port.has_sysrq = sysrq_on;
 		up[(chip * 2) + 1].port.ops = &ip22zilog_pops;
 		up[(chip * 2) + 1].port.type = PORT_IP22ZILOG;
 		up[(chip * 2) + 1].port.line = (chip * 2) + 1;
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index 4029272..5022447 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -118,7 +118,7 @@ static int kgdb_nmi_poll_one_knock(void)
 	int c = -1;
 	const char *magic = kgdb_nmi_magic;
 	size_t m = strlen(magic);
-	bool printch = 0;
+	bool printch = false;
 
 	c = dbg_io_ops->read_char();
 	if (c == NO_POLL_CHAR)
@@ -130,7 +130,7 @@ static int kgdb_nmi_poll_one_knock(void)
 		n = (n + 1) % m;
 		if (!n)
 			return 1;
-		printch = 1;
+		printch = true;
 	} else {
 		n = 0;
 	}
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 164b183..d2c08b7 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -5,15 +5,12 @@
  * Copyright (C) 2014 Carlo Caione <carlo@caione.org>
  */
 
-#if defined(CONFIG_SERIAL_MESON_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/clk.h>
 #include <linux/console.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
@@ -76,6 +73,8 @@
 #define AML_UART_PORT_OFFSET		6
 #define AML_UART_DEV_NAME		"ttyAML"
 
+#define AML_UART_POLL_USEC		5
+#define AML_UART_TIMEOUT_USEC		10000
 
 static struct uart_driver meson_uart_driver;
 
@@ -427,6 +426,64 @@ static void meson_uart_config_port(struct uart_port *port, int flags)
 	}
 }
 
+#ifdef CONFIG_CONSOLE_POLL
+/*
+ * Console polling routines for writing to and reading from the UART while
+ * in an interrupt or debug context (e.g. kgdb).
+ */
+
+static int meson_uart_poll_get_char(struct uart_port *port)
+{
+	u32 c;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	if (readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY)
+		c = NO_POLL_CHAR;
+	else
+		c = readl(port->membase + AML_UART_RFIFO);
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return c;
+}
+
+static void meson_uart_poll_put_char(struct uart_port *port, unsigned char c)
+{
+	unsigned long flags;
+	u32 reg;
+	int ret;
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* Wait until FIFO is empty or timeout */
+	ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
+					reg & AML_UART_TX_EMPTY,
+					AML_UART_POLL_USEC,
+					AML_UART_TIMEOUT_USEC);
+	if (ret == -ETIMEDOUT) {
+		dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");
+		goto out;
+	}
+
+	/* Write the character */
+	writel(c, port->membase + AML_UART_WFIFO);
+
+	/* Wait until FIFO is empty or timeout */
+	ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
+					reg & AML_UART_TX_EMPTY,
+					AML_UART_POLL_USEC,
+					AML_UART_TIMEOUT_USEC);
+	if (ret == -ETIMEDOUT)
+		dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");
+
+out:
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
 static const struct uart_ops meson_uart_ops = {
 	.set_mctrl      = meson_uart_set_mctrl,
 	.get_mctrl      = meson_uart_get_mctrl,
@@ -442,6 +499,10 @@ static const struct uart_ops meson_uart_ops = {
 	.request_port	= meson_uart_request_port,
 	.release_port	= meson_uart_release_port,
 	.verify_port	= meson_uart_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char	= meson_uart_poll_get_char,
+	.poll_put_char	= meson_uart_poll_put_char,
+#endif
 };
 
 #ifdef CONFIG_SERIAL_MESON_CONSOLE
@@ -703,6 +764,7 @@ static int meson_uart_probe(struct platform_device *pdev)
 	port->mapsize = resource_size(res_mem);
 	port->irq = res_irq->start;
 	port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
+	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MESON_CONSOLE);
 	port->dev = &pdev->dev;
 	port->line = pdev->id;
 	port->type = PORT_MESON;
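
    The new meson callbacks slot into the generic uart_ops .poll_get_char and
    .poll_put_char hooks, which polled consumers such as kgdboc drive directly
    with interrupts disabled; the TX side bounds its busy-wait with
    readl_poll_timeout_atomic(). A rough sketch of how a polled consumer loops
    over those hooks (simplified illustration, not the actual kgdb code):

        #include <linux/serial_core.h>

        /* Busy-wait for one character, then echo it back through the port. */
        static void poll_echo_once(struct uart_port *port)
        {
                int c;

                do {
                        c = port->ops->poll_get_char(port);
                } while (c == NO_POLL_CHAR);

                port->ops->poll_put_char(port, c);
        }
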
diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c
index 949ab7e..8f2cab7 100644
--- a/drivers/tty/serial/milbeaut_usio.c
+++ b/drivers/tty/serial/milbeaut_usio.c
@@ -3,10 +3,6 @@
  * Copyright (C) 2018 Socionext Inc.
  */
 
-#if defined(CONFIG_SERIAL_MILBEAUT_USIO_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/clk.h>
 #include <linux/console.h>
 #include <linux/module.h>
@@ -537,6 +533,7 @@ static int mlb_usio_probe(struct platform_device *pdev)
 	port->irq = mlb_usio_irq[index][RX];
 	port->uartclk = clk_get_rate(clk);
 	port->fifosize = 128;
+	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MILBEAUT_USIO_CONSOLE);
 	port->iotype = UPIO_MEM32;
 	port->flags = UPF_BOOT_AUTOCONF | UPF_SPD_VHI;
 	port->line = index;
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 3a75ee0..af17004 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -44,10 +44,6 @@
 #include <asm/mpc52xx.h>
 #include <asm/mpc52xx_psc.h>
 
-#if defined(CONFIG_SERIAL_MPC52xx_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/serial_core.h>
 
 
@@ -1382,12 +1378,8 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
 		ch = psc_ops->read_char(port);
 
 		/* Handle sysreq char */
-#ifdef SUPPORT_SYSRQ
-		if (uart_handle_sysrq_char(port, ch)) {
-			port->sysrq = 0;
+		if (uart_handle_sysrq_char(port, ch))
 			continue;
-		}
-#endif
 
 		/* Store it */
 
@@ -1770,6 +1762,7 @@ static int mpc52xx_uart_of_probe(struct platform_device *op)
 	spin_lock_init(&port->lock);
 	port->uartclk = uartclk;
 	port->fifosize	= 512;
+	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MPC52xx_CONSOLE);
 	port->iotype	= UPIO_MEM;
 	port->flags	= UPF_BOOT_AUTOCONF |
 			  (uart_console(port) ? 0 : UPF_IOREMAP);
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index f6c45a7..60a9c53 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -7,10 +7,6 @@
  * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
  */
 
-#if defined(CONFIG_SERIAL_MSM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-# define SUPPORT_SYSRQ
-#endif
-
 #include <linux/kernel.h>
 #include <linux/atomic.h>
 #include <linux/dma-mapping.h>
@@ -610,7 +606,7 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
 				   UARTDM_RX_SIZE, dma->dir);
 	ret = dma_mapping_error(uart->dev, dma->phys);
 	if (ret)
-		return;
+		goto sw_mode;
 
 	dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys,
 						UARTDM_RX_SIZE, DMA_DEV_TO_MEM,
@@ -661,6 +657,22 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
 	return;
 unmap:
 	dma_unmap_single(uart->dev, dma->phys, UARTDM_RX_SIZE, dma->dir);
+
+sw_mode:
+	/*
+	 * Switch from DMA to SW/FIFO mode. After clearing the Rx BAM
+	 * (UARTDM_DMEN), the receiver must be reset.
+	 */
+	msm_write(uart, UART_CR_CMD_RESET_RX, UART_CR);
+	msm_write(uart, UART_CR_RX_ENABLE, UART_CR);
+
+	msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+	msm_write(uart, 0xFFFFFF, UARTDM_DMRX);
+	msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+
+	/* Re-enable RX interrupts */
+	msm_port->imr |= (UART_IMR_RXLEV | UART_IMR_RXSTALE);
+	msm_write(uart, msm_port->imr, UART_IMR);
 }
 
 static void msm_stop_rx(struct uart_port *port)
@@ -1810,6 +1822,7 @@ static int msm_serial_probe(struct platform_device *pdev)
 	if (unlikely(irq < 0))
 		return -ENXIO;
 	port->irq = irq;
+	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MSM_CONSOLE);
 
 	platform_set_drvdata(pdev, port);
 
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index fc58a00..47ab280 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -25,11 +25,7 @@
 #include <asm/irq.h>
 #include <asm/parisc-device.h>
 
-#if defined(CONFIG_SERIAL_MUX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
 #include <linux/sysrq.h>
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/serial_core.h>
 
 #define MUX_OFFSET 0x800
@@ -483,6 +479,7 @@ static int __init mux_probe(struct parisc_device *dev)
 		port->ops	= &mux_pops;
 		port->flags	= UPF_BOOT_AUTOCONF;
 		port->line	= port_cnt;
+		port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MUX_CONSOLE);
 
 		/* The port->timeout needs to match what is present in
 		 * uart_wait_until_sent in serial_core.c.  Otherwise
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index e345259..b4f835e 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -12,10 +12,6 @@
  * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
  */
 
-#if defined(CONFIG_SERIAL_MXS_AUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/init.h>
@@ -1693,6 +1689,7 @@ static int mxs_auart_probe(struct platform_device *pdev)
 	s->port.fifosize = MXS_AUART_FIFO_SIZE;
 	s->port.uartclk = clk_get_rate(s->clk);
 	s->port.type = PORT_IMX;
+	s->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_MXS_AUART_CONSOLE);
 
 	mxs_init_regs(s);
 
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 6420ae5..48017ce 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -16,10 +16,6 @@
  * this driver as required for the omap-platform.
  */
 
-#if defined(CONFIG_SERIAL_OMAP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/console.h>
@@ -493,10 +489,13 @@ static unsigned int check_modem_status(struct uart_omap_port *up)
 static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
 {
 	unsigned int flag;
-	unsigned char ch = 0;
 
+	/*
+	 * Read one data character out to avoid stalling the receiver,
+	 * as required by table 23-246 of the OMAP4 TRM.
+	 */
 	if (likely(lsr & UART_LSR_DR))
-		ch = serial_in(up, UART_RX);
+		serial_in(up, UART_RX);
 
 	up->port.icount.rx++;
 	flag = TTY_NORMAL;
@@ -1680,6 +1679,7 @@ static int serial_omap_probe(struct platform_device *pdev)
 	up->port.regshift = 2;
 	up->port.fifosize = 64;
 	up->port.ops = &serial_omap_pops;
+	up->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_OMAP_CONSOLE);
 
 	if (pdev->dev.of_node)
 		ret = of_alias_get_id(pdev->dev.of_node, "serial");
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index c16234b..0a96217 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -2,9 +2,6 @@
 /*
  *Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
  */
-#if defined(CONFIG_SERIAL_PCH_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
 #include <linux/kernel.h>
 #include <linux/serial_reg.h>
 #include <linux/slab.h>
@@ -587,12 +584,8 @@ static int pch_uart_hal_read(struct eg20t_port *priv, unsigned char *buf,
 			if (uart_handle_break(port))
 				continue;
 		}
-#ifdef SUPPORT_SYSRQ
-		if (port->sysrq) {
-			if (uart_handle_sysrq_char(port, rbr))
-				continue;
-		}
-#endif
+		if (uart_handle_sysrq_char(port, rbr))
+			continue;
 
 		buf[i++] = rbr;
 	}
@@ -1796,6 +1789,7 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
 	priv->port.flags = UPF_BOOT_AUTOCONF;
 	priv->port.fifosize = fifosize;
 	priv->port.line = board->line_no;
+	priv->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PCH_UART_CONSOLE);
 	priv->trigger = PCH_UART_HAL_TRIGGER_M;
 
 	snprintf(priv->irq_name, IRQ_NAME_SIZE,
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index bcb5bf7..ba65a3b 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -61,10 +61,6 @@
 #define of_machine_is_compatible(x) (0)
 #endif
 
-#if defined (CONFIG_SERIAL_PMACZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/serial.h>
 #include <linux/serial_core.h>
 
@@ -1721,6 +1717,7 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
 	uap->control_reg   = uap->port.membase;
 	uap->data_reg      = uap->control_reg + 4;
 	uap->port_type     = 0;
+	uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PMACZILOG_CONSOLE);
 
 	pmz_convert_to_zs(uap, CS8, 0, 9600);
 
diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index 223a949..972d94e 100644
--- a/drivers/tty/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
@@ -10,10 +10,6 @@
  * Copyright (C) 2000 Deep Blue Solutions Ltd.
  */
 
-#if defined(CONFIG_SERIAL_PNX8XXX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
@@ -220,9 +216,7 @@ static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport)
 			else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE))
 				flg = TTY_FRAME;
 
-#ifdef SUPPORT_SYSRQ
 			sport->port.sysrq = 0;
-#endif
 		}
 
 		if (uart_handle_sysrq_char(&sport->port, ch))
@@ -800,6 +794,7 @@ static int pnx8xxx_serial_probe(struct platform_device *pdev)
 			if (pnx8xxx_ports[i].port.mapbase != res->start)
 				continue;
 
+			pnx8xxx_ports[i].port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PNX8XXX_CONSOLE);
 			pnx8xxx_ports[i].port.dev = &pdev->dev;
 			uart_add_one_port(&pnx8xxx_reg, &pnx8xxx_ports[i].port);
 			platform_set_drvdata(pdev, &pnx8xxx_ports[i]);
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 4932b67..41319ef 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -19,10 +19,6 @@
  */
 
 
-#if defined(CONFIG_SERIAL_PXA_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/ioport.h>
 #include <linux/init.h>
 #include <linux/console.h>
@@ -879,6 +875,7 @@ static int serial_pxa_probe(struct platform_device *dev)
 	sport->port.dev = &dev->dev;
 	sport->port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
 	sport->port.uartclk = clk_get_rate(sport->clk);
+	sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PXA_CONSOLE);
 
 	ret = serial_pxa_probe_dt(dev, sport);
 	if (ret > 0)
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index ff63728..191abb1 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1,10 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 // Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
 
-#if defined(CONFIG_SERIAL_QCOM_GENI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-# define SUPPORT_SYSRQ
-#endif
-
 #include <linux/clk.h>
 #include <linux/console.h>
 #include <linux/io.h>
@@ -14,6 +10,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/pm_wakeirq.h>
 #include <linux/qcom-geni-se.h>
 #include <linux/serial.h>
@@ -92,7 +89,11 @@
 #define DEF_TX_WM		2
 #define DEF_FIFO_WIDTH_BITS	32
 #define UART_RX_WM		2
-#define MAX_LOOPBACK_CFG	3
+
+/* SE_UART_LOOPBACK_CFG */
+#define RX_TX_SORTED	BIT(0)
+#define CTS_RTS_SORTED	BIT(1)
+#define RX_TX_CTS_RTS_SORTED	(RX_TX_SORTED | CTS_RTS_SORTED)
 
 #ifdef CONFIG_CONSOLE_POLL
 #define CONSOLE_RX_BYTES_PW 1
@@ -103,7 +104,7 @@
 struct qcom_geni_serial_port {
 	struct uart_port uport;
 	struct geni_se se;
-	char name[20];
+	const char *name;
 	u32 tx_fifo_depth;
 	u32 tx_fifo_width;
 	u32 rx_fifo_depth;
@@ -164,30 +165,6 @@ static struct qcom_geni_serial_port qcom_geni_uart_ports[GENI_UART_PORTS] = {
 	},
 };
 
-static ssize_t loopback_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
-
-	return snprintf(buf, sizeof(u32), "%d\n", port->loopback);
-}
-
-static ssize_t loopback_store(struct device *dev,
-				struct device_attribute *attr, const char *buf,
-				size_t size)
-{
-	struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
-	u32 loopback;
-
-	if (kstrtoint(buf, 0, &loopback) || loopback > MAX_LOOPBACK_CFG) {
-		dev_err(dev, "Invalid input\n");
-		return -EINVAL;
-	}
-	port->loopback = loopback;
-	return size;
-}
-static DEVICE_ATTR_RW(loopback);
-
 static struct qcom_geni_serial_port qcom_geni_console_port = {
 	.uport = {
 		.iotype = UPIO_MEM,
@@ -237,10 +214,14 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
 							unsigned int mctrl)
 {
 	u32 uart_manual_rfr = 0;
+	struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
 
 	if (uart_console(uport))
 		return;
 
+	if (mctrl & TIOCM_LOOP)
+		port->loopback = RX_TX_CTS_RTS_SORTED;
+
 	if (!(mctrl & TIOCM_RTS))
 		uart_manual_rfr = UART_MANUAL_RFR_EN | UART_RFR_NOT_READY;
 	writel(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR);
@@ -757,15 +738,6 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
 		uart_write_wakeup(uport);
 }
 
-static irqreturn_t qcom_geni_serial_wakeup_isr(int isr, void *dev)
-{
-	struct uart_port *uport = dev;
-
-	pm_wakeup_event(uport->dev, 2000);
-
-	return IRQ_HANDLED;
-}
-
 static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
 {
 	u32 m_irq_en;
@@ -1302,50 +1274,57 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
 	port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
 	port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
 
-	scnprintf(port->name, sizeof(port->name), "qcom_geni_serial_%s%d",
-		(uart_console(uport) ? "console" : "uart"), uport->line);
+	port->name = devm_kasprintf(uport->dev, GFP_KERNEL,
+			"qcom_geni_serial_%s%d",
+			uart_console(uport) ? "console" : "uart", uport->line);
+	if (!port->name)
+		return -ENOMEM;
+
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
 		return irq;
 	uport->irq = irq;
+	uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_QCOM_GENI_CONSOLE);
+
+	if (!console)
+		port->wakeup_irq = platform_get_irq_optional(pdev, 1);
+
+	uport->private_data = drv;
+	platform_set_drvdata(pdev, port);
+	port->handle_rx = console ? handle_rx_console : handle_rx_uart;
+
+	ret = uart_add_one_port(drv, uport);
+	if (ret)
+		return ret;
 
 	irq_set_status_flags(uport->irq, IRQ_NOAUTOEN);
 	ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr,
 			IRQF_TRIGGER_HIGH, port->name, uport);
 	if (ret) {
 		dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
+		uart_remove_one_port(drv, uport);
 		return ret;
 	}
 
-	if (!console) {
-		port->wakeup_irq = platform_get_irq(pdev, 1);
-		if (port->wakeup_irq < 0) {
-			dev_err(&pdev->dev, "Failed to get wakeup IRQ %d\n",
-					port->wakeup_irq);
-		} else {
-			irq_set_status_flags(port->wakeup_irq, IRQ_NOAUTOEN);
-			ret = devm_request_irq(uport->dev, port->wakeup_irq,
-				qcom_geni_serial_wakeup_isr,
-				IRQF_TRIGGER_FALLING, "uart_wakeup", uport);
-			if (ret) {
-				dev_err(uport->dev, "Failed to register wakeup IRQ ret %d\n",
-						ret);
-				return ret;
-			}
+	/*
+	 * Set the pm_runtime status to ACTIVE so that the wakeup_irq gets
+	 * enabled/disabled by dev_pm_arm_wake_irq() during system
+	 * suspend/resume.
+	 */
+	pm_runtime_set_active(&pdev->dev);
 
-			device_init_wakeup(&pdev->dev, true);
-			ret = dev_pm_set_wake_irq(&pdev->dev, port->wakeup_irq);
-			if (unlikely(ret))
-				dev_err(uport->dev, "%s:Failed to set IRQ wake:%d\n",
-						__func__, ret);
+	if (port->wakeup_irq > 0) {
+		device_init_wakeup(&pdev->dev, true);
+		ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
+						port->wakeup_irq);
+		if (ret) {
+			device_init_wakeup(&pdev->dev, false);
+			uart_remove_one_port(drv, uport);
+			return ret;
 		}
 	}
-	uport->private_data = drv;
-	platform_set_drvdata(pdev, port);
-	port->handle_rx = console ? handle_rx_console : handle_rx_uart;
-	if (!console)
-		device_create_file(uport->dev, &dev_attr_loopback);
-	return uart_add_one_port(drv, uport);
+
+	return 0;
 }
 
 static int qcom_geni_serial_remove(struct platform_device *pdev)
@@ -1353,7 +1332,10 @@ static int qcom_geni_serial_remove(struct platform_device *pdev)
 	struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
 	struct uart_driver *drv = port->uport.private_data;
 
+	dev_pm_clear_wake_irq(&pdev->dev);
+	device_init_wakeup(&pdev->dev, false);
 	uart_remove_one_port(drv, &port->uport);
+
 	return 0;
 }
 
@@ -1362,12 +1344,7 @@ static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
 	struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
 	struct uart_port *uport = &port->uport;
 
-	uart_suspend_port(uport->private_data, uport);
-
-	if (port->wakeup_irq > 0)
-		enable_irq(port->wakeup_irq);
-
-	return 0;
+	return uart_suspend_port(uport->private_data, uport);
 }
 
 static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
@@ -1375,9 +1352,6 @@ static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
 	struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
 	struct uart_port *uport = &port->uport;
 
-	if (port->wakeup_irq > 0)
-		disable_irq(port->wakeup_irq);
-
 	return uart_resume_port(uport->private_data, uport);
 }
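
    The qcom_geni_serial change swaps the hand-rolled wakeup ISR and the
    enable_irq()/disable_irq() calls in the suspend/resume paths for the PM
    core's dedicated wake IRQ machinery: probe registers the interrupt with
    dev_pm_set_dedicated_wake_irq(), the PM core arms and disarms it around
    system suspend via dev_pm_arm_wake_irq(), and pm_runtime_set_active() is
    needed so that arming logic treats the device as active. In outline
    (hypothetical foo_*() wrappers, error handling trimmed):

        static int foo_setup_wakeup(struct platform_device *pdev, int wakeup_irq)
        {
                int ret;

                pm_runtime_set_active(&pdev->dev);

                if (wakeup_irq <= 0)
                        return 0;       /* no dedicated wake interrupt wired up */

                device_init_wakeup(&pdev->dev, true);
                ret = dev_pm_set_dedicated_wake_irq(&pdev->dev, wakeup_irq);
                if (ret)
                        device_init_wakeup(&pdev->dev, false);
                return ret;
        }

        static void foo_teardown_wakeup(struct platform_device *pdev)
        {
                dev_pm_clear_wake_irq(&pdev->dev);
                device_init_wakeup(&pdev->dev, false);
        }
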
 
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 8e61812..75c2a22 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -7,10 +7,6 @@
  *  Copyright (C) 2000 Deep Blue Solutions Ltd.
  */
 
-#if defined(CONFIG_SERIAL_SA1100_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
@@ -214,9 +210,7 @@ sa1100_rx_chars(struct sa1100_port *sport)
 			else if (status & UTSR1_TO_SM(UTSR1_FRE))
 				flg = TTY_FRAME;
 
-#ifdef SUPPORT_SYSRQ
 			sport->port.sysrq = 0;
-#endif
 		}
 
 		if (uart_handle_sysrq_char(&sport->port, ch))
@@ -860,6 +854,7 @@ static int sa1100_serial_resume(struct platform_device *dev)
 static int sa1100_serial_add_one_port(struct sa1100_port *sport, struct platform_device *dev)
 {
 	sport->port.dev = &dev->dev;
+	sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SA1100_CONSOLE);
 
 	// mctrl_gpio_init() requires that the GPIO driver supports interrupts,
 	// but we need to support GPIO drivers for hardware that has no such
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
deleted file mode 100644
index f930221..0000000
--- a/drivers/tty/serial/samsung.h
+++ /dev/null
@@ -1,147 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef __SAMSUNG_H
-#define __SAMSUNG_H
-
-/*
- * Driver for Samsung SoC onboard UARTs.
- *
- * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
- *	http://armlinux.simtec.co.uk/
-*/
-
-#include <linux/dmaengine.h>
-
-struct s3c24xx_uart_info {
-	char			*name;
-	unsigned int		type;
-	unsigned int		fifosize;
-	unsigned long		rx_fifomask;
-	unsigned long		rx_fifoshift;
-	unsigned long		rx_fifofull;
-	unsigned long		tx_fifomask;
-	unsigned long		tx_fifoshift;
-	unsigned long		tx_fifofull;
-	unsigned int		def_clk_sel;
-	unsigned long		num_clks;
-	unsigned long		clksel_mask;
-	unsigned long		clksel_shift;
-
-	/* uart port features */
-
-	unsigned int		has_divslot:1;
-
-	/* uart controls */
-	int (*reset_port)(struct uart_port *, struct s3c2410_uartcfg *);
-};
-
-struct s3c24xx_serial_drv_data {
-	struct s3c24xx_uart_info	*info;
-	struct s3c2410_uartcfg		*def_cfg;
-	unsigned int			fifosize[CONFIG_SERIAL_SAMSUNG_UARTS];
-};
-
-struct s3c24xx_uart_dma {
-	unsigned int			rx_chan_id;
-	unsigned int			tx_chan_id;
-
-	struct dma_slave_config		rx_conf;
-	struct dma_slave_config		tx_conf;
-
-	struct dma_chan			*rx_chan;
-	struct dma_chan			*tx_chan;
-
-	dma_addr_t			rx_addr;
-	dma_addr_t			tx_addr;
-
-	dma_cookie_t			rx_cookie;
-	dma_cookie_t			tx_cookie;
-
-	char				*rx_buf;
-
-	dma_addr_t			tx_transfer_addr;
-
-	size_t				rx_size;
-	size_t				tx_size;
-
-	struct dma_async_tx_descriptor	*tx_desc;
-	struct dma_async_tx_descriptor	*rx_desc;
-
-	int				tx_bytes_requested;
-	int				rx_bytes_requested;
-};
-
-struct s3c24xx_uart_port {
-	unsigned char			rx_claimed;
-	unsigned char			tx_claimed;
-	unsigned int			pm_level;
-	unsigned long			baudclk_rate;
-	unsigned int			min_dma_size;
-
-	unsigned int			rx_irq;
-	unsigned int			tx_irq;
-
-	unsigned int			tx_in_progress;
-	unsigned int			tx_mode;
-	unsigned int			rx_mode;
-
-	struct s3c24xx_uart_info	*info;
-	struct clk			*clk;
-	struct clk			*baudclk;
-	struct uart_port		port;
-	struct s3c24xx_serial_drv_data	*drv_data;
-
-	/* reference to platform data */
-	struct s3c2410_uartcfg		*cfg;
-
-	struct s3c24xx_uart_dma		*dma;
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
-	struct notifier_block		freq_transition;
-#endif
-};
-
-/* conversion functions */
-
-#define s3c24xx_dev_to_port(__dev) dev_get_drvdata(__dev)
-
-/* register access controls */
-
-#define portaddr(port, reg) ((port)->membase + (reg))
-#define portaddrl(port, reg) \
-	((unsigned long *)(unsigned long)((port)->membase + (reg)))
-
-#define rd_regb(port, reg) (readb_relaxed(portaddr(port, reg)))
-#define rd_regl(port, reg) (readl_relaxed(portaddr(port, reg)))
-
-#define wr_regb(port, reg, val) writeb_relaxed(val, portaddr(port, reg))
-#define wr_regl(port, reg, val) writel_relaxed(val, portaddr(port, reg))
-
-/* Byte-order aware bit setting/clearing functions. */
-
-static inline void s3c24xx_set_bit(struct uart_port *port, int idx,
-				   unsigned int reg)
-{
-	unsigned long flags;
-	u32 val;
-
-	local_irq_save(flags);
-	val = rd_regl(port, reg);
-	val |= (1 << idx);
-	wr_regl(port, reg, val);
-	local_irq_restore(flags);
-}
-
-static inline void s3c24xx_clear_bit(struct uart_port *port, int idx,
-				     unsigned int reg)
-{
-	unsigned long flags;
-	u32 val;
-
-	local_irq_save(flags);
-	val = rd_regl(port, reg);
-	val &= ~(1 << idx);
-	wr_regl(port, reg, val);
-	local_irq_restore(flags);
-}
-
-#endif
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 83fd516..73f951d 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -4,7 +4,7 @@
  *
  * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
  *	http://armlinux.simtec.co.uk/
-*/
+ */
 
 /* Note on 2410 error handling
  *
@@ -19,11 +19,7 @@
  * and change the policy on BREAK
  *
  * BJD, 04-Nov-2004
-*/
-
-#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
+ */
 
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
@@ -44,33 +40,8 @@
 #include <linux/clk.h>
 #include <linux/cpufreq.h>
 #include <linux/of.h>
-
 #include <asm/irq.h>
 
-#include "samsung.h"
-
-#if	defined(CONFIG_SERIAL_SAMSUNG_DEBUG) &&	\
-	!defined(MODULE)
-
-extern void printascii(const char *);
-
-__printf(1, 2)
-static void dbg(const char *fmt, ...)
-{
-	va_list va;
-	char buff[256];
-
-	va_start(va, fmt);
-	vscnprintf(buff, sizeof(buff), fmt, va);
-	va_end(va);
-
-	printascii(buff);
-}
-
-#else
-#define dbg(fmt, ...) do { if (0) no_printk(fmt, ##__VA_ARGS__); } while (0)
-#endif
-
 /* UART name and device definitions */
 
 #define S3C24XX_SERIAL_NAME	"ttySAC"
@@ -81,14 +52,142 @@ static void dbg(const char *fmt, ...)
 #define S3C24XX_TX_DMA			2
 #define S3C24XX_RX_PIO			1
 #define S3C24XX_RX_DMA			2
-/* macros to change one thing to another */
-
-#define tx_enabled(port) ((port)->unused[0])
-#define rx_enabled(port) ((port)->unused[1])
 
 /* flag to ignore all characters coming in */
 #define RXSTAT_DUMMY_READ (0x10000000)
 
+struct s3c24xx_uart_info {
+	char			*name;
+	unsigned int		type;
+	unsigned int		fifosize;
+	unsigned long		rx_fifomask;
+	unsigned long		rx_fifoshift;
+	unsigned long		rx_fifofull;
+	unsigned long		tx_fifomask;
+	unsigned long		tx_fifoshift;
+	unsigned long		tx_fifofull;
+	unsigned int		def_clk_sel;
+	unsigned long		num_clks;
+	unsigned long		clksel_mask;
+	unsigned long		clksel_shift;
+
+	/* uart port features */
+
+	unsigned int		has_divslot:1;
+};
+
+struct s3c24xx_serial_drv_data {
+	struct s3c24xx_uart_info	*info;
+	struct s3c2410_uartcfg		*def_cfg;
+	unsigned int			fifosize[CONFIG_SERIAL_SAMSUNG_UARTS];
+};
+
+struct s3c24xx_uart_dma {
+	unsigned int			rx_chan_id;
+	unsigned int			tx_chan_id;
+
+	struct dma_slave_config		rx_conf;
+	struct dma_slave_config		tx_conf;
+
+	struct dma_chan			*rx_chan;
+	struct dma_chan			*tx_chan;
+
+	dma_addr_t			rx_addr;
+	dma_addr_t			tx_addr;
+
+	dma_cookie_t			rx_cookie;
+	dma_cookie_t			tx_cookie;
+
+	char				*rx_buf;
+
+	dma_addr_t			tx_transfer_addr;
+
+	size_t				rx_size;
+	size_t				tx_size;
+
+	struct dma_async_tx_descriptor	*tx_desc;
+	struct dma_async_tx_descriptor	*rx_desc;
+
+	int				tx_bytes_requested;
+	int				rx_bytes_requested;
+};
+
+struct s3c24xx_uart_port {
+	unsigned char			rx_claimed;
+	unsigned char			tx_claimed;
+	unsigned char			rx_enabled;
+	unsigned char			tx_enabled;
+	unsigned int			pm_level;
+	unsigned long			baudclk_rate;
+	unsigned int			min_dma_size;
+
+	unsigned int			rx_irq;
+	unsigned int			tx_irq;
+
+	unsigned int			tx_in_progress;
+	unsigned int			tx_mode;
+	unsigned int			rx_mode;
+
+	struct s3c24xx_uart_info	*info;
+	struct clk			*clk;
+	struct clk			*baudclk;
+	struct uart_port		port;
+	struct s3c24xx_serial_drv_data	*drv_data;
+
+	/* reference to platform data */
+	struct s3c2410_uartcfg		*cfg;
+
+	struct s3c24xx_uart_dma		*dma;
+
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
+	struct notifier_block		freq_transition;
+#endif
+};
+
+/* conversion functions */
+
+#define s3c24xx_dev_to_port(__dev) dev_get_drvdata(__dev)
+
+/* register access controls */
+
+#define portaddr(port, reg) ((port)->membase + (reg))
+#define portaddrl(port, reg) \
+	((unsigned long *)(unsigned long)((port)->membase + (reg)))
+
+#define rd_regb(port, reg) (readb_relaxed(portaddr(port, reg)))
+#define rd_regl(port, reg) (readl_relaxed(portaddr(port, reg)))
+
+#define wr_regb(port, reg, val) writeb_relaxed(val, portaddr(port, reg))
+#define wr_regl(port, reg, val) writel_relaxed(val, portaddr(port, reg))
+
+/* Byte-order aware bit setting/clearing functions. */
+
+static inline void s3c24xx_set_bit(struct uart_port *port, int idx,
+				   unsigned int reg)
+{
+	unsigned long flags;
+	u32 val;
+
+	local_irq_save(flags);
+	val = rd_regl(port, reg);
+	val |= (1 << idx);
+	wr_regl(port, reg, val);
+	local_irq_restore(flags);
+}
+
+static inline void s3c24xx_clear_bit(struct uart_port *port, int idx,
+				     unsigned int reg)
+{
+	unsigned long flags;
+	u32 val;
+
+	local_irq_save(flags);
+	val = rd_regl(port, reg);
+	val &= ~(1 << idx);
+	wr_regl(port, reg, val);
+	local_irq_restore(flags);
+}
+
 static inline struct s3c24xx_uart_port *to_ourport(struct uart_port *port)
 {
 	return container_of(port, struct s3c24xx_uart_port, port);
@@ -118,6 +217,7 @@ static int s3c24xx_serial_has_interrupt_mask(struct uart_port *port)
 
 static void s3c24xx_serial_rx_enable(struct uart_port *port)
 {
+	struct s3c24xx_uart_port *ourport = to_ourport(port);
 	unsigned long flags;
 	unsigned int ucon, ufcon;
 	int count = 10000;
@@ -135,12 +235,13 @@ static void s3c24xx_serial_rx_enable(struct uart_port *port)
 	ucon |= S3C2410_UCON_RXIRQMODE;
 	wr_regl(port, S3C2410_UCON, ucon);
 
-	rx_enabled(port) = 1;
+	ourport->rx_enabled = 1;
 	spin_unlock_irqrestore(&port->lock, flags);
 }
 
 static void s3c24xx_serial_rx_disable(struct uart_port *port)
 {
+	struct s3c24xx_uart_port *ourport = to_ourport(port);
 	unsigned long flags;
 	unsigned int ucon;
 
@@ -150,7 +251,7 @@ static void s3c24xx_serial_rx_disable(struct uart_port *port)
 	ucon &= ~S3C2410_UCON_RXIRQMODE;
 	wr_regl(port, S3C2410_UCON, ucon);
 
-	rx_enabled(port) = 0;
+	ourport->rx_enabled = 0;
 	spin_unlock_irqrestore(&port->lock, flags);
 }
 
@@ -162,7 +263,7 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)
 	struct dma_tx_state state;
 	int count;
 
-	if (!tx_enabled(port))
+	if (!ourport->tx_enabled)
 		return;
 
 	if (s3c24xx_serial_has_interrupt_mask(port))
@@ -182,7 +283,7 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)
 		port->icount.tx += count;
 	}
 
-	tx_enabled(port) = 0;
+	ourport->tx_enabled = 0;
 	ourport->tx_in_progress = 0;
 
 	if (port->flags & UPF_CONS_FLOW)
@@ -340,11 +441,11 @@ static void s3c24xx_serial_start_tx(struct uart_port *port)
 	struct s3c24xx_uart_port *ourport = to_ourport(port);
 	struct circ_buf *xmit = &port->state->xmit;
 
-	if (!tx_enabled(port)) {
+	if (!ourport->tx_enabled) {
 		if (port->flags & UPF_CONS_FLOW)
 			s3c24xx_serial_rx_disable(port);
 
-		tx_enabled(port) = 1;
+		ourport->tx_enabled = 1;
 		if (!ourport->dma || !ourport->dma->tx_chan)
 			s3c24xx_serial_start_tx_pio(ourport);
 	}
@@ -389,14 +490,14 @@ static void s3c24xx_serial_stop_rx(struct uart_port *port)
 	enum dma_status dma_status;
 	unsigned int received;
 
-	if (rx_enabled(port)) {
-		dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
+	if (ourport->rx_enabled) {
+		dev_dbg(port->dev, "stopping rx\n");
 		if (s3c24xx_serial_has_interrupt_mask(port))
 			s3c24xx_set_bit(port, S3C64XX_UINTM_RXD,
 					S3C64XX_UINTM);
 		else
 			disable_irq_nosync(ourport->rx_irq);
-		rx_enabled(port) = 0;
+		ourport->rx_enabled = 0;
 	}
 	if (dma && dma->rx_chan) {
 		dmaengine_pause(dma->tx_chan);
@@ -546,7 +647,7 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport);
 
 static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
 {
-	unsigned int utrstat, ufstat, received;
+	unsigned int utrstat, received;
 	struct s3c24xx_uart_port *ourport = dev_id;
 	struct uart_port *port = &ourport->port;
 	struct s3c24xx_uart_dma *dma = ourport->dma;
@@ -556,7 +657,7 @@ static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
 	struct dma_tx_state state;
 
 	utrstat = rd_regl(port, S3C2410_UTRSTAT);
-	ufstat = rd_regl(port, S3C2410_UFSTAT);
+	rd_regl(port, S3C2410_UFSTAT);
 
 	spin_lock_irqsave(&port->lock, flags);
 
@@ -618,9 +719,9 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
 		if (port->flags & UPF_CONS_FLOW) {
 			int txe = s3c24xx_serial_txempty_nofifo(port);
 
-			if (rx_enabled(port)) {
+			if (ourport->rx_enabled) {
 				if (!txe) {
-					rx_enabled(port) = 0;
+					ourport->rx_enabled = 0;
 					continue;
 				}
 			} else {
@@ -628,7 +729,7 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
 					ufcon = rd_regl(port, S3C2410_UFCON);
 					ufcon |= S3C2410_UFCON_RESETRX;
 					wr_regl(port, S3C2410_UFCON, ufcon);
-					rx_enabled(port) = 1;
+					ourport->rx_enabled = 1;
 					return;
 				}
 				continue;
@@ -641,12 +742,13 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
 		port->icount.rx++;
 
 		if (unlikely(uerstat & S3C2410_UERSTAT_ANY)) {
-			dbg("rxerr: port ch=0x%02x, rxs=0x%08x\n",
-			    ch, uerstat);
+			dev_dbg(port->dev,
+				"rxerr: port ch=0x%02x, rxs=0x%08x\n",
+				ch, uerstat);
 
 			/* check for break */
 			if (uerstat & S3C2410_UERSTAT_BREAK) {
-				dbg("break!\n");
+				dev_dbg(port->dev, "break!\n");
 				port->icount.brk++;
 				if (uart_handle_break(port))
 					continue; /* Ignore character */
@@ -732,7 +834,7 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
 
 	/* if there isn't anything more to transmit, or the uart is now
 	 * stopped, disable the uart and exit
-	*/
+	 */
 
 	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
 		s3c24xx_serial_stop_tx(port);
@@ -978,7 +1080,7 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
 	if (ourport->tx_claimed) {
 		if (!s3c24xx_serial_has_interrupt_mask(port))
 			free_irq(ourport->tx_irq, ourport);
-		tx_enabled(port) = 0;
+		ourport->tx_enabled = 0;
 		ourport->tx_claimed = 0;
 		ourport->tx_mode = 0;
 	}
@@ -987,7 +1089,7 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
 		if (!s3c24xx_serial_has_interrupt_mask(port))
 			free_irq(ourport->rx_irq, ourport);
 		ourport->rx_claimed = 0;
-		rx_enabled(port) = 0;
+		ourport->rx_enabled = 0;
 	}
 
 	/* Clear pending interrupts and mask all interrupts */
@@ -1009,10 +1111,7 @@ static int s3c24xx_serial_startup(struct uart_port *port)
 	struct s3c24xx_uart_port *ourport = to_ourport(port);
 	int ret;
 
-	dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
-	    port, (unsigned long long)port->mapbase, port->membase);
-
-	rx_enabled(port) = 1;
+	ourport->rx_enabled = 1;
 
 	ret = request_irq(ourport->rx_irq, s3c24xx_serial_rx_chars, 0,
 			  s3c24xx_serial_portname(port), ourport);
@@ -1024,9 +1123,9 @@ static int s3c24xx_serial_startup(struct uart_port *port)
 
 	ourport->rx_claimed = 1;
 
-	dbg("requesting tx irq...\n");
+	dev_dbg(port->dev, "requesting tx irq...\n");
 
-	tx_enabled(port) = 1;
+	ourport->tx_enabled = 1;
 
 	ret = request_irq(ourport->tx_irq, s3c24xx_serial_tx_chars, 0,
 			  s3c24xx_serial_portname(port), ourport);
@@ -1038,10 +1137,9 @@ static int s3c24xx_serial_startup(struct uart_port *port)
 
 	ourport->tx_claimed = 1;
 
-	dbg("s3c24xx_serial_startup ok\n");
-
 	/* the port reset code should have done the correct
-	 * register setup for the port controls */
+	 * register setup for the port controls
+	 */
 
 	return ret;
 
@@ -1057,9 +1155,6 @@ static int s3c64xx_serial_startup(struct uart_port *port)
 	unsigned int ufcon;
 	int ret;
 
-	dbg("s3c64xx_serial_startup: port=%p (%08llx,%p)\n",
-	    port, (unsigned long long)port->mapbase, port->membase);
-
 	wr_regl(port, S3C64XX_UINTM, 0xf);
 	if (ourport->dma) {
 		ret = s3c24xx_serial_request_dma(ourport);
@@ -1077,9 +1172,9 @@ static int s3c64xx_serial_startup(struct uart_port *port)
 	}
 
 	/* For compatibility with s3c24xx SoCs */
-	rx_enabled(port) = 1;
+	ourport->rx_enabled = 1;
 	ourport->rx_claimed = 1;
-	tx_enabled(port) = 0;
+	ourport->tx_enabled = 0;
 	ourport->tx_claimed = 1;
 
 	spin_lock_irqsave(&port->lock, flags);
@@ -1097,7 +1192,6 @@ static int s3c64xx_serial_startup(struct uart_port *port)
 	/* Enable Rx Interrupt */
 	s3c24xx_clear_bit(port, S3C64XX_UINTM_RXD, S3C64XX_UINTM);
 
-	dbg("s3c64xx_serial_startup ok\n");
 	return ret;
 }
 
@@ -1145,7 +1239,7 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
  * baud clocks (and the resultant actual baud rates) and then tries to
  * pick the closest one and select that.
  *
-*/
+ */
 
 #define MAX_CLK_NAME_LENGTH 15
 
@@ -1315,29 +1409,30 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
 
 		if (cfg->has_fracval) {
 			udivslot = (div & 15);
-			dbg("fracval = %04x\n", udivslot);
+			dev_dbg(port->dev, "fracval = %04x\n", udivslot);
 		} else {
 			udivslot = udivslot_table[div & 15];
-			dbg("udivslot = %04x (div %d)\n", udivslot, div & 15);
+			dev_dbg(port->dev, "udivslot = %04x (div %d)\n",
+				udivslot, div & 15);
 		}
 	}
 
 	switch (termios->c_cflag & CSIZE) {
 	case CS5:
-		dbg("config: 5bits/char\n");
+		dev_dbg(port->dev, "config: 5bits/char\n");
 		ulcon = S3C2410_LCON_CS5;
 		break;
 	case CS6:
-		dbg("config: 6bits/char\n");
+		dev_dbg(port->dev, "config: 6bits/char\n");
 		ulcon = S3C2410_LCON_CS6;
 		break;
 	case CS7:
-		dbg("config: 7bits/char\n");
+		dev_dbg(port->dev, "config: 7bits/char\n");
 		ulcon = S3C2410_LCON_CS7;
 		break;
 	case CS8:
 	default:
-		dbg("config: 8bits/char\n");
+		dev_dbg(port->dev, "config: 8bits/char\n");
 		ulcon = S3C2410_LCON_CS8;
 		break;
 	}
@@ -1359,8 +1454,9 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
 
 	spin_lock_irqsave(&port->lock, flags);
 
-	dbg("setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
-	    ulcon, quot, udivslot);
+	dev_dbg(port->dev,
+		"setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
+		ulcon, quot, udivslot);
 
 	wr_regl(port, S3C2410_ULCON, ulcon);
 	wr_regl(port, S3C2410_UBRDIV, quot);
@@ -1381,10 +1477,11 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
 	if (ourport->info->has_divslot)
 		wr_regl(port, S3C2443_DIVSLOT, udivslot);
 
-	dbg("uart: ulcon = 0x%08x, ucon = 0x%08x, ufcon = 0x%08x\n",
-	    rd_regl(port, S3C2410_ULCON),
-	    rd_regl(port, S3C2410_UCON),
-	    rd_regl(port, S3C2410_UFCON));
+	dev_dbg(port->dev,
+		"uart: ulcon = 0x%08x, ucon = 0x%08x, ufcon = 0x%08x\n",
+		rd_regl(port, S3C2410_ULCON),
+		rd_regl(port, S3C2410_UCON),
+		rd_regl(port, S3C2410_UFCON));
 
 	/*
 	 * Update the per-port timeout.
@@ -1442,6 +1539,7 @@ static void s3c24xx_serial_release_port(struct uart_port *port)
 static int s3c24xx_serial_request_port(struct uart_port *port)
 {
 	const char *name = s3c24xx_serial_portname(port);
+
 	return request_mem_region(port->mapbase, MAP_SIZE, name) ? 0 : -EBUSY;
 }
 
@@ -1583,7 +1681,7 @@ s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS] = {
 /* s3c24xx_serial_resetport
  *
  * reset the FIFOs and other settings.
-*/
+ */
 
 static void s3c24xx_serial_resetport(struct uart_port *port,
 				   struct s3c2410_uartcfg *cfg)
@@ -1637,7 +1735,8 @@ static int s3c24xx_serial_cpufreq_transition(struct notifier_block *nb,
 
 	if (val == CPUFREQ_PRECHANGE) {
 		/* we should really shut the port down whilst the
-		 * frequency change is in progress. */
+		 * frequency change is in progress.
+		 */
 
 	} else if (val == CPUFREQ_POSTCHANGE) {
 		struct ktermios *termios;
@@ -1743,8 +1842,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
 	struct resource *res;
 	int ret;
 
-	dbg("s3c24xx_serial_init_port: port=%p, platdev=%p\n", port, platdev);
-
 	if (platdev == NULL)
 		return -ENODEV;
 
@@ -1761,7 +1858,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
 	port->uartclk = 1;
 
 	if (cfg->uart_flags & UPF_CONS_FLOW) {
-		dbg("s3c24xx_serial_init_port: enabling flow control\n");
+		dev_dbg(port->dev, "enabling flow control\n");
 		port->flags |= UPF_CONS_FLOW;
 	}
 
@@ -1773,7 +1870,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
 		return -EINVAL;
 	}
 
-	dbg("resource %pR)\n", res);
+	dev_dbg(port->dev, "resource %pR\n", res);
 
 	port->membase = devm_ioremap(port->dev, res->start, resource_size(res));
 	if (!port->membase) {
@@ -1835,9 +1932,9 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
 		wr_regl(port, S3C64XX_UINTSP, 0xf);
 	}
 
-	dbg("port: map=%pa, mem=%p, irq=%d (%d,%d), clock=%u\n",
-	    &port->mapbase, port->membase, port->irq,
-	    ourport->rx_irq, ourport->tx_irq, port->uartclk);
+	dev_dbg(port->dev, "port: map=%pa, mem=%p, irq=%d (%d,%d), clock=%u\n",
+		&port->mapbase, port->membase, port->irq,
+		ourport->rx_irq, ourport->tx_irq, port->uartclk);
 
 	/* reset the fifos (and setup the uart) */
 	s3c24xx_serial_resetport(port, cfg);
@@ -1851,7 +1948,10 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
 
 /* Device driver serial port probe */
 
+#ifdef CONFIG_OF
 static const struct of_device_id s3c24xx_uart_dt_match[];
+#endif
+
 static int probe_index;
 
 static inline struct s3c24xx_serial_drv_data *s3c24xx_get_driver_data(
@@ -1860,6 +1960,7 @@ static inline struct s3c24xx_serial_drv_data *s3c24xx_get_driver_data(
 #ifdef CONFIG_OF
 	if (pdev->dev.of_node) {
 		const struct of_device_id *match;
+
 		match = of_match_node(s3c24xx_uart_dt_match, pdev->dev.of_node);
 		return (struct s3c24xx_serial_drv_data *)match->data;
 	}
@@ -1881,8 +1982,6 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
 			index = ret;
 	}
 
-	dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index);
-
 	if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) {
 		dev_err(&pdev->dev, "serial%d out of range\n", index);
 		return -EINVAL;
@@ -1909,6 +2008,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
 		ourport->port.fifosize = ourport->drv_data->fifosize[index];
 	else if (ourport->info->fifosize)
 		ourport->port.fifosize = ourport->info->fifosize;
+	ourport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SAMSUNG_CONSOLE);
 
 	/*
 	 * DMA transfers must be aligned at least to cache line size,
@@ -1917,7 +2017,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
 	ourport->min_dma_size = max_t(int, ourport->port.fifosize,
 				    dma_get_cache_alignment());
 
-	dbg("%s: initialising port %p...\n", __func__, ourport);
+	dev_dbg(&pdev->dev, "%s: initialising port %p...\n", __func__, ourport);
 
 	ret = s3c24xx_serial_init_port(ourport, pdev);
 	if (ret < 0)
@@ -1931,7 +2031,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
 		}
 	}
 
-	dbg("%s: adding port\n", __func__);
+	dev_dbg(&pdev->dev, "%s: adding port\n", __func__);
 	uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
 	platform_set_drvdata(pdev, &ourport->port);
 
@@ -2008,9 +2108,10 @@ static int s3c24xx_serial_resume_noirq(struct device *dev)
 		/* restore IRQ mask */
 		if (s3c24xx_serial_has_interrupt_mask(port)) {
 			unsigned int uintm = 0xf;
-			if (tx_enabled(port))
+
+			if (ourport->tx_enabled)
 				uintm &= ~S3C64XX_UINTM_TXD_MSK;
-			if (rx_enabled(port))
+			if (ourport->rx_enabled)
 				uintm &= ~S3C64XX_UINTM_RXD_MSK;
 			clk_prepare_enable(ourport->clk);
 			if (!IS_ERR(ourport->baudclk))
@@ -2143,10 +2244,6 @@ s3c24xx_serial_get_options(struct uart_port *port, int *baud,
 	ucon   = rd_regl(port, S3C2410_UCON);
 	ubrdiv = rd_regl(port, S3C2410_UBRDIV);
 
-	dbg("s3c24xx_serial_get_options: port=%p\n"
-	    "registers: ulcon=%08x, ucon=%08x, ubdriv=%08x\n",
-	    port, ulcon, ucon, ubrdiv);
-
 	if (s3c24xx_port_configured(ucon)) {
 		switch (ulcon & S3C2410_LCON_CSMASK) {
 		case S3C2410_LCON_CS5:
@@ -2190,7 +2287,7 @@ s3c24xx_serial_get_options(struct uart_port *port, int *baud,
 			rate = 1;
 
 		*baud = rate / (16 * (ubrdiv + 1));
-		dbg("calculated baud %d\n", *baud);
+		dev_dbg(port->dev, "calculated baud %d\n", *baud);
 	}
 
 }
@@ -2204,9 +2301,6 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
 	int parity = 'n';
 	int flow = 'n';
 
-	dbg("s3c24xx_serial_console_setup: co=%p (%d), %s\n",
-	    co, co->index, options);
-
 	/* is this a valid port */
 
 	if (co->index == -1 || co->index >= CONFIG_SERIAL_SAMSUNG_UARTS)
@@ -2221,8 +2315,6 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
 
 	cons_uart = port;
 
-	dbg("s3c24xx_serial_console_setup: port=%p (%d)\n", port, co->index);
-
 	/*
 	 * Check whether an invalid uart number has been specified, and
 	 * if so, search for the first available port that does have
@@ -2233,7 +2325,7 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
 	else
 		s3c24xx_serial_get_options(port, &baud, &parity, &bits);
 
-	dbg("s3c24xx_serial_console_setup: baud %d\n", baud);
+	dev_dbg(port->dev, "baud %d\n", baud);
 
 	return uart_set_options(port, co, baud, parity, bits, flow);
 }
@@ -2523,7 +2615,8 @@ static void samsung_early_putc(struct uart_port *port, int c)
 	writeb(c, port->membase + S3C2410_UTXH);
 }
 
-static void samsung_early_write(struct console *con, const char *s, unsigned n)
+static void samsung_early_write(struct console *con, const char *s,
+				unsigned int n)
 {
 	struct earlycon_device *dev = con->data;
 
@@ -2572,7 +2665,7 @@ OF_EARLYCON_DECLARE(s3c2440, "samsung,s3c2440-uart",
 OF_EARLYCON_DECLARE(s3c6400, "samsung,s3c6400-uart",
 			s3c2440_early_console_setup);
 
-/* S5PV210, EXYNOS */
+/* S5PV210, Exynos */
 static struct samsung_early_console_data s5pv210_early_console_data = {
 	.txfull_mask = S5PV210_UFSTAT_TXFULL,
 };
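
The samsung_tty.c changes above are the first instance in this series of dropping the compile-time SUPPORT_SYSRQ switch in favour of the per-port has_sysrq flag; the same one-line conversion repeats in most of the console-capable drivers below. A rough sketch of the pattern, where CONFIG_SERIAL_FOO_CONSOLE and foo_set_sysrq() are made-up stand-ins for a driver's real console Kconfig symbol and probe helper:

#include <linux/kconfig.h>
#include <linux/serial_core.h>

/*
 * IS_ENABLED() folds to a constant 0 or 1, so the flag costs nothing when
 * console (and hence sysrq) support is not configured; the helpers added
 * to serial_core.c further down in this diff bail out early whenever
 * has_sysrq is clear.
 */
static void foo_set_sysrq(struct uart_port *port)
{
	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_FOO_CONSOLE);
}
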
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 7c99340..bd5e7e9 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -15,10 +15,6 @@
  *	"BCM1250/BCM1125/BCM1125H User Manual", Broadcom Corporation
  */
 
-#if defined(CONFIG_SERIAL_SB1250_DUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/compiler.h>
 #include <linux/console.h>
 #include <linux/delay.h>
@@ -813,6 +809,7 @@ static void __init sbd_probe_duarts(void)
 			uport->ops	= &sbd_ops;
 			uport->line	= line;
 			uport->mapbase	= SBD_CHANREGS(line);
+			uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SB1250_DUART_CONSOLE);
 		}
 	}
 }
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index d2b77aa..10cc16a 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -7,10 +7,6 @@
  *  Based on sc26xx.c, by Thomas Bogendörfer (tsbogend@alpha.franken.de)
  */
 
-#if defined(CONFIG_SERIAL_SCCNXP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/err.h>
@@ -1000,6 +996,7 @@ static int sccnxp_probe(struct platform_device *pdev)
 		s->port[i].regshift	= s->pdata.reg_shift;
 		s->port[i].uartclk	= uartclk;
 		s->port[i].ops		= &sccnxp_ops;
+		s->port[i].has_sysrq = IS_ENABLED(CONFIG_SERIAL_SCCNXP_CONSOLE);
 		uart_add_one_port(&s->uart, &s->port[i]);
 		/* Set direction to input */
 		if (s->chip->flags & SCCNXP_HAVE_IO)
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index b6ace62..33034b8 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -141,6 +141,7 @@ struct tegra_uart_port {
 	int					configured_rate;
 	bool					use_rx_pio;
 	bool					use_tx_pio;
+	bool					rx_dma_active;
 };
 
 static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
@@ -533,11 +534,12 @@ static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
 	struct circ_buf *xmit = &tup->uport.state->xmit;
 	dma_addr_t tx_phys_addr;
 
-	dma_sync_single_for_device(tup->uport.dev, tup->tx_dma_buf_phys,
-				UART_XMIT_SIZE, DMA_TO_DEVICE);
-
 	tup->tx_bytes = count & ~(0xF);
 	tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
+
+	dma_sync_single_for_device(tup->uport.dev, tx_phys_addr,
+				   tup->tx_bytes, DMA_TO_DEVICE);
+
 	tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
 				tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
 				DMA_PREP_INTERRUPT);
@@ -679,7 +681,7 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
 		return;
 
 	dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
-				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+				count, DMA_FROM_DEVICE);
 	copied = tty_insert_flip_string(tty,
 			((unsigned char *)(tup->rx_dma_buf_virt)), count);
 	if (copied != count) {
@@ -687,7 +689,7 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
 		dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
 	}
 	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
-				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+				   count, DMA_TO_DEVICE);
 }
 
 static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
@@ -731,6 +733,7 @@ static void tegra_uart_rx_dma_complete(void *args)
 	if (tup->rts_active)
 		set_rts(tup, false);
 
+	tup->rx_dma_active = false;
 	tegra_uart_rx_buffer_push(tup, 0);
 	tegra_uart_start_rx_dma(tup);
 
@@ -742,18 +745,27 @@ static void tegra_uart_rx_dma_complete(void *args)
 	spin_unlock_irqrestore(&u->lock, flags);
 }
 
-static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
+static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
 {
 	struct dma_tx_state state;
 
+	if (!tup->rx_dma_active)
+		return;
+
+	dmaengine_terminate_all(tup->rx_dma_chan);
+	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
+
+	tegra_uart_rx_buffer_push(tup, state.residue);
+	tup->rx_dma_active = false;
+}
+
+static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
+{
 	/* Deactivate flow control to stop sender */
 	if (tup->rts_active)
 		set_rts(tup, false);
 
-	dmaengine_terminate_all(tup->rx_dma_chan);
-	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
-	tegra_uart_rx_buffer_push(tup, state.residue);
-	tegra_uart_start_rx_dma(tup);
+	tegra_uart_terminate_rx_dma(tup);
 
 	if (tup->rts_active)
 		set_rts(tup, true);
@@ -763,6 +775,9 @@ static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
 {
 	unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
 
+	if (tup->rx_dma_active)
+		return 0;
+
 	tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
 				tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
 				DMA_PREP_INTERRUPT);
@@ -771,10 +786,9 @@ static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
 		return -EIO;
 	}
 
+	tup->rx_dma_active = true;
 	tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
 	tup->rx_dma_desc->callback_param = tup;
-	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
-				count, DMA_TO_DEVICE);
 	tup->rx_bytes_requested = count;
 	tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
 	dma_async_issue_pending(tup->rx_dma_chan);
@@ -820,6 +834,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
 	struct uart_port *u = &tup->uport;
 	unsigned long iir;
 	unsigned long ier;
+	bool is_rx_start = false;
 	bool is_rx_int = false;
 	unsigned long flags;
 
@@ -832,10 +847,12 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
 				if (tup->rx_in_progress) {
 					ier = tup->ier_shadow;
 					ier |= (UART_IER_RLSI | UART_IER_RTOIE |
-						TEGRA_UART_IER_EORD);
+						TEGRA_UART_IER_EORD | UART_IER_RDI);
 					tup->ier_shadow = ier;
 					tegra_uart_write(tup, ier, UART_IER);
 				}
+			} else if (is_rx_start) {
+				tegra_uart_start_rx_dma(tup);
 			}
 			spin_unlock_irqrestore(&u->lock, flags);
 			return IRQ_HANDLED;
@@ -854,17 +871,23 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
 
 		case 4: /* End of data */
 		case 6: /* Rx timeout */
-		case 2: /* Receive */
-			if (!tup->use_rx_pio && !is_rx_int) {
-				is_rx_int = true;
+			if (!tup->use_rx_pio) {
+				is_rx_int = tup->rx_in_progress;
 				/* Disable Rx interrupts */
 				ier = tup->ier_shadow;
-				ier |= UART_IER_RDI;
-				tegra_uart_write(tup, ier, UART_IER);
 				ier &= ~(UART_IER_RDI | UART_IER_RLSI |
 					UART_IER_RTOIE | TEGRA_UART_IER_EORD);
 				tup->ier_shadow = ier;
 				tegra_uart_write(tup, ier, UART_IER);
+				break;
+			}
+			/* Fall through */
+		case 2: /* Receive */
+			if (!tup->use_rx_pio) {
+				is_rx_start = tup->rx_in_progress;
+				tup->ier_shadow  &= ~UART_IER_RDI;
+				tegra_uart_write(tup, tup->ier_shadow,
+						 UART_IER);
 			} else {
 				do_handle_rx_pio(tup);
 			}
@@ -886,7 +909,6 @@ static void tegra_uart_stop_rx(struct uart_port *u)
 {
 	struct tegra_uart_port *tup = to_tegra_uport(u);
 	struct tty_port *port = &tup->uport.state->port;
-	struct dma_tx_state state;
 	unsigned long ier;
 
 	if (tup->rts_active)
@@ -903,13 +925,11 @@ static void tegra_uart_stop_rx(struct uart_port *u)
 	tup->ier_shadow = ier;
 	tegra_uart_write(tup, ier, UART_IER);
 	tup->rx_in_progress = 0;
-	if (tup->rx_dma_chan && !tup->use_rx_pio) {
-		dmaengine_terminate_all(tup->rx_dma_chan);
-		dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
-		tegra_uart_rx_buffer_push(tup, state.residue);
-	} else {
+
+	if (!tup->use_rx_pio)
+		tegra_uart_terminate_rx_dma(tup);
+	else
 		tegra_uart_handle_rx_pio(tup, port);
-	}
 }
 
 static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
@@ -1052,12 +1072,6 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
 		tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
 		tup->fcr_shadow |= UART_FCR_DMA_SELECT;
 		tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
-
-		ret = tegra_uart_start_rx_dma(tup);
-		if (ret < 0) {
-			dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
-			return ret;
-		}
 	} else {
 		tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
 	}
@@ -1067,10 +1081,6 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
 	 * Enable IE_RXS for the receive status interrupts like line errors.
 	 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
 	 *
-	 * If using DMA mode, enable EORD instead of receive interrupt which
-	 * will interrupt after the UART is done with the receive instead of
-	 * the interrupt when the FIFO "threshold" is reached.
-	 *
 	 * EORD is a different interrupt from RX_TIMEOUT - RX_TIMEOUT occurs when
 	 * the DATA is sitting in the FIFO and couldn't be transferred to the
 	 * DMA as the DMA size alignment (4 bytes) is not met. EORD will be
@@ -1081,11 +1091,14 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
 	 * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
 	 * then the EORD.
 	 */
+	tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
+
+	/*
+	 * If using DMA mode, enable EORD interrupt to notify about RX
+	 * completion.
+	 */
 	if (!tup->use_rx_pio)
-		tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE |
-			TEGRA_UART_IER_EORD;
-	else
-		tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
+		tup->ier_shadow |= TEGRA_UART_IER_EORD;
 
 	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
 	return 0;
@@ -1140,6 +1153,9 @@ static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
 			dma_release_channel(dma_chan);
 			return -ENOMEM;
 		}
+		dma_sync_single_for_device(tup->uport.dev, dma_phys,
+					   TEGRA_UART_RX_DMA_BUFFER_SIZE,
+					   DMA_TO_DEVICE);
 		dma_sconfig.src_addr = tup->uport.mapbase;
 		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
 		dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
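
The serial-tegra.c hunks above gate RX DMA on the new rx_dma_active flag and funnel the teardown into tegra_uart_terminate_rx_dma(), which stops the transfer and pushes whatever the engine actually wrote. Reduced to a self-contained sketch, the underlying dmaengine accounting looks roughly like this (only the dmaengine calls and struct dma_tx_state are real kernel API; foo_stop_rx_dma() is illustrative):

#include <linux/dmaengine.h>

/*
 * Stop an in-flight slave RX transfer and report how many bytes landed in
 * the buffer: the residue reported by the engine is what is still
 * outstanding, so the completed portion is the requested size minus it.
 */
static size_t foo_stop_rx_dma(struct dma_chan *chan, dma_cookie_t cookie,
			      size_t requested)
{
	struct dma_tx_state state;

	dmaengine_terminate_all(chan);
	dmaengine_tx_status(chan, cookie, &state);

	return requested - state.residue;
}
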
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 7c27827..76e506e 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2603,6 +2603,7 @@ struct tty_driver *uart_console_device(struct console *co, int *index)
 	*index = co->index;
 	return p->tty_driver;
 }
+EXPORT_SYMBOL_GPL(uart_console_device);
 
 static ssize_t uart_get_attr_uartclk(struct device *dev,
 	struct device_attribute *attr, char *buf)
@@ -3081,6 +3082,89 @@ void uart_insert_char(struct uart_port *port, unsigned int status,
 }
 EXPORT_SYMBOL_GPL(uart_insert_char);
 
+int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+	if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL))
+		return 0;
+
+	if (!port->has_sysrq || !port->sysrq)
+		return 0;
+
+	if (ch && time_before(jiffies, port->sysrq)) {
+		handle_sysrq(ch);
+		port->sysrq = 0;
+		return 1;
+	}
+	port->sysrq = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uart_handle_sysrq_char);
+
+int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+	if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL))
+		return 0;
+
+	if (!port->has_sysrq || !port->sysrq)
+		return 0;
+
+	if (ch && time_before(jiffies, port->sysrq)) {
+		port->sysrq_ch = ch;
+		port->sysrq = 0;
+		return 1;
+	}
+	port->sysrq = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uart_prepare_sysrq_char);
+
+void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+{
+	int sysrq_ch;
+
+	if (!port->has_sysrq) {
+		spin_unlock_irqrestore(&port->lock, irqflags);
+		return;
+	}
+
+	sysrq_ch = port->sysrq_ch;
+	port->sysrq_ch = 0;
+
+	spin_unlock_irqrestore(&port->lock, irqflags);
+
+	if (sysrq_ch)
+		handle_sysrq(sysrq_ch);
+}
+EXPORT_SYMBOL_GPL(uart_unlock_and_check_sysrq);
+
+/*
+ * We do the SysRQ and SAK checking like this...
+ */
+int uart_handle_break(struct uart_port *port)
+{
+	struct uart_state *state = port->state;
+
+	if (port->handle_break)
+		port->handle_break(port);
+
+	if (port->has_sysrq) {
+		if (port->cons && port->cons->index == port->line) {
+			if (!port->sysrq) {
+				port->sysrq = jiffies + HZ*5;
+				return 1;
+			}
+			port->sysrq = 0;
+		}
+	}
+
+	if (port->flags & UPF_SAK)
+		do_SAK(state->port.tty);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uart_handle_break);
+
 EXPORT_SYMBOL(uart_write_wakeup);
 EXPORT_SYMBOL(uart_register_driver);
 EXPORT_SYMBOL(uart_unregister_driver);
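
The helpers added above replace the old header-inline SUPPORT_SYSRQ implementations with out-of-line versions keyed off port->has_sysrq. A minimal sketch of the intended call pattern in a driver's receive interrupt, assuming hypothetical foo_rx_ready()/foo_read_char() hardware accessors (only the uart_* and spinlock calls come from the code above):

#include <linux/interrupt.h>
#include <linux/serial_core.h>
#include <linux/tty.h>

static bool foo_rx_ready(struct uart_port *port);		/* illustrative */
static unsigned int foo_read_char(struct uart_port *port);	/* illustrative */

static irqreturn_t foo_uart_rx_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned long flags;
	unsigned int ch;

	spin_lock_irqsave(&port->lock, flags);

	while (foo_rx_ready(port)) {
		ch = foo_read_char(port);
		/* consume the character if it completes a sysrq sequence */
		if (uart_prepare_sysrq_char(port, ch))
			continue;
		uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
	}
	/* a real handler would also push the tty flip buffer here */

	/*
	 * Drops port->lock and, if a sysrq character was queued, runs
	 * handle_sysrq() outside the lock so the handler can print to
	 * this same console without deadlocking.
	 */
	uart_unlock_and_check_sysrq(port, flags);

	return IRQ_HANDLED;
}
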
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index d22ccb3..b4d89e3 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -12,10 +12,6 @@
  *  Serial driver for TX3927/TX4927/TX4925/TX4938 internal SIO controller
  */
 
-#if defined(CONFIG_SERIAL_TXX9_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
@@ -1095,6 +1091,7 @@ static int serial_txx9_probe(struct platform_device *dev)
 		port.flags	= p->flags;
 		port.mapbase	= p->mapbase;
 		port.dev	= &dev->dev;
+		port.has_sysrq	= IS_ENABLED(CONFIG_SERIAL_TXX9_CONSOLE);
 		ret = serial_txx9_register_port(&port);
 		if (ret < 0) {
 			dev_err(&dev->dev, "unable to register port at index %d "
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 87ca629..c073aa7 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -15,10 +15,6 @@
  *   Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
  *   Removed SH7300 support (Jul 2007).
  */
-#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #undef DEBUG
 
 #include <linux/clk.h>
@@ -2887,6 +2883,7 @@ static int sci_init_single(struct platform_device *dev,
 	port->ops	= &sci_uart_ops;
 	port->iotype	= UPIO_MEM;
 	port->line	= index;
+	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SH_SCI_CONSOLE);
 
 	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
 	if (res == NULL)
@@ -3015,12 +3012,9 @@ static void serial_console_write(struct console *co, const char *s,
 	unsigned long flags;
 	int locked = 1;
 
-#if defined(SUPPORT_SYSRQ)
 	if (port->sysrq)
 		locked = 0;
-	else
-#endif
-	if (oops_in_progress)
+	else if (oops_in_progress)
 		locked = spin_trylock_irqsave(&port->lock, flags);
 	else
 		spin_lock_irqsave(&port->lock, flags);
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index f60a59d..3d3c706 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -3,10 +3,6 @@
  * Copyright (C) 2012-2015 Spreadtrum Communications Inc.
  */
 
-#if defined(CONFIG_SERIAL_SPRD_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/clk.h>
 #include <linux/console.h>
 #include <linux/delay.h>
@@ -1230,6 +1226,7 @@ static int sprd_probe(struct platform_device *pdev)
 	up->fifosize = SPRD_FIFO_SIZE;
 	up->ops = &serial_sprd_ops;
 	up->flags = UPF_BOOT_AUTOCONF;
+	up->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SPRD_CONSOLE);
 
 	ret = sprd_clk_init(up);
 	if (ret)
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index 7971997c..e704851 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -5,10 +5,6 @@
  * Copyright (C) 2003-2013 STMicroelectronics (R&D) Limited
  */
 
-#if defined(CONFIG_SERIAL_ST_ASC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/module.h>
 #include <linux/serial.h>
 #include <linux/console.h>
@@ -508,7 +504,6 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
 			    struct ktermios *old)
 {
 	struct asc_port *ascport = to_asc_port(port);
-	struct device_node *np = port->dev->of_node;
 	struct gpio_desc *gpiod;
 	unsigned int baud;
 	u32 ctrl_val;
@@ -570,13 +565,12 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
 			pinctrl_select_state(ascport->pinctrl,
 					     ascport->states[NO_HW_FLOWCTRL]);
 
-			gpiod = devm_fwnode_get_gpiod_from_child(port->dev,
-								 "rts",
-								 &np->fwnode,
-								 GPIOD_OUT_LOW,
-								 np->name);
-			if (!IS_ERR(gpiod))
+			gpiod = devm_gpiod_get(port->dev, "rts", GPIOD_OUT_LOW);
+			if (!IS_ERR(gpiod)) {
+				gpiod_set_consumer_name(gpiod,
+						port->dev->of_node->name);
 				ascport->rts = gpiod;
+			}
 		}
 	}
 
@@ -730,6 +724,7 @@ static int asc_init_port(struct asc_port *ascport,
 	port->fifosize	= ASC_FIFO_SIZE;
 	port->dev	= &pdev->dev;
 	port->irq	= platform_get_irq(pdev, 0);
+	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ST_ASC_CONSOLE);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	port->membase = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 2f72514..5e93e8d 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -8,10 +8,6 @@
  * Inspired by st-asc.c from STMicroelectronics (c)
  */
 
-#if defined(CONFIG_SERIAL_STM32_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/clk.h>
 #include <linux/console.h>
 #include <linux/delay.h>
@@ -926,6 +922,7 @@ static int stm32_init_port(struct stm32_port *stm32port,
 	port->ops	= &stm32_uart_ops;
 	port->dev	= &pdev->dev;
 	port->fifosize	= stm32port->info->cfg.fifosize;
+	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
 
 	ret = platform_get_irq(pdev, 0);
 	if (ret <= 0)
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index f8503f8..eafada8 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -25,10 +25,6 @@
 #include <asm/irq.h>
 #include <asm/setup.h>
 
-#if defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/serial_core.h>
 #include <linux/sunserialcore.h>
 
@@ -552,6 +548,7 @@ static int hv_probe(struct platform_device *op)
 
 	sunhv_port = port;
 
+	port->has_sysrq = 1;
 	port->line = 0;
 	port->ops = &sunhv_pops;
 	port->type = PORT_SUNHV;
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 72131b5..1eb703c 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -40,10 +40,6 @@
 #include <asm/prom.h>
 #include <asm/setup.h>
 
-#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/serial_core.h>
 #include <linux/sunserialcore.h>
 
@@ -985,6 +981,7 @@ static int sunsab_init_one(struct uart_sunsab_port *up,
 
 	up->port.fifosize = SAB82532_XMIT_FIFO_SIZE;
 	up->port.iotype = UPIO_MEM;
+	up->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNSAB_CONSOLE);
 
 	writeb(SAB82532_IPC_IC_ACT_LOW, &up->regs->w.ipc);
 
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 4db6aaa..8ce9a7a 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -44,10 +44,6 @@
 #include <asm/prom.h>
 #include <asm/setup.h>
 
-#if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/serial_core.h>
 #include <linux/sunserialcore.h>
 
@@ -1475,6 +1471,7 @@ static int su_probe(struct platform_device *op)
 
 	up->port.type = PORT_UNKNOWN;
 	up->port.uartclk = (SU_BASE_BAUD * 16);
+	up->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNSU_CONSOLE);
 
 	err = 0;
 	if (up->su_type == SU_PORT_KBD || up->su_type == SU_PORT_MS) {
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index bc7af8b..103ab8c 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -40,10 +40,6 @@
 #include <asm/prom.h>
 #include <asm/setup.h>
 
-#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/serial_core.h>
 #include <linux/sunserialcore.h>
 
@@ -1444,6 +1440,7 @@ static int zs_probe(struct platform_device *op)
 	up[0].port.line = (inst * 2) + 0;
 	up[0].port.dev = &op->dev;
 	up[0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A;
+	up[0].port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNZILOG_CONSOLE);
 	if (keyboard_mouse)
 		up[0].flags |= SUNZILOG_FLAG_CONS_KEYB;
 	sunzilog_init_hw(&up[0]);
@@ -1461,6 +1458,7 @@ static int zs_probe(struct platform_device *op)
 	up[1].port.line = (inst * 2) + 1;
 	up[1].port.dev = &op->dev;
 	up[1].flags |= 0;
+	up[1].port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNZILOG_CONSOLE);
 	if (keyboard_mouse)
 		up[1].flags |= SUNZILOG_FLAG_CONS_MOUSE;
 	sunzilog_init_hw(&up[1]);
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index a0555ae..3c8c662 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -32,7 +32,11 @@
 #include <soc/fsl/qe/ucc_slow.h>
 
 #include <linux/firmware.h>
-#include <asm/reg.h>
+#include <soc/fsl/cpm.h>
+
+#ifdef CONFIG_PPC32
+#include <asm/reg.h> /* mfspr, SPRN_SVR */
+#endif
 
 /*
  * The GUMR flag for Soft UART.  This would normally be defined in qe.h,
@@ -257,11 +261,11 @@ static unsigned int qe_uart_tx_empty(struct uart_port *port)
 	struct qe_bd *bdp = qe_port->tx_bd_base;
 
 	while (1) {
-		if (in_be16(&bdp->status) & BD_SC_READY)
+		if (qe_ioread16be(&bdp->status) & BD_SC_READY)
 			/* This BD is not done, so return "not done" */
 			return 0;
 
-		if (in_be16(&bdp->status) & BD_SC_WRAP)
+		if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
 			/*
 			 * This BD is done and it's the last one, so return
 			 * "done"
@@ -307,7 +311,7 @@ static void qe_uart_stop_tx(struct uart_port *port)
 	struct uart_qe_port *qe_port =
 		container_of(port, struct uart_qe_port, port);
 
-	clrbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX);
+	qe_clrbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX);
 }
 
 /*
@@ -332,20 +336,18 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
 	struct uart_port *port = &qe_port->port;
 	struct circ_buf *xmit = &port->state->xmit;
 
-	bdp = qe_port->rx_cur;
-
 	/* Handle xon/xoff */
 	if (port->x_char) {
 		/* Pick next descriptor and fill from buffer */
 		bdp = qe_port->tx_cur;
 
-		p = qe2cpu_addr(bdp->buf, qe_port);
+		p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
 
 		*p++ = port->x_char;
-		out_be16(&bdp->length, 1);
-		setbits16(&bdp->status, BD_SC_READY);
+		qe_iowrite16be(1, &bdp->length);
+		qe_setbits_be16(&bdp->status, BD_SC_READY);
 		/* Get next BD. */
-		if (in_be16(&bdp->status) & BD_SC_WRAP)
+		if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
 			bdp = qe_port->tx_bd_base;
 		else
 			bdp++;
@@ -364,10 +366,10 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
 	/* Pick next descriptor and fill from buffer */
 	bdp = qe_port->tx_cur;
 
-	while (!(in_be16(&bdp->status) & BD_SC_READY) &&
+	while (!(qe_ioread16be(&bdp->status) & BD_SC_READY) &&
 	       (xmit->tail != xmit->head)) {
 		count = 0;
-		p = qe2cpu_addr(bdp->buf, qe_port);
+		p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
 		while (count < qe_port->tx_fifosize) {
 			*p++ = xmit->buf[xmit->tail];
 			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -377,11 +379,11 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
 				break;
 		}
 
-		out_be16(&bdp->length, count);
-		setbits16(&bdp->status, BD_SC_READY);
+		qe_iowrite16be(count, &bdp->length);
+		qe_setbits_be16(&bdp->status, BD_SC_READY);
 
 		/* Get next BD. */
-		if (in_be16(&bdp->status) & BD_SC_WRAP)
+		if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
 			bdp = qe_port->tx_bd_base;
 		else
 			bdp++;
@@ -414,12 +416,12 @@ static void qe_uart_start_tx(struct uart_port *port)
 		container_of(port, struct uart_qe_port, port);
 
 	/* If we currently are transmitting, then just return */
-	if (in_be16(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX)
+	if (qe_ioread16be(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX)
 		return;
 
 	/* Otherwise, pump the port and start transmission */
 	if (qe_uart_tx_pump(qe_port))
-		setbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX);
+		qe_setbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX);
 }
 
 /*
@@ -430,7 +432,7 @@ static void qe_uart_stop_rx(struct uart_port *port)
 	struct uart_qe_port *qe_port =
 		container_of(port, struct uart_qe_port, port);
 
-	clrbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX);
+	qe_clrbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX);
 }
 
 /* Start or stop sending  break signal
@@ -469,14 +471,14 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
 	 */
 	bdp = qe_port->rx_cur;
 	while (1) {
-		status = in_be16(&bdp->status);
+		status = qe_ioread16be(&bdp->status);
 
 		/* If this one is empty, then we assume we've read them all */
 		if (status & BD_SC_EMPTY)
 			break;
 
 		/* get number of characters, and check space in RX buffer */
-		i = in_be16(&bdp->length);
+		i = qe_ioread16be(&bdp->length);
 
 		/* If we don't have enough room in RX buffer for the entire BD,
 		 * then we try later, which will be the next RX interrupt.
@@ -487,7 +489,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
 		}
 
 		/* get pointer */
-		cp = qe2cpu_addr(bdp->buf, qe_port);
+		cp = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
 
 		/* loop through the buffer */
 		while (i-- > 0) {
@@ -507,9 +509,10 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
 		}
 
 		/* This BD is ready to be used again. Clear status. get next */
-		clrsetbits_be16(&bdp->status, BD_SC_BR | BD_SC_FR | BD_SC_PR |
-			BD_SC_OV | BD_SC_ID, BD_SC_EMPTY);
-		if (in_be16(&bdp->status) & BD_SC_WRAP)
+		qe_clrsetbits_be16(&bdp->status,
+				   BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID,
+				   BD_SC_EMPTY);
+		if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
 			bdp = qe_port->rx_bd_base;
 		else
 			bdp++;
@@ -551,9 +554,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
 	/* Overrun does not affect the current character ! */
 	if (status & BD_SC_OV)
 		tty_insert_flip_char(tport, 0, TTY_OVERRUN);
-#ifdef SUPPORT_SYSRQ
 	port->sysrq = 0;
-#endif
 	goto error_return;
 }
 
@@ -568,8 +569,8 @@ static irqreturn_t qe_uart_int(int irq, void *data)
 	u16 events;
 
 	/* Clear the interrupts */
-	events = in_be16(&uccp->ucce);
-	out_be16(&uccp->ucce, events);
+	events = qe_ioread16be(&uccp->ucce);
+	qe_iowrite16be(events, &uccp->ucce);
 
 	if (events & UCC_UART_UCCE_BRKE)
 		uart_handle_break(&qe_port->port);
@@ -600,17 +601,17 @@ static void qe_uart_initbd(struct uart_qe_port *qe_port)
 	bdp = qe_port->rx_bd_base;
 	qe_port->rx_cur = qe_port->rx_bd_base;
 	for (i = 0; i < (qe_port->rx_nrfifos - 1); i++) {
-		out_be16(&bdp->status, BD_SC_EMPTY | BD_SC_INTRPT);
-		out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
-		out_be16(&bdp->length, 0);
+		qe_iowrite16be(BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status);
+		qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+		qe_iowrite16be(0, &bdp->length);
 		bd_virt += qe_port->rx_fifosize;
 		bdp++;
 	}
 
 	/* */
-	out_be16(&bdp->status, BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT);
-	out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
-	out_be16(&bdp->length, 0);
+	qe_iowrite16be(BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status);
+	qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+	qe_iowrite16be(0, &bdp->length);
 
 	/* Set the physical address of the host memory
 	 * buffers in the buffer descriptors, and the
@@ -621,21 +622,21 @@ static void qe_uart_initbd(struct uart_qe_port *qe_port)
 	qe_port->tx_cur = qe_port->tx_bd_base;
 	bdp = qe_port->tx_bd_base;
 	for (i = 0; i < (qe_port->tx_nrfifos - 1); i++) {
-		out_be16(&bdp->status, BD_SC_INTRPT);
-		out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
-		out_be16(&bdp->length, 0);
+		qe_iowrite16be(BD_SC_INTRPT, &bdp->status);
+		qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+		qe_iowrite16be(0, &bdp->length);
 		bd_virt += qe_port->tx_fifosize;
 		bdp++;
 	}
 
 	/* Loopback requires the preamble bit to be set on the first TX BD */
 #ifdef LOOPBACK
-	setbits16(&qe_port->tx_cur->status, BD_SC_P);
+	qe_setbits_be16(&qe_port->tx_cur->status, BD_SC_P);
 #endif
 
-	out_be16(&bdp->status, BD_SC_WRAP | BD_SC_INTRPT);
-	out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
-	out_be16(&bdp->length, 0);
+	qe_iowrite16be(BD_SC_WRAP | BD_SC_INTRPT, &bdp->status);
+	qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+	qe_iowrite16be(0, &bdp->length);
 }
 
 /*
@@ -657,78 +658,74 @@ static void qe_uart_init_ucc(struct uart_qe_port *qe_port)
 	ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX);
 
 	/* Program the UCC UART parameter RAM */
-	out_8(&uccup->common.rbmr, UCC_BMR_GBL | UCC_BMR_BO_BE);
-	out_8(&uccup->common.tbmr, UCC_BMR_GBL | UCC_BMR_BO_BE);
-	out_be16(&uccup->common.mrblr, qe_port->rx_fifosize);
-	out_be16(&uccup->maxidl, 0x10);
-	out_be16(&uccup->brkcr, 1);
-	out_be16(&uccup->parec, 0);
-	out_be16(&uccup->frmec, 0);
-	out_be16(&uccup->nosec, 0);
-	out_be16(&uccup->brkec, 0);
-	out_be16(&uccup->uaddr[0], 0);
-	out_be16(&uccup->uaddr[1], 0);
-	out_be16(&uccup->toseq, 0);
+	qe_iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.rbmr);
+	qe_iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.tbmr);
+	qe_iowrite16be(qe_port->rx_fifosize, &uccup->common.mrblr);
+	qe_iowrite16be(0x10, &uccup->maxidl);
+	qe_iowrite16be(1, &uccup->brkcr);
+	qe_iowrite16be(0, &uccup->parec);
+	qe_iowrite16be(0, &uccup->frmec);
+	qe_iowrite16be(0, &uccup->nosec);
+	qe_iowrite16be(0, &uccup->brkec);
+	qe_iowrite16be(0, &uccup->uaddr[0]);
+	qe_iowrite16be(0, &uccup->uaddr[1]);
+	qe_iowrite16be(0, &uccup->toseq);
 	for (i = 0; i < 8; i++)
-		out_be16(&uccup->cchars[i], 0xC000);
-	out_be16(&uccup->rccm, 0xc0ff);
+		qe_iowrite16be(0xC000, &uccup->cchars[i]);
+	qe_iowrite16be(0xc0ff, &uccup->rccm);
 
 	/* Configure the GUMR registers for UART */
 	if (soft_uart) {
 		/* Soft-UART requires a 1X multiplier for TX */
-		clrsetbits_be32(&uccp->gumr_l,
-			UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
-			UCC_SLOW_GUMR_L_RDCR_MASK,
-			UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_1 |
-			UCC_SLOW_GUMR_L_RDCR_16);
+		qe_clrsetbits_be32(&uccp->gumr_l,
+				   UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | UCC_SLOW_GUMR_L_RDCR_MASK,
+				   UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_1 | UCC_SLOW_GUMR_L_RDCR_16);
 
-		clrsetbits_be32(&uccp->gumr_h, UCC_SLOW_GUMR_H_RFW,
-			UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX);
+		qe_clrsetbits_be32(&uccp->gumr_h, UCC_SLOW_GUMR_H_RFW,
+				   UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX);
 	} else {
-		clrsetbits_be32(&uccp->gumr_l,
-			UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
-			UCC_SLOW_GUMR_L_RDCR_MASK,
-			UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_16 |
-			UCC_SLOW_GUMR_L_RDCR_16);
+		qe_clrsetbits_be32(&uccp->gumr_l,
+				   UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | UCC_SLOW_GUMR_L_RDCR_MASK,
+				   UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_16 | UCC_SLOW_GUMR_L_RDCR_16);
 
-		clrsetbits_be32(&uccp->gumr_h,
-			UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX,
-			UCC_SLOW_GUMR_H_RFW);
+		qe_clrsetbits_be32(&uccp->gumr_h,
+				   UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX,
+				   UCC_SLOW_GUMR_H_RFW);
 	}
 
 #ifdef LOOPBACK
-	clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK,
-		UCC_SLOW_GUMR_L_DIAG_LOOP);
-	clrsetbits_be32(&uccp->gumr_h,
-		UCC_SLOW_GUMR_H_CTSP | UCC_SLOW_GUMR_H_RSYN,
-		UCC_SLOW_GUMR_H_CDS);
+	qe_clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK,
+			   UCC_SLOW_GUMR_L_DIAG_LOOP);
+	qe_clrsetbits_be32(&uccp->gumr_h,
+			   UCC_SLOW_GUMR_H_CTSP | UCC_SLOW_GUMR_H_RSYN,
+			   UCC_SLOW_GUMR_H_CDS);
 #endif
 
 	/* Disable rx interrupts  and clear all pending events.  */
-	out_be16(&uccp->uccm, 0);
-	out_be16(&uccp->ucce, 0xffff);
-	out_be16(&uccp->udsr, 0x7e7e);
+	qe_iowrite16be(0, &uccp->uccm);
+	qe_iowrite16be(0xffff, &uccp->ucce);
+	qe_iowrite16be(0x7e7e, &uccp->udsr);
 
 	/* Initialize UPSMR */
-	out_be16(&uccp->upsmr, 0);
+	qe_iowrite16be(0, &uccp->upsmr);
 
 	if (soft_uart) {
-		out_be16(&uccup->supsmr, 0x30);
-		out_be16(&uccup->res92, 0);
-		out_be32(&uccup->rx_state, 0);
-		out_be32(&uccup->rx_cnt, 0);
-		out_8(&uccup->rx_bitmark, 0);
-		out_8(&uccup->rx_length, 10);
-		out_be32(&uccup->dump_ptr, 0x4000);
-		out_8(&uccup->rx_temp_dlst_qe, 0);
-		out_be32(&uccup->rx_frame_rem, 0);
-		out_8(&uccup->rx_frame_rem_size, 0);
+		qe_iowrite16be(0x30, &uccup->supsmr);
+		qe_iowrite16be(0, &uccup->res92);
+		qe_iowrite32be(0, &uccup->rx_state);
+		qe_iowrite32be(0, &uccup->rx_cnt);
+		qe_iowrite8(0, &uccup->rx_bitmark);
+		qe_iowrite8(10, &uccup->rx_length);
+		qe_iowrite32be(0x4000, &uccup->dump_ptr);
+		qe_iowrite8(0, &uccup->rx_temp_dlst_qe);
+		qe_iowrite32be(0, &uccup->rx_frame_rem);
+		qe_iowrite8(0, &uccup->rx_frame_rem_size);
 		/* Soft-UART requires TX to be 1X */
-		out_8(&uccup->tx_mode,
-			UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1);
-		out_be16(&uccup->tx_state, 0);
-		out_8(&uccup->resD4, 0);
-		out_be16(&uccup->resD5, 0);
+		qe_iowrite8(UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1,
+			    &uccup->tx_mode);
+		qe_iowrite16be(0, &uccup->tx_state);
+		qe_iowrite8(0, &uccup->resD4);
+		qe_iowrite16be(0, &uccup->resD5);
 
 		/* Set UART mode.
 		 * Enable receive and transmit.
@@ -742,22 +739,19 @@ static void qe_uart_init_ucc(struct uart_qe_port *qe_port)
 		 * ...
 		 * 6.Receiver must use 16x over sampling
 		 */
-		clrsetbits_be32(&uccp->gumr_l,
-			UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
-			UCC_SLOW_GUMR_L_RDCR_MASK,
-			UCC_SLOW_GUMR_L_MODE_QMC | UCC_SLOW_GUMR_L_TDCR_16 |
-			UCC_SLOW_GUMR_L_RDCR_16);
+		qe_clrsetbits_be32(&uccp->gumr_l,
+				   UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | UCC_SLOW_GUMR_L_RDCR_MASK,
+				   UCC_SLOW_GUMR_L_MODE_QMC | UCC_SLOW_GUMR_L_TDCR_16 | UCC_SLOW_GUMR_L_RDCR_16);
 
-		clrsetbits_be32(&uccp->gumr_h,
-			UCC_SLOW_GUMR_H_RFW | UCC_SLOW_GUMR_H_RSYN,
-			UCC_SLOW_GUMR_H_SUART | UCC_SLOW_GUMR_H_TRX |
-			UCC_SLOW_GUMR_H_TTX | UCC_SLOW_GUMR_H_TFL);
+		qe_clrsetbits_be32(&uccp->gumr_h,
+				   UCC_SLOW_GUMR_H_RFW | UCC_SLOW_GUMR_H_RSYN,
+				   UCC_SLOW_GUMR_H_SUART | UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX | UCC_SLOW_GUMR_H_TFL);
 
 #ifdef LOOPBACK
-		clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK,
-				UCC_SLOW_GUMR_L_DIAG_LOOP);
-		clrbits32(&uccp->gumr_h, UCC_SLOW_GUMR_H_CTSP |
-			  UCC_SLOW_GUMR_H_CDS);
+		qe_clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK,
+				   UCC_SLOW_GUMR_L_DIAG_LOOP);
+		qe_clrbits_be32(&uccp->gumr_h,
+				UCC_SLOW_GUMR_H_CTSP | UCC_SLOW_GUMR_H_CDS);
 #endif
 
 		cecr_subblock = ucc_slow_get_qe_cr_subblock(qe_port->ucc_num);
@@ -800,7 +794,7 @@ static int qe_uart_startup(struct uart_port *port)
 	}
 
 	/* Startup rx-int */
-	setbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX);
+	qe_setbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX);
 	ucc_slow_enable(qe_port->us_private, COMM_DIR_RX_AND_TX);
 
 	return 0;
@@ -836,7 +830,7 @@ static void qe_uart_shutdown(struct uart_port *port)
 
 	/* Stop uarts */
 	ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX);
-	clrbits16(&uccp->uccm, UCC_UART_UCCE_TX | UCC_UART_UCCE_RX);
+	qe_clrbits_be16(&uccp->uccm, UCC_UART_UCCE_TX | UCC_UART_UCCE_RX);
 
 	/* Shut them really down and reinit buffer descriptors */
 	ucc_slow_graceful_stop_tx(qe_port->us_private);
@@ -856,9 +850,9 @@ static void qe_uart_set_termios(struct uart_port *port,
 	struct ucc_slow __iomem *uccp = qe_port->uccp;
 	unsigned int baud;
 	unsigned long flags;
-	u16 upsmr = in_be16(&uccp->upsmr);
+	u16 upsmr = qe_ioread16be(&uccp->upsmr);
 	struct ucc_uart_pram __iomem *uccup = qe_port->uccup;
-	u16 supsmr = in_be16(&uccup->supsmr);
+	u16 supsmr = qe_ioread16be(&uccup->supsmr);
 	u8 char_length = 2; /* 1 + CL + PEN + 1 + SL */
 
 	/* Character length programmed into the mode register is the
@@ -956,10 +950,10 @@ static void qe_uart_set_termios(struct uart_port *port,
 	/* Update the per-port timeout. */
 	uart_update_timeout(port, termios->c_cflag, baud);
 
-	out_be16(&uccp->upsmr, upsmr);
+	qe_iowrite16be(upsmr, &uccp->upsmr);
 	if (soft_uart) {
-		out_be16(&uccup->supsmr, supsmr);
-		out_8(&uccup->rx_length, char_length);
+		qe_iowrite16be(supsmr, &uccup->supsmr);
+		qe_iowrite8(char_length, &uccup->rx_length);
 
 		/* Soft-UART requires a 1X multiplier for TX */
 		qe_setbrg(qe_port->us_info.rx_clock, baud, 16);
@@ -1101,6 +1095,8 @@ static const struct uart_ops qe_uart_pops = {
 	.verify_port    = qe_uart_verify_port,
 };
 
+
+#ifdef CONFIG_PPC32
 /*
  * Obtain the SOC model number and revision level
  *
@@ -1188,70 +1184,86 @@ static void uart_firmware_cont(const struct firmware *fw, void *context)
 	release_firmware(fw);
 }
 
+static int soft_uart_init(struct platform_device *ofdev)
+{
+	struct device_node *np = ofdev->dev.of_node;
+	struct qe_firmware_info *qe_fw_info;
+	int ret;
+
+	if (of_find_property(np, "soft-uart", NULL)) {
+		dev_dbg(&ofdev->dev, "using Soft-UART mode\n");
+		soft_uart = 1;
+	} else {
+		return 0;
+	}
+
+	qe_fw_info = qe_get_firmware_info();
+
+	/* Check if the firmware has been uploaded. */
+	if (qe_fw_info && strstr(qe_fw_info->id, "Soft-UART")) {
+		firmware_loaded = 1;
+	} else {
+		char filename[32];
+		unsigned int soc;
+		unsigned int rev_h;
+		unsigned int rev_l;
+
+		soc = soc_info(&rev_h, &rev_l);
+		if (!soc) {
+			dev_err(&ofdev->dev, "unknown CPU model\n");
+			return -ENXIO;
+		}
+		sprintf(filename, "fsl_qe_ucode_uart_%u_%u%u.bin",
+			soc, rev_h, rev_l);
+
+		dev_info(&ofdev->dev, "waiting for firmware %s\n",
+			 filename);
+
+		/*
+		 * We call request_firmware_nowait instead of
+		 * request_firmware so that the driver can load and
+		 * initialize the ports without holding up the rest of
+		 * the kernel.  If hotplug support is enabled in the
+		 * kernel, then we use it.
+		 */
+		ret = request_firmware_nowait(THIS_MODULE,
+					      FW_ACTION_HOTPLUG, filename, &ofdev->dev,
+					      GFP_KERNEL, &ofdev->dev, uart_firmware_cont);
+		if (ret) {
+			dev_err(&ofdev->dev,
+				"could not load firmware %s\n",
+				filename);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+#else /* !CONFIG_PPC32 */
+
+static int soft_uart_init(struct platform_device *ofdev)
+{
+	return 0;
+}
+
+#endif
+
+
 static int ucc_uart_probe(struct platform_device *ofdev)
 {
 	struct device_node *np = ofdev->dev.of_node;
-	const unsigned int *iprop;      /* Integer OF properties */
 	const char *sprop;      /* String OF properties */
 	struct uart_qe_port *qe_port = NULL;
 	struct resource res;
+	u32 val;
 	int ret;
 
 	/*
 	 * Determine if we need Soft-UART mode
 	 */
-	if (of_find_property(np, "soft-uart", NULL)) {
-		dev_dbg(&ofdev->dev, "using Soft-UART mode\n");
-		soft_uart = 1;
-	}
-
-	/*
-	 * If we are using Soft-UART, determine if we need to upload the
-	 * firmware, too.
-	 */
-	if (soft_uart) {
-		struct qe_firmware_info *qe_fw_info;
-
-		qe_fw_info = qe_get_firmware_info();
-
-		/* Check if the firmware has been uploaded. */
-		if (qe_fw_info && strstr(qe_fw_info->id, "Soft-UART")) {
-			firmware_loaded = 1;
-		} else {
-			char filename[32];
-			unsigned int soc;
-			unsigned int rev_h;
-			unsigned int rev_l;
-
-			soc = soc_info(&rev_h, &rev_l);
-			if (!soc) {
-				dev_err(&ofdev->dev, "unknown CPU model\n");
-				return -ENXIO;
-			}
-			sprintf(filename, "fsl_qe_ucode_uart_%u_%u%u.bin",
-				soc, rev_h, rev_l);
-
-			dev_info(&ofdev->dev, "waiting for firmware %s\n",
-				filename);
-
-			/*
-			 * We call request_firmware_nowait instead of
-			 * request_firmware so that the driver can load and
-			 * initialize the ports without holding up the rest of
-			 * the kernel.  If hotplug support is enabled in the
-			 * kernel, then we use it.
-			 */
-			ret = request_firmware_nowait(THIS_MODULE,
-				FW_ACTION_HOTPLUG, filename, &ofdev->dev,
-				GFP_KERNEL, &ofdev->dev, uart_firmware_cont);
-			if (ret) {
-				dev_err(&ofdev->dev,
-					"could not load firmware %s\n",
-					filename);
-				return ret;
-			}
-		}
-	}
+	ret = soft_uart_init(ofdev);
+	if (ret)
+		return ret;
 
 	qe_port = kzalloc(sizeof(struct uart_qe_port), GFP_KERNEL);
 	if (!qe_port) {
@@ -1274,23 +1286,20 @@ static int ucc_uart_probe(struct platform_device *ofdev)
 
 	/* Get the UCC number (device ID) */
 	/* UCCs are numbered 1-7 */
-	iprop = of_get_property(np, "cell-index", NULL);
-	if (!iprop) {
-		iprop = of_get_property(np, "device-id", NULL);
-		if (!iprop) {
-			dev_err(&ofdev->dev, "UCC is unspecified in "
-				"device tree\n");
+	if (of_property_read_u32(np, "cell-index", &val)) {
+		if (of_property_read_u32(np, "device-id", &val)) {
+			dev_err(&ofdev->dev, "UCC is unspecified in device tree\n");
 			ret = -EINVAL;
 			goto out_free;
 		}
 	}
 
-	if ((*iprop < 1) || (*iprop > UCC_MAX_NUM)) {
-		dev_err(&ofdev->dev, "no support for UCC%u\n", *iprop);
+	if (val < 1 || val > UCC_MAX_NUM) {
+		dev_err(&ofdev->dev, "no support for UCC%u\n", val);
 		ret = -ENODEV;
 		goto out_free;
 	}
-	qe_port->ucc_num = *iprop - 1;
+	qe_port->ucc_num = val - 1;
 
 	/*
 	 * In the future, we should not require the BRG to be specified in the
@@ -1334,13 +1343,12 @@ static int ucc_uart_probe(struct platform_device *ofdev)
 	}
 
 	/* Get the port number, numbered 0-3 */
-	iprop = of_get_property(np, "port-number", NULL);
-	if (!iprop) {
+	if (of_property_read_u32(np, "port-number", &val)) {
 		dev_err(&ofdev->dev, "missing port-number in device tree\n");
 		ret = -EINVAL;
 		goto out_free;
 	}
-	qe_port->port.line = *iprop;
+	qe_port->port.line = val;
 	if (qe_port->port.line >= UCC_MAX_UART) {
 		dev_err(&ofdev->dev, "port-number must be 0-%u\n",
 			UCC_MAX_UART - 1);
@@ -1370,31 +1378,36 @@ static int ucc_uart_probe(struct platform_device *ofdev)
 		}
 	}
 
-	iprop = of_get_property(np, "brg-frequency", NULL);
-	if (!iprop) {
+	if (of_property_read_u32(np, "brg-frequency", &val)) {
 		dev_err(&ofdev->dev,
 		       "missing brg-frequency in device tree\n");
 		ret = -EINVAL;
 		goto out_np;
 	}
 
-	if (*iprop)
-		qe_port->port.uartclk = *iprop;
+	if (val)
+		qe_port->port.uartclk = val;
 	else {
+		if (!IS_ENABLED(CONFIG_PPC32)) {
+			dev_err(&ofdev->dev,
+				"invalid brg-frequency in device tree\n");
+			ret = -EINVAL;
+			goto out_np;
+		}
+
 		/*
 		 * Older versions of U-Boot do not initialize the brg-frequency
 		 * property, so in this case we assume the BRG frequency is
 		 * half the QE bus frequency.
 		 */
-		iprop = of_get_property(np, "bus-frequency", NULL);
-		if (!iprop) {
+		if (of_property_read_u32(np, "bus-frequency", &val)) {
 			dev_err(&ofdev->dev,
 				"missing QE bus-frequency in device tree\n");
 			ret = -EINVAL;
 			goto out_np;
 		}
-		if (*iprop)
-			qe_port->port.uartclk = *iprop / 2;
+		if (val)
+			qe_port->port.uartclk = val / 2;
 		else {
 			dev_err(&ofdev->dev,
 				"invalid QE bus-frequency in device tree\n");
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index 6d106e3..eeb4b65 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -7,10 +7,6 @@
  *  Based on drivers/serial/8250.c, by Russell King.
  */
 
-#if defined(CONFIG_SERIAL_VR41XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/console.h>
 #include <linux/errno.h>
 #include <linux/init.h>
@@ -869,6 +865,7 @@ static int siu_probe(struct platform_device *dev)
 		port = &siu_uart_ports[i];
 		port->ops = &siu_uart_ops;
 		port->dev = &dev->dev;
+		port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_VR41XX_CONSOLE);
 
 		retval = uart_add_one_port(&siu_uart_driver, port);
 		if (retval < 0) {
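
The same conversion repeats in the vt8500, xilinx_uartps and zs drivers below: the compile-time SUPPORT_SYSRQ define goes away and each port advertises SysRq support at runtime instead. A hedged sketch of the pattern; CONFIG_SERIAL_FOO_CONSOLE stands in for a driver's console Kconfig symbol:

/* Sketch only: per-port SysRq flag replacing the old SUPPORT_SYSRQ
 * define. CONFIG_SERIAL_FOO_CONSOLE is a placeholder symbol.
 */
static void example_init_port(struct uart_port *port)
{
	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_FOO_CONSOLE);
}
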
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 3d58e9b..764e992 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -7,10 +7,6 @@
  * Author: Robert Love <rlove@google.com>
  */
 
-#if defined(CONFIG_SERIAL_VT8500_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-# define SUPPORT_SYSRQ
-#endif
-
 #include <linux/hrtimer.h>
 #include <linux/delay.h>
 #include <linux/io.h>
@@ -703,6 +699,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)
 	vt8500_port->uart.line = port;
 	vt8500_port->uart.dev = &pdev->dev;
 	vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
+	vt8500_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_VT8500_CONSOLE);
 
 	/* Serial core uses the magic "16" everywhere - adjust for it */
 	vt8500_port->uart.uartclk = 16 * clk_get_rate(vt8500_port->clk) /
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 4e55bc3..98db9dc 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -9,10 +9,6 @@
  * in the code.
  */
 
-#if defined(CONFIG_SERIAL_XILINX_PS_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/platform_device.h>
 #include <linux/serial.h>
 #include <linux/console.h>
@@ -158,6 +154,16 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
 #define CDNS_UART_MODEMCR_DTR	0x00000001 /* Data Terminal Ready */
 
 /*
+ * Modem Status register:
+ * The read/write Modem Status register reports the interface with the modem
+ * or data set, or a peripheral device emulating a modem.
+ */
+#define CDNS_UART_MODEMSR_DCD	BIT(7) /* Data Carrier Detect */
+#define CDNS_UART_MODEMSR_RI	BIT(6) /* Ring Indicator */
+#define CDNS_UART_MODEMSR_DSR	BIT(5) /* Data Set Ready */
+#define CDNS_UART_MODEMSR_CTS	BIT(4) /* Clear To Send */
+
+/*
  * Channel Status Register:
  * The channel status register (CSR) is provided to enable the control logic
  * to monitor the status of bits in the channel interrupt status register,
@@ -684,7 +690,7 @@ static void cdns_uart_break_ctl(struct uart_port *port, int ctl)
 static void cdns_uart_set_termios(struct uart_port *port,
 				struct ktermios *termios, struct ktermios *old)
 {
-	unsigned int cval = 0;
+	u32 cval = 0;
 	unsigned int baud, minbaud, maxbaud;
 	unsigned long flags;
 	unsigned int ctrl_reg, mode_reg, val;
@@ -805,6 +811,13 @@ static void cdns_uart_set_termios(struct uart_port *port,
 	cval |= mode_reg & 1;
 	writel(cval, port->membase + CDNS_UART_MR);
 
+	cval = readl(port->membase + CDNS_UART_MODEMCR);
+	if (termios->c_cflag & CRTSCTS)
+		cval |= CDNS_UART_MODEMCR_FCM;
+	else
+		cval &= ~CDNS_UART_MODEMCR_FCM;
+	writel(cval, port->membase + CDNS_UART_MODEMCR);
+
 	spin_unlock_irqrestore(&port->lock, flags);
 }
 
@@ -1007,12 +1020,24 @@ static void cdns_uart_config_port(struct uart_port *port, int flags)
  */
 static unsigned int cdns_uart_get_mctrl(struct uart_port *port)
 {
+	u32 val;
+	unsigned int mctrl = 0;
 	struct cdns_uart *cdns_uart_data = port->private_data;
 
 	if (cdns_uart_data->cts_override)
-		return 0;
+		return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
 
-	return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+	val = readl(port->membase + CDNS_UART_MODEMSR);
+	if (val & CDNS_UART_MODEMSR_CTS)
+		mctrl |= TIOCM_CTS;
+	if (val & CDNS_UART_MODEMSR_DSR)
+		mctrl |= TIOCM_DSR;
+	if (val & CDNS_UART_MODEMSR_RI)
+		mctrl |= TIOCM_RNG;
+	if (val & CDNS_UART_MODEMSR_DCD)
+		mctrl |= TIOCM_CAR;
+
+	return mctrl;
 }
 
 static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -1027,12 +1052,13 @@ static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
 	val = readl(port->membase + CDNS_UART_MODEMCR);
 	mode_reg = readl(port->membase + CDNS_UART_MR);
 
-	val &= ~(CDNS_UART_MODEMCR_RTS | CDNS_UART_MODEMCR_DTR |
-		 CDNS_UART_MODEMCR_FCM);
+	val &= ~(CDNS_UART_MODEMCR_RTS | CDNS_UART_MODEMCR_DTR);
 	mode_reg &= ~CDNS_UART_MR_CHMODE_MASK;
 
-	if (mctrl & TIOCM_RTS || mctrl & TIOCM_DTR)
-		val |= CDNS_UART_MODEMCR_FCM;
+	if (mctrl & TIOCM_RTS)
+		val |= CDNS_UART_MODEMCR_RTS;
+	if (mctrl & TIOCM_DTR)
+		val |= CDNS_UART_MODEMCR_DTR;
 	if (mctrl & TIOCM_LOOP)
 		mode_reg |= CDNS_UART_MR_CHMODE_L_LOOP;
 	else
@@ -1634,6 +1660,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
 	port->flags	= UPF_BOOT_AUTOCONF;
 	port->ops	= &cdns_uart_ops;
 	port->fifosize	= CDNS_UART_FIFO_SIZE;
+	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_XILINX_PS_UART_CONSOLE);
 
 	/*
 	 * Register the port.
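
With the xilinx_uartps change above, cdns_uart_get_mctrl() reports real modem-status bits instead of hard-coded ones. A small userspace check of the effect, assuming /dev/ttyPS0 is such a port; purely illustrative and not part of the patch:

/* Sketch only: query modem lines via TIOCMGET on a hypothetical port. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int bits, fd = open("/dev/ttyPS0", O_RDONLY | O_NOCTTY);

	if (fd < 0 || ioctl(fd, TIOCMGET, &bits) < 0)
		return 1;
	printf("CTS=%d DSR=%d DCD=%d RI=%d\n",
	       !!(bits & TIOCM_CTS), !!(bits & TIOCM_DSR),
	       !!(bits & TIOCM_CAR), !!(bits & TIOCM_RNG));
	close(fd);
	return 0;
}
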
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 89154ac..4b4f604 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -44,10 +44,6 @@
  * complicated and prevents the use of some automatic modes of operation.
  */
 
-#if defined(CONFIG_SERIAL_ZS_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/bug.h>
 #include <linux/console.h>
 #include <linux/delay.h>
@@ -1106,6 +1102,7 @@ static int __init zs_probe_sccs(void)
 			zport->scc	= &zs_sccs[chip];
 			zport->clk_mode	= 16;
 
+			uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ZS_CONSOLE);
 			uport->irq	= zs_parms.irq[chip];
 			uport->uartclk	= ZS_CLOCK;
 			uport->fifosize	= 1;
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 5759b6c..b794177 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1334,10 +1334,10 @@ static void throttle(struct tty_struct * tty)
 	DBGINFO(("%s throttle\n", info->device_name));
 	if (I_IXOFF(tty))
 		send_xchar(tty, STOP_CHAR(tty));
- 	if (C_CRTSCTS(tty)) {
+	if (C_CRTSCTS(tty)) {
 		spin_lock_irqsave(&info->lock,flags);
 		info->signals &= ~SerialSignal_RTS;
-	 	set_signals(info);
+		set_signals(info);
 		spin_unlock_irqrestore(&info->lock,flags);
 	}
 }
@@ -1359,10 +1359,10 @@ static void unthrottle(struct tty_struct * tty)
 		else
 			send_xchar(tty, START_CHAR(tty));
 	}
- 	if (C_CRTSCTS(tty)) {
+	if (C_CRTSCTS(tty)) {
 		spin_lock_irqsave(&info->lock,flags);
 		info->signals |= SerialSignal_RTS;
-	 	set_signals(info);
+		set_signals(info);
 		spin_unlock_irqrestore(&info->lock,flags);
 	}
 }
@@ -2098,7 +2098,7 @@ static void isr_rxdata(struct slgt_info *info)
 		if (desc_complete(info->rbufs[i])) {
 			/* all buffers full */
 			rx_stop(info);
-			info->rx_restart = 1;
+			info->rx_restart = true;
 			continue;
 		}
 		info->rbufs[i].buf[count++] = (unsigned char)reg;
@@ -2560,8 +2560,8 @@ static void change_params(struct slgt_info *info)
 	info->read_status_mask = IRQ_RXOVER;
 	if (I_INPCK(info->port.tty))
 		info->read_status_mask |= MASK_PARITY | MASK_FRAMING;
- 	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
- 		info->read_status_mask |= MASK_BREAK;
+	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
+		info->read_status_mask |= MASK_BREAK;
 	if (I_IGNPAR(info->port.tty))
 		info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING;
 	if (I_IGNBRK(info->port.tty)) {
@@ -3192,7 +3192,7 @@ static int tiocmset(struct tty_struct *tty,
 		info->signals &= ~SerialSignal_DTR;
 
 	spin_lock_irqsave(&info->lock,flags);
- 	set_signals(info);
+	set_signals(info);
 	spin_unlock_irqrestore(&info->lock,flags);
 	return 0;
 }
@@ -3203,7 +3203,7 @@ static int carrier_raised(struct tty_port *port)
 	struct slgt_info *info = container_of(port, struct slgt_info, port);
 
 	spin_lock_irqsave(&info->lock,flags);
- 	get_signals(info);
+	get_signals(info);
 	spin_unlock_irqrestore(&info->lock,flags);
 	return (info->signals & SerialSignal_DCD) ? 1 : 0;
 }
@@ -3218,7 +3218,7 @@ static void dtr_rts(struct tty_port *port, int on)
 		info->signals |= SerialSignal_RTS | SerialSignal_DTR;
 	else
 		info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- 	set_signals(info);
+	set_signals(info);
 	spin_unlock_irqrestore(&info->lock,flags);
 }
 
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 7446b03..33ff2db 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -1453,10 +1453,10 @@ static void throttle(struct tty_struct * tty)
 	if (I_IXOFF(tty))
 		send_xchar(tty, STOP_CHAR(tty));
 
- 	if (C_CRTSCTS(tty)) {
+	if (C_CRTSCTS(tty)) {
 		spin_lock_irqsave(&info->lock,flags);
 		info->serial_signals &= ~SerialSignal_RTS;
-	 	set_signals(info);
+		set_signals(info);
 		spin_unlock_irqrestore(&info->lock,flags);
 	}
 }
@@ -1482,10 +1482,10 @@ static void unthrottle(struct tty_struct * tty)
 			send_xchar(tty, START_CHAR(tty));
 	}
 
- 	if (C_CRTSCTS(tty)) {
+	if (C_CRTSCTS(tty)) {
 		spin_lock_irqsave(&info->lock,flags);
 		info->serial_signals |= SerialSignal_RTS;
-	 	set_signals(info);
+		set_signals(info);
 		spin_unlock_irqrestore(&info->lock,flags);
 	}
 }
@@ -2470,7 +2470,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status )
 					if (status & SerialSignal_CTS) {
 						if ( debug_level >= DEBUG_LEVEL_ISR )
 							printk("CTS tx start...");
-			 			info->port.tty->hw_stopped = 0;
+						info->port.tty->hw_stopped = 0;
 						tx_start(info);
 						info->pending_bh |= BH_TRANSMIT;
 						return;
@@ -2479,7 +2479,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status )
 					if (!(status & SerialSignal_CTS)) {
 						if ( debug_level >= DEBUG_LEVEL_ISR )
 							printk("CTS tx stop...");
-			 			info->port.tty->hw_stopped = 1;
+						info->port.tty->hw_stopped = 1;
 						tx_stop(info);
 					}
 				}
@@ -2806,8 +2806,8 @@ static void change_params(SLMP_INFO *info)
 	info->read_status_mask2 = OVRN;
 	if (I_INPCK(info->port.tty))
 		info->read_status_mask2 |= PE | FRME;
- 	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
- 		info->read_status_mask1 |= BRKD;
+	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
+		info->read_status_mask1 |= BRKD;
 	if (I_IGNPAR(info->port.tty))
 		info->ignore_status_mask2 |= PE | FRME;
 	if (I_IGNBRK(info->port.tty)) {
@@ -3177,7 +3177,7 @@ static int tiocmget(struct tty_struct *tty)
  	unsigned long flags;
 
 	spin_lock_irqsave(&info->lock,flags);
- 	get_signals(info);
+	get_signals(info);
 	spin_unlock_irqrestore(&info->lock,flags);
 
 	result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS : 0) |
@@ -3215,7 +3215,7 @@ static int tiocmset(struct tty_struct *tty,
 		info->serial_signals &= ~SerialSignal_DTR;
 
 	spin_lock_irqsave(&info->lock,flags);
- 	set_signals(info);
+	set_signals(info);
 	spin_unlock_irqrestore(&info->lock,flags);
 
 	return 0;
@@ -3227,7 +3227,7 @@ static int carrier_raised(struct tty_port *port)
 	unsigned long flags;
 
 	spin_lock_irqsave(&info->lock,flags);
- 	get_signals(info);
+	get_signals(info);
 	spin_unlock_irqrestore(&info->lock,flags);
 
 	return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
@@ -3243,7 +3243,7 @@ static void dtr_rts(struct tty_port *port, int on)
 		info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
 	else
 		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- 	set_signals(info);
+	set_signals(info);
 	spin_unlock_irqrestore(&info->lock,flags);
 }
 
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 573b205..f724962 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -967,8 +967,6 @@ static struct input_handler sysrq_handler = {
 	.id_table	= sysrq_ids,
 };
 
-static bool sysrq_handler_registered;
-
 static inline void sysrq_register_handler(void)
 {
 	int error;
@@ -978,16 +976,11 @@ static inline void sysrq_register_handler(void)
 	error = input_register_handler(&sysrq_handler);
 	if (error)
 		pr_err("Failed to register input handler, error %d", error);
-	else
-		sysrq_handler_registered = true;
 }
 
 static inline void sysrq_unregister_handler(void)
 {
-	if (sysrq_handler_registered) {
-		input_unregister_handler(&sysrq_handler);
-		sysrq_handler_registered = false;
-	}
+	input_unregister_handler(&sysrq_handler);
 }
 
 static int sysrq_reset_seq_param_set(const char *buffer,
@@ -1108,15 +1101,15 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
 	return count;
 }
 
-static const struct file_operations proc_sysrq_trigger_operations = {
-	.write		= write_sysrq_trigger,
-	.llseek		= noop_llseek,
+static const struct proc_ops sysrq_trigger_proc_ops = {
+	.proc_write	= write_sysrq_trigger,
+	.proc_lseek	= noop_llseek,
 };
 
 static void sysrq_init_procfs(void)
 {
 	if (!proc_create("sysrq-trigger", S_IWUSR, NULL,
-			 &proc_sysrq_trigger_operations))
+			 &sysrq_trigger_proc_ops))
 		pr_err("Failed to register proc interface\n");
 }
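
The sysrq-trigger entry above migrates from struct file_operations to the dedicated struct proc_ops used for procfs entries. A minimal sketch of a write-only proc entry with the new ops; the function and entry names are hypothetical:

/* Sketch only: write-only procfs entry using struct proc_ops.
 * example_write() and "example-trigger" are hypothetical.
 */
static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	return count;			/* accept and discard input */
}

static const struct proc_ops example_proc_ops = {
	.proc_write	= example_write,
	.proc_lseek	= noop_llseek,
};

static int __init example_procfs_init(void)
{
	if (!proc_create("example-trigger", 0200, NULL, &example_proc_ops))
		return -ENOMEM;
	return 0;
}
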
 
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index f438eaa..40207ca 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -17,32 +17,28 @@
  * include/asm/termbits.h file.
  */
 static const speed_t baud_table[] = {
-	0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
-	9600, 19200, 38400, 57600, 115200, 230400, 460800,
+	0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400,
+	4800, 9600, 19200, 38400, 57600, 115200, 230400, 460800,
 #ifdef __sparc__
-	76800, 153600, 307200, 614400, 921600
+	76800, 153600, 307200, 614400, 921600, 500000, 576000,
+	1000000, 1152000, 1500000, 2000000
 #else
 	500000, 576000, 921600, 1000000, 1152000, 1500000, 2000000,
 	2500000, 3000000, 3500000, 4000000
 #endif
 };
 
-#ifndef __sparc__
 static const tcflag_t baud_bits[] = {
-	B0, B50, B75, B110, B134, B150, B200, B300, B600,
-	B1200, B1800, B2400, B4800, B9600, B19200, B38400,
-	B57600, B115200, B230400, B460800, B500000, B576000,
-	B921600, B1000000, B1152000, B1500000, B2000000, B2500000,
-	B3000000, B3500000, B4000000
-};
+	B0, B50, B75, B110, B134, B150, B200, B300, B600, B1200, B1800, B2400,
+	B4800, B9600, B19200, B38400, B57600, B115200, B230400, B460800,
+#ifdef __sparc__
+	B76800, B153600, B307200, B614400, B921600, B500000, B576000,
+	B1000000, B1152000, B1500000, B2000000
 #else
-static const tcflag_t baud_bits[] = {
-	B0, B50, B75, B110, B134, B150, B200, B300, B600,
-	B1200, B1800, B2400, B4800, B9600, B19200, B38400,
-	B57600, B115200, B230400, B460800, B76800, B153600,
-	B307200, B614400, B921600
-};
+	B500000, B576000, B921600, B1000000, B1152000, B1500000, B2000000,
+	B2500000, B3000000, B3500000, B4000000
 #endif
+};
 
 static int n_baud_table = ARRAY_SIZE(baud_table);
 
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index d9f54c7..a1453fe 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1893,7 +1893,7 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
 struct tty_struct *tty_kopen(dev_t device)
 {
 	struct tty_struct *tty;
-	struct tty_driver *driver = NULL;
+	struct tty_driver *driver;
 	int index = -1;
 
 	mutex_lock(&tty_mutex);
diff --git a/drivers/tty/vt/.gitignore b/drivers/tty/vt/.gitignore
index 9b38b85..3ecf422 100644
--- a/drivers/tty/vt/.gitignore
+++ b/drivers/tty/vt/.gitignore
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
+conmakehash
 consolemap_deftbl.c
 defkeymap.c
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
index edbbe0c..329ca33 100644
--- a/drivers/tty/vt/Makefile
+++ b/drivers/tty/vt/Makefile
@@ -12,10 +12,12 @@
 # Files generated that shall be removed upon make clean
 clean-files := consolemap_deftbl.c defkeymap.c
 
-quiet_cmd_conmk = CONMK   $@
-      cmd_conmk = scripts/conmakehash $< > $@
+hostprogs-y += conmakehash
 
-$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE)
+quiet_cmd_conmk = CONMK   $@
+      cmd_conmk = $(obj)/conmakehash $< > $@
+
+$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE) $(obj)/conmakehash
 	$(call cmd,conmk)
 
 $(obj)/defkeymap.o:  $(obj)/defkeymap.c
diff --git a/scripts/conmakehash.c b/drivers/tty/vt/conmakehash.c
similarity index 100%
rename from scripts/conmakehash.c
rename to drivers/tty/vt/conmakehash.c
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 34aa39d..35d21cd 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3326,8 +3326,9 @@ static int __init con_init(void)
 
 	console_lock();
 
-	if (conswitchp)
-		display_desc = conswitchp->con_startup();
+	if (!conswitchp)
+		conswitchp = &dummy_con;
+	display_desc = conswitchp->con_startup();
 	if (!display_desc) {
 		fg_console = 0;
 		console_unlock();
@@ -3567,7 +3568,6 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last,
 
 
 #ifdef CONFIG_VT_HW_CONSOLE_BINDING
-/* unlocked version of unbind_con_driver() */
 int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
 {
 	struct module *owner = csw->owner;
@@ -4095,7 +4095,7 @@ static void con_driver_unregister_callback(struct work_struct *ignored)
  *	when a driver wants to take over some existing consoles
  *	and become default driver for newly opened ones.
  *
- *	do_take_over_console is basically a register followed by unbind
+ *	do_take_over_console is basically a register followed by bind
  */
 int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
 {
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 81c88f7b..f6ab3f2 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -132,11 +132,13 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
 	if (irq_on) {
 		if (test_and_clear_bit(0, &priv->flags))
 			enable_irq(dev_info->irq);
+		spin_unlock_irqrestore(&priv->lock, flags);
 	} else {
-		if (!test_and_set_bit(0, &priv->flags))
+		if (!test_and_set_bit(0, &priv->flags)) {
+			spin_unlock_irqrestore(&priv->lock, flags);
 			disable_irq(dev_info->irq);
+		}
 	}
-	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return 0;
 }
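
The uio_dmem_genirq fix above releases priv->lock before calling disable_irq(), since disable_irq() can sleep waiting for a running handler while that handler may take the same lock. A hedged sketch of the ordering with a hypothetical example_priv structure; note the lock must be dropped on every path:

/* Sketch only: decide under the lock, then call disable_irq() with the
 * lock released. struct example_priv is hypothetical (flags is an
 * unsigned long bitmap, lock a spinlock).
 */
static void example_mask_irq(struct example_priv *priv, unsigned int irq)
{
	unsigned long flags;
	bool do_disable;

	spin_lock_irqsave(&priv->lock, flags);
	do_disable = !test_and_set_bit(0, &priv->flags);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (do_disable)
		disable_irq(irq);	/* may sleep; no spinlock held */
}
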
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 1303b165..fc25ce9 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -156,6 +156,8 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
 		uioinfo->irq = ret;
 		if (ret == -ENXIO && pdev->dev.of_node)
 			uioinfo->irq = UIO_IRQ_NONE;
+		else if (ret == -EPROBE_DEFER)
+			return ret;
 		else if (ret < 0) {
 			dev_err(&pdev->dev, "failed to get IRQ\n");
 			return ret;
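
The uio_pdrv_genirq change above forwards -EPROBE_DEFER from the IRQ lookup instead of treating it as a fatal error, so the probe is retried once the interrupt provider appears. A minimal sketch of the pattern:

/* Sketch only: propagate -EPROBE_DEFER from the IRQ lookup; only other
 * failures are reported as errors.
 */
static int example_get_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0) {
		if (irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get IRQ\n");
		return irq;
	}
	return irq;
}
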
diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig
index 2a1e89d1..84716d2 100644
--- a/drivers/usb/cdns3/Kconfig
+++ b/drivers/usb/cdns3/Kconfig
@@ -53,4 +53,14 @@
 
 	  e.g. J721e.
 
+config USB_CDNS3_IMX
+	tristate "Cadence USB3 support on NXP i.MX platforms"
+	depends on ARCH_MXC || COMPILE_TEST
+	default USB_CDNS3
+	help
+	  Say 'Y' or 'M' here if you are building for NXP i.MX
+	  platforms that contain the Cadence USB3 controller core.
+
+	  For example, imx8qm and imx8qxp.
+
 endif
diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile
index 948e6b8..d47e341 100644
--- a/drivers/usb/cdns3/Makefile
+++ b/drivers/usb/cdns3/Makefile
@@ -15,3 +15,4 @@
 
 obj-$(CONFIG_USB_CDNS3_PCI_WRAP)	+= cdns3-pci-wrap.o
 obj-$(CONFIG_USB_CDNS3_TI)		+= cdns3-ti.o
+obj-$(CONFIG_USB_CDNS3_IMX)		+= cdns3-imx.o
diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
new file mode 100644
index 0000000..aba988e
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-imx.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * cdns3-imx.c - NXP i.MX specific Glue layer for Cadence USB Controller
+ *
+ * Copyright (C) 2019 NXP
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/iopoll.h>
+
+#define USB3_CORE_CTRL1    0x00
+#define USB3_CORE_CTRL2    0x04
+#define USB3_INT_REG       0x08
+#define USB3_CORE_STATUS   0x0c
+#define XHCI_DEBUG_LINK_ST 0x10
+#define XHCI_DEBUG_BUS     0x14
+#define USB3_SSPHY_CTRL1   0x40
+#define USB3_SSPHY_CTRL2   0x44
+#define USB3_SSPHY_STATUS  0x4c
+#define USB2_PHY_CTRL1     0x50
+#define USB2_PHY_CTRL2     0x54
+#define USB2_PHY_STATUS    0x5c
+
+/* Register bits definition */
+
+/* USB3_CORE_CTRL1 */
+#define SW_RESET_MASK	(0x3f << 26)
+#define PWR_SW_RESET	BIT(31)
+#define APB_SW_RESET	BIT(30)
+#define AXI_SW_RESET	BIT(29)
+#define RW_SW_RESET	BIT(28)
+#define PHY_SW_RESET	BIT(27)
+#define PHYAHB_SW_RESET	BIT(26)
+#define ALL_SW_RESET	(PWR_SW_RESET | APB_SW_RESET | AXI_SW_RESET | \
+		RW_SW_RESET | PHY_SW_RESET | PHYAHB_SW_RESET)
+#define OC_DISABLE	BIT(9)
+#define MDCTRL_CLK_SEL	BIT(7)
+#define MODE_STRAP_MASK	(0x7)
+#define DEV_MODE	(1 << 2)
+#define HOST_MODE	(1 << 1)
+#define OTG_MODE	(1 << 0)
+
+/* USB3_INT_REG */
+#define CLK_125_REQ	BIT(29)
+#define LPM_CLK_REQ	BIT(28)
+#define DEVU3_WAEKUP_EN	BIT(14)
+#define OTG_WAKEUP_EN	BIT(12)
+#define DEV_INT_EN (3 << 8) /* DEV INT b9:8 */
+#define HOST_INT1_EN (1 << 0) /* HOST INT b7:0 */
+
+/* USB3_CORE_STATUS */
+#define MDCTRL_CLK_STATUS	BIT(15)
+#define DEV_POWER_ON_READY	BIT(13)
+#define HOST_POWER_ON_READY	BIT(12)
+
+/* USB3_SSPHY_STATUS */
+#define CLK_VALID_MASK		(0x3f << 26)
+#define CLK_VALID_COMPARE_BITS	(0xf << 28)
+#define PHY_REFCLK_REQ		(1 << 0)
+
+struct cdns_imx {
+	struct device *dev;
+	void __iomem *noncore;
+	struct clk_bulk_data *clks;
+	int num_clks;
+};
+
+static inline u32 cdns_imx_readl(struct cdns_imx *data, u32 offset)
+{
+	return readl(data->noncore + offset);
+}
+
+static inline void cdns_imx_writel(struct cdns_imx *data, u32 offset, u32 value)
+{
+	writel(value, data->noncore + offset);
+}
+
+static const struct clk_bulk_data imx_cdns3_core_clks[] = {
+	{ .id = "usb3_lpm_clk" },
+	{ .id = "usb3_bus_clk" },
+	{ .id = "usb3_aclk" },
+	{ .id = "usb3_ipg_clk" },
+	{ .id = "usb3_core_pclk" },
+};
+
+static int cdns_imx_noncore_init(struct cdns_imx *data)
+{
+	u32 value;
+	int ret;
+	struct device *dev = data->dev;
+
+	cdns_imx_writel(data, USB3_SSPHY_STATUS, CLK_VALID_MASK);
+	udelay(1);
+	ret = readl_poll_timeout(data->noncore + USB3_SSPHY_STATUS, value,
+		(value & CLK_VALID_COMPARE_BITS) == CLK_VALID_COMPARE_BITS,
+		10, 100000);
+	if (ret) {
+		dev_err(dev, "wait clkvld timeout\n");
+		return ret;
+	}
+
+	value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+	value |= ALL_SW_RESET;
+	cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+	udelay(1);
+
+	value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+	value = (value & ~MODE_STRAP_MASK) | OTG_MODE | OC_DISABLE;
+	cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+
+	value = cdns_imx_readl(data, USB3_INT_REG);
+	value |= HOST_INT1_EN | DEV_INT_EN;
+	cdns_imx_writel(data, USB3_INT_REG, value);
+
+	value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+	value &= ~ALL_SW_RESET;
+	cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+	return ret;
+}
+
+static int cdns_imx_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct cdns_imx *data;
+	int ret;
+
+	if (!node)
+		return -ENODEV;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, data);
+	data->dev = dev;
+	data->noncore = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(data->noncore)) {
+		dev_err(dev, "can't map IOMEM resource\n");
+		return PTR_ERR(data->noncore);
+	}
+
+	data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
+	data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+	ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
+	if (ret)
+		return ret;
+
+	ret = clk_bulk_prepare_enable(data->num_clks, data->clks);
+	if (ret)
+		return ret;
+
+	ret = cdns_imx_noncore_init(data);
+	if (ret)
+		goto err;
+
+	ret = of_platform_populate(node, NULL, NULL, dev);
+	if (ret) {
+		dev_err(dev, "failed to create children: %d\n", ret);
+		goto err;
+	}
+
+	return ret;
+
+err:
+	clk_bulk_disable_unprepare(data->num_clks, data->clks);
+	return ret;
+}
+
+static int cdns_imx_remove_core(struct device *dev, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+
+	platform_device_unregister(pdev);
+
+	return 0;
+}
+
+static int cdns_imx_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	device_for_each_child(dev, NULL, cdns_imx_remove_core);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id cdns_imx_of_match[] = {
+	{ .compatible = "fsl,imx8qm-usb3", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, cdns_imx_of_match);
+
+static struct platform_driver cdns_imx_driver = {
+	.probe		= cdns_imx_probe,
+	.remove		= cdns_imx_remove,
+	.driver		= {
+		.name	= "cdns3-imx",
+		.of_match_table	= cdns_imx_of_match,
+	},
+};
+module_platform_driver(cdns_imx_driver);
+
+MODULE_ALIAS("platform:cdns3-imx");
+MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 i.MX Glue Layer");
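
The new cdns3-imx glue driver above manages its five core clocks through the clk_bulk API, so they are acquired and enabled as one set. A hedged sketch of that pattern with hypothetical clock ids:

/* Sketch only: acquire and enable a group of clocks with the clk_bulk
 * helpers. "bus" and "ipg" are hypothetical clock ids.
 */
static struct clk_bulk_data example_clks[] = {
	{ .id = "bus" },
	{ .id = "ipg" },
};

static int example_enable_clocks(struct device *dev)
{
	int ret;

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(example_clks), example_clks);
	if (ret)
		return ret;

	return clk_bulk_prepare_enable(ARRAY_SIZE(example_clks), example_clks);
}
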
diff --git a/drivers/usb/cdns3/debug.h b/drivers/usb/cdns3/debug.h
index 2c9afbf..a5c6a29 100644
--- a/drivers/usb/cdns3/debug.h
+++ b/drivers/usb/cdns3/debug.h
@@ -140,7 +140,7 @@ static inline char *cdns3_dbg_ring(struct cdns3_endpoint *priv_ep,
 		trb_per_sector = TRBS_PER_SEGMENT;
 
 	if (trb_per_sector > TRBS_PER_SEGMENT) {
-		sprintf(str + ret, "\t\tTo big transfer ring %d\n",
+		sprintf(str + ret, "\t\tTransfer ring %d too big\n",
 			trb_per_sector);
 		return str;
 	}
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 02f6ca2..736b0c6 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -71,6 +71,23 @@ static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
 				   struct usb_request *request,
 				   gfp_t gfp_flags);
 
+static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+				 struct usb_request *request);
+
+static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
+					struct usb_request *request);
+
+/**
+ * cdns3_clear_register_bit - clear bit in given register.
+ * @ptr: address of device controller register to be read and changed
+ * @mask: bits requested to clear
+ */
+void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
+{
+	mask = readl(ptr) & ~mask;
+	writel(mask, ptr);
+}
+
 /**
  * cdns3_set_register_bit - set bit in given register.
  * @ptr: address of device controller register to be read and changed
@@ -150,6 +167,21 @@ void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
 	writel(ep, &priv_dev->regs->ep_sel);
 }
 
+/**
+ * cdns3_get_tdl - gets current tdl for selected endpoint.
+ * @priv_dev:  extended gadget object
+ *
+ * Before calling this function the appropriate endpoint must
+ * be selected by means of cdns3_select_ep function.
+ */
+static int cdns3_get_tdl(struct cdns3_device *priv_dev)
+{
+	if (priv_dev->dev_ver < DEV_VER_V3)
+		return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+	else
+		return readl(&priv_dev->regs->ep_tdl);
+}
+
 dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
 				 struct cdns3_trb *trb)
 {
@@ -166,7 +198,22 @@ int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
 	case USB_ENDPOINT_XFER_CONTROL:
 		return TRB_CTRL_RING_SIZE;
 	default:
-		return TRB_RING_SIZE;
+		if (priv_ep->use_streams)
+			return TRB_STREAM_RING_SIZE;
+		else
+			return TRB_RING_SIZE;
+	}
+}
+
+static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
+{
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+	if (priv_ep->trb_pool) {
+		dma_free_coherent(priv_dev->sysdev,
+				  cdns3_ring_size(priv_ep),
+				  priv_ep->trb_pool, priv_ep->trb_pool_dma);
+		priv_ep->trb_pool = NULL;
 	}
 }
 
@@ -180,8 +227,12 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
 {
 	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
 	int ring_size = cdns3_ring_size(priv_ep);
+	int num_trbs = ring_size / TRB_SIZE;
 	struct cdns3_trb *link_trb;
 
+	if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
+		cdns3_free_trb_pool(priv_ep);
+
 	if (!priv_ep->trb_pool) {
 		priv_ep->trb_pool = dma_alloc_coherent(priv_dev->sysdev,
 						       ring_size,
@@ -189,32 +240,30 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
 						       GFP_DMA32 | GFP_ATOMIC);
 		if (!priv_ep->trb_pool)
 			return -ENOMEM;
-	} else {
+
+		priv_ep->alloc_ring_size = ring_size;
 		memset(priv_ep->trb_pool, 0, ring_size);
 	}
 
+	priv_ep->num_trbs = num_trbs;
+
 	if (!priv_ep->num)
 		return 0;
 
-	priv_ep->num_trbs = ring_size / TRB_SIZE;
-	/* Initialize the last TRB as Link TRB. */
+	/* Initialize the last TRB as Link TRB */
 	link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));
-	link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
-	link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;
 
-	return 0;
-}
-
-static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
-{
-	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
-
-	if (priv_ep->trb_pool) {
-		dma_free_coherent(priv_dev->sysdev,
-				  cdns3_ring_size(priv_ep),
-				  priv_ep->trb_pool, priv_ep->trb_pool_dma);
-		priv_ep->trb_pool = NULL;
+	if (priv_ep->use_streams) {
+		/*
+		 * For stream capable endpoints the driver uses a single TRB.
+		 * The last TRB has a zeroed cycle bit.
+		 */
+		link_trb->control = 0;
+	} else {
+		link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
+		link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;
 	}
+	return 0;
 }
 
 /**
@@ -253,6 +302,7 @@ void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
 	priv_dev->onchip_used_size = 0;
 	priv_dev->out_mem_is_allocated = 0;
 	priv_dev->wait_for_setup = 0;
+	priv_dev->using_streams = 0;
 }
 
 /**
@@ -356,17 +406,43 @@ static int cdns3_start_all_request(struct cdns3_device *priv_dev,
 {
 	struct usb_request *request;
 	int ret = 0;
+	u8 pending_empty = list_empty(&priv_ep->pending_req_list);
+
+	/*
+	 * If the last pending transfer is INTERNAL,
+	 * or streams are enabled for this endpoint,
+	 * do NOT start a new transfer while the last one is still pending.
+	 */
+	if (!pending_empty) {
+		struct cdns3_request *priv_req;
+
+		request = cdns3_next_request(&priv_ep->pending_req_list);
+		priv_req = to_cdns3_request(request);
+		if ((priv_req->flags & REQUEST_INTERNAL) ||
+		    (priv_ep->flags & EP_TDLCHK_EN) ||
+			priv_ep->use_streams) {
+			trace_printk("Blocking external request\n");
+			return ret;
+		}
+	}
 
 	while (!list_empty(&priv_ep->deferred_req_list)) {
 		request = cdns3_next_request(&priv_ep->deferred_req_list);
 
-		ret = cdns3_ep_run_transfer(priv_ep, request);
+		if (!priv_ep->use_streams) {
+			ret = cdns3_ep_run_transfer(priv_ep, request);
+		} else {
+			priv_ep->stream_sg_idx = 0;
+			ret = cdns3_ep_run_stream_transfer(priv_ep, request);
+		}
 		if (ret)
 			return ret;
 
 		list_del(&request->list);
 		list_add_tail(&request->list,
 			      &priv_ep->pending_req_list);
+		if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
+			break;
 	}
 
 	priv_ep->flags &= ~EP_RING_FULL;
@@ -379,7 +455,7 @@ static int cdns3_start_all_request(struct cdns3_device *priv_dev,
  * buffer for unblocking on-chip FIFO buffer. This flag will be cleared
  * if before first DESCMISS interrupt the DMA will be armed.
  */
-#define cdns3_wa2_enable_detection(priv_dev, ep_priv, reg) do { \
+#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
 	if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
 		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
 		(reg) |= EP_STS_EN_DESCMISEN; \
@@ -450,10 +526,17 @@ struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
 		if (!req)
 			return NULL;
 
+		/* unmap the gadget request before copying data */
+		usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
+						priv_ep->dir);
+
 		cdns3_wa2_descmiss_copy_data(priv_ep, req);
 		if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
 		    req->length != req->actual) {
 			/* wait for next part of transfer */
+			/* re-map the gadget request buffer */
+			usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
+				usb_endpoint_dir_in(priv_ep->endpoint.desc));
 			return NULL;
 		}
 
@@ -570,6 +653,13 @@ static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
 {
 	struct cdns3_request *priv_req;
 	struct usb_request *request;
+	u8 pending_empty = list_empty(&priv_ep->pending_req_list);
+
+	/* check for pending transfer */
+	if (!pending_empty) {
+		trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n");
+		return;
+	}
 
 	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
 		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
@@ -578,8 +668,10 @@ static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
 
 	trace_cdns3_wa2(priv_ep, "Description Missing detected\n");
 
-	if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS)
+	if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) {
+		trace_cdns3_wa2(priv_ep, "WA2 overflow\n");
 		cdns3_wa2_remove_old_request(priv_ep);
+	}
 
 	request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
 						GFP_ATOMIC);
@@ -621,6 +713,78 @@ static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
 		"Failed: No sufficient memory for DESCMIS\n");
 }
 
+static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev)
+{
+	u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+
+	if (tdl) {
+		u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl;
+
+		writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL,
+		       &priv_dev->regs->ep_cmd);
+	}
+}
+
+static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev)
+{
+	u32 ep_sts_reg;
+
+	/* select EP0-out */
+	cdns3_select_ep(priv_dev, 0);
+
+	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
+
+	if (EP_STS_OUTQ_VAL(ep_sts_reg)) {
+		u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg);
+		struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num];
+
+		if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) &&
+		    outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) {
+			u8 pending_empty = list_empty(&outq_ep->pending_req_list);
+
+			if ((outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) ||
+			    (outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) ||
+			    !pending_empty) {
+			} else {
+				u32 ep_sts_en_reg;
+				u32 ep_cmd_reg;
+
+				cdns3_select_ep(priv_dev, outq_ep->num |
+						outq_ep->dir);
+				ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
+				ep_cmd_reg = readl(&priv_dev->regs->ep_cmd);
+
+				outq_ep->flags |= EP_TDLCHK_EN;
+				cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
+						       EP_CFG_TDL_CHK);
+
+				cdns3_wa2_enable_detection(priv_dev, outq_ep,
+							   ep_sts_en_reg);
+				writel(ep_sts_en_reg,
+				       &priv_dev->regs->ep_sts_en);
+				/* reset tdl value to zero */
+				cdns3_wa2_reset_tdl(priv_dev);
+				/*
+				 * Memory barrier - Reset tdl before ringing the
+				 * doorbell.
+				 */
+				wmb();
+				if (EP_CMD_DRDY & ep_cmd_reg) {
+					trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n");
+
+				} else {
+					trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n");
+					/*
+					 * ring doorbell to generate DESCMIS irq
+					 */
+					writel(EP_CMD_DRDY,
+					       &priv_dev->regs->ep_cmd);
+				}
+			}
+		}
+	}
+}
+
 /**
  * cdns3_gadget_giveback - call struct usb_request's ->complete callback
  * @priv_ep: The endpoint to whom the request belongs to
@@ -807,14 +971,120 @@ static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
 		cdns3_wa1_restore_cycle_bit(priv_ep);
 }
 
+static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
+					struct usb_request *request)
+{
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+	struct cdns3_request *priv_req;
+	struct cdns3_trb *trb;
+	dma_addr_t trb_dma;
+	int address;
+	u32 control;
+	u32 length;
+	u32 tdl;
+	unsigned int sg_idx = priv_ep->stream_sg_idx;
+
+	priv_req = to_cdns3_request(request);
+	address = priv_ep->endpoint.desc->bEndpointAddress;
+
+	priv_ep->flags |= EP_PENDING_REQUEST;
+
+	/* must allocate buffer aligned to 8 */
+	if (priv_req->flags & REQUEST_UNALIGNED)
+		trb_dma = priv_req->aligned_buf->dma;
+	else
+		trb_dma = request->dma;
+
+	/* For stream capable endpoints the driver uses only a single TD. */
+	trb = priv_ep->trb_pool + priv_ep->enqueue;
+	priv_req->start_trb = priv_ep->enqueue;
+	priv_req->end_trb = priv_req->start_trb;
+	priv_req->trb = trb;
+
+	cdns3_select_ep(priv_ep->cdns3_dev, address);
+
+	control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE |
+		  TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;
+
+	if (!request->num_sgs) {
+		trb->buffer = TRB_BUFFER(trb_dma);
+		length = request->length;
+	} else {
+		trb->buffer = TRB_BUFFER(request->sg[sg_idx].dma_address);
+		length = request->sg[sg_idx].length;
+	}
+
+	tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);
+
+	trb->length = TRB_BURST_LEN(16 /*priv_ep->trb_burst_size*/) |
+				  TRB_LEN(length);
+
+	/*
+	 * For the DEV_VER_V2 controller version, USB_CONF2_EN_TDL_TRB is
+	 * enabled in the DMULT configuration.
+	 * This enables TDL calculation based on the TRB, hence TDL is set in the TRB.
+	 */
+	if (priv_dev->dev_ver >= DEV_VER_V2) {
+		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
+			trb->length |= TRB_TDL_SS_SIZE(tdl);
+	}
+	priv_req->flags |= REQUEST_PENDING;
+
+	trb->control = control;
+
+	trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
+
+	/*
+	 * Memory barrier - Cycle Bit must be set before trb->length and
+	 * trb->buffer fields.
+	 */
+	wmb();
+
+	/* always first element */
+	writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
+	       &priv_dev->regs->ep_traddr);
+
+	if (!(priv_ep->flags & EP_STALLED)) {
+		trace_cdns3_ring(priv_ep);
+		/* Clear TRBERR and EP_STS_DESCMIS before setting DRDY */
+		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
+
+		priv_ep->prime_flag = false;
+
+		/*
+		 * Controller version DEV_VER_V2 tdl calculation
+		 * is based on TRB
+		 */
+
+		if (priv_dev->dev_ver < DEV_VER_V2)
+			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
+			       &priv_dev->regs->ep_cmd);
+		else if (priv_dev->dev_ver > DEV_VER_V2)
+			writel(tdl, &priv_dev->regs->ep_tdl);
+
+		priv_ep->last_stream_id = priv_req->request.stream_id;
+		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+		writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) |
+		       EP_CMD_ERDY, &priv_dev->regs->ep_cmd);
+
+		trace_cdns3_doorbell_epx(priv_ep->name,
+					 readl(&priv_dev->regs->ep_traddr));
+	}
+
+	/* WORKAROUND for transition to L0 */
+	__cdns3_gadget_wakeup(priv_dev);
+
+	return 0;
+}
+
 /**
  * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware
  * @priv_ep: endpoint object
  *
  * Returns zero on success or negative value on failure
  */
-int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
-			  struct usb_request *request)
+static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+				 struct usb_request *request)
 {
 	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
 	struct cdns3_request *priv_req;
@@ -826,6 +1096,7 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
 	int address;
 	u32 control;
 	int pcs;
+	u16 total_tdl = 0;
 
 	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
 		num_trb = priv_ep->interval;
@@ -910,6 +1181,9 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
 		if (likely(priv_dev->dev_ver >= DEV_VER_V2))
 			td_size = DIV_ROUND_UP(length,
 					       priv_ep->endpoint.maxpacket);
+		else if (priv_ep->flags & EP_TDLCHK_EN)
+			total_tdl += DIV_ROUND_UP(length,
+					       priv_ep->endpoint.maxpacket);
 
 		trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) |
 					TRB_LEN(length);
@@ -954,6 +1228,23 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
 	if (sg_iter == 1)
 		trb->control |= TRB_IOC | TRB_ISP;
 
+	if (priv_dev->dev_ver < DEV_VER_V2 &&
+	    (priv_ep->flags & EP_TDLCHK_EN)) {
+		u16 tdl = total_tdl;
+		u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+
+		if (tdl > EP_CMD_TDL_MAX) {
+			tdl = EP_CMD_TDL_MAX;
+			priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX;
+		}
+
+		if (old_tdl < tdl) {
+			tdl -= old_tdl;
+			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
+			       &priv_dev->regs->ep_cmd);
+		}
+	}
+
 	/*
 	 * Memory barrier - cycle bit must be set before other fields in trb.
 	 */
@@ -1153,29 +1444,56 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
 			cdns3_move_deq_to_next_trb(priv_req);
 		}
 
-		/* Re-select endpoint. It could be changed by other CPU during
-		 * handling usb_gadget_giveback_request.
-		 */
-		cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
+		if (!request->stream_id) {
+			/* Re-select endpoint. It could be changed by other CPU
+			 * during handling usb_gadget_giveback_request.
+			 */
+			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
 
-		if (!cdns3_request_handled(priv_ep, priv_req))
-			goto prepare_next_td;
+			if (!cdns3_request_handled(priv_ep, priv_req))
+				goto prepare_next_td;
 
-		trb = priv_ep->trb_pool + priv_ep->dequeue;
-		trace_cdns3_complete_trb(priv_ep, trb);
+			trb = priv_ep->trb_pool + priv_ep->dequeue;
+			trace_cdns3_complete_trb(priv_ep, trb);
 
-		if (trb != priv_req->trb)
-			dev_warn(priv_dev->dev,
-				 "request_trb=0x%p, queue_trb=0x%p\n",
-				 priv_req->trb, trb);
+			if (trb != priv_req->trb)
+				dev_warn(priv_dev->dev,
+					 "request_trb=0x%p, queue_trb=0x%p\n",
+					 priv_req->trb, trb);
 
-		request->actual = TRB_LEN(le32_to_cpu(trb->length));
-		cdns3_move_deq_to_next_trb(priv_req);
-		cdns3_gadget_giveback(priv_ep, priv_req, 0);
+			request->actual = TRB_LEN(le32_to_cpu(trb->length));
+			cdns3_move_deq_to_next_trb(priv_req);
+			cdns3_gadget_giveback(priv_ep, priv_req, 0);
 
-		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
-		    TRBS_PER_SEGMENT == 2)
+			if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
+			    TRBS_PER_SEGMENT == 2)
+				break;
+		} else {
+			/* Re-select endpoint. It could be changed by other CPU
+			 * during handling usb_gadget_giveback_request.
+			 */
+			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
+
+			trb = priv_ep->trb_pool;
+			trace_cdns3_complete_trb(priv_ep, trb);
+
+			if (trb != priv_req->trb)
+				dev_warn(priv_dev->dev,
+					 "request_trb=0x%p, queue_trb=0x%p\n",
+					 priv_req->trb, trb);
+
+			request->actual += TRB_LEN(le32_to_cpu(trb->length));
+
+			if (!request->num_sgs ||
+			    (request->num_sgs == (priv_ep->stream_sg_idx + 1))) {
+				priv_ep->stream_sg_idx = 0;
+				cdns3_gadget_giveback(priv_ep, priv_req, 0);
+			} else {
+				priv_ep->stream_sg_idx++;
+				cdns3_ep_run_stream_transfer(priv_ep, request);
+			}
 			break;
+		}
 	}
 	priv_ep->flags &= ~EP_PENDING_REQUEST;
 
@@ -1205,6 +1523,21 @@ void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
 	}
 }
 
+static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep)
+{
+	u16 tdl = priv_ep->pending_tdl;
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+	if (tdl > EP_CMD_TDL_MAX) {
+		tdl = EP_CMD_TDL_MAX;
+		priv_ep->pending_tdl -= EP_CMD_TDL_MAX;
+	} else {
+		priv_ep->pending_tdl = 0;
+	}
+
+	writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd);
+}
+
 /**
  * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
  * @priv_ep: endpoint object
@@ -1215,6 +1548,9 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
 {
 	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
 	u32 ep_sts_reg;
+	struct usb_request *deferred_request;
+	struct usb_request *pending_request;
+	u32 tdl = 0;
 
 	cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
 
@@ -1223,6 +1559,36 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
 	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
 	writel(ep_sts_reg, &priv_dev->regs->ep_sts);
 
+	if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) {
+		bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY);
+
+		tdl = cdns3_get_tdl(priv_dev);
+
+		/*
+		 * Continue the previous transfer:
+		 * There is a race between ERDY and PRIME. The device sends
+		 * ERDY and at almost the same time the host sends PRIME.
+		 * This causes the host to ignore the ERDY packet, so the
+		 * driver has to send it again.
+		 */
+		if (tdl && (dbusy | !EP_STS_BUFFEMPTY(ep_sts_reg) |
+		    EP_STS_HOSTPP(ep_sts_reg))) {
+			writel(EP_CMD_ERDY |
+			       EP_CMD_ERDY_SID(priv_ep->last_stream_id),
+			       &priv_dev->regs->ep_cmd);
+			ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC);
+		} else {
+			priv_ep->prime_flag = true;
+
+			pending_request = cdns3_next_request(&priv_ep->pending_req_list);
+			deferred_request = cdns3_next_request(&priv_ep->deferred_req_list);
+
+			if (deferred_request && !pending_request) {
+				cdns3_start_all_request(priv_dev, priv_ep);
+			}
+		}
+	}
+
 	if (ep_sts_reg & EP_STS_TRBERR) {
 		if (priv_ep->flags & EP_STALL_PENDING &&
 		    !(ep_sts_reg & EP_STS_DESCMIS &&
@@ -1259,7 +1625,8 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
 		}
 	}
 
-	if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) {
+	if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) ||
+	    (ep_sts_reg & EP_STS_IOT)) {
 		if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
 			if (ep_sts_reg & EP_STS_ISP)
 				priv_ep->flags |= EP_QUIRK_END_TRANSFER;
@@ -1267,6 +1634,29 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
 				priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
 		}
 
+		if (!priv_ep->use_streams) {
+			if ((ep_sts_reg & EP_STS_IOC) ||
+			    (ep_sts_reg & EP_STS_ISP)) {
+				cdns3_transfer_completed(priv_dev, priv_ep);
+			} else if ((priv_ep->flags & EP_TDLCHK_EN) &&
+				   priv_ep->pending_tdl) {
+				/* handle IOT with pending tdl */
+				cdns3_reprogram_tdl(priv_ep);
+			}
+		} else if (priv_ep->dir == USB_DIR_OUT) {
+			priv_ep->ep_sts_pending |= ep_sts_reg;
+		} else if (ep_sts_reg & EP_STS_IOT) {
+			cdns3_transfer_completed(priv_dev, priv_ep);
+		}
+	}
+
+	/*
+	 * The MD_EXIT interrupt is set when a stream capable endpoint exits
+	 * the MOVE DATA state of the Bulk IN/OUT stream protocol state machine.
+	 */
+	if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) &&
+	    (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) {
+		priv_ep->ep_sts_pending = 0;
 		cdns3_transfer_completed(priv_dev, priv_ep);
 	}
 
@@ -1274,7 +1664,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
 	 * WA2: this condition should only be met when
 	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
 	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN.
-	 * In other cases this interrupt will be disabled/
+	 * In other cases this interrupt will be disabled.
 	 */
 	if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
 	    !(priv_ep->flags & EP_STALLED))
@@ -1457,6 +1847,9 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
 		ret = IRQ_HANDLED;
 	}
 
+	if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams)
+		cdns3_wa2_check_outq_status(priv_dev);
+
 irqend:
 	writel(~0, &priv_dev->regs->ep_ien);
 	spin_unlock_irqrestore(&priv_dev->lock, flags);
@@ -1511,6 +1904,27 @@ static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
 	return 0;
 }
 
+void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
+			      struct cdns3_endpoint *priv_ep)
+{
+	if (!priv_ep->use_streams || priv_dev->gadget.speed < USB_SPEED_SUPER)
+		return;
+
+	if (priv_dev->dev_ver >= DEV_VER_V3) {
+		u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
+
+		/*
+		 * Stream capable endpoints are handled by using ep_tdl
+		 * register. Other endpoints use TDL from TRB feature.
+		 */
+		cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb, mask);
+	}
+
+	/*  Enable Stream Bit TDL chk and SID chk */
+	cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_STREAM_EN |
+			       EP_CFG_TDL_CHK | EP_CFG_SID_CHK);
+}
+
 void cdns3_configure_dmult(struct cdns3_device *priv_dev,
 			   struct cdns3_endpoint *priv_ep)
 {
@@ -1772,6 +2186,7 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
 {
 	struct cdns3_endpoint *priv_ep;
 	struct cdns3_device *priv_dev;
+	const struct usb_ss_ep_comp_descriptor *comp_desc;
 	u32 reg = EP_STS_EN_TRBERREN;
 	u32 bEndpointAddress;
 	unsigned long flags;
@@ -1781,6 +2196,7 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
 
 	priv_ep = ep_to_cdns3_ep(ep);
 	priv_dev = priv_ep->cdns3_dev;
+	comp_desc = priv_ep->endpoint.comp_desc;
 
 	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
 		dev_dbg(priv_dev->dev, "usbss: invalid parameters\n");
@@ -1811,6 +2227,24 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
 		goto exit;
 	}
 
+	bEndpointAddress = priv_ep->num | priv_ep->dir;
+	cdns3_select_ep(priv_dev, bEndpointAddress);
+
+	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
+		/*
+		 * Enable stream support (SS mode) related interrupts
+		 * in EP_STS_EN Register
+		 */
+		if (priv_dev->gadget.speed >= USB_SPEED_SUPER) {
+			reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN |
+				EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN |
+				EP_STS_EN_STREAMREN;
+			priv_ep->use_streams = true;
+			cdns3_stream_ep_reconfig(priv_dev, priv_ep);
+			priv_dev->using_streams |= true;
+		}
+	}
+
 	ret = cdns3_allocate_trb_pool(priv_ep);
 
 	if (ret)
@@ -1957,6 +2391,7 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)
 
 	ep->desc = NULL;
 	priv_ep->flags &= ~EP_ENABLED;
+	priv_ep->use_streams = false;
 
 	spin_unlock_irqrestore(&priv_dev->lock, flags);
 
@@ -2005,13 +2440,21 @@ static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
 	list_add_tail(&request->list, &priv_ep->deferred_req_list);
 
 	/*
+	 * For a stream capable endpoint, start the request only if the prime
+	 * irq flag is set.
 	 * If hardware endpoint configuration has not been set yet then
 	 * just queue request in deferred list. Transfer will be started in
 	 * cdns3_set_hw_configuration.
 	 */
-	if (priv_dev->hw_configured_flag && !(priv_ep->flags & EP_STALLED) &&
-	    !(priv_ep->flags & EP_STALL_PENDING))
-		cdns3_start_all_request(priv_dev, priv_ep);
+	if (!request->stream_id) {
+		if (priv_dev->hw_configured_flag &&
+		    !(priv_ep->flags & EP_STALLED) &&
+		    !(priv_ep->flags & EP_STALL_PENDING))
+			cdns3_start_all_request(priv_dev, priv_ep);
+	} else {
+		if (priv_dev->hw_configured_flag && priv_ep->prime_flag)
+			cdns3_start_all_request(priv_dev, priv_ep);
+	}
 
 	return 0;
 }
@@ -2384,7 +2827,6 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
 	struct cdns3_endpoint *priv_ep;
 	u32 bEndpointAddress;
 	struct usb_ep *ep;
-	int ret = 0;
 	int val;
 
 	priv_dev->gadget_driver = NULL;
@@ -2408,7 +2850,7 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
 	writel(0, &priv_dev->regs->usb_ien);
 	writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
 
-	return ret;
+	return 0;
 }
 
 static const struct usb_gadget_ops cdns3_gadget_ops = {
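The __cdns3_gadget_ep_queue() hunk above gates stream requests on the PRIME flag instead of the stall state used for ordinary requests. A minimal userspace sketch of that decision follows; the structure and field names are illustrative, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

struct ep_state {
	bool hw_configured;	/* hw_configured_flag in the driver */
	bool stalled;
	bool stall_pending;
	bool prime_flag;
};

static bool can_start_request(const struct ep_state *ep, unsigned int stream_id)
{
	if (!stream_id)
		return ep->hw_configured && !ep->stalled && !ep->stall_pending;

	/* stream requests additionally wait for the PRIME notification */
	return ep->hw_configured && ep->prime_flag;
}

int main(void)
{
	struct ep_state ep = { .hw_configured = true };

	printf("non-stream request starts: %d\n", can_start_request(&ep, 0)); /* 1 */
	printf("stream 5 starts:           %d\n", can_start_request(&ep, 5)); /* 0, waits for PRIME */
	ep.prime_flag = true;
	printf("stream 5 after PRIME:      %d\n", can_start_request(&ep, 5)); /* 1 */
	return 0;
}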
diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h
index bc40240..f003a78 100644
--- a/drivers/usb/cdns3/gadget.h
+++ b/drivers/usb/cdns3/gadget.h
@@ -599,6 +599,7 @@ struct cdns3_usb_regs {
 #define EP_CMD_TDL_MASK		GENMASK(15, 9)
 #define EP_CMD_TDL_SET(p)	(((p) << 9) & EP_CMD_TDL_MASK)
 #define EP_CMD_TDL_GET(p)	(((p) & EP_CMD_TDL_MASK) >> 9)
+#define EP_CMD_TDL_MAX		(EP_CMD_TDL_MASK >> 9)
 
 /* ERDY Stream ID value (used in SS mode). */
 #define EP_CMD_ERDY_SID_MASK	GENMASK(31, 16)
@@ -969,10 +970,18 @@ struct cdns3_usb_regs {
 
 #define ISO_MAX_INTERVAL	10
 
+#define MAX_TRB_LENGTH          BIT(16)
+
 #if TRBS_PER_SEGMENT < 2
 #error "Incorrect TRBS_PER_SEGMENT. Minimal Transfer Ring size is 2."
 #endif
 
+#define TRBS_PER_STREAM_SEGMENT 2
+
+#if TRBS_PER_STREAM_SEGMENT < 2
+#error "Incorrect TRBS_PER_STREAM_SEGMENT. Minimal Transfer Ring size is 2."
+#endif
+
 /*
  *Only for ISOC endpoints - maximum number of TRBs is calculated as
  * pow(2, bInterval-1) * number of usb requests. It is limitation made by
@@ -1000,6 +1009,7 @@ struct cdns3_trb {
 
 #define TRB_SIZE		(sizeof(struct cdns3_trb))
 #define TRB_RING_SIZE		(TRB_SIZE * TRBS_PER_SEGMENT)
+#define TRB_STREAM_RING_SIZE	(TRB_SIZE * TRBS_PER_STREAM_SEGMENT)
 #define TRB_ISO_RING_SIZE	(TRB_SIZE * TRBS_PER_ISOC_SEGMENT)
 #define TRB_CTRL_RING_SIZE	(TRB_SIZE * 2)
 
@@ -1078,7 +1088,7 @@ struct cdns3_trb {
 #define CDNS3_ENDPOINTS_MAX_COUNT	32
 #define CDNS3_EP_ZLP_BUF_SIZE		1024
 
-#define CDNS3_EP_BUF_SIZE		2	/* KB */
+#define CDNS3_EP_BUF_SIZE		4	/* KB */
 #define CDNS3_EP_ISO_HS_MULT		3
 #define CDNS3_EP_ISO_SS_BURST		3
 #define CDNS3_MAX_NUM_DESCMISS_BUF	32
@@ -1109,6 +1119,7 @@ struct cdns3_device;
  * @interval: interval between packets used for ISOC endpoint.
  * @free_trbs: number of free TRBs in transfer ring
  * @num_trbs: number of all TRBs in transfer ring
+ * @alloc_ring_size: size of the allocated TRB ring
  * @pcs: producer cycle state
  * @ccs: consumer cycle state
  * @enqueue: enqueue index in transfer ring
@@ -1142,6 +1153,7 @@ struct cdns3_endpoint {
 #define EP_QUIRK_END_TRANSFER	BIT(11)
 #define EP_QUIRK_EXTRA_BUF_DET	BIT(12)
 #define EP_QUIRK_EXTRA_BUF_EN	BIT(13)
+#define EP_TDLCHK_EN		BIT(15)
 	u32			flags;
 
 	struct cdns3_request	*descmis_req;
@@ -1153,6 +1165,7 @@ struct cdns3_endpoint {
 
 	int			free_trbs;
 	int			num_trbs;
+	int			alloc_ring_size;
 	u8			pcs;
 	u8			ccs;
 	int			enqueue;
@@ -1163,6 +1176,14 @@ struct cdns3_endpoint {
 	struct cdns3_trb	*wa1_trb;
 	unsigned int		wa1_trb_index;
 	unsigned int		wa1_cycle_bit:1;
+
+	/* Stream related */
+	unsigned int		use_streams:1;
+	unsigned int		prime_flag:1;
+	u32			ep_sts_pending;
+	u16			last_stream_id;
+	u16			pending_tdl;
+	unsigned int		stream_sg_idx;
 };
 
 /**
@@ -1290,6 +1311,7 @@ struct cdns3_device {
 	int				hw_configured_flag:1;
 	int				wake_up_flag:1;
 	unsigned			status_completion_no_call:1;
+	unsigned			using_streams:1;
 	int				out_mem_is_allocated;
 
 	struct work_struct		pending_status_wq;
@@ -1310,8 +1332,6 @@ void cdns3_set_hw_configuration(struct cdns3_device *priv_dev);
 void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep);
 void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable);
 struct usb_request *cdns3_next_request(struct list_head *list);
-int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
-			  struct usb_request *request);
 void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm);
 int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep);
 u8 cdns3_ep_addr_to_index(u8 ep_addr);
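The new EP_CMD_TDL_MAX macro is simply the TDL field mask shifted back down to field position. A standalone check of the helpers (GENMASK re-defined here only so the snippet compiles in userspace) confirms that SET/GET round-trip for every value up to that maximum, which is 127 for GENMASK(15, 9):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))

#define EP_CMD_TDL_MASK		GENMASK(15, 9)
#define EP_CMD_TDL_SET(p)	(((p) << 9) & EP_CMD_TDL_MASK)
#define EP_CMD_TDL_GET(p)	(((p) & EP_CMD_TDL_MASK) >> 9)
#define EP_CMD_TDL_MAX		(EP_CMD_TDL_MASK >> 9)

int main(void)
{
	uint32_t tdl;

	for (tdl = 0; tdl <= EP_CMD_TDL_MAX; tdl++)
		assert(EP_CMD_TDL_GET(EP_CMD_TDL_SET(tdl)) == tdl);

	printf("EP_CMD_TDL_MAX = %u\n", (unsigned int)EP_CMD_TDL_MAX); /* 127 */
	return 0;
}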
diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h
index e92348c..8d121e2 100644
--- a/drivers/usb/cdns3/trace.h
+++ b/drivers/usb/cdns3/trace.h
@@ -122,18 +122,24 @@ DECLARE_EVENT_CLASS(cdns3_log_epx_irq,
 		__string(ep_name, priv_ep->name)
 		__field(u32, ep_sts)
 		__field(u32, ep_traddr)
+		__field(u32, ep_last_sid)
+		__field(u32, use_streams)
 		__dynamic_array(char, str, CDNS3_MSG_MAX)
 	),
 	TP_fast_assign(
 		__assign_str(ep_name, priv_ep->name);
 		__entry->ep_sts = readl(&priv_dev->regs->ep_sts);
 		__entry->ep_traddr = readl(&priv_dev->regs->ep_traddr);
+		__entry->ep_last_sid = priv_ep->last_stream_id;
+		__entry->use_streams = priv_ep->use_streams;
 	),
-	TP_printk("%s, ep_traddr: %08x",
+	TP_printk("%s, ep_traddr: %08x ep_last_sid: %08x use_streams: %d",
 		  cdns3_decode_epx_irq(__get_str(str),
 				       __get_str(ep_name),
 				       __entry->ep_sts),
-		  __entry->ep_traddr)
+		  __entry->ep_traddr,
+		  __entry->ep_last_sid,
+		  __entry->use_streams)
 );
 
 DEFINE_EVENT(cdns3_log_epx_irq, cdns3_epx_irq,
@@ -210,6 +216,7 @@ DECLARE_EVENT_CLASS(cdns3_log_request,
 		__field(int, end_trb)
 		__field(struct cdns3_trb *, start_trb_addr)
 		__field(int, flags)
+		__field(unsigned int, stream_id)
 	),
 	TP_fast_assign(
 		__assign_str(name, req->priv_ep->name);
@@ -225,9 +232,10 @@ DECLARE_EVENT_CLASS(cdns3_log_request,
 		__entry->end_trb = req->end_trb;
 		__entry->start_trb_addr = req->trb;
 		__entry->flags = req->flags;
+		__entry->stream_id = req->request.stream_id;
 	),
 	TP_printk("%s: req: %p, req buff %p, length: %u/%u %s%s%s, status: %d,"
-		  " trb: [start:%d, end:%d: virt addr %pa], flags:%x ",
+		  " trb: [start:%d, end:%d: virt addr %pa], flags:%x SID: %u",
 		__get_str(name), __entry->req, __entry->buf, __entry->actual,
 		__entry->length,
 		__entry->zero ? "Z" : "z",
@@ -237,7 +245,8 @@ DECLARE_EVENT_CLASS(cdns3_log_request,
 		__entry->start_trb,
 		__entry->end_trb,
 		__entry->start_trb_addr,
-		__entry->flags
+		__entry->flags,
+		__entry->stream_id
 	)
 );
 
@@ -281,6 +290,39 @@ TRACE_EVENT(cdns3_ep0_queue,
 		  __entry->length)
 );
 
+DECLARE_EVENT_CLASS(cdns3_stream_split_transfer_len,
+	TP_PROTO(struct cdns3_request *req),
+	TP_ARGS(req),
+	TP_STRUCT__entry(
+		__string(name, req->priv_ep->name)
+		__field(struct cdns3_request *, req)
+		__field(unsigned int, length)
+		__field(unsigned int, actual)
+		__field(unsigned int, stream_id)
+	),
+	TP_fast_assign(
+		__assign_str(name, req->priv_ep->name);
+		__entry->req = req;
+		__entry->length = req->request.length;
+		__entry->actual = req->request.actual;
+		__entry->stream_id = req->request.stream_id;
+	),
+	TP_printk("%s: req: %p, request length: %u, actual length: %u, SID: %u",
+		  __get_str(name), __entry->req, __entry->length,
+		  __entry->actual, __entry->stream_id)
+);
+
+DEFINE_EVENT(cdns3_stream_split_transfer_len, cdns3_stream_transfer_split,
+	     TP_PROTO(struct cdns3_request *req),
+	     TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdns3_stream_split_transfer_len,
+	     cdns3_stream_transfer_split_next_part,
+	     TP_PROTO(struct cdns3_request *req),
+	     TP_ARGS(req)
+);
+
 DECLARE_EVENT_CLASS(cdns3_log_aligned_request,
 	TP_PROTO(struct cdns3_request *priv_req),
 	TP_ARGS(priv_req),
@@ -319,6 +361,34 @@ DEFINE_EVENT(cdns3_log_aligned_request, cdns3_prepare_aligned_request,
 	TP_ARGS(req)
 );
 
+DECLARE_EVENT_CLASS(cdns3_log_map_request,
+	TP_PROTO(struct cdns3_request *priv_req),
+	TP_ARGS(priv_req),
+	TP_STRUCT__entry(
+		__string(name, priv_req->priv_ep->name)
+		__field(struct usb_request *, req)
+		__field(void *, buf)
+		__field(dma_addr_t, dma)
+	),
+	TP_fast_assign(
+		__assign_str(name, priv_req->priv_ep->name);
+		__entry->req = &priv_req->request;
+		__entry->buf = priv_req->request.buf;
+		__entry->dma = priv_req->request.dma;
+	),
+	TP_printk("%s: req: %p, req buf %p, dma %pad",
+		  __get_str(name), __entry->req, __entry->buf, &__entry->dma
+	)
+);
+DEFINE_EVENT(cdns3_log_map_request, cdns3_map_request,
+	     TP_PROTO(struct cdns3_request *req),
+	     TP_ARGS(req)
+);
+DEFINE_EVENT(cdns3_log_map_request, cdns3_mapped_request,
+	     TP_PROTO(struct cdns3_request *req),
+	     TP_ARGS(req)
+);
+
 DECLARE_EVENT_CLASS(cdns3_log_trb,
 	TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb),
 	TP_ARGS(priv_ep, trb),
@@ -329,6 +399,7 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
 		__field(u32, length)
 		__field(u32, control)
 		__field(u32, type)
+		__field(unsigned int, last_stream_id)
 	),
 	TP_fast_assign(
 		__assign_str(name, priv_ep->name);
@@ -337,8 +408,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
 		__entry->length = trb->length;
 		__entry->control = trb->control;
 		__entry->type = usb_endpoint_type(priv_ep->endpoint.desc);
+		__entry->last_stream_id = priv_ep->last_stream_id;
 	),
-	TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s)",
+	TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s) SID:%lu LAST_SID:%u",
 		__get_str(name), __entry->trb, __entry->buffer,
 		TRB_LEN(__entry->length),
 		(u8)TRB_BURST_LEN_GET(__entry->length),
@@ -349,7 +421,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
 		__entry->control & TRB_FIFO_MODE ? "FIFO, " : "",
 		__entry->control & TRB_CHAIN ? "CHAIN, " : "",
 		__entry->control & TRB_IOC ? "IOC, " : "",
-		TRB_FIELD_TO_TYPE(__entry->control) == TRB_NORMAL ? "Normal" : "LINK"
+		TRB_FIELD_TO_TYPE(__entry->control) == TRB_NORMAL ? "Normal" : "LINK",
+		TRB_FIELD_TO_STREAMID(__entry->control),
+		__entry->last_stream_id
 	)
 );
 
@@ -398,6 +472,7 @@ DECLARE_EVENT_CLASS(cdns3_log_ep,
 		__field(unsigned int, maxpacket)
 		__field(unsigned int, maxpacket_limit)
 		__field(unsigned int, max_streams)
+		__field(unsigned int, use_streams)
 		__field(unsigned int, maxburst)
 		__field(unsigned int, flags)
 		__field(unsigned int, dir)
@@ -409,16 +484,18 @@ DECLARE_EVENT_CLASS(cdns3_log_ep,
 		__entry->maxpacket = priv_ep->endpoint.maxpacket;
 		__entry->maxpacket_limit = priv_ep->endpoint.maxpacket_limit;
 		__entry->max_streams = priv_ep->endpoint.max_streams;
+		__entry->use_streams = priv_ep->use_streams;
 		__entry->maxburst = priv_ep->endpoint.maxburst;
 		__entry->flags = priv_ep->flags;
 		__entry->dir = priv_ep->dir;
 		__entry->enqueue = priv_ep->enqueue;
 		__entry->dequeue = priv_ep->dequeue;
 	),
-	TP_printk("%s: mps: %d/%d. streams: %d, burst: %d, enq idx: %d, "
-		  "deq idx: %d, flags %s%s%s%s%s%s%s%s, dir: %s",
+	TP_printk("%s: mps: %d/%d. streams: %d, stream enable: %d, burst: %d, "
+		  "enq idx: %d, deq idx: %d, flags %s%s%s%s%s%s%s%s, dir: %s",
 		__get_str(name), __entry->maxpacket,
 		__entry->maxpacket_limit, __entry->max_streams,
+		__entry->use_streams,
 		__entry->maxburst, __entry->enqueue,
 		__entry->dequeue,
 		__entry->flags & EP_ENABLED ? "EN | " : "",
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index ae850b3..d53db52 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -7,6 +7,7 @@
 	select RESET_CONTROLLER
 	select USB_ULPI_BUS
 	select USB_ROLE_SWITCH
+	select USB_TEGRA_PHY if ARCH_TEGRA
 	help
 	  Say Y here if your system has a dual role high speed USB
 	  controller based on ChipIdea silicon IP. It supports:
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 6911aef..d49d5e1 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -302,6 +302,16 @@ static inline enum usb_role ci_role_to_usb_role(struct ci_hdrc *ci)
 		return USB_ROLE_NONE;
 }
 
+static inline enum ci_role usb_role_to_ci_role(enum usb_role role)
+{
+	if (role == USB_ROLE_HOST)
+		return CI_ROLE_HOST;
+	else if (role == USB_ROLE_DEVICE)
+		return CI_ROLE_GADGET;
+	else
+		return CI_ROLE_END;
+}
+
 /**
  * hw_read_id_reg: reads from a identification register
  * @ci: the controller
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 0c9911d..7455df0 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -83,13 +83,6 @@ static int tegra_udc_probe(struct platform_device *pdev)
 		return err;
 	}
 
-	/*
-	 * Tegra's USB PHY driver doesn't implement optional phy_init()
-	 * hook, so we have to power on UDC controller before ChipIdea
-	 * driver initialization kicks in.
-	 */
-	usb_phy_set_suspend(udc->phy, 0);
-
 	/* setup and register ChipIdea HDRC device */
 	udc->data.name = "tegra-udc";
 	udc->data.flags = soc->flags;
@@ -109,7 +102,6 @@ static int tegra_udc_probe(struct platform_device *pdev)
 	return 0;
 
 fail_power_off:
-	usb_phy_set_suspend(udc->phy, 1);
 	clk_disable_unprepare(udc->clk);
 	return err;
 }
@@ -119,7 +111,6 @@ static int tegra_udc_remove(struct platform_device *pdev)
 	struct tegra_udc *udc = platform_get_drvdata(pdev);
 
 	ci_hdrc_remove_device(udc->dev);
-	usb_phy_set_suspend(udc->phy, 1);
 	clk_disable_unprepare(udc->clk);
 
 	return 0;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index dce5db4..52139c2 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -618,9 +618,11 @@ static int ci_usb_role_switch_set(struct device *dev, enum usb_role role)
 	struct ci_hdrc *ci = dev_get_drvdata(dev);
 	struct ci_hdrc_cable *cable = NULL;
 	enum usb_role current_role = ci_role_to_usb_role(ci);
+	enum ci_role ci_role = usb_role_to_ci_role(role);
 	unsigned long flags;
 
-	if (current_role == role)
+	if ((ci_role != CI_ROLE_END && !ci->roles[ci_role]) ||
+	    (current_role == role))
 		return 0;
 
 	pm_runtime_get_sync(ci->dev);
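The guard added to ci_usb_role_switch_set() maps the requested usb_role to the controller's ci_role index and ignores the request when no role driver was initialized for it. A self-contained sketch of that check; the roles[] table below is only a stand-in for the chipidea ci->roles array:

#include <stddef.h>
#include <stdio.h>

enum usb_role { USB_ROLE_NONE, USB_ROLE_HOST, USB_ROLE_DEVICE };
enum ci_role  { CI_ROLE_HOST, CI_ROLE_GADGET, CI_ROLE_END };

static enum ci_role usb_role_to_ci_role(enum usb_role role)
{
	if (role == USB_ROLE_HOST)
		return CI_ROLE_HOST;
	else if (role == USB_ROLE_DEVICE)
		return CI_ROLE_GADGET;
	else
		return CI_ROLE_END;
}

int main(void)
{
	/* pretend only the gadget role driver probed successfully */
	const void *roles[CI_ROLE_END] = { NULL, (const void *)1 };
	enum usb_role requested = USB_ROLE_HOST;
	enum ci_role ci_role = usb_role_to_ci_role(requested);

	if (ci_role != CI_ROLE_END && !roles[ci_role]) {
		printf("role %d not supported, ignoring switch\n", requested);
		return 0;
	}
	printf("switching to role %d\n", requested);
	return 0;
}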
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 70112cf..2625aa0 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -20,7 +20,7 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
 
 }
 
-static void ci_hdrc_host_driver_init(void)
+static inline void ci_hdrc_host_driver_init(void)
 {
 
 }
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 12bb572..6833c918 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -574,7 +574,7 @@ __acquires(ps->lock)
 
 	/* Now carefully unlink all the marked pending URBs */
  rescan:
-	list_for_each_entry(as, &ps->async_pending, asynclist) {
+	list_for_each_entry_reverse(as, &ps->async_pending, asynclist) {
 		if (as->bulk_status == AS_UNLINK) {
 			as->bulk_status = 0;		/* Only once */
 			urb = as->urb;
@@ -636,7 +636,7 @@ static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
 
 	spin_lock_irqsave(&ps->lock, flags);
 	while (!list_empty(list)) {
-		as = list_entry(list->next, struct async, asynclist);
+		as = list_last_entry(list, struct async, asynclist);
 		list_del_init(&as->asynclist);
 		urb = as->urb;
 		usb_get_urb(urb);
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 6af6add..876ff31 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -288,14 +288,9 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
 
 	/*
 	 * Need to schedule a work, as there are possible DELAY function calls.
-	 * Release lock before scheduling workq as it holds spinlock during
-	 * scheduling.
 	 */
-	if (hsotg->wq_otg) {
-		spin_unlock(&hsotg->lock);
+	if (hsotg->wq_otg)
 		queue_work(hsotg->wq_otg, &hsotg->wf_otg);
-		spin_lock(&hsotg->lock);
-	}
 }
 
 /**
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index b8f2790..3a0dcbf 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -183,6 +183,7 @@ DEFINE_SHOW_ATTRIBUTE(state);
 static int fifo_show(struct seq_file *seq, void *v)
 {
 	struct dwc2_hsotg *hsotg = seq->private;
+	int fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
 	u32 val;
 	int idx;
 
@@ -196,7 +197,7 @@ static int fifo_show(struct seq_file *seq, void *v)
 
 	seq_puts(seq, "\nPeriodic TXFIFOs:\n");
 
-	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
+	for (idx = 1; idx <= fifo_count; idx++) {
 		val = dwc2_readl(hsotg, DPTXFSIZN(idx));
 
 		seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 6be10e496..88f7d6d 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3784,15 +3784,26 @@ static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
 		for (idx = 1; idx < hsotg->num_of_eps; idx++) {
 			hs_ep = hsotg->eps_out[idx];
 			/* Proceed only unmasked ISOC EPs */
-			if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
+			if (BIT(idx) & ~daintmsk)
 				continue;
 
 			epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
 
-			if (epctrl & DXEPCTL_EPENA) {
+			// ISOC EPs only
+			if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
 				epctrl |= DXEPCTL_SNAK;
 				epctrl |= DXEPCTL_EPDIS;
 				dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
+				continue;
+			}
+
+			// Non-ISOC EPs
+			if (hs_ep->halted) {
+				if (!(epctrl & DXEPCTL_EPENA))
+					epctrl |= DXEPCTL_EPENA;
+				epctrl |= DXEPCTL_EPDIS;
+				epctrl |= DXEPCTL_STALL;
+				dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
 			}
 		}
 
@@ -4056,11 +4067,12 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
 	 * a unique tx-fifo even if it is non-periodic.
 	 */
 	if (dir_in && hsotg->dedicated_fifos) {
+		unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
 		u32 fifo_index = 0;
 		u32 fifo_size = UINT_MAX;
 
 		size = hs_ep->ep.maxpacket * hs_ep->mc;
-		for (i = 1; i < hsotg->num_of_eps; ++i) {
+		for (i = 1; i <= fifo_count; ++i) {
 			if (hsotg->fifo_map & (1 << i))
 				continue;
 			val = dwc2_readl(hsotg, DPTXFSIZN(i));
@@ -4310,19 +4322,20 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
 		epctl = dwc2_readl(hs, epreg);
 
 		if (value) {
-			epctl |= DXEPCTL_STALL;
+			if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
+				dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
+			// STALL bit will be set in GOUTNAKEFF interrupt handler
 		} else {
 			epctl &= ~DXEPCTL_STALL;
 			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
 			if (xfertype == DXEPCTL_EPTYPE_BULK ||
 			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
 				epctl |= DXEPCTL_SETD0PID;
+			dwc2_writel(hs, epctl, epreg);
 		}
-		dwc2_writel(hs, epctl, epreg);
 	}
 
 	hs_ep->halted = value;
-
 	return 0;
 }
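The ep_enable and debugfs hunks above bound the TX FIFO walk by the number of FIFOs the core actually implements (dwc2_hsotg_tx_fifo_count()) rather than the endpoint count. A toy version of the "smallest unused FIFO that still fits" selection that loop performs, with made-up sizes and a hard-coded FIFO map:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	const unsigned int fifo_size[] = { 0, 256, 512, 1024, 768 }; /* FIFOs 1..4 */
	const unsigned int fifo_count = 4;
	unsigned int fifo_map = 1u << 2;	/* FIFO 2 already claimed */
	unsigned int need = 512, best = 0, best_size = UINT_MAX, i;

	for (i = 1; i <= fifo_count; i++) {
		if (fifo_map & (1u << i))
			continue;		/* skip FIFOs already mapped */
		if (fifo_size[i] >= need && fifo_size[i] < best_size) {
			best = i;
			best_size = fifo_size[i];
		}
	}
	printf("selected FIFO %u (size %u)\n", best, best_size); /* FIFO 4, 768 */
	return 0;
}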
 
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 81afe55..b90f858 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2824,7 +2824,7 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
 		list_move_tail(&chan->split_order_list_entry,
 			       &hsotg->split_order);
 
-	if (hsotg->params.host_dma) {
+	if (hsotg->params.host_dma && chan->qh) {
 		if (hsotg->params.dma_desc_enable) {
 			if (!chan->xfer_started ||
 			    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index f561c6c..1d85c42 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1246,6 +1246,9 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
 		/* do nothing */
 		break;
 	}
+
+	/* de-assert DRVVBUS for HOST and OTG mode */
+	dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
 }
 
 static void dwc3_get_properties(struct dwc3 *dwc)
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 1c8b3493..77c4a9a 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -688,7 +688,9 @@ struct dwc3_ep {
 #define DWC3_EP_STALL		BIT(1)
 #define DWC3_EP_WEDGE		BIT(2)
 #define DWC3_EP_TRANSFER_STARTED BIT(3)
+#define DWC3_EP_END_TRANSFER_PENDING BIT(4)
 #define DWC3_EP_PENDING_REQUEST	BIT(5)
+#define DWC3_EP_DELAY_START	BIT(6)
 
 	/* This last one is specific to EP0 */
 #define DWC3_EP0_DIR_IN		BIT(31)
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index c1e9ea6..90bb022 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /**
- * dwc3-exynos.c - Samsung EXYNOS DWC3 Specific Glue layer
+ * dwc3-exynos.c - Samsung Exynos DWC3 Specific Glue layer
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
@@ -255,4 +255,4 @@ module_platform_driver(dwc3_exynos_driver);
 
 MODULE_AUTHOR("Anton Tikhomirov <av.tikhomirov@samsung.com>");
 MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("DesignWare USB3 EXYNOS Glue Layer");
+MODULE_DESCRIPTION("DesignWare USB3 Exynos Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 294276f..7051611 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -34,6 +34,7 @@
 #define PCI_DEVICE_ID_INTEL_GLK			0x31aa
 #define PCI_DEVICE_ID_INTEL_CNPLP		0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH		0xa36e
+#define PCI_DEVICE_ID_INTEL_CNPV		0xa3b0
 #define PCI_DEVICE_ID_INTEL_ICLLP		0x34ee
 #define PCI_DEVICE_ID_INTEL_EHLLP		0x4b7e
 #define PCI_DEVICE_ID_INTEL_TGPLP		0xa0ee
@@ -342,6 +343,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPH),
 	  (kernel_ulong_t) &dwc3_pci_intel_properties, },
 
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPV),
+	  (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICLLP),
 	  (kernel_ulong_t) &dwc3_pci_intel_properties, },
 
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index fd1b100..6dee4da 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1136,8 +1136,10 @@ void dwc3_ep0_interrupt(struct dwc3 *dwc,
 	case DWC3_DEPEVT_EPCMDCMPLT:
 		cmd = DEPEVT_PARAMETER_CMD(event->parameters);
 
-		if (cmd == DWC3_DEPCMD_ENDTRANSFER)
+		if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+			dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
 			dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+		}
 		break;
 	}
 }
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 154f3f3..1b8014a 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -57,7 +57,7 @@ int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
 		return -EINVAL;
 	}
 
-	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+	dwc3_gadget_dctl_write_safe(dwc, reg);
 
 	return 0;
 }
@@ -111,6 +111,9 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
 
+	/* set no action before sending new link state change */
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
 	/* set requested state */
 	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
@@ -1447,6 +1450,12 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 	list_add_tail(&req->list, &dep->pending_list);
 	req->status = DWC3_REQUEST_STATUS_QUEUED;
 
+	/* Start the transfer only after the END_TRANSFER command completes */
+	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
+		dep->flags |= DWC3_EP_DELAY_START;
+		return 0;
+	}
+
 	/*
 	 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
 	 * wait for a XferNotReady event so we will know what's the current
@@ -1828,7 +1837,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
 		dwc->pullups_connected = false;
 	}
 
-	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+	dwc3_gadget_dctl_write_safe(dwc, reg);
 
 	do {
 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
@@ -2625,8 +2634,14 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
 		cmd = DEPEVT_PARAMETER_CMD(event->parameters);
 
 		if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+			dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
 			dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
 			dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+			if ((dep->flags & DWC3_EP_DELAY_START) &&
+			    !usb_endpoint_xfer_isoc(dep->endpoint.desc))
+				__dwc3_gadget_kick_transfer(dep);
+
+			dep->flags &= ~DWC3_EP_DELAY_START;
 		}
 		break;
 	case DWC3_DEPEVT_STREAMEVT:
@@ -2678,12 +2693,12 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
 static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
 	bool interrupt)
 {
-	struct dwc3 *dwc = dep->dwc;
 	struct dwc3_gadget_ep_cmd_params params;
 	u32 cmd;
 	int ret;
 
-	if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
+	if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
+	    (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
 		return;
 
 	/*
@@ -2693,16 +2708,13 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
 	 * much trouble synchronizing between us and gadget driver.
 	 *
 	 * We have discussed this with the IP Provider and it was
-	 * suggested to giveback all requests here, but give HW some
-	 * extra time to synchronize with the interconnect. We're using
-	 * an arbitrary 100us delay for that.
+	 * suggested to giveback all requests here.
 	 *
 	 * Note also that a similar handling was tested by Synopsys
 	 * (thanks a lot Paul) and nothing bad has come out of it.
-	 * In short, what we're doing is:
-	 *
-	 * - Issue EndTransfer WITH CMDIOC bit set
-	 * - Wait 100us
+	 * In short, what we're doing is issuing EndTransfer with the
+	 * CMDIOC bit set and delaying the transfer kick until the
+	 * EndTransfer command has completed.
 	 *
 	 * As of IP version 3.10a of the DWC_usb3 IP, the controller
 	 * supports a mode to work around the above limitation. The
@@ -2711,8 +2723,7 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
 	 * by writing GUCTL2[14]. This polling is already done in the
 	 * dwc3_send_gadget_ep_cmd() function so if the mode is
 	 * enabled, the EndTransfer command will have completed upon
-	 * returning from this function and we don't need to delay for
-	 * 100us.
+	 * returning from this function.
 	 *
 	 * This mode is NOT available on the DWC_usb31 IP.
 	 */
@@ -2728,9 +2739,8 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
 
 	if (!interrupt)
 		dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
-
-	if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
-		udelay(100);
+	else
+		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
 }
 
 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
@@ -2759,12 +2769,12 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
 {
 	int			reg;
 
+	dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
+
 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 	reg &= ~DWC3_DCTL_INITU1ENA;
-	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
-
 	reg &= ~DWC3_DCTL_INITU2ENA;
-	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+	dwc3_gadget_dctl_write_safe(dwc, reg);
 
 	dwc3_disconnect_gadget(dwc);
 
@@ -2816,7 +2826,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 
 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
-	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+	dwc3_gadget_dctl_write_safe(dwc, reg);
 	dwc->test_mode = false;
 	dwc3_clear_stall_all_ep(dwc);
 
@@ -2920,11 +2930,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
 		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
 			reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
 
-		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+		dwc3_gadget_dctl_write_safe(dwc, reg);
 	} else {
 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
-		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+		dwc3_gadget_dctl_write_safe(dwc, reg);
 	}
 
 	dep = dwc->eps[0];
@@ -3033,7 +3043,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
 
 				reg &= ~u1u2;
 
-				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+				dwc3_gadget_dctl_write_safe(dwc, reg);
 				break;
 			default:
 				/* do nothing */
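The DWC3_EP_END_TRANSFER_PENDING/DWC3_EP_DELAY_START handling above replaces the old 100us delay: a request queued while EndTransfer is outstanding is only marked for a delayed start, and the command-complete event performs the kick. A userspace sketch of that sequencing; the bit positions mirror the new flags but everything else is illustrative:

#include <stdio.h>

#define EP_END_TRANSFER_PENDING	(1u << 4)
#define EP_DELAY_START		(1u << 6)

struct ep {
	unsigned int flags;
};

static void queue_request(struct ep *ep)
{
	if (ep->flags & EP_END_TRANSFER_PENDING) {
		ep->flags |= EP_DELAY_START;	/* remember to start later */
		return;
	}
	printf("kick transfer now\n");
}

static void end_transfer_complete(struct ep *ep)
{
	ep->flags &= ~EP_END_TRANSFER_PENDING;
	if (ep->flags & EP_DELAY_START)
		printf("kick delayed transfer\n");
	ep->flags &= ~EP_DELAY_START;
}

int main(void)
{
	struct ep ep = { .flags = EP_END_TRANSFER_PENDING };

	queue_request(&ep);		/* deferred */
	end_transfer_complete(&ep);	/* performs the delayed kick */
	return 0;
}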
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 5faf4d1..fbc7d80 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -127,4 +127,18 @@ static inline void dwc3_gadget_ep_get_transfer_index(struct dwc3_ep *dep)
 	dep->resource_index = DWC3_DEPCMD_GET_RSC_IDX(res_id);
 }
 
+/**
+ * dwc3_gadget_dctl_write_safe - write to DCTL safe from link state change
+ * @dwc: pointer to our context structure
+ * @value: value to write to DCTL
+ *
+ * Use this function when doing a read-modify-write of DCTL. It will not
+ * send a link state change request.
+ */
+static inline void dwc3_gadget_dctl_write_safe(struct dwc3 *dwc, u32 value)
+{
+	value &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
+	dwc3_writel(dwc->regs, DWC3_DCTL, value);
+}
+
 #endif /* __DRIVERS_USB_DWC3_GADGET_H */
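For illustration, a standalone model of what the helper above guarantees: whatever value a read-modify-write path computes, the link-state-change request field is zeroed before it reaches the register, so the write cannot re-issue the last requested state change. The mask and register shadow below are stand-ins, not the real DWC3_DCTL layout:

#include <stdint.h>
#include <stdio.h>

#define ULSTCHNGREQ_MASK	(0xfu << 5)		/* illustrative field */

static uint32_t dctl_shadow = (0x5u << 5) | 0x3;	/* fake register value */

static void dctl_write_safe(uint32_t value)
{
	value &= ~ULSTCHNGREQ_MASK;	/* never carry a stale state-change request */
	dctl_shadow = value;
}

int main(void)
{
	uint32_t reg = dctl_shadow;

	reg |= 1u << 12;		/* set some unrelated bit */
	dctl_write_safe(reg);
	printf("DCTL = 0x%x\n", dctl_shadow);	/* 0x1003: field cleared, new bit kept */
	return 0;
}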
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 02ff850..c6db0a0 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -483,34 +483,6 @@
 	  Both protocols can work on USB2.0 and USB3.0.
 	  UAS utilizes the USB 3.0 feature called streams support.
 
-choice
-	tristate "USB Gadget precomposed configurations"
-	default USB_ETH
-	optional
-	help
-	  A Linux "Gadget Driver" talks to the USB Peripheral Controller
-	  driver through the abstract "gadget" API.  Some other operating
-	  systems call these "client" drivers, of which "class drivers"
-	  are a subset (implementing a USB device class specification).
-	  A gadget driver implements one or more USB functions using
-	  the peripheral hardware.
-
-	  Gadget drivers are hardware-neutral, or "platform independent",
-	  except that they sometimes must understand quirks or limitations
-	  of the particular controllers they work with.  For example, when
-	  a controller doesn't support alternate configurations or provide
-	  enough of the right types of endpoints, the gadget driver might
-	  not be able work with that controller, or might need to implement
-	  a less common variant of a device class protocol.
-
-	  The available choices each represent a single precomposed USB
-	  gadget configuration. In the device model, each option contains
-	  both the device instantiation as a child for a USB gadget
-	  controller, and the relevant drivers for each function declared
-	  by the device.
-
 source "drivers/usb/gadget/legacy/Kconfig"
 
-endchoice
-
 endif # USB_GADGET
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index ab9ac48..32b637e 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -293,6 +293,47 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
 	return ret;
 }
 
+static ssize_t gadget_dev_desc_max_speed_show(struct config_item *item,
+					      char *page)
+{
+	enum usb_device_speed speed = to_gadget_info(item)->composite.max_speed;
+
+	return sprintf(page, "%s\n", usb_speed_string(speed));
+}
+
+static ssize_t gadget_dev_desc_max_speed_store(struct config_item *item,
+					       const char *page, size_t len)
+{
+	struct gadget_info *gi = to_gadget_info(item);
+
+	mutex_lock(&gi->lock);
+
+	/* Prevent changing max_speed after the driver is bound */
+	if (gi->composite.gadget_driver.udc_name)
+		goto err;
+
+	if (strncmp(page, "super-speed-plus", 16) == 0)
+		gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
+	else if (strncmp(page, "super-speed", 11) == 0)
+		gi->composite.max_speed = USB_SPEED_SUPER;
+	else if (strncmp(page, "high-speed", 10) == 0)
+		gi->composite.max_speed = USB_SPEED_HIGH;
+	else if (strncmp(page, "full-speed", 10) == 0)
+		gi->composite.max_speed = USB_SPEED_FULL;
+	else if (strncmp(page, "low-speed", 9) == 0)
+		gi->composite.max_speed = USB_SPEED_LOW;
+	else
+		goto err;
+
+	gi->composite.gadget_driver.max_speed = gi->composite.max_speed;
+
+	mutex_unlock(&gi->lock);
+	return len;
+err:
+	mutex_unlock(&gi->lock);
+	return -EINVAL;
+}
+
 CONFIGFS_ATTR(gadget_dev_desc_, bDeviceClass);
 CONFIGFS_ATTR(gadget_dev_desc_, bDeviceSubClass);
 CONFIGFS_ATTR(gadget_dev_desc_, bDeviceProtocol);
@@ -302,6 +343,7 @@ CONFIGFS_ATTR(gadget_dev_desc_, idProduct);
 CONFIGFS_ATTR(gadget_dev_desc_, bcdDevice);
 CONFIGFS_ATTR(gadget_dev_desc_, bcdUSB);
 CONFIGFS_ATTR(gadget_dev_desc_, UDC);
+CONFIGFS_ATTR(gadget_dev_desc_, max_speed);
 
 static struct configfs_attribute *gadget_root_attrs[] = {
 	&gadget_dev_desc_attr_bDeviceClass,
@@ -313,6 +355,7 @@ static struct configfs_attribute *gadget_root_attrs[] = {
 	&gadget_dev_desc_attr_bcdDevice,
 	&gadget_dev_desc_attr_bcdUSB,
 	&gadget_dev_desc_attr_UDC,
+	&gadget_dev_desc_attr_max_speed,
 	NULL,
 };
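The new max_speed attribute parses a speed string before the gadget is bound; note that "super-speed-plus" must be matched before "super-speed", or the shorter prefix would win. A userspace sketch of the same table-driven parsing (enum values and table are illustrative):

#include <stdio.h>
#include <string.h>

enum speed { LOW, FULL, HIGH, SUPER, SUPER_PLUS, INVALID };

static enum speed parse_max_speed(const char *page)
{
	static const struct { const char *name; enum speed val; } tab[] = {
		{ "super-speed-plus", SUPER_PLUS },
		{ "super-speed",      SUPER },
		{ "high-speed",       HIGH },
		{ "full-speed",       FULL },
		{ "low-speed",        LOW },
	};
	size_t i;

	for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++)
		if (strncmp(page, tab[i].name, strlen(tab[i].name)) == 0)
			return tab[i].val;
	return INVALID;		/* maps to -EINVAL in the driver */
}

int main(void)
{
	printf("%d\n", parse_max_speed("super-speed-plus\n")); /* 4 */
	printf("%d\n", parse_max_speed("super-speed\n"));      /* 3 */
	printf("%d\n", parse_max_speed("warp-speed\n"));       /* 5, rejected */
	return 0;
}

In use, the attribute lets a gadget be capped before binding, e.g. "echo super-speed > /sys/kernel/config/usb_gadget/<name>/max_speed" (path assumes configfs mounted at /sys/kernel/config).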
 
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 460d5d7..7f5cf48 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -52,6 +52,7 @@ struct f_ecm {
 	struct usb_ep			*notify;
 	struct usb_request		*notify_req;
 	u8				notify_state;
+	atomic_t			notify_count;
 	bool				is_open;
 
 	/* FIXME is_open needs some irq-ish locking
@@ -380,7 +381,7 @@ static void ecm_do_notify(struct f_ecm *ecm)
 	int				status;
 
 	/* notification already in flight? */
-	if (!req)
+	if (atomic_read(&ecm->notify_count))
 		return;
 
 	event = req->buf;
@@ -420,10 +421,10 @@ static void ecm_do_notify(struct f_ecm *ecm)
 	event->bmRequestType = 0xA1;
 	event->wIndex = cpu_to_le16(ecm->ctrl_id);
 
-	ecm->notify_req = NULL;
+	atomic_inc(&ecm->notify_count);
 	status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
 	if (status < 0) {
-		ecm->notify_req = req;
+		atomic_dec(&ecm->notify_count);
 		DBG(cdev, "notify --> %d\n", status);
 	}
 }
@@ -448,17 +449,19 @@ static void ecm_notify_complete(struct usb_ep *ep, struct usb_request *req)
 	switch (req->status) {
 	case 0:
 		/* no fault */
+		atomic_dec(&ecm->notify_count);
 		break;
 	case -ECONNRESET:
 	case -ESHUTDOWN:
+		atomic_set(&ecm->notify_count, 0);
 		ecm->notify_state = ECM_NOTIFY_NONE;
 		break;
 	default:
 		DBG(cdev, "event %02x --> %d\n",
 			event->bNotificationType, req->status);
+		atomic_dec(&ecm->notify_count);
 		break;
 	}
-	ecm->notify_req = req;
 	ecm_do_notify(ecm);
 }
 
@@ -907,6 +910,11 @@ static void ecm_unbind(struct usb_configuration *c, struct usb_function *f)
 
 	usb_free_all_descriptors(f);
 
+	if (atomic_read(&ecm->notify_count)) {
+		usb_ep_dequeue(ecm->notify, ecm->notify_req);
+		atomic_set(&ecm->notify_count, 0);
+	}
+
 	kfree(ecm->notify_req->buf);
 	usb_ep_free_request(ecm->notify, ecm->notify_req);
 }
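The notify_count change above (and the matching f_ncm change further down) replaces the "notify_req pointer is NULL while a notification is queued" convention with an atomic in-flight counter, so unbind can see a pending notification and dequeue it. A userspace sketch of the counter's role, with usb_ep_queue()/completion simulated by printf:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int notify_count;

static void do_notify(void)
{
	if (atomic_load(&notify_count))
		return;			/* notification already in flight */

	atomic_fetch_add(&notify_count, 1);
	printf("queue notification\n");	/* usb_ep_queue() in the driver */
}

static void notify_complete(bool ok)
{
	if (ok)
		atomic_fetch_sub(&notify_count, 1);
	else
		atomic_store(&notify_count, 0);	/* -ECONNRESET/-ESHUTDOWN path */
}

int main(void)
{
	do_notify();		/* queues */
	do_notify();		/* skipped: one already in flight */
	notify_complete(true);
	do_notify();		/* queues again */
	return 0;
}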
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 0bbccac..6171d28 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1062,6 +1062,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 			req->num_sgs = io_data->sgt.nents;
 		} else {
 			req->buf = data;
+			req->num_sgs = 0;
 		}
 		req->length = data_len;
 
@@ -1105,6 +1106,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 			req->num_sgs = io_data->sgt.nents;
 		} else {
 			req->buf = data;
+			req->num_sgs = 0;
 		}
 		req->length = data_len;
 
@@ -1486,7 +1488,7 @@ enum {
 	Opt_gid,
 };
 
-static const struct fs_parameter_spec ffs_fs_param_specs[] = {
+static const struct fs_parameter_spec ffs_fs_fs_parameters[] = {
 	fsparam_bool	("no_disconnect",	Opt_no_disconnect),
 	fsparam_u32	("rmode",		Opt_rmode),
 	fsparam_u32	("fmode",		Opt_fmode),
@@ -1496,11 +1498,6 @@ static const struct fs_parameter_spec ffs_fs_param_specs[] = {
 	{}
 };
 
-static const struct fs_parameter_description ffs_fs_fs_parameters = {
-	.name		= "kAFS",
-	.specs		= ffs_fs_param_specs,
-};
-
 static int ffs_fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
 {
 	struct ffs_sb_fill_data *data = fc->fs_private;
@@ -1509,7 +1506,7 @@ static int ffs_fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
 
 	ENTER();
 
-	opt = fs_parse(fc, &ffs_fs_fs_parameters, param, &result);
+	opt = fs_parse(fc, ffs_fs_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
 
@@ -1641,7 +1638,7 @@ static struct file_system_type ffs_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "functionfs",
 	.init_fs_context = ffs_fs_init_fs_context,
-	.parameters	= &ffs_fs_fs_parameters,
+	.parameters	= ffs_fs_fs_parameters,
 	.kill_sb	= ffs_fs_kill_sb,
 };
 MODULE_ALIAS_FS("functionfs");
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 2d6e76e..1d90008 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -53,6 +53,7 @@ struct f_ncm {
 	struct usb_ep			*notify;
 	struct usb_request		*notify_req;
 	u8				notify_state;
+	atomic_t			notify_count;
 	bool				is_open;
 
 	const struct ndp_parser_opts	*parser_opts;
@@ -547,7 +548,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
 	int				status;
 
 	/* notification already in flight? */
-	if (!req)
+	if (atomic_read(&ncm->notify_count))
 		return;
 
 	event = req->buf;
@@ -587,7 +588,8 @@ static void ncm_do_notify(struct f_ncm *ncm)
 	event->bmRequestType = 0xA1;
 	event->wIndex = cpu_to_le16(ncm->ctrl_id);
 
-	ncm->notify_req = NULL;
+	atomic_inc(&ncm->notify_count);
+
 	/*
 	 * In double buffering if there is a space in FIFO,
 	 * completion callback can be called right after the call,
@@ -597,7 +599,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
 	status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC);
 	spin_lock(&ncm->lock);
 	if (status < 0) {
-		ncm->notify_req = req;
+		atomic_dec(&ncm->notify_count);
 		DBG(cdev, "notify --> %d\n", status);
 	}
 }
@@ -632,17 +634,19 @@ static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req)
 	case 0:
 		VDBG(cdev, "Notification %02x sent\n",
 		     event->bNotificationType);
+		atomic_dec(&ncm->notify_count);
 		break;
 	case -ECONNRESET:
 	case -ESHUTDOWN:
+		atomic_set(&ncm->notify_count, 0);
 		ncm->notify_state = NCM_NOTIFY_NONE;
 		break;
 	default:
 		DBG(cdev, "event %02x --> %d\n",
 			event->bNotificationType, req->status);
+		atomic_dec(&ncm->notify_count);
 		break;
 	}
-	ncm->notify_req = req;
 	ncm_do_notify(ncm);
 	spin_unlock(&ncm->lock);
 }
@@ -1649,6 +1653,11 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
 	ncm_string_defs[0].id = 0;
 	usb_free_all_descriptors(f);
 
+	if (atomic_read(&ncm->notify_count)) {
+		usb_ep_dequeue(ncm->notify, ncm->notify_req);
+		atomic_set(&ncm->notify_count, 0);
+	}
+
 	kfree(ncm->notify_req->buf);
 	usb_ep_free_request(ncm->notify, ncm->notify_req);
 }
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 04c142c..64de9f1 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -72,7 +72,7 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params,
 
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
 
-static const struct file_operations rndis_proc_fops;
+static const struct proc_ops rndis_proc_ops;
 
 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */
 
@@ -902,7 +902,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
 
 		sprintf(name, NAME_TEMPLATE, i);
 		proc_entry = proc_create_data(name, 0660, NULL,
-					      &rndis_proc_fops, params);
+					      &rndis_proc_ops, params);
 		if (!proc_entry) {
 			kfree(params);
 			rndis_put_nr(i);
@@ -1164,13 +1164,12 @@ static int rndis_proc_open(struct inode *inode, struct file *file)
 	return single_open(file, rndis_proc_show, PDE_DATA(inode));
 }
 
-static const struct file_operations rndis_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= rndis_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= rndis_proc_write,
+static const struct proc_ops rndis_proc_ops = {
+	.proc_open	= rndis_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= rndis_proc_write,
 };
 
 #define	NAME_TEMPLATE "driver/rndis-%03d"
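The hunk above is the mechanical conversion from file_operations to the proc_ops interface introduced for procfs. A minimal, hypothetical module (not part of the RNDIS code) showing the same shape on kernels that provide struct proc_ops:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from proc_ops\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct proc_ops demo_proc_ops = {
	.proc_open	= demo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static struct proc_dir_entry *demo_entry;

static int __init demo_init(void)
{
	demo_entry = proc_create("driver/proc-ops-demo", 0444, NULL,
				 &demo_proc_ops);
	return demo_entry ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	proc_remove(demo_entry);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");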
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 7ec6a99..6d956f1 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -239,18 +239,6 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
 	return bytes_to_frames(substream->runtime, prm->hw_ptr);
 }
 
-static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
-			       struct snd_pcm_hw_params *hw_params)
-{
-	return snd_pcm_lib_malloc_pages(substream,
-					params_buffer_bytes(hw_params));
-}
-
-static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
-{
-	return snd_pcm_lib_free_pages(substream);
-}
-
 static int uac_pcm_open(struct snd_pcm_substream *substream)
 {
 	struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
@@ -326,9 +314,6 @@ static int uac_pcm_null(struct snd_pcm_substream *substream)
 static const struct snd_pcm_ops uac_pcm_ops = {
 	.open = uac_pcm_open,
 	.close = uac_pcm_null,
-	.ioctl = snd_pcm_lib_ioctl,
-	.hw_params = uac_pcm_hw_params,
-	.hw_free = uac_pcm_hw_free,
 	.trigger = uac_pcm_trigger,
 	.pointer = uac_pcm_pointer,
 	.prepare = uac_pcm_null,
@@ -422,7 +407,7 @@ int u_audio_start_playback(struct g_audio *audio_dev)
 	struct usb_ep *ep;
 	struct uac_rtd_params *prm;
 	struct uac_params *params = &audio_dev->params;
-	unsigned int factor, rate;
+	unsigned int factor;
 	const struct usb_endpoint_descriptor *ep_desc;
 	int req_len, i;
 
@@ -441,13 +426,15 @@ int u_audio_start_playback(struct g_audio *audio_dev)
 	/* pre-compute some values for iso_complete() */
 	uac->p_framesize = params->p_ssize *
 			    num_channels(params->p_chmask);
-	rate = params->p_srate * uac->p_framesize;
 	uac->p_interval = factor / (1 << (ep_desc->bInterval - 1));
-	uac->p_pktsize = min_t(unsigned int, rate / uac->p_interval,
+	uac->p_pktsize = min_t(unsigned int,
+				uac->p_framesize *
+					(params->p_srate / uac->p_interval),
 				prm->max_psize);
 
 	if (uac->p_pktsize < prm->max_psize)
-		uac->p_pktsize_residue = rate % uac->p_interval;
+		uac->p_pktsize_residue = uac->p_framesize *
+			(params->p_srate % uac->p_interval);
 	else
 		uac->p_pktsize_residue = 0;
 
@@ -584,8 +571,8 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
 	strlcpy(card->shortname, card_name, sizeof(card->shortname));
 	sprintf(card->longname, "%s %i", card_name, card->dev->id);
 
-	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
-					      NULL, 0, BUFF_SIZE_MAX);
+	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+				       NULL, 0, BUFF_SIZE_MAX);
 
 	err = snd_card_register(card);
 
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 119a4e4..6e7e1a9 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -14,6 +14,32 @@
 # both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG).
 #
 
+choice
+	tristate "USB Gadget precomposed configurations"
+	default USB_ETH
+	optional
+	help
+	  A Linux "Gadget Driver" talks to the USB Peripheral Controller
+	  driver through the abstract "gadget" API.  Some other operating
+	  systems call these "client" drivers, of which "class drivers"
+	  are a subset (implementing a USB device class specification).
+	  A gadget driver implements one or more USB functions using
+	  the peripheral hardware.
+
+	  Gadget drivers are hardware-neutral, or "platform independent",
+	  except that they sometimes must understand quirks or limitations
+	  of the particular controllers they work with.  For example, when
+	  a controller doesn't support alternate configurations or provide
+	  enough of the right types of endpoints, the gadget driver might
+	  not be able to work with that controller, or might need to implement
+	  a less common variant of a device class protocol.
+
+	  The available choices each represent a single precomposed USB
+	  gadget configuration. In the device model, each option contains
+	  both the device instantiation as a child for a USB gadget
+	  controller, and the relevant drivers for each function declared
+	  by the device.
+
 config USB_ZERO
 	tristate "Gadget Zero (DEVELOPMENT)"
 	select USB_LIBCOMPOSITE
@@ -489,3 +515,5 @@
 
 	  Say "y" to link the driver statically, or "m" to build a
 	  dynamically linked module called "g_webcam".
+
+endchoice
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
index da1c379..8d7a556 100644
--- a/drivers/usb/gadget/legacy/cdc2.c
+++ b/drivers/usb/gadget/legacy/cdc2.c
@@ -225,7 +225,7 @@ static struct usb_composite_driver cdc_driver = {
 	.name		= "g_cdc",
 	.dev		= &device_desc,
 	.strings	= dev_strings,
-	.max_speed	= USB_SPEED_HIGH,
+	.max_speed	= USB_SPEED_SUPER,
 	.bind		= cdc_bind,
 	.unbind		= cdc_unbind,
 };
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index b640ed3..ae6d8f7 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -149,7 +149,7 @@ static struct usb_composite_driver gfs_driver = {
 	.name		= DRIVER_NAME,
 	.dev		= &gfs_dev_desc,
 	.strings	= gfs_dev_strings,
-	.max_speed	= USB_SPEED_HIGH,
+	.max_speed	= USB_SPEED_SUPER,
 	.bind		= gfs_bind,
 	.unbind		= gfs_unbind,
 };
diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
index 50515f9..ec97498 100644
--- a/drivers/usb/gadget/legacy/multi.c
+++ b/drivers/usb/gadget/legacy/multi.c
@@ -482,7 +482,7 @@ static struct usb_composite_driver multi_driver = {
 	.name		= "g_multi",
 	.dev		= &device_desc,
 	.strings	= dev_strings,
-	.max_speed	= USB_SPEED_HIGH,
+	.max_speed	= USB_SPEED_SUPER,
 	.bind		= multi_bind,
 	.unbind		= multi_unbind,
 	.needs_serial	= 1,
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index 8465f08..c61e71b 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -197,7 +197,7 @@ static struct usb_composite_driver ncm_driver = {
 	.name		= "g_ncm",
 	.dev		= &device_desc,
 	.strings	= dev_strings,
-	.max_speed	= USB_SPEED_HIGH,
+	.max_speed	= USB_SPEED_SUPER,
 	.bind		= gncm_bind,
 	.unbind		= gncm_unbind,
 };
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 8a42768..6e04321 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1122,7 +1122,7 @@ static struct usb_endpoint_descriptor usba_ep0_desc = {
 	.bInterval = 1,
 };
 
-static struct usb_gadget usba_gadget_template = {
+static const struct usb_gadget usba_gadget_template = {
 	.ops		= &usba_udc_ops,
 	.max_speed	= USB_SPEED_HIGH,
 	.name		= "atmel_usba_udc",
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 51fa614..9b11046 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1414,6 +1414,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
 	}
 
 	mutex_unlock(&udc_lock);
+	if (ret)
+		pr_warn("udc-core: couldn't find an available UDC or it's busy\n");
 	return ret;
 found:
 	ret = udc_bind_to_driver(udc, driver);
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 64d80c6..aaf975c 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -2175,8 +2175,6 @@ static int gr_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
-	spin_lock(&dev->lock);
-
 	/* Inside lock so that no gadget can use this udc until probe is done */
 	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
 	if (retval) {
@@ -2185,15 +2183,21 @@ static int gr_probe(struct platform_device *pdev)
 	}
 	dev->added = 1;
 
-	retval = gr_udc_init(dev);
-	if (retval)
-		goto out;
+	spin_lock(&dev->lock);
 
-	gr_dfs_create(dev);
+	retval = gr_udc_init(dev);
+	if (retval) {
+		spin_unlock(&dev->lock);
+		goto out;
+	}
 
 	/* Clear all interrupt enables that might be left on since last boot */
 	gr_disable_interrupts_and_pullup(dev);
 
+	spin_unlock(&dev->lock);
+
+	gr_dfs_create(dev);
+
 	retval = gr_request_irq(dev, dev->irq);
 	if (retval) {
 		dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
@@ -2222,8 +2226,6 @@ static int gr_probe(struct platform_device *pdev)
 		dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
 
 out:
-	spin_unlock(&dev->lock);
-
 	if (retval)
 		gr_remove(pdev);
 
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index f36f073..bd12417 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2757,7 +2757,7 @@ static int omap_udc_probe(struct platform_device *pdev)
 
 	/* NOTE:  "knows" the order of the resources! */
 	if (!request_mem_region(pdev->resource[0].start,
-			pdev->resource[0].end - pdev->resource[0].start + 1,
+			resource_size(&pdev->resource[0]),
 			driver_name)) {
 		DBG("request_mem_region failed\n");
 		return -EBUSY;
@@ -2934,7 +2934,7 @@ static int omap_udc_probe(struct platform_device *pdev)
 	}
 
 	release_mem_region(pdev->resource[0].start,
-			pdev->resource[0].end - pdev->resource[0].start + 1);
+			   resource_size(&pdev->resource[0]));
 
 	return status;
 }
@@ -2950,7 +2950,7 @@ static int omap_udc_remove(struct platform_device *pdev)
 	wait_for_completion(&done);
 
 	release_mem_region(pdev->resource[0].start,
-			pdev->resource[0].end - pdev->resource[0].start + 1);
+			   resource_size(&pdev->resource[0]));
 
 	return 0;
 }
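The omap_udc hunks replace the open-coded "end - start + 1" with resource_size(), the standard helper for struct resource's inclusive [start, end] convention (it lives in <linux/ioport.h>). A userspace re-implementation just to show the arithmetic:

#include <stdint.h>
#include <stdio.h>

struct resource {
	uint64_t start;
	uint64_t end;		/* inclusive */
};

static uint64_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	struct resource r = { .start = 0xfffb4000, .end = 0xfffb40ff };

	printf("size = %llu\n", (unsigned long long)resource_size(&r)); /* 256 */
	return 0;
}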
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 8d73018..55bdfdf 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -186,7 +186,7 @@
 
 config USB_EHCI_MXC
 	tristate "Support for Freescale i.MX on-chip EHCI USB controller"
-	depends on ARCH_MXC
+	depends on ARCH_MXC || COMPILE_TEST
 	select USB_EHCI_ROOT_HUB_TT
 	---help---
 	  Variation of ARC USB block used in some Freescale chips.
@@ -210,8 +210,8 @@
 
 config USB_EHCI_HCD_ORION
 	tristate  "Support for Marvell EBU on-chip EHCI USB controller"
-	depends on USB_EHCI_HCD && (PLAT_ORION || ARCH_MVEBU)
-	default y
+	depends on USB_EHCI_HCD && (PLAT_ORION || ARCH_MVEBU || COMPILE_TEST)
+	default y if (PLAT_ORION || ARCH_MVEBU)
 	---help---
 	  Enables support for the on-chip EHCI controller on Marvell's
 	  embedded ARM SoCs, including Orion, Kirkwood, Dove, Armada XP,
@@ -221,15 +221,15 @@
 
 config USB_EHCI_HCD_SPEAR
 	tristate "Support for ST SPEAr on-chip EHCI USB controller"
-	depends on USB_EHCI_HCD && PLAT_SPEAR
-	default y
+	depends on USB_EHCI_HCD && (PLAT_SPEAR || COMPILE_TEST)
+	default y if PLAT_SPEAR
 	---help---
 	  Enables support for the on-chip EHCI controller on
 	  ST SPEAr chips.
 
 config USB_EHCI_HCD_STI
 	tristate "Support for ST STiHxxx on-chip EHCI USB controller"
-	depends on ARCH_STI && OF
+	depends on (ARCH_STI || COMPILE_TEST) && OF
 	select GENERIC_PHY
 	select USB_EHCI_HCD_PLATFORM
 	help
@@ -238,8 +238,8 @@
 
 config USB_EHCI_HCD_AT91
 	tristate  "Support for Atmel on-chip EHCI USB controller"
-	depends on USB_EHCI_HCD && ARCH_AT91
-	default y
+	depends on USB_EHCI_HCD && (ARCH_AT91 || COMPILE_TEST)
+	default y if ARCH_AT91
 	---help---
 	  Enables support for the on-chip EHCI controller on
 	  Atmel chips.
@@ -263,20 +263,20 @@
 
 config USB_EHCI_SH
 	bool "EHCI support for SuperH USB controller"
-	depends on SUPERH
+	depends on SUPERH || COMPILE_TEST
 	---help---
 	  Enables support for the on-chip EHCI controller on the SuperH.
 	  If you use the PCI EHCI controller, this option is not necessary.
 
 config USB_EHCI_EXYNOS
-	tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
-	depends on ARCH_S5PV210 || ARCH_EXYNOS
+	tristate "EHCI support for Samsung S5P/Exynos SoC Series"
+	depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
 	help
 	  Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
 
 config USB_EHCI_MV
 	tristate "EHCI support for Marvell PXA/MMP USB controller"
-	depends on (ARCH_PXA || ARCH_MMP)
+	depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
 	select USB_EHCI_ROOT_HUB_TT
 	---help---
 	  Enables support for Marvell (including PXA and MMP series) on-chip
@@ -289,7 +289,7 @@
 
 config USB_CNS3XXX_EHCI
 	bool "Cavium CNS3XXX EHCI Module (DEPRECATED)"
-	depends on ARCH_CNS3XXX
+	depends on ARCH_CNS3XXX || COMPILE_TEST
 	select USB_EHCI_HCD_PLATFORM
 	---help---
 	  This option is deprecated now and the driver was removed, use
@@ -410,15 +410,15 @@
 
 config USB_OHCI_HCD_SPEAR
 	tristate "Support for ST SPEAr on-chip OHCI USB controller"
-	depends on USB_OHCI_HCD && PLAT_SPEAR
-	default y
+	depends on USB_OHCI_HCD && (PLAT_SPEAR || COMPILE_TEST)
+	default y if PLAT_SPEAR
 	---help---
 	  Enables support for the on-chip OHCI controller on
 	  ST SPEAr chips.
 
 config USB_OHCI_HCD_STI
 	tristate "Support for ST STiHxxx on-chip OHCI USB controller"
-	depends on ARCH_STI && OF
+	depends on (ARCH_STI || COMPILE_TEST) && OF
 	select GENERIC_PHY
 	select USB_OHCI_HCD_PLATFORM
 	help
@@ -427,8 +427,8 @@
 
 config USB_OHCI_HCD_S3C2410
 	tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
-	depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
-	default y
+	depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX || COMPILE_TEST)
+	default y if (ARCH_S3C24XX || ARCH_S3C64XX)
 	---help---
 	  Enables support for the on-chip OHCI controller on
 	  S3C24xx/S3C64xx chips.
@@ -453,17 +453,17 @@
 
 config USB_OHCI_HCD_AT91
 	tristate "Support for Atmel on-chip OHCI USB controller"
-	depends on USB_OHCI_HCD && ARCH_AT91 && OF
-	default y
+	depends on USB_OHCI_HCD && (ARCH_AT91 || COMPILE_TEST) && OF
+	default y if ARCH_AT91
 	---help---
 	  Enables support for the on-chip OHCI controller on
 	  Atmel chips.
 
 config USB_OHCI_HCD_OMAP3
 	tristate "OHCI support for OMAP3 and later chips"
-	depends on (ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5)
+	depends on ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5 || COMPILE_TEST
 	select USB_OHCI_HCD_PLATFORM
-	default y
+	default y if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5
 	help
 	  This option is deprecated now and the driver was removed, use
 	  USB_OHCI_HCD_PLATFORM instead.
@@ -473,10 +473,10 @@
 
 config USB_OHCI_HCD_DAVINCI
 	tristate "OHCI support for TI DaVinci DA8xx"
-	depends on ARCH_DAVINCI_DA8XX
+	depends on ARCH_DAVINCI_DA8XX || COMPILE_TEST
 	depends on USB_OHCI_HCD
 	select PHY_DA8XX_USB
-	default y
+	default y if ARCH_DAVINCI_DA8XX
 	help
 	  Enables support for the DaVinci DA8xx integrated OHCI
 	  controller. This driver cannot currently be a loadable
@@ -532,7 +532,7 @@
 
 config USB_OHCI_SH
 	bool "OHCI support for SuperH USB controller (DEPRECATED)"
-	depends on SUPERH
+	depends on SUPERH || COMPILE_TEST
 	select USB_OHCI_HCD_PLATFORM
 	---help---
 	  This option is deprecated now and the driver was removed, use
@@ -542,14 +542,14 @@
 	  If you use the PCI OHCI controller, this option is not necessary.
 
 config USB_OHCI_EXYNOS
-	tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
-	depends on ARCH_S5PV210 || ARCH_EXYNOS
+	tristate "OHCI support for Samsung S5P/Exynos SoC Series"
+	depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
 	help
 	  Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
 
 config USB_CNS3XXX_OHCI
 	bool "Cavium CNS3XXX OHCI Module (DEPRECATED)"
-	depends on ARCH_CNS3XXX
+	depends on ARCH_CNS3XXX || COMPILE_TEST
 	select USB_OHCI_HCD_PLATFORM
 	---help---
 	  This option is deprecated now and the driver was removed, use
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 01debfd..a4e9abc 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0+
 /*
- * SAMSUNG EXYNOS USB HOST EHCI Controller
+ * Samsung Exynos USB HOST EHCI Controller
  *
  * Copyright (C) 2011 Samsung Electronics Co.Ltd
  * Author: Jingoo Han <jg1.han@samsung.com>
@@ -21,7 +21,7 @@
 
 #include "ehci.h"
 
-#define DRIVER_DESC "EHCI EXYNOS driver"
+#define DRIVER_DESC "EHCI Exynos driver"
 
 #define EHCI_INSNREG00(base)			(base + 0x90)
 #define EHCI_INSNREG00_ENA_INCR16		(0x1 << 25)
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 66ec1fd..bd4f6ef 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -11,6 +11,7 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/usb/otg.h>
+#include <linux/usb/of.h>
 #include <linux/platform_data/mv_usb.h>
 #include <linux/io.h>
 
@@ -67,6 +68,8 @@ static int mv_ehci_reset(struct usb_hcd *hcd)
 {
 	struct device *dev = hcd->self.controller;
 	struct ehci_hcd_mv *ehci_mv = hcd_to_ehci_hcd_mv(hcd);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	u32 status;
 	int retval;
 
 	if (ehci_mv == NULL) {
@@ -80,6 +83,14 @@ static int mv_ehci_reset(struct usb_hcd *hcd)
 	if (retval)
 		dev_err(dev, "ehci_setup failed %d\n", retval);
 
+	if (of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC) {
+		status = ehci_readl(ehci, &ehci->regs->port_status[0]);
+		status |= PORT_TEST_FORCE;
+		ehci_writel(ehci, status, &ehci->regs->port_status[0]);
+		status &= ~PORT_TEST_FORCE;
+		ehci_writel(ehci, status, &ehci->regs->port_status[0]);
+	}
+
 	return retval;
 }
 
@@ -116,7 +127,7 @@ static int mv_ehci_probe(struct platform_device *pdev)
 		ehci_mv->set_vbus = pdata->set_vbus;
 	}
 
-	ehci_mv->phy = devm_phy_get(&pdev->dev, "usb");
+	ehci_mv->phy = devm_phy_optional_get(&pdev->dev, "usb");
 	if (IS_ERR(ehci_mv->phy)) {
 		retval = PTR_ERR(ehci_mv->phy);
 		if (retval != -EPROBE_DEFER)
@@ -164,7 +175,7 @@ static int mv_ehci_probe(struct platform_device *pdev)
 	}
 
 	ehci = hcd_to_ehci(hcd);
-	ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
+	ehci->caps = (struct ehci_caps __iomem *) ehci_mv->cap_regs;
 
 	if (ehci_mv->mode == MV_USB_MODE_OTG) {
 		ehci_mv->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
@@ -246,10 +257,8 @@ static int mv_ehci_remove(struct platform_device *pdev)
 MODULE_ALIAS("mv-ehci");
 
 static const struct platform_device_id ehci_id_table[] = {
-	{"pxa-u2oehci", PXA_U2OEHCI},
-	{"pxa-sph", PXA_SPH},
-	{"mmp3-hsic", MMP3_HSIC},
-	{"mmp3-fsic", MMP3_FSIC},
+	{"pxa-u2oehci", 0},
+	{"pxa-sph", 0},
 	{},
 };
 
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index 2afde14..c25c51d 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -8,7 +8,6 @@
  */
 #include <linux/platform_device.h>
 #include <linux/clk.h>
-#include <linux/platform_data/ehci-sh.h>
 
 struct ehci_sh_priv {
 	struct clk *iclk, *fclk;
@@ -76,7 +75,6 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
 {
 	struct resource *res;
 	struct ehci_sh_priv *priv;
-	struct ehci_sh_platdata *pdata;
 	struct usb_hcd *hcd;
 	int irq, ret;
 
@@ -89,8 +87,6 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
 		goto fail_create_hcd;
 	}
 
-	pdata = dev_get_platdata(&pdev->dev);
-
 	/* initialize hcd */
 	hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
 			     dev_name(&pdev->dev));
@@ -127,9 +123,6 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
 	clk_enable(priv->fclk);
 	clk_enable(priv->iclk);
 
-	if (pdata && pdata->phy_init)
-		pdata->phy_init();
-
 	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "Failed to add hcd");
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 4d2cdec..d6433f2 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -42,12 +42,10 @@ struct tegra_ehci_soc_config {
 };
 
 struct tegra_ehci_hcd {
-	struct tegra_usb_phy *phy;
 	struct clk *clk;
 	struct reset_control *rst;
 	int port_resuming;
 	bool needs_double_reset;
-	enum tegra_usb_phy_port_speed port_speed;
 };
 
 static int tegra_reset_usb_controller(struct platform_device *pdev)
@@ -480,12 +478,6 @@ static int tegra_ehci_probe(struct platform_device *pdev)
 	}
 	u_phy->otg->host = hcd_to_bus(hcd);
 
-	err = usb_phy_set_suspend(hcd->usb_phy, 0);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to power on the phy\n");
-		goto cleanup_phy;
-	}
-
 	irq = platform_get_irq(pdev, 0);
 	if (!irq) {
 		dev_err(&pdev->dev, "Failed to get IRQ\n");
@@ -521,16 +513,10 @@ static int tegra_ehci_remove(struct platform_device *pdev)
 	struct tegra_ehci_hcd *tegra =
 		(struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
 
-	otg_set_host(hcd->usb_phy->otg, NULL);
-
-	usb_phy_shutdown(hcd->usb_phy);
 	usb_remove_hcd(hcd);
-
-	reset_control_assert(tegra->rst);
-	udelay(1);
-
+	otg_set_host(hcd->usb_phy->otg, NULL);
+	usb_phy_shutdown(hcd->usb_phy);
 	clk_disable_unprepare(tegra->clk);
-
 	usb_put_hcd(hcd);
 
 	return 0;
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index d5ce98e..bd40e59 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -19,7 +19,7 @@
 
 #include "ohci.h"
 
-#define DRIVER_DESC "OHCI EXYNOS driver"
+#define DRIVER_DESC "OHCI Exynos driver"
 
 static const char hcd_name[] = "ohci-exynos";
 static struct hc_driver __read_mostly exynos_ohci_hc_driver;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index fe09b86..120666a 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -2783,11 +2783,15 @@ static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
 		return;
 
 	oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
-	for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; )
-		(void) oxu_hub_control(oxu_to_hcd(oxu),
-				is_on ? SetPortFeature : ClearPortFeature,
-				USB_PORT_FEAT_POWER,
-				port--, NULL, 0);
+	for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; ) {
+		if (is_on)
+			oxu_hub_control(oxu_to_hcd(oxu), SetPortFeature,
+				USB_PORT_FEAT_POWER, port--, NULL, 0);
+		else
+			oxu_hub_control(oxu_to_hcd(oxu), ClearPortFeature,
+				USB_PORT_FEAT_POWER, port--, NULL, 0);
+	}
+
 	msleep(20);
 }
 
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index b18a6ba..bfbdb3c 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -488,11 +488,6 @@ static int xhci_mtk_probe(struct platform_device *pdev)
 		goto disable_clk;
 	}
 
-	/* Initialize dma_mask and coherent_dma_mask to 32-bits */
-	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-	if (ret)
-		goto disable_clk;
-
 	hcd = usb_create_hcd(driver, dev, dev_name(dev));
 	if (!hcd) {
 		ret = -ENOMEM;
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index bf90654..8163aef 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -11,6 +11,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
 #include <linux/interrupt.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
@@ -38,7 +39,15 @@
 #define XUSB_CFG_4				0x010
 #define  XUSB_BASE_ADDR_SHIFT			15
 #define  XUSB_BASE_ADDR_MASK			0x1ffff
+#define XUSB_CFG_16				0x040
+#define XUSB_CFG_24				0x060
+#define XUSB_CFG_AXI_CFG			0x0f8
 #define XUSB_CFG_ARU_C11_CSBRANGE		0x41c
+#define XUSB_CFG_ARU_CONTEXT			0x43c
+#define XUSB_CFG_ARU_CONTEXT_HS_PLS		0x478
+#define XUSB_CFG_ARU_CONTEXT_FS_PLS		0x47c
+#define XUSB_CFG_ARU_CONTEXT_HSFS_SPEED		0x480
+#define XUSB_CFG_ARU_CONTEXT_HSFS_PP		0x484
 #define XUSB_CFG_CSB_BASE_ADDR			0x800
 
 /* FPCI mailbox registers */
@@ -62,11 +71,20 @@
 #define  MBOX_SMI_INTR_EN			BIT(3)
 
 /* IPFS registers */
+#define IPFS_XUSB_HOST_MSI_BAR_SZ_0		0x0c0
+#define IPFS_XUSB_HOST_MSI_AXI_BAR_ST_0		0x0c4
+#define IPFS_XUSB_HOST_MSI_FPCI_BAR_ST_0	0x0c8
+#define IPFS_XUSB_HOST_MSI_VEC0_0		0x100
+#define IPFS_XUSB_HOST_MSI_EN_VEC0_0		0x140
 #define IPFS_XUSB_HOST_CONFIGURATION_0		0x180
 #define  IPFS_EN_FPCI				BIT(0)
+#define IPFS_XUSB_HOST_FPCI_ERROR_MASKS_0	0x184
 #define IPFS_XUSB_HOST_INTR_MASK_0		0x188
 #define  IPFS_IP_INT_MASK			BIT(16)
+#define IPFS_XUSB_HOST_INTR_ENABLE_0		0x198
+#define IPFS_XUSB_HOST_UFPCI_CONFIG_0		0x19c
 #define IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0	0x1bc
+#define IPFS_XUSB_HOST_MCCIF_FIFOCTRL_0		0x1dc
 
 #define CSB_PAGE_SELECT_MASK			0x7fffff
 #define CSB_PAGE_SELECT_SHIFT			9
@@ -101,6 +119,8 @@
 #define  L2IMEMOP_ACTION_SHIFT			24
 #define  L2IMEMOP_INVALIDATE_ALL		(0x40 << L2IMEMOP_ACTION_SHIFT)
 #define  L2IMEMOP_LOAD_LOCKED_RESULT		(0x11 << L2IMEMOP_ACTION_SHIFT)
+#define XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT	0x101a18
+#define  L2IMEMOP_RESULT_VLD			BIT(31)
 #define XUSB_CSB_MP_APMAP			0x10181c
 #define  APMAP_BOOTPATH				BIT(31)
 
@@ -145,19 +165,32 @@ struct tegra_xusb_phy_type {
 	unsigned int num;
 };
 
-struct tega_xusb_mbox_regs {
+struct tegra_xusb_mbox_regs {
 	u16 cmd;
 	u16 data_in;
 	u16 data_out;
 	u16 owner;
 };
 
+struct tegra_xusb_context_soc {
+	struct {
+		const unsigned int *offsets;
+		unsigned int num_offsets;
+	} ipfs;
+
+	struct {
+		const unsigned int *offsets;
+		unsigned int num_offsets;
+	} fpci;
+};
+
 struct tegra_xusb_soc {
 	const char *firmware;
 	const char * const *supply_names;
 	unsigned int num_supplies;
 	const struct tegra_xusb_phy_type *phy_types;
 	unsigned int num_types;
+	const struct tegra_xusb_context_soc *context;
 
 	struct {
 		struct {
@@ -166,12 +199,17 @@ struct tegra_xusb_soc {
 		} usb2, ulpi, hsic, usb3;
 	} ports;
 
-	struct tega_xusb_mbox_regs mbox;
+	struct tegra_xusb_mbox_regs mbox;
 
 	bool scale_ss_clock;
 	bool has_ipfs;
 };
 
+struct tegra_xusb_context {
+	u32 *ipfs;
+	u32 *fpci;
+};
+
 struct tegra_xusb {
 	struct device *dev;
 	void __iomem *regs;
@@ -218,6 +256,8 @@ struct tegra_xusb {
 		void *virt;
 		dma_addr_t phys;
 	} fw;
+
+	struct tegra_xusb_context context;
 };
 
 static struct hc_driver __read_mostly tegra_xhci_hc_driver;
@@ -623,9 +663,9 @@ static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static void tegra_xusb_config(struct tegra_xusb *tegra,
-			      struct resource *regs)
+static void tegra_xusb_config(struct tegra_xusb *tegra)
 {
+	u32 regs = tegra->hcd->rsrc_start;
 	u32 value;
 
 	if (tegra->soc->has_ipfs) {
@@ -639,7 +679,7 @@ static void tegra_xusb_config(struct tegra_xusb *tegra,
 	/* Program BAR0 space */
 	value = fpci_readl(tegra, XUSB_CFG_4);
 	value &= ~(XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
-	value |= regs->start & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
+	value |= regs & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
 	fpci_writel(tegra, value, XUSB_CFG_4);
 
 	usleep_range(100, 200);
@@ -793,17 +833,34 @@ static int tegra_xusb_runtime_resume(struct device *dev)
 	return err;
 }
 
-static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+#ifdef CONFIG_PM_SLEEP
+static int tegra_xusb_init_context(struct tegra_xusb *tegra)
 {
-	unsigned int code_tag_blocks, code_size_blocks, code_blocks;
+	const struct tegra_xusb_context_soc *soc = tegra->soc->context;
+
+	tegra->context.ipfs = devm_kcalloc(tegra->dev, soc->ipfs.num_offsets,
+					   sizeof(u32), GFP_KERNEL);
+	if (!tegra->context.ipfs)
+		return -ENOMEM;
+
+	tegra->context.fpci = devm_kcalloc(tegra->dev, soc->fpci.num_offsets,
+					   sizeof(u32), GFP_KERNEL);
+	if (!tegra->context.fpci)
+		return -ENOMEM;
+
+	return 0;
+}
+#else
+static inline int tegra_xusb_init_context(struct tegra_xusb *tegra)
+{
+	return 0;
+}
+#endif
+
+static int tegra_xusb_request_firmware(struct tegra_xusb *tegra)
+{
 	struct tegra_xusb_fw_header *header;
-	struct device *dev = tegra->dev;
 	const struct firmware *fw;
-	unsigned long timeout;
-	time64_t timestamp;
-	struct tm time;
-	u64 address;
-	u32 value;
 	int err;
 
 	err = request_firmware(&fw, tegra->soc->firmware, tegra->dev);
@@ -828,6 +885,26 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
 	memcpy(tegra->fw.virt, fw->data, tegra->fw.size);
 	release_firmware(fw);
 
+	return 0;
+}
+
+static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+{
+	unsigned int code_tag_blocks, code_size_blocks, code_blocks;
+	struct xhci_cap_regs __iomem *cap = tegra->regs;
+	struct tegra_xusb_fw_header *header;
+	struct device *dev = tegra->dev;
+	struct xhci_op_regs __iomem *op;
+	unsigned long timeout;
+	time64_t timestamp;
+	struct tm time;
+	u64 address;
+	u32 value;
+	int err;
+
+	header = (struct tegra_xusb_fw_header *)tegra->fw.virt;
+	op = tegra->regs + HC_LENGTH(readl(&cap->hc_capbase));
+
 	if (csb_readl(tegra, XUSB_CSB_MP_ILOAD_BASE_LO) != 0) {
 		dev_info(dev, "Firmware already loaded, Falcon state %#x\n",
 			 csb_readl(tegra, XUSB_FALC_CPUCTL));
@@ -882,26 +959,37 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
 
 	csb_writel(tegra, 0, XUSB_FALC_DMACTL);
 
-	msleep(50);
+	/* wait for RESULT_VLD to get set */
+#define tegra_csb_readl(offset) csb_readl(tegra, offset)
+	err = readx_poll_timeout(tegra_csb_readl,
+				 XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT, value,
+				 value & L2IMEMOP_RESULT_VLD, 100, 10000);
+	if (err < 0) {
+		dev_err(dev, "DMA controller not ready %#010x\n", value);
+		return err;
+	}
+#undef tegra_csb_readl
 
 	csb_writel(tegra, le32_to_cpu(header->boot_codetag),
 		   XUSB_FALC_BOOTVEC);
 
-	/* Boot Falcon CPU and wait for it to enter the STOPPED (idle) state. */
-	timeout = jiffies + msecs_to_jiffies(5);
-
+	/* Boot Falcon CPU and wait for USBSTS_CNR to get cleared. */
 	csb_writel(tegra, CPUCTL_STARTCPU, XUSB_FALC_CPUCTL);
 
-	while (time_before(jiffies, timeout)) {
-		if (csb_readl(tegra, XUSB_FALC_CPUCTL) == CPUCTL_STATE_STOPPED)
+	timeout = jiffies + msecs_to_jiffies(200);
+
+	do {
+		value = readl(&op->status);
+		if ((value & STS_CNR) == 0)
 			break;
 
-		usleep_range(100, 200);
-	}
+		usleep_range(1000, 2000);
+	} while (time_is_after_jiffies(timeout));
 
-	if (csb_readl(tegra, XUSB_FALC_CPUCTL) != CPUCTL_STATE_STOPPED) {
-		dev_err(dev, "Falcon failed to start, state: %#x\n",
-			csb_readl(tegra, XUSB_FALC_CPUCTL));
+	value = readl(&op->status);
+	if (value & STS_CNR) {
+		value = csb_readl(tegra, XUSB_FALC_CPUCTL);
+		dev_err(dev, "XHCI controller not read: %#010x\n", value);
 		return -EIO;
 	}
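
For readers unfamiliar with readx_poll_timeout(), the temporary tegra_csb_readl() #define above only exists to hand the macro a single-argument accessor. A rough open-coded equivalent of that wait, with an illustrative helper name and roughly the same 100 us poll interval and 10 ms deadline, is sketched below (this is not the driver's code):

	/*
	 * Sketch only: approximately what the readx_poll_timeout() call
	 * above does for the L2IMEMOP completion wait.
	 */
	static int tegra_xusb_wait_l2imemop(struct tegra_xusb *tegra)
	{
		unsigned long deadline = jiffies + usecs_to_jiffies(10000);
		u32 value;

		for (;;) {
			value = csb_readl(tegra, XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT);
			if (value & L2IMEMOP_RESULT_VLD)
				return 0;		/* DMA result is valid */
			if (time_after(jiffies, deadline))
				break;			/* give up after ~10 ms */
			usleep_range(100, 200);		/* poll every ~100 us */
		}

		/* one final read after the deadline, as the macro also does */
		value = csb_readl(tegra, XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT);
		return (value & L2IMEMOP_RESULT_VLD) ? 0 : -ETIMEDOUT;
	}
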
 
@@ -966,11 +1054,37 @@ static int tegra_xusb_powerdomain_init(struct device *dev,
 	return 0;
 }
 
-static int tegra_xusb_probe(struct platform_device *pdev)
+static int __tegra_xusb_enable_firmware_messages(struct tegra_xusb *tegra)
 {
 	struct tegra_xusb_mbox_msg msg;
-	struct resource *regs;
+	int err;
+
+	/* Enable firmware messages from controller. */
+	msg.cmd = MBOX_CMD_MSG_ENABLED;
+	msg.data = 0;
+
+	err = tegra_xusb_mbox_send(tegra, &msg);
+	if (err < 0)
+		dev_err(tegra->dev, "failed to enable messages: %d\n", err);
+
+	return err;
+}
+
+static int tegra_xusb_enable_firmware_messages(struct tegra_xusb *tegra)
+{
+	int err;
+
+	mutex_lock(&tegra->lock);
+	err = __tegra_xusb_enable_firmware_messages(tegra);
+	mutex_unlock(&tegra->lock);
+
+	return err;
+}
+
+static int tegra_xusb_probe(struct platform_device *pdev)
+{
 	struct tegra_xusb *tegra;
+	struct resource *regs;
 	struct xhci_hcd *xhci;
 	unsigned int i, j, k;
 	struct phy *phy;
@@ -986,6 +1100,10 @@ static int tegra_xusb_probe(struct platform_device *pdev)
 	mutex_init(&tegra->lock);
 	tegra->dev = &pdev->dev;
 
+	err = tegra_xusb_init_context(tegra);
+	if (err < 0)
+		return err;
+
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	tegra->regs = devm_ioremap_resource(&pdev->dev, regs);
 	if (IS_ERR(tegra->regs))
@@ -1173,6 +1291,10 @@ static int tegra_xusb_probe(struct platform_device *pdev)
 		goto put_powerdomains;
 	}
 
+	tegra->hcd->regs = tegra->regs;
+	tegra->hcd->rsrc_start = regs->start;
+	tegra->hcd->rsrc_len = resource_size(regs);
+
 	/*
 	 * This must happen after usb_create_hcd(), because usb_create_hcd()
 	 * will overwrite the drvdata of the device with the hcd it creates.
@@ -1185,19 +1307,6 @@ static int tegra_xusb_probe(struct platform_device *pdev)
 		goto put_hcd;
 	}
 
-	pm_runtime_enable(&pdev->dev);
-	if (pm_runtime_enabled(&pdev->dev))
-		err = pm_runtime_get_sync(&pdev->dev);
-	else
-		err = tegra_xusb_runtime_resume(&pdev->dev);
-
-	if (err < 0) {
-		dev_err(&pdev->dev, "failed to enable device: %d\n", err);
-		goto disable_phy;
-	}
-
-	tegra_xusb_config(tegra, regs);
-
 	/*
 	 * The XUSB Falcon microcontroller can only address 40 bits, so set
 	 * the DMA mask accordingly.
@@ -1205,19 +1314,35 @@ static int tegra_xusb_probe(struct platform_device *pdev)
 	err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
-		goto put_rpm;
+		goto disable_phy;
 	}
 
+	err = tegra_xusb_request_firmware(tegra);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to request firmware: %d\n", err);
+		goto disable_phy;
+	}
+
+	pm_runtime_enable(&pdev->dev);
+
+	if (!pm_runtime_enabled(&pdev->dev))
+		err = tegra_xusb_runtime_resume(&pdev->dev);
+	else
+		err = pm_runtime_get_sync(&pdev->dev);
+
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable device: %d\n", err);
+		goto free_firmware;
+	}
+
+	tegra_xusb_config(tegra);
+
 	err = tegra_xusb_load_firmware(tegra);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
 		goto put_rpm;
 	}
 
-	tegra->hcd->regs = tegra->regs;
-	tegra->hcd->rsrc_start = regs->start;
-	tegra->hcd->rsrc_len = resource_size(regs);
-
 	err = usb_add_hcd(tegra->hcd, tegra->xhci_irq, IRQF_SHARED);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to add USB HCD: %d\n", err);
@@ -1244,21 +1369,12 @@ static int tegra_xusb_probe(struct platform_device *pdev)
 		goto put_usb3;
 	}
 
-	mutex_lock(&tegra->lock);
-
-	/* Enable firmware messages from controller. */
-	msg.cmd = MBOX_CMD_MSG_ENABLED;
-	msg.data = 0;
-
-	err = tegra_xusb_mbox_send(tegra, &msg);
+	err = tegra_xusb_enable_firmware_messages(tegra);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to enable messages: %d\n", err);
-		mutex_unlock(&tegra->lock);
 		goto remove_usb3;
 	}
 
-	mutex_unlock(&tegra->lock);
-
 	err = devm_request_threaded_irq(&pdev->dev, tegra->mbox_irq,
 					tegra_xusb_mbox_irq,
 					tegra_xusb_mbox_thread, 0,
@@ -1281,6 +1397,9 @@ static int tegra_xusb_probe(struct platform_device *pdev)
 		tegra_xusb_runtime_suspend(&pdev->dev);
 put_hcd:
 	usb_put_hcd(tegra->hcd);
+free_firmware:
+	dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
+			  tegra->fw.phys);
 disable_phy:
 	tegra_xusb_phy_disable(tegra);
 	pm_runtime_disable(&pdev->dev);
@@ -1328,22 +1447,176 @@ static int tegra_xusb_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM_SLEEP
+static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
+{
+	struct device *dev = hub->hcd->self.controller;
+	bool status = true;
+	unsigned int i;
+	u32 value;
+
+	for (i = 0; i < hub->num_ports; i++) {
+		value = readl(hub->ports[i]->addr);
+		if ((value & PORT_PE) == 0)
+			continue;
+
+		if ((value & PORT_PLS_MASK) != XDEV_U3) {
+			dev_info(dev, "%u-%u isn't suspended: %#010x\n",
+				 hub->hcd->self.busnum, i + 1, value);
+			status = false;
+		}
+	}
+
+	return status;
+}
+
+static int tegra_xusb_check_ports(struct tegra_xusb *tegra)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	if (!xhci_hub_ports_suspended(&xhci->usb2_rhub) ||
+	    !xhci_hub_ports_suspended(&xhci->usb3_rhub))
+		err = -EBUSY;
+
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	return err;
+}
+
+static void tegra_xusb_save_context(struct tegra_xusb *tegra)
+{
+	const struct tegra_xusb_context_soc *soc = tegra->soc->context;
+	struct tegra_xusb_context *ctx = &tegra->context;
+	unsigned int i;
+
+	if (soc->ipfs.num_offsets > 0) {
+		for (i = 0; i < soc->ipfs.num_offsets; i++)
+			ctx->ipfs[i] = ipfs_readl(tegra, soc->ipfs.offsets[i]);
+	}
+
+	if (soc->fpci.num_offsets > 0) {
+		for (i = 0; i < soc->fpci.num_offsets; i++)
+			ctx->fpci[i] = fpci_readl(tegra, soc->fpci.offsets[i]);
+	}
+}
+
+static void tegra_xusb_restore_context(struct tegra_xusb *tegra)
+{
+	const struct tegra_xusb_context_soc *soc = tegra->soc->context;
+	struct tegra_xusb_context *ctx = &tegra->context;
+	unsigned int i;
+
+	if (soc->fpci.num_offsets > 0) {
+		for (i = 0; i < soc->fpci.num_offsets; i++)
+			fpci_writel(tegra, ctx->fpci[i], soc->fpci.offsets[i]);
+	}
+
+	if (soc->ipfs.num_offsets > 0) {
+		for (i = 0; i < soc->ipfs.num_offsets; i++)
+			ipfs_writel(tegra, ctx->ipfs[i], soc->ipfs.offsets[i]);
+	}
+}
+
+static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool wakeup)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+	int err;
+
+	err = tegra_xusb_check_ports(tegra);
+	if (err < 0) {
+		dev_err(tegra->dev, "not all ports suspended: %d\n", err);
+		return err;
+	}
+
+	err = xhci_suspend(xhci, wakeup);
+	if (err < 0) {
+		dev_err(tegra->dev, "failed to suspend XHCI: %d\n", err);
+		return err;
+	}
+
+	tegra_xusb_save_context(tegra);
+	tegra_xusb_phy_disable(tegra);
+	tegra_xusb_clk_disable(tegra);
+
+	return 0;
+}
+
+static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool wakeup)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+	int err;
+
+	err = tegra_xusb_clk_enable(tegra);
+	if (err < 0) {
+		dev_err(tegra->dev, "failed to enable clocks: %d\n", err);
+		return err;
+	}
+
+	err = tegra_xusb_phy_enable(tegra);
+	if (err < 0) {
+		dev_err(tegra->dev, "failed to enable PHYs: %d\n", err);
+		goto disable_clk;
+	}
+
+	tegra_xusb_config(tegra);
+	tegra_xusb_restore_context(tegra);
+
+	err = tegra_xusb_load_firmware(tegra);
+	if (err < 0) {
+		dev_err(tegra->dev, "failed to load firmware: %d\n", err);
+		goto disable_phy;
+	}
+
+	err = __tegra_xusb_enable_firmware_messages(tegra);
+	if (err < 0) {
+		dev_err(tegra->dev, "failed to enable messages: %d\n", err);
+		goto disable_phy;
+	}
+
+	err = xhci_resume(xhci, true);
+	if (err < 0) {
+		dev_err(tegra->dev, "failed to resume XHCI: %d\n", err);
+		goto disable_phy;
+	}
+
+	return 0;
+
+disable_phy:
+	tegra_xusb_phy_disable(tegra);
+disable_clk:
+	tegra_xusb_clk_disable(tegra);
+	return err;
+}
+
 static int tegra_xusb_suspend(struct device *dev)
 {
 	struct tegra_xusb *tegra = dev_get_drvdata(dev);
-	struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
 	bool wakeup = device_may_wakeup(dev);
+	int err;
 
-	/* TODO: Powergate controller across suspend/resume. */
-	return xhci_suspend(xhci, wakeup);
+	synchronize_irq(tegra->mbox_irq);
+
+	mutex_lock(&tegra->lock);
+	err = tegra_xusb_enter_elpg(tegra, wakeup);
+	mutex_unlock(&tegra->lock);
+
+	return err;
 }
 
 static int tegra_xusb_resume(struct device *dev)
 {
 	struct tegra_xusb *tegra = dev_get_drvdata(dev);
-	struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+	bool wakeup = device_may_wakeup(dev);
+	int err;
 
-	return xhci_resume(xhci, 0);
+	mutex_lock(&tegra->lock);
+	err = tegra_xusb_exit_elpg(tegra, wakeup);
+	mutex_unlock(&tegra->lock);
+
+	return err;
 }
 #endif
 
@@ -1370,12 +1643,50 @@ static const struct tegra_xusb_phy_type tegra124_phy_types[] = {
 	{ .name = "hsic", .num = 2, },
 };
 
+static const unsigned int tegra124_xusb_context_ipfs[] = {
+	IPFS_XUSB_HOST_MSI_BAR_SZ_0,
+	IPFS_XUSB_HOST_MSI_AXI_BAR_ST_0,
+	IPFS_XUSB_HOST_MSI_FPCI_BAR_ST_0,
+	IPFS_XUSB_HOST_MSI_VEC0_0,
+	IPFS_XUSB_HOST_MSI_EN_VEC0_0,
+	IPFS_XUSB_HOST_FPCI_ERROR_MASKS_0,
+	IPFS_XUSB_HOST_INTR_MASK_0,
+	IPFS_XUSB_HOST_INTR_ENABLE_0,
+	IPFS_XUSB_HOST_UFPCI_CONFIG_0,
+	IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0,
+	IPFS_XUSB_HOST_MCCIF_FIFOCTRL_0,
+};
+
+static const unsigned int tegra124_xusb_context_fpci[] = {
+	XUSB_CFG_ARU_CONTEXT_HS_PLS,
+	XUSB_CFG_ARU_CONTEXT_FS_PLS,
+	XUSB_CFG_ARU_CONTEXT_HSFS_SPEED,
+	XUSB_CFG_ARU_CONTEXT_HSFS_PP,
+	XUSB_CFG_ARU_CONTEXT,
+	XUSB_CFG_AXI_CFG,
+	XUSB_CFG_24,
+	XUSB_CFG_16,
+};
+
+static const struct tegra_xusb_context_soc tegra124_xusb_context = {
+	.ipfs = {
+		.num_offsets = ARRAY_SIZE(tegra124_xusb_context_ipfs),
+		.offsets = tegra124_xusb_context_ipfs,
+	},
+	.fpci = {
+		.num_offsets = ARRAY_SIZE(tegra124_xusb_context_fpci),
+		.offsets = tegra124_xusb_context_fpci,
+	},
+};
+
 static const struct tegra_xusb_soc tegra124_soc = {
 	.firmware = "nvidia/tegra124/xusb.bin",
 	.supply_names = tegra124_supply_names,
 	.num_supplies = ARRAY_SIZE(tegra124_supply_names),
 	.phy_types = tegra124_phy_types,
 	.num_types = ARRAY_SIZE(tegra124_phy_types),
+	.context = &tegra124_xusb_context,
 	.ports = {
 		.usb2 = { .offset = 4, .count = 4, },
 		.hsic = { .offset = 6, .count = 2, },
@@ -1414,6 +1725,7 @@ static const struct tegra_xusb_soc tegra210_soc = {
 	.num_supplies = ARRAY_SIZE(tegra210_supply_names),
 	.phy_types = tegra210_phy_types,
 	.num_types = ARRAY_SIZE(tegra210_phy_types),
+	.context = &tegra124_xusb_context,
 	.ports = {
 		.usb2 = { .offset = 4, .count = 4, },
 		.hsic = { .offset = 8, .count = 1, },
@@ -1432,6 +1744,7 @@ MODULE_FIRMWARE("nvidia/tegra210/xusb.bin");
 
 static const char * const tegra186_supply_names[] = {
 };
+MODULE_FIRMWARE("nvidia/tegra186/xusb.bin");
 
 static const struct tegra_xusb_phy_type tegra186_phy_types[] = {
 	{ .name = "usb3", .num = 3, },
@@ -1439,12 +1752,20 @@ static const struct tegra_xusb_phy_type tegra186_phy_types[] = {
 	{ .name = "hsic", .num = 1, },
 };
 
+static const struct tegra_xusb_context_soc tegra186_xusb_context = {
+	.fpci = {
+		.num_offsets = ARRAY_SIZE(tegra124_xusb_context_fpci),
+		.offsets = tegra124_xusb_context_fpci,
+	},
+};
+
 static const struct tegra_xusb_soc tegra186_soc = {
 	.firmware = "nvidia/tegra186/xusb.bin",
 	.supply_names = tegra186_supply_names,
 	.num_supplies = ARRAY_SIZE(tegra186_supply_names),
 	.phy_types = tegra186_phy_types,
 	.num_types = ARRAY_SIZE(tegra186_phy_types),
+	.context = &tegra186_xusb_context,
 	.ports = {
 		.usb3 = { .offset = 0, .count = 3, },
 		.usb2 = { .offset = 3, .count = 3, },
@@ -1474,6 +1795,7 @@ static const struct tegra_xusb_soc tegra194_soc = {
 	.num_supplies = ARRAY_SIZE(tegra194_supply_names),
 	.phy_types = tegra194_phy_types,
 	.num_types = ARRAY_SIZE(tegra194_phy_types),
+	.context = &tegra186_xusb_context,
 	.ports = {
 		.usb3 = { .offset = 0, .count = 4, },
 		.usb2 = { .offset = 4, .count = 4, },
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 72f39a9..116bd78 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -7,11 +7,10 @@
 
 #include <linux/clk.h>
 #include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/platform_data/usb3503.h>
 #include <linux/regmap.h>
@@ -47,19 +46,19 @@ struct usb3503 {
 	struct device		*dev;
 	struct clk		*clk;
 	u8	port_off_mask;
-	int	gpio_intn;
-	int	gpio_reset;
-	int	gpio_connect;
+	struct gpio_desc	*intn;
+	struct gpio_desc	*reset;
+	struct gpio_desc	*connect;
 	bool	secondary_ref_clk;
 };
 
 static int usb3503_reset(struct usb3503 *hub, int state)
 {
-	if (!state && gpio_is_valid(hub->gpio_connect))
-		gpio_set_value_cansleep(hub->gpio_connect, 0);
+	if (!state && hub->connect)
+		gpiod_set_value_cansleep(hub->connect, 0);
 
-	if (gpio_is_valid(hub->gpio_reset))
-		gpio_set_value_cansleep(hub->gpio_reset, state);
+	if (hub->reset)
+		gpiod_set_value_cansleep(hub->reset, !state);
 
 	/* Wait T_HUBINIT == 4ms for hub logic to stabilize */
 	if (state)
@@ -115,8 +114,8 @@ static int usb3503_connect(struct usb3503 *hub)
 		}
 	}
 
-	if (gpio_is_valid(hub->gpio_connect))
-		gpio_set_value_cansleep(hub->gpio_connect, 1);
+	if (hub->connect)
+		gpiod_set_value_cansleep(hub->connect, 1);
 
 	hub->mode = USB3503_MODE_HUB;
 	dev_info(dev, "switched to HUB mode\n");
@@ -163,13 +162,11 @@ static int usb3503_probe(struct usb3503 *hub)
 	int err;
 	u32 mode = USB3503_MODE_HUB;
 	const u32 *property;
+	enum gpiod_flags flags;
 	int len;
 
 	if (pdata) {
 		hub->port_off_mask	= pdata->port_off_mask;
-		hub->gpio_intn		= pdata->gpio_intn;
-		hub->gpio_connect	= pdata->gpio_connect;
-		hub->gpio_reset		= pdata->gpio_reset;
 		hub->mode		= pdata->initial_mode;
 	} else if (np) {
 		u32 rate = 0;
@@ -230,59 +227,38 @@ static int usb3503_probe(struct usb3503 *hub)
 			}
 		}
 
-		hub->gpio_intn	= of_get_named_gpio(np, "intn-gpios", 0);
-		if (hub->gpio_intn == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-		hub->gpio_connect = of_get_named_gpio(np, "connect-gpios", 0);
-		if (hub->gpio_connect == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-		hub->gpio_reset = of_get_named_gpio(np, "reset-gpios", 0);
-		if (hub->gpio_reset == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
 		of_property_read_u32(np, "initial-mode", &mode);
 		hub->mode = mode;
 	}
 
-	if (hub->port_off_mask && !hub->regmap)
-		dev_err(dev, "Ports disabled with no control interface\n");
+	if (hub->secondary_ref_clk)
+		flags = GPIOD_OUT_LOW;
+	else
+		flags = GPIOD_OUT_HIGH;
+	hub->intn = devm_gpiod_get_optional(dev, "intn", flags);
+	if (IS_ERR(hub->intn))
+		return PTR_ERR(hub->intn);
+	if (hub->intn)
+		gpiod_set_consumer_name(hub->intn, "usb3503 intn");
 
-	if (gpio_is_valid(hub->gpio_intn)) {
-		int val = hub->secondary_ref_clk ? GPIOF_OUT_INIT_LOW :
-						   GPIOF_OUT_INIT_HIGH;
-		err = devm_gpio_request_one(dev, hub->gpio_intn, val,
-					    "usb3503 intn");
-		if (err) {
-			dev_err(dev,
-				"unable to request GPIO %d as interrupt pin (%d)\n",
-				hub->gpio_intn, err);
-			return err;
-		}
-	}
+	hub->connect = devm_gpiod_get_optional(dev, "connect", GPIOD_OUT_LOW);
+	if (IS_ERR(hub->connect))
+		return PTR_ERR(hub->connect);
+	if (hub->connect)
+		gpiod_set_consumer_name(hub->connect, "usb3503 connect");
 
-	if (gpio_is_valid(hub->gpio_connect)) {
-		err = devm_gpio_request_one(dev, hub->gpio_connect,
-				GPIOF_OUT_INIT_LOW, "usb3503 connect");
-		if (err) {
-			dev_err(dev,
-				"unable to request GPIO %d as connect pin (%d)\n",
-				hub->gpio_connect, err);
-			return err;
-		}
-	}
-
-	if (gpio_is_valid(hub->gpio_reset)) {
-		err = devm_gpio_request_one(dev, hub->gpio_reset,
-				GPIOF_OUT_INIT_LOW, "usb3503 reset");
+	hub->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(hub->reset))
+		return PTR_ERR(hub->reset);
+	if (hub->reset) {
 		/* Datasheet defines a hardware reset to be at least 100us */
 		usleep_range(100, 10000);
-		if (err) {
-			dev_err(dev,
-				"unable to request GPIO %d as reset pin (%d)\n",
-				hub->gpio_reset, err);
-			return err;
-		}
+		gpiod_set_consumer_name(hub->reset, "usb3503 reset");
 	}
 
+	if (hub->port_off_mask && !hub->regmap)
+		dev_err(dev, "Ports disabled with no control interface\n");
+
 	usb3503_switch_mode(hub, hub->mode);
 
 	dev_info(dev, "%s: probed in %s mode\n", __func__,
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 52f8e2b..eb2ded1 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -101,7 +101,6 @@
 
 config USB_MUSB_DSPS
 	tristate "TI DSPS platforms"
-	select USB_MUSB_AM335X_CHILD
 	depends on ARCH_OMAP2PLUS || COMPILE_TEST
 	depends on OF_IRQ
 
@@ -111,13 +110,16 @@
 
 config USB_MUSB_JZ4740
 	tristate "JZ4740"
-	depends on NOP_USB_XCEIV
 	depends on MIPS || COMPILE_TEST
 	depends on USB_MUSB_GADGET
 	depends on USB=n || USB_OTG_BLACKLIST_HUB
 
-config USB_MUSB_AM335X_CHILD
-	tristate
+config USB_MUSB_MEDIATEK
+	tristate "MediaTek platforms"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	depends on NOP_USB_XCEIV
+	depends on GENERIC_PHY
+	select USB_ROLE_SWITCH
 
 comment "MUSB DMA mode"
 
@@ -142,7 +144,7 @@
 
 config USB_INVENTRA_DMA
 	bool 'Inventra'
-	depends on USB_MUSB_OMAP2PLUS
+	depends on USB_MUSB_OMAP2PLUS || USB_MUSB_MEDIATEK
 	help
 	  Enable DMA transfers using Mentor's engine.
 
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index 3a88c79..9322473 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -24,9 +24,7 @@
 obj-$(CONFIG_USB_MUSB_UX500)			+= ux500.o
 obj-$(CONFIG_USB_MUSB_JZ4740)			+= jz4740.o
 obj-$(CONFIG_USB_MUSB_SUNXI)			+= sunxi.o
-
-
-obj-$(CONFIG_USB_MUSB_AM335X_CHILD)		+= musb_am335x.o
+obj-$(CONFIG_USB_MUSB_MEDIATEK)			+= mediatek.o
 
 # the kconfig must guarantee that only one of the
 # possible I/O schemes will be enabled at a time ...
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index fb6bbd2..7044355 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -13,7 +13,7 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/usb/usb_phy_generic.h>
@@ -25,10 +25,6 @@
 
 #include "musb_core.h"
 
-#ifdef CONFIG_MACH_DAVINCI_EVM
-#define GPIO_nVBUS_DRV		160
-#endif
-
 #include "davinci.h"
 #include "cppi_dma.h"
 
@@ -40,6 +36,9 @@ struct davinci_glue {
 	struct device		*dev;
 	struct platform_device	*musb;
 	struct clk		*clk;
+	bool			vbus_state;
+	struct gpio_desc	*vbus;
+	struct work_struct	vbus_work;
 };
 
 /* REVISIT (PM) we should be able to keep the PHY in low power mode most
@@ -135,43 +134,44 @@ static void davinci_musb_disable(struct musb *musb)
  * when J10 is out, and TI documents it as handling OTG.
  */
 
-#ifdef CONFIG_MACH_DAVINCI_EVM
-
-static int vbus_state = -1;
-
 /* I2C operations are always synchronous, and require a task context.
  * With unloaded systems, using the shared workqueue seems to suffice
  * to satisfy the 100msec A_WAIT_VRISE timeout...
  */
-static void evm_deferred_drvvbus(struct work_struct *ignored)
+static void evm_deferred_drvvbus(struct work_struct *work)
 {
-	gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
-	vbus_state = !vbus_state;
+	struct davinci_glue *glue = container_of(work, struct davinci_glue,
+						 vbus_work);
+
+	gpiod_set_value_cansleep(glue->vbus, glue->vbus_state);
+	glue->vbus_state = !glue->vbus_state;
 }
 
-#endif	/* EVM */
-
-static void davinci_musb_source_power(struct musb *musb, int is_on, int immediate)
+static void davinci_musb_source_power(struct musb *musb, int is_on,
+				      int immediate)
 {
-#ifdef CONFIG_MACH_DAVINCI_EVM
+	struct davinci_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+	/* This GPIO handling is entirely optional */
+	if (!glue->vbus)
+		return;
+
 	if (is_on)
 		is_on = 1;
 
-	if (vbus_state == is_on)
+	if (glue->vbus_state == is_on)
 		return;
-	vbus_state = !is_on;		/* 0/1 vs "-1 == unknown/init" */
+	/* 0/1 vs "-1 == unknown/init" */
+	glue->vbus_state = !is_on;
 
 	if (machine_is_davinci_evm()) {
-		static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
-
 		if (immediate)
-			gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
+			gpiod_set_value_cansleep(glue->vbus, glue->vbus_state);
 		else
-			schedule_work(&evm_vbus_work);
+			schedule_work(&glue->vbus_work);
 	}
 	if (immediate)
-		vbus_state = is_on;
-#endif
+		glue->vbus_state = is_on;
 }
 
 static void davinci_musb_set_vbus(struct musb *musb, int is_on)
@@ -524,6 +524,15 @@ static int davinci_probe(struct platform_device *pdev)
 
 	pdata->platform_ops		= &davinci_ops;
 
+	glue->vbus = devm_gpiod_get_optional(&pdev->dev, NULL, GPIOD_OUT_LOW);
+	if (IS_ERR(glue->vbus)) {
+		ret = PTR_ERR(glue->vbus);
+		goto err0;
+	} else {
+		glue->vbus_state = -1;
+		INIT_WORK(&glue->vbus_work, evm_deferred_drvvbus);
+	}
+
 	usb_phy_generic_register();
 	platform_set_drvdata(pdev, glue);
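
As the comment earlier in davinci.c notes, the VBUS switch sits behind an I2C expander, so toggling it can sleep; callers that may run in atomic context therefore queue the change on the shared workqueue instead of writing the GPIO directly. A stripped-down sketch of that hand-off is shown below; the names are illustrative and INIT_WORK() is assumed to have run at probe time, as the hunk above does for vbus_work:

	/* Sketch of the deferred-VBUS pattern; not the driver's code. */
	struct vbus_glue {
		struct gpio_desc	*vbus;		/* may sit on an I2C expander */
		bool			vbus_state;
		struct work_struct	vbus_work;	/* INIT_WORK()ed at probe */
	};

	static void vbus_work_fn(struct work_struct *work)
	{
		struct vbus_glue *glue = container_of(work, struct vbus_glue,
						      vbus_work);

		/* Runs in process context, so a sleeping GPIO write is fine. */
		gpiod_set_value_cansleep(glue->vbus, glue->vbus_state);
	}

	static void vbus_request(struct vbus_glue *glue, bool on, bool can_sleep)
	{
		glue->vbus_state = on;

		if (can_sleep)
			gpiod_set_value_cansleep(glue->vbus, on);
		else
			schedule_work(&glue->vbus_work);	/* defer to workqueue */
	}
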
 
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index e3b8c84..bc0109f 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -17,16 +17,15 @@
 #include "musb_core.h"
 
 struct jz4740_glue {
-	struct device           *dev;
-	struct platform_device  *musb;
+	struct platform_device	*pdev;
 	struct clk		*clk;
 };
 
 static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
 {
-	unsigned long   flags;
-	irqreturn_t     retval = IRQ_NONE;
-	struct musb     *musb = __hci;
+	unsigned long	flags;
+	irqreturn_t	retval = IRQ_NONE;
+	struct musb	*musb = __hci;
 
 	spin_lock_irqsave(&musb->lock, flags);
 
@@ -40,7 +39,7 @@ static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
 	 * never see them set
 	 */
 	musb->int_usb &= MUSB_INTR_SUSPEND | MUSB_INTR_RESUME |
-	    MUSB_INTR_RESET | MUSB_INTR_SOF;
+			 MUSB_INTR_RESET | MUSB_INTR_SOF;
 
 	if (musb->int_usb || musb->int_tx || musb->int_rx)
 		retval = musb_interrupt(musb);
@@ -51,25 +50,20 @@ static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
 }
 
 static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
-{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
-{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
+	{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+	{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+	{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
 };
 
 static const struct musb_hdrc_config jz4740_musb_config = {
 	/* Silicon does not implement USB OTG. */
-	.multipoint = 0,
+	.multipoint	= 0,
 	/* Max EPs scanned, driver will decide which EP can be used. */
-	.num_eps    = 4,
+	.num_eps	= 4,
 	/* RAMbits needed to configure EPs from table */
-	.ram_bits   = 9,
-	.fifo_cfg = jz4740_musb_fifo_cfg,
-	.fifo_cfg_size = ARRAY_SIZE(jz4740_musb_fifo_cfg),
-};
-
-static struct musb_hdrc_platform_data jz4740_musb_platform_data = {
-	.mode   = MUSB_PERIPHERAL,
-	.config = &jz4740_musb_config,
+	.ram_bits	= 9,
+	.fifo_cfg	= jz4740_musb_fifo_cfg,
+	.fifo_cfg_size	= ARRAY_SIZE(jz4740_musb_fifo_cfg),
 };
 
 static int jz4740_musb_init(struct musb *musb)
@@ -88,7 +82,8 @@ static int jz4740_musb_init(struct musb *musb)
 		return err;
 	}
 
-	/* Silicon does not implement ConfigData register.
+	/*
+	 * Silicon does not implement ConfigData register.
 	 * Set dyn_fifo to avoid reading EP config from hardware.
 	 */
 	musb->dyn_fifo = true;
@@ -108,63 +103,67 @@ static const struct musb_platform_ops jz4740_musb_ops = {
 	.init		= jz4740_musb_init,
 };
 
+static const struct musb_hdrc_platform_data jz4740_musb_pdata = {
+	.mode		= MUSB_PERIPHERAL,
+	.config		= &jz4740_musb_config,
+	.platform_ops	= &jz4740_musb_ops,
+};
+
 static int jz4740_probe(struct platform_device *pdev)
 {
-	struct musb_hdrc_platform_data	*pdata = &jz4740_musb_platform_data;
+	struct device			*dev = &pdev->dev;
+	const struct musb_hdrc_platform_data *pdata = &jz4740_musb_pdata;
 	struct platform_device		*musb;
 	struct jz4740_glue		*glue;
-	struct clk                      *clk;
+	struct clk			*clk;
 	int				ret;
 
-	glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+	glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
 	if (!glue)
 		return -ENOMEM;
 
 	musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
 	if (!musb) {
-		dev_err(&pdev->dev, "failed to allocate musb device\n");
+		dev_err(dev, "failed to allocate musb device");
 		return -ENOMEM;
 	}
 
-	clk = devm_clk_get(&pdev->dev, "udc");
+	clk = devm_clk_get(dev, "udc");
 	if (IS_ERR(clk)) {
-		dev_err(&pdev->dev, "failed to get clock\n");
+		dev_err(dev, "failed to get clock");
 		ret = PTR_ERR(clk);
 		goto err_platform_device_put;
 	}
 
 	ret = clk_prepare_enable(clk);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to enable clock\n");
+		dev_err(dev, "failed to enable clock");
 		goto err_platform_device_put;
 	}
 
-	musb->dev.parent		= &pdev->dev;
+	musb->dev.parent		= dev;
 
-	glue->dev			= &pdev->dev;
-	glue->musb			= musb;
+	glue->pdev			= musb;
 	glue->clk			= clk;
 
-	pdata->platform_ops		= &jz4740_musb_ops;
-
 	platform_set_drvdata(pdev, glue);
 
 	ret = platform_device_add_resources(musb, pdev->resource,
 					    pdev->num_resources);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to add resources\n");
+		dev_err(dev, "failed to add resources");
 		goto err_clk_disable;
 	}
 
 	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
 	if (ret) {
-		dev_err(&pdev->dev, "failed to add platform_data\n");
+		dev_err(dev, "failed to add platform_data");
 		goto err_clk_disable;
 	}
 
 	ret = platform_device_add(musb);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to register musb device\n");
+		dev_err(dev, "failed to register musb device");
 		goto err_clk_disable;
 	}
 
@@ -179,9 +178,9 @@ static int jz4740_probe(struct platform_device *pdev)
 
 static int jz4740_remove(struct platform_device *pdev)
 {
-	struct jz4740_glue	*glue = platform_get_drvdata(pdev);
+	struct jz4740_glue *glue = platform_get_drvdata(pdev);
 
-	platform_device_unregister(glue->musb);
+	platform_device_unregister(glue->pdev);
 	clk_disable_unprepare(glue->clk);
 
 	return 0;
@@ -190,7 +189,7 @@ static int jz4740_remove(struct platform_device *pdev)
 #ifdef CONFIG_OF
 static const struct of_device_id jz4740_musb_of_match[] = {
 	{ .compatible = "ingenic,jz4740-musb" },
-	{},
+	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, jz4740_musb_of_match);
 #endif
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
new file mode 100644
index 0000000..6b88c2f
--- /dev/null
+++ b/drivers/usb/musb/mediatek.c
@@ -0,0 +1,582 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author:
+ *  Min Guo <min.guo@mediatek.com>
+ *  Yonglong Wu <yonglong.wu@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/usb/role.h>
+#include <linux/usb/usb_phy_generic.h>
+#include "musb_core.h"
+#include "musb_dma.h"
+
+#define USB_L1INTS		0x00a0
+#define USB_L1INTM		0x00a4
+#define MTK_MUSB_TXFUNCADDR	0x0480
+
+/* MediaTek controller toggle enable and status reg */
+#define MUSB_RXTOG		0x80
+#define MUSB_RXTOGEN		0x82
+#define MUSB_TXTOG		0x84
+#define MUSB_TXTOGEN		0x86
+#define MTK_TOGGLE_EN		GENMASK(15, 0)
+
+#define TX_INT_STATUS		BIT(0)
+#define RX_INT_STATUS		BIT(1)
+#define USBCOM_INT_STATUS	BIT(2)
+#define DMA_INT_STATUS		BIT(3)
+
+#define DMA_INTR_STATUS_MSK	GENMASK(7, 0)
+#define DMA_INTR_UNMASK_SET_MSK	GENMASK(31, 24)
+
+struct mtk_glue {
+	struct device *dev;
+	struct musb *musb;
+	struct platform_device *musb_pdev;
+	struct platform_device *usb_phy;
+	struct phy *phy;
+	struct usb_phy *xceiv;
+	enum phy_mode phy_mode;
+	struct clk *main;
+	struct clk *mcu;
+	struct clk *univpll;
+	enum usb_role role;
+	struct usb_role_switch *role_sw;
+};
+
+static int mtk_musb_clks_get(struct mtk_glue *glue)
+{
+	struct device *dev = glue->dev;
+
+	glue->main = devm_clk_get(dev, "main");
+	if (IS_ERR(glue->main)) {
+		dev_err(dev, "fail to get main clock\n");
+		return PTR_ERR(glue->main);
+	}
+
+	glue->mcu = devm_clk_get(dev, "mcu");
+	if (IS_ERR(glue->mcu)) {
+		dev_err(dev, "fail to get mcu clock\n");
+		return PTR_ERR(glue->mcu);
+	}
+
+	glue->univpll = devm_clk_get(dev, "univpll");
+	if (IS_ERR(glue->univpll)) {
+		dev_err(dev, "fail to get univpll clock\n");
+		return PTR_ERR(glue->univpll);
+	}
+
+	return 0;
+}
+
+static int mtk_musb_clks_enable(struct mtk_glue *glue)
+{
+	int ret;
+
+	ret = clk_prepare_enable(glue->main);
+	if (ret) {
+		dev_err(glue->dev, "failed to enable main clock\n");
+		goto err_main_clk;
+	}
+
+	ret = clk_prepare_enable(glue->mcu);
+	if (ret) {
+		dev_err(glue->dev, "failed to enable mcu clock\n");
+		goto err_mcu_clk;
+	}
+
+	ret = clk_prepare_enable(glue->univpll);
+	if (ret) {
+		dev_err(glue->dev, "failed to enable univpll clock\n");
+		goto err_univpll_clk;
+	}
+
+	return 0;
+
+err_univpll_clk:
+	clk_disable_unprepare(glue->mcu);
+err_mcu_clk:
+	clk_disable_unprepare(glue->main);
+err_main_clk:
+	return ret;
+}
+
+static void mtk_musb_clks_disable(struct mtk_glue *glue)
+{
+	clk_disable_unprepare(glue->univpll);
+	clk_disable_unprepare(glue->mcu);
+	clk_disable_unprepare(glue->main);
+}
+
+static int musb_usb_role_sx_set(struct device *dev, enum usb_role role)
+{
+	struct mtk_glue *glue = dev_get_drvdata(dev);
+	struct musb *musb = glue->musb;
+	u8 devctl = readb(musb->mregs + MUSB_DEVCTL);
+	enum usb_role new_role;
+
+	if (role == glue->role)
+		return 0;
+
+	switch (role) {
+	case USB_ROLE_HOST:
+		musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+		glue->phy_mode = PHY_MODE_USB_HOST;
+		new_role = USB_ROLE_HOST;
+		if (glue->role == USB_ROLE_NONE)
+			phy_power_on(glue->phy);
+
+		devctl |= MUSB_DEVCTL_SESSION;
+		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+		MUSB_HST_MODE(musb);
+		break;
+	case USB_ROLE_DEVICE:
+		musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+		glue->phy_mode = PHY_MODE_USB_DEVICE;
+		new_role = USB_ROLE_DEVICE;
+		devctl &= ~MUSB_DEVCTL_SESSION;
+		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+		if (glue->role == USB_ROLE_NONE)
+			phy_power_on(glue->phy);
+
+		MUSB_DEV_MODE(musb);
+		break;
+	case USB_ROLE_NONE:
+		glue->phy_mode = PHY_MODE_USB_OTG;
+		new_role = USB_ROLE_NONE;
+		devctl &= ~MUSB_DEVCTL_SESSION;
+		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+		if (glue->role != USB_ROLE_NONE)
+			phy_power_off(glue->phy);
+
+		break;
+	default:
+		dev_err(glue->dev, "Invalid State\n");
+		return -EINVAL;
+	}
+
+	glue->role = new_role;
+	phy_set_mode(glue->phy, glue->phy_mode);
+
+	return 0;
+}
+
+static enum usb_role musb_usb_role_sx_get(struct device *dev)
+{
+	struct mtk_glue *glue = dev_get_drvdata(dev);
+
+	return glue->role;
+}
+
+static int mtk_otg_switch_init(struct mtk_glue *glue)
+{
+	struct usb_role_switch_desc role_sx_desc = { 0 };
+
+	role_sx_desc.set = musb_usb_role_sx_set;
+	role_sx_desc.get = musb_usb_role_sx_get;
+	role_sx_desc.fwnode = dev_fwnode(glue->dev);
+	glue->role_sw = usb_role_switch_register(glue->dev, &role_sx_desc);
+
+	return PTR_ERR_OR_ZERO(glue->role_sw);
+}
+
+static void mtk_otg_switch_exit(struct mtk_glue *glue)
+{
+	usb_role_switch_unregister(glue->role_sw);
+}
+
+static irqreturn_t generic_interrupt(int irq, void *__hci)
+{
+	unsigned long flags;
+	irqreturn_t retval = IRQ_NONE;
+	struct musb *musb = __hci;
+
+	spin_lock_irqsave(&musb->lock, flags);
+	musb->int_usb = musb_clearb(musb->mregs, MUSB_INTRUSB);
+	musb->int_rx = musb_clearw(musb->mregs, MUSB_INTRRX);
+	musb->int_tx = musb_clearw(musb->mregs, MUSB_INTRTX);
+
+	if (musb->int_usb || musb->int_tx || musb->int_rx)
+		retval = musb_interrupt(musb);
+
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	return retval;
+}
+
+static irqreturn_t mtk_musb_interrupt(int irq, void *dev_id)
+{
+	irqreturn_t retval = IRQ_NONE;
+	struct musb *musb = (struct musb *)dev_id;
+	u32 l1_ints;
+
+	l1_ints = musb_readl(musb->mregs, USB_L1INTS) &
+			musb_readl(musb->mregs, USB_L1INTM);
+
+	if (l1_ints & (TX_INT_STATUS | RX_INT_STATUS | USBCOM_INT_STATUS))
+		retval = generic_interrupt(irq, musb);
+
+#if defined(CONFIG_USB_INVENTRA_DMA)
+	if (l1_ints & DMA_INT_STATUS)
+		retval = dma_controller_irq(irq, musb->dma_controller);
+#endif
+	return retval;
+}
+
+static u32 mtk_musb_busctl_offset(u8 epnum, u16 offset)
+{
+	return MTK_MUSB_TXFUNCADDR + offset + 8 * epnum;
+}
+
+static u8 mtk_musb_clearb(void __iomem *addr, unsigned int offset)
+{
+	u8 data;
+
+	/* W1C */
+	data = musb_readb(addr, offset);
+	musb_writeb(addr, offset, data);
+	return data;
+}
+
+static u16 mtk_musb_clearw(void __iomem *addr, unsigned int offset)
+{
+	u16 data;
+
+	/* W1C */
+	data = musb_readw(addr, offset);
+	musb_writew(addr, offset, data);
+	return data;
+}
+
+static int mtk_musb_set_mode(struct musb *musb, u8 mode)
+{
+	struct device *dev = musb->controller;
+	struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+	enum phy_mode new_mode;
+	enum usb_role new_role;
+
+	switch (mode) {
+	case MUSB_HOST:
+		new_mode = PHY_MODE_USB_HOST;
+		new_role = USB_ROLE_HOST;
+		break;
+	case MUSB_PERIPHERAL:
+		new_mode = PHY_MODE_USB_DEVICE;
+		new_role = USB_ROLE_DEVICE;
+		break;
+	case MUSB_OTG:
+		new_mode = PHY_MODE_USB_OTG;
+		new_role = USB_ROLE_NONE;
+		break;
+	default:
+		dev_err(glue->dev, "Invalid mode request\n");
+		return -EINVAL;
+	}
+
+	if (glue->phy_mode == new_mode)
+		return 0;
+
+	if (musb->port_mode != MUSB_OTG) {
+		dev_err(glue->dev, "Does not support changing modes\n");
+		return -EINVAL;
+	}
+
+	glue->role = new_role;
+	musb_usb_role_sx_set(dev, glue->role);
+	return 0;
+}
+
+static int mtk_musb_init(struct musb *musb)
+{
+	struct device *dev = musb->controller;
+	struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+	int ret;
+
+	glue->musb = musb;
+	musb->phy = glue->phy;
+	musb->xceiv = glue->xceiv;
+	musb->is_host = false;
+	musb->isr = mtk_musb_interrupt;
+
+	/* Set TX/RX toggle enable */
+	musb_writew(musb->mregs, MUSB_TXTOGEN, MTK_TOGGLE_EN);
+	musb_writew(musb->mregs, MUSB_RXTOGEN, MTK_TOGGLE_EN);
+
+	if (musb->port_mode == MUSB_OTG) {
+		ret = mtk_otg_switch_init(glue);
+		if (ret)
+			return ret;
+	}
+
+	ret = phy_init(glue->phy);
+	if (ret)
+		goto err_phy_init;
+
+	ret = phy_power_on(glue->phy);
+	if (ret)
+		goto err_phy_power_on;
+
+	phy_set_mode(glue->phy, glue->phy_mode);
+
+#if defined(CONFIG_USB_INVENTRA_DMA)
+	musb_writel(musb->mregs, MUSB_HSDMA_INTR,
+		    DMA_INTR_STATUS_MSK | DMA_INTR_UNMASK_SET_MSK);
+#endif
+	musb_writel(musb->mregs, USB_L1INTM, TX_INT_STATUS | RX_INT_STATUS |
+		    USBCOM_INT_STATUS | DMA_INT_STATUS);
+	return 0;
+
+err_phy_power_on:
+	phy_exit(glue->phy);
+err_phy_init:
+	mtk_otg_switch_exit(glue);
+	return ret;
+}
+
+static u16 mtk_musb_get_toggle(struct musb_qh *qh, int is_out)
+{
+	struct musb *musb = qh->hw_ep->musb;
+	u8 epnum = qh->hw_ep->epnum;
+	u16 toggle;
+
+	toggle = musb_readw(musb->mregs, is_out ? MUSB_TXTOG : MUSB_RXTOG);
+	return toggle & (1 << epnum);
+}
+
+static u16 mtk_musb_set_toggle(struct musb_qh *qh, int is_out, struct urb *urb)
+{
+	struct musb *musb = qh->hw_ep->musb;
+	u8 epnum = qh->hw_ep->epnum;
+	u16 value, toggle;
+
+	toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
+
+	if (is_out) {
+		value = musb_readw(musb->mregs, MUSB_TXTOG);
+		value |= toggle << epnum;
+		musb_writew(musb->mregs, MUSB_TXTOG, value);
+	} else {
+		value = musb_readw(musb->mregs, MUSB_RXTOG);
+		value |= toggle << epnum;
+		musb_writew(musb->mregs, MUSB_RXTOG, value);
+	}
+
+	return 0;
+}
+
+static int mtk_musb_exit(struct musb *musb)
+{
+	struct device *dev = musb->controller;
+	struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+
+	mtk_otg_switch_exit(glue);
+	phy_power_off(glue->phy);
+	phy_exit(glue->phy);
+	mtk_musb_clks_disable(glue);
+
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+	return 0;
+}
+
+static const struct musb_platform_ops mtk_musb_ops = {
+	.quirks = MUSB_DMA_INVENTRA,
+	.init = mtk_musb_init,
+	.get_toggle = mtk_musb_get_toggle,
+	.set_toggle = mtk_musb_set_toggle,
+	.exit = mtk_musb_exit,
+#ifdef CONFIG_USB_INVENTRA_DMA
+	.dma_init = musbhs_dma_controller_create_noirq,
+	.dma_exit = musbhs_dma_controller_destroy,
+#endif
+	.clearb = mtk_musb_clearb,
+	.clearw = mtk_musb_clearw,
+	.busctl_offset = mtk_musb_busctl_offset,
+	.set_mode = mtk_musb_set_mode,
+};
+
+#define MTK_MUSB_MAX_EP_NUM	8
+#define MTK_MUSB_RAM_BITS	11
+
+static struct musb_fifo_cfg mtk_musb_mode_cfg[] = {
+	{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+	{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+	{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+	{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+	{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+	{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+	{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+	{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+	{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+	{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+	{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 1024, },
+	{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 1024, },
+	{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
+	{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 64, },
+};
+
+static const struct musb_hdrc_config mtk_musb_hdrc_config = {
+	.fifo_cfg = mtk_musb_mode_cfg,
+	.fifo_cfg_size = ARRAY_SIZE(mtk_musb_mode_cfg),
+	.multipoint = true,
+	.dyn_fifo = true,
+	.num_eps = MTK_MUSB_MAX_EP_NUM,
+	.ram_bits = MTK_MUSB_RAM_BITS,
+};
+
+static const struct platform_device_info mtk_dev_info = {
+	.name = "musb-hdrc",
+	.id = PLATFORM_DEVID_AUTO,
+	.dma_mask = DMA_BIT_MASK(32),
+};
+
+static int mtk_musb_probe(struct platform_device *pdev)
+{
+	struct musb_hdrc_platform_data *pdata;
+	struct mtk_glue *glue;
+	struct platform_device_info pinfo;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	int ret = -ENOMEM;
+
+	glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
+	if (!glue)
+		return -ENOMEM;
+
+	glue->dev = dev;
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	ret = of_platform_populate(np, NULL, NULL, dev);
+	if (ret) {
+		dev_err(dev, "failed to create child devices at %p\n", np);
+		return ret;
+	}
+
+	ret = mtk_musb_clks_get(glue);
+	if (ret)
+		return ret;
+
+	pdata->config = &mtk_musb_hdrc_config;
+	pdata->platform_ops = &mtk_musb_ops;
+	pdata->mode = usb_get_dr_mode(dev);
+
+	if (IS_ENABLED(CONFIG_USB_MUSB_HOST))
+		pdata->mode = USB_DR_MODE_HOST;
+	else if (IS_ENABLED(CONFIG_USB_MUSB_GADGET))
+		pdata->mode = USB_DR_MODE_PERIPHERAL;
+
+	switch (pdata->mode) {
+	case USB_DR_MODE_HOST:
+		glue->phy_mode = PHY_MODE_USB_HOST;
+		glue->role = USB_ROLE_HOST;
+		break;
+	case USB_DR_MODE_PERIPHERAL:
+		glue->phy_mode = PHY_MODE_USB_DEVICE;
+		glue->role = USB_ROLE_DEVICE;
+		break;
+	case USB_DR_MODE_OTG:
+		glue->phy_mode = PHY_MODE_USB_OTG;
+		glue->role = USB_ROLE_NONE;
+		break;
+	default:
+		dev_err(&pdev->dev, "Error 'dr_mode' property\n");
+		return -EINVAL;
+	}
+
+	glue->phy = devm_of_phy_get_by_index(dev, np, 0);
+	if (IS_ERR(glue->phy)) {
+		dev_err(dev, "fail to getting phy %ld\n",
+			PTR_ERR(glue->phy));
+		return PTR_ERR(glue->phy);
+	}
+
+	glue->usb_phy = usb_phy_generic_register();
+	if (IS_ERR(glue->usb_phy)) {
+		dev_err(dev, "fail to registering usb-phy %ld\n",
+			PTR_ERR(glue->usb_phy));
+		return PTR_ERR(glue->usb_phy);
+	}
+
+	glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+	if (IS_ERR(glue->xceiv)) {
+		dev_err(dev, "fail to getting usb-phy %d\n", ret);
+		ret = PTR_ERR(glue->xceiv);
+		goto err_unregister_usb_phy;
+	}
+
+	platform_set_drvdata(pdev, glue);
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+
+	ret = mtk_musb_clks_enable(glue);
+	if (ret)
+		goto err_enable_clk;
+
+	pinfo = mtk_dev_info;
+	pinfo.parent = dev;
+	pinfo.res = pdev->resource;
+	pinfo.num_res = pdev->num_resources;
+	pinfo.data = pdata;
+	pinfo.size_data = sizeof(*pdata);
+
+	glue->musb_pdev = platform_device_register_full(&pinfo);
+	if (IS_ERR(glue->musb_pdev)) {
+		ret = PTR_ERR(glue->musb_pdev);
+		dev_err(dev, "failed to register musb device: %d\n", ret);
+		goto err_device_register;
+	}
+
+	return 0;
+
+err_device_register:
+	mtk_musb_clks_disable(glue);
+err_enable_clk:
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+err_unregister_usb_phy:
+	usb_phy_generic_unregister(glue->usb_phy);
+	return ret;
+}
+
+static int mtk_musb_remove(struct platform_device *pdev)
+{
+	struct mtk_glue *glue = platform_get_drvdata(pdev);
+	struct platform_device *usb_phy = glue->usb_phy;
+
+	platform_device_unregister(glue->musb_pdev);
+	usb_phy_generic_unregister(usb_phy);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mtk_musb_match[] = {
+	{.compatible = "mediatek,mtk-musb",},
+	{},
+};
+MODULE_DEVICE_TABLE(of, mtk_musb_match);
+#endif
+
+static struct platform_driver mtk_musb_driver = {
+	.probe = mtk_musb_probe,
+	.remove = mtk_musb_remove,
+	.driver = {
+		   .name = "musb-mtk",
+		   .of_match_table = of_match_ptr(mtk_musb_match),
+	},
+};
+
+module_platform_driver(mtk_musb_driver);
+
+MODULE_DESCRIPTION("MediaTek MUSB Glue Layer");
+MODULE_AUTHOR("Min Guo <min.guo@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
deleted file mode 100644
index 5f04f8e..0000000
--- a/drivers/usb/musb/musb_am335x.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-
-static int am335x_child_probe(struct platform_device *pdev)
-{
-	int ret;
-
-	pm_runtime_enable(&pdev->dev);
-
-	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
-	if (ret)
-		goto err;
-
-	return 0;
-err:
-	pm_runtime_disable(&pdev->dev);
-	return ret;
-}
-
-static const struct of_device_id am335x_child_of_match[] = {
-	{ .compatible = "ti,am33xx-usb" },
-	{  },
-};
-MODULE_DEVICE_TABLE(of, am335x_child_of_match);
-
-static struct platform_driver am335x_child_driver = {
-	.probe		= am335x_child_probe,
-	.driver         = {
-		.name   = "am335x-usb-childs",
-		.of_match_table	= am335x_child_of_match,
-	},
-};
-
-static int __init am335x_child_init(void)
-{
-	return platform_driver_register(&am335x_child_driver);
-}
-module_init(am335x_child_init);
-
-MODULE_DESCRIPTION("AM33xx child devices");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 5ebf30b..f616fb4 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -73,6 +73,7 @@
 #include <linux/prefetch.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/dma-mapping.h>
 #include <linux/usb.h>
 #include <linux/usb/of.h>
@@ -246,7 +247,7 @@ static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
 	return 0x80 + (0x08 * epnum) + offset;
 }
 
-static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
+static u8 musb_default_readb(void __iomem *addr, u32 offset)
 {
 	u8 data =  __raw_readb(addr + offset);
 
@@ -254,13 +255,13 @@ static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
 	return data;
 }
 
-static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
+static void musb_default_writeb(void __iomem *addr, u32 offset, u8 data)
 {
 	trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
 	__raw_writeb(data, addr + offset);
 }
 
-static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
+static u16 musb_default_readw(void __iomem *addr, u32 offset)
 {
 	u16 data = __raw_readw(addr + offset);
 
@@ -268,12 +269,44 @@ static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
 	return data;
 }
 
-static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
+static void musb_default_writew(void __iomem *addr, u32 offset, u16 data)
 {
 	trace_musb_writew(__builtin_return_address(0), addr, offset, data);
 	__raw_writew(data, addr + offset);
 }
 
+static u16 musb_default_get_toggle(struct musb_qh *qh, int is_out)
+{
+	void __iomem *epio = qh->hw_ep->regs;
+	u16 csr;
+
+	if (is_out)
+		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
+	else
+		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
+
+	return csr;
+}
+
+static u16 musb_default_set_toggle(struct musb_qh *qh, int is_out,
+				   struct urb *urb)
+{
+	u16 csr;
+	u16 toggle;
+
+	toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
+
+	if (is_out)
+		csr = toggle ? (MUSB_TXCSR_H_WR_DATATOGGLE
+				| MUSB_TXCSR_H_DATATOGGLE)
+				: MUSB_TXCSR_CLRDATATOG;
+	else
+		csr = toggle ? (MUSB_RXCSR_H_WR_DATATOGGLE
+				| MUSB_RXCSR_H_DATATOGGLE) : 0;
+
+	return csr;
+}
+
 /*
  * Load an endpoint's FIFO
  */
@@ -364,19 +397,25 @@ static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
 /*
  * Old style IO functions
  */
-u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
+u8 (*musb_readb)(void __iomem *addr, u32 offset);
 EXPORT_SYMBOL_GPL(musb_readb);
 
-void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
+void (*musb_writeb)(void __iomem *addr, u32 offset, u8 data);
 EXPORT_SYMBOL_GPL(musb_writeb);
 
-u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
+u8 (*musb_clearb)(void __iomem *addr, u32 offset);
+EXPORT_SYMBOL_GPL(musb_clearb);
+
+u16 (*musb_readw)(void __iomem *addr, u32 offset);
 EXPORT_SYMBOL_GPL(musb_readw);
 
-void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
+void (*musb_writew)(void __iomem *addr, u32 offset, u16 data);
 EXPORT_SYMBOL_GPL(musb_writew);
 
-u32 musb_readl(const void __iomem *addr, unsigned offset)
+u16 (*musb_clearw)(void __iomem *addr, u32 offset);
+EXPORT_SYMBOL_GPL(musb_clearw);
+
+u32 musb_readl(void __iomem *addr, u32 offset)
 {
 	u32 data = __raw_readl(addr + offset);
 
@@ -385,7 +424,7 @@ u32 musb_readl(const void __iomem *addr, unsigned offset)
 }
 EXPORT_SYMBOL_GPL(musb_readl);
 
-void musb_writel(void __iomem *addr, unsigned offset, u32 data)
+void musb_writel(void __iomem *addr, u32 offset, u32 data)
 {
 	trace_musb_writel(__builtin_return_address(0), addr, offset, data);
 	__raw_writel(data, addr + offset);
@@ -414,6 +453,108 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
 	return hw_ep->musb->io.write_fifo(hw_ep, len, src);
 }
 
+static u8 musb_read_devctl(struct musb *musb)
+{
+	return musb_readb(musb->mregs, MUSB_DEVCTL);
+}
+
+/**
+ * musb_set_host - set and initialize host mode
+ * @musb: musb controller driver data
+ *
+ * At least some musb revisions need to enable devctl session bit in
+ * peripheral mode to switch to host mode. Initializes things to host
+ * mode and sets A_IDLE. SoC glue needs to advance state further
+ * based on phy provided VBUS state.
+ *
+ * Note that the SoC glue code may need to wait for musb to settle
+ * on enable before calling this to avoid babble.
+ */
+int musb_set_host(struct musb *musb)
+{
+	int error = 0;
+	u8 devctl;
+
+	if (!musb)
+		return -EINVAL;
+
+	devctl = musb_read_devctl(musb);
+	if (!(devctl & MUSB_DEVCTL_BDEVICE)) {
+		dev_info(musb->controller,
+			 "%s: already in host mode: %02x\n",
+			 __func__, devctl);
+		goto init_data;
+	}
+
+	devctl |= MUSB_DEVCTL_SESSION;
+	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+	error = readx_poll_timeout(musb_read_devctl, musb, devctl,
+				   !(devctl & MUSB_DEVCTL_BDEVICE), 5000,
+				   1000000);
+	if (error) {
+		dev_err(musb->controller, "%s: could not set host: %02x\n",
+			__func__, devctl);
+
+		return error;
+	}
+
+init_data:
+	musb->is_active = 1;
+	musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+	MUSB_HST_MODE(musb);
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(musb_set_host);
+
+/**
+ * musb_set_peripheral - set and initialize peripheral mode
+ * @musb: musb controller driver data
+ *
+ * Clears devctl session bit and initializes things for peripheral
+ * mode and sets B_IDLE. SoC glue needs to advance state further
+ * based on phy provided VBUS state.
+ */
+int musb_set_peripheral(struct musb *musb)
+{
+	int error = 0;
+	u8 devctl;
+
+	if (!musb)
+		return -EINVAL;
+
+	devctl = musb_read_devctl(musb);
+	if (devctl & MUSB_DEVCTL_BDEVICE) {
+		dev_info(musb->controller,
+			 "%s: already in peripheral mode: %02x\n",
+			 __func__, devctl);
+
+		goto init_data;
+	}
+
+	devctl &= ~MUSB_DEVCTL_SESSION;
+	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+	error = readx_poll_timeout(musb_read_devctl, musb, devctl,
+				   devctl & MUSB_DEVCTL_BDEVICE, 5000,
+				   1000000);
+	if (error) {
+		dev_err(musb->controller, "%s: could not set peripheral: %02x\n",
+			__func__, devctl);
+
+		return error;
+	}
+
+init_data:
+	musb->is_active = 0;
+	musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+	MUSB_DEV_MODE(musb);
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(musb_set_peripheral);
+
 /*-------------------------------------------------------------------------*/
 
 /* for high speed test mode; see USB 2.0 spec 7.1.20 */
@@ -909,7 +1050,6 @@ static void musb_handle_intr_reset(struct musb *musb)
  * @param musb instance pointer
  * @param int_usb register contents
  * @param devctl
- * @param power
  */
 
 static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
@@ -1015,7 +1155,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
 static void musb_disable_interrupts(struct musb *musb)
 {
 	void __iomem	*mbase = musb->mregs;
-	u16	temp;
 
 	/* disable interrupts */
 	musb_writeb(mbase, MUSB_INTRUSBE, 0);
@@ -1025,9 +1164,9 @@ static void musb_disable_interrupts(struct musb *musb)
 	musb_writew(mbase, MUSB_INTRRXE, 0);
 
 	/*  flush pending interrupts */
-	temp = musb_readb(mbase, MUSB_INTRUSB);
-	temp = musb_readw(mbase, MUSB_INTRTX);
-	temp = musb_readw(mbase, MUSB_INTRRX);
+	musb_clearb(mbase, MUSB_INTRUSB);
+	musb_clearw(mbase, MUSB_INTRTX);
+	musb_clearw(mbase, MUSB_INTRRX);
 }
 
 static void musb_enable_interrupts(struct musb *musb)
@@ -2254,10 +2393,19 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
 		musb_readb = musb->ops->readb;
 	if (musb->ops->writeb)
 		musb_writeb = musb->ops->writeb;
+	if (musb->ops->clearb)
+		musb_clearb = musb->ops->clearb;
+	else
+		musb_clearb = musb_readb;
+
 	if (musb->ops->readw)
 		musb_readw = musb->ops->readw;
 	if (musb->ops->writew)
 		musb_writew = musb->ops->writew;
+	if (musb->ops->clearw)
+		musb_clearw = musb->ops->clearw;
+	else
+		musb_clearw = musb_readw;
 
 #ifndef CONFIG_MUSB_PIO_ONLY
 	if (!musb->ops->dma_init || !musb->ops->dma_exit) {
@@ -2279,6 +2427,16 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
 	else
 		musb->io.write_fifo = musb_default_write_fifo;
 
+	if (musb->ops->get_toggle)
+		musb->io.get_toggle = musb->ops->get_toggle;
+	else
+		musb->io.get_toggle = musb_default_get_toggle;
+
+	if (musb->ops->set_toggle)
+		musb->io.set_toggle = musb->ops->set_toggle;
+	else
+		musb->io.set_toggle = musb_default_set_toggle;
+
 	if (!musb->xceiv->io_ops) {
 		musb->xceiv->io_dev = musb->controller;
 		musb->xceiv->io_priv = musb->mregs;
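
The new musb_set_host() and musb_set_peripheral() helpers centralise the DEVCTL session-bit handling that glue layers used to open-code, polling with readx_poll_timeout() until the controller reports the requested role. A minimal sketch of how a glue layer's VBUS/ID event handler might call them; the handler name and call sites below are illustrative, not part of this patch:

	/* Illustrative glue-side usage of the new role-switch helpers */
	static void example_glue_id_event(struct musb *musb, bool id_grounded)
	{
		int error;

		if (id_grounded) {
			error = musb_set_host(musb);
			if (!error)
				otg_set_vbus(musb->xceiv->otg, 1);
		} else {
			musb_set_peripheral(musb);
			otg_set_vbus(musb->xceiv->otg, 0);
		}
	}
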
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 04203b7..290a2bc 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -27,6 +27,7 @@
 struct musb;
 struct musb_hw_ep;
 struct musb_ep;
+struct musb_qh;
 
 /* Helper defines for struct musb->hwvers */
 #define MUSB_HWVERS_MAJOR(x)	((x >> 10) & 0x1f)
@@ -119,10 +120,14 @@ struct musb_io;
  * @fifo_offset: returns the fifo offset
  * @readb:	read 8 bits
  * @writeb:	write 8 bits
+ * @clearb:	could be clear-on-readb or W1C
  * @readw:	read 16 bits
  * @writew:	write 16 bits
+ * @clearw:	could be clear-on-readw or W1C
  * @read_fifo:	reads the fifo
  * @write_fifo:	writes to fifo
+ * @get_toggle:	platform specific get toggle function
+ * @set_toggle:	platform specific set toggle function
  * @dma_init:	platform specific dma init function
  * @dma_exit:	platform specific dma exit function
  * @init:	turns on clocks, sets up platform-specific registers, etc
@@ -161,12 +166,16 @@ struct musb_platform_ops {
 	u16	fifo_mode;
 	u32	(*fifo_offset)(u8 epnum);
 	u32	(*busctl_offset)(u8 epnum, u16 offset);
-	u8	(*readb)(const void __iomem *addr, unsigned offset);
-	void	(*writeb)(void __iomem *addr, unsigned offset, u8 data);
-	u16	(*readw)(const void __iomem *addr, unsigned offset);
-	void	(*writew)(void __iomem *addr, unsigned offset, u16 data);
+	u8	(*readb)(void __iomem *addr, u32 offset);
+	void	(*writeb)(void __iomem *addr, u32 offset, u8 data);
+	u8	(*clearb)(void __iomem *addr, u32 offset);
+	u16	(*readw)(void __iomem *addr, u32 offset);
+	void	(*writew)(void __iomem *addr, u32 offset, u16 data);
+	u16	(*clearw)(void __iomem *addr, u32 offset);
 	void	(*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
 	void	(*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
+	u16	(*get_toggle)(struct musb_qh *qh, int is_out);
+	u16	(*set_toggle)(struct musb_qh *qh, int is_out, struct urb *urb);
 	struct dma_controller *
 		(*dma_init) (struct musb *musb, void __iomem *base);
 	void	(*dma_exit)(struct dma_controller *c);
@@ -487,6 +496,9 @@ extern void musb_start(struct musb *musb);
 extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
 extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
 
+extern int musb_set_host(struct musb *musb);
+extern int musb_set_peripheral(struct musb *musb);
+
 extern void musb_load_testpacket(struct musb *);
 
 extern irqreturn_t musb_interrupt(struct musb *);
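
The clearb/clearw platform ops exist because some MUSB integrations make the interrupt status registers write-1-to-clear rather than clear-on-read; musb_core falls back to the plain read accessors when a glue layer does not provide them. A minimal sketch of a W1C implementation, assuming a hypothetical glue layer (names are illustrative):

	/* W1C variant: read the status, then write it back to clear it */
	static u16 example_musb_clearw(void __iomem *addr, u32 offset)
	{
		u16 data = musb_readw(addr, offset);

		musb_writew(addr, offset, data);
		return data;
	}

	static const struct musb_platform_ops example_ops = {
		.clearw	= example_musb_clearw,
		/* other callbacks omitted */
	};
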
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 8f60271..4b4d8dc 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -35,6 +35,12 @@ struct musb_hw_ep;
  *    whether shared with the Inventra core or separate.
  */
 
+#define MUSB_HSDMA_BASE		0x200
+#define MUSB_HSDMA_INTR		(MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL	0x4
+#define MUSB_HSDMA_ADDRESS	0x8
+#define MUSB_HSDMA_COUNT	0xc
+
 #define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
 
 #ifdef CONFIG_MUSB_PIO_ONLY
@@ -191,6 +197,9 @@ extern void (*musb_dma_controller_destroy)(struct dma_controller *);
 extern struct dma_controller *
 musbhs_dma_controller_create(struct musb *musb, void __iomem *base);
 extern void musbhs_dma_controller_destroy(struct dma_controller *c);
+extern struct dma_controller *
+musbhs_dma_controller_create_noirq(struct musb *musb, void __iomem *base);
+extern irqreturn_t dma_controller_irq(int irq, void *private_data);
 
 extern struct dma_controller *
 tusb_dma_controller_create(struct musb *musb, void __iomem *base);
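
musbhs_dma_controller_create_noirq() is meant for glue layers whose DMA interrupt is not a separate "dma" IRQ resource but shares the line with the core interrupt; such a glue requests the line itself and chains the now-exported dma_controller_irq() from its own handler. A minimal sketch under that assumption (names are illustrative; struct dma_controller is the first member of the private controller struct, so the pointer stored in musb->dma_controller is usable here):

	static struct dma_controller *
	example_dma_init(struct musb *musb, void __iomem *base)
	{
		return musbhs_dma_controller_create_noirq(musb, base);
	}

	static irqreturn_t example_glue_irq(int irq, void *dev_id)
	{
		struct musb *musb = dev_id;

		/*
		 * Let the shared DMA engine service its pending channels;
		 * the glue then reads INTRUSB/INTRTX/INTRRX and calls
		 * musb_interrupt() under musb->lock as usual.
		 */
		return dma_controller_irq(irq, musb->dma_controller);
	}
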
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 5a44b70..886c9b6 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -286,26 +286,6 @@ __acquires(musb->lock)
 	spin_lock(&musb->lock);
 }
 
-/* For bulk/interrupt endpoints only */
-static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
-				    struct urb *urb)
-{
-	void __iomem		*epio = qh->hw_ep->regs;
-	u16			csr;
-
-	/*
-	 * FIXME: the current Mentor DMA code seems to have
-	 * problems getting toggle correct.
-	 */
-
-	if (is_in)
-		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
-	else
-		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
-
-	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
-}
-
 /*
  * Advance this hardware endpoint's queue, completing the specified URB and
  * advancing to either the next URB queued to that qh, or else invalidating
@@ -320,6 +300,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
 	struct musb_hw_ep	*ep = qh->hw_ep;
 	int			ready = qh->is_ready;
 	int			status;
+	u16			toggle;
 
 	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
 
@@ -327,7 +308,8 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
 	switch (qh->type) {
 	case USB_ENDPOINT_XFER_BULK:
 	case USB_ENDPOINT_XFER_INT:
-		musb_save_toggle(qh, is_in, urb);
+		toggle = musb->io.get_toggle(qh, !is_in);
+		usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
 		break;
 	case USB_ENDPOINT_XFER_ISOC:
 		if (status == 0 && urb->error_count)
@@ -772,13 +754,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 					);
 			csr |= MUSB_TXCSR_MODE;
 
-			if (!hw_ep->tx_double_buffered) {
-				if (usb_gettoggle(urb->dev, qh->epnum, 1))
-					csr |= MUSB_TXCSR_H_WR_DATATOGGLE
-						| MUSB_TXCSR_H_DATATOGGLE;
-				else
-					csr |= MUSB_TXCSR_CLRDATATOG;
-			}
+			if (!hw_ep->tx_double_buffered)
+				csr |= musb->io.set_toggle(qh, is_out, urb);
 
 			musb_writew(epio, MUSB_TXCSR, csr);
 			/* REVISIT may need to clear FLUSHFIFO ... */
@@ -860,17 +837,12 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 
 	/* IN/receive */
 	} else {
-		u16	csr;
+		u16 csr = 0;
 
 		if (hw_ep->rx_reinit) {
 			musb_rx_reinit(musb, qh, epnum);
+			csr |= musb->io.set_toggle(qh, is_out, urb);
 
-			/* init new state: toggle and NYET, maybe DMA later */
-			if (usb_gettoggle(urb->dev, qh->epnum, 0))
-				csr = MUSB_RXCSR_H_WR_DATATOGGLE
-					| MUSB_RXCSR_H_DATATOGGLE;
-			else
-				csr = 0;
 			if (qh->type == USB_ENDPOINT_XFER_INT)
 				csr |= MUSB_RXCSR_DISNYET;
 
@@ -933,6 +905,7 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
 	void __iomem		*epio = ep->regs;
 	struct musb_qh		*cur_qh, *next_qh;
 	u16			rx_csr, tx_csr;
+	u16			toggle;
 
 	musb_ep_select(mbase, ep->epnum);
 	if (is_in) {
@@ -970,7 +943,8 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
 			urb->actual_length += dma->actual_len;
 			dma->actual_len = 0L;
 		}
-		musb_save_toggle(cur_qh, is_in, urb);
+		toggle = musb->io.get_toggle(cur_qh, !is_in);
+		usb_settoggle(urb->dev, cur_qh->epnum, !is_in, toggle ? 1 : 0);
 
 		if (is_in) {
 			/* move cur_qh to end of queue */
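
With musb_save_toggle() removed, the host code reads and programs the data toggle through musb->io.get_toggle/set_toggle, so a controller that keeps the toggle in dedicated registers rather than in TXCSR/RXCSR can supply its own accessor. A minimal sketch assuming such a hypothetical controller; the register offsets below are invented for illustration:

	#define EXAMPLE_TXTOG	0x80	/* hypothetical TX toggle register */
	#define EXAMPLE_RXTOG	0x84	/* hypothetical RX toggle register */

	static u16 example_get_toggle(struct musb_qh *qh, int is_out)
	{
		struct musb *musb = qh->hw_ep->musb;
		u8 epnum = qh->hw_ep->epnum;
		u16 toggle;

		if (is_out)
			toggle = musb_readw(musb->mregs, EXAMPLE_TXTOG);
		else
			toggle = musb_readw(musb->mregs, EXAMPLE_RXTOG);

		/* non-zero means the toggle for this endpoint is set */
		return toggle & (1 << epnum);
	}
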
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 8058a58..f17aabd 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -22,6 +22,8 @@
  * @read_fifo:	platform specific function to read fifo
  * @write_fifo:	platform specific function to write fifo
  * @busctl_offset: platform specific function to get busctl offset
+ * @get_toggle: platform specific function to get toggle
+ * @set_toggle: platform specific function to set toggle
  */
 struct musb_io {
 	u32	(*ep_offset)(u8 epnum, u16 offset);
@@ -30,14 +32,18 @@ struct musb_io {
 	void	(*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
 	void	(*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
 	u32	(*busctl_offset)(u8 epnum, u16 offset);
+	u16	(*get_toggle)(struct musb_qh *qh, int is_out);
+	u16	(*set_toggle)(struct musb_qh *qh, int is_out, struct urb *urb);
 };
 
 /* Do not add new entries here, add them to struct musb_io instead */
-extern u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
-extern void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
-extern u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
-extern void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
-extern u32 musb_readl(const void __iomem *addr, unsigned offset);
-extern void musb_writel(void __iomem *addr, unsigned offset, u32 data);
+extern u8 (*musb_readb)(void __iomem *addr, u32 offset);
+extern void (*musb_writeb)(void __iomem *addr, u32 offset, u8 data);
+extern u8 (*musb_clearb)(void __iomem *addr, u32 offset);
+extern u16 (*musb_readw)(void __iomem *addr, u32 offset);
+extern void (*musb_writew)(void __iomem *addr, u32 offset, u16 data);
+extern u16 (*musb_clearw)(void __iomem *addr, u32 offset);
+extern u32 musb_readl(void __iomem *addr, u32 offset);
+extern void musb_writel(void __iomem *addr, u32 offset, u32 data);
 
 #endif
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
index a97d618..b193daf 100644
--- a/drivers/usb/musb/musb_trace.h
+++ b/drivers/usb/musb/musb_trace.h
@@ -38,11 +38,12 @@ TRACE_EVENT(musb_log,
 );
 
 DECLARE_EVENT_CLASS(musb_regb,
-	TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+	TP_PROTO(void *caller, const void __iomem *addr,
+		 unsigned int offset, u8 data),
 	TP_ARGS(caller, addr, offset, data),
 	TP_STRUCT__entry(
 		__field(void *, caller)
-		__field(const void *, addr)
+		__field(const void __iomem *, addr)
 		__field(unsigned int, offset)
 		__field(u8, data)
 	),
@@ -57,21 +58,24 @@ DECLARE_EVENT_CLASS(musb_regb,
 );
 
 DEFINE_EVENT(musb_regb, musb_readb,
-	TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+	TP_PROTO(void *caller, const void __iomem *addr,
+		 unsigned int offset, u8 data),
 	TP_ARGS(caller, addr, offset, data)
 );
 
 DEFINE_EVENT(musb_regb, musb_writeb,
-	TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+	TP_PROTO(void *caller, const void __iomem *addr,
+		 unsigned int offset, u8 data),
 	TP_ARGS(caller, addr, offset, data)
 );
 
 DECLARE_EVENT_CLASS(musb_regw,
-	TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+	TP_PROTO(void *caller, const void __iomem *addr,
+		 unsigned int offset, u16 data),
 	TP_ARGS(caller, addr, offset, data),
 	TP_STRUCT__entry(
 		__field(void *, caller)
-		__field(const void *, addr)
+		__field(const void __iomem *, addr)
 		__field(unsigned int, offset)
 		__field(u16, data)
 	),
@@ -86,21 +90,24 @@ DECLARE_EVENT_CLASS(musb_regw,
 );
 
 DEFINE_EVENT(musb_regw, musb_readw,
-	TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+	TP_PROTO(void *caller, const void __iomem *addr,
+		 unsigned int offset, u16 data),
 	TP_ARGS(caller, addr, offset, data)
 );
 
 DEFINE_EVENT(musb_regw, musb_writew,
-	TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+	TP_PROTO(void *caller, const void __iomem *addr,
+		 unsigned int offset, u16 data),
 	TP_ARGS(caller, addr, offset, data)
 );
 
 DECLARE_EVENT_CLASS(musb_regl,
-	TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+	TP_PROTO(void *caller, const void __iomem *addr,
+		 unsigned int offset, u32 data),
 	TP_ARGS(caller, addr, offset, data),
 	TP_STRUCT__entry(
 		__field(void *, caller)
-		__field(const void *, addr)
+		__field(const void __iomem *, addr)
 		__field(unsigned int, offset)
 		__field(u32, data)
 	),
@@ -115,12 +122,14 @@ DECLARE_EVENT_CLASS(musb_regl,
 );
 
 DEFINE_EVENT(musb_regl, musb_readl,
-	TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+	TP_PROTO(void *caller, const void __iomem *addr,
+		 unsigned int offset, u32 data),
 	TP_ARGS(caller, addr, offset, data)
 );
 
 DEFINE_EVENT(musb_regl, musb_writel,
-	TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+	TP_PROTO(void *caller, const void __iomem *addr,
+		 unsigned int offset, u32 data),
 	TP_ARGS(caller, addr, offset, data)
 );
 
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 2d3751d..0aacfc8 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -10,12 +10,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include "musb_core.h"
-
-#define MUSB_HSDMA_BASE		0x200
-#define MUSB_HSDMA_INTR		(MUSB_HSDMA_BASE + 0)
-#define MUSB_HSDMA_CONTROL		0x4
-#define MUSB_HSDMA_ADDRESS		0x8
-#define MUSB_HSDMA_COUNT		0xc
+#include "musb_dma.h"
 
 #define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset)		\
 		(MUSB_HSDMA_BASE + (_bchannel << 4) + _offset)
@@ -268,7 +263,7 @@ static int dma_channel_abort(struct dma_channel *channel)
 	return 0;
 }
 
-static irqreturn_t dma_controller_irq(int irq, void *private_data)
+irqreturn_t dma_controller_irq(int irq, void *private_data)
 {
 	struct musb_dma_controller *controller = private_data;
 	struct musb *musb = controller->private_data;
@@ -289,7 +284,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
 
 	spin_lock_irqsave(&musb->lock, flags);
 
-	int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
+	int_hsdma = musb_clearb(mbase, MUSB_HSDMA_INTR);
 
 	if (!int_hsdma) {
 		musb_dbg(musb, "spurious DMA irq");
@@ -383,6 +378,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
 	spin_unlock_irqrestore(&musb->lock, flags);
 	return retval;
 }
+EXPORT_SYMBOL_GPL(dma_controller_irq);
 
 void musbhs_dma_controller_destroy(struct dma_controller *c)
 {
@@ -398,18 +394,10 @@ void musbhs_dma_controller_destroy(struct dma_controller *c)
 }
 EXPORT_SYMBOL_GPL(musbhs_dma_controller_destroy);
 
-struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
-						    void __iomem *base)
+static struct musb_dma_controller *
+dma_controller_alloc(struct musb *musb, void __iomem *base)
 {
 	struct musb_dma_controller *controller;
-	struct device *dev = musb->controller;
-	struct platform_device *pdev = to_platform_device(dev);
-	int irq = platform_get_irq_byname(pdev, "dma");
-
-	if (irq <= 0) {
-		dev_err(dev, "No DMA interrupt line!\n");
-		return NULL;
-	}
 
 	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
 	if (!controller)
@@ -423,6 +411,25 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
 	controller->controller.channel_release = dma_channel_release;
 	controller->controller.channel_program = dma_channel_program;
 	controller->controller.channel_abort = dma_channel_abort;
+	return controller;
+}
+
+struct dma_controller *
+musbhs_dma_controller_create(struct musb *musb, void __iomem *base)
+{
+	struct musb_dma_controller *controller;
+	struct device *dev = musb->controller;
+	struct platform_device *pdev = to_platform_device(dev);
+	int irq = platform_get_irq_byname(pdev, "dma");
+
+	if (irq <= 0) {
+		dev_err(dev, "No DMA interrupt line!\n");
+		return NULL;
+	}
+
+	controller = dma_controller_alloc(musb, base);
+	if (!controller)
+		return NULL;
 
 	if (request_irq(irq, dma_controller_irq, 0,
 			dev_name(musb->controller), controller)) {
@@ -437,3 +444,16 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
 	return &controller->controller;
 }
 EXPORT_SYMBOL_GPL(musbhs_dma_controller_create);
+
+struct dma_controller *
+musbhs_dma_controller_create_noirq(struct musb *musb, void __iomem *base)
+{
+	struct musb_dma_controller *controller;
+
+	controller = dma_controller_alloc(musb, base);
+	if (!controller)
+		return NULL;
+
+	return &controller->controller;
+}
+EXPORT_SYMBOL_GPL(musbhs_dma_controller_create_noirq);
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index a3d2fef..d62c78b 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -38,69 +38,6 @@ struct omap2430_glue {
 
 static struct omap2430_glue	*_glue;
 
-static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
-{
-	struct usb_otg	*otg = musb->xceiv->otg;
-	u8		devctl;
-	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-	/* HDRC controls CPEN, but beware current surges during device
-	 * connect.  They can trigger transient overcurrent conditions
-	 * that must be ignored.
-	 */
-
-	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-
-	if (is_on) {
-		if (musb->xceiv->otg->state == OTG_STATE_A_IDLE) {
-			int loops = 100;
-			/* start the session */
-			devctl |= MUSB_DEVCTL_SESSION;
-			musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
-			/*
-			 * Wait for the musb to set as A device to enable the
-			 * VBUS
-			 */
-			while (musb_readb(musb->mregs, MUSB_DEVCTL) &
-			       MUSB_DEVCTL_BDEVICE) {
-
-				mdelay(5);
-				cpu_relax();
-
-				if (time_after(jiffies, timeout)
-				    || loops-- <= 0) {
-					dev_err(musb->controller,
-					"configured as A device timeout");
-					break;
-				}
-			}
-
-			otg_set_vbus(otg, 1);
-		} else {
-			musb->is_active = 1;
-			musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
-			devctl |= MUSB_DEVCTL_SESSION;
-			MUSB_HST_MODE(musb);
-		}
-	} else {
-		musb->is_active = 0;
-
-		/* NOTE:  we're skipping A_WAIT_VFALL -> A_IDLE and
-		 * jumping right to B_IDLE...
-		 */
-
-		musb->xceiv->otg->state = OTG_STATE_B_IDLE;
-		devctl &= ~MUSB_DEVCTL_SESSION;
-
-		MUSB_DEV_MODE(musb);
-	}
-	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
-
-	dev_dbg(musb->controller, "VBUS %s, devctl %02x "
-		/* otg %3x conf %08x prcm %08x */ "\n",
-		usb_otg_state_string(musb->xceiv->otg->state),
-		musb_readb(musb->mregs, MUSB_DEVCTL));
-}
-
 static inline void omap2430_low_level_exit(struct musb *musb)
 {
 	u32 l;
@@ -140,24 +77,52 @@ static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
 	return 0;
 }
 
+/*
+ * HDRC controls CPEN, but beware current surges during device connect.
+ * They can trigger transient overcurrent conditions that must be ignored.
+ *
+ * Note that we're skipping A_WAIT_VFALL -> A_IDLE and jumping right to B_IDLE
+ * as set by musb_set_peripheral().
+ */
 static void omap_musb_set_mailbox(struct omap2430_glue *glue)
 {
 	struct musb *musb = glue_to_musb(glue);
-	struct musb_hdrc_platform_data *pdata =
-		dev_get_platdata(musb->controller);
-	struct omap_musb_board_data *data = pdata->board_data;
+	int error;
 
 	pm_runtime_get_sync(musb->controller);
+
+	dev_dbg(musb->controller, "VBUS %s, devctl %02x\n",
+		usb_otg_state_string(musb->xceiv->otg->state),
+		musb_readb(musb->mregs, MUSB_DEVCTL));
+
 	switch (glue->status) {
 	case MUSB_ID_GROUND:
 		dev_dbg(musb->controller, "ID GND\n");
-
-		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
-		musb->xceiv->last_event = USB_EVENT_ID;
-		if (musb->gadget_driver) {
-			omap_control_usb_set_mode(glue->control_otghs,
-				USB_MODE_HOST);
-			omap2430_musb_set_vbus(musb, 1);
+		switch (musb->xceiv->otg->state) {
+		case OTG_STATE_A_IDLE:
+			error = musb_set_host(musb);
+			if (error)
+				break;
+			musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+			/* Fall through */
+		case OTG_STATE_A_WAIT_VRISE:
+		case OTG_STATE_A_WAIT_BCON:
+		case OTG_STATE_A_HOST:
+			/*
+			 * On multiple ID ground interrupts just keep enabling
+			 * VBUS. At least cpcap VBUS shuts down otherwise.
+			 */
+			otg_set_vbus(musb->xceiv->otg, 1);
+			break;
+		default:
+			musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+			musb->xceiv->last_event = USB_EVENT_ID;
+			if (musb->gadget_driver) {
+				omap_control_usb_set_mode(glue->control_otghs,
+							  USB_MODE_HOST);
+				otg_set_vbus(musb->xceiv->otg, 1);
+			}
+			break;
 		}
 		break;
 
@@ -174,12 +139,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
 		dev_dbg(musb->controller, "VBUS Disconnect\n");
 
 		musb->xceiv->last_event = USB_EVENT_NONE;
-		if (musb->gadget_driver)
-			omap2430_musb_set_vbus(musb, 0);
-
-		if (data->interface_type == MUSB_INTERFACE_UTMI)
-			otg_set_vbus(musb->xceiv->otg, 0);
-
+		musb_set_peripheral(musb);
+		otg_set_vbus(musb->xceiv->otg, 0);
 		omap_control_usb_set_mode(glue->control_otghs,
 			USB_MODE_DISCONNECT);
 		break;
@@ -226,7 +187,6 @@ static int omap2430_musb_init(struct musb *musb)
 	u32 l;
 	int status = 0;
 	struct device *dev = musb->controller;
-	struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
 	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
 	struct omap_musb_board_data *data = plat->board_data;
 
@@ -282,50 +242,17 @@ static int omap2430_musb_init(struct musb *musb)
 			musb_readl(musb->mregs, OTG_INTERFSEL),
 			musb_readl(musb->mregs, OTG_SIMENABLE));
 
-	if (glue->status != MUSB_UNKNOWN)
-		omap_musb_set_mailbox(glue);
-
 	return 0;
 }
 
 static void omap2430_musb_enable(struct musb *musb)
 {
-	u8		devctl;
-	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
 	struct device *dev = musb->controller;
 	struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
-	struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
-	struct omap_musb_board_data *data = pdata->board_data;
 
-
-	switch (glue->status) {
-
-	case MUSB_ID_GROUND:
-		omap_control_usb_set_mode(glue->control_otghs, USB_MODE_HOST);
-		if (data->interface_type != MUSB_INTERFACE_UTMI)
-			break;
-		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-		/* start the session */
-		devctl |= MUSB_DEVCTL_SESSION;
-		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
-		while (musb_readb(musb->mregs, MUSB_DEVCTL) &
-				MUSB_DEVCTL_BDEVICE) {
-			cpu_relax();
-
-			if (time_after(jiffies, timeout)) {
-				dev_err(dev, "configured as A device timeout");
-				break;
-			}
-		}
-		break;
-
-	case MUSB_VBUS_VALID:
-		omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
-		break;
-
-	default:
-		break;
-	}
+	if (glue->status == MUSB_UNKNOWN)
+		glue->status = MUSB_VBUS_OFF;
+	omap_musb_set_mailbox(glue);
 }
 
 static void omap2430_musb_disable(struct musb *musb)
@@ -361,8 +288,6 @@ static const struct musb_platform_ops omap2430_ops = {
 	.init		= omap2430_musb_init,
 	.exit		= omap2430_musb_exit,
 
-	.set_vbus	= omap2430_musb_set_vbus,
-
 	.enable		= omap2430_musb_enable,
 	.disable	= omap2430_musb_disable,
 
@@ -552,6 +477,9 @@ static int omap2430_runtime_resume(struct device *dev)
 	musb_writel(musb->mregs, OTG_INTERFSEL,
 		    musb->context.otg_interfsel);
 
+	/* Wait for musb to get oriented. Otherwise we can get babble */
+	usleep_range(200000, 250000);
+
 	return 0;
 }
 
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 832a41f..f3f76f2 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -407,7 +407,7 @@ static u32 sunxi_musb_busctl_offset(u8 epnum, u16 offset)
 	return SUNXI_MUSB_TXFUNCADDR + offset;
 }
 
-static u8 sunxi_musb_readb(const void __iomem *addr, unsigned offset)
+static u8 sunxi_musb_readb(void __iomem *addr, u32 offset)
 {
 	struct sunxi_glue *glue;
 
@@ -520,7 +520,7 @@ static void sunxi_musb_writeb(void __iomem *addr, unsigned offset, u8 data)
 		(int)(addr - sunxi_musb->mregs));
 }
 
-static u16 sunxi_musb_readw(const void __iomem *addr, unsigned offset)
+static u16 sunxi_musb_readw(void __iomem *addr, u32 offset)
 {
 	if (addr == sunxi_musb->mregs) {
 		/* generic control or fifo control reg access */
@@ -781,6 +781,8 @@ static int sunxi_musb_probe(struct platform_device *pdev)
 	pinfo.name	 = "musb-hdrc";
 	pinfo.id	= PLATFORM_DEVID_AUTO;
 	pinfo.parent	= &pdev->dev;
+	pinfo.fwnode	= of_fwnode_handle(pdev->dev.of_node);
+	pinfo.of_node_reused = true;
 	pinfo.res	= pdev->resource;
 	pinfo.num_res	= pdev->num_resources;
 	pinfo.data	= &pdata;
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 3945328..5d44908 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -142,7 +142,7 @@ static void tusb_ep_select(void __iomem *mbase, u8 epnum)
 /*
  * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
  */
-static u8 tusb_readb(const void __iomem *addr, unsigned offset)
+static u8 tusb_readb(void __iomem *addr, u32 offset)
 {
 	u16 tmp;
 	u8 val;
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index d19bb3e..d5cf5e8 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -310,9 +310,9 @@ static int ux500_dma_controller_start(struct ux500_dma_controller *controller)
 			dma_channel->max_len = SZ_16M;
 
 			ux500_channel->dma_chan =
-				dma_request_slave_channel(dev, chan_names[ch_num]);
+				dma_request_chan(dev, chan_names[ch_num]);
 
-			if (!ux500_channel->dma_chan)
+			if (IS_ERR(ux500_channel->dma_chan))
 				ux500_channel->dma_chan =
 					dma_request_channel(mask,
 							    data ?
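
The ux500 change is needed because dma_request_chan() reports failure with an ERR_PTR() (including -EPROBE_DEFER), whereas the old dma_request_slave_channel() returned NULL, so the fallback test must switch to IS_ERR(). A minimal sketch of the pattern, assuming a caller that can fall back to PIO (names are illustrative):

	static struct dma_chan *example_request_dma(struct device *dev,
						    const char *name)
	{
		struct dma_chan *chan;

		chan = dma_request_chan(dev, name);
		if (IS_ERR(chan))
			return NULL;	/* caller falls back to PIO */

		return chan;
	}
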
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 24b4f09..ff24fca 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -162,7 +162,7 @@
 
 config USB_TEGRA_PHY
 	tristate "NVIDIA Tegra USB PHY Driver"
-	depends on ARCH_TEGRA
+	depends on ARCH_TEGRA || COMPILE_TEST
 	select USB_COMMON
 	select USB_PHY
 	select USB_ULPI
@@ -172,7 +172,7 @@
 
 config USB_ULPI
 	bool "Generic ULPI Transceiver Driver"
-	depends on ARM || ARM64
+	depends on ARM || ARM64 || COMPILE_TEST
 	select USB_ULPI_VIEWPORT
 	help
 	  Enable this to support ULPI connected USB OTG transceivers which
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 4bb4b1d..20c0f08 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -108,7 +108,8 @@ enum ab8500_usb_mode {
 	USB_IDLE = 0,
 	USB_PERIPHERAL,
 	USB_HOST,
-	USB_DEDICATED_CHG
+	USB_DEDICATED_CHG,
+	USB_UART
 };
 
 /* Register USB_LINK_STATUS interrupt */
@@ -393,6 +394,24 @@ static int ab8505_usb_link_status_update(struct ab8500_usb *ab,
 		usb_phy_set_event(&ab->phy, USB_EVENT_CHARGER);
 		break;
 
+	/*
+	 * FIXME: For now we rely on the boot firmware to set up the necessary
+	 * PHY/pin configuration for UART mode.
+	 *
+	 * AB8505 does not seem to report any status change for UART cables,
+	 * possibly because it cannot detect them autonomously.
+	 * We may need to measure the ID resistance manually to reliably
+	 * detect UART cables after bootup.
+	 */
+	case USB_LINK_SAMSUNG_UART_CBL_PHY_EN_8505:
+	case USB_LINK_SAMSUNG_UART_CBL_PHY_DISB_8505:
+		if (ab->mode == USB_IDLE) {
+			ab->mode = USB_UART;
+			ab8500_usb_peri_phy_en(ab);
+		}
+
+		break;
+
 	default:
 		break;
 	}
@@ -566,6 +585,11 @@ static irqreturn_t ab8500_usb_disconnect_irq(int irq, void *data)
 		ab->vbus_draw = 0;
 	}
 
+	if (ab->mode == USB_UART) {
+		ab8500_usb_peri_phy_dis(ab);
+		ab->mode = USB_IDLE;
+	}
+
 	if (is_ab8500_2p0(ab->ab8500)) {
 		if (ab->mode == USB_DEDICATED_CHG) {
 			ab8500_usb_wd_linkstatus(ab,
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index f5f0568..8524475 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -57,7 +57,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
 
 	am_phy->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
 
-	ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, NULL);
+	ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen);
 	if (ret)
 		return ret;
 
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index a53b89b..661a229 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -21,8 +21,7 @@
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/delay.h>
 
 #include "phy-generic.h"
@@ -204,8 +203,7 @@ static int nop_set_host(struct usb_otg *otg, struct usb_bus *host)
 	return 0;
 }
 
-int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
-		struct usb_phy_generic_platform_data *pdata)
+int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
 {
 	enum usb_phy_type type = USB_PHY_TYPE_USB2;
 	int err = 0;
@@ -221,28 +219,15 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
 
 		needs_vcc = of_property_read_bool(node, "vcc-supply");
 		needs_clk = of_property_read_bool(node, "clocks");
-		nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
-							   GPIOD_ASIS);
-		err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
-		if (!err) {
-			nop->gpiod_vbus = devm_gpiod_get_optional(dev,
-							 "vbus-detect",
-							 GPIOD_ASIS);
-			err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
-		}
-	} else if (pdata) {
-		type = pdata->type;
-		clk_rate = pdata->clk_rate;
-		needs_vcc = pdata->needs_vcc;
-		if (gpio_is_valid(pdata->gpio_reset)) {
-			err = devm_gpio_request_one(dev, pdata->gpio_reset,
-						    GPIOF_ACTIVE_LOW,
-						    dev_name(dev));
-			if (!err)
-				nop->gpiod_reset =
-					gpio_to_desc(pdata->gpio_reset);
-		}
-		nop->gpiod_vbus = pdata->gpiod_vbus;
+	}
+	nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
+						   GPIOD_ASIS);
+	err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
+	if (!err) {
+		nop->gpiod_vbus = devm_gpiod_get_optional(dev,
+						 "vbus-detect",
+						 GPIOD_ASIS);
+		err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
 	}
 
 	if (err == -EPROBE_DEFER)
@@ -308,7 +293,7 @@ static int usb_phy_generic_probe(struct platform_device *pdev)
 	if (!nop)
 		return -ENOMEM;
 
-	err = usb_phy_gen_create_phy(dev, nop, dev_get_platdata(&pdev->dev));
+	err = usb_phy_gen_create_phy(dev, nop);
 	if (err)
 		return err;
 	if (nop->gpiod_vbus) {
diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h
index 9728962..7ee8021 100644
--- a/drivers/usb/phy/phy-generic.h
+++ b/drivers/usb/phy/phy-generic.h
@@ -22,7 +22,6 @@ struct usb_phy_generic {
 int usb_gen_phy_init(struct usb_phy *phy);
 void usb_gen_phy_shutdown(struct usb_phy *phy);
 
-int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
-		struct usb_phy_generic_platform_data *pdata);
+int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop);
 
 #endif
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index 553e257..f13f553 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -7,7 +7,7 @@
 
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
@@ -17,7 +17,6 @@
 #include <linux/regulator/consumer.h>
 
 #include <linux/usb/gadget.h>
-#include <linux/usb/gpio_vbus.h>
 #include <linux/usb/otg.h>
 
 
@@ -29,6 +28,8 @@
  * Needs to be loaded before the UDC driver that will use it.
  */
 struct gpio_vbus_data {
+	struct gpio_desc	*vbus_gpiod;
+	struct gpio_desc	*pullup_gpiod;
 	struct usb_phy		phy;
 	struct device          *dev;
 	struct regulator       *vbus_draw;
@@ -83,38 +84,30 @@ static void set_vbus_draw(struct gpio_vbus_data *gpio_vbus, unsigned mA)
 	gpio_vbus->mA = mA;
 }
 
-static int is_vbus_powered(struct gpio_vbus_mach_info *pdata)
+static int is_vbus_powered(struct gpio_vbus_data *gpio_vbus)
 {
-	int vbus;
-
-	vbus = gpio_get_value(pdata->gpio_vbus);
-	if (pdata->gpio_vbus_inverted)
-		vbus = !vbus;
-
-	return vbus;
+	return gpiod_get_value(gpio_vbus->vbus_gpiod);
 }
 
 static void gpio_vbus_work(struct work_struct *work)
 {
 	struct gpio_vbus_data *gpio_vbus =
 		container_of(work, struct gpio_vbus_data, work.work);
-	struct gpio_vbus_mach_info *pdata = dev_get_platdata(gpio_vbus->dev);
-	int gpio, status, vbus;
+	int status, vbus;
 
 	if (!gpio_vbus->phy.otg->gadget)
 		return;
 
-	vbus = is_vbus_powered(pdata);
+	vbus = is_vbus_powered(gpio_vbus);
 	if ((vbus ^ gpio_vbus->vbus) == 0)
 		return;
 	gpio_vbus->vbus = vbus;
 
 	/* Peripheral controllers which manage the pullup themselves won't have
-	 * gpio_pullup configured here.  If it's configured here, we'll do what
-	 * isp1301_omap::b_peripheral() does and enable the pullup here... although
-	 * that may complicate usb_gadget_{,dis}connect() support.
+	 * a pullup GPIO configured here.  If it's configured here, we'll do
+	 * what isp1301_omap::b_peripheral() does and enable the pullup here...
+	 * although that may complicate usb_gadget_{,dis}connect() support.
 	 */
-	gpio = pdata->gpio_pullup;
 
 	if (vbus) {
 		status = USB_EVENT_VBUS;
@@ -126,16 +119,16 @@ static void gpio_vbus_work(struct work_struct *work)
 		set_vbus_draw(gpio_vbus, 100);
 
 		/* optionally enable D+ pullup */
-		if (gpio_is_valid(gpio))
-			gpio_set_value(gpio, !pdata->gpio_pullup_inverted);
+		if (gpio_vbus->pullup_gpiod)
+			gpiod_set_value(gpio_vbus->pullup_gpiod, 1);
 
 		atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
 					   status, gpio_vbus->phy.otg->gadget);
 		usb_phy_set_event(&gpio_vbus->phy, USB_EVENT_ENUMERATED);
 	} else {
 		/* optionally disable D+ pullup */
-		if (gpio_is_valid(gpio))
-			gpio_set_value(gpio, pdata->gpio_pullup_inverted);
+		if (gpio_vbus->pullup_gpiod)
+			gpiod_set_value(gpio_vbus->pullup_gpiod, 0);
 
 		set_vbus_draw(gpio_vbus, 0);
 
@@ -154,12 +147,11 @@ static void gpio_vbus_work(struct work_struct *work)
 static irqreturn_t gpio_vbus_irq(int irq, void *data)
 {
 	struct platform_device *pdev = data;
-	struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
 	struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
 	struct usb_otg *otg = gpio_vbus->phy.otg;
 
 	dev_dbg(&pdev->dev, "VBUS %s (gadget: %s)\n",
-		is_vbus_powered(pdata) ? "supplied" : "inactive",
+		is_vbus_powered(gpio_vbus) ? "supplied" : "inactive",
 		otg->gadget ? otg->gadget->name : "none");
 
 	if (otg->gadget)
@@ -175,22 +167,18 @@ static int gpio_vbus_set_peripheral(struct usb_otg *otg,
 					struct usb_gadget *gadget)
 {
 	struct gpio_vbus_data *gpio_vbus;
-	struct gpio_vbus_mach_info *pdata;
 	struct platform_device *pdev;
-	int gpio;
 
 	gpio_vbus = container_of(otg->usb_phy, struct gpio_vbus_data, phy);
 	pdev = to_platform_device(gpio_vbus->dev);
-	pdata = dev_get_platdata(gpio_vbus->dev);
-	gpio = pdata->gpio_pullup;
 
 	if (!gadget) {
 		dev_dbg(&pdev->dev, "unregistering gadget '%s'\n",
 			otg->gadget->name);
 
 		/* optionally disable D+ pullup */
-		if (gpio_is_valid(gpio))
-			gpio_set_value(gpio, pdata->gpio_pullup_inverted);
+		if (gpio_vbus->pullup_gpiod)
+			gpiod_set_value(gpio_vbus->pullup_gpiod, 0);
 
 		set_vbus_draw(gpio_vbus, 0);
 
@@ -242,16 +230,12 @@ static int gpio_vbus_set_suspend(struct usb_phy *phy, int suspend)
 
 static int gpio_vbus_probe(struct platform_device *pdev)
 {
-	struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
 	struct gpio_vbus_data *gpio_vbus;
 	struct resource *res;
-	int err, gpio, irq;
+	struct device *dev = &pdev->dev;
+	int err, irq;
 	unsigned long irqflags;
 
-	if (!pdata || !gpio_is_valid(pdata->gpio_vbus))
-		return -EINVAL;
-	gpio = pdata->gpio_vbus;
-
 	gpio_vbus = devm_kzalloc(&pdev->dev, sizeof(struct gpio_vbus_data),
 				 GFP_KERNEL);
 	if (!gpio_vbus)
@@ -273,37 +257,43 @@ static int gpio_vbus_probe(struct platform_device *pdev)
 	gpio_vbus->phy.otg->usb_phy = &gpio_vbus->phy;
 	gpio_vbus->phy.otg->set_peripheral = gpio_vbus_set_peripheral;
 
-	err = devm_gpio_request(&pdev->dev, gpio, "vbus_detect");
-	if (err) {
-		dev_err(&pdev->dev, "can't request vbus gpio %d, err: %d\n",
-			gpio, err);
+	/* Look up the VBUS sensing GPIO */
+	gpio_vbus->vbus_gpiod = devm_gpiod_get(dev, "vbus", GPIOD_IN);
+	if (IS_ERR(gpio_vbus->vbus_gpiod)) {
+		err = PTR_ERR(gpio_vbus->vbus_gpiod);
+		dev_err(&pdev->dev, "can't request vbus gpio, err: %d\n", err);
 		return err;
 	}
-	gpio_direction_input(gpio);
+	gpiod_set_consumer_name(gpio_vbus->vbus_gpiod, "vbus_detect");
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (res) {
 		irq = res->start;
 		irqflags = (res->flags & IRQF_TRIGGER_MASK) | IRQF_SHARED;
 	} else {
-		irq = gpio_to_irq(gpio);
+		irq = gpiod_to_irq(gpio_vbus->vbus_gpiod);
 		irqflags = VBUS_IRQ_FLAGS;
 	}
 
 	gpio_vbus->irq = irq;
 
-	/* if data line pullup is in use, initialize it to "not pulling up" */
-	gpio = pdata->gpio_pullup;
-	if (gpio_is_valid(gpio)) {
-		err = devm_gpio_request(&pdev->dev, gpio, "udc_pullup");
-		if (err) {
-			dev_err(&pdev->dev,
-				"can't request pullup gpio %d, err: %d\n",
-				gpio, err);
-			return err;
-		}
-		gpio_direction_output(gpio, pdata->gpio_pullup_inverted);
+	/*
+	 * The VBUS sensing GPIO should have a pulldown, which will normally be
+	 * part of a resistor ladder turning a 4.0V-5.25V level on VBUS into a
+	 * value the GPIO detects as active. Some systems will use comparators.
+	 * Get the optional D+ or D- pullup GPIO. If the data line pullup is
+	 * in use, initialize it to "not pulling up".
+	 */
+	gpio_vbus->pullup_gpiod = devm_gpiod_get_optional(dev, "pullup",
+							  GPIOD_OUT_LOW);
+	if (IS_ERR(gpio_vbus->pullup_gpiod)) {
+		err = PTR_ERR(gpio_vbus->pullup_gpiod);
+		dev_err(&pdev->dev, "can't request pullup gpio, err: %d\n",
+			err);
+		return err;
 	}
+	if (gpio_vbus->pullup_gpiod)
+		gpiod_set_consumer_name(gpio_vbus->pullup_gpiod, "udc_pullup");
 
 	err = devm_request_irq(&pdev->dev, irq, gpio_vbus_irq, irqflags,
 			       "vbus_detect", pdev);
@@ -330,7 +320,7 @@ static int gpio_vbus_probe(struct platform_device *pdev)
 		return err;
 	}
 
-	device_init_wakeup(&pdev->dev, pdata->wakeup);
+	/* TODO: wakeup could be enabled here with device_init_wakeup(dev, 1) */
 
 	return 0;
 }
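
Since phy-gpio-vbus-usb now looks up "vbus" and "pullup" GPIO descriptors instead of taking GPIO numbers from struct gpio_vbus_mach_info, board files have to register a GPIO lookup table for the device. A minimal sketch of such a table; the controller label, line numbers and device name are assumptions for illustration:

	static struct gpiod_lookup_table example_gpio_vbus_gpios = {
		.dev_id = "gpio-vbus",
		.table = {
			GPIO_LOOKUP("gpio-pxa", 19, "vbus", GPIO_ACTIVE_HIGH),
			GPIO_LOOKUP("gpio-pxa", 20, "pullup", GPIO_ACTIVE_HIGH),
			{ },
		},
	};

	static void __init example_board_init(void)
	{
		gpiod_add_lookup_table(&example_gpio_vbus_gpios);
	}
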
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
index 110e6e9..9c226b57 100644
--- a/drivers/usb/phy/phy-keystone.c
+++ b/drivers/usb/phy/phy-keystone.c
@@ -76,7 +76,7 @@ static int keystone_usbphy_probe(struct platform_device *pdev)
 	if (IS_ERR(k_phy->phy_ctrl))
 		return PTR_ERR(k_phy->phy_ctrl);
 
-	ret = usb_phy_gen_create_phy(dev, &k_phy->usb_phy_gen, NULL);
+	ret = usb_phy_gen_create_phy(dev, &k_phy->usb_phy_gen);
 	if (ret)
 		return ret;
 
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index ea7ef1d..037e8ee 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -9,54 +9,56 @@
  *	Venu Byravarasu <vbyravarasu@nvidia.com>
  */
 
-#include <linux/resource.h>
 #include <linux/delay.h>
-#include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/export.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/iopoll.h>
 #include <linux/gpio.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/ulpi.h>
-#include <linux/usb/of.h>
-#include <linux/usb/ehci_def.h>
-#include <linux/usb/tegra_usb_phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
 #include <linux/regulator/consumer.h>
 
-#define ULPI_VIEWPORT		0x170
+#include <linux/usb/ehci_def.h>
+#include <linux/usb/of.h>
+#include <linux/usb/tegra_usb_phy.h>
+#include <linux/usb/ulpi.h>
+
+#define ULPI_VIEWPORT				0x170
 
 /* PORTSC PTS/PHCD bits, Tegra20 only */
-#define TEGRA_USB_PORTSC1				0x184
-#define TEGRA_USB_PORTSC1_PTS(x)			(((x) & 0x3) << 30)
-#define TEGRA_USB_PORTSC1_PHCD				(1 << 23)
+#define TEGRA_USB_PORTSC1			0x184
+#define TEGRA_USB_PORTSC1_PTS(x)		(((x) & 0x3) << 30)
+#define TEGRA_USB_PORTSC1_PHCD			BIT(23)
 
 /* HOSTPC1 PTS/PHCD bits, Tegra30 and above */
-#define TEGRA_USB_HOSTPC1_DEVLC		0x1b4
-#define TEGRA_USB_HOSTPC1_DEVLC_PTS(x)	(((x) & 0x7) << 29)
-#define TEGRA_USB_HOSTPC1_DEVLC_PHCD	(1 << 22)
+#define TEGRA_USB_HOSTPC1_DEVLC			0x1b4
+#define TEGRA_USB_HOSTPC1_DEVLC_PTS(x)		(((x) & 0x7) << 29)
+#define TEGRA_USB_HOSTPC1_DEVLC_PHCD		BIT(22)
 
 /* Bits of PORTSC1, which will get cleared by writing 1 into them */
 #define TEGRA_PORTSC1_RWC_BITS	(PORT_CSC | PORT_PEC | PORT_OCC)
 
-#define USB_SUSP_CTRL		0x400
-#define   USB_WAKE_ON_CNNT_EN_DEV	(1 << 3)
-#define   USB_WAKE_ON_DISCON_EN_DEV	(1 << 4)
-#define   USB_SUSP_CLR		(1 << 5)
-#define   USB_PHY_CLK_VALID	(1 << 7)
-#define   UTMIP_RESET			(1 << 11)
-#define   UHSIC_RESET			(1 << 11)
-#define   UTMIP_PHY_ENABLE		(1 << 12)
-#define   ULPI_PHY_ENABLE	(1 << 13)
-#define   USB_SUSP_SET		(1 << 14)
-#define   USB_WAKEUP_DEBOUNCE_COUNT(x)	(((x) & 0x7) << 16)
+#define USB_SUSP_CTRL				0x400
+#define   USB_WAKE_ON_CNNT_EN_DEV		BIT(3)
+#define   USB_WAKE_ON_DISCON_EN_DEV		BIT(4)
+#define   USB_SUSP_CLR				BIT(5)
+#define   USB_PHY_CLK_VALID			BIT(7)
+#define   UTMIP_RESET				BIT(11)
+#define   UHSIC_RESET				BIT(11)
+#define   UTMIP_PHY_ENABLE			BIT(12)
+#define   ULPI_PHY_ENABLE			BIT(13)
+#define   USB_SUSP_SET				BIT(14)
+#define   USB_WAKEUP_DEBOUNCE_COUNT(x)		(((x) & 0x7) << 16)
 
-#define USB1_LEGACY_CTRL	0x410
-#define   USB1_NO_LEGACY_MODE			(1 << 0)
+#define USB1_LEGACY_CTRL			0x410
+#define   USB1_NO_LEGACY_MODE			BIT(0)
 #define   USB1_VBUS_SENSE_CTL_MASK		(3 << 1)
 #define   USB1_VBUS_SENSE_CTL_VBUS_WAKEUP	(0 << 1)
 #define   USB1_VBUS_SENSE_CTL_AB_SESS_VLD_OR_VBUS_WAKEUP \
@@ -64,94 +66,94 @@
 #define   USB1_VBUS_SENSE_CTL_AB_SESS_VLD	(2 << 1)
 #define   USB1_VBUS_SENSE_CTL_A_SESS_VLD	(3 << 1)
 
-#define ULPI_TIMING_CTRL_0	0x424
-#define   ULPI_OUTPUT_PINMUX_BYP	(1 << 10)
-#define   ULPI_CLKOUT_PINMUX_BYP	(1 << 11)
+#define ULPI_TIMING_CTRL_0			0x424
+#define   ULPI_OUTPUT_PINMUX_BYP		BIT(10)
+#define   ULPI_CLKOUT_PINMUX_BYP		BIT(11)
 
-#define ULPI_TIMING_CTRL_1	0x428
-#define   ULPI_DATA_TRIMMER_LOAD	(1 << 0)
-#define   ULPI_DATA_TRIMMER_SEL(x)	(((x) & 0x7) << 1)
-#define   ULPI_STPDIRNXT_TRIMMER_LOAD	(1 << 16)
-#define   ULPI_STPDIRNXT_TRIMMER_SEL(x)	(((x) & 0x7) << 17)
-#define   ULPI_DIR_TRIMMER_LOAD		(1 << 24)
-#define   ULPI_DIR_TRIMMER_SEL(x)	(((x) & 0x7) << 25)
+#define ULPI_TIMING_CTRL_1			0x428
+#define   ULPI_DATA_TRIMMER_LOAD		BIT(0)
+#define   ULPI_DATA_TRIMMER_SEL(x)		(((x) & 0x7) << 1)
+#define   ULPI_STPDIRNXT_TRIMMER_LOAD		BIT(16)
+#define   ULPI_STPDIRNXT_TRIMMER_SEL(x)		(((x) & 0x7) << 17)
+#define   ULPI_DIR_TRIMMER_LOAD			BIT(24)
+#define   ULPI_DIR_TRIMMER_SEL(x)		(((x) & 0x7) << 25)
 
-#define UTMIP_PLL_CFG1		0x804
+#define UTMIP_PLL_CFG1				0x804
 #define   UTMIP_XTAL_FREQ_COUNT(x)		(((x) & 0xfff) << 0)
 #define   UTMIP_PLLU_ENABLE_DLY_COUNT(x)	(((x) & 0x1f) << 27)
 
-#define UTMIP_XCVR_CFG0		0x808
+#define UTMIP_XCVR_CFG0				0x808
 #define   UTMIP_XCVR_SETUP(x)			(((x) & 0xf) << 0)
 #define   UTMIP_XCVR_SETUP_MSB(x)		((((x) & 0x70) >> 4) << 22)
 #define   UTMIP_XCVR_LSRSLEW(x)			(((x) & 0x3) << 8)
 #define   UTMIP_XCVR_LSFSLEW(x)			(((x) & 0x3) << 10)
-#define   UTMIP_FORCE_PD_POWERDOWN		(1 << 14)
-#define   UTMIP_FORCE_PD2_POWERDOWN		(1 << 16)
-#define   UTMIP_FORCE_PDZI_POWERDOWN		(1 << 18)
-#define   UTMIP_XCVR_LSBIAS_SEL			(1 << 21)
+#define   UTMIP_FORCE_PD_POWERDOWN		BIT(14)
+#define   UTMIP_FORCE_PD2_POWERDOWN		BIT(16)
+#define   UTMIP_FORCE_PDZI_POWERDOWN		BIT(18)
+#define   UTMIP_XCVR_LSBIAS_SEL			BIT(21)
 #define   UTMIP_XCVR_HSSLEW(x)			(((x) & 0x3) << 4)
 #define   UTMIP_XCVR_HSSLEW_MSB(x)		((((x) & 0x1fc) >> 2) << 25)
 
-#define UTMIP_BIAS_CFG0		0x80c
-#define   UTMIP_OTGPD			(1 << 11)
-#define   UTMIP_BIASPD			(1 << 10)
-#define   UTMIP_HSSQUELCH_LEVEL(x)	(((x) & 0x3) << 0)
-#define   UTMIP_HSDISCON_LEVEL(x)	(((x) & 0x3) << 2)
-#define   UTMIP_HSDISCON_LEVEL_MSB(x)	((((x) & 0x4) >> 2) << 24)
+#define UTMIP_BIAS_CFG0				0x80c
+#define   UTMIP_OTGPD				BIT(11)
+#define   UTMIP_BIASPD				BIT(10)
+#define   UTMIP_HSSQUELCH_LEVEL(x)		(((x) & 0x3) << 0)
+#define   UTMIP_HSDISCON_LEVEL(x)		(((x) & 0x3) << 2)
+#define   UTMIP_HSDISCON_LEVEL_MSB(x)		((((x) & 0x4) >> 2) << 24)
 
-#define UTMIP_HSRX_CFG0		0x810
-#define   UTMIP_ELASTIC_LIMIT(x)	(((x) & 0x1f) << 10)
-#define   UTMIP_IDLE_WAIT(x)		(((x) & 0x1f) << 15)
+#define UTMIP_HSRX_CFG0				0x810
+#define   UTMIP_ELASTIC_LIMIT(x)		(((x) & 0x1f) << 10)
+#define   UTMIP_IDLE_WAIT(x)			(((x) & 0x1f) << 15)
 
-#define UTMIP_HSRX_CFG1		0x814
-#define   UTMIP_HS_SYNC_START_DLY(x)	(((x) & 0x1f) << 1)
+#define UTMIP_HSRX_CFG1				0x814
+#define   UTMIP_HS_SYNC_START_DLY(x)		(((x) & 0x1f) << 1)
 
-#define UTMIP_TX_CFG0		0x820
-#define   UTMIP_FS_PREABMLE_J		(1 << 19)
-#define   UTMIP_HS_DISCON_DISABLE	(1 << 8)
+#define UTMIP_TX_CFG0				0x820
+#define   UTMIP_FS_PREABMLE_J			BIT(19)
+#define   UTMIP_HS_DISCON_DISABLE		BIT(8)
 
-#define UTMIP_MISC_CFG0		0x824
-#define   UTMIP_DPDM_OBSERVE		(1 << 26)
-#define   UTMIP_DPDM_OBSERVE_SEL(x)	(((x) & 0xf) << 27)
-#define   UTMIP_DPDM_OBSERVE_SEL_FS_J	UTMIP_DPDM_OBSERVE_SEL(0xf)
-#define   UTMIP_DPDM_OBSERVE_SEL_FS_K	UTMIP_DPDM_OBSERVE_SEL(0xe)
-#define   UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd)
-#define   UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc)
-#define   UTMIP_SUSPEND_EXIT_ON_EDGE	(1 << 22)
+#define UTMIP_MISC_CFG0				0x824
+#define   UTMIP_DPDM_OBSERVE			BIT(26)
+#define   UTMIP_DPDM_OBSERVE_SEL(x)		(((x) & 0xf) << 27)
+#define   UTMIP_DPDM_OBSERVE_SEL_FS_J		UTMIP_DPDM_OBSERVE_SEL(0xf)
+#define   UTMIP_DPDM_OBSERVE_SEL_FS_K		UTMIP_DPDM_OBSERVE_SEL(0xe)
+#define   UTMIP_DPDM_OBSERVE_SEL_FS_SE1		UTMIP_DPDM_OBSERVE_SEL(0xd)
+#define   UTMIP_DPDM_OBSERVE_SEL_FS_SE0		UTMIP_DPDM_OBSERVE_SEL(0xc)
+#define   UTMIP_SUSPEND_EXIT_ON_EDGE		BIT(22)
 
-#define UTMIP_MISC_CFG1		0x828
-#define   UTMIP_PLL_ACTIVE_DLY_COUNT(x)	(((x) & 0x1f) << 18)
-#define   UTMIP_PLLU_STABLE_COUNT(x)	(((x) & 0xfff) << 6)
+#define UTMIP_MISC_CFG1				0x828
+#define   UTMIP_PLL_ACTIVE_DLY_COUNT(x)		(((x) & 0x1f) << 18)
+#define   UTMIP_PLLU_STABLE_COUNT(x)		(((x) & 0xfff) << 6)
 
-#define UTMIP_DEBOUNCE_CFG0	0x82c
-#define   UTMIP_BIAS_DEBOUNCE_A(x)	(((x) & 0xffff) << 0)
+#define UTMIP_DEBOUNCE_CFG0			0x82c
+#define   UTMIP_BIAS_DEBOUNCE_A(x)		(((x) & 0xffff) << 0)
 
-#define UTMIP_BAT_CHRG_CFG0	0x830
-#define   UTMIP_PD_CHRG			(1 << 0)
+#define UTMIP_BAT_CHRG_CFG0			0x830
+#define   UTMIP_PD_CHRG				BIT(0)
 
-#define UTMIP_SPARE_CFG0	0x834
-#define   FUSE_SETUP_SEL		(1 << 3)
+#define UTMIP_SPARE_CFG0			0x834
+#define   FUSE_SETUP_SEL			BIT(3)
 
-#define UTMIP_XCVR_CFG1		0x838
-#define   UTMIP_FORCE_PDDISC_POWERDOWN	(1 << 0)
-#define   UTMIP_FORCE_PDCHRP_POWERDOWN	(1 << 2)
-#define   UTMIP_FORCE_PDDR_POWERDOWN	(1 << 4)
-#define   UTMIP_XCVR_TERM_RANGE_ADJ(x)	(((x) & 0xf) << 18)
+#define UTMIP_XCVR_CFG1				0x838
+#define   UTMIP_FORCE_PDDISC_POWERDOWN		BIT(0)
+#define   UTMIP_FORCE_PDCHRP_POWERDOWN		BIT(2)
+#define   UTMIP_FORCE_PDDR_POWERDOWN		BIT(4)
+#define   UTMIP_XCVR_TERM_RANGE_ADJ(x)		(((x) & 0xf) << 18)
 
-#define UTMIP_BIAS_CFG1		0x83c
-#define   UTMIP_BIAS_PDTRK_COUNT(x)	(((x) & 0x1f) << 3)
+#define UTMIP_BIAS_CFG1				0x83c
+#define   UTMIP_BIAS_PDTRK_COUNT(x)		(((x) & 0x1f) << 3)
 
 /* For Tegra30 and above only, the address is different in Tegra20 */
-#define USB_USBMODE		0x1f8
-#define   USB_USBMODE_MASK		(3 << 0)
-#define   USB_USBMODE_HOST		(3 << 0)
-#define   USB_USBMODE_DEVICE		(2 << 0)
+#define USB_USBMODE				0x1f8
+#define   USB_USBMODE_MASK			(3 << 0)
+#define   USB_USBMODE_HOST			(3 << 0)
+#define   USB_USBMODE_DEVICE			(2 << 0)
 
 static DEFINE_SPINLOCK(utmip_pad_lock);
-static int utmip_pad_count;
+static unsigned int utmip_pad_count;
 
 struct tegra_xtal_freq {
-	int freq;
+	unsigned int freq;
 	u8 enable_delay;
 	u8 stable_count;
 	u8 active_delay;
@@ -194,43 +196,49 @@ static const struct tegra_xtal_freq tegra_freq_table[] = {
 	},
 };
 
+static inline struct tegra_usb_phy *to_tegra_usb_phy(struct usb_phy *u_phy)
+{
+	return container_of(u_phy, struct tegra_usb_phy, u_phy);
+}
+
 static void set_pts(struct tegra_usb_phy *phy, u8 pts_val)
 {
 	void __iomem *base = phy->regs;
-	unsigned long val;
+	u32 val;
 
 	if (phy->soc_config->has_hostpc) {
-		val = readl(base + TEGRA_USB_HOSTPC1_DEVLC);
+		val = readl_relaxed(base + TEGRA_USB_HOSTPC1_DEVLC);
 		val &= ~TEGRA_USB_HOSTPC1_DEVLC_PTS(~0);
 		val |= TEGRA_USB_HOSTPC1_DEVLC_PTS(pts_val);
-		writel(val, base + TEGRA_USB_HOSTPC1_DEVLC);
+		writel_relaxed(val, base + TEGRA_USB_HOSTPC1_DEVLC);
 	} else {
-		val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
+		val = readl_relaxed(base + TEGRA_USB_PORTSC1);
+		val &= ~TEGRA_PORTSC1_RWC_BITS;
 		val &= ~TEGRA_USB_PORTSC1_PTS(~0);
 		val |= TEGRA_USB_PORTSC1_PTS(pts_val);
-		writel(val, base + TEGRA_USB_PORTSC1);
+		writel_relaxed(val, base + TEGRA_USB_PORTSC1);
 	}
 }
 
 static void set_phcd(struct tegra_usb_phy *phy, bool enable)
 {
 	void __iomem *base = phy->regs;
-	unsigned long val;
+	u32 val;
 
 	if (phy->soc_config->has_hostpc) {
-		val = readl(base + TEGRA_USB_HOSTPC1_DEVLC);
+		val = readl_relaxed(base + TEGRA_USB_HOSTPC1_DEVLC);
 		if (enable)
 			val |= TEGRA_USB_HOSTPC1_DEVLC_PHCD;
 		else
 			val &= ~TEGRA_USB_HOSTPC1_DEVLC_PHCD;
-		writel(val, base + TEGRA_USB_HOSTPC1_DEVLC);
+		writel_relaxed(val, base + TEGRA_USB_HOSTPC1_DEVLC);
 	} else {
-		val = readl(base + TEGRA_USB_PORTSC1) & ~PORT_RWC_BITS;
+		val = readl_relaxed(base + TEGRA_USB_PORTSC1) & ~PORT_RWC_BITS;
 		if (enable)
 			val |= TEGRA_USB_PORTSC1_PHCD;
 		else
 			val &= ~TEGRA_USB_PORTSC1_PHCD;
-		writel(val, base + TEGRA_USB_PORTSC1);
+		writel_relaxed(val, base + TEGRA_USB_PORTSC1);
 	}
 }
 
@@ -238,23 +246,6 @@ static int utmip_pad_open(struct tegra_usb_phy *phy)
 {
 	int ret;
 
-	phy->pad_clk = devm_clk_get(phy->u_phy.dev, "utmi-pads");
-	if (IS_ERR(phy->pad_clk)) {
-		ret = PTR_ERR(phy->pad_clk);
-		dev_err(phy->u_phy.dev,
-			"Failed to get UTMIP pad clock: %d\n", ret);
-		return ret;
-	}
-
-	phy->pad_rst = devm_reset_control_get_optional_shared(
-						phy->u_phy.dev, "utmi-pads");
-	if (IS_ERR(phy->pad_rst)) {
-		ret = PTR_ERR(phy->pad_rst);
-		dev_err(phy->u_phy.dev,
-			"Failed to get UTMI-pads reset: %d\n", ret);
-		return ret;
-	}
-
 	ret = clk_prepare_enable(phy->pad_clk);
 	if (ret) {
 		dev_err(phy->u_phy.dev,
@@ -315,18 +306,21 @@ static int utmip_pad_close(struct tegra_usb_phy *phy)
 	return ret;
 }
 
-static void utmip_pad_power_on(struct tegra_usb_phy *phy)
+static int utmip_pad_power_on(struct tegra_usb_phy *phy)
 {
-	unsigned long val, flags;
-	void __iomem *base = phy->pad_regs;
 	struct tegra_utmip_config *config = phy->config;
+	void __iomem *base = phy->pad_regs;
+	u32 val;
+	int err;
 
-	clk_prepare_enable(phy->pad_clk);
+	err = clk_prepare_enable(phy->pad_clk);
+	if (err)
+		return err;
 
-	spin_lock_irqsave(&utmip_pad_lock, flags);
+	spin_lock(&utmip_pad_lock);
 
 	if (utmip_pad_count++ == 0) {
-		val = readl(base + UTMIP_BIAS_CFG0);
+		val = readl_relaxed(base + UTMIP_BIAS_CFG0);
 		val &= ~(UTMIP_OTGPD | UTMIP_BIASPD);
 
 		if (phy->soc_config->requires_extra_tuning_parameters) {
@@ -338,53 +332,59 @@ static void utmip_pad_power_on(struct tegra_usb_phy *phy)
 			val |= UTMIP_HSDISCON_LEVEL(config->hsdiscon_level);
 			val |= UTMIP_HSDISCON_LEVEL_MSB(config->hsdiscon_level);
 		}
-		writel(val, base + UTMIP_BIAS_CFG0);
+		writel_relaxed(val, base + UTMIP_BIAS_CFG0);
 	}
 
-	spin_unlock_irqrestore(&utmip_pad_lock, flags);
-
-	clk_disable_unprepare(phy->pad_clk);
-}
-
-static int utmip_pad_power_off(struct tegra_usb_phy *phy)
-{
-	unsigned long val, flags;
-	void __iomem *base = phy->pad_regs;
-
-	if (!utmip_pad_count) {
-		dev_err(phy->u_phy.dev, "UTMIP pad already powered off\n");
-		return -EINVAL;
-	}
-
-	clk_prepare_enable(phy->pad_clk);
-
-	spin_lock_irqsave(&utmip_pad_lock, flags);
-
-	if (--utmip_pad_count == 0) {
-		val = readl(base + UTMIP_BIAS_CFG0);
-		val |= UTMIP_OTGPD | UTMIP_BIASPD;
-		writel(val, base + UTMIP_BIAS_CFG0);
-	}
-
-	spin_unlock_irqrestore(&utmip_pad_lock, flags);
+	spin_unlock(&utmip_pad_lock);
 
 	clk_disable_unprepare(phy->pad_clk);
 
 	return 0;
 }
 
+static int utmip_pad_power_off(struct tegra_usb_phy *phy)
+{
+	void __iomem *base = phy->pad_regs;
+	u32 val;
+	int ret;
+
+	ret = clk_prepare_enable(phy->pad_clk);
+	if (ret)
+		return ret;
+
+	spin_lock(&utmip_pad_lock);
+
+	if (!utmip_pad_count) {
+		dev_err(phy->u_phy.dev, "UTMIP pad already powered off\n");
+		ret = -EINVAL;
+		goto ulock;
+	}
+
+	if (--utmip_pad_count == 0) {
+		val = readl_relaxed(base + UTMIP_BIAS_CFG0);
+		val |= UTMIP_OTGPD | UTMIP_BIASPD;
+		writel_relaxed(val, base + UTMIP_BIAS_CFG0);
+	}
+ulock:
+	spin_unlock(&utmip_pad_lock);
+
+	clk_disable_unprepare(phy->pad_clk);
+
+	return ret;
+}
+
 static int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
 {
 	u32 tmp;
 
-	return readl_poll_timeout(reg, tmp, (tmp & mask) == result,
-				  2000, 6000);
+	return readl_relaxed_poll_timeout(reg, tmp, (tmp & mask) == result,
+					  2000, 6000);
 }
 
 static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
 {
-	unsigned long val;
 	void __iomem *base = phy->regs;
+	u32 val;
 
 	/*
 	 * The USB driver may have already initiated the phy clock
@@ -395,27 +395,28 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
 		return;
 
 	if (phy->is_legacy_phy) {
-		val = readl(base + USB_SUSP_CTRL);
+		val = readl_relaxed(base + USB_SUSP_CTRL);
 		val |= USB_SUSP_SET;
-		writel(val, base + USB_SUSP_CTRL);
+		writel_relaxed(val, base + USB_SUSP_CTRL);
 
-		udelay(10);
+		usleep_range(10, 100);
 
-		val = readl(base + USB_SUSP_CTRL);
+		val = readl_relaxed(base + USB_SUSP_CTRL);
 		val &= ~USB_SUSP_SET;
-		writel(val, base + USB_SUSP_CTRL);
-	} else
+		writel_relaxed(val, base + USB_SUSP_CTRL);
+	} else {
 		set_phcd(phy, true);
+	}
 
-	if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0)
+	if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0))
 		dev_err(phy->u_phy.dev,
 			"Timeout waiting for PHY to stabilize on disable\n");
 }
 
 static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
 {
-	unsigned long val;
 	void __iomem *base = phy->regs;
+	u32 val;
 
 	/*
 	 * The USB driver may have already initiated the phy clock
@@ -427,97 +428,101 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
 		return;
 
 	if (phy->is_legacy_phy) {
-		val = readl(base + USB_SUSP_CTRL);
+		val = readl_relaxed(base + USB_SUSP_CTRL);
 		val |= USB_SUSP_CLR;
-		writel(val, base + USB_SUSP_CTRL);
+		writel_relaxed(val, base + USB_SUSP_CTRL);
 
-		udelay(10);
+		usleep_range(10, 100);
 
-		val = readl(base + USB_SUSP_CTRL);
+		val = readl_relaxed(base + USB_SUSP_CTRL);
 		val &= ~USB_SUSP_CLR;
-		writel(val, base + USB_SUSP_CTRL);
-	} else
+		writel_relaxed(val, base + USB_SUSP_CTRL);
+	} else {
 		set_phcd(phy, false);
+	}
 
 	if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
-						     USB_PHY_CLK_VALID))
+			       USB_PHY_CLK_VALID))
 		dev_err(phy->u_phy.dev,
 			"Timeout waiting for PHY to stabilize on enable\n");
 }
 
 static int utmi_phy_power_on(struct tegra_usb_phy *phy)
 {
-	unsigned long val;
-	void __iomem *base = phy->regs;
 	struct tegra_utmip_config *config = phy->config;
+	void __iomem *base = phy->regs;
+	u32 val;
+	int err;
 
-	val = readl(base + USB_SUSP_CTRL);
+	val = readl_relaxed(base + USB_SUSP_CTRL);
 	val |= UTMIP_RESET;
-	writel(val, base + USB_SUSP_CTRL);
+	writel_relaxed(val, base + USB_SUSP_CTRL);
 
 	if (phy->is_legacy_phy) {
-		val = readl(base + USB1_LEGACY_CTRL);
+		val = readl_relaxed(base + USB1_LEGACY_CTRL);
 		val |= USB1_NO_LEGACY_MODE;
-		writel(val, base + USB1_LEGACY_CTRL);
+		writel_relaxed(val, base + USB1_LEGACY_CTRL);
 	}
 
-	val = readl(base + UTMIP_TX_CFG0);
+	val = readl_relaxed(base + UTMIP_TX_CFG0);
 	val |= UTMIP_FS_PREABMLE_J;
-	writel(val, base + UTMIP_TX_CFG0);
+	writel_relaxed(val, base + UTMIP_TX_CFG0);
 
-	val = readl(base + UTMIP_HSRX_CFG0);
+	val = readl_relaxed(base + UTMIP_HSRX_CFG0);
 	val &= ~(UTMIP_IDLE_WAIT(~0) | UTMIP_ELASTIC_LIMIT(~0));
 	val |= UTMIP_IDLE_WAIT(config->idle_wait_delay);
 	val |= UTMIP_ELASTIC_LIMIT(config->elastic_limit);
-	writel(val, base + UTMIP_HSRX_CFG0);
+	writel_relaxed(val, base + UTMIP_HSRX_CFG0);
 
-	val = readl(base + UTMIP_HSRX_CFG1);
+	val = readl_relaxed(base + UTMIP_HSRX_CFG1);
 	val &= ~UTMIP_HS_SYNC_START_DLY(~0);
 	val |= UTMIP_HS_SYNC_START_DLY(config->hssync_start_delay);
-	writel(val, base + UTMIP_HSRX_CFG1);
+	writel_relaxed(val, base + UTMIP_HSRX_CFG1);
 
-	val = readl(base + UTMIP_DEBOUNCE_CFG0);
+	val = readl_relaxed(base + UTMIP_DEBOUNCE_CFG0);
 	val &= ~UTMIP_BIAS_DEBOUNCE_A(~0);
 	val |= UTMIP_BIAS_DEBOUNCE_A(phy->freq->debounce);
-	writel(val, base + UTMIP_DEBOUNCE_CFG0);
+	writel_relaxed(val, base + UTMIP_DEBOUNCE_CFG0);
 
-	val = readl(base + UTMIP_MISC_CFG0);
+	val = readl_relaxed(base + UTMIP_MISC_CFG0);
 	val &= ~UTMIP_SUSPEND_EXIT_ON_EDGE;
-	writel(val, base + UTMIP_MISC_CFG0);
+	writel_relaxed(val, base + UTMIP_MISC_CFG0);
 
 	if (!phy->soc_config->utmi_pll_config_in_car_module) {
-		val = readl(base + UTMIP_MISC_CFG1);
+		val = readl_relaxed(base + UTMIP_MISC_CFG1);
 		val &= ~(UTMIP_PLL_ACTIVE_DLY_COUNT(~0) |
 			UTMIP_PLLU_STABLE_COUNT(~0));
 		val |= UTMIP_PLL_ACTIVE_DLY_COUNT(phy->freq->active_delay) |
 			UTMIP_PLLU_STABLE_COUNT(phy->freq->stable_count);
-		writel(val, base + UTMIP_MISC_CFG1);
+		writel_relaxed(val, base + UTMIP_MISC_CFG1);
 
-		val = readl(base + UTMIP_PLL_CFG1);
+		val = readl_relaxed(base + UTMIP_PLL_CFG1);
 		val &= ~(UTMIP_XTAL_FREQ_COUNT(~0) |
 			UTMIP_PLLU_ENABLE_DLY_COUNT(~0));
 		val |= UTMIP_XTAL_FREQ_COUNT(phy->freq->xtal_freq_count) |
 			UTMIP_PLLU_ENABLE_DLY_COUNT(phy->freq->enable_delay);
-		writel(val, base + UTMIP_PLL_CFG1);
+		writel_relaxed(val, base + UTMIP_PLL_CFG1);
 	}
 
 	if (phy->mode == USB_DR_MODE_PERIPHERAL) {
-		val = readl(base + USB_SUSP_CTRL);
+		val = readl_relaxed(base + USB_SUSP_CTRL);
 		val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV);
-		writel(val, base + USB_SUSP_CTRL);
+		writel_relaxed(val, base + USB_SUSP_CTRL);
 
-		val = readl(base + UTMIP_BAT_CHRG_CFG0);
+		val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
 		val &= ~UTMIP_PD_CHRG;
-		writel(val, base + UTMIP_BAT_CHRG_CFG0);
+		writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
 	} else {
-		val = readl(base + UTMIP_BAT_CHRG_CFG0);
+		val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
 		val |= UTMIP_PD_CHRG;
-		writel(val, base + UTMIP_BAT_CHRG_CFG0);
+		writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
 	}
 
-	utmip_pad_power_on(phy);
+	err = utmip_pad_power_on(phy);
+	if (err)
+		return err;
 
-	val = readl(base + UTMIP_XCVR_CFG0);
+	val = readl_relaxed(base + UTMIP_XCVR_CFG0);
 	val &= ~(UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
 		 UTMIP_FORCE_PDZI_POWERDOWN | UTMIP_XCVR_LSBIAS_SEL |
 		 UTMIP_XCVR_SETUP(~0) | UTMIP_XCVR_SETUP_MSB(~0) |
@@ -535,57 +540,57 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
 		val |= UTMIP_XCVR_HSSLEW(config->xcvr_hsslew);
 		val |= UTMIP_XCVR_HSSLEW_MSB(config->xcvr_hsslew);
 	}
-	writel(val, base + UTMIP_XCVR_CFG0);
+	writel_relaxed(val, base + UTMIP_XCVR_CFG0);
 
-	val = readl(base + UTMIP_XCVR_CFG1);
+	val = readl_relaxed(base + UTMIP_XCVR_CFG1);
 	val &= ~(UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
 		 UTMIP_FORCE_PDDR_POWERDOWN | UTMIP_XCVR_TERM_RANGE_ADJ(~0));
 	val |= UTMIP_XCVR_TERM_RANGE_ADJ(config->term_range_adj);
-	writel(val, base + UTMIP_XCVR_CFG1);
+	writel_relaxed(val, base + UTMIP_XCVR_CFG1);
 
-	val = readl(base + UTMIP_BIAS_CFG1);
+	val = readl_relaxed(base + UTMIP_BIAS_CFG1);
 	val &= ~UTMIP_BIAS_PDTRK_COUNT(~0);
 	val |= UTMIP_BIAS_PDTRK_COUNT(0x5);
-	writel(val, base + UTMIP_BIAS_CFG1);
+	writel_relaxed(val, base + UTMIP_BIAS_CFG1);
 
-	val = readl(base + UTMIP_SPARE_CFG0);
+	val = readl_relaxed(base + UTMIP_SPARE_CFG0);
 	if (config->xcvr_setup_use_fuses)
 		val |= FUSE_SETUP_SEL;
 	else
 		val &= ~FUSE_SETUP_SEL;
-	writel(val, base + UTMIP_SPARE_CFG0);
+	writel_relaxed(val, base + UTMIP_SPARE_CFG0);
 
 	if (!phy->is_legacy_phy) {
-		val = readl(base + USB_SUSP_CTRL);
+		val = readl_relaxed(base + USB_SUSP_CTRL);
 		val |= UTMIP_PHY_ENABLE;
-		writel(val, base + USB_SUSP_CTRL);
+		writel_relaxed(val, base + USB_SUSP_CTRL);
 	}
 
-	val = readl(base + USB_SUSP_CTRL);
+	val = readl_relaxed(base + USB_SUSP_CTRL);
 	val &= ~UTMIP_RESET;
-	writel(val, base + USB_SUSP_CTRL);
+	writel_relaxed(val, base + USB_SUSP_CTRL);
 
 	if (phy->is_legacy_phy) {
-		val = readl(base + USB1_LEGACY_CTRL);
+		val = readl_relaxed(base + USB1_LEGACY_CTRL);
 		val &= ~USB1_VBUS_SENSE_CTL_MASK;
 		val |= USB1_VBUS_SENSE_CTL_A_SESS_VLD;
-		writel(val, base + USB1_LEGACY_CTRL);
+		writel_relaxed(val, base + USB1_LEGACY_CTRL);
 
-		val = readl(base + USB_SUSP_CTRL);
+		val = readl_relaxed(base + USB_SUSP_CTRL);
 		val &= ~USB_SUSP_SET;
-		writel(val, base + USB_SUSP_CTRL);
+		writel_relaxed(val, base + USB_SUSP_CTRL);
 	}
 
 	utmi_phy_clk_enable(phy);
 
 	if (phy->soc_config->requires_usbmode_setup) {
-		val = readl(base + USB_USBMODE);
+		val = readl_relaxed(base + USB_USBMODE);
 		val &= ~USB_USBMODE_MASK;
 		if (phy->mode == USB_DR_MODE_HOST)
 			val |= USB_USBMODE_HOST;
 		else
 			val |= USB_USBMODE_DEVICE;
-		writel(val, base + USB_USBMODE);
+		writel_relaxed(val, base + USB_USBMODE);
 	}
 
 	if (!phy->is_legacy_phy)
@@ -596,258 +601,252 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
 
 static int utmi_phy_power_off(struct tegra_usb_phy *phy)
 {
-	unsigned long val;
 	void __iomem *base = phy->regs;
+	u32 val;
 
 	utmi_phy_clk_disable(phy);
 
 	if (phy->mode == USB_DR_MODE_PERIPHERAL) {
-		val = readl(base + USB_SUSP_CTRL);
+		val = readl_relaxed(base + USB_SUSP_CTRL);
 		val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
 		val |= USB_WAKE_ON_CNNT_EN_DEV | USB_WAKEUP_DEBOUNCE_COUNT(5);
-		writel(val, base + USB_SUSP_CTRL);
+		writel_relaxed(val, base + USB_SUSP_CTRL);
 	}
 
-	val = readl(base + USB_SUSP_CTRL);
+	val = readl_relaxed(base + USB_SUSP_CTRL);
 	val |= UTMIP_RESET;
-	writel(val, base + USB_SUSP_CTRL);
+	writel_relaxed(val, base + USB_SUSP_CTRL);
 
-	val = readl(base + UTMIP_BAT_CHRG_CFG0);
+	val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
 	val |= UTMIP_PD_CHRG;
-	writel(val, base + UTMIP_BAT_CHRG_CFG0);
+	writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
 
-	val = readl(base + UTMIP_XCVR_CFG0);
+	val = readl_relaxed(base + UTMIP_XCVR_CFG0);
 	val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
 	       UTMIP_FORCE_PDZI_POWERDOWN;
-	writel(val, base + UTMIP_XCVR_CFG0);
+	writel_relaxed(val, base + UTMIP_XCVR_CFG0);
 
-	val = readl(base + UTMIP_XCVR_CFG1);
+	val = readl_relaxed(base + UTMIP_XCVR_CFG1);
 	val |= UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
 	       UTMIP_FORCE_PDDR_POWERDOWN;
-	writel(val, base + UTMIP_XCVR_CFG1);
+	writel_relaxed(val, base + UTMIP_XCVR_CFG1);
 
 	return utmip_pad_power_off(phy);
 }
 
 static void utmi_phy_preresume(struct tegra_usb_phy *phy)
 {
-	unsigned long val;
 	void __iomem *base = phy->regs;
+	u32 val;
 
-	val = readl(base + UTMIP_TX_CFG0);
+	val = readl_relaxed(base + UTMIP_TX_CFG0);
 	val |= UTMIP_HS_DISCON_DISABLE;
-	writel(val, base + UTMIP_TX_CFG0);
+	writel_relaxed(val, base + UTMIP_TX_CFG0);
 }
 
 static void utmi_phy_postresume(struct tegra_usb_phy *phy)
 {
-	unsigned long val;
 	void __iomem *base = phy->regs;
+	u32 val;
 
-	val = readl(base + UTMIP_TX_CFG0);
+	val = readl_relaxed(base + UTMIP_TX_CFG0);
 	val &= ~UTMIP_HS_DISCON_DISABLE;
-	writel(val, base + UTMIP_TX_CFG0);
+	writel_relaxed(val, base + UTMIP_TX_CFG0);
 }
 
 static void utmi_phy_restore_start(struct tegra_usb_phy *phy,
 				   enum tegra_usb_phy_port_speed port_speed)
 {
-	unsigned long val;
 	void __iomem *base = phy->regs;
+	u32 val;
 
-	val = readl(base + UTMIP_MISC_CFG0);
+	val = readl_relaxed(base + UTMIP_MISC_CFG0);
 	val &= ~UTMIP_DPDM_OBSERVE_SEL(~0);
 	if (port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
 		val |= UTMIP_DPDM_OBSERVE_SEL_FS_K;
 	else
 		val |= UTMIP_DPDM_OBSERVE_SEL_FS_J;
-	writel(val, base + UTMIP_MISC_CFG0);
-	udelay(1);
+	writel_relaxed(val, base + UTMIP_MISC_CFG0);
+	usleep_range(1, 10);
 
-	val = readl(base + UTMIP_MISC_CFG0);
+	val = readl_relaxed(base + UTMIP_MISC_CFG0);
 	val |= UTMIP_DPDM_OBSERVE;
-	writel(val, base + UTMIP_MISC_CFG0);
-	udelay(10);
+	writel_relaxed(val, base + UTMIP_MISC_CFG0);
+	usleep_range(10, 100);
 }
 
 static void utmi_phy_restore_end(struct tegra_usb_phy *phy)
 {
-	unsigned long val;
 	void __iomem *base = phy->regs;
+	u32 val;
 
-	val = readl(base + UTMIP_MISC_CFG0);
+	val = readl_relaxed(base + UTMIP_MISC_CFG0);
 	val &= ~UTMIP_DPDM_OBSERVE;
-	writel(val, base + UTMIP_MISC_CFG0);
-	udelay(10);
+	writel_relaxed(val, base + UTMIP_MISC_CFG0);
+	usleep_range(10, 100);
 }
 
 static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
 {
-	int ret;
-	unsigned long val;
 	void __iomem *base = phy->regs;
+	u32 val;
+	int err;
 
-	ret = gpio_direction_output(phy->reset_gpio, 0);
-	if (ret < 0) {
-		dev_err(phy->u_phy.dev, "GPIO %d not set to 0: %d\n",
-			phy->reset_gpio, ret);
-		return ret;
-	}
-	msleep(5);
-	ret = gpio_direction_output(phy->reset_gpio, 1);
-	if (ret < 0) {
-		dev_err(phy->u_phy.dev, "GPIO %d not set to 1: %d\n",
-			phy->reset_gpio, ret);
-		return ret;
-	}
+	gpiod_set_value_cansleep(phy->reset_gpio, 1);
 
-	clk_prepare_enable(phy->clk);
-	msleep(1);
+	err = clk_prepare_enable(phy->clk);
+	if (err)
+		return err;
 
-	val = readl(base + USB_SUSP_CTRL);
+	usleep_range(5000, 6000);
+
+	gpiod_set_value_cansleep(phy->reset_gpio, 0);
+
+	usleep_range(1000, 2000);
+
+	val = readl_relaxed(base + USB_SUSP_CTRL);
 	val |= UHSIC_RESET;
-	writel(val, base + USB_SUSP_CTRL);
+	writel_relaxed(val, base + USB_SUSP_CTRL);
 
-	val = readl(base + ULPI_TIMING_CTRL_0);
+	val = readl_relaxed(base + ULPI_TIMING_CTRL_0);
 	val |= ULPI_OUTPUT_PINMUX_BYP | ULPI_CLKOUT_PINMUX_BYP;
-	writel(val, base + ULPI_TIMING_CTRL_0);
+	writel_relaxed(val, base + ULPI_TIMING_CTRL_0);
 
-	val = readl(base + USB_SUSP_CTRL);
+	val = readl_relaxed(base + USB_SUSP_CTRL);
 	val |= ULPI_PHY_ENABLE;
-	writel(val, base + USB_SUSP_CTRL);
+	writel_relaxed(val, base + USB_SUSP_CTRL);
 
 	val = 0;
-	writel(val, base + ULPI_TIMING_CTRL_1);
+	writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
 
 	val |= ULPI_DATA_TRIMMER_SEL(4);
 	val |= ULPI_STPDIRNXT_TRIMMER_SEL(4);
 	val |= ULPI_DIR_TRIMMER_SEL(4);
-	writel(val, base + ULPI_TIMING_CTRL_1);
-	udelay(10);
+	writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
+	usleep_range(10, 100);
 
 	val |= ULPI_DATA_TRIMMER_LOAD;
 	val |= ULPI_STPDIRNXT_TRIMMER_LOAD;
 	val |= ULPI_DIR_TRIMMER_LOAD;
-	writel(val, base + ULPI_TIMING_CTRL_1);
+	writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
 
 	/* Fix VbusInvalid due to floating VBUS */
-	ret = usb_phy_io_write(phy->ulpi, 0x40, 0x08);
-	if (ret) {
-		dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", ret);
-		return ret;
+	err = usb_phy_io_write(phy->ulpi, 0x40, 0x08);
+	if (err) {
+		dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", err);
+		goto disable_clk;
 	}
 
-	ret = usb_phy_io_write(phy->ulpi, 0x80, 0x0B);
-	if (ret) {
-		dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", ret);
-		return ret;
+	err = usb_phy_io_write(phy->ulpi, 0x80, 0x0B);
+	if (err) {
+		dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", err);
+		goto disable_clk;
 	}
 
-	val = readl(base + USB_SUSP_CTRL);
+	val = readl_relaxed(base + USB_SUSP_CTRL);
 	val |= USB_SUSP_CLR;
-	writel(val, base + USB_SUSP_CTRL);
-	udelay(100);
+	writel_relaxed(val, base + USB_SUSP_CTRL);
+	usleep_range(100, 1000);
 
-	val = readl(base + USB_SUSP_CTRL);
+	val = readl_relaxed(base + USB_SUSP_CTRL);
 	val &= ~USB_SUSP_CLR;
-	writel(val, base + USB_SUSP_CTRL);
+	writel_relaxed(val, base + USB_SUSP_CTRL);
 
 	return 0;
+
+disable_clk:
+	clk_disable_unprepare(phy->clk);
+
+	return err;
 }
 
 static int ulpi_phy_power_off(struct tegra_usb_phy *phy)
 {
-	clk_disable(phy->clk);
-	return gpio_direction_output(phy->reset_gpio, 0);
-}
+	gpiod_set_value_cansleep(phy->reset_gpio, 1);
+	usleep_range(5000, 6000);
+	clk_disable_unprepare(phy->clk);
 
-static void tegra_usb_phy_close(struct tegra_usb_phy *phy)
-{
-	if (!IS_ERR(phy->vbus))
-		regulator_disable(phy->vbus);
-
-	if (!phy->is_ulpi_phy)
-		utmip_pad_close(phy);
-
-	clk_disable_unprepare(phy->pll_u);
+	return 0;
 }
 
 static int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
 {
+	int err;
+
+	if (phy->powered_on)
+		return 0;
+
 	if (phy->is_ulpi_phy)
-		return ulpi_phy_power_on(phy);
+		err = ulpi_phy_power_on(phy);
 	else
-		return utmi_phy_power_on(phy);
+		err = utmi_phy_power_on(phy);
+	if (err)
+		return err;
+
+	phy->powered_on = true;
+
+	return 0;
 }
 
 static int tegra_usb_phy_power_off(struct tegra_usb_phy *phy)
 {
+	int err;
+
+	if (!phy->powered_on)
+		return 0;
+
 	if (phy->is_ulpi_phy)
-		return ulpi_phy_power_off(phy);
+		err = ulpi_phy_power_off(phy);
 	else
-		return utmi_phy_power_off(phy);
+		err = utmi_phy_power_off(phy);
+	if (err)
+		return err;
+
+	phy->powered_on = false;
+
+	return 0;
 }
 
-static int	tegra_usb_phy_suspend(struct usb_phy *x, int suspend)
+static void tegra_usb_phy_shutdown(struct usb_phy *u_phy)
 {
-	struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+	struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
+
+	if (WARN_ON(!phy->freq))
+		return;
+
+	tegra_usb_phy_power_off(phy);
+
+	if (!phy->is_ulpi_phy)
+		utmip_pad_close(phy);
+
+	regulator_disable(phy->vbus);
+	clk_disable_unprepare(phy->pll_u);
+
+	phy->freq = NULL;
+}
+
+static int tegra_usb_phy_set_suspend(struct usb_phy *u_phy, int suspend)
+{
+	struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
+
+	if (WARN_ON(!phy->freq))
+		return -EINVAL;
+
 	if (suspend)
 		return tegra_usb_phy_power_off(phy);
 	else
 		return tegra_usb_phy_power_on(phy);
 }
 
-static int ulpi_open(struct tegra_usb_phy *phy)
+static int tegra_usb_phy_init(struct usb_phy *u_phy)
 {
-	int err;
-
-	phy->clk = devm_clk_get(phy->u_phy.dev, "ulpi-link");
-	if (IS_ERR(phy->clk)) {
-		err = PTR_ERR(phy->clk);
-		dev_err(phy->u_phy.dev, "Failed to get ULPI clock: %d\n", err);
-		return err;
-	}
-
-	err = devm_gpio_request(phy->u_phy.dev, phy->reset_gpio,
-		"ulpi_phy_reset_b");
-	if (err < 0) {
-		dev_err(phy->u_phy.dev, "Request failed for GPIO %d: %d\n",
-			phy->reset_gpio, err);
-		return err;
-	}
-
-	err = gpio_direction_output(phy->reset_gpio, 0);
-	if (err < 0) {
-		dev_err(phy->u_phy.dev,
-			"GPIO %d direction not set to output: %d\n",
-			phy->reset_gpio, err);
-		return err;
-	}
-
-	phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
-	if (!phy->ulpi) {
-		dev_err(phy->u_phy.dev, "Failed to create ULPI OTG\n");
-		err = -ENOMEM;
-		return err;
-	}
-
-	phy->ulpi->io_priv = phy->regs + ULPI_VIEWPORT;
-	return 0;
-}
-
-static int tegra_usb_phy_init(struct tegra_usb_phy *phy)
-{
+	struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
 	unsigned long parent_rate;
-	int i;
+	unsigned int i;
 	int err;
 
-	phy->pll_u = devm_clk_get(phy->u_phy.dev, "pll_u");
-	if (IS_ERR(phy->pll_u)) {
-		err = PTR_ERR(phy->pll_u);
-		dev_err(phy->u_phy.dev,
-			"Failed to get pll_u clock: %d\n", err);
-		return err;
-	}
+	if (WARN_ON(phy->freq))
+		return 0;
 
 	err = clk_prepare_enable(phy->pll_u);
 	if (err)
@@ -864,64 +863,74 @@ static int tegra_usb_phy_init(struct tegra_usb_phy *phy)
 		dev_err(phy->u_phy.dev, "Invalid pll_u parent rate %ld\n",
 			parent_rate);
 		err = -EINVAL;
-		goto fail;
+		goto disable_clk;
 	}
 
-	if (!IS_ERR(phy->vbus)) {
-		err = regulator_enable(phy->vbus);
-		if (err) {
-			dev_err(phy->u_phy.dev,
-				"Failed to enable USB VBUS regulator: %d\n",
-				err);
-			goto fail;
-		}
+	err = regulator_enable(phy->vbus);
+	if (err) {
+		dev_err(phy->u_phy.dev,
+			"Failed to enable USB VBUS regulator: %d\n", err);
+		goto disable_clk;
 	}
 
-	if (phy->is_ulpi_phy)
-		err = ulpi_open(phy);
-	else
+	if (!phy->is_ulpi_phy) {
 		err = utmip_pad_open(phy);
-	if (err < 0)
-		goto fail;
+		if (err)
+			goto disable_vbus;
+	}
+
+	err = tegra_usb_phy_power_on(phy);
+	if (err)
+		goto close_phy;
 
 	return 0;
 
-fail:
+close_phy:
+	if (!phy->is_ulpi_phy)
+		utmip_pad_close(phy);
+
+disable_vbus:
+	regulator_disable(phy->vbus);
+
+disable_clk:
 	clk_disable_unprepare(phy->pll_u);
+
+	phy->freq = NULL;
+
 	return err;
 }
 
-void tegra_usb_phy_preresume(struct usb_phy *x)
+void tegra_usb_phy_preresume(struct usb_phy *u_phy)
 {
-	struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+	struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
 
 	if (!phy->is_ulpi_phy)
 		utmi_phy_preresume(phy);
 }
 EXPORT_SYMBOL_GPL(tegra_usb_phy_preresume);
 
-void tegra_usb_phy_postresume(struct usb_phy *x)
+void tegra_usb_phy_postresume(struct usb_phy *u_phy)
 {
-	struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+	struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
 
 	if (!phy->is_ulpi_phy)
 		utmi_phy_postresume(phy);
 }
 EXPORT_SYMBOL_GPL(tegra_usb_phy_postresume);
 
-void tegra_ehci_phy_restore_start(struct usb_phy *x,
-				 enum tegra_usb_phy_port_speed port_speed)
+void tegra_ehci_phy_restore_start(struct usb_phy *u_phy,
+				  enum tegra_usb_phy_port_speed port_speed)
 {
-	struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+	struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
 
 	if (!phy->is_ulpi_phy)
 		utmi_phy_restore_start(phy, port_speed);
 }
 EXPORT_SYMBOL_GPL(tegra_ehci_phy_restore_start);
 
-void tegra_ehci_phy_restore_end(struct usb_phy *x)
+void tegra_ehci_phy_restore_end(struct usb_phy *u_phy)
 {
-	struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+	struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
 
 	if (!phy->is_ulpi_phy)
 		utmi_phy_restore_end(phy);
@@ -932,21 +941,25 @@ static int read_utmi_param(struct platform_device *pdev, const char *param,
 			   u8 *dest)
 {
 	u32 value;
-	int err = of_property_read_u32(pdev->dev.of_node, param, &value);
-	*dest = (u8)value;
-	if (err < 0)
+	int err;
+
+	err = of_property_read_u32(pdev->dev.of_node, param, &value);
+	if (err)
 		dev_err(&pdev->dev,
 			"Failed to read USB UTMI parameter %s: %d\n",
 			param, err);
+	else
+		*dest = value;
+
 	return err;
 }
 
 static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
 			  struct platform_device *pdev)
 {
+	struct tegra_utmip_config *config;
 	struct resource *res;
 	int err;
-	struct tegra_utmip_config *config;
 
 	tegra_phy->is_ulpi_phy = false;
 
@@ -957,7 +970,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
 	}
 
 	tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
-		resource_size(res));
+					   resource_size(res));
 	if (!tegra_phy->pad_regs) {
 		dev_err(&pdev->dev, "Failed to remap UTMI pad regs\n");
 		return -ENOMEM;
@@ -971,49 +984,49 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
 	config = tegra_phy->config;
 
 	err = read_utmi_param(pdev, "nvidia,hssync-start-delay",
-		&config->hssync_start_delay);
-	if (err < 0)
+			      &config->hssync_start_delay);
+	if (err)
 		return err;
 
 	err = read_utmi_param(pdev, "nvidia,elastic-limit",
-		&config->elastic_limit);
-	if (err < 0)
+			      &config->elastic_limit);
+	if (err)
 		return err;
 
 	err = read_utmi_param(pdev, "nvidia,idle-wait-delay",
-		&config->idle_wait_delay);
-	if (err < 0)
+			      &config->idle_wait_delay);
+	if (err)
 		return err;
 
 	err = read_utmi_param(pdev, "nvidia,term-range-adj",
-		&config->term_range_adj);
-	if (err < 0)
+			      &config->term_range_adj);
+	if (err)
 		return err;
 
 	err = read_utmi_param(pdev, "nvidia,xcvr-lsfslew",
-		&config->xcvr_lsfslew);
-	if (err < 0)
+			      &config->xcvr_lsfslew);
+	if (err)
 		return err;
 
 	err = read_utmi_param(pdev, "nvidia,xcvr-lsrslew",
-		&config->xcvr_lsrslew);
-	if (err < 0)
+			      &config->xcvr_lsrslew);
+	if (err)
 		return err;
 
 	if (tegra_phy->soc_config->requires_extra_tuning_parameters) {
 		err = read_utmi_param(pdev, "nvidia,xcvr-hsslew",
-			&config->xcvr_hsslew);
-		if (err < 0)
+				      &config->xcvr_hsslew);
+		if (err)
 			return err;
 
 		err = read_utmi_param(pdev, "nvidia,hssquelch-level",
-			&config->hssquelch_level);
-		if (err < 0)
+				      &config->hssquelch_level);
+		if (err)
 			return err;
 
 		err = read_utmi_param(pdev, "nvidia,hsdiscon-level",
-			&config->hsdiscon_level);
-		if (err < 0)
+				      &config->hsdiscon_level);
+		if (err)
 			return err;
 	}
 
@@ -1022,8 +1035,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
 
 	if (!config->xcvr_setup_use_fuses) {
 		err = read_utmi_param(pdev, "nvidia,xcvr-setup",
-			&config->xcvr_setup);
-		if (err < 0)
+				      &config->xcvr_setup);
+		if (err)
 			return err;
 	}
 
@@ -1053,23 +1066,20 @@ MODULE_DEVICE_TABLE(of, tegra_usb_phy_id_table);
 
 static int tegra_usb_phy_probe(struct platform_device *pdev)
 {
-	const struct of_device_id *match;
-	struct resource *res;
-	struct tegra_usb_phy *tegra_phy = NULL;
 	struct device_node *np = pdev->dev.of_node;
+	struct tegra_usb_phy *tegra_phy;
 	enum usb_phy_interface phy_type;
+	struct reset_control *reset;
+	struct gpio_desc *gpiod;
+	struct resource *res;
+	struct usb_phy *phy;
 	int err;
 
 	tegra_phy = devm_kzalloc(&pdev->dev, sizeof(*tegra_phy), GFP_KERNEL);
 	if (!tegra_phy)
 		return -ENOMEM;
 
-	match = of_match_device(tegra_usb_phy_id_table, &pdev->dev);
-	if (!match) {
-		dev_err(&pdev->dev, "Error: No device match found\n");
-		return -ENODEV;
-	}
-	tegra_phy->soc_config = match->data;
+	tegra_phy->soc_config = of_device_get_match_data(&pdev->dev);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
@@ -1078,7 +1088,7 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
 	}
 
 	tegra_phy->regs = devm_ioremap(&pdev->dev, res->start,
-		resource_size(res));
+				       resource_size(res));
 	if (!tegra_phy->regs) {
 		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
 		return -ENOMEM;
@@ -1087,33 +1097,6 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
 	tegra_phy->is_legacy_phy =
 		of_property_read_bool(np, "nvidia,has-legacy-mode");
 
-	phy_type = of_usb_get_phy_mode(np);
-	switch (phy_type) {
-	case USBPHY_INTERFACE_MODE_UTMI:
-		err = utmi_phy_probe(tegra_phy, pdev);
-		if (err < 0)
-			return err;
-		break;
-
-	case USBPHY_INTERFACE_MODE_ULPI:
-		tegra_phy->is_ulpi_phy = true;
-
-		tegra_phy->reset_gpio =
-			of_get_named_gpio(np, "nvidia,phy-reset-gpio", 0);
-		if (!gpio_is_valid(tegra_phy->reset_gpio)) {
-			dev_err(&pdev->dev,
-				"Invalid GPIO: %d\n", tegra_phy->reset_gpio);
-			return tegra_phy->reset_gpio;
-		}
-		tegra_phy->config = NULL;
-		break;
-
-	default:
-		dev_err(&pdev->dev, "phy_type %u is invalid or unsupported\n",
-			phy_type);
-		return -EINVAL;
-	}
-
 	if (of_find_property(np, "dr_mode", NULL))
 		tegra_phy->mode = usb_get_dr_mode(&pdev->dev);
 	else
@@ -1125,29 +1108,93 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
 	}
 
 	/* On some boards, the VBUS regulator doesn't need to be controlled */
-	if (of_find_property(np, "vbus-supply", NULL)) {
-		tegra_phy->vbus = devm_regulator_get(&pdev->dev, "vbus");
-		if (IS_ERR(tegra_phy->vbus))
-			return PTR_ERR(tegra_phy->vbus);
-	} else {
-		dev_notice(&pdev->dev, "no vbus regulator");
-		tegra_phy->vbus = ERR_PTR(-ENODEV);
+	tegra_phy->vbus = devm_regulator_get(&pdev->dev, "vbus");
+	if (IS_ERR(tegra_phy->vbus))
+		return PTR_ERR(tegra_phy->vbus);
+
+	tegra_phy->pll_u = devm_clk_get(&pdev->dev, "pll_u");
+	err = PTR_ERR_OR_ZERO(tegra_phy->pll_u);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to get pll_u clock: %d\n", err);
+		return err;
+	}
+
+	phy_type = of_usb_get_phy_mode(np);
+	switch (phy_type) {
+	case USBPHY_INTERFACE_MODE_UTMI:
+		err = utmi_phy_probe(tegra_phy, pdev);
+		if (err)
+			return err;
+
+		tegra_phy->pad_clk = devm_clk_get(&pdev->dev, "utmi-pads");
+		err = PTR_ERR_OR_ZERO(tegra_phy->pad_clk);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Failed to get UTMIP pad clock: %d\n", err);
+			return err;
+		}
+
+		reset = devm_reset_control_get_optional_shared(&pdev->dev,
+							       "utmi-pads");
+		err = PTR_ERR_OR_ZERO(reset);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Failed to get UTMI-pads reset: %d\n", err);
+			return err;
+		}
+		tegra_phy->pad_rst = reset;
+		break;
+
+	case USBPHY_INTERFACE_MODE_ULPI:
+		tegra_phy->is_ulpi_phy = true;
+
+		tegra_phy->clk = devm_clk_get(&pdev->dev, "ulpi-link");
+		err = PTR_ERR_OR_ZERO(tegra_phy->clk);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Failed to get ULPI clock: %d\n", err);
+			return err;
+		}
+
+		gpiod = devm_gpiod_get_from_of_node(&pdev->dev, np,
+						    "nvidia,phy-reset-gpio",
+						    0, GPIOD_OUT_HIGH,
+						    "ulpi_phy_reset_b");
+		err = PTR_ERR_OR_ZERO(gpiod);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Request failed for reset GPIO: %d\n", err);
+			return err;
+		}
+		tegra_phy->reset_gpio = gpiod;
+
+		phy = devm_otg_ulpi_create(&pdev->dev,
+					   &ulpi_viewport_access_ops, 0);
+		if (!phy) {
+			dev_err(&pdev->dev, "Failed to create ULPI OTG\n");
+			return -ENOMEM;
+		}
+
+		tegra_phy->ulpi = phy;
+		tegra_phy->ulpi->io_priv = tegra_phy->regs + ULPI_VIEWPORT;
+		break;
+
+	default:
+		dev_err(&pdev->dev, "phy_type %u is invalid or unsupported\n",
+			phy_type);
+		return -EINVAL;
 	}
 
 	tegra_phy->u_phy.dev = &pdev->dev;
-	err = tegra_usb_phy_init(tegra_phy);
-	if (err < 0)
-		return err;
-
-	tegra_phy->u_phy.set_suspend = tegra_usb_phy_suspend;
+	tegra_phy->u_phy.init = tegra_usb_phy_init;
+	tegra_phy->u_phy.shutdown = tegra_usb_phy_shutdown;
+	tegra_phy->u_phy.set_suspend = tegra_usb_phy_set_suspend;
 
 	platform_set_drvdata(pdev, tegra_phy);
 
 	err = usb_add_phy_dev(&tegra_phy->u_phy);
-	if (err < 0) {
-		tegra_usb_phy_close(tegra_phy);
+	if (err)
 		return err;
-	}
 
 	return 0;
 }
@@ -1157,7 +1204,6 @@ static int tegra_usb_phy_remove(struct platform_device *pdev)
 	struct tegra_usb_phy *tegra_phy = platform_get_drvdata(pdev);
 
 	usb_remove_phy(&tegra_phy->u_phy);
-	tegra_usb_phy_close(tegra_phy);
 
 	return 0;
 }
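
The Tegra PHY rework above converts every plain register update to the relaxed MMIO accessors and narrows the temporaries from unsigned long to u32. As a minimal sketch of that read-modify-write pattern (a hypothetical helper, not part of the driver):

	#include <linux/io.h>
	#include <linux/types.h>

	/*
	 * The relaxed variants skip the extra ordering barriers that
	 * readl()/writel() imply; that is sufficient for these plain
	 * configuration updates to the PHY register window.
	 */
	static void phy_update_bits(void __iomem *base, unsigned long offset,
				    u32 clear, u32 set)
	{
		u32 val;

		val = readl_relaxed(base + offset);
		val &= ~clear;
		val |= set;
		writel_relaxed(val, base + offset);
	}
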
diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
index a43c493..e683a37 100644
--- a/drivers/usb/phy/phy-ulpi.c
+++ b/drivers/usb/phy/phy-ulpi.c
@@ -240,6 +240,21 @@ static int ulpi_set_vbus(struct usb_otg *otg, bool on)
 	return usb_phy_io_write(phy, flags, ULPI_OTG_CTRL);
 }
 
+static void otg_ulpi_init(struct usb_phy *phy, struct usb_otg *otg,
+			  struct usb_phy_io_ops *ops,
+			  unsigned int flags)
+{
+	phy->label	= "ULPI";
+	phy->flags	= flags;
+	phy->io_ops	= ops;
+	phy->otg	= otg;
+	phy->init	= ulpi_init;
+
+	otg->usb_phy	= phy;
+	otg->set_host	= ulpi_set_host;
+	otg->set_vbus	= ulpi_set_vbus;
+}
+
 struct usb_phy *
 otg_ulpi_create(struct usb_phy_io_ops *ops,
 		unsigned int flags)
@@ -257,17 +272,32 @@ otg_ulpi_create(struct usb_phy_io_ops *ops,
 		return NULL;
 	}
 
-	phy->label	= "ULPI";
-	phy->flags	= flags;
-	phy->io_ops	= ops;
-	phy->otg	= otg;
-	phy->init	= ulpi_init;
-
-	otg->usb_phy	= phy;
-	otg->set_host	= ulpi_set_host;
-	otg->set_vbus	= ulpi_set_vbus;
+	otg_ulpi_init(phy, otg, ops, flags);
 
 	return phy;
 }
 EXPORT_SYMBOL_GPL(otg_ulpi_create);
 
+struct usb_phy *
+devm_otg_ulpi_create(struct device *dev,
+		     struct usb_phy_io_ops *ops,
+		     unsigned int flags)
+{
+	struct usb_phy *phy;
+	struct usb_otg *otg;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return NULL;
+
+	otg = devm_kzalloc(dev, sizeof(*otg), GFP_KERNEL);
+	if (!otg) {
+		devm_kfree(dev, phy);
+		return NULL;
+	}
+
+	otg_ulpi_init(phy, otg, ops, flags);
+
+	return phy;
+}
+EXPORT_SYMBOL_GPL(devm_otg_ulpi_create);
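
Because devm_otg_ulpi_create() ties both the phy and otg allocations to the device, callers need no matching free on unbind. A hedged usage sketch, mirroring the Tegra conversion above ("regs" stands for the caller's mapped controller registers):

	struct usb_phy *ulpi;

	ulpi = devm_otg_ulpi_create(&pdev->dev, &ulpi_viewport_access_ops, 0);
	if (!ulpi)
		return -ENOMEM;

	/* Point the ULPI viewport at the controller's register window. */
	ulpi->io_priv = regs + ULPI_VIEWPORT;
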
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 0277f627..ad25546 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -34,6 +34,14 @@ struct phy_devm {
 	struct notifier_block *nb;
 };
 
+static const char *const usb_chger_type[] = {
+	[UNKNOWN_TYPE]			= "USB_CHARGER_UNKNOWN_TYPE",
+	[SDP_TYPE]			= "USB_CHARGER_SDP_TYPE",
+	[CDP_TYPE]			= "USB_CHARGER_CDP_TYPE",
+	[DCP_TYPE]			= "USB_CHARGER_DCP_TYPE",
+	[ACA_TYPE]			= "USB_CHARGER_ACA_TYPE",
+};
+
 static struct usb_phy *__usb_find_phy(struct list_head *list,
 	enum usb_phy_type type)
 {
@@ -98,7 +106,8 @@ static void usb_phy_notify_charger_work(struct work_struct *work)
 {
 	struct usb_phy *usb_phy = container_of(work, struct usb_phy, chg_work);
 	char uchger_state[50] = { 0 };
-	char *envp[] = { uchger_state, NULL };
+	char uchger_type[50] = { 0 };
+	char *envp[] = { uchger_state, uchger_type, NULL };
 	unsigned int min, max;
 
 	switch (usb_phy->chg_state) {
@@ -122,6 +131,8 @@ static void usb_phy_notify_charger_work(struct work_struct *work)
 		return;
 	}
 
+	snprintf(uchger_type, ARRAY_SIZE(uchger_type),
+		 "USB_CHARGER_TYPE=%s", usb_chger_type[usb_phy->chg_type]);
 	kobject_uevent_env(&usb_phy->dev->kobj, KOBJ_CHANGE, envp);
 }
 
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index d438b78..3af91b2 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -8,11 +8,10 @@
  */
 #include <linux/clk.h>
 #include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
-#include <linux/of_gpio.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
 #include <linux/slab.h>
@@ -592,7 +591,8 @@ static int usbhs_probe(struct platform_device *pdev)
 	struct usbhs_priv *priv;
 	struct resource *irq_res;
 	struct device *dev = &pdev->dev;
-	int ret, gpio;
+	struct gpio_desc *gpiod;
+	int ret;
 	u32 tmp;
 
 	/* check device node */
@@ -657,10 +657,9 @@ static int usbhs_probe(struct platform_device *pdev)
 		priv->dparam.pio_dma_border = 64; /* 64byte */
 	if (!of_property_read_u32(dev_of_node(dev), "renesas,buswait", &tmp))
 		priv->dparam.buswait_bwait = tmp;
-	gpio = of_get_named_gpio_flags(dev_of_node(dev), "renesas,enable-gpio",
-				       0, NULL);
-	if (gpio > 0)
-		priv->dparam.enable_gpio = gpio;
+	gpiod = devm_gpiod_get_optional(dev, "renesas,enable", GPIOD_IN);
+	if (IS_ERR(gpiod))
+		return PTR_ERR(gpiod);
 
 	/* FIXME */
 	/* runtime power control ? */
@@ -708,13 +707,10 @@ static int usbhs_probe(struct platform_device *pdev)
 	usbhs_sys_clock_ctrl(priv, 0);
 
 	/* check GPIO determining if USB function should be enabled */
-	if (priv->dparam.enable_gpio) {
-		gpio_request_one(priv->dparam.enable_gpio, GPIOF_IN, NULL);
-		ret = !gpio_get_value(priv->dparam.enable_gpio);
-		gpio_free(priv->dparam.enable_gpio);
+	if (gpiod) {
+		ret = !gpiod_get_value(gpiod);
 		if (ret) {
-			dev_warn(dev, "USB function not selected (GPIO %d)\n",
-				 priv->dparam.enable_gpio);
+			dev_warn(dev, "USB function not selected (GPIO)\n");
 			ret = -ENOTSUPP;
 			goto probe_end_mod_exit;
 		}
diff --git a/drivers/usb/renesas_usbhs/rcar2.c b/drivers/usb/renesas_usbhs/rcar2.c
index 440d213..52756fc 100644
--- a/drivers/usb/renesas_usbhs/rcar2.c
+++ b/drivers/usb/renesas_usbhs/rcar2.c
@@ -6,8 +6,6 @@
  * Copyright (C) 2019 Renesas Electronics Corporation
  */
 
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
 #include <linux/phy/phy.h>
 #include "common.h"
 #include "rcar2.h"
@@ -34,7 +32,7 @@ static int usbhs_rcar2_hardware_exit(struct platform_device *pdev)
 	struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
 
 	if (priv->phy) {
-		phy_put(priv->phy);
+		phy_put(&pdev->dev, priv->phy);
 		priv->phy = NULL;
 	}
 
diff --git a/drivers/usb/renesas_usbhs/rza2.c b/drivers/usb/renesas_usbhs/rza2.c
index 0217495..3eed333 100644
--- a/drivers/usb/renesas_usbhs/rza2.c
+++ b/drivers/usb/renesas_usbhs/rza2.c
@@ -29,7 +29,7 @@ static int usbhs_rza2_hardware_exit(struct platform_device *pdev)
 {
 	struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
 
-	phy_put(priv->phy);
+	phy_put(&pdev->dev, priv->phy);
 	priv->phy = NULL;
 
 	return 0;
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index ed4a18b..25d7e0c 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -129,9 +129,6 @@
 	  parallel port on the USB 2 appears as a third serial port on Linux.
 	  The Digi Acceleport USB 8 is not yet supported by this driver.
 
-	  This driver works under SMP with the usb-uhci driver.  It does not
-	  work under SMP with the uhci driver.
-
 	  To compile this driver as a module, choose M here: the
 	  module will be called digi_acceleport.
 
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index ebd76ab..8219706 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -276,7 +276,7 @@ static void cyberjack_read_int_callback(struct urb *urb)
 		old_rdtodo = priv->rdtodo;
 
 		if (old_rdtodo > SHRT_MAX - size) {
-			dev_dbg(dev, "To many bulk_in urbs to do.\n");
+			dev_dbg(dev, "Too many bulk_in urbs to do.\n");
 			spin_unlock_irqrestore(&priv->lock, flags);
 			goto resubmit;
 		}
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 633550e..ffd9841 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -104,7 +104,7 @@ struct garmin_packet {
 	int               seq;
 	/* the real size of the data array, always > 0 */
 	int               size;
-	__u8              data[1];
+	__u8              data[];
 };
 
 /* structure used to keep the current state of the driver */
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 302eb95..79d0586 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -45,9 +45,10 @@ static int buffer_size;
 static int xbof = -1;
 
 static int  ir_startup (struct usb_serial *serial);
-static int  ir_open(struct tty_struct *tty, struct usb_serial_port *port);
-static int ir_prepare_write_buffer(struct usb_serial_port *port,
-						void *dest, size_t size);
+static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
+		const unsigned char *buf, int count);
+static int ir_write_room(struct tty_struct *tty);
+static void ir_write_bulk_callback(struct urb *urb);
 static void ir_process_read_urb(struct urb *urb);
 static void ir_set_termios(struct tty_struct *tty,
 		struct usb_serial_port *port, struct ktermios *old_termios);
@@ -75,10 +76,13 @@ static struct usb_serial_driver ir_device = {
 	.description		= "IR Dongle",
 	.id_table		= ir_id_table,
 	.num_ports		= 1,
+	.num_bulk_in		= 1,
+	.num_bulk_out		= 1,
 	.set_termios		= ir_set_termios,
 	.attach			= ir_startup,
-	.open			= ir_open,
-	.prepare_write_buffer	= ir_prepare_write_buffer,
+	.write			= ir_write,
+	.write_room		= ir_write_room,
+	.write_bulk_callback	= ir_write_bulk_callback,
 	.process_read_urb	= ir_process_read_urb,
 };
 
@@ -251,35 +255,102 @@ static int ir_startup(struct usb_serial *serial)
 	return 0;
 }
 
-static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
+static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
+		const unsigned char *buf, int count)
 {
-	int i;
+	struct urb *urb = NULL;
+	unsigned long flags;
+	int ret;
 
-	for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
-		port->write_urbs[i]->transfer_flags = URB_ZERO_PACKET;
+	if (port->bulk_out_size == 0)
+		return -EINVAL;
 
-	/* Start reading from the device */
-	return usb_serial_generic_open(tty, port);
-}
+	if (count == 0)
+		return 0;
 
-static int ir_prepare_write_buffer(struct usb_serial_port *port,
-						void *dest, size_t size)
-{
-	unsigned char *buf = dest;
-	int count;
+	count = min(count, port->bulk_out_size - 1);
+
+	spin_lock_irqsave(&port->lock, flags);
+	if (__test_and_clear_bit(0, &port->write_urbs_free)) {
+		urb = port->write_urbs[0];
+		port->tx_bytes += count;
+	}
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	if (!urb)
+		return 0;
 
 	/*
 	 * The first byte of the packet we send to the device contains an
-	 * inbound header which indicates an additional number of BOFs and
+	 * outbound header which indicates an additional number of BOFs and
 	 * a baud rate change.
 	 *
 	 * See section 5.4.2.2 of the USB IrDA spec.
 	 */
-	*buf = ir_xbof | ir_baud;
+	*(u8 *)urb->transfer_buffer = ir_xbof | ir_baud;
 
-	count = kfifo_out_locked(&port->write_fifo, buf + 1, size - 1,
-								&port->lock);
-	return count + 1;
+	memcpy(urb->transfer_buffer + 1, buf, count);
+
+	urb->transfer_buffer_length = count + 1;
+	urb->transfer_flags = URB_ZERO_PACKET;
+
+	ret = usb_submit_urb(urb, GFP_ATOMIC);
+	if (ret) {
+		dev_err(&port->dev, "failed to submit write urb: %d\n", ret);
+
+		spin_lock_irqsave(&port->lock, flags);
+		__set_bit(0, &port->write_urbs_free);
+		port->tx_bytes -= count;
+		spin_unlock_irqrestore(&port->lock, flags);
+
+		return ret;
+	}
+
+	return count;
+}
+
+static void ir_write_bulk_callback(struct urb *urb)
+{
+	struct usb_serial_port *port = urb->context;
+	int status = urb->status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+	__set_bit(0, &port->write_urbs_free);
+	port->tx_bytes -= urb->transfer_buffer_length - 1;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	switch (status) {
+	case 0:
+		break;
+	case -ENOENT:
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		dev_dbg(&port->dev, "write urb stopped: %d\n", status);
+		return;
+	case -EPIPE:
+		dev_err(&port->dev, "write urb stopped: %d\n", status);
+		return;
+	default:
+		dev_err(&port->dev, "nonzero write-urb status: %d\n", status);
+		break;
+	}
+
+	usb_serial_port_softint(port);
+}
+
+static int ir_write_room(struct tty_struct *tty)
+{
+	struct usb_serial_port *port = tty->driver_data;
+	int count = 0;
+
+	if (port->bulk_out_size == 0)
+		return 0;
+
+	if (test_bit(0, &port->write_urbs_free))
+		count = port->bulk_out_size - 1;
+
+	return count;
 }
 
 static void ir_process_read_urb(struct urb *urb)
@@ -304,23 +375,15 @@ static void ir_process_read_urb(struct urb *urb)
 	tty_flip_buffer_push(&port->port);
 }
 
-static void ir_set_termios_callback(struct urb *urb)
-{
-	kfree(urb->transfer_buffer);
-
-	if (urb->status)
-		dev_dbg(&urb->dev->dev, "%s - non-zero urb status: %d\n",
-			__func__, urb->status);
-}
-
 static void ir_set_termios(struct tty_struct *tty,
 		struct usb_serial_port *port, struct ktermios *old_termios)
 {
-	struct urb *urb;
+	struct usb_device *udev = port->serial->dev;
 	unsigned char *transfer_buffer;
-	int result;
+	int actual_length;
 	speed_t baud;
 	int ir_baud;
+	int ret;
 
 	baud = tty_get_baud_rate(tty);
 
@@ -332,34 +395,34 @@ static void ir_set_termios(struct tty_struct *tty,
 
 	switch (baud) {
 	case 2400:
-		ir_baud = USB_IRDA_BR_2400;
+		ir_baud = USB_IRDA_LS_2400;
 		break;
 	case 9600:
-		ir_baud = USB_IRDA_BR_9600;
+		ir_baud = USB_IRDA_LS_9600;
 		break;
 	case 19200:
-		ir_baud = USB_IRDA_BR_19200;
+		ir_baud = USB_IRDA_LS_19200;
 		break;
 	case 38400:
-		ir_baud = USB_IRDA_BR_38400;
+		ir_baud = USB_IRDA_LS_38400;
 		break;
 	case 57600:
-		ir_baud = USB_IRDA_BR_57600;
+		ir_baud = USB_IRDA_LS_57600;
 		break;
 	case 115200:
-		ir_baud = USB_IRDA_BR_115200;
+		ir_baud = USB_IRDA_LS_115200;
 		break;
 	case 576000:
-		ir_baud = USB_IRDA_BR_576000;
+		ir_baud = USB_IRDA_LS_576000;
 		break;
 	case 1152000:
-		ir_baud = USB_IRDA_BR_1152000;
+		ir_baud = USB_IRDA_LS_1152000;
 		break;
 	case 4000000:
-		ir_baud = USB_IRDA_BR_4000000;
+		ir_baud = USB_IRDA_LS_4000000;
 		break;
 	default:
-		ir_baud = USB_IRDA_BR_9600;
+		ir_baud = USB_IRDA_LS_9600;
 		baud = 9600;
 	}
 
@@ -375,42 +438,22 @@ static void ir_set_termios(struct tty_struct *tty,
 	/*
 	 * send the baud change out on an "empty" data packet
 	 */
-	urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!urb)
-		return;
-
 	transfer_buffer = kmalloc(1, GFP_KERNEL);
 	if (!transfer_buffer)
-		goto err_buf;
+		return;
 
 	*transfer_buffer = ir_xbof | ir_baud;
 
-	usb_fill_bulk_urb(
-		urb,
-		port->serial->dev,
-		usb_sndbulkpipe(port->serial->dev,
-			port->bulk_out_endpointAddress),
-		transfer_buffer,
-		1,
-		ir_set_termios_callback,
-		port);
-
-	urb->transfer_flags = URB_ZERO_PACKET;
-
-	result = usb_submit_urb(urb, GFP_KERNEL);
-	if (result) {
-		dev_err(&port->dev, "%s - failed to submit urb: %d\n",
-							__func__, result);
-		goto err_subm;
+	ret = usb_bulk_msg(udev,
+			usb_sndbulkpipe(udev, port->bulk_out_endpointAddress),
+			transfer_buffer, 1, &actual_length, 5000);
+	if (ret || actual_length != 1) {
+		if (actual_length != 1)
+			ret = -EIO;
+		dev_err(&port->dev, "failed to change line speed: %d\n", ret);
 	}
 
-	usb_free_urb(urb);
-
-	return;
-err_subm:
 	kfree(transfer_buffer);
-err_buf:
-	usb_free_urb(urb);
 }
 
 static int __init ir_init(void)
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index ed2b4e6..0af7680 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -41,6 +41,9 @@ struct opticon_private {
 	bool rts;
 	bool cts;
 	int outstanding_urbs;
+	int outstanding_bytes;
+
+	struct usb_anchor anchor;
 };
 
 
@@ -149,6 +152,15 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port)
 	return res;
 }
 
+static void opticon_close(struct usb_serial_port *port)
+{
+	struct opticon_private *priv = usb_get_serial_port_data(port);
+
+	usb_kill_anchored_urbs(&priv->anchor);
+
+	usb_serial_generic_close(port);
+}
+
 static void opticon_write_control_callback(struct urb *urb)
 {
 	struct usb_serial_port *port = urb->context;
@@ -169,6 +181,7 @@ static void opticon_write_control_callback(struct urb *urb)
 
 	spin_lock_irqsave(&priv->lock, flags);
 	--priv->outstanding_urbs;
+	priv->outstanding_bytes -= urb->transfer_buffer_length;
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	usb_serial_port_softint(port);
@@ -182,8 +195,8 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
 	struct urb *urb;
 	unsigned char *buffer;
 	unsigned long flags;
-	int status;
 	struct usb_ctrlrequest *dr;
+	int ret = -ENOMEM;
 
 	spin_lock_irqsave(&priv->lock, flags);
 	if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
@@ -192,19 +205,16 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
 		return 0;
 	}
 	priv->outstanding_urbs++;
+	priv->outstanding_bytes += count;
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	buffer = kmalloc(count, GFP_ATOMIC);
-	if (!buffer) {
-		count = -ENOMEM;
+	if (!buffer)
 		goto error_no_buffer;
-	}
 
 	urb = usb_alloc_urb(0, GFP_ATOMIC);
-	if (!urb) {
-		count = -ENOMEM;
+	if (!urb)
 		goto error_no_urb;
-	}
 
 	memcpy(buffer, buf, count);
 
@@ -213,10 +223,8 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
 	/* The connected devices do not have a bulk write endpoint,
 	 * to transmit data to the barcode device the control endpoint is used */
 	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
-	if (!dr) {
-		count = -ENOMEM;
+	if (!dr)
 		goto error_no_dr;
-	}
 
 	dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT;
 	dr->bRequest = 0x01;
@@ -229,13 +237,13 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
 		(unsigned char *)dr, buffer, count,
 		opticon_write_control_callback, port);
 
+	usb_anchor_urb(urb, &priv->anchor);
+
 	/* send it down the pipe */
-	status = usb_submit_urb(urb, GFP_ATOMIC);
-	if (status) {
-		dev_err(&port->dev,
-		"%s - usb_submit_urb(write endpoint) failed status = %d\n",
-							__func__, status);
-		count = status;
+	ret = usb_submit_urb(urb, GFP_ATOMIC);
+	if (ret) {
+		dev_err(&port->dev, "failed to submit write urb: %d\n", ret);
+		usb_unanchor_urb(urb);
 		goto error;
 	}
 
@@ -253,8 +261,10 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
 error_no_buffer:
 	spin_lock_irqsave(&priv->lock, flags);
 	--priv->outstanding_urbs;
+	priv->outstanding_bytes -= count;
 	spin_unlock_irqrestore(&priv->lock, flags);
-	return count;
+
+	return ret;
 }
 
 static int opticon_write_room(struct tty_struct *tty)
@@ -279,6 +289,20 @@ static int opticon_write_room(struct tty_struct *tty)
 	return 2048;
 }
 
+static int opticon_chars_in_buffer(struct tty_struct *tty)
+{
+	struct usb_serial_port *port = tty->driver_data;
+	struct opticon_private *priv = usb_get_serial_port_data(port);
+	unsigned long flags;
+	int count;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	count = priv->outstanding_bytes;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return count;
+}
+
 static int opticon_tiocmget(struct tty_struct *tty)
 {
 	struct usb_serial_port *port = tty->driver_data;
@@ -354,6 +378,7 @@ static int opticon_port_probe(struct usb_serial_port *port)
 		return -ENOMEM;
 
 	spin_lock_init(&priv->lock);
+	init_usb_anchor(&priv->anchor);
 
 	usb_set_serial_port_data(port, priv);
 
@@ -381,8 +406,10 @@ static struct usb_serial_driver opticon_device = {
 	.port_probe =		opticon_port_probe,
 	.port_remove =		opticon_port_remove,
 	.open =			opticon_open,
+	.close =		opticon_close,
 	.write =		opticon_write,
 	.write_room = 		opticon_write_room,
+	.chars_in_buffer =	opticon_chars_in_buffer,
 	.throttle =		usb_serial_generic_throttle,
 	.unthrottle =		usb_serial_generic_unthrottle,
 	.get_serial =		get_serial_info,
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 4092248..0edfb89 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -188,7 +188,7 @@ static void dp_altmode_work(struct work_struct *work)
 
 	switch (dp->state) {
 	case DP_STATE_ENTER:
-		ret = typec_altmode_enter(dp->alt);
+		ret = typec_altmode_enter(dp->alt, NULL);
 		if (ret)
 			dev_err(&dp->alt->dev, "failed to enter mode\n");
 		break;
@@ -306,7 +306,8 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
 
 static int dp_altmode_activate(struct typec_altmode *alt, int activate)
 {
-	return activate ? typec_altmode_enter(alt) : typec_altmode_exit(alt);
+	return activate ? typec_altmode_enter(alt, NULL) :
+			  typec_altmode_exit(alt);
 }
 
 static const struct typec_altmode_ops dp_altmode_ops = {
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 74cb3c2..2e45eb4 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * Bus for USB Type-C Alternate Modes
  *
  * Copyright (C) 2018 Intel Corporation
@@ -10,12 +10,23 @@
 
 #include "bus.h"
 
-static inline int typec_altmode_set_mux(struct altmode *alt, u8 state)
+static inline int
+typec_altmode_set_mux(struct altmode *alt, unsigned long conf, void *data)
 {
-	return alt->mux ? alt->mux->set(alt->mux, state) : 0;
+	struct typec_mux_state state;
+
+	if (!alt->mux)
+		return 0;
+
+	state.alt = &alt->adev;
+	state.mode = conf;
+	state.data = data;
+
+	return alt->mux->set(alt->mux, &state);
 }
 
-static int typec_altmode_set_state(struct typec_altmode *adev, int state)
+static int typec_altmode_set_state(struct typec_altmode *adev,
+				   unsigned long conf, void *data)
 {
 	bool is_port = is_typec_port(adev->dev.parent);
 	struct altmode *port_altmode;
@@ -23,11 +34,11 @@ static int typec_altmode_set_state(struct typec_altmode *adev, int state)
 
 	port_altmode = is_port ? to_altmode(adev) : to_altmode(adev)->partner;
 
-	ret = typec_altmode_set_mux(port_altmode, state);
+	ret = typec_altmode_set_mux(port_altmode, conf, data);
 	if (ret)
 		return ret;
 
-	blocking_notifier_call_chain(&port_altmode->nh, state, NULL);
+	blocking_notifier_call_chain(&port_altmode->nh, conf, NULL);
 
 	return 0;
 }
@@ -67,7 +78,7 @@ int typec_altmode_notify(struct typec_altmode *adev,
 	is_port = is_typec_port(adev->dev.parent);
 	partner = altmode->partner;
 
-	ret = typec_altmode_set_mux(is_port ? altmode : partner, (u8)conf);
+	ret = typec_altmode_set_mux(is_port ? altmode : partner, conf, data);
 	if (ret)
 		return ret;
 
@@ -84,12 +95,14 @@ EXPORT_SYMBOL_GPL(typec_altmode_notify);
 /**
  * typec_altmode_enter - Enter Mode
  * @adev: The alternate mode
+ * @vdo: VDO for the Enter Mode command
  *
  * The alternate mode drivers use this function to enter mode. The port drivers
  * use this to inform the alternate mode drivers that the partner has initiated
- * Enter Mode command.
+ * Enter Mode command. If the alternate mode does not require a VDO, @vdo must be
+ * NULL.
  */
-int typec_altmode_enter(struct typec_altmode *adev)
+int typec_altmode_enter(struct typec_altmode *adev, u32 *vdo)
 {
 	struct altmode *partner = to_altmode(adev)->partner;
 	struct typec_altmode *pdev = &partner->adev;
@@ -101,13 +114,16 @@ int typec_altmode_enter(struct typec_altmode *adev)
 	if (!pdev->ops || !pdev->ops->enter)
 		return -EOPNOTSUPP;
 
+	if (is_typec_port(pdev->dev.parent) && !pdev->active)
+		return -EPERM;
+
 	/* Moving to USB Safe State */
-	ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE);
+	ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL);
 	if (ret)
 		return ret;
 
 	/* Enter Mode */
-	return pdev->ops->enter(pdev);
+	return pdev->ops->enter(pdev, vdo);
 }
 EXPORT_SYMBOL_GPL(typec_altmode_enter);
 
@@ -130,7 +146,7 @@ int typec_altmode_exit(struct typec_altmode *adev)
 		return -EOPNOTSUPP;
 
 	/* Moving to USB Safe State */
-	ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE);
+	ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL);
 	if (ret)
 		return ret;
 
@@ -383,7 +399,7 @@ static int typec_remove(struct device *dev)
 		drv->remove(to_typec_altmode(dev));
 
 	if (adev->active) {
-		WARN_ON(typec_altmode_set_state(adev, TYPEC_STATE_SAFE));
+		WARN_ON(typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL));
 		typec_altmode_update_active(adev, false);
 	}
 
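
Alternate modes whose Enter Mode command carries a VDO can now pass it through the new parameter, while modes that need none keep passing NULL as the DisplayPort driver above does. A hedged caller sketch (the VDO value is hypothetical):

	u32 vdo = 0x1;	/* hypothetical mode-specific Enter Mode VDO */
	int ret;

	/* tcpm queues one data object when a VDO is supplied, none otherwise */
	ret = typec_altmode_enter(alt, &vdo);
	if (ret)
		dev_err(&alt->dev, "failed to enter mode\n");
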
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 91d6227..7c44e93 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -834,6 +834,52 @@ static const struct device_type typec_cable_dev_type = {
 	.release = typec_cable_release,
 };
 
+static int cable_match(struct device *dev, void *data)
+{
+	return is_typec_cable(dev);
+}
+
+/**
+ * typec_cable_get - Get a reference to the USB Type-C cable
+ * @port: The USB Type-C Port the cable is connected to
+ *
+ * The caller must decrement the reference count with typec_cable_put() after
+ * use.
+ */
+struct typec_cable *typec_cable_get(struct typec_port *port)
+{
+	struct device *dev;
+
+	dev = device_find_child(&port->dev, NULL, cable_match);
+	if (!dev)
+		return NULL;
+
+	return to_typec_cable(dev);
+}
+EXPORT_SYMBOL_GPL(typec_cable_get);
+
+/**
+ * typec_cable_put - Decrement the reference count on a USB Type-C cable
+ * @cable: The USB Type-C cable
+ */
+void typec_cable_put(struct typec_cable *cable)
+{
+	put_device(&cable->dev);
+}
+EXPORT_SYMBOL_GPL(typec_cable_put);
+
+/**
+ * typec_cable_is_active - Check whether the USB Type-C cable is active or passive
+ * @cable: The USB Type-C Cable
+ *
+ * Return 1 if the cable is active or 0 if it's passive.
+ */
+int typec_cable_is_active(struct typec_cable *cable)
+{
+	return cable->active;
+}
+EXPORT_SYMBOL_GPL(typec_cable_is_active);
+
 /**
  * typec_cable_set_identity - Report result from Discover Identity command
  * @cable: The cable updated identity values
@@ -1483,7 +1529,11 @@ EXPORT_SYMBOL_GPL(typec_get_orientation);
  */
 int typec_set_mode(struct typec_port *port, int mode)
 {
-	return port->mux ? port->mux->set(port->mux, mode) : 0;
+	struct typec_mux_state state = { };
+
+	state.mode = mode;
+
+	return port->mux ? port->mux->set(port->mux, &state) : 0;
 }
 EXPORT_SYMBOL_GPL(typec_set_mode);
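A minimal sketch (the function name and the cases handled are invented) of what
a mux driver's ->set callback looks like after the switch from a plain int mode
to struct typec_mux_state; only state->mode is consulted here, mirroring the
pi3usb30532 conversion further down.

    #include <linux/usb/typec_altmode.h>
    #include <linux/usb/typec_mux.h>

    static int example_mux_set(struct typec_mux *mux,
                               struct typec_mux_state *state)
    {
            /* The mode that used to be passed directly now lives in *state. */
            switch (state->mode) {
            case TYPEC_STATE_SAFE:
                    /* route the lanes to USB Safe State */
                    break;
            case TYPEC_STATE_USB:
                    /* plain USB routing */
                    break;
            default:
                    return -EOPNOTSUPP;
            }

            return 0;
    }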
 
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 57907f2..5baf0f4 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * USB Type-C Multiplexer/DeMultiplexer Switch support
  *
  * Copyright (C) 2018 Intel Corporation
diff --git a/drivers/usb/typec/mux/pi3usb30532.c b/drivers/usb/typec/mux/pi3usb30532.c
index 5585b10..46457c1 100644
--- a/drivers/usb/typec/mux/pi3usb30532.c
+++ b/drivers/usb/typec/mux/pi3usb30532.c
@@ -73,7 +73,8 @@ static int pi3usb30532_sw_set(struct typec_switch *sw,
 	return ret;
 }
 
-static int pi3usb30532_mux_set(struct typec_mux *mux, int state)
+static int
+pi3usb30532_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
 {
 	struct pi3usb30532 *pi = typec_mux_get_drvdata(mux);
 	u8 new_conf;
@@ -82,7 +83,7 @@ static int pi3usb30532_mux_set(struct typec_mux *mux, int state)
 	mutex_lock(&pi->lock);
 	new_conf = pi->conf;
 
-	switch (state) {
+	switch (state->mode) {
 	case TYPEC_STATE_SAFE:
 		new_conf = (new_conf & PI3USB30532_CONF_SWAP) |
 			   PI3USB30532_CONF_OPEN;
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index ed8655c..b498960f 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1666,7 +1666,7 @@ static const struct property_entry port_props[] = {
 	PROPERTY_ENTRY_STRING("try-power-role", "sink"),
 	PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
 	PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
-	PROPERTY_ENTRY_U32("op-sink-microwatt", 2500),
+	PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
 	{ }
 };
 
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 8b4ff9ff..753645b 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -591,6 +591,12 @@ static int tcpci_probe(struct i2c_client *client,
 static int tcpci_remove(struct i2c_client *client)
 {
 	struct tcpci_chip *chip = i2c_get_clientdata(client);
+	int err;
+
+	/* Disable chip interrupts before unregistering port */
+	err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0);
+	if (err < 0)
+		return err;
 
 	tcpci_unregister_port(chip->tcpci);
 
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 56fc356..f3087ef 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1475,16 +1475,16 @@ static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
 	return 0;
 }
 
-static int tcpm_altmode_enter(struct typec_altmode *altmode)
+static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
 {
 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
 	u32 header;
 
 	mutex_lock(&port->lock);
-	header = VDO(altmode->svid, 1, CMD_ENTER_MODE);
+	header = VDO(altmode->svid, vdo ? 2 : 1, CMD_ENTER_MODE);
 	header |= VDO_OPOS(altmode->mode);
 
-	tcpm_queue_vdm(port, header, NULL, 0);
+	tcpm_queue_vdm(port, header, vdo, vdo ? 1 : 0);
 	mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
 	mutex_unlock(&port->lock);
 
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index edc271d..9b745f4 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -597,7 +597,7 @@ static const struct property_entry wcove_props[] = {
 	PROPERTY_ENTRY_STRING("try-power-role", "sink"),
 	PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
 	PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
-	PROPERTY_ENTRY_U32("op-sink-microwatt", 15000),
+	PROPERTY_ENTRY_U32("op-sink-microwatt", 15000000),
 	{ }
 };
 
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index d4d5189..0f1273ae 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -45,7 +45,7 @@ struct ucsi_dp {
  * -EOPNOTSUPP.
  */
 
-static int ucsi_displayport_enter(struct typec_altmode *alt)
+static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
 {
 	struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
 	struct ucsi *ucsi = dp->con->ucsi;
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 4459bc6..d5a6aac 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -189,7 +189,7 @@ int ucsi_resume(struct ucsi *ucsi)
 	u64 command;
 
 	/* Restore UCSI notification enable mask after system resume */
-	command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+	command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
 
 	return ucsi_send_command(ucsi, command, NULL, 0);
 }
@@ -318,6 +318,82 @@ static int ucsi_register_altmode(struct ucsi_connector *con,
 	return ret;
 }
 
+static int
+ucsi_register_altmodes_nvidia(struct ucsi_connector *con, u8 recipient)
+{
+	int max_altmodes = UCSI_MAX_ALTMODES;
+	struct typec_altmode_desc desc;
+	struct ucsi_altmode alt;
+	struct ucsi_altmode orig[UCSI_MAX_ALTMODES];
+	struct ucsi_altmode updated[UCSI_MAX_ALTMODES];
+	struct ucsi *ucsi = con->ucsi;
+	bool multi_dp = false;
+	u64 command;
+	int ret;
+	int len;
+	int i;
+	int k = 0;
+
+	if (recipient == UCSI_RECIPIENT_CON)
+		max_altmodes = con->ucsi->cap.num_alt_modes;
+
+	memset(orig, 0, sizeof(orig));
+	memset(updated, 0, sizeof(updated));
+
+	/* First get all the alternate modes */
+	for (i = 0; i < max_altmodes; i++) {
+		memset(&alt, 0, sizeof(alt));
+		command = UCSI_GET_ALTERNATE_MODES;
+		command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
+		command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
+		command |= UCSI_GET_ALTMODE_OFFSET(i);
+		len = ucsi_run_command(con->ucsi, command, &alt, sizeof(alt));
+		/*
+		 * We are collecting all altmodes first and then registering.
+		 * Some Type-C devices will return zero-length data beyond the
+		 * last alternate mode. We should not return if the length is
+		 * zero.
+		 */
+		if (len < 0)
+			return len;
+
+		/* We got all altmodes, now break out and register them */
+		if (!len || !alt.svid)
+			break;
+
+		orig[k].mid = alt.mid;
+		orig[k].svid = alt.svid;
+		k++;
+	}
+	/*
+	 * Update the original altmode table as some PPMs may report
+	 * multiple DP altmodes.
+	 */
+	if (recipient == UCSI_RECIPIENT_CON)
+		multi_dp = ucsi->ops->update_altmodes(ucsi, orig, updated);
+
+	/* now register altmodes */
+	for (i = 0; i < max_altmodes; i++) {
+		memset(&desc, 0, sizeof(desc));
+		if (multi_dp && recipient == UCSI_RECIPIENT_CON) {
+			desc.svid = updated[i].svid;
+			desc.vdo = updated[i].mid;
+		} else {
+			desc.svid = orig[i].svid;
+			desc.vdo = orig[i].mid;
+		}
+		desc.roles = TYPEC_PORT_DRD;
+
+		if (!desc.svid)
+			return 0;
+
+		ret = ucsi_register_altmode(con, &desc, recipient);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
 {
 	int max_altmodes = UCSI_MAX_ALTMODES;
@@ -336,6 +412,9 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
 	if (recipient == UCSI_RECIPIENT_SOP && con->partner_altmode[0])
 		return 0;
 
+	if (con->ucsi->ops->update_altmodes)
+		return ucsi_register_altmodes_nvidia(con, recipient);
+
 	if (recipient == UCSI_RECIPIENT_CON)
 		max_altmodes = con->ucsi->cap.num_alt_modes;
 
@@ -589,6 +668,11 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num)
 {
 	struct ucsi_connector *con = &ucsi->connector[num - 1];
 
+	if (!(ucsi->ntfy & UCSI_ENABLE_NTFY_CONNECTOR_CHANGE)) {
+		dev_dbg(ucsi->dev, "Bogus connector change event\n");
+		return;
+	}
+
 	if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
 		schedule_work(&con->work);
 }
@@ -656,7 +740,7 @@ static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
 		ucsi_reset_ppm(con->ucsi);
 		mutex_unlock(&con->ucsi->ppm_lock);
 
-		c = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+		c = UCSI_SET_NOTIFICATION_ENABLE | con->ucsi->ntfy;
 		ucsi_send_command(con->ucsi, c, NULL, 0);
 
 		ucsi_reset_connector(con, true);
@@ -890,8 +974,8 @@ int ucsi_init(struct ucsi *ucsi)
 	}
 
 	/* Enable basic notifications */
-	command = UCSI_SET_NOTIFICATION_ENABLE;
-	command |= UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+	ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+	command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
 	ret = ucsi_run_command(ucsi, command, NULL, 0);
 	if (ret < 0)
 		goto err_reset;
@@ -923,7 +1007,8 @@ int ucsi_init(struct ucsi *ucsi)
 	}
 
 	/* Enable all notifications */
-	command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+	ucsi->ntfy = UCSI_ENABLE_NTFY_ALL;
+	command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
 	ret = ucsi_run_command(ucsi, command, NULL, 0);
 	if (ret < 0)
 		goto err_unregister;
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 831c947..e434b9c 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -11,6 +11,7 @@
 /* -------------------------------------------------------------------------- */
 
 struct ucsi;
+struct ucsi_altmode;
 
 /* UCSI offsets (Bytes) */
 #define UCSI_VERSION			0
@@ -35,6 +36,7 @@ struct ucsi;
  * @read: Read operation
  * @sync_write: Blocking write operation
  * @async_write: Non-blocking write operation
+ * @update_altmodes: Squashes duplicate DP altmodes
  *
  * Read and write routines for UCSI interface. @sync_write must wait for the
  * Command Completion Event from the PPM before returning, and @async_write must
@@ -47,6 +49,8 @@ struct ucsi_operations {
 			  const void *val, size_t val_len);
 	int (*async_write)(struct ucsi *ucsi, unsigned int offset,
 			   const void *val, size_t val_len);
+	bool (*update_altmodes)(struct ucsi *ucsi, struct ucsi_altmode *orig,
+				struct ucsi_altmode *updated);
 };
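For orientation, a sketch of how a platform glue driver would wire the new,
optional hook into its ucsi_operations table. The example_* names are invented
and the bodies are stubs; the real user is ucsi_ccg further down.

    #include "ucsi.h"   /* drivers/usb/typec/ucsi/ucsi.h */

    /* Stub mandatory ops, for illustration only. */
    static int example_read(struct ucsi *ucsi, unsigned int offset,
                            void *val, size_t val_len)
    { return 0; }
    static int example_sync_write(struct ucsi *ucsi, unsigned int offset,
                                  const void *val, size_t val_len)
    { return 0; }
    static int example_async_write(struct ucsi *ucsi, unsigned int offset,
                                   const void *val, size_t val_len)
    { return 0; }

    static bool example_update_altmodes(struct ucsi *ucsi,
                                        struct ucsi_altmode *orig,
                                        struct ucsi_altmode *updated)
    {
            /* Merge duplicate DP entries from orig[] into updated[]; return
             * true if anything was merged so the core registers updated[]. */
            return false;
    }

    static const struct ucsi_operations example_ucsi_ops = {
            .read            = example_read,
            .sync_write      = example_sync_write,
            .async_write     = example_async_write,
            .update_altmodes = example_update_altmodes,  /* new, optional */
    };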
 
 struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops);
@@ -82,6 +86,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
 #define UCSI_GET_ERROR_STATUS		0x13
 
 #define UCSI_CONNECTOR_NUMBER(_num_)		((u64)(_num_) << 16)
+#define UCSI_COMMAND(_cmd_)			((_cmd_) & 0xff)
 
 /* CONNECTOR_RESET command bits */
 #define UCSI_CONNECTOR_RESET_HARD		BIT(23) /* Deprecated in v1.1 */
@@ -140,6 +145,12 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
 #define UCSI_ERROR_PPM_POLICY_CONFLICT		BIT(11)
 #define UCSI_ERROR_SWAP_REJECTED		BIT(12)
 
+#define UCSI_SET_NEW_CAM_ENTER(x)		(((x) >> 23) & 0x1)
+#define UCSI_SET_NEW_CAM_GET_AM(x)		(((x) >> 24) & 0xff)
+#define UCSI_SET_NEW_CAM_AM_MASK		(0xff << 24)
+#define UCSI_SET_NEW_CAM_SET_AM(x)		(((x) & 0xff) << 24)
+#define UCSI_CMD_CONNECTOR_MASK			(0x7)
+
 /* Data structure filled by PPM in response to GET_CAPABILITY command. */
 struct ucsi_capability {
 	u32 attributes;
@@ -269,6 +280,9 @@ struct ucsi {
 	/* PPM Communication lock */
 	struct mutex ppm_lock;
 
+	/* The latest "Notification Enable" bits (SET_NOTIFICATION_ENABLE) */
+	u64 ntfy;
+
 	/* PPM communication flags */
 	unsigned long flags;
 #define EVENT_PENDING	0
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 3370b3f..a5b8530 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
+#include <linux/usb/typec_dp.h>
 
 #include <asm/unaligned.h>
 #include "ucsi.h"
@@ -173,6 +174,15 @@ struct ccg_resp {
 	u8 length;
 };
 
+struct ucsi_ccg_altmode {
+	u16 svid;
+	u32 mid;
+	u8 linked_idx;
+	u8 active_idx;
+#define UCSI_MULTI_DP_INDEX	(0xff)
+	bool checked;
+} __packed;
+
 struct ucsi_ccg {
 	struct device *dev;
 	struct ucsi *ucsi;
@@ -198,6 +208,11 @@ struct ucsi_ccg {
 	struct work_struct pm_work;
 
 	struct completion complete;
+
+	u64 last_cmd_sent;
+	bool has_multiple_dp;
+	struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
+	struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
 };
 
 static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
@@ -319,12 +334,169 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
 	return -ETIMEDOUT;
 }
 
+static void ucsi_ccg_update_get_current_cam_cmd(struct ucsi_ccg *uc, u8 *data)
+{
+	u8 cam, new_cam;
+
+	cam = data[0];
+	new_cam = uc->orig[cam].linked_idx;
+	uc->updated[new_cam].active_idx = cam;
+	data[0] = new_cam;
+}
+
+static bool ucsi_ccg_update_altmodes(struct ucsi *ucsi,
+				     struct ucsi_altmode *orig,
+				     struct ucsi_altmode *updated)
+{
+	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+	struct ucsi_ccg_altmode *alt, *new_alt;
+	int i, j, k = 0;
+	bool found = false;
+
+	alt = uc->orig;
+	new_alt = uc->updated;
+	memset(uc->updated, 0, sizeof(uc->updated));
+
+	/*
+	 * Copy the original connector altmodes to the new structure.
+	 * We need this before the second loop, since the second loop
+	 * checks for duplicate altmodes.
+	 */
+	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
+		alt[i].svid = orig[i].svid;
+		alt[i].mid = orig[i].mid;
+		if (!alt[i].svid)
+			break;
+	}
+
+	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
+		if (!alt[i].svid)
+			break;
+
+		/* already checked and considered */
+		if (alt[i].checked)
+			continue;
+
+		if (!DP_CONF_GET_PIN_ASSIGN(alt[i].mid)) {
+			/* Found a non-DP altmode */
+			new_alt[k].svid = alt[i].svid;
+			new_alt[k].mid |= alt[i].mid;
+			new_alt[k].linked_idx = i;
+			alt[i].linked_idx = k;
+			updated[k].svid = new_alt[k].svid;
+			updated[k].mid = new_alt[k].mid;
+			k++;
+			continue;
+		}
+
+		for (j = i + 1; j < UCSI_MAX_ALTMODES; j++) {
+			if (alt[i].svid != alt[j].svid ||
+			    !DP_CONF_GET_PIN_ASSIGN(alt[j].mid)) {
+				continue;
+			} else {
+				/* Found duplicate DP mode */
+				new_alt[k].svid = alt[i].svid;
+				new_alt[k].mid |= alt[i].mid | alt[j].mid;
+				new_alt[k].linked_idx = UCSI_MULTI_DP_INDEX;
+				alt[i].linked_idx = k;
+				alt[j].linked_idx = k;
+				alt[j].checked = true;
+				found = true;
+			}
+		}
+		if (found) {
+			uc->has_multiple_dp = true;
+		} else {
+			/* Didn't find any duplicate DP altmode */
+			new_alt[k].svid = alt[i].svid;
+			new_alt[k].mid |= alt[i].mid;
+			new_alt[k].linked_idx = i;
+			alt[i].linked_idx = k;
+		}
+		updated[k].svid = new_alt[k].svid;
+		updated[k].mid = new_alt[k].mid;
+		k++;
+	}
+	return found;
+}
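To make the merge above concrete, a small and purely illustrative sketch: when
the PPM reports the DisplayPort SVID twice, each Discover Mode VDO typically
sets different pin-assignment bits, and the loop folds the two entries into one
whose MID is the bitwise OR of both, tagged with UCSI_MULTI_DP_INDEX so the
actual pin is picked later in ucsi_ccg_update_set_new_cam_cmd(). The MID values
here are made up.

    #include <linux/usb/typec_dp.h>

    /* Illustration only: two hypothetical DP MIDs for the same SVID, each
     * advertising a different pin assignment, collapse into one entry. */
    static u32 example_merge_dp_mids(u32 mid_first, u32 mid_second)
    {
            u32 merged = mid_first | mid_second;

            /* DP_CONF_GET_PIN_ASSIGN(merged) now carries both assignments. */
            return merged;
    }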
+
+static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
+					    struct ucsi_connector *con,
+					    u64 *cmd)
+{
+	struct ucsi_ccg_altmode *new_port, *port;
+	struct typec_altmode *alt = NULL;
+	u8 new_cam, cam, pin;
+	bool enter_new_mode;
+	int i, j, k = 0xff;
+
+	port = uc->orig;
+	new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd);
+	new_port = &uc->updated[new_cam];
+	cam = new_port->linked_idx;
+	enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd);
+
+	/*
+	 * If the CAM is UCSI_MULTI_DP_INDEX, this is a DP altmode with
+	 * multiple DP modes. Find the CAM with the best pin assignment
+	 * among all DP modes. Prioritize pins E->D->C after making sure
+	 * the partner supports that pin.
+	 */
+	if (cam == UCSI_MULTI_DP_INDEX) {
+		if (enter_new_mode) {
+			for (i = 0; con->partner_altmode[i]; i++) {
+				alt = con->partner_altmode[i];
+				if (alt->svid == new_port->svid)
+					break;
+			}
+			/*
+			 * alt will always be non-NULL since this is a
+			 * UCSI_SET_NEW_CAM command, so there will be at
+			 * least one con->partner_altmode[i] whose svid
+			 * matches new_port->svid.
+			 */
+			for (j = 0; port[j].svid; j++) {
+				pin = DP_CONF_GET_PIN_ASSIGN(port[j].mid);
+				if (alt && port[j].svid == alt->svid &&
+				    (pin & DP_CONF_GET_PIN_ASSIGN(alt->vdo))) {
+					/* prioritize pin E->D->C */
+					if (k == 0xff || (k != 0xff && pin >
+					    DP_CONF_GET_PIN_ASSIGN(port[k].mid))
+					    ) {
+						k = j;
+					}
+				}
+			}
+			cam = k;
+			new_port->active_idx = cam;
+		} else {
+			cam = new_port->active_idx;
+		}
+	}
+	*cmd &= ~UCSI_SET_NEW_CAM_AM_MASK;
+	*cmd |= UCSI_SET_NEW_CAM_SET_AM(cam);
+}
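Roughly, the comparison in the inner loop above prefers the entry whose
pin-assignment bits encode the higher pin, which yields the E->D->C priority
when each original entry advertises a single pin: if the merged altmode covers
pins C, D and E and the partner advertises C and E, the E entry wins.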
+
 static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
 			 void *val, size_t val_len)
 {
+	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+	int ret;
 	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
 
-	return ccg_read(ucsi_get_drvdata(ucsi), reg, val, val_len);
+	ret = ccg_read(uc, reg, val, val_len);
+	if (ret)
+		return ret;
+
+	if (offset == UCSI_MESSAGE_IN) {
+		if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_GET_CURRENT_CAM &&
+		    uc->has_multiple_dp) {
+			ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)val);
+		}
+		uc->last_cmd_sent = 0;
+	}
+
+	return ret;
 }
 
 static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
@@ -339,12 +511,26 @@ static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
 			       const void *val, size_t val_len)
 {
 	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+	struct ucsi_connector *con;
+	int con_index;
 	int ret;
 
 	mutex_lock(&uc->lock);
 	pm_runtime_get_sync(uc->dev);
 	set_bit(DEV_CMD_PENDING, &uc->flags);
 
+	if (offset == UCSI_CONTROL && val_len == sizeof(uc->last_cmd_sent)) {
+		uc->last_cmd_sent = *(u64 *)val;
+
+		if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_SET_NEW_CAM &&
+		    uc->has_multiple_dp) {
+			con_index = (uc->last_cmd_sent >> 16) &
+				    UCSI_CMD_CONNECTOR_MASK;
+			con = &uc->ucsi->connector[con_index - 1];
+			ucsi_ccg_update_set_new_cam_cmd(uc, con, (u64 *)val);
+		}
+	}
+
 	ret = ucsi_ccg_async_write(ucsi, offset, val, val_len);
 	if (ret)
 		goto err_clear_bit;
@@ -363,7 +549,8 @@ static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
 static const struct ucsi_operations ucsi_ccg_ops = {
 	.read = ucsi_ccg_read,
 	.sync_write = ucsi_ccg_sync_write,
-	.async_write = ucsi_ccg_async_write
+	.async_write = ucsi_ccg_async_write,
+	.update_altmodes = ucsi_ccg_update_altmodes
 };
 
 static irqreturn_t ccg_irq_handler(int irq, void *data)
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index 7570c76..8ad14e5 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -74,7 +74,7 @@ static ssize_t create_store(struct kobject *kobj, struct device *dev,
 	return count;
 }
 
-MDEV_TYPE_ATTR_WO(create);
+static MDEV_TYPE_ATTR_WO(create);
 
 static void mdev_type_release(struct kobject *kobj)
 {
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
index f2983f0..df4d960 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -97,8 +97,10 @@ static void vfio_pci_nvgpu_release(struct vfio_pci_device *vdev,
 
 	/* If there were any mappings at all... */
 	if (data->mm) {
-		ret = mm_iommu_put(data->mm, data->mem);
-		WARN_ON(ret);
+		if (data->mem) {
+			ret = mm_iommu_put(data->mm, data->mem);
+			WARN_ON(ret);
+		}
 
 		mmdrop(data->mm);
 	}
@@ -159,7 +161,7 @@ static int vfio_pci_nvgpu_mmap(struct vfio_pci_device *vdev,
 	data->useraddr = vma->vm_start;
 	data->mm = current->mm;
 
-	atomic_inc(&data->mm->mm_count);
+	mmgrab(data->mm);
 	ret = (int) mm_iommu_newdev(data->mm, data->useraddr,
 			vma_pages(vma), data->gpu_hpa, &data->mem);
 
diff --git a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
index 40d4fb9..abdca90 100644
--- a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
+++ b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
@@ -24,7 +24,7 @@
 #define MDIO_AN_INT		0x8002
 #define MDIO_AN_INTMASK		0x8001
 
-static unsigned int xmdio_read(void *ioaddr, unsigned int mmd,
+static unsigned int xmdio_read(void __iomem *ioaddr, unsigned int mmd,
 			       unsigned int reg)
 {
 	unsigned int mmd_address, value;
@@ -35,7 +35,7 @@ static unsigned int xmdio_read(void *ioaddr, unsigned int mmd,
 	return value;
 }
 
-static void xmdio_write(void *ioaddr, unsigned int mmd,
+static void xmdio_write(void __iomem *ioaddr, unsigned int mmd,
 			unsigned int reg, unsigned int value)
 {
 	unsigned int mmd_address;
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 26cef65..16b3adc 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -79,7 +79,7 @@ static long tce_iommu_mm_set(struct tce_container *container)
 	}
 	BUG_ON(!current->mm);
 	container->mm = current->mm;
-	atomic_inc(&container->mm->mm_count);
+	mmgrab(container->mm);
 
 	return 0;
 }
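A short aside on the atomic_inc() to mmgrab() conversions in the nvlink2 and
spapr_tce hunks above: mmgrab()/mmdrop() are the canonical helpers for pinning
and releasing mm_struct::mm_count, i.e. they keep the mm_struct itself alive
rather than its address space. A minimal sketch with an invented container
type:

    #include <linux/sched.h>
    #include <linux/sched/mm.h>

    struct example_ctx {
            struct mm_struct *mm;
    };

    static void example_take_mm(struct example_ctx *ctx)
    {
            ctx->mm = current->mm;
            mmgrab(ctx->mm);        /* was: atomic_inc(&mm->mm_count) */
    }

    static void example_release_mm(struct example_ctx *ctx)
    {
            mmdrop(ctx->mm);        /* pairs with mmgrab() */
    }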
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2ada8e6..a177bf2 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -309,9 +309,8 @@ static int put_pfn(unsigned long pfn, int prot)
 {
 	if (!is_invalid_reserved_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
-		if (prot & IOMMU_WRITE)
-			SetPageDirty(page);
-		put_page(page);
+
+		unpin_user_pages_dirty_lock(&page, 1, prot & IOMMU_WRITE);
 		return 1;
 	}
 	return 0;
@@ -322,7 +321,6 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 {
 	struct page *page[1];
 	struct vm_area_struct *vma;
-	struct vm_area_struct *vmas[1];
 	unsigned int flags = 0;
 	int ret;
 
@@ -330,33 +328,14 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 		flags |= FOLL_WRITE;
 
 	down_read(&mm->mmap_sem);
-	if (mm == current->mm) {
-		ret = get_user_pages(vaddr, 1, flags | FOLL_LONGTERM, page,
-				     vmas);
-	} else {
-		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
-					    vmas, NULL);
-		/*
-		 * The lifetime of a vaddr_get_pfn() page pin is
-		 * userspace-controlled. In the fs-dax case this could
-		 * lead to indefinite stalls in filesystem operations.
-		 * Disallow attempts to pin fs-dax pages via this
-		 * interface.
-		 */
-		if (ret > 0 && vma_is_fsdax(vmas[0])) {
-			ret = -EOPNOTSUPP;
-			put_page(page[0]);
-		}
-	}
-	up_read(&mm->mmap_sem);
-
+	ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM,
+				    page, NULL, NULL);
 	if (ret == 1) {
 		*pfn = page_to_pfn(page[0]);
-		return 0;
+		ret = 0;
+		goto done;
 	}
 
-	down_read(&mm->mmap_sem);
-
 	vaddr = untagged_addr(vaddr);
 
 	vma = find_vma_intersection(mm, vaddr, vaddr + 1);
@@ -366,7 +345,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 		if (is_invalid_reserved_pfn(*pfn))
 			ret = 0;
 	}
-
+done:
 	up_read(&mm->mmap_sem);
 	return ret;
 }
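Not part of the patch: a minimal sketch of the pin/unpin pairing that
vaddr_get_pfn() and put_pfn() now rely on, reduced to the current-mm case and
with simplified error handling.

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Pin one user page for long-term use, then release it, marking it
     * dirty only if it may have been written. */
    static int example_pin_one_page(unsigned long vaddr, bool write)
    {
            struct mm_struct *mm = current->mm;
            unsigned int flags = FOLL_LONGTERM | (write ? FOLL_WRITE : 0);
            struct page *page;
            long ret;

            down_read(&mm->mmap_sem);
            ret = pin_user_pages(vaddr, 1, flags, &page, NULL);
            up_read(&mm->mmap_sem);
            if (ret != 1)
                    return ret < 0 ? ret : -EFAULT;

            /* ... hand page_to_pfn(page) to the mapping path ... */

            /* Replaces the old SetPageDirty() + put_page() combination. */
            unpin_user_pages_dirty_lock(&page, 1, write);
            return 0;
    }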
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 94ccb90..8a4361e 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -11,7 +11,6 @@
 #include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
 #include <linux/lcd.h>
 #include <linux/module.h>
 #include <linux/spi/spi.h>
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index d344fb0..d5d5fb4 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -11,7 +11,7 @@
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/platform_data/bd6107.h>
@@ -71,6 +71,7 @@ struct bd6107 {
 	struct i2c_client *client;
 	struct backlight_device *backlight;
 	struct bd6107_platform_data *pdata;
+	struct gpio_desc *reset;
 };
 
 static int bd6107_write(struct bd6107 *bd, u8 reg, u8 data)
@@ -94,9 +95,10 @@ static int bd6107_backlight_update_status(struct backlight_device *backlight)
 		bd6107_write(bd, BD6107_MAINCNT1, brightness);
 		bd6107_write(bd, BD6107_LEDCNT1, BD6107_LEDCNT1_LEDONOFF1);
 	} else {
-		gpio_set_value(bd->pdata->reset, 0);
+		/* Assert the reset line (gpiolib will handle active low) */
+		gpiod_set_value(bd->reset, 1);
 		msleep(24);
-		gpio_set_value(bd->pdata->reset, 1);
+		gpiod_set_value(bd->reset, 0);
 	}
 
 	return 0;
@@ -125,8 +127,8 @@ static int bd6107_probe(struct i2c_client *client,
 	struct bd6107 *bd;
 	int ret;
 
-	if (pdata == NULL || !pdata->reset) {
-		dev_err(&client->dev, "No reset GPIO in platform data\n");
+	if (pdata == NULL) {
+		dev_err(&client->dev, "No platform data\n");
 		return -EINVAL;
 	}
 
@@ -144,10 +146,16 @@ static int bd6107_probe(struct i2c_client *client,
 	bd->client = client;
 	bd->pdata = pdata;
 
-	ret = devm_gpio_request_one(&client->dev, pdata->reset,
-				    GPIOF_DIR_OUT | GPIOF_INIT_LOW, "reset");
-	if (ret < 0) {
+	/*
+	 * Request the reset GPIO line with GPIOD_OUT_HIGH, meaning asserted.
+	 * The machine descriptor table (or other hardware description) should
+	 * therefore flag the line as active low, so that this request asserts
+	 * the reset.
+	 */
+	bd->reset = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(bd->reset)) {
 		dev_err(&client->dev, "unable to request reset GPIO\n");
+		ret = PTR_ERR(bd->reset);
 		return ret;
 	}
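For illustration (the "reset" consumer name matches the hunk above, the rest is
invented): the descriptor-based pattern the bd6107 conversion follows, namely
request the line with devm_gpiod_get() and drive logical values, letting
gpiolib apply any active-low polarity from the hardware description.

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int example_request_reset(struct device *dev,
                                     struct gpio_desc **reset)
    {
            /* Start with the line logically asserted. */
            *reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
            if (IS_ERR(*reset))
                    return PTR_ERR(*reset);

            /* Deassert; gpiolib inverts the physical level if the line is
             * described as active low. */
            gpiod_set_value(*reset, 0);
            return 0;
    }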
 
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index d46052d..3d276b3 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -956,8 +956,8 @@ static int wled_configure(struct wled *wled, int version)
 	struct wled_config *cfg = &wled->cfg;
 	struct device *dev = wled->dev;
 	const __be32 *prop_addr;
-	u32 size, val, c, string_len;
-	int rc, i, j;
+	u32 size, val, c;
+	int rc, i, j, string_len;
 
 	const struct wled_u32_opts *u32_opts = NULL;
 	const struct wled_u32_opts wled3_opts[] = {
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index c10e17f..70c10ea 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -93,7 +93,6 @@
 
 config DUMMY_CONSOLE
 	bool
-	depends on VGA_CONSOLE!=y || SGI_NEWPORT_CONSOLE!=y 
 	default y
 
 config DUMMY_CONSOLE_COLUMNS
diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c
index d48e960..9811f1ba 100644
--- a/drivers/video/fbdev/68328fb.c
+++ b/drivers/video/fbdev/68328fb.c
@@ -96,7 +96,7 @@ static int mc68x328fb_pan_display(struct fb_var_screeninfo *var,
 			   struct fb_info *info);
 static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma);
 
-static struct fb_ops mc68x328fb_ops = {
+static const struct fb_ops mc68x328fb_ops = {
 	.fb_check_var	= mc68x328fb_check_var,
 	.fb_set_par	= mc68x328fb_set_par,
 	.fb_setcolreg	= mc68x328fb_setcolreg,
@@ -405,20 +405,8 @@ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 
 int __init mc68x328fb_setup(char *options)
 {
-#if 0
-	char *this_opt;
-#endif
-
 	if (!options || !*options)
 		return 1;
-#if 0
-	while ((this_opt = strsep(&options, ",")) != NULL) {
-		if (!*this_opt)
-			continue;
-		if (!strncmp(this_opt, "disable", 7))
-			mc68x328fb_enable = 0;
-	}
-#endif
 	return 1;
 }
 
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index aa9541b..f65991a 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2215,6 +2215,7 @@
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	select FB_DEFERRED_IO
+	select DMA_CMA if HAVE_DMA_CONTIGUOUS && CMA
 	help
 	  This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
 
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index 7cacae5..a3af495 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -604,7 +604,7 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops acornfb_ops = {
+static const struct fb_ops acornfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= acornfb_check_var,
 	.fb_set_par	= acornfb_set_par,
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 7de43be..c3d55fc6 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -423,7 +423,7 @@ static int clcdfb_mmap(struct fb_info *info,
 	return ret;
 }
 
-static struct fb_ops clcdfb_ops = {
+static const struct fb_ops clcdfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= clcdfb_check_var,
 	.fb_set_par	= clcdfb_set_par,
diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c
index 91ddc96..20e03e0 100644
--- a/drivers/video/fbdev/amifb.c
+++ b/drivers/video/fbdev/amifb.c
@@ -3493,7 +3493,7 @@ static irqreturn_t amifb_interrupt(int irq, void *dev_id)
 }
 
 
-static struct fb_ops amifb_ops = {
+static const struct fb_ops amifb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= amifb_check_var,
 	.fb_set_par	= amifb_set_par,
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index a48741a..314ab82 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -491,7 +491,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
 	return err;
 }
 
-static struct fb_ops arcfb_ops = {
+static const struct fb_ops arcfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_open	= arcfb_open,
 	.fb_read        = fb_sys_read,
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index f940e8b..11ab9a1 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -917,7 +917,7 @@ static int arkfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info
 
 /* Frame buffer operations */
 
-static struct fb_ops arkfb_ops = {
+static const struct fb_ops arkfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_open	= arkfb_open,
 	.fb_release	= arkfb_release,
diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
index ea31054..3e006da 100644
--- a/drivers/video/fbdev/asiliantfb.c
+++ b/drivers/video/fbdev/asiliantfb.c
@@ -95,7 +95,7 @@ static int asiliantfb_set_par(struct fb_info *info);
 static int asiliantfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
 				u_int transp, struct fb_info *info);
 
-static struct fb_ops asiliantfb_ops = {
+static const struct fb_ops asiliantfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= asiliantfb_check_var,
 	.fb_set_par	= asiliantfb_set_par,
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 5ff8e03..d567f5d 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -824,7 +824,7 @@ static int atmel_lcdfb_blank(int blank_mode, struct fb_info *info)
 	return ((blank_mode == FB_BLANK_NORMAL) ? 1 : 0);
 }
 
-static struct fb_ops atmel_lcdfb_ops = {
+static const struct fb_ops atmel_lcdfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= atmel_lcdfb_check_var,
 	.fb_set_par	= atmel_lcdfb_set_par,
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index fc1e45d..d7e41c8 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -509,7 +509,7 @@ static void aty128_bl_set_power(struct fb_info *info, int power);
 			  (readb(bios + (v) + 3) << 24))
 
 
-static struct fb_ops aty128fb_ops = {
+static const struct fb_ops aty128fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= aty128fb_check_var,
 	.fb_set_par	= aty128fb_set_par,
diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
index e5a347c..a7833bc 100644
--- a/drivers/video/fbdev/aty/atyfb.h
+++ b/drivers/video/fbdev/aty/atyfb.h
@@ -341,7 +341,7 @@ extern const u8 aty_postdividers[8];
      *  Hardware cursor support
      */
 
-extern int aty_init_cursor(struct fb_info *info);
+extern int aty_init_cursor(struct fb_info *info, struct fb_ops *atyfb_ops);
 
     /*
      *  Hardware acceleration
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 79d5487..175d259 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -1326,10 +1326,10 @@ static int atyfb_set_par(struct fb_info *info)
 	par->accel_flags = var->accel_flags; /* hack */
 
 	if (var->accel_flags) {
-		info->fbops->fb_sync = atyfb_sync;
+		atyfb_ops.fb_sync = atyfb_sync;
 		info->flags &= ~FBINFO_HWACCEL_DISABLED;
 	} else {
-		info->fbops->fb_sync = NULL;
+		atyfb_ops.fb_sync = NULL;
 		info->flags |= FBINFO_HWACCEL_DISABLED;
 	}
 
@@ -2712,7 +2712,7 @@ static int aty_init(struct fb_info *info)
 
 #ifdef CONFIG_FB_ATY_CT
 	if (!noaccel && M64_HAS(INTEGRATED))
-		aty_init_cursor(info);
+		aty_init_cursor(info, &atyfb_ops);
 #endif /* CONFIG_FB_ATY_CT */
 	info->var = var;
 
diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
index 4cde25e..b06fa6e 100644
--- a/drivers/video/fbdev/aty/mach64_cursor.c
+++ b/drivers/video/fbdev/aty/mach64_cursor.c
@@ -194,7 +194,7 @@ static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
 	return 0;
 }
 
-int aty_init_cursor(struct fb_info *info)
+int aty_init_cursor(struct fb_info *info, struct fb_ops *atyfb_ops)
 {
 	unsigned long addr;
 
@@ -219,7 +219,7 @@ int aty_init_cursor(struct fb_info *info)
 	info->sprite.buf_align = 16; 	/* and 64 lines tall. */
 	info->sprite.flags = FB_PIXMAP_IO;
 
-	info->fbops->fb_cursor = atyfb_cursor;
+	atyfb_ops->fb_cursor = atyfb_cursor;
 
 	return 0;
 }
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 4ca0786..3af00e3 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -1965,7 +1965,7 @@ static int radeonfb_set_par(struct fb_info *info)
 }
 
 
-static struct fb_ops radeonfb_ops = {
+static const struct fb_ops radeonfb_ops = {
 	.owner			= THIS_MODULE,
 	.fb_check_var		= radeonfb_check_var,
 	.fb_set_par		= radeonfb_set_par,
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 99941ae..37a6512 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -348,7 +348,7 @@ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
 			fbdev->fb_len);
 }
 
-static struct fb_ops au1100fb_ops =
+static const struct fb_ops au1100fb_ops =
 {
 	.owner			= THIS_MODULE,
 	.fb_setcolreg		= au1100fb_fb_setcolreg,
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 265d3b4..c00e01a 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1483,7 +1483,7 @@ static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
 }
 
 
-static struct fb_ops au1200fb_fb_ops = {
+static const struct fb_ops au1200fb_fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= au1200fb_fb_check_var,
 	.fb_set_par	= au1200fb_fb_set_par,
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index d6ba348..fd66f4d 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -1048,7 +1048,7 @@ static ssize_t broadsheetfb_write(struct fb_info *info, const char __user *buf,
 	return (err) ? err : count;
 }
 
-static struct fb_ops broadsheetfb_ops = {
+static const struct fb_ops broadsheetfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_read        = fb_sys_read,
 	.fb_write	= broadsheetfb_write,
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index 436f10f..0d9a6bb 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -37,7 +37,7 @@ static int bw2_ioctl(struct fb_info *, unsigned int, unsigned long);
  *  Frame buffer operations
  */
 
-static struct fb_ops bw2_ops = {
+static const struct fb_ops bw2_ops = {
 	.owner			= THIS_MODULE,
 	.fb_blank		= bw2_blank,
 	.fb_fillrect		= cfb_fillrect,
diff --git a/drivers/video/fbdev/carminefb.c b/drivers/video/fbdev/carminefb.c
index 27ba2ed..3a1c2e0 100644
--- a/drivers/video/fbdev/carminefb.c
+++ b/drivers/video/fbdev/carminefb.c
@@ -527,7 +527,7 @@ static int init_hardware(struct carmine_hw *hw)
 	return 0;
 }
 
-static struct fb_ops carminefb_ops = {
+static const struct fb_ops carminefb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_fillrect	= cfb_fillrect,
 	.fb_copyarea	= cfb_copyarea,
diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c
index d80d99db..a620b51 100644
--- a/drivers/video/fbdev/cg14.c
+++ b/drivers/video/fbdev/cg14.c
@@ -39,7 +39,7 @@ static int cg14_pan_display(struct fb_var_screeninfo *, struct fb_info *);
  *  Frame buffer operations
  */
 
-static struct fb_ops cg14_ops = {
+static const struct fb_ops cg14_ops = {
 	.owner			= THIS_MODULE,
 	.fb_setcolreg		= cg14_setcolreg,
 	.fb_pan_display		= cg14_pan_display,
diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c
index 09f616d..77f6470 100644
--- a/drivers/video/fbdev/cg3.c
+++ b/drivers/video/fbdev/cg3.c
@@ -39,7 +39,7 @@ static int cg3_ioctl(struct fb_info *, unsigned int, unsigned long);
  *  Frame buffer operations
  */
 
-static struct fb_ops cg3_ops = {
+static const struct fb_ops cg3_ops = {
 	.owner			= THIS_MODULE,
 	.fb_setcolreg		= cg3_setcolreg,
 	.fb_blank		= cg3_blank,
diff --git a/drivers/video/fbdev/cg6.c b/drivers/video/fbdev/cg6.c
index d5888ae..a1c68cd 100644
--- a/drivers/video/fbdev/cg6.c
+++ b/drivers/video/fbdev/cg6.c
@@ -44,7 +44,7 @@ static int cg6_pan_display(struct fb_var_screeninfo *, struct fb_info *);
  *  Frame buffer operations
  */
 
-static struct fb_ops cg6_ops = {
+static const struct fb_ops cg6_ops = {
 	.owner			= THIS_MODULE,
 	.fb_setcolreg		= cg6_setcolreg,
 	.fb_blank		= cg6_blank,
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index f4dc320..998067b 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -79,7 +79,7 @@ static int chipsfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
 			     u_int transp, struct fb_info *info);
 static int chipsfb_blank(int blank, struct fb_info *info);
 
-static struct fb_ops chipsfb_ops = {
+static const struct fb_ops chipsfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= chipsfb_check_var,
 	.fb_set_par	= chipsfb_set_par,
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index e4ce566..c3a3e34 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -1956,7 +1956,7 @@ static void cirrusfb_zorro_unmap(struct fb_info *info)
 #endif /* CONFIG_ZORRO */
 
 /* function table of the above functions */
-static struct fb_ops cirrusfb_ops = {
+static const struct fb_ops cirrusfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_open	= cirrusfb_open,
 	.fb_release	= cirrusfb_release,
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index cabbc72..c5d15c6 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -153,7 +153,7 @@ static int clps711x_fb_blank(int blank, struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops clps711x_fb_ops = {
+static const struct fb_ops clps711x_fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_setcolreg	= clps711x_fb_setcolreg,
 	.fb_check_var	= clps711x_fb_check_var,
diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
index 5449469..5f8b6324 100644
--- a/drivers/video/fbdev/cobalt_lcdfb.c
+++ b/drivers/video/fbdev/cobalt_lcdfb.c
@@ -269,7 +269,7 @@ static int cobalt_lcdfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
 	return 0;
 }
 
-static struct fb_ops cobalt_lcd_fbops = {
+static const struct fb_ops cobalt_lcd_fbops = {
 	.owner		= THIS_MODULE,
 	.fb_read	= cobalt_lcdfb_read,
 	.fb_write	= cobalt_lcdfb_write,
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 9a680ef..38b61cd 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -157,7 +157,7 @@ static int default_vmode __initdata = VMODE_NVRAM;
 static int default_cmode __initdata = CMODE_NVRAM;
 
 
-static struct fb_ops controlfb_ops = {
+static const struct fb_ops controlfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= controlfb_check_var,
 	.fb_set_par	= controlfb_set_par,
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 82c20c6..a591d291 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -171,7 +171,6 @@ int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	vma->vm_private_data = info;
 	return 0;
 }
-EXPORT_SYMBOL(fb_deferred_io_mmap);
 
 /* workqueue callback */
 static void fb_deferred_io_work(struct work_struct *work)
@@ -206,7 +205,6 @@ void fb_deferred_io_init(struct fb_info *info)
 
 	BUG_ON(!fbdefio);
 	mutex_init(&fbdefio->lock);
-	info->fbops->fb_mmap = fb_deferred_io_mmap;
 	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
 	INIT_LIST_HEAD(&fbdefio->pagelist);
 	if (fbdefio->delay == 0) /* set a default of 1 s */
@@ -237,7 +235,6 @@ void fb_deferred_io_cleanup(struct fb_info *info)
 		page->mapping = NULL;
 	}
 
-	info->fbops->fb_mmap = NULL;
 	mutex_destroy(&fbdefio->lock);
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index c9235a2f..bb6ae99 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -536,6 +536,13 @@ static int __init fb_console_setup(char *this_opt)
 				fb_center_logo = true;
 			continue;
 		}
+
+		if (!strncmp(options, "logo-count:", 11)) {
+			options += 11;
+			if (*options)
+				fb_logo_count = simple_strtol(options, &options, 0);
+			continue;
+		}
 	}
 	return 1;
 }
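The new option is parsed alongside the existing fbcon options, so it would
presumably be passed on the kernel command line as, for example,
fbcon=logo-count:1 to draw a single boot logo regardless of CPU count. Judging
by the fb_logo_count handling in fbmem.c below, logo-count:0 suppresses the
logo altogether, while the negative default keeps the old one-logo-per-online-
CPU behaviour.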
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 6f6fc78..d045549 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -54,7 +54,8 @@ int num_registered_fb __read_mostly;
 EXPORT_SYMBOL(num_registered_fb);
 
 bool fb_center_logo __read_mostly;
-EXPORT_SYMBOL(fb_center_logo);
+
+int fb_logo_count __read_mostly = -1;
 
 static struct fb_info *get_fb_info(unsigned int idx)
 {
@@ -620,7 +621,7 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
 	memset(&fb_logo, 0, sizeof(struct logo_data));
 
 	if (info->flags & FBINFO_MISC_TILEBLITTING ||
-	    info->fbops->owner)
+	    info->fbops->owner || !fb_logo_count)
 		return 0;
 
 	if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -686,10 +687,14 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
 
 int fb_show_logo(struct fb_info *info, int rotate)
 {
+	unsigned int count;
 	int y;
 
-	y = fb_show_logo_line(info, rotate, fb_logo.logo, 0,
-			      num_online_cpus());
+	if (!fb_logo_count)
+		return 0;
+
+	count = fb_logo_count < 0 ? num_online_cpus() : fb_logo_count;
+	y = fb_show_logo_line(info, rotate, fb_logo.logo, 0, count);
 	y = fb_show_extra_logos(info, y, rotate);
 
 	return y;
@@ -1079,7 +1084,7 @@ EXPORT_SYMBOL(fb_blank);
 static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
 			unsigned long arg)
 {
-	struct fb_ops *fb;
+	const struct fb_ops *fb;
 	struct fb_var_screeninfo var;
 	struct fb_fix_screeninfo fix;
 	struct fb_cmap cmap_from;
@@ -1292,7 +1297,7 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
 			    unsigned long arg)
 {
 	struct fb_info *info = file_fb_info(file);
-	struct fb_ops *fb;
+	const struct fb_ops *fb;
 	long ret = -ENOIOCTLCMD;
 
 	if (!info)
@@ -1332,16 +1337,23 @@ static int
 fb_mmap(struct file *file, struct vm_area_struct * vma)
 {
 	struct fb_info *info = file_fb_info(file);
-	struct fb_ops *fb;
+	int (*fb_mmap_fn)(struct fb_info *info, struct vm_area_struct *vma);
 	unsigned long mmio_pgoff;
 	unsigned long start;
 	u32 len;
 
 	if (!info)
 		return -ENODEV;
-	fb = info->fbops;
 	mutex_lock(&info->mm_lock);
-	if (fb->fb_mmap) {
+
+	fb_mmap_fn = info->fbops->fb_mmap;
+
+#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
+	if (info->fbdefio)
+		fb_mmap_fn = fb_deferred_io_mmap;
+#endif
+
+	if (fb_mmap_fn) {
 		int res;
 
 		/*
@@ -1349,7 +1361,7 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
 		 * SME protection is removed ahead of the call
 		 */
 		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
-		res = fb->fb_mmap(info, vma);
+		res = fb_mmap_fn(info, vma);
 		mutex_unlock(&info->mm_lock);
 		return res;
 	}
@@ -1673,7 +1685,7 @@ static void unbind_console(struct fb_info *fb_info)
 	console_unlock();
 }
 
-void unlink_framebuffer(struct fb_info *fb_info)
+static void unlink_framebuffer(struct fb_info *fb_info)
 {
 	int i;
 
@@ -1692,7 +1704,6 @@ void unlink_framebuffer(struct fb_info *fb_info)
 
 	fb_info->dev = NULL;
 }
-EXPORT_SYMBOL(unlink_framebuffer);
 
 static void do_unregister_framebuffer(struct fb_info *fb_info)
 {
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index 3a2d9ff..460826a 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -1060,7 +1060,7 @@ static int cyber2000fb_blank(int blank, struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops cyber2000fb_ops = {
+static const struct fb_ops cyber2000fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= cyber2000fb_check_var,
 	.fb_set_par	= cyber2000fb_set_par,
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 2d3dcc5..73c3c4c 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1294,7 +1294,7 @@ static int da8xxfb_set_par(struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops da8xx_fb_ops = {
+static const struct fb_ops da8xx_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = fb_check_var,
 	.fb_set_par = da8xxfb_set_par,
diff --git a/drivers/video/fbdev/dnfb.c b/drivers/video/fbdev/dnfb.c
index 8da517e..3688f91 100644
--- a/drivers/video/fbdev/dnfb.c
+++ b/drivers/video/fbdev/dnfb.c
@@ -108,7 +108,7 @@
 static int dnfb_blank(int blank, struct fb_info *info);
 static void dnfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
 
-static struct fb_ops dn_fb_ops = {
+static const struct fb_ops dn_fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_blank	= dnfb_blank,
 	.fb_fillrect	= cfb_fillrect,
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 1caa372..65491ae 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -255,7 +255,7 @@ static void efifb_destroy(struct fb_info *info)
 	fb_dealloc_cmap(&info->cmap);
 }
 
-static struct fb_ops efifb_ops = {
+static const struct fb_ops efifb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_destroy	= efifb_destroy,
 	.fb_setcolreg	= efifb_setcolreg,
diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
index d04a047..cda2ef3 100644
--- a/drivers/video/fbdev/ep93xx-fb.c
+++ b/drivers/video/fbdev/ep93xx-fb.c
@@ -402,7 +402,7 @@ static int ep93xxfb_setcolreg(unsigned int regno, unsigned int red,
 	return 0;
 }
 
-static struct fb_ops ep93xxfb_ops = {
+static const struct fb_ops ep93xxfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= ep93xxfb_check_var,
 	.fb_set_par	= ep93xxfb_set_par,
diff --git a/drivers/video/fbdev/fb-puv3.c b/drivers/video/fbdev/fb-puv3.c
index fa62c4d..75df6aa 100644
--- a/drivers/video/fbdev/fb-puv3.c
+++ b/drivers/video/fbdev/fb-puv3.c
@@ -644,7 +644,7 @@ int unifb_mmap(struct fb_info *info,
 	return vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
 }
 
-static struct fb_ops unifb_ops = {
+static const struct fb_ops unifb_ops = {
 	.fb_read        = fb_sys_read,
 	.fb_write       = fb_sys_write,
 	.fb_check_var	= unifb_check_var,
diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c
index cd2d1db..948b7318 100644
--- a/drivers/video/fbdev/ffb.c
+++ b/drivers/video/fbdev/ffb.c
@@ -44,7 +44,7 @@ static int ffb_pan_display(struct fb_var_screeninfo *, struct fb_info *);
  *  Frame buffer operations
  */
 
-static struct fb_ops ffb_ops = {
+static const struct fb_ops ffb_ops = {
 	.owner			= THIS_MODULE,
 	.fb_setcolreg		= ffb_setcolreg,
 	.fb_blank		= ffb_blank,
diff --git a/drivers/video/fbdev/fm2fb.c b/drivers/video/fbdev/fm2fb.c
index ac7a4eb..3b727d5 100644
--- a/drivers/video/fbdev/fm2fb.c
+++ b/drivers/video/fbdev/fm2fb.c
@@ -165,7 +165,7 @@ static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
                            u_int transp, struct fb_info *info);
 static int fm2fb_blank(int blank, struct fb_info *info);
 
-static struct fb_ops fm2fb_ops = {
+static const struct fb_ops fm2fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_setcolreg	= fm2fb_setcolreg,
 	.fb_blank	= fm2fb_blank,
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index d19f582..67ebfe5 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -1287,6 +1287,7 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
 		dev_warn(info->dev,
 			 "MFB_SET_PIXFMT value of 0x%08x is deprecated.\n",
 			 MFB_SET_PIXFMT_OLD);
+		/* fall through */
 	case MFB_SET_PIXFMT:
 		if (copy_from_user(&pix_fmt, buf, sizeof(pix_fmt)))
 			return -EFAULT;
@@ -1296,6 +1297,7 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
 		dev_warn(info->dev,
 			 "MFB_GET_PIXFMT value of 0x%08x is deprecated.\n",
 			 MFB_GET_PIXFMT_OLD);
+		/* fall through */
 	case MFB_GET_PIXFMT:
 		pix_fmt = ad->pix_fmt;
 		if (copy_to_user(buf, &pix_fmt, sizeof(pix_fmt)))
@@ -1448,7 +1450,7 @@ static int fsl_diu_release(struct fb_info *info, int user)
 	return res;
 }
 
-static struct fb_ops fsl_diu_ops = {
+static const struct fb_ops fsl_diu_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = fsl_diu_check_var,
 	.fb_set_par = fsl_diu_set_par,
diff --git a/drivers/video/fbdev/g364fb.c b/drivers/video/fbdev/g364fb.c
index 223896c..845b79d 100644
--- a/drivers/video/fbdev/g364fb.c
+++ b/drivers/video/fbdev/g364fb.c
@@ -111,7 +111,7 @@ static int g364fb_setcolreg(u_int regno, u_int red, u_int green,
 static int g364fb_cursor(struct fb_info *info, struct fb_cursor *cursor);
 static int g364fb_blank(int blank, struct fb_info *info);
 
-static struct fb_ops g364fb_ops = {
+static const struct fb_ops g364fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_setcolreg	= g364fb_setcolreg,
 	.fb_pan_display	= g364fb_pan_display,
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index b9f6a82..31270a8 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -1044,7 +1044,7 @@ static int gbefb_mmap(struct fb_info *info,
 	return 0;
 }
 
-static struct fb_ops gbefb_ops = {
+static const struct fb_ops gbefb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= gbefb_check_var,
 	.fb_set_par	= gbefb_set_par,
diff --git a/drivers/video/fbdev/geode/gx1fb_core.c b/drivers/video/fbdev/geode/gx1fb_core.c
index 737e472..5d34d89 100644
--- a/drivers/video/fbdev/geode/gx1fb_core.c
+++ b/drivers/video/fbdev/geode/gx1fb_core.c
@@ -252,7 +252,7 @@ static int parse_panel_option(struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops gx1fb_ops = {
+static const struct fb_ops gx1fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= gx1fb_check_var,
 	.fb_set_par	= gx1fb_set_par,
diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c
index 435ce2a..d38a148 100644
--- a/drivers/video/fbdev/geode/gxfb_core.c
+++ b/drivers/video/fbdev/geode/gxfb_core.c
@@ -265,7 +265,7 @@ static int gxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
 	return 0;
 }
 
-static struct fb_ops gxfb_ops = {
+static const struct fb_ops gxfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= gxfb_check_var,
 	.fb_set_par	= gxfb_set_par,
diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
index b0f07d6..adc2d9c 100644
--- a/drivers/video/fbdev/geode/lxfb_core.c
+++ b/drivers/video/fbdev/geode/lxfb_core.c
@@ -386,7 +386,7 @@ static int lxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
 	return 0;
 }
 
-static struct fb_ops lxfb_ops = {
+static const struct fb_ops lxfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= lxfb_check_var,
 	.fb_set_par	= lxfb_set_par,
diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index f60ac27..9c83ec3 100644
--- a/drivers/video/fbdev/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
@@ -160,7 +160,7 @@ static int goldfish_fb_blank(int blank, struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops goldfish_fb_ops = {
+static const struct fb_ops goldfish_fb_ops = {
 	.owner          = THIS_MODULE,
 	.fb_check_var   = goldfish_fb_check_var,
 	.fb_set_par     = goldfish_fb_set_par,
diff --git a/drivers/video/fbdev/grvga.c b/drivers/video/fbdev/grvga.c
index d22e8b0..07dda03 100644
--- a/drivers/video/fbdev/grvga.c
+++ b/drivers/video/fbdev/grvga.c
@@ -251,7 +251,7 @@ static int grvga_pan_display(struct fb_var_screeninfo *var,
 	return 0;
 }
 
-static struct fb_ops grvga_ops = {
+static const struct fb_ops grvga_ops = {
 	.owner          = THIS_MODULE,
 	.fb_check_var   = grvga_check_var,
 	.fb_set_par	= grvga_set_par,
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index c7502fd..13ded3a 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -599,7 +599,7 @@ static const struct fb_fix_screeninfo gxt4500_fix = {
 	.mmio_len = 0x20000,
 };
 
-static struct fb_ops gxt4500_ops = {
+static const struct fb_ops gxt4500_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = gxt4500_check_var,
 	.fb_set_par = gxt4500_set_par,
diff --git a/drivers/video/fbdev/hecubafb.c b/drivers/video/fbdev/hecubafb.c
index 8577195..00d7710 100644
--- a/drivers/video/fbdev/hecubafb.c
+++ b/drivers/video/fbdev/hecubafb.c
@@ -197,7 +197,7 @@ static ssize_t hecubafb_write(struct fb_info *info, const char __user *buf,
 	return (err) ? err : count;
 }
 
-static struct fb_ops hecubafb_ops = {
+static const struct fb_ops hecubafb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_read        = fb_sys_read,
 	.fb_write	= hecubafb_write,
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 59e1cae..a45fcff 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -523,7 +523,7 @@ static void hgafb_imageblit(struct fb_info *info, const struct fb_image *image)
 	}
 }
 
-static struct fb_ops hgafb_ops = {
+static const struct fb_ops hgafb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_open	= hgafb_open,
 	.fb_release	= hgafb_release,
diff --git a/drivers/video/fbdev/hitfb.c b/drivers/video/fbdev/hitfb.c
index abe3e54..009e5d2 100644
--- a/drivers/video/fbdev/hitfb.c
+++ b/drivers/video/fbdev/hitfb.c
@@ -311,7 +311,7 @@ static int hitfb_set_par(struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops hitfb_ops = {
+static const struct fb_ops hitfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= hitfb_check_var,
 	.fb_set_par		= hitfb_set_par,
diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c
index a79af8f..f02be0d 100644
--- a/drivers/video/fbdev/hpfb.c
+++ b/drivers/video/fbdev/hpfb.c
@@ -184,7 +184,7 @@ static int hpfb_sync(struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops hpfb_ops = {
+static const struct fb_ops hpfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_setcolreg	= hpfb_setcolreg,
 	.fb_blank	= hpfb_blank,
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 4cd27e5..f47d50e 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -31,6 +31,16 @@
  * "set-vmvideo" command. For example
  *     set-vmvideo -vmname name -horizontalresolution:1920 \
  * -verticalresolution:1200 -resolutiontype single
+ *
+ * Gen 1 VMs also support using the VM's physical memory directly for the
+ * framebuffer, which can improve framebuffer and VM efficiency and
+ * performance. This requires allocating contiguous physical memory from the
+ * Linux kernel's CMA allocator. To enable this, supply a kernel parameter
+ * that reserves enough CMA memory for the framebuffer. For example:
+ *    cma=130m
+ * This gives the CMA allocator 130MB of memory that can be allocated to the
+ * framebuffer. For reference, an 8K resolution (7680x4320) framebuffer takes
+ * about 127MB of memory.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -228,7 +238,6 @@ struct synthvid_msg {
 } __packed;
 
 
-
 /* FB driver definitions and structures */
 #define HVFB_WIDTH 1152 /* default screen width */
 #define HVFB_HEIGHT 864 /* default screen height */
@@ -258,12 +267,15 @@ struct hvfb_par {
 	/* If true, the VSC notifies the VSP on every framebuffer change */
 	bool synchronous_fb;
 
+	/* If true, need to copy from deferred IO mem to framebuffer mem */
+	bool need_docopy;
+
 	struct notifier_block hvfb_panic_nb;
 
 	/* Memory for deferred IO and frame buffer itself */
 	unsigned char *dio_vp;
 	unsigned char *mmio_vp;
-	unsigned long mmio_pp;
+	phys_addr_t mmio_pp;
 
 	/* Dirty rectangle, protected by delayed_refresh_lock */
 	int x1, y1, x2, y2;
@@ -434,7 +446,7 @@ static void synthvid_deferred_io(struct fb_info *p,
 		maxy = max_t(int, maxy, y2);
 
 		/* Copy from dio space to mmio address */
-		if (par->fb_ready)
+		if (par->fb_ready && par->need_docopy)
 			hvfb_docopy(par, start, PAGE_SIZE);
 	}
 
@@ -751,12 +763,12 @@ static void hvfb_update_work(struct work_struct *w)
 		return;
 
 	/* Copy the dirty rectangle to frame buffer memory */
-	for (j = y1; j < y2; j++) {
-		hvfb_docopy(par,
-			    j * info->fix.line_length +
-			    (x1 * screen_depth / 8),
-			    (x2 - x1) * screen_depth / 8);
-	}
+	if (par->need_docopy)
+		for (j = y1; j < y2; j++)
+			hvfb_docopy(par,
+				    j * info->fix.line_length +
+				    (x1 * screen_depth / 8),
+				    (x2 - x1) * screen_depth / 8);
 
 	/* Refresh */
 	if (par->fb_ready && par->update)
@@ -801,7 +813,8 @@ static int hvfb_on_panic(struct notifier_block *nb,
 	par = container_of(nb, struct hvfb_par, hvfb_panic_nb);
 	par->synchronous_fb = true;
 	info = par->info;
-	hvfb_docopy(par, 0, dio_fb_size);
+	if (par->need_docopy)
+		hvfb_docopy(par, 0, dio_fb_size);
 	synthvid_update(info, 0, 0, INT_MAX, INT_MAX);
 
 	return NOTIFY_DONE;
@@ -895,7 +908,7 @@ static void hvfb_cfb_imageblit(struct fb_info *p,
 					       image->width, image->height);
 }
 
-static struct fb_ops hvfb_ops = {
+static const struct fb_ops hvfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = hvfb_check_var,
 	.fb_set_par = hvfb_set_par,
@@ -940,6 +953,62 @@ static void hvfb_get_option(struct fb_info *info)
 	return;
 }
 
+/*
+ * Allocate enough contiguous physical memory.
+ * Return the physical address on success or -1 on failure.
+ */
+static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
+				   unsigned int request_size)
+{
+	struct page *page = NULL;
+	dma_addr_t dma_handle;
+	void *vmem;
+	phys_addr_t paddr = 0;
+	unsigned int order = get_order(request_size);
+
+	if (request_size == 0)
+		return -1;
+
+	if (order < MAX_ORDER) {
+		/* Call alloc_pages if the size is less than 2^MAX_ORDER */
+		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+		if (!page)
+			return -1;
+
+		paddr = (page_to_pfn(page) << PAGE_SHIFT);
+	} else {
+		/* Allocate from CMA */
+		hdev->device.coherent_dma_mask = DMA_BIT_MASK(64);
+
+		vmem = dma_alloc_coherent(&hdev->device,
+					  round_up(request_size, PAGE_SIZE),
+					  &dma_handle,
+					  GFP_KERNEL | __GFP_NOWARN);
+
+		if (!vmem)
+			return -1;
+
+		paddr = virt_to_phys(vmem);
+	}
+
+	return paddr;
+}
+
+/* Release contiguous physical memory */
+static void hvfb_release_phymem(struct hv_device *hdev,
+				phys_addr_t paddr, unsigned int size)
+{
+	unsigned int order = get_order(size);
+
+	if (order < MAX_ORDER)
+		__free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
+	else
+		dma_free_coherent(&hdev->device,
+				  round_up(size, PAGE_SIZE),
+				  phys_to_virt(paddr),
+				  paddr);
+}
+
 
 /* Get framebuffer memory from Hyper-V video pci space */
 static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
@@ -949,8 +1018,54 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
 	void __iomem *fb_virt;
 	int gen2vm = efi_enabled(EFI_BOOT);
 	resource_size_t pot_start, pot_end;
+	phys_addr_t paddr;
 	int ret;
 
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures)
+		return -ENOMEM;
+
+	if (!gen2vm) {
+		pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+			PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+		if (!pdev) {
+			pr_err("Unable to find PCI Hyper-V video\n");
+			kfree(info->apertures);
+			return -ENODEV;
+		}
+
+		info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
+		info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
+
+		/*
+		 * For a Gen 1 VM, we can use the VM's contiguous physical
+		 * memory directly. If that succeeds, deferred IO happens
+		 * directly on the allocated framebuffer memory, avoiding an
+		 * extra memory copy.
+		 */
+		paddr = hvfb_get_phymem(hdev, screen_fb_size);
+		if (paddr != (phys_addr_t) -1) {
+			par->mmio_pp = paddr;
+			par->mmio_vp = par->dio_vp = __va(paddr);
+
+			info->fix.smem_start = paddr;
+			info->fix.smem_len = screen_fb_size;
+			info->screen_base = par->mmio_vp;
+			info->screen_size = screen_fb_size;
+
+			par->need_docopy = false;
+			goto getmem_done;
+		}
+		pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
+	} else {
+		info->apertures->ranges[0].base = screen_info.lfb_base;
+		info->apertures->ranges[0].size = screen_info.lfb_size;
+	}
+
+	/*
+	 * Cannot use the contiguous physical memory.
+	 * Allocate mmio space for framebuffer.
+	 */
 	dio_fb_size =
 		screen_width * screen_height * screen_depth / 8;
 
@@ -958,13 +1073,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
 		pot_start = 0;
 		pot_end = -1;
 	} else {
-		pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
-			      PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
-		if (!pdev) {
-			pr_err("Unable to find PCI Hyper-V video\n");
-			return -ENODEV;
-		}
-
 		if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
 		    pci_resource_len(pdev, 0) < screen_fb_size) {
 			pr_err("Resource not available or (0x%lx < 0x%lx)\n",
@@ -993,20 +1101,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
 	if (par->dio_vp == NULL)
 		goto err3;
 
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures)
-		goto err4;
-
-	if (gen2vm) {
-		info->apertures->ranges[0].base = screen_info.lfb_base;
-		info->apertures->ranges[0].size = screen_info.lfb_size;
-		remove_conflicting_framebuffers(info->apertures,
-						KBUILD_MODNAME, false);
-	} else {
-		info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
-		info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
-	}
-
 	/* Physical address of FB device */
 	par->mmio_pp = par->mem->start;
 	/* Virtual address of FB device */
@@ -1017,13 +1111,15 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
 	info->screen_base = par->dio_vp;
 	info->screen_size = dio_fb_size;
 
+getmem_done:
+	remove_conflicting_framebuffers(info->apertures,
+					KBUILD_MODNAME, false);
 	if (!gen2vm)
 		pci_dev_put(pdev);
+	kfree(info->apertures);
 
 	return 0;
 
-err4:
-	vfree(par->dio_vp);
 err3:
 	iounmap(fb_virt);
 err2:
@@ -1032,18 +1128,25 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
 err1:
 	if (!gen2vm)
 		pci_dev_put(pdev);
+	kfree(info->apertures);
 
 	return -ENOMEM;
 }
 
 /* Release the framebuffer */
-static void hvfb_putmem(struct fb_info *info)
+static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info)
 {
 	struct hvfb_par *par = info->par;
 
-	vfree(par->dio_vp);
-	iounmap(info->screen_base);
-	vmbus_free_mmio(par->mem->start, screen_fb_size);
+	if (par->need_docopy) {
+		vfree(par->dio_vp);
+		iounmap(info->screen_base);
+		vmbus_free_mmio(par->mem->start, screen_fb_size);
+	} else {
+		hvfb_release_phymem(hdev, info->fix.smem_start,
+				    screen_fb_size);
+	}
+
 	par->mem = NULL;
 }
 
@@ -1062,6 +1165,7 @@ static int hvfb_probe(struct hv_device *hdev,
 	par = info->par;
 	par->info = info;
 	par->fb_ready = false;
+	par->need_docopy = true;
 	init_completion(&par->wait);
 	INIT_DELAYED_WORK(&par->dwork, hvfb_update_work);
 
@@ -1147,7 +1251,7 @@ static int hvfb_probe(struct hv_device *hdev,
 
 error:
 	fb_deferred_io_cleanup(info);
-	hvfb_putmem(info);
+	hvfb_putmem(hdev, info);
 error2:
 	vmbus_close(hdev->channel);
 error1:
@@ -1177,7 +1281,7 @@ static int hvfb_remove(struct hv_device *hdev)
 	vmbus_close(hdev->channel);
 	hv_set_drvdata(hdev, NULL);
 
-	hvfb_putmem(info);
+	hvfb_putmem(hdev, info);
 	framebuffer_release(info);
 
 	return 0;
@@ -1194,6 +1298,7 @@ static int hvfb_suspend(struct hv_device *hdev)
 	fb_set_suspend(info, 1);
 
 	cancel_delayed_work_sync(&par->dwork);
+	cancel_delayed_work_sync(&info->deferred_work);
 
 	par->update_saved = par->update;
 	par->update = false;
@@ -1227,6 +1332,7 @@ static int hvfb_resume(struct hv_device *hdev)
 	par->fb_ready = true;
 	par->update = par->update_saved;
 
+	schedule_delayed_work(&info->deferred_work, info->fbdefio->delay);
 	schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
 
 	/* 0 means do resume */
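The hyperv_fb.c comment above quotes roughly 127MB for an 8K (7680x4320)
framebuffer. A minimal standalone sketch of that arithmetic, assuming 32 bits
per pixel (the depth is an assumption here; the driver computes it at runtime
from screen_depth), not part of the patch itself:

#include <stdio.h>

/*
 * Reproduce the "about 127MB for 8K" figure from the hyperv_fb.c comment,
 * assuming 32 bits per pixel.
 */
int main(void)
{
	unsigned long width = 7680, height = 4320, bits_per_pixel = 32;
	unsigned long bytes = width * height * bits_per_pixel / 8;

	/* 7680 * 4320 * 4 = 132710400 bytes, i.e. about 126.6 MiB */
	printf("8K framebuffer: %lu bytes (%.1f MiB)\n",
	       bytes, bytes / (1024.0 * 1024.0));
	return 0;
}

That works out to 132,710,400 bytes (about 126.6 MiB), which is why a
cma=130m reservation leaves just enough headroom for the allocation.
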
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 347cf8b..c65ec73 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -981,7 +981,7 @@ static int i740fb_blank(int blank_mode, struct fb_info *info)
 	return (blank_mode == FB_BLANK_NORMAL) ? 1 : 0;
 }
 
-static struct fb_ops i740fb_ops = {
+static const struct fb_ops i740fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_open	= i740fb_open,
 	.fb_release	= i740fb_release,
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 58b01c7..3ac053b 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1333,7 +1333,7 @@ static struct pci_driver imsttfb_pci_driver = {
 	.remove =	imsttfb_remove,
 };
 
-static struct fb_ops imsttfb_ops = {
+static const struct fb_ops imsttfb_ops = {
 	.owner 		= THIS_MODULE,
 	.fb_check_var	= imsttfb_check_var,
 	.fb_set_par 	= imsttfb_set_par,
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index b3286d1..370bf25 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -566,7 +566,7 @@ static int imxfb_blank(int blank, struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops imxfb_ops = {
+static const struct fb_ops imxfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= imxfb_check_var,
 	.fb_set_par	= imxfb_set_par,
@@ -1017,7 +1017,7 @@ static int imxfb_probe(struct platform_device *pdev)
 	}
 
 	fbi->lcd_pwr = devm_regulator_get(&pdev->dev, "lcd");
-	if (IS_ERR(fbi->lcd_pwr) && (PTR_ERR(fbi->lcd_pwr) == -EPROBE_DEFER)) {
+	if (PTR_ERR(fbi->lcd_pwr) == -EPROBE_DEFER) {
 		ret = -EPROBE_DEFER;
 		goto failed_lcd;
 	}
diff --git a/drivers/video/fbdev/intelfb/intelfb.h b/drivers/video/fbdev/intelfb/intelfb.h
index b54db05..5de7039 100644
--- a/drivers/video/fbdev/intelfb/intelfb.h
+++ b/drivers/video/fbdev/intelfb/intelfb.h
@@ -273,7 +273,7 @@ struct intelfb_vsync {
 
 struct intelfb_info {
 	struct fb_info *info;
-	struct fb_ops  *fbops;
+	const struct fb_ops *fbops;
 	struct pci_dev *pdev;
 
 	struct intelfb_hwstate save_state;
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index a09fc2e..a957996 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -193,7 +193,7 @@ static const struct pci_device_id intelfb_pci_table[] = {
 static int num_registered = 0;
 
 /* fb ops */
-static struct fb_ops intel_fb_ops = {
+static const struct fb_ops intel_fb_ops = {
 	.owner =		THIS_MODULE,
 	.fb_open =              intelfb_open,
 	.fb_release =           intelfb_release,
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index a866092..8fbde92 100644
--- a/drivers/video/fbdev/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
@@ -648,7 +648,7 @@ static struct pci_driver kyrofb_pci_driver = {
 	.remove		= kyrofb_remove,
 };
 
-static struct fb_ops kyrofb_ops = {
+static const struct fb_ops kyrofb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= kyrofb_check_var,
 	.fb_set_par	= kyrofb_set_par,
diff --git a/drivers/video/fbdev/leo.c b/drivers/video/fbdev/leo.c
index 5b1141a..40b11cc 100644
--- a/drivers/video/fbdev/leo.c
+++ b/drivers/video/fbdev/leo.c
@@ -39,7 +39,7 @@ static int leo_pan_display(struct fb_var_screeninfo *, struct fb_info *);
  *  Frame buffer operations
  */
 
-static struct fb_ops leo_ops = {
+static const struct fb_ops leo_ops = {
 	.owner			= THIS_MODULE,
 	.fb_setcolreg		= leo_setcolreg,
 	.fb_blank		= leo_blank,
diff --git a/drivers/video/fbdev/macfb.c b/drivers/video/fbdev/macfb.c
index 9a6feee..e05a976 100644
--- a/drivers/video/fbdev/macfb.c
+++ b/drivers/video/fbdev/macfb.c
@@ -496,7 +496,7 @@ static int macfb_setcolreg(unsigned regno, unsigned red, unsigned green,
 	return 0;
 }
 
-static struct fb_ops macfb_ops = {
+static const struct fb_ops macfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_setcolreg	= macfb_setcolreg,
 	.fb_fillrect	= cfb_fillrect,
diff --git a/drivers/video/fbdev/matrox/matroxfb_crtc2.c b/drivers/video/fbdev/matrox/matroxfb_crtc2.c
index d2a81a2..7655afa 100644
--- a/drivers/video/fbdev/matrox/matroxfb_crtc2.c
+++ b/drivers/video/fbdev/matrox/matroxfb_crtc2.c
@@ -563,7 +563,7 @@ static int matroxfb_dh_blank(int blank, struct fb_info* info) {
 #undef m2info
 }
 
-static struct fb_ops matroxfb_dh_ops = {
+static const struct fb_ops matroxfb_dh_ops = {
 	.owner =	THIS_MODULE,
 	.fb_open =	matroxfb_dh_open,
 	.fb_release =	matroxfb_dh_release,
diff --git a/drivers/video/fbdev/matrox/matroxfb_misc.c b/drivers/video/fbdev/matrox/matroxfb_misc.c
index c7aaca1..8f159a2 100644
--- a/drivers/video/fbdev/matrox/matroxfb_misc.c
+++ b/drivers/video/fbdev/matrox/matroxfb_misc.c
@@ -673,7 +673,10 @@ static int parse_pins5(struct matrox_fb_info *minfo,
 	if (bd->pins[115] & 4) {
 		minfo->values.reg.mctlwtst_core = minfo->values.reg.mctlwtst;
 	} else {
-		u_int32_t wtst_xlat[] = { 0, 1, 5, 6, 7, 5, 2, 3 };
+		static const u8 wtst_xlat[] = {
+			0, 1, 5, 6, 7, 5, 2, 3
+		};
+
 		minfo->values.reg.mctlwtst_core = (minfo->values.reg.mctlwtst & ~7) |
 						  wtst_xlat[minfo->values.reg.mctlwtst & 7];
 	}
diff --git a/drivers/video/fbdev/maxinefb.c b/drivers/video/fbdev/maxinefb.c
index 5bb1b5c..ae1a42b 100644
--- a/drivers/video/fbdev/maxinefb.c
+++ b/drivers/video/fbdev/maxinefb.c
@@ -105,7 +105,7 @@ static int maxinefb_setcolreg(unsigned regno, unsigned red, unsigned green,
 	return 0;
 }
 
-static struct fb_ops maxinefb_ops = {
+static const struct fb_ops maxinefb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_setcolreg	= maxinefb_setcolreg,
 	.fb_fillrect	= cfb_fillrect,
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb.h b/drivers/video/fbdev/mb862xx/mb862xxfb.h
index 50bc9b5..52a77ea 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfb.h
+++ b/drivers/video/fbdev/mb862xx/mb862xxfb.h
@@ -89,7 +89,7 @@ struct mb862xxfb_par {
 	u32			pseudo_palette[16];
 };
 
-extern void mb862xxfb_init_accel(struct fb_info *info, int xres);
+extern void mb862xxfb_init_accel(struct fb_info *info, struct fb_ops *fbops, int xres);
 #ifdef CONFIG_FB_MB862XX_I2C
 extern int mb862xx_i2c_init(struct mb862xxfb_par *par);
 extern void mb862xx_i2c_exit(struct mb862xxfb_par *par);
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
index f58ff90..4256926 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
@@ -303,19 +303,19 @@ static void mb86290fb_fillrect(struct fb_info *info,
 	mb862xxfb_write_fifo(7, cmd, info);
 }
 
-void mb862xxfb_init_accel(struct fb_info *info, int xres)
+void mb862xxfb_init_accel(struct fb_info *info, struct fb_ops *fbops, int xres)
 {
 	struct mb862xxfb_par *par = info->par;
 
 	if (info->var.bits_per_pixel == 32) {
-		info->fbops->fb_fillrect = cfb_fillrect;
-		info->fbops->fb_copyarea = cfb_copyarea;
-		info->fbops->fb_imageblit = cfb_imageblit;
+		fbops->fb_fillrect = cfb_fillrect;
+		fbops->fb_copyarea = cfb_copyarea;
+		fbops->fb_imageblit = cfb_imageblit;
 	} else {
 		outreg(disp, GC_L0EM, 3);
-		info->fbops->fb_fillrect = mb86290fb_fillrect;
-		info->fbops->fb_copyarea = mb86290fb_copyarea;
-		info->fbops->fb_imageblit = mb86290fb_imageblit;
+		fbops->fb_fillrect = mb86290fb_fillrect;
+		fbops->fb_copyarea = mb86290fb_copyarea;
+		fbops->fb_imageblit = mb86290fb_imageblit;
 	}
 	outreg(draw, GDC_REG_DRAW_BASE, 0);
 	outreg(draw, GDC_REG_MODE_MISC, 0x8000);
@@ -326,6 +326,5 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
 	    FBINFO_HWACCEL_IMAGEBLIT;
 	info->fix.accel = 0xff;	/*FIXME: add right define */
 }
-EXPORT_SYMBOL(mb862xxfb_init_accel);
 
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 962c017..52755b5 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -194,6 +194,8 @@ static int mb862xxfb_check_var(struct fb_var_screeninfo *var,
 	return 0;
 }
 
+static struct fb_ops mb862xxfb_ops;
+
 /*
  * set display parameters
  */
@@ -204,7 +206,7 @@ static int mb862xxfb_set_par(struct fb_info *fbi)
 
 	dev_dbg(par->dev, "%s\n", __func__);
 	if (par->type == BT_CORALP)
-		mb862xxfb_init_accel(fbi, fbi->var.xres);
+		mb862xxfb_init_accel(fbi, &mb862xxfb_ops, fbi->var.xres);
 
 	if (par->pre_init)
 		return 0;
diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c
index 3de4b3e..6dc287c 100644
--- a/drivers/video/fbdev/mbx/mbxfb.c
+++ b/drivers/video/fbdev/mbx/mbxfb.c
@@ -671,7 +671,7 @@ static int mbxfb_ioctl(struct fb_info *info, unsigned int cmd,
 	return -EINVAL;
 }
 
-static struct fb_ops mbxfb_ops = {
+static const struct fb_ops mbxfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = mbxfb_check_var,
 	.fb_set_par = mbxfb_set_par,
diff --git a/drivers/video/fbdev/metronomefb.c b/drivers/video/fbdev/metronomefb.c
index bb4fee5..a42e2ec 100644
--- a/drivers/video/fbdev/metronomefb.c
+++ b/drivers/video/fbdev/metronomefb.c
@@ -558,7 +558,7 @@ static ssize_t metronomefb_write(struct fb_info *info, const char __user *buf,
 	return (err) ? err : count;
 }
 
-static struct fb_ops metronomefb_ops = {
+static const struct fb_ops metronomefb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_write	= metronomefb_write,
 	.fb_fillrect	= metronomefb_fillrect,
diff --git a/drivers/video/fbdev/mmp/Kconfig b/drivers/video/fbdev/mmp/Kconfig
index 9041ffd..5c6cc97 100644
--- a/drivers/video/fbdev/mmp/Kconfig
+++ b/drivers/video/fbdev/mmp/Kconfig
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 menuconfig MMP_DISP
 	tristate "Marvell MMP Display Subsystem support"
-	depends on CPU_PXA910 || CPU_MMP2
+	depends on CPU_PXA910 || CPU_MMP2 || COMPILE_TEST
 	help
 	  Marvell Display Subsystem support.
 
diff --git a/drivers/video/fbdev/mmp/fb/Kconfig b/drivers/video/fbdev/mmp/fb/Kconfig
index 39944eb..0ec2e3f 100644
--- a/drivers/video/fbdev/mmp/fb/Kconfig
+++ b/drivers/video/fbdev/mmp/fb/Kconfig
@@ -1,6 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-if MMP_DISP
-
 config MMP_FB
 	tristate "fb driver for Marvell MMP Display Subsystem"
 	depends on FB
@@ -10,5 +8,3 @@
 	default y
 	help
 		fb driver for Marvell MMP Display Subsystem
-
-endif
diff --git a/drivers/video/fbdev/mmp/fb/mmpfb.c b/drivers/video/fbdev/mmp/fb/mmpfb.c
index 47bc7c5..01c75c0 100644
--- a/drivers/video/fbdev/mmp/fb/mmpfb.c
+++ b/drivers/video/fbdev/mmp/fb/mmpfb.c
@@ -454,7 +454,7 @@ static int mmpfb_blank(int blank, struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops mmpfb_ops = {
+static const struct fb_ops mmpfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_blank	= mmpfb_blank,
 	.fb_check_var	= mmpfb_check_var,
@@ -522,7 +522,7 @@ static int fb_info_setup(struct fb_info *info,
 		info->var.bits_per_pixel / 8;
 	info->fbops = &mmpfb_ops;
 	info->pseudo_palette = fbi->pseudo_palette;
-	info->screen_base = fbi->fb_start;
+	info->screen_buffer = fbi->fb_start;
 	info->screen_size = fbi->fb_size;
 
 	/* For FB framework: Allocate color map and Register framebuffer*/
diff --git a/drivers/video/fbdev/mmp/hw/Kconfig b/drivers/video/fbdev/mmp/hw/Kconfig
index 4d018cf..7ebe125 100644
--- a/drivers/video/fbdev/mmp/hw/Kconfig
+++ b/drivers/video/fbdev/mmp/hw/Kconfig
@@ -1,9 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
-if MMP_DISP
-
 config MMP_DISP_CONTROLLER
 	bool "mmp display controller hw support"
-	depends on CPU_PXA910 || CPU_MMP2
+	depends on HAVE_CLK && HAS_IOMEM
+	depends on CPU_PXA910 || CPU_MMP2 || COMPILE_TEST
 	help
 		Marvell MMP display hw controller support
 		this controller is used on Marvell PXA910 and
@@ -16,5 +15,3 @@
 	help
 		Marvell MMP display hw controller spi port support
 		will register as a spi master for panel usage
-
-endif
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index 974e4c2..061a105 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -136,19 +136,26 @@ static void overlay_set_win(struct mmp_overlay *overlay, struct mmp_win *win)
 	mutex_lock(&overlay->access_ok);
 
 	if (overlay_is_vid(overlay)) {
-		writel_relaxed(win->pitch[0], &regs->v_pitch_yc);
-		writel_relaxed(win->pitch[2] << 16 |
-				win->pitch[1], &regs->v_pitch_uv);
+		writel_relaxed(win->pitch[0],
+				(void __iomem *)&regs->v_pitch_yc);
+		writel_relaxed(win->pitch[2] << 16 | win->pitch[1],
+				(void __iomem *)&regs->v_pitch_uv);
 
-		writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->v_size);
-		writel_relaxed((win->ydst << 16) | win->xdst, &regs->v_size_z);
-		writel_relaxed(win->ypos << 16 | win->xpos, &regs->v_start);
+		writel_relaxed((win->ysrc << 16) | win->xsrc,
+				(void __iomem *)&regs->v_size);
+		writel_relaxed((win->ydst << 16) | win->xdst,
+				(void __iomem *)&regs->v_size_z);
+		writel_relaxed(win->ypos << 16 | win->xpos,
+				(void __iomem *)&regs->v_start);
 	} else {
-		writel_relaxed(win->pitch[0], &regs->g_pitch);
+		writel_relaxed(win->pitch[0], (void __iomem *)&regs->g_pitch);
 
-		writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->g_size);
-		writel_relaxed((win->ydst << 16) | win->xdst, &regs->g_size_z);
-		writel_relaxed(win->ypos << 16 | win->xpos, &regs->g_start);
+		writel_relaxed((win->ysrc << 16) | win->xsrc,
+				(void __iomem *)&regs->g_size);
+		writel_relaxed((win->ydst << 16) | win->xdst,
+				(void __iomem *)&regs->g_size_z);
+		writel_relaxed(win->ypos << 16 | win->xpos,
+				(void __iomem *)&regs->g_start);
 	}
 
 	dmafetch_set_fmt(overlay);
@@ -233,11 +240,11 @@ static int overlay_set_addr(struct mmp_overlay *overlay, struct mmp_addr *addr)
 	memcpy(&overlay->addr, addr, sizeof(struct mmp_addr));
 
 	if (overlay_is_vid(overlay)) {
-		writel_relaxed(addr->phys[0], &regs->v_y0);
-		writel_relaxed(addr->phys[1], &regs->v_u0);
-		writel_relaxed(addr->phys[2], &regs->v_v0);
+		writel_relaxed(addr->phys[0], (void __iomem *)&regs->v_y0);
+		writel_relaxed(addr->phys[1], (void __iomem *)&regs->v_u0);
+		writel_relaxed(addr->phys[2], (void __iomem *)&regs->v_v0);
 	} else
-		writel_relaxed(addr->phys[0], &regs->g_0);
+		writel_relaxed(addr->phys[0], (void __iomem *)&regs->g_0);
 
 	return overlay->addr.phys[0];
 }
@@ -268,16 +275,18 @@ static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
 	tmp |= dsi_rbswap & CFG_INTFRBSWAP_MASK;
 	writel_relaxed(tmp, ctrl_regs(path) + intf_rbswap_ctrl(path->id));
 
-	writel_relaxed((mode->yres << 16) | mode->xres, &regs->screen_active);
+	writel_relaxed((mode->yres << 16) | mode->xres,
+		(void __iomem *)&regs->screen_active);
 	writel_relaxed((mode->left_margin << 16) | mode->right_margin,
-		&regs->screen_h_porch);
+		(void __iomem *)&regs->screen_h_porch);
 	writel_relaxed((mode->upper_margin << 16) | mode->lower_margin,
-		&regs->screen_v_porch);
+		(void __iomem *)&regs->screen_v_porch);
 	total_x = mode->xres + mode->left_margin + mode->right_margin +
 		mode->hsync_len;
 	total_y = mode->yres + mode->upper_margin + mode->lower_margin +
 		mode->vsync_len;
-	writel_relaxed((total_y << 16) | total_x, &regs->screen_size);
+	writel_relaxed((total_y << 16) | total_x,
+		(void __iomem *)&regs->screen_size);
 
 	/* vsync ctrl */
 	if (path->output_type == PATH_OUT_DSI)
@@ -285,7 +294,7 @@ static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
 	else
 		vsync_ctrl = ((mode->xres + mode->right_margin) << 16)
 					| (mode->xres + mode->right_margin);
-	writel_relaxed(vsync_ctrl, &regs->vsync_ctrl);
+	writel_relaxed(vsync_ctrl, (void __iomem *)&regs->vsync_ctrl);
 
 	/* set pixclock div */
 	sclk_src = clk_get_rate(path_to_ctrl(path)->clk);
@@ -366,9 +375,9 @@ static void path_set_default(struct mmp_path *path)
 	writel_relaxed(dma_ctrl1, ctrl_regs(path) + dma_ctrl(1, path->id));
 
 	/* Configure default register values */
-	writel_relaxed(0x00000000, &regs->blank_color);
-	writel_relaxed(0x00000000, &regs->g_1);
-	writel_relaxed(0x00000000, &regs->g_start);
+	writel_relaxed(0x00000000, (void __iomem *)&regs->blank_color);
+	writel_relaxed(0x00000000, (void __iomem *)&regs->g_1);
+	writel_relaxed(0x00000000, (void __iomem *)&regs->g_start);
 
 	/*
 	 * 1.enable multiple burst request in DMA AXI
@@ -447,7 +456,6 @@ static int mmphw_probe(struct platform_device *pdev)
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
-		dev_err(&pdev->dev, "%s: no IRQ defined\n", __func__);
 		ret = -ENOENT;
 		goto failed;
 	}
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
index e9ec45c..335d498 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
@@ -1393,7 +1393,7 @@ struct mmphw_ctrl {
 	/* platform related, get from config */
 	const char *name;
 	int irq;
-	void *reg_base;
+	void __iomem *reg_base;
 	struct clk *clk;
 
 	/* sys info */
@@ -1429,7 +1429,7 @@ static inline struct mmphw_ctrl *overlay_to_ctrl(struct mmp_overlay *overlay)
 	return path_to_ctrl(overlay->path);
 }
 
-static inline void *ctrl_regs(struct mmp_path *path)
+static inline void __iomem *ctrl_regs(struct mmp_path *path)
 {
 	return path_to_ctrl(path)->reg_base;
 }
@@ -1438,11 +1438,11 @@ static inline void *ctrl_regs(struct mmp_path *path)
 static inline struct lcd_regs *path_regs(struct mmp_path *path)
 {
 	if (path->id == PATH_PN)
-		return (struct lcd_regs *)(ctrl_regs(path) + 0xc0);
+		return (struct lcd_regs __force *)(ctrl_regs(path) + 0xc0);
 	else if (path->id == PATH_TV)
-		return (struct lcd_regs *)ctrl_regs(path);
+		return (struct lcd_regs __force  *)ctrl_regs(path);
 	else if (path->id == PATH_P2)
-		return (struct lcd_regs *)(ctrl_regs(path) + 0x200);
+		return (struct lcd_regs __force *)(ctrl_regs(path) + 0x200);
 	else {
 		dev_err(path->dev, "path id %d invalid\n", path->id);
 		BUG_ON(1);
diff --git a/drivers/video/fbdev/mmp/hw/mmp_spi.c b/drivers/video/fbdev/mmp/hw/mmp_spi.c
index bbb75de..1911a47 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_spi.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_spi.c
@@ -31,7 +31,7 @@ static inline int lcd_spi_write(struct spi_device *spi, u32 data)
 {
 	int timeout = 100000, isr, ret = 0;
 	u32 tmp;
-	void *reg_base =
+	void __iomem *reg_base = (void __iomem *)
 		*(void **)spi_master_get_devdata(spi->master);
 
 	/* clear ISR */
@@ -80,7 +80,7 @@ static inline int lcd_spi_write(struct spi_device *spi, u32 data)
 
 static int lcd_spi_setup(struct spi_device *spi)
 {
-	void *reg_base =
+	void __iomem *reg_base = (void __iomem *)
 		*(void **)spi_master_get_devdata(spi->master);
 	u32 tmp;
 
@@ -146,7 +146,7 @@ int lcd_spi_register(struct mmphw_ctrl *ctrl)
 		return -ENOMEM;
 	}
 	p_regbase = spi_master_get_devdata(master);
-	*p_regbase = ctrl->reg_base;
+	*p_regbase = (void __force *)ctrl->reg_base;
 
 	/* set bus num to 5 to avoid conflict with other spi hosts */
 	master->bus_num = 5;
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index bafd5f5..4af28e4 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -1249,7 +1249,7 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
  * invoked by the core framebuffer driver to perform operations like
  * blitting, rectangle filling, copy regions and cursor definition.
  */
-static struct fb_ops mx3fb_ops = {
+static const struct fb_ops mx3fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_set_par = mx3fb_set_par,
 	.fb_check_var = mx3fb_check_var,
@@ -1389,7 +1389,8 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi)
  * mx3fb_init_fbinfo() - initialize framebuffer information object.
  * @return:	initialized framebuffer structure.
  */
-static struct fb_info *mx3fb_init_fbinfo(struct device *dev, struct fb_ops *ops)
+static struct fb_info *mx3fb_init_fbinfo(struct device *dev,
+					 const struct fb_ops *ops)
 {
 	struct fb_info *fbi;
 	struct mx3fb_info *mx3fbi;
diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
index b770946..e6ea853 100644
--- a/drivers/video/fbdev/neofb.c
+++ b/drivers/video/fbdev/neofb.c
@@ -1610,7 +1610,7 @@ neofb_cursor(struct fb_info *info, struct fb_cursor *cursor)
 }
 */
 
-static struct fb_ops neofb_ops = {
+static const struct fb_ops neofb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_open	= neofb_open,
 	.fb_release	= neofb_release,
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index fbeeed5..c583c01 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -607,6 +607,8 @@ static int nvidiafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
 	return 0;
 }
 
+static struct fb_ops nvidia_fb_ops;
+
 static int nvidiafb_set_par(struct fb_info *info)
 {
 	struct nvidia_par *par = info->par;
@@ -660,19 +662,19 @@ static int nvidiafb_set_par(struct fb_info *info)
 	info->fix.line_length = (info->var.xres_virtual *
 				 info->var.bits_per_pixel) >> 3;
 	if (info->var.accel_flags) {
-		info->fbops->fb_imageblit = nvidiafb_imageblit;
-		info->fbops->fb_fillrect = nvidiafb_fillrect;
-		info->fbops->fb_copyarea = nvidiafb_copyarea;
-		info->fbops->fb_sync = nvidiafb_sync;
+		nvidia_fb_ops.fb_imageblit = nvidiafb_imageblit;
+		nvidia_fb_ops.fb_fillrect = nvidiafb_fillrect;
+		nvidia_fb_ops.fb_copyarea = nvidiafb_copyarea;
+		nvidia_fb_ops.fb_sync = nvidiafb_sync;
 		info->pixmap.scan_align = 4;
 		info->flags &= ~FBINFO_HWACCEL_DISABLED;
 		info->flags |= FBINFO_READS_FAST;
 		NVResetGraphics(info);
 	} else {
-		info->fbops->fb_imageblit = cfb_imageblit;
-		info->fbops->fb_fillrect = cfb_fillrect;
-		info->fbops->fb_copyarea = cfb_copyarea;
-		info->fbops->fb_sync = NULL;
+		nvidia_fb_ops.fb_imageblit = cfb_imageblit;
+		nvidia_fb_ops.fb_fillrect = cfb_fillrect;
+		nvidia_fb_ops.fb_copyarea = cfb_copyarea;
+		nvidia_fb_ops.fb_sync = NULL;
 		info->pixmap.scan_align = 1;
 		info->flags |= FBINFO_HWACCEL_DISABLED;
 		info->flags &= ~FBINFO_READS_FAST;
@@ -1165,7 +1167,7 @@ static int nvidia_set_fbinfo(struct fb_info *info)
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 
 	if (!hwcur)
-	    info->fbops->fb_cursor = NULL;
+	    nvidia_fb_ops.fb_cursor = NULL;
 
 	info->var.accel_flags = (!noaccel);
 
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c
index a970edc2..bfa4ed4 100644
--- a/drivers/video/fbdev/ocfb.c
+++ b/drivers/video/fbdev/ocfb.c
@@ -285,7 +285,7 @@ static int ocfb_init_var(struct ocfb_dev *fbdev)
 	return 0;
 }
 
-static struct fb_ops ocfb_ops = {
+static const struct fb_ops ocfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_setcolreg	= ocfb_setcolreg,
 	.fb_fillrect	= cfb_fillrect,
@@ -297,7 +297,6 @@ static int ocfb_probe(struct platform_device *pdev)
 {
 	int ret = 0;
 	struct ocfb_dev *fbdev;
-	struct resource *res;
 	int fbsize;
 
 	fbdev = devm_kzalloc(&pdev->dev, sizeof(*fbdev), GFP_KERNEL);
@@ -319,13 +318,7 @@ static int ocfb_probe(struct platform_device *pdev)
 	ocfb_init_var(fbdev);
 	ocfb_init_fix(fbdev);
 
-	/* Request I/O resource */
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "I/O resource request failed\n");
-		return -ENXIO;
-	}
-	fbdev->regs = devm_ioremap_resource(&pdev->dev, res);
+	fbdev->regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(fbdev->regs))
 		return PTR_ERR(fbdev->regs);
 
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index fbc6eaf..5cd0f5f 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -286,7 +286,7 @@ static void offb_destroy(struct fb_info *info)
 	framebuffer_release(info);
 }
 
-static struct fb_ops offb_ops = {
+static const struct fb_ops offb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_destroy	= offb_destroy,
 	.fb_setcolreg	= offb_setcolreg,
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 702cca5..e8a304f8 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1052,7 +1052,7 @@ static int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd,
 {
 	struct omapfb_plane_struct *plane = fbi->par;
 	struct omapfb_device	*fbdev = plane->fbdev;
-	struct fb_ops		*ops = fbi->fbops;
+	const struct fb_ops *ops = fbi->fbops;
 	union {
 		struct omapfb_update_window	update_window;
 		struct omapfb_plane_info	plane_info;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index 376ee5b..ce37da8 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -1635,7 +1635,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
 {
 	int scale_x = out_width != orig_width;
 	int scale_y = out_height != orig_height;
-	bool chroma_upscale = plane != OMAP_DSS_WB ? true : false;
+	bool chroma_upscale = plane != OMAP_DSS_WB;
 
 	if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
 		return;
@@ -3100,9 +3100,9 @@ static bool _dispc_mgr_pclk_ok(enum omap_channel channel,
 		unsigned long pclk)
 {
 	if (dss_mgr_is_lcd(channel))
-		return pclk <= dispc.feat->max_lcd_pclk ? true : false;
+		return pclk <= dispc.feat->max_lcd_pclk;
 	else
-		return pclk <= dispc.feat->max_tv_pclk ? true : false;
+		return pclk <= dispc.feat->max_tv_pclk;
 }
 
 bool dispc_mgr_timings_ok(enum omap_channel channel,
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index 858c2c0..8dfa915 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1280,7 +1280,7 @@ ssize_t omapfb_write(struct fb_info *info, const char __user *buf,
 }
 #endif
 
-static struct fb_ops omapfb_ops = {
+static const struct fb_ops omapfb_ops = {
 	.owner          = THIS_MODULE,
 	.fb_open        = omapfb_open,
 	.fb_release     = omapfb_release,
diff --git a/drivers/video/fbdev/omap2/omapfb/vrfb.c b/drivers/video/fbdev/omap2/omapfb/vrfb.c
index 819e0bc..ee0dd4c 100644
--- a/drivers/video/fbdev/omap2/omapfb/vrfb.c
+++ b/drivers/video/fbdev/omap2/omapfb/vrfb.c
@@ -339,9 +339,7 @@ static int __init vrfb_probe(struct platform_device *pdev)
 	int i;
 
 	/* first resource is the register res, the rest are vrfb contexts */
-
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	vrfb_base = devm_ioremap_resource(&pdev->dev, mem);
+	vrfb_base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(vrfb_base))
 		return PTR_ERR(vrfb_base);
 
diff --git a/drivers/video/fbdev/p9100.c b/drivers/video/fbdev/p9100.c
index 8c18cc5..6da672e 100644
--- a/drivers/video/fbdev/p9100.c
+++ b/drivers/video/fbdev/p9100.c
@@ -37,7 +37,7 @@ static int p9100_ioctl(struct fb_info *, unsigned int, unsigned long);
  *  Frame buffer operations
  */
 
-static struct fb_ops p9100_ops = {
+static const struct fb_ops p9100_ops = {
 	.owner			= THIS_MODULE,
 	.fb_setcolreg		= p9100_setcolreg,
 	.fb_blank		= p9100_blank,
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index 632b246..ce413a9 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -96,7 +96,7 @@ static int platinum_var_to_par(struct fb_var_screeninfo *var,
  * Interface used by the world
  */
 
-static struct fb_ops platinumfb_ops = {
+static const struct fb_ops platinumfb_ops = {
 	.owner =	THIS_MODULE,
 	.fb_check_var	= platinumfb_check_var,
 	.fb_set_par	= platinumfb_set_par,
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index 7cc1216..fe2cade 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -1483,7 +1483,7 @@ static int pm2fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
  *  Frame buffer operations
  */
 
-static struct fb_ops pm2fb_ops = {
+static const struct fb_ops pm2fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= pm2fb_check_var,
 	.fb_set_par	= pm2fb_set_par,
diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c
index 2fa4660..2f5e23c 100644
--- a/drivers/video/fbdev/pm3fb.c
+++ b/drivers/video/fbdev/pm3fb.c
@@ -1200,7 +1200,7 @@ static int pm3fb_blank(int blank_mode, struct fb_info *info)
 	 *  Frame buffer operations
 	 */
 
-static struct fb_ops pm3fb_ops = {
+static const struct fb_ops pm3fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= pm3fb_check_var,
 	.fb_set_par	= pm3fb_set_par,
diff --git a/drivers/video/fbdev/pmag-aa-fb.c b/drivers/video/fbdev/pmag-aa-fb.c
index d5bf185..62c8de9 100644
--- a/drivers/video/fbdev/pmag-aa-fb.c
+++ b/drivers/video/fbdev/pmag-aa-fb.c
@@ -147,7 +147,7 @@ static int aafb_blank(int blank, struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops aafb_ops = {
+static const struct fb_ops aafb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_blank	= aafb_blank,
 	.fb_fillrect	= cfb_fillrect,
diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c
index 2ddcdf7..1296f9b 100644
--- a/drivers/video/fbdev/pmag-ba-fb.c
+++ b/drivers/video/fbdev/pmag-ba-fb.c
@@ -117,7 +117,7 @@ static int pmagbafb_setcolreg(unsigned int regno, unsigned int red,
 	return 0;
 }
 
-static struct fb_ops pmagbafb_ops = {
+static const struct fb_ops pmagbafb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_setcolreg	= pmagbafb_setcolreg,
 	.fb_fillrect	= cfb_fillrect,
diff --git a/drivers/video/fbdev/pmagb-b-fb.c b/drivers/video/fbdev/pmagb-b-fb.c
index 90d2b04..9dccd51 100644
--- a/drivers/video/fbdev/pmagb-b-fb.c
+++ b/drivers/video/fbdev/pmagb-b-fb.c
@@ -121,7 +121,7 @@ static int pmagbbfb_setcolreg(unsigned int regno, unsigned int red,
 	return 0;
 }
 
-static struct fb_ops pmagbbfb_ops = {
+static const struct fb_ops pmagbbfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_setcolreg	= pmagbbfb_setcolreg,
 	.fb_fillrect	= cfb_fillrect,
diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c
index 5ed2db3..834f63e 100644
--- a/drivers/video/fbdev/ps3fb.c
+++ b/drivers/video/fbdev/ps3fb.c
@@ -934,7 +934,7 @@ static irqreturn_t ps3fb_vsync_interrupt(int irq, void *ptr)
 }
 
 
-static struct fb_ops ps3fb_ops = {
+static const struct fb_ops ps3fb_ops = {
 	.fb_open	= ps3fb_open,
 	.fb_release	= ps3fb_release,
 	.fb_read        = fb_sys_read,
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index c680b3e..f18d457 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -707,7 +707,7 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
 }
 #endif /* CONFIG_PVR2_DMA */
 
-static struct fb_ops pvr2fb_ops = {
+static const struct fb_ops pvr2fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_setcolreg	= pvr2fb_setcolreg,
 	.fb_blank	= pvr2fb_blank,
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index 5615054..9b9ec14 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -545,7 +545,7 @@ static irqreturn_t pxa168fb_handle_irq(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-static struct fb_ops pxa168fb_ops = {
+static const struct fb_ops pxa168fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= pxa168fb_check_var,
 	.fb_set_par	= pxa168fb_set_par,
@@ -766,8 +766,8 @@ static int pxa168fb_probe(struct platform_device *pdev)
 failed_free_clk:
 	clk_disable_unprepare(fbi->clk);
 failed_free_fbmem:
-	dma_free_coherent(fbi->dev, info->fix.smem_len,
-			info->screen_base, fbi->fb_start_dma);
+	dma_free_wc(fbi->dev, info->fix.smem_len,
+		    info->screen_base, fbi->fb_start_dma);
 failed_free_info:
 	kfree(info);
 
@@ -801,7 +801,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
 
 	irq = platform_get_irq(pdev, 0);
 
-	dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
+	dma_free_wc(fbi->dev, info->fix.smem_len,
 		    info->screen_base, info->fix.smem_start);
 
 	clk_disable_unprepare(fbi->clk);
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index f70c9f7..00b96a7 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -597,7 +597,7 @@ static int pxafb_blank(int blank, struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops pxafb_ops = {
+static const struct fb_ops pxafb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= pxafb_check_var,
 	.fb_set_par	= pxafb_set_par,
@@ -865,7 +865,7 @@ static int overlayfb_set_par(struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops overlay_fb_ops = {
+static const struct fb_ops overlay_fb_ops = {
 	.owner			= THIS_MODULE,
 	.fb_open		= overlayfb_open,
 	.fb_release		= overlayfb_release,
@@ -2237,7 +2237,6 @@ static int pxafb_probe(struct platform_device *dev)
 {
 	struct pxafb_info *fbi;
 	struct pxafb_mach_info *inf, *pdata;
-	struct resource *r;
 	int i, irq, ret;
 
 	dev_dbg(&dev->dev, "pxafb_probe\n");
@@ -2303,14 +2302,7 @@ static int pxafb_probe(struct platform_device *dev)
 		fbi->lcd_supply = NULL;
 	}
 
-	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
-	if (r == NULL) {
-		dev_err(&dev->dev, "no I/O memory resource defined\n");
-		ret = -ENODEV;
-		goto failed;
-	}
-
-	fbi->mmio_base = devm_ioremap_resource(&dev->dev, r);
+	fbi->mmio_base = devm_platform_ioremap_resource(dev, 0);
 	if (IS_ERR(fbi->mmio_base)) {
 		dev_err(&dev->dev, "failed to get I/O memory\n");
 		ret = -EBUSY;
diff --git a/drivers/video/fbdev/q40fb.c b/drivers/video/fbdev/q40fb.c
index 0b93aa9..79ff14a 100644
--- a/drivers/video/fbdev/q40fb.c
+++ b/drivers/video/fbdev/q40fb.c
@@ -75,7 +75,7 @@ static int q40fb_setcolreg(unsigned regno, unsigned red, unsigned green,
     return 0;
 }
 
-static struct fb_ops q40fb_ops = {
+static const struct fb_ops q40fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_setcolreg	= q40fb_setcolreg,
 	.fb_fillrect	= cfb_fillrect,
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index ca593a3..764ec32 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -1673,7 +1673,7 @@ static int rivafb_sync(struct fb_info *info)
  * ------------------------------------------------------------------------- */
 
 /* kernel interface */
-static struct fb_ops riva_fb_ops = {
+static const struct fb_ops riva_fb_ops = {
 	.owner 		= THIS_MODULE,
 	.fb_open	= rivafb_open,
 	.fb_release	= rivafb_release,
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index ba04d7a..9dc9250 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -1035,7 +1035,7 @@ static int s3c_fb_ioctl(struct fb_info *info, unsigned int cmd,
 	return ret;
 }
 
-static struct fb_ops s3c_fb_ops = {
+static const struct fb_ops s3c_fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= s3c_fb_check_var,
 	.fb_set_par	= s3c_fb_set_par,
@@ -1411,8 +1411,7 @@ static int s3c_fb_probe(struct platform_device *pdev)
 
 	pm_runtime_enable(sfb->dev);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	sfb->regs = devm_ioremap_resource(dev, res);
+	sfb->regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(sfb->regs)) {
 		ret = PTR_ERR(sfb->regs);
 		goto err_lcd_clk;
diff --git a/drivers/video/fbdev/s3c2410fb.c b/drivers/video/fbdev/s3c2410fb.c
index a702da8..2fb15a5 100644
--- a/drivers/video/fbdev/s3c2410fb.c
+++ b/drivers/video/fbdev/s3c2410fb.c
@@ -618,7 +618,7 @@ static int s3c2410fb_debug_store(struct device *dev,
 
 static DEVICE_ATTR(debug, 0664, s3c2410fb_debug_show, s3c2410fb_debug_store);
 
-static struct fb_ops s3c2410fb_ops = {
+static const struct fb_ops s3c2410fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= s3c2410fb_check_var,
 	.fb_set_par	= s3c2410fb_set_par,
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index be16c34..60c424f 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -1037,7 +1037,7 @@ static int s3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
 
 /* Frame buffer operations */
 
-static struct fb_ops s3fb_ops = {
+static const struct fb_ops s3fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_open	= s3fb_open,
 	.fb_release	= s3fb_release,
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 81ad3aa..5bb653d 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -574,7 +574,7 @@ static int sa1100fb_mmap(struct fb_info *info,
 	return vm_iomap_memory(vma, info->fix.mmio_start, info->fix.mmio_len);
 }
 
-static struct fb_ops sa1100fb_ops = {
+static const struct fb_ops sa1100fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= sa1100fb_check_var,
 	.fb_set_par	= sa1100fb_set_par,
@@ -1143,7 +1143,6 @@ static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
 static int sa1100fb_probe(struct platform_device *pdev)
 {
 	struct sa1100fb_info *fbi;
-	struct resource *res;
 	int ret, irq;
 
 	if (!dev_get_platdata(&pdev->dev)) {
@@ -1159,8 +1158,7 @@ static int sa1100fb_probe(struct platform_device *pdev)
 	if (!fbi)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	fbi->base = devm_ioremap_resource(&pdev->dev, res);
+	fbi->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(fbi->base))
 		return PTR_ERR(fbi->base);
 
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index 512789f..aab312a7 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -1637,7 +1637,7 @@ static int savagefb_release(struct fb_info *info, int user)
 	return 0;
 }
 
-static struct fb_ops savagefb_ops = {
+static const struct fb_ops savagefb_ops = {
 	.owner          = THIS_MODULE,
 	.fb_open        = savagefb_open,
 	.fb_release     = savagefb_release,
diff --git a/drivers/video/fbdev/sh7760fb.c b/drivers/video/fbdev/sh7760fb.c
index f72b035..5978a89 100644
--- a/drivers/video/fbdev/sh7760fb.c
+++ b/drivers/video/fbdev/sh7760fb.c
@@ -341,7 +341,7 @@ static int sh7760fb_set_par(struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops sh7760fb_ops = {
+static const struct fb_ops sh7760fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_blank = sh7760fb_blank,
 	.fb_check_var = sh7760fb_check_var,
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 54ee7e0..4ea6f93 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1490,7 +1490,7 @@ sh_mobile_lcdc_overlay_mmap(struct fb_info *info, struct vm_area_struct *vma)
 				 ovl->dma_handle, ovl->fb_size);
 }
 
-static struct fb_ops sh_mobile_lcdc_overlay_ops = {
+static const struct fb_ops sh_mobile_lcdc_overlay_ops = {
 	.owner          = THIS_MODULE,
 	.fb_read        = fb_sys_read,
 	.fb_write       = fb_sys_write,
@@ -1964,7 +1964,7 @@ sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma)
 				 ch->dma_handle, ch->fb_size);
 }
 
-static struct fb_ops sh_mobile_lcdc_ops = {
+static const struct fb_ops sh_mobile_lcdc_ops = {
 	.owner          = THIS_MODULE,
 	.fb_setcolreg	= sh_mobile_lcdc_setcolreg,
 	.fb_read        = fb_sys_read,
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 7dc0105..533a047d 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -78,7 +78,7 @@ static void simplefb_destroy(struct fb_info *info)
 		iounmap(info->screen_base);
 }
 
-static struct fb_ops simplefb_ops = {
+static const struct fb_ops simplefb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_destroy	= simplefb_destroy,
 	.fb_setcolreg	= simplefb_setcolreg,
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index b443a8e..ac14096 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -1906,7 +1906,7 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
 
 /* ----------------  fb_ops structures ----------------- */
 
-static struct fb_ops sisfb_ops = {
+static const struct fb_ops sisfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_open	= sisfb_open,
 	.fb_release	= sisfb_release,
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index 812a36c..bcacfb6 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -634,7 +634,7 @@ int xxxfb_sync(struct fb_info *info)
      *  Frame buffer operations
      */
 
-static struct fb_ops xxxfb_ops = {
+static const struct fb_ops xxxfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_open	= xxxfb_open,
 	.fb_read	= xxxfb_read,
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 207d0ad..6a1b4a8 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -1369,7 +1369,7 @@ static int smtc_set_par(struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops smtcfb_ops = {
+static const struct fb_ops smtcfb_ops = {
 	.owner        = THIS_MODULE,
 	.fb_check_var = smtc_check_var,
 	.fb_set_par   = smtc_set_par,
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index 0e0f5bb..bfac3ee 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1170,7 +1170,6 @@ static int ufx_ops_release(struct fb_info *info, int user)
 		fb_deferred_io_cleanup(info);
 		kfree(info->fbdefio);
 		info->fbdefio = NULL;
-		info->fbops->fb_mmap = ufx_ops_mmap;
 	}
 
 	pr_debug("released /dev/fb%d user=%d count=%d",
@@ -1269,7 +1268,7 @@ static int ufx_ops_blank(int blank_mode, struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops ufx_ops = {
+static const struct fb_ops ufx_ops = {
 	.owner = THIS_MODULE,
 	.fb_read = fb_sys_read,
 	.fb_write = ufx_ops_write,
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 78ca7ff..1425352 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -280,7 +280,7 @@ static void ssd1307fb_imageblit(struct fb_info *info, const struct fb_image *ima
 	ssd1307fb_update_display(par);
 }
 
-static struct fb_ops ssd1307fb_ops = {
+static const struct fb_ops ssd1307fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_read	= fb_sys_read,
 	.fb_write	= ssd1307fb_write,
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index 1f171a5..afe6d1b 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -1307,7 +1307,7 @@ static int sstfb_setup(char *options)
 }
 
 
-static struct fb_ops sstfb_ops = {
+static const struct fb_ops sstfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= sstfb_check_var,
 	.fb_set_par	= sstfb_set_par,
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 4670944..de953dd 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1101,7 +1101,7 @@ stifb_init_display(struct stifb_info *fb)
 
 /* ------------ Interfaces to hardware functions ------------ */
 
-static struct fb_ops stifb_ops = {
+static const struct fb_ops stifb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_setcolreg	= stifb_setcolreg,
 	.fb_blank	= stifb_blank,
diff --git a/drivers/video/fbdev/sunxvr1000.c b/drivers/video/fbdev/sunxvr1000.c
index 784c9bd..15b0795 100644
--- a/drivers/video/fbdev/sunxvr1000.c
+++ b/drivers/video/fbdev/sunxvr1000.c
@@ -59,7 +59,7 @@ static int gfb_setcolreg(unsigned regno,
 	return 0;
 }
 
-static struct fb_ops gfb_ops = {
+static const struct fb_ops gfb_ops = {
 	.owner			= THIS_MODULE,
 	.fb_setcolreg		= gfb_setcolreg,
 	.fb_fillrect		= cfb_fillrect,
diff --git a/drivers/video/fbdev/sunxvr2500.c b/drivers/video/fbdev/sunxvr2500.c
index 31683e5..1d3bacd 100644
--- a/drivers/video/fbdev/sunxvr2500.c
+++ b/drivers/video/fbdev/sunxvr2500.c
@@ -63,7 +63,7 @@ static int s3d_setcolreg(unsigned regno,
 	return 0;
 }
 
-static struct fb_ops s3d_ops = {
+static const struct fb_ops s3d_ops = {
 	.owner			= THIS_MODULE,
 	.fb_setcolreg		= s3d_setcolreg,
 	.fb_fillrect		= cfb_fillrect,
diff --git a/drivers/video/fbdev/sunxvr500.c b/drivers/video/fbdev/sunxvr500.c
index d392976..9daf17b 100644
--- a/drivers/video/fbdev/sunxvr500.c
+++ b/drivers/video/fbdev/sunxvr500.c
@@ -186,7 +186,7 @@ static void e3d_copyarea(struct fb_info *info, const struct fb_copyarea *area)
 	spin_unlock_irqrestore(&ep->lock, flags);
 }
 
-static struct fb_ops e3d_ops = {
+static const struct fb_ops e3d_ops = {
 	.owner			= THIS_MODULE,
 	.fb_setcolreg		= e3d_setcolreg,
 	.fb_fillrect		= e3d_fillrect,
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index 7897f86..34b2e5b 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -40,7 +40,7 @@ static int tcx_pan_display(struct fb_var_screeninfo *, struct fb_info *);
  *  Frame buffer operations
  */
 
-static struct fb_ops tcx_ops = {
+static const struct fb_ops tcx_ops = {
 	.owner			= THIS_MODULE,
 	.fb_setcolreg		= tcx_setcolreg,
 	.fb_blank		= tcx_blank,
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index 0337d1a..f73e26c 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -1141,7 +1141,7 @@ static int tdfxfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
 	return 0;
 }
 
-static struct fb_ops tdfxfb_ops = {
+static const struct fb_ops tdfxfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= tdfxfb_check_var,
 	.fb_set_par	= tdfxfb_set_par,
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index 1966f1d..e986913 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -70,7 +70,7 @@ static struct tc_driver tgafb_tc_driver;
  *  Frame buffer operations
  */
 
-static struct fb_ops tgafb_ops = {
+static const struct fb_ops tgafb_ops = {
 	.owner			= THIS_MODULE,
 	.fb_check_var		= tgafb_check_var,
 	.fb_set_par		= tgafb_set_par,
diff --git a/drivers/video/fbdev/tmiofb.c b/drivers/video/fbdev/tmiofb.c
index 4f2fcea..5011196 100644
--- a/drivers/video/fbdev/tmiofb.c
+++ b/drivers/video/fbdev/tmiofb.c
@@ -646,7 +646,7 @@ static int tmiofb_blank(int blank, struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops tmiofb_ops = {
+static const struct fb_ops tmiofb_ops = {
 	.owner		= THIS_MODULE,
 
 	.fb_ioctl	= tmiofb_ioctl,
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 91b2f6c..4d20cb5 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -1443,7 +1443,7 @@ static int tridentfb_blank(int blank_mode, struct fb_info *info)
 	return (blank_mode == FB_BLANK_NORMAL) ? 1 : 0;
 }
 
-static struct fb_ops tridentfb_ops = {
+static const struct fb_ops tridentfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_setcolreg = tridentfb_setcolreg,
 	.fb_pan_display = tridentfb_pan_display,
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index fe373b6..07905d3 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1037,7 +1037,6 @@ static int dlfb_ops_release(struct fb_info *info, int user)
 		fb_deferred_io_cleanup(info);
 		kfree(info->fbdefio);
 		info->fbdefio = NULL;
-		info->fbops->fb_mmap = dlfb_ops_mmap;
 	}
 
 	dev_dbg(info->dev, "release, user=%d count=%d\n", user, dlfb->fb_count);
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 439565c..53d08d1 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1440,7 +1440,7 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
 
 	/* Disable blanking if the user requested so. */
 	if (!blank)
-		info->fbops->fb_blank = NULL;
+		uvesafb_ops.fb_blank = NULL;
 
 	/*
 	 * Find out how much IO memory is required for the mode with
@@ -1510,7 +1510,7 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
 			(par->ypan ? FBINFO_HWACCEL_YPAN : 0);
 
 	if (!par->ypan)
-		info->fbops->fb_pan_display = NULL;
+		uvesafb_ops.fb_pan_display = NULL;
 }
 
 static void uvesafb_init_mtrr(struct fb_info *info)
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index 97a59b5..4d20c46 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -113,7 +113,7 @@ static int valkyrie_init_info(struct fb_info *info, struct fb_info_valkyrie *p);
 static void valkyrie_par_to_fix(struct fb_par_valkyrie *par, struct fb_fix_screeninfo *fix);
 static void valkyrie_init_fix(struct fb_fix_screeninfo *fix, struct fb_info_valkyrie *p);
 
-static struct fb_ops valkyriefb_ops = {
+static const struct fb_ops valkyriefb_ops = {
 	.owner =	THIS_MODULE,
 	.fb_check_var =	valkyriefb_check_var,
 	.fb_set_par =	valkyriefb_set_par,
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index d9c08f6..a1fe24e 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -447,15 +447,15 @@ static int vesafb_probe(struct platform_device *dev)
 	       vesafb_fix.smem_start, info->screen_base,
 	       size_remap/1024, size_total/1024);
 
+	if (!ypan)
+		vesafb_ops.fb_pan_display = NULL;
+
 	info->fbops = &vesafb_ops;
 	info->var = vesafb_defined;
 	info->fix = vesafb_fix;
 	info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
 		(ypan ? FBINFO_HWACCEL_YPAN : 0);
 
-	if (!ypan)
-		info->fbops->fb_pan_display = NULL;
-
 	if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
 		err = -ENOMEM;
 		goto err;
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index 5412790..95d3c59 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -78,7 +78,7 @@ static int vfb_pan_display(struct fb_var_screeninfo *var,
 static int vfb_mmap(struct fb_info *info,
 		    struct vm_area_struct *vma);
 
-static struct fb_ops vfb_ops = {
+static const struct fb_ops vfb_ops = {
 	.fb_read        = fb_sys_read,
 	.fb_write       = fb_sys_write,
 	.fb_check_var	= vfb_check_var,
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index 2c6a576..a20eeb8 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1270,7 +1270,7 @@ static void vga16fb_destroy(struct fb_info *info)
 	framebuffer_release(info);
 }
 
-static struct fb_ops vga16fb_ops = {
+static const struct fb_ops vga16fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_open        = vga16fb_open,
 	.fb_release     = vga16fb_release,
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index f815f98..852673c 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1173,13 +1173,12 @@ static ssize_t viafb_dvp0_proc_write(struct file *file,
 	return count;
 }
 
-static const struct file_operations viafb_dvp0_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= viafb_dvp0_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= viafb_dvp0_proc_write,
+static const struct proc_ops viafb_dvp0_proc_ops = {
+	.proc_open	= viafb_dvp0_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= viafb_dvp0_proc_write,
 };
 
 static int viafb_dvp1_proc_show(struct seq_file *m, void *v)
@@ -1238,13 +1237,12 @@ static ssize_t viafb_dvp1_proc_write(struct file *file,
 	return count;
 }
 
-static const struct file_operations viafb_dvp1_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= viafb_dvp1_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= viafb_dvp1_proc_write,
+static const struct proc_ops viafb_dvp1_proc_ops = {
+	.proc_open	= viafb_dvp1_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= viafb_dvp1_proc_write,
 };
 
 static int viafb_dfph_proc_show(struct seq_file *m, void *v)
@@ -1273,13 +1271,12 @@ static ssize_t viafb_dfph_proc_write(struct file *file,
 	return count;
 }
 
-static const struct file_operations viafb_dfph_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= viafb_dfph_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= viafb_dfph_proc_write,
+static const struct proc_ops viafb_dfph_proc_ops = {
+	.proc_open	= viafb_dfph_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= viafb_dfph_proc_write,
 };
 
 static int viafb_dfpl_proc_show(struct seq_file *m, void *v)
@@ -1308,13 +1305,12 @@ static ssize_t viafb_dfpl_proc_write(struct file *file,
 	return count;
 }
 
-static const struct file_operations viafb_dfpl_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= viafb_dfpl_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= viafb_dfpl_proc_write,
+static const struct proc_ops viafb_dfpl_proc_ops = {
+	.proc_open	= viafb_dfpl_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= viafb_dfpl_proc_write,
 };
 
 static int viafb_vt1636_proc_show(struct seq_file *m, void *v)
@@ -1444,13 +1440,12 @@ static ssize_t viafb_vt1636_proc_write(struct file *file,
 	return count;
 }
 
-static const struct file_operations viafb_vt1636_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= viafb_vt1636_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= viafb_vt1636_proc_write,
+static const struct proc_ops viafb_vt1636_proc_ops = {
+	.proc_open	= viafb_vt1636_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= viafb_vt1636_proc_write,
 };
 
 #endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
@@ -1522,13 +1517,12 @@ static ssize_t viafb_iga1_odev_proc_write(struct file *file,
 	return res;
 }
 
-static const struct file_operations viafb_iga1_odev_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= viafb_iga1_odev_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= viafb_iga1_odev_proc_write,
+static const struct proc_ops viafb_iga1_odev_proc_ops = {
+	.proc_open	= viafb_iga1_odev_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= viafb_iga1_odev_proc_write,
 };
 
 static int viafb_iga2_odev_proc_show(struct seq_file *m, void *v)
@@ -1562,13 +1556,12 @@ static ssize_t viafb_iga2_odev_proc_write(struct file *file,
 	return res;
 }
 
-static const struct file_operations viafb_iga2_odev_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= viafb_iga2_odev_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= viafb_iga2_odev_proc_write,
+static const struct proc_ops viafb_iga2_odev_proc_ops = {
+	.proc_open	= viafb_iga2_odev_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= viafb_iga2_odev_proc_write,
 };
 
 #define IS_VT1636(lvds_chip)	((lvds_chip).lvds_chip_name == VT1636_LVDS)
@@ -1580,14 +1573,14 @@ static void viafb_init_proc(struct viafb_shared *shared)
 	shared->proc_entry = viafb_entry;
 	if (viafb_entry) {
 #ifdef CONFIG_FB_VIA_DIRECT_PROCFS
-		proc_create("dvp0", 0, viafb_entry, &viafb_dvp0_proc_fops);
-		proc_create("dvp1", 0, viafb_entry, &viafb_dvp1_proc_fops);
-		proc_create("dfph", 0, viafb_entry, &viafb_dfph_proc_fops);
-		proc_create("dfpl", 0, viafb_entry, &viafb_dfpl_proc_fops);
+		proc_create("dvp0", 0, viafb_entry, &viafb_dvp0_proc_ops);
+		proc_create("dvp1", 0, viafb_entry, &viafb_dvp1_proc_ops);
+		proc_create("dfph", 0, viafb_entry, &viafb_dfph_proc_ops);
+		proc_create("dfpl", 0, viafb_entry, &viafb_dfpl_proc_ops);
 		if (IS_VT1636(shared->chip_info.lvds_chip_info)
 			|| IS_VT1636(shared->chip_info.lvds_chip_info2))
 			proc_create("vt1636", 0, viafb_entry,
-				&viafb_vt1636_proc_fops);
+				    &viafb_vt1636_proc_ops);
 #endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
 
 		proc_create_single("supported_output_devices", 0, viafb_entry,
@@ -1595,11 +1588,11 @@ static void viafb_init_proc(struct viafb_shared *shared)
 		iga1_entry = proc_mkdir("iga1", viafb_entry);
 		shared->iga1_proc_entry = iga1_entry;
 		proc_create("output_devices", 0, iga1_entry,
-			&viafb_iga1_odev_proc_fops);
+			    &viafb_iga1_odev_proc_ops);
 		iga2_entry = proc_mkdir("iga2", viafb_entry);
 		shared->iga2_proc_entry = iga2_entry;
 		proc_create("output_devices", 0, iga2_entry,
-			&viafb_iga2_odev_proc_fops);
+			    &viafb_iga2_odev_proc_ops);
 	}
 }
 static void viafb_remove_proc(struct viafb_shared *shared)
diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
index be8d970..f744479 100644
--- a/drivers/video/fbdev/vt8500lcdfb.c
+++ b/drivers/video/fbdev/vt8500lcdfb.c
@@ -238,7 +238,7 @@ static int vt8500lcd_blank(int blank, struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops vt8500lcd_ops = {
+static const struct fb_ops vt8500lcd_ops = {
 	.owner		= THIS_MODULE,
 	.fb_set_par	= vt8500lcd_set_par,
 	.fb_setcolreg	= vt8500lcd_setcolreg,
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index c339a8f..7b3eef1b 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -634,7 +634,7 @@ static int vt8623fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *i
 
 /* Frame buffer operations */
 
-static struct fb_ops vt8623fb_ops = {
+static const struct fb_ops vt8623fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_open	= vt8623fb_open,
 	.fb_release	= vt8623fb_release,
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index 0796b1d..ad26cbf 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -549,7 +549,7 @@ static int w100fb_set_par(struct fb_info *info)
 /*
  *  Frame buffer operations
  */
-static struct fb_ops w100fb_ops = {
+static const struct fb_ops w100fb_ops = {
 	.owner        = THIS_MODULE,
 	.fb_check_var = w100fb_check_var,
 	.fb_set_par   = w100fb_set_par,
diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index 17c7803..b656eff 100644
--- a/drivers/video/fbdev/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
@@ -246,7 +246,7 @@ static int wm8505fb_blank(int blank, struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops wm8505fb_ops = {
+static const struct fb_ops wm8505fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_set_par	= wm8505fb_set_par,
 	.fb_setcolreg	= wm8505fb_setcolreg,
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index a3d6b6d..00307b8 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -328,7 +328,7 @@ static int xenfb_set_par(struct fb_info *info)
 	return 0;
 }
 
-static struct fb_ops xenfb_fb_ops = {
+static const struct fb_ops xenfb_fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_read	= fb_sys_read,
 	.fb_write	= xenfb_write,
diff --git a/drivers/video/fbdev/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c
index 8628829..ca4ff65 100644
--- a/drivers/video/fbdev/xilinxfb.c
+++ b/drivers/video/fbdev/xilinxfb.c
@@ -247,7 +247,7 @@ xilinx_fb_blank(int blank_mode, struct fb_info *fbi)
 	return 0; /* success */
 }
 
-static struct fb_ops xilinxfb_ops = {
+static const struct fb_ops xilinxfb_ops = {
 	.owner			= THIS_MODULE,
 	.fb_setcolreg		= xilinx_fb_setcolreg,
 	.fb_blank		= xilinx_fb_blank,
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 93f995f..7bfe365 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -158,6 +158,8 @@ static void set_page_pfns(struct virtio_balloon *vb,
 {
 	unsigned int i;
 
+	BUILD_BUG_ON(VIRTIO_BALLOON_PAGES_PER_PAGE > VIRTIO_BALLOON_ARRAY_PFNS_MAX);
+
 	/*
 	 * Set balloon pfns pointing at this page.
 	 * Note that the first pfn points at start of the page.
@@ -475,7 +477,9 @@ static int init_vqs(struct virtio_balloon *vb)
 	names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate";
 	callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack;
 	names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate";
+	callbacks[VIRTIO_BALLOON_VQ_STATS] = NULL;
 	names[VIRTIO_BALLOON_VQ_STATS] = NULL;
+	callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
 	names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
 
 	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
@@ -899,8 +903,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
 	vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
 	if (IS_ERR(vb->vb_dev_info.inode)) {
 		err = PTR_ERR(vb->vb_dev_info.inode);
-		kern_unmount(balloon_mnt);
-		goto out_del_vqs;
+		goto out_kern_unmount;
 	}
 	vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
 #endif
@@ -911,13 +914,13 @@ static int virtballoon_probe(struct virtio_device *vdev)
 		 */
 		if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
 			err = -ENOSPC;
-			goto out_del_vqs;
+			goto out_iput;
 		}
 		vb->balloon_wq = alloc_workqueue("balloon-wq",
 					WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
 		if (!vb->balloon_wq) {
 			err = -ENOMEM;
-			goto out_del_vqs;
+			goto out_iput;
 		}
 		INIT_WORK(&vb->report_free_page_work, report_free_page_func);
 		vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
@@ -951,6 +954,12 @@ static int virtballoon_probe(struct virtio_device *vdev)
 out_del_balloon_wq:
 	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
 		destroy_workqueue(vb->balloon_wq);
+out_iput:
+#ifdef CONFIG_BALLOON_COMPACTION
+	iput(vb->vb_dev_info.inode);
+out_kern_unmount:
+	kern_unmount(balloon_mnt);
+#endif
 out_del_vqs:
 	vdev->config->del_vqs(vdev);
 out_free_vb:
@@ -966,6 +975,10 @@ static void remove_common(struct virtio_balloon *vb)
 		leak_balloon(vb, vb->num_pages);
 	update_balloon_size(vb);
 
+	/* There might be free pages that are being reported: release them. */
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+		return_free_pages_to_mm(vb, ULONG_MAX);
+
 	/* Now we reset the device so we can clean up the queues. */
 	vb->vdev->config->reset(vb->vdev);
 
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index e09edb5..97d5725 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -531,18 +531,9 @@ static void virtio_mmio_release_dev(struct device *_d)
 static int virtio_mmio_probe(struct platform_device *pdev)
 {
 	struct virtio_mmio_device *vm_dev;
-	struct resource *mem;
 	unsigned long magic;
 	int rc;
 
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem)
-		return -EINVAL;
-
-	if (!devm_request_mem_region(&pdev->dev, mem->start,
-			resource_size(mem), pdev->name))
-		return -EBUSY;
-
 	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
 	if (!vm_dev)
 		return -ENOMEM;
@@ -554,9 +545,9 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&vm_dev->virtqueues);
 	spin_lock_init(&vm_dev->lock);
 
-	vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (vm_dev->base == NULL)
-		return -EFAULT;
+	vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(vm_dev->base))
+		return PTR_ERR(vm_dev->base);
 
 	/* Check magic value */
 	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index f2862f6..222d630 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -294,7 +294,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 		/* Best option: one for change interrupt, one per vq. */
 		nvectors = 1;
 		for (i = 0; i < nvqs; ++i)
-			if (callbacks[i])
+			if (names[i] && callbacks[i])
 				++nvectors;
 	} else {
 		/* Second best: one for change, shared for all vqs. */
diff --git a/drivers/visorbus/visorchipset.c b/drivers/visorbus/visorchipset.c
index ca752b8..cb1eb7e 100644
--- a/drivers/visorbus/visorchipset.c
+++ b/drivers/visorbus/visorchipset.c
@@ -1210,14 +1210,17 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
 {
 	struct controlvm_message local_crash_bus_msg;
 	struct controlvm_message local_crash_dev_msg;
-	struct controlvm_message msg;
+	struct controlvm_message msg = {
+		.hdr.id = CONTROLVM_CHIPSET_INIT,
+		.cmd.init_chipset = {
+			.bus_count = 23,
+			.switch_count = 0,
+		},
+	};
 	u32 local_crash_msg_offset;
 	u16 local_crash_msg_count;
 
 	/* send init chipset msg */
-	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
-	msg.cmd.init_chipset.bus_count = 23;
-	msg.cmd.init_chipset.switch_count = 0;
 	chipset_init(&msg);
 	/* get saved message count */
 	if (visorchannel_read(chipset_dev->controlvm_channel,
diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c
index 3208a44..6a1bc284 100644
--- a/drivers/vme/bridges/vme_fake.c
+++ b/drivers/vme/bridges/vme_fake.c
@@ -414,8 +414,9 @@ static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr,
 	}
 }
 
-static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr,
-		u32 aspace, u32 cycle)
+static noinline_for_stack u8 fake_vmeread8(struct fake_driver *bridge,
+					   unsigned long long addr,
+					   u32 aspace, u32 cycle)
 {
 	u8 retval = 0xff;
 	int i;
@@ -446,8 +447,9 @@ static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr,
 	return retval;
 }
 
-static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr,
-		u32 aspace, u32 cycle)
+static noinline_for_stack u16 fake_vmeread16(struct fake_driver *bridge,
+					     unsigned long long addr,
+					     u32 aspace, u32 cycle)
 {
 	u16 retval = 0xffff;
 	int i;
@@ -478,8 +480,9 @@ static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr,
 	return retval;
 }
 
-static u32 fake_vmeread32(struct fake_driver *bridge, unsigned long long addr,
-		u32 aspace, u32 cycle)
+static noinline_for_stack u32 fake_vmeread32(struct fake_driver *bridge,
+					     unsigned long long addr,
+					     u32 aspace, u32 cycle)
 {
 	u32 retval = 0xffffffff;
 	int i;
@@ -609,8 +612,9 @@ static ssize_t fake_master_read(struct vme_master_resource *image, void *buf,
 	return retval;
 }
 
-static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf,
-			   unsigned long long addr, u32 aspace, u32 cycle)
+static noinline_for_stack void fake_vmewrite8(struct fake_driver *bridge,
+					      u8 *buf, unsigned long long addr,
+					      u32 aspace, u32 cycle)
 {
 	int i;
 	unsigned long long start, end, offset;
@@ -639,8 +643,9 @@ static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf,
 
 }
 
-static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf,
-			    unsigned long long addr, u32 aspace, u32 cycle)
+static noinline_for_stack void fake_vmewrite16(struct fake_driver *bridge,
+					       u16 *buf, unsigned long long addr,
+					       u32 aspace, u32 cycle)
 {
 	int i;
 	unsigned long long start, end, offset;
@@ -669,8 +674,9 @@ static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf,
 
 }
 
-static void fake_vmewrite32(struct fake_driver *bridge, u32 *buf,
-			    unsigned long long addr, u32 aspace, u32 cycle)
+static noinline_for_stack void fake_vmewrite32(struct fake_driver *bridge,
+					       u32 *buf, unsigned long long addr,
+					       u32 aspace, u32 cycle)
 {
 	int i;
 	unsigned long long start, end, offset;
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 4164045..aa09f85 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -38,12 +38,6 @@
 #define OMAP_HDQ_INT_STATUS_TXCOMPLETE		BIT(2)
 #define OMAP_HDQ_INT_STATUS_RXCOMPLETE		BIT(1)
 #define OMAP_HDQ_INT_STATUS_TIMEOUT		BIT(0)
-#define OMAP_HDQ_SYSCONFIG			0x14
-#define OMAP_HDQ_SYSCONFIG_SOFTRESET		BIT(1)
-#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		BIT(0)
-#define OMAP_HDQ_SYSCONFIG_NOIDLE		0x0
-#define OMAP_HDQ_SYSSTATUS			0x18
-#define OMAP_HDQ_SYSSTATUS_RESETDONE		BIT(0)
 
 #define OMAP_HDQ_FLAG_CLEAR			0
 #define OMAP_HDQ_FLAG_SET			1
@@ -62,17 +56,9 @@ struct hdq_data {
 	void __iomem		*hdq_base;
 	/* lock status update */
 	struct  mutex		hdq_mutex;
-	int			hdq_usecount;
 	u8			hdq_irqstatus;
 	/* device lock */
 	spinlock_t		hdq_spinlock;
-	/*
-	 * Used to control the call to omap_hdq_get and omap_hdq_put.
-	 * HDQ Protocol: Write the CMD|REG_address first, followed by
-	 * the data wrire or read.
-	 */
-	int			init_trans;
-	int                     rrw;
 	/* mode: 0-HDQ 1-W1 */
 	int                     mode;
 
@@ -99,15 +85,6 @@ static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
 	return new_val;
 }
 
-static void hdq_disable_interrupt(struct hdq_data *hdq_data, u32 offset,
-				  u32 mask)
-{
-	u32 ie;
-
-	ie = readl(hdq_data->hdq_base + offset);
-	writel(ie & mask, hdq_data->hdq_base + offset);
-}
-
 /*
  * Wait for one or more bits in flag change.
  * HDQ_FLAG_SET: wait until any bit in the flag is set.
@@ -142,22 +119,24 @@ static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
 	return ret;
 }
 
+/* Clear saved irqstatus after using an interrupt */
+static void hdq_reset_irqstatus(struct hdq_data *hdq_data)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
+	hdq_data->hdq_irqstatus = 0;
+	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
+}
+
 /* write out a byte and fill *status with HDQ_INT_STATUS */
 static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
 {
 	int ret;
 	u8 tmp_status;
-	unsigned long irqflags;
 
 	*status = 0;
 
-	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
-	/* clear interrupt flags via a dummy read */
-	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
-	/* ISR loads it with new INT_STATUS */
-	hdq_data->hdq_irqstatus = 0;
-	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
-
 	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
 
 	/* set the GO bit */
@@ -191,6 +170,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
 	}
 
 out:
+	hdq_reset_irqstatus(hdq_data);
 	return ret;
 }
 
@@ -237,47 +217,11 @@ static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
 	slave_found(master_dev, id);
 }
 
-static int _omap_hdq_reset(struct hdq_data *hdq_data)
-{
-	int ret;
-	u8 tmp_status;
-
-	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
-		    OMAP_HDQ_SYSCONFIG_SOFTRESET);
-	/*
-	 * Select HDQ/1W mode & enable clocks.
-	 * It is observed that INT flags can't be cleared via a read and GO/INIT
-	 * won't return to zero if interrupt is disabled. So we always enable
-	 * interrupt.
-	 */
-	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
-		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
-		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
-
-	/* wait for reset to complete */
-	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
-		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
-	if (ret)
-		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
-				tmp_status);
-	else {
-		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
-			OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
-			OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
-			hdq_data->mode);
-		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
-			OMAP_HDQ_SYSCONFIG_AUTOIDLE);
-	}
-
-	return ret;
-}
-
 /* Issue break pulse to the device */
 static int omap_hdq_break(struct hdq_data *hdq_data)
 {
 	int ret = 0;
 	u8 tmp_status;
-	unsigned long irqflags;
 
 	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
 	if (ret < 0) {
@@ -286,13 +230,6 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
 		goto rtn;
 	}
 
-	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
-	/* clear interrupt flags via a dummy read */
-	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
-	/* ISR loads it with new INT_STATUS */
-	hdq_data->hdq_irqstatus = 0;
-	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
-
 	/* set the INIT and GO bit */
 	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
 		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
@@ -341,6 +278,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
 			" return to zero, %x", tmp_status);
 
 out:
+	hdq_reset_irqstatus(hdq_data);
 	mutex_unlock(&hdq_data->hdq_mutex);
 rtn:
 	return ret;
@@ -357,7 +295,7 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
 		goto rtn;
 	}
 
-	if (!hdq_data->hdq_usecount) {
+	if (pm_runtime_suspended(hdq_data->dev)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -388,86 +326,13 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
 	/* the data is ready. Read it in! */
 	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
 out:
+	hdq_reset_irqstatus(hdq_data);
 	mutex_unlock(&hdq_data->hdq_mutex);
 rtn:
 	return ret;
 
 }
 
-/* Enable clocks and set the controller to HDQ/1W mode */
-static int omap_hdq_get(struct hdq_data *hdq_data)
-{
-	int ret = 0;
-
-	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
-	if (ret < 0) {
-		ret = -EINTR;
-		goto rtn;
-	}
-
-	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
-		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
-		ret = -EINVAL;
-		goto out;
-	} else {
-		hdq_data->hdq_usecount++;
-		try_module_get(THIS_MODULE);
-		if (1 == hdq_data->hdq_usecount) {
-
-			pm_runtime_get_sync(hdq_data->dev);
-
-			/* make sure HDQ/1W is out of reset */
-			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
-				OMAP_HDQ_SYSSTATUS_RESETDONE)) {
-				ret = _omap_hdq_reset(hdq_data);
-				if (ret)
-					/* back up the count */
-					hdq_data->hdq_usecount--;
-			} else {
-				/* select HDQ/1W mode & enable clocks */
-				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
-					OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
-					OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
-					hdq_data->mode);
-				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
-					OMAP_HDQ_SYSCONFIG_NOIDLE);
-				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
-			}
-		}
-	}
-
-out:
-	mutex_unlock(&hdq_data->hdq_mutex);
-rtn:
-	return ret;
-}
-
-/* Disable clocks to the module */
-static int omap_hdq_put(struct hdq_data *hdq_data)
-{
-	int ret = 0;
-
-	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
-	if (ret < 0)
-		return -EINTR;
-
-	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
-		    OMAP_HDQ_SYSCONFIG_AUTOIDLE);
-	if (0 == hdq_data->hdq_usecount) {
-		dev_dbg(hdq_data->dev, "attempt to decrement use count"
-			" when it is zero");
-		ret = -EINVAL;
-	} else {
-		hdq_data->hdq_usecount--;
-		module_put(THIS_MODULE);
-		if (0 == hdq_data->hdq_usecount)
-			pm_runtime_put_sync(hdq_data->dev);
-	}
-	mutex_unlock(&hdq_data->hdq_mutex);
-
-	return ret;
-}
-
 /*
  * W1 triplet callback function - used for searching ROM addresses.
  * Registered only when controller is in 1-wire mode.
@@ -482,7 +347,12 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
 		  OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
 	u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;
 
-	omap_hdq_get(_hdq);
+	err = pm_runtime_get_sync(hdq_data->dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(hdq_data->dev);
+
+		return err;
+	}
 
 	err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
 	if (err < 0) {
@@ -490,7 +360,6 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
 		goto rtn;
 	}
 
-	hdq_data->hdq_irqstatus = 0;
 	/* read id_bit */
 	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
 		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
@@ -504,7 +373,9 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
 	}
 	id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);
 
-	hdq_data->hdq_irqstatus = 0;
+	/* Must clear irqstatus for another RXCOMPLETE interrupt */
+	hdq_reset_irqstatus(hdq_data);
+
 	/* read comp_bit */
 	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
 		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
@@ -547,18 +418,33 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
 		      OMAP_HDQ_CTRL_STATUS_SINGLE);
 
 out:
+	hdq_reset_irqstatus(hdq_data);
 	mutex_unlock(&hdq_data->hdq_mutex);
 rtn:
-	omap_hdq_put(_hdq);
+	pm_runtime_mark_last_busy(hdq_data->dev);
+	pm_runtime_put_autosuspend(hdq_data->dev);
+
 	return ret;
 }
 
 /* reset callback */
 static u8 omap_w1_reset_bus(void *_hdq)
 {
-	omap_hdq_get(_hdq);
-	omap_hdq_break(_hdq);
-	omap_hdq_put(_hdq);
+	struct hdq_data *hdq_data = _hdq;
+	int err;
+
+	err = pm_runtime_get_sync(hdq_data->dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(hdq_data->dev);
+
+		return err;
+	}
+
+	omap_hdq_break(hdq_data);
+
+	pm_runtime_mark_last_busy(hdq_data->dev);
+	pm_runtime_put_autosuspend(hdq_data->dev);
+
 	return 0;
 }
 
@@ -569,37 +455,19 @@ static u8 omap_w1_read_byte(void *_hdq)
 	u8 val = 0;
 	int ret;
 
-	/* First write to initialize the transfer */
-	if (hdq_data->init_trans == 0)
-		omap_hdq_get(hdq_data);
+	ret = pm_runtime_get_sync(hdq_data->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(hdq_data->dev);
 
-	ret = hdq_read_byte(hdq_data, &val);
-	if (ret) {
-		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
-		if (ret < 0) {
-			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
-			return -EINTR;
-		}
-		hdq_data->init_trans = 0;
-		mutex_unlock(&hdq_data->hdq_mutex);
-		omap_hdq_put(hdq_data);
 		return -1;
 	}
 
-	hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS,
-			      ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
+	ret = hdq_read_byte(hdq_data, &val);
+	if (ret)
+		ret = -1;
 
-	/* Write followed by a read, release the module */
-	if (hdq_data->init_trans) {
-		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
-		if (ret < 0) {
-			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
-			return -EINTR;
-		}
-		hdq_data->init_trans = 0;
-		mutex_unlock(&hdq_data->hdq_mutex);
-		omap_hdq_put(hdq_data);
-	}
+	pm_runtime_mark_last_busy(hdq_data->dev);
+	pm_runtime_put_autosuspend(hdq_data->dev);
 
 	return val;
 }
@@ -611,9 +479,12 @@ static void omap_w1_write_byte(void *_hdq, u8 byte)
 	int ret;
 	u8 status;
 
-	/* First write to initialize the transfer */
-	if (hdq_data->init_trans == 0)
-		omap_hdq_get(hdq_data);
+	ret = pm_runtime_get_sync(hdq_data->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(hdq_data->dev);
+
+		return;
+	}
 
 	/*
 	 * We need to reset the slave before
@@ -623,31 +494,15 @@ static void omap_w1_write_byte(void *_hdq, u8 byte)
 	if (byte == W1_SKIP_ROM)
 		omap_hdq_break(hdq_data);
 
-	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
-	if (ret < 0) {
-		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
-		return;
-	}
-	hdq_data->init_trans++;
-	mutex_unlock(&hdq_data->hdq_mutex);
-
 	ret = hdq_write_byte(hdq_data, byte, &status);
 	if (ret < 0) {
 		dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
-		return;
+		goto out_err;
 	}
 
-	/* Second write, data transferred. Release the module */
-	if (hdq_data->init_trans > 1) {
-		omap_hdq_put(hdq_data);
-		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
-		if (ret < 0) {
-			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
-			return;
-		}
-		hdq_data->init_trans = 0;
-		mutex_unlock(&hdq_data->hdq_mutex);
-	}
+out_err:
+	pm_runtime_mark_last_busy(hdq_data->dev);
+	pm_runtime_put_autosuspend(hdq_data->dev);
 }
 
 static struct w1_bus_master omap_w1_master = {
@@ -656,6 +511,35 @@ static struct w1_bus_master omap_w1_master = {
 	.reset_bus	= omap_w1_reset_bus,
 };
 
+static int __maybe_unused omap_hdq_runtime_suspend(struct device *dev)
+{
+	struct hdq_data *hdq_data = dev_get_drvdata(dev);
+
+	hdq_reg_out(hdq_data, 0, hdq_data->mode);
+	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
+
+	return 0;
+}
+
+static int __maybe_unused omap_hdq_runtime_resume(struct device *dev)
+{
+	struct hdq_data *hdq_data = dev_get_drvdata(dev);
+
+	/* select HDQ/1W mode & enable clocks */
+	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
+		    OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
+		    OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
+		    hdq_data->mode);
+	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
+
+	return 0;
+}
+
+static const struct dev_pm_ops omap_hdq_pm_ops = {
+	SET_RUNTIME_PM_OPS(omap_hdq_runtime_suspend,
+			   omap_hdq_runtime_resume, NULL)
+};
+
 static int omap_hdq_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -677,21 +561,25 @@ static int omap_hdq_probe(struct platform_device *pdev)
 	if (IS_ERR(hdq_data->hdq_base))
 		return PTR_ERR(hdq_data->hdq_base);
 
-	hdq_data->hdq_usecount = 0;
-	hdq_data->rrw = 0;
 	mutex_init(&hdq_data->hdq_mutex);
 
-	pm_runtime_enable(&pdev->dev);
-	ret = pm_runtime_get_sync(&pdev->dev);
-	if (ret < 0) {
-		dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
-		goto err_w1;
+	ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
+	if (ret < 0 || !strcmp(mode, "hdq")) {
+		hdq_data->mode = 0;
+		omap_w1_master.search = omap_w1_search_bus;
+	} else {
+		hdq_data->mode = 1;
+		omap_w1_master.triplet = omap_w1_triplet;
 	}
 
-	ret = _omap_hdq_reset(hdq_data);
-	if (ret) {
-		dev_dbg(&pdev->dev, "reset failed\n");
-		goto err_irq;
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 300);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(&pdev->dev);
+		dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
+		goto err_w1;
 	}
 
 	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
@@ -715,16 +603,8 @@ static int omap_hdq_probe(struct platform_device *pdev)
 
 	omap_hdq_break(hdq_data);
 
-	pm_runtime_put_sync(&pdev->dev);
-
-	ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
-	if (ret < 0 || !strcmp(mode, "hdq")) {
-		hdq_data->mode = 0;
-		omap_w1_master.search = omap_w1_search_bus;
-	} else {
-		hdq_data->mode = 1;
-		omap_w1_master.triplet = omap_w1_triplet;
-	}
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_put_autosuspend(&pdev->dev);
 
 	omap_w1_master.data = hdq_data;
 
@@ -739,6 +619,7 @@ static int omap_hdq_probe(struct platform_device *pdev)
 err_irq:
 	pm_runtime_put_sync(&pdev->dev);
 err_w1:
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
 	return ret;
@@ -746,23 +627,19 @@ static int omap_hdq_probe(struct platform_device *pdev)
 
 static int omap_hdq_remove(struct platform_device *pdev)
 {
-	struct hdq_data *hdq_data = platform_get_drvdata(pdev);
+	int active;
 
-	mutex_lock(&hdq_data->hdq_mutex);
-
-	if (hdq_data->hdq_usecount) {
-		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
-		mutex_unlock(&hdq_data->hdq_mutex);
-		return -EBUSY;
-	}
-
-	mutex_unlock(&hdq_data->hdq_mutex);
-
-	/* remove module dependency */
-	pm_runtime_disable(&pdev->dev);
+	active = pm_runtime_get_sync(&pdev->dev);
+	if (active < 0)
+		pm_runtime_put_noidle(&pdev->dev);
 
 	w1_remove_master_device(&omap_w1_master);
 
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	if (active >= 0)
+		pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
 	return 0;
 }
 
@@ -779,6 +656,7 @@ static struct platform_driver omap_hdq_driver = {
 	.driver = {
 		.name =	"omap_hdq",
 		.of_match_table = omap_hdq_dt_ids,
+		.pm = &omap_hdq_pm_ops,
 	},
 };
 module_platform_driver(omap_hdq_driver);
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
index abfe34d..298d545 100644
--- a/drivers/watchdog/at91sam9_wdt.h
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -24,7 +24,10 @@
 #define AT91_WDT_MR		0x04			/* Watchdog Mode Register */
 #define  AT91_WDT_WDV		(0xfffUL << 0)		/* Counter Value */
 #define  AT91_WDT_SET_WDV(x)	((x) & AT91_WDT_WDV)
+#define  AT91_SAM9X60_PERIODRST	BIT(4)		/* Period Reset */
+#define  AT91_SAM9X60_RPTHRST	BIT(5)		/* Minimum Restart Period */
 #define  AT91_WDT_WDFIEN	BIT(12)		/* Fault Interrupt Enable */
+#define  AT91_SAM9X60_WDDIS	BIT(12)		/* Watchdog Disable */
 #define  AT91_WDT_WDRSTEN	BIT(13)		/* Reset Processor */
 #define  AT91_WDT_WDRPROC	BIT(14)		/* Timer Restart */
 #define  AT91_WDT_WDDIS		BIT(15)		/* Watchdog Disable */
@@ -37,4 +40,22 @@
 #define  AT91_WDT_WDUNF		BIT(0)		/* Watchdog Underflow */
 #define  AT91_WDT_WDERR		BIT(1)		/* Watchdog Error */
 
+/* Watchdog Timer Value Register */
+#define AT91_SAM9X60_VR		0x08
+
+/* Watchdog Window Level Register */
+#define AT91_SAM9X60_WLR	0x0c
+/* Watchdog Period Value */
+#define  AT91_SAM9X60_COUNTER	(0xfffUL << 0)
+#define  AT91_SAM9X60_SET_COUNTER(x)	((x) & AT91_SAM9X60_COUNTER)
+
+/* Interrupt Enable Register */
+#define AT91_SAM9X60_IER	0x14
+/* Period Interrupt Enable */
+#define  AT91_SAM9X60_PERINT	BIT(0)
+/* Interrupt Disable Register */
+#define AT91_SAM9X60_IDR	0x18
+/* Interrupt Status Register */
+#define AT91_SAM9X60_ISR	0x1c
+
 #endif
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index 06bd4e1..672b184 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -369,9 +369,8 @@ static int cdns_wdt_probe(struct platform_device *pdev)
 		return ret;
 	platform_set_drvdata(pdev, wdt);
 
-	dev_info(dev, "Xilinx Watchdog Timer at %p with timeout %ds%s\n",
-		 wdt->regs, cdns_wdt_device->timeout,
-		 nowayout ? ", nowayout" : "");
+	dev_info(dev, "Xilinx Watchdog Timer with timeout %ds%s\n",
+		 cdns_wdt_device->timeout, nowayout ? ", nowayout" : "");
 
 	return 0;
 }
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index e149e66..47eefe0 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
+#include <linux/i2c.h>
 #include <linux/delay.h>
 #include <linux/jiffies.h>
 #include <linux/mfd/da9062/registers.h>
@@ -147,12 +148,13 @@ static int da9062_wdt_restart(struct watchdog_device *wdd, unsigned long action,
 			      void *data)
 {
 	struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
+	struct i2c_client *client = to_i2c_client(wdt->hw->dev);
 	int ret;
 
-	ret = regmap_write(wdt->hw->regmap,
-			   DA9062AA_CONTROL_F,
-			   DA9062AA_SHUTDOWN_MASK);
-	if (ret)
+	/* Don't use regmap because it is not safe in atomic context */
+	ret = i2c_smbus_write_byte_data(client, DA9062AA_CONTROL_F,
+					DA9062AA_SHUTDOWN_MASK);
+	if (ret < 0)
 		dev_alert(wdt->hw->dev, "Failed to shutdown (err = %d)\n",
 			  ret);
 
@@ -212,6 +214,7 @@ static int da9062_wdt_probe(struct platform_device *pdev)
 	watchdog_set_restart_priority(&wdt->wdtdev, 128);
 
 	watchdog_set_drvdata(&wdt->wdtdev, wdt);
+	dev_set_drvdata(dev, &wdt->wdtdev);
 
 	ret = devm_watchdog_register_device(dev, &wdt->wdtdev);
 	if (ret < 0)
@@ -220,10 +223,34 @@ static int da9062_wdt_probe(struct platform_device *pdev)
 	return da9062_wdt_ping(&wdt->wdtdev);
 }
 
+static int __maybe_unused da9062_wdt_suspend(struct device *dev)
+{
+	struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+	if (watchdog_active(wdd))
+		return da9062_wdt_stop(wdd);
+
+	return 0;
+}
+
+static int __maybe_unused da9062_wdt_resume(struct device *dev)
+{
+	struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+	if (watchdog_active(wdd))
+		return da9062_wdt_start(wdd);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(da9062_wdt_pm_ops,
+			 da9062_wdt_suspend, da9062_wdt_resume);
+
 static struct platform_driver da9062_wdt_driver = {
 	.probe = da9062_wdt_probe,
 	.driver = {
 		.name = "da9062-watchdog",
+		.pm = &da9062_wdt_pm_ops,
 		.of_match_table = da9062_compatible_id_table,
 	},
 };
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index fef7c61..fba21de 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -114,7 +114,15 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
 	writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT,
 	       dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
 
-	wdd->timeout = dw_wdt_top_in_seconds(dw_wdt, top_val);
+	/*
+	 * If the user requests a timeout bigger than the hardware can
+	 * support, the watchdog core (watchdog_dev.c) keeps feeding the
+	 * watchdog before wdd->max_hw_heartbeat_ms expires.
+	 */
+	if (top_s * 1000 <= wdd->max_hw_heartbeat_ms)
+		wdd->timeout = dw_wdt_top_in_seconds(dw_wdt, top_val);
+	else
+		wdd->timeout = top_s;
 
 	return 0;
 }
@@ -135,6 +143,7 @@ static int dw_wdt_start(struct watchdog_device *wdd)
 	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
 
 	dw_wdt_set_timeout(wdd, wdd->timeout);
+	dw_wdt_ping(&dw_wdt->wdd);
 	dw_wdt_arm_system_reset(dw_wdt);
 
 	return 0;
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index a4b71eb..f3bf3ea 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -67,6 +67,7 @@
 #define IT8726_ID	0x8726	/* the data sheet suggest wrongly 0x8716 */
 #define IT8728_ID	0x8728
 #define IT8783_ID	0x8783
+#define IT8786_ID	0x8786
 
 /* GPIO Configuration Registers LDN=0x07 */
 #define WDTCTRL		0x71
@@ -294,6 +295,7 @@ static int __init it87_wdt_init(void)
 	case IT8721_ID:
 	case IT8728_ID:
 	case IT8783_ID:
+	case IT8786_ID:
 		max_units = 65535;
 		break;
 	case IT8705_ID:
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 9c3d003..d6a6393 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -9,6 +9,9 @@
  * Based on sunxi_wdt.c
  */
 
+#include <dt-bindings/reset-controller/mt2712-resets.h>
+#include <dt-bindings/reset-controller/mt8183-resets.h>
+#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -16,10 +19,11 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/reset-controller.h>
 #include <linux/types.h>
 #include <linux/watchdog.h>
-#include <linux/delay.h>
 
 #define WDT_MAX_TIMEOUT		31
 #define WDT_MIN_TIMEOUT		1
@@ -44,6 +48,9 @@
 #define WDT_SWRST		0x14
 #define WDT_SWRST_KEY		0x1209
 
+#define WDT_SWSYSRST		0x18U
+#define WDT_SWSYS_RST_KEY	0x88000000
+
 #define DRV_NAME		"mtk-wdt"
 #define DRV_VERSION		"1.0"
 
@@ -53,8 +60,94 @@ static unsigned int timeout;
 struct mtk_wdt_dev {
 	struct watchdog_device wdt_dev;
 	void __iomem *wdt_base;
+	spinlock_t lock; /* protects WDT_SWSYSRST reg */
+	struct reset_controller_dev rcdev;
 };
 
+struct mtk_wdt_data {
+	int toprgu_sw_rst_num;
+};
+
+static const struct mtk_wdt_data mt2712_data = {
+	.toprgu_sw_rst_num = MT2712_TOPRGU_SW_RST_NUM,
+};
+
+static const struct mtk_wdt_data mt8183_data = {
+	.toprgu_sw_rst_num = MT8183_TOPRGU_SW_RST_NUM,
+};
+
+static int toprgu_reset_update(struct reset_controller_dev *rcdev,
+			       unsigned long id, bool assert)
+{
+	unsigned int tmp;
+	unsigned long flags;
+	struct mtk_wdt_dev *data =
+		 container_of(rcdev, struct mtk_wdt_dev, rcdev);
+
+	spin_lock_irqsave(&data->lock, flags);
+
+	tmp = readl(data->wdt_base + WDT_SWSYSRST);
+	if (assert)
+		tmp |= BIT(id);
+	else
+		tmp &= ~BIT(id);
+	tmp |= WDT_SWSYS_RST_KEY;
+	writel(tmp, data->wdt_base + WDT_SWSYSRST);
+
+	spin_unlock_irqrestore(&data->lock, flags);
+
+	return 0;
+}
+
+static int toprgu_reset_assert(struct reset_controller_dev *rcdev,
+			       unsigned long id)
+{
+	return toprgu_reset_update(rcdev, id, true);
+}
+
+static int toprgu_reset_deassert(struct reset_controller_dev *rcdev,
+				 unsigned long id)
+{
+	return toprgu_reset_update(rcdev, id, false);
+}
+
+static int toprgu_reset(struct reset_controller_dev *rcdev,
+			unsigned long id)
+{
+	int ret;
+
+	ret = toprgu_reset_assert(rcdev, id);
+	if (ret)
+		return ret;
+
+	return toprgu_reset_deassert(rcdev, id);
+}
+
+static const struct reset_control_ops toprgu_reset_ops = {
+	.assert = toprgu_reset_assert,
+	.deassert = toprgu_reset_deassert,
+	.reset = toprgu_reset,
+};
+
+static int toprgu_register_reset_controller(struct platform_device *pdev,
+					    int rst_num)
+{
+	int ret;
+	struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
+
+	spin_lock_init(&mtk_wdt->lock);
+
+	mtk_wdt->rcdev.owner = THIS_MODULE;
+	mtk_wdt->rcdev.nr_resets = rst_num;
+	mtk_wdt->rcdev.ops = &toprgu_reset_ops;
+	mtk_wdt->rcdev.of_node = pdev->dev.of_node;
+	ret = devm_reset_controller_register(&pdev->dev, &mtk_wdt->rcdev);
+	if (ret != 0)
+		dev_err(&pdev->dev,
+			"couldn't register wdt reset controller: %d\n", ret);
+	return ret;
+}
+
 static int mtk_wdt_restart(struct watchdog_device *wdt_dev,
 			   unsigned long action, void *data)
 {
@@ -155,6 +248,7 @@ static int mtk_wdt_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct mtk_wdt_dev *mtk_wdt;
+	const struct mtk_wdt_data *wdt_data;
 	int err;
 
 	mtk_wdt = devm_kzalloc(dev, sizeof(*mtk_wdt), GFP_KERNEL);
@@ -190,6 +284,13 @@ static int mtk_wdt_probe(struct platform_device *pdev)
 	dev_info(dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)\n",
 		 mtk_wdt->wdt_dev.timeout, nowayout);
 
+	wdt_data = of_device_get_match_data(dev);
+	if (wdt_data) {
+		err = toprgu_register_reset_controller(pdev,
+						       wdt_data->toprgu_sw_rst_num);
+		if (err)
+			return err;
+	}
 	return 0;
 }
 
@@ -218,7 +319,9 @@ static int mtk_wdt_resume(struct device *dev)
 #endif
 
 static const struct of_device_id mtk_wdt_dt_ids[] = {
+	{ .compatible = "mediatek,mt2712-wdt", .data = &mt2712_data },
 	{ .compatible = "mediatek,mt6589-wdt" },
+	{ .compatible = "mediatek,mt8183-wdt", .data = &mt8183_data },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index a494543..eb47fe5 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -246,7 +246,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
 	}
 
 	/* check if there is pretimeout support */
-	irq = platform_get_irq(pdev, 0);
+	irq = platform_get_irq_optional(pdev, 0);
 	if (irq > 0) {
 		ret = devm_request_irq(dev, irq, qcom_wdt_isr,
 				       IRQF_TRIGGER_RISING,
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index d193a60..e5d11d6 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -2,7 +2,7 @@
 /*
  * Driver for Atmel SAMA5D4 Watchdog Timer
  *
- * Copyright (C) 2015 Atmel Corporation
+ * Copyright (C) 2015-2019 Microchip Technology Inc. and its subsidiaries
  */
 
 #include <linux/delay.h>
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
 #include <linux/reboot.h>
@@ -29,7 +30,10 @@ struct sama5d4_wdt {
 	struct watchdog_device	wdd;
 	void __iomem		*reg_base;
 	u32			mr;
+	u32			ir;
 	unsigned long		last_ping;
+	bool			need_irq;
+	bool			sam9x60_support;
 };
 
 static int wdt_timeout;
@@ -78,7 +82,12 @@ static int sama5d4_wdt_start(struct watchdog_device *wdd)
 {
 	struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
 
-	wdt->mr &= ~AT91_WDT_WDDIS;
+	if (wdt->sam9x60_support) {
+		writel_relaxed(wdt->ir, wdt->reg_base + AT91_SAM9X60_IER);
+		wdt->mr &= ~AT91_SAM9X60_WDDIS;
+	} else {
+		wdt->mr &= ~AT91_WDT_WDDIS;
+	}
 	wdt_write(wdt, AT91_WDT_MR, wdt->mr);
 
 	return 0;
@@ -88,7 +97,12 @@ static int sama5d4_wdt_stop(struct watchdog_device *wdd)
 {
 	struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
 
-	wdt->mr |= AT91_WDT_WDDIS;
+	if (wdt->sam9x60_support) {
+		writel_relaxed(wdt->ir, wdt->reg_base + AT91_SAM9X60_IDR);
+		wdt->mr |= AT91_SAM9X60_WDDIS;
+	} else {
+		wdt->mr |= AT91_WDT_WDDIS;
+	}
 	wdt_write(wdt, AT91_WDT_MR, wdt->mr);
 
 	return 0;
@@ -109,6 +123,14 @@ static int sama5d4_wdt_set_timeout(struct watchdog_device *wdd,
 	struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
 	u32 value = WDT_SEC2TICKS(timeout);
 
+	if (wdt->sam9x60_support) {
+		wdt_write(wdt, AT91_SAM9X60_WLR,
+			  AT91_SAM9X60_SET_COUNTER(value));
+
+		wdd->timeout = timeout;
+		return 0;
+	}
+
 	wdt->mr &= ~AT91_WDT_WDV;
 	wdt->mr |= AT91_WDT_SET_WDV(value);
 
@@ -143,8 +165,14 @@ static const struct watchdog_ops sama5d4_wdt_ops = {
 static irqreturn_t sama5d4_wdt_irq_handler(int irq, void *dev_id)
 {
 	struct sama5d4_wdt *wdt = platform_get_drvdata(dev_id);
+	u32 reg;
 
-	if (wdt_read(wdt, AT91_WDT_SR)) {
+	if (wdt->sam9x60_support)
+		reg = wdt_read(wdt, AT91_SAM9X60_ISR);
+	else
+		reg = wdt_read(wdt, AT91_WDT_SR);
+
+	if (reg) {
 		pr_crit("Atmel Watchdog Software Reset\n");
 		emergency_restart();
 		pr_crit("Reboot didn't succeed\n");
@@ -157,13 +185,14 @@ static int of_sama5d4_wdt_init(struct device_node *np, struct sama5d4_wdt *wdt)
 {
 	const char *tmp;
 
-	wdt->mr = AT91_WDT_WDDIS;
+	if (wdt->sam9x60_support)
+		wdt->mr = AT91_SAM9X60_WDDIS;
+	else
+		wdt->mr = AT91_WDT_WDDIS;
 
 	if (!of_property_read_string(np, "atmel,watchdog-type", &tmp) &&
 	    !strcmp(tmp, "software"))
-		wdt->mr |= AT91_WDT_WDFIEN;
-	else
-		wdt->mr |= AT91_WDT_WDRSTEN;
+		wdt->need_irq = true;
 
 	if (of_property_read_bool(np, "atmel,idle-halt"))
 		wdt->mr |= AT91_WDT_WDIDLEHLT;
@@ -176,21 +205,46 @@ static int of_sama5d4_wdt_init(struct device_node *np, struct sama5d4_wdt *wdt)
 
 static int sama5d4_wdt_init(struct sama5d4_wdt *wdt)
 {
-	u32 reg;
+	u32 reg, val;
+
+	val = WDT_SEC2TICKS(WDT_DEFAULT_TIMEOUT);
 	/*
 	 * When booting and resuming, the bootloader may have changed the
 	 * watchdog configuration.
 	 * If the watchdog is already running, we can safely update it.
 	 * Else, we have to disable it properly.
 	 */
-	if (wdt_enabled) {
-		wdt_write_nosleep(wdt, AT91_WDT_MR, wdt->mr);
-	} else {
+	if (!wdt_enabled) {
 		reg = wdt_read(wdt, AT91_WDT_MR);
-		if (!(reg & AT91_WDT_WDDIS))
+		if (wdt->sam9x60_support && (!(reg & AT91_SAM9X60_WDDIS)))
+			wdt_write_nosleep(wdt, AT91_WDT_MR,
+					  reg | AT91_SAM9X60_WDDIS);
+		else if (!wdt->sam9x60_support &&
+			 (!(reg & AT91_WDT_WDDIS)))
 			wdt_write_nosleep(wdt, AT91_WDT_MR,
 					  reg | AT91_WDT_WDDIS);
 	}
+
+	if (wdt->sam9x60_support) {
+		if (wdt->need_irq)
+			wdt->ir = AT91_SAM9X60_PERINT;
+		else
+			wdt->mr |= AT91_SAM9X60_PERIODRST;
+
+		wdt_write(wdt, AT91_SAM9X60_IER, wdt->ir);
+		wdt_write(wdt, AT91_SAM9X60_WLR, AT91_SAM9X60_SET_COUNTER(val));
+	} else {
+		wdt->mr |= AT91_WDT_SET_WDD(WDT_SEC2TICKS(MAX_WDT_TIMEOUT));
+		wdt->mr |= AT91_WDT_SET_WDV(val);
+
+		if (wdt->need_irq)
+			wdt->mr |= AT91_WDT_WDFIEN;
+		else
+			wdt->mr |= AT91_WDT_WDRSTEN;
+	}
+
+	wdt_write_nosleep(wdt, AT91_WDT_MR, wdt->mr);
+
 	return 0;
 }
 
@@ -201,7 +255,6 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
 	struct sama5d4_wdt *wdt;
 	void __iomem *regs;
 	u32 irq = 0;
-	u32 timeout;
 	int ret;
 
 	wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
@@ -215,6 +268,8 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
 	wdd->min_timeout = MIN_WDT_TIMEOUT;
 	wdd->max_timeout = MAX_WDT_TIMEOUT;
 	wdt->last_ping = jiffies;
+	wdt->sam9x60_support = of_device_is_compatible(dev->of_node,
+						       "microchip,sam9x60-wdt");
 
 	watchdog_set_drvdata(wdd, wdt);
 
@@ -224,15 +279,19 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
 
 	wdt->reg_base = regs;
 
-	irq = irq_of_parse_and_map(dev->of_node, 0);
-	if (!irq)
-		dev_warn(dev, "failed to get IRQ from DT\n");
-
 	ret = of_sama5d4_wdt_init(dev->of_node, wdt);
 	if (ret)
 		return ret;
 
-	if ((wdt->mr & AT91_WDT_WDFIEN) && irq) {
+	if (wdt->need_irq) {
+		irq = irq_of_parse_and_map(dev->of_node, 0);
+		if (!irq) {
+			dev_warn(dev, "failed to get IRQ from DT\n");
+			wdt->need_irq = false;
+		}
+	}
+
+	if (wdt->need_irq) {
 		ret = devm_request_irq(dev, irq, sama5d4_wdt_irq_handler,
 				       IRQF_SHARED | IRQF_IRQPOLL |
 				       IRQF_NO_SUSPEND, pdev->name, pdev);
@@ -244,11 +303,6 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
 
 	watchdog_init_timeout(wdd, wdt_timeout, dev);
 
-	timeout = WDT_SEC2TICKS(wdd->timeout);
-
-	wdt->mr |= AT91_WDT_SET_WDD(WDT_SEC2TICKS(MAX_WDT_TIMEOUT));
-	wdt->mr |= AT91_WDT_SET_WDV(timeout);
-
 	ret = sama5d4_wdt_init(wdt);
 	if (ret)
 		return ret;
@@ -269,7 +323,12 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id sama5d4_wdt_of_match[] = {
-	{ .compatible = "atmel,sama5d4-wdt", },
+	{
+		.compatible = "atmel,sama5d4-wdt",
+	},
+	{
+		.compatible = "microchip,sam9x60-wdt",
+	},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sama5d4_wdt_of_match);
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index a3a3290..25188d6 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -262,6 +262,24 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
 	watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
 	watchdog_init_timeout(wdd, 0, dev);
 
+	/*
+	 * If CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED is set (meaning U-Boot or
+	 * another bootloader left the watchdog running), we should prevent
+	 * any side effects here, before the user-space daemon takes care
+	 * of it. Since there is no way to read the current configuration
+	 * back from the hardware, the best option is to restart the
+	 * watchdog with deterministic values and let the core keep it
+	 * serviced until user space takes over.
+	 */
+	if (IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) {
+		ret = stm32_iwdg_start(wdd);
+		if (ret)
+			return ret;
+
+		/* Make sure the watchdog is serviced */
+		set_bit(WDOG_HW_RUNNING, &wdd->status);
+	}
+
 	ret = devm_watchdog_register_device(dev, wdd);
 	if (ret)
 		return ret;
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 21e8085..861daf4 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -147,6 +147,25 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
 }
 EXPORT_SYMBOL_GPL(watchdog_init_timeout);
 
+static int watchdog_reboot_notifier(struct notifier_block *nb,
+				    unsigned long code, void *data)
+{
+	struct watchdog_device *wdd;
+
+	wdd = container_of(nb, struct watchdog_device, reboot_nb);
+	if (code == SYS_DOWN || code == SYS_HALT) {
+		if (watchdog_active(wdd)) {
+			int ret;
+
+			ret = wdd->ops->stop(wdd);
+			if (ret)
+				return NOTIFY_BAD;
+		}
+	}
+
+	return NOTIFY_DONE;
+}
+
 static int watchdog_restart_notifier(struct notifier_block *nb,
 				     unsigned long action, void *data)
 {
@@ -235,6 +254,19 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
 		}
 	}
 
+	if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
+		wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
+
+		ret = register_reboot_notifier(&wdd->reboot_nb);
+		if (ret) {
+			pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
+			       wdd->id, ret);
+			watchdog_dev_unregister(wdd);
+			ida_simple_remove(&watchdog_ida, id);
+			return ret;
+		}
+	}
+
 	if (wdd->ops->restart) {
 		wdd->restart_nb.notifier_call = watchdog_restart_notifier;
 
@@ -289,6 +321,9 @@ static void __watchdog_unregister_device(struct watchdog_device *wdd)
 	if (wdd->ops->restart)
 		unregister_restart_handler(&wdd->restart_nb);
 
+	if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status))
+		unregister_reboot_notifier(&wdd->reboot_nb);
+
 	watchdog_dev_unregister(wdd);
 	ida_simple_remove(&watchdog_ida, wdd->id);
 }
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 4b2a854..8b5c742 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -38,7 +38,6 @@
 #include <linux/miscdevice.h>	/* For handling misc devices */
 #include <linux/module.h>	/* For module stuff/... */
 #include <linux/mutex.h>	/* For mutexes */
-#include <linux/reboot.h>	/* For reboot notifier */
 #include <linux/slab.h>		/* For memory functions */
 #include <linux/types.h>	/* For standard types (like size_t) */
 #include <linux/watchdog.h>	/* For watchdog specific items */
@@ -1097,25 +1096,6 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
 	put_device(&wd_data->dev);
 }
 
-static int watchdog_reboot_notifier(struct notifier_block *nb,
-				    unsigned long code, void *data)
-{
-	struct watchdog_device *wdd;
-
-	wdd = container_of(nb, struct watchdog_device, reboot_nb);
-	if (code == SYS_DOWN || code == SYS_HALT) {
-		if (watchdog_active(wdd)) {
-			int ret;
-
-			ret = wdd->ops->stop(wdd);
-			if (ret)
-				return NOTIFY_BAD;
-		}
-	}
-
-	return NOTIFY_DONE;
-}
-
 /*
  *	watchdog_dev_register: register a watchdog device
  *	@wdd: watchdog device
@@ -1134,22 +1114,8 @@ int watchdog_dev_register(struct watchdog_device *wdd)
 		return ret;
 
 	ret = watchdog_register_pretimeout(wdd);
-	if (ret) {
+	if (ret)
 		watchdog_cdev_unregister(wdd);
-		return ret;
-	}
-
-	if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
-		wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
-
-		ret = devm_register_reboot_notifier(&wdd->wd_data->dev,
-						    &wdd->reboot_nb);
-		if (ret) {
-			pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
-			       wdd->id, ret);
-			watchdog_dev_unregister(wdd);
-		}
-	}
 
 	return ret;
 }
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index 63f0857..75d3bb9 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -342,35 +342,12 @@ static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
 	mutex_unlock(&priv->lock);
 }
 
-static void *dmabuf_exp_ops_kmap(struct dma_buf *dma_buf,
-				 unsigned long page_num)
-{
-	/* Not implemented. */
-	return NULL;
-}
-
-static void dmabuf_exp_ops_kunmap(struct dma_buf *dma_buf,
-				  unsigned long page_num, void *addr)
-{
-	/* Not implemented. */
-}
-
-static int dmabuf_exp_ops_mmap(struct dma_buf *dma_buf,
-			       struct vm_area_struct *vma)
-{
-	/* Not implemented. */
-	return 0;
-}
-
 static const struct dma_buf_ops dmabuf_exp_ops =  {
 	.attach = dmabuf_exp_ops_attach,
 	.detach = dmabuf_exp_ops_detach,
 	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
 	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
 	.release = dmabuf_exp_ops_release,
-	.map = dmabuf_exp_ops_kmap,
-	.unmap = dmabuf_exp_ops_kunmap,
-	.mmap = dmabuf_exp_ops_mmap,
 };
 
 struct gntdev_dmabuf_export_args {
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 4fc83e3f..0258415 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1006,19 +1006,19 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 	}
 	mutex_unlock(&priv->lock);
 
-	/*
-	 * gntdev takes the address of the PTE in find_grant_ptes() and passes
-	 * it to the hypervisor in gntdev_map_grant_pages(). The purpose of
-	 * the notifier is to prevent the hypervisor pointer to the PTE from
-	 * going stale.
-	 *
-	 * Since this vma's mappings can't be touched without the mmap_sem,
-	 * and we are holding it now, there is no need for the notifier_range
-	 * locking pattern.
-	 */
-	mmu_interval_read_begin(&map->notifier);
-
 	if (use_ptemod) {
+		/*
+		 * gntdev takes the address of the PTE in find_grant_ptes() and
+		 * passes it to the hypervisor in gntdev_map_grant_pages(). The
+		 * purpose of the notifier is to prevent the hypervisor pointer
+		 * to the PTE from going stale.
+		 *
+		 * Since this vma's mappings can't be touched without the
+		 * mmap_sem, and we are holding it now, there is no need for
+		 * the notifier_range locking pattern.
+		 */
+		mmu_interval_read_begin(&map->notifier);
+
 		map->pages_vm_start = vma->vm_start;
 		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
 					  vma->vm_end - vma->vm_start,
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 6d12fc3..a8d2443 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -94,7 +94,7 @@ static void watch_target(struct xenbus_watch *watch,
 				  "%llu", &static_max) == 1))
 			static_max >>= PAGE_SHIFT - 10;
 		else
-			static_max = new_target;
+			static_max = balloon_stats.current_pages;
 
 		target_diff = (xen_pv_domain() || xen_initial_domain()) ? 0
 				: static_max - balloon_stats.target_pages;
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 6011171..b20e43e 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -286,6 +286,43 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
 	return xen_pcibios_err_to_errno(err);
 }
 
+int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
+{
+	int err;
+	u16 val;
+	int ret = 0;
+
+	err = pci_read_config_word(dev, PCI_COMMAND, &val);
+	if (err)
+		return err;
+	if (!(val & PCI_COMMAND_INTX_DISABLE))
+		ret |= INTERRUPT_TYPE_INTX;
+
+	/*
+	 * Do not trust dev->msi(x)_enabled here, as enabling could be done
+	 * bypassing the pci_*msi* functions, for example by qemu.
+	 */
+	if (dev->msi_cap) {
+		err = pci_read_config_word(dev,
+				dev->msi_cap + PCI_MSI_FLAGS,
+				&val);
+		if (err)
+			return err;
+		if (val & PCI_MSI_FLAGS_ENABLE)
+			ret |= INTERRUPT_TYPE_MSI;
+	}
+	if (dev->msix_cap) {
+		err = pci_read_config_word(dev,
+				dev->msix_cap + PCI_MSIX_FLAGS,
+				&val);
+		if (err)
+			return err;
+		if (val & PCI_MSIX_FLAGS_ENABLE)
+			ret |= INTERRUPT_TYPE_MSIX;
+	}
+	return ret;
+}
+
 void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev)
 {
 	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
index 22db630..28c4518 100644
--- a/drivers/xen/xen-pciback/conf_space.h
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -65,6 +65,11 @@ struct config_field_entry {
 	void *data;
 };
 
+#define INTERRUPT_TYPE_NONE (1<<0)
+#define INTERRUPT_TYPE_INTX (1<<1)
+#define INTERRUPT_TYPE_MSI  (1<<2)
+#define INTERRUPT_TYPE_MSIX (1<<3)
+
 extern bool xen_pcibk_permissive;
 
 #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
@@ -126,4 +131,6 @@ int xen_pcibk_config_capability_init(void);
 int xen_pcibk_config_header_add_fields(struct pci_dev *dev);
 int xen_pcibk_config_capability_add_fields(struct pci_dev *dev);
 
+int xen_pcibk_get_interrupt_type(struct pci_dev *dev);
+
 #endif				/* __XEN_PCIBACK_CONF_SPACE_H__ */
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index e569413..22f13ab 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -189,6 +189,85 @@ static const struct config_field caplist_pm[] = {
 	{}
 };
 
+static struct msi_msix_field_config {
+	u16          enable_bit; /* bit for enabling MSI/MSI-X */
+	unsigned int int_type;   /* interrupt type for exclusiveness check */
+} msi_field_config = {
+	.enable_bit	= PCI_MSI_FLAGS_ENABLE,
+	.int_type	= INTERRUPT_TYPE_MSI,
+}, msix_field_config = {
+	.enable_bit	= PCI_MSIX_FLAGS_ENABLE,
+	.int_type	= INTERRUPT_TYPE_MSIX,
+};
+
+static void *msi_field_init(struct pci_dev *dev, int offset)
+{
+	return &msi_field_config;
+}
+
+static void *msix_field_init(struct pci_dev *dev, int offset)
+{
+	return &msix_field_config;
+}
+
+static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value,
+				void *data)
+{
+	int err;
+	u16 old_value;
+	const struct msi_msix_field_config *field_config = data;
+	const struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+
+	if (xen_pcibk_permissive || dev_data->permissive)
+		goto write;
+
+	err = pci_read_config_word(dev, offset, &old_value);
+	if (err)
+		return err;
+
+	if (new_value == old_value)
+		return 0;
+
+	if (!dev_data->allow_interrupt_control ||
+	    (new_value ^ old_value) & ~field_config->enable_bit)
+		return PCIBIOS_SET_FAILED;
+
+	if (new_value & field_config->enable_bit) {
+		/* don't allow enabling together with other interrupt types */
+		int int_type = xen_pcibk_get_interrupt_type(dev);
+
+		if (int_type == INTERRUPT_TYPE_NONE ||
+		    int_type == field_config->int_type)
+			goto write;
+		return PCIBIOS_SET_FAILED;
+	}
+
+write:
+	return pci_write_config_word(dev, offset, new_value);
+}
+
+static const struct config_field caplist_msix[] = {
+	{
+		.offset    = PCI_MSIX_FLAGS,
+		.size      = 2,
+		.init      = msix_field_init,
+		.u.w.read  = xen_pcibk_read_config_word,
+		.u.w.write = msi_msix_flags_write,
+	},
+	{}
+};
+
+static const struct config_field caplist_msi[] = {
+	{
+		.offset    = PCI_MSI_FLAGS,
+		.size      = 2,
+		.init      = msi_field_init,
+		.u.w.read  = xen_pcibk_read_config_word,
+		.u.w.write = msi_msix_flags_write,
+	},
+	{}
+};
+
 static struct xen_pcibk_config_capability xen_pcibk_config_capability_pm = {
 	.capability = PCI_CAP_ID_PM,
 	.fields = caplist_pm,
@@ -197,11 +276,21 @@ static struct xen_pcibk_config_capability xen_pcibk_config_capability_vpd = {
 	.capability = PCI_CAP_ID_VPD,
 	.fields = caplist_vpd,
 };
+static struct xen_pcibk_config_capability xen_pcibk_config_capability_msi = {
+	.capability = PCI_CAP_ID_MSI,
+	.fields = caplist_msi,
+};
+static struct xen_pcibk_config_capability xen_pcibk_config_capability_msix = {
+	.capability = PCI_CAP_ID_MSIX,
+	.fields = caplist_msix,
+};
 
 int xen_pcibk_config_capability_init(void)
 {
 	register_capability(&xen_pcibk_config_capability_vpd);
 	register_capability(&xen_pcibk_config_capability_pm);
+	register_capability(&xen_pcibk_config_capability_msi);
+	register_capability(&xen_pcibk_config_capability_msix);
 
 	return 0;
 }
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index 10ae24b..fb4fccb 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -117,6 +117,25 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
 		pci_clear_mwi(dev);
 	}
 
+	if (dev_data && dev_data->allow_interrupt_control) {
+		if ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE) {
+			if (value & PCI_COMMAND_INTX_DISABLE) {
+				pci_intx(dev, 0);
+			} else {
+				/* Do not allow enabling INTx together with MSI or MSI-X. */
+				switch (xen_pcibk_get_interrupt_type(dev)) {
+				case INTERRUPT_TYPE_NONE:
+					pci_intx(dev, 1);
+					break;
+				case INTERRUPT_TYPE_INTX:
+					break;
+				default:
+					return PCIBIOS_SET_FAILED;
+				}
+			}
+		}
+	}
+
 	cmd->val = value;
 
 	if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 097410a..7af93d6 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -304,6 +304,8 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
 	xen_pcibk_config_reset_dev(dev);
 	xen_pcibk_config_free_dyn_fields(dev);
 
+	dev_data->allow_interrupt_control = 0;
+
 	xen_unregister_device_domain_owner(dev);
 
 	spin_lock_irqsave(&found_psdev->lock, flags);
@@ -1431,6 +1433,65 @@ static ssize_t permissive_show(struct device_driver *drv, char *buf)
 }
 static DRIVER_ATTR_RW(permissive);
 
+static ssize_t allow_interrupt_control_store(struct device_driver *drv,
+					     const char *buf, size_t count)
+{
+	int domain, bus, slot, func;
+	int err;
+	struct pcistub_device *psdev;
+	struct xen_pcibk_dev_data *dev_data;
+
+	err = str_to_slot(buf, &domain, &bus, &slot, &func);
+	if (err)
+		goto out;
+
+	psdev = pcistub_device_find(domain, bus, slot, func);
+	if (!psdev) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	dev_data = pci_get_drvdata(psdev->dev);
+	/* the driver data for a device should never be null at this point */
+	if (!dev_data) {
+		err = -ENXIO;
+		goto release;
+	}
+	dev_data->allow_interrupt_control = 1;
+release:
+	pcistub_device_put(psdev);
+out:
+	if (!err)
+		err = count;
+	return err;
+}
+
+static ssize_t allow_interrupt_control_show(struct device_driver *drv,
+					    char *buf)
+{
+	struct pcistub_device *psdev;
+	struct xen_pcibk_dev_data *dev_data;
+	size_t count = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pcistub_devices_lock, flags);
+	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+		if (count >= PAGE_SIZE)
+			break;
+		if (!psdev->dev)
+			continue;
+		dev_data = pci_get_drvdata(psdev->dev);
+		if (!dev_data || !dev_data->allow_interrupt_control)
+			continue;
+		count +=
+		    scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
+			      pci_name(psdev->dev));
+	}
+	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+	return count;
+}
+static DRIVER_ATTR_RW(allow_interrupt_control);
+
 static void pcistub_exit(void)
 {
 	driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
@@ -1441,6 +1502,8 @@ static void pcistub_exit(void)
 	driver_remove_file(&xen_pcibk_pci_driver.driver,
 			   &driver_attr_permissive);
 	driver_remove_file(&xen_pcibk_pci_driver.driver,
+			   &driver_attr_allow_interrupt_control);
+	driver_remove_file(&xen_pcibk_pci_driver.driver,
 			   &driver_attr_irq_handlers);
 	driver_remove_file(&xen_pcibk_pci_driver.driver,
 			   &driver_attr_irq_handler_state);
@@ -1530,6 +1593,9 @@ static int __init pcistub_init(void)
 	if (!err)
 		err = driver_create_file(&xen_pcibk_pci_driver.driver,
 					 &driver_attr_permissive);
+	if (!err)
+		err = driver_create_file(&xen_pcibk_pci_driver.driver,
+					 &driver_attr_allow_interrupt_control);
 
 	if (!err)
 		err = driver_create_file(&xen_pcibk_pci_driver.driver,
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 263c059..ce1077e 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -45,6 +45,7 @@ struct xen_pcibk_dev_data {
 	struct list_head config_fields;
 	struct pci_saved_state *pci_saved_state;
 	unsigned int permissive:1;
+	unsigned int allow_interrupt_control:1;
 	unsigned int warned_on_write:1;
 	unsigned int enable_intx:1;
 	unsigned int isr_on:1; /* Whether the IRQ handler is installed. */
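
The new allow_interrupt_control bit is armed per device from dom0 in the same way as the
existing permissive attribute: write the device's slot string to the driver attribute, and
pcistub_put_pci_dev() clears it again when the device is released. A minimal user-space
sketch follows; the /sys path assumes the driver registers as "pciback" and that
str_to_slot() accepts the usual "DDDD:BB:SS.F" form -- both are assumptions for the
example, not spelled out in these hunks.

/* Sketch only: enable interrupt control for one passed-through device. */
#include <stdio.h>

static int pciback_allow_interrupt_control(const char *slot)
{
	/* hypothetical path, assuming the driver name is "pciback" */
	FILE *f = fopen("/sys/bus/pci/drivers/pciback/allow_interrupt_control", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", slot);
	return fclose(f);	/* 0 on success */
}

int main(void)
{
	/* hypothetical device address */
	return pciback_allow_interrupt_control("0000:03:00.0") ? 1 : 0;
}
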
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 378486b..66975da 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -239,7 +239,9 @@ int xenbus_dev_probe(struct device *_dev)
 		goto fail;
 	}
 
+	spin_lock(&dev->reclaim_lock);
 	err = drv->probe(dev, id);
+	spin_unlock(&dev->reclaim_lock);
 	if (err)
 		goto fail_put;
 
@@ -268,8 +270,11 @@ int xenbus_dev_remove(struct device *_dev)
 
 	free_otherend_watch(dev);
 
-	if (drv->remove)
+	if (drv->remove) {
+		spin_lock(&dev->reclaim_lock);
 		drv->remove(dev);
+		spin_unlock(&dev->reclaim_lock);
+	}
 
 	module_put(drv->driver.owner);
 
@@ -468,6 +473,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
 		goto fail;
 
 	dev_set_name(&xendev->dev, "%s", devname);
+	spin_lock_init(&xendev->reclaim_lock);
 
 	/* Register with generic device framework. */
 	err = device_register(&xendev->dev);
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 14876fa..791f6fe 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -247,6 +247,41 @@ static int backend_probe_and_watch(struct notifier_block *notifier,
 	return NOTIFY_DONE;
 }
 
+static int backend_reclaim_memory(struct device *dev, void *data)
+{
+	const struct xenbus_driver *drv;
+	struct xenbus_device *xdev;
+
+	if (!dev->driver)
+		return 0;
+	drv = to_xenbus_driver(dev->driver);
+	if (drv && drv->reclaim_memory) {
+		xdev = to_xenbus_device(dev);
+		if (!spin_trylock(&xdev->reclaim_lock))
+			return 0;
+		drv->reclaim_memory(xdev);
+		spin_unlock(&xdev->reclaim_lock);
+	}
+	return 0;
+}
+
+/*
+ * Always returns 0, because the shrinker is only used to detect memory
+ * pressure.
+ */
+static unsigned long backend_shrink_memory_count(struct shrinker *shrinker,
+				struct shrink_control *sc)
+{
+	bus_for_each_dev(&xenbus_backend.bus, NULL, NULL,
+			backend_reclaim_memory);
+	return 0;
+}
+
+static struct shrinker backend_memory_shrinker = {
+	.count_objects = backend_shrink_memory_count,
+	.seeks = DEFAULT_SEEKS,
+};
+
 static int __init xenbus_probe_backend_init(void)
 {
 	static struct notifier_block xenstore_notifier = {
@@ -263,6 +298,9 @@ static int __init xenbus_probe_backend_init(void)
 
 	register_xenstore_notifier(&xenstore_notifier);
 
+	if (register_shrinker(&backend_memory_shrinker))
+		pr_warn("shrinker registration failed\n");
+
 	return 0;
 }
 subsys_initcall(xenbus_probe_backend_init);
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index 2e4ca4d..1c9ae08 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -56,10 +56,9 @@ proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *
 	return nbytes;
 }
 
-static const struct file_operations proc_bus_zorro_operations = {
-	.owner		= THIS_MODULE,
-	.llseek		= proc_bus_zorro_lseek,
-	.read		= proc_bus_zorro_read,
+static const struct proc_ops bus_zorro_proc_ops = {
+	.proc_lseek	= proc_bus_zorro_lseek,
+	.proc_read	= proc_bus_zorro_read,
 };
 
 static void * zorro_seq_start(struct seq_file *m, loff_t *pos)
@@ -105,7 +104,7 @@ static int __init zorro_proc_attach_device(unsigned int slot)
 
 	sprintf(name, "%02x", slot);
 	entry = proc_create_data(name, 0, proc_bus_zorro_dir,
-				 &proc_bus_zorro_operations,
+				 &bus_zorro_proc_ops,
 				 &zorro_autocon[slot]);
 	if (!entry)
 		return -ENOMEM;
diff --git a/fs/Makefile b/fs/Makefile
index 1148c55..98be354 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -37,7 +37,7 @@
 obj-$(CONFIG_FS_ENCRYPTION)	+= crypto/
 obj-$(CONFIG_FS_VERITY)		+= verity/
 obj-$(CONFIG_FILE_LOCKING)      += locks.o
-obj-$(CONFIG_COMPAT)		+= compat.o compat_ioctl.o
+obj-$(CONFIG_COMPAT)		+= compat.o
 obj-$(CONFIG_BINFMT_AOUT)	+= binfmt_aout.o
 obj-$(CONFIG_BINFMT_EM86)	+= binfmt_em86.o
 obj-$(CONFIG_BINFMT_MISC)	+= binfmt_misc.o
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index b7e844d..699c4fa 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -26,14 +26,13 @@ static inline u16 adfs_filetype(u32 loadaddr)
 #define ADFS_NDA_PUBLIC_READ	(1 << 5)
 #define ADFS_NDA_PUBLIC_WRITE	(1 << 6)
 
-#include "dir_f.h"
-
 /*
  * adfs file system inode data in memory
  */
 struct adfs_inode_info {
 	loff_t		mmu_private;
 	__u32		parent_id;	/* parent indirect disc address	*/
+	__u32		indaddr;	/* object indirect disc address	*/
 	__u32		loadaddr;	/* RISC OS load address		*/
 	__u32		execaddr;	/* RISC OS exec address		*/
 	unsigned int	attr;		/* RISC OS permissions		*/
@@ -93,15 +92,19 @@ struct adfs_dir {
 
 	int			nr_buffers;
 	struct buffer_head	*bh[4];
-
-	/* big directories need allocated buffers */
-	struct buffer_head	**bh_fplus;
+	struct buffer_head	**bhs;
 
 	unsigned int		pos;
 	__u32			parent_id;
 
-	struct adfs_dirheader	dirhead;
-	union  adfs_dirtail	dirtail;
+	union {
+		struct adfs_dirheader	*dirhead;
+		struct adfs_bigdirheader *bighead;
+	};
+	union {
+		struct adfs_newdirtail	*newtail;
+		struct adfs_bigdirtail	*bigtail;
+	};
 };
 
 /*
@@ -122,13 +125,13 @@ struct object_info {
 struct adfs_dir_ops {
 	int	(*read)(struct super_block *sb, unsigned int indaddr,
 			unsigned int size, struct adfs_dir *dir);
+	int	(*iterate)(struct adfs_dir *dir, struct dir_context *ctx);
 	int	(*setpos)(struct adfs_dir *dir, unsigned int fpos);
 	int	(*getnext)(struct adfs_dir *dir, struct object_info *obj);
 	int	(*update)(struct adfs_dir *dir, struct object_info *obj);
 	int	(*create)(struct adfs_dir *dir, struct object_info *obj);
 	int	(*remove)(struct adfs_dir *dir, struct object_info *obj);
-	int	(*sync)(struct adfs_dir *dir);
-	void	(*free)(struct adfs_dir *dir);
+	int	(*commit)(struct adfs_dir *dir);
 };
 
 struct adfs_discmap {
@@ -145,7 +148,9 @@ int adfs_notify_change(struct dentry *dentry, struct iattr *attr);
 
 /* map.c */
 int adfs_map_lookup(struct super_block *sb, u32 frag_id, unsigned int offset);
-extern unsigned int adfs_map_free(struct super_block *sb);
+void adfs_map_statfs(struct super_block *sb, struct kstatfs *buf);
+struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_discrecord *dr);
+void adfs_free_map(struct super_block *sb);
 
 /* Misc */
 __printf(3, 4)
@@ -167,6 +172,13 @@ extern const struct dentry_operations adfs_dentry_operations;
 extern const struct adfs_dir_ops adfs_f_dir_ops;
 extern const struct adfs_dir_ops adfs_fplus_dir_ops;
 
+int adfs_dir_copyfrom(void *dst, struct adfs_dir *dir, unsigned int offset,
+		      size_t len);
+int adfs_dir_copyto(struct adfs_dir *dir, unsigned int offset, const void *src,
+		    size_t len);
+void adfs_dir_relse(struct adfs_dir *dir);
+int adfs_dir_read_buffers(struct super_block *sb, u32 indaddr,
+			  unsigned int size, struct adfs_dir *dir);
 void adfs_object_fixup(struct adfs_dir *dir, struct object_info *obj);
 extern int adfs_dir_update(struct super_block *sb, struct object_info *obj,
 			   int wait);
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index a54c532..77fbd19 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -6,12 +6,196 @@
  *
  *  Common directory handling for ADFS
  */
+#include <linux/slab.h>
 #include "adfs.h"
 
 /*
  * For future.  This should probably be per-directory.
  */
-static DEFINE_RWLOCK(adfs_dir_lock);
+static DECLARE_RWSEM(adfs_dir_rwsem);
+
+int adfs_dir_copyfrom(void *dst, struct adfs_dir *dir, unsigned int offset,
+		      size_t len)
+{
+	struct super_block *sb = dir->sb;
+	unsigned int index, remain;
+
+	index = offset >> sb->s_blocksize_bits;
+	offset &= sb->s_blocksize - 1;
+	remain = sb->s_blocksize - offset;
+	if (index + (remain < len) >= dir->nr_buffers)
+		return -EINVAL;
+
+	if (remain < len) {
+		memcpy(dst, dir->bhs[index]->b_data + offset, remain);
+		dst += remain;
+		len -= remain;
+		index += 1;
+		offset = 0;
+	}
+
+	memcpy(dst, dir->bhs[index]->b_data + offset, len);
+
+	return 0;
+}
+
+int adfs_dir_copyto(struct adfs_dir *dir, unsigned int offset, const void *src,
+		    size_t len)
+{
+	struct super_block *sb = dir->sb;
+	unsigned int index, remain;
+
+	index = offset >> sb->s_blocksize_bits;
+	offset &= sb->s_blocksize - 1;
+	remain = sb->s_blocksize - offset;
+	if (index + (remain < len) >= dir->nr_buffers)
+		return -EINVAL;
+
+	if (remain < len) {
+		memcpy(dir->bhs[index]->b_data + offset, src, remain);
+		src += remain;
+		len -= remain;
+		index += 1;
+		offset = 0;
+	}
+
+	memcpy(dir->bhs[index]->b_data + offset, src, len);
+
+	return 0;
+}
+
+static void __adfs_dir_cleanup(struct adfs_dir *dir)
+{
+	dir->nr_buffers = 0;
+
+	if (dir->bhs != dir->bh)
+		kfree(dir->bhs);
+	dir->bhs = NULL;
+	dir->sb = NULL;
+}
+
+void adfs_dir_relse(struct adfs_dir *dir)
+{
+	unsigned int i;
+
+	for (i = 0; i < dir->nr_buffers; i++)
+		brelse(dir->bhs[i]);
+
+	__adfs_dir_cleanup(dir);
+}
+
+static void adfs_dir_forget(struct adfs_dir *dir)
+{
+	unsigned int i;
+
+	for (i = 0; i < dir->nr_buffers; i++)
+		bforget(dir->bhs[i]);
+
+	__adfs_dir_cleanup(dir);
+}
+
+int adfs_dir_read_buffers(struct super_block *sb, u32 indaddr,
+			  unsigned int size, struct adfs_dir *dir)
+{
+	struct buffer_head **bhs;
+	unsigned int i, num;
+	int block;
+
+	num = ALIGN(size, sb->s_blocksize) >> sb->s_blocksize_bits;
+	if (num > ARRAY_SIZE(dir->bh)) {
+		/* We only allow one extension */
+		if (dir->bhs != dir->bh)
+			return -EINVAL;
+
+		bhs = kcalloc(num, sizeof(*bhs), GFP_KERNEL);
+		if (!bhs)
+			return -ENOMEM;
+
+		if (dir->nr_buffers)
+			memcpy(bhs, dir->bhs, dir->nr_buffers * sizeof(*bhs));
+
+		dir->bhs = bhs;
+	}
+
+	for (i = dir->nr_buffers; i < num; i++) {
+		block = __adfs_block_map(sb, indaddr, i);
+		if (!block) {
+			adfs_error(sb, "dir %06x has a hole at offset %u",
+				   indaddr, i);
+			goto error;
+		}
+
+		dir->bhs[i] = sb_bread(sb, block);
+		if (!dir->bhs[i]) {
+			adfs_error(sb,
+				   "dir %06x failed read at offset %u, mapped block 0x%08x",
+				   indaddr, i, block);
+			goto error;
+		}
+
+		dir->nr_buffers++;
+	}
+	return 0;
+
+error:
+	adfs_dir_relse(dir);
+
+	return -EIO;
+}
+
+static int adfs_dir_read(struct super_block *sb, u32 indaddr,
+			 unsigned int size, struct adfs_dir *dir)
+{
+	dir->sb = sb;
+	dir->bhs = dir->bh;
+	dir->nr_buffers = 0;
+
+	return ADFS_SB(sb)->s_dir->read(sb, indaddr, size, dir);
+}
+
+static int adfs_dir_read_inode(struct super_block *sb, struct inode *inode,
+			       struct adfs_dir *dir)
+{
+	int ret;
+
+	ret = adfs_dir_read(sb, ADFS_I(inode)->indaddr, inode->i_size, dir);
+	if (ret)
+		return ret;
+
+	if (ADFS_I(inode)->parent_id != dir->parent_id) {
+		adfs_error(sb,
+			   "parent directory id changed under me! (%06x but got %06x)\n",
+			   ADFS_I(inode)->parent_id, dir->parent_id);
+		adfs_dir_relse(dir);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static void adfs_dir_mark_dirty(struct adfs_dir *dir)
+{
+	unsigned int i;
+
+	/* Mark the buffers dirty */
+	for (i = 0; i < dir->nr_buffers; i++)
+		mark_buffer_dirty(dir->bhs[i]);
+}
+
+static int adfs_dir_sync(struct adfs_dir *dir)
+{
+	int err = 0;
+	int i;
+
+	for (i = dir->nr_buffers - 1; i >= 0; i--) {
+		struct buffer_head *bh = dir->bhs[i];
+		sync_dirty_buffer(bh);
+		if (buffer_req(bh) && !buffer_uptodate(bh))
+			err = -EIO;
+	}
+
+	return err;
+}
 
 void adfs_object_fixup(struct adfs_dir *dir, struct object_info *obj)
 {
@@ -51,87 +235,90 @@ void adfs_object_fixup(struct adfs_dir *dir, struct object_info *obj)
 	}
 }
 
-static int
-adfs_readdir(struct file *file, struct dir_context *ctx)
+static int adfs_iterate(struct file *file, struct dir_context *ctx)
 {
 	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
 	const struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
-	struct object_info obj;
 	struct adfs_dir dir;
-	int ret = 0;
+	int ret;
 
-	if (ctx->pos >> 32)
-		return 0;
-
-	ret = ops->read(sb, inode->i_ino, inode->i_size, &dir);
+	down_read(&adfs_dir_rwsem);
+	ret = adfs_dir_read_inode(sb, inode, &dir);
 	if (ret)
-		return ret;
+		goto unlock;
 
 	if (ctx->pos == 0) {
 		if (!dir_emit_dot(file, ctx))
-			goto free_out;
+			goto unlock_relse;
 		ctx->pos = 1;
 	}
 	if (ctx->pos == 1) {
 		if (!dir_emit(ctx, "..", 2, dir.parent_id, DT_DIR))
-			goto free_out;
+			goto unlock_relse;
 		ctx->pos = 2;
 	}
 
-	read_lock(&adfs_dir_lock);
+	ret = ops->iterate(&dir, ctx);
 
-	ret = ops->setpos(&dir, ctx->pos - 2);
-	if (ret)
-		goto unlock_out;
-	while (ops->getnext(&dir, &obj) == 0) {
-		if (!dir_emit(ctx, obj.name, obj.name_len,
-			      obj.indaddr, DT_UNKNOWN))
-			break;
-		ctx->pos++;
-	}
+unlock_relse:
+	up_read(&adfs_dir_rwsem);
+	adfs_dir_relse(&dir);
+	return ret;
 
-unlock_out:
-	read_unlock(&adfs_dir_lock);
-
-free_out:
-	ops->free(&dir);
+unlock:
+	up_read(&adfs_dir_rwsem);
 	return ret;
 }
 
 int
 adfs_dir_update(struct super_block *sb, struct object_info *obj, int wait)
 {
-	int ret = -EINVAL;
-#ifdef CONFIG_ADFS_FS_RW
 	const struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
 	struct adfs_dir dir;
+	int ret;
 
-	printk(KERN_INFO "adfs_dir_update: object %06x in dir %06x\n",
-		 obj->indaddr, obj->parent_id);
+	if (!IS_ENABLED(CONFIG_ADFS_FS_RW))
+		return -EINVAL;
 
-	if (!ops->update) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (!ops->update)
+		return -EINVAL;
 
-	ret = ops->read(sb, obj->parent_id, 0, &dir);
+	down_write(&adfs_dir_rwsem);
+	ret = adfs_dir_read(sb, obj->parent_id, 0, &dir);
 	if (ret)
-		goto out;
+		goto unlock;
 
-	write_lock(&adfs_dir_lock);
 	ret = ops->update(&dir, obj);
-	write_unlock(&adfs_dir_lock);
+	if (ret)
+		goto forget;
 
-	if (wait) {
-		int err = ops->sync(&dir);
-		if (!ret)
-			ret = err;
-	}
+	ret = ops->commit(&dir);
+	if (ret)
+		goto forget;
+	up_write(&adfs_dir_rwsem);
 
-	ops->free(&dir);
-out:
-#endif
+	adfs_dir_mark_dirty(&dir);
+
+	if (wait)
+		ret = adfs_dir_sync(&dir);
+
+	adfs_dir_relse(&dir);
+	return ret;
+
+	/*
+	 * If the update failed because the entry wasn't found, we can
+	 * just release the buffers. If it was any other error, forget
+	 * the dirtied buffers so they aren't written back to the media.
+	 */
+forget:
+	if (ret == -ENOENT)
+		adfs_dir_relse(&dir);
+	else
+		adfs_dir_forget(&dir);
+unlock:
+	up_write(&adfs_dir_rwsem);
+
 	return ret;
 }
 
@@ -167,25 +354,14 @@ static int adfs_dir_lookup_byname(struct inode *inode, const struct qstr *qstr,
 	u32 name_len;
 	int ret;
 
-	ret = ops->read(sb, inode->i_ino, inode->i_size, &dir);
+	down_read(&adfs_dir_rwsem);
+	ret = adfs_dir_read_inode(sb, inode, &dir);
 	if (ret)
-		goto out;
-
-	if (ADFS_I(inode)->parent_id != dir.parent_id) {
-		adfs_error(sb,
-			   "parent directory changed under me! (%06x but got %06x)\n",
-			   ADFS_I(inode)->parent_id, dir.parent_id);
-		ret = -EIO;
-		goto free_out;
-	}
-
-	obj->parent_id = inode->i_ino;
-
-	read_lock(&adfs_dir_lock);
+		goto unlock;
 
 	ret = ops->setpos(&dir, 0);
 	if (ret)
-		goto unlock_out;
+		goto unlock_relse;
 
 	ret = -ENOENT;
 	name = qstr->name;
@@ -196,20 +372,22 @@ static int adfs_dir_lookup_byname(struct inode *inode, const struct qstr *qstr,
 			break;
 		}
 	}
+	obj->parent_id = ADFS_I(inode)->indaddr;
 
-unlock_out:
-	read_unlock(&adfs_dir_lock);
+unlock_relse:
+	up_read(&adfs_dir_rwsem);
+	adfs_dir_relse(&dir);
+	return ret;
 
-free_out:
-	ops->free(&dir);
-out:
+unlock:
+	up_read(&adfs_dir_rwsem);
 	return ret;
 }
 
 const struct file_operations adfs_dir_operations = {
 	.read		= generic_read_dir,
 	.llseek		= generic_file_llseek,
-	.iterate	= adfs_readdir,
+	.iterate_shared	= adfs_iterate,
 	.fsync		= generic_file_fsync,
 };
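
The new adfs_dir_copyfrom()/adfs_dir_copyto() helpers above replace the open-coded pairs of
memcpy() calls that used to straddle buffer boundaries by hand. The only subtle part is the
bounds check, index + (remain < len) >= dir->nr_buffers, which charges one extra buffer
exactly when the copy crosses a block boundary. Below is a self-contained sketch of the same
arithmetic using plain 1024-byte arrays instead of buffer heads (the block size is chosen
only for the example).

/* Sketch of the split-copy arithmetic used by adfs_dir_copyfrom(). */
#include <stdio.h>
#include <string.h>

#define BLKBITS 10
#define BLKSIZE (1u << BLKBITS)

static int copyfrom(void *dst, unsigned char blk[][BLKSIZE], unsigned int nr,
		    unsigned int offset, size_t len)
{
	unsigned int index = offset >> BLKBITS;
	unsigned int remain;

	offset &= BLKSIZE - 1;
	remain = BLKSIZE - offset;

	/* a second block is needed exactly when remain < len */
	if (index + (remain < len) >= nr)
		return -1;

	if (remain < len) {
		memcpy(dst, blk[index] + offset, remain);
		dst = (char *)dst + remain;
		len -= remain;
		index++;
		offset = 0;
	}
	memcpy(dst, blk[index] + offset, len);
	return 0;
}

int main(void)
{
	unsigned char blocks[2][BLKSIZE], entry[26];

	memset(blocks[0], 'A', BLKSIZE);
	memset(blocks[1], 'B', BLKSIZE);

	/* offset 1020, length 26: 4 bytes from block 0, 22 from block 1 */
	if (copyfrom(entry, blocks, 2, 1020, sizeof(entry)))
		return 1;
	printf("%.4s... then %c\n", entry, entry[4]);	/* prints "AAAA... then B" */
	return 0;
}
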
 
diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c
index c1a950c..30d526f 100644
--- a/fs/adfs/dir_f.c
+++ b/fs/adfs/dir_f.c
@@ -9,8 +9,6 @@
 #include "adfs.h"
 #include "dir_f.h"
 
-static void adfs_f_free(struct adfs_dir *dir);
-
 /*
  * Read an (unaligned) value of length 1..4 bytes
  */
@@ -60,7 +58,7 @@ static inline void adfs_writeval(unsigned char *p, int len, unsigned int val)
 #define bufoff(_bh,_idx)			\
 	({ int _buf = _idx >> blocksize_bits;	\
 	   int _off = _idx - (_buf << blocksize_bits);\
-	  (u8 *)(_bh[_buf]->b_data + _off);	\
+	  (void *)(_bh[_buf]->b_data + _off);	\
 	})
 
 /*
@@ -123,65 +121,49 @@ adfs_dir_checkbyte(const struct adfs_dir *dir)
 	return (dircheck ^ (dircheck >> 8) ^ (dircheck >> 16) ^ (dircheck >> 24)) & 0xff;
 }
 
+static int adfs_f_validate(struct adfs_dir *dir)
+{
+	struct adfs_dirheader *head = dir->dirhead;
+	struct adfs_newdirtail *tail = dir->newtail;
+
+	if (head->startmasseq != tail->endmasseq ||
+	    tail->dirlastmask || tail->reserved[0] || tail->reserved[1] ||
+	    (memcmp(&head->startname, "Nick", 4) &&
+	     memcmp(&head->startname, "Hugo", 4)) ||
+	    memcmp(&head->startname, &tail->endname, 4) ||
+	    adfs_dir_checkbyte(dir) != tail->dircheckbyte)
+		return -EIO;
+
+	return 0;
+}
+
 /* Read and check that a directory is valid */
-static int adfs_dir_read(struct super_block *sb, u32 indaddr,
-			 unsigned int size, struct adfs_dir *dir)
+static int adfs_f_read(struct super_block *sb, u32 indaddr, unsigned int size,
+		       struct adfs_dir *dir)
 {
 	const unsigned int blocksize_bits = sb->s_blocksize_bits;
-	int blk = 0;
+	int ret;
 
-	/*
-	 * Directories which are not a multiple of 2048 bytes
-	 * are considered bad v2 [3.6]
-	 */
-	if (size & 2047)
+	if (size && size != ADFS_NEWDIR_SIZE)
+		return -EIO;
+
+	ret = adfs_dir_read_buffers(sb, indaddr, ADFS_NEWDIR_SIZE, dir);
+	if (ret)
+		return ret;
+
+	dir->dirhead = bufoff(dir->bh, 0);
+	dir->newtail = bufoff(dir->bh, 2007);
+
+	if (adfs_f_validate(dir))
 		goto bad_dir;
 
-	size >>= blocksize_bits;
-
-	dir->nr_buffers = 0;
-	dir->sb = sb;
-
-	for (blk = 0; blk < size; blk++) {
-		int phys;
-
-		phys = __adfs_block_map(sb, indaddr, blk);
-		if (!phys) {
-			adfs_error(sb, "dir %06x has a hole at offset %d",
-				   indaddr, blk);
-			goto release_buffers;
-		}
-
-		dir->bh[blk] = sb_bread(sb, phys);
-		if (!dir->bh[blk])
-			goto release_buffers;
-	}
-
-	memcpy(&dir->dirhead, bufoff(dir->bh, 0), sizeof(dir->dirhead));
-	memcpy(&dir->dirtail, bufoff(dir->bh, 2007), sizeof(dir->dirtail));
-
-	if (dir->dirhead.startmasseq != dir->dirtail.new.endmasseq ||
-	    memcmp(&dir->dirhead.startname, &dir->dirtail.new.endname, 4))
-		goto bad_dir;
-
-	if (memcmp(&dir->dirhead.startname, "Nick", 4) &&
-	    memcmp(&dir->dirhead.startname, "Hugo", 4))
-		goto bad_dir;
-
-	if (adfs_dir_checkbyte(dir) != dir->dirtail.new.dircheckbyte)
-		goto bad_dir;
-
-	dir->nr_buffers = blk;
+	dir->parent_id = adfs_readval(dir->newtail->dirparent, 3);
 
 	return 0;
 
 bad_dir:
 	adfs_error(sb, "dir %06x is corrupted", indaddr);
-release_buffers:
-	for (blk -= 1; blk >= 0; blk -= 1)
-		brelse(dir->bh[blk]);
-
-	dir->sb = NULL;
+	adfs_dir_relse(dir);
 
 	return -EIO;
 }
@@ -232,24 +214,12 @@ adfs_obj2dir(struct adfs_direntry *de, struct object_info *obj)
 static int
 __adfs_dir_get(struct adfs_dir *dir, int pos, struct object_info *obj)
 {
-	struct super_block *sb = dir->sb;
 	struct adfs_direntry de;
-	int thissize, buffer, offset;
+	int ret;
 
-	buffer = pos >> sb->s_blocksize_bits;
-
-	if (buffer > dir->nr_buffers)
-		return -EINVAL;
-
-	offset = pos & (sb->s_blocksize - 1);
-	thissize = sb->s_blocksize - offset;
-	if (thissize > 26)
-		thissize = 26;
-
-	memcpy(&de, dir->bh[buffer]->b_data + offset, thissize);
-	if (thissize != 26)
-		memcpy(((char *)&de) + thissize, dir->bh[buffer + 1]->b_data,
-		       26 - thissize);
+	ret = adfs_dir_copyfrom(&de, dir, pos, 26);
+	if (ret)
+		return ret;
 
 	if (!de.dirobname[0])
 		return -ENOENT;
@@ -260,89 +230,6 @@ __adfs_dir_get(struct adfs_dir *dir, int pos, struct object_info *obj)
 }
 
 static int
-__adfs_dir_put(struct adfs_dir *dir, int pos, struct object_info *obj)
-{
-	struct super_block *sb = dir->sb;
-	struct adfs_direntry de;
-	int thissize, buffer, offset;
-
-	buffer = pos >> sb->s_blocksize_bits;
-
-	if (buffer > dir->nr_buffers)
-		return -EINVAL;
-
-	offset = pos & (sb->s_blocksize - 1);
-	thissize = sb->s_blocksize - offset;
-	if (thissize > 26)
-		thissize = 26;
-
-	/*
-	 * Get the entry in total
-	 */
-	memcpy(&de, dir->bh[buffer]->b_data + offset, thissize);
-	if (thissize != 26)
-		memcpy(((char *)&de) + thissize, dir->bh[buffer + 1]->b_data,
-		       26 - thissize);
-
-	/*
-	 * update it
-	 */
-	adfs_obj2dir(&de, obj);
-
-	/*
-	 * Put the new entry back
-	 */
-	memcpy(dir->bh[buffer]->b_data + offset, &de, thissize);
-	if (thissize != 26)
-		memcpy(dir->bh[buffer + 1]->b_data, ((char *)&de) + thissize,
-		       26 - thissize);
-
-	return 0;
-}
-
-/*
- * the caller is responsible for holding the necessary
- * locks.
- */
-static int adfs_dir_find_entry(struct adfs_dir *dir, u32 indaddr)
-{
-	int pos, ret;
-
-	ret = -ENOENT;
-
-	for (pos = 5; pos < ADFS_NUM_DIR_ENTRIES * 26 + 5; pos += 26) {
-		struct object_info obj;
-
-		if (!__adfs_dir_get(dir, pos, &obj))
-			break;
-
-		if (obj.indaddr == indaddr) {
-			ret = pos;
-			break;
-		}
-	}
-
-	return ret;
-}
-
-static int adfs_f_read(struct super_block *sb, u32 indaddr, unsigned int size,
-		       struct adfs_dir *dir)
-{
-	int ret;
-
-	if (size != ADFS_NEWDIR_SIZE)
-		return -EIO;
-
-	ret = adfs_dir_read(sb, indaddr, size, dir);
-	if (ret)
-		adfs_error(sb, "unable to read directory");
-	else
-		dir->parent_id = adfs_readval(dir->dirtail.new.dirparent, 3);
-
-	return ret;
-}
-
-static int
 adfs_f_setpos(struct adfs_dir *dir, unsigned int fpos)
 {
 	if (fpos >= ADFS_NUM_DIR_ENTRIES)
@@ -364,99 +251,74 @@ adfs_f_getnext(struct adfs_dir *dir, struct object_info *obj)
 	return ret;
 }
 
-static int
-adfs_f_update(struct adfs_dir *dir, struct object_info *obj)
+static int adfs_f_iterate(struct adfs_dir *dir, struct dir_context *ctx)
 {
-	struct super_block *sb = dir->sb;
-	int ret, i;
+	struct object_info obj;
+	int pos = 5 + (ctx->pos - 2) * 26;
 
-	ret = adfs_dir_find_entry(dir, obj->indaddr);
-	if (ret < 0) {
-		adfs_error(dir->sb, "unable to locate entry to update");
-		goto out;
+	while (ctx->pos < 2 + ADFS_NUM_DIR_ENTRIES) {
+		if (__adfs_dir_get(dir, pos, &obj))
+			break;
+		if (!dir_emit(ctx, obj.name, obj.name_len,
+			      obj.indaddr, DT_UNKNOWN))
+			break;
+		pos += 26;
+		ctx->pos++;
 	}
+	return 0;
+}
 
-	__adfs_dir_put(dir, ret, obj);
- 
-	/*
-	 * Increment directory sequence number
-	 */
-	dir->bh[0]->b_data[0] += 1;
-	dir->bh[dir->nr_buffers - 1]->b_data[sb->s_blocksize - 6] += 1;
+static int adfs_f_update(struct adfs_dir *dir, struct object_info *obj)
+{
+	struct adfs_direntry de;
+	int offset, ret;
 
-	ret = adfs_dir_checkbyte(dir);
-	/*
-	 * Update directory check byte
-	 */
-	dir->bh[dir->nr_buffers - 1]->b_data[sb->s_blocksize - 1] = ret;
+	offset = 5 - (int)sizeof(de);
 
-#if 1
-	{
-	const unsigned int blocksize_bits = sb->s_blocksize_bits;
+	do {
+		offset += sizeof(de);
+		ret = adfs_dir_copyfrom(&de, dir, offset, sizeof(de));
+		if (ret) {
+			adfs_error(dir->sb, "error reading directory entry");
+			return -ENOENT;
+		}
+		if (!de.dirobname[0]) {
+			adfs_error(dir->sb, "unable to locate entry to update");
+			return -ENOENT;
+		}
+	} while (adfs_readval(de.dirinddiscadd, 3) != obj->indaddr);
 
-	memcpy(&dir->dirhead, bufoff(dir->bh, 0), sizeof(dir->dirhead));
-	memcpy(&dir->dirtail, bufoff(dir->bh, 2007), sizeof(dir->dirtail));
+	/* Update the directory entry with the new object state */
+	adfs_obj2dir(&de, obj);
 
-	if (dir->dirhead.startmasseq != dir->dirtail.new.endmasseq ||
-	    memcmp(&dir->dirhead.startname, &dir->dirtail.new.endname, 4))
-		goto bad_dir;
+	/* Write the directory entry back to the directory */
+	return adfs_dir_copyto(dir, offset, &de, 26);
+}
 
-	if (memcmp(&dir->dirhead.startname, "Nick", 4) &&
-	    memcmp(&dir->dirhead.startname, "Hugo", 4))
-		goto bad_dir;
+static int adfs_f_commit(struct adfs_dir *dir)
+{
+	int ret;
 
-	if (adfs_dir_checkbyte(dir) != dir->dirtail.new.dircheckbyte)
-		goto bad_dir;
-	}
-#endif
-	for (i = dir->nr_buffers - 1; i >= 0; i--)
-		mark_buffer_dirty(dir->bh[i]);
+	/* Increment directory sequence number */
+	dir->dirhead->startmasseq += 1;
+	dir->newtail->endmasseq += 1;
 
-	ret = 0;
-out:
+	/* Update directory check byte */
+	dir->newtail->dircheckbyte = adfs_dir_checkbyte(dir);
+
+	/* Make sure the directory still validates correctly */
+	ret = adfs_f_validate(dir);
+	if (ret)
+		adfs_msg(dir->sb, KERN_ERR, "error: update broke directory");
+
 	return ret;
-#if 1
-bad_dir:
-	adfs_error(dir->sb, "whoops!  I broke a directory!");
-	return -EIO;
-#endif
-}
-
-static int
-adfs_f_sync(struct adfs_dir *dir)
-{
-	int err = 0;
-	int i;
-
-	for (i = dir->nr_buffers - 1; i >= 0; i--) {
-		struct buffer_head *bh = dir->bh[i];
-		sync_dirty_buffer(bh);
-		if (buffer_req(bh) && !buffer_uptodate(bh))
-			err = -EIO;
-	}
-
-	return err;
-}
-
-static void
-adfs_f_free(struct adfs_dir *dir)
-{
-	int i;
-
-	for (i = dir->nr_buffers - 1; i >= 0; i--) {
-		brelse(dir->bh[i]);
-		dir->bh[i] = NULL;
-	}
-
-	dir->nr_buffers = 0;
-	dir->sb = NULL;
 }
 
 const struct adfs_dir_ops adfs_f_dir_ops = {
 	.read		= adfs_f_read,
+	.iterate	= adfs_f_iterate,
 	.setpos		= adfs_f_setpos,
 	.getnext	= adfs_f_getnext,
 	.update		= adfs_f_update,
-	.sync		= adfs_f_sync,
-	.free		= adfs_f_free
+	.commit		= adfs_f_commit,
 };
diff --git a/fs/adfs/dir_f.h b/fs/adfs/dir_f.h
index 5aec332..a5393e6 100644
--- a/fs/adfs/dir_f.h
+++ b/fs/adfs/dir_f.h
@@ -13,9 +13,9 @@
  * Directory header
  */
 struct adfs_dirheader {
-	unsigned char startmasseq;
-	unsigned char startname[4];
-};
+	__u8 startmasseq;
+	__u8 startname[4];
+} __attribute__((packed));
 
 #define ADFS_NEWDIR_SIZE	2048
 #define ADFS_NUM_DIR_ENTRIES	77
@@ -31,32 +31,36 @@ struct adfs_direntry {
 	__u8 dirlen[4];
 	__u8 dirinddiscadd[3];
 	__u8 newdiratts;
-};
+} __attribute__((packed));
 
 /*
  * Directory tail
  */
+struct adfs_olddirtail {
+	__u8 dirlastmask;
+	char dirname[10];
+	__u8 dirparent[3];
+	char dirtitle[19];
+	__u8 reserved[14];
+	__u8 endmasseq;
+	__u8 endname[4];
+	__u8 dircheckbyte;
+} __attribute__((packed));
+
+struct adfs_newdirtail {
+	__u8 dirlastmask;
+	__u8 reserved[2];
+	__u8 dirparent[3];
+	char dirtitle[19];
+	char dirname[10];
+	__u8 endmasseq;
+	__u8 endname[4];
+	__u8 dircheckbyte;
+} __attribute__((packed));
+
 union adfs_dirtail {
-	struct {
-		unsigned char dirlastmask;
-		char dirname[10];
-		unsigned char dirparent[3];
-		char dirtitle[19];
-		unsigned char reserved[14];
-		unsigned char endmasseq;
-		unsigned char endname[4];
-		unsigned char dircheckbyte;
-	} old;
-	struct {
-		unsigned char dirlastmask;
-		unsigned char reserved[2];
-		unsigned char dirparent[3];
-		char dirtitle[19];
-		char dirname[10];
-		unsigned char endmasseq;
-		unsigned char endname[4];
-		unsigned char dircheckbyte;
-	} new;
+	struct adfs_olddirtail old;
+	struct adfs_newdirtail new;
 };
 
 #endif
diff --git a/fs/adfs/dir_fplus.c b/fs/adfs/dir_fplus.c
index d56924c..4a15924 100644
--- a/fs/adfs/dir_fplus.c
+++ b/fs/adfs/dir_fplus.c
@@ -4,123 +4,163 @@
  *
  *  Copyright (C) 1997-1999 Russell King
  */
-#include <linux/slab.h>
 #include "adfs.h"
 #include "dir_fplus.h"
 
-static int
-adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir)
+/* Return the byte offset to directory entry pos */
+static unsigned int adfs_fplus_offset(const struct adfs_bigdirheader *h,
+				      unsigned int pos)
+{
+	return offsetof(struct adfs_bigdirheader, bigdirname) +
+	       ALIGN(le32_to_cpu(h->bigdirnamelen), 4) +
+	       pos * sizeof(struct adfs_bigdirentry);
+}
+
+static int adfs_fplus_validate_header(const struct adfs_bigdirheader *h)
+{
+	unsigned int size = le32_to_cpu(h->bigdirsize);
+	unsigned int len;
+
+	if (h->bigdirversion[0] != 0 || h->bigdirversion[1] != 0 ||
+	    h->bigdirversion[2] != 0 ||
+	    h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME) ||
+	    !size || size & 2047 || size > SZ_4M)
+		return -EIO;
+
+	size -= sizeof(struct adfs_bigdirtail) +
+		offsetof(struct adfs_bigdirheader, bigdirname);
+
+	/* Check that bigdirnamelen fits within the directory */
+	len = ALIGN(le32_to_cpu(h->bigdirnamelen), 4);
+	if (len > size)
+		return -EIO;
+
+	size -= len;
+
+	/* Check that bigdirnamesize fits within the directory */
+	len = le32_to_cpu(h->bigdirnamesize);
+	if (len > size)
+		return -EIO;
+
+	size -= len;
+
+	/*
+	 * Avoid division; the absolute maximum number of entries cannot be
+	 * large enough to overflow the multiplication below.
+	 */
+	len = le32_to_cpu(h->bigdirentries);
+	if (len > SZ_4M / sizeof(struct adfs_bigdirentry) ||
+	    len * sizeof(struct adfs_bigdirentry) > size)
+		return -EIO;
+
+	return 0;
+}
+
+static int adfs_fplus_validate_tail(const struct adfs_bigdirheader *h,
+				    const struct adfs_bigdirtail *t)
+{
+	if (t->bigdirendname != cpu_to_le32(BIGDIRENDNAME) ||
+	    t->bigdirendmasseq != h->startmasseq ||
+	    t->reserved[0] != 0 || t->reserved[1] != 0)
+		return -EIO;
+
+	return 0;
+}
+
+static u8 adfs_fplus_checkbyte(struct adfs_dir *dir)
+{
+	struct adfs_bigdirheader *h = dir->bighead;
+	struct adfs_bigdirtail *t = dir->bigtail;
+	unsigned int end, bs, bi, i;
+	__le32 *bp;
+	u32 dircheck;
+
+	end = adfs_fplus_offset(h, le32_to_cpu(h->bigdirentries)) +
+		le32_to_cpu(h->bigdirnamesize);
+
+	/* Accumulate the contents of the header, entries and names */
+	for (dircheck = 0, bi = 0; end; bi++) {
+		bp = (void *)dir->bhs[bi]->b_data;
+		bs = dir->bhs[bi]->b_size;
+		if (bs > end)
+			bs = end;
+
+		for (i = 0; i < bs; i += sizeof(u32))
+			dircheck = ror32(dircheck, 13) ^ le32_to_cpup(bp++);
+
+		end -= bs;
+	}
+
+	/* Accumulate the contents of the tail except for the check byte */
+	dircheck = ror32(dircheck, 13) ^ le32_to_cpu(t->bigdirendname);
+	dircheck = ror32(dircheck, 13) ^ t->bigdirendmasseq;
+	dircheck = ror32(dircheck, 13) ^ t->reserved[0];
+	dircheck = ror32(dircheck, 13) ^ t->reserved[1];
+
+	return dircheck ^ dircheck >> 8 ^ dircheck >> 16 ^ dircheck >> 24;
+}
+
+static int adfs_fplus_read(struct super_block *sb, u32 indaddr,
+			   unsigned int size, struct adfs_dir *dir)
 {
 	struct adfs_bigdirheader *h;
 	struct adfs_bigdirtail *t;
-	unsigned long block;
-	unsigned int blk, size;
-	int i, ret = -EIO;
+	unsigned int dirsize;
+	int ret;
 
-	dir->nr_buffers = 0;
+	/* Read first buffer */
+	ret = adfs_dir_read_buffers(sb, indaddr, sb->s_blocksize, dir);
+	if (ret)
+		return ret;
 
-	/* start off using fixed bh set - only alloc for big dirs */
-	dir->bh_fplus = &dir->bh[0];
-
-	block = __adfs_block_map(sb, id, 0);
-	if (!block) {
-		adfs_error(sb, "dir object %X has a hole at offset 0", id);
+	dir->bighead = h = (void *)dir->bhs[0]->b_data;
+	ret = adfs_fplus_validate_header(h);
+	if (ret) {
+		adfs_error(sb, "dir %06x has malformed header", indaddr);
 		goto out;
 	}
 
-	dir->bh_fplus[0] = sb_bread(sb, block);
-	if (!dir->bh_fplus[0])
-		goto out;
-	dir->nr_buffers += 1;
-
-	h = (struct adfs_bigdirheader *)dir->bh_fplus[0]->b_data;
-	size = le32_to_cpu(h->bigdirsize);
-	if (size != sz) {
+	dirsize = le32_to_cpu(h->bigdirsize);
+	if (size && dirsize != size) {
 		adfs_msg(sb, KERN_WARNING,
-			 "directory header size %X does not match directory size %X",
-			 size, sz);
+			 "dir %06x header size %X does not match directory size %X",
+			 indaddr, dirsize, size);
 	}
 
-	if (h->bigdirversion[0] != 0 || h->bigdirversion[1] != 0 ||
-	    h->bigdirversion[2] != 0 || size & 2047 ||
-	    h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME)) {
-		adfs_error(sb, "dir %06x has malformed header", id);
+	/* Read remaining buffers */
+	ret = adfs_dir_read_buffers(sb, indaddr, dirsize, dir);
+	if (ret)
+		return ret;
+
+	dir->bigtail = t = (struct adfs_bigdirtail *)
+		(dir->bhs[dir->nr_buffers - 1]->b_data + (sb->s_blocksize - 8));
+
+	ret = adfs_fplus_validate_tail(h, t);
+	if (ret) {
+		adfs_error(sb, "dir %06x has malformed tail", indaddr);
 		goto out;
 	}
 
-	size >>= sb->s_blocksize_bits;
-	if (size > ARRAY_SIZE(dir->bh)) {
-		/* this directory is too big for fixed bh set, must allocate */
-		struct buffer_head **bh_fplus =
-			kcalloc(size, sizeof(struct buffer_head *),
-				GFP_KERNEL);
-		if (!bh_fplus) {
-			adfs_msg(sb, KERN_ERR,
-				 "not enough memory for dir object %X (%d blocks)",
-				 id, size);
-			ret = -ENOMEM;
-			goto out;
-		}
-		dir->bh_fplus = bh_fplus;
-		/* copy over the pointer to the block that we've already read */
-		dir->bh_fplus[0] = dir->bh[0];
-	}
-
-	for (blk = 1; blk < size; blk++) {
-		block = __adfs_block_map(sb, id, blk);
-		if (!block) {
-			adfs_error(sb, "dir object %X has a hole at offset %d", id, blk);
-			goto out;
-		}
-
-		dir->bh_fplus[blk] = sb_bread(sb, block);
-		if (!dir->bh_fplus[blk]) {
-			adfs_error(sb,	"dir object %x failed read for offset %d, mapped block %lX",
-				   id, blk, block);
-			goto out;
-		}
-
-		dir->nr_buffers += 1;
-	}
-
-	t = (struct adfs_bigdirtail *)
-		(dir->bh_fplus[size - 1]->b_data + (sb->s_blocksize - 8));
-
-	if (t->bigdirendname != cpu_to_le32(BIGDIRENDNAME) ||
-	    t->bigdirendmasseq != h->startmasseq ||
-	    t->reserved[0] != 0 || t->reserved[1] != 0) {
-		adfs_error(sb, "dir %06x has malformed tail", id);
+	if (adfs_fplus_checkbyte(dir) != t->bigdircheckbyte) {
+		adfs_error(sb, "dir %06x checkbyte mismatch\n", indaddr);
 		goto out;
 	}
 
 	dir->parent_id = le32_to_cpu(h->bigdirparent);
-	dir->sb = sb;
 	return 0;
 
 out:
-	if (dir->bh_fplus) {
-		for (i = 0; i < dir->nr_buffers; i++)
-			brelse(dir->bh_fplus[i]);
+	adfs_dir_relse(dir);
 
-		if (&dir->bh[0] != dir->bh_fplus)
-			kfree(dir->bh_fplus);
-
-		dir->bh_fplus = NULL;
-	}
-
-	dir->nr_buffers = 0;
-	dir->sb = NULL;
 	return ret;
 }
 
 static int
 adfs_fplus_setpos(struct adfs_dir *dir, unsigned int fpos)
 {
-	struct adfs_bigdirheader *h =
-		(struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data;
 	int ret = -ENOENT;
 
-	if (fpos <= le32_to_cpu(h->bigdirentries)) {
+	if (fpos <= le32_to_cpu(dir->bighead->bigdirentries)) {
 		dir->pos = fpos;
 		ret = 0;
 	}
@@ -128,51 +168,23 @@ adfs_fplus_setpos(struct adfs_dir *dir, unsigned int fpos)
 	return ret;
 }
 
-static void
-dir_memcpy(struct adfs_dir *dir, unsigned int offset, void *to, int len)
-{
-	struct super_block *sb = dir->sb;
-	unsigned int buffer, partial, remainder;
-
-	buffer = offset >> sb->s_blocksize_bits;
-	offset &= sb->s_blocksize - 1;
-
-	partial = sb->s_blocksize - offset;
-
-	if (partial >= len)
-		memcpy(to, dir->bh_fplus[buffer]->b_data + offset, len);
-	else {
-		char *c = (char *)to;
-
-		remainder = len - partial;
-
-		memcpy(c,
-			dir->bh_fplus[buffer]->b_data + offset,
-			partial);
-
-		memcpy(c + partial,
-			dir->bh_fplus[buffer + 1]->b_data,
-			remainder);
-	}
-}
-
 static int
 adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
 {
-	struct adfs_bigdirheader *h =
-		(struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data;
+	struct adfs_bigdirheader *h = dir->bighead;
 	struct adfs_bigdirentry bde;
 	unsigned int offset;
-	int ret = -ENOENT;
+	int ret;
 
 	if (dir->pos >= le32_to_cpu(h->bigdirentries))
-		goto out;
+		return -ENOENT;
 
-	offset = offsetof(struct adfs_bigdirheader, bigdirname);
-	offset += ((le32_to_cpu(h->bigdirnamelen) + 4) & ~3);
-	offset += dir->pos * sizeof(struct adfs_bigdirentry);
+	offset = adfs_fplus_offset(h, dir->pos);
 
-	dir_memcpy(dir, offset, &bde, sizeof(struct adfs_bigdirentry));
+	ret = adfs_dir_copyfrom(&bde, dir, offset,
+				sizeof(struct adfs_bigdirentry));
+	if (ret)
+		return ret;
 
 	obj->loadaddr = le32_to_cpu(bde.bigdirload);
 	obj->execaddr = le32_to_cpu(bde.bigdirexec);
@@ -181,59 +193,95 @@ adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
 	obj->attr     = le32_to_cpu(bde.bigdirattr);
 	obj->name_len = le32_to_cpu(bde.bigdirobnamelen);
 
-	offset = offsetof(struct adfs_bigdirheader, bigdirname);
-	offset += ((le32_to_cpu(h->bigdirnamelen) + 4) & ~3);
-	offset += le32_to_cpu(h->bigdirentries) * sizeof(struct adfs_bigdirentry);
+	offset = adfs_fplus_offset(h, le32_to_cpu(h->bigdirentries));
 	offset += le32_to_cpu(bde.bigdirobnameptr);
 
-	dir_memcpy(dir, offset, obj->name, obj->name_len);
+	ret = adfs_dir_copyfrom(obj->name, dir, offset, obj->name_len);
+	if (ret)
+		return ret;
+
 	adfs_object_fixup(dir, obj);
 
 	dir->pos += 1;
-	ret = 0;
-out:
+
+	return 0;
+}
+
+static int adfs_fplus_iterate(struct adfs_dir *dir, struct dir_context *ctx)
+{
+	struct object_info obj;
+
+	if ((ctx->pos - 2) >> 32)
+		return 0;
+
+	if (adfs_fplus_setpos(dir, ctx->pos - 2))
+		return 0;
+
+	while (!adfs_fplus_getnext(dir, &obj)) {
+		if (!dir_emit(ctx, obj.name, obj.name_len,
+			      obj.indaddr, DT_UNKNOWN))
+			break;
+		ctx->pos++;
+	}
+
+	return 0;
+}
+
+static int adfs_fplus_update(struct adfs_dir *dir, struct object_info *obj)
+{
+	struct adfs_bigdirheader *h = dir->bighead;
+	struct adfs_bigdirentry bde;
+	int offset, end, ret;
+
+	offset = adfs_fplus_offset(h, 0) - sizeof(bde);
+	end = adfs_fplus_offset(h, le32_to_cpu(h->bigdirentries));
+
+	do {
+		offset += sizeof(bde);
+		if (offset >= end) {
+			adfs_error(dir->sb, "unable to locate entry to update");
+			return -ENOENT;
+		}
+		ret = adfs_dir_copyfrom(&bde, dir, offset, sizeof(bde));
+		if (ret) {
+			adfs_error(dir->sb, "error reading directory entry");
+			return -ENOENT;
+		}
+	} while (le32_to_cpu(bde.bigdirindaddr) != obj->indaddr);
+
+	bde.bigdirload    = cpu_to_le32(obj->loadaddr);
+	bde.bigdirexec    = cpu_to_le32(obj->execaddr);
+	bde.bigdirlen     = cpu_to_le32(obj->size);
+	bde.bigdirindaddr = cpu_to_le32(obj->indaddr);
+	bde.bigdirattr    = cpu_to_le32(obj->attr);
+
+	return adfs_dir_copyto(dir, offset, &bde, sizeof(bde));
+}
+
+static int adfs_fplus_commit(struct adfs_dir *dir)
+{
+	int ret;
+
+	/* Increment directory sequence number */
+	dir->bighead->startmasseq += 1;
+	dir->bigtail->bigdirendmasseq += 1;
+
+	/* Update directory check byte */
+	dir->bigtail->bigdircheckbyte = adfs_fplus_checkbyte(dir);
+
+	/* Make sure the directory still validates correctly */
+	ret = adfs_fplus_validate_header(dir->bighead);
+	if (ret == 0)
+		ret = adfs_fplus_validate_tail(dir->bighead, dir->bigtail);
+
 	return ret;
 }
 
-static int
-adfs_fplus_sync(struct adfs_dir *dir)
-{
-	int err = 0;
-	int i;
-
-	for (i = dir->nr_buffers - 1; i >= 0; i--) {
-		struct buffer_head *bh = dir->bh_fplus[i];
-		sync_dirty_buffer(bh);
-		if (buffer_req(bh) && !buffer_uptodate(bh))
-			err = -EIO;
-	}
-
-	return err;
-}
-
-static void
-adfs_fplus_free(struct adfs_dir *dir)
-{
-	int i;
-
-	if (dir->bh_fplus) {
-		for (i = 0; i < dir->nr_buffers; i++)
-			brelse(dir->bh_fplus[i]);
-
-		if (&dir->bh[0] != dir->bh_fplus)
-			kfree(dir->bh_fplus);
-
-		dir->bh_fplus = NULL;
-	}
-
-	dir->nr_buffers = 0;
-	dir->sb = NULL;
-}
-
 const struct adfs_dir_ops adfs_fplus_dir_ops = {
 	.read		= adfs_fplus_read,
+	.iterate	= adfs_fplus_iterate,
 	.setpos		= adfs_fplus_setpos,
 	.getnext	= adfs_fplus_getnext,
-	.sync		= adfs_fplus_sync,
-	.free		= adfs_fplus_free
+	.update		= adfs_fplus_update,
+	.commit		= adfs_fplus_commit,
 };
diff --git a/fs/adfs/dir_fplus.h b/fs/adfs/dir_fplus.h
index 4ec0931..d729b15 100644
--- a/fs/adfs/dir_fplus.h
+++ b/fs/adfs/dir_fplus.h
@@ -22,7 +22,7 @@ struct adfs_bigdirheader {
 	__le32	bigdirnamesize;
 	__le32	bigdirparent;
 	char	bigdirname[1];
-};
+} __attribute__((packed, aligned(4)));
 
 struct adfs_bigdirentry {
 	__le32	bigdirload;
@@ -32,11 +32,11 @@ struct adfs_bigdirentry {
 	__le32	bigdirattr;
 	__le32	bigdirobnamelen;
 	__le32	bigdirobnameptr;
-};
+} __attribute__((packed, aligned(4)));
 
 struct adfs_bigdirtail {
 	__le32	bigdirendname;
 	__u8	bigdirendmasseq;
 	__u8	reserved[2];
 	__u8	bigdircheckbyte;
-};
+} __attribute__((packed, aligned(4)));
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 124de75..32620f4 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -20,7 +20,8 @@ adfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh,
 		if (block >= inode->i_blocks)
 			goto abort_toobig;
 
-		block = __adfs_block_map(inode->i_sb, inode->i_ino, block);
+		block = __adfs_block_map(inode->i_sb, ADFS_I(inode)->indaddr,
+					 block);
 		if (block)
 			map_bh(bh, inode->i_sb, block);
 		return 0;
@@ -126,29 +127,29 @@ adfs_atts2mode(struct super_block *sb, struct inode *inode)
  * Convert Linux permission to ADFS attribute.  We try to do the reverse
  * of atts2mode, but there is not a 1:1 translation.
  */
-static int
-adfs_mode2atts(struct super_block *sb, struct inode *inode)
+static int adfs_mode2atts(struct super_block *sb, struct inode *inode,
+			  umode_t ia_mode)
 {
+	struct adfs_sb_info *asb = ADFS_SB(sb);
 	umode_t mode;
 	int attr;
-	struct adfs_sb_info *asb = ADFS_SB(sb);
 
 	/* FIXME: should we be able to alter a link? */
 	if (S_ISLNK(inode->i_mode))
 		return ADFS_I(inode)->attr;
 
+	/* Directories do not have read/write permissions on the media */
 	if (S_ISDIR(inode->i_mode))
-		attr = ADFS_NDA_DIRECTORY;
-	else
-		attr = 0;
+		return ADFS_NDA_DIRECTORY;
 
-	mode = inode->i_mode & asb->s_owner_mask;
+	attr = 0;
+	mode = ia_mode & asb->s_owner_mask;
 	if (mode & S_IRUGO)
 		attr |= ADFS_NDA_OWNER_READ;
 	if (mode & S_IWUGO)
 		attr |= ADFS_NDA_OWNER_WRITE;
 
-	mode = inode->i_mode & asb->s_other_mask;
+	mode = ia_mode & asb->s_other_mask;
 	mode &= ~asb->s_owner_mask;
 	if (mode & S_IRUGO)
 		attr |= ADFS_NDA_PUBLIC_READ;
@@ -158,6 +159,8 @@ adfs_mode2atts(struct super_block *sb, struct inode *inode)
 	return attr;
 }
 
+static const s64 nsec_unix_epoch_diff_risc_os_epoch = 2208988800000000000LL;
+
 /*
  * Convert an ADFS time to Unix time.  ADFS has a 40-bit centi-second time
  * referenced to 1 Jan 1900 (til 2248) so we need to discard 2208988800 seconds
@@ -170,8 +173,6 @@ adfs_adfs2unix_time(struct timespec64 *tv, struct inode *inode)
 	/* 01 Jan 1970 00:00:00 (Unix epoch) as nanoseconds since
 	 * 01 Jan 1900 00:00:00 (RISC OS epoch)
 	 */
-	static const s64 nsec_unix_epoch_diff_risc_os_epoch =
-							2208988800000000000LL;
 	s64 nsec;
 
 	if (!adfs_inode_is_stamped(inode))
@@ -204,24 +205,23 @@ adfs_adfs2unix_time(struct timespec64 *tv, struct inode *inode)
 	return;
 }
 
-/*
- * Convert an Unix time to ADFS time.  We only do this if the entry has a
- * time/date stamp already.
- */
-static void
-adfs_unix2adfs_time(struct inode *inode, unsigned int secs)
+/* Convert a Unix time to ADFS time for an entry that is already stamped. */
+static void adfs_unix2adfs_time(struct inode *inode,
+				const struct timespec64 *ts)
 {
-	unsigned int high, low;
+	s64 cs, nsec = timespec64_to_ns(ts);
 
-	if (adfs_inode_is_stamped(inode)) {
-		/* convert 32-bit seconds to 40-bit centi-seconds */
-		low  = (secs & 255) * 100;
-		high = (secs / 256) * 100 + (low >> 8) + 0x336e996a;
+	/* convert from Unix to RISC OS epoch */
+	nsec += nsec_unix_epoch_diff_risc_os_epoch;
 
-		ADFS_I(inode)->loadaddr = (high >> 24) |
-				(ADFS_I(inode)->loadaddr & ~0xff);
-		ADFS_I(inode)->execaddr = (low & 255) | (high << 8);
-	}
+	/* convert from nanoseconds to centiseconds */
+	cs = div_s64(nsec, 10000000);
+
+	cs = clamp_t(s64, cs, 0, 0xffffffffff);
+
+	ADFS_I(inode)->loadaddr &= ~0xff;
+	ADFS_I(inode)->loadaddr |= (cs >> 32) & 0xff;
+	ADFS_I(inode)->execaddr = cs;
 }
 
 /*
@@ -260,6 +260,7 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
 	 * for cross-directory renames.
 	 */
 	ADFS_I(inode)->parent_id = obj->parent_id;
+	ADFS_I(inode)->indaddr   = obj->indaddr;
 	ADFS_I(inode)->loadaddr  = obj->loadaddr;
 	ADFS_I(inode)->execaddr  = obj->execaddr;
 	ADFS_I(inode)->attr      = obj->attr;
@@ -315,10 +316,11 @@ adfs_notify_change(struct dentry *dentry, struct iattr *attr)
 	if (ia_valid & ATTR_SIZE)
 		truncate_setsize(inode, attr->ia_size);
 
-	if (ia_valid & ATTR_MTIME) {
-		inode->i_mtime = attr->ia_mtime;
-		adfs_unix2adfs_time(inode, attr->ia_mtime.tv_sec);
+	if (ia_valid & ATTR_MTIME && adfs_inode_is_stamped(inode)) {
+		adfs_unix2adfs_time(inode, &attr->ia_mtime);
+		adfs_adfs2unix_time(&inode->i_mtime, inode);
 	}
+
 	/*
 	 * FIXME: should we make these == to i_mtime since we don't
 	 * have the ability to represent them in our filesystem?
@@ -328,7 +330,7 @@ adfs_notify_change(struct dentry *dentry, struct iattr *attr)
 	if (ia_valid & ATTR_CTIME)
 		inode->i_ctime = attr->ia_ctime;
 	if (ia_valid & ATTR_MODE) {
-		ADFS_I(inode)->attr = adfs_mode2atts(sb, inode);
+		ADFS_I(inode)->attr = adfs_mode2atts(sb, inode, attr->ia_mode);
 		inode->i_mode = adfs_atts2mode(sb, inode);
 	}
 
@@ -353,7 +355,7 @@ int adfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 	struct object_info obj;
 	int ret;
 
-	obj.indaddr	= inode->i_ino;
+	obj.indaddr	= ADFS_I(inode)->indaddr;
 	obj.name_len	= 0;
 	obj.parent_id	= ADFS_I(inode)->parent_id;
 	obj.loadaddr	= ADFS_I(inode)->loadaddr;
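
For the timestamp rewrite above: ADFS stores a 40-bit centisecond count since 1 Jan 1900, so
adfs_unix2adfs_time() adds the 2208988800-second epoch difference, divides the nanosecond
value down to centiseconds, clamps it to 40 bits and splits the result across loadaddr bits
0-7 and execaddr. As a sanity check of the arithmetic, the Unix epoch itself maps to
2208988800 * 100 = 220898880000 centiseconds, i.e. 0x336e996a00, whose upper 32 bits are
exactly the 0x336e996a constant the old open-coded version added in. Here is a user-space
sketch of the same conversion (not the kernel code; plain 64-bit division stands in for
div_s64()).

/* Sketch of the Unix -> ADFS timestamp conversion in user space. */
#include <stdint.h>
#include <stdio.h>

/* 01 Jan 1970 (Unix epoch) as nanoseconds since 01 Jan 1900 (RISC OS epoch) */
#define RISCOS_EPOCH_DIFF_NS 2208988800000000000LL

static uint64_t unix_ns_to_adfs_cs(int64_t ns)
{
	int64_t cs = (ns + RISCOS_EPOCH_DIFF_NS) / 10000000;	/* ns -> centiseconds */

	if (cs < 0)
		cs = 0;
	if (cs > 0xffffffffffLL)	/* clamp to the 40-bit on-disc field */
		cs = 0xffffffffffLL;
	return (uint64_t)cs;
}

int main(void)
{
	/* Unix epoch: expect 0x336e996a00, matching the old 0x336e996a constant */
	printf("%#llx\n", (unsigned long long)unix_ns_to_adfs_cs(0));
	return 0;
}
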
diff --git a/fs/adfs/map.c b/fs/adfs/map.c
index f44d12c..a81de80 100644
--- a/fs/adfs/map.c
+++ b/fs/adfs/map.c
@@ -4,6 +4,8 @@
  *
  *  Copyright (C) 1997-2002 Russell King
  */
+#include <linux/slab.h>
+#include <linux/statfs.h>
 #include <asm/unaligned.h>
 #include "adfs.h"
 
@@ -66,54 +68,41 @@ static DEFINE_RWLOCK(adfs_map_lock);
 static int lookup_zone(const struct adfs_discmap *dm, const unsigned int idlen,
 		       const u32 frag_id, unsigned int *offset)
 {
-	const unsigned int mapsize = dm->dm_endbit;
+	const unsigned int endbit = dm->dm_endbit;
 	const u32 idmask = (1 << idlen) - 1;
-	unsigned char *map = dm->dm_bh->b_data + 4;
+	unsigned char *map = dm->dm_bh->b_data;
 	unsigned int start = dm->dm_startbit;
-	unsigned int mapptr;
+	unsigned int freelink, fragend;
 	u32 frag;
 
+	frag = GET_FRAG_ID(map, 8, idmask & 0x7fff);
+	freelink = frag ? 8 + frag : 0;
+
 	do {
 		frag = GET_FRAG_ID(map, start, idmask);
-		mapptr = start + idlen;
 
-		/*
-		 * find end of fragment
-		 */
-		{
-			__le32 *_map = (__le32 *)map;
-			u32 v = le32_to_cpu(_map[mapptr >> 5]) >> (mapptr & 31);
-			while (v == 0) {
-				mapptr = (mapptr & ~31) + 32;
-				if (mapptr >= mapsize)
-					goto error;
-				v = le32_to_cpu(_map[mapptr >> 5]);
-			}
+		fragend = find_next_bit_le(map, endbit, start + idlen);
+		if (fragend >= endbit)
+			goto error;
 
-			mapptr += 1 + ffz(~v);
+		if (start == freelink) {
+			freelink += frag & 0x7fff;
+		} else if (frag == frag_id) {
+			unsigned int length = fragend + 1 - start;
+
+			if (*offset < length)
+				return start + *offset;
+			*offset -= length;
 		}
 
-		if (frag == frag_id)
-			goto found;
-again:
-		start = mapptr;
-	} while (mapptr < mapsize);
+		start = fragend + 1;
+	} while (start < endbit);
 	return -1;
 
 error:
 	printk(KERN_ERR "adfs: oversized fragment 0x%x at 0x%x-0x%x\n",
-		frag, start, mapptr);
+		frag, start, fragend);
 	return -1;
-
-found:
-	{
-		int length = mapptr - start;
-		if (*offset >= length) {
-			*offset -= length;
-			goto again;
-		}
-	}
-	return start + *offset;
 }
 
 /*
@@ -125,12 +114,12 @@ static int lookup_zone(const struct adfs_discmap *dm, const unsigned int idlen,
 static unsigned int
 scan_free_map(struct adfs_sb_info *asb, struct adfs_discmap *dm)
 {
-	const unsigned int mapsize = dm->dm_endbit + 32;
+	const unsigned int endbit = dm->dm_endbit;
 	const unsigned int idlen  = asb->s_idlen;
 	const unsigned int frag_idlen = idlen <= 15 ? idlen : 15;
 	const u32 idmask = (1 << frag_idlen) - 1;
 	unsigned char *map = dm->dm_bh->b_data;
-	unsigned int start = 8, mapptr;
+	unsigned int start = 8, fragend;
 	u32 frag;
 	unsigned long total = 0;
 
@@ -149,29 +138,13 @@ scan_free_map(struct adfs_sb_info *asb, struct adfs_discmap *dm)
 	do {
 		start += frag;
 
-		/*
-		 * get fragment id
-		 */
 		frag = GET_FRAG_ID(map, start, idmask);
-		mapptr = start + idlen;
 
-		/*
-		 * find end of fragment
-		 */
-		{
-			__le32 *_map = (__le32 *)map;
-			u32 v = le32_to_cpu(_map[mapptr >> 5]) >> (mapptr & 31);
-			while (v == 0) {
-				mapptr = (mapptr & ~31) + 32;
-				if (mapptr >= mapsize)
-					goto error;
-				v = le32_to_cpu(_map[mapptr >> 5]);
-			}
+		fragend = find_next_bit_le(map, endbit, start + idlen);
+		if (fragend >= endbit)
+			goto error;
 
-			mapptr += 1 + ffz(~v);
-		}
-
-		total += mapptr - start;
+		total += fragend + 1 - start;
 	} while (frag >= idlen + 1);
 
 	if (frag != 0)
@@ -220,10 +193,10 @@ static int scan_map(struct adfs_sb_info *asb, unsigned int zone,
  *  total_free = E(free_in_zone_n)
  *              nzones
  */
-unsigned int
-adfs_map_free(struct super_block *sb)
+void adfs_map_statfs(struct super_block *sb, struct kstatfs *buf)
 {
 	struct adfs_sb_info *asb = ADFS_SB(sb);
+	struct adfs_discrecord *dr = adfs_map_discrecord(asb->s_map);
 	struct adfs_discmap *dm;
 	unsigned int total = 0;
 	unsigned int zone;
@@ -235,7 +208,10 @@ adfs_map_free(struct super_block *sb)
 		total += scan_free_map(asb, dm++);
 	} while (--zone > 0);
 
-	return signed_asl(total, asb->s_map2blk);
+	buf->f_blocks  = adfs_disc_size(dr) >> sb->s_blocksize_bits;
+	buf->f_files   = asb->s_ids_per_zone * asb->s_map_size;
+	buf->f_bavail  =
+	buf->f_bfree   = signed_asl(total, asb->s_map2blk);
 }
 
 int adfs_map_lookup(struct super_block *sb, u32 frag_id, unsigned int offset)
@@ -280,3 +256,152 @@ int adfs_map_lookup(struct super_block *sb, u32 frag_id, unsigned int offset)
 		   frag_id, zone, asb->s_map_size);
 	return 0;
 }
+
+static unsigned char adfs_calczonecheck(struct super_block *sb, unsigned char *map)
+{
+	unsigned int v0, v1, v2, v3;
+	int i;
+
+	v0 = v1 = v2 = v3 = 0;
+	for (i = sb->s_blocksize - 4; i; i -= 4) {
+		v0 += map[i]     + (v3 >> 8);
+		v3 &= 0xff;
+		v1 += map[i + 1] + (v0 >> 8);
+		v0 &= 0xff;
+		v2 += map[i + 2] + (v1 >> 8);
+		v1 &= 0xff;
+		v3 += map[i + 3] + (v2 >> 8);
+		v2 &= 0xff;
+	}
+	v0 +=           v3 >> 8;
+	v1 += map[1] + (v0 >> 8);
+	v2 += map[2] + (v1 >> 8);
+	v3 += map[3] + (v2 >> 8);
+
+	return v0 ^ v1 ^ v2 ^ v3;
+}
+
+static int adfs_checkmap(struct super_block *sb, struct adfs_discmap *dm)
+{
+	unsigned char crosscheck = 0, zonecheck = 1;
+	int i;
+
+	for (i = 0; i < ADFS_SB(sb)->s_map_size; i++) {
+		unsigned char *map;
+
+		map = dm[i].dm_bh->b_data;
+
+		if (adfs_calczonecheck(sb, map) != map[0]) {
+			adfs_error(sb, "zone %d fails zonecheck", i);
+			zonecheck = 0;
+		}
+		crosscheck ^= map[3];
+	}
+	if (crosscheck != 0xff)
+		adfs_error(sb, "crosscheck != 0xff");
+	return crosscheck == 0xff && zonecheck;
+}
+
+/*
+ * Lay out the map - the first zone contains a copy of the disc record,
+ * and the last zone must be limited to the size of the filesystem.
+ */
+static void adfs_map_layout(struct adfs_discmap *dm, unsigned int nzones,
+			    struct adfs_discrecord *dr)
+{
+	unsigned int zone, zone_size;
+	u64 size;
+
+	zone_size = (8 << dr->log2secsize) - le16_to_cpu(dr->zone_spare);
+
+	dm[0].dm_bh       = NULL;
+	dm[0].dm_startblk = 0;
+	dm[0].dm_startbit = 32 + ADFS_DR_SIZE_BITS;
+	dm[0].dm_endbit   = 32 + zone_size;
+
+	for (zone = 1; zone < nzones; zone++) {
+		dm[zone].dm_bh       = NULL;
+		dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS;
+		dm[zone].dm_startbit = 32;
+		dm[zone].dm_endbit   = 32 + zone_size;
+	}
+
+	size = adfs_disc_size(dr) >> dr->log2bpmb;
+	size -= (nzones - 1) * zone_size - ADFS_DR_SIZE_BITS;
+	dm[nzones - 1].dm_endbit = 32 + size;
+}
+
+static int adfs_map_read(struct adfs_discmap *dm, struct super_block *sb,
+			 unsigned int map_addr, unsigned int nzones)
+{
+	unsigned int zone;
+
+	for (zone = 0; zone < nzones; zone++) {
+		dm[zone].dm_bh = sb_bread(sb, map_addr + zone);
+		if (!dm[zone].dm_bh)
+			return -EIO;
+	}
+
+	return 0;
+}
+
+static void adfs_map_relse(struct adfs_discmap *dm, unsigned int nzones)
+{
+	unsigned int zone;
+
+	for (zone = 0; zone < nzones; zone++)
+		brelse(dm[zone].dm_bh);
+}
+
+struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_discrecord *dr)
+{
+	struct adfs_sb_info *asb = ADFS_SB(sb);
+	struct adfs_discmap *dm;
+	unsigned int map_addr, zone_size, nzones;
+	int ret;
+
+	nzones    = dr->nzones | dr->nzones_high << 8;
+	zone_size = (8 << dr->log2secsize) - le16_to_cpu(dr->zone_spare);
+
+	asb->s_idlen = dr->idlen;
+	asb->s_map_size = nzones;
+	asb->s_map2blk = dr->log2bpmb - dr->log2secsize;
+	asb->s_log2sharesize = dr->log2sharesize;
+	asb->s_ids_per_zone = zone_size / (asb->s_idlen + 1);
+
+	map_addr = (nzones >> 1) * zone_size -
+		     ((nzones > 1) ? ADFS_DR_SIZE_BITS : 0);
+	map_addr = signed_asl(map_addr, asb->s_map2blk);
+
+	dm = kmalloc_array(nzones, sizeof(*dm), GFP_KERNEL);
+	if (dm == NULL) {
+		adfs_error(sb, "not enough memory");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	adfs_map_layout(dm, nzones, dr);
+
+	ret = adfs_map_read(dm, sb, map_addr, nzones);
+	if (ret) {
+		adfs_error(sb, "unable to read map");
+		goto error_free;
+	}
+
+	if (adfs_checkmap(sb, dm))
+		return dm;
+
+	adfs_error(sb, "map corrupted");
+
+error_free:
+	adfs_map_relse(dm, nzones);
+	kfree(dm);
+	return ERR_PTR(-EIO);
+}
+
+void adfs_free_map(struct super_block *sb)
+{
+	struct adfs_sb_info *asb = ADFS_SB(sb);
+
+	adfs_map_relse(asb->s_map, asb->s_map_size);
+	kfree(asb->s_map);
+}
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 65b04eb..a3cc8ec 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -88,59 +88,11 @@ static int adfs_checkdiscrecord(struct adfs_discrecord *dr)
 	return 0;
 }
 
-static unsigned char adfs_calczonecheck(struct super_block *sb, unsigned char *map)
-{
-	unsigned int v0, v1, v2, v3;
-	int i;
-
-	v0 = v1 = v2 = v3 = 0;
-	for (i = sb->s_blocksize - 4; i; i -= 4) {
-		v0 += map[i]     + (v3 >> 8);
-		v3 &= 0xff;
-		v1 += map[i + 1] + (v0 >> 8);
-		v0 &= 0xff;
-		v2 += map[i + 2] + (v1 >> 8);
-		v1 &= 0xff;
-		v3 += map[i + 3] + (v2 >> 8);
-		v2 &= 0xff;
-	}
-	v0 +=           v3 >> 8;
-	v1 += map[1] + (v0 >> 8);
-	v2 += map[2] + (v1 >> 8);
-	v3 += map[3] + (v2 >> 8);
-
-	return v0 ^ v1 ^ v2 ^ v3;
-}
-
-static int adfs_checkmap(struct super_block *sb, struct adfs_discmap *dm)
-{
-	unsigned char crosscheck = 0, zonecheck = 1;
-	int i;
-
-	for (i = 0; i < ADFS_SB(sb)->s_map_size; i++) {
-		unsigned char *map;
-
-		map = dm[i].dm_bh->b_data;
-
-		if (adfs_calczonecheck(sb, map) != map[0]) {
-			adfs_error(sb, "zone %d fails zonecheck", i);
-			zonecheck = 0;
-		}
-		crosscheck ^= map[3];
-	}
-	if (crosscheck != 0xff)
-		adfs_error(sb, "crosscheck != 0xff");
-	return crosscheck == 0xff && zonecheck;
-}
-
 static void adfs_put_super(struct super_block *sb)
 {
-	int i;
 	struct adfs_sb_info *asb = ADFS_SB(sb);
 
-	for (i = 0; i < asb->s_map_size; i++)
-		brelse(asb->s_map[i].dm_bh);
-	kfree(asb->s_map);
+	adfs_free_map(sb);
 	kfree_rcu(asb, rcu);
 }
 
@@ -249,16 +201,13 @@ static int adfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *sb = dentry->d_sb;
 	struct adfs_sb_info *sbi = ADFS_SB(sb);
-	struct adfs_discrecord *dr = adfs_map_discrecord(sbi->s_map);
 	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
 
+	adfs_map_statfs(sb, buf);
+
 	buf->f_type    = ADFS_SUPER_MAGIC;
 	buf->f_namelen = sbi->s_namelen;
 	buf->f_bsize   = sb->s_blocksize;
-	buf->f_blocks  = adfs_disc_size(dr) >> sb->s_blocksize_bits;
-	buf->f_files   = sbi->s_ids_per_zone * sbi->s_map_size;
-	buf->f_bavail  =
-	buf->f_bfree   = adfs_map_free(sb);
 	buf->f_ffree   = (long)(buf->f_bfree * buf->f_files) / (long)buf->f_blocks;
 	buf->f_fsid.val[0] = (u32)id;
 	buf->f_fsid.val[1] = (u32)(id >> 32);
@@ -282,6 +231,12 @@ static void adfs_free_inode(struct inode *inode)
 	kmem_cache_free(adfs_inode_cachep, ADFS_I(inode));
 }
 
+static int adfs_drop_inode(struct inode *inode)
+{
+	/* always drop inodes if we are read-only */
+	return !IS_ENABLED(CONFIG_ADFS_FS_RW) || IS_RDONLY(inode);
+}
+
 static void init_once(void *foo)
 {
 	struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;
@@ -314,7 +269,7 @@ static void destroy_inodecache(void)
 static const struct super_operations adfs_sops = {
 	.alloc_inode	= adfs_alloc_inode,
 	.free_inode	= adfs_free_inode,
-	.drop_inode	= generic_delete_inode,
+	.drop_inode	= adfs_drop_inode,
 	.write_inode	= adfs_write_inode,
 	.put_super	= adfs_put_super,
 	.statfs		= adfs_statfs,
@@ -322,66 +277,94 @@ static const struct super_operations adfs_sops = {
 	.show_options	= adfs_show_options,
 };
 
-static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_discrecord *dr)
+static int adfs_probe(struct super_block *sb, unsigned int offset, int silent,
+		      int (*validate)(struct super_block *sb,
+				      struct buffer_head *bh,
+				      struct adfs_discrecord **bhp))
 {
-	struct adfs_discmap *dm;
-	unsigned int map_addr, zone_size, nzones;
-	int i, zone;
 	struct adfs_sb_info *asb = ADFS_SB(sb);
+	struct adfs_discrecord *dr;
+	struct buffer_head *bh;
+	unsigned int blocksize = BLOCK_SIZE;
+	int ret, try;
 
-	nzones    = asb->s_map_size;
-	zone_size = (8 << dr->log2secsize) - le16_to_cpu(dr->zone_spare);
-	map_addr  = (nzones >> 1) * zone_size -
-		     ((nzones > 1) ? ADFS_DR_SIZE_BITS : 0);
-	map_addr  = signed_asl(map_addr, asb->s_map2blk);
-
-	asb->s_ids_per_zone = zone_size / (asb->s_idlen + 1);
-
-	dm = kmalloc_array(nzones, sizeof(*dm), GFP_KERNEL);
-	if (dm == NULL) {
-		adfs_error(sb, "not enough memory");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	for (zone = 0; zone < nzones; zone++, map_addr++) {
-		dm[zone].dm_startbit = 0;
-		dm[zone].dm_endbit   = zone_size;
-		dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS;
-		dm[zone].dm_bh       = sb_bread(sb, map_addr);
-
-		if (!dm[zone].dm_bh) {
-			adfs_error(sb, "unable to read map");
-			goto error_free;
+	for (try = 0; try < 2; try++) {
+		/* try to set the requested block size */
+		if (sb->s_blocksize != blocksize &&
+		    !sb_set_blocksize(sb, blocksize)) {
+			if (!silent)
+				adfs_msg(sb, KERN_ERR,
+					 "error: unsupported blocksize");
+			return -EINVAL;
 		}
+
+		/* read the buffer */
+		bh = sb_bread(sb, offset >> sb->s_blocksize_bits);
+		if (!bh) {
+			adfs_msg(sb, KERN_ERR,
+				 "error: unable to read block %u, try %d",
+				 offset >> sb->s_blocksize_bits, try);
+			return -EIO;
+		}
+
+		/* validate it */
+		ret = validate(sb, bh, &dr);
+		if (ret) {
+			brelse(bh);
+			return ret;
+		}
+
+		/* does the block size match the filesystem block size? */
+		blocksize = 1 << dr->log2secsize;
+		if (sb->s_blocksize == blocksize) {
+			asb->s_map = adfs_read_map(sb, dr);
+			brelse(bh);
+			return PTR_ERR_OR_ZERO(asb->s_map);
+		}
+
+		brelse(bh);
 	}
 
-	/* adjust the limits for the first and last map zones */
-	i = zone - 1;
-	dm[0].dm_startblk = 0;
-	dm[0].dm_startbit = ADFS_DR_SIZE_BITS;
-	dm[i].dm_endbit   = (adfs_disc_size(dr) >> dr->log2bpmb) +
-			    (ADFS_DR_SIZE_BITS - i * zone_size);
+	return -EIO;
+}
 
-	if (adfs_checkmap(sb, dm))
-		return dm;
+static int adfs_validate_bblk(struct super_block *sb, struct buffer_head *bh,
+			      struct adfs_discrecord **drp)
+{
+	struct adfs_discrecord *dr;
+	unsigned char *b_data;
 
-	adfs_error(sb, "map corrupted");
+	b_data = bh->b_data + (ADFS_DISCRECORD % sb->s_blocksize);
+	if (adfs_checkbblk(b_data))
+		return -EILSEQ;
 
-error_free:
-	while (--zone >= 0)
-		brelse(dm[zone].dm_bh);
+	/* Do some sanity checks on the ADFS disc record */
+	dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET);
+	if (adfs_checkdiscrecord(dr))
+		return -EILSEQ;
 
-	kfree(dm);
-	return ERR_PTR(-EIO);
+	*drp = dr;
+	return 0;
+}
+
+static int adfs_validate_dr0(struct super_block *sb, struct buffer_head *bh,
+			      struct adfs_discrecord **drp)
+{
+	struct adfs_discrecord *dr;
+
+	/* Do some sanity checks on the ADFS disc record */
+	dr = (struct adfs_discrecord *)(bh->b_data + 4);
+	if (adfs_checkdiscrecord(dr) || dr->nzones_high || dr->nzones != 1)
+		return -EILSEQ;
+
+	*drp = dr;
+	return 0;
 }
 
 static int adfs_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct adfs_discrecord *dr;
-	struct buffer_head *bh;
 	struct object_info root_obj;
-	unsigned char *b_data;
-	unsigned int blocksize;
 	struct adfs_sb_info *asb;
 	struct inode *root;
 	int ret = -EINVAL;
@@ -391,7 +374,10 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
 	asb = kzalloc(sizeof(*asb), GFP_KERNEL);
 	if (!asb)
 		return -ENOMEM;
+
 	sb->s_fs_info = asb;
+	sb->s_magic = ADFS_SUPER_MAGIC;
+	sb->s_time_gran = 10000000;
 
 	/* set default options */
 	asb->s_uid = GLOBAL_ROOT_UID;
@@ -403,78 +389,21 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (parse_options(sb, asb, data))
 		goto error;
 
-	sb_set_blocksize(sb, BLOCK_SIZE);
-	if (!(bh = sb_bread(sb, ADFS_DISCRECORD / BLOCK_SIZE))) {
-		adfs_msg(sb, KERN_ERR, "error: unable to read superblock");
-		ret = -EIO;
-		goto error;
-	}
-
-	b_data = bh->b_data + (ADFS_DISCRECORD % BLOCK_SIZE);
-
-	if (adfs_checkbblk(b_data)) {
-		ret = -EINVAL;
-		goto error_badfs;
-	}
-
-	dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET);
-
-	/*
-	 * Do some sanity checks on the ADFS disc record
-	 */
-	if (adfs_checkdiscrecord(dr)) {
-		ret = -EINVAL;
-		goto error_badfs;
-	}
-
-	blocksize = 1 << dr->log2secsize;
-	brelse(bh);
-
-	if (sb_set_blocksize(sb, blocksize)) {
-		bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
-		if (!bh) {
-			adfs_msg(sb, KERN_ERR,
-				 "error: couldn't read superblock on 2nd try.");
-			ret = -EIO;
-			goto error;
-		}
-		b_data = bh->b_data + (ADFS_DISCRECORD % sb->s_blocksize);
-		if (adfs_checkbblk(b_data)) {
-			adfs_msg(sb, KERN_ERR,
-				 "error: disc record mismatch, very weird!");
-			ret = -EINVAL;
-			goto error_free_bh;
-		}
-		dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET);
-	} else {
+	/* Try to probe the filesystem boot block */
+	ret = adfs_probe(sb, ADFS_DISCRECORD, 1, adfs_validate_bblk);
+	if (ret == -EILSEQ)
+		ret = adfs_probe(sb, 0, silent, adfs_validate_dr0);
+	if (ret == -EILSEQ) {
 		if (!silent)
 			adfs_msg(sb, KERN_ERR,
-				 "error: unsupported blocksize");
+				 "error: can't find an ADFS filesystem on dev %s.",
+				 sb->s_id);
 		ret = -EINVAL;
+	}
+	if (ret)
 		goto error;
-	}
 
-	/*
-	 * blocksize on this device should now be set to the ADFS log2secsize
-	 */
-
-	sb->s_magic		= ADFS_SUPER_MAGIC;
-	asb->s_idlen		= dr->idlen;
-	asb->s_map_size		= dr->nzones | (dr->nzones_high << 8);
-	asb->s_map2blk		= dr->log2bpmb - dr->log2secsize;
-	asb->s_log2sharesize	= dr->log2sharesize;
-
-	asb->s_map = adfs_read_map(sb, dr);
-	if (IS_ERR(asb->s_map)) {
-		ret =  PTR_ERR(asb->s_map);
-		goto error_free_bh;
-	}
-
-	brelse(bh);
-
-	/*
-	 * set up enough so that we can read an inode
-	 */
+	/* set up enough so that we can read an inode */
 	sb->s_op = &adfs_sops;
 
 	dr = adfs_map_discrecord(asb->s_map);
@@ -511,23 +440,13 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
 	root = adfs_iget(sb, &root_obj);
 	sb->s_root = d_make_root(root);
 	if (!sb->s_root) {
-		int i;
-		for (i = 0; i < asb->s_map_size; i++)
-			brelse(asb->s_map[i].dm_bh);
-		kfree(asb->s_map);
+		adfs_free_map(sb);
 		adfs_error(sb, "get root inode failed\n");
 		ret = -EIO;
 		goto error;
 	}
 	return 0;
 
-error_badfs:
-	if (!silent)
-		adfs_msg(sb, KERN_ERR,
-			 "error: can't find an ADFS filesystem on dev %s.",
-			 sb->s_id);
-error_free_bh:
-	brelse(bh);
 error:
 	sb->s_fs_info = NULL;
 	kfree(asb);
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 7f8a9b3..dda7a9a 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -38,13 +38,13 @@ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf);
 static int afs_show_devname(struct seq_file *m, struct dentry *root);
 static int afs_show_options(struct seq_file *m, struct dentry *root);
 static int afs_init_fs_context(struct fs_context *fc);
-static const struct fs_parameter_description afs_fs_parameters;
+static const struct fs_parameter_spec afs_fs_parameters[];
 
 struct file_system_type afs_fs_type = {
 	.owner			= THIS_MODULE,
 	.name			= "afs",
 	.init_fs_context	= afs_init_fs_context,
-	.parameters		= &afs_fs_parameters,
+	.parameters		= afs_fs_parameters,
 	.kill_sb		= afs_kill_super,
 	.fs_flags		= FS_RENAME_DOES_D_MOVE,
 };
@@ -73,28 +73,22 @@ enum afs_param {
 	Opt_source,
 };
 
-static const struct fs_parameter_spec afs_param_specs[] = {
+static const struct constant_table afs_param_flock[] = {
+	{"local",	afs_flock_mode_local },
+	{"openafs",	afs_flock_mode_openafs },
+	{"strict",	afs_flock_mode_strict },
+	{"write",	afs_flock_mode_write },
+	{}
+};
+
+static const struct fs_parameter_spec afs_fs_parameters[] = {
 	fsparam_flag  ("autocell",	Opt_autocell),
 	fsparam_flag  ("dyn",		Opt_dyn),
-	fsparam_enum  ("flock",		Opt_flock),
+	fsparam_enum  ("flock",		Opt_flock, afs_param_flock),
 	fsparam_string("source",	Opt_source),
 	{}
 };
 
-static const struct fs_parameter_enum afs_param_enums[] = {
-	{ Opt_flock,	"local",	afs_flock_mode_local },
-	{ Opt_flock,	"openafs",	afs_flock_mode_openafs },
-	{ Opt_flock,	"strict",	afs_flock_mode_strict },
-	{ Opt_flock,	"write",	afs_flock_mode_write },
-	{}
-};
-
-static const struct fs_parameter_description afs_fs_parameters = {
-	.name		= "kAFS",
-	.specs		= afs_param_specs,
-	.enums		= afs_param_enums,
-};
-
 /*
  * initialise the filesystem
  */
@@ -323,7 +317,7 @@ static int afs_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	struct afs_fs_context *ctx = fc->fs_private;
 	int opt;
 
-	opt = fs_parse(fc, &afs_fs_parameters, param, &result);
+	opt = fs_parse(fc, afs_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
 
diff --git a/fs/aio.c b/fs/aio.c
index a9fbad2..5f3d3d8 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1610,6 +1610,14 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
 	return 0;
 }
 
+static void aio_poll_put_work(struct work_struct *work)
+{
+	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
+	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
+
+	iocb_put(iocb);
+}
+
 static void aio_poll_complete_work(struct work_struct *work)
 {
 	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1674,6 +1682,8 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	list_del_init(&req->wait.entry);
 
 	if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+		struct kioctx *ctx = iocb->ki_ctx;
+
 		/*
 		 * Try to complete the iocb inline if we can. Use
 		 * irqsave/irqrestore because not all filesystems (e.g. fuse)
@@ -1683,8 +1693,14 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 		list_del(&iocb->ki_list);
 		iocb->ki_res.res = mangle_poll(mask);
 		req->done = true;
-		spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
-		iocb_put(iocb);
+		if (iocb->ki_eventfd && eventfd_signal_count()) {
+			iocb = NULL;
+			INIT_WORK(&req->work, aio_poll_put_work);
+			schedule_work(&req->work);
+		}
+		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
+		if (iocb)
+			iocb_put(iocb);
 	} else {
 		schedule_work(&req->work);
 	}
diff --git a/fs/attr.c b/fs/attr.c
index df28035..b4bbdbd4 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -183,18 +183,12 @@ void setattr_copy(struct inode *inode, const struct iattr *attr)
 		inode->i_uid = attr->ia_uid;
 	if (ia_valid & ATTR_GID)
 		inode->i_gid = attr->ia_gid;
-	if (ia_valid & ATTR_ATIME) {
-		inode->i_atime = timestamp_truncate(attr->ia_atime,
-						  inode);
-	}
-	if (ia_valid & ATTR_MTIME) {
-		inode->i_mtime = timestamp_truncate(attr->ia_mtime,
-						  inode);
-	}
-	if (ia_valid & ATTR_CTIME) {
-		inode->i_ctime = timestamp_truncate(attr->ia_ctime,
-						  inode);
-	}
+	if (ia_valid & ATTR_ATIME)
+		inode->i_atime = attr->ia_atime;
+	if (ia_valid & ATTR_MTIME)
+		inode->i_mtime = attr->ia_mtime;
+	if (ia_valid & ATTR_CTIME)
+		inode->i_ctime = attr->ia_ctime;
 	if (ia_valid & ATTR_MODE) {
 		umode_t mode = attr->ia_mode;
 
@@ -268,8 +262,13 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de
 	attr->ia_ctime = now;
 	if (!(ia_valid & ATTR_ATIME_SET))
 		attr->ia_atime = now;
+	else
+		attr->ia_atime = timestamp_truncate(attr->ia_atime, inode);
 	if (!(ia_valid & ATTR_MTIME_SET))
 		attr->ia_mtime = now;
+	else
+		attr->ia_mtime = timestamp_truncate(attr->ia_mtime, inode);
+
 	if (ia_valid & ATTR_KILL_PRIV) {
 		error = security_inode_need_killpriv(dentry);
 		if (error < 0)
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index ecd8d26..f4713ea 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -97,7 +97,7 @@ static struct linux_binfmt elf_format = {
 	.min_coredump	= ELF_EXEC_PAGESIZE,
 };
 
-#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
+#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))
 
 static int set_brk(unsigned long start, unsigned long end, int prot)
 {
@@ -161,9 +161,11 @@ static int padzero(unsigned long elf_bss)
 #endif
 
 static int
-create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
-		unsigned long load_addr, unsigned long interp_load_addr)
+create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
+		unsigned long load_addr, unsigned long interp_load_addr,
+		unsigned long e_entry)
 {
+	struct mm_struct *mm = current->mm;
 	unsigned long p = bprm->p;
 	int argc = bprm->argc;
 	int envc = bprm->envc;
@@ -176,7 +178,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 	unsigned char k_rand_bytes[16];
 	int items;
 	elf_addr_t *elf_info;
-	int ei_index = 0;
+	int ei_index;
 	const struct cred *cred = current_cred();
 	struct vm_area_struct *vma;
 
@@ -226,12 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 		return -EFAULT;
 
 	/* Create the ELF interpreter info */
-	elf_info = (elf_addr_t *)current->mm->saved_auxv;
+	elf_info = (elf_addr_t *)mm->saved_auxv;
 	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
 #define NEW_AUX_ENT(id, val) \
 	do { \
-		elf_info[ei_index++] = id; \
-		elf_info[ei_index++] = val; \
+		*elf_info++ = id; \
+		*elf_info++ = val; \
 	} while (0)
 
 #ifdef ARCH_DLINFO
@@ -251,7 +253,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
 	NEW_AUX_ENT(AT_BASE, interp_load_addr);
 	NEW_AUX_ENT(AT_FLAGS, 0);
-	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
+	NEW_AUX_ENT(AT_ENTRY, e_entry);
 	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
 	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
 	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
@@ -275,12 +277,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 	}
 #undef NEW_AUX_ENT
 	/* AT_NULL is zero; clear the rest too */
-	memset(&elf_info[ei_index], 0,
-	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
+	memset(elf_info, 0, (char *)mm->saved_auxv +
+			sizeof(mm->saved_auxv) - (char *)elf_info);
 
 	/* And advance past the AT_NULL entry.  */
-	ei_index += 2;
+	elf_info += 2;
 
+	ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
 	sp = STACK_ADD(p, ei_index);
 
 	items = (argc + 1) + (envc + 1) + 1;
@@ -299,7 +302,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 	 * Grow the stack manually; some architectures have a limit on how
 	 * far ahead a user-space access may be in order to grow the stack.
 	 */
-	vma = find_extend_vma(current->mm, bprm->p);
+	vma = find_extend_vma(mm, bprm->p);
 	if (!vma)
 		return -EFAULT;
 
@@ -308,7 +311,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 		return -EFAULT;
 
 	/* Populate list of argv pointers back to argv strings. */
-	p = current->mm->arg_end = current->mm->arg_start;
+	p = mm->arg_end = mm->arg_start;
 	while (argc-- > 0) {
 		size_t len;
 		if (__put_user((elf_addr_t)p, sp++))
@@ -320,10 +323,10 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 	}
 	if (__put_user(0, sp++))
 		return -EFAULT;
-	current->mm->arg_end = p;
+	mm->arg_end = p;
 
 	/* Populate list of envp pointers back to envp strings. */
-	current->mm->env_end = current->mm->env_start = p;
+	mm->env_end = mm->env_start = p;
 	while (envc-- > 0) {
 		size_t len;
 		if (__put_user((elf_addr_t)p, sp++))
@@ -335,10 +338,10 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 	}
 	if (__put_user(0, sp++))
 		return -EFAULT;
-	current->mm->env_end = p;
+	mm->env_end = p;
 
 	/* Put the elf_info on the stack in the right place.  */
-	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
+	if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
 		return -EFAULT;
 	return 0;
 }
@@ -689,15 +692,17 @@ static int load_elf_binary(struct linux_binprm *bprm)
 	int bss_prot = 0;
 	int retval, i;
 	unsigned long elf_entry;
+	unsigned long e_entry;
 	unsigned long interp_load_addr = 0;
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long reloc_func_desc __maybe_unused = 0;
 	int executable_stack = EXSTACK_DEFAULT;
+	struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
 	struct {
-		struct elfhdr elf_ex;
 		struct elfhdr interp_elf_ex;
 	} *loc;
 	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
+	struct mm_struct *mm;
 	struct pt_regs *regs;
 
 	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
@@ -705,30 +710,27 @@ static int load_elf_binary(struct linux_binprm *bprm)
 		retval = -ENOMEM;
 		goto out_ret;
 	}
-	
-	/* Get the exec-header */
-	loc->elf_ex = *((struct elfhdr *)bprm->buf);
 
 	retval = -ENOEXEC;
 	/* First of all, some simple consistency checks */
-	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
+	if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
 		goto out;
 
-	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
+	if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
 		goto out;
-	if (!elf_check_arch(&loc->elf_ex))
+	if (!elf_check_arch(elf_ex))
 		goto out;
-	if (elf_check_fdpic(&loc->elf_ex))
+	if (elf_check_fdpic(elf_ex))
 		goto out;
 	if (!bprm->file->f_op->mmap)
 		goto out;
 
-	elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
+	elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
 	if (!elf_phdata)
 		goto out;
 
 	elf_ppnt = elf_phdata;
-	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
 		char *elf_interpreter;
 
 		if (elf_ppnt->p_type != PT_INTERP)
@@ -782,7 +784,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
 	}
 
 	elf_ppnt = elf_phdata;
-	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
+	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
 		switch (elf_ppnt->p_type) {
 		case PT_GNU_STACK:
 			if (elf_ppnt->p_flags & PF_X)
@@ -792,7 +794,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
 			break;
 
 		case PT_LOPROC ... PT_HIPROC:
-			retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
+			retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
 						  bprm->file, false,
 						  &arch_state);
 			if (retval)
@@ -836,7 +838,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
 	 * still possible to return an error to the code that invoked
 	 * the exec syscall.
 	 */
-	retval = arch_check_elf(&loc->elf_ex,
+	retval = arch_check_elf(elf_ex,
 				!!interpreter, &loc->interp_elf_ex,
 				&arch_state);
 	if (retval)
@@ -849,8 +851,8 @@ static int load_elf_binary(struct linux_binprm *bprm)
 
 	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
 	   may depend on the personality.  */
-	SET_PERSONALITY2(loc->elf_ex, &arch_state);
-	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
+	SET_PERSONALITY2(*elf_ex, &arch_state);
+	if (elf_read_implies_exec(*elf_ex, executable_stack))
 		current->personality |= READ_IMPLIES_EXEC;
 
 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
@@ -877,7 +879,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
 	/* Now we do a little grungy work by mmapping the ELF image into
 	   the correct location in memory. */
 	for(i = 0, elf_ppnt = elf_phdata;
-	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+	    i < elf_ex->e_phnum; i++, elf_ppnt++) {
 		int elf_prot, elf_flags;
 		unsigned long k, vaddr;
 		unsigned long total_size = 0;
@@ -921,9 +923,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
 		 * If we are loading ET_EXEC or we have already performed
 		 * the ET_DYN load_addr calculations, proceed normally.
 		 */
-		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
+		if (elf_ex->e_type == ET_EXEC || load_addr_set) {
 			elf_flags |= MAP_FIXED;
-		} else if (loc->elf_ex.e_type == ET_DYN) {
+		} else if (elf_ex->e_type == ET_DYN) {
 			/*
 			 * This logic is run once for the first LOAD Program
 			 * Header for ET_DYN binaries to calculate the
@@ -972,7 +974,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
 			load_bias = ELF_PAGESTART(load_bias - vaddr);
 
 			total_size = total_mapping_size(elf_phdata,
-							loc->elf_ex.e_phnum);
+							elf_ex->e_phnum);
 			if (!total_size) {
 				retval = -EINVAL;
 				goto out_free_dentry;
@@ -990,7 +992,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
 		if (!load_addr_set) {
 			load_addr_set = 1;
 			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
-			if (loc->elf_ex.e_type == ET_DYN) {
+			if (elf_ex->e_type == ET_DYN) {
 				load_bias += error -
 				             ELF_PAGESTART(load_bias + vaddr);
 				load_addr += load_bias;
@@ -998,7 +1000,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
 			}
 		}
 		k = elf_ppnt->p_vaddr;
-		if (k < start_code)
+		if ((elf_ppnt->p_flags & PF_X) && k < start_code)
 			start_code = k;
 		if (start_data < k)
 			start_data = k;
@@ -1031,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
 		}
 	}
 
-	loc->elf_ex.e_entry += load_bias;
+	e_entry = elf_ex->e_entry + load_bias;
 	elf_bss += load_bias;
 	elf_brk += load_bias;
 	start_code += load_bias;
@@ -1074,7 +1076,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
 		allow_write_access(interpreter);
 		fput(interpreter);
 	} else {
-		elf_entry = loc->elf_ex.e_entry;
+		elf_entry = e_entry;
 		if (BAD_ADDR(elf_entry)) {
 			retval = -EINVAL;
 			goto out_free_dentry;
@@ -1092,15 +1094,17 @@ static int load_elf_binary(struct linux_binprm *bprm)
 		goto out;
 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
 
-	retval = create_elf_tables(bprm, &loc->elf_ex,
-			  load_addr, interp_load_addr);
+	retval = create_elf_tables(bprm, elf_ex,
+			  load_addr, interp_load_addr, e_entry);
 	if (retval < 0)
 		goto out;
-	current->mm->end_code = end_code;
-	current->mm->start_code = start_code;
-	current->mm->start_data = start_data;
-	current->mm->end_data = end_data;
-	current->mm->start_stack = bprm->p;
+
+	mm = current->mm;
+	mm->end_code = end_code;
+	mm->start_code = start_code;
+	mm->start_data = start_data;
+	mm->end_data = end_data;
+	mm->start_stack = bprm->p;
 
 	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
 		/*
@@ -1111,12 +1115,11 @@ static int load_elf_binary(struct linux_binprm *bprm)
 		 * growing down), and into the unused ELF_ET_DYN_BASE region.
 		 */
 		if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
-		    loc->elf_ex.e_type == ET_DYN && !interpreter)
-			current->mm->brk = current->mm->start_brk =
-				ELF_ET_DYN_BASE;
+		    elf_ex->e_type == ET_DYN && !interpreter) {
+			mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
+		}
 
-		current->mm->brk = current->mm->start_brk =
-			arch_randomize_brk(current->mm);
+		mm->brk = mm->start_brk = arch_randomize_brk(mm);
 #ifdef compat_brk_randomized
 		current->brk_randomized = 1;
 #endif
@@ -1574,6 +1577,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
  */
 static int fill_files_note(struct memelfnote *note)
 {
+	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned count, size, names_ofs, remaining, n;
 	user_long_t *data;
@@ -1581,7 +1585,7 @@ static int fill_files_note(struct memelfnote *note)
 	char *name_base, *name_curpos;
 
 	/* *Estimated* file count and total data size needed */
-	count = current->mm->map_count;
+	count = mm->map_count;
 	if (count > UINT_MAX / 64)
 		return -EINVAL;
 	size = count * 64;
@@ -1591,6 +1595,10 @@ static int fill_files_note(struct memelfnote *note)
 	if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
 		return -EINVAL;
 	size = round_up(size, PAGE_SIZE);
+	/*
+	 * "size" can be 0 here legitimately.
+	 * Let it ENOMEM and omit NT_FILE section which will be empty anyway.
+	 */
 	data = kvmalloc(size, GFP_KERNEL);
 	if (ZERO_OR_NULL_PTR(data))
 		return -ENOMEM;
@@ -1599,7 +1607,7 @@ static int fill_files_note(struct memelfnote *note)
 	name_base = name_curpos = ((char *)data) + names_ofs;
 	remaining = size - names_ofs;
 	count = 0;
-	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
+	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
 		struct file *file;
 		const char *filename;
 
@@ -1633,10 +1641,10 @@ static int fill_files_note(struct memelfnote *note)
 	data[0] = count;
 	data[1] = PAGE_SIZE;
 	/*
-	 * Count usually is less than current->mm->map_count,
+	 * Count usually is less than mm->map_count,
 	 * we need to move filenames down.
 	 */
-	n = current->mm->map_count - count;
+	n = mm->map_count - count;
 	if (n != 0) {
 		unsigned shift_bytes = n * 3 * sizeof(data[0]);
 		memmove(name_base - shift_bytes, name_base,
@@ -2182,7 +2190,7 @@ static int elf_core_dump(struct coredump_params *cprm)
 	int segs, i;
 	size_t vma_data_size = 0;
 	struct vm_area_struct *vma, *gate_vma;
-	struct elfhdr *elf = NULL;
+	struct elfhdr elf;
 	loff_t offset = 0, dataoff;
 	struct elf_note_info info = { };
 	struct elf_phdr *phdr4note = NULL;
@@ -2203,10 +2211,6 @@ static int elf_core_dump(struct coredump_params *cprm)
 	 * exists while dumping the mm->vm_next areas to the core file.
 	 */
   
-	/* alloc memory for large data structures: too large to be on stack */
-	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
-	if (!elf)
-		goto out;
 	/*
 	 * The number of segs are recored into ELF header as 16bit value.
 	 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
@@ -2230,7 +2234,7 @@ static int elf_core_dump(struct coredump_params *cprm)
 	 * Collect all the non-memory information about the process for the
 	 * notes.  This also sets up the file header.
 	 */
-	if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs))
+	if (!fill_note_info(&elf, e_phnum, &info, cprm->siginfo, cprm->regs))
 		goto cleanup;
 
 	has_dumped = 1;
@@ -2238,7 +2242,7 @@ static int elf_core_dump(struct coredump_params *cprm)
 	fs = get_fs();
 	set_fs(KERNEL_DS);
 
-	offset += sizeof(*elf);				/* Elf header */
+	offset += sizeof(elf);				/* Elf header */
 	offset += segs * sizeof(struct elf_phdr);	/* Program headers */
 
 	/* Write notes phdr entry */
@@ -2257,11 +2261,13 @@ static int elf_core_dump(struct coredump_params *cprm)
 
 	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 
-	if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz))
-		goto end_coredump;
+	/*
+	 * A process with zero VMAs will get ZERO_SIZE_PTR here.
+	 * Let the coredump continue so we at least capture register state.
+	 */
 	vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)),
 			      GFP_KERNEL);
-	if (ZERO_OR_NULL_PTR(vma_filesz))
+	if (!vma_filesz)
 		goto end_coredump;
 
 	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
@@ -2281,12 +2287,12 @@ static int elf_core_dump(struct coredump_params *cprm)
 		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
 		if (!shdr4extnum)
 			goto end_coredump;
-		fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
+		fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
 	}
 
 	offset = dataoff;
 
-	if (!dump_emit(cprm, elf, sizeof(*elf)))
+	if (!dump_emit(cprm, &elf, sizeof(elf)))
 		goto end_coredump;
 
 	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
@@ -2370,8 +2376,6 @@ static int elf_core_dump(struct coredump_params *cprm)
 	kfree(shdr4extnum);
 	kvfree(vma_filesz);
 	kfree(phdr4note);
-	kfree(elf);
-out:
 	return has_dumped;
 }
 
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 1485158..404e050 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1191,7 +1191,6 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
 {
 	struct btrfs_space_info *sinfo = cache->space_info;
 	u64 num_bytes;
-	u64 sinfo_used;
 	int ret = -ENOSPC;
 
 	spin_lock(&sinfo->lock);
@@ -1205,19 +1204,38 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
 
 	num_bytes = cache->length - cache->reserved - cache->pinned -
 		    cache->bytes_super - cache->used;
-	sinfo_used = btrfs_space_info_used(sinfo, true);
 
 	/*
-	 * sinfo_used + num_bytes should always <= sinfo->total_bytes.
-	 *
-	 * Here we make sure if we mark this bg RO, we still have enough
-	 * free space as buffer.
+	 * Data never overcommits, even in mixed mode, so just do a straight
+	 * check of the space left over in what we have allocated.
 	 */
-	if (sinfo_used + num_bytes <= sinfo->total_bytes) {
+	if (force) {
+		ret = 0;
+	} else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
+		u64 sinfo_used = btrfs_space_info_used(sinfo, true);
+
+		/*
+		 * Here we make sure if we mark this bg RO, we still have enough
+		 * free space as buffer.
+		 */
+		if (sinfo_used + num_bytes <= sinfo->total_bytes)
+			ret = 0;
+	} else {
+		/*
+		 * We overcommit metadata, so we need to do the
+		 * btrfs_can_overcommit check here, and we need to pass in
+		 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
+		 * leeway to allow us to mark this block group as read only.
+		 */
+		if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
+					 BTRFS_RESERVE_NO_FLUSH))
+			ret = 0;
+	}
+
+	if (!ret) {
 		sinfo->bytes_readonly += num_bytes;
 		cache->ro++;
 		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
-		ret = 0;
 	}
 out:
 	spin_unlock(&cache->lock);
@@ -1225,9 +1243,6 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
 	if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
 		btrfs_info(cache->fs_info,
 			"unable to make block group %llu ro", cache->start);
-		btrfs_info(cache->fs_info,
-			"sinfo_used=%llu bg_num_bytes=%llu",
-			sinfo_used, num_bytes);
 		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
 	}
 	return ret;
@@ -2225,7 +2240,7 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
 		}
 	}
 
-	ret = inc_block_group_ro(cache, !do_chunk_alloc);
+	ret = inc_block_group_ro(cache, 0);
 	if (!do_chunk_alloc)
 		goto unlock_out;
 	if (!ret)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index de95ad2..9ab610c 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1290,7 +1290,7 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
 	/* copy bytes from the working buffer into the pages */
 	while (working_bytes > 0) {
 		bytes = min_t(unsigned long, bvec.bv_len,
-				PAGE_SIZE - buf_offset);
+				PAGE_SIZE - (buf_offset % PAGE_SIZE));
 		bytes = min(bytes, working_bytes);
 
 		kaddr = kmap_atomic(bvec.bv_page);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 24658b5..f2ec1a9 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -326,12 +326,10 @@ u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
 			   struct seq_list *elem)
 {
 	write_lock(&fs_info->tree_mod_log_lock);
-	spin_lock(&fs_info->tree_mod_seq_lock);
 	if (!elem->seq) {
 		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
 		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
 	}
-	spin_unlock(&fs_info->tree_mod_seq_lock);
 	write_unlock(&fs_info->tree_mod_log_lock);
 
 	return elem->seq;
@@ -351,7 +349,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
 	if (!seq_putting)
 		return;
 
-	spin_lock(&fs_info->tree_mod_seq_lock);
+	write_lock(&fs_info->tree_mod_log_lock);
 	list_del(&elem->list);
 	elem->seq = 0;
 
@@ -362,19 +360,17 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
 				 * blocker with lower sequence number exists, we
 				 * cannot remove anything from the log
 				 */
-				spin_unlock(&fs_info->tree_mod_seq_lock);
+				write_unlock(&fs_info->tree_mod_log_lock);
 				return;
 			}
 			min_seq = cur_elem->seq;
 		}
 	}
-	spin_unlock(&fs_info->tree_mod_seq_lock);
 
 	/*
 	 * anything that's lower than the lowest existing (read: blocked)
 	 * sequence number can be removed from the tree.
 	 */
-	write_lock(&fs_info->tree_mod_log_lock);
 	tm_root = &fs_info->tree_mod_log;
 	for (node = rb_first(tm_root); node; node = next) {
 		next = rb_next(node);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index f90b820..36df977 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -714,14 +714,12 @@ struct btrfs_fs_info {
 	atomic_t nr_delayed_iputs;
 	wait_queue_head_t delayed_iputs_wait;
 
-	/* this protects tree_mod_seq_list */
-	spinlock_t tree_mod_seq_lock;
 	atomic64_t tree_mod_seq;
-	struct list_head tree_mod_seq_list;
 
-	/* this protects tree_mod_log */
+	/* this protects tree_mod_log and tree_mod_seq_list */
 	rwlock_t tree_mod_log_lock;
 	struct rb_root tree_mod_log;
+	struct list_head tree_mod_seq_list;
 
 	atomic_t async_delalloc_pages;
 
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index df3bd88..dfdb7d4 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -492,7 +492,7 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
 	if (head->is_data)
 		return;
 
-	spin_lock(&fs_info->tree_mod_seq_lock);
+	read_lock(&fs_info->tree_mod_log_lock);
 	if (!list_empty(&fs_info->tree_mod_seq_list)) {
 		struct seq_list *elem;
 
@@ -500,7 +500,7 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
 					struct seq_list, list);
 		seq = elem->seq;
 	}
-	spin_unlock(&fs_info->tree_mod_seq_lock);
+	read_unlock(&fs_info->tree_mod_log_lock);
 
 again:
 	for (node = rb_first_cached(&head->ref_tree); node;
@@ -518,7 +518,7 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
 	struct seq_list *elem;
 	int ret = 0;
 
-	spin_lock(&fs_info->tree_mod_seq_lock);
+	read_lock(&fs_info->tree_mod_log_lock);
 	if (!list_empty(&fs_info->tree_mod_seq_list)) {
 		elem = list_first_entry(&fs_info->tree_mod_seq_list,
 					struct seq_list, list);
@@ -531,7 +531,7 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
 		}
 	}
 
-	spin_unlock(&fs_info->tree_mod_seq_lock);
+	read_unlock(&fs_info->tree_mod_log_lock);
 	return ret;
 }
 
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index aea48d6..7fa9bb7 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2697,7 +2697,6 @@ int __cold open_ctree(struct super_block *sb,
 	spin_lock_init(&fs_info->fs_roots_radix_lock);
 	spin_lock_init(&fs_info->delayed_iput_lock);
 	spin_lock_init(&fs_info->defrag_inodes_lock);
-	spin_lock_init(&fs_info->tree_mod_seq_lock);
 	spin_lock_init(&fs_info->super_lock);
 	spin_lock_init(&fs_info->buffer_lock);
 	spin_lock_init(&fs_info->unused_bgs_lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e2d3028..c0f2027 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1593,21 +1593,25 @@ void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
 	/* Find first extent with bits cleared */
 	while (1) {
 		node = __etree_search(tree, start, &next, &prev, NULL, NULL);
-		if (!node) {
+		if (!node && !next && !prev) {
+			/*
+			 * Tree is completely empty, send full range and let
+			 * caller deal with it
+			 */
+			*start_ret = 0;
+			*end_ret = -1;
+			goto out;
+		} else if (!node && !next) {
+			/*
+			 * We are past the last allocated chunk, set start at
+			 * the end of the last extent.
+			 */
+			state = rb_entry(prev, struct extent_state, rb_node);
+			*start_ret = state->end + 1;
+			*end_ret = -1;
+			goto out;
+		} else if (!node) {
 			node = next;
-			if (!node) {
-				/*
-				 * We are past the last allocated chunk,
-				 * set start at the end of the last extent. The
-				 * device alloc tree should never be empty so
-				 * prev is always set.
-				 */
-				ASSERT(prev);
-				state = rb_entry(prev, struct extent_state, rb_node);
-				*start_ret = state->end + 1;
-				*end_ret = -1;
-				goto out;
-			}
 		}
 		/*
 		 * At this point 'node' either contains 'start' or start is
@@ -3438,11 +3442,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
 	ret = btrfs_writepage_cow_fixup(page, start, page_end);
 	if (ret) {
 		/* Fixup worker will requeue */
-		if (ret == -EBUSY)
-			wbc->pages_skipped++;
-		else
-			redirty_page_for_writepage(wbc, page);
-
+		redirty_page_for_writepage(wbc, page);
 		update_nr_written(wbc, nr_written);
 		unlock_page(page);
 		return 1;
@@ -4166,7 +4166,16 @@ static int extent_write_cache_pages(struct address_space *mapping,
 		 */
 		scanned = 1;
 		index = 0;
-		goto retry;
+
+		/*
+		 * If we're looping we could run into a page that is locked by a
+		 * writer and that writer could be waiting on writeback for a
+		 * page in our current bio, and thus deadlock, so flush the
+		 * write bio here.
+		 */
+		ret = flush_write_bio(epd);
+		if (!ret)
+			goto retry;
 	}
 
 	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 6d2bb58..5b3ec93 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2189,6 +2189,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
 /* see btrfs_writepage_start_hook for details on why this is required */
 struct btrfs_writepage_fixup {
 	struct page *page;
+	struct inode *inode;
 	struct btrfs_work work;
 };
 
@@ -2202,27 +2203,71 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 	struct inode *inode;
 	u64 page_start;
 	u64 page_end;
-	int ret;
+	int ret = 0;
+	bool free_delalloc_space = true;
 
 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
 	page = fixup->page;
+	inode = fixup->inode;
+	page_start = page_offset(page);
+	page_end = page_offset(page) + PAGE_SIZE - 1;
+
+	/*
+	 * This is similar to page_mkwrite, we need to reserve the space before
+	 * we take the page lock.
+	 */
+	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
+					   PAGE_SIZE);
 again:
 	lock_page(page);
+
+	/*
+	 * Before we queued this fixup, we took a reference on the page.
+	 * page->mapping may go NULL, but it shouldn't be moved to a different
+	 * address space.
+	 */
 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
-		ClearPageChecked(page);
+		/*
+		 * Unfortunately this is a little tricky, either
+		 *
+		 * 1) We got here and our page had already been dealt with and
+		 *    we reserved our space, thus ret == 0, so we need to just
+		 *    drop our space reservation and bail.  This can happen the
+		 *    first time we come into the fixup worker, or could happen
+		 *    while waiting for the ordered extent.
+		 * 2) Our page was already dealt with, but we happened to get an
+		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
+		 *    this case we obviously don't have anything to release, but
+		 *    because the page was already dealt with we don't want to
+		 *    mark the page with an error, so make sure we're resetting
+		 *    ret to 0.  This is why we have this check _before_ the ret
+		 *    check, because we do not want to have a surprise ENOSPC
+		 *    when the page was already properly dealt with.
+		 */
+		if (!ret) {
+			btrfs_delalloc_release_extents(BTRFS_I(inode),
+						       PAGE_SIZE);
+			btrfs_delalloc_release_space(inode, data_reserved,
+						     page_start, PAGE_SIZE,
+						     true);
+		}
+		ret = 0;
 		goto out_page;
 	}
 
-	inode = page->mapping->host;
-	page_start = page_offset(page);
-	page_end = page_offset(page) + PAGE_SIZE - 1;
+	/*
+	 * We can't mess with the page state unless it is locked, so now that
+	 * it is locked bail if we failed to make our space reservation.
+	 */
+	if (ret)
+		goto out_page;
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
 			 &cached_state);
 
 	/* already ordered? We're done */
 	if (PagePrivate2(page))
-		goto out;
+		goto out_reserved;
 
 	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
 					PAGE_SIZE);
@@ -2235,39 +2280,49 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 		goto again;
 	}
 
-	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
-					   PAGE_SIZE);
-	if (ret) {
-		mapping_set_error(page->mapping, ret);
-		end_extent_writepage(page, ret, page_start, page_end);
-		ClearPageChecked(page);
-		goto out;
-	 }
-
 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
 					&cached_state);
-	if (ret) {
-		mapping_set_error(page->mapping, ret);
-		end_extent_writepage(page, ret, page_start, page_end);
-		ClearPageChecked(page);
+	if (ret)
 		goto out_reserved;
-	}
 
-	ClearPageChecked(page);
-	set_page_dirty(page);
+	/*
+	 * Everything went as planned, we're now the owner of a dirty page with
+	 * delayed allocation bits set and space reserved for our COW
+	 * destination.
+	 *
+	 * The page was dirty when we started, nothing should have cleaned it.
+	 */
+	BUG_ON(!PageDirty(page));
+	free_delalloc_space = false;
 out_reserved:
 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
-	if (ret)
+	if (free_delalloc_space)
 		btrfs_delalloc_release_space(inode, data_reserved, page_start,
 					     PAGE_SIZE, true);
-out:
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
 			     &cached_state);
 out_page:
+	if (ret) {
+		/*
+		 * We hit ENOSPC or other errors.  Update the mapping and page
+		 * to reflect the errors and clean the page.
+		 */
+		mapping_set_error(page->mapping, ret);
+		end_extent_writepage(page, ret, page_start, page_end);
+		clear_page_dirty_for_io(page);
+		SetPageError(page);
+	}
+	ClearPageChecked(page);
 	unlock_page(page);
 	put_page(page);
 	kfree(fixup);
 	extent_changeset_free(data_reserved);
+	/*
+	 * As a precaution, do a delayed iput in case it would be the last iput
+	 * that could need flushing space. Recursing back to fixup worker would
+	 * deadlock.
+	 */
+	btrfs_add_delayed_iput(inode);
 }
 
 /*
@@ -2291,6 +2346,13 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
 	if (TestClearPagePrivate2(page))
 		return 0;
 
+	/*
+	 * PageChecked is set below when we create a fixup worker for this page,
+	 * don't try to create another one if we're already PageChecked()
+	 *
+	 * The extent_io writepage code will redirty the page if we send back
+	 * EAGAIN.
+	 */
 	if (PageChecked(page))
 		return -EAGAIN;
 
@@ -2298,12 +2360,21 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
 	if (!fixup)
 		return -EAGAIN;
 
+	/*
+	 * We are already holding a reference to this inode from
+	 * write_cache_pages.  We need to hold it because the space reservation
+	 * takes place outside of the page lock, and we can't trust
+	 * page->mapping outside of the page lock.
+	 */
+	ihold(inode);
 	SetPageChecked(page);
 	get_page(page);
 	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
 	fixup->page = page;
+	fixup->inode = inode;
 	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
-	return -EBUSY;
+
+	return -EAGAIN;
 }
 
 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 091e5bc..a055b65 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1269,7 +1269,8 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
 		 * destination of the stream.
 		 */
 		if (ino == bctx->cur_objectid &&
-		    offset >= bctx->sctx->cur_inode_next_write_offset)
+		    offset + bctx->extent_len >
+		    bctx->sctx->cur_inode_next_write_offset)
 			return 0;
 	}
 
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 537bc31..01297c5 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -159,9 +159,9 @@ static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
 	return (global->size << 1);
 }
 
-static int can_overcommit(struct btrfs_fs_info *fs_info,
-			  struct btrfs_space_info *space_info, u64 bytes,
-			  enum btrfs_reserve_flush_enum flush)
+int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
+			 struct btrfs_space_info *space_info, u64 bytes,
+			 enum btrfs_reserve_flush_enum flush)
 {
 	u64 profile;
 	u64 avail;
@@ -226,7 +226,8 @@ void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
 
 		/* Check and see if our ticket can be satisified now. */
 		if ((used + ticket->bytes <= space_info->total_bytes) ||
-		    can_overcommit(fs_info, space_info, ticket->bytes, flush)) {
+		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
+					 flush)) {
 			btrfs_space_info_update_bytes_may_use(fs_info,
 							      space_info,
 							      ticket->bytes);
@@ -639,13 +640,14 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
 		return to_reclaim;
 
 	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
-	if (can_overcommit(fs_info, space_info, to_reclaim,
-			   BTRFS_RESERVE_FLUSH_ALL))
+	if (btrfs_can_overcommit(fs_info, space_info, to_reclaim,
+				 BTRFS_RESERVE_FLUSH_ALL))
 		return 0;
 
 	used = btrfs_space_info_used(space_info, true);
 
-	if (can_overcommit(fs_info, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
+	if (btrfs_can_overcommit(fs_info, space_info, SZ_1M,
+				 BTRFS_RESERVE_FLUSH_ALL))
 		expected = div_factor_fine(space_info->total_bytes, 95);
 	else
 		expected = div_factor_fine(space_info->total_bytes, 90);
@@ -1004,7 +1006,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
 	 */
 	if (!pending_tickets &&
 	    ((used + orig_bytes <= space_info->total_bytes) ||
-	     can_overcommit(fs_info, space_info, orig_bytes, flush))) {
+	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
 		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
 						      orig_bytes);
 		ret = 0;
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index 1a349e3..24514cd 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -127,6 +127,9 @@ int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
 				 enum btrfs_reserve_flush_enum flush);
 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
 				struct btrfs_space_info *space_info);
+int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
+			 struct btrfs_space_info *space_info, u64 bytes,
+			 enum btrfs_reserve_flush_enum flush);
 
 static inline void btrfs_space_info_free_bytes_may_use(
 				struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index a906315..0616a54 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2135,7 +2135,15 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	 */
 	thresh = SZ_4M;
 
-	if (!mixed && total_free_meta - thresh < block_rsv->size)
+	/*
+	 * We only want to claim there's no available space if we can no longer
+	 * allocate chunks for our metadata profile and our global reserve will
+	 * not fit in the free metadata space.  If we aren't ->full then we
+	 * still can allocate chunks and thus are fine using the currently
+	 * calculated f_bavail.
+	 */
+	if (!mixed && block_rsv->space_info->full &&
+	    total_free_meta - thresh < block_rsv->size)
 		buf->f_bavail = 0;
 
 	buf->f_type = BTRFS_SUPER_MAGIC;
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index c12b91f..84fb3fa 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -142,7 +142,6 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
 	spin_lock_init(&fs_info->qgroup_lock);
 	spin_lock_init(&fs_info->super_lock);
 	spin_lock_init(&fs_info->fs_roots_radix_lock);
-	spin_lock_init(&fs_info->tree_mod_seq_lock);
 	mutex_init(&fs_info->qgroup_ioctl_lock);
 	mutex_init(&fs_info->qgroup_rescan_lock);
 	rwlock_init(&fs_info->tree_mod_log_lock);
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 123d9a6..df7ce87 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -441,8 +441,17 @@ static int test_find_first_clear_extent_bit(void)
 	int ret = -EINVAL;
 
 	test_msg("running find_first_clear_extent_bit test");
+
 	extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST, NULL);
 
+	/* Test correct handling of empty tree */
+	find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED);
+	if (start != 0 || end != -1) {
+		test_err(
+	"error getting a range from completely empty tree: start %llu end %llu",
+			 start, end);
+		goto out;
+	}
 	/*
 	 * Set 1M-4M alloc/discard and 32M-64M thus leaving a hole between
 	 * 4M-32M
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index a6c90a0..05615a1 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -20,9 +20,13 @@
 #include <linux/refcount.h>
 #include "compression.h"
 
+/* workspace buffer size for s390 zlib hardware support */
+#define ZLIB_DFLTCC_BUF_SIZE    (4 * PAGE_SIZE)
+
 struct workspace {
 	z_stream strm;
 	char *buf;
+	unsigned int buf_size;
 	struct list_head list;
 	int level;
 };
@@ -61,7 +65,21 @@ struct list_head *zlib_alloc_workspace(unsigned int level)
 			zlib_inflate_workspacesize());
 	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
 	workspace->level = level;
-	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	workspace->buf = NULL;
+	/*
+	 * In case of s390 zlib hardware support, allocate a larger workspace
+	 * buffer. If allocator fails, fall back to a single page buffer.
+	 */
+	if (zlib_deflate_dfltcc_enabled()) {
+		workspace->buf = kmalloc(ZLIB_DFLTCC_BUF_SIZE,
+					 __GFP_NOMEMALLOC | __GFP_NORETRY |
+					 __GFP_NOWARN | GFP_NOIO);
+		workspace->buf_size = ZLIB_DFLTCC_BUF_SIZE;
+	}
+	if (!workspace->buf) {
+		workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+		workspace->buf_size = PAGE_SIZE;
+	}
 	if (!workspace->strm.workspace || !workspace->buf)
 		goto fail;
 
@@ -85,6 +103,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 	struct page *in_page = NULL;
 	struct page *out_page = NULL;
 	unsigned long bytes_left;
+	unsigned int in_buf_pages;
 	unsigned long len = *total_out;
 	unsigned long nr_dest_pages = *out_pages;
 	const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
@@ -102,9 +121,6 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 	workspace->strm.total_in = 0;
 	workspace->strm.total_out = 0;
 
-	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-	data_in = kmap(in_page);
-
 	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
 	if (out_page == NULL) {
 		ret = -ENOMEM;
@@ -114,12 +130,51 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 	pages[0] = out_page;
 	nr_pages = 1;
 
-	workspace->strm.next_in = data_in;
+	workspace->strm.next_in = workspace->buf;
+	workspace->strm.avail_in = 0;
 	workspace->strm.next_out = cpage_out;
 	workspace->strm.avail_out = PAGE_SIZE;
-	workspace->strm.avail_in = min(len, PAGE_SIZE);
 
 	while (workspace->strm.total_in < len) {
+		/*
+		 * Get next input pages and copy the contents to
+		 * the workspace buffer if required.
+		 */
+		if (workspace->strm.avail_in == 0) {
+			bytes_left = len - workspace->strm.total_in;
+			in_buf_pages = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
+					   workspace->buf_size / PAGE_SIZE);
+			if (in_buf_pages > 1) {
+				int i;
+
+				for (i = 0; i < in_buf_pages; i++) {
+					if (in_page) {
+						kunmap(in_page);
+						put_page(in_page);
+					}
+					in_page = find_get_page(mapping,
+								start >> PAGE_SHIFT);
+					data_in = kmap(in_page);
+					memcpy(workspace->buf + i * PAGE_SIZE,
+					       data_in, PAGE_SIZE);
+					start += PAGE_SIZE;
+				}
+				workspace->strm.next_in = workspace->buf;
+			} else {
+				if (in_page) {
+					kunmap(in_page);
+					put_page(in_page);
+				}
+				in_page = find_get_page(mapping,
+							start >> PAGE_SHIFT);
+				data_in = kmap(in_page);
+				start += PAGE_SIZE;
+				workspace->strm.next_in = data_in;
+			}
+			workspace->strm.avail_in = min(bytes_left,
+						       (unsigned long) workspace->buf_size);
+		}
+
 		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
 		if (ret != Z_OK) {
 			pr_debug("BTRFS: deflate in loop returned %d\n",
@@ -161,33 +216,43 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 		/* we're all done */
 		if (workspace->strm.total_in >= len)
 			break;
-
-		/* we've read in a full page, get a new one */
-		if (workspace->strm.avail_in == 0) {
-			if (workspace->strm.total_out > max_out)
-				break;
-
-			bytes_left = len - workspace->strm.total_in;
-			kunmap(in_page);
-			put_page(in_page);
-
-			start += PAGE_SIZE;
-			in_page = find_get_page(mapping,
-						start >> PAGE_SHIFT);
-			data_in = kmap(in_page);
-			workspace->strm.avail_in = min(bytes_left,
-							   PAGE_SIZE);
-			workspace->strm.next_in = data_in;
-		}
+		if (workspace->strm.total_out > max_out)
+			break;
 	}
 	workspace->strm.avail_in = 0;
-	ret = zlib_deflate(&workspace->strm, Z_FINISH);
-	zlib_deflateEnd(&workspace->strm);
-
-	if (ret != Z_STREAM_END) {
-		ret = -EIO;
-		goto out;
+	/*
+	 * Call deflate with Z_FINISH flush parameter providing more output
+	 * space but no more input data, until it returns with Z_STREAM_END.
+	 */
+	while (ret != Z_STREAM_END) {
+		ret = zlib_deflate(&workspace->strm, Z_FINISH);
+		if (ret == Z_STREAM_END)
+			break;
+		if (ret != Z_OK && ret != Z_BUF_ERROR) {
+			zlib_deflateEnd(&workspace->strm);
+			ret = -EIO;
+			goto out;
+		} else if (workspace->strm.avail_out == 0) {
+			/* get another page for the stream end */
+			kunmap(out_page);
+			if (nr_pages == nr_dest_pages) {
+				out_page = NULL;
+				ret = -E2BIG;
+				goto out;
+			}
+			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+			if (out_page == NULL) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			cpage_out = kmap(out_page);
+			pages[nr_pages] = out_page;
+			nr_pages++;
+			workspace->strm.avail_out = PAGE_SIZE;
+			workspace->strm.next_out = cpage_out;
+		}
 	}
+	zlib_deflateEnd(&workspace->strm);
 
 	if (workspace->strm.total_out >= workspace->strm.total_in) {
 		ret = -E2BIG;
@@ -231,7 +296,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 
 	workspace->strm.total_out = 0;
 	workspace->strm.next_out = workspace->buf;
-	workspace->strm.avail_out = PAGE_SIZE;
+	workspace->strm.avail_out = workspace->buf_size;
 
 	/* If it's deflate, and it's got no preset dictionary, then
 	   we can tell zlib to skip the adler32 check. */
@@ -270,7 +335,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 		}
 
 		workspace->strm.next_out = workspace->buf;
-		workspace->strm.avail_out = PAGE_SIZE;
+		workspace->strm.avail_out = workspace->buf_size;
 
 		if (workspace->strm.avail_in == 0) {
 			unsigned long tmp;
@@ -320,7 +385,7 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 	workspace->strm.total_in = 0;
 
 	workspace->strm.next_out = workspace->buf;
-	workspace->strm.avail_out = PAGE_SIZE;
+	workspace->strm.avail_out = workspace->buf_size;
 	workspace->strm.total_out = 0;
 	/* If it's deflate, and it's got no preset dictionary, then
 	   we can tell zlib to skip the adler32 check. */
@@ -364,7 +429,7 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 			buf_offset = 0;
 
 		bytes = min(PAGE_SIZE - pg_offset,
-			    PAGE_SIZE - buf_offset);
+			    PAGE_SIZE - (buf_offset % PAGE_SIZE));
 		bytes = min(bytes, bytes_left);
 
 		kaddr = kmap_atomic(dest_page);
@@ -375,7 +440,7 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 		bytes_left -= bytes;
 next:
 		workspace->strm.next_out = workspace->buf;
-		workspace->strm.avail_out = PAGE_SIZE;
+		workspace->strm.avail_out = workspace->buf_size;
 	}
 
 	if (ret != Z_STREAM_END && bytes_left != 0)
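
The input-side change in zlib_compress_pages() above sizes each copy by how many whole pages still fit in the workspace buffer. A standalone userspace sketch of that arithmetic follows (not part of the patch; PAGE_SIZE, the MIN/DIV_ROUND_UP macros and the in_buf_pages() helper below are illustrative assumptions):

/* Sketch of the input-buffer sizing used in zlib_compress_pages(). */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MIN(a, b)		((a) < (b) ? (a) : (b))

static unsigned long in_buf_pages(unsigned long bytes_left,
				  unsigned long buf_size)
{
	/* never copy more pages than fit in the workspace buffer */
	return MIN(DIV_ROUND_UP(bytes_left, PAGE_SIZE), buf_size / PAGE_SIZE);
}

int main(void)
{
	/* 16K left, 4-page DFLTCC buffer -> 4 pages are copied at once */
	printf("%lu\n", in_buf_pages(16384, 4 * PAGE_SIZE));
	/* 16K left, 1-page fallback buffer -> pages handled one at a time */
	printf("%lu\n", in_buf_pages(16384, PAGE_SIZE));
	return 0;
}

With the 4-page DFLTCC buffer the s390 hardware sees up to 16K of input per deflate call, while the single-page fallback keeps the old one-page-at-a-time behaviour.
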
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 44a3ce1..1dc97f2 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -396,7 +396,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 	struct cachefiles_object *object;
 	struct cachefiles_cache *cache;
 	struct inode *inode;
-	sector_t block0, block;
+	sector_t block;
 	unsigned shift;
 	int ret;
 
@@ -412,7 +412,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 
 	inode = d_backing_inode(object->backer);
 	ASSERT(S_ISREG(inode->i_mode));
-	ASSERT(inode->i_mapping->a_ops->bmap);
 	ASSERT(inode->i_mapping->a_ops->readpages);
 
 	/* calculate the shift required to use bmap */
@@ -428,12 +427,14 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 	 *   enough for this as it doesn't indicate errors, but it's all we've
 	 *   got for the moment
 	 */
-	block0 = page->index;
-	block0 <<= shift;
+	block = page->index;
+	block <<= shift;
 
-	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
+	ret = bmap(inode, &block);
+	ASSERT(!ret);
+
 	_debug("%llx -> %llx",
-	       (unsigned long long) block0,
+	       (unsigned long long) (page->index << shift),
 	       (unsigned long long) block);
 
 	if (block) {
@@ -711,7 +712,6 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 
 	inode = d_backing_inode(object->backer);
 	ASSERT(S_ISREG(inode->i_mode));
-	ASSERT(inode->i_mapping->a_ops->bmap);
 	ASSERT(inode->i_mapping->a_ops->readpages);
 
 	/* calculate the shift required to use bmap */
@@ -728,7 +728,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 
 	ret = space ? -ENODATA : -ENOBUFS;
 	list_for_each_entry_safe(page, _n, pages, lru) {
-		sector_t block0, block;
+		sector_t block;
 
 		/* we assume the absence or presence of the first block is a
 		 * good enough indication for the page as a whole
@@ -736,13 +736,14 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 		 *   good enough for this as it doesn't indicate errors, but
 		 *   it's all we've got for the moment
 		 */
-		block0 = page->index;
-		block0 <<= shift;
+		block = page->index;
+		block <<= shift;
 
-		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
-						      block0);
+		ret = bmap(inode, &block);
+		ASSERT(!ret);
+
 		_debug("%llx -> %llx",
-		       (unsigned long long) block0,
+		       (unsigned long long) (page->index << shift),
 		       (unsigned long long) block);
 
 		if (block) {
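
The conversion above replaces direct ->bmap calls with the bmap() helper, which takes the logical block in *block, writes the mapped block back through the same pointer, and returns 0 on success or a negative errno when the filesystem provides no usable ->bmap. A minimal kernel-style sketch of the calling pattern (not part of the patch; lookup_backing_block() is a hypothetical helper, not cachefiles code):

#include <linux/fs.h>
#include <linux/types.h>

/* Hypothetical helper showing the in/out convention of bmap(). */
static int lookup_backing_block(struct inode *inode, pgoff_t index,
				unsigned int shift, sector_t *result)
{
	sector_t block = (sector_t)index << shift;	/* page index -> fs block */
	int ret = bmap(inode, &block);			/* block is in/out */

	if (ret < 0)
		return ret;	/* no usable ->bmap on this filesystem */

	*result = block;	/* 0 here means a hole (no data present) */
	return 0;
}
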
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index c1da294..0a0823d 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -8,7 +8,7 @@
 ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
 	export.o caps.o snap.o xattr.o quota.o io.o \
 	mds_client.o mdsmap.o strings.o ceph_frag.o \
-	debugfs.o
+	debugfs.o util.o
 
 ceph-$(CONFIG_CEPH_FSCACHE) += cache.o
 ceph-$(CONFIG_CEPH_FS_POSIX_ACL) += acl.o
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index aa55f41..26be652 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -222,8 +222,8 @@ int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
 		err = ceph_pagelist_reserve(pagelist, len + val_size2 + 8);
 		if (err)
 			goto out_err;
-		err = ceph_pagelist_encode_string(pagelist,
-						  XATTR_NAME_POSIX_ACL_DEFAULT, len);
+		ceph_pagelist_encode_string(pagelist,
+					  XATTR_NAME_POSIX_ACL_DEFAULT, len);
 		err = posix_acl_to_xattr(&init_user_ns, default_acl,
 					 tmp_buf, val_size2);
 		if (err < 0)
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 73f24f3..270b769 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -67,7 +67,7 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc)
 		if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
 			continue;
 
-		errorf(fc, "ceph: fscache cookie already registered for fsid %pU, use fsc=<uniquifier> option",
+		errorfc(fc, "fscache cookie already registered for fsid %pU, use fsc=<uniquifier> option",
 		       fsid);
 		err = -EBUSY;
 		goto out_unlock;
@@ -96,7 +96,7 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc)
 		list_add_tail(&ent->list, &ceph_fscache_list);
 	} else {
 		kfree(ent);
-		errorf(fc, "ceph: unable to register fscache cookie for fsid %pU",
+		errorfc(fc, "unable to register fscache cookie for fsid %pU",
 		       fsid);
 		/* all other fs ignore this error */
 	}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 9d09bb5..28ae0c1 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -908,7 +908,8 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
 						       ci_node);
 					if (!__cap_is_valid(cap))
 						continue;
-					__touch_cap(cap);
+					if (cap->issued & mask)
+						__touch_cap(cap);
 				}
 			}
 			return 1;
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index c281f32..fb7cabd9 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -33,7 +33,7 @@ static int mdsmap_show(struct seq_file *s, void *p)
 	seq_printf(s, "max_mds %d\n", mdsmap->m_max_mds);
 	seq_printf(s, "session_timeout %d\n", mdsmap->m_session_timeout);
 	seq_printf(s, "session_autoclose %d\n", mdsmap->m_session_autoclose);
-	for (i = 0; i < mdsmap->m_num_mds; i++) {
+	for (i = 0; i < mdsmap->possible_max_rank; i++) {
 		struct ceph_entity_addr *addr = &mdsmap->m_info[i].addr;
 		int state = mdsmap->m_info[i].state;
 		seq_printf(s, "\tmds%d\t%s\t(%s)\n", i,
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 2e4764f..d0cd0ab 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1186,7 +1186,7 @@ void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
 	struct dentry *dn = di->dentry;
 	struct ceph_mds_client *mdsc;
 
-	dout("dentry_dir_lease_touch %p %p '%pd' (offset %lld)\n",
+	dout("dentry_dir_lease_touch %p %p '%pd' (offset 0x%llx)\n",
 	     di, dn, dn, di->offset);
 
 	if (!list_empty(&di->lease_list)) {
@@ -1567,7 +1567,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
 		inode = d_inode(dentry);
 	}
 
-	dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
+	dout("d_revalidate %p '%pd' inode %p offset 0x%llx\n", dentry,
 	     dentry, inode, ceph_dentry(dentry)->offset);
 
 	/* always trust cached snapped dentries, snapdir dentry */
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 11929d2..c3b8e8e 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1974,6 +1974,9 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
 	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
 		return -EOPNOTSUPP;
 
+	if (!src_fsc->have_copy_from2)
+		return -EOPNOTSUPP;
+
 	/*
 	 * Striped file layouts require that we copy partial objects, but the
 	 * OSD copy-from operation only supports full-object copies.  Limit
@@ -2101,8 +2104,14 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
 			CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
 			&dst_oid, &dst_oloc,
 			CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
-			CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0);
+			CEPH_OSD_OP_FLAG_FADVISE_DONTNEED,
+			dst_ci->i_truncate_seq, dst_ci->i_truncate_size,
+			CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
 		if (err) {
+			if (err == -EOPNOTSUPP) {
+				src_fsc->have_copy_from2 = false;
+				pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
+			}
 			dout("ceph_osdc_copy_from returned %d\n", err);
 			if (!ret)
 				ret = err;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index c074075..d01710a 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -55,11 +55,9 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
 	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
-	if (inode->i_state & I_NEW) {
+	if (inode->i_state & I_NEW)
 		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
 		     inode, ceph_vinop(inode), (u64)inode->i_ino);
-		unlock_new_inode(inode);
-	}
 
 	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
 	     vino.snap, inode);
@@ -88,6 +86,10 @@ struct inode *ceph_get_snapdir(struct inode *parent)
 	inode->i_fop = &ceph_snapdir_fops;
 	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
 	ci->i_rbytes = 0;
+
+	if (inode->i_state & I_NEW)
+		unlock_new_inode(inode);
+
 	return inode;
 }
 
@@ -728,8 +730,7 @@ void ceph_fill_file_time(struct inode *inode, int issued,
 static int fill_inode(struct inode *inode, struct page *locked_page,
 		      struct ceph_mds_reply_info_in *iinfo,
 		      struct ceph_mds_reply_dirfrag *dirinfo,
-		      struct ceph_mds_session *session,
-		      unsigned long ttl_from, int cap_fmode,
+		      struct ceph_mds_session *session, int cap_fmode,
 		      struct ceph_cap_reservation *caps_reservation)
 {
 	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
@@ -754,8 +755,11 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 	info_caps = le32_to_cpu(info->cap.caps);
 
 	/* prealloc new cap struct */
-	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP)
+	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
 		new_cap = ceph_get_cap(mdsc, caps_reservation);
+		if (!new_cap)
+			return -ENOMEM;
+	}
 
 	/*
 	 * prealloc xattr data, if it looks like we'll need it.  only
@@ -1237,7 +1241,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
 		if (dir) {
 			err = fill_inode(dir, NULL,
 					 &rinfo->diri, rinfo->dirfrag,
-					 session, req->r_request_started, -1,
+					 session, -1,
 					 &req->r_caps_reservation);
 			if (err < 0)
 				goto done;
@@ -1302,18 +1306,22 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
 			err = PTR_ERR(in);
 			goto done;
 		}
-		req->r_target_inode = in;
 
 		err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
-				session, req->r_request_started,
+				session,
 				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
-				rinfo->head->result == 0) ?  req->r_fmode : -1,
+				 rinfo->head->result == 0) ?  req->r_fmode : -1,
 				&req->r_caps_reservation);
 		if (err < 0) {
 			pr_err("fill_inode badness %p %llx.%llx\n",
 				in, ceph_vinop(in));
+			if (in->i_state & I_NEW)
+				discard_new_inode(in);
 			goto done;
 		}
+		req->r_target_inode = in;
+		if (in->i_state & I_NEW)
+			unlock_new_inode(in);
 	}
 
 	/*
@@ -1493,12 +1501,18 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
 			continue;
 		}
 		rc = fill_inode(in, NULL, &rde->inode, NULL, session,
-				req->r_request_started, -1,
-				&req->r_caps_reservation);
+				-1, &req->r_caps_reservation);
 		if (rc < 0) {
 			pr_err("fill_inode badness on %p got %d\n", in, rc);
 			err = rc;
+			if (in->i_state & I_NEW) {
+				ihold(in);
+				discard_new_inode(in);
+			}
+		} else if (in->i_state & I_NEW) {
+			unlock_new_inode(in);
 		}
+
 		/* avoid calling iput_final() in mds dispatch threads */
 		ceph_async_iput(in);
 	}
@@ -1694,19 +1708,24 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
 		}
 
 		ret = fill_inode(in, NULL, &rde->inode, NULL, session,
-				 req->r_request_started, -1,
-				 &req->r_caps_reservation);
+				 -1, &req->r_caps_reservation);
 		if (ret < 0) {
 			pr_err("fill_inode badness on %p\n", in);
 			if (d_really_is_negative(dn)) {
 				/* avoid calling iput_final() in mds
 				 * dispatch threads */
+				if (in->i_state & I_NEW) {
+					ihold(in);
+					discard_new_inode(in);
+				}
 				ceph_async_iput(in);
 			}
 			d_drop(dn);
 			err = ret;
 			goto next_item;
 		}
+		if (in->i_state & I_NEW)
+			unlock_new_inode(in);
 
 		if (d_really_is_negative(dn)) {
 			if (ceph_security_xattr_deadlock(in)) {
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 145d46b..bbbbddf7 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -9,6 +9,7 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/ratelimit.h>
+#include <linux/bits.h>
 
 #include "super.h"
 #include "mds_client.h"
@@ -530,6 +531,7 @@ const char *ceph_session_state_name(int s)
 	case CEPH_MDS_SESSION_OPEN: return "open";
 	case CEPH_MDS_SESSION_HUNG: return "hung";
 	case CEPH_MDS_SESSION_CLOSING: return "closing";
+	case CEPH_MDS_SESSION_CLOSED: return "closed";
 	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
 	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
 	case CEPH_MDS_SESSION_REJECTED: return "rejected";
@@ -537,7 +539,7 @@ const char *ceph_session_state_name(int s)
 	}
 }
 
-static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
+struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
 {
 	if (refcount_inc_not_zero(&s->s_ref)) {
 		dout("mdsc get_session %p %d -> %d\n", s,
@@ -568,7 +570,7 @@ struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
 {
 	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
 		return NULL;
-	return get_session(mdsc->sessions[mds]);
+	return ceph_get_mds_session(mdsc->sessions[mds]);
 }
 
 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
@@ -597,7 +599,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
 {
 	struct ceph_mds_session *s;
 
-	if (mds >= mdsc->mdsmap->m_num_mds)
+	if (mds >= mdsc->mdsmap->possible_max_rank)
 		return ERR_PTR(-EINVAL);
 
 	s = kzalloc(sizeof(*s), GFP_NOFS);
@@ -674,7 +676,6 @@ static void __unregister_session(struct ceph_mds_client *mdsc,
 	dout("__unregister_session mds%d %p\n", s->s_mds, s);
 	BUG_ON(mdsc->sessions[s->s_mds] != s);
 	mdsc->sessions[s->s_mds] = NULL;
-	s->s_state = 0;
 	ceph_con_close(&s->s_con);
 	ceph_put_mds_session(s);
 	atomic_dec(&mdsc->num_sessions);
@@ -878,7 +879,8 @@ static struct inode *get_nonsnap_parent(struct dentry *dentry)
  * Called under mdsc->mutex.
  */
 static int __choose_mds(struct ceph_mds_client *mdsc,
-			struct ceph_mds_request *req)
+			struct ceph_mds_request *req,
+			bool *random)
 {
 	struct inode *inode;
 	struct ceph_inode_info *ci;
@@ -888,6 +890,9 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
 	u32 hash = req->r_direct_hash;
 	bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
 
+	if (random)
+		*random = false;
+
 	/*
 	 * is there a specific mds we should try?  ignore hint if we have
 	 * no session and the mds is not up (active or recovering).
@@ -895,7 +900,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
 	if (req->r_resend_mds >= 0 &&
 	    (__have_session(mdsc, req->r_resend_mds) ||
 	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
-		dout("choose_mds using resend_mds mds%d\n",
+		dout("%s using resend_mds mds%d\n", __func__,
 		     req->r_resend_mds);
 		return req->r_resend_mds;
 	}
@@ -913,7 +918,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
 			rcu_read_lock();
 			inode = get_nonsnap_parent(req->r_dentry);
 			rcu_read_unlock();
-			dout("__choose_mds using snapdir's parent %p\n", inode);
+			dout("%s using snapdir's parent %p\n", __func__, inode);
 		}
 	} else if (req->r_dentry) {
 		/* ignore race with rename; old or new d_parent is okay */
@@ -933,7 +938,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
 			/* direct snapped/virtual snapdir requests
 			 * based on parent dir inode */
 			inode = get_nonsnap_parent(parent);
-			dout("__choose_mds using nonsnap parent %p\n", inode);
+			dout("%s using nonsnap parent %p\n", __func__, inode);
 		} else {
 			/* dentry target */
 			inode = d_inode(req->r_dentry);
@@ -949,8 +954,8 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
 		rcu_read_unlock();
 	}
 
-	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
-	     (int)hash, mode);
+	dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
+	     hash, mode);
 	if (!inode)
 		goto random;
 	ci = ceph_inode(inode);
@@ -968,30 +973,33 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
 				get_random_bytes(&r, 1);
 				r %= frag.ndist;
 				mds = frag.dist[r];
-				dout("choose_mds %p %llx.%llx "
-				     "frag %u mds%d (%d/%d)\n",
-				     inode, ceph_vinop(inode),
-				     frag.frag, mds,
-				     (int)r, frag.ndist);
+				dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
+				     __func__, inode, ceph_vinop(inode),
+				     frag.frag, mds, (int)r, frag.ndist);
 				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
-				    CEPH_MDS_STATE_ACTIVE)
+				    CEPH_MDS_STATE_ACTIVE &&
+				    !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
 					goto out;
 			}
 
 			/* since this file/dir wasn't known to be
 			 * replicated, then we want to look for the
 			 * authoritative mds. */
-			mode = USE_AUTH_MDS;
 			if (frag.mds >= 0) {
 				/* choose auth mds */
 				mds = frag.mds;
-				dout("choose_mds %p %llx.%llx "
-				     "frag %u mds%d (auth)\n",
-				     inode, ceph_vinop(inode), frag.frag, mds);
+				dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
+				     __func__, inode, ceph_vinop(inode),
+				     frag.frag, mds);
 				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
-				    CEPH_MDS_STATE_ACTIVE)
-					goto out;
+				    CEPH_MDS_STATE_ACTIVE) {
+					if (mode == USE_ANY_MDS &&
+					    !ceph_mdsmap_is_laggy(mdsc->mdsmap,
+								  mds))
+						goto out;
+				}
 			}
+			mode = USE_AUTH_MDS;
 		}
 	}
 
@@ -1007,7 +1015,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
 		goto random;
 	}
 	mds = cap->session->s_mds;
-	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
+	dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
 	     inode, ceph_vinop(inode), mds,
 	     cap == ci->i_auth_cap ? "auth " : "", cap);
 	spin_unlock(&ci->i_ceph_lock);
@@ -1018,8 +1026,11 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
 	return mds;
 
 random:
+	if (random)
+		*random = true;
+
 	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
-	dout("choose_mds chose random mds%d\n", mds);
+	dout("%s chose random mds%d\n", __func__, mds);
 	return mds;
 }
 
@@ -1045,20 +1056,21 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq)
 	return msg;
 }
 
+static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
+#define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
 static void encode_supported_features(void **p, void *end)
 {
-	static const unsigned char bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
-	static const size_t count = ARRAY_SIZE(bits);
+	static const size_t count = ARRAY_SIZE(feature_bits);
 
 	if (count > 0) {
 		size_t i;
-		size_t size = ((size_t)bits[count - 1] + 64) / 64 * 8;
+		size_t size = FEATURE_BYTES(count);
 
 		BUG_ON(*p + 4 + size > end);
 		ceph_encode_32(p, size);
 		memset(*p, 0, size);
 		for (i = 0; i < count; i++)
-			((unsigned char*)(*p))[i / 8] |= 1 << (bits[i] % 8);
+			((unsigned char*)(*p))[i / 8] |= BIT(feature_bits[i] % 8);
 		*p += size;
 	} else {
 		BUG_ON(*p + 4 > end);
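
The FEATURE_BYTES() macro above sizes the feature bitmap from the highest supported bit, rounded up to whole 64-bit words. A standalone sketch of the same arithmetic, parameterized directly on the highest bit value rather than indexing feature_bits[] (not part of the patch; the macro name below is illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define FEATURE_BYTES_FOR_BIT(last_bit) \
	(DIV_ROUND_UP((size_t)(last_bit) + 1, 64) * 8)

int main(void)
{
	/* CEPHFS_FEATURE_MULTI_RECONNECT == 12 -> one 64-bit word, 8 bytes */
	printf("%zu\n", FEATURE_BYTES_FOR_BIT(12));
	/* a hypothetical feature bit 64 would need a second word, 16 bytes */
	printf("%zu\n", FEATURE_BYTES_FOR_BIT(64));
	return 0;
}

With the current highest bit this yields the 8 bytes that create_session_open_msg() previously hard-coded as "4 + 8".
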
@@ -1079,6 +1091,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
 	int metadata_key_count = 0;
 	struct ceph_options *opt = mdsc->fsc->client->options;
 	struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
+	size_t size, count;
 	void *p, *end;
 
 	const char* metadata[][2] = {
@@ -1096,8 +1109,13 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
 			strlen(metadata[i][1]);
 		metadata_key_count++;
 	}
+
 	/* supported feature */
-	extra_bytes += 4 + 8;
+	size = 0;
+	count = ARRAY_SIZE(feature_bits);
+	if (count > 0)
+		size = FEATURE_BYTES(count);
+	extra_bytes += 4 + size;
 
 	/* Allocate the message */
 	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
@@ -1117,7 +1135,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
 	 * Serialize client metadata into waiting buffer space, using
 	 * the format that userspace expects for map<string, string>
 	 *
-	 * ClientSession messages with metadata are v2
+	 * ClientSession messages with metadata are v3
 	 */
 	msg->hdr.version = cpu_to_le16(3);
 	msg->hdr.compat_version = cpu_to_le16(1);
@@ -1219,7 +1237,7 @@ static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
 	struct ceph_mds_session *ts;
 	int i, mds = session->s_mds;
 
-	if (mds >= mdsc->mdsmap->m_num_mds)
+	if (mds >= mdsc->mdsmap->possible_max_rank)
 		return;
 
 	mi = &mdsc->mdsmap->m_info[mds];
@@ -1967,7 +1985,7 @@ void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
 	if (mdsc->stopping)
 		return;
 
-	get_session(session);
+	ceph_get_mds_session(session);
 	if (queue_work(mdsc->fsc->cap_wq,
 		       &session->s_cap_release_work)) {
 		dout("cap release work queued\n");
@@ -2072,7 +2090,6 @@ struct ceph_mds_request *
 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
 {
 	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
-	struct timespec64 ts;
 
 	if (!req)
 		return ERR_PTR(-ENOMEM);
@@ -2091,8 +2108,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
 	init_completion(&req->r_safe_completion);
 	INIT_LIST_HEAD(&req->r_unsafe_item);
 
-	ktime_get_coarse_real_ts64(&ts);
-	req->r_stamp = timespec64_trunc(ts, mdsc->fsc->sb->s_time_gran);
+	ktime_get_coarse_real_ts64(&req->r_stamp);
 
 	req->r_op = op;
 	req->r_direct_mode = mode;
@@ -2518,6 +2534,26 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
 }
 
 /*
+ * called under mdsc->mutex
+ */
+static int __send_request(struct ceph_mds_client *mdsc,
+			  struct ceph_mds_session *session,
+			  struct ceph_mds_request *req,
+			  bool drop_cap_releases)
+{
+	int err;
+
+	err = __prepare_send_request(mdsc, req, session->s_mds,
+				     drop_cap_releases);
+	if (!err) {
+		ceph_msg_get(req->r_request);
+		ceph_con_send(&session->s_con, req->r_request);
+	}
+
+	return err;
+}
+
+/*
  * send request, or put it on the appropriate wait list.
  */
 static void __do_request(struct ceph_mds_client *mdsc,
@@ -2526,6 +2562,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
 	struct ceph_mds_session *session = NULL;
 	int mds = -1;
 	int err = 0;
+	bool random;
 
 	if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
 		if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
@@ -2558,15 +2595,14 @@ static void __do_request(struct ceph_mds_client *mdsc,
 		if (!(mdsc->fsc->mount_options->flags &
 		      CEPH_MOUNT_OPT_MOUNTWAIT) &&
 		    !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
-			err = -ENOENT;
-			pr_info("probably no mds server is up\n");
+			err = -EHOSTUNREACH;
 			goto finish;
 		}
 	}
 
 	put_request_session(req);
 
-	mds = __choose_mds(mdsc, req);
+	mds = __choose_mds(mdsc, req, &random);
 	if (mds < 0 ||
 	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
 		dout("do_request no mds or not active, waiting for map\n");
@@ -2583,7 +2619,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
 			goto finish;
 		}
 	}
-	req->r_session = get_session(session);
+	req->r_session = ceph_get_mds_session(session);
 
 	dout("do_request mds%d session %p state %s\n", mds, session,
 	     ceph_session_state_name(session->s_state));
@@ -2594,8 +2630,12 @@ static void __do_request(struct ceph_mds_client *mdsc,
 			goto out_session;
 		}
 		if (session->s_state == CEPH_MDS_SESSION_NEW ||
-		    session->s_state == CEPH_MDS_SESSION_CLOSING)
+		    session->s_state == CEPH_MDS_SESSION_CLOSING) {
 			__open_session(mdsc, session);
+			/* retry the same mds later */
+			if (random)
+				req->r_resend_mds = mds;
+		}
 		list_add(&req->r_wait, &session->s_waiting);
 		goto out_session;
 	}
@@ -2606,11 +2646,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
 	if (req->r_request_started == 0)   /* note request start time */
 		req->r_request_started = jiffies;
 
-	err = __prepare_send_request(mdsc, req, mds, false);
-	if (!err) {
-		ceph_msg_get(req->r_request);
-		ceph_con_send(&session->s_con, req->r_request);
-	}
+	err = __send_request(mdsc, session, req, false);
 
 out_session:
 	ceph_put_mds_session(session);
@@ -2863,7 +2899,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 			mutex_unlock(&mdsc->mutex);
 			goto out;
 		} else  {
-			int mds = __choose_mds(mdsc, req);
+			int mds = __choose_mds(mdsc, req, NULL);
 			if (mds >= 0 && mds != req->r_session->s_mds) {
 				dout("but auth changed, so resending\n");
 				__do_request(mdsc, req);
@@ -2879,6 +2915,10 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 		set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
 		__unregister_request(mdsc, req);
 
+		/* last request during umount? */
+		if (mdsc->stopping && !__get_oldest_req(mdsc))
+			complete_all(&mdsc->safe_umount_waiters);
+
 		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
 			/*
 			 * We already handled the unsafe response, now do the
@@ -2889,9 +2929,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 			 */
 			dout("got safe reply %llu, mds%d\n", tid, mds);
 
-			/* last unsafe request during umount? */
-			if (mdsc->stopping && !__get_oldest_req(mdsc))
-				complete_all(&mdsc->safe_umount_waiters);
 			mutex_unlock(&mdsc->mutex);
 			goto out;
 		}
@@ -3106,7 +3143,7 @@ static void handle_session(struct ceph_mds_session *session,
 
 	mutex_lock(&mdsc->mutex);
 	if (op == CEPH_SESSION_CLOSE) {
-		get_session(session);
+		ceph_get_mds_session(session);
 		__unregister_session(mdsc, session);
 	}
 	/* FIXME: this ttl calculation is generous */
@@ -3144,6 +3181,7 @@ static void handle_session(struct ceph_mds_session *session,
 	case CEPH_SESSION_CLOSE:
 		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
 			pr_info("mds%d reconnect denied\n", session->s_mds);
+		session->s_state = CEPH_MDS_SESSION_CLOSED;
 		cleanup_session_requests(mdsc, session);
 		remove_session_caps(session);
 		wake = 2; /* for good measure */
@@ -3211,7 +3249,6 @@ static void handle_session(struct ceph_mds_session *session,
 	return;
 }
 
-
 /*
  * called under session->mutex.
  */
@@ -3220,18 +3257,12 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
 {
 	struct ceph_mds_request *req, *nreq;
 	struct rb_node *p;
-	int err;
 
 	dout("replay_unsafe_requests mds%d\n", session->s_mds);
 
 	mutex_lock(&mdsc->mutex);
-	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
-		err = __prepare_send_request(mdsc, req, session->s_mds, true);
-		if (!err) {
-			ceph_msg_get(req->r_request);
-			ceph_con_send(&session->s_con, req->r_request);
-		}
-	}
+	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
+		__send_request(mdsc, session, req, true);
 
 	/*
 	 * also re-send old requests when MDS enters reconnect stage. So that MDS
@@ -3246,14 +3277,8 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
 		if (req->r_attempts == 0)
 			continue; /* only old requests */
 		if (req->r_session &&
-		    req->r_session->s_mds == session->s_mds) {
-			err = __prepare_send_request(mdsc, req,
-						     session->s_mds, true);
-			if (!err) {
-				ceph_msg_get(req->r_request);
-				ceph_con_send(&session->s_con, req->r_request);
-			}
-		}
+		    req->r_session->s_mds == session->s_mds)
+			__send_request(mdsc, session, req, true);
 	}
 	mutex_unlock(&mdsc->mutex);
 }
@@ -3764,7 +3789,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
 	dout("check_new_map new %u old %u\n",
 	     newmap->m_epoch, oldmap->m_epoch);
 
-	for (i = 0; i < oldmap->m_num_mds && i < mdsc->max_sessions; i++) {
+	for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
 		if (!mdsc->sessions[i])
 			continue;
 		s = mdsc->sessions[i];
@@ -3778,9 +3803,9 @@ static void check_new_map(struct ceph_mds_client *mdsc,
 		     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
 		     ceph_session_state_name(s->s_state));
 
-		if (i >= newmap->m_num_mds) {
+		if (i >= newmap->possible_max_rank) {
 			/* force close session for stopped mds */
-			get_session(s);
+			ceph_get_mds_session(s);
 			__unregister_session(mdsc, s);
 			__wake_requests(mdsc, &s->s_waiting);
 			mutex_unlock(&mdsc->mutex);
@@ -3835,7 +3860,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
 		}
 	}
 
-	for (i = 0; i < newmap->m_num_mds && i < mdsc->max_sessions; i++) {
+	for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
 		s = mdsc->sessions[i];
 		if (!s)
 			continue;
@@ -4381,7 +4406,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
 	mutex_lock(&mdsc->mutex);
 	for (i = 0; i < mdsc->max_sessions; i++) {
 		if (mdsc->sessions[i]) {
-			session = get_session(mdsc->sessions[i]);
+			session = ceph_get_mds_session(mdsc->sessions[i]);
 			__unregister_session(mdsc, session);
 			mutex_unlock(&mdsc->mutex);
 			mutex_lock(&session->s_mutex);
@@ -4609,11 +4634,8 @@ static struct ceph_connection *con_get(struct ceph_connection *con)
 {
 	struct ceph_mds_session *s = con->private;
 
-	if (get_session(s)) {
-		dout("mdsc con_get %p ok (%d)\n", s, refcount_read(&s->s_ref));
+	if (ceph_get_mds_session(s))
 		return con;
-	}
-	dout("mdsc con_get %p FAIL\n", s);
 	return NULL;
 }
 
@@ -4621,7 +4643,6 @@ static void con_put(struct ceph_connection *con)
 {
 	struct ceph_mds_session *s = con->private;
 
-	dout("mdsc con_put %p (%d)\n", s, refcount_read(&s->s_ref) - 1);
 	ceph_put_mds_session(s);
 }
 
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 14c7e8c..27a7446 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -17,22 +17,31 @@
 #include <linux/ceph/auth.h>
 
 /* The first 8 bits are reserved for old ceph releases */
-#define CEPHFS_FEATURE_MIMIC		8
-#define CEPHFS_FEATURE_REPLY_ENCODING	9
-#define CEPHFS_FEATURE_RECLAIM_CLIENT	10
-#define CEPHFS_FEATURE_LAZY_CAP_WANTED	11
-#define CEPHFS_FEATURE_MULTI_RECONNECT  12
+enum ceph_feature_type {
+	CEPHFS_FEATURE_MIMIC = 8,
+	CEPHFS_FEATURE_REPLY_ENCODING,
+	CEPHFS_FEATURE_RECLAIM_CLIENT,
+	CEPHFS_FEATURE_LAZY_CAP_WANTED,
+	CEPHFS_FEATURE_MULTI_RECONNECT,
 
-#define CEPHFS_FEATURES_CLIENT_SUPPORTED { 	\
+	CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_MULTI_RECONNECT,
+};
+
+/*
+ * This will always have the highest feature bit value
+ * as the last element of the array.
+ */
+#define CEPHFS_FEATURES_CLIENT_SUPPORTED {	\
 	0, 1, 2, 3, 4, 5, 6, 7,			\
 	CEPHFS_FEATURE_MIMIC,			\
 	CEPHFS_FEATURE_REPLY_ENCODING,		\
 	CEPHFS_FEATURE_LAZY_CAP_WANTED,		\
 	CEPHFS_FEATURE_MULTI_RECONNECT,		\
+						\
+	CEPHFS_FEATURE_MAX,			\
 }
 #define CEPHFS_FEATURES_CLIENT_REQUIRED {}
 
-
 /*
  * Some lock dependencies:
  *
@@ -151,7 +160,8 @@ enum {
 	CEPH_MDS_SESSION_RESTARTING = 5,
 	CEPH_MDS_SESSION_RECONNECTING = 6,
 	CEPH_MDS_SESSION_CLOSING = 7,
-	CEPH_MDS_SESSION_REJECTED = 8,
+	CEPH_MDS_SESSION_CLOSED = 8,
+	CEPH_MDS_SESSION_REJECTED = 9,
 };
 
 struct ceph_mds_session {
@@ -174,6 +184,7 @@ struct ceph_mds_session {
 
 	/* protected by s_cap_lock */
 	spinlock_t        s_cap_lock;
+	refcount_t        s_ref;
 	struct list_head  s_caps;     /* all caps issued by this session */
 	struct ceph_cap  *s_cap_iterator;
 	int               s_nr_caps;
@@ -188,7 +199,6 @@ struct ceph_mds_session {
 	unsigned long     s_renew_requested; /* last time we sent a renew req */
 	u64               s_renew_seq;
 
-	refcount_t        s_ref;
 	struct list_head  s_waiting;  /* waiting requests */
 	struct list_head  s_unsafe;   /* unsafe requests */
 };
@@ -224,6 +234,7 @@ struct ceph_mds_request {
 	struct rb_node r_node;
 	struct ceph_mds_client *r_mdsc;
 
+	struct kref       r_kref;
 	int r_op;                    /* mds op code */
 
 	/* operation on what? */
@@ -294,7 +305,6 @@ struct ceph_mds_request {
 	int               r_resend_mds; /* mds to resend to next, if any*/
 	u32               r_sent_on_mseq; /* cap mseq request was sent at*/
 
-	struct kref       r_kref;
 	struct list_head  r_wait;
 	struct completion r_completion;
 	struct completion r_safe_completion;
@@ -451,15 +461,10 @@ extern const char *ceph_mds_op_name(int op);
 extern struct ceph_mds_session *
 __ceph_lookup_mds_session(struct ceph_mds_client *, int mds);
 
-static inline struct ceph_mds_session *
-ceph_get_mds_session(struct ceph_mds_session *s)
-{
-	refcount_inc(&s->s_ref);
-	return s;
-}
-
 extern const char *ceph_session_state_name(int s);
 
+extern struct ceph_mds_session *
+ceph_get_mds_session(struct ceph_mds_session *s);
 extern void ceph_put_mds_session(struct ceph_mds_session *s);
 
 extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 471bac3..8896278 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -13,30 +13,25 @@
 
 #include "super.h"
 
+#define CEPH_MDS_IS_READY(i, ignore_laggy) \
+	(m->m_info[i].state > 0 && (ignore_laggy ? true : !m->m_info[i].laggy))
 
-/*
- * choose a random mds that is "up" (i.e. has a state > 0), or -1.
- */
-int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
+static int __mdsmap_get_random_mds(struct ceph_mdsmap *m, bool ignore_laggy)
 {
 	int n = 0;
 	int i, j;
 
-	/* special case for one mds */
-	if (1 == m->m_num_mds && m->m_info[0].state > 0)
-		return 0;
-
 	/* count */
-	for (i = 0; i < m->m_num_mds; i++)
-		if (m->m_info[i].state > 0)
+	for (i = 0; i < m->possible_max_rank; i++)
+		if (CEPH_MDS_IS_READY(i, ignore_laggy))
 			n++;
 	if (n == 0)
 		return -1;
 
 	/* pick */
 	n = prandom_u32() % n;
-	for (j = 0, i = 0; i < m->m_num_mds; i++) {
-		if (m->m_info[i].state > 0)
+	for (j = 0, i = 0; i < m->possible_max_rank; i++) {
+		if (CEPH_MDS_IS_READY(i, ignore_laggy))
 			j++;
 		if (j > n)
 			break;
@@ -45,6 +40,20 @@ int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
 	return i;
 }
 
+/*
+ * choose a random mds that is "up" (i.e. has a state > 0), or -1.
+ */
+int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
+{
+	int mds;
+
+	mds = __mdsmap_get_random_mds(m, false);
+	if (mds == m->possible_max_rank || mds == -1)
+		mds = __mdsmap_get_random_mds(m, true);
+
+	return mds == m->possible_max_rank ? -1 : mds;
+}
+
 #define __decode_and_drop_type(p, end, type, bad)		\
 	do {							\
 		if (*p + sizeof(type) > end)			\
@@ -138,14 +147,29 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 	m->m_session_autoclose = ceph_decode_32(p);
 	m->m_max_file_size = ceph_decode_64(p);
 	m->m_max_mds = ceph_decode_32(p);
-	m->m_num_mds = m->m_max_mds;
 
-	m->m_info = kcalloc(m->m_num_mds, sizeof(*m->m_info), GFP_NOFS);
+	/*
+	 * Pick out the number of active nodes as m_num_active_mds; it may
+	 * be larger than m_max_mds while max_mds is being decreased on the
+	 * cluster side, otherwise it should be less than or equal to
+	 * m_max_mds.
+	 */
+	m->m_num_active_mds = n = ceph_decode_32(p);
+
+	/*
+	 * The possible max rank may be larger than m_num_active_mds: for
+	 * example, with max_mds == 2 in the cluster, when MDS(0) is laggy
+	 * and being replaced by a new MDS, we will temporarily receive a
+	 * new mds map with m_num_active_mds == 1 and only the active
+	 * MDS(1), whose rank is >= m_num_active_mds.
+	 */
+	m->possible_max_rank = max(m->m_num_active_mds, m->m_max_mds);
+
+	m->m_info = kcalloc(m->possible_max_rank, sizeof(*m->m_info), GFP_NOFS);
 	if (!m->m_info)
 		goto nomem;
 
 	/* pick out active nodes from mds_info (state > 0) */
-	n = ceph_decode_32(p);
 	for (i = 0; i < n; i++) {
 		u64 global_id;
 		u32 namelen;
@@ -215,18 +239,15 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 		     ceph_mds_state_name(state),
 		     laggy ? "(laggy)" : "");
 
-		if (mds < 0 || state <= 0)
+		if (mds < 0 || mds >= m->possible_max_rank) {
+			pr_warn("mdsmap_decode got incorrect mds(%d)\n", mds);
 			continue;
+		}
 
-		if (mds >= m->m_num_mds) {
-			int new_num = max(mds + 1, m->m_num_mds * 2);
-			void *new_m_info = krealloc(m->m_info,
-						new_num * sizeof(*m->m_info),
-						GFP_NOFS | __GFP_ZERO);
-			if (!new_m_info)
-				goto nomem;
-			m->m_info = new_m_info;
-			m->m_num_mds = new_num;
+		if (state <= 0) {
+			pr_warn("mdsmap_decode got incorrect state(%s)\n",
+				ceph_mds_state_name(state));
+			continue;
 		}
 
 		info = &m->m_info[mds];
@@ -247,14 +268,6 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 			info->export_targets = NULL;
 		}
 	}
-	if (m->m_num_mds > m->m_max_mds) {
-		/* find max up mds */
-		for (i = m->m_num_mds; i >= m->m_max_mds; i--) {
-			if (i == 0 || m->m_info[i-1].state > 0)
-				break;
-		}
-		m->m_num_mds = i;
-	}
 
 	/* pg_pools */
 	ceph_decode_32_safe(p, end, n, bad);
@@ -296,14 +309,14 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 
 		for (i = 0; i < n; i++) {
 			s32 mds = ceph_decode_32(p);
-			if (mds >= 0 && mds < m->m_num_mds) {
+			if (mds >= 0 && mds < m->possible_max_rank) {
 				if (m->m_info[mds].laggy)
 					num_laggy++;
 			}
 		}
 		m->m_num_laggy = num_laggy;
 
-		if (n > m->m_num_mds) {
+		if (n > m->possible_max_rank) {
 			void *new_m_info = krealloc(m->m_info,
 						    n * sizeof(*m->m_info),
 						    GFP_NOFS | __GFP_ZERO);
@@ -311,7 +324,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 				goto nomem;
 			m->m_info = new_m_info;
 		}
-		m->m_num_mds = n;
+		m->possible_max_rank = n;
 	}
 
 	/* inc */
@@ -382,7 +395,7 @@ void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
 {
 	int i;
 
-	for (i = 0; i < m->m_num_mds; i++)
+	for (i = 0; i < m->possible_max_rank; i++)
 		kfree(m->m_info[i].export_targets);
 	kfree(m->m_info);
 	kfree(m->m_data_pg_pools);
@@ -396,9 +409,9 @@ bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m)
 		return false;
 	if (m->m_damaged)
 		return false;
-	if (m->m_num_laggy > 0)
+	if (m->m_num_laggy == m->m_num_active_mds)
 		return false;
-	for (i = 0; i < m->m_num_mds; i++) {
+	for (i = 0; i < m->possible_max_rank; i++) {
 		if (m->m_info[i].state == CEPH_MDS_STATE_ACTIVE)
 			nr_active++;
 	}
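
For the replacement scenario described in the comment above, possible_max_rank works out as follows (a standalone sketch, not part of the patch):

#include <stdio.h>

#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int m_max_mds = 2;		/* configured cluster limit */
	unsigned int m_num_active_mds = 1;	/* only MDS(1) decoded as active */
	unsigned int possible_max_rank = MAX(m_num_active_mds, m_max_mds);

	/* m_info[] must still cover rank 1, hence 2 slots rather than 1 */
	printf("possible_max_rank = %u\n", possible_max_rank);
	return 0;
}
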
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 29a795f..1d9f083 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -107,7 +107,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
 	return 0;
 }
 
-
 static int ceph_sync_fs(struct super_block *sb, int wait)
 {
 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
@@ -163,13 +162,13 @@ enum ceph_recover_session_mode {
 	ceph_recover_session_clean
 };
 
-static const struct fs_parameter_enum ceph_mount_param_enums[] = {
-	{ Opt_recover_session,	"no",		ceph_recover_session_no },
-	{ Opt_recover_session,	"clean",	ceph_recover_session_clean },
+static const struct constant_table ceph_param_recover[] = {
+	{ "no",		ceph_recover_session_no },
+	{ "clean",	ceph_recover_session_clean },
 	{}
 };
 
-static const struct fs_parameter_spec ceph_mount_param_specs[] = {
+static const struct fs_parameter_spec ceph_mount_parameters[] = {
 	fsparam_flag_no ("acl",				Opt_acl),
 	fsparam_flag_no ("asyncreaddir",		Opt_asyncreaddir),
 	fsparam_s32	("caps_max",			Opt_caps_max),
@@ -179,8 +178,8 @@ static const struct fs_parameter_spec ceph_mount_param_specs[] = {
 	fsparam_flag_no ("copyfrom",			Opt_copyfrom),
 	fsparam_flag_no ("dcache",			Opt_dcache),
 	fsparam_flag_no ("dirstat",			Opt_dirstat),
-	__fsparam	(fs_param_is_string, "fsc",	Opt_fscache,
-			 fs_param_neg_with_no | fs_param_v_optional),
+	fsparam_flag_no	("fsc",				Opt_fscache), // fsc|nofsc
+	fsparam_string	("fsc",				Opt_fscache), // fsc=...
 	fsparam_flag_no ("ino32",			Opt_ino32),
 	fsparam_string	("mds_namespace",		Opt_mds_namespace),
 	fsparam_flag_no ("poolperm",			Opt_poolperm),
@@ -189,7 +188,7 @@ static const struct fs_parameter_spec ceph_mount_param_specs[] = {
 	fsparam_flag_no ("rbytes",			Opt_rbytes),
 	fsparam_u32	("readdir_max_bytes",		Opt_readdir_max_bytes),
 	fsparam_u32	("readdir_max_entries",		Opt_readdir_max_entries),
-	fsparam_enum	("recover_session",		Opt_recover_session),
+	fsparam_enum	("recover_session",		Opt_recover_session, ceph_param_recover),
 	fsparam_flag_no ("require_active_mds",		Opt_require_active_mds),
 	fsparam_u32	("rsize",			Opt_rsize),
 	fsparam_string	("snapdirname",			Opt_snapdirname),
@@ -198,12 +197,6 @@ static const struct fs_parameter_spec ceph_mount_param_specs[] = {
 	{}
 };
 
-static const struct fs_parameter_description ceph_mount_parameters = {
-	.name           = "ceph",
-	.specs          = ceph_mount_param_specs,
-	.enums		= ceph_mount_param_enums,
-};
-
 struct ceph_parse_opts_ctx {
 	struct ceph_options		*copts;
 	struct ceph_mount_options	*opts;
@@ -211,7 +204,6 @@ struct ceph_parse_opts_ctx {
 
 /*
  * Parse the source parameter.  Distinguish the server list from the path.
- * Internally we do not include the leading '/' in the path.
  *
  * The source will look like:
  *     <server_spec>[,<server_spec>...]:[<path>]
@@ -228,30 +220,33 @@ static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
 
 	dout("%s '%s'\n", __func__, dev_name);
 	if (!dev_name || !*dev_name)
-		return invalf(fc, "ceph: Empty source");
+		return invalfc(fc, "Empty source");
 
 	dev_name_end = strchr(dev_name, '/');
 	if (dev_name_end) {
-		if (strlen(dev_name_end) > 1) {
-			kfree(fsopt->server_path);
-			fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
-			if (!fsopt->server_path)
-				return -ENOMEM;
-		}
+		kfree(fsopt->server_path);
+
+		/*
+		 * The server_path stores the full path string passed in from
+		 * userland, including the leading '/'.
+		 */
+		fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
+		if (!fsopt->server_path)
+			return -ENOMEM;
 	} else {
 		dev_name_end = dev_name + strlen(dev_name);
 	}
 
 	dev_name_end--;		/* back up to ':' separator */
 	if (dev_name_end < dev_name || *dev_name_end != ':')
-		return invalf(fc, "ceph: No path or : separator in source");
+		return invalfc(fc, "No path or : separator in source");
 
 	dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
 	if (fsopt->server_path)
 		dout("server path '%s'\n", fsopt->server_path);
 
 	ret = ceph_parse_mon_ips(param->string, dev_name_end - dev_name,
-				 pctx->copts, fc);
+				 pctx->copts, fc->log.log);
 	if (ret)
 		return ret;
 
@@ -269,11 +264,11 @@ static int ceph_parse_mount_param(struct fs_context *fc,
 	unsigned int mode;
 	int token, ret;
 
-	ret = ceph_parse_param(param, pctx->copts, fc);
+	ret = ceph_parse_param(param, pctx->copts, fc->log.log);
 	if (ret != -ENOPARAM)
 		return ret;
 
-	token = fs_parse(fc, &ceph_mount_parameters, param, &result);
+	token = fs_parse(fc, ceph_mount_parameters, param, &result);
 	dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
 	if (token < 0)
 		return token;
@@ -300,7 +295,7 @@ static int ceph_parse_mount_param(struct fs_context *fc,
 		break;
 	case Opt_source:
 		if (fc->source)
-			return invalf(fc, "ceph: Multiple sources specified");
+			return invalfc(fc, "Multiple sources specified");
 		return ceph_parse_source(param, fc);
 	case Opt_wsize:
 		if (result.uint_32 < PAGE_SIZE ||
@@ -391,7 +386,7 @@ static int ceph_parse_mount_param(struct fs_context *fc,
 		}
 		break;
 #else
-		return invalf(fc, "ceph: fscache support is disabled");
+		return invalfc(fc, "fscache support is disabled");
 #endif
 	case Opt_poolperm:
 		if (!result.negated)
@@ -422,7 +417,7 @@ static int ceph_parse_mount_param(struct fs_context *fc,
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
 			fc->sb_flags |= SB_POSIXACL;
 #else
-			return invalf(fc, "ceph: POSIX ACL support is disabled");
+			return invalfc(fc, "POSIX ACL support is disabled");
 #endif
 		} else {
 			fc->sb_flags &= ~SB_POSIXACL;
@@ -434,7 +429,7 @@ static int ceph_parse_mount_param(struct fs_context *fc,
 	return 0;
 
 out_of_range:
-	return invalf(fc, "ceph: %s out of range", param->key);
+	return invalfc(fc, "%s out of range", param->key);
 }
 
 static void destroy_mount_options(struct ceph_mount_options *args)
@@ -461,6 +456,73 @@ static int strcmp_null(const char *s1, const char *s2)
 	return strcmp(s1, s2);
 }
 
+/**
+ * path_remove_extra_slash - Remove the extra slashes in the server path
+ * @server_path: the server path, which may be NULL
+ *
+ * Return NULL if the path is NULL or consists only of "/", otherwise a
+ * string with the redundant slashes removed, including the leading
+ * slash(es) and the slash(es) at the end of the server path, such as:
+ * "//dir1////dir2///" --> "dir1/dir2"
+ */
+static char *path_remove_extra_slash(const char *server_path)
+{
+	const char *path = server_path;
+	const char *cur, *end;
+	char *buf, *p;
+	int len;
+
+	/* if the server path is omitted */
+	if (!path)
+		return NULL;
+
+	/* remove all the leading slashes */
+	while (*path == '/')
+		path++;
+
+	/* if the server path only consists of slashes */
+	if (*path == '\0')
+		return NULL;
+
+	len = strlen(path);
+
+	buf = kmalloc(len + 1, GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	end = path + len;
+	p = buf;
+	do {
+		cur = strchr(path, '/');
+		if (!cur)
+			cur = end;
+
+		len = cur - path;
+
+		/* including one '/' */
+		if (cur != end)
+			len += 1;
+
+		memcpy(p, path, len);
+		p += len;
+
+		while (cur <= end && *cur == '/')
+			cur++;
+		path = cur;
+	} while (path < end);
+
+	*p = '\0';
+
+	/*
+	 * remove the trailing slash, if any, so that we end up with
+	 * something like "dir1/dir2"
+	 */
+	if (*(--p) == '/')
+		*p = '\0';
+
+	return buf;
+}
+
 static int compare_mount_options(struct ceph_mount_options *new_fsopt,
 				 struct ceph_options *new_opt,
 				 struct ceph_fs_client *fsc)
@@ -468,6 +530,7 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
 	struct ceph_mount_options *fsopt1 = new_fsopt;
 	struct ceph_mount_options *fsopt2 = fsc->mount_options;
 	int ofs = offsetof(struct ceph_mount_options, snapdir_name);
+	char *p1, *p2;
 	int ret;
 
 	ret = memcmp(fsopt1, fsopt2, ofs);
@@ -480,9 +543,21 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
 	ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
 	if (ret)
 		return ret;
-	ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
+
+	p1 = path_remove_extra_slash(fsopt1->server_path);
+	if (IS_ERR(p1))
+		return PTR_ERR(p1);
+	p2 = path_remove_extra_slash(fsopt2->server_path);
+	if (IS_ERR(p2)) {
+		kfree(p1);
+		return PTR_ERR(p2);
+	}
+	ret = strcmp_null(p1, p2);
+	kfree(p1);
+	kfree(p2);
 	if (ret)
 		return ret;
+
 	ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
 	if (ret)
 		return ret;
@@ -637,6 +712,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
 	fsc->sb = NULL;
 	fsc->mount_state = CEPH_MOUNT_MOUNTING;
 	fsc->filp_gen = 1;
+	fsc->have_copy_from2 = true;
 
 	atomic_long_set(&fsc->writeback_count, 0);
 
@@ -788,7 +864,6 @@ static void destroy_caches(void)
 	ceph_fscache_unregister();
 }
 
-
 /*
  * ceph_umount_begin - initiate forced umount.  Tear down down the
  * mount, skipping steps that may hang while waiting for server(s).
@@ -868,9 +943,6 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
 	return root;
 }
 
-
-
-
 /*
  * mount: join the ceph cluster, and open root directory.
  */
@@ -885,7 +957,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
 	mutex_lock(&fsc->client->mount_mutex);
 
 	if (!fsc->sb->s_root) {
-		const char *path;
+		const char *path, *p;
 		err = __ceph_open_session(fsc->client, started);
 		if (err < 0)
 			goto out;
@@ -897,17 +969,22 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
 				goto out;
 		}
 
-		if (!fsc->mount_options->server_path) {
-			path = "";
-			dout("mount opening path \\t\n");
-		} else {
-			path = fsc->mount_options->server_path + 1;
-			dout("mount opening path %s\n", path);
+		p = path_remove_extra_slash(fsc->mount_options->server_path);
+		if (IS_ERR(p)) {
+			err = PTR_ERR(p);
+			goto out;
 		}
+		/* if the server path is omitted or just consists of '/' */
+		if (!p)
+			path = "";
+		else
+			path = p;
+		dout("mount opening path '%s'\n", path);
 
 		ceph_fs_debugfs_init(fsc);
 
 		root = open_root_dentry(fsc, path, started);
+		kfree(p);
 		if (IS_ERR(root)) {
 			err = PTR_ERR(root);
 			goto out;
@@ -1018,7 +1095,7 @@ static int ceph_get_tree(struct fs_context *fc)
 	dout("ceph_get_tree\n");
 
 	if (!fc->source)
-		return invalf(fc, "ceph: No source");
+		return invalfc(fc, "No source");
 
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
 	fc->sb_flags |= SB_POSIXACL;
@@ -1070,6 +1147,11 @@ static int ceph_get_tree(struct fs_context *fc)
 	return 0;
 
 out_splat:
+	if (!ceph_mdsmap_is_cluster_available(fsc->mdsc->mdsmap)) {
+		pr_info("No mds server is up or the cluster is laggy\n");
+		err = -EHOSTUNREACH;
+	}
+
 	ceph_mdsc_close_sessions(fsc->mdsc);
 	deactivate_locked_super(sb);
 	goto out_final;
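
The path_remove_extra_slash() helper added above normalizes the server path taken from the mount source. A userspace sketch mirroring the intended behaviour (not part of the patch; remove_extra_slash() below is a simplified stand-in, not the kernel implementation):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *remove_extra_slash(const char *path)
{
	char *buf, *p;

	if (!path)
		return NULL;
	while (*path == '/')		/* drop leading slashes */
		path++;
	if (!*path)			/* path consisted only of slashes */
		return NULL;

	buf = malloc(strlen(path) + 1);
	if (!buf)
		return NULL;

	for (p = buf; *path; path++) {
		if (*path == '/' && (path[1] == '/' || path[1] == '\0'))
			continue;	/* skip duplicate and trailing '/' */
		*p++ = *path;
	}
	*p = '\0';
	return buf;
}

int main(void)
{
	char *s = remove_extra_slash("//dir1////dir2///");

	printf("'%s'\n", s ? s : "(null)");	/* prints 'dir1/dir2' */
	free(s);
	return 0;
}
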
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 3bf1a01..1e456a9 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -106,6 +106,8 @@ struct ceph_fs_client {
 	unsigned long last_auto_reconnect;
 	bool blacklisted;
 
+	bool have_copy_from2;
+
 	u32 filp_gen;
 	loff_t max_file_size;
 
diff --git a/fs/ceph/util.c b/fs/ceph/util.c
new file mode 100644
index 0000000..2c34875
--- /dev/null
+++ b/fs/ceph/util.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Some non-inline ceph helpers
+ */
+#include <linux/module.h>
+#include <linux/ceph/types.h>
+
+/*
+ * return true if @layout appears to be valid
+ */
+int ceph_file_layout_is_valid(const struct ceph_file_layout *layout)
+{
+	__u32 su = layout->stripe_unit;
+	__u32 sc = layout->stripe_count;
+	__u32 os = layout->object_size;
+
+	/* stripe unit, object size must be non-zero, 64k increment */
+	if (!su || (su & (CEPH_MIN_STRIPE_UNIT-1)))
+		return 0;
+	if (!os || (os & (CEPH_MIN_STRIPE_UNIT-1)))
+		return 0;
+	/* object size must be a multiple of stripe unit */
+	if (os < su || os % su)
+		return 0;
+	/* stripe count must be non-zero */
+	if (!sc)
+		return 0;
+	return 1;
+}
+
+void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
+				  struct ceph_file_layout_legacy *legacy)
+{
+	fl->stripe_unit = le32_to_cpu(legacy->fl_stripe_unit);
+	fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count);
+	fl->object_size = le32_to_cpu(legacy->fl_object_size);
+	fl->pool_id = le32_to_cpu(legacy->fl_pg_pool);
+	if (fl->pool_id == 0 && fl->stripe_unit == 0 &&
+	    fl->stripe_count == 0 && fl->object_size == 0)
+		fl->pool_id = -1;
+}
+
+void ceph_file_layout_to_legacy(struct ceph_file_layout *fl,
+				struct ceph_file_layout_legacy *legacy)
+{
+	legacy->fl_stripe_unit = cpu_to_le32(fl->stripe_unit);
+	legacy->fl_stripe_count = cpu_to_le32(fl->stripe_count);
+	legacy->fl_object_size = cpu_to_le32(fl->object_size);
+	if (fl->pool_id >= 0)
+		legacy->fl_pg_pool = cpu_to_le32(fl->pool_id);
+	else
+		legacy->fl_pg_pool = 0;
+}
+
+int ceph_flags_to_mode(int flags)
+{
+	int mode;
+
+#ifdef O_DIRECTORY  /* fixme */
+	if ((flags & O_DIRECTORY) == O_DIRECTORY)
+		return CEPH_FILE_MODE_PIN;
+#endif
+
+	switch (flags & O_ACCMODE) {
+	case O_WRONLY:
+		mode = CEPH_FILE_MODE_WR;
+		break;
+	case O_RDONLY:
+		mode = CEPH_FILE_MODE_RD;
+		break;
+	case O_RDWR:
+	case O_ACCMODE: /* this is what the VFS does */
+		mode = CEPH_FILE_MODE_RDWR;
+		break;
+	}
+#ifdef O_LAZY
+	if (flags & O_LAZY)
+		mode |= CEPH_FILE_MODE_LAZY;
+#endif
+
+	return mode;
+}
+
+int ceph_caps_for_mode(int mode)
+{
+	int caps = CEPH_CAP_PIN;
+
+	if (mode & CEPH_FILE_MODE_RD)
+		caps |= CEPH_CAP_FILE_SHARED |
+			CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE;
+	if (mode & CEPH_FILE_MODE_WR)
+		caps |= CEPH_CAP_FILE_EXCL |
+			CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER |
+			CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL |
+			CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL;
+	if (mode & CEPH_FILE_MODE_LAZY)
+		caps |= CEPH_CAP_FILE_LAZYIO;
+
+	return caps;
+}
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index cb18ee6..7b8a070 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -655,7 +655,7 @@ static int __build_xattrs(struct inode *inode)
 	u32 len;
 	const char *name, *val;
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	int xattr_version;
+	u64 xattr_version;
 	struct ceph_inode_xattr **xattrs = NULL;
 	int err = 0;
 	int i;
@@ -851,7 +851,7 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
 	req_mask = __get_request_mask(inode);
 
 	spin_lock(&ci->i_ceph_lock);
-	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
+	dout("getxattr %p name '%s' ver=%lld index_ver=%lld\n", inode, name,
 	     ci->i_xattrs.version, ci->i_xattrs.index_version);
 
 	if (ci->i_xattrs.version == 0 ||
@@ -1078,7 +1078,8 @@ int __ceph_setxattr(struct inode *inode, const char *name,
 		}
 	}
 
-	dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
+	dout("setxattr %p name '%s' issued %s\n", inode, name,
+	     ceph_cap_string(issued));
 	__build_xattrs(inode);
 
 	required_blob_size = __get_required_blob_size(ci, name_len, val_len);
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 19f6e59..276e4b5 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -611,12 +611,12 @@ static int cifs_stats_proc_open(struct inode *inode, struct file *file)
 	return single_open(file, cifs_stats_proc_show, NULL);
 }
 
-static const struct file_operations cifs_stats_proc_fops = {
-	.open		= cifs_stats_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= cifs_stats_proc_write,
+static const struct proc_ops cifs_stats_proc_ops = {
+	.proc_open	= cifs_stats_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= cifs_stats_proc_write,
 };
 
 #ifdef CONFIG_CIFS_SMB_DIRECT
@@ -640,12 +640,12 @@ static int name##_open(struct inode *inode, struct file *file) \
 	return single_open(file, name##_proc_show, NULL); \
 } \
 \
-static const struct file_operations cifs_##name##_proc_fops = { \
-	.open		= name##_open, \
-	.read		= seq_read, \
-	.llseek		= seq_lseek, \
-	.release	= single_release, \
-	.write		= name##_write, \
+static const struct proc_ops cifs_##name##_proc_fops = { \
+	.proc_open	= name##_open, \
+	.proc_read	= seq_read, \
+	.proc_lseek	= seq_lseek, \
+	.proc_release	= single_release, \
+	.proc_write	= name##_write, \
 }
 
 PROC_FILE_DEFINE(rdma_readwrite_threshold);
@@ -659,11 +659,11 @@ PROC_FILE_DEFINE(smbd_receive_credit_max);
 #endif
 
 static struct proc_dir_entry *proc_fs_cifs;
-static const struct file_operations cifsFYI_proc_fops;
-static const struct file_operations cifs_lookup_cache_proc_fops;
-static const struct file_operations traceSMB_proc_fops;
-static const struct file_operations cifs_security_flags_proc_fops;
-static const struct file_operations cifs_linux_ext_proc_fops;
+static const struct proc_ops cifsFYI_proc_ops;
+static const struct proc_ops cifs_lookup_cache_proc_ops;
+static const struct proc_ops traceSMB_proc_ops;
+static const struct proc_ops cifs_security_flags_proc_ops;
+static const struct proc_ops cifs_linux_ext_proc_ops;
 
 void
 cifs_proc_init(void)
@@ -678,18 +678,18 @@ cifs_proc_init(void)
 	proc_create_single("open_files", 0400, proc_fs_cifs,
 			cifs_debug_files_proc_show);
 
-	proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_fops);
-	proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_fops);
-	proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_fops);
+	proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_ops);
+	proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_ops);
+	proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_ops);
 	proc_create("LinuxExtensionsEnabled", 0644, proc_fs_cifs,
-		    &cifs_linux_ext_proc_fops);
+		    &cifs_linux_ext_proc_ops);
 	proc_create("SecurityFlags", 0644, proc_fs_cifs,
-		    &cifs_security_flags_proc_fops);
+		    &cifs_security_flags_proc_ops);
 	proc_create("LookupCacheEnabled", 0644, proc_fs_cifs,
-		    &cifs_lookup_cache_proc_fops);
+		    &cifs_lookup_cache_proc_ops);
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
-	proc_create("dfscache", 0644, proc_fs_cifs, &dfscache_proc_fops);
+	proc_create("dfscache", 0644, proc_fs_cifs, &dfscache_proc_ops);
 #endif
 
 #ifdef CONFIG_CIFS_SMB_DIRECT
@@ -774,12 +774,12 @@ static ssize_t cifsFYI_proc_write(struct file *file, const char __user *buffer,
 	return count;
 }
 
-static const struct file_operations cifsFYI_proc_fops = {
-	.open		= cifsFYI_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= cifsFYI_proc_write,
+static const struct proc_ops cifsFYI_proc_ops = {
+	.proc_open	= cifsFYI_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= cifsFYI_proc_write,
 };
 
 static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
@@ -805,12 +805,12 @@ static ssize_t cifs_linux_ext_proc_write(struct file *file,
 	return count;
 }
 
-static const struct file_operations cifs_linux_ext_proc_fops = {
-	.open		= cifs_linux_ext_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= cifs_linux_ext_proc_write,
+static const struct proc_ops cifs_linux_ext_proc_ops = {
+	.proc_open	= cifs_linux_ext_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= cifs_linux_ext_proc_write,
 };
 
 static int cifs_lookup_cache_proc_show(struct seq_file *m, void *v)
@@ -836,12 +836,12 @@ static ssize_t cifs_lookup_cache_proc_write(struct file *file,
 	return count;
 }
 
-static const struct file_operations cifs_lookup_cache_proc_fops = {
-	.open		= cifs_lookup_cache_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= cifs_lookup_cache_proc_write,
+static const struct proc_ops cifs_lookup_cache_proc_ops = {
+	.proc_open	= cifs_lookup_cache_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= cifs_lookup_cache_proc_write,
 };
 
 static int traceSMB_proc_show(struct seq_file *m, void *v)
@@ -867,12 +867,12 @@ static ssize_t traceSMB_proc_write(struct file *file, const char __user *buffer,
 	return count;
 }
 
-static const struct file_operations traceSMB_proc_fops = {
-	.open		= traceSMB_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= traceSMB_proc_write,
+static const struct proc_ops traceSMB_proc_ops = {
+	.proc_open	= traceSMB_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= traceSMB_proc_write,
 };
 
 static int cifs_security_flags_proc_show(struct seq_file *m, void *v)
@@ -978,12 +978,12 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
 	return count;
 }
 
-static const struct file_operations cifs_security_flags_proc_fops = {
-	.open		= cifs_security_flags_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= cifs_security_flags_proc_write,
+static const struct proc_ops cifs_security_flags_proc_ops = {
+	.proc_open	= cifs_security_flags_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= cifs_security_flags_proc_write,
 };
 #else
 inline void cifs_proc_init(void)
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 096a4c1..b87456b 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -156,5 +156,5 @@ extern int cifs_truncate_page(struct address_space *mapping, loff_t from);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.24"
+#define CIFS_VERSION   "2.25"
 #endif				/* _CIFSFS_H */
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 9a384d1..43c1b43 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -8,6 +8,7 @@
 #include <linux/jhash.h>
 #include <linux/ktime.h>
 #include <linux/slab.h>
+#include <linux/proc_fs.h>
 #include <linux/nls.h>
 #include <linux/workqueue.h>
 #include "cifsglob.h"
@@ -211,12 +212,12 @@ static int dfscache_proc_open(struct inode *inode, struct file *file)
 	return single_open(file, dfscache_proc_show, NULL);
 }
 
-const struct file_operations dfscache_proc_fops = {
-	.open		= dfscache_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= dfscache_proc_write,
+const struct proc_ops dfscache_proc_ops = {
+	.proc_open	= dfscache_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= dfscache_proc_write,
 };
 
 #ifdef CONFIG_CIFS_DEBUG2
diff --git a/fs/cifs/dfs_cache.h b/fs/cifs/dfs_cache.h
index 76c7329..99ee44f 100644
--- a/fs/cifs/dfs_cache.h
+++ b/fs/cifs/dfs_cache.h
@@ -24,7 +24,7 @@ struct dfs_cache_tgt_iterator {
 
 extern int dfs_cache_init(void);
 extern void dfs_cache_destroy(void);
-extern const struct file_operations dfscache_proc_fops;
+extern const struct proc_ops dfscache_proc_ops;
 
 extern int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
 			  const struct nls_table *nls_codepage, int remap,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 9b547f7..676e96a 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -113,6 +113,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
 	}
 
 	 /* revalidate if mtime or size have changed */
+	fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
 	if (timespec64_equal(&inode->i_mtime, &fattr->cf_mtime) &&
 	    cifs_i->server_eof == fattr->cf_eof) {
 		cifs_dbg(FYI, "%s: inode %llu is unchanged\n",
@@ -162,6 +163,9 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
 	cifs_revalidate_cache(inode, fattr);
 
 	spin_lock(&inode->i_lock);
+	fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
+	fattr->cf_atime = timestamp_truncate(fattr->cf_atime, inode);
+	fattr->cf_ctime = timestamp_truncate(fattr->cf_ctime, inode);
 	/* we do not want atime to be less than mtime, it broke some apps */
 	if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime) < 0)
 		inode->i_atime = fattr->cf_mtime;
@@ -329,8 +333,7 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
 	fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
 	fattr->cf_uid = cifs_sb->mnt_uid;
 	fattr->cf_gid = cifs_sb->mnt_gid;
-	ktime_get_real_ts64(&fattr->cf_mtime);
-	fattr->cf_mtime = timespec64_trunc(fattr->cf_mtime, sb->s_time_gran);
+	ktime_get_coarse_real_ts64(&fattr->cf_mtime);
 	fattr->cf_atime = fattr->cf_ctime = fattr->cf_mtime;
 	fattr->cf_nlink = 2;
 	fattr->cf_flags = CIFS_FATTR_DFS_REFERRAL;
@@ -609,10 +612,8 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
 
 	if (info->LastAccessTime)
 		fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
-	else {
-		ktime_get_real_ts64(&fattr->cf_atime);
-		fattr->cf_atime = timespec64_trunc(fattr->cf_atime, sb->s_time_gran);
-	}
+	else
+		ktime_get_coarse_real_ts64(&fattr->cf_atime);
 
 	fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
 	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 7edba3e..14f209f 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -312,7 +312,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
 		if (server->tcpStatus != CifsNeedReconnect)
 			break;
 
-		if (--retries)
+		if (retries && --retries)
 			continue;
 
 		/*
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
deleted file mode 100644
index 358ea2e..0000000
--- a/fs/compat_ioctl.c
+++ /dev/null
@@ -1,261 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
- *
- * Copyright (C) 1997-2000  Jakub Jelinek  (jakub@redhat.com)
- * Copyright (C) 1998  Eddie C. Dost  (ecd@skynet.be)
- * Copyright (C) 2001,2002  Andi Kleen, SuSE Labs 
- * Copyright (C) 2003       Pavel Machek (pavel@ucw.cz)
- *
- * These routines maintain argument size conversion between 32bit and 64bit
- * ioctls.
- */
-
-#include <linux/types.h>
-#include <linux/compat.h>
-#include <linux/kernel.h>
-#include <linux/capability.h>
-#include <linux/compiler.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/ioctl.h>
-#include <linux/if.h>
-#include <linux/raid/md_u.h>
-#include <linux/falloc.h>
-#include <linux/file.h>
-#include <linux/ppp-ioctl.h>
-#include <linux/if_pppox.h>
-#include <linux/tty.h>
-#include <linux/vt_kern.h>
-#include <linux/blkdev.h>
-#include <linux/serial.h>
-#include <linux/ctype.h>
-#include <linux/syscalls.h>
-#include <linux/gfp.h>
-#include <linux/cec.h>
-
-#include "internal.h"
-
-#ifdef CONFIG_BLOCK
-#include <linux/cdrom.h>
-#include <linux/fd.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_ioctl.h>
-#include <scsi/sg.h>
-#endif
-
-#include <linux/uaccess.h>
-#include <linux/watchdog.h>
-
-#include <linux/hiddev.h>
-
-
-#include <linux/sort.h>
-
-/*
- * simple reversible transform to make our table more evenly
- * distributed after sorting.
- */
-#define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff)
-
-#define COMPATIBLE_IOCTL(cmd) XFORM((u32)cmd),
-static unsigned int ioctl_pointer[] = {
-#ifdef CONFIG_BLOCK
-/* Big S */
-COMPATIBLE_IOCTL(SCSI_IOCTL_GET_IDLUN)
-COMPATIBLE_IOCTL(SCSI_IOCTL_DOORLOCK)
-COMPATIBLE_IOCTL(SCSI_IOCTL_DOORUNLOCK)
-COMPATIBLE_IOCTL(SCSI_IOCTL_TEST_UNIT_READY)
-COMPATIBLE_IOCTL(SCSI_IOCTL_GET_BUS_NUMBER)
-COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND)
-COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST)
-COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI)
-#endif
-#ifdef CONFIG_BLOCK
-/* SG stuff */
-COMPATIBLE_IOCTL(SG_IO)
-COMPATIBLE_IOCTL(SG_GET_REQUEST_TABLE)
-COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
-COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
-COMPATIBLE_IOCTL(SG_EMULATED_HOST)
-COMPATIBLE_IOCTL(SG_GET_TRANSFORM)
-COMPATIBLE_IOCTL(SG_SET_RESERVED_SIZE)
-COMPATIBLE_IOCTL(SG_GET_RESERVED_SIZE)
-COMPATIBLE_IOCTL(SG_GET_SCSI_ID)
-COMPATIBLE_IOCTL(SG_SET_FORCE_LOW_DMA)
-COMPATIBLE_IOCTL(SG_GET_LOW_DMA)
-COMPATIBLE_IOCTL(SG_SET_FORCE_PACK_ID)
-COMPATIBLE_IOCTL(SG_GET_PACK_ID)
-COMPATIBLE_IOCTL(SG_GET_NUM_WAITING)
-COMPATIBLE_IOCTL(SG_SET_DEBUG)
-COMPATIBLE_IOCTL(SG_GET_SG_TABLESIZE)
-COMPATIBLE_IOCTL(SG_GET_COMMAND_Q)
-COMPATIBLE_IOCTL(SG_SET_COMMAND_Q)
-COMPATIBLE_IOCTL(SG_GET_VERSION_NUM)
-COMPATIBLE_IOCTL(SG_NEXT_CMD_LEN)
-COMPATIBLE_IOCTL(SG_SCSI_RESET)
-COMPATIBLE_IOCTL(SG_GET_REQUEST_TABLE)
-COMPATIBLE_IOCTL(SG_SET_KEEP_ORPHAN)
-COMPATIBLE_IOCTL(SG_GET_KEEP_ORPHAN)
-#endif
-};
-
-/*
- * Convert common ioctl arguments based on their command number
- *
- * Please do not add any code in here. Instead, implement
- * a compat_ioctl operation in the place that handles the
- * ioctl for the native case.
- */
-static long do_ioctl_trans(unsigned int cmd,
-		 unsigned long arg, struct file *file)
-{
-	return -ENOIOCTLCMD;
-}
-
-static int compat_ioctl_check_table(unsigned int xcmd)
-{
-#ifdef CONFIG_BLOCK
-	int i;
-	const int max = ARRAY_SIZE(ioctl_pointer) - 1;
-
-	BUILD_BUG_ON(max >= (1 << 16));
-
-	/* guess initial offset into table, assuming a
-	   normalized distribution */
-	i = ((xcmd >> 16) * max) >> 16;
-
-	/* do linear search up first, until greater or equal */
-	while (ioctl_pointer[i] < xcmd && i < max)
-		i++;
-
-	/* then do linear search down */
-	while (ioctl_pointer[i] > xcmd && i > 0)
-		i--;
-
-	return ioctl_pointer[i] == xcmd;
-#else
-	return 0;
-#endif
-}
-
-COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
-		       compat_ulong_t, arg32)
-{
-	unsigned long arg = arg32;
-	struct fd f = fdget(fd);
-	int error = -EBADF;
-	if (!f.file)
-		goto out;
-
-	/* RED-PEN how should LSM module know it's handling 32bit? */
-	error = security_file_ioctl(f.file, cmd, arg);
-	if (error)
-		goto out_fput;
-
-	switch (cmd) {
-	/* these are never seen by ->ioctl(), no argument or int argument */
-	case FIOCLEX:
-	case FIONCLEX:
-	case FIFREEZE:
-	case FITHAW:
-	case FICLONE:
-		goto do_ioctl;
-	/* these are never seen by ->ioctl(), pointer argument */
-	case FIONBIO:
-	case FIOASYNC:
-	case FIOQSIZE:
-	case FS_IOC_FIEMAP:
-	case FIGETBSZ:
-	case FICLONERANGE:
-	case FIDEDUPERANGE:
-		goto found_handler;
-	/*
-	 * The next group is the stuff handled inside file_ioctl().
-	 * For regular files these never reach ->ioctl(); for
-	 * devices, sockets, etc. they do and one (FIONREAD) is
-	 * even accepted in some cases.  In all those cases
-	 * argument has the same type, so we can handle these
-	 * here, shunting them towards do_vfs_ioctl().
-	 * ->compat_ioctl() will never see any of those.
-	 */
-	/* pointer argument, never actually handled by ->ioctl() */
-	case FIBMAP:
-		goto found_handler;
-	/* handled by some ->ioctl(); always a pointer to int */
-	case FIONREAD:
-		goto found_handler;
-	/* these get messy on amd64 due to alignment differences */
-#if defined(CONFIG_X86_64)
-	case FS_IOC_RESVSP_32:
-	case FS_IOC_RESVSP64_32:
-		error = compat_ioctl_preallocate(f.file, 0, compat_ptr(arg));
-		goto out_fput;
-	case FS_IOC_UNRESVSP_32:
-	case FS_IOC_UNRESVSP64_32:
-		error = compat_ioctl_preallocate(f.file, FALLOC_FL_PUNCH_HOLE,
-				compat_ptr(arg));
-		goto out_fput;
-	case FS_IOC_ZERO_RANGE_32:
-		error = compat_ioctl_preallocate(f.file, FALLOC_FL_ZERO_RANGE,
-				compat_ptr(arg));
-		goto out_fput;
-#else
-	case FS_IOC_RESVSP:
-	case FS_IOC_RESVSP64:
-	case FS_IOC_UNRESVSP:
-	case FS_IOC_UNRESVSP64:
-	case FS_IOC_ZERO_RANGE:
-		goto found_handler;
-#endif
-
-	default:
-		if (f.file->f_op->compat_ioctl) {
-			error = f.file->f_op->compat_ioctl(f.file, cmd, arg);
-			if (error != -ENOIOCTLCMD)
-				goto out_fput;
-		}
-
-		if (!f.file->f_op->unlocked_ioctl)
-			goto do_ioctl;
-		break;
-	}
-
-	if (compat_ioctl_check_table(XFORM(cmd)))
-		goto found_handler;
-
-	error = do_ioctl_trans(cmd, arg, f.file);
-	if (error == -ENOIOCTLCMD)
-		error = -ENOTTY;
-
-	goto out_fput;
-
- found_handler:
-	arg = (unsigned long)compat_ptr(arg);
- do_ioctl:
-	error = do_vfs_ioctl(f.file, fd, cmd, arg);
- out_fput:
-	fdput(f);
- out:
-	return error;
-}
-
-static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
-{
-	unsigned int a, b;
-	a = *(unsigned int *)p;
-	b = *(unsigned int *)q;
-	if (a > b)
-		return 1;
-	if (a < b)
-		return -1;
-	return 0;
-}
-
-static int __init init_sys32_ioctl(void)
-{
-	sort(ioctl_pointer, ARRAY_SIZE(ioctl_pointer), sizeof(*ioctl_pointer),
-		init_sys32_ioctl_cmp, NULL);
-	return 0;
-}
-__initcall(init_sys32_ioctl);
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 680aba9..fd0b5dd 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -76,14 +76,11 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
 	if (ia_valid & ATTR_GID)
 		sd_iattr->ia_gid = iattr->ia_gid;
 	if (ia_valid & ATTR_ATIME)
-		sd_iattr->ia_atime = timestamp_truncate(iattr->ia_atime,
-						      inode);
+		sd_iattr->ia_atime = iattr->ia_atime;
 	if (ia_valid & ATTR_MTIME)
-		sd_iattr->ia_mtime = timestamp_truncate(iattr->ia_mtime,
-						      inode);
+		sd_iattr->ia_mtime = iattr->ia_mtime;
 	if (ia_valid & ATTR_CTIME)
-		sd_iattr->ia_ctime = timestamp_truncate(iattr->ia_ctime,
-						      inode);
+		sd_iattr->ia_ctime = iattr->ia_ctime;
 	if (ia_valid & ATTR_MODE) {
 		umode_t mode = iattr->ia_mode;
 
diff --git a/fs/coredump.c b/fs/coredump.c
index b1ea7df..f8296a8 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -517,7 +517,7 @@ static void wait_for_dump_helpers(struct file *file)
 	pipe_lock(pipe);
 	pipe->readers++;
 	pipe->writers--;
-	wake_up_interruptible_sync(&pipe->wait);
+	wake_up_interruptible_sync(&pipe->rd_wait);
 	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 	pipe_unlock(pipe);
 
@@ -525,7 +525,7 @@ static void wait_for_dump_helpers(struct file *file)
 	 * We actually want wait_event_freezable() but then we need
 	 * to clear TIF_SIGPENDING and improve dump_interrupted().
 	 */
-	wait_event_interruptible(pipe->wait, pipe->readers == 1);
+	wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);
 
 	pipe_lock(pipe);
 	pipe->readers--;
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 2f04024..9123086 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -534,7 +534,7 @@ static int cramfs_read_super(struct super_block *sb, struct fs_context *fc,
 		/* check for wrong endianness */
 		if (super->magic == CRAMFS_MAGIC_WEND) {
 			if (!silent)
-				errorf(fc, "cramfs: wrong endianness");
+				errorfc(fc, "wrong endianness");
 			return -EINVAL;
 		}
 
@@ -546,22 +546,22 @@ static int cramfs_read_super(struct super_block *sb, struct fs_context *fc,
 		mutex_unlock(&read_mutex);
 		if (super->magic != CRAMFS_MAGIC) {
 			if (super->magic == CRAMFS_MAGIC_WEND && !silent)
-				errorf(fc, "cramfs: wrong endianness");
+				errorfc(fc, "wrong endianness");
 			else if (!silent)
-				errorf(fc, "cramfs: wrong magic");
+				errorfc(fc, "wrong magic");
 			return -EINVAL;
 		}
 	}
 
 	/* get feature flags first */
 	if (super->flags & ~CRAMFS_SUPPORTED_FLAGS) {
-		errorf(fc, "cramfs: unsupported filesystem features");
+		errorfc(fc, "unsupported filesystem features");
 		return -EINVAL;
 	}
 
 	/* Check that the root inode is in a sane state */
 	if (!S_ISDIR(super->root.mode)) {
-		errorf(fc, "cramfs: root is not a directory");
+		errorfc(fc, "root is not a directory");
 		return -EINVAL;
 	}
 	/* correct strange, hard-coded permissions of mkcramfs */
@@ -580,12 +580,12 @@ static int cramfs_read_super(struct super_block *sb, struct fs_context *fc,
 	sbi->magic = super->magic;
 	sbi->flags = super->flags;
 	if (root_offset == 0)
-		infof(fc, "cramfs: empty filesystem");
+		infofc(fc, "empty filesystem");
 	else if (!(super->flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
 		 ((root_offset != sizeof(struct cramfs_super)) &&
 		  (root_offset != 512 + sizeof(struct cramfs_super))))
 	{
-		errorf(fc, "cramfs: bad root offset %lu", root_offset);
+		errorfc(fc, "bad root offset %lu", root_offset);
 		return -EINVAL;
 	}
 
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index dede252..634b09d 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -142,18 +142,21 @@ EXPORT_SYMBOL_GPL(debugfs_file_put);
  * We also need to exclude any file that has ways to write or alter it as root
  * can bypass the permissions check.
  */
-static bool debugfs_is_locked_down(struct inode *inode,
-				   struct file *filp,
-				   const struct file_operations *real_fops)
+static int debugfs_locked_down(struct inode *inode,
+			       struct file *filp,
+			       const struct file_operations *real_fops)
 {
 	if ((inode->i_mode & 07777) == 0444 &&
 	    !(filp->f_mode & FMODE_WRITE) &&
 	    !real_fops->unlocked_ioctl &&
 	    !real_fops->compat_ioctl &&
 	    !real_fops->mmap)
-		return false;
+		return 0;
 
-	return security_locked_down(LOCKDOWN_DEBUGFS);
+	if (security_locked_down(LOCKDOWN_DEBUGFS))
+		return -EPERM;
+
+	return 0;
 }
 
 static int open_proxy_open(struct inode *inode, struct file *filp)
@@ -168,7 +171,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
 
 	real_fops = debugfs_real_fops(filp);
 
-	r = debugfs_is_locked_down(inode, filp, real_fops);
+	r = debugfs_locked_down(inode, filp, real_fops);
 	if (r)
 		goto out;
 
@@ -298,7 +301,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
 
 	real_fops = debugfs_real_fops(filp);
 
-	r = debugfs_is_locked_down(inode, filp, real_fops);
+	r = debugfs_locked_down(inode, filp, real_fops);
 	if (r)
 		goto out;
 
@@ -496,10 +499,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_u32_wo, NULL, debugfs_u32_set, "%llu\n");
  * This function will return a pointer to a dentry if it succeeds.  This
  * pointer must be passed to the debugfs_remove() function when the file is
  * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.)  If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.)  If an error occurs, ERR_PTR(-ERROR) will be
  * returned.
  *
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
+ * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
  * be returned.
  */
 struct dentry *debugfs_create_u32(const char *name, umode_t mode,
@@ -581,10 +584,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ulong_wo, NULL, debugfs_ulong_set, "%llu\n");
  * This function will return a pointer to a dentry if it succeeds.  This
  * pointer must be passed to the debugfs_remove() function when the file is
  * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.)  If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.)  If an error occurs, ERR_PTR(-ERROR) will be
  * returned.
  *
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
+ * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
  * be returned.
  */
 struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
@@ -846,10 +849,10 @@ static const struct file_operations fops_bool_wo = {
  * This function will return a pointer to a dentry if it succeeds.  This
  * pointer must be passed to the debugfs_remove() function when the file is
  * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.)  If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.)  If an error occurs, ERR_PTR(-ERROR) will be
  * returned.
  *
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
+ * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
  * be returned.
  */
 struct dentry *debugfs_create_bool(const char *name, umode_t mode,
@@ -899,10 +902,10 @@ static const struct file_operations fops_blob = {
  * This function will return a pointer to a dentry if it succeeds.  This
  * pointer must be passed to the debugfs_remove() function when the file is
  * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.)  If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.)  If an error occurs, ERR_PTR(-ERROR) will be
  * returned.
  *
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
+ * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
  * be returned.
  */
 struct dentry *debugfs_create_blob(const char *name, umode_t mode,
@@ -1091,10 +1094,10 @@ static const struct file_operations fops_regset32 = {
  * This function will return a pointer to a dentry if it succeeds.  This
  * pointer must be passed to the debugfs_remove() function when the file is
  * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.)  If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.)  If an error occurs, ERR_PTR(-ERROR) will be
  * returned.
  *
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
+ * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
  * be returned.
  */
 struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
@@ -1158,4 +1161,3 @@ struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
 				   &debugfs_devm_entry_ops);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_devm_seqfile);
-
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index f4d8df5..e742dfc 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -332,7 +332,10 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
 		parent = debugfs_mount->mnt_root;
 
 	inode_lock(d_inode(parent));
-	dentry = lookup_one_len(name, parent, strlen(name));
+	if (unlikely(IS_DEADDIR(d_inode(parent))))
+		dentry = ERR_PTR(-ENOENT);
+	else
+		dentry = lookup_one_len(name, parent, strlen(name));
 	if (!IS_ERR(dentry) && d_really_is_positive(dentry)) {
 		if (d_is_dir(dentry))
 			pr_err("Directory '%s' with parent '%s' already present!\n",
@@ -423,7 +426,7 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
  * This function will return a pointer to a dentry if it succeeds.  This
  * pointer must be passed to the debugfs_remove() function when the file is
  * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.)  If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.)  If an error occurs, ERR_PTR(-ERROR) will be
  * returned.
  *
  * If debugfs is not enabled in the kernel, the value -%ENODEV will be
@@ -502,7 +505,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe);
  * This function will return a pointer to a dentry if it succeeds.  This
  * pointer must be passed to the debugfs_remove() function when the file is
  * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.)  If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.)  If an error occurs, ERR_PTR(-ERROR) will be
  * returned.
  *
  * If debugfs is not enabled in the kernel, the value -%ENODEV will be
@@ -534,7 +537,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
  * This function will return a pointer to a dentry if it succeeds.  This
  * pointer must be passed to the debugfs_remove() function when the file is
  * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.)  If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.)  If an error occurs, ERR_PTR(-ERROR) will be
  * returned.
  *
  * If debugfs is not enabled in the kernel, the value -%ENODEV will be
@@ -627,7 +630,7 @@ EXPORT_SYMBOL(debugfs_create_automount);
  * This function will return a pointer to a dentry if it succeeds.  This
  * pointer must be passed to the debugfs_remove() function when the symbolic
  * link is to be removed (no automatic cleanup happens if your module is
- * unloaded, you are responsible here.)  If an error occurs, %ERR_PTR(-ERROR)
+ * unloaded, you are responsible here.)  If an error occurs, ERR_PTR(-ERROR)
  * will be returned.
  *
  * If debugfs is not enabled in the kernel, the value -%ENODEV will be
@@ -681,62 +684,15 @@ static void __debugfs_file_removed(struct dentry *dentry)
 		wait_for_completion(&fsd->active_users_drained);
 }
 
-static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
+static void remove_one(struct dentry *victim)
 {
-	int ret = 0;
-
-	if (simple_positive(dentry)) {
-		dget(dentry);
-		if (d_is_dir(dentry)) {
-			ret = simple_rmdir(d_inode(parent), dentry);
-			if (!ret)
-				fsnotify_rmdir(d_inode(parent), dentry);
-		} else {
-			simple_unlink(d_inode(parent), dentry);
-			fsnotify_unlink(d_inode(parent), dentry);
-		}
-		if (!ret)
-			d_delete(dentry);
-		if (d_is_reg(dentry))
-			__debugfs_file_removed(dentry);
-		dput(dentry);
-	}
-	return ret;
+	if (d_is_reg(victim))
+		__debugfs_file_removed(victim);
+	simple_release_fs(&debugfs_mount, &debugfs_mount_count);
 }
 
 /**
- * debugfs_remove - removes a file or directory from the debugfs filesystem
- * @dentry: a pointer to a the dentry of the file or directory to be
- *          removed.  If this parameter is NULL or an error value, nothing
- *          will be done.
- *
- * This function removes a file or directory in debugfs that was previously
- * created with a call to another debugfs function (like
- * debugfs_create_file() or variants thereof.)
- *
- * This function is required to be called in order for the file to be
- * removed, no automatic cleanup of files will happen when a module is
- * removed, you are responsible here.
- */
-void debugfs_remove(struct dentry *dentry)
-{
-	struct dentry *parent;
-	int ret;
-
-	if (IS_ERR_OR_NULL(dentry))
-		return;
-
-	parent = dentry->d_parent;
-	inode_lock(d_inode(parent));
-	ret = __debugfs_remove(dentry, parent);
-	inode_unlock(d_inode(parent));
-	if (!ret)
-		simple_release_fs(&debugfs_mount, &debugfs_mount_count);
-}
-EXPORT_SYMBOL_GPL(debugfs_remove);
-
-/**
- * debugfs_remove_recursive - recursively removes a directory
+ * debugfs_remove - recursively removes a directory
  * @dentry: a pointer to the dentry of the directory to be removed.  If this
  *          parameter is NULL or an error value, nothing will be done.
  *
@@ -748,65 +704,16 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
  * removed, no automatic cleanup of files will happen when a module is
  * removed, you are responsible here.
  */
-void debugfs_remove_recursive(struct dentry *dentry)
+void debugfs_remove(struct dentry *dentry)
 {
-	struct dentry *child, *parent;
-
 	if (IS_ERR_OR_NULL(dentry))
 		return;
 
-	parent = dentry;
- down:
-	inode_lock(d_inode(parent));
- loop:
-	/*
-	 * The parent->d_subdirs is protected by the d_lock. Outside that
-	 * lock, the child can be unlinked and set to be freed which can
-	 * use the d_u.d_child as the rcu head and corrupt this list.
-	 */
-	spin_lock(&parent->d_lock);
-	list_for_each_entry(child, &parent->d_subdirs, d_child) {
-		if (!simple_positive(child))
-			continue;
-
-		/* perhaps simple_empty(child) makes more sense */
-		if (!list_empty(&child->d_subdirs)) {
-			spin_unlock(&parent->d_lock);
-			inode_unlock(d_inode(parent));
-			parent = child;
-			goto down;
-		}
-
-		spin_unlock(&parent->d_lock);
-
-		if (!__debugfs_remove(child, parent))
-			simple_release_fs(&debugfs_mount, &debugfs_mount_count);
-
-		/*
-	 * The parent->d_lock protects against the child being unlinked
-		 * from d_subdirs. When releasing the parent->d_lock we can
-		 * no longer trust that the next pointer is valid.
-		 * Restart the loop. We'll skip this one with the
-		 * simple_positive() check.
-		 */
-		goto loop;
-	}
-	spin_unlock(&parent->d_lock);
-
-	inode_unlock(d_inode(parent));
-	child = parent;
-	parent = parent->d_parent;
-	inode_lock(d_inode(parent));
-
-	if (child != dentry)
-		/* go up */
-		goto loop;
-
-	if (!__debugfs_remove(child, parent))
-		simple_release_fs(&debugfs_mount, &debugfs_mount_count);
-	inode_unlock(d_inode(parent));
+	simple_pin_fs(&debug_fs_type, &debugfs_mount, &debugfs_mount_count);
+	simple_recursive_removal(dentry, remove_one);
+	simple_release_fs(&debugfs_mount, &debugfs_mount_count);
 }
-EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
+EXPORT_SYMBOL_GPL(debugfs_remove);
 
 /**
  * debugfs_rename - rename a file/directory in the debugfs filesystem
@@ -906,4 +813,3 @@ static int __init debugfs_init(void)
 	return retval;
 }
 core_initcall(debugfs_init);
-
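
The kerneldoc retained above still spells out the usage contract: whatever debugfs_create_*() returns must eventually be handed back to debugfs_remove(), which after this change tears down the whole subtree in one call. A minimal, hedged module sketch of that contract follows; the "example" names are made up for illustration and are not part of this patch.

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/module.h>

/* Hedged illustration only -- not part of the patch above. */
static struct dentry *example_dir;
static u32 example_value;

static int __init example_init(void)
{
	example_dir = debugfs_create_dir("example", NULL);
	if (IS_ERR(example_dir))
		return PTR_ERR(example_dir);

	/* files created below the directory need no separate bookkeeping */
	debugfs_create_u32("value", 0644, example_dir, &example_value);
	return 0;
}

static void __exit example_exit(void)
{
	/* one call now removes "example/" and everything under it */
	debugfs_remove(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
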
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 3951d39..cdfaf4f 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -1035,7 +1035,7 @@ static void sctp_connect_to_sock(struct connection *con)
 	int result;
 	int addr_len;
 	struct socket *sock;
-	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
+	struct __kernel_sock_timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 
 	if (con->nodeid == 0) {
 		log_print("attempt to connect sock 0 foiled");
@@ -1087,12 +1087,12 @@ static void sctp_connect_to_sock(struct connection *con)
 	 * since the O_NONBLOCK argument to connect() does not work here,
 	 * we should restore the default value of this attribute.
 	 */
-	kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_OLD, (char *)&tv,
+	kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_NEW, (char *)&tv,
 			  sizeof(tv));
 	result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
 				   0);
 	memset(&tv, 0, sizeof(tv));
-	kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_OLD, (char *)&tv,
+	kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_NEW, (char *)&tv,
 			  sizeof(tv));
 
 	if (result == -EINPROGRESS)
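
The workaround described in the comment above (connect() ignoring O_NONBLOCK for kernel-side SCTP sockets) boils down to bracketing the connect with a temporary send timeout, now expressed with the y2038-safe timeval. A hedged, self-contained sketch of that pattern; the helper name is hypothetical and the includes mirror what lowcomms.c already pulls in.

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/time_types.h>

/* Illustrative sketch only -- mirrors the pattern in sctp_connect_to_sock(). */
static int connect_with_timeout(struct socket *sock, struct sockaddr *addr,
				int addr_len)
{
	struct __kernel_sock_timeval tv = { .tv_sec = 5, .tv_usec = 0 };
	int result;

	/* bound the time connect() may block while sending */
	kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_NEW,
			  (char *)&tv, sizeof(tv));

	result = sock->ops->connect(sock, addr, addr_len, 0);

	/* restore the default (no timeout) once the attempt has been made */
	memset(&tv, 0, sizeof(tv));
	kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_NEW,
			  (char *)&tv, sizeof(tv));

	return result;
}
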
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index cffa0c1..019572c 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -524,16 +524,12 @@ static int ecryptfs_write_end(struct file *file,
 
 static sector_t ecryptfs_bmap(struct address_space *mapping, sector_t block)
 {
-	int rc = 0;
-	struct inode *inode;
-	struct inode *lower_inode;
+	struct inode *lower_inode = ecryptfs_inode_to_lower(mapping->host);
+	int ret = bmap(lower_inode, &block);
 
-	inode = (struct inode *)mapping->host;
-	lower_inode = ecryptfs_inode_to_lower(inode);
-	if (lower_inode->i_mapping->a_ops->bmap)
-		rc = lower_inode->i_mapping->a_ops->bmap(lower_inode->i_mapping,
-							 block);
-	return rc;
+	if (ret)
+		return 0;
+	return block;
 }
 
 const struct address_space_operations ecryptfs_aops = {
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 2890a67..5779a15 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -306,24 +306,22 @@ static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
 	}
 
 	src = kmap_atomic(*rq->in);
-	if (!rq->out[0]) {
-		dst = NULL;
-	} else {
+	if (rq->out[0]) {
 		dst = kmap_atomic(rq->out[0]);
 		memcpy(dst + rq->pageofs_out, src, righthalf);
+		kunmap_atomic(dst);
 	}
 
-	if (rq->out[1] == *rq->in) {
-		memmove(src, src + righthalf, rq->pageofs_out);
-	} else if (nrpages_out == 2) {
-		if (dst)
-			kunmap_atomic(dst);
+	if (nrpages_out == 2) {
 		DBG_BUGON(!rq->out[1]);
-		dst = kmap_atomic(rq->out[1]);
-		memcpy(dst, src + righthalf, rq->pageofs_out);
+		if (rq->out[1] == *rq->in) {
+			memmove(src, src + righthalf, rq->pageofs_out);
+		} else {
+			dst = kmap_atomic(rq->out[1]);
+			memcpy(dst, src + righthalf, rq->pageofs_out);
+			kunmap_atomic(dst);
+		}
 	}
-	if (dst)
-		kunmap_atomic(dst);
 	kunmap_atomic(src);
 	return 0;
 }
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 1ed5bef..c4c6dcd 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -401,9 +401,9 @@ static inline void *erofs_get_pcpubuf(unsigned int pagenr)
 #ifdef CONFIG_EROFS_FS_ZIP
 int erofs_workgroup_put(struct erofs_workgroup *grp);
 struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
-					     pgoff_t index, bool *tag);
+					     pgoff_t index);
 int erofs_register_workgroup(struct super_block *sb,
-			     struct erofs_workgroup *grp, bool tag);
+			     struct erofs_workgroup *grp);
 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
 void erofs_shrinker_register(struct super_block *sb);
 void erofs_shrinker_unregister(struct super_block *sb);
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 1e8e145..fddc505 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -59,7 +59,7 @@ static int erofs_workgroup_get(struct erofs_workgroup *grp)
 }
 
 struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
-					     pgoff_t index, bool *tag)
+					     pgoff_t index)
 {
 	struct erofs_sb_info *sbi = EROFS_SB(sb);
 	struct erofs_workgroup *grp;
@@ -68,9 +68,6 @@ struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
 	rcu_read_lock();
 	grp = radix_tree_lookup(&sbi->workstn_tree, index);
 	if (grp) {
-		*tag = xa_pointer_tag(grp);
-		grp = xa_untag_pointer(grp);
-
 		if (erofs_workgroup_get(grp)) {
 			/* prefer to relax rcu read side */
 			rcu_read_unlock();
@@ -84,8 +81,7 @@ struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
 }
 
 int erofs_register_workgroup(struct super_block *sb,
-			     struct erofs_workgroup *grp,
-			     bool tag)
+			     struct erofs_workgroup *grp)
 {
 	struct erofs_sb_info *sbi;
 	int err;
@@ -103,8 +99,6 @@ int erofs_register_workgroup(struct super_block *sb,
 	sbi = EROFS_SB(sb);
 	xa_lock(&sbi->workstn_tree);
 
-	grp = xa_tag_pointer(grp, tag);
-
 	/*
 	 * Bump up reference count before making this workgroup
 	 * visible to other users in order to avoid potential UAF
@@ -175,8 +169,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 	 * however in order to avoid some race conditions, add a
 	 * DBG_BUGON to observe this in advance.
 	 */
-	DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
-						     grp->index)) != grp);
+	DBG_BUGON(radix_tree_delete(&sbi->workstn_tree, grp->index) != grp);
 
 	/*
 	 * If managed cache is on, last refcount should indicate
@@ -201,7 +194,7 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
 				       batch, first_index, PAGEVEC_SIZE);
 
 	for (i = 0; i < found; ++i) {
-		struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
+		struct erofs_workgroup *grp = batch[i];
 
 		first_index = grp->index + 1;
 
diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h
index 3585b84..50966f1 100644
--- a/fs/erofs/xattr.h
+++ b/fs/erofs/xattr.h
@@ -46,18 +46,19 @@ extern const struct xattr_handler erofs_xattr_security_handler;
 
 static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)
 {
-static const struct xattr_handler *xattr_handler_map[] = {
-	[EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
+	static const struct xattr_handler *xattr_handler_map[] = {
+		[EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
 #ifdef CONFIG_EROFS_FS_POSIX_ACL
-	[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
-	[EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
-		&posix_acl_default_xattr_handler,
+		[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] =
+			&posix_acl_access_xattr_handler,
+		[EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
+			&posix_acl_default_xattr_handler,
 #endif
-	[EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
+		[EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
 #ifdef CONFIG_EROFS_FS_SECURITY
-	[EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
+		[EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
 #endif
-};
+	};
 
 	return idx && idx < ARRAY_SIZE(xattr_handler_map) ?
 		xattr_handler_map[idx] : NULL;
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index ca99425..80e47f0 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -345,9 +345,8 @@ static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
 	struct z_erofs_pcluster *pcl;
 	struct z_erofs_collection *cl;
 	unsigned int length;
-	bool tag;
 
-	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
+	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
 	if (!grp)
 		return -ENOENT;
 
@@ -438,7 +437,7 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
 	 */
 	mutex_trylock(&cl->lock);
 
-	err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
+	err = erofs_register_workgroup(inode->i_sb, &pcl->obj);
 	if (err) {
 		mutex_unlock(&cl->lock);
 		kmem_cache_free(pcluster_cachep, pcl);
@@ -1149,21 +1148,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 	qtail[JQ_BYPASS] = &pcl->next;
 }
 
-static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
-				       unsigned int nr_bios, bool force_fg)
-{
-	/*
-	 * although background is preferred, no one is pending for submission.
-	 * don't issue workqueue for decompression but drop it directly instead.
-	 */
-	if (force_fg || nr_bios)
-		return false;
-
-	kvfree(q[JQ_SUBMIT]);
-	return true;
-}
-
-static bool z_erofs_submit_queue(struct super_block *sb,
+static void z_erofs_submit_queue(struct super_block *sb,
 				 z_erofs_next_pcluster_t owned_head,
 				 struct list_head *pagepool,
 				 struct z_erofs_decompressqueue *fgq,
@@ -1172,19 +1157,12 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
 	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
-	struct bio *bio;
 	void *bi_private;
 	/* since bio will be NULL, no need to initialize last_index */
 	pgoff_t uninitialized_var(last_index);
-	bool force_submit = false;
-	unsigned int nr_bios;
+	unsigned int nr_bios = 0;
+	struct bio *bio = NULL;
 
-	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
-		return false;
-
-	force_submit = false;
-	bio = NULL;
-	nr_bios = 0;
 	bi_private = jobqueueset_init(sb, q, fgq, force_fg);
 	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
 	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
@@ -1194,11 +1172,9 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 
 	do {
 		struct z_erofs_pcluster *pcl;
-		unsigned int clusterpages;
-		pgoff_t first_index;
-		struct page *page;
-		unsigned int i = 0, bypass = 0;
-		int err;
+		pgoff_t cur, end;
+		unsigned int i = 0;
+		bool bypass = true;
 
 		/* no possible 'owned_head' equals the following */
 		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
@@ -1206,55 +1182,50 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 
 		pcl = container_of(owned_head, struct z_erofs_pcluster, next);
 
-		clusterpages = BIT(pcl->clusterbits);
+		cur = pcl->obj.index;
+		end = cur + BIT(pcl->clusterbits);
 
 		/* close the main owned chain at first */
 		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
 				     Z_EROFS_PCLUSTER_TAIL_CLOSED);
 
-		first_index = pcl->obj.index;
-		force_submit |= (first_index != last_index + 1);
+		do {
+			struct page *page;
+			int err;
 
-repeat:
-		page = pickup_page_for_submission(pcl, i, pagepool,
-						  MNGD_MAPPING(sbi),
-						  GFP_NOFS);
-		if (!page) {
-			force_submit = true;
-			++bypass;
-			goto skippage;
-		}
+			page = pickup_page_for_submission(pcl, i++, pagepool,
+							  MNGD_MAPPING(sbi),
+							  GFP_NOFS);
+			if (!page)
+				continue;
 
-		if (bio && force_submit) {
+			if (bio && cur != last_index + 1) {
 submit_bio_retry:
-			submit_bio(bio);
-			bio = NULL;
-		}
+				submit_bio(bio);
+				bio = NULL;
+			}
 
-		if (!bio) {
-			bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+			if (!bio) {
+				bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 
-			bio->bi_end_io = z_erofs_decompressqueue_endio;
-			bio_set_dev(bio, sb->s_bdev);
-			bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
-				LOG_SECTORS_PER_BLOCK;
-			bio->bi_private = bi_private;
-			bio->bi_opf = REQ_OP_READ;
+				bio->bi_end_io = z_erofs_decompressqueue_endio;
+				bio_set_dev(bio, sb->s_bdev);
+				bio->bi_iter.bi_sector = (sector_t)cur <<
+					LOG_SECTORS_PER_BLOCK;
+				bio->bi_private = bi_private;
+				bio->bi_opf = REQ_OP_READ;
+				++nr_bios;
+			}
 
-			++nr_bios;
-		}
+			err = bio_add_page(bio, page, PAGE_SIZE, 0);
+			if (err < PAGE_SIZE)
+				goto submit_bio_retry;
 
-		err = bio_add_page(bio, page, PAGE_SIZE, 0);
-		if (err < PAGE_SIZE)
-			goto submit_bio_retry;
+			last_index = cur;
+			bypass = false;
+		} while (++cur < end);
 
-		force_submit = false;
-		last_index = first_index + i;
-skippage:
-		if (++i < clusterpages)
-			goto repeat;
-
-		if (bypass < clusterpages)
+		if (!bypass)
 			qtail[JQ_SUBMIT] = &pcl->next;
 		else
 			move_to_bypass_jobqueue(pcl, qtail, owned_head);
@@ -1263,11 +1234,15 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 	if (bio)
 		submit_bio(bio);
 
-	if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg))
-		return true;
-
+	/*
+	 * although background is preferred, no one is pending for submission.
+	 * don't issue workqueue for decompression but drop it directly instead.
+	 */
+	if (!*force_fg && !nr_bios) {
+		kvfree(q[JQ_SUBMIT]);
+		return;
+	}
 	z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
-	return true;
 }
 
 static void z_erofs_runqueue(struct super_block *sb,
@@ -1276,9 +1251,9 @@ static void z_erofs_runqueue(struct super_block *sb,
 {
 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
-	if (!z_erofs_submit_queue(sb, clt->owned_head,
-				  pagepool, io, &force_fg))
+	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
 		return;
+	z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg);
 
 	/* handle bypass queue (no i/o pclusters) immediately */
 	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 8aa0ea8..78e41c7 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -24,6 +24,8 @@
 #include <linux/seq_file.h>
 #include <linux/idr.h>
 
+DEFINE_PER_CPU(int, eventfd_wake_count);
+
 static DEFINE_IDA(eventfd_ida);
 
 struct eventfd_ctx {
@@ -60,12 +62,25 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 {
 	unsigned long flags;
 
+	/*
+	 * Deadlock or stack overflow issues can happen if we recurse here
+	 * through waitqueue wakeup handlers. If the caller uses potentially
+	 * nested waitqueues with custom wakeup handlers, then it should
+	 * check eventfd_signal_count() before calling this function. If
+	 * it returns true, the eventfd_signal() call should be deferred to a
+	 * safe context.
+	 */
+	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+		return 0;
+
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
+	this_cpu_inc(eventfd_wake_count);
 	if (ULLONG_MAX - ctx->count < n)
 		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+	this_cpu_dec(eventfd_wake_count);
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;
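
The recursion guard added above implies a calling convention: code running inside a waitqueue wakeup handler should test eventfd_signal_count() and, if it reports recursion, defer the signal to a safe context. A hedged sketch of one way a caller might do that; the "deferred_signal" wiring and names are hypothetical and assume the work item was set up with INIT_WORK().

#include <linux/eventfd.h>
#include <linux/workqueue.h>

/* Hypothetical caller-side structure; not part of this patch. */
struct deferred_signal {
	struct eventfd_ctx *evfd;
	struct work_struct work;	/* INIT_WORK(&work, deferred_signal_fn) at setup */
};

static void deferred_signal_fn(struct work_struct *work)
{
	struct deferred_signal *ds =
		container_of(work, struct deferred_signal, work);

	eventfd_signal(ds->evfd, 1);	/* ordinary process context, safe */
}

static void notify(struct deferred_signal *ds)
{
	if (eventfd_signal_count())	/* already inside a wakeup handler */
		schedule_work(&ds->work);
	else
		eventfd_signal(ds->evfd, 1);
}
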
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 67a39503..b041b66 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -354,12 +354,6 @@ static inline struct epitem *ep_item_from_epqueue(poll_table *p)
 	return container_of(p, struct ep_pqueue, pt)->epi;
 }
 
-/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
-static inline int ep_op_has_event(int op)
-{
-	return op != EPOLL_CTL_DEL;
-}
-
 /* Initialize the poll safe wake up structure */
 static void ep_nested_calls_init(struct nested_calls *ncalls)
 {
@@ -2074,27 +2068,28 @@ SYSCALL_DEFINE1(epoll_create, int, size)
 	return do_epoll_create(0);
 }
 
-/*
- * The following function implements the controller interface for
- * the eventpoll file that enables the insertion/removal/change of
- * file descriptors inside the interest set.
- */
-SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
-		struct epoll_event __user *, event)
+static inline int epoll_mutex_lock(struct mutex *mutex, int depth,
+				   bool nonblock)
+{
+	if (!nonblock) {
+		mutex_lock_nested(mutex, depth);
+		return 0;
+	}
+	if (mutex_trylock(mutex))
+		return 0;
+	return -EAGAIN;
+}
+
+int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
+		 bool nonblock)
 {
 	int error;
 	int full_check = 0;
 	struct fd f, tf;
 	struct eventpoll *ep;
 	struct epitem *epi;
-	struct epoll_event epds;
 	struct eventpoll *tep = NULL;
 
-	error = -EFAULT;
-	if (ep_op_has_event(op) &&
-	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
-		goto error_return;
-
 	error = -EBADF;
 	f = fdget(epfd);
 	if (!f.file)
@@ -2112,7 +2107,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 
 	/* Check if EPOLLWAKEUP is allowed */
 	if (ep_op_has_event(op))
-		ep_take_care_of_epollwakeup(&epds);
+		ep_take_care_of_epollwakeup(epds);
 
 	/*
 	 * We have to check that the file structure underneath the file descriptor
@@ -2128,11 +2123,11 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	 * so EPOLLEXCLUSIVE is not allowed for a EPOLL_CTL_MOD operation.
 	 * Also, we do not currently supported nested exclusive wakeups.
 	 */
-	if (ep_op_has_event(op) && (epds.events & EPOLLEXCLUSIVE)) {
+	if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) {
 		if (op == EPOLL_CTL_MOD)
 			goto error_tgt_fput;
 		if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
-				(epds.events & ~EPOLLEXCLUSIVE_OK_BITS)))
+				(epds->events & ~EPOLLEXCLUSIVE_OK_BITS)))
 			goto error_tgt_fput;
 	}
 
@@ -2157,13 +2152,17 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	 * deep wakeup paths from forming in parallel through multiple
 	 * EPOLL_CTL_ADD operations.
 	 */
-	mutex_lock_nested(&ep->mtx, 0);
+	error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
+	if (error)
+		goto error_tgt_fput;
 	if (op == EPOLL_CTL_ADD) {
 		if (!list_empty(&f.file->f_ep_links) ||
 						is_file_epoll(tf.file)) {
-			full_check = 1;
 			mutex_unlock(&ep->mtx);
-			mutex_lock(&epmutex);
+			error = epoll_mutex_lock(&epmutex, 0, nonblock);
+			if (error)
+				goto error_tgt_fput;
+			full_check = 1;
 			if (is_file_epoll(tf.file)) {
 				error = -ELOOP;
 				if (ep_loop_check(ep, tf.file) != 0) {
@@ -2173,10 +2172,19 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 			} else
 				list_add(&tf.file->f_tfile_llink,
 							&tfile_check_list);
-			mutex_lock_nested(&ep->mtx, 0);
+			error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
+			if (error) {
+out_del:
+				list_del(&tf.file->f_tfile_llink);
+				goto error_tgt_fput;
+			}
 			if (is_file_epoll(tf.file)) {
 				tep = tf.file->private_data;
-				mutex_lock_nested(&tep->mtx, 1);
+				error = epoll_mutex_lock(&tep->mtx, 1, nonblock);
+				if (error) {
+					mutex_unlock(&ep->mtx);
+					goto out_del;
+				}
 			}
 		}
 	}
@@ -2192,8 +2200,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	switch (op) {
 	case EPOLL_CTL_ADD:
 		if (!epi) {
-			epds.events |= EPOLLERR | EPOLLHUP;
-			error = ep_insert(ep, &epds, tf.file, fd, full_check);
+			epds->events |= EPOLLERR | EPOLLHUP;
+			error = ep_insert(ep, epds, tf.file, fd, full_check);
 		} else
 			error = -EEXIST;
 		if (full_check)
@@ -2208,8 +2216,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	case EPOLL_CTL_MOD:
 		if (epi) {
 			if (!(epi->event.events & EPOLLEXCLUSIVE)) {
-				epds.events |= EPOLLERR | EPOLLHUP;
-				error = ep_modify(ep, epi, &epds);
+				epds->events |= EPOLLERR | EPOLLHUP;
+				error = ep_modify(ep, epi, epds);
 			}
 		} else
 			error = -ENOENT;
@@ -2232,6 +2240,23 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 }
 
 /*
+ * The following function implements the controller interface for
+ * the eventpoll file that enables the insertion/removal/change of
+ * file descriptors inside the interest set.
+ */
+SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+		struct epoll_event __user *, event)
+{
+	struct epoll_event epds;
+
+	if (ep_op_has_event(op) &&
+	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
+		return -EFAULT;
+
+	return do_epoll_ctl(epfd, op, fd, &epds, false);
+}
+
+/*
  * Implement the event wait interface for the eventpoll file. It is the kernel
  * part of the user space epoll_wait(2).
  */
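
For reference, the user-space side of the controller interface described in the comment above is the familiar epoll_ctl(2)/epoll_wait(2) pair; a minimal, hedged example with error handling trimmed for brevity.

#include <sys/epoll.h>
#include <unistd.h>

/* Illustration only: add one descriptor to the interest set and wait. */
static int wait_readable(int fd)
{
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };
	struct epoll_event ready[8];
	int epfd, n;

	epfd = epoll_create1(0);
	if (epfd < 0)
		return -1;

	if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0) {
		close(epfd);
		return -1;
	}

	n = epoll_wait(epfd, ready, 8, 1000);	/* wait up to one second */
	close(epfd);
	return n;	/* >0 ready, 0 timeout, <0 error */
}
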
diff --git a/fs/exec.c b/fs/exec.c
index 74d88da..db17be5 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -272,7 +272,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
 		goto err;
 
 	mm->stack_vm = mm->total_vm = 1;
-	arch_bprm_mm_init(mm, vma);
 	up_write(&mm->mmap_sem);
 	bprm->p = vma->vm_end - sizeof(void *);
 	return 0;
@@ -761,6 +760,11 @@ int setup_arg_pages(struct linux_binprm *bprm,
 		goto out_unlock;
 	BUG_ON(prev != vma);
 
+	if (unlikely(vm_flags & VM_EXEC)) {
+		pr_warn_once("process '%pD4' started with executable stack\n",
+			     bprm->file);
+	}
+
 	/* Move stack pages down in memory. */
 	if (stack_shift) {
 		ret = shift_arg_pages(vma, stack_shift);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index bcffe25..4a4ab68 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1073,9 +1073,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
 
 	if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
 		goto cantfind_ext2;
- 	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
- 				le32_to_cpu(es->s_first_data_block) - 1)
- 					/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
+	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
+				le32_to_cpu(es->s_first_data_block) - 1)
+					/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
 	db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
 		   EXT2_DESC_PER_BLOCK(sb);
 	sbi->s_group_desc = kmalloc_array (db_count,
@@ -1138,6 +1138,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
 				ext2_count_dirs(sb), GFP_KERNEL);
 	}
 	if (err) {
+		ret = err;
 		ext2_msg(sb, KERN_ERR, "error: insufficient memory");
 		goto failed_mount3;
 	}
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index db9bfa0..2a592e3 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -4,12 +4,7 @@
 # kernels after the removal of ext3 driver.
 config EXT3_FS
 	tristate "The Extended 3 (ext3) filesystem"
-	# These must match EXT4_FS selects...
 	select EXT4_FS
-	select JBD2
-	select CRC16
-	select CRYPTO
-	select CRYPTO_CRC32C
 	help
 	  This config option is here only for backward compatibility. ext3
 	  filesystem is now handled by the ext4 driver.
@@ -33,7 +28,6 @@
 
 config EXT4_FS
 	tristate "The Extended 4 (ext4) filesystem"
-	# Please update EXT3_FS selects when changing these
 	select JBD2
 	select CRC16
 	select CRYPTO
@@ -109,7 +103,7 @@
 		echo 1 > /sys/module/ext4/parameters/mballoc_debug
 
 config EXT4_KUNIT_TESTS
-	bool "KUnit tests for ext4"
+	tristate "KUnit tests for ext4"
 	select EXT4_FS
 	depends on KUNIT
 	help
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 840b91d..4ccb3c9 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -13,5 +13,6 @@
 
 ext4-$(CONFIG_EXT4_FS_POSIX_ACL)	+= acl.o
 ext4-$(CONFIG_EXT4_FS_SECURITY)		+= xattr_security.o
-ext4-$(CONFIG_EXT4_KUNIT_TESTS)		+= inode-test.o
+ext4-inode-test-objs			+= inode-test.o
+obj-$(CONFIG_EXT4_KUNIT_TESTS)		+= ext4-inode-test.o
 ext4-$(CONFIG_FS_VERITY)		+= verity.o
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 0b202e0..5f993a4 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -371,7 +371,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
 	if (buffer_verified(bh))
 		goto verified;
 	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
-			desc, bh))) {
+						    desc, bh) ||
+		     ext4_simulate_fail(sb, EXT4_SIM_BBITMAP_CRC))) {
 		ext4_unlock_group(sb, block_group);
 		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
 		ext4_mark_group_bitmap_corrupted(sb, block_group,
@@ -505,7 +506,9 @@ int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
 	if (!desc)
 		return -EFSCORRUPTED;
 	wait_on_buffer(bh);
+	ext4_simulate_fail_bh(sb, bh, EXT4_SIM_BBITMAP_EIO);
 	if (!buffer_uptodate(bh)) {
+		ext4_set_errno(sb, EIO);
 		ext4_error(sb, "Cannot read block bitmap - "
 			   "block_group = %u, block_bitmap = %llu",
 			   block_group, (unsigned long long) bh->b_blocknr);
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 4e09327..1f34074 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -462,7 +462,6 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
 	new_fn->name_len = ent_name->len;
 	new_fn->file_type = dirent->file_type;
 	memcpy(new_fn->name, ent_name->name, ent_name->len);
-	new_fn->name[ent_name->len] = 0;
 
 	while (*p) {
 		parent = *p;
@@ -672,9 +671,11 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len,
 			  const char *str, const struct qstr *name)
 {
 	struct qstr qstr = {.name = str, .len = len };
-	struct inode *inode = dentry->d_parent->d_inode;
+	const struct dentry *parent = READ_ONCE(dentry->d_parent);
+	const struct inode *inode = READ_ONCE(parent->d_inode);
 
-	if (!IS_CASEFOLDED(inode) || !EXT4_SB(inode->i_sb)->s_encoding) {
+	if (!inode || !IS_CASEFOLDED(inode) ||
+	    !EXT4_SB(inode->i_sb)->s_encoding) {
 		if (len != name->len)
 			return -1;
 		return memcmp(str, name->name, len);
@@ -687,10 +688,11 @@ static int ext4_d_hash(const struct dentry *dentry, struct qstr *str)
 {
 	const struct ext4_sb_info *sbi = EXT4_SB(dentry->d_sb);
 	const struct unicode_map *um = sbi->s_encoding;
+	const struct inode *inode = READ_ONCE(dentry->d_inode);
 	unsigned char *norm;
 	int len, ret = 0;
 
-	if (!IS_CASEFOLDED(dentry->d_inode) || !um)
+	if (!inode || !IS_CASEFOLDED(inode) || !um)
 		return 0;
 
 	norm = kmalloc(PATH_MAX, GFP_ATOMIC);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f8578ca..9a2ee24 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1052,8 +1052,6 @@ struct ext4_inode_info {
 	/* allocation reservation info for delalloc */
 	/* In case of bigalloc, this refer to clusters rather than blocks */
 	unsigned int i_reserved_data_blocks;
-	ext4_lblk_t i_da_metadata_calc_last_lblock;
-	int i_da_metadata_calc_len;
 
 	/* pending cluster reservations for bigalloc file systems */
 	struct ext4_pending_tree i_pending_tree;
@@ -1343,7 +1341,8 @@ struct ext4_super_block {
 	__u8	s_lastcheck_hi;
 	__u8	s_first_error_time_hi;
 	__u8	s_last_error_time_hi;
-	__u8	s_pad[2];
+	__u8	s_first_error_errcode;
+	__u8    s_last_error_errcode;
 	__le16  s_encoding;		/* Filename charset encoding */
 	__le16  s_encoding_flags;	/* Filename charset encoding flags */
 	__le32	s_reserved[95];		/* Padding to the end of the block */
@@ -1556,6 +1555,9 @@ struct ext4_sb_info {
 	/* Barrier between changing inodes' journal flags and writepages ops. */
 	struct percpu_rw_semaphore s_journal_flag_rwsem;
 	struct dax_device *s_daxdev;
+#ifdef CONFIG_EXT4_DEBUG
+	unsigned long s_simulate_fail;
+#endif
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1575,6 +1577,66 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 }
 
 /*
+ * Simulate_fail codes
+ */
+#define EXT4_SIM_BBITMAP_EIO	1
+#define EXT4_SIM_BBITMAP_CRC	2
+#define EXT4_SIM_IBITMAP_EIO	3
+#define EXT4_SIM_IBITMAP_CRC	4
+#define EXT4_SIM_INODE_EIO	5
+#define EXT4_SIM_INODE_CRC	6
+#define EXT4_SIM_DIRBLOCK_EIO	7
+#define EXT4_SIM_DIRBLOCK_CRC	8
+
+static inline bool ext4_simulate_fail(struct super_block *sb,
+				     unsigned long code)
+{
+#ifdef CONFIG_EXT4_DEBUG
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+	if (unlikely(sbi->s_simulate_fail == code)) {
+		sbi->s_simulate_fail = 0;
+		return true;
+	}
+#endif
+	return false;
+}
+
+static inline void ext4_simulate_fail_bh(struct super_block *sb,
+					 struct buffer_head *bh,
+					 unsigned long code)
+{
+	if (!IS_ERR(bh) && ext4_simulate_fail(sb, code))
+		clear_buffer_uptodate(bh);
+}
+
+/*
+ * Error number codes for s_{first,last}_error_errno
+ *
+ * Linux errno numbers are architecture specific, so we need to translate
+ * them into something which is architecture independent.   We don't define
+ * codes for all errno's; just the ones which are most likely to be the cause
+ * of an ext4_error() call.
+ */
+#define EXT4_ERR_UNKNOWN	 1
+#define EXT4_ERR_EIO		 2
+#define EXT4_ERR_ENOMEM		 3
+#define EXT4_ERR_EFSBADCRC	 4
+#define EXT4_ERR_EFSCORRUPTED	 5
+#define EXT4_ERR_ENOSPC		 6
+#define EXT4_ERR_ENOKEY		 7
+#define EXT4_ERR_EROFS		 8
+#define EXT4_ERR_EFBIG		 9
+#define EXT4_ERR_EEXIST		10
+#define EXT4_ERR_ERANGE		11
+#define EXT4_ERR_EOVERFLOW	12
+#define EXT4_ERR_EBUSY		13
+#define EXT4_ERR_ENOTDIR	14
+#define EXT4_ERR_ENOTEMPTY	15
+#define EXT4_ERR_ESHUTDOWN	16
+#define EXT4_ERR_EFAULT		17
+
+/*
  * Inode dynamic state flags
  */
 enum {
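
The comment block above notes that errno values are architecture specific, so the on-disk error fields record the architecture-independent EXT4_ERR_* codes instead. The translation helper, ext4_set_errno(), is declared later in this header and defined outside this excerpt, presumably in fs/ext4/super.c. A hedged sketch of the errno-to-code mapping, using only the codes defined above and assuming those macros plus <linux/errno.h> and <linux/fs.h> (for EFSBADCRC/EFSCORRUPTED):

/*
 * Illustrative sketch, not the patch's exact implementation: map a kernel
 * errno onto the architecture-independent EXT4_ERR_* codes above so it can
 * be stored in s_first_error_errcode / s_last_error_errcode.
 */
static int ext4_errno_to_code(int err)
{
	switch (err) {
	case EIO:		return EXT4_ERR_EIO;
	case ENOMEM:		return EXT4_ERR_ENOMEM;
	case EFSBADCRC:		return EXT4_ERR_EFSBADCRC;
	case EFSCORRUPTED:	return EXT4_ERR_EFSCORRUPTED;
	case ENOSPC:		return EXT4_ERR_ENOSPC;
	case ENOKEY:		return EXT4_ERR_ENOKEY;
	case EROFS:		return EXT4_ERR_EROFS;
	case EFBIG:		return EXT4_ERR_EFBIG;
	case EEXIST:		return EXT4_ERR_EEXIST;
	case ERANGE:		return EXT4_ERR_ERANGE;
	case EOVERFLOW:		return EXT4_ERR_EOVERFLOW;
	case EBUSY:		return EXT4_ERR_EBUSY;
	case ENOTDIR:		return EXT4_ERR_ENOTDIR;
	case ENOTEMPTY:		return EXT4_ERR_ENOTEMPTY;
	case ESHUTDOWN:		return EXT4_ERR_ESHUTDOWN;
	case EFAULT:		return EXT4_ERR_EFAULT;
	default:		return EXT4_ERR_UNKNOWN;
	}
}

The ext4_set_errno() call sites later in this series pass a positive errno (note the -err at most of them), which is what this sketch assumes.
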
@@ -2628,7 +2690,6 @@ extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk,
 /* indirect.c */
 extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 				struct ext4_map_blocks *map, int flags);
-extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
 extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
 extern void ext4_ind_truncate(handle_t *, struct inode *inode);
 extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
@@ -2679,8 +2740,6 @@ extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
 extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
 extern int ext4_calculate_overhead(struct super_block *sb);
 extern void ext4_superblock_csum_set(struct super_block *sb);
-extern void *ext4_kvmalloc(size_t size, gfp_t flags);
-extern void *ext4_kvzalloc(size_t size, gfp_t flags);
 extern int ext4_alloc_flex_bg_array(struct super_block *sb,
 				    ext4_group_t ngroup);
 extern const char *ext4_decode_error(struct super_block *sb, int errno,
@@ -2688,6 +2747,7 @@ extern const char *ext4_decode_error(struct super_block *sb, int errno,
 extern void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
 					     ext4_group_t block_group,
 					     unsigned int flags);
+extern void ext4_set_errno(struct super_block *sb, int err);
 
 extern __printf(4, 5)
 void __ext4_error(struct super_block *, const char *, unsigned int,
@@ -3254,7 +3314,6 @@ struct ext4_extent;
 #define EXT_MAX_BLOCKS	0xffffffff
 
 extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
-extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
 extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
 extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 			       struct ext4_map_blocks *map, int flags);
@@ -3271,14 +3330,9 @@ extern int ext4_convert_unwritten_io_end_vec(handle_t *handle,
 					     ext4_io_end_t *io_end);
 extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
 			   struct ext4_map_blocks *map, int flags);
-extern int ext4_ext_calc_metadata_amount(struct inode *inode,
-					 ext4_lblk_t lblocks);
 extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
 						   int num,
 						   struct ext4_ext_path *path);
-extern int ext4_can_extents_be_merged(struct inode *inode,
-				      struct ext4_extent *ex1,
-				      struct ext4_extent *ex2);
 extern int ext4_ext_insert_extent(handle_t *, struct inode *,
 				  struct ext4_ext_path **,
 				  struct ext4_extent *, int);
@@ -3294,8 +3348,6 @@ extern int ext4_get_es_cache(struct inode *inode,
 			     struct fiemap_extent_info *fieinfo,
 			     __u64 start, __u64 len);
 extern int ext4_ext_precache(struct inode *inode);
-extern int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
-extern int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
 extern int ext4_swap_extents(handle_t *handle, struct inode *inode1,
 				struct inode *inode2, ext4_lblk_t lblk1,
 			     ext4_lblk_t lblk2,  ext4_lblk_t count,
@@ -3390,6 +3442,7 @@ static inline void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
 }
 
 extern const struct iomap_ops ext4_iomap_ops;
+extern const struct iomap_ops ext4_iomap_overwrite_ops;
 extern const struct iomap_ops ext4_iomap_report_ops;
 
 static inline int ext4_buffer_uptodate(struct buffer_head *bh)
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 98bd0e9..1c216fc 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -267,10 +267,5 @@ static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix,
 				     0xffff);
 }
 
-#define ext4_ext_dirty(handle, inode, path) \
-		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
-int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
-		     struct inode *inode, struct ext4_ext_path *path);
-
 #endif /* _EXT4_EXTENTS */
 
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index d3b8cde..1f53d64 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -7,6 +7,28 @@
 
 #include <trace/events/ext4.h>
 
+int ext4_inode_journal_mode(struct inode *inode)
+{
+	if (EXT4_JOURNAL(inode) == NULL)
+		return EXT4_INODE_WRITEBACK_DATA_MODE;	/* writeback */
+	/* We do not support data journalling with delayed allocation */
+	if (!S_ISREG(inode->i_mode) ||
+	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE) ||
+	    test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
+	    (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
+	    !test_opt(inode->i_sb, DELALLOC))) {
+		/* We do not support data journalling for encrypted data */
+		if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode))
+			return EXT4_INODE_ORDERED_DATA_MODE;  /* ordered */
+		return EXT4_INODE_JOURNAL_DATA_MODE;	/* journal data */
+	}
+	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
+		return EXT4_INODE_ORDERED_DATA_MODE;	/* ordered */
+	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
+		return EXT4_INODE_WRITEBACK_DATA_MODE;	/* writeback */
+	BUG();
+}
+
 /* Just increment the non-pointer handle value */
 static handle_t *ext4_get_nojournal(void)
 {
@@ -58,6 +80,7 @@ static int ext4_journal_check_start(struct super_block *sb)
 	 * take the FS itself readonly cleanly.
 	 */
 	if (journal && is_journal_aborted(journal)) {
+		ext4_set_errno(sb, -journal->j_errno);
 		ext4_abort(sb, "Detected aborted journal");
 		return -EROFS;
 	}
@@ -249,6 +272,7 @@ int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
 	if (err) {
 		ext4_journal_abort_handle(where, line, __func__,
 					  bh, handle, err);
+		ext4_set_errno(inode->i_sb, -err);
 		__ext4_abort(inode->i_sb, where, line,
 			   "error %d when attempting revoke", err);
 	}
@@ -320,6 +344,7 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
 				es = EXT4_SB(inode->i_sb)->s_es;
 				es->s_last_error_block =
 					cpu_to_le64(bh->b_blocknr);
+				ext4_set_errno(inode->i_sb, EIO);
 				ext4_error_inode(inode, where, line,
 						 bh->b_blocknr,
 					"IO error syncing itable block");
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index a6b9b66..7ea4f6f 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -463,27 +463,7 @@ int ext4_force_commit(struct super_block *sb);
 #define EXT4_INODE_ORDERED_DATA_MODE	0x02 /* ordered data mode */
 #define EXT4_INODE_WRITEBACK_DATA_MODE	0x04 /* writeback data mode */
 
-static inline int ext4_inode_journal_mode(struct inode *inode)
-{
-	if (EXT4_JOURNAL(inode) == NULL)
-		return EXT4_INODE_WRITEBACK_DATA_MODE;	/* writeback */
-	/* We do not support data journalling with delayed allocation */
-	if (!S_ISREG(inode->i_mode) ||
-	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE) ||
-	    test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
-	    (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
-	    !test_opt(inode->i_sb, DELALLOC))) {
-		/* We do not support data journalling for encrypted data */
-		if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode))
-			return EXT4_INODE_ORDERED_DATA_MODE;  /* ordered */
-		return EXT4_INODE_JOURNAL_DATA_MODE;	/* journal data */
-	}
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
-		return EXT4_INODE_ORDERED_DATA_MODE;	/* ordered */
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
-		return EXT4_INODE_WRITEBACK_DATA_MODE;	/* writeback */
-	BUG();
-}
+int ext4_inode_journal_mode(struct inode *inode);
 
 static inline int ext4_should_journal_data(struct inode *inode)
 {
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0e8708b..954013d 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -161,8 +161,9 @@ static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
  *  - ENOMEM
  *  - EIO
  */
-int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
-		     struct inode *inode, struct ext4_ext_path *path)
+static int __ext4_ext_dirty(const char *where, unsigned int line,
+			    handle_t *handle, struct inode *inode,
+			    struct ext4_ext_path *path)
 {
 	int err;
 
@@ -179,6 +180,9 @@ int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
 	return err;
 }
 
+#define ext4_ext_dirty(handle, inode, path) \
+		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
+
 static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
 			      struct ext4_ext_path *path,
 			      ext4_lblk_t block)
@@ -309,53 +313,6 @@ ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
 			(nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0));
 }
 
-/*
- * Calculate the number of metadata blocks needed
- * to allocate @blocks
- * Worse case is one block per extent
- */
-int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
-{
-	struct ext4_inode_info *ei = EXT4_I(inode);
-	int idxs;
-
-	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
-		/ sizeof(struct ext4_extent_idx));
-
-	/*
-	 * If the new delayed allocation block is contiguous with the
-	 * previous da block, it can share index blocks with the
-	 * previous block, so we only need to allocate a new index
-	 * block every idxs leaf blocks.  At ldxs**2 blocks, we need
-	 * an additional index block, and at ldxs**3 blocks, yet
-	 * another index blocks.
-	 */
-	if (ei->i_da_metadata_calc_len &&
-	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
-		int num = 0;
-
-		if ((ei->i_da_metadata_calc_len % idxs) == 0)
-			num++;
-		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
-			num++;
-		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
-			num++;
-			ei->i_da_metadata_calc_len = 0;
-		} else
-			ei->i_da_metadata_calc_len++;
-		ei->i_da_metadata_calc_last_lblock++;
-		return num;
-	}
-
-	/*
-	 * In the worst case we need a new set of index blocks at
-	 * every level of the inode's extent tree.
-	 */
-	ei->i_da_metadata_calc_len = 1;
-	ei->i_da_metadata_calc_last_lblock = lblock;
-	return ext_depth(inode) + 1;
-}
-
 static int
 ext4_ext_max_entries(struct inode *inode, int depth)
 {
@@ -492,6 +449,7 @@ static int __ext4_ext_check(const char *function, unsigned int line,
 	return 0;
 
 corrupted:
+	ext4_set_errno(inode->i_sb, -err);
 	ext4_error_inode(inode, function, line, 0,
 			 "pblk %llu bad header/extent: %s - magic %x, "
 			 "entries %u, max %u(%u), depth %u(%u)",
@@ -510,6 +468,30 @@ int ext4_ext_check_inode(struct inode *inode)
 	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
 }
 
+static void ext4_cache_extents(struct inode *inode,
+			       struct ext4_extent_header *eh)
+{
+	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
+	ext4_lblk_t prev = 0;
+	int i;
+
+	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
+		unsigned int status = EXTENT_STATUS_WRITTEN;
+		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
+		int len = ext4_ext_get_actual_len(ex);
+
+		if (prev && (prev != lblk))
+			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
+					     EXTENT_STATUS_HOLE);
+
+		if (ext4_ext_is_unwritten(ex))
+			status = EXTENT_STATUS_UNWRITTEN;
+		ext4_es_cache_extent(inode, lblk, len,
+				     ext4_ext_pblock(ex), status);
+		prev = lblk + len;
+	}
+}
+
 static struct buffer_head *
 __read_extent_tree_block(const char *function, unsigned int line,
 			 struct inode *inode, ext4_fsblk_t pblk, int depth,
@@ -544,26 +526,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
 	 */
 	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
 		struct ext4_extent_header *eh = ext_block_hdr(bh);
-		struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
-		ext4_lblk_t prev = 0;
-		int i;
-
-		for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
-			unsigned int status = EXTENT_STATUS_WRITTEN;
-			ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
-			int len = ext4_ext_get_actual_len(ex);
-
-			if (prev && (prev != lblk))
-				ext4_es_cache_extent(inode, prev,
-						     lblk - prev, ~0,
-						     EXTENT_STATUS_HOLE);
-
-			if (ext4_ext_is_unwritten(ex))
-				status = EXTENT_STATUS_UNWRITTEN;
-			ext4_es_cache_extent(inode, lblk, len,
-					     ext4_ext_pblock(ex), status);
-			prev = lblk + len;
-		}
+		ext4_cache_extents(inode, eh);
 	}
 	return bh;
 errout:
@@ -649,8 +612,9 @@ static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
 	ext_debug("path:");
 	for (k = 0; k <= l; k++, path++) {
 		if (path->p_idx) {
-		  ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
-			    ext4_idx_pblock(path->p_idx));
+			ext_debug("  %d->%llu",
+				  le32_to_cpu(path->p_idx->ei_block),
+				  ext4_idx_pblock(path->p_idx));
 		} else if (path->p_ext) {
 			ext_debug("  %d:[%d]%d:%llu ",
 				  le32_to_cpu(path->p_ext->ee_block),
@@ -731,11 +695,12 @@ void ext4_ext_drop_refs(struct ext4_ext_path *path)
 	if (!path)
 		return;
 	depth = path->p_depth;
-	for (i = 0; i <= depth; i++, path++)
+	for (i = 0; i <= depth; i++, path++) {
 		if (path->p_bh) {
 			brelse(path->p_bh);
 			path->p_bh = NULL;
 		}
+	}
 }
 
 /*
@@ -777,8 +742,8 @@ ext4_ext_binsearch_idx(struct inode *inode,
 
 		chix = ix = EXT_FIRST_INDEX(eh);
 		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
-		  if (k != 0 &&
-		      le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
+			if (k != 0 && le32_to_cpu(ix->ei_block) <=
+			    le32_to_cpu(ix[-1].ei_block)) {
 				printk(KERN_DEBUG "k=%d, ix=0x%p, "
 				       "first=0x%p\n", k,
 				       ix, EXT_FIRST_INDEX(eh));
@@ -911,6 +876,8 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
 	path[0].p_bh = NULL;
 
 	i = depth;
+	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
+		ext4_cache_extents(inode, eh);
 	/* walk through the tree */
 	while (i) {
 		ext_debug("depth %d: num %d, max %d\n",
@@ -1632,17 +1599,16 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
 		return EXT_MAX_BLOCKS;
 
 	while (depth >= 0) {
+		struct ext4_ext_path *p = &path[depth];
+
 		if (depth == path->p_depth) {
 			/* leaf */
-			if (path[depth].p_ext &&
-				path[depth].p_ext !=
-					EXT_LAST_EXTENT(path[depth].p_hdr))
-			  return le32_to_cpu(path[depth].p_ext[1].ee_block);
+			if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr))
+				return le32_to_cpu(p->p_ext[1].ee_block);
 		} else {
 			/* index */
-			if (path[depth].p_idx !=
-					EXT_LAST_INDEX(path[depth].p_hdr))
-			  return le32_to_cpu(path[depth].p_idx[1].ei_block);
+			if (p->p_idx != EXT_LAST_INDEX(p->p_hdr))
+				return le32_to_cpu(p->p_idx[1].ei_block);
 		}
 		depth--;
 	}
@@ -1742,9 +1708,9 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
 	return err;
 }
 
-int
-ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
-				struct ext4_extent *ex2)
+static int ext4_can_extents_be_merged(struct inode *inode,
+				      struct ext4_extent *ex1,
+				      struct ext4_extent *ex2)
 {
 	unsigned short ext1_ee_len, ext2_ee_len;
 
@@ -1758,11 +1724,6 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
 			le32_to_cpu(ex2->ee_block))
 		return 0;
 
-	/*
-	 * To allow future support for preallocated extents to be added
-	 * as an RO_COMPAT feature, refuse to merge to extents if
-	 * this can result in the top bit of ee_len being set.
-	 */
 	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
 		return 0;
 
@@ -1870,13 +1831,14 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
 }
 
 /*
- * This function tries to merge the @ex extent to neighbours in the tree.
- * return 1 if merge left else 0.
+ * This function tries to merge the @ex extent to neighbours in the tree, then
+ * tries to collapse the extent tree into the inode.
  */
 static void ext4_ext_try_to_merge(handle_t *handle,
 				  struct inode *inode,
 				  struct ext4_ext_path *path,
-				  struct ext4_extent *ex) {
+				  struct ext4_extent *ex)
+{
 	struct ext4_extent_header *eh;
 	unsigned int depth;
 	int merge_done = 0;
@@ -3718,9 +3680,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 		max_zeroout = sbi->s_extent_max_zeroout_kb >>
 			(inode->i_sb->s_blocksize_bits - 10);
 
-	if (IS_ENCRYPTED(inode))
-		max_zeroout = 0;
-
 	/*
 	 * five cases:
 	 * 1. split the extent into three extents.
@@ -4706,6 +4665,10 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
 	return ret > 0 ? ret2 : ret;
 }
 
+static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
+
+static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
+
 static long ext4_zero_range(struct file *file, loff_t offset,
 			    loff_t len, int mode)
 {
@@ -4723,9 +4686,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 
 	trace_ext4_zero_range(inode, offset, len, mode);
 
-	if (!S_ISREG(inode->i_mode))
-		return -EINVAL;
-
 	/* Call ext4_force_commit to flush all data in case of data=journal. */
 	if (ext4_should_journal_data(inode)) {
 		ret = ext4_force_commit(inode->i_sb);
@@ -4765,7 +4725,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 	}
 
 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-	    (offset + len > i_size_read(inode) ||
+	    (offset + len > inode->i_size ||
 	     offset + len > EXT4_I(inode)->i_disksize)) {
 		new_size = offset + len;
 		ret = inode_newsize_ok(inode, new_size);
@@ -4849,7 +4809,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 		* Mark that we allocate beyond EOF so the subsequent truncate
 		* can proceed even if the new size is the same as i_size.
 		*/
-		if ((offset + len) > i_size_read(inode))
+		if (offset + len > inode->i_size)
 			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
 	}
 	ext4_mark_inode_dirty(handle, inode);
@@ -4890,14 +4850,9 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	 * range since we would need to re-encrypt blocks with a
 	 * different IV or XTS tweak (which are based on the logical
 	 * block number).
-	 *
-	 * XXX It's not clear why zero range isn't working, but we'll
-	 * leave it disabled for encrypted inodes for now.  This is a
-	 * bug we should fix....
 	 */
 	if (IS_ENCRYPTED(inode) &&
-	    (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
-		     FALLOC_FL_ZERO_RANGE)))
+	    (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
 		return -EOPNOTSUPP;
 
 	/* Return error if mode is not supported */
@@ -4941,7 +4896,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	}
 
 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-	    (offset + len > i_size_read(inode) ||
+	    (offset + len > inode->i_size ||
 	     offset + len > EXT4_I(inode)->i_disksize)) {
 		new_size = offset + len;
 		ret = inode_newsize_ok(inode, new_size);
@@ -5268,7 +5223,7 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
 {
 	int depth, err = 0;
 	struct ext4_extent *ex_start, *ex_last;
-	bool update = 0;
+	bool update = false;
 	depth = path->p_depth;
 
 	while (depth >= 0) {
@@ -5284,7 +5239,7 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
 				goto out;
 
 			if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
-				update = 1;
+				update = true;
 
 			while (ex_start <= ex_last) {
 				if (SHIFT == SHIFT_LEFT) {
@@ -5472,7 +5427,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
  * This implements the fallocate's collapse range functionality for ext4
  * Returns: 0 and non-zero on error.
  */
-int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 {
 	struct super_block *sb = inode->i_sb;
 	ext4_lblk_t punch_start, punch_stop;
@@ -5489,12 +5444,8 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
 		return -EOPNOTSUPP;
 
-	/* Collapse range works only on fs block size aligned offsets. */
-	if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
-	    len & (EXT4_CLUSTER_SIZE(sb) - 1))
-		return -EINVAL;
-
-	if (!S_ISREG(inode->i_mode))
+	/* Collapse range works only on fs cluster size aligned regions. */
+	if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
 		return -EINVAL;
 
 	trace_ext4_collapse_range(inode, offset, len);
@@ -5514,7 +5465,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 	 * There is no need to overlap collapse range with EOF, in which case
 	 * it is effectively a truncate operation
 	 */
-	if (offset + len >= i_size_read(inode)) {
+	if (offset + len >= inode->i_size) {
 		ret = -EINVAL;
 		goto out_mutex;
 	}
@@ -5592,7 +5543,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 		goto out_stop;
 	}
 
-	new_size = i_size_read(inode) - len;
+	new_size = inode->i_size - len;
 	i_size_write(inode, new_size);
 	EXT4_I(inode)->i_disksize = new_size;
 
@@ -5620,7 +5571,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
  * by len bytes.
  * Returns 0 on success, error otherwise.
  */
-int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
+static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
 {
 	struct super_block *sb = inode->i_sb;
 	handle_t *handle;
@@ -5639,14 +5590,10 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
 		return -EOPNOTSUPP;
 
-	/* Insert range works only on fs block size aligned offsets. */
-	if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
-			len & (EXT4_CLUSTER_SIZE(sb) - 1))
+	/* Insert range works only on fs cluster size aligned regions. */
+	if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
 		return -EINVAL;
 
-	if (!S_ISREG(inode->i_mode))
-		return -EOPNOTSUPP;
-
 	trace_ext4_insert_range(inode, offset, len);
 
 	offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
@@ -5666,14 +5613,14 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
 		goto out_mutex;
 	}
 
-	/* Check for wrap through zero */
-	if (inode->i_size + len > inode->i_sb->s_maxbytes) {
+	/* Check whether the maximum file size would be exceeded */
+	if (len > inode->i_sb->s_maxbytes - inode->i_size) {
 		ret = -EFBIG;
 		goto out_mutex;
 	}
 
-	/* Offset should be less than i_size */
-	if (offset >= i_size_read(inode)) {
+	/* Offset must be less than i_size */
+	if (offset >= inode->i_size) {
 		ret = -EINVAL;
 		goto out_mutex;
 	}
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index 825313c..4ec30a7 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -209,6 +209,12 @@ static inline ext4_fsblk_t ext4_es_pblock(struct extent_status *es)
 	return es->es_pblk & ~ES_MASK;
 }
 
+static inline ext4_fsblk_t ext4_es_show_pblock(struct extent_status *es)
+{
+	ext4_fsblk_t pblock = ext4_es_pblock(es);
+	return pblock == ~ES_MASK ? 0 : pblock;
+}
+
 static inline void ext4_es_store_pblock(struct extent_status *es,
 					ext4_fsblk_t pb)
 {
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6a7293a..5f22588 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -88,9 +88,10 @@ static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	struct inode *inode = file_inode(iocb->ki_filp);
 	ssize_t ret;
 
-	if (!inode_trylock_shared(inode)) {
-		if (iocb->ki_flags & IOCB_NOWAIT)
+	if (iocb->ki_flags & IOCB_NOWAIT) {
+		if (!inode_trylock_shared(inode))
 			return -EAGAIN;
+	} else {
 		inode_lock_shared(inode);
 	}
 	/*
@@ -165,19 +166,25 @@ static int ext4_release_file(struct inode *inode, struct file *filp)
  * threads are at work on the same unwritten block, they must be synchronized
  * or one thread will zero the other's data, causing corruption.
  */
-static int
-ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
+static bool
+ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
 {
 	struct super_block *sb = inode->i_sb;
-	int blockmask = sb->s_blocksize - 1;
-
-	if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
-		return 0;
+	unsigned long blockmask = sb->s_blocksize - 1;
 
 	if ((pos | iov_iter_alignment(from)) & blockmask)
-		return 1;
+		return true;
 
-	return 0;
+	return false;
+}
+
+static bool
+ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
+{
+	if (offset + len > i_size_read(inode) ||
+	    offset + len > EXT4_I(inode)->i_disksize)
+		return true;
+	return false;
 }
 
 /* Is IO overwriting allocated and initialized blocks? */
@@ -203,7 +210,8 @@ static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
 	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
 }
 
-static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
+					 struct iov_iter *from)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
 	ssize_t ret;
@@ -227,11 +235,21 @@ static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
 		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
 	}
 
+	return iov_iter_count(from);
+}
+
+static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
+{
+	ssize_t ret, count;
+
+	count = ext4_generic_write_checks(iocb, from);
+	if (count <= 0)
+		return count;
+
 	ret = file_modified(iocb->ki_filp);
 	if (ret)
 		return ret;
-
-	return iov_iter_count(from);
+	return count;
 }
 
 static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
@@ -363,62 +381,137 @@ static const struct iomap_dio_ops ext4_dio_write_ops = {
 	.end_io = ext4_dio_write_end_io,
 };
 
+/*
+ * The intention here is to start with shared lock acquired then see if any
+ * condition requires an exclusive inode lock. If yes, then we restart the
+ * whole operation by releasing the shared lock and acquiring exclusive lock.
+ *
+ * - For unaligned_io we never take the shared lock, as it may cause data corruption
+ *   when two unaligned IOs try to modify the same block, e.g. while zeroing.
+ *
+ * - For extending writes we don't take the shared lock, since that requires
+ *   updating inode i_disksize and/or orphan handling under the exclusive lock.
+ *
+ * - In practice, shared locking is only retained for overwrites. Otherwise we
+ *   switch to the exclusive i_rwsem lock.
+ */
+static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
+				     bool *ilock_shared, bool *extend)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file_inode(file);
+	loff_t offset;
+	size_t count;
+	ssize_t ret;
+
+restart:
+	ret = ext4_generic_write_checks(iocb, from);
+	if (ret <= 0)
+		goto out;
+
+	offset = iocb->ki_pos;
+	count = ret;
+	if (ext4_extending_io(inode, offset, count))
+		*extend = true;
+	/*
+	 * Determine whether the IO operation will overwrite allocated
+	 * and initialized blocks.
+	 * We need exclusive i_rwsem for changing security info
+	 * in file_modified().
+	 */
+	if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
+	     !ext4_overwrite_io(inode, offset, count))) {
+		inode_unlock_shared(inode);
+		*ilock_shared = false;
+		inode_lock(inode);
+		goto restart;
+	}
+
+	ret = file_modified(file);
+	if (ret < 0)
+		goto out;
+
+	return count;
+out:
+	if (*ilock_shared)
+		inode_unlock_shared(inode);
+	else
+		inode_unlock(inode);
+	return ret;
+}
+
 static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	ssize_t ret;
-	size_t count;
-	loff_t offset;
 	handle_t *handle;
 	struct inode *inode = file_inode(iocb->ki_filp);
-	bool extend = false, overwrite = false, unaligned_aio = false;
+	loff_t offset = iocb->ki_pos;
+	size_t count = iov_iter_count(from);
+	const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
+	bool extend = false, unaligned_io = false;
+	bool ilock_shared = true;
+
+	/*
+	 * We initially start with shared inode lock unless it is
+	 * unaligned IO which needs exclusive lock anyways.
+	 */
+	if (ext4_unaligned_io(inode, from, offset)) {
+		unaligned_io = true;
+		ilock_shared = false;
+	}
+	/*
+	 * Quick check here without any i_rwsem lock to see if it is extending
+	 * IO. A more reliable check is done in ext4_dio_write_checks() with
+	 * proper locking in place.
+	 */
+	if (offset + count > i_size_read(inode))
+		ilock_shared = false;
 
 	if (iocb->ki_flags & IOCB_NOWAIT) {
-		if (!inode_trylock(inode))
-			return -EAGAIN;
+		if (ilock_shared) {
+			if (!inode_trylock_shared(inode))
+				return -EAGAIN;
+		} else {
+			if (!inode_trylock(inode))
+				return -EAGAIN;
+		}
 	} else {
-		inode_lock(inode);
+		if (ilock_shared)
+			inode_lock_shared(inode);
+		else
+			inode_lock(inode);
 	}
 
+	/* Fallback to buffered I/O if the inode does not support direct I/O. */
 	if (!ext4_dio_supported(inode)) {
-		inode_unlock(inode);
-		/*
-		 * Fallback to buffered I/O if the inode does not support
-		 * direct I/O.
-		 */
+		if (ilock_shared)
+			inode_unlock_shared(inode);
+		else
+			inode_unlock(inode);
 		return ext4_buffered_write_iter(iocb, from);
 	}
 
-	ret = ext4_write_checks(iocb, from);
-	if (ret <= 0) {
-		inode_unlock(inode);
+	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
+	if (ret <= 0)
 		return ret;
-	}
 
-	/*
-	 * Unaligned asynchronous direct I/O must be serialized among each
-	 * other as the zeroing of partial blocks of two competing unaligned
-	 * asynchronous direct I/O writes can result in data corruption.
-	 */
 	offset = iocb->ki_pos;
-	count = iov_iter_count(from);
-	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
-	    !is_sync_kiocb(iocb) && ext4_unaligned_aio(inode, from, offset)) {
-		unaligned_aio = true;
-		inode_dio_wait(inode);
-	}
+	count = ret;
 
 	/*
-	 * Determine whether the I/O will overwrite allocated and initialized
-	 * blocks. If so, check to see whether it is possible to take the
-	 * dioread_nolock path.
+	 * Unaligned direct IO must be serialized among each other as zeroing
+	 * of partial blocks of two competing unaligned IOs can result in data
+	 * corruption.
+	 *
+	 * So we make sure we don't allow any unaligned IO in flight.
+	 * For IOs where we need not wait (like unaligned non-AIO DIO),
+	 * below inode_dio_wait() may anyway become a no-op, since we start
+	 * with exclusive lock.
 	 */
-	if (!unaligned_aio && ext4_overwrite_io(inode, offset, count) &&
-	    ext4_should_dioread_nolock(inode)) {
-		overwrite = true;
-		downgrade_write(&inode->i_rwsem);
-	}
+	if (unaligned_io)
+		inode_dio_wait(inode);
 
-	if (offset + count > EXT4_I(inode)->i_disksize) {
+	if (extend) {
 		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
 		if (IS_ERR(handle)) {
 			ret = PTR_ERR(handle);
@@ -431,18 +524,19 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
 			goto out;
 		}
 
-		extend = true;
 		ext4_journal_stop(handle);
 	}
 
-	ret = iomap_dio_rw(iocb, from, &ext4_iomap_ops, &ext4_dio_write_ops,
-			   is_sync_kiocb(iocb) || unaligned_aio || extend);
+	if (ilock_shared)
+		iomap_ops = &ext4_iomap_overwrite_ops;
+	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
+			   is_sync_kiocb(iocb) || unaligned_io || extend);
 
 	if (extend)
 		ret = ext4_handle_inode_extension(inode, offset, ret, count);
 
 out:
-	if (overwrite)
+	if (ilock_shared)
 		inode_unlock_shared(inode);
 	else
 		inode_unlock(inode);
@@ -487,9 +581,10 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	bool extend = false;
 	struct inode *inode = file_inode(iocb->ki_filp);
 
-	if (!inode_trylock(inode)) {
-		if (iocb->ki_flags & IOCB_NOWAIT)
+	if (iocb->ki_flags & IOCB_NOWAIT) {
+		if (!inode_trylock(inode))
 			return -EAGAIN;
+	} else {
 		inode_lock(inode);
 	}
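
The block comment above ext4_dio_write_checks() describes the direct-IO locking strategy: take the shared i_rwsem optimistically, and fall back to the exclusive lock (restarting the checks) as soon as unaligned IO, an extending write, a non-overwrite, or a needed security update rules shared mode out. A condensed, filesystem-agnostic sketch of that pattern; needs_exclusive() is a stand-in for those real tests and is not part of the patch:

#include <linux/fs.h>

/*
 * Sketch of the "shared first, upgrade on demand" pattern used above.
 * The real code re-runs its generic write checks after upgrading, since
 * the inode can change while no lock is held.
 */
static void lock_inode_for_dio_write(struct inode *inode, bool *ilock_shared,
				     bool (*needs_exclusive)(struct inode *))
{
	*ilock_shared = true;
	inode_lock_shared(inode);

	while (*ilock_shared && needs_exclusive(inode)) {
		inode_unlock_shared(inode);
		*ilock_shared = false;
		inode_lock(inode);
	}
}

On the unlock side the caller mirrors the flag, exactly as ext4_dio_write_iter() does with inode_unlock_shared() versus inode_unlock() in its out path.
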
 
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 8ca4a23..c66e8f9 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -94,7 +94,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
 		goto verified;
 	blk = ext4_inode_bitmap(sb, desc);
 	if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
-					   EXT4_INODES_PER_GROUP(sb) / 8)) {
+					   EXT4_INODES_PER_GROUP(sb) / 8) ||
+	    ext4_simulate_fail(sb, EXT4_SIM_IBITMAP_CRC)) {
 		ext4_unlock_group(sb, block_group);
 		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
 			   "inode_bitmap = %llu", block_group, blk);
@@ -192,8 +193,10 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 	get_bh(bh);
 	submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
 	wait_on_buffer(bh);
+	ext4_simulate_fail_bh(sb, bh, EXT4_SIM_IBITMAP_EIO);
 	if (!buffer_uptodate(bh)) {
 		put_bh(bh);
+		ext4_set_errno(sb, EIO);
 		ext4_error(sb, "Cannot read inode bitmap - "
 			   "block_group = %u, inode_bitmap = %llu",
 			   block_group, bitmap_blk);
@@ -1223,6 +1226,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
 	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
+		ext4_set_errno(sb, -err);
 		ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
 			   ino, err);
 		return inode;
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 3a4ab70..569fc68 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -660,32 +660,6 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 }
 
 /*
- * Calculate the number of metadata blocks need to reserve
- * to allocate a new block at @lblocks for non extent file based file
- */
-int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
-{
-	struct ext4_inode_info *ei = EXT4_I(inode);
-	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
-	int blk_bits;
-
-	if (lblock < EXT4_NDIR_BLOCKS)
-		return 0;
-
-	lblock -= EXT4_NDIR_BLOCKS;
-
-	if (ei->i_da_metadata_calc_len &&
-	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
-		ei->i_da_metadata_calc_len++;
-		return 0;
-	}
-	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
-	ei->i_da_metadata_calc_len = 1;
-	blk_bits = order_base_2(lblock);
-	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
-}
-
-/*
  * Calculate number of indirect blocks touched by mapping @nrblocks logically
  * contiguous blocks
  */
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 2fec62d..fad82d0 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -98,6 +98,7 @@ int ext4_get_max_inline_size(struct inode *inode)
 
 	error = ext4_get_inode_loc(inode, &iloc);
 	if (error) {
+		ext4_set_errno(inode->i_sb, -error);
 		ext4_error_inode(inode, __func__, __LINE__, 0,
 				 "can't get inode location %lu",
 				 inode->i_ino);
@@ -849,7 +850,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
 
 /*
  * Prepare the write for the inline data.
- * If the the data can be written into the inode, we just read
+ * If the data can be written into the inode, we just read
  * the page and make it uptodate, and start the journal.
  * Otherwise read the page, make it dirty so that it can be
  * handled in writepages (the i_disksize update is left to the
@@ -1761,6 +1762,7 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
 
 	err = ext4_get_inode_loc(dir, &iloc);
 	if (err) {
+		ext4_set_errno(dir->i_sb, -err);
 		EXT4_ERROR_INODE(dir, "error %d getting inode %lu block",
 				 err, dir->i_ino);
 		return true;
diff --git a/fs/ext4/inode-test.c b/fs/ext4/inode-test.c
index bbce1c3..d62d802 100644
--- a/fs/ext4/inode-test.c
+++ b/fs/ext4/inode-test.c
@@ -269,4 +269,6 @@ static struct kunit_suite ext4_inode_test_suite = {
 	.test_cases = ext4_inode_test_cases,
 };
 
-kunit_test_suite(ext4_inode_test_suite);
+kunit_test_suites(&ext4_inode_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 629a25d..3313168 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -48,8 +48,6 @@
 
 #include <trace/events/ext4.h>
 
-#define MPAGE_DA_EXTENT_TAIL 0x01
-
 static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
 			      struct ext4_inode_info *ei)
 {
@@ -271,6 +269,7 @@ void ext4_evict_inode(struct inode *inode)
 	if (inode->i_blocks) {
 		err = ext4_truncate(inode);
 		if (err) {
+			ext4_set_errno(inode->i_sb, -err);
 			ext4_error(inode->i_sb,
 				   "couldn't truncate inode %lu (err %d)",
 				   inode->i_ino, err);
@@ -402,7 +401,7 @@ int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
 {
 	int ret;
 
-	if (IS_ENCRYPTED(inode))
+	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
 		return fscrypt_zeroout_range(inode, lblk, pblk, len);
 
 	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
@@ -2478,10 +2477,12 @@ static int mpage_map_and_submit_extent(handle_t *handle,
 			EXT4_I(inode)->i_disksize = disksize;
 		up_write(&EXT4_I(inode)->i_data_sem);
 		err2 = ext4_mark_inode_dirty(handle, inode);
-		if (err2)
+		if (err2) {
+			ext4_set_errno(inode->i_sb, -err2);
 			ext4_error(inode->i_sb,
 				   "Failed to mark inode %lu dirty",
 				   inode->i_ino);
+		}
 		if (!err)
 			err = err2;
 	}
@@ -3448,6 +3449,22 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 	return 0;
 }
 
+static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
+		loff_t length, unsigned flags, struct iomap *iomap,
+		struct iomap *srcmap)
+{
+	int ret;
+
+	/*
+	 * Even for writes we don't need to allocate blocks, so just pretend
+	 * we are reading to save overhead of starting a transaction.
+	 */
+	flags &= ~IOMAP_WRITE;
+	ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
+	WARN_ON_ONCE(iomap->type != IOMAP_MAPPED);
+	return ret;
+}
+
 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
 			  ssize_t written, unsigned flags, struct iomap *iomap)
 {
@@ -3469,6 +3486,11 @@ const struct iomap_ops ext4_iomap_ops = {
 	.iomap_end		= ext4_iomap_end,
 };
 
+const struct iomap_ops ext4_iomap_overwrite_ops = {
+	.iomap_begin		= ext4_iomap_overwrite_begin,
+	.iomap_end		= ext4_iomap_end,
+};
+
 static bool ext4_iomap_is_delalloc(struct inode *inode,
 				   struct ext4_map_blocks *map)
 {
@@ -3701,8 +3723,12 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 		if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
 			/* We expect the key to be set. */
 			BUG_ON(!fscrypt_has_encryption_key(inode));
-			WARN_ON_ONCE(fscrypt_decrypt_pagecache_blocks(
-					page, blocksize, bh_offset(bh)));
+			err = fscrypt_decrypt_pagecache_blocks(page, blocksize,
+							       bh_offset(bh));
+			if (err) {
+				clear_buffer_uptodate(bh);
+				goto unlock;
+			}
 		}
 	}
 	if (ext4_should_journal_data(inode)) {
@@ -3912,9 +3938,6 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 	unsigned int credits;
 	int ret = 0;
 
-	if (!S_ISREG(inode->i_mode))
-		return -EOPNOTSUPP;
-
 	trace_ext4_punch_hole(inode, offset, length, 0);
 
 	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
@@ -4240,6 +4263,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
 	bh = sb_getblk(sb, block);
 	if (unlikely(!bh))
 		return -ENOMEM;
+	if (ext4_simulate_fail(sb, EXT4_SIM_INODE_EIO))
+		goto simulate_eio;
 	if (!buffer_uptodate(bh)) {
 		lock_buffer(bh);
 
@@ -4338,6 +4363,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
 		blk_finish_plug(&plug);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
+		simulate_eio:
+			ext4_set_errno(inode->i_sb, EIO);
 			EXT4_ERROR_INODE_BLOCK(inode, block,
 					       "unable to read itable block");
 			brelse(bh);
@@ -4551,7 +4578,9 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
 					      sizeof(gen));
 	}
 
-	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
+	if (!ext4_inode_csum_verify(inode, raw_inode, ei) ||
+	    ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) {
+		ext4_set_errno(inode->i_sb, EFSBADCRC);
 		ext4_error_inode(inode, function, line, 0,
 				 "iget: checksum invalid");
 		ret = -EFSBADCRC;
@@ -5090,6 +5119,7 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
 		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
 			sync_dirty_buffer(iloc.bh);
 		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
+			ext4_set_errno(inode->i_sb, EIO);
 			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
 					 "IO error syncing inode");
 			err = -EIO;
@@ -5368,7 +5398,8 @@ int ext4_getattr(const struct path *path, struct kstat *stat,
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	unsigned int flags;
 
-	if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
+	if ((request_mask & STATX_BTIME) &&
+	    EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
 		stat->result_mask |= STATX_BTIME;
 		stat->btime.tv_sec = ei->i_crtime.tv_sec;
 		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index e8870ff..a0ec750 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -1377,6 +1377,8 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case EXT4_IOC_CLEAR_ES_CACHE:
 	case EXT4_IOC_GETSTATE:
 	case EXT4_IOC_GET_ES_CACHE:
+	case EXT4_IOC_FSGETXATTR:
+	case EXT4_IOC_FSSETXATTR:
 		break;
 	default:
 		return -ENOIOCTLCMD;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index a3e2767..f648381 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3895,6 +3895,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 	bitmap_bh = ext4_read_block_bitmap(sb, group);
 	if (IS_ERR(bitmap_bh)) {
 		err = PTR_ERR(bitmap_bh);
+		ext4_set_errno(sb, -err);
 		ext4_error(sb, "Error %d reading block bitmap for %u",
 			   err, group);
 		return 0;
@@ -4063,6 +4064,7 @@ void ext4_discard_preallocations(struct inode *inode)
 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
 					     GFP_NOFS|__GFP_NOFAIL);
 		if (err) {
+			ext4_set_errno(sb, -err);
 			ext4_error(sb, "Error %d loading buddy information for %u",
 				   err, group);
 			continue;
@@ -4071,6 +4073,7 @@ void ext4_discard_preallocations(struct inode *inode)
 		bitmap_bh = ext4_read_block_bitmap(sb, group);
 		if (IS_ERR(bitmap_bh)) {
 			err = PTR_ERR(bitmap_bh);
+			ext4_set_errno(sb, -err);
 			ext4_error(sb, "Error %d reading block bitmap for %u",
 					err, group);
 			ext4_mb_unload_buddy(&e4b);
@@ -4325,6 +4328,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
 					     GFP_NOFS|__GFP_NOFAIL);
 		if (err) {
+			ext4_set_errno(sb, -err);
 			ext4_error(sb, "Error %d loading buddy information for %u",
 				   err, group);
 			continue;
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 2305b43..1c44b1a 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -173,8 +173,10 @@ static int kmmpd(void *data)
 		 * (s_mmp_update_interval * 60) seconds.
 		 */
 		if (retval) {
-			if ((failed_writes % 60) == 0)
+			if ((failed_writes % 60) == 0) {
+				ext4_set_errno(sb, -retval);
 				ext4_error(sb, "Error writing to MMP block");
+			}
 			failed_writes++;
 		}
 
@@ -205,6 +207,7 @@ static int kmmpd(void *data)
 
 			retval = read_mmp_block(sb, &bh_check, mmp_block);
 			if (retval) {
+				ext4_set_errno(sb, -retval);
 				ext4_error(sb, "error reading MMP data: %d",
 					   retval);
 				goto exit_thread;
@@ -218,6 +221,7 @@ static int kmmpd(void *data)
 					     "Error while updating MMP info. "
 					     "The filesystem seems to have been"
 					     " multiply mounted.");
+				ext4_set_errno(sb, EBUSY);
 				ext4_error(sb, "abort");
 				put_bh(bh_check);
 				retval = -EBUSY;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 1cb42d9..129d2eb 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -109,7 +109,10 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
 	struct ext4_dir_entry *dirent;
 	int is_dx_block = 0;
 
-	bh = ext4_bread(NULL, inode, block, 0);
+	if (ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_EIO))
+		bh = ERR_PTR(-EIO);
+	else
+		bh = ext4_bread(NULL, inode, block, 0);
 	if (IS_ERR(bh)) {
 		__ext4_warning(inode->i_sb, func, line,
 			       "inode #%lu: lblock %lu: comm %s: "
@@ -153,9 +156,11 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
 	 * caller is sure it should be an index block.
 	 */
 	if (is_dx_block && type == INDEX) {
-		if (ext4_dx_csum_verify(inode, dirent))
+		if (ext4_dx_csum_verify(inode, dirent) &&
+		    !ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_CRC))
 			set_buffer_verified(bh);
 		else {
+			ext4_set_errno(inode->i_sb, EFSBADCRC);
 			ext4_error_inode(inode, func, line, block,
 					 "Directory index failed checksum");
 			brelse(bh);
@@ -163,9 +168,11 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
 		}
 	}
 	if (!is_dx_block) {
-		if (ext4_dirblock_csum_verify(inode, bh))
+		if (ext4_dirblock_csum_verify(inode, bh) &&
+		    !ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_CRC))
 			set_buffer_verified(bh);
 		else {
+			ext4_set_errno(inode->i_sb, EFSBADCRC);
 			ext4_error_inode(inode, func, line, block,
 					 "Directory block failed checksum");
 			brelse(bh);
@@ -1002,7 +1009,6 @@ static int htree_dirblock_to_tree(struct file *dir_file,
 	top = (struct ext4_dir_entry_2 *) ((char *) de +
 					   dir->i_sb->s_blocksize -
 					   EXT4_DIR_REC_LEN(0));
-#ifdef CONFIG_FS_ENCRYPTION
 	/* Check if the directory is encrypted */
 	if (IS_ENCRYPTED(dir)) {
 		err = fscrypt_get_encryption_info(dir);
@@ -1017,7 +1023,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
 			return err;
 		}
 	}
-#endif
+
 	for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
 		if (ext4_check_dir_entry(dir, NULL, de, bh,
 				bh->b_data, bh->b_size,
@@ -1065,9 +1071,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
 	}
 errout:
 	brelse(bh);
-#ifdef CONFIG_FS_ENCRYPTION
 	fscrypt_fname_free_buffer(&fname_crypto_str);
-#endif
 	return count;
 }
 
@@ -1527,6 +1531,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
 			goto next;
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
+			ext4_set_errno(sb, EIO);
 			EXT4_ERROR_INODE(dir, "reading directory lblock %lu",
 					 (unsigned long) block);
 			brelse(bh);
@@ -1537,6 +1542,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
 		    !is_dx_internal_node(dir, block,
 					 (struct ext4_dir_entry *)bh->b_data) &&
 		    !ext4_dirblock_csum_verify(dir, bh)) {
+			ext4_set_errno(sb, EFSBADCRC);
 			EXT4_ERROR_INODE(dir, "checksumming directory "
 					 "block %lu", (unsigned long)block);
 			brelse(bh);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 24aeedb..68b39e7 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -512,17 +512,26 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 		gfp_t gfp_flags = GFP_NOFS;
 		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
 
+		/*
+		 * Since bounce page allocation uses a mempool, we can only use
+		 * a waiting mask (i.e. request guaranteed allocation) on the
+		 * first page of the bio.  Otherwise it can deadlock.
+		 */
+		if (io->io_bio)
+			gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
 	retry_encrypt:
 		bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
 							       0, gfp_flags);
 		if (IS_ERR(bounce_page)) {
 			ret = PTR_ERR(bounce_page);
-			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
-				if (io->io_bio) {
+			if (ret == -ENOMEM &&
+			    (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
+				gfp_flags = GFP_NOFS;
+				if (io->io_bio)
 					ext4_io_submit(io);
-					congestion_wait(BLK_RW_ASYNC, HZ/50);
-				}
-				gfp_flags |= __GFP_NOFAIL;
+				else
+					gfp_flags |= __GFP_NOFAIL;
+				congestion_wait(BLK_RW_ASYNC, HZ/50);
 				goto retry_encrypt;
 			}
 
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index fef7755..c1769af 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -57,6 +57,7 @@ enum bio_post_read_step {
 	STEP_INITIAL = 0,
 	STEP_DECRYPT,
 	STEP_VERITY,
+	STEP_MAX,
 };
 
 struct bio_post_read_ctx {
@@ -106,10 +107,22 @@ static void verity_work(struct work_struct *work)
 {
 	struct bio_post_read_ctx *ctx =
 		container_of(work, struct bio_post_read_ctx, work);
+	struct bio *bio = ctx->bio;
 
-	fsverity_verify_bio(ctx->bio);
+	/*
+	 * fsverity_verify_bio() may call readpages() again, and although verity
+	 * will be disabled for that, decryption may still be needed, causing
+	 * another bio_post_read_ctx to be allocated.  So to guarantee that
+	 * mempool_alloc() never deadlocks we must free the current ctx first.
+	 * This is safe because verity is the last post-read step.
+	 */
+	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
+	mempool_free(ctx, bio_post_read_ctx_pool);
+	bio->bi_private = NULL;
 
-	bio_post_read_processing(ctx);
+	fsverity_verify_bio(bio);
+
+	__read_end_io(bio);
 }
 
 static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
@@ -176,12 +189,11 @@ static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
 	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
 }
 
-static struct bio_post_read_ctx *get_bio_post_read_ctx(struct inode *inode,
-						       struct bio *bio,
-						       pgoff_t first_idx)
+static void ext4_set_bio_post_read_ctx(struct bio *bio,
+				       const struct inode *inode,
+				       pgoff_t first_idx)
 {
 	unsigned int post_read_steps = 0;
-	struct bio_post_read_ctx *ctx = NULL;
 
 	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
 		post_read_steps |= 1 << STEP_DECRYPT;
@@ -190,14 +202,14 @@ static struct bio_post_read_ctx *get_bio_post_read_ctx(struct inode *inode,
 		post_read_steps |= 1 << STEP_VERITY;
 
 	if (post_read_steps) {
-		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
-		if (!ctx)
-			return ERR_PTR(-ENOMEM);
+		/* Due to the mempool, this never fails. */
+		struct bio_post_read_ctx *ctx =
+			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
+
 		ctx->bio = bio;
 		ctx->enabled_steps = post_read_steps;
 		bio->bi_private = ctx;
 	}
-	return ctx;
 }
 
 static inline loff_t ext4_readpage_limit(struct inode *inode)
@@ -358,24 +370,16 @@ int ext4_mpage_readpages(struct address_space *mapping,
 			bio = NULL;
 		}
 		if (bio == NULL) {
-			struct bio_post_read_ctx *ctx;
-
 			/*
 			 * bio_alloc will _always_ be able to allocate a bio if
 			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
 			 */
 			bio = bio_alloc(GFP_KERNEL,
 				min_t(int, nr_pages, BIO_MAX_PAGES));
-			ctx = get_bio_post_read_ctx(inode, bio, page->index);
-			if (IS_ERR(ctx)) {
-				bio_put(bio);
-				bio = NULL;
-				goto set_error_page;
-			}
+			ext4_set_bio_post_read_ctx(bio, inode, page->index);
 			bio_set_dev(bio, bdev);
 			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
 			bio->bi_end_io = mpage_end_io;
-			bio->bi_private = ctx;
 			bio_set_op_attrs(bio, REQ_OP_READ,
 						is_readahead ? REQ_RAHEAD : 0);
 		}
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index a8c0f2b..86a2500 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -824,9 +824,8 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	if (unlikely(err))
 		goto errout;
 
-	n_group_desc = ext4_kvmalloc((gdb_num + 1) *
-				     sizeof(struct buffer_head *),
-				     GFP_NOFS);
+	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
+				GFP_KERNEL);
 	if (!n_group_desc) {
 		err = -ENOMEM;
 		ext4_warning(sb, "not enough memory for %lu groups",
@@ -900,9 +899,8 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
 	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
 	if (IS_ERR(gdb_bh))
 		return PTR_ERR(gdb_bh);
-	n_group_desc = ext4_kvmalloc((gdb_num + 1) *
-				     sizeof(struct buffer_head *),
-				     GFP_NOFS);
+	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
+				GFP_KERNEL);
 	if (!n_group_desc) {
 		brelse(gdb_bh);
 		err = -ENOMEM;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2937a88..8434217 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -154,7 +154,7 @@ ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
 
 	if (bh == NULL)
 		return ERR_PTR(-ENOMEM);
-	if (buffer_uptodate(bh))
+	if (ext4_buffer_uptodate(bh))
 		return bh;
 	ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
 	wait_on_buffer(bh);
@@ -204,26 +204,6 @@ void ext4_superblock_csum_set(struct super_block *sb)
 	es->s_checksum = ext4_superblock_csum(sb, es);
 }
 
-void *ext4_kvmalloc(size_t size, gfp_t flags)
-{
-	void *ret;
-
-	ret = kmalloc(size, flags | __GFP_NOWARN);
-	if (!ret)
-		ret = __vmalloc(size, flags, PAGE_KERNEL);
-	return ret;
-}
-
-void *ext4_kvzalloc(size_t size, gfp_t flags)
-{
-	void *ret;
-
-	ret = kzalloc(size, flags | __GFP_NOWARN);
-	if (!ret)
-		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
-	return ret;
-}
-
 ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
 			       struct ext4_group_desc *bg)
 {
@@ -367,6 +347,8 @@ static void __save_error_info(struct super_block *sb, const char *func,
 	ext4_update_tstamp(es, s_last_error_time);
 	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
 	es->s_last_error_line = cpu_to_le32(line);
+	if (es->s_last_error_errcode == 0)
+		es->s_last_error_errcode = EXT4_ERR_EFSCORRUPTED;
 	if (!es->s_first_error_time) {
 		es->s_first_error_time = es->s_last_error_time;
 		es->s_first_error_time_hi = es->s_last_error_time_hi;
@@ -375,6 +357,7 @@ static void __save_error_info(struct super_block *sb, const char *func,
 		es->s_first_error_line = cpu_to_le32(line);
 		es->s_first_error_ino = es->s_last_error_ino;
 		es->s_first_error_block = es->s_last_error_block;
+		es->s_first_error_errcode = es->s_last_error_errcode;
 	}
 	/*
 	 * Start the daily error reporting function if it hasn't been
@@ -631,6 +614,66 @@ const char *ext4_decode_error(struct super_block *sb, int errno,
 	return errstr;
 }
 
+void ext4_set_errno(struct super_block *sb, int err)
+{
+	if (err < 0)
+		err = -err;
+
+	switch (err) {
+	case EIO:
+		err = EXT4_ERR_EIO;
+		break;
+	case ENOMEM:
+		err = EXT4_ERR_ENOMEM;
+		break;
+	case EFSBADCRC:
+		err = EXT4_ERR_EFSBADCRC;
+		break;
+	case EFSCORRUPTED:
+		err = EXT4_ERR_EFSCORRUPTED;
+		break;
+	case ENOSPC:
+		err = EXT4_ERR_ENOSPC;
+		break;
+	case ENOKEY:
+		err = EXT4_ERR_ENOKEY;
+		break;
+	case EROFS:
+		err = EXT4_ERR_EROFS;
+		break;
+	case EFBIG:
+		err = EXT4_ERR_EFBIG;
+		break;
+	case EEXIST:
+		err = EXT4_ERR_EEXIST;
+		break;
+	case ERANGE:
+		err = EXT4_ERR_ERANGE;
+		break;
+	case EOVERFLOW:
+		err = EXT4_ERR_EOVERFLOW;
+		break;
+	case EBUSY:
+		err = EXT4_ERR_EBUSY;
+		break;
+	case ENOTDIR:
+		err = EXT4_ERR_ENOTDIR;
+		break;
+	case ENOTEMPTY:
+		err = EXT4_ERR_ENOTEMPTY;
+		break;
+	case ESHUTDOWN:
+		err = EXT4_ERR_ESHUTDOWN;
+		break;
+	case EFAULT:
+		err = EXT4_ERR_EFAULT;
+		break;
+	default:
+		err = EXT4_ERR_UNKNOWN;
+	}
+	EXT4_SB(sb)->s_es->s_last_error_errcode = err;
+}
+
 /* __ext4_std_error decodes expected errors from journaling functions
  * automatically and invokes the appropriate error response.  */
 
@@ -655,6 +698,7 @@ void __ext4_std_error(struct super_block *sb, const char *function,
 		       sb->s_id, function, line, errstr);
 	}
 
+	ext4_set_errno(sb, -errno);
 	save_error_info(sb, function, line);
 	ext4_handle_error(sb);
 }
@@ -982,8 +1026,10 @@ static void ext4_put_super(struct super_block *sb)
 		aborted = is_journal_aborted(sbi->s_journal);
 		err = jbd2_journal_destroy(sbi->s_journal);
 		sbi->s_journal = NULL;
-		if ((err < 0) && !aborted)
+		if ((err < 0) && !aborted) {
+			ext4_set_errno(sb, -err);
 			ext4_abort(sb, "Couldn't clean up the journal");
+		}
 	}
 
 	ext4_unregister_sysfs(sb);
@@ -1085,8 +1131,6 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
 	ei->i_es_shk_nr = 0;
 	ei->i_es_shrink_lblk = 0;
 	ei->i_reserved_data_blocks = 0;
-	ei->i_da_metadata_calc_len = 0;
-	ei->i_da_metadata_calc_last_lblock = 0;
 	spin_lock_init(&(ei->i_block_reservation_lock));
 	ext4_init_pending_tree(&ei->i_pending_tree);
 #ifdef CONFIG_QUOTA
@@ -1548,6 +1592,7 @@ static const match_table_t tokens = {
 	{Opt_auto_da_alloc, "auto_da_alloc"},
 	{Opt_noauto_da_alloc, "noauto_da_alloc"},
 	{Opt_dioread_nolock, "dioread_nolock"},
+	{Opt_dioread_lock, "nodioread_nolock"},
 	{Opt_dioread_lock, "dioread_lock"},
 	{Opt_discard, "discard"},
 	{Opt_nodiscard, "nodiscard"},
@@ -3720,6 +3765,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		set_opt(sb, NO_UID32);
 	/* xattr user namespace & acls are now defaulted on */
 	set_opt(sb, XATTR_USER);
+	set_opt(sb, DIOREAD_NOLOCK);
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
 	set_opt(sb, POSIX_ACL);
 #endif
@@ -3887,9 +3933,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 #endif
 
 	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
-		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
-			    "with data=journal disables delayed "
-			    "allocation and O_DIRECT support!\n");
+		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, and O_DIRECT support!\n");
+		clear_opt(sb, DIOREAD_NOLOCK);
 		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
 			ext4_msg(sb, KERN_ERR, "can't mount with "
 				 "both data=journal and delalloc");
@@ -5540,9 +5585,15 @@ static int ext4_statfs_project(struct super_block *sb,
 		return PTR_ERR(dquot);
 	spin_lock(&dquot->dq_dqb_lock);
 
-	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
-		 dquot->dq_dqb.dqb_bsoftlimit :
-		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
+	limit = 0;
+	if (dquot->dq_dqb.dqb_bsoftlimit &&
+	    (!limit || dquot->dq_dqb.dqb_bsoftlimit < limit))
+		limit = dquot->dq_dqb.dqb_bsoftlimit;
+	if (dquot->dq_dqb.dqb_bhardlimit &&
+	    (!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
+		limit = dquot->dq_dqb.dqb_bhardlimit;
+	limit >>= sb->s_blocksize_bits;
+
 	if (limit && buf->f_blocks > limit) {
 		curblock = (dquot->dq_dqb.dqb_curspace +
 			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
@@ -5552,9 +5603,14 @@ static int ext4_statfs_project(struct super_block *sb,
 			 (buf->f_blocks - curblock) : 0;
 	}
 
-	limit = dquot->dq_dqb.dqb_isoftlimit ?
-		dquot->dq_dqb.dqb_isoftlimit :
-		dquot->dq_dqb.dqb_ihardlimit;
+	limit = 0;
+	if (dquot->dq_dqb.dqb_isoftlimit &&
+	    (!limit || dquot->dq_dqb.dqb_isoftlimit < limit))
+		limit = dquot->dq_dqb.dqb_isoftlimit;
+	if (dquot->dq_dqb.dqb_ihardlimit &&
+	    (!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
+		limit = dquot->dq_dqb.dqb_ihardlimit;
+
 	if (limit && buf->f_files > limit) {
 		buf->f_files = limit;
 		buf->f_ffree =
@@ -5987,7 +6043,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
 		bh = ext4_bread(handle, inode, blk,
 				EXT4_GET_BLOCKS_CREATE |
 				EXT4_GET_BLOCKS_METADATA_NOFAIL);
-	} while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
+	} while (PTR_ERR(bh) == -ENOSPC &&
 		 ext4_should_retry_alloc(inode->i_sb, &retries));
 	if (IS_ERR(bh))
 		return PTR_ERR(bh);
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index eb1efad..d218ebd 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -29,6 +29,10 @@ typedef enum {
 	attr_last_error_time,
 	attr_feature,
 	attr_pointer_ui,
+	attr_pointer_ul,
+	attr_pointer_u64,
+	attr_pointer_u8,
+	attr_pointer_string,
 	attr_pointer_atomic,
 	attr_journal_task,
 } attr_id_t;
@@ -46,6 +50,7 @@ struct ext4_attr {
 	struct attribute attr;
 	short attr_id;
 	short attr_ptr;
+	unsigned short attr_size;
 	union {
 		int offset;
 		void *explicit_ptr;
@@ -154,12 +159,35 @@ static struct ext4_attr ext4_attr_##_name = {			\
 	},							\
 }
 
+#define EXT4_ATTR_STRING(_name,_mode,_size,_struct,_elname)	\
+static struct ext4_attr ext4_attr_##_name = {			\
+	.attr = {.name = __stringify(_name), .mode = _mode },	\
+	.attr_id = attr_pointer_string,				\
+	.attr_size = _size,					\
+	.attr_ptr = ptr_##_struct##_offset,			\
+	.u = {							\
+		.offset = offsetof(struct _struct, _elname),\
+	},							\
+}
+
 #define EXT4_RO_ATTR_ES_UI(_name,_elname)				\
 	EXT4_ATTR_OFFSET(_name, 0444, pointer_ui, ext4_super_block, _elname)
 
+#define EXT4_RO_ATTR_ES_U8(_name,_elname)				\
+	EXT4_ATTR_OFFSET(_name, 0444, pointer_u8, ext4_super_block, _elname)
+
+#define EXT4_RO_ATTR_ES_U64(_name,_elname)				\
+	EXT4_ATTR_OFFSET(_name, 0444, pointer_u64, ext4_super_block, _elname)
+
+#define EXT4_RO_ATTR_ES_STRING(_name,_elname,_size)			\
+	EXT4_ATTR_STRING(_name, 0444, _size, ext4_super_block, _elname)
+
 #define EXT4_RW_ATTR_SBI_UI(_name,_elname)	\
 	EXT4_ATTR_OFFSET(_name, 0644, pointer_ui, ext4_sb_info, _elname)
 
+#define EXT4_RW_ATTR_SBI_UL(_name,_elname)	\
+	EXT4_ATTR_OFFSET(_name, 0644, pointer_ul, ext4_sb_info, _elname)
+
 #define EXT4_ATTR_PTR(_name,_mode,_id,_ptr) \
 static struct ext4_attr ext4_attr_##_name = {			\
 	.attr = {.name = __stringify(_name), .mode = _mode },	\
@@ -194,7 +222,20 @@ EXT4_RW_ATTR_SBI_UI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.int
 EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
 EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
 EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
+#ifdef CONFIG_EXT4_DEBUG
+EXT4_RW_ATTR_SBI_UL(simulate_fail, s_simulate_fail);
+#endif
 EXT4_RO_ATTR_ES_UI(errors_count, s_error_count);
+EXT4_RO_ATTR_ES_U8(first_error_errcode, s_first_error_errcode);
+EXT4_RO_ATTR_ES_U8(last_error_errcode, s_last_error_errcode);
+EXT4_RO_ATTR_ES_UI(first_error_ino, s_first_error_ino);
+EXT4_RO_ATTR_ES_UI(last_error_ino, s_last_error_ino);
+EXT4_RO_ATTR_ES_U64(first_error_block, s_first_error_block);
+EXT4_RO_ATTR_ES_U64(last_error_block, s_last_error_block);
+EXT4_RO_ATTR_ES_UI(first_error_line, s_first_error_line);
+EXT4_RO_ATTR_ES_UI(last_error_line, s_last_error_line);
+EXT4_RO_ATTR_ES_STRING(first_error_func, s_first_error_func, 32);
+EXT4_RO_ATTR_ES_STRING(last_error_func, s_last_error_func, 32);
 EXT4_ATTR(first_error_time, 0444, first_error_time);
 EXT4_ATTR(last_error_time, 0444, last_error_time);
 EXT4_ATTR(journal_task, 0444, journal_task);
@@ -225,9 +266,22 @@ static struct attribute *ext4_attrs[] = {
 	ATTR_LIST(msg_ratelimit_interval_ms),
 	ATTR_LIST(msg_ratelimit_burst),
 	ATTR_LIST(errors_count),
+	ATTR_LIST(first_error_ino),
+	ATTR_LIST(last_error_ino),
+	ATTR_LIST(first_error_block),
+	ATTR_LIST(last_error_block),
+	ATTR_LIST(first_error_line),
+	ATTR_LIST(last_error_line),
+	ATTR_LIST(first_error_func),
+	ATTR_LIST(last_error_func),
+	ATTR_LIST(first_error_errcode),
+	ATTR_LIST(last_error_errcode),
 	ATTR_LIST(first_error_time),
 	ATTR_LIST(last_error_time),
 	ATTR_LIST(journal_task),
+#ifdef CONFIG_EXT4_DEBUG
+	ATTR_LIST(simulate_fail),
+#endif
 	NULL,
 };
 ATTRIBUTE_GROUPS(ext4);
@@ -280,7 +334,7 @@ static void *calc_ptr(struct ext4_attr *a, struct ext4_sb_info *sbi)
 
 static ssize_t __print_tstamp(char *buf, __le32 lo, __u8 hi)
 {
-	return snprintf(buf, PAGE_SIZE, "%lld",
+	return snprintf(buf, PAGE_SIZE, "%lld\n",
 			((time64_t)hi << 32) + le32_to_cpu(lo));
 }
 
@@ -318,6 +372,30 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
 		else
 			return snprintf(buf, PAGE_SIZE, "%u\n",
 					*((unsigned int *) ptr));
+	case attr_pointer_ul:
+		if (!ptr)
+			return 0;
+		return snprintf(buf, PAGE_SIZE, "%lu\n",
+				*((unsigned long *) ptr));
+	case attr_pointer_u8:
+		if (!ptr)
+			return 0;
+		return snprintf(buf, PAGE_SIZE, "%u\n",
+				*((unsigned char *) ptr));
+	case attr_pointer_u64:
+		if (!ptr)
+			return 0;
+		if (a->attr_ptr == ptr_ext4_super_block_offset)
+			return snprintf(buf, PAGE_SIZE, "%llu\n",
+					le64_to_cpup(ptr));
+		else
+			return snprintf(buf, PAGE_SIZE, "%llu\n",
+					*((unsigned long long *) ptr));
+	case attr_pointer_string:
+		if (!ptr)
+			return 0;
+		return snprintf(buf, PAGE_SIZE, "%.*s\n", a->attr_size,
+				(char *) ptr);
 	case attr_pointer_atomic:
 		if (!ptr)
 			return 0;
@@ -361,6 +439,14 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
 		else
 			*((unsigned int *) ptr) = t;
 		return len;
+	case attr_pointer_ul:
+		if (!ptr)
+			return 0;
+		ret = kstrtoul(skip_spaces(buf), 0, &t);
+		if (ret)
+			return ret;
+		*((unsigned long *) ptr) = t;
+		return len;
 	case attr_inode_readahead:
 		return inode_readahead_blks_store(sbi, buf, len);
 	case attr_trigger_test_error:
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 8966a54..8cac7d9 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1456,7 +1456,7 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
 	if (!ce)
 		return NULL;
 
-	ea_data = ext4_kvmalloc(value_len, GFP_NOFS);
+	ea_data = kvmalloc(value_len, GFP_KERNEL);
 	if (!ea_data) {
 		mb_cache_entry_put(ea_inode_cache, ce);
 		return NULL;
@@ -2879,9 +2879,11 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
 		bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
 		if (IS_ERR(bh)) {
 			error = PTR_ERR(bh);
-			if (error == -EIO)
+			if (error == -EIO) {
+				ext4_set_errno(inode->i_sb, EIO);
 				EXT4_ERROR_INODE(inode, "block %llu read error",
 						 EXT4_I(inode)->i_file_acl);
+			}
 			bh = NULL;
 			goto cleanup;
 		}
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index 599fb91..f0faada 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -22,7 +22,7 @@
 
 config F2FS_STAT_FS
 	bool "F2FS Status Information"
-	depends on F2FS_FS && DEBUG_FS
+	depends on F2FS_FS
 	default y
 	help
 	  /sys/kernel/debug/f2fs/ contains information about all the partitions
@@ -93,3 +93,28 @@
 	  Test F2FS to inject faults such as ENOMEM, ENOSPC, and so on.
 
 	  If unsure, say N.
+
+config F2FS_FS_COMPRESSION
+	bool "F2FS compression feature"
+	depends on F2FS_FS
+	help
+	  Enable filesystem-level compression on f2fs regular files;
+	  multiple back-end compression algorithms are supported.
+
+config F2FS_FS_LZO
+	bool "LZO compression support"
+	depends on F2FS_FS_COMPRESSION
+	select LZO_COMPRESS
+	select LZO_DECOMPRESS
+	default y
+	help
+	  Support the LZO compression algorithm. If unsure, say Y.
+
+config F2FS_FS_LZ4
+	bool "LZ4 compression support"
+	depends on F2FS_FS_COMPRESSION
+	select LZ4_COMPRESS
+	select LZ4_DECOMPRESS
+	default y
+	help
+	  Support the LZ4 compression algorithm. If unsure, say Y.
diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile
index 2aaecc6..ee7316b 100644
--- a/fs/f2fs/Makefile
+++ b/fs/f2fs/Makefile
@@ -9,3 +9,4 @@
 f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
 f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o
 f2fs-$(CONFIG_FS_VERITY) += verity.o
+f2fs-$(CONFIG_F2FS_FS_COMPRESSION) += compress.o
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index ffdaba0..44e84ac 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -1509,10 +1509,10 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	f2fs_wait_on_all_pages_writeback(sbi);
 
 	/*
-	 * invalidate intermediate page cache borrowed from meta inode
-	 * which are used for migration of encrypted inode's blocks.
+	 * invalidate intermediate page cache borrowed from meta inode which are
+	 * used for migration of encrypted or verity inode's blocks.
 	 */
-	if (f2fs_sb_has_encrypt(sbi))
+	if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi))
 		invalidate_mapping_pages(META_MAPPING(sbi),
 				MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
 
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
new file mode 100644
index 0000000..d8a64be
--- /dev/null
+++ b/fs/f2fs/compress.c
@@ -0,0 +1,1176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * f2fs compress support
+ *
+ * Copyright (c) 2019 Chao Yu <chao@kernel.org>
+ */
+
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+#include <linux/writeback.h>
+#include <linux/backing-dev.h>
+#include <linux/lzo.h>
+#include <linux/lz4.h>
+
+#include "f2fs.h"
+#include "node.h"
+#include <trace/events/f2fs.h>
+
+struct f2fs_compress_ops {
+	int (*init_compress_ctx)(struct compress_ctx *cc);
+	void (*destroy_compress_ctx)(struct compress_ctx *cc);
+	int (*compress_pages)(struct compress_ctx *cc);
+	int (*decompress_pages)(struct decompress_io_ctx *dic);
+};
+
+static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
+{
+	return index & (cc->cluster_size - 1);
+}
+
+static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
+{
+	return index >> cc->log_cluster_size;
+}
+
+static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
+{
+	return cc->cluster_idx << cc->log_cluster_size;
+}
+
+bool f2fs_is_compressed_page(struct page *page)
+{
+	if (!PagePrivate(page))
+		return false;
+	if (!page_private(page))
+		return false;
+	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
+		return false;
+	f2fs_bug_on(F2FS_M_SB(page->mapping),
+		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
+	return true;
+}
+
+static void f2fs_set_compressed_page(struct page *page,
+		struct inode *inode, pgoff_t index, void *data, refcount_t *r)
+{
+	SetPagePrivate(page);
+	set_page_private(page, (unsigned long)data);
+
+	/* i_crypto_info and iv index */
+	page->index = index;
+	page->mapping = inode->i_mapping;
+	if (r)
+		refcount_inc(r);
+}
+
+static void f2fs_put_compressed_page(struct page *page)
+{
+	set_page_private(page, (unsigned long)NULL);
+	ClearPagePrivate(page);
+	page->mapping = NULL;
+	unlock_page(page);
+	put_page(page);
+}
+
+static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (!cc->rpages[i])
+			continue;
+		if (unlock)
+			unlock_page(cc->rpages[i]);
+		else
+			put_page(cc->rpages[i]);
+	}
+}
+
+static void f2fs_put_rpages(struct compress_ctx *cc)
+{
+	f2fs_drop_rpages(cc, cc->cluster_size, false);
+}
+
+static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
+{
+	f2fs_drop_rpages(cc, len, true);
+}
+
+static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
+				struct address_space *mapping,
+				pgoff_t start, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		struct page *page = find_get_page(mapping, start + i);
+
+		put_page(page);
+		put_page(page);
+	}
+}
+
+static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
+		struct writeback_control *wbc, bool redirty, int unlock)
+{
+	unsigned int i;
+
+	for (i = 0; i < cc->cluster_size; i++) {
+		if (!cc->rpages[i])
+			continue;
+		if (redirty)
+			redirty_page_for_writepage(wbc, cc->rpages[i]);
+		f2fs_put_page(cc->rpages[i], unlock);
+	}
+}
+
+struct page *f2fs_compress_control_page(struct page *page)
+{
+	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
+}
+
+int f2fs_init_compress_ctx(struct compress_ctx *cc)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
+
+	if (cc->nr_rpages)
+		return 0;
+
+	cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
+					cc->log_cluster_size, GFP_NOFS);
+	return cc->rpages ? 0 : -ENOMEM;
+}
+
+void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
+{
+	kfree(cc->rpages);
+	cc->rpages = NULL;
+	cc->nr_rpages = 0;
+	cc->nr_cpages = 0;
+	cc->cluster_idx = NULL_CLUSTER;
+}
+
+void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
+{
+	unsigned int cluster_ofs;
+
+	if (!f2fs_cluster_can_merge_page(cc, page->index))
+		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
+
+	cluster_ofs = offset_in_cluster(cc, page->index);
+	cc->rpages[cluster_ofs] = page;
+	cc->nr_rpages++;
+	cc->cluster_idx = cluster_idx(cc, page->index);
+}
+
+#ifdef CONFIG_F2FS_FS_LZO
+static int lzo_init_compress_ctx(struct compress_ctx *cc)
+{
+	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
+				LZO1X_MEM_COMPRESS, GFP_NOFS);
+	if (!cc->private)
+		return -ENOMEM;
+
+	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
+	return 0;
+}
+
+static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
+{
+	kvfree(cc->private);
+	cc->private = NULL;
+}
+
+static int lzo_compress_pages(struct compress_ctx *cc)
+{
+	int ret;
+
+	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
+					&cc->clen, cc->private);
+	if (ret != LZO_E_OK) {
+		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
+				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int lzo_decompress_pages(struct decompress_io_ctx *dic)
+{
+	int ret;
+
+	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
+						dic->rbuf, &dic->rlen);
+	if (ret != LZO_E_OK) {
+		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
+				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
+		return -EIO;
+	}
+
+	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
+		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
+					"expected:%lu\n", KERN_ERR,
+					F2FS_I_SB(dic->inode)->sb->s_id,
+					dic->rlen,
+					PAGE_SIZE << dic->log_cluster_size);
+		return -EIO;
+	}
+	return 0;
+}
+
+static const struct f2fs_compress_ops f2fs_lzo_ops = {
+	.init_compress_ctx	= lzo_init_compress_ctx,
+	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
+	.compress_pages		= lzo_compress_pages,
+	.decompress_pages	= lzo_decompress_pages,
+};
+#endif
+
+#ifdef CONFIG_F2FS_FS_LZ4
+static int lz4_init_compress_ctx(struct compress_ctx *cc)
+{
+	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
+				LZ4_MEM_COMPRESS, GFP_NOFS);
+	if (!cc->private)
+		return -ENOMEM;
+
+	cc->clen = LZ4_compressBound(PAGE_SIZE << cc->log_cluster_size);
+	return 0;
+}
+
+static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
+{
+	kvfree(cc->private);
+	cc->private = NULL;
+}
+
+static int lz4_compress_pages(struct compress_ctx *cc)
+{
+	int len;
+
+	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
+						cc->clen, cc->private);
+	if (!len) {
+		printk_ratelimited("%sF2FS-fs (%s): lz4 compress failed\n",
+				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id);
+		return -EIO;
+	}
+	cc->clen = len;
+	return 0;
+}
+
+static int lz4_decompress_pages(struct decompress_io_ctx *dic)
+{
+	int ret;
+
+	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
+						dic->clen, dic->rlen);
+	if (ret < 0) {
+		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
+				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
+		return -EIO;
+	}
+
+	if (ret != PAGE_SIZE << dic->log_cluster_size) {
+		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
+					"expected:%lu\n", KERN_ERR,
+					F2FS_I_SB(dic->inode)->sb->s_id,
+					dic->rlen,
+					PAGE_SIZE << dic->log_cluster_size);
+		return -EIO;
+	}
+	return 0;
+}
+
+static const struct f2fs_compress_ops f2fs_lz4_ops = {
+	.init_compress_ctx	= lz4_init_compress_ctx,
+	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
+	.compress_pages		= lz4_compress_pages,
+	.decompress_pages	= lz4_decompress_pages,
+};
+#endif
+
+static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
+#ifdef CONFIG_F2FS_FS_LZO
+	&f2fs_lzo_ops,
+#else
+	NULL,
+#endif
+#ifdef CONFIG_F2FS_FS_LZ4
+	&f2fs_lz4_ops,
+#else
+	NULL,
+#endif
+};
+
+bool f2fs_is_compress_backend_ready(struct inode *inode)
+{
+	if (!f2fs_compressed_file(inode))
+		return true;
+	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
+}
+
+static struct page *f2fs_grab_page(void)
+{
+	struct page *page;
+
+	page = alloc_page(GFP_NOFS);
+	if (!page)
+		return NULL;
+	lock_page(page);
+	return page;
+}
+
+static int f2fs_compress_pages(struct compress_ctx *cc)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
+	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
+	const struct f2fs_compress_ops *cops =
+				f2fs_cops[fi->i_compress_algorithm];
+	unsigned int max_len, nr_cpages;
+	int i, ret;
+
+	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
+				cc->cluster_size, fi->i_compress_algorithm);
+
+	ret = cops->init_compress_ctx(cc);
+	if (ret)
+		goto out;
+
+	max_len = COMPRESS_HEADER_SIZE + cc->clen;
+	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
+
+	cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
+					cc->nr_cpages, GFP_NOFS);
+	if (!cc->cpages) {
+		ret = -ENOMEM;
+		goto destroy_compress_ctx;
+	}
+
+	for (i = 0; i < cc->nr_cpages; i++) {
+		cc->cpages[i] = f2fs_grab_page();
+		if (!cc->cpages[i]) {
+			ret = -ENOMEM;
+			goto out_free_cpages;
+		}
+	}
+
+	cc->rbuf = vmap(cc->rpages, cc->cluster_size, VM_MAP, PAGE_KERNEL_RO);
+	if (!cc->rbuf) {
+		ret = -ENOMEM;
+		goto out_free_cpages;
+	}
+
+	cc->cbuf = vmap(cc->cpages, cc->nr_cpages, VM_MAP, PAGE_KERNEL);
+	if (!cc->cbuf) {
+		ret = -ENOMEM;
+		goto out_vunmap_rbuf;
+	}
+
+	ret = cops->compress_pages(cc);
+	if (ret)
+		goto out_vunmap_cbuf;
+
+	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
+
+	if (cc->clen > max_len) {
+		ret = -EAGAIN;
+		goto out_vunmap_cbuf;
+	}
+
+	cc->cbuf->clen = cpu_to_le32(cc->clen);
+	cc->cbuf->chksum = cpu_to_le32(0);
+
+	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
+		cc->cbuf->reserved[i] = cpu_to_le32(0);
+
+	vunmap(cc->cbuf);
+	vunmap(cc->rbuf);
+
+	nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
+
+	for (i = nr_cpages; i < cc->nr_cpages; i++) {
+		f2fs_put_compressed_page(cc->cpages[i]);
+		cc->cpages[i] = NULL;
+	}
+
+	cc->nr_cpages = nr_cpages;
+
+	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
+							cc->clen, ret);
+	return 0;
+
+out_vunmap_cbuf:
+	vunmap(cc->cbuf);
+out_vunmap_rbuf:
+	vunmap(cc->rbuf);
+out_free_cpages:
+	for (i = 0; i < cc->nr_cpages; i++) {
+		if (cc->cpages[i])
+			f2fs_put_compressed_page(cc->cpages[i]);
+	}
+	kfree(cc->cpages);
+	cc->cpages = NULL;
+destroy_compress_ctx:
+	cops->destroy_compress_ctx(cc);
+out:
+	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
+							cc->clen, ret);
+	return ret;
+}
+
+void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
+{
+	struct decompress_io_ctx *dic =
+			(struct decompress_io_ctx *)page_private(page);
+	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
+	const struct f2fs_compress_ops *cops =
+			f2fs_cops[fi->i_compress_algorithm];
+	int ret;
+
+	dec_page_count(sbi, F2FS_RD_DATA);
+
+	if (bio->bi_status || PageError(page))
+		dic->failed = true;
+
+	if (refcount_dec_not_one(&dic->ref))
+		return;
+
+	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
+				dic->cluster_size, fi->i_compress_algorithm);
+
+	/* submit partial compressed pages */
+	if (dic->failed) {
+		ret = -EIO;
+		goto out_free_dic;
+	}
+
+	dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
+	if (!dic->rbuf) {
+		ret = -ENOMEM;
+		goto out_free_dic;
+	}
+
+	dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
+	if (!dic->cbuf) {
+		ret = -ENOMEM;
+		goto out_vunmap_rbuf;
+	}
+
+	dic->clen = le32_to_cpu(dic->cbuf->clen);
+	dic->rlen = PAGE_SIZE << dic->log_cluster_size;
+
+	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
+		ret = -EFSCORRUPTED;
+		goto out_vunmap_cbuf;
+	}
+
+	ret = cops->decompress_pages(dic);
+
+out_vunmap_cbuf:
+	vunmap(dic->cbuf);
+out_vunmap_rbuf:
+	vunmap(dic->rbuf);
+out_free_dic:
+	if (!verity)
+		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
+								ret, false);
+
+	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
+							dic->clen, ret);
+	if (!verity)
+		f2fs_free_dic(dic);
+}
+
+static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
+{
+	if (cc->cluster_idx == NULL_CLUSTER)
+		return true;
+	return cc->cluster_idx == cluster_idx(cc, index);
+}
+
+bool f2fs_cluster_is_empty(struct compress_ctx *cc)
+{
+	return cc->nr_rpages == 0;
+}
+
+static bool f2fs_cluster_is_full(struct compress_ctx *cc)
+{
+	return cc->cluster_size == cc->nr_rpages;
+}
+
+bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
+{
+	if (f2fs_cluster_is_empty(cc))
+		return true;
+	return is_page_in_cluster(cc, index);
+}
+
+static bool __cluster_may_compress(struct compress_ctx *cc)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
+	loff_t i_size = i_size_read(cc->inode);
+	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
+	int i;
+
+	for (i = 0; i < cc->cluster_size; i++) {
+		struct page *page = cc->rpages[i];
+
+		f2fs_bug_on(sbi, !page);
+
+		if (unlikely(f2fs_cp_error(sbi)))
+			return false;
+		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+			return false;
+
+		/* beyond EOF */
+		if (page->index >= nr_pages)
+			return false;
+	}
+	return true;
+}
+
+/* return # of compressed block addresses */
+static int f2fs_compressed_blocks(struct compress_ctx *cc)
+{
+	struct dnode_of_data dn;
+	int ret;
+
+	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
+	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
+							LOOKUP_NODE);
+	if (ret) {
+		if (ret == -ENOENT)
+			ret = 0;
+		goto fail;
+	}
+
+	if (dn.data_blkaddr == COMPRESS_ADDR) {
+		int i;
+
+		ret = 1;
+		for (i = 1; i < cc->cluster_size; i++) {
+			block_t blkaddr;
+
+			blkaddr = datablock_addr(dn.inode,
+					dn.node_page, dn.ofs_in_node + i);
+			if (blkaddr != NULL_ADDR)
+				ret++;
+		}
+	}
+fail:
+	f2fs_put_dnode(&dn);
+	return ret;
+}
+
+int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
+{
+	struct compress_ctx cc = {
+		.inode = inode,
+		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
+		.cluster_size = F2FS_I(inode)->i_cluster_size,
+		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
+	};
+
+	return f2fs_compressed_blocks(&cc);
+}
+
+static bool cluster_may_compress(struct compress_ctx *cc)
+{
+	if (!f2fs_compressed_file(cc->inode))
+		return false;
+	if (f2fs_is_atomic_file(cc->inode))
+		return false;
+	if (f2fs_is_mmap_file(cc->inode))
+		return false;
+	if (!f2fs_cluster_is_full(cc))
+		return false;
+	return __cluster_may_compress(cc);
+}
+
+static void set_cluster_writeback(struct compress_ctx *cc)
+{
+	int i;
+
+	for (i = 0; i < cc->cluster_size; i++) {
+		if (cc->rpages[i])
+			set_page_writeback(cc->rpages[i]);
+	}
+}
+
+static void set_cluster_dirty(struct compress_ctx *cc)
+{
+	int i;
+
+	for (i = 0; i < cc->cluster_size; i++)
+		if (cc->rpages[i])
+			set_page_dirty(cc->rpages[i]);
+}
+
+static int prepare_compress_overwrite(struct compress_ctx *cc,
+		struct page **pagep, pgoff_t index, void **fsdata)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
+	struct address_space *mapping = cc->inode->i_mapping;
+	struct page *page;
+	struct dnode_of_data dn;
+	sector_t last_block_in_bio;
+	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
+	pgoff_t start_idx = start_idx_of_cluster(cc);
+	int i, ret;
+	bool prealloc;
+
+retry:
+	ret = f2fs_compressed_blocks(cc);
+	if (ret <= 0)
+		return ret;
+
+	/* compressed case */
+	prealloc = (ret < cc->cluster_size);
+
+	ret = f2fs_init_compress_ctx(cc);
+	if (ret)
+		return ret;
+
+	/* keep page reference to avoid page reclaim */
+	for (i = 0; i < cc->cluster_size; i++) {
+		page = f2fs_pagecache_get_page(mapping, start_idx + i,
+							fgp_flag, GFP_NOFS);
+		if (!page) {
+			ret = -ENOMEM;
+			goto unlock_pages;
+		}
+
+		if (PageUptodate(page))
+			unlock_page(page);
+		else
+			f2fs_compress_ctx_add_page(cc, page);
+	}
+
+	if (!f2fs_cluster_is_empty(cc)) {
+		struct bio *bio = NULL;
+
+		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
+						&last_block_in_bio, false);
+		f2fs_destroy_compress_ctx(cc);
+		if (ret)
+			goto release_pages;
+		if (bio)
+			f2fs_submit_bio(sbi, bio, DATA);
+
+		ret = f2fs_init_compress_ctx(cc);
+		if (ret)
+			goto release_pages;
+	}
+
+	for (i = 0; i < cc->cluster_size; i++) {
+		f2fs_bug_on(sbi, cc->rpages[i]);
+
+		page = find_lock_page(mapping, start_idx + i);
+		f2fs_bug_on(sbi, !page);
+
+		f2fs_wait_on_page_writeback(page, DATA, true, true);
+
+		f2fs_compress_ctx_add_page(cc, page);
+		f2fs_put_page(page, 0);
+
+		if (!PageUptodate(page)) {
+			f2fs_unlock_rpages(cc, i + 1);
+			f2fs_put_rpages_mapping(cc, mapping, start_idx,
+					cc->cluster_size);
+			f2fs_destroy_compress_ctx(cc);
+			goto retry;
+		}
+	}
+
+	if (prealloc) {
+		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+
+		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
+
+		for (i = cc->cluster_size - 1; i > 0; i--) {
+			ret = f2fs_get_block(&dn, start_idx + i);
+			if (ret) {
+				i = cc->cluster_size;
+				break;
+			}
+
+			if (dn.data_blkaddr != NEW_ADDR)
+				break;
+		}
+
+		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+	}
+
+	if (likely(!ret)) {
+		*fsdata = cc->rpages;
+		*pagep = cc->rpages[offset_in_cluster(cc, index)];
+		return cc->cluster_size;
+	}
+
+unlock_pages:
+	f2fs_unlock_rpages(cc, i);
+release_pages:
+	f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
+	f2fs_destroy_compress_ctx(cc);
+	return ret;
+}
+
+int f2fs_prepare_compress_overwrite(struct inode *inode,
+		struct page **pagep, pgoff_t index, void **fsdata)
+{
+	struct compress_ctx cc = {
+		.inode = inode,
+		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
+		.cluster_size = F2FS_I(inode)->i_cluster_size,
+		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
+		.rpages = NULL,
+		.nr_rpages = 0,
+	};
+
+	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
+}
+
+bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
+					pgoff_t index, unsigned copied)
+{
+	struct compress_ctx cc = {
+		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
+		.cluster_size = F2FS_I(inode)->i_cluster_size,
+		.rpages = fsdata,
+	};
+	bool first_index = (index == cc.rpages[0]->index);
+
+	if (copied)
+		set_cluster_dirty(&cc);
+
+	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
+	f2fs_destroy_compress_ctx(&cc);
+
+	return first_index;
+}
+
+static int f2fs_write_compressed_pages(struct compress_ctx *cc,
+					int *submitted,
+					struct writeback_control *wbc,
+					enum iostat_type io_type)
+{
+	struct inode *inode = cc->inode;
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
+	struct f2fs_io_info fio = {
+		.sbi = sbi,
+		.ino = cc->inode->i_ino,
+		.type = DATA,
+		.op = REQ_OP_WRITE,
+		.op_flags = wbc_to_write_flags(wbc),
+		.old_blkaddr = NEW_ADDR,
+		.page = NULL,
+		.encrypted_page = NULL,
+		.compressed_page = NULL,
+		.submitted = false,
+		.need_lock = LOCK_RETRY,
+		.io_type = io_type,
+		.io_wbc = wbc,
+		.encrypted = f2fs_encrypted_file(cc->inode),
+	};
+	struct dnode_of_data dn;
+	struct node_info ni;
+	struct compress_io_ctx *cic;
+	pgoff_t start_idx = start_idx_of_cluster(cc);
+	unsigned int last_index = cc->cluster_size - 1;
+	loff_t psize;
+	int i, err;
+
+	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
+
+	f2fs_lock_op(sbi);
+
+	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
+	if (err)
+		goto out_unlock_op;
+
+	for (i = 0; i < cc->cluster_size; i++) {
+		if (datablock_addr(dn.inode, dn.node_page,
+					dn.ofs_in_node + i) == NULL_ADDR)
+			goto out_put_dnode;
+	}
+
+	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
+
+	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+	if (err)
+		goto out_put_dnode;
+
+	fio.version = ni.version;
+
+	cic = f2fs_kzalloc(sbi, sizeof(struct compress_io_ctx), GFP_NOFS);
+	if (!cic)
+		goto out_put_dnode;
+
+	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
+	cic->inode = inode;
+	refcount_set(&cic->ref, 1);
+	cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
+			cc->log_cluster_size, GFP_NOFS);
+	if (!cic->rpages)
+		goto out_put_cic;
+
+	cic->nr_rpages = cc->cluster_size;
+
+	for (i = 0; i < cc->nr_cpages; i++) {
+		f2fs_set_compressed_page(cc->cpages[i], inode,
+					cc->rpages[i + 1]->index,
+					cic, i ? &cic->ref : NULL);
+		fio.compressed_page = cc->cpages[i];
+		if (fio.encrypted) {
+			fio.page = cc->rpages[i + 1];
+			err = f2fs_encrypt_one_page(&fio);
+			if (err)
+				goto out_destroy_crypt;
+			cc->cpages[i] = fio.encrypted_page;
+		}
+	}
+
+	set_cluster_writeback(cc);
+
+	for (i = 0; i < cc->cluster_size; i++)
+		cic->rpages[i] = cc->rpages[i];
+
+	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
+		block_t blkaddr;
+
+		blkaddr = datablock_addr(dn.inode, dn.node_page,
+							dn.ofs_in_node);
+		fio.page = cic->rpages[i];
+		fio.old_blkaddr = blkaddr;
+
+		/* cluster header */
+		if (i == 0) {
+			if (blkaddr == COMPRESS_ADDR)
+				fio.compr_blocks++;
+			if (__is_valid_data_blkaddr(blkaddr))
+				f2fs_invalidate_blocks(sbi, blkaddr);
+			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
+			goto unlock_continue;
+		}
+
+		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
+			fio.compr_blocks++;
+
+		if (i > cc->nr_cpages) {
+			if (__is_valid_data_blkaddr(blkaddr)) {
+				f2fs_invalidate_blocks(sbi, blkaddr);
+				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
+			}
+			goto unlock_continue;
+		}
+
+		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
+
+		if (fio.encrypted)
+			fio.encrypted_page = cc->cpages[i - 1];
+		else
+			fio.compressed_page = cc->cpages[i - 1];
+
+		cc->cpages[i - 1] = NULL;
+		f2fs_outplace_write_data(&dn, &fio);
+		(*submitted)++;
+unlock_continue:
+		inode_dec_dirty_pages(cc->inode);
+		unlock_page(fio.page);
+	}
+
+	if (fio.compr_blocks)
+		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
+	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
+
+	set_inode_flag(cc->inode, FI_APPEND_WRITE);
+	if (cc->cluster_idx == 0)
+		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+
+	f2fs_put_dnode(&dn);
+	f2fs_unlock_op(sbi);
+
+	down_write(&fi->i_sem);
+	if (fi->last_disk_size < psize)
+		fi->last_disk_size = psize;
+	up_write(&fi->i_sem);
+
+	f2fs_put_rpages(cc);
+	f2fs_destroy_compress_ctx(cc);
+	return 0;
+
+out_destroy_crypt:
+	kfree(cic->rpages);
+
+	for (--i; i >= 0; i--)
+		fscrypt_finalize_bounce_page(&cc->cpages[i]);
+	for (i = 0; i < cc->nr_cpages; i++) {
+		if (!cc->cpages[i])
+			continue;
+		f2fs_put_page(cc->cpages[i], 1);
+	}
+out_put_cic:
+	kfree(cic);
+out_put_dnode:
+	f2fs_put_dnode(&dn);
+out_unlock_op:
+	f2fs_unlock_op(sbi);
+	return -EAGAIN;
+}
+
+void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
+{
+	struct f2fs_sb_info *sbi = bio->bi_private;
+	struct compress_io_ctx *cic =
+			(struct compress_io_ctx *)page_private(page);
+	int i;
+
+	if (unlikely(bio->bi_status))
+		mapping_set_error(cic->inode->i_mapping, -EIO);
+
+	f2fs_put_compressed_page(page);
+
+	dec_page_count(sbi, F2FS_WB_DATA);
+
+	if (refcount_dec_not_one(&cic->ref))
+		return;
+
+	for (i = 0; i < cic->nr_rpages; i++) {
+		WARN_ON(!cic->rpages[i]);
+		clear_cold_data(cic->rpages[i]);
+		end_page_writeback(cic->rpages[i]);
+	}
+
+	kfree(cic->rpages);
+	kfree(cic);
+}
+
+static int f2fs_write_raw_pages(struct compress_ctx *cc,
+					int *submitted,
+					struct writeback_control *wbc,
+					enum iostat_type io_type)
+{
+	struct address_space *mapping = cc->inode->i_mapping;
+	int _submitted, compr_blocks, ret;
+	int i = -1, err = 0;
+
+	compr_blocks = f2fs_compressed_blocks(cc);
+	if (compr_blocks < 0) {
+		err = compr_blocks;
+		goto out_err;
+	}
+
+	for (i = 0; i < cc->cluster_size; i++) {
+		if (!cc->rpages[i])
+			continue;
+retry_write:
+		if (cc->rpages[i]->mapping != mapping) {
+			unlock_page(cc->rpages[i]);
+			continue;
+		}
+
+		BUG_ON(!PageLocked(cc->rpages[i]));
+
+		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
+						NULL, NULL, wbc, io_type,
+						compr_blocks);
+		if (ret) {
+			if (ret == AOP_WRITEPAGE_ACTIVATE) {
+				unlock_page(cc->rpages[i]);
+				ret = 0;
+			} else if (ret == -EAGAIN) {
+				ret = 0;
+				cond_resched();
+				congestion_wait(BLK_RW_ASYNC, HZ/50);
+				lock_page(cc->rpages[i]);
+				clear_page_dirty_for_io(cc->rpages[i]);
+				goto retry_write;
+			}
+			err = ret;
+			goto out_fail;
+		}
+
+		*submitted += _submitted;
+	}
+	return 0;
+
+out_fail:
+	/* TODO: revoke partially updated block addresses */
+	BUG_ON(compr_blocks);
+out_err:
+	for (++i; i < cc->cluster_size; i++) {
+		if (!cc->rpages[i])
+			continue;
+		redirty_page_for_writepage(wbc, cc->rpages[i]);
+		unlock_page(cc->rpages[i]);
+	}
+	return err;
+}
+
+int f2fs_write_multi_pages(struct compress_ctx *cc,
+					int *submitted,
+					struct writeback_control *wbc,
+					enum iostat_type io_type)
+{
+	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
+	const struct f2fs_compress_ops *cops =
+			f2fs_cops[fi->i_compress_algorithm];
+	int err;
+
+	*submitted = 0;
+	if (cluster_may_compress(cc)) {
+		err = f2fs_compress_pages(cc);
+		if (err == -EAGAIN) {
+			goto write;
+		} else if (err) {
+			f2fs_put_rpages_wbc(cc, wbc, true, 1);
+			goto destroy_out;
+		}
+
+		err = f2fs_write_compressed_pages(cc, submitted,
+							wbc, io_type);
+		cops->destroy_compress_ctx(cc);
+		if (!err)
+			return 0;
+		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
+	}
+write:
+	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
+
+	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
+	f2fs_put_rpages_wbc(cc, wbc, false, 0);
+destroy_out:
+	f2fs_destroy_compress_ctx(cc);
+	return err;
+}
+
+struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
+	struct decompress_io_ctx *dic;
+	pgoff_t start_idx = start_idx_of_cluster(cc);
+	int i;
+
+	dic = f2fs_kzalloc(sbi, sizeof(struct decompress_io_ctx), GFP_NOFS);
+	if (!dic)
+		return ERR_PTR(-ENOMEM);
+
+	dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
+			cc->log_cluster_size, GFP_NOFS);
+	if (!dic->rpages) {
+		kfree(dic);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
+	dic->inode = cc->inode;
+	refcount_set(&dic->ref, 1);
+	dic->cluster_idx = cc->cluster_idx;
+	dic->cluster_size = cc->cluster_size;
+	dic->log_cluster_size = cc->log_cluster_size;
+	dic->nr_cpages = cc->nr_cpages;
+	dic->failed = false;
+
+	for (i = 0; i < dic->cluster_size; i++)
+		dic->rpages[i] = cc->rpages[i];
+	dic->nr_rpages = cc->cluster_size;
+
+	dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
+					dic->nr_cpages, GFP_NOFS);
+	if (!dic->cpages)
+		goto out_free;
+
+	for (i = 0; i < dic->nr_cpages; i++) {
+		struct page *page;
+
+		page = f2fs_grab_page();
+		if (!page)
+			goto out_free;
+
+		f2fs_set_compressed_page(page, cc->inode,
+					start_idx + i + 1,
+					dic, i ? &dic->ref : NULL);
+		dic->cpages[i] = page;
+	}
+
+	dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
+					dic->cluster_size, GFP_NOFS);
+	if (!dic->tpages)
+		goto out_free;
+
+	for (i = 0; i < dic->cluster_size; i++) {
+		if (cc->rpages[i])
+			continue;
+
+		dic->tpages[i] = f2fs_grab_page();
+		if (!dic->tpages[i])
+			goto out_free;
+	}
+
+	for (i = 0; i < dic->cluster_size; i++) {
+		if (dic->tpages[i])
+			continue;
+		dic->tpages[i] = cc->rpages[i];
+	}
+
+	return dic;
+
+out_free:
+	f2fs_free_dic(dic);
+	return ERR_PTR(-ENOMEM);
+}
+
+void f2fs_free_dic(struct decompress_io_ctx *dic)
+{
+	int i;
+
+	if (dic->tpages) {
+		for (i = 0; i < dic->cluster_size; i++) {
+			if (dic->rpages[i])
+				continue;
+			f2fs_put_page(dic->tpages[i], 1);
+		}
+		kfree(dic->tpages);
+	}
+
+	if (dic->cpages) {
+		for (i = 0; i < dic->nr_cpages; i++) {
+			if (!dic->cpages[i])
+				continue;
+			f2fs_put_compressed_page(dic->cpages[i]);
+		}
+		kfree(dic->cpages);
+	}
+
+	kfree(dic->rpages);
+	kfree(dic);
+}
+
+void f2fs_decompress_end_io(struct page **rpages,
+			unsigned int cluster_size, bool err, bool verity)
+{
+	int i;
+
+	for (i = 0; i < cluster_size; i++) {
+		struct page *rpage = rpages[i];
+
+		if (!rpage)
+			continue;
+
+		if (err || PageError(rpage)) {
+			ClearPageUptodate(rpage);
+			ClearPageError(rpage);
+		} else {
+			if (!verity || fsverity_verify_page(rpage))
+				SetPageUptodate(rpage);
+			else
+				SetPageError(rpage);
+		}
+		unlock_page(rpage);
+	}
+}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 0fa356e..b27b721 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -31,6 +31,47 @@
 static struct kmem_cache *bio_post_read_ctx_cache;
 static struct kmem_cache *bio_entry_slab;
 static mempool_t *bio_post_read_ctx_pool;
+static struct bio_set f2fs_bioset;
+
+#define	F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE
+
+int __init f2fs_init_bioset(void)
+{
+	if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
+					0, BIOSET_NEED_BVECS))
+		return -ENOMEM;
+	return 0;
+}
+
+void f2fs_destroy_bioset(void)
+{
+	bioset_exit(&f2fs_bioset);
+}
+
+static inline struct bio *__f2fs_bio_alloc(gfp_t gfp_mask,
+						unsigned int nr_iovecs)
+{
+	return bio_alloc_bioset(gfp_mask, nr_iovecs, &f2fs_bioset);
+}
+
+struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool no_fail)
+{
+	struct bio *bio;
+
+	if (no_fail) {
+		/* No failure on bio allocation */
+		bio = __f2fs_bio_alloc(GFP_NOIO, npages);
+		if (!bio)
+			bio = __f2fs_bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
+		return bio;
+	}
+	if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
+		f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
+		return NULL;
+	}
+
+	return __f2fs_bio_alloc(GFP_KERNEL, npages);
+}
 
 static bool __is_cp_guaranteed(struct page *page)
 {
@@ -41,6 +82,9 @@ static bool __is_cp_guaranteed(struct page *page)
 	if (!mapping)
 		return false;
 
+	if (f2fs_is_compressed_page(page))
+		return false;
+
 	inode = mapping->host;
 	sbi = F2FS_I_SB(inode);
 
@@ -73,19 +117,19 @@ static enum count_type __read_io_type(struct page *page)
 
 /* postprocessing steps for read bios */
 enum bio_post_read_step {
-	STEP_INITIAL = 0,
 	STEP_DECRYPT,
+	STEP_DECOMPRESS,
 	STEP_VERITY,
 };
 
 struct bio_post_read_ctx {
 	struct bio *bio;
+	struct f2fs_sb_info *sbi;
 	struct work_struct work;
-	unsigned int cur_step;
 	unsigned int enabled_steps;
 };
 
-static void __read_end_io(struct bio *bio)
+static void __read_end_io(struct bio *bio, bool compr, bool verity)
 {
 	struct page *page;
 	struct bio_vec *bv;
@@ -94,6 +138,13 @@ static void __read_end_io(struct bio *bio)
 	bio_for_each_segment_all(bv, bio, iter_all) {
 		page = bv->bv_page;
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+		if (compr && f2fs_is_compressed_page(page)) {
+			f2fs_decompress_pages(bio, page, verity);
+			continue;
+		}
+#endif
+
 		/* PG_error was set if any post_read step failed */
 		if (bio->bi_status || PageError(page)) {
 			ClearPageUptodate(page);
@@ -105,31 +156,107 @@ static void __read_end_io(struct bio *bio)
 		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
 		unlock_page(page);
 	}
-	if (bio->bi_private)
-		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
-	bio_put(bio);
+}
+
+static void f2fs_release_read_bio(struct bio *bio);
+static void __f2fs_read_end_io(struct bio *bio, bool compr, bool verity)
+{
+	if (!compr)
+		__read_end_io(bio, false, verity);
+	f2fs_release_read_bio(bio);
+}
+
+static void f2fs_decompress_bio(struct bio *bio, bool verity)
+{
+	__read_end_io(bio, true, verity);
 }
 
 static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
 
-static void decrypt_work(struct work_struct *work)
+static void f2fs_decrypt_work(struct bio_post_read_ctx *ctx)
 {
-	struct bio_post_read_ctx *ctx =
-		container_of(work, struct bio_post_read_ctx, work);
-
 	fscrypt_decrypt_bio(ctx->bio);
-
-	bio_post_read_processing(ctx);
 }
 
-static void verity_work(struct work_struct *work)
+static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
+{
+	f2fs_decompress_bio(ctx->bio, ctx->enabled_steps & (1 << STEP_VERITY));
+}
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+static void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
+{
+	f2fs_decompress_end_io(rpages, cluster_size, false, true);
+}
+
+static void f2fs_verify_bio(struct bio *bio)
+{
+	struct page *page = bio_first_page_all(bio);
+	struct decompress_io_ctx *dic =
+			(struct decompress_io_ctx *)page_private(page);
+
+	f2fs_verify_pages(dic->rpages, dic->cluster_size);
+	f2fs_free_dic(dic);
+}
+#endif
+
+static void f2fs_verity_work(struct work_struct *work)
+{
+	struct bio_post_read_ctx *ctx =
+		container_of(work, struct bio_post_read_ctx, work);
+	struct bio *bio = ctx->bio;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	unsigned int enabled_steps = ctx->enabled_steps;
+#endif
+
+	/*
+	 * fsverity_verify_bio() may call readpages() again, and while verity
+	 * will be disabled for this, decryption may still be needed, resulting
+	 * in another bio_post_read_ctx being allocated.  So to prevent
+	 * deadlocks we need to release the current ctx to the mempool first.
+	 * This assumes that verity is the last post-read step.
+	 */
+	mempool_free(ctx, bio_post_read_ctx_pool);
+	bio->bi_private = NULL;
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	/* previous step is decompression */
+	if (enabled_steps & (1 << STEP_DECOMPRESS)) {
+		f2fs_verify_bio(bio);
+		f2fs_release_read_bio(bio);
+		return;
+	}
+#endif
+
+	fsverity_verify_bio(bio);
+	__f2fs_read_end_io(bio, false, false);
+}
+
+static void f2fs_post_read_work(struct work_struct *work)
 {
 	struct bio_post_read_ctx *ctx =
 		container_of(work, struct bio_post_read_ctx, work);
 
-	fsverity_verify_bio(ctx->bio);
+	if (ctx->enabled_steps & (1 << STEP_DECRYPT))
+		f2fs_decrypt_work(ctx);
 
-	bio_post_read_processing(ctx);
+	if (ctx->enabled_steps & (1 << STEP_DECOMPRESS))
+		f2fs_decompress_work(ctx);
+
+	if (ctx->enabled_steps & (1 << STEP_VERITY)) {
+		INIT_WORK(&ctx->work, f2fs_verity_work);
+		fsverity_enqueue_verify_work(&ctx->work);
+		return;
+	}
+
+	__f2fs_read_end_io(ctx->bio,
+		ctx->enabled_steps & (1 << STEP_DECOMPRESS), false);
+}
+
+static void f2fs_enqueue_post_read_work(struct f2fs_sb_info *sbi,
+						struct work_struct *work)
+{
+	queue_work(sbi->post_read_wq, work);
 }
 
 static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
@@ -139,31 +266,26 @@ static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
 	 * verity may require reading metadata pages that need decryption, and
 	 * we shouldn't recurse to the same workqueue.
 	 */
-	switch (++ctx->cur_step) {
-	case STEP_DECRYPT:
-		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
-			INIT_WORK(&ctx->work, decrypt_work);
-			fscrypt_enqueue_decrypt_work(&ctx->work);
-			return;
-		}
-		ctx->cur_step++;
-		/* fall-through */
-	case STEP_VERITY:
-		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
-			INIT_WORK(&ctx->work, verity_work);
-			fsverity_enqueue_verify_work(&ctx->work);
-			return;
-		}
-		ctx->cur_step++;
-		/* fall-through */
-	default:
-		__read_end_io(ctx->bio);
+
+	if (ctx->enabled_steps & (1 << STEP_DECRYPT) ||
+		ctx->enabled_steps & (1 << STEP_DECOMPRESS)) {
+		INIT_WORK(&ctx->work, f2fs_post_read_work);
+		f2fs_enqueue_post_read_work(ctx->sbi, &ctx->work);
+		return;
 	}
+
+	if (ctx->enabled_steps & (1 << STEP_VERITY)) {
+		INIT_WORK(&ctx->work, f2fs_verity_work);
+		fsverity_enqueue_verify_work(&ctx->work);
+		return;
+	}
+
+	__f2fs_read_end_io(ctx->bio, false, false);
 }
 
 static bool f2fs_bio_post_read_required(struct bio *bio)
 {
-	return bio->bi_private && !bio->bi_status;
+	return bio->bi_private;
 }
 
 static void f2fs_read_end_io(struct bio *bio)
@@ -178,12 +300,11 @@ static void f2fs_read_end_io(struct bio *bio)
 	if (f2fs_bio_post_read_required(bio)) {
 		struct bio_post_read_ctx *ctx = bio->bi_private;
 
-		ctx->cur_step = STEP_INITIAL;
 		bio_post_read_processing(ctx);
 		return;
 	}
 
-	__read_end_io(bio);
+	__f2fs_read_end_io(bio, false, false);
 }
 
 static void f2fs_write_end_io(struct bio *bio)
@@ -214,6 +335,13 @@ static void f2fs_write_end_io(struct bio *bio)
 
 		fscrypt_finalize_bounce_page(&page);
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+		if (f2fs_is_compressed_page(page)) {
+			f2fs_compress_write_end_io(bio, page);
+			continue;
+		}
+#endif
+
 		if (unlikely(bio->bi_status)) {
 			mapping_set_error(page->mapping, -EIO);
 			if (type == F2FS_WB_CP_DATA)
@@ -358,6 +486,12 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
 	submit_bio(bio);
 }
 
+void f2fs_submit_bio(struct f2fs_sb_info *sbi,
+				struct bio *bio, enum page_type type)
+{
+	__submit_bio(sbi, bio, type);
+}
+
 static void __submit_merged_bio(struct f2fs_bio_info *io)
 {
 	struct f2fs_io_info *fio = &io->fio;
@@ -380,7 +514,6 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
 						struct page *page, nid_t ino)
 {
 	struct bio_vec *bvec;
-	struct page *target;
 	struct bvec_iter_all iter_all;
 
 	if (!bio)
@@ -390,10 +523,18 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
 		return true;
 
 	bio_for_each_segment_all(bvec, bio, iter_all) {
+		struct page *target = bvec->bv_page;
 
-		target = bvec->bv_page;
-		if (fscrypt_is_bounce_page(target))
+		if (fscrypt_is_bounce_page(target)) {
 			target = fscrypt_pagecache_page(target);
+			if (IS_ERR(target))
+				continue;
+		}
+		if (f2fs_is_compressed_page(target)) {
+			target = f2fs_compress_control_page(target);
+			if (IS_ERR(target))
+				continue;
+		}
 
 		if (inode && inode == target->mapping->host)
 			return true;
@@ -588,7 +729,8 @@ static int add_ipu_page(struct f2fs_sb_info *sbi, struct bio **bio,
 
 			found = true;
 
-			if (bio_add_page(*bio, page, PAGE_SIZE, 0) == PAGE_SIZE) {
+			if (bio_add_page(*bio, page, PAGE_SIZE, 0) ==
+							PAGE_SIZE) {
 				ret = 0;
 				break;
 			}
@@ -728,7 +870,12 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
 
 	verify_fio_blkaddr(fio);
 
-	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+	if (fio->encrypted_page)
+		bio_page = fio->encrypted_page;
+	else if (fio->compressed_page)
+		bio_page = fio->compressed_page;
+	else
+		bio_page = fio->page;
 
 	/* set submitted = true as a return value */
 	fio->submitted = true;
@@ -797,17 +944,16 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
 
 	if (f2fs_encrypted_file(inode))
 		post_read_steps |= 1 << STEP_DECRYPT;
-
+	if (f2fs_compressed_file(inode))
+		post_read_steps |= 1 << STEP_DECOMPRESS;
 	if (f2fs_need_verity(inode, first_idx))
 		post_read_steps |= 1 << STEP_VERITY;
 
 	if (post_read_steps) {
+		/* Due to the mempool, this never fails. */
 		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
-		if (!ctx) {
-			bio_put(bio);
-			return ERR_PTR(-ENOMEM);
-		}
 		ctx->bio = bio;
+		ctx->sbi = sbi;
 		ctx->enabled_steps = post_read_steps;
 		bio->bi_private = ctx;
 	}
@@ -815,6 +961,13 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
 	return bio;
 }
 
+static void f2fs_release_read_bio(struct bio *bio)
+{
+	if (bio->bi_private)
+		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
+	bio_put(bio);
+}
+
 /* This can handle encryption stuffs */
 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
 							block_t blkaddr)
@@ -1180,19 +1333,6 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
 	int err = 0;
 	bool direct_io = iocb->ki_flags & IOCB_DIRECT;
 
-	/* convert inline data for Direct I/O*/
-	if (direct_io) {
-		err = f2fs_convert_inline_inode(inode);
-		if (err)
-			return err;
-	}
-
-	if (direct_io && allow_outplace_dio(inode, iocb, from))
-		return 0;
-
-	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
-		return 0;
-
 	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
 	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
 	if (map.m_len > map.m_lblk)
@@ -1872,6 +2012,144 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
 	return ret;
 }
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
+				unsigned nr_pages, sector_t *last_block_in_bio,
+				bool is_readahead)
+{
+	struct dnode_of_data dn;
+	struct inode *inode = cc->inode;
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct bio *bio = *bio_ret;
+	unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
+	sector_t last_block_in_file;
+	const unsigned blkbits = inode->i_blkbits;
+	const unsigned blocksize = 1 << blkbits;
+	struct decompress_io_ctx *dic = NULL;
+	int i;
+	int ret = 0;
+
+	f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
+
+	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
+
+	/* get rid of pages beyond EOF */
+	for (i = 0; i < cc->cluster_size; i++) {
+		struct page *page = cc->rpages[i];
+
+		if (!page)
+			continue;
+		if ((sector_t)page->index >= last_block_in_file) {
+			zero_user_segment(page, 0, PAGE_SIZE);
+			if (!PageUptodate(page))
+				SetPageUptodate(page);
+		} else if (!PageUptodate(page)) {
+			continue;
+		}
+		unlock_page(page);
+		cc->rpages[i] = NULL;
+		cc->nr_rpages--;
+	}
+
+	/* we are done since all pages are beyond EOF */
+	if (f2fs_cluster_is_empty(cc))
+		goto out;
+
+	set_new_dnode(&dn, inode, NULL, NULL, 0);
+	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
+	if (ret)
+		goto out;
+
+	/* cluster was overwritten as normal cluster */
+	if (dn.data_blkaddr != COMPRESS_ADDR)
+		goto out;
+
+	for (i = 1; i < cc->cluster_size; i++) {
+		block_t blkaddr;
+
+		blkaddr = datablock_addr(dn.inode, dn.node_page,
+						dn.ofs_in_node + i);
+
+		if (!__is_valid_data_blkaddr(blkaddr))
+			break;
+
+		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
+			ret = -EFAULT;
+			goto out_put_dnode;
+		}
+		cc->nr_cpages++;
+	}
+
+	/* nothing to decompress */
+	if (cc->nr_cpages == 0) {
+		ret = 0;
+		goto out_put_dnode;
+	}
+
+	dic = f2fs_alloc_dic(cc);
+	if (IS_ERR(dic)) {
+		ret = PTR_ERR(dic);
+		goto out_put_dnode;
+	}
+
+	for (i = 0; i < dic->nr_cpages; i++) {
+		struct page *page = dic->cpages[i];
+		block_t blkaddr;
+
+		blkaddr = datablock_addr(dn.inode, dn.node_page,
+						dn.ofs_in_node + i + 1);
+
+		if (bio && !page_is_mergeable(sbi, bio,
+					*last_block_in_bio, blkaddr)) {
+submit_and_realloc:
+			__submit_bio(sbi, bio, DATA);
+			bio = NULL;
+		}
+
+		if (!bio) {
+			bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
+					is_readahead ? REQ_RAHEAD : 0,
+					page->index);
+			if (IS_ERR(bio)) {
+				ret = PTR_ERR(bio);
+				bio = NULL;
+				dic->failed = true;
+				if (refcount_sub_and_test(dic->nr_cpages - i,
+							&dic->ref))
+					f2fs_decompress_end_io(dic->rpages,
+							cc->cluster_size, true,
+							false);
+				f2fs_free_dic(dic);
+				f2fs_put_dnode(&dn);
+				*bio_ret = bio;
+				return ret;
+			}
+		}
+
+		f2fs_wait_on_block_writeback(inode, blkaddr);
+
+		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+			goto submit_and_realloc;
+
+		inc_page_count(sbi, F2FS_RD_DATA);
+		ClearPageError(page);
+		*last_block_in_bio = blkaddr;
+	}
+
+	f2fs_put_dnode(&dn);
+
+	*bio_ret = bio;
+	return 0;
+
+out_put_dnode:
+	f2fs_put_dnode(&dn);
+out:
+	f2fs_decompress_end_io(cc->rpages, cc->cluster_size, true, false);
+	*bio_ret = bio;
+	return ret;
+}
+#endif
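As a reading aid for f2fs_read_multi_pages() above: a compressed cluster keeps the COMPRESS_ADDR marker in its first block slot and the compressed block addresses in the slots that follow, which is why the scan starts at i = 1 and stops at the first invalid address. A small standalone sketch of that slot scan; the marker value and block addresses are made up for illustration.

#include <stdio.h>

#define NULL_ADDR	0u
#define COMPRESS_ADDR	0xFFFFFFFEu	/* illustrative marker, not the on-disk constant */

/* Count compressed blocks in one cluster: slot 0 is the marker,
 * slots 1..cluster_size-1 hold the compressed block addresses. */
static unsigned int count_cpages(const unsigned int *slots, unsigned int cluster_size)
{
	unsigned int i, nr_cpages = 0;

	if (slots[0] != COMPRESS_ADDR)
		return 0;	/* rewritten as a normal, uncompressed cluster */
	for (i = 1; i < cluster_size; i++) {
		if (slots[i] == NULL_ADDR)
			break;
		nr_cpages++;
	}
	return nr_cpages;
}

int main(void)
{
	unsigned int cluster[4] = { COMPRESS_ADDR, 1000, 1001, NULL_ADDR };

	printf("compressed blocks: %u\n", count_cpages(cluster, 4));	/* prints 2 */
	return 0;
}

A missing marker in slot 0 corresponds to the early "cluster was overwritten as normal cluster" exit in the function above.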
+
 /*
  * This function was originally taken from fs/mpage.c, and customized for f2fs.
  * Major change was from block_size == page_size in f2fs by default.
@@ -1889,6 +2167,19 @@ int f2fs_mpage_readpages(struct address_space *mapping,
 	sector_t last_block_in_bio = 0;
 	struct inode *inode = mapping->host;
 	struct f2fs_map_blocks map;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	struct compress_ctx cc = {
+		.inode = inode,
+		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
+		.cluster_size = F2FS_I(inode)->i_cluster_size,
+		.cluster_idx = NULL_CLUSTER,
+		.rpages = NULL,
+		.cpages = NULL,
+		.nr_rpages = 0,
+		.nr_cpages = 0,
+	};
+#endif
+	unsigned max_nr_pages = nr_pages;
 	int ret = 0;
 
 	map.m_pblk = 0;
@@ -1912,9 +2203,41 @@ int f2fs_mpage_readpages(struct address_space *mapping,
 				goto next_page;
 		}
 
-		ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio,
-					&last_block_in_bio, is_readahead);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+		if (f2fs_compressed_file(inode)) {
+			/* there are remaining compressed pages, submit them */
+			if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
+				ret = f2fs_read_multi_pages(&cc, &bio,
+							max_nr_pages,
+							&last_block_in_bio,
+							is_readahead);
+				f2fs_destroy_compress_ctx(&cc);
+				if (ret)
+					goto set_error_page;
+			}
+			ret = f2fs_is_compressed_cluster(inode, page->index);
+			if (ret < 0)
+				goto set_error_page;
+			else if (!ret)
+				goto read_single_page;
+
+			ret = f2fs_init_compress_ctx(&cc);
+			if (ret)
+				goto set_error_page;
+
+			f2fs_compress_ctx_add_page(&cc, page);
+
+			goto next_page;
+		}
+read_single_page:
+#endif
+
+		ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
+					&bio, &last_block_in_bio, is_readahead);
 		if (ret) {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+set_error_page:
+#endif
 			SetPageError(page);
 			zero_user_segment(page, 0, PAGE_SIZE);
 			unlock_page(page);
@@ -1922,6 +2245,19 @@ int f2fs_mpage_readpages(struct address_space *mapping,
 next_page:
 		if (pages)
 			put_page(page);
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+		if (f2fs_compressed_file(inode)) {
+			/* last page */
+			if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
+				ret = f2fs_read_multi_pages(&cc, &bio,
+							max_nr_pages,
+							&last_block_in_bio,
+							is_readahead);
+				f2fs_destroy_compress_ctx(&cc);
+			}
+		}
+#endif
 	}
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
@@ -1936,6 +2272,11 @@ static int f2fs_read_data_page(struct file *file, struct page *page)
 
 	trace_f2fs_readpage(page, DATA);
 
+	if (!f2fs_is_compress_backend_ready(inode)) {
+		unlock_page(page);
+		return -EOPNOTSUPP;
+	}
+
 	/* If the file has inline data, try to read it directly */
 	if (f2fs_has_inline_data(inode))
 		ret = f2fs_read_inline_data(inode, page);
@@ -1954,6 +2295,9 @@ static int f2fs_read_data_pages(struct file *file,
 
 	trace_f2fs_readpages(inode, page, nr_pages);
 
+	if (!f2fs_is_compress_backend_ready(inode))
+		return 0;
+
 	/* If the file has inline data, skip readpages */
 	if (f2fs_has_inline_data(inode))
 		return 0;
@@ -1961,22 +2305,23 @@ static int f2fs_read_data_pages(struct file *file,
 	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
 }
 
-static int encrypt_one_page(struct f2fs_io_info *fio)
+int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
 {
 	struct inode *inode = fio->page->mapping->host;
-	struct page *mpage;
+	struct page *mpage, *page;
 	gfp_t gfp_flags = GFP_NOFS;
 
 	if (!f2fs_encrypted_file(inode))
 		return 0;
 
+	page = fio->compressed_page ? fio->compressed_page : fio->page;
+
 	/* wait for GCed page writeback via META_MAPPING */
 	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
 
 retry_encrypt:
-	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(fio->page,
-							       PAGE_SIZE, 0,
-							       gfp_flags);
+	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
+					PAGE_SIZE, 0, gfp_flags);
 	if (IS_ERR(fio->encrypted_page)) {
 		/* flush pending IOs and wait for a while in the ENOMEM case */
 		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
@@ -2136,7 +2481,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 	if (ipu_force ||
 		(__is_valid_data_blkaddr(fio->old_blkaddr) &&
 					need_inplace_update(fio))) {
-		err = encrypt_one_page(fio);
+		err = f2fs_encrypt_one_page(fio);
 		if (err)
 			goto out_writepage;
 
@@ -2172,13 +2517,16 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 
 	fio->version = ni.version;
 
-	err = encrypt_one_page(fio);
+	err = f2fs_encrypt_one_page(fio);
 	if (err)
 		goto out_writepage;
 
 	set_page_writeback(page);
 	ClearPageError(page);
 
+	if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
+		f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
+
 	/* LFS mode write path */
 	f2fs_outplace_write_data(&dn, fio);
 	trace_f2fs_do_write_data_page(page, OPU);
@@ -2193,16 +2541,17 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 	return err;
 }
 
-static int __write_data_page(struct page *page, bool *submitted,
+int f2fs_write_single_data_page(struct page *page, int *submitted,
 				struct bio **bio,
 				sector_t *last_block,
 				struct writeback_control *wbc,
-				enum iostat_type io_type)
+				enum iostat_type io_type,
+				int compr_blocks)
 {
 	struct inode *inode = page->mapping->host;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	loff_t i_size = i_size_read(inode);
-	const pgoff_t end_index = ((unsigned long long) i_size)
+	const pgoff_t end_index = ((unsigned long long)i_size)
 							>> PAGE_SHIFT;
 	loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
 	unsigned offset = 0;
@@ -2218,6 +2567,7 @@ static int __write_data_page(struct page *page, bool *submitted,
 		.page = page,
 		.encrypted_page = NULL,
 		.submitted = false,
+		.compr_blocks = compr_blocks,
 		.need_lock = LOCK_RETRY,
 		.io_type = io_type,
 		.io_wbc = wbc,
@@ -2242,7 +2592,9 @@ static int __write_data_page(struct page *page, bool *submitted,
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
 
-	if (page->index < end_index || f2fs_verity_in_progress(inode))
+	if (page->index < end_index ||
+			f2fs_verity_in_progress(inode) ||
+			compr_blocks)
 		goto write;
 
 	/*
@@ -2318,7 +2670,6 @@ static int __write_data_page(struct page *page, bool *submitted,
 		f2fs_remove_dirty_inode(inode);
 		submitted = NULL;
 	}
-
 	unlock_page(page);
 	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
 					!F2FS_I(inode)->cp_task)
@@ -2331,7 +2682,7 @@ static int __write_data_page(struct page *page, bool *submitted,
 	}
 
 	if (submitted)
-		*submitted = fio.submitted;
+		*submitted = fio.submitted ? 1 : 0;
 
 	return 0;
 
@@ -2352,7 +2703,23 @@ static int __write_data_page(struct page *page, bool *submitted,
 static int f2fs_write_data_page(struct page *page,
 					struct writeback_control *wbc)
 {
-	return __write_data_page(page, NULL, NULL, NULL, wbc, FS_DATA_IO);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	struct inode *inode = page->mapping->host;
+
+	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+		goto out;
+
+	if (f2fs_compressed_file(inode)) {
+		if (f2fs_is_compressed_cluster(inode, page->index)) {
+			redirty_page_for_writepage(wbc, page);
+			return AOP_WRITEPAGE_ACTIVATE;
+		}
+	}
+out:
+#endif
+
+	return f2fs_write_single_data_page(page, NULL, NULL, NULL,
+						wbc, FS_DATA_IO, 0);
 }
 
 /*
@@ -2365,11 +2732,27 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 					enum iostat_type io_type)
 {
 	int ret = 0;
-	int done = 0;
+	int done = 0, retry = 0;
 	struct pagevec pvec;
 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
 	struct bio *bio = NULL;
 	sector_t last_block;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	struct inode *inode = mapping->host;
+	struct compress_ctx cc = {
+		.inode = inode,
+		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
+		.cluster_size = F2FS_I(inode)->i_cluster_size,
+		.cluster_idx = NULL_CLUSTER,
+		.rpages = NULL,
+		.nr_rpages = 0,
+		.cpages = NULL,
+		.rbuf = NULL,
+		.cbuf = NULL,
+		.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
+		.private = NULL,
+	};
+#endif
 	int nr_pages;
 	pgoff_t uninitialized_var(writeback_index);
 	pgoff_t index;
@@ -2379,6 +2762,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 	int range_whole = 0;
 	xa_mark_t tag;
 	int nwritten = 0;
+	int submitted = 0;
+	int i;
 
 	pagevec_init(&pvec);
 
@@ -2408,12 +2793,11 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 	else
 		tag = PAGECACHE_TAG_DIRTY;
 retry:
+	retry = 0;
 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag_pages_for_writeback(mapping, index, end);
 	done_index = index;
-	while (!done && (index <= end)) {
-		int i;
-
+	while (!done && !retry && (index <= end)) {
 		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
 				tag);
 		if (nr_pages == 0)
@@ -2421,15 +2805,62 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
-			bool submitted = false;
+			bool need_readd;
+readd:
+			need_readd = false;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+			if (f2fs_compressed_file(inode)) {
+				ret = f2fs_init_compress_ctx(&cc);
+				if (ret) {
+					done = 1;
+					break;
+				}
 
+				if (!f2fs_cluster_can_merge_page(&cc,
+								page->index)) {
+					ret = f2fs_write_multi_pages(&cc,
+						&submitted, wbc, io_type);
+					if (!ret)
+						need_readd = true;
+					goto result;
+				}
+
+				if (unlikely(f2fs_cp_error(sbi)))
+					goto lock_page;
+
+				if (f2fs_cluster_is_empty(&cc)) {
+					void *fsdata = NULL;
+					struct page *pagep;
+					int ret2;
+
+					ret2 = f2fs_prepare_compress_overwrite(
+							inode, &pagep,
+							page->index, &fsdata);
+					if (ret2 < 0) {
+						ret = ret2;
+						done = 1;
+						break;
+					} else if (ret2 &&
+						!f2fs_compress_write_end(inode,
+								fsdata, page->index,
+								1)) {
+						retry = 1;
+						break;
+					}
+				} else {
+					goto lock_page;
+				}
+			}
+#endif
 			/* give a priority to WB_SYNC threads */
 			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
 					wbc->sync_mode == WB_SYNC_NONE) {
 				done = 1;
 				break;
 			}
-
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+lock_page:
+#endif
 			done_index = page->index;
 retry_write:
 			lock_page(page);
@@ -2456,45 +2887,71 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 			if (!clear_page_dirty_for_io(page))
 				goto continue_unlock;
 
-			ret = __write_data_page(page, &submitted, &bio,
-					&last_block, wbc, io_type);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+			if (f2fs_compressed_file(inode)) {
+				get_page(page);
+				f2fs_compress_ctx_add_page(&cc, page);
+				continue;
+			}
+#endif
+			ret = f2fs_write_single_data_page(page, &submitted,
+					&bio, &last_block, wbc, io_type, 0);
+			if (ret == AOP_WRITEPAGE_ACTIVATE)
+				unlock_page(page);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+result:
+#endif
+			nwritten += submitted;
+			wbc->nr_to_write -= submitted;
+
 			if (unlikely(ret)) {
 				/*
 				 * keep nr_to_write, since vfs uses this to
 				 * get # of written pages.
 				 */
 				if (ret == AOP_WRITEPAGE_ACTIVATE) {
-					unlock_page(page);
 					ret = 0;
-					continue;
+					goto next;
 				} else if (ret == -EAGAIN) {
 					ret = 0;
 					if (wbc->sync_mode == WB_SYNC_ALL) {
 						cond_resched();
 						congestion_wait(BLK_RW_ASYNC,
-									HZ/50);
+								HZ/50);
 						goto retry_write;
 					}
-					continue;
+					goto next;
 				}
 				done_index = page->index + 1;
 				done = 1;
 				break;
-			} else if (submitted) {
-				nwritten++;
 			}
 
-			if (--wbc->nr_to_write <= 0 &&
+			if (wbc->nr_to_write <= 0 &&
 					wbc->sync_mode == WB_SYNC_NONE) {
 				done = 1;
 				break;
 			}
+next:
+			if (need_readd)
+				goto readd;
 		}
 		pagevec_release(&pvec);
 		cond_resched();
 	}
-
-	if (!cycled && !done) {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	/* flush remaining pages in the compress cluster */
+	if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
+		ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
+		nwritten += submitted;
+		wbc->nr_to_write -= submitted;
+		if (ret) {
+			done = 1;
+			retry = 0;
+		}
+	}
+#endif
+	if ((!cycled && !done) || retry) {
 		cycled = 1;
 		index = 0;
 		end = writeback_index - 1;
@@ -2518,6 +2975,8 @@ static inline bool __should_serialize_io(struct inode *inode,
 {
 	if (!S_ISREG(inode->i_mode))
 		return false;
+	if (f2fs_compressed_file(inode))
+		return true;
 	if (IS_NOQUOTA(inode))
 		return false;
 	/* to avoid deadlock in path of data flush */
@@ -2613,14 +3072,16 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
 	struct inode *inode = mapping->host;
 	loff_t i_size = i_size_read(inode);
 
+	if (IS_NOQUOTA(inode))
+		return;
+
 	/* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
 	if (to > i_size && !f2fs_verity_in_progress(inode)) {
 		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		down_write(&F2FS_I(inode)->i_mmap_sem);
 
 		truncate_pagecache(inode, i_size);
-		if (!IS_NOQUOTA(inode))
-			f2fs_truncate_blocks(inode, i_size, true);
+		f2fs_truncate_blocks(inode, i_size, true);
 
 		up_write(&F2FS_I(inode)->i_mmap_sem);
 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -2660,6 +3121,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
 		__do_map_lock(sbi, flag, true);
 		locked = true;
 	}
+
 restart:
 	/* check inline_data */
 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
@@ -2750,6 +3212,24 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 		if (err)
 			goto fail;
 	}
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	if (f2fs_compressed_file(inode)) {
+		int ret;
+
+		*fsdata = NULL;
+
+		ret = f2fs_prepare_compress_overwrite(inode, pagep,
+							index, fsdata);
+		if (ret < 0) {
+			err = ret;
+			goto fail;
+		} else if (ret) {
+			return 0;
+		}
+	}
+#endif
+
 repeat:
 	/*
 	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
@@ -2762,6 +3242,8 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 		goto fail;
 	}
 
+	/* TODO: cluster can be compressed due to race with .writepage */
+
 	*pagep = page;
 
 	err = prepare_write_begin(sbi, page, pos, len,
@@ -2845,6 +3327,16 @@ static int f2fs_write_end(struct file *file,
 		else
 			SetPageUptodate(page);
 	}
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	/* overwrite compressed file */
+	if (f2fs_compressed_file(inode) && fsdata) {
+		f2fs_compress_write_end(inode, fsdata, page->index, copied);
+		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+		return copied;
+	}
+#endif
+
 	if (!copied)
 		goto unlock_out;
 
@@ -3145,7 +3637,8 @@ int f2fs_migrate_page(struct address_space *mapping,
 
 #ifdef CONFIG_SWAP
 /* Copied from generic_swapfile_activate() to check any holes */
-static int check_swap_activate(struct file *swap_file, unsigned int max)
+static int check_swap_activate(struct swap_info_struct *sis,
+				struct file *swap_file, sector_t *span)
 {
 	struct address_space *mapping = swap_file->f_mapping;
 	struct inode *inode = mapping->host;
@@ -3156,6 +3649,8 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
 	sector_t last_block;
 	sector_t lowest_block = -1;
 	sector_t highest_block = 0;
+	int nr_extents = 0;
+	int ret;
 
 	blkbits = inode->i_blkbits;
 	blocks_per_page = PAGE_SIZE >> blkbits;
@@ -3167,15 +3662,20 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
 	probe_block = 0;
 	page_no = 0;
 	last_block = i_size_read(inode) >> blkbits;
-	while ((probe_block + blocks_per_page) <= last_block && page_no < max) {
+	while ((probe_block + blocks_per_page) <= last_block &&
+			page_no < sis->max) {
 		unsigned block_in_page;
 		sector_t first_block;
+		sector_t block = 0;
+		int	 err = 0;
 
 		cond_resched();
 
-		first_block = bmap(inode, probe_block);
-		if (first_block == 0)
+		block = probe_block;
+		err = bmap(inode, &block);
+		if (err || !block)
 			goto bad_bmap;
+		first_block = block;
 
 		/*
 		 * It must be PAGE_SIZE aligned on-disk
@@ -3187,11 +3687,13 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
 
 		for (block_in_page = 1; block_in_page < blocks_per_page;
 					block_in_page++) {
-			sector_t block;
 
-			block = bmap(inode, probe_block + block_in_page);
-			if (block == 0)
+			block = probe_block + block_in_page;
+			err = bmap(inode, &block);
+
+			if (err || !block)
 				goto bad_bmap;
+
 			if (block != first_block + block_in_page) {
 				/* Discontiguity */
 				probe_block++;
@@ -3207,13 +3709,27 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
 				highest_block = first_block;
 		}
 
+		/*
+		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
+		 */
+		ret = add_swap_extent(sis, page_no, 1, first_block);
+		if (ret < 0)
+			goto out;
+		nr_extents += ret;
 		page_no++;
 		probe_block += blocks_per_page;
 reprobe:
 		continue;
 	}
-	return 0;
-
+	ret = nr_extents;
+	*span = 1 + highest_block - lowest_block;
+	if (page_no == 0)
+		page_no = 1;	/* force Empty message */
+	sis->max = page_no;
+	sis->pages = page_no - 1;
+	sis->highest_bit = page_no - 1;
+out:
+	return ret;
 bad_bmap:
 	pr_err("swapon: swapfile has holes\n");
 	return -EINVAL;
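For context on the check_swap_activate() conversion above: the in-kernel bmap() helper now takes the form int bmap(struct inode *inode, sector_t *block), returning a negative error when the mapping cannot be attempted and writing the result through the pointer, with a resulting block of 0 still meaning an unmapped hole; that is why both err and !block are routed to bad_bmap.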
@@ -3235,14 +3751,17 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
 	if (ret)
 		return ret;
 
-	ret = check_swap_activate(file, sis->max);
-	if (ret)
+	if (f2fs_disable_compressed_file(inode))
+		return -EINVAL;
+
+	ret = check_swap_activate(sis, file, span);
+	if (ret < 0)
 		return ret;
 
 	set_inode_flag(inode, FI_PIN_FILE);
 	f2fs_precache_extents(inode);
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
-	return 0;
+	return ret;
 }
 
 static void f2fs_swap_deactivate(struct file *file)
@@ -3319,6 +3838,27 @@ void f2fs_destroy_post_read_processing(void)
 	kmem_cache_destroy(bio_post_read_ctx_cache);
 }
 
+int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
+{
+	if (!f2fs_sb_has_encrypt(sbi) &&
+		!f2fs_sb_has_verity(sbi) &&
+		!f2fs_sb_has_compression(sbi))
+		return 0;
+
+	sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
+						 WQ_UNBOUND | WQ_HIGHPRI,
+						 num_online_cpus());
+	if (!sbi->post_read_wq)
+		return -ENOMEM;
+	return 0;
+}
+
+void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
+{
+	if (sbi->post_read_wq)
+		destroy_workqueue(sbi->post_read_wq);
+}
+
 int __init f2fs_init_bio_entry_cache(void)
 {
 	bio_entry_slab = f2fs_kmem_cache_create("bio_entry_slab",
@@ -3328,7 +3868,7 @@ int __init f2fs_init_bio_entry_cache(void)
 	return 0;
 }
 
-void __exit f2fs_destroy_bio_entry_cache(void)
+void f2fs_destroy_bio_entry_cache(void)
 {
 	kmem_cache_destroy(bio_entry_slab);
 }
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 9b0bedd..6b89eae 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -21,9 +21,45 @@
 #include "gc.h"
 
 static LIST_HEAD(f2fs_stat_list);
-static struct dentry *f2fs_debugfs_root;
 static DEFINE_MUTEX(f2fs_stat_mutex);
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *f2fs_debugfs_root;
+#endif
 
+/*
+ * This function calculates the BDF of every segment
+ */
+void f2fs_update_sit_info(struct f2fs_sb_info *sbi)
+{
+	struct f2fs_stat_info *si = F2FS_STAT(sbi);
+	unsigned long long blks_per_sec, hblks_per_sec, total_vblocks;
+	unsigned long long bimodal, dist;
+	unsigned int segno, vblocks;
+	int ndirty = 0;
+
+	bimodal = 0;
+	total_vblocks = 0;
+	blks_per_sec = BLKS_PER_SEC(sbi);
+	hblks_per_sec = blks_per_sec / 2;
+	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
+		vblocks = get_valid_blocks(sbi, segno, true);
+		dist = abs(vblocks - hblks_per_sec);
+		bimodal += dist * dist;
+
+		if (vblocks > 0 && vblocks < blks_per_sec) {
+			total_vblocks += vblocks;
+			ndirty++;
+		}
+	}
+	dist = div_u64(MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec, 100);
+	si->bimodal = div64_u64(bimodal, dist);
+	if (si->dirty_count)
+		si->avg_vblocks = div_u64(total_vblocks, ndirty);
+	else
+		si->avg_vblocks = 0;
+}
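A quick worked pass through the loop above, with illustrative numbers: for blks_per_sec = 512 (so hblks_per_sec = 256) and two partially valid sections holding 100 and 400 valid blocks, dist is 156 and 144, bimodal accumulates 156 * 156 + 144 * 144 = 45072, and avg_vblocks becomes (100 + 400) / 2 = 250; the final division by MAIN_SECS * 256 * 256 / 100 scales bimodal into the percentage reported as BDF.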
+
+#ifdef CONFIG_DEBUG_FS
 static void update_general_status(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_stat_info *si = F2FS_STAT(sbi);
@@ -56,7 +92,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
 	si->nquota_files = sbi->nquota_files;
 	si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
 	si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
-	si->aw_cnt = atomic_read(&sbi->aw_cnt);
+	si->aw_cnt = sbi->atomic_files;
 	si->vw_cnt = atomic_read(&sbi->vw_cnt);
 	si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
 	si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
@@ -94,6 +130,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
 	si->inline_xattr = atomic_read(&sbi->inline_xattr);
 	si->inline_inode = atomic_read(&sbi->inline_inode);
 	si->inline_dir = atomic_read(&sbi->inline_dir);
+	si->compr_inode = atomic_read(&sbi->compr_inode);
+	si->compr_blocks = atomic_read(&sbi->compr_blocks);
 	si->append = sbi->im[APPEND_INO].ino_num;
 	si->update = sbi->im[UPDATE_INO].ino_num;
 	si->orphans = sbi->im[ORPHAN_INO].ino_num;
@@ -114,7 +152,6 @@ static void update_general_status(struct f2fs_sb_info *sbi)
 	si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID];
 	si->avail_nids = NM_I(sbi)->available_nids;
 	si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID];
-	si->bg_gc = sbi->bg_gc;
 	si->io_skip_bggc = sbi->io_skip_bggc;
 	si->other_skip_bggc = sbi->other_skip_bggc;
 	si->skipped_atomic_files[BG_GC] = sbi->skipped_atomic_files[BG_GC];
@@ -146,39 +183,6 @@ static void update_general_status(struct f2fs_sb_info *sbi)
 }
 
 /*
- * This function calculates BDF of every segments
- */
-static void update_sit_info(struct f2fs_sb_info *sbi)
-{
-	struct f2fs_stat_info *si = F2FS_STAT(sbi);
-	unsigned long long blks_per_sec, hblks_per_sec, total_vblocks;
-	unsigned long long bimodal, dist;
-	unsigned int segno, vblocks;
-	int ndirty = 0;
-
-	bimodal = 0;
-	total_vblocks = 0;
-	blks_per_sec = BLKS_PER_SEC(sbi);
-	hblks_per_sec = blks_per_sec / 2;
-	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
-		vblocks = get_valid_blocks(sbi, segno, true);
-		dist = abs(vblocks - hblks_per_sec);
-		bimodal += dist * dist;
-
-		if (vblocks > 0 && vblocks < blks_per_sec) {
-			total_vblocks += vblocks;
-			ndirty++;
-		}
-	}
-	dist = div_u64(MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec, 100);
-	si->bimodal = div64_u64(bimodal, dist);
-	if (si->dirty_count)
-		si->avg_vblocks = div_u64(total_vblocks, ndirty);
-	else
-		si->avg_vblocks = 0;
-}
-
-/*
  * This function calculates memory footprint.
  */
 static void update_mem_info(struct f2fs_sb_info *sbi)
@@ -315,6 +319,8 @@ static int stat_show(struct seq_file *s, void *v)
 			   si->inline_inode);
 		seq_printf(s, "  - Inline_dentry Inode: %u\n",
 			   si->inline_dir);
+		seq_printf(s, "  - Compressed Inode: %u, Blocks: %u\n",
+			   si->compr_inode, si->compr_blocks);
 		seq_printf(s, "  - Orphan/Append/Update Inode: %u, %u, %u\n",
 			   si->orphans, si->append, si->update);
 		seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
@@ -441,7 +447,7 @@ static int stat_show(struct seq_file *s, void *v)
 			   si->block_count[LFS], si->segment_count[LFS]);
 
 		/* segment usage info */
-		update_sit_info(si->sbi);
+		f2fs_update_sit_info(si->sbi);
 		seq_printf(s, "\nBDF: %u, avg. vblocks: %u\n",
 			   si->bimodal, si->avg_vblocks);
 
@@ -461,6 +467,7 @@ static int stat_show(struct seq_file *s, void *v)
 }
 
 DEFINE_SHOW_ATTRIBUTE(stat);
+#endif
 
 int f2fs_build_stats(struct f2fs_sb_info *sbi)
 {
@@ -491,11 +498,12 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
 	atomic_set(&sbi->inline_xattr, 0);
 	atomic_set(&sbi->inline_inode, 0);
 	atomic_set(&sbi->inline_dir, 0);
+	atomic_set(&sbi->compr_inode, 0);
+	atomic_set(&sbi->compr_blocks, 0);
 	atomic_set(&sbi->inplace_count, 0);
 	for (i = META_CP; i < META_MAX; i++)
 		atomic_set(&sbi->meta_count[i], 0);
 
-	atomic_set(&sbi->aw_cnt, 0);
 	atomic_set(&sbi->vw_cnt, 0);
 	atomic_set(&sbi->max_aw_cnt, 0);
 	atomic_set(&sbi->max_vw_cnt, 0);
@@ -520,14 +528,18 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
 
 void __init f2fs_create_root_stats(void)
 {
+#ifdef CONFIG_DEBUG_FS
 	f2fs_debugfs_root = debugfs_create_dir("f2fs", NULL);
 
 	debugfs_create_file("status", S_IRUGO, f2fs_debugfs_root, NULL,
 			    &stat_fops);
+#endif
 }
 
 void f2fs_destroy_root_stats(void)
 {
+#ifdef CONFIG_DEBUG_FS
 	debugfs_remove_recursive(f2fs_debugfs_root);
 	f2fs_debugfs_root = NULL;
+#endif
 }
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index d9ad842..27d0dd7 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -578,6 +578,20 @@ int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots)
 	goto next;
 }
 
+bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
+					struct fscrypt_name *fname)
+{
+	struct f2fs_dentry_ptr d;
+	unsigned int bit_pos;
+	int slots = GET_DENTRY_SLOTS(fname_len(fname));
+
+	make_dentry_ptr_inline(dir, &d, inline_data_addr(dir, ipage));
+
+	bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
+
+	return bit_pos < d.max;
+}
+
 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
 				const struct qstr *name, f2fs_hash_t name_hash,
 				unsigned int bit_pos)
@@ -1069,24 +1083,27 @@ static int f2fs_d_compare(const struct dentry *dentry, unsigned int len,
 			  const char *str, const struct qstr *name)
 {
 	struct qstr qstr = {.name = str, .len = len };
+	const struct dentry *parent = READ_ONCE(dentry->d_parent);
+	const struct inode *inode = READ_ONCE(parent->d_inode);
 
-	if (!IS_CASEFOLDED(dentry->d_parent->d_inode)) {
+	if (!inode || !IS_CASEFOLDED(inode)) {
 		if (len != name->len)
 			return -1;
-		return memcmp(str, name, len);
+		return memcmp(str, name->name, len);
 	}
 
-	return f2fs_ci_compare(dentry->d_parent->d_inode, name, &qstr, false);
+	return f2fs_ci_compare(inode, name, &qstr, false);
 }
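The change above snapshots d_parent and its inode once and tolerates a NULL inode, since a dentry can change under an RCU-mode lookup. A minimal userspace sketch of that read-once-then-check pattern; the READ_ONCE() stand-in is simplified and the types are illustrative.

#include <stdio.h>

/* Simplified stand-in for the kernel's READ_ONCE(): a single volatile load,
 * so a concurrently changing pointer is read exactly once before the check. */
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct inode  { int casefolded; };
struct dentry { struct dentry *d_parent; struct inode *d_inode; };

/* Mirrors the pattern above: take one snapshot of d_parent and its d_inode,
 * then fall back to the plain comparison when the inode is already gone. */
static int dir_is_casefolded(struct dentry *dentry)
{
	struct dentry *parent = READ_ONCE(dentry->d_parent);
	struct inode *inode = READ_ONCE(parent->d_inode);

	return inode && inode->casefolded;
}

int main(void)
{
	struct inode dir = { .casefolded = 1 };
	struct dentry parent = { .d_inode = &dir };
	struct dentry child = { .d_parent = &parent, .d_inode = NULL };

	parent.d_parent = &parent;	/* a root dentry points at itself */
	printf("%d\n", dir_is_casefolded(&child));	/* prints 1 */
	return 0;
}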
 
 static int f2fs_d_hash(const struct dentry *dentry, struct qstr *str)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
 	const struct unicode_map *um = sbi->s_encoding;
+	const struct inode *inode = READ_ONCE(dentry->d_inode);
 	unsigned char *norm;
 	int len, ret = 0;
 
-	if (!IS_CASEFOLDED(dentry->d_inode))
+	if (!inode || !IS_CASEFOLDED(inode))
 		return 0;
 
 	norm = f2fs_kmalloc(sbi, PATH_MAX, GFP_ATOMIC);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 059ade8..5355be6 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -116,6 +116,8 @@ typedef u32 block_t;	/*
 			 */
 typedef u32 nid_t;
 
+#define COMPRESS_EXT_NUM		16
+
 struct f2fs_mount_info {
 	unsigned int opt;
 	int write_io_size_bits;		/* Write IO size bits */
@@ -140,6 +142,12 @@ struct f2fs_mount_info {
 	block_t unusable_cap;		/* Amount of space allowed to be
 					 * unusable when disabling checkpoint
 					 */
+
+	/* For compression */
+	unsigned char compress_algorithm;	/* algorithm type */
+	unsigned compress_log_size;		/* cluster log size */
+	unsigned char compress_ext_cnt;		/* extension count */
+	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
 };
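These fields are filled from the compression mount options (compress_algorithm, compress_log_size, compress_extension) when the feature is enabled. A hypothetical userspace illustration of passing them at mount time; the device path, mount point and extension are placeholders.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* lz4 compression, 2^2 = 4 pages per cluster, compress files ending in ".log" */
	const char *opts = "compress_algorithm=lz4,compress_log_size=2,"
			   "compress_extension=log";

	if (mount("/dev/vdb1", "/mnt/f2fs", "f2fs", 0, opts))
		perror("mount");
	return 0;
}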
 
 #define F2FS_FEATURE_ENCRYPT		0x0001
@@ -155,6 +163,7 @@ struct f2fs_mount_info {
 #define F2FS_FEATURE_VERITY		0x0400
 #define F2FS_FEATURE_SB_CHKSUM		0x0800
 #define F2FS_FEATURE_CASEFOLD		0x1000
+#define F2FS_FEATURE_COMPRESSION	0x2000
 
 #define __F2FS_HAS_FEATURE(raw_super, mask)				\
 	((raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -712,6 +721,12 @@ struct f2fs_inode_info {
 	int i_inline_xattr_size;	/* inline xattr size */
 	struct timespec64 i_crtime;	/* inode creation time */
 	struct timespec64 i_disk_time[4];/* inode disk times */
+
+	/* for file compress */
+	u64 i_compr_blocks;			/* # of compressed blocks */
+	unsigned char i_compress_algorithm;	/* algorithm type */
+	unsigned char i_log_cluster_size;	/* log of cluster size */
+	unsigned int i_cluster_size;		/* cluster size */
 };
 
 static inline void get_extent_info(struct extent_info *ext,
@@ -1018,6 +1033,7 @@ enum need_lock_type {
 enum cp_reason_type {
 	CP_NO_NEEDED,
 	CP_NON_REGULAR,
+	CP_COMPRESSED,
 	CP_HARDLINK,
 	CP_SB_NEED_CP,
 	CP_WRONG_PINO,
@@ -1056,12 +1072,15 @@ struct f2fs_io_info {
 	block_t old_blkaddr;	/* old block address before Cow */
 	struct page *page;	/* page to be written */
 	struct page *encrypted_page;	/* encrypted page */
+	struct page *compressed_page;	/* compressed page */
 	struct list_head list;		/* serialize IOs */
 	bool submitted;		/* indicate IO submission */
 	int need_lock;		/* indicate we need to lock cp_rwsem */
 	bool in_list;		/* indicate fio is in io_list */
 	bool is_por;		/* indicate IO is from recovery or not */
 	bool retry;		/* need to reallocate block address */
+	int compr_blocks;	/* # of compressed block addresses */
+	bool encrypted;		/* indicate file is encrypted */
 	enum iostat_type io_type;	/* io type */
 	struct writeback_control *io_wbc; /* writeback control */
 	struct bio **bio;		/* bio for ipu */
@@ -1169,6 +1188,18 @@ enum fsync_mode {
 	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
 };
 
+/*
+ * This value is set in a page's private data to indicate that the
+ * page is atomically written and is on the inmem_pages list.
+ */
+#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
+#define DUMMY_WRITTEN_PAGE		((unsigned long)-2)
+
+#define IS_ATOMIC_WRITTEN_PAGE(page)			\
+		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
+#define IS_DUMMY_WRITTEN_PAGE(page)			\
+		(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
+
 #ifdef CONFIG_FS_ENCRYPTION
 #define DUMMY_ENCRYPTION_ENABLED(sbi) \
 			(unlikely(F2FS_OPTION(sbi).test_dummy_encryption))
@@ -1176,6 +1207,75 @@ enum fsync_mode {
 #define DUMMY_ENCRYPTION_ENABLED(sbi) (0)
 #endif
 
+/* For compression */
+enum compress_algorithm_type {
+	COMPRESS_LZO,
+	COMPRESS_LZ4,
+	COMPRESS_MAX,
+};
+
+#define COMPRESS_DATA_RESERVED_SIZE		4
+struct compress_data {
+	__le32 clen;			/* compressed data size */
+	__le32 chksum;			/* checksum of compressed data */
+	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
+	u8 cdata[];			/* compressed data */
+};
+
+#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))
+
+#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000
+
+/* compress context */
+struct compress_ctx {
+	struct inode *inode;		/* inode the context belongs to */
+	pgoff_t cluster_idx;		/* cluster index number */
+	unsigned int cluster_size;	/* page count in cluster */
+	unsigned int log_cluster_size;	/* log of cluster size */
+	struct page **rpages;		/* pages store raw data in cluster */
+	unsigned int nr_rpages;		/* total page number in rpages */
+	struct page **cpages;		/* pages store compressed data in cluster */
+	unsigned int nr_cpages;		/* total page number in cpages */
+	void *rbuf;			/* virtual mapped address on rpages */
+	struct compress_data *cbuf;	/* virtual mapped address on cpages */
+	size_t rlen;			/* valid data length in rbuf */
+	size_t clen;			/* valid data length in cbuf */
+	void *private;			/* payload buffer for specified compression algorithm */
+};
+
+/* compress context for write IO path */
+struct compress_io_ctx {
+	u32 magic;			/* magic number to indicate page is compressed */
+	struct inode *inode;		/* inode the context belongs to */
+	struct page **rpages;		/* pages store raw data in cluster */
+	unsigned int nr_rpages;		/* total page number in rpages */
+	refcount_t ref;			/* reference count of raw page */
+};
+
+/* decompress io context for read IO path */
+struct decompress_io_ctx {
+	u32 magic;			/* magic number to indicate page is compressed */
+	struct inode *inode;		/* inode the context belongs to */
+	pgoff_t cluster_idx;		/* cluster index number */
+	unsigned int cluster_size;	/* page count in cluster */
+	unsigned int log_cluster_size;	/* log of cluster size */
+	struct page **rpages;		/* pages store raw data in cluster */
+	unsigned int nr_rpages;		/* total page number in rpages */
+	struct page **cpages;		/* pages store compressed data in cluster */
+	unsigned int nr_cpages;		/* total page number in cpages */
+	struct page **tpages;		/* temp pages to pad holes in cluster */
+	void *rbuf;			/* virtual mapped address on rpages */
+	struct compress_data *cbuf;	/* virtual mapped address on cpages */
+	size_t rlen;			/* valid data length in rbuf */
+	size_t clen;			/* valid data length in cbuf */
+	refcount_t ref;			/* reference count of compressed page */
+	bool failed;			/* indicate IO error during decompression */
+};
+
+#define NULL_CLUSTER			((unsigned int)(~0))
+#define MIN_COMPRESS_LOG_SIZE		2
+#define MAX_COMPRESS_LOG_SIZE		8
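The limits above bound a cluster to between 4 and 256 pages. A small standalone sketch of the cluster index arithmetic the series relies on; the variable names mirror the per-inode fields and the numbers are illustrative.

#include <stdio.h>

int main(void)
{
	unsigned int log_cluster_size = 2;			/* i_log_cluster_size */
	unsigned int cluster_size = 1u << log_cluster_size;	/* 4 pages per cluster */
	unsigned long page_index = 10;

	unsigned long cluster_idx = page_index >> log_cluster_size;	 /* 2 */
	unsigned long start_idx = cluster_idx << log_cluster_size;	 /* 8, as in f2fs_read_multi_pages() */
	unsigned long ofs_in_cluster = page_index & (cluster_size - 1); /* 2 */

	printf("cluster %lu starts at page %lu, offset %lu\n",
	       cluster_idx, start_idx, ofs_in_cluster);
	return 0;
}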
+
 struct f2fs_sb_info {
 	struct super_block *sb;			/* pointer to VFS super block */
 	struct proc_dir_entry *s_proc;		/* proc entry */
@@ -1291,7 +1391,10 @@ struct f2fs_sb_info {
 	struct f2fs_mount_info mount_opt;	/* mount options */
 
 	/* for cleaning operations */
-	struct mutex gc_mutex;			/* mutex for GC */
+	struct rw_semaphore gc_lock;		/*
+						 * semaphore for GC, avoid
+						 * race between GC and GC or CP
+						 */
 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
 	unsigned int cur_victim_sec;		/* current victim section num */
 	unsigned int gc_mode;			/* current GC state */
@@ -1327,11 +1430,11 @@ struct f2fs_sb_info {
 	atomic_t inline_xattr;			/* # of inline_xattr inodes */
 	atomic_t inline_inode;			/* # of inline_data inodes */
 	atomic_t inline_dir;			/* # of inline_dentry inodes */
-	atomic_t aw_cnt;			/* # of atomic writes */
+	atomic_t compr_inode;			/* # of compressed inodes */
+	atomic_t compr_blocks;			/* # of compressed blocks */
 	atomic_t vw_cnt;			/* # of volatile writes */
 	atomic_t max_aw_cnt;			/* max # of atomic writes */
 	atomic_t max_vw_cnt;			/* max # of volatile writes */
-	int bg_gc;				/* background gc calls */
 	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
 	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
 	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
@@ -1365,6 +1468,8 @@ struct f2fs_sb_info {
 
 	/* Precomputed FS UUID checksum for seeding other checksums */
 	__u32 s_chksum_seed;
+
+	struct workqueue_struct *post_read_wq;	/* post read workqueue */
 };
 
 struct f2fs_private_dio {
@@ -2222,26 +2327,6 @@ static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
 	return entry;
 }
 
-static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi,
-						int npages, bool no_fail)
-{
-	struct bio *bio;
-
-	if (no_fail) {
-		/* No failure on bio allocation */
-		bio = bio_alloc(GFP_NOIO, npages);
-		if (!bio)
-			bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
-		return bio;
-	}
-	if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
-		f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
-		return NULL;
-	}
-
-	return bio_alloc(GFP_KERNEL, npages);
-}
-
 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
 {
 	if (sbi->gc_mode == GC_URGENT)
@@ -2378,11 +2463,13 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
 /*
  * On-disk inode flags (f2fs_inode::i_flags)
  */
+#define F2FS_COMPR_FL			0x00000004 /* Compress file */
 #define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
 #define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
 #define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
 #define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
 #define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
+#define F2FS_NOCOMP_FL			0x00000400 /* Don't compress */
 #define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
 #define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
 #define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
@@ -2391,7 +2478,7 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
 /* Flags that should be inherited by new inodes from their parent. */
 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
 			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
-			   F2FS_CASEFOLD_FL)
+			   F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)
 
 /* Flags that are appropriate for regular files (all but dir-specific ones). */
 #define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
@@ -2443,6 +2530,8 @@ enum {
 	FI_PIN_FILE,		/* indicate file should not be gced */
 	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
 	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
+	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
+	FI_MMAP_FILE,		/* indicate file was mmapped */
 };
 
 static inline void __mark_inode_dirty_flag(struct inode *inode,
@@ -2459,6 +2548,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
 	case FI_DATA_EXIST:
 	case FI_INLINE_DOTS:
 	case FI_PIN_FILE:
+	case FI_COMPRESSED_FILE:
 		f2fs_mark_inode_dirty_sync(inode, true);
 	}
 }
@@ -2614,16 +2704,27 @@ static inline int f2fs_has_inline_xattr(struct inode *inode)
 	return is_inode_flag_set(inode, FI_INLINE_XATTR);
 }
 
+static inline int f2fs_compressed_file(struct inode *inode)
+{
+	return S_ISREG(inode->i_mode) &&
+		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
+}
+
 static inline unsigned int addrs_per_inode(struct inode *inode)
 {
 	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
 				get_inline_xattr_addrs(inode);
-	return ALIGN_DOWN(addrs, 1);
+
+	if (!f2fs_compressed_file(inode))
+		return addrs;
+	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
 }
 
 static inline unsigned int addrs_per_block(struct inode *inode)
 {
-	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, 1);
+	if (!f2fs_compressed_file(inode))
+		return DEF_ADDRS_PER_BLOCK;
+	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
 }
 
 static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
@@ -2656,6 +2757,11 @@ static inline int f2fs_has_inline_dots(struct inode *inode)
 	return is_inode_flag_set(inode, FI_INLINE_DOTS);
 }
 
+static inline int f2fs_is_mmap_file(struct inode *inode)
+{
+	return is_inode_flag_set(inode, FI_MMAP_FILE);
+}
+
 static inline bool f2fs_is_pinned_file(struct inode *inode)
 {
 	return is_inode_flag_set(inode, FI_PIN_FILE);
@@ -2783,7 +2889,8 @@ static inline bool f2fs_may_extent_tree(struct inode *inode)
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 
 	if (!test_opt(sbi, EXTENT_CACHE) ||
-			is_inode_flag_set(inode, FI_NO_EXTENT))
+			is_inode_flag_set(inode, FI_NO_EXTENT) ||
+			is_inode_flag_set(inode, FI_COMPRESSED_FILE))
 		return false;
 
 	/*
@@ -2903,7 +3010,8 @@ static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
 
 static inline bool __is_valid_data_blkaddr(block_t blkaddr)
 {
-	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
+	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
+			blkaddr == COMPRESS_ADDR)
 		return false;
 	return true;
 }
@@ -3001,6 +3109,8 @@ ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
 			struct page **page);
 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
 			struct page *page, struct inode *inode);
+bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
+			struct fscrypt_name *fname);
 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
 			const struct qstr *name, f2fs_hash_t name_hash,
 			unsigned int bit_pos);
@@ -3155,6 +3265,8 @@ void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
 			unsigned int val, int alloc);
 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
+int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
 int __init f2fs_create_segment_manager_caches(void);
@@ -3205,10 +3317,13 @@ void f2fs_destroy_checkpoint_caches(void);
 /*
  * data.c
  */
-int f2fs_init_post_read_processing(void);
-void f2fs_destroy_post_read_processing(void);
+int __init f2fs_init_bioset(void);
+void f2fs_destroy_bioset(void);
+struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool no_fail);
 int f2fs_init_bio_entry_cache(void);
 void f2fs_destroy_bio_entry_cache(void);
+void f2fs_submit_bio(struct f2fs_sb_info *sbi,
+				struct bio *bio, enum page_type type);
 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
 				struct inode *inode, struct page *page,
@@ -3245,8 +3360,14 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 			int create, int flag);
 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 			u64 start, u64 len);
+int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
+int f2fs_write_single_data_page(struct page *page, int *submitted,
+				struct bio **bio, sector_t *last_block,
+				struct writeback_control *wbc,
+				enum iostat_type io_type,
+				int compr_blocks);
 void f2fs_invalidate_page(struct page *page, unsigned int offset,
 			unsigned int length);
 int f2fs_release_page(struct page *page, gfp_t wait);
@@ -3256,6 +3377,10 @@ int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
 #endif
 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
 void f2fs_clear_page_cache_dirty_tag(struct page *page);
+int f2fs_init_post_read_processing(void);
+void f2fs_destroy_post_read_processing(void);
+int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
+void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
 
 /*
  * gc.c
@@ -3302,6 +3427,7 @@ struct f2fs_stat_info {
 	int nr_discard_cmd;
 	unsigned int undiscard_blks;
 	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
+	int compr_inode, compr_blocks;
 	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
 	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
 	unsigned int bimodal, avg_vblocks;
@@ -3333,7 +3459,7 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
 #define stat_inc_cp_count(si)		((si)->cp_count++)
 #define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
 #define stat_inc_call_count(si)		((si)->call_count++)
-#define stat_inc_bggc_count(sbi)	((sbi)->bg_gc++)
+#define stat_inc_bggc_count(si)		((si)->bg_gc++)
 #define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
 #define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
 #define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
@@ -3372,6 +3498,20 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
 		if (f2fs_has_inline_dentry(inode))			\
 			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
 	} while (0)
+#define stat_inc_compr_inode(inode)					\
+	do {								\
+		if (f2fs_compressed_file(inode))			\
+			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
+	} while (0)
+#define stat_dec_compr_inode(inode)					\
+	do {								\
+		if (f2fs_compressed_file(inode))			\
+			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
+	} while (0)
+#define stat_add_compr_blocks(inode, blocks)				\
+		(atomic_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
+#define stat_sub_compr_blocks(inode, blocks)				\
+		(atomic_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
 #define stat_inc_meta_count(sbi, blkaddr)				\
 	do {								\
 		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
@@ -3389,13 +3529,9 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
 		((sbi)->block_count[(curseg)->alloc_type]++)
 #define stat_inc_inplace_blocks(sbi)					\
 		(atomic_inc(&(sbi)->inplace_count))
-#define stat_inc_atomic_write(inode)					\
-		(atomic_inc(&F2FS_I_SB(inode)->aw_cnt))
-#define stat_dec_atomic_write(inode)					\
-		(atomic_dec(&F2FS_I_SB(inode)->aw_cnt))
 #define stat_update_max_atomic_write(inode)				\
 	do {								\
-		int cur = atomic_read(&F2FS_I_SB(inode)->aw_cnt);	\
+		int cur = F2FS_I_SB(inode)->atomic_files;	\
 		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
 		if (cur > max)						\
 			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
@@ -3447,6 +3583,7 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi);
 void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
 void __init f2fs_create_root_stats(void);
 void f2fs_destroy_root_stats(void);
+void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
 #else
 #define stat_inc_cp_count(si)				do { } while (0)
 #define stat_inc_bg_cp_count(si)			do { } while (0)
@@ -3456,8 +3593,8 @@ void f2fs_destroy_root_stats(void);
 #define stat_other_skip_bggc_count(sbi)			do { } while (0)
 #define stat_inc_dirty_inode(sbi, type)			do { } while (0)
 #define stat_dec_dirty_inode(sbi, type)			do { } while (0)
-#define stat_inc_total_hit(sb)				do { } while (0)
-#define stat_inc_rbtree_node_hit(sb)			do { } while (0)
+#define stat_inc_total_hit(sbi)				do { } while (0)
+#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
 #define stat_inc_largest_node_hit(sbi)			do { } while (0)
 #define stat_inc_cached_node_hit(sbi)			do { } while (0)
 #define stat_inc_inline_xattr(inode)			do { } while (0)
@@ -3466,6 +3603,10 @@ void f2fs_destroy_root_stats(void);
 #define stat_dec_inline_inode(inode)			do { } while (0)
 #define stat_inc_inline_dir(inode)			do { } while (0)
 #define stat_dec_inline_dir(inode)			do { } while (0)
+#define stat_inc_compr_inode(inode)			do { } while (0)
+#define stat_dec_compr_inode(inode)			do { } while (0)
+#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
+#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
 #define stat_inc_atomic_write(inode)			do { } while (0)
 #define stat_dec_atomic_write(inode)			do { } while (0)
 #define stat_update_max_atomic_write(inode)		do { } while (0)
@@ -3485,6 +3626,7 @@ static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
 static inline void __init f2fs_create_root_stats(void) { }
 static inline void f2fs_destroy_root_stats(void) { }
+static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
 #endif
 
 extern const struct file_operations f2fs_dir_operations;
@@ -3513,6 +3655,7 @@ void f2fs_truncate_inline_inode(struct inode *inode,
 int f2fs_read_inline_data(struct inode *inode, struct page *page);
 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
 int f2fs_convert_inline_inode(struct inode *inode);
+int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
 int f2fs_write_inline_data(struct inode *inode, struct page *page);
 bool f2fs_recover_inline_data(struct inode *inode, struct page *npage);
 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
@@ -3605,7 +3748,85 @@ static inline void f2fs_set_encrypted_inode(struct inode *inode)
  */
 static inline bool f2fs_post_read_required(struct inode *inode)
 {
-	return f2fs_encrypted_file(inode) || fsverity_active(inode);
+	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
+		f2fs_compressed_file(inode);
+}
+
+/*
+ * compress.c
+ */
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+bool f2fs_is_compressed_page(struct page *page);
+struct page *f2fs_compress_control_page(struct page *page);
+int f2fs_prepare_compress_overwrite(struct inode *inode,
+			struct page **pagep, pgoff_t index, void **fsdata);
+bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
+					pgoff_t index, unsigned copied);
+void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
+bool f2fs_is_compress_backend_ready(struct inode *inode);
+void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
+bool f2fs_cluster_is_empty(struct compress_ctx *cc);
+bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
+void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
+int f2fs_write_multi_pages(struct compress_ctx *cc,
+						int *submitted,
+						struct writeback_control *wbc,
+						enum iostat_type io_type);
+int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
+int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
+				unsigned nr_pages, sector_t *last_block_in_bio,
+				bool is_readahead);
+struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
+void f2fs_free_dic(struct decompress_io_ctx *dic);
+void f2fs_decompress_end_io(struct page **rpages,
+			unsigned int cluster_size, bool err, bool verity);
+int f2fs_init_compress_ctx(struct compress_ctx *cc);
+void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
+void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
+#else
+static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
+static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
+{
+	if (!f2fs_compressed_file(inode))
+		return true;
+	/* compression is not supported */
+	return false;
+}
+static inline struct page *f2fs_compress_control_page(struct page *page)
+{
+	WARN_ON_ONCE(1);
+	return ERR_PTR(-EINVAL);
+}
+#endif
+
+static inline void set_compress_context(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+	F2FS_I(inode)->i_compress_algorithm =
+			F2FS_OPTION(sbi).compress_algorithm;
+	F2FS_I(inode)->i_log_cluster_size =
+			F2FS_OPTION(sbi).compress_log_size;
+	F2FS_I(inode)->i_cluster_size =
+			1 << F2FS_I(inode)->i_log_cluster_size;
+	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
+	set_inode_flag(inode, FI_COMPRESSED_FILE);
+	stat_inc_compr_inode(inode);
+}
+
+static inline u64 f2fs_disable_compressed_file(struct inode *inode)
+{
+	struct f2fs_inode_info *fi = F2FS_I(inode);
+
+	if (!f2fs_compressed_file(inode))
+		return 0;
+	if (fi->i_compr_blocks)
+		return fi->i_compr_blocks;
+
+	fi->i_flags &= ~F2FS_COMPR_FL;
+	clear_inode_flag(inode, FI_COMPRESSED_FILE);
+	stat_dec_compr_inode(inode);
+	return 0;
 }
 
 #define F2FS_FEATURE_FUNCS(name, flagname) \
@@ -3626,6 +3847,7 @@ F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
 F2FS_FEATURE_FUNCS(verity, VERITY);
 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
 F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
+F2FS_FEATURE_FUNCS(compression, COMPRESSION);
 
 #ifdef CONFIG_BLK_DEV_ZONED
 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
@@ -3707,6 +3929,30 @@ static inline bool f2fs_may_encrypt(struct inode *inode)
 #endif
 }
 
+static inline bool f2fs_may_compress(struct inode *inode)
+{
+	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
+				f2fs_is_atomic_file(inode) ||
+				f2fs_is_volatile_file(inode))
+		return false;
+	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
+}
+
+static inline void f2fs_i_compr_blocks_update(struct inode *inode,
+						u64 blocks, bool add)
+{
+	int diff = F2FS_I(inode)->i_cluster_size - blocks;
+
+	if (add) {
+		F2FS_I(inode)->i_compr_blocks += diff;
+		stat_add_compr_blocks(inode, diff);
+	} else {
+		F2FS_I(inode)->i_compr_blocks -= diff;
+		stat_sub_compr_blocks(inode, diff);
+	}
+	f2fs_mark_inode_dirty_sync(inode, true);
+}
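As an illustration of the arithmetic above: with i_cluster_size = 4, passing blocks = 1 gives diff = 3, so i_compr_blocks and the compr_blocks statistic move by three in the direction selected by add, while passing blocks = 4 (an incompressible cluster) leaves both counters unchanged.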
+
 static inline int block_unaligned_IO(struct inode *inode,
 				struct kiocb *iocb, struct iov_iter *iter)
 {
@@ -3738,6 +3984,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
 		return true;
 	if (f2fs_is_multi_device(sbi))
 		return true;
+	if (f2fs_compressed_file(inode))
+		return true;
 	/*
 	 * for blkzoned device, fallback direct IO to buffered IO, so
 	 * all IOs can be serialized by log-structured write.
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 85af112..0d4da64 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -50,8 +50,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 	struct page *page = vmf->page;
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct dnode_of_data dn = { .node_changed = false };
-	int err;
+	struct dnode_of_data dn;
+	bool need_alloc = true;
+	int err = 0;
 
 	if (unlikely(f2fs_cp_error(sbi))) {
 		err = -EIO;
@@ -63,6 +64,26 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 		goto err;
 	}
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	if (f2fs_compressed_file(inode)) {
+		int ret = f2fs_is_compressed_cluster(inode, page->index);
+
+		if (ret < 0) {
+			err = ret;
+			goto err;
+		} else if (ret) {
+			if (ret < F2FS_I(inode)->i_cluster_size) {
+				err = -EAGAIN;
+				goto err;
+			}
+			need_alloc = false;
+		}
+	}
+#endif
+	/* should be done outside of any locked page */
+	if (need_alloc)
+		f2fs_balance_fs(sbi, true);
+
 	sb_start_pagefault(inode->i_sb);
 
 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
@@ -78,15 +99,17 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 		goto out_sem;
 	}
 
-	/* block allocation */
-	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
-	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = f2fs_get_block(&dn, page->index);
-	f2fs_put_dnode(&dn);
-	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
-	if (err) {
-		unlock_page(page);
-		goto out_sem;
+	if (need_alloc) {
+		/* block allocation */
+		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+		set_new_dnode(&dn, inode, NULL, NULL, 0);
+		err = f2fs_get_block(&dn, page->index);
+		f2fs_put_dnode(&dn);
+		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+		if (err) {
+			unlock_page(page);
+			goto out_sem;
+		}
 	}
 
 	/* fill the page */
@@ -120,8 +143,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 out_sem:
 	up_read(&F2FS_I(inode)->i_mmap_sem);
 
-	f2fs_balance_fs(sbi, dn.node_changed);
-
 	sb_end_pagefault(inode->i_sb);
 err:
 	return block_page_mkwrite_return(err);
@@ -155,6 +176,8 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
 
 	if (!S_ISREG(inode->i_mode))
 		cp_reason = CP_NON_REGULAR;
+	else if (f2fs_compressed_file(inode))
+		cp_reason = CP_COMPRESSED;
 	else if (inode->i_nlink != 1)
 		cp_reason = CP_HARDLINK;
 	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
@@ -485,6 +508,9 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
 		return -EIO;
 
+	if (!f2fs_is_compress_backend_ready(inode))
+		return -EOPNOTSUPP;
+
 	/* we don't need to use inline_data strictly */
 	err = f2fs_convert_inline_inode(inode);
 	if (err)
@@ -492,6 +518,7 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
 
 	file_accessed(file);
 	vma->vm_ops = &f2fs_file_vm_ops;
+	set_inode_flag(inode, FI_MMAP_FILE);
 	return 0;
 }
 
@@ -502,6 +529,9 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
 	if (err)
 		return err;
 
+	if (!f2fs_is_compress_backend_ready(inode))
+		return -EOPNOTSUPP;
+
 	err = fsverity_file_open(inode, filp);
 	if (err)
 		return err;
@@ -518,6 +548,9 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
 	__le32 *addr;
 	int base = 0;
+	bool compressed_cluster = false;
+	int cluster_index = 0, valid_blocks = 0;
+	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
 
 	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
 		base = get_extra_isize(dn->inode);
@@ -525,26 +558,43 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 	raw_node = F2FS_NODE(dn->node_page);
 	addr = blkaddr_in_node(raw_node) + base + ofs;
 
-	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
+	/* Assumption: truncation starts at a cluster boundary */
+	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
 		block_t blkaddr = le32_to_cpu(*addr);
 
+		if (f2fs_compressed_file(dn->inode) &&
+					!(cluster_index & (cluster_size - 1))) {
+			if (compressed_cluster)
+				f2fs_i_compr_blocks_update(dn->inode,
+							valid_blocks, false);
+			compressed_cluster = (blkaddr == COMPRESS_ADDR);
+			valid_blocks = 0;
+		}
+
 		if (blkaddr == NULL_ADDR)
 			continue;
 
 		dn->data_blkaddr = NULL_ADDR;
 		f2fs_set_data_blkaddr(dn);
 
-		if (__is_valid_data_blkaddr(blkaddr) &&
-			!f2fs_is_valid_blkaddr(sbi, blkaddr,
+		if (__is_valid_data_blkaddr(blkaddr)) {
+			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
 					DATA_GENERIC_ENHANCE))
-			continue;
+				continue;
+			if (compressed_cluster)
+				valid_blocks++;
+		}
 
-		f2fs_invalidate_blocks(sbi, blkaddr);
 		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
 			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
+
+		f2fs_invalidate_blocks(sbi, blkaddr);
 		nr_free++;
 	}
 
+	if (compressed_cluster)
+		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
+
 	if (nr_free) {
 		pgoff_t fofs;
 		/*
@@ -587,6 +637,9 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
 		return 0;
 	}
 
+	if (f2fs_compressed_file(inode))
+		return 0;
+
 	page = f2fs_get_lock_data_page(inode, index, true);
 	if (IS_ERR(page))
 		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
@@ -602,7 +655,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
 	return 0;
 }
 
-int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
+static int do_truncate_blocks(struct inode *inode, u64 from, bool lock)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct dnode_of_data dn;
@@ -667,6 +720,28 @@ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
 	return err;
 }
 
+int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
+{
+	u64 free_from = from;
+
+	/*
+	 * For compressed files, only cluster-size-aligned
+	 * truncation is supported.
+	 */
+	if (f2fs_compressed_file(inode)) {
+		size_t cluster_shift = PAGE_SHIFT +
+					F2FS_I(inode)->i_log_cluster_size;
+		size_t cluster_mask = (1 << cluster_shift) - 1;
+
+		free_from = from >> cluster_shift;
+		if (from & cluster_mask)
+			free_from++;
+		free_from <<= cluster_shift;
+	}
+
+	return do_truncate_blocks(inode, free_from, lock);
+}
+
 int f2fs_truncate(struct inode *inode)
 {
 	int err;
@@ -754,18 +829,12 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
 		inode->i_uid = attr->ia_uid;
 	if (ia_valid & ATTR_GID)
 		inode->i_gid = attr->ia_gid;
-	if (ia_valid & ATTR_ATIME) {
-		inode->i_atime = timestamp_truncate(attr->ia_atime,
-						  inode);
-	}
-	if (ia_valid & ATTR_MTIME) {
-		inode->i_mtime = timestamp_truncate(attr->ia_mtime,
-						  inode);
-	}
-	if (ia_valid & ATTR_CTIME) {
-		inode->i_ctime = timestamp_truncate(attr->ia_ctime,
-						  inode);
-	}
+	if (ia_valid & ATTR_ATIME)
+		inode->i_atime = attr->ia_atime;
+	if (ia_valid & ATTR_MTIME)
+		inode->i_mtime = attr->ia_mtime;
+	if (ia_valid & ATTR_CTIME)
+		inode->i_ctime = attr->ia_ctime;
 	if (ia_valid & ATTR_MODE) {
 		umode_t mode = attr->ia_mode;
 
@@ -786,6 +855,10 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
 		return -EIO;
 
+	if ((attr->ia_valid & ATTR_SIZE) &&
+		!f2fs_is_compress_backend_ready(inode))
+		return -EOPNOTSUPP;
+
 	err = setattr_prepare(dentry, attr);
 	if (err)
 		return err;
@@ -1026,8 +1099,8 @@ static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
 	} else if (ret == -ENOENT) {
 		if (dn.max_level == 0)
 			return -ENOENT;
-		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) - dn.ofs_in_node,
-									len);
+		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
+						dn.ofs_in_node, len);
 		blkaddr += done;
 		do_replace += done;
 		goto next;
@@ -1190,13 +1263,13 @@ static int __exchange_data_block(struct inode *src_inode,
 
 		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
 					array_size(olen, sizeof(block_t)),
-					GFP_KERNEL);
+					GFP_NOFS);
 		if (!src_blkaddr)
 			return -ENOMEM;
 
 		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
 					array_size(olen, sizeof(int)),
-					GFP_KERNEL);
+					GFP_NOFS);
 		if (!do_replace) {
 			kvfree(src_blkaddr);
 			return -ENOMEM;
@@ -1563,7 +1636,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 next_alloc:
 		if (has_not_enough_free_secs(sbi, 0,
 			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
-			mutex_lock(&sbi->gc_mutex);
+			down_write(&sbi->gc_lock);
 			err = f2fs_gc(sbi, true, false, NULL_SEGNO);
 			if (err && err != -ENODATA && err != -EAGAIN)
 				goto out_err;
@@ -1621,6 +1694,8 @@ static long f2fs_fallocate(struct file *file, int mode,
 		return -EIO;
 	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
 		return -ENOSPC;
+	if (!f2fs_is_compress_backend_ready(inode))
+		return -EOPNOTSUPP;
 
 	/* f2fs only support ->fallocate for regular file */
 	if (!S_ISREG(inode->i_mode))
@@ -1630,6 +1705,11 @@ static long f2fs_fallocate(struct file *file, int mode,
 		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
 		return -EOPNOTSUPP;
 
+	if (f2fs_compressed_file(inode) &&
+		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
+			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
+		return -EOPNOTSUPP;
+
 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
 			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
 			FALLOC_FL_INSERT_RANGE))
@@ -1719,7 +1799,40 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
 			return -ENOTEMPTY;
 	}
 
+	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
+		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+			return -EOPNOTSUPP;
+		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
+			return -EINVAL;
+	}
+
+	if ((iflags ^ fi->i_flags) & F2FS_COMPR_FL) {
+		if (S_ISREG(inode->i_mode) &&
+			(fi->i_flags & F2FS_COMPR_FL || i_size_read(inode) ||
+						F2FS_HAS_BLOCKS(inode)))
+			return -EINVAL;
+		if (iflags & F2FS_NOCOMP_FL)
+			return -EINVAL;
+		if (iflags & F2FS_COMPR_FL) {
+			int err = f2fs_convert_inline_inode(inode);
+
+			if (err)
+				return err;
+
+			if (!f2fs_may_compress(inode))
+				return -EINVAL;
+
+			set_compress_context(inode);
+		}
+	}
+	if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
+		if (fi->i_flags & F2FS_COMPR_FL)
+			return -EINVAL;
+	}
+
 	fi->i_flags = iflags | (fi->i_flags & ~mask);
+	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
+					(fi->i_flags & F2FS_NOCOMP_FL));
 
 	if (fi->i_flags & F2FS_PROJINHERIT_FL)
 		set_inode_flag(inode, FI_PROJ_INHERIT);
@@ -1745,11 +1858,13 @@ static const struct {
 	u32 iflag;
 	u32 fsflag;
 } f2fs_fsflags_map[] = {
+	{ F2FS_COMPR_FL,	FS_COMPR_FL },
 	{ F2FS_SYNC_FL,		FS_SYNC_FL },
 	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
 	{ F2FS_APPEND_FL,	FS_APPEND_FL },
 	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
 	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
+	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
 	{ F2FS_INDEX_FL,	FS_INDEX_FL },
 	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
 	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
@@ -1757,11 +1872,13 @@ static const struct {
 };
 
 #define F2FS_GETTABLE_FS_FL (		\
+		FS_COMPR_FL |		\
 		FS_SYNC_FL |		\
 		FS_IMMUTABLE_FL |	\
 		FS_APPEND_FL |		\
 		FS_NODUMP_FL |		\
 		FS_NOATIME_FL |		\
+		FS_NOCOMP_FL |		\
 		FS_INDEX_FL |		\
 		FS_DIRSYNC_FL |		\
 		FS_PROJINHERIT_FL |	\
@@ -1772,11 +1889,13 @@ static const struct {
 		FS_CASEFOLD_FL)
 
 #define F2FS_SETTABLE_FS_FL (		\
+		FS_COMPR_FL |		\
 		FS_SYNC_FL |		\
 		FS_IMMUTABLE_FL |	\
 		FS_APPEND_FL |		\
 		FS_NODUMP_FL |		\
 		FS_NOATIME_FL |		\
+		FS_NOCOMP_FL |		\
 		FS_DIRSYNC_FL |		\
 		FS_PROJINHERIT_FL |	\
 		FS_CASEFOLD_FL)
@@ -1897,6 +2016,8 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 
 	inode_lock(inode);
 
+	f2fs_disable_compressed_file(inode);
+
 	if (f2fs_is_atomic_file(inode)) {
 		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
 			ret = -EINVAL;
@@ -1935,7 +2056,6 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 	F2FS_I(inode)->inmem_task = current;
-	stat_inc_atomic_write(inode);
 	stat_update_max_atomic_write(inode);
 out:
 	inode_unlock(inode);
@@ -2324,12 +2444,12 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
 		return ret;
 
 	if (!sync) {
-		if (!mutex_trylock(&sbi->gc_mutex)) {
+		if (!down_write_trylock(&sbi->gc_lock)) {
 			ret = -EBUSY;
 			goto out;
 		}
 	} else {
-		mutex_lock(&sbi->gc_mutex);
+		down_write(&sbi->gc_lock);
 	}
 
 	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
@@ -2367,12 +2487,12 @@ static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
 
 do_more:
 	if (!range.sync) {
-		if (!mutex_trylock(&sbi->gc_mutex)) {
+		if (!down_write_trylock(&sbi->gc_lock)) {
 			ret = -EBUSY;
 			goto out;
 		}
 	} else {
-		mutex_lock(&sbi->gc_mutex);
+		down_write(&sbi->gc_lock);
 	}
 
 	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
@@ -2803,7 +2923,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
 	end_segno = min(start_segno + range.segments, dev_end_segno);
 
 	while (start_segno < end_segno) {
-		if (!mutex_trylock(&sbi->gc_mutex)) {
+		if (!down_write_trylock(&sbi->gc_lock)) {
 			ret = -EBUSY;
 			goto out;
 		}
@@ -3098,10 +3218,16 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
 		ret = -EAGAIN;
 		goto out;
 	}
+
 	ret = f2fs_convert_inline_inode(inode);
 	if (ret)
 		goto out;
 
+	if (f2fs_disable_compressed_file(inode)) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
 	set_inode_flag(inode, FI_PIN_FILE);
 	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
 done:
@@ -3350,6 +3476,17 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	}
 }
 
+static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file_inode(file);
+
+	if (!f2fs_is_compress_backend_ready(inode))
+		return -EOPNOTSUPP;
+
+	return generic_file_read_iter(iocb, iter);
+}
+
 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
@@ -3361,6 +3498,9 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		goto out;
 	}
 
+	if (!f2fs_is_compress_backend_ready(inode))
+		return -EOPNOTSUPP;
+
 	if (iocb->ki_flags & IOCB_NOWAIT) {
 		if (!inode_trylock(inode)) {
 			ret = -EAGAIN;
@@ -3389,18 +3529,41 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 				ret = -EAGAIN;
 				goto out;
 			}
-		} else {
-			preallocated = true;
-			target_size = iocb->ki_pos + iov_iter_count(from);
-
-			err = f2fs_preallocate_blocks(iocb, from);
-			if (err) {
-				clear_inode_flag(inode, FI_NO_PREALLOC);
-				inode_unlock(inode);
-				ret = err;
-				goto out;
-			}
+			goto write;
 		}
+
+		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
+			goto write;
+
+		if (iocb->ki_flags & IOCB_DIRECT) {
+			/*
+			 * Convert inline data for Direct I/O before entering
+			 * f2fs_direct_IO().
+			 */
+			err = f2fs_convert_inline_inode(inode);
+			if (err)
+				goto out_err;
+			/*
+			 * If f2fs_force_buffered_io() is true, we have to allocate
+			 * blocks all the time, since f2fs_direct_IO will fall
+			 * back to buffered IO.
+			 */
+			if (!f2fs_force_buffered_io(inode, iocb, from) &&
+					allow_outplace_dio(inode, iocb, from))
+				goto write;
+		}
+		preallocated = true;
+		target_size = iocb->ki_pos + iov_iter_count(from);
+
+		err = f2fs_preallocate_blocks(iocb, from);
+		if (err) {
+out_err:
+			clear_inode_flag(inode, FI_NO_PREALLOC);
+			inode_unlock(inode);
+			ret = err;
+			goto out;
+		}
+write:
 		ret = __generic_file_write_iter(iocb, from);
 		clear_inode_flag(inode, FI_NO_PREALLOC);
 
@@ -3475,7 +3638,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 const struct file_operations f2fs_file_operations = {
 	.llseek		= f2fs_llseek,
-	.read_iter	= generic_file_read_iter,
+	.read_iter	= f2fs_file_read_iter,
 	.write_iter	= f2fs_file_write_iter,
 	.open		= f2fs_file_open,
 	.release	= f2fs_release_file,
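
For compressed files, f2fs_truncate_blocks() above rounds the truncation point up to the next cluster boundary, so a partially truncated cluster is kept whole. A stand-alone sketch of that rounding, assuming 4 KiB pages (constants and names here are illustrative only):

	#include <stdio.h>

	#define TOY_PAGE_SHIFT	12	/* assumed 4 KiB pages */

	/* Round a byte offset up to the next compression-cluster boundary. */
	static unsigned long long round_up_to_cluster(unsigned long long from,
						      unsigned int log_cluster_size)
	{
		unsigned int cluster_shift = TOY_PAGE_SHIFT + log_cluster_size;
		unsigned long long cluster_mask = (1ULL << cluster_shift) - 1;
		unsigned long long free_from = from >> cluster_shift;

		if (from & cluster_mask)	/* partial cluster: keep it whole */
			free_from++;
		return free_from << cluster_shift;
	}

	int main(void)
	{
		/* 2-page clusters (log size 1): byte 5000 rounds up to 8192 */
		printf("%llu\n", round_up_to_cluster(5000, 1));
		return 0;
	}
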
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index b3d3996..db8725d 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -78,18 +78,18 @@ static int gc_thread_func(void *data)
 		 */
 		if (sbi->gc_mode == GC_URGENT) {
 			wait_ms = gc_th->urgent_sleep_time;
-			mutex_lock(&sbi->gc_mutex);
+			down_write(&sbi->gc_lock);
 			goto do_gc;
 		}
 
-		if (!mutex_trylock(&sbi->gc_mutex)) {
+		if (!down_write_trylock(&sbi->gc_lock)) {
 			stat_other_skip_bggc_count(sbi);
 			goto next;
 		}
 
 		if (!is_idle(sbi, GC_TIME)) {
 			increase_sleep_time(gc_th, &wait_ms);
-			mutex_unlock(&sbi->gc_mutex);
+			up_write(&sbi->gc_lock);
 			stat_io_skip_bggc_count(sbi);
 			goto next;
 		}
@@ -99,7 +99,7 @@ static int gc_thread_func(void *data)
 		else
 			increase_sleep_time(gc_th, &wait_ms);
 do_gc:
-		stat_inc_bggc_count(sbi);
+		stat_inc_bggc_count(sbi->stat_info);
 
 		/* if return value is not zero, no victim was selected */
 		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
@@ -1049,8 +1049,10 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 
 		if (phase == 3) {
 			inode = f2fs_iget(sb, dni.ino);
-			if (IS_ERR(inode) || is_bad_inode(inode))
+			if (IS_ERR(inode) || is_bad_inode(inode)) {
+				set_sbi_flag(sbi, SBI_NEED_FSCK);
 				continue;
+			}
 
 			if (!down_write_trylock(
 				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
@@ -1368,7 +1370,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
 				reserved_segments(sbi),
 				prefree_segments(sbi));
 
-	mutex_unlock(&sbi->gc_mutex);
+	up_write(&sbi->gc_lock);
 
 	put_gc_inode(&gc_list);
 
@@ -1407,9 +1409,9 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
 			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
 		};
 
-		mutex_lock(&sbi->gc_mutex);
+		down_write(&sbi->gc_lock);
 		do_garbage_collect(sbi, segno, &gc_list, FG_GC);
-		mutex_unlock(&sbi->gc_mutex);
+		up_write(&sbi->gc_lock);
 		put_gc_inode(&gc_list);
 
 		if (get_valid_blocks(sbi, segno, true))
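
GC serialization changes above from a mutex (gc_mutex) to an rw_semaphore (gc_lock); the background GC thread still takes it with a writer-side trylock, so a busy lock means the pass is skipped and counted rather than waited for. A user-space analogue of that pattern using pthreads, purely illustrative and not the kernel rw_semaphore API:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t gc_lock = PTHREAD_RWLOCK_INITIALIZER;

	static void background_gc_pass(void)
	{
		if (pthread_rwlock_trywrlock(&gc_lock) != 0) {
			/* lock is busy: skip this pass instead of blocking */
			printf("bggc skipped\n");
			return;
		}
		printf("bggc running\n");
		/* ... one garbage-collection pass would go here ... */
		pthread_rwlock_unlock(&gc_lock);
	}

	int main(void)
	{
		background_gc_pass();
		return 0;
	}
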
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 896db04..4167e54 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -368,7 +368,7 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
 	struct f2fs_dentry_ptr src, dst;
 	int err;
 
-	page = f2fs_grab_cache_page(dir->i_mapping, 0, false);
+	page = f2fs_grab_cache_page(dir->i_mapping, 0, true);
 	if (!page) {
 		f2fs_put_page(ipage, 1);
 		return -ENOMEM;
@@ -530,7 +530,7 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
 	return err;
 }
 
-static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
+static int do_convert_inline_dir(struct inode *dir, struct page *ipage,
 							void *inline_dentry)
 {
 	if (!F2FS_I(dir)->i_dir_level)
@@ -539,6 +539,44 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
 		return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
 }
 
+int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+	struct page *ipage;
+	struct fscrypt_name fname;
+	void *inline_dentry = NULL;
+	int err = 0;
+
+	if (!f2fs_has_inline_dentry(dir))
+		return 0;
+
+	f2fs_lock_op(sbi);
+
+	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
+	if (err)
+		goto out;
+
+	ipage = f2fs_get_node_page(sbi, dir->i_ino);
+	if (IS_ERR(ipage)) {
+		err = PTR_ERR(ipage);
+		goto out;
+	}
+
+	if (f2fs_has_enough_room(dir, ipage, &fname)) {
+		f2fs_put_page(ipage, 1);
+		goto out;
+	}
+
+	inline_dentry = inline_data_addr(dir, ipage);
+
+	err = do_convert_inline_dir(dir, ipage, inline_dentry);
+	if (!err)
+		f2fs_put_page(ipage, 1);
+out:
+	f2fs_unlock_op(sbi);
+	return err;
+}
+
 int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
 				const struct qstr *orig_name,
 				struct inode *inode, nid_t ino, umode_t mode)
@@ -562,7 +600,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
 
 	bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
 	if (bit_pos >= d.max) {
-		err = f2fs_convert_inline_dir(dir, ipage, inline_dentry);
+		err = do_convert_inline_dir(dir, ipage, inline_dentry);
 		if (err)
 			return err;
 		err = -EAGAIN;
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 502bd49..78c3f1d 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -200,6 +200,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
+	struct f2fs_inode *ri = F2FS_INODE(node_page);
 	unsigned long long iblocks;
 
 	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
@@ -286,6 +287,19 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
 		return false;
 	}
 
+	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
+			fi->i_flags & F2FS_COMPR_FL &&
+			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
+						i_log_cluster_size)) {
+		if (ri->i_compress_algorithm >= COMPRESS_MAX)
+			return false;
+		if (le64_to_cpu(ri->i_compr_blocks) > inode->i_blocks)
+			return false;
+		if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
+			ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE)
+			return false;
+	}
+
 	return true;
 }
 
@@ -407,6 +421,18 @@ static int do_read_inode(struct inode *inode)
 		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
 	}
 
+	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
+					(fi->i_flags & F2FS_COMPR_FL)) {
+		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
+					i_log_cluster_size)) {
+			fi->i_compr_blocks = le64_to_cpu(ri->i_compr_blocks);
+			fi->i_compress_algorithm = ri->i_compress_algorithm;
+			fi->i_log_cluster_size = ri->i_log_cluster_size;
+			fi->i_cluster_size = 1 << fi->i_log_cluster_size;
+			set_inode_flag(inode, FI_COMPRESSED_FILE);
+		}
+	}
+
 	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
 	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
 	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
@@ -416,6 +442,8 @@ static int do_read_inode(struct inode *inode)
 	stat_inc_inline_xattr(inode);
 	stat_inc_inline_inode(inode);
 	stat_inc_inline_dir(inode);
+	stat_inc_compr_inode(inode);
+	stat_add_compr_blocks(inode, F2FS_I(inode)->i_compr_blocks);
 
 	return 0;
 }
@@ -569,6 +597,17 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
 			ri->i_crtime_nsec =
 				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
 		}
+
+		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
+			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
+							i_log_cluster_size)) {
+			ri->i_compr_blocks =
+				cpu_to_le64(F2FS_I(inode)->i_compr_blocks);
+			ri->i_compress_algorithm =
+				F2FS_I(inode)->i_compress_algorithm;
+			ri->i_log_cluster_size =
+				F2FS_I(inode)->i_log_cluster_size;
+		}
 	}
 
 	__set_inode_rdev(inode, ri);
@@ -711,6 +750,8 @@ void f2fs_evict_inode(struct inode *inode)
 	stat_dec_inline_xattr(inode);
 	stat_dec_inline_dir(inode);
 	stat_dec_inline_inode(inode);
+	stat_dec_compr_inode(inode);
+	stat_sub_compr_blocks(inode, F2FS_I(inode)->i_compr_blocks);
 
 	if (likely(!f2fs_cp_error(sbi) &&
 				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index a1c507b..2aa0354 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -119,6 +119,13 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
 	if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL)
 		set_inode_flag(inode, FI_PROJ_INHERIT);
 
+	if (f2fs_sb_has_compression(sbi)) {
+		/* Inherit the compression flag from the parent directory */
+		if ((F2FS_I(dir)->i_flags & F2FS_COMPR_FL) &&
+					f2fs_may_compress(inode))
+			set_compress_context(inode);
+	}
+
 	f2fs_set_inode_flags(inode);
 
 	trace_f2fs_new_inode(inode, 0);
@@ -149,6 +156,9 @@ static inline int is_extension_exist(const unsigned char *s, const char *sub)
 	size_t sublen = strlen(sub);
 	int i;
 
+	if (sublen == 1 && *sub == '*')
+		return 1;
+
 	/*
 	 * filename format of multimedia file should be defined as:
 	 * "filename + '.' + extension + (optional: '.' + temp extension)".
@@ -262,6 +272,45 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
 	return 0;
 }
 
+static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
+						const unsigned char *name)
+{
+	__u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
+	unsigned char (*ext)[F2FS_EXTENSION_LEN];
+	unsigned int ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+	int i, cold_count, hot_count;
+
+	if (!f2fs_sb_has_compression(sbi) ||
+			is_inode_flag_set(inode, FI_COMPRESSED_FILE) ||
+			F2FS_I(inode)->i_flags & F2FS_NOCOMP_FL ||
+			!f2fs_may_compress(inode))
+		return;
+
+	down_read(&sbi->sb_lock);
+
+	cold_count = le32_to_cpu(sbi->raw_super->extension_count);
+	hot_count = sbi->raw_super->hot_ext_count;
+
+	for (i = cold_count; i < cold_count + hot_count; i++) {
+		if (is_extension_exist(name, extlist[i])) {
+			up_read(&sbi->sb_lock);
+			return;
+		}
+	}
+
+	up_read(&sbi->sb_lock);
+
+	ext = F2FS_OPTION(sbi).extensions;
+
+	for (i = 0; i < ext_cnt; i++) {
+		if (!is_extension_exist(name, ext[i]))
+			continue;
+
+		set_compress_context(inode);
+		return;
+	}
+}
+
 static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 						bool excl)
 {
@@ -286,6 +335,8 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 	if (!test_opt(sbi, DISABLE_EXT_IDENTIFY))
 		set_file_temperature(sbi, inode, dentry->d_name.name);
 
+	set_compress_inode(sbi, inode, dentry->d_name.name);
+
 	inode->i_op = &f2fs_file_inode_operations;
 	inode->i_fop = &f2fs_file_operations;
 	inode->i_mapping->a_ops = &f2fs_dblock_aops;
@@ -797,6 +848,7 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
 
 	if (whiteout) {
 		f2fs_i_links_write(inode, false);
+		inode->i_state |= I_LINKABLE;
 		*whiteout = inode;
 	} else {
 		d_tmpfile(dentry, inode);
@@ -849,12 +901,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	struct inode *old_inode = d_inode(old_dentry);
 	struct inode *new_inode = d_inode(new_dentry);
 	struct inode *whiteout = NULL;
-	struct page *old_dir_page;
+	struct page *old_dir_page = NULL;
 	struct page *old_page, *new_page = NULL;
 	struct f2fs_dir_entry *old_dir_entry = NULL;
 	struct f2fs_dir_entry *old_entry;
 	struct f2fs_dir_entry *new_entry;
-	bool is_old_inline = f2fs_has_inline_dentry(old_dir);
 	int err;
 
 	if (unlikely(f2fs_cp_error(sbi)))
@@ -867,6 +918,26 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 			F2FS_I(old_dentry->d_inode)->i_projid)))
 		return -EXDEV;
 
+	/*
+	 * If new_inode is null, the renaming flow below will
+	 * add a link in old_dir, which can convert the inline dir.
+	 * If we then fail to get the entry due to other reasons
+	 * like ENOMEM, we would have to remove the new entry.
+	 * Instead of adding such an error handling routine, let's
+	 * simply convert the directory first here.
+	 */
+	if (old_dir == new_dir && !new_inode) {
+		err = f2fs_try_convert_inline_dir(old_dir, new_dentry);
+		if (err)
+			return err;
+	}
+
+	if (flags & RENAME_WHITEOUT) {
+		err = f2fs_create_whiteout(old_dir, &whiteout);
+		if (err)
+			return err;
+	}
+
 	err = dquot_initialize(old_dir);
 	if (err)
 		goto out;
@@ -898,17 +969,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		}
 	}
 
-	if (flags & RENAME_WHITEOUT) {
-		err = f2fs_create_whiteout(old_dir, &whiteout);
-		if (err)
-			goto out_dir;
-	}
-
 	if (new_inode) {
 
 		err = -ENOTEMPTY;
 		if (old_dir_entry && !f2fs_empty_dir(new_inode))
-			goto out_whiteout;
+			goto out_dir;
 
 		err = -ENOENT;
 		new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
@@ -916,7 +981,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		if (!new_entry) {
 			if (IS_ERR(new_page))
 				err = PTR_ERR(new_page);
-			goto out_whiteout;
+			goto out_dir;
 		}
 
 		f2fs_balance_fs(sbi, true);
@@ -928,6 +993,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 			goto put_out_dir;
 
 		f2fs_set_link(new_dir, new_entry, new_page, old_inode);
+		new_page = NULL;
 
 		new_inode->i_ctime = current_time(new_inode);
 		down_write(&F2FS_I(new_inode)->i_sem);
@@ -948,33 +1014,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		err = f2fs_add_link(new_dentry, old_inode);
 		if (err) {
 			f2fs_unlock_op(sbi);
-			goto out_whiteout;
+			goto out_dir;
 		}
 
 		if (old_dir_entry)
 			f2fs_i_links_write(new_dir, true);
-
-		/*
-		 * old entry and new entry can locate in the same inline
-		 * dentry in inode, when attaching new entry in inline dentry,
-		 * it could force inline dentry conversion, after that,
-		 * old_entry and old_page will point to wrong address, in
-		 * order to avoid this, let's do the check and update here.
-		 */
-		if (is_old_inline && !f2fs_has_inline_dentry(old_dir)) {
-			f2fs_put_page(old_page, 0);
-			old_page = NULL;
-
-			old_entry = f2fs_find_entry(old_dir,
-						&old_dentry->d_name, &old_page);
-			if (!old_entry) {
-				err = -ENOENT;
-				if (IS_ERR(old_page))
-					err = PTR_ERR(old_page);
-				f2fs_unlock_op(sbi);
-				goto out_whiteout;
-			}
-		}
 	}
 
 	down_write(&F2FS_I(old_inode)->i_sem);
@@ -989,9 +1033,9 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	f2fs_mark_inode_dirty_sync(old_inode, false);
 
 	f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
+	old_page = NULL;
 
 	if (whiteout) {
-		whiteout->i_state |= I_LINKABLE;
 		set_inode_flag(whiteout, FI_INC_LINK);
 		err = f2fs_add_link(old_dentry, whiteout);
 		if (err)
@@ -1025,17 +1069,15 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
 put_out_dir:
 	f2fs_unlock_op(sbi);
-	if (new_page)
-		f2fs_put_page(new_page, 0);
-out_whiteout:
-	if (whiteout)
-		iput(whiteout);
+	f2fs_put_page(new_page, 0);
 out_dir:
 	if (old_dir_entry)
 		f2fs_put_page(old_dir_page, 0);
 out_old:
 	f2fs_put_page(old_page, 0);
 out:
+	if (whiteout)
+		iput(whiteout);
 	return err;
 }
 
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 3314a0f..9d02cdc 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -875,7 +875,7 @@ static int truncate_dnode(struct dnode_of_data *dn)
 
 	/* get direct node */
 	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
-	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
+	if (PTR_ERR(page) == -ENOENT)
 		return 1;
 	else if (IS_ERR(page))
 		return PTR_ERR(page);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 76477f7..763d5c0 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -723,6 +723,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
 	int ret = 0;
 	unsigned long s_flags = sbi->sb->s_flags;
 	bool need_writecp = false;
+	bool fix_curseg_write_pointer = false;
 #ifdef CONFIG_QUOTA
 	int quota_enabled;
 #endif
@@ -774,6 +775,8 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
 		sbi->sb->s_flags = s_flags;
 	}
 skip:
+	fix_curseg_write_pointer = !check_only || list_empty(&inode_list);
+
 	destroy_fsync_dnodes(&inode_list, err);
 	destroy_fsync_dnodes(&tmp_inode_list, err);
 
@@ -784,9 +787,22 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
 	if (err) {
 		truncate_inode_pages_final(NODE_MAPPING(sbi));
 		truncate_inode_pages_final(META_MAPPING(sbi));
-	} else {
-		clear_sbi_flag(sbi, SBI_POR_DOING);
 	}
+
+	/*
+	 * If fsync data recovery succeeds, or there is no fsync data to
+	 * recover, and the filesystem is not read-only, check and fix the
+	 * zoned block devices' write pointer consistency.
+	 */
+	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
+			f2fs_sb_has_blkzoned(sbi)) {
+		err = f2fs_fix_curseg_write_pointer(sbi);
+		ret = err;
+	}
+
+	if (!err)
+		clear_sbi_flag(sbi, SBI_POR_DOING);
+
 	mutex_unlock(&sbi->cp_mutex);
 
 	/* let's drop all the directory inodes for clean checkpoint */
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 56e8144..cf0eb00 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -334,7 +334,6 @@ void f2fs_drop_inmem_pages(struct inode *inode)
 	}
 
 	fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
-	stat_dec_atomic_write(inode);
 
 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
 	if (!list_empty(&fi->inmem_ilist))
@@ -505,7 +504,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 	 * dir/node pages without enough free segments.
 	 */
 	if (has_not_enough_free_secs(sbi, 0, 0)) {
-		mutex_lock(&sbi->gc_mutex);
+		down_write(&sbi->gc_lock);
 		f2fs_gc(sbi, false, false, NULL_SEGNO);
 	}
 }
@@ -2225,7 +2224,7 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
 	struct sit_info *sit_i = SIT_I(sbi);
 
 	f2fs_bug_on(sbi, addr == NULL_ADDR);
-	if (addr == NEW_ADDR)
+	if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
 		return;
 
 	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
@@ -2861,9 +2860,9 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 	if (sbi->discard_blks == 0)
 		goto out;
 
-	mutex_lock(&sbi->gc_mutex);
+	down_write(&sbi->gc_lock);
 	err = f2fs_write_checkpoint(sbi, &cpc);
-	mutex_unlock(&sbi->gc_mutex);
+	up_write(&sbi->gc_lock);
 	if (err)
 		goto out;
 
@@ -3036,7 +3035,8 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
 	if (fio->type == DATA) {
 		struct inode *inode = fio->page->mapping->host;
 
-		if (is_cold_data(fio->page) || file_is_cold(inode))
+		if (is_cold_data(fio->page) || file_is_cold(inode) ||
+				f2fs_compressed_file(inode))
 			return CURSEG_COLD_DATA;
 		if (file_is_hot(inode) ||
 				is_inode_flag_set(inode, FI_HOT_DATA) ||
@@ -3289,7 +3289,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
 
 	stat_inc_inplace_blocks(fio->sbi);
 
-	if (fio->bio)
+	if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
 		err = f2fs_merge_page_bio(fio);
 	else
 		err = f2fs_submit_page_bio(fio);
@@ -4368,6 +4368,263 @@ static int sanity_check_curseg(struct f2fs_sb_info *sbi)
 	return 0;
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
+
+static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
+				    struct f2fs_dev_info *fdev,
+				    struct blk_zone *zone)
+{
+	unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
+	block_t zone_block, wp_block, last_valid_block;
+	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
+	int i, s, b, ret;
+	struct seg_entry *se;
+
+	if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
+		return 0;
+
+	wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
+	wp_segno = GET_SEGNO(sbi, wp_block);
+	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
+	zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
+	zone_segno = GET_SEGNO(sbi, zone_block);
+	zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
+
+	if (zone_segno >= MAIN_SEGS(sbi))
+		return 0;
+
+	/*
+	 * Skip checking the zones that cursegs point to, since
+	 * fix_curseg_write_pointer() checks them.
+	 */
+	for (i = 0; i < NO_CHECK_TYPE; i++)
+		if (zone_secno == GET_SEC_FROM_SEG(sbi,
+						   CURSEG_I(sbi, i)->segno))
+			return 0;
+
+	/*
+	 * Get last valid block of the zone.
+	 */
+	last_valid_block = zone_block - 1;
+	for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
+		segno = zone_segno + s;
+		se = get_seg_entry(sbi, segno);
+		for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
+			if (f2fs_test_bit(b, se->cur_valid_map)) {
+				last_valid_block = START_BLOCK(sbi, segno) + b;
+				break;
+			}
+		if (last_valid_block >= zone_block)
+			break;
+	}
+
+	/*
+	 * If last valid block is beyond the write pointer, report the
+	 * inconsistency. This inconsistency does not cause a write error
+	 * because the zone will not be selected for write operations until
+	 * it gets discarded. Just report it.
+	 */
+	if (last_valid_block >= wp_block) {
+		f2fs_notice(sbi, "Valid block beyond write pointer: "
+			    "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
+			    GET_SEGNO(sbi, last_valid_block),
+			    GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
+			    wp_segno, wp_blkoff);
+		return 0;
+	}
+
+	/*
+	 * If there is no valid block in the zone and if write pointer is
+	 * not at zone start, reset the write pointer.
+	 */
+	if (last_valid_block + 1 == zone_block && zone->wp != zone->start) {
+		f2fs_notice(sbi,
+			    "Zone without valid block has non-zero write "
+			    "pointer. Reset the write pointer: wp[0x%x,0x%x]",
+			    wp_segno, wp_blkoff);
+		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
+					zone->len >> log_sectors_per_block);
+		if (ret) {
+			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
+				 fdev->path, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
+						  block_t zone_blkaddr)
+{
+	int i;
+
+	for (i = 0; i < sbi->s_ndevs; i++) {
+		if (!bdev_is_zoned(FDEV(i).bdev))
+			continue;
+		if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
+				zone_blkaddr <= FDEV(i).end_blk))
+			return &FDEV(i);
+	}
+
+	return NULL;
+}
+
+static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
+			      void *data) {
+	memcpy(data, zone, sizeof(struct blk_zone));
+	return 0;
+}
+
+static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
+{
+	struct curseg_info *cs = CURSEG_I(sbi, type);
+	struct f2fs_dev_info *zbd;
+	struct blk_zone zone;
+	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
+	block_t cs_zone_block, wp_block;
+	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
+	sector_t zone_sector;
+	int err;
+
+	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
+	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
+
+	zbd = get_target_zoned_dev(sbi, cs_zone_block);
+	if (!zbd)
+		return 0;
+
+	/* report zone for the sector the curseg points to */
+	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
+		<< log_sectors_per_block;
+	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
+				  report_one_zone_cb, &zone);
+	if (err != 1) {
+		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
+			 zbd->path, err);
+		return err;
+	}
+
+	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
+		return 0;
+
+	wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
+	wp_segno = GET_SEGNO(sbi, wp_block);
+	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
+	wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
+
+	if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
+		wp_sector_off == 0)
+		return 0;
+
+	f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
+		    "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
+		    type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
+
+	f2fs_notice(sbi, "Assign new section to curseg[%d]: "
+		    "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
+	allocate_segment_by_default(sbi, type, true);
+
+	/* check consistency of the zone curseg pointed to */
+	if (check_zone_write_pointer(sbi, zbd, &zone))
+		return -EIO;
+
+	/* check newly assigned zone */
+	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
+	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
+
+	zbd = get_target_zoned_dev(sbi, cs_zone_block);
+	if (!zbd)
+		return 0;
+
+	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
+		<< log_sectors_per_block;
+	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
+				  report_one_zone_cb, &zone);
+	if (err != 1) {
+		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
+			 zbd->path, err);
+		return err;
+	}
+
+	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
+		return 0;
+
+	if (zone.wp != zone.start) {
+		f2fs_notice(sbi,
+			    "New zone for curseg[%d] is not yet discarded. "
+			    "Reset the zone: curseg[0x%x,0x%x]",
+			    type, cs->segno, cs->next_blkoff);
+		err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
+				zone_sector >> log_sectors_per_block,
+				zone.len >> log_sectors_per_block);
+		if (err) {
+			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
+				 zbd->path, err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
+{
+	int i, ret;
+
+	for (i = 0; i < NO_CHECK_TYPE; i++) {
+		ret = fix_curseg_write_pointer(sbi, i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+struct check_zone_write_pointer_args {
+	struct f2fs_sb_info *sbi;
+	struct f2fs_dev_info *fdev;
+};
+
+static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
+				      void *data) {
+	struct check_zone_write_pointer_args *args;
+	args = (struct check_zone_write_pointer_args *)data;
+
+	return check_zone_write_pointer(args->sbi, args->fdev, zone);
+}
+
+int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
+{
+	int i, ret;
+	struct check_zone_write_pointer_args args;
+
+	for (i = 0; i < sbi->s_ndevs; i++) {
+		if (!bdev_is_zoned(FDEV(i).bdev))
+			continue;
+
+		args.sbi = sbi;
+		args.fdev = &FDEV(i);
+		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
+					  check_zone_write_pointer_cb, &args);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+#else
+int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
+{
+	return 0;
+}
+
+int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
+{
+	return 0;
+}
+#endif
+
 /*
  * Update min, max modified time for cost-benefit GC algorithm
  */
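
The zoned-device checks above compare f2fs block addresses with zone write pointers reported by blkdev_report_zones(), which works in 512-byte sectors; the conversion is a shift by log_sectors_per_block, and the GENMASK test verifies the pointer is block-aligned. A tiny sketch of that conversion with assumed constants:

	#include <stdio.h>

	#define TOY_SECTOR_SHIFT	9	/* 512-byte sectors */
	#define TOY_LOG_BLOCKSIZE	12	/* 4 KiB f2fs blocks */

	int main(void)
	{
		unsigned int log_sectors_per_block =
				TOY_LOG_BLOCKSIZE - TOY_SECTOR_SHIFT;	/* 3 */
		unsigned long long wp_sector = 24;		/* hypothetical zone->wp */
		unsigned long long dev_start_blk = 1000;	/* hypothetical start_blk */

		/* wp_block = start_blk + (zone->wp >> log_sectors_per_block) */
		unsigned long long wp_block =
				dev_start_blk + (wp_sector >> log_sectors_per_block);

		/* the pointer is block-aligned iff its low sector bits are zero */
		int aligned = !(wp_sector & ((1ULL << log_sectors_per_block) - 1));

		printf("wp_block=%llu aligned=%d\n", wp_block, aligned);	/* 1003 1 */
		return 0;
	}
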
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index a95467b..459dc39 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -200,18 +200,6 @@ struct segment_allocation {
 	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
 };
 
-/*
- * this value is set in page as a private data which indicate that
- * the page is atomically written, and it is in inmem_pages list.
- */
-#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
-#define DUMMY_WRITTEN_PAGE		((unsigned long)-2)
-
-#define IS_ATOMIC_WRITTEN_PAGE(page)			\
-		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
-#define IS_DUMMY_WRITTEN_PAGE(page)			\
-		(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
-
 #define MAX_SKIP_GC_COUNT			16
 
 struct inmem_pages {
@@ -619,8 +607,10 @@ static inline int utilization(struct f2fs_sb_info *sbi)
  *                     threashold,
  * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
  *                     storages. IPU will be triggered only if the # of dirty
- *                     pages over min_fsync_blocks.
- * F2FS_IPUT_DISABLE - disable IPU. (=default option)
+ *                     pages over min_fsync_blocks. (=default option)
+ * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
+ * F2FS_IPU_NOCACHE - disable IPU bio cache.
+ * F2FS_IPUT_DISABLE - disable IPU. (=default option in LFS mode)
  */
 #define DEF_MIN_IPU_UTIL	70
 #define DEF_MIN_FSYNC_BLOCKS	8
@@ -635,6 +625,7 @@ enum {
 	F2FS_IPU_SSR_UTIL,
 	F2FS_IPU_FSYNC,
 	F2FS_IPU_ASYNC,
+	F2FS_IPU_NOCACHE,
 };
 
 static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
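
The new F2FS_IPU_NOCACHE policy is just another bit in ipu_policy; f2fs_inplace_write_data() checks it with a plain bitmask test before reusing the cached bio. A trivial sketch of that test (enum names here are illustrative stand-ins):

	#include <stdio.h>

	/* Illustrative policy bits, one per in-place-update trigger. */
	enum { IPU_FORCE, IPU_SSR, IPU_UTIL, IPU_SSR_UTIL, IPU_FSYNC,
	       IPU_ASYNC, IPU_NOCACHE };

	int main(void)
	{
		unsigned int ipu_policy = (1u << IPU_FSYNC) | (1u << IPU_NOCACHE);

		/* mirrors: if (fio->bio && !(ipu_policy & (1 << F2FS_IPU_NOCACHE))) */
		if (ipu_policy & (1u << IPU_NOCACHE))
			printf("in-place-update bio cache disabled\n");
		return 0;
	}
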
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 5111e1f..65a7a43 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -141,6 +141,9 @@ enum {
 	Opt_checkpoint_disable_cap,
 	Opt_checkpoint_disable_cap_perc,
 	Opt_checkpoint_enable,
+	Opt_compress_algorithm,
+	Opt_compress_log_size,
+	Opt_compress_extension,
 	Opt_err,
 };
 
@@ -203,6 +206,9 @@ static match_table_t f2fs_tokens = {
 	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
 	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
 	{Opt_checkpoint_enable, "checkpoint=enable"},
+	{Opt_compress_algorithm, "compress_algorithm=%s"},
+	{Opt_compress_log_size, "compress_log_size=%u"},
+	{Opt_compress_extension, "compress_extension=%s"},
 	{Opt_err, NULL},
 };
 
@@ -391,8 +397,9 @@ static int parse_options(struct super_block *sb, char *options)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 	substring_t args[MAX_OPT_ARGS];
+	unsigned char (*ext)[F2FS_EXTENSION_LEN];
 	char *p, *name;
-	int arg = 0;
+	int arg = 0, ext_cnt;
 	kuid_t uid;
 	kgid_t gid;
 #ifdef CONFIG_QUOTA
@@ -810,6 +817,66 @@ static int parse_options(struct super_block *sb, char *options)
 		case Opt_checkpoint_enable:
 			clear_opt(sbi, DISABLE_CHECKPOINT);
 			break;
+		case Opt_compress_algorithm:
+			if (!f2fs_sb_has_compression(sbi)) {
+				f2fs_err(sbi, "Compression feature is off");
+				return -EINVAL;
+			}
+			name = match_strdup(&args[0]);
+			if (!name)
+				return -ENOMEM;
+			if (strlen(name) == 3 && !strcmp(name, "lzo")) {
+				F2FS_OPTION(sbi).compress_algorithm =
+								COMPRESS_LZO;
+			} else if (strlen(name) == 3 &&
+					!strcmp(name, "lz4")) {
+				F2FS_OPTION(sbi).compress_algorithm =
+								COMPRESS_LZ4;
+			} else {
+				kfree(name);
+				return -EINVAL;
+			}
+			kfree(name);
+			break;
+		case Opt_compress_log_size:
+			if (!f2fs_sb_has_compression(sbi)) {
+				f2fs_err(sbi, "Compression feature is off");
+				return -EINVAL;
+			}
+			if (args->from && match_int(args, &arg))
+				return -EINVAL;
+			if (arg < MIN_COMPRESS_LOG_SIZE ||
+				arg > MAX_COMPRESS_LOG_SIZE) {
+				f2fs_err(sbi,
+					"Compress cluster log size is out of range");
+				return -EINVAL;
+			}
+			F2FS_OPTION(sbi).compress_log_size = arg;
+			break;
+		case Opt_compress_extension:
+			if (!f2fs_sb_has_compression(sbi)) {
+				f2fs_err(sbi, "Compression feature is off");
+				return -EINVAL;
+			}
+			name = match_strdup(&args[0]);
+			if (!name)
+				return -ENOMEM;
+
+			ext = F2FS_OPTION(sbi).extensions;
+			ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+
+			if (strlen(name) >= F2FS_EXTENSION_LEN ||
+				ext_cnt >= COMPRESS_EXT_NUM) {
+				f2fs_err(sbi,
+					"invalid extension length/number");
+				kfree(name);
+				return -EINVAL;
+			}
+
+			strcpy(ext[ext_cnt], name);
+			F2FS_OPTION(sbi).compress_ext_cnt++;
+			kfree(name);
+			break;
 		default:
 			f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
 				 p);
@@ -1125,6 +1192,8 @@ static void f2fs_put_super(struct super_block *sb)
 	f2fs_destroy_node_manager(sbi);
 	f2fs_destroy_segment_manager(sbi);
 
+	f2fs_destroy_post_read_wq(sbi);
+
 	kvfree(sbi->ckpt);
 
 	f2fs_unregister_sysfs(sbi);
@@ -1169,9 +1238,9 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
 
 		cpc.reason = __get_cp_reason(sbi);
 
-		mutex_lock(&sbi->gc_mutex);
+		down_write(&sbi->gc_lock);
 		err = f2fs_write_checkpoint(sbi, &cpc);
-		mutex_unlock(&sbi->gc_mutex);
+		up_write(&sbi->gc_lock);
 	}
 	f2fs_trace_ios(NULL, 1);
 
@@ -1213,12 +1282,10 @@ static int f2fs_statfs_project(struct super_block *sb,
 		return PTR_ERR(dquot);
 	spin_lock(&dquot->dq_dqb_lock);
 
-	limit = 0;
-	if (dquot->dq_dqb.dqb_bsoftlimit)
-		limit = dquot->dq_dqb.dqb_bsoftlimit;
-	if (dquot->dq_dqb.dqb_bhardlimit &&
-			(!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
-		limit = dquot->dq_dqb.dqb_bhardlimit;
+	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
+					dquot->dq_dqb.dqb_bhardlimit);
+	if (limit)
+		limit >>= sb->s_blocksize_bits;
 
 	if (limit && buf->f_blocks > limit) {
 		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
@@ -1228,12 +1295,8 @@ static int f2fs_statfs_project(struct super_block *sb,
 			 (buf->f_blocks - curblock) : 0;
 	}
 
-	limit = 0;
-	if (dquot->dq_dqb.dqb_isoftlimit)
-		limit = dquot->dq_dqb.dqb_isoftlimit;
-	if (dquot->dq_dqb.dqb_ihardlimit &&
-			(!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
-		limit = dquot->dq_dqb.dqb_ihardlimit;
+	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
+					dquot->dq_dqb.dqb_ihardlimit);
 
 	if (limit && buf->f_files > limit) {
 		buf->f_files = limit;
@@ -1340,6 +1403,35 @@ static inline void f2fs_show_quota_options(struct seq_file *seq,
 #endif
 }
 
+static inline void f2fs_show_compress_options(struct seq_file *seq,
+							struct super_block *sb)
+{
+	struct f2fs_sb_info *sbi = F2FS_SB(sb);
+	char *algtype = "";
+	int i;
+
+	if (!f2fs_sb_has_compression(sbi))
+		return;
+
+	switch (F2FS_OPTION(sbi).compress_algorithm) {
+	case COMPRESS_LZO:
+		algtype = "lzo";
+		break;
+	case COMPRESS_LZ4:
+		algtype = "lz4";
+		break;
+	}
+	seq_printf(seq, ",compress_algorithm=%s", algtype);
+
+	seq_printf(seq, ",compress_log_size=%u",
+			F2FS_OPTION(sbi).compress_log_size);
+
+	for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
+		seq_printf(seq, ",compress_extension=%s",
+			F2FS_OPTION(sbi).extensions[i]);
+	}
+}
+
 static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
@@ -1462,6 +1554,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 		seq_printf(seq, ",fsync_mode=%s", "strict");
 	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
 		seq_printf(seq, ",fsync_mode=%s", "nobarrier");
+
+	f2fs_show_compress_options(seq, sbi->sb);
 	return 0;
 }
 
@@ -1476,6 +1570,9 @@ static void default_options(struct f2fs_sb_info *sbi)
 	F2FS_OPTION(sbi).test_dummy_encryption = false;
 	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
 	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
+	F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZO;
+	F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
+	F2FS_OPTION(sbi).compress_ext_cnt = 0;
 
 	set_opt(sbi, BG_GC);
 	set_opt(sbi, INLINE_XATTR);
@@ -1524,7 +1621,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 	f2fs_update_time(sbi, DISABLE_TIME);
 
 	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
-		mutex_lock(&sbi->gc_mutex);
+		down_write(&sbi->gc_lock);
 		err = f2fs_gc(sbi, true, false, NULL_SEGNO);
 		if (err == -ENODATA) {
 			err = 0;
@@ -1546,7 +1643,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 		goto restore_flag;
 	}
 
-	mutex_lock(&sbi->gc_mutex);
+	down_write(&sbi->gc_lock);
 	cpc.reason = CP_PAUSE;
 	set_sbi_flag(sbi, SBI_CP_DISABLED);
 	err = f2fs_write_checkpoint(sbi, &cpc);
@@ -1558,7 +1655,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 	spin_unlock(&sbi->stat_lock);
 
 out_unlock:
-	mutex_unlock(&sbi->gc_mutex);
+	up_write(&sbi->gc_lock);
 restore_flag:
 	sbi->sb->s_flags = s_flags;	/* Restore MS_RDONLY status */
 	return err;
@@ -1566,12 +1663,12 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 
 static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
 {
-	mutex_lock(&sbi->gc_mutex);
+	down_write(&sbi->gc_lock);
 	f2fs_dirty_to_prefree(sbi);
 
 	clear_sbi_flag(sbi, SBI_CP_DISABLED);
 	set_sbi_flag(sbi, SBI_IS_DIRTY);
-	mutex_unlock(&sbi->gc_mutex);
+	up_write(&sbi->gc_lock);
 
 	f2fs_sync_fs(sbi->sb, 1);
 }
@@ -2158,7 +2255,7 @@ static int f2fs_dquot_commit(struct dquot *dquot)
 	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
 	int ret;
 
-	down_read(&sbi->quota_sem);
+	down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
 	ret = dquot_commit(dquot);
 	if (ret < 0)
 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
@@ -2182,13 +2279,10 @@ static int f2fs_dquot_acquire(struct dquot *dquot)
 static int f2fs_dquot_release(struct dquot *dquot)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
-	int ret;
+	int ret = dquot_release(dquot);
 
-	down_read(&sbi->quota_sem);
-	ret = dquot_release(dquot);
 	if (ret < 0)
 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
-	up_read(&sbi->quota_sem);
 	return ret;
 }
 
@@ -2196,29 +2290,22 @@ static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
 {
 	struct super_block *sb = dquot->dq_sb;
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
-	int ret;
-
-	down_read(&sbi->quota_sem);
-	ret = dquot_mark_dquot_dirty(dquot);
+	int ret = dquot_mark_dquot_dirty(dquot);
 
 	/* if we are using journalled quota */
 	if (is_journalled_quota(sbi))
 		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
 
-	up_read(&sbi->quota_sem);
 	return ret;
 }
 
 static int f2fs_dquot_commit_info(struct super_block *sb, int type)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
-	int ret;
+	int ret = dquot_commit_info(sb, type);
 
-	down_read(&sbi->quota_sem);
-	ret = dquot_commit_info(sb, type);
 	if (ret < 0)
 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
-	up_read(&sbi->quota_sem);
 	return ret;
 }
 
@@ -3311,7 +3398,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 
 	/* init f2fs-specific super block info */
 	sbi->valid_super_block = valid_super_block;
-	mutex_init(&sbi->gc_mutex);
+	init_rwsem(&sbi->gc_lock);
 	mutex_init(&sbi->writepages);
 	mutex_init(&sbi->cp_mutex);
 	mutex_init(&sbi->resize_mutex);
@@ -3400,6 +3487,12 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 		goto free_devices;
 	}
 
+	err = f2fs_init_post_read_wq(sbi);
+	if (err) {
+		f2fs_err(sbi, "Failed to initialize post read workqueue");
+		goto free_devices;
+	}
+
 	sbi->total_valid_node_count =
 				le32_to_cpu(sbi->ckpt->valid_node_count);
 	percpu_counter_set(&sbi->total_valid_inode_count,
@@ -3544,6 +3637,17 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 			goto free_meta;
 		}
 	}
+
+	/*
+	 * If the filesystem is not read-only and fsync data recovery
+	 * succeeds, check the zoned block devices' write pointer consistency.
+	 */
+	if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
+		err = f2fs_check_write_pointer(sbi);
+		if (err)
+			goto free_meta;
+	}
+
 reset_checkpoint:
 	/* f2fs_recover_fsync_data() cleared this already */
 	clear_sbi_flag(sbi, SBI_POR_DOING);
@@ -3621,6 +3725,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 	f2fs_destroy_node_manager(sbi);
 free_sm:
 	f2fs_destroy_segment_manager(sbi);
+	f2fs_destroy_post_read_wq(sbi);
 free_devices:
 	destroy_device_list(sbi);
 	kvfree(sbi->ckpt);
@@ -3762,8 +3867,12 @@ static int __init init_f2fs_fs(void)
 	err = f2fs_init_bio_entry_cache();
 	if (err)
 		goto free_post_read;
+	err = f2fs_init_bioset();
+	if (err)
+		goto free_bio_entry_cache;
 	return 0;
-
+free_bio_entry_cache:
+	f2fs_destroy_bio_entry_cache();
 free_post_read:
 	f2fs_destroy_post_read_processing();
 free_root_stats:
@@ -3789,6 +3898,7 @@ static int __init init_f2fs_fs(void)
 
 static void __exit exit_f2fs_fs(void)
 {
+	f2fs_destroy_bioset();
 	f2fs_destroy_bio_entry_cache();
 	f2fs_destroy_post_read_processing();
 	f2fs_destroy_root_stats();
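
f2fs_statfs_project() above now picks the effective quota limit with min_not_zero(): the smaller of the soft and hard limits, where zero means "no limit", and the block limit is shifted from bytes to filesystem blocks before the comparison. A stand-alone sketch of that selection (not the kernel macro itself):

	#include <stdio.h>

	/* Smaller of two limits, where 0 means "no limit". */
	static unsigned long long min_not_zero_u64(unsigned long long a,
						   unsigned long long b)
	{
		if (!a)
			return b;
		if (!b)
			return a;
		return a < b ? a : b;
	}

	int main(void)
	{
		printf("%llu\n", min_not_zero_u64(0, 4096));	/* 4096: soft unset */
		printf("%llu\n", min_not_zero_u64(1024, 4096));	/* 1024: soft wins */
		return 0;
	}
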
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 70945ce..91d6497 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -25,6 +25,9 @@ enum {
 	DCC_INFO,	/* struct discard_cmd_control */
 	NM_INFO,	/* struct f2fs_nm_info */
 	F2FS_SBI,	/* struct f2fs_sb_info */
+#ifdef CONFIG_F2FS_STAT_FS
+	STAT_INFO,      /* struct f2fs_stat_info */
+#endif
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 	FAULT_INFO_RATE,	/* struct f2fs_fault_info */
 	FAULT_INFO_TYPE,	/* struct f2fs_fault_info */
@@ -42,6 +45,9 @@ struct f2fs_attr {
 	int id;
 };
 
+static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
+			     struct f2fs_sb_info *sbi, char *buf);
+
 static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
 {
 	if (struct_type == GC_THREAD)
@@ -59,41 +65,25 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
 					struct_type == FAULT_INFO_TYPE)
 		return (unsigned char *)&F2FS_OPTION(sbi).fault_info;
 #endif
+#ifdef CONFIG_F2FS_STAT_FS
+	else if (struct_type == STAT_INFO)
+		return (unsigned char *)F2FS_STAT(sbi);
+#endif
 	return NULL;
 }
 
 static ssize_t dirty_segments_show(struct f2fs_attr *a,
 		struct f2fs_sb_info *sbi, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%llu\n",
-		(unsigned long long)(dirty_segments(sbi)));
+	return sprintf(buf, "%llu\n",
+			(unsigned long long)(dirty_segments(sbi)));
 }
 
-static ssize_t unusable_show(struct f2fs_attr *a,
+static ssize_t free_segments_show(struct f2fs_attr *a,
 		struct f2fs_sb_info *sbi, char *buf)
 {
-	block_t unusable;
-
-	if (test_opt(sbi, DISABLE_CHECKPOINT))
-		unusable = sbi->unusable_block_count;
-	else
-		unusable = f2fs_get_unusable_blocks(sbi);
-	return snprintf(buf, PAGE_SIZE, "%llu\n",
-		(unsigned long long)unusable);
-}
-
-static ssize_t encoding_show(struct f2fs_attr *a,
-		struct f2fs_sb_info *sbi, char *buf)
-{
-#ifdef CONFIG_UNICODE
-	if (f2fs_sb_has_casefold(sbi))
-		return snprintf(buf, PAGE_SIZE, "%s (%d.%d.%d)\n",
-			sbi->s_encoding->charset,
-			(sbi->s_encoding->version >> 16) & 0xff,
-			(sbi->s_encoding->version >> 8) & 0xff,
-			sbi->s_encoding->version & 0xff);
-#endif
-	return snprintf(buf, PAGE_SIZE, "(none)");
+	return sprintf(buf, "%llu\n",
+			(unsigned long long)(free_segments(sbi)));
 }
 
 static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
@@ -102,10 +92,10 @@ static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
 	struct super_block *sb = sbi->sb;
 
 	if (!sb->s_bdev->bd_part)
-		return snprintf(buf, PAGE_SIZE, "0\n");
+		return sprintf(buf, "0\n");
 
-	return snprintf(buf, PAGE_SIZE, "%llu\n",
-		(unsigned long long)(sbi->kbytes_written +
+	return sprintf(buf, "%llu\n",
+			(unsigned long long)(sbi->kbytes_written +
 			BD_PART_WRITTEN(sbi)));
 }
 
@@ -116,7 +106,7 @@ static ssize_t features_show(struct f2fs_attr *a,
 	int len = 0;
 
 	if (!sb->s_bdev->bd_part)
-		return snprintf(buf, PAGE_SIZE, "0\n");
+		return sprintf(buf, "0\n");
 
 	if (f2fs_sb_has_encrypt(sbi))
 		len += snprintf(buf, PAGE_SIZE - len, "%s",
@@ -154,6 +144,9 @@ static ssize_t features_show(struct f2fs_attr *a,
 	if (f2fs_sb_has_casefold(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "casefold");
+	if (f2fs_sb_has_compression(sbi))
+		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+				len ? ", " : "", "compression");
 	len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "pin_file");
 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -163,9 +156,66 @@ static ssize_t features_show(struct f2fs_attr *a,
 static ssize_t current_reserved_blocks_show(struct f2fs_attr *a,
 					struct f2fs_sb_info *sbi, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%u\n", sbi->current_reserved_blocks);
+	return sprintf(buf, "%u\n", sbi->current_reserved_blocks);
 }
 
+static ssize_t unusable_show(struct f2fs_attr *a,
+		struct f2fs_sb_info *sbi, char *buf)
+{
+	block_t unusable;
+
+	if (test_opt(sbi, DISABLE_CHECKPOINT))
+		unusable = sbi->unusable_block_count;
+	else
+		unusable = f2fs_get_unusable_blocks(sbi);
+	return sprintf(buf, "%llu\n", (unsigned long long)unusable);
+}
+
+static ssize_t encoding_show(struct f2fs_attr *a,
+		struct f2fs_sb_info *sbi, char *buf)
+{
+#ifdef CONFIG_UNICODE
+	if (f2fs_sb_has_casefold(sbi))
+		return snprintf(buf, PAGE_SIZE, "%s (%d.%d.%d)\n",
+			sbi->s_encoding->charset,
+			(sbi->s_encoding->version >> 16) & 0xff,
+			(sbi->s_encoding->version >> 8) & 0xff,
+			sbi->s_encoding->version & 0xff);
+#endif
+	return sprintf(buf, "(none)");
+}
+
+#ifdef CONFIG_F2FS_STAT_FS
+static ssize_t moved_blocks_foreground_show(struct f2fs_attr *a,
+				struct f2fs_sb_info *sbi, char *buf)
+{
+	struct f2fs_stat_info *si = F2FS_STAT(sbi);
+
+	return sprintf(buf, "%llu\n",
+		(unsigned long long)(si->tot_blks -
+			(si->bg_data_blks + si->bg_node_blks)));
+}
+
+static ssize_t moved_blocks_background_show(struct f2fs_attr *a,
+				struct f2fs_sb_info *sbi, char *buf)
+{
+	struct f2fs_stat_info *si = F2FS_STAT(sbi);
+
+	return sprintf(buf, "%llu\n",
+		(unsigned long long)(si->bg_data_blks + si->bg_node_blks));
+}
+
+static ssize_t avg_vblocks_show(struct f2fs_attr *a,
+		struct f2fs_sb_info *sbi, char *buf)
+{
+	struct f2fs_stat_info *si = F2FS_STAT(sbi);
+
+	si->dirty_count = dirty_segments(sbi);
+	f2fs_update_sit_info(sbi);
+	return sprintf(buf, "%llu\n", (unsigned long long)(si->avg_vblocks));
+}
+#endif
+
 static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
 			struct f2fs_sb_info *sbi, char *buf)
 {
@@ -199,7 +249,7 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
 
 	ui = (unsigned int *)(ptr + a->offset);
 
-	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
+	return sprintf(buf, "%u\n", *ui);
 }
 
 static ssize_t __sbi_store(struct f2fs_attr *a,
@@ -389,6 +439,7 @@ enum feat_id {
 	FEAT_VERITY,
 	FEAT_SB_CHECKSUM,
 	FEAT_CASEFOLD,
+	FEAT_COMPRESSION,
 };
 
 static ssize_t f2fs_feature_show(struct f2fs_attr *a,
@@ -408,7 +459,8 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a,
 	case FEAT_VERITY:
 	case FEAT_SB_CHECKSUM:
 	case FEAT_CASEFOLD:
-		return snprintf(buf, PAGE_SIZE, "supported\n");
+	case FEAT_COMPRESSION:
+		return sprintf(buf, "supported\n");
 	}
 	return 0;
 }
@@ -437,6 +489,14 @@ static struct f2fs_attr f2fs_attr_##_name = {			\
 	.id	= _id,						\
 }
 
+#define F2FS_STAT_ATTR(_struct_type, _struct_name, _name, _elname)	\
+static struct f2fs_attr f2fs_attr_##_name = {			\
+	.attr = {.name = __stringify(_name), .mode = 0444 },	\
+	.show = f2fs_sbi_show,					\
+	.struct_type = _struct_type,				\
+	.offset = offsetof(struct _struct_name, _elname),       \
+}
+
 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_urgent_sleep_time,
 							urgent_sleep_time);
 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
@@ -478,11 +538,21 @@ F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
 F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
 #endif
 F2FS_GENERAL_RO_ATTR(dirty_segments);
+F2FS_GENERAL_RO_ATTR(free_segments);
 F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
 F2FS_GENERAL_RO_ATTR(features);
 F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
 F2FS_GENERAL_RO_ATTR(unusable);
 F2FS_GENERAL_RO_ATTR(encoding);
+#ifdef CONFIG_F2FS_STAT_FS
+F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_foreground_calls, cp_count);
+F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_background_calls, bg_cp_count);
+F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, gc_foreground_calls, call_count);
+F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, gc_background_calls, bg_gc);
+F2FS_GENERAL_RO_ATTR(moved_blocks_background);
+F2FS_GENERAL_RO_ATTR(moved_blocks_foreground);
+F2FS_GENERAL_RO_ATTR(avg_vblocks);
+#endif
 
 #ifdef CONFIG_FS_ENCRYPTION
 F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
@@ -503,6 +573,7 @@ F2FS_FEATURE_RO_ATTR(verity, FEAT_VERITY);
 #endif
 F2FS_FEATURE_RO_ATTR(sb_checksum, FEAT_SB_CHECKSUM);
 F2FS_FEATURE_RO_ATTR(casefold, FEAT_CASEFOLD);
+F2FS_FEATURE_RO_ATTR(compression, FEAT_COMPRESSION);
 
 #define ATTR_LIST(name) (&f2fs_attr_##name.attr)
 static struct attribute *f2fs_attrs[] = {
@@ -543,12 +614,22 @@ static struct attribute *f2fs_attrs[] = {
 	ATTR_LIST(inject_type),
 #endif
 	ATTR_LIST(dirty_segments),
+	ATTR_LIST(free_segments),
 	ATTR_LIST(unusable),
 	ATTR_LIST(lifetime_write_kbytes),
 	ATTR_LIST(features),
 	ATTR_LIST(reserved_blocks),
 	ATTR_LIST(current_reserved_blocks),
 	ATTR_LIST(encoding),
+#ifdef CONFIG_F2FS_STAT_FS
+	ATTR_LIST(cp_foreground_calls),
+	ATTR_LIST(cp_background_calls),
+	ATTR_LIST(gc_foreground_calls),
+	ATTR_LIST(gc_background_calls),
+	ATTR_LIST(moved_blocks_foreground),
+	ATTR_LIST(moved_blocks_background),
+	ATTR_LIST(avg_vblocks),
+#endif
 	NULL,
 };
 ATTRIBUTE_GROUPS(f2fs);
@@ -573,6 +654,7 @@ static struct attribute *f2fs_feat_attrs[] = {
 #endif
 	ATTR_LIST(sb_checksum),
 	ATTR_LIST(casefold),
+	ATTR_LIST(compression),
 	NULL,
 };
 ATTRIBUTE_GROUPS(f2fs_feat);
@@ -733,10 +815,12 @@ int __init f2fs_init_sysfs(void)
 
 	ret = kobject_init_and_add(&f2fs_feat, &f2fs_feat_ktype,
 				   NULL, "features");
-	if (ret)
+	if (ret) {
+		kobject_put(&f2fs_feat);
 		kset_unregister(&f2fs_kset);
-	else
+	} else {
 		f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
+	}
 	return ret;
 }
 
@@ -757,8 +841,11 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
 	init_completion(&sbi->s_kobj_unregister);
 	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_sb_ktype, NULL,
 				"%s", sb->s_id);
-	if (err)
+	if (err) {
+		kobject_put(&sbi->s_kobj);
+		wait_for_completion(&sbi->s_kobj_unregister);
 		return err;
+	}
 
 	if (f2fs_proc_root)
 		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
@@ -786,4 +873,5 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
 		remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
 	}
 	kobject_del(&sbi->s_kobj);
+	kobject_put(&sbi->s_kobj);
 }
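
The new F2FS_STAT_ATTR() entries reuse the generic f2fs_sbi_show() path: __struct_ptr() maps STAT_INFO to the superblock's f2fs_stat_info, and the attribute's offsetof() value picks which unsigned int field gets printed. A rough, self-contained illustration of that offsetof-driven dispatch (the struct and attribute names here are hypothetical, not the f2fs ones):

    #include <stddef.h>
    #include <stdio.h>

    struct stat_info { unsigned int cp_count; unsigned int bg_gc; };
    struct attr { const char *name; size_t offset; };

    /* Generic "show": locate the field by byte offset, print it as %u. */
    static int show(const struct stat_info *si, const struct attr *a, char *buf)
    {
    	const unsigned int *ui = (const unsigned int *)((const char *)si + a->offset);

    	return sprintf(buf, "%u\n", *ui);
    }

    int main(void)
    {
    	struct stat_info si = { .cp_count = 7, .bg_gc = 3 };
    	struct attr a = { "gc_background_calls", offsetof(struct stat_info, bg_gc) };
    	char buf[32];

    	show(&si, &a, buf);
    	printf("%s", buf);	/* prints "3" and a newline */
    	return 0;
    }
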
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 5f04c5c..594b05a 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -21,6 +21,7 @@
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
 #include <asm/unaligned.h>
+#include <linux/random.h>
 #include <linux/iversion.h>
 #include "fat.h"
 
@@ -521,7 +522,7 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
 	inode->i_uid = sbi->options.fs_uid;
 	inode->i_gid = sbi->options.fs_gid;
 	inode_inc_iversion(inode);
-	inode->i_generation = get_seconds();
+	inode->i_generation = prandom_u32();
 
 	if ((de->attr & ATTR_DIR) && !IS_FREE(de->name)) {
 		inode->i_generation &= ~1;
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 1e08bd5..f1b2a1f 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -271,6 +271,14 @@ static inline struct timespec64 fat_timespec64_trunc_2secs(struct timespec64 ts)
 {
 	return (struct timespec64){ ts.tv_sec & ~1ULL, 0 };
 }
+
+static inline struct timespec64 fat_timespec64_trunc_10ms(struct timespec64 ts)
+{
+	if (ts.tv_nsec)
+		ts.tv_nsec -= ts.tv_nsec % 10000000UL;
+	return ts;
+}
+
 /*
  * truncate the various times with appropriate granularity:
  *   root inode:
@@ -308,7 +316,7 @@ int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags)
 	}
 	if (flags & S_CTIME) {
 		if (sbi->options.isvfat)
-			inode->i_ctime = timespec64_trunc(*now, 10000000);
+			inode->i_ctime = fat_timespec64_trunc_10ms(*now);
 		else
 			inode->i_ctime = fat_timespec64_trunc_2secs(*now);
 	}
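
fat_timespec64_trunc_10ms() replaces the old timespec64_trunc(*now, 10000000) call by rounding tv_nsec down to the 10 ms granularity VFAT keeps for ctime. A quick standalone check of the arithmetic it performs:

    #include <assert.h>

    int main(void)
    {
    	unsigned long nsec = 123456789UL;	/* 123.456789 ms */

    	nsec -= nsec % 10000000UL;		/* truncate to 10 ms steps */
    	assert(nsec == 120000000UL);		/* 120 ms exactly */
    	return 0;
    }
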
diff --git a/fs/file.c b/fs/file.c
index 3da91a1..a364e1a 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -642,7 +642,9 @@ int __close_fd(struct files_struct *files, unsigned fd)
 EXPORT_SYMBOL(__close_fd); /* for ksys_close() */
 
 /*
- * variant of __close_fd that gets a ref on the file for later fput
+ * variant of __close_fd that gets a ref on the file for later fput.
+ * The caller must ensure that filp_close() is called on the file, and then
+ * an fput().
  */
 int __close_fd_get_file(unsigned int fd, struct file **res)
 {
@@ -662,7 +664,7 @@ int __close_fd_get_file(unsigned int fd, struct file **res)
 	spin_unlock(&files->file_lock);
 	get_file(file);
 	*res = file;
-	return filp_close(file, files);
+	return 0;
 
 out_unlock:
 	spin_unlock(&files->file_lock);
@@ -706,9 +708,9 @@ void do_close_on_exec(struct files_struct *files)
 	spin_unlock(&files->file_lock);
 }
 
-static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
+static struct file *__fget_files(struct files_struct *files, unsigned int fd,
+				 fmode_t mask, unsigned int refs)
 {
-	struct files_struct *files = current->files;
 	struct file *file;
 
 	rcu_read_lock();
@@ -729,6 +731,12 @@ static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
 	return file;
 }
 
+static inline struct file *__fget(unsigned int fd, fmode_t mask,
+				  unsigned int refs)
+{
+	return __fget_files(current->files, fd, mask, refs);
+}
+
 struct file *fget_many(unsigned int fd, unsigned int refs)
 {
 	return __fget(fd, FMODE_PATH, refs);
@@ -746,6 +754,18 @@ struct file *fget_raw(unsigned int fd)
 }
 EXPORT_SYMBOL(fget_raw);
 
+struct file *fget_task(struct task_struct *task, unsigned int fd)
+{
+	struct file *file = NULL;
+
+	task_lock(task);
+	if (task->files)
+		file = __fget_files(task->files, fd, 0, 1);
+	task_unlock(task);
+
+	return file;
+}
+
 /*
  * Lightweight file lookup - no refcnt increment if fd table isn't shared.
  *
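
The new fget_task() resolves an fd in another task's descriptor table: it takes task_lock() so task->files cannot be detached underneath it, and returns a referenced struct file or NULL. A hedged sketch of a caller (the function below is hypothetical, not part of this series):

    #include <linux/file.h>
    #include <linux/fs.h>
    #include <linux/sched.h>

    /* Hypothetical helper: report the inode number behind @fd of @task. */
    static unsigned long example_fd_ino(struct task_struct *task, unsigned int fd)
    {
    	struct file *file = fget_task(task, fd);	/* takes its own reference */
    	unsigned long ino;

    	if (!file)
    		return 0;		/* fd not open, or task has no files */
    	ino = file_inode(file)->i_ino;
    	fput(file);			/* drop the reference fget_task() took */
    	return ino;
    }
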
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 9135646..77bf5f9 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -74,7 +74,8 @@ int register_filesystem(struct file_system_type * fs)
 	int res = 0;
 	struct file_system_type ** p;
 
-	if (fs->parameters && !fs_validate_description(fs->parameters))
+	if (fs->parameters &&
+	    !fs_validate_description(fs->name, fs->parameters))
 		return -EINVAL;
 
 	BUG_ON(strchr(fs->name, '.'));
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 335607b..76ac9c7 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -2063,7 +2063,7 @@ void wb_workfn(struct work_struct *work)
 						struct bdi_writeback, dwork);
 	long pages_written;
 
-	set_worker_desc("flush-%s", dev_name(wb->bdi->dev));
+	set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
 	current->flags |= PF_SWAPWRITE;
 
 	if (likely(!current_is_workqueue_rescuer() ||
diff --git a/fs/fs_context.c b/fs/fs_context.c
index 138b5b4..fc9f6ef 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -45,6 +45,7 @@ static const struct constant_table common_set_sb_flag[] = {
 	{ "posixacl",	SB_POSIXACL },
 	{ "ro",		SB_RDONLY },
 	{ "sync",	SB_SYNCHRONOUS },
+	{ },
 };
 
 static const struct constant_table common_clear_sb_flag[] = {
@@ -53,6 +54,7 @@ static const struct constant_table common_clear_sb_flag[] = {
 	{ "nomand",	SB_MANDLOCK },
 	{ "rw",		SB_RDONLY },
 	{ "silent",	SB_SILENT },
+	{ },
 };
 
 static const char *const forbidden_sb_flag[] = {
@@ -175,14 +177,15 @@ int vfs_parse_fs_string(struct fs_context *fc, const char *key,
 
 	struct fs_parameter param = {
 		.key	= key,
-		.type	= fs_value_is_string,
+		.type	= fs_value_is_flag,
 		.size	= v_size,
 	};
 
-	if (v_size > 0) {
+	if (value) {
 		param.string = kmemdup_nul(value, v_size, GFP_KERNEL);
 		if (!param.string)
 			return -ENOMEM;
+		param.type = fs_value_is_string;
 	}
 
 	ret = vfs_parse_fs_param(fc, &param);
@@ -268,6 +271,7 @@ static struct fs_context *alloc_fs_context(struct file_system_type *fs_type,
 	fc->fs_type	= get_filesystem(fs_type);
 	fc->cred	= get_current_cred();
 	fc->net_ns	= get_net(current->nsproxy->net_ns);
+	fc->log.prefix	= fs_type->name;
 
 	mutex_init(&fc->uapi_mutex);
 
@@ -361,8 +365,8 @@ struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc)
 	get_net(fc->net_ns);
 	get_user_ns(fc->user_ns);
 	get_cred(fc->cred);
-	if (fc->log)
-		refcount_inc(&fc->log->usage);
+	if (fc->log.log)
+		refcount_inc(&fc->log.log->usage);
 
 	/* Can't call put until we've called ->dup */
 	ret = fc->ops->dup(fc, src_fc);
@@ -385,64 +389,33 @@ EXPORT_SYMBOL(vfs_dup_fs_context);
  * @fc: The filesystem context to log to.
  * @fmt: The format of the buffer.
  */
-void logfc(struct fs_context *fc, const char *fmt, ...)
+void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt, ...)
 {
-	static const char store_failure[] = "OOM: Can't store error string";
-	struct fc_log *log = fc ? fc->log : NULL;
-	const char *p;
 	va_list va;
-	char *q;
-	u8 freeable;
+	struct va_format vaf = {.fmt = fmt, .va = &va};
 
 	va_start(va, fmt);
-	if (!strchr(fmt, '%')) {
-		p = fmt;
-		goto unformatted_string;
-	}
-	if (strcmp(fmt, "%s") == 0) {
-		p = va_arg(va, const char *);
-		goto unformatted_string;
-	}
-
-	q = kvasprintf(GFP_KERNEL, fmt, va);
-copied_string:
-	if (!q)
-		goto store_failure;
-	freeable = 1;
-	goto store_string;
-
-unformatted_string:
-	if ((unsigned long)p >= (unsigned long)__start_rodata &&
-	    (unsigned long)p <  (unsigned long)__end_rodata)
-		goto const_string;
-	if (log && within_module_core((unsigned long)p, log->owner))
-		goto const_string;
-	q = kstrdup(p, GFP_KERNEL);
-	goto copied_string;
-
-store_failure:
-	p = store_failure;
-const_string:
-	q = (char *)p;
-	freeable = 0;
-store_string:
 	if (!log) {
-		switch (fmt[0]) {
+		switch (level) {
 		case 'w':
-			printk(KERN_WARNING "%s\n", q + 2);
+			printk(KERN_WARNING "%s%s%pV\n", prefix ? prefix : "",
+						prefix ? ": " : "", &vaf);
 			break;
 		case 'e':
-			printk(KERN_ERR "%s\n", q + 2);
+			printk(KERN_ERR "%s%s%pV\n", prefix ? prefix : "",
+						prefix ? ": " : "", &vaf);
 			break;
 		default:
-			printk(KERN_NOTICE "%s\n", q + 2);
+			printk(KERN_NOTICE "%s%s%pV\n", prefix ? prefix : "",
+						prefix ? ": " : "", &vaf);
 			break;
 		}
-		if (freeable)
-			kfree(q);
 	} else {
 		unsigned int logsize = ARRAY_SIZE(log->buffer);
 		u8 index;
+		char *q = kasprintf(GFP_KERNEL, "%c %s%s%pV\n", level,
+						prefix ? prefix : "",
+						prefix ? ": " : "", &vaf);
 
 		index = log->head & (logsize - 1);
 		BUILD_BUG_ON(sizeof(log->head) != sizeof(u8) ||
@@ -454,9 +427,11 @@ void logfc(struct fs_context *fc, const char *fmt, ...)
 			log->tail++;
 		}
 
-		log->buffer[index] = q;
-		log->need_free &= ~(1 << index);
-		log->need_free |= freeable << index;
+		log->buffer[index] = q ? q : "OOM: Can't store error string";
+		if (q)
+			log->need_free |= 1 << index;
+		else
+			log->need_free &= ~(1 << index);
 		log->head++;
 	}
 	va_end(va);
@@ -468,12 +443,12 @@ EXPORT_SYMBOL(logfc);
  */
 static void put_fc_log(struct fs_context *fc)
 {
-	struct fc_log *log = fc->log;
+	struct fc_log *log = fc->log.log;
 	int i;
 
 	if (log) {
 		if (refcount_dec_and_test(&log->usage)) {
-			fc->log = NULL;
+			fc->log.log = NULL;
 			for (i = 0; i <= 7; i++)
 				if (log->need_free & (1 << i))
 					kfree(log->buffer[i]);
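
logfc() now takes the fc_log, an optional prefix, and a severity character directly and defers formatting to %pV, so callers no longer prepend "w "/"e " to a preformatted string. A minimal hedged call against the new signature (the wrapper below is illustrative, not one of the kernel's log macros):

    #include <linux/fs_context.h>

    /* Warn through the context's log buffer; logfc() falls back to printk()
     * when no log is attached, prefixing messages with fc->log.prefix. */
    static void example_warn(struct fs_context *fc, const char *key)
    {
    	logfc(fc->log.log, fc->log.prefix, 'w',
    	      "Deprecated parameter '%s'", key);
    }
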
diff --git a/fs/fs_parser.c b/fs/fs_parser.c
index d1930ad..7e6fb43 100644
--- a/fs/fs_parser.c
+++ b/fs/fs_parser.c
@@ -20,42 +20,66 @@ static const struct constant_table bool_names[] = {
 	{ "no",		false },
 	{ "true",	true },
 	{ "yes",	true },
+	{ },
 };
 
+static const struct constant_table *
+__lookup_constant(const struct constant_table *tbl, const char *name)
+{
+	for ( ; tbl->name; tbl++)
+		if (strcmp(name, tbl->name) == 0)
+			return tbl;
+	return NULL;
+}
+
 /**
  * lookup_constant - Look up a constant by name in an ordered table
  * @tbl: The table of constants to search.
- * @tbl_size: The size of the table.
  * @name: The name to look up.
  * @not_found: The value to return if the name is not found.
  */
-int __lookup_constant(const struct constant_table *tbl, size_t tbl_size,
-		      const char *name, int not_found)
+int lookup_constant(const struct constant_table *tbl, const char *name, int not_found)
 {
-	unsigned int i;
+	const struct constant_table *p = __lookup_constant(tbl, name);
 
-	for (i = 0; i < tbl_size; i++)
-		if (strcmp(name, tbl[i].name) == 0)
-			return tbl[i].value;
-
-	return not_found;
+	return p ? p->value : not_found;
 }
-EXPORT_SYMBOL(__lookup_constant);
+EXPORT_SYMBOL(lookup_constant);
+
+static inline bool is_flag(const struct fs_parameter_spec *p)
+{
+	return p->type == NULL;
+}
 
 static const struct fs_parameter_spec *fs_lookup_key(
-	const struct fs_parameter_description *desc,
-	const char *name)
+	const struct fs_parameter_spec *desc,
+	struct fs_parameter *param, bool *negated)
 {
-	const struct fs_parameter_spec *p;
+	const struct fs_parameter_spec *p, *other = NULL;
+	const char *name = param->key;
+	bool want_flag = param->type == fs_value_is_flag;
 
-	if (!desc->specs)
-		return NULL;
-
-	for (p = desc->specs; p->name; p++)
-		if (strcmp(p->name, name) == 0)
+	*negated = false;
+	for (p = desc; p->name; p++) {
+		if (strcmp(p->name, name) != 0)
+			continue;
+		if (likely(is_flag(p) == want_flag))
 			return p;
-
-	return NULL;
+		other = p;
+	}
+	if (want_flag) {
+		if (name[0] == 'n' && name[1] == 'o' && name[2]) {
+			for (p = desc; p->name; p++) {
+				if (strcmp(p->name, name + 2) != 0)
+					continue;
+				if (!(p->flags & fs_param_neg_with_no))
+					continue;
+				*negated = true;
+				return p;
+			}
+		}
+	}
+	return other;
 }
 
 /*
@@ -76,172 +100,38 @@ static const struct fs_parameter_spec *fs_lookup_key(
  * unknown parameters are okay and -EINVAL if there was a conversion issue or
  * the parameter wasn't recognised and unknowns aren't okay.
  */
-int fs_parse(struct fs_context *fc,
-	     const struct fs_parameter_description *desc,
+int __fs_parse(struct p_log *log,
+	     const struct fs_parameter_spec *desc,
 	     struct fs_parameter *param,
 	     struct fs_parse_result *result)
 {
 	const struct fs_parameter_spec *p;
-	const struct fs_parameter_enum *e;
-	int ret = -ENOPARAM, b;
 
-	result->has_value = !!param->string;
-	result->negated = false;
 	result->uint_64 = 0;
 
-	p = fs_lookup_key(desc, param->key);
-	if (!p) {
-		/* If we didn't find something that looks like "noxxx", see if
-		 * "xxx" takes the "no"-form negative - but only if there
-		 * wasn't an value.
-		 */
-		if (result->has_value)
-			goto unknown_parameter;
-		if (param->key[0] != 'n' || param->key[1] != 'o' || !param->key[2])
-			goto unknown_parameter;
-
-		p = fs_lookup_key(desc, param->key + 2);
-		if (!p)
-			goto unknown_parameter;
-		if (!(p->flags & fs_param_neg_with_no))
-			goto unknown_parameter;
-		result->boolean = false;
-		result->negated = true;
-	}
+	p = fs_lookup_key(desc, param, &result->negated);
+	if (!p)
+		return -ENOPARAM;
 
 	if (p->flags & fs_param_deprecated)
-		warnf(fc, "%s: Deprecated parameter '%s'",
-		      desc->name, param->key);
-
-	if (result->negated)
-		goto okay;
-
-	/* Certain parameter types only take a string and convert it. */
-	switch (p->type) {
-	case __fs_param_wasnt_defined:
-		return -EINVAL;
-	case fs_param_is_u32:
-	case fs_param_is_u32_octal:
-	case fs_param_is_u32_hex:
-	case fs_param_is_s32:
-	case fs_param_is_u64:
-	case fs_param_is_enum:
-	case fs_param_is_string:
-		if (param->type != fs_value_is_string)
-			goto bad_value;
-		if (!result->has_value) {
-			if (p->flags & fs_param_v_optional)
-				goto okay;
-			goto bad_value;
-		}
-		/* Fall through */
-	default:
-		break;
-	}
+		warn_plog(log, "Deprecated parameter '%s'", param->key);
 
 	/* Try to turn the type we were given into the type desired by the
 	 * parameter and give an error if we can't.
 	 */
-	switch (p->type) {
-	case fs_param_is_flag:
-		if (param->type != fs_value_is_flag &&
-		    (param->type != fs_value_is_string || result->has_value))
-			return invalf(fc, "%s: Unexpected value for '%s'",
-				      desc->name, param->key);
-		result->boolean = true;
-		goto okay;
-
-	case fs_param_is_bool:
-		switch (param->type) {
-		case fs_value_is_flag:
-			result->boolean = true;
-			goto okay;
-		case fs_value_is_string:
-			if (param->size == 0) {
-				result->boolean = true;
-				goto okay;
-			}
-			b = lookup_constant(bool_names, param->string, -1);
-			if (b == -1)
-				goto bad_value;
-			result->boolean = b;
-			goto okay;
-		default:
-			goto bad_value;
-		}
-
-	case fs_param_is_u32:
-		ret = kstrtouint(param->string, 0, &result->uint_32);
-		goto maybe_okay;
-	case fs_param_is_u32_octal:
-		ret = kstrtouint(param->string, 8, &result->uint_32);
-		goto maybe_okay;
-	case fs_param_is_u32_hex:
-		ret = kstrtouint(param->string, 16, &result->uint_32);
-		goto maybe_okay;
-	case fs_param_is_s32:
-		ret = kstrtoint(param->string, 0, &result->int_32);
-		goto maybe_okay;
-	case fs_param_is_u64:
-		ret = kstrtoull(param->string, 0, &result->uint_64);
-		goto maybe_okay;
-
-	case fs_param_is_enum:
-		for (e = desc->enums; e->name[0]; e++) {
-			if (e->opt == p->opt &&
-			    strcmp(e->name, param->string) == 0) {
-				result->uint_32 = e->value;
-				goto okay;
-			}
-		}
-		goto bad_value;
-
-	case fs_param_is_string:
-		goto okay;
-	case fs_param_is_blob:
-		if (param->type != fs_value_is_blob)
-			goto bad_value;
-		goto okay;
-
-	case fs_param_is_fd: {
-		switch (param->type) {
-		case fs_value_is_string:
-			if (!result->has_value)
-				goto bad_value;
-
-			ret = kstrtouint(param->string, 0, &result->uint_32);
-			break;
-		case fs_value_is_file:
-			result->uint_32 = param->dirfd;
-			ret = 0;
-		default:
-			goto bad_value;
-		}
-
-		if (result->uint_32 > INT_MAX)
-			goto bad_value;
-		goto maybe_okay;
+	if (is_flag(p)) {
+		if (param->type != fs_value_is_flag)
+			return inval_plog(log, "Unexpected value for '%s'",
+				      param->key);
+		result->boolean = !result->negated;
+	} else {
+		int ret = p->type(log, p, param, result);
+		if (ret)
+			return ret;
 	}
-
-	case fs_param_is_blockdev:
-	case fs_param_is_path:
-		goto okay;
-	default:
-		BUG();
-	}
-
-maybe_okay:
-	if (ret < 0)
-		goto bad_value;
-okay:
 	return p->opt;
-
-bad_value:
-	return invalf(fc, "%s: Bad value for '%s'", desc->name, param->key);
-unknown_parameter:
-	return -ENOPARAM;
 }
-EXPORT_SYMBOL(fs_parse);
+EXPORT_SYMBOL(__fs_parse);
 
 /**
  * fs_lookup_param - Look up a path referred to by a parameter
@@ -267,9 +157,6 @@ int fs_lookup_param(struct fs_context *fc,
 			return PTR_ERR(f);
 		put_f = true;
 		break;
-	case fs_value_is_filename_empty:
-		flags = LOOKUP_EMPTY;
-		/* Fall through */
 	case fs_value_is_filename:
 		f = param->name;
 		put_f = false;
@@ -302,6 +189,124 @@ int fs_lookup_param(struct fs_context *fc,
 }
 EXPORT_SYMBOL(fs_lookup_param);
 
+int fs_param_bad_value(struct p_log *log, struct fs_parameter *param)
+{
+	return inval_plog(log, "Bad value for '%s'", param->key);
+}
+
+int fs_param_is_bool(struct p_log *log, const struct fs_parameter_spec *p,
+		     struct fs_parameter *param, struct fs_parse_result *result)
+{
+	int b;
+	if (param->type != fs_value_is_string)
+		return fs_param_bad_value(log, param);
+	b = lookup_constant(bool_names, param->string, -1);
+	if (b == -1)
+		return fs_param_bad_value(log, param);
+	result->boolean = b;
+	return 0;
+}
+EXPORT_SYMBOL(fs_param_is_bool);
+
+int fs_param_is_u32(struct p_log *log, const struct fs_parameter_spec *p,
+		    struct fs_parameter *param, struct fs_parse_result *result)
+{
+	int base = (unsigned long)p->data;
+	if (param->type != fs_value_is_string ||
+	    kstrtouint(param->string, base, &result->uint_32) < 0)
+		return fs_param_bad_value(log, param);
+	return 0;
+}
+EXPORT_SYMBOL(fs_param_is_u32);
+
+int fs_param_is_s32(struct p_log *log, const struct fs_parameter_spec *p,
+		    struct fs_parameter *param, struct fs_parse_result *result)
+{
+	if (param->type != fs_value_is_string ||
+	    kstrtoint(param->string, 0, &result->int_32) < 0)
+		return fs_param_bad_value(log, param);
+	return 0;
+}
+EXPORT_SYMBOL(fs_param_is_s32);
+
+int fs_param_is_u64(struct p_log *log, const struct fs_parameter_spec *p,
+		    struct fs_parameter *param, struct fs_parse_result *result)
+{
+	if (param->type != fs_value_is_string ||
+	    kstrtoull(param->string, 0, &result->uint_64) < 0)
+		return fs_param_bad_value(log, param);
+	return 0;
+}
+EXPORT_SYMBOL(fs_param_is_u64);
+
+int fs_param_is_enum(struct p_log *log, const struct fs_parameter_spec *p,
+		     struct fs_parameter *param, struct fs_parse_result *result)
+{
+	const struct constant_table *c;
+	if (param->type != fs_value_is_string)
+		return fs_param_bad_value(log, param);
+	c = __lookup_constant(p->data, param->string);
+	if (!c)
+		return fs_param_bad_value(log, param);
+	result->uint_32 = c->value;
+	return 0;
+}
+EXPORT_SYMBOL(fs_param_is_enum);
+
+int fs_param_is_string(struct p_log *log, const struct fs_parameter_spec *p,
+		       struct fs_parameter *param, struct fs_parse_result *result)
+{
+	if (param->type != fs_value_is_string || !*param->string)
+		return fs_param_bad_value(log, param);
+	return 0;
+}
+EXPORT_SYMBOL(fs_param_is_string);
+
+int fs_param_is_blob(struct p_log *log, const struct fs_parameter_spec *p,
+		     struct fs_parameter *param, struct fs_parse_result *result)
+{
+	if (param->type != fs_value_is_blob)
+		return fs_param_bad_value(log, param);
+	return 0;
+}
+EXPORT_SYMBOL(fs_param_is_blob);
+
+int fs_param_is_fd(struct p_log *log, const struct fs_parameter_spec *p,
+		  struct fs_parameter *param, struct fs_parse_result *result)
+{
+	switch (param->type) {
+	case fs_value_is_string:
+		if (kstrtouint(param->string, 0, &result->uint_32) < 0)
+			break;
+		if (result->uint_32 <= INT_MAX)
+			return 0;
+		break;
+	case fs_value_is_file:
+		result->uint_32 = param->dirfd;
+		if (result->uint_32 <= INT_MAX)
+			return 0;
+		break;
+	default:
+		break;
+	}
+	return fs_param_bad_value(log, param);
+}
+EXPORT_SYMBOL(fs_param_is_fd);
+
+int fs_param_is_blockdev(struct p_log *log, const struct fs_parameter_spec *p,
+		  struct fs_parameter *param, struct fs_parse_result *result)
+{
+	return 0;
+}
+EXPORT_SYMBOL(fs_param_is_blockdev);
+
+int fs_param_is_path(struct p_log *log, const struct fs_parameter_spec *p,
+		     struct fs_parameter *param, struct fs_parse_result *result)
+{
+	return 0;
+}
+EXPORT_SYMBOL(fs_param_is_path);
+
 #ifdef CONFIG_VALIDATE_FS_PARSER
 /**
  * validate_constant_table - Validate a constant table
@@ -357,102 +362,26 @@ bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
  * fs_validate_description - Validate a parameter description
  * @desc: The parameter description to validate.
  */
-bool fs_validate_description(const struct fs_parameter_description *desc)
+bool fs_validate_description(const char *name,
+	const struct fs_parameter_spec *desc)
 {
 	const struct fs_parameter_spec *param, *p2;
-	const struct fs_parameter_enum *e;
-	const char *name = desc->name;
-	unsigned int nr_params = 0;
-	bool good = true, enums = false;
+	bool good = true;
 
 	pr_notice("*** VALIDATE %s ***\n", name);
 
-	if (!name[0]) {
-		pr_err("VALIDATE Parser: No name\n");
-		name = "Unknown";
-		good = false;
-	}
-
-	if (desc->specs) {
-		for (param = desc->specs; param->name; param++) {
-			enum fs_parameter_type t = param->type;
-
-			/* Check that the type is in range */
-			if (t == __fs_param_wasnt_defined ||
-			    t >= nr__fs_parameter_type) {
-				pr_err("VALIDATE %s: PARAM[%s] Bad type %u\n",
-				       name, param->name, t);
-				good = false;
-			} else if (t == fs_param_is_enum) {
-				enums = true;
-			}
-
-			/* Check for duplicate parameter names */
-			for (p2 = desc->specs; p2 < param; p2++) {
-				if (strcmp(param->name, p2->name) == 0) {
-					pr_err("VALIDATE %s: PARAM[%s]: Duplicate\n",
-					       name, param->name);
-					good = false;
-				}
-			}
-		}
-
-		nr_params = param - desc->specs;
-	}
-
-	if (desc->enums) {
-		if (!nr_params) {
-			pr_err("VALIDATE %s: Enum table but no parameters\n",
-			       name);
-			good = false;
-			goto no_enums;
-		}
-		if (!enums) {
-			pr_err("VALIDATE %s: Enum table but no enum-type values\n",
-			       name);
-			good = false;
-			goto no_enums;
-		}
-
-		for (e = desc->enums; e->name[0]; e++) {
-			/* Check that all entries in the enum table have at
-			 * least one parameter that uses them.
-			 */
-			for (param = desc->specs; param->name; param++) {
-				if (param->opt == e->opt &&
-				    param->type != fs_param_is_enum) {
-					pr_err("VALIDATE %s: e[%tu] enum val for %s\n",
-					       name, e - desc->enums, param->name);
-					good = false;
-				}
-			}
-		}
-
-		/* Check that all enum-type parameters have at least one enum
-		 * value in the enum table.
-		 */
-		for (param = desc->specs; param->name; param++) {
-			if (param->type != fs_param_is_enum)
-				continue;
-			for (e = desc->enums; e->name[0]; e++)
-				if (e->opt == param->opt)
-					break;
-			if (!e->name[0]) {
-				pr_err("VALIDATE %s: PARAM[%s] enum with no values\n",
+	for (param = desc; param->name; param++) {
+		/* Check for duplicate parameter names */
+		for (p2 = desc; p2 < param; p2++) {
+			if (strcmp(param->name, p2->name) == 0) {
+				if (is_flag(param) != is_flag(p2))
+					continue;
+				pr_err("VALIDATE %s: PARAM[%s]: Duplicate\n",
 				       name, param->name);
 				good = false;
 			}
 		}
-	} else {
-		if (enums) {
-			pr_err("VALIDATE %s: enum-type values, but no enum table\n",
-			       name);
-			good = false;
-			goto no_enums;
-		}
 	}
-
-no_enums:
 	return good;
 }
 #endif /* CONFIG_VALIDATE_FS_PARSER */
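
After this rewrite a parameter spec's ->type is a conversion callback (fs_param_is_u32(), fs_param_is_enum(), ...) rather than an enum value, flags are simply specs with a NULL ->type, and enum choices hang off ->data as a NULL-terminated constant_table. A hedged sketch of how a filesystem's table now looks (the option names and Opt_* values are invented; real conversions appear in the fuse and gfs2 hunks below):

    #include <linux/fs_parser.h>

    enum { Opt_mode, Opt_size, Opt_debug };		/* hypothetical options */

    static const struct constant_table example_modes[] = {
    	{ "fast",	0 },
    	{ "safe",	1 },
    	{}					/* tables are now NULL-terminated */
    };

    static const struct fs_parameter_spec example_fs_parameters[] = {
    	fsparam_enum	("mode",  Opt_mode, example_modes),
    	fsparam_u32	("size",  Opt_size),
    	fsparam_flag_no	("debug", Opt_debug),	/* also accepts "nodebug" */
    	{}
    };

    /* In ->parse_param():
     *	opt = fs_parse(fc, example_fs_parameters, param, &result);
     * and the array itself goes into file_system_type::parameters.
     */
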
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index 9616af3..08e91ef 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -111,7 +111,7 @@ extern void fscache_enqueue_object(struct fscache_object *);
  * object-list.c
  */
 #ifdef CONFIG_FSCACHE_OBJECT_LIST
-extern const struct file_operations fscache_objlist_fops;
+extern const struct proc_ops fscache_objlist_proc_ops;
 
 extern void fscache_objlist_add(struct fscache_object *);
 extern void fscache_objlist_remove(struct fscache_object *);
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index 72ebfe5..e106a1a 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -7,6 +7,7 @@
 
 #define FSCACHE_DEBUG_LEVEL COOKIE
 #include <linux/module.h>
+#include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/key.h>
@@ -405,9 +406,9 @@ static int fscache_objlist_release(struct inode *inode, struct file *file)
 	return seq_release(inode, file);
 }
 
-const struct file_operations fscache_objlist_fops = {
-	.open		= fscache_objlist_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= fscache_objlist_release,
+const struct proc_ops fscache_objlist_proc_ops = {
+	.proc_open	= fscache_objlist_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= fscache_objlist_release,
 };
diff --git a/fs/fscache/proc.c b/fs/fscache/proc.c
index 5523446..90a7bc2 100644
--- a/fs/fscache/proc.c
+++ b/fs/fscache/proc.c
@@ -35,7 +35,7 @@ int __init fscache_proc_init(void)
 
 #ifdef CONFIG_FSCACHE_OBJECT_LIST
 	if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
-			 &fscache_objlist_fops))
+			 &fscache_objlist_proc_ops))
 		goto error_objects;
 #endif
 
diff --git a/fs/fsopen.c b/fs/fsopen.c
index 043ffa8..2fa3f24 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -25,7 +25,7 @@ static ssize_t fscontext_read(struct file *file,
 			      char __user *_buf, size_t len, loff_t *pos)
 {
 	struct fs_context *fc = file->private_data;
-	struct fc_log *log = fc->log;
+	struct fc_log *log = fc->log.log;
 	unsigned int logsize = ARRAY_SIZE(log->buffer);
 	ssize_t ret;
 	char *p;
@@ -97,11 +97,11 @@ static int fscontext_create_fd(struct fs_context *fc, unsigned int o_flags)
 
 static int fscontext_alloc_log(struct fs_context *fc)
 {
-	fc->log = kzalloc(sizeof(*fc->log), GFP_KERNEL);
-	if (!fc->log)
+	fc->log.log = kzalloc(sizeof(*fc->log.log), GFP_KERNEL);
+	if (!fc->log.log)
 		return -ENOMEM;
-	refcount_set(&fc->log->usage, 1);
-	fc->log->owner = fc->fs_type->owner;
+	refcount_set(&fc->log.log->usage, 1);
+	fc->log.log->owner = fc->fs_type->owner;
 	return 0;
 }
 
@@ -321,6 +321,7 @@ SYSCALL_DEFINE5(fsconfig,
 	struct fs_context *fc;
 	struct fd f;
 	int ret;
+	int lookup_flags = 0;
 
 	struct fs_parameter param = {
 		.type	= fs_value_is_undefined,
@@ -409,19 +410,12 @@ SYSCALL_DEFINE5(fsconfig,
 			goto out_key;
 		}
 		break;
+	case FSCONFIG_SET_PATH_EMPTY:
+		lookup_flags = LOOKUP_EMPTY;
+		/* fallthru */
 	case FSCONFIG_SET_PATH:
 		param.type = fs_value_is_filename;
-		param.name = getname_flags(_value, 0, NULL);
-		if (IS_ERR(param.name)) {
-			ret = PTR_ERR(param.name);
-			goto out_key;
-		}
-		param.dirfd = aux;
-		param.size = strlen(param.name->name);
-		break;
-	case FSCONFIG_SET_PATH_EMPTY:
-		param.type = fs_value_is_filename_empty;
-		param.name = getname_flags(_value, LOOKUP_EMPTY, NULL);
+		param.name = getname_flags(_value, lookup_flags, NULL);
 		if (IS_ERR(param.name)) {
 			ret = PTR_ERR(param.name);
 			goto out_key;
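
FSCONFIG_SET_PATH_EMPTY is now just the FSCONFIG_SET_PATH case with LOOKUP_EMPTY, so an empty value string makes the dirfd itself the object. A hedged userspace sketch of the argument convention only (assumes UAPI headers new enough for FSCONFIG_* and SYS_fsopen/SYS_fsconfig; "examplefs" and its path-typed "source" key are hypothetical):

    #include <fcntl.h>
    #include <linux/mount.h>	/* FSCONFIG_* commands */
    #include <sys/syscall.h>	/* SYS_fsopen, SYS_fsconfig */
    #include <unistd.h>

    int main(void)
    {
    	int fsfd = syscall(SYS_fsopen, "examplefs", 0);
    	int dirfd = open("/some/object", O_RDONLY);

    	if (fsfd < 0 || dirfd < 0)
    		return 1;
    	/* key="source", value="", aux=dirfd: resolved with LOOKUP_EMPTY */
    	return syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_PATH_EMPTY,
    		       "source", "", dirfd) ? 1 : 0;
    }
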
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 00015d8..030f094 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -451,8 +451,8 @@ static int cuse_send_init(struct cuse_conn *cc)
 	ap->args.out_args[0].size = sizeof(ia->out);
 	ap->args.out_args[0].value = &ia->out;
 	ap->args.out_args[1].size = CUSE_INIT_INFO_MAX;
-	ap->args.out_argvar = 1;
-	ap->args.out_pages = 1;
+	ap->args.out_argvar = true;
+	ap->args.out_pages = true;
 	ap->num_pages = 1;
 	ap->pages = &ia->page;
 	ap->descs = &ia->desc;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index ee19011..de1e2fde 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -818,7 +818,7 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
 	struct fuse_conn *fc = get_fuse_conn(olddir);
 	int err;
 
-	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
+	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
 		return -EINVAL;
 
 	if (flags) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index ce71538..9d67b83 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -803,6 +803,10 @@ static int fuse_do_readpage(struct file *file, struct page *page)
 
 	attr_ver = fuse_get_attr_version(fc);
 
+	/* Don't overflow end offset */
+	if (pos + (desc.length - 1) == LLONG_MAX)
+		desc.length--;
+
 	fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
 	res = fuse_simple_request(fc, &ia.ap.args);
 	if (res < 0)
@@ -888,6 +892,14 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
 	ap->args.out_pages = true;
 	ap->args.page_zeroing = true;
 	ap->args.page_replace = true;
+
+	/* Don't overflow end offset */
+	if (pos + (count - 1) == LLONG_MAX) {
+		count--;
+		ap->descs[ap->num_pages - 1].length--;
+	}
+	WARN_ON((loff_t) (pos + count) < 0);
+
 	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
 	ia->read.attr_ver = fuse_get_attr_version(fc);
 	if (fc->async_read) {
@@ -1397,9 +1409,9 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
 	}
 
 	if (write)
-		ap->args.in_pages = 1;
+		ap->args.in_pages = true;
 	else
-		ap->args.out_pages = 1;
+		ap->args.out_pages = true;
 
 	*nbytesp = nbytes;
 
@@ -1465,6 +1477,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 		}
 		ia = NULL;
 		if (nres < 0) {
+			iov_iter_revert(iter, nbytes);
 			err = nres;
 			break;
 		}
@@ -1473,8 +1486,10 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 		count -= nres;
 		res += nres;
 		pos += nres;
-		if (nres != nbytes)
+		if (nres != nbytes) {
+			iov_iter_revert(iter, nbytes - nres);
 			break;
+		}
 		if (count) {
 			max_pages = iov_iter_npages(iter, fc->max_pages);
 			ia = fuse_io_alloc(io, max_pages);
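
The fuse_direct_io() change reverts the iov_iter by whatever was claimed from it but not actually transferred, so the caller always sees an iterator consistent with the bytes that really completed. A hedged sketch of that general pattern (do_transfer() is a hypothetical helper that consumes @nbytes from the iterator):

    #include <linux/uio.h>

    extern ssize_t do_transfer(struct iov_iter *iter, size_t nbytes);

    static ssize_t example_step(struct iov_iter *iter, size_t nbytes)
    {
    	ssize_t nres = do_transfer(iter, nbytes);

    	if (nres < 0) {
    		iov_iter_revert(iter, nbytes);		/* nothing made it through */
    		return nres;
    	}
    	if (nres != nbytes)
    		iov_iter_revert(iter, nbytes - nres);	/* give back the short tail */
    	return nres;
    }
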
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 16aec32..95d712d 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -448,7 +448,7 @@ enum {
 	OPT_ERR
 };
 
-static const struct fs_parameter_spec fuse_param_specs[] = {
+static const struct fs_parameter_spec fuse_fs_parameters[] = {
 	fsparam_string	("source",		OPT_SOURCE),
 	fsparam_u32	("fd",			OPT_FD),
 	fsparam_u32oct	("rootmode",		OPT_ROOTMODE),
@@ -462,68 +462,63 @@ static const struct fs_parameter_spec fuse_param_specs[] = {
 	{}
 };
 
-static const struct fs_parameter_description fuse_fs_parameters = {
-	.name		= "fuse",
-	.specs		= fuse_param_specs,
-};
-
 static int fuse_parse_param(struct fs_context *fc, struct fs_parameter *param)
 {
 	struct fs_parse_result result;
 	struct fuse_fs_context *ctx = fc->fs_private;
 	int opt;
 
-	opt = fs_parse(fc, &fuse_fs_parameters, param, &result);
+	opt = fs_parse(fc, fuse_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
 
 	switch (opt) {
 	case OPT_SOURCE:
 		if (fc->source)
-			return invalf(fc, "fuse: Multiple sources specified");
+			return invalfc(fc, "Multiple sources specified");
 		fc->source = param->string;
 		param->string = NULL;
 		break;
 
 	case OPT_SUBTYPE:
 		if (ctx->subtype)
-			return invalf(fc, "fuse: Multiple subtypes specified");
+			return invalfc(fc, "Multiple subtypes specified");
 		ctx->subtype = param->string;
 		param->string = NULL;
 		return 0;
 
 	case OPT_FD:
 		ctx->fd = result.uint_32;
-		ctx->fd_present = 1;
+		ctx->fd_present = true;
 		break;
 
 	case OPT_ROOTMODE:
 		if (!fuse_valid_type(result.uint_32))
-			return invalf(fc, "fuse: Invalid rootmode");
+			return invalfc(fc, "Invalid rootmode");
 		ctx->rootmode = result.uint_32;
-		ctx->rootmode_present = 1;
+		ctx->rootmode_present = true;
 		break;
 
 	case OPT_USER_ID:
 		ctx->user_id = make_kuid(fc->user_ns, result.uint_32);
 		if (!uid_valid(ctx->user_id))
-			return invalf(fc, "fuse: Invalid user_id");
-		ctx->user_id_present = 1;
+			return invalfc(fc, "Invalid user_id");
+		ctx->user_id_present = true;
 		break;
 
 	case OPT_GROUP_ID:
 		ctx->group_id = make_kgid(fc->user_ns, result.uint_32);
 		if (!gid_valid(ctx->group_id))
-			return invalf(fc, "fuse: Invalid group_id");
-		ctx->group_id_present = 1;
+			return invalfc(fc, "Invalid group_id");
+		ctx->group_id_present = true;
 		break;
 
 	case OPT_DEFAULT_PERMISSIONS:
-		ctx->default_permissions = 1;
+		ctx->default_permissions = true;
 		break;
 
 	case OPT_ALLOW_OTHER:
-		ctx->allow_other = 1;
+		ctx->allow_other = true;
 		break;
 
 	case OPT_MAX_READ:
@@ -532,7 +527,7 @@ static int fuse_parse_param(struct fs_context *fc, struct fs_parameter *param)
 
 	case OPT_BLKSIZE:
 		if (!ctx->is_bdev)
-			return invalf(fc, "fuse: blksize only supported for fuseblk");
+			return invalfc(fc, "blksize only supported for fuseblk");
 		ctx->blksize = result.uint_32;
 		break;
 
@@ -997,7 +992,7 @@ void fuse_send_init(struct fuse_conn *fc)
 	/* Variable length argument used for backward compatibility
 	   with interface version < 7.5.  Rest of init_out is zeroed
 	   by do_get_request(), so a short reply is not a problem */
-	ia->args.out_argvar = 1;
+	ia->args.out_argvar = true;
 	ia->args.out_args[0].size = sizeof(ia->out);
 	ia->args.out_args[0].value = &ia->out;
 	ia->args.force = true;
@@ -1347,7 +1342,7 @@ static struct file_system_type fuse_fs_type = {
 	.name		= "fuse",
 	.fs_flags	= FS_HAS_SUBTYPE | FS_USERNS_MOUNT,
 	.init_fs_context = fuse_init_fs_context,
-	.parameters	= &fuse_fs_parameters,
+	.parameters	= fuse_fs_parameters,
 	.kill_sb	= fuse_kill_sb_anon,
 };
 MODULE_ALIAS_FS("fuse");
@@ -1363,7 +1358,7 @@ static struct file_system_type fuseblk_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "fuseblk",
 	.init_fs_context = fuse_init_fs_context,
-	.parameters	= &fuse_fs_parameters,
+	.parameters	= fuse_fs_parameters,
 	.kill_sb	= fuse_kill_sb_blk,
 	.fs_flags	= FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
 };
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
index 6a40f75..90e3f01 100644
--- a/fs/fuse/readdir.c
+++ b/fs/fuse/readdir.c
@@ -332,7 +332,7 @@ static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx)
 		return -ENOMEM;
 
 	plus = fuse_use_readdirplus(inode, ctx);
-	ap->args.out_pages = 1;
+	ap->args.out_pages = true;
 	ap->num_pages = 1;
 	ap->pages = &page;
 	ap->descs = &desc;
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 9c6df72..ba83b49 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -183,14 +183,12 @@ static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc
 	struct inode *inode = page->mapping->host;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	int ret;
 
 	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
 		goto out;
 	if (PageChecked(page) || current->journal_info)
 		goto out_ignore;
-	ret = __gfs2_jdata_writepage(page, wbc);
-	return ret;
+	return __gfs2_jdata_writepage(page, wbc);
 
 out_ignore:
 	redirty_page_for_writepage(wbc, page);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index eb9c057..c8b6257 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -73,9 +73,6 @@
 #include "bmap.h"
 #include "util.h"
 
-#define IS_LEAF     1 /* Hashed (leaf) directory */
-#define IS_DINODE   2 /* Linear (stuffed dinode block) directory */
-
 #define MAX_RA_BLOCKS 32 /* max read-ahead blocks */
 
 #define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 9d58295..cb26be6 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -847,7 +847,7 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file);
 	struct gfs2_inode *ip = GFS2_I(inode);
-	ssize_t written = 0, ret;
+	ssize_t ret;
 
 	ret = gfs2_rsqa_alloc(ip);
 	if (ret)
@@ -867,68 +867,58 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	inode_lock(inode);
 	ret = generic_write_checks(iocb, from);
 	if (ret <= 0)
-		goto out;
-
-	/* We can write back this queue in page reclaim */
-	current->backing_dev_info = inode_to_bdi(inode);
+		goto out_unlock;
 
 	ret = file_remove_privs(file);
 	if (ret)
-		goto out2;
+		goto out_unlock;
 
 	ret = file_update_time(file);
 	if (ret)
-		goto out2;
+		goto out_unlock;
 
 	if (iocb->ki_flags & IOCB_DIRECT) {
 		struct address_space *mapping = file->f_mapping;
-		loff_t pos, endbyte;
-		ssize_t buffered;
+		ssize_t buffered, ret2;
 
-		written = gfs2_file_direct_write(iocb, from);
-		if (written < 0 || !iov_iter_count(from))
-			goto out2;
+		ret = gfs2_file_direct_write(iocb, from);
+		if (ret < 0 || !iov_iter_count(from))
+			goto out_unlock;
 
-		ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
-		if (unlikely(ret < 0))
-			goto out2;
-		buffered = ret;
+		iocb->ki_flags |= IOCB_DSYNC;
+		current->backing_dev_info = inode_to_bdi(inode);
+		buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+		current->backing_dev_info = NULL;
+		if (unlikely(buffered <= 0))
+			goto out_unlock;
 
 		/*
 		 * We need to ensure that the page cache pages are written to
 		 * disk and invalidated to preserve the expected O_DIRECT
-		 * semantics.
+		 * semantics.  If the writeback or invalidate fails, only report
+		 * the direct I/O range as we don't know if the buffered pages
+		 * made it to disk.
 		 */
-		pos = iocb->ki_pos;
-		endbyte = pos + buffered - 1;
-		ret = filemap_write_and_wait_range(mapping, pos, endbyte);
-		if (!ret) {
-			iocb->ki_pos += buffered;
-			written += buffered;
-			invalidate_mapping_pages(mapping,
-						 pos >> PAGE_SHIFT,
-						 endbyte >> PAGE_SHIFT);
-		} else {
-			/*
-			 * We don't know how much we wrote, so just return
-			 * the number of bytes which were direct-written
-			 */
-		}
+		iocb->ki_pos += buffered;
+		ret2 = generic_write_sync(iocb, buffered);
+		invalidate_mapping_pages(mapping,
+				(iocb->ki_pos - buffered) >> PAGE_SHIFT,
+				(iocb->ki_pos - 1) >> PAGE_SHIFT);
+		if (!ret || ret2 > 0)
+			ret += ret2;
 	} else {
+		current->backing_dev_info = inode_to_bdi(inode);
 		ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
-		if (likely(ret > 0))
+		current->backing_dev_info = NULL;
+		if (likely(ret > 0)) {
 			iocb->ki_pos += ret;
+			ret = generic_write_sync(iocb, ret);
+		}
 	}
 
-out2:
-	current->backing_dev_info = NULL;
-out:
+out_unlock:
 	inode_unlock(inode);
-	if (likely(ret > 0)) {
-		/* Handle various SYNC-type writes */
-		ret = generic_write_sync(iocb, ret);
-	}
-	return written ? written : ret;
+	return ret;
 }
 
 static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index b7123de..d0eceaf 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -826,7 +826,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
 
 	if (glops->go_flags & GLOF_LVB) {
-		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
+		gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
 		if (!gl->gl_lksb.sb_lvbptr) {
 			kmem_cache_free(cachep, gl);
 			return -ENOMEM;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 4ede1f1..061d22e 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -95,7 +95,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
 	/* A shortened, inline version of gfs2_trans_begin()
          * tr->alloced is not set since the transaction structure is
          * on the stack */
-	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
+	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
 	tr.tr_ip = _RET_IP_;
 	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
 		return;
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 5f89c51..9fd88ed 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -387,8 +387,6 @@ struct gfs2_glock {
 	struct rhash_head gl_node;
 };
 
-#define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */
-
 enum {
 	GIF_INVALID		= 0,
 	GIF_QD_LOCKED		= 1,
@@ -505,6 +503,7 @@ struct gfs2_trans {
 	unsigned int tr_num_buf_rm;
 	unsigned int tr_num_databuf_rm;
 	unsigned int tr_num_revoke;
+	unsigned int tr_num_revoke_rm;
 
 	struct list_head tr_list;
 	struct list_head tr_databuf;
@@ -703,6 +702,7 @@ struct gfs2_sbd {
 	u32 sd_fsb2bb_shift;
 	u32 sd_diptrs;	/* Number of pointers in a dinode */
 	u32 sd_inptrs;	/* Number of pointers in a indirect block */
+	u32 sd_ldptrs;  /* Number of pointers in a log descriptor block */
 	u32 sd_jbsize;	/* Size of a journaled data block */
 	u32 sd_hash_bsize;	/* sizeof(exhash block) */
 	u32 sd_hash_bsize_shift;
@@ -803,7 +803,7 @@ struct gfs2_sbd {
 
 	struct gfs2_trans *sd_log_tr;
 	unsigned int sd_log_blks_reserved;
-	int sd_log_commited_revoke;
+	int sd_log_committed_revoke;
 
 	atomic_t sd_log_pinned;
 	unsigned int sd_log_num_revoke;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index dafef10..2716d56 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -136,7 +136,6 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
 	if (inode->i_state & I_NEW) {
 		struct gfs2_sbd *sdp = GFS2_SB(inode);
-		ip->i_no_formal_ino = no_formal_ino;
 
 		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
 		if (unlikely(error))
@@ -175,21 +174,22 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 		gfs2_glock_put(io_gl);
 		io_gl = NULL;
 
+		/* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
+		inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
+		inode->i_atime.tv_nsec = 0;
+
 		if (type == DT_UNKNOWN) {
 			/* Inode glock must be locked already */
 			error = gfs2_inode_refresh(GFS2_I(inode));
 			if (error)
 				goto fail_refresh;
 		} else {
+			ip->i_no_formal_ino = no_formal_ino;
 			inode->i_mode = DT2IF(type);
 		}
 
 		gfs2_set_iop(inode);
 
-		/* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
-		inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
-		inode->i_atime.tv_nsec = 0;
-
 		unlock_new_inode(inode);
 	}
 
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index eb3f2e7..00a2e72 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -37,7 +37,6 @@ static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
  * gfs2_struct2blk - compute the number of log descriptor blocks needed
  * @sdp: the filesystem
  * @nstruct: the number of structures
- * @ssize: the size of the structures
  *
  * Compute the number of log descriptor blocks needed to hold a certain number
  * of structures of a certain size.
@@ -45,18 +44,16 @@ static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
  * Returns: the number of blocks needed (minimum is always 1)
  */
 
-unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
-			     unsigned int ssize)
+unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
 {
 	unsigned int blks;
 	unsigned int first, second;
 
 	blks = 1;
-	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;
+	first = sdp->sd_ldptrs;
 
 	if (nstruct > first) {
-		second = (sdp->sd_sb.sb_bsize -
-			  sizeof(struct gfs2_meta_header)) / ssize;
+		second = sdp->sd_inptrs;
 		blks += DIV_ROUND_UP(nstruct - first, second);
 	}
 
@@ -472,9 +469,8 @@ static unsigned int calc_reserved(struct gfs2_sbd *sdp)
 		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
 	}
 
-	if (sdp->sd_log_commited_revoke > 0)
-		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
-					  sizeof(u64));
+	if (sdp->sd_log_committed_revoke > 0)
+		reserved += gfs2_struct2blk(sdp, sdp->sd_log_committed_revoke);
 	/* One for the overall header */
 	if (reserved)
 		reserved++;
@@ -829,7 +825,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 	if (unlikely(state == SFS_FROZEN))
 		gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
 	gfs2_assert_withdraw(sdp,
-			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);
+			sdp->sd_log_num_revoke == sdp->sd_log_committed_revoke);
 
 	gfs2_ordered_write(sdp);
 	lops_before_commit(sdp, tr);
@@ -848,7 +844,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 	gfs2_log_lock(sdp);
 	sdp->sd_log_head = sdp->sd_log_flush_head;
 	sdp->sd_log_blks_reserved = 0;
-	sdp->sd_log_commited_revoke = 0;
+	sdp->sd_log_committed_revoke = 0;
 
 	spin_lock(&sdp->sd_ail_lock);
 	if (tr && !list_empty(&tr->tr_ail1_list)) {
@@ -899,6 +895,7 @@ static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
 	old->tr_num_buf_rm	+= new->tr_num_buf_rm;
 	old->tr_num_databuf_rm	+= new->tr_num_databuf_rm;
 	old->tr_num_revoke	+= new->tr_num_revoke;
+	old->tr_num_revoke_rm	+= new->tr_num_revoke_rm;
 
 	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
 	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
@@ -920,7 +917,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 		set_bit(TR_ATTACHED, &tr->tr_flags);
 	}
 
-	sdp->sd_log_commited_revoke += tr->tr_num_revoke;
+	sdp->sd_log_committed_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
 	reserved = calc_reserved(sdp);
 	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
 	gfs2_assert_withdraw(sdp, maxres >= reserved);
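
gfs2_struct2blk() now always sizes for u64 revoke entries: the first log descriptor block holds sd_ldptrs of them and each continuation block holds sd_inptrs, so the result is 1 when everything fits in the descriptor and 1 + DIV_ROUND_UP(nstruct - sd_ldptrs, sd_inptrs) otherwise. A small standalone check of that formula (503/509 are illustrative per-block counts, roughly what a 4K block yields, not values taken from this patch):

    #include <assert.h>

    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

    static unsigned int struct2blk(unsigned int nstruct,
    			       unsigned int first, unsigned int second)
    {
    	unsigned int blks = 1;

    	if (nstruct > first)
    		blks += DIV_ROUND_UP(nstruct - first, second);
    	return blks;
    }

    int main(void)
    {
    	assert(struct2blk(100, 503, 509) == 1);		/* fits in the descriptor */
    	assert(struct2blk(503, 503, 509) == 1);		/* exactly full */
    	assert(struct2blk(504, 503, 509) == 2);		/* one entry spills over */
    	assert(struct2blk(503 + 2 * 509, 503, 509) == 3);
    	return 0;
    }
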
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 2ff163a..c0a65e5 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -60,9 +60,9 @@ static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
 		spin_unlock(&sdp->sd_ordered_lock);
 	}
 }
+
 extern void gfs2_ordered_del_inode(struct gfs2_inode *ip);
-extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
-			    unsigned int ssize);
+extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct);
 
 extern void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
 extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 55fed7d..c090d5a 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -259,7 +259,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
 	struct super_block *sb = sdp->sd_vfs;
 	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 
-	bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
+	bio->bi_iter.bi_sector = blkno << (sb->s_blocksize_bits - 9);
 	bio_set_dev(bio, sb->s_bdev);
 	bio->bi_end_io = end_io;
 	bio->bi_private = sdp;
@@ -422,7 +422,7 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
 
 	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
 		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
-			if (lh.lh_sequence > head->lh_sequence)
+			if (lh.lh_sequence >= head->lh_sequence)
 				*head = lh;
 			else {
 				ret = true;
@@ -472,6 +472,20 @@ static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
 	put_page(page); /* Once more for find_or_create_page */
 }
 
+static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
+{
+	struct bio *new;
+
+	new = bio_alloc(GFP_NOIO, nr_iovecs);
+	bio_copy_dev(new, prev);
+	new->bi_iter.bi_sector = bio_end_sector(prev);
+	new->bi_opf = prev->bi_opf;
+	new->bi_write_hint = prev->bi_write_hint;
+	bio_chain(new, prev);
+	submit_bio(prev);
+	return new;
+}
+
 /**
  * gfs2_find_jhead - find the head of a log
  * @jd: The journal descriptor
@@ -488,15 +502,15 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 	struct address_space *mapping = jd->jd_inode->i_mapping;
 	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
-	unsigned int bsize = sdp->sd_sb.sb_bsize;
+	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
 	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
 	unsigned int shift = PAGE_SHIFT - bsize_shift;
-	unsigned int readhead_blocks = BIO_MAX_PAGES << shift;
+	unsigned int readahead_blocks = BIO_MAX_PAGES << shift;
 	struct gfs2_journal_extent *je;
 	int sz, ret = 0;
 	struct bio *bio = NULL;
 	struct page *page = NULL;
-	bool done = false;
+	bool bio_chained = false, done = false;
 	errseq_t since;
 
 	memset(head, 0, sizeof(*head));
@@ -505,9 +519,9 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
 
 	since = filemap_sample_wb_err(mapping);
 	list_for_each_entry(je, &jd->extent_list, list) {
-		for (; block < je->lblock + je->blocks; block++) {
-			u64 dblock;
+		u64 dblock = je->dblock;
 
+		for (; block < je->lblock + je->blocks; block++, dblock++) {
 			if (!page) {
 				page = find_or_create_page(mapping,
 						block >> shift, GFP_NOFS);
@@ -516,35 +530,41 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
 					done = true;
 					goto out;
 				}
+				off = 0;
+			}
+
+			if (!bio || (bio_chained && !off)) {
+				/* start new bio */
+			} else {
+				sz = bio_add_page(bio, page, bsize, off);
+				if (sz == bsize)
+					goto block_added;
+				if (off) {
+					unsigned int blocks =
+						(PAGE_SIZE - off) >> bsize_shift;
+
+					bio = gfs2_chain_bio(bio, blocks);
+					bio_chained = true;
+					goto add_block_to_new_bio;
+				}
 			}
 
 			if (bio) {
-				unsigned int off;
-
-				off = (block << bsize_shift) & ~PAGE_MASK;
-				sz = bio_add_page(bio, page, bsize, off);
-				if (sz == bsize) { /* block added */
-					if (off + bsize == PAGE_SIZE) {
-						page = NULL;
-						goto page_added;
-					}
-					continue;
-				}
 				blocks_submitted = block + 1;
 				submit_bio(bio);
-				bio = NULL;
 			}
 
-			dblock = je->dblock + (block - je->lblock);
 			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
 			bio->bi_opf = REQ_OP_READ;
-			sz = bio_add_page(bio, page, bsize, 0);
-			gfs2_assert_warn(sdp, sz == bsize);
-			if (bsize == PAGE_SIZE)
+			bio_chained = false;
+add_block_to_new_bio:
+			sz = bio_add_page(bio, page, bsize, off);
+			BUG_ON(sz != bsize);
+block_added:
+			off += bsize;
+			if (off == PAGE_SIZE)
 				page = NULL;
-
-page_added:
-			if (blocks_submitted < blocks_read + readhead_blocks) {
+			if (blocks_submitted < blocks_read + readahead_blocks) {
 				/* Keep at least one bio in flight */
 				continue;
 			}
@@ -846,7 +866,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 	if (!sdp->sd_log_num_revoke)
 		return;
 
-	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
+	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
 	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
 	offset = sizeof(struct gfs2_log_descriptor);
 
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index e8b7b0c..a1a8ef7 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -298,6 +298,8 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
 			  sizeof(struct gfs2_dinode)) / sizeof(u64);
 	sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
 			  sizeof(struct gfs2_meta_header)) / sizeof(u64);
+	sdp->sd_ldptrs = (sdp->sd_sb.sb_bsize -
+			  sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
 	sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
 	sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
 	sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
@@ -1250,6 +1252,7 @@ enum gfs2_param {
 	Opt_upgrade,
 	Opt_acl,
 	Opt_quota,
+	Opt_quota_flag,
 	Opt_suiddir,
 	Opt_data,
 	Opt_meta,
@@ -1264,17 +1267,11 @@ enum gfs2_param {
 	Opt_loccookie,
 };
 
-enum opt_quota {
-	Opt_quota_unset = 0,
-	Opt_quota_off,
-	Opt_quota_account,
-	Opt_quota_on,
-};
-
-static const unsigned int opt_quota_values[] = {
-	[Opt_quota_off]     = GFS2_QUOTA_OFF,
-	[Opt_quota_account] = GFS2_QUOTA_ACCOUNT,
-	[Opt_quota_on]      = GFS2_QUOTA_ON,
+static const struct constant_table gfs2_param_quota[] = {
+	{"off",        GFS2_QUOTA_OFF},
+	{"account",    GFS2_QUOTA_ACCOUNT},
+	{"on",         GFS2_QUOTA_ON},
+	{}
 };
 
 enum opt_data {
@@ -1282,12 +1279,24 @@ enum opt_data {
 	Opt_data_ordered   = GFS2_DATA_ORDERED,
 };
 
+static const struct constant_table gfs2_param_data[] = {
+	{"writeback",  Opt_data_writeback },
+	{"ordered",    Opt_data_ordered },
+	{}
+};
+
 enum opt_errors {
 	Opt_errors_withdraw = GFS2_ERRORS_WITHDRAW,
 	Opt_errors_panic    = GFS2_ERRORS_PANIC,
 };
 
-static const struct fs_parameter_spec gfs2_param_specs[] = {
+static const struct constant_table gfs2_param_errors[] = {
+	{"withdraw",   Opt_errors_withdraw },
+	{"panic",      Opt_errors_panic },
+	{}
+};
+
+static const struct fs_parameter_spec gfs2_fs_parameters[] = {
 	fsparam_string ("lockproto",          Opt_lockproto),
 	fsparam_string ("locktable",          Opt_locktable),
 	fsparam_string ("hostdata",           Opt_hostdata),
@@ -1300,11 +1309,11 @@ static const struct fs_parameter_spec gfs2_param_specs[] = {
 	fsparam_flag   ("upgrade",            Opt_upgrade),
 	fsparam_flag_no("acl",                Opt_acl),
 	fsparam_flag_no("suiddir",            Opt_suiddir),
-	fsparam_enum   ("data",               Opt_data),
+	fsparam_enum   ("data",               Opt_data, gfs2_param_data),
 	fsparam_flag   ("meta",               Opt_meta),
 	fsparam_flag_no("discard",            Opt_discard),
 	fsparam_s32    ("commit",             Opt_commit),
-	fsparam_enum   ("errors",             Opt_errors),
+	fsparam_enum   ("errors",             Opt_errors, gfs2_param_errors),
 	fsparam_s32    ("statfs_quantum",     Opt_statfs_quantum),
 	fsparam_s32    ("statfs_percent",     Opt_statfs_percent),
 	fsparam_s32    ("quota_quantum",      Opt_quota_quantum),
@@ -1312,27 +1321,11 @@ static const struct fs_parameter_spec gfs2_param_specs[] = {
 	fsparam_flag_no("rgrplvb",            Opt_rgrplvb),
 	fsparam_flag_no("loccookie",          Opt_loccookie),
 	/* quota can be a flag or an enum so it gets special treatment */
-	__fsparam(fs_param_is_enum, "quota", Opt_quota, fs_param_neg_with_no|fs_param_v_optional),
+	fsparam_flag_no("quota",	      Opt_quota_flag),
+	fsparam_enum("quota",		      Opt_quota, gfs2_param_quota),
 	{}
 };
 
-static const struct fs_parameter_enum gfs2_param_enums[] = {
-	{ Opt_quota,    "off",        Opt_quota_off },
-	{ Opt_quota,    "account",    Opt_quota_account },
-	{ Opt_quota,    "on",         Opt_quota_on },
-	{ Opt_data,     "writeback",  Opt_data_writeback },
-	{ Opt_data,     "ordered",    Opt_data_ordered },
-	{ Opt_errors,   "withdraw",   Opt_errors_withdraw },
-	{ Opt_errors,   "panic",      Opt_errors_panic },
-	{}
-};
-
-static const struct fs_parameter_description gfs2_fs_parameters = {
-	.name = "gfs2",
-	.specs = gfs2_param_specs,
-	.enums = gfs2_param_enums,
-};
-
 /* Parse a single mount parameter */
 static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
 {
@@ -1340,7 +1333,7 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	struct fs_parse_result result;
 	int o;
 
-	o = fs_parse(fc, &gfs2_fs_parameters, param, &result);
+	o = fs_parse(fc, gfs2_fs_parameters, param, &result);
 	if (o < 0)
 		return o;
 
@@ -1368,7 +1361,7 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
 		break;
 	case Opt_debug:
 		if (result.boolean && args->ar_errors == GFS2_ERRORS_PANIC)
-			return invalf(fc, "gfs2: -o debug and -o errors=panic are mutually exclusive");
+			return invalfc(fc, "-o debug and -o errors=panic are mutually exclusive");
 		args->ar_debug = result.boolean;
 		break;
 	case Opt_upgrade:
@@ -1377,17 +1370,11 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	case Opt_acl:
 		args->ar_posix_acl = result.boolean;
 		break;
+	case Opt_quota_flag:
+		args->ar_quota = result.negated ? GFS2_QUOTA_OFF : GFS2_QUOTA_ON;
+		break;
 	case Opt_quota:
-		/* The quota option can be a flag or an enum. A non-zero int_32
-		   result means that we have an enum index. Otherwise we have
-		   to rely on the 'negated' flag to tell us whether 'quota' or
-		   'noquota' was specified. */
-		if (result.negated)
-			args->ar_quota = GFS2_QUOTA_OFF;
-		else if (result.int_32 > 0)
-			args->ar_quota = opt_quota_values[result.int_32];
-		else
-			args->ar_quota = GFS2_QUOTA_ON;
+		args->ar_quota = result.int_32;
 		break;
 	case Opt_suiddir:
 		args->ar_suiddir = result.boolean;
@@ -1404,27 +1391,27 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
 		break;
 	case Opt_commit:
 		if (result.int_32 <= 0)
-			return invalf(fc, "gfs2: commit mount option requires a positive numeric argument");
+			return invalfc(fc, "commit mount option requires a positive numeric argument");
 		args->ar_commit = result.int_32;
 		break;
 	case Opt_statfs_quantum:
 		if (result.int_32 < 0)
-			return invalf(fc, "gfs2: statfs_quantum mount option requires a non-negative numeric argument");
+			return invalfc(fc, "statfs_quantum mount option requires a non-negative numeric argument");
 		args->ar_statfs_quantum = result.int_32;
 		break;
 	case Opt_quota_quantum:
 		if (result.int_32 <= 0)
-			return invalf(fc, "gfs2: quota_quantum mount option requires a positive numeric argument");
+			return invalfc(fc, "quota_quantum mount option requires a positive numeric argument");
 		args->ar_quota_quantum = result.int_32;
 		break;
 	case Opt_statfs_percent:
 		if (result.int_32 < 0 || result.int_32 > 100)
-			return invalf(fc, "gfs2: statfs_percent mount option requires a numeric argument between 0 and 100");
+			return invalfc(fc, "statfs_percent mount option requires a numeric argument between 0 and 100");
 		args->ar_statfs_percent = result.int_32;
 		break;
 	case Opt_errors:
 		if (args->ar_debug && result.uint_32 == GFS2_ERRORS_PANIC)
-			return invalf(fc, "gfs2: -o debug and -o errors=panic are mutually exclusive");
+			return invalfc(fc, "-o debug and -o errors=panic are mutually exclusive");
 		args->ar_errors = result.uint_32;
 		break;
 	case Opt_barrier:
@@ -1437,7 +1424,7 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
 		args->ar_loccookie = result.boolean;
 		break;
 	default:
-		return invalf(fc, "gfs2: invalid mount option: %s", param->key);
+		return invalfc(fc, "invalid mount option: %s", param->key);
 	}
 	return 0;
 }
@@ -1463,27 +1450,27 @@ static int gfs2_reconfigure(struct fs_context *fc)
 	spin_unlock(&gt->gt_spin);
 
 	if (strcmp(newargs->ar_lockproto, oldargs->ar_lockproto)) {
-		errorf(fc, "gfs2: reconfiguration of locking protocol not allowed");
+		errorfc(fc, "reconfiguration of locking protocol not allowed");
 		return -EINVAL;
 	}
 	if (strcmp(newargs->ar_locktable, oldargs->ar_locktable)) {
-		errorf(fc, "gfs2: reconfiguration of lock table not allowed");
+		errorfc(fc, "reconfiguration of lock table not allowed");
 		return -EINVAL;
 	}
 	if (strcmp(newargs->ar_hostdata, oldargs->ar_hostdata)) {
-		errorf(fc, "gfs2: reconfiguration of host data not allowed");
+		errorfc(fc, "reconfiguration of host data not allowed");
 		return -EINVAL;
 	}
 	if (newargs->ar_spectator != oldargs->ar_spectator) {
-		errorf(fc, "gfs2: reconfiguration of spectator mode not allowed");
+		errorfc(fc, "reconfiguration of spectator mode not allowed");
 		return -EINVAL;
 	}
 	if (newargs->ar_localflocks != oldargs->ar_localflocks) {
-		errorf(fc, "gfs2: reconfiguration of localflocks not allowed");
+		errorfc(fc, "reconfiguration of localflocks not allowed");
 		return -EINVAL;
 	}
 	if (newargs->ar_meta != oldargs->ar_meta) {
-		errorf(fc, "gfs2: switching between gfs2 and gfs2meta not allowed");
+		errorfc(fc, "switching between gfs2 and gfs2meta not allowed");
 		return -EINVAL;
 	}
 	if (oldargs->ar_spectator)
@@ -1493,11 +1480,11 @@ static int gfs2_reconfigure(struct fs_context *fc)
 		if (fc->sb_flags & SB_RDONLY) {
 			error = gfs2_make_fs_ro(sdp);
 			if (error)
-				errorf(fc, "gfs2: unable to remount read-only");
+				errorfc(fc, "unable to remount read-only");
 		} else {
 			error = gfs2_make_fs_rw(sdp);
 			if (error)
-				errorf(fc, "gfs2: unable to remount read-write");
+				errorfc(fc, "unable to remount read-write");
 		}
 	}
 	sdp->sd_args = *newargs;
@@ -1642,7 +1629,7 @@ struct file_system_type gfs2_fs_type = {
 	.name = "gfs2",
 	.fs_flags = FS_REQUIRES_DEV,
 	.init_fs_context = gfs2_init_fs_context,
-	.parameters = &gfs2_fs_parameters,
+	.parameters = gfs2_fs_parameters,
 	.kill_sb = gfs2_kill_sb,
 	.owner = THIS_MODULE,
 };
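
The gfs2 hunks above convert the mount-option tables to the new fs_parse() style: per-parameter constant_table arrays replace the single fs_parameter_enum list, fs_parse() now takes the fs_parameter_spec array directly, and the dual-natured "quota" option is split into a flag spec ("quota"/"noquota") and an enum spec ("quota=off|account|on"). A minimal userspace sketch of how the split resolves to ar_quota values; the GFS2_QUOTA_* numbers mirror the kernel's on-disk constants, and the string lookup here is a stand-in for the real parser, not kernel code:

/* Hypothetical sketch of how the two "quota" specs above resolve. */
#include <stdio.h>
#include <string.h>

enum { GFS2_QUOTA_OFF, GFS2_QUOTA_ACCOUNT, GFS2_QUOTA_ON };

static int parse_quota_opt(const char *opt)
{
	/* fsparam_flag_no("quota", Opt_quota_flag): bare "quota"/"noquota" */
	if (!strcmp(opt, "quota"))
		return GFS2_QUOTA_ON;
	if (!strcmp(opt, "noquota"))
		return GFS2_QUOTA_OFF;
	/* fsparam_enum("quota", Opt_quota, gfs2_param_quota): "quota=<value>" */
	if (!strcmp(opt, "quota=off"))
		return GFS2_QUOTA_OFF;
	if (!strcmp(opt, "quota=account"))
		return GFS2_QUOTA_ACCOUNT;
	if (!strcmp(opt, "quota=on"))
		return GFS2_QUOTA_ON;
	return -1;
}

int main(void)
{
	const char *opts[] = { "quota", "noquota", "quota=account" };

	for (unsigned i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
		printf("%-14s -> ar_quota = %d\n", opts[i], parse_quota_opt(opts[i]));
	return 0;
}
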
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 2466bb4..e7bf91e 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -36,16 +36,6 @@
 #define BFITNOENT ((u32)~0)
 #define NO_BLOCK ((u64)~0)
 
-#if BITS_PER_LONG == 32
-#define LBITMASK   (0x55555555UL)
-#define LBITSKIP55 (0x55555555UL)
-#define LBITSKIP00 (0x00000000UL)
-#else
-#define LBITMASK   (0x5555555555555555UL)
-#define LBITSKIP55 (0x5555555555555555UL)
-#define LBITSKIP00 (0x0000000000000000UL)
-#endif
-
 /*
  * These routines are used by the resource group routines (rgrp.c)
  * to keep track of block allocation.  Each block is represented by two
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 9d42273..a685637 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -49,8 +49,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
 	if (blocks)
 		tr->tr_reserved += 6 + blocks;
 	if (revokes)
-		tr->tr_reserved += gfs2_struct2blk(sdp, revokes,
-						   sizeof(u64));
+		tr->tr_reserved += gfs2_struct2blk(sdp, revokes);
 	INIT_LIST_HEAD(&tr->tr_databuf);
 	INIT_LIST_HEAD(&tr->tr_buf);
 
@@ -77,10 +76,10 @@ static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr)
 	fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n",
 		tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
 		test_bit(TR_TOUCHED, &tr->tr_flags));
-	fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u\n",
+	fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
 		tr->tr_num_buf_new, tr->tr_num_buf_rm,
 		tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
-		tr->tr_num_revoke);
+		tr->tr_num_revoke, tr->tr_num_revoke_rm);
 }
 
 void gfs2_trans_end(struct gfs2_sbd *sdp)
@@ -265,7 +264,7 @@ void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
 			if (bd->bd_gl)
 				gfs2_glock_remove_revoke(bd->bd_gl);
 			kmem_cache_free(gfs2_bufdata_cachep, bd);
-			tr->tr_num_revoke--;
+			tr->tr_num_revoke_rm++;
 			if (--n == 0)
 				break;
 		}
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 6d0783e..f71c384 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -242,19 +242,35 @@ extern void hfs_mark_mdb_dirty(struct super_block *sb);
 /*
  * There are two time systems.  Both are based on seconds since
  * a particular time/date.
- *	Unix:	unsigned lil-endian since 00:00 GMT, Jan. 1, 1970
+ *	Unix:	signed little-endian since 00:00 GMT, Jan. 1, 1970
  *	mac:	unsigned big-endian since 00:00 GMT, Jan. 1, 1904
  *
+ * HFS implementations are highly inconsistent, this one matches the
+ * traditional behavior of 64-bit Linux, giving the most useful
+ * time range between 1970 and 2106, by treating any on-disk timestamp
+ * under HFS_UTC_OFFSET (Jan 1 1970) as a time between 2040 and 2106.
  */
-#define __hfs_u_to_mtime(sec)	cpu_to_be32(sec + 2082844800U - sys_tz.tz_minuteswest * 60)
-#define __hfs_m_to_utime(sec)	(be32_to_cpu(sec) - 2082844800U  + sys_tz.tz_minuteswest * 60)
+#define HFS_UTC_OFFSET 2082844800U
 
+static inline time64_t __hfs_m_to_utime(__be32 mt)
+{
+	time64_t ut = (u32)(be32_to_cpu(mt) - HFS_UTC_OFFSET);
+
+	return ut + sys_tz.tz_minuteswest * 60;
+}
+
+static inline __be32 __hfs_u_to_mtime(time64_t ut)
+{
+	ut -= sys_tz.tz_minuteswest * 60;
+
+	return cpu_to_be32(lower_32_bits(ut) + HFS_UTC_OFFSET);
+}
 #define HFS_I(inode)	(container_of(inode, struct hfs_inode_info, vfs_inode))
 #define HFS_SB(sb)	((struct hfs_sb_info *)(sb)->s_fs_info)
 
-#define hfs_m_to_utime(time)	(struct timespec){ .tv_sec = __hfs_m_to_utime(time) }
-#define hfs_u_to_mtime(time)	__hfs_u_to_mtime((time).tv_sec)
-#define hfs_mtime()		__hfs_u_to_mtime(get_seconds())
+#define hfs_m_to_utime(time)   (struct timespec64){ .tv_sec = __hfs_m_to_utime(time) }
+#define hfs_u_to_mtime(time)   __hfs_u_to_mtime((time).tv_sec)
+#define hfs_mtime()		__hfs_u_to_mtime(ktime_get_real_seconds())
 
 static inline const char *hfs_mdb_name(struct super_block *sb)
 {
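
The new __hfs_m_to_utime() above implements the comment's policy by doing the offset subtraction in 32-bit unsigned arithmetic: an on-disk Mac timestamp smaller than HFS_UTC_OFFSET wraps around to a value in the 2040..2106 range instead of going negative, before the local-time adjustment via sys_tz is applied (the HFS+ helpers further down use the same trick without the timezone term). A standalone sketch of the arithmetic:

/* Standalone illustration of the unsigned wrap described in the comment
 * above: on-disk values below HFS_UTC_OFFSET land in the 2040..2106 range. */
#include <stdio.h>
#include <stdint.h>

#define HFS_UTC_OFFSET 2082844800U

int main(void)
{
	/* A Mac timestamp of 0 means 1904-01-01, before the Unix epoch. */
	uint32_t on_disk = 0;
	int64_t unix_secs = (uint32_t)(on_disk - HFS_UTC_OFFSET);

	/* (u32)(0 - 2082844800) == 2212122496 seconds, i.e. a date in 2040. */
	printf("unix seconds: %lld\n", (long long)unix_secs);
	return 0;
}
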
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index da243c8..2f224b9 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -351,7 +351,7 @@ static int hfs_read_inode(struct inode *inode, void *data)
 		inode->i_mode &= ~hsb->s_file_umask;
 		inode->i_mode |= S_IFREG;
 		inode->i_ctime = inode->i_atime = inode->i_mtime =
-				timespec_to_timespec64(hfs_m_to_utime(rec->file.MdDat));
+				hfs_m_to_utime(rec->file.MdDat);
 		inode->i_op = &hfs_file_inode_operations;
 		inode->i_fop = &hfs_file_operations;
 		inode->i_mapping->a_ops = &hfs_aops;
@@ -362,7 +362,7 @@ static int hfs_read_inode(struct inode *inode, void *data)
 		HFS_I(inode)->fs_blocks = 0;
 		inode->i_mode = S_IFDIR | (S_IRWXUGO & ~hsb->s_dir_umask);
 		inode->i_ctime = inode->i_atime = inode->i_mtime =
-				timespec_to_timespec64(hfs_m_to_utime(rec->dir.MdDat));
+				hfs_m_to_utime(rec->dir.MdDat);
 		inode->i_op = &hfs_dir_inode_operations;
 		inode->i_fop = &hfs_dir_operations;
 		break;
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index b8471bf..3b03fff 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -533,13 +533,31 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector, void *buf,
 		       void **data, int op, int op_flags);
 int hfsplus_read_wrapper(struct super_block *sb);
 
-/* time macros */
-#define __hfsp_mt2ut(t)		(be32_to_cpu(t) - 2082844800U)
-#define __hfsp_ut2mt(t)		(cpu_to_be32(t + 2082844800U))
+/*
+ * time helpers: convert between 1904-base and 1970-base timestamps
+ *
+ * HFS+ implementations are highly inconsistent, this one matches the
+ * traditional behavior of 64-bit Linux, giving the most useful
+ * time range between 1970 and 2106, by treating any on-disk timestamp
+ * under HFSPLUS_UTC_OFFSET (Jan 1 1970) as a time between 2040 and 2106.
+ */
+#define HFSPLUS_UTC_OFFSET 2082844800U
+
+static inline time64_t __hfsp_mt2ut(__be32 mt)
+{
+	time64_t ut = (u32)(be32_to_cpu(mt) - HFSPLUS_UTC_OFFSET);
+
+	return ut;
+}
+
+static inline __be32 __hfsp_ut2mt(time64_t ut)
+{
+	return cpu_to_be32(lower_32_bits(ut) + HFSPLUS_UTC_OFFSET);
+}
 
 /* compatibility */
-#define hfsp_mt2ut(t)		(struct timespec){ .tv_sec = __hfsp_mt2ut(t) }
+#define hfsp_mt2ut(t)		(struct timespec64){ .tv_sec = __hfsp_mt2ut(t) }
 #define hfsp_ut2mt(t)		__hfsp_ut2mt((t).tv_sec)
-#define hfsp_now2mt()		__hfsp_ut2mt(get_seconds())
+#define hfsp_now2mt()		__hfsp_ut2mt(ktime_get_real_seconds())
 
 #endif
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index d131c8e..94bd83b 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -504,9 +504,9 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
 		hfsplus_get_perms(inode, &folder->permissions, 1);
 		set_nlink(inode, 1);
 		inode->i_size = 2 + be32_to_cpu(folder->valence);
-		inode->i_atime = timespec_to_timespec64(hfsp_mt2ut(folder->access_date));
-		inode->i_mtime = timespec_to_timespec64(hfsp_mt2ut(folder->content_mod_date));
-		inode->i_ctime = timespec_to_timespec64(hfsp_mt2ut(folder->attribute_mod_date));
+		inode->i_atime = hfsp_mt2ut(folder->access_date);
+		inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
+		inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
 		HFSPLUS_I(inode)->create_date = folder->create_date;
 		HFSPLUS_I(inode)->fs_blocks = 0;
 		if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
@@ -542,9 +542,9 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
 			init_special_inode(inode, inode->i_mode,
 					   be32_to_cpu(file->permissions.dev));
 		}
-		inode->i_atime = timespec_to_timespec64(hfsp_mt2ut(file->access_date));
-		inode->i_mtime = timespec_to_timespec64(hfsp_mt2ut(file->content_mod_date));
-		inode->i_ctime = timespec_to_timespec64(hfsp_mt2ut(file->attribute_mod_date));
+		inode->i_atime = hfsp_mt2ut(file->access_date);
+		inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
+		inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
 		HFSPLUS_I(inode)->create_date = file->create_date;
 	} else {
 		pr_err("bad catalog entry used to create inode\n");
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index f4295aa..69cb796 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -37,16 +37,20 @@
  * is on, and remove the appropriate bits from attr->ia_mode (attr is a
  * "struct iattr *"). -BlaisorBlade
  */
+struct hostfs_timespec {
+	long long tv_sec;
+	long long tv_nsec;
+};
 
 struct hostfs_iattr {
-	unsigned int	ia_valid;
-	unsigned short	ia_mode;
-	uid_t		ia_uid;
-	gid_t		ia_gid;
-	loff_t		ia_size;
-	struct timespec	ia_atime;
-	struct timespec	ia_mtime;
-	struct timespec	ia_ctime;
+	unsigned int		ia_valid;
+	unsigned short		ia_mode;
+	uid_t			ia_uid;
+	gid_t			ia_gid;
+	loff_t			ia_size;
+	struct hostfs_timespec	ia_atime;
+	struct hostfs_timespec	ia_mtime;
+	struct hostfs_timespec	ia_ctime;
 };
 
 struct hostfs_stat {
@@ -56,7 +60,7 @@ struct hostfs_stat {
 	unsigned int uid;
 	unsigned int gid;
 	unsigned long long size;
-	struct timespec atime, mtime, ctime;
+	struct hostfs_timespec atime, mtime, ctime;
 	unsigned int blksize;
 	unsigned long long blocks;
 	unsigned int maj;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 5a7eb0c..e6b8c49 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -549,9 +549,9 @@ static int read_name(struct inode *ino, char *name)
 	set_nlink(ino, st.nlink);
 	i_uid_write(ino, st.uid);
 	i_gid_write(ino, st.gid);
-	ino->i_atime = timespec_to_timespec64(st.atime);
-	ino->i_mtime = timespec_to_timespec64(st.mtime);
-	ino->i_ctime = timespec_to_timespec64(st.ctime);
+	ino->i_atime = (struct timespec64){ st.atime.tv_sec, st.atime.tv_nsec };
+	ino->i_mtime = (struct timespec64){ st.mtime.tv_sec, st.mtime.tv_nsec };
+	ino->i_ctime = (struct timespec64){ st.ctime.tv_sec, st.ctime.tv_nsec };
 	ino->i_size = st.size;
 	ino->i_blocks = st.blocks;
 	return 0;
@@ -820,15 +820,18 @@ static int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
 	}
 	if (attr->ia_valid & ATTR_ATIME) {
 		attrs.ia_valid |= HOSTFS_ATTR_ATIME;
-		attrs.ia_atime = timespec64_to_timespec(attr->ia_atime);
+		attrs.ia_atime = (struct hostfs_timespec)
+			{ attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec };
 	}
 	if (attr->ia_valid & ATTR_MTIME) {
 		attrs.ia_valid |= HOSTFS_ATTR_MTIME;
-		attrs.ia_mtime = timespec64_to_timespec(attr->ia_mtime);
+		attrs.ia_mtime = (struct hostfs_timespec)
+			{ attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec };
 	}
 	if (attr->ia_valid & ATTR_CTIME) {
 		attrs.ia_valid |= HOSTFS_ATTR_CTIME;
-		attrs.ia_ctime = timespec64_to_timespec(attr->ia_ctime);
+		attrs.ia_ctime = (struct hostfs_timespec)
+			{ attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec };
 	}
 	if (attr->ia_valid & ATTR_ATIME_SET) {
 		attrs.ia_valid |= HOSTFS_ATTR_ATIME_SET;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a66e425..aff8642 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -73,7 +73,7 @@ enum hugetlb_param {
 	Opt_uid,
 };
 
-static const struct fs_parameter_spec hugetlb_param_specs[] = {
+static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
 	fsparam_u32   ("gid",		Opt_gid),
 	fsparam_string("min_size",	Opt_min_size),
 	fsparam_u32   ("mode",		Opt_mode),
@@ -84,11 +84,6 @@ static const struct fs_parameter_spec hugetlb_param_specs[] = {
 	{}
 };
 
-static const struct fs_parameter_description hugetlb_fs_parameters = {
-	.name		= "hugetlbfs",
-	.specs		= hugetlb_param_specs,
-};
-
 #ifdef CONFIG_NUMA
 static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
 					struct inode *inode, pgoff_t index)
@@ -1171,7 +1166,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
 	unsigned long ps;
 	int opt;
 
-	opt = fs_parse(fc, &hugetlb_fs_parameters, param, &result);
+	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
 
@@ -1233,7 +1228,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
 	}
 
 bad_val:
-	return invalf(fc, "hugetlbfs: Bad value '%s' for mount option '%s'\n",
+	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
 		      param->string, param->key);
 }
 
@@ -1358,7 +1353,7 @@ static int hugetlbfs_init_fs_context(struct fs_context *fc)
 static struct file_system_type hugetlbfs_fs_type = {
 	.name			= "hugetlbfs",
 	.init_fs_context	= hugetlbfs_init_fs_context,
-	.parameters		= &hugetlb_fs_parameters,
+	.parameters		= hugetlb_fs_parameters,
 	.kill_sb		= kill_litter_super,
 };
 
diff --git a/fs/inode.c b/fs/inode.c
index ea15c6d..7d57068 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1599,25 +1599,31 @@ void iput(struct inode *inode)
 }
 EXPORT_SYMBOL(iput);
 
+#ifdef CONFIG_BLOCK
 /**
  *	bmap	- find a block number in a file
- *	@inode: inode of file
- *	@block: block to find
+ *	@inode:  inode owning the block number being requested
+ *	@block: pointer containing the block to find
  *
- *	Returns the block number on the device holding the inode that
- *	is the disk block number for the block of the file requested.
- *	That is, asked for block 4 of inode 1 the function will return the
- *	disk block relative to the disk start that holds that block of the
- *	file.
+ *	Replaces the value in *block with the block number on the device holding
+ *	the inode that corresponds to the requested block number in the file.
+ *	That is, asked for block 4 of inode 1 the function will replace the
+ *	4 in *block, with disk block relative to the disk start that holds that
+ *	block of the file.
+ *
+ *	Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a
+ *	hole, returns 0 and *block is also set to 0.
  */
-sector_t bmap(struct inode *inode, sector_t block)
+int bmap(struct inode *inode, sector_t *block)
 {
-	sector_t res = 0;
-	if (inode->i_mapping->a_ops->bmap)
-		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
-	return res;
+	if (!inode->i_mapping->a_ops->bmap)
+		return -EINVAL;
+
+	*block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block);
+	return 0;
 }
 EXPORT_SYMBOL(bmap);
+#endif
 
 /*
  * With relative atime, only update atime if the previous atime is
@@ -1683,12 +1689,9 @@ EXPORT_SYMBOL(generic_update_time);
  */
 static int update_time(struct inode *inode, struct timespec64 *time, int flags)
 {
-	int (*update_time)(struct inode *, struct timespec64 *, int);
-
-	update_time = inode->i_op->update_time ? inode->i_op->update_time :
-		generic_update_time;
-
-	return update_time(inode, time, flags);
+	if (inode->i_op->update_time)
+		return inode->i_op->update_time(inode, time, flags);
+	return generic_update_time(inode, time, flags);
 }
 
 /**
@@ -2154,30 +2157,6 @@ void inode_nohighmem(struct inode *inode)
 EXPORT_SYMBOL(inode_nohighmem);
 
 /**
- * timespec64_trunc - Truncate timespec64 to a granularity
- * @t: Timespec64
- * @gran: Granularity in ns.
- *
- * Truncate a timespec64 to a granularity. Always rounds down. gran must
- * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
- */
-struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran)
-{
-	/* Avoid division in the common cases 1 ns and 1 s. */
-	if (gran == 1) {
-		/* nothing */
-	} else if (gran == NSEC_PER_SEC) {
-		t.tv_nsec = 0;
-	} else if (gran > 1 && gran < NSEC_PER_SEC) {
-		t.tv_nsec -= t.tv_nsec % gran;
-	} else {
-		WARN(1, "illegal file time granularity: %u", gran);
-	}
-	return t;
-}
-EXPORT_SYMBOL(timespec64_trunc);
-
-/**
  * timestamp_truncate - Truncate timespec to a granularity
  * @t: Timespec
  * @inode: inode being updated
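
The bmap() rework earlier in this file changes the calling convention: the block number now travels through a sector_t pointer and the return value is 0 or -EINVAL (no ->bmap in the address_space operations), with *block set to 0 when the mapping falls into a hole. A hedged caller sketch under the new signature; kernel context is assumed, and the helper name and the -ENXIO hole policy are illustrative, not from the patch:

/* Illustrative caller of the new bmap() interface shown above. */
static int example_get_disk_block(struct inode *inode, sector_t lblock,
				  sector_t *dblock)
{
	sector_t block = lblock;
	int error;

	error = bmap(inode, &block);	/* -EINVAL if the fs has no ->bmap */
	if (error)
		return error;
	if (block == 0)			/* mapping fell into a hole */
		return -ENXIO;
	*dblock = block;
	return 0;
}
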
diff --git a/fs/internal.h b/fs/internal.h
index e3fa695..f3f280b 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -124,6 +124,8 @@ extern struct file *do_filp_open(int dfd, struct filename *pathname,
 		const struct open_flags *op);
 extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
 		const char *, const struct open_flags *);
+extern struct open_how build_open_how(int flags, umode_t mode);
+extern int build_open_flags(const struct open_how *how, struct open_flags *op);
 
 long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
 long do_faccessat(int dfd, const char __user *filename, int mode);
@@ -180,11 +182,11 @@ extern void mnt_pin_kill(struct mount *m);
  */
 extern const struct dentry_operations ns_dentry_operations;
 
-/*
- * fs/ioctl.c
- */
-extern int do_vfs_ioctl(struct file *file, unsigned int fd, unsigned int cmd,
-		    unsigned long arg);
-
 /* direct-io.c: */
 int sb_init_dio_done_wq(struct super_block *sb);
+
+/*
+ * fs/stat.c:
+ */
+unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags, int flags);
+int cp_statx(const struct kstat *stat, struct statx __user *buffer);
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 5147d22..cb60a42b 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -56,7 +56,8 @@ struct io_worker {
 
 	struct rcu_head rcu;
 	struct mm_struct *mm;
-	const struct cred *creds;
+	const struct cred *cur_creds;
+	const struct cred *saved_creds;
 	struct files_struct *restore_files;
 };
 
@@ -109,10 +110,10 @@ struct io_wq {
 
 	struct task_struct *manager;
 	struct user_struct *user;
-	const struct cred *creds;
-	struct mm_struct *mm;
 	refcount_t refs;
 	struct completion done;
+
+	refcount_t use_refs;
 };
 
 static bool io_worker_get(struct io_worker *worker)
@@ -135,9 +136,9 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
 {
 	bool dropped_lock = false;
 
-	if (worker->creds) {
-		revert_creds(worker->creds);
-		worker->creds = NULL;
+	if (worker->saved_creds) {
+		revert_creds(worker->saved_creds);
+		worker->cur_creds = worker->saved_creds = NULL;
 	}
 
 	if (current->files != worker->restore_files) {
@@ -396,6 +397,43 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
 	return NULL;
 }
 
+static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
+{
+	if (worker->mm) {
+		unuse_mm(worker->mm);
+		mmput(worker->mm);
+		worker->mm = NULL;
+	}
+	if (!work->mm) {
+		set_fs(KERNEL_DS);
+		return;
+	}
+	if (mmget_not_zero(work->mm)) {
+		use_mm(work->mm);
+		if (!worker->mm)
+			set_fs(USER_DS);
+		worker->mm = work->mm;
+		/* hang on to this mm */
+		work->mm = NULL;
+		return;
+	}
+
+	/* failed grabbing mm, ensure work gets cancelled */
+	work->flags |= IO_WQ_WORK_CANCEL;
+}
+
+static void io_wq_switch_creds(struct io_worker *worker,
+			       struct io_wq_work *work)
+{
+	const struct cred *old_creds = override_creds(work->creds);
+
+	worker->cur_creds = work->creds;
+	if (worker->saved_creds)
+		put_cred(old_creds); /* creds set by previous switch */
+	else
+		worker->saved_creds = old_creds;
+}
+
 static void io_worker_handle_work(struct io_worker *worker)
 	__releases(wqe->lock)
 {
@@ -438,24 +476,19 @@ static void io_worker_handle_work(struct io_worker *worker)
 		if (work->flags & IO_WQ_WORK_CB)
 			work->func(&work);
 
-		if ((work->flags & IO_WQ_WORK_NEEDS_FILES) &&
-		    current->files != work->files) {
+		if (work->files && current->files != work->files) {
 			task_lock(current);
 			current->files = work->files;
 			task_unlock(current);
 		}
-		if ((work->flags & IO_WQ_WORK_NEEDS_USER) && !worker->mm &&
-		    wq->mm) {
-			if (mmget_not_zero(wq->mm)) {
-				use_mm(wq->mm);
-				set_fs(USER_DS);
-				worker->mm = wq->mm;
-			} else {
-				work->flags |= IO_WQ_WORK_CANCEL;
-			}
-		}
-		if (!worker->creds)
-			worker->creds = override_creds(wq->creds);
+		if (work->mm != worker->mm)
+			io_wq_switch_mm(worker, work);
+		if (worker->cur_creds != work->creds)
+			io_wq_switch_creds(worker, work);
+		/*
+		 * OK to set IO_WQ_WORK_CANCEL even for uncancellable work,
+		 * the worker function will do the right thing.
+		 */
 		if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
 			work->flags |= IO_WQ_WORK_CANCEL;
 		if (worker->mm)
@@ -720,6 +753,7 @@ static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
 {
 	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
+	int work_flags;
 	unsigned long flags;
 
 	/*
@@ -734,12 +768,14 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
 		return;
 	}
 
+	work_flags = work->flags;
 	spin_lock_irqsave(&wqe->lock, flags);
 	wq_list_add_tail(&work->list, &wqe->work_list);
 	wqe->flags &= ~IO_WQE_FLAG_STALLED;
 	spin_unlock_irqrestore(&wqe->lock, flags);
 
-	if (!atomic_read(&acct->nr_running))
+	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
+	    !atomic_read(&acct->nr_running))
 		io_wqe_wake_worker(wqe, acct);
 }
 
@@ -828,6 +864,7 @@ static bool io_work_cancel(struct io_worker *worker, void *cancel_data)
 	 */
 	spin_lock_irqsave(&worker->lock, flags);
 	if (worker->cur_work &&
+	    !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
 	    data->cancel(worker->cur_work, data->caller_data)) {
 		send_sig(SIGINT, worker->task, 1);
 		ret = true;
@@ -902,7 +939,8 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 		return false;
 
 	spin_lock_irqsave(&worker->lock, flags);
-	if (worker->cur_work == work) {
+	if (worker->cur_work == work &&
+	    !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL)) {
 		send_sig(SIGINT, worker->task, 1);
 		ret = true;
 	}
@@ -1026,7 +1064,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 
 	/* caller must already hold a reference to this */
 	wq->user = data->user;
-	wq->creds = data->creds;
 
 	for_each_node(node) {
 		struct io_wqe *wqe;
@@ -1053,9 +1090,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 
 	init_completion(&wq->done);
 
-	/* caller must have already done mmgrab() on this mm */
-	wq->mm = data->mm;
-
 	wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
 	if (!IS_ERR(wq->manager)) {
 		wake_up_process(wq->manager);
@@ -1064,6 +1098,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 			ret = -ENOMEM;
 			goto err;
 		}
+		refcount_set(&wq->use_refs, 1);
 		reinit_completion(&wq->done);
 		return wq;
 	}
@@ -1078,13 +1113,21 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 	return ERR_PTR(ret);
 }
 
+bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
+{
+	if (data->get_work != wq->get_work || data->put_work != wq->put_work)
+		return false;
+
+	return refcount_inc_not_zero(&wq->use_refs);
+}
+
 static bool io_wq_worker_wake(struct io_worker *worker, void *data)
 {
 	wake_up_process(worker->task);
 	return false;
 }
 
-void io_wq_destroy(struct io_wq *wq)
+static void __io_wq_destroy(struct io_wq *wq)
 {
 	int node;
 
@@ -1104,3 +1147,9 @@ void io_wq_destroy(struct io_wq *wq)
 	kfree(wq->wqes);
 	kfree(wq);
 }
+
+void io_wq_destroy(struct io_wq *wq)
+{
+	if (refcount_dec_and_test(&wq->use_refs))
+		__io_wq_destroy(wq);
+}
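
With the io-wq changes above, the mm and credentials are no longer per-workqueue state: each io_wq_work carries its own ->mm and ->creds, and a worker only switches via io_wq_switch_mm()/io_wq_switch_creds() when they differ from what it currently runs with. A hedged submission-side sketch, mirroring io_req_work_grab_env() in the io_uring hunks further down; the wrapper itself is hypothetical:

/* Hypothetical submitter populating the new per-work fields before queueing;
 * the references are dropped again on the teardown side (mmdrop/put_cred). */
static void example_enqueue(struct io_wq *wq, struct io_wq_work *work,
			    void (*func)(struct io_wq_work **))
{
	INIT_IO_WORK(work, func);		/* clears flags, files, mm, creds */

	mmgrab(current->mm);			/* pin the submitter's mm */
	work->mm = current->mm;
	work->creds = get_current_cred();	/* pin the submitter's creds */

	io_wq_enqueue(wq, work);
}
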
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 3f5e356..50b3378 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -7,11 +7,11 @@ enum {
 	IO_WQ_WORK_CANCEL	= 1,
 	IO_WQ_WORK_HAS_MM	= 2,
 	IO_WQ_WORK_HASHED	= 4,
-	IO_WQ_WORK_NEEDS_USER	= 8,
-	IO_WQ_WORK_NEEDS_FILES	= 16,
 	IO_WQ_WORK_UNBOUND	= 32,
 	IO_WQ_WORK_INTERNAL	= 64,
 	IO_WQ_WORK_CB		= 128,
+	IO_WQ_WORK_NO_CANCEL	= 256,
+	IO_WQ_WORK_CONCURRENT	= 512,
 
 	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
 };
@@ -72,6 +72,8 @@ struct io_wq_work {
 	};
 	void (*func)(struct io_wq_work **);
 	struct files_struct *files;
+	struct mm_struct *mm;
+	const struct cred *creds;
 	unsigned flags;
 };
 
@@ -81,21 +83,22 @@ struct io_wq_work {
 		(work)->func = _func;			\
 		(work)->flags = 0;			\
 		(work)->files = NULL;			\
+		(work)->mm = NULL;			\
+		(work)->creds = NULL;			\
 	} while (0)					\
 
 typedef void (get_work_fn)(struct io_wq_work *);
 typedef void (put_work_fn)(struct io_wq_work *);
 
 struct io_wq_data {
-	struct mm_struct *mm;
 	struct user_struct *user;
-	const struct cred *creds;
 
 	get_work_fn *get_work;
 	put_work_fn *put_work;
 };
 
 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
+bool io_wq_get(struct io_wq *wq, struct io_wq_data *data);
 void io_wq_destroy(struct io_wq *wq);
 
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e54556b..77f22c3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -46,6 +46,7 @@
 #include <linux/compat.h>
 #include <linux/refcount.h>
 #include <linux/uio.h>
+#include <linux/bits.h>
 
 #include <linux/sched/signal.h>
 #include <linux/fs.h>
@@ -70,6 +71,10 @@
 #include <linux/sizes.h>
 #include <linux/hugetlb.h>
 #include <linux/highmem.h>
+#include <linux/namei.h>
+#include <linux/fsnotify.h>
+#include <linux/fadvise.h>
+#include <linux/eventpoll.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
@@ -177,6 +182,21 @@ struct fixed_file_table {
 	struct file		**files;
 };
 
+enum {
+	FFD_F_ATOMIC,
+};
+
+struct fixed_file_data {
+	struct fixed_file_table		*table;
+	struct io_ring_ctx		*ctx;
+
+	struct percpu_ref		refs;
+	struct llist_head		put_llist;
+	unsigned long			state;
+	struct work_struct		ref_work;
+	struct completion		done;
+};
+
 struct io_ring_ctx {
 	struct {
 		struct percpu_ref	refs;
@@ -184,10 +204,11 @@ struct io_ring_ctx {
 
 	struct {
 		unsigned int		flags;
-		bool			compat;
-		bool			account_mem;
-		bool			cq_overflow_flushed;
-		bool			drain_next;
+		int			compat: 1;
+		int			account_mem: 1;
+		int			cq_overflow_flushed: 1;
+		int			drain_next: 1;
+		int			eventfd_async: 1;
 
 		/*
 		 * Ring buffer of indices into array of io_uring_sqe, which is
@@ -207,13 +228,14 @@ struct io_ring_ctx {
 		unsigned		sq_thread_idle;
 		unsigned		cached_sq_dropped;
 		atomic_t		cached_cq_overflow;
-		struct io_uring_sqe	*sq_sqes;
+		unsigned long		sq_check_overflow;
 
 		struct list_head	defer_list;
 		struct list_head	timeout_list;
 		struct list_head	cq_overflow_list;
 
 		wait_queue_head_t	inflight_wait;
+		struct io_uring_sqe	*sq_sqes;
 	} ____cacheline_aligned_in_smp;
 
 	struct io_rings	*rings;
@@ -229,8 +251,10 @@ struct io_ring_ctx {
 	 * readers must ensure that ->refs is alive as long as the file* is
 	 * used. Only updated through io_uring_register(2).
 	 */
-	struct fixed_file_table	*file_table;
+	struct fixed_file_data	*file_data;
 	unsigned		nr_user_files;
+	int 			ring_fd;
+	struct file 		*ring_file;
 
 	/* if used, fixed mapped user buffers */
 	unsigned		nr_user_bufs;
@@ -250,11 +274,14 @@ struct io_ring_ctx {
 	struct socket		*ring_sock;
 #endif
 
+	struct idr		personality_idr;
+
 	struct {
 		unsigned		cached_cq_tail;
 		unsigned		cq_entries;
 		unsigned		cq_mask;
 		atomic_t		cq_timeouts;
+		unsigned long		cq_check_overflow;
 		struct wait_queue_head	cq_wait;
 		struct fasync_struct	*cq_fasync;
 		struct eventfd_ctx	*cq_ev_fd;
@@ -267,7 +294,8 @@ struct io_ring_ctx {
 
 	struct {
 		spinlock_t		completion_lock;
-		bool			poll_multi_file;
+		struct llist_head	poll_llist;
+
 		/*
 		 * ->poll_list is protected by the ctx->uring_lock for
 		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
@@ -277,6 +305,7 @@ struct io_ring_ctx {
 		struct list_head	poll_list;
 		struct hlist_head	*cancel_hash;
 		unsigned		cancel_hash_bits;
+		bool			poll_multi_file;
 
 		spinlock_t		inflight_lock;
 		struct list_head	inflight_list;
@@ -299,6 +328,12 @@ struct io_poll_iocb {
 	struct wait_queue_entry		wait;
 };
 
+struct io_close {
+	struct file			*file;
+	struct file			*put_file;
+	int				fd;
+};
+
 struct io_timeout_data {
 	struct io_kiocb			*req;
 	struct hrtimer			timer;
@@ -319,6 +354,7 @@ struct io_sync {
 	loff_t				len;
 	loff_t				off;
 	int				flags;
+	int				mode;
 };
 
 struct io_cancel {
@@ -348,8 +384,52 @@ struct io_connect {
 
 struct io_sr_msg {
 	struct file			*file;
-	struct user_msghdr __user	*msg;
+	union {
+		struct user_msghdr __user *msg;
+		void __user		*buf;
+	};
 	int				msg_flags;
+	size_t				len;
+};
+
+struct io_open {
+	struct file			*file;
+	int				dfd;
+	union {
+		unsigned		mask;
+	};
+	struct filename			*filename;
+	struct statx __user		*buffer;
+	struct open_how			how;
+};
+
+struct io_files_update {
+	struct file			*file;
+	u64				arg;
+	u32				nr_args;
+	u32				offset;
+};
+
+struct io_fadvise {
+	struct file			*file;
+	u64				offset;
+	u32				len;
+	u32				advice;
+};
+
+struct io_madvise {
+	struct file			*file;
+	u64				addr;
+	u32				len;
+	u32				advice;
+};
+
+struct io_epoll {
+	struct file			*file;
+	int				epfd;
+	int				op;
+	int				fd;
+	struct epoll_event		event;
 };
 
 struct io_async_connect {
@@ -370,15 +450,79 @@ struct io_async_rw {
 	ssize_t				size;
 };
 
+struct io_async_open {
+	struct filename			*filename;
+};
+
 struct io_async_ctx {
 	union {
 		struct io_async_rw	rw;
 		struct io_async_msghdr	msg;
 		struct io_async_connect	connect;
 		struct io_timeout_data	timeout;
+		struct io_async_open	open;
 	};
 };
 
+enum {
+	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
+	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
+	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
+	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
+	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
+
+	REQ_F_LINK_NEXT_BIT,
+	REQ_F_FAIL_LINK_BIT,
+	REQ_F_INFLIGHT_BIT,
+	REQ_F_CUR_POS_BIT,
+	REQ_F_NOWAIT_BIT,
+	REQ_F_IOPOLL_COMPLETED_BIT,
+	REQ_F_LINK_TIMEOUT_BIT,
+	REQ_F_TIMEOUT_BIT,
+	REQ_F_ISREG_BIT,
+	REQ_F_MUST_PUNT_BIT,
+	REQ_F_TIMEOUT_NOSEQ_BIT,
+	REQ_F_COMP_LOCKED_BIT,
+};
+
+enum {
+	/* ctx owns file */
+	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
+	/* drain existing IO first */
+	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
+	/* linked sqes */
+	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
+	/* doesn't sever on completion < 0 */
+	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
+	/* IOSQE_ASYNC */
+	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
+
+	/* already grabbed next link */
+	REQ_F_LINK_NEXT		= BIT(REQ_F_LINK_NEXT_BIT),
+	/* fail rest of links */
+	REQ_F_FAIL_LINK		= BIT(REQ_F_FAIL_LINK_BIT),
+	/* on inflight list */
+	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
+	/* read/write uses file position */
+	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
+	/* must not punt to workers */
+	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
+	/* polled IO has completed */
+	REQ_F_IOPOLL_COMPLETED	= BIT(REQ_F_IOPOLL_COMPLETED_BIT),
+	/* has linked timeout */
+	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
+	/* timeout request */
+	REQ_F_TIMEOUT		= BIT(REQ_F_TIMEOUT_BIT),
+	/* regular file */
+	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
+	/* must be punted even for NONBLOCK */
+	REQ_F_MUST_PUNT		= BIT(REQ_F_MUST_PUNT_BIT),
+	/* no timeout sequence */
+	REQ_F_TIMEOUT_NOSEQ	= BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
+	/* completion under lock */
+	REQ_F_COMP_LOCKED	= BIT(REQ_F_COMP_LOCKED_BIT),
+};
+
 /*
  * NOTE! Each of the iocb union members has the file pointer
  * as the first entry in their struct definition. So you can
@@ -396,11 +540,19 @@ struct io_kiocb {
 		struct io_timeout	timeout;
 		struct io_connect	connect;
 		struct io_sr_msg	sr_msg;
+		struct io_open		open;
+		struct io_close		close;
+		struct io_files_update	files_update;
+		struct io_fadvise	fadvise;
+		struct io_madvise	madvise;
+		struct io_epoll		epoll;
 	};
 
 	struct io_async_ctx		*io;
-	struct file			*ring_file;
-	int				ring_fd;
+	/*
+	 * llist_node is only used for poll deferred completions
+	 */
+	struct llist_node		llist_node;
 	bool				has_user;
 	bool				in_async;
 	bool				needs_fixed_file;
@@ -414,23 +566,6 @@ struct io_kiocb {
 	struct list_head	link_list;
 	unsigned int		flags;
 	refcount_t		refs;
-#define REQ_F_NOWAIT		1	/* must not punt to workers */
-#define REQ_F_IOPOLL_COMPLETED	2	/* polled IO has completed */
-#define REQ_F_FIXED_FILE	4	/* ctx owns file */
-#define REQ_F_LINK_NEXT		8	/* already grabbed next link */
-#define REQ_F_IO_DRAIN		16	/* drain existing IO first */
-#define REQ_F_IO_DRAINED	32	/* drain done */
-#define REQ_F_LINK		64	/* linked sqes */
-#define REQ_F_LINK_TIMEOUT	128	/* has linked timeout */
-#define REQ_F_FAIL_LINK		256	/* fail rest of links */
-#define REQ_F_DRAIN_LINK	512	/* link should be fully drained */
-#define REQ_F_TIMEOUT		1024	/* timeout request */
-#define REQ_F_ISREG		2048	/* regular file */
-#define REQ_F_MUST_PUNT		4096	/* must be punted even for NONBLOCK */
-#define REQ_F_TIMEOUT_NOSEQ	8192	/* no timeout sequence */
-#define REQ_F_INFLIGHT		16384	/* on inflight list */
-#define REQ_F_COMP_LOCKED	32768	/* completion under lock */
-#define REQ_F_HARDLINK		65536	/* doesn't sever on completion < 0 */
 	u64			user_data;
 	u32			result;
 	u32			sequence;
@@ -450,8 +585,7 @@ struct io_submit_state {
 	 * io_kiocb alloc cache
 	 */
 	void			*reqs[IO_IOPOLL_BATCH];
-	unsigned		int free_reqs;
-	unsigned		int cur_req;
+	unsigned int		free_reqs;
 
 	/*
 	 * File reference cache
@@ -463,14 +597,163 @@ struct io_submit_state {
 	unsigned int		ios_left;
 };
 
+struct io_op_def {
+	/* needs req->io allocated for deferral/async */
+	unsigned		async_ctx : 1;
+	/* needs current->mm setup, does mm access */
+	unsigned		needs_mm : 1;
+	/* needs req->file assigned */
+	unsigned		needs_file : 1;
+	/* needs req->file assigned IFF fd is >= 0 */
+	unsigned		fd_non_neg : 1;
+	/* hash wq insertion if file is a regular file */
+	unsigned		hash_reg_file : 1;
+	/* unbound wq insertion if file is a non-regular file */
+	unsigned		unbound_nonreg_file : 1;
+	/* opcode is not supported by this kernel */
+	unsigned		not_supported : 1;
+	/* needs file table */
+	unsigned		file_table : 1;
+};
+
+static const struct io_op_def io_op_defs[] = {
+	[IORING_OP_NOP] = {},
+	[IORING_OP_READV] = {
+		.async_ctx		= 1,
+		.needs_mm		= 1,
+		.needs_file		= 1,
+		.unbound_nonreg_file	= 1,
+	},
+	[IORING_OP_WRITEV] = {
+		.async_ctx		= 1,
+		.needs_mm		= 1,
+		.needs_file		= 1,
+		.hash_reg_file		= 1,
+		.unbound_nonreg_file	= 1,
+	},
+	[IORING_OP_FSYNC] = {
+		.needs_file		= 1,
+	},
+	[IORING_OP_READ_FIXED] = {
+		.needs_file		= 1,
+		.unbound_nonreg_file	= 1,
+	},
+	[IORING_OP_WRITE_FIXED] = {
+		.needs_file		= 1,
+		.hash_reg_file		= 1,
+		.unbound_nonreg_file	= 1,
+	},
+	[IORING_OP_POLL_ADD] = {
+		.needs_file		= 1,
+		.unbound_nonreg_file	= 1,
+	},
+	[IORING_OP_POLL_REMOVE] = {},
+	[IORING_OP_SYNC_FILE_RANGE] = {
+		.needs_file		= 1,
+	},
+	[IORING_OP_SENDMSG] = {
+		.async_ctx		= 1,
+		.needs_mm		= 1,
+		.needs_file		= 1,
+		.unbound_nonreg_file	= 1,
+	},
+	[IORING_OP_RECVMSG] = {
+		.async_ctx		= 1,
+		.needs_mm		= 1,
+		.needs_file		= 1,
+		.unbound_nonreg_file	= 1,
+	},
+	[IORING_OP_TIMEOUT] = {
+		.async_ctx		= 1,
+		.needs_mm		= 1,
+	},
+	[IORING_OP_TIMEOUT_REMOVE] = {},
+	[IORING_OP_ACCEPT] = {
+		.needs_mm		= 1,
+		.needs_file		= 1,
+		.unbound_nonreg_file	= 1,
+		.file_table		= 1,
+	},
+	[IORING_OP_ASYNC_CANCEL] = {},
+	[IORING_OP_LINK_TIMEOUT] = {
+		.async_ctx		= 1,
+		.needs_mm		= 1,
+	},
+	[IORING_OP_CONNECT] = {
+		.async_ctx		= 1,
+		.needs_mm		= 1,
+		.needs_file		= 1,
+		.unbound_nonreg_file	= 1,
+	},
+	[IORING_OP_FALLOCATE] = {
+		.needs_file		= 1,
+	},
+	[IORING_OP_OPENAT] = {
+		.needs_file		= 1,
+		.fd_non_neg		= 1,
+		.file_table		= 1,
+	},
+	[IORING_OP_CLOSE] = {
+		.needs_file		= 1,
+		.file_table		= 1,
+	},
+	[IORING_OP_FILES_UPDATE] = {
+		.needs_mm		= 1,
+		.file_table		= 1,
+	},
+	[IORING_OP_STATX] = {
+		.needs_mm		= 1,
+		.needs_file		= 1,
+		.fd_non_neg		= 1,
+	},
+	[IORING_OP_READ] = {
+		.needs_mm		= 1,
+		.needs_file		= 1,
+		.unbound_nonreg_file	= 1,
+	},
+	[IORING_OP_WRITE] = {
+		.needs_mm		= 1,
+		.needs_file		= 1,
+		.unbound_nonreg_file	= 1,
+	},
+	[IORING_OP_FADVISE] = {
+		.needs_file		= 1,
+	},
+	[IORING_OP_MADVISE] = {
+		.needs_mm		= 1,
+	},
+	[IORING_OP_SEND] = {
+		.needs_mm		= 1,
+		.needs_file		= 1,
+		.unbound_nonreg_file	= 1,
+	},
+	[IORING_OP_RECV] = {
+		.needs_mm		= 1,
+		.needs_file		= 1,
+		.unbound_nonreg_file	= 1,
+	},
+	[IORING_OP_OPENAT2] = {
+		.needs_file		= 1,
+		.fd_non_neg		= 1,
+		.file_table		= 1,
+	},
+	[IORING_OP_EPOLL_CTL] = {
+		.unbound_nonreg_file	= 1,
+		.file_table		= 1,
+	},
+};
+
 static void io_wq_submit_work(struct io_wq_work **workptr);
 static void io_cqring_fill_event(struct io_kiocb *req, long res);
-static void __io_free_req(struct io_kiocb *req);
 static void io_put_req(struct io_kiocb *req);
-static void io_double_put_req(struct io_kiocb *req);
 static void __io_double_put_req(struct io_kiocb *req);
 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
 static void io_queue_linked_timeout(struct io_kiocb *req);
+static int __io_sqe_files_update(struct io_ring_ctx *ctx,
+				 struct io_uring_files_update *ip,
+				 unsigned nr_args);
+static int io_grab_files(struct io_kiocb *req);
+static void io_ring_file_ref_flush(struct fixed_file_data *data);
 
 static struct kmem_cache *req_cachep;
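
The io_op_defs[] table above replaces the old per-opcode switch statements with declarative properties that the later hunks simply index by req->opcode, e.g. to decide whether to pin current->mm or hash regular-file writes. A condensed sketch of that lookup, following io_prep_async_work() and io_req_work_grab_env() further down; "req" and "do_hashed" are assumed to exist in the surrounding function:

	/* Condensed from the later hunks; not a complete function. */
	const struct io_op_def *def = &io_op_defs[req->opcode];

	if (def->needs_mm && !req->work.mm) {
		mmgrab(current->mm);			/* per-request mm reference */
		req->work.mm = current->mm;
	}
	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file)
			do_hashed = true;		/* serialize writes to the same regular file */
	} else if (def->unbound_nonreg_file) {
		req->work.flags |= IO_WQ_WORK_UNBOUND;	/* e.g. sockets go to unbound workers */
	}
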
 
@@ -537,9 +820,11 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
 	init_completion(&ctx->completions[0]);
 	init_completion(&ctx->completions[1]);
+	idr_init(&ctx->personality_idr);
 	mutex_init(&ctx->uring_lock);
 	init_waitqueue_head(&ctx->wait);
 	spin_lock_init(&ctx->completion_lock);
+	init_llist_head(&ctx->poll_llist);
 	INIT_LIST_HEAD(&ctx->poll_list);
 	INIT_LIST_HEAD(&ctx->defer_list);
 	INIT_LIST_HEAD(&ctx->timeout_list);
@@ -566,7 +851,7 @@ static inline bool __req_need_defer(struct io_kiocb *req)
 
 static inline bool req_need_defer(struct io_kiocb *req)
 {
-	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN)
+	if (unlikely(req->flags & REQ_F_IO_DRAIN))
 		return __req_need_defer(req);
 
 	return false;
@@ -606,53 +891,53 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
 {
 	struct io_rings *rings = ctx->rings;
 
-	if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
-		/* order cqe stores with ring update */
-		smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
+	/* order cqe stores with ring update */
+	smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
 
-		if (wq_has_sleeper(&ctx->cq_wait)) {
-			wake_up_interruptible(&ctx->cq_wait);
-			kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
-		}
+	if (wq_has_sleeper(&ctx->cq_wait)) {
+		wake_up_interruptible(&ctx->cq_wait);
+		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
 	}
 }
 
-static inline bool io_req_needs_user(struct io_kiocb *req)
+static inline void io_req_work_grab_env(struct io_kiocb *req,
+					const struct io_op_def *def)
 {
-	return !(req->opcode == IORING_OP_READ_FIXED ||
-		 req->opcode == IORING_OP_WRITE_FIXED);
+	if (!req->work.mm && def->needs_mm) {
+		mmgrab(current->mm);
+		req->work.mm = current->mm;
+	}
+	if (!req->work.creds)
+		req->work.creds = get_current_cred();
+}
+
+static inline void io_req_work_drop_env(struct io_kiocb *req)
+{
+	if (req->work.mm) {
+		mmdrop(req->work.mm);
+		req->work.mm = NULL;
+	}
+	if (req->work.creds) {
+		put_cred(req->work.creds);
+		req->work.creds = NULL;
+	}
 }
 
 static inline bool io_prep_async_work(struct io_kiocb *req,
 				      struct io_kiocb **link)
 {
+	const struct io_op_def *def = &io_op_defs[req->opcode];
 	bool do_hashed = false;
 
-	switch (req->opcode) {
-	case IORING_OP_WRITEV:
-	case IORING_OP_WRITE_FIXED:
-		/* only regular files should be hashed for writes */
-		if (req->flags & REQ_F_ISREG)
+	if (req->flags & REQ_F_ISREG) {
+		if (def->hash_reg_file)
 			do_hashed = true;
-		/* fall-through */
-	case IORING_OP_READV:
-	case IORING_OP_READ_FIXED:
-	case IORING_OP_SENDMSG:
-	case IORING_OP_RECVMSG:
-	case IORING_OP_ACCEPT:
-	case IORING_OP_POLL_ADD:
-	case IORING_OP_CONNECT:
-		/*
-		 * We know REQ_F_ISREG is not set on some of these
-		 * opcodes, but this enables us to keep the check in
-		 * just one place.
-		 */
-		if (!(req->flags & REQ_F_ISREG))
+	} else {
+		if (def->unbound_nonreg_file)
 			req->work.flags |= IO_WQ_WORK_UNBOUND;
-		break;
 	}
-	if (io_req_needs_user(req))
-		req->work.flags |= IO_WQ_WORK_NEEDS_USER;
+
+	io_req_work_grab_env(req, def);
 
 	*link = io_prep_linked_timeout(req);
 	return do_hashed;
@@ -711,10 +996,8 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
 
 	__io_commit_cqring(ctx);
 
-	while ((req = io_get_deferred_req(ctx)) != NULL) {
-		req->flags |= REQ_F_IO_DRAINED;
+	while ((req = io_get_deferred_req(ctx)) != NULL)
 		io_queue_async_work(req);
-	}
 }
 
 static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
@@ -735,16 +1018,30 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
 	return &rings->cqes[tail & ctx->cq_mask];
 }
 
-static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
+{
+	if (!ctx->cq_ev_fd)
+		return false;
+	if (!ctx->eventfd_async)
+		return true;
+	return io_wq_current_is_worker() || in_interrupt();
+}
+
+static void __io_cqring_ev_posted(struct io_ring_ctx *ctx, bool trigger_ev)
 {
 	if (waitqueue_active(&ctx->wait))
 		wake_up(&ctx->wait);
 	if (waitqueue_active(&ctx->sqo_wait))
 		wake_up(&ctx->sqo_wait);
-	if (ctx->cq_ev_fd)
+	if (trigger_ev)
 		eventfd_signal(ctx->cq_ev_fd, 1);
 }
 
+static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+{
+	__io_cqring_ev_posted(ctx, io_should_trigger_evfd(ctx));
+}
+
 /* Returns true if there are no backlogged entries after the flush */
 static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 {
@@ -766,7 +1063,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 
 	/* if force is set, the ring is going away. always drop after that */
 	if (force)
-		ctx->cq_overflow_flushed = true;
+		ctx->cq_overflow_flushed = 1;
 
 	cqe = NULL;
 	while (!list_empty(&ctx->cq_overflow_list)) {
@@ -788,6 +1085,10 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 	}
 
 	io_commit_cqring(ctx);
+	if (cqe) {
+		clear_bit(0, &ctx->sq_check_overflow);
+		clear_bit(0, &ctx->cq_check_overflow);
+	}
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 	io_cqring_ev_posted(ctx);
 
@@ -821,6 +1122,10 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res)
 		WRITE_ONCE(ctx->rings->cq_overflow,
 				atomic_inc_return(&ctx->cached_cq_overflow));
 	} else {
+		if (list_empty(&ctx->cq_overflow_list)) {
+			set_bit(0, &ctx->sq_check_overflow);
+			set_bit(0, &ctx->cq_check_overflow);
+		}
 		refcount_inc(&req->refs);
 		req->result = res;
 		list_add_tail(&req->list, &ctx->cq_overflow_list);
@@ -863,9 +1168,6 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct io_kiocb *req;
 
-	if (!percpu_ref_tryget(&ctx->refs))
-		return NULL;
-
 	if (!state) {
 		req = kmem_cache_alloc(req_cachep, gfp);
 		if (unlikely(!req))
@@ -888,17 +1190,14 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 			ret = 1;
 		}
 		state->free_reqs = ret - 1;
-		state->cur_req = 1;
-		req = state->reqs[0];
+		req = state->reqs[ret - 1];
 	} else {
-		req = state->reqs[state->cur_req];
 		state->free_reqs--;
-		state->cur_req++;
+		req = state->reqs[state->free_reqs];
 	}
 
 got_it:
 	req->io = NULL;
-	req->ring_file = NULL;
 	req->file = NULL;
 	req->ctx = ctx;
 	req->flags = 0;
@@ -915,24 +1214,35 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 	return NULL;
 }
 
-static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
+static void __io_req_do_free(struct io_kiocb *req)
 {
-	if (*nr) {
-		kmem_cache_free_bulk(req_cachep, *nr, reqs);
-		percpu_ref_put_many(&ctx->refs, *nr);
-		*nr = 0;
+	if (likely(!io_is_fallback_req(req)))
+		kmem_cache_free(req_cachep, req);
+	else
+		clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
+}
+
+static void __io_req_aux_free(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	kfree(req->io);
+	if (req->file) {
+		if (req->flags & REQ_F_FIXED_FILE)
+			percpu_ref_put(&ctx->file_data->refs);
+		else
+			fput(req->file);
 	}
+
+	io_req_work_drop_env(req);
 }
 
 static void __io_free_req(struct io_kiocb *req)
 {
-	struct io_ring_ctx *ctx = req->ctx;
+	__io_req_aux_free(req);
 
-	if (req->io)
-		kfree(req->io);
-	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
-		fput(req->file);
 	if (req->flags & REQ_F_INFLIGHT) {
+		struct io_ring_ctx *ctx = req->ctx;
 		unsigned long flags;
 
 		spin_lock_irqsave(&ctx->inflight_lock, flags);
@@ -941,11 +1251,63 @@ static void __io_free_req(struct io_kiocb *req)
 			wake_up(&ctx->inflight_wait);
 		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
 	}
-	percpu_ref_put(&ctx->refs);
-	if (likely(!io_is_fallback_req(req)))
-		kmem_cache_free(req_cachep, req);
-	else
-		clear_bit_unlock(0, (unsigned long *) ctx->fallback_req);
+
+	percpu_ref_put(&req->ctx->refs);
+	__io_req_do_free(req);
+}
+
+struct req_batch {
+	void *reqs[IO_IOPOLL_BATCH];
+	int to_free;
+	int need_iter;
+};
+
+static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
+{
+	int fixed_refs = rb->to_free;
+
+	if (!rb->to_free)
+		return;
+	if (rb->need_iter) {
+		int i, inflight = 0;
+		unsigned long flags;
+
+		fixed_refs = 0;
+		for (i = 0; i < rb->to_free; i++) {
+			struct io_kiocb *req = rb->reqs[i];
+
+			if (req->flags & REQ_F_FIXED_FILE) {
+				req->file = NULL;
+				fixed_refs++;
+			}
+			if (req->flags & REQ_F_INFLIGHT)
+				inflight++;
+			__io_req_aux_free(req);
+		}
+		if (!inflight)
+			goto do_free;
+
+		spin_lock_irqsave(&ctx->inflight_lock, flags);
+		for (i = 0; i < rb->to_free; i++) {
+			struct io_kiocb *req = rb->reqs[i];
+
+			if (req->flags & REQ_F_INFLIGHT) {
+				list_del(&req->inflight_entry);
+				if (!--inflight)
+					break;
+			}
+		}
+		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+
+		if (waitqueue_active(&ctx->inflight_wait))
+			wake_up(&ctx->inflight_wait);
+	}
+do_free:
+	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
+	if (fixed_refs)
+		percpu_ref_put_many(&ctx->file_data->refs, fixed_refs);
+	percpu_ref_put_many(&ctx->refs, rb->to_free);
+	rb->to_free = rb->need_iter = 0;
 }
 
 static bool io_link_cancel_timeout(struct io_kiocb *req)
@@ -1118,19 +1480,21 @@ static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
 {
 	struct io_rings *rings = ctx->rings;
 
-	/*
-	 * noflush == true is from the waitqueue handler, just ensure we wake
-	 * up the task, and the next invocation will flush the entries. We
-	 * cannot safely to it from here.
-	 */
-	if (noflush && !list_empty(&ctx->cq_overflow_list))
-		return -1U;
+	if (test_bit(0, &ctx->cq_check_overflow)) {
+		/*
+		 * noflush == true is from the waitqueue handler, just ensure
+		 * we wake up the task, and the next invocation will flush the
+		 * entries. We cannot safely do it from here.
+		 */
+		if (noflush && !list_empty(&ctx->cq_overflow_list))
+			return -1U;
 
-	io_cqring_overflow_flush(ctx, false);
+		io_cqring_overflow_flush(ctx, false);
+	}
 
 	/* See comment at the top of this file */
 	smp_rmb();
-	return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
+	return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
 }
 
 static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
@@ -1141,17 +1505,30 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
 	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
 }
 
+static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
+{
+	if ((req->flags & REQ_F_LINK) || io_is_fallback_req(req))
+		return false;
+
+	if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
+		rb->need_iter++;
+
+	rb->reqs[rb->to_free++] = req;
+	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
+		io_free_req_many(req->ctx, rb);
+	return true;
+}
+
 /*
  * Find and free completed poll iocbs
  */
 static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 			       struct list_head *done)
 {
-	void *reqs[IO_IOPOLL_BATCH];
+	struct req_batch rb;
 	struct io_kiocb *req;
-	int to_free;
 
-	to_free = 0;
+	rb.to_free = rb.need_iter = 0;
 	while (!list_empty(done)) {
 		req = list_first_entry(done, struct io_kiocb, list);
 		list_del(&req->list);
@@ -1159,26 +1536,13 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		io_cqring_fill_event(req, req->result);
 		(*nr_events)++;
 
-		if (refcount_dec_and_test(&req->refs)) {
-			/* If we're not using fixed files, we have to pair the
-			 * completion part with the file put. Use regular
-			 * completions for those, only batch free for fixed
-			 * file and non-linked commands.
-			 */
-			if (((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
-			    REQ_F_FIXED_FILE) && !io_is_fallback_req(req) &&
-			    !req->io) {
-				reqs[to_free++] = req;
-				if (to_free == ARRAY_SIZE(reqs))
-					io_free_req_many(ctx, reqs, &to_free);
-			} else {
-				io_free_req(req);
-			}
-		}
+		if (refcount_dec_and_test(&req->refs) &&
+		    !io_req_multi_free(&rb, req))
+			io_free_req(req);
 	}
 
 	io_commit_cqring(ctx);
-	io_free_req_many(ctx, reqs, &to_free);
+	io_free_req_many(ctx, &rb);
 }
 
 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
@@ -1496,15 +1860,19 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	unsigned ioprio;
 	int ret;
 
-	if (!req->file)
-		return -EBADF;
-
 	if (S_ISREG(file_inode(req->file)->i_mode))
 		req->flags |= REQ_F_ISREG;
 
 	kiocb->ki_pos = READ_ONCE(sqe->off);
-	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
+	if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
+		req->flags |= REQ_F_CUR_POS;
+		kiocb->ki_pos = req->file->f_pos;
+	}
 	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
+	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
+	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
+	if (unlikely(ret))
+		return ret;
 
 	ioprio = READ_ONCE(sqe->ioprio);
 	if (ioprio) {
@@ -1516,10 +1884,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	} else
 		kiocb->ki_ioprio = get_current_ioprio();
 
-	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
-	if (unlikely(ret))
-		return ret;
-
 	/* don't allow async punt if RWF_NOWAIT was requested */
 	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
 	    (req->file->f_flags & O_NONBLOCK))
@@ -1574,6 +1938,10 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
 		       bool in_async)
 {
+	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
+
+	if (req->flags & REQ_F_CUR_POS)
+		req->file->f_pos = kiocb->ki_pos;
 	if (in_async && ret >= 0 && kiocb->ki_complete == io_complete_rw)
 		*nxt = __io_complete_rw(kiocb, ret);
 	else
@@ -1671,6 +2039,13 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 	if (req->rw.kiocb.private)
 		return -EINVAL;
 
+	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
+		ssize_t ret;
+		ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
+		*iovec = NULL;
+		return ret;
+	}
+
 	if (req->io) {
 		struct io_async_rw *iorw = &req->io->rw;
 
@@ -1767,6 +2142,8 @@ static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
 
 static int io_alloc_async_ctx(struct io_kiocb *req)
 {
+	if (!io_op_defs[req->opcode].async_ctx)
+		return 0;
 	req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
 	return req->io == NULL;
 }
@@ -1786,13 +2163,14 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
 			     struct iovec *iovec, struct iovec *fast_iov,
 			     struct iov_iter *iter)
 {
-	if (req->opcode == IORING_OP_READ_FIXED ||
-	    req->opcode == IORING_OP_WRITE_FIXED)
+	if (!io_op_defs[req->opcode].async_ctx)
 		return 0;
-	if (!req->io && io_alloc_async_ctx(req))
-		return -ENOMEM;
+	if (!req->io) {
+		if (io_alloc_async_ctx(req))
+			return -ENOMEM;
 
-	io_req_map_rw(req, io_size, iovec, fast_iov, iter);
+		io_req_map_rw(req, io_size, iovec, fast_iov, iter);
+	}
 	req->work.func = io_rw_async;
 	return 0;
 }
@@ -2101,6 +2479,431 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
 	return 0;
 }
 
+static void io_fallocate_finish(struct io_wq_work **workptr)
+{
+	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+	struct io_kiocb *nxt = NULL;
+	int ret;
+
+	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
+				req->sync.len);
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+	io_put_req_find_next(req, &nxt);
+	if (nxt)
+		io_wq_assign_next(workptr, nxt);
+}
+
+static int io_fallocate_prep(struct io_kiocb *req,
+			     const struct io_uring_sqe *sqe)
+{
+	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
+		return -EINVAL;
+
+	req->sync.off = READ_ONCE(sqe->off);
+	req->sync.len = READ_ONCE(sqe->addr);
+	req->sync.mode = READ_ONCE(sqe->len);
+	return 0;
+}
+
+static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
+			bool force_nonblock)
+{
+	struct io_wq_work *work, *old_work;
+
+	/* fallocate always requires a blocking context */
+	if (force_nonblock) {
+		io_put_req(req);
+		req->work.func = io_fallocate_finish;
+		return -EAGAIN;
+	}
+
+	work = old_work = &req->work;
+	io_fallocate_finish(&work);
+	if (work && work != old_work)
+		*nxt = container_of(work, struct io_kiocb, work);
+
+	return 0;
+}
+
+static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	const char __user *fname;
+	int ret;
+
+	if (sqe->ioprio || sqe->buf_index)
+		return -EINVAL;
+
+	req->open.dfd = READ_ONCE(sqe->fd);
+	req->open.how.mode = READ_ONCE(sqe->len);
+	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
+	req->open.how.flags = READ_ONCE(sqe->open_flags);
+
+	req->open.filename = getname(fname);
+	if (IS_ERR(req->open.filename)) {
+		ret = PTR_ERR(req->open.filename);
+		req->open.filename = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
+static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	struct open_how __user *how;
+	const char __user *fname;
+	size_t len;
+	int ret;
+
+	if (sqe->ioprio || sqe->buf_index)
+		return -EINVAL;
+
+	req->open.dfd = READ_ONCE(sqe->fd);
+	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
+	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+	len = READ_ONCE(sqe->len);
+
+	if (len < OPEN_HOW_SIZE_VER0)
+		return -EINVAL;
+
+	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
+					len);
+	if (ret)
+		return ret;
+
+	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
+		req->open.how.flags |= O_LARGEFILE;
+
+	req->open.filename = getname(fname);
+	if (IS_ERR(req->open.filename)) {
+		ret = PTR_ERR(req->open.filename);
+		req->open.filename = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
+static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
+		      bool force_nonblock)
+{
+	struct open_flags op;
+	struct file *file;
+	int ret;
+
+	if (force_nonblock)
+		return -EAGAIN;
+
+	ret = build_open_flags(&req->open.how, &op);
+	if (ret)
+		goto err;
+
+	ret = get_unused_fd_flags(req->open.how.flags);
+	if (ret < 0)
+		goto err;
+
+	file = do_filp_open(req->open.dfd, req->open.filename, &op);
+	if (IS_ERR(file)) {
+		put_unused_fd(ret);
+		ret = PTR_ERR(file);
+	} else {
+		fsnotify_open(file);
+		fd_install(ret, file);
+	}
+err:
+	putname(req->open.filename);
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+	io_put_req_find_next(req, nxt);
+	return 0;
+}
+
+static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt,
+		     bool force_nonblock)
+{
+	req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
+	return io_openat2(req, nxt, force_nonblock);
+}
+
+static int io_epoll_ctl_prep(struct io_kiocb *req,
+			     const struct io_uring_sqe *sqe)
+{
+#if defined(CONFIG_EPOLL)
+	if (sqe->ioprio || sqe->buf_index)
+		return -EINVAL;
+
+	req->epoll.epfd = READ_ONCE(sqe->fd);
+	req->epoll.op = READ_ONCE(sqe->len);
+	req->epoll.fd = READ_ONCE(sqe->off);
+
+	if (ep_op_has_event(req->epoll.op)) {
+		struct epoll_event __user *ev;
+
+		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
+		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
+			return -EFAULT;
+	}
+
+	return 0;
+#else
+	return -EOPNOTSUPP;
+#endif
+}
+
+static int io_epoll_ctl(struct io_kiocb *req, struct io_kiocb **nxt,
+			bool force_nonblock)
+{
+#if defined(CONFIG_EPOLL)
+	struct io_epoll *ie = &req->epoll;
+	int ret;
+
+	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
+	if (force_nonblock && ret == -EAGAIN)
+		return -EAGAIN;
+
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+	io_put_req_find_next(req, nxt);
+	return 0;
+#else
+	return -EOPNOTSUPP;
+#endif
+}
+
+static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
+	if (sqe->ioprio || sqe->buf_index || sqe->off)
+		return -EINVAL;
+
+	req->madvise.addr = READ_ONCE(sqe->addr);
+	req->madvise.len = READ_ONCE(sqe->len);
+	req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
+	return 0;
+#else
+	return -EOPNOTSUPP;
+#endif
+}
+
+static int io_madvise(struct io_kiocb *req, struct io_kiocb **nxt,
+		      bool force_nonblock)
+{
+#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
+	struct io_madvise *ma = &req->madvise;
+	int ret;
+
+	if (force_nonblock)
+		return -EAGAIN;
+
+	ret = do_madvise(ma->addr, ma->len, ma->advice);
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+	io_put_req_find_next(req, nxt);
+	return 0;
+#else
+	return -EOPNOTSUPP;
+#endif
+}
+
+static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	if (sqe->ioprio || sqe->buf_index || sqe->addr)
+		return -EINVAL;
+
+	req->fadvise.offset = READ_ONCE(sqe->off);
+	req->fadvise.len = READ_ONCE(sqe->len);
+	req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
+	return 0;
+}
+
+static int io_fadvise(struct io_kiocb *req, struct io_kiocb **nxt,
+		      bool force_nonblock)
+{
+	struct io_fadvise *fa = &req->fadvise;
+	int ret;
+
+	if (force_nonblock) {
+		switch (fa->advice) {
+		case POSIX_FADV_NORMAL:
+		case POSIX_FADV_RANDOM:
+		case POSIX_FADV_SEQUENTIAL:
+			break;
+		default:
+			return -EAGAIN;
+		}
+	}
+
+	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+	io_put_req_find_next(req, nxt);
+	return 0;
+}
+
+static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	const char __user *fname;
+	unsigned lookup_flags;
+	int ret;
+
+	if (sqe->ioprio || sqe->buf_index)
+		return -EINVAL;
+
+	req->open.dfd = READ_ONCE(sqe->fd);
+	req->open.mask = READ_ONCE(sqe->len);
+	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
+	req->open.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+	req->open.how.flags = READ_ONCE(sqe->statx_flags);
+
+	if (vfs_stat_set_lookup_flags(&lookup_flags, req->open.how.flags))
+		return -EINVAL;
+
+	req->open.filename = getname_flags(fname, lookup_flags, NULL);
+	if (IS_ERR(req->open.filename)) {
+		ret = PTR_ERR(req->open.filename);
+		req->open.filename = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
+static int io_statx(struct io_kiocb *req, struct io_kiocb **nxt,
+		    bool force_nonblock)
+{
+	struct io_open *ctx = &req->open;
+	unsigned lookup_flags;
+	struct path path;
+	struct kstat stat;
+	int ret;
+
+	if (force_nonblock)
+		return -EAGAIN;
+
+	if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
+		return -EINVAL;
+
+retry:
+	/* filename_lookup() drops it, keep a reference */
+	ctx->filename->refcnt++;
+
+	ret = filename_lookup(ctx->dfd, ctx->filename, lookup_flags, &path,
+				NULL);
+	if (ret)
+		goto err;
+
+	ret = vfs_getattr(&path, &stat, ctx->mask, ctx->how.flags);
+	path_put(&path);
+	if (retry_estale(ret, lookup_flags)) {
+		lookup_flags |= LOOKUP_REVAL;
+		goto retry;
+	}
+	if (!ret)
+		ret = cp_statx(&stat, ctx->buffer);
+err:
+	putname(ctx->filename);
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+	io_put_req_find_next(req, nxt);
+	return 0;
+}
+
+static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	/*
+	 * If we queue this for async, it must not be cancellable. That would
+	 * leave the 'file' in an indeterminate state.
+	 */
+	req->work.flags |= IO_WQ_WORK_NO_CANCEL;
+
+	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
+	    sqe->rw_flags || sqe->buf_index)
+		return -EINVAL;
+	if (sqe->flags & IOSQE_FIXED_FILE)
+		return -EINVAL;
+
+	req->close.fd = READ_ONCE(sqe->fd);
+	if (req->file->f_op == &io_uring_fops ||
+	    req->close.fd == req->ctx->ring_fd)
+		return -EBADF;
+
+	return 0;
+}
+
+static void io_close_finish(struct io_wq_work **workptr)
+{
+	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+	struct io_kiocb *nxt = NULL;
+
+	/* Invoked with files, we need to do the close */
+	if (req->work.files) {
+		int ret;
+
+		ret = filp_close(req->close.put_file, req->work.files);
+		if (ret < 0)
+			req_set_fail_links(req);
+		io_cqring_add_event(req, ret);
+	}
+
+	fput(req->close.put_file);
+
+	io_put_req_find_next(req, &nxt);
+	if (nxt)
+		io_wq_assign_next(workptr, nxt);
+}
+
+static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
+		    bool force_nonblock)
+{
+	int ret;
+
+	req->close.put_file = NULL;
+	ret = __close_fd_get_file(req->close.fd, &req->close.put_file);
+	if (ret < 0)
+		return ret;
+
+	/* if the file has a flush method, be safe and punt to async */
+	if (req->close.put_file->f_op->flush && !io_wq_current_is_worker())
+		goto eagain;
+
+	/*
+	 * No ->flush(), safely close from here and just punt the
+	 * fput() to async context.
+	 */
+	ret = filp_close(req->close.put_file, current->files);
+
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+
+	if (io_wq_current_is_worker()) {
+		struct io_wq_work *old_work, *work;
+
+		old_work = work = &req->work;
+		io_close_finish(&work);
+		if (work && work != old_work)
+			*nxt = container_of(work, struct io_kiocb, work);
+		return 0;
+	}
+
+eagain:
+	req->work.func = io_close_finish;
+	/*
+	 * Do manual async queue here to avoid grabbing files - we don't
+	 * need the files, and grabbing them would cause io_close_finish() to
+	 * close the file again and post a double CQE entry for this request
+	 */
+	io_queue_async_work(req);
+	return 0;
+}
+
 static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -2178,8 +2981,9 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 	sr->msg_flags = READ_ONCE(sqe->msg_flags);
 	sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+	sr->len = READ_ONCE(sqe->len);
 
-	if (!io)
+	if (!io || req->opcode == IORING_OP_SEND)
 		return 0;
 
 	io->msg.iov = io->msg.fast_iov;
@@ -2259,6 +3063,57 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
 #endif
 }
 
+static int io_send(struct io_kiocb *req, struct io_kiocb **nxt,
+		   bool force_nonblock)
+{
+#if defined(CONFIG_NET)
+	struct socket *sock;
+	int ret;
+
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+		return -EINVAL;
+
+	sock = sock_from_file(req->file, &ret);
+	if (sock) {
+		struct io_sr_msg *sr = &req->sr_msg;
+		struct msghdr msg;
+		struct iovec iov;
+		unsigned flags;
+
+		ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
+						&msg.msg_iter);
+		if (ret)
+			return ret;
+
+		msg.msg_name = NULL;
+		msg.msg_control = NULL;
+		msg.msg_controllen = 0;
+		msg.msg_namelen = 0;
+
+		flags = req->sr_msg.msg_flags;
+		if (flags & MSG_DONTWAIT)
+			req->flags |= REQ_F_NOWAIT;
+		else if (force_nonblock)
+			flags |= MSG_DONTWAIT;
+
+		msg.msg_flags = flags;
+		ret = sock_sendmsg(sock, &msg);
+		if (force_nonblock && ret == -EAGAIN)
+			return -EAGAIN;
+		if (ret == -ERESTARTSYS)
+			ret = -EINTR;
+	}
+
+	io_cqring_add_event(req, ret);
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_put_req_find_next(req, nxt);
+	return 0;
+#else
+	return -EOPNOTSUPP;
+#endif
+}
+
 static int io_recvmsg_prep(struct io_kiocb *req,
 			   const struct io_uring_sqe *sqe)
 {
@@ -2268,8 +3123,9 @@ static int io_recvmsg_prep(struct io_kiocb *req,
 
 	sr->msg_flags = READ_ONCE(sqe->msg_flags);
 	sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+	sr->len = READ_ONCE(sqe->len);
 
-	if (!io)
+	if (!io || req->opcode == IORING_OP_RECV)
 		return 0;
 
 	io->msg.iov = io->msg.fast_iov;
@@ -2351,6 +3207,59 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
 #endif
 }
 
+static int io_recv(struct io_kiocb *req, struct io_kiocb **nxt,
+		   bool force_nonblock)
+{
+#if defined(CONFIG_NET)
+	struct socket *sock;
+	int ret;
+
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+		return -EINVAL;
+
+	sock = sock_from_file(req->file, &ret);
+	if (sock) {
+		struct io_sr_msg *sr = &req->sr_msg;
+		struct msghdr msg;
+		struct iovec iov;
+		unsigned flags;
+
+		ret = import_single_range(READ, sr->buf, sr->len, &iov,
+						&msg.msg_iter);
+		if (ret)
+			return ret;
+
+		msg.msg_name = NULL;
+		msg.msg_control = NULL;
+		msg.msg_controllen = 0;
+		msg.msg_namelen = 0;
+		msg.msg_iocb = NULL;
+		msg.msg_flags = 0;
+
+		flags = req->sr_msg.msg_flags;
+		if (flags & MSG_DONTWAIT)
+			req->flags |= REQ_F_NOWAIT;
+		else if (force_nonblock)
+			flags |= MSG_DONTWAIT;
+
+		ret = sock_recvmsg(sock, &msg, flags);
+		if (force_nonblock && ret == -EAGAIN)
+			return -EAGAIN;
+		if (ret == -ERESTARTSYS)
+			ret = -EINTR;
+	}
+
+	io_cqring_add_event(req, ret);
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_put_req_find_next(req, nxt);
+	return 0;
+#else
+	return -EOPNOTSUPP;
+#endif
+}
+
 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
@@ -2414,7 +3323,6 @@ static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
 	ret = __io_accept(req, nxt, force_nonblock);
 	if (ret == -EAGAIN && force_nonblock) {
 		req->work.func = io_accept_finish;
-		req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
 		io_put_req(req);
 		return -EAGAIN;
 	}
@@ -2635,6 +3543,47 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
 		io_wq_assign_next(workptr, nxt);
 }
 
+static void __io_poll_flush(struct io_ring_ctx *ctx, struct llist_node *nodes)
+{
+	struct io_kiocb *req, *tmp;
+	struct req_batch rb;
+
+	rb.to_free = rb.need_iter = 0;
+	spin_lock_irq(&ctx->completion_lock);
+	llist_for_each_entry_safe(req, tmp, nodes, llist_node) {
+		hash_del(&req->hash_node);
+		io_poll_complete(req, req->result, 0);
+
+		if (refcount_dec_and_test(&req->refs) &&
+		    !io_req_multi_free(&rb, req)) {
+			req->flags |= REQ_F_COMP_LOCKED;
+			io_free_req(req);
+		}
+	}
+	spin_unlock_irq(&ctx->completion_lock);
+
+	io_cqring_ev_posted(ctx);
+	io_free_req_many(ctx, &rb);
+}
+
+static void io_poll_flush(struct io_wq_work **workptr)
+{
+	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+	struct llist_node *nodes;
+
+	nodes = llist_del_all(&req->ctx->poll_llist);
+	if (nodes)
+		__io_poll_flush(req->ctx, nodes);
+}
+
+static void io_poll_trigger_evfd(struct io_wq_work **workptr)
+{
+	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+
+	eventfd_signal(req->ctx->cq_ev_fd, 1);
+	io_put_req(req);
+}
+
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 			void *key)
 {
@@ -2642,7 +3591,6 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
 	struct io_ring_ctx *ctx = req->ctx;
 	__poll_t mask = key_to_poll(key);
-	unsigned long flags;
 
 	/* for instances that support it check for an event match first: */
 	if (mask && !(mask & poll->events))
@@ -2656,17 +3604,39 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	 * If we have a link timeout we're going to need the completion_lock
 	 * for finalizing the request, mark us as having grabbed that already.
 	 */
-	if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
-		hash_del(&req->hash_node);
-		io_poll_complete(req, mask, 0);
-		req->flags |= REQ_F_COMP_LOCKED;
-		io_put_req(req);
-		spin_unlock_irqrestore(&ctx->completion_lock, flags);
+	if (mask) {
+		unsigned long flags;
 
-		io_cqring_ev_posted(ctx);
-	} else {
-		io_queue_async_work(req);
+		if (llist_empty(&ctx->poll_llist) &&
+		    spin_trylock_irqsave(&ctx->completion_lock, flags)) {
+			bool trigger_ev;
+
+			hash_del(&req->hash_node);
+			io_poll_complete(req, mask, 0);
+
+			trigger_ev = io_should_trigger_evfd(ctx);
+			if (trigger_ev && eventfd_signal_count()) {
+				trigger_ev = false;
+				req->work.func = io_poll_trigger_evfd;
+			} else {
+				req->flags |= REQ_F_COMP_LOCKED;
+				io_put_req(req);
+				req = NULL;
+			}
+			spin_unlock_irqrestore(&ctx->completion_lock, flags);
+			__io_cqring_ev_posted(ctx, trigger_ev);
+		} else {
+			req->result = mask;
+			req->llist_node.next = NULL;
+			/* if the list wasn't empty, we're done */
+			if (!llist_add(&req->llist_node, &ctx->poll_llist))
+				req = NULL;
+			else
+				req->work.func = io_poll_flush;
+		}
 	}
+	if (req)
+		io_queue_async_work(req);
 
 	return 1;
 }
@@ -3071,20 +4041,67 @@ static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
 	return 0;
 }
 
+static int io_files_update_prep(struct io_kiocb *req,
+				const struct io_uring_sqe *sqe)
+{
+	if (sqe->flags || sqe->ioprio || sqe->rw_flags)
+		return -EINVAL;
+
+	req->files_update.offset = READ_ONCE(sqe->off);
+	req->files_update.nr_args = READ_ONCE(sqe->len);
+	if (!req->files_update.nr_args)
+		return -EINVAL;
+	req->files_update.arg = READ_ONCE(sqe->addr);
+	return 0;
+}
+
+static int io_files_update(struct io_kiocb *req, bool force_nonblock)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_uring_files_update up;
+	int ret;
+
+	if (force_nonblock)
+		return -EAGAIN;
+
+	up.offset = req->files_update.offset;
+	up.fds = req->files_update.arg;
+
+	mutex_lock(&ctx->uring_lock);
+	ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
+	mutex_unlock(&ctx->uring_lock);
+
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+	io_put_req(req);
+	return 0;
+}
+
 static int io_req_defer_prep(struct io_kiocb *req,
 			     const struct io_uring_sqe *sqe)
 {
 	ssize_t ret = 0;
 
+	if (io_op_defs[req->opcode].file_table) {
+		ret = io_grab_files(req);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	io_req_work_grab_env(req, &io_op_defs[req->opcode]);
+
 	switch (req->opcode) {
 	case IORING_OP_NOP:
 		break;
 	case IORING_OP_READV:
 	case IORING_OP_READ_FIXED:
+	case IORING_OP_READ:
 		ret = io_read_prep(req, sqe, true);
 		break;
 	case IORING_OP_WRITEV:
 	case IORING_OP_WRITE_FIXED:
+	case IORING_OP_WRITE:
 		ret = io_write_prep(req, sqe, true);
 		break;
 	case IORING_OP_POLL_ADD:
@@ -3100,9 +4117,11 @@ static int io_req_defer_prep(struct io_kiocb *req,
 		ret = io_prep_sfr(req, sqe);
 		break;
 	case IORING_OP_SENDMSG:
+	case IORING_OP_SEND:
 		ret = io_sendmsg_prep(req, sqe);
 		break;
 	case IORING_OP_RECVMSG:
+	case IORING_OP_RECV:
 		ret = io_recvmsg_prep(req, sqe);
 		break;
 	case IORING_OP_CONNECT:
@@ -3123,6 +4142,33 @@ static int io_req_defer_prep(struct io_kiocb *req,
 	case IORING_OP_ACCEPT:
 		ret = io_accept_prep(req, sqe);
 		break;
+	case IORING_OP_FALLOCATE:
+		ret = io_fallocate_prep(req, sqe);
+		break;
+	case IORING_OP_OPENAT:
+		ret = io_openat_prep(req, sqe);
+		break;
+	case IORING_OP_CLOSE:
+		ret = io_close_prep(req, sqe);
+		break;
+	case IORING_OP_FILES_UPDATE:
+		ret = io_files_update_prep(req, sqe);
+		break;
+	case IORING_OP_STATX:
+		ret = io_statx_prep(req, sqe);
+		break;
+	case IORING_OP_FADVISE:
+		ret = io_fadvise_prep(req, sqe);
+		break;
+	case IORING_OP_MADVISE:
+		ret = io_madvise_prep(req, sqe);
+		break;
+	case IORING_OP_OPENAT2:
+		ret = io_openat2_prep(req, sqe);
+		break;
+	case IORING_OP_EPOLL_CTL:
+		ret = io_epoll_ctl_prep(req, sqe);
+		break;
 	default:
 		printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
 				req->opcode);
@@ -3173,6 +4219,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		break;
 	case IORING_OP_READV:
 	case IORING_OP_READ_FIXED:
+	case IORING_OP_READ:
 		if (sqe) {
 			ret = io_read_prep(req, sqe, force_nonblock);
 			if (ret < 0)
@@ -3182,6 +4229,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		break;
 	case IORING_OP_WRITEV:
 	case IORING_OP_WRITE_FIXED:
+	case IORING_OP_WRITE:
 		if (sqe) {
 			ret = io_write_prep(req, sqe, force_nonblock);
 			if (ret < 0)
@@ -3222,20 +4270,28 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		ret = io_sync_file_range(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_SENDMSG:
+	case IORING_OP_SEND:
 		if (sqe) {
 			ret = io_sendmsg_prep(req, sqe);
 			if (ret < 0)
 				break;
 		}
-		ret = io_sendmsg(req, nxt, force_nonblock);
+		if (req->opcode == IORING_OP_SENDMSG)
+			ret = io_sendmsg(req, nxt, force_nonblock);
+		else
+			ret = io_send(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_RECVMSG:
+	case IORING_OP_RECV:
 		if (sqe) {
 			ret = io_recvmsg_prep(req, sqe);
 			if (ret)
 				break;
 		}
-		ret = io_recvmsg(req, nxt, force_nonblock);
+		if (req->opcode == IORING_OP_RECVMSG)
+			ret = io_recvmsg(req, nxt, force_nonblock);
+		else
+			ret = io_recv(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_TIMEOUT:
 		if (sqe) {
@@ -3277,6 +4333,78 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		}
 		ret = io_async_cancel(req, nxt);
 		break;
+	case IORING_OP_FALLOCATE:
+		if (sqe) {
+			ret = io_fallocate_prep(req, sqe);
+			if (ret)
+				break;
+		}
+		ret = io_fallocate(req, nxt, force_nonblock);
+		break;
+	case IORING_OP_OPENAT:
+		if (sqe) {
+			ret = io_openat_prep(req, sqe);
+			if (ret)
+				break;
+		}
+		ret = io_openat(req, nxt, force_nonblock);
+		break;
+	case IORING_OP_CLOSE:
+		if (sqe) {
+			ret = io_close_prep(req, sqe);
+			if (ret)
+				break;
+		}
+		ret = io_close(req, nxt, force_nonblock);
+		break;
+	case IORING_OP_FILES_UPDATE:
+		if (sqe) {
+			ret = io_files_update_prep(req, sqe);
+			if (ret)
+				break;
+		}
+		ret = io_files_update(req, force_nonblock);
+		break;
+	case IORING_OP_STATX:
+		if (sqe) {
+			ret = io_statx_prep(req, sqe);
+			if (ret)
+				break;
+		}
+		ret = io_statx(req, nxt, force_nonblock);
+		break;
+	case IORING_OP_FADVISE:
+		if (sqe) {
+			ret = io_fadvise_prep(req, sqe);
+			if (ret)
+				break;
+		}
+		ret = io_fadvise(req, nxt, force_nonblock);
+		break;
+	case IORING_OP_MADVISE:
+		if (sqe) {
+			ret = io_madvise_prep(req, sqe);
+			if (ret)
+				break;
+		}
+		ret = io_madvise(req, nxt, force_nonblock);
+		break;
+	case IORING_OP_OPENAT2:
+		if (sqe) {
+			ret = io_openat2_prep(req, sqe);
+			if (ret)
+				break;
+		}
+		ret = io_openat2(req, nxt, force_nonblock);
+		break;
+	case IORING_OP_EPOLL_CTL:
+		if (sqe) {
+			ret = io_epoll_ctl_prep(req, sqe);
+			if (ret)
+				break;
+		}
+		ret = io_epoll_ctl(req, nxt, force_nonblock);
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -3311,8 +4439,11 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 	struct io_kiocb *nxt = NULL;
 	int ret = 0;
 
-	if (work->flags & IO_WQ_WORK_CANCEL)
+	/* if NO_CANCEL is set, we must still run the work */
+	if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
+				IO_WQ_WORK_CANCEL) {
 		ret = -ECANCELED;
+	}
 
 	if (!ret) {
 		req->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
@@ -3344,26 +4475,13 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 		io_wq_assign_next(workptr, nxt);
 }
 
-static bool io_req_op_valid(int op)
+static int io_req_needs_file(struct io_kiocb *req, int fd)
 {
-	return op >= IORING_OP_NOP && op < IORING_OP_LAST;
-}
-
-static int io_req_needs_file(struct io_kiocb *req)
-{
-	switch (req->opcode) {
-	case IORING_OP_NOP:
-	case IORING_OP_POLL_REMOVE:
-	case IORING_OP_TIMEOUT:
-	case IORING_OP_TIMEOUT_REMOVE:
-	case IORING_OP_ASYNC_CANCEL:
-	case IORING_OP_LINK_TIMEOUT:
+	if (!io_op_defs[req->opcode].needs_file)
 		return 0;
-	default:
-		if (io_req_op_valid(req->opcode))
-			return 1;
-		return -EINVAL;
-	}
+	if (fd == -1 && io_op_defs[req->opcode].fd_non_neg)
+		return 0;
+	return 1;
 }
 
 static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
@@ -3371,8 +4489,8 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
 {
 	struct fixed_file_table *table;
 
-	table = &ctx->file_table[index >> IORING_FILE_TABLE_SHIFT];
-	return table->files[index & IORING_FILE_TABLE_MASK];
+	table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
+	return table->files[index & IORING_FILE_TABLE_MASK];
 }
 
 static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
@@ -3380,20 +4498,16 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned flags;
-	int fd, ret;
+	int fd;
 
 	flags = READ_ONCE(sqe->flags);
 	fd = READ_ONCE(sqe->fd);
 
-	if (flags & IOSQE_IO_DRAIN)
-		req->flags |= REQ_F_IO_DRAIN;
-
-	ret = io_req_needs_file(req);
-	if (ret <= 0)
-		return ret;
+	if (!io_req_needs_file(req, fd))
+		return 0;
 
 	if (flags & IOSQE_FIXED_FILE) {
-		if (unlikely(!ctx->file_table ||
+		if (unlikely(!ctx->file_data ||
 		    (unsigned) fd >= ctx->nr_user_files))
 			return -EBADF;
 		fd = array_index_nospec(fd, ctx->nr_user_files);
@@ -3401,6 +4515,7 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
 		if (!req->file)
 			return -EBADF;
 		req->flags |= REQ_F_FIXED_FILE;
+		percpu_ref_get(&ctx->file_data->refs);
 	} else {
 		if (req->needs_fixed_file)
 			return -EBADF;
@@ -3418,6 +4533,11 @@ static int io_grab_files(struct io_kiocb *req)
 	int ret = -EBADF;
 	struct io_ring_ctx *ctx = req->ctx;
 
+	if (req->work.files)
+		return 0;
+	if (!ctx->ring_file)
+		return -EBADF;
+
 	rcu_read_lock();
 	spin_lock_irq(&ctx->inflight_lock);
 	/*
@@ -3426,7 +4546,7 @@ static int io_grab_files(struct io_kiocb *req)
 	 * the fd has changed since we started down this path, and disallow
 	 * this operation if it has.
 	 */
-	if (fcheck(req->ring_fd) == req->ring_file) {
+	if (fcheck(ctx->ring_fd) == ctx->ring_file) {
 		list_add(&req->inflight_entry, &ctx->inflight_list);
 		req->flags |= REQ_F_INFLIGHT;
 		req->work.files = current->files;
@@ -3532,7 +4652,8 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	 */
 	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
 	    (req->flags & REQ_F_MUST_PUNT))) {
-		if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
+punt:
+		if (io_op_defs[req->opcode].file_table) {
 			ret = io_grab_files(req);
 			if (ret)
 				goto err;
@@ -3567,6 +4688,9 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (nxt) {
 		req = nxt;
 		nxt = NULL;
+
+		if (req->flags & REQ_F_FORCE_ASYNC)
+			goto punt;
 		goto again;
 	}
 }
@@ -3575,21 +4699,27 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	int ret;
 
-	if (unlikely(req->ctx->drain_next)) {
-		req->flags |= REQ_F_IO_DRAIN;
-		req->ctx->drain_next = false;
-	}
-	req->ctx->drain_next = (req->flags & REQ_F_DRAIN_LINK);
-
 	ret = io_req_defer(req, sqe);
 	if (ret) {
 		if (ret != -EIOCBQUEUED) {
+fail_req:
 			io_cqring_add_event(req, ret);
 			req_set_fail_links(req);
 			io_double_put_req(req);
 		}
-	} else
+	} else if (req->flags & REQ_F_FORCE_ASYNC) {
+		ret = io_req_defer_prep(req, sqe);
+		if (unlikely(ret < 0))
+			goto fail_req;
+		/*
+		 * Never try inline submit if IOSQE_ASYNC is set, go straight
+		 * to async execution.
+		 */
+		req->work.flags |= IO_WQ_WORK_CONCURRENT;
+		io_queue_async_work(req);
+	} else {
 		__io_queue_sqe(req, sqe);
+	}
 }
 
 static inline void io_queue_link_head(struct io_kiocb *req)
@@ -3602,25 +4732,47 @@ static inline void io_queue_link_head(struct io_kiocb *req)
 }
 
 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK|	\
-				IOSQE_IO_HARDLINK)
+				IOSQE_IO_HARDLINK | IOSQE_ASYNC)
 
 static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			  struct io_submit_state *state, struct io_kiocb **link)
 {
+	const struct cred *old_creds = NULL;
 	struct io_ring_ctx *ctx = req->ctx;
-	int ret;
+	unsigned int sqe_flags;
+	int ret, id;
+
+	sqe_flags = READ_ONCE(sqe->flags);
 
 	/* enforce forwards compatibility on users */
-	if (unlikely(sqe->flags & ~SQE_VALID_FLAGS)) {
+	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
 		ret = -EINVAL;
 		goto err_req;
 	}
 
+	id = READ_ONCE(sqe->personality);
+	if (id) {
+		const struct cred *personality_creds;
+
+		personality_creds = idr_find(&ctx->personality_idr, id);
+		if (unlikely(!personality_creds)) {
+			ret = -EINVAL;
+			goto err_req;
+		}
+		old_creds = override_creds(personality_creds);
+	}
+
+	/* same numerical values with corresponding REQ_F_*, safe to copy */
+	req->flags |= sqe_flags & (IOSQE_IO_DRAIN|IOSQE_IO_HARDLINK|
+					IOSQE_ASYNC);
+
 	ret = io_req_set_file(state, req, sqe);
 	if (unlikely(ret)) {
 err_req:
 		io_cqring_add_event(req, ret);
 		io_double_put_req(req);
+		if (old_creds)
+			revert_creds(old_creds);
 		return false;
 	}
 
@@ -3632,14 +4784,19 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	 * conditions are true (normal request), then just queue it.
 	 */
 	if (*link) {
-		struct io_kiocb *prev = *link;
+		struct io_kiocb *head = *link;
 
-		if (sqe->flags & IOSQE_IO_DRAIN)
-			(*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN;
-
-		if (sqe->flags & IOSQE_IO_HARDLINK)
-			req->flags |= REQ_F_HARDLINK;
-
+		/*
+		 * Given the sequential execution of a link, draining both sides
+		 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
+		 * requests in the link. So, it drains the head and the
+		 * next after the link request. The last one is done via
+		 * drain_next flag to persist the effect across calls.
+		 */
+		if (sqe_flags & IOSQE_IO_DRAIN) {
+			head->flags |= REQ_F_IO_DRAIN;
+			ctx->drain_next = 1;
+		}
 		if (io_alloc_async_ctx(req)) {
 			ret = -EAGAIN;
 			goto err_req;
@@ -3648,25 +4805,36 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		ret = io_req_defer_prep(req, sqe);
 		if (ret) {
 			/* fail even hard links since we don't submit */
-			prev->flags |= REQ_F_FAIL_LINK;
+			head->flags |= REQ_F_FAIL_LINK;
 			goto err_req;
 		}
-		trace_io_uring_link(ctx, req, prev);
-		list_add_tail(&req->link_list, &prev->link_list);
-	} else if (sqe->flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
-		req->flags |= REQ_F_LINK;
-		if (sqe->flags & IOSQE_IO_HARDLINK)
-			req->flags |= REQ_F_HARDLINK;
+		trace_io_uring_link(ctx, req, head);
+		list_add_tail(&req->link_list, &head->link_list);
 
-		INIT_LIST_HEAD(&req->link_list);
-		ret = io_req_defer_prep(req, sqe);
-		if (ret)
-			req->flags |= REQ_F_FAIL_LINK;
-		*link = req;
+		/* last request of a link, enqueue the link */
+		if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) {
+			io_queue_link_head(head);
+			*link = NULL;
+		}
 	} else {
-		io_queue_sqe(req, sqe);
+		if (unlikely(ctx->drain_next)) {
+			req->flags |= REQ_F_IO_DRAIN;
+			req->ctx->drain_next = 0;
+		}
+		if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
+			req->flags |= REQ_F_LINK;
+			INIT_LIST_HEAD(&req->link_list);
+			ret = io_req_defer_prep(req, sqe);
+			if (ret)
+				req->flags |= REQ_F_FAIL_LINK;
+			*link = req;
+		} else {
+			io_queue_sqe(req, sqe);
+		}
 	}
 
+	if (old_creds)
+		revert_creds(old_creds);
 	return true;
 }
 
@@ -3678,8 +4846,7 @@ static void io_submit_state_end(struct io_submit_state *state)
 	blk_finish_plug(&state->plug);
 	io_file_put(state);
 	if (state->free_reqs)
-		kmem_cache_free_bulk(req_cachep, state->free_reqs,
-					&state->reqs[state->cur_req]);
+		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
 }
 
 /*
@@ -3698,14 +4865,12 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
 {
 	struct io_rings *rings = ctx->rings;
 
-	if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
-		/*
-		 * Ensure any loads from the SQEs are done at this point,
-		 * since once we write the new head, the application could
-		 * write new data to them.
-		 */
-		smp_store_release(&rings->sq.head, ctx->cached_sq_head);
-	}
+	/*
+	 * Ensure any loads from the SQEs are done at this point,
+	 * since once we write the new head, the application could
+	 * write new data to them.
+	 */
+	smp_store_release(&rings->sq.head, ctx->cached_sq_head);
 }
 
 /*
@@ -3719,7 +4884,6 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
 static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
 			  const struct io_uring_sqe **sqe_ptr)
 {
-	struct io_rings *rings = ctx->rings;
 	u32 *sq_array = ctx->sq_array;
 	unsigned head;
 
@@ -3731,12 +4895,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	 * 2) allows the kernel side to track the head on its own, even
 	 *    though the application is the one updating it.
 	 */
-	head = ctx->cached_sq_head;
-	/* make sure SQ entry isn't read before tail */
-	if (unlikely(head == smp_load_acquire(&rings->sq.tail)))
-		return false;
-
-	head = READ_ONCE(sq_array[head & ctx->sq_mask]);
+	head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
 	if (likely(head < ctx->sq_entries)) {
 		/*
 		 * All io need record the previous position, if LINK vs DARIN,
@@ -3754,7 +4913,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	/* drop invalid entries */
 	ctx->cached_sq_head++;
 	ctx->cached_sq_dropped++;
-	WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
+	WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
 	return false;
 }
 
@@ -3768,19 +4927,29 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	bool mm_fault = false;
 
 	/* if we have a backlog and couldn't flush it all, return BUSY */
-	if (!list_empty(&ctx->cq_overflow_list) &&
-	    !io_cqring_overflow_flush(ctx, false))
-		return -EBUSY;
+	if (test_bit(0, &ctx->sq_check_overflow)) {
+		if (!list_empty(&ctx->cq_overflow_list) &&
+		    !io_cqring_overflow_flush(ctx, false))
+			return -EBUSY;
+	}
+
+	/* make sure SQ entry isn't read before tail */
+	nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
+
+	if (!percpu_ref_tryget_many(&ctx->refs, nr))
+		return -EAGAIN;
 
 	if (nr > IO_PLUG_THRESHOLD) {
 		io_submit_state_start(&state, nr);
 		statep = &state;
 	}
 
+	ctx->ring_fd = ring_fd;
+	ctx->ring_file = ring_file;
+
 	for (i = 0; i < nr; i++) {
 		const struct io_uring_sqe *sqe;
 		struct io_kiocb *req;
-		unsigned int sqe_flags;
 
 		req = io_get_req(ctx, statep);
 		if (unlikely(!req)) {
@@ -3789,11 +4958,20 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			break;
 		}
 		if (!io_get_sqring(ctx, req, &sqe)) {
-			__io_free_req(req);
+			__io_req_do_free(req);
 			break;
 		}
 
-		if (io_req_needs_user(req) && !*mm) {
+		/* will complete beyond this point, count as submitted */
+		submitted++;
+
+		if (unlikely(req->opcode >= IORING_OP_LAST)) {
+			io_cqring_add_event(req, -EINVAL);
+			io_double_put_req(req);
+			break;
+		}
+
+		if (io_op_defs[req->opcode].needs_mm && !*mm) {
 			mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
 			if (!mm_fault) {
 				use_mm(ctx->sqo_mm);
@@ -3801,27 +4979,20 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			}
 		}
 
-		submitted++;
-		sqe_flags = sqe->flags;
-
-		req->ring_file = ring_file;
-		req->ring_fd = ring_fd;
 		req->has_user = *mm != NULL;
 		req->in_async = async;
 		req->needs_fixed_file = async;
-		trace_io_uring_submit_sqe(ctx, req->user_data, true, async);
+		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
+						true, async);
 		if (!io_submit_sqe(req, sqe, statep, &link))
 			break;
-		/*
-		 * If previous wasn't linked and we have a linked command,
-		 * that's the end of the chain. Submit the previous link.
-		 */
-		if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) && link) {
-			io_queue_link_head(link);
-			link = NULL;
-		}
 	}
 
+	if (unlikely(submitted != nr)) {
+		int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
+
+		percpu_ref_put_many(&ctx->refs, nr - ref_used);
+	}
 	if (link)
 		io_queue_link_head(link);
 	if (statep)
@@ -3900,7 +5071,8 @@ static int io_sq_thread(void *data)
 			 * reap events and wake us up.
 			 */
 			if (inflight ||
-			    (!time_after(jiffies, timeout) && ret != -EBUSY)) {
+			    (!time_after(jiffies, timeout) && ret != -EBUSY &&
+			    !percpu_ref_is_dying(&ctx->refs))) {
 				cond_resched();
 				continue;
 			}
@@ -3944,7 +5116,6 @@ static int io_sq_thread(void *data)
 			ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
 		}
 
-		to_submit = min(to_submit, ctx->sq_entries);
 		mutex_lock(&ctx->uring_lock);
 		ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
 		mutex_unlock(&ctx->uring_lock);
@@ -4075,19 +5246,35 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
 #endif
 }
 
+static void io_file_ref_kill(struct percpu_ref *ref)
+{
+	struct fixed_file_data *data;
+
+	data = container_of(ref, struct fixed_file_data, refs);
+	complete(&data->done);
+}
+
 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 {
+	struct fixed_file_data *data = ctx->file_data;
 	unsigned nr_tables, i;
 
-	if (!ctx->file_table)
+	if (!data)
 		return -ENXIO;
 
+	percpu_ref_kill_and_confirm(&data->refs, io_file_ref_kill);
+	flush_work(&data->ref_work);
+	wait_for_completion(&data->done);
+	io_ring_file_ref_flush(data);
+	percpu_ref_exit(&data->refs);
+
 	__io_sqe_files_unregister(ctx);
 	nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
 	for (i = 0; i < nr_tables; i++)
-		kfree(ctx->file_table[i].files);
-	kfree(ctx->file_table);
-	ctx->file_table = NULL;
+		kfree(data->table[i].files);
+	kfree(data->table);
+	kfree(data);
+	ctx->file_data = NULL;
 	ctx->nr_user_files = 0;
 	return 0;
 }
@@ -4118,16 +5305,6 @@ static void io_finish_async(struct io_ring_ctx *ctx)
 }
 
 #if defined(CONFIG_UNIX)
-static void io_destruct_skb(struct sk_buff *skb)
-{
-	struct io_ring_ctx *ctx = skb->sk->sk_user_data;
-
-	if (ctx->io_wq)
-		io_wq_flush(ctx->io_wq);
-
-	unix_destruct_scm(skb);
-}
-
 /*
  * Ensure the UNIX gc is aware of our file set, so we are certain that
  * the io_uring can be safely unregistered on process exit, even if we have
@@ -4175,7 +5352,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
 		fpl->max = SCM_MAX_FD;
 		fpl->count = nr_files;
 		UNIXCB(skb).fp = fpl;
-		skb->destructor = io_destruct_skb;
+		skb->destructor = unix_destruct_scm;
 		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 		skb_queue_head(&sk->sk_receive_queue, skb);
 
@@ -4237,7 +5414,7 @@ static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
 	int i;
 
 	for (i = 0; i < nr_tables; i++) {
-		struct fixed_file_table *table = &ctx->file_table[i];
+		struct fixed_file_table *table = &ctx->file_data->table[i];
 		unsigned this_files;
 
 		this_files = min(nr_files, IORING_MAX_FILES_TABLE);
@@ -4252,101 +5429,15 @@ static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
 		return 0;
 
 	for (i = 0; i < nr_tables; i++) {
-		struct fixed_file_table *table = &ctx->file_table[i];
+		struct fixed_file_table *table = &ctx->file_data->table[i];
 		kfree(table->files);
 	}
 	return 1;
 }
 
-static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
-				 unsigned nr_args)
-{
-	__s32 __user *fds = (__s32 __user *) arg;
-	unsigned nr_tables;
-	int fd, ret = 0;
-	unsigned i;
-
-	if (ctx->file_table)
-		return -EBUSY;
-	if (!nr_args)
-		return -EINVAL;
-	if (nr_args > IORING_MAX_FIXED_FILES)
-		return -EMFILE;
-
-	nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
-	ctx->file_table = kcalloc(nr_tables, sizeof(struct fixed_file_table),
-					GFP_KERNEL);
-	if (!ctx->file_table)
-		return -ENOMEM;
-
-	if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
-		kfree(ctx->file_table);
-		ctx->file_table = NULL;
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
-		struct fixed_file_table *table;
-		unsigned index;
-
-		ret = -EFAULT;
-		if (copy_from_user(&fd, &fds[i], sizeof(fd)))
-			break;
-		/* allow sparse sets */
-		if (fd == -1) {
-			ret = 0;
-			continue;
-		}
-
-		table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
-		index = i & IORING_FILE_TABLE_MASK;
-		table->files[index] = fget(fd);
-
-		ret = -EBADF;
-		if (!table->files[index])
-			break;
-		/*
-		 * Don't allow io_uring instances to be registered. If UNIX
-		 * isn't enabled, then this causes a reference cycle and this
-		 * instance can never get freed. If UNIX is enabled we'll
-		 * handle it just fine, but there's still no point in allowing
-		 * a ring fd as it doesn't support regular read/write anyway.
-		 */
-		if (table->files[index]->f_op == &io_uring_fops) {
-			fput(table->files[index]);
-			break;
-		}
-		ret = 0;
-	}
-
-	if (ret) {
-		for (i = 0; i < ctx->nr_user_files; i++) {
-			struct file *file;
-
-			file = io_file_from_index(ctx, i);
-			if (file)
-				fput(file);
-		}
-		for (i = 0; i < nr_tables; i++)
-			kfree(ctx->file_table[i].files);
-
-		kfree(ctx->file_table);
-		ctx->file_table = NULL;
-		ctx->nr_user_files = 0;
-		return ret;
-	}
-
-	ret = io_sqe_files_scm(ctx);
-	if (ret)
-		io_sqe_files_unregister(ctx);
-
-	return ret;
-}
-
-static void io_sqe_file_unregister(struct io_ring_ctx *ctx, int index)
+static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
 {
 #if defined(CONFIG_UNIX)
-	struct file *file = io_file_from_index(ctx, index);
 	struct sock *sock = ctx->ring_sock->sk;
 	struct sk_buff_head list, *head = &sock->sk_receive_queue;
 	struct sk_buff *skb;
@@ -4402,10 +5493,167 @@ static void io_sqe_file_unregister(struct io_ring_ctx *ctx, int index)
 		spin_unlock_irq(&head->lock);
 	}
 #else
-	fput(io_file_from_index(ctx, index));
+	fput(file);
 #endif
 }
 
+struct io_file_put {
+	struct llist_node llist;
+	struct file *file;
+	struct completion *done;
+};
+
+static void io_ring_file_ref_flush(struct fixed_file_data *data)
+{
+	struct io_file_put *pfile, *tmp;
+	struct llist_node *node;
+
+	while ((node = llist_del_all(&data->put_llist)) != NULL) {
+		llist_for_each_entry_safe(pfile, tmp, node, llist) {
+			io_ring_file_put(data->ctx, pfile->file);
+			if (pfile->done)
+				complete(pfile->done);
+			else
+				kfree(pfile);
+		}
+	}
+}
+
+static void io_ring_file_ref_switch(struct work_struct *work)
+{
+	struct fixed_file_data *data;
+
+	data = container_of(work, struct fixed_file_data, ref_work);
+	io_ring_file_ref_flush(data);
+	percpu_ref_get(&data->refs);
+	percpu_ref_switch_to_percpu(&data->refs);
+}
+
+static void io_file_data_ref_zero(struct percpu_ref *ref)
+{
+	struct fixed_file_data *data;
+
+	data = container_of(ref, struct fixed_file_data, refs);
+
+	/*
+	 * We can't safely switch from inside this context, punt to wq. If
+	 * the table ref is going away, the table is being unregistered.
+	 * Don't queue up the async work for that case, the caller will
+	 * handle it.
+	 */
+	if (!percpu_ref_is_dying(&data->refs))
+		queue_work(system_wq, &data->ref_work);
+}
+
+static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
+				 unsigned nr_args)
+{
+	__s32 __user *fds = (__s32 __user *) arg;
+	unsigned nr_tables;
+	struct file *file;
+	int fd, ret = 0;
+	unsigned i;
+
+	if (ctx->file_data)
+		return -EBUSY;
+	if (!nr_args)
+		return -EINVAL;
+	if (nr_args > IORING_MAX_FIXED_FILES)
+		return -EMFILE;
+
+	ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
+	if (!ctx->file_data)
+		return -ENOMEM;
+	ctx->file_data->ctx = ctx;
+	init_completion(&ctx->file_data->done);
+
+	nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
+	ctx->file_data->table = kcalloc(nr_tables,
+					sizeof(struct fixed_file_table),
+					GFP_KERNEL);
+	if (!ctx->file_data->table) {
+		kfree(ctx->file_data);
+		ctx->file_data = NULL;
+		return -ENOMEM;
+	}
+
+	if (percpu_ref_init(&ctx->file_data->refs, io_file_data_ref_zero,
+				PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
+		kfree(ctx->file_data->table);
+		kfree(ctx->file_data);
+		ctx->file_data = NULL;
+		return -ENOMEM;
+	}
+	ctx->file_data->put_llist.first = NULL;
+	INIT_WORK(&ctx->file_data->ref_work, io_ring_file_ref_switch);
+
+	if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
+		percpu_ref_exit(&ctx->file_data->refs);
+		kfree(ctx->file_data->table);
+		kfree(ctx->file_data);
+		ctx->file_data = NULL;
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
+		struct fixed_file_table *table;
+		unsigned index;
+
+		ret = -EFAULT;
+		if (copy_from_user(&fd, &fds[i], sizeof(fd)))
+			break;
+		/* allow sparse sets */
+		if (fd == -1) {
+			ret = 0;
+			continue;
+		}
+
+		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
+		index = i & IORING_FILE_TABLE_MASK;
+		file = fget(fd);
+
+		ret = -EBADF;
+		if (!file)
+			break;
+
+		/*
+		 * Don't allow io_uring instances to be registered. If UNIX
+		 * isn't enabled, then this causes a reference cycle and this
+		 * instance can never get freed. If UNIX is enabled we'll
+		 * handle it just fine, but there's still no point in allowing
+		 * a ring fd as it doesn't support regular read/write anyway.
+		 */
+		if (file->f_op == &io_uring_fops) {
+			fput(file);
+			break;
+		}
+		ret = 0;
+		table->files[index] = file;
+	}
+
+	if (ret) {
+		for (i = 0; i < ctx->nr_user_files; i++) {
+			file = io_file_from_index(ctx, i);
+			if (file)
+				fput(file);
+		}
+		for (i = 0; i < nr_tables; i++)
+			kfree(ctx->file_data->table[i].files);
+
+		kfree(ctx->file_data->table);
+		kfree(ctx->file_data);
+		ctx->file_data = NULL;
+		ctx->nr_user_files = 0;
+		return ret;
+	}
+
+	ret = io_sqe_files_scm(ctx);
+	if (ret)
+		io_sqe_files_unregister(ctx);
+
+	return ret;
+}
+
 static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
 				int index)
 {
@@ -4449,29 +5697,65 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
 #endif
 }
 
-static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
-			       unsigned nr_args)
+static void io_atomic_switch(struct percpu_ref *ref)
 {
-	struct io_uring_files_update up;
+	struct fixed_file_data *data;
+
+	data = container_of(ref, struct fixed_file_data, refs);
+	clear_bit(FFD_F_ATOMIC, &data->state);
+}
+
+static bool io_queue_file_removal(struct fixed_file_data *data,
+				  struct file *file)
+{
+	struct io_file_put *pfile, pfile_stack;
+	DECLARE_COMPLETION_ONSTACK(done);
+
+	/*
+	 * If we fail to allocate the struct we need for async removal
+	 * of this file, just punt to sync and wait for it.
+	 */
+	pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
+	if (!pfile) {
+		pfile = &pfile_stack;
+		pfile->done = &done;
+	}
+
+	pfile->file = file;
+	llist_add(&pfile->llist, &data->put_llist);
+
+	if (pfile == &pfile_stack) {
+		if (!test_and_set_bit(FFD_F_ATOMIC, &data->state)) {
+			percpu_ref_put(&data->refs);
+			percpu_ref_switch_to_atomic(&data->refs,
+							io_atomic_switch);
+		}
+		wait_for_completion(&done);
+		flush_work(&data->ref_work);
+		return false;
+	}
+
+	return true;
+}
+
+static int __io_sqe_files_update(struct io_ring_ctx *ctx,
+				 struct io_uring_files_update *up,
+				 unsigned nr_args)
+{
+	struct fixed_file_data *data = ctx->file_data;
+	bool ref_switch = false;
+	struct file *file;
 	__s32 __user *fds;
 	int fd, i, err;
 	__u32 done;
 
-	if (!ctx->file_table)
-		return -ENXIO;
-	if (!nr_args)
-		return -EINVAL;
-	if (copy_from_user(&up, arg, sizeof(up)))
-		return -EFAULT;
-	if (up.resv)
-		return -EINVAL;
-	if (check_add_overflow(up.offset, nr_args, &done))
+	if (check_add_overflow(up->offset, nr_args, &done))
 		return -EOVERFLOW;
 	if (done > ctx->nr_user_files)
 		return -EINVAL;
 
 	done = 0;
-	fds = u64_to_user_ptr(up.fds);
+	fds = u64_to_user_ptr(up->fds);
 	while (nr_args) {
 		struct fixed_file_table *table;
 		unsigned index;
@@ -4481,16 +5765,16 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
 			err = -EFAULT;
 			break;
 		}
-		i = array_index_nospec(up.offset, ctx->nr_user_files);
-		table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
+		i = array_index_nospec(up->offset, ctx->nr_user_files);
+		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
 		index = i & IORING_FILE_TABLE_MASK;
 		if (table->files[index]) {
-			io_sqe_file_unregister(ctx, i);
+			file = io_file_from_index(ctx, index);
 			table->files[index] = NULL;
+			if (io_queue_file_removal(data, file))
+				ref_switch = true;
 		}
 		if (fd != -1) {
-			struct file *file;
-
 			file = fget(fd);
 			if (!file) {
 				err = -EBADF;
@@ -4516,11 +5800,32 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
 		}
 		nr_args--;
 		done++;
-		up.offset++;
+		up->offset++;
+	}
+
+	if (ref_switch && !test_and_set_bit(FFD_F_ATOMIC, &data->state)) {
+		percpu_ref_put(&data->refs);
+		percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);
 	}
 
 	return done ? done : err;
 }
+static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
+			       unsigned nr_args)
+{
+	struct io_uring_files_update up;
+
+	if (!ctx->file_data)
+		return -ENXIO;
+	if (!nr_args)
+		return -EINVAL;
+	if (copy_from_user(&up, arg, sizeof(up)))
+		return -EFAULT;
+	if (up.resv)
+		return -EINVAL;
+
+	return __io_sqe_files_update(ctx, &up, nr_args);
+}
 
 static void io_put_work(struct io_wq_work *work)
 {
@@ -4536,11 +5841,56 @@ static void io_get_work(struct io_wq_work *work)
 	refcount_inc(&req->refs);
 }
 
+static int io_init_wq_offload(struct io_ring_ctx *ctx,
+			      struct io_uring_params *p)
+{
+	struct io_wq_data data;
+	struct fd f;
+	struct io_ring_ctx *ctx_attach;
+	unsigned int concurrency;
+	int ret = 0;
+
+	data.user = ctx->user;
+	data.get_work = io_get_work;
+	data.put_work = io_put_work;
+
+	if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
+		/* Do QD, or 4 * CPUS, whatever is smallest */
+		concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
+
+		ctx->io_wq = io_wq_create(concurrency, &data);
+		if (IS_ERR(ctx->io_wq)) {
+			ret = PTR_ERR(ctx->io_wq);
+			ctx->io_wq = NULL;
+		}
+		return ret;
+	}
+
+	f = fdget(p->wq_fd);
+	if (!f.file)
+		return -EBADF;
+
+	if (f.file->f_op != &io_uring_fops) {
+		ret = -EINVAL;
+		goto out_fput;
+	}
+
+	ctx_attach = f.file->private_data;
+	/* @io_wq is protected by holding the fd */
+	if (!io_wq_get(ctx_attach->io_wq, &data)) {
+		ret = -EINVAL;
+		goto out_fput;
+	}
+
+	ctx->io_wq = ctx_attach->io_wq;
+out_fput:
+	fdput(f);
+	return ret;
+}
+
 static int io_sq_offload_start(struct io_ring_ctx *ctx,
 			       struct io_uring_params *p)
 {
-	struct io_wq_data data;
-	unsigned concurrency;
 	int ret;
 
 	init_waitqueue_head(&ctx->sqo_wait);
@@ -4584,20 +5934,9 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 		goto err;
 	}
 
-	data.mm = ctx->sqo_mm;
-	data.user = ctx->user;
-	data.creds = ctx->creds;
-	data.get_work = io_get_work;
-	data.put_work = io_put_work;
-
-	/* Do QD, or 4 * CPUS, whatever is smallest */
-	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
-	ctx->io_wq = io_wq_create(concurrency, &data);
-	if (IS_ERR(ctx->io_wq)) {
-		ret = PTR_ERR(ctx->io_wq);
-		ctx->io_wq = NULL;
+	ret = io_init_wq_offload(ctx, p);
+	if (ret)
 		goto err;
-	}
 
 	return 0;
 err:
@@ -4702,7 +6041,7 @@ static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
 		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
 
 		for (j = 0; j < imu->nr_bvecs; j++)
-			put_user_page(imu->bvec[j].bv_page);
+			unpin_user_page(imu->bvec[j].bv_page);
 
 		if (ctx->account_mem)
 			io_unaccount_mem(ctx->user, imu->nr_bvecs);
@@ -4823,7 +6162,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
 
 		ret = 0;
 		down_read(&current->mm->mmap_sem);
-		pret = get_user_pages(ubuf, nr_pages,
+		pret = pin_user_pages(ubuf, nr_pages,
 				      FOLL_WRITE | FOLL_LONGTERM,
 				      pages, vmas);
 		if (pret == nr_pages) {
@@ -4847,7 +6186,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
 			 * release any pages we did get
 			 */
 			if (pret > 0)
-				put_user_pages(pages, pret);
+				unpin_user_pages(pages, pret);
 			if (ctx->account_mem)
 				io_unaccount_mem(ctx->user, nr_pages);
 			kvfree(imu->bvec);
@@ -4975,12 +6314,33 @@ static int io_uring_fasync(int fd, struct file *file, int on)
 	return fasync_helper(fd, file, on, &ctx->cq_fasync);
 }
 
+static int io_remove_personalities(int id, void *p, void *data)
+{
+	struct io_ring_ctx *ctx = data;
+	const struct cred *cred;
+
+	cred = idr_remove(&ctx->personality_idr, id);
+	if (cred)
+		put_cred(cred);
+	return 0;
+}
+
 static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
 	mutex_lock(&ctx->uring_lock);
 	percpu_ref_kill(&ctx->refs);
 	mutex_unlock(&ctx->uring_lock);
 
+	/*
+	 * Wait for sq thread to idle, if we have one. It won't spin on new
+	 * work after we've killed the ctx ref above. This is important to do
+	 * before we cancel existing commands, as the thread could otherwise
+	 * still be queueing new work after that point. If that is work we
+	 * need to cancel, it could cause shutdown to hang.
+	 */
+	while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
+		cpu_relax();
+
 	io_kill_timeouts(ctx);
 	io_poll_remove_all(ctx);
 
@@ -4991,6 +6351,7 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 	/* if we failed setting up the ctx, we might not have any rings */
 	if (ctx->rings)
 		io_cqring_overflow_flush(ctx, true);
+	idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
 	wait_for_completion(&ctx->completions[0]);
 	io_ring_ctx_free(ctx);
 }
@@ -5157,7 +6518,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 	} else if (to_submit) {
 		struct mm_struct *cur_mm;
 
-		to_submit = min(to_submit, ctx->sq_entries);
 		mutex_lock(&ctx->uring_lock);
 		/* already have mm, so io_submit_sqes() won't try to grab it */
 		cur_mm = ctx->sqo_mm;
@@ -5187,6 +6547,80 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 	return submitted ? submitted : ret;
 }
 
+static int io_uring_show_cred(int id, void *p, void *data)
+{
+	const struct cred *cred = p;
+	struct seq_file *m = data;
+	struct user_namespace *uns = seq_user_ns(m);
+	struct group_info *gi;
+	kernel_cap_t cap;
+	unsigned __capi;
+	int g;
+
+	seq_printf(m, "%5d\n", id);
+	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
+	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
+	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
+	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
+	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
+	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
+	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
+	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
+	seq_puts(m, "\n\tGroups:\t");
+	gi = cred->group_info;
+	for (g = 0; g < gi->ngroups; g++) {
+		seq_put_decimal_ull(m, g ? " " : "",
+					from_kgid_munged(uns, gi->gid[g]));
+	}
+	seq_puts(m, "\n\tCapEff:\t");
+	cap = cred->cap_effective;
+	CAP_FOR_EACH_U32(__capi)
+		seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
+	seq_putc(m, '\n');
+	return 0;
+}
+
+static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
+{
+	int i;
+
+	mutex_lock(&ctx->uring_lock);
+	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
+	for (i = 0; i < ctx->nr_user_files; i++) {
+		struct fixed_file_table *table;
+		struct file *f;
+
+		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
+		f = table->files[i & IORING_FILE_TABLE_MASK];
+		if (f)
+			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
+		else
+			seq_printf(m, "%5u: <none>\n", i);
+	}
+	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
+	for (i = 0; i < ctx->nr_user_bufs; i++) {
+		struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
+
+		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
+						(unsigned int) buf->len);
+	}
+	if (!idr_is_empty(&ctx->personality_idr)) {
+		seq_printf(m, "Personalities:\n");
+		idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
+	}
+	mutex_unlock(&ctx->uring_lock);
+}
+
+static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
+{
+	struct io_ring_ctx *ctx = f->private_data;
+
+	if (percpu_ref_tryget(&ctx->refs)) {
+		__io_uring_show_fdinfo(ctx, m);
+		percpu_ref_put(&ctx->refs);
+	}
+}
+
 static const struct file_operations io_uring_fops = {
 	.release	= io_uring_release,
 	.flush		= io_uring_flush,
@@ -5197,6 +6631,7 @@ static const struct file_operations io_uring_fops = {
 #endif
 	.poll		= io_uring_poll,
 	.fasync		= io_uring_fasync,
+	.show_fdinfo	= io_uring_show_fdinfo,
 };
 
 static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
@@ -5273,7 +6708,6 @@ static int io_uring_get_fd(struct io_ring_ctx *ctx)
 
 #if defined(CONFIG_UNIX)
 	ctx->ring_sock->file = file;
-	ctx->ring_sock->sk->sk_user_data = ctx;
 #endif
 	fd_install(ret, file);
 	return ret;
@@ -5292,8 +6726,13 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
 	bool account_mem;
 	int ret;
 
-	if (!entries || entries > IORING_MAX_ENTRIES)
+	if (!entries)
 		return -EINVAL;
+	if (entries > IORING_MAX_ENTRIES) {
+		if (!(p->flags & IORING_SETUP_CLAMP))
+			return -EINVAL;
+		entries = IORING_MAX_ENTRIES;
+	}
 
 	/*
 	 * Use twice as many entries for the CQ ring. It's possible for the
@@ -5310,8 +6749,13 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
 		 * to a power-of-two, if it isn't already. We do NOT impose
 		 * any cq vs sq ring sizing.
 		 */
-		if (p->cq_entries < p->sq_entries || p->cq_entries > IORING_MAX_CQ_ENTRIES)
+		if (p->cq_entries < p->sq_entries)
 			return -EINVAL;
+		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
+			if (!(p->flags & IORING_SETUP_CLAMP))
+				return -EINVAL;
+			p->cq_entries = IORING_MAX_CQ_ENTRIES;
+		}
 		p->cq_entries = roundup_pow_of_two(p->cq_entries);
 	} else {
 		p->cq_entries = 2 * p->sq_entries;
@@ -5376,7 +6820,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
 		goto err;
 
 	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
-			IORING_FEAT_SUBMIT_STABLE;
+			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
+			IORING_FEAT_CUR_PERSONALITY;
 	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
 	return ret;
 err:
@@ -5403,7 +6848,8 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
 	}
 
 	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
-			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE))
+			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
+			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
 		return -EINVAL;
 
 	ret = io_uring_create(entries, &p);
@@ -5422,6 +6868,84 @@ SYSCALL_DEFINE2(io_uring_setup, u32, entries,
 	return io_uring_setup(entries, params);
 }
 
+static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
+{
+	struct io_uring_probe *p;
+	size_t size;
+	int i, ret;
+
+	size = struct_size(p, ops, nr_args);
+	if (size == SIZE_MAX)
+		return -EOVERFLOW;
+	p = kzalloc(size, GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	ret = -EFAULT;
+	if (copy_from_user(p, arg, size))
+		goto out;
+	ret = -EINVAL;
+	if (memchr_inv(p, 0, size))
+		goto out;
+
+	p->last_op = IORING_OP_LAST - 1;
+	if (nr_args > IORING_OP_LAST)
+		nr_args = IORING_OP_LAST;
+
+	for (i = 0; i < nr_args; i++) {
+		p->ops[i].op = i;
+		if (!io_op_defs[i].not_supported)
+			p->ops[i].flags = IO_URING_OP_SUPPORTED;
+	}
+	p->ops_len = i;
+
+	ret = 0;
+	if (copy_to_user(arg, p, size))
+		ret = -EFAULT;
+out:
+	kfree(p);
+	return ret;
+}
+
+static int io_register_personality(struct io_ring_ctx *ctx)
+{
+	const struct cred *creds = get_current_cred();
+	int id;
+
+	id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
+				USHRT_MAX, GFP_KERNEL);
+	if (id < 0)
+		put_cred(creds);
+	return id;
+}
+
+static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
+{
+	const struct cred *old_creds;
+
+	old_creds = idr_remove(&ctx->personality_idr, id);
+	if (old_creds) {
+		put_cred(old_creds);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static bool io_register_op_must_quiesce(int op)
+{
+	switch (op) {
+	case IORING_UNREGISTER_FILES:
+	case IORING_REGISTER_FILES_UPDATE:
+	case IORING_REGISTER_PROBE:
+	case IORING_REGISTER_PERSONALITY:
+	case IORING_UNREGISTER_PERSONALITY:
+		return false;
+	default:
+		return true;
+	}
+}
+
 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			       void __user *arg, unsigned nr_args)
 	__releases(ctx->uring_lock)
@@ -5437,18 +6961,26 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 	if (percpu_ref_is_dying(&ctx->refs))
 		return -ENXIO;
 
-	percpu_ref_kill(&ctx->refs);
+	if (io_register_op_must_quiesce(opcode)) {
+		percpu_ref_kill(&ctx->refs);
 
-	/*
-	 * Drop uring mutex before waiting for references to exit. If another
-	 * thread is currently inside io_uring_enter() it might need to grab
-	 * the uring_lock to make progress. If we hold it here across the drain
-	 * wait, then we can deadlock. It's safe to drop the mutex here, since
-	 * no new references will come in after we've killed the percpu ref.
-	 */
-	mutex_unlock(&ctx->uring_lock);
-	wait_for_completion(&ctx->completions[0]);
-	mutex_lock(&ctx->uring_lock);
+		/*
+		 * Drop uring mutex before waiting for references to exit. If
+		 * another thread is currently inside io_uring_enter() it might
+		 * need to grab the uring_lock to make progress. If we hold it
+		 * here across the drain wait, then we can deadlock. It's safe
+		 * to drop the mutex here, since no new references will come in
+		 * after we've killed the percpu ref.
+		 */
+		mutex_unlock(&ctx->uring_lock);
+		ret = wait_for_completion_interruptible(&ctx->completions[0]);
+		mutex_lock(&ctx->uring_lock);
+		if (ret) {
+			percpu_ref_resurrect(&ctx->refs);
+			ret = -EINTR;
+			goto out;
+		}
+	}
 
 	switch (opcode) {
 	case IORING_REGISTER_BUFFERS:
@@ -5473,10 +7005,17 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 		ret = io_sqe_files_update(ctx, arg, nr_args);
 		break;
 	case IORING_REGISTER_EVENTFD:
+	case IORING_REGISTER_EVENTFD_ASYNC:
 		ret = -EINVAL;
 		if (nr_args != 1)
 			break;
 		ret = io_eventfd_register(ctx, arg);
+		if (ret)
+			break;
+		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
+			ctx->eventfd_async = 1;
+		else
+			ctx->eventfd_async = 0;
 		break;
 	case IORING_UNREGISTER_EVENTFD:
 		ret = -EINVAL;
@@ -5484,14 +7023,35 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			break;
 		ret = io_eventfd_unregister(ctx);
 		break;
+	case IORING_REGISTER_PROBE:
+		ret = -EINVAL;
+		if (!arg || nr_args > 256)
+			break;
+		ret = io_probe(ctx, arg, nr_args);
+		break;
+	case IORING_REGISTER_PERSONALITY:
+		ret = -EINVAL;
+		if (arg || nr_args)
+			break;
+		ret = io_register_personality(ctx);
+		break;
+	case IORING_UNREGISTER_PERSONALITY:
+		ret = -EINVAL;
+		if (arg)
+			break;
+		ret = io_unregister_personality(ctx, nr_args);
+		break;
 	default:
 		ret = -EINVAL;
 		break;
 	}
 
-	/* bring the ctx back to life */
-	reinit_completion(&ctx->completions[0]);
-	percpu_ref_reinit(&ctx->refs);
+	if (io_register_op_must_quiesce(opcode)) {
+		/* bring the ctx back to life */
+		percpu_ref_reinit(&ctx->refs);
+out:
+		reinit_completion(&ctx->completions[0]);
+	}
 	return ret;
 }
 
@@ -5524,6 +7084,40 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
 
 static int __init io_uring_init(void)
 {
+#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
+	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
+	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
+} while (0)
+
+#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
+	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
+	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
+	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
+	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
+	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
+	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
+	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
+	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
+	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
+	BUILD_BUG_SQE_ELEM(24, __u32,  len);
+	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
+	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
+	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
+	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
+	BUILD_BUG_SQE_ELEM(28, __u16,  poll_events);
+	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
+	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
+	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
+	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
+	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
+	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
+	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
+	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
+	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
+	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
+	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
+
+	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
 	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 	return 0;
 };
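
The io_uring hunks above add IORING_REGISTER_PROBE and IORING_REGISTER_PERSONALITY opcodes plus the IORING_SETUP_CLAMP/ATTACH_WQ setup flags. As a hedged illustration only (not part of the patch), the sketch below probes which opcodes a running kernel supports using the raw syscalls; it assumes a uapi <linux/io_uring.h> new enough to define struct io_uring_probe and a libc whose <sys/syscall.h> provides __NR_io_uring_setup / __NR_io_uring_register.

/* Hedged sketch: probe which io_uring opcodes this kernel supports. */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	struct io_uring_params p;
	struct io_uring_probe *probe;
	int ring_fd, i;

	memset(&p, 0, sizeof(p));
	ring_fd = syscall(__NR_io_uring_setup, 4, &p);
	if (ring_fd < 0) {
		perror("io_uring_setup");
		return 1;
	}

	/*
	 * Room for up to 256 ops (the limit enforced for
	 * IORING_REGISTER_PROBE); the buffer must be zeroed, which
	 * calloc() guarantees.
	 */
	probe = calloc(1, sizeof(*probe) + 256 * sizeof(struct io_uring_probe_op));
	if (!probe)
		return 1;

	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
		    probe, 256) < 0) {
		perror("IORING_REGISTER_PROBE");
		return 1;
	}

	for (i = 0; i < probe->ops_len; i++)
		printf("op %2d: %s\n", probe->ops[i].op,
		       (probe->ops[i].flags & IO_URING_OP_SUPPORTED) ?
		       "supported" : "not supported");

	free(probe);
	close(ring_fd);
	return 0;
}
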
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 2f5e4e5..282d45be 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -54,19 +54,32 @@ EXPORT_SYMBOL(vfs_ioctl);
 
 static int ioctl_fibmap(struct file *filp, int __user *p)
 {
-	struct address_space *mapping = filp->f_mapping;
-	int res, block;
+	struct inode *inode = file_inode(filp);
+	int error, ur_block;
+	sector_t block;
 
-	/* do we support this mess? */
-	if (!mapping->a_ops->bmap)
-		return -EINVAL;
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
-	res = get_user(block, p);
-	if (res)
-		return res;
-	res = mapping->a_ops->bmap(mapping, block);
-	return put_user(res, p);
+
+	error = get_user(ur_block, p);
+	if (error)
+		return error;
+
+	if (ur_block < 0)
+		return -EINVAL;
+
+	block = ur_block;
+	error = bmap(inode, &block);
+
+	if (error)
+		ur_block = 0;
+	else
+		ur_block = block;
+
+	if (put_user(ur_block, p))
+		error = -EFAULT;
+
+	return error;
 }
 
 /**
@@ -467,7 +480,7 @@ EXPORT_SYMBOL(generic_block_fiemap);
  * Only the l_start, l_len and l_whence fields of the 'struct space_resv'
  * are used here, rest are ignored.
  */
-int ioctl_preallocate(struct file *filp, int mode, void __user *argp)
+static int ioctl_preallocate(struct file *filp, int mode, void __user *argp)
 {
 	struct inode *inode = file_inode(filp);
 	struct space_resv sr;
@@ -495,8 +508,8 @@ int ioctl_preallocate(struct file *filp, int mode, void __user *argp)
 /* on ia32 l_start is on a 32-bit boundary */
 #if defined CONFIG_COMPAT && defined(CONFIG_X86_64)
 /* just account for different alignment */
-int compat_ioctl_preallocate(struct file *file, int mode,
-				struct space_resv_32 __user *argp)
+static int compat_ioctl_preallocate(struct file *file, int mode,
+				    struct space_resv_32 __user *argp)
 {
 	struct inode *inode = file_inode(file);
 	struct space_resv_32 sr;
@@ -521,17 +534,11 @@ int compat_ioctl_preallocate(struct file *file, int mode,
 }
 #endif
 
-static int file_ioctl(struct file *filp, unsigned int cmd,
-		unsigned long arg)
+static int file_ioctl(struct file *filp, unsigned int cmd, int __user *p)
 {
-	struct inode *inode = file_inode(filp);
-	int __user *p = (int __user *)arg;
-
 	switch (cmd) {
 	case FIBMAP:
 		return ioctl_fibmap(filp, p);
-	case FIONREAD:
-		return put_user(i_size_read(inode) - filp->f_pos, p);
 	case FS_IOC_RESVSP:
 	case FS_IOC_RESVSP64:
 		return ioctl_preallocate(filp, 0, p);
@@ -542,7 +549,7 @@ static int file_ioctl(struct file *filp, unsigned int cmd,
 		return ioctl_preallocate(filp, FALLOC_FL_ZERO_RANGE, p);
 	}
 
-	return vfs_ioctl(filp, cmd, arg);
+	return -ENOIOCTLCMD;
 }
 
 static int ioctl_fionbio(struct file *filp, int __user *argp)
@@ -661,53 +668,48 @@ static int ioctl_file_dedupe_range(struct file *file,
 }
 
 /*
- * When you add any new common ioctls to the switches above and below
- * please update compat_sys_ioctl() too.
- *
  * do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d.
  * It's just a simple helper for sys_ioctl and compat_sys_ioctl.
+ *
+ * When you add any new common ioctls to the switches above and below,
+ * please ensure they have compatible arguments in compat mode.
  */
-int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
-	     unsigned long arg)
+static int do_vfs_ioctl(struct file *filp, unsigned int fd,
+			unsigned int cmd, unsigned long arg)
 {
-	int error = 0;
 	void __user *argp = (void __user *)arg;
 	struct inode *inode = file_inode(filp);
 
 	switch (cmd) {
 	case FIOCLEX:
 		set_close_on_exec(fd, 1);
-		break;
+		return 0;
 
 	case FIONCLEX:
 		set_close_on_exec(fd, 0);
-		break;
+		return 0;
 
 	case FIONBIO:
-		error = ioctl_fionbio(filp, argp);
-		break;
+		return ioctl_fionbio(filp, argp);
 
 	case FIOASYNC:
-		error = ioctl_fioasync(fd, filp, argp);
-		break;
+		return ioctl_fioasync(fd, filp, argp);
 
 	case FIOQSIZE:
 		if (S_ISDIR(inode->i_mode) || S_ISREG(inode->i_mode) ||
 		    S_ISLNK(inode->i_mode)) {
 			loff_t res = inode_get_bytes(inode);
-			error = copy_to_user(argp, &res, sizeof(res)) ?
-					-EFAULT : 0;
-		} else
-			error = -ENOTTY;
-		break;
+			return copy_to_user(argp, &res, sizeof(res)) ?
+					    -EFAULT : 0;
+		}
+
+		return -ENOTTY;
 
 	case FIFREEZE:
-		error = ioctl_fsfreeze(filp);
-		break;
+		return ioctl_fsfreeze(filp);
 
 	case FITHAW:
-		error = ioctl_fsthaw(filp);
-		break;
+		return ioctl_fsthaw(filp);
 
 	case FS_IOC_FIEMAP:
 		return ioctl_fiemap(filp, argp);
@@ -716,6 +718,7 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
 		/* anon_bdev filesystems may not have a block size */
 		if (!inode->i_sb->s_blocksize)
 			return -EINVAL;
+
 		return put_user(inode->i_sb->s_blocksize, (int __user *)argp);
 
 	case FICLONE:
@@ -727,26 +730,39 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
 	case FIDEDUPERANGE:
 		return ioctl_file_dedupe_range(filp, argp);
 
+	case FIONREAD:
+		if (!S_ISREG(inode->i_mode))
+			return vfs_ioctl(filp, cmd, arg);
+
+		return put_user(i_size_read(inode) - filp->f_pos,
+				(int __user *)argp);
+
 	default:
 		if (S_ISREG(inode->i_mode))
-			error = file_ioctl(filp, cmd, arg);
-		else
-			error = vfs_ioctl(filp, cmd, arg);
+			return file_ioctl(filp, cmd, argp);
 		break;
 	}
-	return error;
+
+	return -ENOIOCTLCMD;
 }
 
 int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
 {
-	int error;
 	struct fd f = fdget(fd);
+	int error;
 
 	if (!f.file)
 		return -EBADF;
+
 	error = security_file_ioctl(f.file, cmd, arg);
-	if (!error)
-		error = do_vfs_ioctl(f.file, fd, cmd, arg);
+	if (error)
+		goto out;
+
+	error = do_vfs_ioctl(f.file, fd, cmd, arg);
+	if (error == -ENOIOCTLCMD)
+		error = vfs_ioctl(f.file, cmd, arg);
+
+out:
 	fdput(f);
 	return error;
 }
@@ -788,4 +804,65 @@ long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	return file->f_op->unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
 }
 EXPORT_SYMBOL(compat_ptr_ioctl);
+
+COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
+		       compat_ulong_t, arg)
+{
+	struct fd f = fdget(fd);
+	int error;
+
+	if (!f.file)
+		return -EBADF;
+
+	/* RED-PEN how should LSM module know it's handling 32bit? */
+	error = security_file_ioctl(f.file, cmd, arg);
+	if (error)
+		goto out;
+
+	switch (cmd) {
+	/* FICLONE takes an int argument, so don't use compat_ptr() */
+	case FICLONE:
+		error = ioctl_file_clone(f.file, arg, 0, 0, 0);
+		break;
+
+#if defined(CONFIG_X86_64)
+	/* these get messy on amd64 due to alignment differences */
+	case FS_IOC_RESVSP_32:
+	case FS_IOC_RESVSP64_32:
+		error = compat_ioctl_preallocate(f.file, 0, compat_ptr(arg));
+		break;
+	case FS_IOC_UNRESVSP_32:
+	case FS_IOC_UNRESVSP64_32:
+		error = compat_ioctl_preallocate(f.file, FALLOC_FL_PUNCH_HOLE,
+				compat_ptr(arg));
+		break;
+	case FS_IOC_ZERO_RANGE_32:
+		error = compat_ioctl_preallocate(f.file, FALLOC_FL_ZERO_RANGE,
+				compat_ptr(arg));
+		break;
+#endif
+
+	/*
+	 * everything else in do_vfs_ioctl() takes either a compatible
+	 * pointer argument or no argument -- call it with a modified
+	 * argument.
+	 */
+	default:
+		error = do_vfs_ioctl(f.file, fd, cmd,
+				     (unsigned long)compat_ptr(arg));
+		if (error != -ENOIOCTLCMD)
+			break;
+
+		if (f.file->f_op->compat_ioctl)
+			error = f.file->f_op->compat_ioctl(f.file, cmd, arg);
+		if (error == -ENOIOCTLCMD)
+			error = -ENOTTY;
+		break;
+	}
+
+ out:
+	fdput(f);
+
+	return error;
+}
 #endif
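
The ioctl_fibmap() rewrite above routes FIBMAP through the reworked bmap() helper and now rejects negative block numbers. For reference, a minimal and hedged userspace use of FIBMAP looks like the following; it needs CAP_SYS_RAWIO and a filesystem whose address_space actually implements ->bmap.

/* Hedged sketch: translate a file-relative block number to a disk block. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FIBMAP, FIGETBSZ */

int main(int argc, char **argv)
{
	int fd, blocksize, block = 0;	/* logical block 0 of the file */

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	if (ioctl(fd, FIGETBSZ, &blocksize) < 0)
		return 1;
	/* FIBMAP takes an int in/out: logical block in, physical block out. */
	if (ioctl(fd, FIBMAP, &block) < 0)
		return 1;
	printf("blocksize %d, logical 0 -> physical %d\n", blocksize, block);
	close(fd);
	return 0;
}
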
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 828444e..7c84c4c 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1077,24 +1077,16 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
 	struct page *page = vmf->page;
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	unsigned long length;
-	loff_t offset, size;
+	loff_t offset;
 	ssize_t ret;
 
 	lock_page(page);
-	size = i_size_read(inode);
-	offset = page_offset(page);
-	if (page->mapping != inode->i_mapping || offset > size) {
-		/* We overload EFAULT to mean page got truncated */
-		ret = -EFAULT;
+	ret = page_mkwrite_check_truncate(page, inode);
+	if (ret < 0)
 		goto out_unlock;
-	}
+	length = ret;
 
-	/* page is wholly or partially inside EOF */
-	if (offset > size - PAGE_SIZE)
-		length = offset_in_page(size);
-	else
-		length = PAGE_SIZE;
-
+	offset = page_offset(page);
 	while (length > 0) {
 		ret = iomap_apply(inode, offset, length,
 				IOMAP_WRITE | IOMAP_FAULT, ops, page,
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 8fff667..96bf339 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -164,7 +164,7 @@ void __jbd2_log_wait_for_space(journal_t *journal)
 				       "journal space in %s\n", __func__,
 				       journal->j_devname);
 				WARN_ON(1);
-				jbd2_journal_abort(journal, 0);
+				jbd2_journal_abort(journal, -EIO);
 			}
 			write_lock(&journal->j_state_lock);
 		} else {
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 7f0b362..2494095 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -782,7 +782,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 		err = journal_submit_commit_record(journal, commit_transaction,
 						 &cbh, crc32_sum);
 		if (err)
-			__jbd2_journal_abort_hard(journal);
+			jbd2_journal_abort(journal, err);
 	}
 
 	blk_finish_plug(&plug);
@@ -875,7 +875,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 		err = journal_submit_commit_record(journal, commit_transaction,
 						&cbh, crc32_sum);
 		if (err)
-			__jbd2_journal_abort_hard(journal);
+			jbd2_journal_abort(journal, err);
 	}
 	if (cbh)
 		err = journal_wait_on_commit_record(journal, cbh);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 5e408ee..a49d0e6 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -96,7 +96,6 @@ EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
 EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
 EXPORT_SYMBOL(jbd2_inode_cache);
 
-static void __journal_abort_soft (journal_t *journal, int errno);
 static int jbd2_journal_create_slab(size_t slab_size);
 
 #ifdef CONFIG_JBD2_DEBUG
@@ -795,18 +794,22 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
 {
 	int err = 0;
 	unsigned long long ret;
+	sector_t block = 0;
 
 	if (journal->j_inode) {
-		ret = bmap(journal->j_inode, blocknr);
-		if (ret)
-			*retp = ret;
-		else {
+		block = blocknr;
+		ret = bmap(journal->j_inode, &block);
+
+		if (ret || !block) {
 			printk(KERN_ALERT "%s: journal block not found "
 					"at offset %lu on %s\n",
 			       __func__, blocknr, journal->j_devname);
 			err = -EIO;
-			__journal_abort_soft(journal, err);
+			jbd2_journal_abort(journal, err);
+		} else {
+			*retp = block;
 		}
+
 	} else {
 		*retp = blocknr; /* +journal->j_blk_offset */
 	}
@@ -982,6 +985,7 @@ static void *jbd2_seq_info_start(struct seq_file *seq, loff_t *pos)
 
 static void *jbd2_seq_info_next(struct seq_file *seq, void *v, loff_t *pos)
 {
+	(*pos)++;
 	return NULL;
 }
 
@@ -1074,12 +1078,11 @@ static int jbd2_seq_info_release(struct inode *inode, struct file *file)
 	return seq_release(inode, file);
 }
 
-static const struct file_operations jbd2_seq_info_fops = {
-	.owner		= THIS_MODULE,
-	.open           = jbd2_seq_info_open,
-	.read           = seq_read,
-	.llseek         = seq_lseek,
-	.release        = jbd2_seq_info_release,
+static const struct proc_ops jbd2_info_proc_ops = {
+	.proc_open	= jbd2_seq_info_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= jbd2_seq_info_release,
 };
 
 static struct proc_dir_entry *proc_jbd2_stats;
@@ -1089,7 +1092,7 @@ static void jbd2_stats_proc_init(journal_t *journal)
 	journal->j_proc_entry = proc_mkdir(journal->j_devname, proc_jbd2_stats);
 	if (journal->j_proc_entry) {
 		proc_create_data("info", S_IRUGO, journal->j_proc_entry,
-				 &jbd2_seq_info_fops, journal);
+				 &jbd2_info_proc_ops, journal);
 	}
 }
 
@@ -1244,11 +1247,14 @@ journal_t *jbd2_journal_init_dev(struct block_device *bdev,
 journal_t *jbd2_journal_init_inode(struct inode *inode)
 {
 	journal_t *journal;
+	sector_t blocknr;
 	char *p;
-	unsigned long long blocknr;
+	int err = 0;
 
-	blocknr = bmap(inode, 0);
-	if (!blocknr) {
+	blocknr = 0;
+	err = bmap(inode, &blocknr);
+
+	if (err || !blocknr) {
 		pr_err("%s: Cannot locate journal superblock\n",
 			__func__);
 		return NULL;
@@ -1710,6 +1716,11 @@ int jbd2_journal_load(journal_t *journal)
 		       journal->j_devname);
 		return -EFSCORRUPTED;
 	}
+	/*
+	 * Clear the JBD2_ABORT flag set in journal_init_common() so that
+	 * the log tail information is updated with the newest sequence.
+	 */
+	journal->j_flags &= ~JBD2_ABORT;
 
 	/* OK, we've finished with the dynamic journal bits:
 	 * reinitialise the dynamic contents of the superblock in memory
@@ -1717,7 +1728,6 @@ int jbd2_journal_load(journal_t *journal)
 	if (journal_reset(journal))
 		goto recovery_error;
 
-	journal->j_flags &= ~JBD2_ABORT;
 	journal->j_flags |= JBD2_LOADED;
 	return 0;
 
@@ -2098,67 +2108,6 @@ int jbd2_journal_wipe(journal_t *journal, int write)
 	return err;
 }
 
-/*
- * Journal abort has very specific semantics, which we describe
- * for journal abort.
- *
- * Two internal functions, which provide abort to the jbd layer
- * itself are here.
- */
-
-/*
- * Quick version for internal journal use (doesn't lock the journal).
- * Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
- * and don't attempt to make any other journal updates.
- */
-void __jbd2_journal_abort_hard(journal_t *journal)
-{
-	transaction_t *transaction;
-
-	if (journal->j_flags & JBD2_ABORT)
-		return;
-
-	printk(KERN_ERR "Aborting journal on device %s.\n",
-	       journal->j_devname);
-
-	write_lock(&journal->j_state_lock);
-	journal->j_flags |= JBD2_ABORT;
-	transaction = journal->j_running_transaction;
-	if (transaction)
-		__jbd2_log_start_commit(journal, transaction->t_tid);
-	write_unlock(&journal->j_state_lock);
-}
-
-/* Soft abort: record the abort error status in the journal superblock,
- * but don't do any other IO. */
-static void __journal_abort_soft (journal_t *journal, int errno)
-{
-	int old_errno;
-
-	write_lock(&journal->j_state_lock);
-	old_errno = journal->j_errno;
-	if (!journal->j_errno || errno == -ESHUTDOWN)
-		journal->j_errno = errno;
-
-	if (journal->j_flags & JBD2_ABORT) {
-		write_unlock(&journal->j_state_lock);
-		if (!old_errno && old_errno != -ESHUTDOWN &&
-		    errno == -ESHUTDOWN)
-			jbd2_journal_update_sb_errno(journal);
-		return;
-	}
-	write_unlock(&journal->j_state_lock);
-
-	__jbd2_journal_abort_hard(journal);
-
-	if (errno) {
-		jbd2_journal_update_sb_errno(journal);
-		write_lock(&journal->j_state_lock);
-		journal->j_flags |= JBD2_REC_ERR;
-		write_unlock(&journal->j_state_lock);
-	}
-}
-
 /**
  * void jbd2_journal_abort () - Shutdown the journal immediately.
  * @journal: the journal to shutdown.
@@ -2198,16 +2147,51 @@ static void __journal_abort_soft (journal_t *journal, int errno)
  * failure to disk.  ext3_error, for example, now uses this
  * functionality.
  *
- * Errors which originate from within the journaling layer will NOT
- * supply an errno; a null errno implies that absolutely no further
- * writes are done to the journal (unless there are any already in
- * progress).
- *
  */
 
 void jbd2_journal_abort(journal_t *journal, int errno)
 {
-	__journal_abort_soft(journal, errno);
+	transaction_t *transaction;
+
+	/*
+	 * ESHUTDOWN always takes precedence because a file system check
+	 * caused by any other journal abort error is not required once a
+	 * shutdown has been triggered.
+	 */
+	write_lock(&journal->j_state_lock);
+	if (journal->j_flags & JBD2_ABORT) {
+		int old_errno = journal->j_errno;
+
+		write_unlock(&journal->j_state_lock);
+		if (old_errno != -ESHUTDOWN && errno == -ESHUTDOWN) {
+			journal->j_errno = errno;
+			jbd2_journal_update_sb_errno(journal);
+		}
+		return;
+	}
+
+	/*
+	 * Mark the abort as occurred and start the currently running
+	 * transaction to release all journaled buffers.
+	 */
+	pr_err("Aborting journal on device %s.\n", journal->j_devname);
+
+	journal->j_flags |= JBD2_ABORT;
+	journal->j_errno = errno;
+	transaction = journal->j_running_transaction;
+	if (transaction)
+		__jbd2_log_start_commit(journal, transaction->t_tid);
+	write_unlock(&journal->j_state_lock);
+
+	/*
+	 * Record the errno in the journal superblock so that fsck and the
+	 * jbd2 layer can recognise that a filesystem check is needed.
+	 */
+	jbd2_journal_update_sb_errno(journal);
+
+	write_lock(&journal->j_state_lock);
+	journal->j_flags |= JBD2_REC_ERR;
+	write_unlock(&journal->j_state_lock);
 }
 
 /**
@@ -2556,7 +2540,6 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
 {
 	struct journal_head *jh = bh2jh(bh);
 
-	J_ASSERT_JH(jh, jh->b_jcount >= 0);
 	J_ASSERT_JH(jh, jh->b_transaction == NULL);
 	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
 	J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
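
Several hunks above (ioctl_fibmap(), jbd2_journal_bmap(), jbd2_journal_init_inode()) are converted to the newer in-kernel bmap() interface, which returns an error code and writes the mapped block through a sector_t pointer, leaving it at 0 when the block is unmapped. A hedged kernel-style sketch of a caller, with example_map_block() being a hypothetical helper:

/* Hedged sketch (kernel context): map logical block lblock of inode. */
static int example_map_block(struct inode *inode, sector_t lblock,
			     sector_t *pblock)
{
	sector_t block = lblock;
	int err;

	err = bmap(inode, &block);	/* 0 on success, -EINVAL if unsupported */
	if (err)
		return err;
	if (!block)			/* treat an unmapped block as an error here */
		return -ENOENT;

	*pblock = block;
	return 0;
}
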
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 27b9f9d..e77a5a0 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -525,7 +525,7 @@ EXPORT_SYMBOL(jbd2__journal_start);
  * modified buffers in the log.  We block until the log can guarantee
  * that much space. Additionally, if rsv_blocks > 0, we also create another
  * handle with rsv_blocks reserved blocks in the journal. This handle is
- * is stored in h_rsv_handle. It is not attached to any particular transaction
+ * stored in h_rsv_handle. It is not attached to any particular transaction
  * and thus doesn't block transaction commit. If the caller uses this reserved
  * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
  * on the parent handle will dispose the reserved one. Reserved handle has to
@@ -1595,7 +1595,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
  * Allow this call even if the handle has aborted --- it may be part of
  * the caller's cleanup after an abort.
  */
-int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
+int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
 {
 	transaction_t *transaction = handle->h_transaction;
 	journal_t *journal;
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 0e6406c..05d7878 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -167,27 +167,21 @@ enum {
 	Opt_rp_size,
 };
 
-static const struct fs_parameter_spec jffs2_param_specs[] = {
-	fsparam_enum	("compr",	Opt_override_compr),
-	fsparam_u32	("rp_size",	Opt_rp_size),
-	{}
-};
-
-static const struct fs_parameter_enum jffs2_param_enums[] = {
-	{ Opt_override_compr,	"none",	JFFS2_COMPR_MODE_NONE },
+static const struct constant_table jffs2_param_compr[] = {
+	{"none",	JFFS2_COMPR_MODE_NONE },
 #ifdef CONFIG_JFFS2_LZO
-	{ Opt_override_compr,	"lzo",	JFFS2_COMPR_MODE_FORCELZO },
+	{"lzo",		JFFS2_COMPR_MODE_FORCELZO },
 #endif
 #ifdef CONFIG_JFFS2_ZLIB
-	{ Opt_override_compr,	"zlib",	JFFS2_COMPR_MODE_FORCEZLIB },
+	{"zlib",	JFFS2_COMPR_MODE_FORCEZLIB },
 #endif
 	{}
 };
 
-const struct fs_parameter_description jffs2_fs_parameters = {
-	.name		= "jffs2",
-	.specs		= jffs2_param_specs,
-	.enums		= jffs2_param_enums,
+static const struct fs_parameter_spec jffs2_fs_parameters[] = {
+	fsparam_enum	("compr",	Opt_override_compr, jffs2_param_compr),
+	fsparam_u32	("rp_size",	Opt_rp_size),
+	{}
 };
 
 static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
@@ -196,7 +190,7 @@ static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	struct jffs2_sb_info *c = fc->s_fs_info;
 	int opt;
 
-	opt = fs_parse(fc, &jffs2_fs_parameters, param, &result);
+	opt = fs_parse(fc, jffs2_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
 
@@ -339,7 +333,7 @@ static struct file_system_type jffs2_fs_type = {
 	.owner =	THIS_MODULE,
 	.name =		"jffs2",
 	.init_fs_context = jffs2_init_fs_context,
-	.parameters =	&jffs2_fs_parameters,
+	.parameters =	jffs2_fs_parameters,
 	.kill_sb =	jffs2_kill_sb,
 };
 MODULE_ALIAS_FS("jffs2");
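
The jffs2 hunk above follows the reworked mount-API parameter description: enum values now live in a constant_table passed directly to fsparam_enum(), and file_system_type.parameters points at the fs_parameter_spec array itself. A hedged sketch of the same pattern for a hypothetical "examplefs":

/* Hedged sketch (kernel context, hypothetical examplefs). */
enum { Opt_mode, Opt_quota };

static const struct constant_table examplefs_param_mode[] = {
	{ "fast",	0 },
	{ "safe",	1 },
	{}
};

static const struct fs_parameter_spec examplefs_fs_parameters[] = {
	fsparam_enum("mode",	Opt_mode, examplefs_param_mode),
	fsparam_u32("quota",	Opt_quota),
	{}
};

static int examplefs_parse_param(struct fs_context *fc,
				 struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, examplefs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_mode:
		/* result.uint_32 holds the constant_table value */
		break;
	case Opt_quota:
		/* result.uint_32 holds the parsed u32 */
		break;
	}
	return 0;
}
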
diff --git a/fs/jfs/jfs_debug.c b/fs/jfs/jfs_debug.c
index 888cdd6..44b62b3 100644
--- a/fs/jfs/jfs_debug.c
+++ b/fs/jfs/jfs_debug.c
@@ -43,12 +43,12 @@ static ssize_t jfs_loglevel_proc_write(struct file *file,
 	return count;
 }
 
-static const struct file_operations jfs_loglevel_proc_fops = {
-	.open		= jfs_loglevel_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= jfs_loglevel_proc_write,
+static const struct proc_ops jfs_loglevel_proc_ops = {
+	.proc_open	= jfs_loglevel_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= jfs_loglevel_proc_write,
 };
 #endif
 
@@ -68,7 +68,7 @@ void jfs_proc_init(void)
 #endif
 #ifdef CONFIG_JFS_DEBUG
 	proc_create_single("TxAnchor", 0, base, jfs_txanchor_proc_show);
-	proc_create("loglevel", 0, base, &jfs_loglevel_proc_fops);
+	proc_create("loglevel", 0, base, &jfs_loglevel_proc_ops);
 #endif
 }
 
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index caade18..7dfcab2 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -4027,7 +4027,6 @@ static int dbGetL2AGSize(s64 nblocks)
  */
 #define MAXL0PAGES	(1 + LPERCTL)
 #define MAXL1PAGES	(1 + LPERCTL * MAXL0PAGES)
-#define MAXL2PAGES	(1 + LPERCTL * MAXL1PAGES)
 
 /*
  * convert number of map pages to the zero origin top dmapctl level
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 9d96e68..9aec80b 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -1266,7 +1266,7 @@ void kernfs_activate(struct kernfs_node *kn)
 
 	pos = NULL;
 	while ((pos = kernfs_next_descendant_post(pos, kn))) {
-		if (!pos || (pos->flags & KERNFS_ACTIVATED))
+		if (pos->flags & KERNFS_ACTIVATED)
 			continue;
 
 		WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index eac277c..d0f7a5a 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -160,9 +160,9 @@ static inline void set_inode_attr(struct inode *inode,
 {
 	inode->i_uid = attrs->ia_uid;
 	inode->i_gid = attrs->ia_gid;
-	inode->i_atime = timestamp_truncate(attrs->ia_atime, inode);
-	inode->i_mtime = timestamp_truncate(attrs->ia_mtime, inode);
-	inode->i_ctime = timestamp_truncate(attrs->ia_ctime, inode);
+	inode->i_atime = attrs->ia_atime;
+	inode->i_mtime = attrs->ia_mtime;
+	inode->i_ctime = attrs->ia_ctime;
 }
 
 static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode)
diff --git a/fs/libfs.c b/fs/libfs.c
index 1463b03..c686bd9 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -19,6 +19,7 @@
 #include <linux/buffer_head.h> /* sync_mapping_buffers */
 #include <linux/fs_context.h>
 #include <linux/pseudo_fs.h>
+#include <linux/fsnotify.h>
 
 #include <linux/uaccess.h>
 
@@ -239,6 +240,75 @@ const struct inode_operations simple_dir_inode_operations = {
 };
 EXPORT_SYMBOL(simple_dir_inode_operations);
 
+static struct dentry *find_next_child(struct dentry *parent, struct dentry *prev)
+{
+	struct dentry *child = NULL;
+	struct list_head *p = prev ? &prev->d_child : &parent->d_subdirs;
+
+	spin_lock(&parent->d_lock);
+	while ((p = p->next) != &parent->d_subdirs) {
+		struct dentry *d = container_of(p, struct dentry, d_child);
+		if (simple_positive(d)) {
+			spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+			if (simple_positive(d))
+				child = dget_dlock(d);
+			spin_unlock(&d->d_lock);
+			if (likely(child))
+				break;
+		}
+	}
+	spin_unlock(&parent->d_lock);
+	dput(prev);
+	return child;
+}
+
+void simple_recursive_removal(struct dentry *dentry,
+                              void (*callback)(struct dentry *))
+{
+	struct dentry *this = dget(dentry);
+	while (true) {
+		struct dentry *victim = NULL, *child;
+		struct inode *inode = this->d_inode;
+
+		inode_lock(inode);
+		if (d_is_dir(this))
+			inode->i_flags |= S_DEAD;
+		while ((child = find_next_child(this, victim)) == NULL) {
+			// kill and ascend
+			// update metadata while it's still locked
+			inode->i_ctime = current_time(inode);
+			clear_nlink(inode);
+			inode_unlock(inode);
+			victim = this;
+			this = this->d_parent;
+			inode = this->d_inode;
+			inode_lock(inode);
+			if (simple_positive(victim)) {
+				d_invalidate(victim);	// avoid lost mounts
+				if (d_is_dir(victim))
+					fsnotify_rmdir(inode, victim);
+				else
+					fsnotify_unlink(inode, victim);
+				if (callback)
+					callback(victim);
+				dput(victim);		// unpin it
+			}
+			if (victim == dentry) {
+				inode->i_ctime = inode->i_mtime =
+					current_time(inode);
+				if (d_is_dir(dentry))
+					drop_nlink(inode);
+				inode_unlock(inode);
+				dput(dentry);
+				return;
+			}
+		}
+		inode_unlock(inode);
+		this = child;
+	}
+}
+EXPORT_SYMBOL(simple_recursive_removal);
+
 static const struct super_operations simple_super_operations = {
 	.statfs		= simple_statfs,
 };
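
simple_recursive_removal(), added above, tears down a whole positive dentry subtree of a simple in-memory filesystem; the optional callback runs once per victim with its parent's inode lock held. A hedged sketch of how a hypothetical pseudo-filesystem might use it when removing a directory it created (the examplefs_* names are illustrative, not from the patch):

/* Hedged sketch (kernel context, hypothetical pseudo-filesystem). */
static void examplefs_removal_cb(struct dentry *dentry)
{
	/* Per-victim cleanup hook; called with the parent inode locked. */
	pr_debug("examplefs: removing %pd\n", dentry);
}

static void examplefs_remove_subtree(struct dentry *dentry)
{
	if (!IS_ERR_OR_NULL(dentry))
		simple_recursive_removal(dentry, examplefs_removal_cb);
}
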
diff --git a/fs/lockd/procfs.c b/fs/lockd/procfs.c
index ca9228a..a01f08c 100644
--- a/fs/lockd/procfs.c
+++ b/fs/lockd/procfs.c
@@ -60,11 +60,11 @@ nlm_end_grace_read(struct file *file, char __user *buf, size_t size,
 	return simple_read_from_buffer(buf, size, pos, resp, sizeof(resp));
 }
 
-static const struct file_operations lockd_end_grace_operations = {
-	.write		= nlm_end_grace_write,
-	.read		= nlm_end_grace_read,
-	.llseek		= default_llseek,
-	.release	= simple_transaction_release,
+static const struct proc_ops lockd_end_grace_proc_ops = {
+	.proc_write	= nlm_end_grace_write,
+	.proc_read	= nlm_end_grace_read,
+	.proc_lseek	= default_llseek,
+	.proc_release	= simple_transaction_release,
 };
 
 int __init
@@ -76,7 +76,7 @@ lockd_create_procfs(void)
 	if (!entry)
 		return -ENOMEM;
 	entry = proc_create("nlm_end_grace", S_IRUGO|S_IWUSR, entry,
-				 &lockd_end_grace_operations);
+			    &lockd_end_grace_proc_ops);
 	if (!entry) {
 		remove_proc_entry("fs/lockd", NULL);
 		return -ENOMEM;
diff --git a/fs/namei.c b/fs/namei.c
index 4fb61e0..db6565c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -491,7 +491,7 @@ struct nameidata {
 	struct path	root;
 	struct inode	*inode; /* path.dentry.d_inode */
 	unsigned int	flags;
-	unsigned	seq, m_seq;
+	unsigned	seq, m_seq, r_seq;
 	int		last_type;
 	unsigned	depth;
 	int		total_link_count;
@@ -641,6 +641,14 @@ static bool legitimize_links(struct nameidata *nd)
 
 static bool legitimize_root(struct nameidata *nd)
 {
+	/*
+	 * For scoped-lookups (where nd->root has been zeroed), we need to
+	 * restart the whole lookup from scratch -- because set_root() is wrong
+	 * for these lookups (nd->dfd is the root, not the filesystem root).
+	 */
+	if (!nd->root.mnt && (nd->flags & LOOKUP_IS_SCOPED))
+		return false;
+	/* Nothing to do if nd->root is zero or is managed by the VFS user. */
 	if (!nd->root.mnt || (nd->flags & LOOKUP_ROOT))
 		return true;
 	nd->flags |= LOOKUP_ROOT_GRABBED;
@@ -776,12 +784,37 @@ static int complete_walk(struct nameidata *nd)
 	int status;
 
 	if (nd->flags & LOOKUP_RCU) {
-		if (!(nd->flags & LOOKUP_ROOT))
+		/*
+		 * We don't want to zero nd->root for scoped-lookups or
+		 * externally-managed nd->root.
+		 */
+		if (!(nd->flags & (LOOKUP_ROOT | LOOKUP_IS_SCOPED)))
 			nd->root.mnt = NULL;
 		if (unlikely(unlazy_walk(nd)))
 			return -ECHILD;
 	}
 
+	if (unlikely(nd->flags & LOOKUP_IS_SCOPED)) {
+		/*
+		 * While the guarantee of LOOKUP_IS_SCOPED is (roughly) "don't
+		 * ever step outside the root during lookup" and should already
+		 * be guaranteed by the rest of namei, we want to avoid a namei
+		 * BUG resulting in userspace being given a path that was not
+		 * scoped within the root at some point during the lookup.
+		 *
+		 * So, do a final sanity-check to make sure that in the
+		 * worst-case scenario (a complete bypass of LOOKUP_IS_SCOPED)
+		 * we won't silently return an fd completely outside of the
+		 * requested root to userspace.
+		 *
+		 * Userspace could move the path outside the root after this
+		 * check, but as discussed elsewhere this is not a concern (the
+		 * resolved file was inside the root at some point).
+		 */
+		if (!path_is_under(&nd->path, &nd->root))
+			return -EXDEV;
+	}
+
 	if (likely(!(nd->flags & LOOKUP_JUMPED)))
 		return 0;
 
@@ -798,10 +831,18 @@ static int complete_walk(struct nameidata *nd)
 	return status;
 }
 
-static void set_root(struct nameidata *nd)
+static int set_root(struct nameidata *nd)
 {
 	struct fs_struct *fs = current->fs;
 
+	/*
+	 * Jumping to the real root in a scoped-lookup is a BUG in namei, but we
+	 * still have to ensure it doesn't happen because it will cause a breakout
+	 * from the dirfd.
+	 */
+	if (WARN_ON(nd->flags & LOOKUP_IS_SCOPED))
+		return -ENOTRECOVERABLE;
+
 	if (nd->flags & LOOKUP_RCU) {
 		unsigned seq;
 
@@ -814,6 +855,7 @@ static void set_root(struct nameidata *nd)
 		get_fs_root(fs, &nd->root);
 		nd->flags |= LOOKUP_ROOT_GRABBED;
 	}
+	return 0;
 }
 
 static void path_put_conditional(struct path *path, struct nameidata *nd)
@@ -837,6 +879,18 @@ static inline void path_to_nameidata(const struct path *path,
 
 static int nd_jump_root(struct nameidata *nd)
 {
+	if (unlikely(nd->flags & LOOKUP_BENEATH))
+		return -EXDEV;
+	if (unlikely(nd->flags & LOOKUP_NO_XDEV)) {
+		/* Absolute path arguments to path_init() are allowed. */
+		if (nd->path.mnt != NULL && nd->path.mnt != nd->root.mnt)
+			return -EXDEV;
+	}
+	if (!nd->root.mnt) {
+		int error = set_root(nd);
+		if (error)
+			return error;
+	}
 	if (nd->flags & LOOKUP_RCU) {
 		struct dentry *d;
 		nd->path = nd->root;
@@ -859,14 +913,32 @@ static int nd_jump_root(struct nameidata *nd)
  * Helper to directly jump to a known parsed path from ->get_link,
  * caller must have taken a reference to path beforehand.
  */
-void nd_jump_link(struct path *path)
+int nd_jump_link(struct path *path)
 {
+	int error = -ELOOP;
 	struct nameidata *nd = current->nameidata;
-	path_put(&nd->path);
 
+	if (unlikely(nd->flags & LOOKUP_NO_MAGICLINKS))
+		goto err;
+
+	error = -EXDEV;
+	if (unlikely(nd->flags & LOOKUP_NO_XDEV)) {
+		if (nd->path.mnt != path->mnt)
+			goto err;
+	}
+	/* Not currently safe for scoped-lookups. */
+	if (unlikely(nd->flags & LOOKUP_IS_SCOPED))
+		goto err;
+
+	path_put(&nd->path);
 	nd->path = *path;
 	nd->inode = nd->path.dentry->d_inode;
 	nd->flags |= LOOKUP_JUMPED;
+	return 0;
+
+err:
+	path_put(path);
+	return error;
 }
 
 static inline void put_link(struct nameidata *nd)
@@ -1050,6 +1122,9 @@ const char *get_link(struct nameidata *nd)
 	int error;
 	const char *res;
 
+	if (unlikely(nd->flags & LOOKUP_NO_SYMLINKS))
+		return ERR_PTR(-ELOOP);
+
 	if (!(nd->flags & LOOKUP_RCU)) {
 		touch_atime(&last->link);
 		cond_resched();
@@ -1084,10 +1159,9 @@ const char *get_link(struct nameidata *nd)
 			return res;
 	}
 	if (*res == '/') {
-		if (!nd->root.mnt)
-			set_root(nd);
-		if (unlikely(nd_jump_root(nd)))
-			return ERR_PTR(-ECHILD);
+		error = nd_jump_root(nd);
+		if (unlikely(error))
+			return ERR_PTR(error);
 		while (unlikely(*++res == '/'))
 			;
 	}
@@ -1269,10 +1343,14 @@ static int follow_managed(struct path *path, struct nameidata *nd)
 		break;
 	}
 
-	if (need_mntput && path->mnt == mnt)
-		mntput(path->mnt);
-	if (need_mntput)
-		nd->flags |= LOOKUP_JUMPED;
+	if (need_mntput) {
+		if (path->mnt == mnt)
+			mntput(path->mnt);
+		if (unlikely(nd->flags & LOOKUP_NO_XDEV))
+			ret = -EXDEV;
+		else
+			nd->flags |= LOOKUP_JUMPED;
+	}
 	if (ret == -EISDIR || !ret)
 		ret = 1;
 	if (ret > 0 && unlikely(d_flags_negative(flags)))
@@ -1333,6 +1411,8 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
 		mounted = __lookup_mnt(path->mnt, path->dentry);
 		if (!mounted)
 			break;
+		if (unlikely(nd->flags & LOOKUP_NO_XDEV))
+			return false;
 		path->mnt = &mounted->mnt;
 		path->dentry = mounted->mnt.mnt_root;
 		nd->flags |= LOOKUP_JUMPED;
@@ -1353,8 +1433,11 @@ static int follow_dotdot_rcu(struct nameidata *nd)
 	struct inode *inode = nd->inode;
 
 	while (1) {
-		if (path_equal(&nd->path, &nd->root))
+		if (path_equal(&nd->path, &nd->root)) {
+			if (unlikely(nd->flags & LOOKUP_BENEATH))
+				return -ECHILD;
 			break;
+		}
 		if (nd->path.dentry != nd->path.mnt->mnt_root) {
 			struct dentry *old = nd->path.dentry;
 			struct dentry *parent = old->d_parent;
@@ -1367,7 +1450,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
 			nd->path.dentry = parent;
 			nd->seq = seq;
 			if (unlikely(!path_connected(&nd->path)))
-				return -ENOENT;
+				return -ECHILD;
 			break;
 		} else {
 			struct mount *mnt = real_mount(nd->path.mnt);
@@ -1379,6 +1462,8 @@ static int follow_dotdot_rcu(struct nameidata *nd)
 				return -ECHILD;
 			if (&mparent->mnt == nd->path.mnt)
 				break;
+			if (unlikely(nd->flags & LOOKUP_NO_XDEV))
+				return -ECHILD;
 			/* we know that mountpoint was pinned */
 			nd->path.dentry = mountpoint;
 			nd->path.mnt = &mparent->mnt;
@@ -1393,6 +1478,8 @@ static int follow_dotdot_rcu(struct nameidata *nd)
 			return -ECHILD;
 		if (!mounted)
 			break;
+		if (unlikely(nd->flags & LOOKUP_NO_XDEV))
+			return -ECHILD;
 		nd->path.mnt = &mounted->mnt;
 		nd->path.dentry = mounted->mnt.mnt_root;
 		inode = nd->path.dentry->d_inode;
@@ -1480,9 +1567,12 @@ static int path_parent_directory(struct path *path)
 
 static int follow_dotdot(struct nameidata *nd)
 {
-	while(1) {
-		if (path_equal(&nd->path, &nd->root))
+	while (1) {
+		if (path_equal(&nd->path, &nd->root)) {
+			if (unlikely(nd->flags & LOOKUP_BENEATH))
+				return -EXDEV;
 			break;
+		}
 		if (nd->path.dentry != nd->path.mnt->mnt_root) {
 			int ret = path_parent_directory(&nd->path);
 			if (ret)
@@ -1491,6 +1581,8 @@ static int follow_dotdot(struct nameidata *nd)
 		}
 		if (!follow_up(&nd->path))
 			break;
+		if (unlikely(nd->flags & LOOKUP_NO_XDEV))
+			return -EXDEV;
 	}
 	follow_mount(&nd->path);
 	nd->inode = nd->path.dentry->d_inode;
@@ -1699,12 +1791,33 @@ static inline int may_lookup(struct nameidata *nd)
 static inline int handle_dots(struct nameidata *nd, int type)
 {
 	if (type == LAST_DOTDOT) {
-		if (!nd->root.mnt)
-			set_root(nd);
-		if (nd->flags & LOOKUP_RCU) {
-			return follow_dotdot_rcu(nd);
-		} else
-			return follow_dotdot(nd);
+		int error = 0;
+
+		if (!nd->root.mnt) {
+			error = set_root(nd);
+			if (error)
+				return error;
+		}
+		if (nd->flags & LOOKUP_RCU)
+			error = follow_dotdot_rcu(nd);
+		else
+			error = follow_dotdot(nd);
+		if (error)
+			return error;
+
+		if (unlikely(nd->flags & LOOKUP_IS_SCOPED)) {
+			/*
+			 * If there was a racing rename or mount along our
+			 * path, then we can't be sure that ".." hasn't jumped
+			 * above nd->root (and so userspace should retry or use
+			 * some fallback).
+			 */
+			smp_rmb();
+			if (unlikely(__read_seqcount_retry(&mount_lock.seqcount, nd->m_seq)))
+				return -EAGAIN;
+			if (unlikely(__read_seqcount_retry(&rename_lock.seqcount, nd->r_seq)))
+				return -EAGAIN;
+		}
 	}
 	return 0;
 }
@@ -2158,6 +2271,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
 /* must be paired with terminate_walk() */
 static const char *path_init(struct nameidata *nd, unsigned flags)
 {
+	int error;
 	const char *s = nd->name->name;
 
 	if (!*s)
@@ -2168,6 +2282,11 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
 	nd->last_type = LAST_ROOT; /* if there are only slashes... */
 	nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
 	nd->depth = 0;
+
+	nd->m_seq = __read_seqcount_begin(&mount_lock.seqcount);
+	nd->r_seq = __read_seqcount_begin(&rename_lock.seqcount);
+	smp_rmb();
+
 	if (flags & LOOKUP_ROOT) {
 		struct dentry *root = nd->root.dentry;
 		struct inode *inode = root->d_inode;
@@ -2176,9 +2295,8 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
 		nd->path = nd->root;
 		nd->inode = inode;
 		if (flags & LOOKUP_RCU) {
-			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+			nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
 			nd->root_seq = nd->seq;
-			nd->m_seq = read_seqbegin(&mount_lock);
 		} else {
 			path_get(&nd->path);
 		}
@@ -2189,13 +2307,16 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
 	nd->path.mnt = NULL;
 	nd->path.dentry = NULL;
 
-	nd->m_seq = read_seqbegin(&mount_lock);
-	if (*s == '/') {
-		set_root(nd);
-		if (likely(!nd_jump_root(nd)))
-			return s;
-		return ERR_PTR(-ECHILD);
-	} else if (nd->dfd == AT_FDCWD) {
+	/* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */
+	if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) {
+		error = nd_jump_root(nd);
+		if (unlikely(error))
+			return ERR_PTR(error);
+		return s;
+	}
+
+	/* Relative pathname -- get the starting-point it is relative to. */
+	if (nd->dfd == AT_FDCWD) {
 		if (flags & LOOKUP_RCU) {
 			struct fs_struct *fs = current->fs;
 			unsigned seq;
@@ -2210,7 +2331,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
 			get_fs_pwd(current->fs, &nd->path);
 			nd->inode = nd->path.dentry->d_inode;
 		}
-		return s;
 	} else {
 		/* Caller must check execute permissions on the starting path component */
 		struct fd f = fdget_raw(nd->dfd);
@@ -2235,8 +2355,19 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
 			nd->inode = nd->path.dentry->d_inode;
 		}
 		fdput(f);
-		return s;
 	}
+
+	/* For scoped-lookups we need to set the root to the dirfd as well. */
+	if (flags & LOOKUP_IS_SCOPED) {
+		nd->root = nd->path;
+		if (flags & LOOKUP_RCU) {
+			nd->root_seq = nd->seq;
+		} else {
+			path_get(&nd->root);
+			nd->flags |= LOOKUP_ROOT_GRABBED;
+		}
+	}
+	return s;
 }
 
 static const char *trailing_symlink(struct nameidata *nd)
@@ -3202,8 +3333,8 @@ static int do_last(struct nameidata *nd,
 		   struct file *file, const struct open_flags *op)
 {
 	struct dentry *dir = nd->path.dentry;
-	kuid_t dir_uid = dir->d_inode->i_uid;
-	umode_t dir_mode = dir->d_inode->i_mode;
+	kuid_t dir_uid = nd->inode->i_uid;
+	umode_t dir_mode = nd->inode->i_mode;
 	int open_flag = op->open_flag;
 	bool will_truncate = (open_flag & O_TRUNC) != 0;
 	bool got_write = false;
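
The namei.c changes above implement the LOOKUP_BENEATH, LOOKUP_NO_XDEV, LOOKUP_NO_MAGICLINKS, LOOKUP_NO_SYMLINKS and LOOKUP_IN_ROOT (scoped-lookup) semantics that userspace reaches through openat2(2). A hedged userspace sketch of a scoped open follows; it assumes <linux/openat2.h> is available and that the libc's <sys/syscall.h> defines __NR_openat2, and open_in_jail() is just an illustrative helper name.

/*
 * Hedged sketch: open a path strictly inside a jail directory, with no
 * symlink escapes through magic links and no crossing of mount points.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <sys/syscall.h>
#include <linux/openat2.h>	/* struct open_how, RESOLVE_* */

static int open_in_jail(const char *jail, const char *path)
{
	struct open_how how;
	int dirfd, fd;

	dirfd = open(jail, O_PATH | O_DIRECTORY);
	if (dirfd < 0)
		return -1;

	memset(&how, 0, sizeof(how));
	how.flags = O_RDONLY;
	how.resolve = RESOLVE_IN_ROOT |		/* ".." and "/" stay in the jail */
		      RESOLVE_NO_MAGICLINKS |	/* no /proc/.../fd trickery */
		      RESOLVE_NO_XDEV;		/* don't cross mount points */

	fd = syscall(__NR_openat2, dirfd, path, &how, sizeof(how));
	close(dirfd);
	return fd;
}

A caller would then use, for example, open_in_jail("/srv/jail", "etc/passwd"), and a racing rename or mount during resolution shows up as -EAGAIN, matching the handle_dots() retry logic above.
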
diff --git a/fs/namespace.c b/fs/namespace.c
index 5e1bf61..85b5f7b 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2979,39 +2979,10 @@ static void shrink_submounts(struct mount *mnt)
 	}
 }
 
-/*
- * Some copy_from_user() implementations do not return the exact number of
- * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
- * Note that this function differs from copy_from_user() in that it will oops
- * on bad values of `to', rather than returning a short copy.
- */
-static long exact_copy_from_user(void *to, const void __user * from,
-				 unsigned long n)
-{
-	char *t = to;
-	const char __user *f = from;
-	char c;
-
-	if (!access_ok(from, n))
-		return n;
-
-	while (n) {
-		if (__get_user(c, f)) {
-			memset(t, 0, n);
-			break;
-		}
-		*t++ = c;
-		f++;
-		n--;
-	}
-	return n;
-}
-
 void *copy_mount_options(const void __user * data)
 {
-	int i;
-	unsigned long size;
 	char *copy;
+	unsigned size;
 
 	if (!data)
 		return NULL;
@@ -3020,22 +2991,16 @@ void *copy_mount_options(const void __user * data)
 	if (!copy)
 		return ERR_PTR(-ENOMEM);
 
-	/* We only care that *some* data at the address the user
-	 * gave us is valid.  Just in case, we'll zero
-	 * the remainder of the page.
-	 */
-	/* copy_from_user cannot cross TASK_SIZE ! */
-	size = TASK_SIZE - (unsigned long)untagged_addr(data);
-	if (size > PAGE_SIZE)
-		size = PAGE_SIZE;
+	size = PAGE_SIZE - offset_in_page(data);
 
-	i = size - exact_copy_from_user(copy, data, size);
-	if (!i) {
+	if (copy_from_user(copy, data, size)) {
 		kfree(copy);
 		return ERR_PTR(-EFAULT);
 	}
-	if (i != PAGE_SIZE)
-		memset(copy + i, 0, PAGE_SIZE - i);
+	if (size != PAGE_SIZE) {
+		if (copy_from_user(copy + size, data + size, PAGE_SIZE - size))
+			memset(copy + size, 0, PAGE_SIZE - size);
+	}
 	return copy;
 }
 
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 295a7a2..40b6c5a 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -90,7 +90,7 @@
 config NFS_SWAP
 	bool "Provide swap over NFS support"
 	default n
-	depends on NFS_FS
+	depends on NFS_FS && SWAP
 	select SUNRPC_SWAP
 	help
 	  This option enables swapon to work on files located on NFS mounts.
@@ -196,3 +196,12 @@
 	depends on NFS_FS && SUNRPC_DEBUG
 	select CRC32
 	default y
+
+config NFS_DISABLE_UDP_SUPPORT
+       bool "NFS: Disable NFS UDP protocol support"
+       depends on NFS_FS
+       default y
+       help
+	 Choose Y here to disable the use of NFS over UDP. NFS over UDP
+	 on modern networks (1Gb+) can lead to data corruption caused by
+	 fragmentation during high loads.
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 34cdeae..2433c3e 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -9,7 +9,7 @@
 nfs-y 			:= client.o dir.o file.o getroot.o inode.o super.o \
 			   io.o direct.o pagelist.o read.o symlink.o unlink.o \
 			   write.o namespace.o mount_clnt.o nfstrace.o \
-			   export.o sysfs.o
+			   export.o sysfs.o fs_context.o
 nfs-$(CONFIG_ROOT_NFS)	+= nfsroot.o
 nfs-$(CONFIG_SYSCTL)	+= sysctl.o
 nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 03a20f5..79ff172 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -18,6 +18,7 @@
 #include "callback.h"
 #include "internal.h"
 #include "nfs4session.h"
+#include "nfs4trace.h"
 
 #define CB_OP_TAGLEN_MAXSZ		(512)
 #define CB_OP_HDR_RES_MAXSZ		(2 * 4) // opcode, status
@@ -946,9 +947,13 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
 
 	if (hdr_arg.minorversion == 0) {
 		cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
-		if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) {
-			if (cps.clp)
-				nfs_put_client(cps.clp);
+		if (!cps.clp) {
+			trace_nfs_cb_no_clp(rqstp->rq_xid, hdr_arg.cb_ident);
+			goto out_invalidcred;
+		}
+		if (!check_gss_callback_principal(cps.clp, rqstp)) {
+			trace_nfs_cb_badprinc(rqstp->rq_xid, hdr_arg.cb_ident);
+			nfs_put_client(cps.clp);
 			goto out_invalidcred;
 		}
 	}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 02110a3..989c30c 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -474,6 +474,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
 			to->to_maxval = to->to_initval;
 		to->to_exponential = 0;
 		break;
+#ifndef CONFIG_NFS_DISABLE_UDP_SUPPORT
 	case XPRT_TRANSPORT_UDP:
 		if (retrans == NFS_UNSPEC_RETRANS)
 			to->to_retries = NFS_DEF_UDP_RETRANS;
@@ -484,6 +485,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
 		to->to_maxval = NFS_MAX_UDP_TIMEOUT;
 		to->to_exponential = 1;
 		break;
+#endif
 	default:
 		BUG();
 	}
@@ -580,8 +582,10 @@ static int nfs_start_lockd(struct nfs_server *server)
 		default:
 			nlm_init.protocol = IPPROTO_TCP;
 			break;
+#ifndef CONFIG_NFS_DISABLE_UDP_SUPPORT
 		case XPRT_TRANSPORT_UDP:
 			nlm_init.protocol = IPPROTO_UDP;
+#endif
 	}
 
 	host = nlmclnt_init(&nlm_init);
@@ -658,28 +662,28 @@ EXPORT_SYMBOL_GPL(nfs_init_client);
  * Create a version 2 or 3 client
  */
 static int nfs_init_server(struct nfs_server *server,
-			   const struct nfs_parsed_mount_data *data,
-			   struct nfs_subversion *nfs_mod)
+			   const struct fs_context *fc)
 {
+	const struct nfs_fs_context *ctx = nfs_fc2context(fc);
 	struct rpc_timeout timeparms;
 	struct nfs_client_initdata cl_init = {
-		.hostname = data->nfs_server.hostname,
-		.addr = (const struct sockaddr *)&data->nfs_server.address,
-		.addrlen = data->nfs_server.addrlen,
-		.nfs_mod = nfs_mod,
-		.proto = data->nfs_server.protocol,
-		.net = data->net,
+		.hostname = ctx->nfs_server.hostname,
+		.addr = (const struct sockaddr *)&ctx->nfs_server.address,
+		.addrlen = ctx->nfs_server.addrlen,
+		.nfs_mod = ctx->nfs_mod,
+		.proto = ctx->nfs_server.protocol,
+		.net = fc->net_ns,
 		.timeparms = &timeparms,
 		.cred = server->cred,
-		.nconnect = data->nfs_server.nconnect,
+		.nconnect = ctx->nfs_server.nconnect,
 		.init_flags = (1UL << NFS_CS_REUSEPORT),
 	};
 	struct nfs_client *clp;
 	int error;
 
-	nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
-			data->timeo, data->retrans);
-	if (data->flags & NFS_MOUNT_NORESVPORT)
+	nfs_init_timeout_values(&timeparms, ctx->nfs_server.protocol,
+				ctx->timeo, ctx->retrans);
+	if (ctx->flags & NFS_MOUNT_NORESVPORT)
 		set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
 
 	/* Allocate or find a client reference we can use */
@@ -690,46 +694,46 @@ static int nfs_init_server(struct nfs_server *server,
 	server->nfs_client = clp;
 
 	/* Initialise the client representation from the mount data */
-	server->flags = data->flags;
-	server->options = data->options;
+	server->flags = ctx->flags;
+	server->options = ctx->options;
 	server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
 		NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
 		NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
 
-	if (data->rsize)
-		server->rsize = nfs_block_size(data->rsize, NULL);
-	if (data->wsize)
-		server->wsize = nfs_block_size(data->wsize, NULL);
+	if (ctx->rsize)
+		server->rsize = nfs_block_size(ctx->rsize, NULL);
+	if (ctx->wsize)
+		server->wsize = nfs_block_size(ctx->wsize, NULL);
 
-	server->acregmin = data->acregmin * HZ;
-	server->acregmax = data->acregmax * HZ;
-	server->acdirmin = data->acdirmin * HZ;
-	server->acdirmax = data->acdirmax * HZ;
+	server->acregmin = ctx->acregmin * HZ;
+	server->acregmax = ctx->acregmax * HZ;
+	server->acdirmin = ctx->acdirmin * HZ;
+	server->acdirmax = ctx->acdirmax * HZ;
 
 	/* Start lockd here, before we might error out */
 	error = nfs_start_lockd(server);
 	if (error < 0)
 		goto error;
 
-	server->port = data->nfs_server.port;
-	server->auth_info = data->auth_info;
+	server->port = ctx->nfs_server.port;
+	server->auth_info = ctx->auth_info;
 
 	error = nfs_init_server_rpcclient(server, &timeparms,
-					  data->selected_flavor);
+					  ctx->selected_flavor);
 	if (error < 0)
 		goto error;
 
 	/* Preserve the values of mount_server-related mount options */
-	if (data->mount_server.addrlen) {
-		memcpy(&server->mountd_address, &data->mount_server.address,
-			data->mount_server.addrlen);
-		server->mountd_addrlen = data->mount_server.addrlen;
+	if (ctx->mount_server.addrlen) {
+		memcpy(&server->mountd_address, &ctx->mount_server.address,
+			ctx->mount_server.addrlen);
+		server->mountd_addrlen = ctx->mount_server.addrlen;
 	}
-	server->mountd_version = data->mount_server.version;
-	server->mountd_port = data->mount_server.port;
-	server->mountd_protocol = data->mount_server.protocol;
+	server->mountd_version = ctx->mount_server.version;
+	server->mountd_port = ctx->mount_server.port;
+	server->mountd_protocol = ctx->mount_server.protocol;
 
-	server->namelen  = data->namlen;
+	server->namelen  = ctx->namlen;
 	return 0;
 
 error:
@@ -951,9 +955,9 @@ EXPORT_SYMBOL_GPL(nfs_free_server);
  * Create a version 2 or 3 volume record
  * - keyed on server and FSID
  */
-struct nfs_server *nfs_create_server(struct nfs_mount_info *mount_info,
-				     struct nfs_subversion *nfs_mod)
+struct nfs_server *nfs_create_server(struct fs_context *fc)
 {
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
 	struct nfs_server *server;
 	struct nfs_fattr *fattr;
 	int error;
@@ -970,18 +974,18 @@ struct nfs_server *nfs_create_server(struct nfs_mount_info *mount_info,
 		goto error;
 
 	/* Get a client representation */
-	error = nfs_init_server(server, mount_info->parsed, nfs_mod);
+	error = nfs_init_server(server, fc);
 	if (error < 0)
 		goto error;
 
 	/* Probe the root fh to retrieve its FSID */
-	error = nfs_probe_fsinfo(server, mount_info->mntfh, fattr);
+	error = nfs_probe_fsinfo(server, ctx->mntfh, fattr);
 	if (error < 0)
 		goto error;
 	if (server->nfs_client->rpc_ops->version == 3) {
 		if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
 			server->namelen = NFS3_MAXNAMLEN;
-		if (!(mount_info->parsed->flags & NFS_MOUNT_NORDIRPLUS))
+		if (!(ctx->flags & NFS_MOUNT_NORDIRPLUS))
 			server->caps |= NFS_CAP_READDIRPLUS;
 	} else {
 		if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
@@ -989,8 +993,8 @@ struct nfs_server *nfs_create_server(struct nfs_mount_info *mount_info,
 	}
 
 	if (!(fattr->valid & NFS_ATTR_FATTR)) {
-		error = nfs_mod->rpc_ops->getattr(server, mount_info->mntfh,
-				fattr, NULL, NULL);
+		error = ctx->nfs_mod->rpc_ops->getattr(server, ctx->mntfh,
+						       fattr, NULL, NULL);
 		if (error < 0) {
 			dprintk("nfs_create_server: getattr error = %d\n", -error);
 			goto error;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index fe57b2b..4a84107 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -25,13 +25,32 @@
 #include "internal.h"
 #include "nfs4trace.h"
 
-static void nfs_free_delegation(struct nfs_delegation *delegation)
+#define NFS_DEFAULT_DELEGATION_WATERMARK (5000U)
+
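+/*
+ * Delegations are returned on close once the number of active delegations
+ * reaches nfs_delegation_watermark (see nfs4_inode_return_delegation_on_close()).
+ * The watermark is runtime-tunable via the "delegation_watermark" module
+ * parameter.
+ */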
+static atomic_long_t nfs_active_delegations;
+static unsigned nfs_delegation_watermark = NFS_DEFAULT_DELEGATION_WATERMARK;
+
+static void __nfs_free_delegation(struct nfs_delegation *delegation)
 {
 	put_cred(delegation->cred);
 	delegation->cred = NULL;
 	kfree_rcu(delegation, rcu);
 }
 
+static void nfs_mark_delegation_revoked(struct nfs_delegation *delegation)
+{
+	if (!test_and_set_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+		delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
+		atomic_long_dec(&nfs_active_delegations);
+	}
+}
+
+static void nfs_free_delegation(struct nfs_delegation *delegation)
+{
+	nfs_mark_delegation_revoked(delegation);
+	__nfs_free_delegation(delegation);
+}
+
 /**
  * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
  * @delegation: delegation to process
@@ -343,7 +362,8 @@ nfs_update_inplace_delegation(struct nfs_delegation *delegation,
 		delegation->stateid.seqid = update->stateid.seqid;
 		smp_wmb();
 		delegation->type = update->type;
-		clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
+		if (test_and_clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+			atomic_long_inc(&nfs_active_delegations);
 	}
 }
 
@@ -423,6 +443,8 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
 	rcu_assign_pointer(nfsi->delegation, delegation);
 	delegation = NULL;
 
+	atomic_long_inc(&nfs_active_delegations);
+
 	trace_nfs4_set_delegation(inode, type);
 
 	spin_lock(&inode->i_lock);
@@ -432,7 +454,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
 out:
 	spin_unlock(&clp->cl_lock);
 	if (delegation != NULL)
-		nfs_free_delegation(delegation);
+		__nfs_free_delegation(delegation);
 	if (freeme != NULL) {
 		nfs_do_return_delegation(inode, freeme, 0);
 		nfs_free_delegation(freeme);
@@ -479,7 +501,7 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
 
 	if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
 		ret = true;
-	if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) {
+	else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) {
 		struct inode *inode;
 
 		spin_lock(&delegation->lock);
@@ -488,6 +510,8 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
 			ret = true;
 		spin_unlock(&delegation->lock);
 	}
+	if (ret)
+		clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
 	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
 	    test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
 		ret = false;
@@ -607,6 +631,7 @@ void nfs_inode_evict_delegation(struct inode *inode)
 
 	delegation = nfs_inode_detach_delegation(inode);
 	if (delegation != NULL) {
+		set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
 		set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
 		nfs_do_return_delegation(inode, delegation, 1);
 		nfs_free_delegation(delegation);
@@ -637,6 +662,40 @@ int nfs4_inode_return_delegation(struct inode *inode)
 }
 
 /**
+ * nfs4_inode_return_delegation_on_close - asynchronously return a delegation
+ * @inode: inode to process
+ *
+ * This routine is called on file close in order to determine if the
+ * inode delegation needs to be returned immediately.
+ */
+void nfs4_inode_return_delegation_on_close(struct inode *inode)
+{
+	struct nfs_delegation *delegation;
+	struct nfs_delegation *ret = NULL;
+
+	if (!inode)
+		return;
+	rcu_read_lock();
+	delegation = nfs4_get_valid_delegation(inode);
+	if (!delegation)
+		goto out;
+	if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) ||
+	    atomic_long_read(&nfs_active_delegations) >= nfs_delegation_watermark) {
+		spin_lock(&delegation->lock);
+		if (delegation->inode &&
+		    list_empty(&NFS_I(inode)->open_files) &&
+		    !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+			clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+			ret = delegation;
+		}
+		spin_unlock(&delegation->lock);
+	}
+out:
+	rcu_read_unlock();
+	nfs_end_delegation_return(inode, ret, 0);
+}
+
+/**
  * nfs4_inode_make_writeable
  * @inode: pointer to inode
  *
@@ -760,13 +819,6 @@ static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *cl
 	rcu_read_unlock();
 }
 
-static void nfs_mark_delegation_revoked(struct nfs_server *server,
-		struct nfs_delegation *delegation)
-{
-	set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
-	delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
-}
-
 static void nfs_revoke_delegation(struct inode *inode,
 		const nfs4_stateid *stateid)
 {
@@ -794,7 +846,7 @@ static void nfs_revoke_delegation(struct inode *inode,
 		}
 		spin_unlock(&delegation->lock);
 	}
-	nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
+	nfs_mark_delegation_revoked(delegation);
 	ret = true;
 out:
 	rcu_read_unlock();
@@ -833,7 +885,7 @@ void nfs_delegation_mark_returned(struct inode *inode,
 			delegation->stateid.seqid = stateid->seqid;
 	}
 
-	nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
+	nfs_mark_delegation_revoked(delegation);
 
 out_clear_returning:
 	clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
@@ -1317,3 +1369,5 @@ bool nfs4_delegation_flush_on_close(const struct inode *inode)
 	rcu_read_unlock();
 	return ret;
 }
+
+module_param_named(delegation_watermark, nfs_delegation_watermark, uint, 0644);
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 15d3484..31b8460 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -42,6 +42,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
 void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
 		fmode_t type, const nfs4_stateid *stateid, unsigned long pagemod_limit);
 int nfs4_inode_return_delegation(struct inode *inode);
+void nfs4_inode_return_delegation_on_close(struct inode *inode);
 int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
 void nfs_inode_evict_delegation(struct inode *inode);
 
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index e180033..1320288 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -58,7 +58,7 @@ static void nfs_readdir_clear_array(struct page*);
 const struct file_operations nfs_dir_operations = {
 	.llseek		= nfs_llseek_dir,
 	.read		= generic_read_dir,
-	.iterate	= nfs_readdir,
+	.iterate_shared	= nfs_readdir,
 	.open		= nfs_opendir,
 	.release	= nfs_closedir,
 	.fsync		= nfs_fsync_dir,
@@ -162,6 +162,17 @@ typedef struct {
 	bool eof;
 } nfs_readdir_descriptor_t;
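+/*
+ * nfs_readdir_init_array() resets the nfs_cache_array stored in @page:
+ * it zeroes the header and marks the end-of-directory index as unknown.
+ */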
 
+static
+void nfs_readdir_init_array(struct page *page)
+{
+	struct nfs_cache_array *array;
+
+	array = kmap_atomic(page);
+	memset(array, 0, sizeof(struct nfs_cache_array));
+	array->eof_index = -1;
+	kunmap_atomic(array);
+}
+
 /*
  * we are freeing strings created by nfs_add_to_readdir_array()
  */
@@ -174,6 +185,7 @@ void nfs_readdir_clear_array(struct page *page)
 	array = kmap_atomic(page);
 	for (i = 0; i < array->size; i++)
 		kfree(array->array[i].string.name);
+	array->size = 0;
 	kunmap_atomic(array);
 }
 
@@ -186,7 +198,7 @@ static
 int nfs_readdir_make_qstr(struct qstr *string, const char *name, unsigned int len)
 {
 	string->len = len;
-	string->name = kmemdup(name, len, GFP_KERNEL);
+	string->name = kmemdup_nul(name, len, GFP_KERNEL);
 	if (string->name == NULL)
 		return -ENOMEM;
 	/*
@@ -437,7 +449,8 @@ void nfs_force_use_readdirplus(struct inode *dir)
 	if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
 	    !list_empty(&nfsi->open_files)) {
 		set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
-		invalidate_mapping_pages(dir->i_mapping, 0, -1);
+		invalidate_mapping_pages(dir->i_mapping,
+			nfsi->page_index + 1, -1);
 	}
 }
 
@@ -610,6 +623,8 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
 	int status = -ENOMEM;
 	unsigned int array_size = ARRAY_SIZE(pages);
 
+	nfs_readdir_init_array(page);
+
 	entry.prev_cookie = 0;
 	entry.cookie = desc->last_cookie;
 	entry.eof = 0;
@@ -626,8 +641,6 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
 	}
 
 	array = kmap(page);
-	memset(array, 0, sizeof(struct nfs_cache_array));
-	array->eof_index = -1;
 
 	status = nfs_readdir_alloc_pages(pages, array_size);
 	if (status < 0)
@@ -682,6 +695,7 @@ int nfs_readdir_filler(void *data, struct page* page)
 	unlock_page(page);
 	return 0;
  error:
+	nfs_readdir_clear_array(page);
 	unlock_page(page);
 	return ret;
 }
@@ -689,8 +703,6 @@ int nfs_readdir_filler(void *data, struct page* page)
 static
 void cache_page_release(nfs_readdir_descriptor_t *desc)
 {
-	if (!desc->page->mapping)
-		nfs_readdir_clear_array(desc->page);
 	put_page(desc->page);
 	desc->page = NULL;
 }
@@ -704,19 +716,32 @@ struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
 
 /*
  * Returns 0 if desc->dir_cookie was found on page desc->page_index
+ * and locks the page to prevent removal from the page cache.
  */
 static
-int find_cache_page(nfs_readdir_descriptor_t *desc)
+int find_and_lock_cache_page(nfs_readdir_descriptor_t *desc)
 {
+	struct inode *inode = file_inode(desc->file);
+	struct nfs_inode *nfsi = NFS_I(inode);
 	int res;
 
 	desc->page = get_cache_page(desc);
 	if (IS_ERR(desc->page))
 		return PTR_ERR(desc->page);
-
-	res = nfs_readdir_search_array(desc);
+	res = lock_page_killable(desc->page);
 	if (res != 0)
-		cache_page_release(desc);
+		goto error;
+	res = -EAGAIN;
+	if (desc->page->mapping != NULL) {
+		res = nfs_readdir_search_array(desc);
+		if (res == 0) {
+			nfsi->page_index = desc->page_index;
+			return 0;
+		}
+	}
+	unlock_page(desc->page);
+error:
+	cache_page_release(desc);
 	return res;
 }
 
@@ -731,7 +756,7 @@ int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
 		desc->last_cookie = 0;
 	}
 	do {
-		res = find_cache_page(desc);
+		res = find_and_lock_cache_page(desc);
 	} while (res == -EAGAIN);
 	return res;
 }
@@ -770,7 +795,6 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc)
 		desc->eof = true;
 
 	kunmap(desc->page);
-	cache_page_release(desc);
 	dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n",
 			(unsigned long long)*desc->dir_cookie, res);
 	return res;
@@ -816,13 +840,13 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc)
 
 	status = nfs_do_filldir(desc);
 
+ out_release:
+	nfs_readdir_clear_array(desc->page);
+	cache_page_release(desc);
  out:
 	dfprintk(DIRCACHE, "NFS: %s: returns %d\n",
 			__func__, status);
 	return status;
- out_release:
-	cache_page_release(desc);
-	goto out;
 }
 
 /* The file offset position represents the dirent entry number.  A
@@ -887,6 +911,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
 			break;
 
 		res = nfs_do_filldir(desc);
+		unlock_page(desc->page);
+		cache_page_release(desc);
 		if (res < 0)
 			break;
 	} while (!desc->eof);
@@ -1142,10 +1168,17 @@ nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
 	if (fhandle == NULL || fattr == NULL || IS_ERR(label))
 		goto out;
 
-	ret = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
+	ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr, label);
 	if (ret < 0) {
-		if (ret == -ESTALE || ret == -ENOENT)
+		switch (ret) {
+		case -ESTALE:
+		case -ENOENT:
 			ret = 0;
+			break;
+		case -ETIMEDOUT:
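+			/* On a "softreval" mount, treat a revalidation timeout as success */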
+			if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
+				ret = 1;
+		}
 		goto out;
 	}
 	ret = 0;
@@ -1408,7 +1441,7 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
 		goto out;
 
 	trace_nfs_lookup_enter(dir, dentry, flags);
-	error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
+	error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr, label);
 	if (error == -ENOENT)
 		goto no_entry;
 	if (error < 0) {
@@ -1683,7 +1716,7 @@ nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle,
 	d_drop(dentry);
 
 	if (fhandle->size == 0) {
-		error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, NULL);
+		error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr, NULL);
 		if (error)
 			goto out_error;
 	}
@@ -2312,11 +2345,11 @@ static int nfs_access_get_cached(struct inode *inode, const struct cred *cred, s
 		/* Found an entry, is our attribute cache valid? */
 		if (!nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
 			break;
+		if (!retry)
+			break;
 		err = -ECHILD;
 		if (!may_block)
 			goto out;
-		if (!retry)
-			goto out_zap;
 		spin_unlock(&inode->i_lock);
 		err = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
 		if (err)
@@ -2353,7 +2386,7 @@ static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cre
 	lh = rcu_dereference(nfsi->access_cache_entry_lru.prev);
 	cache = list_entry(lh, struct nfs_access_entry, lru);
 	if (lh == &nfsi->access_cache_entry_lru ||
-	    cred != cache->cred)
+	    cred_fscmp(cred, cache->cred) != 0)
 		cache = NULL;
 	if (cache == NULL)
 		goto out;
@@ -2476,7 +2509,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
 {
 	struct nfs_access_entry cache;
 	bool may_block = (mask & MAY_NOT_BLOCK) == 0;
-	int cache_mask;
+	int cache_mask = -1;
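+	/* -1 keeps the exit tracepoint well-defined if we bail out before the cached mask is known */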
 	int status;
 
 	trace_nfs_access_enter(inode);
@@ -2515,7 +2548,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
 	if ((mask & ~cache_mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) != 0)
 		status = -EACCES;
 out:
-	trace_nfs_access_exit(inode, status);
+	trace_nfs_access_exit(inode, mask, cache_mask, status);
 	return status;
 }
 
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 040a50f..b768a0b 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -245,10 +245,10 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
 					 data->ds_commit_index);
 
 	/* verifier not set so always fail */
-	if (verfp->committed < 0)
+	if (verfp->committed < 0 || data->res.verf->committed <= NFS_UNSTABLE)
 		return 1;
 
-	return nfs_direct_cmp_verf(verfp, &data->verf);
+	return nfs_direct_cmp_verf(verfp, data->res.verf);
 }
 
 /**
@@ -824,7 +824,8 @@ static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
 		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 		/* fake unstable write to let common nfs resend pages */
 		hdr->verf.committed = NFS_UNSTABLE;
-		hdr->good_bytes = hdr->args.count;
+		hdr->good_bytes = hdr->args.offset + hdr->args.count -
+			hdr->io_start;
 	}
 	spin_unlock(&dreq->lock);
 }
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index aec769a..89bd558 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -93,7 +93,7 @@ static void nfs_dns_ent_init(struct cache_head *cnew,
 	key = container_of(ckey, struct nfs_dns_ent, h);
 
 	kfree(new->hostname);
-	new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL);
+	new->hostname = kmemdup_nul(key->hostname, key->namelen, GFP_KERNEL);
 	if (new->hostname) {
 		new->namelen = key->namelen;
 		nfs_dns_ent_update(cnew, ckey);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 8eb731d..f96367a 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -204,44 +204,39 @@ EXPORT_SYMBOL_GPL(nfs_file_mmap);
 static int
 nfs_file_fsync_commit(struct file *file, int datasync)
 {
-	struct nfs_open_context *ctx = nfs_file_open_context(file);
 	struct inode *inode = file_inode(file);
-	int do_resend, status;
-	int ret = 0;
+	int ret;
 
 	dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);
 
 	nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
-	do_resend = test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
-	status = nfs_commit_inode(inode, FLUSH_SYNC);
-	if (status == 0)
-		status = file_check_and_advance_wb_err(file);
-	if (status < 0) {
-		ret = status;
-		goto out;
-	}
-	do_resend |= test_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
-	if (do_resend)
-		ret = -EAGAIN;
-out:
-	return ret;
+	ret = nfs_commit_inode(inode, FLUSH_SYNC);
+	if (ret < 0)
+		return ret;
+	return file_check_and_advance_wb_err(file);
 }
 
 int
 nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
-	int ret;
+	struct nfs_open_context *ctx = nfs_file_open_context(file);
 	struct inode *inode = file_inode(file);
+	int ret;
 
 	trace_nfs_fsync_enter(inode);
 
-	do {
+	for (;;) {
 		ret = file_write_and_wait_range(file, start, end);
 		if (ret != 0)
 			break;
 		ret = nfs_file_fsync_commit(file, datasync);
-		if (!ret)
-			ret = pnfs_sync_inode(inode, !!datasync);
+		if (ret != 0)
+			break;
+		ret = pnfs_sync_inode(inode, !!datasync);
+		if (ret != 0)
+			break;
+		if (!test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags))
+			break;
 		/*
 		 * If nfs_file_fsync_commit detected a server reboot, then
 		 * resend all dirty pages that might have been covered by
@@ -249,7 +244,7 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 		 */
 		start = 0;
 		end = LLONG_MAX;
-	} while (ret == -EAGAIN);
+	}
 
 	trace_nfs_fsync_exit(inode, ret);
 	return ret;
@@ -489,7 +484,19 @@ static int nfs_launder_page(struct page *page)
 static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
 						sector_t *span)
 {
+	unsigned long blocks;
+	long long isize;
 	struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);
+	struct inode *inode = file->f_mapping->host;
+
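+	/*
+	 * A swapfile must be fully preallocated: refuse to activate one whose
+	 * allocated blocks do not cover its size (i.e. it has holes).
+	 */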
+	spin_lock(&inode->i_lock);
+	blocks = inode->i_blocks;
+	isize = inode->i_size;
+	spin_unlock(&inode->i_lock);
+	if (blocks*512 < isize) {
+		pr_warn("swap activate: swapfile has holes\n");
+		return -EINVAL;
+	}
 
 	*span = sis->pages;
 
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 5657b7f..bb9148b 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1266,9 +1266,10 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
 
 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
 					int idx, u64 offset, u64 length,
-					u32 status, int opnum, int error)
+					u32 *op_status, int opnum, int error)
 {
 	struct nfs4_ff_layout_mirror *mirror;
+	u32 status = *op_status;
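+	/*
+	 * Local errnos mapped to NFS4ERR_* codes below are written back
+	 * through *op_status so the caller sees the translated value.
+	 */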
 	int err;
 
 	if (status == 0) {
@@ -1286,10 +1287,10 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
 		case -ENOBUFS:
 		case -EPIPE:
 		case -EPERM:
-			status = NFS4ERR_NXIO;
+			*op_status = status = NFS4ERR_NXIO;
 			break;
 		case -EACCES:
-			status = NFS4ERR_ACCESS;
+			*op_status = status = NFS4ERR_ACCESS;
 			break;
 		default:
 			return;
@@ -1321,16 +1322,19 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
 	int new_idx = hdr->pgio_mirror_idx;
 	int err;
 
-	trace_nfs4_pnfs_read(hdr, task->tk_status);
-	if (task->tk_status < 0)
+	if (task->tk_status < 0) {
 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
 					    hdr->args.offset, hdr->args.count,
-					    hdr->res.op_status, OP_READ,
+					    &hdr->res.op_status, OP_READ,
 					    task->tk_status);
+		trace_ff_layout_read_error(hdr);
+	}
+
 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
 					   hdr->ds_clp, hdr->lseg,
 					   hdr->pgio_mirror_idx);
 
+	trace_nfs4_pnfs_read(hdr, err);
 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
 	switch (err) {
@@ -1494,16 +1498,19 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
 	loff_t end_offs = 0;
 	int err;
 
-	trace_nfs4_pnfs_write(hdr, task->tk_status);
-	if (task->tk_status < 0)
+	if (task->tk_status < 0) {
 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
 					    hdr->args.offset, hdr->args.count,
-					    hdr->res.op_status, OP_WRITE,
+					    &hdr->res.op_status, OP_WRITE,
 					    task->tk_status);
+		trace_ff_layout_write_error(hdr);
+	}
+
 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
 					   hdr->ds_clp, hdr->lseg,
 					   hdr->pgio_mirror_idx);
 
+	trace_nfs4_pnfs_write(hdr, err);
 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
 	switch (err) {
@@ -1537,15 +1544,18 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
 {
 	int err;
 
-	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
-	if (task->tk_status < 0)
+	if (task->tk_status < 0) {
 		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
 					    data->args.offset, data->args.count,
-					    data->res.op_status, OP_COMMIT,
+					    &data->res.op_status, OP_COMMIT,
 					    task->tk_status);
+		trace_ff_layout_commit_error(data);
+	}
+
 	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
 					   data->lseg, data->ds_commit_index);
 
+	trace_nfs4_pnfs_commit_ds(data, err);
 	switch (err) {
 	case -NFS4ERR_RESET_TO_PNFS:
 		pnfs_generic_prepare_to_resend_writes(data);
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
new file mode 100644
index 0000000..e1b9384
--- /dev/null
+++ b/fs/nfs/fs_context.c
@@ -0,0 +1,1440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/fs/nfs/fs_context.c
+ *
+ * Copyright (C) 1992 Rick Sladkey
+ * Conversion to new mount api Copyright (C) David Howells
+ *
+ * NFS mount handling.
+ *
+ * Split from fs/nfs/super.c by David Howells <dhowells@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/nfs4_mount.h>
+#include "nfs.h"
+#include "internal.h"
+
+#define NFSDBG_FACILITY		NFSDBG_MOUNT
+
+#if IS_ENABLED(CONFIG_NFS_V3)
+#define NFS_DEFAULT_VERSION 3
+#else
+#define NFS_DEFAULT_VERSION 2
+#endif
+
+#define NFS_MAX_CONNECTIONS 16
+
+enum nfs_param {
+	Opt_ac,
+	Opt_acdirmax,
+	Opt_acdirmin,
+	Opt_acl,
+	Opt_acregmax,
+	Opt_acregmin,
+	Opt_actimeo,
+	Opt_addr,
+	Opt_bg,
+	Opt_bsize,
+	Opt_clientaddr,
+	Opt_cto,
+	Opt_fg,
+	Opt_fscache,
+	Opt_fscache_flag,
+	Opt_hard,
+	Opt_intr,
+	Opt_local_lock,
+	Opt_lock,
+	Opt_lookupcache,
+	Opt_migration,
+	Opt_minorversion,
+	Opt_mountaddr,
+	Opt_mounthost,
+	Opt_mountport,
+	Opt_mountproto,
+	Opt_mountvers,
+	Opt_namelen,
+	Opt_nconnect,
+	Opt_port,
+	Opt_posix,
+	Opt_proto,
+	Opt_rdirplus,
+	Opt_rdma,
+	Opt_resvport,
+	Opt_retrans,
+	Opt_retry,
+	Opt_rsize,
+	Opt_sec,
+	Opt_sharecache,
+	Opt_sloppy,
+	Opt_soft,
+	Opt_softerr,
+	Opt_softreval,
+	Opt_source,
+	Opt_tcp,
+	Opt_timeo,
+	Opt_udp,
+	Opt_v,
+	Opt_vers,
+	Opt_wsize,
+};
+
+enum {
+	Opt_local_lock_all,
+	Opt_local_lock_flock,
+	Opt_local_lock_none,
+	Opt_local_lock_posix,
+};
+
+static const struct constant_table nfs_param_enums_local_lock[] = {
+	{ "all",		Opt_local_lock_all },
+	{ "flock",	Opt_local_lock_flock },
+	{ "none",		Opt_local_lock_none },
+	{}
+};
+
+enum {
+	Opt_lookupcache_all,
+	Opt_lookupcache_none,
+	Opt_lookupcache_positive,
+};
+
+static const struct constant_table nfs_param_enums_lookupcache[] = {
+	{ "all",		Opt_lookupcache_all },
+	{ "none",		Opt_lookupcache_none },
+	{ "pos",		Opt_lookupcache_positive },
+	{ "positive",		Opt_lookupcache_positive },
+	{}
+};
+
+static const struct fs_parameter_spec nfs_fs_parameters[] = {
+	fsparam_flag_no("ac",		Opt_ac),
+	fsparam_u32   ("acdirmax",	Opt_acdirmax),
+	fsparam_u32   ("acdirmin",	Opt_acdirmin),
+	fsparam_flag_no("acl",		Opt_acl),
+	fsparam_u32   ("acregmax",	Opt_acregmax),
+	fsparam_u32   ("acregmin",	Opt_acregmin),
+	fsparam_u32   ("actimeo",	Opt_actimeo),
+	fsparam_string("addr",		Opt_addr),
+	fsparam_flag  ("bg",		Opt_bg),
+	fsparam_u32   ("bsize",		Opt_bsize),
+	fsparam_string("clientaddr",	Opt_clientaddr),
+	fsparam_flag_no("cto",		Opt_cto),
+	fsparam_flag  ("fg",		Opt_fg),
+	fsparam_flag_no("fsc",		Opt_fscache_flag),
+	fsparam_string("fsc",		Opt_fscache),
+	fsparam_flag  ("hard",		Opt_hard),
+	__fsparam(NULL, "intr",		Opt_intr,
+		  fs_param_neg_with_no|fs_param_deprecated, NULL),
+	fsparam_enum  ("local_lock",	Opt_local_lock, nfs_param_enums_local_lock),
+	fsparam_flag_no("lock",		Opt_lock),
+	fsparam_enum  ("lookupcache",	Opt_lookupcache, nfs_param_enums_lookupcache),
+	fsparam_flag_no("migration",	Opt_migration),
+	fsparam_u32   ("minorversion",	Opt_minorversion),
+	fsparam_string("mountaddr",	Opt_mountaddr),
+	fsparam_string("mounthost",	Opt_mounthost),
+	fsparam_u32   ("mountport",	Opt_mountport),
+	fsparam_string("mountproto",	Opt_mountproto),
+	fsparam_u32   ("mountvers",	Opt_mountvers),
+	fsparam_u32   ("namlen",	Opt_namelen),
+	fsparam_u32   ("nconnect",	Opt_nconnect),
+	fsparam_string("nfsvers",	Opt_vers),
+	fsparam_u32   ("port",		Opt_port),
+	fsparam_flag_no("posix",	Opt_posix),
+	fsparam_string("proto",		Opt_proto),
+	fsparam_flag_no("rdirplus",	Opt_rdirplus),
+	fsparam_flag  ("rdma",		Opt_rdma),
+	fsparam_flag_no("resvport",	Opt_resvport),
+	fsparam_u32   ("retrans",	Opt_retrans),
+	fsparam_string("retry",		Opt_retry),
+	fsparam_u32   ("rsize",		Opt_rsize),
+	fsparam_string("sec",		Opt_sec),
+	fsparam_flag_no("sharecache",	Opt_sharecache),
+	fsparam_flag  ("sloppy",	Opt_sloppy),
+	fsparam_flag  ("soft",		Opt_soft),
+	fsparam_flag  ("softerr",	Opt_softerr),
+	fsparam_flag  ("softreval",	Opt_softreval),
+	fsparam_string("source",	Opt_source),
+	fsparam_flag  ("tcp",		Opt_tcp),
+	fsparam_u32   ("timeo",		Opt_timeo),
+	fsparam_flag  ("udp",		Opt_udp),
+	fsparam_flag  ("v2",		Opt_v),
+	fsparam_flag  ("v3",		Opt_v),
+	fsparam_flag  ("v4",		Opt_v),
+	fsparam_flag  ("v4.0",		Opt_v),
+	fsparam_flag  ("v4.1",		Opt_v),
+	fsparam_flag  ("v4.2",		Opt_v),
+	fsparam_string("vers",		Opt_vers),
+	fsparam_u32   ("wsize",		Opt_wsize),
+	{}
+};
+
+enum {
+	Opt_vers_2,
+	Opt_vers_3,
+	Opt_vers_4,
+	Opt_vers_4_0,
+	Opt_vers_4_1,
+	Opt_vers_4_2,
+};
+
+static const struct constant_table nfs_vers_tokens[] = {
+	{ "2",		Opt_vers_2 },
+	{ "3",		Opt_vers_3 },
+	{ "4",		Opt_vers_4 },
+	{ "4.0",	Opt_vers_4_0 },
+	{ "4.1",	Opt_vers_4_1 },
+	{ "4.2",	Opt_vers_4_2 },
+};
+
+enum {
+	Opt_xprt_rdma,
+	Opt_xprt_rdma6,
+	Opt_xprt_tcp,
+	Opt_xprt_tcp6,
+	Opt_xprt_udp,
+	Opt_xprt_udp6,
+	nr__Opt_xprt
+};
+
+static const struct constant_table nfs_xprt_protocol_tokens[nr__Opt_xprt] = {
+	{ "rdma",	Opt_xprt_rdma },
+	{ "rdma6",	Opt_xprt_rdma6 },
+	{ "tcp",	Opt_xprt_tcp },
+	{ "tcp6",	Opt_xprt_tcp6 },
+	{ "udp",	Opt_xprt_udp },
+	{ "udp6",	Opt_xprt_udp6 },
+};
+
+enum {
+	Opt_sec_krb5,
+	Opt_sec_krb5i,
+	Opt_sec_krb5p,
+	Opt_sec_lkey,
+	Opt_sec_lkeyi,
+	Opt_sec_lkeyp,
+	Opt_sec_none,
+	Opt_sec_spkm,
+	Opt_sec_spkmi,
+	Opt_sec_spkmp,
+	Opt_sec_sys,
+	nr__Opt_sec
+};
+
+static const struct constant_table nfs_secflavor_tokens[] = {
+	{ "krb5",	Opt_sec_krb5 },
+	{ "krb5i",	Opt_sec_krb5i },
+	{ "krb5p",	Opt_sec_krb5p },
+	{ "lkey",	Opt_sec_lkey },
+	{ "lkeyi",	Opt_sec_lkeyi },
+	{ "lkeyp",	Opt_sec_lkeyp },
+	{ "none",	Opt_sec_none },
+	{ "null",	Opt_sec_none },
+	{ "spkm3",	Opt_sec_spkm },
+	{ "spkm3i",	Opt_sec_spkmi },
+	{ "spkm3p",	Opt_sec_spkmp },
+	{ "sys",	Opt_sec_sys },
+};
+
+/*
+ * Sanity-check a server address provided by the mount command.
+ *
+ * Address family must be initialized, and address must not be
+ * the ANY address for that family.
+ */
+static int nfs_verify_server_address(struct sockaddr *addr)
+{
+	switch (addr->sa_family) {
+	case AF_INET: {
+		struct sockaddr_in *sa = (struct sockaddr_in *)addr;
+		return sa->sin_addr.s_addr != htonl(INADDR_ANY);
+	}
+	case AF_INET6: {
+		struct in6_addr *sa = &((struct sockaddr_in6 *)addr)->sin6_addr;
+		return !ipv6_addr_any(sa);
+	}
+	}
+
+	dfprintk(MOUNT, "NFS: Invalid IP address specified\n");
+	return 0;
+}
+
+/*
+ * Sanity check the NFS transport protocol.
+ */
+static void nfs_validate_transport_protocol(struct nfs_fs_context *ctx)
+{
+	switch (ctx->nfs_server.protocol) {
+	case XPRT_TRANSPORT_UDP:
+	case XPRT_TRANSPORT_TCP:
+	case XPRT_TRANSPORT_RDMA:
+		break;
+	default:
+		ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+	}
+}
+
+/*
+ * For text based NFSv2/v3 mounts, the mount protocol transport default
+ * settings should depend upon the specified NFS transport.
+ */
+static void nfs_set_mount_transport_protocol(struct nfs_fs_context *ctx)
+{
+	nfs_validate_transport_protocol(ctx);
+
+	if (ctx->mount_server.protocol == XPRT_TRANSPORT_UDP ||
+	    ctx->mount_server.protocol == XPRT_TRANSPORT_TCP)
+			return;
+	switch (ctx->nfs_server.protocol) {
+	case XPRT_TRANSPORT_UDP:
+		ctx->mount_server.protocol = XPRT_TRANSPORT_UDP;
+		break;
+	case XPRT_TRANSPORT_TCP:
+	case XPRT_TRANSPORT_RDMA:
+		ctx->mount_server.protocol = XPRT_TRANSPORT_TCP;
+	}
+}
+
+/*
+ * Add 'flavor' to 'auth_info' if not already present.
+ * Returns 0 if 'flavor' ends up in the list, a negative error otherwise.
+ */
+static int nfs_auth_info_add(struct fs_context *fc,
+			     struct nfs_auth_info *auth_info,
+			     rpc_authflavor_t flavor)
+{
+	unsigned int i;
+	unsigned int max_flavor_len = ARRAY_SIZE(auth_info->flavors);
+
+	/* make sure this flavor isn't already in the list */
+	for (i = 0; i < auth_info->flavor_len; i++) {
+		if (flavor == auth_info->flavors[i])
+			return 0;
+	}
+
+	if (auth_info->flavor_len + 1 >= max_flavor_len)
+		return nfs_invalf(fc, "NFS: too many sec= flavors");
+
+	auth_info->flavors[auth_info->flavor_len++] = flavor;
+	return 0;
+}
+
+/*
+ * Parse the value of the 'sec=' option.
+ */
+static int nfs_parse_security_flavors(struct fs_context *fc,
+				      struct fs_parameter *param)
+{
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	rpc_authflavor_t pseudoflavor;
+	char *string = param->string, *p;
+	int ret;
+
+	dfprintk(MOUNT, "NFS: parsing %s=%s option\n", param->key, param->string);
+
+	while ((p = strsep(&string, ":")) != NULL) {
+		if (!*p)
+			continue;
+		switch (lookup_constant(nfs_secflavor_tokens, p, -1)) {
+		case Opt_sec_none:
+			pseudoflavor = RPC_AUTH_NULL;
+			break;
+		case Opt_sec_sys:
+			pseudoflavor = RPC_AUTH_UNIX;
+			break;
+		case Opt_sec_krb5:
+			pseudoflavor = RPC_AUTH_GSS_KRB5;
+			break;
+		case Opt_sec_krb5i:
+			pseudoflavor = RPC_AUTH_GSS_KRB5I;
+			break;
+		case Opt_sec_krb5p:
+			pseudoflavor = RPC_AUTH_GSS_KRB5P;
+			break;
+		case Opt_sec_lkey:
+			pseudoflavor = RPC_AUTH_GSS_LKEY;
+			break;
+		case Opt_sec_lkeyi:
+			pseudoflavor = RPC_AUTH_GSS_LKEYI;
+			break;
+		case Opt_sec_lkeyp:
+			pseudoflavor = RPC_AUTH_GSS_LKEYP;
+			break;
+		case Opt_sec_spkm:
+			pseudoflavor = RPC_AUTH_GSS_SPKM;
+			break;
+		case Opt_sec_spkmi:
+			pseudoflavor = RPC_AUTH_GSS_SPKMI;
+			break;
+		case Opt_sec_spkmp:
+			pseudoflavor = RPC_AUTH_GSS_SPKMP;
+			break;
+		default:
+			return nfs_invalf(fc, "NFS: sec=%s option not recognized", p);
+		}
+
+		ret = nfs_auth_info_add(fc, &ctx->auth_info, pseudoflavor);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int nfs_parse_version_string(struct fs_context *fc,
+				    const char *string)
+{
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+
+	ctx->flags &= ~NFS_MOUNT_VER3;
+	switch (lookup_constant(nfs_vers_tokens, string, -1)) {
+	case Opt_vers_2:
+		ctx->version = 2;
+		break;
+	case Opt_vers_3:
+		ctx->flags |= NFS_MOUNT_VER3;
+		ctx->version = 3;
+		break;
+	case Opt_vers_4:
+		/* Backward compatibility option. In future,
+		 * the mount program should always supply
+		 * an NFSv4 minor version number.
+		 */
+		ctx->version = 4;
+		break;
+	case Opt_vers_4_0:
+		ctx->version = 4;
+		ctx->minorversion = 0;
+		break;
+	case Opt_vers_4_1:
+		ctx->version = 4;
+		ctx->minorversion = 1;
+		break;
+	case Opt_vers_4_2:
+		ctx->version = 4;
+		ctx->minorversion = 2;
+		break;
+	default:
+		return nfs_invalf(fc, "NFS: Unsupported NFS version");
+	}
+	return 0;
+}
+
+/*
+ * Parse a single mount parameter.
+ */
+static int nfs_fs_context_parse_param(struct fs_context *fc,
+				      struct fs_parameter *param)
+{
+	struct fs_parse_result result;
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	unsigned short protofamily, mountfamily;
+	unsigned int len;
+	int ret, opt;
+
+	dfprintk(MOUNT, "NFS:   parsing nfs mount option '%s'\n", param->key);
+
+	opt = fs_parse(fc, nfs_fs_parameters, param, &result);
+	if (opt < 0)
+		return ctx->sloppy ? 1 : opt;
+
+	switch (opt) {
+	case Opt_source:
+		if (fc->source)
+			return nfs_invalf(fc, "NFS: Multiple sources not supported");
+		fc->source = param->string;
+		param->string = NULL;
+		break;
+
+		/*
+		 * boolean options:  foo/nofoo
+		 */
+	case Opt_soft:
+		ctx->flags |= NFS_MOUNT_SOFT;
+		ctx->flags &= ~NFS_MOUNT_SOFTERR;
+		break;
+	case Opt_softerr:
+		ctx->flags |= NFS_MOUNT_SOFTERR | NFS_MOUNT_SOFTREVAL;
+		ctx->flags &= ~NFS_MOUNT_SOFT;
+		break;
+	case Opt_hard:
+		ctx->flags &= ~(NFS_MOUNT_SOFT |
+				NFS_MOUNT_SOFTERR |
+				NFS_MOUNT_SOFTREVAL);
+		break;
+	case Opt_softreval:
+		if (result.negated)
+			ctx->flags &= ~NFS_MOUNT_SOFTREVAL;
+		else
+			ctx->flags |= NFS_MOUNT_SOFTREVAL;
+		break;
+	case Opt_posix:
+		if (result.negated)
+			ctx->flags &= ~NFS_MOUNT_POSIX;
+		else
+			ctx->flags |= NFS_MOUNT_POSIX;
+		break;
+	case Opt_cto:
+		if (result.negated)
+			ctx->flags |= NFS_MOUNT_NOCTO;
+		else
+			ctx->flags &= ~NFS_MOUNT_NOCTO;
+		break;
+	case Opt_ac:
+		if (result.negated)
+			ctx->flags |= NFS_MOUNT_NOAC;
+		else
+			ctx->flags &= ~NFS_MOUNT_NOAC;
+		break;
+	case Opt_lock:
+		if (result.negated) {
+			ctx->flags |= NFS_MOUNT_NONLM;
+			ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL);
+		} else {
+			ctx->flags &= ~NFS_MOUNT_NONLM;
+			ctx->flags &= ~(NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL);
+		}
+		break;
+	case Opt_udp:
+		ctx->flags &= ~NFS_MOUNT_TCP;
+		ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP;
+		break;
+	case Opt_tcp:
+		ctx->flags |= NFS_MOUNT_TCP;
+		ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+		break;
+	case Opt_rdma:
+		ctx->flags |= NFS_MOUNT_TCP; /* for side protocols */
+		ctx->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
+		xprt_load_transport(param->key);
+		break;
+	case Opt_acl:
+		if (result.negated)
+			ctx->flags |= NFS_MOUNT_NOACL;
+		else
+			ctx->flags &= ~NFS_MOUNT_NOACL;
+		break;
+	case Opt_rdirplus:
+		if (result.negated)
+			ctx->flags |= NFS_MOUNT_NORDIRPLUS;
+		else
+			ctx->flags &= ~NFS_MOUNT_NORDIRPLUS;
+		break;
+	case Opt_sharecache:
+		if (result.negated)
+			ctx->flags |= NFS_MOUNT_UNSHARED;
+		else
+			ctx->flags &= ~NFS_MOUNT_UNSHARED;
+		break;
+	case Opt_resvport:
+		if (result.negated)
+			ctx->flags |= NFS_MOUNT_NORESVPORT;
+		else
+			ctx->flags &= ~NFS_MOUNT_NORESVPORT;
+		break;
+	case Opt_fscache_flag:
+		if (result.negated)
+			ctx->options &= ~NFS_OPTION_FSCACHE;
+		else
+			ctx->options |= NFS_OPTION_FSCACHE;
+		kfree(ctx->fscache_uniq);
+		ctx->fscache_uniq = NULL;
+		break;
+	case Opt_fscache:
+		ctx->options |= NFS_OPTION_FSCACHE;
+		kfree(ctx->fscache_uniq);
+		ctx->fscache_uniq = param->string;
+		param->string = NULL;
+		break;
+	case Opt_migration:
+		if (result.negated)
+			ctx->options &= ~NFS_OPTION_MIGRATION;
+		else
+			ctx->options |= NFS_OPTION_MIGRATION;
+		break;
+
+		/*
+		 * options that take numeric values
+		 */
+	case Opt_port:
+		if (result.uint_32 > USHRT_MAX)
+			goto out_of_bounds;
+		ctx->nfs_server.port = result.uint_32;
+		break;
+	case Opt_rsize:
+		ctx->rsize = result.uint_32;
+		break;
+	case Opt_wsize:
+		ctx->wsize = result.uint_32;
+		break;
+	case Opt_bsize:
+		ctx->bsize = result.uint_32;
+		break;
+	case Opt_timeo:
+		if (result.uint_32 < 1 || result.uint_32 > INT_MAX)
+			goto out_of_bounds;
+		ctx->timeo = result.uint_32;
+		break;
+	case Opt_retrans:
+		if (result.uint_32 > INT_MAX)
+			goto out_of_bounds;
+		ctx->retrans = result.uint_32;
+		break;
+	case Opt_acregmin:
+		ctx->acregmin = result.uint_32;
+		break;
+	case Opt_acregmax:
+		ctx->acregmax = result.uint_32;
+		break;
+	case Opt_acdirmin:
+		ctx->acdirmin = result.uint_32;
+		break;
+	case Opt_acdirmax:
+		ctx->acdirmax = result.uint_32;
+		break;
+	case Opt_actimeo:
+		ctx->acregmin = result.uint_32;
+		ctx->acregmax = result.uint_32;
+		ctx->acdirmin = result.uint_32;
+		ctx->acdirmax = result.uint_32;
+		break;
+	case Opt_namelen:
+		ctx->namlen = result.uint_32;
+		break;
+	case Opt_mountport:
+		if (result.uint_32 > USHRT_MAX)
+			goto out_of_bounds;
+		ctx->mount_server.port = result.uint_32;
+		break;
+	case Opt_mountvers:
+		if (result.uint_32 < NFS_MNT_VERSION ||
+		    result.uint_32 > NFS_MNT3_VERSION)
+			goto out_of_bounds;
+		ctx->mount_server.version = result.uint_32;
+		break;
+	case Opt_minorversion:
+		if (result.uint_32 > NFS4_MAX_MINOR_VERSION)
+			goto out_of_bounds;
+		ctx->minorversion = result.uint_32;
+		break;
+
+		/*
+		 * options that take text values
+		 */
+	case Opt_v:
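+		/* skip the leading 'v' so that e.g. "v4.1" is parsed as "4.1" */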
+		ret = nfs_parse_version_string(fc, param->key + 1);
+		if (ret < 0)
+			return ret;
+		break;
+	case Opt_vers:
+		ret = nfs_parse_version_string(fc, param->string);
+		if (ret < 0)
+			return ret;
+		break;
+	case Opt_sec:
+		ret = nfs_parse_security_flavors(fc, param);
+		if (ret < 0)
+			return ret;
+		break;
+
+	case Opt_proto:
+		protofamily = AF_INET;
+		switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
+		case Opt_xprt_udp6:
+			protofamily = AF_INET6;
+			/* fall through */
+		case Opt_xprt_udp:
+			ctx->flags &= ~NFS_MOUNT_TCP;
+			ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP;
+			break;
+		case Opt_xprt_tcp6:
+			protofamily = AF_INET6;
+			/* fall through */
+		case Opt_xprt_tcp:
+			ctx->flags |= NFS_MOUNT_TCP;
+			ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+			break;
+		case Opt_xprt_rdma6:
+			protofamily = AF_INET6;
+			/* fall through */
+		case Opt_xprt_rdma:
+			/* vector side protocols to TCP */
+			ctx->flags |= NFS_MOUNT_TCP;
+			ctx->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
+			xprt_load_transport(param->string);
+			break;
+		default:
+			return nfs_invalf(fc, "NFS: Unrecognized transport protocol");
+		}
+
+		ctx->protofamily = protofamily;
+		break;
+
+	case Opt_mountproto:
+		mountfamily = AF_INET;
+		switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
+		case Opt_xprt_udp6:
+			mountfamily = AF_INET6;
+			/* fall through */
+		case Opt_xprt_udp:
+			ctx->mount_server.protocol = XPRT_TRANSPORT_UDP;
+			break;
+		case Opt_xprt_tcp6:
+			mountfamily = AF_INET6;
+			/* fall through */
+		case Opt_xprt_tcp:
+			ctx->mount_server.protocol = XPRT_TRANSPORT_TCP;
+			break;
+		case Opt_xprt_rdma: /* not used for side protocols */
+		default:
+			return nfs_invalf(fc, "NFS: Unrecognized transport protocol");
+		}
+		ctx->mountfamily = mountfamily;
+		break;
+
+	case Opt_addr:
+		len = rpc_pton(fc->net_ns, param->string, param->size,
+			       &ctx->nfs_server.address,
+			       sizeof(ctx->nfs_server._address));
+		if (len == 0)
+			goto out_invalid_address;
+		ctx->nfs_server.addrlen = len;
+		break;
+	case Opt_clientaddr:
+		kfree(ctx->client_address);
+		ctx->client_address = param->string;
+		param->string = NULL;
+		break;
+	case Opt_mounthost:
+		kfree(ctx->mount_server.hostname);
+		ctx->mount_server.hostname = param->string;
+		param->string = NULL;
+		break;
+	case Opt_mountaddr:
+		len = rpc_pton(fc->net_ns, param->string, param->size,
+			       &ctx->mount_server.address,
+			       sizeof(ctx->mount_server._address));
+		if (len == 0)
+			goto out_invalid_address;
+		ctx->mount_server.addrlen = len;
+		break;
+	case Opt_nconnect:
+		if (result.uint_32 < 1 || result.uint_32 > NFS_MAX_CONNECTIONS)
+			goto out_of_bounds;
+		ctx->nfs_server.nconnect = result.uint_32;
+		break;
+	case Opt_lookupcache:
+		switch (result.uint_32) {
+		case Opt_lookupcache_all:
+			ctx->flags &= ~(NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE);
+			break;
+		case Opt_lookupcache_positive:
+			ctx->flags &= ~NFS_MOUNT_LOOKUP_CACHE_NONE;
+			ctx->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG;
+			break;
+		case Opt_lookupcache_none:
+			ctx->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE;
+			break;
+		default:
+			goto out_invalid_value;
+		}
+		break;
+	case Opt_local_lock:
+		switch (result.uint_32) {
+		case Opt_local_lock_all:
+			ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK |
+				       NFS_MOUNT_LOCAL_FCNTL);
+			break;
+		case Opt_local_lock_flock:
+			ctx->flags |= NFS_MOUNT_LOCAL_FLOCK;
+			break;
+		case Opt_local_lock_posix:
+			ctx->flags |= NFS_MOUNT_LOCAL_FCNTL;
+			break;
+		case Opt_local_lock_none:
+			ctx->flags &= ~(NFS_MOUNT_LOCAL_FLOCK |
+					NFS_MOUNT_LOCAL_FCNTL);
+			break;
+		default:
+			goto out_invalid_value;
+		}
+		break;
+
+		/*
+		 * Special options
+		 */
+	case Opt_sloppy:
+		ctx->sloppy = true;
+		dfprintk(MOUNT, "NFS:   relaxing parsing rules\n");
+		break;
+	}
+
+	return 0;
+
+out_invalid_value:
+	return nfs_invalf(fc, "NFS: Bad mount option value specified");
+out_invalid_address:
+	return nfs_invalf(fc, "NFS: Bad IP address specified");
+out_of_bounds:
+	return nfs_invalf(fc, "NFS: Value for '%s' out of range", param->key);
+}
+
+/*
+ * Split fc->source into "hostname:export_path".
+ *
+ * The leftmost colon demarks the split between the server's hostname
+ * and the export path.  If the hostname starts with a left square
+ * bracket, then it may contain colons.
+ *
+ * Note: caller frees hostname and export path, even on error.
+ */
+static int nfs_parse_source(struct fs_context *fc,
+			    size_t maxnamlen, size_t maxpathlen)
+{
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	const char *dev_name = fc->source;
+	size_t len;
+	const char *end;
+
+	if (unlikely(!dev_name || !*dev_name)) {
+		dfprintk(MOUNT, "NFS: device name not specified\n");
+		return -EINVAL;
+	}
+
+	/* Is the host name protected with square brackets? */
+	if (*dev_name == '[') {
+		end = strchr(++dev_name, ']');
+		if (end == NULL || end[1] != ':')
+			goto out_bad_devname;
+
+		len = end - dev_name;
+		end++;
+	} else {
+		const char *comma;
+
+		end = strchr(dev_name, ':');
+		if (end == NULL)
+			goto out_bad_devname;
+		len = end - dev_name;
+
+		/* kill possible hostname list: not supported */
+		comma = memchr(dev_name, ',', len);
+		if (comma)
+			len = comma - dev_name;
+	}
+
+	if (len > maxnamlen)
+		goto out_hostname;
+
+	/* N.B. caller will free nfs_server.hostname in all cases */
+	ctx->nfs_server.hostname = kmemdup_nul(dev_name, len, GFP_KERNEL);
+	if (!ctx->nfs_server.hostname)
+		goto out_nomem;
+	len = strlen(++end);
+	if (len > maxpathlen)
+		goto out_path;
+	ctx->nfs_server.export_path = kmemdup_nul(end, len, GFP_KERNEL);
+	if (!ctx->nfs_server.export_path)
+		goto out_nomem;
+
+	dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", ctx->nfs_server.export_path);
+	return 0;
+
+out_bad_devname:
+	return nfs_invalf(fc, "NFS: device name not in host:path format");
+out_nomem:
+	nfs_errorf(fc, "NFS: not enough memory to parse device name");
+	return -ENOMEM;
+out_hostname:
+	nfs_errorf(fc, "NFS: server hostname too long");
+	return -ENAMETOOLONG;
+out_path:
+	nfs_errorf(fc, "NFS: export pathname too long");
+	return -ENAMETOOLONG;
+}
+
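+/* An fs_context that already has a root dentry is a remount/reconfiguration */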
+static inline bool is_remount_fc(struct fs_context *fc)
+{
+	return fc->root != NULL;
+}
+
+/*
+ * Parse monolithic NFS2/NFS3 mount data
+ * - fills in the mount root filehandle
+ *
+ * For option strings, user space handles the following behaviors:
+ *
+ * + DNS: mapping server host name to IP address ("addr=" option)
+ *
+ * + failure mode: how to behave if a mount request can't be handled
+ *   immediately ("fg/bg" option)
+ *
+ * + retry: how often to retry a mount request ("retry=" option)
+ *
+ * + breaking back: trying proto=udp after proto=tcp, v2 after v3,
+ *   mountproto=tcp after mountproto=udp, and so on
+ */
+static int nfs23_parse_monolithic(struct fs_context *fc,
+				  struct nfs_mount_data *data)
+{
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	struct nfs_fh *mntfh = ctx->mntfh;
+	struct sockaddr *sap = (struct sockaddr *)&ctx->nfs_server.address;
+	int extra_flags = NFS_MOUNT_LEGACY_INTERFACE;
+
+	if (data == NULL)
+		goto out_no_data;
+
+	ctx->version = NFS_DEFAULT_VERSION;
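+	/*
+	 * Each older binary mount data version falls through the cases
+	 * below, filling in the fields that it lacked.
+	 */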
+	switch (data->version) {
+	case 1:
+		data->namlen = 0; /* fall through */
+	case 2:
+		data->bsize = 0; /* fall through */
+	case 3:
+		if (data->flags & NFS_MOUNT_VER3)
+			goto out_no_v3;
+		data->root.size = NFS2_FHSIZE;
+		memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE);
+		/* Turn off security negotiation */
+		extra_flags |= NFS_MOUNT_SECFLAVOUR;
+		/* fall through */
+	case 4:
+		if (data->flags & NFS_MOUNT_SECFLAVOUR)
+			goto out_no_sec;
+		/* fall through */
+	case 5:
+		memset(data->context, 0, sizeof(data->context));
+		/* fall through */
+	case 6:
+		if (data->flags & NFS_MOUNT_VER3) {
+			if (data->root.size > NFS3_FHSIZE || data->root.size == 0)
+				goto out_invalid_fh;
+			mntfh->size = data->root.size;
+			ctx->version = 3;
+		} else {
+			mntfh->size = NFS2_FHSIZE;
+			ctx->version = 2;
+		}
+
+		memcpy(mntfh->data, data->root.data, mntfh->size);
+		if (mntfh->size < sizeof(mntfh->data))
+			memset(mntfh->data + mntfh->size, 0,
+			       sizeof(mntfh->data) - mntfh->size);
+
+		/*
+		 * Translate to nfs_fs_context, which nfs_fill_super
+		 * can deal with.
+		 */
+		ctx->flags	= data->flags & NFS_MOUNT_FLAGMASK;
+		ctx->flags	|= extra_flags;
+		ctx->rsize	= data->rsize;
+		ctx->wsize	= data->wsize;
+		ctx->timeo	= data->timeo;
+		ctx->retrans	= data->retrans;
+		ctx->acregmin	= data->acregmin;
+		ctx->acregmax	= data->acregmax;
+		ctx->acdirmin	= data->acdirmin;
+		ctx->acdirmax	= data->acdirmax;
+		ctx->need_mount	= false;
+
+		memcpy(sap, &data->addr, sizeof(data->addr));
+		ctx->nfs_server.addrlen = sizeof(data->addr);
+		ctx->nfs_server.port = ntohs(data->addr.sin_port);
+		if (sap->sa_family != AF_INET ||
+		    !nfs_verify_server_address(sap))
+			goto out_no_address;
+
+		if (!(data->flags & NFS_MOUNT_TCP))
+			ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP;
+		/* N.B. caller will free nfs_server.hostname in all cases */
+		ctx->nfs_server.hostname = kstrdup(data->hostname, GFP_KERNEL);
+		if (!ctx->nfs_server.hostname)
+			goto out_nomem;
+
+		ctx->namlen		= data->namlen;
+		ctx->bsize		= data->bsize;
+
+		if (data->flags & NFS_MOUNT_SECFLAVOUR)
+			ctx->selected_flavor = data->pseudoflavor;
+		else
+			ctx->selected_flavor = RPC_AUTH_UNIX;
+
+		if (!(data->flags & NFS_MOUNT_NONLM))
+			ctx->flags &= ~(NFS_MOUNT_LOCAL_FLOCK|
+					 NFS_MOUNT_LOCAL_FCNTL);
+		else
+			ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK|
+					NFS_MOUNT_LOCAL_FCNTL);
+
+		/*
+		 * The legacy version 6 binary mount data from userspace has a
+		 * field used only to transport selinux information into the
+		 * kernel.  To continue to support that functionality we
+		 * have a touch of selinux knowledge here in the NFS code. The
+		 * userspace code converted context=blah to just blah so we are
+		 * converting back to the full string selinux understands.
+		 */
+		if (data->context[0]){
+#ifdef CONFIG_SECURITY_SELINUX
+			int ret;
+
+			data->context[NFS_MAX_CONTEXT_LEN] = '\0';
+			ret = vfs_parse_fs_string(fc, "context",
+						  data->context, strlen(data->context));
+			if (ret < 0)
+				return ret;
+#else
+			return -EINVAL;
+#endif
+		}
+
+		break;
+	default:
+		goto generic;
+	}
+
+	ctx->skip_reconfig_option_check = true;
+	return 0;
+
+generic:
+	return generic_parse_monolithic(fc, data);
+
+out_no_data:
+	if (is_remount_fc(fc)) {
+		ctx->skip_reconfig_option_check = true;
+		return 0;
+	}
+	return nfs_invalf(fc, "NFS: mount program didn't pass any mount data");
+
+out_no_v3:
+	return nfs_invalf(fc, "NFS: nfs_mount_data version does not support v3");
+
+out_no_sec:
+	return nfs_invalf(fc, "NFS: nfs_mount_data version supports only AUTH_SYS");
+
+out_nomem:
+	dfprintk(MOUNT, "NFS: not enough memory to handle mount options");
+	return -ENOMEM;
+
+out_no_address:
+	return nfs_invalf(fc, "NFS: mount program didn't pass remote address");
+
+out_invalid_fh:
+	return nfs_invalf(fc, "NFS: invalid root filehandle");
+}
+
+#if IS_ENABLED(CONFIG_NFS_V4)
+/*
+ * Validate NFSv4 mount options
+ */
+static int nfs4_parse_monolithic(struct fs_context *fc,
+				 struct nfs4_mount_data *data)
+{
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	struct sockaddr *sap = (struct sockaddr *)&ctx->nfs_server.address;
+	char *c;
+
+	if (data == NULL)
+		goto out_no_data;
+
+	ctx->version = 4;
+
+	switch (data->version) {
+	case 1:
+		if (data->host_addrlen > sizeof(ctx->nfs_server.address))
+			goto out_no_address;
+		if (data->host_addrlen == 0)
+			goto out_no_address;
+		ctx->nfs_server.addrlen = data->host_addrlen;
+		if (copy_from_user(sap, data->host_addr, data->host_addrlen))
+			return -EFAULT;
+		if (!nfs_verify_server_address(sap))
+			goto out_no_address;
+		ctx->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
+
+		if (data->auth_flavourlen) {
+			rpc_authflavor_t pseudoflavor;
+			if (data->auth_flavourlen > 1)
+				goto out_inval_auth;
+			if (copy_from_user(&pseudoflavor,
+					   data->auth_flavours,
+					   sizeof(pseudoflavor)))
+				return -EFAULT;
+			ctx->selected_flavor = pseudoflavor;
+		} else
+			ctx->selected_flavor = RPC_AUTH_UNIX;
+
+		c = strndup_user(data->hostname.data, NFS4_MAXNAMLEN);
+		if (IS_ERR(c))
+			return PTR_ERR(c);
+		ctx->nfs_server.hostname = c;
+
+		c = strndup_user(data->mnt_path.data, NFS4_MAXPATHLEN);
+		if (IS_ERR(c))
+			return PTR_ERR(c);
+		ctx->nfs_server.export_path = c;
+		dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", c);
+
+		c = strndup_user(data->client_addr.data, 16);
+		if (IS_ERR(c))
+			return PTR_ERR(c);
+		ctx->client_address = c;
+
+		/*
+		 * Translate to nfs_fs_context, which nfs_fill_super
+		 * can deal with.
+		 */
+
+		ctx->flags	= data->flags & NFS4_MOUNT_FLAGMASK;
+		ctx->rsize	= data->rsize;
+		ctx->wsize	= data->wsize;
+		ctx->timeo	= data->timeo;
+		ctx->retrans	= data->retrans;
+		ctx->acregmin	= data->acregmin;
+		ctx->acregmax	= data->acregmax;
+		ctx->acdirmin	= data->acdirmin;
+		ctx->acdirmax	= data->acdirmax;
+		ctx->nfs_server.protocol = data->proto;
+		nfs_validate_transport_protocol(ctx);
+		if (ctx->nfs_server.protocol == XPRT_TRANSPORT_UDP)
+			goto out_invalid_transport_udp;
+
+		break;
+	default:
+		goto generic;
+	}
+
+	ctx->skip_reconfig_option_check = true;
+	return 0;
+
+generic:
+	return generic_parse_monolithic(fc, data);
+
+out_no_data:
+	if (is_remount_fc(fc)) {
+		ctx->skip_reconfig_option_check = true;
+		return 0;
+	}
+	return nfs_invalf(fc, "NFS4: mount program didn't pass any mount data");
+
+out_inval_auth:
+	return nfs_invalf(fc, "NFS4: Invalid number of RPC auth flavours %d",
+		      data->auth_flavourlen);
+
+out_no_address:
+	return nfs_invalf(fc, "NFS4: mount program didn't pass remote address");
+
+out_invalid_transport_udp:
+	return nfs_invalf(fc, "NFSv4: Unsupported transport protocol udp");
+}
+#endif
+
+/*
+ * Parse a monolithic block of data from sys_mount().
+ */
+static int nfs_fs_context_parse_monolithic(struct fs_context *fc,
+					   void *data)
+{
+	if (fc->fs_type == &nfs_fs_type)
+		return nfs23_parse_monolithic(fc, data);
+
+#if IS_ENABLED(CONFIG_NFS_V4)
+	if (fc->fs_type == &nfs4_fs_type)
+		return nfs4_parse_monolithic(fc, data);
+#endif
+
+	return nfs_invalf(fc, "NFS: Unsupported monolithic data version");
+}
+
+/*
+ * Validate the preparsed information in the config.
+ */
+static int nfs_fs_context_validate(struct fs_context *fc)
+{
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	struct nfs_subversion *nfs_mod;
+	struct sockaddr *sap = (struct sockaddr *)&ctx->nfs_server.address;
+	int max_namelen = PAGE_SIZE;
+	int max_pathlen = NFS_MAXPATHLEN;
+	int port = 0;
+	int ret;
+
+	if (!fc->source)
+		goto out_no_device_name;
+
+	/* Check for sanity first. */
+	if (ctx->minorversion && ctx->version != 4)
+		goto out_minorversion_mismatch;
+
+	if (ctx->options & NFS_OPTION_MIGRATION &&
+	    (ctx->version != 4 || ctx->minorversion != 0))
+		goto out_migration_misuse;
+
+	/* Verify that any proto=/mountproto= options match the address
+	 * families in the addr=/mountaddr= options.
+	 */
+	if (ctx->protofamily != AF_UNSPEC &&
+	    ctx->protofamily != ctx->nfs_server.address.sa_family)
+		goto out_proto_mismatch;
+
+	if (ctx->mountfamily != AF_UNSPEC) {
+		if (ctx->mount_server.addrlen) {
+			if (ctx->mountfamily != ctx->mount_server.address.sa_family)
+				goto out_mountproto_mismatch;
+		} else {
+			if (ctx->mountfamily != ctx->nfs_server.address.sa_family)
+				goto out_mountproto_mismatch;
+		}
+	}
+
+	if (!nfs_verify_server_address(sap))
+		goto out_no_address;
+
+	if (ctx->version == 4) {
+		if (IS_ENABLED(CONFIG_NFS_V4)) {
+			if (ctx->nfs_server.protocol == XPRT_TRANSPORT_RDMA)
+				port = NFS_RDMA_PORT;
+			else
+				port = NFS_PORT;
+			max_namelen = NFS4_MAXNAMLEN;
+			max_pathlen = NFS4_MAXPATHLEN;
+			nfs_validate_transport_protocol(ctx);
+			if (ctx->nfs_server.protocol == XPRT_TRANSPORT_UDP)
+				goto out_invalid_transport_udp;
+			ctx->flags &= ~(NFS_MOUNT_NONLM | NFS_MOUNT_NOACL |
+					NFS_MOUNT_VER3 | NFS_MOUNT_LOCAL_FLOCK |
+					NFS_MOUNT_LOCAL_FCNTL);
+		} else {
+			goto out_v4_not_compiled;
+		}
+	} else {
+		nfs_set_mount_transport_protocol(ctx);
+#ifdef CONFIG_NFS_DISABLE_UDP_SUPPORT
+		if (ctx->nfs_server.protocol == XPRT_TRANSPORT_UDP)
+			goto out_invalid_transport_udp;
+#endif
+		if (ctx->nfs_server.protocol == XPRT_TRANSPORT_RDMA)
+			port = NFS_RDMA_PORT;
+	}
+
+	nfs_set_port(sap, &ctx->nfs_server.port, port);
+
+	ret = nfs_parse_source(fc, max_namelen, max_pathlen);
+	if (ret < 0)
+		return ret;
+
+	/* Load the NFS protocol module if we haven't done so yet */
+	if (!ctx->nfs_mod) {
+		nfs_mod = get_nfs_version(ctx->version);
+		if (IS_ERR(nfs_mod)) {
+			ret = PTR_ERR(nfs_mod);
+			goto out_version_unavailable;
+		}
+		ctx->nfs_mod = nfs_mod;
+	}
+	return 0;
+
+out_no_device_name:
+	return nfs_invalf(fc, "NFS: Device name not specified");
+out_v4_not_compiled:
+	nfs_errorf(fc, "NFS: NFSv4 is not compiled into kernel");
+	return -EPROTONOSUPPORT;
+out_invalid_transport_udp:
+	return nfs_invalf(fc, "NFSv4: Unsupported transport protocol udp");
+out_no_address:
+	return nfs_invalf(fc, "NFS: mount program didn't pass remote address");
+out_mountproto_mismatch:
+	return nfs_invalf(fc, "NFS: Mount server address does not match mountproto= option");
+out_proto_mismatch:
+	return nfs_invalf(fc, "NFS: Server address does not match proto= option");
+out_minorversion_mismatch:
+	return nfs_invalf(fc, "NFS: Mount option vers=%u does not support minorversion=%u",
+			  ctx->version, ctx->minorversion);
+out_migration_misuse:
+	return nfs_invalf(fc, "NFS: 'Migration' not supported for this NFS version");
+out_version_unavailable:
+	nfs_errorf(fc, "NFS: Version unavailable");
+	return ret;
+}
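
nfs_fs_context_validate() front-loads the cheap consistency checks (minorversion only with vers=4, migration only on NFSv4.0, proto/addr family agreement) before any module loading or network activity happens. A standalone sketch of the first two checks, with made-up field names:

#include <stdbool.h>
#include <stdio.h>

struct mount_opts {
	unsigned int version;		/* 2, 3 or 4 */
	unsigned int minorversion;	/* only meaningful with version 4 */
	bool migration;			/* only defined for NFSv4.0 */
};

/* Mirror of the first two sanity checks: minorversion needs vers=4,
 * and migration is only supported on NFSv4.0. */
static const char *validate(const struct mount_opts *o)
{
	if (o->minorversion && o->version != 4)
		return "minorversion requires vers=4";
	if (o->migration && (o->version != 4 || o->minorversion != 0))
		return "migration is only supported for NFSv4.0";
	return NULL;
}

int main(void)
{
	const struct mount_opts bad  = { .version = 3, .minorversion = 1 };
	const struct mount_opts good = { .version = 4, .minorversion = 1 };
	const char *err;

	err = validate(&bad);
	printf("vers=3,minorversion=1: %s\n", err ? err : "ok");
	err = validate(&good);
	printf("vers=4,minorversion=1: %s\n", err ? err : "ok");
	return 0;
}
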
+
+/*
+ * Create an NFS superblock by the appropriate method.
+ */
+static int nfs_get_tree(struct fs_context *fc)
+{
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	int err = nfs_fs_context_validate(fc);
+
+	if (err)
+		return err;
+	if (!ctx->internal)
+		return ctx->nfs_mod->rpc_ops->try_get_tree(fc);
+	else
+		return nfs_get_tree_common(fc);
+}
+
+/*
+ * Handle duplication of a configuration.  The caller copied *src_fc into *fc, but
+ * it can't deal with resource pointers in the filesystem context, so we have
+ * to do that.  We need to clear pointers, copy data or get extra refs as
+ * appropriate.
+ */
+static int nfs_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
+{
+	struct nfs_fs_context *src = nfs_fc2context(src_fc), *ctx;
+
+	ctx = kmemdup(src, sizeof(struct nfs_fs_context), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->mntfh = nfs_alloc_fhandle();
+	if (!ctx->mntfh) {
+		kfree(ctx);
+		return -ENOMEM;
+	}
+	nfs_copy_fh(ctx->mntfh, src->mntfh);
+
+	__module_get(ctx->nfs_mod->owner);
+	ctx->client_address		= NULL;
+	ctx->mount_server.hostname	= NULL;
+	ctx->nfs_server.export_path	= NULL;
+	ctx->nfs_server.hostname	= NULL;
+	ctx->fscache_uniq		= NULL;
+	ctx->clone_data.fattr		= NULL;
+	fc->fs_private = ctx;
+	return 0;
+}
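
The dup path duplicates the whole context with kmemdup(), allocates a fresh filehandle, takes its own module reference, and then explicitly NULLs every string pointer the copy does not own, so freeing either context can never double-free. The same "shallow copy, then drop ownership of shared resources" idiom in a toy userspace form (struct and field names are invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctx {
	int flags;
	char *hostname;		/* owned string: must not be shared */
};

/* Shallow-copy the flat fields, then drop ownership of the source's
 * resources so that freeing either context never double-frees. */
static struct ctx *ctx_dup(const struct ctx *src)
{
	struct ctx *c = malloc(sizeof(*c));

	if (!c)
		return NULL;
	*c = *src;
	c->hostname = NULL;
	return c;
}

static void ctx_free(struct ctx *c)
{
	if (!c)
		return;
	free(c->hostname);
	free(c);
}

int main(void)
{
	struct ctx *a = calloc(1, sizeof(*a));
	struct ctx *b;

	if (!a)
		return 1;
	a->flags = 0x1;
	a->hostname = strdup("server.example.com");

	b = ctx_dup(a);
	if (b)
		printf("flags copied: %#x, hostname reset: %s\n",
		       b->flags, b->hostname ? b->hostname : "(null)");

	ctx_free(b);
	ctx_free(a);	/* safe: b never aliased a->hostname */
	return 0;
}
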
+
+static void nfs_fs_context_free(struct fs_context *fc)
+{
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+
+	if (ctx) {
+		if (ctx->server)
+			nfs_free_server(ctx->server);
+		if (ctx->nfs_mod)
+			put_nfs_version(ctx->nfs_mod);
+		kfree(ctx->client_address);
+		kfree(ctx->mount_server.hostname);
+		kfree(ctx->nfs_server.export_path);
+		kfree(ctx->nfs_server.hostname);
+		kfree(ctx->fscache_uniq);
+		nfs_free_fhandle(ctx->mntfh);
+		nfs_free_fattr(ctx->clone_data.fattr);
+		kfree(ctx);
+	}
+}
+
+static const struct fs_context_operations nfs_fs_context_ops = {
+	.free			= nfs_fs_context_free,
+	.dup			= nfs_fs_context_dup,
+	.parse_param		= nfs_fs_context_parse_param,
+	.parse_monolithic	= nfs_fs_context_parse_monolithic,
+	.get_tree		= nfs_get_tree,
+	.reconfigure		= nfs_reconfigure,
+};
+
+/*
+ * Prepare superblock configuration.  We use the namespaces attached to the
+ * context.  This may be the current process's namespaces, or it may be a
+ * container's namespaces.
+ */
+static int nfs_init_fs_context(struct fs_context *fc)
+{
+	struct nfs_fs_context *ctx;
+
+	ctx = kzalloc(sizeof(struct nfs_fs_context), GFP_KERNEL);
+	if (unlikely(!ctx))
+		return -ENOMEM;
+
+	ctx->mntfh = nfs_alloc_fhandle();
+	if (unlikely(!ctx->mntfh)) {
+		kfree(ctx);
+		return -ENOMEM;
+	}
+
+	ctx->protofamily	= AF_UNSPEC;
+	ctx->mountfamily	= AF_UNSPEC;
+	ctx->mount_server.port	= NFS_UNSPEC_PORT;
+
+	if (fc->root) {
+		/* reconfigure, start with the current config */
+		struct nfs_server *nfss = fc->root->d_sb->s_fs_info;
+		struct net *net = nfss->nfs_client->cl_net;
+
+		ctx->flags		= nfss->flags;
+		ctx->rsize		= nfss->rsize;
+		ctx->wsize		= nfss->wsize;
+		ctx->retrans		= nfss->client->cl_timeout->to_retries;
+		ctx->selected_flavor	= nfss->client->cl_auth->au_flavor;
+		ctx->acregmin		= nfss->acregmin / HZ;
+		ctx->acregmax		= nfss->acregmax / HZ;
+		ctx->acdirmin		= nfss->acdirmin / HZ;
+		ctx->acdirmax		= nfss->acdirmax / HZ;
+		ctx->timeo		= 10U * nfss->client->cl_timeout->to_initval / HZ;
+		ctx->nfs_server.port	= nfss->port;
+		ctx->nfs_server.addrlen	= nfss->nfs_client->cl_addrlen;
+		ctx->version		= nfss->nfs_client->rpc_ops->version;
+		ctx->minorversion	= nfss->nfs_client->cl_minorversion;
+
+		memcpy(&ctx->nfs_server.address, &nfss->nfs_client->cl_addr,
+			ctx->nfs_server.addrlen);
+
+		if (fc->net_ns != net) {
+			put_net(fc->net_ns);
+			fc->net_ns = get_net(net);
+		}
+
+		ctx->nfs_mod = nfss->nfs_client->cl_nfs_mod;
+		__module_get(ctx->nfs_mod->owner);
+	} else {
+		/* defaults */
+		ctx->timeo		= NFS_UNSPEC_TIMEO;
+		ctx->retrans		= NFS_UNSPEC_RETRANS;
+		ctx->acregmin		= NFS_DEF_ACREGMIN;
+		ctx->acregmax		= NFS_DEF_ACREGMAX;
+		ctx->acdirmin		= NFS_DEF_ACDIRMIN;
+		ctx->acdirmax		= NFS_DEF_ACDIRMAX;
+		ctx->nfs_server.port	= NFS_UNSPEC_PORT;
+		ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+		ctx->selected_flavor	= RPC_AUTH_MAXFLAVOR;
+		ctx->minorversion	= 0;
+		ctx->need_mount		= true;
+	}
+	fc->fs_private = ctx;
+	fc->ops = &nfs_fs_context_ops;
+	return 0;
+}
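
In the reconfigure branch above, the jiffies-based values kept in struct nfs_server are converted back into the units the mount options use: whole seconds for the attribute cache limits and tenths of a second for timeo, which is why the code computes 10U * cl_timeout->to_initval / HZ. A small worked example of that arithmetic, assuming an illustrative HZ of 250:

#include <stdio.h>

#define HZ 250U	/* illustrative tick rate; the real value is a kernel config choice */

int main(void)
{
	unsigned long to_initval = 15 * HZ;	/* 15s RPC timeout, in jiffies */
	unsigned long acregmin = 3 * HZ;	/* 3s attribute cache minimum, in jiffies */

	/* timeo is expressed in tenths of a second on the mount command line */
	unsigned int timeo = 10U * to_initval / HZ;
	unsigned int acregmin_s = acregmin / HZ;

	printf("timeo=%u (%u.%us), acregmin=%us\n",
	       timeo, timeo / 10, timeo % 10, acregmin_s);
	/* prints: timeo=150 (15.0s), acregmin=3s */
	return 0;
}
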
+
+struct file_system_type nfs_fs_type = {
+	.owner			= THIS_MODULE,
+	.name			= "nfs",
+	.init_fs_context	= nfs_init_fs_context,
+	.parameters		= nfs_fs_parameters,
+	.kill_sb		= nfs_kill_super,
+	.fs_flags		= FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
+};
+MODULE_ALIAS_FS("nfs");
+EXPORT_SYMBOL_GPL(nfs_fs_type);
+
+#if IS_ENABLED(CONFIG_NFS_V4)
+struct file_system_type nfs4_fs_type = {
+	.owner			= THIS_MODULE,
+	.name			= "nfs4",
+	.init_fs_context	= nfs_init_fs_context,
+	.parameters		= nfs_fs_parameters,
+	.kill_sb		= nfs_kill_super,
+	.fs_flags		= FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
+};
+MODULE_ALIAS_FS("nfs4");
+MODULE_ALIAS("nfs4");
+EXPORT_SYMBOL_GPL(nfs4_fs_type);
+#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c
index 15f2714..573b1da 100644
--- a/fs/nfs/fscache-index.c
+++ b/fs/nfs/fscache-index.c
@@ -84,8 +84,10 @@ enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
 		return FSCACHE_CHECKAUX_OBSOLETE;
 
 	memset(&auxdata, 0, sizeof(auxdata));
-	auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
-	auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
+	auxdata.mtime_sec  = nfsi->vfs_inode.i_mtime.tv_sec;
+	auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+	auxdata.ctime_sec  = nfsi->vfs_inode.i_ctime.tv_sec;
+	auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
 
 	if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
 		auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 3800ab6..52270bf 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -128,7 +128,7 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
 		return;
 
 	key->nfs_client = nfss->nfs_client;
-	key->key.super.s_flags = sb->s_flags & NFS_MS_MASK;
+	key->key.super.s_flags = sb->s_flags & NFS_SB_MASK;
 	key->key.nfs_server.flags = nfss->flags;
 	key->key.nfs_server.rsize = nfss->rsize;
 	key->key.nfs_server.wsize = nfss->wsize;
@@ -238,8 +238,10 @@ void nfs_fscache_init_inode(struct inode *inode)
 		return;
 
 	memset(&auxdata, 0, sizeof(auxdata));
-	auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
-	auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
+	auxdata.mtime_sec  = nfsi->vfs_inode.i_mtime.tv_sec;
+	auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+	auxdata.ctime_sec  = nfsi->vfs_inode.i_ctime.tv_sec;
+	auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
 
 	if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
 		auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
@@ -263,8 +265,10 @@ void nfs_fscache_clear_inode(struct inode *inode)
 	dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);
 
 	memset(&auxdata, 0, sizeof(auxdata));
-	auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
-	auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
+	auxdata.mtime_sec  = nfsi->vfs_inode.i_mtime.tv_sec;
+	auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+	auxdata.ctime_sec  = nfsi->vfs_inode.i_ctime.tv_sec;
+	auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
 	fscache_relinquish_cookie(cookie, &auxdata, false);
 	nfsi->fscache = NULL;
 }
@@ -305,8 +309,10 @@ void nfs_fscache_open_file(struct inode *inode, struct file *filp)
 		return;
 
 	memset(&auxdata, 0, sizeof(auxdata));
-	auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
-	auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
+	auxdata.mtime_sec  = nfsi->vfs_inode.i_mtime.tv_sec;
+	auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+	auxdata.ctime_sec  = nfsi->vfs_inode.i_ctime.tv_sec;
+	auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
 
 	if (inode_is_open_for_write(inode)) {
 		dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index ad041cfb..6754c86 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -62,9 +62,11 @@ struct nfs_fscache_key {
  * cache object.
  */
 struct nfs_fscache_inode_auxdata {
-	struct timespec	mtime;
-	struct timespec	ctime;
-	u64		change_attr;
+	s64	mtime_sec;
+	s64	mtime_nsec;
+	s64	ctime_sec;
+	s64	ctime_nsec;
+	u64	change_attr;
 };
 
 /*
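
Replacing the two struct timespec members with explicit s64 second/nanosecond fields keeps the auxiliary blob in fixed-width, 64-bit-safe fields now that the inode timestamps are timespec64, and fscache decides whether a cached object is still valid by comparing this blob against the copy stored with the object (see nfs_fscache_inode_check_aux() above). A userspace sketch of that idea, with illustrative types rather than the fscache API:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

/* Fixed-width fields, zeroed before filling, so the byte image is the
 * same for equal timestamps regardless of architecture or padding. */
struct auxdata {
	int64_t mtime_sec;
	int64_t mtime_nsec;
	int64_t ctime_sec;
	int64_t ctime_nsec;
	uint64_t change_attr;
};

static void fill(struct auxdata *aux, const struct timespec *mtime,
		 const struct timespec *ctime, uint64_t change)
{
	memset(aux, 0, sizeof(*aux));
	aux->mtime_sec = mtime->tv_sec;
	aux->mtime_nsec = mtime->tv_nsec;
	aux->ctime_sec = ctime->tv_sec;
	aux->ctime_nsec = ctime->tv_nsec;
	aux->change_attr = change;
}

int main(void)
{
	struct timespec m = { .tv_sec = 1581200000, .tv_nsec = 500 };
	struct timespec c = { .tv_sec = 1581200100, .tv_nsec = 0 };
	struct auxdata stored, latest;

	fill(&stored, &m, &c, 42);
	fill(&latest, &m, &c, 42);
	printf("cache object still valid: %s\n",
	       memcmp(&stored, &latest, sizeof(stored)) == 0 ? "yes" : "no");
	return 0;
}
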
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 878c4c5..b012c26 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -64,66 +64,71 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
 /*
  * get an NFS2/NFS3 root dentry from the root filehandle
  */
-struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh,
-			    const char *devname)
+int nfs_get_root(struct super_block *s, struct fs_context *fc)
 {
-	struct nfs_server *server = NFS_SB(sb);
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	struct nfs_server *server = NFS_SB(s);
 	struct nfs_fsinfo fsinfo;
-	struct dentry *ret;
+	struct dentry *root;
 	struct inode *inode;
-	void *name = kstrdup(devname, GFP_KERNEL);
-	int error;
+	char *name;
+	int error = -ENOMEM;
 
+	name = kstrdup(fc->source, GFP_KERNEL);
 	if (!name)
-		return ERR_PTR(-ENOMEM);
+		goto out;
 
 	/* get the actual root for this mount */
 	fsinfo.fattr = nfs_alloc_fattr();
-	if (fsinfo.fattr == NULL) {
-		kfree(name);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (fsinfo.fattr == NULL)
+		goto out_name;
 
-	error = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
+	error = server->nfs_client->rpc_ops->getroot(server, ctx->mntfh, &fsinfo);
 	if (error < 0) {
 		dprintk("nfs_get_root: getattr error = %d\n", -error);
-		ret = ERR_PTR(error);
-		goto out;
+		nfs_errorf(fc, "NFS: Couldn't getattr on root");
+		goto out_fattr;
 	}
 
-	inode = nfs_fhget(sb, mntfh, fsinfo.fattr, NULL);
+	inode = nfs_fhget(s, ctx->mntfh, fsinfo.fattr, NULL);
 	if (IS_ERR(inode)) {
 		dprintk("nfs_get_root: get root inode failed\n");
-		ret = ERR_CAST(inode);
-		goto out;
+		error = PTR_ERR(inode);
+		nfs_errorf(fc, "NFS: Couldn't get root inode");
+		goto out_fattr;
 	}
 
-	error = nfs_superblock_set_dummy_root(sb, inode);
-	if (error != 0) {
-		ret = ERR_PTR(error);
-		goto out;
-	}
+	error = nfs_superblock_set_dummy_root(s, inode);
+	if (error != 0)
+		goto out_fattr;
 
 	/* root dentries normally start off anonymous and get spliced in later
 	 * if the dentry tree reaches them; however if the dentry already
 	 * exists, we'll pick it up at this point and use it as the root
 	 */
-	ret = d_obtain_root(inode);
-	if (IS_ERR(ret)) {
+	root = d_obtain_root(inode);
+	if (IS_ERR(root)) {
 		dprintk("nfs_get_root: get root dentry failed\n");
-		goto out;
+		error = PTR_ERR(root);
+		nfs_errorf(fc, "NFS: Couldn't get root dentry");
+		goto out_fattr;
 	}
 
-	security_d_instantiate(ret, inode);
-	spin_lock(&ret->d_lock);
-	if (IS_ROOT(ret) && !ret->d_fsdata &&
-	    !(ret->d_flags & DCACHE_NFSFS_RENAMED)) {
-		ret->d_fsdata = name;
+	security_d_instantiate(root, inode);
+	spin_lock(&root->d_lock);
+	if (IS_ROOT(root) && !root->d_fsdata &&
+	    !(root->d_flags & DCACHE_NFSFS_RENAMED)) {
+		root->d_fsdata = name;
 		name = NULL;
 	}
-	spin_unlock(&ret->d_lock);
-out:
-	kfree(name);
+	spin_unlock(&root->d_lock);
+	fc->root = root;
+	error = 0;
+
+out_fattr:
 	nfs_free_fattr(fsinfo.fattr);
-	return ret;
+out_name:
+	kfree(name);
+out:
+	return error;
 }
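
The rewritten nfs_get_root() reports failures through nfs_errorf() on the fs_context and returns an int, and its exit paths are now a ladder of labels (out_fattr, out_name, out) so each early return frees exactly what has been allocated so far. The unwinding idiom on its own, as a small userspace sketch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Kernel-style cleanup ladder: each failure jumps to the label that
 * releases only what had been acquired up to that point, and the
 * success path falls through the same labels. */
static int do_setup(const char *source)
{
	char *name, *attrs;
	int error = -1;

	name = strdup(source);
	if (!name)
		goto out;

	attrs = malloc(64);
	if (!attrs)
		goto out_name;

	if (name[0] == '\0') {		/* simulate a later failure */
		error = -2;
		goto out_attrs;
	}

	printf("set up root for %s\n", name);
	error = 0;

out_attrs:
	free(attrs);
out_name:
	free(name);
out:
	return error;
}

int main(void)
{
	return do_setup("server:/export") ? 1 : 0;
}
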
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b0b4b9f..1309e6f 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1061,7 +1061,7 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(pos, &nfsi->open_files, list) {
-		if (cred != NULL && pos->cred != cred)
+		if (cred != NULL && cred_fscmp(pos->cred, cred) != 0)
 			continue;
 		if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
 			continue;
@@ -1156,7 +1156,13 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
 		dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Lu) getattr failed, error=%d\n",
 			 inode->i_sb->s_id,
 			 (unsigned long long)NFS_FILEID(inode), status);
-		if (status == -ESTALE) {
+		switch (status) {
+		case -ETIMEDOUT:
+			/* A soft timeout occurred. Use cached information? */
+			if (server->flags & NFS_MOUNT_SOFTREVAL)
+				status = 0;
+			break;
+		case -ESTALE:
 			nfs_zap_caches(inode);
 			if (!S_ISDIR(inode->i_mode))
 				set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
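
The new -ETIMEDOUT case is the softreval behaviour: when a revalidating GETATTR times out on a mount that set NFS_MOUNT_SOFTREVAL, the error is swallowed and the client keeps using the attributes it already has cached. The decision in isolation, as a minimal sketch:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Map a revalidation result: a soft timeout is forgiven when the mount
 * asked for softreval; every other error is passed through unchanged. */
static int filter_revalidate_error(int status, bool softreval)
{
	if (status == -ETIMEDOUT && softreval)
		return 0;	/* keep using the cached attributes */
	return status;
}

int main(void)
{
	printf("%d\n", filter_revalidate_error(-ETIMEDOUT, true));	/* 0 */
	printf("%d\n", filter_revalidate_error(-ETIMEDOUT, false));	/* -ETIMEDOUT */
	printf("%d\n", filter_revalidate_error(-ESTALE, true));		/* -ESTALE */
	return 0;
}
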
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 24a65da..f80c47d 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -4,17 +4,19 @@
  */
 
 #include "nfs4_fs.h"
-#include <linux/mount.h>
+#include <linux/fs_context.h>
 #include <linux/security.h>
 #include <linux/crc32.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/nfs_page.h>
 #include <linux/wait_bit.h>
 
-#define NFS_MS_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
+#define NFS_SB_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
 
 extern const struct export_operations nfs_export_ops;
 
 struct nfs_string;
+struct nfs_pageio_descriptor;
 
 static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct nfs_fattr *fattr)
 {
@@ -31,17 +33,14 @@ static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
 	return 1;
 }
 
-struct nfs_clone_mount {
-	const struct super_block *sb;
-	const struct dentry *dentry;
-	struct nfs_fh *fh;
-	struct nfs_fattr *fattr;
-	char *hostname;
-	char *mnt_path;
-	struct sockaddr *addr;
-	size_t addrlen;
-	rpc_authflavor_t authflavor;
-};
+static inline bool nfs_lookup_is_soft_revalidate(const struct dentry *dentry)
+{
+	if (!(NFS_SB(dentry->d_sb)->flags & NFS_MOUNT_SOFTREVAL))
+		return false;
+	if (!d_is_positive(dentry) || !NFS_FH(d_inode(dentry))->size)
+		return false;
+	return true;
+}
 
 /*
  * Note: RFC 1813 doesn't limit the number of auth flavors that
@@ -82,12 +81,16 @@ struct nfs_client_initdata {
 /*
  * In-kernel mount arguments
  */
-struct nfs_parsed_mount_data {
-	int			flags;
+struct nfs_fs_context {
+	bool			internal;
+	bool			skip_reconfig_option_check;
+	bool			need_mount;
+	bool			sloppy;
+	unsigned int		flags;		/* NFS{,4}_MOUNT_* flags */
 	unsigned int		rsize, wsize;
 	unsigned int		timeo, retrans;
-	unsigned int		acregmin, acregmax,
-				acdirmin, acdirmax;
+	unsigned int		acregmin, acregmax;
+	unsigned int		acdirmin, acdirmax;
 	unsigned int		namlen;
 	unsigned int		options;
 	unsigned int		bsize;
@@ -97,10 +100,14 @@ struct nfs_parsed_mount_data {
 	unsigned int		version;
 	unsigned int		minorversion;
 	char			*fscache_uniq;
-	bool			need_mount;
+	unsigned short		protofamily;
+	unsigned short		mountfamily;
 
 	struct {
-		struct sockaddr_storage	address;
+		union {
+			struct sockaddr	address;
+			struct sockaddr_storage	_address;
+		};
 		size_t			addrlen;
 		char			*hostname;
 		u32			version;
@@ -109,19 +116,41 @@ struct nfs_parsed_mount_data {
 	} mount_server;
 
 	struct {
-		struct sockaddr_storage	address;
+		union {
+			struct sockaddr	address;
+			struct sockaddr_storage	_address;
+		};
 		size_t			addrlen;
 		char			*hostname;
 		char			*export_path;
 		int			port;
 		unsigned short		protocol;
 		unsigned short		nconnect;
+		unsigned short		export_path_len;
 	} nfs_server;
 
-	void			*lsm_opts;
-	struct net		*net;
+	struct nfs_fh		*mntfh;
+	struct nfs_server	*server;
+	struct nfs_subversion	*nfs_mod;
+
+	/* Information for a cloned mount. */
+	struct nfs_clone_mount {
+		struct super_block	*sb;
+		struct dentry		*dentry;
+		struct nfs_fattr	*fattr;
+		unsigned int		inherited_bsize;
+	} clone_data;
 };
 
+#define nfs_errorf(fc, fmt, ...) errorf(fc, fmt, ## __VA_ARGS__)
+#define nfs_invalf(fc, fmt, ...) invalf(fc, fmt, ## __VA_ARGS__)
+#define nfs_warnf(fc, fmt, ...) warnf(fc, fmt, ## __VA_ARGS__)
+
+static inline struct nfs_fs_context *nfs_fc2context(const struct fs_context *fc)
+{
+	return fc->fs_private;
+}
+
 /* mount_clnt.c */
 struct nfs_mount_request {
 	struct sockaddr		*sap;
@@ -137,14 +166,6 @@ struct nfs_mount_request {
 	struct net		*net;
 };
 
-struct nfs_mount_info {
-	void (*fill_super)(struct super_block *, struct nfs_mount_info *);
-	int (*set_security)(struct super_block *, struct dentry *, struct nfs_mount_info *);
-	struct nfs_parsed_mount_data *parsed;
-	struct nfs_clone_mount *cloned;
-	struct nfs_fh *mntfh;
-};
-
 extern int nfs_mount(struct nfs_mount_request *info);
 extern void nfs_umount(const struct nfs_mount_request *info);
 
@@ -170,13 +191,9 @@ extern struct nfs_client *nfs4_find_client_ident(struct net *, int);
 extern struct nfs_client *
 nfs4_find_client_sessionid(struct net *, const struct sockaddr *,
 				struct nfs4_sessionid *, u32);
-extern struct nfs_server *nfs_create_server(struct nfs_mount_info *,
-					struct nfs_subversion *);
-extern struct nfs_server *nfs4_create_server(
-					struct nfs_mount_info *,
-					struct nfs_subversion *);
-extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *,
-						      struct nfs_fh *);
+extern struct nfs_server *nfs_create_server(struct fs_context *);
+extern struct nfs_server *nfs4_create_server(struct fs_context *);
+extern struct nfs_server *nfs4_create_referral_server(struct fs_context *);
 extern int nfs4_update_server(struct nfs_server *server, const char *hostname,
 					struct sockaddr *sap, size_t salen,
 					struct net *net);
@@ -227,7 +244,9 @@ static inline void nfs_fs_proc_exit(void)
 extern const struct svc_version nfs4_callback_version1;
 extern const struct svc_version nfs4_callback_version4;
 
-struct nfs_pageio_descriptor;
+/* fs_context.c */
+extern struct file_system_type nfs_fs_type;
+
 /* pagelist.c */
 extern int __init nfs_init_nfspagecache(void);
 extern void nfs_destroy_nfspagecache(void);
@@ -387,23 +406,10 @@ extern int nfs_wait_atomic_killable(atomic_t *p, unsigned int mode);
 
 /* super.c */
 extern const struct super_operations nfs_sops;
-extern struct file_system_type nfs_fs_type;
-extern struct file_system_type nfs_xdev_fs_type;
-#if IS_ENABLED(CONFIG_NFS_V4)
-extern struct file_system_type nfs4_referral_fs_type;
-#endif
 bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t);
-struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *,
-			struct nfs_subversion *);
-int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
-int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
-struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *,
-				   struct nfs_mount_info *, struct nfs_subversion *);
-struct dentry *nfs_fs_mount(struct file_system_type *, int, const char *, void *);
-struct dentry * nfs_xdev_mount_common(struct file_system_type *, int,
-		const char *, struct nfs_mount_info *);
+int nfs_try_get_tree(struct fs_context *);
+int nfs_get_tree_common(struct fs_context *);
 void nfs_kill_super(struct super_block *);
-void nfs_fill_super(struct super_block *, struct nfs_mount_info *);
 
 extern struct rpc_stat nfs_rpcstat;
 
@@ -430,18 +436,12 @@ static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi)
 extern char *nfs_path(char **p, struct dentry *dentry,
 		      char *buffer, ssize_t buflen, unsigned flags);
 extern struct vfsmount *nfs_d_automount(struct path *path);
-struct vfsmount *nfs_submount(struct nfs_server *, struct dentry *,
-			      struct nfs_fh *, struct nfs_fattr *);
-struct vfsmount *nfs_do_submount(struct dentry *, struct nfs_fh *,
-				 struct nfs_fattr *, rpc_authflavor_t);
+int nfs_submount(struct fs_context *, struct nfs_server *);
+int nfs_do_submount(struct fs_context *);
 
 /* getroot.c */
-extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *,
-				   const char *);
+extern int nfs_get_root(struct super_block *s, struct fs_context *fc);
 #if IS_ENABLED(CONFIG_NFS_V4)
-extern struct dentry *nfs4_get_root(struct super_block *, struct nfs_fh *,
-				    const char *);
-
 extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh, bool);
 #endif
 
@@ -460,7 +460,7 @@ int  nfs_show_options(struct seq_file *, struct dentry *);
 int  nfs_show_devname(struct seq_file *, struct dentry *);
 int  nfs_show_path(struct seq_file *, struct dentry *);
 int  nfs_show_stats(struct seq_file *, struct dentry *);
-int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
+int  nfs_reconfigure(struct fs_context *);
 
 /* write.c */
 extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
@@ -706,9 +706,9 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
 }
 
 /*
- * Convert a struct timespec into a 64-bit change attribute
+ * Convert a struct timespec64 into a 64-bit change attribute
  *
- * This does approximately the same thing as timespec_to_ns(),
+ * This does approximately the same thing as timespec64_to_ns(),
  * but for calculation efficiency, we multiply the seconds by
  * 1024*1024*1024.
  */
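
Per this comment, the helper beneath it (not shown in this hunk) folds a timestamp into a change attribute by multiplying the seconds by 2^30 (1024*1024*1024) rather than the exact 10^9 of timespec64_to_ns(); the stated reason is calculation efficiency, and the result orders timestamps just as well. A quick userspace comparison of the two values:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int64_t sec = 1581200000, nsec = 123456789;

	/* exact conversion, as timespec64_to_ns() would produce */
	int64_t exact = sec * 1000000000LL + nsec;
	/* cheap conversion: scale the seconds by 2^30 instead of 10^9 */
	int64_t cheap = (sec << 30) + nsec;

	/* both values order timestamps identically; the cheap one is
	 * merely about 7% larger because 2^30 > 10^9 */
	printf("exact=%" PRId64 " cheap=%" PRId64 "\n", exact, cheap);
	return 0;
}
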
@@ -777,3 +777,16 @@ static inline bool nfs_error_is_fatal_on_server(int err)
 	}
 	return nfs_error_is_fatal(err);
 }
+
+/*
+ * Select between a default port value and a user-specified port value.
+ * If a zero value is set, then autobind will be used.
+ */
+static inline void nfs_set_port(struct sockaddr *sap, int *port,
+				const unsigned short default_port)
+{
+	if (*port == NFS_UNSPEC_PORT)
+		*port = default_port;
+
+	rpc_set_port(sap, *port);
+}
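
nfs_set_port() resolves the port in two steps: an "unspecified" sentinel is replaced by the transport's default (2049 for TCP/UDP, 20049 for RDMA in the callers above), while an explicit port=0 is left alone so the RPC layer will autobind. The same selection as a standalone sketch, with illustrative names and values:

#include <stdio.h>

#define PORT_UNSPEC (-1)	/* sentinel: no port= option was given */

/* Pick the port actually handed to the transport: the sentinel becomes
 * the protocol default, everything else (including an explicit 0, which
 * means "autobind") is kept exactly as the user asked. */
static int resolve_port(int requested, unsigned short proto_default)
{
	return requested == PORT_UNSPEC ? proto_default : requested;
}

int main(void)
{
	printf("%d\n", resolve_port(PORT_UNSPEC, 2049));	/* 2049: default */
	printf("%d\n", resolve_port(0, 2049));			/* 0: autobind */
	printf("%d\n", resolve_port(20049, 2049));		/* 20049: as requested */
	return 0;
}
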
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index cb7c10e..35c8cb2 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -29,9 +29,7 @@
  */
 #define encode_dirpath_sz	(1 + XDR_QUADLEN(MNTPATHLEN))
 #define MNT_status_sz		(1)
-#define MNT_fhs_status_sz	(1)
 #define MNT_fhandle_sz		XDR_QUADLEN(NFS2_FHSIZE)
-#define MNT_fhandle3_sz		(1 + XDR_QUADLEN(NFS3_FHSIZE))
 #define MNT_authflav3_sz	(1 + NFS_MAX_SECFLAVORS)
 
 /*
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 5e0e9d2..ad60774 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -19,6 +19,7 @@
 #include <linux/vfs.h>
 #include <linux/sunrpc/gss_api.h>
 #include "internal.h"
+#include "nfs.h"
 
 #define NFSDBG_FACILITY		NFSDBG_VFS
 
@@ -139,34 +140,65 @@ EXPORT_SYMBOL_GPL(nfs_path);
  */
 struct vfsmount *nfs_d_automount(struct path *path)
 {
-	struct vfsmount *mnt;
+	struct nfs_fs_context *ctx;
+	struct fs_context *fc;
+	struct vfsmount *mnt = ERR_PTR(-ENOMEM);
 	struct nfs_server *server = NFS_SERVER(d_inode(path->dentry));
-	struct nfs_fh *fh = NULL;
-	struct nfs_fattr *fattr = NULL;
+	struct nfs_client *client = server->nfs_client;
+	int ret;
 
 	if (IS_ROOT(path->dentry))
 		return ERR_PTR(-ESTALE);
 
-	mnt = ERR_PTR(-ENOMEM);
-	fh = nfs_alloc_fhandle();
-	fattr = nfs_alloc_fattr();
-	if (fh == NULL || fattr == NULL)
-		goto out;
+	/* Open a new filesystem context, transferring parameters from the
+	 * parent superblock, including the network namespace.
+	 */
+	fc = fs_context_for_submount(&nfs_fs_type, path->dentry);
+	if (IS_ERR(fc))
+		return ERR_CAST(fc);
 
-	mnt = server->nfs_client->rpc_ops->submount(server, path->dentry, fh, fattr);
+	ctx = nfs_fc2context(fc);
+	ctx->clone_data.dentry	= path->dentry;
+	ctx->clone_data.sb	= path->dentry->d_sb;
+	ctx->clone_data.fattr	= nfs_alloc_fattr();
+	if (!ctx->clone_data.fattr)
+		goto out_fc;
+
+	if (fc->net_ns != client->cl_net) {
+		put_net(fc->net_ns);
+		fc->net_ns = get_net(client->cl_net);
+	}
+
+	/* for submounts we want the same server; referrals will reassign */
+	memcpy(&ctx->nfs_server.address, &client->cl_addr, client->cl_addrlen);
+	ctx->nfs_server.addrlen	= client->cl_addrlen;
+	ctx->nfs_server.port	= server->port;
+
+	ctx->version		= client->rpc_ops->version;
+	ctx->minorversion	= client->cl_minorversion;
+	ctx->nfs_mod		= client->cl_nfs_mod;
+	__module_get(ctx->nfs_mod->owner);
+
+	ret = client->rpc_ops->submount(fc, server);
+	if (ret < 0) {
+		mnt = ERR_PTR(ret);
+		goto out_fc;
+	}
+
+	up_write(&fc->root->d_sb->s_umount);
+	mnt = vfs_create_mount(fc);
 	if (IS_ERR(mnt))
-		goto out;
+		goto out_fc;
 
 	if (nfs_mountpoint_expiry_timeout < 0)
-		goto out;
+		goto out_fc;
 
 	mntget(mnt); /* prevent immediate expiration */
 	mnt_set_expiry(mnt, &nfs_automount_list);
 	schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
 
-out:
-	nfs_free_fattr(fattr);
-	nfs_free_fhandle(fh);
+out_fc:
+	put_fs_context(fc);
 	return mnt;
 }
 
@@ -213,16 +245,6 @@ void nfs_release_automount_timer(void)
 		cancel_delayed_work(&nfs_automount_task);
 }
 
-/*
- * Clone a mountpoint of the appropriate type
- */
-static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
-					   const char *devname,
-					   struct nfs_clone_mount *mountdata)
-{
-	return vfs_submount(mountdata->dentry, &nfs_xdev_fs_type, devname, mountdata);
-}
-
 /**
  * nfs_do_submount - set up mountpoint when crossing a filesystem boundary
  * @dentry: parent directory
@@ -231,46 +253,62 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
  * @authflavor: security flavor to use when performing the mount
  *
  */
-struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
-				 struct nfs_fattr *fattr, rpc_authflavor_t authflavor)
+int nfs_do_submount(struct fs_context *fc)
 {
-	struct nfs_clone_mount mountdata = {
-		.sb = dentry->d_sb,
-		.dentry = dentry,
-		.fh = fh,
-		.fattr = fattr,
-		.authflavor = authflavor,
-	};
-	struct vfsmount *mnt;
-	char *page = (char *) __get_free_page(GFP_USER);
-	char *devname;
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	struct dentry *dentry = ctx->clone_data.dentry;
+	struct nfs_server *server;
+	char *buffer, *p;
+	int ret;
 
-	if (page == NULL)
-		return ERR_PTR(-ENOMEM);
+	/* create a new volume representation */
+	server = ctx->nfs_mod->rpc_ops->clone_server(NFS_SB(ctx->clone_data.sb),
+						     ctx->mntfh,
+						     ctx->clone_data.fattr,
+						     ctx->selected_flavor);
 
-	devname = nfs_devname(dentry, page, PAGE_SIZE);
-	if (IS_ERR(devname))
-		mnt = ERR_CAST(devname);
-	else
-		mnt = nfs_do_clone_mount(NFS_SB(dentry->d_sb), devname, &mountdata);
+	if (IS_ERR(server))
+		return PTR_ERR(server);
 
-	free_page((unsigned long)page);
-	return mnt;
+	ctx->server = server;
+
+	buffer = kmalloc(4096, GFP_USER);
+	if (!buffer)
+		return -ENOMEM;
+
+	ctx->internal		= true;
+	ctx->clone_data.inherited_bsize = ctx->clone_data.sb->s_blocksize_bits;
+
+	p = nfs_devname(dentry, buffer, 4096);
+	if (IS_ERR(p)) {
+		nfs_errorf(fc, "NFS: Couldn't determine submount pathname");
+		ret = PTR_ERR(p);
+	} else {
+		ret = vfs_parse_fs_string(fc, "source", p, buffer + 4096 - p);
+		if (!ret)
+			ret = vfs_get_tree(fc);
+	}
+	kfree(buffer);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(nfs_do_submount);
 
-struct vfsmount *nfs_submount(struct nfs_server *server, struct dentry *dentry,
-			      struct nfs_fh *fh, struct nfs_fattr *fattr)
+int nfs_submount(struct fs_context *fc, struct nfs_server *server)
 {
-	int err;
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	struct dentry *dentry = ctx->clone_data.dentry;
 	struct dentry *parent = dget_parent(dentry);
+	int err;
 
 	/* Look it up again to get its attributes */
-	err = server->nfs_client->rpc_ops->lookup(d_inode(parent), &dentry->d_name, fh, fattr, NULL);
+	err = server->nfs_client->rpc_ops->lookup(d_inode(parent), dentry,
+						  ctx->mntfh, ctx->clone_data.fattr,
+						  NULL);
 	dput(parent);
 	if (err != 0)
-		return ERR_PTR(err);
+		return err;
 
-	return nfs_do_submount(dentry, fh, fattr, server->client->cl_auth->au_flavor);
+	ctx->selected_flavor = server->client->cl_auth->au_flavor;
+	return nfs_do_submount(fc);
 }
 EXPORT_SYMBOL_GPL(nfs_submount);
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index d94c7ab..f6676af 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -360,17 +360,17 @@ static void encode_sattr(struct xdr_stream *xdr, const struct iattr *attr,
 	else
 		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
 
-	if (attr->ia_valid & ATTR_ATIME_SET) {
+	if (attr->ia_valid & ATTR_ATIME_SET)
 		p = xdr_encode_time(p, &attr->ia_atime);
-	} else if (attr->ia_valid & ATTR_ATIME) {
+	else if (attr->ia_valid & ATTR_ATIME)
 		p = xdr_encode_current_server_time(p, &attr->ia_atime);
-	} else
+	else
 		p = xdr_time_not_set(p);
-	if (attr->ia_valid & ATTR_MTIME_SET) {
+	if (attr->ia_valid & ATTR_MTIME_SET)
 		xdr_encode_time(p, &attr->ia_mtime);
-	} else if (attr->ia_valid & ATTR_MTIME) {
+	else if (attr->ia_valid & ATTR_MTIME)
 		xdr_encode_current_server_time(p, &attr->ia_mtime);
-	} else
+	else
 		xdr_time_not_set(p);
 }
 
diff --git a/fs/nfs/nfs3_fs.h b/fs/nfs/nfs3_fs.h
index f82e11c..1b950b6 100644
--- a/fs/nfs/nfs3_fs.h
+++ b/fs/nfs/nfs3_fs.h
@@ -27,7 +27,7 @@ static inline int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
 #endif /* CONFIG_NFS_V3_ACL */
 
 /* nfs3client.c */
-struct nfs_server *nfs3_create_server(struct nfs_mount_info *, struct nfs_subversion *);
+struct nfs_server *nfs3_create_server(struct fs_context *);
 struct nfs_server *nfs3_clone_server(struct nfs_server *, struct nfs_fh *,
 				     struct nfs_fattr *, rpc_authflavor_t);
 
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index 223904b..5601e47 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -46,10 +46,10 @@ static inline void nfs_init_server_aclclient(struct nfs_server *server)
 }
 #endif
 
-struct nfs_server *nfs3_create_server(struct nfs_mount_info *mount_info,
-				      struct nfs_subversion *nfs_mod)
+struct nfs_server *nfs3_create_server(struct fs_context *fc)
 {
-	struct nfs_server *server = nfs_create_server(mount_info, nfs_mod);
+	struct nfs_server *server = nfs_create_server(fc);
+
 	/* Create a client RPC handle for the NFS v3 ACL management interface */
 	if (!IS_ERR(server))
 		nfs_init_server_aclclient(server);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 9eb2f1a..a46d1d5 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -110,10 +110,15 @@ nfs3_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
 		.rpc_resp	= fattr,
 	};
 	int	status;
+	unsigned short task_flags = 0;
+
+	/* Is this an attribute revalidation, subject to softreval? */
+	if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
+		task_flags |= RPC_TASK_TIMEOUT;
 
 	dprintk("NFS call  getattr\n");
 	nfs_fattr_init(fattr);
-	status = rpc_call_sync(server->client, &msg, 0);
+	status = rpc_call_sync(server->client, &msg, task_flags);
 	dprintk("NFS reply getattr: %d\n", status);
 	return status;
 }
@@ -140,23 +145,23 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
 	nfs_fattr_init(fattr);
 	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
 	if (status == 0) {
+		nfs_setattr_update_inode(inode, sattr, fattr);
 		if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
 			nfs_zap_acl_cache(inode);
-		nfs_setattr_update_inode(inode, sattr, fattr);
 	}
 	dprintk("NFS reply setattr: %d\n", status);
 	return status;
 }
 
 static int
-nfs3_proc_lookup(struct inode *dir, const struct qstr *name,
+nfs3_proc_lookup(struct inode *dir, struct dentry *dentry,
 		 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
 		 struct nfs4_label *label)
 {
 	struct nfs3_diropargs	arg = {
 		.fh		= NFS_FH(dir),
-		.name		= name->name,
-		.len		= name->len
+		.name		= dentry->d_name.name,
+		.len		= dentry->d_name.len
 	};
 	struct nfs3_diropres	res = {
 		.fh		= fhandle,
@@ -168,20 +173,25 @@ nfs3_proc_lookup(struct inode *dir, const struct qstr *name,
 		.rpc_resp	= &res,
 	};
 	int			status;
+	unsigned short task_flags = 0;
 
-	dprintk("NFS call  lookup %s\n", name->name);
+	/* Is this an attribute revalidation, subject to softreval? */
+	if (nfs_lookup_is_soft_revalidate(dentry))
+		task_flags |= RPC_TASK_TIMEOUT;
+
+	dprintk("NFS call  lookup %pd2\n", dentry);
 	res.dir_attr = nfs_alloc_fattr();
 	if (res.dir_attr == NULL)
 		return -ENOMEM;
 
 	nfs_fattr_init(fattr);
-	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, task_flags);
 	nfs_refresh_inode(dir, res.dir_attr);
 	if (status >= 0 && !(fattr->valid & NFS_ATTR_FATTR)) {
 		msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
 		msg.rpc_argp = fhandle;
 		msg.rpc_resp = fattr;
-		status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+		status = rpc_call_sync(NFS_CLIENT(dir), &msg, task_flags);
 	}
 	nfs_free_fattr(res.dir_attr);
 	dprintk("NFS reply lookup: %d\n", status);
@@ -990,7 +1000,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
 	.nlmclnt_ops	= &nlmclnt_fl_close_lock_ops,
 	.getroot	= nfs3_proc_get_root,
 	.submount	= nfs_submount,
-	.try_mount	= nfs_try_mount,
+	.try_get_tree	= nfs_try_get_tree,
 	.getattr	= nfs3_proc_getattr,
 	.setattr	= nfs3_proc_setattr,
 	.lookup		= nfs3_proc_lookup,
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 927eb68..69971f6 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -2334,6 +2334,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
 				   void *data)
 {
 	struct nfs_commitres *result = data;
+	struct nfs_writeverf *verf = result->verf;
 	enum nfs_stat status;
 	int error;
 
@@ -2346,7 +2347,9 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
 	result->op_status = status;
 	if (status != NFS3_OK)
 		goto out_status;
-	error = decode_writeverf3(xdr, &result->verf->verifier);
+	error = decode_writeverf3(xdr, &verf->verifier);
+	if (!error)
+		verf->committed = NFS_FILE_SYNC;
 out:
 	return error;
 out_status:
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 1fe83e0..e2ae54b3 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -61,8 +61,11 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
 
 	status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
 			lock, FMODE_WRITE);
-	if (status)
+	if (status) {
+		if (status == -EAGAIN)
+			status = -NFS4ERR_BAD_STATEID;
 		return status;
+	}
 
 	res.falloc_fattr = nfs_alloc_fattr();
 	if (!res.falloc_fattr)
@@ -287,8 +290,11 @@ static ssize_t _nfs42_proc_copy(struct file *src,
 	} else {
 		status = nfs4_set_rw_stateid(&args->src_stateid,
 				src_lock->open_context, src_lock, FMODE_READ);
-		if (status)
+		if (status) {
+			if (status == -EAGAIN)
+				status = -NFS4ERR_BAD_STATEID;
 			return status;
+		}
 	}
 	status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
 			pos_src, pos_src + (loff_t)count - 1);
@@ -297,8 +303,11 @@ static ssize_t _nfs42_proc_copy(struct file *src,
 
 	status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
 				     dst_lock, FMODE_WRITE);
-	if (status)
+	if (status) {
+		if (status == -EAGAIN)
+			status = -NFS4ERR_BAD_STATEID;
 		return status;
+	}
 
 	status = nfs_sync_inode(dst_inode);
 	if (status)
@@ -334,14 +343,14 @@ static ssize_t _nfs42_proc_copy(struct file *src,
 		status = handle_async_copy(res, dst_server, src_server, src,
 				dst, &args->src_stateid, restart);
 		if (status)
-			return status;
+			goto out;
 	}
 
 	if ((!res->synchronous || !args->sync) &&
 			res->write_res.verifier.committed != NFS_FILE_SYNC) {
 		status = process_copy_commit(dst, pos_dst, res);
 		if (status)
-			return status;
+			goto out;
 	}
 
 	truncate_pagecache_range(dst_inode, pos_dst,
@@ -546,8 +555,11 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
 	status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
 				     FMODE_READ);
 	nfs_put_lock_context(l_ctx);
-	if (status)
+	if (status) {
+		if (status == -EAGAIN)
+			status = -NFS4ERR_BAD_STATEID;
 		return status;
+	}
 
 	status = nfs4_call_sync(src_server->client, src_server, &msg,
 				&args->cna_seq_args, &res->cnr_seq_res, 0);
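
The same three lines translating -EAGAIN from nfs4_set_rw_stateid() into -NFS4ERR_BAD_STATEID are repeated after every stateid lookup in this file's hunks (fallocate, copy, copy_notify, llseek and clone). The kernel change keeps the open-coded form; purely as a sketch, the repetition could be folded into a small wrapper along these lines (the wrapper name is hypothetical):

#include <errno.h>
#include <stdio.h>

#define NFS4ERR_BAD_STATEID 10025	/* protocol error number from the NFSv4 spec */

/* Hypothetical helper: normalise -EAGAIN from the stateid selection
 * into the error these NFSv4.2 operations want to return. */
static int set_rw_stateid_checked(int status)
{
	if (status == -EAGAIN)
		return -NFS4ERR_BAD_STATEID;
	return status;
}

int main(void)
{
	printf("%d\n", set_rw_stateid_checked(-EAGAIN));	/* -10025 */
	printf("%d\n", set_rw_stateid_checked(0));		/* 0 */
	return 0;
}
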
@@ -618,8 +630,11 @@ static loff_t _nfs42_proc_llseek(struct file *filep,
 
 	status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
 			lock, FMODE_READ);
-	if (status)
+	if (status) {
+		if (status == -EAGAIN)
+			status = -NFS4ERR_BAD_STATEID;
 		return status;
+	}
 
 	status = nfs_filemap_write_and_wait_range(inode->i_mapping,
 			offset, LLONG_MAX);
@@ -994,13 +1009,18 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
 
 	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
 			src_lock, FMODE_READ);
-	if (status)
+	if (status) {
+		if (status == -EAGAIN)
+			status = -NFS4ERR_BAD_STATEID;
 		return status;
-
+	}
 	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
 			dst_lock, FMODE_WRITE);
-	if (status)
+	if (status) {
+		if (status == -EAGAIN)
+			status = -NFS4ERR_BAD_STATEID;
 		return status;
+	}
 
 	res.dst_fattr = nfs_alloc_fattr();
 	if (!res.dst_fattr)
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index a7a73b1..8be1ba7 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -268,14 +268,13 @@ extern const struct dentry_operations nfs4_dentry_operations;
 int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
 		    unsigned, umode_t);
 
-/* super.c */
+/* fs_context.c */
 extern struct file_system_type nfs4_fs_type;
 
 /* nfs4namespace.c */
 struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *,
 					 const struct qstr *);
-struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *,
-			       struct nfs_fh *, struct nfs_fattr *);
+int nfs4_submount(struct fs_context *, struct nfs_server *);
 int nfs4_replace_transport(struct nfs_server *server,
 				const struct nfs4_fs_locations *locations);
 
@@ -303,8 +302,10 @@ extern int nfs4_proc_fs_locations(struct rpc_clnt *, struct inode *, const struc
 extern int nfs4_proc_get_locations(struct inode *, struct nfs4_fs_locations *,
 		struct page *page, const struct cred *);
 extern int nfs4_proc_fsid_present(struct inode *, const struct cred *);
-extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, const struct qstr *,
-			    struct nfs_fh *, struct nfs_fattr *);
+extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *,
+						    struct dentry *,
+						    struct nfs_fh *,
+						    struct nfs_fattr *);
 extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
 extern const struct xattr_handler *nfs4_xattr_handlers[];
 extern int nfs4_set_rw_stateid(nfs4_stateid *stateid,
@@ -446,9 +447,7 @@ extern void nfs4_schedule_state_renewal(struct nfs_client *);
 extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
 extern void nfs4_kill_renewd(struct nfs_client *);
 extern void nfs4_renew_state(struct work_struct *);
-extern void nfs4_set_lease_period(struct nfs_client *clp,
-		unsigned long lease,
-		unsigned long lastrenewed);
+extern void nfs4_set_lease_period(struct nfs_client *clp, unsigned long lease);
 
 
 /* nfs4state.c */
@@ -526,7 +525,6 @@ extern const nfs4_stateid invalid_stateid;
 /* nfs4super.c */
 struct nfs_mount_info;
 extern struct nfs_subversion nfs_v4;
-struct dentry *nfs4_try_mount(int, const char *, struct nfs_mount_info *, struct nfs_subversion *);
 extern bool nfs4_disable_idmapping;
 extern unsigned short max_session_slots;
 extern unsigned short max_session_cb_slots;
@@ -536,6 +534,9 @@ extern bool recover_lost_locks;
 #define NFS4_CLIENT_ID_UNIQ_LEN		(64)
 extern char nfs4_client_id_uniquifier[NFS4_CLIENT_ID_UNIQ_LEN];
 
+extern int nfs4_try_get_tree(struct fs_context *);
+extern int nfs4_get_referral_tree(struct fs_context *);
+
 /* nfs4sysctl.c */
 #ifdef CONFIG_SYSCTL
 int nfs4_register_sysctl(void);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 460d625..0cd767e 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -1055,66 +1055,64 @@ static int nfs4_server_common_setup(struct nfs_server *server,
 /*
  * Create a version 4 volume record
  */
-static int nfs4_init_server(struct nfs_server *server,
-		struct nfs_parsed_mount_data *data)
+static int nfs4_init_server(struct nfs_server *server, struct fs_context *fc)
 {
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
 	struct rpc_timeout timeparms;
 	int error;
 
-	nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
-			data->timeo, data->retrans);
+	nfs_init_timeout_values(&timeparms, ctx->nfs_server.protocol,
+				ctx->timeo, ctx->retrans);
 
 	/* Initialise the client representation from the mount data */
-	server->flags = data->flags;
-	server->options = data->options;
-	server->auth_info = data->auth_info;
+	server->flags = ctx->flags;
+	server->options = ctx->options;
+	server->auth_info = ctx->auth_info;
 
 	/* Use the first specified auth flavor. If this flavor isn't
 	 * allowed by the server, use the SECINFO path to try the
 	 * other specified flavors */
-	if (data->auth_info.flavor_len >= 1)
-		data->selected_flavor = data->auth_info.flavors[0];
+	if (ctx->auth_info.flavor_len >= 1)
+		ctx->selected_flavor = ctx->auth_info.flavors[0];
 	else
-		data->selected_flavor = RPC_AUTH_UNIX;
+		ctx->selected_flavor = RPC_AUTH_UNIX;
 
 	/* Get a client record */
 	error = nfs4_set_client(server,
-			data->nfs_server.hostname,
-			(const struct sockaddr *)&data->nfs_server.address,
-			data->nfs_server.addrlen,
-			data->client_address,
-			data->nfs_server.protocol,
-			&timeparms,
-			data->minorversion,
-			data->nfs_server.nconnect,
-			data->net);
+				ctx->nfs_server.hostname,
+				&ctx->nfs_server.address,
+				ctx->nfs_server.addrlen,
+				ctx->client_address,
+				ctx->nfs_server.protocol,
+				&timeparms,
+				ctx->minorversion,
+				ctx->nfs_server.nconnect,
+				fc->net_ns);
 	if (error < 0)
 		return error;
 
-	if (data->rsize)
-		server->rsize = nfs_block_size(data->rsize, NULL);
-	if (data->wsize)
-		server->wsize = nfs_block_size(data->wsize, NULL);
+	if (ctx->rsize)
+		server->rsize = nfs_block_size(ctx->rsize, NULL);
+	if (ctx->wsize)
+		server->wsize = nfs_block_size(ctx->wsize, NULL);
 
-	server->acregmin = data->acregmin * HZ;
-	server->acregmax = data->acregmax * HZ;
-	server->acdirmin = data->acdirmin * HZ;
-	server->acdirmax = data->acdirmax * HZ;
-	server->port     = data->nfs_server.port;
+	server->acregmin = ctx->acregmin * HZ;
+	server->acregmax = ctx->acregmax * HZ;
+	server->acdirmin = ctx->acdirmin * HZ;
+	server->acdirmax = ctx->acdirmax * HZ;
+	server->port     = ctx->nfs_server.port;
 
 	return nfs_init_server_rpcclient(server, &timeparms,
-					 data->selected_flavor);
+					 ctx->selected_flavor);
 }
 
 /*
  * Create a version 4 volume record
  * - keyed on server and FSID
  */
-/*struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
-				      struct nfs_fh *mntfh)*/
-struct nfs_server *nfs4_create_server(struct nfs_mount_info *mount_info,
-				      struct nfs_subversion *nfs_mod)
+struct nfs_server *nfs4_create_server(struct fs_context *fc)
 {
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
 	struct nfs_server *server;
 	bool auth_probe;
 	int error;
@@ -1125,14 +1123,14 @@ struct nfs_server *nfs4_create_server(struct nfs_mount_info *mount_info,
 
 	server->cred = get_cred(current_cred());
 
-	auth_probe = mount_info->parsed->auth_info.flavor_len < 1;
+	auth_probe = ctx->auth_info.flavor_len < 1;
 
 	/* set up the general RPC client */
-	error = nfs4_init_server(server, mount_info->parsed);
+	error = nfs4_init_server(server, fc);
 	if (error < 0)
 		goto error;
 
-	error = nfs4_server_common_setup(server, mount_info->mntfh, auth_probe);
+	error = nfs4_server_common_setup(server, ctx->mntfh, auth_probe);
 	if (error < 0)
 		goto error;
 
@@ -1146,9 +1144,9 @@ struct nfs_server *nfs4_create_server(struct nfs_mount_info *mount_info,
 /*
  * Create an NFS4 referral server record
  */
-struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
-					       struct nfs_fh *mntfh)
+struct nfs_server *nfs4_create_referral_server(struct fs_context *fc)
 {
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
 	struct nfs_client *parent_client;
 	struct nfs_server *server, *parent_server;
 	bool auth_probe;
@@ -1158,7 +1156,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
 	if (!server)
 		return ERR_PTR(-ENOMEM);
 
-	parent_server = NFS_SB(data->sb);
+	parent_server = NFS_SB(ctx->clone_data.sb);
 	parent_client = parent_server->nfs_client;
 
 	server->cred = get_cred(parent_server->cred);
@@ -1168,10 +1166,11 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
 
 	/* Get a client representation */
 #if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
-	rpc_set_port(data->addr, NFS_RDMA_PORT);
-	error = nfs4_set_client(server, data->hostname,
-				data->addr,
-				data->addrlen,
+	rpc_set_port(&ctx->nfs_server.address, NFS_RDMA_PORT);
+	error = nfs4_set_client(server,
+				ctx->nfs_server.hostname,
+				&ctx->nfs_server.address,
+				ctx->nfs_server.addrlen,
 				parent_client->cl_ipaddr,
 				XPRT_TRANSPORT_RDMA,
 				parent_server->client->cl_timeout,
@@ -1182,10 +1181,11 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
 		goto init_server;
 #endif	/* IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) */
 
-	rpc_set_port(data->addr, NFS_PORT);
-	error = nfs4_set_client(server, data->hostname,
-				data->addr,
-				data->addrlen,
+	rpc_set_port(&ctx->nfs_server.address, NFS_PORT);
+	error = nfs4_set_client(server,
+				ctx->nfs_server.hostname,
+				&ctx->nfs_server.address,
+				ctx->nfs_server.addrlen,
 				parent_client->cl_ipaddr,
 				XPRT_TRANSPORT_TCP,
 				parent_server->client->cl_timeout,
@@ -1198,13 +1198,14 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
 #if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
 init_server:
 #endif
-	error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor);
+	error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout,
+					  ctx->selected_flavor);
 	if (error < 0)
 		goto error;
 
 	auth_probe = parent_server->auth_info.flavor_len < 1;
 
-	error = nfs4_server_common_setup(server, mntfh, auth_probe);
+	error = nfs4_server_common_setup(server, ctx->mntfh, auth_probe);
 	if (error < 0)
 		goto error;
 
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 620de90..be4eb72 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -7,6 +7,7 @@
 #include <linux/fs.h>
 #include <linux/file.h>
 #include <linux/falloc.h>
+#include <linux/mount.h>
 #include <linux/nfs_fs.h>
 #include "delegation.h"
 #include "internal.h"
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 2e460c3..84026e7 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -8,6 +8,7 @@
  * NFSv4 namespace
  */
 
+#include <linux/module.h>
 #include <linux/dcache.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
@@ -21,37 +22,64 @@
 #include <linux/inet.h>
 #include "internal.h"
 #include "nfs4_fs.h"
+#include "nfs.h"
 #include "dns_resolve.h"
 
 #define NFSDBG_FACILITY		NFSDBG_VFS
 
 /*
- * Convert the NFSv4 pathname components into a standard posix path.
- *
- * Note that the resulting string will be placed at the end of the buffer
+ * Work out the length that an NFSv4 path would render to as a standard posix
+ * path, with a leading slash but no terminating slash.
  */
-static inline char *nfs4_pathname_string(const struct nfs4_pathname *pathname,
-					 char *buffer, ssize_t buflen)
+static ssize_t nfs4_pathname_len(const struct nfs4_pathname *pathname)
 {
-	char *end = buffer + buflen;
-	int n;
+	ssize_t len = 0;
+	int i;
 
-	*--end = '\0';
-	buflen--;
+	for (i = 0; i < pathname->ncomponents; i++) {
+		const struct nfs4_string *component = &pathname->components[i];
 
-	n = pathname->ncomponents;
-	while (--n >= 0) {
-		const struct nfs4_string *component = &pathname->components[n];
-		buflen -= component->len + 1;
-		if (buflen < 0)
-			goto Elong;
-		end -= component->len;
-		memcpy(end, component->data, component->len);
-		*--end = '/';
+		if (component->len > NAME_MAX)
+			goto too_long;
+		len += 1 + component->len; /* Adding "/foo" */
+		if (len > PATH_MAX)
+			goto too_long;
 	}
-	return end;
-Elong:
-	return ERR_PTR(-ENAMETOOLONG);
+	return len;
+
+too_long:
+	return -ENAMETOOLONG;
+}
+
+/*
+ * Convert the NFSv4 pathname components into a standard posix path.
+ */
+static char *nfs4_pathname_string(const struct nfs4_pathname *pathname,
+				  unsigned short *_len)
+{
+	ssize_t len;
+	char *buf, *p;
+	int i;
+
+	len = nfs4_pathname_len(pathname);
+	if (len < 0)
+		return ERR_PTR(len);
+	*_len = len;
+
+	p = buf = kmalloc(len + 1, GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < pathname->ncomponents; i++) {
+		const struct nfs4_string *component = &pathname->components[i];
+
+		*p++ = '/';
+		memcpy(p, component->data, component->len);
+		p += component->len;
+	}
+
+	*p = 0;
+	return buf;
 }
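
The rewritten pathname handling is a two-pass join: nfs4_pathname_len() totals 1 + component length per element, rejecting anything over NAME_MAX or PATH_MAX, and nfs4_pathname_string() then allocates exactly that much and writes the components front to back, replacing the old back-to-front construction into a borrowed page. The same two-pass join in ordinary userspace C (limits and names are illustrative):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

#ifndef NAME_MAX
#define NAME_MAX 255
#endif
#ifndef PATH_MAX
#define PATH_MAX 4096
#endif

/* Pass 1: work out how long "/a/b/c" will be, or fail early. */
static ssize_t path_len(const char *const *comp, int n)
{
	ssize_t len = 0;
	int i;

	for (i = 0; i < n; i++) {
		size_t l = strlen(comp[i]);

		if (l > NAME_MAX)
			return -1;
		len += 1 + (ssize_t)l;		/* "/" plus the component */
		if (len > PATH_MAX)
			return -1;
	}
	return len;
}

/* Pass 2: allocate exactly enough and copy the components in order. */
static char *path_string(const char *const *comp, int n)
{
	ssize_t len = path_len(comp, n);
	char *buf, *p;
	int i;

	if (len < 0)
		return NULL;
	p = buf = malloc(len + 1);
	if (!buf)
		return NULL;
	for (i = 0; i < n; i++) {
		size_t l = strlen(comp[i]);

		*p++ = '/';
		memcpy(p, comp[i], l);
		p += l;
	}
	*p = '\0';
	return buf;
}

int main(void)
{
	const char *comp[] = { "export", "home", "alice" };
	char *path = path_string(comp, 3);

	printf("%s\n", path ? path : "(too long)");	/* /export/home/alice */
	free(path);
	return 0;
}
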
 
 /*
@@ -100,21 +128,36 @@ static char *nfs4_path(struct dentry *dentry, char *buffer, ssize_t buflen)
  */
 static int nfs4_validate_fspath(struct dentry *dentry,
 				const struct nfs4_fs_locations *locations,
-				char *page, char *page2)
+				struct nfs_fs_context *ctx)
 {
-	const char *path, *fs_path;
+	const char *path;
+	char *fs_path;
+	unsigned short len;
+	char *buf;
+	int n;
 
-	path = nfs4_path(dentry, page, PAGE_SIZE);
-	if (IS_ERR(path))
+	buf = kmalloc(4096, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	path = nfs4_path(dentry, buf, 4096);
+	if (IS_ERR(path)) {
+		kfree(buf);
 		return PTR_ERR(path);
+	}
 
-	fs_path = nfs4_pathname_string(&locations->fs_path, page2, PAGE_SIZE);
-	if (IS_ERR(fs_path))
+	fs_path = nfs4_pathname_string(&locations->fs_path, &len);
+	if (IS_ERR(fs_path)) {
+		kfree(buf);
 		return PTR_ERR(fs_path);
+	}
 
-	if (strncmp(path, fs_path, strlen(fs_path)) != 0) {
+	n = strncmp(path, fs_path, len);
+	kfree(buf);
+	kfree(fs_path);
+	if (n != 0) {
 		dprintk("%s: path %s does not begin with fsroot %s\n",
-			__func__, path, fs_path);
+			__func__, path, ctx->nfs_server.export_path);
 		return -ENOENT;
 	}
 
@@ -236,55 +279,77 @@ nfs4_negotiate_security(struct rpc_clnt *clnt, struct inode *inode,
 	return new;
 }
 
-static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
-				     char *page, char *page2,
-				     const struct nfs4_fs_location *location)
+static int try_location(struct fs_context *fc,
+			const struct nfs4_fs_location *location)
 {
-	const size_t addr_bufsize = sizeof(struct sockaddr_storage);
-	struct net *net = rpc_net_ns(NFS_SB(mountdata->sb)->client);
-	struct vfsmount *mnt = ERR_PTR(-ENOENT);
-	char *mnt_path;
-	unsigned int maxbuflen;
-	unsigned int s;
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	unsigned int len, s;
+	char *export_path, *source, *p;
+	int ret = -ENOENT;
 
-	mnt_path = nfs4_pathname_string(&location->rootpath, page2, PAGE_SIZE);
-	if (IS_ERR(mnt_path))
-		return ERR_CAST(mnt_path);
-	mountdata->mnt_path = mnt_path;
-	maxbuflen = mnt_path - 1 - page2;
-
-	mountdata->addr = kmalloc(addr_bufsize, GFP_KERNEL);
-	if (mountdata->addr == NULL)
-		return ERR_PTR(-ENOMEM);
-
+	/* Allocate a buffer big enough to hold any of the hostnames plus a
+	 * terminating char and also a buffer big enough to hold the hostname
+	 * plus a colon plus the path.
+	 */
+	len = 0;
 	for (s = 0; s < location->nservers; s++) {
 		const struct nfs4_string *buf = &location->servers[s];
+		if (buf->len > len)
+			len = buf->len;
+	}
 
-		if (buf->len <= 0 || buf->len >= maxbuflen)
-			continue;
+	kfree(ctx->nfs_server.hostname);
+	ctx->nfs_server.hostname = kmalloc(len + 1, GFP_KERNEL);
+	if (!ctx->nfs_server.hostname)
+		return -ENOMEM;
+
+	export_path = nfs4_pathname_string(&location->rootpath,
+					   &ctx->nfs_server.export_path_len);
+	if (IS_ERR(export_path))
+		return PTR_ERR(export_path);
+
+	ctx->nfs_server.export_path = export_path;
+
+	source = kmalloc(len + 1 + ctx->nfs_server.export_path_len + 1,
+			 GFP_KERNEL);
+	if (!source)
+		return -ENOMEM;
+
+	kfree(fc->source);
+	fc->source = source;
+	for (s = 0; s < location->nservers; s++) {
+		const struct nfs4_string *buf = &location->servers[s];
 
 		if (memchr(buf->data, IPV6_SCOPE_DELIMITER, buf->len))
 			continue;
 
-		mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len,
-				mountdata->addr, addr_bufsize, net);
-		if (mountdata->addrlen == 0)
+		ctx->nfs_server.addrlen =
+			nfs_parse_server_name(buf->data, buf->len,
+					      &ctx->nfs_server.address,
+					      sizeof(ctx->nfs_server._address),
+					      fc->net_ns);
+		if (ctx->nfs_server.addrlen == 0)
 			continue;
 
-		memcpy(page2, buf->data, buf->len);
-		page2[buf->len] = '\0';
-		mountdata->hostname = page2;
+		rpc_set_port(&ctx->nfs_server.address, NFS_PORT);
 
-		snprintf(page, PAGE_SIZE, "%s:%s",
-				mountdata->hostname,
-				mountdata->mnt_path);
+		memcpy(ctx->nfs_server.hostname, buf->data, buf->len);
+		ctx->nfs_server.hostname[buf->len] = '\0';
 
-		mnt = vfs_submount(mountdata->dentry, &nfs4_referral_fs_type, page, mountdata);
-		if (!IS_ERR(mnt))
-			break;
+		p = source;
+		memcpy(p, buf->data, buf->len);
+		p += buf->len;
+		*p++ = ':';
+		memcpy(p, ctx->nfs_server.export_path, ctx->nfs_server.export_path_len);
+		p += ctx->nfs_server.export_path_len;
+		*p = 0;
+
+		ret = nfs4_get_referral_tree(fc);
+		if (ret == 0)
+			return 0;
 	}
-	kfree(mountdata->addr);
-	return mnt;
+
+	return ret;
 }
 
 /**
@@ -293,38 +358,23 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
  * @locations: array of NFSv4 server location information
  *
  */
-static struct vfsmount *nfs_follow_referral(struct dentry *dentry,
-					    const struct nfs4_fs_locations *locations)
+static int nfs_follow_referral(struct fs_context *fc,
+			       const struct nfs4_fs_locations *locations)
 {
-	struct vfsmount *mnt = ERR_PTR(-ENOENT);
-	struct nfs_clone_mount mountdata = {
-		.sb = dentry->d_sb,
-		.dentry = dentry,
-		.authflavor = NFS_SB(dentry->d_sb)->client->cl_auth->au_flavor,
-	};
-	char *page = NULL, *page2 = NULL;
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
 	int loc, error;
 
 	if (locations == NULL || locations->nlocations <= 0)
-		goto out;
+		return -ENOENT;
 
-	dprintk("%s: referral at %pd2\n", __func__, dentry);
-
-	page = (char *) __get_free_page(GFP_USER);
-	if (!page)
-		goto out;
-
-	page2 = (char *) __get_free_page(GFP_USER);
-	if (!page2)
-		goto out;
+	dprintk("%s: referral at %pd2\n", __func__, ctx->clone_data.dentry);
 
 	/* Ensure fs path is a prefix of current dentry path */
-	error = nfs4_validate_fspath(dentry, locations, page, page2);
-	if (error < 0) {
-		mnt = ERR_PTR(error);
-		goto out;
-	}
+	error = nfs4_validate_fspath(ctx->clone_data.dentry, locations, ctx);
+	if (error < 0)
+		return error;
 
+	error = -ENOENT;
 	for (loc = 0; loc < locations->nlocations; loc++) {
 		const struct nfs4_fs_location *location = &locations->locations[loc];
 
@@ -332,15 +382,12 @@ static struct vfsmount *nfs_follow_referral(struct dentry *dentry,
 		    location->rootpath.ncomponents == 0)
 			continue;
 
-		mnt = try_location(&mountdata, page, page2, location);
-		if (!IS_ERR(mnt))
-			break;
+		error = try_location(fc, location);
+		if (error == 0)
+			return 0;
 	}
 
-out:
-	free_page((unsigned long) page);
-	free_page((unsigned long) page2);
-	return mnt;
+	return error;
 }
 
 /*
@@ -348,71 +395,72 @@ static struct vfsmount *nfs_follow_referral(struct dentry *dentry,
  * @dentry - dentry of referral
  *
  */
-static struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
+static int nfs_do_refmount(struct fs_context *fc, struct rpc_clnt *client)
 {
-	struct vfsmount *mnt = ERR_PTR(-ENOMEM);
-	struct dentry *parent;
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	struct dentry *dentry, *parent;
 	struct nfs4_fs_locations *fs_locations = NULL;
 	struct page *page;
-	int err;
+	int err = -ENOMEM;
 
 	/* BUG_ON(IS_ROOT(dentry)); */
 	page = alloc_page(GFP_KERNEL);
-	if (page == NULL)
-		return mnt;
+	if (!page)
+		return -ENOMEM;
 
 	fs_locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
-	if (fs_locations == NULL)
+	if (!fs_locations)
 		goto out_free;
 
 	/* Get locations */
-	mnt = ERR_PTR(-ENOENT);
-
+	dentry = ctx->clone_data.dentry;
 	parent = dget_parent(dentry);
 	dprintk("%s: getting locations for %pd2\n",
 		__func__, dentry);
 
 	err = nfs4_proc_fs_locations(client, d_inode(parent), &dentry->d_name, fs_locations, page);
 	dput(parent);
-	if (err != 0 ||
-	    fs_locations->nlocations <= 0 ||
-	    fs_locations->fs_path.ncomponents <= 0)
-		goto out_free;
+	if (err != 0)
+		goto out_free_2;
 
-	mnt = nfs_follow_referral(dentry, fs_locations);
+	err = -ENOENT;
+	if (fs_locations->nlocations <= 0 ||
+	    fs_locations->fs_path.ncomponents <= 0)
+		goto out_free_2;
+
+	err = nfs_follow_referral(fc, fs_locations);
+out_free_2:
+	kfree(fs_locations);
 out_free:
 	__free_page(page);
-	kfree(fs_locations);
-	return mnt;
+	return err;
 }
 
-struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
-			       struct nfs_fh *fh, struct nfs_fattr *fattr)
+int nfs4_submount(struct fs_context *fc, struct nfs_server *server)
 {
-	rpc_authflavor_t flavor = server->client->cl_auth->au_flavor;
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	struct dentry *dentry = ctx->clone_data.dentry;
 	struct dentry *parent = dget_parent(dentry);
 	struct inode *dir = d_inode(parent);
-	const struct qstr *name = &dentry->d_name;
 	struct rpc_clnt *client;
-	struct vfsmount *mnt;
+	int ret;
 
 	/* Look it up again to get its attributes and sec flavor */
-	client = nfs4_proc_lookup_mountpoint(dir, name, fh, fattr);
+	client = nfs4_proc_lookup_mountpoint(dir, dentry, ctx->mntfh,
+					     ctx->clone_data.fattr);
 	dput(parent);
 	if (IS_ERR(client))
-		return ERR_CAST(client);
+		return PTR_ERR(client);
 
-	if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
-		mnt = nfs_do_refmount(client, dentry);
-		goto out;
+	ctx->selected_flavor = client->cl_auth->au_flavor;
+	if (ctx->clone_data.fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
+		ret = nfs_do_refmount(fc, client);
+	} else {
+		ret = nfs_do_submount(fc);
 	}
 
-	if (client->cl_auth->au_flavor != flavor)
-		flavor = client->cl_auth->au_flavor;
-	mnt = nfs_do_submount(dentry, fh, fattr, flavor);
-out:
 	rpc_shutdown_client(client);
-	return mnt;
+	return ret;
 }
 
 /*
@@ -453,7 +501,7 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server,
 		rpc_set_port(sap, NFS_PORT);
 
 		error = -ENOMEM;
-		hostname = kstrndup(buf->data, buf->len, GFP_KERNEL);
+		hostname = kmemdup_nul(buf->data, buf->len, GFP_KERNEL);
 		if (hostname == NULL)
 			break;
 
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 76d3716..95d07a3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1097,11 +1097,12 @@ static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
 	return ret;
 }
 
-static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
-				   struct nfs_server *server,
-				   struct rpc_message *msg,
-				   struct nfs4_sequence_args *args,
-				   struct nfs4_sequence_res *res)
+static int nfs4_do_call_sync(struct rpc_clnt *clnt,
+			     struct nfs_server *server,
+			     struct rpc_message *msg,
+			     struct nfs4_sequence_args *args,
+			     struct nfs4_sequence_res *res,
+			     unsigned short task_flags)
 {
 	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_call_sync_data data = {
@@ -1113,12 +1114,23 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
 		.rpc_client = clnt,
 		.rpc_message = msg,
 		.callback_ops = clp->cl_mvops->call_sync_ops,
-		.callback_data = &data
+		.callback_data = &data,
+		.flags = task_flags,
 	};
 
 	return nfs4_call_sync_custom(&task_setup);
 }
 
+static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
+				   struct nfs_server *server,
+				   struct rpc_message *msg,
+				   struct nfs4_sequence_args *args,
+				   struct nfs4_sequence_res *res)
+{
+	return nfs4_do_call_sync(clnt, server, msg, args, res, 0);
+}
+
+
 int nfs4_call_sync(struct rpc_clnt *clnt,
 		   struct nfs_server *server,
 		   struct rpc_message *msg,
@@ -3187,6 +3199,11 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
 			exception.retry = 1;
 			continue;
 		}
+		if (status == -NFS4ERR_EXPIRED) {
+			nfs4_schedule_lease_recovery(server->nfs_client);
+			exception.retry = 1;
+			continue;
+		}
 		if (status == -EAGAIN) {
 			/* We must have found a delegation */
 			exception.retry = 1;
@@ -3239,6 +3256,8 @@ static int _nfs4_do_setattr(struct inode *inode,
 		nfs_put_lock_context(l_ctx);
 		if (status == -EIO)
 			return -EBADF;
+		else if (status == -EAGAIN)
+			goto zero_stateid;
 	} else {
 zero_stateid:
 		nfs4_stateid_copy(&arg->stateid, &zero_stateid);
@@ -4064,11 +4083,18 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
 		.rpc_argp = &args,
 		.rpc_resp = &res,
 	};
+	unsigned short task_flags = 0;
+
+	/* Is this an attribute revalidation, subject to softreval? */
+	if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
+		task_flags |= RPC_TASK_TIMEOUT;
 
 	nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode);
 
 	nfs_fattr_init(fattr);
-	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
+	return nfs4_do_call_sync(server->client, server, &msg,
+			&args.seq_args, &res.seq_res, task_flags);
 }
 
 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -4156,7 +4182,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
 }
 
 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
-		const struct qstr *name, struct nfs_fh *fhandle,
+		struct dentry *dentry, struct nfs_fh *fhandle,
 		struct nfs_fattr *fattr, struct nfs4_label *label)
 {
 	struct nfs_server *server = NFS_SERVER(dir);
@@ -4164,7 +4190,7 @@ static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
 	struct nfs4_lookup_arg args = {
 		.bitmask = server->attr_bitmask,
 		.dir_fh = NFS_FH(dir),
-		.name = name,
+		.name = &dentry->d_name,
 	};
 	struct nfs4_lookup_res res = {
 		.server = server,
@@ -4177,13 +4203,20 @@ static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
 		.rpc_argp = &args,
 		.rpc_resp = &res,
 	};
+	unsigned short task_flags = 0;
+
+	/* Is this an attribute revalidation, subject to softreval? */
+	if (nfs_lookup_is_soft_revalidate(dentry))
+		task_flags |= RPC_TASK_TIMEOUT;
 
 	args.bitmask = nfs4_bitmask(server, label);
 
 	nfs_fattr_init(fattr);
 
-	dprintk("NFS call  lookup %s\n", name->name);
-	status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
+	dprintk("NFS call  lookup %pd2\n", dentry);
+	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
+	status = nfs4_do_call_sync(clnt, server, &msg,
+			&args.seq_args, &res.seq_res, task_flags);
 	dprintk("NFS reply lookup: %d\n", status);
 	return status;
 }
@@ -4197,16 +4230,17 @@ static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
 }
 
 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
-				   const struct qstr *name, struct nfs_fh *fhandle,
+				   struct dentry *dentry, struct nfs_fh *fhandle,
 				   struct nfs_fattr *fattr, struct nfs4_label *label)
 {
 	struct nfs4_exception exception = {
 		.interruptible = true,
 	};
 	struct rpc_clnt *client = *clnt;
+	const struct qstr *name = &dentry->d_name;
 	int err;
 	do {
-		err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
+		err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr, label);
 		trace_nfs4_lookup(dir, name, err);
 		switch (err) {
 		case -NFS4ERR_BADNAME:
@@ -4241,14 +4275,14 @@ static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
 	return err;
 }
 
-static int nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
+static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry,
 			    struct nfs_fh *fhandle, struct nfs_fattr *fattr,
 			    struct nfs4_label *label)
 {
 	int status;
 	struct rpc_clnt *client = NFS_CLIENT(dir);
 
-	status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
+	status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, label);
 	if (client != NFS_CLIENT(dir)) {
 		rpc_shutdown_client(client);
 		nfs_fixup_secinfo_attributes(fattr);
@@ -4257,13 +4291,13 @@ static int nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
 }
 
 struct rpc_clnt *
-nfs4_proc_lookup_mountpoint(struct inode *dir, const struct qstr *name,
+nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
 			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
 {
 	struct rpc_clnt *client = NFS_CLIENT(dir);
 	int status;
 
-	status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
+	status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, NULL);
 	if (status < 0)
 		return ERR_PTR(status);
 	return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
@@ -5019,16 +5053,13 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str
 	struct nfs4_exception exception = {
 		.interruptible = true,
 	};
-	unsigned long now = jiffies;
 	int err;
 
 	do {
 		err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
 		trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
 		if (err == 0) {
-			nfs4_set_lease_period(server->nfs_client,
-					fsinfo->lease_time * HZ,
-					now);
+			nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
 			break;
 		}
 		err = nfs4_handle_exception(server, err, &exception);
@@ -5582,10 +5613,9 @@ static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size
  */
 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
 {
-	struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
+	struct page **pages;
 	struct nfs_getaclargs args = {
 		.fh = NFS_FH(inode),
-		.acl_pages = pages,
 		.acl_len = buflen,
 	};
 	struct nfs_getaclres res = {
@@ -5596,11 +5626,19 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
 		.rpc_argp = &args,
 		.rpc_resp = &res,
 	};
-	unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
+	unsigned int npages;
 	int ret = -ENOMEM, i;
+	struct nfs_server *server = NFS_SERVER(inode);
 
-	if (npages > ARRAY_SIZE(pages))
-		return -ERANGE;
+	if (buflen == 0)
+		buflen = server->rsize;
+
+	npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
+	pages = kmalloc_array(npages, sizeof(struct page *), GFP_NOFS);
+	if (!pages)
+		return -ENOMEM;
+
+	args.acl_pages = pages;
 
 	for (i = 0; i < npages; i++) {
 		pages[i] = alloc_page(GFP_KERNEL);
@@ -5646,6 +5684,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
 			__free_page(pages[i]);
 	if (res.acl_scratch)
 		__free_page(res.acl_scratch);
+	kfree(pages);
 	return ret;
 }
 
@@ -6084,6 +6123,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
 		.callback_data = &setclientid,
 		.flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
 	};
+	unsigned long now = jiffies;
 	int status;
 
 	/* nfs_client_id4 */
@@ -6116,6 +6156,9 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
 		clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
 		put_rpccred(setclientid.sc_cred);
 	}
+
+	if (status == 0)
+		do_renew_lease(clp, now);
 out:
 	trace_nfs4_setclientid(clp, status);
 	dprintk("NFS reply setclientid: %d\n", status);
@@ -6859,7 +6902,7 @@ static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_
 	case -NFS4ERR_STALE_STATEID:
 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
 		nfs4_schedule_lease_recovery(server->nfs_client);
-	};
+	}
 }
 
 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
@@ -8203,6 +8246,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
 	struct rpc_task *task;
 	struct nfs41_exchange_id_args *argp;
 	struct nfs41_exchange_id_res *resp;
+	unsigned long now = jiffies;
 	int status;
 
 	task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
@@ -8223,6 +8267,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
 	if (status != 0)
 		goto out;
 
+	do_renew_lease(clp, now);
+
 	clp->cl_clientid = resp->clientid;
 	clp->cl_exchange_flags = resp->flags;
 	clp->cl_seqid = resp->seqid;
@@ -8626,7 +8672,7 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
 	case -EACCES:
 	case -EAGAIN:
 		goto out;
-	};
+	}
 
 	clp->cl_seqid++;
 	if (!status) {
@@ -10001,7 +10047,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
 	.file_ops	= &nfs4_file_operations,
 	.getroot	= nfs4_proc_get_root,
 	.submount	= nfs4_submount,
-	.try_mount	= nfs4_try_mount,
+	.try_get_tree	= nfs4_try_get_tree,
 	.getattr	= nfs4_proc_getattr,
 	.setattr	= nfs4_proc_setattr,
 	.lookup		= nfs4_proc_lookup,
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 6ea431b..ff876dd 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -138,15 +138,12 @@ nfs4_kill_renewd(struct nfs_client *clp)
  *
  * @clp: pointer to nfs_client
  * @lease: new value for lease period
- * @lastrenewed: time at which lease was last renewed
  */
 void nfs4_set_lease_period(struct nfs_client *clp,
-		unsigned long lease,
-		unsigned long lastrenewed)
+		unsigned long lease)
 {
 	spin_lock(&clp->cl_lock);
 	clp->cl_lease_time = lease;
-	clp->cl_last_renewal = lastrenewed;
 	spin_unlock(&clp->cl_lock);
 
 	/* Cap maximum reconnect timeout at 1/2 lease period */
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 34552329..f7723d2 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -92,17 +92,15 @@ static int nfs4_setup_state_renewal(struct nfs_client *clp)
 {
 	int status;
 	struct nfs_fsinfo fsinfo;
-	unsigned long now;
 
 	if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
 		nfs4_schedule_state_renewal(clp);
 		return 0;
 	}
 
-	now = jiffies;
 	status = nfs4_proc_get_lease_time(clp, &fsinfo);
 	if (status == 0) {
-		nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
+		nfs4_set_lease_period(clp, fsinfo.lease_time * HZ);
 		nfs4_schedule_state_renewal(clp);
 	}
 
@@ -766,6 +764,7 @@ void nfs4_put_open_state(struct nfs4_state *state)
 	list_del(&state->open_states);
 	spin_unlock(&inode->i_lock);
 	spin_unlock(&owner->so_lock);
+	nfs4_inode_return_delegation_on_close(inode);
 	iput(inode);
 	nfs4_free_open_state(state);
 	nfs4_put_state_owner(owner);
@@ -1135,7 +1134,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
 		case -NFS4ERR_MOVED:
 			/* Non-seqid mutating errors */
 			return;
-	};
+	}
 	/*
 	 * Note: no locking needed as we are guaranteed to be first
 	 * on the sequence list
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index 2c9cbad..1475f93 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -4,6 +4,7 @@
  */
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/mount.h>
 #include <linux/nfs4_mount.h>
 #include <linux/nfs_fs.h>
 #include "delegation.h"
@@ -18,36 +19,6 @@
 
 static int nfs4_write_inode(struct inode *inode, struct writeback_control *wbc);
 static void nfs4_evict_inode(struct inode *inode);
-static struct dentry *nfs4_remote_mount(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *raw_data);
-static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *raw_data);
-static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *raw_data);
-
-static struct file_system_type nfs4_remote_fs_type = {
-	.owner		= THIS_MODULE,
-	.name		= "nfs4",
-	.mount		= nfs4_remote_mount,
-	.kill_sb	= nfs_kill_super,
-	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
-};
-
-static struct file_system_type nfs4_remote_referral_fs_type = {
-	.owner		= THIS_MODULE,
-	.name		= "nfs4",
-	.mount		= nfs4_remote_referral_mount,
-	.kill_sb	= nfs_kill_super,
-	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
-};
-
-struct file_system_type nfs4_referral_fs_type = {
-	.owner		= THIS_MODULE,
-	.name		= "nfs4",
-	.mount		= nfs4_referral_mount,
-	.kill_sb	= nfs_kill_super,
-	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
-};
 
 static const struct super_operations nfs4_sops = {
 	.alloc_inode	= nfs_alloc_inode,
@@ -61,16 +32,15 @@ static const struct super_operations nfs4_sops = {
 	.show_devname	= nfs_show_devname,
 	.show_path	= nfs_show_path,
 	.show_stats	= nfs_show_stats,
-	.remount_fs	= nfs_remount,
 };
 
 struct nfs_subversion nfs_v4 = {
-	.owner = THIS_MODULE,
-	.nfs_fs   = &nfs4_fs_type,
-	.rpc_vers = &nfs_version4,
-	.rpc_ops  = &nfs_v4_clientops,
-	.sops     = &nfs4_sops,
-	.xattr    = nfs4_xattr_handlers,
+	.owner		= THIS_MODULE,
+	.nfs_fs		= &nfs4_fs_type,
+	.rpc_vers	= &nfs_version4,
+	.rpc_ops	= &nfs_v4_clientops,
+	.sops		= &nfs4_sops,
+	.xattr		= nfs4_xattr_handlers,
 };
 
 static int nfs4_write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -101,53 +71,6 @@ static void nfs4_evict_inode(struct inode *inode)
 	nfs_clear_inode(inode);
 }
 
-/*
- * Get the superblock for the NFS4 root partition
- */
-static struct dentry *
-nfs4_remote_mount(struct file_system_type *fs_type, int flags,
-		  const char *dev_name, void *info)
-{
-	struct nfs_mount_info *mount_info = info;
-	struct nfs_server *server;
-	struct dentry *mntroot = ERR_PTR(-ENOMEM);
-
-	mount_info->set_security = nfs_set_sb_security;
-
-	/* Get a volume representation */
-	server = nfs4_create_server(mount_info, &nfs_v4);
-	if (IS_ERR(server)) {
-		mntroot = ERR_CAST(server);
-		goto out;
-	}
-
-	mntroot = nfs_fs_mount_common(server, flags, dev_name, mount_info, &nfs_v4);
-
-out:
-	return mntroot;
-}
-
-static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
-		int flags, void *data, const char *hostname)
-{
-	struct vfsmount *root_mnt;
-	char *root_devname;
-	size_t len;
-
-	len = strlen(hostname) + 5;
-	root_devname = kmalloc(len, GFP_KERNEL);
-	if (root_devname == NULL)
-		return ERR_PTR(-ENOMEM);
-	/* Does hostname needs to be enclosed in brackets? */
-	if (strchr(hostname, ':'))
-		snprintf(root_devname, len, "[%s]:/", hostname);
-	else
-		snprintf(root_devname, len, "%s:/", hostname);
-	root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data);
-	kfree(root_devname);
-	return root_mnt;
-}
-
 struct nfs_referral_count {
 	struct list_head list;
 	const struct task_struct *task;
@@ -214,111 +137,125 @@ static void nfs_referral_loop_unprotect(void)
 	kfree(p);
 }
 
-static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
-		const char *export_path)
+static int do_nfs4_mount(struct nfs_server *server,
+			 struct fs_context *fc,
+			 const char *hostname,
+			 const char *export_path)
 {
+	struct nfs_fs_context *root_ctx;
+	struct fs_context *root_fc;
+	struct vfsmount *root_mnt;
 	struct dentry *dentry;
-	int err;
+	size_t len;
+	int ret;
+
+	struct fs_parameter param = {
+		.key	= "source",
+		.type	= fs_value_is_string,
+		.dirfd	= -1,
+	};
+
+	if (IS_ERR(server))
+		return PTR_ERR(server);
+
+	root_fc = vfs_dup_fs_context(fc);
+	if (IS_ERR(root_fc)) {
+		nfs_free_server(server);
+		return PTR_ERR(root_fc);
+	}
+	kfree(root_fc->source);
+	root_fc->source = NULL;
+
+	root_ctx = nfs_fc2context(root_fc);
+	root_ctx->internal = true;
+	root_ctx->server = server;
+	/* We leave export_path unset as it's not used to find the root. */
+
+	len = strlen(hostname) + 5;
+	param.string = kmalloc(len, GFP_KERNEL);
+	if (param.string == NULL) {
+		put_fs_context(root_fc);
+		return -ENOMEM;
+	}
+
+	/* Does hostname need to be enclosed in brackets? */
+	if (strchr(hostname, ':'))
+		param.size = snprintf(param.string, len, "[%s]:/", hostname);
+	else
+		param.size = snprintf(param.string, len, "%s:/", hostname);
+	ret = vfs_parse_fs_param(root_fc, &param);
+	kfree(param.string);
+	if (ret < 0) {
+		put_fs_context(root_fc);
+		return ret;
+	}
+	root_mnt = fc_mount(root_fc);
+	put_fs_context(root_fc);
 
 	if (IS_ERR(root_mnt))
-		return ERR_CAST(root_mnt);
+		return PTR_ERR(root_mnt);
 
-	err = nfs_referral_loop_protect();
-	if (err) {
+	ret = nfs_referral_loop_protect();
+	if (ret) {
 		mntput(root_mnt);
-		return ERR_PTR(err);
+		return ret;
 	}
 
 	dentry = mount_subtree(root_mnt, export_path);
 	nfs_referral_loop_unprotect();
 
-	return dentry;
+	if (IS_ERR(dentry))
+		return PTR_ERR(dentry);
+
+	fc->root = dentry;
+	return 0;
 }
 
-struct dentry *nfs4_try_mount(int flags, const char *dev_name,
-			      struct nfs_mount_info *mount_info,
-			      struct nfs_subversion *nfs_mod)
+int nfs4_try_get_tree(struct fs_context *fc)
 {
-	char *export_path;
-	struct vfsmount *root_mnt;
-	struct dentry *res;
-	struct nfs_parsed_mount_data *data = mount_info->parsed;
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	int err;
 
-	dfprintk(MOUNT, "--> nfs4_try_mount()\n");
+	dfprintk(MOUNT, "--> nfs4_try_get_tree()\n");
 
-	export_path = data->nfs_server.export_path;
-	data->nfs_server.export_path = "/";
-	root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
-			data->nfs_server.hostname);
-	data->nfs_server.export_path = export_path;
-
-	res = nfs_follow_remote_path(root_mnt, export_path);
-
-	dfprintk(MOUNT, "<-- nfs4_try_mount() = %d%s\n",
-		 PTR_ERR_OR_ZERO(res),
-		 IS_ERR(res) ? " [error]" : "");
-	return res;
-}
-
-static struct dentry *
-nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,
-			   const char *dev_name, void *raw_data)
-{
-	struct nfs_mount_info mount_info = {
-		.fill_super = nfs_fill_super,
-		.set_security = nfs_clone_sb_security,
-		.cloned = raw_data,
-	};
-	struct nfs_server *server;
-	struct dentry *mntroot = ERR_PTR(-ENOMEM);
-
-	dprintk("--> nfs4_referral_get_sb()\n");
-
-	mount_info.mntfh = nfs_alloc_fhandle();
-	if (mount_info.cloned == NULL || mount_info.mntfh == NULL)
-		goto out;
-
-	/* create a new volume representation */
-	server = nfs4_create_referral_server(mount_info.cloned, mount_info.mntfh);
-	if (IS_ERR(server)) {
-		mntroot = ERR_CAST(server);
-		goto out;
+	/* We create a mount for the server's root, walk to the requested
+	 * location and then create another mount for that.
+	 */
+	err = do_nfs4_mount(nfs4_create_server(fc),
+			    fc, ctx->nfs_server.hostname,
+			    ctx->nfs_server.export_path);
+	if (err) {
+		nfs_errorf(fc, "NFS4: Couldn't follow remote path");
+		dfprintk(MOUNT, "<-- nfs4_try_get_tree() = %d [error]\n", err);
+	} else {
+		dfprintk(MOUNT, "<-- nfs4_try_get_tree() = 0\n");
 	}
-
-	mntroot = nfs_fs_mount_common(server, flags, dev_name, &mount_info, &nfs_v4);
-out:
-	nfs_free_fhandle(mount_info.mntfh);
-	return mntroot;
+	return err;
 }
 
 /*
  * Create an NFS4 server record on referral traversal
  */
-static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
-		int flags, const char *dev_name, void *raw_data)
+int nfs4_get_referral_tree(struct fs_context *fc)
 {
-	struct nfs_clone_mount *data = raw_data;
-	char *export_path;
-	struct vfsmount *root_mnt;
-	struct dentry *res;
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	int err;
 
 	dprintk("--> nfs4_referral_mount()\n");
 
-	export_path = data->mnt_path;
-	data->mnt_path = "/";
-
-	root_mnt = nfs_do_root_mount(&nfs4_remote_referral_fs_type,
-			flags, data, data->hostname);
-	data->mnt_path = export_path;
-
-	res = nfs_follow_remote_path(root_mnt, export_path);
-	dprintk("<-- nfs4_referral_mount() = %d%s\n",
-		PTR_ERR_OR_ZERO(res),
-		IS_ERR(res) ? " [error]" : "");
-	return res;
+	/* create a new volume representation */
+	err = do_nfs4_mount(nfs4_create_referral_server(fc),
+			    fc, ctx->nfs_server.hostname,
+			    ctx->nfs_server.export_path);
+	if (err) {
+		nfs_errorf(fc, "NFS4: Couldn't follow remote path");
+		dfprintk(MOUNT, "<-- nfs4_get_referral_tree() = %d [error]\n", err);
+	} else {
+		dfprintk(MOUNT, "<-- nfs4_get_referral_tree() = 0\n");
+	}
+	return err;
 }
 
-
 static int __init init_nfs_v4(void)
 {
 	int err;
diff --git a/fs/nfs/nfs4trace.c b/fs/nfs/nfs4trace.c
index 1a8f376..d9ac556 100644
--- a/fs/nfs/nfs4trace.c
+++ b/fs/nfs/nfs4trace.c
@@ -24,4 +24,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_read_done);
 EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_write_done);
 EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_read_pagelist);
 EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_write_pagelist);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_read_error);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_write_error);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_commit_error);
 #endif
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index e60b6fb..1e97e5e 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -155,6 +155,9 @@ TRACE_DEFINE_ENUM(NFS4ERR_WRONG_CRED);
 TRACE_DEFINE_ENUM(NFS4ERR_WRONG_TYPE);
 TRACE_DEFINE_ENUM(NFS4ERR_XDEV);
 
+TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_MDS);
+TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_PNFS);
+
 #define show_nfsv4_errors(error) \
 	__print_symbolic(error, \
 		{ NFS4_OK, "OK" }, \
@@ -305,7 +308,10 @@ TRACE_DEFINE_ENUM(NFS4ERR_XDEV);
 		{ NFS4ERR_WRONGSEC, "WRONGSEC" }, \
 		{ NFS4ERR_WRONG_CRED, "WRONG_CRED" }, \
 		{ NFS4ERR_WRONG_TYPE, "WRONG_TYPE" }, \
-		{ NFS4ERR_XDEV, "XDEV" })
+		{ NFS4ERR_XDEV, "XDEV" }, \
+		/* ***** Internal to Linux NFS client ***** */ \
+		{ NFS4ERR_RESET_TO_MDS, "RESET_TO_MDS" }, \
+		{ NFS4ERR_RESET_TO_PNFS, "RESET_TO_PNFS" })
 
 #define show_open_flags(flags) \
 	__print_flags(flags, "|", \
@@ -352,7 +358,7 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
 		),
 
 		TP_fast_assign(
-			__entry->error = error;
+			__entry->error = error < 0 ? -error : 0;
 			__assign_str(dstaddr, clp->cl_hostname);
 		),
 
@@ -432,7 +438,8 @@ TRACE_EVENT(nfs4_sequence_done,
 			__entry->target_highest_slotid =
 					res->sr_target_highest_slotid;
 			__entry->status_flags = res->sr_status_flags;
-			__entry->error = res->sr_status;
+			__entry->error = res->sr_status < 0 ?
+					-res->sr_status : 0;
 		),
 		TP_printk(
 			"error=%ld (%s) session=0x%08x slot_nr=%u seq_nr=%u "
@@ -640,7 +647,7 @@ TRACE_EVENT(nfs4_state_mgr_failed,
 		),
 
 		TP_fast_assign(
-			__entry->error = status;
+			__entry->error = status < 0 ? -status : 0;
 			__entry->state = clp->cl_state;
 			__assign_str(hostname, clp->cl_hostname);
 			__assign_str(section, section);
@@ -659,7 +666,7 @@ TRACE_EVENT(nfs4_xdr_status,
 		TP_PROTO(
 			const struct xdr_stream *xdr,
 			u32 op,
-			int error
+			u32 error
 		),
 
 		TP_ARGS(xdr, op, error),
@@ -691,6 +698,41 @@ TRACE_EVENT(nfs4_xdr_status,
 		)
 );
 
+DECLARE_EVENT_CLASS(nfs4_cb_error_class,
+		TP_PROTO(
+			__be32 xid,
+			u32 cb_ident
+		),
+
+		TP_ARGS(xid, cb_ident),
+
+		TP_STRUCT__entry(
+			__field(u32, xid)
+			__field(u32, cbident)
+		),
+
+		TP_fast_assign(
+			__entry->xid = be32_to_cpu(xid);
+			__entry->cbident = cb_ident;
+		),
+
+		TP_printk(
+			"xid=0x%08x cb_ident=0x%08x",
+			__entry->xid, __entry->cbident
+		)
+);
+
+#define DEFINE_CB_ERROR_EVENT(name) \
+	DEFINE_EVENT(nfs4_cb_error_class, nfs_cb_##name, \
+			TP_PROTO( \
+				__be32 xid, \
+				u32 cb_ident \
+			), \
+			TP_ARGS(xid, cb_ident))
+
+DEFINE_CB_ERROR_EVENT(no_clp);
+DEFINE_CB_ERROR_EVENT(badprinc);
+
 DECLARE_EVENT_CLASS(nfs4_open_event,
 		TP_PROTO(
 			const struct nfs_open_context *ctx,
@@ -849,7 +891,7 @@ TRACE_EVENT(nfs4_close,
 			__entry->fileid = NFS_FILEID(inode);
 			__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
 			__entry->fmode = (__force unsigned int)state->state;
-			__entry->error = error;
+			__entry->error = error < 0 ? -error : 0;
 			__entry->stateid_seq =
 				be32_to_cpu(args->stateid.seqid);
 			__entry->stateid_hash =
@@ -914,7 +956,7 @@ DECLARE_EVENT_CLASS(nfs4_lock_event,
 		TP_fast_assign(
 			const struct inode *inode = state->inode;
 
-			__entry->error = error;
+			__entry->error = error < 0 ? -error : 0;
 			__entry->cmd = cmd;
 			__entry->type = request->fl_type;
 			__entry->start = request->fl_start;
@@ -986,7 +1028,7 @@ TRACE_EVENT(nfs4_set_lock,
 		TP_fast_assign(
 			const struct inode *inode = state->inode;
 
-			__entry->error = error;
+			__entry->error = error < 0 ? -error : 0;
 			__entry->cmd = cmd;
 			__entry->type = request->fl_type;
 			__entry->start = request->fl_start;
@@ -1164,7 +1206,7 @@ TRACE_EVENT(nfs4_delegreturn_exit,
 		TP_fast_assign(
 			__entry->dev = res->server->s_dev;
 			__entry->fhandle = nfs_fhandle_hash(args->fhandle);
-			__entry->error = error;
+			__entry->error = error < 0 ? -error : 0;
 			__entry->stateid_seq =
 				be32_to_cpu(args->stateid->seqid);
 			__entry->stateid_hash =
@@ -1204,7 +1246,7 @@ DECLARE_EVENT_CLASS(nfs4_test_stateid_event,
 		TP_fast_assign(
 			const struct inode *inode = state->inode;
 
-			__entry->error = error;
+			__entry->error = error < 0 ? -error : 0;
 			__entry->dev = inode->i_sb->s_dev;
 			__entry->fileid = NFS_FILEID(inode);
 			__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
@@ -1306,7 +1348,7 @@ TRACE_EVENT(nfs4_lookupp,
 		TP_fast_assign(
 			__entry->dev = inode->i_sb->s_dev;
 			__entry->ino = NFS_FILEID(inode);
-			__entry->error = error;
+			__entry->error = error < 0 ? -error : 0;
 		),
 
 		TP_printk(
@@ -1342,7 +1384,7 @@ TRACE_EVENT(nfs4_rename,
 			__entry->dev = olddir->i_sb->s_dev;
 			__entry->olddir = NFS_FILEID(olddir);
 			__entry->newdir = NFS_FILEID(newdir);
-			__entry->error = error;
+			__entry->error = error < 0 ? -error : 0;
 			__assign_str(oldname, oldname->name);
 			__assign_str(newname, newname->name);
 		),
@@ -1433,7 +1475,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_event,
 			__entry->dev = inode->i_sb->s_dev;
 			__entry->fileid = NFS_FILEID(inode);
 			__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
-			__entry->error = error;
+			__entry->error = error < 0 ? -error : 0;
 			__entry->stateid_seq =
 				be32_to_cpu(stateid->seqid);
 			__entry->stateid_hash =
@@ -1489,7 +1531,7 @@ DECLARE_EVENT_CLASS(nfs4_getattr_event,
 			__entry->valid = fattr->valid;
 			__entry->fhandle = nfs_fhandle_hash(fhandle);
 			__entry->fileid = (fattr->valid & NFS_ATTR_FATTR_FILEID) ? fattr->fileid : 0;
-			__entry->error = error;
+			__entry->error = error < 0 ? -error : 0;
 		),
 
 		TP_printk(
@@ -1536,7 +1578,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
 		),
 
 		TP_fast_assign(
-			__entry->error = error;
+			__entry->error = error < 0 ? -error : 0;
 			__entry->fhandle = nfs_fhandle_hash(fhandle);
 			if (!IS_ERR_OR_NULL(inode)) {
 				__entry->fileid = NFS_FILEID(inode);
@@ -1593,7 +1635,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
 		),
 
 		TP_fast_assign(
-			__entry->error = error;
+			__entry->error = error < 0 ? -error : 0;
 			__entry->fhandle = nfs_fhandle_hash(fhandle);
 			if (!IS_ERR_OR_NULL(inode)) {
 				__entry->fileid = NFS_FILEID(inode);
@@ -1694,7 +1736,8 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
 			__field(u32, fhandle)
 			__field(u64, fileid)
 			__field(loff_t, offset)
-			__field(size_t, count)
+			__field(u32, arg_count)
+			__field(u32, res_count)
 			__field(unsigned long, error)
 			__field(int, stateid_seq)
 			__field(u32, stateid_hash)
@@ -1702,13 +1745,18 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
 
 		TP_fast_assign(
 			const struct inode *inode = hdr->inode;
+			const struct nfs_inode *nfsi = NFS_I(inode);
+			const struct nfs_fh *fh = hdr->args.fh ?
+						  hdr->args.fh : &nfsi->fh;
 			const struct nfs4_state *state =
 				hdr->args.context->state;
+
 			__entry->dev = inode->i_sb->s_dev;
-			__entry->fileid = NFS_FILEID(inode);
-			__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
+			__entry->fileid = nfsi->fileid;
+			__entry->fhandle = nfs_fhandle_hash(fh);
 			__entry->offset = hdr->args.offset;
-			__entry->count = hdr->args.count;
+			__entry->arg_count = hdr->args.count;
+			__entry->res_count = hdr->res.count;
 			__entry->error = error < 0 ? -error : 0;
 			__entry->stateid_seq =
 				be32_to_cpu(state->stateid.seqid);
@@ -1718,14 +1766,14 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
 
 		TP_printk(
 			"error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
-			"offset=%lld count=%zu stateid=%d:0x%08x",
+			"offset=%lld count=%u res=%u stateid=%d:0x%08x",
 			-__entry->error,
 			show_nfsv4_errors(__entry->error),
 			MAJOR(__entry->dev), MINOR(__entry->dev),
 			(unsigned long long)__entry->fileid,
 			__entry->fhandle,
 			(long long)__entry->offset,
-			__entry->count,
+			__entry->arg_count, __entry->res_count,
 			__entry->stateid_seq, __entry->stateid_hash
 		)
 );
@@ -1754,7 +1802,8 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
 			__field(u32, fhandle)
 			__field(u64, fileid)
 			__field(loff_t, offset)
-			__field(size_t, count)
+			__field(u32, arg_count)
+			__field(u32, res_count)
 			__field(unsigned long, error)
 			__field(int, stateid_seq)
 			__field(u32, stateid_hash)
@@ -1762,13 +1811,18 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
 
 		TP_fast_assign(
 			const struct inode *inode = hdr->inode;
+			const struct nfs_inode *nfsi = NFS_I(inode);
+			const struct nfs_fh *fh = hdr->args.fh ?
+						  hdr->args.fh : &nfsi->fh;
 			const struct nfs4_state *state =
 				hdr->args.context->state;
+
 			__entry->dev = inode->i_sb->s_dev;
-			__entry->fileid = NFS_FILEID(inode);
-			__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
+			__entry->fileid = nfsi->fileid;
+			__entry->fhandle = nfs_fhandle_hash(fh);
 			__entry->offset = hdr->args.offset;
-			__entry->count = hdr->args.count;
+			__entry->arg_count = hdr->args.count;
+			__entry->res_count = hdr->res.count;
 			__entry->error = error < 0 ? -error : 0;
 			__entry->stateid_seq =
 				be32_to_cpu(state->stateid.seqid);
@@ -1778,14 +1832,14 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
 
 		TP_printk(
 			"error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
-			"offset=%lld count=%zu stateid=%d:0x%08x",
+			"offset=%lld count=%u res=%u stateid=%d:0x%08x",
 			-__entry->error,
 			show_nfsv4_errors(__entry->error),
 			MAJOR(__entry->dev), MINOR(__entry->dev),
 			(unsigned long long)__entry->fileid,
 			__entry->fhandle,
 			(long long)__entry->offset,
-			__entry->count,
+			__entry->arg_count, __entry->res_count,
 			__entry->stateid_seq, __entry->stateid_hash
 		)
 );
@@ -1814,24 +1868,28 @@ DECLARE_EVENT_CLASS(nfs4_commit_event,
 			__field(dev_t, dev)
 			__field(u32, fhandle)
 			__field(u64, fileid)
-			__field(loff_t, offset)
-			__field(size_t, count)
 			__field(unsigned long, error)
+			__field(loff_t, offset)
+			__field(u32, count)
 		),
 
 		TP_fast_assign(
 			const struct inode *inode = data->inode;
+			const struct nfs_inode *nfsi = NFS_I(inode);
+			const struct nfs_fh *fh = data->args.fh ?
+						  data->args.fh : &nfsi->fh;
+
 			__entry->dev = inode->i_sb->s_dev;
-			__entry->fileid = NFS_FILEID(inode);
-			__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
+			__entry->fileid = nfsi->fileid;
+			__entry->fhandle = nfs_fhandle_hash(fh);
 			__entry->offset = data->args.offset;
 			__entry->count = data->args.count;
-			__entry->error = error;
+			__entry->error = error < 0 ? -error : 0;
 		),
 
 		TP_printk(
 			"error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
-			"offset=%lld count=%zu",
+			"offset=%lld count=%u",
 			-__entry->error,
 			show_nfsv4_errors(__entry->error),
 			MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -1896,7 +1954,7 @@ TRACE_EVENT(nfs4_layoutget,
 			__entry->iomode = args->iomode;
 			__entry->offset = args->offset;
 			__entry->count = args->length;
-			__entry->error = error;
+			__entry->error = error < 0 ? -error : 0;
 			__entry->stateid_seq =
 				be32_to_cpu(state->stateid.seqid);
 			__entry->stateid_hash =
@@ -2094,6 +2152,115 @@ DEFINE_PNFS_LAYOUT_EVENT(pnfs_mds_fallback_write_done);
 DEFINE_PNFS_LAYOUT_EVENT(pnfs_mds_fallback_read_pagelist);
 DEFINE_PNFS_LAYOUT_EVENT(pnfs_mds_fallback_write_pagelist);
 
+DECLARE_EVENT_CLASS(nfs4_flexfiles_io_event,
+		TP_PROTO(
+			const struct nfs_pgio_header *hdr
+		),
+
+		TP_ARGS(hdr),
+
+		TP_STRUCT__entry(
+			__field(unsigned long, error)
+			__field(dev_t, dev)
+			__field(u32, fhandle)
+			__field(u64, fileid)
+			__field(loff_t, offset)
+			__field(u32, count)
+			__field(int, stateid_seq)
+			__field(u32, stateid_hash)
+			__string(dstaddr, hdr->ds_clp ?
+				rpc_peeraddr2str(hdr->ds_clp->cl_rpcclient,
+					RPC_DISPLAY_ADDR) : "unknown")
+		),
+
+		TP_fast_assign(
+			const struct inode *inode = hdr->inode;
+
+			__entry->error = hdr->res.op_status;
+			__entry->fhandle = nfs_fhandle_hash(hdr->args.fh);
+			__entry->fileid = NFS_FILEID(inode);
+			__entry->dev = inode->i_sb->s_dev;
+			__entry->offset = hdr->args.offset;
+			__entry->count = hdr->args.count;
+			__entry->stateid_seq =
+				be32_to_cpu(hdr->args.stateid.seqid);
+			__entry->stateid_hash =
+				nfs_stateid_hash(&hdr->args.stateid);
+			__assign_str(dstaddr, hdr->ds_clp ?
+				rpc_peeraddr2str(hdr->ds_clp->cl_rpcclient,
+					RPC_DISPLAY_ADDR) : "unknown");
+		),
+
+		TP_printk(
+			"error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+			"offset=%llu count=%u stateid=%d:0x%08x dstaddr=%s",
+			-__entry->error,
+			show_nfsv4_errors(__entry->error),
+			MAJOR(__entry->dev), MINOR(__entry->dev),
+			(unsigned long long)__entry->fileid,
+			__entry->fhandle,
+			__entry->offset, __entry->count,
+			__entry->stateid_seq, __entry->stateid_hash,
+			__get_str(dstaddr)
+		)
+);
+
+#define DEFINE_NFS4_FLEXFILES_IO_EVENT(name) \
+	DEFINE_EVENT(nfs4_flexfiles_io_event, name, \
+			TP_PROTO( \
+				const struct nfs_pgio_header *hdr \
+			), \
+			TP_ARGS(hdr))
+DEFINE_NFS4_FLEXFILES_IO_EVENT(ff_layout_read_error);
+DEFINE_NFS4_FLEXFILES_IO_EVENT(ff_layout_write_error);
+
+TRACE_EVENT(ff_layout_commit_error,
+		TP_PROTO(
+			const struct nfs_commit_data *data
+		),
+
+		TP_ARGS(data),
+
+		TP_STRUCT__entry(
+			__field(unsigned long, error)
+			__field(dev_t, dev)
+			__field(u32, fhandle)
+			__field(u64, fileid)
+			__field(loff_t, offset)
+			__field(u32, count)
+			__string(dstaddr, data->ds_clp ?
+				rpc_peeraddr2str(data->ds_clp->cl_rpcclient,
+					RPC_DISPLAY_ADDR) : "unknown")
+		),
+
+		TP_fast_assign(
+			const struct inode *inode = data->inode;
+
+			__entry->error = data->res.op_status;
+			__entry->fhandle = nfs_fhandle_hash(data->args.fh);
+			__entry->fileid = NFS_FILEID(inode);
+			__entry->dev = inode->i_sb->s_dev;
+			__entry->offset = data->args.offset;
+			__entry->count = data->args.count;
+			__assign_str(dstaddr, data->ds_clp ?
+				rpc_peeraddr2str(data->ds_clp->cl_rpcclient,
+					RPC_DISPLAY_ADDR) : "unknown");
+		),
+
+		TP_printk(
+			"error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+			"offset=%llu count=%u dstaddr=%s",
+			-__entry->error,
+			show_nfsv4_errors(__entry->error),
+			MAJOR(__entry->dev), MINOR(__entry->dev),
+			(unsigned long long)__entry->fileid,
+			__entry->fhandle,
+			__entry->offset, __entry->count,
+			__get_str(dstaddr)
+		)
+);
+
+
 #endif /* CONFIG_NFS_V4_1 */
 
 #endif /* _TRACE_NFS4_H */
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 936c577..47817ef 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1061,7 +1061,7 @@ static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *ve
 static __be32 *
 xdr_encode_nfstime4(__be32 *p, const struct timespec64 *t)
 {
-	p = xdr_encode_hyper(p, (__s64)t->tv_sec);
+	p = xdr_encode_hyper(p, t->tv_sec);
 	*p++ = cpu_to_be32(t->tv_nsec);
 	return p;
 }
@@ -4097,7 +4097,7 @@ static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, str
 			status = NFS_ATTR_FATTR_ATIME;
 		bitmap[1] &= ~FATTR4_WORD1_TIME_ACCESS;
 	}
-	dprintk("%s: atime=%ld\n", __func__, (long)time->tv_sec);
+	dprintk("%s: atime=%lld\n", __func__, time->tv_sec);
 	return status;
 }
 
@@ -4115,7 +4115,7 @@ static int decode_attr_time_metadata(struct xdr_stream *xdr, uint32_t *bitmap, s
 			status = NFS_ATTR_FATTR_CTIME;
 		bitmap[1] &= ~FATTR4_WORD1_TIME_METADATA;
 	}
-	dprintk("%s: ctime=%ld\n", __func__, (long)time->tv_sec);
+	dprintk("%s: ctime=%lld\n", __func__, time->tv_sec);
 	return status;
 }
 
@@ -4132,8 +4132,8 @@ static int decode_attr_time_delta(struct xdr_stream *xdr, uint32_t *bitmap,
 		status = decode_attr_time(xdr, time);
 		bitmap[1] &= ~FATTR4_WORD1_TIME_DELTA;
 	}
-	dprintk("%s: time_delta=%ld %ld\n", __func__, (long)time->tv_sec,
-		(long)time->tv_nsec);
+	dprintk("%s: time_delta=%lld %ld\n", __func__, time->tv_sec,
+		time->tv_nsec);
 	return status;
 }
 
@@ -4197,7 +4197,7 @@ static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, str
 			status = NFS_ATTR_FATTR_MTIME;
 		bitmap[1] &= ~FATTR4_WORD1_TIME_MODIFY;
 	}
-	dprintk("%s: mtime=%ld\n", __func__, (long)time->tv_sec);
+	dprintk("%s: mtime=%lld\n", __func__, time->tv_sec);
 	return status;
 }
 
@@ -4313,11 +4313,14 @@ static int decode_write_verifier(struct xdr_stream *xdr, struct nfs_write_verifi
 
 static int decode_commit(struct xdr_stream *xdr, struct nfs_commitres *res)
 {
+	struct nfs_writeverf *verf = res->verf;
 	int status;
 
 	status = decode_op_hdr(xdr, OP_COMMIT);
 	if (!status)
-		status = decode_write_verifier(xdr, &res->verf->verifier);
+		status = decode_write_verifier(xdr, &verf->verifier);
+	if (!status)
+		verf->committed = NFS_FILE_SYNC;
 	return status;
 }
 
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 2a82dcc..a9588d1 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -198,7 +198,66 @@ DEFINE_NFS_INODE_EVENT_DONE(nfs_writeback_inode_exit);
 DEFINE_NFS_INODE_EVENT(nfs_fsync_enter);
 DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit);
 DEFINE_NFS_INODE_EVENT(nfs_access_enter);
-DEFINE_NFS_INODE_EVENT_DONE(nfs_access_exit);
+
+TRACE_EVENT(nfs_access_exit,
+		TP_PROTO(
+			const struct inode *inode,
+			unsigned int mask,
+			unsigned int permitted,
+			int error
+		),
+
+		TP_ARGS(inode, mask, permitted, error),
+
+		TP_STRUCT__entry(
+			__field(unsigned long, error)
+			__field(dev_t, dev)
+			__field(u32, fhandle)
+			__field(unsigned char, type)
+			__field(u64, fileid)
+			__field(u64, version)
+			__field(loff_t, size)
+			__field(unsigned long, nfsi_flags)
+			__field(unsigned long, cache_validity)
+			__field(unsigned int, mask)
+			__field(unsigned int, permitted)
+		),
+
+		TP_fast_assign(
+			const struct nfs_inode *nfsi = NFS_I(inode);
+			__entry->error = error < 0 ? -error : 0;
+			__entry->dev = inode->i_sb->s_dev;
+			__entry->fileid = nfsi->fileid;
+			__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+			__entry->type = nfs_umode_to_dtype(inode->i_mode);
+			__entry->version = inode_peek_iversion_raw(inode);
+			__entry->size = i_size_read(inode);
+			__entry->nfsi_flags = nfsi->flags;
+			__entry->cache_validity = nfsi->cache_validity;
+			__entry->mask = mask;
+			__entry->permitted = permitted;
+		),
+
+		TP_printk(
+			"error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+			"type=%u (%s) version=%llu size=%lld "
+			"cache_validity=0x%lx (%s) nfs_flags=0x%lx (%s) "
+			"mask=0x%x permitted=0x%x",
+			-__entry->error, nfs_show_status(__entry->error),
+			MAJOR(__entry->dev), MINOR(__entry->dev),
+			(unsigned long long)__entry->fileid,
+			__entry->fhandle,
+			__entry->type,
+			nfs_show_file_type(__entry->type),
+			(unsigned long long)__entry->version,
+			(long long)__entry->size,
+			__entry->cache_validity,
+			nfs_show_cache_validity(__entry->cache_validity),
+			__entry->nfsi_flags,
+			nfs_show_nfsi_flags(__entry->nfsi_flags),
+			__entry->mask, __entry->permitted
+		)
+);
 
 TRACE_DEFINE_ENUM(LOOKUP_FOLLOW);
 TRACE_DEFINE_ENUM(LOOKUP_DIRECTORY);
@@ -818,75 +877,85 @@ TRACE_EVENT(nfs_sillyrename_unlink,
 
 TRACE_EVENT(nfs_initiate_read,
 		TP_PROTO(
-			const struct inode *inode,
-			loff_t offset, unsigned long count
+			const struct nfs_pgio_header *hdr
 		),
 
-		TP_ARGS(inode, offset, count),
+		TP_ARGS(hdr),
 
 		TP_STRUCT__entry(
-			__field(loff_t, offset)
-			__field(unsigned long, count)
 			__field(dev_t, dev)
 			__field(u32, fhandle)
 			__field(u64, fileid)
+			__field(loff_t, offset)
+			__field(u32, count)
 		),
 
 		TP_fast_assign(
+			const struct inode *inode = hdr->inode;
 			const struct nfs_inode *nfsi = NFS_I(inode);
+			const struct nfs_fh *fh = hdr->args.fh ?
+						  hdr->args.fh : &nfsi->fh;
 
-			__entry->offset = offset;
-			__entry->count = count;
+			__entry->offset = hdr->args.offset;
+			__entry->count = hdr->args.count;
 			__entry->dev = inode->i_sb->s_dev;
 			__entry->fileid = nfsi->fileid;
-			__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+			__entry->fhandle = nfs_fhandle_hash(fh);
 		),
 
 		TP_printk(
 			"fileid=%02x:%02x:%llu fhandle=0x%08x "
-			"offset=%lld count=%lu",
+			"offset=%lld count=%u",
 			MAJOR(__entry->dev), MINOR(__entry->dev),
 			(unsigned long long)__entry->fileid,
 			__entry->fhandle,
-			__entry->offset, __entry->count
+			(long long)__entry->offset, __entry->count
 		)
 );
 
 TRACE_EVENT(nfs_readpage_done,
 		TP_PROTO(
-			const struct inode *inode,
-			int status, loff_t offset, bool eof
+			const struct rpc_task *task,
+			const struct nfs_pgio_header *hdr
 		),
 
-		TP_ARGS(inode, status, offset, eof),
+		TP_ARGS(task, hdr),
 
 		TP_STRUCT__entry(
-			__field(int, status)
-			__field(loff_t, offset)
-			__field(bool, eof)
 			__field(dev_t, dev)
 			__field(u32, fhandle)
 			__field(u64, fileid)
+			__field(loff_t, offset)
+			__field(u32, arg_count)
+			__field(u32, res_count)
+			__field(bool, eof)
+			__field(int, status)
 		),
 
 		TP_fast_assign(
+			const struct inode *inode = hdr->inode;
 			const struct nfs_inode *nfsi = NFS_I(inode);
+			const struct nfs_fh *fh = hdr->args.fh ?
+						  hdr->args.fh : &nfsi->fh;
 
-			__entry->status = status;
-			__entry->offset = offset;
-			__entry->eof = eof;
+			__entry->status = task->tk_status;
+			__entry->offset = hdr->args.offset;
+			__entry->arg_count = hdr->args.count;
+			__entry->res_count = hdr->res.count;
+			__entry->eof = hdr->res.eof;
 			__entry->dev = inode->i_sb->s_dev;
 			__entry->fileid = nfsi->fileid;
-			__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+			__entry->fhandle = nfs_fhandle_hash(fh);
 		),
 
 		TP_printk(
 			"fileid=%02x:%02x:%llu fhandle=0x%08x "
-			"offset=%lld status=%d%s",
+			"offset=%lld count=%u res=%u status=%d%s",
 			MAJOR(__entry->dev), MINOR(__entry->dev),
 			(unsigned long long)__entry->fileid,
 			__entry->fhandle,
-			__entry->offset, __entry->status,
+			(long long)__entry->offset, __entry->arg_count,
+			__entry->res_count, __entry->status,
 			__entry->eof ? " eof" : ""
 		)
 );
@@ -903,90 +972,144 @@ TRACE_DEFINE_ENUM(NFS_FILE_SYNC);
 
 TRACE_EVENT(nfs_initiate_write,
 		TP_PROTO(
-			const struct inode *inode,
-			loff_t offset, unsigned long count,
-			enum nfs3_stable_how stable
+			const struct nfs_pgio_header *hdr
 		),
 
-		TP_ARGS(inode, offset, count, stable),
+		TP_ARGS(hdr),
 
 		TP_STRUCT__entry(
-			__field(loff_t, offset)
-			__field(unsigned long, count)
-			__field(enum nfs3_stable_how, stable)
 			__field(dev_t, dev)
 			__field(u32, fhandle)
 			__field(u64, fileid)
+			__field(loff_t, offset)
+			__field(u32, count)
+			__field(enum nfs3_stable_how, stable)
 		),
 
 		TP_fast_assign(
+			const struct inode *inode = hdr->inode;
 			const struct nfs_inode *nfsi = NFS_I(inode);
+			const struct nfs_fh *fh = hdr->args.fh ?
+						  hdr->args.fh : &nfsi->fh;
 
-			__entry->offset = offset;
-			__entry->count = count;
-			__entry->stable = stable;
+			__entry->offset = hdr->args.offset;
+			__entry->count = hdr->args.count;
+			__entry->stable = hdr->args.stable;
 			__entry->dev = inode->i_sb->s_dev;
 			__entry->fileid = nfsi->fileid;
-			__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+			__entry->fhandle = nfs_fhandle_hash(fh);
 		),
 
 		TP_printk(
 			"fileid=%02x:%02x:%llu fhandle=0x%08x "
-			"offset=%lld count=%lu stable=%s",
+			"offset=%lld count=%u stable=%s",
 			MAJOR(__entry->dev), MINOR(__entry->dev),
 			(unsigned long long)__entry->fileid,
 			__entry->fhandle,
-			__entry->offset, __entry->count,
+			(long long)__entry->offset, __entry->count,
 			nfs_show_stable(__entry->stable)
 		)
 );
 
 TRACE_EVENT(nfs_writeback_done,
 		TP_PROTO(
-			const struct inode *inode,
-			int status,
-			loff_t offset,
-			struct nfs_writeverf *writeverf
+			const struct rpc_task *task,
+			const struct nfs_pgio_header *hdr
 		),
 
-		TP_ARGS(inode, status, offset, writeverf),
+		TP_ARGS(task, hdr),
 
 		TP_STRUCT__entry(
-			__field(int, status)
-			__field(loff_t, offset)
-			__field(enum nfs3_stable_how, stable)
-			__field(unsigned long long, verifier)
 			__field(dev_t, dev)
 			__field(u32, fhandle)
 			__field(u64, fileid)
+			__field(loff_t, offset)
+			__field(u32, arg_count)
+			__field(u32, res_count)
+			__field(int, status)
+			__field(enum nfs3_stable_how, stable)
+			__array(char, verifier, NFS4_VERIFIER_SIZE)
 		),
 
 		TP_fast_assign(
+			const struct inode *inode = hdr->inode;
 			const struct nfs_inode *nfsi = NFS_I(inode);
+			const struct nfs_fh *fh = hdr->args.fh ?
+						  hdr->args.fh : &nfsi->fh;
+			const struct nfs_writeverf *verf = hdr->res.verf;
 
-			__entry->status = status;
-			__entry->offset = offset;
-			__entry->stable = writeverf->committed;
-			memcpy(&__entry->verifier, &writeverf->verifier,
-			       sizeof(__entry->verifier));
+			__entry->status = task->tk_status;
+			__entry->offset = hdr->args.offset;
+			__entry->arg_count = hdr->args.count;
+			__entry->res_count = hdr->res.count;
+			__entry->stable = verf->committed;
+			memcpy(__entry->verifier,
+				&verf->verifier,
+				NFS4_VERIFIER_SIZE);
 			__entry->dev = inode->i_sb->s_dev;
 			__entry->fileid = nfsi->fileid;
-			__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+			__entry->fhandle = nfs_fhandle_hash(fh);
 		),
 
 		TP_printk(
 			"fileid=%02x:%02x:%llu fhandle=0x%08x "
-			"offset=%lld status=%d stable=%s "
-			"verifier 0x%016llx",
+			"offset=%lld count=%u res=%u status=%d stable=%s "
+			"verifier=%s",
 			MAJOR(__entry->dev), MINOR(__entry->dev),
 			(unsigned long long)__entry->fileid,
 			__entry->fhandle,
-			__entry->offset, __entry->status,
+			(long long)__entry->offset, __entry->arg_count,
+			__entry->res_count, __entry->status,
 			nfs_show_stable(__entry->stable),
-			__entry->verifier
+			__print_hex_str(__entry->verifier, NFS4_VERIFIER_SIZE)
 		)
 );
 
+DECLARE_EVENT_CLASS(nfs_page_error_class,
+		TP_PROTO(
+			const struct nfs_page *req,
+			int error
+		),
+
+		TP_ARGS(req, error),
+
+		TP_STRUCT__entry(
+			__field(const void *, req)
+			__field(pgoff_t, index)
+			__field(unsigned int, offset)
+			__field(unsigned int, pgbase)
+			__field(unsigned int, bytes)
+			__field(int, error)
+		),
+
+		TP_fast_assign(
+			__entry->req = req;
+			__entry->index = req->wb_index;
+			__entry->offset = req->wb_offset;
+			__entry->pgbase = req->wb_pgbase;
+			__entry->bytes = req->wb_bytes;
+			__entry->error = error;
+		),
+
+		TP_printk(
+			"req=%p index=%lu offset=%u pgbase=%u bytes=%u error=%d",
+			__entry->req, __entry->index, __entry->offset,
+			__entry->pgbase, __entry->bytes, __entry->error
+		)
+);
+
+#define DEFINE_NFS_PAGEERR_EVENT(name) \
+	DEFINE_EVENT(nfs_page_error_class, name, \
+			TP_PROTO( \
+				const struct nfs_page *req, \
+				int error \
+			), \
+			TP_ARGS(req, error))
+
+DEFINE_NFS_PAGEERR_EVENT(nfs_write_error);
+DEFINE_NFS_PAGEERR_EVENT(nfs_comp_error);
+DEFINE_NFS_PAGEERR_EVENT(nfs_commit_error);
+
 TRACE_EVENT(nfs_initiate_commit,
 		TP_PROTO(
 			const struct nfs_commit_data *data
@@ -995,71 +1118,81 @@ TRACE_EVENT(nfs_initiate_commit,
 		TP_ARGS(data),
 
 		TP_STRUCT__entry(
-			__field(loff_t, offset)
-			__field(unsigned long, count)
 			__field(dev_t, dev)
 			__field(u32, fhandle)
 			__field(u64, fileid)
+			__field(loff_t, offset)
+			__field(u32, count)
 		),
 
 		TP_fast_assign(
 			const struct inode *inode = data->inode;
 			const struct nfs_inode *nfsi = NFS_I(inode);
+			const struct nfs_fh *fh = data->args.fh ?
+						  data->args.fh : &nfsi->fh;
 
 			__entry->offset = data->args.offset;
 			__entry->count = data->args.count;
 			__entry->dev = inode->i_sb->s_dev;
 			__entry->fileid = nfsi->fileid;
-			__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+			__entry->fhandle = nfs_fhandle_hash(fh);
 		),
 
 		TP_printk(
 			"fileid=%02x:%02x:%llu fhandle=0x%08x "
-			"offset=%lld count=%lu",
+			"offset=%lld count=%u",
 			MAJOR(__entry->dev), MINOR(__entry->dev),
 			(unsigned long long)__entry->fileid,
 			__entry->fhandle,
-			__entry->offset, __entry->count
+			(long long)__entry->offset, __entry->count
 		)
 );
 
 TRACE_EVENT(nfs_commit_done,
 		TP_PROTO(
+			const struct rpc_task *task,
 			const struct nfs_commit_data *data
 		),
 
-		TP_ARGS(data),
+		TP_ARGS(task, data),
 
 		TP_STRUCT__entry(
-			__field(int, status)
-			__field(loff_t, offset)
-			__field(unsigned long long, verifier)
 			__field(dev_t, dev)
 			__field(u32, fhandle)
 			__field(u64, fileid)
+			__field(loff_t, offset)
+			__field(int, status)
+			__field(enum nfs3_stable_how, stable)
+			__array(char, verifier, NFS4_VERIFIER_SIZE)
 		),
 
 		TP_fast_assign(
 			const struct inode *inode = data->inode;
 			const struct nfs_inode *nfsi = NFS_I(inode);
+			const struct nfs_fh *fh = data->args.fh ?
+						  data->args.fh : &nfsi->fh;
+			const struct nfs_writeverf *verf = data->res.verf;
 
-			__entry->status = data->res.op_status;
+			__entry->status = task->tk_status;
 			__entry->offset = data->args.offset;
-			memcpy(&__entry->verifier, &data->verf.verifier,
-			       sizeof(__entry->verifier));
+			__entry->stable = verf->committed;
+			memcpy(__entry->verifier,
+				&verf->verifier,
+				NFS4_VERIFIER_SIZE);
 			__entry->dev = inode->i_sb->s_dev;
 			__entry->fileid = nfsi->fileid;
-			__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+			__entry->fhandle = nfs_fhandle_hash(fh);
 		),
 
 		TP_printk(
 			"fileid=%02x:%02x:%llu fhandle=0x%08x "
-			"offset=%lld status=%d verifier 0x%016llx",
+			"offset=%lld status=%d stable=%s verifier=%s",
 			MAJOR(__entry->dev), MINOR(__entry->dev),
 			(unsigned long long)__entry->fileid,
 			__entry->fhandle,
-			__entry->offset, __entry->status,
-			__entry->verifier
+			(long long)__entry->offset, __entry->status,
+			nfs_show_stable(__entry->stable),
+			__print_hex_str(__entry->verifier, NFS4_VERIFIER_SIZE)
 		)
 );
 
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index cec3070..542ea8d 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1425,7 +1425,7 @@ bool pnfs_roc(struct inode *ino,
 	/* lo ref dropped in pnfs_roc_release() */
 	layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
 	/* If the creds don't match, we can't compound the layoutreturn */
-	if (!layoutreturn || cred != lo->plh_lc_cred)
+	if (!layoutreturn || cred_fscmp(cred, lo->plh_lc_cred) != 0)
 		goto out_noroc;
 
 	roc = layoutreturn;
@@ -1998,8 +1998,6 @@ pnfs_update_layout(struct inode *ino,
 			trace_pnfs_update_layout(ino, pos, count,
 					iomode, lo, lseg,
 					PNFS_UPDATE_LAYOUT_INVALID_OPEN);
-			if (status != -EAGAIN)
-				goto out_unlock;
 			spin_unlock(&ino->i_lock);
 			nfs4_schedule_stateid_recovery(server, ctx->state);
 			pnfs_clear_first_layoutget(lo);
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index f8a3806..0fafdad 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -79,6 +79,10 @@ enum pnfs_try_status {
 	PNFS_TRY_AGAIN     = 2,
 };
 
+/* error codes for internal use */
+#define NFS4ERR_RESET_TO_MDS   12001
+#define NFS4ERR_RESET_TO_PNFS  12002
+
 #ifdef CONFIG_NFS_V4_1
 
 #define LAYOUT_NFSV4_1_MODULE_PREFIX "nfs-layouttype4"
@@ -91,10 +95,6 @@ enum pnfs_try_status {
 #define NFS4_DEF_DS_RETRANS 5
 #define PNFS_DEVICE_RETRY_TIMEOUT (120*HZ)
 
-/* error codes for internal use */
-#define NFS4ERR_RESET_TO_MDS   12001
-#define NFS4ERR_RESET_TO_PNFS  12002
-
 enum {
 	NFS_LAYOUT_RO_FAILED = 0,	/* get ro layout failed stop trying */
 	NFS_LAYOUT_RW_FAILED,		/* get rw layout failed stop trying */
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 82af480..8b37e7f 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -31,12 +31,11 @@ EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);
 /* Fake up some data that will cause nfs_commit_release to retry the writes. */
 void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
 {
-	struct nfs_page *first = nfs_list_entry(data->pages.next);
+	struct nfs_writeverf *verf = data->res.verf;
 
 	data->task.tk_status = 0;
-	memcpy(&data->verf.verifier, &first->wb_verf,
-	       sizeof(data->verf.verifier));
-	data->verf.verifier.data[0]++; /* ensure verifier mismatch */
+	memset(&verf->verifier, 0, sizeof(verf->verifier));
+	verf->committed = NFS_UNSTABLE;
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);
 
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 0f7288b..15c865c 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -108,10 +108,15 @@ nfs_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
 		.rpc_resp	= fattr,
 	};
 	int	status;
+	unsigned short task_flags = 0;
+
+	/* Is this an attribute revalidation, subject to softreval? */
+	if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
+		task_flags |= RPC_TASK_TIMEOUT;
 
 	dprintk("NFS call  getattr\n");
 	nfs_fattr_init(fattr);
-	status = rpc_call_sync(server->client, &msg, 0);
+	status = rpc_call_sync(server->client, &msg, task_flags);
 	dprintk("NFS reply getattr: %d\n", status);
 	return status;
 }
@@ -147,14 +152,14 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
 }
 
 static int
-nfs_proc_lookup(struct inode *dir, const struct qstr *name,
+nfs_proc_lookup(struct inode *dir, struct dentry *dentry,
 		struct nfs_fh *fhandle, struct nfs_fattr *fattr,
 		struct nfs4_label *label)
 {
 	struct nfs_diropargs	arg = {
 		.fh		= NFS_FH(dir),
-		.name		= name->name,
-		.len		= name->len
+		.name		= dentry->d_name.name,
+		.len		= dentry->d_name.len
 	};
 	struct nfs_diropok	res = {
 		.fh		= fhandle,
@@ -166,10 +171,15 @@ nfs_proc_lookup(struct inode *dir, const struct qstr *name,
 		.rpc_resp	= &res,
 	};
 	int			status;
+	unsigned short task_flags = 0;
 
-	dprintk("NFS call  lookup %s\n", name->name);
+	/* Is this an attribute revalidation, subject to softreval? */
+	if (nfs_lookup_is_soft_revalidate(dentry))
+		task_flags |= RPC_TASK_TIMEOUT;
+
+	dprintk("NFS call  lookup %pd2\n", dentry);
 	nfs_fattr_init(fattr);
-	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, task_flags);
 	dprintk("NFS reply lookup: %d\n", status);
 	return status;
 }
@@ -710,7 +720,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
 	.file_ops	= &nfs_file_operations,
 	.getroot	= nfs_proc_get_root,
 	.submount	= nfs_submount,
-	.try_mount	= nfs_try_mount,
+	.try_get_tree	= nfs_try_get_tree,
 	.getattr	= nfs_proc_getattr,
 	.setattr	= nfs_proc_setattr,
 	.lookup		= nfs_proc_lookup,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index cfe0b58..34bb9ad 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -214,7 +214,7 @@ static void nfs_initiate_read(struct nfs_pgio_header *hdr,
 
 	task_setup_data->flags |= swap_flags;
 	rpc_ops->read_setup(hdr, msg);
-	trace_nfs_initiate_read(inode, hdr->io_start, hdr->good_bytes);
+	trace_nfs_initiate_read(hdr);
 }
 
 static void
@@ -247,8 +247,7 @@ static int nfs_readpage_done(struct rpc_task *task,
 		return status;
 
 	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
-	trace_nfs_readpage_done(inode, task->tk_status,
-				hdr->args.offset, hdr->res.eof);
+	trace_nfs_readpage_done(task, hdr);
 
 	if (task->tk_status == -ESTALE) {
 		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
@@ -282,6 +281,8 @@ static void nfs_readpage_retry(struct rpc_task *task,
 	argp->offset += resp->count;
 	argp->pgbase += resp->count;
 	argp->count -= resp->count;
+	resp->count = 0;
+	resp->eof = 0;
 	rpc_restart_call_prepare(task);
 }
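In the read.c hunk above, nfs_readpage_retry() now zeroes resp->count and resp->eof before rpc_restart_call_prepare(), so the retried short read starts with clean result fields while the arguments are advanced past the bytes already received. A rough userspace analogy (plain C, not kernel code; the helper name and fallback path are illustrative) is the familiar short-read loop:

    /* Userspace analogy, not kernel code: after a short read, advance
     * the request by what was already received and start the next
     * attempt with a clean per-attempt result, much as the hunk above
     * resets resp->count and resp->eof before restarting the RPC. */
    #include <stdio.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <sys/types.h>

    static ssize_t read_full(int fd, void *buf, size_t count, off_t offset)
    {
        size_t done = 0;

        while (done < count) {
            ssize_t n = pread(fd, (char *)buf + done, count - done,
                              offset + (off_t)done);
            if (n < 0)
                return -1;     /* hard error: give up */
            if (n == 0)
                break;         /* EOF: nothing left to retry for */
            done += (size_t)n; /* advance, like argp->offset/pgbase */
        }
        return (ssize_t)done;
    }

    int main(int argc, char **argv)
    {
        char buf[4096];
        int fd = open(argc > 1 ? argv[1] : "/etc/hostname", O_RDONLY);

        if (fd < 0)
            return 1;
        printf("read %zd bytes\n", read_full(fd, buf, sizeof(buf), 0));
        close(fd);
        return 0;
    }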
 
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 8d8d04b..dada09b 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -69,250 +69,6 @@
 #include "nfs.h"
 
 #define NFSDBG_FACILITY		NFSDBG_VFS
-#define NFS_TEXT_DATA		1
-
-#if IS_ENABLED(CONFIG_NFS_V3)
-#define NFS_DEFAULT_VERSION 3
-#else
-#define NFS_DEFAULT_VERSION 2
-#endif
-
-#define NFS_MAX_CONNECTIONS 16
-
-enum {
-	/* Mount options that take no arguments */
-	Opt_soft, Opt_softerr, Opt_hard,
-	Opt_posix, Opt_noposix,
-	Opt_cto, Opt_nocto,
-	Opt_ac, Opt_noac,
-	Opt_lock, Opt_nolock,
-	Opt_udp, Opt_tcp, Opt_rdma,
-	Opt_acl, Opt_noacl,
-	Opt_rdirplus, Opt_nordirplus,
-	Opt_sharecache, Opt_nosharecache,
-	Opt_resvport, Opt_noresvport,
-	Opt_fscache, Opt_nofscache,
-	Opt_migration, Opt_nomigration,
-
-	/* Mount options that take integer arguments */
-	Opt_port,
-	Opt_rsize, Opt_wsize, Opt_bsize,
-	Opt_timeo, Opt_retrans,
-	Opt_acregmin, Opt_acregmax,
-	Opt_acdirmin, Opt_acdirmax,
-	Opt_actimeo,
-	Opt_namelen,
-	Opt_mountport,
-	Opt_mountvers,
-	Opt_minorversion,
-
-	/* Mount options that take string arguments */
-	Opt_nfsvers,
-	Opt_sec, Opt_proto, Opt_mountproto, Opt_mounthost,
-	Opt_addr, Opt_mountaddr, Opt_clientaddr,
-	Opt_nconnect,
-	Opt_lookupcache,
-	Opt_fscache_uniq,
-	Opt_local_lock,
-
-	/* Special mount options */
-	Opt_userspace, Opt_deprecated, Opt_sloppy,
-
-	Opt_err
-};
-
-static const match_table_t nfs_mount_option_tokens = {
-	{ Opt_userspace, "bg" },
-	{ Opt_userspace, "fg" },
-	{ Opt_userspace, "retry=%s" },
-
-	{ Opt_sloppy, "sloppy" },
-
-	{ Opt_soft, "soft" },
-	{ Opt_softerr, "softerr" },
-	{ Opt_hard, "hard" },
-	{ Opt_deprecated, "intr" },
-	{ Opt_deprecated, "nointr" },
-	{ Opt_posix, "posix" },
-	{ Opt_noposix, "noposix" },
-	{ Opt_cto, "cto" },
-	{ Opt_nocto, "nocto" },
-	{ Opt_ac, "ac" },
-	{ Opt_noac, "noac" },
-	{ Opt_lock, "lock" },
-	{ Opt_nolock, "nolock" },
-	{ Opt_udp, "udp" },
-	{ Opt_tcp, "tcp" },
-	{ Opt_rdma, "rdma" },
-	{ Opt_acl, "acl" },
-	{ Opt_noacl, "noacl" },
-	{ Opt_rdirplus, "rdirplus" },
-	{ Opt_nordirplus, "nordirplus" },
-	{ Opt_sharecache, "sharecache" },
-	{ Opt_nosharecache, "nosharecache" },
-	{ Opt_resvport, "resvport" },
-	{ Opt_noresvport, "noresvport" },
-	{ Opt_fscache, "fsc" },
-	{ Opt_nofscache, "nofsc" },
-	{ Opt_migration, "migration" },
-	{ Opt_nomigration, "nomigration" },
-
-	{ Opt_port, "port=%s" },
-	{ Opt_rsize, "rsize=%s" },
-	{ Opt_wsize, "wsize=%s" },
-	{ Opt_bsize, "bsize=%s" },
-	{ Opt_timeo, "timeo=%s" },
-	{ Opt_retrans, "retrans=%s" },
-	{ Opt_acregmin, "acregmin=%s" },
-	{ Opt_acregmax, "acregmax=%s" },
-	{ Opt_acdirmin, "acdirmin=%s" },
-	{ Opt_acdirmax, "acdirmax=%s" },
-	{ Opt_actimeo, "actimeo=%s" },
-	{ Opt_namelen, "namlen=%s" },
-	{ Opt_mountport, "mountport=%s" },
-	{ Opt_mountvers, "mountvers=%s" },
-	{ Opt_minorversion, "minorversion=%s" },
-
-	{ Opt_nfsvers, "nfsvers=%s" },
-	{ Opt_nfsvers, "vers=%s" },
-
-	{ Opt_sec, "sec=%s" },
-	{ Opt_proto, "proto=%s" },
-	{ Opt_mountproto, "mountproto=%s" },
-	{ Opt_addr, "addr=%s" },
-	{ Opt_clientaddr, "clientaddr=%s" },
-	{ Opt_mounthost, "mounthost=%s" },
-	{ Opt_mountaddr, "mountaddr=%s" },
-
-	{ Opt_nconnect, "nconnect=%s" },
-
-	{ Opt_lookupcache, "lookupcache=%s" },
-	{ Opt_fscache_uniq, "fsc=%s" },
-	{ Opt_local_lock, "local_lock=%s" },
-
-	/* The following needs to be listed after all other options */
-	{ Opt_nfsvers, "v%s" },
-
-	{ Opt_err, NULL }
-};
-
-enum {
-	Opt_xprt_udp, Opt_xprt_udp6, Opt_xprt_tcp, Opt_xprt_tcp6, Opt_xprt_rdma,
-	Opt_xprt_rdma6,
-
-	Opt_xprt_err
-};
-
-static const match_table_t nfs_xprt_protocol_tokens = {
-	{ Opt_xprt_udp, "udp" },
-	{ Opt_xprt_udp6, "udp6" },
-	{ Opt_xprt_tcp, "tcp" },
-	{ Opt_xprt_tcp6, "tcp6" },
-	{ Opt_xprt_rdma, "rdma" },
-	{ Opt_xprt_rdma6, "rdma6" },
-
-	{ Opt_xprt_err, NULL }
-};
-
-enum {
-	Opt_sec_none, Opt_sec_sys,
-	Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p,
-	Opt_sec_lkey, Opt_sec_lkeyi, Opt_sec_lkeyp,
-	Opt_sec_spkm, Opt_sec_spkmi, Opt_sec_spkmp,
-
-	Opt_sec_err
-};
-
-static const match_table_t nfs_secflavor_tokens = {
-	{ Opt_sec_none, "none" },
-	{ Opt_sec_none, "null" },
-	{ Opt_sec_sys, "sys" },
-
-	{ Opt_sec_krb5, "krb5" },
-	{ Opt_sec_krb5i, "krb5i" },
-	{ Opt_sec_krb5p, "krb5p" },
-
-	{ Opt_sec_lkey, "lkey" },
-	{ Opt_sec_lkeyi, "lkeyi" },
-	{ Opt_sec_lkeyp, "lkeyp" },
-
-	{ Opt_sec_spkm, "spkm3" },
-	{ Opt_sec_spkmi, "spkm3i" },
-	{ Opt_sec_spkmp, "spkm3p" },
-
-	{ Opt_sec_err, NULL }
-};
-
-enum {
-	Opt_lookupcache_all, Opt_lookupcache_positive,
-	Opt_lookupcache_none,
-
-	Opt_lookupcache_err
-};
-
-static match_table_t nfs_lookupcache_tokens = {
-	{ Opt_lookupcache_all, "all" },
-	{ Opt_lookupcache_positive, "pos" },
-	{ Opt_lookupcache_positive, "positive" },
-	{ Opt_lookupcache_none, "none" },
-
-	{ Opt_lookupcache_err, NULL }
-};
-
-enum {
-	Opt_local_lock_all, Opt_local_lock_flock, Opt_local_lock_posix,
-	Opt_local_lock_none,
-
-	Opt_local_lock_err
-};
-
-static match_table_t nfs_local_lock_tokens = {
-	{ Opt_local_lock_all, "all" },
-	{ Opt_local_lock_flock, "flock" },
-	{ Opt_local_lock_posix, "posix" },
-	{ Opt_local_lock_none, "none" },
-
-	{ Opt_local_lock_err, NULL }
-};
-
-enum {
-	Opt_vers_2, Opt_vers_3, Opt_vers_4, Opt_vers_4_0,
-	Opt_vers_4_1, Opt_vers_4_2,
-
-	Opt_vers_err
-};
-
-static match_table_t nfs_vers_tokens = {
-	{ Opt_vers_2, "2" },
-	{ Opt_vers_3, "3" },
-	{ Opt_vers_4, "4" },
-	{ Opt_vers_4_0, "4.0" },
-	{ Opt_vers_4_1, "4.1" },
-	{ Opt_vers_4_2, "4.2" },
-
-	{ Opt_vers_err, NULL }
-};
-
-static struct dentry *nfs_xdev_mount(struct file_system_type *fs_type,
-		int flags, const char *dev_name, void *raw_data);
-
-struct file_system_type nfs_fs_type = {
-	.owner		= THIS_MODULE,
-	.name		= "nfs",
-	.mount		= nfs_fs_mount,
-	.kill_sb	= nfs_kill_super,
-	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
-};
-MODULE_ALIAS_FS("nfs");
-EXPORT_SYMBOL_GPL(nfs_fs_type);
-
-struct file_system_type nfs_xdev_fs_type = {
-	.owner		= THIS_MODULE,
-	.name		= "nfs",
-	.mount		= nfs_xdev_mount,
-	.kill_sb	= nfs_kill_super,
-	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
-};
 
 const struct super_operations nfs_sops = {
 	.alloc_inode	= nfs_alloc_inode,
@@ -326,26 +82,10 @@ const struct super_operations nfs_sops = {
 	.show_devname	= nfs_show_devname,
 	.show_path	= nfs_show_path,
 	.show_stats	= nfs_show_stats,
-	.remount_fs	= nfs_remount,
 };
 EXPORT_SYMBOL_GPL(nfs_sops);
 
 #if IS_ENABLED(CONFIG_NFS_V4)
-static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *);
-static int nfs4_validate_mount_data(void *options,
-	struct nfs_parsed_mount_data *args, const char *dev_name);
-
-struct file_system_type nfs4_fs_type = {
-	.owner		= THIS_MODULE,
-	.name		= "nfs4",
-	.mount		= nfs_fs_mount,
-	.kill_sb	= nfs_kill_super,
-	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
-};
-MODULE_ALIAS_FS("nfs4");
-MODULE_ALIAS("nfs4");
-EXPORT_SYMBOL_GPL(nfs4_fs_type);
-
 static int __init register_nfs4_fs(void)
 {
 	return register_filesystem(&nfs4_fs_type);
@@ -635,6 +375,7 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
 	} nfs_info[] = {
 		{ NFS_MOUNT_SOFT, ",soft", "" },
 		{ NFS_MOUNT_SOFTERR, ",softerr", "" },
+		{ NFS_MOUNT_SOFTREVAL, ",softreval", "" },
 		{ NFS_MOUNT_POSIX, ",posix", "" },
 		{ NFS_MOUNT_NOCTO, ",nocto", "" },
 		{ NFS_MOUNT_NOAC, ",noac", "" },
@@ -931,141 +672,6 @@ void nfs_umount_begin(struct super_block *sb)
 }
 EXPORT_SYMBOL_GPL(nfs_umount_begin);
 
-static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
-{
-	struct nfs_parsed_mount_data *data;
-
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (data) {
-		data->timeo		= NFS_UNSPEC_TIMEO;
-		data->retrans		= NFS_UNSPEC_RETRANS;
-		data->acregmin		= NFS_DEF_ACREGMIN;
-		data->acregmax		= NFS_DEF_ACREGMAX;
-		data->acdirmin		= NFS_DEF_ACDIRMIN;
-		data->acdirmax		= NFS_DEF_ACDIRMAX;
-		data->mount_server.port	= NFS_UNSPEC_PORT;
-		data->nfs_server.port	= NFS_UNSPEC_PORT;
-		data->nfs_server.protocol = XPRT_TRANSPORT_TCP;
-		data->selected_flavor	= RPC_AUTH_MAXFLAVOR;
-		data->minorversion	= 0;
-		data->need_mount	= true;
-		data->net		= current->nsproxy->net_ns;
-		data->lsm_opts		= NULL;
-	}
-	return data;
-}
-
-static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
-{
-	if (data) {
-		kfree(data->client_address);
-		kfree(data->mount_server.hostname);
-		kfree(data->nfs_server.export_path);
-		kfree(data->nfs_server.hostname);
-		kfree(data->fscache_uniq);
-		security_free_mnt_opts(&data->lsm_opts);
-		kfree(data);
-	}
-}
-
-/*
- * Sanity-check a server address provided by the mount command.
- *
- * Address family must be initialized, and address must not be
- * the ANY address for that family.
- */
-static int nfs_verify_server_address(struct sockaddr *addr)
-{
-	switch (addr->sa_family) {
-	case AF_INET: {
-		struct sockaddr_in *sa = (struct sockaddr_in *)addr;
-		return sa->sin_addr.s_addr != htonl(INADDR_ANY);
-	}
-	case AF_INET6: {
-		struct in6_addr *sa = &((struct sockaddr_in6 *)addr)->sin6_addr;
-		return !ipv6_addr_any(sa);
-	}
-	}
-
-	dfprintk(MOUNT, "NFS: Invalid IP address specified\n");
-	return 0;
-}
-
-/*
- * Select between a default port value and a user-specified port value.
- * If a zero value is set, then autobind will be used.
- */
-static void nfs_set_port(struct sockaddr *sap, int *port,
-				 const unsigned short default_port)
-{
-	if (*port == NFS_UNSPEC_PORT)
-		*port = default_port;
-
-	rpc_set_port(sap, *port);
-}
-
-/*
- * Sanity check the NFS transport protocol.
- *
- */
-static void nfs_validate_transport_protocol(struct nfs_parsed_mount_data *mnt)
-{
-	switch (mnt->nfs_server.protocol) {
-	case XPRT_TRANSPORT_UDP:
-	case XPRT_TRANSPORT_TCP:
-	case XPRT_TRANSPORT_RDMA:
-		break;
-	default:
-		mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
-	}
-}
-
-/*
- * For text based NFSv2/v3 mounts, the mount protocol transport default
- * settings should depend upon the specified NFS transport.
- */
-static void nfs_set_mount_transport_protocol(struct nfs_parsed_mount_data *mnt)
-{
-	nfs_validate_transport_protocol(mnt);
-
-	if (mnt->mount_server.protocol == XPRT_TRANSPORT_UDP ||
-	    mnt->mount_server.protocol == XPRT_TRANSPORT_TCP)
-			return;
-	switch (mnt->nfs_server.protocol) {
-	case XPRT_TRANSPORT_UDP:
-		mnt->mount_server.protocol = XPRT_TRANSPORT_UDP;
-		break;
-	case XPRT_TRANSPORT_TCP:
-	case XPRT_TRANSPORT_RDMA:
-		mnt->mount_server.protocol = XPRT_TRANSPORT_TCP;
-	}
-}
-
-/*
- * Add 'flavor' to 'auth_info' if not already present.
- * Returns true if 'flavor' ends up in the list, false otherwise
- */
-static bool nfs_auth_info_add(struct nfs_auth_info *auth_info,
-			      rpc_authflavor_t flavor)
-{
-	unsigned int i;
-	unsigned int max_flavor_len = ARRAY_SIZE(auth_info->flavors);
-
-	/* make sure this flavor isn't already in the list */
-	for (i = 0; i < auth_info->flavor_len; i++) {
-		if (flavor == auth_info->flavors[i])
-			return true;
-	}
-
-	if (auth_info->flavor_len + 1 >= max_flavor_len) {
-		dfprintk(MOUNT, "NFS: too many sec= flavors\n");
-		return false;
-	}
-
-	auth_info->flavors[auth_info->flavor_len++] = flavor;
-	return true;
-}
-
 /*
  * Return true if 'match' is in auth_info or auth_info is empty.
  * Return false otherwise.
@@ -1087,633 +693,13 @@ bool nfs_auth_info_match(const struct nfs_auth_info *auth_info,
 EXPORT_SYMBOL_GPL(nfs_auth_info_match);
 
 /*
- * Parse the value of the 'sec=' option.
- */
-static int nfs_parse_security_flavors(char *value,
-				      struct nfs_parsed_mount_data *mnt)
-{
-	substring_t args[MAX_OPT_ARGS];
-	rpc_authflavor_t pseudoflavor;
-	char *p;
-
-	dfprintk(MOUNT, "NFS: parsing sec=%s option\n", value);
-
-	while ((p = strsep(&value, ":")) != NULL) {
-		switch (match_token(p, nfs_secflavor_tokens, args)) {
-		case Opt_sec_none:
-			pseudoflavor = RPC_AUTH_NULL;
-			break;
-		case Opt_sec_sys:
-			pseudoflavor = RPC_AUTH_UNIX;
-			break;
-		case Opt_sec_krb5:
-			pseudoflavor = RPC_AUTH_GSS_KRB5;
-			break;
-		case Opt_sec_krb5i:
-			pseudoflavor = RPC_AUTH_GSS_KRB5I;
-			break;
-		case Opt_sec_krb5p:
-			pseudoflavor = RPC_AUTH_GSS_KRB5P;
-			break;
-		case Opt_sec_lkey:
-			pseudoflavor = RPC_AUTH_GSS_LKEY;
-			break;
-		case Opt_sec_lkeyi:
-			pseudoflavor = RPC_AUTH_GSS_LKEYI;
-			break;
-		case Opt_sec_lkeyp:
-			pseudoflavor = RPC_AUTH_GSS_LKEYP;
-			break;
-		case Opt_sec_spkm:
-			pseudoflavor = RPC_AUTH_GSS_SPKM;
-			break;
-		case Opt_sec_spkmi:
-			pseudoflavor = RPC_AUTH_GSS_SPKMI;
-			break;
-		case Opt_sec_spkmp:
-			pseudoflavor = RPC_AUTH_GSS_SPKMP;
-			break;
-		default:
-			dfprintk(MOUNT,
-				 "NFS: sec= option '%s' not recognized\n", p);
-			return 0;
-		}
-
-		if (!nfs_auth_info_add(&mnt->auth_info, pseudoflavor))
-			return 0;
-	}
-
-	return 1;
-}
-
-static int nfs_parse_version_string(char *string,
-		struct nfs_parsed_mount_data *mnt,
-		substring_t *args)
-{
-	mnt->flags &= ~NFS_MOUNT_VER3;
-	switch (match_token(string, nfs_vers_tokens, args)) {
-	case Opt_vers_2:
-		mnt->version = 2;
-		break;
-	case Opt_vers_3:
-		mnt->flags |= NFS_MOUNT_VER3;
-		mnt->version = 3;
-		break;
-	case Opt_vers_4:
-		/* Backward compatibility option. In future,
-		 * the mount program should always supply
-		 * a NFSv4 minor version number.
-		 */
-		mnt->version = 4;
-		break;
-	case Opt_vers_4_0:
-		mnt->version = 4;
-		mnt->minorversion = 0;
-		break;
-	case Opt_vers_4_1:
-		mnt->version = 4;
-		mnt->minorversion = 1;
-		break;
-	case Opt_vers_4_2:
-		mnt->version = 4;
-		mnt->minorversion = 2;
-		break;
-	default:
-		return 0;
-	}
-	return 1;
-}
-
-static int nfs_get_option_str(substring_t args[], char **option)
-{
-	kfree(*option);
-	*option = match_strdup(args);
-	return !*option;
-}
-
-static int nfs_get_option_ul(substring_t args[], unsigned long *option)
-{
-	int rc;
-	char *string;
-
-	string = match_strdup(args);
-	if (string == NULL)
-		return -ENOMEM;
-	rc = kstrtoul(string, 10, option);
-	kfree(string);
-
-	return rc;
-}
-
-static int nfs_get_option_ul_bound(substring_t args[], unsigned long *option,
-		unsigned long l_bound, unsigned long u_bound)
-{
-	int ret;
-
-	ret = nfs_get_option_ul(args, option);
-	if (ret != 0)
-		return ret;
-	if (*option < l_bound || *option > u_bound)
-		return -ERANGE;
-	return 0;
-}
-
-/*
- * Error-check and convert a string of mount options from user space into
- * a data structure.  The whole mount string is processed; bad options are
- * skipped as they are encountered.  If there were no errors, return 1;
- * otherwise return 0 (zero).
- */
-static int nfs_parse_mount_options(char *raw,
-				   struct nfs_parsed_mount_data *mnt)
-{
-	char *p, *string;
-	int rc, sloppy = 0, invalid_option = 0;
-	unsigned short protofamily = AF_UNSPEC;
-	unsigned short mountfamily = AF_UNSPEC;
-
-	if (!raw) {
-		dfprintk(MOUNT, "NFS: mount options string was NULL.\n");
-		return 1;
-	}
-	dfprintk(MOUNT, "NFS: nfs mount opts='%s'\n", raw);
-
-	rc = security_sb_eat_lsm_opts(raw, &mnt->lsm_opts);
-	if (rc)
-		goto out_security_failure;
-
-	while ((p = strsep(&raw, ",")) != NULL) {
-		substring_t args[MAX_OPT_ARGS];
-		unsigned long option;
-		int token;
-
-		if (!*p)
-			continue;
-
-		dfprintk(MOUNT, "NFS:   parsing nfs mount option '%s'\n", p);
-
-		token = match_token(p, nfs_mount_option_tokens, args);
-		switch (token) {
-
-		/*
-		 * boolean options:  foo/nofoo
-		 */
-		case Opt_soft:
-			mnt->flags |= NFS_MOUNT_SOFT;
-			mnt->flags &= ~NFS_MOUNT_SOFTERR;
-			break;
-		case Opt_softerr:
-			mnt->flags |= NFS_MOUNT_SOFTERR;
-			mnt->flags &= ~NFS_MOUNT_SOFT;
-			break;
-		case Opt_hard:
-			mnt->flags &= ~(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR);
-			break;
-		case Opt_posix:
-			mnt->flags |= NFS_MOUNT_POSIX;
-			break;
-		case Opt_noposix:
-			mnt->flags &= ~NFS_MOUNT_POSIX;
-			break;
-		case Opt_cto:
-			mnt->flags &= ~NFS_MOUNT_NOCTO;
-			break;
-		case Opt_nocto:
-			mnt->flags |= NFS_MOUNT_NOCTO;
-			break;
-		case Opt_ac:
-			mnt->flags &= ~NFS_MOUNT_NOAC;
-			break;
-		case Opt_noac:
-			mnt->flags |= NFS_MOUNT_NOAC;
-			break;
-		case Opt_lock:
-			mnt->flags &= ~NFS_MOUNT_NONLM;
-			mnt->flags &= ~(NFS_MOUNT_LOCAL_FLOCK |
-					NFS_MOUNT_LOCAL_FCNTL);
-			break;
-		case Opt_nolock:
-			mnt->flags |= NFS_MOUNT_NONLM;
-			mnt->flags |= (NFS_MOUNT_LOCAL_FLOCK |
-				       NFS_MOUNT_LOCAL_FCNTL);
-			break;
-		case Opt_udp:
-			mnt->flags &= ~NFS_MOUNT_TCP;
-			mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
-			break;
-		case Opt_tcp:
-			mnt->flags |= NFS_MOUNT_TCP;
-			mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
-			break;
-		case Opt_rdma:
-			mnt->flags |= NFS_MOUNT_TCP; /* for side protocols */
-			mnt->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
-			xprt_load_transport(p);
-			break;
-		case Opt_acl:
-			mnt->flags &= ~NFS_MOUNT_NOACL;
-			break;
-		case Opt_noacl:
-			mnt->flags |= NFS_MOUNT_NOACL;
-			break;
-		case Opt_rdirplus:
-			mnt->flags &= ~NFS_MOUNT_NORDIRPLUS;
-			break;
-		case Opt_nordirplus:
-			mnt->flags |= NFS_MOUNT_NORDIRPLUS;
-			break;
-		case Opt_sharecache:
-			mnt->flags &= ~NFS_MOUNT_UNSHARED;
-			break;
-		case Opt_nosharecache:
-			mnt->flags |= NFS_MOUNT_UNSHARED;
-			break;
-		case Opt_resvport:
-			mnt->flags &= ~NFS_MOUNT_NORESVPORT;
-			break;
-		case Opt_noresvport:
-			mnt->flags |= NFS_MOUNT_NORESVPORT;
-			break;
-		case Opt_fscache:
-			mnt->options |= NFS_OPTION_FSCACHE;
-			kfree(mnt->fscache_uniq);
-			mnt->fscache_uniq = NULL;
-			break;
-		case Opt_nofscache:
-			mnt->options &= ~NFS_OPTION_FSCACHE;
-			kfree(mnt->fscache_uniq);
-			mnt->fscache_uniq = NULL;
-			break;
-		case Opt_migration:
-			mnt->options |= NFS_OPTION_MIGRATION;
-			break;
-		case Opt_nomigration:
-			mnt->options &= ~NFS_OPTION_MIGRATION;
-			break;
-
-		/*
-		 * options that take numeric values
-		 */
-		case Opt_port:
-			if (nfs_get_option_ul(args, &option) ||
-			    option > USHRT_MAX)
-				goto out_invalid_value;
-			mnt->nfs_server.port = option;
-			break;
-		case Opt_rsize:
-			if (nfs_get_option_ul(args, &option))
-				goto out_invalid_value;
-			mnt->rsize = option;
-			break;
-		case Opt_wsize:
-			if (nfs_get_option_ul(args, &option))
-				goto out_invalid_value;
-			mnt->wsize = option;
-			break;
-		case Opt_bsize:
-			if (nfs_get_option_ul(args, &option))
-				goto out_invalid_value;
-			mnt->bsize = option;
-			break;
-		case Opt_timeo:
-			if (nfs_get_option_ul_bound(args, &option, 1, INT_MAX))
-				goto out_invalid_value;
-			mnt->timeo = option;
-			break;
-		case Opt_retrans:
-			if (nfs_get_option_ul_bound(args, &option, 0, INT_MAX))
-				goto out_invalid_value;
-			mnt->retrans = option;
-			break;
-		case Opt_acregmin:
-			if (nfs_get_option_ul(args, &option))
-				goto out_invalid_value;
-			mnt->acregmin = option;
-			break;
-		case Opt_acregmax:
-			if (nfs_get_option_ul(args, &option))
-				goto out_invalid_value;
-			mnt->acregmax = option;
-			break;
-		case Opt_acdirmin:
-			if (nfs_get_option_ul(args, &option))
-				goto out_invalid_value;
-			mnt->acdirmin = option;
-			break;
-		case Opt_acdirmax:
-			if (nfs_get_option_ul(args, &option))
-				goto out_invalid_value;
-			mnt->acdirmax = option;
-			break;
-		case Opt_actimeo:
-			if (nfs_get_option_ul(args, &option))
-				goto out_invalid_value;
-			mnt->acregmin = mnt->acregmax =
-			mnt->acdirmin = mnt->acdirmax = option;
-			break;
-		case Opt_namelen:
-			if (nfs_get_option_ul(args, &option))
-				goto out_invalid_value;
-			mnt->namlen = option;
-			break;
-		case Opt_mountport:
-			if (nfs_get_option_ul(args, &option) ||
-			    option > USHRT_MAX)
-				goto out_invalid_value;
-			mnt->mount_server.port = option;
-			break;
-		case Opt_mountvers:
-			if (nfs_get_option_ul(args, &option) ||
-			    option < NFS_MNT_VERSION ||
-			    option > NFS_MNT3_VERSION)
-				goto out_invalid_value;
-			mnt->mount_server.version = option;
-			break;
-		case Opt_minorversion:
-			if (nfs_get_option_ul(args, &option))
-				goto out_invalid_value;
-			if (option > NFS4_MAX_MINOR_VERSION)
-				goto out_invalid_value;
-			mnt->minorversion = option;
-			break;
-
-		/*
-		 * options that take text values
-		 */
-		case Opt_nfsvers:
-			string = match_strdup(args);
-			if (string == NULL)
-				goto out_nomem;
-			rc = nfs_parse_version_string(string, mnt, args);
-			kfree(string);
-			if (!rc)
-				goto out_invalid_value;
-			break;
-		case Opt_sec:
-			string = match_strdup(args);
-			if (string == NULL)
-				goto out_nomem;
-			rc = nfs_parse_security_flavors(string, mnt);
-			kfree(string);
-			if (!rc) {
-				dfprintk(MOUNT, "NFS:   unrecognized "
-						"security flavor\n");
-				return 0;
-			}
-			break;
-		case Opt_proto:
-			string = match_strdup(args);
-			if (string == NULL)
-				goto out_nomem;
-			token = match_token(string,
-					    nfs_xprt_protocol_tokens, args);
-
-			protofamily = AF_INET;
-			switch (token) {
-			case Opt_xprt_udp6:
-				protofamily = AF_INET6;
-				/* fall through */
-			case Opt_xprt_udp:
-				mnt->flags &= ~NFS_MOUNT_TCP;
-				mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
-				break;
-			case Opt_xprt_tcp6:
-				protofamily = AF_INET6;
-				/* fall through */
-			case Opt_xprt_tcp:
-				mnt->flags |= NFS_MOUNT_TCP;
-				mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
-				break;
-			case Opt_xprt_rdma6:
-				protofamily = AF_INET6;
-				/* fall through */
-			case Opt_xprt_rdma:
-				/* vector side protocols to TCP */
-				mnt->flags |= NFS_MOUNT_TCP;
-				mnt->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
-				xprt_load_transport(string);
-				break;
-			default:
-				dfprintk(MOUNT, "NFS:   unrecognized "
-						"transport protocol\n");
-				kfree(string);
-				return 0;
-			}
-			kfree(string);
-			break;
-		case Opt_mountproto:
-			string = match_strdup(args);
-			if (string == NULL)
-				goto out_nomem;
-			token = match_token(string,
-					    nfs_xprt_protocol_tokens, args);
-			kfree(string);
-
-			mountfamily = AF_INET;
-			switch (token) {
-			case Opt_xprt_udp6:
-				mountfamily = AF_INET6;
-				/* fall through */
-			case Opt_xprt_udp:
-				mnt->mount_server.protocol = XPRT_TRANSPORT_UDP;
-				break;
-			case Opt_xprt_tcp6:
-				mountfamily = AF_INET6;
-				/* fall through */
-			case Opt_xprt_tcp:
-				mnt->mount_server.protocol = XPRT_TRANSPORT_TCP;
-				break;
-			case Opt_xprt_rdma: /* not used for side protocols */
-			default:
-				dfprintk(MOUNT, "NFS:   unrecognized "
-						"transport protocol\n");
-				return 0;
-			}
-			break;
-		case Opt_addr:
-			string = match_strdup(args);
-			if (string == NULL)
-				goto out_nomem;
-			mnt->nfs_server.addrlen =
-				rpc_pton(mnt->net, string, strlen(string),
-					(struct sockaddr *)
-					&mnt->nfs_server.address,
-					sizeof(mnt->nfs_server.address));
-			kfree(string);
-			if (mnt->nfs_server.addrlen == 0)
-				goto out_invalid_address;
-			break;
-		case Opt_clientaddr:
-			if (nfs_get_option_str(args, &mnt->client_address))
-				goto out_nomem;
-			break;
-		case Opt_mounthost:
-			if (nfs_get_option_str(args,
-					       &mnt->mount_server.hostname))
-				goto out_nomem;
-			break;
-		case Opt_mountaddr:
-			string = match_strdup(args);
-			if (string == NULL)
-				goto out_nomem;
-			mnt->mount_server.addrlen =
-				rpc_pton(mnt->net, string, strlen(string),
-					(struct sockaddr *)
-					&mnt->mount_server.address,
-					sizeof(mnt->mount_server.address));
-			kfree(string);
-			if (mnt->mount_server.addrlen == 0)
-				goto out_invalid_address;
-			break;
-		case Opt_nconnect:
-			if (nfs_get_option_ul_bound(args, &option, 1, NFS_MAX_CONNECTIONS))
-				goto out_invalid_value;
-			mnt->nfs_server.nconnect = option;
-			break;
-		case Opt_lookupcache:
-			string = match_strdup(args);
-			if (string == NULL)
-				goto out_nomem;
-			token = match_token(string,
-					nfs_lookupcache_tokens, args);
-			kfree(string);
-			switch (token) {
-				case Opt_lookupcache_all:
-					mnt->flags &= ~(NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE);
-					break;
-				case Opt_lookupcache_positive:
-					mnt->flags &= ~NFS_MOUNT_LOOKUP_CACHE_NONE;
-					mnt->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG;
-					break;
-				case Opt_lookupcache_none:
-					mnt->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE;
-					break;
-				default:
-					dfprintk(MOUNT, "NFS:   invalid "
-							"lookupcache argument\n");
-					return 0;
-			}
-			break;
-		case Opt_fscache_uniq:
-			if (nfs_get_option_str(args, &mnt->fscache_uniq))
-				goto out_nomem;
-			mnt->options |= NFS_OPTION_FSCACHE;
-			break;
-		case Opt_local_lock:
-			string = match_strdup(args);
-			if (string == NULL)
-				goto out_nomem;
-			token = match_token(string, nfs_local_lock_tokens,
-					args);
-			kfree(string);
-			switch (token) {
-			case Opt_local_lock_all:
-				mnt->flags |= (NFS_MOUNT_LOCAL_FLOCK |
-					       NFS_MOUNT_LOCAL_FCNTL);
-				break;
-			case Opt_local_lock_flock:
-				mnt->flags |= NFS_MOUNT_LOCAL_FLOCK;
-				break;
-			case Opt_local_lock_posix:
-				mnt->flags |= NFS_MOUNT_LOCAL_FCNTL;
-				break;
-			case Opt_local_lock_none:
-				mnt->flags &= ~(NFS_MOUNT_LOCAL_FLOCK |
-						NFS_MOUNT_LOCAL_FCNTL);
-				break;
-			default:
-				dfprintk(MOUNT, "NFS:	invalid	"
-						"local_lock argument\n");
-				return 0;
-			}
-			break;
-
-		/*
-		 * Special options
-		 */
-		case Opt_sloppy:
-			sloppy = 1;
-			dfprintk(MOUNT, "NFS:   relaxing parsing rules\n");
-			break;
-		case Opt_userspace:
-		case Opt_deprecated:
-			dfprintk(MOUNT, "NFS:   ignoring mount option "
-					"'%s'\n", p);
-			break;
-
-		default:
-			invalid_option = 1;
-			dfprintk(MOUNT, "NFS:   unrecognized mount option "
-					"'%s'\n", p);
-		}
-	}
-
-	if (!sloppy && invalid_option)
-		return 0;
-
-	if (mnt->minorversion && mnt->version != 4)
-		goto out_minorversion_mismatch;
-
-	if (mnt->options & NFS_OPTION_MIGRATION &&
-	    (mnt->version != 4 || mnt->minorversion != 0))
-		goto out_migration_misuse;
-
-	/*
-	 * verify that any proto=/mountproto= options match the address
-	 * families in the addr=/mountaddr= options.
-	 */
-	if (protofamily != AF_UNSPEC &&
-	    protofamily != mnt->nfs_server.address.ss_family)
-		goto out_proto_mismatch;
-
-	if (mountfamily != AF_UNSPEC) {
-		if (mnt->mount_server.addrlen) {
-			if (mountfamily != mnt->mount_server.address.ss_family)
-				goto out_mountproto_mismatch;
-		} else {
-			if (mountfamily != mnt->nfs_server.address.ss_family)
-				goto out_mountproto_mismatch;
-		}
-	}
-
-	return 1;
-
-out_mountproto_mismatch:
-	printk(KERN_INFO "NFS: mount server address does not match mountproto= "
-			 "option\n");
-	return 0;
-out_proto_mismatch:
-	printk(KERN_INFO "NFS: server address does not match proto= option\n");
-	return 0;
-out_invalid_address:
-	printk(KERN_INFO "NFS: bad IP address specified: %s\n", p);
-	return 0;
-out_invalid_value:
-	printk(KERN_INFO "NFS: bad mount option value specified: %s\n", p);
-	return 0;
-out_minorversion_mismatch:
-	printk(KERN_INFO "NFS: mount option vers=%u does not support "
-			 "minorversion=%u\n", mnt->version, mnt->minorversion);
-	return 0;
-out_migration_misuse:
-	printk(KERN_INFO
-		"NFS: 'migration' not supported for this NFS version\n");
-	return 0;
-out_nomem:
-	printk(KERN_INFO "NFS: not enough memory to parse option\n");
-	return 0;
-out_security_failure:
-	printk(KERN_INFO "NFS: security options invalid: %d\n", rc);
-	return 0;
-}
-
-/*
- * Ensure that a specified authtype in args->auth_info is supported by
- * the server. Returns 0 and sets args->selected_flavor if it's ok, and
+ * Ensure that a specified authtype in ctx->auth_info is supported by
+ * the server. Returns 0 and sets ctx->selected_flavor if it's ok, and
  * -EACCES if not.
  */
-static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
-			rpc_authflavor_t *server_authlist, unsigned int count)
+static int nfs_verify_authflavors(struct nfs_fs_context *ctx,
+				  rpc_authflavor_t *server_authlist,
+				  unsigned int count)
 {
 	rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
 	bool found_auth_null = false;
@@ -1734,7 +720,7 @@ static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
 	for (i = 0; i < count; i++) {
 		flavor = server_authlist[i];
 
-		if (nfs_auth_info_match(&args->auth_info, flavor))
+		if (nfs_auth_info_match(&ctx->auth_info, flavor))
 			goto out;
 
 		if (flavor == RPC_AUTH_NULL)
@@ -1742,7 +728,7 @@ static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
 	}
 
 	if (found_auth_null) {
-		flavor = args->auth_info.flavors[0];
+		flavor = ctx->auth_info.flavors[0];
 		goto out;
 	}
 
@@ -1751,8 +737,8 @@ static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
 	return -EACCES;
 
 out:
-	args->selected_flavor = flavor;
-	dfprintk(MOUNT, "NFS: using auth flavor %u\n", args->selected_flavor);
+	ctx->selected_flavor = flavor;
+	dfprintk(MOUNT, "NFS: using auth flavor %u\n", ctx->selected_flavor);
 	return 0;
 }
 
@@ -1760,50 +746,51 @@ static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
  * Use the remote server's MOUNT service to request the NFS file handle
  * corresponding to the provided path.
  */
-static int nfs_request_mount(struct nfs_parsed_mount_data *args,
+static int nfs_request_mount(struct fs_context *fc,
 			     struct nfs_fh *root_fh,
 			     rpc_authflavor_t *server_authlist,
 			     unsigned int *server_authlist_len)
 {
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
 	struct nfs_mount_request request = {
 		.sap		= (struct sockaddr *)
-						&args->mount_server.address,
-		.dirpath	= args->nfs_server.export_path,
-		.protocol	= args->mount_server.protocol,
+						&ctx->mount_server.address,
+		.dirpath	= ctx->nfs_server.export_path,
+		.protocol	= ctx->mount_server.protocol,
 		.fh		= root_fh,
-		.noresvport	= args->flags & NFS_MOUNT_NORESVPORT,
+		.noresvport	= ctx->flags & NFS_MOUNT_NORESVPORT,
 		.auth_flav_len	= server_authlist_len,
 		.auth_flavs	= server_authlist,
-		.net		= args->net,
+		.net		= fc->net_ns,
 	};
 	int status;
 
-	if (args->mount_server.version == 0) {
-		switch (args->version) {
+	if (ctx->mount_server.version == 0) {
+		switch (ctx->version) {
 			default:
-				args->mount_server.version = NFS_MNT3_VERSION;
+				ctx->mount_server.version = NFS_MNT3_VERSION;
 				break;
 			case 2:
-				args->mount_server.version = NFS_MNT_VERSION;
+				ctx->mount_server.version = NFS_MNT_VERSION;
 		}
 	}
-	request.version = args->mount_server.version;
+	request.version = ctx->mount_server.version;
 
-	if (args->mount_server.hostname)
-		request.hostname = args->mount_server.hostname;
+	if (ctx->mount_server.hostname)
+		request.hostname = ctx->mount_server.hostname;
 	else
-		request.hostname = args->nfs_server.hostname;
+		request.hostname = ctx->nfs_server.hostname;
 
 	/*
 	 * Construct the mount server's address.
 	 */
-	if (args->mount_server.address.ss_family == AF_UNSPEC) {
-		memcpy(request.sap, &args->nfs_server.address,
-		       args->nfs_server.addrlen);
-		args->mount_server.addrlen = args->nfs_server.addrlen;
+	if (ctx->mount_server.address.sa_family == AF_UNSPEC) {
+		memcpy(request.sap, &ctx->nfs_server.address,
+		       ctx->nfs_server.addrlen);
+		ctx->mount_server.addrlen = ctx->nfs_server.addrlen;
 	}
-	request.salen = args->mount_server.addrlen;
-	nfs_set_port(request.sap, &args->mount_server.port, 0);
+	request.salen = ctx->mount_server.addrlen;
+	nfs_set_port(request.sap, &ctx->mount_server.port, 0);
 
 	/*
 	 * Now ask the mount server to map our export path
@@ -1819,20 +806,18 @@ static int nfs_request_mount(struct nfs_parsed_mount_data *args,
 	return 0;
 }
 
-static struct nfs_server *nfs_try_mount_request(struct nfs_mount_info *mount_info,
-					struct nfs_subversion *nfs_mod)
+static struct nfs_server *nfs_try_mount_request(struct fs_context *fc)
 {
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
 	int status;
 	unsigned int i;
 	bool tried_auth_unix = false;
 	bool auth_null_in_list = false;
 	struct nfs_server *server = ERR_PTR(-EACCES);
-	struct nfs_parsed_mount_data *args = mount_info->parsed;
 	rpc_authflavor_t authlist[NFS_MAX_SECFLAVORS];
 	unsigned int authlist_len = ARRAY_SIZE(authlist);
 
-	status = nfs_request_mount(args, mount_info->mntfh, authlist,
-					&authlist_len);
+	status = nfs_request_mount(fc, ctx->mntfh, authlist, &authlist_len);
 	if (status)
 		return ERR_PTR(status);
 
@@ -1840,13 +825,13 @@ static struct nfs_server *nfs_try_mount_request(struct nfs_mount_info *mount_inf
 	 * Was a sec= authflavor specified in the options? First, verify
 	 * whether the server supports it, and then just try to use it if so.
 	 */
-	if (args->auth_info.flavor_len > 0) {
-		status = nfs_verify_authflavors(args, authlist, authlist_len);
+	if (ctx->auth_info.flavor_len > 0) {
+		status = nfs_verify_authflavors(ctx, authlist, authlist_len);
 		dfprintk(MOUNT, "NFS: using auth flavor %u\n",
-			 args->selected_flavor);
+			 ctx->selected_flavor);
 		if (status)
 			return ERR_PTR(status);
-		return nfs_mod->rpc_ops->create_server(mount_info, nfs_mod);
+		return ctx->nfs_mod->rpc_ops->create_server(fc);
 	}
 
 	/*
@@ -1872,8 +857,8 @@ static struct nfs_server *nfs_try_mount_request(struct nfs_mount_info *mount_inf
 			/* Fallthrough */
 		}
 		dfprintk(MOUNT, "NFS: attempting to use auth flavor %u\n", flavor);
-		args->selected_flavor = flavor;
-		server = nfs_mod->rpc_ops->create_server(mount_info, nfs_mod);
+		ctx->selected_flavor = flavor;
+		server = ctx->nfs_mod->rpc_ops->create_server(fc);
 		if (!IS_ERR(server))
 			return server;
 	}
@@ -1888,348 +873,23 @@ static struct nfs_server *nfs_try_mount_request(struct nfs_mount_info *mount_inf
 
 	/* Last chance! Try AUTH_UNIX */
 	dfprintk(MOUNT, "NFS: attempting to use auth flavor %u\n", RPC_AUTH_UNIX);
-	args->selected_flavor = RPC_AUTH_UNIX;
-	return nfs_mod->rpc_ops->create_server(mount_info, nfs_mod);
+	ctx->selected_flavor = RPC_AUTH_UNIX;
+	return ctx->nfs_mod->rpc_ops->create_server(fc);
 }
 
-struct dentry *nfs_try_mount(int flags, const char *dev_name,
-			     struct nfs_mount_info *mount_info,
-			     struct nfs_subversion *nfs_mod)
+int nfs_try_get_tree(struct fs_context *fc)
 {
-	struct nfs_server *server;
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
 
-	if (mount_info->parsed->need_mount)
-		server = nfs_try_mount_request(mount_info, nfs_mod);
+	if (ctx->need_mount)
+		ctx->server = nfs_try_mount_request(fc);
 	else
-		server = nfs_mod->rpc_ops->create_server(mount_info, nfs_mod);
+		ctx->server = ctx->nfs_mod->rpc_ops->create_server(fc);
 
-	if (IS_ERR(server))
-		return ERR_CAST(server);
-
-	return nfs_fs_mount_common(server, flags, dev_name, mount_info, nfs_mod);
+	return nfs_get_tree_common(fc);
 }
-EXPORT_SYMBOL_GPL(nfs_try_mount);
+EXPORT_SYMBOL_GPL(nfs_try_get_tree);
 
-/*
- * Split "dev_name" into "hostname:export_path".
- *
- * The leftmost colon demarks the split between the server's hostname
- * and the export path.  If the hostname starts with a left square
- * bracket, then it may contain colons.
- *
- * Note: caller frees hostname and export path, even on error.
- */
-static int nfs_parse_devname(const char *dev_name,
-			     char **hostname, size_t maxnamlen,
-			     char **export_path, size_t maxpathlen)
-{
-	size_t len;
-	char *end;
-
-	if (unlikely(!dev_name || !*dev_name)) {
-		dfprintk(MOUNT, "NFS: device name not specified\n");
-		return -EINVAL;
-	}
-
-	/* Is the host name protected with square brakcets? */
-	if (*dev_name == '[') {
-		end = strchr(++dev_name, ']');
-		if (end == NULL || end[1] != ':')
-			goto out_bad_devname;
-
-		len = end - dev_name;
-		end++;
-	} else {
-		char *comma;
-
-		end = strchr(dev_name, ':');
-		if (end == NULL)
-			goto out_bad_devname;
-		len = end - dev_name;
-
-		/* kill possible hostname list: not supported */
-		comma = strchr(dev_name, ',');
-		if (comma != NULL && comma < end)
-			len = comma - dev_name;
-	}
-
-	if (len > maxnamlen)
-		goto out_hostname;
-
-	/* N.B. caller will free nfs_server.hostname in all cases */
-	*hostname = kstrndup(dev_name, len, GFP_KERNEL);
-	if (*hostname == NULL)
-		goto out_nomem;
-	len = strlen(++end);
-	if (len > maxpathlen)
-		goto out_path;
-	*export_path = kstrndup(end, len, GFP_KERNEL);
-	if (!*export_path)
-		goto out_nomem;
-
-	dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", *export_path);
-	return 0;
-
-out_bad_devname:
-	dfprintk(MOUNT, "NFS: device name not in host:path format\n");
-	return -EINVAL;
-
-out_nomem:
-	dfprintk(MOUNT, "NFS: not enough memory to parse device name\n");
-	return -ENOMEM;
-
-out_hostname:
-	dfprintk(MOUNT, "NFS: server hostname too long\n");
-	return -ENAMETOOLONG;
-
-out_path:
-	dfprintk(MOUNT, "NFS: export pathname too long\n");
-	return -ENAMETOOLONG;
-}
-
-/*
- * Validate the NFS2/NFS3 mount data
- * - fills in the mount root filehandle
- *
- * For option strings, user space handles the following behaviors:
- *
- * + DNS: mapping server host name to IP address ("addr=" option)
- *
- * + failure mode: how to behave if a mount request can't be handled
- *   immediately ("fg/bg" option)
- *
- * + retry: how often to retry a mount request ("retry=" option)
- *
- * + breaking back: trying proto=udp after proto=tcp, v2 after v3,
- *   mountproto=tcp after mountproto=udp, and so on
- */
-static int nfs23_validate_mount_data(void *options,
-				     struct nfs_parsed_mount_data *args,
-				     struct nfs_fh *mntfh,
-				     const char *dev_name)
-{
-	struct nfs_mount_data *data = (struct nfs_mount_data *)options;
-	struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
-	int extra_flags = NFS_MOUNT_LEGACY_INTERFACE;
-
-	if (data == NULL)
-		goto out_no_data;
-
-	args->version = NFS_DEFAULT_VERSION;
-	switch (data->version) {
-	case 1:
-		data->namlen = 0; /* fall through */
-	case 2:
-		data->bsize = 0; /* fall through */
-	case 3:
-		if (data->flags & NFS_MOUNT_VER3)
-			goto out_no_v3;
-		data->root.size = NFS2_FHSIZE;
-		memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE);
-		/* Turn off security negotiation */
-		extra_flags |= NFS_MOUNT_SECFLAVOUR;
-		/* fall through */
-	case 4:
-		if (data->flags & NFS_MOUNT_SECFLAVOUR)
-			goto out_no_sec;
-		/* fall through */
-	case 5:
-		memset(data->context, 0, sizeof(data->context));
-		/* fall through */
-	case 6:
-		if (data->flags & NFS_MOUNT_VER3) {
-			if (data->root.size > NFS3_FHSIZE || data->root.size == 0)
-				goto out_invalid_fh;
-			mntfh->size = data->root.size;
-			args->version = 3;
-		} else {
-			mntfh->size = NFS2_FHSIZE;
-			args->version = 2;
-		}
-
-
-		memcpy(mntfh->data, data->root.data, mntfh->size);
-		if (mntfh->size < sizeof(mntfh->data))
-			memset(mntfh->data + mntfh->size, 0,
-			       sizeof(mntfh->data) - mntfh->size);
-
-		/*
-		 * Translate to nfs_parsed_mount_data, which nfs_fill_super
-		 * can deal with.
-		 */
-		args->flags		= data->flags & NFS_MOUNT_FLAGMASK;
-		args->flags		|= extra_flags;
-		args->rsize		= data->rsize;
-		args->wsize		= data->wsize;
-		args->timeo		= data->timeo;
-		args->retrans		= data->retrans;
-		args->acregmin		= data->acregmin;
-		args->acregmax		= data->acregmax;
-		args->acdirmin		= data->acdirmin;
-		args->acdirmax		= data->acdirmax;
-		args->need_mount	= false;
-
-		memcpy(sap, &data->addr, sizeof(data->addr));
-		args->nfs_server.addrlen = sizeof(data->addr);
-		args->nfs_server.port = ntohs(data->addr.sin_port);
-		if (sap->sa_family != AF_INET ||
-		    !nfs_verify_server_address(sap))
-			goto out_no_address;
-
-		if (!(data->flags & NFS_MOUNT_TCP))
-			args->nfs_server.protocol = XPRT_TRANSPORT_UDP;
-		/* N.B. caller will free nfs_server.hostname in all cases */
-		args->nfs_server.hostname = kstrdup(data->hostname, GFP_KERNEL);
-		args->namlen		= data->namlen;
-		args->bsize		= data->bsize;
-
-		if (data->flags & NFS_MOUNT_SECFLAVOUR)
-			args->selected_flavor = data->pseudoflavor;
-		else
-			args->selected_flavor = RPC_AUTH_UNIX;
-		if (!args->nfs_server.hostname)
-			goto out_nomem;
-
-		if (!(data->flags & NFS_MOUNT_NONLM))
-			args->flags &= ~(NFS_MOUNT_LOCAL_FLOCK|
-					 NFS_MOUNT_LOCAL_FCNTL);
-		else
-			args->flags |= (NFS_MOUNT_LOCAL_FLOCK|
-					NFS_MOUNT_LOCAL_FCNTL);
-		/*
-		 * The legacy version 6 binary mount data from userspace has a
-		 * field used only to transport selinux information into the
-		 * the kernel.  To continue to support that functionality we
-		 * have a touch of selinux knowledge here in the NFS code. The
-		 * userspace code converted context=blah to just blah so we are
-		 * converting back to the full string selinux understands.
-		 */
-		if (data->context[0]){
-#ifdef CONFIG_SECURITY_SELINUX
-			int rc;
-			data->context[NFS_MAX_CONTEXT_LEN] = '\0';
-			rc = security_add_mnt_opt("context", data->context,
-					strlen(data->context), &args->lsm_opts);
-			if (rc)
-				return rc;
-#else
-			return -EINVAL;
-#endif
-		}
-
-		break;
-	default:
-		return NFS_TEXT_DATA;
-	}
-
-	return 0;
-
-out_no_data:
-	dfprintk(MOUNT, "NFS: mount program didn't pass any mount data\n");
-	return -EINVAL;
-
-out_no_v3:
-	dfprintk(MOUNT, "NFS: nfs_mount_data version %d does not support v3\n",
-		 data->version);
-	return -EINVAL;
-
-out_no_sec:
-	dfprintk(MOUNT, "NFS: nfs_mount_data version supports only AUTH_SYS\n");
-	return -EINVAL;
-
-out_nomem:
-	dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n");
-	return -ENOMEM;
-
-out_no_address:
-	dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n");
-	return -EINVAL;
-
-out_invalid_fh:
-	dfprintk(MOUNT, "NFS: invalid root filehandle\n");
-	return -EINVAL;
-}
-
-#if IS_ENABLED(CONFIG_NFS_V4)
-static int nfs_validate_mount_data(struct file_system_type *fs_type,
-				   void *options,
-				   struct nfs_parsed_mount_data *args,
-				   struct nfs_fh *mntfh,
-				   const char *dev_name)
-{
-	if (fs_type == &nfs_fs_type)
-		return nfs23_validate_mount_data(options, args, mntfh, dev_name);
-	return nfs4_validate_mount_data(options, args, dev_name);
-}
-#else
-static int nfs_validate_mount_data(struct file_system_type *fs_type,
-				   void *options,
-				   struct nfs_parsed_mount_data *args,
-				   struct nfs_fh *mntfh,
-				   const char *dev_name)
-{
-	return nfs23_validate_mount_data(options, args, mntfh, dev_name);
-}
-#endif
-
-static int nfs_validate_text_mount_data(void *options,
-					struct nfs_parsed_mount_data *args,
-					const char *dev_name)
-{
-	int port = 0;
-	int max_namelen = PAGE_SIZE;
-	int max_pathlen = NFS_MAXPATHLEN;
-	struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
-
-	if (nfs_parse_mount_options((char *)options, args) == 0)
-		return -EINVAL;
-
-	if (!nfs_verify_server_address(sap))
-		goto out_no_address;
-
-	if (args->version == 4) {
-#if IS_ENABLED(CONFIG_NFS_V4)
-		if (args->nfs_server.protocol == XPRT_TRANSPORT_RDMA)
-			port = NFS_RDMA_PORT;
-		else
-			port = NFS_PORT;
-		max_namelen = NFS4_MAXNAMLEN;
-		max_pathlen = NFS4_MAXPATHLEN;
-		nfs_validate_transport_protocol(args);
-		if (args->nfs_server.protocol == XPRT_TRANSPORT_UDP)
-			goto out_invalid_transport_udp;
-		nfs4_validate_mount_flags(args);
-#else
-		goto out_v4_not_compiled;
-#endif /* CONFIG_NFS_V4 */
-	} else {
-		nfs_set_mount_transport_protocol(args);
-		if (args->nfs_server.protocol == XPRT_TRANSPORT_RDMA)
-			port = NFS_RDMA_PORT;
-	}
-
-	nfs_set_port(sap, &args->nfs_server.port, port);
-
-	return nfs_parse_devname(dev_name,
-				   &args->nfs_server.hostname,
-				   max_namelen,
-				   &args->nfs_server.export_path,
-				   max_pathlen);
-
-#if !IS_ENABLED(CONFIG_NFS_V4)
-out_v4_not_compiled:
-	dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
-	return -EPROTONOSUPPORT;
-#else
-out_invalid_transport_udp:
-	dfprintk(MOUNT, "NFSv4: Unsupported transport protocol udp\n");
-	return -EINVAL;
-#endif /* !CONFIG_NFS_V4 */
-
-out_no_address:
-	dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n");
-	return -EINVAL;
-}
 
 #define NFS_REMOUNT_CMP_FLAGMASK ~(NFS_MOUNT_INTR \
 		| NFS_MOUNT_SECURE \
@@ -2246,39 +906,35 @@ static int nfs_validate_text_mount_data(void *options,
 
 static int
 nfs_compare_remount_data(struct nfs_server *nfss,
-			 struct nfs_parsed_mount_data *data)
+			 struct nfs_fs_context *ctx)
 {
-	if ((data->flags ^ nfss->flags) & NFS_REMOUNT_CMP_FLAGMASK ||
-	    data->rsize != nfss->rsize ||
-	    data->wsize != nfss->wsize ||
-	    data->version != nfss->nfs_client->rpc_ops->version ||
-	    data->minorversion != nfss->nfs_client->cl_minorversion ||
-	    data->retrans != nfss->client->cl_timeout->to_retries ||
-	    !nfs_auth_info_match(&data->auth_info, nfss->client->cl_auth->au_flavor) ||
-	    data->acregmin != nfss->acregmin / HZ ||
-	    data->acregmax != nfss->acregmax / HZ ||
-	    data->acdirmin != nfss->acdirmin / HZ ||
-	    data->acdirmax != nfss->acdirmax / HZ ||
-	    data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
-	    (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
-	    data->nfs_server.port != nfss->port ||
-	    data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
-	    !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
+	if ((ctx->flags ^ nfss->flags) & NFS_REMOUNT_CMP_FLAGMASK ||
+	    ctx->rsize != nfss->rsize ||
+	    ctx->wsize != nfss->wsize ||
+	    ctx->version != nfss->nfs_client->rpc_ops->version ||
+	    ctx->minorversion != nfss->nfs_client->cl_minorversion ||
+	    ctx->retrans != nfss->client->cl_timeout->to_retries ||
+	    !nfs_auth_info_match(&ctx->auth_info, nfss->client->cl_auth->au_flavor) ||
+	    ctx->acregmin != nfss->acregmin / HZ ||
+	    ctx->acregmax != nfss->acregmax / HZ ||
+	    ctx->acdirmin != nfss->acdirmin / HZ ||
+	    ctx->acdirmax != nfss->acdirmax / HZ ||
+	    ctx->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
+	    (ctx->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
+	    ctx->nfs_server.port != nfss->port ||
+	    ctx->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
+	    !rpc_cmp_addr((struct sockaddr *)&ctx->nfs_server.address,
 			  (struct sockaddr *)&nfss->nfs_client->cl_addr))
 		return -EINVAL;
 
 	return 0;
 }
 
-int
-nfs_remount(struct super_block *sb, int *flags, char *raw_data)
+int nfs_reconfigure(struct fs_context *fc)
 {
-	int error;
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
+	struct super_block *sb = fc->root->d_sb;
 	struct nfs_server *nfss = sb->s_fs_info;
-	struct nfs_parsed_mount_data *data;
-	struct nfs_mount_data *options = (struct nfs_mount_data *)raw_data;
-	struct nfs4_mount_data *options4 = (struct nfs4_mount_data *)raw_data;
-	u32 nfsvers = nfss->nfs_client->rpc_ops->version;
 
 	sync_filesystem(sb);
 
@@ -2288,92 +944,38 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
 	 * ones were explicitly specified. Fall back to legacy behavior and
 	 * just return success.
 	 */
-	if ((nfsvers == 4 && (!options4 || options4->version == 1)) ||
-	    (nfsvers <= 3 && (!options || (options->version >= 1 &&
-					   options->version <= 6))))
+	if (ctx->skip_reconfig_option_check)
 		return 0;
 
-	data = nfs_alloc_parsed_mount_data();
-	if (data == NULL)
-		return -ENOMEM;
-
-	/* fill out struct with values from existing mount */
-	data->flags = nfss->flags;
-	data->rsize = nfss->rsize;
-	data->wsize = nfss->wsize;
-	data->retrans = nfss->client->cl_timeout->to_retries;
-	data->selected_flavor = nfss->client->cl_auth->au_flavor;
-	data->acregmin = nfss->acregmin / HZ;
-	data->acregmax = nfss->acregmax / HZ;
-	data->acdirmin = nfss->acdirmin / HZ;
-	data->acdirmax = nfss->acdirmax / HZ;
-	data->timeo = 10U * nfss->client->cl_timeout->to_initval / HZ;
-	data->nfs_server.port = nfss->port;
-	data->nfs_server.addrlen = nfss->nfs_client->cl_addrlen;
-	data->version = nfsvers;
-	data->minorversion = nfss->nfs_client->cl_minorversion;
-	data->net = current->nsproxy->net_ns;
-	memcpy(&data->nfs_server.address, &nfss->nfs_client->cl_addr,
-		data->nfs_server.addrlen);
-
-	/* overwrite those values with any that were specified */
-	error = -EINVAL;
-	if (!nfs_parse_mount_options((char *)options, data))
-		goto out;
-
 	/*
 	 * noac is a special case. It implies -o sync, but that's not
-	 * necessarily reflected in the mtab options. do_remount_sb
+	 * necessarily reflected in the mtab options. reconfigure_super
 	 * will clear SB_SYNCHRONOUS if -o sync wasn't specified in the
 	 * remount options, so we have to explicitly reset it.
 	 */
-	if (data->flags & NFS_MOUNT_NOAC)
-		*flags |= SB_SYNCHRONOUS;
+	if (ctx->flags & NFS_MOUNT_NOAC) {
+		fc->sb_flags |= SB_SYNCHRONOUS;
+		fc->sb_flags_mask |= SB_SYNCHRONOUS;
+	}
 
 	/* compare new mount options with old ones */
-	error = nfs_compare_remount_data(nfss, data);
-	if (!error)
-		error = security_sb_remount(sb, data->lsm_opts);
-out:
-	nfs_free_parsed_mount_data(data);
-	return error;
+	return nfs_compare_remount_data(nfss, ctx);
 }
-EXPORT_SYMBOL_GPL(nfs_remount);
+EXPORT_SYMBOL_GPL(nfs_reconfigure);
 
 /*
- * Initialise the common bits of the superblock
+ * Finish setting up an NFS superblock
  */
-static void nfs_initialise_sb(struct super_block *sb)
+static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx)
 {
 	struct nfs_server *server = NFS_SB(sb);
 
-	sb->s_magic = NFS_SUPER_MAGIC;
-
-	/* We probably want something more informative here */
-	snprintf(sb->s_id, sizeof(sb->s_id),
-		 "%u:%u", MAJOR(sb->s_dev), MINOR(sb->s_dev));
-
-	if (sb->s_blocksize == 0)
-		sb->s_blocksize = nfs_block_bits(server->wsize,
-						 &sb->s_blocksize_bits);
-
-	nfs_super_set_maxbytes(sb, server->maxfilesize);
-}
-
-/*
- * Finish setting up an NFS2/3 superblock
- */
-void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
-{
-	struct nfs_parsed_mount_data *data = mount_info->parsed;
-	struct nfs_server *server = NFS_SB(sb);
-
 	sb->s_blocksize_bits = 0;
 	sb->s_blocksize = 0;
 	sb->s_xattr = server->nfs_client->cl_nfs_mod->xattr;
 	sb->s_op = server->nfs_client->cl_nfs_mod->sops;
-	if (data && data->bsize)
-		sb->s_blocksize = nfs_block_size(data->bsize, &sb->s_blocksize_bits);
+	if (ctx && ctx->bsize)
+		sb->s_blocksize = nfs_block_size(ctx->bsize, &sb->s_blocksize_bits);
 
 	if (server->nfs_client->rpc_ops->version != 2) {
 		/* The VFS shouldn't apply the umask to mode bits. We will do
@@ -2393,53 +995,27 @@ void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
 		sb->s_time_max = S64_MAX;
 	}
 
- 	nfs_initialise_sb(sb);
-}
-EXPORT_SYMBOL_GPL(nfs_fill_super);
+	sb->s_magic = NFS_SUPER_MAGIC;
 
-/*
- * Finish setting up a cloned NFS2/3/4 superblock
- */
-static void nfs_clone_super(struct super_block *sb,
-			    struct nfs_mount_info *mount_info)
-{
-	const struct super_block *old_sb = mount_info->cloned->sb;
-	struct nfs_server *server = NFS_SB(sb);
+	/* We probably want something more informative here */
+	snprintf(sb->s_id, sizeof(sb->s_id),
+		 "%u:%u", MAJOR(sb->s_dev), MINOR(sb->s_dev));
 
-	sb->s_blocksize_bits = old_sb->s_blocksize_bits;
-	sb->s_blocksize = old_sb->s_blocksize;
-	sb->s_maxbytes = old_sb->s_maxbytes;
-	sb->s_xattr = old_sb->s_xattr;
-	sb->s_op = old_sb->s_op;
-	sb->s_export_op = old_sb->s_export_op;
+	if (sb->s_blocksize == 0)
+		sb->s_blocksize = nfs_block_bits(server->wsize,
+						 &sb->s_blocksize_bits);
 
-	if (server->nfs_client->rpc_ops->version != 2) {
-		/* The VFS shouldn't apply the umask to mode bits. We will do
-		 * so ourselves when necessary.
-		 */
-		sb->s_flags |= SB_POSIXACL;
-		sb->s_time_gran = 1;
-	} else
-		sb->s_time_gran = 1000;
-
-	if (server->nfs_client->rpc_ops->version != 4) {
-		sb->s_time_min = 0;
-		sb->s_time_max = U32_MAX;
-	} else {
-		sb->s_time_min = S64_MIN;
-		sb->s_time_max = S64_MAX;
-	}
-
- 	nfs_initialise_sb(sb);
+	nfs_super_set_maxbytes(sb, server->maxfilesize);
 }
 
-static int nfs_compare_mount_options(const struct super_block *s, const struct nfs_server *b, int flags)
+static int nfs_compare_mount_options(const struct super_block *s, const struct nfs_server *b,
+				     const struct fs_context *fc)
 {
 	const struct nfs_server *a = s->s_fs_info;
 	const struct rpc_clnt *clnt_a = a->client;
 	const struct rpc_clnt *clnt_b = b->client;
 
-	if ((s->s_flags & NFS_MS_MASK) != (flags & NFS_MS_MASK))
+	if ((s->s_flags & NFS_SB_MASK) != (fc->sb_flags & NFS_SB_MASK))
 		goto Ebusy;
 	if (a->nfs_client != b->nfs_client)
 		goto Ebusy;
@@ -2464,19 +1040,11 @@ static int nfs_compare_mount_options(const struct super_block *s, const struct n
 	return 0;
 }
 
-struct nfs_sb_mountdata {
-	struct nfs_server *server;
-	int mntflags;
-};
-
-static int nfs_set_super(struct super_block *s, void *data)
+static int nfs_set_super(struct super_block *s, struct fs_context *fc)
 {
-	struct nfs_sb_mountdata *sb_mntdata = data;
-	struct nfs_server *server = sb_mntdata->server;
+	struct nfs_server *server = fc->s_fs_info;
 	int ret;
 
-	s->s_flags = sb_mntdata->mntflags;
-	s->s_fs_info = server;
 	s->s_d_op = server->nfs_client->rpc_ops->dentry_ops;
 	ret = set_anon_super(s, server);
 	if (ret == 0)
@@ -2541,11 +1109,9 @@ static int nfs_compare_userns(const struct nfs_server *old,
 	return 1;
 }
 
-static int nfs_compare_super(struct super_block *sb, void *data)
+static int nfs_compare_super(struct super_block *sb, struct fs_context *fc)
 {
-	struct nfs_sb_mountdata *sb_mntdata = data;
-	struct nfs_server *server = sb_mntdata->server, *old = NFS_SB(sb);
-	int mntflags = sb_mntdata->mntflags;
+	struct nfs_server *server = fc->s_fs_info, *old = NFS_SB(sb);
 
 	if (!nfs_compare_super_address(old, server))
 		return 0;
@@ -2556,13 +1122,12 @@ static int nfs_compare_super(struct super_block *sb, void *data)
 		return 0;
 	if (!nfs_compare_userns(old, server))
 		return 0;
-	return nfs_compare_mount_options(sb, server, mntflags);
+	return nfs_compare_mount_options(sb, server, fc);
 }
 
 #ifdef CONFIG_NFS_FSCACHE
 static void nfs_get_cache_cookie(struct super_block *sb,
-				 struct nfs_parsed_mount_data *parsed,
-				 struct nfs_clone_mount *cloned)
+				 struct nfs_fs_context *ctx)
 {
 	struct nfs_server *nfss = NFS_SB(sb);
 	char *uniq = NULL;
@@ -2571,80 +1136,36 @@ static void nfs_get_cache_cookie(struct super_block *sb,
 	nfss->fscache_key = NULL;
 	nfss->fscache = NULL;
 
-	if (parsed) {
-		if (!(parsed->options & NFS_OPTION_FSCACHE))
-			return;
-		if (parsed->fscache_uniq) {
-			uniq = parsed->fscache_uniq;
-			ulen = strlen(parsed->fscache_uniq);
-		}
-	} else if (cloned) {
-		struct nfs_server *mnt_s = NFS_SB(cloned->sb);
+	if (!ctx)
+		return;
+
+	if (ctx->clone_data.sb) {
+		struct nfs_server *mnt_s = NFS_SB(ctx->clone_data.sb);
 		if (!(mnt_s->options & NFS_OPTION_FSCACHE))
 			return;
 		if (mnt_s->fscache_key) {
 			uniq = mnt_s->fscache_key->key.uniquifier;
 			ulen = mnt_s->fscache_key->key.uniq_len;
 		}
-	} else
+	} else {
+		if (!(ctx->options & NFS_OPTION_FSCACHE))
+			return;
+		if (ctx->fscache_uniq) {
+			uniq = ctx->fscache_uniq;
+			ulen = strlen(ctx->fscache_uniq);
+		}
 		return;
+	}
 
 	nfs_fscache_get_super_cookie(sb, uniq, ulen);
 }
 #else
 static void nfs_get_cache_cookie(struct super_block *sb,
-				 struct nfs_parsed_mount_data *parsed,
-				 struct nfs_clone_mount *cloned)
+				 struct nfs_fs_context *ctx)
 {
 }
 #endif
 
-int nfs_set_sb_security(struct super_block *s, struct dentry *mntroot,
-			struct nfs_mount_info *mount_info)
-{
-	int error;
-	unsigned long kflags = 0, kflags_out = 0;
-	if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL)
-		kflags |= SECURITY_LSM_NATIVE_LABELS;
-
-	error = security_sb_set_mnt_opts(s, mount_info->parsed->lsm_opts,
-						kflags, &kflags_out);
-	if (error)
-		goto err;
-
-	if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL &&
-		!(kflags_out & SECURITY_LSM_NATIVE_LABELS))
-		NFS_SB(s)->caps &= ~NFS_CAP_SECURITY_LABEL;
-err:
-	return error;
-}
-EXPORT_SYMBOL_GPL(nfs_set_sb_security);
-
-int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
-			  struct nfs_mount_info *mount_info)
-{
-	int error;
-	unsigned long kflags = 0, kflags_out = 0;
-
-	/* clone any lsm security options from the parent to the new sb */
-	if (d_inode(mntroot)->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops)
-		return -ESTALE;
-
-	if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL)
-		kflags |= SECURITY_LSM_NATIVE_LABELS;
-
-	error = security_sb_clone_mnt_opts(mount_info->cloned->sb, s, kflags,
-			&kflags_out);
-	if (error)
-		return error;
-
-	if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL &&
-		!(kflags_out & SECURITY_LSM_NATIVE_LABELS))
-		NFS_SB(s)->caps &= ~NFS_CAP_SECURITY_LABEL;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(nfs_clone_sb_security);
-
 static void nfs_set_readahead(struct backing_dev_info *bdi,
 			      unsigned long iomax_pages)
 {
@@ -2652,35 +1173,40 @@ static void nfs_set_readahead(struct backing_dev_info *bdi,
 	bdi->io_pages = iomax_pages;
 }
 
-struct dentry *nfs_fs_mount_common(struct nfs_server *server,
-				   int flags, const char *dev_name,
-				   struct nfs_mount_info *mount_info,
-				   struct nfs_subversion *nfs_mod)
+int nfs_get_tree_common(struct fs_context *fc)
 {
+	struct nfs_fs_context *ctx = nfs_fc2context(fc);
 	struct super_block *s;
-	struct dentry *mntroot = ERR_PTR(-ENOMEM);
-	int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
-	struct nfs_sb_mountdata sb_mntdata = {
-		.mntflags = flags,
-		.server = server,
-	};
+	int (*compare_super)(struct super_block *, struct fs_context *) = nfs_compare_super;
+	struct nfs_server *server = ctx->server;
+	unsigned long kflags = 0, kflags_out = 0;
 	int error;
 
+	ctx->server = NULL;
+	if (IS_ERR(server))
+		return PTR_ERR(server);
+
 	if (server->flags & NFS_MOUNT_UNSHARED)
 		compare_super = NULL;
 
 	/* -o noac implies -o sync */
 	if (server->flags & NFS_MOUNT_NOAC)
-		sb_mntdata.mntflags |= SB_SYNCHRONOUS;
+		fc->sb_flags |= SB_SYNCHRONOUS;
 
-	if (mount_info->cloned != NULL && mount_info->cloned->sb != NULL)
-		if (mount_info->cloned->sb->s_flags & SB_SYNCHRONOUS)
-			sb_mntdata.mntflags |= SB_SYNCHRONOUS;
+	if (ctx->clone_data.sb)
+		if (ctx->clone_data.sb->s_flags & SB_SYNCHRONOUS)
+			fc->sb_flags |= SB_SYNCHRONOUS;
+
+	if (server->caps & NFS_CAP_SECURITY_LABEL)
+		fc->lsm_flags |= SECURITY_LSM_NATIVE_LABELS;
 
 	/* Get a superblock - note that we may end up sharing one that already exists */
-	s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata);
+	fc->s_fs_info = server;
+	s = sget_fc(fc, compare_super, nfs_set_super);
+	fc->s_fs_info = NULL;
 	if (IS_ERR(s)) {
-		mntroot = ERR_CAST(s);
+		error = PTR_ERR(s);
+		nfs_errorf(fc, "NFS: Couldn't get superblock");
 		goto out_err_nosb;
 	}
 
@@ -2690,88 +1216,66 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
 	} else {
 		error = super_setup_bdi_name(s, "%u:%u", MAJOR(server->s_dev),
 					     MINOR(server->s_dev));
-		if (error) {
-			mntroot = ERR_PTR(error);
+		if (error)
 			goto error_splat_super;
-		}
 		nfs_set_readahead(s->s_bdi, server->rpages);
 		server->super = s;
 	}
 
 	if (!s->s_root) {
+		unsigned bsize = ctx->clone_data.inherited_bsize;
 		/* initial superblock/root creation */
-		mount_info->fill_super(s, mount_info);
-		nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
-		if (!(server->flags & NFS_MOUNT_UNSHARED))
-			s->s_iflags |= SB_I_MULTIROOT;
+		nfs_fill_super(s, ctx);
+		if (bsize) {
+			s->s_blocksize_bits = bsize;
+			s->s_blocksize = 1U << bsize;
+		}
+		nfs_get_cache_cookie(s, ctx);
 	}
 
-	mntroot = nfs_get_root(s, mount_info->mntfh, dev_name);
-	if (IS_ERR(mntroot))
+	error = nfs_get_root(s, fc);
+	if (error < 0) {
+		nfs_errorf(fc, "NFS: Couldn't get root dentry");
 		goto error_splat_super;
+	}
 
-	error = mount_info->set_security(s, mntroot, mount_info);
+	if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL)
+		kflags |= SECURITY_LSM_NATIVE_LABELS;
+	if (ctx->clone_data.sb) {
+		if (d_inode(fc->root)->i_fop != &nfs_dir_operations) {
+			error = -ESTALE;
+			goto error_splat_root;
+		}
+		/* clone any lsm security options from the parent to the new sb */
+		error = security_sb_clone_mnt_opts(ctx->clone_data.sb, s, kflags,
+				&kflags_out);
+	} else {
+		error = security_sb_set_mnt_opts(s, fc->security,
+							kflags, &kflags_out);
+	}
 	if (error)
 		goto error_splat_root;
+	if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL &&
+		!(kflags_out & SECURITY_LSM_NATIVE_LABELS))
+		NFS_SB(s)->caps &= ~NFS_CAP_SECURITY_LABEL;
 
 	s->s_flags |= SB_ACTIVE;
+	error = 0;
 
 out:
-	return mntroot;
+	return error;
 
 out_err_nosb:
 	nfs_free_server(server);
 	goto out;
 
 error_splat_root:
-	dput(mntroot);
-	mntroot = ERR_PTR(error);
+	dput(fc->root);
+	fc->root = NULL;
 error_splat_super:
 	deactivate_locked_super(s);
 	goto out;
 }
-EXPORT_SYMBOL_GPL(nfs_fs_mount_common);
-
-struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *raw_data)
-{
-	struct nfs_mount_info mount_info = {
-		.fill_super = nfs_fill_super,
-		.set_security = nfs_set_sb_security,
-	};
-	struct dentry *mntroot = ERR_PTR(-ENOMEM);
-	struct nfs_subversion *nfs_mod;
-	int error;
-
-	mount_info.parsed = nfs_alloc_parsed_mount_data();
-	mount_info.mntfh = nfs_alloc_fhandle();
-	if (mount_info.parsed == NULL || mount_info.mntfh == NULL)
-		goto out;
-
-	/* Validate the mount data */
-	error = nfs_validate_mount_data(fs_type, raw_data, mount_info.parsed, mount_info.mntfh, dev_name);
-	if (error == NFS_TEXT_DATA)
-		error = nfs_validate_text_mount_data(raw_data, mount_info.parsed, dev_name);
-	if (error < 0) {
-		mntroot = ERR_PTR(error);
-		goto out;
-	}
-
-	nfs_mod = get_nfs_version(mount_info.parsed->version);
-	if (IS_ERR(nfs_mod)) {
-		mntroot = ERR_CAST(nfs_mod);
-		goto out;
-	}
-
-	mntroot = nfs_mod->rpc_ops->try_mount(flags, dev_name, &mount_info, nfs_mod);
-
-	put_nfs_version(nfs_mod);
-out:
-	nfs_free_parsed_mount_data(mount_info.parsed);
-	nfs_free_fhandle(mount_info.mntfh);
-	return mntroot;
-}
-EXPORT_SYMBOL_GPL(nfs_fs_mount);
 
 /*
  * Destroy an NFS2/3 superblock
@@ -2790,150 +1294,8 @@ void nfs_kill_super(struct super_block *s)
 }
 EXPORT_SYMBOL_GPL(nfs_kill_super);
 
-/*
- * Clone an NFS2/3/4 server record on xdev traversal (FSID-change)
- */
-static struct dentry *
-nfs_xdev_mount(struct file_system_type *fs_type, int flags,
-		const char *dev_name, void *raw_data)
-{
-	struct nfs_clone_mount *data = raw_data;
-	struct nfs_mount_info mount_info = {
-		.fill_super = nfs_clone_super,
-		.set_security = nfs_clone_sb_security,
-		.cloned = data,
-	};
-	struct nfs_server *server;
-	struct dentry *mntroot = ERR_PTR(-ENOMEM);
-	struct nfs_subversion *nfs_mod = NFS_SB(data->sb)->nfs_client->cl_nfs_mod;
-
-	dprintk("--> nfs_xdev_mount()\n");
-
-	mount_info.mntfh = mount_info.cloned->fh;
-
-	/* create a new volume representation */
-	server = nfs_mod->rpc_ops->clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
-
-	if (IS_ERR(server))
-		mntroot = ERR_CAST(server);
-	else
-		mntroot = nfs_fs_mount_common(server, flags,
-				dev_name, &mount_info, nfs_mod);
-
-	dprintk("<-- nfs_xdev_mount() = %ld\n",
-			IS_ERR(mntroot) ? PTR_ERR(mntroot) : 0L);
-	return mntroot;
-}
-
 #if IS_ENABLED(CONFIG_NFS_V4)
 
-static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *args)
-{
-	args->flags &= ~(NFS_MOUNT_NONLM|NFS_MOUNT_NOACL|NFS_MOUNT_VER3|
-			 NFS_MOUNT_LOCAL_FLOCK|NFS_MOUNT_LOCAL_FCNTL);
-}
-
-/*
- * Validate NFSv4 mount options
- */
-static int nfs4_validate_mount_data(void *options,
-				    struct nfs_parsed_mount_data *args,
-				    const char *dev_name)
-{
-	struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
-	struct nfs4_mount_data *data = (struct nfs4_mount_data *)options;
-	char *c;
-
-	if (data == NULL)
-		goto out_no_data;
-
-	args->version = 4;
-
-	switch (data->version) {
-	case 1:
-		if (data->host_addrlen > sizeof(args->nfs_server.address))
-			goto out_no_address;
-		if (data->host_addrlen == 0)
-			goto out_no_address;
-		args->nfs_server.addrlen = data->host_addrlen;
-		if (copy_from_user(sap, data->host_addr, data->host_addrlen))
-			return -EFAULT;
-		if (!nfs_verify_server_address(sap))
-			goto out_no_address;
-		args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
-
-		if (data->auth_flavourlen) {
-			rpc_authflavor_t pseudoflavor;
-			if (data->auth_flavourlen > 1)
-				goto out_inval_auth;
-			if (copy_from_user(&pseudoflavor,
-					   data->auth_flavours,
-					   sizeof(pseudoflavor)))
-				return -EFAULT;
-			args->selected_flavor = pseudoflavor;
-		} else
-			args->selected_flavor = RPC_AUTH_UNIX;
-
-		c = strndup_user(data->hostname.data, NFS4_MAXNAMLEN);
-		if (IS_ERR(c))
-			return PTR_ERR(c);
-		args->nfs_server.hostname = c;
-
-		c = strndup_user(data->mnt_path.data, NFS4_MAXPATHLEN);
-		if (IS_ERR(c))
-			return PTR_ERR(c);
-		args->nfs_server.export_path = c;
-		dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", c);
-
-		c = strndup_user(data->client_addr.data, 16);
-		if (IS_ERR(c))
-			return PTR_ERR(c);
-		args->client_address = c;
-
-		/*
-		 * Translate to nfs_parsed_mount_data, which nfs4_fill_super
-		 * can deal with.
-		 */
-
-		args->flags	= data->flags & NFS4_MOUNT_FLAGMASK;
-		args->rsize	= data->rsize;
-		args->wsize	= data->wsize;
-		args->timeo	= data->timeo;
-		args->retrans	= data->retrans;
-		args->acregmin	= data->acregmin;
-		args->acregmax	= data->acregmax;
-		args->acdirmin	= data->acdirmin;
-		args->acdirmax	= data->acdirmax;
-		args->nfs_server.protocol = data->proto;
-		nfs_validate_transport_protocol(args);
-		if (args->nfs_server.protocol == XPRT_TRANSPORT_UDP)
-			goto out_invalid_transport_udp;
-
-		break;
-	default:
-		return NFS_TEXT_DATA;
-	}
-
-	return 0;
-
-out_no_data:
-	dfprintk(MOUNT, "NFS4: mount program didn't pass any mount data\n");
-	return -EINVAL;
-
-out_inval_auth:
-	dfprintk(MOUNT, "NFS4: Invalid number of RPC auth flavours %d\n",
-		 data->auth_flavourlen);
-	return -EINVAL;
-
-out_no_address:
-	dfprintk(MOUNT, "NFS4: mount program didn't pass remote address\n");
-	return -EINVAL;
-
-out_invalid_transport_udp:
-	dfprintk(MOUNT, "NFSv4: Unsupported transport protocol udp\n");
-	return -EINVAL;
-}
-
 /*
  * NFS v4 module parameters need to stay in the
  * NFS client for backwards compatibility
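
[Aside, not part of the patch] The fs/nfs/super.c hunks above convert NFS mounting from the old mount_fs()/sget() path to the fs_context API: nfs_get_tree_common() looks up or creates the superblock with sget_fc(), fills it in with nfs_fill_super(), applies the LSM mount options itself, and reports success by setting fc->root rather than returning a dentry. For orientation, here is a minimal sketch of that get_tree pattern for a generic filesystem; the names (myfs_get_tree, the magic number) are hypothetical and the error handling is reduced to the essentials.

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/dcache.h>

static int myfs_get_tree(struct fs_context *fc)
{
	struct super_block *sb;
	struct inode *root;

	/* Look up or allocate a superblock keyed by this fs_context. */
	sb = sget_fc(fc, NULL, set_anon_super_fc);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		/* First mount: initialise the superblock and its root. */
		sb->s_magic = 0x6d796673;	/* hypothetical magic */
		root = new_inode(sb);
		if (!root)
			goto out_dead;
		root->i_ino = 1;
		root->i_mode = S_IFDIR | 0755;
		sb->s_root = d_make_root(root);
		if (!sb->s_root)
			goto out_dead;
		sb->s_flags |= SB_ACTIVE;
	}

	/* Success is signalled through fc->root, not a returned dentry. */
	fc->root = dget(sb->s_root);
	return 0;

out_dead:
	deactivate_locked_super(sb);
	return -ENOMEM;
}
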
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 52cab65..c478b77 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -243,13 +243,24 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c
 /* A writeback failed: mark the page as bad, and invalidate the page cache */
 static void nfs_set_pageerror(struct address_space *mapping)
 {
+	struct inode *inode = mapping->host;
+
 	nfs_zap_mapping(mapping->host, mapping);
+	/* Force file size revalidation */
+	spin_lock(&inode->i_lock);
+	NFS_I(inode)->cache_validity |= NFS_INO_REVAL_FORCED |
+					NFS_INO_REVAL_PAGECACHE |
+					NFS_INO_INVALID_SIZE;
+	spin_unlock(&inode->i_lock);
 }
 
 static void nfs_mapping_set_error(struct page *page, int error)
 {
+	struct address_space *mapping = page_file_mapping(page);
+
 	SetPageError(page);
-	mapping_set_error(page_file_mapping(page), error);
+	mapping_set_error(mapping, error);
+	nfs_set_pageerror(mapping);
 }
 
 /*
@@ -592,7 +603,7 @@ nfs_lock_and_join_requests(struct page *page)
 
 static void nfs_write_error(struct nfs_page *req, int error)
 {
-	nfs_set_pageerror(page_file_mapping(req->wb_page));
+	trace_nfs_write_error(req, error);
 	nfs_mapping_set_error(req->wb_page, error);
 	nfs_inode_remove_request(req);
 	nfs_end_page_writeback(req);
@@ -998,7 +1009,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
 		nfs_list_remove_request(req);
 		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
 		    (hdr->good_bytes < bytes)) {
-			nfs_set_pageerror(page_file_mapping(req->wb_page));
+			trace_nfs_comp_error(req, hdr->error);
 			nfs_mapping_set_error(req->wb_page, hdr->error);
 			goto remove_req;
 		}
@@ -1403,8 +1414,7 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr,
 
 	task_setup_data->priority = priority;
 	rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
-	trace_nfs_initiate_write(hdr->inode, hdr->io_start, hdr->good_bytes,
-				 hdr->args.stable);
+	trace_nfs_initiate_write(hdr);
 }
 
 /* If a nfs_flush_* function fails, it should remove reqs from @head and
@@ -1568,8 +1578,7 @@ static int nfs_writeback_done(struct rpc_task *task,
 		return status;
 
 	nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
-	trace_nfs_writeback_done(inode, task->tk_status,
-				 hdr->args.offset, hdr->res.verf);
+	trace_nfs_writeback_done(task, hdr);
 
 	if (hdr->res.verf->committed < hdr->args.stable &&
 	    task->tk_status >= 0) {
@@ -1649,6 +1658,8 @@ static void nfs_writeback_result(struct rpc_task *task,
 			 */
 			argp->stable = NFS_FILE_SYNC;
 		}
+		resp->count = 0;
+		resp->verf->committed = 0;
 		rpc_restart_call_prepare(task);
 	}
 }
@@ -1824,11 +1835,12 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
 
 	/* Call the NFS version-specific code */
 	NFS_PROTO(data->inode)->commit_done(task, data);
-	trace_nfs_commit_done(data);
+	trace_nfs_commit_done(task, data);
 }
 
 static void nfs_commit_release_pages(struct nfs_commit_data *data)
 {
+	const struct nfs_writeverf *verf = data->res.verf;
 	struct nfs_page	*req;
 	int status = data->task.tk_status;
 	struct nfs_commit_info cinfo;
@@ -1847,6 +1859,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
 			(long long)req_offset(req));
 		if (status < 0) {
 			if (req->wb_page) {
+				trace_nfs_commit_error(req, status);
 				nfs_mapping_set_error(req->wb_page, status);
 				nfs_inode_remove_request(req);
 			}
@@ -1856,7 +1869,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
 
 		/* Okay, COMMIT succeeded, apparently. Check the verifier
 		 * returned by the server against all stored verfs. */
-		if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
+		if (verf->committed > NFS_UNSTABLE &&
+		    !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier)) {
 			/* We have a match */
 			if (req->wb_page)
 				nfs_inode_remove_request(req);
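
[Aside, not part of the patch] The fs/nfs/write.c hunks above make three related changes: a writeback error now also forces size revalidation via nfs_set_pageerror(), the restart path in nfs_writeback_result() zeroes resp->count and resp->verf->committed so stale reply data cannot be mistaken for server state, and nfs_commit_release_pages() only trusts the verifier comparison when the server actually reported stable data (verf->committed > NFS_UNSTABLE). The guard matters because the verifier is an opaque blob compared bytewise; the sketch below mirrors my understanding of the helper in fs/nfs/internal.h and is illustrative only.

#include <linux/string.h>
#include <linux/nfs_xdr.h>

/* Sketch: an unfilled (all-zero) verifier could compare equal purely by
 * accident, hence the committed > NFS_UNSTABLE check in the hunk above.
 */
static int nfs_write_verifier_cmp_sketch(const struct nfs_write_verifier *v1,
					 const struct nfs_write_verifier *v2)
{
	return memcmp(v1->data, v2->data, sizeof(v1->data));
}
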
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index f2f8156..f368f32 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -134,6 +134,16 @@
 
 	  If unsure, say N.
 
+config NFSD_V4_2_INTER_SSC
+	bool "NFSv4.2 inter server to server COPY"
+	depends on NFSD_V4 && NFS_V4_1 && NFS_V4_2
+	help
+	  This option enables support for NFSv4.2 inter server to
+	  server copy where the destination server calls the NFSv4.2
+	  client to read the data to copy from the source server.
+
+	  If unsure, say N.
+
 config NFSD_V4_SECURITY_LABEL
 	bool "Provide Security Label support for NFSv4 server"
 	depends on NFSD_V4 && SECURITY
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index 32a9bf2..22e77ed 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -27,7 +27,6 @@
 #define NFSD_FILE_HASH_SIZE                  (1 << NFSD_FILE_HASH_BITS)
 #define NFSD_LAUNDRETTE_DELAY		     (2 * HZ)
 
-#define NFSD_FILE_LRU_RESCAN		     (0)
 #define NFSD_FILE_SHUTDOWN		     (1)
 #define NFSD_FILE_LRU_THRESHOLD		     (4096UL)
 #define NFSD_FILE_LRU_LIMIT		     (NFSD_FILE_LRU_THRESHOLD << 2)
@@ -44,6 +43,17 @@ struct nfsd_fcache_bucket {
 
 static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);
 
+struct nfsd_fcache_disposal {
+	struct list_head list;
+	struct work_struct work;
+	struct net *net;
+	spinlock_t lock;
+	struct list_head freeme;
+	struct rcu_head rcu;
+};
+
+static struct workqueue_struct *nfsd_filecache_wq __read_mostly;
+
 static struct kmem_cache		*nfsd_file_slab;
 static struct kmem_cache		*nfsd_file_mark_slab;
 static struct nfsd_fcache_bucket	*nfsd_file_hashtbl;
@@ -52,32 +62,21 @@ static long				nfsd_file_lru_flags;
 static struct fsnotify_group		*nfsd_file_fsnotify_group;
 static atomic_long_t			nfsd_filecache_count;
 static struct delayed_work		nfsd_filecache_laundrette;
+static DEFINE_SPINLOCK(laundrette_lock);
+static LIST_HEAD(laundrettes);
 
-enum nfsd_file_laundrette_ctl {
-	NFSD_FILE_LAUNDRETTE_NOFLUSH = 0,
-	NFSD_FILE_LAUNDRETTE_MAY_FLUSH
-};
+static void nfsd_file_gc(void);
 
 static void
-nfsd_file_schedule_laundrette(enum nfsd_file_laundrette_ctl ctl)
+nfsd_file_schedule_laundrette(void)
 {
 	long count = atomic_long_read(&nfsd_filecache_count);
 
 	if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
 		return;
 
-	/* Be more aggressive about scanning if over the threshold */
-	if (count > NFSD_FILE_LRU_THRESHOLD)
-		mod_delayed_work(system_wq, &nfsd_filecache_laundrette, 0);
-	else
-		schedule_delayed_work(&nfsd_filecache_laundrette, NFSD_LAUNDRETTE_DELAY);
-
-	if (ctl == NFSD_FILE_LAUNDRETTE_NOFLUSH)
-		return;
-
-	/* ...and don't delay flushing if we're out of control */
-	if (count >= NFSD_FILE_LRU_LIMIT)
-		flush_delayed_work(&nfsd_filecache_laundrette);
+	queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
+			NFSD_LAUNDRETTE_DELAY);
 }
 
 static void
@@ -101,7 +100,7 @@ nfsd_file_mark_free(struct fsnotify_mark *mark)
 static struct nfsd_file_mark *
 nfsd_file_mark_get(struct nfsd_file_mark *nfm)
 {
-	if (!atomic_inc_not_zero(&nfm->nfm_ref))
+	if (!refcount_inc_not_zero(&nfm->nfm_ref))
 		return NULL;
 	return nfm;
 }
@@ -109,8 +108,7 @@ nfsd_file_mark_get(struct nfsd_file_mark *nfm)
 static void
 nfsd_file_mark_put(struct nfsd_file_mark *nfm)
 {
-	if (atomic_dec_and_test(&nfm->nfm_ref)) {
-
+	if (refcount_dec_and_test(&nfm->nfm_ref)) {
 		fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
 		fsnotify_put_mark(&nfm->nfm_mark);
 	}
@@ -133,9 +131,13 @@ nfsd_file_mark_find_or_create(struct nfsd_file *nf)
 						 struct nfsd_file_mark,
 						 nfm_mark));
 			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
-			fsnotify_put_mark(mark);
-			if (likely(nfm))
+			if (nfm) {
+				fsnotify_put_mark(mark);
 				break;
+			}
+			/* Avoid soft lockup race with nfsd_file_mark_put() */
+			fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group);
+			fsnotify_put_mark(mark);
 		} else
 			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
 
@@ -145,7 +147,7 @@ nfsd_file_mark_find_or_create(struct nfsd_file *nf)
 			return NULL;
 		fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
 		new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
-		atomic_set(&new->nfm_ref, 1);
+		refcount_set(&new->nfm_ref, 1);
 
 		err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);
 
@@ -183,7 +185,7 @@ nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
 		nf->nf_flags = 0;
 		nf->nf_inode = inode;
 		nf->nf_hashval = hashval;
-		atomic_set(&nf->nf_ref, 1);
+		refcount_set(&nf->nf_ref, 1);
 		nf->nf_may = may & NFSD_FILE_MAY_MASK;
 		if (may & NFSD_MAY_NOT_BREAK_LEASE) {
 			if (may & NFSD_MAY_WRITE)
@@ -192,6 +194,7 @@ nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
 				__set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
 		}
 		nf->nf_mark = NULL;
+		init_rwsem(&nf->nf_rwsem);
 		trace_nfsd_file_alloc(nf);
 	}
 	return nf;
@@ -238,13 +241,6 @@ nfsd_file_check_write_error(struct nfsd_file *nf)
 	return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
 }
 
-static bool
-nfsd_file_in_use(struct nfsd_file *nf)
-{
-	return nfsd_file_check_writeback(nf) ||
-			nfsd_file_check_write_error(nf);
-}
-
 static void
 nfsd_file_do_unhash(struct nfsd_file *nf)
 {
@@ -256,8 +252,6 @@ nfsd_file_do_unhash(struct nfsd_file *nf)
 		nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id));
 	--nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
 	hlist_del_rcu(&nf->nf_node);
-	if (!list_empty(&nf->nf_lru))
-		list_lru_del(&nfsd_file_lru, &nf->nf_lru);
 	atomic_long_dec(&nfsd_filecache_count);
 }
 
@@ -266,6 +260,8 @@ nfsd_file_unhash(struct nfsd_file *nf)
 {
 	if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
 		nfsd_file_do_unhash(nf);
+		if (!list_empty(&nf->nf_lru))
+			list_lru_del(&nfsd_file_lru, &nf->nf_lru);
 		return true;
 	}
 	return false;
@@ -283,42 +279,48 @@ nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *disp
 	if (!nfsd_file_unhash(nf))
 		return false;
 	/* keep final reference for nfsd_file_lru_dispose */
-	if (atomic_add_unless(&nf->nf_ref, -1, 1))
+	if (refcount_dec_not_one(&nf->nf_ref))
 		return true;
 
 	list_add(&nf->nf_lru, dispose);
 	return true;
 }
 
-static int
+static void
 nfsd_file_put_noref(struct nfsd_file *nf)
 {
-	int count;
 	trace_nfsd_file_put(nf);
 
-	count = atomic_dec_return(&nf->nf_ref);
-	if (!count) {
+	if (refcount_dec_and_test(&nf->nf_ref)) {
 		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
 		nfsd_file_free(nf);
 	}
-	return count;
 }
 
 void
 nfsd_file_put(struct nfsd_file *nf)
 {
-	bool is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
-	bool unused = !nfsd_file_in_use(nf);
+	bool is_hashed;
 
 	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
-	if (nfsd_file_put_noref(nf) == 1 && is_hashed && unused)
-		nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_MAY_FLUSH);
+	if (refcount_read(&nf->nf_ref) > 2 || !nf->nf_file) {
+		nfsd_file_put_noref(nf);
+		return;
+	}
+
+	filemap_flush(nf->nf_file->f_mapping);
+	is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
+	nfsd_file_put_noref(nf);
+	if (is_hashed)
+		nfsd_file_schedule_laundrette();
+	if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
+		nfsd_file_gc();
 }
 
 struct nfsd_file *
 nfsd_file_get(struct nfsd_file *nf)
 {
-	if (likely(atomic_inc_not_zero(&nf->nf_ref)))
+	if (likely(refcount_inc_not_zero(&nf->nf_ref)))
 		return nf;
 	return NULL;
 }
@@ -344,7 +346,7 @@ nfsd_file_dispose_list_sync(struct list_head *dispose)
 	while(!list_empty(dispose)) {
 		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
 		list_del(&nf->nf_lru);
-		if (!atomic_dec_and_test(&nf->nf_ref))
+		if (!refcount_dec_and_test(&nf->nf_ref))
 			continue;
 		if (nfsd_file_free(nf))
 			flush = true;
@@ -353,6 +355,58 @@ nfsd_file_dispose_list_sync(struct list_head *dispose)
 		flush_delayed_fput();
 }
 
+static void
+nfsd_file_list_remove_disposal(struct list_head *dst,
+		struct nfsd_fcache_disposal *l)
+{
+	spin_lock(&l->lock);
+	list_splice_init(&l->freeme, dst);
+	spin_unlock(&l->lock);
+}
+
+static void
+nfsd_file_list_add_disposal(struct list_head *files, struct net *net)
+{
+	struct nfsd_fcache_disposal *l;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(l, &laundrettes, list) {
+		if (l->net == net) {
+			spin_lock(&l->lock);
+			list_splice_tail_init(files, &l->freeme);
+			spin_unlock(&l->lock);
+			queue_work(nfsd_filecache_wq, &l->work);
+			break;
+		}
+	}
+	rcu_read_unlock();
+}
+
+static void
+nfsd_file_list_add_pernet(struct list_head *dst, struct list_head *src,
+		struct net *net)
+{
+	struct nfsd_file *nf, *tmp;
+
+	list_for_each_entry_safe(nf, tmp, src, nf_lru) {
+		if (nf->nf_net == net)
+			list_move_tail(&nf->nf_lru, dst);
+	}
+}
+
+static void
+nfsd_file_dispose_list_delayed(struct list_head *dispose)
+{
+	LIST_HEAD(list);
+	struct nfsd_file *nf;
+
+	while(!list_empty(dispose)) {
+		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
+		nfsd_file_list_add_pernet(&list, dispose, nf->nf_net);
+		nfsd_file_list_add_disposal(&list, nf->nf_net);
+	}
+}
+
 /*
  * Note this can deadlock with nfsd_file_cache_purge.
  */
@@ -375,7 +429,7 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
 	 * counter. Here we check the counter and then test and clear the flag.
 	 * That order is deliberate to ensure that we can do this locklessly.
 	 */
-	if (atomic_read(&nf->nf_ref) > 1)
+	if (refcount_read(&nf->nf_ref) > 1)
 		goto out_skip;
 
 	/*
@@ -386,31 +440,51 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
 		goto out_skip;
 
 	if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
-		goto out_rescan;
+		goto out_skip;
 
 	if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
 		goto out_skip;
 
 	list_lru_isolate_move(lru, &nf->nf_lru, head);
 	return LRU_REMOVED;
-out_rescan:
-	set_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags);
 out_skip:
 	return LRU_SKIP;
 }
 
-static void
-nfsd_file_lru_dispose(struct list_head *head)
+static unsigned long
+nfsd_file_lru_walk_list(struct shrink_control *sc)
 {
-	while(!list_empty(head)) {
-		struct nfsd_file *nf = list_first_entry(head,
-				struct nfsd_file, nf_lru);
-		list_del_init(&nf->nf_lru);
+	LIST_HEAD(head);
+	struct nfsd_file *nf;
+	unsigned long ret;
+
+	if (sc)
+		ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
+				nfsd_file_lru_cb, &head);
+	else
+		ret = list_lru_walk(&nfsd_file_lru,
+				nfsd_file_lru_cb,
+				&head, LONG_MAX);
+	list_for_each_entry(nf, &head, nf_lru) {
 		spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
 		nfsd_file_do_unhash(nf);
 		spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
-		nfsd_file_put_noref(nf);
 	}
+	nfsd_file_dispose_list_delayed(&head);
+	return ret;
+}
+
+static void
+nfsd_file_gc(void)
+{
+	nfsd_file_lru_walk_list(NULL);
+}
+
+static void
+nfsd_file_gc_worker(struct work_struct *work)
+{
+	nfsd_file_gc();
+	nfsd_file_schedule_laundrette();
 }
 
 static unsigned long
@@ -422,12 +496,7 @@ nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
 static unsigned long
 nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
 {
-	LIST_HEAD(head);
-	unsigned long ret;
-
-	ret = list_lru_shrink_walk(&nfsd_file_lru, sc, nfsd_file_lru_cb, &head);
-	nfsd_file_lru_dispose(&head);
-	return ret;
+	return nfsd_file_lru_walk_list(sc);
 }
 
 static struct shrinker	nfsd_file_shrinker = {
@@ -489,7 +558,7 @@ nfsd_file_close_inode(struct inode *inode)
 
 	__nfsd_file_close_inode(inode, hashval, &dispose);
 	trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
-	nfsd_file_dispose_list(&dispose);
+	nfsd_file_dispose_list_delayed(&dispose);
 }
 
 /**
@@ -505,16 +574,11 @@ static void
 nfsd_file_delayed_close(struct work_struct *work)
 {
 	LIST_HEAD(head);
+	struct nfsd_fcache_disposal *l = container_of(work,
+			struct nfsd_fcache_disposal, work);
 
-	list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb, &head, LONG_MAX);
-
-	if (test_and_clear_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags))
-		nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_NOFLUSH);
-
-	if (!list_empty(&head)) {
-		nfsd_file_lru_dispose(&head);
-		flush_delayed_fput();
-	}
+	nfsd_file_list_remove_disposal(&head, l);
+	nfsd_file_dispose_list(&head);
 }
 
 static int
@@ -575,6 +639,10 @@ nfsd_file_cache_init(void)
 	if (nfsd_file_hashtbl)
 		return 0;
 
+	nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", 0, 0);
+	if (!nfsd_filecache_wq)
+		goto out;
+
 	nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE,
 				sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
 	if (!nfsd_file_hashtbl) {
@@ -628,7 +696,7 @@ nfsd_file_cache_init(void)
 		spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
 	}
 
-	INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_delayed_close);
+	INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker);
 out:
 	return ret;
 out_notifier:
@@ -644,6 +712,8 @@ nfsd_file_cache_init(void)
 	nfsd_file_mark_slab = NULL;
 	kfree(nfsd_file_hashtbl);
 	nfsd_file_hashtbl = NULL;
+	destroy_workqueue(nfsd_filecache_wq);
+	nfsd_filecache_wq = NULL;
 	goto out;
 }
 
@@ -682,6 +752,88 @@ nfsd_file_cache_purge(struct net *net)
 	}
 }
 
+static struct nfsd_fcache_disposal *
+nfsd_alloc_fcache_disposal(struct net *net)
+{
+	struct nfsd_fcache_disposal *l;
+
+	l = kmalloc(sizeof(*l), GFP_KERNEL);
+	if (!l)
+		return NULL;
+	INIT_WORK(&l->work, nfsd_file_delayed_close);
+	l->net = net;
+	spin_lock_init(&l->lock);
+	INIT_LIST_HEAD(&l->freeme);
+	return l;
+}
+
+static void
+nfsd_free_fcache_disposal(struct nfsd_fcache_disposal *l)
+{
+	rcu_assign_pointer(l->net, NULL);
+	cancel_work_sync(&l->work);
+	nfsd_file_dispose_list(&l->freeme);
+	kfree_rcu(l, rcu);
+}
+
+static void
+nfsd_add_fcache_disposal(struct nfsd_fcache_disposal *l)
+{
+	spin_lock(&laundrette_lock);
+	list_add_tail_rcu(&l->list, &laundrettes);
+	spin_unlock(&laundrette_lock);
+}
+
+static void
+nfsd_del_fcache_disposal(struct nfsd_fcache_disposal *l)
+{
+	spin_lock(&laundrette_lock);
+	list_del_rcu(&l->list);
+	spin_unlock(&laundrette_lock);
+}
+
+static int
+nfsd_alloc_fcache_disposal_net(struct net *net)
+{
+	struct nfsd_fcache_disposal *l;
+
+	l = nfsd_alloc_fcache_disposal(net);
+	if (!l)
+		return -ENOMEM;
+	nfsd_add_fcache_disposal(l);
+	return 0;
+}
+
+static void
+nfsd_free_fcache_disposal_net(struct net *net)
+{
+	struct nfsd_fcache_disposal *l;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(l, &laundrettes, list) {
+		if (l->net != net)
+			continue;
+		nfsd_del_fcache_disposal(l);
+		rcu_read_unlock();
+		nfsd_free_fcache_disposal(l);
+		return;
+	}
+	rcu_read_unlock();
+}
+
+int
+nfsd_file_cache_start_net(struct net *net)
+{
+	return nfsd_alloc_fcache_disposal_net(net);
+}
+
+void
+nfsd_file_cache_shutdown_net(struct net *net)
+{
+	nfsd_file_cache_purge(net);
+	nfsd_free_fcache_disposal_net(net);
+}
+
 void
 nfsd_file_cache_shutdown(void)
 {
@@ -706,6 +858,8 @@ nfsd_file_cache_shutdown(void)
 	nfsd_file_mark_slab = NULL;
 	kfree(nfsd_file_hashtbl);
 	nfsd_file_hashtbl = NULL;
+	destroy_workqueue(nfsd_filecache_wq);
+	nfsd_filecache_wq = NULL;
 }
 
 static bool
@@ -789,6 +943,7 @@ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
 	struct nfsd_file *nf, *new;
 	struct inode *inode;
 	unsigned int hashval;
+	bool retry = true;
 
 	/* FIXME: skip this if fh_dentry is already set? */
 	status = fh_verify(rqstp, fhp, S_IFREG,
@@ -824,6 +979,11 @@ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
 
 	/* Did construction of this file fail? */
 	if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
+		if (!retry) {
+			status = nfserr_jukebox;
+			goto out;
+		}
+		retry = false;
 		nfsd_file_put_noref(nf);
 		goto retry;
 	}
@@ -858,7 +1018,7 @@ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
 open_file:
 	nf = new;
 	/* Take reference for the hashtable */
-	atomic_inc(&nf->nf_ref);
+	refcount_inc(&nf->nf_ref);
 	__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
 	__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
 	list_lru_add(&nfsd_file_lru, &nf->nf_lru);
@@ -867,7 +1027,8 @@ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
 	nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
 			nfsd_file_hashtbl[hashval].nfb_count);
 	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
-	atomic_long_inc(&nfsd_filecache_count);
+	if (atomic_long_inc_return(&nfsd_filecache_count) >= NFSD_FILE_LRU_THRESHOLD)
+		nfsd_file_gc();
 
 	nf->nf_mark = nfsd_file_mark_find_or_create(nf);
 	if (nf->nf_mark)
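
[Aside, not part of the patch] Two themes run through the filecache.c changes above: the nf_ref/nfm_ref counters move from atomic_t to refcount_t, and file disposal is deferred to a per-net nfsd_fcache_disposal work item instead of being flushed inline. The refcount_t calls are near drop-in replacements; the one non-obvious mapping is refcount_dec_not_one(), which stands in for atomic_add_unless(&ref, -1, 1). A minimal sketch with a hypothetical struct:

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t ref;
};

/* Drop a reference only if it is not the last one, so the caller can keep
 * the final reference for a later disposal list -- the same pattern as
 * nfsd_file_unhash_and_release_locked() above.
 */
static bool obj_put_unless_last(struct obj *o)
{
	return refcount_dec_not_one(&o->ref);	/* was atomic_add_unless(&ref, -1, 1) */
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref))	/* warns on underflow, unlike atomic_t */
		kfree(o);
}
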
diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
index 851d9ab..7872df5 100644
--- a/fs/nfsd/filecache.h
+++ b/fs/nfsd/filecache.h
@@ -19,7 +19,7 @@
  */
 struct nfsd_file_mark {
 	struct fsnotify_mark	nfm_mark;
-	atomic_t		nfm_ref;
+	refcount_t		nfm_ref;
 };
 
 /*
@@ -43,14 +43,17 @@ struct nfsd_file {
 	unsigned long		nf_flags;
 	struct inode		*nf_inode;
 	unsigned int		nf_hashval;
-	atomic_t		nf_ref;
+	refcount_t		nf_ref;
 	unsigned char		nf_may;
 	struct nfsd_file_mark	*nf_mark;
+	struct rw_semaphore	nf_rwsem;
 };
 
 int nfsd_file_cache_init(void);
 void nfsd_file_cache_purge(struct net *);
 void nfsd_file_cache_shutdown(void);
+int nfsd_file_cache_start_net(struct net *net);
+void nfsd_file_cache_shutdown_net(struct net *net);
 void nfsd_file_put(struct nfsd_file *nf);
 struct nfsd_file *nfsd_file_get(struct nfsd_file *nf);
 void nfsd_file_close_inode_sync(struct inode *inode);
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 9a4ef81..2baf323 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -40,7 +40,7 @@ struct nfsd_net {
 
 	struct lock_manager nfsd4_manager;
 	bool grace_ended;
-	time_t boot_time;
+	time64_t boot_time;
 
 	/* internal mount of the "nfsd" pseudofilesystem: */
 	struct vfsmount *nfsd_mnt;
@@ -92,8 +92,8 @@ struct nfsd_net {
 	bool in_grace;
 	const struct nfsd4_client_tracking_ops *client_tracking_ops;
 
-	time_t nfsd4_lease;
-	time_t nfsd4_grace;
+	time64_t nfsd4_lease;
+	time64_t nfsd4_grace;
 	bool somebody_reclaimed;
 
 	bool track_reclaim_completes;
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index cea68d8..288bc76 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -203,7 +203,7 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
 		RETURN_STATUS(nfserr_io);
 	nfserr = nfsd_write(rqstp, &resp->fh, argp->offset,
 			    rqstp->rq_vec, nvecs, &cnt,
-			    resp->committed);
+			    resp->committed, resp->verf);
 	resp->count = cnt;
 	RETURN_STATUS(nfserr);
 }
@@ -683,7 +683,8 @@ nfsd3_proc_commit(struct svc_rqst *rqstp)
 		RETURN_STATUS(nfserr_inval);
 
 	fh_copy(&resp->fh, &argp->fh);
-	nfserr = nfsd_commit(rqstp, &resp->fh, argp->offset, argp->count);
+	nfserr = nfsd_commit(rqstp, &resp->fh, argp->offset, argp->count,
+			resp->verf);
 
 	RETURN_STATUS(nfserr);
 }
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 195ab7a..aae514d4 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -32,14 +32,14 @@ static u32	nfs3_ftypes[] = {
  * XDR functions for basic NFS types
  */
 static __be32 *
-encode_time3(__be32 *p, struct timespec *time)
+encode_time3(__be32 *p, struct timespec64 *time)
 {
 	*p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec);
 	return p;
 }
 
 static __be32 *
-decode_time3(__be32 *p, struct timespec *time)
+decode_time3(__be32 *p, struct timespec64 *time)
 {
 	time->tv_sec = ntohl(*p++);
 	time->tv_nsec = ntohl(*p++);
@@ -167,7 +167,6 @@ encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
 	      struct kstat *stat)
 {
 	struct user_namespace *userns = nfsd_user_namespace(rqstp);
-	struct timespec ts;
 	*p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
 	*p++ = htonl((u32) (stat->mode & S_IALLUGO));
 	*p++ = htonl((u32) stat->nlink);
@@ -183,12 +182,9 @@ encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
 	*p++ = htonl((u32) MINOR(stat->rdev));
 	p = encode_fsid(p, fhp);
 	p = xdr_encode_hyper(p, stat->ino);
-	ts = timespec64_to_timespec(stat->atime);
-	p = encode_time3(p, &ts);
-	ts = timespec64_to_timespec(stat->mtime);
-	p = encode_time3(p, &ts);
-	ts = timespec64_to_timespec(stat->ctime);
-	p = encode_time3(p, &ts);
+	p = encode_time3(p, &stat->atime);
+	p = encode_time3(p, &stat->mtime);
+	p = encode_time3(p, &stat->ctime);
 
 	return p;
 }
@@ -277,8 +273,8 @@ void fill_pre_wcc(struct svc_fh *fhp)
 		stat.size  = inode->i_size;
 	}
 
-	fhp->fh_pre_mtime = timespec64_to_timespec(stat.mtime);
-	fhp->fh_pre_ctime = timespec64_to_timespec(stat.ctime);
+	fhp->fh_pre_mtime = stat.mtime;
+	fhp->fh_pre_ctime = stat.ctime;
 	fhp->fh_pre_size  = stat.size;
 	fhp->fh_pre_change = nfsd4_change_attribute(&stat, inode);
 	fhp->fh_pre_saved = true;
@@ -330,7 +326,7 @@ nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p)
 	p = decode_sattr3(p, &args->attrs, nfsd_user_namespace(rqstp));
 
 	if ((args->check_guard = ntohl(*p++)) != 0) { 
-		struct timespec time; 
+		struct timespec64 time;
 		p = decode_time3(p, &time);
 		args->guardtime = time.tv_sec;
 	}
@@ -751,17 +747,13 @@ int
 nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p)
 {
 	struct nfsd3_writeres *resp = rqstp->rq_resp;
-	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
-	__be32 verf[2];
 
 	p = encode_wcc_data(rqstp, p, &resp->fh);
 	if (resp->status == 0) {
 		*p++ = htonl(resp->count);
 		*p++ = htonl(resp->committed);
-		/* unique identifier, y2038 overflow can be ignored */
-		nfsd_copy_boot_verifier(verf, nn);
-		*p++ = verf[0];
-		*p++ = verf[1];
+		*p++ = resp->verf[0];
+		*p++ = resp->verf[1];
 	}
 	return xdr_ressize_check(rqstp, p);
 }
@@ -1125,16 +1117,12 @@ int
 nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p)
 {
 	struct nfsd3_commitres *resp = rqstp->rq_resp;
-	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
-	__be32 verf[2];
 
 	p = encode_wcc_data(rqstp, p, &resp->fh);
 	/* Write verifier */
 	if (resp->status == 0) {
-		/* unique identifier, y2038 overflow can be ignored */
-		nfsd_copy_boot_verifier(verf, nn);
-		*p++ = verf[0];
-		*p++ = verf[1];
+		*p++ = resp->verf[0];
+		*p++ = resp->verf[1];
 	}
 	return xdr_ressize_check(rqstp, p);
 }
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 24534db..c3b11a7 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -823,7 +823,16 @@ static const struct rpc_program cb_program = {
 static int max_cb_time(struct net *net)
 {
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
-	return max(nn->nfsd4_lease/10, (time_t)1) * HZ;
+
+	/*
+	 * nfsd4_lease is set to at most one hour in __nfsd4_write_time,
+	 * so we can use 32-bit math on it. Warn if that assumption
+	 * ever stops being true.
+	 */
+	if (WARN_ON_ONCE(nn->nfsd4_lease > 3600))
+		return 360 * HZ;
+
+	return max(((u32)nn->nfsd4_lease)/10, 1u) * HZ;
 }
 
 static struct workqueue_struct *callback_wq;
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 2681c70..e12409e 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -675,7 +675,7 @@ nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
 
 		/* Client gets 2 lease periods to return it */
 		cutoff = ktime_add_ns(task->tk_start,
-					 nn->nfsd4_lease * NSEC_PER_SEC * 2);
+					 (u64)nn->nfsd4_lease * NSEC_PER_SEC * 2);
 
 		if (ktime_before(now, cutoff)) {
 			rpc_delay(task, HZ/100); /* 10 mili-seconds */
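
[Aside, not part of the patch] The nfs4callback.c and nfs4layouts.c fixes above pair with the time_t -> time64_t switch in netns.h: max_cb_time() now does its division in 32-bit math under the documented one-hour cap on nfsd4_lease, and nfsd4_cb_layout_done() widens the lease to u64 before scaling it to nanoseconds, so the multiplication can no longer wrap the way it could when the lease lived in a 32-bit long. A standalone illustration of the wrap (plain userspace C, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t lease = 90;	/* default lease, in seconds */

	/* 32-bit arithmetic wraps: 90 * 1e9 * 2 is far beyond 2^32. */
	uint32_t wrapped = lease * 1000000000u * 2u;
	/* Widening first, as the (u64) cast in the hunk does, is correct. */
	uint64_t correct = (uint64_t)lease * 1000000000ull * 2;

	printf("32-bit result: %u ns (wrapped)\n", wrapped);
	printf("64-bit result: %llu ns\n", (unsigned long long)correct);
	return 0;
}
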
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 4798667..0e75f7f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -37,6 +37,7 @@
 #include <linux/falloc.h>
 #include <linux/slab.h>
 #include <linux/kthread.h>
+#include <linux/sunrpc/addr.h>
 
 #include "idmap.h"
 #include "cache.h"
@@ -232,7 +233,7 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
 	if (!*resfh)
 		return nfserr_jukebox;
 	fh_init(*resfh, NFS4_FHSIZE);
-	open->op_truncate = 0;
+	open->op_truncate = false;
 
 	if (open->op_create) {
 		/* FIXME: check session persistence and pnfs flags.
@@ -365,7 +366,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL)
 		return nfserr_inval;
 
-	open->op_created = 0;
+	open->op_created = false;
 	/*
 	 * RFC5661 18.51.3
 	 * Before RECLAIM_COMPLETE done, server should deny new lock
@@ -503,12 +504,20 @@ nfsd4_putfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	    union nfsd4_op_u *u)
 {
 	struct nfsd4_putfh *putfh = &u->putfh;
+	__be32 ret;
 
 	fh_put(&cstate->current_fh);
 	cstate->current_fh.fh_handle.fh_size = putfh->pf_fhlen;
 	memcpy(&cstate->current_fh.fh_handle.fh_base, putfh->pf_fhval,
 	       putfh->pf_fhlen);
-	return fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_BYPASS_GSS);
+	ret = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_BYPASS_GSS);
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+	if (ret == nfserr_stale && putfh->no_verify) {
+		SET_FH_FLAG(&cstate->current_fh, NFSD4_FH_FOREIGN);
+		ret = 0;
+	}
+#endif
+	return ret;
 }
 
 static __be32
@@ -530,9 +539,9 @@ nfsd4_restorefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		return nfserr_restorefh;
 
 	fh_dup2(&cstate->current_fh, &cstate->save_fh);
-	if (HAS_STATE_ID(cstate, SAVED_STATE_ID_FLAG)) {
+	if (HAS_CSTATE_FLAG(cstate, SAVED_STATE_ID_FLAG)) {
 		memcpy(&cstate->current_stateid, &cstate->save_stateid, sizeof(stateid_t));
-		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
+		SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
 	}
 	return nfs_ok;
 }
@@ -542,9 +551,9 @@ nfsd4_savefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	     union nfsd4_op_u *u)
 {
 	fh_dup2(&cstate->save_fh, &cstate->current_fh);
-	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG)) {
+	if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG)) {
 		memcpy(&cstate->save_stateid, &cstate->current_stateid, sizeof(stateid_t));
-		SET_STATE_ID(cstate, SAVED_STATE_ID_FLAG);
+		SET_CSTATE_FLAG(cstate, SAVED_STATE_ID_FLAG);
 	}
 	return nfs_ok;
 }
@@ -581,9 +590,9 @@ nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 {
 	struct nfsd4_commit *commit = &u->commit;
 
-	gen_boot_verifier(&commit->co_verf, SVC_NET(rqstp));
 	return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
-			     commit->co_count);
+			     commit->co_count,
+			     (__be32 *)commit->co_verf.data);
 }
 
 static __be32
@@ -776,7 +785,7 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	/* check stateid */
 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
 					&read->rd_stateid, RD_STATE,
-					&read->rd_nf);
+					&read->rd_nf, NULL);
 	if (status) {
 		dprintk("NFSD: nfsd4_read: couldn't process stateid!\n");
 		goto out;
@@ -948,7 +957,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
 		status = nfs4_preprocess_stateid_op(rqstp, cstate,
 				&cstate->current_fh, &setattr->sa_stateid,
-				WR_STATE, NULL);
+				WR_STATE, NULL, NULL);
 		if (status) {
 			dprintk("NFSD: nfsd4_setattr: couldn't process stateid!\n");
 			return status;
@@ -975,7 +984,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	if (status)
 		goto out;
 	status = nfsd_setattr(rqstp, &cstate->current_fh, &setattr->sa_iattr,
-				0, (time_t)0);
+				0, (time64_t)0);
 out:
 	fh_drop_write(&cstate->current_fh);
 	return status;
@@ -999,22 +1008,22 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	trace_nfsd_write_start(rqstp, &cstate->current_fh,
 			       write->wr_offset, cnt);
 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
-						stateid, WR_STATE, &nf);
+						stateid, WR_STATE, &nf, NULL);
 	if (status) {
 		dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
 		return status;
 	}
 
 	write->wr_how_written = write->wr_stable_how;
-	gen_boot_verifier(&write->wr_verifier, SVC_NET(rqstp));
 
 	nvecs = svc_fill_write_vector(rqstp, write->wr_pagelist,
 				      &write->wr_head, write->wr_buflen);
 	WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
 
-	status = nfsd_vfs_write(rqstp, &cstate->current_fh, nf->nf_file,
+	status = nfsd_vfs_write(rqstp, &cstate->current_fh, nf,
 				write->wr_offset, rqstp->rq_vec, nvecs, &cnt,
-				write->wr_how_written);
+				write->wr_how_written,
+				(__be32 *)write->wr_verifier.data);
 	nfsd_file_put(nf);
 
 	write->wr_bytes_written = cnt;
@@ -1034,14 +1043,14 @@ nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		return nfserr_nofilehandle;
 
 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
-					    src_stateid, RD_STATE, src);
+					    src_stateid, RD_STATE, src, NULL);
 	if (status) {
 		dprintk("NFSD: %s: couldn't process src stateid!\n", __func__);
 		goto out;
 	}
 
 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
-					    dst_stateid, WR_STATE, dst);
+					    dst_stateid, WR_STATE, dst, NULL);
 	if (status) {
 		dprintk("NFSD: %s: couldn't process dst stateid!\n", __func__);
 		goto out_put_src;
@@ -1076,8 +1085,8 @@ nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	if (status)
 		goto out;
 
-	status = nfsd4_clone_file_range(src->nf_file, clone->cl_src_pos,
-			dst->nf_file, clone->cl_dst_pos, clone->cl_count,
+	status = nfsd4_clone_file_range(src, clone->cl_src_pos,
+			dst, clone->cl_dst_pos, clone->cl_count,
 			EX_ISSYNC(cstate->current_fh.fh_export));
 
 	nfsd_file_put(dst);
@@ -1135,6 +1144,207 @@ void nfsd4_shutdown_copy(struct nfs4_client *clp)
 	while ((copy = nfsd4_get_copy(clp)) != NULL)
 		nfsd4_stop_copy(copy);
 }
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+
+extern struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
+				   struct nfs_fh *src_fh,
+				   nfs4_stateid *stateid);
+extern void nfs42_ssc_close(struct file *filep);
+
+extern void nfs_sb_deactive(struct super_block *sb);
+
+#define NFSD42_INTERSSC_MOUNTOPS "vers=4.2,addr=%s,sec=sys"
+
+/**
+ * Support one copy source server for now.
+ */
+static __be32
+nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
+		       struct vfsmount **mount)
+{
+	struct file_system_type *type;
+	struct vfsmount *ss_mnt;
+	struct nfs42_netaddr *naddr;
+	struct sockaddr_storage tmp_addr;
+	size_t tmp_addrlen, match_netid_len = 3;
+	char *startsep = "", *endsep = "", *match_netid = "tcp";
+	char *ipaddr, *dev_name, *raw_data;
+	int len, raw_len;
+	__be32 status = nfserr_inval;
+
+	naddr = &nss->u.nl4_addr;
+	tmp_addrlen = rpc_uaddr2sockaddr(SVC_NET(rqstp), naddr->addr,
+					 naddr->addr_len,
+					 (struct sockaddr *)&tmp_addr,
+					 sizeof(tmp_addr));
+	if (tmp_addrlen == 0)
+		goto out_err;
+
+	if (tmp_addr.ss_family == AF_INET6) {
+		startsep = "[";
+		endsep = "]";
+		match_netid = "tcp6";
+		match_netid_len = 4;
+	}
+
+	if (naddr->netid_len != match_netid_len ||
+		strncmp(naddr->netid, match_netid, naddr->netid_len))
+		goto out_err;
+
+	/* Construct the raw data for the vfs_kern_mount call */
+	len = RPC_MAX_ADDRBUFLEN + 1;
+	ipaddr = kzalloc(len, GFP_KERNEL);
+	if (!ipaddr)
+		goto out_err;
+
+	rpc_ntop((struct sockaddr *)&tmp_addr, ipaddr, len);
+
+	/* 2 for ipv6 endsep and startsep. 3 for ":/" and trailing '/0'*/
+
+	raw_len = strlen(NFSD42_INTERSSC_MOUNTOPS) + strlen(ipaddr);
+	raw_data = kzalloc(raw_len, GFP_KERNEL);
+	if (!raw_data)
+		goto out_free_ipaddr;
+
+	snprintf(raw_data, raw_len, NFSD42_INTERSSC_MOUNTOPS, ipaddr);
+
+	status = nfserr_nodev;
+	type = get_fs_type("nfs");
+	if (!type)
+		goto out_free_rawdata;
+
+	/* Set the server:<export> for the vfs_kern_mount call */
+	dev_name = kzalloc(len + 5, GFP_KERNEL);
+	if (!dev_name)
+		goto out_free_rawdata;
+	snprintf(dev_name, len + 5, "%s%s%s:/", startsep, ipaddr, endsep);
+
+	/* Use an 'internal' mount: SB_KERNMOUNT -> MNT_INTERNAL */
+	ss_mnt = vfs_kern_mount(type, SB_KERNMOUNT, dev_name, raw_data);
+	module_put(type->owner);
+	if (IS_ERR(ss_mnt))
+		goto out_free_devname;
+
+	status = 0;
+	*mount = ss_mnt;
+
+out_free_devname:
+	kfree(dev_name);
+out_free_rawdata:
+	kfree(raw_data);
+out_free_ipaddr:
+	kfree(ipaddr);
+out_err:
+	return status;
+}
+
+static void
+nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
+{
+	nfs_sb_deactive(ss_mnt->mnt_sb);
+	mntput(ss_mnt);
+}
+
+/**
+ * nfsd4_setup_inter_ssc
+ *
+ * Verify COPY destination stateid.
+ * Connect to the source server with NFSv4.1.
+ * Create the source struct file for nfsd_copy_range.
+ * Called with COPY cstate:
+ *    SAVED_FH: source filehandle
+ *    CURRENT_FH: destination filehandle
+ */
+static __be32
+nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
+		      struct nfsd4_compound_state *cstate,
+		      struct nfsd4_copy *copy, struct vfsmount **mount)
+{
+	struct svc_fh *s_fh = NULL;
+	stateid_t *s_stid = &copy->cp_src_stateid;
+	__be32 status = nfserr_inval;
+
+	/* Verify the destination stateid and set dst struct file*/
+	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
+					    &copy->cp_dst_stateid,
+					    WR_STATE, &copy->nf_dst, NULL);
+	if (status)
+		goto out;
+
+	status = nfsd4_interssc_connect(&copy->cp_src, rqstp, mount);
+	if (status)
+		goto out;
+
+	s_fh = &cstate->save_fh;
+
+	copy->c_fh.size = s_fh->fh_handle.fh_size;
+	memcpy(copy->c_fh.data, &s_fh->fh_handle.fh_base, copy->c_fh.size);
+	copy->stateid.seqid = cpu_to_be32(s_stid->si_generation);
+	memcpy(copy->stateid.other, (void *)&s_stid->si_opaque,
+	       sizeof(stateid_opaque_t));
+
+	status = 0;
+out:
+	return status;
+}
+
+static void
+nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
+			struct nfsd_file *dst)
+{
+	nfs42_ssc_close(src->nf_file);
+	nfsd_file_put(src);
+	nfsd_file_put(dst);
+	mntput(ss_mnt);
+}
+
+#else /* CONFIG_NFSD_V4_2_INTER_SSC */
+
+static __be32
+nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
+		      struct nfsd4_compound_state *cstate,
+		      struct nfsd4_copy *copy,
+		      struct vfsmount **mount)
+{
+	*mount = NULL;
+	return nfserr_inval;
+}
+
+static void
+nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
+			struct nfsd_file *dst)
+{
+}
+
+static void
+nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
+{
+}
+
+static struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
+				   struct nfs_fh *src_fh,
+				   nfs4_stateid *stateid)
+{
+	return NULL;
+}
+#endif /* CONFIG_NFSD_V4_2_INTER_SSC */
+
+static __be32
+nfsd4_setup_intra_ssc(struct svc_rqst *rqstp,
+		      struct nfsd4_compound_state *cstate,
+		      struct nfsd4_copy *copy)
+{
+	return nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid,
+				 &copy->nf_src, &copy->cp_dst_stateid,
+				 &copy->nf_dst);
+}
+
+static void
+nfsd4_cleanup_intra_ssc(struct nfsd_file *src, struct nfsd_file *dst)
+{
+	nfsd_file_put(src);
+	nfsd_file_put(dst);
+}
 
 static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
 {
@@ -1200,12 +1410,16 @@ static __be32 nfsd4_do_copy(struct nfsd4_copy *copy, bool sync)
 		status = nfs_ok;
 	}
 
-	nfsd_file_put(copy->nf_src);
-	nfsd_file_put(copy->nf_dst);
+	if (!copy->cp_intra) /* Inter server SSC */
+		nfsd4_cleanup_inter_ssc(copy->ss_mnt, copy->nf_src,
+					copy->nf_dst);
+	else
+		nfsd4_cleanup_intra_ssc(copy->nf_src, copy->nf_dst);
+
 	return status;
 }
 
-static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
+static int dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
 {
 	dst->cp_src_pos = src->cp_src_pos;
 	dst->cp_dst_pos = src->cp_dst_pos;
@@ -1215,15 +1429,25 @@ static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
 	memcpy(&dst->fh, &src->fh, sizeof(src->fh));
 	dst->cp_clp = src->cp_clp;
 	dst->nf_dst = nfsd_file_get(src->nf_dst);
-	dst->nf_src = nfsd_file_get(src->nf_src);
+	dst->cp_intra = src->cp_intra;
+	if (src->cp_intra) /* for inter, file_src doesn't exist yet */
+		dst->nf_src = nfsd_file_get(src->nf_src);
+
 	memcpy(&dst->cp_stateid, &src->cp_stateid, sizeof(src->cp_stateid));
+	memcpy(&dst->cp_src, &src->cp_src, sizeof(struct nl4_server));
+	memcpy(&dst->stateid, &src->stateid, sizeof(src->stateid));
+	memcpy(&dst->c_fh, &src->c_fh, sizeof(src->c_fh));
+	dst->ss_mnt = src->ss_mnt;
+
+	return 0;
 }
 
 static void cleanup_async_copy(struct nfsd4_copy *copy)
 {
-	nfs4_free_cp_state(copy);
+	nfs4_free_copy_state(copy);
 	nfsd_file_put(copy->nf_dst);
-	nfsd_file_put(copy->nf_src);
+	if (copy->cp_intra)
+		nfsd_file_put(copy->nf_src);
 	spin_lock(&copy->cp_clp->async_lock);
 	list_del(&copy->copies);
 	spin_unlock(&copy->cp_clp->async_lock);
@@ -1235,7 +1459,24 @@ static int nfsd4_do_async_copy(void *data)
 	struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
 	struct nfsd4_copy *cb_copy;
 
+	if (!copy->cp_intra) { /* Inter server SSC */
+		copy->nf_src = kzalloc(sizeof(struct nfsd_file), GFP_KERNEL);
+		if (!copy->nf_src) {
+			copy->nfserr = nfserr_serverfault;
+			nfsd4_interssc_disconnect(copy->ss_mnt);
+			goto do_callback;
+		}
+		copy->nf_src->nf_file = nfs42_ssc_open(copy->ss_mnt, &copy->c_fh,
+					      &copy->stateid);
+		if (IS_ERR(copy->nf_src->nf_file)) {
+			copy->nfserr = nfserr_offload_denied;
+			nfsd4_interssc_disconnect(copy->ss_mnt);
+			goto do_callback;
+		}
+	}
+
 	copy->nfserr = nfsd4_do_copy(copy, 0);
+do_callback:
 	cb_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
 	if (!cb_copy)
 		goto out;
@@ -1247,6 +1488,8 @@ static int nfsd4_do_async_copy(void *data)
 			&nfsd4_cb_offload_ops, NFSPROC4_CLNT_CB_OFFLOAD);
 	nfsd4_run_cb(&cb_copy->cp_cb);
 out:
+	if (!copy->cp_intra)
+		kfree(copy->nf_src);
 	cleanup_async_copy(copy);
 	return 0;
 }
@@ -1259,11 +1502,20 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	__be32 status;
 	struct nfsd4_copy *async_copy = NULL;
 
-	status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid,
-				   &copy->nf_src, &copy->cp_dst_stateid,
-				   &copy->nf_dst);
-	if (status)
-		goto out;
+	if (!copy->cp_intra) { /* Inter server SSC */
+		if (!inter_copy_offload_enable || copy->cp_synchronous) {
+			status = nfserr_notsupp;
+			goto out;
+		}
+		status = nfsd4_setup_inter_ssc(rqstp, cstate, copy,
+				&copy->ss_mnt);
+		if (status)
+			return nfserr_offload_denied;
+	} else {
+		status = nfsd4_setup_intra_ssc(rqstp, cstate, copy);
+		if (status)
+			return status;
+	}
 
 	copy->cp_clp = cstate->clp;
 	memcpy(&copy->fh, &cstate->current_fh.fh_handle,
@@ -1274,15 +1526,15 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		status = nfserrno(-ENOMEM);
 		async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
 		if (!async_copy)
-			goto out;
-		if (!nfs4_init_cp_state(nn, copy)) {
-			kfree(async_copy);
-			goto out;
-		}
+			goto out_err;
+		if (!nfs4_init_copy_state(nn, copy))
+			goto out_err;
 		refcount_set(&async_copy->refcount, 1);
 		memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid,
 			sizeof(copy->cp_stateid));
-		dup_copy_fields(copy, async_copy);
+		status = dup_copy_fields(copy, async_copy);
+		if (status)
+			goto out_err;
 		async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
 				async_copy, "%s", "copy thread");
 		if (IS_ERR(async_copy->copy_task))
@@ -1293,13 +1545,17 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		spin_unlock(&async_copy->cp_clp->async_lock);
 		wake_up_process(async_copy->copy_task);
 		status = nfs_ok;
-	} else
+	} else {
 		status = nfsd4_do_copy(copy, 1);
+	}
 out:
 	return status;
 out_err:
 	if (async_copy)
 		cleanup_async_copy(async_copy);
+	status = nfserrno(-ENOMEM);
+	if (!copy->cp_intra)
+		nfsd4_interssc_disconnect(copy->ss_mnt);
 	goto out;
 }
 
@@ -1310,7 +1566,7 @@ find_async_copy(struct nfs4_client *clp, stateid_t *stateid)
 
 	spin_lock(&clp->async_lock);
 	list_for_each_entry(copy, &clp->async_copies, copies) {
-		if (memcmp(&copy->cp_stateid, stateid, NFS4_STATEID_SIZE))
+		if (memcmp(&copy->cp_stateid.stid, stateid, NFS4_STATEID_SIZE))
 			continue;
 		refcount_inc(&copy->refcount);
 		spin_unlock(&clp->async_lock);
@@ -1326,16 +1582,61 @@ nfsd4_offload_cancel(struct svc_rqst *rqstp,
 		     union nfsd4_op_u *u)
 {
 	struct nfsd4_offload_status *os = &u->offload_status;
-	__be32 status = 0;
 	struct nfsd4_copy *copy;
 	struct nfs4_client *clp = cstate->clp;
 
 	copy = find_async_copy(clp, &os->stateid);
-	if (copy)
-		nfsd4_stop_copy(copy);
-	else
-		status = nfserr_bad_stateid;
+	if (!copy) {
+		struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
 
+		return manage_cpntf_state(nn, &os->stateid, clp, NULL);
+	} else
+		nfsd4_stop_copy(copy);
+
+	return nfs_ok;
+}
+
+static __be32
+nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+		  union nfsd4_op_u *u)
+{
+	struct nfsd4_copy_notify *cn = &u->copy_notify;
+	__be32 status;
+	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+	struct nfs4_stid *stid;
+	struct nfs4_cpntf_state *cps;
+	struct nfs4_client *clp = cstate->clp;
+
+	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
+					&cn->cpn_src_stateid, RD_STATE, NULL,
+					&stid);
+	if (status)
+		return status;
+
+	cn->cpn_sec = nn->nfsd4_lease;
+	cn->cpn_nsec = 0;
+
+	status = nfserrno(-ENOMEM);
+	cps = nfs4_alloc_init_cpntf_state(nn, stid);
+	if (!cps)
+		goto out;
+	memcpy(&cn->cpn_cnr_stateid, &cps->cp_stateid.stid, sizeof(stateid_t));
+	memcpy(&cps->cp_p_stateid, &stid->sc_stateid, sizeof(stateid_t));
+	memcpy(&cps->cp_p_clid, &clp->cl_clientid, sizeof(clientid_t));
+
+	/* For now, only return one server address in cpn_src, the
+	 * address used by the client to connect to this server.
+	 */
+	cn->cpn_src.nl4_type = NL4_NETADDR;
+	status = nfsd4_set_netaddr((struct sockaddr *)&rqstp->rq_daddr,
+				 &cn->cpn_src.u.nl4_addr);
+	WARN_ON_ONCE(status);
+	if (status) {
+		nfs4_put_cpntf_state(nn, cps);
+		goto out;
+	}
+out:
+	nfs4_put_stid(stid);
 	return status;
 }
 
@@ -1348,7 +1649,7 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 
 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
 					    &fallocate->falloc_stateid,
-					    WR_STATE, &nf);
+					    WR_STATE, &nf, NULL);
 	if (status != nfs_ok) {
 		dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
 		return status;
@@ -1407,7 +1708,7 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 
 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
 					    &seek->seek_stateid,
-					    RD_STATE, &nf);
+					    RD_STATE, &nf, NULL);
 	if (status) {
 		dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
 		return status;
@@ -1912,6 +2213,45 @@ static void svcxdr_init_encode(struct svc_rqst *rqstp,
 		- rqstp->rq_auth_slack;
 }
 
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+static void
+check_if_stalefh_allowed(struct nfsd4_compoundargs *args)
+{
+	struct nfsd4_op	*op, *current_op = NULL, *saved_op = NULL;
+	struct nfsd4_copy *copy;
+	struct nfsd4_putfh *putfh;
+	int i;
+
+	/* traverse all operations and, if it is a COPY compound, mark the
+	 * source filehandle to skip verification
+	 */
+	for (i = 0; i < args->opcnt; i++) {
+		op = &args->ops[i];
+		if (op->opnum == OP_PUTFH)
+			current_op = op;
+		else if (op->opnum == OP_SAVEFH)
+			saved_op = current_op;
+		else if (op->opnum == OP_RESTOREFH)
+			current_op = saved_op;
+		else if (op->opnum == OP_COPY) {
+			copy = (struct nfsd4_copy *)&op->u;
+			if (!saved_op) {
+				op->status = nfserr_nofilehandle;
+				return;
+			}
+			putfh = (struct nfsd4_putfh *)&saved_op->u;
+			if (!copy->cp_intra)
+				putfh->no_verify = true;
+		}
+	}
+}
+#else
+static void
+check_if_stalefh_allowed(struct nfsd4_compoundargs *args)
+{
+}
+#endif
+
 /*
  * COMPOUND call.
  */
@@ -1960,6 +2300,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
 		resp->opcnt = 1;
 		goto encode_op;
 	}
+	check_if_stalefh_allowed(args);
 
 	trace_nfsd_compound(rqstp, args->opcnt);
 	while (!status && resp->opcnt < args->opcnt) {
@@ -1975,13 +2316,14 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
 				op->status = nfsd4_open_omfg(rqstp, cstate, op);
 			goto encode_op;
 		}
-
-		if (!current_fh->fh_dentry) {
+		if (!current_fh->fh_dentry &&
+				!HAS_FH_FLAG(current_fh, NFSD4_FH_FOREIGN)) {
 			if (!(op->opdesc->op_flags & ALLOWED_WITHOUT_FH)) {
 				op->status = nfserr_nofilehandle;
 				goto encode_op;
 			}
-		} else if (current_fh->fh_export->ex_fslocs.migrated &&
+		} else if (current_fh->fh_export &&
+			   current_fh->fh_export->ex_fslocs.migrated &&
 			  !(op->opdesc->op_flags & ALLOWED_ON_ABSENT_FS)) {
 			op->status = nfserr_moved;
 			goto encode_op;
@@ -2025,7 +2367,8 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
 			if (op->opdesc->op_flags & OP_CLEAR_STATEID)
 				clear_current_stateid(cstate);
 
-			if (need_wrongsec_check(rqstp))
+			if (current_fh->fh_export &&
+					need_wrongsec_check(rqstp))
 				op->status = check_nfsd_access(current_fh->fh_export, rqstp);
 		}
 encode_op:
@@ -2292,6 +2635,21 @@ static inline u32 nfsd4_offload_status_rsize(struct svc_rqst *rqstp,
 		1 /* osr_complete<1> optional 0 for now */) * sizeof(__be32);
 }
 
+static inline u32 nfsd4_copy_notify_rsize(struct svc_rqst *rqstp,
+					struct nfsd4_op *op)
+{
+	return (op_encode_hdr_size +
+		3 /* cnr_lease_time */ +
+		1 /* We support one cnr_source_server */ +
+		1 /* cnr_stateid seq */ +
+		op_encode_stateid_maxsz /* cnr_stateid */ +
+		1 /* num cnr_source_server */ +
+		1 /* nl4_type */ +
+		1 /* nl4 size */ +
+		XDR_QUADLEN(NFS4_OPAQUE_LIMIT) /* nl4_loc + nl4_loc_sz */)
+		* sizeof(__be32);
+}
+
 #ifdef CONFIG_NFSD_PNFS
 static inline u32 nfsd4_getdeviceinfo_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
 {
@@ -2716,6 +3074,12 @@ static const struct nfsd4_operation nfsd4_ops[] = {
 		.op_name = "OP_OFFLOAD_CANCEL",
 		.op_rsize_bop = nfsd4_only_status_rsize,
 	},
+	[OP_COPY_NOTIFY] = {
+		.op_func = nfsd4_copy_notify,
+		.op_flags = OP_MODIFIES_SOMETHING,
+		.op_name = "OP_COPY_NOTIFY",
+		.op_rsize_bop = nfsd4_copy_notify_rsize,
+	},
 };
 
 /**
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 2481e76..a8fb186 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -1445,7 +1445,7 @@ nfsd4_cld_grace_done_v0(struct nfsd_net *nn)
 	}
 
 	cup->cu_u.cu_msg.cm_cmd = Cld_GraceDone;
-	cup->cu_u.cu_msg.cm_u.cm_gracetime = (int64_t)nn->boot_time;
+	cup->cu_u.cu_msg.cm_u.cm_gracetime = nn->boot_time;
 	ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
 	if (!ret)
 		ret = cup->cu_u.cu_msg.cm_status;
@@ -1782,7 +1782,7 @@ nfsd4_cltrack_client_has_session(struct nfs4_client *clp)
 }
 
 static char *
-nfsd4_cltrack_grace_start(time_t grace_start)
+nfsd4_cltrack_grace_start(time64_t grace_start)
 {
 	int copied;
 	size_t len;
@@ -1795,7 +1795,7 @@ nfsd4_cltrack_grace_start(time_t grace_start)
 	if (!result)
 		return result;
 
-	copied = snprintf(result, len, GRACE_START_ENV_PREFIX "%ld",
+	copied = snprintf(result, len, GRACE_START_ENV_PREFIX "%lld",
 				grace_start);
 	if (copied >= len) {
 		/* just return nothing if output was truncated */
@@ -2004,7 +2004,7 @@ nfsd4_umh_cltrack_grace_done(struct nfsd_net *nn)
 	char *legacy;
 	char timestr[22]; /* FIXME: better way to determine max size? */
 
-	sprintf(timestr, "%ld", nn->boot_time);
+	sprintf(timestr, "%lld", nn->boot_time);
 	legacy = nfsd4_cltrack_legacy_topdir();
 	nfsd4_umh_cltrack_upcall("gracedone", timestr, legacy, NULL);
 	kfree(legacy);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 369e574..65cfe9a 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -80,6 +80,7 @@ static u64 current_sessionid = 1;
 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
 void nfsd4_end_grace(struct nfsd_net *nn);
+static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
 
 /* Locking: */
 
@@ -170,7 +171,7 @@ renew_client_locked(struct nfs4_client *clp)
 			clp->cl_clientid.cl_boot,
 			clp->cl_clientid.cl_id);
 	list_move_tail(&clp->cl_lru, &nn->client_lru);
-	clp->cl_time = get_seconds();
+	clp->cl_time = ktime_get_boottime_seconds();
 }
 
 static void put_client_renew_locked(struct nfs4_client *clp)
@@ -722,6 +723,7 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *sla
 	/* Will be incremented before return to client: */
 	refcount_set(&stid->sc_count, 1);
 	spin_lock_init(&stid->sc_lock);
+	INIT_LIST_HEAD(&stid->sc_cp_list);
 
 	/*
 	 * It shouldn't be a problem to reuse an opaque stateid value.
@@ -741,30 +743,76 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *sla
 /*
  * Create a unique stateid_t to represent each COPY.
  */
-int nfs4_init_cp_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
+static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
+			      unsigned char sc_type)
 {
 	int new_id;
 
+	stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
+	stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
+	stid->sc_type = sc_type;
+
 	idr_preload(GFP_KERNEL);
 	spin_lock(&nn->s2s_cp_lock);
-	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, copy, 0, 0, GFP_NOWAIT);
+	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
+	stid->stid.si_opaque.so_id = new_id;
 	spin_unlock(&nn->s2s_cp_lock);
 	idr_preload_end();
 	if (new_id < 0)
 		return 0;
-	copy->cp_stateid.si_opaque.so_id = new_id;
-	copy->cp_stateid.si_opaque.so_clid.cl_boot = nn->boot_time;
-	copy->cp_stateid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
 	return 1;
 }
 
-void nfs4_free_cp_state(struct nfsd4_copy *copy)
+int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
+{
+	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
+}
+
+struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
+						     struct nfs4_stid *p_stid)
+{
+	struct nfs4_cpntf_state *cps;
+
+	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
+	if (!cps)
+		return NULL;
+	cps->cpntf_time = ktime_get_boottime_seconds();
+	refcount_set(&cps->cp_stateid.sc_count, 1);
+	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
+		goto out_free;
+	spin_lock(&nn->s2s_cp_lock);
+	list_add(&cps->cp_list, &p_stid->sc_cp_list);
+	spin_unlock(&nn->s2s_cp_lock);
+	return cps;
+out_free:
+	kfree(cps);
+	return NULL;
+}
+
+void nfs4_free_copy_state(struct nfsd4_copy *copy)
 {
 	struct nfsd_net *nn;
 
+	WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
 	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
 	spin_lock(&nn->s2s_cp_lock);
-	idr_remove(&nn->s2s_cp_stateids, copy->cp_stateid.si_opaque.so_id);
+	idr_remove(&nn->s2s_cp_stateids,
+		   copy->cp_stateid.stid.si_opaque.so_id);
+	spin_unlock(&nn->s2s_cp_lock);
+}
+
+static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
+{
+	struct nfs4_cpntf_state *cps;
+	struct nfsd_net *nn;
+
+	nn = net_generic(net, nfsd_net_id);
+	spin_lock(&nn->s2s_cp_lock);
+	while (!list_empty(&stid->sc_cp_list)) {
+		cps = list_first_entry(&stid->sc_cp_list,
+				       struct nfs4_cpntf_state, cp_list);
+		_free_cpntf_state_locked(nn, cps);
+	}
 	spin_unlock(&nn->s2s_cp_lock);
 }
 
@@ -806,7 +854,7 @@ static void nfs4_free_deleg(struct nfs4_stid *stid)
 static DEFINE_SPINLOCK(blocked_delegations_lock);
 static struct bloom_pair {
 	int	entries, old_entries;
-	time_t	swap_time;
+	time64_t swap_time;
 	int	new; /* index into 'set' */
 	DECLARE_BITMAP(set[2], 256);
 } blocked_delegations;
@@ -818,15 +866,15 @@ static int delegation_blocked(struct knfsd_fh *fh)
 
 	if (bd->entries == 0)
 		return 0;
-	if (seconds_since_boot() - bd->swap_time > 30) {
+	if (ktime_get_seconds() - bd->swap_time > 30) {
 		spin_lock(&blocked_delegations_lock);
-		if (seconds_since_boot() - bd->swap_time > 30) {
+		if (ktime_get_seconds() - bd->swap_time > 30) {
 			bd->entries -= bd->old_entries;
 			bd->old_entries = bd->entries;
 			memset(bd->set[bd->new], 0,
 			       sizeof(bd->set[0]));
 			bd->new = 1-bd->new;
-			bd->swap_time = seconds_since_boot();
+			bd->swap_time = ktime_get_seconds();
 		}
 		spin_unlock(&blocked_delegations_lock);
 	}
@@ -856,7 +904,7 @@ static void block_delegations(struct knfsd_fh *fh)
 	__set_bit((hash>>8)&255, bd->set[bd->new]);
 	__set_bit((hash>>16)&255, bd->set[bd->new]);
 	if (bd->entries == 0)
-		bd->swap_time = seconds_since_boot();
+		bd->swap_time = ktime_get_seconds();
 	bd->entries += 1;
 	spin_unlock(&blocked_delegations_lock);
 }
@@ -915,6 +963,7 @@ nfs4_put_stid(struct nfs4_stid *s)
 		return;
 	}
 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
+	nfs4_free_cpntf_statelist(clp->net, s);
 	spin_unlock(&clp->cl_lock);
 	s->sc_free(s);
 	if (fp)
@@ -1862,7 +1911,7 @@ STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
 	 */
 	if (clid->cl_boot == (u32)nn->boot_time)
 		return 0;
-	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
+	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08llx\n",
 		clid->cl_boot, clid->cl_id, nn->boot_time);
 	return 1;
 }
@@ -2215,14 +2264,14 @@ static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
 	 * This is opaque to client, so no need to byte-swap. Use
 	 * __force to keep sparse happy
 	 */
-	verf[0] = (__force __be32)get_seconds();
+	verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
 	verf[1] = (__force __be32)nn->clverifier_counter++;
 	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
 }
 
 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
 {
-	clp->cl_clientid.cl_boot = nn->boot_time;
+	clp->cl_clientid.cl_boot = (u32)nn->boot_time;
 	clp->cl_clientid.cl_id = nn->clientid_counter++;
 	gen_confirm(clp, nn);
 }
@@ -2292,7 +2341,7 @@ static int client_info_show(struct seq_file *m, void *v)
 					clp->cl_nii_domain.len);
 		seq_printf(m, "\nImplementation name: ");
 		seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
-		seq_printf(m, "\nImplementation time: [%ld, %ld]\n",
+		seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
 			clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
 	}
 	drop_client(clp);
@@ -2612,7 +2661,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
 	gen_clid(clp, nn);
 	kref_init(&clp->cl_nfsdfs.cl_ref);
 	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
-	clp->cl_time = get_seconds();
+	clp->cl_time = ktime_get_boottime_seconds();
 	clear_bit(0, &clp->cl_cb_slot_busy);
 	copy_verf(clp, verf);
 	memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
@@ -2946,8 +2995,7 @@ static __be32 copy_impl_id(struct nfs4_client *clp,
 	xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
 	if (!clp->cl_nii_name.data)
 		return nfserr_jukebox;
-	clp->cl_nii_time.tv_sec = exid->nii_time.tv_sec;
-	clp->cl_nii_time.tv_nsec = exid->nii_time.tv_nsec;
+	clp->cl_nii_time = exid->nii_time;
 	return 0;
 }
 
@@ -3373,7 +3421,7 @@ static __be32 nfsd4_map_bcts_dir(u32 *dir)
 	case NFS4_CDFC4_BACK_OR_BOTH:
 		*dir = NFS4_CDFC4_BOTH;
 		return nfs_ok;
-	};
+	}
 	return nfserr_inval;
 }
 
@@ -4283,7 +4331,7 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
 	last = oo->oo_last_closed_stid;
 	oo->oo_last_closed_stid = s;
 	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
-	oo->oo_time = get_seconds();
+	oo->oo_time = ktime_get_boottime_seconds();
 	spin_unlock(&nn->client_lock);
 	if (last)
 		nfs4_put_stid(&last->st_stid);
@@ -4378,7 +4426,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
 	 */
 	spin_lock(&state_lock);
 	if (dp->dl_time == 0) {
-		dp->dl_time = get_seconds();
+		dp->dl_time = ktime_get_boottime_seconds();
 		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
 	}
 	spin_unlock(&state_lock);
@@ -4490,7 +4538,8 @@ static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4
 
 static __be32 lookup_clientid(clientid_t *clid,
 		struct nfsd4_compound_state *cstate,
-		struct nfsd_net *nn)
+		struct nfsd_net *nn,
+		bool sessions)
 {
 	struct nfs4_client *found;
 
@@ -4511,7 +4560,7 @@ static __be32 lookup_clientid(clientid_t *clid,
 	 */
 	WARN_ON_ONCE(cstate->session);
 	spin_lock(&nn->client_lock);
-	found = find_confirmed_client(clid, false, nn);
+	found = find_confirmed_client(clid, sessions, nn);
 	if (!found) {
 		spin_unlock(&nn->client_lock);
 		return nfserr_expired;
@@ -4544,7 +4593,7 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
 	if (open->op_file == NULL)
 		return nfserr_jukebox;
 
-	status = lookup_clientid(clientid, cstate, nn);
+	status = lookup_clientid(clientid, cstate, nn, false);
 	if (status)
 		return status;
 	clp = cstate->clp;
@@ -4672,7 +4721,7 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
 		return 0;
 	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
 		return nfserr_inval;
-	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
+	return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
 }
 
 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
@@ -5133,7 +5182,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 
 	dprintk("process_renew(%08x/%08x): starting\n", 
 			clid->cl_boot, clid->cl_id);
-	status = lookup_clientid(clid, cstate, nn);
+	status = lookup_clientid(clid, cstate, nn, false);
 	if (status)
 		goto out;
 	clp = cstate->clp;
@@ -5184,9 +5233,8 @@ nfsd4_end_grace(struct nfsd_net *nn)
  */
 static bool clients_still_reclaiming(struct nfsd_net *nn)
 {
-	unsigned long now = get_seconds();
-	unsigned long double_grace_period_end = nn->boot_time +
-						2 * nn->nfsd4_lease;
+	time64_t double_grace_period_end = nn->boot_time +
+					   2 * nn->nfsd4_lease;
 
 	if (nn->track_reclaim_completes &&
 			atomic_read(&nn->nr_reclaim_complete) ==
@@ -5199,12 +5247,12 @@ static bool clients_still_reclaiming(struct nfsd_net *nn)
 	 * If we've given them *two* lease times to reclaim, and they're
 	 * still not done, give up:
 	 */
-	if (time_after(now, double_grace_period_end))
+	if (ktime_get_boottime_seconds() > double_grace_period_end)
 		return false;
 	return true;
 }
 
-static time_t
+static time64_t
 nfs4_laundromat(struct nfsd_net *nn)
 {
 	struct nfs4_client *clp;
@@ -5213,8 +5261,11 @@ nfs4_laundromat(struct nfsd_net *nn)
 	struct nfs4_ol_stateid *stp;
 	struct nfsd4_blocked_lock *nbl;
 	struct list_head *pos, *next, reaplist;
-	time_t cutoff = get_seconds() - nn->nfsd4_lease;
-	time_t t, new_timeo = nn->nfsd4_lease;
+	time64_t cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease;
+	time64_t t, new_timeo = nn->nfsd4_lease;
+	struct nfs4_cpntf_state *cps;
+	copy_stateid_t *cps_t;
+	int i;
 
 	dprintk("NFSD: laundromat service - starting\n");
 
@@ -5225,10 +5276,20 @@ nfs4_laundromat(struct nfsd_net *nn)
 	dprintk("NFSD: end of grace period\n");
 	nfsd4_end_grace(nn);
 	INIT_LIST_HEAD(&reaplist);
+
+	spin_lock(&nn->s2s_cp_lock);
+	idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
+		cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
+		if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
+				cps->cpntf_time > cutoff)
+			_free_cpntf_state_locked(nn, cps);
+	}
+	spin_unlock(&nn->s2s_cp_lock);
+
 	spin_lock(&nn->client_lock);
 	list_for_each_safe(pos, next, &nn->client_lru) {
 		clp = list_entry(pos, struct nfs4_client, cl_lru);
-		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
+		if (clp->cl_time > cutoff) {
 			t = clp->cl_time - cutoff;
 			new_timeo = min(new_timeo, t);
 			break;
@@ -5251,7 +5312,7 @@ nfs4_laundromat(struct nfsd_net *nn)
 	spin_lock(&state_lock);
 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
-		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
+		if (dp->dl_time > cutoff) {
 			t = dp->dl_time - cutoff;
 			new_timeo = min(new_timeo, t);
 			break;
@@ -5271,8 +5332,7 @@ nfs4_laundromat(struct nfsd_net *nn)
 	while (!list_empty(&nn->close_lru)) {
 		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
 					oo_close_lru);
-		if (time_after((unsigned long)oo->oo_time,
-			       (unsigned long)cutoff)) {
+		if (oo->oo_time > cutoff) {
 			t = oo->oo_time - cutoff;
 			new_timeo = min(new_timeo, t);
 			break;
@@ -5302,8 +5362,7 @@ nfs4_laundromat(struct nfsd_net *nn)
 	while (!list_empty(&nn->blocked_locks_lru)) {
 		nbl = list_first_entry(&nn->blocked_locks_lru,
 					struct nfsd4_blocked_lock, nbl_lru);
-		if (time_after((unsigned long)nbl->nbl_time,
-			       (unsigned long)cutoff)) {
+		if (nbl->nbl_time > cutoff) {
 			t = nbl->nbl_time - cutoff;
 			new_timeo = min(new_timeo, t);
 			break;
@@ -5320,7 +5379,7 @@ nfs4_laundromat(struct nfsd_net *nn)
 		free_blocked_lock(nbl);
 	}
 out:
-	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
+	new_timeo = max_t(time64_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
 	return new_timeo;
 }
 
@@ -5330,13 +5389,13 @@ static void laundromat_main(struct work_struct *);
 static void
 laundromat_main(struct work_struct *laundry)
 {
-	time_t t;
+	time64_t t;
 	struct delayed_work *dwork = to_delayed_work(laundry);
 	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
 					   laundromat_work);
 
 	t = nfs4_laundromat(nn);
-	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
+	dprintk("NFSD: laundromat_main - sleeping for %lld seconds\n", t);
 	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
 }
 
@@ -5521,7 +5580,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
 		CLOSE_STATEID(stateid))
 		return nfserr_bad_stateid;
-	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
+	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn,
+				 false);
 	if (status == nfserr_stale_clientid) {
 		if (cstate->session)
 			return nfserr_bad_stateid;
@@ -5600,6 +5660,85 @@ nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
 out:
 	return status;
 }
+static void
+_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
+{
+	WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
+	if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
+		return;
+	list_del(&cps->cp_list);
+	idr_remove(&nn->s2s_cp_stateids,
+		   cps->cp_stateid.stid.si_opaque.so_id);
+	kfree(cps);
+}
+/*
+ * A READ from an inter server to server COPY will have a
+ * copy stateid. Look up the copy notify stateid from the
+ * idr structure and take a reference on it.
+ */
+__be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
+			  struct nfs4_client *clp,
+			  struct nfs4_cpntf_state **cps)
+{
+	copy_stateid_t *cps_t;
+	struct nfs4_cpntf_state *state = NULL;
+
+	if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
+		return nfserr_bad_stateid;
+	spin_lock(&nn->s2s_cp_lock);
+	cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
+	if (cps_t) {
+		state = container_of(cps_t, struct nfs4_cpntf_state,
+				     cp_stateid);
+		if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
+			state = NULL;
+			goto unlock;
+		}
+		if (!clp)
+			refcount_inc(&state->cp_stateid.sc_count);
+		else
+			_free_cpntf_state_locked(nn, state);
+	}
+unlock:
+	spin_unlock(&nn->s2s_cp_lock);
+	if (!state)
+		return nfserr_bad_stateid;
+	if (!clp && state)
+		*cps = state;
+	return 0;
+}
+
+static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
+			       struct nfs4_stid **stid)
+{
+	__be32 status;
+	struct nfs4_cpntf_state *cps = NULL;
+	struct nfsd4_compound_state cstate;
+
+	status = manage_cpntf_state(nn, st, NULL, &cps);
+	if (status)
+		return status;
+
+	cps->cpntf_time = ktime_get_boottime_seconds();
+	memset(&cstate, 0, sizeof(cstate));
+	status = lookup_clientid(&cps->cp_p_clid, &cstate, nn, true);
+	if (status)
+		goto out;
+	status = nfsd4_lookup_stateid(&cstate, &cps->cp_p_stateid,
+				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
+				stid, nn);
+	put_client_renew(cstate.clp);
+out:
+	nfs4_put_cpntf_state(nn, cps);
+	return status;
+}
+
+void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
+{
+	spin_lock(&nn->s2s_cp_lock);
+	_free_cpntf_state_locked(nn, cps);
+	spin_unlock(&nn->s2s_cp_lock);
+}
 
 /*
  * Checks for stateid operations
@@ -5607,7 +5746,8 @@ nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
 __be32
 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
 		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
-		stateid_t *stateid, int flags, struct nfsd_file **nfp)
+		stateid_t *stateid, int flags, struct nfsd_file **nfp,
+		struct nfs4_stid **cstid)
 {
 	struct inode *ino = d_inode(fhp->fh_dentry);
 	struct net *net = SVC_NET(rqstp);
@@ -5629,6 +5769,8 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
 	status = nfsd4_lookup_stateid(cstate, stateid,
 				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
 				&s, nn);
+	if (status == nfserr_bad_stateid)
+		status = find_cpntf_state(nn, stateid, &s);
 	if (status)
 		return status;
 	status = nfsd4_stid_check_stateid_generation(stateid, s,
@@ -5656,8 +5798,12 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
 	if (status == nfs_ok && nfp)
 		status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
 out:
-	if (s)
-		nfs4_put_stid(s);
+	if (s) {
+		if (!status && cstid)
+			*cstid = s;
+		else
+			nfs4_put_stid(s);
+	}
 	return status;
 }
 
@@ -6550,7 +6696,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	}
 
 	if (fl_flags & FL_SLEEP) {
-		nbl->nbl_time = jiffies;
+		nbl->nbl_time = ktime_get_boottime_seconds();
 		spin_lock(&nn->blocked_locks_lock);
 		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
 		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
@@ -6657,7 +6803,8 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		 return nfserr_inval;
 
 	if (!nfsd4_has_session(cstate)) {
-		status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
+		status = lookup_clientid(&lockt->lt_clientid, cstate, nn,
+					 false);
 		if (status)
 			goto out;
 	}
@@ -6841,7 +6988,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
 	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
 		clid->cl_boot, clid->cl_id);
 
-	status = lookup_clientid(clid, cstate, nn);
+	status = lookup_clientid(clid, cstate, nn, false);
 	if (status)
 		return status;
 
@@ -6988,7 +7135,7 @@ nfs4_check_open_reclaim(clientid_t *clid,
 	__be32 status;
 
 	/* find clientid in conf_id_hashtbl */
-	status = lookup_clientid(clid, cstate, nn);
+	status = lookup_clientid(clid, cstate, nn, false);
 	if (status)
 		return nfserr_reclaim_bad;
 
@@ -7641,7 +7788,7 @@ static int nfs4_state_create_net(struct net *net)
 		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
 	nn->conf_name_tree = RB_ROOT;
 	nn->unconf_name_tree = RB_ROOT;
-	nn->boot_time = get_seconds();
+	nn->boot_time = ktime_get_real_seconds();
 	nn->grace_ended = false;
 	nn->nfsd4_manager.block_opens = true;
 	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
@@ -7710,7 +7857,7 @@ nfs4_state_start_net(struct net *net)
 	nfsd4_client_tracking_init(net);
 	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
 		goto skip_grace;
-	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
+	printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
 	       nn->nfsd4_grace, net->ns.inum);
 	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
 	return 0;
@@ -7786,7 +7933,8 @@ nfs4_state_shutdown(void)
 static void
 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
 {
-	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
+	if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
+	    CURRENT_STATEID(stateid))
 		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
 }
 
@@ -7795,14 +7943,14 @@ put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
 {
 	if (cstate->minorversion) {
 		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
-		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
+		SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
 	}
 }
 
 void
 clear_current_stateid(struct nfsd4_compound_state *cstate)
 {
-	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
+	CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
 }
 
 /*
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index d2dc4c0..9761512 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -40,6 +40,7 @@
 #include <linux/utsname.h>
 #include <linux/pagemap.h>
 #include <linux/sunrpc/svcauth_gss.h>
+#include <linux/sunrpc/addr.h>
 
 #include "idmap.h"
 #include "acl.h"
@@ -1744,10 +1745,47 @@ nfsd4_decode_clone(struct nfsd4_compoundargs *argp, struct nfsd4_clone *clone)
 	DECODE_TAIL;
 }
 
+static __be32 nfsd4_decode_nl4_server(struct nfsd4_compoundargs *argp,
+				      struct nl4_server *ns)
+{
+	DECODE_HEAD;
+	struct nfs42_netaddr *naddr;
+
+	READ_BUF(4);
+	ns->nl4_type = be32_to_cpup(p++);
+
+	/* currently only 1 inter-server source server is supported */
+	switch (ns->nl4_type) {
+	case NL4_NETADDR:
+		naddr = &ns->u.nl4_addr;
+
+		READ_BUF(4);
+		naddr->netid_len = be32_to_cpup(p++);
+		if (naddr->netid_len > RPCBIND_MAXNETIDLEN)
+			goto xdr_error;
+
+		READ_BUF(naddr->netid_len + 4); /* 4 for uaddr len */
+		COPYMEM(naddr->netid, naddr->netid_len);
+
+		naddr->addr_len = be32_to_cpup(p++);
+		if (naddr->addr_len > RPCBIND_MAXUADDRLEN)
+			goto xdr_error;
+
+		READ_BUF(naddr->addr_len);
+		COPYMEM(naddr->addr, naddr->addr_len);
+		break;
+	default:
+		goto xdr_error;
+	}
+	DECODE_TAIL;
+}
+
 static __be32
 nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
 {
 	DECODE_HEAD;
+	struct nl4_server *ns_dummy;
+	int i, count;
 
 	status = nfsd4_decode_stateid(argp, &copy->cp_src_stateid);
 	if (status)
@@ -1762,7 +1800,32 @@ nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
 	p = xdr_decode_hyper(p, &copy->cp_count);
 	p++; /* ca_consecutive: we always do consecutive copies */
 	copy->cp_synchronous = be32_to_cpup(p++);
-	/* tmp = be32_to_cpup(p); Source server list not supported */
+
+	count = be32_to_cpup(p++);
+
+	copy->cp_intra = false;
+	if (count == 0) { /* intra-server copy */
+		copy->cp_intra = true;
+		goto intra;
+	}
+
+	/* decode all the supplied server addresses but use only the first */
+	status = nfsd4_decode_nl4_server(argp, &copy->cp_src);
+	if (status)
+		return status;
+
+	ns_dummy = kmalloc(sizeof(struct nl4_server), GFP_KERNEL);
+	if (ns_dummy == NULL)
+		return nfserrno(-ENOMEM);
+	for (i = 0; i < count - 1; i++) {
+		status = nfsd4_decode_nl4_server(argp, ns_dummy);
+		if (status) {
+			kfree(ns_dummy);
+			return status;
+		}
+	}
+	kfree(ns_dummy);
+intra:
 
 	DECODE_TAIL;
 }
@@ -1775,6 +1838,18 @@ nfsd4_decode_offload_status(struct nfsd4_compoundargs *argp,
 }
 
 static __be32
+nfsd4_decode_copy_notify(struct nfsd4_compoundargs *argp,
+			 struct nfsd4_copy_notify *cn)
+{
+	int status;
+
+	status = nfsd4_decode_stateid(argp, &cn->cpn_src_stateid);
+	if (status)
+		return status;
+	return nfsd4_decode_nl4_server(argp, &cn->cpn_dst);
+}
+
+static __be32
 nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
 {
 	DECODE_HEAD;
@@ -1875,7 +1950,7 @@ static const nfsd4_dec nfsd4_dec_ops[] = {
 	/* new operations for NFSv4.2 */
 	[OP_ALLOCATE]		= (nfsd4_dec)nfsd4_decode_fallocate,
 	[OP_COPY]		= (nfsd4_dec)nfsd4_decode_copy,
-	[OP_COPY_NOTIFY]	= (nfsd4_dec)nfsd4_decode_notsupp,
+	[OP_COPY_NOTIFY]	= (nfsd4_dec)nfsd4_decode_copy_notify,
 	[OP_DEALLOCATE]		= (nfsd4_dec)nfsd4_decode_fallocate,
 	[OP_IO_ADVISE]		= (nfsd4_dec)nfsd4_decode_notsupp,
 	[OP_LAYOUTERROR]	= (nfsd4_dec)nfsd4_decode_notsupp,
@@ -2024,11 +2099,11 @@ static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
  */
 static __be32 *encode_time_delta(__be32 *p, struct inode *inode)
 {
-	struct timespec ts;
+	struct timespec64 ts;
 	u32 ns;
 
 	ns = max_t(u32, NSEC_PER_SEC/HZ, inode->i_sb->s_time_gran);
-	ts = ns_to_timespec(ns);
+	ts = ns_to_timespec64(ns);
 
 	p = xdr_encode_hyper(p, ts.tv_sec);
 	*p++ = cpu_to_be32(ts.tv_nsec);
@@ -4244,6 +4319,46 @@ nfsd42_encode_write_res(struct nfsd4_compoundres *resp,
 }
 
 static __be32
+nfsd42_encode_nl4_server(struct nfsd4_compoundres *resp, struct nl4_server *ns)
+{
+	struct xdr_stream *xdr = &resp->xdr;
+	struct nfs42_netaddr *addr;
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p++ = cpu_to_be32(ns->nl4_type);
+
+	switch (ns->nl4_type) {
+	case NL4_NETADDR:
+		addr = &ns->u.nl4_addr;
+
+		/* netid_len, netid, uaddr_len, uaddr (port included
+		 * in RPCBIND_MAXUADDRLEN)
+		 */
+		p = xdr_reserve_space(xdr,
+			4 /* netid len */ +
+			(XDR_QUADLEN(addr->netid_len) * 4) +
+			4 /* uaddr len */ +
+			(XDR_QUADLEN(addr->addr_len) * 4));
+		if (!p)
+			return nfserr_resource;
+
+		*p++ = cpu_to_be32(addr->netid_len);
+		p = xdr_encode_opaque_fixed(p, addr->netid,
+					    addr->netid_len);
+		*p++ = cpu_to_be32(addr->addr_len);
+		p = xdr_encode_opaque_fixed(p, addr->addr,
+					addr->addr_len);
+		break;
+	default:
+		WARN_ON_ONCE(ns->nl4_type != NL4_NETADDR);
+		return nfserr_inval;
+	}
+
+	return 0;
+}
+
+static __be32
 nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
 		  struct nfsd4_copy *copy)
 {
@@ -4277,6 +4392,40 @@ nfsd4_encode_offload_status(struct nfsd4_compoundres *resp, __be32 nfserr,
 }
 
 static __be32
+nfsd4_encode_copy_notify(struct nfsd4_compoundres *resp, __be32 nfserr,
+			 struct nfsd4_copy_notify *cn)
+{
+	struct xdr_stream *xdr = &resp->xdr;
+	__be32 *p;
+
+	if (nfserr)
+		return nfserr;
+
+	/* 8 sec, 4 nsec */
+	p = xdr_reserve_space(xdr, 12);
+	if (!p)
+		return nfserr_resource;
+
+	/* cnr_lease_time */
+	p = xdr_encode_hyper(p, cn->cpn_sec);
+	*p++ = cpu_to_be32(cn->cpn_nsec);
+
+	/* cnr_stateid */
+	nfserr = nfsd4_encode_stateid(xdr, &cn->cpn_cnr_stateid);
+	if (nfserr)
+		return nfserr;
+
+	/* cnr_src.nl_nsvr */
+	p = xdr_reserve_space(xdr, 4);
+	if (!p)
+		return nfserr_resource;
+
+	*p++ = cpu_to_be32(1);
+
+	return nfsd42_encode_nl4_server(resp, &cn->cpn_src);
+}
+
+static __be32
 nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
 		  struct nfsd4_seek *seek)
 {
@@ -4373,7 +4522,7 @@ static const nfsd4_enc nfsd4_enc_ops[] = {
 	/* NFSv4.2 operations */
 	[OP_ALLOCATE]		= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_COPY]		= (nfsd4_enc)nfsd4_encode_copy,
-	[OP_COPY_NOTIFY]	= (nfsd4_enc)nfsd4_encode_noop,
+	[OP_COPY_NOTIFY]	= (nfsd4_enc)nfsd4_encode_copy_notify,
 	[OP_DEALLOCATE]		= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_IO_ADVISE]		= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_LAYOUTERROR]	= (nfsd4_enc)nfsd4_encode_noop,
@@ -4500,8 +4649,6 @@ nfsd4_encode_replay(struct xdr_stream *xdr, struct nfsd4_op *op)
 	__be32 *p;
 	struct nfs4_replay *rp = op->replay;
 
-	BUG_ON(!rp);
-
 	p = xdr_reserve_space(xdr, 8 + rp->rp_buflen);
 	if (!p) {
 		WARN_ON_ONCE(1);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 11b42c5..e109a10 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -157,11 +157,11 @@ static int exports_proc_open(struct inode *inode, struct file *file)
 	return exports_net_open(current->nsproxy->net_ns, file);
 }
 
-static const struct file_operations exports_proc_operations = {
-	.open		= exports_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
+static const struct proc_ops exports_proc_ops = {
+	.proc_open	= exports_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
 };
 
 static int exports_nfsd_open(struct inode *inode, struct file *file)
@@ -956,7 +956,7 @@ static ssize_t write_maxconn(struct file *file, char *buf, size_t size)
 
 #ifdef CONFIG_NFSD_V4
 static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size,
-				  time_t *time, struct nfsd_net *nn)
+				  time64_t *time, struct nfsd_net *nn)
 {
 	char *mesg = buf;
 	int rv, i;
@@ -984,11 +984,11 @@ static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size,
 		*time = i;
 	}
 
-	return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%ld\n", *time);
+	return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%lld\n", *time);
 }
 
 static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size,
-				time_t *time, struct nfsd_net *nn)
+				time64_t *time, struct nfsd_net *nn)
 {
 	ssize_t rv;
 
@@ -1431,8 +1431,7 @@ static int create_proc_exports_entry(void)
 	entry = proc_mkdir("fs/nfs", NULL);
 	if (!entry)
 		return -ENOMEM;
-	entry = proc_create("exports", 0, entry,
-				 &exports_proc_operations);
+	entry = proc_create("exports", 0, entry, &exports_proc_ops);
 	if (!entry) {
 		remove_proc_entry("fs/nfs", NULL);
 		return -ENOMEM;
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 57b93d9..2ab5569 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -19,6 +19,7 @@
 #include <linux/sunrpc/svc.h>
 #include <linux/sunrpc/svc_xprt.h>
 #include <linux/sunrpc/msg_prot.h>
+#include <linux/sunrpc/addr.h>
 
 #include <uapi/linux/nfsd/debug.h>
 
@@ -142,7 +143,6 @@ int nfs4_state_start(void);
 int nfs4_state_start_net(struct net *net);
 void nfs4_state_shutdown(void);
 void nfs4_state_shutdown_net(struct net *net);
-void nfs4_reset_lease(time_t leasetime);
 int nfs4_reset_recoverydir(char *recdir);
 char * nfs4_recoverydir(void);
 bool nfsd4_spo_must_allow(struct svc_rqst *rqstp);
@@ -153,7 +153,6 @@ static inline int nfs4_state_start(void) { return 0; }
 static inline int nfs4_state_start_net(struct net *net) { return 0; }
 static inline void nfs4_state_shutdown(void) { }
 static inline void nfs4_state_shutdown_net(struct net *net) { }
-static inline void nfs4_reset_lease(time_t leasetime) { }
 static inline int nfs4_reset_recoverydir(char *recdir) { return 0; }
 static inline char * nfs4_recoverydir(void) {return NULL; }
 static inline bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
@@ -387,6 +386,37 @@ void		nfsd_lockd_shutdown(void);
 
 extern const u32 nfsd_suppattrs[3][3];
 
+static inline __be32 nfsd4_set_netaddr(struct sockaddr *addr,
+				    struct nfs42_netaddr *netaddr)
+{
+	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
+	unsigned int port;
+	size_t ret_addr, ret_port;
+
+	switch (addr->sa_family) {
+	case AF_INET:
+		port = ntohs(sin->sin_port);
+		sprintf(netaddr->netid, "tcp");
+		netaddr->netid_len = 3;
+		break;
+	case AF_INET6:
+		port = ntohs(sin6->sin6_port);
+		sprintf(netaddr->netid, "tcp6");
+		netaddr->netid_len = 4;
+		break;
+	default:
+		return nfserr_inval;
+	}
+	ret_addr = rpc_ntop(addr, netaddr->addr, sizeof(netaddr->addr));
+	ret_port = snprintf(netaddr->addr + ret_addr,
+			    RPCBIND_MAXUADDRLEN + 1 - ret_addr,
+			    ".%u.%u", port >> 8, port & 0xff);
+	WARN_ON(ret_port >= RPCBIND_MAXUADDRLEN + 1 - ret_addr);
+	netaddr->addr_len = ret_addr + ret_port;
+	return 0;
+}
+
 static inline bool bmval_is_subset(const u32 *bm1, const u32 *bm2)
 {
 	return !((bm1[0] & ~bm2[0]) ||
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 755e256..56cfbc3 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -35,15 +35,15 @@ typedef struct svc_fh {
 
 	bool			fh_locked;	/* inode locked by us */
 	bool			fh_want_write;	/* remount protection taken */
-
+	int			fh_flags;	/* FH flags */
 #ifdef CONFIG_NFSD_V3
 	bool			fh_post_saved;	/* post-op attrs saved */
 	bool			fh_pre_saved;	/* pre-op attrs saved */
 
 	/* Pre-op attributes saved during fh_lock */
 	__u64			fh_pre_size;	/* size before operation */
-	struct timespec		fh_pre_mtime;	/* mtime before oper */
-	struct timespec		fh_pre_ctime;	/* ctime before oper */
+	struct timespec64	fh_pre_mtime;	/* mtime before oper */
+	struct timespec64	fh_pre_ctime;	/* ctime before oper */
 	/*
 	 * pre-op nfsv4 change attr: note must check IS_I_VERSION(inode)
 	 *  to find out if it is valid.
@@ -56,6 +56,9 @@ typedef struct svc_fh {
 #endif /* CONFIG_NFSD_V3 */
 
 } svc_fh;
+#define NFSD4_FH_FOREIGN (1<<0)
+#define SET_FH_FLAG(c, f) ((c)->fh_flags |= (f))
+#define HAS_FH_FLAG(c, f) ((c)->fh_flags & (f))
 
 enum nfsd_fsid {
 	FSID_DEV = 0,
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index c83ddac..543bbe0 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -94,7 +94,7 @@ nfsd_proc_setattr(struct svc_rqst *rqstp)
 		 * Solaris, at least, doesn't seem to care what the time
 		 * request is.  We require it be within 30 minutes of now.
 		 */
-		time_t delta = iap->ia_atime.tv_sec - get_seconds();
+		time64_t delta = iap->ia_atime.tv_sec - ktime_get_real_seconds();
 
 		nfserr = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
 		if (nfserr)
@@ -113,7 +113,7 @@ nfsd_proc_setattr(struct svc_rqst *rqstp)
 		}
 	}
 
-	nfserr = nfsd_setattr(rqstp, fhp, iap, 0, (time_t)0);
+	nfserr = nfsd_setattr(rqstp, fhp, iap, 0, (time64_t)0);
 done:
 	return nfsd_return_attrs(nfserr, resp);
 }
@@ -226,7 +226,7 @@ nfsd_proc_write(struct svc_rqst *rqstp)
 		return nfserr_io;
 	nfserr = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh),
 			    argp->offset, rqstp->rq_vec, nvecs,
-			    &cnt, NFS_DATA_SYNC);
+			    &cnt, NFS_DATA_SYNC, NULL);
 	return nfsd_return_attrs(nfserr, resp);
 }
 
@@ -380,7 +380,7 @@ nfsd_proc_create(struct svc_rqst *rqstp)
 		 */
 		attr->ia_valid &= ATTR_SIZE;
 		if (attr->ia_valid)
-			nfserr = nfsd_setattr(rqstp, newfhp, attr, 0, (time_t)0);
+			nfserr = nfsd_setattr(rqstp, newfhp, attr, 0, (time64_t)0);
 	}
 
 out_unlock:
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index e8bee8f..3b77b90 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -31,6 +31,12 @@
 
 #define NFSDDBG_FACILITY	NFSDDBG_SVC
 
+bool inter_copy_offload_enable;
+EXPORT_SYMBOL_GPL(inter_copy_offload_enable);
+module_param(inter_copy_offload_enable, bool, 0644);
+MODULE_PARM_DESC(inter_copy_offload_enable,
+		 "Enable inter server to server copy offload. Default: false");
+
 extern struct svc_program	nfsd_program;
 static int			nfsd(void *vrqstp);
 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
@@ -391,20 +397,25 @@ static int nfsd_startup_net(int nrservs, struct net *net, const struct cred *cre
 		ret = lockd_up(net, cred);
 		if (ret)
 			goto out_socks;
-		nn->lockd_up = 1;
+		nn->lockd_up = true;
 	}
 
-	ret = nfs4_state_start_net(net);
+	ret = nfsd_file_cache_start_net(net);
 	if (ret)
 		goto out_lockd;
+	ret = nfs4_state_start_net(net);
+	if (ret)
+		goto out_filecache;
 
 	nn->nfsd_net_up = true;
 	return 0;
 
+out_filecache:
+	nfsd_file_cache_shutdown_net(net);
 out_lockd:
 	if (nn->lockd_up) {
 		lockd_down(net);
-		nn->lockd_up = 0;
+		nn->lockd_up = false;
 	}
 out_socks:
 	nfsd_shutdown_generic();
@@ -415,11 +426,11 @@ static void nfsd_shutdown_net(struct net *net)
 {
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-	nfsd_file_cache_purge(net);
+	nfsd_file_cache_shutdown_net(net);
 	nfs4_state_shutdown_net(net);
 	if (nn->lockd_up) {
 		lockd_down(net);
-		nn->lockd_up = 0;
+		nn->lockd_up = false;
 	}
 	nn->nfsd_net_up = false;
 	nfsd_shutdown_generic();
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index d61b83b..68d3f30 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -56,6 +56,14 @@ typedef struct {
 	stateid_opaque_t        si_opaque;
 } stateid_t;
 
+typedef struct {
+	stateid_t		stid;
+#define NFS4_COPY_STID 1
+#define NFS4_COPYNOTIFY_STID 2
+	unsigned char		sc_type;
+	refcount_t		sc_count;
+} copy_stateid_t;
+
 #define STATEID_FMT	"(%08x/%08x/%08x/%08x)"
 #define STATEID_VAL(s) \
 	(s)->si_opaque.so_clid.cl_boot, \
@@ -96,6 +104,7 @@ struct nfs4_stid {
 #define NFS4_REVOKED_DELEG_STID 16
 #define NFS4_CLOSED_DELEG_STID 32
 #define NFS4_LAYOUT_STID 64
+	struct list_head	sc_cp_list;
 	unsigned char		sc_type;
 	stateid_t		sc_stateid;
 	spinlock_t		sc_lock;
@@ -104,6 +113,17 @@ struct nfs4_stid {
 	void			(*sc_free)(struct nfs4_stid *);
 };
 
+/* Keep a list of stateids issued by COPY_NOTIFY; associate them with the
+ * parent OPEN/LOCK/DELEG stateid.
+ */
+struct nfs4_cpntf_state {
+	copy_stateid_t		cp_stateid;
+	struct list_head	cp_list;	/* per parent nfs4_stid */
+	stateid_t		cp_p_stateid;	/* copy of parent's stateid */
+	clientid_t		cp_p_clid;	/* copy of parent's clid */
+	time64_t		cpntf_time;	/* last time stateid used */
+};
+
 /*
  * Represents a delegation stateid. The nfs4_client holds references to these
  * and they are put when it is being destroyed or when the delegation is
@@ -132,7 +152,7 @@ struct nfs4_delegation {
 	struct list_head	dl_recall_lru;  /* delegation recalled */
 	struct nfs4_clnt_odstate *dl_clnt_odstate;
 	u32			dl_type;
-	time_t			dl_time;
+	time64_t		dl_time;
 /* For recall: */
 	int			dl_retries;
 	struct nfsd4_callback	dl_recall;
@@ -310,7 +330,7 @@ struct nfs4_client {
 #endif
 	struct xdr_netobj	cl_name; 	/* id generated by client */
 	nfs4_verifier		cl_verifier; 	/* generated by client */
-	time_t                  cl_time;        /* time of last lease renewal */
+	time64_t		cl_time;	/* time of last lease renewal */
 	struct sockaddr_storage	cl_addr; 	/* client ipaddress */
 	bool			cl_mach_cred;	/* SP4_MACH_CRED in force */
 	struct svc_cred		cl_cred; 	/* setclientid principal */
@@ -320,7 +340,7 @@ struct nfs4_client {
 	/* NFSv4.1 client implementation id: */
 	struct xdr_netobj	cl_nii_domain;
 	struct xdr_netobj	cl_nii_name;
-	struct timespec		cl_nii_time;
+	struct timespec64	cl_nii_time;
 
 	/* for v4.0 and v4.1 callbacks: */
 	struct nfs4_cb_conn	cl_cb_conn;
@@ -449,7 +469,7 @@ struct nfs4_openowner {
 	 */
 	struct list_head	oo_close_lru;
 	struct nfs4_ol_stateid *oo_last_closed_stid;
-	time_t			oo_time; /* time of placement on so_close_lru */
+	time64_t		oo_time; /* time of placement on so_close_lru */
 #define NFS4_OO_CONFIRMED   1
 	unsigned char		oo_flags;
 };
@@ -606,7 +626,7 @@ static inline bool nfsd4_stateid_generation_after(stateid_t *a, stateid_t *b)
 struct nfsd4_blocked_lock {
 	struct list_head	nbl_list;
 	struct list_head	nbl_lru;
-	unsigned long		nbl_time;
+	time64_t		nbl_time;
 	struct file_lock	nbl_lock;
 	struct knfsd_fh		nbl_fh;
 	struct nfsd4_callback	nbl_cb;
@@ -618,14 +638,17 @@ struct nfsd4_copy;
 
 extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
 		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
-		stateid_t *stateid, int flags, struct nfsd_file **filp);
+		stateid_t *stateid, int flags, struct nfsd_file **filp,
+		struct nfs4_stid **cstid);
 __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
 		     stateid_t *stateid, unsigned char typemask,
 		     struct nfs4_stid **s, struct nfsd_net *nn);
 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
 				  void (*sc_free)(struct nfs4_stid *));
-int nfs4_init_cp_state(struct nfsd_net *nn, struct nfsd4_copy *copy);
-void nfs4_free_cp_state(struct nfsd4_copy *copy);
+int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy);
+void nfs4_free_copy_state(struct nfsd4_copy *copy);
+struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
+			struct nfs4_stid *p_stid);
 void nfs4_unhash_stid(struct nfs4_stid *s);
 void nfs4_put_stid(struct nfs4_stid *s);
 void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
@@ -655,6 +678,11 @@ void put_nfs4_file(struct nfs4_file *fi);
 extern void nfs4_put_copy(struct nfsd4_copy *copy);
 extern struct nfsd4_copy *
 find_async_copy(struct nfs4_client *clp, stateid_t *staetid);
+extern void nfs4_put_cpntf_state(struct nfsd_net *nn,
+				 struct nfs4_cpntf_state *cps);
+extern __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
+				 struct nfs4_client *clp,
+				 struct nfs4_cpntf_state **cps);
 static inline void get_nfs4_file(struct nfs4_file *fi)
 {
 	refcount_inc(&fi->fi_ref);
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index 9bce3b9..b1bc582 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -84,17 +84,17 @@ static int nfsd_proc_open(struct inode *inode, struct file *file)
 	return single_open(file, nfsd_proc_show, NULL);
 }
 
-static const struct file_operations nfsd_proc_fops = {
-	.open = nfsd_proc_open,
-	.read  = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
+static const struct proc_ops nfsd_proc_ops = {
+	.proc_open	= nfsd_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
 };
 
 void
 nfsd_stat_init(void)
 {
-	svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_fops);
+	svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_ops);
 }
 
 void
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index ffc78a0..06dd0d3 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -166,6 +166,12 @@ DEFINE_STATEID_EVENT(layout_recall_done);
 DEFINE_STATEID_EVENT(layout_recall_fail);
 DEFINE_STATEID_EVENT(layout_recall_release);
 
+TRACE_DEFINE_ENUM(NFSD_FILE_HASHED);
+TRACE_DEFINE_ENUM(NFSD_FILE_PENDING);
+TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_READ);
+TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_WRITE);
+TRACE_DEFINE_ENUM(NFSD_FILE_REFERENCED);
+
 #define show_nf_flags(val)						\
 	__print_flags(val, "|",						\
 		{ 1 << NFSD_FILE_HASHED,	"HASHED" },		\
@@ -195,7 +201,7 @@ DECLARE_EVENT_CLASS(nfsd_file_class,
 	TP_fast_assign(
 		__entry->nf_hashval = nf->nf_hashval;
 		__entry->nf_inode = nf->nf_inode;
-		__entry->nf_ref = atomic_read(&nf->nf_ref);
+		__entry->nf_ref = refcount_read(&nf->nf_ref);
 		__entry->nf_flags = nf->nf_flags;
 		__entry->nf_may = nf->nf_may;
 		__entry->nf_file = nf->nf_file;
@@ -228,7 +234,7 @@ TRACE_EVENT(nfsd_file_acquire,
 	TP_ARGS(rqstp, hash, inode, may_flags, nf, status),
 
 	TP_STRUCT__entry(
-		__field(__be32, xid)
+		__field(u32, xid)
 		__field(unsigned int, hash)
 		__field(void *, inode)
 		__field(unsigned int, may_flags)
@@ -236,27 +242,27 @@ TRACE_EVENT(nfsd_file_acquire,
 		__field(unsigned long, nf_flags)
 		__field(unsigned char, nf_may)
 		__field(struct file *, nf_file)
-		__field(__be32, status)
+		__field(u32, status)
 	),
 
 	TP_fast_assign(
-		__entry->xid = rqstp->rq_xid;
+		__entry->xid = be32_to_cpu(rqstp->rq_xid);
 		__entry->hash = hash;
 		__entry->inode = inode;
 		__entry->may_flags = may_flags;
-		__entry->nf_ref = nf ? atomic_read(&nf->nf_ref) : 0;
+		__entry->nf_ref = nf ? refcount_read(&nf->nf_ref) : 0;
 		__entry->nf_flags = nf ? nf->nf_flags : 0;
 		__entry->nf_may = nf ? nf->nf_may : 0;
 		__entry->nf_file = nf ? nf->nf_file : NULL;
-		__entry->status = status;
+		__entry->status = be32_to_cpu(status);
 	),
 
 	TP_printk("xid=0x%x hash=0x%x inode=0x%p may_flags=%s ref=%d nf_flags=%s nf_may=%s nf_file=0x%p status=%u",
-			be32_to_cpu(__entry->xid), __entry->hash, __entry->inode,
+			__entry->xid, __entry->hash, __entry->inode,
 			show_nf_may(__entry->may_flags), __entry->nf_ref,
 			show_nf_flags(__entry->nf_flags),
 			show_nf_may(__entry->nf_may), __entry->nf_file,
-			be32_to_cpu(__entry->status))
+			__entry->status)
 );
 
 DECLARE_EVENT_CLASS(nfsd_file_search_class,
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c0dc491..0aa02eb 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -280,19 +280,25 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
  * Commit metadata changes to stable storage.
  */
 static int
-commit_metadata(struct svc_fh *fhp)
+commit_inode_metadata(struct inode *inode)
 {
-	struct inode *inode = d_inode(fhp->fh_dentry);
 	const struct export_operations *export_ops = inode->i_sb->s_export_op;
 
-	if (!EX_ISSYNC(fhp->fh_export))
-		return 0;
-
 	if (export_ops->commit_metadata)
 		return export_ops->commit_metadata(inode);
 	return sync_inode_metadata(inode, 1);
 }
 
+static int
+commit_metadata(struct svc_fh *fhp)
+{
+	struct inode *inode = d_inode(fhp->fh_dentry);
+
+	if (!EX_ISSYNC(fhp->fh_export))
+		return 0;
+	return commit_inode_metadata(inode);
+}
+
 /*
  * Go over the attributes and take care of the small differences between
  * NFS semantics and what Linux expects.
@@ -358,7 +364,7 @@ nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
  */
 __be32
 nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
-	     int check_guard, time_t guardtime)
+	     int check_guard, time64_t guardtime)
 {
 	struct dentry	*dentry;
 	struct inode	*inode;
@@ -524,23 +530,39 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
 }
 #endif
 
-__be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
-		u64 dst_pos, u64 count, bool sync)
+__be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos,
+		struct nfsd_file *nf_dst, u64 dst_pos, u64 count, bool sync)
 {
+	struct file *src = nf_src->nf_file;
+	struct file *dst = nf_dst->nf_file;
 	loff_t cloned;
+	__be32 ret = 0;
 
+	down_write(&nf_dst->nf_rwsem);
 	cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
-	if (cloned < 0)
-		return nfserrno(cloned);
-	if (count && cloned != count)
-		return nfserrno(-EINVAL);
+	if (cloned < 0) {
+		ret = nfserrno(cloned);
+		goto out_err;
+	}
+	if (count && cloned != count) {
+		ret = nfserrno(-EINVAL);
+		goto out_err;
+	}
 	if (sync) {
 		loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
 		int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);
-		if (status < 0)
-			return nfserrno(status);
+
+		if (!status)
+			status = commit_inode_metadata(file_inode(src));
+		if (status < 0) {
+			nfsd_reset_boot_verifier(net_generic(nf_dst->nf_net,
+						 nfsd_net_id));
+			ret = nfserrno(status);
+		}
 	}
-	return 0;
+out_err:
+	up_write(&nf_dst->nf_rwsem);
+	return ret;
 }
 
 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
@@ -938,10 +960,12 @@ static int wait_for_concurrent_writes(struct file *file)
 }
 
 __be32
-nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
 				loff_t offset, struct kvec *vec, int vlen,
-				unsigned long *cnt, int stable)
+				unsigned long *cnt, int stable,
+				__be32 *verf)
 {
+	struct file		*file = nf->nf_file;
 	struct svc_export	*exp;
 	struct iov_iter		iter;
 	__be32			nfserr;
@@ -972,9 +996,28 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 		flags |= RWF_SYNC;
 
 	iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt);
-	host_err = vfs_iter_write(file, &iter, &pos, flags);
-	if (host_err < 0)
+	if (flags & RWF_SYNC) {
+		down_write(&nf->nf_rwsem);
+		host_err = vfs_iter_write(file, &iter, &pos, flags);
+		if (host_err < 0)
+			nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp),
+						 nfsd_net_id));
+		up_write(&nf->nf_rwsem);
+	} else {
+		down_read(&nf->nf_rwsem);
+		if (verf)
+			nfsd_copy_boot_verifier(verf,
+					net_generic(SVC_NET(rqstp),
+					nfsd_net_id));
+		host_err = vfs_iter_write(file, &iter, &pos, flags);
+		up_read(&nf->nf_rwsem);
+	}
+	if (host_err < 0) {
+		nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp),
+					 nfsd_net_id));
 		goto out_nfserr;
+	}
+	*cnt = host_err;
 	nfsdstats.io_write += *cnt;
 	fsnotify_modify(file);
 
@@ -1036,7 +1079,8 @@ __be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
  */
 __be32
 nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
-	   struct kvec *vec, int vlen, unsigned long *cnt, int stable)
+	   struct kvec *vec, int vlen, unsigned long *cnt, int stable,
+	   __be32 *verf)
 {
 	struct nfsd_file *nf;
 	__be32 err;
@@ -1047,8 +1091,8 @@ nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
 	if (err)
 		goto out;
 
-	err = nfsd_vfs_write(rqstp, fhp, nf->nf_file, offset, vec,
-			vlen, cnt, stable);
+	err = nfsd_vfs_write(rqstp, fhp, nf, offset, vec,
+			vlen, cnt, stable, verf);
 	nfsd_file_put(nf);
 out:
 	trace_nfsd_write_done(rqstp, fhp, offset, *cnt);
@@ -1067,7 +1111,7 @@ nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
  */
 __be32
 nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
-               loff_t offset, unsigned long count)
+               loff_t offset, unsigned long count, __be32 *verf)
 {
 	struct nfsd_file	*nf;
 	loff_t			end = LLONG_MAX;
@@ -1086,10 +1130,14 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
 	if (err)
 		goto out;
 	if (EX_ISSYNC(fhp->fh_export)) {
-		int err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
+		int err2;
 
+		down_write(&nf->nf_rwsem);
+		err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
 		switch (err2) {
 		case 0:
+			nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net,
+						nfsd_net_id));
 			break;
 		case -EINVAL:
 			err = nfserr_notsupp;
@@ -1099,7 +1147,10 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
 			nfsd_reset_boot_verifier(net_generic(nf->nf_net,
 						 nfsd_net_id));
 		}
-	}
+		up_write(&nf->nf_rwsem);
+	} else
+		nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net,
+					nfsd_net_id));
 
 	nfsd_file_put(nf);
 out:
@@ -1123,7 +1174,7 @@ nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *resfhp,
 	if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
 		iap->ia_valid &= ~(ATTR_UID|ATTR_GID);
 	if (iap->ia_valid)
-		return nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0);
+		return nfsd_setattr(rqstp, resfhp, iap, 0, (time64_t)0);
 	/* Callers expect file metadata to be committed here */
 	return nfserrno(commit_metadata(resfhp));
 }
@@ -1386,7 +1437,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
 			    && d_inode(dchild)->i_atime.tv_sec == v_atime
 			    && d_inode(dchild)->i_size  == 0 ) {
 				if (created)
-					*created = 1;
+					*created = true;
 				break;
 			}
 			/* fall through */
@@ -1395,7 +1446,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
 			    && d_inode(dchild)->i_atime.tv_sec == v_atime
 			    && d_inode(dchild)->i_size  == 0 ) {
 				if (created)
-					*created = 1;
+					*created = true;
 				goto set_attr;
 			}
 			/* fall through */
@@ -1412,7 +1463,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
 		goto out_nfserr;
 	}
 	if (created)
-		*created = 1;
+		*created = true;
 
 	nfsd_check_ignore_resizing(iap);
 
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index cc110a10..3eb660a 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -34,6 +34,8 @@
 #define NFSD_MAY_CREATE		(NFSD_MAY_EXEC|NFSD_MAY_WRITE)
 #define NFSD_MAY_REMOVE		(NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
 
+struct nfsd_file;
+
 /*
  * Callback function for readdir
  */
@@ -48,15 +50,16 @@ __be32		 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *,
 				const char *, unsigned int,
 				struct svc_export **, struct dentry **);
 __be32		nfsd_setattr(struct svc_rqst *, struct svc_fh *,
-				struct iattr *, int, time_t);
+				struct iattr *, int, time64_t);
 int nfsd_mountpoint(struct dentry *, struct svc_export *);
 #ifdef CONFIG_NFSD_V4
 __be32          nfsd4_set_nfs4_label(struct svc_rqst *, struct svc_fh *,
 		    struct xdr_netobj *);
 __be32		nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *,
 				    struct file *, loff_t, loff_t, int);
-__be32		nfsd4_clone_file_range(struct file *, u64, struct file *,
-				       u64, u64, bool);
+__be32		nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos,
+				       struct nfsd_file *nf_dst, u64 dst_pos,
+				       u64 count, bool sync);
 #endif /* CONFIG_NFSD_V4 */
 __be32		nfsd_create_locked(struct svc_rqst *, struct svc_fh *,
 				char *name, int len, struct iattr *attrs,
@@ -71,7 +74,7 @@ __be32		do_nfsd_create(struct svc_rqst *, struct svc_fh *,
 				struct svc_fh *res, int createmode,
 				u32 *verifier, bool *truncp, bool *created);
 __be32		nfsd_commit(struct svc_rqst *, struct svc_fh *,
-				loff_t, unsigned long);
+				loff_t, unsigned long, __be32 *verf);
 #endif /* CONFIG_NFSD_V3 */
 int 		nfsd_open_break_lease(struct inode *, int);
 __be32		nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t,
@@ -91,11 +94,12 @@ __be32 		nfsd_read(struct svc_rqst *, struct svc_fh *,
 				loff_t, struct kvec *, int, unsigned long *,
 				u32 *eof);
 __be32 		nfsd_write(struct svc_rqst *, struct svc_fh *, loff_t,
-				struct kvec *, int, unsigned long *, int);
+				struct kvec *, int, unsigned long *,
+				int stable, __be32 *verf);
 __be32		nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
-				struct file *file, loff_t offset,
+				struct nfsd_file *nf, loff_t offset,
 				struct kvec *vec, int vlen, unsigned long *cnt,
-				int stable);
+				int stable, __be32 *verf);
 __be32		nfsd_readlink(struct svc_rqst *, struct svc_fh *,
 				char *, int *);
 __be32		nfsd_symlink(struct svc_rqst *, struct svc_fh *,
diff --git a/fs/nfsd/xdr3.h b/fs/nfsd/xdr3.h
index 99ff9f4..4155fd7 100644
--- a/fs/nfsd/xdr3.h
+++ b/fs/nfsd/xdr3.h
@@ -14,7 +14,7 @@ struct nfsd3_sattrargs {
 	struct svc_fh		fh;
 	struct iattr		attrs;
 	int			check_guard;
-	time_t			guardtime;
+	time64_t		guardtime;
 };
 
 struct nfsd3_diropargs {
@@ -159,6 +159,7 @@ struct nfsd3_writeres {
 	struct svc_fh		fh;
 	unsigned long		count;
 	int			committed;
+	__be32			verf[2];
 };
 
 struct nfsd3_renameres {
@@ -223,6 +224,7 @@ struct nfsd3_pathconfres {
 struct nfsd3_commitres {
 	__be32			status;
 	struct svc_fh		fh;
+	__be32			verf[2];
 };
 
 struct nfsd3_getaclres {
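
The verf[2] fields added above carry the NFS write/commit verifier that the vfs.c hunks now maintain under nf_rwsem. The verifier matters on the client side: data written UNSTABLE is only safe once a COMMIT returns the same verifier that the WRITE replies carried; if the verifiers differ (server reboot, or, with this series, a failed stable write or fsync), the client must resend the data. A hypothetical client-side sketch of that comparison, not taken from this patch:

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	#define NFS_VERF_SIZE 8	/* two __be32 words, as in nfsd3_writeres above */

	/* Hypothetical helper: decide whether UNSTABLE data must be resent
	 * after COMMIT, by comparing the WRITE and COMMIT verifiers. */
	static bool nfs_must_resend(const uint8_t write_verf[NFS_VERF_SIZE],
				    const uint8_t commit_verf[NFS_VERF_SIZE])
	{
		return memcmp(write_verf, commit_verf, NFS_VERF_SIZE) != 0;
	}
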
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index f4737d6..db63d39 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -46,9 +46,9 @@
 #define CURRENT_STATE_ID_FLAG (1<<0)
 #define SAVED_STATE_ID_FLAG (1<<1)
 
-#define SET_STATE_ID(c, f) ((c)->sid_flags |= (f))
-#define HAS_STATE_ID(c, f) ((c)->sid_flags & (f))
-#define CLEAR_STATE_ID(c, f) ((c)->sid_flags &= ~(f))
+#define SET_CSTATE_FLAG(c, f) ((c)->sid_flags |= (f))
+#define HAS_CSTATE_FLAG(c, f) ((c)->sid_flags & (f))
+#define CLEAR_CSTATE_FLAG(c, f) ((c)->sid_flags &= ~(f))
 
 struct nfsd4_compound_state {
 	struct svc_fh		current_fh;
@@ -221,6 +221,7 @@ struct nfsd4_lookup {
 struct nfsd4_putfh {
 	u32		pf_fhlen;           /* request */
 	char		*pf_fhval;          /* request */
+	bool		no_verify;	    /* represents foreign fh */
 };
 
 struct nfsd4_open {
@@ -518,11 +519,13 @@ struct nfsd42_write_res {
 
 struct nfsd4_copy {
 	/* request */
-	stateid_t	cp_src_stateid;
-	stateid_t	cp_dst_stateid;
-	u64		cp_src_pos;
-	u64		cp_dst_pos;
-	u64		cp_count;
+	stateid_t		cp_src_stateid;
+	stateid_t		cp_dst_stateid;
+	u64			cp_src_pos;
+	u64			cp_dst_pos;
+	u64			cp_count;
+	struct nl4_server	cp_src;
+	bool			cp_intra;
 
 	/* both */
 	bool		cp_synchronous;
@@ -540,13 +543,18 @@ struct nfsd4_copy {
 	struct nfsd_file        *nf_src;
 	struct nfsd_file        *nf_dst;
 
-	stateid_t		cp_stateid;
+	copy_stateid_t		cp_stateid;
 
 	struct list_head	copies;
 	struct task_struct	*copy_task;
 	refcount_t		refcount;
 	bool			stopped;
+
+	struct vfsmount		*ss_mnt;
+	struct nfs_fh		c_fh;
+	nfs4_stateid		stateid;
 };
+extern bool inter_copy_offload_enable;
 
 struct nfsd4_seek {
 	/* request */
@@ -568,6 +576,18 @@ struct nfsd4_offload_status {
 	u32		status;
 };
 
+struct nfsd4_copy_notify {
+	/* request */
+	stateid_t		cpn_src_stateid;
+	struct nl4_server	cpn_dst;
+
+	/* response */
+	stateid_t		cpn_cnr_stateid;
+	u64			cpn_sec;
+	u32			cpn_nsec;
+	struct nl4_server	cpn_src;
+};
+
 struct nfsd4_op {
 	int					opnum;
 	const struct nfsd4_operation *		opdesc;
@@ -627,6 +647,7 @@ struct nfsd4_op {
 		struct nfsd4_clone		clone;
 		struct nfsd4_copy		copy;
 		struct nfsd4_offload_status	offload_status;
+		struct nfsd4_copy_notify	copy_notify;
 		struct nfsd4_seek		seek;
 	} u;
 	struct nfs4_replay *			replay;
diff --git a/fs/nsfs.c b/fs/nsfs.c
index f75767b..b13bfd4 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -55,7 +55,7 @@ static void nsfs_evict(struct inode *inode)
 	ns->ops->put(ns);
 }
 
-static void *__ns_get_path(struct path *path, struct ns_common *ns)
+static int __ns_get_path(struct path *path, struct ns_common *ns)
 {
 	struct vfsmount *mnt = nsfs_mnt;
 	struct dentry *dentry;
@@ -74,13 +74,13 @@ static void *__ns_get_path(struct path *path, struct ns_common *ns)
 got_it:
 	path->mnt = mntget(mnt);
 	path->dentry = dentry;
-	return NULL;
+	return 0;
 slow:
 	rcu_read_unlock();
 	inode = new_inode_pseudo(mnt->mnt_sb);
 	if (!inode) {
 		ns->ops->put(ns);
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 	}
 	inode->i_ino = ns->inum;
 	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
@@ -92,7 +92,7 @@ static void *__ns_get_path(struct path *path, struct ns_common *ns)
 	dentry = d_alloc_anon(mnt->mnt_sb);
 	if (!dentry) {
 		iput(inode);
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 	}
 	d_instantiate(dentry, inode);
 	dentry->d_fsdata = (void *)ns->ops;
@@ -101,23 +101,22 @@ static void *__ns_get_path(struct path *path, struct ns_common *ns)
 		d_delete(dentry);	/* make sure ->d_prune() does nothing */
 		dput(dentry);
 		cpu_relax();
-		return ERR_PTR(-EAGAIN);
+		return -EAGAIN;
 	}
 	goto got_it;
 }
 
-void *ns_get_path_cb(struct path *path, ns_get_path_helper_t *ns_get_cb,
+int ns_get_path_cb(struct path *path, ns_get_path_helper_t *ns_get_cb,
 		     void *private_data)
 {
-	void *ret;
+	int ret;
 
 	do {
 		struct ns_common *ns = ns_get_cb(private_data);
 		if (!ns)
-			return ERR_PTR(-ENOENT);
-
+			return -ENOENT;
 		ret = __ns_get_path(path, ns);
-	} while (ret == ERR_PTR(-EAGAIN));
+	} while (ret == -EAGAIN);
 
 	return ret;
 }
@@ -134,7 +133,7 @@ static struct ns_common *ns_get_path_task(void *private_data)
 	return args->ns_ops->get(args->task);
 }
 
-void *ns_get_path(struct path *path, struct task_struct *task,
+int ns_get_path(struct path *path, struct task_struct *task,
 		  const struct proc_ns_operations *ns_ops)
 {
 	struct ns_get_path_task_args args = {
@@ -150,7 +149,7 @@ int open_related_ns(struct ns_common *ns,
 {
 	struct path path = {};
 	struct file *f;
-	void *err;
+	int err;
 	int fd;
 
 	fd = get_unused_fd_flags(O_CLOEXEC);
@@ -167,11 +166,11 @@ int open_related_ns(struct ns_common *ns,
 		}
 
 		err = __ns_get_path(&path, relative);
-	} while (err == ERR_PTR(-EAGAIN));
+	} while (err == -EAGAIN);
 
-	if (IS_ERR(err)) {
+	if (err) {
 		put_unused_fd(fd);
-		return PTR_ERR(err);
+		return err;
 	}
 
 	f = dentry_open(&path, O_RDONLY, current_cred());
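
The fs/nsfs.c hunks above switch __ns_get_path() and ns_get_path() from ERR_PTR()-encoded pointer returns to plain int error codes, which removes the IS_ERR()/PTR_ERR() round-trips in the -EAGAIN retry loops. A hypothetical side-by-side sketch of the two conventions (kernel-context code, not the actual nsfs helpers):

	#include <linux/err.h>
	#include <linux/errno.h>

	/* Old convention: success and error are both carried in the returned pointer. */
	static void *get_thing_errptr(void)
	{
		return ERR_PTR(-EAGAIN);	/* caller: if (IS_ERR(p)) err = PTR_ERR(p); */
	}

	/* New convention: the result is filled in elsewhere (an out parameter in
	 * the real code) and the return value is 0 or a negative errno. */
	static int get_thing_int(void)
	{
		return -EAGAIN;			/* caller: if (ret) return ret; */
	}

	/* Retry loop in the new style, mirroring ns_get_path_cb() after this change. */
	static int get_thing_retry(void)
	{
		int ret;

		do {
			ret = get_thing_int();
		} while (ret == -EAGAIN);
		return ret;
	}
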
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 6c73884..d4359a1 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -2899,18 +2899,12 @@ int ntfs_setattr(struct dentry *dentry, struct iattr *attr)
 			ia_valid |= ATTR_MTIME | ATTR_CTIME;
 		}
 	}
-	if (ia_valid & ATTR_ATIME) {
-		vi->i_atime = timestamp_truncate(attr->ia_atime,
-					       vi);
-	}
-	if (ia_valid & ATTR_MTIME) {
-		vi->i_mtime = timestamp_truncate(attr->ia_mtime,
-					       vi);
-	}
-	if (ia_valid & ATTR_CTIME) {
-		vi->i_ctime = timestamp_truncate(attr->ia_ctime,
-					       vi);
-	}
+	if (ia_valid & ATTR_ATIME)
+		vi->i_atime = attr->ia_atime;
+	if (ia_valid & ATTR_MTIME)
+		vi->i_mtime = attr->ia_mtime;
+	if (ia_valid & ATTR_CTIME)
+		vi->i_ctime = attr->ia_ctime;
 	mark_inode_dirty(vi);
 out:
 	return err;
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 5c424a0..1ef2457 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -73,7 +73,7 @@ static void o2quo_fence_self(void)
 		       "system by restarting ***\n");
 		emergency_restart();
 		break;
-	};
+	}
 }
 
 /* Indicate that a timeout occurred on a heartbeat region write. The
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
index 38b2243..5e700b4 100644
--- a/fs/ocfs2/dlm/Makefile
+++ b/fs/ocfs2/dlm/Makefile
@@ -1,6 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/..
-
 obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
 
 ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 4de89af..6abaded 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -23,15 +23,15 @@
 #include <linux/spinlock.h>
 
 
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
 
 #include "dlmapi.h"
 #include "dlmcommon.h"
 
 #define MLOG_MASK_PREFIX ML_DLM
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
 
 static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
 			   struct dlm_lock *lock);
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index aaf2454..0463dce 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -688,10 +688,6 @@ struct dlm_begin_reco
 	__be32 pad2;
 };
 
-
-#define BITS_PER_BYTE 8
-#define BITS_TO_BYTES(bits) (((bits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)
-
 struct dlm_query_join_request
 {
 	u8 node_idx;
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 965f45d..6051edc 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -23,9 +23,9 @@
 #include <linux/spinlock.h>
 
 
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
 
 #include "dlmapi.h"
 #include "dlmcommon.h"
@@ -33,7 +33,7 @@
 #include "dlmconvert.h"
 
 #define MLOG_MASK_PREFIX ML_DLM
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
 
 /* NOTE: __dlmconvert_master is the only function in here that
  * needs a spinlock held on entry (res->spinlock) and it is the
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 4d0b452..c5c6efb 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -17,9 +17,9 @@
 #include <linux/debugfs.h>
 #include <linux/export.h>
 
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
 
 #include "dlmapi.h"
 #include "dlmcommon.h"
@@ -27,7 +27,7 @@
 #include "dlmdebug.h"
 
 #define MLOG_MASK_PREFIX ML_DLM
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
 
 static int stringify_lockname(const char *lockname, int locklen, char *buf,
 			      int len);
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index ee6f459..357cfc7 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -20,9 +20,9 @@
 #include <linux/debugfs.h>
 #include <linux/sched/signal.h>
 
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
 
 #include "dlmapi.h"
 #include "dlmcommon.h"
@@ -30,7 +30,7 @@
 #include "dlmdebug.h"
 
 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
 
 /*
  * ocfs2 node maps are array of long int, which limits to send them freely
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index baff087..83f0760 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -25,9 +25,9 @@
 #include <linux/delay.h>
 
 
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
 
 #include "dlmapi.h"
 #include "dlmcommon.h"
@@ -35,7 +35,7 @@
 #include "dlmconvert.h"
 
 #define MLOG_MASK_PREFIX ML_DLM
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
 
 static struct kmem_cache *dlm_lock_cache;
 
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 74b768c..900f7e4 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -25,9 +25,9 @@
 #include <linux/delay.h>
 
 
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
 
 #include "dlmapi.h"
 #include "dlmcommon.h"
@@ -35,7 +35,7 @@
 #include "dlmdebug.h"
 
 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
 
 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
 			      struct dlm_master_list_entry *mle,
@@ -2554,8 +2554,6 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
 	if (!dlm_grab(dlm))
 		return -EINVAL;
 
-	BUG_ON(target == O2NM_MAX_NODES);
-
 	name = res->lockname.name;
 	namelen = res->lockname.len;
 
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 064ce5b..4b566e8 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -26,16 +26,16 @@
 #include <linux/delay.h>
 
 
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
 
 #include "dlmapi.h"
 #include "dlmcommon.h"
 #include "dlmdomain.h"
 
 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
 
 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
 
@@ -1668,7 +1668,7 @@ static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
 			  u8 nodenum, u8 *real_master)
 {
-	int ret = -EINVAL;
+	int ret;
 	struct dlm_master_requery req;
 	int status = DLM_LOCK_RES_OWNER_UNKNOWN;
 
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 61c51c2..fd40c17 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -25,16 +25,16 @@
 #include <linux/delay.h>
 
 
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
 
 #include "dlmapi.h"
 #include "dlmcommon.h"
 #include "dlmdomain.h"
 
 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
 
 static int dlm_thread(void *data);
 static void dlm_flush_asts(struct dlm_ctxt *dlm);
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 3883633..dcb17ca 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -23,15 +23,15 @@
 #include <linux/spinlock.h>
 #include <linux/delay.h>
 
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
 
 #include "dlmapi.h"
 #include "dlmcommon.h"
 
 #define MLOG_MASK_PREFIX ML_DLM
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
 
 #define DLM_UNLOCK_FREE_LOCK           0x00000001
 #define DLM_UNLOCK_CALL_AST            0x00000002
diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
index a9874e4..c7895f6 100644
--- a/fs/ocfs2/dlmfs/Makefile
+++ b/fs/ocfs2/dlmfs/Makefile
@@ -1,6 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/..
-
 obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
 
 ocfs2_dlmfs-objs := userdlm.o dlmfs.o
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 4f1668c..8e4f1ac 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -33,11 +33,11 @@
 
 #include <linux/uaccess.h>
 
-#include "stackglue.h"
+#include "../stackglue.h"
 #include "userdlm.h"
 
 #define MLOG_MASK_PREFIX ML_DLMFS
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
 
 
 static const struct super_operations dlmfs_ops;
diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c
index 525b14d..3df5be2 100644
--- a/fs/ocfs2/dlmfs/userdlm.c
+++ b/fs/ocfs2/dlmfs/userdlm.c
@@ -21,12 +21,12 @@
 #include <linux/types.h>
 #include <linux/crc32.h>
 
-#include "ocfs2_lockingver.h"
-#include "stackglue.h"
+#include "../ocfs2_lockingver.h"
+#include "../stackglue.h"
 #include "userdlm.h"
 
 #define MLOG_MASK_PREFIX ML_DLMFS
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
 
 
 static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index cda1027..cb9e6a7 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -570,7 +570,7 @@ void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
 			mlog_bug_on_msg(1, "type: %d\n", type);
 			ops = NULL; /* thanks, gcc */
 			break;
-	};
+	}
 
 	ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
 			      generation, res->l_name);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 9876db5..6cd5e49 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2101,17 +2101,15 @@ static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
 static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
 					    struct buffer_head **di_bh,
 					    int meta_level,
-					    int overwrite_io,
 					    int write_sem,
 					    int wait)
 {
 	int ret = 0;
 
 	if (wait)
-		ret = ocfs2_inode_lock(inode, NULL, meta_level);
+		ret = ocfs2_inode_lock(inode, di_bh, meta_level);
 	else
-		ret = ocfs2_try_inode_lock(inode,
-			overwrite_io ? NULL : di_bh, meta_level);
+		ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);
 	if (ret < 0)
 		goto out;
 
@@ -2136,6 +2134,7 @@ static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
 
 out_unlock:
 	brelse(*di_bh);
+	*di_bh = NULL;
 	ocfs2_inode_unlock(inode, meta_level);
 out:
 	return ret;
@@ -2177,7 +2176,6 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
 		ret = ocfs2_inode_lock_for_extent_tree(inode,
 						       &di_bh,
 						       meta_level,
-						       overwrite_io,
 						       write_sem,
 						       wait);
 		if (ret < 0) {
@@ -2233,13 +2231,13 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
 							   &di_bh,
 							   meta_level,
 							   write_sem);
+			meta_level = 1;
+			write_sem = 1;
 			ret = ocfs2_inode_lock_for_extent_tree(inode,
 							       &di_bh,
 							       meta_level,
-							       overwrite_io,
-							       1,
+							       write_sem,
 							       wait);
-			write_sem = 1;
 			if (ret < 0) {
 				if (ret != -EAGAIN)
 					mlog_errno(ret);
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 3103ba7..bfe611e 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -597,9 +597,11 @@ static inline void ocfs2_update_inode_fsync_trans(handle_t *handle,
 {
 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 
-	oi->i_sync_tid = handle->h_transaction->t_tid;
-	if (datasync)
-		oi->i_datasync_tid = handle->h_transaction->t_tid;
+	if (!is_handle_aborted(handle)) {
+		oi->i_sync_tid = handle->h_transaction->t_tid;
+		if (datasync)
+			oi->i_datasync_tid = handle->h_transaction->t_tid;
+	}
 }
 
 #endif /* OCFS2_JOURNAL_H */
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 8ea51cf..da65251 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -586,8 +586,7 @@ static int __ocfs2_mknod_locked(struct inode *dir,
 			mlog_errno(status);
 	}
 
-	oi->i_sync_tid = handle->h_transaction->t_tid;
-	oi->i_datasync_tid = handle->h_transaction->t_tid;
+	ocfs2_update_inode_fsync_trans(handle, inode, 1);
 
 leave:
 	if (status < 0) {
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 4180c3e..939df99 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -696,7 +696,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
 
 	bg_bh = ocfs2_block_group_alloc_contig(osb, handle, alloc_inode,
 					       ac, cl);
-	if (IS_ERR(bg_bh) && (PTR_ERR(bg_bh) == -ENOSPC))
+	if (PTR_ERR(bg_bh) == -ENOSPC)
 		bg_bh = ocfs2_block_group_alloc_discontig(handle,
 							  alloc_inode,
 							  ac, cl);
diff --git a/fs/open.c b/fs/open.c
index b62f5c0..0788b37 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -955,24 +955,73 @@ struct file *open_with_fake_path(const struct path *path, int flags,
 }
 EXPORT_SYMBOL(open_with_fake_path);
 
-static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
+#define WILL_CREATE(flags)	(flags & (O_CREAT | __O_TMPFILE))
+#define O_PATH_FLAGS		(O_DIRECTORY | O_NOFOLLOW | O_PATH | O_CLOEXEC)
+
+inline struct open_how build_open_how(int flags, umode_t mode)
 {
+	struct open_how how = {
+		.flags = flags & VALID_OPEN_FLAGS,
+		.mode = mode & S_IALLUGO,
+	};
+
+	/* O_PATH beats everything else. */
+	if (how.flags & O_PATH)
+		how.flags &= O_PATH_FLAGS;
+	/* Modes should only be set for create-like flags. */
+	if (!WILL_CREATE(how.flags))
+		how.mode = 0;
+	return how;
+}
+
+inline int build_open_flags(const struct open_how *how, struct open_flags *op)
+{
+	int flags = how->flags;
 	int lookup_flags = 0;
 	int acc_mode = ACC_MODE(flags);
 
-	/*
-	 * Clear out all open flags we don't know about so that we don't report
-	 * them in fcntl(F_GETFD) or similar interfaces.
-	 */
-	flags &= VALID_OPEN_FLAGS;
-
-	if (flags & (O_CREAT | __O_TMPFILE))
-		op->mode = (mode & S_IALLUGO) | S_IFREG;
-	else
-		op->mode = 0;
-
 	/* Must never be set by userspace */
-	flags &= ~FMODE_NONOTIFY & ~O_CLOEXEC;
+	flags &= ~(FMODE_NONOTIFY | O_CLOEXEC);
+
+	/*
+	 * Older syscalls implicitly clear all of the invalid flags or argument
+	 * values before calling build_open_flags(), but openat2(2) checks all
+	 * of its arguments.
+	 */
+	if (flags & ~VALID_OPEN_FLAGS)
+		return -EINVAL;
+	if (how->resolve & ~VALID_RESOLVE_FLAGS)
+		return -EINVAL;
+
+	/* Deal with the mode. */
+	if (WILL_CREATE(flags)) {
+		if (how->mode & ~S_IALLUGO)
+			return -EINVAL;
+		op->mode = how->mode | S_IFREG;
+	} else {
+		if (how->mode != 0)
+			return -EINVAL;
+		op->mode = 0;
+	}
+
+	/*
+	 * In order to ensure programs get explicit errors when trying to use
+	 * O_TMPFILE on old kernels, O_TMPFILE is implemented such that it
+	 * looks like (O_DIRECTORY|O_RDWR & ~O_CREAT) to old kernels. But we
+	 * have to require userspace to explicitly set it.
+	 */
+	if (flags & __O_TMPFILE) {
+		if ((flags & O_TMPFILE_MASK) != O_TMPFILE)
+			return -EINVAL;
+		if (!(acc_mode & MAY_WRITE))
+			return -EINVAL;
+	}
+	if (flags & O_PATH) {
+		/* O_PATH only permits certain other flags to be set. */
+		if (flags & ~O_PATH_FLAGS)
+			return -EINVAL;
+		acc_mode = 0;
+	}
 
 	/*
 	 * O_SYNC is implemented as __O_SYNC|O_DSYNC.  As many places only
@@ -983,20 +1032,6 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
 	if (flags & __O_SYNC)
 		flags |= O_DSYNC;
 
-	if (flags & __O_TMPFILE) {
-		if ((flags & O_TMPFILE_MASK) != O_TMPFILE)
-			return -EINVAL;
-		if (!(acc_mode & MAY_WRITE))
-			return -EINVAL;
-	} else if (flags & O_PATH) {
-		/*
-		 * If we have O_PATH in the open flag. Then we
-		 * cannot have anything other than the below set of flags
-		 */
-		flags &= O_DIRECTORY | O_NOFOLLOW | O_PATH;
-		acc_mode = 0;
-	}
-
 	op->open_flag = flags;
 
 	/* O_TRUNC implies we need access checks for write permissions */
@@ -1022,6 +1057,18 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
 		lookup_flags |= LOOKUP_DIRECTORY;
 	if (!(flags & O_NOFOLLOW))
 		lookup_flags |= LOOKUP_FOLLOW;
+
+	if (how->resolve & RESOLVE_NO_XDEV)
+		lookup_flags |= LOOKUP_NO_XDEV;
+	if (how->resolve & RESOLVE_NO_MAGICLINKS)
+		lookup_flags |= LOOKUP_NO_MAGICLINKS;
+	if (how->resolve & RESOLVE_NO_SYMLINKS)
+		lookup_flags |= LOOKUP_NO_SYMLINKS;
+	if (how->resolve & RESOLVE_BENEATH)
+		lookup_flags |= LOOKUP_BENEATH;
+	if (how->resolve & RESOLVE_IN_ROOT)
+		lookup_flags |= LOOKUP_IN_ROOT;
+
 	op->lookup_flags = lookup_flags;
 	return 0;
 }
@@ -1040,8 +1087,11 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
 struct file *file_open_name(struct filename *name, int flags, umode_t mode)
 {
 	struct open_flags op;
-	int err = build_open_flags(flags, mode, &op);
-	return err ? ERR_PTR(err) : do_filp_open(AT_FDCWD, name, &op);
+	struct open_how how = build_open_how(flags, mode);
+	int err = build_open_flags(&how, &op);
+	if (err)
+		return ERR_PTR(err);
+	return do_filp_open(AT_FDCWD, name, &op);
 }
 
 /**
@@ -1072,17 +1122,19 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
 			    const char *filename, int flags, umode_t mode)
 {
 	struct open_flags op;
-	int err = build_open_flags(flags, mode, &op);
+	struct open_how how = build_open_how(flags, mode);
+	int err = build_open_flags(&how, &op);
 	if (err)
 		return ERR_PTR(err);
 	return do_file_open_root(dentry, mnt, filename, &op);
 }
 EXPORT_SYMBOL(file_open_root);
 
-long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
+static long do_sys_openat2(int dfd, const char __user *filename,
+			   struct open_how *how)
 {
 	struct open_flags op;
-	int fd = build_open_flags(flags, mode, &op);
+	int fd = build_open_flags(how, &op);
 	struct filename *tmp;
 
 	if (fd)
@@ -1092,7 +1144,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);
 
-	fd = get_unused_fd_flags(flags);
+	fd = get_unused_fd_flags(how->flags);
 	if (fd >= 0) {
 		struct file *f = do_filp_open(dfd, tmp, &op);
 		if (IS_ERR(f)) {
@@ -1107,12 +1159,16 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
 	return fd;
 }
 
+long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
+{
+	struct open_how how = build_open_how(flags, mode);
+	return do_sys_openat2(dfd, filename, &how);
+}
+
+
 SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode)
 {
-	if (force_o_largefile())
-		flags |= O_LARGEFILE;
-
-	return do_sys_open(AT_FDCWD, filename, flags, mode);
+	return ksys_open(filename, flags, mode);
 }
 
 SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags,
@@ -1120,10 +1176,32 @@ SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags,
 {
 	if (force_o_largefile())
 		flags |= O_LARGEFILE;
-
 	return do_sys_open(dfd, filename, flags, mode);
 }
 
+SYSCALL_DEFINE4(openat2, int, dfd, const char __user *, filename,
+		struct open_how __user *, how, size_t, usize)
+{
+	int err;
+	struct open_how tmp;
+
+	BUILD_BUG_ON(sizeof(struct open_how) < OPEN_HOW_SIZE_VER0);
+	BUILD_BUG_ON(sizeof(struct open_how) != OPEN_HOW_SIZE_LATEST);
+
+	if (unlikely(usize < OPEN_HOW_SIZE_VER0))
+		return -EINVAL;
+
+	err = copy_struct_from_user(&tmp, sizeof(tmp), how, usize);
+	if (err)
+		return err;
+
+	/* O_LARGEFILE is only allowed for non-O_PATH. */
+	if (!(tmp.flags & O_PATH) && force_o_largefile())
+		tmp.flags |= O_LARGEFILE;
+
+	return do_sys_openat2(dfd, filename, &tmp);
+}
+
 #ifdef CONFIG_COMPAT
 /*
  * Exactly like sys_open(), except that it doesn't set the
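
With the hunks above, openat2(2) is the extensible form of openat(2): the caller passes a struct open_how whose flags and mode are validated strictly and whose resolve field maps onto the new LOOKUP_* restrictions. A minimal userspace sketch, assuming the uapi header added by this series is installed and using the raw syscall since glibc has no wrapper yet:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/openat2.h>	/* struct open_how, RESOLVE_* (new uapi header) */

	#ifndef __NR_openat2
	#define __NR_openat2 437	/* openat2 uses the same number on all architectures */
	#endif

	int main(void)
	{
		struct open_how how;
		int fd;

		memset(&how, 0, sizeof(how));	/* unknown/unused fields must be zero */
		how.flags = O_RDONLY | O_CLOEXEC;
		how.resolve = RESOLVE_NO_SYMLINKS;	/* fail instead of following symlinks */

		fd = syscall(__NR_openat2, AT_FDCWD, "/etc/hostname", &how, sizeof(how));
		if (fd < 0) {
			perror("openat2");
			return 1;
		}
		close(fd);
		return 0;
	}
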
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 25543a9..29eaa45 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -273,6 +273,7 @@ static void *help_start(struct seq_file *m, loff_t *pos)
 
 static void *help_next(struct seq_file *m, void *v, loff_t *pos)
 {
+	(*pos)++;
 	gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_next: start\n");
 
 	return NULL;
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 6220642..9fc47c2 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -24,7 +24,7 @@
 
 static int ovl_ccup_set(const char *buf, const struct kernel_param *param)
 {
-	pr_warn("overlayfs: \"check_copy_up\" module option is obsolete\n");
+	pr_warn("\"check_copy_up\" module option is obsolete\n");
 	return 0;
 }
 
@@ -123,6 +123,9 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
 	loff_t old_pos = 0;
 	loff_t new_pos = 0;
 	loff_t cloned;
+	loff_t data_pos = -1;
+	loff_t hole_len;
+	bool skip_hole = false;
 	int error = 0;
 
 	if (len == 0)
@@ -144,7 +147,11 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
 		goto out;
 	/* Couldn't clone, so now we try to copy the data */
 
-	/* FIXME: copy up sparse files efficiently */
+	/* Check if lower fs supports seek operation */
+	if (old_file->f_mode & FMODE_LSEEK &&
+	    old_file->f_op->llseek)
+		skip_hole = true;
+
 	while (len) {
 		size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
 		long bytes;
@@ -157,6 +164,36 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
 			break;
 		}
 
+		/*
+		 * Filling holes with zeroes wastes disk space and
+		 * slows down the copy-up, so we optimize holes away
+		 * during copy-up. This relies on SEEK_DATA support
+		 * in the lower fs; if the lower fs does not support
+		 * it, copy-up behaves as before.
+		 *
+		 * Hole detection works as follows: when the next
+		 * data position is beyond the current position, we
+		 * skip the hole; otherwise we copy a chunk of size
+		 * OVL_COPY_UP_CHUNK_SIZE. This may not recognize
+		 * every hole and sometimes skips only part of a
+		 * hole area, but it is good enough for most use
+		 * cases.
+		 */
+
+		if (skip_hole && data_pos < old_pos) {
+			data_pos = vfs_llseek(old_file, old_pos, SEEK_DATA);
+			if (data_pos > old_pos) {
+				hole_len = data_pos - old_pos;
+				len -= hole_len;
+				old_pos = new_pos = data_pos;
+				continue;
+			} else if (data_pos == -ENXIO) {
+				break;
+			} else if (data_pos < 0) {
+				skip_hole = false;
+			}
+		}
+
 		bytes = do_splice_direct(old_file, &old_pos,
 					 new_file, &new_pos,
 					 this_len, SPLICE_F_MOVE);
@@ -480,7 +517,7 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
 	}
 
 	inode_lock(temp->d_inode);
-	if (c->metacopy)
+	if (S_ISREG(c->stat.mode))
 		err = ovl_set_size(temp, &c->stat);
 	if (!err)
 		err = ovl_set_attr(temp, &c->stat);
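
The copy-up loop above skips holes by seeking to the next data offset with vfs_llseek(..., SEEK_DATA). The same primitives are available from userspace; a small sketch that walks the data extents of a sparse file, with the caveat (as in the comment above) that SEEK_DATA/SEEK_HOLE support depends on the filesystem:

	#define _GNU_SOURCE		/* for SEEK_DATA / SEEK_HOLE */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		if (argc < 2) {
			fprintf(stderr, "usage: %s <file>\n", argv[0]);
			return 1;
		}

		int fd = open(argv[1], O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		off_t end = lseek(fd, 0, SEEK_END);
		off_t pos = 0;

		while (pos < end) {
			off_t data = lseek(fd, pos, SEEK_DATA);
			if (data < 0)		/* ENXIO: only a hole remains until EOF */
				break;
			off_t hole = lseek(fd, data, SEEK_HOLE);
			printf("data extent: %lld..%lld\n",
			       (long long)data, (long long)hole);
			pos = hole;
		}
		close(fd);
		return 0;
	}
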
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 29abdb1..8e57d53 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -35,7 +35,7 @@ int ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
 	dput(wdentry);
 
 	if (err) {
-		pr_err("overlayfs: cleanup of '%pd2' failed (%i)\n",
+		pr_err("cleanup of '%pd2' failed (%i)\n",
 		       wdentry, err);
 	}
 
@@ -53,7 +53,7 @@ static struct dentry *ovl_lookup_temp(struct dentry *workdir)
 
 	temp = lookup_one_len(name, workdir, strlen(name));
 	if (!IS_ERR(temp) && temp->d_inode) {
-		pr_err("overlayfs: workdir/%s already exists\n", name);
+		pr_err("workdir/%s already exists\n", name);
 		dput(temp);
 		temp = ERR_PTR(-EIO);
 	}
@@ -134,7 +134,7 @@ static int ovl_mkdir_real(struct inode *dir, struct dentry **newdentry,
 	d = lookup_one_len(dentry->d_name.name, dentry->d_parent,
 			   dentry->d_name.len);
 	if (IS_ERR(d)) {
-		pr_warn("overlayfs: failed lookup after mkdir (%pd2, err=%i).\n",
+		pr_warn("failed lookup after mkdir (%pd2, err=%i).\n",
 			dentry, err);
 		return PTR_ERR(d);
 	}
@@ -267,7 +267,7 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
 
 	d_instantiate(dentry, inode);
 	if (inode != oip.newinode) {
-		pr_warn_ratelimited("overlayfs: newly created inode found in cache (%pd2)\n",
+		pr_warn_ratelimited("newly created inode found in cache (%pd2)\n",
 				    dentry);
 	}
 
@@ -1009,7 +1009,7 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir)
 		spin_unlock(&dentry->d_lock);
 	} else {
 		kfree(redirect);
-		pr_warn_ratelimited("overlayfs: failed to set redirect (%i)\n",
+		pr_warn_ratelimited("failed to set redirect (%i)\n",
 				    err);
 		/* Fall back to userspace copy-up */
 		err = -EXDEV;
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 70e5558..6f54d70 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -30,7 +30,7 @@ static int ovl_encode_maybe_copy_up(struct dentry *dentry)
 	}
 
 	if (err) {
-		pr_warn_ratelimited("overlayfs: failed to copy up on encode (%pd2, err=%i)\n",
+		pr_warn_ratelimited("failed to copy up on encode (%pd2, err=%i)\n",
 				    dentry, err);
 	}
 
@@ -244,7 +244,7 @@ static int ovl_dentry_to_fid(struct dentry *dentry, u32 *fid, int buflen)
 	return err;
 
 fail:
-	pr_warn_ratelimited("overlayfs: failed to encode file handle (%pd2, err=%i, buflen=%d, len=%d, type=%d)\n",
+	pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i, buflen=%d, len=%d, type=%d)\n",
 			    dentry, err, buflen, fh ? (int)fh->fb.len : 0,
 			    fh ? fh->fb.type : 0);
 	goto out;
@@ -358,7 +358,7 @@ static struct dentry *ovl_dentry_real_at(struct dentry *dentry, int idx)
  */
 static struct dentry *ovl_lookup_real_one(struct dentry *connected,
 					  struct dentry *real,
-					  struct ovl_layer *layer)
+					  const struct ovl_layer *layer)
 {
 	struct inode *dir = d_inode(connected);
 	struct dentry *this, *parent = NULL;
@@ -406,7 +406,7 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
 	return this;
 
 fail:
-	pr_warn_ratelimited("overlayfs: failed to lookup one by real (%pd2, layer=%d, connected=%pd2, err=%i)\n",
+	pr_warn_ratelimited("failed to lookup one by real (%pd2, layer=%d, connected=%pd2, err=%i)\n",
 			    real, layer->idx, connected, err);
 	this = ERR_PTR(err);
 	goto out;
@@ -414,17 +414,16 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
 
 static struct dentry *ovl_lookup_real(struct super_block *sb,
 				      struct dentry *real,
-				      struct ovl_layer *layer);
+				      const struct ovl_layer *layer);
 
 /*
  * Lookup an indexed or hashed overlay dentry by real inode.
  */
 static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
 					    struct dentry *real,
-					    struct ovl_layer *layer)
+					    const struct ovl_layer *layer)
 {
 	struct ovl_fs *ofs = sb->s_fs_info;
-	struct ovl_layer upper_layer = { .mnt = ofs->upper_mnt };
 	struct dentry *index = NULL;
 	struct dentry *this = NULL;
 	struct inode *inode;
@@ -466,7 +465,7 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
 		 * recursive call walks back from indexed upper to the topmost
 		 * connected/hashed upper parent (or up to root).
 		 */
-		this = ovl_lookup_real(sb, upper, &upper_layer);
+		this = ovl_lookup_real(sb, upper, &ofs->layers[0]);
 		dput(upper);
 	}
 
@@ -487,7 +486,7 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
  */
 static struct dentry *ovl_lookup_real_ancestor(struct super_block *sb,
 					       struct dentry *real,
-					       struct ovl_layer *layer)
+					       const struct ovl_layer *layer)
 {
 	struct dentry *next, *parent = NULL;
 	struct dentry *ancestor = ERR_PTR(-EIO);
@@ -540,7 +539,7 @@ static struct dentry *ovl_lookup_real_ancestor(struct super_block *sb,
  */
 static struct dentry *ovl_lookup_real(struct super_block *sb,
 				      struct dentry *real,
-				      struct ovl_layer *layer)
+				      const struct ovl_layer *layer)
 {
 	struct dentry *connected;
 	int err = 0;
@@ -631,7 +630,7 @@ static struct dentry *ovl_lookup_real(struct super_block *sb,
 	return connected;
 
 fail:
-	pr_warn_ratelimited("overlayfs: failed to lookup by real (%pd2, layer=%d, connected=%pd2, err=%i)\n",
+	pr_warn_ratelimited("failed to lookup by real (%pd2, layer=%d, connected=%pd2, err=%i)\n",
 			    real, layer->idx, connected, err);
 	dput(connected);
 	return ERR_PTR(err);
@@ -646,8 +645,7 @@ static struct dentry *ovl_get_dentry(struct super_block *sb,
 				     struct dentry *index)
 {
 	struct ovl_fs *ofs = sb->s_fs_info;
-	struct ovl_layer upper_layer = { .mnt = ofs->upper_mnt };
-	struct ovl_layer *layer = upper ? &upper_layer : lowerpath->layer;
+	const struct ovl_layer *layer = upper ? &ofs->layers[0] : lowerpath->layer;
 	struct dentry *real = upper ?: (index ?: lowerpath->dentry);
 
 	/*
@@ -822,7 +820,7 @@ static struct dentry *ovl_fh_to_dentry(struct super_block *sb, struct fid *fid,
 	return dentry;
 
 out_err:
-	pr_warn_ratelimited("overlayfs: failed to decode file handle (len=%d, type=%d, flags=%x, err=%i)\n",
+	pr_warn_ratelimited("failed to decode file handle (len=%d, type=%d, flags=%x, err=%i)\n",
 			    fh_len, fh_type, flags, err);
 	dentry = ERR_PTR(err);
 	goto out;
@@ -831,7 +829,7 @@ static struct dentry *ovl_fh_to_dentry(struct super_block *sb, struct fid *fid,
 static struct dentry *ovl_fh_to_parent(struct super_block *sb, struct fid *fid,
 				       int fh_len, int fh_type)
 {
-	pr_warn_ratelimited("overlayfs: connectable file handles not supported; use 'no_subtree_check' exportfs option.\n");
+	pr_warn_ratelimited("connectable file handles not supported; use 'no_subtree_check' exportfs option.\n");
 	return ERR_PTR(-EACCES);
 }
 
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index e235a63..a531721 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -9,8 +9,19 @@
 #include <linux/xattr.h>
 #include <linux/uio.h>
 #include <linux/uaccess.h>
+#include <linux/splice.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
 #include "overlayfs.h"
 
+struct ovl_aio_req {
+	struct kiocb iocb;
+	struct kiocb *orig_iocb;
+	struct fd fd;
+};
+
+static struct kmem_cache *ovl_aio_request_cachep;
+
 static char ovl_whatisit(struct inode *inode, struct inode *realinode)
 {
 	if (realinode != ovl_inode_upper(inode))
@@ -146,7 +157,7 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
 	struct inode *inode = file_inode(file);
 	struct fd real;
 	const struct cred *old_cred;
-	ssize_t ret;
+	loff_t ret;
 
 	/*
 	 * The two special cases below do not need to involve real fs,
@@ -171,7 +182,7 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
 	 * limitations that are more strict than ->s_maxbytes for specific
 	 * files, so we use the real file to perform seeks.
 	 */
-	inode_lock(inode);
+	ovl_inode_lock(inode);
 	real.file->f_pos = file->f_pos;
 
 	old_cred = ovl_override_creds(inode->i_sb);
@@ -179,7 +190,7 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
 	revert_creds(old_cred);
 
 	file->f_pos = real.file->f_pos;
-	inode_unlock(inode);
+	ovl_inode_unlock(inode);
 
 	fdput(real);
 
@@ -225,6 +236,33 @@ static rwf_t ovl_iocb_to_rwf(struct kiocb *iocb)
 	return flags;
 }
 
+static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
+{
+	struct kiocb *iocb = &aio_req->iocb;
+	struct kiocb *orig_iocb = aio_req->orig_iocb;
+
+	if (iocb->ki_flags & IOCB_WRITE) {
+		struct inode *inode = file_inode(orig_iocb->ki_filp);
+
+		file_end_write(iocb->ki_filp);
+		ovl_copyattr(ovl_inode_real(inode), inode);
+	}
+
+	orig_iocb->ki_pos = iocb->ki_pos;
+	fdput(aio_req->fd);
+	kmem_cache_free(ovl_aio_request_cachep, aio_req);
+}
+
+static void ovl_aio_rw_complete(struct kiocb *iocb, long res, long res2)
+{
+	struct ovl_aio_req *aio_req = container_of(iocb,
+						   struct ovl_aio_req, iocb);
+	struct kiocb *orig_iocb = aio_req->orig_iocb;
+
+	ovl_aio_cleanup_handler(aio_req);
+	orig_iocb->ki_complete(orig_iocb, res, res2);
+}
+
 static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
@@ -240,10 +278,28 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 		return ret;
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
-	ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
-			    ovl_iocb_to_rwf(iocb));
-	revert_creds(old_cred);
+	if (is_sync_kiocb(iocb)) {
+		ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
+				    ovl_iocb_to_rwf(iocb));
+	} else {
+		struct ovl_aio_req *aio_req;
 
+		ret = -ENOMEM;
+		aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL);
+		if (!aio_req)
+			goto out;
+
+		aio_req->fd = real;
+		real.flags = 0;
+		aio_req->orig_iocb = iocb;
+		kiocb_clone(&aio_req->iocb, iocb, real.file);
+		aio_req->iocb.ki_complete = ovl_aio_rw_complete;
+		ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter);
+		if (ret != -EIOCBQUEUED)
+			ovl_aio_cleanup_handler(aio_req);
+	}
+out:
+	revert_creds(old_cred);
 	ovl_file_accessed(file);
 
 	fdput(real);
@@ -274,15 +330,33 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
 		goto out_unlock;
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
-	file_start_write(real.file);
-	ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
-			     ovl_iocb_to_rwf(iocb));
-	file_end_write(real.file);
+	if (is_sync_kiocb(iocb)) {
+		file_start_write(real.file);
+		ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
+				     ovl_iocb_to_rwf(iocb));
+		file_end_write(real.file);
+		/* Update size */
+		ovl_copyattr(ovl_inode_real(inode), inode);
+	} else {
+		struct ovl_aio_req *aio_req;
+
+		ret = -ENOMEM;
+		aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL);
+		if (!aio_req)
+			goto out;
+
+		file_start_write(real.file);
+		aio_req->fd = real;
+		real.flags = 0;
+		aio_req->orig_iocb = iocb;
+		kiocb_clone(&aio_req->iocb, iocb, real.file);
+		aio_req->iocb.ki_complete = ovl_aio_rw_complete;
+		ret = vfs_iocb_iter_write(real.file, &aio_req->iocb, iter);
+		if (ret != -EIOCBQUEUED)
+			ovl_aio_cleanup_handler(aio_req);
+	}
+out:
 	revert_creds(old_cred);
-
-	/* Update size */
-	ovl_copyattr(ovl_inode_real(inode), inode);
-
 	fdput(real);
 
 out_unlock:
@@ -291,6 +365,48 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
 	return ret;
 }
 
+static ssize_t ovl_splice_read(struct file *in, loff_t *ppos,
+			 struct pipe_inode_info *pipe, size_t len,
+			 unsigned int flags)
+{
+	ssize_t ret;
+	struct fd real;
+	const struct cred *old_cred;
+
+	ret = ovl_real_fdget(in, &real);
+	if (ret)
+		return ret;
+
+	old_cred = ovl_override_creds(file_inode(in)->i_sb);
+	ret = generic_file_splice_read(real.file, ppos, pipe, len, flags);
+	revert_creds(old_cred);
+
+	ovl_file_accessed(in);
+	fdput(real);
+	return ret;
+}
+
+static ssize_t
+ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
+			  loff_t *ppos, size_t len, unsigned int flags)
+{
+	struct fd real;
+	const struct cred *old_cred;
+	ssize_t ret;
+
+	ret = ovl_real_fdget(out, &real);
+	if (ret)
+		return ret;
+
+	old_cred = ovl_override_creds(file_inode(out)->i_sb);
+	ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
+	revert_creds(old_cred);
+
+	ovl_file_accessed(out);
+	fdput(real);
+	return ret;
+}
+
 static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
 	struct fd real;
@@ -647,7 +763,25 @@ const struct file_operations ovl_file_operations = {
 	.fadvise	= ovl_fadvise,
 	.unlocked_ioctl	= ovl_ioctl,
 	.compat_ioctl	= ovl_compat_ioctl,
+	.splice_read    = ovl_splice_read,
+	.splice_write   = ovl_splice_write,
 
 	.copy_file_range	= ovl_copy_file_range,
 	.remap_file_range	= ovl_remap_file_range,
 };
+
+int __init ovl_aio_request_cache_init(void)
+{
+	ovl_aio_request_cachep = kmem_cache_create("ovl_aio_req",
+						   sizeof(struct ovl_aio_req),
+						   0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!ovl_aio_request_cachep)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void ovl_aio_request_cache_destroy(void)
+{
+	kmem_cache_destroy(ovl_aio_request_cachep);
+}
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index b045cf1..79e8994 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -75,10 +75,9 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
 	return err;
 }
 
-static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
-			   struct ovl_layer *lower_layer)
+static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
 {
-	bool samefs = ovl_same_sb(dentry->d_sb);
+	bool samefs = ovl_same_fs(dentry->d_sb);
 	unsigned int xinobits = ovl_xino_bits(dentry->d_sb);
 
 	if (samefs) {
@@ -100,12 +99,10 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
 		 * persistent for a given layer configuration.
 		 */
 		if (stat->ino >> shift) {
-			pr_warn_ratelimited("overlayfs: inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
+			pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
 					    dentry, stat->ino, xinobits);
 		} else {
-			if (lower_layer)
-				stat->ino |= ((u64)lower_layer->fsid) << shift;
-
+			stat->ino |= ((u64)fsid) << shift;
 			stat->dev = dentry->d_sb->s_dev;
 			return 0;
 		}
@@ -124,15 +121,14 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
 		 */
 		stat->dev = dentry->d_sb->s_dev;
 		stat->ino = dentry->d_inode->i_ino;
-	} else if (lower_layer && lower_layer->fsid) {
+	} else {
 		/*
 		 * For non-samefs setup, if we cannot map all layers st_ino
 		 * to a unified address space, we need to make sure that st_dev
-		 * is unique per lower fs. Upper layer uses real st_dev and
-		 * lower layers use the unique anonymous bdev assigned to the
-		 * lower fs.
+		 * is unique per underlying fs, so we use the unique anonymous
+		 * bdev assigned to the underlying fs.
 		 */
-		stat->dev = lower_layer->fs->pseudo_dev;
+		stat->dev = OVL_FS(dentry->d_sb)->fs[fsid].pseudo_dev;
 	}
 
 	return 0;
@@ -146,8 +142,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
 	struct path realpath;
 	const struct cred *old_cred;
 	bool is_dir = S_ISDIR(dentry->d_inode->i_mode);
-	bool samefs = ovl_same_sb(dentry->d_sb);
-	struct ovl_layer *lower_layer = NULL;
+	int fsid = 0;
 	int err;
 	bool metacopy_blocks = false;
 
@@ -168,9 +163,9 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
 	 * If lower filesystem supports NFS file handles, this also guaranties
 	 * persistent st_ino across mount cycle.
 	 */
-	if (!is_dir || samefs || ovl_xino_bits(dentry->d_sb)) {
+	if (!is_dir || ovl_same_dev(dentry->d_sb)) {
 		if (!OVL_TYPE_UPPER(type)) {
-			lower_layer = ovl_layer_lower(dentry);
+			fsid = ovl_layer_lower(dentry)->fsid;
 		} else if (OVL_TYPE_ORIGIN(type)) {
 			struct kstat lowerstat;
 			u32 lowermask = STATX_INO | STATX_BLOCKS |
@@ -200,14 +195,8 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
 			if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
 			    (!ovl_verify_lower(dentry->d_sb) &&
 			     (is_dir || lowerstat.nlink == 1))) {
-				lower_layer = ovl_layer_lower(dentry);
-				/*
-				 * Cannot use origin st_dev;st_ino because
-				 * origin inode content may differ from overlay
-				 * inode content.
-				 */
-				if (samefs || lower_layer->fsid)
-					stat->ino = lowerstat.ino;
+				fsid = ovl_layer_lower(dentry)->fsid;
+				stat->ino = lowerstat.ino;
 			}
 
 			/*
@@ -241,7 +230,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
 		}
 	}
 
-	err = ovl_map_dev_ino(dentry, stat, lower_layer);
+	err = ovl_map_dev_ino(dentry, stat, fsid);
 	if (err)
 		goto out;
 
@@ -527,6 +516,27 @@ static const struct address_space_operations ovl_aops = {
  * [...] &ovl_i_mutex_dir_key[depth]   (stack_depth=2)
  * [...] &ovl_i_mutex_dir_key[depth]#2 (stack_depth=1)
  * [...] &type->i_mutex_dir_key        (stack_depth=0)
+ *
+ * Locking order w.r.t ovl_want_write() is important for nested overlayfs.
+ *
+ * This chain is valid:
+ * - inode->i_rwsem			(inode_lock[2])
+ * - upper_mnt->mnt_sb->s_writers	(ovl_want_write[0])
+ * - OVL_I(inode)->lock			(ovl_inode_lock[2])
+ * - OVL_I(lowerinode)->lock		(ovl_inode_lock[1])
+ *
+ * And this chain is valid:
+ * - inode->i_rwsem			(inode_lock[2])
+ * - OVL_I(inode)->lock			(ovl_inode_lock[2])
+ * - lowerinode->i_rwsem		(inode_lock[1])
+ * - OVL_I(lowerinode)->lock		(ovl_inode_lock[1])
+ *
+ * But lowerinode->i_rwsem SHOULD NOT be acquired while ovl_want_write() is
+ * held, because it is in reverse order of the non-nested case using the same
+ * upper fs:
+ * - inode->i_rwsem			(inode_lock[1])
+ * - upper_mnt->mnt_sb->s_writers	(ovl_want_write[0])
+ * - OVL_I(inode)->lock			(ovl_inode_lock[1])
  */
 #define OVL_MAX_NESTING FILESYSTEM_MAX_STACK_DEPTH
 
@@ -565,7 +575,7 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
 	 * ovl_new_inode(), ino arg is 0, so i_ino will be updated to real
 	 * upper inode i_ino on ovl_inode_init() or ovl_inode_update().
 	 */
-	if (ovl_same_sb(inode->i_sb) || xinobits) {
+	if (ovl_same_dev(inode->i_sb)) {
 		inode->i_ino = ino;
 		if (xinobits && fsid && !(ino >> (64 - xinobits)))
 			inode->i_ino |= (unsigned long)fsid << (64 - xinobits);
@@ -698,7 +708,7 @@ unsigned int ovl_get_nlink(struct dentry *lowerdentry,
 	return nlink;
 
 fail:
-	pr_warn_ratelimited("overlayfs: failed to get index nlink (%pd2, err=%i)\n",
+	pr_warn_ratelimited("failed to get index nlink (%pd2, err=%i)\n",
 			    upperdentry, err);
 	return fallback;
 }
@@ -969,7 +979,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
 	return inode;
 
 out_err:
-	pr_warn_ratelimited("overlayfs: failed to get inode (%i)\n", err);
+	pr_warn_ratelimited("failed to get inode (%i)\n", err);
 	inode = ERR_PTR(err);
 	goto out;
 }
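
With the changes above, a multi-fs overlay with xino enabled reports a single st_dev and disambiguates layers by packing the layer's fsid into the high bits of st_ino; ovl_map_dev_ino() falls back to the per-fs anonymous bdev when the real inode number does not fit. A small arithmetic sketch of that packing, with illustrative values (the helper and field names here are hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative only: pack a layer fsid into the top xinobits of an
	 * inode number, as ovl_map_dev_ino() does when xino is enabled. */
	static uint64_t ovl_xino_pack(uint64_t real_ino, int fsid, int xinobits)
	{
		int shift = 64 - xinobits;

		if (real_ino >> shift)		/* real ino too big to fit: caller falls back */
			return 0;
		return real_ino | ((uint64_t)fsid << shift);
	}

	int main(void)
	{
		/* e.g. 8 xino bits leave 56 bits for the real inode number */
		printf("0x%llx\n", (unsigned long long)ovl_xino_pack(12345, 2, 8));
		return 0;
	}
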
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 76ff663..ed9e129 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -141,10 +141,10 @@ static struct ovl_fh *ovl_get_fh(struct dentry *dentry, const char *name)
 	return NULL;
 
 fail:
-	pr_warn_ratelimited("overlayfs: failed to get origin (%i)\n", res);
+	pr_warn_ratelimited("failed to get origin (%i)\n", res);
 	goto out;
 invalid:
-	pr_warn_ratelimited("overlayfs: invalid origin (%*phN)\n", res, fh);
+	pr_warn_ratelimited("invalid origin (%*phN)\n", res, fh);
 	goto out;
 }
 
@@ -322,16 +322,16 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
 	struct dentry *origin = NULL;
 	int i;
 
-	for (i = 0; i < ofs->numlower; i++) {
+	for (i = 1; i < ofs->numlayer; i++) {
 		/*
 		 * If lower fs uuid is not unique among lower fs we cannot match
 		 * fh->uuid to layer.
 		 */
-		if (ofs->lower_layers[i].fsid &&
-		    ofs->lower_layers[i].fs->bad_uuid)
+		if (ofs->layers[i].fsid &&
+		    ofs->layers[i].fs->bad_uuid)
 			continue;
 
-		origin = ovl_decode_real_fh(fh, ofs->lower_layers[i].mnt,
+		origin = ovl_decode_real_fh(fh, ofs->layers[i].mnt,
 					    connected);
 		if (origin)
 			break;
@@ -354,13 +354,13 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
 	}
 	**stackp = (struct ovl_path){
 		.dentry = origin,
-		.layer = &ofs->lower_layers[i]
+		.layer = &ofs->layers[i]
 	};
 
 	return 0;
 
 invalid:
-	pr_warn_ratelimited("overlayfs: invalid origin (%pd2, ftype=%x, origin ftype=%x).\n",
+	pr_warn_ratelimited("invalid origin (%pd2, ftype=%x, origin ftype=%x).\n",
 			    upperdentry, d_inode(upperdentry)->i_mode & S_IFMT,
 			    d_inode(origin)->i_mode & S_IFMT);
 	dput(origin);
@@ -449,7 +449,7 @@ int ovl_verify_set_fh(struct dentry *dentry, const char *name,
 
 fail:
 	inode = d_inode(real);
-	pr_warn_ratelimited("overlayfs: failed to verify %s (%pd2, ino=%lu, err=%i)\n",
+	pr_warn_ratelimited("failed to verify %s (%pd2, ino=%lu, err=%i)\n",
 			    is_upper ? "upper" : "origin", real,
 			    inode ? inode->i_ino : 0, err);
 	goto out;
@@ -475,7 +475,7 @@ struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index)
 		return upper ?: ERR_PTR(-ESTALE);
 
 	if (!d_is_dir(upper)) {
-		pr_warn_ratelimited("overlayfs: invalid index upper (%pd2, upper=%pd2).\n",
+		pr_warn_ratelimited("invalid index upper (%pd2, upper=%pd2).\n",
 				    index, upper);
 		dput(upper);
 		return ERR_PTR(-EIO);
@@ -589,12 +589,12 @@ int ovl_verify_index(struct ovl_fs *ofs, struct dentry *index)
 	return err;
 
 fail:
-	pr_warn_ratelimited("overlayfs: failed to verify index (%pd2, ftype=%x, err=%i)\n",
+	pr_warn_ratelimited("failed to verify index (%pd2, ftype=%x, err=%i)\n",
 			    index, d_inode(index)->i_mode & S_IFMT, err);
 	goto out;
 
 orphan:
-	pr_warn_ratelimited("overlayfs: orphan index entry (%pd2, ftype=%x, nlink=%u)\n",
+	pr_warn_ratelimited("orphan index entry (%pd2, ftype=%x, nlink=%u)\n",
 			    index, d_inode(index)->i_mode & S_IFMT,
 			    d_inode(index)->i_nlink);
 	err = -ENOENT;
@@ -696,7 +696,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
 			index = NULL;
 			goto out;
 		}
-		pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
+		pr_warn_ratelimited("failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
 				    "overlayfs: mount with '-o index=off' to disable inodes index.\n",
 				    d_inode(origin)->i_ino, name.len, name.name,
 				    err);
@@ -723,13 +723,13 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
 		 * unlinked, which means that finding a lower origin on lookup
 		 * whose index is a whiteout should be treated as an error.
 		 */
-		pr_warn_ratelimited("overlayfs: bad index found (index=%pd2, ftype=%x, origin ftype=%x).\n",
+		pr_warn_ratelimited("bad index found (index=%pd2, ftype=%x, origin ftype=%x).\n",
 				    index, d_inode(index)->i_mode & S_IFMT,
 				    d_inode(origin)->i_mode & S_IFMT);
 		goto fail;
 	} else if (is_dir && verify) {
 		if (!upper) {
-			pr_warn_ratelimited("overlayfs: suspected uncovered redirected dir found (origin=%pd2, index=%pd2).\n",
+			pr_warn_ratelimited("suspected uncovered redirected dir found (origin=%pd2, index=%pd2).\n",
 					    origin, index);
 			goto fail;
 		}
@@ -738,7 +738,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
 		err = ovl_verify_upper(index, upper, false);
 		if (err) {
 			if (err == -ESTALE) {
-				pr_warn_ratelimited("overlayfs: suspected multiply redirected dir found (upper=%pd2, origin=%pd2, index=%pd2).\n",
+				pr_warn_ratelimited("suspected multiply redirected dir found (upper=%pd2, origin=%pd2, index=%pd2).\n",
 						    upper, origin, index);
 			}
 			goto fail;
@@ -885,7 +885,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 
 	if (!d.stop && poe->numlower) {
 		err = -ENOMEM;
-		stack = kcalloc(ofs->numlower, sizeof(struct ovl_path),
+		stack = kcalloc(ofs->numlayer - 1, sizeof(struct ovl_path),
 				GFP_KERNEL);
 		if (!stack)
 			goto out_put_upper;
@@ -967,7 +967,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 		 */
 		err = -EPERM;
 		if (d.redirect && !ofs->config.redirect_follow) {
-			pr_warn_ratelimited("overlayfs: refusing to follow redirect for (%pd2)\n",
+			pr_warn_ratelimited("refusing to follow redirect for (%pd2)\n",
 					    dentry);
 			goto out_put;
 		}
@@ -994,7 +994,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 
 		err = -EPERM;
 		if (!ofs->config.metacopy) {
-			pr_warn_ratelimited("overlay: refusing to follow metacopy origin for (%pd2)\n",
+			pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n",
 					    dentry);
 			goto out_put;
 		}
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index f283b1d6..3623d28 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -9,6 +9,9 @@
 #include <linux/fs.h>
 #include "ovl_entry.h"
 
+#undef pr_fmt
+#define pr_fmt(fmt) "overlayfs: " fmt
+
 enum ovl_path_type {
 	__OVL_PATH_UPPER	= (1 << 0),
 	__OVL_PATH_MERGE	= (1 << 1),
@@ -221,7 +224,6 @@ int ovl_want_write(struct dentry *dentry);
 void ovl_drop_write(struct dentry *dentry);
 struct dentry *ovl_workdir(struct dentry *dentry);
 const struct cred *ovl_override_creds(struct super_block *sb);
-struct super_block *ovl_same_sb(struct super_block *sb);
 int ovl_can_decode_fh(struct super_block *sb);
 struct dentry *ovl_indexdir(struct super_block *sb);
 bool ovl_index_all(struct super_block *sb);
@@ -237,7 +239,7 @@ enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
 struct dentry *ovl_dentry_upper(struct dentry *dentry);
 struct dentry *ovl_dentry_lower(struct dentry *dentry);
 struct dentry *ovl_dentry_lowerdata(struct dentry *dentry);
-struct ovl_layer *ovl_layer_lower(struct dentry *dentry);
+const struct ovl_layer *ovl_layer_lower(struct dentry *dentry);
 struct dentry *ovl_dentry_real(struct dentry *dentry);
 struct dentry *ovl_i_dentry_upper(struct inode *inode);
 struct inode *ovl_inode_upper(struct inode *inode);
@@ -299,11 +301,21 @@ static inline bool ovl_is_impuredir(struct dentry *dentry)
 	return ovl_check_dir_xattr(dentry, OVL_XATTR_IMPURE);
 }
 
+/* All layers on same fs? */
+static inline bool ovl_same_fs(struct super_block *sb)
+{
+	return OVL_FS(sb)->xino_mode == 0;
+}
+
+/* All overlay inodes have same st_dev? */
+static inline bool ovl_same_dev(struct super_block *sb)
+{
+	return OVL_FS(sb)->xino_mode >= 0;
+}
+
 static inline unsigned int ovl_xino_bits(struct super_block *sb)
 {
-	struct ovl_fs *ofs = sb->s_fs_info;
-
-	return ofs->xino_bits;
+	return ovl_same_dev(sb) ? OVL_FS(sb)->xino_mode : 0;
 }
 
 static inline int ovl_inode_lock(struct inode *inode)
@@ -438,6 +450,8 @@ struct dentry *ovl_create_temp(struct dentry *workdir, struct ovl_cattr *attr);
 
 /* file.c */
 extern const struct file_operations ovl_file_operations;
+int __init ovl_aio_request_cache_init(void);
+void ovl_aio_request_cache_destroy(void);
 
 /* copy_up.c */
 int ovl_copy_up(struct dentry *dentry);
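
The header change above folds the old ovl_same_sb()/xino_bits pair into a single signed xino_mode plus two predicates. A minimal standalone sketch of that encoding follows; the helper names here are illustrative, not the kernel symbols:

/* Illustrative sketch of the xino_mode encoding used by the helpers above:
 * -1 means xino is disabled, 0 means all layers sit on one filesystem, and
 * 1..32 is the number of high inode bits reserved for the layer's fsid. */
#include <stdbool.h>

static bool same_fs(int xino_mode)   { return xino_mode == 0; }
static bool same_dev(int xino_mode)  { return xino_mode >= 0; }
static int  xino_bits(int xino_mode) { return same_dev(xino_mode) ? xino_mode : 0; }

In the later hunks, callers of the removed ovl_same_sb() that only cared about "one st_dev for the whole overlay" become ovl_same_dev(), while the stricter callers become ovl_same_fs().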
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 28348c4..89015ea 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -24,6 +24,8 @@ struct ovl_sb {
 	dev_t pseudo_dev;
 	/* Unusable (conflicting) uuid */
 	bool bad_uuid;
+	/* Used as a lower layer (but maybe also as upper) */
+	bool is_lower;
 };
 
 struct ovl_layer {
@@ -38,18 +40,18 @@ struct ovl_layer {
 };
 
 struct ovl_path {
-	struct ovl_layer *layer;
+	const struct ovl_layer *layer;
 	struct dentry *dentry;
 };
 
 /* private information held for overlayfs's superblock */
 struct ovl_fs {
 	struct vfsmount *upper_mnt;
-	unsigned int numlower;
-	/* Number of unique lower sb that differ from upper sb */
-	unsigned int numlowerfs;
-	struct ovl_layer *lower_layers;
-	struct ovl_sb *lower_fs;
+	unsigned int numlayer;
+	/* Number of unique fs among layers including upper fs */
+	unsigned int numfs;
+	const struct ovl_layer *layers;
+	struct ovl_sb *fs;
 	/* workbasedir is the path at workdir= mount option */
 	struct dentry *workbasedir;
 	/* workdir is the 'work' directory under workbasedir */
@@ -71,10 +73,15 @@ struct ovl_fs {
 	struct inode *workbasedir_trap;
 	struct inode *workdir_trap;
 	struct inode *indexdir_trap;
-	/* Inode numbers in all layers do not use the high xino_bits */
-	unsigned int xino_bits;
+	/* -1: disabled, 0: same fs, 1..32: number of unused ino bits */
+	int xino_mode;
 };
 
+static inline struct ovl_fs *OVL_FS(struct super_block *sb)
+{
+	return (struct ovl_fs *)sb->s_fs_info;
+}
+
 /* private information held for every overlayfs dentry */
 struct ovl_entry {
 	union {
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 47a91c9..40ac9ce 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -441,7 +441,7 @@ static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid,
 			       const char *name, int namelen)
 {
 	if (ino >> (64 - xinobits)) {
-		pr_warn_ratelimited("overlayfs: d_ino too big (%.*s, ino=%llu, xinobits=%d)\n",
+		pr_warn_ratelimited("d_ino too big (%.*s, ino=%llu, xinobits=%d)\n",
 				    namelen, name, ino, xinobits);
 		return ino;
 	}
@@ -469,7 +469,7 @@ static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
 	int xinobits = ovl_xino_bits(dir->d_sb);
 	int err = 0;
 
-	if (!ovl_same_sb(dir->d_sb) && !xinobits)
+	if (!ovl_same_dev(dir->d_sb))
 		goto out;
 
 	if (p->name[0] == '.') {
@@ -504,7 +504,13 @@ static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
 		if (err)
 			goto fail;
 
-		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
+		/*
+		 * Directory inode is always on overlay st_dev.
+		 * Non-dir with ovl_same_dev() could be on pseudo st_dev in case
+		 * of xino bits overflow.
+		 */
+		WARN_ON_ONCE(S_ISDIR(stat.mode) &&
+			     dir->d_sb->s_dev != stat.dev);
 		ino = stat.ino;
 	} else if (xinobits && !OVL_TYPE_UPPER(type)) {
 		ino = ovl_remap_lower_ino(ino, xinobits,
@@ -518,7 +524,7 @@ static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
 	return err;
 
 fail:
-	pr_warn_ratelimited("overlayfs: failed to look up (%s) for ino (%i)\n",
+	pr_warn_ratelimited("failed to look up (%s) for ino (%i)\n",
 			    p->name, err);
 	goto out;
 }
@@ -685,7 +691,7 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
 	int err;
 	struct ovl_dir_file *od = file->private_data;
 	struct dentry *dir = file->f_path.dentry;
-	struct ovl_layer *lower_layer = ovl_layer_lower(dir);
+	const struct ovl_layer *lower_layer = ovl_layer_lower(dir);
 	struct ovl_readdir_translate rdt = {
 		.ctx.actor = ovl_fill_real,
 		.orig_ctx = ctx,
@@ -738,7 +744,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
 		 * entries.
 		 */
 		if (ovl_xino_bits(dentry->d_sb) ||
-		    (ovl_same_sb(dentry->d_sb) &&
+		    (ovl_same_fs(dentry->d_sb) &&
 		     (ovl_is_impure_dir(file) ||
 		      OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
 			return ovl_iterate_real(file, ctx);
@@ -965,7 +971,7 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
 
 		dentry = lookup_one_len(p->name, upper, p->len);
 		if (IS_ERR(dentry)) {
-			pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
+			pr_err("lookup '%s/%.*s' failed (%i)\n",
 			       upper->d_name.name, p->len, p->name,
 			       (int) PTR_ERR(dentry));
 			continue;
@@ -1147,6 +1153,6 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
 out:
 	ovl_cache_free(&list);
 	if (err)
-		pr_err("overlayfs: failed index dir cleanup (%i)\n", err);
+		pr_err("failed index dir cleanup (%i)\n", err);
 	return err;
 }
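
The readdir path above remaps lower inode numbers so every entry of a merged directory can be reported under the overlay's st_dev. A standalone sketch of the idea: the overflow fallback is taken verbatim from the hunk, while the packing step is reconstructed from context and may differ in detail from ovl_remap_lower_ino():

#include <stdint.h>
#include <stdio.h>

static uint64_t remap_lower_ino(uint64_t ino, int xinobits, int fsid)
{
	/* high bits already used by the real inode number: keep it as-is */
	if (ino >> (64 - xinobits))
		return ino;
	/* otherwise stash the layer's fsid in the reserved high bits */
	return ino | ((uint64_t)fsid << (64 - xinobits));
}

int main(void)
{
	/* with 2 reserved bits, fsid 1 lands in bits 62..63 */
	printf("%llx\n", (unsigned long long)remap_lower_ino(0x1234, 2, 1));
	return 0;
}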
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 7621ff1..319fe0d 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -224,14 +224,14 @@ static void ovl_free_fs(struct ovl_fs *ofs)
 	if (ofs->upperdir_locked)
 		ovl_inuse_unlock(ofs->upper_mnt->mnt_root);
 	mntput(ofs->upper_mnt);
-	for (i = 0; i < ofs->numlower; i++) {
-		iput(ofs->lower_layers[i].trap);
-		mntput(ofs->lower_layers[i].mnt);
+	for (i = 1; i < ofs->numlayer; i++) {
+		iput(ofs->layers[i].trap);
+		mntput(ofs->layers[i].mnt);
 	}
-	for (i = 0; i < ofs->numlowerfs; i++)
-		free_anon_bdev(ofs->lower_fs[i].pseudo_dev);
-	kfree(ofs->lower_layers);
-	kfree(ofs->lower_fs);
+	kfree(ofs->layers);
+	for (i = 0; i < ofs->numfs; i++)
+		free_anon_bdev(ofs->fs[i].pseudo_dev);
+	kfree(ofs->fs);
 
 	kfree(ofs->config.lowerdir);
 	kfree(ofs->config.upperdir);
@@ -358,7 +358,7 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
 	if (ofs->config.nfs_export != ovl_nfs_export_def)
 		seq_printf(m, ",nfs_export=%s", ofs->config.nfs_export ?
 						"on" : "off");
-	if (ofs->config.xino != ovl_xino_def())
+	if (ofs->config.xino != ovl_xino_def() && !ovl_same_fs(sb))
 		seq_printf(m, ",xino=%s", ovl_xino_str[ofs->config.xino]);
 	if (ofs->config.metacopy != ovl_metacopy_def)
 		seq_printf(m, ",metacopy=%s",
@@ -462,7 +462,7 @@ static int ovl_parse_redirect_mode(struct ovl_config *config, const char *mode)
 		if (ovl_redirect_always_follow)
 			config->redirect_follow = true;
 	} else if (strcmp(mode, "nofollow") != 0) {
-		pr_err("overlayfs: bad mount option \"redirect_dir=%s\"\n",
+		pr_err("bad mount option \"redirect_dir=%s\"\n",
 		       mode);
 		return -EINVAL;
 	}
@@ -560,14 +560,15 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
 			break;
 
 		default:
-			pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
+			pr_err("unrecognized mount option \"%s\" or missing value\n",
+					p);
 			return -EINVAL;
 		}
 	}
 
 	/* Workdir is useless in non-upper mount */
 	if (!config->upperdir && config->workdir) {
-		pr_info("overlayfs: option \"workdir=%s\" is useless in a non-upper mount, ignore\n",
+		pr_info("option \"workdir=%s\" is useless in a non-upper mount, ignore\n",
 			config->workdir);
 		kfree(config->workdir);
 		config->workdir = NULL;
@@ -587,7 +588,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
 	/* Resolve metacopy -> redirect_dir dependency */
 	if (config->metacopy && !config->redirect_dir) {
 		if (metacopy_opt && redirect_opt) {
-			pr_err("overlayfs: conflicting options: metacopy=on,redirect_dir=%s\n",
+			pr_err("conflicting options: metacopy=on,redirect_dir=%s\n",
 			       config->redirect_mode);
 			return -EINVAL;
 		}
@@ -596,7 +597,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
 			 * There was an explicit redirect_dir=... that resulted
 			 * in this conflict.
 			 */
-			pr_info("overlayfs: disabling metacopy due to redirect_dir=%s\n",
+			pr_info("disabling metacopy due to redirect_dir=%s\n",
 				config->redirect_mode);
 			config->metacopy = false;
 		} else {
@@ -692,7 +693,7 @@ static struct dentry *ovl_workdir_create(struct ovl_fs *ofs,
 out_dput:
 	dput(work);
 out_err:
-	pr_warn("overlayfs: failed to create directory %s/%s (errno: %i); mounting read-only\n",
+	pr_warn("failed to create directory %s/%s (errno: %i); mounting read-only\n",
 		ofs->config.workdir, name, -err);
 	work = NULL;
 	goto out_unlock;
@@ -716,21 +717,21 @@ static int ovl_mount_dir_noesc(const char *name, struct path *path)
 	int err = -EINVAL;
 
 	if (!*name) {
-		pr_err("overlayfs: empty lowerdir\n");
+		pr_err("empty lowerdir\n");
 		goto out;
 	}
 	err = kern_path(name, LOOKUP_FOLLOW, path);
 	if (err) {
-		pr_err("overlayfs: failed to resolve '%s': %i\n", name, err);
+		pr_err("failed to resolve '%s': %i\n", name, err);
 		goto out;
 	}
 	err = -EINVAL;
 	if (ovl_dentry_weird(path->dentry)) {
-		pr_err("overlayfs: filesystem on '%s' not supported\n", name);
+		pr_err("filesystem on '%s' not supported\n", name);
 		goto out_put;
 	}
 	if (!d_is_dir(path->dentry)) {
-		pr_err("overlayfs: '%s' not a directory\n", name);
+		pr_err("'%s' not a directory\n", name);
 		goto out_put;
 	}
 	return 0;
@@ -752,7 +753,7 @@ static int ovl_mount_dir(const char *name, struct path *path)
 
 		if (!err)
 			if (ovl_dentry_remote(path->dentry)) {
-				pr_err("overlayfs: filesystem on '%s' not supported as upperdir\n",
+				pr_err("filesystem on '%s' not supported as upperdir\n",
 				       tmp);
 				path_put_init(path);
 				err = -EINVAL;
@@ -769,7 +770,7 @@ static int ovl_check_namelen(struct path *path, struct ovl_fs *ofs,
 	int err = vfs_statfs(path, &statfs);
 
 	if (err)
-		pr_err("overlayfs: statfs failed on '%s'\n", name);
+		pr_err("statfs failed on '%s'\n", name);
 	else
 		ofs->namelen = max(ofs->namelen, statfs.f_namelen);
 
@@ -804,13 +805,13 @@ static int ovl_lower_dir(const char *name, struct path *path,
 	     (ofs->config.index && ofs->config.upperdir)) && !fh_type) {
 		ofs->config.index = false;
 		ofs->config.nfs_export = false;
-		pr_warn("overlayfs: fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n",
+		pr_warn("fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n",
 			name);
 	}
 
 	/* Check if lower fs has 32bit inode numbers */
 	if (fh_type != FILEID_INO32_GEN)
-		ofs->xino_bits = 0;
+		ofs->xino_mode = -1;
 
 	return 0;
 
@@ -996,7 +997,7 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
 	err = PTR_ERR_OR_ZERO(trap);
 	if (err) {
 		if (err == -ELOOP)
-			pr_err("overlayfs: conflicting %s path\n", name);
+			pr_err("conflicting %s path\n", name);
 		return err;
 	}
 
@@ -1013,11 +1014,11 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
 static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
 {
 	if (ofs->config.index) {
-		pr_err("overlayfs: %s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
+		pr_err("%s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
 		       name);
 		return -EBUSY;
 	} else {
-		pr_warn("overlayfs: %s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
+		pr_warn("%s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
 			name);
 		return 0;
 	}
@@ -1035,7 +1036,7 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
 
 	/* Upper fs should not be r/o */
 	if (sb_rdonly(upperpath->mnt->mnt_sb)) {
-		pr_err("overlayfs: upper fs is r/o, try multi-lower layers mount\n");
+		pr_err("upper fs is r/o, try multi-lower layers mount\n");
 		err = -EINVAL;
 		goto out;
 	}
@@ -1052,7 +1053,7 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
 	upper_mnt = clone_private_mount(upperpath);
 	err = PTR_ERR(upper_mnt);
 	if (IS_ERR(upper_mnt)) {
-		pr_err("overlayfs: failed to clone upperpath\n");
+		pr_err("failed to clone upperpath\n");
 		goto out;
 	}
 
@@ -1108,7 +1109,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
 	 * kernel upgrade. So warn instead of erroring out.
 	 */
 	if (!err)
-		pr_warn("overlayfs: upper fs needs to support d_type.\n");
+		pr_warn("upper fs needs to support d_type.\n");
 
 	/* Check if upper/work fs supports O_TMPFILE */
 	temp = ovl_do_tmpfile(ofs->workdir, S_IFREG | 0);
@@ -1116,7 +1117,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
 	if (ofs->tmpfile)
 		dput(temp);
 	else
-		pr_warn("overlayfs: upper fs does not support tmpfile.\n");
+		pr_warn("upper fs does not support tmpfile.\n");
 
 	/*
 	 * Check if upper/work fs supports trusted.overlay.* xattr
@@ -1126,7 +1127,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
 		ofs->noxattr = true;
 		ofs->config.index = false;
 		ofs->config.metacopy = false;
-		pr_warn("overlayfs: upper fs does not support xattr, falling back to index=off and metacopy=off.\n");
+		pr_warn("upper fs does not support xattr, falling back to index=off and metacopy=off.\n");
 		err = 0;
 	} else {
 		vfs_removexattr(ofs->workdir, OVL_XATTR_OPAQUE);
@@ -1136,16 +1137,16 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
 	fh_type = ovl_can_decode_fh(ofs->workdir->d_sb);
 	if (ofs->config.index && !fh_type) {
 		ofs->config.index = false;
-		pr_warn("overlayfs: upper fs does not support file handles, falling back to index=off.\n");
+		pr_warn("upper fs does not support file handles, falling back to index=off.\n");
 	}
 
 	/* Check if upper fs has 32bit inode numbers */
 	if (fh_type != FILEID_INO32_GEN)
-		ofs->xino_bits = 0;
+		ofs->xino_mode = -1;
 
 	/* NFS export of r/w mount depends on index */
 	if (ofs->config.nfs_export && !ofs->config.index) {
-		pr_warn("overlayfs: NFS export requires \"index=on\", falling back to nfs_export=off.\n");
+		pr_warn("NFS export requires \"index=on\", falling back to nfs_export=off.\n");
 		ofs->config.nfs_export = false;
 	}
 out:
@@ -1165,11 +1166,11 @@ static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
 
 	err = -EINVAL;
 	if (upperpath->mnt != workpath.mnt) {
-		pr_err("overlayfs: workdir and upperdir must reside under the same mount\n");
+		pr_err("workdir and upperdir must reside under the same mount\n");
 		goto out;
 	}
 	if (!ovl_workdir_ok(workpath.dentry, upperpath->dentry)) {
-		pr_err("overlayfs: workdir and upperdir must be separate subtrees\n");
+		pr_err("workdir and upperdir must be separate subtrees\n");
 		goto out;
 	}
 
@@ -1210,7 +1211,7 @@ static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
 	err = ovl_verify_origin(upperpath->dentry, oe->lowerstack[0].dentry,
 				true);
 	if (err) {
-		pr_err("overlayfs: failed to verify upper root origin\n");
+		pr_err("failed to verify upper root origin\n");
 		goto out;
 	}
 
@@ -1233,18 +1234,18 @@ static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
 			err = ovl_verify_set_fh(ofs->indexdir, OVL_XATTR_ORIGIN,
 						upperpath->dentry, true, false);
 			if (err)
-				pr_err("overlayfs: failed to verify index dir 'origin' xattr\n");
+				pr_err("failed to verify index dir 'origin' xattr\n");
 		}
 		err = ovl_verify_upper(ofs->indexdir, upperpath->dentry, true);
 		if (err)
-			pr_err("overlayfs: failed to verify index dir 'upper' xattr\n");
+			pr_err("failed to verify index dir 'upper' xattr\n");
 
 		/* Cleanup bad/stale/orphan index entries */
 		if (!err)
 			err = ovl_indexdir_cleanup(ofs);
 	}
 	if (err || !ofs->indexdir)
-		pr_warn("overlayfs: try deleting index dir or mounting with '-o index=off' to disable inodes index.\n");
+		pr_warn("try deleting index dir or mounting with '-o index=off' to disable inodes index.\n");
 
 out:
 	mnt_drop_write(mnt);
@@ -1258,7 +1259,7 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
 	if (!ofs->config.nfs_export && !ofs->upper_mnt)
 		return true;
 
-	for (i = 0; i < ofs->numlowerfs; i++) {
+	for (i = 0; i < ofs->numfs; i++) {
 		/*
 		 * We use uuid to associate an overlay lower file handle with a
 		 * lower layer, so we can accept lower fs with null uuid as long
@@ -1266,8 +1267,9 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
 		 * if we detect multiple lower fs with the same uuid, we
 		 * disable lower file handle decoding on all of them.
 		 */
-		if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid)) {
-			ofs->lower_fs[i].bad_uuid = true;
+		if (ofs->fs[i].is_lower &&
+		    uuid_equal(&ofs->fs[i].sb->s_uuid, uuid)) {
+			ofs->fs[i].bad_uuid = true;
 			return false;
 		}
 	}
@@ -1283,13 +1285,9 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
 	int err;
 	bool bad_uuid = false;
 
-	/* fsid 0 is reserved for upper fs even with non upper overlay */
-	if (ofs->upper_mnt && ofs->upper_mnt->mnt_sb == sb)
-		return 0;
-
-	for (i = 0; i < ofs->numlowerfs; i++) {
-		if (ofs->lower_fs[i].sb == sb)
-			return i + 1;
+	for (i = 0; i < ofs->numfs; i++) {
+		if (ofs->fs[i].sb == sb)
+			return i;
 	}
 
 	if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) {
@@ -1297,7 +1295,7 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
 		if (ofs->config.index || ofs->config.nfs_export) {
 			ofs->config.index = false;
 			ofs->config.nfs_export = false;
-			pr_warn("overlayfs: %s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n",
+			pr_warn("%s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n",
 				uuid_is_null(&sb->s_uuid) ? "null" :
 							    "conflicting",
 				path->dentry);
@@ -1306,34 +1304,58 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
 
 	err = get_anon_bdev(&dev);
 	if (err) {
-		pr_err("overlayfs: failed to get anonymous bdev for lowerpath\n");
+		pr_err("failed to get anonymous bdev for lowerpath\n");
 		return err;
 	}
 
-	ofs->lower_fs[ofs->numlowerfs].sb = sb;
-	ofs->lower_fs[ofs->numlowerfs].pseudo_dev = dev;
-	ofs->lower_fs[ofs->numlowerfs].bad_uuid = bad_uuid;
-	ofs->numlowerfs++;
+	ofs->fs[ofs->numfs].sb = sb;
+	ofs->fs[ofs->numfs].pseudo_dev = dev;
+	ofs->fs[ofs->numfs].bad_uuid = bad_uuid;
 
-	return ofs->numlowerfs;
+	return ofs->numfs++;
 }
 
-static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
-				struct path *stack, unsigned int numlower)
+static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
+			  struct path *stack, unsigned int numlower)
 {
 	int err;
 	unsigned int i;
+	struct ovl_layer *layers;
 
 	err = -ENOMEM;
-	ofs->lower_layers = kcalloc(numlower, sizeof(struct ovl_layer),
-				    GFP_KERNEL);
-	if (ofs->lower_layers == NULL)
+	layers = kcalloc(numlower + 1, sizeof(struct ovl_layer), GFP_KERNEL);
+	if (!layers)
+		goto out;
+	ofs->layers = layers;
+
+	ofs->fs = kcalloc(numlower + 1, sizeof(struct ovl_sb), GFP_KERNEL);
+	if (ofs->fs == NULL)
 		goto out;
 
-	ofs->lower_fs = kcalloc(numlower, sizeof(struct ovl_sb),
-				GFP_KERNEL);
-	if (ofs->lower_fs == NULL)
+	/* idx/fsid 0 are reserved for upper fs even with lower only overlay */
+	ofs->numfs++;
+
+	layers[0].mnt = ofs->upper_mnt;
+	layers[0].idx = 0;
+	layers[0].fsid = 0;
+	ofs->numlayer = 1;
+
+	/*
+	 * All lower layers that share the same fs as upper layer, use the same
+	 * pseudo_dev as upper layer.  Allocate fs[0].pseudo_dev even for lower
+	 * only overlay to simplify ovl_fs_free().
+	 * is_lower will be set if upper fs is shared with a lower layer.
+	 */
+	err = get_anon_bdev(&ofs->fs[0].pseudo_dev);
+	if (err) {
+		pr_err("failed to get anonymous bdev for upper fs\n");
 		goto out;
+	}
+
+	if (ofs->upper_mnt) {
+		ofs->fs[0].sb = ofs->upper_mnt->mnt_sb;
+		ofs->fs[0].is_lower = false;
+	}
 
 	for (i = 0; i < numlower; i++) {
 		struct vfsmount *mnt;
@@ -1357,7 +1379,7 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
 		mnt = clone_private_mount(&stack[i]);
 		err = PTR_ERR(mnt);
 		if (IS_ERR(mnt)) {
-			pr_err("overlayfs: failed to clone lowerpath\n");
+			pr_err("failed to clone lowerpath\n");
 			iput(trap);
 			goto out;
 		}
@@ -1368,15 +1390,13 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
 		 */
 		mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME;
 
-		ofs->lower_layers[ofs->numlower].trap = trap;
-		ofs->lower_layers[ofs->numlower].mnt = mnt;
-		ofs->lower_layers[ofs->numlower].idx = i + 1;
-		ofs->lower_layers[ofs->numlower].fsid = fsid;
-		if (fsid) {
-			ofs->lower_layers[ofs->numlower].fs =
-				&ofs->lower_fs[fsid - 1];
-		}
-		ofs->numlower++;
+		layers[ofs->numlayer].trap = trap;
+		layers[ofs->numlayer].mnt = mnt;
+		layers[ofs->numlayer].idx = ofs->numlayer;
+		layers[ofs->numlayer].fsid = fsid;
+		layers[ofs->numlayer].fs = &ofs->fs[fsid];
+		ofs->numlayer++;
+		ofs->fs[fsid].is_lower = true;
 	}
 
 	/*
@@ -1387,22 +1407,23 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
 	 * bits reserved for fsid, it emits a warning and uses the original
 	 * inode number.
 	 */
-	if (!ofs->numlowerfs || (ofs->numlowerfs == 1 && !ofs->upper_mnt)) {
-		ofs->xino_bits = 0;
-		ofs->config.xino = OVL_XINO_OFF;
-	} else if (ofs->config.xino == OVL_XINO_ON && !ofs->xino_bits) {
+	if (ofs->numfs - !ofs->upper_mnt == 1) {
+		if (ofs->config.xino == OVL_XINO_ON)
+			pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n");
+		ofs->xino_mode = 0;
+	} else if (ofs->config.xino == OVL_XINO_ON && ofs->xino_mode < 0) {
 		/*
-		 * This is a roundup of number of bits needed for numlowerfs+1
-		 * (i.e. ilog2(numlowerfs+1 - 1) + 1). fsid 0 is reserved for
-		 * upper fs even with non upper overlay.
+		 * This is a roundup of number of bits needed for encoding
+		 * fsid, where fsid 0 is reserved for upper fs even with
+		 * lower only overlay.
 		 */
 		BUILD_BUG_ON(ilog2(OVL_MAX_STACK) > 31);
-		ofs->xino_bits = ilog2(ofs->numlowerfs) + 1;
+		ofs->xino_mode = ilog2(ofs->numfs - 1) + 1;
 	}
 
-	if (ofs->xino_bits) {
-		pr_info("overlayfs: \"xino\" feature enabled using %d upper inode bits.\n",
-			ofs->xino_bits);
+	if (ofs->xino_mode > 0) {
+		pr_info("\"xino\" feature enabled using %d upper inode bits.\n",
+			ofs->xino_mode);
 	}
 
 	err = 0;
@@ -1428,15 +1449,15 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
 	err = -EINVAL;
 	stacklen = ovl_split_lowerdirs(lowertmp);
 	if (stacklen > OVL_MAX_STACK) {
-		pr_err("overlayfs: too many lower directories, limit is %d\n",
+		pr_err("too many lower directories, limit is %d\n",
 		       OVL_MAX_STACK);
 		goto out_err;
 	} else if (!ofs->config.upperdir && stacklen == 1) {
-		pr_err("overlayfs: at least 2 lowerdir are needed while upperdir nonexistent\n");
+		pr_err("at least 2 lowerdir are needed while upperdir nonexistent\n");
 		goto out_err;
 	} else if (!ofs->config.upperdir && ofs->config.nfs_export &&
 		   ofs->config.redirect_follow) {
-		pr_warn("overlayfs: NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n");
+		pr_warn("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n");
 		ofs->config.nfs_export = false;
 	}
 
@@ -1459,11 +1480,11 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
 	err = -EINVAL;
 	sb->s_stack_depth++;
 	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
-		pr_err("overlayfs: maximum fs stacking depth exceeded\n");
+		pr_err("maximum fs stacking depth exceeded\n");
 		goto out_err;
 	}
 
-	err = ovl_get_lower_layers(sb, ofs, stack, numlower);
+	err = ovl_get_layers(sb, ofs, stack, numlower);
 	if (err)
 		goto out_err;
 
@@ -1474,7 +1495,7 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
 
 	for (i = 0; i < numlower; i++) {
 		oe->lowerstack[i].dentry = dget(stack[i].dentry);
-		oe->lowerstack[i].layer = &ofs->lower_layers[i];
+		oe->lowerstack[i].layer = &ofs->layers[i+1];
 	}
 
 	if (remote)
@@ -1515,7 +1536,7 @@ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
 	while (!err && parent != next) {
 		if (ovl_lookup_trap_inode(sb, parent)) {
 			err = -ELOOP;
-			pr_err("overlayfs: overlapping %s path\n", name);
+			pr_err("overlapping %s path\n", name);
 		} else if (ovl_is_inuse(parent)) {
 			err = ovl_report_in_use(ofs, name);
 		}
@@ -1555,9 +1576,9 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
 			return err;
 	}
 
-	for (i = 0; i < ofs->numlower; i++) {
+	for (i = 1; i < ofs->numlayer; i++) {
 		err = ovl_check_layer(sb, ofs,
-				      ofs->lower_layers[i].mnt->mnt_root,
+				      ofs->layers[i].mnt->mnt_root,
 				      "lowerdir");
 		if (err)
 			return err;
@@ -1595,7 +1616,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 	err = -EINVAL;
 	if (!ofs->config.lowerdir) {
 		if (!silent)
-			pr_err("overlayfs: missing 'lowerdir'\n");
+			pr_err("missing 'lowerdir'\n");
 		goto out_err;
 	}
 
@@ -1603,14 +1624,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
 	/* Assume underlaying fs uses 32bit inodes unless proven otherwise */
 	if (ofs->config.xino != OVL_XINO_OFF)
-		ofs->xino_bits = BITS_PER_LONG - 32;
+		ofs->xino_mode = BITS_PER_LONG - 32;
 
 	/* alloc/destroy_inode needed for setting up traps in inode cache */
 	sb->s_op = &ovl_super_operations;
 
 	if (ofs->config.upperdir) {
 		if (!ofs->config.workdir) {
-			pr_err("overlayfs: missing 'workdir'\n");
+			pr_err("missing 'workdir'\n");
 			goto out_err;
 		}
 
@@ -1660,13 +1681,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 	if (!ofs->indexdir) {
 		ofs->config.index = false;
 		if (ofs->upper_mnt && ofs->config.nfs_export) {
-			pr_warn("overlayfs: NFS export requires an index dir, falling back to nfs_export=off.\n");
+			pr_warn("NFS export requires an index dir, falling back to nfs_export=off.\n");
 			ofs->config.nfs_export = false;
 		}
 	}
 
 	if (ofs->config.metacopy && ofs->config.nfs_export) {
-		pr_warn("overlayfs: NFS export is not supported with metadata only copy up, falling back to nfs_export=off.\n");
+		pr_warn("NFS export is not supported with metadata only copy up, falling back to nfs_export=off.\n");
 		ofs->config.nfs_export = false;
 	}
 
@@ -1749,9 +1770,15 @@ static int __init ovl_init(void)
 	if (ovl_inode_cachep == NULL)
 		return -ENOMEM;
 
-	err = register_filesystem(&ovl_fs_type);
-	if (err)
-		kmem_cache_destroy(ovl_inode_cachep);
+	err = ovl_aio_request_cache_init();
+	if (!err) {
+		err = register_filesystem(&ovl_fs_type);
+		if (!err)
+			return 0;
+
+		ovl_aio_request_cache_destroy();
+	}
+	kmem_cache_destroy(ovl_inode_cachep);
 
 	return err;
 }
@@ -1766,7 +1793,7 @@ static void __exit ovl_exit(void)
 	 */
 	rcu_barrier();
 	kmem_cache_destroy(ovl_inode_cachep);
-
+	ovl_aio_request_cache_destroy();
 }
 
 module_init(ovl_init);
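
The super.c rework above replaces the separate lower_layers[]/lower_fs[] arrays with unified layers[]/fs[] arrays whose slot 0 belongs to the upper layer. An illustrative summary of the new layout (not kernel code):

/* layers[0]            upper layer (idx 0, fsid 0; no mnt on lower-only mounts)
 * layers[1..numlower]  the lowerdir entries, in mount-option order
 * fs[0]                the upper sb; fsid 0 stays reserved for it even when
 *                      there is no upperdir
 * fs[1..numfs-1]       one entry per distinct lower sb, each with a pseudo_dev
 */
#include <stddef.h>

/* hence the mechanical s/lower_layers[i]/layers[i + 1]/ edits above and the
 * lower-layer loops that now start at index 1 instead of 0 */
static size_t lower_to_layer_idx(size_t lower_idx)
{
	return lower_idx + 1;
}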
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index f5678a3..ea00508 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -40,18 +40,6 @@ const struct cred *ovl_override_creds(struct super_block *sb)
 	return override_creds(ofs->creator_cred);
 }
 
-struct super_block *ovl_same_sb(struct super_block *sb)
-{
-	struct ovl_fs *ofs = sb->s_fs_info;
-
-	if (!ofs->numlowerfs)
-		return ofs->upper_mnt->mnt_sb;
-	else if (ofs->numlowerfs == 1 && !ofs->upper_mnt)
-		return ofs->lower_fs[0].sb;
-	else
-		return NULL;
-}
-
 /*
  * Check if underlying fs supports file handles and try to determine encoding
  * type, in order to deduce maximum inode number used by fs.
@@ -198,7 +186,7 @@ struct dentry *ovl_dentry_lower(struct dentry *dentry)
 	return oe->numlower ? oe->lowerstack[0].dentry : NULL;
 }
 
-struct ovl_layer *ovl_layer_lower(struct dentry *dentry)
+const struct ovl_layer *ovl_layer_lower(struct dentry *dentry)
 {
 	struct ovl_entry *oe = dentry->d_fsdata;
 
@@ -576,7 +564,7 @@ int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
 	err = ovl_do_setxattr(upperdentry, name, value, size, 0);
 
 	if (err == -EOPNOTSUPP) {
-		pr_warn("overlayfs: cannot set %s xattr on upper\n", name);
+		pr_warn("cannot set %s xattr on upper\n", name);
 		ofs->noxattr = true;
 		return xerr;
 	}
@@ -700,7 +688,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
 
 	inode = d_inode(upperdentry);
 	if (!S_ISDIR(inode->i_mode) && inode->i_nlink != 1) {
-		pr_warn_ratelimited("overlayfs: cleanup linked index (%pd2, ino=%lu, nlink=%u)\n",
+		pr_warn_ratelimited("cleanup linked index (%pd2, ino=%lu, nlink=%u)\n",
 				    upperdentry, inode->i_ino, inode->i_nlink);
 		/*
 		 * We either have a bug with persistent union nlink or a lower
@@ -739,7 +727,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
 	return;
 
 fail:
-	pr_err("overlayfs: cleanup index of '%pd2' failed (%i)\n", dentry, err);
+	pr_err("cleanup index of '%pd2' failed (%i)\n", dentry, err);
 	goto out;
 }
 
@@ -830,7 +818,7 @@ int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir)
 err_unlock:
 	unlock_rename(workdir, upperdir);
 err:
-	pr_err("overlayfs: failed to lock workdir+upperdir\n");
+	pr_err("failed to lock workdir+upperdir\n");
 	return -EIO;
 }
 
@@ -852,7 +840,7 @@ int ovl_check_metacopy_xattr(struct dentry *dentry)
 
 	return 1;
 out:
-	pr_warn_ratelimited("overlayfs: failed to get metacopy (%i)\n", res);
+	pr_warn_ratelimited("failed to get metacopy (%i)\n", res);
 	return res;
 }
 
@@ -899,7 +887,7 @@ ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value,
 	return res;
 
 fail:
-	pr_warn_ratelimited("overlayfs: failed to get xattr %s: err=%zi)\n",
+	pr_warn_ratelimited("failed to get xattr %s: err=%zi)\n",
 			    name, res);
 	kfree(buf);
 	return res;
@@ -931,7 +919,7 @@ char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
 
 	return buf;
 invalid:
-	pr_warn_ratelimited("overlayfs: invalid redirect (%s)\n", buf);
+	pr_warn_ratelimited("invalid redirect (%s)\n", buf);
 	res = -EINVAL;
 	kfree(buf);
 	return ERR_PTR(res);
diff --git a/fs/pipe.c b/fs/pipe.c
index 57502c3..5a34d6c 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -108,16 +108,19 @@ void pipe_double_lock(struct pipe_inode_info *pipe1,
 /* Drop the inode semaphore and wait for a pipe event, atomically */
 void pipe_wait(struct pipe_inode_info *pipe)
 {
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT(rdwait);
+	DEFINE_WAIT(wrwait);
 
 	/*
 	 * Pipes are system-local resources, so sleeping on them
 	 * is considered a noninteractive wait:
 	 */
-	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(&pipe->wr_wait, &wrwait, TASK_INTERRUPTIBLE);
 	pipe_unlock(pipe);
 	schedule();
-	finish_wait(&pipe->wait, &wait);
+	finish_wait(&pipe->rd_wait, &rdwait);
+	finish_wait(&pipe->wr_wait, &wrwait);
 	pipe_lock(pipe);
 }
 
@@ -286,7 +289,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
 	size_t total_len = iov_iter_count(to);
 	struct file *filp = iocb->ki_filp;
 	struct pipe_inode_info *pipe = filp->private_data;
-	bool was_full;
+	bool was_full, wake_next_reader = false;
 	ssize_t ret;
 
 	/* Null read succeeds. */
@@ -344,10 +347,10 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
 
 			if (!buf->len) {
 				pipe_buf_release(pipe, buf);
-				spin_lock_irq(&pipe->wait.lock);
+				spin_lock_irq(&pipe->rd_wait.lock);
 				tail++;
 				pipe->tail = tail;
-				spin_unlock_irq(&pipe->wait.lock);
+				spin_unlock_irq(&pipe->rd_wait.lock);
 			}
 			total_len -= chars;
 			if (!total_len)
@@ -384,7 +387,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
 		 * no data.
 		 */
 		if (unlikely(was_full)) {
-			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
+			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
 			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 		}
 
@@ -394,18 +397,23 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
 		 * since we've done any required wakeups and there's no need
 		 * to mark anything accessed. And we've dropped the lock.
 		 */
-		if (wait_event_interruptible(pipe->wait, pipe_readable(pipe)) < 0)
+		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
 			return -ERESTARTSYS;
 
 		__pipe_lock(pipe);
 		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
+		wake_next_reader = true;
 	}
+	if (pipe_empty(pipe->head, pipe->tail))
+		wake_next_reader = false;
 	__pipe_unlock(pipe);
 
 	if (was_full) {
-		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
+		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 	}
+	if (wake_next_reader)
+		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
 	if (ret > 0)
 		file_accessed(filp);
 	return ret;
@@ -437,6 +445,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
 	size_t total_len = iov_iter_count(from);
 	ssize_t chars;
 	bool was_empty = false;
+	bool wake_next_writer = false;
 
 	/* Null write succeeds. */
 	if (unlikely(total_len == 0))
@@ -515,16 +524,16 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
 			 * it, either the reader will consume it or it'll still
 			 * be there for the next write.
 			 */
-			spin_lock_irq(&pipe->wait.lock);
+			spin_lock_irq(&pipe->rd_wait.lock);
 
 			head = pipe->head;
 			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
-				spin_unlock_irq(&pipe->wait.lock);
+				spin_unlock_irq(&pipe->rd_wait.lock);
 				continue;
 			}
 
 			pipe->head = head + 1;
-			spin_unlock_irq(&pipe->wait.lock);
+			spin_unlock_irq(&pipe->rd_wait.lock);
 
 			/* Insert it into the buffer array */
 			buf = &pipe->bufs[head & mask];
@@ -576,14 +585,17 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
 		 */
 		__pipe_unlock(pipe);
 		if (was_empty) {
-			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
+			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
 			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 		}
-		wait_event_interruptible(pipe->wait, pipe_writable(pipe));
+		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
 		__pipe_lock(pipe);
 		was_empty = pipe_empty(pipe->head, pipe->tail);
+		wake_next_writer = true;
 	}
 out:
+	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
+		wake_next_writer = false;
 	__pipe_unlock(pipe);
 
 	/*
@@ -596,9 +608,11 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
 	 * wake up pending jobs
 	 */
 	if (was_empty) {
-		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
+		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 	}
+	if (wake_next_writer)
+		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
 	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
 		int err = file_update_time(filp);
 		if (err)
@@ -642,12 +656,15 @@ pipe_poll(struct file *filp, poll_table *wait)
 	unsigned int head, tail;
 
 	/*
-	 * Reading only -- no need for acquiring the semaphore.
+	 * Reading pipe state only -- no need for acquiring the semaphore.
 	 *
 	 * But because this is racy, the code has to add the
 	 * entry to the poll table _first_ ..
 	 */
-	poll_wait(filp, &pipe->wait, wait);
+	if (filp->f_mode & FMODE_READ)
+		poll_wait(filp, &pipe->rd_wait, wait);
+	if (filp->f_mode & FMODE_WRITE)
+		poll_wait(filp, &pipe->wr_wait, wait);
 
 	/*
 	 * .. and only then can you do the racy tests. That way,
@@ -706,7 +723,8 @@ pipe_release(struct inode *inode, struct file *file)
 		pipe->writers--;
 
 	if (pipe->readers || pipe->writers) {
-		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
+		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLHUP);
+		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 	}
@@ -789,7 +807,8 @@ struct pipe_inode_info *alloc_pipe_info(void)
 			     GFP_KERNEL_ACCOUNT);
 
 	if (pipe->bufs) {
-		init_waitqueue_head(&pipe->wait);
+		init_waitqueue_head(&pipe->rd_wait);
+		init_waitqueue_head(&pipe->wr_wait);
 		pipe->r_counter = pipe->w_counter = 1;
 		pipe->max_usage = pipe_bufs;
 		pipe->ring_size = pipe_bufs;
@@ -1007,7 +1026,8 @@ static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
 
 static void wake_up_partner(struct pipe_inode_info *pipe)
 {
-	wake_up_interruptible(&pipe->wait);
+	wake_up_interruptible(&pipe->rd_wait);
+	wake_up_interruptible(&pipe->wr_wait);
 }
 
 static int fifo_open(struct inode *inode, struct file *filp)
@@ -1118,13 +1138,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
 
 err_rd:
 	if (!--pipe->readers)
-		wake_up_interruptible(&pipe->wait);
+		wake_up_interruptible(&pipe->wr_wait);
 	ret = -ERESTARTSYS;
 	goto err;
 
 err_wr:
 	if (!--pipe->writers)
-		wake_up_interruptible(&pipe->wait);
+		wake_up_interruptible(&pipe->rd_wait);
 	ret = -ERESTARTSYS;
 	goto err;
 
@@ -1251,7 +1271,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
 	pipe->max_usage = nr_slots;
 	pipe->tail = tail;
 	pipe->head = head;
-	wake_up_interruptible_all(&pipe->wait);
+	wake_up_interruptible_all(&pipe->rd_wait);
+	wake_up_interruptible_all(&pipe->wr_wait);
 	return pipe->max_usage * PAGE_SIZE;
 
 out_revert_acct:
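
The pipe change above splits the single pipe->wait queue into rd_wait and wr_wait, makes readers and writers sleep exclusively on their own queue, and lets a waiter that leaves data or space behind pass the wakeup along itself. A condensed shape of the new reader loop, assembled from the hunks above with copying, fasync, and error handling elided (kernel context assumed; done_or_no_writers is a placeholder for the real exit conditions):

bool was_full, wake_next_reader = false;

for (;;) {
	/* ... copy out available buffers under the pipe mutex ... */
	if (done_or_no_writers)
		break;
	__pipe_unlock(pipe);
	if (was_full)
		wake_up_interruptible_sync_poll(&pipe->wr_wait,
						EPOLLOUT | EPOLLWRNORM);
	/* exclusive wait: a wake-one on rd_wait rouses a single reader */
	if (wait_event_interruptible_exclusive(pipe->rd_wait,
					       pipe_readable(pipe)) < 0)
		return -ERESTARTSYS;
	__pipe_lock(pipe);
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	wake_next_reader = true;
}
if (pipe_empty(pipe->head, pipe->tail))
	wake_next_reader = false;	/* drained everything: nothing to pass on */
__pipe_unlock(pipe);
if (wake_next_reader)
	wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);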
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index ead487e..bd08616 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -33,3 +33,4 @@
 proc-$(CONFIG_PROC_VMCORE)	+= vmcore.o
 proc-$(CONFIG_PRINTK)	+= kmsg.o
 proc-$(CONFIG_PROC_PAGE_MONITOR)	+= page.o
+proc-$(CONFIG_BOOT_CONFIG)	+= bootconfig.o
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9156867..c7c6427 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1718,8 +1718,7 @@ static const char *proc_pid_get_link(struct dentry *dentry,
 	if (error)
 		goto out;
 
-	nd_jump_link(&path);
-	return NULL;
+	error = nd_jump_link(&path);
 out:
 	return ERR_PTR(error);
 }
diff --git a/fs/proc/bootconfig.c b/fs/proc/bootconfig.c
new file mode 100644
index 0000000..9955d75c
--- /dev/null
+++ b/fs/proc/bootconfig.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * /proc/bootconfig - Extra boot configuration
+ */
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/printk.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/bootconfig.h>
+#include <linux/slab.h>
+
+static char *saved_boot_config;
+
+static int boot_config_proc_show(struct seq_file *m, void *v)
+{
+	if (saved_boot_config)
+		seq_puts(m, saved_boot_config);
+	return 0;
+}
+
+/* Rest size of buffer */
+#define rest(dst, end) ((end) > (dst) ? (end) - (dst) : 0)
+
+/* Return the needed total length if @size is 0 */
+static int __init copy_xbc_key_value_list(char *dst, size_t size)
+{
+	struct xbc_node *leaf, *vnode;
+	const char *val;
+	char *key, *end = dst + size;
+	int ret = 0;
+
+	key = kzalloc(XBC_KEYLEN_MAX, GFP_KERNEL);
+
+	xbc_for_each_key_value(leaf, val) {
+		ret = xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX);
+		if (ret < 0)
+			break;
+		ret = snprintf(dst, rest(dst, end), "%s = ", key);
+		if (ret < 0)
+			break;
+		dst += ret;
+		vnode = xbc_node_get_child(leaf);
+		if (vnode && xbc_node_is_array(vnode)) {
+			xbc_array_for_each_value(vnode, val) {
+				ret = snprintf(dst, rest(dst, end), "\"%s\"%s",
+					val, vnode->next ? ", " : "\n");
+				if (ret < 0)
+					goto out;
+				dst += ret;
+			}
+		} else {
+			ret = snprintf(dst, rest(dst, end), "\"%s\"\n", val);
+			if (ret < 0)
+				break;
+			dst += ret;
+		}
+	}
+out:
+	kfree(key);
+
+	return ret < 0 ? ret : dst - (end - size);
+}
+
+static int __init proc_boot_config_init(void)
+{
+	int len;
+
+	len = copy_xbc_key_value_list(NULL, 0);
+	if (len < 0)
+		return len;
+
+	if (len > 0) {
+		saved_boot_config = kzalloc(len + 1, GFP_KERNEL);
+		if (!saved_boot_config)
+			return -ENOMEM;
+
+		len = copy_xbc_key_value_list(saved_boot_config, len + 1);
+		if (len < 0) {
+			kfree(saved_boot_config);
+			return len;
+		}
+	}
+
+	proc_create_single("bootconfig", 0, NULL, boot_config_proc_show);
+
+	return 0;
+}
+fs_initcall(proc_boot_config_init);
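
proc_boot_config_init() above sizes its buffer by running the same formatter twice: first with a zero-sized destination so snprintf() only counts, then again into an allocation of exactly the reported length. A standalone userspace sketch of that idiom (item names are made up, not bootconfig keys):

#include <stdio.h>
#include <stdlib.h>

/* Format every item; with dst == NULL and size == 0 this only measures. */
static int format_items(char *dst, size_t size, const char *const *items,
			int nitems)
{
	size_t off = 0;

	for (int i = 0; i < nitems; i++) {
		size_t room = off < size ? size - off : 0;
		int ret = snprintf(room ? dst + off : NULL, room,
				   "%s = \"on\"\n", items[i]);
		if (ret < 0)
			return ret;
		off += (size_t)ret;
	}
	return (int)off;
}

int main(void)
{
	const char *items[] = { "ftrace.event", "init.arg" };
	int len = format_items(NULL, 0, items, 2);	/* pass 1: measure */
	char *buf;

	if (len < 0)
		return 1;
	buf = malloc(len + 1);
	if (!buf)
		return 1;
	format_items(buf, len + 1, items, 2);		/* pass 2: fill */
	fputs(buf, stdout);
	free(buf);
	return 0;
}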
diff --git a/fs/proc/cpuinfo.c b/fs/proc/cpuinfo.c
index 96f1087..c1dea9b 100644
--- a/fs/proc/cpuinfo.c
+++ b/fs/proc/cpuinfo.c
@@ -16,16 +16,16 @@ static int cpuinfo_open(struct inode *inode, struct file *file)
 	return seq_open(file, &cpuinfo_op);
 }
 
-static const struct file_operations proc_cpuinfo_operations = {
-	.open		= cpuinfo_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
+static const struct proc_ops cpuinfo_proc_ops = {
+	.proc_open	= cpuinfo_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
 };
 
 static int __init proc_cpuinfo_init(void)
 {
-	proc_create("cpuinfo", 0, NULL, &proc_cpuinfo_operations);
+	proc_create("cpuinfo", 0, NULL, &cpuinfo_proc_ops);
 	return 0;
 }
 fs_initcall(proc_cpuinfo_init);
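
The cpuinfo conversion above is the pattern repeated throughout fs/proc in this series: /proc entries move from struct file_operations to the slimmer struct proc_ops, each callback gaining a proc_ prefix (open->proc_open, read->proc_read, llseek->proc_lseek, release->proc_release, poll->proc_poll, unlocked_ioctl->proc_ioctl, compat_ioctl->proc_compat_ioctl, mmap->proc_mmap), and proc_create()/proc_create_data() now take the new table. A minimal sketch of a converted entry (kernel context assumed; the "example" names are illustrative):

static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct proc_ops example_proc_ops = {
	.proc_open	= example_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/* registered with: proc_create("example", 0, NULL, &example_proc_ops); */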
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 074e958..3faed94 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -473,7 +473,7 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
 	ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
 	if (ent) {
 		ent->data = data;
-		ent->proc_fops = &proc_dir_operations;
+		ent->proc_dir_ops = &proc_dir_operations;
 		ent->proc_iops = &proc_dir_inode_operations;
 		ent = proc_register(parent, ent);
 	}
@@ -503,7 +503,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name)
 	ent = __proc_create(&parent, name, mode, 2);
 	if (ent) {
 		ent->data = NULL;
-		ent->proc_fops = NULL;
+		ent->proc_dir_ops = NULL;
 		ent->proc_iops = NULL;
 		ent = proc_register(parent, ent);
 	}
@@ -533,25 +533,23 @@ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
 
 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
 		struct proc_dir_entry *parent,
-		const struct file_operations *proc_fops, void *data)
+		const struct proc_ops *proc_ops, void *data)
 {
 	struct proc_dir_entry *p;
 
-	BUG_ON(proc_fops == NULL);
-
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
-	p->proc_fops = proc_fops;
+	p->proc_ops = proc_ops;
 	return proc_register(parent, p);
 }
 EXPORT_SYMBOL(proc_create_data);
  
 struct proc_dir_entry *proc_create(const char *name, umode_t mode,
 				   struct proc_dir_entry *parent,
-				   const struct file_operations *proc_fops)
+				   const struct proc_ops *proc_ops)
 {
-	return proc_create_data(name, mode, parent, proc_fops, NULL);
+	return proc_create_data(name, mode, parent, proc_ops, NULL);
 }
 EXPORT_SYMBOL(proc_create);
 
@@ -573,11 +571,11 @@ static int proc_seq_release(struct inode *inode, struct file *file)
 	return seq_release(inode, file);
 }
 
-static const struct file_operations proc_seq_fops = {
-	.open		= proc_seq_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= proc_seq_release,
+static const struct proc_ops proc_seq_ops = {
+	.proc_open	= proc_seq_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= proc_seq_release,
 };
 
 struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
@@ -589,7 +587,7 @@ struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
-	p->proc_fops = &proc_seq_fops;
+	p->proc_ops = &proc_seq_ops;
 	p->seq_ops = ops;
 	p->state_size = state_size;
 	return proc_register(parent, p);
@@ -603,11 +601,11 @@ static int proc_single_open(struct inode *inode, struct file *file)
 	return single_open(file, de->single_show, de->data);
 }
 
-static const struct file_operations proc_single_fops = {
-	.open		= proc_single_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
+static const struct proc_ops proc_single_ops = {
+	.proc_open	= proc_single_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
 };
 
 struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
@@ -619,7 +617,7 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
-	p->proc_fops = &proc_single_fops;
+	p->proc_ops = &proc_single_ops;
 	p->single_show = show;
 	return proc_register(parent, p);
 }
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index dbe43a5..6da18316 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -163,7 +163,7 @@ static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
 		pdeo->closing = true;
 		spin_unlock(&pde->pde_unload_lock);
 		file = pdeo->file;
-		pde->proc_fops->release(file_inode(file), file);
+		pde->proc_ops->proc_release(file_inode(file), file);
 		spin_lock(&pde->pde_unload_lock);
 		/* After ->release. */
 		list_del(&pdeo->lh);
@@ -200,12 +200,12 @@ static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
 	struct proc_dir_entry *pde = PDE(file_inode(file));
 	loff_t rv = -EINVAL;
 	if (use_pde(pde)) {
-		typeof_member(struct file_operations, llseek) llseek;
+		typeof_member(struct proc_ops, proc_lseek) lseek;
 
-		llseek = pde->proc_fops->llseek;
-		if (!llseek)
-			llseek = default_llseek;
-		rv = llseek(file, offset, whence);
+		lseek = pde->proc_ops->proc_lseek;
+		if (!lseek)
+			lseek = default_llseek;
+		rv = lseek(file, offset, whence);
 		unuse_pde(pde);
 	}
 	return rv;
@@ -216,9 +216,9 @@ static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count,
 	struct proc_dir_entry *pde = PDE(file_inode(file));
 	ssize_t rv = -EIO;
 	if (use_pde(pde)) {
-		typeof_member(struct file_operations, read) read;
+		typeof_member(struct proc_ops, proc_read) read;
 
-		read = pde->proc_fops->read;
+		read = pde->proc_ops->proc_read;
 		if (read)
 			rv = read(file, buf, count, ppos);
 		unuse_pde(pde);
@@ -231,9 +231,9 @@ static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t
 	struct proc_dir_entry *pde = PDE(file_inode(file));
 	ssize_t rv = -EIO;
 	if (use_pde(pde)) {
-		typeof_member(struct file_operations, write) write;
+		typeof_member(struct proc_ops, proc_write) write;
 
-		write = pde->proc_fops->write;
+		write = pde->proc_ops->proc_write;
 		if (write)
 			rv = write(file, buf, count, ppos);
 		unuse_pde(pde);
@@ -246,9 +246,9 @@ static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
 	struct proc_dir_entry *pde = PDE(file_inode(file));
 	__poll_t rv = DEFAULT_POLLMASK;
 	if (use_pde(pde)) {
-		typeof_member(struct file_operations, poll) poll;
+		typeof_member(struct proc_ops, proc_poll) poll;
 
-		poll = pde->proc_fops->poll;
+		poll = pde->proc_ops->proc_poll;
 		if (poll)
 			rv = poll(file, pts);
 		unuse_pde(pde);
@@ -261,9 +261,9 @@ static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigne
 	struct proc_dir_entry *pde = PDE(file_inode(file));
 	long rv = -ENOTTY;
 	if (use_pde(pde)) {
-		typeof_member(struct file_operations, unlocked_ioctl) ioctl;
+		typeof_member(struct proc_ops, proc_ioctl) ioctl;
 
-		ioctl = pde->proc_fops->unlocked_ioctl;
+		ioctl = pde->proc_ops->proc_ioctl;
 		if (ioctl)
 			rv = ioctl(file, cmd, arg);
 		unuse_pde(pde);
@@ -277,9 +277,9 @@ static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned
 	struct proc_dir_entry *pde = PDE(file_inode(file));
 	long rv = -ENOTTY;
 	if (use_pde(pde)) {
-		typeof_member(struct file_operations, compat_ioctl) compat_ioctl;
+		typeof_member(struct proc_ops, proc_compat_ioctl) compat_ioctl;
 
-		compat_ioctl = pde->proc_fops->compat_ioctl;
+		compat_ioctl = pde->proc_ops->proc_compat_ioctl;
 		if (compat_ioctl)
 			rv = compat_ioctl(file, cmd, arg);
 		unuse_pde(pde);
@@ -293,9 +293,9 @@ static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
 	struct proc_dir_entry *pde = PDE(file_inode(file));
 	int rv = -EIO;
 	if (use_pde(pde)) {
-		typeof_member(struct file_operations, mmap) mmap;
+		typeof_member(struct proc_ops, proc_mmap) mmap;
 
-		mmap = pde->proc_fops->mmap;
+		mmap = pde->proc_ops->proc_mmap;
 		if (mmap)
 			rv = mmap(file, vma);
 		unuse_pde(pde);
@@ -312,9 +312,9 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
 	unsigned long rv = -EIO;
 
 	if (use_pde(pde)) {
-		typeof_member(struct file_operations, get_unmapped_area) get_area;
+		typeof_member(struct proc_ops, proc_get_unmapped_area) get_area;
 
-		get_area = pde->proc_fops->get_unmapped_area;
+		get_area = pde->proc_ops->proc_get_unmapped_area;
 #ifdef CONFIG_MMU
 		if (!get_area)
 			get_area = current->mm->get_unmapped_area;
@@ -333,8 +333,8 @@ static int proc_reg_open(struct inode *inode, struct file *file)
 {
 	struct proc_dir_entry *pde = PDE(inode);
 	int rv = 0;
-	typeof_member(struct file_operations, open) open;
-	typeof_member(struct file_operations, release) release;
+	typeof_member(struct proc_ops, proc_open) open;
+	typeof_member(struct proc_ops, proc_release) release;
 	struct pde_opener *pdeo;
 
 	/*
@@ -351,7 +351,7 @@ static int proc_reg_open(struct inode *inode, struct file *file)
 	if (!use_pde(pde))
 		return -ENOENT;
 
-	release = pde->proc_fops->release;
+	release = pde->proc_ops->proc_release;
 	if (release) {
 		pdeo = kmem_cache_alloc(pde_opener_cache, GFP_KERNEL);
 		if (!pdeo) {
@@ -360,7 +360,7 @@ static int proc_reg_open(struct inode *inode, struct file *file)
 		}
 	}
 
-	open = pde->proc_fops->open;
+	open = pde->proc_ops->proc_open;
 	if (open)
 		rv = open(inode, file);
 
@@ -468,21 +468,23 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 			inode->i_size = de->size;
 		if (de->nlink)
 			set_nlink(inode, de->nlink);
-		WARN_ON(!de->proc_iops);
-		inode->i_op = de->proc_iops;
-		if (de->proc_fops) {
-			if (S_ISREG(inode->i_mode)) {
+
+		if (S_ISREG(inode->i_mode)) {
+			inode->i_op = de->proc_iops;
+			inode->i_fop = &proc_reg_file_ops;
 #ifdef CONFIG_COMPAT
-				if (!de->proc_fops->compat_ioctl)
-					inode->i_fop =
-						&proc_reg_file_ops_no_compat;
-				else
-#endif
-					inode->i_fop = &proc_reg_file_ops;
-			} else {
-				inode->i_fop = de->proc_fops;
+			if (!de->proc_ops->proc_compat_ioctl) {
+				inode->i_fop = &proc_reg_file_ops_no_compat;
 			}
-		}
+#endif
+		} else if (S_ISDIR(inode->i_mode)) {
+			inode->i_op = de->proc_iops;
+			inode->i_fop = de->proc_dir_ops;
+		} else if (S_ISLNK(inode->i_mode)) {
+			inode->i_op = de->proc_iops;
+			inode->i_fop = NULL;
+		} else
+			BUG();
 	} else
 	       pde_put(de);
 	return inode;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 0f3b557..4158727 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -39,7 +39,10 @@ struct proc_dir_entry {
 	spinlock_t pde_unload_lock;
 	struct completion *pde_unload_completion;
 	const struct inode_operations *proc_iops;
-	const struct file_operations *proc_fops;
+	union {
+		const struct proc_ops *proc_ops;
+		const struct file_operations *proc_dir_ops;
+	};
 	const struct dentry_operations *proc_dops;
 	union {
 		const struct seq_operations *seq_ops;
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e2ed8e0..8ba492d 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -574,11 +574,11 @@ static int release_kcore(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static const struct file_operations proc_kcore_operations = {
-	.read		= read_kcore,
-	.open		= open_kcore,
-	.release	= release_kcore,
-	.llseek		= default_llseek,
+static const struct proc_ops kcore_proc_ops = {
+	.proc_read	= read_kcore,
+	.proc_open	= open_kcore,
+	.proc_release	= release_kcore,
+	.proc_lseek	= default_llseek,
 };
 
 /* just remember that we have to update kcore */
@@ -637,8 +637,7 @@ static void __init add_modules_range(void)
 
 static int __init proc_kcore_init(void)
 {
-	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
-				      &proc_kcore_operations);
+	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
 	if (!proc_root_kcore) {
 		pr_err("couldn't create /proc/kcore\n");
 		return 0; /* Always returns 0. */
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index 4f4a2ab..ec1b7d2 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -49,17 +49,17 @@ static __poll_t kmsg_poll(struct file *file, poll_table *wait)
 }
 
 
-static const struct file_operations proc_kmsg_operations = {
-	.read		= kmsg_read,
-	.poll		= kmsg_poll,
-	.open		= kmsg_open,
-	.release	= kmsg_release,
-	.llseek		= generic_file_llseek,
+static const struct proc_ops kmsg_proc_ops = {
+	.proc_read	= kmsg_read,
+	.proc_poll	= kmsg_poll,
+	.proc_open	= kmsg_open,
+	.proc_release	= kmsg_release,
+	.proc_lseek	= generic_file_llseek,
 };
 
 static int __init proc_kmsg_init(void)
 {
-	proc_create("kmsg", S_IRUSR, NULL, &proc_kmsg_operations);
+	proc_create("kmsg", S_IRUSR, NULL, &kmsg_proc_ops);
 	return 0;
 }
 fs_initcall(proc_kmsg_init);
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 8b5c720..8e159fc 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -46,22 +46,26 @@ static const char *proc_ns_get_link(struct dentry *dentry,
 	const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
 	struct task_struct *task;
 	struct path ns_path;
-	void *error = ERR_PTR(-EACCES);
+	int error = -EACCES;
 
 	if (!dentry)
 		return ERR_PTR(-ECHILD);
 
 	task = get_proc_task(inode);
 	if (!task)
-		return error;
+		return ERR_PTR(-EACCES);
 
-	if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
-		error = ns_get_path(&ns_path, task, ns_ops);
-		if (!error)
-			nd_jump_link(&ns_path);
-	}
+	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
+		goto out;
+
+	error = ns_get_path(&ns_path, task, ns_ops);
+	if (error)
+		goto out;
+
+	error = nd_jump_link(&ns_path);
+out:
 	put_task_struct(task);
-	return error;
+	return ERR_PTR(error);
 }
 
 static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int buflen)
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 7c952ee..f909243 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -21,6 +21,21 @@
 #define KPMMASK (KPMSIZE - 1)
 #define KPMBITS (KPMSIZE * BITS_PER_BYTE)
 
+static inline unsigned long get_max_dump_pfn(void)
+{
+#ifdef CONFIG_SPARSEMEM
+	/*
+	 * The memmap of early sections is completely populated and marked
+	 * online even if max_pfn does not fall on a section boundary -
+	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
+	 * these memmaps.
+	 */
+	return round_up(max_pfn, PAGES_PER_SECTION);
+#else
+	return max_pfn;
+#endif
+}
+
 /* /proc/kpagecount - an array exposing page counts
  *
  * Each entry is a u64 representing the corresponding
@@ -29,6 +44,7 @@
 static ssize_t kpagecount_read(struct file *file, char __user *buf,
 			     size_t count, loff_t *ppos)
 {
+	const unsigned long max_dump_pfn = get_max_dump_pfn();
 	u64 __user *out = (u64 __user *)buf;
 	struct page *ppage;
 	unsigned long src = *ppos;
@@ -37,9 +53,11 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
 	u64 pcount;
 
 	pfn = src / KPMSIZE;
-	count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
 	if (src & KPMMASK || count & KPMMASK)
 		return -EINVAL;
+	if (src >= max_dump_pfn * KPMSIZE)
+		return 0;
+	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
 
 	while (count > 0) {
 		/*
@@ -71,9 +89,9 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
 	return ret;
 }
 
-static const struct file_operations proc_kpagecount_operations = {
-	.llseek = mem_lseek,
-	.read = kpagecount_read,
+static const struct proc_ops kpagecount_proc_ops = {
+	.proc_lseek	= mem_lseek,
+	.proc_read	= kpagecount_read,
 };
 
 /* /proc/kpageflags - an array exposing page flags
@@ -206,6 +224,7 @@ u64 stable_page_flags(struct page *page)
 static ssize_t kpageflags_read(struct file *file, char __user *buf,
 			     size_t count, loff_t *ppos)
 {
+	const unsigned long max_dump_pfn = get_max_dump_pfn();
 	u64 __user *out = (u64 __user *)buf;
 	struct page *ppage;
 	unsigned long src = *ppos;
@@ -213,9 +232,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
 	ssize_t ret = 0;
 
 	pfn = src / KPMSIZE;
-	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
 	if (src & KPMMASK || count & KPMMASK)
 		return -EINVAL;
+	if (src >= max_dump_pfn * KPMSIZE)
+		return 0;
+	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
 
 	while (count > 0) {
 		/*
@@ -242,15 +263,16 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
 	return ret;
 }
 
-static const struct file_operations proc_kpageflags_operations = {
-	.llseek = mem_lseek,
-	.read = kpageflags_read,
+static const struct proc_ops kpageflags_proc_ops = {
+	.proc_lseek	= mem_lseek,
+	.proc_read	= kpageflags_read,
 };
 
 #ifdef CONFIG_MEMCG
 static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
 				size_t count, loff_t *ppos)
 {
+	const unsigned long max_dump_pfn = get_max_dump_pfn();
 	u64 __user *out = (u64 __user *)buf;
 	struct page *ppage;
 	unsigned long src = *ppos;
@@ -259,9 +281,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
 	u64 ino;
 
 	pfn = src / KPMSIZE;
-	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
 	if (src & KPMMASK || count & KPMMASK)
 		return -EINVAL;
+	if (src >= max_dump_pfn * KPMSIZE)
+		return 0;
+	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
 
 	while (count > 0) {
 		/*
@@ -293,18 +317,18 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
 	return ret;
 }
 
-static const struct file_operations proc_kpagecgroup_operations = {
-	.llseek = mem_lseek,
-	.read = kpagecgroup_read,
+static const struct proc_ops kpagecgroup_proc_ops = {
+	.proc_lseek	= mem_lseek,
+	.proc_read	= kpagecgroup_read,
 };
 #endif /* CONFIG_MEMCG */
 
 static int __init proc_page_init(void)
 {
-	proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
-	proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
+	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
+	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
 #ifdef CONFIG_MEMCG
-	proc_create("kpagecgroup", S_IRUSR, NULL, &proc_kpagecgroup_operations);
+	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
 #endif
 	return 0;
 }
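
For reference, the conversion pattern used throughout the fs/proc hunks above: entries registered through proc_create() switch from struct file_operations to the proc-specific struct proc_ops, whose members carry a proc_ prefix, and proc_create() now takes the proc_ops pointer. A minimal out-of-tree sketch of the same pattern follows; the "demo" entry is hypothetical and not part of this series.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

/* proc_ops replaces file_operations for /proc entries; note the
 * proc_-prefixed member names matching the hunks above. */
static const struct proc_ops demo_proc_ops = {
	.proc_open	= demo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init demo_init(void)
{
	proc_create("demo", 0, NULL, &demo_proc_ops);
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");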
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 76ae278..4888c52 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -90,12 +90,12 @@ static int seq_release_net(struct inode *ino, struct file *f)
 	return 0;
 }
 
-static const struct file_operations proc_net_seq_fops = {
-	.open		= seq_open_net,
-	.read		= seq_read,
-	.write		= proc_simple_write,
-	.llseek		= seq_lseek,
-	.release	= seq_release_net,
+static const struct proc_ops proc_net_seq_ops = {
+	.proc_open	= seq_open_net,
+	.proc_read	= seq_read,
+	.proc_write	= proc_simple_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release_net,
 };
 
 struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
@@ -108,7 +108,7 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
 	if (!p)
 		return NULL;
 	pde_force_lookup(p);
-	p->proc_fops = &proc_net_seq_fops;
+	p->proc_ops = &proc_net_seq_ops;
 	p->seq_ops = ops;
 	p->state_size = state_size;
 	return proc_register(parent, p);
@@ -152,7 +152,7 @@ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode
 	if (!p)
 		return NULL;
 	pde_force_lookup(p);
-	p->proc_fops = &proc_net_seq_fops;
+	p->proc_ops = &proc_net_seq_ops;
 	p->seq_ops = ops;
 	p->state_size = state_size;
 	p->write = write;
@@ -183,12 +183,12 @@ static int single_release_net(struct inode *ino, struct file *f)
 	return single_release(ino, f);
 }
 
-static const struct file_operations proc_net_single_fops = {
-	.open		= single_open_net,
-	.read		= seq_read,
-	.write		= proc_simple_write,
-	.llseek		= seq_lseek,
-	.release	= single_release_net,
+static const struct proc_ops proc_net_single_ops = {
+	.proc_open	= single_open_net,
+	.proc_read	= seq_read,
+	.proc_write	= proc_simple_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release_net,
 };
 
 struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
@@ -201,7 +201,7 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
 	if (!p)
 		return NULL;
 	pde_force_lookup(p);
-	p->proc_fops = &proc_net_single_fops;
+	p->proc_ops = &proc_net_single_ops;
 	p->single_show = show;
 	return proc_register(parent, p);
 }
@@ -244,7 +244,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
 	if (!p)
 		return NULL;
 	pde_force_lookup(p);
-	p->proc_fops = &proc_net_single_fops;
+	p->proc_ops = &proc_net_single_ops;
 	p->single_show = show;
 	p->write = write;
 	return proc_register(parent, p);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index d80989b..c75bb46 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1720,7 +1720,7 @@ int __init proc_sys_init(void)
 
 	proc_sys_root = proc_mkdir("sys", NULL);
 	proc_sys_root->proc_iops = &proc_sys_dir_operations;
-	proc_sys_root->proc_fops = &proc_sys_dir_file_operations;
+	proc_sys_root->proc_dir_ops = &proc_sys_dir_file_operations;
 	proc_sys_root->nlink = 0;
 
 	return sysctl_init();
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 0b7c8df..608233d 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -41,24 +41,19 @@ enum proc_param {
 	Opt_hidepid,
 };
 
-static const struct fs_parameter_spec proc_param_specs[] = {
+static const struct fs_parameter_spec proc_fs_parameters[] = {
 	fsparam_u32("gid",	Opt_gid),
 	fsparam_u32("hidepid",	Opt_hidepid),
 	{}
 };
 
-static const struct fs_parameter_description proc_fs_parameters = {
-	.name		= "proc",
-	.specs		= proc_param_specs,
-};
-
 static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param)
 {
 	struct proc_fs_context *ctx = fc->fs_private;
 	struct fs_parse_result result;
 	int opt;
 
-	opt = fs_parse(fc, &proc_fs_parameters, param, &result);
+	opt = fs_parse(fc, proc_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
 
@@ -71,7 +66,7 @@ static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param)
 		ctx->hidepid = result.uint_32;
 		if (ctx->hidepid < HIDEPID_OFF ||
 		    ctx->hidepid > HIDEPID_INVISIBLE)
-			return invalf(fc, "proc: hidepid value must be between 0 and 2.\n");
+			return invalfc(fc, "hidepid value must be between 0 and 2.\n");
 		break;
 
 	default:
@@ -207,7 +202,7 @@ static void proc_kill_sb(struct super_block *sb)
 static struct file_system_type proc_fs_type = {
 	.name			= "proc",
 	.init_fs_context	= proc_init_fs_context,
-	.parameters		= &proc_fs_parameters,
+	.parameters		= proc_fs_parameters,
 	.kill_sb		= proc_kill_sb,
 	.fs_flags		= FS_USERNS_MOUNT | FS_DISALLOW_NOTIFY_PERM,
 };
@@ -292,7 +287,7 @@ struct proc_dir_entry proc_root = {
 	.nlink		= 2, 
 	.refcnt		= REFCOUNT_INIT(1),
 	.proc_iops	= &proc_root_inode_operations, 
-	.proc_fops	= &proc_root_operations,
+	.proc_dir_ops	= &proc_root_operations,
 	.parent		= &proc_root,
 	.subdir		= RB_ROOT,
 	.name		= "/proc",
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index fd931d3..0449edf 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -223,16 +223,16 @@ static int stat_open(struct inode *inode, struct file *file)
 	return single_open_size(file, show_stat, NULL, size);
 }
 
-static const struct file_operations proc_stat_operations = {
-	.open		= stat_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
+static const struct proc_ops stat_proc_ops = {
+	.proc_open	= stat_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
 };
 
 static int __init proc_stat_init(void)
 {
-	proc_create("stat", 0, NULL, &proc_stat_operations);
+	proc_create("stat", 0, NULL, &stat_proc_ops);
 	return 0;
 }
 fs_initcall(proc_stat_init);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9442631..3ba9ae8 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -505,7 +505,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 
 #ifdef CONFIG_SHMEM
 static int smaps_pte_hole(unsigned long addr, unsigned long end,
-		struct mm_walk *walk)
+			  __always_unused int depth, struct mm_walk *walk)
 {
 	struct mem_size_stats *mss = walk->private;
 
@@ -1282,7 +1282,7 @@ static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
 }
 
 static int pagemap_pte_hole(unsigned long start, unsigned long end,
-				struct mm_walk *walk)
+			    __always_unused int depth, struct mm_walk *walk)
 {
 	struct pagemapread *pm = walk->private;
 	unsigned long addr = start;
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 7b13988..7dc800c 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -667,10 +667,10 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 }
 #endif
 
-static const struct file_operations proc_vmcore_operations = {
-	.read		= read_vmcore,
-	.llseek		= default_llseek,
-	.mmap		= mmap_vmcore,
+static const struct proc_ops vmcore_proc_ops = {
+	.proc_read	= read_vmcore,
+	.proc_lseek	= default_llseek,
+	.proc_mmap	= mmap_vmcore,
 };
 
 static struct vmcore* __init get_new_element(void)
@@ -1555,7 +1555,7 @@ static int __init vmcore_init(void)
 	elfcorehdr_free(elfcorehdr_addr);
 	elfcorehdr_addr = ELFCORE_ADDR_ERR;
 
-	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
+	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
 	if (proc_vmcore)
 		proc_vmcore->size = vmcore_size;
 	return 0;
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index 53429c2..58fc2a7 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -22,8 +22,6 @@ MODULE_AUTHOR("Jan Kara");
 MODULE_DESCRIPTION("Quota format v2 support");
 MODULE_LICENSE("GPL");
 
-#define __QUOTA_V2_PARANOIA
-
 static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot);
 static void v2r0_disk2memdqb(struct dquot *dquot, void *dp);
 static int v2r0_is_id(void *dp, struct dquot *dquot);
diff --git a/fs/quota/quotaio_v1.h b/fs/quota/quotaio_v1.h
index bd11e2c..31dca9a 100644
--- a/fs/quota/quotaio_v1.h
+++ b/fs/quota/quotaio_v1.h
@@ -25,8 +25,10 @@ struct v1_disk_dqblk {
 	__u32 dqb_ihardlimit;	/* absolute limit on allocated inodes */
 	__u32 dqb_isoftlimit;	/* preferred inode limit */
 	__u32 dqb_curinodes;	/* current # allocated inodes */
-	time_t dqb_btime;	/* time limit for excessive disk use */
-	time_t dqb_itime;	/* time limit for excessive inode use */
+
+	/* below fields differ in length on 32-bit vs 64-bit architectures */
+	unsigned long dqb_btime; /* time limit for excessive disk use */
+	unsigned long dqb_itime; /* time limit for excessive inode use */
 };
 
 #define v1_dqoff(UID)      ((loff_t)((UID) * sizeof (struct v1_disk_dqblk)))
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index d82636e..ee179a8 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -181,23 +181,18 @@ enum ramfs_param {
 	Opt_mode,
 };
 
-static const struct fs_parameter_spec ramfs_param_specs[] = {
+const struct fs_parameter_spec ramfs_fs_parameters[] = {
 	fsparam_u32oct("mode",	Opt_mode),
 	{}
 };
 
-const struct fs_parameter_description ramfs_fs_parameters = {
-	.name		= "ramfs",
-	.specs		= ramfs_param_specs,
-};
-
 static int ramfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
 {
 	struct fs_parse_result result;
 	struct ramfs_fs_info *fsi = fc->s_fs_info;
 	int opt;
 
-	opt = fs_parse(fc, &ramfs_fs_parameters, param, &result);
+	opt = fs_parse(fc, ramfs_fs_parameters, param, &result);
 	if (opt < 0) {
 		/*
 		 * We might like to report bad mount options here;
@@ -278,7 +273,7 @@ static void ramfs_kill_sb(struct super_block *sb)
 static struct file_system_type ramfs_fs_type = {
 	.name		= "ramfs",
 	.init_fs_context = ramfs_init_fs_context,
-	.parameters	= &ramfs_fs_parameters,
+	.parameters	= ramfs_fs_parameters,
 	.kill_sb	= ramfs_kill_sb,
 	.fs_flags	= FS_USERNS_MOUNT,
 };
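
The fs/proc/root.c and fs/ramfs/inode.c hunks above follow the updated mount-API convention in which the struct fs_parameter_description wrapper is gone: the fs_parameter_spec array is handed to fs_parse() and assigned to file_system_type::parameters directly. A minimal sketch for a hypothetical "demofs" (not part of this series), assuming the fs_parser interface shown in the hunks above:

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum demofs_param {
	Opt_mode,
};

static const struct fs_parameter_spec demofs_fs_parameters[] = {
	fsparam_u32oct("mode", Opt_mode),
	{}
};

static int demofs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt;

	/* The spec array itself is passed, no wrapper struct. */
	opt = fs_parse(fc, demofs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_mode:
		/* result.uint_32 holds the parsed octal mode */
		break;
	}
	return 0;
}

static struct file_system_type demofs_fs_type = {
	.name		= "demofs",
	.parameters	= demofs_fs_parameters,	/* spec array, no wrapper */
	/* .init_fs_context and .kill_sb omitted in this sketch */
};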
diff --git a/fs/read_write.c b/fs/read_write.c
index 7458fcc..59d819c 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -939,6 +939,34 @@ static ssize_t do_iter_read(struct file *file, struct iov_iter *iter,
 	return ret;
 }
 
+ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
+			   struct iov_iter *iter)
+{
+	size_t tot_len;
+	ssize_t ret = 0;
+
+	if (!file->f_op->read_iter)
+		return -EINVAL;
+	if (!(file->f_mode & FMODE_READ))
+		return -EBADF;
+	if (!(file->f_mode & FMODE_CAN_READ))
+		return -EINVAL;
+
+	tot_len = iov_iter_count(iter);
+	if (!tot_len)
+		goto out;
+	ret = rw_verify_area(READ, file, &iocb->ki_pos, tot_len);
+	if (ret < 0)
+		return ret;
+
+	ret = call_read_iter(file, iocb, iter);
+out:
+	if (ret >= 0)
+		fsnotify_access(file);
+	return ret;
+}
+EXPORT_SYMBOL(vfs_iocb_iter_read);
+
 ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
 		rwf_t flags)
 {
@@ -975,6 +1003,34 @@ static ssize_t do_iter_write(struct file *file, struct iov_iter *iter,
 	return ret;
 }
 
+ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
+			    struct iov_iter *iter)
+{
+	size_t tot_len;
+	ssize_t ret = 0;
+
+	if (!file->f_op->write_iter)
+		return -EINVAL;
+	if (!(file->f_mode & FMODE_WRITE))
+		return -EBADF;
+	if (!(file->f_mode & FMODE_CAN_WRITE))
+		return -EINVAL;
+
+	tot_len = iov_iter_count(iter);
+	if (!tot_len)
+		return 0;
+	ret = rw_verify_area(WRITE, file, &iocb->ki_pos, tot_len);
+	if (ret < 0)
+		return ret;
+
+	ret = call_write_iter(file, iocb, iter);
+	if (ret > 0)
+		fsnotify_modify(file);
+
+	return ret;
+}
+EXPORT_SYMBOL(vfs_iocb_iter_write);
+
 ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
 		rwf_t flags)
 {
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 4b3e3e7..072156c 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -56,8 +56,6 @@
 /* gets a struct reiserfs_journal_list * from a list head */
 #define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                                j_list))
-#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
-                               j_working_list))
 
 /* must be correct to keep the desc and commit structs at 4k */
 #define JOURNAL_TRANS_HALF 1018
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index f2cf344..ff33651 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -63,7 +63,6 @@ static int show_version(struct seq_file *m, void *unused)
 #define MAP( i ) D4C( objectid_map( sb, rs )[ i ] )
 
 #define DJF( x ) le32_to_cpu( rs -> x )
-#define DJV( x ) le32_to_cpu( s_v1 -> x )
 #define DJP( x ) le32_to_cpu( jp -> x )
 #define JF( x ) ( r -> s_journal -> x )
 
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index da9ebe3..8bf88d6 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -918,12 +918,6 @@ int comp_items(const struct item_head *stored_ih, const struct treepath *path)
 	return memcmp(stored_ih, ih, IH_SIZE);
 }
 
-/* unformatted nodes are not logged anymore, ever.  This is safe now */
-#define held_by_others(bh) (atomic_read(&(bh)->b_count) > 1)
-
-/* block can not be forgotten as it is in I/O or held by someone */
-#define block_in_use(bh) (buffer_locked(bh) || (held_by_others(bh)))
-
 /* prepare for delete or cut of direct item */
 static inline int prepare_for_direct_item(struct treepath *path,
 					  struct item_head *le_ih,
@@ -2246,7 +2240,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
 	/* also releases the path */
 	unfix_nodes(&s_ins_balance);
 #ifdef REISERQUOTA_DEBUG
-	reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
+	if (inode)
+		reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
 		       "reiserquota insert_item(): freeing %u id=%u type=%c",
 		       quota_bytes, inode->i_uid, head2type(ih));
 #endif
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 3244037..a6bce5b 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -629,6 +629,7 @@ static void reiserfs_put_super(struct super_block *s)
 	reiserfs_write_unlock(s);
 	mutex_destroy(&REISERFS_SB(s)->lock);
 	destroy_workqueue(REISERFS_SB(s)->commit_wq);
+	kfree(REISERFS_SB(s)->s_jdev);
 	kfree(s->s_fs_info);
 	s->s_fs_info = NULL;
 }
@@ -1947,7 +1948,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
 		if (!sbi->s_jdev) {
 			SWARN(silent, s, "", "Cannot allocate memory for "
 				"journal device name");
-			goto error;
+			goto error_unlocked;
 		}
 	}
 #ifdef CONFIG_QUOTA
@@ -2240,6 +2241,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
 			kfree(qf_names[j]);
 	}
 #endif
+	kfree(sbi->s_jdev);
 	kfree(sbi);
 
 	s->s_fs_info = NULL;
diff --git a/fs/splice.c b/fs/splice.c
index 3009652..d671936 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -165,8 +165,8 @@ static const struct pipe_buf_operations user_page_pipe_buf_ops = {
 static void wakeup_pipe_readers(struct pipe_inode_info *pipe)
 {
 	smp_mb();
-	if (waitqueue_active(&pipe->wait))
-		wake_up_interruptible(&pipe->wait);
+	if (waitqueue_active(&pipe->rd_wait))
+		wake_up_interruptible(&pipe->rd_wait);
 	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 }
 
@@ -462,8 +462,8 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
 static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
 {
 	smp_mb();
-	if (waitqueue_active(&pipe->wait))
-		wake_up_interruptible(&pipe->wait);
+	if (waitqueue_active(&pipe->wr_wait))
+		wake_up_interruptible(&pipe->wr_wait);
 	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 }
 
diff --git a/fs/stat.c b/fs/stat.c
index c38e4c2..0300087 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -21,6 +21,8 @@
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
 
+#include "internal.h"
+
 /**
  * generic_fillattr - Fill in the basic attributes from the inode struct
  * @inode: Inode to use as the source
@@ -150,6 +152,23 @@ int vfs_statx_fd(unsigned int fd, struct kstat *stat,
 }
 EXPORT_SYMBOL(vfs_statx_fd);
 
+inline unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags, int flags)
+{
+	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
+		       AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0)
+		return -EINVAL;
+
+	*lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT;
+	if (flags & AT_SYMLINK_NOFOLLOW)
+		*lookup_flags &= ~LOOKUP_FOLLOW;
+	if (flags & AT_NO_AUTOMOUNT)
+		*lookup_flags &= ~LOOKUP_AUTOMOUNT;
+	if (flags & AT_EMPTY_PATH)
+		*lookup_flags |= LOOKUP_EMPTY;
+
+	return 0;
+}
+
 /**
  * vfs_statx - Get basic and extra attributes by filename
  * @dfd: A file descriptor representing the base dir for a relative filename
@@ -170,19 +189,10 @@ int vfs_statx(int dfd, const char __user *filename, int flags,
 {
 	struct path path;
 	int error = -EINVAL;
-	unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT;
+	unsigned lookup_flags;
 
-	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
-		       AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0)
+	if (vfs_stat_set_lookup_flags(&lookup_flags, flags))
 		return -EINVAL;
-
-	if (flags & AT_SYMLINK_NOFOLLOW)
-		lookup_flags &= ~LOOKUP_FOLLOW;
-	if (flags & AT_NO_AUTOMOUNT)
-		lookup_flags &= ~LOOKUP_AUTOMOUNT;
-	if (flags & AT_EMPTY_PATH)
-		lookup_flags |= LOOKUP_EMPTY;
-
 retry:
 	error = user_path_at(dfd, filename, lookup_flags, &path);
 	if (error)
@@ -523,7 +533,7 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
 }
 #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
 
-static noinline_for_stack int
+noinline_for_stack int
 cp_statx(const struct kstat *stat, struct statx __user *buffer)
 {
 	struct statx tmp;
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index d41c21f..c4ab045 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -449,7 +449,7 @@ int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
 	}
 
 	link = kernfs_create_link(kobj->sd, target_name, entry);
-	if (IS_ERR(link) && PTR_ERR(link) == -EEXIST)
+	if (PTR_ERR(link) == -EEXIST)
 		sysfs_warn_dup(kobj->sd, target_name);
 
 	kernfs_put(entry);
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 0caa151..0ee8c6d 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -330,7 +330,10 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
 		parent = tracefs_mount->mnt_root;
 
 	inode_lock(parent->d_inode);
-	dentry = lookup_one_len(name, parent, strlen(name));
+	if (unlikely(IS_DEADDIR(parent->d_inode)))
+		dentry = ERR_PTR(-ENOENT);
+	else
+		dentry = lookup_one_len(name, parent, strlen(name));
 	if (!IS_ERR(dentry) && dentry->d_inode) {
 		dput(dentry);
 		dentry = ERR_PTR(-EEXIST);
@@ -499,122 +502,27 @@ __init struct dentry *tracefs_create_instance_dir(const char *name,
 	return dentry;
 }
 
-static int __tracefs_remove(struct dentry *dentry, struct dentry *parent)
+static void remove_one(struct dentry *victim)
 {
-	int ret = 0;
-
-	if (simple_positive(dentry)) {
-		if (dentry->d_inode) {
-			dget(dentry);
-			switch (dentry->d_inode->i_mode & S_IFMT) {
-			case S_IFDIR:
-				ret = simple_rmdir(parent->d_inode, dentry);
-				if (!ret)
-					fsnotify_rmdir(parent->d_inode, dentry);
-				break;
-			default:
-				simple_unlink(parent->d_inode, dentry);
-				fsnotify_unlink(parent->d_inode, dentry);
-				break;
-			}
-			if (!ret)
-				d_delete(dentry);
-			dput(dentry);
-		}
-	}
-	return ret;
+	simple_release_fs(&tracefs_mount, &tracefs_mount_count);
 }
 
 /**
- * tracefs_remove - removes a file or directory from the tracefs filesystem
- * @dentry: a pointer to a the dentry of the file or directory to be
- *          removed.
- *
- * This function removes a file or directory in tracefs that was previously
- * created with a call to another tracefs function (like
- * tracefs_create_file() or variants thereof.)
- */
-void tracefs_remove(struct dentry *dentry)
-{
-	struct dentry *parent;
-	int ret;
-
-	if (IS_ERR_OR_NULL(dentry))
-		return;
-
-	parent = dentry->d_parent;
-	inode_lock(parent->d_inode);
-	ret = __tracefs_remove(dentry, parent);
-	inode_unlock(parent->d_inode);
-	if (!ret)
-		simple_release_fs(&tracefs_mount, &tracefs_mount_count);
-}
-
-/**
- * tracefs_remove_recursive - recursively removes a directory
+ * tracefs_remove - recursively removes a directory
  * @dentry: a pointer to a the dentry of the directory to be removed.
  *
  * This function recursively removes a directory tree in tracefs that
  * was previously created with a call to another tracefs function
  * (like tracefs_create_file() or variants thereof.)
  */
-void tracefs_remove_recursive(struct dentry *dentry)
+void tracefs_remove(struct dentry *dentry)
 {
-	struct dentry *child, *parent;
-
 	if (IS_ERR_OR_NULL(dentry))
 		return;
 
-	parent = dentry;
- down:
-	inode_lock(parent->d_inode);
- loop:
-	/*
-	 * The parent->d_subdirs is protected by the d_lock. Outside that
-	 * lock, the child can be unlinked and set to be freed which can
-	 * use the d_u.d_child as the rcu head and corrupt this list.
-	 */
-	spin_lock(&parent->d_lock);
-	list_for_each_entry(child, &parent->d_subdirs, d_child) {
-		if (!simple_positive(child))
-			continue;
-
-		/* perhaps simple_empty(child) makes more sense */
-		if (!list_empty(&child->d_subdirs)) {
-			spin_unlock(&parent->d_lock);
-			inode_unlock(parent->d_inode);
-			parent = child;
-			goto down;
-		}
-
-		spin_unlock(&parent->d_lock);
-
-		if (!__tracefs_remove(child, parent))
-			simple_release_fs(&tracefs_mount, &tracefs_mount_count);
-
-		/*
-		 * The parent->d_lock protects agaist child from unlinking
-		 * from d_subdirs. When releasing the parent->d_lock we can
-		 * no longer trust that the next pointer is valid.
-		 * Restart the loop. We'll skip this one with the
-		 * simple_positive() check.
-		 */
-		goto loop;
-	}
-	spin_unlock(&parent->d_lock);
-
-	inode_unlock(parent->d_inode);
-	child = parent;
-	parent = parent->d_parent;
-	inode_lock(parent->d_inode);
-
-	if (child != dentry)
-		/* go up */
-		goto loop;
-
-	if (!__tracefs_remove(child, parent))
-		simple_release_fs(&tracefs_mount, &tracefs_mount_count);
-	inode_unlock(parent->d_inode);
+	simple_pin_fs(&trace_fs_type, &tracefs_mount, &tracefs_mount_count);
+	simple_recursive_removal(dentry, remove_one);
+	simple_release_fs(&tracefs_mount, &tracefs_mount_count);
 }
 
 /**
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index c8e8f50..743928e 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -786,7 +786,9 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
 
 		if (page_offset > end_index)
 			break;
-		page = find_or_create_page(mapping, page_offset, ra_gfp_mask);
+		page = pagecache_get_page(mapping, page_offset,
+				 FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
+				 ra_gfp_mask);
 		if (!page)
 			break;
 		if (!PageUptodate(page))
@@ -1078,18 +1080,12 @@ static void do_attr_changes(struct inode *inode, const struct iattr *attr)
 		inode->i_uid = attr->ia_uid;
 	if (attr->ia_valid & ATTR_GID)
 		inode->i_gid = attr->ia_gid;
-	if (attr->ia_valid & ATTR_ATIME) {
-		inode->i_atime = timestamp_truncate(attr->ia_atime,
-						  inode);
-	}
-	if (attr->ia_valid & ATTR_MTIME) {
-		inode->i_mtime = timestamp_truncate(attr->ia_mtime,
-						  inode);
-	}
-	if (attr->ia_valid & ATTR_CTIME) {
-		inode->i_ctime = timestamp_truncate(attr->ia_ctime,
-						  inode);
-	}
+	if (attr->ia_valid & ATTR_ATIME)
+		inode->i_atime = attr->ia_atime;
+	if (attr->ia_valid & ATTR_MTIME)
+		inode->i_mtime = attr->ia_mtime;
+	if (attr->ia_valid & ATTR_CTIME)
+		inode->i_ctime = attr->ia_ctime;
 	if (attr->ia_valid & ATTR_MODE) {
 		umode_t mode = attr->ia_mode;
 
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 5dc5abc..d49fc04 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -17,10 +17,14 @@
 #include "ubifs.h"
 
 /* Need to be kept consistent with checked flags in ioctl2ubifs() */
-#define UBIFS_SUPPORTED_IOCTL_FLAGS \
+#define UBIFS_SETTABLE_IOCTL_FLAGS \
 	(FS_COMPR_FL | FS_SYNC_FL | FS_APPEND_FL | \
 	 FS_IMMUTABLE_FL | FS_DIRSYNC_FL)
 
+/* Need to be kept consistent with checked flags in ubifs2ioctl() */
+#define UBIFS_GETTABLE_IOCTL_FLAGS \
+	(UBIFS_SETTABLE_IOCTL_FLAGS | FS_ENCRYPT_FL)
+
 /**
  * ubifs_set_inode_flags - set VFS inode flags.
  * @inode: VFS inode to set flags for
@@ -91,6 +95,8 @@ static int ubifs2ioctl(int ubifs_flags)
 		ioctl_flags |= FS_IMMUTABLE_FL;
 	if (ubifs_flags & UBIFS_DIRSYNC_FL)
 		ioctl_flags |= FS_DIRSYNC_FL;
+	if (ubifs_flags & UBIFS_CRYPT_FL)
+		ioctl_flags |= FS_ENCRYPT_FL;
 
 	return ioctl_flags;
 }
@@ -113,7 +119,8 @@ static int setflags(struct inode *inode, int flags)
 	if (err)
 		goto out_unlock;
 
-	ui->flags = ioctl2ubifs(flags);
+	ui->flags &= ~ioctl2ubifs(UBIFS_SETTABLE_IOCTL_FLAGS);
+	ui->flags |= ioctl2ubifs(flags);
 	ubifs_set_inode_flags(inode);
 	inode->i_ctime = current_time(inode);
 	release = ui->dirty;
@@ -155,8 +162,9 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		if (get_user(flags, (int __user *) arg))
 			return -EFAULT;
 
-		if (flags & ~UBIFS_SUPPORTED_IOCTL_FLAGS)
+		if (flags & ~UBIFS_GETTABLE_IOCTL_FLAGS)
 			return -EOPNOTSUPP;
+		flags &= UBIFS_SETTABLE_IOCTL_FLAGS;
 
 		if (!S_ISDIR(inode->i_mode))
 			flags &= ~FS_DIRSYNC_FL;
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index 54d6db6..edf43ddd 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -129,7 +129,7 @@ static void __orphan_drop(struct ubifs_info *c, struct ubifs_orphan *o)
 static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
 {
 	if (orph->del) {
-		dbg_gen("deleted twice ino %lu", orph->inum);
+		dbg_gen("deleted twice ino %lu", (unsigned long)orph->inum);
 		return;
 	}
 
@@ -137,7 +137,7 @@ static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
 		orph->del = 1;
 		orph->dnext = c->orph_dnext;
 		c->orph_dnext = orph;
-		dbg_gen("delete later ino %lu", orph->inum);
+		dbg_gen("delete later ino %lu", (unsigned long)orph->inum);
 		return;
 	}
 
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 2b7c04b..4b4b65b 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -84,7 +84,6 @@ static int create_default_filesystem(struct ubifs_info *c)
 	int idx_node_size;
 	long long tmp64, main_bytes;
 	__le64 tmp_le64;
-	__le32 tmp_le32;
 	struct timespec64 ts;
 	u8 hash[UBIFS_HASH_ARR_SZ];
 	u8 hash_lpt[UBIFS_HASH_ARR_SZ];
@@ -161,7 +160,7 @@ static int create_default_filesystem(struct ubifs_info *c)
 	sup = kzalloc(ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size), GFP_KERNEL);
 	mst = kzalloc(c->mst_node_alsz, GFP_KERNEL);
 	idx_node_size = ubifs_idx_node_sz(c, 1);
-	idx = kzalloc(ALIGN(tmp, c->min_io_size), GFP_KERNEL);
+	idx = kzalloc(ALIGN(idx_node_size, c->min_io_size), GFP_KERNEL);
 	ino = kzalloc(ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size), GFP_KERNEL);
 	cs = kzalloc(ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size), GFP_KERNEL);
 
@@ -291,16 +290,14 @@ static int create_default_filesystem(struct ubifs_info *c)
 	ino->creat_sqnum = cpu_to_le64(++c->max_sqnum);
 	ino->nlink = cpu_to_le32(2);
 
-	ktime_get_real_ts64(&ts);
-	ts = timespec64_trunc(ts, DEFAULT_TIME_GRAN);
+	ktime_get_coarse_real_ts64(&ts);
 	tmp_le64 = cpu_to_le64(ts.tv_sec);
 	ino->atime_sec   = tmp_le64;
 	ino->ctime_sec   = tmp_le64;
 	ino->mtime_sec   = tmp_le64;
-	tmp_le32 = cpu_to_le32(ts.tv_nsec);
-	ino->atime_nsec  = tmp_le32;
-	ino->ctime_nsec  = tmp_le32;
-	ino->mtime_nsec  = tmp_le32;
+	ino->atime_nsec  = 0;
+	ino->ctime_nsec  = 0;
+	ino->mtime_nsec  = 0;
 	ino->mode = cpu_to_le32(S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO);
 	ino->size = cpu_to_le64(UBIFS_INO_NODE_SZ);
 
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 5e1e8ec..7fc2f3f 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1599,6 +1599,7 @@ static int mount_ubifs(struct ubifs_info *c)
 	vfree(c->ileb_buf);
 	vfree(c->sbuf);
 	kfree(c->bottom_up_buf);
+	kfree(c->sup_node);
 	ubifs_debugging_exit(c);
 	return err;
 }
@@ -1641,6 +1642,7 @@ static void ubifs_umount(struct ubifs_info *c)
 	vfree(c->ileb_buf);
 	vfree(c->sbuf);
 	kfree(c->bottom_up_buf);
+	kfree(c->sup_node);
 	ubifs_debugging_exit(c);
 }
 
diff --git a/fs/udf/ecma_167.h b/fs/udf/ecma_167.h
index fb7f2c7..3fd8546 100644
--- a/fs/udf/ecma_167.h
+++ b/fs/udf/ecma_167.h
@@ -4,7 +4,8 @@
  * This file is based on ECMA-167 3rd edition (June 1997)
  * http://www.ecma.ch
  *
- * Copyright (c) 2001-2002  Ben Fennema <bfennema@falcon.csc.calpoly.edu>
+ * Copyright (c) 2001-2002  Ben Fennema
+ * Copyright (c) 2017-2019  Pali Rohár <pali.rohar@gmail.com>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -32,11 +33,19 @@
  * SUCH DAMAGE.
  */
 
+/**
+ * @file
+ * ECMA-167r3 defines and structure definitions
+ */
+
 #include <linux/types.h>
 
 #ifndef _ECMA_167_H
 #define _ECMA_167_H 1
 
+/* Character sets and coding - d-characters (ECMA 167r3 1/7.2) */
+typedef uint8_t		dchars;
+
 /* Character set specification (ECMA 167r3 1/7.2.1) */
 struct charspec {
 	uint8_t		charSetType;
@@ -54,6 +63,7 @@ struct charspec {
 #define CHARSPEC_TYPE_CS7		0x07	/* (1/7.2.9) */
 #define CHARSPEC_TYPE_CS8		0x08	/* (1/7.2.10) */
 
+/* Fixed-length character fields - d-string (EMCA 167r3 1/7.2.12) */
 typedef uint8_t		dstring;
 
 /* Timestamp (ECMA 167r3 1/7.3) */
@@ -85,22 +95,8 @@ struct regid {
 } __packed;
 
 /* Flags (ECMA 167r3 1/7.4.1) */
-#define ENTITYID_FLAGS_DIRTY		0x00
-#define ENTITYID_FLAGS_PROTECTED	0x01
-
-/* OSTA UDF 2.1.5.2 */
-#define UDF_ID_COMPLIANT "*OSTA UDF Compliant"
-
-/* OSTA UDF 2.1.5.3 */
-struct domainEntityIDSuffix {
-	uint16_t	revision;
-	uint8_t		flags;
-	uint8_t		reserved[5];
-};
-
-/* OSTA UDF 2.1.5.3 */
-#define ENTITYIDSUFFIX_FLAGS_HARDWRITEPROTECT 0
-#define ENTITYIDSUFFIX_FLAGS_SOFTWRITEPROTECT 1
+#define ENTITYID_FLAGS_DIRTY		0x01
+#define ENTITYID_FLAGS_PROTECTED	0x02
 
 /* Volume Structure Descriptor (ECMA 167r3 2/9.1) */
 #define VSD_STD_ID_LEN			5
@@ -202,6 +198,13 @@ struct NSRDesc {
 	uint8_t		structData[2040];
 } __packed;
 
+/* Generic Descriptor */
+struct genericDesc {
+	struct tag	descTag;
+	__le32		volDescSeqNum;
+	uint8_t		reserved[492];
+} __packed;
+
 /* Primary Volume Descriptor (ECMA 167r3 3/10.1) */
 struct primaryVolDesc {
 	struct tag		descTag;
@@ -316,7 +319,7 @@ struct genericPartitionMap {
 
 /* Partition Map Type (ECMA 167r3 3/10.7.1.1) */
 #define GP_PARTITION_MAP_TYPE_UNDEF	0x00
-#define GP_PARTIITON_MAP_TYPE_1		0x01
+#define GP_PARTITION_MAP_TYPE_1		0x01
 #define GP_PARTITION_MAP_TYPE_2		0x02
 
 /* Type 1 Partition Map (ECMA 167r3 3/10.7.2) */
@@ -723,6 +726,7 @@ struct appUseExtAttr {
 #define EXTATTR_DEV_SPEC		12
 #define EXTATTR_IMP_USE			2048
 #define EXTATTR_APP_USE			65536
+#define EXTATTR_SUBTYPE			1
 
 /* Unallocated Space Entry (ECMA 167r3 4/14.11) */
 struct unallocSpaceEntry {
@@ -754,10 +758,12 @@ struct partitionIntegrityEntry {
 /* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */
 
 /* Extent Length (ECMA 167r3 4/14.14.1.1) */
+#define EXT_LENGTH_MASK			0x3FFFFFFF
+#define EXT_TYPE_MASK			0xC0000000
 #define EXT_RECORDED_ALLOCATED		0x00000000
 #define EXT_NOT_RECORDED_ALLOCATED	0x40000000
 #define EXT_NOT_RECORDED_NOT_ALLOCATED	0x80000000
-#define EXT_NEXT_EXTENT_ALLOCDECS	0xC0000000
+#define EXT_NEXT_EXTENT_ALLOCDESCS	0xC0000000
 
 /* Long Allocation Descriptor (ECMA 167r3 4/14.14.2) */
 
@@ -774,7 +780,7 @@ struct pathComponent {
 	uint8_t		componentType;
 	uint8_t		lengthComponentIdent;
 	__le16		componentFileVersionNum;
-	dstring		componentIdent[0];
+	dchars		componentIdent[0];
 } __packed;
 
 /* File Entry (ECMA 167r3 4/14.17) */
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index ea80036..e875bc5 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1981,10 +1981,10 @@ int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
 
 		__udf_add_aext(inode, &nepos, &cp_loc, cp_len, 1);
 		udf_write_aext(inode, epos, &nepos.block,
-			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDECS, 0);
+			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
 	} else {
 		__udf_add_aext(inode, epos, &nepos.block,
-			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDECS, 0);
+			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
 	}
 
 	brelse(epos->bh);
@@ -2143,7 +2143,7 @@ int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
 	unsigned int indirections = 0;
 
 	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
-	       (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
+	       (EXT_NEXT_EXTENT_ALLOCDESCS >> 30)) {
 		udf_pblk_t block;
 
 		if (++indirections > UDF_MAX_INDIR_EXTS) {
diff --git a/fs/udf/osta_udf.h b/fs/udf/osta_udf.h
index a4da59e..35e61b2 100644
--- a/fs/udf/osta_udf.h
+++ b/fs/udf/osta_udf.h
@@ -1,10 +1,11 @@
 /*
  * osta_udf.h
  *
- * This file is based on OSTA UDF(tm) 2.50 (April 30, 2003)
+ * This file is based on OSTA UDF(tm) 2.60 (March 1, 2005)
  * http://www.osta.org
  *
- * Copyright (c) 2001-2004  Ben Fennema <bfennema@falcon.csc.calpoly.edu>
+ * Copyright (c) 2001-2004  Ben Fennema
+ * Copyright (c) 2017-2019  Pali Rohár <pali.rohar@gmail.com>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -32,38 +33,57 @@
  * SUCH DAMAGE.
  */
 
+/**
+ * @file
+ * OSTA-UDF defines and structure definitions
+ */
+
 #include "ecma_167.h"
 
 #ifndef _OSTA_UDF_H
 #define _OSTA_UDF_H 1
 
-/* OSTA CS0 Charspec (UDF 2.50 2.1.2) */
+/* OSTA CS0 Charspec (UDF 2.60 2.1.2) */
 #define UDF_CHAR_SET_TYPE		0
 #define UDF_CHAR_SET_INFO		"OSTA Compressed Unicode"
 
-/* Entity Identifier (UDF 2.50 2.1.5) */
-/* Identifiers (UDF 2.50 2.1.5.2) */
+/* Entity Identifier (UDF 2.60 2.1.5) */
+/* Identifiers (UDF 2.60 2.1.5.2) */
+/* Implementation Use Extended Attribute (UDF 2.60 3.3.4.5) */
+/* Virtual Allocation Table (UDF 1.50 2.2.10) */
+/* Logical Volume Extended Information (UDF 1.50 Errata, DCN 5003, 3.3.4.5.1.3) */
+/* OS2EA (UDF 1.50 3.3.4.5.3.1) */
+/* MacUniqueIDTable (UDF 1.50 3.3.4.5.4.3) */
+/* MacResourceFork (UDF 1.50 3.3.4.5.4.4) */
 #define UDF_ID_DEVELOPER		"*Linux UDFFS"
 #define	UDF_ID_COMPLIANT		"*OSTA UDF Compliant"
 #define UDF_ID_LV_INFO			"*UDF LV Info"
 #define UDF_ID_FREE_EA			"*UDF FreeEASpace"
 #define UDF_ID_FREE_APP_EA		"*UDF FreeAppEASpace"
 #define UDF_ID_DVD_CGMS			"*UDF DVD CGMS Info"
+#define UDF_ID_VAT_LVEXTENSION		"*UDF VAT LVExtension"
 #define UDF_ID_OS2_EA			"*UDF OS/2 EA"
 #define UDF_ID_OS2_EA_LENGTH		"*UDF OS/2 EALength"
 #define UDF_ID_MAC_VOLUME		"*UDF Mac VolumeInfo"
 #define UDF_ID_MAC_FINDER		"*UDF Mac FinderInfo"
 #define UDF_ID_MAC_UNIQUE		"*UDF Mac UniqueIDTable"
 #define UDF_ID_MAC_RESOURCE		"*UDF Mac ResourceFork"
+#define UDF_ID_OS400_DIRINFO		"*UDF OS/400 DirInfo"
 #define UDF_ID_VIRTUAL			"*UDF Virtual Partition"
 #define UDF_ID_SPARABLE			"*UDF Sparable Partition"
 #define UDF_ID_ALLOC			"*UDF Virtual Alloc Tbl"
 #define UDF_ID_SPARING			"*UDF Sparing Table"
 #define UDF_ID_METADATA			"*UDF Metadata Partition"
 
-/* Identifier Suffix (UDF 2.50 2.1.5.3) */
-#define IS_DF_HARD_WRITE_PROTECT	0x01
-#define IS_DF_SOFT_WRITE_PROTECT	0x02
+/* Identifier Suffix (UDF 2.60 2.1.5.3) */
+#define DOMAIN_FLAGS_HARD_WRITE_PROTECT	0x01
+#define DOMAIN_FLAGS_SOFT_WRITE_PROTECT	0x02
+
+struct domainIdentSuffix {
+	__le16		UDFRevision;
+	uint8_t		domainFlags;
+	uint8_t		reserved[5];
+} __packed;
 
 struct UDFIdentSuffix {
 	__le16		UDFRevision;
@@ -75,15 +95,15 @@ struct UDFIdentSuffix {
 struct impIdentSuffix {
 	uint8_t		OSClass;
 	uint8_t		OSIdentifier;
-	uint8_t		reserved[6];
+	uint8_t		impUse[6];
 } __packed;
 
 struct appIdentSuffix {
 	uint8_t		impUse[8];
 } __packed;
 
-/* Logical Volume Integrity Descriptor (UDF 2.50 2.2.6) */
-/* Implementation Use (UDF 2.50 2.2.6.4) */
+/* Logical Volume Integrity Descriptor (UDF 2.60 2.2.6) */
+/* Implementation Use (UDF 2.60 2.2.6.4) */
 struct logicalVolIntegrityDescImpUse {
 	struct regid	impIdent;
 	__le32		numFiles;
@@ -94,8 +114,8 @@ struct logicalVolIntegrityDescImpUse {
 	uint8_t		impUse[0];
 } __packed;
 
-/* Implementation Use Volume Descriptor (UDF 2.50 2.2.7) */
-/* Implementation Use (UDF 2.50 2.2.7.2) */
+/* Implementation Use Volume Descriptor (UDF 2.60 2.2.7) */
+/* Implementation Use (UDF 2.60 2.2.7.2) */
 struct impUseVolDescImpUse {
 	struct charspec	LVICharset;
 	dstring		logicalVolIdent[128];
@@ -115,7 +135,7 @@ struct udfPartitionMap2 {
 	__le16		partitionNum;
 } __packed;
 
-/* Virtual Partition Map (UDF 2.50 2.2.8) */
+/* Virtual Partition Map (UDF 2.60 2.2.8) */
 struct virtualPartitionMap {
 	uint8_t		partitionMapType;
 	uint8_t		partitionMapLength;
@@ -126,7 +146,7 @@ struct virtualPartitionMap {
 	uint8_t		reserved2[24];
 } __packed;
 
-/* Sparable Partition Map (UDF 2.50 2.2.9) */
+/* Sparable Partition Map (UDF 2.60 2.2.9) */
 struct sparablePartitionMap {
 	uint8_t partitionMapType;
 	uint8_t partitionMapLength;
@@ -141,7 +161,7 @@ struct sparablePartitionMap {
 	__le32 locSparingTable[4];
 } __packed;
 
-/* Metadata Partition Map (UDF 2.4.0 2.2.10) */
+/* Metadata Partition Map (UDF 2.60 2.2.10) */
 struct metadataPartitionMap {
 	uint8_t		partitionMapType;
 	uint8_t		partitionMapLength;
@@ -160,14 +180,14 @@ struct metadataPartitionMap {
 
 /* Virtual Allocation Table (UDF 1.5 2.2.10) */
 struct virtualAllocationTable15 {
-	__le32		VirtualSector[0];
+	__le32		vatEntry[0];
 	struct regid	vatIdent;
 	__le32		previousVATICBLoc;
 } __packed;
 
 #define ICBTAG_FILE_TYPE_VAT15		0x00U
 
-/* Virtual Allocation Table (UDF 2.50 2.2.11) */
+/* Virtual Allocation Table (UDF 2.60 2.2.11) */
 struct virtualAllocationTable20 {
 	__le16		lengthHeader;
 	__le16		lengthImpUse;
@@ -175,9 +195,9 @@ struct virtualAllocationTable20 {
 	__le32		previousVATICBLoc;
 	__le32		numFiles;
 	__le32		numDirs;
-	__le16		minReadRevision;
-	__le16		minWriteRevision;
-	__le16		maxWriteRevision;
+	__le16		minUDFReadRev;
+	__le16		minUDFWriteRev;
+	__le16		maxUDFWriteRev;
 	__le16		reserved;
 	uint8_t		impUse[0];
 	__le32		vatEntry[0];
@@ -185,7 +205,7 @@ struct virtualAllocationTable20 {
 
 #define ICBTAG_FILE_TYPE_VAT20		0xF8U
 
-/* Sparing Table (UDF 2.50 2.2.12) */
+/* Sparing Table (UDF 2.60 2.2.12) */
 struct sparingEntry {
 	__le32		origLocation;
 	__le32		mappedLocation;
@@ -201,12 +221,12 @@ struct sparingTable {
 			mapEntry[0];
 } __packed;
 
-/* Metadata File (and Metadata Mirror File) (UDF 2.50 2.2.13.1) */
+/* Metadata File (and Metadata Mirror File) (UDF 2.60 2.2.13.1) */
 #define ICBTAG_FILE_TYPE_MAIN		0xFA
 #define ICBTAG_FILE_TYPE_MIRROR		0xFB
 #define ICBTAG_FILE_TYPE_BITMAP		0xFC
 
-/* struct struct long_ad ICB - ADImpUse (UDF 2.50 2.2.4.3) */
+/* struct struct long_ad ICB - ADImpUse (UDF 2.60 2.2.4.3) */
 struct allocDescImpUse {
 	__le16		flags;
 	uint8_t		impUse[4];
@@ -214,17 +234,17 @@ struct allocDescImpUse {
 
 #define AD_IU_EXT_ERASED		0x0001
 
-/* Real-Time Files (UDF 2.50 6.11) */
+/* Real-Time Files (UDF 2.60 6.11) */
 #define ICBTAG_FILE_TYPE_REALTIME	0xF9U
 
-/* Implementation Use Extended Attribute (UDF 2.50 3.3.4.5) */
-/* FreeEASpace (UDF 2.50 3.3.4.5.1.1) */
+/* Implementation Use Extended Attribute (UDF 2.60 3.3.4.5) */
+/* FreeEASpace (UDF 2.60 3.3.4.5.1.1) */
 struct freeEaSpace {
 	__le16		headerChecksum;
 	uint8_t		freeEASpace[0];
 } __packed;
 
-/* DVD Copyright Management Information (UDF 2.50 3.3.4.5.1.2) */
+/* DVD Copyright Management Information (UDF 2.60 3.3.4.5.1.2) */
 struct DVDCopyrightImpUse {
 	__le16		headerChecksum;
 	uint8_t		CGMSInfo;
@@ -232,20 +252,35 @@ struct DVDCopyrightImpUse {
 	uint8_t		protectionSystemInfo[4];
 } __packed;
 
-/* Application Use Extended Attribute (UDF 2.50 3.3.4.6) */
-/* FreeAppEASpace (UDF 2.50 3.3.4.6.1) */
+/* Logical Volume Extended Information (UDF 1.50 Errata, DCN 5003, 3.3.4.5.1.3) */
+struct LVExtensionEA {
+	__le16		headerChecksum;
+	__le64		verificationID;
+	__le32		numFiles;
+	__le32		numDirs;
+	dstring		logicalVolIdent[128];
+} __packed;
+
+/* Application Use Extended Attribute (UDF 2.60 3.3.4.6) */
+/* FreeAppEASpace (UDF 2.60 3.3.4.6.1) */
 struct freeAppEASpace {
 	__le16		headerChecksum;
 	uint8_t		freeEASpace[0];
 } __packed;
 
-/* UDF Defined System Stream (UDF 2.50 3.3.7) */
+/* UDF Defined System Stream (UDF 2.60 3.3.7) */
 #define UDF_ID_UNIQUE_ID		"*UDF Unique ID Mapping Data"
 #define UDF_ID_NON_ALLOC		"*UDF Non-Allocatable Space"
 #define UDF_ID_POWER_CAL		"*UDF Power Cal Table"
 #define UDF_ID_BACKUP			"*UDF Backup"
 
-/* Operating System Identifiers (UDF 2.50 6.3) */
+/* UDF Defined Non-System Streams (UDF 2.60 3.3.8) */
+#define UDF_ID_MAC_RESOURCE_FORK_STREAM	"*UDF Macintosh Resource Fork"
+/* #define UDF_ID_OS2_EA		"*UDF OS/2 EA" */
+#define UDF_ID_NT_ACL			"*UDF NT ACL"
+#define UDF_ID_UNIX_ACL			"*UDF UNIX ACL"
+
+/* Operating System Identifiers (UDF 2.60 6.3) */
 #define UDF_OS_CLASS_UNDEF		0x00U
 #define UDF_OS_CLASS_DOS		0x01U
 #define UDF_OS_CLASS_OS2		0x02U
@@ -270,6 +305,7 @@ struct freeAppEASpace {
 #define UDF_OS_ID_LINUX			0x05U
 #define UDF_OS_ID_MKLINUX		0x06U
 #define UDF_OS_ID_FREEBSD		0x07U
+#define UDF_OS_ID_NETBSD		0x08U
 #define UDF_OS_ID_WIN9X			0x00U
 #define UDF_OS_ID_WINNT			0x00U
 #define UDF_OS_ID_OS400			0x00U
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 8c28e93..f747bf7 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -767,20 +767,20 @@ static int udf_check_vsd(struct super_block *sb)
 static int udf_verify_domain_identifier(struct super_block *sb,
 					struct regid *ident, char *dname)
 {
-	struct domainEntityIDSuffix *suffix;
+	struct domainIdentSuffix *suffix;
 
 	if (memcmp(ident->ident, UDF_ID_COMPLIANT, strlen(UDF_ID_COMPLIANT))) {
 		udf_warn(sb, "Not OSTA UDF compliant %s descriptor.\n", dname);
 		goto force_ro;
 	}
-	if (ident->flags & (1 << ENTITYID_FLAGS_DIRTY)) {
+	if (ident->flags & ENTITYID_FLAGS_DIRTY) {
 		udf_warn(sb, "Possibly not OSTA UDF compliant %s descriptor.\n",
 			 dname);
 		goto force_ro;
 	}
-	suffix = (struct domainEntityIDSuffix *)ident->identSuffix;
-	if (suffix->flags & (1 << ENTITYIDSUFFIX_FLAGS_HARDWRITEPROTECT) ||
-	    suffix->flags & (1 << ENTITYIDSUFFIX_FLAGS_SOFTWRITEPROTECT)) {
+	suffix = (struct domainIdentSuffix *)ident->identSuffix;
+	if ((suffix->domainFlags & DOMAIN_FLAGS_HARD_WRITE_PROTECT) ||
+	    (suffix->domainFlags & DOMAIN_FLAGS_SOFT_WRITE_PROTECT)) {
 		if (!sb_rdonly(sb)) {
 			udf_warn(sb, "Descriptor for %s marked write protected."
 				 " Forcing read only mount.\n", dname);
@@ -1035,7 +1035,6 @@ static int check_partition_desc(struct super_block *sb,
 	switch (le32_to_cpu(p->accessType)) {
 	case PD_ACCESS_TYPE_READ_ONLY:
 	case PD_ACCESS_TYPE_WRITE_ONCE:
-	case PD_ACCESS_TYPE_REWRITABLE:
 	case PD_ACCESS_TYPE_NONE:
 		goto force_ro;
 	}
@@ -1063,7 +1062,8 @@ static int check_partition_desc(struct super_block *sb,
 		goto force_ro;
 
 	if (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
-	    map->s_partition_type == UDF_VIRTUAL_MAP20)
+	    map->s_partition_type == UDF_VIRTUAL_MAP20 ||
+	    map->s_partition_type == UDF_METADATA_MAP25)
 		goto force_ro;
 
 	return 0;
@@ -2402,6 +2402,10 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
 	buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
 	buf->f_bfree = udf_count_free(sb);
 	buf->f_bavail = buf->f_bfree;
+	/*
+	 * Let's pretend each free block is also a free 'inode' since UDF does
+	 * not have separate preallocated table of inodes.
+	 */
 	buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
 					  le32_to_cpu(lvidiu->numDirs)) : 0)
 			+ buf->f_bfree;
@@ -2492,17 +2496,29 @@ static unsigned int udf_count_free_table(struct super_block *sb,
 static unsigned int udf_count_free(struct super_block *sb)
 {
 	unsigned int accum = 0;
-	struct udf_sb_info *sbi;
+	struct udf_sb_info *sbi = UDF_SB(sb);
 	struct udf_part_map *map;
+	unsigned int part = sbi->s_partition;
+	int ptype = sbi->s_partmaps[part].s_partition_type;
 
-	sbi = UDF_SB(sb);
+	if (ptype == UDF_METADATA_MAP25) {
+		part = sbi->s_partmaps[part].s_type_specific.s_metadata.
+							s_phys_partition_ref;
+	} else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) {
+		/*
+		 * Filesystems with VAT are append-only and we cannot write to
+ 		 * them. Let's just report 0 here.
+		 */
+		return 0;
+	}
+
 	if (sbi->s_lvid_bh) {
 		struct logicalVolIntegrityDesc *lvid =
 			(struct logicalVolIntegrityDesc *)
 			sbi->s_lvid_bh->b_data;
-		if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
+		if (le32_to_cpu(lvid->numOfPartitions) > part) {
 			accum = le32_to_cpu(
-					lvid->freeSpaceTable[sbi->s_partition]);
+					lvid->freeSpaceTable[part]);
 			if (accum == 0xFFFFFFFF)
 				accum = 0;
 		}
@@ -2511,7 +2527,7 @@ static unsigned int udf_count_free(struct super_block *sb)
 	if (accum)
 		return accum;
 
-	map = &sbi->s_partmaps[sbi->s_partition];
+	map = &sbi->s_partmaps[part];
 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		accum += udf_count_free_bitmap(sb,
 					       map->s_uspace.s_bitmap);
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 63a47f1..532cda9 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -241,7 +241,7 @@ int udf_truncate_extents(struct inode *inode)
 
 	while ((etype = udf_current_aext(inode, &epos, &eloc,
 					 &elen, 0)) != -1) {
-		if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
+		if (etype == (EXT_NEXT_EXTENT_ALLOCDESCS >> 30)) {
 			udf_write_aext(inode, &epos, &neloc, nelen, 0);
 			if (indirect_ext_len) {
 				/* We managed to free all extents in the
diff --git a/fs/utimes.c b/fs/utimes.c
index c952b6b3..1d17ce9 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -36,14 +36,14 @@ static int utimes_common(const struct path *path, struct timespec64 *times)
 		if (times[0].tv_nsec == UTIME_OMIT)
 			newattrs.ia_valid &= ~ATTR_ATIME;
 		else if (times[0].tv_nsec != UTIME_NOW) {
-			newattrs.ia_atime = timestamp_truncate(times[0], inode);
+			newattrs.ia_atime = times[0];
 			newattrs.ia_valid |= ATTR_ATIME_SET;
 		}
 
 		if (times[1].tv_nsec == UTIME_OMIT)
 			newattrs.ia_valid &= ~ATTR_MTIME;
 		else if (times[1].tv_nsec != UTIME_NOW) {
-			newattrs.ia_mtime = timestamp_truncate(times[1], inode);
+			newattrs.ia_mtime = times[1];
 			newattrs.ia_valid |= ATTR_MTIME_SET;
 		}
 		/*
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 14fbdf2..08d6beb 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -23,25 +23,28 @@
 #include "xfs_ag_resv.h"
 #include "xfs_health.h"
 
-static struct xfs_buf *
+static int
 xfs_get_aghdr_buf(
 	struct xfs_mount	*mp,
 	xfs_daddr_t		blkno,
 	size_t			numblks,
+	struct xfs_buf		**bpp,
 	const struct xfs_buf_ops *ops)
 {
 	struct xfs_buf		*bp;
+	int			error;
 
-	bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0);
-	if (!bp)
-		return NULL;
+	error = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0, &bp);
+	if (error)
+		return error;
 
 	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
 	bp->b_bn = blkno;
 	bp->b_maps[0].bm_bn = blkno;
 	bp->b_ops = ops;
 
-	return bp;
+	*bpp = bp;
+	return 0;
 }
 
 static inline bool is_log_ag(struct xfs_mount *mp, struct aghdr_init_data *id)
@@ -340,13 +343,13 @@ xfs_ag_init_hdr(
 	struct aghdr_init_data	*id,
 	aghdr_init_work_f	work,
 	const struct xfs_buf_ops *ops)
-
 {
 	struct xfs_buf		*bp;
+	int			error;
 
-	bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, ops);
-	if (!bp)
-		return -ENOMEM;
+	error = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, &bp, ops);
+	if (error)
+		return error;
 
 	(*work)(mp, bp, id);
 
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index fc93fd8..d8053bc 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1070,11 +1070,11 @@ xfs_alloc_ag_vextent_small(
 	if (args->datatype & XFS_ALLOC_USERDATA) {
 		struct xfs_buf	*bp;
 
-		bp = xfs_btree_get_bufs(args->mp, args->tp, args->agno, fbno);
-		if (XFS_IS_CORRUPT(args->mp, !bp)) {
-			error = -EFSCORRUPTED;
+		error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
+				XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
+				args->mp->m_bsize, 0, &bp);
+		if (error)
 			goto error;
-		}
 		xfs_trans_binval(args->tp, bp);
 	}
 	*fbnop = args->agbno = fbno;
@@ -2347,9 +2347,11 @@ xfs_free_agfl_block(
 	if (error)
 		return error;
 
-	bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno);
-	if (XFS_IS_CORRUPT(tp->t_mountp, !bp))
-		return -EFSCORRUPTED;
+	error = xfs_trans_get_buf(tp, tp->t_mountp->m_ddev_targp,
+			XFS_AGB_TO_DADDR(tp->t_mountp, agno, agbno),
+			tp->t_mountp->m_bsize, 0, &bp);
+	if (error)
+		return error;
 	xfs_trans_binval(tp, bp);
 
 	return 0;
@@ -2500,12 +2502,11 @@ xfs_alloc_fix_freelist(
 
 	if (!pag->pagf_init) {
 		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
-		if (error)
+		if (error) {
+			/* Couldn't lock the AGF so skip this AG. */
+			if (error == -EAGAIN)
+				error = 0;
 			goto out_no_agbp;
-		if (!pag->pagf_init) {
-			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
-			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
-			goto out_agbp_relse;
 		}
 	}
 
@@ -2531,11 +2532,10 @@ xfs_alloc_fix_freelist(
 	 */
 	if (!agbp) {
 		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
-		if (error)
-			goto out_no_agbp;
-		if (!agbp) {
-			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
-			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
+		if (error) {
+			/* Couldn't lock the AGF so skip this AG. */
+			if (error == -EAGAIN)
+				error = 0;
 			goto out_no_agbp;
 		}
 	}
@@ -2766,11 +2766,10 @@ xfs_alloc_pagf_init(
 	xfs_buf_t		*bp;
 	int			error;
 
-	if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
-		return error;
-	if (bp)
+	error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp);
+	if (!error)
 		xfs_trans_brelse(tp, bp);
-	return 0;
+	return error;
 }
 
 /*
@@ -2956,14 +2955,11 @@ xfs_read_agf(
 	trace_xfs_read_agf(mp, agno);
 
 	ASSERT(agno != NULLAGNUMBER);
-	error = xfs_trans_read_buf(
-			mp, tp, mp->m_ddev_targp,
+	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
 			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
 			XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
 	if (error)
 		return error;
-	if (!*bpp)
-		return 0;
 
 	ASSERT(!(*bpp)->b_error);
 	xfs_buf_set_ref(*bpp, XFS_AGF_REF);
@@ -2987,14 +2983,15 @@ xfs_alloc_read_agf(
 
 	trace_xfs_alloc_read_agf(mp, agno);
 
+	/* We don't support trylock when freeing. */
+	ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
+			(XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
 	ASSERT(agno != NULLAGNUMBER);
 	error = xfs_read_agf(mp, tp, agno,
 			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
 			bpp);
 	if (error)
 		return error;
-	if (!*bpp)
-		return 0;
 	ASSERT(!(*bpp)->b_error);
 
 	agf = XFS_BUF_TO_AGF(*bpp);
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 0d7fcc98..e614972 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -62,6 +62,7 @@ xfs_attr_args_init(
 	struct xfs_da_args	*args,
 	struct xfs_inode	*dp,
 	const unsigned char	*name,
+	size_t			namelen,
 	int			flags)
 {
 
@@ -74,7 +75,7 @@ xfs_attr_args_init(
 	args->dp = dp;
 	args->flags = flags;
 	args->name = name;
-	args->namelen = strlen((const char *)name);
+	args->namelen = namelen;
 	if (args->namelen >= MAXNAMELEN)
 		return -EFAULT;		/* match IRIX behaviour */
 
@@ -139,6 +140,7 @@ int
 xfs_attr_get(
 	struct xfs_inode	*ip,
 	const unsigned char	*name,
+	size_t			namelen,
 	unsigned char		**value,
 	int			*valuelenp,
 	int			flags)
@@ -154,7 +156,7 @@ xfs_attr_get(
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return -EIO;
 
-	error = xfs_attr_args_init(&args, ip, name, flags);
+	error = xfs_attr_args_init(&args, ip, name, namelen, flags);
 	if (error)
 		return error;
 
@@ -338,6 +340,7 @@ int
 xfs_attr_set(
 	struct xfs_inode	*dp,
 	const unsigned char	*name,
+	size_t			namelen,
 	unsigned char		*value,
 	int			valuelen,
 	int			flags)
@@ -353,7 +356,7 @@ xfs_attr_set(
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return -EIO;
 
-	error = xfs_attr_args_init(&args, dp, name, flags);
+	error = xfs_attr_args_init(&args, dp, name, namelen, flags);
 	if (error)
 		return error;
 
@@ -442,6 +445,7 @@ int
 xfs_attr_remove(
 	struct xfs_inode	*dp,
 	const unsigned char	*name,
+	size_t			namelen,
 	int			flags)
 {
 	struct xfs_mount	*mp = dp->i_mount;
@@ -453,7 +457,7 @@ xfs_attr_remove(
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return -EIO;
 
-	error = xfs_attr_args_init(&args, dp, name, flags);
+	error = xfs_attr_args_init(&args, dp, name, namelen, flags);
 	if (error)
 		return error;
 
@@ -1007,7 +1011,7 @@ xfs_attr_node_addname(
 		 * The INCOMPLETE flag means that we will find the "old"
 		 * attr, not the "new" one.
 		 */
-		args->flags |= XFS_ATTR_INCOMPLETE;
+		args->op_flags |= XFS_DA_OP_INCOMPLETE;
 		state = xfs_da_state_alloc();
 		state->args = args;
 		state->mp = mp;
diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index 94badfa..4243b22 100644
--- a/fs/xfs/libxfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -26,7 +26,7 @@ struct xfs_attr_list_context;
  *========================================================================*/
 
 
-#define ATTR_DONTFOLLOW	0x0001	/* -- unused, from IRIX -- */
+#define ATTR_DONTFOLLOW	0x0001	/* -- ignored, from IRIX -- */
 #define ATTR_ROOT	0x0002	/* use attrs in root (trusted) namespace */
 #define ATTR_TRUST	0x0004	/* -- unused, from IRIX -- */
 #define ATTR_SECURE	0x0008	/* use attrs in security namespace */
@@ -37,7 +37,10 @@ struct xfs_attr_list_context;
 #define ATTR_KERNOVAL	0x2000	/* [kernel] get attr size only, not value */
 
 #define ATTR_INCOMPLETE	0x4000	/* [kernel] return INCOMPLETE attr keys */
-#define ATTR_ALLOC	0x8000	/* allocate xattr buffer on demand */
+#define ATTR_ALLOC	0x8000	/* [kernel] allocate xattr buffer on demand */
+
+#define ATTR_KERNEL_FLAGS \
+	(ATTR_KERNOTIME | ATTR_KERNOVAL | ATTR_INCOMPLETE | ATTR_ALLOC)
 
 #define XFS_ATTR_FLAGS \
 	{ ATTR_DONTFOLLOW, 	"DONTFOLLOW" }, \
@@ -145,11 +148,13 @@ int xfs_attr_list_int(struct xfs_attr_list_context *);
 int xfs_inode_hasattr(struct xfs_inode *ip);
 int xfs_attr_get_ilocked(struct xfs_inode *ip, struct xfs_da_args *args);
 int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
-		 unsigned char **value, int *valuelenp, int flags);
+		 size_t namelen, unsigned char **value, int *valuelenp,
+		 int flags);
 int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
-		 unsigned char *value, int valuelen, int flags);
+		 size_t namelen, unsigned char *value, int valuelen, int flags);
 int xfs_attr_set_args(struct xfs_da_args *args);
-int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
+int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name,
+		    size_t namelen, int flags);
 int xfs_attr_remove_args(struct xfs_da_args *args);
 int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
 		  int flags, struct attrlist_cursor_kern *cursor);
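
With the explicit namelen parameter added above, callers now measure the name themselves
instead of having xfs_attr_args_init() call strlen().  A hedged sketch of a caller using
the new prototypes; example_set_and_clear() and the attribute name are hypothetical, the
signatures and the ATTR_ROOT flag are the ones declared in this header.

	static int
	example_set_and_clear(
		struct xfs_inode	*ip)
	{
		const unsigned char	*name = (const unsigned char *)"example";
		unsigned char		value[] = "data";
		int			error;

		error = xfs_attr_set(ip, name, strlen((const char *)name),
				value, sizeof(value), ATTR_ROOT);
		if (error)
			return error;
		return xfs_attr_remove(ip, name, strlen((const char *)name),
				ATTR_ROOT);
	}
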
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 08d4b10..fed537a 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -2403,8 +2403,8 @@ xfs_attr3_leaf_lookup_int(
 		 * If we are looking for INCOMPLETE entries, show only those.
 		 * If we are looking for complete entries, show only those.
 		 */
-		if ((args->flags & XFS_ATTR_INCOMPLETE) !=
-		    (entry->flags & XFS_ATTR_INCOMPLETE)) {
+		if (!!(args->op_flags & XFS_DA_OP_INCOMPLETE) !=
+		    !!(entry->flags & XFS_ATTR_INCOMPLETE)) {
 			continue;
 		}
 		if (entry->flags & XFS_ATTR_LOCAL) {
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index f4a188e..73615b1 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -39,15 +39,6 @@ struct xfs_attr3_icleaf_hdr {
 	} freemap[XFS_ATTR_LEAF_MAPSIZE];
 };
 
-/*
- * Used to keep a list of "remote value" extents when unlinking an inode.
- */
-typedef struct xfs_attr_inactive_list {
-	xfs_dablk_t	valueblk;	/* block number of value bytes */
-	int		valuelen;	/* number of bytes in value */
-} xfs_attr_inactive_list_t;
-
-
 /*========================================================================
  * Function prototypes for the kernel.
  *========================================================================*/
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index a6ef5df..8b7f74b 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -26,6 +26,23 @@
 #define ATTR_RMTVALUE_MAPSIZE	1	/* # of map entries at once */
 
 /*
+ * Remote Attribute Values
+ * =======================
+ *
+ * Remote extended attribute values are conceptually simple -- they're written
+ * to data blocks mapped by an inode's attribute fork, and they have an upper
+ * size limit of 64k.  Setting a value does not involve the XFS log.
+ *
+ * However, on a v5 filesystem, maximally sized remote attr values require one
+ * block more than 64k worth of space to hold both the remote attribute value
+ * and the value header (64 bytes).  On a 4k block filesystem this results in a 68k buffer;
+ * on a 64k block filesystem, this would be a 128k buffer.  Note that the log
+ * format can only handle a dirty buffer of XFS_MAX_BLOCKSIZE length (64k).
+ * Therefore, we /must/ ensure that remote attribute value buffers never touch
+ * the logging system and therefore never have a log item.
+ */
+
+/*
  * Each contiguous block has a header, so it is not just a simple attribute
  * length to FSB conversion.
  */
@@ -401,17 +418,15 @@ xfs_attr_rmtval_get(
 			       (map[i].br_startblock != HOLESTARTBLOCK));
 			dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
 			dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
-			error = xfs_trans_read_buf(mp, args->trans,
-						   mp->m_ddev_targp,
-						   dblkno, dblkcnt, 0, &bp,
-						   &xfs_attr3_rmt_buf_ops);
+			error = xfs_buf_read(mp->m_ddev_targp, dblkno, dblkcnt,
+					0, &bp, &xfs_attr3_rmt_buf_ops);
 			if (error)
 				return error;
 
 			error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
 							&offset, &valuelen,
 							&dst);
-			xfs_trans_brelse(args->trans, bp);
+			xfs_buf_relse(bp);
 			if (error)
 				return error;
 
@@ -530,9 +545,9 @@ xfs_attr_rmtval_set(
 		dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
 		dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
 
-		bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt);
-		if (!bp)
-			return -ENOMEM;
+		error = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, &bp);
+		if (error)
+			return error;
 		bp->b_ops = &xfs_attr3_rmt_buf_ops;
 
 		xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset,
@@ -552,6 +567,33 @@ xfs_attr_rmtval_set(
 	return 0;
 }
 
+/* Mark stale any incore buffers for the remote value. */
+int
+xfs_attr_rmtval_stale(
+	struct xfs_inode	*ip,
+	struct xfs_bmbt_irec	*map,
+	xfs_buf_flags_t		incore_flags)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_buf		*bp;
+
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+	if (XFS_IS_CORRUPT(mp, map->br_startblock == DELAYSTARTBLOCK) ||
+	    XFS_IS_CORRUPT(mp, map->br_startblock == HOLESTARTBLOCK))
+		return -EFSCORRUPTED;
+
+	bp = xfs_buf_incore(mp->m_ddev_targp,
+			XFS_FSB_TO_DADDR(mp, map->br_startblock),
+			XFS_FSB_TO_BB(mp, map->br_blockcount), incore_flags);
+	if (bp) {
+		xfs_buf_stale(bp);
+		xfs_buf_relse(bp);
+	}
+
+	return 0;
+}
+
 /*
  * Remove the value associated with an attribute by deleting the
  * out-of-line buffer that it is stored on.
@@ -560,7 +602,6 @@ int
 xfs_attr_rmtval_remove(
 	struct xfs_da_args	*args)
 {
-	struct xfs_mount	*mp = args->dp->i_mount;
 	xfs_dablk_t		lblkno;
 	int			blkcnt;
 	int			error;
@@ -575,9 +616,6 @@ xfs_attr_rmtval_remove(
 	blkcnt = args->rmtblkcnt;
 	while (blkcnt > 0) {
 		struct xfs_bmbt_irec	map;
-		struct xfs_buf		*bp;
-		xfs_daddr_t		dblkno;
-		int			dblkcnt;
 		int			nmap;
 
 		/*
@@ -588,22 +626,11 @@ xfs_attr_rmtval_remove(
 				       blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
 		if (error)
 			return error;
-		ASSERT(nmap == 1);
-		ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
-		       (map.br_startblock != HOLESTARTBLOCK));
-
-		dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
-		dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
-
-		/*
-		 * If the "remote" value is in the cache, remove it.
-		 */
-		bp = xfs_buf_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK);
-		if (bp) {
-			xfs_buf_stale(bp);
-			xfs_buf_relse(bp);
-			bp = NULL;
-		}
+		if (XFS_IS_CORRUPT(args->dp->i_mount, nmap != 1))
+			return -EFSCORRUPTED;
+		error = xfs_attr_rmtval_stale(args->dp, &map, XBF_TRYLOCK);
+		if (error)
+			return error;
 
 		lblkno += map.br_blockcount;
 		blkcnt -= map.br_blockcount;
diff --git a/fs/xfs/libxfs/xfs_attr_remote.h b/fs/xfs/libxfs/xfs_attr_remote.h
index 9d20b66..6fb4572 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.h
+++ b/fs/xfs/libxfs/xfs_attr_remote.h
@@ -11,5 +11,7 @@ int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
 int xfs_attr_rmtval_get(struct xfs_da_args *args);
 int xfs_attr_rmtval_set(struct xfs_da_args *args);
 int xfs_attr_rmtval_remove(struct xfs_da_args *args);
+int xfs_attr_rmtval_stale(struct xfs_inode *ip, struct xfs_bmbt_irec *map,
+		xfs_buf_flags_t incore_flags);
 
 #endif /* __XFS_ATTR_REMOTE_H__ */
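
xfs_attr_rmtval_stale() factors out the "map one extent of the remote value, then stale
any cached copy" step so that both the removal path above and the inactivation path later
in this series can share it.  A consolidated sketch of that caller pattern;
example_stale_value_range() is hypothetical, everything else is used as in the hunks above.

	static int
	example_stale_value_range(
		struct xfs_inode	*dp,
		xfs_dablk_t		blkno,
		int			blkcnt)
	{
		struct xfs_bmbt_irec	map;
		int			nmap;
		int			error;

		while (blkcnt > 0) {
			nmap = 1;
			error = xfs_bmapi_read(dp, (xfs_fileoff_t)blkno, blkcnt,
					&map, &nmap, XFS_BMAPI_ATTRFORK);
			if (error)
				return error;
			if (XFS_IS_CORRUPT(dp->i_mount, nmap != 1))
				return -EFSCORRUPTED;
			/* unlogged remote value buffers are safe to mark stale */
			error = xfs_attr_rmtval_stale(dp, &map, 0);
			if (error)
				return error;
			blkno += map.br_blockcount;
			blkcnt -= map.br_blockcount;
		}
		return 0;
	}
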
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 4c2e046..9a6d7a8 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -730,11 +730,11 @@ xfs_bmap_extents_to_btree(
 	cur->bc_private.b.allocated++;
 	ip->i_d.di_nblocks++;
 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
-	abp = xfs_btree_get_bufl(mp, tp, args.fsbno);
-	if (XFS_IS_CORRUPT(mp, !abp)) {
-		error = -EFSCORRUPTED;
+	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+			XFS_FSB_TO_DADDR(mp, args.fsbno),
+			mp->m_bsize, 0, &abp);
+	if (error)
 		goto out_unreserve_dquot;
-	}
 
 	/*
 	 * Fill in the child block.
@@ -878,7 +878,11 @@ xfs_bmap_local_to_extents(
 	ASSERT(args.fsbno != NULLFSBLOCK);
 	ASSERT(args.len == 1);
 	tp->t_firstblock = args.fsbno;
-	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno);
+	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
+			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
+			args.mp->m_bsize, 0, &bp);
+	if (error)
+		goto done;
 
 	/*
 	 * Initialize the block, copy the data and log the remote buffer.
@@ -3307,11 +3311,12 @@ xfs_bmap_longest_free_extent(
 	pag = xfs_perag_get(mp, ag);
 	if (!pag->pagf_init) {
 		error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
-		if (error)
-			goto out;
-
-		if (!pag->pagf_init) {
-			*notinit = 1;
+		if (error) {
+			/* Couldn't lock the AGF, so skip this AG. */
+			if (error == -EAGAIN) {
+				*notinit = 1;
+				error = 0;
+			}
 			goto out;
 		}
 	}
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index e2cc989..fd300dc 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -679,42 +679,6 @@ xfs_btree_get_block(
 }
 
 /*
- * Get a buffer for the block, return it with no data read.
- * Long-form addressing.
- */
-xfs_buf_t *				/* buffer for fsbno */
-xfs_btree_get_bufl(
-	xfs_mount_t	*mp,		/* file system mount point */
-	xfs_trans_t	*tp,		/* transaction pointer */
-	xfs_fsblock_t	fsbno)		/* file system block number */
-{
-	xfs_daddr_t		d;		/* real disk block address */
-
-	ASSERT(fsbno != NULLFSBLOCK);
-	d = XFS_FSB_TO_DADDR(mp, fsbno);
-	return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, 0);
-}
-
-/*
- * Get a buffer for the block, return it with no data read.
- * Short-form addressing.
- */
-xfs_buf_t *				/* buffer for agno/agbno */
-xfs_btree_get_bufs(
-	xfs_mount_t	*mp,		/* file system mount point */
-	xfs_trans_t	*tp,		/* transaction pointer */
-	xfs_agnumber_t	agno,		/* allocation group number */
-	xfs_agblock_t	agbno)		/* allocation group block number */
-{
-	xfs_daddr_t		d;		/* real disk block address */
-
-	ASSERT(agno != NULLAGNUMBER);
-	ASSERT(agbno != NULLAGBLOCK);
-	d = XFS_AGB_TO_DADDR(mp, agno, agbno);
-	return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, 0);
-}
-
-/*
  * Change the cursor to point to the first record at the given level.
  * Other levels are unaffected.
  */
@@ -1270,11 +1234,10 @@ xfs_btree_get_buf_block(
 	error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
 	if (error)
 		return error;
-	*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
-				 mp->m_bsize, 0);
-
-	if (!*bpp)
-		return -ENOMEM;
+	error = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d, mp->m_bsize,
+			0, bpp);
+	if (error)
+		return error;
 
 	(*bpp)->b_ops = cur->bc_ops->buf_ops;
 	*block = XFS_BUF_TO_BLOCK(*bpp);
@@ -2389,8 +2352,6 @@ xfs_btree_lshift(
 	XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
 	if (level > 0) {
 		/* It's a nonleaf. operate on keys and ptrs */
-		int			i;		/* loop index */
-
 		for (i = 0; i < rrecs; i++) {
 			error = xfs_btree_debug_check_ptr(cur, rpp, i + 1, level);
 			if (error)
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index fb9b212..3eff7c3 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -297,27 +297,6 @@ xfs_btree_dup_cursor(
 	xfs_btree_cur_t		**ncur);/* output cursor */
 
 /*
- * Get a buffer for the block, return it with no data read.
- * Long-form addressing.
- */
-struct xfs_buf *				/* buffer for fsbno */
-xfs_btree_get_bufl(
-	struct xfs_mount	*mp,	/* file system mount point */
-	struct xfs_trans	*tp,	/* transaction pointer */
-	xfs_fsblock_t		fsbno);	/* file system block number */
-
-/*
- * Get a buffer for the block, return it with no data read.
- * Short-form addressing.
- */
-struct xfs_buf *				/* buffer for agno/agbno */
-xfs_btree_get_bufs(
-	struct xfs_mount	*mp,	/* file system mount point */
-	struct xfs_trans	*tp,	/* transaction pointer */
-	xfs_agnumber_t		agno,	/* allocation group number */
-	xfs_agblock_t		agbno);	/* allocation group block number */
-
-/*
  * Compute first and last byte offsets for the fields given.
  * Interprets the offsets table, which contains struct field offsets.
  */
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 8c3eafe..875e04f 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -2591,13 +2591,9 @@ xfs_da_get_buf(
 	if (error || nmap == 0)
 		goto out_free;
 
-	bp = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0);
-	error = bp ? bp->b_error : -EIO;
-	if (error) {
-		if (bp)
-			xfs_trans_brelse(tp, bp);
+	error = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0, &bp);
+	if (error)
 		goto out_free;
-	}
 
 	*bpp = bp;
 
diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h
index e16610d..0f4fbb0 100644
--- a/fs/xfs/libxfs/xfs_da_btree.h
+++ b/fs/xfs/libxfs/xfs_da_btree.h
@@ -89,6 +89,7 @@ typedef struct xfs_da_args {
 #define XFS_DA_OP_OKNOENT	0x0008	/* lookup/add op, ENOENT ok, else die */
 #define XFS_DA_OP_CILOOKUP	0x0010	/* lookup to return CI name if found */
 #define XFS_DA_OP_ALLOCVAL	0x0020	/* lookup to alloc buffer if found  */
+#define XFS_DA_OP_INCOMPLETE	0x0040	/* lookup INCOMPLETE attr keys */
 
 #define XFS_DA_OP_FLAGS \
 	{ XFS_DA_OP_JUSTCHECK,	"JUSTCHECK" }, \
@@ -96,7 +97,8 @@ typedef struct xfs_da_args {
 	{ XFS_DA_OP_ADDNAME,	"ADDNAME" }, \
 	{ XFS_DA_OP_OKNOENT,	"OKNOENT" }, \
 	{ XFS_DA_OP_CILOOKUP,	"CILOOKUP" }, \
-	{ XFS_DA_OP_ALLOCVAL,	"ALLOCVAL" }
+	{ XFS_DA_OP_ALLOCVAL,	"ALLOCVAL" }, \
+	{ XFS_DA_OP_INCOMPLETE,	"INCOMPLETE" }
 
 /*
  * Storage for holding state during Btree searches and split/join ops.
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index 3dee330..734837a 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -217,7 +217,7 @@ typedef struct xfs_dir2_sf_entry {
 	 * A 64-bit or 32-bit inode number follows here, at a variable offset
 	 * after the name.
 	 */
-} xfs_dir2_sf_entry_t;
+} __packed xfs_dir2_sf_entry_t;
 
 static inline int xfs_dir2_sf_hdr_size(int i8count)
 {
@@ -683,8 +683,6 @@ struct xfs_attr3_leafblock {
 
 /*
  * Flags used in the leaf_entry[i].flags field.
- * NOTE: the INCOMPLETE bit must not collide with the flags bits specified
- * on the system call, they are "or"ed together for various operations.
  */
 #define	XFS_ATTR_LOCAL_BIT	0	/* attr is stored locally */
 #define	XFS_ATTR_ROOT_BIT	1	/* limit access to trusted attrs */
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 1b7dcba..77e9fa3 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -1540,6 +1540,13 @@ typedef struct xfs_bmdr_block {
 #define BMBT_BLOCKCOUNT_BITLEN	21
 
 #define BMBT_STARTOFF_MASK	((1ULL << BMBT_STARTOFF_BITLEN) - 1)
+#define BMBT_BLOCKCOUNT_MASK	((1ULL << BMBT_BLOCKCOUNT_BITLEN) - 1)
+
+/*
+ * bmbt records have a file offset (block) field that is 54 bits wide, so this
+ * is the largest xfs_fileoff_t that we ever expect to see.
+ */
+#define XFS_MAX_FILEOFF		(BMBT_STARTOFF_MASK + BMBT_BLOCKCOUNT_MASK)
 
 typedef struct xfs_bmbt_rec {
 	__be64			l0, l1;
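
For reference, XFS_MAX_FILEOFF is just the largest startoff a bmbt record can encode plus
the largest blockcount, i.e. with the 54-bit offset and 21-bit count fields defined above:

	/* XFS_MAX_FILEOFF = BMBT_STARTOFF_MASK + BMBT_BLOCKCOUNT_MASK */
	/*                 = (2^54 - 1) + (2^21 - 1)                   */

Later in this series xfs_itruncate_extents_flags() unmaps up to this offset instead of the
page-cache-derived s_maxbytes limit.
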
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 5b759af..bf161e9 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -276,6 +276,7 @@ xfs_ialloc_inode_init(
 	int			i, j;
 	xfs_daddr_t		d;
 	xfs_ino_t		ino = 0;
+	int			error;
 
 	/*
 	 * Loop over the new block(s), filling in the inodes.  For small block
@@ -327,12 +328,11 @@ xfs_ialloc_inode_init(
 		 */
 		d = XFS_AGB_TO_DADDR(mp, agno, agbno +
 				(j * M_IGEO(mp)->blocks_per_cluster));
-		fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
-					 mp->m_bsize *
-					 M_IGEO(mp)->blocks_per_cluster,
-					 XBF_UNMAPPED);
-		if (!fbuf)
-			return -ENOMEM;
+		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+				mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
+				XBF_UNMAPPED, &fbuf);
+		if (error)
+			return error;
 
 		/* Initialize the inode buffers and log them appropriately. */
 		fbuf->b_ops = &xfs_inode_buf_ops;
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 8ef31d7..9bac0d2 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -462,11 +462,20 @@ static inline uint xfs_log_dinode_size(int version)
 #define	XFS_BLF_GDQUOT_BUF	(1<<4)
 
 /*
- * This is the structure used to lay out a buf log item in the
- * log.  The data map describes which 128 byte chunks of the buffer
- * have been logged.
+ * This is the structure used to lay out a buf log item in the log.  The data
+ * map describes which 128 byte chunks of the buffer have been logged.
+ *
+ * The placement of blf_map_size causes blf_data_map to start at an odd
+ * multiple of sizeof(unsigned int) offset within the struct.  Because the data
+ * bitmap size will always be an even number, the end of the data_map (and
+ * therefore the structure) will also be at an odd multiple of sizeof(unsigned
+ * int).  Some 64-bit compilers will insert padding at the end of the struct to
+ * ensure 64-bit alignment of blf_blkno, but 32-bit ones will not.  Therefore,
+ * XFS_BLF_DATAMAP_SIZE must be an odd number to make the padding explicit and
+ * keep the structure size consistent between 32-bit and 64-bit platforms.
  */
-#define XFS_BLF_DATAMAP_SIZE	((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / NBWORD)
+#define __XFS_BLF_DATAMAP_SIZE	((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / NBWORD)
+#define XFS_BLF_DATAMAP_SIZE	(__XFS_BLF_DATAMAP_SIZE + 1)
 
 typedef struct xfs_buf_log_format {
 	unsigned short	blf_type;	/* buf log item type indicator */
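
A quick size check of the comment above, assuming the usual values (64k XFS_MAX_BLOCKSIZE,
128-byte XFS_BLF_CHUNK, 32-bit words for NBWORD, four 16-bit header fields, a 64-bit
blf_blkno and a 32-bit blf_map_size):

	/* (XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / NBWORD = (65536 / 128) / 32 = 16 */
	/* XFS_BLF_DATAMAP_SIZE = 16 + 1 = 17 words = 68 bytes of bitmap         */
	/* sizeof(struct xfs_buf_log_format) = 4 * 2 + 8 + 4 + 68 = 88 bytes     */

88 is a multiple of 8 on both 32-bit and 64-bit builds, which is what the
XFS_CHECK_STRUCT_SIZE(struct xfs_buf_log_format, 88) assertion added to xfs_ondisk.h
below pins down.
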
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index d7d702e..6e1665f 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1177,8 +1177,6 @@ xfs_refcount_finish_one(
 				XFS_ALLOC_FLAG_FREEING, &agbp);
 		if (error)
 			return error;
-		if (XFS_IS_CORRUPT(tp->t_mountp, !agbp))
-			return -EFSCORRUPTED;
 
 		rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
 		if (!rcur) {
@@ -1718,10 +1716,6 @@ xfs_refcount_recover_cow_leftovers(
 	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
 	if (error)
 		goto out_trans;
-	if (!agbp) {
-		error = -ENOMEM;
-		goto out_trans;
-	}
 	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
 
 	/* Find all the leftover CoW staging extents. */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 0ac6975..2f60fc3 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -985,9 +985,9 @@ xfs_update_secondary_sbs(
 	for (agno = 1; agno < mp->m_sb.sb_agcount; agno++) {
 		struct xfs_buf		*bp;
 
-		bp = xfs_buf_get(mp->m_ddev_targp,
+		error = xfs_buf_get(mp->m_ddev_targp,
 				 XFS_AG_DADDR(mp, agno, XFS_SB_DADDR),
-				 XFS_FSS_TO_BB(mp, 1));
+				 XFS_FSS_TO_BB(mp, 1), &bp);
 		/*
 		 * If we get an error reading or writing alternate superblocks,
 		 * continue.  xfs_repair chooses the "best" superblock based
@@ -995,12 +995,12 @@ xfs_update_secondary_sbs(
 		 * superblocks un-updated than updated, and xfs_repair may
 		 * pick them over the properly-updated primary.
 		 */
-		if (!bp) {
+		if (error) {
 			xfs_warn(mp,
 		"error allocating secondary superblock for ag %d",
 				agno);
 			if (!saved_error)
-				saved_error = -ENOMEM;
+				saved_error = error;
 			continue;
 		}
 
@@ -1185,13 +1185,14 @@ xfs_sb_get_secondary(
 	struct xfs_buf		**bpp)
 {
 	struct xfs_buf		*bp;
+	int			error;
 
 	ASSERT(agno != 0 && agno != NULLAGNUMBER);
-	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
 			XFS_AG_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
-			XFS_FSS_TO_BB(mp, 1), 0);
-	if (!bp)
-		return -ENOMEM;
+			XFS_FSS_TO_BB(mp, 1), 0, &bp);
+	if (error)
+		return error;
 	bp->b_ops = &xfs_sb_buf_ops;
 	xfs_buf_oneshot(bp);
 	*bpp = bp;
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 7a1a38b..d5e6db9 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -659,8 +659,6 @@ xrep_agfl(
 	error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.agno, 0, &agf_bp);
 	if (error)
 		return error;
-	if (!agf_bp)
-		return -ENOMEM;
 
 	/*
 	 * Make sure we have the AGFL buffer, as scrub might have decided it
@@ -735,8 +733,6 @@ xrep_agi_find_btrees(
 	error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.agno, 0, &agf_bp);
 	if (error)
 		return error;
-	if (!agf_bp)
-		return -ENOMEM;
 
 	/* Find the btree roots. */
 	error = xrep_find_ag_btree_roots(sc, agf_bp, fab, NULL);
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
index 7251c66..ec2064e 100644
--- a/fs/xfs/scrub/fscounters.c
+++ b/fs/xfs/scrub/fscounters.c
@@ -83,9 +83,6 @@ xchk_fscount_warmup(
 		error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp);
 		if (error)
 			break;
-		error = -ENOMEM;
-		if (!agf_bp || !agi_bp)
-			break;
 
 		/*
 		 * These are supposed to be initialized by the header read
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index b70a88b..e489d7a 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -341,13 +341,17 @@ xrep_init_btblock(
 	struct xfs_trans		*tp = sc->tp;
 	struct xfs_mount		*mp = sc->mp;
 	struct xfs_buf			*bp;
+	int				error;
 
 	trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
 			XFS_FSB_TO_AGBNO(mp, fsb), btnum);
 
 	ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.agno);
-	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, fsb),
-			XFS_FSB_TO_BB(mp, 1), 0);
+	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+			XFS_FSB_TO_DADDR(mp, fsb), XFS_FSB_TO_BB(mp, 1), 0,
+			&bp);
+	if (error)
+		return error;
 	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
 	xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.agno);
 	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
@@ -542,8 +546,6 @@ xrep_reap_block(
 		error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf_bp);
 		if (error)
 			return error;
-		if (!agf_bp)
-			return -ENOMEM;
 	} else {
 		agf_bp = sc->sa.agf_bp;
 	}
diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h
index 60c61d7..c3422403 100644
--- a/fs/xfs/scrub/repair.h
+++ b/fs/xfs/scrub/repair.h
@@ -75,7 +75,6 @@ static inline xfs_extlen_t
 xrep_calc_ag_resblks(
 	struct xfs_scrub	*sc)
 {
-	ASSERT(!(sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR));
 	return 0;
 }
 
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 91693fc..cd743fad 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -145,7 +145,8 @@ xfs_get_acl(struct inode *inode, int type)
 	 * go out to the disk.
 	 */
 	len = XFS_ACL_MAX_SIZE(ip->i_mount);
-	error = xfs_attr_get(ip, ea_name, (unsigned char **)&xfs_acl, &len,
+	error = xfs_attr_get(ip, ea_name, strlen(ea_name),
+				(unsigned char **)&xfs_acl, &len,
 				ATTR_ALLOC | ATTR_ROOT);
 	if (error) {
 		/*
@@ -196,15 +197,17 @@ __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 		len -= sizeof(struct xfs_acl_entry) *
 			 (XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count);
 
-		error = xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl,
-				len, ATTR_ROOT);
+		error = xfs_attr_set(ip, ea_name, strlen(ea_name),
+				     (unsigned char *)xfs_acl, len, ATTR_ROOT);
 
 		kmem_free(xfs_acl);
 	} else {
 		/*
 		 * A NULL ACL argument means we want to remove the ACL.
 		 */
-		error = xfs_attr_remove(ip, ea_name, ATTR_ROOT);
+		error = xfs_attr_remove(ip, ea_name,
+					strlen(ea_name),
+					ATTR_ROOT);
 
 		/*
 		 * If the attribute didn't exist to start with that's fine.
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index 5ff4952..bbfa6ba 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -25,22 +25,18 @@
 #include "xfs_error.h"
 
 /*
- * Look at all the extents for this logical region,
- * invalidate any buffers that are incore/in transactions.
+ * Invalidate any incore buffers associated with this remote attribute value
+ * extent.   We never log remote attribute value buffers, which means that they
+ * won't be attached to a transaction and are therefore safe to mark stale.
+ * The actual bunmapi will be taken care of later.
  */
 STATIC int
-xfs_attr3_leaf_freextent(
-	struct xfs_trans	**trans,
+xfs_attr3_rmt_stale(
 	struct xfs_inode	*dp,
 	xfs_dablk_t		blkno,
 	int			blkcnt)
 {
 	struct xfs_bmbt_irec	map;
-	struct xfs_buf		*bp;
-	xfs_dablk_t		tblkno;
-	xfs_daddr_t		dblkno;
-	int			tblkcnt;
-	int			dblkcnt;
 	int			nmap;
 	int			error;
 
@@ -48,47 +44,29 @@ xfs_attr3_leaf_freextent(
 	 * Roll through the "value", invalidating the attribute value's
 	 * blocks.
 	 */
-	tblkno = blkno;
-	tblkcnt = blkcnt;
-	while (tblkcnt > 0) {
+	while (blkcnt > 0) {
 		/*
 		 * Try to remember where we decided to put the value.
 		 */
 		nmap = 1;
-		error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt,
+		error = xfs_bmapi_read(dp, (xfs_fileoff_t)blkno, blkcnt,
 				       &map, &nmap, XFS_BMAPI_ATTRFORK);
-		if (error) {
+		if (error)
 			return error;
-		}
-		ASSERT(nmap == 1);
-		ASSERT(map.br_startblock != DELAYSTARTBLOCK);
+		if (XFS_IS_CORRUPT(dp->i_mount, nmap != 1))
+			return -EFSCORRUPTED;
 
 		/*
-		 * If it's a hole, these are already unmapped
-		 * so there's nothing to invalidate.
+		 * Mark any incore buffers for the remote value as stale.  We
+		 * never log remote attr value buffers, so the buffer should be
+		 * easy to kill.
 		 */
-		if (map.br_startblock != HOLESTARTBLOCK) {
+		error = xfs_attr_rmtval_stale(dp, &map, 0);
+		if (error)
+			return error;
 
-			dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
-						  map.br_startblock);
-			dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
-						map.br_blockcount);
-			bp = xfs_trans_get_buf(*trans,
-					dp->i_mount->m_ddev_targp,
-					dblkno, dblkcnt, 0);
-			if (!bp)
-				return -ENOMEM;
-			xfs_trans_binval(*trans, bp);
-			/*
-			 * Roll to next transaction.
-			 */
-			error = xfs_trans_roll_inode(trans, dp);
-			if (error)
-				return error;
-		}
-
-		tblkno += map.br_blockcount;
-		tblkcnt -= map.br_blockcount;
+		blkno += map.br_blockcount;
+		blkcnt -= map.br_blockcount;
 	}
 
 	return 0;
@@ -102,86 +80,45 @@ xfs_attr3_leaf_freextent(
  */
 STATIC int
 xfs_attr3_leaf_inactive(
-	struct xfs_trans	**trans,
-	struct xfs_inode	*dp,
-	struct xfs_buf		*bp)
+	struct xfs_trans		**trans,
+	struct xfs_inode		*dp,
+	struct xfs_buf			*bp)
 {
-	struct xfs_attr_leafblock *leaf;
-	struct xfs_attr3_icleaf_hdr ichdr;
-	struct xfs_attr_leaf_entry *entry;
+	struct xfs_attr3_icleaf_hdr	ichdr;
+	struct xfs_mount		*mp = bp->b_mount;
+	struct xfs_attr_leafblock	*leaf = bp->b_addr;
+	struct xfs_attr_leaf_entry	*entry;
 	struct xfs_attr_leaf_name_remote *name_rmt;
-	struct xfs_attr_inactive_list *list;
-	struct xfs_attr_inactive_list *lp;
-	int			error;
-	int			count;
-	int			size;
-	int			tmp;
-	int			i;
-	struct xfs_mount	*mp = bp->b_mount;
+	int				error = 0;
+	int				i;
 
-	leaf = bp->b_addr;
 	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
 
 	/*
-	 * Count the number of "remote" value extents.
+	 * Find the remote value extents for this leaf and invalidate their
+	 * incore buffers.
 	 */
-	count = 0;
 	entry = xfs_attr3_leaf_entryp(leaf);
 	for (i = 0; i < ichdr.count; entry++, i++) {
-		if (be16_to_cpu(entry->nameidx) &&
-		    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
-			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
-			if (name_rmt->valueblk)
-				count++;
-		}
+		int		blkcnt;
+
+		if (!entry->nameidx || (entry->flags & XFS_ATTR_LOCAL))
+			continue;
+
+		name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
+		if (!name_rmt->valueblk)
+			continue;
+
+		blkcnt = xfs_attr3_rmt_blocks(dp->i_mount,
+				be32_to_cpu(name_rmt->valuelen));
+		error = xfs_attr3_rmt_stale(dp,
+				be32_to_cpu(name_rmt->valueblk), blkcnt);
+		if (error)
+			goto err;
 	}
 
-	/*
-	 * If there are no "remote" values, we're done.
-	 */
-	if (count == 0) {
-		xfs_trans_brelse(*trans, bp);
-		return 0;
-	}
-
-	/*
-	 * Allocate storage for a list of all the "remote" value extents.
-	 */
-	size = count * sizeof(xfs_attr_inactive_list_t);
-	list = kmem_alloc(size, 0);
-
-	/*
-	 * Identify each of the "remote" value extents.
-	 */
-	lp = list;
-	entry = xfs_attr3_leaf_entryp(leaf);
-	for (i = 0; i < ichdr.count; entry++, i++) {
-		if (be16_to_cpu(entry->nameidx) &&
-		    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
-			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
-			if (name_rmt->valueblk) {
-				lp->valueblk = be32_to_cpu(name_rmt->valueblk);
-				lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
-						    be32_to_cpu(name_rmt->valuelen));
-				lp++;
-			}
-		}
-	}
-	xfs_trans_brelse(*trans, bp);	/* unlock for trans. in freextent() */
-
-	/*
-	 * Invalidate each of the "remote" value extents.
-	 */
-	error = 0;
-	for (lp = list, i = 0; i < count; i++, lp++) {
-		tmp = xfs_attr3_leaf_freextent(trans, dp,
-				lp->valueblk, lp->valuelen);
-
-		if (error == 0)
-			error = tmp;	/* save only the 1st errno */
-	}
-
-	kmem_free(list);
+	xfs_trans_brelse(*trans, bp);
+err:
 	return error;
 }
 
@@ -268,11 +205,12 @@ xfs_attr3_node_inactive(
 		/*
 		 * Remove the subsidiary block from the cache and from the log.
 		 */
-		child_bp = xfs_trans_get_buf(*trans, mp->m_ddev_targp,
+		error = xfs_trans_get_buf(*trans, mp->m_ddev_targp,
 				child_blkno,
-				XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0);
-		if (!child_bp)
-			return -EIO;
+				XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0,
+				&child_bp);
+		if (error)
+			return error;
 		error = bp->b_error;
 		if (error) {
 			xfs_trans_brelse(*trans, child_bp);
@@ -361,10 +299,10 @@ xfs_attr3_root_inactive(
 	/*
 	 * Invalidate the incore copy of the root block.
 	 */
-	bp = xfs_trans_get_buf(*trans, mp->m_ddev_targp, blkno,
-			XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0);
-	if (!bp)
-		return -EIO;
+	error = xfs_trans_get_buf(*trans, mp->m_ddev_targp, blkno,
+			XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0, &bp);
+	if (error)
+		return error;
 	error = bp->b_error;
 	if (error) {
 		xfs_trans_brelse(*trans, bp);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index a0229c3..217e4f8 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -198,20 +198,22 @@ xfs_buf_free_maps(
 	}
 }
 
-static struct xfs_buf *
+static int
 _xfs_buf_alloc(
 	struct xfs_buftarg	*target,
 	struct xfs_buf_map	*map,
 	int			nmaps,
-	xfs_buf_flags_t		flags)
+	xfs_buf_flags_t		flags,
+	struct xfs_buf		**bpp)
 {
 	struct xfs_buf		*bp;
 	int			error;
 	int			i;
 
+	*bpp = NULL;
 	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
 	if (unlikely(!bp))
-		return NULL;
+		return -ENOMEM;
 
 	/*
 	 * We don't want certain flags to appear in b_flags unless they are
@@ -239,7 +241,7 @@ _xfs_buf_alloc(
 	error = xfs_buf_get_maps(bp, nmaps);
 	if (error)  {
 		kmem_cache_free(xfs_buf_zone, bp);
-		return NULL;
+		return error;
 	}
 
 	bp->b_bn = map[0].bm_bn;
@@ -256,7 +258,8 @@ _xfs_buf_alloc(
 	XFS_STATS_INC(bp->b_mount, xb_create);
 	trace_xfs_buf_init(bp, _RET_IP_);
 
-	return bp;
+	*bpp = bp;
+	return 0;
 }
 
 /*
@@ -682,53 +685,39 @@ xfs_buf_incore(
  * cache hits, as metadata intensive workloads will see 3 orders of magnitude
  * more hits than misses.
  */
-struct xfs_buf *
+int
 xfs_buf_get_map(
 	struct xfs_buftarg	*target,
 	struct xfs_buf_map	*map,
 	int			nmaps,
-	xfs_buf_flags_t		flags)
+	xfs_buf_flags_t		flags,
+	struct xfs_buf		**bpp)
 {
 	struct xfs_buf		*bp;
 	struct xfs_buf		*new_bp;
 	int			error = 0;
 
+	*bpp = NULL;
 	error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
-
-	switch (error) {
-	case 0:
-		/* cache hit */
+	if (!error)
 		goto found;
-	case -EAGAIN:
-		/* cache hit, trylock failure, caller handles failure */
-		ASSERT(flags & XBF_TRYLOCK);
-		return NULL;
-	case -ENOENT:
-		/* cache miss, go for insert */
-		break;
-	case -EFSCORRUPTED:
-	default:
-		/*
-		 * None of the higher layers understand failure types
-		 * yet, so return NULL to signal a fatal lookup error.
-		 */
-		return NULL;
-	}
+	if (error != -ENOENT)
+		return error;
 
-	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
-	if (unlikely(!new_bp))
-		return NULL;
+	error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp);
+	if (error)
+		return error;
 
 	error = xfs_buf_allocate_memory(new_bp, flags);
 	if (error) {
 		xfs_buf_free(new_bp);
-		return NULL;
+		return error;
 	}
 
 	error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
 	if (error) {
 		xfs_buf_free(new_bp);
-		return NULL;
+		return error;
 	}
 
 	if (bp != new_bp)
@@ -741,7 +730,7 @@ xfs_buf_get_map(
 			xfs_warn(target->bt_mount,
 				"%s: failed to map pagesn", __func__);
 			xfs_buf_relse(bp);
-			return NULL;
+			return error;
 		}
 	}
 
@@ -754,7 +743,8 @@ xfs_buf_get_map(
 
 	XFS_STATS_INC(target->bt_mount, xb_get);
 	trace_xfs_buf_get(bp, flags, _RET_IP_);
-	return bp;
+	*bpp = bp;
+	return 0;
 }
 
 STATIC int
@@ -806,46 +796,77 @@ xfs_buf_reverify(
 	return bp->b_error;
 }
 
-xfs_buf_t *
+int
 xfs_buf_read_map(
 	struct xfs_buftarg	*target,
 	struct xfs_buf_map	*map,
 	int			nmaps,
 	xfs_buf_flags_t		flags,
-	const struct xfs_buf_ops *ops)
+	struct xfs_buf		**bpp,
+	const struct xfs_buf_ops *ops,
+	xfs_failaddr_t		fa)
 {
 	struct xfs_buf		*bp;
+	int			error;
 
 	flags |= XBF_READ;
+	*bpp = NULL;
 
-	bp = xfs_buf_get_map(target, map, nmaps, flags);
-	if (!bp)
-		return NULL;
+	error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
+	if (error)
+		return error;
 
 	trace_xfs_buf_read(bp, flags, _RET_IP_);
 
 	if (!(bp->b_flags & XBF_DONE)) {
+		/* Initiate the buffer read and wait. */
 		XFS_STATS_INC(target->bt_mount, xb_get_read);
 		bp->b_ops = ops;
-		_xfs_buf_read(bp, flags);
-		return bp;
+		error = _xfs_buf_read(bp, flags);
+
+		/* Readahead iodone already dropped the buffer, so exit. */
+		if (flags & XBF_ASYNC)
+			return 0;
+	} else {
+		/* Buffer already read; all we need to do is check it. */
+		error = xfs_buf_reverify(bp, ops);
+
+		/* Readahead already finished; drop the buffer and exit. */
+		if (flags & XBF_ASYNC) {
+			xfs_buf_relse(bp);
+			return 0;
+		}
+
+		/* We do not want read in the flags */
+		bp->b_flags &= ~XBF_READ;
+		ASSERT(bp->b_ops != NULL || ops == NULL);
 	}
 
-	xfs_buf_reverify(bp, ops);
+	/*
+	 * If we've had a read error, then the contents of the buffer are
+	 * invalid and should not be used. To ensure that a followup read tries
+	 * to pull the buffer from disk again, we clear the XBF_DONE flag and
+	 * mark the buffer stale. This ensures that anyone who has a current
+	 * reference to the buffer will interpret its contents correctly and
+	 * future cache lookups will also treat it as an empty, uninitialised
+	 * buffer.
+	 */
+	if (error) {
+		if (!XFS_FORCED_SHUTDOWN(target->bt_mount))
+			xfs_buf_ioerror_alert(bp, fa);
 
-	if (flags & XBF_ASYNC) {
-		/*
-		 * Read ahead call which is already satisfied,
-		 * drop the buffer
-		 */
+		bp->b_flags &= ~XBF_DONE;
+		xfs_buf_stale(bp);
 		xfs_buf_relse(bp);
-		return NULL;
+
+		/* bad CRC means corrupted metadata */
+		if (error == -EFSBADCRC)
+			error = -EFSCORRUPTED;
+		return error;
 	}
 
-	/* We do not want read in the flags */
-	bp->b_flags &= ~XBF_READ;
-	ASSERT(bp->b_ops != NULL || ops == NULL);
-	return bp;
+	*bpp = bp;
+	return 0;
 }
 
 /*
@@ -859,11 +880,14 @@ xfs_buf_readahead_map(
 	int			nmaps,
 	const struct xfs_buf_ops *ops)
 {
+	struct xfs_buf		*bp;
+
 	if (bdi_read_congested(target->bt_bdev->bd_bdi))
 		return;
 
 	xfs_buf_read_map(target, map, nmaps,
-		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
+		     XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
+		     __this_address);
 }
 
 /*
@@ -880,12 +904,13 @@ xfs_buf_read_uncached(
 	const struct xfs_buf_ops *ops)
 {
 	struct xfs_buf		*bp;
+	int			error;
 
 	*bpp = NULL;
 
-	bp = xfs_buf_get_uncached(target, numblks, flags);
-	if (!bp)
-		return -ENOMEM;
+	error = xfs_buf_get_uncached(target, numblks, flags, &bp);
+	if (error)
+		return error;
 
 	/* set up the buffer for a read IO */
 	ASSERT(bp->b_map_count == 1);
@@ -896,7 +921,7 @@ xfs_buf_read_uncached(
 
 	xfs_buf_submit(bp);
 	if (bp->b_error) {
-		int	error = bp->b_error;
+		error = bp->b_error;
 		xfs_buf_relse(bp);
 		return error;
 	}
@@ -905,20 +930,23 @@ xfs_buf_read_uncached(
 	return 0;
 }
 
-xfs_buf_t *
+int
 xfs_buf_get_uncached(
 	struct xfs_buftarg	*target,
 	size_t			numblks,
-	int			flags)
+	int			flags,
+	struct xfs_buf		**bpp)
 {
 	unsigned long		page_count;
 	int			error, i;
 	struct xfs_buf		*bp;
 	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
 
+	*bpp = NULL;
+
 	/* flags might contain irrelevant bits, pass only what we care about */
-	bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT);
-	if (unlikely(bp == NULL))
+	error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
+	if (error)
 		goto fail;
 
 	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
@@ -928,8 +956,10 @@ xfs_buf_get_uncached(
 
 	for (i = 0; i < page_count; i++) {
 		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
-		if (!bp->b_pages[i])
+		if (!bp->b_pages[i]) {
+			error = -ENOMEM;
 			goto fail_free_mem;
+		}
 	}
 	bp->b_flags |= _XBF_PAGES;
 
@@ -941,7 +971,8 @@ xfs_buf_get_uncached(
 	}
 
 	trace_xfs_buf_get_uncached(bp, _RET_IP_);
-	return bp;
+	*bpp = bp;
+	return 0;
 
  fail_free_mem:
 	while (--i >= 0)
@@ -951,7 +982,7 @@ xfs_buf_get_uncached(
 	xfs_buf_free_maps(bp);
 	kmem_cache_free(xfs_buf_zone, bp);
  fail:
-	return NULL;
+	return error;
 }
 
 /*
@@ -1205,10 +1236,10 @@ __xfs_buf_ioerror(
 void
 xfs_buf_ioerror_alert(
 	struct xfs_buf		*bp,
-	const char		*func)
+	xfs_failaddr_t		func)
 {
 	xfs_alert(bp->b_mount,
-"metadata I/O error in \"%s\" at daddr 0x%llx len %d error %d",
+"metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
 			func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length,
 			-bp->b_error);
 }
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 56e081d..d79a1fe 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -192,37 +192,40 @@ struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
 			   xfs_daddr_t blkno, size_t numblks,
 			   xfs_buf_flags_t flags);
 
-struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
-			       struct xfs_buf_map *map, int nmaps,
-			       xfs_buf_flags_t flags);
-struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
-			       struct xfs_buf_map *map, int nmaps,
-			       xfs_buf_flags_t flags,
-			       const struct xfs_buf_ops *ops);
+int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
+		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
+int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
+		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
+		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
 void xfs_buf_readahead_map(struct xfs_buftarg *target,
 			       struct xfs_buf_map *map, int nmaps,
 			       const struct xfs_buf_ops *ops);
 
-static inline struct xfs_buf *
+static inline int
 xfs_buf_get(
 	struct xfs_buftarg	*target,
 	xfs_daddr_t		blkno,
-	size_t			numblks)
+	size_t			numblks,
+	struct xfs_buf		**bpp)
 {
 	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
-	return xfs_buf_get_map(target, &map, 1, 0);
+
+	return xfs_buf_get_map(target, &map, 1, 0, bpp);
 }
 
-static inline struct xfs_buf *
+static inline int
 xfs_buf_read(
 	struct xfs_buftarg	*target,
 	xfs_daddr_t		blkno,
 	size_t			numblks,
 	xfs_buf_flags_t		flags,
+	struct xfs_buf		**bpp,
 	const struct xfs_buf_ops *ops)
 {
 	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
-	return xfs_buf_read_map(target, &map, 1, flags, ops);
+
+	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
+			__builtin_return_address(0));
 }
 
 static inline void
@@ -236,8 +239,8 @@ xfs_buf_readahead(
 	return xfs_buf_readahead_map(target, &map, 1, ops);
 }
 
-struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
-				int flags);
+int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks, int flags,
+		struct xfs_buf **bpp);
 int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
 			  size_t numblks, int flags, struct xfs_buf **bpp,
 			  const struct xfs_buf_ops *ops);
@@ -259,7 +262,7 @@ extern void xfs_buf_ioend(struct xfs_buf *bp);
 extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
 		xfs_failaddr_t failaddr);
 #define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
-extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
+extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
 
 extern int __xfs_buf_submit(struct xfs_buf *bp, bool);
 static inline int xfs_buf_submit(struct xfs_buf *bp)
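
The conversion above moves every buffer-cache entry point to the "return an errno, hand
the buffer back through an out parameter" convention.  A hedged before/after fragment of
a typical caller (bp, error, target, blkno and numblks are assumed to be declared by the
caller):

	/* old style: a NULL return collapsed every failure into -ENOMEM */
	bp = xfs_buf_get(target, blkno, numblks);
	if (!bp)
		return -ENOMEM;

	/* new style: the real errno from the lookup or allocation survives */
	error = xfs_buf_get(target, blkno, numblks, &bp);
	if (error)
		return error;

xfs_buf_read() additionally reports and handles read failures itself now, so callers no
longer have to poke at bp->b_error or call xfs_buf_ioerror_alert() by hand.
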
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 3984779..663810e 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -27,6 +27,23 @@ static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
 
 STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);
 
+/* Is this log iovec plausibly large enough to contain the buffer log format? */
+bool
+xfs_buf_log_check_iovec(
+	struct xfs_log_iovec		*iovec)
+{
+	struct xfs_buf_log_format	*blfp = iovec->i_addr;
+	char				*bmp_end;
+	char				*item_end;
+
+	if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
+		return false;
+
+	item_end = (char *)iovec->i_addr + iovec->i_len;
+	bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
+	return bmp_end <= item_end;
+}
+
 static inline int
 xfs_buf_log_format_size(
 	struct xfs_buf_log_format *blfp)
@@ -688,7 +705,7 @@ static const struct xfs_item_ops xfs_buf_item_ops = {
 	.iop_push	= xfs_buf_item_push,
 };
 
-STATIC int
+STATIC void
 xfs_buf_item_get_format(
 	struct xfs_buf_log_item	*bip,
 	int			count)
@@ -698,14 +715,11 @@ xfs_buf_item_get_format(
 
 	if (count == 1) {
 		bip->bli_formats = &bip->__bli_format;
-		return 0;
+		return;
 	}
 
 	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
 				0);
-	if (!bip->bli_formats)
-		return -ENOMEM;
-	return 0;
 }
 
 STATIC void
@@ -731,7 +745,6 @@ xfs_buf_item_init(
 	struct xfs_buf_log_item	*bip = bp->b_log_item;
 	int			chunks;
 	int			map_size;
-	int			error;
 	int			i;
 
 	/*
@@ -760,19 +773,22 @@ xfs_buf_item_init(
 	 * Discontiguous buffer support follows the layout of the underlying
 	 * buffer. This makes the implementation as simple as possible.
 	 */
-	error = xfs_buf_item_get_format(bip, bp->b_map_count);
-	ASSERT(error == 0);
-	if (error) {	/* to stop gcc throwing set-but-unused warnings */
-		kmem_cache_free(xfs_buf_item_zone, bip);
-		return error;
-	}
-
+	xfs_buf_item_get_format(bip, bp->b_map_count);
 
 	for (i = 0; i < bip->bli_format_count; i++) {
 		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
 				      XFS_BLF_CHUNK);
 		map_size = DIV_ROUND_UP(chunks, NBWORD);
 
+		if (map_size > XFS_BLF_DATAMAP_SIZE) {
+			kmem_cache_free(xfs_buf_item_zone, bip);
+			xfs_err(mp,
+	"buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
+					map_size,
+					BBTOB(bp->b_maps[i].bm_len));
+			return -EFSCORRUPTED;
+		}
+
 		bip->bli_formats[i].blf_type = XFS_LI_BUF;
 		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
 		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
@@ -805,6 +821,9 @@ xfs_buf_item_log_segment(
 	uint		end_bit;
 	uint		mask;
 
+	ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
+	ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
+
 	/*
 	 * Convert byte offsets to bit numbers.
 	 */
@@ -1094,7 +1113,7 @@ xfs_buf_iodone_callback_error(
 	if (bp->b_target != lasttarg ||
 	    time_after(jiffies, (lasttime + 5*HZ))) {
 		lasttime = jiffies;
-		xfs_buf_ioerror_alert(bp, __func__);
+		xfs_buf_ioerror_alert(bp, __this_address);
 	}
 	lasttarg = bp->b_target;
 
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 4a054b1..30114b5 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -61,6 +61,7 @@ void	xfs_buf_iodone_callbacks(struct xfs_buf *);
 void	xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
 bool	xfs_buf_resubmit_failed_buffers(struct xfs_buf *,
 					struct list_head *);
+bool	xfs_buf_log_check_iovec(struct xfs_log_iovec *iovec);
 
 extern kmem_zone_t	*xfs_buf_item_zone;
 
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index cae6136..0b8350e 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -45,7 +45,7 @@ xfs_trim_extents(
 	xfs_log_force(mp, XFS_LOG_SYNC);
 
 	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
-	if (error || !agbp)
+	if (error)
 		goto out_put_perag;
 
 	cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 2bff21c..d223e1a 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -137,7 +137,7 @@ xfs_qm_adjust_dqtimers(
 		    (d->d_blk_hardlimit &&
 		     (be64_to_cpu(d->d_bcount) >
 		      be64_to_cpu(d->d_blk_hardlimit)))) {
-			d->d_btimer = cpu_to_be32(get_seconds() +
+			d->d_btimer = cpu_to_be32(ktime_get_real_seconds() +
 					mp->m_quotainfo->qi_btimelimit);
 		} else {
 			d->d_bwarns = 0;
@@ -160,7 +160,7 @@ xfs_qm_adjust_dqtimers(
 		    (d->d_ino_hardlimit &&
 		     (be64_to_cpu(d->d_icount) >
 		      be64_to_cpu(d->d_ino_hardlimit)))) {
-			d->d_itimer = cpu_to_be32(get_seconds() +
+			d->d_itimer = cpu_to_be32(ktime_get_real_seconds() +
 					mp->m_quotainfo->qi_itimelimit);
 		} else {
 			d->d_iwarns = 0;
@@ -183,7 +183,7 @@ xfs_qm_adjust_dqtimers(
 		    (d->d_rtb_hardlimit &&
 		     (be64_to_cpu(d->d_rtbcount) >
 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
-			d->d_rtbtimer = cpu_to_be32(get_seconds() +
+			d->d_rtbtimer = cpu_to_be32(ktime_get_real_seconds() +
 					mp->m_quotainfo->qi_rtbtimelimit);
 		} else {
 			d->d_rtbwarns = 0;
@@ -320,10 +320,10 @@ xfs_dquot_disk_alloc(
 	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
 
 	/* now we can just get the buffer (there's nothing to read yet) */
-	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
-			mp->m_quotainfo->qi_dqchunklen, 0);
-	if (!bp)
-		return -ENOMEM;
+	error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
+			mp->m_quotainfo->qi_dqchunklen, 0, &bp);
+	if (error)
+		return error;
 	bp->b_ops = &xfs_dquot_buf_ops;
 
 	/*
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index c932501..b8a4a3f 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -187,7 +187,12 @@ xfs_file_dio_aio_read(
 
 	file_accessed(iocb->ki_filp);
 
-	xfs_ilock(ip, XFS_IOLOCK_SHARED);
+	if (iocb->ki_flags & IOCB_NOWAIT) {
+		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
+			return -EAGAIN;
+	} else {
+		xfs_ilock(ip, XFS_IOLOCK_SHARED);
+	}
 	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL,
 			is_sync_kiocb(iocb));
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 5f12b5d..1a88025 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -159,16 +159,15 @@ xfs_filestream_pick_ag(
 
 		if (!pag->pagf_init) {
 			err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
-			if (err && !trylock) {
+			if (err) {
 				xfs_perag_put(pag);
-				return err;
+				if (err != -EAGAIN)
+					return err;
+				/* Couldn't lock the AGF, skip this AG. */
+				continue;
 			}
 		}
 
-		/* Might fail sometimes during the 1st pass with trylock set. */
-		if (!pag->pagf_init)
-			goto next_ag;
-
 		/* Keep track of the AG with the most free blocks. */
 		if (pag->pagf_freeblks > maxfree) {
 			maxfree = pag->pagf_freeblks;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 401da19..c5077e6 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1518,10 +1518,8 @@ xfs_itruncate_extents_flags(
 	struct xfs_mount	*mp = ip->i_mount;
 	struct xfs_trans	*tp = *tpp;
 	xfs_fileoff_t		first_unmap_block;
-	xfs_fileoff_t		last_block;
 	xfs_filblks_t		unmap_len;
 	int			error = 0;
-	int			done = 0;
 
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
@@ -1541,21 +1539,22 @@ xfs_itruncate_extents_flags(
 	 * the end of the file (in a crash where the space is allocated
 	 * but the inode size is not yet updated), simply remove any
 	 * blocks which show up between the new EOF and the maximum
-	 * possible file size.  If the first block to be removed is
-	 * beyond the maximum file size (ie it is the same as last_block),
-	 * then there is nothing to do.
+	 * possible file size.
+	 *
+	 * We have to free all the blocks to the bmbt maximum offset, even if
+	 * the page cache can't scale that far.
 	 */
 	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
-	last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
-	if (first_unmap_block == last_block)
+	if (first_unmap_block >= XFS_MAX_FILEOFF) {
+		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
 		return 0;
+	}
 
-	ASSERT(first_unmap_block < last_block);
-	unmap_len = last_block - first_unmap_block + 1;
-	while (!done) {
+	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
+	while (unmap_len > 0) {
 		ASSERT(tp->t_firstblock == NULLFSBLOCK);
-		error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
-				    XFS_ITRUNC_MAX_EXTENTS, &done);
+		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
+				flags, XFS_ITRUNC_MAX_EXTENTS);
 		if (error)
 			goto out;
 
@@ -1575,7 +1574,7 @@ xfs_itruncate_extents_flags(
 	if (whichfork == XFS_DATA_FORK) {
 		/* Remove all pending CoW reservations. */
 		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
-				first_unmap_block, last_block, true);
+				first_unmap_block, XFS_MAX_FILEOFF, true);
 		if (error)
 			goto out;
 
@@ -2547,6 +2546,7 @@ xfs_ifree_cluster(
 	struct xfs_perag	*pag;
 	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
 	xfs_ino_t		inum;
+	int			error;
 
 	inum = xic->first_ino;
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
@@ -2575,12 +2575,11 @@ xfs_ifree_cluster(
 		 * complete before we get a lock on it, and hence we may fail
 		 * to mark all the active inodes on the buffer stale.
 		 */
-		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
-					mp->m_bsize * igeo->blocks_per_cluster,
-					XBF_UNMAPPED);
-
-		if (!bp)
-			return -ENOMEM;
+		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
+				mp->m_bsize * igeo->blocks_per_cluster,
+				XBF_UNMAPPED, &bp);
+		if (error)
+			return error;
 
 		/*
 		 * This buffer may not have been correctly initialised as we
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 7b35d62..d42de92 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -357,6 +357,7 @@ xfs_attrmulti_attr_get(
 {
 	unsigned char		*kbuf;
 	int			error = -EFAULT;
+	size_t			namelen;
 
 	if (*len > XFS_XATTR_SIZE_MAX)
 		return -EINVAL;
@@ -364,7 +365,9 @@ xfs_attrmulti_attr_get(
 	if (!kbuf)
 		return -ENOMEM;
 
-	error = xfs_attr_get(XFS_I(inode), name, &kbuf, (int *)len, flags);
+	namelen = strlen(name);
+	error = xfs_attr_get(XFS_I(inode), name, namelen, &kbuf, (int *)len,
+			     flags);
 	if (error)
 		goto out_kfree;
 
@@ -386,6 +389,7 @@ xfs_attrmulti_attr_set(
 {
 	unsigned char		*kbuf;
 	int			error;
+	size_t			namelen;
 
 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 		return -EPERM;
@@ -396,7 +400,8 @@ xfs_attrmulti_attr_set(
 	if (IS_ERR(kbuf))
 		return PTR_ERR(kbuf);
 
-	error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);
+	namelen = strlen(name);
+	error = xfs_attr_set(XFS_I(inode), name, namelen, kbuf, len, flags);
 	if (!error)
 		xfs_forget_acl(inode, name, flags);
 	kfree(kbuf);
@@ -410,10 +415,12 @@ xfs_attrmulti_attr_remove(
 	uint32_t		flags)
 {
 	int			error;
+	size_t			namelen;
 
 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 		return -EPERM;
-	error = xfs_attr_remove(XFS_I(inode), name, flags);
+	namelen = strlen(name);
+	error = xfs_attr_remove(XFS_I(inode), name, namelen, flags);
 	if (!error)
 		xfs_forget_acl(inode, name, flags);
 	return error;
@@ -462,6 +469,13 @@ xfs_attrmulti_by_handle(
 
 	error = 0;
 	for (i = 0; i < am_hreq.opcount; i++) {
+		if ((ops[i].am_flags & ATTR_ROOT) &&
+		    (ops[i].am_flags & ATTR_SECURE)) {
+			ops[i].am_error = -EINVAL;
+			continue;
+		}
+		ops[i].am_flags &= ~ATTR_KERNEL_FLAGS;
+
 		ops[i].am_error = strncpy_from_user((char *)attr_name,
 				ops[i].am_attrname, MAXNAMELEN);
 		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index c4c4f09..769581a 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -107,7 +107,7 @@ xfs_ioctl32_bstime_copyin(
 	xfs_bstime_t		*bstime,
 	compat_xfs_bstime_t	__user *bstime32)
 {
-	compat_time_t		sec32;	/* tv_sec differs on 64 vs. 32 */
+	old_time32_t		sec32;	/* tv_sec differs on 64 vs. 32 */
 
 	if (get_user(sec32,		&bstime32->tv_sec)	||
 	    get_user(bstime->tv_nsec,	&bstime32->tv_nsec))
@@ -450,6 +450,13 @@ xfs_compat_attrmulti_by_handle(
 
 	error = 0;
 	for (i = 0; i < am_hreq.opcount; i++) {
+		if ((ops[i].am_flags & ATTR_ROOT) &&
+		    (ops[i].am_flags & ATTR_SECURE)) {
+			ops[i].am_error = -EINVAL;
+			continue;
+		}
+		ops[i].am_flags &= ~ATTR_KERNEL_FLAGS;
+
 		ops[i].am_error = strncpy_from_user((char *)attr_name,
 				compat_ptr(ops[i].am_attrname),
 				MAXNAMELEN);
diff --git a/fs/xfs/xfs_ioctl32.h b/fs/xfs/xfs_ioctl32.h
index 8c7743c..053de7d 100644
--- a/fs/xfs/xfs_ioctl32.h
+++ b/fs/xfs/xfs_ioctl32.h
@@ -32,7 +32,7 @@
 #endif
 
 typedef struct compat_xfs_bstime {
-	compat_time_t	tv_sec;		/* seconds		*/
+	old_time32_t	tv_sec;		/* seconds		*/
 	__s32		tv_nsec;	/* and nanoseconds	*/
 } compat_xfs_bstime_t;
 
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 28e2d1f..bb590a2 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -923,7 +923,7 @@ xfs_buffered_write_iomap_begin(
 		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
 
 		/* Trim the mapping to the nearest shared extent boundary. */
-		error = xfs_inode_need_cow(ip, &imap, &shared);
+		error = xfs_bmap_trim_cow(ip, &imap, &shared);
 		if (error)
 			goto out_unlock;
 
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 8afe69c..81f2f93 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -50,8 +50,10 @@ xfs_initxattrs(
 	int			error = 0;
 
 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
-		error = xfs_attr_set(ip, xattr->name, xattr->value,
-				      xattr->value_len, ATTR_SECURE);
+		error = xfs_attr_set(ip, xattr->name,
+				     strlen(xattr->name),
+				     xattr->value, xattr->value_len,
+				     ATTR_SECURE);
 		if (error < 0)
 			break;
 	}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 99ec3fb..25cfc85 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -294,7 +294,7 @@ xlog_recover_iodone(
 		 * this during recovery. One strike!
 		 */
 		if (!XFS_FORCED_SHUTDOWN(bp->b_mount)) {
-			xfs_buf_ioerror_alert(bp, __func__);
+			xfs_buf_ioerror_alert(bp, __this_address);
 			xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
 		}
 	}
@@ -1934,6 +1934,12 @@ xlog_recover_buffer_pass1(
 	struct list_head	*bucket;
 	struct xfs_buf_cancel	*bcp;
 
+	if (!xfs_buf_log_check_iovec(&item->ri_buf[0])) {
+		xfs_err(log->l_mp, "bad buffer log item size (%d)",
+				item->ri_buf[0].i_len);
+		return -EFSCORRUPTED;
+	}
+
 	/*
 	 * If this isn't a cancel buffer item, then just return.
 	 */
@@ -2739,15 +2745,10 @@ xlog_recover_buffer_pass2(
 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
 		buf_flags |= XBF_UNMAPPED;
 
-	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
-			  buf_flags, NULL);
-	if (!bp)
-		return -ENOMEM;
-	error = bp->b_error;
-	if (error) {
-		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
-		goto out_release;
-	}
+	error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
+			  buf_flags, &bp, NULL);
+	if (error)
+		return error;
 
 	/*
 	 * Recover the buffer only if we get an LSN from it and it's less than
@@ -2944,17 +2945,10 @@ xlog_recover_inode_pass2(
 	}
 	trace_xfs_log_recover_inode_recover(log, in_f);
 
-	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
-			  &xfs_inode_buf_ops);
-	if (!bp) {
-		error = -ENOMEM;
+	error = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
+			0, &bp, &xfs_inode_buf_ops);
+	if (error)
 		goto error;
-	}
-	error = bp->b_error;
-	if (error) {
-		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
-		goto out_release;
-	}
 	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
 	dip = xfs_buf_offset(bp, in_f->ilf_boffset);
 
@@ -5633,7 +5627,7 @@ xlog_do_recover(
 	error = xfs_buf_submit(bp);
 	if (error) {
 		if (!XFS_FORCED_SHUTDOWN(mp)) {
-			xfs_buf_ioerror_alert(bp, __func__);
+			xfs_buf_ioerror_alert(bp, __this_address);
 			ASSERT(0);
 		}
 		xfs_buf_relse(bp);
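
As the hunks above show, xfs_buf_read() now returns an error code and hands the buffer back through an out parameter, so callers no longer juggle NULL returns and bp->b_error. A minimal sketch of the new convention, with blkno/numblks standing in for values the caller has already computed:

	struct xfs_buf	*bp;
	int		error;

	error = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0, &bp,
			&xfs_inode_buf_ops);
	if (error)
		return error;

	/* ... use bp ... */
	xfs_buf_relse(bp);
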
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
index b6701b4..5f04d8a 100644
--- a/fs/xfs/xfs_ondisk.h
+++ b/fs/xfs/xfs_ondisk.h
@@ -111,6 +111,7 @@ xfs_check_ondisk_structs(void)
 	XFS_CHECK_STRUCT_SIZE(xfs_dir2_sf_hdr_t,		10);
 
 	/* log structures */
+	XFS_CHECK_STRUCT_SIZE(struct xfs_buf_log_format,	88);
 	XFS_CHECK_STRUCT_SIZE(struct xfs_dq_logformat,		24);
 	XFS_CHECK_STRUCT_SIZE(struct xfs_efd_log_format_32,	28);
 	XFS_CHECK_STRUCT_SIZE(struct xfs_efd_log_format_64,	32);
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 7823af3..4e57edc 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -64,9 +64,9 @@ struct xfs_quotainfo {
 	struct xfs_inode	*qi_pquotaip;	/* project quota inode */
 	struct list_lru	 qi_lru;
 	int		 qi_dquots;
-	time_t		 qi_btimelimit;	 /* limit for blks timer */
-	time_t		 qi_itimelimit;	 /* limit for inodes timer */
-	time_t		 qi_rtbtimelimit;/* limit for rt blks timer */
+	time64_t	 qi_btimelimit;	 /* limit for blks timer */
+	time64_t	 qi_itimelimit;	 /* limit for inodes timer */
+	time64_t	 qi_rtbtimelimit;/* limit for rt blks timer */
 	xfs_qwarncnt_t	 qi_bwarnlimit;	 /* limit for blks warnings */
 	xfs_qwarncnt_t	 qi_iwarnlimit;	 /* limit for inodes warnings */
 	xfs_qwarncnt_t	 qi_rtbwarnlimit;/* limit for rt blks warnings */
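
The grace-period limits become time64_t so they stay correct past 2038 on 32-bit kernels; the matching comparisons (see the xfs_trans_dquot.c hunk below) move from get_seconds() to ktime_get_real_seconds(). A minimal sketch, assuming timer holds a soft-limit expiry in seconds since the epoch:

	time64_t	timer;	/* filled in from the dquot's timer field */

	if (timer != 0 && ktime_get_real_seconds() > timer)
		xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTLONGWARN);
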
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index c7de17d..38669e8 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -37,9 +37,9 @@ xfs_qm_fill_state(
 	tstate->flags |= QCI_SYSFILE;
 	tstate->blocks = ip->i_d.di_nblocks;
 	tstate->nextents = ip->i_d.di_nextents;
-	tstate->spc_timelimit = q->qi_btimelimit;
-	tstate->ino_timelimit = q->qi_itimelimit;
-	tstate->rt_spc_timelimit = q->qi_rtbtimelimit;
+	tstate->spc_timelimit = (u32)q->qi_btimelimit;
+	tstate->ino_timelimit = (u32)q->qi_itimelimit;
+	tstate->rt_spc_timelimit = (u32)q->qi_rtbtimelimit;
 	tstate->spc_warnlimit = q->qi_bwarnlimit;
 	tstate->ino_warnlimit = q->qi_iwarnlimit;
 	tstate->rt_spc_warnlimit = q->qi_rtbwarnlimit;
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index de45123..b0ce04f 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -143,8 +143,6 @@ xfs_reflink_find_shared(
 	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
 	if (error)
 		return error;
-	if (!agbp)
-		return -ENOMEM;
 
 	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
 
@@ -223,8 +221,8 @@ xfs_reflink_trim_around_shared(
 	}
 }
 
-bool
-xfs_inode_need_cow(
+int
+xfs_bmap_trim_cow(
 	struct xfs_inode	*ip,
 	struct xfs_bmbt_irec	*imap,
 	bool			*shared)
@@ -327,7 +325,7 @@ xfs_find_trim_cow_extent(
 	if (cmap->br_startoff > offset_fsb) {
 		xfs_trim_extent(imap, imap->br_startoff,
 				cmap->br_startoff - imap->br_startoff);
-		return xfs_inode_need_cow(ip, imap, shared);
+		return xfs_bmap_trim_cow(ip, imap, shared);
 	}
 
 	*shared = true;
@@ -1457,7 +1455,8 @@ xfs_reflink_clear_inode_flag(
 	 * We didn't find any shared blocks so turn off the reflink flag.
 	 * First, get rid of any leftover CoW mappings.
 	 */
-	error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true);
+	error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, XFS_MAX_FILEOFF,
+			true);
 	if (error)
 		return error;
 
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index d18ad7f..3e4fd46 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -22,7 +22,7 @@ extern int xfs_reflink_find_shared(struct xfs_mount *mp, struct xfs_trans *tp,
 		xfs_agblock_t *fbno, xfs_extlen_t *flen, bool find_maximal);
 extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
 		struct xfs_bmbt_irec *irec, bool *shared);
-bool xfs_inode_need_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
+int xfs_bmap_trim_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
 		bool *shared);
 
 int xfs_reflink_allocate_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index d42b5a2..6209e7b 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -826,12 +826,10 @@ xfs_growfs_rt_alloc(
 			 * Get a buffer for the block.
 			 */
 			d = XFS_FSB_TO_DADDR(mp, fsbno);
-			bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
-				mp->m_bsize, 0);
-			if (bp == NULL) {
-				error = -EIO;
+			error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+					mp->m_bsize, 0, &bp);
+			if (error)
 				goto out_trans_cancel;
-			}
 			memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
 			xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
 			/*
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index d9ae27d..2094386 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -62,7 +62,7 @@ enum {
 	Opt_discard, Opt_nodiscard, Opt_dax,
 };
 
-static const struct fs_parameter_spec xfs_param_specs[] = {
+static const struct fs_parameter_spec xfs_fs_parameters[] = {
 	fsparam_u32("logbufs",		Opt_logbufs),
 	fsparam_string("logbsize",	Opt_logbsize),
 	fsparam_string("logdev",	Opt_logdev),
@@ -106,11 +106,6 @@ static const struct fs_parameter_spec xfs_param_specs[] = {
 	{}
 };
 
-static const struct fs_parameter_description xfs_fs_parameters = {
-	.name		= "xfs",
-	.specs		= xfs_param_specs,
-};
-
 struct proc_xfs_info {
 	uint64_t	flag;
 	char		*str;
@@ -193,32 +188,6 @@ xfs_fs_show_options(
 	return 0;
 }
 
-static uint64_t
-xfs_max_file_offset(
-	unsigned int		blockshift)
-{
-	unsigned int		pagefactor = 1;
-	unsigned int		bitshift = BITS_PER_LONG - 1;
-
-	/* Figure out maximum filesize, on Linux this can depend on
-	 * the filesystem blocksize (on 32 bit platforms).
-	 * __block_write_begin does this in an [unsigned] long long...
-	 *      page->index << (PAGE_SHIFT - bbits)
-	 * So, for page sized blocks (4K on 32 bit platforms),
-	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
-	 *      (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
-	 * but for smaller blocksizes it is less (bbits = log2 bsize).
-	 */
-
-#if BITS_PER_LONG == 32
-	ASSERT(sizeof(sector_t) == 8);
-	pagefactor = PAGE_SIZE;
-	bitshift = BITS_PER_LONG;
-#endif
-
-	return (((uint64_t)pagefactor) << bitshift) - 1;
-}
-
 /*
  * Set parameters for inode allocation heuristics, taking into account
  * filesystem size and inode32/inode64 mount options; i.e. specifically
@@ -1146,7 +1115,7 @@ xfs_fc_parse_param(
 	int			size = 0;
 	int			opt;
 
-	opt = fs_parse(fc, &xfs_fs_parameters, param, &result);
+	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
 
@@ -1424,6 +1393,26 @@ xfs_fc_fill_super(
 	if (error)
 		goto out_free_sb;
 
+	/*
+	 * XFS block mappings use 54 bits to store the logical block offset.
+	 * This should suffice to handle the maximum file size that the VFS
+	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
+	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
+	 * calculation wrong on 32-bit kernels in the past, we check this
+	 * assertion here and refuse to mount if it fails.
+	 * calculation wrong on 32-bit kernels in the past, we check this
+	 * assertion here and refuse to mount if it fails.
+	 *
+	 * Avoid integer overflow by comparing the maximum bmbt offset to the
+	 * maximum pagecache offset in units of fs blocks.
+	 */
+	if (XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE) > XFS_MAX_FILEOFF) {
+		xfs_warn(mp,
+"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
+			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
+			 XFS_MAX_FILEOFF);
+		error = -EINVAL;
+		goto out_free_sb;
+	}
+
 	error = xfs_filestream_mount(mp);
 	if (error)
 		goto out_free_sb;
@@ -1435,7 +1424,7 @@ xfs_fc_fill_super(
 	sb->s_magic = XFS_SUPER_MAGIC;
 	sb->s_blocksize = mp->m_sb.sb_blocksize;
 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
-	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
 	sb->s_max_links = XFS_MAXLINK;
 	sb->s_time_gran = 1;
 	sb->s_time_min = S32_MIN;
@@ -1788,7 +1777,7 @@ static struct file_system_type xfs_fs_type = {
 	.owner			= THIS_MODULE,
 	.name			= "xfs",
 	.init_fs_context	= xfs_init_fs_context,
-	.parameters		= &xfs_fs_parameters,
+	.parameters		= xfs_fs_parameters,
 	.kill_sb		= kill_block_super,
 	.fs_flags		= FS_REQUIRES_DEV,
 };
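
The new mount API no longer wraps the spec array in a struct fs_parameter_description: fs_parse() and file_system_type.parameters take the array directly, as the xfs_super.c hunks above show. A compressed sketch of the pattern for an assumed example filesystem (all example_* names are illustrative, and Opt_logbufs is reused from the enum above):

	static const struct fs_parameter_spec example_fs_parameters[] = {
		fsparam_u32("logbufs",	Opt_logbufs),
		{}
	};

	static int example_parse_param(struct fs_context *fc,
				       struct fs_parameter *param)
	{
		struct fs_parse_result	result;
		int			opt;

		opt = fs_parse(fc, example_fs_parameters, param, &result);
		if (opt < 0)
			return opt;
		/* ... handle opt ... */
		return 0;
	}

	static struct file_system_type example_fs_type = {
		.name		= "example",
		.parameters	= example_fs_parameters, /* the spec array itself */
	};
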
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index a25502b..d762d42 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -53,20 +53,10 @@ xfs_readlink_bmap_ilocked(
 		d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
 		byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
 
-		bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0,
-				  &xfs_symlink_buf_ops);
-		if (!bp)
-			return -ENOMEM;
-		error = bp->b_error;
-		if (error) {
-			xfs_buf_ioerror_alert(bp, __func__);
-			xfs_buf_relse(bp);
-
-			/* bad CRC means corrupted metadata */
-			if (error == -EFSBADCRC)
-				error = -EFSCORRUPTED;
-			goto out;
-		}
+		error = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0,
+				&bp, &xfs_symlink_buf_ops);
+		if (error)
+			return error;
 		byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
 		if (pathlen < byte_cnt)
 			byte_cnt = pathlen;
@@ -290,12 +280,10 @@ xfs_symlink(
 
 			d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
 			byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
-			bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
-					       BTOBB(byte_cnt), 0);
-			if (!bp) {
-				error = -ENOMEM;
+			error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+					       BTOBB(byte_cnt), 0, &bp);
+			if (error)
 				goto out_trans_cancel;
-			}
 			bp->b_ops = &xfs_symlink_buf_ops;
 
 			byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
@@ -433,13 +421,12 @@ xfs_inactive_symlink_rmt(
 	 * Invalidate the block(s). No validation is done.
 	 */
 	for (i = 0; i < nmaps; i++) {
-		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
-			XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
-			XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
-		if (!bp) {
-			error = -ENOMEM;
+		error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+				XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
+				XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0,
+				&bp);
+		if (error)
 			goto error_trans_cancel;
-		}
 		xfs_trans_binval(tp, bp);
 	}
 	/*
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 64d7f17..752c7fe 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -169,21 +169,21 @@ int		xfs_trans_alloc_empty(struct xfs_mount *mp,
 			struct xfs_trans **tpp);
 void		xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
 
-struct xfs_buf	*xfs_trans_get_buf_map(struct xfs_trans *tp,
-				       struct xfs_buftarg *target,
-				       struct xfs_buf_map *map, int nmaps,
-				       uint flags);
+int xfs_trans_get_buf_map(struct xfs_trans *tp, struct xfs_buftarg *target,
+		struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags,
+		struct xfs_buf **bpp);
 
-static inline struct xfs_buf *
+static inline int
 xfs_trans_get_buf(
 	struct xfs_trans	*tp,
 	struct xfs_buftarg	*target,
 	xfs_daddr_t		blkno,
 	int			numblks,
-	uint			flags)
+	uint			flags,
+	struct xfs_buf		**bpp)
 {
 	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
-	return xfs_trans_get_buf_map(tp, target, &map, 1, flags);
+	return xfs_trans_get_buf_map(tp, target, &map, 1, flags, bpp);
 }
 
 int		xfs_trans_read_buf_map(struct xfs_mount *mp,
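
xfs_trans_get_buf() follows the same conversion as the read side: an int return plus an output buffer pointer. A minimal caller sketch, with blkno/numblks standing in for caller-computed values:

	struct xfs_buf	*bp;
	int		error;

	error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, numblks, 0, &bp);
	if (error)
		goto out_trans_cancel;

	xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
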
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index b5b3a78..08174ff 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -112,19 +112,22 @@ xfs_trans_bjoin(
  * If the transaction pointer is NULL, make this just a normal
  * get_buf() call.
  */
-struct xfs_buf *
+int
 xfs_trans_get_buf_map(
 	struct xfs_trans	*tp,
 	struct xfs_buftarg	*target,
 	struct xfs_buf_map	*map,
 	int			nmaps,
-	xfs_buf_flags_t		flags)
+	xfs_buf_flags_t		flags,
+	struct xfs_buf		**bpp)
 {
 	xfs_buf_t		*bp;
 	struct xfs_buf_log_item	*bip;
+	int			error;
 
+	*bpp = NULL;
 	if (!tp)
-		return xfs_buf_get_map(target, map, nmaps, flags);
+		return xfs_buf_get_map(target, map, nmaps, flags, bpp);
 
 	/*
 	 * If we find the buffer in the cache with this transaction
@@ -146,19 +149,20 @@ xfs_trans_get_buf_map(
 		ASSERT(atomic_read(&bip->bli_refcount) > 0);
 		bip->bli_recur++;
 		trace_xfs_trans_get_buf_recur(bip);
-		return bp;
+		*bpp = bp;
+		return 0;
 	}
 
-	bp = xfs_buf_get_map(target, map, nmaps, flags);
-	if (bp == NULL) {
-		return NULL;
-	}
+	error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
+	if (error)
+		return error;
 
 	ASSERT(!bp->b_error);
 
 	_xfs_trans_bjoin(tp, bp, 1);
 	trace_xfs_trans_get_buf(bp->b_log_item);
-	return bp;
+	*bpp = bp;
+	return 0;
 }
 
 /*
@@ -276,7 +280,7 @@ xfs_trans_read_buf_map(
 		ASSERT(bp->b_ops != NULL);
 		error = xfs_buf_reverify(bp, ops);
 		if (error) {
-			xfs_buf_ioerror_alert(bp, __func__);
+			xfs_buf_ioerror_alert(bp, __return_address);
 
 			if (tp->t_flags & XFS_TRANS_DIRTY)
 				xfs_force_shutdown(tp->t_mountp,
@@ -298,36 +302,17 @@ xfs_trans_read_buf_map(
 		return 0;
 	}
 
-	bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
-	if (!bp) {
-		if (!(flags & XBF_TRYLOCK))
-			return -ENOMEM;
-		return tp ? 0 : -EAGAIN;
-	}
-
-	/*
-	 * If we've had a read error, then the contents of the buffer are
-	 * invalid and should not be used. To ensure that a followup read tries
-	 * to pull the buffer from disk again, we clear the XBF_DONE flag and
-	 * mark the buffer stale. This ensures that anyone who has a current
-	 * reference to the buffer will interpret it's contents correctly and
-	 * future cache lookups will also treat it as an empty, uninitialised
-	 * buffer.
-	 */
-	if (bp->b_error) {
-		error = bp->b_error;
-		if (!XFS_FORCED_SHUTDOWN(mp))
-			xfs_buf_ioerror_alert(bp, __func__);
-		bp->b_flags &= ~XBF_DONE;
-		xfs_buf_stale(bp);
-
+	error = xfs_buf_read_map(target, map, nmaps, flags, &bp, ops,
+			__return_address);
+	switch (error) {
+	case 0:
+		break;
+	default:
 		if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
 			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
-		xfs_buf_relse(bp);
-
-		/* bad CRC means corrupted metadata */
-		if (error == -EFSBADCRC)
-			error = -EFSCORRUPTED;
+		/* fall through */
+	case -ENOMEM:
+	case -EAGAIN:
 		return error;
 	}
 
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index a6fe2d8..d1b9869 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -580,7 +580,7 @@ xfs_trans_dqresv(
 {
 	xfs_qcnt_t		hardlimit;
 	xfs_qcnt_t		softlimit;
-	time_t			timer;
+	time64_t		timer;
 	xfs_qwarncnt_t		warns;
 	xfs_qwarncnt_t		warnlimit;
 	xfs_qcnt_t		total_count;
@@ -635,7 +635,8 @@ xfs_trans_dqresv(
 				goto error_return;
 			}
 			if (softlimit && total_count > softlimit) {
-				if ((timer != 0 && get_seconds() > timer) ||
+				if ((timer != 0 &&
+				     ktime_get_real_seconds() > timer) ||
 				    (warns != 0 && warns >= warnlimit)) {
 					xfs_quota_warn(mp, dqp,
 						       QUOTA_NL_BSOFTLONGWARN);
@@ -662,7 +663,8 @@ xfs_trans_dqresv(
 				goto error_return;
 			}
 			if (softlimit && total_count > softlimit) {
-				if  ((timer != 0 && get_seconds() > timer) ||
+				if  ((timer != 0 &&
+				      ktime_get_real_seconds() > timer) ||
 				     (warns != 0 && warns >= warnlimit)) {
 					xfs_quota_warn(mp, dqp,
 						       QUOTA_NL_ISOFTLONGWARN);
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 383f020..b0fedb5 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -24,6 +24,7 @@ xfs_xattr_get(const struct xattr_handler *handler, struct dentry *unused,
 	int xflags = handler->flags;
 	struct xfs_inode *ip = XFS_I(inode);
 	int error, asize = size;
+	size_t namelen = strlen(name);
 
 	/* Convert Linux syscall to XFS internal ATTR flags */
 	if (!size) {
@@ -31,7 +32,8 @@ xfs_xattr_get(const struct xattr_handler *handler, struct dentry *unused,
 		value = NULL;
 	}
 
-	error = xfs_attr_get(ip, name, (unsigned char **)&value, &asize, xflags);
+	error = xfs_attr_get(ip, name, namelen, (unsigned char **)&value,
+			     &asize, xflags);
 	if (error)
 		return error;
 	return asize;
@@ -67,6 +69,7 @@ xfs_xattr_set(const struct xattr_handler *handler, struct dentry *unused,
 	int			xflags = handler->flags;
 	struct xfs_inode	*ip = XFS_I(inode);
 	int			error;
+	size_t			namelen = strlen(name);
 
 	/* Convert Linux syscall to XFS internal ATTR flags */
 	if (flags & XATTR_CREATE)
@@ -74,10 +77,11 @@ xfs_xattr_set(const struct xattr_handler *handler, struct dentry *unused,
 	if (flags & XATTR_REPLACE)
 		xflags |= ATTR_REPLACE;
 
-	if (!value)
-		return xfs_attr_remove(ip, (unsigned char *)name, xflags);
-	error = xfs_attr_set(ip, (unsigned char *)name,
-				(void *)value, size, xflags);
+	if (value)
+		error = xfs_attr_set(ip, name, namelen, (void *)value, size,
+				xflags);
+	else
+		error = xfs_attr_remove(ip, name, namelen, xflags);
 	if (!error)
 		xfs_forget_acl(inode, name, xflags);
 
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index ddfee1b..cd17d50 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -4,5 +4,6 @@
 # (This file is not included when SRCARCH=um since UML borrows several
 # asm headers from the host architecture.)
 
+mandatory-y += dma-contiguous.h
 mandatory-y += msi.h
 mandatory-y += simd.h
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index afddc54..365345f 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -27,9 +27,11 @@
 .endm
 
 /*
- * note on .section use: @progbits vs %progbits nastiness doesn't matter,
- * since we immediately emit into those sections anyway.
+ * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
+ * section flag requires it. Use '%progbits' instead of '@progbits' since the
+ * former apparently works on all arches according to the binutils source.
  */
+
 .macro ___EXPORT_SYMBOL name,val,sec
 #ifdef CONFIG_MODULES
 	.section ___ksymtab\sec+\name,"a"
@@ -37,7 +39,7 @@
 __ksymtab_\name:
 	__put \val, __kstrtab_\name
 	.previous
-	.section __ksymtab_strings,"a"
+	.section __ksymtab_strings,"aMS",%progbits,1
 __kstrtab_\name:
 	.asciz "\name"
 	.previous
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 6736ed2..4dbb177 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -22,11 +22,6 @@ static inline void arch_unmap(struct mm_struct *mm,
 {
 }
 
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
-				     struct vm_area_struct *vma)
-{
-}
-
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 		bool write, bool execute, bool foreign)
 {
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index c2de013..35e4a53 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -74,7 +74,7 @@ do {									\
 
 #define raw_cpu_generic_add_return(pcp, val)				\
 ({									\
-	typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp));			\
+	typeof(pcp) *__p = raw_cpu_ptr(&(pcp));				\
 									\
 	*__p += val;							\
 	*__p;								\
@@ -82,7 +82,7 @@ do {									\
 
 #define raw_cpu_generic_xchg(pcp, nval)					\
 ({									\
-	typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp));			\
+	typeof(pcp) *__p = raw_cpu_ptr(&(pcp));				\
 	typeof(pcp) __ret;						\
 	__ret = *__p;							\
 	*__p = nval;							\
@@ -91,7 +91,7 @@ do {									\
 
 #define raw_cpu_generic_cmpxchg(pcp, oval, nval)			\
 ({									\
-	typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp));			\
+	typeof(pcp) *__p = raw_cpu_ptr(&(pcp));				\
 	typeof(pcp) __ret;						\
 	__ret = *__p;							\
 	if (__ret == (oval))						\
@@ -101,8 +101,8 @@ do {									\
 
 #define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
 ({									\
-	typeof(&(pcp1)) __p1 = raw_cpu_ptr(&(pcp1));			\
-	typeof(&(pcp2)) __p2 = raw_cpu_ptr(&(pcp2));			\
+	typeof(pcp1) *__p1 = raw_cpu_ptr(&(pcp1));			\
+	typeof(pcp2) *__p2 = raw_cpu_ptr(&(pcp2));			\
 	int __ret = 0;							\
 	if (*__p1 == (oval1) && *__p2  == (oval2)) {			\
 		*__p1 = nval1;						\
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 798ea36..e2e2bef 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1238,4 +1238,24 @@ static inline bool arch_has_pfn_modify_check(void)
 #define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
 #endif
 
+/*
+ * p?d_leaf() - true if this entry is a final mapping to a physical address.
+ * This differs from p?d_huge() by the fact that they are always available (if
+ * the architecture supports large pages at the appropriate level) even
+ * if CONFIG_HUGETLB_PAGE is not defined.
+ * Only meaningful when called on a valid entry.
+ */
+#ifndef pgd_leaf
+#define pgd_leaf(x)	0
+#endif
+#ifndef p4d_leaf
+#define p4d_leaf(x)	0
+#endif
+#ifndef pud_leaf
+#define pud_leaf(x)	0
+#endif
+#ifndef pmd_leaf
+#define pmd_leaf(x)	0
+#endif
+
 #endif /* _ASM_GENERIC_PGTABLE_H */
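
Because the fallbacks evaluate to 0, generic page-table walkers can test for leaf entries without #ifdefs. A hedged sketch of a walker step at the PMD level (handle_leaf is a hypothetical callback, not a kernel API):

	if (pmd_leaf(*pmd)) {
		/* Entry maps a physical range directly; do not descend. */
		handle_leaf(addr, pmd);
		return 0;
	}
	/* otherwise walk the PTE level as usual */
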
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 2b10036..f391f6b 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -56,6 +56,15 @@
  *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
  *    there's large holes between the VMAs.
  *
+ *  - tlb_remove_table()
+ *
+ *    tlb_remove_table() is the basic primitive to free page-table directories
+ *    (__p*_free_tlb()).  In its most primitive form it is an alias for
+ *    tlb_remove_page() below, for when page directories are pages and have no
+ *    additional constraints.
+ *
+ *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
+ *
  *  - tlb_remove_page() / __tlb_remove_page()
  *  - tlb_remove_page_size() / __tlb_remove_page_size()
  *
@@ -121,65 +130,53 @@
  *
  * Additionally there are a few opt-in features:
  *
- *  HAVE_MMU_GATHER_PAGE_SIZE
+ *  MMU_GATHER_PAGE_SIZE
  *
  *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
  *  changes the size and provides mmu_gather::page_size to tlb_flush().
  *
- *  HAVE_RCU_TABLE_FREE
+ *  This might be useful if your architecture has size specific TLB
+ *  invalidation instructions.
+ *
+ *  MMU_GATHER_TABLE_FREE
  *
  *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
- *  for page directores (__p*_free_tlb()). This provides separate freeing of
- *  the page-table pages themselves in a semi-RCU fashion (see comment below).
- *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
- *  and therefore doesn't naturally serialize with software page-table walkers.
+ *  for page directories (__p*_free_tlb()).
+ *
+ *  Useful if your architecture has non-page page directories.
  *
  *  When used, an architecture is expected to provide __tlb_remove_table()
  *  which does the actual freeing of these pages.
  *
- *  HAVE_RCU_TABLE_NO_INVALIDATE
+ *  MMU_GATHER_RCU_TABLE_FREE
  *
- *  This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
- *  freeing the page-table pages. This can be avoided if you use
- *  HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
- *  page-tables natively.
+ *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
+ *  comment below).
+ *
+ *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
+ *  and therefore doesn't naturally serialize with software page-table walkers.
  *
  *  MMU_GATHER_NO_RANGE
  *
  *  Use this if your architecture lacks an efficient flush_tlb_range().
+ *
+ *  MMU_GATHER_NO_GATHER
+ *
+ *  If the option is set the mmu_gather will not track individual pages for
+ *  delayed page free anymore. A platform that enables the option needs to
+ *  provide its own implementation of the __tlb_remove_page_size() function to
+ *  free pages.
+ *
+ *  This is useful if your architecture already flushes TLB entries in the
+ *  various ptep_get_and_clear() functions.
  */
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-/*
- * Semi RCU freeing of the page directories.
- *
- * This is needed by some architectures to implement software pagetable walkers.
- *
- * gup_fast() and other software pagetable walkers do a lockless page-table
- * walk and therefore needs some synchronization with the freeing of the page
- * directories. The chosen means to accomplish that is by disabling IRQs over
- * the walk.
- *
- * Architectures that use IPIs to flush TLBs will then automagically DTRT,
- * since we unlink the page, flush TLBs, free the page. Since the disabling of
- * IRQs delays the completion of the TLB flush we can never observe an already
- * freed page.
- *
- * Architectures that do not have this (PPC) need to delay the freeing by some
- * other means, this is that means.
- *
- * What we do is batch the freed directory pages (tables) and RCU free them.
- * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
- * holds off grace periods.
- *
- * However, in order to batch these pages we need to allocate storage, this
- * allocation is deep inside the MM code and can thus easily fail on memory
- * pressure. To guarantee progress we fall back to single table freeing, see
- * the implementation of tlb_remove_table_one().
- *
- */
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE
+
 struct mmu_table_batch {
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
 	struct rcu_head		rcu;
+#endif
 	unsigned int		nr;
 	void			*tables[0];
 };
@@ -189,9 +186,35 @@ struct mmu_table_batch {
 
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
+#else /* !CONFIG_MMU_GATHER_TABLE_FREE */
+
+/*
+ * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
+ * page directories and we can use the normal page batching to free them.
+ */
+#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
+
+#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
+
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
+/*
+ * This allows an architecture that does not use the Linux page-tables for
+ * hardware to skip the TLBI when freeing page tables.
+ */
+#ifndef tlb_needs_table_invalidate
+#define tlb_needs_table_invalidate() (true)
 #endif
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#else
+
+#ifdef tlb_needs_table_invalidate
+#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
+#endif
+
+#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
+
+
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -227,7 +250,7 @@ extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
 struct mmu_gather {
 	struct mm_struct	*mm;
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE
 	struct mmu_table_batch	*batch;
 #endif
 
@@ -266,22 +289,18 @@ struct mmu_gather {
 
 	unsigned int		batch_count;
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
 
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
 	unsigned int page_size;
 #endif
 #endif
 };
 
-void arch_tlb_gather_mmu(struct mmu_gather *tlb,
-	struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
-			 unsigned long start, unsigned long end, bool force);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address,
@@ -394,7 +413,12 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
 
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-	if (!tlb->end)
+	/*
+	 * Anything calling __tlb_adjust_range() also sets at least one of
+	 * these bits.
+	 */
+	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
+	      tlb->cleared_puds || tlb->cleared_p4ds))
 		return;
 
 	tlb_flush(tlb);
@@ -426,7 +450,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void tlb_change_page_size(struct mmu_gather *tlb,
 						     unsigned int page_size)
 {
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
 	if (tlb->page_size && tlb->page_size != page_size) {
 		if (!tlb->fullmm && !tlb->need_flush_all)
 			tlb_flush_mmu(tlb);
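
Under the renamed options, only architectures selecting MMU_GATHER_TABLE_FREE need to supply __tlb_remove_table(); everyone else gets the tlb_remove_page() alias shown above. An illustrative arch-side definition, assuming page-backed PMD directories (real architectures usually also run the pgtable destructor first):

	#define __pmd_free_tlb(tlb, pmdp, addr)			\
		tlb_remove_table((tlb), virt_to_page(pmdp))
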
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index 94cc64a..b0e390b 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -19,6 +19,13 @@ struct dw_mipi_dsi;
 struct mipi_dsi_device;
 struct platform_device;
 
+struct dw_mipi_dsi_dphy_timing {
+	u16 data_hs2lp;
+	u16 data_lp2hs;
+	u16 clk_hs2lp;
+	u16 clk_lp2hs;
+};
+
 struct dw_mipi_dsi_phy_ops {
 	int (*init)(void *priv_data);
 	void (*power_on)(void *priv_data);
@@ -27,6 +34,8 @@ struct dw_mipi_dsi_phy_ops {
 			     const struct drm_display_mode *mode,
 			     unsigned long mode_flags, u32 lanes, u32 format,
 			     unsigned int *lane_mbps);
+	int (*get_timing)(void *priv_data, unsigned int lane_mbps,
+			  struct dw_mipi_dsi_dphy_timing *timing);
 };
 
 struct dw_mipi_dsi_host_ops {
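
The new get_timing op lets platform glue report D-PHY HS/LP transition times back to the bridge driver. A hedged sketch of an implementation; the example_* names and the numeric values are placeholders that a real driver would derive from lane_mbps:

	static int example_dphy_get_timing(void *priv_data, unsigned int lane_mbps,
					   struct dw_mipi_dsi_dphy_timing *timing)
	{
		timing->clk_lp2hs = 0x40;	/* placeholder */
		timing->clk_hs2lp = 0x40;	/* placeholder */
		timing->data_lp2hs = 0x10;	/* placeholder */
		timing->data_hs2lp = 0x14;	/* placeholder */
		return 0;
	}

	static const struct dw_mipi_dsi_phy_ops example_phy_ops = {
		/* .init, .power_on, .power_off, .get_lane_mbps as before */
		.get_timing	= example_dphy_get_timing,
	};
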
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 927e120..951dfb1 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -35,7 +35,7 @@
  * struct drm_crtc_commit - track modeset commits on a CRTC
  *
  * This structure is used to track pending modeset changes and atomic commit on
- * a per-CRTC basis. Since updating the list should never block this structure
+ * a per-CRTC basis. Since updating the list should never block, this structure
  * is reference counted to allow waiters to safely wait on an event to complete,
  * without holding any locks.
  *
@@ -60,8 +60,8 @@
  * 	wait for flip_done		<----
  * 	clean up atomic state
  *
- * The important bit to know is that cleanup_done is the terminal event, but the
- * ordering between flip_done and hw_done is entirely up to the specific driver
+ * The important bit to know is that &cleanup_done is the terminal event, but the
+ * ordering between &flip_done and &hw_done is entirely up to the specific driver
  * and modeset state change.
  *
  * For an implementation of how to use this look at
@@ -92,6 +92,9 @@ struct drm_crtc_commit {
 	 * commit is sent to userspace, or when an out-fence is signalled. Note
 	 * that for most hardware, in most cases this happens after @hw_done is
 	 * signalled.
+	 *
+	 * Completion of this stage is signalled implicitly by calling
+	 * drm_crtc_send_vblank_event() on &drm_crtc_state.event.
 	 */
 	struct completion flip_done;
 
@@ -107,6 +110,9 @@ struct drm_crtc_commit {
 	 * Note that this does not need to include separately reference-counted
 	 * resources like backing storage buffer pinning, or runtime pm
 	 * management.
+	 *
+	 * Drivers should call drm_atomic_helper_commit_hw_done() to signal
+	 * completion of this stage.
 	 */
 	struct completion hw_done;
 
@@ -118,6 +124,9 @@ struct drm_crtc_commit {
 	 * a vblank wait completed it might be a bit later. This completion is
 	 * useful to throttle updates and avoid hardware updates getting ahead
 	 * of the buffer cleanup too much.
+	 *
+	 * Drivers should call drm_atomic_helper_commit_cleanup_done() to signal
+	 * completion of this stage.
 	 */
 	struct completion cleanup_done;
 
@@ -354,7 +363,7 @@ struct drm_atomic_state {
 	 * When a connector or plane is not bound to any CRTC, it's still important
 	 * to preserve linearity to prevent the atomic states from being freed too early.
 	 *
-	 * This commit (if set) is not bound to any crtc, but will be completed when
+	 * This commit (if set) is not bound to any CRTC, but will be completed when
 	 * drm_atomic_helper_commit_hw_done() is called.
 	 */
 	struct drm_crtc_commit *fake_commit;
@@ -467,12 +476,12 @@ drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
 					 struct drm_encoder *encoder);
 
 /**
- * drm_atomic_get_existing_crtc_state - get crtc state, if it exists
+ * drm_atomic_get_existing_crtc_state - get CRTC state, if it exists
  * @state: global atomic state object
- * @crtc: crtc to grab
+ * @crtc: CRTC to grab
  *
- * This function returns the crtc state for the given crtc, or NULL
- * if the crtc is not part of the global atomic state.
+ * This function returns the CRTC state for the given CRTC, or NULL
+ * if the CRTC is not part of the global atomic state.
  *
  * This function is deprecated, @drm_atomic_get_old_crtc_state or
  * @drm_atomic_get_new_crtc_state should be used instead.
@@ -485,12 +494,12 @@ drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
 }
 
 /**
- * drm_atomic_get_old_crtc_state - get old crtc state, if it exists
+ * drm_atomic_get_old_crtc_state - get old CRTC state, if it exists
  * @state: global atomic state object
- * @crtc: crtc to grab
+ * @crtc: CRTC to grab
  *
- * This function returns the old crtc state for the given crtc, or
- * NULL if the crtc is not part of the global atomic state.
+ * This function returns the old CRTC state for the given CRTC, or
+ * NULL if the CRTC is not part of the global atomic state.
  */
 static inline struct drm_crtc_state *
 drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
@@ -499,12 +508,12 @@ drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
 	return state->crtcs[drm_crtc_index(crtc)].old_state;
 }
 /**
- * drm_atomic_get_new_crtc_state - get new crtc state, if it exists
+ * drm_atomic_get_new_crtc_state - get new CRTC state, if it exists
  * @state: global atomic state object
- * @crtc: crtc to grab
+ * @crtc: CRTC to grab
  *
- * This function returns the new crtc state for the given crtc, or
- * NULL if the crtc is not part of the global atomic state.
+ * This function returns the new CRTC state for the given CRTC, or
+ * NULL if the CRTC is not part of the global atomic state.
  */
 static inline struct drm_crtc_state *
 drm_atomic_get_new_crtc_state(struct drm_atomic_state *state,
@@ -693,6 +702,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
 	     (__i)++)								\
 		for_each_if ((__state)->connectors[__i].ptr &&			\
 			     ((connector) = (__state)->connectors[__i].ptr,	\
+			     (void)(connector) /* Only to avoid unused-but-set-variable warning */, \
 			     (old_connector_state) = (__state)->connectors[__i].old_state,	\
 			     (new_connector_state) = (__state)->connectors[__i].new_state, 1))
 
@@ -714,6 +724,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
 	     (__i)++)								\
 		for_each_if ((__state)->connectors[__i].ptr &&			\
 			     ((connector) = (__state)->connectors[__i].ptr,	\
+			     (void)(connector) /* Only to avoid unused-but-set-variable warning */, \
 			     (old_connector_state) = (__state)->connectors[__i].old_state, 1))
 
 /**
@@ -734,7 +745,9 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
 	     (__i)++)								\
 		for_each_if ((__state)->connectors[__i].ptr &&			\
 			     ((connector) = (__state)->connectors[__i].ptr,	\
-			     (new_connector_state) = (__state)->connectors[__i].new_state, 1))
+			     (void)(connector) /* Only to avoid unused-but-set-variable warning */, \
+			     (new_connector_state) = (__state)->connectors[__i].new_state, \
+			     (void)(new_connector_state) /* Only to avoid unused-but-set-variable warning */, 1))
 
 /**
  * for_each_oldnew_crtc_in_state - iterate over all CRTCs in an atomic update
@@ -754,7 +767,9 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
 	     (__i)++)							\
 		for_each_if ((__state)->crtcs[__i].ptr &&		\
 			     ((crtc) = (__state)->crtcs[__i].ptr,	\
+			      (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \
 			     (old_crtc_state) = (__state)->crtcs[__i].old_state, \
+			     (void)(old_crtc_state) /* Only to avoid unused-but-set-variable warning */, \
 			     (new_crtc_state) = (__state)->crtcs[__i].new_state, 1))
 
 /**
@@ -793,7 +808,9 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
 	     (__i)++)							\
 		for_each_if ((__state)->crtcs[__i].ptr &&		\
 			     ((crtc) = (__state)->crtcs[__i].ptr,	\
-			     (new_crtc_state) = (__state)->crtcs[__i].new_state, 1))
+			     (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \
+			     (new_crtc_state) = (__state)->crtcs[__i].new_state, \
+			     (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1))
 
 /**
  * for_each_oldnew_plane_in_state - iterate over all planes in an atomic update
@@ -813,6 +830,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
 	     (__i)++)							\
 		for_each_if ((__state)->planes[__i].ptr &&		\
 			     ((plane) = (__state)->planes[__i].ptr,	\
+			      (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
 			      (old_plane_state) = (__state)->planes[__i].old_state,\
 			      (new_plane_state) = (__state)->planes[__i].new_state, 1))
 
@@ -873,7 +891,9 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
 	     (__i)++)							\
 		for_each_if ((__state)->planes[__i].ptr &&		\
 			     ((plane) = (__state)->planes[__i].ptr,	\
-			      (new_plane_state) = (__state)->planes[__i].new_state, 1))
+			      (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
+			      (new_plane_state) = (__state)->planes[__i].new_state, \
+			      (void)(new_plane_state) /* Only to avoid unused-but-set-variable warning */, 1))
 
 /**
  * for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update
@@ -958,11 +978,11 @@ drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state)
 }
 
 /**
- * drm_atomic_crtc_effectively_active - compute whether crtc is actually active
+ * drm_atomic_crtc_effectively_active - compute whether CRTC is actually active
  * @state: &drm_crtc_state for the CRTC
  *
  * When in self refresh mode, the crtc_state->active value will be false, since
- * the crtc is off. However in some cases we're interested in whether the crtc
+ * the CRTC is off. However in some cases we're interested in whether the CRTC
  * is active, or effectively active (ie: it's connected to an active display).
  * In these cases, use this function instead of just checking active.
  */
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index bf4e071..9db3cac 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -152,7 +152,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
 /**
  * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
  * @plane: the loop cursor
- * @crtc:  the crtc whose planes are iterated
+ * @crtc:  the CRTC whose planes are iterated
  *
  * This iterates over the current state, useful (for example) when applying
  * atomic state after it has been checked and swapped.  To iterate over the
@@ -166,7 +166,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
 /**
  * drm_crtc_atomic_state_for_each_plane - iterate over attached planes in new state
  * @plane: the loop cursor
- * @crtc_state: the incoming crtc-state
+ * @crtc_state: the incoming CRTC state
  *
  * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
  * attached if the specified state is applied.  Useful during for example
@@ -180,7 +180,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
  * drm_crtc_atomic_state_for_each_plane_state - iterate over attached planes in new state
  * @plane: the loop cursor
  * @plane_state: loop cursor for the plane's state, must be const
- * @crtc_state: the incoming crtc-state
+ * @crtc_state: the incoming CRTC state
  *
  * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
  * attached if the specified state is applied.  Useful during for example
@@ -189,7 +189,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
  *
  * Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a
  * const plane_state. This is useful when a driver just wants to peek at other
- * active planes on this crtc, but does not need to change it.
+ * active planes on this CRTC, but does not need to change it.
  */
 #define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \
 	drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index e4577cc..8171dea 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -37,6 +37,8 @@ struct drm_private_state;
 struct drm_modeset_acquire_ctx;
 struct drm_device;
 
+void __drm_atomic_helper_crtc_state_reset(struct drm_crtc_state *state,
+					  struct drm_crtc *crtc);
 void __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
 				    struct drm_crtc_state *state);
 void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
@@ -48,6 +50,8 @@ void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
 void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
 					  struct drm_crtc_state *state);
 
+void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *state,
+					   struct drm_plane *plane);
 void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
 				     struct drm_plane_state *state);
 void drm_atomic_helper_plane_reset(struct drm_plane *plane);
@@ -59,6 +63,8 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
 void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
 					  struct drm_plane_state *state);
 
+void __drm_atomic_helper_connector_state_reset(struct drm_connector_state *conn_state,
+					       struct drm_connector *connector);
 void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
 					 struct drm_connector_state *conn_state);
 void drm_atomic_helper_connector_reset(struct drm_connector *connector);
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index c0a2286..694e153 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -25,6 +25,7 @@
 
 #include <linux/list.h>
 #include <linux/ctype.h>
+#include <drm/drm_encoder.h>
 #include <drm/drm_mode_object.h>
 #include <drm/drm_modes.h>
 
@@ -254,14 +255,15 @@ struct drm_bridge_funcs {
 	 * there is one) when this callback is called.
 	 *
 	 * Note that this function will only be invoked in the context of an
-	 * atomic commit. It will not be invoked from &drm_bridge_pre_enable. It
-	 * would be prudent to also provide an implementation of @pre_enable if
-	 * you are expecting driver calls into &drm_bridge_pre_enable.
+	 * atomic commit. It will not be invoked from
+	 * &drm_bridge_chain_pre_enable. It would be prudent to also provide an
+	 * implementation of @pre_enable if you are expecting driver calls into
+	 * &drm_bridge_chain_pre_enable.
 	 *
 	 * The @atomic_pre_enable callback is optional.
 	 */
 	void (*atomic_pre_enable)(struct drm_bridge *bridge,
-				  struct drm_atomic_state *state);
+				  struct drm_atomic_state *old_state);
 
 	/**
 	 * @atomic_enable:
@@ -279,14 +281,14 @@ struct drm_bridge_funcs {
 	 * chain if there is one.
 	 *
 	 * Note that this function will only be invoked in the context of an
-	 * atomic commit. It will not be invoked from &drm_bridge_enable. It
-	 * would be prudent to also provide an implementation of @enable if
-	 * you are expecting driver calls into &drm_bridge_enable.
+	 * atomic commit. It will not be invoked from &drm_bridge_chain_enable.
+	 * It would be prudent to also provide an implementation of @enable if
+	 * you are expecting driver calls into &drm_bridge_chain_enable.
 	 *
 	 * The @atomic_enable callback is optional.
 	 */
 	void (*atomic_enable)(struct drm_bridge *bridge,
-			      struct drm_atomic_state *state);
+			      struct drm_atomic_state *old_state);
 	/**
 	 * @atomic_disable:
 	 *
@@ -301,14 +303,15 @@ struct drm_bridge_funcs {
 	 * signals) feeding it is still running when this callback is called.
 	 *
 	 * Note that this function will only be invoked in the context of an
-	 * atomic commit. It will not be invoked from &drm_bridge_disable. It
-	 * would be prudent to also provide an implementation of @disable if
-	 * you are expecting driver calls into &drm_bridge_disable.
+	 * atomic commit. It will not be invoked from
+	 * &drm_bridge_chain_disable. It would be prudent to also provide an
+	 * implementation of @disable if you are expecting driver calls into
+	 * &drm_bridge_chain_disable.
 	 *
 	 * The @atomic_disable callback is optional.
 	 */
 	void (*atomic_disable)(struct drm_bridge *bridge,
-			       struct drm_atomic_state *state);
+			       struct drm_atomic_state *old_state);
 
 	/**
 	 * @atomic_post_disable:
@@ -325,15 +328,16 @@ struct drm_bridge_funcs {
 	 * called.
 	 *
 	 * Note that this function will only be invoked in the context of an
-	 * atomic commit. It will not be invoked from &drm_bridge_post_disable.
+	 * atomic commit. It will not be invoked from
+	 * &drm_bridge_chain_post_disable.
 	 * It would be prudent to also provide an implementation of
 	 * @post_disable if you are expecting driver calls into
-	 * &drm_bridge_post_disable.
+	 * &drm_bridge_chain_post_disable.
 	 *
 	 * The @atomic_post_disable callback is optional.
 	 */
 	void (*atomic_post_disable)(struct drm_bridge *bridge,
-				    struct drm_atomic_state *state);
+				    struct drm_atomic_state *old_state);
 };
 
 /**
@@ -380,8 +384,8 @@ struct drm_bridge {
 	struct drm_device *dev;
 	/** @encoder: encoder to which this bridge is connected */
 	struct drm_encoder *encoder;
-	/** @next: the next bridge in the encoder chain */
-	struct drm_bridge *next;
+	/** @chain_node: used to form a bridge chain */
+	struct list_head chain_node;
 #ifdef CONFIG_OF
 	/** @of_node: device node pointer to the bridge */
 	struct device_node *of_node;
@@ -406,27 +410,86 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np);
 int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
 		      struct drm_bridge *previous);
 
-bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
-			   const struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode);
-enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
-					   const struct drm_display_mode *mode);
-void drm_bridge_disable(struct drm_bridge *bridge);
-void drm_bridge_post_disable(struct drm_bridge *bridge);
-void drm_bridge_mode_set(struct drm_bridge *bridge,
-			 const struct drm_display_mode *mode,
-			 const struct drm_display_mode *adjusted_mode);
-void drm_bridge_pre_enable(struct drm_bridge *bridge);
-void drm_bridge_enable(struct drm_bridge *bridge);
+/**
+ * drm_bridge_get_next_bridge() - Get the next bridge in the chain
+ * @bridge: bridge object
+ *
+ * RETURNS:
+ * the next bridge in the chain after @bridge, or NULL if @bridge is the last.
+ */
+static inline struct drm_bridge *
+drm_bridge_get_next_bridge(struct drm_bridge *bridge)
+{
+	if (list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain))
+		return NULL;
 
-void drm_atomic_bridge_disable(struct drm_bridge *bridge,
-			       struct drm_atomic_state *state);
-void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
+	return list_next_entry(bridge, chain_node);
+}
+
+/**
+ * drm_bridge_get_prev_bridge() - Get the previous bridge in the chain
+ * @bridge: bridge object
+ *
+ * RETURNS:
+ * the previous bridge in the chain, or NULL if @bridge is the first.
+ */
+static inline struct drm_bridge *
+drm_bridge_get_prev_bridge(struct drm_bridge *bridge)
+{
+	if (list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain))
+		return NULL;
+
+	return list_prev_entry(bridge, chain_node);
+}
+
+/**
+ * drm_bridge_chain_get_first_bridge() - Get the first bridge in the chain
+ * @encoder: encoder object
+ *
+ * RETURNS:
+ * the first bridge in the chain, or NULL if @encoder has no bridge attached
+ * to it.
+ */
+static inline struct drm_bridge *
+drm_bridge_chain_get_first_bridge(struct drm_encoder *encoder)
+{
+	return list_first_entry_or_null(&encoder->bridge_chain,
+					struct drm_bridge, chain_node);
+}
+
+/**
+ * drm_for_each_bridge_in_chain() - Iterate over all bridges present in a chain
+ * @encoder: the encoder to iterate bridges on
+ * @bridge: a bridge pointer updated to point to the current bridge at each
+ *	    iteration
+ *
+ * Iterate over all bridges present in the bridge chain attached to @encoder.
+ */
+#define drm_for_each_bridge_in_chain(encoder, bridge)			\
+	list_for_each_entry(bridge, &(encoder)->bridge_chain, chain_node)
+
+bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode);
+enum drm_mode_status
+drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
+			    const struct drm_display_mode *mode);
+void drm_bridge_chain_disable(struct drm_bridge *bridge);
+void drm_bridge_chain_post_disable(struct drm_bridge *bridge);
+void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
+			       const struct drm_display_mode *mode,
+			       const struct drm_display_mode *adjusted_mode);
+void drm_bridge_chain_pre_enable(struct drm_bridge *bridge);
+void drm_bridge_chain_enable(struct drm_bridge *bridge);
+
+void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
+				     struct drm_atomic_state *state);
+void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
+					  struct drm_atomic_state *state);
+void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
+					struct drm_atomic_state *state);
+void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
 				    struct drm_atomic_state *state);
-void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge,
-				  struct drm_atomic_state *state);
-void drm_atomic_bridge_enable(struct drm_bridge *bridge,
-			      struct drm_atomic_state *state);
 
 #ifdef CONFIG_DRM_PANEL_BRIDGE
 struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel);
@@ -438,6 +501,7 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
 struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
 						   struct drm_panel *panel,
 						   u32 connector_type);
+struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge);
 #endif
 
 #endif
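
With encoder->bridge replaced by a list head, drivers walk the chain through the helpers above rather than following ->next. A minimal sketch, assuming encoder is already set up:

	struct drm_bridge *bridge;

	/* Visit every bridge attached to the encoder, first to last. */
	drm_for_each_bridge_in_chain(encoder, bridge) {
		/* ... per-bridge work ... */
	}

	/* Or step through explicitly from the first bridge. */
	for (bridge = drm_bridge_chain_get_first_bridge(encoder);
	     bridge;
	     bridge = drm_bridge_get_next_bridge(bridge)) {
		/* ... */
	}
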
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index d1c662d..81c29848 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -29,7 +29,30 @@
 struct drm_crtc;
 struct drm_plane;
 
-uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision);
+/**
+ * drm_color_lut_extract - clamp and round LUT entries
+ * @user_input: input value
+ * @bit_precision: number of bits the hw LUT supports
+ *
+ * Extract a degamma/gamma LUT value provided by user (in the form of
+ * &drm_color_lut entries) and round it to the precision supported by the
+ * hardware.
+ */
+static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision)
+{
+	u32 val = user_input;
+	u32 max = 0xffff >> (16 - bit_precision);
+
+	/* Round only if we're not using full precision. */
+	if (bit_precision < 16) {
+		val += 1UL << (16 - bit_precision - 1);
+		val >>= 16 - bit_precision;
+	}
+
+	return clamp_val(val, 0, max);
+}
+
+u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n);
 
 void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
 				uint degamma_lut_size,
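
With the helper now visible inline above, a quick worked example of the rounding for a 10-bit hardware LUT:

	/*
	 * drm_color_lut_extract(0x8000, 10):
	 *   max = 0xffff >> 6          = 0x3ff
	 *   val = 0x8000 + (1 << 5)    = 0x8020
	 *   val >>= 6                  = 0x200
	 *   clamp(0x200, 0, 0x3ff)     = 512
	 */
	u32 hw_word = drm_color_lut_extract(0x8000, 10);	/* == 512 */
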
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 5f8c338..2219109 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -188,19 +188,19 @@ struct drm_hdmi_info {
 
 	/**
 	 * @y420_vdb_modes: bitmap of modes which can support ycbcr420
-	 * output only (not normal RGB/YCBCR444/422 outputs). There are total
-	 * 107 VICs defined by CEA-861-F spec, so the size is 128 bits to map
-	 * upto 128 VICs;
+	 * output only (not normal RGB/YCBCR444/422 outputs). The max VIC
+	 * defined by the CEA-861-G spec is 219, so the size is 256 bits to map
+	 * up to 256 VICs.
 	 */
-	unsigned long y420_vdb_modes[BITS_TO_LONGS(128)];
+	unsigned long y420_vdb_modes[BITS_TO_LONGS(256)];
 
 	/**
 	 * @y420_cmdb_modes: bitmap of modes which can support ycbcr420
-	 * output also, along with normal HDMI outputs. There are total 107
-	 * VICs defined by CEA-861-F spec, so the size is 128 bits to map upto
-	 * 128 VICs;
+	 * output also, along with normal HDMI outputs. The max VIC defined by
+	 * the CEA-861-G spec is 219, so the size is 256 bits to map up to 256
+	 * VICs.
 	 */
-	unsigned long y420_cmdb_modes[BITS_TO_LONGS(128)];
+	unsigned long y420_cmdb_modes[BITS_TO_LONGS(256)];
 
 	/** @y420_cmdb_map: bitmap of SVD index, to extract vcb modes */
 	u64 y420_cmdb_map;
@@ -1070,6 +1070,14 @@ struct drm_cmdline_mode {
 	unsigned int rotation_reflection;
 
 	/**
+	 * @panel_orientation:
+	 *
+	 * drm-connector "panel orientation" property override value,
+	 * DRM_MODE_PANEL_ORIENTATION_UNKNOWN if not set.
+	 */
+	enum drm_panel_orientation panel_orientation;
+
+	/**
 	 * @tv_margins: TV margins to apply to the mode.
 	 */
 	struct drm_connector_tv_margins tv_margins;
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 51ecb51..bc04467 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -307,7 +307,7 @@
 # define DP_DSC_THROUGHPUT_MODE_0_900       (12 << 0)
 # define DP_DSC_THROUGHPUT_MODE_0_950       (13 << 0)
 # define DP_DSC_THROUGHPUT_MODE_0_1000      (14 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_170       (15 << 4)
+# define DP_DSC_THROUGHPUT_MODE_0_170       (15 << 0) /* 1.4a */
 # define DP_DSC_THROUGHPUT_MODE_1_MASK      (0xf << 4)
 # define DP_DSC_THROUGHPUT_MODE_1_SHIFT     4
 # define DP_DSC_THROUGHPUT_MODE_1_UPSUPPORTED 0
@@ -1042,6 +1042,8 @@
 #define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1	    0xf0039 /* 1.3 */
 #define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1	    0xf003b /* 1.3 */
 #define DP_FEC_STATUS_PHY_REPEATER1			    0xf0290 /* 1.4 */
+#define DP_FEC_ERROR_COUNT_PHY_REPEATER1                    0xf0291 /* 1.4 */
+#define DP_FEC_CAPABILITY_PHY_REPEATER1                     0xf0294 /* 1.4a */
 
 /* Repeater modes */
 #define DP_PHY_REPEATER_MODE_TRANSPARENT		    0x55    /* 1.3 */
@@ -1463,6 +1465,7 @@ int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
 void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 			     const u8 port_cap[4], struct drm_dp_aux *aux);
 
+void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
 void drm_dp_aux_init(struct drm_dp_aux *aux);
 int drm_dp_aux_register(struct drm_dp_aux *aux);
 void drm_dp_aux_unregister(struct drm_dp_aux *aux);
@@ -1520,6 +1523,13 @@ enum drm_dp_quirk {
 	 * The driver should ignore SINK_COUNT during detection.
 	 */
 	DP_DPCD_QUIRK_NO_SINK_COUNT,
+	/**
+	 * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD:
+	 *
+	 * The device supports MST DSC despite not supporting Virtual DPCD.
+	 * The DSC caps can be read from the physical aux instead.
+	 */
+	DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
 };
 
 /**
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index c1bda70..bcb39da 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -156,6 +156,8 @@ struct drm_dp_mst_port {
 	 * audio-capable.
 	 */
 	bool has_audio;
+
+	bool fec_capable;
 };
 
 /**
@@ -383,6 +385,7 @@ struct drm_dp_port_number_req {
 
 struct drm_dp_enum_path_resources_ack_reply {
 	u8 port_number;
+	bool fec_capable;
 	u16 full_payload_bw_number;
 	u16 avail_payload_bw_number;
 };
@@ -499,6 +502,8 @@ struct drm_dp_payload {
 struct drm_dp_vcpi_allocation {
 	struct drm_dp_mst_port *port;
 	int vcpi;
+	int pbn;
+	bool dsc_enabled;
 	struct list_head next;
 };
 
@@ -561,7 +566,8 @@ struct drm_dp_mst_topology_mgr {
 	struct drm_dp_sideband_msg_rx up_req_recv;
 
 	/**
-	 * @lock: protects mst state, primary, dpcd.
+	 * @lock: protects @mst_state, @mst_primary, @dpcd, and
+	 * @payload_id_table_cleared.
 	 */
 	struct mutex lock;
 
@@ -576,7 +582,14 @@ struct drm_dp_mst_topology_mgr {
 	 * @mst_state: If this manager is enabled for an MST capable port. False
 	 * if no MST sink/branch devices is connected.
 	 */
-	bool mst_state;
+	bool mst_state : 1;
+
+	/**
+	 * @payload_id_table_cleared: Whether or not we've cleared the payload
+	 * ID table for @mst_primary. Protected by @lock.
+	 */
+	bool payload_id_table_cleared : 1;
+
 	/**
 	 * @mst_primary: Pointer to the primary/first branch device.
 	 */
@@ -725,8 +738,7 @@ bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
 
 
-int drm_dp_calc_pbn_mode(int clock, int bpp);
-
+int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
 
 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
 			      struct drm_dp_mst_port *port, int pbn, int slots);
@@ -775,7 +787,15 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
 int __must_check
 drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
 			      struct drm_dp_mst_topology_mgr *mgr,
-			      struct drm_dp_mst_port *port, int pbn);
+			      struct drm_dp_mst_port *port, int pbn,
+			      int pbn_div);
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+				 struct drm_dp_mst_port *port,
+				 int pbn, int pbn_div,
+				 bool enable);
+int __must_check
+drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
+				  struct drm_dp_mst_topology_mgr *mgr);
 int __must_check
 drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
 				 struct drm_dp_mst_topology_mgr *mgr,
@@ -787,6 +807,8 @@ int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
 void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
 void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
 
+struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
+
 extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
 
 /**
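
Taken together, the MST changes above thread DSC awareness through the atomic bandwidth bookkeeping. A hedged sketch of how an encoder's atomic check could use the new signatures; the fixed bpp handling and the function name are illustrative, not part of this patch:

#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_modes.h>

/* Sketch: reserve VCPI slots for one MST stream, optionally DSC compressed. */
static int sketch_mst_reserve_bw(struct drm_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 const struct drm_display_mode *mode,
				 int bpp, bool dsc)
{
	int pbn, slots;

	/* The new third argument says whether bpp is a DSC-compressed rate. */
	pbn = drm_dp_calc_pbn_mode(mode->clock, bpp, dsc);

	/* A pbn_div of 0 asks the helper to use the topology's current divider. */
	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn, 0);
	if (slots < 0)
		return slots;

	if (dsc)
		return drm_dp_mst_atomic_enable_dsc(state, port, pbn, 0, true);

	return 0;
}
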
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index f06164f..5623994 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -172,7 +172,12 @@ struct drm_encoder {
 	 * &drm_connector_state.crtc.
 	 */
 	struct drm_crtc *crtc;
-	struct drm_bridge *bridge;
+
+	/**
+	 * @bridge_chain: Bridges attached to this encoder.
+	 */
+	struct list_head bridge_chain;
+
 	const struct drm_encoder_funcs *funcs;
 	const struct drm_encoder_helper_funcs *helper_private;
 };
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 4becb09..795aea1 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -2,6 +2,8 @@
 #ifndef __DRM_FB_CMA_HELPER_H__
 #define __DRM_FB_CMA_HELPER_H__
 
+#include <linux/types.h>
+
 struct drm_framebuffer;
 struct drm_plane_state;
 
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 2338e9f..1c6633d 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -231,8 +231,6 @@ void drm_fb_helper_fill_info(struct fb_info *info,
 			     struct drm_fb_helper *fb_helper,
 			     struct drm_fb_helper_surface_size *sizes);
 
-void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
-
 void drm_fb_helper_deferred_io(struct fb_info *info,
 			       struct list_head *pagelist);
 
@@ -269,18 +267,9 @@ int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
 int drm_fb_helper_debug_enter(struct fb_info *info);
 int drm_fb_helper_debug_leave(struct fb_info *info);
 
-int drm_fb_helper_fbdev_setup(struct drm_device *dev,
-			      struct drm_fb_helper *fb_helper,
-			      const struct drm_fb_helper_funcs *funcs,
-			      unsigned int preferred_bpp,
-			      unsigned int max_conn_count);
-void drm_fb_helper_fbdev_teardown(struct drm_device *dev);
-
 void drm_fb_helper_lastclose(struct drm_device *dev);
 void drm_fb_helper_output_poll_changed(struct drm_device *dev);
 
-int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
-				struct drm_fb_helper_surface_size *sizes);
 int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp);
 #else
 static inline void drm_fb_helper_prepare(struct drm_device *dev,
@@ -363,10 +352,6 @@ static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
 	return 0;
 }
 
-static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
-{
-}
-
 static inline void drm_fb_helper_deferred_io(struct fb_info *info,
 					     struct list_head *pagelist)
 {
@@ -452,24 +437,6 @@ static inline int drm_fb_helper_debug_leave(struct fb_info *info)
 	return 0;
 }
 
-static inline int
-drm_fb_helper_fbdev_setup(struct drm_device *dev,
-			  struct drm_fb_helper *fb_helper,
-			  const struct drm_fb_helper_funcs *funcs,
-			  unsigned int preferred_bpp,
-			  unsigned int max_conn_count)
-{
-	/* So drivers can use it to free the struct */
-	dev->fb_helper = fb_helper;
-
-	return 0;
-}
-
-static inline void drm_fb_helper_fbdev_teardown(struct drm_device *dev)
-{
-	dev->fb_helper = NULL;
-}
-
 static inline void drm_fb_helper_lastclose(struct drm_device *dev)
 {
 }
@@ -479,13 +446,6 @@ static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
 }
 
 static inline int
-drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
-			    struct drm_fb_helper_surface_size *sizes)
-{
-	return 0;
-}
-
-static inline int
 drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
 {
 	return 0;
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 67af60b..8b099b3 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -42,6 +42,7 @@ struct dma_fence;
 struct drm_file;
 struct drm_device;
 struct device;
+struct file;
 
 /*
  * FIXME: Not sure we want to have drm_minor here in the end, but to avoid
@@ -387,4 +388,6 @@ void drm_event_cancel_free(struct drm_device *dev,
 void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
 void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
 
+struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
+
 #endif /* _DRM_FILE_H_ */
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 306d1ef..156b122 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -78,7 +78,7 @@ struct drm_format_info {
 		 * triplet @char_per_block, @block_w, @block_h for better
 		 * describing the pixel format.
 		 */
-		u8 cpp[3];
+		u8 cpp[4];
 
 		/**
 		 * @char_per_block:
@@ -104,7 +104,7 @@ struct drm_format_info {
 		 * information from their drm_mode_config.get_format_info hook
 		 * if they want the core to be validating the pitch.
 		 */
-		u8 char_per_block[3];
+		u8 char_per_block[4];
 	};
 
 	/**
@@ -113,7 +113,7 @@ struct drm_format_info {
 	 * Block width in pixels, this is intended to be accessed through
 	 * drm_format_info_block_width()
 	 */
-	u8 block_w[3];
+	u8 block_w[4];
 
 	/**
 	 * @block_h:
@@ -121,7 +121,7 @@ struct drm_format_info {
 	 * Block height in pixels, this is intended to be accessed through
 	 * drm_format_info_block_height()
 	 */
-	u8 block_h[3];
+	u8 block_h[4];
 
 	/** @hsub: Horizontal chroma subsampling factor */
 	u8 hsub;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 97a4816..0b37506 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -159,9 +159,7 @@ struct drm_gem_object_funcs {
 	 *
 	 * The callback is used by both drm_gem_mmap_obj() and
 	 * drm_gem_prime_mmap().  When @mmap is present @vm_ops is not
-	 * used, the @mmap callback must set vma->vm_ops instead. The @mmap
-	 * callback is always called with a 0 offset. The caller will remove
-	 * the fake offset as necessary.
+	 * used, the @mmap callback must set vma->vm_ops instead.
 	 */
 	int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
 
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index e040541..573e9fd 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -16,7 +16,6 @@ struct drm_mode_create_dumb;
 struct drm_plane;
 struct drm_plane_state;
 struct drm_simple_display_pipe;
-struct drm_vram_mm_funcs;
 struct filp;
 struct vm_area_struct;
 
@@ -94,10 +93,8 @@ static inline struct drm_gem_vram_object *drm_gem_vram_of_gem(
 }
 
 struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
-						struct ttm_bo_device *bdev,
 						size_t size,
-						unsigned long pg_align,
-						bool interruptible);
+						unsigned long pg_align);
 void drm_gem_vram_put(struct drm_gem_vram_object *gbo);
 u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo);
 s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo);
@@ -111,9 +108,8 @@ void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr);
 
 int drm_gem_vram_fill_create_dumb(struct drm_file *file,
 				  struct drm_device *dev,
-				  struct ttm_bo_device *bdev,
 				  unsigned long pg_align,
-				  bool interruptible,
+				  unsigned long pitch_align,
 				  struct drm_mode_create_dumb *args);
 
 /*
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 58dc0c0..5745710 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -38,7 +38,9 @@
 #include <drm/drm_hashtab.h>
 
 struct drm_device;
+struct drm_driver;
 struct file;
+struct pci_driver;
 
 /*
  * Legacy Support for paleontologic DRM drivers
@@ -188,8 +190,33 @@ do {										\
 void drm_legacy_idlelock_take(struct drm_lock_data *lock);
 void drm_legacy_idlelock_release(struct drm_lock_data *lock);
 
-/* drm_pci.c dma alloc wrappers */
+/* drm_pci.c */
+
+#ifdef CONFIG_PCI
+
 void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
+int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
+void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+
+#else
+
+static inline void __drm_legacy_pci_free(struct drm_device *dev,
+					 drm_dma_handle_t *dmah)
+{
+}
+
+static inline int drm_legacy_pci_init(struct drm_driver *driver,
+				      struct pci_driver *pdriver)
+{
+	return -EINVAL;
+}
+
+static inline void drm_legacy_pci_exit(struct drm_driver *driver,
+				       struct pci_driver *pdriver)
+{
+}
+
+#endif
 
 /* drm_memory.c */
 void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 13cf2ae..360e637 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -13,6 +13,7 @@
 
 struct mipi_dsi_host;
 struct mipi_dsi_device;
+struct drm_dsc_picture_parameter_set;
 
 /* request ACK from peripheral */
 #define MIPI_DSI_MSG_REQ_ACK	BIT(0)
@@ -228,6 +229,9 @@ int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
 int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
 int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
 					    u16 value);
+ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
+ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+				       const struct drm_dsc_picture_parameter_set *pps);
 
 ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
 			       size_t size);
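
The two new helpers above wrap the DSI Compression Mode and Picture Parameter Set transactions. A minimal sketch of a panel or bridge driver enabling DSC on its peripheral, assuming the PPS structure has already been filled in elsewhere (the function name is illustrative):

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_dsc.h>

/* Sketch: push a prepared PPS, then switch the peripheral to compressed mode. */
static int sketch_dsi_enable_dsc(struct mipi_dsi_device *dsi,
				 const struct drm_dsc_picture_parameter_set *pps)
{
	ssize_t ret;

	ret = mipi_dsi_picture_parameter_set(dsi, pps);
	if (ret < 0)
		return ret;

	ret = mipi_dsi_compression_mode(dsi, true);
	return ret < 0 ? ret : 0;
}
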
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index ead34ab..b9b093a 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -16,6 +16,18 @@ struct drm_panel;
 struct drm_bridge;
 struct device_node;
 
+/**
+ * enum drm_lvds_dual_link_pixels - Pixel order of an LVDS dual-link connection
+ * @DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS: Even pixels are expected to be generated
+ *    from the first port, odd pixels from the second port
+ * @DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS: Odd pixels are expected to be generated
+ *    from the first port, even pixels from the second port
+ */
+enum drm_lvds_dual_link_pixels {
+	DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS = 0,
+	DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS = 1,
+};
+
 #ifdef CONFIG_OF
 uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
 			    struct device_node *port);
@@ -35,6 +47,8 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
 				int port, int endpoint,
 				struct drm_panel **panel,
 				struct drm_bridge **bridge);
+int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
+					  const struct device_node *port2);
 #else
 static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
 					  struct device_node *port)
@@ -77,6 +91,13 @@ static inline int drm_of_find_panel_or_bridge(const struct device_node *np,
 {
 	return -EINVAL;
 }
+
+static inline int
+drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
+				      const struct device_node *port2)
+{
+	return -EINVAL;
+}
 #endif
 
 /*
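
A hedged sketch of how an LVDS encoder or bridge driver might consume the new helper; the port lookup via of_graph_get_port_by_id() reflects typical usage and is an assumption, not something this patch adds:

#include <linux/of_graph.h>
#include <drm/drm_of.h>

/* Sketch: determine even/odd pixel routing for a dual-link LVDS output. */
static int sketch_lvds_pixel_order(struct device_node *np)
{
	struct device_node *port1, *port2;
	int order;

	port1 = of_graph_get_port_by_id(np, 0);
	port2 = of_graph_get_port_by_id(np, 1);
	order = drm_of_lvds_get_dual_link_pixel_order(port1, port2);
	of_node_put(port2);
	of_node_put(port1);

	if (order < 0)
		return order;	/* single link or malformed description */

	return order;	/* DRM_LVDS_DUAL_LINK_{EVEN_ODD,ODD_EVEN}_PIXELS */
}
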
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index ce8da64..121f7aa 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -28,6 +28,7 @@
 #include <linux/errno.h>
 #include <linux/list.h>
 
+struct backlight_device;
 struct device_node;
 struct drm_connector;
 struct drm_device;
@@ -59,12 +60,18 @@ struct display_timing;
  *
  * To save power when no video data is transmitted, a driver can power down
  * the panel. This is the job of the .unprepare() function.
+ *
+ * Backlight can be handled automatically if configured using
+ * drm_panel_of_backlight(). Then the driver does not need to implement the
+ * functionality to enable/disable backlight.
  */
 struct drm_panel_funcs {
 	/**
 	 * @prepare:
 	 *
 	 * Turn on panel and perform set up.
+	 *
+	 * This function is optional.
 	 */
 	int (*prepare)(struct drm_panel *panel);
 
@@ -72,6 +79,8 @@ struct drm_panel_funcs {
 	 * @enable:
 	 *
 	 * Enable panel (turn on back light, etc.).
+	 *
+	 * This function is optional.
 	 */
 	int (*enable)(struct drm_panel *panel);
 
@@ -79,6 +88,8 @@ struct drm_panel_funcs {
 	 * @disable:
 	 *
 	 * Disable panel (turn off back light, etc.).
+	 *
+	 * This function is optional.
 	 */
 	int (*disable)(struct drm_panel *panel);
 
@@ -86,22 +97,29 @@ struct drm_panel_funcs {
 	 * @unprepare:
 	 *
 	 * Turn off panel.
+	 *
+	 * This function is optional.
 	 */
 	int (*unprepare)(struct drm_panel *panel);
 
 	/**
 	 * @get_modes:
 	 *
-	 * Add modes to the connector that the panel is attached to and
-	 * return the number of modes added.
+	 * Add modes to the connector that the panel is attached to
+	 * and return the number of modes added.
+	 *
+	 * This function is mandatory.
 	 */
-	int (*get_modes)(struct drm_panel *panel);
+	int (*get_modes)(struct drm_panel *panel,
+			 struct drm_connector *connector);
 
 	/**
 	 * @get_timings:
 	 *
 	 * Copy display timings into the provided array and return
 	 * the number of display timings available.
+	 *
+	 * This function is optional.
 	 */
 	int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
 			   struct display_timing *timings);
@@ -112,20 +130,6 @@ struct drm_panel_funcs {
  */
 struct drm_panel {
 	/**
-	 * @drm:
-	 *
-	 * DRM device owning the panel.
-	 */
-	struct drm_device *drm;
-
-	/**
-	 * @connector:
-	 *
-	 * DRM connector that the panel is attached to.
-	 */
-	struct drm_connector *connector;
-
-	/**
 	 * @dev:
 	 *
 	 * Parent device of the panel.
@@ -133,6 +137,17 @@ struct drm_panel {
 	struct device *dev;
 
 	/**
+	 * @backlight:
+	 *
+	 * Backlight device, used to turn on backlight after the call
+	 * to enable(), and to turn off backlight before the call to
+	 * disable().
+	 * backlight is set by drm_panel_of_backlight() and drivers
+	 * shall not assign it.
+	 */
+	struct backlight_device *backlight;
+
+	/**
 	 * @funcs:
 	 *
 	 * Operations that can be performed on the panel.
@@ -172,7 +187,7 @@ int drm_panel_unprepare(struct drm_panel *panel);
 int drm_panel_enable(struct drm_panel *panel);
 int drm_panel_disable(struct drm_panel *panel);
 
-int drm_panel_get_modes(struct drm_panel *panel);
+int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector);
 
 #if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL)
 struct drm_panel *of_drm_find_panel(const struct device_node *np);
@@ -183,4 +198,13 @@ static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
 }
 #endif
 
+#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
+int drm_panel_of_backlight(struct drm_panel *panel);
+#else
+static inline int drm_panel_of_backlight(struct drm_panel *panel)
+{
+	return 0;
+}
+#endif
+
 #endif
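
A hedged sketch of a panel driver adapting to the two interface changes above: get_modes() now receives the connector explicitly, and probe code can hand backlight handling over to the core with drm_panel_of_backlight(). The fixed mode size and function names are illustrative:

#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_panel.h>

static int sketch_panel_get_modes(struct drm_panel *panel,
				  struct drm_connector *connector)
{
	/* Modes are added to the connector passed in, not a stored pointer. */
	return drm_add_modes_noedid(connector, 1920, 1080);
}

static const struct drm_panel_funcs sketch_panel_funcs = {
	.get_modes = sketch_panel_get_modes,	/* the only mandatory hook */
};

static int sketch_panel_probe(struct device *dev, struct drm_panel *panel)
{
	int ret;

	drm_panel_init(panel, dev, &sketch_panel_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	/* Looks up a "backlight" phandle; enable/disable is then automatic. */
	ret = drm_panel_of_backlight(panel);
	if (ret)
		return ret;

	return drm_panel_add(panel);
}
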
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
index 8181e9e..9031e21 100644
--- a/include/drm/drm_pci.h
+++ b/include/drm/drm_pci.h
@@ -39,23 +39,36 @@ struct drm_device;
 struct drm_driver;
 struct drm_master;
 
+#ifdef CONFIG_PCI
+
 struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
 				     size_t align);
 void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
 
-int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
-void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
-#ifdef CONFIG_PCI
 int drm_get_pci_dev(struct pci_dev *pdev,
 		    const struct pci_device_id *ent,
 		    struct drm_driver *driver);
+
 #else
+
+static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
+						   size_t size, size_t align)
+{
+	return NULL;
+}
+
+static inline void drm_pci_free(struct drm_device *dev,
+				struct drm_dma_handle *dmah)
+{
+}
+
 static inline int drm_get_pci_dev(struct pci_dev *pdev,
 				  const struct pci_device_id *ent,
 				  struct drm_driver *driver)
 {
 	return -ENOSYS;
 }
+
 #endif
 
 #endif /* _DRM_PCI_H_ */
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 5b80499..8f99d38 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -34,7 +34,8 @@
 
 #include <drm/drm.h>
 
-extern unsigned int drm_debug;
+/* Do *not* use outside of drm_print.[ch]! */
+extern unsigned int __drm_debug;
 
 /**
  * DOC: print
@@ -248,67 +249,247 @@ static inline struct drm_printer drm_err_printer(const char *prefix)
 	return p;
 }
 
-/*
- * The following categories are defined:
+/**
+ * enum drm_debug_category - The DRM debug categories
  *
- * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ...
- *	 This is the category used by the DRM_DEBUG() macro.
+ * Each of the DRM debug logging macros uses a specific category, and the logging
+ * is filtered by the drm.debug module parameter. This enum specifies the values
+ * for the interface.
  *
- * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ...
- *	   This is the category used by the DRM_DEBUG_DRIVER() macro.
+ * Each DRM_DEBUG_<CATEGORY> macro logs to DRM_UT_<CATEGORY> category, except
+ * DRM_DEBUG() logs to DRM_UT_CORE.
  *
- * KMS: used in the modesetting code.
- *	This is the category used by the DRM_DEBUG_KMS() macro.
+ * Enabling verbose debug messages is done through the drm.debug parameter, each
+ * category being enabled by a bit:
  *
- * PRIME: used in the prime code.
- *	  This is the category used by the DRM_DEBUG_PRIME() macro.
- *
- * ATOMIC: used in the atomic code.
- *	  This is the category used by the DRM_DEBUG_ATOMIC() macro.
- *
- * VBL: used for verbose debug message in the vblank code
- *	  This is the category used by the DRM_DEBUG_VBL() macro.
- *
- * Enabling verbose debug messages is done through the drm.debug parameter,
- * each category being enabled by a bit.
- *
- * drm.debug=0x1 will enable CORE messages
- * drm.debug=0x2 will enable DRIVER messages
- * drm.debug=0x3 will enable CORE and DRIVER messages
- * ...
- * drm.debug=0x3f will enable all messages
+ *  - drm.debug=0x1 will enable CORE messages
+ *  - drm.debug=0x2 will enable DRIVER messages
+ *  - drm.debug=0x3 will enable CORE and DRIVER messages
+ *  - ...
+ *  - drm.debug=0x1ff will enable all messages
  *
  * An interesting feature is that it's possible to enable verbose logging at
- * run-time by echoing the debug value in its sysfs node:
+ * run-time by echoing the debug value in its sysfs node::
+ *
  *   # echo 0xf > /sys/module/drm/parameters/debug
+ *
  */
-#define DRM_UT_NONE		0x00
-#define DRM_UT_CORE		0x01
-#define DRM_UT_DRIVER		0x02
-#define DRM_UT_KMS		0x04
-#define DRM_UT_PRIME		0x08
-#define DRM_UT_ATOMIC		0x10
-#define DRM_UT_VBL		0x20
-#define DRM_UT_STATE		0x40
-#define DRM_UT_LEASE		0x80
-#define DRM_UT_DP		0x100
+enum drm_debug_category {
+	/**
+	 * @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c,
+	 * drm_memory.c, ...
+	 */
+	DRM_UT_CORE		= 0x01,
+	/**
+	 * @DRM_UT_DRIVER: Used in the vendor specific part of the driver: i915,
+	 * radeon, ...
+	 */
+	DRM_UT_DRIVER		= 0x02,
+	/**
+	 * @DRM_UT_KMS: Used in the modesetting code.
+	 */
+	DRM_UT_KMS		= 0x04,
+	/**
+	 * @DRM_UT_PRIME: Used in the prime code.
+	 */
+	DRM_UT_PRIME		= 0x08,
+	/**
+	 * @DRM_UT_ATOMIC: Used in the atomic code.
+	 */
+	DRM_UT_ATOMIC		= 0x10,
+	/**
+	 * @DRM_UT_VBL: Used for verbose debug message in the vblank code.
+	 */
+	DRM_UT_VBL		= 0x20,
+	/**
+	 * @DRM_UT_STATE: Used for verbose atomic state debugging.
+	 */
+	DRM_UT_STATE		= 0x40,
+	/**
+	 * @DRM_UT_LEASE: Used in the lease code.
+	 */
+	DRM_UT_LEASE		= 0x80,
+	/**
+	 * @DRM_UT_DP: Used in the DP code.
+	 */
+	DRM_UT_DP		= 0x100,
+};
 
-static inline bool drm_debug_enabled(unsigned int category)
+static inline bool drm_debug_enabled(enum drm_debug_category category)
 {
-	return unlikely(drm_debug & category);
+	return unlikely(__drm_debug & category);
 }
 
+/*
+ * struct device based logging
+ *
+ * Prefer drm_device based logging over device or printk based logging.
+ */
+
 __printf(3, 4)
 void drm_dev_printk(const struct device *dev, const char *level,
 		    const char *format, ...);
 __printf(3, 4)
-void drm_dev_dbg(const struct device *dev, unsigned int category,
+void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
 		 const char *format, ...);
 
+/**
+ * Error output.
+ *
+ * @dev: device pointer
+ * @fmt: printf() like format string.
+ */
+#define DRM_DEV_ERROR(dev, fmt, ...)					\
+	drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__)
+
+/**
+ * Rate limited error output.  Like DRM_ERROR() but won't flood the log.
+ *
+ * @dev: device pointer
+ * @fmt: printf() like format string.
+ */
+#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...)			\
+({									\
+	static DEFINE_RATELIMIT_STATE(_rs,				\
+				      DEFAULT_RATELIMIT_INTERVAL,	\
+				      DEFAULT_RATELIMIT_BURST);		\
+									\
+	if (__ratelimit(&_rs))						\
+		DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__);			\
+})
+
+#define DRM_DEV_INFO(dev, fmt, ...)				\
+	drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_INFO_ONCE(dev, fmt, ...)				\
+({									\
+	static bool __print_once __read_mostly;				\
+	if (!__print_once) {						\
+		__print_once = true;					\
+		DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__);			\
+	}								\
+})
+
+/**
+ * Debug output.
+ *
+ * @dev: device pointer
+ * @fmt: printf() like format string.
+ */
+#define DRM_DEV_DEBUG(dev, fmt, ...)					\
+	drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...)				\
+	drm_dev_dbg(dev, DRM_UT_DRIVER,	fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_KMS(dev, fmt, ...)				\
+	drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_PRIME(dev, fmt, ...)				\
+	drm_dev_dbg(dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, ...)				\
+	drm_dev_dbg(dev, DRM_UT_ATOMIC,	fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_VBL(dev, fmt, ...)				\
+	drm_dev_dbg(dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+#define	DRM_DEV_DEBUG_DP(dev, fmt, ...)					\
+	drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
+
+#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...)	\
+({									\
+	static DEFINE_RATELIMIT_STATE(_rs,				\
+				      DEFAULT_RATELIMIT_INTERVAL,	\
+				      DEFAULT_RATELIMIT_BURST);		\
+	if (__ratelimit(&_rs))						\
+		drm_dev_dbg(dev, category, fmt, ##__VA_ARGS__);		\
+})
+
+/**
+ * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
+ *
+ * @dev: device pointer
+ * @fmt: printf() like format string.
+ */
+#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, ...)			\
+	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_CORE,		\
+					  fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, ...)			\
+	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_DRIVER,		\
+					  fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, ...)			\
+	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_KMS,		\
+					  fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, ...)			\
+	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_PRIME,		\
+					  fmt, ##__VA_ARGS__)
+
+/*
+ * struct drm_device based logging
+ *
+ * Prefer drm_device based logging over device or printk based logging.
+ */
+
+/* Helper for struct drm_device based logging. */
+#define __drm_printk(drm, level, type, fmt, ...)			\
+	dev_##level##type((drm)->dev, "[drm] " fmt, ##__VA_ARGS__)
+
+
+#define drm_info(drm, fmt, ...)					\
+	__drm_printk((drm), info,, fmt, ##__VA_ARGS__)
+
+#define drm_notice(drm, fmt, ...)				\
+	__drm_printk((drm), notice,, fmt, ##__VA_ARGS__)
+
+#define drm_warn(drm, fmt, ...)					\
+	__drm_printk((drm), warn,, fmt, ##__VA_ARGS__)
+
+#define drm_err(drm, fmt, ...)					\
+	__drm_printk((drm), err,, "*ERROR* " fmt, ##__VA_ARGS__)
+
+
+#define drm_info_once(drm, fmt, ...)				\
+	__drm_printk((drm), info, _once, fmt, ##__VA_ARGS__)
+
+#define drm_notice_once(drm, fmt, ...)				\
+	__drm_printk((drm), notice, _once, fmt, ##__VA_ARGS__)
+
+#define drm_warn_once(drm, fmt, ...)				\
+	__drm_printk((drm), warn, _once, fmt, ##__VA_ARGS__)
+
+#define drm_err_once(drm, fmt, ...)				\
+	__drm_printk((drm), err, _once, "*ERROR* " fmt, ##__VA_ARGS__)
+
+
+#define drm_err_ratelimited(drm, fmt, ...)				\
+	__drm_printk((drm), err, _ratelimited, "*ERROR* " fmt, ##__VA_ARGS__)
+
+
+#define drm_dbg_core(drm, fmt, ...)					\
+	drm_dev_dbg((drm)->dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
+#define drm_dbg(drm, fmt, ...)						\
+	drm_dev_dbg((drm)->dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+#define drm_dbg_kms(drm, fmt, ...)					\
+	drm_dev_dbg((drm)->dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+#define drm_dbg_prime(drm, fmt, ...)					\
+	drm_dev_dbg((drm)->dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+#define drm_dbg_atomic(drm, fmt, ...)					\
+	drm_dev_dbg((drm)->dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+#define drm_dbg_vbl(drm, fmt, ...)					\
+	drm_dev_dbg((drm)->dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+#define drm_dbg_state(drm, fmt, ...)					\
+	drm_dev_dbg((drm)->dev, DRM_UT_STATE, fmt, ##__VA_ARGS__)
+#define drm_dbg_lease(drm, fmt, ...)					\
+	drm_dev_dbg((drm)->dev, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+#define drm_dbg_dp(drm, fmt, ...)					\
+	drm_dev_dbg((drm)->dev, DRM_UT_DP, fmt, ##__VA_ARGS__)
+
+
+/*
+ * printk based logging
+ *
+ * Prefer drm_device based logging over device or printk based logging.
+ */
+
 __printf(2, 3)
-void drm_dbg(unsigned int category, const char *format, ...);
+void __drm_dbg(enum drm_debug_category category, const char *format, ...);
 __printf(1, 2)
-void drm_err(const char *format, ...);
+void __drm_err(const char *format, ...);
 
 /* Macros to make printk easier */
 
@@ -329,127 +510,46 @@ void drm_err(const char *format, ...);
 #define DRM_WARN_ONCE(fmt, ...)						\
 	_DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
 
-/**
- * Error output.
- *
- * @dev: device pointer
- * @fmt: printf() like format string.
- */
-#define DRM_DEV_ERROR(dev, fmt, ...)					\
-	drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__)
 #define DRM_ERROR(fmt, ...)						\
-	drm_err(fmt, ##__VA_ARGS__)
+	__drm_err(fmt, ##__VA_ARGS__)
 
-/**
- * Rate limited error output.  Like DRM_ERROR() but won't flood the log.
- *
- * @dev: device pointer
- * @fmt: printf() like format string.
- */
-#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...)			\
-({									\
-	static DEFINE_RATELIMIT_STATE(_rs,				\
-				      DEFAULT_RATELIMIT_INTERVAL,	\
-				      DEFAULT_RATELIMIT_BURST);		\
-									\
-	if (__ratelimit(&_rs))						\
-		DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__);			\
-})
 #define DRM_ERROR_RATELIMITED(fmt, ...)					\
 	DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
 
-#define DRM_DEV_INFO(dev, fmt, ...)					\
-	drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
-
-#define DRM_DEV_INFO_ONCE(dev, fmt, ...)				\
-({									\
-	static bool __print_once __read_mostly;				\
-	if (!__print_once) {						\
-		__print_once = true;					\
-		DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__);			\
-	}								\
-})
-
-/**
- * Debug output.
- *
- * @dev: device pointer
- * @fmt: printf() like format string.
- */
-#define DRM_DEV_DEBUG(dev, fmt, ...)					\
-	drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
 #define DRM_DEBUG(fmt, ...)						\
-	drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
+	__drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
 
-#define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...)				\
-	drm_dev_dbg(dev, DRM_UT_DRIVER,	fmt, ##__VA_ARGS__)
 #define DRM_DEBUG_DRIVER(fmt, ...)					\
-	drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+	__drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
 
-#define DRM_DEV_DEBUG_KMS(dev, fmt, ...)				\
-	drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
 #define DRM_DEBUG_KMS(fmt, ...)						\
-	drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
+	__drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
 
-#define DRM_DEV_DEBUG_PRIME(dev, fmt, ...)				\
-	drm_dev_dbg(dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
 #define DRM_DEBUG_PRIME(fmt, ...)					\
-	drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+	__drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
 
-#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, ...)				\
-	drm_dev_dbg(dev, DRM_UT_ATOMIC,	fmt, ##__VA_ARGS__)
 #define DRM_DEBUG_ATOMIC(fmt, ...)					\
-	drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+	__drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
 
-#define DRM_DEV_DEBUG_VBL(dev, fmt, ...)				\
-	drm_dev_dbg(dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
 #define DRM_DEBUG_VBL(fmt, ...)						\
-	drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
+	__drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
 
 #define DRM_DEBUG_LEASE(fmt, ...)					\
-	drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+	__drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
 
-#define	DRM_DEV_DEBUG_DP(dev, fmt, ...)					\
-	drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
 #define DRM_DEBUG_DP(fmt, ...)						\
-	drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
+	__drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
 
-#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...)	\
-({									\
-	static DEFINE_RATELIMIT_STATE(_rs,				\
-				      DEFAULT_RATELIMIT_INTERVAL,	\
-				      DEFAULT_RATELIMIT_BURST);		\
-	if (__ratelimit(&_rs))						\
-		drm_dev_dbg(dev, category, fmt, ##__VA_ARGS__);		\
-})
 
-/**
- * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
- *
- * @dev: device pointer
- * @fmt: printf() like format string.
- */
-#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, ...)			\
-	_DEV_DRM_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_CORE,		\
-					  fmt, ##__VA_ARGS__)
 #define DRM_DEBUG_RATELIMITED(fmt, ...)					\
 	DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
 
-#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, ...)			\
-	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_DRIVER,		\
-					  fmt, ##__VA_ARGS__)
 #define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...)				\
 	DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
 
-#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, ...)			\
-	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_KMS,		\
-					  fmt, ##__VA_ARGS__)
 #define DRM_DEBUG_KMS_RATELIMITED(fmt, ...)				\
 	DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
 
-#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, ...)			\
-	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_PRIME,		\
-					  fmt, ##__VA_ARGS__)
 #define DRM_DEBUG_PRIME_RATELIMITED(fmt, ...)				\
 	DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
 
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index cd01061..57a3be9 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -24,6 +24,8 @@
 #ifndef DRM_RECT_H
 #define DRM_RECT_H
 
+#include <linux/types.h>
+
 /**
  * DOC: rect utils
  *
diff --git a/include/drm/drm_scdc_helper.h b/include/drm/drm_scdc_helper.h
index f92eb20..6a48353 100644
--- a/include/drm/drm_scdc_helper.h
+++ b/include/drm/drm_scdc_helper.h
@@ -50,9 +50,9 @@
 #define  SCDC_READ_REQUEST_ENABLE (1 << 0)
 
 #define SCDC_STATUS_FLAGS_0 0x40
-#define  SCDC_CH2_LOCK (1 < 3)
-#define  SCDC_CH1_LOCK (1 < 2)
-#define  SCDC_CH0_LOCK (1 < 1)
+#define  SCDC_CH2_LOCK (1 << 3)
+#define  SCDC_CH1_LOCK (1 << 2)
+#define  SCDC_CH0_LOCK (1 << 1)
 #define  SCDC_CH_LOCK_MASK (SCDC_CH2_LOCK | SCDC_CH1_LOCK | SCDC_CH0_LOCK)
 #define  SCDC_CLOCK_DETECT (1 << 0)
 
diff --git a/include/drm/drm_util.h b/include/drm/drm_util.h
index 07b8e9f..79952d8 100644
--- a/include/drm/drm_util.h
+++ b/include/drm/drm_util.h
@@ -41,7 +41,7 @@
  * Use EXPORT_SYMBOL_FOR_TESTS_ONLY() for functions that shall
  * only be visible for drmselftests.
  */
-#if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE)
+#if defined(CONFIG_DRM_EXPORT_FOR_TESTS)
 #define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
 #else
 #define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 684692a..589be85 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -52,9 +52,10 @@ enum drm_sched_priority {
  * @list: used to append this struct to the list of entities in the
  *        runqueue.
  * @rq: runqueue on which this entity is currently scheduled.
- * @rq_list: a list of run queues on which jobs from this entity can
- *           be scheduled
- * @num_rq_list: number of run queues in the rq_list
+ * @sched_list: A list of schedulers (drm_gpu_schedulers).
+ *              Jobs from this entity can be scheduled on any scheduler
+ *              on this list.
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
  * @rq_lock: lock to modify the runqueue to which this entity belongs.
  * @job_queue: the list of jobs of this entity.
  * @fence_seq: a linearly increasing seqno incremented with each
@@ -81,8 +82,9 @@ enum drm_sched_priority {
 struct drm_sched_entity {
 	struct list_head		list;
 	struct drm_sched_rq		*rq;
-	struct drm_sched_rq		**rq_list;
-	unsigned int                    num_rq_list;
+	struct drm_gpu_scheduler        **sched_list;
+	unsigned int                    num_sched_list;
+	enum drm_sched_priority         priority;
 	spinlock_t			rq_lock;
 
 	struct spsc_queue		job_queue;
@@ -260,7 +262,7 @@ struct drm_sched_backend_ops {
  * @job_list_lock: lock to protect the ring_mirror_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
  *              guilty and it will be considered for scheduling further.
- * @num_jobs: the number of jobs in queue in the scheduler
+ * @score: score to help the load balancer pick an idle scheduler
  * @ready: marks if the underlying HW is ready to work
  * @free_guilty: A hint to the timeout handler to free the guilty job.
  *
@@ -281,8 +283,8 @@ struct drm_gpu_scheduler {
 	struct list_head		ring_mirror_list;
 	spinlock_t			job_list_lock;
 	int				hang_limit;
-	atomic_t                        num_jobs;
-	bool			ready;
+	atomic_t                        score;
+	bool				ready;
 	bool				free_guilty;
 };
 
@@ -312,8 +314,9 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 				struct drm_sched_entity *entity);
 
 int drm_sched_entity_init(struct drm_sched_entity *entity,
-			  struct drm_sched_rq **rq_list,
-			  unsigned int num_rq_list,
+			  enum drm_sched_priority priority,
+			  struct drm_gpu_scheduler **sched_list,
+			  unsigned int num_sched_list,
 			  atomic_t *guilty);
 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
 void drm_sched_entity_fini(struct drm_sched_entity *entity);
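
A hedged sketch of a driver adapting to the new drm_sched_entity_init() signature, where the priority is passed explicitly and the list now holds schedulers instead of run queues; the single-ring setup is illustrative:

#include <drm/gpu_scheduler.h>

/* Sketch: bind a scheduler entity to a single hardware ring's scheduler. */
static int sketch_entity_setup(struct drm_sched_entity *entity,
			       struct drm_gpu_scheduler *ring_sched)
{
	struct drm_gpu_scheduler *sched_list[] = { ring_sched };

	return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
				     sched_list, ARRAY_SIZE(sched_list),
				     NULL /* no shared guilty counter */);
}
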
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index b1f66b1..1d2c122 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -446,23 +446,18 @@
 
 /* CML GT1 */
 #define INTEL_CML_GT1_IDS(info)	\
-	INTEL_VGA_DEVICE(0x9B21, info), \
-	INTEL_VGA_DEVICE(0x9BAA, info), \
-	INTEL_VGA_DEVICE(0x9BAB, info), \
-	INTEL_VGA_DEVICE(0x9BAC, info), \
-	INTEL_VGA_DEVICE(0x9BA0, info), \
 	INTEL_VGA_DEVICE(0x9BA5, info), \
 	INTEL_VGA_DEVICE(0x9BA8, info), \
 	INTEL_VGA_DEVICE(0x9BA4, info), \
 	INTEL_VGA_DEVICE(0x9BA2, info)
 
+#define INTEL_CML_U_GT1_IDS(info) \
+	INTEL_VGA_DEVICE(0x9B21, info), \
+	INTEL_VGA_DEVICE(0x9BAA, info), \
+	INTEL_VGA_DEVICE(0x9BAC, info)
+
 /* CML GT2 */
 #define INTEL_CML_GT2_IDS(info)	\
-	INTEL_VGA_DEVICE(0x9B41, info), \
-	INTEL_VGA_DEVICE(0x9BCA, info), \
-	INTEL_VGA_DEVICE(0x9BCB, info), \
-	INTEL_VGA_DEVICE(0x9BCC, info), \
-	INTEL_VGA_DEVICE(0x9BC0, info), \
 	INTEL_VGA_DEVICE(0x9BC5, info), \
 	INTEL_VGA_DEVICE(0x9BC8, info), \
 	INTEL_VGA_DEVICE(0x9BC4, info), \
@@ -471,6 +466,11 @@
 	INTEL_VGA_DEVICE(0x9BE6, info), \
 	INTEL_VGA_DEVICE(0x9BF6, info)
 
+#define INTEL_CML_U_GT2_IDS(info) \
+	INTEL_VGA_DEVICE(0x9B41, info), \
+	INTEL_VGA_DEVICE(0x9BCA, info), \
+	INTEL_VGA_DEVICE(0x9BCC, info)
+
 #define INTEL_KBL_IDS(info) \
 	INTEL_KBL_GT1_IDS(info), \
 	INTEL_KBL_GT2_IDS(info), \
@@ -536,7 +536,9 @@
 	INTEL_WHL_U_GT3_IDS(info), \
 	INTEL_AML_CFL_GT2_IDS(info), \
 	INTEL_CML_GT1_IDS(info), \
-	INTEL_CML_GT2_IDS(info)
+	INTEL_CML_GT2_IDS(info), \
+	INTEL_CML_U_GT1_IDS(info), \
+	INTEL_CML_U_GT2_IDS(info)
 
 /* CNL */
 #define INTEL_CNL_PORT_F_IDS(info) \
@@ -579,12 +581,15 @@
 	INTEL_VGA_DEVICE(0x8A51, info), \
 	INTEL_VGA_DEVICE(0x8A5D, info)
 
-/* EHL */
+/* EHL/JSL */
 #define INTEL_EHL_IDS(info) \
 	INTEL_VGA_DEVICE(0x4500, info),	\
 	INTEL_VGA_DEVICE(0x4571, info), \
 	INTEL_VGA_DEVICE(0x4551, info), \
-	INTEL_VGA_DEVICE(0x4541, info)
+	INTEL_VGA_DEVICE(0x4541, info), \
+	INTEL_VGA_DEVICE(0x4E71, info), \
+	INTEL_VGA_DEVICE(0x4E61, info), \
+	INTEL_VGA_DEVICE(0x4E51, info)
 
 /* TGL */
 #define INTEL_TGL_12_IDS(info) \
diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h
new file mode 100644
index 0000000..087e3f6
--- /dev/null
+++ b/include/drm/task_barrier.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/semaphore.h>
+#include <linux/atomic.h>
+
+/*
+ * Reusable two-phase task barrier (rendezvous point) implementation for N tasks.
+ * Based on The Little Book of Semaphores - https://greenteapress.com/wp/semaphores/
+ */
+
+
+
+#ifndef DRM_TASK_BARRIER_H_
+#define DRM_TASK_BARRIER_H_
+
+/*
+ * Represents an instance of a task barrier.
+ */
+struct task_barrier {
+	unsigned int n;
+	atomic_t count;
+	struct semaphore enter_turnstile;
+	struct semaphore exit_turnstile;
+};
+
+static inline void task_barrier_signal_turnstile(struct semaphore *turnstile,
+						 unsigned int n)
+{
+	int i;
+
+	for (i = 0 ; i < n; i++)
+		up(turnstile);
+}
+
+static inline void task_barrier_init(struct task_barrier *tb)
+{
+	tb->n = 0;
+	atomic_set(&tb->count, 0);
+	sema_init(&tb->enter_turnstile, 0);
+	sema_init(&tb->exit_turnstile, 0);
+}
+
+static inline void task_barrier_add_task(struct task_barrier *tb)
+{
+	tb->n++;
+}
+
+static inline void task_barrier_rem_task(struct task_barrier *tb)
+{
+	tb->n--;
+}
+
+/*
+ * Lines up all the threads BEFORE the critical point.
+ *
+ * When all threads have passed this point, the entry barrier is back in its locked state.
+ */
+static inline void task_barrier_enter(struct task_barrier *tb)
+{
+	if (atomic_inc_return(&tb->count) == tb->n)
+		task_barrier_signal_turnstile(&tb->enter_turnstile, tb->n);
+
+	down(&tb->enter_turnstile);
+}
+
+/*
+ * Lines up all the threads AFTER the critical point.
+ *
+ * This function is used to avoid any one thread running ahead when the barrier
+ * is used repeatedly.
+ */
+static inline void task_barrier_exit(struct task_barrier *tb)
+{
+	if (atomic_dec_return(&tb->count) == 0)
+		task_barrier_signal_turnstile(&tb->exit_turnstile, tb->n);
+
+	down(&tb->exit_turnstile);
+}
+
+/* Convenience function for when nothing needs to be done between entry and exit */
+static inline void task_barrier_full(struct task_barrier *tb)
+{
+	task_barrier_enter(tb);
+	task_barrier_exit(tb);
+}
+
+#endif
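
Since the file is entirely new, a short usage sketch may help: each of the N cooperating threads is registered once, then brackets its critical step with enter/exit so that no thread runs a phase ahead of the others. Thread creation and management are outside the header and illustrative here:

#include <drm/task_barrier.h>

static struct task_barrier sketch_tb;

/* Sketch: one-time setup for n cooperating threads. */
static void sketch_barrier_setup(unsigned int n)
{
	unsigned int i;

	task_barrier_init(&sketch_tb);
	for (i = 0; i < n; i++)
		task_barrier_add_task(&sketch_tb);
}

/* Sketch: body executed by each registered thread, possibly many times. */
static void sketch_barrier_worker(void)
{
	task_barrier_enter(&sketch_tb);		/* wait for all n to arrive */
	/* ... critical step performed by every thread in lockstep ... */
	task_barrier_exit(&sketch_tb);		/* wait for all n to finish */
}
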
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 65e399d..66ca49d 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -154,7 +154,6 @@ struct ttm_tt;
  * @offset: The current GPU offset, which can have different meanings
  * depending on the memory type. For SYSTEM type memory, it should be 0.
  * @cur_placement: Hint of current placement.
- * @wu_mutex: Wait unreserved mutex.
  *
  * Base class for TTM buffer object, that deals with data placement and CPU
  * mappings. GPU mappings are really up to the driver, but for simpler GPUs
@@ -222,8 +221,6 @@ struct ttm_buffer_object {
 	uint64_t offset; /* GPU address space is independent of CPU word size */
 
 	struct sg_table *sg;
-
-	struct mutex wu_mutex;
 };
 
 /**
@@ -707,7 +704,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 int ttm_bo_swapout(struct ttm_bo_global *glob,
 			struct ttm_operation_ctx *ctx);
 void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
 
 /**
  * ttm_bo_uses_embedded_gem_object - check if the given bo uses the
@@ -738,7 +734,13 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 				    pgprot_t prot,
 				    pgoff_t num_prefault);
 
+vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
+
 void ttm_bo_vm_open(struct vm_area_struct *vma);
 
 void ttm_bo_vm_close(struct vm_area_struct *vma);
+
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+		     void *buf, int len, int write);
+
 #endif
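
The newly exported fault/access helpers above let a driver build its private vm_operations_struct directly from TTM's implementation instead of the removed wait-unreserved path; a minimal sketch:

#include <linux/mm.h>
#include <drm/ttm/ttm_bo_api.h>

/* Sketch: driver-private VM ops assembled from the exported TTM helpers. */
static const struct vm_operations_struct sketch_ttm_vm_ops = {
	.fault	= ttm_bo_vm_fault,
	.open	= ttm_bo_vm_open,
	.close	= ttm_bo_vm_close,
	.access	= ttm_bo_vm_access,
};
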
diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h
index 72f2e84..8cec5a1 100644
--- a/include/dt-bindings/clock/dra7.h
+++ b/include/dt-bindings/clock/dra7.h
@@ -29,6 +29,16 @@
 #define DRA7_RTC_CLKCTRL_INDEX(offset)	((offset) - DRA7_RTC_CLKCTRL_OFFSET)
 #define DRA7_RTCSS_CLKCTRL	DRA7_RTC_CLKCTRL_INDEX(0x44)
 
+/* vip clocks */
+#define DRA7_VIP1_CLKCTRL	DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_VIP2_CLKCTRL	DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_VIP3_CLKCTRL	DRA7_CLKCTRL_INDEX(0x30)
+
+/* vpe clocks */
+#define DRA7_VPE_CLKCTRL_OFFSET	0x60
+#define DRA7_VPE_CLKCTRL_INDEX(offset)	((offset) - DRA7_VPE_CLKCTRL_OFFSET)
+#define DRA7_VPE_CLKCTRL	DRA7_VPE_CLKCTRL_INDEX(0x64)
+
 /* coreaon clocks */
 #define DRA7_SMARTREFLEX_MPU_CLKCTRL	DRA7_CLKCTRL_INDEX(0x28)
 #define DRA7_SMARTREFLEX_CORE_CLKCTRL	DRA7_CLKCTRL_INDEX(0x38)
@@ -78,6 +88,9 @@
 #define DRA7_DSS_CORE_CLKCTRL	DRA7_CLKCTRL_INDEX(0x20)
 #define DRA7_BB2D_CLKCTRL	DRA7_CLKCTRL_INDEX(0x30)
 
+/* gpu clocks */
+#define DRA7_GPU_CLKCTRL	DRA7_CLKCTRL_INDEX(0x20)
+
 /* l3init clocks */
 #define DRA7_MMC1_CLKCTRL	DRA7_CLKCTRL_INDEX(0x28)
 #define DRA7_MMC2_CLKCTRL	DRA7_CLKCTRL_INDEX(0x30)
@@ -192,6 +205,16 @@
 /* rtc clocks */
 #define DRA7_RTC_RTCSS_CLKCTRL	DRA7_CLKCTRL_INDEX(0x44)
 
+/* vip clocks */
+#define DRA7_CAM_VIP1_CLKCTRL	DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_CAM_VIP2_CLKCTRL	DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_CAM_VIP3_CLKCTRL	DRA7_CLKCTRL_INDEX(0x30)
+
+/* vpe clocks */
+#define DRA7_VPE_CLKCTRL_OFFSET	0x60
+#define DRA7_VPE_CLKCTRL_INDEX(offset)	((offset) - DRA7_VPE_CLKCTRL_OFFSET)
+#define DRA7_VPE_VPE_CLKCTRL	DRA7_VPE_CLKCTRL_INDEX(0x64)
+
 /* coreaon clocks */
 #define DRA7_COREAON_SMARTREFLEX_MPU_CLKCTRL	DRA7_CLKCTRL_INDEX(0x28)
 #define DRA7_COREAON_SMARTREFLEX_CORE_CLKCTRL	DRA7_CLKCTRL_INDEX(0x38)
diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
new file mode 100644
index 0000000..2fab631
--- /dev/null
+++ b/include/dt-bindings/clock/imx8mp-clock.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8MP_H
+#define __DT_BINDINGS_CLOCK_IMX8MP_H
+
+#define IMX8MP_CLK_DUMMY			0
+#define IMX8MP_CLK_32K				1
+#define IMX8MP_CLK_24M				2
+#define IMX8MP_OSC_HDMI_CLK			3
+#define IMX8MP_CLK_EXT1				4
+#define IMX8MP_CLK_EXT2				5
+#define IMX8MP_CLK_EXT3				6
+#define IMX8MP_CLK_EXT4				7
+#define IMX8MP_AUDIO_PLL1_REF_SEL		8
+#define IMX8MP_AUDIO_PLL2_REF_SEL		9
+#define IMX8MP_VIDEO_PLL1_REF_SEL		10
+#define IMX8MP_DRAM_PLL_REF_SEL			11
+#define IMX8MP_GPU_PLL_REF_SEL			12
+#define IMX8MP_VPU_PLL_REF_SEL			13
+#define IMX8MP_ARM_PLL_REF_SEL			14
+#define IMX8MP_SYS_PLL1_REF_SEL			15
+#define IMX8MP_SYS_PLL2_REF_SEL			16
+#define IMX8MP_SYS_PLL3_REF_SEL			17
+#define IMX8MP_AUDIO_PLL1			18
+#define IMX8MP_AUDIO_PLL2			19
+#define IMX8MP_VIDEO_PLL1			20
+#define IMX8MP_DRAM_PLL				21
+#define IMX8MP_GPU_PLL				22
+#define IMX8MP_VPU_PLL				23
+#define IMX8MP_ARM_PLL				24
+#define IMX8MP_SYS_PLL1				25
+#define IMX8MP_SYS_PLL2				26
+#define IMX8MP_SYS_PLL3				27
+#define IMX8MP_AUDIO_PLL1_BYPASS		28
+#define IMX8MP_AUDIO_PLL2_BYPASS		29
+#define IMX8MP_VIDEO_PLL1_BYPASS		30
+#define IMX8MP_DRAM_PLL_BYPASS			31
+#define IMX8MP_GPU_PLL_BYPASS			32
+#define IMX8MP_VPU_PLL_BYPASS			33
+#define IMX8MP_ARM_PLL_BYPASS			34
+#define IMX8MP_SYS_PLL1_BYPASS			35
+#define IMX8MP_SYS_PLL2_BYPASS			36
+#define IMX8MP_SYS_PLL3_BYPASS			37
+#define IMX8MP_AUDIO_PLL1_OUT			38
+#define IMX8MP_AUDIO_PLL2_OUT			39
+#define IMX8MP_VIDEO_PLL1_OUT			40
+#define IMX8MP_DRAM_PLL_OUT			41
+#define IMX8MP_GPU_PLL_OUT			42
+#define IMX8MP_VPU_PLL_OUT			43
+#define IMX8MP_ARM_PLL_OUT			44
+#define IMX8MP_SYS_PLL1_OUT			45
+#define IMX8MP_SYS_PLL2_OUT			46
+#define IMX8MP_SYS_PLL3_OUT			47
+#define IMX8MP_SYS_PLL1_40M			48
+#define IMX8MP_SYS_PLL1_80M			49
+#define IMX8MP_SYS_PLL1_100M			50
+#define IMX8MP_SYS_PLL1_133M			51
+#define IMX8MP_SYS_PLL1_160M			52
+#define IMX8MP_SYS_PLL1_200M			53
+#define IMX8MP_SYS_PLL1_266M			54
+#define IMX8MP_SYS_PLL1_400M			55
+#define IMX8MP_SYS_PLL1_800M			56
+#define IMX8MP_SYS_PLL2_50M			57
+#define IMX8MP_SYS_PLL2_100M			58
+#define IMX8MP_SYS_PLL2_125M			59
+#define IMX8MP_SYS_PLL2_166M			60
+#define IMX8MP_SYS_PLL2_200M			61
+#define IMX8MP_SYS_PLL2_250M			62
+#define IMX8MP_SYS_PLL2_333M			63
+#define IMX8MP_SYS_PLL2_500M			64
+#define IMX8MP_SYS_PLL2_1000M			65
+#define IMX8MP_CLK_A53_SRC			66
+#define IMX8MP_CLK_M7_SRC			67
+#define IMX8MP_CLK_ML_SRC			68
+#define IMX8MP_CLK_GPU3D_CORE_SRC		69
+#define IMX8MP_CLK_GPU3D_SHADER_SRC		70
+#define IMX8MP_CLK_GPU2D_SRC			71
+#define IMX8MP_CLK_AUDIO_AXI_SRC		72
+#define IMX8MP_CLK_HSIO_AXI_SRC			73
+#define IMX8MP_CLK_MEDIA_ISP_SRC		74
+#define IMX8MP_CLK_A53_CG			75
+#define IMX8MP_CLK_M4_CG			76
+#define IMX8MP_CLK_ML_CG			77
+#define IMX8MP_CLK_GPU3D_CORE_CG		78
+#define IMX8MP_CLK_GPU3D_SHADER_CG		79
+#define IMX8MP_CLK_GPU2D_CG			80
+#define IMX8MP_CLK_AUDIO_AXI_CG			81
+#define IMX8MP_CLK_HSIO_AXI_CG			82
+#define IMX8MP_CLK_MEDIA_ISP_CG			83
+#define IMX8MP_CLK_A53_DIV			84
+#define IMX8MP_CLK_M7_DIV			85
+#define IMX8MP_CLK_ML_DIV			86
+#define IMX8MP_CLK_GPU3D_CORE_DIV		87
+#define IMX8MP_CLK_GPU3D_SHADER_DIV		88
+#define IMX8MP_CLK_GPU2D_DIV			89
+#define IMX8MP_CLK_AUDIO_AXI_DIV		90
+#define IMX8MP_CLK_HSIO_AXI_DIV			91
+#define IMX8MP_CLK_MEDIA_ISP_DIV		92
+#define IMX8MP_CLK_MAIN_AXI			93
+#define IMX8MP_CLK_ENET_AXI			94
+#define IMX8MP_CLK_NAND_USDHC_BUS		95
+#define IMX8MP_CLK_VPU_BUS			96
+#define IMX8MP_CLK_MEDIA_AXI			97
+#define IMX8MP_CLK_MEDIA_APB			98
+#define IMX8MP_CLK_HDMI_APB			99
+#define IMX8MP_CLK_HDMI_AXI			100
+#define IMX8MP_CLK_GPU_AXI			101
+#define IMX8MP_CLK_GPU_AHB			102
+#define IMX8MP_CLK_NOC				103
+#define IMX8MP_CLK_NOC_IO			104
+#define IMX8MP_CLK_ML_AXI			105
+#define IMX8MP_CLK_ML_AHB			106
+#define IMX8MP_CLK_AHB				107
+#define IMX8MP_CLK_AUDIO_AHB			108
+#define IMX8MP_CLK_MIPI_DSI_ESC_RX		109
+#define IMX8MP_CLK_IPG_ROOT			110
+#define IMX8MP_CLK_IPG_AUDIO_ROOT		111
+#define IMX8MP_CLK_DRAM_ALT			112
+#define IMX8MP_CLK_DRAM_APB			113
+#define IMX8MP_CLK_VPU_G1			114
+#define IMX8MP_CLK_VPU_G2			115
+#define IMX8MP_CLK_CAN1				116
+#define IMX8MP_CLK_CAN2				117
+#define IMX8MP_CLK_MEMREPAIR			118
+#define IMX8MP_CLK_PCIE_PHY			119
+#define IMX8MP_CLK_PCIE_AUX			120
+#define IMX8MP_CLK_I2C5				121
+#define IMX8MP_CLK_I2C6				122
+#define IMX8MP_CLK_SAI1				123
+#define IMX8MP_CLK_SAI2				124
+#define IMX8MP_CLK_SAI3				125
+#define IMX8MP_CLK_SAI4				126
+#define IMX8MP_CLK_SAI5				127
+#define IMX8MP_CLK_SAI6				128
+#define IMX8MP_CLK_ENET_QOS			129
+#define IMX8MP_CLK_ENET_QOS_TIMER		130
+#define IMX8MP_CLK_ENET_REF			131
+#define IMX8MP_CLK_ENET_TIMER			132
+#define IMX8MP_CLK_ENET_PHY_REF			133
+#define IMX8MP_CLK_NAND				134
+#define IMX8MP_CLK_QSPI				135
+#define IMX8MP_CLK_USDHC1			136
+#define IMX8MP_CLK_USDHC2			137
+#define IMX8MP_CLK_I2C1				138
+#define IMX8MP_CLK_I2C2				139
+#define IMX8MP_CLK_I2C3				140
+#define IMX8MP_CLK_I2C4				141
+#define IMX8MP_CLK_UART1			142
+#define IMX8MP_CLK_UART2			143
+#define IMX8MP_CLK_UART3			144
+#define IMX8MP_CLK_UART4			145
+#define IMX8MP_CLK_USB_CORE_REF			146
+#define IMX8MP_CLK_USB_PHY_REF			147
+#define IMX8MP_CLK_GIC				148
+#define IMX8MP_CLK_ECSPI1			149
+#define IMX8MP_CLK_ECSPI2			150
+#define IMX8MP_CLK_PWM1				151
+#define IMX8MP_CLK_PWM2				152
+#define IMX8MP_CLK_PWM3				153
+#define IMX8MP_CLK_PWM4				154
+#define IMX8MP_CLK_GPT1				155
+#define IMX8MP_CLK_GPT2				156
+#define IMX8MP_CLK_GPT3				157
+#define IMX8MP_CLK_GPT4				158
+#define IMX8MP_CLK_GPT5				159
+#define IMX8MP_CLK_GPT6				160
+#define IMX8MP_CLK_TRACE			161
+#define IMX8MP_CLK_WDOG				162
+#define IMX8MP_CLK_WRCLK			163
+#define IMX8MP_CLK_IPP_DO_CLKO1			164
+#define IMX8MP_CLK_IPP_DO_CLKO2			165
+#define IMX8MP_CLK_HDMI_FDCC_TST		166
+#define IMX8MP_CLK_HDMI_27M			167
+#define IMX8MP_CLK_HDMI_REF_266M		168
+#define IMX8MP_CLK_USDHC3			169
+#define IMX8MP_CLK_MEDIA_CAM1_PIX		170
+#define IMX8MP_CLK_MEDIA_MIPI_PHY1_REF		171
+#define IMX8MP_CLK_MEDIA_DISP1_PIX		172
+#define IMX8MP_CLK_MEDIA_CAM2_PIX		173
+#define IMX8MP_CLK_MEDIA_MIPI_PHY2_REF		174
+#define IMX8MP_CLK_MEDIA_MIPI_CSI2_ESC		175
+#define IMX8MP_CLK_PCIE2_CTRL			176
+#define IMX8MP_CLK_PCIE2_PHY			177
+#define IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE		178
+#define IMX8MP_CLK_ECSPI3			179
+#define IMX8MP_CLK_PDM				180
+#define IMX8MP_CLK_VPU_VC8000E			181
+#define IMX8MP_CLK_SAI7				182
+#define IMX8MP_CLK_GPC_ROOT			183
+#define IMX8MP_CLK_ANAMIX_ROOT			184
+#define IMX8MP_CLK_CPU_ROOT			185
+#define IMX8MP_CLK_CSU_ROOT			186
+#define IMX8MP_CLK_DEBUG_ROOT			187
+#define IMX8MP_CLK_DRAM1_ROOT			188
+#define IMX8MP_CLK_ECSPI1_ROOT			189
+#define IMX8MP_CLK_ECSPI2_ROOT			190
+#define IMX8MP_CLK_ECSPI3_ROOT			191
+#define IMX8MP_CLK_ENET1_ROOT			192
+#define IMX8MP_CLK_GPIO1_ROOT			193
+#define IMX8MP_CLK_GPIO2_ROOT			194
+#define IMX8MP_CLK_GPIO3_ROOT			195
+#define IMX8MP_CLK_GPIO4_ROOT			196
+#define IMX8MP_CLK_GPIO5_ROOT			197
+#define IMX8MP_CLK_GPT1_ROOT			198
+#define IMX8MP_CLK_GPT2_ROOT			199
+#define IMX8MP_CLK_GPT3_ROOT			200
+#define IMX8MP_CLK_GPT4_ROOT			201
+#define IMX8MP_CLK_GPT5_ROOT			202
+#define IMX8MP_CLK_GPT6_ROOT			203
+#define IMX8MP_CLK_HS_ROOT			204
+#define IMX8MP_CLK_I2C1_ROOT			205
+#define IMX8MP_CLK_I2C2_ROOT			206
+#define IMX8MP_CLK_I2C3_ROOT			207
+#define IMX8MP_CLK_I2C4_ROOT			208
+#define IMX8MP_CLK_IOMUX_ROOT			209
+#define IMX8MP_CLK_IPMUX1_ROOT			210
+#define IMX8MP_CLK_IPMUX2_ROOT			211
+#define IMX8MP_CLK_IPMUX3_ROOT			212
+#define IMX8MP_CLK_MU_ROOT			213
+#define IMX8MP_CLK_OCOTP_ROOT			214
+#define IMX8MP_CLK_OCRAM_ROOT			215
+#define IMX8MP_CLK_OCRAM_S_ROOT			216
+#define IMX8MP_CLK_PCIE_ROOT			217
+#define IMX8MP_CLK_PERFMON1_ROOT		218
+#define IMX8MP_CLK_PERFMON2_ROOT		219
+#define IMX8MP_CLK_PWM1_ROOT			220
+#define IMX8MP_CLK_PWM2_ROOT			221
+#define IMX8MP_CLK_PWM3_ROOT			222
+#define IMX8MP_CLK_PWM4_ROOT			223
+#define IMX8MP_CLK_QOS_ROOT			224
+#define IMX8MP_CLK_QOS_ENET_ROOT		225
+#define IMX8MP_CLK_QSPI_ROOT			226
+#define IMX8MP_CLK_NAND_ROOT			227
+#define IMX8MP_CLK_NAND_USDHC_BUS_RAWNAND_CLK	228
+#define IMX8MP_CLK_RDC_ROOT			229
+#define IMX8MP_CLK_ROM_ROOT			230
+#define IMX8MP_CLK_I2C5_ROOT			231
+#define IMX8MP_CLK_I2C6_ROOT			232
+#define IMX8MP_CLK_CAN1_ROOT			233
+#define IMX8MP_CLK_CAN2_ROOT			234
+#define IMX8MP_CLK_SCTR_ROOT			235
+#define IMX8MP_CLK_SDMA1_ROOT			236
+#define IMX8MP_CLK_ENET_QOS_ROOT		237
+#define IMX8MP_CLK_SEC_DEBUG_ROOT		238
+#define IMX8MP_CLK_SEMA1_ROOT			239
+#define IMX8MP_CLK_SEMA2_ROOT			240
+#define IMX8MP_CLK_IRQ_STEER_ROOT		241
+#define IMX8MP_CLK_SIM_ENET_ROOT		242
+#define IMX8MP_CLK_SIM_M_ROOT			243
+#define IMX8MP_CLK_SIM_MAIN_ROOT		244
+#define IMX8MP_CLK_SIM_S_ROOT			245
+#define IMX8MP_CLK_SIM_WAKEUP_ROOT		246
+#define IMX8MP_CLK_GPU2D_ROOT			247
+#define IMX8MP_CLK_GPU3D_ROOT			248
+#define IMX8MP_CLK_SNVS_ROOT			249
+#define IMX8MP_CLK_TRACE_ROOT			250
+#define IMX8MP_CLK_UART1_ROOT			251
+#define IMX8MP_CLK_UART2_ROOT			252
+#define IMX8MP_CLK_UART3_ROOT			253
+#define IMX8MP_CLK_UART4_ROOT			254
+#define IMX8MP_CLK_USB_ROOT			255
+#define IMX8MP_CLK_USB_PHY_ROOT			256
+#define IMX8MP_CLK_USDHC1_ROOT			257
+#define IMX8MP_CLK_USDHC2_ROOT			258
+#define IMX8MP_CLK_WDOG1_ROOT			259
+#define IMX8MP_CLK_WDOG2_ROOT			260
+#define IMX8MP_CLK_WDOG3_ROOT			261
+#define IMX8MP_CLK_VPU_G1_ROOT			262
+#define IMX8MP_CLK_GPU_ROOT			263
+#define IMX8MP_CLK_NOC_WRAPPER_ROOT		264
+#define IMX8MP_CLK_VPU_VC8KE_ROOT		265
+#define IMX8MP_CLK_VPU_G2_ROOT			266
+#define IMX8MP_CLK_NPU_ROOT			267
+#define IMX8MP_CLK_HSIO_ROOT			268
+#define IMX8MP_CLK_MEDIA_APB_ROOT		269
+#define IMX8MP_CLK_MEDIA_AXI_ROOT		270
+#define IMX8MP_CLK_MEDIA_CAM1_PIX_ROOT		271
+#define IMX8MP_CLK_MEDIA_CAM2_PIX_ROOT		272
+#define IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT		273
+#define IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT		274
+#define IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT	275
+#define IMX8MP_CLK_MEDIA_ISP_ROOT		276
+#define IMX8MP_CLK_USDHC3_ROOT			277
+#define IMX8MP_CLK_HDMI_ROOT			278
+#define IMX8MP_CLK_XTAL_ROOT			279
+#define IMX8MP_CLK_PLL_ROOT			280
+#define IMX8MP_CLK_TSENSOR_ROOT			281
+#define IMX8MP_CLK_VPU_ROOT			282
+#define IMX8MP_CLK_MRPR_ROOT			283
+#define IMX8MP_CLK_AUDIO_ROOT			284
+#define IMX8MP_CLK_DRAM_ALT_ROOT		285
+#define IMX8MP_CLK_DRAM_CORE			286
+#define IMX8MP_CLK_ARM				287
+
+#define IMX8MP_CLK_END				288
+
+#endif
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index e785c6e..4b1a772 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -72,6 +72,8 @@
 #define MMP2_CLK_CCIC1_PHY		118
 #define MMP2_CLK_CCIC1_SPHY		119
 #define MMP2_CLK_DISP0_LCDC		120
+#define MMP2_CLK_USBHSIC0		121
+#define MMP2_CLK_USBHSIC1		122
 
 #define MMP2_NR_CLKS			200
 #endif
diff --git a/include/dt-bindings/clock/meson8-ddr-clkc.h b/include/dt-bindings/clock/meson8-ddr-clkc.h
new file mode 100644
index 0000000..a8e0fa29
--- /dev/null
+++ b/include/dt-bindings/clock/meson8-ddr-clkc.h
@@ -0,0 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define DDR_CLKID_DDR_PLL_DCO			0
+#define DDR_CLKID_DDR_PLL			1
diff --git a/include/dt-bindings/clock/omap4.h b/include/dt-bindings/clock/omap4.h
index 5167b2d..88d73be 100644
--- a/include/dt-bindings/clock/omap4.h
+++ b/include/dt-bindings/clock/omap4.h
@@ -124,6 +124,17 @@
 #define OMAP4_UART4_CLKCTRL	OMAP4_CLKCTRL_INDEX(0x158)
 #define OMAP4_MMC5_CLKCTRL	OMAP4_CLKCTRL_INDEX(0x160)
 
+/* l4_secure clocks */
+#define OMAP4_L4_SECURE_CLKCTRL_OFFSET	0x1a0
+#define OMAP4_L4_SECURE_CLKCTRL_INDEX(offset)	((offset) - OMAP4_L4_SECURE_CLKCTRL_OFFSET)
+#define OMAP4_AES1_CLKCTRL	OMAP4_L4_SECURE_CLKCTRL_INDEX(0x1a0)
+#define OMAP4_AES2_CLKCTRL	OMAP4_L4_SECURE_CLKCTRL_INDEX(0x1a8)
+#define OMAP4_DES3DES_CLKCTRL	OMAP4_L4_SECURE_CLKCTRL_INDEX(0x1b0)
+#define OMAP4_PKA_CLKCTRL	OMAP4_L4_SECURE_CLKCTRL_INDEX(0x1b8)
+#define OMAP4_RNG_CLKCTRL	OMAP4_L4_SECURE_CLKCTRL_INDEX(0x1c0)
+#define OMAP4_SHA2MD5_CLKCTRL	OMAP4_L4_SECURE_CLKCTRL_INDEX(0x1c8)
+#define OMAP4_CRYPTODMA_CLKCTRL	OMAP4_L4_SECURE_CLKCTRL_INDEX(0x1d8)
+
 /* l4_wkup clocks */
 #define OMAP4_L4_WKUP_CLKCTRL	OMAP4_CLKCTRL_INDEX(0x20)
 #define OMAP4_WD_TIMER2_CLKCTRL	OMAP4_CLKCTRL_INDEX(0x30)
diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h
index ba67206..4177527 100644
--- a/include/dt-bindings/clock/omap5.h
+++ b/include/dt-bindings/clock/omap5.h
@@ -16,6 +16,7 @@
 
 /* abe clocks */
 #define OMAP5_L4_ABE_CLKCTRL	OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_AESS_CLKCTRL	OMAP5_CLKCTRL_INDEX(0x28)
 #define OMAP5_MCPDM_CLKCTRL	OMAP5_CLKCTRL_INDEX(0x30)
 #define OMAP5_DMIC_CLKCTRL	OMAP5_CLKCTRL_INDEX(0x38)
 #define OMAP5_MCBSP1_CLKCTRL	OMAP5_CLKCTRL_INDEX(0x48)
@@ -86,6 +87,17 @@
 #define OMAP5_UART5_CLKCTRL	OMAP5_CLKCTRL_INDEX(0x170)
 #define OMAP5_UART6_CLKCTRL	OMAP5_CLKCTRL_INDEX(0x178)
 
+/* l4_secure clocks */
+#define OMAP5_L4_SECURE_CLKCTRL_OFFSET	0x1a0
+#define OMAP5_L4_SECURE_CLKCTRL_INDEX(offset)	((offset) - OMAP5_L4_SECURE_CLKCTRL_OFFSET)
+#define OMAP5_AES1_CLKCTRL	OMAP5_L4_SECURE_CLKCTRL_INDEX(0x1a0)
+#define OMAP5_AES2_CLKCTRL	OMAP5_L4_SECURE_CLKCTRL_INDEX(0x1a8)
+#define OMAP5_DES3DES_CLKCTRL	OMAP5_L4_SECURE_CLKCTRL_INDEX(0x1b0)
+#define OMAP5_FPKA_CLKCTRL	OMAP5_L4_SECURE_CLKCTRL_INDEX(0x1b8)
+#define OMAP5_RNG_CLKCTRL	OMAP5_L4_SECURE_CLKCTRL_INDEX(0x1c0)
+#define OMAP5_SHA2MD5_CLKCTRL	OMAP5_L4_SECURE_CLKCTRL_INDEX(0x1c8)
+#define OMAP5_DMA_CRYPTO_CLKCTRL	OMAP5_L4_SECURE_CLKCTRL_INDEX(0x1d8)
+
 /* iva clocks */
 #define OMAP5_IVA_CLKCTRL	OMAP5_CLKCTRL_INDEX(0x20)
 #define OMAP5_SL2IF_CLKCTRL	OMAP5_CLKCTRL_INDEX(0x28)
diff --git a/include/dt-bindings/clock/qcom,dispcc-sc7180.h b/include/dt-bindings/clock/qcom,dispcc-sc7180.h
new file mode 100644
index 0000000..b9b51617
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sc7180.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SC7180_H
+
+#define DISP_CC_PLL0				0
+#define DISP_CC_PLL0_OUT_EVEN			1
+#define DISP_CC_MDSS_AHB_CLK			2
+#define DISP_CC_MDSS_AHB_CLK_SRC		3
+#define DISP_CC_MDSS_BYTE0_CLK			4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC		5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC		6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK		7
+#define DISP_CC_MDSS_DP_AUX_CLK			8
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC		9
+#define DISP_CC_MDSS_DP_CRYPTO_CLK		10
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC		11
+#define DISP_CC_MDSS_DP_LINK_CLK		12
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC		13
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC	14
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK		15
+#define DISP_CC_MDSS_DP_PIXEL_CLK		16
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC		17
+#define DISP_CC_MDSS_ESC0_CLK			18
+#define DISP_CC_MDSS_ESC0_CLK_SRC		19
+#define DISP_CC_MDSS_MDP_CLK			20
+#define DISP_CC_MDSS_MDP_CLK_SRC		21
+#define DISP_CC_MDSS_MDP_LUT_CLK		22
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK		23
+#define DISP_CC_MDSS_PCLK0_CLK			24
+#define DISP_CC_MDSS_PCLK0_CLK_SRC		25
+#define DISP_CC_MDSS_ROT_CLK			26
+#define DISP_CC_MDSS_ROT_CLK_SRC		27
+#define DISP_CC_MDSS_RSCC_AHB_CLK		28
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK		29
+#define DISP_CC_MDSS_VSYNC_CLK			30
+#define DISP_CC_MDSS_VSYNC_CLK_SRC		31
+#define DISP_CC_XO_CLK				32
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC				0
+
+#endif
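Note on usage: these IDs index the clock and power-domain specifier cells of the SC7180 display clock controller. A hedged consumer sketch (the &dispcc label and the node shown are placeholders):

	display-subsystem {
		clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
			 <&dispcc DISP_CC_MDSS_MDP_CLK>;
		clock-names = "iface", "core";
		power-domains = <&dispcc MDSS_GDSC>;
	};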
diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
index 11eed4b..4016fd1 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H
@@ -35,6 +35,17 @@
 #define DISP_CC_PLL0						25
 #define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC				26
 #define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC				27
+#define DISP_CC_MDSS_DP_AUX_CLK					28
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC				29
+#define DISP_CC_MDSS_DP_CRYPTO_CLK				30
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC				31
+#define DISP_CC_MDSS_DP_LINK_CLK				32
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC				33
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK				34
+#define DISP_CC_MDSS_DP_PIXEL1_CLK				35
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC				36
+#define DISP_CC_MDSS_DP_PIXEL_CLK				37
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC				38
 
 /* DISP_CC Reset */
 #define DISP_CC_MDSS_RSCC_BCR					0
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq6018.h b/include/dt-bindings/clock/qcom,gcc-ipq6018.h
new file mode 100644
index 0000000..6f4be3a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-ipq6018.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_6018_H
+#define _DT_BINDINGS_CLOCK_IPQ_GCC_6018_H
+
+#define GPLL0					0
+#define UBI32_PLL				1
+#define GPLL6					2
+#define GPLL4					3
+#define PCNOC_BFDCD_CLK_SRC			4
+#define GPLL2					5
+#define NSS_CRYPTO_PLL				6
+#define NSS_PPE_CLK_SRC				7
+#define GCC_XO_CLK_SRC				8
+#define NSS_CE_CLK_SRC				9
+#define GCC_SLEEP_CLK_SRC			10
+#define APSS_AHB_CLK_SRC			11
+#define NSS_PORT5_RX_CLK_SRC			12
+#define NSS_PORT5_TX_CLK_SRC			13
+#define PCIE0_AXI_CLK_SRC			14
+#define USB0_MASTER_CLK_SRC			15
+#define APSS_AHB_POSTDIV_CLK_SRC		16
+#define NSS_PORT1_RX_CLK_SRC			17
+#define NSS_PORT1_TX_CLK_SRC			18
+#define NSS_PORT2_RX_CLK_SRC			19
+#define NSS_PORT2_TX_CLK_SRC			20
+#define NSS_PORT3_RX_CLK_SRC			21
+#define NSS_PORT3_TX_CLK_SRC			22
+#define NSS_PORT4_RX_CLK_SRC			23
+#define NSS_PORT4_TX_CLK_SRC			24
+#define NSS_PORT5_RX_DIV_CLK_SRC		25
+#define NSS_PORT5_TX_DIV_CLK_SRC		26
+#define APSS_AXI_CLK_SRC			27
+#define NSS_CRYPTO_CLK_SRC			28
+#define NSS_PORT1_RX_DIV_CLK_SRC		29
+#define NSS_PORT1_TX_DIV_CLK_SRC		30
+#define NSS_PORT2_RX_DIV_CLK_SRC		31
+#define NSS_PORT2_TX_DIV_CLK_SRC		32
+#define NSS_PORT3_RX_DIV_CLK_SRC		33
+#define NSS_PORT3_TX_DIV_CLK_SRC		34
+#define NSS_PORT4_RX_DIV_CLK_SRC		35
+#define NSS_PORT4_TX_DIV_CLK_SRC		36
+#define NSS_UBI0_CLK_SRC			37
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC		38
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC		39
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC		40
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC		41
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC		42
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC		43
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC		44
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC		45
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC		46
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC		47
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC		48
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC		49
+#define BLSP1_UART1_APPS_CLK_SRC		50
+#define BLSP1_UART2_APPS_CLK_SRC		51
+#define BLSP1_UART3_APPS_CLK_SRC		52
+#define BLSP1_UART4_APPS_CLK_SRC		53
+#define BLSP1_UART5_APPS_CLK_SRC		54
+#define BLSP1_UART6_APPS_CLK_SRC		55
+#define CRYPTO_CLK_SRC				56
+#define NSS_UBI0_DIV_CLK_SRC			57
+#define PCIE0_AUX_CLK_SRC			58
+#define PCIE0_PIPE_CLK_SRC			59
+#define SDCC1_APPS_CLK_SRC			60
+#define USB0_AUX_CLK_SRC			61
+#define USB0_MOCK_UTMI_CLK_SRC			62
+#define USB0_PIPE_CLK_SRC			63
+#define USB1_MOCK_UTMI_CLK_SRC			64
+#define GCC_APSS_AHB_CLK			65
+#define GCC_APSS_AXI_CLK			66
+#define GCC_BLSP1_AHB_CLK			67
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK		68
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK		69
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK		70
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK		71
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK		72
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK		73
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK		74
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK		75
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK		76
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK		77
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK		78
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK		79
+#define GCC_BLSP1_UART1_APPS_CLK		80
+#define GCC_BLSP1_UART2_APPS_CLK		81
+#define GCC_BLSP1_UART3_APPS_CLK		82
+#define GCC_BLSP1_UART4_APPS_CLK		83
+#define GCC_BLSP1_UART5_APPS_CLK		84
+#define GCC_BLSP1_UART6_APPS_CLK		85
+#define GCC_CRYPTO_AHB_CLK			86
+#define GCC_CRYPTO_AXI_CLK			87
+#define GCC_CRYPTO_CLK				88
+#define GCC_XO_CLK				89
+#define GCC_XO_DIV4_CLK				90
+#define GCC_MDIO_AHB_CLK			91
+#define GCC_CRYPTO_PPE_CLK			92
+#define GCC_NSS_CE_APB_CLK			93
+#define GCC_NSS_CE_AXI_CLK			94
+#define GCC_NSS_CFG_CLK				95
+#define GCC_NSS_CRYPTO_CLK			96
+#define GCC_NSS_CSR_CLK				97
+#define GCC_NSS_EDMA_CFG_CLK			98
+#define GCC_NSS_EDMA_CLK			99
+#define GCC_NSS_NOC_CLK				100
+#define GCC_NSS_PORT1_RX_CLK			101
+#define GCC_NSS_PORT1_TX_CLK			102
+#define GCC_NSS_PORT2_RX_CLK			103
+#define GCC_NSS_PORT2_TX_CLK			104
+#define GCC_NSS_PORT3_RX_CLK			105
+#define GCC_NSS_PORT3_TX_CLK			106
+#define GCC_NSS_PORT4_RX_CLK			107
+#define GCC_NSS_PORT4_TX_CLK			108
+#define GCC_NSS_PORT5_RX_CLK			109
+#define GCC_NSS_PORT5_TX_CLK			110
+#define GCC_NSS_PPE_CFG_CLK			111
+#define GCC_NSS_PPE_CLK				112
+#define GCC_NSS_PPE_IPE_CLK			113
+#define GCC_NSS_PTP_REF_CLK			114
+#define GCC_NSSNOC_CE_APB_CLK			115
+#define GCC_NSSNOC_CE_AXI_CLK			116
+#define GCC_NSSNOC_CRYPTO_CLK			117
+#define GCC_NSSNOC_PPE_CFG_CLK			118
+#define GCC_NSSNOC_PPE_CLK			119
+#define GCC_NSSNOC_QOSGEN_REF_CLK		120
+#define GCC_NSSNOC_TIMEOUT_REF_CLK		121
+#define GCC_NSSNOC_UBI0_AHB_CLK			122
+#define GCC_PORT1_MAC_CLK			123
+#define GCC_PORT2_MAC_CLK			124
+#define GCC_PORT3_MAC_CLK			125
+#define GCC_PORT4_MAC_CLK			126
+#define GCC_PORT5_MAC_CLK			127
+#define GCC_UBI0_AHB_CLK			128
+#define GCC_UBI0_AXI_CLK			129
+#define GCC_UBI0_CORE_CLK			130
+#define GCC_PCIE0_AHB_CLK			131
+#define GCC_PCIE0_AUX_CLK			132
+#define GCC_PCIE0_AXI_M_CLK			133
+#define GCC_PCIE0_AXI_S_CLK			134
+#define GCC_PCIE0_PIPE_CLK			135
+#define GCC_PRNG_AHB_CLK			136
+#define GCC_QPIC_AHB_CLK			137
+#define GCC_QPIC_CLK				138
+#define GCC_SDCC1_AHB_CLK			139
+#define GCC_SDCC1_APPS_CLK			140
+#define GCC_UNIPHY0_AHB_CLK			141
+#define GCC_UNIPHY0_PORT1_RX_CLK		142
+#define GCC_UNIPHY0_PORT1_TX_CLK		143
+#define GCC_UNIPHY0_PORT2_RX_CLK		144
+#define GCC_UNIPHY0_PORT2_TX_CLK		145
+#define GCC_UNIPHY0_PORT3_RX_CLK		146
+#define GCC_UNIPHY0_PORT3_TX_CLK		147
+#define GCC_UNIPHY0_PORT4_RX_CLK		148
+#define GCC_UNIPHY0_PORT4_TX_CLK		149
+#define GCC_UNIPHY0_PORT5_RX_CLK		150
+#define GCC_UNIPHY0_PORT5_TX_CLK		151
+#define GCC_UNIPHY0_SYS_CLK			152
+#define GCC_UNIPHY1_AHB_CLK			153
+#define GCC_UNIPHY1_PORT5_RX_CLK		154
+#define GCC_UNIPHY1_PORT5_TX_CLK		155
+#define GCC_UNIPHY1_SYS_CLK			156
+#define GCC_USB0_AUX_CLK			157
+#define GCC_USB0_MASTER_CLK			158
+#define GCC_USB0_MOCK_UTMI_CLK			159
+#define GCC_USB0_PHY_CFG_AHB_CLK		160
+#define GCC_USB0_PIPE_CLK			161
+#define GCC_USB0_SLEEP_CLK			162
+#define GCC_USB1_MASTER_CLK			163
+#define GCC_USB1_MOCK_UTMI_CLK			164
+#define GCC_USB1_PHY_CFG_AHB_CLK		165
+#define GCC_USB1_SLEEP_CLK			166
+#define GP1_CLK_SRC				167
+#define GP2_CLK_SRC				168
+#define GP3_CLK_SRC				169
+#define GCC_GP1_CLK				170
+#define GCC_GP2_CLK				171
+#define GCC_GP3_CLK				172
+#define SYSTEM_NOC_BFDCD_CLK_SRC		173
+#define GCC_NSSNOC_SNOC_CLK			174
+#define GCC_UBI0_NC_AXI_CLK			175
+#define GCC_UBI1_NC_AXI_CLK			176
+#define GPLL0_MAIN				177
+#define UBI32_PLL_MAIN				178
+#define GPLL6_MAIN				179
+#define GPLL4_MAIN				180
+#define GPLL2_MAIN				181
+#define NSS_CRYPTO_PLL_MAIN			182
+#define GCC_CMN_12GPLL_AHB_CLK			183
+#define GCC_CMN_12GPLL_SYS_CLK			184
+#define GCC_SNOC_BUS_TIMEOUT2_AHB_CLK		185
+#define GCC_SYS_NOC_USB0_AXI_CLK		186
+#define GCC_SYS_NOC_PCIE0_AXI_CLK		187
+#define QDSS_TSCTR_CLK_SRC			188
+#define QDSS_AT_CLK_SRC				189
+#define GCC_QDSS_AT_CLK				190
+#define GCC_QDSS_DAP_CLK			191
+#define ADSS_PWM_CLK_SRC			192
+#define GCC_ADSS_PWM_CLK			193
+#define SDCC1_ICE_CORE_CLK_SRC			194
+#define GCC_SDCC1_ICE_CORE_CLK			195
+#define GCC_DCC_CLK				196
+#define PCIE0_RCHNG_CLK_SRC			197
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK		198
+#define PCIE0_RCHNG_CLK				199
+#define UBI32_MEM_NOC_BFDCD_CLK_SRC		200
+#define WCSS_AHB_CLK_SRC			201
+#define Q6_AXI_CLK_SRC				202
+#define GCC_Q6SS_PCLKDBG_CLK			203
+#define GCC_Q6_TSCTR_1TO2_CLK			204
+#define GCC_WCSS_CORE_TBU_CLK			205
+#define GCC_WCSS_AXI_M_CLK			206
+#define GCC_SYS_NOC_WCSS_AHB_CLK		207
+#define GCC_Q6_AXIM_CLK				208
+#define GCC_Q6SS_ATBM_CLK			209
+#define GCC_WCSS_Q6_TBU_CLK			210
+#define GCC_Q6_AXIM2_CLK			211
+#define GCC_Q6_AHB_CLK				212
+#define GCC_Q6_AHB_S_CLK			213
+#define GCC_WCSS_DBG_IFC_APB_CLK		214
+#define GCC_WCSS_DBG_IFC_ATB_CLK		215
+#define GCC_WCSS_DBG_IFC_NTS_CLK		216
+#define GCC_WCSS_DBG_IFC_DAPBUS_CLK		217
+#define GCC_WCSS_DBG_IFC_APB_BDG_CLK		218
+#define GCC_WCSS_DBG_IFC_ATB_BDG_CLK		219
+#define GCC_WCSS_DBG_IFC_NTS_BDG_CLK		220
+#define GCC_WCSS_DBG_IFC_DAPBUS_BDG_CLK		221
+#define GCC_WCSS_ECAHB_CLK			222
+#define GCC_WCSS_ACMT_CLK			223
+#define GCC_WCSS_AHB_S_CLK			224
+#define GCC_RBCPR_WCSS_CLK			225
+#define RBCPR_WCSS_CLK_SRC			226
+#define GCC_RBCPR_WCSS_AHB_CLK			227
+#define GCC_LPASS_CORE_AXIM_CLK			228
+#define GCC_LPASS_SNOC_CFG_CLK			229
+#define GCC_LPASS_Q6_AXIM_CLK			230
+#define GCC_LPASS_Q6_ATBM_AT_CLK		231
+#define GCC_LPASS_Q6_PCLKDBG_CLK		232
+#define GCC_LPASS_Q6SS_TSCTR_1TO2_CLK		233
+#define GCC_LPASS_Q6SS_TRIG_CLK			234
+#define GCC_LPASS_TBU_CLK			235
+#define LPASS_CORE_AXIM_CLK_SRC			236
+#define LPASS_SNOC_CFG_CLK_SRC			237
+#define LPASS_Q6_AXIM_CLK_SRC			238
+#define GCC_PCNOC_LPASS_CLK			239
+#define GCC_UBI0_UTCM_CLK			240
+#define SNOC_NSSNOC_BFDCD_CLK_SRC		241
+#define GCC_SNOC_NSSNOC_CLK			242
+#define GCC_MEM_NOC_Q6_AXI_CLK			243
+#define GCC_MEM_NOC_UBI32_CLK			244
+#define GCC_MEM_NOC_LPASS_CLK			245
+#define GCC_SNOC_LPASS_CFG_CLK			246
+#define GCC_SYS_NOC_QDSS_STM_AXI_CLK		247
+#define GCC_QDSS_STM_CLK			248
+#define GCC_QDSS_TRACECLKIN_CLK			249
+#define QDSS_STM_CLK_SRC			250
+#define QDSS_TRACECLKIN_CLK_SRC			251
+#define GCC_NSSNOC_ATB_CLK			252
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index de1d8a1..63e02dc 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -182,6 +182,7 @@
 #define GCC_MSS_GPLL0_DIV_CLK_SRC				173
 #define GCC_MSS_SNOC_AXI_CLK					174
 #define GCC_MSS_MNOC_BIMC_AXI_CLK				175
+#define GCC_BIMC_GFX_CLK					176
 
 #define PCIE_0_GDSC						0
 #define UFS_GDSC						1
diff --git a/include/dt-bindings/clock/qcom,gpucc-sc7180.h b/include/dt-bindings/clock/qcom,gpucc-sc7180.h
new file mode 100644
index 0000000..0e4643b
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sc7180.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SC7180_H
+
+#define GPU_CC_PLL1			0
+#define GPU_CC_AHB_CLK			1
+#define GPU_CC_CRC_AHB_CLK		2
+#define GPU_CC_CX_GMU_CLK		3
+#define GPU_CC_CX_SNOC_DVM_CLK		4
+#define GPU_CC_CXO_AON_CLK		5
+#define GPU_CC_CXO_CLK			6
+#define GPU_CC_GMU_CLK_SRC		7
+
+/* GPU_CC GDSCRs */
+#define CX_GDSC				0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8998.h b/include/dt-bindings/clock/qcom,mmcc-msm8998.h
new file mode 100644
index 0000000..ecbafdb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8998.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_MMCC_8998_H
+#define _DT_BINDINGS_CLK_MSM_MMCC_8998_H
+
+#define MMPLL0						0
+#define MMPLL0_OUT_EVEN					1
+#define MMPLL1						2
+#define MMPLL1_OUT_EVEN					3
+#define MMPLL3						4
+#define MMPLL3_OUT_EVEN					5
+#define MMPLL4						6
+#define MMPLL4_OUT_EVEN					7
+#define MMPLL5						8
+#define MMPLL5_OUT_EVEN					9
+#define MMPLL6						10
+#define MMPLL6_OUT_EVEN					11
+#define MMPLL7						12
+#define MMPLL7_OUT_EVEN					13
+#define MMPLL10						14
+#define MMPLL10_OUT_EVEN				15
+#define BYTE0_CLK_SRC					16
+#define BYTE1_CLK_SRC					17
+#define CCI_CLK_SRC					18
+#define CPP_CLK_SRC					19
+#define CSI0_CLK_SRC					20
+#define CSI1_CLK_SRC					21
+#define CSI2_CLK_SRC					22
+#define CSI3_CLK_SRC					23
+#define CSIPHY_CLK_SRC					24
+#define CSI0PHYTIMER_CLK_SRC				25
+#define CSI1PHYTIMER_CLK_SRC				26
+#define CSI2PHYTIMER_CLK_SRC				27
+#define DP_AUX_CLK_SRC					28
+#define DP_CRYPTO_CLK_SRC				29
+#define DP_LINK_CLK_SRC					30
+#define DP_PIXEL_CLK_SRC				31
+#define ESC0_CLK_SRC					32
+#define ESC1_CLK_SRC					33
+#define EXTPCLK_CLK_SRC					34
+#define FD_CORE_CLK_SRC					35
+#define HDMI_CLK_SRC					36
+#define JPEG0_CLK_SRC					37
+#define MAXI_CLK_SRC					38
+#define MCLK0_CLK_SRC					39
+#define MCLK1_CLK_SRC					40
+#define MCLK2_CLK_SRC					41
+#define MCLK3_CLK_SRC					42
+#define MDP_CLK_SRC					43
+#define VSYNC_CLK_SRC					44
+#define AHB_CLK_SRC					45
+#define AXI_CLK_SRC					46
+#define PCLK0_CLK_SRC					47
+#define PCLK1_CLK_SRC					48
+#define ROT_CLK_SRC					49
+#define VIDEO_CORE_CLK_SRC				50
+#define VIDEO_SUBCORE0_CLK_SRC				51
+#define VIDEO_SUBCORE1_CLK_SRC				52
+#define VFE0_CLK_SRC					53
+#define VFE1_CLK_SRC					54
+#define MISC_AHB_CLK					55
+#define VIDEO_CORE_CLK					56
+#define VIDEO_AHB_CLK					57
+#define VIDEO_AXI_CLK					58
+#define VIDEO_MAXI_CLK					59
+#define VIDEO_SUBCORE0_CLK				60
+#define VIDEO_SUBCORE1_CLK				61
+#define MDSS_AHB_CLK					62
+#define MDSS_HDMI_DP_AHB_CLK				63
+#define MDSS_AXI_CLK					64
+#define MDSS_PCLK0_CLK					65
+#define MDSS_PCLK1_CLK					66
+#define MDSS_MDP_CLK					67
+#define MDSS_MDP_LUT_CLK				68
+#define MDSS_EXTPCLK_CLK				69
+#define MDSS_VSYNC_CLK					70
+#define MDSS_HDMI_CLK					71
+#define MDSS_BYTE0_CLK					72
+#define MDSS_BYTE1_CLK					73
+#define MDSS_ESC0_CLK					74
+#define MDSS_ESC1_CLK					75
+#define MDSS_ROT_CLK					76
+#define MDSS_DP_LINK_CLK				77
+#define MDSS_DP_LINK_INTF_CLK				78
+#define MDSS_DP_CRYPTO_CLK				79
+#define MDSS_DP_PIXEL_CLK				80
+#define MDSS_DP_AUX_CLK					81
+#define MDSS_BYTE0_INTF_CLK				82
+#define MDSS_BYTE1_INTF_CLK				83
+#define CAMSS_CSI0PHYTIMER_CLK				84
+#define CAMSS_CSI1PHYTIMER_CLK				85
+#define CAMSS_CSI2PHYTIMER_CLK				86
+#define CAMSS_CSI0_CLK					87
+#define CAMSS_CSI0_AHB_CLK				88
+#define CAMSS_CSI0RDI_CLK				89
+#define CAMSS_CSI0PIX_CLK				90
+#define CAMSS_CSI1_CLK					91
+#define CAMSS_CSI1_AHB_CLK				92
+#define CAMSS_CSI1RDI_CLK				93
+#define CAMSS_CSI1PIX_CLK				94
+#define CAMSS_CSI2_CLK					95
+#define CAMSS_CSI2_AHB_CLK				96
+#define CAMSS_CSI2RDI_CLK				97
+#define CAMSS_CSI2PIX_CLK				98
+#define CAMSS_CSI3_CLK					99
+#define CAMSS_CSI3_AHB_CLK				100
+#define CAMSS_CSI3RDI_CLK				101
+#define CAMSS_CSI3PIX_CLK				102
+#define CAMSS_ISPIF_AHB_CLK				103
+#define CAMSS_CCI_CLK					104
+#define CAMSS_CCI_AHB_CLK				105
+#define CAMSS_MCLK0_CLK					106
+#define CAMSS_MCLK1_CLK					107
+#define CAMSS_MCLK2_CLK					108
+#define CAMSS_MCLK3_CLK					109
+#define CAMSS_TOP_AHB_CLK				110
+#define CAMSS_AHB_CLK					111
+#define CAMSS_MICRO_AHB_CLK				112
+#define CAMSS_JPEG0_CLK					113
+#define CAMSS_JPEG_AHB_CLK				114
+#define CAMSS_JPEG_AXI_CLK				115
+#define CAMSS_VFE0_AHB_CLK				116
+#define CAMSS_VFE1_AHB_CLK				117
+#define CAMSS_VFE0_CLK					118
+#define CAMSS_VFE1_CLK					119
+#define CAMSS_CPP_CLK					120
+#define CAMSS_CPP_AHB_CLK				121
+#define CAMSS_VFE_VBIF_AHB_CLK				122
+#define CAMSS_VFE_VBIF_AXI_CLK				123
+#define CAMSS_CPP_AXI_CLK				124
+#define CAMSS_CPP_VBIF_AHB_CLK				125
+#define CAMSS_CSI_VFE0_CLK				126
+#define CAMSS_CSI_VFE1_CLK				127
+#define CAMSS_VFE0_STREAM_CLK				128
+#define CAMSS_VFE1_STREAM_CLK				129
+#define CAMSS_CPHY_CSID0_CLK				130
+#define CAMSS_CPHY_CSID1_CLK				131
+#define CAMSS_CPHY_CSID2_CLK				132
+#define CAMSS_CPHY_CSID3_CLK				133
+#define CAMSS_CSIPHY0_CLK				134
+#define CAMSS_CSIPHY1_CLK				135
+#define CAMSS_CSIPHY2_CLK				136
+#define FD_CORE_CLK					137
+#define FD_CORE_UAR_CLK					138
+#define FD_AHB_CLK					139
+#define MNOC_AHB_CLK					140
+#define BIMC_SMMU_AHB_CLK				141
+#define BIMC_SMMU_AXI_CLK				142
+#define MNOC_MAXI_CLK					143
+#define VMEM_MAXI_CLK					144
+#define VMEM_AHB_CLK					145
+
+#define SPDM_BCR					0
+#define SPDM_RM_BCR					1
+#define MISC_BCR					2
+#define VIDEO_TOP_BCR					3
+#define THROTTLE_VIDEO_BCR				4
+#define MDSS_BCR					5
+#define THROTTLE_MDSS_BCR				6
+#define CAMSS_PHY0_BCR					7
+#define CAMSS_PHY1_BCR					8
+#define CAMSS_PHY2_BCR					9
+#define CAMSS_CSI0_BCR					10
+#define CAMSS_CSI0RDI_BCR				11
+#define CAMSS_CSI0PIX_BCR				12
+#define CAMSS_CSI1_BCR					13
+#define CAMSS_CSI1RDI_BCR				14
+#define CAMSS_CSI1PIX_BCR				15
+#define CAMSS_CSI2_BCR					16
+#define CAMSS_CSI2RDI_BCR				17
+#define CAMSS_CSI2PIX_BCR				18
+#define CAMSS_CSI3_BCR					19
+#define CAMSS_CSI3RDI_BCR				20
+#define CAMSS_CSI3PIX_BCR				21
+#define CAMSS_ISPIF_BCR					22
+#define CAMSS_CCI_BCR					23
+#define CAMSS_TOP_BCR					24
+#define CAMSS_AHB_BCR					25
+#define CAMSS_MICRO_BCR					26
+#define CAMSS_JPEG_BCR					27
+#define CAMSS_VFE0_BCR					28
+#define CAMSS_VFE1_BCR					29
+#define CAMSS_VFE_VBIF_BCR				30
+#define CAMSS_CPP_TOP_BCR				31
+#define CAMSS_CPP_BCR					32
+#define CAMSS_CSI_VFE0_BCR				33
+#define CAMSS_CSI_VFE1_BCR				34
+#define CAMSS_FD_BCR					35
+#define THROTTLE_CAMSS_BCR				36
+#define MNOCAHB_BCR					37
+#define MNOCAXI_BCR					38
+#define BMIC_SMMU_BCR					39
+#define MNOC_MAXI_BCR					40
+#define VMEM_BCR					41
+#define BTO_BCR						42
+
+#define VIDEO_TOP_GDSC		1
+#define VIDEO_SUBCORE0_GDSC	2
+#define VIDEO_SUBCORE1_GDSC	3
+#define MDSS_GDSC		4
+#define CAMSS_TOP_GDSC		5
+#define CAMSS_VFE0_GDSC		6
+#define CAMSS_VFE1_GDSC		7
+#define CAMSS_CPP_GDSC		8
+#define BIMC_SMMU_GDSC		9
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-sc7180.h b/include/dt-bindings/clock/qcom,videocc-sc7180.h
new file mode 100644
index 0000000..7acaf13
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,videocc-sc7180.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SC7180_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_PLL0				0
+#define VIDEO_CC_VCODEC0_AXI_CLK		1
+#define VIDEO_CC_VCODEC0_CORE_CLK		2
+#define VIDEO_CC_VENUS_AHB_CLK			3
+#define VIDEO_CC_VENUS_CLK_SRC			4
+#define VIDEO_CC_VENUS_CTL_AXI_CLK		5
+#define VIDEO_CC_VENUS_CTL_CORE_CLK		6
+#define VIDEO_CC_XO_CLK				7
+
+/* VIDEO_CC GDSCRs */
+#define VENUS_GDSC				0
+#define VCODEC0_GDSC				1
+
+#endif
diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h
index a8ac4cf..e512a1c 100644
--- a/include/dt-bindings/clock/sun50i-a64-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a64-ccu.h
@@ -46,6 +46,7 @@
 #define CLK_PLL_VIDEO0		7
 #define CLK_PLL_PERIPH0		11
 
+#define CLK_CPUX		21
 #define CLK_BUS_MIPI_DSI	28
 #define CLK_BUS_CE		29
 #define CLK_BUS_DMA		30
diff --git a/include/dt-bindings/clock/sun6i-a31-ccu.h b/include/dt-bindings/clock/sun6i-a31-ccu.h
index c5d1334..39878d9 100644
--- a/include/dt-bindings/clock/sun6i-a31-ccu.h
+++ b/include/dt-bindings/clock/sun6i-a31-ccu.h
@@ -49,6 +49,8 @@
 
 #define CLK_PLL_VIDEO1_2X	13
 
+#define CLK_PLL_MIPI		15
+
 #define CLK_CPU			18
 
 #define CLK_AHB1_MIPIDSI	23
diff --git a/include/dt-bindings/clock/sun8i-a23-a33-ccu.h b/include/dt-bindings/clock/sun8i-a23-a33-ccu.h
index f8222b6..eb524d0 100644
--- a/include/dt-bindings/clock/sun8i-a23-a33-ccu.h
+++ b/include/dt-bindings/clock/sun8i-a23-a33-ccu.h
@@ -43,6 +43,8 @@
 #ifndef _DT_BINDINGS_CLK_SUN8I_A23_A33_H_
 #define _DT_BINDINGS_CLK_SUN8I_A23_A33_H_
 
+#define CLK_PLL_MIPI		13
+
 #define CLK_CPUX		18
 
 #define CLK_BUS_MIPI_DSI	23
diff --git a/include/dt-bindings/clock/sun8i-r40-ccu.h b/include/dt-bindings/clock/sun8i-r40-ccu.h
index f9e15a2..d7337b5 100644
--- a/include/dt-bindings/clock/sun8i-r40-ccu.h
+++ b/include/dt-bindings/clock/sun8i-r40-ccu.h
@@ -176,7 +176,7 @@
 #define CLK_AVS			152
 #define CLK_HDMI		153
 #define CLK_HDMI_SLOW		154
-
+#define CLK_MBUS		155
 #define CLK_DSI_DPHY		156
 #define CLK_TVE0		157
 #define CLK_TVE1		158
diff --git a/include/dt-bindings/clk/ti-dra7-atl.h b/include/dt-bindings/clock/ti-dra7-atl.h
similarity index 100%
rename from include/dt-bindings/clk/ti-dra7-atl.h
rename to include/dt-bindings/clock/ti-dra7-atl.h
diff --git a/include/dt-bindings/clock/xlnx-versal-clk.h b/include/dt-bindings/clock/xlnx-versal-clk.h
new file mode 100644
index 0000000..264d634
--- /dev/null
+++ b/include/dt-bindings/clock/xlnx-versal-clk.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (C) 2019 Xilinx Inc.
+ *
+ */
+
+#ifndef _DT_BINDINGS_CLK_VERSAL_H
+#define _DT_BINDINGS_CLK_VERSAL_H
+
+#define PMC_PLL					1
+#define APU_PLL					2
+#define RPU_PLL					3
+#define CPM_PLL					4
+#define NOC_PLL					5
+#define PLL_MAX					6
+#define PMC_PRESRC				7
+#define PMC_POSTCLK				8
+#define PMC_PLL_OUT				9
+#define PPLL					10
+#define NOC_PRESRC				11
+#define NOC_POSTCLK				12
+#define NOC_PLL_OUT				13
+#define NPLL					14
+#define APU_PRESRC				15
+#define APU_POSTCLK				16
+#define APU_PLL_OUT				17
+#define APLL					18
+#define RPU_PRESRC				19
+#define RPU_POSTCLK				20
+#define RPU_PLL_OUT				21
+#define RPLL					22
+#define CPM_PRESRC				23
+#define CPM_POSTCLK				24
+#define CPM_PLL_OUT				25
+#define CPLL					26
+#define PPLL_TO_XPD				27
+#define NPLL_TO_XPD				28
+#define APLL_TO_XPD				29
+#define RPLL_TO_XPD				30
+#define EFUSE_REF				31
+#define SYSMON_REF				32
+#define IRO_SUSPEND_REF				33
+#define USB_SUSPEND				34
+#define SWITCH_TIMEOUT				35
+#define RCLK_PMC				36
+#define RCLK_LPD				37
+#define WDT					38
+#define TTC0					39
+#define TTC1					40
+#define TTC2					41
+#define TTC3					42
+#define GEM_TSU					43
+#define GEM_TSU_LB				44
+#define MUXED_IRO_DIV2				45
+#define MUXED_IRO_DIV4				46
+#define PSM_REF					47
+#define GEM0_RX					48
+#define GEM0_TX					49
+#define GEM1_RX					50
+#define GEM1_TX					51
+#define CPM_CORE_REF				52
+#define CPM_LSBUS_REF				53
+#define CPM_DBG_REF				54
+#define CPM_AUX0_REF				55
+#define CPM_AUX1_REF				56
+#define QSPI_REF				57
+#define OSPI_REF				58
+#define SDIO0_REF				59
+#define SDIO1_REF				60
+#define PMC_LSBUS_REF				61
+#define I2C_REF					62
+#define TEST_PATTERN_REF			63
+#define DFT_OSC_REF				64
+#define PMC_PL0_REF				65
+#define PMC_PL1_REF				66
+#define PMC_PL2_REF				67
+#define PMC_PL3_REF				68
+#define CFU_REF					69
+#define SPARE_REF				70
+#define NPI_REF					71
+#define HSM0_REF				72
+#define HSM1_REF				73
+#define SD_DLL_REF				74
+#define FPD_TOP_SWITCH				75
+#define FPD_LSBUS				76
+#define ACPU					77
+#define DBG_TRACE				78
+#define DBG_FPD					79
+#define LPD_TOP_SWITCH				80
+#define ADMA					81
+#define LPD_LSBUS				82
+#define CPU_R5					83
+#define CPU_R5_CORE				84
+#define CPU_R5_OCM				85
+#define CPU_R5_OCM2				86
+#define IOU_SWITCH				87
+#define GEM0_REF				88
+#define GEM1_REF				89
+#define GEM_TSU_REF				90
+#define USB0_BUS_REF				91
+#define UART0_REF				92
+#define UART1_REF				93
+#define SPI0_REF				94
+#define SPI1_REF				95
+#define CAN0_REF				96
+#define CAN1_REF				97
+#define I2C0_REF				98
+#define I2C1_REF				99
+#define DBG_LPD					100
+#define TIMESTAMP_REF				101
+#define DBG_TSTMP				102
+#define CPM_TOPSW_REF				103
+#define USB3_DUAL_REF				104
+#define OUTCLK_MAX				105
+#define REF_CLK					106
+#define PL_ALT_REF_CLK				107
+#define MUXED_IRO				108
+#define PL_EXT					109
+#define PL_LB					110
+#define MIO_50_OR_51				111
+#define MIO_24_OR_25				112
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,msm8916.h b/include/dt-bindings/interconnect/qcom,msm8916.h
new file mode 100644
index 0000000..359a75f
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8916.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm interconnect IDs
+ *
+ * Copyright (c) 2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8916_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8916_H
+
+#define BIMC_SNOC_SLV			0
+#define MASTER_JPEG			1
+#define MASTER_MDP_PORT0		2
+#define MASTER_QDSS_BAM			3
+#define MASTER_QDSS_ETR			4
+#define MASTER_SNOC_CFG			5
+#define MASTER_VFE			6
+#define MASTER_VIDEO_P0			7
+#define SNOC_MM_INT_0			8
+#define SNOC_MM_INT_1			9
+#define SNOC_MM_INT_2			10
+#define SNOC_MM_INT_BIMC		11
+#define PCNOC_SNOC_SLV			12
+#define SLAVE_APSS			13
+#define SLAVE_CATS_128			14
+#define SLAVE_OCMEM_64			15
+#define SLAVE_IMEM			16
+#define SLAVE_QDSS_STM			17
+#define SLAVE_SRVC_SNOC			18
+#define SNOC_BIMC_0_MAS			19
+#define SNOC_BIMC_1_MAS			20
+#define SNOC_INT_0			21
+#define SNOC_INT_1			22
+#define SNOC_INT_BIMC			23
+#define SNOC_PCNOC_MAS			24
+#define SNOC_QDSS_INT			25
+
+#define BIMC_SNOC_MAS			0
+#define MASTER_AMPSS_M0			1
+#define MASTER_GRAPHICS_3D		2
+#define MASTER_TCU0			3
+#define MASTER_TCU1			4
+#define SLAVE_AMPSS_L2			5
+#define SLAVE_EBI_CH0			6
+#define SNOC_BIMC_0_SLV			7
+#define SNOC_BIMC_1_SLV			8
+
+#define MASTER_BLSP_1			0
+#define MASTER_DEHR			1
+#define MASTER_LPASS			2
+#define MASTER_CRYPTO_CORE0		3
+#define MASTER_SDCC_1			4
+#define MASTER_SDCC_2			5
+#define MASTER_SPDM			6
+#define MASTER_USB_HS			7
+#define PCNOC_INT_0			8
+#define PCNOC_INT_1			9
+#define PCNOC_MAS_0			10
+#define PCNOC_MAS_1			11
+#define PCNOC_SLV_0			12
+#define PCNOC_SLV_1			13
+#define PCNOC_SLV_2			14
+#define PCNOC_SLV_3			15
+#define PCNOC_SLV_4			16
+#define PCNOC_SLV_8			17
+#define PCNOC_SLV_9			18
+#define PCNOC_SNOC_MAS			19
+#define SLAVE_BIMC_CFG			20
+#define SLAVE_BLSP_1			21
+#define SLAVE_BOOT_ROM			22
+#define SLAVE_CAMERA_CFG		23
+#define SLAVE_CLK_CTL			24
+#define SLAVE_CRYPTO_0_CFG		25
+#define SLAVE_DEHR_CFG			26
+#define SLAVE_DISPLAY_CFG		27
+#define SLAVE_GRAPHICS_3D_CFG		28
+#define SLAVE_IMEM_CFG			29
+#define SLAVE_LPASS			30
+#define SLAVE_MPM			31
+#define SLAVE_MSG_RAM			32
+#define SLAVE_MSS			33
+#define SLAVE_PDM			34
+#define SLAVE_PMIC_ARB			35
+#define SLAVE_PCNOC_CFG			36
+#define SLAVE_PRNG			37
+#define SLAVE_QDSS_CFG			38
+#define SLAVE_RBCPR_CFG			39
+#define SLAVE_SDCC_1			40
+#define SLAVE_SDCC_2			41
+#define SLAVE_SECURITY			42
+#define SLAVE_SNOC_CFG			43
+#define SLAVE_SPDM			44
+#define SLAVE_TCSR			45
+#define SLAVE_TLMM			46
+#define SLAVE_USB_HS			47
+#define SLAVE_VENUS_CFG			48
+#define SNOC_PCNOC_SLV			49
+
+#endif
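Note on usage: these IDs form endpoint pairs in "interconnects" properties; each path names a master on one NoC provider and a slave on another. A sketch under the assumption that the MSM8916 NoC providers are labelled &snoc and &bimc:

	/* request an MDP-to-memory bandwidth path; labels are illustrative */
	interconnects = <&snoc MASTER_MDP_PORT0 &bimc SLAVE_EBI_CH0>;
	interconnect-names = "mdp0-mem";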
diff --git a/include/dt-bindings/memory/tegra186-mc.h b/include/dt-bindings/memory/tegra186-mc.h
index 6481353..82a1e27 100644
--- a/include/dt-bindings/memory/tegra186-mc.h
+++ b/include/dt-bindings/memory/tegra186-mc.h
@@ -108,4 +108,143 @@
 #define TEGRA186_SID_SE_VM6		0x4e
 #define TEGRA186_SID_SE_VM7		0x4f
 
+/*
+ * memory client IDs
+ */
+
+/* Misses from System Memory Management Unit (SMMU) Page Table Cache (PTC) */
+#define TEGRA186_MEMORY_CLIENT_PTCR 0x00
+/* PCIE reads */
+#define TEGRA186_MEMORY_CLIENT_AFIR 0x0e
+/* High-definition audio (HDA) reads */
+#define TEGRA186_MEMORY_CLIENT_HDAR 0x15
+/* Host channel data reads */
+#define TEGRA186_MEMORY_CLIENT_HOST1XDMAR 0x16
+#define TEGRA186_MEMORY_CLIENT_NVENCSRD 0x1c
+/* SATA reads */
+#define TEGRA186_MEMORY_CLIENT_SATAR 0x1f
+/* Reads from the 4 Cortex-A9 CPU cores via the L2 cache */
+#define TEGRA186_MEMORY_CLIENT_MPCORER 0x27
+#define TEGRA186_MEMORY_CLIENT_NVENCSWR 0x2b
+/* PCIE writes */
+#define TEGRA186_MEMORY_CLIENT_AFIW 0x31
+/* High-definition audio (HDA) writes */
+#define TEGRA186_MEMORY_CLIENT_HDAW 0x35
+/* Writes from the 4 Cortex-A9 CPU cores via the L2 cache */
+#define TEGRA186_MEMORY_CLIENT_MPCOREW 0x39
+/* SATA writes */
+#define TEGRA186_MEMORY_CLIENT_SATAW 0x3d
+/* ISP Read client for Crossbar A */
+#define TEGRA186_MEMORY_CLIENT_ISPRA 0x44
+/* ISP Write client for Crossbar A */
+#define TEGRA186_MEMORY_CLIENT_ISPWA 0x46
+/* ISP Write client for Crossbar B */
+#define TEGRA186_MEMORY_CLIENT_ISPWB 0x47
+/* XUSB reads */
+#define TEGRA186_MEMORY_CLIENT_XUSB_HOSTR 0x4a
+/* XUSB_HOST writes */
+#define TEGRA186_MEMORY_CLIENT_XUSB_HOSTW 0x4b
+/* XUSB reads */
+#define TEGRA186_MEMORY_CLIENT_XUSB_DEVR 0x4c
+/* XUSB_DEV writes */
+#define TEGRA186_MEMORY_CLIENT_XUSB_DEVW 0x4d
+/* TSEC Memory Return Data Client Description */
+#define TEGRA186_MEMORY_CLIENT_TSECSRD 0x54
+/* TSEC Memory Write Client Description */
+#define TEGRA186_MEMORY_CLIENT_TSECSWR 0x55
+/* 3D, ltcx reads instance 0 */
+#define TEGRA186_MEMORY_CLIENT_GPUSRD 0x58
+/* 3D, ltcx writes instance 0 */
+#define TEGRA186_MEMORY_CLIENT_GPUSWR 0x59
+/* sdmmca memory read client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCRA 0x60
+/* sdmmcb memory read client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCRAA 0x61
+/* sdmmc memory read client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCR 0x62
+/* sdmmcd memory read client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCRAB 0x63
+/* sdmmca memory write client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCWA 0x64
+/* sdmmcb memory write client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCWAA 0x65
+/* sdmmc memory write client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCW 0x66
+/* sdmmcd memory write client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCWAB 0x67
+#define TEGRA186_MEMORY_CLIENT_VICSRD 0x6c
+#define TEGRA186_MEMORY_CLIENT_VICSWR 0x6d
+/* VI Write client */
+#define TEGRA186_MEMORY_CLIENT_VIW 0x72
+#define TEGRA186_MEMORY_CLIENT_NVDECSRD 0x78
+#define TEGRA186_MEMORY_CLIENT_NVDECSWR 0x79
+/* Audio Processing (APE) engine reads */
+#define TEGRA186_MEMORY_CLIENT_APER 0x7a
+/* Audio Processing (APE) engine writes */
+#define TEGRA186_MEMORY_CLIENT_APEW 0x7b
+#define TEGRA186_MEMORY_CLIENT_NVJPGSRD 0x7e
+#define TEGRA186_MEMORY_CLIENT_NVJPGSWR 0x7f
+/* SE Memory Return Data Client Description */
+#define TEGRA186_MEMORY_CLIENT_SESRD 0x80
+/* SE Memory Write Client Description */
+#define TEGRA186_MEMORY_CLIENT_SESWR 0x81
+/* ETR reads */
+#define TEGRA186_MEMORY_CLIENT_ETRR 0x84
+/* ETR writes */
+#define TEGRA186_MEMORY_CLIENT_ETRW 0x85
+/* TSECB Memory Return Data Client Description */
+#define TEGRA186_MEMORY_CLIENT_TSECSRDB 0x86
+/* TSECB Memory Write Client Description */
+#define TEGRA186_MEMORY_CLIENT_TSECSWRB 0x87
+/* 3D, ltcx reads instance 1 */
+#define TEGRA186_MEMORY_CLIENT_GPUSRD2 0x88
+/* 3D, ltcx writes instance 1 */
+#define TEGRA186_MEMORY_CLIENT_GPUSWR2 0x89
+/* AXI Switch read client */
+#define TEGRA186_MEMORY_CLIENT_AXISR 0x8c
+/* AXI Switch write client */
+#define TEGRA186_MEMORY_CLIENT_AXISW 0x8d
+/* EQOS read client */
+#define TEGRA186_MEMORY_CLIENT_EQOSR 0x8e
+/* EQOS write client */
+#define TEGRA186_MEMORY_CLIENT_EQOSW 0x8f
+/* UFSHC read client */
+#define TEGRA186_MEMORY_CLIENT_UFSHCR 0x90
+/* UFSHC write client */
+#define TEGRA186_MEMORY_CLIENT_UFSHCW 0x91
+/* NVDISPLAY read client */
+#define TEGRA186_MEMORY_CLIENT_NVDISPLAYR 0x92
+/* BPMP read client */
+#define TEGRA186_MEMORY_CLIENT_BPMPR 0x93
+/* BPMP write client */
+#define TEGRA186_MEMORY_CLIENT_BPMPW 0x94
+/* BPMPDMA read client */
+#define TEGRA186_MEMORY_CLIENT_BPMPDMAR 0x95
+/* BPMPDMA write client */
+#define TEGRA186_MEMORY_CLIENT_BPMPDMAW 0x96
+/* AON read client */
+#define TEGRA186_MEMORY_CLIENT_AONR 0x97
+/* AON write client */
+#define TEGRA186_MEMORY_CLIENT_AONW 0x98
+/* AONDMA read client */
+#define TEGRA186_MEMORY_CLIENT_AONDMAR 0x99
+/* AONDMA write client */
+#define TEGRA186_MEMORY_CLIENT_AONDMAW 0x9a
+/* SCE read client */
+#define TEGRA186_MEMORY_CLIENT_SCER 0x9b
+/* SCE write client */
+#define TEGRA186_MEMORY_CLIENT_SCEW 0x9c
+/* SCEDMA read client */
+#define TEGRA186_MEMORY_CLIENT_SCEDMAR 0x9d
+/* SCEDMA write client */
+#define TEGRA186_MEMORY_CLIENT_SCEDMAW 0x9e
+/* APEDMA read client */
+#define TEGRA186_MEMORY_CLIENT_APEDMAR 0x9f
+/* APEDMA write client */
+#define TEGRA186_MEMORY_CLIENT_APEDMAW 0xa0
+/* NVDISPLAY read client instance 2 */
+#define TEGRA186_MEMORY_CLIENT_NVDISPLAYR1 0xa1
+#define TEGRA186_MEMORY_CLIENT_VICSRD1 0xa2
+#define TEGRA186_MEMORY_CLIENT_NVDECSRD1 0xa3
+
 #endif
diff --git a/include/dt-bindings/memory/tegra194-mc.h b/include/dt-bindings/memory/tegra194-mc.h
new file mode 100644
index 0000000..eed48b7
--- /dev/null
+++ b/include/dt-bindings/memory/tegra194-mc.h
@@ -0,0 +1,410 @@
+#ifndef DT_BINDINGS_MEMORY_TEGRA194_MC_H
+#define DT_BINDINGS_MEMORY_TEGRA194_MC_H
+
+/* special clients */
+#define TEGRA194_SID_INVALID		0x00
+#define TEGRA194_SID_PASSTHROUGH	0x7f
+
+/* host1x clients */
+#define TEGRA194_SID_HOST1X		0x01
+#define TEGRA194_SID_CSI		0x02
+#define TEGRA194_SID_VIC		0x03
+#define TEGRA194_SID_VI			0x04
+#define TEGRA194_SID_ISP		0x05
+#define TEGRA194_SID_NVDEC		0x06
+#define TEGRA194_SID_NVENC		0x07
+#define TEGRA194_SID_NVJPG		0x08
+#define TEGRA194_SID_NVDISPLAY		0x09
+#define TEGRA194_SID_TSEC		0x0a
+#define TEGRA194_SID_TSECB		0x0b
+#define TEGRA194_SID_SE			0x0c
+#define TEGRA194_SID_SE1		0x0d
+#define TEGRA194_SID_SE2		0x0e
+#define TEGRA194_SID_SE3		0x0f
+
+/* GPU clients */
+#define TEGRA194_SID_GPU		0x10
+
+/* other SoC clients */
+#define TEGRA194_SID_AFI		0x11
+#define TEGRA194_SID_HDA		0x12
+#define TEGRA194_SID_ETR		0x13
+#define TEGRA194_SID_EQOS		0x14
+#define TEGRA194_SID_UFSHC		0x15
+#define TEGRA194_SID_AON		0x16
+#define TEGRA194_SID_SDMMC4		0x17
+#define TEGRA194_SID_SDMMC3		0x18
+#define TEGRA194_SID_SDMMC2		0x19
+#define TEGRA194_SID_SDMMC1		0x1a
+#define TEGRA194_SID_XUSB_HOST		0x1b
+#define TEGRA194_SID_XUSB_DEV		0x1c
+#define TEGRA194_SID_SATA		0x1d
+#define TEGRA194_SID_APE		0x1e
+#define TEGRA194_SID_SCE		0x1f
+
+/* GPC DMA clients */
+#define TEGRA194_SID_GPCDMA_0		0x20
+#define TEGRA194_SID_GPCDMA_1		0x21
+#define TEGRA194_SID_GPCDMA_2		0x22
+#define TEGRA194_SID_GPCDMA_3		0x23
+#define TEGRA194_SID_GPCDMA_4		0x24
+#define TEGRA194_SID_GPCDMA_5		0x25
+#define TEGRA194_SID_GPCDMA_6		0x26
+#define TEGRA194_SID_GPCDMA_7		0x27
+
+/* APE DMA clients */
+#define TEGRA194_SID_APE_1		0x28
+#define TEGRA194_SID_APE_2		0x29
+
+/* camera RTCPU */
+#define TEGRA194_SID_RCE		0x2a
+
+/* camera RTCPU on host1x address space */
+#define TEGRA194_SID_RCE_1X		0x2b
+
+/* APE DMA clients */
+#define TEGRA194_SID_APE_3		0x2c
+
+/* camera RTCPU running on APE */
+#define TEGRA194_SID_APE_CAM		0x2d
+#define TEGRA194_SID_APE_CAM_1X		0x2e
+
+#define TEGRA194_SID_RCE_RM		0x2f
+#define TEGRA194_SID_VI_FALCON		0x30
+#define TEGRA194_SID_ISP_FALCON		0x31
+
+/*
+ * The BPMP has its SID value hardcoded in the firmware. Changing it requires
+ * considerable effort.
+ */
+#define TEGRA194_SID_BPMP		0x32
+
+/* for SMMU tests */
+#define TEGRA194_SID_SMMU_TEST		0x33
+
+/* host1x virtualization channels */
+#define TEGRA194_SID_HOST1X_CTX0	0x38
+#define TEGRA194_SID_HOST1X_CTX1	0x39
+#define TEGRA194_SID_HOST1X_CTX2	0x3a
+#define TEGRA194_SID_HOST1X_CTX3	0x3b
+#define TEGRA194_SID_HOST1X_CTX4	0x3c
+#define TEGRA194_SID_HOST1X_CTX5	0x3d
+#define TEGRA194_SID_HOST1X_CTX6	0x3e
+#define TEGRA194_SID_HOST1X_CTX7	0x3f
+
+/* host1x command buffers */
+#define TEGRA194_SID_HOST1X_VM0		0x40
+#define TEGRA194_SID_HOST1X_VM1		0x41
+#define TEGRA194_SID_HOST1X_VM2		0x42
+#define TEGRA194_SID_HOST1X_VM3		0x43
+#define TEGRA194_SID_HOST1X_VM4		0x44
+#define TEGRA194_SID_HOST1X_VM5		0x45
+#define TEGRA194_SID_HOST1X_VM6		0x46
+#define TEGRA194_SID_HOST1X_VM7		0x47
+
+/* SE data buffers */
+#define TEGRA194_SID_SE_VM0		0x48
+#define TEGRA194_SID_SE_VM1		0x49
+#define TEGRA194_SID_SE_VM2		0x4a
+#define TEGRA194_SID_SE_VM3		0x4b
+#define TEGRA194_SID_SE_VM4		0x4c
+#define TEGRA194_SID_SE_VM5		0x4d
+#define TEGRA194_SID_SE_VM6		0x4e
+#define TEGRA194_SID_SE_VM7		0x4f
+
+#define TEGRA194_SID_MIU		0x50
+
+#define TEGRA194_SID_NVDLA0		0x51
+#define TEGRA194_SID_NVDLA1		0x52
+
+#define TEGRA194_SID_PVA0		0x53
+#define TEGRA194_SID_PVA1		0x54
+#define TEGRA194_SID_NVENC1		0x55
+#define TEGRA194_SID_PCIE0		0x56
+#define TEGRA194_SID_PCIE1		0x57
+#define TEGRA194_SID_PCIE2		0x58
+#define TEGRA194_SID_PCIE3		0x59
+#define TEGRA194_SID_PCIE4		0x5a
+#define TEGRA194_SID_PCIE5		0x5b
+#define TEGRA194_SID_NVDEC1		0x5c
+
+#define TEGRA194_SID_XUSB_VF0		0x5d
+#define TEGRA194_SID_XUSB_VF1		0x5e
+#define TEGRA194_SID_XUSB_VF2		0x5f
+#define TEGRA194_SID_XUSB_VF3		0x60
+
+#define TEGRA194_SID_RCE_VM3		0x61
+#define TEGRA194_SID_VI_VM2		0x62
+#define TEGRA194_SID_VI_VM3		0x63
+#define TEGRA194_SID_RCE_SERVER		0x64
+
+/*
+ * memory client IDs
+ */
+
+/* Misses from System Memory Management Unit (SMMU) Page Table Cache (PTC) */
+#define TEGRA194_MEMORY_CLIENT_PTCR 0x00
+/* MSS internal memqual MIU7 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU7R 0x01
+/* MSS internal memqual MIU7 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU7W 0x02
+/* High-definition audio (HDA) read clients */
+#define TEGRA194_MEMORY_CLIENT_HDAR 0x15
+/* Host channel data read clients */
+#define TEGRA194_MEMORY_CLIENT_HOST1XDMAR 0x16
+#define TEGRA194_MEMORY_CLIENT_NVENCSRD 0x1c
+/* SATA read clients */
+#define TEGRA194_MEMORY_CLIENT_SATAR 0x1f
+/* Reads from the 4 Cortex-A9 CPU cores via the L2 cache */
+#define TEGRA194_MEMORY_CLIENT_MPCORER 0x27
+#define TEGRA194_MEMORY_CLIENT_NVENCSWR 0x2b
+/* High-definition audio (HDA) write clients */
+#define TEGRA194_MEMORY_CLIENT_HDAW 0x35
+/* Writes from the 4 Cortex-A9 CPU cores via the L2 cache */
+#define TEGRA194_MEMORY_CLIENT_MPCOREW 0x39
+/* SATA write clients */
+#define TEGRA194_MEMORY_CLIENT_SATAW 0x3d
+/* ISP read client for Crossbar A */
+#define TEGRA194_MEMORY_CLIENT_ISPRA 0x44
+/* ISP read client 1 for Crossbar A */
+#define TEGRA194_MEMORY_CLIENT_ISPFALR 0x45
+/* ISP Write client for Crossbar A */
+#define TEGRA194_MEMORY_CLIENT_ISPWA 0x46
+/* ISP Write client for Crossbar B */
+#define TEGRA194_MEMORY_CLIENT_ISPWB 0x47
+/* XUSB_HOST read clients */
+#define TEGRA194_MEMORY_CLIENT_XUSB_HOSTR 0x4a
+/* XUSB_HOST write clients */
+#define TEGRA194_MEMORY_CLIENT_XUSB_HOSTW 0x4b
+/* XUSB read clients */
+#define TEGRA194_MEMORY_CLIENT_XUSB_DEVR 0x4c
+/* XUSB_DEV write clients */
+#define TEGRA194_MEMORY_CLIENT_XUSB_DEVW 0x4d
+/* sdmmca memory read client */
+#define TEGRA194_MEMORY_CLIENT_SDMMCRA 0x60
+/* sdmmc memory read client */
+#define TEGRA194_MEMORY_CLIENT_SDMMCR 0x62
+/* sdmmcd memory read client */
+#define TEGRA194_MEMORY_CLIENT_SDMMCRAB 0x63
+/* sdmmca memory write client */
+#define TEGRA194_MEMORY_CLIENT_SDMMCWA 0x64
+/* sdmmc memory write client */
+#define TEGRA194_MEMORY_CLIENT_SDMMCW 0x66
+/* sdmmcd memory write client */
+#define TEGRA194_MEMORY_CLIENT_SDMMCWAB 0x67
+#define TEGRA194_MEMORY_CLIENT_VICSRD 0x6c
+#define TEGRA194_MEMORY_CLIENT_VICSWR 0x6d
+/* VI Write client */
+#define TEGRA194_MEMORY_CLIENT_VIW 0x72
+#define TEGRA194_MEMORY_CLIENT_NVDECSRD 0x78
+#define TEGRA194_MEMORY_CLIENT_NVDECSWR 0x79
+/* Audio Processing (APE) engine read clients */
+#define TEGRA194_MEMORY_CLIENT_APER 0x7a
+/* Audio Processing (APE) engine write clients */
+#define TEGRA194_MEMORY_CLIENT_APEW 0x7b
+#define TEGRA194_MEMORY_CLIENT_NVJPGSRD 0x7e
+#define TEGRA194_MEMORY_CLIENT_NVJPGSWR 0x7f
+/* AXI AP and DFD-AUX0/1 read clients. Both share the same interface on the MSS */
+#define TEGRA194_MEMORY_CLIENT_AXIAPR 0x82
+/* AXI AP and DFD-AUX0/1 write clients. Both share the same interface on the MSS */
+#define TEGRA194_MEMORY_CLIENT_AXIAPW 0x83
+/* ETR read clients */
+#define TEGRA194_MEMORY_CLIENT_ETRR 0x84
+/* ETR write clients */
+#define TEGRA194_MEMORY_CLIENT_ETRW 0x85
+/* AXI Switch read client */
+#define TEGRA194_MEMORY_CLIENT_AXISR 0x8c
+/* AXI Switch write client */
+#define TEGRA194_MEMORY_CLIENT_AXISW 0x8d
+/* EQOS read client */
+#define TEGRA194_MEMORY_CLIENT_EQOSR 0x8e
+/* EQOS write client */
+#define TEGRA194_MEMORY_CLIENT_EQOSW 0x8f
+/* UFSHC read client */
+#define TEGRA194_MEMORY_CLIENT_UFSHCR 0x90
+/* UFSHC write client */
+#define TEGRA194_MEMORY_CLIENT_UFSHCW 0x91
+/* NVDISPLAY read client */
+#define TEGRA194_MEMORY_CLIENT_NVDISPLAYR 0x92
+/* BPMP read client */
+#define TEGRA194_MEMORY_CLIENT_BPMPR 0x93
+/* BPMP write client */
+#define TEGRA194_MEMORY_CLIENT_BPMPW 0x94
+/* BPMPDMA read client */
+#define TEGRA194_MEMORY_CLIENT_BPMPDMAR 0x95
+/* BPMPDMA write client */
+#define TEGRA194_MEMORY_CLIENT_BPMPDMAW 0x96
+/* AON read client */
+#define TEGRA194_MEMORY_CLIENT_AONR 0x97
+/* AON write client */
+#define TEGRA194_MEMORY_CLIENT_AONW 0x98
+/* AONDMA read client */
+#define TEGRA194_MEMORY_CLIENT_AONDMAR 0x99
+/* AONDMA write client */
+#define TEGRA194_MEMORY_CLIENT_AONDMAW 0x9a
+/* SCE read client */
+#define TEGRA194_MEMORY_CLIENT_SCER 0x9b
+/* SCE write client */
+#define TEGRA194_MEMORY_CLIENT_SCEW 0x9c
+/* SCEDMA read client */
+#define TEGRA194_MEMORY_CLIENT_SCEDMAR 0x9d
+/* SCEDMA write client */
+#define TEGRA194_MEMORY_CLIENT_SCEDMAW 0x9e
+/* APEDMA read client */
+#define TEGRA194_MEMORY_CLIENT_APEDMAR 0x9f
+/* APEDMA write client */
+#define TEGRA194_MEMORY_CLIENT_APEDMAW 0xa0
+/* NVDISPLAY read client instance 2 */
+#define TEGRA194_MEMORY_CLIENT_NVDISPLAYR1 0xa1
+#define TEGRA194_MEMORY_CLIENT_VICSRD1 0xa2
+#define TEGRA194_MEMORY_CLIENT_NVDECSRD1 0xa3
+/* MSS internal memqual MIU0 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU0R 0xa6
+/* MSS internal memqual MIU0 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU0W 0xa7
+/* MSS internal memqual MIU1 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU1R 0xa8
+/* MSS internal memqual MIU1 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU1W 0xa9
+/* MSS internal memqual MIU2 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU2R 0xae
+/* MSS internal memqual MIU2 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU2W 0xaf
+/* MSS internal memqual MIU3 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU3R 0xb0
+/* MSS internal memqual MIU3 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU3W 0xb1
+/* MSS internal memqual MIU4 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU4R 0xb2
+/* MSS internal memqual MIU4 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU4W 0xb3
+#define TEGRA194_MEMORY_CLIENT_DPMUR 0xb4
+#define TEGRA194_MEMORY_CLIENT_DPMUW 0xb5
+#define TEGRA194_MEMORY_CLIENT_NVL0R 0xb6
+#define TEGRA194_MEMORY_CLIENT_NVL0W 0xb7
+#define TEGRA194_MEMORY_CLIENT_NVL1R 0xb8
+#define TEGRA194_MEMORY_CLIENT_NVL1W 0xb9
+#define TEGRA194_MEMORY_CLIENT_NVL2R 0xba
+#define TEGRA194_MEMORY_CLIENT_NVL2W 0xbb
+/* VI Falcon read clients */
+#define TEGRA194_MEMORY_CLIENT_VIFALR 0xbc
+/* VI Falcon write clients */
+#define TEGRA194_MEMORY_CLIENT_VIFALW 0xbd
+/* DLA0ARDA read clients */
+#define TEGRA194_MEMORY_CLIENT_DLA0RDA 0xbe
+/* DLA0 Falcon read clients */
+#define TEGRA194_MEMORY_CLIENT_DLA0FALRDB 0xbf
+/* DLA0 write clients */
+#define TEGRA194_MEMORY_CLIENT_DLA0WRA 0xc0
+/* DLA0 write clients */
+#define TEGRA194_MEMORY_CLIENT_DLA0FALWRB 0xc1
+/* DLA1ARDA read clients */
+#define TEGRA194_MEMORY_CLIENT_DLA1RDA 0xc2
+/* DLA1 Falcon read clients */
+#define TEGRA194_MEMORY_CLIENT_DLA1FALRDB 0xc3
+/* DLA1 write clients */
+#define TEGRA194_MEMORY_CLIENT_DLA1WRA 0xc4
+/* DLA1 write clients */
+#define TEGRA194_MEMORY_CLIENT_DLA1FALWRB 0xc5
+/* PVA0RDA read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0RDA 0xc6
+/* PVA0RDB read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0RDB 0xc7
+/* PVA0RDC read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0RDC 0xc8
+/* PVA0WRA write clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0WRA 0xc9
+/* PVA0WRB write clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0WRB 0xca
+/* PVA0WRC write clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0WRC 0xcb
+/* PVA1RDA read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1RDA 0xcc
+/* PVA1RDB read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1RDB 0xcd
+/* PVA1RDC read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1RDC 0xce
+/* PVA1WRA write clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1WRA 0xcf
+/* PVA1WRB write clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1WRB 0xd0
+/* PVA1WRC write clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1WRC 0xd1
+/* RCE read client */
+#define TEGRA194_MEMORY_CLIENT_RCER 0xd2
+/* RCE write client */
+#define TEGRA194_MEMORY_CLIENT_RCEW 0xd3
+/* RCEDMA read client */
+#define TEGRA194_MEMORY_CLIENT_RCEDMAR 0xd4
+/* RCEDMA write client */
+#define TEGRA194_MEMORY_CLIENT_RCEDMAW 0xd5
+#define TEGRA194_MEMORY_CLIENT_NVENC1SRD 0xd6
+#define TEGRA194_MEMORY_CLIENT_NVENC1SWR 0xd7
+/* PCIE0 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE0R 0xd8
+/* PCIE0 write clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE0W 0xd9
+/* PCIE1 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE1R 0xda
+/* PCIE1 write clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE1W 0xdb
+/* PCIE2 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE2AR 0xdc
+/* PCIE2 write clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE2AW 0xdd
+/* PCIE3 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE3R 0xde
+/* PCIE3 write clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE3W 0xdf
+/* PCIE4 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE4R 0xe0
+/* PCIE4 write clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE4W 0xe1
+/* PCIE5 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE5R 0xe2
+/* PCIE5 write clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE5W 0xe3
+/* ISP write client 1 for Crossbar A */
+#define TEGRA194_MEMORY_CLIENT_ISPFALW 0xe4
+#define TEGRA194_MEMORY_CLIENT_NVL3R 0xe5
+#define TEGRA194_MEMORY_CLIENT_NVL3W 0xe6
+#define TEGRA194_MEMORY_CLIENT_NVL4R 0xe7
+#define TEGRA194_MEMORY_CLIENT_NVL4W 0xe8
+/* DLA0ARDA1 read clients */
+#define TEGRA194_MEMORY_CLIENT_DLA0RDA1 0xe9
+/* DLA1ARDA1 read clients */
+#define TEGRA194_MEMORY_CLIENT_DLA1RDA1 0xea
+/* PVA0RDA1 read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0RDA1 0xeb
+/* PVA0RDB1 read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0RDB1 0xec
+/* PVA1RDA1 read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1RDA1 0xed
+/* PVA1RDB1 read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1RDB1 0xee
+/* PCIE5r1 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE5R1 0xef
+#define TEGRA194_MEMORY_CLIENT_NVENCSRD1 0xf0
+#define TEGRA194_MEMORY_CLIENT_NVENC1SRD1 0xf1
+/* ISP read client for Crossbar A */
+#define TEGRA194_MEMORY_CLIENT_ISPRA1 0xf2
+/* PCIE0 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE0R1 0xf3
+#define TEGRA194_MEMORY_CLIENT_NVL0RHP 0xf4
+#define TEGRA194_MEMORY_CLIENT_NVL1RHP 0xf5
+#define TEGRA194_MEMORY_CLIENT_NVL2RHP 0xf6
+#define TEGRA194_MEMORY_CLIENT_NVL3RHP 0xf7
+#define TEGRA194_MEMORY_CLIENT_NVL4RHP 0xf8
+#define TEGRA194_MEMORY_CLIENT_NVDEC1SRD 0xf9
+#define TEGRA194_MEMORY_CLIENT_NVDEC1SRD1 0xfa
+#define TEGRA194_MEMORY_CLIENT_NVDEC1SWR 0xfb
+/* MSS internal memqual MIU5 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU5R 0xfc
+/* MSS internal memqual MIU5 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU5W 0xfd
+/* MSS internal memqual MIU6 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU6R 0xfe
+/* MSS internal memqual MIU6 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU6W 0xff
+
+#endif
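Note on usage: the TEGRA194_SID_* values above are stream IDs that device nodes pass to the SMMU, while the memory client IDs identify requestors at the memory controller. A minimal sketch of the SMMU side, assuming an SMMU node labelled &smmu:

	mmc@3460000 {
		/* unit address and &smmu label are placeholders */
		iommus = <&smmu TEGRA194_SID_SDMMC4>;
	};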
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
index b6a1eaf..1f3f866 100644
--- a/include/dt-bindings/phy/phy.h
+++ b/include/dt-bindings/phy/phy.h
@@ -16,5 +16,6 @@
 #define PHY_TYPE_USB2		3
 #define PHY_TYPE_USB3		4
 #define PHY_TYPE_UFS		5
+#define PHY_TYPE_DP		6
 
 #endif /* _DT_BINDINGS_PHY */
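Note on usage: PHY_TYPE_DP follows the existing pattern here — a consumer passes the type as the specifier cell of a multi-protocol PHY. A sketch assuming a combo PHY labelled &combophy with #phy-cells = <1>:

	dp-controller {
		/* node name and &combophy label are placeholders */
		phys = <&combophy PHY_TYPE_DP>;
		phy-names = "dp";
	};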
diff --git a/include/dt-bindings/power/mt6765-power.h b/include/dt-bindings/power/mt6765-power.h
new file mode 100644
index 0000000..d347b4e
--- /dev/null
+++ b/include/dt-bindings/power/mt6765-power.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DT_BINDINGS_POWER_MT6765_POWER_H
+#define _DT_BINDINGS_POWER_MT6765_POWER_H
+
+#define MT6765_POWER_DOMAIN_CONN		0
+#define MT6765_POWER_DOMAIN_MM			1
+#define MT6765_POWER_DOMAIN_MFG_ASYNC		2
+#define MT6765_POWER_DOMAIN_ISP			3
+#define MT6765_POWER_DOMAIN_MFG			4
+#define MT6765_POWER_DOMAIN_MFG_CORE0		5
+#define MT6765_POWER_DOMAIN_CAM			6
+#define MT6765_POWER_DOMAIN_VCODEC		7
+
+#endif /* _DT_BINDINGS_POWER_MT6765_POWER_H */
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index f05f8b1..3f74096 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -15,12 +15,36 @@
 #define SDM845_GFX	7
 #define SDM845_MSS	8
 
+/* SM8150 Power Domain Indexes */
+#define SM8150_MSS	0
+#define SM8150_EBI	1
+#define SM8150_LMX	2
+#define SM8150_LCX	3
+#define SM8150_GFX	4
+#define SM8150_MX	5
+#define SM8150_MX_AO	6
+#define SM8150_CX	7
+#define SM8150_CX_AO	8
+#define SM8150_MMCX	9
+#define SM8150_MMCX_AO	10
+
+/* SC7180 Power Domain Indexes */
+#define SC7180_CX	0
+#define SC7180_CX_AO	1
+#define SC7180_GFX	2
+#define SC7180_MX	3
+#define SC7180_MX_AO	4
+#define SC7180_LMX	5
+#define SC7180_LCX	6
+#define SC7180_MSS	7
+
 /* SDM845 Power Domain performance levels */
 #define RPMH_REGULATOR_LEVEL_RETENTION	16
 #define RPMH_REGULATOR_LEVEL_MIN_SVS	48
 #define RPMH_REGULATOR_LEVEL_LOW_SVS	64
 #define RPMH_REGULATOR_LEVEL_SVS	128
 #define RPMH_REGULATOR_LEVEL_SVS_L1	192
+#define RPMH_REGULATOR_LEVEL_SVS_L2	224
 #define RPMH_REGULATOR_LEVEL_NOM	256
 #define RPMH_REGULATOR_LEVEL_NOM_L1	320
 #define RPMH_REGULATOR_LEVEL_NOM_L2	336
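Note on usage: the new SM8150/SC7180 indexes select power domains on the RPMh power-domain provider, and the regulator levels are used by OPP tables attached to those domains. A sketch assuming the provider is labelled &rpmhpd:

	remoteproc-consumer {
		/* node name and &rpmhpd label are placeholders */
		power-domains = <&rpmhpd SC7180_CX>, <&rpmhpd SC7180_MX>;
		power-domain-names = "cx", "mx";
	};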
diff --git a/include/dt-bindings/reset-controller/mt2712-resets.h b/include/dt-bindings/reset-controller/mt2712-resets.h
new file mode 100644
index 0000000..9e7ee76
--- /dev/null
+++ b/include/dt-bindings/reset-controller/mt2712-resets.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Yong Liang <yong.liang@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT2712
+#define _DT_BINDINGS_RESET_CONTROLLER_MT2712
+
+#define MT2712_TOPRGU_INFRA_SW_RST				0
+#define MT2712_TOPRGU_MM_SW_RST					1
+#define MT2712_TOPRGU_MFG_SW_RST				2
+#define MT2712_TOPRGU_VENC_SW_RST				3
+#define MT2712_TOPRGU_VDEC_SW_RST				4
+#define MT2712_TOPRGU_IMG_SW_RST				5
+#define MT2712_TOPRGU_INFRA_AO_SW_RST				8
+#define MT2712_TOPRGU_USB_SW_RST				9
+#define MT2712_TOPRGU_APMIXED_SW_RST				10
+
+#define MT2712_TOPRGU_SW_RST_NUM				11
+
+#endif  /* _DT_BINDINGS_RESET_CONTROLLER_MT2712 */
diff --git a/include/dt-bindings/reset-controller/mt8183-resets.h b/include/dt-bindings/reset-controller/mt8183-resets.h
index 8804e34..a1bbd41 100644
--- a/include/dt-bindings/reset-controller/mt8183-resets.h
+++ b/include/dt-bindings/reset-controller/mt8183-resets.h
@@ -78,4 +78,21 @@
 #define MT8183_INFRACFG_AO_I2C7_SW_RST				126
 #define MT8183_INFRACFG_AO_I2C8_SW_RST				127
 
+#define MT8183_INFRACFG_SW_RST_NUM				128
+
+#define MT8183_TOPRGU_MM_SW_RST					1
+#define MT8183_TOPRGU_MFG_SW_RST				2
+#define MT8183_TOPRGU_VENC_SW_RST				3
+#define MT8183_TOPRGU_VDEC_SW_RST				4
+#define MT8183_TOPRGU_IMG_SW_RST				5
+#define MT8183_TOPRGU_MD_SW_RST					7
+#define MT8183_TOPRGU_CONN_SW_RST				9
+#define MT8183_TOPRGU_CONN_MCU_SW_RST				12
+#define MT8183_TOPRGU_IPU0_SW_RST				14
+#define MT8183_TOPRGU_IPU1_SW_RST				15
+#define MT8183_TOPRGU_AUDIO_SW_RST				17
+#define MT8183_TOPRGU_CAMSYS_SW_RST				18
+
+#define MT8183_TOPRGU_SW_RST_NUM				19
+
 #endif  /* _DT_BINDINGS_RESET_CONTROLLER_MT8183 */
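Note on usage: the new TOPRGU indexes are reset-specifier cells for the MT8183 watchdog/reset controller. A sketch assuming that controller is labelled &watchdog:

	audio-block {
		/* node name and &watchdog label are placeholders */
		resets = <&watchdog MT8183_TOPRGU_AUDIO_SW_RST>;
		reset-names = "audiosys";
	};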
diff --git a/include/dt-bindings/reset/nuvoton,npcm7xx-reset.h b/include/dt-bindings/reset/nuvoton,npcm7xx-reset.h
new file mode 100644
index 0000000..df088e68
--- /dev/null
+++ b/include/dt-bindings/reset/nuvoton,npcm7xx-reset.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (c) 2019 Nuvoton Technology corporation.
+
+#ifndef _DT_BINDINGS_NPCM7XX_RESET_H
+#define _DT_BINDINGS_NPCM7XX_RESET_H
+
+#define NPCM7XX_RESET_IPSRST1		0x20
+#define NPCM7XX_RESET_IPSRST2		0x24
+#define NPCM7XX_RESET_IPSRST3		0x34
+
+/* Reset lines on IP1 reset module (NPCM7XX_RESET_IPSRST1) */
+#define NPCM7XX_RESET_FIU3		1
+#define NPCM7XX_RESET_UDC1		5
+#define NPCM7XX_RESET_EMC1		6
+#define NPCM7XX_RESET_UART_2_3		7
+#define NPCM7XX_RESET_UDC2		8
+#define NPCM7XX_RESET_PECI		9
+#define NPCM7XX_RESET_AES		10
+#define NPCM7XX_RESET_UART_0_1		11
+#define NPCM7XX_RESET_MC		12
+#define NPCM7XX_RESET_SMB2		13
+#define NPCM7XX_RESET_SMB3		14
+#define NPCM7XX_RESET_SMB4		15
+#define NPCM7XX_RESET_SMB5		16
+#define NPCM7XX_RESET_PWM_M0		18
+#define NPCM7XX_RESET_TIMER_0_4		19
+#define NPCM7XX_RESET_TIMER_5_9		20
+#define NPCM7XX_RESET_EMC2		21
+#define NPCM7XX_RESET_UDC4		22
+#define NPCM7XX_RESET_UDC5		23
+#define NPCM7XX_RESET_UDC6		24
+#define NPCM7XX_RESET_UDC3		25
+#define NPCM7XX_RESET_ADC		27
+#define NPCM7XX_RESET_SMB6		28
+#define NPCM7XX_RESET_SMB7		29
+#define NPCM7XX_RESET_SMB0		30
+#define NPCM7XX_RESET_SMB1		31
+
+/* Reset lines on IP2 reset module (NPCM7XX_RESET_IPSRST2) */
+#define NPCM7XX_RESET_MFT0		0
+#define NPCM7XX_RESET_MFT1		1
+#define NPCM7XX_RESET_MFT2		2
+#define NPCM7XX_RESET_MFT3		3
+#define NPCM7XX_RESET_MFT4		4
+#define NPCM7XX_RESET_MFT5		5
+#define NPCM7XX_RESET_MFT6		6
+#define NPCM7XX_RESET_MFT7		7
+#define NPCM7XX_RESET_MMC		8
+#define NPCM7XX_RESET_SDHC		9
+#define NPCM7XX_RESET_GFX_SYS		10
+#define NPCM7XX_RESET_AHB_PCIBRG	11
+#define NPCM7XX_RESET_VDMA		12
+#define NPCM7XX_RESET_ECE		13
+#define NPCM7XX_RESET_VCD		14
+#define NPCM7XX_RESET_OTP		16
+#define NPCM7XX_RESET_SIOX1		18
+#define NPCM7XX_RESET_SIOX2		19
+#define NPCM7XX_RESET_3DES		21
+#define NPCM7XX_RESET_PSPI1		22
+#define NPCM7XX_RESET_PSPI2		23
+#define NPCM7XX_RESET_GMAC2		25
+#define NPCM7XX_RESET_USB_HOST		26
+#define NPCM7XX_RESET_GMAC1		28
+#define NPCM7XX_RESET_CP		31
+
+/* Reset lines on IP3 reset module (NPCM7XX_RESET_IPSRST3) */
+#define NPCM7XX_RESET_PWM_M1		0
+#define NPCM7XX_RESET_SMB12		1
+#define NPCM7XX_RESET_SPIX		2
+#define NPCM7XX_RESET_SMB13		3
+#define NPCM7XX_RESET_UDC0		4
+#define NPCM7XX_RESET_UDC7		5
+#define NPCM7XX_RESET_UDC8		6
+#define NPCM7XX_RESET_UDC9		7
+#define NPCM7XX_RESET_PCI_MAILBOX	9
+#define NPCM7XX_RESET_SMB14		12
+#define NPCM7XX_RESET_SHA		13
+#define NPCM7XX_RESET_SEC_ECC		14
+#define NPCM7XX_RESET_PCIE_RC		15
+#define NPCM7XX_RESET_TIMER_10_14	16
+#define NPCM7XX_RESET_RNG		17
+#define NPCM7XX_RESET_SMB15		18
+#define NPCM7XX_RESET_SMB8		19
+#define NPCM7XX_RESET_SMB9		20
+#define NPCM7XX_RESET_SMB10		21
+#define NPCM7XX_RESET_SMB11		22
+#define NPCM7XX_RESET_ESPI		23
+#define NPCM7XX_RESET_USB_PHY_1		24
+#define NPCM7XX_RESET_USB_PHY_2		25
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq6018.h b/include/dt-bindings/reset/qcom,gcc-ipq6018.h
new file mode 100644
index 0000000..02a220a
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-ipq6018.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_GCC_6018_H
+#define _DT_BINDINGS_RESET_IPQ_GCC_6018_H
+
+#define GCC_BLSP1_BCR				0
+#define GCC_BLSP1_QUP1_BCR			1
+#define GCC_BLSP1_UART1_BCR			2
+#define GCC_BLSP1_QUP2_BCR			3
+#define GCC_BLSP1_UART2_BCR			4
+#define GCC_BLSP1_QUP3_BCR			5
+#define GCC_BLSP1_UART3_BCR			6
+#define GCC_BLSP1_QUP4_BCR			7
+#define GCC_BLSP1_UART4_BCR			8
+#define GCC_BLSP1_QUP5_BCR			9
+#define GCC_BLSP1_UART5_BCR			10
+#define GCC_BLSP1_QUP6_BCR			11
+#define GCC_BLSP1_UART6_BCR			12
+#define GCC_IMEM_BCR				13
+#define GCC_SMMU_BCR				14
+#define GCC_APSS_TCU_BCR			15
+#define GCC_SMMU_XPU_BCR			16
+#define GCC_PCNOC_TBU_BCR			17
+#define GCC_SMMU_CFG_BCR			18
+#define GCC_PRNG_BCR				19
+#define GCC_BOOT_ROM_BCR			20
+#define GCC_CRYPTO_BCR				21
+#define GCC_WCSS_BCR				22
+#define GCC_WCSS_Q6_BCR				23
+#define GCC_NSS_BCR				24
+#define GCC_SEC_CTRL_BCR			25
+#define GCC_DDRSS_BCR				26
+#define GCC_SYSTEM_NOC_BCR			27
+#define GCC_PCNOC_BCR				28
+#define GCC_TCSR_BCR				29
+#define GCC_QDSS_BCR				30
+#define GCC_DCD_BCR				31
+#define GCC_MSG_RAM_BCR				32
+#define GCC_MPM_BCR				33
+#define GCC_SPDM_BCR				34
+#define GCC_RBCPR_BCR				35
+#define GCC_RBCPR_MX_BCR			36
+#define GCC_TLMM_BCR				37
+#define GCC_RBCPR_WCSS_BCR			38
+#define GCC_USB0_PHY_BCR			39
+#define GCC_USB3PHY_0_PHY_BCR			40
+#define GCC_USB0_BCR				41
+#define GCC_USB1_BCR				42
+#define GCC_QUSB2_0_PHY_BCR			43
+#define GCC_QUSB2_1_PHY_BCR			44
+#define GCC_SDCC1_BCR				45
+#define GCC_SNOC_BUS_TIMEOUT0_BCR		46
+#define GCC_SNOC_BUS_TIMEOUT1_BCR		47
+#define GCC_SNOC_BUS_TIMEOUT2_BCR		48
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR		49
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR		50
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR		51
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR		52
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR		53
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR		54
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR		55
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR		56
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR		57
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR		58
+#define GCC_UNIPHY0_BCR				59
+#define GCC_UNIPHY1_BCR				60
+#define GCC_CMN_12GPLL_BCR			61
+#define GCC_QPIC_BCR				62
+#define GCC_MDIO_BCR				63
+#define GCC_WCSS_CORE_TBU_BCR			64
+#define GCC_WCSS_Q6_TBU_BCR			65
+#define GCC_USB0_TBU_BCR			66
+#define GCC_PCIE0_TBU_BCR			67
+#define GCC_PCIE0_BCR				68
+#define GCC_PCIE0_PHY_BCR			69
+#define GCC_PCIE0PHY_PHY_BCR			70
+#define GCC_PCIE0_LINK_DOWN_BCR			71
+#define GCC_DCC_BCR				72
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR	73
+#define GCC_SMMU_CATS_BCR			74
+#define GCC_UBI0_AXI_ARES			75
+#define GCC_UBI0_AHB_ARES			76
+#define GCC_UBI0_NC_AXI_ARES			77
+#define GCC_UBI0_DBG_ARES			78
+#define GCC_UBI0_CORE_CLAMP_ENABLE		79
+#define GCC_UBI0_CLKRST_CLAMP_ENABLE		80
+#define GCC_UBI0_UTCM_ARES			81
+#define GCC_NSS_CFG_ARES			82
+#define GCC_NSS_NOC_ARES			83
+#define GCC_NSS_CRYPTO_ARES			84
+#define GCC_NSS_CSR_ARES			85
+#define GCC_NSS_CE_APB_ARES			86
+#define GCC_NSS_CE_AXI_ARES			87
+#define GCC_NSSNOC_CE_APB_ARES			88
+#define GCC_NSSNOC_CE_AXI_ARES			89
+#define GCC_NSSNOC_UBI0_AHB_ARES		90
+#define GCC_NSSNOC_SNOC_ARES			91
+#define GCC_NSSNOC_CRYPTO_ARES			92
+#define GCC_NSSNOC_ATB_ARES			93
+#define GCC_NSSNOC_QOSGEN_REF_ARES		94
+#define GCC_NSSNOC_TIMEOUT_REF_ARES		95
+#define GCC_PCIE0_PIPE_ARES			96
+#define GCC_PCIE0_SLEEP_ARES			97
+#define GCC_PCIE0_CORE_STICKY_ARES		98
+#define GCC_PCIE0_AXI_MASTER_ARES		99
+#define GCC_PCIE0_AXI_SLAVE_ARES		100
+#define GCC_PCIE0_AHB_ARES			101
+#define GCC_PCIE0_AXI_MASTER_STICKY_ARES	102
+#define GCC_PCIE0_AXI_SLAVE_STICKY_ARES		103
+#define GCC_PPE_FULL_RESET			104
+#define GCC_UNIPHY0_SOFT_RESET			105
+#define GCC_UNIPHY0_XPCS_RESET			106
+#define GCC_UNIPHY1_SOFT_RESET			107
+#define GCC_UNIPHY1_XPCS_RESET			108
+#define GCC_EDMA_HW_RESET			109
+#define GCC_ADSS_BCR				110
+#define GCC_NSS_NOC_TBU_BCR			111
+#define GCC_NSSPORT1_RESET			112
+#define GCC_NSSPORT2_RESET			113
+#define GCC_NSSPORT3_RESET			114
+#define GCC_NSSPORT4_RESET			115
+#define GCC_NSSPORT5_RESET			116
+#define GCC_UNIPHY0_PORT1_ARES			117
+#define GCC_UNIPHY0_PORT2_ARES			118
+#define GCC_UNIPHY0_PORT3_ARES			119
+#define GCC_UNIPHY0_PORT4_ARES			120
+#define GCC_UNIPHY0_PORT5_ARES			121
+#define GCC_UNIPHY0_PORT_4_5_RESET		122
+#define GCC_UNIPHY0_PORT_4_RESET		123
+#define GCC_LPASS_BCR				124
+#define GCC_UBI32_TBU_BCR			125
+#define GCC_LPASS_TBU_BCR			126
+#define GCC_WCSSAON_RESET			127
+#define GCC_LPASS_Q6_AXIM_ARES			128
+#define GCC_LPASS_Q6SS_TSCTR_1TO2_ARES		129
+#define GCC_LPASS_Q6SS_TRIG_ARES		130
+#define GCC_LPASS_Q6_ATBM_AT_ARES		131
+#define GCC_LPASS_Q6_PCLKDBG_ARES		132
+#define GCC_LPASS_CORE_AXIM_ARES		133
+#define GCC_LPASS_SNOC_CFG_ARES			134
+#define GCC_WCSS_DBG_ARES			135
+#define GCC_WCSS_ECAHB_ARES			136
+#define GCC_WCSS_ACMT_ARES			137
+#define GCC_WCSS_DBG_BDG_ARES			138
+#define GCC_WCSS_AHB_S_ARES			139
+#define GCC_WCSS_AXI_M_ARES			140
+#define GCC_Q6SS_DBG_ARES			141
+#define GCC_Q6_AHB_S_ARES			142
+#define GCC_Q6_AHB_ARES				143
+#define GCC_Q6_AXIM2_ARES			144
+#define GCC_Q6_AXIM_ARES			145
+#define GCC_UBI0_CORE_ARES			146
+
+#endif
diff --git a/include/kunit/assert.h b/include/kunit/assert.h
index db6a0fc..ad889b5 100644
--- a/include/kunit/assert.h
+++ b/include/kunit/assert.h
@@ -9,10 +9,11 @@
 #ifndef _KUNIT_ASSERT_H
 #define _KUNIT_ASSERT_H
 
-#include <kunit/string-stream.h>
 #include <linux/err.h>
+#include <linux/kernel.h>
 
 struct kunit;
+struct string_stream;
 
 /**
  * enum kunit_assert_type - Type of expectation/assertion.
diff --git a/include/kunit/test.h b/include/kunit/test.h
index dba4830..2dfb550 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -12,6 +12,7 @@
 #include <kunit/assert.h>
 #include <kunit/try-catch.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 
@@ -197,31 +198,47 @@ void kunit_init_test(struct kunit *test, const char *name);
 int kunit_run_tests(struct kunit_suite *suite);
 
 /**
- * kunit_test_suite() - used to register a &struct kunit_suite with KUnit.
+ * kunit_test_suites() - used to register one or more &struct kunit_suite
+ *			 with KUnit.
  *
- * @suite: a statically allocated &struct kunit_suite.
+ * @suites: a statically allocated list of &struct kunit_suite.
  *
- * Registers @suite with the test framework. See &struct kunit_suite for
+ * Registers @suites with the test framework. See &struct kunit_suite for
  * more information.
  *
- * NOTE: Currently KUnit tests are all run as late_initcalls; this means
+ * When builtin, KUnit tests are all run as late_initcalls; this means
  * that they cannot test anything where tests must run at a different init
  * phase. One significant restriction resulting from this is that KUnit
  * cannot reliably test anything that is initialized in the late_init phase;
  * another is that KUnit cannot be used to test things that need to run in
  * an earlier init phase.
  *
+ * An alternative is to build the tests as a module.  Because modules
+ * do not support multiple late_initcall()s, we need to initialize an
+ * array of suites for a module.
+ *
  * TODO(brendanhiggins@google.com): Don't run all KUnit tests as
  * late_initcalls.  I have some future work planned to dispatch all KUnit
  * tests from the same place, and at the very least to do so after
  * everything else is definitely initialized.
  */
-#define kunit_test_suite(suite)						       \
-	static int kunit_suite_init##suite(void)			       \
-	{								       \
-		return kunit_run_tests(&suite);				       \
-	}								       \
-	late_initcall(kunit_suite_init##suite)
+#define kunit_test_suites(...)						\
+	static struct kunit_suite *suites[] = { __VA_ARGS__, NULL};	\
+	static int kunit_test_suites_init(void)				\
+	{								\
+		unsigned int i;						\
+		for (i = 0; suites[i] != NULL; i++)			\
+			kunit_run_tests(suites[i]);			\
+		return 0;						\
+	}								\
+	late_initcall(kunit_test_suites_init);				\
+	static void __exit kunit_test_suites_exit(void)			\
+	{								\
+		return;							\
+	}								\
+	module_exit(kunit_test_suites_exit)
+
+#define kunit_test_suite(suite)	kunit_test_suites(&suite)
 
 /*
  * Like kunit_alloc_resource() below, but returns the struct kunit_resource
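
For illustration only (the suite and case names below are made up): with the new macro, a minimal suite is declared the same way whether the tests are built in or built as a module, and kunit_test_suites() supplies the init/exit boilerplate.

    #include <kunit/test.h>

    static void example_add_test(struct kunit *test)
    {
            /* trivial expectation: 1 + 2 must equal 3 */
            KUNIT_EXPECT_EQ(test, 3, 1 + 2);
    }

    static struct kunit_case example_test_cases[] = {
            KUNIT_CASE(example_add_test),
            {}
    };

    static struct kunit_suite example_test_suite = {
            .name = "example",
            .test_cases = example_test_cases,
    };

    /* expands to a late_initcall() when built in, module hooks otherwise */
    kunit_test_suites(&example_test_suite);
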
diff --git a/include/kunit/try-catch.h b/include/kunit/try-catch.h
index 404f336..c507dd4 100644
--- a/include/kunit/try-catch.h
+++ b/include/kunit/try-catch.h
@@ -53,11 +53,6 @@ struct kunit_try_catch {
 	void *context;
 };
 
-void kunit_try_catch_init(struct kunit_try_catch *try_catch,
-			  struct kunit *test,
-			  kunit_try_catch_func_t try,
-			  kunit_try_catch_func_t catch);
-
 void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context);
 
 void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch);
@@ -67,9 +62,4 @@ static inline int kunit_try_catch_get_result(struct kunit_try_catch *try_catch)
 	return try_catch->try_result;
 }
 
-/*
- * Exposed for testing only.
- */
-void kunit_generic_try_catch_init(struct kunit_try_catch *try_catch);
-
 #endif /* _KUNIT_TRY_CATCH_H */
diff --git a/include/linux/alcor_pci.h b/include/linux/alcor_pci.h
index 4416df5..8274ed5 100644
--- a/include/linux/alcor_pci.h
+++ b/include/linux/alcor_pci.h
@@ -17,6 +17,7 @@
 #define PCI_ID_ALCOR_MICRO			0x1AEA
 #define PCI_ID_AU6601				0x6601
 #define PCI_ID_AU6621				0x6621
+#define PCI_ID_AU6625				0x6625
 
 #define MHZ_TO_HZ(freq)				((freq) * 1000 * 1000)
 
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
index d12bb21..e4004d1 100644
--- a/include/linux/attribute_container.h
+++ b/include/linux/attribute_container.h
@@ -54,6 +54,13 @@ void attribute_container_device_trigger(struct device *dev,
 					int (*fn)(struct attribute_container *,
 						  struct device *,
 						  struct device *));
+int attribute_container_device_trigger_safe(struct device *dev,
+					    int (*fn)(struct attribute_container *,
+						      struct device *,
+						      struct device *),
+					    int (*undo)(struct attribute_container *,
+							struct device *,
+							struct device *));
 void attribute_container_trigger(struct device *dev, 
 				 int (*fn)(struct attribute_container *,
 					   struct device *));
diff --git a/include/linux/b1pcmcia.h b/include/linux/b1pcmcia.h
deleted file mode 100644
index 12a867c..0000000
--- a/include/linux/b1pcmcia.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* $Id: b1pcmcia.h,v 1.1.8.2 2001/09/23 22:25:05 kai Exp $
- *
- * Exported functions of module b1pcmcia to be called by
- * avm_cs card services module.
- *
- * Copyright 1999 by Carsten Paeth (calle@calle.in-berlin.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef _B1PCMCIA_H_
-#define _B1PCMCIA_H_
-
-int b1pcmcia_addcard_b1(unsigned int port, unsigned irq);
-int b1pcmcia_addcard_m1(unsigned int port, unsigned irq);
-int b1pcmcia_addcard_m2(unsigned int port, unsigned irq);
-int b1pcmcia_delcard(unsigned int port, unsigned irq);
-
-#endif	/* _B1PCMCIA_H_ */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 97967ce..f88197c 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,6 +13,7 @@
 #include <linux/fs.h>
 #include <linux/sched.h>
 #include <linux/blkdev.h>
+#include <linux/device.h>
 #include <linux/writeback.h>
 #include <linux/blk-cgroup.h>
 #include <linux/backing-dev-defs.h>
@@ -504,4 +505,13 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
 				  (1 << WB_async_congested));
 }
 
+extern const char *bdi_unknown_name;
+
+static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
+{
+	if (!bdi || !bdi->dev)
+		return bdi_unknown_name;
+	return dev_name(bdi->dev);
+}
+
 #endif	/* _LINUX_BACKING_DEV_H */
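
A usage sketch (the wrapper function is hypothetical): bdi_dev_name() falls back to the exported bdi_unknown_name string when the bdi is NULL or its device has not been registered yet, so callers can log unconditionally.

    #include <linux/backing-dev.h>
    #include <linux/printk.h>

    static void example_report_writeback_error(struct inode *inode)
    {
            /* safe even before bdi_register() has run for this device */
            pr_warn("writeback error on %s\n",
                    bdi_dev_name(inode_to_bdi(inode)));
    }
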
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 80ad521..e52ceb1 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -186,7 +186,7 @@ bitmap_find_next_zero_area(unsigned long *map,
 					      align_mask, 0);
 }
 
-extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
+extern int bitmap_parse(const char *buf, unsigned int buflen,
 			unsigned long *dst, int nbits);
 extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
 			unsigned long *dst, int nbits);
@@ -454,12 +454,6 @@ static inline void bitmap_replace(unsigned long *dst,
 		__bitmap_replace(dst, old, new, mask, nbits);
 }
 
-static inline int bitmap_parse(const char *buf, unsigned int buflen,
-			unsigned long *maskp, int nmaskbits)
-{
-	return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
-}
-
 static inline void bitmap_next_clear_region(unsigned long *bitmap,
 					    unsigned int *rs, unsigned int *re,
 					    unsigned int end)
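
The in-kernel parser now carries the plain bitmap_parse() name, without the old is_user flag. A small call sketch (the wrapper and the 128-bit size are arbitrary); passing UINT_MAX as the length asks the parser to run to the terminating NUL, which is how the updated cpumask_parse() further down uses it.

    #include <linux/bitmap.h>
    #include <linux/kernel.h>

    static int example_parse_mask(const char *str)
    {
            DECLARE_BITMAP(mask, 128);

            /* parse a comma-separated hex string such as "ff,ffff0000" */
            return bitmap_parse(str, UINT_MAX, mask, 128);
    }
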
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index e479067..47f54b4 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -11,8 +11,11 @@
 #  define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
 #endif
 
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+#define BITS_TO_U64(nr)		DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
+#define BITS_TO_U32(nr)		DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
+#define BITS_TO_BYTES(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
 
 extern unsigned int __sw_hweight8(unsigned int w);
 extern unsigned int __sw_hweight16(unsigned int w);
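
A small sizing sketch for the new helpers (array names are arbitrary): they mirror BITS_TO_LONGS() for other fixed-width storage units.

    /* a 1000-bit map needs 16 u64 words, 32 u32 words and 125 bytes */
    static u64 storage64[BITS_TO_U64(1000)];
    static u32 storage32[BITS_TO_U32(1000)];
    static u8  storage8[BITS_TO_BYTES(1000)];
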
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4c636c4..053ea4b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1711,6 +1711,13 @@ struct block_device_operations {
 	const struct pr_ops *pr_ops;
 };
 
+#ifdef CONFIG_COMPAT
+extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
+				      unsigned int, unsigned long);
+#else
+#define blkdev_compat_ptr_ioctl NULL
+#endif
+
 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
 				 unsigned long);
 extern int bdev_read_page(struct block_device *, sector_t, struct page *);
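
A sketch of the intended use (example_open and example_ioctl are placeholders): a block driver whose ioctls only pass pointer arguments can point .compat_ioctl at the new helper; with CONFIG_COMPAT=n the macro above turns that into a NULL initializer.

    static const struct block_device_operations example_bdops = {
            .owner        = THIS_MODULE,
            .open         = example_open,   /* hypothetical */
            .ioctl        = example_ioctl,  /* hypothetical */
            .compat_ioctl = blkdev_compat_ptr_ioctl,
    };
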
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
new file mode 100644
index 0000000..7e18c93
--- /dev/null
+++ b/include/linux/bootconfig.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_XBC_H
+#define _LINUX_XBC_H
+/*
+ * Extra Boot Config
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Masami Hiramatsu <mhiramat@kernel.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/* XBC tree node */
+struct xbc_node {
+	u16 next;
+	u16 child;
+	u16 parent;
+	u16 data;
+} __attribute__ ((__packed__));
+
+#define XBC_KEY		0
+#define XBC_VALUE	(1 << 15)
+/* Maximum size of boot config is 32KB - 1 */
+#define XBC_DATA_MAX	(XBC_VALUE - 1)
+
+#define XBC_NODE_MAX	1024
+#define XBC_KEYLEN_MAX	256
+#define XBC_DEPTH_MAX	16
+
+/* Node tree access raw APIs */
+struct xbc_node * __init xbc_root_node(void);
+int __init xbc_node_index(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_child(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_next(struct xbc_node *node);
+const char * __init xbc_node_get_data(struct xbc_node *node);
+
+/**
+ * xbc_node_is_value() - Test the node is a value node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a value node; returns true if it is, false if not.
+ */
+static inline __init bool xbc_node_is_value(struct xbc_node *node)
+{
+	return node->data & XBC_VALUE;
+}
+
+/**
+ * xbc_node_is_key() - Test the node is a key node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a key node; returns true if it is, false if not.
+ */
+static inline __init bool xbc_node_is_key(struct xbc_node *node)
+{
+	return !xbc_node_is_value(node);
+}
+
+/**
+ * xbc_node_is_array() - Test the node is an arrayed value node
+ * @node: An XBC node.
+ *
+ * Test whether @node is an arrayed value node.
+ */
+static inline __init bool xbc_node_is_array(struct xbc_node *node)
+{
+	return xbc_node_is_value(node) && node->next != 0;
+}
+
+/**
+ * xbc_node_is_leaf() - Test the node is a leaf key node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a leaf key node, i.e. a key node which has a value
+ * node or no child. Returns true if it is a leaf node, false if not.
+ */
+static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
+{
+	return xbc_node_is_key(node) &&
+		(!node->child || xbc_node_is_value(xbc_node_get_child(node)));
+}
+
+/* Tree-based key-value access APIs */
+struct xbc_node * __init xbc_node_find_child(struct xbc_node *parent,
+					     const char *key);
+
+const char * __init xbc_node_find_value(struct xbc_node *parent,
+					const char *key,
+					struct xbc_node **vnode);
+
+struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
+						 struct xbc_node *leaf);
+
+const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
+						 struct xbc_node **leaf);
+
+/**
+ * xbc_find_value() - Find a value which matches the key
+ * @key: Search key
+ * @vnode: A container pointer of XBC value node.
+ *
+ * Search the whole XBC tree for a value whose key matches @key and return
+ * the value if found. The found value node is stored in *@vnode.
+ * Note that this can return a 0-length string and store NULL in *@vnode for
+ * a key-only (non-value) entry.
+ */
+static inline const char * __init
+xbc_find_value(const char *key, struct xbc_node **vnode)
+{
+	return xbc_node_find_value(NULL, key, vnode);
+}
+
+/**
+ * xbc_find_node() - Find a node which matches the key
+ * @key: Search key
+ *
+ * Search the whole XBC tree for a (key) node whose key matches @key and
+ * return the node if found. If not found, returns NULL.
+ */
+static inline struct xbc_node * __init xbc_find_node(const char *key)
+{
+	return xbc_node_find_child(NULL, key);
+}
+
+/**
+ * xbc_array_for_each_value() - Iterate value nodes on an array
+ * @anode: An XBC arrayed value node
+ * @value: A value
+ *
+ * Iterate over the array value nodes and values starting from @anode. This is
+ * expected to be used with xbc_find_value() and xbc_node_find_value(), so
+ * that the user can process each array entry node.
+ */
+#define xbc_array_for_each_value(anode, value)				\
+	for (value = xbc_node_get_data(anode); anode != NULL ;		\
+	     anode = xbc_node_get_next(anode),				\
+	     value = anode ? xbc_node_get_data(anode) : NULL)
+
+/**
+ * xbc_node_for_each_child() - Iterate child nodes
+ * @parent: An XBC node.
+ * @child: Iterated XBC node.
+ *
+ * Iterate over the child nodes of @parent. Each child node is stored in @child.
+ */
+#define xbc_node_for_each_child(parent, child)				\
+	for (child = xbc_node_get_child(parent); child != NULL ;	\
+	     child = xbc_node_get_next(child))
+
+/**
+ * xbc_node_for_each_array_value() - Iterate array entries of a given key
+ * @node: An XBC node.
+ * @key: A key string searched under @node
+ * @anode: Iterated XBC node of array entry.
+ * @value: Iterated value of array entry.
+ *
+ * Iterate over the array entries of the given @key under @node. Each array
+ * entry node and its value are stored in @anode and @value. If @node has no
+ * @key node, this does nothing.
+ * Note that even if the found key node has only one value (not an array),
+ * this executes the block once. However, if the found key node has no value
+ * (key-only node), this does nothing. So don't use this to test for the
+ * existence of a key-value pair.
+ */
+#define xbc_node_for_each_array_value(node, key, anode, value)		\
+	for (value = xbc_node_find_value(node, key, &anode); value != NULL; \
+	     anode = xbc_node_get_next(anode),				\
+	     value = anode ? xbc_node_get_data(anode) : NULL)
+
+/**
+ * xbc_node_for_each_key_value() - Iterate key-value pairs under a node
+ * @node: An XBC node.
+ * @knode: Iterated key node
+ * @value: Iterated value string
+ *
+ * Iterate key-value pairs under @node. Each key node and value string are
+ * stored in @knode and @value respectively.
+ */
+#define xbc_node_for_each_key_value(node, knode, value)			\
+	for (knode = NULL, value = xbc_node_find_next_key_value(node, &knode);\
+	     knode != NULL; value = xbc_node_find_next_key_value(node, &knode))
+
+/**
+ * xbc_for_each_key_value() - Iterate key-value pairs
+ * @knode: Iterated key node
+ * @value: Iterated value string
+ *
+ * Iterate key-value pairs in whole XBC tree. Each key node and value string
+ * are stored in @knode and @value respectively.
+ */
+#define xbc_for_each_key_value(knode, value)				\
+	xbc_node_for_each_key_value(NULL, knode, value)
+
+/* Compose partial key */
+int __init xbc_node_compose_key_after(struct xbc_node *root,
+			struct xbc_node *node, char *buf, size_t size);
+
+/**
+ * xbc_node_compose_key() - Compose full key string of the XBC node
+ * @node: An XBC node.
+ * @buf: A buffer to store the key.
+ * @size: The size of the @buf.
+ *
+ * Compose the full-length key of the @node into @buf. Returns the total
+ * length of the key stored in @buf, -EINVAL if @node is NULL, or -ERANGE
+ * if the key depth is deeper than the maximum depth.
+ */
+static inline int __init xbc_node_compose_key(struct xbc_node *node,
+					      char *buf, size_t size)
+{
+	return xbc_node_compose_key_after(NULL, node, buf, size);
+}
+
+/* XBC node initializer */
+int __init xbc_init(char *buf);
+
+/* XBC cleanup data structures */
+void __init xbc_destroy_all(void);
+
+/* Debug dump functions */
+void __init xbc_debug_dump(void);
+
+#endif
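
A consumer sketch (the "example.feature.options" key is invented): walking every value of an array entry found anywhere in the boot config tree.

    #include <linux/bootconfig.h>
    #include <linux/printk.h>

    static int __init example_dump_bootconfig(void)
    {
            struct xbc_node *anode;
            const char *val;

            /* a NULL root means "search the whole tree" */
            xbc_node_for_each_array_value(NULL, "example.feature.options",
                                          anode, val)
                    pr_info("option: %s\n", val);

            return 0;
    }
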
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8e9ad39..49b1a70 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -728,7 +728,7 @@ struct bpf_struct_ops {
 #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
 #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
 const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
-void bpf_struct_ops_init(struct btf *btf);
+void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
 bool bpf_struct_ops_get(const void *kdata);
 void bpf_struct_ops_put(const void *kdata);
 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
@@ -752,7 +752,10 @@ static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
 {
 	return NULL;
 }
-static inline void bpf_struct_ops_init(struct btf *btf) { }
+static inline void bpf_struct_ops_init(struct btf *btf,
+				       struct bpf_verifier_log *log)
+{
+}
 static inline bool bpf_try_module_get(const void *data, struct module *owner)
 {
 	return try_module_get(owner);
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 8fe9b80..ec73ebc 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -281,11 +281,12 @@ extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
 extern void *ceph_kvmalloc(size_t size, gfp_t flags);
 
 struct fs_parameter;
+struct fc_log;
 struct ceph_options *ceph_alloc_options(void);
 int ceph_parse_mon_ips(const char *buf, size_t len, struct ceph_options *opt,
-		       struct fs_context *fc);
+		       struct fc_log *l);
 int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
-		     struct fs_context *fc);
+		     struct fc_log *l);
 int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
 			      bool show_all);
 extern void ceph_destroy_options(struct ceph_options *opt);
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
index 0067d76..35d3852 100644
--- a/include/linux/ceph/mdsmap.h
+++ b/include/linux/ceph/mdsmap.h
@@ -25,8 +25,9 @@ struct ceph_mdsmap {
 	u32 m_session_timeout;          /* seconds */
 	u32 m_session_autoclose;        /* seconds */
 	u64 m_max_file_size;
-	u32 m_max_mds;                  /* size of m_addr, m_state arrays */
-	int m_num_mds;
+	u32 m_max_mds;			/* expected up:active mds number */
+	u32 m_num_active_mds;		/* actual up:active mds number */
+	u32 possible_max_rank;		/* possible max rank index */
 	struct ceph_mds_info *m_info;
 
 	/* which object pools file data can be stored in */
@@ -42,7 +43,7 @@ struct ceph_mdsmap {
 static inline struct ceph_entity_addr *
 ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
 {
-	if (w >= m->m_num_mds)
+	if (w >= m->possible_max_rank)
 		return NULL;
 	return &m->m_info[w].addr;
 }
@@ -50,14 +51,14 @@ ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
 static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
 {
 	BUG_ON(w < 0);
-	if (w >= m->m_num_mds)
+	if (w >= m->possible_max_rank)
 		return CEPH_MDS_STATE_DNE;
 	return m->m_info[w].state;
 }
 
 static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
 {
-	if (w >= 0 && w < m->m_num_mds)
+	if (w >= 0 && w < m->possible_max_rank)
 		return m->m_info[w].laggy;
 	return false;
 }
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index eaffbdd..5a62dbd 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -534,6 +534,7 @@ int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
 			struct ceph_object_id *dst_oid,
 			struct ceph_object_locator *dst_oloc,
 			u32 dst_fadvise_flags,
+			u32 truncate_seq, u64 truncate_size,
 			u8 copy_from_flags);
 
 /* watch/notify */
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 3eb0e556..59bdfd4 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -256,6 +256,7 @@ extern const char *ceph_osd_state_name(int s);
 									    \
 	/* tiering */							    \
 	f(COPY_FROM,	__CEPH_OSD_OP(WR, DATA, 26),	"copy-from")	    \
+	f(COPY_FROM2,	__CEPH_OSD_OP(WR, DATA, 45),	"copy-from2")	    \
 	f(COPY_GET_CLASSIC, __CEPH_OSD_OP(RD, DATA, 27), "copy-get-classic") \
 	f(UNDIRTY,	__CEPH_OSD_OP(WR, DATA, 28),	"undirty")	    \
 	f(ISDIRTY,	__CEPH_OSD_OP(RD, DATA, 29),	"isdirty")	    \
@@ -446,6 +447,7 @@ enum {
 	CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE = 8, /* map snap direct to
 						     * cloneid */
 	CEPH_OSD_COPY_FROM_FLAG_RWORDERED = 16,     /* order with write */
+	CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ = 32,  /* send truncate_{seq,size} */
 };
 
 enum {
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index caf4b9d..952ac03 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -190,8 +190,14 @@ struct clk_duty {
  *
  * @init:	Perform platform-specific initialization magic.
 *		This is not used by any of the basic clock types.
- *		Please consider other ways of solving initialization problems
- *		before using this callback, as its use is discouraged.
+ *		This callback exists for HW which needs to perform some
+ *		initialisation magic for CCF to get an accurate view of the
+ *		clock. It may also be used when dynamic resource allocation is
+ *		required. It shall not be used to deal with clock parameters,
+ *		such as rate or parents.
+ *		Returns 0 on success, a negative error code otherwise.
+ *
+ * @terminate:  Free any resource allocated by init.
  *
  * @debug_init:	Set up type-specific debugfs entries for this clock.  This
  *		is called once, after the debugfs directory entry for this
@@ -243,7 +249,8 @@ struct clk_ops {
 					  struct clk_duty *duty);
 	int		(*set_duty_cycle)(struct clk_hw *hw,
 					  struct clk_duty *duty);
-	void		(*init)(struct clk_hw *hw);
+	int		(*init)(struct clk_hw *hw);
+	void		(*terminate)(struct clk_hw *hw);
 	void		(*debug_init)(struct clk_hw *hw, struct dentry *dentry);
 };
 
@@ -321,29 +328,119 @@ struct clk_hw {
  * struct clk_fixed_rate - fixed-rate clock
  * @hw:		handle between common and hardware-specific interfaces
  * @fixed_rate:	constant frequency of clock
+ * @fixed_accuracy: constant accuracy of clock in ppb (parts per billion)
+ * @flags:	hardware specific flags
+ *
+ * Flags:
+ * * CLK_FIXED_RATE_PARENT_ACCURACY - Use the accuracy of the parent clk
+ *                                    instead of what's set in @fixed_accuracy.
  */
 struct clk_fixed_rate {
 	struct		clk_hw hw;
 	unsigned long	fixed_rate;
 	unsigned long	fixed_accuracy;
+	unsigned long	flags;
 };
 
-#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
+#define CLK_FIXED_RATE_PARENT_ACCURACY		BIT(0)
 
 extern const struct clk_ops clk_fixed_rate_ops;
+struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
+		struct device_node *np, const char *name,
+		const char *parent_name, const struct clk_hw *parent_hw,
+		const struct clk_parent_data *parent_data, unsigned long flags,
+		unsigned long fixed_rate, unsigned long fixed_accuracy,
+		unsigned long clk_fixed_flags);
 struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
 		const char *parent_name, unsigned long flags,
 		unsigned long fixed_rate);
-struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name,
-		const char *parent_name, unsigned long flags,
-		unsigned long fixed_rate);
-struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
-		const char *name, const char *parent_name, unsigned long flags,
-		unsigned long fixed_rate, unsigned long fixed_accuracy);
+/**
+ * clk_hw_register_fixed_rate - register fixed-rate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate)  \
+	__clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
+				     NULL, (flags), (fixed_rate), 0, 0)
+/**
+ * clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_hw(dev, name, parent_hw, flags,     \
+					     fixed_rate)		      \
+	__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw),  \
+				     NULL, (flags), (fixed_rate), 0, 0)
+/**
+ * clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \
+					     fixed_rate)		      \
+	__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL,	      \
+				     (parent_data), (flags), (fixed_rate), 0, \
+				     0)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name,      \
+						 flags, fixed_rate,	      \
+						 fixed_accuracy)	      \
+	__clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name),      \
+				     NULL, NULL, (flags), (fixed_rate),       \
+				     (fixed_accuracy), 0)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy_parent_hw - register fixed-rate
+ * clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy_parent_hw(dev, name,	      \
+		parent_hw, flags, fixed_rate, fixed_accuracy)		      \
+	__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw),  \
+				     NULL, (flags), (fixed_rate),	      \
+				     (fixed_accuracy), 0)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate
+ * clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy_parent_data(dev, name,	      \
+		parent_data, flags, fixed_rate, fixed_accuracy)		      \
+	__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL,	      \
+				     (parent_data), (flags),		      \
+				     (fixed_rate), (fixed_accuracy), 0)
+
 void clk_unregister_fixed_rate(struct clk *clk);
-struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
-		const char *name, const char *parent_name, unsigned long flags,
-		unsigned long fixed_rate, unsigned long fixed_accuracy);
 void clk_hw_unregister_fixed_rate(struct clk_hw *hw);
 
 void of_fixed_clk_setup(struct device_node *np);
@@ -386,14 +483,67 @@ struct clk_gate {
 #define CLK_GATE_BIG_ENDIAN		BIT(2)
 
 extern const struct clk_ops clk_gate_ops;
+struct clk_hw *__clk_hw_register_gate(struct device *dev,
+		struct device_node *np, const char *name,
+		const char *parent_name, const struct clk_hw *parent_hw,
+		const struct clk_parent_data *parent_data,
+		unsigned long flags,
+		void __iomem *reg, u8 bit_idx,
+		u8 clk_gate_flags, spinlock_t *lock);
 struct clk *clk_register_gate(struct device *dev, const char *name,
 		const char *parent_name, unsigned long flags,
 		void __iomem *reg, u8 bit_idx,
 		u8 clk_gate_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
-		const char *parent_name, unsigned long flags,
-		void __iomem *reg, u8 bit_idx,
-		u8 clk_gate_flags, spinlock_t *lock);
+/**
+ * clk_hw_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate(dev, name, parent_name, flags, reg, bit_idx,     \
+			     clk_gate_flags, lock)			      \
+	__clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL,      \
+			       NULL, (flags), (reg), (bit_idx),		      \
+			       (clk_gate_flags), (lock))
+/**
+ * clk_hw_register_gate_parent_hw - register a gate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, reg,      \
+				       bit_idx, clk_gate_flags, lock)	      \
+	__clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw),	      \
+			       NULL, (flags), (reg), (bit_idx),		      \
+			       (clk_gate_flags), (lock))
+/**
+ * clk_hw_register_gate_parent_data - register a gate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate_parent_data(dev, name, parent_data, flags, reg,  \
+				       bit_idx, clk_gate_flags, lock)	      \
+	__clk_hw_register_gate((dev), NULL, (name), NULL, NULL,	      \
+			       (parent_data), (flags), (reg), (bit_idx),      \
+			       (clk_gate_flags), (lock))
 void clk_unregister_gate(struct clk *clk);
 void clk_hw_unregister_gate(struct clk_hw *hw);
 int clk_gate_is_enabled(struct clk_hw *hw);
@@ -483,24 +633,153 @@ int divider_get_val(unsigned long rate, unsigned long parent_rate,
 		const struct clk_div_table *table, u8 width,
 		unsigned long flags);
 
-struct clk *clk_register_divider(struct device *dev, const char *name,
-		const char *parent_name, unsigned long flags,
-		void __iomem *reg, u8 shift, u8 width,
-		u8 clk_divider_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name,
-		const char *parent_name, unsigned long flags,
-		void __iomem *reg, u8 shift, u8 width,
-		u8 clk_divider_flags, spinlock_t *lock);
+struct clk_hw *__clk_hw_register_divider(struct device *dev,
+		struct device_node *np, const char *name,
+		const char *parent_name, const struct clk_hw *parent_hw,
+		const struct clk_parent_data *parent_data, unsigned long flags,
+		void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+		const struct clk_div_table *table, spinlock_t *lock);
 struct clk *clk_register_divider_table(struct device *dev, const char *name,
 		const char *parent_name, unsigned long flags,
 		void __iomem *reg, u8 shift, u8 width,
 		u8 clk_divider_flags, const struct clk_div_table *table,
 		spinlock_t *lock);
-struct clk_hw *clk_hw_register_divider_table(struct device *dev,
-		const char *name, const char *parent_name, unsigned long flags,
-		void __iomem *reg, u8 shift, u8 width,
-		u8 clk_divider_flags, const struct clk_div_table *table,
-		spinlock_t *lock);
+/**
+ * clk_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_register_divider(dev, name, parent_name, flags, reg, shift, width, \
+			     clk_divider_flags, lock)			       \
+	clk_register_divider_table((dev), (name), (parent_name), (flags),      \
+				   (reg), (shift), (width),		       \
+				   (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider(dev, name, parent_name, flags, reg, shift,    \
+				width, clk_divider_flags, lock)		      \
+	__clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL,   \
+				  NULL, (flags), (reg), (shift), (width),     \
+				  (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_parent_hw - register a divider clock with the clock
+ * framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_parent_hw(dev, name, parent_hw, flags, reg,   \
+					  shift, width, clk_divider_flags,    \
+					  lock)				      \
+	__clk_hw_register_divider((dev), NULL, (name), NULL, (parent_hw),     \
+				  NULL, (flags), (reg), (shift), (width),     \
+				  (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_parent_data - register a divider clock with the clock
+ * framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_parent_data(dev, name, parent_data, flags,    \
+					    reg, shift, width,		      \
+					    clk_divider_flags, lock)	      \
+	__clk_hw_register_divider((dev), NULL, (name), NULL, NULL,	      \
+				  (parent_data), (flags), (reg), (shift),     \
+				  (width), (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_table - register a table based divider clock with
+ * the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table(dev, name, parent_name, flags, reg,     \
+				      shift, width, clk_divider_flags, table, \
+				      lock)				      \
+	__clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL,   \
+				  NULL, (flags), (reg), (shift), (width),     \
+				  (clk_divider_flags), (table), (lock))
+/**
+ * clk_hw_register_divider_table_parent_hw - register a table based divider
+ * clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table_parent_hw(dev, name, parent_hw, flags,  \
+						reg, shift, width,	      \
+						clk_divider_flags, table,     \
+						lock)			      \
+	__clk_hw_register_divider((dev), NULL, (name), NULL, (parent_hw),     \
+				  NULL, (flags), (reg), (shift), (width),     \
+				  (clk_divider_flags), (table), (lock))
+/**
+ * clk_hw_register_divider_table_parent_data - register a table based divider
+ * clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table_parent_data(dev, name, parent_data,     \
+						  flags, reg, shift, width,   \
+						  clk_divider_flags, table,   \
+						  lock)			      \
+	__clk_hw_register_divider((dev), NULL, (name), NULL, NULL,	      \
+				  (parent_data), (flags), (reg), (shift),     \
+				  (width), (clk_divider_flags), (table),      \
+				  (lock))
+
 void clk_unregister_divider(struct clk *clk);
 void clk_hw_unregister_divider(struct clk_hw *hw);
 
@@ -555,27 +834,47 @@ struct clk_mux {
 extern const struct clk_ops clk_mux_ops;
 extern const struct clk_ops clk_mux_ro_ops;
 
-struct clk *clk_register_mux(struct device *dev, const char *name,
-		const char * const *parent_names, u8 num_parents,
-		unsigned long flags,
-		void __iomem *reg, u8 shift, u8 width,
-		u8 clk_mux_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
-		const char * const *parent_names, u8 num_parents,
-		unsigned long flags,
-		void __iomem *reg, u8 shift, u8 width,
-		u8 clk_mux_flags, spinlock_t *lock);
-
+struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
+		const char *name, u8 num_parents,
+		const char * const *parent_names,
+		const struct clk_hw **parent_hws,
+		const struct clk_parent_data *parent_data,
+		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
+		u8 clk_mux_flags, u32 *table, spinlock_t *lock);
 struct clk *clk_register_mux_table(struct device *dev, const char *name,
 		const char * const *parent_names, u8 num_parents,
-		unsigned long flags,
-		void __iomem *reg, u8 shift, u32 mask,
+		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
 		u8 clk_mux_flags, u32 *table, spinlock_t *lock);
-struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
-		const char * const *parent_names, u8 num_parents,
-		unsigned long flags,
-		void __iomem *reg, u8 shift, u32 mask,
-		u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+
+#define clk_register_mux(dev, name, parent_names, num_parents, flags, reg,    \
+			 shift, width, clk_mux_flags, lock)		      \
+	clk_register_mux_table((dev), (name), (parent_names), (num_parents),  \
+			       (flags), (reg), (shift), BIT((width)) - 1,     \
+			       (clk_mux_flags), NULL, (lock))
+#define clk_hw_register_mux_table(dev, name, parent_names, num_parents,	      \
+				  flags, reg, shift, mask, clk_mux_flags,     \
+				  table, lock)				      \
+	__clk_hw_register_mux((dev), NULL, (name), (num_parents),	      \
+			      (parent_names), NULL, NULL, (flags), (reg),     \
+			      (shift), (mask), (clk_mux_flags), (table),      \
+			      (lock))
+#define clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
+			    shift, width, clk_mux_flags, lock)		      \
+	__clk_hw_register_mux((dev), NULL, (name), (num_parents),	      \
+			      (parent_names), NULL, NULL, (flags), (reg),     \
+			      (shift), BIT((width)) - 1, (clk_mux_flags),     \
+			      NULL, (lock))
+#define clk_hw_register_mux_hws(dev, name, parent_hws, num_parents, flags,    \
+				reg, shift, width, clk_mux_flags, lock)	      \
+	__clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL,	      \
+			      (parent_hws), NULL, (flags), (reg), (shift),    \
+			      BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
+#define clk_hw_register_mux_parent_data(dev, name, parent_data, num_parents,  \
+					flags, reg, shift, width,	      \
+					clk_mux_flags, lock)		      \
+	__clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
+			      (parent_data), (flags), (reg), (shift),	      \
+			      BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
 
 int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
 			 unsigned int val);
@@ -743,6 +1042,12 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
 		struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
 		struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
 		unsigned long flags);
+struct clk *clk_register_composite_pdata(struct device *dev, const char *name,
+		const struct clk_parent_data *parent_data, int num_parents,
+		struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+		struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+		struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+		unsigned long flags);
 void clk_unregister_composite(struct clk *clk);
 struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
 		const char * const *parent_names, int num_parents,
@@ -750,46 +1055,15 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
 		struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
 		struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
 		unsigned long flags);
+struct clk_hw *clk_hw_register_composite_pdata(struct device *dev,
+		const char *name,
+		const struct clk_parent_data *parent_data, int num_parents,
+		struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+		struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+		struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+		unsigned long flags);
 void clk_hw_unregister_composite(struct clk_hw *hw);
 
-/**
- * struct clk_gpio - gpio gated clock
- *
- * @hw:		handle between common and hardware-specific interfaces
- * @gpiod:	gpio descriptor
- *
- * Clock with a gpio control for enabling and disabling the parent clock
- * or switching between two parents by asserting or deasserting the gpio.
- *
- * Implements .enable, .disable and .is_enabled or
- * .get_parent, .set_parent and .determine_rate depending on which clk_ops
- * is used.
- */
-struct clk_gpio {
-	struct clk_hw	hw;
-	struct gpio_desc *gpiod;
-};
-
-#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
-
-extern const struct clk_ops clk_gpio_gate_ops;
-struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
-		const char *parent_name, struct gpio_desc *gpiod,
-		unsigned long flags);
-struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
-		const char *parent_name, struct gpio_desc *gpiod,
-		unsigned long flags);
-void clk_hw_unregister_gpio_gate(struct clk_hw *hw);
-
-extern const struct clk_ops clk_gpio_mux_ops;
-struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
-		const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
-		unsigned long flags);
-struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
-		const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
-		unsigned long flags);
-void clk_hw_unregister_gpio_mux(struct clk_hw *hw);
-
 struct clk *clk_register(struct device *dev, struct clk_hw *hw);
 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw);
 
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 18b7b95..7fd6a1f 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -627,6 +627,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
  * @clk: clock source
  * @rate: desired clock rate in Hz
  *
+ * Updating the rate starts at the top-most affected clock and then
+ * walks the tree down to the bottom-most clock that needs updating.
+ *
  * Returns success (0) or negative errno.
  */
 int clk_set_rate(struct clk *clk, unsigned long rate);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 68f79d8..11083d8 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -958,4 +958,22 @@ static inline bool in_compat_syscall(void) { return false; }
 
 #endif /* CONFIG_COMPAT */
 
+/*
+ * A pointer passed in from user mode. This should not
+ * be used for syscall parameters; just declare them
+ * as pointers because the syscall entry code will have
+ * appropriately converted them already.
+ */
+#ifndef compat_ptr
+static inline void __user *compat_ptr(compat_uptr_t uptr)
+{
+	return (void __user *)(unsigned long)uptr;
+}
+#endif
+
+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
+{
+	return (u32)(unsigned long)uptr;
+}
+
 #endif /* _LINUX_COMPAT_H */
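
A sketch of the usual pattern the generic compat_ptr() enables (the handler and the native helper are placeholders): the 32-bit user pointer is widened once and then fed to the same code path as the native ioctl.

    static long example_compat_ioctl(struct file *file, unsigned int cmd,
                                     unsigned long arg)
    {
            void __user *argp = compat_ptr(arg);

            return example_do_ioctl(file, cmd, argp);   /* hypothetical */
    }
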
diff --git a/include/linux/console.h b/include/linux/console.h
index d09951d..f33016b 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -101,7 +101,6 @@ extern const struct consw *conswitchp;
 extern const struct consw dummy_con;	/* dummy console buffer */
 extern const struct consw vga_con;	/* VGA text console */
 extern const struct consw newport_con;	/* SGI Newport console  */
-extern const struct consw prom_con;	/* SPARC PROM console */
 
 int con_is_bound(const struct consw *csw);
 int do_unregister_con_driver(const struct consw *csw);
@@ -201,7 +200,6 @@ extern void suspend_console(void);
 extern void resume_console(void);
 
 int mda_console_init(void);
-void prom_con_init(void);
 
 void vcs_make_sysfs(int index);
 void vcs_remove_sysfs(int index);
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 64ec828..8150f5a 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -154,15 +154,6 @@ static inline void guest_exit_irqoff(void)
 }
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
 
-static inline void guest_enter(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	guest_enter_irqoff();
-	local_irq_restore(flags);
-}
-
 static inline void guest_exit(void)
 {
 	unsigned long flags;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 31b1b0e..018dce8 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -148,6 +148,20 @@ struct cpufreq_policy {
 	struct notifier_block nb_max;
 };
 
+/*
+ * Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
+ * callback for sanitization.  That callback is only expected to modify the min
+ * and max values, if necessary, and specifically it must not update the
+ * frequency table.
+ */
+struct cpufreq_policy_data {
+	struct cpufreq_cpuinfo		cpuinfo;
+	struct cpufreq_frequency_table	*freq_table;
+	unsigned int			cpu;
+	unsigned int			min;    /* in kHz */
+	unsigned int			max;    /* in kHz */
+};
+
 struct cpufreq_freqs {
 	struct cpufreq_policy *policy;
 	unsigned int old;
@@ -201,8 +215,6 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
 struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
 void cpufreq_cpu_release(struct cpufreq_policy *policy);
 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
-int cpufreq_set_policy(struct cpufreq_policy *policy,
-		       struct cpufreq_policy *new_policy);
 void refresh_frequency_limits(struct cpufreq_policy *policy);
 void cpufreq_update_policy(unsigned int cpu);
 void cpufreq_update_limits(unsigned int cpu);
@@ -284,7 +296,7 @@ struct cpufreq_driver {
 
 	/* needed by all drivers */
 	int		(*init)(struct cpufreq_policy *policy);
-	int		(*verify)(struct cpufreq_policy *policy);
+	int		(*verify)(struct cpufreq_policy_data *policy);
 
 	/* define one out of two */
 	int		(*setpolicy)(struct cpufreq_policy *policy);
@@ -415,8 +427,9 @@ static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv)
 		(drv->flags & CPUFREQ_IS_COOLING_DEV);
 }
 
-static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
-		unsigned int min, unsigned int max)
+static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy,
+						unsigned int min,
+						unsigned int max)
 {
 	if (policy->min < min)
 		policy->min = min;
@@ -432,10 +445,10 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
 }
 
 static inline void
-cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
+cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy)
 {
 	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-			policy->cpuinfo.max_freq);
+				     policy->cpuinfo.max_freq);
 }
 
 #ifdef CONFIG_CPU_FREQ
@@ -513,6 +526,7 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
  *                          CPUFREQ GOVERNORS                        *
  *********************************************************************/
 
+#define CPUFREQ_POLICY_UNKNOWN		(0)
 /*
  * If (cpufreq_driver->target) exists, the ->governor decides what frequency
  * within the limits is used. If (cpufreq_driver->setpolicy> exists, these
@@ -684,9 +698,9 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
 int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
 				    struct cpufreq_frequency_table *table);
 
-int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
 				   struct cpufreq_frequency_table *table);
-int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
 
 int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
 				 unsigned int target_freq,
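
Editor's note: to illustrate the reworked ->verify() contract above, here is a hedged, hypothetical driver sketch (driver name and callbacks invented). The callback now receives a struct cpufreq_policy_data and is only expected to clamp min/max, leaving the frequency table untouched:

	static int foo_cpufreq_verify(struct cpufreq_policy_data *policy)
	{
		/* Clamp policy->min/max to the hardware limits; the
		 * frequency table must not be modified here. */
		cpufreq_verify_within_cpu_limits(policy);
		return 0;
	}

	static struct cpufreq_driver foo_cpufreq_driver = {
		.name	= "foo-cpufreq",
		.verify	= foo_cpufreq_verify,
		/* .init and .target_index omitted from this sketch */
	};
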
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index def48a5..d37c17e 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -96,6 +96,7 @@ enum cpuhp_state {
 	CPUHP_AP_OFFLINE,
 	CPUHP_AP_SCHED_STARTING,
 	CPUHP_AP_RCUTREE_DYING,
+	CPUHP_AP_CPU_PM_STARTING,
 	CPUHP_AP_IRQ_GIC_STARTING,
 	CPUHP_AP_IRQ_HIP04_STARTING,
 	CPUHP_AP_IRQ_ARMADA_XP_STARTING,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 78a73eb..d5cc885 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -663,9 +663,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
  */
 static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
 {
-	unsigned int len = strchrnul(buf, '\n') - buf;
-
-	return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+	return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
 }
 
 /**
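
Editor's note: the simplified cpumask_parse() above relies on bitmap_parse() stopping at the terminating '\0' or '\n' by itself (UINT_MAX acts as "no explicit length"). A small, hypothetical usage sketch:

	static int foo_parse_mask(void)
	{
		cpumask_var_t mask;
		int err;

		if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;

		/* "f\n" selects CPUs 0-3; the trailing newline from sysfs
		 * input is now handled inside bitmap_parse() rather than
		 * by the caller. */
		err = cpumask_parse("f\n", mask);

		free_cpumask_var(mask);
		return err;
	}
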
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index bf9b6ca..3d013de 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -83,7 +83,7 @@ struct dentry *debugfs_create_automount(const char *name,
 					void *data);
 
 void debugfs_remove(struct dentry *dentry);
-void debugfs_remove_recursive(struct dentry *dentry);
+#define debugfs_remove_recursive debugfs_remove
 
 const struct file_operations *debugfs_real_fops(const struct file *filp);
 
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
new file mode 100644
index 0000000..5aad06b
--- /dev/null
+++ b/include/linux/dev_printk.h
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dev_printk.h - printk messages helpers for devices
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ *
+ */
+
+#ifndef _DEVICE_PRINTK_H_
+#define _DEVICE_PRINTK_H_
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+
+#ifndef dev_fmt
+#define dev_fmt(fmt) fmt
+#endif
+
+struct device;
+
+#ifdef CONFIG_PRINTK
+
+__printf(3, 0) __cold
+int dev_vprintk_emit(int level, const struct device *dev,
+		     const char *fmt, va_list args);
+__printf(3, 4) __cold
+int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
+
+__printf(3, 4) __cold
+void dev_printk(const char *level, const struct device *dev,
+		const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_emerg(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_alert(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_crit(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_err(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_warn(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_notice(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_info(const struct device *dev, const char *fmt, ...);
+
+#else
+
+static inline __printf(3, 0)
+int dev_vprintk_emit(int level, const struct device *dev,
+		     const char *fmt, va_list args)
+{ return 0; }
+static inline __printf(3, 4)
+int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
+{ return 0; }
+
+static inline void __dev_printk(const char *level, const struct device *dev,
+				struct va_format *vaf)
+{}
+static inline __printf(3, 4)
+void dev_printk(const char *level, const struct device *dev,
+		 const char *fmt, ...)
+{}
+
+static inline __printf(2, 3)
+void _dev_emerg(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_crit(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_alert(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_err(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_warn(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_notice(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_info(const struct device *dev, const char *fmt, ...)
+{}
+
+#endif
+
+/*
+ * #defines for all the dev_<level> macros so that each message picks up
+ * whatever prefix a driver supplies via #define dev_fmt(fmt) ...
+ */
+
+#define dev_emerg(dev, fmt, ...)					\
+	_dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_crit(dev, fmt, ...)						\
+	_dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_alert(dev, fmt, ...)					\
+	_dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_err(dev, fmt, ...)						\
+	_dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_warn(dev, fmt, ...)						\
+	_dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_notice(dev, fmt, ...)					\
+	_dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_info(dev, fmt, ...)						\
+	_dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__)
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define dev_dbg(dev, fmt, ...)						\
+	dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#elif defined(DEBUG)
+#define dev_dbg(dev, fmt, ...)						\
+	dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#else
+#define dev_dbg(dev, fmt, ...)						\
+({									\
+	if (0)								\
+		dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+})
+#endif
+
+#ifdef CONFIG_PRINTK
+#define dev_level_once(dev_level, dev, fmt, ...)			\
+do {									\
+	static bool __print_once __read_mostly;				\
+									\
+	if (!__print_once) {						\
+		__print_once = true;					\
+		dev_level(dev, fmt, ##__VA_ARGS__);			\
+	}								\
+} while (0)
+#else
+#define dev_level_once(dev_level, dev, fmt, ...)			\
+do {									\
+	if (0)								\
+		dev_level(dev, fmt, ##__VA_ARGS__);			\
+} while (0)
+#endif
+
+#define dev_emerg_once(dev, fmt, ...)					\
+	dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
+#define dev_alert_once(dev, fmt, ...)					\
+	dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
+#define dev_crit_once(dev, fmt, ...)					\
+	dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
+#define dev_err_once(dev, fmt, ...)					\
+	dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
+#define dev_warn_once(dev, fmt, ...)					\
+	dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
+#define dev_notice_once(dev, fmt, ...)					\
+	dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
+#define dev_info_once(dev, fmt, ...)					\
+	dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
+#define dev_dbg_once(dev, fmt, ...)					\
+	dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
+
+#define dev_level_ratelimited(dev_level, dev, fmt, ...)			\
+do {									\
+	static DEFINE_RATELIMIT_STATE(_rs,				\
+				      DEFAULT_RATELIMIT_INTERVAL,	\
+				      DEFAULT_RATELIMIT_BURST);		\
+	if (__ratelimit(&_rs))						\
+		dev_level(dev, fmt, ##__VA_ARGS__);			\
+} while (0)
+
+#define dev_emerg_ratelimited(dev, fmt, ...)				\
+	dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
+#define dev_alert_ratelimited(dev, fmt, ...)				\
+	dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
+#define dev_crit_ratelimited(dev, fmt, ...)				\
+	dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
+#define dev_err_ratelimited(dev, fmt, ...)				\
+	dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
+#define dev_warn_ratelimited(dev, fmt, ...)				\
+	dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
+#define dev_notice_ratelimited(dev, fmt, ...)				\
+	dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
+#define dev_info_ratelimited(dev, fmt, ...)				\
+	dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/* descriptor check is first to prevent flooding with "callbacks suppressed" */
+#define dev_dbg_ratelimited(dev, fmt, ...)				\
+do {									\
+	static DEFINE_RATELIMIT_STATE(_rs,				\
+				      DEFAULT_RATELIMIT_INTERVAL,	\
+				      DEFAULT_RATELIMIT_BURST);		\
+	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
+	if (DYNAMIC_DEBUG_BRANCH(descriptor) &&				\
+	    __ratelimit(&_rs))						\
+		__dynamic_dev_dbg(&descriptor, dev, dev_fmt(fmt),	\
+				  ##__VA_ARGS__);			\
+} while (0)
+#elif defined(DEBUG)
+#define dev_dbg_ratelimited(dev, fmt, ...)				\
+do {									\
+	static DEFINE_RATELIMIT_STATE(_rs,				\
+				      DEFAULT_RATELIMIT_INTERVAL,	\
+				      DEFAULT_RATELIMIT_BURST);		\
+	if (__ratelimit(&_rs))						\
+		dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+} while (0)
+#else
+#define dev_dbg_ratelimited(dev, fmt, ...)				\
+do {									\
+	if (0)								\
+		dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+} while (0)
+#endif
+
+#ifdef VERBOSE_DEBUG
+#define dev_vdbg	dev_dbg
+#else
+#define dev_vdbg(dev, fmt, ...)						\
+({									\
+	if (0)								\
+		dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+})
+#endif
+
+/*
+ * dev_WARN*() acts like dev_printk(), but with the key difference of
+ * using WARN/WARN_ONCE to include file/line information and a backtrace.
+ */
+#define dev_WARN(dev, format, arg...) \
+	WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
+
+#define dev_WARN_ONCE(dev, condition, format, arg...) \
+	WARN_ONCE(condition, "%s %s: " format, \
+			dev_driver_string(dev), dev_name(dev), ## arg)
+
+#endif /* _DEVICE_PRINTK_H_ */
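
Editor's note: a short, hypothetical driver sketch of the dev_fmt() prefix convention used by the macros above (the "foo" name and foo_complete() are invented). dev_fmt() is defined before the includes so every dev_<level>() call picks it up:

	/* Hypothetical driver source file. */
	#define dev_fmt(fmt) "foo: " fmt

	#include <linux/device.h>

	static void foo_complete(struct device *dev, int err)
	{
		if (err)
			dev_err_ratelimited(dev, "xfer failed: %d\n", err);
		else
			dev_dbg(dev, "xfer done\n");	/* logs "foo: xfer done" */
	}
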
diff --git a/include/linux/device.h b/include/linux/device.h
index 96ff767..0cd7c64 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -12,6 +12,7 @@
 #ifndef _DEVICE_H_
 #define _DEVICE_H_
 
+#include <linux/dev_printk.h>
 #include <linux/ioport.h>
 #include <linux/kobject.h>
 #include <linux/klist.h>
@@ -22,10 +23,12 @@
 #include <linux/mutex.h>
 #include <linux/pm.h>
 #include <linux/atomic.h>
-#include <linux/ratelimit.h>
 #include <linux/uidgid.h>
 #include <linux/gfp.h>
 #include <linux/overflow.h>
+#include <linux/device/bus.h>
+#include <linux/device/class.h>
+#include <linux/device/driver.h>
 #include <asm/device.h>
 
 struct device;
@@ -35,7 +38,6 @@ struct driver_private;
 struct module;
 struct class;
 struct subsys_private;
-struct bus_type;
 struct device_node;
 struct fwnode_handle;
 struct iommu_ops;
@@ -44,490 +46,6 @@ struct iommu_fwspec;
 struct dev_pin_info;
 struct iommu_param;
 
-struct bus_attribute {
-	struct attribute	attr;
-	ssize_t (*show)(struct bus_type *bus, char *buf);
-	ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
-};
-
-#define BUS_ATTR_RW(_name) \
-	struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
-#define BUS_ATTR_RO(_name) \
-	struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
-#define BUS_ATTR_WO(_name) \
-	struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
-
-extern int __must_check bus_create_file(struct bus_type *,
-					struct bus_attribute *);
-extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
-
-/**
- * struct bus_type - The bus type of the device
- *
- * @name:	The name of the bus.
- * @dev_name:	Used for subsystems to enumerate devices like ("foo%u", dev->id).
- * @dev_root:	Default device to use as the parent.
- * @bus_groups:	Default attributes of the bus.
- * @dev_groups:	Default attributes of the devices on the bus.
- * @drv_groups: Default attributes of the device drivers on the bus.
- * @match:	Called, perhaps multiple times, whenever a new device or driver
- *		is added for this bus. It should return a positive value if the
- *		given device can be handled by the given driver and zero
- *		otherwise. It may also return error code if determining that
- *		the driver supports the device is not possible. In case of
- *		-EPROBE_DEFER it will queue the device for deferred probing.
- * @uevent:	Called when a device is added, removed, or a few other things
- *		that generate uevents to add the environment variables.
- * @probe:	Called when a new device or driver add to this bus, and callback
- *		the specific driver's probe to initial the matched device.
- * @sync_state:	Called to sync device state to software state after all the
- *		state tracking consumers linked to this device (present at
- *		the time of late_initcall) have successfully bound to a
- *		driver. If the device has no consumers, this function will
- *		be called at late_initcall_sync level. If the device has
- *		consumers that are never bound to a driver, this function
- *		will never get called until they do.
- * @remove:	Called when a device removed from this bus.
- * @shutdown:	Called at shut-down time to quiesce the device.
- *
- * @online:	Called to put the device back online (after offlining it).
- * @offline:	Called to put the device offline for hot-removal. May fail.
- *
- * @suspend:	Called when a device on this bus wants to go to sleep mode.
- * @resume:	Called to bring a device on this bus out of sleep mode.
- * @num_vf:	Called to find out how many virtual functions a device on this
- *		bus supports.
- * @dma_configure:	Called to setup DMA configuration on a device on
- *			this bus.
- * @pm:		Power management operations of this bus, callback the specific
- *		device driver's pm-ops.
- * @iommu_ops:  IOMMU specific operations for this bus, used to attach IOMMU
- *              driver implementations to a bus and allow the driver to do
- *              bus-specific setup
- * @p:		The private data of the driver core, only the driver core can
- *		touch this.
- * @lock_key:	Lock class key for use by the lock validator
- * @need_parent_lock:	When probing or removing a device on this bus, the
- *			device core should lock the device's parent.
- *
- * A bus is a channel between the processor and one or more devices. For the
- * purposes of the device model, all devices are connected via a bus, even if
- * it is an internal, virtual, "platform" bus. Buses can plug into each other.
- * A USB controller is usually a PCI device, for example. The device model
- * represents the actual connections between buses and the devices they control.
- * A bus is represented by the bus_type structure. It contains the name, the
- * default attributes, the bus' methods, PM operations, and the driver core's
- * private data.
- */
-struct bus_type {
-	const char		*name;
-	const char		*dev_name;
-	struct device		*dev_root;
-	const struct attribute_group **bus_groups;
-	const struct attribute_group **dev_groups;
-	const struct attribute_group **drv_groups;
-
-	int (*match)(struct device *dev, struct device_driver *drv);
-	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
-	int (*probe)(struct device *dev);
-	void (*sync_state)(struct device *dev);
-	int (*remove)(struct device *dev);
-	void (*shutdown)(struct device *dev);
-
-	int (*online)(struct device *dev);
-	int (*offline)(struct device *dev);
-
-	int (*suspend)(struct device *dev, pm_message_t state);
-	int (*resume)(struct device *dev);
-
-	int (*num_vf)(struct device *dev);
-
-	int (*dma_configure)(struct device *dev);
-
-	const struct dev_pm_ops *pm;
-
-	const struct iommu_ops *iommu_ops;
-
-	struct subsys_private *p;
-	struct lock_class_key lock_key;
-
-	bool need_parent_lock;
-};
-
-extern int __must_check bus_register(struct bus_type *bus);
-
-extern void bus_unregister(struct bus_type *bus);
-
-extern int __must_check bus_rescan_devices(struct bus_type *bus);
-
-/* iterator helpers for buses */
-struct subsys_dev_iter {
-	struct klist_iter		ki;
-	const struct device_type	*type;
-};
-void subsys_dev_iter_init(struct subsys_dev_iter *iter,
-			 struct bus_type *subsys,
-			 struct device *start,
-			 const struct device_type *type);
-struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
-void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
-
-int device_match_name(struct device *dev, const void *name);
-int device_match_of_node(struct device *dev, const void *np);
-int device_match_fwnode(struct device *dev, const void *fwnode);
-int device_match_devt(struct device *dev, const void *pdevt);
-int device_match_acpi_dev(struct device *dev, const void *adev);
-int device_match_any(struct device *dev, const void *unused);
-
-int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
-		     int (*fn)(struct device *dev, void *data));
-struct device *bus_find_device(struct bus_type *bus, struct device *start,
-			       const void *data,
-			       int (*match)(struct device *dev, const void *data));
-/**
- * bus_find_device_by_name - device iterator for locating a particular device
- * of a specific name.
- * @bus: bus type
- * @start: Device to begin with
- * @name: name of the device to match
- */
-static inline struct device *bus_find_device_by_name(struct bus_type *bus,
-						     struct device *start,
-						     const char *name)
-{
-	return bus_find_device(bus, start, name, device_match_name);
-}
-
-/**
- * bus_find_device_by_of_node : device iterator for locating a particular device
- * matching the of_node.
- * @bus: bus type
- * @np: of_node of the device to match.
- */
-static inline struct device *
-bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np)
-{
-	return bus_find_device(bus, NULL, np, device_match_of_node);
-}
-
-/**
- * bus_find_device_by_fwnode : device iterator for locating a particular device
- * matching the fwnode.
- * @bus: bus type
- * @fwnode: fwnode of the device to match.
- */
-static inline struct device *
-bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwnode)
-{
-	return bus_find_device(bus, NULL, fwnode, device_match_fwnode);
-}
-
-/**
- * bus_find_device_by_devt : device iterator for locating a particular device
- * matching the device type.
- * @bus: bus type
- * @devt: device type of the device to match.
- */
-static inline struct device *bus_find_device_by_devt(struct bus_type *bus,
-						     dev_t devt)
-{
-	return bus_find_device(bus, NULL, &devt, device_match_devt);
-}
-
-/**
- * bus_find_next_device - Find the next device after a given device in a
- * given bus.
- * @bus: bus type
- * @cur: device to begin the search with.
- */
-static inline struct device *
-bus_find_next_device(struct bus_type *bus,struct device *cur)
-{
-	return bus_find_device(bus, cur, NULL, device_match_any);
-}
-
-#ifdef CONFIG_ACPI
-struct acpi_device;
-
-/**
- * bus_find_device_by_acpi_dev : device iterator for locating a particular device
- * matching the ACPI COMPANION device.
- * @bus: bus type
- * @adev: ACPI COMPANION device to match.
- */
-static inline struct device *
-bus_find_device_by_acpi_dev(struct bus_type *bus, const struct acpi_device *adev)
-{
-	return bus_find_device(bus, NULL, adev, device_match_acpi_dev);
-}
-#else
-static inline struct device *
-bus_find_device_by_acpi_dev(struct bus_type *bus, const void *adev)
-{
-	return NULL;
-}
-#endif
-
-struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
-					struct device *hint);
-int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
-		     void *data, int (*fn)(struct device_driver *, void *));
-void bus_sort_breadthfirst(struct bus_type *bus,
-			   int (*compare)(const struct device *a,
-					  const struct device *b));
-/*
- * Bus notifiers: Get notified of addition/removal of devices
- * and binding/unbinding of drivers to devices.
- * In the long run, it should be a replacement for the platform
- * notify hooks.
- */
-struct notifier_block;
-
-extern int bus_register_notifier(struct bus_type *bus,
-				 struct notifier_block *nb);
-extern int bus_unregister_notifier(struct bus_type *bus,
-				   struct notifier_block *nb);
-
-/* All 4 notifers below get called with the target struct device *
- * as an argument. Note that those functions are likely to be called
- * with the device lock held in the core, so be careful.
- */
-#define BUS_NOTIFY_ADD_DEVICE		0x00000001 /* device added */
-#define BUS_NOTIFY_DEL_DEVICE		0x00000002 /* device to be removed */
-#define BUS_NOTIFY_REMOVED_DEVICE	0x00000003 /* device removed */
-#define BUS_NOTIFY_BIND_DRIVER		0x00000004 /* driver about to be
-						      bound */
-#define BUS_NOTIFY_BOUND_DRIVER		0x00000005 /* driver bound to device */
-#define BUS_NOTIFY_UNBIND_DRIVER	0x00000006 /* driver about to be
-						      unbound */
-#define BUS_NOTIFY_UNBOUND_DRIVER	0x00000007 /* driver is unbound
-						      from the device */
-#define BUS_NOTIFY_DRIVER_NOT_BOUND	0x00000008 /* driver fails to be bound */
-
-extern struct kset *bus_get_kset(struct bus_type *bus);
-extern struct klist *bus_get_device_klist(struct bus_type *bus);
-
-/**
- * enum probe_type - device driver probe type to try
- *	Device drivers may opt in for special handling of their
- *	respective probe routines. This tells the core what to
- *	expect and prefer.
- *
- * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
- *	whether probed synchronously or asynchronously.
- * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices which
- *	probing order is not essential for booting the system may
- *	opt into executing their probes asynchronously.
- * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
- *	their probe routines to run synchronously with driver and
- *	device registration (with the exception of -EPROBE_DEFER
- *	handling - re-probing always ends up being done asynchronously).
- *
- * Note that the end goal is to switch the kernel to use asynchronous
- * probing by default, so annotating drivers with
- * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
- * to speed up boot process while we are validating the rest of the
- * drivers.
- */
-enum probe_type {
-	PROBE_DEFAULT_STRATEGY,
-	PROBE_PREFER_ASYNCHRONOUS,
-	PROBE_FORCE_SYNCHRONOUS,
-};
-
-/**
- * struct device_driver - The basic device driver structure
- * @name:	Name of the device driver.
- * @bus:	The bus which the device of this driver belongs to.
- * @owner:	The module owner.
- * @mod_name:	Used for built-in modules.
- * @suppress_bind_attrs: Disables bind/unbind via sysfs.
- * @probe_type:	Type of the probe (synchronous or asynchronous) to use.
- * @of_match_table: The open firmware table.
- * @acpi_match_table: The ACPI match table.
- * @probe:	Called to query the existence of a specific device,
- *		whether this driver can work with it, and bind the driver
- *		to a specific device.
- * @sync_state:	Called to sync device state to software state after all the
- *		state tracking consumers linked to this device (present at
- *		the time of late_initcall) have successfully bound to a
- *		driver. If the device has no consumers, this function will
- *		be called at late_initcall_sync level. If the device has
- *		consumers that are never bound to a driver, this function
- *		will never get called until they do.
- * @remove:	Called when the device is removed from the system to
- *		unbind a device from this driver.
- * @shutdown:	Called at shut-down time to quiesce the device.
- * @suspend:	Called to put the device to sleep mode. Usually to a
- *		low power state.
- * @resume:	Called to bring a device from sleep mode.
- * @groups:	Default attributes that get created by the driver core
- *		automatically.
- * @dev_groups:	Additional attributes attached to device instance once the
- *		it is bound to the driver.
- * @pm:		Power management operations of the device which matched
- *		this driver.
- * @coredump:	Called when sysfs entry is written to. The device driver
- *		is expected to call the dev_coredump API resulting in a
- *		uevent.
- * @p:		Driver core's private data, no one other than the driver
- *		core can touch this.
- *
- * The device driver-model tracks all of the drivers known to the system.
- * The main reason for this tracking is to enable the driver core to match
- * up drivers with new devices. Once drivers are known objects within the
- * system, however, a number of other things become possible. Device drivers
- * can export information and configuration variables that are independent
- * of any specific device.
- */
-struct device_driver {
-	const char		*name;
-	struct bus_type		*bus;
-
-	struct module		*owner;
-	const char		*mod_name;	/* used for built-in modules */
-
-	bool suppress_bind_attrs;	/* disables bind/unbind via sysfs */
-	enum probe_type probe_type;
-
-	const struct of_device_id	*of_match_table;
-	const struct acpi_device_id	*acpi_match_table;
-
-	int (*probe) (struct device *dev);
-	void (*sync_state)(struct device *dev);
-	int (*remove) (struct device *dev);
-	void (*shutdown) (struct device *dev);
-	int (*suspend) (struct device *dev, pm_message_t state);
-	int (*resume) (struct device *dev);
-	const struct attribute_group **groups;
-	const struct attribute_group **dev_groups;
-
-	const struct dev_pm_ops *pm;
-	void (*coredump) (struct device *dev);
-
-	struct driver_private *p;
-};
-
-
-extern int __must_check driver_register(struct device_driver *drv);
-extern void driver_unregister(struct device_driver *drv);
-
-extern struct device_driver *driver_find(const char *name,
-					 struct bus_type *bus);
-extern int driver_probe_done(void);
-extern void wait_for_device_probe(void);
-
-/* sysfs interface for exporting driver attributes */
-
-struct driver_attribute {
-	struct attribute attr;
-	ssize_t (*show)(struct device_driver *driver, char *buf);
-	ssize_t (*store)(struct device_driver *driver, const char *buf,
-			 size_t count);
-};
-
-#define DRIVER_ATTR_RW(_name) \
-	struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
-#define DRIVER_ATTR_RO(_name) \
-	struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
-#define DRIVER_ATTR_WO(_name) \
-	struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
-
-extern int __must_check driver_create_file(struct device_driver *driver,
-					const struct driver_attribute *attr);
-extern void driver_remove_file(struct device_driver *driver,
-			       const struct driver_attribute *attr);
-
-extern int __must_check driver_for_each_device(struct device_driver *drv,
-					       struct device *start,
-					       void *data,
-					       int (*fn)(struct device *dev,
-							 void *));
-struct device *driver_find_device(struct device_driver *drv,
-				  struct device *start, const void *data,
-				  int (*match)(struct device *dev, const void *data));
-
-/**
- * driver_find_device_by_name - device iterator for locating a particular device
- * of a specific name.
- * @drv: the driver we're iterating
- * @name: name of the device to match
- */
-static inline struct device *driver_find_device_by_name(struct device_driver *drv,
-							const char *name)
-{
-	return driver_find_device(drv, NULL, name, device_match_name);
-}
-
-/**
- * driver_find_device_by_of_node- device iterator for locating a particular device
- * by of_node pointer.
- * @drv: the driver we're iterating
- * @np: of_node pointer to match.
- */
-static inline struct device *
-driver_find_device_by_of_node(struct device_driver *drv,
-			      const struct device_node *np)
-{
-	return driver_find_device(drv, NULL, np, device_match_of_node);
-}
-
-/**
- * driver_find_device_by_fwnode- device iterator for locating a particular device
- * by fwnode pointer.
- * @drv: the driver we're iterating
- * @fwnode: fwnode pointer to match.
- */
-static inline struct device *
-driver_find_device_by_fwnode(struct device_driver *drv,
-			     const struct fwnode_handle *fwnode)
-{
-	return driver_find_device(drv, NULL, fwnode, device_match_fwnode);
-}
-
-/**
- * driver_find_device_by_devt- device iterator for locating a particular device
- * by devt.
- * @drv: the driver we're iterating
- * @devt: devt pointer to match.
- */
-static inline struct device *driver_find_device_by_devt(struct device_driver *drv,
-							dev_t devt)
-{
-	return driver_find_device(drv, NULL, &devt, device_match_devt);
-}
-
-static inline struct device *driver_find_next_device(struct device_driver *drv,
-						     struct device *start)
-{
-	return driver_find_device(drv, start, NULL, device_match_any);
-}
-
-#ifdef CONFIG_ACPI
-/**
- * driver_find_device_by_acpi_dev : device iterator for locating a particular
- * device matching the ACPI_COMPANION device.
- * @drv: the driver we're iterating
- * @adev: ACPI_COMPANION device to match.
- */
-static inline struct device *
-driver_find_device_by_acpi_dev(struct device_driver *drv,
-			       const struct acpi_device *adev)
-{
-	return driver_find_device(drv, NULL, adev, device_match_acpi_dev);
-}
-#else
-static inline struct device *
-driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev)
-{
-	return NULL;
-}
-#endif
-
-void driver_deferred_probe_add(struct device *dev);
-int driver_deferred_probe_check_state(struct device *dev);
-int driver_deferred_probe_check_state_continue(struct device *dev);
-
 /**
  * struct subsys_interface - interfaces to device functions
  * @name:       name of the device function
@@ -557,246 +75,6 @@ int subsys_system_register(struct bus_type *subsys,
 int subsys_virtual_register(struct bus_type *subsys,
 			    const struct attribute_group **groups);
 
-/**
- * struct class - device classes
- * @name:	Name of the class.
- * @owner:	The module owner.
- * @class_groups: Default attributes of this class.
- * @dev_groups:	Default attributes of the devices that belong to the class.
- * @dev_kobj:	The kobject that represents this class and links it into the hierarchy.
- * @dev_uevent:	Called when a device is added, removed from this class, or a
- *		few other things that generate uevents to add the environment
- *		variables.
- * @devnode:	Callback to provide the devtmpfs.
- * @class_release: Called to release this class.
- * @dev_release: Called to release the device.
- * @shutdown_pre: Called at shut-down time before driver shutdown.
- * @ns_type:	Callbacks so sysfs can detemine namespaces.
- * @namespace:	Namespace of the device belongs to this class.
- * @get_ownership: Allows class to specify uid/gid of the sysfs directories
- *		for the devices belonging to the class. Usually tied to
- *		device's namespace.
- * @pm:		The default device power management operations of this class.
- * @p:		The private data of the driver core, no one other than the
- *		driver core can touch this.
- *
- * A class is a higher-level view of a device that abstracts out low-level
- * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
- * at the class level, they are all simply disks. Classes allow user space
- * to work with devices based on what they do, rather than how they are
- * connected or how they work.
- */
-struct class {
-	const char		*name;
-	struct module		*owner;
-
-	const struct attribute_group	**class_groups;
-	const struct attribute_group	**dev_groups;
-	struct kobject			*dev_kobj;
-
-	int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
-	char *(*devnode)(struct device *dev, umode_t *mode);
-
-	void (*class_release)(struct class *class);
-	void (*dev_release)(struct device *dev);
-
-	int (*shutdown_pre)(struct device *dev);
-
-	const struct kobj_ns_type_operations *ns_type;
-	const void *(*namespace)(struct device *dev);
-
-	void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid);
-
-	const struct dev_pm_ops *pm;
-
-	struct subsys_private *p;
-};
-
-struct class_dev_iter {
-	struct klist_iter		ki;
-	const struct device_type	*type;
-};
-
-extern struct kobject *sysfs_dev_block_kobj;
-extern struct kobject *sysfs_dev_char_kobj;
-extern int __must_check __class_register(struct class *class,
-					 struct lock_class_key *key);
-extern void class_unregister(struct class *class);
-
-/* This is a #define to keep the compiler from merging different
- * instances of the __key variable */
-#define class_register(class)			\
-({						\
-	static struct lock_class_key __key;	\
-	__class_register(class, &__key);	\
-})
-
-struct class_compat;
-struct class_compat *class_compat_register(const char *name);
-void class_compat_unregister(struct class_compat *cls);
-int class_compat_create_link(struct class_compat *cls, struct device *dev,
-			     struct device *device_link);
-void class_compat_remove_link(struct class_compat *cls, struct device *dev,
-			      struct device *device_link);
-
-extern void class_dev_iter_init(struct class_dev_iter *iter,
-				struct class *class,
-				struct device *start,
-				const struct device_type *type);
-extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
-extern void class_dev_iter_exit(struct class_dev_iter *iter);
-
-extern int class_for_each_device(struct class *class, struct device *start,
-				 void *data,
-				 int (*fn)(struct device *dev, void *data));
-extern struct device *class_find_device(struct class *class,
-					struct device *start, const void *data,
-					int (*match)(struct device *, const void *));
-
-/**
- * class_find_device_by_name - device iterator for locating a particular device
- * of a specific name.
- * @class: class type
- * @name: name of the device to match
- */
-static inline struct device *class_find_device_by_name(struct class *class,
-						       const char *name)
-{
-	return class_find_device(class, NULL, name, device_match_name);
-}
-
-/**
- * class_find_device_by_of_node : device iterator for locating a particular device
- * matching the of_node.
- * @class: class type
- * @np: of_node of the device to match.
- */
-static inline struct device *
-class_find_device_by_of_node(struct class *class, const struct device_node *np)
-{
-	return class_find_device(class, NULL, np, device_match_of_node);
-}
-
-/**
- * class_find_device_by_fwnode : device iterator for locating a particular device
- * matching the fwnode.
- * @class: class type
- * @fwnode: fwnode of the device to match.
- */
-static inline struct device *
-class_find_device_by_fwnode(struct class *class,
-			    const struct fwnode_handle *fwnode)
-{
-	return class_find_device(class, NULL, fwnode, device_match_fwnode);
-}
-
-/**
- * class_find_device_by_devt : device iterator for locating a particular device
- * matching the device type.
- * @class: class type
- * @devt: device type of the device to match.
- */
-static inline struct device *class_find_device_by_devt(struct class *class,
-						       dev_t devt)
-{
-	return class_find_device(class, NULL, &devt, device_match_devt);
-}
-
-#ifdef CONFIG_ACPI
-struct acpi_device;
-/**
- * class_find_device_by_acpi_dev : device iterator for locating a particular
- * device matching the ACPI_COMPANION device.
- * @class: class type
- * @adev: ACPI_COMPANION device to match.
- */
-static inline struct device *
-class_find_device_by_acpi_dev(struct class *class, const struct acpi_device *adev)
-{
-	return class_find_device(class, NULL, adev, device_match_acpi_dev);
-}
-#else
-static inline struct device *
-class_find_device_by_acpi_dev(struct class *class, const void *adev)
-{
-	return NULL;
-}
-#endif
-
-struct class_attribute {
-	struct attribute attr;
-	ssize_t (*show)(struct class *class, struct class_attribute *attr,
-			char *buf);
-	ssize_t (*store)(struct class *class, struct class_attribute *attr,
-			const char *buf, size_t count);
-};
-
-#define CLASS_ATTR_RW(_name) \
-	struct class_attribute class_attr_##_name = __ATTR_RW(_name)
-#define CLASS_ATTR_RO(_name) \
-	struct class_attribute class_attr_##_name = __ATTR_RO(_name)
-#define CLASS_ATTR_WO(_name) \
-	struct class_attribute class_attr_##_name = __ATTR_WO(_name)
-
-extern int __must_check class_create_file_ns(struct class *class,
-					     const struct class_attribute *attr,
-					     const void *ns);
-extern void class_remove_file_ns(struct class *class,
-				 const struct class_attribute *attr,
-				 const void *ns);
-
-static inline int __must_check class_create_file(struct class *class,
-					const struct class_attribute *attr)
-{
-	return class_create_file_ns(class, attr, NULL);
-}
-
-static inline void class_remove_file(struct class *class,
-				     const struct class_attribute *attr)
-{
-	return class_remove_file_ns(class, attr, NULL);
-}
-
-/* Simple class attribute that is just a static string */
-struct class_attribute_string {
-	struct class_attribute attr;
-	char *str;
-};
-
-/* Currently read-only only */
-#define _CLASS_ATTR_STRING(_name, _mode, _str) \
-	{ __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
-#define CLASS_ATTR_STRING(_name, _mode, _str) \
-	struct class_attribute_string class_attr_##_name = \
-		_CLASS_ATTR_STRING(_name, _mode, _str)
-
-extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
-                        char *buf);
-
-struct class_interface {
-	struct list_head	node;
-	struct class		*class;
-
-	int (*add_dev)		(struct device *, struct class_interface *);
-	void (*remove_dev)	(struct device *, struct class_interface *);
-};
-
-extern int __must_check class_interface_register(struct class_interface *);
-extern void class_interface_unregister(struct class_interface *);
-
-extern struct class * __must_check __class_create(struct module *owner,
-						  const char *name,
-						  struct lock_class_key *key);
-extern void class_destroy(struct class *cls);
-
-/* This is a #define to keep the compiler from merging different
- * instances of the __key variable */
-#define class_create(owner, name)		\
-({						\
-	static struct lock_class_key __key;	\
-	__class_create(owner, name, &__key);	\
-})
-
 /*
  * The type of device, "struct device" is embedded in. A class
  * or bus can contain devices of different types
@@ -1520,8 +798,6 @@ static inline struct device_node *dev_of_node(struct device *dev)
 	return dev->of_node;
 }
 
-void driver_init(void);
-
 /*
  * High level routines for use by the bus drivers
  */
@@ -1664,12 +940,8 @@ extern void put_device(struct device *dev);
 extern bool kill_device(struct device *dev);
 
 #ifdef CONFIG_DEVTMPFS
-extern int devtmpfs_create_node(struct device *dev);
-extern int devtmpfs_delete_node(struct device *dev);
 extern int devtmpfs_mount(void);
 #else
-static inline int devtmpfs_create_node(struct device *dev) { return 0; }
-static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
 static inline int devtmpfs_mount(void) { return 0; }
 #endif
 
@@ -1687,221 +959,6 @@ void device_link_remove(void *consumer, struct device *supplier);
 void device_links_supplier_sync_state_pause(void);
 void device_links_supplier_sync_state_resume(void);
 
-#ifndef dev_fmt
-#define dev_fmt(fmt) fmt
-#endif
-
-#ifdef CONFIG_PRINTK
-
-__printf(3, 0) __cold
-int dev_vprintk_emit(int level, const struct device *dev,
-		     const char *fmt, va_list args);
-__printf(3, 4) __cold
-int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
-
-__printf(3, 4) __cold
-void dev_printk(const char *level, const struct device *dev,
-		const char *fmt, ...);
-__printf(2, 3) __cold
-void _dev_emerg(const struct device *dev, const char *fmt, ...);
-__printf(2, 3) __cold
-void _dev_alert(const struct device *dev, const char *fmt, ...);
-__printf(2, 3) __cold
-void _dev_crit(const struct device *dev, const char *fmt, ...);
-__printf(2, 3) __cold
-void _dev_err(const struct device *dev, const char *fmt, ...);
-__printf(2, 3) __cold
-void _dev_warn(const struct device *dev, const char *fmt, ...);
-__printf(2, 3) __cold
-void _dev_notice(const struct device *dev, const char *fmt, ...);
-__printf(2, 3) __cold
-void _dev_info(const struct device *dev, const char *fmt, ...);
-
-#else
-
-static inline __printf(3, 0)
-int dev_vprintk_emit(int level, const struct device *dev,
-		     const char *fmt, va_list args)
-{ return 0; }
-static inline __printf(3, 4)
-int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
-{ return 0; }
-
-static inline void __dev_printk(const char *level, const struct device *dev,
-				struct va_format *vaf)
-{}
-static inline __printf(3, 4)
-void dev_printk(const char *level, const struct device *dev,
-		 const char *fmt, ...)
-{}
-
-static inline __printf(2, 3)
-void _dev_emerg(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void _dev_crit(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void _dev_alert(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void _dev_err(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void _dev_warn(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void _dev_notice(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void _dev_info(const struct device *dev, const char *fmt, ...)
-{}
-
-#endif
-
-/*
- * #defines for all the dev_<level> macros to prefix with whatever
- * possible use of #define dev_fmt(fmt) ...
- */
-
-#define dev_emerg(dev, fmt, ...)					\
-	_dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_crit(dev, fmt, ...)						\
-	_dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_alert(dev, fmt, ...)					\
-	_dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_err(dev, fmt, ...)						\
-	_dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_warn(dev, fmt, ...)						\
-	_dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_notice(dev, fmt, ...)					\
-	_dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_info(dev, fmt, ...)						\
-	_dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__)
-
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define dev_dbg(dev, fmt, ...)						\
-	dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#elif defined(DEBUG)
-#define dev_dbg(dev, fmt, ...)						\
-	dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
-#else
-#define dev_dbg(dev, fmt, ...)						\
-({									\
-	if (0)								\
-		dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-})
-#endif
-
-#ifdef CONFIG_PRINTK
-#define dev_level_once(dev_level, dev, fmt, ...)			\
-do {									\
-	static bool __print_once __read_mostly;				\
-									\
-	if (!__print_once) {						\
-		__print_once = true;					\
-		dev_level(dev, fmt, ##__VA_ARGS__);			\
-	}								\
-} while (0)
-#else
-#define dev_level_once(dev_level, dev, fmt, ...)			\
-do {									\
-	if (0)								\
-		dev_level(dev, fmt, ##__VA_ARGS__);			\
-} while (0)
-#endif
-
-#define dev_emerg_once(dev, fmt, ...)					\
-	dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
-#define dev_alert_once(dev, fmt, ...)					\
-	dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
-#define dev_crit_once(dev, fmt, ...)					\
-	dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
-#define dev_err_once(dev, fmt, ...)					\
-	dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
-#define dev_warn_once(dev, fmt, ...)					\
-	dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
-#define dev_notice_once(dev, fmt, ...)					\
-	dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
-#define dev_info_once(dev, fmt, ...)					\
-	dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
-#define dev_dbg_once(dev, fmt, ...)					\
-	dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
-
-#define dev_level_ratelimited(dev_level, dev, fmt, ...)			\
-do {									\
-	static DEFINE_RATELIMIT_STATE(_rs,				\
-				      DEFAULT_RATELIMIT_INTERVAL,	\
-				      DEFAULT_RATELIMIT_BURST);		\
-	if (__ratelimit(&_rs))						\
-		dev_level(dev, fmt, ##__VA_ARGS__);			\
-} while (0)
-
-#define dev_emerg_ratelimited(dev, fmt, ...)				\
-	dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
-#define dev_alert_ratelimited(dev, fmt, ...)				\
-	dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
-#define dev_crit_ratelimited(dev, fmt, ...)				\
-	dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
-#define dev_err_ratelimited(dev, fmt, ...)				\
-	dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
-#define dev_warn_ratelimited(dev, fmt, ...)				\
-	dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
-#define dev_notice_ratelimited(dev, fmt, ...)				\
-	dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
-#define dev_info_ratelimited(dev, fmt, ...)				\
-	dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
-/* descriptor check is first to prevent flooding with "callbacks suppressed" */
-#define dev_dbg_ratelimited(dev, fmt, ...)				\
-do {									\
-	static DEFINE_RATELIMIT_STATE(_rs,				\
-				      DEFAULT_RATELIMIT_INTERVAL,	\
-				      DEFAULT_RATELIMIT_BURST);		\
-	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
-	if (DYNAMIC_DEBUG_BRANCH(descriptor) &&				\
-	    __ratelimit(&_rs))						\
-		__dynamic_dev_dbg(&descriptor, dev, dev_fmt(fmt),	\
-				  ##__VA_ARGS__);			\
-} while (0)
-#elif defined(DEBUG)
-#define dev_dbg_ratelimited(dev, fmt, ...)				\
-do {									\
-	static DEFINE_RATELIMIT_STATE(_rs,				\
-				      DEFAULT_RATELIMIT_INTERVAL,	\
-				      DEFAULT_RATELIMIT_BURST);		\
-	if (__ratelimit(&_rs))						\
-		dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-} while (0)
-#else
-#define dev_dbg_ratelimited(dev, fmt, ...)				\
-do {									\
-	if (0)								\
-		dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-} while (0)
-#endif
-
-#ifdef VERBOSE_DEBUG
-#define dev_vdbg	dev_dbg
-#else
-#define dev_vdbg(dev, fmt, ...)						\
-({									\
-	if (0)								\
-		dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-})
-#endif
-
-/*
- * dev_WARN*() acts like dev_printk(), but with the key difference of
- * using WARN/WARN_ONCE to include file/line information and a backtrace.
- */
-#define dev_WARN(dev, format, arg...) \
-	WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
-
-#define dev_WARN_ONCE(dev, condition, format, arg...) \
-	WARN_ONCE(condition, "%s %s: " format, \
-			dev_driver_string(dev), dev_name(dev), ## arg)
-
 /* Create alias, so I can be autoloaded. */
 #define MODULE_ALIAS_CHARDEV(major,minor) \
 	MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
@@ -1914,52 +971,4 @@ extern long sysfs_deprecated;
 #define sysfs_deprecated 0
 #endif
 
-/**
- * module_driver() - Helper macro for drivers that don't do anything
- * special in module init/exit. This eliminates a lot of boilerplate.
- * Each module may only use this macro once, and calling it replaces
- * module_init() and module_exit().
- *
- * @__driver: driver name
- * @__register: register function for this driver type
- * @__unregister: unregister function for this driver type
- * @...: Additional arguments to be passed to __register and __unregister.
- *
- * Use this macro to construct bus specific macros for registering
- * drivers, and do not use it on its own.
- */
-#define module_driver(__driver, __register, __unregister, ...) \
-static int __init __driver##_init(void) \
-{ \
-	return __register(&(__driver) , ##__VA_ARGS__); \
-} \
-module_init(__driver##_init); \
-static void __exit __driver##_exit(void) \
-{ \
-	__unregister(&(__driver) , ##__VA_ARGS__); \
-} \
-module_exit(__driver##_exit);
-
-/**
- * builtin_driver() - Helper macro for drivers that don't do anything
- * special in init and have no exit. This eliminates some boilerplate.
- * Each driver may only use this macro once, and calling it replaces
- * device_initcall (or in some cases, the legacy __initcall).  This is
- * meant to be a direct parallel of module_driver() above but without
- * the __exit stuff that is not used for builtin cases.
- *
- * @__driver: driver name
- * @__register: register function for this driver type
- * @...: Additional arguments to be passed to __register
- *
- * Use this macro to construct bus specific macros for registering
- * drivers, and do not use it on its own.
- */
-#define builtin_driver(__driver, __register, ...) \
-static int __init __driver##_init(void) \
-{ \
-	return __register(&(__driver) , ##__VA_ARGS__); \
-} \
-device_initcall(__driver##_init);
-
 #endif /* _DEVICE_H_ */
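
Editor's note: since <linux/device.h> now pulls in the split-out <linux/device/bus.h>, <linux/device/class.h> and <linux/device/driver.h>, existing drivers keep building unchanged, while code that only needs one piece can include it directly. A hedged sketch under that assumption (foo_bus_type and foo_find_dev() are invented):

	/* Hypothetical subsystem header that only needs bus_type and the
	 * bus helpers: include the split-out header instead of all of
	 * <linux/device.h>. */
	#include <linux/device/bus.h>

	extern struct bus_type foo_bus_type;

	static inline struct device *foo_find_dev(const char *name)
	{
		return bus_find_device_by_name(&foo_bus_type, NULL, name);
	}
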
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
new file mode 100644
index 0000000..1ea5e1d
--- /dev/null
+++ b/include/linux/device/bus.h
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bus.h - the bus-specific portions of the driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * See Documentation/driver-api/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_BUS_H_
+#define _DEVICE_BUS_H_
+
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/pm.h>
+
+struct device_driver;
+struct fwnode_handle;
+
+/**
+ * struct bus_type - The bus type of the device
+ *
+ * @name:	The name of the bus.
+ * @dev_name:	Used for subsystems to enumerate devices like ("foo%u", dev->id).
+ * @dev_root:	Default device to use as the parent.
+ * @bus_groups:	Default attributes of the bus.
+ * @dev_groups:	Default attributes of the devices on the bus.
+ * @drv_groups: Default attributes of the device drivers on the bus.
+ * @match:	Called, perhaps multiple times, whenever a new device or driver
+ *		is added for this bus. It should return a positive value if the
+ *		given device can be handled by the given driver and zero
+ *		otherwise. It may also return an error code if determining that
+ *		the driver supports the device is not possible. In case of
+ *		-EPROBE_DEFER it will queue the device for deferred probing.
+ * @uevent:	Called when a device is added, removed, or a few other things
+ *		that generate uevents to add the environment variables.
+ * @probe:	Called when a new device or driver is added to this bus; calls
+ *		the matched driver's probe to initialize the device.
+ * @sync_state:	Called to sync device state to software state after all the
+ *		state tracking consumers linked to this device (present at
+ *		the time of late_initcall) have successfully bound to a
+ *		driver. If the device has no consumers, this function will
+ *		be called at late_initcall_sync level. If the device has
+ *		consumers that are never bound to a driver, this function
+ *		will never get called until they do.
+ * @remove:	Called when a device is removed from this bus.
+ * @shutdown:	Called at shut-down time to quiesce the device.
+ *
+ * @online:	Called to put the device back online (after offlining it).
+ * @offline:	Called to put the device offline for hot-removal. May fail.
+ *
+ * @suspend:	Called when a device on this bus wants to go to sleep mode.
+ * @resume:	Called to bring a device on this bus out of sleep mode.
+ * @num_vf:	Called to find out how many virtual functions a device on this
+ *		bus supports.
+ * @dma_configure:	Called to setup DMA configuration on a device on
+ *			this bus.
+ * @pm:		Power management operations of this bus, callback the specific
+ *		device driver's pm-ops.
+ * @iommu_ops:  IOMMU specific operations for this bus, used to attach IOMMU
+ *              driver implementations to a bus and allow the driver to do
+ *              bus-specific setup
+ * @p:		The private data of the driver core, only the driver core can
+ *		touch this.
+ * @lock_key:	Lock class key for use by the lock validator
+ * @need_parent_lock:	When probing or removing a device on this bus, the
+ *			device core should lock the device's parent.
+ *
+ * A bus is a channel between the processor and one or more devices. For the
+ * purposes of the device model, all devices are connected via a bus, even if
+ * it is an internal, virtual, "platform" bus. Buses can plug into each other.
+ * A USB controller is usually a PCI device, for example. The device model
+ * represents the actual connections between buses and the devices they control.
+ * A bus is represented by the bus_type structure. It contains the name, the
+ * default attributes, the bus' methods, PM operations, and the driver core's
+ * private data.
+ */
+struct bus_type {
+	const char		*name;
+	const char		*dev_name;
+	struct device		*dev_root;
+	const struct attribute_group **bus_groups;
+	const struct attribute_group **dev_groups;
+	const struct attribute_group **drv_groups;
+
+	int (*match)(struct device *dev, struct device_driver *drv);
+	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
+	int (*probe)(struct device *dev);
+	void (*sync_state)(struct device *dev);
+	int (*remove)(struct device *dev);
+	void (*shutdown)(struct device *dev);
+
+	int (*online)(struct device *dev);
+	int (*offline)(struct device *dev);
+
+	int (*suspend)(struct device *dev, pm_message_t state);
+	int (*resume)(struct device *dev);
+
+	int (*num_vf)(struct device *dev);
+
+	int (*dma_configure)(struct device *dev);
+
+	const struct dev_pm_ops *pm;
+
+	const struct iommu_ops *iommu_ops;
+
+	struct subsys_private *p;
+	struct lock_class_key lock_key;
+
+	bool need_parent_lock;
+};
+
+extern int __must_check bus_register(struct bus_type *bus);
+
+extern void bus_unregister(struct bus_type *bus);
+
+extern int __must_check bus_rescan_devices(struct bus_type *bus);
+
+struct bus_attribute {
+	struct attribute	attr;
+	ssize_t (*show)(struct bus_type *bus, char *buf);
+	ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
+};
+
+#define BUS_ATTR_RW(_name) \
+	struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
+#define BUS_ATTR_RO(_name) \
+	struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
+#define BUS_ATTR_WO(_name) \
+	struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
+
+extern int __must_check bus_create_file(struct bus_type *,
+					struct bus_attribute *);
+extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
+
+/* Generic device matching functions that all busses can use to match with */
+int device_match_name(struct device *dev, const void *name);
+int device_match_of_node(struct device *dev, const void *np);
+int device_match_fwnode(struct device *dev, const void *fwnode);
+int device_match_devt(struct device *dev, const void *pdevt);
+int device_match_acpi_dev(struct device *dev, const void *adev);
+int device_match_any(struct device *dev, const void *unused);
+
+/* iterator helpers for buses */
+struct subsys_dev_iter {
+	struct klist_iter		ki;
+	const struct device_type	*type;
+};
+void subsys_dev_iter_init(struct subsys_dev_iter *iter,
+			 struct bus_type *subsys,
+			 struct device *start,
+			 const struct device_type *type);
+struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
+void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
+
+int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
+		     int (*fn)(struct device *dev, void *data));
+struct device *bus_find_device(struct bus_type *bus, struct device *start,
+			       const void *data,
+			       int (*match)(struct device *dev, const void *data));
+/**
+ * bus_find_device_by_name - device iterator for locating a particular device
+ * of a specific name.
+ * @bus: bus type
+ * @start: Device to begin with
+ * @name: name of the device to match
+ */
+static inline struct device *bus_find_device_by_name(struct bus_type *bus,
+						     struct device *start,
+						     const char *name)
+{
+	return bus_find_device(bus, start, name, device_match_name);
+}
+
+/**
+ * bus_find_device_by_of_node : device iterator for locating a particular device
+ * matching the of_node.
+ * @bus: bus type
+ * @np: of_node of the device to match.
+ */
+static inline struct device *
+bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np)
+{
+	return bus_find_device(bus, NULL, np, device_match_of_node);
+}
+
+/**
+ * bus_find_device_by_fwnode - device iterator for locating a particular device
+ * matching the fwnode.
+ * @bus: bus type
+ * @fwnode: fwnode of the device to match.
+ */
+static inline struct device *
+bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwnode)
+{
+	return bus_find_device(bus, NULL, fwnode, device_match_fwnode);
+}
+
+/**
+ * bus_find_device_by_devt - device iterator for locating a particular device
+ * matching the device number.
+ * @bus: bus type
+ * @devt: device number (dev_t) of the device to match.
+ */
+static inline struct device *bus_find_device_by_devt(struct bus_type *bus,
+						     dev_t devt)
+{
+	return bus_find_device(bus, NULL, &devt, device_match_devt);
+}
+
+/**
+ * bus_find_next_device - Find the next device after a given device in a
+ * given bus.
+ * @bus: bus type
+ * @cur: device to begin the search with.
+ */
+static inline struct device *
+bus_find_next_device(struct bus_type *bus, struct device *cur)
+{
+	return bus_find_device(bus, cur, NULL, device_match_any);
+}
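+
+/*
+ * Example (sketch; hypothetical names): looking up a device on a bus by
+ * name. bus_find_device() and these wrappers return the device with a
+ * reference held, so drop it with put_device() when done.
+ *
+ *	struct device *dev;
+ *
+ *	dev = bus_find_device_by_name(&foo_bus, NULL, "foo0");
+ *	if (dev) {
+ *		...
+ *		put_device(dev);
+ *	}
+ */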
+
+#ifdef CONFIG_ACPI
+struct acpi_device;
+
+/**
+ * bus_find_device_by_acpi_dev - device iterator for locating a particular device
+ * matching the ACPI_COMPANION device.
+ * @bus: bus type
+ * @adev: ACPI_COMPANION device to match.
+ */
+static inline struct device *
+bus_find_device_by_acpi_dev(struct bus_type *bus, const struct acpi_device *adev)
+{
+	return bus_find_device(bus, NULL, adev, device_match_acpi_dev);
+}
+#else
+static inline struct device *
+bus_find_device_by_acpi_dev(struct bus_type *bus, const void *adev)
+{
+	return NULL;
+}
+#endif
+
+struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
+					struct device *hint);
+int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
+		     void *data, int (*fn)(struct device_driver *, void *));
+void bus_sort_breadthfirst(struct bus_type *bus,
+			   int (*compare)(const struct device *a,
+					  const struct device *b));
+/*
+ * Bus notifiers: Get notified of addition/removal of devices
+ * and binding/unbinding of drivers to devices.
+ * In the long run, it should be a replacement for the platform
+ * notify hooks.
+ */
+struct notifier_block;
+
+extern int bus_register_notifier(struct bus_type *bus,
+				 struct notifier_block *nb);
+extern int bus_unregister_notifier(struct bus_type *bus,
+				   struct notifier_block *nb);
+
+/* All notifiers below get called with the target struct device *
+ * as an argument. Note that those functions are likely to be called
+ * with the device lock held in the core, so be careful.
+ */
+#define BUS_NOTIFY_ADD_DEVICE		0x00000001 /* device added */
+#define BUS_NOTIFY_DEL_DEVICE		0x00000002 /* device to be removed */
+#define BUS_NOTIFY_REMOVED_DEVICE	0x00000003 /* device removed */
+#define BUS_NOTIFY_BIND_DRIVER		0x00000004 /* driver about to be
+						      bound */
+#define BUS_NOTIFY_BOUND_DRIVER		0x00000005 /* driver bound to device */
+#define BUS_NOTIFY_UNBIND_DRIVER	0x00000006 /* driver about to be
+						      unbound */
+#define BUS_NOTIFY_UNBOUND_DRIVER	0x00000007 /* driver is unbound
+						      from the device */
+#define BUS_NOTIFY_DRIVER_NOT_BOUND	0x00000008 /* driver fails to be bound */
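+
+/*
+ * Example (sketch; hypothetical names): getting notified when a driver is
+ * bound to a device on the bus.
+ *
+ *	static int foo_bus_notify(struct notifier_block *nb,
+ *				  unsigned long action, void *data)
+ *	{
+ *		struct device *dev = data;
+ *
+ *		if (action == BUS_NOTIFY_BOUND_DRIVER)
+ *			dev_info(dev, "driver bound\n");
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block foo_nb = {
+ *		.notifier_call = foo_bus_notify,
+ *	};
+ *
+ *	bus_register_notifier(&foo_bus, &foo_nb);
+ */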
+
+extern struct kset *bus_get_kset(struct bus_type *bus);
+extern struct klist *bus_get_device_klist(struct bus_type *bus);
+
+#endif
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
new file mode 100644
index 0000000..e8d470c
--- /dev/null
+++ b/include/linux/device/class.h
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The class-specific portions of the driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * See Documentation/driver-api/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_CLASS_H_
+#define _DEVICE_CLASS_H_
+
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/pm.h>
+#include <linux/device/bus.h>
+
+struct device;
+struct fwnode_handle;
+
+/**
+ * struct class - device classes
+ * @name:	Name of the class.
+ * @owner:	The module owner.
+ * @class_groups: Default attributes of this class.
+ * @dev_groups:	Default attributes of the devices that belong to the class.
+ * @dev_kobj:	The kobject that represents this class and links it into the hierarchy.
+ * @dev_uevent:	Called when a device is added to or removed from this class,
+ *		or for a few other events that generate uevents, in order to
+ *		add the environment variables.
+ * @devnode:	Callback to provide the devtmpfs node name for the device.
+ * @class_release: Called to release this class.
+ * @dev_release: Called to release the device.
+ * @shutdown_pre: Called at shut-down time before driver shutdown.
+ * @ns_type:	Callbacks so sysfs can determine namespaces.
+ * @namespace:	Namespace of the device that belongs to this class.
+ * @get_ownership: Allows class to specify uid/gid of the sysfs directories
+ *		for the devices belonging to the class. Usually tied to
+ *		device's namespace.
+ * @pm:		The default device power management operations of this class.
+ * @p:		The private data of the driver core, no one other than the
+ *		driver core can touch this.
+ *
+ * A class is a higher-level view of a device that abstracts out low-level
+ * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
+ * at the class level, they are all simply disks. Classes allow user space
+ * to work with devices based on what they do, rather than how they are
+ * connected or how they work.
+ */
+struct class {
+	const char		*name;
+	struct module		*owner;
+
+	const struct attribute_group	**class_groups;
+	const struct attribute_group	**dev_groups;
+	struct kobject			*dev_kobj;
+
+	int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
+	char *(*devnode)(struct device *dev, umode_t *mode);
+
+	void (*class_release)(struct class *class);
+	void (*dev_release)(struct device *dev);
+
+	int (*shutdown_pre)(struct device *dev);
+
+	const struct kobj_ns_type_operations *ns_type;
+	const void *(*namespace)(struct device *dev);
+
+	void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid);
+
+	const struct dev_pm_ops *pm;
+
+	struct subsys_private *p;
+};
+
+struct class_dev_iter {
+	struct klist_iter		ki;
+	const struct device_type	*type;
+};
+
+extern struct kobject *sysfs_dev_block_kobj;
+extern struct kobject *sysfs_dev_char_kobj;
+extern int __must_check __class_register(struct class *class,
+					 struct lock_class_key *key);
+extern void class_unregister(struct class *class);
+
+/* This is a #define to keep the compiler from merging different
+ * instances of the __key variable */
+#define class_register(class)			\
+({						\
+	static struct lock_class_key __key;	\
+	__class_register(class, &__key);	\
+})
+
+struct class_compat;
+struct class_compat *class_compat_register(const char *name);
+void class_compat_unregister(struct class_compat *cls);
+int class_compat_create_link(struct class_compat *cls, struct device *dev,
+			     struct device *device_link);
+void class_compat_remove_link(struct class_compat *cls, struct device *dev,
+			      struct device *device_link);
+
+extern void class_dev_iter_init(struct class_dev_iter *iter,
+				struct class *class,
+				struct device *start,
+				const struct device_type *type);
+extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
+extern void class_dev_iter_exit(struct class_dev_iter *iter);
+
+extern int class_for_each_device(struct class *class, struct device *start,
+				 void *data,
+				 int (*fn)(struct device *dev, void *data));
+extern struct device *class_find_device(struct class *class,
+					struct device *start, const void *data,
+					int (*match)(struct device *, const void *));
+
+/**
+ * class_find_device_by_name - device iterator for locating a particular device
+ * of a specific name.
+ * @class: class type
+ * @name: name of the device to match
+ */
+static inline struct device *class_find_device_by_name(struct class *class,
+						       const char *name)
+{
+	return class_find_device(class, NULL, name, device_match_name);
+}
+
+/**
+ * class_find_device_by_of_node - device iterator for locating a particular device
+ * matching the of_node.
+ * @class: class type
+ * @np: of_node of the device to match.
+ */
+static inline struct device *
+class_find_device_by_of_node(struct class *class, const struct device_node *np)
+{
+	return class_find_device(class, NULL, np, device_match_of_node);
+}
+
+/**
+ * class_find_device_by_fwnode - device iterator for locating a particular device
+ * matching the fwnode.
+ * @class: class type
+ * @fwnode: fwnode of the device to match.
+ */
+static inline struct device *
+class_find_device_by_fwnode(struct class *class,
+			    const struct fwnode_handle *fwnode)
+{
+	return class_find_device(class, NULL, fwnode, device_match_fwnode);
+}
+
+/**
+ * class_find_device_by_devt - device iterator for locating a particular device
+ * matching the device number.
+ * @class: class type
+ * @devt: device number (dev_t) of the device to match.
+ */
+static inline struct device *class_find_device_by_devt(struct class *class,
+						       dev_t devt)
+{
+	return class_find_device(class, NULL, &devt, device_match_devt);
+}
+
+#ifdef CONFIG_ACPI
+struct acpi_device;
+/**
+ * class_find_device_by_acpi_dev - device iterator for locating a particular
+ * device matching the ACPI_COMPANION device.
+ * @class: class type
+ * @adev: ACPI_COMPANION device to match.
+ */
+static inline struct device *
+class_find_device_by_acpi_dev(struct class *class, const struct acpi_device *adev)
+{
+	return class_find_device(class, NULL, adev, device_match_acpi_dev);
+}
+#else
+static inline struct device *
+class_find_device_by_acpi_dev(struct class *class, const void *adev)
+{
+	return NULL;
+}
+#endif
+
+struct class_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct class *class, struct class_attribute *attr,
+			char *buf);
+	ssize_t (*store)(struct class *class, struct class_attribute *attr,
+			const char *buf, size_t count);
+};
+
+#define CLASS_ATTR_RW(_name) \
+	struct class_attribute class_attr_##_name = __ATTR_RW(_name)
+#define CLASS_ATTR_RO(_name) \
+	struct class_attribute class_attr_##_name = __ATTR_RO(_name)
+#define CLASS_ATTR_WO(_name) \
+	struct class_attribute class_attr_##_name = __ATTR_WO(_name)
+
+extern int __must_check class_create_file_ns(struct class *class,
+					     const struct class_attribute *attr,
+					     const void *ns);
+extern void class_remove_file_ns(struct class *class,
+				 const struct class_attribute *attr,
+				 const void *ns);
+
+static inline int __must_check class_create_file(struct class *class,
+					const struct class_attribute *attr)
+{
+	return class_create_file_ns(class, attr, NULL);
+}
+
+static inline void class_remove_file(struct class *class,
+				     const struct class_attribute *attr)
+{
+	return class_remove_file_ns(class, attr, NULL);
+}
+
+/* Simple class attribute that is just a static string */
+struct class_attribute_string {
+	struct class_attribute attr;
+	char *str;
+};
+
+/* Currently only read-only is supported */
+#define _CLASS_ATTR_STRING(_name, _mode, _str) \
+	{ __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
+#define CLASS_ATTR_STRING(_name, _mode, _str) \
+	struct class_attribute_string class_attr_##_name = \
+		_CLASS_ATTR_STRING(_name, _mode, _str)
+
+extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
+                        char *buf);
+
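+/*
+ * Example (sketch; hypothetical names, foo_class being a previously
+ * created struct class pointer): exporting a fixed string as a read-only
+ * class attribute.
+ *
+ *	static CLASS_ATTR_STRING(version, 0444, "1.0");
+ *
+ *	ret = class_create_file(foo_class, &class_attr_version.attr);
+ */
+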
+struct class_interface {
+	struct list_head	node;
+	struct class		*class;
+
+	int (*add_dev)		(struct device *, struct class_interface *);
+	void (*remove_dev)	(struct device *, struct class_interface *);
+};
+
+extern int __must_check class_interface_register(struct class_interface *);
+extern void class_interface_unregister(struct class_interface *);
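+
+/*
+ * Example (sketch; hypothetical names, foo_class being a previously
+ * created struct class pointer): a class interface whose add_dev callback
+ * runs for every device already in the class and for each one added later.
+ *
+ *	static int foo_add_dev(struct device *dev, struct class_interface *intf)
+ *	{
+ *		dev_info(dev, "seen by foo\n");
+ *		return 0;
+ *	}
+ *
+ *	static struct class_interface foo_intf = {
+ *		.add_dev = foo_add_dev,
+ *	};
+ *
+ *	foo_intf.class = foo_class;
+ *	ret = class_interface_register(&foo_intf);
+ */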
+
+extern struct class * __must_check __class_create(struct module *owner,
+						  const char *name,
+						  struct lock_class_key *key);
+extern void class_destroy(struct class *cls);
+
+/* This is a #define to keep the compiler from merging different
+ * instances of the __key variable */
+#define class_create(owner, name)		\
+({						\
+	static struct lock_class_key __key;	\
+	__class_create(owner, name, &__key);	\
+})
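+
+/*
+ * Example (sketch; hypothetical names): creating a class on module init
+ * and tearing it down on exit. class_create() returns an ERR_PTR() value
+ * on failure.
+ *
+ *	static struct class *foo_class;
+ *
+ *	static int __init foo_init(void)
+ *	{
+ *		foo_class = class_create(THIS_MODULE, "foo");
+ *		return PTR_ERR_OR_ZERO(foo_class);
+ *	}
+ *
+ *	static void __exit foo_exit(void)
+ *	{
+ *		class_destroy(foo_class);
+ *	}
+ */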
+
+
+#endif	/* _DEVICE_CLASS_H_ */
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
new file mode 100644
index 0000000..1188260
--- /dev/null
+++ b/include/linux/device/driver.h
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The driver-specific portions of the driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * See Documentation/driver-api/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_DRIVER_H_
+#define _DEVICE_DRIVER_H_
+
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/pm.h>
+#include <linux/device/bus.h>
+
+/**
+ * enum probe_type - device driver probe type to try
+ *	Device drivers may opt in for special handling of their
+ *	respective probe routines. This tells the core what to
+ *	expect and prefer.
+ *
+ * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
+ *	whether probed synchronously or asynchronously.
+ * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices whose
+ *	probing order is not essential for booting the system may
+ *	opt into executing their probes asynchronously.
+ * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
+ *	their probe routines to run synchronously with driver and
+ *	device registration (with the exception of -EPROBE_DEFER
+ *	handling - re-probing always ends up being done asynchronously).
+ *
+ * Note that the end goal is to switch the kernel to use asynchronous
+ * probing by default, so annotating drivers with
+ * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
+ * to speed up boot process while we are validating the rest of the
+ * drivers.
+ */
+enum probe_type {
+	PROBE_DEFAULT_STRATEGY,
+	PROBE_PREFER_ASYNCHRONOUS,
+	PROBE_FORCE_SYNCHRONOUS,
+};
+
+/**
+ * struct device_driver - The basic device driver structure
+ * @name:	Name of the device driver.
+ * @bus:	The bus which the device of this driver belongs to.
+ * @owner:	The module owner.
+ * @mod_name:	Used for built-in modules.
+ * @suppress_bind_attrs: Disables bind/unbind via sysfs.
+ * @probe_type:	Type of the probe (synchronous or asynchronous) to use.
+ * @of_match_table: The open firmware table.
+ * @acpi_match_table: The ACPI match table.
+ * @probe:	Called to query the existence of a specific device,
+ *		whether this driver can work with it, and bind the driver
+ *		to a specific device.
+ * @sync_state:	Called to sync device state to software state after all the
+ *		state tracking consumers linked to this device (present at
+ *		the time of late_initcall) have successfully bound to a
+ *		driver. If the device has no consumers, this function will
+ *		be called at late_initcall_sync level. If the device has
+ *		consumers that are never bound to a driver, this function
+ *		will never get called until they do.
+ * @remove:	Called when the device is removed from the system to
+ *		unbind a device from this driver.
+ * @shutdown:	Called at shut-down time to quiesce the device.
+ * @suspend:	Called to put the device to sleep mode. Usually to a
+ *		low power state.
+ * @resume:	Called to bring a device back from sleep mode.
+ * @groups:	Default attributes that get created by the driver core
+ *		automatically.
+ * @dev_groups:	Additional attributes attached to the device instance once
+ *		it is bound to the driver.
+ * @pm:		Power management operations of the device which matched
+ *		this driver.
+ * @coredump:	Called when the sysfs entry is written to. The device driver
+ *		is expected to call the dev_coredump API resulting in a
+ *		uevent.
+ * @p:		Driver core's private data, no one other than the driver
+ *		core can touch this.
+ *
+ * The device driver-model tracks all of the drivers known to the system.
+ * The main reason for this tracking is to enable the driver core to match
+ * up drivers with new devices. Once drivers are known objects within the
+ * system, however, a number of other things become possible. Device drivers
+ * can export information and configuration variables that are independent
+ * of any specific device.
+ */
+struct device_driver {
+	const char		*name;
+	struct bus_type		*bus;
+
+	struct module		*owner;
+	const char		*mod_name;	/* used for built-in modules */
+
+	bool suppress_bind_attrs;	/* disables bind/unbind via sysfs */
+	enum probe_type probe_type;
+
+	const struct of_device_id	*of_match_table;
+	const struct acpi_device_id	*acpi_match_table;
+
+	int (*probe) (struct device *dev);
+	void (*sync_state)(struct device *dev);
+	int (*remove) (struct device *dev);
+	void (*shutdown) (struct device *dev);
+	int (*suspend) (struct device *dev, pm_message_t state);
+	int (*resume) (struct device *dev);
+	const struct attribute_group **groups;
+	const struct attribute_group **dev_groups;
+
+	const struct dev_pm_ops *pm;
+	void (*coredump) (struct device *dev);
+
+	struct driver_private *p;
+};
+
+
+extern int __must_check driver_register(struct device_driver *drv);
+extern void driver_unregister(struct device_driver *drv);
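+
+/*
+ * Example (sketch; hypothetical names, foo_bus being a bus registered
+ * elsewhere): a driver registered directly through driver_register().
+ * Most drivers go through a bus-specific wrapper, which reduces to this.
+ *
+ *	static int foo_probe(struct device *dev)
+ *	{
+ *		dev_info(dev, "bound\n");
+ *		return 0;
+ *	}
+ *
+ *	static struct device_driver foo_driver = {
+ *		.name		= "foo",
+ *		.bus		= &foo_bus,
+ *		.owner		= THIS_MODULE,
+ *		.probe		= foo_probe,
+ *		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
+ *	};
+ *
+ *	ret = driver_register(&foo_driver);
+ */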
+
+extern struct device_driver *driver_find(const char *name,
+					 struct bus_type *bus);
+extern int driver_probe_done(void);
+extern void wait_for_device_probe(void);
+
+/* sysfs interface for exporting driver attributes */
+
+struct driver_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct device_driver *driver, char *buf);
+	ssize_t (*store)(struct device_driver *driver, const char *buf,
+			 size_t count);
+};
+
+#define DRIVER_ATTR_RW(_name) \
+	struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
+#define DRIVER_ATTR_RO(_name) \
+	struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
+#define DRIVER_ATTR_WO(_name) \
+	struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
+
+extern int __must_check driver_create_file(struct device_driver *driver,
+					const struct driver_attribute *attr);
+extern void driver_remove_file(struct device_driver *driver,
+			       const struct driver_attribute *attr);
+
+extern int __must_check driver_for_each_device(struct device_driver *drv,
+					       struct device *start,
+					       void *data,
+					       int (*fn)(struct device *dev,
+							 void *));
+struct device *driver_find_device(struct device_driver *drv,
+				  struct device *start, const void *data,
+				  int (*match)(struct device *dev, const void *data));
+
+/**
+ * driver_find_device_by_name - device iterator for locating a particular device
+ * of a specific name.
+ * @drv: the driver we're iterating
+ * @name: name of the device to match
+ */
+static inline struct device *driver_find_device_by_name(struct device_driver *drv,
+							const char *name)
+{
+	return driver_find_device(drv, NULL, name, device_match_name);
+}
+
+/**
+ * driver_find_device_by_of_node - device iterator for locating a particular device
+ * by of_node pointer.
+ * @drv: the driver we're iterating
+ * @np: of_node pointer to match.
+ */
+static inline struct device *
+driver_find_device_by_of_node(struct device_driver *drv,
+			      const struct device_node *np)
+{
+	return driver_find_device(drv, NULL, np, device_match_of_node);
+}
+
+/**
+ * driver_find_device_by_fwnode - device iterator for locating a particular device
+ * by fwnode pointer.
+ * @drv: the driver we're iterating
+ * @fwnode: fwnode pointer to match.
+ */
+static inline struct device *
+driver_find_device_by_fwnode(struct device_driver *drv,
+			     const struct fwnode_handle *fwnode)
+{
+	return driver_find_device(drv, NULL, fwnode, device_match_fwnode);
+}
+
+/**
+ * driver_find_device_by_devt - device iterator for locating a particular device
+ * by device number.
+ * @drv: the driver we're iterating
+ * @devt: device number (dev_t) to match.
+ */
+static inline struct device *driver_find_device_by_devt(struct device_driver *drv,
+							dev_t devt)
+{
+	return driver_find_device(drv, NULL, &devt, device_match_devt);
+}
+
+static inline struct device *driver_find_next_device(struct device_driver *drv,
+						     struct device *start)
+{
+	return driver_find_device(drv, start, NULL, device_match_any);
+}
+
+#ifdef CONFIG_ACPI
+/**
+ * driver_find_device_by_acpi_dev - device iterator for locating a particular
+ * device matching the ACPI_COMPANION device.
+ * @drv: the driver we're iterating
+ * @adev: ACPI_COMPANION device to match.
+ */
+static inline struct device *
+driver_find_device_by_acpi_dev(struct device_driver *drv,
+			       const struct acpi_device *adev)
+{
+	return driver_find_device(drv, NULL, adev, device_match_acpi_dev);
+}
+#else
+static inline struct device *
+driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev)
+{
+	return NULL;
+}
+#endif
+
+void driver_deferred_probe_add(struct device *dev);
+int driver_deferred_probe_check_state(struct device *dev);
+int driver_deferred_probe_check_state_continue(struct device *dev);
+void driver_init(void);
+
+/**
+ * module_driver() - Helper macro for drivers that don't do anything
+ * special in module init/exit. This eliminates a lot of boilerplate.
+ * Each module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ *
+ * @__driver: driver name
+ * @__register: register function for this driver type
+ * @__unregister: unregister function for this driver type
+ * @...: Additional arguments to be passed to __register and __unregister.
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define module_driver(__driver, __register, __unregister, ...) \
+static int __init __driver##_init(void) \
+{ \
+	return __register(&(__driver) , ##__VA_ARGS__); \
+} \
+module_init(__driver##_init); \
+static void __exit __driver##_exit(void) \
+{ \
+	__unregister(&(__driver) , ##__VA_ARGS__); \
+} \
+module_exit(__driver##_exit);
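+
+/*
+ * Example (sketch; hypothetical "foo" bus with foo_driver_register() and
+ * foo_driver_unregister() helpers): building a bus-specific convenience
+ * macro on top of module_driver(). Helpers such as module_platform_driver()
+ * follow this pattern.
+ *
+ *	#define module_foo_driver(__foo_driver) \
+ *		module_driver(__foo_driver, foo_driver_register, \
+ *			      foo_driver_unregister)
+ *
+ * A driver module then needs only:
+ *
+ *	module_foo_driver(my_foo_driver);
+ */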
+
+/**
+ * builtin_driver() - Helper macro for drivers that don't do anything
+ * special in init and have no exit. This eliminates some boilerplate.
+ * Each driver may only use this macro once, and calling it replaces
+ * device_initcall (or in some cases, the legacy __initcall).  This is
+ * meant to be a direct parallel of module_driver() above but without
+ * the __exit stuff that is not used for builtin cases.
+ *
+ * @__driver: driver name
+ * @__register: register function for this driver type
+ * @...: Additional arguments to be passed to __register
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define builtin_driver(__driver, __register, ...) \
+static int __init __driver##_init(void) \
+{ \
+	return __register(&(__driver) , ##__VA_ARGS__); \
+} \
+device_initcall(__driver##_init);
+
+#endif	/* _DEVICE_DRIVER_H_ */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index af73f83..abf5459 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -249,31 +249,6 @@ struct dma_buf_ops {
 	 */
 	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
 
-	/**
-	 * @map:
-	 *
-	 * Maps a page from the buffer into kernel address space. The page is
-	 * specified by offset into the buffer in PAGE_SIZE units.
-	 *
-	 * This callback is optional.
-	 *
-	 * Returns:
-	 *
-	 * Virtual address pointer where requested page can be accessed. NULL
-	 * on error or when this function is unimplemented by the exporter.
-	 */
-	void *(*map)(struct dma_buf *, unsigned long);
-
-	/**
-	 * @unmap:
-	 *
-	 * Unmaps a page from the buffer. Page offset and address pointer should
-	 * be the same as the one passed to and returned by matching call to map.
-	 *
-	 * This callback is optional.
-	 */
-	void (*unmap)(struct dma_buf *, unsigned long, void *);
-
 	void *(*vmap)(struct dma_buf *);
 	void (*vunmap)(struct dma_buf *, void *vaddr);
 };
@@ -464,8 +439,6 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 			     enum dma_data_direction dir);
 int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
 			   enum dma_data_direction dir);
-void *dma_buf_kmap(struct dma_buf *, unsigned long);
-void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
 
 int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
 		 unsigned long);
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
new file mode 100644
index 0000000..454e354
--- /dev/null
+++ b/include/linux/dma-heap.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DMABUF Heaps Allocation Infrastructure
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#ifndef _DMA_HEAPS_H
+#define _DMA_HEAPS_H
+
+#include <linux/cdev.h>
+#include <linux/types.h>
+
+struct dma_heap;
+
+/**
+ * struct dma_heap_ops - ops to operate on a given heap
+ * @allocate:		allocate dmabuf and return fd
+ *
+ * allocate returns a dmabuf fd on success, -errno on error.
+ */
+struct dma_heap_ops {
+	int (*allocate)(struct dma_heap *heap,
+			unsigned long len,
+			unsigned long fd_flags,
+			unsigned long heap_flags);
+};
+
+/**
+ * struct dma_heap_export_info - information needed to export a new dmabuf heap
+ * @name:	used for debugging/device-node name
+ * @ops:	ops struct for this heap
+ * @priv:	heap exporter private data
+ *
+ * Information needed to export a new dmabuf heap.
+ */
+struct dma_heap_export_info {
+	const char *name;
+	const struct dma_heap_ops *ops;
+	void *priv;
+};
+
+/**
+ * dma_heap_get_drvdata() - get per-heap driver data
+ * @heap: DMA-Heap to retrieve private data for
+ *
+ * Returns:
+ * The per-heap data for the heap.
+ */
+void *dma_heap_get_drvdata(struct dma_heap *heap);
+
+/**
+ * dma_heap_add - adds a heap to dmabuf heaps
+ * @exp_info:		information needed to register this heap
+ */
+struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
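+
+/*
+ * Example (sketch; hypothetical names): registering a heap. The allocate
+ * op creates a dma-buf of @len bytes and returns an fd for it, or a
+ * negative errno.
+ *
+ *	static int foo_heap_allocate(struct dma_heap *heap, unsigned long len,
+ *				     unsigned long fd_flags,
+ *				     unsigned long heap_flags)
+ *	{
+ *		...
+ *	}
+ *
+ *	static const struct dma_heap_ops foo_heap_ops = {
+ *		.allocate = foo_heap_allocate,
+ *	};
+ *
+ *	static struct dma_heap_export_info exp_info = {
+ *		.name = "foo",
+ *		.ops  = &foo_heap_ops,
+ *		.priv = NULL,
+ *	};
+ *
+ *	heap = dma_heap_add(&exp_info);
+ */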
+
+#endif /* _DMA_HEAPS_H */
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index ffcc772..dc4fd8a 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -12,6 +12,8 @@
 #include <linux/fcntl.h>
 #include <linux/wait.h>
 #include <linux/err.h>
+#include <linux/percpu-defs.h>
+#include <linux/percpu.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -40,6 +42,13 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
 
+DECLARE_PER_CPU(int, eventfd_wake_count);
+
+static inline bool eventfd_signal_count(void)
+{
+	return this_cpu_read(eventfd_wake_count);
+}
+
 #else /* CONFIG_EVENTFD */
 
 /*
@@ -68,6 +77,11 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
 	return -ENOSYS;
 }
 
+static inline bool eventfd_signal_count(void)
+{
+	return false;
+}
+
 #endif
 
 #endif /* _LINUX_EVENTFD_H */
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index bc6d79b..8f000fa 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -61,6 +61,15 @@ static inline void eventpoll_release(struct file *file)
 	eventpoll_release_file(file);
 }
 
+int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
+		 bool nonblock);
+
+/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
+static inline int ep_op_has_event(int op)
+{
+	return op != EPOLL_CTL_DEL;
+}
+
 #else
 
 static inline void eventpoll_init_file(struct file *file) {}
diff --git a/include/linux/export.h b/include/linux/export.h
index 6278414..fceb5e8 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -82,16 +82,29 @@ struct kernel_symbol {
 
 #else
 
-/* For every exported symbol, place a struct in the __ksymtab section */
-#define ___EXPORT_SYMBOL(sym, sec, ns)					\
-	extern typeof(sym) sym;						\
-	__CRC_SYMBOL(sym, sec);						\
-	static const char __kstrtab_##sym[]				\
-	__attribute__((section("__ksymtab_strings"), used, aligned(1)))	\
-	= #sym;								\
-	static const char __kstrtabns_##sym[]				\
-	__attribute__((section("__ksymtab_strings"), used, aligned(1)))	\
-	= ns;								\
+/*
+ * For every exported symbol, do the following:
+ *
+ * - If applicable, place a CRC entry in the __kcrctab section.
+ * - Put the name of the symbol and namespace (empty string "" for none) in
+ *   __ksymtab_strings.
+ * - Place a struct kernel_symbol entry in the __ksymtab section.
+ *
+ * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
+ * section flag requires it. Use '%progbits' instead of '@progbits' since the
+ * former apparently works on all arches according to the binutils source.
+ */
+#define ___EXPORT_SYMBOL(sym, sec, ns)						\
+	extern typeof(sym) sym;							\
+	extern const char __kstrtab_##sym[];					\
+	extern const char __kstrtabns_##sym[];					\
+	__CRC_SYMBOL(sym, sec);							\
+	asm("	.section \"__ksymtab_strings\",\"aMS\",%progbits,1	\n"	\
+	    "__kstrtab_" #sym ":					\n"	\
+	    "	.asciz 	\"" #sym "\"					\n"	\
+	    "__kstrtabns_" #sym ":					\n"	\
+	    "	.asciz 	\"" ns "\"					\n"	\
+	    "	.previous						\n");	\
 	__KSYMTAB_ENTRY(sym, sec)
 
 #endif
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 2bdf643..1b1d77e 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -170,7 +170,7 @@ struct extcon_dev;
  * Following APIs get the connected state of each external connector.
  * The 'id' argument indicates the defined external connector.
  */
-extern int extcon_get_state(struct extcon_dev *edev, unsigned int id);
+int extcon_get_state(struct extcon_dev *edev, unsigned int id);
 
 /*
  * Following APIs get the property of each external connector.
@@ -181,10 +181,10 @@ extern int extcon_get_state(struct extcon_dev *edev, unsigned int id);
  * for each external connector. They are used to get the capability of the
  * property of each external connector based on the id and property.
  */
-extern int extcon_get_property(struct extcon_dev *edev, unsigned int id,
+int extcon_get_property(struct extcon_dev *edev, unsigned int id,
 				unsigned int prop,
 				union extcon_property_value *prop_val);
-extern int extcon_get_property_capability(struct extcon_dev *edev,
+int extcon_get_property_capability(struct extcon_dev *edev,
 				unsigned int id, unsigned int prop);
 
 /*
@@ -196,38 +196,38 @@ extern int extcon_get_property_capability(struct extcon_dev *edev,
  * extcon_register_notifier_all(*edev, *nb) : Register a notifier block
  *			for all supported external connectors of the extcon.
  */
-extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
+int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
 				struct notifier_block *nb);
-extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
+int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
 				struct notifier_block *nb);
-extern int devm_extcon_register_notifier(struct device *dev,
+int devm_extcon_register_notifier(struct device *dev,
 				struct extcon_dev *edev, unsigned int id,
 				struct notifier_block *nb);
-extern void devm_extcon_unregister_notifier(struct device *dev,
+void devm_extcon_unregister_notifier(struct device *dev,
 				struct extcon_dev *edev, unsigned int id,
 				struct notifier_block *nb);
 
-extern int extcon_register_notifier_all(struct extcon_dev *edev,
+int extcon_register_notifier_all(struct extcon_dev *edev,
 				struct notifier_block *nb);
-extern int extcon_unregister_notifier_all(struct extcon_dev *edev,
+int extcon_unregister_notifier_all(struct extcon_dev *edev,
 				struct notifier_block *nb);
-extern int devm_extcon_register_notifier_all(struct device *dev,
+int devm_extcon_register_notifier_all(struct device *dev,
 				struct extcon_dev *edev,
 				struct notifier_block *nb);
-extern void devm_extcon_unregister_notifier_all(struct device *dev,
+void devm_extcon_unregister_notifier_all(struct device *dev,
 				struct extcon_dev *edev,
 				struct notifier_block *nb);
 
 /*
  * Following APIs get the extcon_dev from devicetree or by through extcon name.
  */
-extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
-extern struct extcon_dev *extcon_find_edev_by_node(struct device_node *node);
-extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
+struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
+struct extcon_dev *extcon_find_edev_by_node(struct device_node *node);
+struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
 						     int index);
 
 /* Following API get the name of extcon device. */
-extern const char *extcon_get_edev_name(struct extcon_dev *edev);
+const char *extcon_get_edev_name(struct extcon_dev *edev);
 
 #else /* CONFIG_EXTCON */
 static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id)
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 2847389..ac3f488 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -23,6 +23,7 @@
 
 #define NULL_ADDR		((block_t)0)	/* used as block_t addresses */
 #define NEW_ADDR		((block_t)-1)	/* used as block_t addresses */
+#define COMPRESS_ADDR		((block_t)-2)	/* used as compressed data flag */
 
 #define F2FS_BYTES_TO_BLK(bytes)	((bytes) >> F2FS_BLKSIZE_BITS)
 #define F2FS_BLK_TO_BYTES(blk)		((blk) << F2FS_BLKSIZE_BITS)
@@ -271,6 +272,10 @@ struct f2fs_inode {
 			__le32 i_inode_checksum;/* inode meta checksum */
 			__le64 i_crtime;	/* creation time */
 			__le32 i_crtime_nsec;	/* creation time in nano scale */
+			__le64 i_compr_blocks;	/* # of compressed blocks */
+			__u8 i_compress_algorithm;	/* compress algorithm */
+			__u8 i_log_cluster_size;	/* log of cluster size */
+			__le16 i_padding;		/* padding */
 			__le32 i_extra_end[0];	/* for attribute size calculation */
 		} __packed;
 		__le32 i_addr[DEF_ADDRS_PER_INODE];	/* Pointers to data blocks */
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 8bf3d79..f3f0b97 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -51,8 +51,6 @@ struct space_resv_32 {
 #define FS_IOC_UNRESVSP64_32	_IOW ('X', 43, struct space_resv_32)
 #define FS_IOC_ZERO_RANGE_32	_IOW ('X', 57, struct space_resv_32)
 
-int compat_ioctl_preallocate(struct file *, int, struct space_resv_32 __user *);
-
 #endif
 
 #endif /* _FALLOC_H_ */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 41e0069..3b4b2f0 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -472,7 +472,7 @@ struct fb_info {
 	struct fb_deferred_io *fbdefio;
 #endif
 
-	struct fb_ops *fbops;
+	const struct fb_ops *fbops;
 	struct device *device;		/* This is the parent */
 	struct device *dev;		/* This is this fb device */
 	int class_flag;                    /* private sysfs flags */
@@ -606,7 +606,6 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
 /* drivers/video/fbmem.c */
 extern int register_framebuffer(struct fb_info *fb_info);
 extern void unregister_framebuffer(struct fb_info *fb_info);
-extern void unlink_framebuffer(struct fb_info *fb_info);
 extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
 					       const char *name);
 extern int remove_conflicting_framebuffers(struct apertures_struct *a,
@@ -626,6 +625,7 @@ extern int fb_new_modelist(struct fb_info *info);
 extern struct fb_info *registered_fb[FB_MAX];
 extern int num_registered_fb;
 extern bool fb_center_logo;
+extern int fb_logo_count;
 extern struct class *fb_class;
 
 #define for_each_registered_fb(i)		\
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index d019df9..7bcdcf4 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -2,15 +2,29 @@
 #ifndef _LINUX_FCNTL_H
 #define _LINUX_FCNTL_H
 
+#include <linux/stat.h>
 #include <uapi/linux/fcntl.h>
 
-/* list of all valid flags for the open/openat flags argument: */
+/* List of all valid flags for the open/openat flags argument: */
 #define VALID_OPEN_FLAGS \
 	(O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
 	 O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
 	 FASYNC	| O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
 	 O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
 
+/* List of all valid flags for the how->upgrade_mask argument: */
+#define VALID_UPGRADE_FLAGS \
+	(UPGRADE_NOWRITE | UPGRADE_NOREAD)
+
+/* List of all valid flags for the how->resolve argument: */
+#define VALID_RESOLVE_FLAGS \
+	(RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \
+	 RESOLVE_BENEATH | RESOLVE_IN_ROOT)
+
+/* List of all open_how "versions". */
+#define OPEN_HOW_SIZE_VER0	24 /* sizeof first published struct */
+#define OPEN_HOW_SIZE_LATEST	OPEN_HOW_SIZE_VER0
+
 #ifndef force_o_largefile
 #define force_o_largefile() (!IS_ENABLED(CONFIG_ARCH_32BIT_OFF_T))
 #endif
diff --git a/include/linux/file.h b/include/linux/file.h
index 3fcddff..c6c7b24 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -16,6 +16,7 @@ extern void fput(struct file *);
 extern void fput_many(struct file *, unsigned int);
 
 struct file_operations;
+struct task_struct;
 struct vfsmount;
 struct dentry;
 struct inode;
@@ -47,6 +48,7 @@ static inline void fdput(struct fd fd)
 extern struct file *fget(unsigned int fd);
 extern struct file *fget_many(unsigned int fd, unsigned int refs);
 extern struct file *fget_raw(unsigned int fd);
+extern struct file *fget_task(struct task_struct *task, unsigned int fd);
 extern unsigned long __fdget(unsigned int fd);
 extern unsigned long __fdget_raw(unsigned int fd);
 extern unsigned long __fdget_pos(unsigned int fd);
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index e41ad9e..2cd12eb 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -48,6 +48,10 @@
 #define	ZYNQMP_PM_CAPABILITY_WAKEUP	0x4U
 #define	ZYNQMP_PM_CAPABILITY_UNUSABLE	0x8U
 
+/* Feature check status */
+#define PM_FEATURE_INVALID		-1
+#define PM_FEATURE_UNCHECKED		0
+
 /*
  * Firmware FPGA Manager flags
  * XILINX_ZYNQMP_PM_FPGA_FULL:	FPGA full reconfiguration
@@ -78,17 +82,21 @@ enum pm_api_id {
 	PM_CLOCK_GETRATE,
 	PM_CLOCK_SETPARENT,
 	PM_CLOCK_GETPARENT,
+	PM_FEATURE_CHECK = 63,
+	PM_API_MAX,
 };
 
 /* PMU-FW return status codes */
 enum pm_ret_status {
 	XST_PM_SUCCESS = 0,
+	XST_PM_NO_FEATURE = 19,
 	XST_PM_INTERNAL = 2000,
 	XST_PM_CONFLICT,
 	XST_PM_NO_ACCESS,
 	XST_PM_INVALID_NODE,
 	XST_PM_DOUBLE_REQ,
 	XST_PM_ABORT_SUSPEND,
+	XST_PM_MULT_USER = 2008,
 };
 
 enum pm_ioctl_id {
@@ -107,6 +115,7 @@ enum pm_query_id {
 	PM_QID_CLOCK_GET_PARENTS,
 	PM_QID_CLOCK_GET_ATTRIBUTES,
 	PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
+	PM_QID_CLOCK_GET_MAX_DIVISOR,
 };
 
 enum zynqmp_pm_reset_action {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index dddfcbb..3cd4fe6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -67,7 +67,7 @@ struct fscrypt_operations;
 struct fsverity_info;
 struct fsverity_operations;
 struct fs_context;
-struct fs_parameter_description;
+struct fs_parameter_spec;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
@@ -1575,7 +1575,6 @@ static inline void i_gid_write(struct inode *inode, gid_t gid)
 	inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
 }
 
-extern struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran);
 extern struct timespec64 current_time(struct inode *inode);
 
 /*
@@ -2078,6 +2077,18 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
 	};
 }
 
+static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
+			       struct file *filp)
+{
+	*kiocb = (struct kiocb) {
+		.ki_filp = filp,
+		.ki_flags = kiocb_src->ki_flags,
+		.ki_hint = kiocb_src->ki_hint,
+		.ki_ioprio = kiocb_src->ki_ioprio,
+		.ki_pos = kiocb_src->ki_pos,
+	};
+}
+
 /*
  * Inode state bits.  Protected by inode->i_lock
  *
@@ -2224,7 +2235,7 @@ struct file_system_type {
 #define FS_DISALLOW_NOTIFY_PERM	16	/* Disable fanotify permission events */
 #define FS_RENAME_DOES_D_MOVE	32768	/* FS will handle d_move() during rename() internally. */
 	int (*init_fs_context)(struct fs_context *);
-	const struct fs_parameter_description *parameters;
+	const struct fs_parameter_spec *parameters;
 	struct dentry *(*mount) (struct file_system_type *, int,
 		       const char *, void *);
 	void (*kill_sb) (struct super_block *);
@@ -2552,10 +2563,6 @@ extern int finish_open(struct file *file, struct dentry *dentry,
 			int (*open)(struct inode *, struct file *));
 extern int finish_no_open(struct file *file, struct dentry *dentry);
 
-/* fs/ioctl.c */
-
-extern int ioctl_preallocate(struct file *filp, int mode, void __user *argp);
-
 /* fs/dcache.c */
 extern void __init vfs_caches_init_early(void);
 extern void __init vfs_caches_init(void);
@@ -2741,7 +2748,6 @@ static inline int filemap_fdatawait(struct address_space *mapping)
 
 extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
 				  loff_t lend);
-extern int filemap_write_and_wait(struct address_space *mapping);
 extern int filemap_write_and_wait_range(struct address_space *mapping,
 				        loff_t lstart, loff_t lend);
 extern int __filemap_fdatawrite_range(struct address_space *mapping,
@@ -2751,6 +2757,11 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
 extern int filemap_check_errors(struct address_space *mapping);
 extern void __filemap_set_wb_err(struct address_space *mapping, int err);
 
+static inline int filemap_write_and_wait(struct address_space *mapping)
+{
+	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
+}
+
 extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
 						loff_t lend);
 extern int __must_check file_check_and_advance_wb_err(struct file *file);
@@ -2865,9 +2876,16 @@ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
 
 extern void emergency_sync(void);
 extern void emergency_remount(void);
+
 #ifdef CONFIG_BLOCK
-extern sector_t bmap(struct inode *, sector_t);
+extern int bmap(struct inode *inode, sector_t *block);
+#else
+static inline int bmap(struct inode *inode, sector_t *block)
+{
+	return -EINVAL;
+}
 #endif
+
 extern int notify_change(struct dentry *, struct iattr *, struct inode **);
 extern int inode_permission(struct inode *, int);
 extern int generic_permission(struct inode *, int);
@@ -3108,6 +3126,10 @@ ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
 		rwf_t flags);
 ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
 		rwf_t flags);
+ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
+			   struct iov_iter *iter);
+ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
+			    struct iov_iter *iter);
 
 /* fs/block_dev.c */
 extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
@@ -3303,6 +3325,8 @@ extern int simple_unlink(struct inode *, struct dentry *);
 extern int simple_rmdir(struct inode *, struct dentry *);
 extern int simple_rename(struct inode *, struct dentry *,
 			 struct inode *, struct dentry *, unsigned int);
+extern void simple_recursive_removal(struct dentry *,
+                              void (*callback)(struct dentry *));
 extern int noop_fsync(struct file *, loff_t, loff_t, int);
 extern int noop_set_page_dirty(struct page *page);
 extern void noop_invalidatepage(struct page *page, unsigned int offset,
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index e5c14e2..e6c3e4c 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -54,7 +54,6 @@ enum fs_value_type {
 	fs_value_is_string,		/* Value is a string */
 	fs_value_is_blob,		/* Value is a binary blob */
 	fs_value_is_filename,		/* Value is a filename* + dirfd */
-	fs_value_is_filename_empty,	/* Value is a filename* + dirfd + AT_EMPTY_PATH */
 	fs_value_is_file,		/* Value is a file* */
 };
 
@@ -74,6 +73,11 @@ struct fs_parameter {
 	int	dirfd;
 };
 
+struct p_log {
+	const char *prefix;
+	struct fc_log *log;
+};
+
 /*
  * Filesystem context for holding the parameters used in the creation or
  * reconfiguration of a superblock.
@@ -93,7 +97,7 @@ struct fs_context {
 	struct user_namespace	*user_ns;	/* The user namespace for this mount */
 	struct net		*net_ns;	/* The network namespace for this mount */
 	const struct cred	*cred;		/* The mounter's credentials */
-	struct fc_log		*log;		/* Logging buffer */
+	struct p_log		log;		/* Logging buffer */
 	const char		*source;	/* The source name (eg. dev path) */
 	void			*security;	/* Linux S&M options */
 	void			*s_fs_info;	/* Proposed s_fs_info */
@@ -182,9 +186,13 @@ struct fc_log {
 	char		*buffer[8];
 };
 
-extern __attribute__((format(printf, 2, 3)))
-void logfc(struct fs_context *fc, const char *fmt, ...);
+extern __attribute__((format(printf, 4, 5)))
+void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt, ...);
 
+#define __logfc(fc, l, fmt, ...) logfc((fc)->log.log, NULL, \
+					l, fmt, ## __VA_ARGS__)
+#define __plog(p, l, fmt, ...) logfc((p)->log, (p)->prefix, \
+					l, fmt, ## __VA_ARGS__)
 /**
  * infof - Store supplementary informational message
  * @fc: The context in which to log the informational message
@@ -193,7 +201,9 @@ void logfc(struct fs_context *fc, const char *fmt, ...);
  * Store the supplementary informational message for the process if the process
  * has enabled the facility.
  */
-#define infof(fc, fmt, ...) ({ logfc(fc, "i "fmt, ## __VA_ARGS__); })
+#define infof(fc, fmt, ...) __logfc(fc, 'i', fmt, ## __VA_ARGS__)
+#define info_plog(p, fmt, ...) __plog(p, 'i', fmt, ## __VA_ARGS__)
+#define infofc(fc, fmt, ...) __plog((&(fc)->log), 'i', fmt, ## __VA_ARGS__)
 
 /**
  * warnf - Store supplementary warning message
@@ -203,7 +213,9 @@ void logfc(struct fs_context *fc, const char *fmt, ...);
  * Store the supplementary warning message for the process if the process has
  * enabled the facility.
  */
-#define warnf(fc, fmt, ...) ({ logfc(fc, "w "fmt, ## __VA_ARGS__); })
+#define warnf(fc, fmt, ...) __logfc(fc, 'w', fmt, ## __VA_ARGS__)
+#define warn_plog(p, fmt, ...) __plog(p, 'w', fmt, ## __VA_ARGS__)
+#define warnfc(fc, fmt, ...) __plog((&(fc)->log), 'w', fmt, ## __VA_ARGS__)
 
 /**
  * errorf - Store supplementary error message
@@ -213,7 +225,9 @@ void logfc(struct fs_context *fc, const char *fmt, ...);
  * Store the supplementary error message for the process if the process has
  * enabled the facility.
  */
-#define errorf(fc, fmt, ...) ({ logfc(fc, "e "fmt, ## __VA_ARGS__); })
+#define errorf(fc, fmt, ...) __logfc(fc, 'e', fmt, ## __VA_ARGS__)
+#define error_plog(p, fmt, ...) __plog(p, 'e', fmt, ## __VA_ARGS__)
+#define errorfc(fc, fmt, ...) __plog((&(fc)->log), 'e', fmt, ## __VA_ARGS__)
 
 /**
  * invalf - Store supplementary invalid argument error message
@@ -223,6 +237,8 @@ void logfc(struct fs_context *fc, const char *fmt, ...);
  * Store the supplementary error message for the process if the process has
  * enabled the facility and return -EINVAL.
  */
-#define invalf(fc, fmt, ...) ({	errorf(fc, fmt, ## __VA_ARGS__); -EINVAL; })
+#define invalf(fc, fmt, ...) (errorf(fc, fmt, ## __VA_ARGS__), -EINVAL)
+#define inval_plog(p, fmt, ...) (error_plog(p, fmt, ## __VA_ARGS__), -EINVAL)
+#define invalfc(fc, fmt, ...) (errorfc(fc, fmt, ## __VA_ARGS__), -EINVAL)
 
 #endif /* _LINUX_FS_CONTEXT_H */
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index dee140d..2eab6d5 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -17,26 +17,18 @@ struct constant_table {
 	int		value;
 };
 
+struct fs_parameter_spec;
+struct fs_parse_result;
+typedef int fs_param_type(struct p_log *,
+			  const struct fs_parameter_spec *,
+			  struct fs_parameter *,
+			  struct fs_parse_result *);
 /*
  * The type of parameter expected.
  */
-enum fs_parameter_type {
-	__fs_param_wasnt_defined,
-	fs_param_is_flag,
-	fs_param_is_bool,
-	fs_param_is_u32,
-	fs_param_is_u32_octal,
-	fs_param_is_u32_hex,
-	fs_param_is_s32,
-	fs_param_is_u64,
-	fs_param_is_enum,
-	fs_param_is_string,
-	fs_param_is_blob,
-	fs_param_is_blockdev,
-	fs_param_is_path,
-	fs_param_is_fd,
-	nr__fs_parameter_type,
-};
+fs_param_type fs_param_is_bool, fs_param_is_u32, fs_param_is_s32, fs_param_is_u64,
+	fs_param_is_enum, fs_param_is_string, fs_param_is_blob, fs_param_is_blockdev,
+	fs_param_is_path, fs_param_is_fd;
 
 /*
  * Specification of the type of value a parameter wants.
@@ -46,25 +38,13 @@ enum fs_parameter_type {
  */
 struct fs_parameter_spec {
 	const char		*name;
+	fs_param_type		*type;	/* The desired parameter type */
 	u8			opt;	/* Option number (returned by fs_parse()) */
-	enum fs_parameter_type	type:8;	/* The desired parameter type */
 	unsigned short		flags;
-#define fs_param_v_optional	0x0001	/* The value is optional */
 #define fs_param_neg_with_no	0x0002	/* "noxxx" is negative param */
 #define fs_param_neg_with_empty	0x0004	/* "xxx=" is negative param */
 #define fs_param_deprecated	0x0008	/* The param is deprecated */
-};
-
-struct fs_parameter_enum {
-	u8		opt;		/* Option number (as fs_parameter_spec::opt) */
-	char		name[14];
-	u8		value;
-};
-
-struct fs_parameter_description {
-	const char	name[16];		/* Name for logging purposes */
-	const struct fs_parameter_spec *specs;	/* List of param specifications */
-	const struct fs_parameter_enum *enums;	/* Enum values */
+	const void		*data;
 };
 
 /*
@@ -72,7 +52,6 @@ struct fs_parameter_description {
  */
 struct fs_parse_result {
 	bool			negated;	/* T if param was "noxxx" */
-	bool			has_value;	/* T if value supplied to param */
 	union {
 		bool		boolean;	/* For spec_bool */
 		int		int_32;		/* For spec_s32/spec_enum */
@@ -81,28 +60,37 @@ struct fs_parse_result {
 	};
 };
 
-extern int fs_parse(struct fs_context *fc,
-		    const struct fs_parameter_description *desc,
+extern int __fs_parse(struct p_log *log,
+		    const struct fs_parameter_spec *desc,
 		    struct fs_parameter *value,
 		    struct fs_parse_result *result);
+
+static inline int fs_parse(struct fs_context *fc,
+	     const struct fs_parameter_spec *desc,
+	     struct fs_parameter *param,
+	     struct fs_parse_result *result)
+{
+	return __fs_parse(&fc->log, desc, param, result);
+}
+
 extern int fs_lookup_param(struct fs_context *fc,
 			   struct fs_parameter *param,
 			   bool want_bdev,
 			   struct path *_path);
 
-extern int __lookup_constant(const struct constant_table tbl[], size_t tbl_size,
-			     const char *name, int not_found);
-#define lookup_constant(t, n, nf) __lookup_constant(t, ARRAY_SIZE(t), (n), (nf))
+extern int lookup_constant(const struct constant_table tbl[], const char *name, int not_found);
 
 #ifdef CONFIG_VALIDATE_FS_PARSER
 extern bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
 				    int low, int high, int special);
-extern bool fs_validate_description(const struct fs_parameter_description *desc);
+extern bool fs_validate_description(const char *name,
+				    const struct fs_parameter_spec *desc);
 #else
 static inline bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
 					   int low, int high, int special)
 { return true; }
-static inline bool fs_validate_description(const struct fs_parameter_description *desc)
+static inline bool fs_validate_description(const char *name,
+					   const struct fs_parameter_spec *desc)
 { return true; }
 #endif
 
@@ -115,33 +103,32 @@ static inline bool fs_validate_description(const struct fs_parameter_description
  * work, but any such case is probably a sign that new helper is needed.
  * Helpers will remain stable; low-level implementation may change.
  */
-#define __fsparam(TYPE, NAME, OPT, FLAGS) \
+#define __fsparam(TYPE, NAME, OPT, FLAGS, DATA) \
 	{ \
 		.name = NAME, \
 		.opt = OPT, \
 		.type = TYPE, \
-		.flags = FLAGS \
+		.flags = FLAGS, \
+		.data = DATA \
 	}
 
-#define fsparam_flag(NAME, OPT)	__fsparam(fs_param_is_flag, NAME, OPT, 0)
+#define fsparam_flag(NAME, OPT)	__fsparam(NULL, NAME, OPT, 0, NULL)
 #define fsparam_flag_no(NAME, OPT) \
-				__fsparam(fs_param_is_flag, NAME, OPT, \
-					    fs_param_neg_with_no)
-#define fsparam_bool(NAME, OPT)	__fsparam(fs_param_is_bool, NAME, OPT, 0)
-#define fsparam_u32(NAME, OPT)	__fsparam(fs_param_is_u32, NAME, OPT, 0)
+			__fsparam(NULL, NAME, OPT, fs_param_neg_with_no, NULL)
+#define fsparam_bool(NAME, OPT)	__fsparam(fs_param_is_bool, NAME, OPT, 0, NULL)
+#define fsparam_u32(NAME, OPT)	__fsparam(fs_param_is_u32, NAME, OPT, 0, NULL)
 #define fsparam_u32oct(NAME, OPT) \
-				__fsparam(fs_param_is_u32_octal, NAME, OPT, 0)
+			__fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)8)
 #define fsparam_u32hex(NAME, OPT) \
-				__fsparam(fs_param_is_u32_hex, NAME, OPT, 0)
-#define fsparam_s32(NAME, OPT)	__fsparam(fs_param_is_s32, NAME, OPT, 0)
-#define fsparam_u64(NAME, OPT)	__fsparam(fs_param_is_u64, NAME, OPT, 0)
-#define fsparam_enum(NAME, OPT)	__fsparam(fs_param_is_enum, NAME, OPT, 0)
+			__fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)16)
+#define fsparam_s32(NAME, OPT)	__fsparam(fs_param_is_s32, NAME, OPT, 0, NULL)
+#define fsparam_u64(NAME, OPT)	__fsparam(fs_param_is_u64, NAME, OPT, 0, NULL)
+#define fsparam_enum(NAME, OPT, array)	__fsparam(fs_param_is_enum, NAME, OPT, 0, array)
 #define fsparam_string(NAME, OPT) \
-				__fsparam(fs_param_is_string, NAME, OPT, 0)
-#define fsparam_blob(NAME, OPT)	__fsparam(fs_param_is_blob, NAME, OPT, 0)
-#define fsparam_bdev(NAME, OPT)	__fsparam(fs_param_is_blockdev, NAME, OPT, 0)
-#define fsparam_path(NAME, OPT)	__fsparam(fs_param_is_path, NAME, OPT, 0)
-#define fsparam_fd(NAME, OPT)	__fsparam(fs_param_is_fd, NAME, OPT, 0)
-
+				__fsparam(fs_param_is_string, NAME, OPT, 0, NULL)
+#define fsparam_blob(NAME, OPT)	__fsparam(fs_param_is_blob, NAME, OPT, 0, NULL)
+#define fsparam_bdev(NAME, OPT)	__fsparam(fs_param_is_blockdev, NAME, OPT, 0, NULL)
+#define fsparam_path(NAME, OPT)	__fsparam(fs_param_is_path, NAME, OPT, 0, NULL)
+#define fsparam_fd(NAME, OPT)	__fsparam(fs_param_is_fd, NAME, OPT, 0, NULL)
 
 #endif /* _LINUX_FS_PARSER_H */
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index e2480ef..6ef05bc 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -94,16 +94,15 @@ struct gpio_irq_chip {
 				     unsigned int *parent_type);
 
 	/**
-	 * @populate_parent_fwspec:
+	 * @populate_parent_alloc_arg:
 	 *
-	 * This optional callback populates the &struct irq_fwspec for the
-	 * parent's IRQ domain. If this is not specified, then
+	 * This optional callback allocates and populates the specific struct
+	 * for the parent's IRQ domain. If this is not specified, then
 	 * &gpiochip_populate_parent_fwspec_twocell will be used. A four-cell
 	 * variant named &gpiochip_populate_parent_fwspec_fourcell is also
 	 * available.
 	 */
-	void (*populate_parent_fwspec)(struct gpio_chip *chip,
-				       struct irq_fwspec *fwspec,
+	void *(*populate_parent_alloc_arg)(struct gpio_chip *chip,
 				       unsigned int parent_hwirq,
 				       unsigned int parent_type);
 
@@ -537,29 +536,27 @@ struct bgpio_pdata {
 
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 
-void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
-					     struct irq_fwspec *fwspec,
+void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
 					     unsigned int parent_hwirq,
 					     unsigned int parent_type);
-void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
-					      struct irq_fwspec *fwspec,
+void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
 					      unsigned int parent_hwirq,
 					      unsigned int parent_type);
 
 #else
 
-static inline void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
-						    struct irq_fwspec *fwspec,
+static inline void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
 						    unsigned int parent_hwirq,
 						    unsigned int parent_type)
 {
+	return NULL;
 }
 
-static inline void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
-						     struct irq_fwspec *fwspec,
+static inline void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
 						     unsigned int parent_hwirq,
 						     unsigned int parent_type)
 {
+	return NULL;
 }
 
 #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
@@ -585,11 +582,6 @@ int gpiochip_irq_domain_activate(struct irq_domain *domain,
 void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
 				    struct irq_data *data);
 
-void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
-		struct irq_chip *irqchip,
-		unsigned int parent_irq,
-		irq_flow_handler_t parent_handler);
-
 void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
 		struct irq_chip *irqchip,
 		unsigned int parent_irq);
@@ -715,7 +707,8 @@ gpiochip_remove_pin_ranges(struct gpio_chip *chip)
 
 #endif /* CONFIG_PINCTRL */
 
-struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip,
+					    unsigned int hwnum,
 					    const char *label,
 					    enum gpio_lookup_flags lflags,
 					    enum gpiod_flags dflags);
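
For parent domains that still consume a two-cell struct irq_fwspec, drivers can simply assign gpiochip_populate_parent_fwspec_twocell() to the new hook; a custom callback only has to allocate and fill whatever argument its parent domain's alloc path expects, and the gpiolib core is expected to free it after the parent allocation. A sketch of such a callback, assuming a two-cell parent and invented "foo" naming:

#include <linux/gpio/driver.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>

/* Illustrative only: build the two-cell fwspec this parent domain expects. */
static void *foo_populate_parent_alloc_arg(struct gpio_chip *chip,
					   unsigned int parent_hwirq,
					   unsigned int parent_type)
{
	struct irq_fwspec *fwspec;

	fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
	if (!fwspec)
		return NULL;

	fwspec->fwnode = chip->irq.parent_domain->fwnode;
	fwspec->param_count = 2;
	fwspec->param[0] = parent_hwirq;
	fwspec->param[1] = parent_type;

	return fwspec;
}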
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 6f8d772..62d216f 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -24,16 +24,20 @@ struct iommu_group;
  * struct host1x_client_ops - host1x client operations
  * @init: host1x client initialization code
  * @exit: host1x client tear down code
+ * @suspend: host1x client suspend code
+ * @resume: host1x client resume code
  */
 struct host1x_client_ops {
 	int (*init)(struct host1x_client *client);
 	int (*exit)(struct host1x_client *client);
+	int (*suspend)(struct host1x_client *client);
+	int (*resume)(struct host1x_client *client);
 };
 
 /**
  * struct host1x_client - host1x client structure
  * @list: list node for the host1x client
- * @parent: pointer to struct device representing the host1x controller
+ * @host: pointer to struct device representing the host1x controller
  * @dev: pointer to struct device backing this host1x client
  * @group: IOMMU group that this client is a member of
  * @ops: host1x client operations
@@ -44,7 +48,7 @@ struct host1x_client_ops {
  */
 struct host1x_client {
 	struct list_head list;
-	struct device *parent;
+	struct device *host;
 	struct device *dev;
 	struct iommu_group *group;
 
@@ -55,6 +59,10 @@ struct host1x_client {
 
 	struct host1x_syncpt **syncpts;
 	unsigned int num_syncpts;
+
+	struct host1x_client *parent;
+	unsigned int usecount;
+	struct mutex lock;
 };
 
 /*
@@ -72,8 +80,6 @@ struct host1x_bo_ops {
 	void (*unpin)(struct device *dev, struct sg_table *sgt);
 	void *(*mmap)(struct host1x_bo *bo);
 	void (*munmap)(struct host1x_bo *bo, void *addr);
-	void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
-	void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
 };
 
 struct host1x_bo {
@@ -119,17 +125,6 @@ static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
 	bo->ops->munmap(bo, addr);
 }
 
-static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
-{
-	return bo->ops->kmap(bo, pagenum);
-}
-
-static inline void host1x_bo_kunmap(struct host1x_bo *bo,
-				    unsigned int pagenum, void *addr)
-{
-	bo->ops->kunmap(bo, pagenum, addr);
-}
-
 /*
  * host1x syncpoints
  */
@@ -322,6 +317,9 @@ int host1x_device_exit(struct host1x_device *device);
 int host1x_client_register(struct host1x_client *client);
 int host1x_client_unregister(struct host1x_client *client);
 
+int host1x_client_suspend(struct host1x_client *client);
+int host1x_client_resume(struct host1x_client *client);
+
 struct tegra_mipi_device;
 
 struct tegra_mipi_device *tegra_mipi_request(struct device *device);
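
The new suspend/resume ops, together with the parent pointer, usecount and lock added to struct host1x_client, let the host1x core power clients on demand and propagate that up the client chain. A consumer would typically bracket hardware access with the new calls, roughly as in this illustrative sketch (the foo_* naming is invented):

#include <linux/host1x.h>

/* Illustrative: keep the client (and its parents) powered for the
 * duration of the programming sequence.
 */
static int foo_program_hw(struct host1x_client *client)
{
	int err;

	err = host1x_client_resume(client);
	if (err < 0)
		return err;

	/* ... write registers ... */

	host1x_client_suspend(client);

	return 0;
}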
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 0b84e13..5aca3d1 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -160,6 +160,7 @@ extern unsigned long thp_get_unmapped_area(struct file *filp,
 
 extern void prep_transhuge_page(struct page *page);
 extern void free_transhuge_page(struct page *page);
+bool is_transparent_hugepage(struct page *page);
 
 bool can_split_huge_page(struct page *page, int *pextra_pins);
 int split_huge_page_to_list(struct page *page, struct list_head *list);
@@ -308,6 +309,11 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
 
 static inline void prep_transhuge_page(struct page *page) {}
 
+static inline bool is_transparent_hugepage(struct page *page)
+{
+	return false;
+}
+
 #define transparent_hugepage_flags 0UL
 
 #define thp_get_unmapped_area	NULL
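
is_transparent_hugepage() gives code outside mm/huge_memory.c a single predicate for "this page is a THP", and the !CONFIG_TRANSPARENT_HUGEPAGE stub above makes it compile away to false. A trivial, illustrative use (the caller is invented):

#include <linux/huge_mm.h>
#include <linux/mm_types.h>

/* Illustrative: count THPs while walking a list of pages. */
static void foo_count_thp(struct page *page, unsigned long *nr_thp)
{
	if (is_transparent_hugepage(page))
		(*nr_thp)++;
}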
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 26f3aee..692c89c 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -425,6 +425,8 @@ enum vmbus_channel_message_type {
 	CHANNELMSG_19				= 19,
 	CHANNELMSG_20				= 20,
 	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
+	CHANNELMSG_22				= 22,
+	CHANNELMSG_TL_CONNECT_RESULT		= 23,
 	CHANNELMSG_COUNT
 };
 
@@ -1433,6 +1435,8 @@ struct hv_util_service {
 	void (*util_cb)(void *);
 	int (*util_init)(struct hv_util_service *);
 	void (*util_deinit)(void);
+	int (*util_pre_suspend)(void);
+	int (*util_pre_resume)(void);
 };
 
 struct vmbuspipe_hdr {
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 582ef05..f834687 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -50,8 +50,8 @@ struct property_entry;
  * transmit an arbitrary number of messages without interruption.
 * @count must be less than 64k since msg.len is u16.
  */
-extern int i2c_transfer_buffer_flags(const struct i2c_client *client,
-				     char *buf, int count, u16 flags);
+int i2c_transfer_buffer_flags(const struct i2c_client *client,
+			      char *buf, int count, u16 flags);
 
 /**
  * i2c_master_recv - issue a single I2C message in master receive mode
@@ -115,11 +115,9 @@ static inline int i2c_master_send_dmasafe(const struct i2c_client *client,
 
 /* Transfer num messages.
  */
-extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
-			int num);
+int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
 /* Unlocked flavor */
-extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
-			  int num);
+int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
 
 /* This is the very generalized SMBus access routine. You probably do not
    want to use this, though; one of the functions below may be much easier,
@@ -138,16 +136,14 @@ s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
 /* Now follow the 'nice' access routines. These also document the calling
    conventions of i2c_smbus_xfer. */
 
-extern s32 i2c_smbus_read_byte(const struct i2c_client *client);
-extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
-extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client,
-				    u8 command);
-extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
-				     u8 command, u8 value);
-extern s32 i2c_smbus_read_word_data(const struct i2c_client *client,
-				    u8 command);
-extern s32 i2c_smbus_write_word_data(const struct i2c_client *client,
-				     u8 command, u16 value);
+s32 i2c_smbus_read_byte(const struct i2c_client *client);
+s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
+s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command);
+s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
+			      u8 command, u8 value);
+s32 i2c_smbus_read_word_data(const struct i2c_client *client, u8 command);
+s32 i2c_smbus_write_word_data(const struct i2c_client *client,
+			      u8 command, u16 value);
 
 static inline s32
 i2c_smbus_read_word_swapped(const struct i2c_client *client, u8 command)
@@ -165,19 +161,18 @@ i2c_smbus_write_word_swapped(const struct i2c_client *client,
 }
 
 /* Returns the number of read bytes */
-extern s32 i2c_smbus_read_block_data(const struct i2c_client *client,
-				     u8 command, u8 *values);
-extern s32 i2c_smbus_write_block_data(const struct i2c_client *client,
-				      u8 command, u8 length, const u8 *values);
+s32 i2c_smbus_read_block_data(const struct i2c_client *client,
+			      u8 command, u8 *values);
+s32 i2c_smbus_write_block_data(const struct i2c_client *client,
+			       u8 command, u8 length, const u8 *values);
 /* Returns the number of read bytes */
-extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
-					 u8 command, u8 length, u8 *values);
-extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
-					  u8 command, u8 length,
-					  const u8 *values);
-extern s32
-i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
-					  u8 command, u8 length, u8 *values);
+s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
+				  u8 command, u8 length, u8 *values);
+s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
+				   u8 command, u8 length, const u8 *values);
+s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
+					      u8 command, u8 length,
+					      u8 *values);
 int i2c_get_device_id(const struct i2c_client *client,
 		      struct i2c_device_identity *id);
 #endif /* I2C */
@@ -337,10 +332,10 @@ struct i2c_client {
 };
 #define to_i2c_client(d) container_of(d, struct i2c_client, dev)
 
-extern struct i2c_client *i2c_verify_client(struct device *dev);
-extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
-extern const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
-					const struct i2c_client *client);
+struct i2c_client *i2c_verify_client(struct device *dev);
+struct i2c_adapter *i2c_verify_adapter(struct device *dev);
+const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
+					 const struct i2c_client *client);
 
 static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
 {
@@ -369,9 +364,9 @@ enum i2c_slave_event {
 	I2C_SLAVE_STOP,
 };
 
-extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
-extern int i2c_slave_unregister(struct i2c_client *client);
-extern bool i2c_detect_slave_mode(struct device *dev);
+int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
+int i2c_slave_unregister(struct i2c_client *client);
+bool i2c_detect_slave_mode(struct device *dev);
 
 static inline int i2c_slave_event(struct i2c_client *client,
 				  enum i2c_slave_event event, u8 *val)
@@ -440,10 +435,10 @@ struct i2c_board_info {
  * with integrated I2C, a config eeprom, sensors, and a codec that's
  * used in conjunction with the primary hardware.
  */
-extern struct i2c_client *
+struct i2c_client *
 i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
 
-extern struct i2c_client *
+struct i2c_client *
 i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
 
 /* If you don't know the exact address of an I2C device, use this variant
@@ -452,33 +447,33 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
  * it must return 1 on successful probe, 0 otherwise. If it is not provided,
  * a default probing method is used.
  */
-extern struct i2c_client *
+struct i2c_client *
 i2c_new_scanned_device(struct i2c_adapter *adap,
 		       struct i2c_board_info *info,
 		       unsigned short const *addr_list,
 		       int (*probe)(struct i2c_adapter *adap, unsigned short addr));
 
-extern struct i2c_client *
+struct i2c_client *
 i2c_new_probed_device(struct i2c_adapter *adap,
 		       struct i2c_board_info *info,
 		       unsigned short const *addr_list,
 		       int (*probe)(struct i2c_adapter *adap, unsigned short addr));
 
 /* Common custom probe functions */
-extern int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr);
+int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr);
 
-extern struct i2c_client *
+struct i2c_client *
 i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address);
 
-extern struct i2c_client *
+struct i2c_client *
 devm_i2c_new_dummy_device(struct device *dev, struct i2c_adapter *adap, u16 address);
 
-extern struct i2c_client *
+struct i2c_client *
 i2c_new_ancillary_device(struct i2c_client *client,
-				const char *name,
-				u16 default_addr);
+			 const char *name,
+			 u16 default_addr);
 
-extern void i2c_unregister_device(struct i2c_client *client);
+void i2c_unregister_device(struct i2c_client *client);
 #endif /* I2C */
 
 /* Mainboard arch_initcall() code should register all its I2C devices.
@@ -486,7 +481,7 @@ extern void i2c_unregister_device(struct i2c_client *client);
  * Modules for add-on boards must use other calls.
  */
 #ifdef CONFIG_I2C_BOARDINFO
-extern int
+int
 i2c_register_board_info(int busnum, struct i2c_board_info const *info,
 			unsigned n);
 #else
@@ -840,12 +835,12 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
 /* administration...
  */
 #if IS_ENABLED(CONFIG_I2C)
-extern int i2c_add_adapter(struct i2c_adapter *adap);
-extern void i2c_del_adapter(struct i2c_adapter *adap);
-extern int i2c_add_numbered_adapter(struct i2c_adapter *adap);
+int i2c_add_adapter(struct i2c_adapter *adap);
+void i2c_del_adapter(struct i2c_adapter *adap);
+int i2c_add_numbered_adapter(struct i2c_adapter *adap);
 
-extern int i2c_register_driver(struct module *owner, struct i2c_driver *driver);
-extern void i2c_del_driver(struct i2c_driver *driver);
+int i2c_register_driver(struct module *owner, struct i2c_driver *driver);
+void i2c_del_driver(struct i2c_driver *driver);
 
 /* use a define to avoid include chaining to get THIS_MODULE */
 #define i2c_add_driver(driver) \
@@ -858,12 +853,12 @@ static inline bool i2c_client_has_driver(struct i2c_client *client)
 
 /* call the i2c_client->command() of all attached clients with
  * the given arguments */
-extern void i2c_clients_command(struct i2c_adapter *adap,
-				unsigned int cmd, void *arg);
+void i2c_clients_command(struct i2c_adapter *adap,
+			 unsigned int cmd, void *arg);
 
-extern struct i2c_adapter *i2c_get_adapter(int nr);
-extern void i2c_put_adapter(struct i2c_adapter *adap);
-extern unsigned int i2c_adapter_depth(struct i2c_adapter *adapter);
+struct i2c_adapter *i2c_get_adapter(int nr);
+void i2c_put_adapter(struct i2c_adapter *adap);
+unsigned int i2c_adapter_depth(struct i2c_adapter *adapter);
 
 void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults);
 
@@ -935,15 +930,15 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
 
 #if IS_ENABLED(CONFIG_OF)
 /* must call put_device() when done with returned i2c_client device */
-extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
 
 /* must call put_device() when done with returned i2c_adapter device */
-extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
+struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
 
 /* must call i2c_put_adapter() when done with returned i2c_adapter device */
 struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
 
-extern const struct of_device_id
+const struct of_device_id
 *i2c_of_match_device(const struct of_device_id *matches,
 		     struct i2c_client *client);
 
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 46b771d..a254841 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -413,6 +413,8 @@ struct ide_disk_ops {
 				      sector_t);
 	int		(*ioctl)(struct ide_drive_s *, struct block_device *,
 				 fmode_t, unsigned int, unsigned long);
+	int		(*compat_ioctl)(struct ide_drive_s *, struct block_device *,
+					fmode_t, unsigned int, unsigned long);
 };
 
 /* ATAPI device flags */
@@ -943,6 +945,10 @@ ide_devset_get(_name, _field); \
 ide_devset_set(_name, _field); \
 IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
 
+#define ide_devset_ro_field(_name, _field) \
+ide_devset_get(_name, _field); \
+IDE_DEVSET(_name, 0, get_##_name, NULL)
+
 #define ide_devset_rw_flag(_name, _field) \
 ide_devset_get_flag(_name, _field); \
 ide_devset_set_flag(_name, _field); \
diff --git a/include/linux/iio/accel/kxcjk_1013.h b/include/linux/iio/accel/kxcjk_1013.h
index 8c3c78b..ea0ecb7 100644
--- a/include/linux/iio/accel/kxcjk_1013.h
+++ b/include/linux/iio/accel/kxcjk_1013.h
@@ -7,8 +7,11 @@
 #ifndef __IIO_KXCJK_1013_H__
 #define __IIO_KXCJK_1013_H__
 
+#include <linux/iio/iio.h>
+
 struct kxcjk_1013_platform_data {
 	bool active_high_intr;
+	struct iio_mount_matrix orientation;
 };
 
 #endif
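
Boards that still hand the driver kxcjk_1013_platform_data can now also describe the sensor's mounting orientation, which the driver can expose through the standard IIO mount-matrix interface. A hypothetical board-file entry (the matrix values are only an example):

#include <linux/iio/accel/kxcjk_1013.h>

/* Illustrative board data: active-high interrupt, Z axis mounted upside down. */
static const struct kxcjk_1013_platform_data foo_kxcjk_pdata = {
	.active_high_intr = true,
	.orientation = {
		.rotation = {
			"1", "0", "0",
			"0", "1", "0",
			"0", "0", "-1",
		},
	},
};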
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 8a4e25a..5a127c0 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -40,6 +40,7 @@ struct iio_dev;
  * @read_mask: Mask for the communications register having the read bit set.
  * @data_reg: Address of the data register, if 0 the default address of 0x3 will
  *   be used.
+ * @irq_flags: flags for the interrupt used by the triggered buffer
  */
 struct ad_sigma_delta_info {
 	int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
@@ -49,6 +50,7 @@ struct ad_sigma_delta_info {
 	unsigned int addr_shift;
 	unsigned int read_mask;
 	unsigned int data_reg;
+	unsigned long irq_flags;
 };
 
 /**
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index d1171db2..a4d2d80 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -18,7 +18,7 @@ struct iio_buffer;
 /**
  * struct iio_buffer_access_funcs - access functions for buffers.
  * @store_to:		actually store stuff to the buffer
- * @read_first_n:	try to get a specified number of bytes (must exist)
+ * @read:		try to get a specified number of bytes (must exist)
  * @data_available:	indicates how much data is available for reading from
  *			the buffer.
  * @request_update:	if a parameter change has been marked, update underlying
@@ -45,9 +45,7 @@ struct iio_buffer;
  **/
 struct iio_buffer_access_funcs {
 	int (*store_to)(struct iio_buffer *buffer, const void *data);
-	int (*read_first_n)(struct iio_buffer *buffer,
-			    size_t n,
-			    char __user *buf);
+	int (*read)(struct iio_buffer *buffer, size_t n, char __user *buf);
 	size_t (*data_available)(struct iio_buffer *buffer);
 
 	int (*request_update)(struct iio_buffer *buffer);
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 686be53..33e9399 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -315,16 +315,6 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
 ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
 				struct device_attribute *attr, char *buf);
 
-#ifdef CONFIG_OF
-void st_sensors_of_name_probe(struct device *dev,
-			      const struct of_device_id *match,
-			      char *name, int len);
-#else
-static inline void st_sensors_of_name_probe(struct device *dev,
-					    const struct of_device_id *match,
-					    char *name, int len)
-{
-}
-#endif
+void st_sensors_dev_name_probe(struct device *dev, char *name, int len);
 
 #endif /* ST_SENSORS_H */
diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h
index 01e424e..5f15cf0 100644
--- a/include/linux/iio/common/st_sensors_i2c.h
+++ b/include/linux/iio/common/st_sensors_i2c.h
@@ -12,18 +12,8 @@
 
 #include <linux/i2c.h>
 #include <linux/iio/common/st_sensors.h>
-#include <linux/of.h>
 
 int st_sensors_i2c_configure(struct iio_dev *indio_dev,
 			     struct i2c_client *client);
 
-#ifdef CONFIG_ACPI
-int st_sensors_match_acpi_device(struct device *dev);
-#else
-static inline int st_sensors_match_acpi_device(struct device *dev)
-{
-	return -ENODEV;
-}
-#endif
-
 #endif /* ST_SENSORS_I2C_H */
diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h
index ce9490b..de45cf2 100644
--- a/include/linux/iio/frequency/adf4350.h
+++ b/include/linux/iio/frequency/adf4350.h
@@ -103,9 +103,6 @@
  * @r2_user_settings:	User defined settings for ADF4350/1 REGISTER_2.
  * @r3_user_settings:	User defined settings for ADF4350/1 REGISTER_3.
  * @r4_user_settings:	User defined settings for ADF4350/1 REGISTER_4.
- * @gpio_lock_detect:	Optional, if set with a valid GPIO number,
- *			pll lock state is tested upon read.
- *			If not used - set to -1.
  */
 
 struct adf4350_platform_data {
@@ -121,7 +118,6 @@ struct adf4350_platform_data {
 	unsigned		r2_user_settings;
 	unsigned		r3_user_settings;
 	unsigned		r4_user_settings;
-	int			gpio_lock_detect;
 };
 
 #endif /* IIO_PLL_ADF4350_H_ */
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index 92aae14..d2fcf45 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -23,6 +23,17 @@ struct adis;
 struct adis_burst;
 
 /**
+ * struct adis_timeout - ADIS chip variant timeouts
+ * @reset_ms: Wait time after the RST pin goes inactive
+ * @sw_reset_ms: Wait time after the software reset command
+ * @self_test_ms: Wait time after the self-test command
+ */
+struct adis_timeout {
+	u16 reset_ms;
+	u16 sw_reset_ms;
+	u16 self_test_ms;
+};
+/**
  * struct adis_data - ADIS chip variant specific data
  * @read_delay: SPI delay for read operations in us
  * @write_delay: SPI delay for write operations in us
@@ -32,6 +43,7 @@ struct adis_burst;
  * @diag_stat_reg: Register address of the DIAG_STAT register
 * @status_error_msgs: Array of error messages
  * @status_error_mask:
+ * @timeouts: Chip specific delays
  */
 struct adis_data {
 	unsigned int read_delay;
@@ -44,7 +56,7 @@ struct adis_data {
 
 	unsigned int self_test_mask;
 	bool self_test_no_autoclear;
-	unsigned int startup_delay;
+	const struct adis_timeout *timeouts;
 
 	const char * const *status_error_msgs;
 	unsigned int status_error_mask;
@@ -61,7 +73,7 @@ struct adis {
 	const struct adis_data	*data;
 	struct adis_burst	*burst;
 
-	struct mutex		txrx_lock;
+	struct mutex		state_lock;
 	struct spi_message	msg;
 	struct spi_transfer	*xfer;
 	unsigned int		current_page;
@@ -73,14 +85,143 @@ struct adis {
 
 int adis_init(struct adis *adis, struct iio_dev *indio_dev,
 	struct spi_device *spi, const struct adis_data *data);
-int adis_reset(struct adis *adis);
+int __adis_reset(struct adis *adis);
 
-int adis_write_reg(struct adis *adis, unsigned int reg,
+/**
+ * adis_reset() - Reset the device
+ * @adis: The adis device
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+static inline int adis_reset(struct adis *adis)
+{
+	int ret;
+
+	mutex_lock(&adis->state_lock);
+	ret = __adis_reset(adis);
+	mutex_unlock(&adis->state_lock);
+
+	return ret;
+}
+
+int __adis_write_reg(struct adis *adis, unsigned int reg,
 	unsigned int val, unsigned int size);
-int adis_read_reg(struct adis *adis, unsigned int reg,
+int __adis_read_reg(struct adis *adis, unsigned int reg,
 	unsigned int *val, unsigned int size);
 
 /**
+ * __adis_write_reg_8() - Write single byte to a register (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the register to be written
+ * @value: The value to write
+ */
+static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg,
+	uint8_t val)
+{
+	return __adis_write_reg(adis, reg, val, 1);
+}
+
+/**
+ * __adis_write_reg_16() - Write 2 bytes to a pair of registers (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @value: Value to be written
+ */
+static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg,
+	uint16_t val)
+{
+	return __adis_write_reg(adis, reg, val, 2);
+}
+
+/**
+ * __adis_write_reg_32() - write 4 bytes to four registers (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the four register
+ * @value: Value to be written
+ */
+static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg,
+	uint32_t val)
+{
+	return __adis_write_reg(adis, reg, val, 4);
+}
+
+/**
+ * __adis_read_reg_16() - read 2 bytes from a 16-bit register (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ */
+static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg,
+	uint16_t *val)
+{
+	unsigned int tmp;
+	int ret;
+
+	ret = __adis_read_reg(adis, reg, &tmp, 2);
+	if (ret == 0)
+		*val = tmp;
+
+	return ret;
+}
+
+/**
+ * __adis_read_reg_32() - read 4 bytes from a 32-bit register (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ */
+static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
+	uint32_t *val)
+{
+	unsigned int tmp;
+	int ret;
+
+	ret = __adis_read_reg(adis, reg, &tmp, 4);
+	if (ret == 0)
+		*val = tmp;
+
+	return ret;
+}
+
+/**
+ * adis_write_reg() - write N bytes to register
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @value: The value to write to device (up to 4 bytes)
+ * @size: The size of the @value (in bytes)
+ */
+static inline int adis_write_reg(struct adis *adis, unsigned int reg,
+	unsigned int val, unsigned int size)
+{
+	int ret;
+
+	mutex_lock(&adis->state_lock);
+	ret = __adis_write_reg(adis, reg, val, size);
+	mutex_unlock(&adis->state_lock);
+
+	return ret;
+}
+
+/**
+ * adis_read_reg() - read N bytes from register
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ * @size: The size of the @val buffer
+ */
+static inline int adis_read_reg(struct adis *adis, unsigned int reg,
+	unsigned int *val, unsigned int size)
+{
+	int ret;
+
+	mutex_lock(&adis->state_lock);
+	ret = __adis_read_reg(adis, reg, val, size);
+	mutex_unlock(&adis->state_lock);
+
+	return ret;
+}
+
+/**
  * adis_write_reg_8() - Write single byte to a register
  * @adis: The adis device
  * @reg: The address of the register to be written
@@ -155,7 +296,18 @@ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
 }
 
 int adis_enable_irq(struct adis *adis, bool enable);
-int adis_check_status(struct adis *adis);
+int __adis_check_status(struct adis *adis);
+
+static inline int adis_check_status(struct adis *adis)
+{
+	int ret;
+
+	mutex_lock(&adis->state_lock);
+	ret = __adis_check_status(adis);
+	mutex_unlock(&adis->state_lock);
+
+	return ret;
+}
 
 int adis_initial_startup(struct adis *adis);
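
The split into locked adis_*() wrappers and unlocked __adis_*() primitives lets a driver take state_lock once around a whole sequence of accesses instead of locking per register. A sketch of such a composite read; the register addresses are placeholders, not real ADIS registers:

/* Illustrative: read two related registers without another adis access
 * (or a page change) slipping in between them.
 */
static int foo_read_pair(struct adis *adis, u16 *lo, u16 *hi)
{
	int ret;

	mutex_lock(&adis->state_lock);
	ret = __adis_read_reg_16(adis, 0x0e /* placeholder */, lo);
	if (!ret)
		ret = __adis_read_reg_16(adis, 0x10 /* placeholder */, hi);
	mutex_unlock(&adis->state_lock);

	return ret;
}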
 
diff --git a/include/linux/iio/magnetometer/ak8975.h b/include/linux/iio/magnetometer/ak8975.h
deleted file mode 100644
index ac9366f..0000000
--- a/include/linux/iio/magnetometer/ak8975.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IIO_MAGNETOMETER_AK8975_H__
-#define __IIO_MAGNETOMETER_AK8975_H__
-
-#include <linux/iio/iio.h>
-
-/**
- * struct ak8975_platform_data - AK8975 magnetometer driver platform data
- * @eoc_gpio:    data ready event gpio
- * @orientation: mounting matrix relative to main hardware
- */
-struct ak8975_platform_data {
-	int                     eoc_gpio;
-	struct iio_mount_matrix orientation;
-};
-
-#endif
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index fa824e1..e6fd364 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -25,6 +25,7 @@ enum iio_event_info {
 #define IIO_VAL_INT_MULTIPLE 5
 #define IIO_VAL_FRACTIONAL 10
 #define IIO_VAL_FRACTIONAL_LOG2 11
+#define IIO_VAL_CHAR 12
 
 enum iio_available_type {
 	IIO_AVAIL_LIST,
@@ -57,6 +58,7 @@ enum iio_chan_info_enum {
 	IIO_CHAN_INFO_DEBOUNCE_TIME,
 	IIO_CHAN_INFO_CALIBEMISSIVITY,
 	IIO_CHAN_INFO_OVERSAMPLING_RATIO,
+	IIO_CHAN_INFO_THERMOCOUPLE_TYPE,
 };
 
 #endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 6d8bf4b..4a16b39 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -34,10 +34,13 @@
 #define VTD_STRIDE_SHIFT        (9)
 #define VTD_STRIDE_MASK         (((u64)-1) << VTD_STRIDE_SHIFT)
 
-#define DMA_PTE_READ (1)
-#define DMA_PTE_WRITE (2)
-#define DMA_PTE_LARGE_PAGE (1 << 7)
-#define DMA_PTE_SNP (1 << 11)
+#define DMA_PTE_READ		BIT_ULL(0)
+#define DMA_PTE_WRITE		BIT_ULL(1)
+#define DMA_PTE_LARGE_PAGE	BIT_ULL(7)
+#define DMA_PTE_SNP		BIT_ULL(11)
+
+#define DMA_FL_PTE_PRESENT	BIT_ULL(0)
+#define DMA_FL_PTE_XD		BIT_ULL(63)
 
 #define CONTEXT_TT_MULTI_LEVEL	0
 #define CONTEXT_TT_DEV_IOTLB	1
@@ -435,8 +438,10 @@ enum {
 
 #define VTD_FLAG_TRANS_PRE_ENABLED	(1 << 0)
 #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED	(1 << 1)
+#define VTD_FLAG_SVM_CAPABLE		(1 << 2)
 
 extern int intel_iommu_sm;
+extern spinlock_t device_domain_lock;
 
 #define sm_supported(iommu)	(intel_iommu_sm && ecap_smts((iommu)->ecap))
 #define pasid_supported(iommu)	(sm_supported(iommu) &&			\
@@ -609,10 +614,11 @@ static inline void dma_clear_pte(struct dma_pte *pte)
 static inline u64 dma_pte_addr(struct dma_pte *pte)
 {
 #ifdef CONFIG_64BIT
-	return pte->val & VTD_PAGE_MASK;
+	return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
 #else
 	/* Must have a full atomic 64-bit read */
-	return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
+	return  __cmpxchg64(&pte->val, 0ULL, 0ULL) &
+			VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
 #endif
 }
 
@@ -645,6 +651,8 @@ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 			  unsigned int size_order, u64 type);
 extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 			u16 qdep, u64 addr, unsigned mask);
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+		     unsigned long npages, bool ih);
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern int dmar_ir_support(void);
@@ -656,9 +664,10 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info,
 				     void *data), void *data);
 void iommu_flush_write_buffer(struct intel_iommu *iommu);
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
+struct dmar_domain *find_domain(struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
-int intel_svm_init(struct intel_iommu *iommu);
+extern void intel_svm_check(struct intel_iommu *iommu);
 extern int intel_svm_enable_prq(struct intel_iommu *iommu);
 extern int intel_svm_finish_prq(struct intel_iommu *iommu);
 
@@ -686,6 +695,8 @@ struct intel_svm {
 };
 
 extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
+#else
+static inline void intel_svm_check(struct intel_iommu *iommu) {}
 #endif
 
 #ifdef CONFIG_INTEL_IOMMU_DEBUGFS
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index b16f9ef..0c49453 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -92,17 +92,26 @@ struct icc_node {
 
 #if IS_ENABLED(CONFIG_INTERCONNECT)
 
+int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+		      u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
 struct icc_node *icc_node_create(int id);
 void icc_node_destroy(int id);
 int icc_link_create(struct icc_node *node, const int dst_id);
 int icc_link_destroy(struct icc_node *src, struct icc_node *dst);
 void icc_node_add(struct icc_node *node, struct icc_provider *provider);
 void icc_node_del(struct icc_node *node);
+int icc_nodes_remove(struct icc_provider *provider);
 int icc_provider_add(struct icc_provider *provider);
 int icc_provider_del(struct icc_provider *provider);
 
 #else
 
+static inline int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+				    u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+	return -ENOTSUPP;
+}
+
 static inline struct icc_node *icc_node_create(int id)
 {
 	return ERR_PTR(-ENOTSUPP);
@@ -130,6 +139,11 @@ void icc_node_del(struct icc_node *node)
 {
 }
 
+static inline int icc_nodes_remove(struct icc_provider *provider)
+{
+	return -ENOTSUPP;
+}
+
 static inline int icc_provider_add(struct icc_provider *provider)
 {
 	return -ENOTSUPP;
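
icc_std_aggregate() factors out the usual "sum the average bandwidths, take the maximum peak" policy, and icc_nodes_remove() tears down every node registered to a provider in one call on the remove/error path. An illustrative provider skeleton using both (the foo_* naming and the split into helpers are invented):

#include <linux/interconnect-provider.h>

/* Illustrative: let the core helper do the bandwidth aggregation. */
static int foo_icc_register(struct icc_provider *provider, struct device *dev)
{
	provider->dev = dev;
	provider->aggregate = icc_std_aggregate;
	/* provider->set and provider->xlate are filled in elsewhere */

	return icc_provider_add(provider);
}

static void foo_icc_unregister(struct icc_provider *provider)
{
	icc_nodes_remove(provider);
	icc_provider_del(provider);
}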
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 6e125e9..837058b 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -28,6 +28,7 @@ struct io_mapping {
 
 #ifdef CONFIG_HAVE_ATOMIC_IOMAP
 
+#include <linux/pfn.h>
 #include <asm/iomap.h>
 /*
  * For small address space machines, mapping large objects
@@ -64,12 +65,10 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 			 unsigned long offset)
 {
 	resource_size_t phys_addr;
-	unsigned long pfn;
 
 	BUG_ON(offset >= mapping->size);
 	phys_addr = mapping->base + offset;
-	pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
-	return iomap_atomic_prot_pfn(pfn, mapping->prot);
+	return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
 }
 
 static inline void
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index ee21eed..53d53c6 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -83,12 +83,16 @@ struct io_pgtable_cfg {
 	 * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
 	 *	on unmap, for DMA domains using the flush queue mechanism for
 	 *	delayed invalidation.
+	 *
+	 * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
+	 *	for use in the upper half of a split address space.
 	 */
 	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
 	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
 	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
 	#define IO_PGTABLE_QUIRK_ARM_MTK_EXT	BIT(3)
 	#define IO_PGTABLE_QUIRK_NON_STRICT	BIT(4)
+	#define IO_PGTABLE_QUIRK_ARM_TTBR1	BIT(5)
 	unsigned long			quirks;
 	unsigned long			pgsize_bitmap;
 	unsigned int			ias;
@@ -100,18 +104,33 @@ struct io_pgtable_cfg {
 	/* Low-level data specific to the table format */
 	union {
 		struct {
-			u64	ttbr[2];
-			u64	tcr;
+			u64	ttbr;
+			struct {
+				u32	ips:3;
+				u32	tg:2;
+				u32	sh:2;
+				u32	orgn:2;
+				u32	irgn:2;
+				u32	tsz:6;
+			}	tcr;
 			u64	mair;
 		} arm_lpae_s1_cfg;
 
 		struct {
 			u64	vttbr;
-			u64	vtcr;
+			struct {
+				u32	ps:3;
+				u32	tg:2;
+				u32	sh:2;
+				u32	orgn:2;
+				u32	irgn:2;
+				u32	sl:2;
+				u32	tsz:6;
+			}	vtcr;
 		} arm_lpae_s2_cfg;
 
 		struct {
-			u32	ttbr[2];
+			u32	ttbr;
 			u32	tcr;
 			u32	nmrr;
 			u32	prrr;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index f2223cb..d1b5f4d 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -246,9 +246,10 @@ struct iommu_iotlb_gather {
  * @sva_get_pasid: Get PASID associated to a SVA handle
  * @page_response: handle page request response
  * @cache_invalidate: invalidate translation caches
- * @pgsize_bitmap: bitmap of all possible supported page sizes
  * @sva_bind_gpasid: bind guest pasid and mm
  * @sva_unbind_gpasid: unbind guest pasid and mm
+ * @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @owner: Driver module providing these ops
  */
 struct iommu_ops {
 	bool (*capable)(enum iommu_cap);
@@ -318,6 +319,7 @@ struct iommu_ops {
 	int (*sva_unbind_gpasid)(struct device *dev, int pasid);
 
 	unsigned long pgsize_bitmap;
+	struct module *owner;
 };
 
 /**
@@ -386,12 +388,19 @@ void iommu_device_sysfs_remove(struct iommu_device *iommu);
 int  iommu_device_link(struct iommu_device   *iommu, struct device *link);
 void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
 
-static inline void iommu_device_set_ops(struct iommu_device *iommu,
-					const struct iommu_ops *ops)
+static inline void __iommu_device_set_ops(struct iommu_device *iommu,
+					  const struct iommu_ops *ops)
 {
 	iommu->ops = ops;
 }
 
+#define iommu_device_set_ops(iommu, ops)				\
+do {									\
+	struct iommu_ops *__ops = (struct iommu_ops *)(ops);		\
+	__ops->owner = THIS_MODULE;					\
+	__iommu_device_set_ops(iommu, __ops);				\
+} while (0)
+
 static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
 					   struct fwnode_handle *fwnode)
 {
@@ -456,6 +465,8 @@ extern void iommu_set_fault_handler(struct iommu_domain *domain,
 
 extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
+extern void generic_iommu_put_resv_regions(struct device *dev,
+					   struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
 extern int iommu_request_dma_domain_for_dev(struct device *dev);
 extern void iommu_set_default_passthrough(bool cmd_line);
@@ -570,6 +581,7 @@ struct iommu_group *fsl_mc_device_group(struct device *dev);
  * @ops: ops for this device's IOMMU
  * @iommu_fwnode: firmware handle for this device's IOMMU
  * @iommu_priv: IOMMU driver private data for this device
+ * @num_pasid_bits: number of PASID bits supported by this device
  * @num_ids: number of associated device IDs
  * @ids: IDs which this device may present to the IOMMU
  */
@@ -578,6 +590,7 @@ struct iommu_fwspec {
 	struct fwnode_handle	*iommu_fwnode;
 	void			*iommu_priv;
 	u32			flags;
+	u32			num_pasid_bits;
 	unsigned int		num_ids;
 	u32			ids[1];
 };
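
Turning iommu_device_set_ops() into a macro lets it record THIS_MODULE in the new ops->owner field at the call site, so the core can pin the driver module while devices are attached; callers do not change. An illustrative registration sequence (foo_smmu and foo_iommu_ops are invented; the ops table must not be const since owner is written):

#include <linux/iommu.h>

/* Illustrative driver state and (non-const) ops table. */
struct foo_smmu {
	struct iommu_device iommu;
};

static struct iommu_ops foo_iommu_ops;

static int foo_iommu_register(struct foo_smmu *smmu, struct device *dev)
{
	int ret;

	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, "foo-smmu");
	if (ret)
		return ret;

	/* The macro stamps THIS_MODULE into foo_iommu_ops.owner. */
	iommu_device_set_ops(&smmu->iommu, &foo_iommu_ops);

	return iommu_device_register(&smmu->iommu);
}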
diff --git a/include/linux/ipmi-fru.h b/include/linux/ipmi-fru.h
deleted file mode 100644
index 05c9422..0000000
--- a/include/linux/ipmi-fru.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (C) 2012 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * This work is part of the White Rabbit project, a research effort led
- * by CERN, the European Institute for Nuclear Research.
- */
-#ifndef __LINUX_IPMI_FRU_H__
-#define __LINUX_IPMI_FRU_H__
-#ifdef __KERNEL__
-#  include <linux/types.h>
-#  include <linux/string.h>
-#else
-#  include <stdint.h>
-#  include <string.h>
-#endif
-
-/*
- * These structures match the unaligned crap we have in FRU1011.pdf
- * (http://download.intel.com/design/servers/ipmi/FRU1011.pdf)
- */
-
-/* chapter 8, page 5 */
-struct fru_common_header {
-	uint8_t format;			/* 0x01 */
-	uint8_t internal_use_off;	/* multiple of 8 bytes */
-	uint8_t chassis_info_off;	/* multiple of 8 bytes */
-	uint8_t board_area_off;		/* multiple of 8 bytes */
-	uint8_t product_area_off;	/* multiple of 8 bytes */
-	uint8_t multirecord_off;	/* multiple of 8 bytes */
-	uint8_t pad;			/* must be 0 */
-	uint8_t checksum;		/* sum modulo 256 must be 0 */
-};
-
-/* chapter 9, page 5 -- internal_use: not used by us */
-
-/* chapter 10, page 6 -- chassis info: not used by us */
-
-/* chapter 13, page 9 -- used by board_info_area below */
-struct fru_type_length {
-	uint8_t type_length;
-	uint8_t data[0];
-};
-
-/* chapter 11, page 7 */
-struct fru_board_info_area {
-	uint8_t format;			/* 0x01 */
-	uint8_t area_len;		/* multiple of 8 bytes */
-	uint8_t language;		/* I hope it's 0 */
-	uint8_t mfg_date[3];		/* LSB, minutes since 1996-01-01 */
-	struct fru_type_length tl[0];	/* type-length stuff follows */
-
-	/*
-	 * the TL there are in order:
-	 * Board Manufacturer
-	 * Board Product Name
-	 * Board Serial Number
-	 * Board Part Number
-	 * FRU File ID (may be null)
-	 * more manufacturer-specific stuff
-	 * 0xc1 as a terminator
-	 * 0x00 pad to a multiple of 8 bytes - 1
-	 * checksum (sum of all stuff module 256 must be zero)
-	 */
-};
-
-enum fru_type {
-	FRU_TYPE_BINARY		= 0x00,
-	FRU_TYPE_BCDPLUS	= 0x40,
-	FRU_TYPE_ASCII6		= 0x80,
-	FRU_TYPE_ASCII		= 0xc0, /* not ascii: depends on language */
-};
-
-/*
- * some helpers
- */
-static inline struct fru_board_info_area *fru_get_board_area(
-	const struct fru_common_header *header)
-{
-	/* we know for sure that the header is 8 bytes in size */
-	return (struct fru_board_info_area *)(header + header->board_area_off);
-}
-
-static inline int fru_type(struct fru_type_length *tl)
-{
-	return tl->type_length & 0xc0;
-}
-
-static inline int fru_length(struct fru_type_length *tl)
-{
-	return (tl->type_length & 0x3f) + 1; /* len of whole record */
-}
-
-/* assume ascii-latin1 encoding */
-static inline int fru_strlen(struct fru_type_length *tl)
-{
-	return fru_length(tl) - 1;
-}
-
-static inline char *fru_strcpy(char *dest, struct fru_type_length *tl)
-{
-	int len = fru_strlen(tl);
-	memcpy(dest, tl->data, len);
-	dest[len] = '\0';
-	return dest;
-}
-
-static inline struct fru_type_length *fru_next_tl(struct fru_type_length *tl)
-{
-	return tl + fru_length(tl);
-}
-
-static inline int fru_is_eof(struct fru_type_length *tl)
-{
-	return tl->type_length == 0xc1;
-}
-
-/*
- * External functions defined in fru-parse.c.
- */
-extern int fru_header_cksum_ok(struct fru_common_header *header);
-extern int fru_bia_cksum_ok(struct fru_board_info_area *bia);
-
-/* All these 4 return allocated strings by calling fru_alloc() */
-extern char *fru_get_board_manufacturer(struct fru_common_header *header);
-extern char *fru_get_product_name(struct fru_common_header *header);
-extern char *fru_get_serial_number(struct fru_common_header *header);
-extern char *fru_get_part_number(struct fru_common_header *header);
-
-/* This must be defined by the caller of the above functions */
-extern void *fru_alloc(size_t size);
-
-#endif /* __LINUX_IMPI_FRU_H__ */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index f0b8ca7..83439bf 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -652,10 +652,10 @@ struct rdists {
 	struct {
 		void __iomem	*rd_base;
 		struct page	*pend_page;
-		struct page	*vpe_l1_page;
 		phys_addr_t	phys_base;
 		bool		lpi_enabled;
 		cpumask_t	*vpe_table_mask;
+		void		*vpe_l1_base;
 	} __percpu		*rdist;
 	phys_addr_t		prop_table_pa;
 	void			*prop_table_va;
diff --git a/include/linux/isdn/capilli.h b/include/linux/isdn/capilli.h
index d75e1ad..12be09b 100644
--- a/include/linux/isdn/capilli.h
+++ b/include/linux/isdn/capilli.h
@@ -69,7 +69,6 @@ struct capi_ctr {
 	unsigned short state;			/* controller state */
 	int blocked;				/* output blocked */
 	int traceflag;				/* capi trace */
-	wait_queue_head_t state_wait_queue;
 
 	struct proc_dir_entry *procent;
         char procfn[128];
@@ -80,8 +79,6 @@ int detach_capi_ctr(struct capi_ctr *);
 
 void capi_ctr_ready(struct capi_ctr * card);
 void capi_ctr_down(struct capi_ctr * card);
-void capi_ctr_suspend_output(struct capi_ctr * card);
-void capi_ctr_resume_output(struct capi_ctr * card);
 void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb);
 
 // ---------------------------------------------------------------------------
@@ -91,23 +88,8 @@ struct capi_driver {
 	char name[32];				/* driver name */
 	char revision[32];
 
-	int (*add_card)(struct capi_driver *driver, capicardparams *data);
-
 	/* management information for kcapi */
 	struct list_head list; 
 };
 
-void register_capi_driver(struct capi_driver *driver);
-void unregister_capi_driver(struct capi_driver *driver);
-
-// ---------------------------------------------------------------------------
-// library functions for use by hardware controller drivers
-
-void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize);
-void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci);
-void capilib_release_appl(struct list_head *head, u16 applid);
-void capilib_release(struct list_head *head);
-void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid);
-u16  capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid);
-
 #endif				/* __CAPILLI_H__ */
diff --git a/include/linux/isdn/capiutil.h b/include/linux/isdn/capiutil.h
index 44bd604..953fd50 100644
--- a/include/linux/isdn/capiutil.h
+++ b/include/linux/isdn/capiutil.h
@@ -57,460 +57,4 @@ static inline void capimsg_setu32(void *m, int off, __u32 val)
 #define	CAPIMSG_SETCONTROL(m, contr)	capimsg_setu32(m, 8, contr)
 #define	CAPIMSG_SETDATALEN(m, len)	capimsg_setu16(m, 16, len)
 
-/*----- basic-type definitions -----*/
-
-typedef __u8 *_cstruct;
-
-typedef enum {
-	CAPI_COMPOSE,
-	CAPI_DEFAULT
-} _cmstruct;
-
-/*
-   The _cmsg structure contains all possible CAPI 2.0 parameter.
-   All parameters are stored here first. The function CAPI_CMSG_2_MESSAGE
-   assembles the parameter and builds CAPI2.0 conform messages.
-   CAPI_MESSAGE_2_CMSG disassembles CAPI 2.0 messages and stores the
-   parameter in the _cmsg structure
- */
-
-typedef struct {
-	/* Header */
-	__u16 ApplId;
-	__u8 Command;
-	__u8 Subcommand;
-	__u16 Messagenumber;
-
-	/* Parameter */
-	union {
-		__u32 adrController;
-		__u32 adrPLCI;
-		__u32 adrNCCI;
-	} adr;
-
-	_cmstruct AdditionalInfo;
-	_cstruct B1configuration;
-	__u16 B1protocol;
-	_cstruct B2configuration;
-	__u16 B2protocol;
-	_cstruct B3configuration;
-	__u16 B3protocol;
-	_cstruct BC;
-	_cstruct BChannelinformation;
-	_cmstruct BProtocol;
-	_cstruct CalledPartyNumber;
-	_cstruct CalledPartySubaddress;
-	_cstruct CallingPartyNumber;
-	_cstruct CallingPartySubaddress;
-	__u32 CIPmask;
-	__u32 CIPmask2;
-	__u16 CIPValue;
-	__u32 Class;
-	_cstruct ConnectedNumber;
-	_cstruct ConnectedSubaddress;
-	__u32 Data;
-	__u16 DataHandle;
-	__u16 DataLength;
-	_cstruct FacilityConfirmationParameter;
-	_cstruct Facilitydataarray;
-	_cstruct FacilityIndicationParameter;
-	_cstruct FacilityRequestParameter;
-	__u16 FacilitySelector;
-	__u16 Flags;
-	__u32 Function;
-	_cstruct HLC;
-	__u16 Info;
-	_cstruct InfoElement;
-	__u32 InfoMask;
-	__u16 InfoNumber;
-	_cstruct Keypadfacility;
-	_cstruct LLC;
-	_cstruct ManuData;
-	__u32 ManuID;
-	_cstruct NCPI;
-	__u16 Reason;
-	__u16 Reason_B3;
-	__u16 Reject;
-	_cstruct Useruserdata;
-
-	/* intern */
-	unsigned l, p;
-	unsigned char *par;
-	__u8 *m;
-
-	/* buffer to construct message */
-	__u8 buf[180];
-
-} _cmsg;
-
-/*
- * capi_cmsg2message() assembles the parameter from _cmsg to a CAPI 2.0
- * conform message
- */
-unsigned capi_cmsg2message(_cmsg * cmsg, __u8 * msg);
-
-/*
- *  capi_message2cmsg disassembles a CAPI message an writes the parameter
- *  into _cmsg for easy access
- */
-unsigned capi_message2cmsg(_cmsg * cmsg, __u8 * msg);
-
-/*
- * capi_cmsg_header() fills the _cmsg structure with default values, so only
- * parameter with non default values must be changed before sending the
- * message.
- */
-unsigned capi_cmsg_header(_cmsg * cmsg, __u16 _ApplId,
-			  __u8 _Command, __u8 _Subcommand,
-			  __u16 _Messagenumber, __u32 _Controller);
-
-/*-----------------------------------------------------------------------*/
-
-/*
- * Debugging / Tracing functions
- */
-
-char *capi_cmd2str(__u8 cmd, __u8 subcmd);
-
-typedef struct {
-	u_char	*buf;
-	u_char	*p;
-	size_t	size;
-	size_t	pos;
-} _cdebbuf;
-
-#define	CDEBUG_SIZE	1024
-#define	CDEBUG_GSIZE	4096
-
-void cdebbuf_free(_cdebbuf *cdb);
-int cdebug_init(void);
-void cdebug_exit(void);
-
-_cdebbuf *capi_cmsg2str(_cmsg *cmsg);
-_cdebbuf *capi_message2str(__u8 *msg);
-
-/*-----------------------------------------------------------------------*/
-
-static inline void capi_cmsg_answer(_cmsg * cmsg)
-{
-	cmsg->Subcommand |= 0x01;
-}
-
-/*-----------------------------------------------------------------------*/
-
-static inline void capi_fill_CONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					    __u32 adr,
-					    _cstruct NCPI)
-{
-	capi_cmsg_header(cmsg, ApplId, 0x82, 0x80, Messagenumber, adr);
-	cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_FACILITY_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					  __u32 adr,
-					  __u16 FacilitySelector,
-				       _cstruct FacilityRequestParameter)
-{
-	capi_cmsg_header(cmsg, ApplId, 0x80, 0x80, Messagenumber, adr);
-	cmsg->FacilitySelector = FacilitySelector;
-	cmsg->FacilityRequestParameter = FacilityRequestParameter;
-}
-
-static inline void capi_fill_INFO_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-				      __u32 adr,
-				      _cstruct CalledPartyNumber,
-				      _cstruct BChannelinformation,
-				      _cstruct Keypadfacility,
-				      _cstruct Useruserdata,
-				      _cstruct Facilitydataarray)
-{
-	capi_cmsg_header(cmsg, ApplId, 0x08, 0x80, Messagenumber, adr);
-	cmsg->CalledPartyNumber = CalledPartyNumber;
-	cmsg->BChannelinformation = BChannelinformation;
-	cmsg->Keypadfacility = Keypadfacility;
-	cmsg->Useruserdata = Useruserdata;
-	cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_LISTEN_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					__u32 adr,
-					__u32 InfoMask,
-					__u32 CIPmask,
-					__u32 CIPmask2,
-					_cstruct CallingPartyNumber,
-					_cstruct CallingPartySubaddress)
-{
-	capi_cmsg_header(cmsg, ApplId, 0x05, 0x80, Messagenumber, adr);
-	cmsg->InfoMask = InfoMask;
-	cmsg->CIPmask = CIPmask;
-	cmsg->CIPmask2 = CIPmask2;
-	cmsg->CallingPartyNumber = CallingPartyNumber;
-	cmsg->CallingPartySubaddress = CallingPartySubaddress;
-}
-
-static inline void capi_fill_ALERT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-				       __u32 adr,
-				       _cstruct BChannelinformation,
-				       _cstruct Keypadfacility,
-				       _cstruct Useruserdata,
-				       _cstruct Facilitydataarray)
-{
-	capi_cmsg_header(cmsg, ApplId, 0x01, 0x80, Messagenumber, adr);
-	cmsg->BChannelinformation = BChannelinformation;
-	cmsg->Keypadfacility = Keypadfacility;
-	cmsg->Useruserdata = Useruserdata;
-	cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_CONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					 __u32 adr,
-					 __u16 CIPValue,
-					 _cstruct CalledPartyNumber,
-					 _cstruct CallingPartyNumber,
-					 _cstruct CalledPartySubaddress,
-					 _cstruct CallingPartySubaddress,
-					 __u16 B1protocol,
-					 __u16 B2protocol,
-					 __u16 B3protocol,
-					 _cstruct B1configuration,
-					 _cstruct B2configuration,
-					 _cstruct B3configuration,
-					 _cstruct BC,
-					 _cstruct LLC,
-					 _cstruct HLC,
-					 _cstruct BChannelinformation,
-					 _cstruct Keypadfacility,
-					 _cstruct Useruserdata,
-					 _cstruct Facilitydataarray)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x02, 0x80, Messagenumber, adr);
-	cmsg->CIPValue = CIPValue;
-	cmsg->CalledPartyNumber = CalledPartyNumber;
-	cmsg->CallingPartyNumber = CallingPartyNumber;
-	cmsg->CalledPartySubaddress = CalledPartySubaddress;
-	cmsg->CallingPartySubaddress = CallingPartySubaddress;
-	cmsg->B1protocol = B1protocol;
-	cmsg->B2protocol = B2protocol;
-	cmsg->B3protocol = B3protocol;
-	cmsg->B1configuration = B1configuration;
-	cmsg->B2configuration = B2configuration;
-	cmsg->B3configuration = B3configuration;
-	cmsg->BC = BC;
-	cmsg->LLC = LLC;
-	cmsg->HLC = HLC;
-	cmsg->BChannelinformation = BChannelinformation;
-	cmsg->Keypadfacility = Keypadfacility;
-	cmsg->Useruserdata = Useruserdata;
-	cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_DATA_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					 __u32 adr,
-					 __u32 Data,
-					 __u16 DataLength,
-					 __u16 DataHandle,
-					 __u16 Flags)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x86, 0x80, Messagenumber, adr);
-	cmsg->Data = Data;
-	cmsg->DataLength = DataLength;
-	cmsg->DataHandle = DataHandle;
-	cmsg->Flags = Flags;
-}
-
-static inline void capi_fill_DISCONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					    __u32 adr,
-					    _cstruct BChannelinformation,
-					    _cstruct Keypadfacility,
-					    _cstruct Useruserdata,
-					    _cstruct Facilitydataarray)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x04, 0x80, Messagenumber, adr);
-	cmsg->BChannelinformation = BChannelinformation;
-	cmsg->Keypadfacility = Keypadfacility;
-	cmsg->Useruserdata = Useruserdata;
-	cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_DISCONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					       __u32 adr,
-					       _cstruct NCPI)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x84, 0x80, Messagenumber, adr);
-	cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_MANUFACTURER_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					      __u32 adr,
-					      __u32 ManuID,
-					      __u32 Class,
-					      __u32 Function,
-					      _cstruct ManuData)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0xff, 0x80, Messagenumber, adr);
-	cmsg->ManuID = ManuID;
-	cmsg->Class = Class;
-	cmsg->Function = Function;
-	cmsg->ManuData = ManuData;
-}
-
-static inline void capi_fill_RESET_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					  __u32 adr,
-					  _cstruct NCPI)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x87, 0x80, Messagenumber, adr);
-	cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_SELECT_B_PROTOCOL_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-						   __u32 adr,
-						   __u16 B1protocol,
-						   __u16 B2protocol,
-						   __u16 B3protocol,
-						_cstruct B1configuration,
-						_cstruct B2configuration,
-						_cstruct B3configuration)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x41, 0x80, Messagenumber, adr);
-	cmsg->B1protocol = B1protocol;
-	cmsg->B2protocol = B2protocol;
-	cmsg->B3protocol = B3protocol;
-	cmsg->B1configuration = B1configuration;
-	cmsg->B2configuration = B2configuration;
-	cmsg->B3configuration = B3configuration;
-}
-
-static inline void capi_fill_CONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					  __u32 adr,
-					  __u16 Reject,
-					  __u16 B1protocol,
-					  __u16 B2protocol,
-					  __u16 B3protocol,
-					  _cstruct B1configuration,
-					  _cstruct B2configuration,
-					  _cstruct B3configuration,
-					  _cstruct ConnectedNumber,
-					  _cstruct ConnectedSubaddress,
-					  _cstruct LLC,
-					  _cstruct BChannelinformation,
-					  _cstruct Keypadfacility,
-					  _cstruct Useruserdata,
-					  _cstruct Facilitydataarray)
-{
-	capi_cmsg_header(cmsg, ApplId, 0x02, 0x83, Messagenumber, adr);
-	cmsg->Reject = Reject;
-	cmsg->B1protocol = B1protocol;
-	cmsg->B2protocol = B2protocol;
-	cmsg->B3protocol = B3protocol;
-	cmsg->B1configuration = B1configuration;
-	cmsg->B2configuration = B2configuration;
-	cmsg->B3configuration = B3configuration;
-	cmsg->ConnectedNumber = ConnectedNumber;
-	cmsg->ConnectedSubaddress = ConnectedSubaddress;
-	cmsg->LLC = LLC;
-	cmsg->BChannelinformation = BChannelinformation;
-	cmsg->Keypadfacility = Keypadfacility;
-	cmsg->Useruserdata = Useruserdata;
-	cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_CONNECT_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-						 __u32 adr)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x03, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_CONNECT_B3_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-						    __u32 adr)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x83, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_CONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					     __u32 adr,
-					     __u16 Reject,
-					     _cstruct NCPI)
-{
-	capi_cmsg_header(cmsg, ApplId, 0x82, 0x83, Messagenumber, adr);
-	cmsg->Reject = Reject;
-	cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_CONNECT_B3_T90_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-							__u32 adr)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x88, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_DATA_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					  __u32 adr,
-					  __u16 DataHandle)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x86, 0x83, Messagenumber, adr);
-	cmsg->DataHandle = DataHandle;
-}
-
-static inline void capi_fill_DISCONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-						__u32 adr)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x84, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_DISCONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					     __u32 adr)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x04, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_FACILITY_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					   __u32 adr,
-					   __u16 FacilitySelector)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x80, 0x83, Messagenumber, adr);
-	cmsg->FacilitySelector = FacilitySelector;
-}
-
-static inline void capi_fill_INFO_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-				       __u32 adr)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x08, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_MANUFACTURER_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					       __u32 adr,
-					       __u32 ManuID,
-					       __u32 Class,
-					       __u32 Function,
-					       _cstruct ManuData)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0xff, 0x83, Messagenumber, adr);
-	cmsg->ManuID = ManuID;
-	cmsg->Class = Class;
-	cmsg->Function = Function;
-	cmsg->ManuData = ManuData;
-}
-
-static inline void capi_fill_RESET_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
-					   __u32 adr)
-{
-
-	capi_cmsg_header(cmsg, ApplId, 0x87, 0x83, Messagenumber, adr);
-}
-
 #endif				/* __CAPIUTIL_H__ */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index ce44b68..f613d85 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1403,7 +1403,6 @@ extern int	   jbd2_journal_skip_recovery	(journal_t *);
 extern void	   jbd2_journal_update_sb_errno(journal_t *);
 extern int	   jbd2_journal_update_sb_log_tail	(journal_t *, tid_t,
 				unsigned long, int);
-extern void	   __jbd2_journal_abort_hard	(journal_t *);
 extern void	   jbd2_journal_abort      (journal_t *, int);
 extern int	   jbd2_journal_errno      (journal_t *);
 extern void	   jbd2_journal_ack_err    (journal_t *);
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 1b6d31d..e3279ef 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -422,26 +422,6 @@ static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
 extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
 extern void jiffies_to_timespec64(const unsigned long jiffies,
 				  struct timespec64 *value);
-static inline unsigned long timespec_to_jiffies(const struct timespec *value)
-{
-	struct timespec64 ts = timespec_to_timespec64(*value);
-
-	return timespec64_to_jiffies(&ts);
-}
-
-static inline void jiffies_to_timespec(const unsigned long jiffies,
-				       struct timespec *value)
-{
-	struct timespec64 ts;
-
-	jiffies_to_timespec64(jiffies, &ts);
-	*value = timespec64_to_timespec(ts);
-}
-
-extern unsigned long timeval_to_jiffies(const struct timeval *value);
-extern void jiffies_to_timeval(const unsigned long jiffies,
-			       struct timeval *value);
-
 extern clock_t jiffies_to_clock_t(unsigned long x);
 static inline clock_t jiffies_delta_to_clock_t(long delta)
 {
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 68bd882..24cd447 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -183,8 +183,6 @@ int kdb_process_cpu(const struct task_struct *p)
 	return cpu;
 }
 
-/* kdb access to register set for stack dumping */
-extern struct pt_regs *kdb_current_regs;
 #ifdef CONFIG_KALLSYMS
 extern const char *kdb_walk_kallsyms(loff_t *pos);
 #else /* ! CONFIG_KALLSYMS */
diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h
index 075fab5..94ba42b 100644
--- a/include/linux/kernelcapi.h
+++ b/include/linux/kernelcapi.h
@@ -10,46 +10,12 @@
 #ifndef __KERNELCAPI_H__
 #define __KERNELCAPI_H__
 
-
 #include <linux/list.h>
 #include <linux/skbuff.h>
 #include <linux/workqueue.h>
 #include <linux/notifier.h>
 #include <uapi/linux/kernelcapi.h>
 
-struct capi20_appl {
-	u16 applid;
-	capi_register_params rparam;
-	void (*recv_message)(struct capi20_appl *ap, struct sk_buff *skb);
-	void *private;
-
-	/* internal to kernelcapi.o */
-	unsigned long nrecvctlpkt;
-	unsigned long nrecvdatapkt;
-	unsigned long nsentctlpkt;
-	unsigned long nsentdatapkt;
-	struct mutex recv_mtx;
-	struct sk_buff_head recv_queue;
-	struct work_struct recv_work;
-	int release_in_progress;
-};
-
-u16 capi20_isinstalled(void);
-u16 capi20_register(struct capi20_appl *ap);
-u16 capi20_release(struct capi20_appl *ap);
-u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb);
-u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]);
-u16 capi20_get_version(u32 contr, struct capi_version *verp);
-u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]);
-u16 capi20_get_profile(u32 contr, struct capi_profile *profp);
-int capi20_manufacturer(unsigned long cmd, void __user *data);
-
-#define CAPICTR_UP			0
-#define CAPICTR_DOWN			1
-
-int register_capictr_notifier(struct notifier_block *nb);
-int unregister_capictr_notifier(struct notifier_block *nb);
-
 #define CAPI_NOERROR                      0x0000
 
 #define CAPI_TOOMANYAPPLS		  0x1001
@@ -76,45 +42,4 @@ int unregister_capictr_notifier(struct notifier_block *nb);
 #define CAPI_MSGCTRLERNOTSUPPORTEXTEQUIP  0x110a
 #define CAPI_MSGCTRLERONLYSUPPORTEXTEQUIP 0x110b
 
-typedef enum {
-        CapiMessageNotSupportedInCurrentState = 0x2001,
-        CapiIllContrPlciNcci                  = 0x2002,
-        CapiNoPlciAvailable                   = 0x2003,
-        CapiNoNcciAvailable                   = 0x2004,
-        CapiNoListenResourcesAvailable        = 0x2005,
-        CapiNoFaxResourcesAvailable           = 0x2006,
-        CapiIllMessageParmCoding              = 0x2007,
-} RESOURCE_CODING_PROBLEM;
-
-typedef enum {
-        CapiB1ProtocolNotSupported                      = 0x3001,
-        CapiB2ProtocolNotSupported                      = 0x3002,
-        CapiB3ProtocolNotSupported                      = 0x3003,
-        CapiB1ProtocolParameterNotSupported             = 0x3004,
-        CapiB2ProtocolParameterNotSupported             = 0x3005,
-        CapiB3ProtocolParameterNotSupported             = 0x3006,
-        CapiBProtocolCombinationNotSupported            = 0x3007,
-        CapiNcpiNotSupported                            = 0x3008,
-        CapiCipValueUnknown                             = 0x3009,
-        CapiFlagsNotSupported                           = 0x300a,
-        CapiFacilityNotSupported                        = 0x300b,
-        CapiDataLengthNotSupportedByCurrentProtocol     = 0x300c,
-        CapiResetProcedureNotSupportedByCurrentProtocol = 0x300d,
-        CapiTeiAssignmentFailed                         = 0x300e,
-} REQUESTED_SERVICES_PROBLEM;
-
-typedef enum {
-	CapiSuccess                                     = 0x0000,
-	CapiSupplementaryServiceNotSupported            = 0x300e,
-	CapiRequestNotAllowedInThisState                = 0x3010,
-} SUPPLEMENTARY_SERVICE_INFO;
-
-typedef enum {
-	CapiProtocolErrorLayer1                         = 0x3301,
-	CapiProtocolErrorLayer2                         = 0x3302,
-	CapiProtocolErrorLayer3                         = 0x3303,
-	CapiTimeOut                                     = 0x3303, // SuppServiceReason
-	CapiCallGivenToOtherApplication                 = 0x3304,
-} CAPI_REASON;
-
 #endif				/* __KERNELCAPI_H__ */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 538c25e..e89eb67 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -157,8 +157,6 @@ static inline bool is_error_page(struct page *page)
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
 
-extern struct kmem_cache *kvm_vcpu_cache;
-
 extern struct mutex kvm_lock;
 extern struct list_head vm_list;
 
@@ -204,7 +202,7 @@ struct kvm_async_pf {
 	struct list_head queue;
 	struct kvm_vcpu *vcpu;
 	struct mm_struct *mm;
-	gva_t gva;
+	gpa_t cr2_or_gpa;
 	unsigned long addr;
 	struct kvm_arch_async_pf arch;
 	bool   wakeup_all;
@@ -212,8 +210,8 @@ struct kvm_async_pf {
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
-		       struct kvm_arch_async_pf *arch);
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+		       unsigned long hva, struct kvm_arch_async_pf *arch);
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
 
@@ -579,8 +577,7 @@ static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
 	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
 		memslot++)
 
-int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
-void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
+void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);
 
 void vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
@@ -723,10 +720,9 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 void kvm_get_pfn(kvm_pfn_t pfn);
 
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
-int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
-			  unsigned long len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			   void *data, unsigned long len);
@@ -767,7 +763,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
-unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
+unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
@@ -775,8 +771,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+		struct gfn_to_pfn_cache *cache, bool atomic);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
@@ -867,16 +867,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 int kvm_arch_init(void *opaque);
 void kvm_arch_exit(void);
 
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
-
 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
 
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
 
@@ -982,10 +978,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_arch_sync_events(struct kvm *kvm);
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
+bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
 	struct hlist_node link;
@@ -1109,9 +1105,8 @@ enum kvm_stat_kind {
 };
 
 struct kvm_stat_data {
-	int offset;
-	int mode;
 	struct kvm *kvm;
+	struct kvm_stats_debugfs_item *dbgfs_item;
 };
 
 struct kvm_stats_debugfs_item {
@@ -1120,6 +1115,10 @@ struct kvm_stats_debugfs_item {
 	enum kvm_stat_kind kind;
 	int mode;
 };
+
+#define KVM_DBGFS_GET_MODE(dbgfs_item)                                         \
+	((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)
+
 extern struct kvm_stats_debugfs_item debugfs_entries[];
 extern struct dentry *kvm_debugfs_dir;
 
@@ -1342,6 +1341,9 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
 }
 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 
+struct kvm_vcpu *kvm_get_running_vcpu(void);
+struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+
 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
 bool kvm_arch_has_irq_bypass(void);
 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 1c88e69..68e84cf 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -18,7 +18,7 @@ struct kvm_memslots;
 
 enum kvm_mr_change;
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 /*
  * Address types:
@@ -51,4 +51,11 @@ struct gfn_to_hva_cache {
 	struct kvm_memory_slot *memslot;
 };
 
+struct gfn_to_pfn_cache {
+	u64 generation;
+	gfn_t gfn;
+	kvm_pfn_t pfn;
+	bool dirty;
+};
+
 #endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h
index dd93c8d..ec577f5 100644
--- a/include/linux/leds-bd2802.h
+++ b/include/linux/leds-bd2802.h
@@ -11,7 +11,6 @@
 #define _LEDS_BD2802_H_
 
 struct bd2802_led_platform_data{
-	int	reset_gpio;
 	u8	rgb_time;
 };
 
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 242258f..75353e5 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -20,6 +20,7 @@
 
 struct device;
 struct led_pattern;
+struct device_node;
 /*
  * LED Core
  */
@@ -196,6 +197,11 @@ void devm_led_classdev_unregister(struct device *parent,
 void led_classdev_suspend(struct led_classdev *led_cdev);
 void led_classdev_resume(struct led_classdev *led_cdev);
 
+extern struct led_classdev *of_led_get(struct device_node *np, int index);
+extern void led_put(struct led_classdev *led_cdev);
+struct led_classdev *__must_check devm_of_led_get(struct device *dev,
+						  int index);
+
 /**
  * led_blink_set - set blinking with software fallback
  * @led_cdev: the LED to start blinking
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 2dbde11..2ca9b70 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1109,6 +1109,11 @@ extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_op
 extern int ata_scsi_detect(struct scsi_host_template *sht);
 extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
 			  void __user *arg);
+#ifdef CONFIG_COMPAT
+#define ATA_SCSI_COMPAT_IOCTL .compat_ioctl = ata_scsi_ioctl,
+#else
+#define ATA_SCSI_COMPAT_IOCTL /* empty */
+#endif
 extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
 extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
 			    unsigned int cmd, void __user *arg);
@@ -1221,6 +1226,7 @@ struct pci_bits {
 };
 
 extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
+extern void ata_pci_shutdown_one(struct pci_dev *pdev);
 extern void ata_pci_remove_one(struct pci_dev *pdev);
 
 #ifdef CONFIG_PM
@@ -1341,6 +1347,7 @@ extern struct device_attribute *ata_common_sdev_attrs[];
 	.module			= THIS_MODULE,			\
 	.name			= drv_name,			\
 	.ioctl			= ata_scsi_ioctl,		\
+	ATA_SCSI_COMPAT_IOCTL					\
 	.queuecommand		= ata_scsi_queuecmd,		\
 	.can_queue		= ATA_DEF_QUEUE,		\
 	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,		\
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c50d01e..664f52c6 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -627,6 +627,13 @@ do {									\
 	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
 	lock_release(&(lock)->dep_map, _THIS_IP_);			\
 } while (0)
+# define might_lock_nested(lock, subclass) 				\
+do {									\
+	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
+	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
+		     _THIS_IP_);					\
+	lock_release(&(lock)->dep_map, _THIS_IP_);			\
+} while (0)
 
 #define lockdep_assert_irqs_enabled()	do {				\
 		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
@@ -649,6 +656,7 @@ do {									\
 #else
 # define might_lock(lock) do { } while (0)
 # define might_lock_read(lock) do { } while (0)
+# define might_lock_nested(lock, subclass) do { } while (0)
 # define lockdep_assert_irqs_enabled() do { } while (0)
 # define lockdep_assert_irqs_disabled() do { } while (0)
 # define lockdep_assert_in_irq() do { } while (0)
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index e6f54ef..a4dc45f 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -20,6 +20,16 @@
 #define CMDQ_WFE_WAIT			BIT(15)
 #define CMDQ_WFE_WAIT_VALUE		0x1
 
+/*
+ * WFE arg_b
+ * bit 0-11: wait value
+ * bit 15: 1 - wait, 0 - no wait
+ * bit 16-27: update value
+ * bit 31: 1 - update, 0 - no update
+ */
+#define CMDQ_WFE_OPTION			(CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \
+					CMDQ_WFE_WAIT_VALUE)
+
 /** cmdq event maximum */
 #define CMDQ_MAX_EVENT			0x3ff
 
@@ -45,6 +55,7 @@
 enum cmdq_code {
 	CMDQ_CODE_MASK = 0x02,
 	CMDQ_CODE_WRITE = 0x04,
+	CMDQ_CODE_POLL = 0x08,
 	CMDQ_CODE_JUMP = 0x10,
 	CMDQ_CODE_WFE = 0x20,
 	CMDQ_CODE_EOC = 0x40,
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index b38bbef..079d17d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -113,6 +113,9 @@ int memblock_add(phys_addr_t base, phys_addr_t size);
 int memblock_remove(phys_addr_t base, phys_addr_t size);
 int memblock_free(phys_addr_t base, phys_addr_t size);
 int memblock_reserve(phys_addr_t base, phys_addr_t size);
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
+#endif
 void memblock_trim_memory(phys_addr_t align);
 bool memblock_overlaps_region(struct memblock_type *type,
 			      phys_addr_t base, phys_addr_t size);
@@ -127,10 +130,6 @@ void reset_node_managed_pages(pg_data_t *pgdat);
 void reset_all_zones_managed_pages(void);
 
 /* Low level functions */
-int memblock_add_range(struct memblock_type *type,
-		       phys_addr_t base, phys_addr_t size,
-		       int nid, enum memblock_flags flags);
-
 void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
 		      struct memblock_type *type_a,
 		      struct memblock_type *type_b, phys_addr_t *out_start,
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 4c75dae..0b8d791 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -29,8 +29,6 @@ struct memory_block {
 	int section_count;		/* serialized by mem_sysfs_mutex */
 	int online_type;		/* for passing data to online routine */
 	int phys_device;		/* to which fru does this belong? */
-	void *hw;			/* optional pointer to fw/hw data */
-	int (*phys_callback)(struct memory_block *);
 	struct device dev;
 	int nid;			/* NID for this memory block */
 };
@@ -55,19 +53,6 @@ struct memory_notify {
 	int status_change_nid;
 };
 
-/*
- * During pageblock isolation, count the number of pages within the
- * range [start_pfn, start_pfn + nr_pages) which are owned by code
- * in the notifier chain.
- */
-#define MEM_ISOLATE_COUNT	(1<<0)
-
-struct memory_isolate_notify {
-	unsigned long start_pfn;	/* Start of range to check */
-	unsigned int nr_pages;		/* # pages in range to check */
-	unsigned int pages_found;	/* # pages owned found by callbacks */
-};
-
 struct notifier_block;
 struct mem_section;
 
@@ -94,27 +79,13 @@ static inline int memory_notify(unsigned long val, void *v)
 {
 	return 0;
 }
-static inline int register_memory_isolate_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-static inline void unregister_memory_isolate_notifier(struct notifier_block *nb)
-{
-}
-static inline int memory_isolate_notify(unsigned long val, void *v)
-{
-	return 0;
-}
 #else
 extern int register_memory_notifier(struct notifier_block *nb);
 extern void unregister_memory_notifier(struct notifier_block *nb);
-extern int register_memory_isolate_notifier(struct notifier_block *nb);
-extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
 int create_memory_block_devices(unsigned long start, unsigned long size);
 void remove_memory_block_devices(unsigned long start, unsigned long size);
 extern void memory_dev_init(void);
 extern int memory_notify(unsigned long val, void *v);
-extern int memory_isolate_notify(unsigned long val, void *v);
 extern struct memory_block *find_memory_block(struct mem_section *);
 typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
 extern int walk_memory_blocks(unsigned long start, unsigned long size,
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index ba0dca6..f4d5915 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -94,9 +94,10 @@ extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
 extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
 extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
 /* VM interface that may be used by firmware interface */
-extern int online_pages(unsigned long, unsigned long, int);
-extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
-	unsigned long *valid_start, unsigned long *valid_end);
+extern int online_pages(unsigned long pfn, unsigned long nr_pages,
+			int online_type, int nid);
+extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
+					 unsigned long end_pfn);
 extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
 						unsigned long end_pfn);
 
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
deleted file mode 100644
index 61c2875..0000000
--- a/include/linux/mfd/cros_ec.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * ChromeOS EC multi-function device
- *
- * Copyright (C) 2012 Google, Inc
- */
-
-#ifndef __LINUX_MFD_CROS_EC_H
-#define __LINUX_MFD_CROS_EC_H
-
-#include <linux/device.h>
-
-/**
- * struct cros_ec_dev - ChromeOS EC device entry point.
- * @class_dev: Device structure used in sysfs.
- * @ec_dev: cros_ec_device structure to talk to the physical device.
- * @dev: Pointer to the platform device.
- * @debug_info: cros_ec_debugfs structure for debugging information.
- * @has_kb_wake_angle: True if at least 2 accelerometer are connected to the EC.
- * @cmd_offset: Offset to apply for each command.
- * @features: Features supported by the EC.
- */
-struct cros_ec_dev {
-	struct device class_dev;
-	struct cros_ec_device *ec_dev;
-	struct device *dev;
-	struct cros_ec_debugfs *debug_info;
-	bool has_kb_wake_angle;
-	u16 cmd_offset;
-	u32 features[2];
-};
-
-#define to_cros_ec_dev(dev)  container_of(dev, struct cros_ec_dev, class_dev)
-
-#endif /* __LINUX_MFD_CROS_EC_H */
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
index 1fc75d2..4b63d3e 100644
--- a/include/linux/mfd/db8500-prcmu.h
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -525,9 +525,6 @@ u8 db8500_prcmu_get_power_state_result(void);
 void db8500_prcmu_enable_wakeups(u32 wakeups);
 int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state);
 int db8500_prcmu_request_clock(u8 clock, bool enable);
-int db8500_prcmu_set_display_clocks(void);
-int db8500_prcmu_disable_dsipll(void);
-int db8500_prcmu_enable_dsipll(void);
 void db8500_prcmu_config_abb_event_readout(u32 abb_events);
 void db8500_prcmu_get_abb_event_buffer(void __iomem **buf);
 int db8500_prcmu_config_esram0_deep_sleep(u8 state);
@@ -682,21 +679,6 @@ static inline int db8500_prcmu_request_clock(u8 clock, bool enable)
 	return 0;
 }
 
-static inline int db8500_prcmu_set_display_clocks(void)
-{
-	return 0;
-}
-
-static inline int db8500_prcmu_disable_dsipll(void)
-{
-	return 0;
-}
-
-static inline int db8500_prcmu_enable_dsipll(void)
-{
-	return 0;
-}
-
 static inline int db8500_prcmu_config_esram0_deep_sleep(u8 state)
 {
 	return 0;
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index e257104..e6ee2ec 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -321,21 +321,6 @@ static inline bool prcmu_is_ac_wake_requested(void)
 	return db8500_prcmu_is_ac_wake_requested();
 }
 
-static inline int prcmu_set_display_clocks(void)
-{
-	return db8500_prcmu_set_display_clocks();
-}
-
-static inline int prcmu_disable_dsipll(void)
-{
-	return db8500_prcmu_disable_dsipll();
-}
-
-static inline int prcmu_enable_dsipll(void)
-{
-	return db8500_prcmu_enable_dsipll();
-}
-
 static inline int prcmu_config_esram0_deep_sleep(u8 state)
 {
 	return db8500_prcmu_config_esram0_deep_sleep(state);
@@ -511,21 +496,6 @@ static inline bool prcmu_is_ac_wake_requested(void)
 	return false;
 }
 
-static inline int prcmu_set_display_clocks(void)
-{
-	return 0;
-}
-
-static inline int prcmu_disable_dsipll(void)
-{
-	return 0;
-}
-
-static inline int prcmu_enable_dsipll(void)
-{
-	return 0;
-}
-
 static inline int prcmu_config_esram0_deep_sleep(u8 state)
 {
 	return 0;
diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h
index 1013e60..a57af87 100644
--- a/include/linux/mfd/rohm-bd70528.h
+++ b/include/linux/mfd/rohm-bd70528.h
@@ -7,6 +7,7 @@
 #include <linux/bits.h>
 #include <linux/device.h>
 #include <linux/mfd/rohm-generic.h>
+#include <linux/mfd/rohm-shared.h>
 #include <linux/regmap.h>
 
 enum {
@@ -89,10 +90,6 @@ struct bd70528_data {
 #define BD70528_REG_GPIO3_OUT	0x52
 #define BD70528_REG_GPIO4_OUT	0x54
 
-/* clk control */
-
-#define BD70528_REG_CLK_OUT	0x2c
-
 /* RTC */
 
 #define BD70528_REG_RTC_COUNT_H		0x2d
@@ -309,21 +306,8 @@ enum {
 
 #define BD70528_GPIO_IN_STATE_BASE 1
 
-#define BD70528_CLK_OUT_EN_MASK 0x1
-
 /* RTC masks to mask out reserved bits */
 
-#define BD70528_MASK_RTC_SEC		0x7f
-#define BD70528_MASK_RTC_MINUTE		0x7f
-#define BD70528_MASK_RTC_HOUR_24H	0x80
-#define BD70528_MASK_RTC_HOUR_PM	0x20
-#define BD70528_MASK_RTC_HOUR		0x1f
-#define BD70528_MASK_RTC_DAY		0x3f
-#define BD70528_MASK_RTC_WEEK		0x07
-#define BD70528_MASK_RTC_MONTH		0x1f
-#define BD70528_MASK_RTC_YEAR		0xff
-#define BD70528_MASK_RTC_COUNT_L	0x7f
-
 #define BD70528_MASK_ELAPSED_TIMER_EN	0x1
 /* Mask second, min and hour fields
  * HW would support ALM irq for over 24h
@@ -332,7 +316,6 @@ enum {
  * wake-up we limit ALM to 24H and only
  * unmask sec, min and hour
  */
-#define BD70528_MASK_ALM_EN		0x7
 #define BD70528_MASK_WAKE_EN		0x1
 
 /* WDT masks */
diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h
new file mode 100644
index 0000000..017a4c0
--- /dev/null
+++ b/include/linux/mfd/rohm-bd71828.h
@@ -0,0 +1,423 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2019 ROHM Semiconductors */
+
+#ifndef __LINUX_MFD_BD71828_H__
+#define __LINUX_MFD_BD71828_H__
+
+#include <linux/mfd/rohm-generic.h>
+#include <linux/mfd/rohm-shared.h>
+
+/* Regulator IDs */
+enum {
+	BD71828_BUCK1,
+	BD71828_BUCK2,
+	BD71828_BUCK3,
+	BD71828_BUCK4,
+	BD71828_BUCK5,
+	BD71828_BUCK6,
+	BD71828_BUCK7,
+	BD71828_LDO1,
+	BD71828_LDO2,
+	BD71828_LDO3,
+	BD71828_LDO4,
+	BD71828_LDO5,
+	BD71828_LDO6,
+	BD71828_LDO_SNVS,
+	BD71828_REGULATOR_AMOUNT,
+};
+
+#define BD71828_BUCK1267_VOLTS		0xEF
+#define BD71828_BUCK3_VOLTS		0x10
+#define BD71828_BUCK4_VOLTS		0x20
+#define BD71828_BUCK5_VOLTS		0x10
+#define BD71828_LDO_VOLTS		0x32
+/* LDO6 is fixed 1.8V voltage */
+#define BD71828_LDO_6_VOLTAGE		1800000
+
+/* Registers and masks */
+
+/* MODE control */
+#define BD71828_REG_PS_CTRL_1		0x04
+#define BD71828_REG_PS_CTRL_2		0x05
+#define BD71828_REG_PS_CTRL_3		0x06
+
+//#define BD71828_REG_SWRESET		0x06
+#define BD71828_MASK_RUN_LVL_CTRL	0x30
+
+/* Regulator control masks */
+
+#define BD71828_MASK_RAMP_DELAY		0x6
+
+#define BD71828_MASK_RUN_EN		0x08
+#define BD71828_MASK_SUSP_EN		0x04
+#define BD71828_MASK_IDLE_EN		0x02
+#define BD71828_MASK_LPSR_EN		0x01
+
+#define BD71828_MASK_RUN0_EN		0x01
+#define BD71828_MASK_RUN1_EN		0x02
+#define BD71828_MASK_RUN2_EN		0x04
+#define BD71828_MASK_RUN3_EN		0x08
+
+#define BD71828_MASK_DVS_BUCK1_CTRL	0x10
+#define BD71828_DVS_BUCK1_CTRL_I2C	0
+#define BD71828_DVS_BUCK1_USE_RUNLVL	0x10
+
+#define BD71828_MASK_DVS_BUCK2_CTRL	0x20
+#define BD71828_DVS_BUCK2_CTRL_I2C	0
+#define BD71828_DVS_BUCK2_USE_RUNLVL	0x20
+
+#define BD71828_MASK_DVS_BUCK6_CTRL	0x40
+#define BD71828_DVS_BUCK6_CTRL_I2C	0
+#define BD71828_DVS_BUCK6_USE_RUNLVL	0x40
+
+#define BD71828_MASK_DVS_BUCK7_CTRL	0x80
+#define BD71828_DVS_BUCK7_CTRL_I2C	0
+#define BD71828_DVS_BUCK7_USE_RUNLVL	0x80
+
+#define BD71828_MASK_BUCK1267_VOLT	0xff
+#define BD71828_MASK_BUCK3_VOLT		0x1f
+#define BD71828_MASK_BUCK4_VOLT		0x3f
+#define BD71828_MASK_BUCK5_VOLT		0x1f
+#define BD71828_MASK_LDO_VOLT		0x3f
+
+/* Regulator control regs */
+#define BD71828_REG_BUCK1_EN		0x08
+#define BD71828_REG_BUCK1_CTRL		0x09
+#define BD71828_REG_BUCK1_MODE		0x0a
+#define BD71828_REG_BUCK1_IDLE_VOLT	0x0b
+#define BD71828_REG_BUCK1_SUSP_VOLT	0x0c
+#define BD71828_REG_BUCK1_VOLT		0x0d
+
+#define BD71828_REG_BUCK2_EN		0x12
+#define BD71828_REG_BUCK2_CTRL		0x13
+#define BD71828_REG_BUCK2_MODE		0x14
+#define BD71828_REG_BUCK2_IDLE_VOLT	0x15
+#define BD71828_REG_BUCK2_SUSP_VOLT	0x16
+#define BD71828_REG_BUCK2_VOLT		0x17
+
+#define BD71828_REG_BUCK3_EN		0x1c
+#define BD71828_REG_BUCK3_MODE		0x1d
+#define BD71828_REG_BUCK3_VOLT		0x1e
+
+#define BD71828_REG_BUCK4_EN		0x1f
+#define BD71828_REG_BUCK4_MODE		0x20
+#define BD71828_REG_BUCK4_VOLT		0x21
+
+#define BD71828_REG_BUCK5_EN		0x22
+#define BD71828_REG_BUCK5_MODE		0x23
+#define BD71828_REG_BUCK5_VOLT		0x24
+
+#define BD71828_REG_BUCK6_EN		0x25
+#define BD71828_REG_BUCK6_CTRL		0x26
+#define BD71828_REG_BUCK6_MODE		0x27
+#define BD71828_REG_BUCK6_IDLE_VOLT	0x28
+#define BD71828_REG_BUCK6_SUSP_VOLT	0x29
+#define BD71828_REG_BUCK6_VOLT		0x2a
+
+#define BD71828_REG_BUCK7_EN		0x2f
+#define BD71828_REG_BUCK7_CTRL		0x30
+#define BD71828_REG_BUCK7_MODE		0x31
+#define BD71828_REG_BUCK7_IDLE_VOLT	0x32
+#define BD71828_REG_BUCK7_SUSP_VOLT	0x33
+#define BD71828_REG_BUCK7_VOLT		0x34
+
+#define BD71828_REG_LDO1_EN		0x39
+#define BD71828_REG_LDO1_VOLT		0x3a
+#define BD71828_REG_LDO2_EN		0x3b
+#define BD71828_REG_LDO2_VOLT		0x3c
+#define BD71828_REG_LDO3_EN		0x3d
+#define BD71828_REG_LDO3_VOLT		0x3e
+#define BD71828_REG_LDO4_EN		0x3f
+#define BD71828_REG_LDO4_VOLT		0x40
+#define BD71828_REG_LDO5_EN		0x41
+#define BD71828_REG_LDO5_VOLT		0x43
+#define BD71828_REG_LDO5_VOLT_OPT	0x42
+#define BD71828_REG_LDO6_EN		0x44
+//#define BD71828_REG_LDO6_VOLT		0x4
+#define BD71828_REG_LDO7_EN		0x45
+#define BD71828_REG_LDO7_VOLT		0x46
+
+/* GPIO */
+
+#define BD71828_GPIO_DRIVE_MASK		0x2
+#define BD71828_GPIO_OPEN_DRAIN		0x0
+#define BD71828_GPIO_PUSH_PULL		0x2
+#define BD71828_GPIO_OUT_HI		0x1
+#define BD71828_GPIO_OUT_LO		0x0
+#define BD71828_GPIO_OUT_MASK		0x1
+
+#define BD71828_REG_GPIO_CTRL1		0x47
+#define BD71828_REG_GPIO_CTRL2		0x48
+#define BD71828_REG_GPIO_CTRL3		0x49
+#define BD71828_REG_IO_STAT		0xed
+
+/* RTC */
+#define BD71828_REG_RTC_SEC		0x4c
+#define BD71828_REG_RTC_MINUTE		0x4d
+#define BD71828_REG_RTC_HOUR		0x4e
+#define BD71828_REG_RTC_WEEK		0x4f
+#define BD71828_REG_RTC_DAY		0x50
+#define BD71828_REG_RTC_MONTH		0x51
+#define BD71828_REG_RTC_YEAR		0x52
+
+#define BD71828_REG_RTC_ALM0_SEC	0x53
+#define BD71828_REG_RTC_ALM_START	BD71828_REG_RTC_ALM0_SEC
+#define BD71828_REG_RTC_ALM0_MINUTE	0x54
+#define BD71828_REG_RTC_ALM0_HOUR	0x55
+#define BD71828_REG_RTC_ALM0_WEEK	0x56
+#define BD71828_REG_RTC_ALM0_DAY	0x57
+#define BD71828_REG_RTC_ALM0_MONTH	0x58
+#define BD71828_REG_RTC_ALM0_YEAR	0x59
+#define BD71828_REG_RTC_ALM0_MASK	0x61
+
+#define BD71828_REG_RTC_ALM1_SEC	0x5a
+#define BD71828_REG_RTC_ALM1_MINUTE	0x5b
+#define BD71828_REG_RTC_ALM1_HOUR	0x5c
+#define BD71828_REG_RTC_ALM1_WEEK	0x5d
+#define BD71828_REG_RTC_ALM1_DAY	0x5e
+#define BD71828_REG_RTC_ALM1_MONTH	0x5f
+#define BD71828_REG_RTC_ALM1_YEAR	0x60
+#define BD71828_REG_RTC_ALM1_MASK	0x62
+
+#define BD71828_REG_RTC_ALM2		0x63
+#define BD71828_REG_RTC_START		BD71828_REG_RTC_SEC
+
+/* Charger/Battery */
+#define BD71828_REG_CHG_STATE		0x65
+#define BD71828_REG_CHG_FULL		0xd2
+
+/* LEDs */
+#define BD71828_REG_LED_CTRL		0x4A
+#define BD71828_MASK_LED_AMBER		0x80
+#define BD71828_MASK_LED_GREEN		0x40
+#define BD71828_LED_ON			0xff
+#define BD71828_LED_OFF			0x0
+
+/* IRQ registers */
+#define BD71828_REG_INT_MASK_BUCK	0xd3
+#define BD71828_REG_INT_MASK_DCIN1	0xd4
+#define BD71828_REG_INT_MASK_DCIN2	0xd5
+#define BD71828_REG_INT_MASK_VSYS	0xd6
+#define BD71828_REG_INT_MASK_CHG	0xd7
+#define BD71828_REG_INT_MASK_BAT	0xd8
+#define BD71828_REG_INT_MASK_BAT_MON1	0xd9
+#define BD71828_REG_INT_MASK_BAT_MON2	0xda
+#define BD71828_REG_INT_MASK_BAT_MON3	0xdb
+#define BD71828_REG_INT_MASK_BAT_MON4	0xdc
+#define BD71828_REG_INT_MASK_TEMP	0xdd
+#define BD71828_REG_INT_MASK_RTC	0xde
+
+#define BD71828_REG_INT_MAIN		0xdf
+#define BD71828_REG_INT_BUCK		0xe0
+#define BD71828_REG_INT_DCIN1		0xe1
+#define BD71828_REG_INT_DCIN2		0xe2
+#define BD71828_REG_INT_VSYS		0xe3
+#define BD71828_REG_INT_CHG		0xe4
+#define BD71828_REG_INT_BAT		0xe5
+#define BD71828_REG_INT_BAT_MON1	0xe6
+#define BD71828_REG_INT_BAT_MON2	0xe7
+#define BD71828_REG_INT_BAT_MON3	0xe8
+#define BD71828_REG_INT_BAT_MON4	0xe9
+#define BD71828_REG_INT_TEMP		0xea
+#define BD71828_REG_INT_RTC		0xeb
+#define BD71828_REG_INT_UPDATE		0xec
+
+#define BD71828_MAX_REGISTER BD71828_REG_IO_STAT
+
+/* Masks for main IRQ register bits */
+enum {
+	BD71828_INT_BUCK,
+#define BD71828_INT_BUCK_MASK BIT(BD71828_INT_BUCK)
+	BD71828_INT_DCIN,
+#define BD71828_INT_DCIN_MASK BIT(BD71828_INT_DCIN)
+	BD71828_INT_VSYS,
+#define BD71828_INT_VSYS_MASK BIT(BD71828_INT_VSYS)
+	BD71828_INT_CHG,
+#define BD71828_INT_CHG_MASK BIT(BD71828_INT_CHG)
+	BD71828_INT_BAT,
+#define BD71828_INT_BAT_MASK BIT(BD71828_INT_BAT)
+	BD71828_INT_BAT_MON,
+#define BD71828_INT_BAT_MON_MASK BIT(BD71828_INT_BAT_MON)
+	BD71828_INT_TEMP,
+#define BD71828_INT_TEMP_MASK BIT(BD71828_INT_TEMP)
+	BD71828_INT_RTC,
+#define BD71828_INT_RTC_MASK BIT(BD71828_INT_RTC)
+};
+
+/* Interrupts */
+enum {
+	/* BUCK reg interrupts */
+	BD71828_INT_BUCK1_OCP,
+	BD71828_INT_BUCK2_OCP,
+	BD71828_INT_BUCK3_OCP,
+	BD71828_INT_BUCK4_OCP,
+	BD71828_INT_BUCK5_OCP,
+	BD71828_INT_BUCK6_OCP,
+	BD71828_INT_BUCK7_OCP,
+	BD71828_INT_PGFAULT,
+	/* DCIN1 interrupts */
+	BD71828_INT_DCIN_DET,
+	BD71828_INT_DCIN_RMV,
+	BD71828_INT_CLPS_OUT,
+	BD71828_INT_CLPS_IN,
+	/* DCIN2 interrupts */
+	BD71828_INT_DCIN_MON_RES,
+	BD71828_INT_DCIN_MON_DET,
+	BD71828_INT_LONGPUSH,
+	BD71828_INT_MIDPUSH,
+	BD71828_INT_SHORTPUSH,
+	BD71828_INT_PUSH,
+	BD71828_INT_WDOG,
+	BD71828_INT_SWRESET,
+	/* Vsys */
+	BD71828_INT_VSYS_UV_RES,
+	BD71828_INT_VSYS_UV_DET,
+	BD71828_INT_VSYS_LOW_RES,
+	BD71828_INT_VSYS_LOW_DET,
+	BD71828_INT_VSYS_HALL_IN,
+	BD71828_INT_VSYS_HALL_TOGGLE,
+	BD71828_INT_VSYS_MON_RES,
+	BD71828_INT_VSYS_MON_DET,
+	/* Charger */
+	BD71828_INT_CHG_DCIN_ILIM,
+	BD71828_INT_CHG_TOPOFF_TO_DONE,
+	BD71828_INT_CHG_WDG_TEMP,
+	BD71828_INT_CHG_WDG_TIME,
+	BD71828_INT_CHG_RECHARGE_RES,
+	BD71828_INT_CHG_RECHARGE_DET,
+	BD71828_INT_CHG_RANGED_TEMP_TRANSITION,
+	BD71828_INT_CHG_STATE_TRANSITION,
+	/* Battery */
+	BD71828_INT_BAT_TEMP_NORMAL,
+	BD71828_INT_BAT_TEMP_ERANGE,
+	BD71828_INT_BAT_TEMP_WARN,
+	BD71828_INT_BAT_REMOVED,
+	BD71828_INT_BAT_DETECTED,
+	BD71828_INT_THERM_REMOVED,
+	BD71828_INT_THERM_DETECTED,
+	/* Battery Mon 1 */
+	BD71828_INT_BAT_DEAD,
+	BD71828_INT_BAT_SHORTC_RES,
+	BD71828_INT_BAT_SHORTC_DET,
+	BD71828_INT_BAT_LOW_VOLT_RES,
+	BD71828_INT_BAT_LOW_VOLT_DET,
+	BD71828_INT_BAT_OVER_VOLT_RES,
+	BD71828_INT_BAT_OVER_VOLT_DET,
+	/* Battery Mon 2 */
+	BD71828_INT_BAT_MON_RES,
+	BD71828_INT_BAT_MON_DET,
+	/* Battery Mon 3 (Coulomb counter) */
+	BD71828_INT_BAT_CC_MON1,
+	BD71828_INT_BAT_CC_MON2,
+	BD71828_INT_BAT_CC_MON3,
+	/* Battery Mon 4 */
+	BD71828_INT_BAT_OVER_CURR_1_RES,
+	BD71828_INT_BAT_OVER_CURR_1_DET,
+	BD71828_INT_BAT_OVER_CURR_2_RES,
+	BD71828_INT_BAT_OVER_CURR_2_DET,
+	BD71828_INT_BAT_OVER_CURR_3_RES,
+	BD71828_INT_BAT_OVER_CURR_3_DET,
+	/* Temperature */
+	BD71828_INT_TEMP_BAT_LOW_RES,
+	BD71828_INT_TEMP_BAT_LOW_DET,
+	BD71828_INT_TEMP_BAT_HI_RES,
+	BD71828_INT_TEMP_BAT_HI_DET,
+	BD71828_INT_TEMP_CHIP_OVER_125_RES,
+	BD71828_INT_TEMP_CHIP_OVER_125_DET,
+	BD71828_INT_TEMP_CHIP_OVER_VF_DET,
+	BD71828_INT_TEMP_CHIP_OVER_VF_RES,
+	/* RTC Alarm */
+	BD71828_INT_RTC0,
+	BD71828_INT_RTC1,
+	BD71828_INT_RTC2,
+};
+
+#define BD71828_INT_BUCK1_OCP_MASK			0x1
+#define BD71828_INT_BUCK2_OCP_MASK			0x2
+#define BD71828_INT_BUCK3_OCP_MASK			0x4
+#define BD71828_INT_BUCK4_OCP_MASK			0x8
+#define BD71828_INT_BUCK5_OCP_MASK			0x10
+#define BD71828_INT_BUCK6_OCP_MASK			0x20
+#define BD71828_INT_BUCK7_OCP_MASK			0x40
+#define BD71828_INT_PGFAULT_MASK			0x80
+
+#define BD71828_INT_DCIN_DET_MASK			0x1
+#define BD71828_INT_DCIN_RMV_MASK			0x2
+#define BD71828_INT_CLPS_OUT_MASK			0x4
+#define BD71828_INT_CLPS_IN_MASK			0x8
+	/* DCIN2 interrupts */
+#define BD71828_INT_DCIN_MON_RES_MASK			0x1
+#define BD71828_INT_DCIN_MON_DET_MASK			0x2
+#define BD71828_INT_LONGPUSH_MASK			0x4
+#define BD71828_INT_MIDPUSH_MASK			0x8
+#define BD71828_INT_SHORTPUSH_MASK			0x10
+#define BD71828_INT_PUSH_MASK				0x20
+#define BD71828_INT_WDOG_MASK				0x40
+#define BD71828_INT_SWRESET_MASK			0x80
+	/* Vsys */
+#define BD71828_INT_VSYS_UV_RES_MASK			0x1
+#define BD71828_INT_VSYS_UV_DET_MASK			0x2
+#define BD71828_INT_VSYS_LOW_RES_MASK			0x4
+#define BD71828_INT_VSYS_LOW_DET_MASK			0x8
+#define BD71828_INT_VSYS_HALL_IN_MASK			0x10
+#define BD71828_INT_VSYS_HALL_TOGGLE_MASK		0x20
+#define BD71828_INT_VSYS_MON_RES_MASK			0x40
+#define BD71828_INT_VSYS_MON_DET_MASK			0x80
+	/* Charger */
+#define BD71828_INT_CHG_DCIN_ILIM_MASK			0x1
+#define BD71828_INT_CHG_TOPOFF_TO_DONE_MASK		0x2
+#define BD71828_INT_CHG_WDG_TEMP_MASK			0x4
+#define BD71828_INT_CHG_WDG_TIME_MASK			0x8
+#define BD71828_INT_CHG_RECHARGE_RES_MASK		0x10
+#define BD71828_INT_CHG_RECHARGE_DET_MASK		0x20
+#define BD71828_INT_CHG_RANGED_TEMP_TRANSITION_MASK	0x40
+#define BD71828_INT_CHG_STATE_TRANSITION_MASK		0x80
+	/* Battery */
+#define BD71828_INT_BAT_TEMP_NORMAL_MASK		0x1
+#define BD71828_INT_BAT_TEMP_ERANGE_MASK		0x2
+#define BD71828_INT_BAT_TEMP_WARN_MASK			0x4
+#define BD71828_INT_BAT_REMOVED_MASK			0x10
+#define BD71828_INT_BAT_DETECTED_MASK			0x20
+#define BD71828_INT_THERM_REMOVED_MASK			0x40
+#define BD71828_INT_THERM_DETECTED_MASK			0x80
+	/* Battery Mon 1 */
+#define BD71828_INT_BAT_DEAD_MASK			0x2
+#define BD71828_INT_BAT_SHORTC_RES_MASK			0x4
+#define BD71828_INT_BAT_SHORTC_DET_MASK			0x8
+#define BD71828_INT_BAT_LOW_VOLT_RES_MASK		0x10
+#define BD71828_INT_BAT_LOW_VOLT_DET_MASK		0x20
+#define BD71828_INT_BAT_OVER_VOLT_RES_MASK		0x40
+#define BD71828_INT_BAT_OVER_VOLT_DET_MASK		0x80
+	/* Battery Mon 2 */
+#define BD71828_INT_BAT_MON_RES_MASK			0x1
+#define BD71828_INT_BAT_MON_DET_MASK			0x2
+	/* Battery Mon 3 (Coulomb counter) */
+#define BD71828_INT_BAT_CC_MON1_MASK			0x1
+#define BD71828_INT_BAT_CC_MON2_MASK			0x2
+#define BD71828_INT_BAT_CC_MON3_MASK			0x4
+	/* Battery Mon 4 */
+#define BD71828_INT_BAT_OVER_CURR_1_RES_MASK		0x1
+#define BD71828_INT_BAT_OVER_CURR_1_DET_MASK		0x2
+#define BD71828_INT_BAT_OVER_CURR_2_RES_MASK		0x4
+#define BD71828_INT_BAT_OVER_CURR_2_DET_MASK		0x8
+#define BD71828_INT_BAT_OVER_CURR_3_RES_MASK		0x10
+#define BD71828_INT_BAT_OVER_CURR_3_DET_MASK		0x20
+	/* Temperature */
+#define BD71828_INT_TEMP_BAT_LOW_RES_MASK		0x1
+#define BD71828_INT_TEMP_BAT_LOW_DET_MASK		0x2
+#define BD71828_INT_TEMP_BAT_HI_RES_MASK		0x4
+#define BD71828_INT_TEMP_BAT_HI_DET_MASK		0x8
+#define BD71828_INT_TEMP_CHIP_OVER_125_RES_MASK		0x10
+#define BD71828_INT_TEMP_CHIP_OVER_125_DET_MASK		0x20
+#define BD71828_INT_TEMP_CHIP_OVER_VF_RES_MASK		0x40
+#define BD71828_INT_TEMP_CHIP_OVER_VF_DET_MASK		0x80
+	/* RTC Alarm */
+#define BD71828_INT_RTC0_MASK				0x1
+#define BD71828_INT_RTC1_MASK				0x2
+#define BD71828_INT_RTC2_MASK				0x4
+
+#define BD71828_OUT_TYPE_MASK				0x2
+#define BD71828_OUT_TYPE_OPEN_DRAIN			0x0
+#define BD71828_OUT_TYPE_CMOS				0x2
+
+#endif /* __LINUX_MFD_BD71828_H__ */
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h
index 7f2dbde..bee2474a 100644
--- a/include/linux/mfd/rohm-bd718x7.h
+++ b/include/linux/mfd/rohm-bd718x7.h
@@ -191,12 +191,6 @@ enum {
 #define IRQ_ON_REQ		0x02
 #define IRQ_STBY_REQ		0x01
 
-/* BD718XX_REG_OUT32K bits */
-#define BD718XX_OUT32K_EN	0x01
-
-/* BD7183XX gated clock rate */
-#define BD718XX_CLK_RATE 32768
-
 /* ROHM BD718XX irqs */
 enum {
 	BD718XX_INT_STBY_REQ,
diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
index bff15ac..4283b5b 100644
--- a/include/linux/mfd/rohm-generic.h
+++ b/include/linux/mfd/rohm-generic.h
@@ -4,17 +4,83 @@
 #ifndef __LINUX_MFD_ROHM_H__
 #define __LINUX_MFD_ROHM_H__
 
-enum {
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+enum rohm_chip_type {
 	ROHM_CHIP_TYPE_BD71837 = 0,
 	ROHM_CHIP_TYPE_BD71847,
 	ROHM_CHIP_TYPE_BD70528,
+	ROHM_CHIP_TYPE_BD71828,
 	ROHM_CHIP_TYPE_AMOUNT
 };
 
 struct rohm_regmap_dev {
-	unsigned int chip_type;
 	struct device *dev;
 	struct regmap *regmap;
 };
 
+enum {
+	ROHM_DVS_LEVEL_UNKNOWN,
+	ROHM_DVS_LEVEL_RUN,
+	ROHM_DVS_LEVEL_IDLE,
+	ROHM_DVS_LEVEL_SUSPEND,
+	ROHM_DVS_LEVEL_LPSR,
+	ROHM_DVS_LEVEL_MAX = ROHM_DVS_LEVEL_LPSR,
+};
+
+/**
+ * struct rohm_dvs_config - dynamic voltage scaling register descriptions
+ *
+ * @level_map:		bitmap representing supported run-levels for this
+ *			regulator
+ * @run_reg:		register address for regulator config at 'run' state
+ * @run_mask:		value mask for regulator voltages at 'run' state
+ * @run_on_mask:	enable mask for regulator at 'run' state
+ * @idle_reg:		register address for regulator config at 'idle' state
+ * @idle_mask:		value mask for regulator voltages at 'idle' state
+ * @idle_on_mask:	enable mask for regulator at 'idle' state
+ * @suspend_reg:	register address for regulator config at 'suspend' state
+ * @suspend_mask:	value mask for regulator voltages at 'suspend' state
+ * @suspend_on_mask:	enable mask for regulator at 'suspend' state
+ * @lpsr_reg:		register address for regulator config at 'lpsr' state
+ * @lpsr_mask:		value mask for regulator voltages at 'lpsr' state
+ * @lpsr_on_mask:	enable mask for regulator at 'lpsr' state
+ *
+ * Description of ROHM PMICs voltage configuration registers for different
+ * system states. This is used to correctly configure the PMIC at startup
+ * based on values read from DT.
+ */
+struct rohm_dvs_config {
+	uint64_t level_map;
+	unsigned int run_reg;
+	unsigned int run_mask;
+	unsigned int run_on_mask;
+	unsigned int idle_reg;
+	unsigned int idle_mask;
+	unsigned int idle_on_mask;
+	unsigned int suspend_reg;
+	unsigned int suspend_mask;
+	unsigned int suspend_on_mask;
+	unsigned int lpsr_reg;
+	unsigned int lpsr_mask;
+	unsigned int lpsr_on_mask;
+};
+
+#if IS_ENABLED(CONFIG_REGULATOR_ROHM)
+int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
+				  struct device_node *np,
+				  const struct regulator_desc *desc,
+				  struct regmap *regmap);
+
+#else
+static inline int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
+						struct device_node *np,
+						const struct regulator_desc *desc,
+						struct regmap *regmap)
+{
+	return 0;
+}
+#endif
+
 #endif
diff --git a/include/linux/mfd/rohm-shared.h b/include/linux/mfd/rohm-shared.h
new file mode 100644
index 0000000..53dd7f6
--- /dev/null
+++ b/include/linux/mfd/rohm-shared.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2020 ROHM Semiconductors */
+
+
+#ifndef __LINUX_MFD_ROHM_SHARED_H__
+#define __LINUX_MFD_ROHM_SHARED_H__
+
+/* RTC definitions shared between BD70528 and BD71828 */
+
+#define BD70528_MASK_RTC_SEC		0x7f
+#define BD70528_MASK_RTC_MINUTE		0x7f
+#define BD70528_MASK_RTC_HOUR_24H	0x80
+#define BD70528_MASK_RTC_HOUR_PM	0x20
+#define BD70528_MASK_RTC_HOUR		0x3f
+#define BD70528_MASK_RTC_DAY		0x3f
+#define BD70528_MASK_RTC_WEEK		0x07
+#define BD70528_MASK_RTC_MONTH		0x1f
+#define BD70528_MASK_RTC_YEAR		0xff
+#define BD70528_MASK_ALM_EN		0x7
+
+#endif /* __LINUX_MFD_ROHM_SHARED_H__ */
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index 112dc66..7f20e9b 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -23,6 +23,11 @@ extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
 extern struct regmap *syscon_regmap_lookup_by_phandle(
 					struct device_node *np,
 					const char *property);
+extern struct regmap *syscon_regmap_lookup_by_phandle_args(
+					struct device_node *np,
+					const char *property,
+					int arg_count,
+					unsigned int *out_args);
 #else
 static inline struct regmap *device_node_to_regmap(struct device_node *np)
 {
@@ -45,6 +50,15 @@ static inline struct regmap *syscon_regmap_lookup_by_phandle(
 {
 	return ERR_PTR(-ENOTSUPP);
 }
+
+static inline struct regmap *syscon_regmap_lookup_by_phandle_args(
+					struct device_node *np,
+					const char *property,
+					int arg_count,
+					unsigned int *out_args)
+{
+	return ERR_PTR(-ENOTSUPP);
+}
 #endif
 
 #endif /* __LINUX_MFD_SYSCON_H__ */
diff --git a/include/linux/mfd/wcd934x/registers.h b/include/linux/mfd/wcd934x/registers.h
new file mode 100644
index 0000000..bb8d2e2
--- /dev/null
+++ b/include/linux/mfd/wcd934x/registers.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _WCD934X_REGISTERS_H
+#define _WCD934X_REGISTERS_H
+
+#define WCD934X_CODEC_RPM_CLK_GATE				0x0002
+#define WCD934X_CODEC_RPM_CLK_GATE_MASK				GENMASK(1, 0)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG				0x0003
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_9P6MHZ			BIT(0)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_12P288MHZ		BIT(1)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK		GENMASK(1, 0)
+#define WCD934X_CODEC_RPM_RST_CTL				0x0009
+#define WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL			0x0011
+#define WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0			0x0021
+#define WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2			0x0023
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_CTL			0x0025
+#define WCD934X_EFUSE_SENSE_STATE_MASK				GENMASK(4, 1)
+#define WCD934X_EFUSE_SENSE_STATE_DEF				0x10
+#define WCD934X_EFUSE_SENSE_EN_MASK				BIT(0)
+#define WCD934X_EFUSE_SENSE_ENABLE				BIT(0)
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14			0x0037
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15			0x0038
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS			0x0039
+#define WCD934X_DATA_HUB_SB_TX10_INP_CFG			0x006b
+#define WCD934X_DATA_HUB_SB_TX11_INP_CFG			0x006c
+#define WCD934X_DATA_HUB_SB_TX13_INP_CFG			0x006e
+#define WCD934X_CPE_FLL_CONFIG_CTL_2				0x0111
+#define WCD934X_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD		0x0213
+#define WCD934X_CPE_SS_SVA_CFG					0x0214
+#define WCD934X_CPE_SS_DMIC0_CTL				0x0218
+#define WCD934X_CPE_SS_DMIC1_CTL				0x0219
+#define WCD934X_DMIC_RATE_MASK					GENMASK(3, 1)
+#define WCD934X_CPE_SS_DMIC2_CTL				0x021a
+#define WCD934X_CPE_SS_DMIC_CFG					0x021b
+#define WCD934X_CPE_SS_DMIC_CFG					0x021b
+#define WCD934X_CPE_SS_CPAR_CFG					0x021c
+#define WCD934X_INTR_PIN1_MASK0					0x0409
+#define WCD934X_INTR_PIN1_STATUS0				0x0411
+#define WCD934X_INTR_PIN1_CLEAR0				0x0419
+#define WCD934X_INTR_PIN2_CLEAR3				0x0434
+#define WCD934X_INTR_LEVEL0					0x0461
+/* INTR_REG 0 */
+#define	WCD934X_IRQ_SLIMBUS			0
+#define	WCD934X_IRQ_MISC			1
+#define	WCD934X_IRQ_HPH_PA_OCPL_FAULT		2
+#define	WCD934X_IRQ_HPH_PA_OCPR_FAULT		3
+#define	WCD934X_IRQ_EAR_PA_OCP_FAULT		4
+#define	WCD934X_IRQ_HPH_PA_CNPL_COMPLETE	5
+#define	WCD934X_IRQ_HPH_PA_CNPR_COMPLETE	6
+#define	WCD934X_IRQ_EAR_PA_CNP_COMPLETE		7
+/* INTR_REG 1 */
+#define	WCD934X_IRQ_MBHC_SW_DET			8
+#define	WCD934X_IRQ_MBHC_ELECT_INS_REM_DET	9
+#define	WCD934X_IRQ_MBHC_BUTTON_PRESS_DET	10
+#define	WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET	11
+#define	WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET	12
+#define	WCD934X_IRQ_RESERVED_0			13
+#define	WCD934X_IRQ_RESERVED_1			14
+#define	WCD934X_IRQ_RESERVED_2			15
+/* INTR_REG 2 */
+#define	WCD934X_IRQ_LINE_PA1_CNP_COMPLETE	16
+#define	WCD934X_IRQ_LINE_PA2_CNP_COMPLETE	17
+#define	WCD934X_IRQ_SLNQ_ANALOG_ERROR		18
+#define	WCD934X_IRQ_RESERVED_3			19
+#define	WCD934X_IRQ_SOUNDWIRE			20
+#define	WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE	21
+#define	WCD934X_IRQ_RCO_ERROR			22
+#define	WCD934X_IRQ_CPE_ERROR			23
+/* INTR_REG 3 */
+#define	WCD934X_IRQ_MAD_AUDIO			24
+#define	WCD934X_IRQ_MAD_BEACON			25
+#define	WCD934X_IRQ_MAD_ULTRASOUND		26
+#define	WCD934X_IRQ_VBAT_ATTACK			27
+#define	WCD934X_IRQ_VBAT_RESTORE		28
+#define	WCD934X_IRQ_CPE1_INTR			29
+#define	WCD934X_IRQ_RESERVED_4			30
+#define	WCD934X_IRQ_SLNQ_DIGITAL		31
+#define WCD934X_NUM_IRQS			32
+#define WCD934X_ANA_BIAS					0x0601
+#define WCD934X_ANA_BIAS_EN_MASK				BIT(7)
+#define WCD934X_ANA_BIAS_EN					BIT(7)
+#define WCD934X_ANA_PRECHRG_EN_MASK				BIT(6)
+#define WCD934X_ANA_PRECHRG_EN					BIT(6)
+#define WCD934X_ANA_PRECHRG_MODE_MASK				BIT(5)
+#define WCD934X_ANA_PRECHRG_MODE_AUTO				BIT(5)
+#define WCD934X_ANA_RCO						0x0603
+#define WCD934X_ANA_RCO_BG_EN_MASK				BIT(7)
+#define WCD934X_ANA_RCO_BG_ENABLE				BIT(7)
+#define WCD934X_ANA_BUCK_CTL					0x0606
+#define WCD934X_ANA_BUCK_HI_ACCU_PRE_ENX_MASK			GENMASK(1, 0)
+#define WCD934X_ANA_BUCK_PRE_EN2_MASK				BIT(0)
+#define WCD934X_ANA_BUCK_PRE_EN2_ENABLE				BIT(0)
+#define WCD934X_ANA_BUCK_PRE_EN1_MASK				BIT(1)
+#define WCD934X_ANA_BUCK_PRE_EN1_ENABLE				BIT(1)
+#define WCD934X_ANA_BUCK_HI_ACCU_EN_MASK			BIT(2)
+#define WCD934X_ANA_BUCK_HI_ACCU_ENABLE				BIT(2)
+#define WCD934X_ANA_RX_SUPPLIES					0x0608
+#define WCD934X_ANA_HPH						0x0609
+#define WCD934X_ANA_EAR						0x060a
+#define WCD934X_ANA_LO_1_2					0x060b
+#define WCD934X_ANA_AMIC1					0x060e
+#define WCD934X_ANA_AMIC2					0x060f
+#define WCD934X_ANA_AMIC3					0x0610
+#define WCD934X_ANA_AMIC4					0x0611
+#define WCD934X_ANA_MBHC_MECH					0x0614
+#define WCD934X_ANA_MBHC_ELECT					0x0615
+#define WCD934X_ANA_MBHC_ZDET					0x0616
+#define WCD934X_ANA_MBHC_RESULT_1				0x0617
+#define WCD934X_ANA_MBHC_RESULT_2				0x0618
+#define WCD934X_ANA_MBHC_RESULT_3				0x0619
+#define WCD934X_ANA_MICB1					0x0622
+#define WCD934X_MICB_VAL_MASK					GENMASK(5, 0)
+#define WCD934X_ANA_MICB_EN_MASK				GENMASK(7, 6)
+#define WCD934X_ANA_MICB_PULL_UP				0x80
+#define WCD934X_ANA_MICB_ENABLE					0x40
+#define WCD934X_ANA_MICB_DISABLE				0x0
+#define WCD934X_ANA_MICB2					0x0623
+#define WCD934X_ANA_MICB3					0x0625
+#define WCD934X_ANA_MICB4					0x0626
+#define WCD934X_BIAS_VBG_FINE_ADJ				0x0629
+#define WCD934X_MICB1_TEST_CTL_1				0x066b
+#define WCD934X_MICB1_TEST_CTL_2				0x066c
+#define WCD934X_MICB2_TEST_CTL_1				0x066e
+#define WCD934X_MICB3_TEST_CTL_1				0x0671
+#define WCD934X_MICB4_TEST_CTL_1				0x0674
+#define WCD934X_CLASSH_MODE_1					0x0697
+#define WCD934X_CLASSH_MODE_2					0x0698
+#define WCD934X_CLASSH_MODE_3					0x0699
+#define WCD934X_CLASSH_CTRL_VCL_1				0x069a
+#define WCD934X_CLASSH_CTRL_VCL_2				0x069b
+#define WCD934X_CLASSH_CTRL_CCL_1				0x069c
+#define WCD934X_CLASSH_CTRL_CCL_2				0x069d
+#define WCD934X_CLASSH_CTRL_CCL_3				0x069e
+#define WCD934X_CLASSH_CTRL_CCL_4				0x069f
+#define WCD934X_CLASSH_CTRL_CCL_5				0x06a0
+#define WCD934X_CLASSH_BUCK_TMUX_A_D				0x06a1
+#define WCD934X_CLASSH_BUCK_SW_DRV_CNTL				0x06a2
+#define WCD934X_RX_OCP_CTL					0x06b6
+#define WCD934X_RX_OCP_COUNT					0x06b7
+#define WCD934X_HPH_CNP_EN					0x06cb
+#define WCD934X_HPH_CNP_WG_CTL					0x06cc
+#define WCD934X_HPH_GM3_BOOST_EN_MASK				BIT(7)
+#define WCD934X_HPH_GM3_BOOST_ENABLE				BIT(7)
+#define WCD934X_HPH_OCP_CTL					0x06ce
+#define WCD934X_HPH_L_EN					0x06d3
+#define WCD934X_HPH_GAIN_SRC_SEL_MASK				BIT(5)
+#define WCD934X_HPH_GAIN_SRC_SEL_COMPANDER			0
+#define WCD934X_HPH_GAIN_SRC_SEL_REGISTER			BIT(5)
+#define WCD934X_HPH_L_TEST					0x06d4
+#define WCD934X_HPH_R_EN					0x06d6
+#define WCD934X_HPH_R_TEST					0x06d7
+#define WCD934X_HPH_OCP_DET_MASK				BIT(0)
+#define WCD934X_HPH_OCP_DET_ENABLE				BIT(0)
+#define WCD934X_HPH_OCP_DET_DISABLE				0
+#define WCD934X_DIFF_LO_LO2_COMPANDER				0x06ea
+#define WCD934X_DIFF_LO_LO1_COMPANDER				0x06eb
+#define WCD934X_CLK_SYS_MCLK_PRG				0x0711
+#define WCD934X_EXT_CLK_BUF_EN_MASK	BIT(7)
+#define WCD934X_EXT_CLK_BUF_EN		BIT(7)
+#define WCD934X_EXT_CLK_DIV_RATIO_MASK	GENMASK(5, 4)
+#define WCD934X_EXT_CLK_DIV_BY_2	0x10
+#define WCD934X_MCLK_SRC_MASK		BIT(1)
+#define WCD934X_MCLK_SRC_EXT_CLK	0
+#define WCD934X_MCLK_SRC_MASK		BIT(1)
+#define WCD934X_MCLK_EN_MASK		BIT(0)
+#define WCD934X_MCLK_EN			BIT(0)
+#define WCD934X_CLK_SYS_MCLK2_PRG1				0x0712
+#define WCD934X_CLK_SYS_MCLK2_PRG2				0x0713
+#define WCD934X_SIDO_NEW_VOUT_A_STARTUP				0x071b
+#define WCD934X_SIDO_NEW_VOUT_D_STARTUP				0x071c
+#define WCD934X_SIDO_NEW_VOUT_D_FREQ1				0x071d
+#define WCD934X_SIDO_NEW_VOUT_D_FREQ2				0x071e
+#define WCD934X_SIDO_RIPPLE_FREQ_EN_MASK			BIT(0)
+#define WCD934X_SIDO_RIPPLE_FREQ_ENABLE				BIT(0)
+#define WCD934X_MBHC_NEW_CTL_2					0x0721
+#define WCD934X_TX_NEW_AMIC_4_5_SEL				0x0727
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L			0x0733
+#define WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL			0x0735
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R			0x0736
+#define WCD934X_HPH_NEW_INT_HPH_TIMER1				0x073a
+#define WCD934X_HPH_AUTOCHOP_TIMER_EN_MASK			BIT(1)
+#define WCD934X_HPH_AUTOCHOP_TIMER_ENABLE			BIT(1)
+#define WCD934X_CDC_TX0_TX_PATH_CTL				0x0a31
+#define WCD934X_CDC_TX_PATH_CTL_PCM_RATE_MASK			GENMASK(3, 0)
+#define WCD934X_CDC_TX_PATH_CTL(dec)			(0xa31 + dec * 0x10)
+#define WCD934X_CDC_TX0_TX_PATH_CFG0				0x0a32
+#define WCD934X_CDC_TX0_TX_PATH_CFG1				0x0a33
+#define WCD934X_CDC_TX0_TX_VOL_CTL				0x0a34
+#define WCD934X_CDC_TX0_TX_PATH_192_CTL				0x0a35
+#define WCD934X_CDC_TX0_TX_PATH_192_CFG				0x0a36
+#define WCD934X_CDC_TX0_TX_PATH_SEC2				0x0a39
+#define WCD934X_HPH_CUTOFF_FREQ_CHANGE_REQ_MASK			BIT(1)
+#define WCD934X_HPH_CUTOFF_FREQ_CHANGE_REQ			BIT(1)
+#define WCD934X_CDC_TX1_TX_PATH_CTL				0x0a41
+#define WCD934X_CDC_TX1_TX_PATH_CFG0				0x0a42
+#define WCD934X_CDC_TX1_TX_PATH_CFG1				0x0a43
+#define WCD934X_CDC_TX1_TX_VOL_CTL				0x0a44
+#define WCD934X_CDC_TX2_TX_PATH_CTL				0x0a51
+#define WCD934X_CDC_TX2_TX_PATH_CFG0				0x0a52
+#define WCD934X_CDC_TX2_TX_PATH_CFG1				0x0a53
+#define WCD934X_CDC_TX2_TX_VOL_CTL				0x0a54
+#define WCD934X_CDC_TX3_TX_PATH_CTL				0x0a61
+#define WCD934X_CDC_TX3_TX_PATH_CFG0				0x0a62
+#define WCD934X_CDC_TX3_TX_PATH_CFG1				0x0a63
+#define WCD934X_CDC_TX3_TX_VOL_CTL				0x0a64
+#define WCD934X_CDC_TX3_TX_PATH_192_CTL				0x0a65
+#define WCD934X_CDC_TX3_TX_PATH_192_CFG				0x0a66
+#define WCD934X_CDC_TX4_TX_PATH_CTL				0x0a71
+#define WCD934X_CDC_TX4_TX_PATH_CFG0				0x0a72
+#define WCD934X_CDC_TX4_TX_PATH_CFG1				0x0a73
+#define WCD934X_CDC_TX4_TX_VOL_CTL				0x0a74
+#define WCD934X_CDC_TX4_TX_PATH_192_CTL				0x0a75
+#define WCD934X_CDC_TX4_TX_PATH_192_CFG				0x0a76
+#define WCD934X_CDC_TX5_TX_PATH_CTL				0x0a81
+#define WCD934X_CDC_TX5_TX_PATH_CFG0				0x0a82
+#define WCD934X_CDC_TX5_TX_PATH_CFG1				0x0a83
+#define WCD934X_CDC_TX5_TX_VOL_CTL				0x0a84
+#define WCD934X_CDC_TX5_TX_PATH_192_CTL				0x0a85
+#define WCD934X_CDC_TX5_TX_PATH_192_CFG				0x0a86
+#define WCD934X_CDC_TX6_TX_PATH_CTL				0x0a91
+#define WCD934X_CDC_TX6_TX_PATH_CFG0				0x0a92
+#define WCD934X_CDC_TX6_TX_PATH_CFG1				0x0a93
+#define WCD934X_CDC_TX6_TX_VOL_CTL				0x0a94
+#define WCD934X_CDC_TX6_TX_PATH_192_CTL				0x0a95
+#define WCD934X_CDC_TX6_TX_PATH_192_CFG				0x0a96
+#define WCD934X_CDC_TX7_TX_PATH_CTL				0x0aa1
+#define WCD934X_CDC_TX7_TX_PATH_CFG0				0x0aa2
+#define WCD934X_CDC_TX7_TX_PATH_CFG1				0x0aa3
+#define WCD934X_CDC_TX7_TX_VOL_CTL				0x0aa4
+#define WCD934X_CDC_TX7_TX_PATH_192_CTL				0x0aa5
+#define WCD934X_CDC_TX7_TX_PATH_192_CFG				0x0aa6
+#define WCD934X_CDC_TX8_TX_PATH_CTL				0x0ab1
+#define WCD934X_CDC_TX8_TX_PATH_CFG0				0x0ab2
+#define WCD934X_CDC_TX8_TX_PATH_CFG1				0x0ab3
+#define WCD934X_CDC_TX8_TX_VOL_CTL				0x0ab4
+#define WCD934X_CDC_TX8_TX_PATH_192_CTL				0x0ab5
+#define WCD934X_CDC_TX8_TX_PATH_192_CFG				0x0ab6
+#define WCD934X_CDC_TX9_SPKR_PROT_PATH_CFG0			0x0ac3
+#define WCD934X_CDC_TX10_SPKR_PROT_PATH_CFG0			0x0ac7
+#define WCD934X_CDC_TX11_SPKR_PROT_PATH_CFG0			0x0acb
+#define WCD934X_CDC_TX12_SPKR_PROT_PATH_CFG0			0x0acf
+#define WCD934X_CDC_COMPANDER1_CTL0				0x0b01
+#define WCD934X_COMP_CLK_EN_MASK				BIT(0)
+#define WCD934X_COMP_CLK_ENABLE					BIT(0)
+#define WCD934X_COMP_SOFT_RST_MASK				BIT(1)
+#define WCD934X_COMP_SOFT_RST_ENABLE				BIT(1)
+#define WCD934X_COMP_HALT_MASK					BIT(2)
+#define WCD934X_COMP_HALT					BIT(2)
+#define WCD934X_COMP_SOFT_RST_DISABLE				0
+#define WCD934X_CDC_COMPANDER1_CTL7				0x0b08
+#define WCD934X_HPH_LOW_PWR_MODE_EN_MASK			BIT(5)
+#define WCD934X_CDC_COMPANDER2_CTL7				0x0b10
+#define WCD934X_CDC_COMPANDER7_CTL3				0x0b34
+#define WCD934X_CDC_COMPANDER7_CTL7				0x0b38
+#define WCD934X_CDC_COMPANDER8_CTL3				0x0b3c
+#define WCD934X_CDC_COMPANDER8_CTL7				0x0b40
+#define WCD934X_CDC_RX0_RX_PATH_CTL				0x0b41
+#define WCD934X_CDC_RX_PGA_MUTE_EN_MASK				BIT(4)
+#define WCD934X_CDC_RX_PGA_MUTE_ENABLE				BIT(4)
+#define WCD934X_CDC_RX_PGA_MUTE_DISABLE				0
+#define WCD934X_RX_CLK_EN_MASK					BIT(5)
+#define WCD934X_RX_CLK_ENABLE					BIT(5)
+#define WCD934X_RX_RESET_MASK					BIT(6)
+#define WCD934X_RX_RESET_ENABLE					BIT(6)
+#define WCD934X_RX_RESET_DISABLE				0
+#define WCD934X_RX_PCM_RATE_MASK				GENMASK(3, 0)
+#define WCD934X_RX_PCM_RATE_F_48K				0x04
+#define WCD934X_CDC_RX_PATH_CTL(rx)			(0xb41 + rx * 0x14)
+#define WCD934X_CDC_MIX_PCM_RATE_MASK				GENMASK(3, 0)
+#define WCD934X_CDC_RX0_RX_PATH_CFG0				0x0b42
+#define WCD934X_RX_DLY_ZN_EN_MASK				BIT(3)
+#define WCD934X_RX_DLY_ZN_ENABLE				BIT(3)
+#define WCD934X_RX_DLY_ZN_DISABLE				0
+#define WCD934X_CDC_RX0_RX_PATH_CFG1				0x0b43
+#define WCD934X_CDC_RX0_RX_PATH_CFG2				0x0b44
+#define WCD934X_CDC_RX0_RX_VOL_CTL				0x0b45
+#define WCD934X_CDC_RX0_RX_PATH_MIX_CTL				0x0b46
+#define WCD934X_CDC_RX_MIX_CLK_EN_MASK				BIT(5)
+#define WCD934X_CDC_RX_MIX_CLK_ENABLE				BIT(5)
+#define WCD934X_CDC_RX_PATH_MIX_CTL(rx)			(0xb46 + rx * 0x14)
+#define WCD934X_CDC_RX0_RX_PATH_MIX_CFG				0x0b47
+#define WCD934X_CDC_RX0_RX_VOL_MIX_CTL				0x0b48
+#define WCD934X_CDC_RX0_RX_PATH_SEC0				0x0b49
+#define WCD934X_CDC_RX0_RX_PATH_DSMDEM_CTL			0x0b53
+#define WCD934X_CDC_RX1_RX_PATH_CTL				0x0b55
+#define WCD934X_RX_PATH_PGA_MUTE_EN_MASK			BIT(4)
+#define WCD934X_RX_PATH_PGA_MUTE_ENABLE				BIT(4)
+#define WCD934X_CDC_RX_PATH_PGA_MUTE_DISABLE			0
+#define WCD934X_CDC_RX_PATH_CLK_EN_MASK				BIT(5)
+#define WCD934X_CDC_RX_PATH_CLK_ENABLE				BIT(5)
+#define WCD934X_CDC_RX_PATH_CLK_DISABLE				0
+#define WCD934X_CDC_RX1_RX_PATH_CFG0				0x0b56
+#define WCD934X_HPH_CMP_EN_MASK					BIT(1)
+#define WCD934X_HPH_CMP_ENABLE					BIT(1)
+#define WCD934X_HPH_CMP_DISABLE					0
+#define WCD934X_CDC_RX1_RX_PATH_CFG2				0x0b58
+#define WCD934X_CDC_RX1_RX_VOL_CTL				0x0b59
+#define WCD934X_CDC_RX1_RX_PATH_MIX_CTL				0x0b5a
+#define WCD934X_CDC_RX1_RX_PATH_MIX_CFG				0x0b5b
+#define WCD934X_CDC_RX1_RX_VOL_MIX_CTL				0x0b5c
+#define WCD934X_CDC_RX1_RX_PATH_SEC0				0x0b5d
+#define WCD934X_CDC_RX1_RX_PATH_SEC3				0x0b60
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_MASK			GENMASK(5, 2)
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_0P3125		0x14
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_0P0000		0
+#define WCD934X_CDC_RX1_RX_PATH_DSMDEM_CTL			0x0b67
+#define WCD934X_CDC_RX2_RX_PATH_CTL				0x0b69
+#define WCD934X_CDC_RX2_RX_PATH_CFG0				0x0b6a
+#define WCD934X_CDC_RX_PATH_CFG_HD2_EN_MASK			BIT(2)
+#define WCD934X_CDC_RX_PATH_CFG_HD2_ENABLE			BIT(2)
+#define WCD934X_CDC_RX_PATH_CFG_HD2_DISABLE			0
+#define WCD934X_CDC_RX2_RX_PATH_CFG2				0x0b6c
+#define WCD934X_CDC_RX2_RX_VOL_CTL				0x0b6d
+#define WCD934X_CDC_RX2_RX_PATH_MIX_CTL				0x0b6e
+#define WCD934X_CDC_RX2_RX_PATH_MIX_CFG				0x0b6f
+#define WCD934X_CDC_RX2_RX_VOL_MIX_CTL				0x0b70
+#define WCD934X_CDC_RX2_RX_PATH_SEC0				0x0b71
+#define WCD934X_CDC_RX2_RX_PATH_SEC3				0x0b74
+#define WCD934X_CDC_RX2_RX_PATH_DSMDEM_CTL			0x0b7b
+#define WCD934X_CDC_RX3_RX_PATH_CTL				0x0b7d
+#define WCD934X_CDC_RX3_RX_PATH_CFG0				0x0b6e
+#define WCD934X_CDC_RX3_RX_PATH_CFG2				0x0b80
+#define WCD934X_CDC_RX3_RX_VOL_CTL				0x0b81
+#define WCD934X_CDC_RX3_RX_PATH_MIX_CTL				0x0b82
+#define WCD934X_CDC_RX3_RX_PATH_MIX_CFG				0x0b83
+#define WCD934X_CDC_RX3_RX_VOL_MIX_CTL				0x0b84
+#define WCD934X_CDC_RX3_RX_PATH_SEC0				0x0b85
+#define WCD934X_CDC_RX3_RX_PATH_DSMDEM_CTL			0x0b8f
+#define WCD934X_CDC_RX4_RX_PATH_CTL				0x0b91
+#define WCD934X_CDC_RX4_RX_PATH_CFG0				0x0b92
+#define WCD934X_CDC_RX4_RX_PATH_CFG2				0x0b94
+#define WCD934X_CDC_RX4_RX_VOL_CTL				0x0b95
+#define WCD934X_CDC_RX4_RX_PATH_MIX_CTL				0x0b96
+#define WCD934X_CDC_RX4_RX_PATH_MIX_CFG				0x0b97
+#define WCD934X_CDC_RX4_RX_VOL_MIX_CTL				0x0b98
+#define WCD934X_CDC_RX4_RX_PATH_SEC0				0x0b99
+#define WCD934X_CDC_RX4_RX_PATH_DSMDEM_CTL			0x0ba3
+#define WCD934X_CDC_RX7_RX_PATH_CTL				0x0bcd
+#define WCD934X_CDC_RX7_RX_PATH_CFG0				0x0bce
+#define WCD934X_CDC_RX7_RX_PATH_CFG1				0x0bcf
+#define WCD934X_CDC_RX7_RX_PATH_CFG2				0x0bd0
+#define WCD934X_CDC_RX7_RX_VOL_CTL				0x0bd1
+#define WCD934X_CDC_RX7_RX_PATH_MIX_CTL				0x0bd2
+#define WCD934X_CDC_RX7_RX_PATH_MIX_CFG				0x0bd3
+#define WCD934X_CDC_RX7_RX_VOL_MIX_CTL				0x0bd4
+#define WCD934X_CDC_RX7_RX_PATH_SEC1				0x0bd6
+#define WCD934X_CDC_RX7_RX_PATH_MIX_SEC0			0x0bdd
+#define WCD934X_CDC_RX7_RX_PATH_DSMDEM_CTL			0x0bdf
+#define WCD934X_CDC_RX8_RX_PATH_CTL				0x0be1
+#define WCD934X_CDC_RX8_RX_PATH_CFG0				0x0be2
+#define WCD934X_CDC_RX8_RX_PATH_CFG1				0x0be3
+#define WCD934X_RX_SMART_BOOST_EN_MASK				BIT(0)
+#define WCD934X_RX_SMART_BOOST_ENABLE				BIT(0)
+#define WCD934X_RX_SMART_BOOST_DISABLE			0
+#define WCD934X_CDC_RX8_RX_PATH_CFG2				0x0be4
+#define WCD934X_CDC_RX8_RX_VOL_CTL				0x0be5
+#define WCD934X_CDC_RX8_RX_PATH_MIX_CTL				0x0be6
+#define WCD934X_CDC_RX8_RX_PATH_MIX_CFG				0x0be7
+#define WCD934X_CDC_RX8_RX_VOL_MIX_CTL				0x0be8
+#define WCD934X_CDC_RX8_RX_PATH_SEC1				0x0bea
+#define WCD934X_CDC_RX8_RX_PATH_MIX_SEC0			0x0bf1
+#define WCD934X_CDC_RX8_RX_PATH_DSMDEM_CTL			0x0bf3
+#define WCD934X_CDC_CLSH_DECAY_CTRL				0x0c03
+#define WCD934X_CDC_CLSH_K2_MSB					0x0c0a
+#define WCD934X_CDC_CLSH_K2_LSB					0x0c0b
+#define WCD934X_CDC_CLSH_TEST0					0x0c0f
+#define WCD934X_CDC_BOOST0_BOOST_PATH_CTL			0x0c19
+#define WCD934X_BOOST_PATH_CLK_EN_MASK				BIT(4)
+#define WCD934X_BOOST_PATH_CLK_ENABLE				BIT(4)
+#define WCD934X_BOOST_PATH_CLK_DISABLE				0
+#define WCD934X_CDC_BOOST0_BOOST_CTL				0x0c1a
+#define WCD934X_CDC_BOOST0_BOOST_CFG1				0x0c1b
+#define WCD934X_CDC_BOOST0_BOOST_CFG2				0x0c1c
+#define WCD934X_CDC_BOOST1_BOOST_PATH_CTL			0x0c21
+#define WCD934X_CDC_BOOST1_BOOST_CTL				0x0c22
+#define WCD934X_CDC_BOOST1_BOOST_CFG1				0x0c23
+#define WCD934X_CDC_BOOST1_BOOST_CFG2				0x0c24
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_0			0x0c91
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_1			0x0c92
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_2			0x0c93
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_3			0x0c94
+#define WCD934X_SWR_AHB_BRIDGE_ACCESS_STATUS			0x0c96
+#define WCD934X_CDC_SIDETONE_SRC0_ST_SRC_PATH_CTL		0x0cb5
+#define WCD934X_CDC_SIDETONE_SRC1_ST_SRC_PATH_CTL		0x0cb9
+#define WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG0			0x0d01
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_CFG0(i)		(0xd01 + i * 0x2)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_SEL_MASK			GENMASK(3, 0)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG1			0x0d02
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_CFG1(i)		(0xd02 + i * 0x2)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG0			0x0d03
+#define WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG1			0x0d04
+#define WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG0			0x0d05
+#define WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG1			0x0d06
+#define WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG0			0x0d07
+#define WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG1			0x0d08
+#define WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG0			0x0d09
+#define WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG1			0x0d0a
+#define WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG0			0x0d0f
+#define WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG1			0x0d10
+#define WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG0			0x0d11
+#define WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG1			0x0d12
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG0			0x0d13
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG1			0x0d14
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG2			0x0d15
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG3			0x0d16
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG4			0x0d17
+#define WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0		0x0d18
+#define WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1		0x0d19
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG0			0x0d1d
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1			0x0d1e
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG0			0x0d1f
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG1			0x0d20
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG0			0x0d21
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG1			0x0d22
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG0			0x0d23
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1			0x0d25
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0			0x0d26
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX5_CFG0			0x0d27
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX6_CFG0			0x0d28
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX7_CFG0			0x0d29
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX8_CFG0			0x0d2a
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX10_CFG0			0x0d2b
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX11_CFG0			0x0d2c
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX12_CFG0			0x0d2d
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX13_CFG0			0x0d2e
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG0		0x0d31
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG1		0x0d32
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG2		0x0d33
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG3		0x0d34
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG0		0x0d35
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG1		0x0d36
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG2		0x0d37
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG3		0x0d38
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG0			0x0d3a
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG1			0x0d3b
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG2			0x0d3c
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG3			0x0d3d
+#define WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL			0x0d41
+#define WCD934X_CDC_MCLK_EN_MASK				BIT(0)
+#define WCD934X_CDC_MCLK_EN_ENABLE				BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL			0x0d42
+#define WCD934X_CDC_FS_MCLK_CNT_EN_MASK				BIT(0)
+#define WCD934X_CDC_FS_MCLK_CNT_ENABLE				BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_SWR_CONTROL			0x0d43
+#define WCD934X_CDC_SWR_CLK_EN_MASK				BIT(0)
+#define WCD934X_CDC_SWR_CLK_ENABLE				BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_DSD_CONTROL			0x0d44
+#define WCD934X_CDC_CLK_RST_CTRL_ASRC_SHARE_CONTROL		0x0d45
+#define WCD934X_CDC_CLK_RST_CTRL_GFM_CONTROL			0x0d46
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_PATH_CTL			0x0d55
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL		0x0d56
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL		0x0d57
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL		0x0d58
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL		0x0d59
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B5_CTL		0x0d5a
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B6_CTL		0x0d5b
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B7_CTL		0x0d5c
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B8_CTL		0x0d5d
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_CTL			0x0d5e
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_TIMER_CTL		0x0d5f
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL		0x0d60
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL		0x0d61
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_PATH_CTL			0x0d65
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL		0x0d66
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL		0x0d67
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL		0x0d68
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B4_CTL		0x0d69
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B5_CTL		0x0d6a
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B6_CTL		0x0d6b
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B7_CTL		0x0d6c
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B8_CTL		0x0d6d
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_CTL			0x0d6e
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_TIMER_CTL		0x0d6f
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B1_CTL		0x0d70
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B2_CTL		0x0d71
+#define WCD934X_CDC_TOP_TOP_CFG1				0x0d82
+#define WCD934X_CDC_TOP_TOP_CFG7				0x0d88
+#define WCD934X_CDC_TOP_HPHL_COMP_LUT				0x0d8b
+#define WCD934X_CDC_TOP_HPHR_COMP_LUT				0x0d90
+#define WCD934X_HPH_LUT_BYPASS_MASK				BIT(7)
+#define WCD934X_HPH_LUT_BYPASS_ENABLE				BIT(7)
+#define WCD934X_HPH_LUT_BYPASS_DISABLE				0
+#define WCD934X_CODEC_CPR_WR_DATA_0				0x5001
+#define WCD934X_CODEC_CPR_WR_ADDR_0				0x5005
+#define WCD934X_CODEC_CPR_SVS_CX_VDD				0x5022
+#define WCD934X_CODEC_CPR_SVS2_CX_VDD				0x5023
+#define WCD934X_CODEC_CPR_SVS2_MIN_CX_VDD			0x5027
+#define WCD934X_TLMM_DMIC1_CLK_PINCFG				0x8015
+#define WCD934X_TLMM_DMIC1_DATA_PINCFG				0x8016
+#define WCD934X_TLMM_DMIC2_CLK_PINCFG				0x8017
+#define WCD934X_TLMM_DMIC2_DATA_PINCFG				0x8018
+#define WCD934X_TLMM_DMIC3_CLK_PINCFG				0x8019
+#define WCD934X_TLMM_DMIC3_DATA_PINCFG				0x801a
+#define WCD934X_TEST_DEBUG_PAD_DRVCTL_0				0x803b
+#define WCD934X_TEST_DEBUG_NPL_DLY_TEST_1			0x803e
+
+#define WCD934X_MAX_REGISTER					0xffff
+#define WCD934X_SEL_REGISTER					0x800
+#define WCD934X_SEL_MASK					0xff
+#define WCD934X_SEL_SHIFT					0x0
+#define WCD934X_WINDOW_START					0x800
+#define WCD934X_WINDOW_LENGTH					0x100
+
+/* SLIMBUS Slave Registers */
+#define WCD934X_SLIM_PGD_PORT_INT_EN0				0x30
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_RX_0			0x34
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_RX_1			0x35
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_TX_0			0x36
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_TX_1			0x37
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_RX_0			0x38
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_RX_1			0x39
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_TX_0			0x3A
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_TX_1			0x3B
+#define WCD934X_SLIM_PGD_PORT_INT_RX_SOURCE0			0x60
+#define WCD934X_SLIM_PGD_PORT_INT_TX_SOURCE0			0x70
+#define WCD934X_SLIM_PGD_RX_PORT_CFG(p)				(0x30 + p)
+#define WCD934X_SLIM_PGD_PORT_CFG(p)				(0x40 + p)
+#define WCD934X_SLIM_PGD_TX_PORT_CFG(p)				(0x50 + p)
+#define WCD934X_SLIM_PGD_PORT_INT_SRC(p)			(0x60 + p)
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS(p)			(0x80 + p)
+#define WCD934X_SLIM_PGD_TX_PORT_MULTI_CHNL_0(p)		(0x100 + 4 * p)
+/* ports range from 10-16 */
+#define WCD934X_SLIM_PGD_TX_PORT_MULTI_CHNL_1(p)		(0x101 + 4 * p)
+#define WCD934X_SLIM_PGD_RX_PORT_MULTI_CHNL_0(p)		(0x140 + 4 * p)
+
+#define SLIM_MANF_ID_QCOM			0x217
+#define SLIM_PROD_CODE_WCD9340			0x250
+#define SLIM_DEV_IDX_WCD9340			0x1
+#define SLIM_DEV_INSTANCE_ID_WCD9340		0
+
+#endif
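Note on the register header above: the *_MASK / *_ENABLE / *_DISABLE triples are intended for read-modify-write updates through regmap. A minimal sketch, assuming a regmap handle obtained from the parent device and with error handling omitted, of gating the compander 1 clock:

	/* Turn the compander 1 clock on, then off again. */
	regmap_update_bits(regmap, WCD934X_CDC_COMPANDER1_CTL0,
			   WCD934X_COMP_CLK_EN_MASK, WCD934X_COMP_CLK_ENABLE);
	/* ... */
	regmap_update_bits(regmap, WCD934X_CDC_COMPANDER1_CTL0,
			   WCD934X_COMP_CLK_EN_MASK, 0);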
diff --git a/include/linux/mfd/wcd934x/wcd934x.h b/include/linux/mfd/wcd934x/wcd934x.h
new file mode 100644
index 0000000..f3c65a0
--- /dev/null
+++ b/include/linux/mfd/wcd934x/wcd934x.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __WCD934X_H__
+#define __WCD934X_H__
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/slimbus.h>
+
+#define WCD934X_MAX_SUPPLY	5
+
+/**
+ * struct wcd934x_ddata - wcd934x driver data
+ *
+ * @supplies:	wcd934x regulator supplies
+ * @irq_data:	wcd934x irq_chip data
+ * @regmap:	wcd934x regmap pointer
+ * @extclk:	External clock
+ * @dev:	device instance of wcd934x slim device
+ * @irq:	irq for wcd934x.
+ */
+struct wcd934x_ddata {
+	struct regulator_bulk_data supplies[WCD934X_MAX_SUPPLY];
+	struct regmap_irq_chip_data *irq_data;
+	struct regmap *regmap;
+	struct clk *extclk;
+	struct device *dev;
+	int irq;
+};
+
+#endif /* __WCD934X_H__ */
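Sub-drivers of this MFD would typically reach the shared state through the parent's driver data; a hedged sketch, assuming the core driver publishes it with dev_set_drvdata() on the parent device:

	struct wcd934x_ddata *ddata = dev_get_drvdata(dev->parent);

	if (!ddata || !ddata->regmap)
		return -EINVAL;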
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 508e8cc..653d2a0 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -130,6 +130,11 @@ enum {
 	MLX4_CQE_STATUS_IPOK		= 1 << 12,
 };
 
+/* L4_CSUM is logically part of status, but has to be checked against badfcs_enc */
+enum {
+	MLX4_CQE_STATUS_L4_CSUM		= 1 << 2,
+};
+
 enum {
 	MLX4_CQE_LLC                     = 1,
 	MLX4_CQE_SNAP                    = 1 << 1,
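To illustrate the comment above: a consumer is expected to honour L4_CSUM only when bad-FCS reporting is disabled. A hedged sketch (badfcs_enc and the skb handling are placeholders, not the driver's actual code):

	if (!badfcs_enc &&
	    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_L4_CSUM)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;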
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 22bd0d5..277a51d 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -463,6 +463,11 @@ struct mlx5_vf_context {
 	int	enabled;
 	u64	port_guid;
 	u64	node_guid;
+	/* Valid bits are used to validate administrative guid only.
+	 * Enabled after ndo_set_vf_guid
+	 */
+	u8	port_guid_valid:1;
+	u8	node_guid_valid:1;
 	enum port_state_policy	policy;
 };
 
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index ee0a34d..ff8c9d5 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1188,7 +1188,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         log_max_cq[0x5];
 
 	u8         log_max_eq_sz[0x8];
-	u8         reserved_at_e8[0x2];
+	u8         relaxed_ordering_write[0x1];
+	u8         relaxed_ordering_read[0x1];
 	u8         log_max_mkey[0x6];
 	u8         reserved_at_f0[0x8];
 	u8         dump_fill_mkey[0x1];
@@ -1211,7 +1212,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_130[0xa];
 	u8         log_max_ra_res_dc[0x6];
 
-	u8         reserved_at_140[0xa];
+	u8         reserved_at_140[0x9];
+	u8         roce_accl[0x1];
 	u8         log_max_ra_req_qp[0x6];
 	u8         reserved_at_150[0xa];
 	u8         log_max_ra_res_qp[0x6];
@@ -1446,14 +1448,15 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 
 	u8         reserved_at_440[0x20];
 
-	u8         tls[0x1];
-	u8         reserved_at_461[0x2];
+	u8         reserved_at_460[0x3];
 	u8         log_max_uctx[0x5];
 	u8         reserved_at_468[0x3];
 	u8         log_max_umem[0x5];
 	u8         max_num_eqs[0x10];
 
-	u8         reserved_at_480[0x3];
+	u8         reserved_at_480[0x1];
+	u8         tls_tx[0x1];
+	u8         reserved_at_482[0x1];
 	u8         log_max_l2_table[0x5];
 	u8         reserved_at_488[0x8];
 	u8         log_uar_page_sz[0x10];
@@ -3428,7 +3431,9 @@ struct mlx5_ifc_mkc_bits {
 
 	u8         translations_octword_size[0x20];
 
-	u8         reserved_at_1c0[0x1b];
+	u8         reserved_at_1c0[0x19];
+	u8         relaxed_ordering_read[0x1];
+	u8         reserved_at_1d9[0x1];
 	u8         log_page_size[0x5];
 
 	u8         reserved_at_1e0[0x20];
@@ -4889,7 +4894,19 @@ struct mlx5_ifc_query_q_counter_out_bits {
 
 	u8         req_cqe_flush_error[0x20];
 
-	u8         reserved_at_620[0x1e0];
+	u8         reserved_at_620[0x20];
+
+	u8         roce_adp_retrans[0x20];
+
+	u8         roce_adp_retrans_to[0x20];
+
+	u8         roce_slow_restart[0x20];
+
+	u8         roce_slow_restart_cnps[0x20];
+
+	u8         roce_slow_restart_trans[0x20];
+
+	u8         reserved_at_6e0[0x120];
 };
 
 struct mlx5_ifc_query_q_counter_in_bits {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 67f8451..52269e5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -70,11 +70,6 @@ static inline void totalram_pages_add(long count)
 	atomic_long_add(count, &_totalram_pages);
 }
 
-static inline void totalram_pages_set(long val)
-{
-	atomic_long_set(&_totalram_pages, val);
-}
-
 extern void * high_memory;
 extern int page_cluster;
 
@@ -916,10 +911,6 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 
 #define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))
 
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
-#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
-#endif
-
 #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
 #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
@@ -947,9 +938,10 @@ static inline bool is_zone_device_page(const struct page *page)
 #endif
 
 #ifdef CONFIG_DEV_PAGEMAP_OPS
-void __put_devmap_managed_page(struct page *page);
+void free_devmap_managed_page(struct page *page);
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-static inline bool put_devmap_managed_page(struct page *page)
+
+static inline bool page_is_devmap_managed(struct page *page)
 {
 	if (!static_branch_unlikely(&devmap_managed_key))
 		return false;
@@ -958,7 +950,6 @@ static inline bool put_devmap_managed_page(struct page *page)
 	switch (page->pgmap->type) {
 	case MEMORY_DEVICE_PRIVATE:
 	case MEMORY_DEVICE_FS_DAX:
-		__put_devmap_managed_page(page);
 		return true;
 	default:
 		break;
@@ -966,11 +957,17 @@ static inline bool put_devmap_managed_page(struct page *page)
 	return false;
 }
 
+void put_devmap_managed_page(struct page *page);
+
 #else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool put_devmap_managed_page(struct page *page)
+static inline bool page_is_devmap_managed(struct page *page)
 {
 	return false;
 }
+
+static inline void put_devmap_managed_page(struct page *page)
+{
+}
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
 static inline bool is_device_private_page(const struct page *page)
@@ -1023,37 +1020,37 @@ static inline void put_page(struct page *page)
 	 * need to inform the device driver through callback. See
 	 * include/linux/memremap.h and HMM for details.
 	 */
-	if (put_devmap_managed_page(page))
+	if (page_is_devmap_managed(page)) {
+		put_devmap_managed_page(page);
 		return;
+	}
 
 	if (put_page_testzero(page))
 		__put_page(page);
 }
 
 /**
- * put_user_page() - release a gup-pinned page
+ * unpin_user_page() - release a gup-pinned page
  * @page:            pointer to page to be released
  *
- * Pages that were pinned via get_user_pages*() must be released via
- * either put_user_page(), or one of the put_user_pages*() routines
- * below. This is so that eventually, pages that are pinned via
- * get_user_pages*() can be separately tracked and uniquely handled. In
- * particular, interactions with RDMA and filesystems need special
- * handling.
+ * Pages that were pinned via pin_user_pages*() must be released via either
+ * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
+ * that eventually such pages can be separately tracked and uniquely handled. In
+ * particular, interactions with RDMA and filesystems need special handling.
  *
- * put_user_page() and put_page() are not interchangeable, despite this early
- * implementation that makes them look the same. put_user_page() calls must
- * be perfectly matched up with get_user_page() calls.
+ * unpin_user_page() and put_page() are not interchangeable, despite this early
+ * implementation that makes them look the same. unpin_user_page() calls must
+ * be perfectly matched up with pin*() calls.
  */
-static inline void put_user_page(struct page *page)
+static inline void unpin_user_page(struct page *page)
 {
 	put_page(page);
 }
 
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
-			       bool make_dirty);
+void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+				 bool make_dirty);
 
-void put_user_pages(struct page **pages, unsigned long npages);
+void unpin_user_pages(struct page **pages, unsigned long npages);
 
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 #define SECTION_IN_PAGE_FLAGS
@@ -1501,9 +1498,16 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long start, unsigned long nr_pages,
 			    unsigned int gup_flags, struct page **pages,
 			    struct vm_area_struct **vmas, int *locked);
+long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+			   unsigned long start, unsigned long nr_pages,
+			   unsigned int gup_flags, struct page **pages,
+			   struct vm_area_struct **vmas, int *locked);
 long get_user_pages(unsigned long start, unsigned long nr_pages,
 			    unsigned int gup_flags, struct page **pages,
 			    struct vm_area_struct **vmas);
+long pin_user_pages(unsigned long start, unsigned long nr_pages,
+		    unsigned int gup_flags, struct page **pages,
+		    struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 		    unsigned int gup_flags, struct page **pages, int *locked);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
@@ -1511,6 +1515,8 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 
 int get_user_pages_fast(unsigned long start, int nr_pages,
 			unsigned int gup_flags, struct page **pages);
+int pin_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages);
 
 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
@@ -2176,12 +2182,6 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
 					struct mminit_pfnnid_cache *state);
 #endif
 
-#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
-void zero_resv_unavail(void);
-#else
-static inline void zero_resv_unavail(void) {}
-#endif
-
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
 		enum memmap_context, struct vmem_altmap *);
@@ -2323,6 +2323,7 @@ extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
 		       struct list_head *uf, bool downgrade);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
 		     struct list_head *uf);
+extern int do_madvise(unsigned long start, size_t len_in, int behavior);
 
 static inline unsigned long
 do_mmap_pgoff(struct file *file, unsigned long addr,
@@ -2528,6 +2529,8 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn, pgprot_t pgprot);
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			pfn_t pfn);
+vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
+			pfn_t pfn, pgprot_t pgprot);
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
 		unsigned long addr, pfn_t pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
@@ -2574,13 +2577,15 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_ANON	0x8000	/* don't do file mappings */
 #define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
 #define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */
+#define FOLL_PIN	0x40000	/* pages must be released via unpin_user_page */
 
 /*
- * NOTE on FOLL_LONGTERM:
+ * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
+ * other. Here is what they mean, and how to use them:
  *
  * FOLL_LONGTERM indicates that the page will be held for an indefinite time
- * period _often_ under userspace control.  This is contrasted with
- * iov_iter_get_pages() where usages which are transient.
+ * period _often_ under userspace control.  This is in contrast to
+ * iov_iter_get_pages(), whose usages are transient.
  *
  * FIXME: For pages which are part of a filesystem, mappings are subject to the
  * lifetime enforced by the filesystem and we need guarantees that longterm
@@ -2595,11 +2600,39 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
  * Currently only get_user_pages() and get_user_pages_fast() support this flag
  * and calls to get_user_pages_[un]locked are specifically not allowed.  This
  * is due to an incompatibility with the FS DAX check and
- * FAULT_FLAG_ALLOW_RETRY
+ * FAULT_FLAG_ALLOW_RETRY.
  *
- * In the CMA case: longterm pins in a CMA region would unnecessarily fragment
- * that region.  And so CMA attempts to migrate the page before pinning when
+ * In the CMA case: long term pins in a CMA region would unnecessarily fragment
+ * that region.  And so, CMA attempts to migrate the page before pinning, when
  * FOLL_LONGTERM is specified.
+ *
+ * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
+ * but an additional pin counting system) will be invoked. This is intended for
+ * anything that gets a page reference and then touches page data (for example,
+ * Direct IO). This lets the filesystem know that some non-file-system entity is
+ * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
+ * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
+ * a call to unpin_user_page().
+ *
+ * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
+ * and separate refcounting mechanisms, however, and that means that each has
+ * its own acquire and release mechanisms:
+ *
+ *     FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
+ *
+ *     FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_page() to release.
+ *
+ * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
+ * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
+ * calls applied to them, and that's perfectly OK. This is a constraint on the
+ * callers, not on the pages.)
+ *
+ * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
+ * directly by the caller. That's in order to help avoid mismatches when
+ * releasing pages: get_user_pages*() pages must be released via put_page(),
+ * while pin_user_pages*() pages must be released via unpin_user_page().
+ *
+ * Please see Documentation/vm/pin_user_pages.rst for more information.
  */
 
 static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
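Put together, the new pin_user_pages*()/unpin_user_page*() declarations and the FOLL_PIN comment imply a pairing along these lines; a sketch only, assuming a device that writes into the buffer (hence make_dirty = true) and a placeholder NR_PAGES:

	struct page *pages[NR_PAGES];
	int n;

	n = pin_user_pages_fast(start, NR_PAGES, FOLL_WRITE, pages);
	if (n <= 0)
		return n ? n : -EFAULT;

	/* ... hand the pages to the device and wait for the DMA ... */

	unpin_user_pages_dirty_lock(pages, n, true);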
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 270aa8f..c28911c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -312,7 +312,12 @@ struct vm_area_struct {
 	/* Second cache line starts here. */
 
 	struct mm_struct *vm_mm;	/* The address space we belong to. */
-	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
+
+	/*
+	 * Access permissions of this VMA.
+	 * See vmf_insert_mixed_prot() for discussion.
+	 */
+	pgprot_t vm_page_prot;
 	unsigned long vm_flags;		/* Flags, see mm.h. */
 
 	/*
@@ -490,7 +495,7 @@ struct mm_struct {
 		/* store ref to file /proc/<pid>/exe symlink points to */
 		struct file __rcu *exe_file;
 #ifdef CONFIG_MMU_NOTIFIER
-		struct mmu_notifier_mm *mmu_notifier_mm;
+		struct mmu_notifier_subscriptions *notifier_subscriptions;
 #endif
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
 		pgtable_t pmd_huge_pte; /* protected by page_table_lock */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 9e6caa8..736f691 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -8,7 +8,7 @@
 #include <linux/srcu.h>
 #include <linux/interval_tree.h>
 
-struct mmu_notifier_mm;
+struct mmu_notifier_subscriptions;
 struct mmu_notifier;
 struct mmu_notifier_range;
 struct mmu_interval_notifier;
@@ -73,7 +73,7 @@ struct mmu_notifier_ops {
 	 * through the gart alias address, so leading to memory
 	 * corruption.
 	 */
-	void (*release)(struct mmu_notifier *mn,
+	void (*release)(struct mmu_notifier *subscription,
 			struct mm_struct *mm);
 
 	/*
@@ -85,7 +85,7 @@ struct mmu_notifier_ops {
 	 * Start-end is necessary in case the secondary MMU is mapping the page
 	 * at a smaller granularity than the primary MMU.
 	 */
-	int (*clear_flush_young)(struct mmu_notifier *mn,
+	int (*clear_flush_young)(struct mmu_notifier *subscription,
 				 struct mm_struct *mm,
 				 unsigned long start,
 				 unsigned long end);
@@ -95,7 +95,7 @@ struct mmu_notifier_ops {
 	 * latter, it is supposed to test-and-clear the young/accessed bitflag
 	 * in the secondary pte, but it may omit flushing the secondary tlb.
 	 */
-	int (*clear_young)(struct mmu_notifier *mn,
+	int (*clear_young)(struct mmu_notifier *subscription,
 			   struct mm_struct *mm,
 			   unsigned long start,
 			   unsigned long end);
@@ -106,7 +106,7 @@ struct mmu_notifier_ops {
 	 * frequently used without actually clearing the flag or tearing
 	 * down the secondary mapping on the page.
 	 */
-	int (*test_young)(struct mmu_notifier *mn,
+	int (*test_young)(struct mmu_notifier *subscription,
 			  struct mm_struct *mm,
 			  unsigned long address);
 
@@ -114,7 +114,7 @@ struct mmu_notifier_ops {
 	 * change_pte is called in cases that pte mapping to page is changed:
 	 * for example, when ksm remaps pte to point to a new shared page.
 	 */
-	void (*change_pte)(struct mmu_notifier *mn,
+	void (*change_pte)(struct mmu_notifier *subscription,
 			   struct mm_struct *mm,
 			   unsigned long address,
 			   pte_t pte);
@@ -169,9 +169,9 @@ struct mmu_notifier_ops {
 	 * invalidate_range_end.
 	 *
 	 */
-	int (*invalidate_range_start)(struct mmu_notifier *mn,
+	int (*invalidate_range_start)(struct mmu_notifier *subscription,
 				      const struct mmu_notifier_range *range);
-	void (*invalidate_range_end)(struct mmu_notifier *mn,
+	void (*invalidate_range_end)(struct mmu_notifier *subscription,
 				     const struct mmu_notifier_range *range);
 
 	/*
@@ -192,8 +192,10 @@ struct mmu_notifier_ops {
 	 * of what was passed to invalidate_range_start()/end(), if
 	 * called between those functions.
 	 */
-	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
-				 unsigned long start, unsigned long end);
+	void (*invalidate_range)(struct mmu_notifier *subscription,
+				 struct mm_struct *mm,
+				 unsigned long start,
+				 unsigned long end);
 
 	/*
 	 * These callbacks are used with the get/put interface to manage the
@@ -206,7 +208,7 @@ struct mmu_notifier_ops {
 	 * and cannot sleep.
 	 */
 	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
-	void (*free_notifier)(struct mmu_notifier *mn);
+	void (*free_notifier)(struct mmu_notifier *subscription);
 };
 
 /*
@@ -235,7 +237,7 @@ struct mmu_notifier {
  *              was required but mmu_notifier_range_blockable(range) is false.
  */
 struct mmu_interval_notifier_ops {
-	bool (*invalidate)(struct mmu_interval_notifier *mni,
+	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
 			   const struct mmu_notifier_range *range,
 			   unsigned long cur_seq);
 };
@@ -265,7 +267,7 @@ struct mmu_notifier_range {
 
 static inline int mm_has_notifiers(struct mm_struct *mm)
 {
-	return unlikely(mm->mmu_notifier_mm);
+	return unlikely(mm->notifier_subscriptions);
 }
 
 struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
@@ -280,30 +282,31 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
 	up_write(&mm->mmap_sem);
 	return ret;
 }
-void mmu_notifier_put(struct mmu_notifier *mn);
+void mmu_notifier_put(struct mmu_notifier *subscription);
 void mmu_notifier_synchronize(void);
 
-extern int mmu_notifier_register(struct mmu_notifier *mn,
+extern int mmu_notifier_register(struct mmu_notifier *subscription,
 				 struct mm_struct *mm);
-extern int __mmu_notifier_register(struct mmu_notifier *mn,
+extern int __mmu_notifier_register(struct mmu_notifier *subscription,
 				   struct mm_struct *mm);
-extern void mmu_notifier_unregister(struct mmu_notifier *mn,
+extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
 				    struct mm_struct *mm);
 
-unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni);
-int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
+unsigned long
+mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
+int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
 				 struct mm_struct *mm, unsigned long start,
 				 unsigned long length,
 				 const struct mmu_interval_notifier_ops *ops);
 int mmu_interval_notifier_insert_locked(
-	struct mmu_interval_notifier *mni, struct mm_struct *mm,
+	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
 	unsigned long start, unsigned long length,
 	const struct mmu_interval_notifier_ops *ops);
-void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
+void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
 
 /**
  * mmu_interval_set_seq - Save the invalidation sequence
- * @mni - The mni passed to invalidate
+ * @interval_sub - The subscription passed to invalidate
  * @cur_seq - The cur_seq passed to the invalidate() callback
  *
  * This must be called unconditionally from the invalidate callback of a
@@ -314,15 +317,16 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
  * If the caller does not call mmu_interval_read_begin() or
  * mmu_interval_read_retry() then this call is not required.
  */
-static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
-					unsigned long cur_seq)
+static inline void
+mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
+		     unsigned long cur_seq)
 {
-	WRITE_ONCE(mni->invalidate_seq, cur_seq);
+	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
 }
 
 /**
  * mmu_interval_read_retry - End a read side critical section against a VA range
- * mni: The range
+ * interval_sub: The subscription
  * seq: The return of the paired mmu_interval_read_begin()
  *
  * This MUST be called under a user provided lock that is also held
@@ -334,15 +338,16 @@ static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
  * Returns true if an invalidation collided with this critical section, and
  * the caller should retry.
  */
-static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
-					   unsigned long seq)
+static inline bool
+mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
+			unsigned long seq)
 {
-	return mni->invalidate_seq != seq;
+	return interval_sub->invalidate_seq != seq;
 }
 
 /**
  * mmu_interval_check_retry - Test if a collision has occurred
- * mni: The range
+ * interval_sub: The subscription
  * seq: The return of the matching mmu_interval_read_begin()
  *
  * This can be used in the critical section between mmu_interval_read_begin()
@@ -357,14 +362,15 @@ static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
  * This call can be used as part of loops and other expensive operations to
  * expedite a retry.
  */
-static inline bool mmu_interval_check_retry(struct mmu_interval_notifier *mni,
-					    unsigned long seq)
+static inline bool
+mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
+			 unsigned long seq)
 {
 	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
-	return READ_ONCE(mni->invalidate_seq) != seq;
+	return READ_ONCE(interval_sub->invalidate_seq) != seq;
 }
 
-extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
+extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
 extern void __mmu_notifier_release(struct mm_struct *mm);
 extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
 					  unsigned long start,
@@ -480,15 +486,15 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 		__mmu_notifier_invalidate_range(mm, start, end);
 }
 
-static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
 {
-	mm->mmu_notifier_mm = NULL;
+	mm->notifier_subscriptions = NULL;
 }
 
-static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 {
 	if (mm_has_notifiers(mm))
-		__mmu_notifier_mm_destroy(mm);
+		__mmu_notifier_subscriptions_destroy(mm);
 }
 
 
@@ -692,11 +698,11 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 {
 }
 
-static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
 {
 }
 
-static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 {
 }
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5334ad8..462f687 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -758,7 +758,7 @@ typedef struct pglist_data {
 
 #ifdef CONFIG_NUMA
 	/*
-	 * zone reclaim becomes active if more unmapped pages exist.
+	 * node reclaim becomes active if more unmapped pages exist.
 	 */
 	unsigned long		min_unmapped_pages;
 	unsigned long		min_slab_pages;
@@ -1379,6 +1379,16 @@ static inline int pfn_present(unsigned long pfn)
 	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
 }
 
+static inline unsigned long next_present_section_nr(unsigned long section_nr)
+{
+	while (++section_nr <= __highest_present_section_nr) {
+		if (present_section_nr(section_nr))
+			return section_nr;
+	}
+
+	return -1;
+}
+
 /*
  * These are _only_ used during initialisation, therefore they
  * can use __initdata ...  They could have names to indicate
diff --git a/include/linux/module.h b/include/linux/module.h
index 0c7366c..1ad393e 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -170,6 +170,16 @@ extern void cleanup_module(void);
 #define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep)
 
 /*
+ * MODULE_FILE is used for generating modules.builtin.
+ * So, make it a no-op when this is being built as a module.
+ */
+#ifdef MODULE
+#define MODULE_FILE
+#else
+#define MODULE_FILE	MODULE_INFO(file, KBUILD_MODFILE);
+#endif
+
+/*
  * The following license idents are currently accepted as indicating free
  * software modules
  *
@@ -213,7 +223,7 @@ extern void cleanup_module(void);
  * 2.	So the community can ignore bug reports including proprietary modules
  * 3.	So vendors can do likewise based on their own policies
  */
-#define MODULE_LICENSE(_license) MODULE_INFO(license, _license)
+#define MODULE_LICENSE(_license) MODULE_FILE MODULE_INFO(license, _license)
 
 /*
  * Author(s), use "Name <email>" or just "Name", for multiple
@@ -429,7 +439,7 @@ struct module {
 
 #ifdef CONFIG_KALLSYMS
 	/* Protected by RCU and/or module_mutex: use rcu_dereference() */
-	struct mod_kallsyms *kallsyms;
+	struct mod_kallsyms __rcu *kallsyms;
 	struct mod_kallsyms core_kallsyms;
 
 	/* Section attributes */
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index e5c3e23..3ef917f 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -128,6 +128,9 @@ struct kparam_array
 
 /**
  * module_param_unsafe - same as module_param but taints kernel
+ * @name: the variable to alter, and exposed parameter name.
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs.
  */
 #define module_param_unsafe(name, type, perm)			\
 	module_param_named_unsafe(name, name, type, perm)
@@ -150,6 +153,10 @@ struct kparam_array
 
 /**
  * module_param_named_unsafe - same as module_param_named but taints kernel
+ * @name: a valid C identifier which is the parameter name.
+ * @value: the actual lvalue to alter.
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs.
  */
 #define module_param_named_unsafe(name, value, type, perm)		\
 	param_check_##type(name, &(value));				\
@@ -160,6 +167,7 @@ struct kparam_array
  * module_param_cb - general callback for a module/cmdline parameter
  * @name: a valid C identifier which is the parameter name.
  * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
  * @perm: visibility in sysfs.
  *
  * The ops can have NULL set or get functions.
@@ -171,36 +179,96 @@ struct kparam_array
 	__module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1,    \
 			    KERNEL_PARAM_FL_UNSAFE)
 
+#define __level_param_cb(name, ops, arg, perm, level)			\
+	__module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0)
 /**
- * <level>_param_cb - general callback for a module/cmdline parameter
- *                    to be evaluated before certain initcall level
+ * core_param_cb - general callback for a module/cmdline parameter
+ *                 to be evaluated before core initcall level
  * @name: a valid C identifier which is the parameter name.
  * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
  * @perm: visibility in sysfs.
  *
  * The ops can have NULL set or get functions.
  */
-#define __level_param_cb(name, ops, arg, perm, level)			\
-	__module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0)
-
 #define core_param_cb(name, ops, arg, perm)		\
 	__level_param_cb(name, ops, arg, perm, 1)
 
+/**
+ * postcore_param_cb - general callback for a module/cmdline parameter
+ *                     to be evaluated before postcore initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
 #define postcore_param_cb(name, ops, arg, perm)		\
 	__level_param_cb(name, ops, arg, perm, 2)
 
+/**
+ * arch_param_cb - general callback for a module/cmdline parameter
+ *                 to be evaluated before arch initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
 #define arch_param_cb(name, ops, arg, perm)		\
 	__level_param_cb(name, ops, arg, perm, 3)
 
+/**
+ * subsys_param_cb - general callback for a module/cmdline parameter
+ *                   to be evaluated before subsys initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
 #define subsys_param_cb(name, ops, arg, perm)		\
 	__level_param_cb(name, ops, arg, perm, 4)
 
+/**
+ * fs_param_cb - general callback for a module/cmdline parameter
+ *               to be evaluated before fs initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
 #define fs_param_cb(name, ops, arg, perm)		\
 	__level_param_cb(name, ops, arg, perm, 5)
 
+/**
+ * device_param_cb - general callback for a module/cmdline parameter
+ *                   to be evaluated before device initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
 #define device_param_cb(name, ops, arg, perm)		\
 	__level_param_cb(name, ops, arg, perm, 6)
 
+/**
+ * late_param_cb - general callback for a module/cmdline parameter
+ *                 to be evaluated before late initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
 #define late_param_cb(name, ops, arg, perm)		\
 	__level_param_cb(name, ops, arg, perm, 7)
 
@@ -263,6 +331,10 @@ static inline void kernel_param_unlock(struct module *mod)
 
 /**
  * core_param_unsafe - same as core_param but taints kernel
+ * @name: the name of the cmdline and sysfs parameter (often the same as var)
+ * @var: the variable
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs
  */
 #define core_param_unsafe(name, var, type, perm)		\
 	param_check_##type(name, &(var));				\
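For reference, the callback form these kerneldoc additions describe is used roughly as follows (a minimal sketch; the parameter name and the extra validation hook are illustrative only):

static int threshold;

static int threshold_set(const char *val, const struct kernel_param *kp)
{
	/* extra validation could be done here before storing the value */
	return param_set_int(val, kp);
}

static const struct kernel_param_ops threshold_ops = {
	.set = threshold_set,
	.get = param_get_int,
};

module_param_cb(threshold, &threshold_ops, &threshold, 0644);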
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 5a4623f..5abd91c 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -128,7 +128,8 @@
 #define SR_BP0			BIT(2)	/* Block protect 0 */
 #define SR_BP1			BIT(3)	/* Block protect 1 */
 #define SR_BP2			BIT(4)	/* Block protect 2 */
-#define SR_TB			BIT(5)	/* Top/Bottom protect */
+#define SR_TB_BIT5		BIT(5)	/* Top/Bottom protect */
+#define SR_TB_BIT6		BIT(6)	/* Top/Bottom protect */
 #define SR_SRWD			BIT(7)	/* SR write protect */
 /* Spansion/Cypress specific status bits */
 #define SR_E_ERR		BIT(5)
@@ -224,14 +225,6 @@ static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
 	return spi_nor_get_protocol_data_nbits(proto);
 }
 
-enum spi_nor_ops {
-	SPI_NOR_OPS_READ = 0,
-	SPI_NOR_OPS_WRITE,
-	SPI_NOR_OPS_ERASE,
-	SPI_NOR_OPS_LOCK,
-	SPI_NOR_OPS_UNLOCK,
-};
-
 enum spi_nor_option_flags {
 	SNOR_F_USE_FSR		= BIT(0),
 	SNOR_F_HAS_SR_TB	= BIT(1),
@@ -244,6 +237,7 @@ enum spi_nor_option_flags {
 	SNOR_F_HAS_LOCK		= BIT(8),
 	SNOR_F_HAS_16BIT_SR	= BIT(9),
 	SNOR_F_NO_READ_CR	= BIT(10),
+	SNOR_F_HAS_SR_TB_BIT6	= BIT(11),
 
 };
 
@@ -483,8 +477,8 @@ struct spi_nor;
  *			opcode via write_reg().
  */
 struct spi_nor_controller_ops {
-	int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
-	void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+	int (*prepare)(struct spi_nor *nor);
+	void (*unprepare)(struct spi_nor *nor);
 	int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len);
 	int (*write_reg)(struct spi_nor *nor, u8 opcode, const u8 *buf,
 			 size_t len);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 07bfb08..0dd980d 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -2,6 +2,7 @@
 #ifndef _LINUX_NAMEI_H
 #define _LINUX_NAMEI_H
 
+#include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/path.h>
 #include <linux/fcntl.h>
@@ -38,6 +39,15 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
 #define LOOKUP_ROOT		0x2000
 #define LOOKUP_ROOT_GRABBED	0x0008
 
+/* Scoping flags for lookup. */
+#define LOOKUP_NO_SYMLINKS	0x010000 /* No symlink crossing. */
+#define LOOKUP_NO_MAGICLINKS	0x020000 /* No nd_jump_link() crossing. */
+#define LOOKUP_NO_XDEV		0x040000 /* No mountpoint crossing. */
+#define LOOKUP_BENEATH		0x080000 /* No escaping from starting point. */
+#define LOOKUP_IN_ROOT		0x100000 /* Treat dirfd as fs root. */
+/* LOOKUP_* flags which do scope-related checks based on the dirfd. */
+#define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT)
+
 extern int path_pts(struct path *path);
 
 extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
@@ -68,7 +78,7 @@ extern int follow_up(struct path *);
 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
 extern void unlock_rename(struct dentry *, struct dentry *);
 
-extern void nd_jump_link(struct path *path);
+extern int __must_check nd_jump_link(struct path *path);
 
 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
 {
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index c06b1fd..a5f8f03 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -168,6 +168,9 @@ struct nfs_inode {
 	struct rw_semaphore	rmdir_sem;
 	struct mutex		commit_mutex;
 
+	/* track last access to cached pages */
+	unsigned long		page_index;
+
 #if IS_ENABLED(CONFIG_NFS_V4)
 	struct nfs4_cached_acl	*nfs4_acl;
         /* NFSv4 state */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index c176f70..465fa98 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -152,6 +152,7 @@ struct nfs_server {
 #define NFS_MOUNT_LOCAL_FLOCK		0x100000
 #define NFS_MOUNT_LOCAL_FCNTL		0x200000
 #define NFS_MOUNT_SOFTERR		0x400000
+#define NFS_MOUNT_SOFTREVAL		0x800000
 
 	unsigned int		caps;		/* server capabilities */
 	unsigned int		rsize;		/* read size */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 72d5695..94c77ed 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1639,6 +1639,7 @@ struct nfs_subversion;
 struct nfs_mount_info;
 struct nfs_client_initdata;
 struct nfs_pageio_descriptor;
+struct fs_context;
 
 /*
  * RPC procedure vector for NFSv2/NFSv3 demuxing
@@ -1653,16 +1654,14 @@ struct nfs_rpc_ops {
 
 	int	(*getroot) (struct nfs_server *, struct nfs_fh *,
 			    struct nfs_fsinfo *);
-	struct vfsmount *(*submount) (struct nfs_server *, struct dentry *,
-				      struct nfs_fh *, struct nfs_fattr *);
-	struct dentry *(*try_mount) (int, const char *, struct nfs_mount_info *,
-				     struct nfs_subversion *);
+	int	(*submount) (struct fs_context *, struct nfs_server *);
+	int	(*try_get_tree) (struct fs_context *);
 	int	(*getattr) (struct nfs_server *, struct nfs_fh *,
 			    struct nfs_fattr *, struct nfs4_label *,
 			    struct inode *);
 	int	(*setattr) (struct dentry *, struct nfs_fattr *,
 			    struct iattr *);
-	int	(*lookup)  (struct inode *, const struct qstr *,
+	int	(*lookup)  (struct inode *, struct dentry *,
 			    struct nfs_fh *, struct nfs_fattr *,
 			    struct nfs4_label *);
 	int	(*lookupp) (struct inode *, struct nfs_fh *,
@@ -1723,7 +1722,7 @@ struct nfs_rpc_ops {
 	struct nfs_client *(*init_client) (struct nfs_client *,
 				const struct nfs_client_initdata *);
 	void	(*free_client) (struct nfs_client *);
-	struct nfs_server *(*create_server)(struct nfs_mount_info *, struct nfs_subversion *);
+	struct nfs_server *(*create_server)(struct fs_context *);
 	struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *,
 					   struct nfs_fattr *, rpc_authflavor_t);
 };
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index fe05132..6d6f8e5 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -11,6 +11,7 @@
 
 #include <linux/err.h>
 #include <linux/errno.h>
+#include <linux/gpio/consumer.h>
 
 struct nvmem_device;
 struct nvmem_cell_info;
@@ -45,6 +46,7 @@ enum nvmem_type {
  * @word_size:	Minimum read/write access granularity.
  * @stride:	Minimum read/write access stride.
  * @priv:	User context passed to read/write callbacks.
+ * @wp_gpio:	Write protect pin
  *
  * Note: A default "nvmem<id>" name will be assigned to the device if
  * no name is specified in its configuration. In such case "<id>" is
@@ -58,6 +60,7 @@ struct nvmem_config {
 	const char		*name;
 	int			id;
 	struct module		*owner;
+	struct gpio_desc	*wp_gpio;
 	const struct nvmem_cell_info	*cells;
 	int			ncells;
 	enum nvmem_type		type;
diff --git a/include/linux/of.h b/include/linux/of.h
index 844f89e..c669c0a 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -351,6 +351,8 @@ extern const void *of_get_property(const struct device_node *node,
 				int *lenp);
 extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
 extern struct device_node *of_get_next_cpu_node(struct device_node *prev);
+extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+						 int index);
 
 #define for_each_property_of_node(dn, pp) \
 	for (pp = dn->properties; pp != NULL; pp = pp->next)
@@ -765,6 +767,12 @@ static inline struct device_node *of_get_next_cpu_node(struct device_node *prev)
 	return NULL;
 }
 
+static inline struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+					int index)
+{
+	return NULL;
+}
+
 static inline int of_n_addr_cells(struct device_node *np)
 {
 	return 0;
diff --git a/include/linux/of_clk.h b/include/linux/of_clk.h
index b27da9f..c86fcad 100644
--- a/include/linux/of_clk.h
+++ b/include/linux/of_clk.h
@@ -6,6 +6,9 @@
 #ifndef __LINUX_OF_CLK_H
 #define __LINUX_OF_CLK_H
 
+struct device_node;
+struct of_device_id;
+
 #if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF)
 
 unsigned int of_clk_get_parent_count(struct device_node *np);
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index ba3cfbb..5c5c93a 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -129,7 +129,6 @@
 #define IS_WORD_16			BIT(0xd)
 #define ENABLE_16XX_MODE		BIT(0xe)
 #define HS_CHANNELS_RESERVED		BIT(0xf)
-#define DMA_ENGINE_HANDLE_IRQ		BIT(0x10)
 
 /* Defines for DMA Capabilities */
 #define DMA_HAS_TRANSPARENT_CAPS	(0x1 << 18)
@@ -239,9 +238,6 @@ struct omap_dma_lch {
 	void (*callback)(int lch, u16 ch_status, void *data);
 	void *data;
 	long flags;
-	/* required for Dynamic chaining */
-	int prev_linked_ch;
-	int next_linked_ch;
 	int state;
 	int chain_id;
 	int status;
@@ -303,7 +299,6 @@ extern void omap_set_dma_priority(int lch, int dst_port, int priority);
 extern int omap_request_dma(int dev_id, const char *dev_name,
 			void (*callback)(int lch, u16 ch_status, void *data),
 			void *data, int *dma_ch);
-extern void omap_enable_dma_irq(int ch, u16 irq_bits);
 extern void omap_disable_dma_irq(int ch, u16 irq_bits);
 extern void omap_free_dma(int ch);
 extern void omap_start_dma(int lch);
@@ -312,7 +307,6 @@ extern void omap_set_dma_transfer_params(int lch, int data_type,
 					 int elem_count, int frame_count,
 					 int sync_mode,
 					 int dma_trigger, int src_or_dst_synch);
-extern void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode);
 extern void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode);
 
 extern void omap_set_dma_src_params(int lch, int src_port, int src_amode,
@@ -329,22 +323,10 @@ extern void omap_set_dma_dest_data_pack(int lch, int enable);
 extern void omap_set_dma_dest_burst_mode(int lch,
 					 enum omap_dma_burst_mode burst_mode);
 
-extern void omap_set_dma_params(int lch,
-				struct omap_dma_channel_params *params);
-
-extern void omap_dma_link_lch(int lch_head, int lch_queue);
-
-extern int omap_set_dma_callback(int lch,
-			void (*callback)(int lch, u16 ch_status, void *data),
-			void *data);
 extern dma_addr_t omap_get_dma_src_pos(int lch);
 extern dma_addr_t omap_get_dma_dst_pos(int lch);
 extern int omap_get_dma_active_status(int lch);
 extern int omap_dma_running(void);
-extern void omap_dma_set_global_params(int arb_rate, int max_fifo_depth,
-				       int tparams);
-void omap_dma_global_context_save(void);
-void omap_dma_global_context_restore(void);
 
 #if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP)
 #include <mach/lcd_dma.h>
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 6861df7..5724580 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -33,8 +33,8 @@ static inline bool is_migrate_isolate(int migratetype)
 #define MEMORY_OFFLINE	0x1
 #define REPORT_FAILURE	0x2
 
-bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
-			 int migratetype, int flags);
+struct page *has_unmovable_pages(struct zone *zone, struct page *page,
+				 int migratetype, int flags);
 void set_pageblock_migratetype(struct page *page, int migratetype);
 int move_freepages_block(struct zone *zone, struct page *page,
 				int migratetype, int *num_movable);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 37a4d9e..ccb14b6 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -636,4 +636,32 @@ static inline unsigned long dir_pages(struct inode *inode)
 			       PAGE_SHIFT;
 }
 
+/**
+ * page_mkwrite_check_truncate - check if page was truncated
+ * @page: the page to check
+ * @inode: the inode to check the page against
+ *
+ * Returns the number of bytes in the page up to EOF,
+ * or -EFAULT if the page was truncated.
+ */
+static inline int page_mkwrite_check_truncate(struct page *page,
+					      struct inode *inode)
+{
+	loff_t size = i_size_read(inode);
+	pgoff_t index = size >> PAGE_SHIFT;
+	int offset = offset_in_page(size);
+
+	if (page->mapping != inode->i_mapping)
+		return -EFAULT;
+
+	/* page is wholly inside EOF */
+	if (page->index < index)
+		return PAGE_SIZE;
+	/* page is wholly past EOF */
+	if (page->index > index || !offset)
+		return -EFAULT;
+	/* page is partially inside EOF */
+	return offset;
+}
+
 #endif /* _LINUX_PAGEMAP_H */
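
The page_mkwrite_check_truncate() helper added above is intended to replace open-coded truncate checks in filesystem ->page_mkwrite() handlers. A minimal, hedged sketch of how such a handler might use it; the example_* name and the surrounding locking are illustrative, not part of this patch:

static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int len;

	lock_page(page);
	len = page_mkwrite_check_truncate(page, inode);
	if (len < 0) {
		/* the page was truncated away while we waited for the lock */
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	/* ... prepare the first 'len' bytes of the page for writing ... */

	return VM_FAULT_LOCKED;
}
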
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 6ec82e9..b1cb6b75 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -8,16 +8,19 @@ struct mm_walk;
 
 /**
  * mm_walk_ops - callbacks for walk_page_range
- * @pud_entry:		if set, called for each non-empty PUD (2nd-level) entry
- *			this handler should only handle pud_trans_huge() puds.
- *			the pmd_entry or pte_entry callbacks will be used for
- *			regular PUDs.
- * @pmd_entry:		if set, called for each non-empty PMD (3rd-level) entry
+ * @pgd_entry:		if set, called for each non-empty PGD (top-level) entry
+ * @p4d_entry:		if set, called for each non-empty P4D entry
+ * @pud_entry:		if set, called for each non-empty PUD entry
+ * @pmd_entry:		if set, called for each non-empty PMD entry
  *			this handler is required to be able to handle
  *			pmd_trans_huge() pmds.  They may simply choose to
  *			split_huge_page() instead of handling it explicitly.
- * @pte_entry:		if set, called for each non-empty PTE (4th-level) entry
- * @pte_hole:		if set, called for each hole at all levels
+ * @pte_entry:		if set, called for each non-empty PTE (lowest-level)
+ *			entry
+ * @pte_hole:		if set, called for each hole at all levels,
+ *			depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD,
+ *			4:PTE. Any folded depths (where PTRS_PER_P?D is equal
+ *			to 1) are skipped.
  * @hugetlb_entry:	if set, called for each hugetlb entry
  * @test_walk:		caller specific callback function to determine whether
  *			we walk over the current vma or not. Returning 0 means
@@ -27,8 +30,15 @@ struct mm_walk;
  * @pre_vma:            if set, called before starting walk on a non-null vma.
  * @post_vma:           if set, called after a walk on a non-null vma, provided
  *                      that @pre_vma and the vma walk succeeded.
+ *
+ * p?d_entry callbacks are called even if those levels are folded on a
+ * particular architecture/configuration.
  */
 struct mm_walk_ops {
+	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
+			 unsigned long next, struct mm_walk *walk);
+	int (*p4d_entry)(p4d_t *p4d, unsigned long addr,
+			 unsigned long next, struct mm_walk *walk);
 	int (*pud_entry)(pud_t *pud, unsigned long addr,
 			 unsigned long next, struct mm_walk *walk);
 	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
@@ -36,7 +46,7 @@ struct mm_walk_ops {
 	int (*pte_entry)(pte_t *pte, unsigned long addr,
 			 unsigned long next, struct mm_walk *walk);
 	int (*pte_hole)(unsigned long addr, unsigned long next,
-			struct mm_walk *walk);
+			int depth, struct mm_walk *walk);
 	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
 			     unsigned long addr, unsigned long next,
 			     struct mm_walk *walk);
@@ -47,11 +57,27 @@ struct mm_walk_ops {
 	void (*post_vma)(struct mm_walk *walk);
 };
 
+/*
+ * Action for pud_entry / pmd_entry callbacks.
+ * ACTION_SUBTREE is the default
+ */
+enum page_walk_action {
+	/* Descend to next level, splitting huge pages if needed and possible */
+	ACTION_SUBTREE = 0,
+	/* Continue to next entry at this level (ignoring any subtree) */
+	ACTION_CONTINUE = 1,
+	/* Call again for this entry */
+	ACTION_AGAIN = 2
+};
+
 /**
  * mm_walk - walk_page_range data
  * @ops:	operation to call during the walk
  * @mm:		mm_struct representing the target process of page table walk
+ * @pgd:	pointer to PGD; only valid with no_vma (otherwise set to NULL)
  * @vma:	vma currently walked (NULL if walking outside vmas)
+ * @action:	next action to perform (see enum page_walk_action)
+ * @no_vma:	walk ignoring vmas (vma will always be NULL)
  * @private:	private data for callbacks' usage
  *
  * (see the comment on walk_page_range() for more details)
@@ -59,13 +85,20 @@ struct mm_walk_ops {
 struct mm_walk {
 	const struct mm_walk_ops *ops;
 	struct mm_struct *mm;
+	pgd_t *pgd;
 	struct vm_area_struct *vma;
+	enum page_walk_action action;
+	bool no_vma;
 	void *private;
 };
 
 int walk_page_range(struct mm_struct *mm, unsigned long start,
 		unsigned long end, const struct mm_walk_ops *ops,
 		void *private);
+int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
+			  unsigned long end, const struct mm_walk_ops *ops,
+			  pgd_t *pgd,
+			  void *private);
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 		void *private);
 int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
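
To show how the extended interface fits together (the new depth argument to pte_hole and the pgd-based walk_page_range_novma() entry point), here is a hedged sketch of a walker that merely counts present PTEs and holes; the walker and its ->private payload are illustrative only:

struct example_walk_stats {
	unsigned long present;
	unsigned long holes;
};

static int example_pte_entry(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	struct example_walk_stats *stats = walk->private;

	if (pte_present(*pte))
		stats->present++;
	return 0;
}

static int example_pte_hole(unsigned long addr, unsigned long next,
			    int depth, struct mm_walk *walk)
{
	struct example_walk_stats *stats = walk->private;

	/* depth: -1 unknown, 0:PGD, 1:P4D, 2:PUD, 3:PMD, 4:PTE */
	stats->holes++;
	return 0;
}

static const struct mm_walk_ops example_walk_ops = {
	.pte_entry	= example_pte_entry,
	.pte_hole	= example_pte_hole,
};

/* caller holds the mmap semaphore; pass a NULL pgd to walk mm's own tables */
static void example_count_range(struct mm_struct *mm, unsigned long start,
				unsigned long end,
				struct example_walk_stats *stats)
{
	walk_page_range_novma(mm, start, end, &example_walk_ops, NULL, stats);
}
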
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 5d62e78..d08f086 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -33,6 +33,9 @@ void pci_disable_pasid(struct pci_dev *pdev);
 int pci_pasid_features(struct pci_dev *pdev);
 int pci_max_pasids(struct pci_dev *pdev);
 #else /* CONFIG_PCI_PASID */
+static inline int pci_enable_pasid(struct pci_dev *pdev, int features)
+{ return -EINVAL; }
+static inline void pci_disable_pasid(struct pci_dev *pdev) { }
 static inline int pci_pasid_features(struct pci_dev *pdev)
 { return -EINVAL; }
 static inline int pci_max_pasids(struct pci_dev *pdev)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c393dff..3840a54 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1202,6 +1202,7 @@ int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
 bool pci_device_is_present(struct pci_dev *pdev);
 void pci_ignore_hotplug(struct pci_dev *dev);
+struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
 
 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
 		irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
@@ -2310,7 +2311,7 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
 }
 #endif
 
-void pci_add_dma_alias(struct pci_dev *dev, u8 devfn);
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
 int pci_for_each_dma_alias(struct pci_dev *pdev,
 			   int (*fn)(struct pci_dev *pdev,
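
With the widened pci_add_dma_alias() above, a quirk can alias a contiguous block of devfns in a single call rather than looping over them. A hedged sketch of such a fixup; the vendor/device IDs and the aliased range are placeholders only:

static void example_quirk_dma_alias_range(struct pci_dev *dev)
{
	/* alias the four functions of device 1 onto this device */
	pci_add_dma_alias(dev, PCI_DEVFN(1, 0), 4);
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, example_quirk_dma_alias_range);
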
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index a6fabd8..176bfbd 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -175,8 +175,7 @@
  * Declaration/definition used for per-CPU variables that should be accessed
  * as decrypted when memory encryption is enabled in the guest.
  */
-#if defined(CONFIG_VIRTUALIZATION) && defined(CONFIG_AMD_MEM_ENCRYPT)
-
+#ifdef CONFIG_AMD_MEM_ENCRYPT
 #define DECLARE_PER_CPU_DECRYPTED(type, name)				\
 	DECLARE_PER_CPU_SECTION(type, name, "..decrypted")
 
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 390031e8..22d9d18 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -210,6 +210,36 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 }
 
 /**
+ * percpu_ref_tryget_many - try to increment a percpu refcount
+ * @ref: percpu_ref to try-get
+ * @nr: number of references to get
+ *
+ * Increment a percpu refcount by @nr unless its count has already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
+					  unsigned long nr)
+{
+	unsigned long __percpu *percpu_count;
+	bool ret;
+
+	rcu_read_lock();
+
+	if (__ref_is_percpu(ref, &percpu_count)) {
+		this_cpu_add(*percpu_count, nr);
+		ret = true;
+	} else {
+		ret = atomic_long_add_unless(&ref->count, nr, 0);
+	}
+
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/**
  * percpu_ref_tryget - try to increment a percpu refcount
  * @ref: percpu_ref to try-get
  *
@@ -220,21 +250,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  */
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
-	unsigned long __percpu *percpu_count;
-	bool ret;
-
-	rcu_read_lock();
-
-	if (__ref_is_percpu(ref, &percpu_count)) {
-		this_cpu_inc(*percpu_count);
-		ret = true;
-	} else {
-		ret = atomic_long_inc_not_zero(&ref->count);
-	}
-
-	rcu_read_unlock();
-
-	return ret;
+	return percpu_ref_tryget_many(ref, 1);
 }
 
 /**
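
Since percpu_ref_tryget() is now just percpu_ref_tryget_many(ref, 1), a caller submitting a batch of requests can take all the references it needs in one shot and drop them the same way. A hedged sketch; the function names are illustrative:

/* take one reference per request, or none at all */
static bool example_get_request_refs(struct percpu_ref *usage,
				     unsigned int nr_requests)
{
	return percpu_ref_tryget_many(usage, nr_requests);
}

static void example_put_request_refs(struct percpu_ref *usage,
				     unsigned int nr_requests)
{
	percpu_ref_put_many(usage, nr_requests);
}
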
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 52928e0..547773f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -582,7 +582,7 @@ struct swevent_hlist {
 #define PERF_ATTACH_ITRACE	0x10
 
 struct perf_cgroup;
-struct ring_buffer;
+struct perf_buffer;
 
 struct pmu_event_list {
 	raw_spinlock_t		lock;
@@ -694,7 +694,7 @@ struct perf_event {
 	struct mutex			mmap_mutex;
 	atomic_t			mmap_count;
 
-	struct ring_buffer		*rb;
+	struct perf_buffer		*rb;
 	struct list_head		rb_entry;
 	unsigned long			rcu_batches;
 	int				rcu_pending;
@@ -854,7 +854,7 @@ struct perf_cpu_context {
 
 struct perf_output_handle {
 	struct perf_event		*event;
-	struct ring_buffer		*rb;
+	struct perf_buffer		*rb;
 	unsigned long			wakeup;
 	unsigned long			size;
 	u64				aux_flags;
diff --git a/include/linux/phy/phy-dp.h b/include/linux/phy/phy-dp.h
new file mode 100644
index 0000000..18cad23
--- /dev/null
+++ b/include/linux/phy/phy-dp.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Cadence Design Systems Inc.
+ */
+
+#ifndef __PHY_DP_H_
+#define __PHY_DP_H_
+
+#include <linux/types.h>
+
+/**
+ * struct phy_configure_opts_dp - DisplayPort PHY configuration set
+ *
+ * This structure is used to represent the configuration state of a
+ * DisplayPort phy.
+ */
+struct phy_configure_opts_dp {
+	/**
+	 * @link_rate:
+	 *
+	 * Link Rate, in Mb/s, of the main link.
+	 *
+	 * Allowed values: 1620, 2160, 2430, 2700, 3240, 4320, 5400, 8100 Mb/s
+	 */
+	unsigned int link_rate;
+
+	/**
+	 * @lanes:
+	 *
+	 * Number of active, consecutive data lanes, starting from
+	 * lane 0, used for transmission on the main link.
+	 *
+	 * Allowed values: 1, 2, 4
+	 */
+	unsigned int lanes;
+
+	/**
+	 * @voltage:
+	 *
+	 * Voltage swing levels, as specified by the DisplayPort
+	 * specification, to be used by particular lanes. One value per lane.
+	 * voltage[0] is for lane 0, voltage[1] is for lane 1, etc.
+	 *
+	 * Maximum value: 3
+	 */
+	unsigned int voltage[4];
+
+	/**
+	 * @pre:
+	 *
+	 * Pre-emphasis levels, as specified by the DisplayPort specification,
+	 * to be used by particular lanes. One value per lane.
+	 *
+	 * Maximum value: 3
+	 */
+	unsigned int pre[4];
+
+	/**
+	 * @ssc:
+	 *
+	 * Flag indicating whether or not to enable spread-spectrum clocking.
+	 *
+	 */
+	u8 ssc : 1;
+
+	/**
+	 * @set_rate:
+	 *
+	 * Flag indicating whether or not to reconfigure the link rate and SSC
+	 * to the requested values.
+	 *
+	 */
+	u8 set_rate : 1;
+
+	/**
+	 * @set_lanes:
+	 *
+	 * Flag indicating whether or not to reconfigure the lane count to
+	 * the requested value.
+	 *
+	 */
+	u8 set_lanes : 1;
+
+	/**
+	 * @set_voltages:
+	 *
+	 * Flag indicating whether or not to reconfigure voltage swing
+	 * and pre-emphasis to the requested values. Only lanes specified
+	 * by the "lanes" parameter will be affected.
+	 *
+	 */
+	u8 set_voltages : 1;
+};
+
+#endif /* __PHY_DP_H_ */
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 56d3a10..bcee8eb 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -16,6 +16,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 
+#include <linux/phy/phy-dp.h>
 #include <linux/phy/phy-mipi-dphy.h>
 
 struct phy;
@@ -40,6 +41,7 @@ enum phy_mode {
 	PHY_MODE_MIPI_DPHY,
 	PHY_MODE_SATA,
 	PHY_MODE_LVDS,
+	PHY_MODE_DP
 };
 
 /**
@@ -47,9 +49,12 @@ enum phy_mode {
  *
  * @mipi_dphy:	Configuration set applicable for phys supporting
  *		the MIPI_DPHY phy mode.
+ * @dp:		Configuration set applicable for phys supporting
+ *		the DisplayPort protocol.
  */
 union phy_configure_opts {
 	struct phy_configure_opts_mipi_dphy	mipi_dphy;
+	struct phy_configure_opts_dp		dp;
 };
 
 /**
@@ -234,7 +239,8 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
 			    const char *con_id);
 struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
 				     int index);
-void phy_put(struct phy *phy);
+void of_phy_put(struct phy *phy);
+void phy_put(struct device *dev, struct phy *phy);
 void devm_phy_put(struct device *dev, struct phy *phy);
 struct phy *of_phy_get(struct device_node *np, const char *con_id);
 struct phy *of_phy_simple_xlate(struct device *dev,
@@ -419,7 +425,11 @@ static inline struct phy *devm_of_phy_get_by_index(struct device *dev,
 	return ERR_PTR(-ENOSYS);
 }
 
-static inline void phy_put(struct phy *phy)
+static inline void of_phy_put(struct phy *phy)
+{
+}
+
+static inline void phy_put(struct device *dev, struct phy *phy)
 {
 }
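
Together with the new PHY_MODE_DP mode and the dp member of union phy_configure_opts, a DisplayPort controller driver can hand link parameters to its PHY through the generic phy_configure() call. A hedged sketch; the chosen rate, lane count and helper name are examples only:

static int example_dp_set_link(struct phy *phy)
{
	union phy_configure_opts opts = { };
	int ret;

	ret = phy_set_mode_ext(phy, PHY_MODE_DP, 0);
	if (ret)
		return ret;

	opts.dp.link_rate = 2700;	/* HBR, in Mb/s as documented above */
	opts.dp.lanes = 4;
	opts.dp.ssc = 1;
	opts.dp.set_rate = 1;
	opts.dp.set_lanes = 1;

	return phy_configure(phy, &opts);
}
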
 
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index ddd1b27..e987dc9 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -153,6 +153,7 @@ struct pinctrl_map {
 
 extern int pinctrl_register_mappings(const struct pinctrl_map *map,
 				unsigned num_maps);
+extern void pinctrl_unregister_mappings(const struct pinctrl_map *map);
 extern void pinctrl_provide_dummies(void);
 #else
 
@@ -162,6 +163,10 @@ static inline int pinctrl_register_mappings(const struct pinctrl_map *map,
 	return 0;
 }
 
+static inline void pinctrl_unregister_mappings(const struct pinctrl_map *map)
+{
+}
+
 static inline void pinctrl_provide_dummies(void)
 {
 }
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index dbcfa68..d576503 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -47,7 +47,7 @@ struct pipe_buffer {
  **/
 struct pipe_inode_info {
 	struct mutex mutex;
-	wait_queue_head_t wait;
+	wait_queue_head_t rd_wait, wr_wait;
 	unsigned int head;
 	unsigned int tail;
 	unsigned int max_usage;
diff --git a/include/linux/platform_data/ad7266.h b/include/linux/platform_data/ad7266.h
index 7de6c16..f065256 100644
--- a/include/linux/platform_data/ad7266.h
+++ b/include/linux/platform_data/ad7266.h
@@ -40,14 +40,11 @@ enum ad7266_mode {
  * @range: Reference voltage range the device is configured for
  * @mode: Sample mode the device is configured for
  * @fixed_addr: Whether the address pins are hard-wired
- * @addr_gpios: GPIOs used for controlling the address pins, only used if
- *		fixed_addr is set to false.
  */
 struct ad7266_platform_data {
 	enum ad7266_range range;
 	enum ad7266_mode mode;
 	bool fixed_addr;
-	unsigned int addr_gpios[3];
 };
 
 #endif
diff --git a/include/linux/platform_data/ads1015.h b/include/linux/platform_data/ads1015.h
deleted file mode 100644
index 4cc9ffc..0000000
--- a/include/linux/platform_data/ads1015.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Platform Data for ADS1015 12-bit 4-input ADC
- * (C) Copyright 2010
- * Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
- */
-
-#ifndef LINUX_ADS1015_H
-#define LINUX_ADS1015_H
-
-#define ADS1015_CHANNELS 8
-
-struct ads1015_channel_data {
-	bool enabled;
-	unsigned int pga;
-	unsigned int data_rate;
-};
-
-struct ads1015_platform_data {
-	struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
-};
-
-#endif /* LINUX_ADS1015_H */
diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h
index c3b61ea..6f6fed2 100644
--- a/include/linux/platform_data/b53.h
+++ b/include/linux/platform_data/b53.h
@@ -19,7 +19,7 @@
 #ifndef __B53_H
 #define __B53_H
 
-#include <linux/kernel.h>
+#include <linux/types.h>
 #include <linux/platform_data/dsa.h>
 
 struct b53_platform_data {
diff --git a/include/linux/platform_data/bd6107.h b/include/linux/platform_data/bd6107.h
index 3bd0190..54a06a4 100644
--- a/include/linux/platform_data/bd6107.h
+++ b/include/linux/platform_data/bd6107.h
@@ -9,7 +9,6 @@ struct device;
 
 struct bd6107_platform_data {
 	struct device *fbdev;
-	int reset;			/* Reset GPIO */
 	unsigned int def_value;
 };
 
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
index 30098a5..ba59147 100644
--- a/include/linux/platform_data/cros_ec_proto.h
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -12,7 +12,6 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 
-#include <linux/mfd/cros_ec.h>
 #include <linux/platform_data/cros_ec_commands.h>
 
 #define CROS_EC_DEV_NAME	"cros_ec"
@@ -185,9 +184,27 @@ struct cros_ec_platform {
 	u16 cmd_offset;
 };
 
-int cros_ec_suspend(struct cros_ec_device *ec_dev);
+/**
+ * struct cros_ec_dev - ChromeOS EC device entry point.
+ * @class_dev: Device structure used in sysfs.
+ * @ec_dev: cros_ec_device structure to talk to the physical device.
+ * @dev: Pointer to the platform device.
+ * @debug_info: cros_ec_debugfs structure for debugging information.
+ * @has_kb_wake_angle: True if at least 2 accelerometers are connected to the EC.
+ * @cmd_offset: Offset to apply for each command.
+ * @features: Features supported by the EC.
+ */
+struct cros_ec_dev {
+	struct device class_dev;
+	struct cros_ec_device *ec_dev;
+	struct device *dev;
+	struct cros_ec_debugfs *debug_info;
+	bool has_kb_wake_angle;
+	u16 cmd_offset;
+	u32 features[2];
+};
 
-int cros_ec_resume(struct cros_ec_device *ec_dev);
+#define to_cros_ec_dev(dev)  container_of(dev, struct cros_ec_dev, class_dev)
 
 int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
 		       struct cros_ec_command *msg);
@@ -201,10 +218,6 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
 int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
 			    struct cros_ec_command *msg);
 
-int cros_ec_register(struct cros_ec_device *ec_dev);
-
-int cros_ec_unregister(struct cros_ec_device *ec_dev);
-
 int cros_ec_query_all(struct cros_ec_device *ec_dev);
 
 int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
@@ -217,8 +230,6 @@ int cros_ec_check_features(struct cros_ec_dev *ec, int feature);
 
 int cros_ec_get_sensor_count(struct cros_ec_dev *ec);
 
-bool cros_ec_handle_event(struct cros_ec_device *ec_dev);
-
 /**
  * cros_ec_get_time_ns() - Return time in ns.
  *
diff --git a/include/linux/platform_data/ehci-sh.h b/include/linux/platform_data/ehci-sh.h
deleted file mode 100644
index 219bd79d..0000000
--- a/include/linux/platform_data/ehci-sh.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * EHCI SuperH driver platform data
- *
- * Copyright (C) 2012  Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
- * Copyright (C) 2012  Renesas Solutions Corp.
- */
-
-#ifndef __USB_EHCI_SH_H
-#define __USB_EHCI_SH_H
-
-struct ehci_sh_platdata {
-	void (*phy_init)(void); /* Phy init function */
-};
-
-#endif /* __USB_EHCI_SH_H */
diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h
index 84789ca..ea1cc6d 100644
--- a/include/linux/platform_data/microchip-ksz.h
+++ b/include/linux/platform_data/microchip-ksz.h
@@ -19,7 +19,7 @@
 #ifndef __MICROCHIP_KSZ_H
 #define __MICROCHIP_KSZ_H
 
-#include <linux/kernel.h>
+#include <linux/types.h>
 
 struct ksz_platform_data {
 	u32 chip_id;
diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h
index 5376b6d..20d239c 100644
--- a/include/linux/platform_data/mv_usb.h
+++ b/include/linux/platform_data/mv_usb.h
@@ -6,14 +6,6 @@
 #ifndef __MV_PLATFORM_USB_H
 #define __MV_PLATFORM_USB_H
 
-enum pxa_ehci_type {
-	EHCI_UNDEFINED = 0,
-	PXA_U2OEHCI,	/* pxa 168, 9xx */
-	PXA_SPH,	/* pxa 168, 9xx SPH */
-	MMP3_HSIC,	/* mmp3 hsic */
-	MMP3_FSIC,	/* mmp3 fsic */
-};
-
 enum {
 	MV_USB_MODE_OTG,
 	MV_USB_MODE_HOST,
diff --git a/include/linux/platform_data/tc35876x.h b/include/linux/platform_data/tc35876x.h
deleted file mode 100644
index cd6a51c..0000000
--- a/include/linux/platform_data/tc35876x.h
+++ /dev/null
@@ -1,11 +0,0 @@
-
-#ifndef _TC35876X_H
-#define _TC35876X_H
-
-struct tc35876x_platform_data {
-	int gpio_bridge_reset;
-	int gpio_panel_bl_en;
-	int gpio_panel_vadd;
-};
-
-#endif /* _TC35876X_H */
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index 8cfe570..2cbde65 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -49,6 +49,7 @@ struct sysc_regbits {
 	s8 emufree_shift;
 };
 
+#define SYSC_QUIRK_CLKDM_NOAUTO		BIT(21)
 #define SYSC_QUIRK_FORCE_MSTANDBY	BIT(20)
 #define SYSC_MODULE_QUIRK_AESS		BIT(19)
 #define SYSC_MODULE_QUIRK_SGX		BIT(18)
diff --git a/include/linux/platform_data/usb3503.h b/include/linux/platform_data/usb3503.h
index e049d51..d01ef97 100644
--- a/include/linux/platform_data/usb3503.h
+++ b/include/linux/platform_data/usb3503.h
@@ -17,9 +17,6 @@ enum usb3503_mode {
 struct usb3503_platform_data {
 	enum usb3503_mode	initial_mode;
 	u8	port_off_mask;
-	int	gpio_intn;
-	int	gpio_connect;
-	int	gpio_reset;
 };
 
 #endif
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 5a31c71..9ec78ee 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -284,6 +284,8 @@ void of_genpd_del_provider(struct device_node *np);
 int of_genpd_add_device(struct of_phandle_args *args, struct device *dev);
 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
 			   struct of_phandle_args *subdomain_spec);
+int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
+			      struct of_phandle_args *subdomain_spec);
 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
 int of_genpd_parse_idle_states(struct device_node *dn,
 			       struct genpd_power_state **states, int *n);
@@ -322,6 +324,12 @@ static inline int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
 	return -ENODEV;
 }
 
+static inline int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
+					struct of_phandle_args *subdomain_spec)
+{
+	return -ENODEV;
+}
+
 static inline int of_genpd_parse_idle_states(struct device_node *dn,
 			struct genpd_power_state **states, int *n)
 {
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index 4badd53..d55c746 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -105,11 +105,56 @@ enum max17042_register {
 
 	MAX17042_OCV		= 0xEE,
 
-	MAX17042_OCVInternal	= 0xFB,
+	MAX17042_OCVInternal	= 0xFB,  /* MAX17055 VFOCV */
 
 	MAX17042_VFSOC		= 0xFF,
 };
 
+enum max17055_register {
+	MAX17055_QRes		= 0x0C,
+	MAX17055_TTF		= 0x20,
+	MAX17055_V_empty	= 0x3A,
+	MAX17055_TIMER		= 0x3E,
+	MAX17055_USER_MEM	= 0x40,
+	MAX17055_RGAIN		= 0x42,
+
+	MAX17055_ConvgCfg	= 0x49,
+	MAX17055_VFRemCap	= 0x4A,
+
+	MAX17055_STATUS2	= 0xB0,
+	MAX17055_POWER		= 0xB1,
+	MAX17055_ID		= 0xB2,
+	MAX17055_AvgPower	= 0xB3,
+	MAX17055_IAlrtTh	= 0xB4,
+	MAX17055_TTFCfg		= 0xB5,
+	MAX17055_CVMixCap	= 0xB6,
+	MAX17055_CVHalfTime	= 0xB7,
+	MAX17055_CGTempCo	= 0xB8,
+	MAX17055_Curve		= 0xB9,
+	MAX17055_HibCfg		= 0xBA,
+	MAX17055_Config2	= 0xBB,
+	MAX17055_VRipple	= 0xBC,
+	MAX17055_RippleCfg	= 0xBD,
+	MAX17055_TimerH		= 0xBE,
+
+	MAX17055_RSense		= 0xD0,
+	MAX17055_ScOcvLim	= 0xD1,
+
+	MAX17055_SOCHold	= 0xD3,
+	MAX17055_MaxPeakPwr	= 0xD4,
+	MAX17055_SusPeakPwr	= 0xD5,
+	MAX17055_PackResistance	= 0xD6,
+	MAX17055_SysResistance	= 0xD7,
+	MAX17055_MinSysV	= 0xD8,
+	MAX17055_MPPCurrent	= 0xD9,
+	MAX17055_SPPCurrent	= 0xDA,
+	MAX17055_ModelCfg	= 0xDB,
+	MAX17055_AtQResidual	= 0xDC,
+	MAX17055_AtTTE		= 0xDD,
+	MAX17055_AtAvSOC	= 0xDE,
+	MAX17055_AtAvCap	= 0xDF,
+};
+
 /* Registers specific to max17047/50 */
 enum max17047_register {
 	MAX17047_QRTbl00	= 0x12,
@@ -125,6 +170,7 @@ enum max170xx_chip_type {
 	MAXIM_DEVICE_TYPE_MAX17042,
 	MAXIM_DEVICE_TYPE_MAX17047,
 	MAXIM_DEVICE_TYPE_MAX17050,
+	MAXIM_DEVICE_TYPE_MAX17055,
 
 	MAXIM_DEVICE_TYPE_NUM
 };
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 28413f7..dcd5a71 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -325,6 +325,11 @@ struct power_supply_battery_ocv_table {
 	int capacity;	/* percent */
 };
 
+struct power_supply_resistance_temp_table {
+	int temp;	/* celsius */
+	int resistance;	/* internal resistance percent */
+};
+
 #define POWER_SUPPLY_OCV_TEMP_MAX 20
 
 /*
@@ -349,6 +354,8 @@ struct power_supply_battery_info {
 	int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];/* celsius */
 	struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX];
 	int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX];
+	struct power_supply_resistance_temp_table *resist_table;
+	int resist_table_size;
 };
 
 extern struct atomic_notifier_head power_supply_notifier;
@@ -381,6 +388,9 @@ power_supply_find_ocv2cap_table(struct power_supply_battery_info *info,
 				int temp, int *table_len);
 extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info,
 					int ocv, int temp);
+extern int
+power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table,
+				int table_len, int temp);
 extern void power_supply_changed(struct power_supply *psy);
 extern int power_supply_am_i_supplied(struct power_supply *psy);
 extern int power_supply_set_input_current_limit_from_supplier(
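
The resistance/temperature table and power_supply_temp2resist_simple() added above let a fuel-gauge driver scale the battery's nominal internal resistance with temperature. A hedged sketch; the table values and names are made up for illustration:

static struct power_supply_resistance_temp_table example_resist_table[] = {
	{ .temp = 20,  .resistance = 100 },	/* 100% of nominal at 20 C */
	{ .temp = 10,  .resistance = 110 },
	{ .temp = 0,   .resistance = 130 },
	{ .temp = -10, .resistance = 160 },
};

static int example_resistance_uohm(int nominal_uohm, int temp)
{
	int percent;

	percent = power_supply_temp2resist_simple(example_resist_table,
				ARRAY_SIZE(example_resist_table), temp);

	return nominal_uohm * percent / 100;
}
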
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 0640be5..3dfa926 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -12,6 +12,21 @@ struct proc_dir_entry;
 struct seq_file;
 struct seq_operations;
 
+struct proc_ops {
+	int	(*proc_open)(struct inode *, struct file *);
+	ssize_t	(*proc_read)(struct file *, char __user *, size_t, loff_t *);
+	ssize_t	(*proc_write)(struct file *, const char __user *, size_t, loff_t *);
+	loff_t	(*proc_lseek)(struct file *, loff_t, int);
+	int	(*proc_release)(struct inode *, struct file *);
+	__poll_t (*proc_poll)(struct file *, struct poll_table_struct *);
+	long	(*proc_ioctl)(struct file *, unsigned int, unsigned long);
+#ifdef CONFIG_COMPAT
+	long	(*proc_compat_ioctl)(struct file *, unsigned int, unsigned long);
+#endif
+	int	(*proc_mmap)(struct file *, struct vm_area_struct *);
+	unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+};
+
 #ifdef CONFIG_PROC_FS
 
 typedef int (*proc_write_t)(struct file *, char *, size_t);
@@ -43,10 +58,10 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
  
 extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
 					       struct proc_dir_entry *,
-					       const struct file_operations *,
+					       const struct proc_ops *,
 					       void *);
 
-struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct file_operations *proc_fops);
+struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops);
 extern void proc_set_size(struct proc_dir_entry *, loff_t);
 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
 extern void *PDE_DATA(const struct inode *);
@@ -108,8 +123,8 @@ static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
 #define proc_create_seq(name, mode, parent, ops) ({NULL;})
 #define proc_create_single(name, mode, parent, show) ({NULL;})
 #define proc_create_single_data(name, mode, parent, show, data) ({NULL;})
-#define proc_create(name, mode, parent, proc_fops) ({NULL;})
-#define proc_create_data(name, mode, parent, proc_fops, data) ({NULL;})
+#define proc_create(name, mode, parent, proc_ops) ({NULL;})
+#define proc_create_data(name, mode, parent, proc_ops, data) ({NULL;})
 
 static inline void proc_set_size(struct proc_dir_entry *de, loff_t size) {}
 static inline void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) {}
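
Code that registers procfs files now passes the dedicated struct proc_ops above instead of a struct file_operations, so callers need the equivalent of the conversion below. A hedged sketch; the file name and show routine are illustrative:

static int example_proc_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct proc_ops example_proc_ops = {
	.proc_open	= example_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/* e.g. in module init: proc_create("example", 0444, NULL, &example_proc_ops); */
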
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index d312e62..4626b1a 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -79,10 +79,10 @@ static inline int ns_alloc_inum(struct ns_common *ns)
 
 extern struct file *proc_ns_fget(int fd);
 #define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
-extern void *ns_get_path(struct path *path, struct task_struct *task,
+extern int ns_get_path(struct path *path, struct task_struct *task,
 			const struct proc_ns_operations *ns_ops);
 typedef struct ns_common *ns_get_path_helper_t(void *);
-extern void *ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
+extern int ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
 			    void *private_data);
 
 extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
diff --git a/include/linux/psci.h b/include/linux/psci.h
index ebe0a88..a67712b 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -18,6 +18,8 @@ bool psci_tos_resident_on(int cpu);
 
 int psci_cpu_suspend_enter(u32 state);
 bool psci_power_state_is_valid(u32 state);
+int psci_set_osi_mode(void);
+bool psci_has_osi_support(void);
 
 enum smccc_version {
 	SMCCC_VERSION_1_0,
diff --git a/include/linux/ptdump.h b/include/linux/ptdump.h
new file mode 100644
index 0000000..a67065c
--- /dev/null
+++ b/include/linux/ptdump.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_PTDUMP_H
+#define _LINUX_PTDUMP_H
+
+#include <linux/mm_types.h>
+
+struct ptdump_range {
+	unsigned long start;
+	unsigned long end;
+};
+
+struct ptdump_state {
+	/* level is 0:PGD to 4:PTE, or -1 if unknown */
+	void (*note_page)(struct ptdump_state *st, unsigned long addr,
+			  int level, unsigned long val);
+	const struct ptdump_range *range;
+};
+
+void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd);
+
+#endif /* _LINUX_PTDUMP_H */
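
The generic ptdump core walks a set of address ranges and calls note_page() for every table entry it visits, with level 0 (PGD) through 4 (PTE). A hedged sketch of a minimal consumer that only counts non-empty entries; the range table and names are illustrative:

struct example_ptdump {
	struct ptdump_state st;
	unsigned long entries;
};

static void example_note_page(struct ptdump_state *st, unsigned long addr,
			      int level, unsigned long val)
{
	struct example_ptdump *dump = container_of(st, struct example_ptdump, st);

	if (val)
		dump->entries++;
}

static const struct ptdump_range example_ranges[] = {
	{ PAGE_OFFSET, ~0UL },
	{ 0, 0 },			/* assumed end-of-table marker */
};

static unsigned long example_count_kernel_entries(void)
{
	struct example_ptdump dump = {
		.st = {
			.note_page	= example_note_page,
			.range		= example_ranges,
		},
	};

	ptdump_walk_pgd(&dump.st, &init_mm, NULL);
	return dump.entries;
}
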
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index d05ddac..3d6a246 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2010-2015, 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
  * Copyright (C) 2015 Linaro Ltd.
  */
 #ifndef __QCOM_SCM_H
@@ -55,77 +55,94 @@ enum qcom_scm_sec_dev_id {
 #define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
 
 #if IS_ENABLED(CONFIG_QCOM_SCM)
+extern bool qcom_scm_is_available(void);
+
 extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
 extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
-extern bool qcom_scm_is_available(void);
-extern bool qcom_scm_hdcp_available(void);
-extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
-			     u32 *resp);
-extern bool qcom_scm_ocmem_lock_available(void);
-extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
-			       u32 size, u32 mode);
-extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset,
-				 u32 size);
-extern bool qcom_scm_pas_supported(u32 peripheral);
+extern void qcom_scm_cpu_power_down(u32 flags);
+extern int qcom_scm_set_remote_state(u32 state, u32 id);
+
 extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
 				   size_t size);
 extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
 				  phys_addr_t size);
 extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
 extern int qcom_scm_pas_shutdown(u32 peripheral);
-extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
-			       unsigned int *src,
-			       const struct qcom_scm_vmperm *newvm,
-			       unsigned int dest_cnt);
-extern void qcom_scm_cpu_power_down(u32 flags);
-extern u32 qcom_scm_get_version(void);
-extern int qcom_scm_set_remote_state(u32 state, u32 id);
+extern bool qcom_scm_pas_supported(u32 peripheral);
+
+extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
+extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
+
 extern bool qcom_scm_restore_sec_cfg_available(void);
 extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
 extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
 extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+			       unsigned int *src,
+			       const struct qcom_scm_vmperm *newvm,
+			       unsigned int dest_cnt);
+
+extern bool qcom_scm_ocmem_lock_available(void);
+extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
+			       u32 size, u32 mode);
+extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset,
+				 u32 size);
+
+extern bool qcom_scm_hdcp_available(void);
+extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
+			     u32 *resp);
+
 extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
-extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
-extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
 #else
 
 #include <linux/errno.h>
 
-static inline
-int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
-{
-	return -ENODEV;
-}
-static inline
-int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
-{
-	return -ENODEV;
-}
 static inline bool qcom_scm_is_available(void) { return false; }
+
+static inline int qcom_scm_set_cold_boot_addr(void *entry,
+		const cpumask_t *cpus) { return -ENODEV; }
+static inline int qcom_scm_set_warm_boot_addr(void *entry,
+		const cpumask_t *cpus) { return -ENODEV; }
+static inline void qcom_scm_cpu_power_down(u32 flags) {}
+static inline int qcom_scm_set_remote_state(u32 state, u32 id)
+		{ return -ENODEV; }
+
+static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
+		size_t size) { return -ENODEV; }
+static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
+		phys_addr_t size) { return -ENODEV; }
+static inline int qcom_scm_pas_auth_and_reset(u32 peripheral)
+		{ return -ENODEV; }
+static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
+static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; }
+
+static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
+		{ return -ENODEV; }
+static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
+		{ return -ENODEV; }
+
+static inline bool qcom_scm_restore_sec_cfg_available(void) { return false; }
+static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+		{ return -ENODEV; }
+static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
+		{ return -ENODEV; }
+static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+		{ return -ENODEV; }
+static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+		unsigned int *src, const struct qcom_scm_vmperm *newvm,
+		unsigned int dest_cnt) { return -ENODEV; }
+
+static inline bool qcom_scm_ocmem_lock_available(void) { return false; }
+static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
+		u32 size, u32 mode) { return -ENODEV; }
+static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id,
+		u32 offset, u32 size) { return -ENODEV; }
+
 static inline bool qcom_scm_hdcp_available(void) { return false; }
 static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
-				    u32 *resp) { return -ENODEV; }
-static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; }
-static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
-					  size_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
-					 phys_addr_t size) { return -ENODEV; }
-static inline int
-qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; }
-static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
-static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
-				      unsigned int *src,
-				      const struct qcom_scm_vmperm *newvm,
-				      unsigned int dest_cnt) { return -ENODEV; }
-static inline void qcom_scm_cpu_power_down(u32 flags) {}
-static inline u32 qcom_scm_get_version(void) { return 0; }
-static inline u32
-qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; }
-static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; }
-static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en) { return -ENODEV; }
-static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) { return -ENODEV; }
-static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) { return -ENODEV; }
+		u32 *resp) { return -ENODEV; }
+
+static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+		{ return -ENODEV; }
 #endif
 #endif
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index b806a0f..917528d 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -2,6 +2,8 @@
 #ifndef _LINUX_RAMFS_H
 #define _LINUX_RAMFS_H
 
+#include <linux/fs_parser.h> // bleh...
+
 struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir,
 	 umode_t mode, dev_t dev);
 extern int ramfs_init_fs_context(struct fs_context *fc);
@@ -16,7 +18,7 @@ ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize);
 #endif
 
-extern const struct fs_parameter_description ramfs_fs_parameters;
+extern const struct fs_parameter_spec ramfs_fs_parameters[];
 extern const struct file_operations ramfs_file_operations;
 extern const struct vm_operations_struct generic_file_vm_ops;
 
diff --git a/include/linux/random.h b/include/linux/random.h
index f189c92..d319f9a 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -167,29 +167,21 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
 #ifdef CONFIG_ARCH_RANDOM
 # include <asm/archrandom.h>
 #else
-static inline bool arch_get_random_long(unsigned long *v)
+static inline bool __must_check arch_get_random_long(unsigned long *v)
 {
-	return 0;
+	return false;
 }
-static inline bool arch_get_random_int(unsigned int *v)
+static inline bool __must_check arch_get_random_int(unsigned int *v)
 {
-	return 0;
+	return false;
 }
-static inline bool arch_has_random(void)
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
-	return 0;
+	return false;
 }
-static inline bool arch_get_random_seed_long(unsigned long *v)
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
-	return 0;
-}
-static inline bool arch_get_random_seed_int(unsigned int *v)
-{
-	return 0;
-}
-static inline bool arch_has_random_seed(void)
-{
-	return 0;
+	return false;
 }
 #endif
 
diff --git a/include/linux/remoteproc/mtk_scp.h b/include/linux/remoteproc/mtk_scp.h
new file mode 100644
index 0000000..b47416f
--- /dev/null
+++ b/include/linux/remoteproc/mtk_scp.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#ifndef _MTK_SCP_H
+#define _MTK_SCP_H
+
+#include <linux/platform_device.h>
+
+typedef void (*scp_ipi_handler_t) (void *data,
+				   unsigned int len,
+				   void *priv);
+struct mtk_scp;
+
+/**
+ * enum ipi_id - the id of inter-processor interrupt
+ *
+ * @SCP_IPI_INIT:	 The interrupt from SCP notifies the kernel that
+ *			 SCP initialization has completed.
+ *			 SCP_IPI_INIT is sent from SCP when the firmware is
+ *			 loaded. The AP doesn't need to send the SCP_IPI_INIT
+ *			 command to SCP.
+ *			 For the other IPIs below, the AP should send the
+ *			 request to SCP to trigger the interrupt.
+ * @SCP_IPI_MAX:	 The maximum IPI number
+ */
+
+enum scp_ipi_id {
+	SCP_IPI_INIT = 0,
+	SCP_IPI_VDEC_H264,
+	SCP_IPI_VDEC_VP8,
+	SCP_IPI_VDEC_VP9,
+	SCP_IPI_VENC_H264,
+	SCP_IPI_VENC_VP8,
+	SCP_IPI_MDP_INIT,
+	SCP_IPI_MDP_DEINIT,
+	SCP_IPI_MDP_FRAME,
+	SCP_IPI_DIP,
+	SCP_IPI_ISP_CMD,
+	SCP_IPI_ISP_FRAME,
+	SCP_IPI_FD_CMD,
+	SCP_IPI_CROS_HOST_CMD,
+	SCP_IPI_NS_SERVICE = 0xFF,
+	SCP_IPI_MAX = 0x100,
+};
+
+struct mtk_scp *scp_get(struct platform_device *pdev);
+void scp_put(struct mtk_scp *scp);
+
+struct device *scp_get_device(struct mtk_scp *scp);
+struct rproc *scp_get_rproc(struct mtk_scp *scp);
+
+int scp_ipi_register(struct mtk_scp *scp, u32 id, scp_ipi_handler_t handler,
+		     void *priv);
+void scp_ipi_unregister(struct mtk_scp *scp, u32 id);
+
+int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
+		 unsigned int wait);
+
+unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp);
+unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp);
+
+void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr);
+
+#endif /* _MTK_SCP_H */
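
A client driver obtains a handle to the SCP with scp_get(), registers an IPI handler and then sends requests with scp_ipi_send(). A hedged sketch of that flow; the chosen IPI id, payload and timeout are illustrative, and error handling is abbreviated:

static void example_scp_reply(void *data, unsigned int len, void *priv)
{
	/* 'data'/'len' carry the SCP reply, 'priv' is what was registered */
}

static int example_scp_start(struct platform_device *pdev)
{
	struct mtk_scp *scp = scp_get(pdev);
	u32 request = 1;
	int ret;

	if (!scp)
		return -EPROBE_DEFER;

	ret = scp_ipi_register(scp, SCP_IPI_VDEC_H264, example_scp_reply, NULL);
	if (ret)
		goto err_put;

	/* assume 'wait' is a timeout in milliseconds, as for mtk_rpmsg below */
	ret = scp_ipi_send(scp, SCP_IPI_VDEC_H264, &request, sizeof(request), 100);
	if (ret)
		goto err_unregister;

	return 0;

err_unregister:
	scp_ipi_unregister(scp, SCP_IPI_VDEC_H264);
err_put:
	scp_put(scp);
	return ret;
}
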
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 1a40277..df0124e 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -6,7 +6,7 @@
 #include <linux/seq_file.h>
 #include <linux/poll.h>
 
-struct ring_buffer;
+struct trace_buffer;
 struct ring_buffer_iter;
 
 /*
@@ -77,13 +77,13 @@ u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);
  *  else
  *    ring_buffer_unlock_commit(buffer, event);
  */
-void ring_buffer_discard_commit(struct ring_buffer *buffer,
+void ring_buffer_discard_commit(struct trace_buffer *buffer,
 				struct ring_buffer_event *event);
 
 /*
  * size is in bytes for each per CPU buffer.
  */
-struct ring_buffer *
+struct trace_buffer *
 __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
 
 /*
@@ -97,38 +97,38 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
 	__ring_buffer_alloc((size), (flags), &__key);	\
 })
 
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full);
-__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
+__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
 			  struct file *filp, poll_table *poll_table);
 
 
 #define RING_BUFFER_ALL_CPUS -1
 
-void ring_buffer_free(struct ring_buffer *buffer);
+void ring_buffer_free(struct trace_buffer *buffer);
 
-int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
+int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);
 
-void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);
+void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);
 
-struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
+struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
 						   unsigned long length);
-int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+int ring_buffer_unlock_commit(struct trace_buffer *buffer,
 			      struct ring_buffer_event *event);
-int ring_buffer_write(struct ring_buffer *buffer,
+int ring_buffer_write(struct trace_buffer *buffer,
 		      unsigned long length, void *data);
 
-void ring_buffer_nest_start(struct ring_buffer *buffer);
-void ring_buffer_nest_end(struct ring_buffer *buffer);
+void ring_buffer_nest_start(struct trace_buffer *buffer);
+void ring_buffer_nest_end(struct trace_buffer *buffer);
 
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
 		 unsigned long *lost_events);
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
 		    unsigned long *lost_events);
 
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
+ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
 void ring_buffer_read_prepare_sync(void);
 void ring_buffer_read_start(struct ring_buffer_iter *iter);
 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
@@ -140,59 +140,59 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
 void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
 int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
 
-unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
 
-void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
-void ring_buffer_reset(struct ring_buffer *buffer);
+void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
+void ring_buffer_reset(struct trace_buffer *buffer);
 
 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
-int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
-			 struct ring_buffer *buffer_b, int cpu);
+int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+			 struct trace_buffer *buffer_b, int cpu);
 #else
 static inline int
-ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
-		     struct ring_buffer *buffer_b, int cpu)
+ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+		     struct trace_buffer *buffer_b, int cpu)
 {
 	return -ENODEV;
 }
 #endif
 
-bool ring_buffer_empty(struct ring_buffer *buffer);
-bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
+bool ring_buffer_empty(struct trace_buffer *buffer);
+bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);
 
-void ring_buffer_record_disable(struct ring_buffer *buffer);
-void ring_buffer_record_enable(struct ring_buffer *buffer);
-void ring_buffer_record_off(struct ring_buffer *buffer);
-void ring_buffer_record_on(struct ring_buffer *buffer);
-bool ring_buffer_record_is_on(struct ring_buffer *buffer);
-bool ring_buffer_record_is_set_on(struct ring_buffer *buffer);
-void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
-void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
+void ring_buffer_record_disable(struct trace_buffer *buffer);
+void ring_buffer_record_enable(struct trace_buffer *buffer);
+void ring_buffer_record_off(struct trace_buffer *buffer);
+void ring_buffer_record_on(struct trace_buffer *buffer);
+bool ring_buffer_record_is_on(struct trace_buffer *buffer);
+bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
+void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
+void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);
 
-u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_entries(struct ring_buffer *buffer);
-unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
-unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);
+u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_entries(struct trace_buffer *buffer);
+unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
+unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);
 
-u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
-void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu);
+void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
 				      int cpu, u64 *ts);
-void ring_buffer_set_clock(struct ring_buffer *buffer,
+void ring_buffer_set_clock(struct trace_buffer *buffer,
 			   u64 (*clock)(void));
-void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs);
-bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer);
+void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
+bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
 
-size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu);
-size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu);
+size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
+size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
 
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
-int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
+void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
+int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
 			  size_t len, int cpu, int full);
 
 struct trace_seq;
diff --git a/include/linux/rpmsg/mtk_rpmsg.h b/include/linux/rpmsg/mtk_rpmsg.h
new file mode 100644
index 0000000..363b601
--- /dev/null
+++ b/include/linux/rpmsg/mtk_rpmsg.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC.
+ */
+
+#ifndef __LINUX_RPMSG_MTK_RPMSG_H
+#define __LINUX_RPMSG_MTK_RPMSG_H
+
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+
+typedef void (*ipi_handler_t)(void *data, unsigned int len, void *priv);
+
+/*
+ * struct mtk_rpmsg_info - IPI functions tied to the rpmsg device.
+ * @register_ipi: register IPI handler for an IPI id.
+ * @unregister_ipi: unregister IPI handler for a registered IPI id.
+ * @send_ipi: send IPI to an IPI id. wait is the timeout (in msecs) to wait
+ *            for a response, or 0 if there's no timeout.
+ * @ns_ipi_id: the IPI id used for name service, or -1 if name service isn't
+ *             supported.
+ */
+struct mtk_rpmsg_info {
+	int (*register_ipi)(struct platform_device *pdev, u32 id,
+			    ipi_handler_t handler, void *priv);
+	void (*unregister_ipi)(struct platform_device *pdev, u32 id);
+	int (*send_ipi)(struct platform_device *pdev, u32 id,
+			void *buf, unsigned int len, unsigned int wait);
+	int ns_ipi_id;
+};
+
+struct rproc_subdev *
+mtk_rpmsg_create_rproc_subdev(struct platform_device *pdev,
+			      struct mtk_rpmsg_info *info);
+
+void mtk_rpmsg_destroy_rproc_subdev(struct rproc_subdev *subdev);
+
+#endif
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 4e9d3c7..23990bd 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -167,6 +167,7 @@ struct rtc_device {
 #define RTC_TIMESTAMP_BEGIN_1900	-2208988800LL /* 1900-01-01 00:00:00 */
 #define RTC_TIMESTAMP_BEGIN_2000	946684800LL /* 2000-01-01 00:00:00 */
 #define RTC_TIMESTAMP_END_2063		2966371199LL /* 2063-12-31 23:59:59 */
+#define RTC_TIMESTAMP_END_2079		3471292799LL /* 2079-12-31 23:59:59 */
 #define RTC_TIMESTAMP_END_2099		4102444799LL /* 2099-12-31 23:59:59 */
 #define RTC_TIMESTAMP_END_2199		7258118399LL /* 2199-12-31 23:59:59 */
 #define RTC_TIMESTAMP_END_9999		253402300799LL /* 9999-12-31 23:59:59 */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 716ad1d..0427849 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -917,7 +917,7 @@ struct task_struct {
 
 	/* Signal handlers: */
 	struct signal_struct		*signal;
-	struct sighand_struct		*sighand;
+	struct sighand_struct __rcu		*sighand;
 	sigset_t			blocked;
 	sigset_t			real_blocked;
 	/* Restored if set_restore_sigmask() was used: */
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 881fea4..5c873a5 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -257,6 +257,7 @@ enum scmi_std_protocol {
 struct scmi_device {
 	u32 id;
 	u8 protocol_id;
+	const char *name;
 	struct device dev;
 	struct scmi_handle *handle;
 };
@@ -264,11 +265,13 @@ struct scmi_device {
 #define to_scmi_dev(d) container_of(d, struct scmi_device, dev)
 
 struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol);
+scmi_device_create(struct device_node *np, struct device *parent, int protocol,
+		   const char *name);
 void scmi_device_destroy(struct scmi_device *scmi_dev);
 
 struct scmi_device_id {
 	u8 protocol_id;
+	const char *name;
 };
 
 struct scmi_driver {
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 5998e1f..770c2bf 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -160,6 +160,19 @@ static const struct file_operations __name ## _fops = {			\
 	.release	= single_release,				\
 }
 
+#define DEFINE_PROC_SHOW_ATTRIBUTE(__name)				\
+static int __name ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __name ## _show, inode->i_private);	\
+}									\
+									\
+static const struct proc_ops __name ## _proc_ops = {			\
+	.proc_open	= __name ## _open,				\
+	.proc_read	= seq_read,					\
+	.proc_lseek	= seq_lseek,					\
+	.proc_release	= single_release,				\
+}
+
 static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
 {
 #ifdef CONFIG_USER_NS
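
DEFINE_PROC_SHOW_ATTRIBUTE() is the proc_ops counterpart of DEFINE_SHOW_ATTRIBUTE(): given a foo_show() routine it generates foo_open() and a foo_proc_ops ready for proc_create(). A short hedged example; the name is illustrative:

static int example_stats_show(struct seq_file *m, void *v)
{
	seq_puts(m, "ok\n");
	return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(example_stats);

/* later: proc_create("example_stats", 0444, NULL, &example_stats_proc_ops); */
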
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index bb2bc99..6a8e8c4 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -25,6 +25,7 @@ struct plat_serial8250_port {
 	unsigned char	regshift;	/* register shift */
 	unsigned char	iotype;		/* UPIO_* */
 	unsigned char	hub6;
+	unsigned char	has_sysrq;	/* supports magic SysRq */
 	upf_t		flags;		/* UPF_* flags */
 	unsigned int	type;		/* If UPF_FIXED_TYPE */
 	unsigned int	(*serial_in)(struct uart_port *, int);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 2b78cc7..52404ef 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -161,11 +161,6 @@ struct uart_port {
 	struct uart_icount	icount;			/* statistics */
 
 	struct console		*cons;			/* struct console, if any */
-#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ)
-	unsigned long		sysrq;			/* sysrq timeout */
-	unsigned int		sysrq_ch;		/* char for sysrq */
-#endif
-
 	/* flags must be updated while holding port mutex */
 	upf_t			flags;
 
@@ -244,9 +239,13 @@ struct uart_port {
 	resource_size_t		mapbase;		/* for ioremap */
 	resource_size_t		mapsize;
 	struct device		*dev;			/* parent device */
+
+	unsigned long		sysrq;			/* sysrq timeout */
+	unsigned int		sysrq_ch;		/* char for sysrq */
+	unsigned char		has_sysrq;
+
 	unsigned char		hub6;			/* this should be in the 8250 driver */
 	unsigned char		suspended;
-	unsigned char		unused[2];
 	const char		*name;			/* port name */
 	struct attribute_group	*attr_group;		/* port specific attributes */
 	const struct attribute_group **tty_groups;	/* all attributes (serial core use only) */
@@ -460,81 +459,11 @@ extern void uart_handle_cts_change(struct uart_port *uport,
 extern void uart_insert_char(struct uart_port *port, unsigned int status,
 		 unsigned int overrun, unsigned int ch, unsigned int flag);
 
-#if defined(SUPPORT_SYSRQ) && defined(CONFIG_MAGIC_SYSRQ_SERIAL)
-static inline int
-uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
-{
-	if (port->sysrq) {
-		if (ch && time_before(jiffies, port->sysrq)) {
-			handle_sysrq(ch);
-			port->sysrq = 0;
-			return 1;
-		}
-		port->sysrq = 0;
-	}
-	return 0;
-}
-static inline int
-uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
-{
-	if (port->sysrq) {
-		if (ch && time_before(jiffies, port->sysrq)) {
-			port->sysrq_ch = ch;
-			port->sysrq = 0;
-			return 1;
-		}
-		port->sysrq = 0;
-	}
-	return 0;
-}
-static inline void
-uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
-{
-	int sysrq_ch;
-
-	sysrq_ch = port->sysrq_ch;
-	port->sysrq_ch = 0;
-
-	spin_unlock_irqrestore(&port->lock, irqflags);
-
-	if (sysrq_ch)
-		handle_sysrq(sysrq_ch);
-}
-#else
-static inline int
-uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; }
-static inline int
-uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; }
-static inline void
-uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
-{
-	spin_unlock_irqrestore(&port->lock, irqflags);
-}
-#endif
-
-/*
- * We do the SysRQ and SAK checking like this...
- */
-static inline int uart_handle_break(struct uart_port *port)
-{
-	struct uart_state *state = port->state;
-
-	if (port->handle_break)
-		port->handle_break(port);
-
-#ifdef SUPPORT_SYSRQ
-	if (port->cons && port->cons->index == port->line) {
-		if (!port->sysrq) {
-			port->sysrq = jiffies + HZ*5;
-			return 1;
-		}
-		port->sysrq = 0;
-	}
-#endif
-	if (port->flags & UPF_SAK)
-		do_SAK(state->port.tty);
-	return 0;
-}
+extern int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch);
+extern int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch);
+extern void uart_unlock_and_check_sysrq(struct uart_port *port,
+					unsigned long irqflags);
+extern int uart_handle_break(struct uart_port *port);
 
 /*
  *	UART_ENABLE_MS - determine if port should enable modem status irqs
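
A sketch of the usual RX-path pattern with the now out-of-line helpers: a
possible SysRq character is recorded under the port lock and only handled once
the lock is dropped. The foo_* accessors stand in for driver-specific register
reads and are assumptions:

	static void foo_rx_chars(struct uart_port *port)
	{
		unsigned long flags;
		unsigned int ch;

		spin_lock_irqsave(&port->lock, flags);
		while (foo_rx_ready(port)) {		/* hypothetical FIFO check */
			ch = foo_read_rx(port);		/* hypothetical RX read */
			if (uart_prepare_sysrq_char(port, ch))
				continue;		/* consumed as a SysRq char */
			uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
		}
		uart_unlock_and_check_sysrq(port, flags);
		tty_flip_buffer_push(&port->state->port);
	}
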
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index de8e4b7..d56fefe 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -8,6 +8,7 @@
 #include <linux/pagemap.h>
 #include <linux/percpu_counter.h>
 #include <linux/xattr.h>
+#include <linux/fs_parser.h>
 
 /* inode in-kernel data */
 
@@ -49,7 +50,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
 /*
  * Functions in mm/shmem.c called directly from elsewhere:
  */
-extern const struct fs_parameter_description shmem_fs_parameters;
+extern const struct fs_parameter_spec shmem_fs_parameters[];
 extern int shmem_init(void);
 extern int shmem_init_fs_context(struct fs_context *fc);
 extern struct file *shmem_file_setup(const char *name,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3d13a4b..ca8806b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1822,6 +1822,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
 }
 
 /**
+ *	skb_queue_len_lockless	- get queue length
+ *	@list_: list to measure
+ *
+ *	Return the length of an &sk_buff queue.
+ *	This variant can be used in lockless contexts.
+ */
+static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
+{
+	return READ_ONCE(list_->qlen);
+}
+
+/**
  *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
  *	@list: queue to initialize
  *
@@ -2026,7 +2038,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
 	struct sk_buff *next, *prev;
 
-	list->qlen--;
+	WRITE_ONCE(list->qlen, list->qlen - 1);
 	next	   = skb->next;
 	prev	   = skb->prev;
 	skb->next  = skb->prev = NULL;
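
A small sketch of the intended use: the READ_ONCE() in
skb_queue_len_lockless() pairs with the WRITE_ONCE() now used when qlen is
updated under the queue lock, so a length check can be done locklessly
(foo_queue_is_full is an assumed name):

	static bool foo_queue_is_full(const struct sk_buff_head *q, __u32 limit)
	{
		return skb_queue_len_lockless(q) >= limit;
	}
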
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 877a95c..03a3893 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -184,7 +184,6 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
 /*
  * Common kmalloc functions provided by all allocators
  */
-void * __must_check __krealloc(const void *, size_t, gfp_t);
 void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
 void kzfree(const void *);
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index 9618deb..a74c1d5 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -15,6 +15,12 @@
 
 struct cmdq_pkt;
 
+struct cmdq_client_reg {
+	u8 subsys;
+	u16 offset;
+	u16 size;
+};
+
 struct cmdq_client {
 	spinlock_t lock;
 	u32 pkt_cnt;
@@ -25,6 +31,21 @@ struct cmdq_client {
 };
 
 /**
+ * cmdq_dev_get_client_reg() - parse cmdq client reg from the device
+ *			       node of CMDQ client
+ * @dev:	device of CMDQ mailbox client
+ * @client_reg: CMDQ client reg pointer
+ * @idx:	the index of desired reg
+ *
+ * Return: 0 for success; else the error code is returned
+ *
+ * Helps a CMDQ client parse its cmdq client reg
+ * from the device node of the CMDQ client.
+ */
+int cmdq_dev_get_client_reg(struct device *dev,
+			    struct cmdq_client_reg *client_reg, int idx);
+
+/**
  * cmdq_mbox_create() - create CMDQ mailbox client and channel
  * @dev:	device of CMDQ mailbox client
  * @index:	index of CMDQ mailbox channel
@@ -100,6 +121,38 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event);
 int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event);
 
 /**
+ * cmdq_pkt_poll() - Append polling command to the CMDQ packet, ask GCE to
+ *		     execute an instruction that waits for a specified
+ *		     hardware register to check for the value w/o mask.
+ *		     All GCE hardware threads will be blocked by this
+ *		     instruction.
+ * @pkt:	the CMDQ packet
+ * @subsys:	the CMDQ sub system code
+ * @offset:	register offset from CMDQ sub system
+ * @value:	the specified target register value
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
+		  u16 offset, u32 value);
+
+/**
+ * cmdq_pkt_poll_mask() - Append polling command to the CMDQ packet, ask GCE to
+ *		          execute an instruction that waits for a specified
+ *		          hardware register to check for the value w/ mask.
+ *		          All GCE hardware threads will be blocked by this
+ *		          instruction.
+ * @pkt:	the CMDQ packet
+ * @subsys:	the CMDQ sub system code
+ * @offset:	register offset from CMDQ sub system
+ * @value:	the specified target register value
+ * @mask:	the specified target register mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
+		       u16 offset, u32 value, u32 mask);
+/**
  * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ
  *                          packet and call back at the end of done packet
  * @pkt:	the CMDQ packet
diff --git a/include/linux/soc/mediatek/mtk_sip_svc.h b/include/linux/soc/mediatek/mtk_sip_svc.h
new file mode 100644
index 0000000..082398e
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk_sip_svc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ */
+#ifndef __MTK_SIP_SVC_H
+#define __MTK_SIP_SVC_H
+
+/* Error Code */
+#define SIP_SVC_E_SUCCESS               0
+#define SIP_SVC_E_NOT_SUPPORTED         -1
+#define SIP_SVC_E_INVALID_PARAMS        -2
+#define SIP_SVC_E_INVALID_RANGE         -3
+#define SIP_SVC_E_PERMISSION_DENIED     -4
+
+#ifdef CONFIG_ARM64
+#define MTK_SIP_SMC_CONVENTION          ARM_SMCCC_SMC_64
+#else
+#define MTK_SIP_SMC_CONVENTION          ARM_SMCCC_SMC_32
+#endif
+
+#define MTK_SIP_SMC_CMD(fn_id) \
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, MTK_SIP_SMC_CONVENTION, \
+			   ARM_SMCCC_OWNER_SIP, fn_id)
+
+#endif
diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h
index fc0b445..a4f5516 100644
--- a/include/linux/soc/samsung/exynos-pmu.h
+++ b/include/linux/soc/samsung/exynos-pmu.h
@@ -3,7 +3,7 @@
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
- * Header for EXYNOS PMU Driver support
+ * Header for Exynos PMU Driver support
  */
 
 #ifndef __LINUX_SOC_EXYNOS_PMU_H
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index 5addaf5..fc9250f 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -3,7 +3,7 @@
  * Copyright (c) 2010-2015 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
- * EXYNOS - Power management unit definition
+ * Exynos - Power management unit definition
  *
  * Notice:
  * This is not a list of all Exynos Power Management Unit SFRs.
@@ -185,7 +185,7 @@
 /* Only for S5Pv210 */
 #define S5PV210_EINT_WAKEUP_MASK	0xC004
 
-/* Only for EXYNOS4210 */
+/* Only for Exynos4210 */
 #define S5P_CMU_CLKSTOP_LCD1_LOWPWR	0x1154
 #define S5P_CMU_RESET_LCD1_LOWPWR	0x1174
 #define S5P_MODIMIF_MEM_LOWPWR		0x11C4
@@ -193,7 +193,7 @@
 #define S5P_SATA_MEM_LOWPWR		0x11E4
 #define S5P_LCD1_LOWPWR			0x1394
 
-/* Only for EXYNOS4x12 */
+/* Only for Exynos4x12 */
 #define S5P_ISP_ARM_LOWPWR			0x1050
 #define S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR	0x1054
 #define S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR	0x1058
@@ -234,7 +234,7 @@
 #define S5P_SECSS_MEM_OPTION			0x2EC8
 #define S5P_ROTATOR_MEM_OPTION			0x2F48
 
-/* Only for EXYNOS4412 */
+/* Only for Exynos4412 */
 #define S5P_ARM_CORE2_LOWPWR			0x1020
 #define S5P_DIS_IRQ_CORE2			0x1024
 #define S5P_DIS_IRQ_CENTRAL2			0x1028
@@ -242,7 +242,7 @@
 #define S5P_DIS_IRQ_CORE3			0x1034
 #define S5P_DIS_IRQ_CENTRAL3			0x1038
 
-/* Only for EXYNOS3XXX */
+/* Only for Exynos3XXX */
 #define EXYNOS3_ARM_CORE0_SYS_PWR_REG			0x1000
 #define EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG	0x1004
 #define EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG	0x1008
@@ -347,7 +347,7 @@
 #define EXYNOS3_OPTION_USE_SC_FEEDBACK			(1 << 1)
 #define EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN	(1 << 7)
 
-/* For EXYNOS5 */
+/* For Exynos5 */
 
 #define EXYNOS5_AUTO_WDTRESET_DISABLE				0x0408
 #define EXYNOS5_MASK_WDTRESET_REQUEST				0x040C
@@ -484,7 +484,7 @@
 
 #define EXYNOS5420_SWRESET_KFC_SEL				0x3
 
-/* Only for EXYNOS5420 */
+/* Only for Exynos5420 */
 #define EXYNOS5420_L2RSTDISABLE_VALUE				BIT(3)
 
 #define EXYNOS5420_LPI_MASK					0x0004
@@ -645,7 +645,7 @@
 					 | EXYNOS5420_KFC_USE_STANDBY_WFI2  \
 					 | EXYNOS5420_KFC_USE_STANDBY_WFI3)
 
-/* For EXYNOS5433 */
+/* For Exynos5433 */
 #define EXYNOS5433_EINT_WAKEUP_MASK				(0x060C)
 #define EXYNOS5433_USBHOST30_PHY_CONTROL			(0x0728)
 #define EXYNOS5433_PAD_RETENTION_AUD_OPTION			(0x3028)
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index b7c9eca..b451bb6 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -546,7 +546,8 @@ struct sdw_slave_ops {
  * @debugfs: Slave debugfs
  * @node: node for bus list
  * @port_ready: Port ready completion flag for each Slave port
- * @dev_num: Device Number assigned by Bus
+ * @dev_num: Current Device Number, values can be 0 or dev_num_sticky
+ * @dev_num_sticky: one-time static Device Number assigned by Bus
  * @probed: boolean tracking driver state
  * @probe_complete: completion utility to control potential races
  * on startup between driver probe/initialization and SoundWire
@@ -575,6 +576,7 @@ struct sdw_slave {
 	struct list_head node;
 	struct completion *port_ready;
 	u16 dev_num;
+	u16 dev_num_sticky;
 	bool probed;
 	struct completion probe_complete;
 	struct completion enumeration_complete;
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 93b83bd..979b41b 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -5,6 +5,7 @@
 #define __SDW_INTEL_H
 
 #include <linux/irqreturn.h>
+#include <linux/soundwire/sdw.h>
 
 /**
  * struct sdw_intel_stream_params_data: configuration passed during
@@ -93,6 +94,11 @@ struct sdw_intel_link_res;
  */
 #define SDW_INTEL_CLK_STOP_BUS_RESET		BIT(3)
 
+struct sdw_intel_slave_id {
+	int link_id;
+	struct sdw_slave_id id;
+};
+
 /**
  * struct sdw_intel_ctx - context allocated by the controller
  * driver probe
@@ -101,9 +107,12 @@ struct sdw_intel_link_res;
  * hardware capabilities after all power dependencies are settled.
  * @link_mask: bit-wise mask listing SoundWire links reported by the
  * Controller
+ * @num_slaves: total number of devices exposed across all enabled links
  * @handle: ACPI parent handle
  * @links: information for each link (controller-specific and kept
  * opaque here)
+ * @ids: array of slave_id, representing Slaves exposed across all enabled
+ * links
  * @link_list: list to handle interrupts across all links
  * @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers.
  */
@@ -111,8 +120,10 @@ struct sdw_intel_ctx {
 	int count;
 	void __iomem *mmio_base;
 	u32 link_mask;
+	int num_slaves;
 	acpi_handle handle;
 	struct sdw_intel_link_res *links;
+	struct sdw_intel_slave_id *ids;
 	struct list_head link_list;
 	struct mutex shim_lock; /* lock for access to shared SHIM registers */
 };
diff --git a/include/linux/string.h b/include/linux/string.h
index 02894e4..6dfbb2e 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -62,6 +62,7 @@ extern char * strchr(const char *,int);
 #ifndef __HAVE_ARCH_STRCHRNUL
 extern char * strchrnul(const char *,int);
 #endif
+extern char * strnchrnul(const char *, size_t, int);
 #ifndef __HAVE_ARCH_STRNCHR
 extern char * strnchr(const char *, size_t, int);
 #endif
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index e9ec742..4f6b284 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -113,7 +113,6 @@ struct rpc_authops {
 	int			(*hash_cred)(struct auth_cred *, unsigned int);
 	struct rpc_cred *	(*lookup_cred)(struct rpc_auth *, struct auth_cred *, int);
 	struct rpc_cred *	(*crcreate)(struct rpc_auth*, struct auth_cred *, int, gfp_t);
-	int			(*list_pseudoflavors)(rpc_authflavor_t *, int);
 	rpc_authflavor_t	(*info2flavor)(struct rpcsec_gss_info *);
 	int			(*flavor2info)(rpc_authflavor_t,
 						struct rpcsec_gss_info *);
@@ -158,7 +157,6 @@ rpc_authflavor_t	rpcauth_get_pseudoflavor(rpc_authflavor_t,
 				struct rpcsec_gss_info *);
 int			rpcauth_get_gssinfo(rpc_authflavor_t,
 				struct rpcsec_gss_info *);
-int			rpcauth_list_flavors(rpc_authflavor_t *, int);
 struct rpc_cred *	rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int, gfp_t);
 void			rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
 struct rpc_cred *	rpcauth_lookupcred(struct rpc_auth *, int);
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index f860372..0f64de7 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -45,8 +45,8 @@
  */
 struct cache_head {
 	struct hlist_node	cache_list;
-	time_t		expiry_time;	/* After time time, don't use the data */
-	time_t		last_refresh;   /* If CACHE_PENDING, this is when upcall was
+	time64_t	expiry_time;	/* After this time, don't use the data
+	time64_t	last_refresh;   /* If CACHE_PENDING, this is when upcall was
 					 * sent, else this is when update was
 					 * received, though it is always set to
 					 * be *after* ->flush_time.
@@ -95,22 +95,22 @@ struct cache_detail {
 	/* fields below this comment are for internal use
 	 * and should not be touched by cache owners
 	 */
-	time_t			flush_time;		/* flush all cache items with
+	time64_t		flush_time;		/* flush all cache items with
 							 * last_refresh at or earlier
 							 * than this.  last_refresh
 							 * is never set at or earlier
 							 * than this.
 							 */
 	struct list_head	others;
-	time_t			nextcheck;
+	time64_t		nextcheck;
 	int			entries;
 
 	/* fields for communication over channel */
 	struct list_head	queue;
 
 	atomic_t		writers;		/* how many times /channel is open */
-	time_t			last_close;		/* if no writers, when did last close */
-	time_t			last_warn;		/* when we last warned about no writers */
+	time64_t		last_close;		/* if no writers, when did last close */
+	time64_t		last_warn;		/* when we last warned about no writers */
 
 	union {
 		struct proc_dir_entry	*procfs;
@@ -147,18 +147,22 @@ struct cache_deferred_req {
  * timestamps kept in the cache are expressed in seconds
  * since boot.  This is the best for measuring differences in
  * real time.
+ * This reimplements ktime_get_boottime_seconds() in a slightly
+ * faster but less accurate way. When we end up converting
+ * back to wallclock (CLOCK_REALTIME), that error often
+ * cancels out during the reverse operation.
  */
-static inline time_t seconds_since_boot(void)
+static inline time64_t seconds_since_boot(void)
 {
-	struct timespec boot;
-	getboottime(&boot);
-	return get_seconds() - boot.tv_sec;
+	struct timespec64 boot;
+	getboottime64(&boot);
+	return ktime_get_real_seconds() - boot.tv_sec;
 }
 
-static inline time_t convert_to_wallclock(time_t sinceboot)
+static inline time64_t convert_to_wallclock(time64_t sinceboot)
 {
-	struct timespec boot;
-	getboottime(&boot);
+	struct timespec64 boot;
+	getboottime64(&boot);
 	return boot.tv_sec + sinceboot;
 }
 
@@ -273,7 +277,7 @@ static inline int get_uint(char **bpp, unsigned int *anint)
 	return 0;
 }
 
-static inline int get_time(char **bpp, time_t *time)
+static inline int get_time(char **bpp, time64_t *time)
 {
 	char buf[50];
 	long long ll;
@@ -287,20 +291,20 @@ static inline int get_time(char **bpp, time_t *time)
 	if (kstrtoll(buf, 0, &ll))
 		return -EINVAL;
 
-	*time = (time_t)ll;
+	*time = ll;
 	return 0;
 }
 
-static inline time_t get_expiry(char **bpp)
+static inline time64_t get_expiry(char **bpp)
 {
-	time_t rv;
-	struct timespec boot;
+	time64_t rv;
+	struct timespec64 boot;
 
 	if (get_time(bpp, &rv))
 		return 0;
 	if (rv < 0)
 		return 0;
-	getboottime(&boot);
+	getboottime64(&boot);
 	return rv - boot.tv_sec;
 }
 
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index bd691e0..48c1b16 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -48,7 +48,7 @@ int gss_import_sec_context(
 		size_t			bufsize,
 		struct gss_api_mech	*mech,
 		struct gss_ctx		**ctx_id,
-		time_t			*endtime,
+		time64_t		*endtime,
 		gfp_t			gfp_mask);
 u32 gss_get_mic(
 		struct gss_ctx		*ctx_id,
@@ -108,7 +108,7 @@ struct gss_api_ops {
 			const void		*input_token,
 			size_t			bufsize,
 			struct gss_ctx		*ctx_id,
-			time_t			*endtime,
+			time64_t		*endtime,
 			gfp_t			gfp_mask);
 	u32 (*gss_get_mic)(
 			struct gss_ctx		*ctx_id,
@@ -150,9 +150,6 @@ struct gss_api_mech *gss_mech_get_by_name(const char *);
 /* Similar, but get by pseudoflavor. */
 struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32);
 
-/* Fill in an array with a list of supported pseudoflavors */
-int gss_mech_list_pseudoflavors(rpc_authflavor_t *, int);
-
 struct gss_api_mech * gss_mech_get(struct gss_api_mech *);
 
 /* For every successful gss_mech_get or gss_mech_get_by_* call there must be a
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index 02c0412..c1d77dd 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -106,9 +106,9 @@ struct krb5_ctx {
 	struct crypto_sync_skcipher *initiator_enc_aux;
 	u8			Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
 	u8			cksum[GSS_KRB5_MAX_KEYLEN];
-	s32			endtime;
 	atomic_t		seq_send;
 	atomic64_t		seq_send64;
+	time64_t		endtime;
 	struct xdr_netobj	mech_used;
 	u8			initiator_sign[GSS_KRB5_MAX_KEYLEN];
 	u8			acceptor_sign[GSS_KRB5_MAX_KEYLEN];
diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h
index 84b92b4..d94d4f4 100644
--- a/include/linux/sunrpc/stats.h
+++ b/include/linux/sunrpc/stats.h
@@ -63,7 +63,7 @@ struct proc_dir_entry *	rpc_proc_register(struct net *,struct rpc_stat *);
 void			rpc_proc_unregister(struct net *,const char *);
 void			rpc_proc_zero(const struct rpc_program *);
 struct proc_dir_entry *	svc_proc_register(struct net *, struct svc_stat *,
-					  const struct file_operations *);
+					  const struct proc_ops *);
 void			svc_proc_unregister(struct net *, const char *);
 
 void			svc_seq_show(struct seq_file *,
@@ -75,7 +75,7 @@ static inline void rpc_proc_unregister(struct net *net, const char *p) {}
 static inline void rpc_proc_zero(const struct rpc_program *p) {}
 
 static inline struct proc_dir_entry *svc_proc_register(struct net *net, struct svc_stat *s,
-						       const struct file_operations *f) { return NULL; }
+						       const struct proc_ops *proc_ops) { return NULL; }
 static inline void svc_proc_unregister(struct net *net, const char *p) {}
 
 static inline void svc_seq_show(struct seq_file *seq,
diff --git a/include/linux/swab.h b/include/linux/swab.h
index e466fd1..bcff514 100644
--- a/include/linux/swab.h
+++ b/include/linux/swab.h
@@ -7,6 +7,7 @@
 # define swab16 __swab16
 # define swab32 __swab32
 # define swab64 __swab64
+# define swab __swab
 # define swahw32 __swahw32
 # define swahb32 __swahb32
 # define swab16p __swab16p
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index e295515b..082f1d5 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -21,6 +21,11 @@
 #define SWITCHTEC_EVENT_FATAL    BIT(4)
 
 #define SWITCHTEC_DMA_MRPC_EN	BIT(0)
+
+#define MRPC_GAS_READ		0x29
+#define MRPC_GAS_WRITE		0x87
+#define MRPC_CMD_ID(x)		((x) & 0xffff)
+
 enum {
 	SWITCHTEC_GAS_MRPC_OFFSET       = 0x0000,
 	SWITCHTEC_GAS_TOP_CFG_OFFSET    = 0x1000,
@@ -32,6 +37,11 @@ enum {
 	SWITCHTEC_GAS_PFF_CSR_OFFSET    = 0x134000,
 };
 
+enum switchtec_gen {
+	SWITCHTEC_GEN3,
+	SWITCHTEC_GEN4,
+};
+
 struct mrpc_regs {
 	u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
 	u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
@@ -98,16 +108,37 @@ struct sw_event_regs {
 } __packed;
 
 enum {
-	SWITCHTEC_CFG0_RUNNING = 0x04,
-	SWITCHTEC_CFG1_RUNNING = 0x05,
-	SWITCHTEC_IMG0_RUNNING = 0x03,
-	SWITCHTEC_IMG1_RUNNING = 0x07,
+	SWITCHTEC_GEN3_CFG0_RUNNING = 0x04,
+	SWITCHTEC_GEN3_CFG1_RUNNING = 0x05,
+	SWITCHTEC_GEN3_IMG0_RUNNING = 0x03,
+	SWITCHTEC_GEN3_IMG1_RUNNING = 0x07,
 };
 
-struct sys_info_regs {
-	u32 device_id;
-	u32 device_version;
-	u32 firmware_version;
+enum {
+	SWITCHTEC_GEN4_MAP0_RUNNING = 0x00,
+	SWITCHTEC_GEN4_MAP1_RUNNING = 0x01,
+	SWITCHTEC_GEN4_KEY0_RUNNING = 0x02,
+	SWITCHTEC_GEN4_KEY1_RUNNING = 0x03,
+	SWITCHTEC_GEN4_BL2_0_RUNNING = 0x04,
+	SWITCHTEC_GEN4_BL2_1_RUNNING = 0x05,
+	SWITCHTEC_GEN4_CFG0_RUNNING = 0x06,
+	SWITCHTEC_GEN4_CFG1_RUNNING = 0x07,
+	SWITCHTEC_GEN4_IMG0_RUNNING = 0x08,
+	SWITCHTEC_GEN4_IMG1_RUNNING = 0x09,
+};
+
+enum {
+	SWITCHTEC_GEN4_KEY0_ACTIVE = 0,
+	SWITCHTEC_GEN4_KEY1_ACTIVE = 1,
+	SWITCHTEC_GEN4_BL2_0_ACTIVE = 0,
+	SWITCHTEC_GEN4_BL2_1_ACTIVE = 1,
+	SWITCHTEC_GEN4_CFG0_ACTIVE = 0,
+	SWITCHTEC_GEN4_CFG1_ACTIVE = 1,
+	SWITCHTEC_GEN4_IMG0_ACTIVE = 0,
+	SWITCHTEC_GEN4_IMG1_ACTIVE = 1,
+};
+
+struct sys_info_regs_gen3 {
 	u32 reserved1;
 	u32 vendor_table_revision;
 	u32 table_format_version;
@@ -124,26 +155,78 @@ struct sys_info_regs {
 	u8 component_revision;
 } __packed;
 
-struct flash_info_regs {
+struct sys_info_regs_gen4 {
+	u16 gas_layout_ver;
+	u8 evlist_ver;
+	u8 reserved1;
+	u16 mgmt_cmd_set_ver;
+	u16 fabric_cmd_set_ver;
+	u32 reserved2[2];
+	u8 mrpc_uart_ver;
+	u8 mrpc_twi_ver;
+	u8 mrpc_eth_ver;
+	u8 mrpc_inband_ver;
+	u32 reserved3[7];
+	u32 fw_update_tmo;
+	u32 xml_version_cfg;
+	u32 xml_version_img;
+	u32 partition_id;
+	u16 bl2_running;
+	u16 cfg_running;
+	u16 img_running;
+	u16 key_running;
+	u32 reserved4[43];
+	u32 vendor_seeprom_twi;
+	u32 vendor_table_revision;
+	u32 vendor_specific_info[2];
+	u16 p2p_vendor_id;
+	u16 p2p_device_id;
+	u8 p2p_revision_id;
+	u8 reserved5[3];
+	u32 p2p_class_id;
+	u16 subsystem_vendor_id;
+	u16 subsystem_id;
+	u32 p2p_serial_number[2];
+	u8 mac_addr[6];
+	u8 reserved6[2];
+	u32 reserved7[3];
+	char vendor_id[8];
+	char product_id[24];
+	char  product_revision[2];
+	u16 reserved8;
+} __packed;
+
+struct sys_info_regs {
+	u32 device_id;
+	u32 device_version;
+	u32 firmware_version;
+	union {
+		struct sys_info_regs_gen3 gen3;
+		struct sys_info_regs_gen4 gen4;
+	};
+} __packed;
+
+struct partition_info {
+	u32 address;
+	u32 length;
+};
+
+struct flash_info_regs_gen3 {
 	u32 flash_part_map_upd_idx;
 
-	struct active_partition_info {
+	struct active_partition_info_gen3 {
 		u32 address;
 		u32 build_version;
 		u32 build_string;
 	} active_img;
 
-	struct active_partition_info active_cfg;
-	struct active_partition_info inactive_img;
-	struct active_partition_info inactive_cfg;
+	struct active_partition_info_gen3 active_cfg;
+	struct active_partition_info_gen3 inactive_img;
+	struct active_partition_info_gen3 inactive_cfg;
 
 	u32 flash_length;
 
-	struct partition_info {
-		u32 address;
-		u32 length;
-	} cfg0;
-
+	struct partition_info cfg0;
 	struct partition_info cfg1;
 	struct partition_info img0;
 	struct partition_info img1;
@@ -151,6 +234,40 @@ struct flash_info_regs {
 	struct partition_info vendor[8];
 };
 
+struct flash_info_regs_gen4 {
+	u32 flash_address;
+	u32 flash_length;
+
+	struct active_partition_info_gen4 {
+		unsigned char bl2;
+		unsigned char cfg;
+		unsigned char img;
+		unsigned char key;
+	} active_flag;
+
+	u32 reserved[3];
+
+	struct partition_info map0;
+	struct partition_info map1;
+	struct partition_info key0;
+	struct partition_info key1;
+	struct partition_info bl2_0;
+	struct partition_info bl2_1;
+	struct partition_info cfg0;
+	struct partition_info cfg1;
+	struct partition_info img0;
+	struct partition_info img1;
+	struct partition_info nvlog;
+	struct partition_info vendor[8];
+};
+
+struct flash_info_regs {
+	union {
+		struct flash_info_regs_gen3 gen3;
+		struct flash_info_regs_gen4 gen4;
+	};
+};
+
 enum {
 	SWITCHTEC_NTB_REG_INFO_OFFSET   = 0x0000,
 	SWITCHTEC_NTB_REG_CTRL_OFFSET   = 0x4000,
@@ -196,7 +313,9 @@ struct part_cfg_regs {
 	u32 mrpc_comp_async_data[5];
 	u32 dyn_binding_hdr;
 	u32 dyn_binding_data[5];
-	u32 reserved4[159];
+	u32 intercomm_notify_hdr;
+	u32 intercomm_notify_data[5];
+	u32 reserved4[153];
 } __packed;
 
 enum {
@@ -320,7 +439,8 @@ struct pff_csr_regs {
 	u32 dpc_data[5];
 	u32 cts_hdr;
 	u32 cts_data[5];
-	u32 reserved3[6];
+	u32 uec_hdr;
+	u32 uec_data[5];
 	u32 hotplug_hdr;
 	u32 hotplug_data[5];
 	u32 ier_hdr;
@@ -355,6 +475,8 @@ struct switchtec_dev {
 	struct device dev;
 	struct cdev cdev;
 
+	enum switchtec_gen gen;
+
 	int partition;
 	int partition_count;
 	int pff_csr_count;
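
With sys_info_regs now a union of generation-specific layouts, accesses are
expected to dispatch on the new gen field; a hedged sketch (the foo_ name and
the caller-provided si pointer are assumptions):

	static u32 foo_vendor_table_rev(struct switchtec_dev *stdev,
					struct sys_info_regs __iomem *si)
	{
		if (stdev->gen == SWITCHTEC_GEN3)
			return ioread32(&si->gen3.vendor_table_revision);
		return ioread32(&si->gen4.vendor_table_revision);
	}
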
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 5262b7a..1815065 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -16,8 +16,7 @@ struct inode;
 struct iocb;
 struct io_event;
 struct iovec;
-struct itimerspec;
-struct itimerval;
+struct __kernel_old_itimerval;
 struct kexec_segment;
 struct linux_dirent;
 struct linux_dirent64;
@@ -69,6 +68,7 @@ struct rseq;
 union bpf_attr;
 struct io_uring_params;
 struct clone_args;
+struct open_how;
 
 #include <linux/types.h>
 #include <linux/aio_abi.h>
@@ -439,6 +439,8 @@ asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
 asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
 asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
 			   umode_t mode);
+asmlinkage long sys_openat2(int dfd, const char __user *filename,
+			    struct open_how *how, size_t size);
 asmlinkage long sys_close(unsigned int fd);
 asmlinkage long sys_vhangup(void);
 
@@ -591,10 +593,10 @@ asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp,
 				     struct old_timespec32 __user *rmtp);
 
 /* kernel/itimer.c */
-asmlinkage long sys_getitimer(int which, struct itimerval __user *value);
+asmlinkage long sys_getitimer(int which, struct __kernel_old_itimerval __user *value);
 asmlinkage long sys_setitimer(int which,
-				struct itimerval __user *value,
-				struct itimerval __user *ovalue);
+				struct __kernel_old_itimerval __user *value,
+				struct __kernel_old_itimerval __user *ovalue);
 
 /* kernel/kexec.c */
 asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
@@ -1000,6 +1002,7 @@ asmlinkage long sys_fspick(int dfd, const char __user *path, unsigned int flags)
 asmlinkage long sys_pidfd_send_signal(int pidfd, int sig,
 				       siginfo_t __user *info,
 				       unsigned int flags);
+asmlinkage long sys_pidfd_getfd(int pidfd, int fd, unsigned int flags);
 
 /*
  * Architecture-specific system calls
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 1cf73e6..3dc9640 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -148,9 +148,7 @@ struct tcp_request_sock {
 	const struct tcp_request_sock_ops *af_specific;
 	u64				snt_synack; /* first SYNACK sent time */
 	bool				tfo_listener;
-#if IS_ENABLED(CONFIG_MPTCP)
 	bool				is_mptcp;
-#endif
 	u32				txhash;
 	u32				rcv_isn;
 	u32				snt_isn;
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index d9111ae..126913c6 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -32,17 +32,6 @@
 /* use value, which < 0K, to indicate an invalid/uninitialized temperature */
 #define THERMAL_TEMP_INVALID	-274000
 
-/* Unit conversion macros */
-#define DECI_KELVIN_TO_CELSIUS(t)	({			\
-	long _t = (t);						\
-	((_t-2732 >= 0) ? (_t-2732+5)/10 : (_t-2732-5)/10);	\
-})
-#define CELSIUS_TO_DECI_KELVIN(t)	((t)*10+2732)
-#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100)
-#define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732)
-#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
-#define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732)
-
 /* Default Thermal Governor */
 #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
 #define DEFAULT_THERMAL_GOVERNOR       "step_wise"
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 13ea7f7..af2c85d 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -11,7 +11,7 @@
 #include <linux/tracepoint.h>
 
 struct trace_array;
-struct trace_buffer;
+struct array_buffer;
 struct tracer;
 struct dentry;
 struct bpf_prog;
@@ -79,7 +79,7 @@ struct trace_entry {
 struct trace_iterator {
 	struct trace_array	*tr;
 	struct tracer		*trace;
-	struct trace_buffer	*trace_buffer;
+	struct array_buffer	*array_buffer;
 	void			*private;
 	int			cpu_file;
 	struct mutex		mutex;
@@ -153,7 +153,7 @@ void tracing_generic_entry_update(struct trace_entry *entry,
 struct trace_event_file;
 
 struct ring_buffer_event *
-trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
+trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
 				struct trace_event_file *trace_file,
 				int type, unsigned long len,
 				unsigned long flags, int pc);
@@ -226,12 +226,13 @@ extern int trace_event_reg(struct trace_event_call *event,
 			    enum trace_reg type, void *data);
 
 struct trace_event_buffer {
-	struct ring_buffer		*buffer;
+	struct trace_buffer		*buffer;
 	struct ring_buffer_event	*event;
 	struct trace_event_file		*trace_file;
 	void				*entry;
 	unsigned long			flags;
 	int				pc;
+	struct pt_regs			*regs;
 };
 
 void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
@@ -364,6 +365,128 @@ enum {
 	EVENT_FILE_FL_WAS_ENABLED_BIT,
 };
 
+extern struct trace_event_file *trace_get_event_file(const char *instance,
+						     const char *system,
+						     const char *event);
+extern void trace_put_event_file(struct trace_event_file *file);
+
+#define MAX_DYNEVENT_CMD_LEN	(2048)
+
+enum dynevent_type {
+	DYNEVENT_TYPE_SYNTH = 1,
+	DYNEVENT_TYPE_KPROBE,
+	DYNEVENT_TYPE_NONE,
+};
+
+struct dynevent_cmd;
+
+typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);
+
+struct dynevent_cmd {
+	struct seq_buf		seq;
+	const char		*event_name;
+	unsigned int		n_fields;
+	enum dynevent_type	type;
+	dynevent_create_fn_t	run_command;
+	void			*private_data;
+};
+
+extern int dynevent_create(struct dynevent_cmd *cmd);
+
+extern int synth_event_delete(const char *name);
+
+extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
+				 char *buf, int maxlen);
+
+extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
+				       const char *name,
+				       struct module *mod, ...);
+
+#define synth_event_gen_cmd_start(cmd, name, mod, ...)	\
+	__synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)
+
+struct synth_field_desc {
+	const char *type;
+	const char *name;
+};
+
+extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
+					   const char *name,
+					   struct module *mod,
+					   struct synth_field_desc *fields,
+					   unsigned int n_fields);
+extern int synth_event_create(const char *name,
+			      struct synth_field_desc *fields,
+			      unsigned int n_fields, struct module *mod);
+
+extern int synth_event_add_field(struct dynevent_cmd *cmd,
+				 const char *type,
+				 const char *name);
+extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
+				     const char *type_name);
+extern int synth_event_add_fields(struct dynevent_cmd *cmd,
+				  struct synth_field_desc *fields,
+				  unsigned int n_fields);
+
+#define synth_event_gen_cmd_end(cmd)	\
+	dynevent_create(cmd)
+
+struct synth_event;
+
+struct synth_event_trace_state {
+	struct trace_event_buffer fbuffer;
+	struct synth_trace_event *entry;
+	struct trace_buffer *buffer;
+	struct synth_event *event;
+	unsigned int cur_field;
+	unsigned int n_u64;
+	bool enabled;
+	bool add_next;
+	bool add_name;
+};
+
+extern int synth_event_trace(struct trace_event_file *file,
+			     unsigned int n_vals, ...);
+extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
+				   unsigned int n_vals);
+extern int synth_event_trace_start(struct trace_event_file *file,
+				   struct synth_event_trace_state *trace_state);
+extern int synth_event_add_next_val(u64 val,
+				    struct synth_event_trace_state *trace_state);
+extern int synth_event_add_val(const char *field_name, u64 val,
+			       struct synth_event_trace_state *trace_state);
+extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
+
+extern int kprobe_event_delete(const char *name);
+
+extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
+				  char *buf, int maxlen);
+
+#define kprobe_event_gen_cmd_start(cmd, name, loc, ...)			\
+	__kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)
+
+#define kretprobe_event_gen_cmd_start(cmd, name, loc, ...)		\
+	__kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)
+
+extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
+					bool kretprobe,
+					const char *name,
+					const char *loc, ...);
+
+#define kprobe_event_add_fields(cmd, ...)	\
+	__kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)
+
+#define kprobe_event_add_field(cmd, field)	\
+	__kprobe_event_add_fields(cmd, field, NULL)
+
+extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
+
+#define kprobe_event_gen_cmd_end(cmd)		\
+	dynevent_create(cmd)
+
+#define kretprobe_event_gen_cmd_end(cmd)	\
+	dynevent_create(cmd)
+
 /*
  * Event file flags:
  *  ENABLED	  - The event is enabled
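
A hedged sketch of the new in-kernel synthetic event API, assuming
trace_get_event_file() returns an ERR_PTR on failure and that synthetic events
live under the "synthetic" system; the foo_lat event, its fields and the traced
values are purely illustrative, and enabling the event is not shown:

	static struct synth_field_desc foo_fields[] = {
		{ .type = "pid_t",	.name = "pid"	},
		{ .type = "u64",	.name = "delta"	},
	};

	static int foo_synth_demo(void)
	{
		struct trace_event_file *file;
		int ret;

		ret = synth_event_create("foo_lat", foo_fields,
					 ARRAY_SIZE(foo_fields), THIS_MODULE);
		if (ret)
			return ret;

		file = trace_get_event_file(NULL, "synthetic", "foo_lat");
		if (IS_ERR(file)) {
			synth_event_delete("foo_lat");
			return PTR_ERR(file);
		}

		ret = synth_event_trace(file, 2, (u64)current->pid, (u64)100);

		trace_put_event_file(file);
		synth_event_delete("foo_lat");
		return ret;
	}
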
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
index 88d279c..9991244 100644
--- a/include/linux/tracefs.h
+++ b/include/linux/tracefs.h
@@ -28,7 +28,6 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
 struct dentry *tracefs_create_dir(const char *name, struct dentry *parent);
 
 void tracefs_remove(struct dentry *dentry);
-void tracefs_remove_recursive(struct dentry *dentry);
 
 struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *parent,
 					   int (*mkdir)(const char *name),
diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h
index a9c5976..63076fb 100644
--- a/include/linux/transport_class.h
+++ b/include/linux/transport_class.h
@@ -62,16 +62,16 @@ struct transport_container {
 	container_of(x, struct transport_container, ac)
 
 void transport_remove_device(struct device *);
-void transport_add_device(struct device *);
+int transport_add_device(struct device *);
 void transport_setup_device(struct device *);
 void transport_configure_device(struct device *);
 void transport_destroy_device(struct device *);
 
-static inline void
+static inline int
 transport_register_device(struct device *dev)
 {
 	transport_setup_device(dev);
-	transport_add_device(dev);
+	return transport_add_device(dev);
 }
 
 static inline void
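
Since transport_register_device() now returns an error code, callers are
expected to check it; a one-function sketch (foo_register is an assumed name):

	static int foo_register(struct device *dev)
	{
		int error;

		error = transport_register_device(dev);
		if (error)
			return error;	/* failures can no longer be ignored */
		return 0;
	}
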
diff --git a/include/linux/units.h b/include/linux/units.h
new file mode 100644
index 0000000..aaf7163
--- /dev/null
+++ b/include/linux/units.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UNITS_H
+#define _LINUX_UNITS_H
+
+#include <linux/kernel.h>
+
+#define ABSOLUTE_ZERO_MILLICELSIUS -273150
+
+static inline long milli_kelvin_to_millicelsius(long t)
+{
+	return t + ABSOLUTE_ZERO_MILLICELSIUS;
+}
+
+static inline long millicelsius_to_milli_kelvin(long t)
+{
+	return t - ABSOLUTE_ZERO_MILLICELSIUS;
+}
+
+#define MILLIDEGREE_PER_DEGREE 1000
+#define MILLIDEGREE_PER_DECIDEGREE 100
+
+static inline long kelvin_to_millicelsius(long t)
+{
+	return milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long millicelsius_to_kelvin(long t)
+{
+	t = millicelsius_to_milli_kelvin(t);
+
+	return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long deci_kelvin_to_celsius(long t)
+{
+	t = milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DECIDEGREE);
+
+	return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long celsius_to_deci_kelvin(long t)
+{
+	t = millicelsius_to_milli_kelvin(t * MILLIDEGREE_PER_DEGREE);
+
+	return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DECIDEGREE);
+}
+
+/**
+ * deci_kelvin_to_millicelsius_with_offset - convert decikelvin to millicelsius
+ * @t: temperature value in decidegrees Kelvin
+ * @offset: difference between Kelvin and Celsius in millidegrees
+ *
+ * Return: temperature value in millidegrees Celsius
+ */
+static inline long deci_kelvin_to_millicelsius_with_offset(long t, long offset)
+{
+	return t * MILLIDEGREE_PER_DECIDEGREE - offset;
+}
+
+static inline long deci_kelvin_to_millicelsius(long t)
+{
+	return milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DECIDEGREE);
+}
+
+static inline long millicelsius_to_deci_kelvin(long t)
+{
+	t = millicelsius_to_milli_kelvin(t);
+
+	return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DECIDEGREE);
+}
+
+static inline long kelvin_to_celsius(long t)
+{
+	return t + DIV_ROUND_CLOSEST(ABSOLUTE_ZERO_MILLICELSIUS,
+				     MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long celsius_to_kelvin(long t)
+{
+	return t - DIV_ROUND_CLOSEST(ABSOLUTE_ZERO_MILLICELSIUS,
+				     MILLIDEGREE_PER_DEGREE);
+}
+
+#endif /* _LINUX_UNITS_H */
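
A quick worked example of the helpers that replace the removed thermal.h
macros; the values in the comments follow directly from
ABSOLUTE_ZERO_MILLICELSIUS = -273150 (foo_units_demo is an assumed name):

	static void foo_units_demo(void)
	{
		long dk = 2982;					/* 298.2 K in decidegrees */
		long mc = deci_kelvin_to_millicelsius(dk);	/* 25050, i.e. 25.05 C */
		long rt = millicelsius_to_deci_kelvin(mc);	/* back to 2982 */

		(void)rt;
	}
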
diff --git a/include/linux/usb/gpio_vbus.h b/include/linux/usb/gpio_vbus.h
deleted file mode 100644
index 804fb06..0000000
--- a/include/linux/usb/gpio_vbus.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * A simple GPIO VBUS sensing driver for B peripheral only devices
- * with internal transceivers.
- * Optionally D+ pullup can be controlled by a second GPIO.
- *
- * Copyright (c) 2008 Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-/**
- * struct gpio_vbus_mach_info - configuration for gpio_vbus
- * @gpio_vbus: VBUS sensing GPIO
- * @gpio_pullup: optional D+ or D- pullup GPIO (else negative/invalid)
- * @gpio_vbus_inverted: true if gpio_vbus is active low
- * @gpio_pullup_inverted: true if gpio_pullup is active low
- * @wakeup: configure gpio_vbus as a wake-up source
- *
- * The VBUS sensing GPIO should have a pulldown, which will normally be
- * part of a resistor ladder turning a 4.0V-5.25V level on VBUS into a
- * value the GPIO detects as active.  Some systems will use comparators.
- */
-struct gpio_vbus_mach_info {
-	int gpio_vbus;
-	int gpio_pullup;
-	bool gpio_vbus_inverted;
-	bool gpio_pullup_inverted;
-	bool wakeup;
-};
diff --git a/include/linux/usb/irda.h b/include/linux/usb/irda.h
index 396d2b0..556a801e 100644
--- a/include/linux/usb/irda.h
+++ b/include/linux/usb/irda.h
@@ -119,11 +119,22 @@ struct usb_irda_cs_descriptor {
  * 6 - 115200 bps
  * 7 - 576000 bps
  * 8 - 1.152 Mbps
- * 9 - 5 mbps
+ * 9 - 4 Mbps
  * 10..15 - Reserved
  */
 #define USB_IRDA_STATUS_LINK_SPEED	0x0f
 
+#define USB_IRDA_LS_NO_CHANGE		0
+#define USB_IRDA_LS_2400		1
+#define USB_IRDA_LS_9600		2
+#define USB_IRDA_LS_19200		3
+#define USB_IRDA_LS_38400		4
+#define USB_IRDA_LS_57600		5
+#define USB_IRDA_LS_115200		6
+#define USB_IRDA_LS_576000		7
+#define USB_IRDA_LS_1152000		8
+#define USB_IRDA_LS_4000000		9
+
 /* The following is a 4-bit value used only for
  * outbound header:
  *
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index 145c38e..a665d7f 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -45,7 +45,8 @@ enum pd_data_msg_type {
 	PD_DATA_BATT_STATUS = 5,
 	PD_DATA_ALERT = 6,
 	PD_DATA_GET_COUNTRY_INFO = 7,
-	/* 8-14 Reserved */
+	PD_DATA_ENTER_USB = 8,
+	/* 9-14 Reserved */
 	PD_DATA_VENDOR_DEF = 15,
 	/* 16-31 Reserved */
 };
@@ -418,6 +419,36 @@ static inline unsigned int rdo_max_power(u32 rdo)
 	return ((rdo >> RDO_BATT_MAX_PWR_SHIFT) & RDO_PWR_MASK) * 250;
 }
 
+/* Enter_USB Data Object */
+#define EUDO_USB_MODE_MASK		GENMASK(30, 28)
+#define EUDO_USB_MODE_SHIFT		28
+#define   EUDO_USB_MODE_USB2		0
+#define   EUDO_USB_MODE_USB3		1
+#define   EUDO_USB_MODE_USB4		2
+#define EUDO_USB4_DRD			BIT(26)
+#define EUDO_USB3_DRD			BIT(25)
+#define EUDO_CABLE_SPEED_MASK		GENMASK(23, 21)
+#define EUDO_CABLE_SPEED_SHIFT		21
+#define   EUDO_CABLE_SPEED_USB2		0
+#define   EUDO_CABLE_SPEED_USB3_GEN1	1
+#define   EUDO_CABLE_SPEED_USB4_GEN2	2
+#define   EUDO_CABLE_SPEED_USB4_GEN3	3
+#define EUDO_CABLE_TYPE_MASK		GENMASK(20, 19)
+#define EUDO_CABLE_TYPE_SHIFT		19
+#define   EUDO_CABLE_TYPE_PASSIVE	0
+#define   EUDO_CABLE_TYPE_RE_TIMER	1
+#define   EUDO_CABLE_TYPE_RE_DRIVER	2
+#define   EUDO_CABLE_TYPE_OPTICAL	3
+#define EUDO_CABLE_CURRENT_MASK		GENMASK(18, 17)
+#define EUDO_CABLE_CURRENT_SHIFT	17
+#define   EUDO_CABLE_CURRENT_NOTSUPP	0
+#define   EUDO_CABLE_CURRENT_3A		2
+#define   EUDO_CABLE_CURRENT_5A		3
+#define EUDO_PCIE_SUPPORT		BIT(16)
+#define EUDO_DP_SUPPORT			BIT(15)
+#define EUDO_TBT_SUPPORT		BIT(14)
+#define EUDO_HOST_PRESENT		BIT(13)
+
 /* USB PD timers and counters */
 #define PD_T_NO_RESPONSE	5000	/* 4.5 - 5.5 seconds */
 #define PD_T_DB_DETECT		10000	/* 10 - 15 seconds */
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
index 781f4e9..35b8e15 100644
--- a/include/linux/usb/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
@@ -140,6 +140,38 @@
 #define PD_PRODUCT_PID(vdo)	(((vdo) >> 16) & 0xffff)
 
 /*
+ * UFP VDO1
+ * --------
+ * <31:29> :: UFP VDO version
+ * <28>    :: Reserved
+ * <27:24> :: Device capability
+ * <23:6>  :: Reserved
+ * <5:3>   :: Alternate modes
+ * <2:0>   :: USB highest speed
+ */
+#define PD_VDO1_UFP_DEVCAP(vdo)	(((vdo) & GENMASK(27, 24)) >> 24)
+
+#define DEV_USB2_CAPABLE	BIT(0)
+#define DEV_USB2_BILLBOARD	BIT(1)
+#define DEV_USB3_CAPABLE	BIT(2)
+#define DEV_USB4_CAPABLE	BIT(3)
+
+/*
+ * DFP VDO
+ * --------
+ * <31:29> :: DFP VDO version
+ * <28:27> :: Reserved
+ * <26:24> :: Host capability
+ * <23:5>  :: Reserved
+ * <4:0>   :: Port number
+ */
+#define PD_VDO_DFP_HOSTCAP(vdo)	(((vdo) & GENMASK(26, 24)) >> 24)
+
+#define HOST_USB2_CAPABLE	BIT(0)
+#define HOST_USB3_CAPABLE	BIT(1)
+#define HOST_USB4_CAPABLE	BIT(2)
+
+/*
  * Cable VDO
  * ---------
  * <31:28> :: Cable HW version
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 6914475..d418c55 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -170,8 +170,6 @@ struct renesas_usbhs_driver_param {
 	 */
 	int pio_dma_border; /* default is 64byte */
 
-	u32 enable_gpio;
-
 	/*
 	 * option:
 	 */
diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h
index 0c5c3ea..c29d1b4 100644
--- a/include/linux/usb/tegra_usb_phy.h
+++ b/include/linux/usb/tegra_usb_phy.h
@@ -17,6 +17,7 @@
 #define __TEGRA_USB_PHY_H
 
 #include <linux/clk.h>
+#include <linux/gpio.h>
 #include <linux/reset.h>
 #include <linux/usb/otg.h>
 
@@ -76,8 +77,9 @@ struct tegra_usb_phy {
 	struct usb_phy u_phy;
 	bool is_legacy_phy;
 	bool is_ulpi_phy;
-	int reset_gpio;
+	struct gpio_desc *reset_gpio;
 	struct reset_control *pad_rst;
+	bool powered_on;
 };
 
 void tegra_usb_phy_preresume(struct usb_phy *phy);
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 0f52723..c358b3f 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -9,6 +9,9 @@
 #define USB_TYPEC_REV_1_0	0x100 /* 1.0 */
 #define USB_TYPEC_REV_1_1	0x110 /* 1.1 */
 #define USB_TYPEC_REV_1_2	0x120 /* 1.2 */
+#define USB_TYPEC_REV_1_3	0x130 /* 1.3 */
+#define USB_TYPEC_REV_1_4	0x140 /* 1.4 */
+#define USB_TYPEC_REV_2_0	0x200 /* 2.0 */
 
 struct typec_partner;
 struct typec_cable;
@@ -74,6 +77,7 @@ enum typec_orientation {
  * @id_header: ID Header VDO
  * @cert_stat: Cert Stat VDO
  * @product: Product VDO
+ * @vdo: Product Type Specific VDOs
  *
  * USB power delivery Discover Identity command response data.
  *
@@ -84,6 +88,7 @@ struct usb_pd_identity {
 	u32			id_header;
 	u32			cert_stat;
 	u32			product;
+	u32			vdo[3];
 };
 
 int typec_partner_set_identity(struct typec_partner *partner);
@@ -230,6 +235,10 @@ struct typec_cable *typec_register_cable(struct typec_port *port,
 					 struct typec_cable_desc *desc);
 void typec_unregister_cable(struct typec_cable *cable);
 
+struct typec_cable *typec_cable_get(struct typec_port *port);
+void typec_cable_put(struct typec_cable *cable);
+int typec_cable_is_active(struct typec_cable *cable);
+
 struct typec_plug *typec_register_plug(struct typec_cable *cable,
 				       struct typec_plug_desc *desc);
 void typec_unregister_plug(struct typec_plug *plug);
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
index 9a88c74..923ff3a 100644
--- a/include/linux/usb/typec_altmode.h
+++ b/include/linux/usb/typec_altmode.h
@@ -55,7 +55,7 @@ static inline void *typec_altmode_get_drvdata(struct typec_altmode *altmode)
  * @activate: User callback for Enter/Exit Mode
  */
 struct typec_altmode_ops {
-	int (*enter)(struct typec_altmode *altmode);
+	int (*enter)(struct typec_altmode *altmode, u32 *vdo);
 	int (*exit)(struct typec_altmode *altmode);
 	void (*attention)(struct typec_altmode *altmode, u32 vdo);
 	int (*vdm)(struct typec_altmode *altmode, const u32 hdr,
@@ -65,7 +65,7 @@ struct typec_altmode_ops {
 	int (*activate)(struct typec_altmode *altmode, int activate);
 };
 
-int typec_altmode_enter(struct typec_altmode *altmode);
+int typec_altmode_enter(struct typec_altmode *altmode, u32 *vdo);
 int typec_altmode_exit(struct typec_altmode *altmode);
 void typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
 int typec_altmode_vdm(struct typec_altmode *altmode,
@@ -101,6 +101,22 @@ enum {
 	TYPEC_MODE_DEBUG,			/* Debug Accessory */
 };
 
+/*
+ * USB4 also requires that the pins on the connector are repurposed, just like
+ * Alternate Modes. USB4 mode is however not entered with the Enter Mode Command
+ * like the Alternate Modes are, but instead with a special Enter_USB Message.
+ * The Enter_USB Message can also be used for setting the connector to operate in
+ * USB 3.2 or in USB 2.0 mode instead of USB4.
+ *
+ * The Enter_USB specific "USB Modes" are also supplied here as special modal
+ * state values, just like the Accessory Modes.
+ */
+enum {
+	TYPEC_MODE_USB2 = TYPEC_MODE_DEBUG,	/* USB 2.0 mode */
+	TYPEC_MODE_USB3,			/* USB 3.2 mode */
+	TYPEC_MODE_USB4				/* USB4 mode */
+};
+
 #define TYPEC_MODAL_STATE(_state_)	((_state_) + TYPEC_STATE_MODAL)
 
 struct typec_altmode *typec_altmode_get_plug(struct typec_altmode *altmode,
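
With the updated ->enter() signature, existing Alternate Mode drivers simply
pass a NULL VDO, while a USB4 path would hand over a pointer to the Enter_USB
Data Object instead; a minimal sketch (foo_enter_mode is an assumed name):

	static int foo_enter_mode(struct typec_altmode *alt)
	{
		return typec_altmode_enter(alt, NULL);
	}
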
diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h
index 873ace5..be7292c 100644
--- a/include/linux/usb/typec_mux.h
+++ b/include/linux/usb/typec_mux.h
@@ -8,6 +8,7 @@
 struct device;
 struct typec_mux;
 struct typec_switch;
+struct typec_altmode;
 struct fwnode_handle;
 
 typedef int (*typec_switch_set_fn_t)(struct typec_switch *sw,
@@ -29,7 +30,14 @@ void typec_switch_unregister(struct typec_switch *sw);
 void typec_switch_set_drvdata(struct typec_switch *sw, void *data);
 void *typec_switch_get_drvdata(struct typec_switch *sw);
 
-typedef int (*typec_mux_set_fn_t)(struct typec_mux *mux, int state);
+struct typec_mux_state {
+	struct typec_altmode *alt;
+	unsigned long mode;
+	void *data;
+};
+
+typedef int (*typec_mux_set_fn_t)(struct typec_mux *mux,
+				  struct typec_mux_state *state);
 
 struct typec_mux_desc {
 	struct fwnode_handle *fwnode;
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
index c515765..36c2982 100644
--- a/include/linux/usb/ulpi.h
+++ b/include/linux/usb/ulpi.h
@@ -55,12 +55,23 @@
 #if IS_ENABLED(CONFIG_USB_ULPI)
 struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
 					unsigned int flags);
+
+struct usb_phy *devm_otg_ulpi_create(struct device *dev,
+				     struct usb_phy_io_ops *ops,
+				     unsigned int flags);
 #else
 static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
 					      unsigned int flags)
 {
 	return NULL;
 }
+
+static inline struct usb_phy *devm_otg_ulpi_create(struct device *dev,
+						   struct usb_phy_io_ops *ops,
+						   unsigned int flags)
+{
+	return NULL;
+}
 #endif
 
 #ifdef CONFIG_USB_ULPI_VIEWPORT
diff --git a/include/linux/usb/usb_phy_generic.h b/include/linux/usb/usb_phy_generic.h
index 7408cf5..cd9e70a 100644
--- a/include/linux/usb/usb_phy_generic.h
+++ b/include/linux/usb/usb_phy_generic.h
@@ -3,18 +3,6 @@
 #define __LINUX_USB_NOP_XCEIV_H
 
 #include <linux/usb/otg.h>
-#include <linux/gpio/consumer.h>
-
-struct usb_phy_generic_platform_data {
-	enum usb_phy_type type;
-	unsigned long clk_rate;
-
-	/* if set fails with -EPROBE_DEFER if can't get regulator */
-	unsigned int needs_vcc:1;
-	unsigned int needs_reset:1;	/* deprecated */
-	int gpio_reset;
-	struct gpio_desc *gpiod_vbus;
-};
 
 #if IS_ENABLED(CONFIG_NOP_USB_XCEIV)
 /* sometimes transceivers are accessed only through e.g. ULPI */
diff --git a/include/linux/zlib.h b/include/linux/zlib.h
index 92dbbd3..c757d84 100644
--- a/include/linux/zlib.h
+++ b/include/linux/zlib.h
@@ -191,6 +191,12 @@ extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
    exceed those passed here.
 */
 
+extern int zlib_deflate_dfltcc_enabled (void);
+/*
+   Returns 1 if Deflate-Conversion facility is installed and enabled,
+   otherwise 0.
+*/
+
 /* 
 extern int deflateInit (z_streamp strm, int level);
 
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index 985afea..139e93b 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -37,12 +37,6 @@ struct cec_notifier *cec_notifier_get_conn(struct device *dev,
 					   const char *conn);
 
 /**
- * cec_notifier_put - decrease refcount and delete when the refcount reaches 0.
- * @n: notifier
- */
-void cec_notifier_put(struct cec_notifier *n);
-
-/**
  * cec_notifier_conn_register - find or create a new cec_notifier for the given
  * HDMI device and connector tuple.
  * @hdmi_dev: HDMI device that sends the events.
@@ -138,10 +132,6 @@ static inline struct cec_notifier *cec_notifier_get_conn(struct device *dev,
 	return (struct cec_notifier *)0xdeadfeed;
 }
 
-static inline void cec_notifier_put(struct cec_notifier *n)
-{
-}
-
 static inline struct cec_notifier *
 cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name,
 			   const struct cec_connector_info *conn_info)
@@ -184,23 +174,6 @@ static inline struct device *cec_notifier_parse_hdmi_phandle(struct device *dev)
 #endif
 
 /**
- * cec_notifier_get - find or create a new cec_notifier for the given device.
- * @dev: device that sends the events.
- *
- * If a notifier for device @dev already exists, then increase the refcount
- * and return that notifier.
- *
- * If it doesn't exist, then allocate a new notifier struct and return a
- * pointer to that new struct.
- *
- * Return NULL if the memory could not be allocated.
- */
-static inline struct cec_notifier *cec_notifier_get(struct device *dev)
-{
-	return cec_notifier_get_conn(dev, NULL);
-}
-
-/**
  * cec_notifier_phys_addr_invalidate() - set the physical address to INVALID
  *
  * @n: the CEC notifier
diff --git a/include/media/cec.h b/include/media/cec.h
index 0a4f69c..972bc8c 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -386,52 +386,6 @@ cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info,
 
 #endif
 
-#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
-
-/**
- * cec_notifier_register - register a callback with the notifier
- * @n: the CEC notifier
- * @adap: the CEC adapter, passed as argument to the callback function
- * @callback: the callback function
- */
-void cec_notifier_register(struct cec_notifier *n,
-			   struct cec_adapter *adap,
-			   void (*callback)(struct cec_adapter *adap, u16 pa));
-
-/**
- * cec_notifier_unregister - unregister the callback from the notifier.
- * @n: the CEC notifier
- */
-void cec_notifier_unregister(struct cec_notifier *n);
-
-/**
- * cec_register_cec_notifier - register the notifier with the cec adapter.
- * @adap: the CEC adapter
- * @notifier: the CEC notifier
- */
-void cec_register_cec_notifier(struct cec_adapter *adap,
-			       struct cec_notifier *notifier);
-
-#else
-
-static inline void
-cec_notifier_register(struct cec_notifier *n,
-		      struct cec_adapter *adap,
-		      void (*callback)(struct cec_adapter *adap, u16 pa))
-{
-}
-
-static inline void cec_notifier_unregister(struct cec_notifier *n)
-{
-}
-
-static inline void cec_register_cec_notifier(struct cec_adapter *adap,
-					     struct cec_notifier *notifier)
-{
-}
-
-#endif
-
 /**
  * cec_phys_addr_invalidate() - set the physical address to INVALID
  *
diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h
index 1409230a..800d473 100644
--- a/include/media/dvb-usb-ids.h
+++ b/include/media/dvb-usb-ids.h
@@ -180,6 +180,7 @@
 #define USB_PID_TERRATEC_CINERGY_T_STICK_RC		0x0097
 #define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC	0x0099
 #define USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1	0x00a9
+#define USB_PID_TERRATEC_CINERGY_TC2_STICK		0x10b2
 #define USB_PID_TWINHAN_VP7041_COLD			0x3201
 #define USB_PID_TWINHAN_VP7041_WARM			0x3202
 #define USB_PID_TWINHAN_VP7020_COLD			0x3203
@@ -425,4 +426,5 @@
 #define USB_PID_EVOLVEO_XTRATV_STICK			0xa115
 #define USB_PID_HAMA_DVBT_HYBRID			0x2758
 #define USB_PID_XBOX_ONE_TUNER                          0x02d5
+#define USB_PID_PROLECTRIX_DV107669                     0xd803
 #endif
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index d8c29e0..150ee16 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -14,6 +14,7 @@
 #ifndef V4L2_COMMON_H_
 #define V4L2_COMMON_H_
 
+#include <linux/time.h>
 #include <media/v4l2-dev.h>
 
 /* Common printk constructs for v4l-i2c drivers. These macros create a unique
@@ -518,4 +519,24 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
 int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat,
 			u32 width, u32 height);
 
+static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf)
+{
+	/*
+	 * When the timestamp comes from 32-bit user space, there may be
+	 * uninitialized data in tv_usec, so cast it to u32.
+	 * Otherwise allow invalid input for backwards compatibility.
+	 */
+	return buf->timestamp.tv_sec * NSEC_PER_SEC +
+		(u32)buf->timestamp.tv_usec * NSEC_PER_USEC;
+}
+
+static inline void v4l2_buffer_set_timestamp(struct v4l2_buffer *buf,
+					     u64 timestamp)
+{
+	struct timespec64 ts = ns_to_timespec64(timestamp);
+
+	buf->timestamp.tv_sec  = ts.tv_sec;
+	buf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+}
+
 #endif /* V4L2_COMMON_H_ */
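
A short sketch of the new timestamp helpers, round-tripping a buffer timestamp
through the 64-bit nanosecond representation (foo_copy_timestamp is an assumed
name):

	static void foo_copy_timestamp(const struct v4l2_buffer *src,
				       struct v4l2_buffer *dst)
	{
		v4l2_buffer_set_timestamp(dst, v4l2_buffer_get_timestamp(src));
	}
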
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 5f36e0d..95353ae 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -371,7 +371,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
 		struct v4l2_subdev *__sd;				\
 									\
 		__v4l2_device_call_subdevs_p(v4l2_dev, __sd,		\
-			!(grpid) || __sd->grp_id == (grpid), o, f ,	\
+			(grpid) == 0 || __sd->grp_id == (grpid), o, f ,	\
 			##args);					\
 	} while (0)
 
@@ -403,7 +403,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
 ({									\
 	struct v4l2_subdev *__sd;					\
 	__v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd,		\
-			!(grpid) || __sd->grp_id == (grpid), o, f ,	\
+			(grpid) == 0 || __sd->grp_id == (grpid), o, f ,	\
 			##args);					\
 })
 
@@ -431,8 +431,8 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
 		struct v4l2_subdev *__sd;				\
 									\
 		__v4l2_device_call_subdevs_p(v4l2_dev, __sd,		\
-			!(grpmsk) || (__sd->grp_id & (grpmsk)), o, f ,	\
-			##args);					\
+			(grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o,	\
+			f , ##args);					\
 	} while (0)
 
 /**
@@ -462,8 +462,8 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
 ({									\
 	struct v4l2_subdev *__sd;					\
 	__v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd,		\
-			!(grpmsk) || (__sd->grp_id & (grpmsk)), o, f ,	\
-			##args);					\
+			(grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o,	\
+			f , ##args);					\
 })
 
 
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 4bba65a..86878fb 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -724,4 +724,59 @@ long int video_usercopy(struct file *file, unsigned int cmd,
 long int video_ioctl2(struct file *file,
 		      unsigned int cmd, unsigned long int arg);
 
+/*
+ * The user space interpretation of the 'v4l2_event' differs
+ * based on the 'time_t' definition on 32-bit architectures, so
+ * the kernel has to handle both.
+ * This is the old version for 32-bit architectures.
+ */
+struct v4l2_event_time32 {
+	__u32				type;
+	union {
+		struct v4l2_event_vsync		vsync;
+		struct v4l2_event_ctrl		ctrl;
+		struct v4l2_event_frame_sync	frame_sync;
+		struct v4l2_event_src_change	src_change;
+		struct v4l2_event_motion_det	motion_det;
+		__u8				data[64];
+	} u;
+	__u32				pending;
+	__u32				sequence;
+	struct old_timespec32		timestamp;
+	__u32				id;
+	__u32				reserved[8];
+};
+
+#define	VIDIOC_DQEVENT_TIME32	 _IOR('V', 89, struct v4l2_event_time32)
+
+struct v4l2_buffer_time32 {
+	__u32			index;
+	__u32			type;
+	__u32			bytesused;
+	__u32			flags;
+	__u32			field;
+	struct old_timeval32	timestamp;
+	struct v4l2_timecode	timecode;
+	__u32			sequence;
+
+	/* memory location */
+	__u32			memory;
+	union {
+		__u32           offset;
+		unsigned long   userptr;
+		struct v4l2_plane *planes;
+		__s32		fd;
+	} m;
+	__u32			length;
+	__u32			reserved2;
+	union {
+		__s32		request_fd;
+		__u32		reserved;
+	};
+};
+#define VIDIOC_QUERYBUF_TIME32	_IOWR('V',  9, struct v4l2_buffer_time32)
+#define VIDIOC_QBUF_TIME32	_IOWR('V', 15, struct v4l2_buffer_time32)
+#define VIDIOC_DQBUF_TIME32	_IOWR('V', 17, struct v4l2_buffer_time32)
+#define VIDIOC_PREPARE_BUF_TIME32 _IOWR('V', 93, struct v4l2_buffer_time32)
+
 #endif /* _V4L2_IOCTL_H */
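
A hedged sketch (the helper name and placement are hypothetical, not taken from the patch) of the kind of translation a compat handler performs for VIDIOC_QBUF_TIME32 and friends: only the 32-bit timeval changes representation, every other field is copied across unchanged.

static void example_buffer_from_time32(struct v4l2_buffer *dst,
				       const struct v4l2_buffer_time32 *src)
{
	*dst = (struct v4l2_buffer) {
		.index		= src->index,
		.type		= src->type,
		.bytesused	= src->bytesused,
		.flags		= src->flags,
		.field		= src->field,
		/* Widen the 32-bit timeval to the native timestamp. */
		.timestamp.tv_sec  = src->timestamp.tv_sec,
		.timestamp.tv_usec = src->timestamp.tv_usec,
		.timecode	= src->timecode,
		.sequence	= src->sequence,
		.memory		= src->memory,
		/* Copy the widest union member for this sketch. */
		.m.userptr	= src->m.userptr,
		.length		= src->length,
		.request_fd	= src->request_fd,
	};
}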
diff --git a/include/media/v4l2-rect.h b/include/media/v4l2-rect.h
index c86474d..8800a64 100644
--- a/include/media/v4l2-rect.h
+++ b/include/media/v4l2-rect.h
@@ -63,10 +63,10 @@ static inline void v4l2_rect_map_inside(struct v4l2_rect *r,
 		r->left = boundary->left;
 	if (r->top < boundary->top)
 		r->top = boundary->top;
-	if (r->left + r->width > boundary->width)
-		r->left = boundary->width - r->width;
-	if (r->top + r->height > boundary->height)
-		r->top = boundary->height - r->height;
+	if (r->left + r->width > boundary->left + boundary->width)
+		r->left = boundary->left + boundary->width - r->width;
+	if (r->top + r->height > boundary->top + boundary->height)
+		r->top = boundary->top + boundary->height - r->height;
 }
 
 /**
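
Worked example of the v4l2_rect_map_inside() fix above: take a boundary of {left = 100, width = 200} (spanning x = 100..300) and a rectangle of {left = 120, width = 150} (x = 120..270). The rectangle already fits, yet the old check compared 270 against boundary->width (200) and moved it to left = 200 - 150 = 50, pushing it outside the boundary. With the fix, 270 is compared against boundary->left + boundary->width (300) and the rectangle is left untouched.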
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 71f1f2f..761aa83 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -1090,7 +1090,7 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
  * @sd: pointer to the &struct v4l2_subdev
  * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
  *     Each element there groups a set of callbacks functions.
- * @f: callback function that will be called if @cond matches.
+ * @f: callback function to be called.
  *     The callback functions are defined in groups, according to
  *     each element at &struct v4l2_subdev_ops.
  * @args...: arguments for @f.
diff --git a/include/net/ipx.h b/include/net/ipx.h
index baf0903..9d13428 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -47,11 +47,6 @@ struct ipxhdr {
 /* From af_ipx.c */
 extern int sysctl_ipx_pprop_broadcasting;
 
-static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb)
-{
-	return (struct ipxhdr *)skb_transport_header(skb);
-}
-
 struct ipx_interface {
 	/* IPX address */
 	__be32			if_netnum;
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 27627e2..c971d25 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -174,15 +174,12 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
 
 #endif /* CONFIG_MPTCP */
 
-void mptcp_handle_ipv6_mapped(struct sock *sk, bool mapped);
-
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 int mptcpv6_init(void);
+void mptcpv6_handle_mapped(struct sock *sk, bool mapped);
 #elif IS_ENABLED(CONFIG_IPV6)
-static inline int mptcpv6_init(void)
-{
-	return 0;
-}
+static inline int mptcpv6_init(void) { return 0; }
+static inline void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { }
 #endif
 
 #endif /* __NET_MPTCP_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index 4a180f2..e55d5f7 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -476,6 +476,13 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
 	if (!inet_get_convert_csum(sk))
 		features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 
+	/* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
+	 * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
+	 * packets in udp_gro_complete_segment. As does UDP GSO, verified by
+	 * udp_send_skb. But when those packets are looped in dev_loopback_xmit
+	 * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset in this
+	 * specific case, where PARTIAL is both correct and required.
+	 */
 	if (skb->pkt_type == PACKET_LOOPBACK)
 		skb->ip_summed = CHECKSUM_PARTIAL;
 
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index b01a8a8..8ec482e 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -500,21 +500,6 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
 		   u8 private_data_len);
 
 /**
- * ib_send_cm_lap - Sends a load alternate path request.
- * @cm_id: Connection identifier associated with the load alternate path
- *   message.
- * @alternate_path: A path record that identifies the alternate path to
- *   load.
- * @private_data: Optional user-defined private data sent with the
- *   load alternate path message.
- * @private_data_len: Size of the private data buffer, in bytes.
- */
-int ib_send_cm_lap(struct ib_cm_id *cm_id,
-		   struct sa_path_rec *alternate_path,
-		   const void *private_data,
-		   u8 private_data_len);
-
-/**
  * ib_cm_init_qp_attr - Initializes the QP attributes for use in transitioning
  *   to a specified QP state.
  * @cm_id: Communication identifier associated with the QP attributes to
@@ -534,25 +519,6 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
 		       struct ib_qp_attr *qp_attr,
 		       int *qp_attr_mask);
 
-/**
- * ib_send_cm_apr - Sends an alternate path response message in response to
- *   a load alternate path request.
- * @cm_id: Connection identifier associated with the alternate path response.
- * @status: Reply status sent with the alternate path response.
- * @info: Optional additional information sent with the alternate path
- *   response.
- * @info_length: Size of the additional information, in bytes.
- * @private_data: Optional user-defined private data sent with the
- *   alternate path response message.
- * @private_data_len: Size of the private data buffer, in bytes.
- */
-int ib_send_cm_apr(struct ib_cm_id *cm_id,
-		   enum ib_cm_apr_status status,
-		   void *info,
-		   u8 info_length,
-		   const void *private_data,
-		   u8 private_data_len);
-
 struct ib_cm_sidr_req_param {
 	struct sa_path_rec	*path;
 	const struct ib_gid_attr *sgid_attr;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e2cc622..1f779fa 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -72,11 +72,16 @@
 #define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN
 
 struct ib_umem_odp;
+struct ib_uqp_object;
+struct ib_usrq_object;
+struct ib_uwq_object;
 
 extern struct workqueue_struct *ib_wq;
 extern struct workqueue_struct *ib_comp_wq;
 extern struct workqueue_struct *ib_comp_unbound_wq;
 
+struct ib_ucq_object;
+
 __printf(3, 4) __cold
 void ibdev_printk(const char *level, const struct ib_device *ibdev,
 		  const char *format, ...);
@@ -1413,8 +1418,11 @@ enum ib_access_flags {
 	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
 	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
 	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
+	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
 
-	IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
+	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
+	IB_ACCESS_SUPPORTED =
+		((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
 };
 
 /*
@@ -1544,7 +1552,7 @@ enum ib_poll_context {
 
 struct ib_cq {
 	struct ib_device       *device;
-	struct ib_uobject      *uobject;
+	struct ib_ucq_object   *uobject;
 	ib_comp_handler   	comp_handler;
 	void                  (*event_handler)(struct ib_event *, void *);
 	void                   *cq_context;
@@ -1558,6 +1566,11 @@ struct ib_cq {
 	};
 	struct workqueue_struct *comp_wq;
 	struct dim *dim;
+
+	/* updated only by trace points */
+	ktime_t timestamp;
+	bool interrupt;
+
 	/*
 	 * Implementation details of the RDMA core, don't use in drivers:
 	 */
@@ -1567,7 +1580,7 @@ struct ib_cq {
 struct ib_srq {
 	struct ib_device       *device;
 	struct ib_pd	       *pd;
-	struct ib_uobject      *uobject;
+	struct ib_usrq_object  *uobject;
 	void		      (*event_handler)(struct ib_event *, void *);
 	void		       *srq_context;
 	enum ib_srq_type	srq_type;
@@ -1612,7 +1625,7 @@ enum ib_wq_state {
 
 struct ib_wq {
 	struct ib_device       *device;
-	struct ib_uobject      *uobject;
+	struct ib_uwq_object   *uobject;
 	void		    *wq_context;
 	void		    (*event_handler)(struct ib_event *, void *);
 	struct ib_pd	       *pd;
@@ -1728,7 +1741,7 @@ struct ib_qp {
 	atomic_t		usecnt;
 	struct list_head	open_list;
 	struct ib_qp           *real_qp;
-	struct ib_uobject      *uobject;
+	struct ib_uqp_object   *uobject;
 	void                  (*event_handler)(struct ib_event *, void *);
 	void		       *qp_context;
 	/* sgid_attrs associated with the AV's */
@@ -2147,11 +2160,6 @@ struct ib_port_cache {
 	enum ib_port_state     port_state;
 };
 
-struct ib_cache {
-	rwlock_t                lock;
-	struct ib_event_handler event_handler;
-};
-
 struct ib_port_immutable {
 	int                           pkey_tbl_len;
 	int                           gid_tbl_len;
@@ -2627,13 +2635,18 @@ struct ib_device {
 	struct rcu_head rcu_head;
 
 	struct list_head              event_handler_list;
-	spinlock_t                    event_handler_lock;
+	/* Protects event_handler_list */
+	struct rw_semaphore event_handler_rwsem;
+
+	/* Protects QP's event_handler calls and open_qp list */
+	spinlock_t qp_open_list_lock;
 
 	struct rw_semaphore	      client_data_rwsem;
 	struct xarray                 client_data;
 	struct mutex                  unregistration_lock;
 
-	struct ib_cache               cache;
+	/* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
+	rwlock_t cache_lock;
 	/**
 	 * port_data is indexed by port number
 	 */
@@ -2942,7 +2955,7 @@ bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
 
 void ib_register_event_handler(struct ib_event_handler *event_handler);
 void ib_unregister_event_handler(struct ib_event_handler *event_handler);
-void ib_dispatch_event(struct ib_event *event);
+void ib_dispatch_event(const struct ib_event *event);
 
 int ib_query_port(struct ib_device *device,
 		  u8 port_num, struct ib_port_attr *port_attr);
@@ -4309,6 +4322,9 @@ static inline int ib_check_mr_access(int flags)
 	    !(flags & IB_ACCESS_LOCAL_WRITE))
 		return -EINVAL;
 
+	if (flags & ~IB_ACCESS_SUPPORTED)
+		return -EINVAL;
+
 	return 0;
 }
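
The arithmetic behind the new IB_ACCESS_SUPPORTED value: if IB_ACCESS_HUGETLB is the highest mandatory flag at bit N, then (IB_ACCESS_HUGETLB << 1) - 1 sets every bit from 0 through N (e.g. for N = 7 it yields 0xff), and OR-ing in IB_ACCESS_OPTIONAL extends the accepted set to the optional range, so the added check in ib_check_mr_access() rejects exactly those flags that fall outside either group.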
 
diff --git a/include/rdma/iba.h b/include/rdma/iba.h
new file mode 100644
index 0000000..6a1115b
--- /dev/null
+++ b/include/rdma/iba.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc.  All rights reserved.
+ */
+#ifndef _IBA_DEFS_H_
+#define _IBA_DEFS_H_
+
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <asm/unaligned.h>
+
+static inline u32 _iba_get8(const u8 *ptr)
+{
+	return *ptr;
+}
+
+static inline void _iba_set8(u8 *ptr, u32 mask, u32 prep_value)
+{
+	*ptr = (*ptr & ~mask) | prep_value;
+}
+
+static inline u16 _iba_get16(const __be16 *ptr)
+{
+	return be16_to_cpu(*ptr);
+}
+
+static inline void _iba_set16(__be16 *ptr, u16 mask, u16 prep_value)
+{
+	*ptr = cpu_to_be16((be16_to_cpu(*ptr) & ~mask) | prep_value);
+}
+
+static inline u32 _iba_get32(const __be32 *ptr)
+{
+	return be32_to_cpu(*ptr);
+}
+
+static inline void _iba_set32(__be32 *ptr, u32 mask, u32 prep_value)
+{
+	*ptr = cpu_to_be32((be32_to_cpu(*ptr) & ~mask) | prep_value);
+}
+
+static inline u64 _iba_get64(const __be64 *ptr)
+{
+	/*
+	 * The mads are constructed so that 32 bit and smaller are naturally
+	 * aligned, everything larger has a max alignment of 4 bytes.
+	 */
+	return be64_to_cpu(get_unaligned(ptr));
+}
+
+static inline void _iba_set64(__be64 *ptr, u64 mask, u64 prep_value)
+{
+	put_unaligned(cpu_to_be64((_iba_get64(ptr) & ~mask) | prep_value), ptr);
+}
+
+#define _IBA_SET(field_struct, field_offset, field_mask, num_bits, ptr, value) \
+	({                                                                     \
+		field_struct *_ptr = ptr;                                      \
+		_iba_set##num_bits((void *)_ptr + (field_offset), field_mask,  \
+				   FIELD_PREP(field_mask, value));             \
+	})
+#define IBA_SET(field, ptr, value) _IBA_SET(field, ptr, value)
+
+#define _IBA_GET_MEM_PTR(field_struct, field_offset, type, num_bits, ptr)      \
+	({                                                                     \
+		field_struct *_ptr = ptr;                                      \
+		(type *)((void *)_ptr + (field_offset));                       \
+	})
+#define IBA_GET_MEM_PTR(field, ptr) _IBA_GET_MEM_PTR(field, ptr)
+
+/* FIXME: A set should always set the entire field, meaning we should zero the trailing bytes */
+#define _IBA_SET_MEM(field_struct, field_offset, type, num_bits, ptr, in,      \
+		     bytes)                                                    \
+	({                                                                     \
+		const type *_in_ptr = in;                                      \
+		WARN_ON(bytes * 8 > num_bits);                                 \
+		if (in && bytes)                                               \
+			memcpy(_IBA_GET_MEM_PTR(field_struct, field_offset,    \
+						type, num_bits, ptr),          \
+			       _in_ptr, bytes);                                \
+	})
+#define IBA_SET_MEM(field, ptr, in, bytes) _IBA_SET_MEM(field, ptr, in, bytes)
+
+#define _IBA_GET(field_struct, field_offset, field_mask, num_bits, ptr)        \
+	({                                                                     \
+		const field_struct *_ptr = ptr;                                \
+		(u##num_bits) FIELD_GET(                                       \
+			field_mask, _iba_get##num_bits((const void *)_ptr +    \
+						       (field_offset)));       \
+	})
+#define IBA_GET(field, ptr) _IBA_GET(field, ptr)
+
+#define _IBA_GET_MEM(field_struct, field_offset, type, num_bits, ptr, out,     \
+		     bytes)                                                    \
+	({                                                                     \
+		type *_out_ptr = out;                                          \
+		WARN_ON(bytes * 8 > num_bits);                                 \
+		if (out && bytes)                                              \
+			memcpy(_out_ptr,                                       \
+			       _IBA_GET_MEM_PTR(field_struct, field_offset,    \
+						type, num_bits, ptr),          \
+			       bytes);                                         \
+	})
+#define IBA_GET_MEM(field, ptr, out, bytes) _IBA_GET_MEM(field, ptr, out, bytes)
+
+/*
+ * The generated list becomes the parameters to the macros, the order is:
+ *  - struct this applies to
+ *  - starting offset of the mask
+ *  - GENMASK or GENMASK_ULL in CPU order
+ *  - The width of data the mask operations should work on, in bits
+ */
+
+/*
+ * Extraction using a tabular description like table 106. bit_offset is from
+ * the Byte[Bit] notation.
+ */
+#define IBA_FIELD_BLOC(field_struct, byte_offset, bit_offset, num_bits)        \
+	field_struct, byte_offset,                                             \
+		GENMASK(7 - (bit_offset), 7 - (bit_offset) - (num_bits - 1)),  \
+		8
+#define IBA_FIELD8_LOC(field_struct, byte_offset, num_bits)                    \
+	IBA_FIELD_BLOC(field_struct, byte_offset, 0, num_bits)
+
+#define IBA_FIELD16_LOC(field_struct, byte_offset, num_bits)                   \
+	field_struct, (byte_offset)&0xFFFE,                                    \
+		GENMASK(15 - (((byte_offset) % 2) * 8),                        \
+			15 - (((byte_offset) % 2) * 8) - (num_bits - 1)),      \
+		16
+
+#define IBA_FIELD32_LOC(field_struct, byte_offset, num_bits)                   \
+	field_struct, (byte_offset)&0xFFFC,                                    \
+		GENMASK(31 - (((byte_offset) % 4) * 8),                        \
+			31 - (((byte_offset) % 4) * 8) - (num_bits - 1)),      \
+		32
+
+#define IBA_FIELD64_LOC(field_struct, byte_offset)                             \
+	field_struct, byte_offset, GENMASK_ULL(63, 0), 64
+/*
+ * In the IBTA spec, everything larger than 64 bits is a whole multiple
+ * of bytes, with no leftover bits.
+ */
+#define IBA_FIELD_MLOC(field_struct, byte_offset, num_bits, type)              \
+	field_struct, byte_offset, type, num_bits
+
+#endif /* _IBA_DEFS_H_ */
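
A hedged usage sketch (the struct and field names below are illustrative, not from the patch): each *_LOC macro expands to the (struct, offset, mask, width) tuple described above, and IBA_GET()/IBA_SET() splice that tuple into the typed helpers.

struct example_mad {
	u8 data[16];
};

/* 24-bit big-endian field stored in the 32-bit word at byte offset 4 */
#define EXAMPLE_QPN IBA_FIELD32_LOC(struct example_mad, 4, 24)

static u32 example_read_qpn(struct example_mad *mad)
{
	return IBA_GET(EXAMPLE_QPN, mad);
}

static void example_write_qpn(struct example_mad *mad, u32 qpn)
{
	IBA_SET(EXAMPLE_QPN, mad, qpn);
}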
diff --git a/include/rdma/ibta_vol1_c12.h b/include/rdma/ibta_vol1_c12.h
new file mode 100644
index 0000000..2699044
--- /dev/null
+++ b/include/rdma/ibta_vol1_c12.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ *
+ * This file is IBTA volume 1, chapter 12 declarations:
+ *  CHAPTER 12: COMMUNICATION MANAGEMENT
+ */
+#ifndef _IBTA_VOL1_C12_H_
+#define _IBTA_VOL1_C12_H_
+
+#include <rdma/iba.h>
+
+#define CM_FIELD_BLOC(field_struct, byte_offset, bits_offset, width)           \
+	IBA_FIELD_BLOC(field_struct,                                           \
+		       (byte_offset + sizeof(struct ib_mad_hdr)), bits_offset, \
+		       width)
+#define CM_FIELD8_LOC(field_struct, byte_offset, width)                        \
+	IBA_FIELD8_LOC(field_struct,                                           \
+		       (byte_offset + sizeof(struct ib_mad_hdr)), width)
+#define CM_FIELD16_LOC(field_struct, byte_offset, width)                       \
+	IBA_FIELD16_LOC(field_struct,                                          \
+			(byte_offset + sizeof(struct ib_mad_hdr)), width)
+#define CM_FIELD32_LOC(field_struct, byte_offset, width)                       \
+	IBA_FIELD32_LOC(field_struct,                                          \
+			(byte_offset + sizeof(struct ib_mad_hdr)), width)
+#define CM_FIELD64_LOC(field_struct, byte_offset)                              \
+	IBA_FIELD64_LOC(field_struct, (byte_offset + sizeof(struct ib_mad_hdr)))
+#define CM_FIELD_MLOC(field_struct, byte_offset, width, type)                  \
+	IBA_FIELD_MLOC(field_struct,                                           \
+		       (byte_offset + sizeof(struct ib_mad_hdr)), width, type)
+#define CM_STRUCT(field_struct, total_len)                                     \
+	field_struct                                                           \
+	{                                                                      \
+		struct ib_mad_hdr hdr;                                         \
+		u32 _data[(total_len) / 32 +                                   \
+			  BUILD_BUG_ON_ZERO((total_len) % 32 != 0)];           \
+	}
+
+/* Table 106 REQ Message Contents */
+#define CM_REQ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_req_msg, 0, 32)
+#define CM_REQ_SERVICE_ID CM_FIELD64_LOC(struct cm_req_msg, 8)
+#define CM_REQ_LOCAL_CA_GUID CM_FIELD64_LOC(struct cm_req_msg, 16)
+#define CM_REQ_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_req_msg, 28, 32)
+#define CM_REQ_LOCAL_QPN CM_FIELD32_LOC(struct cm_req_msg, 32, 24)
+#define CM_REQ_RESPONDER_RESOURCES CM_FIELD8_LOC(struct cm_req_msg, 35, 8)
+#define CM_REQ_LOCAL_EECN CM_FIELD32_LOC(struct cm_req_msg, 36, 24)
+#define CM_REQ_INITIATOR_DEPTH CM_FIELD8_LOC(struct cm_req_msg, 39, 8)
+#define CM_REQ_REMOTE_EECN CM_FIELD32_LOC(struct cm_req_msg, 40, 24)
+#define CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT                                      \
+	CM_FIELD8_LOC(struct cm_req_msg, 43, 5)
+#define CM_REQ_TRANSPORT_SERVICE_TYPE CM_FIELD_BLOC(struct cm_req_msg, 43, 5, 2)
+#define CM_REQ_END_TO_END_FLOW_CONTROL                                         \
+	CM_FIELD_BLOC(struct cm_req_msg, 43, 7, 1)
+#define CM_REQ_STARTING_PSN CM_FIELD32_LOC(struct cm_req_msg, 44, 24)
+#define CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT CM_FIELD8_LOC(struct cm_req_msg, 47, 5)
+#define CM_REQ_RETRY_COUNT CM_FIELD_BLOC(struct cm_req_msg, 47, 5, 3)
+#define CM_REQ_PARTITION_KEY CM_FIELD16_LOC(struct cm_req_msg, 48, 16)
+#define CM_REQ_PATH_PACKET_PAYLOAD_MTU CM_FIELD8_LOC(struct cm_req_msg, 50, 4)
+#define CM_REQ_RDC_EXISTS CM_FIELD_BLOC(struct cm_req_msg, 50, 4, 1)
+#define CM_REQ_RNR_RETRY_COUNT CM_FIELD_BLOC(struct cm_req_msg, 50, 5, 3)
+#define CM_REQ_MAX_CM_RETRIES CM_FIELD8_LOC(struct cm_req_msg, 51, 4)
+#define CM_REQ_SRQ CM_FIELD_BLOC(struct cm_req_msg, 51, 4, 1)
+#define CM_REQ_EXTENDED_TRANSPORT_TYPE                                         \
+	CM_FIELD_BLOC(struct cm_req_msg, 51, 5, 3)
+#define CM_REQ_PRIMARY_LOCAL_PORT_LID CM_FIELD16_LOC(struct cm_req_msg, 52, 16)
+#define CM_REQ_PRIMARY_REMOTE_PORT_LID CM_FIELD16_LOC(struct cm_req_msg, 54, 16)
+#define CM_REQ_PRIMARY_LOCAL_PORT_GID                                          \
+	CM_FIELD_MLOC(struct cm_req_msg, 56, 128, union ib_gid)
+#define CM_REQ_PRIMARY_REMOTE_PORT_GID                                         \
+	CM_FIELD_MLOC(struct cm_req_msg, 72, 128, union ib_gid)
+#define CM_REQ_PRIMARY_FLOW_LABEL CM_FIELD32_LOC(struct cm_req_msg, 88, 20)
+#define CM_REQ_PRIMARY_PACKET_RATE CM_FIELD_BLOC(struct cm_req_msg, 91, 2, 6)
+#define CM_REQ_PRIMARY_TRAFFIC_CLASS CM_FIELD8_LOC(struct cm_req_msg, 92, 8)
+#define CM_REQ_PRIMARY_HOP_LIMIT CM_FIELD8_LOC(struct cm_req_msg, 93, 8)
+#define CM_REQ_PRIMARY_SL CM_FIELD8_LOC(struct cm_req_msg, 94, 4)
+#define CM_REQ_PRIMARY_SUBNET_LOCAL CM_FIELD_BLOC(struct cm_req_msg, 94, 4, 1)
+#define CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT CM_FIELD8_LOC(struct cm_req_msg, 95, 5)
+#define CM_REQ_ALTERNATE_LOCAL_PORT_LID                                        \
+	CM_FIELD16_LOC(struct cm_req_msg, 96, 16)
+#define CM_REQ_ALTERNATE_REMOTE_PORT_LID                                       \
+	CM_FIELD16_LOC(struct cm_req_msg, 98, 16)
+#define CM_REQ_ALTERNATE_LOCAL_PORT_GID                                        \
+	CM_FIELD_MLOC(struct cm_req_msg, 100, 128, union ib_gid)
+#define CM_REQ_ALTERNATE_REMOTE_PORT_GID                                       \
+	CM_FIELD_MLOC(struct cm_req_msg, 116, 128, union ib_gid)
+#define CM_REQ_ALTERNATE_FLOW_LABEL CM_FIELD32_LOC(struct cm_req_msg, 132, 20)
+#define CM_REQ_ALTERNATE_PACKET_RATE CM_FIELD_BLOC(struct cm_req_msg, 135, 2, 6)
+#define CM_REQ_ALTERNATE_TRAFFIC_CLASS CM_FIELD8_LOC(struct cm_req_msg, 136, 8)
+#define CM_REQ_ALTERNATE_HOP_LIMIT CM_FIELD8_LOC(struct cm_req_msg, 137, 8)
+#define CM_REQ_ALTERNATE_SL CM_FIELD8_LOC(struct cm_req_msg, 138, 4)
+#define CM_REQ_ALTERNATE_SUBNET_LOCAL                                          \
+	CM_FIELD_BLOC(struct cm_req_msg, 138, 4, 1)
+#define CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT                                     \
+	CM_FIELD8_LOC(struct cm_req_msg, 139, 5)
+#define CM_REQ_SAP_SUPPORTED CM_FIELD_BLOC(struct cm_req_msg, 139, 5, 1)
+#define CM_REQ_PRIVATE_DATA CM_FIELD_MLOC(struct cm_req_msg, 140, 736, void)
+CM_STRUCT(struct cm_req_msg, 140 * 8 + 736);
+
+/* Table 107 MRA Message Contents */
+#define CM_MRA_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_mra_msg, 0, 32)
+#define CM_MRA_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_mra_msg, 4, 32)
+#define CM_MRA_MESSAGE_MRAED CM_FIELD8_LOC(struct cm_mra_msg, 8, 2)
+#define CM_MRA_SERVICE_TIMEOUT CM_FIELD8_LOC(struct cm_mra_msg, 9, 5)
+#define CM_MRA_PRIVATE_DATA CM_FIELD_MLOC(struct cm_mra_msg, 10, 1776, void)
+CM_STRUCT(struct cm_mra_msg, 10 * 8 + 1776);
+
+/* Table 108 REJ Message Contents */
+#define CM_REJ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_rej_msg, 0, 32)
+#define CM_REJ_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rej_msg, 4, 32)
+#define CM_REJ_MESSAGE_REJECTED CM_FIELD8_LOC(struct cm_rej_msg, 8, 2)
+#define CM_REJ_REJECTED_INFO_LENGTH CM_FIELD8_LOC(struct cm_rej_msg, 9, 7)
+#define CM_REJ_REASON CM_FIELD16_LOC(struct cm_rej_msg, 10, 16)
+#define CM_REJ_ARI CM_FIELD_MLOC(struct cm_rej_msg, 12, 576, void)
+#define CM_REJ_PRIVATE_DATA CM_FIELD_MLOC(struct cm_rej_msg, 84, 1184, void)
+CM_STRUCT(struct cm_rej_msg, 84 * 8 + 1184);
+
+/* Table 110 REP Message Contents */
+#define CM_REP_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_rep_msg, 0, 32)
+#define CM_REP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rep_msg, 4, 32)
+#define CM_REP_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_rep_msg, 8, 32)
+#define CM_REP_LOCAL_QPN CM_FIELD32_LOC(struct cm_rep_msg, 12, 24)
+#define CM_REP_LOCAL_EE_CONTEXT_NUMBER CM_FIELD32_LOC(struct cm_rep_msg, 16, 24)
+#define CM_REP_STARTING_PSN CM_FIELD32_LOC(struct cm_rep_msg, 20, 24)
+#define CM_REP_RESPONDER_RESOURCES CM_FIELD8_LOC(struct cm_rep_msg, 24, 8)
+#define CM_REP_INITIATOR_DEPTH CM_FIELD8_LOC(struct cm_rep_msg, 25, 8)
+#define CM_REP_TARGET_ACK_DELAY CM_FIELD8_LOC(struct cm_rep_msg, 26, 5)
+#define CM_REP_FAILOVER_ACCEPTED CM_FIELD_BLOC(struct cm_rep_msg, 26, 5, 2)
+#define CM_REP_END_TO_END_FLOW_CONTROL                                         \
+	CM_FIELD_BLOC(struct cm_rep_msg, 26, 7, 1)
+#define CM_REP_RNR_RETRY_COUNT CM_FIELD8_LOC(struct cm_rep_msg, 27, 3)
+#define CM_REP_SRQ CM_FIELD_BLOC(struct cm_rep_msg, 27, 3, 1)
+#define CM_REP_LOCAL_CA_GUID CM_FIELD64_LOC(struct cm_rep_msg, 28)
+#define CM_REP_PRIVATE_DATA CM_FIELD_MLOC(struct cm_rep_msg, 36, 1568, void)
+CM_STRUCT(struct cm_rep_msg, 36 * 8 + 1568);
+
+/* Table 111 RTU Message Contents */
+#define CM_RTU_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_rtu_msg, 0, 32)
+#define CM_RTU_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rtu_msg, 4, 32)
+#define CM_RTU_PRIVATE_DATA CM_FIELD_MLOC(struct cm_rtu_msg, 8, 1792, void)
+CM_STRUCT(struct cm_rtu_msg, 8 * 8 + 1792);
+
+/* Table 112 DREQ Message Contents */
+#define CM_DREQ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_dreq_msg, 0, 32)
+#define CM_DREQ_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_dreq_msg, 4, 32)
+#define CM_DREQ_REMOTE_QPN_EECN CM_FIELD32_LOC(struct cm_dreq_msg, 8, 24)
+#define CM_DREQ_PRIVATE_DATA CM_FIELD_MLOC(struct cm_dreq_msg, 12, 1760, void)
+CM_STRUCT(struct cm_dreq_msg, 12 * 8 + 1760);
+
+/* Table 113 DREP Message Contents */
+#define CM_DREP_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_drep_msg, 0, 32)
+#define CM_DREP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_drep_msg, 4, 32)
+#define CM_DREP_PRIVATE_DATA CM_FIELD_MLOC(struct cm_drep_msg, 8, 1792, void)
+CM_STRUCT(struct cm_drep_msg, 8 * 8 + 1792);
+
+/* Table 115 LAP Message Contents */
+#define CM_LAP_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_lap_msg, 0, 32)
+#define CM_LAP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_lap_msg, 4, 32)
+#define CM_LAP_REMOTE_QPN_EECN CM_FIELD32_LOC(struct cm_lap_msg, 12, 24)
+#define CM_LAP_REMOTE_CM_RESPONSE_TIMEOUT                                      \
+	CM_FIELD8_LOC(struct cm_lap_msg, 15, 5)
+#define CM_LAP_ALTERNATE_LOCAL_PORT_LID                                        \
+	CM_FIELD16_LOC(struct cm_lap_msg, 20, 16)
+#define CM_LAP_ALTERNATE_REMOTE_PORT_LID                                       \
+	CM_FIELD16_LOC(struct cm_lap_msg, 22, 16)
+#define CM_LAP_ALTERNATE_LOCAL_PORT_GID                                        \
+	CM_FIELD_MLOC(struct cm_lap_msg, 24, 128, union ib_gid)
+#define CM_LAP_ALTERNATE_REMOTE_PORT_GID                                       \
+	CM_FIELD_MLOC(struct cm_lap_msg, 40, 128, union ib_gid)
+#define CM_LAP_ALTERNATE_FLOW_LABEL CM_FIELD32_LOC(struct cm_lap_msg, 56, 20)
+#define CM_LAP_ALTERNATE_TRAFFIC_CLASS CM_FIELD8_LOC(struct cm_lap_msg, 59, 8)
+#define CM_LAP_ALTERNATE_HOP_LIMIT CM_FIELD8_LOC(struct cm_lap_msg, 60, 8)
+#define CM_LAP_ALTERNATE_PACKET_RATE CM_FIELD_BLOC(struct cm_lap_msg, 61, 2, 6)
+#define CM_LAP_ALTERNATE_SL CM_FIELD8_LOC(struct cm_lap_msg, 62, 4)
+#define CM_LAP_ALTERNATE_SUBNET_LOCAL CM_FIELD_BLOC(struct cm_lap_msg, 62, 4, 1)
+#define CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT                                     \
+	CM_FIELD8_LOC(struct cm_lap_msg, 63, 5)
+#define CM_LAP_PRIVATE_DATA CM_FIELD_MLOC(struct cm_lap_msg, 64, 1344, void)
+CM_STRUCT(struct cm_lap_msg, 64 * 8 + 1344);
+
+/* Table 116 APR Message Contents */
+#define CM_APR_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_apr_msg, 0, 32)
+#define CM_APR_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_apr_msg, 4, 32)
+#define CM_APR_ADDITIONAL_INFORMATION_LENGTH                                   \
+	CM_FIELD8_LOC(struct cm_apr_msg, 8, 8)
+#define CM_APR_AR_STATUS CM_FIELD8_LOC(struct cm_apr_msg, 9, 8)
+#define CM_APR_ADDITIONAL_INFORMATION                                          \
+	CM_FIELD_MLOC(struct cm_apr_msg, 12, 576, void)
+#define CM_APR_PRIVATE_DATA CM_FIELD_MLOC(struct cm_apr_msg, 84, 1184, void)
+CM_STRUCT(struct cm_apr_msg, 84 * 8 + 1184);
+
+/* Table 119 SIDR_REQ Message Contents */
+#define CM_SIDR_REQ_REQUESTID CM_FIELD32_LOC(struct cm_sidr_req_msg, 0, 32)
+#define CM_SIDR_REQ_PARTITION_KEY CM_FIELD16_LOC(struct cm_sidr_req_msg, 4, 16)
+#define CM_SIDR_REQ_SERVICEID CM_FIELD64_LOC(struct cm_sidr_req_msg, 8)
+#define CM_SIDR_REQ_PRIVATE_DATA                                               \
+	CM_FIELD_MLOC(struct cm_sidr_req_msg, 16, 1728, void)
+CM_STRUCT(struct cm_sidr_req_msg, 16 * 8 + 1728);
+
+/* Table 120 SIDR_REP Message Contents */
+#define CM_SIDR_REP_REQUESTID CM_FIELD32_LOC(struct cm_sidr_rep_msg, 0, 32)
+#define CM_SIDR_REP_STATUS CM_FIELD8_LOC(struct cm_sidr_rep_msg, 4, 8)
+#define CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH                              \
+	CM_FIELD8_LOC(struct cm_sidr_rep_msg, 5, 8)
+#define CM_SIDR_REP_QPN CM_FIELD32_LOC(struct cm_sidr_rep_msg, 8, 24)
+#define CM_SIDR_REP_SERVICEID CM_FIELD64_LOC(struct cm_sidr_rep_msg, 12)
+#define CM_SIDR_REP_Q_KEY CM_FIELD32_LOC(struct cm_sidr_rep_msg, 20, 32)
+#define CM_SIDR_REP_ADDITIONAL_INFORMATION                                     \
+	CM_FIELD_MLOC(struct cm_sidr_rep_msg, 24, 576, void)
+#define CM_SIDR_REP_PRIVATE_DATA                                               \
+	CM_FIELD_MLOC(struct cm_sidr_rep_msg, 96, 1088, void)
+CM_STRUCT(struct cm_sidr_rep_msg, 96 * 8 + 1088);
+
+#endif /* _IBTA_VOL1_C12_H_ */
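
For orientation (an illustrative expansion, not additional patch content): CM_STRUCT(struct cm_rtu_msg, 8 * 8 + 1792) declares the MAD header followed by a payload sized in 32-bit words, with BUILD_BUG_ON_ZERO() enforcing that the total bit length is a multiple of 32. Reduced to its effect, it is roughly:

struct cm_rtu_msg {
	struct ib_mad_hdr hdr;
	u32 _data[(64 + 1792) / 32];	/* 58 words */
};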
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index b550ae8..0d5c70e 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -640,34 +640,14 @@ static inline int rvt_cmp_msn(u32 a, u32 b)
 	return (((int)a) - ((int)b)) << 8;
 }
 
-/**
- * rvt_compute_aeth - compute the AETH (syndrome + MSN)
- * @qp: the queue pair to compute the AETH for
- *
- * Returns the AETH.
- */
 __be32 rvt_compute_aeth(struct rvt_qp *qp);
 
-/**
- * rvt_get_credit - flush the send work queue of a QP
- * @qp: the qp who's send work queue to flush
- * @aeth: the Acknowledge Extended Transport Header
- *
- * The QP s_lock should be held.
- */
 void rvt_get_credit(struct rvt_qp *qp, u32 aeth);
 
-/**
- * rvt_restart_sge - rewind the sge state for a wqe
- * @ss: the sge state pointer
- * @wqe: the wqe to rewind
- * @len: the data length from the start of the wqe in bytes
- *
- * Returns the remaining data length.
- */
 u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);
 
 /**
+ * rvt_div_round_up_mtu - round up divide
  * @qp - the qp pair
  * @len - the length
  *
diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h
index 3447bfe..6ae6cf8 100644
--- a/include/rdma/uverbs_named_ioctl.h
+++ b/include/rdma/uverbs_named_ioctl.h
@@ -76,7 +76,7 @@
 #define DECLARE_UVERBS_NAMED_OBJECT(_object_id, _type_attrs, ...)              \
 	static const struct uverbs_method_def *const UVERBS_OBJECT_METHODS(    \
 		_object_id)[] = { __VA_ARGS__ };                               \
-	const struct uverbs_object_def UVERBS_OBJECT(_object_id) = {           \
+	static const struct uverbs_object_def UVERBS_OBJECT(_object_id) = {    \
 		.id = _object_id,                                              \
 		.type_attrs = &_type_attrs,                                    \
 		.num_methods = ARRAY_SIZE(UVERBS_OBJECT_METHODS(_object_id)),  \
@@ -88,10 +88,10 @@
  * identify all uapi methods with a (object,method) tuple. However, they have
  * no type pointer.
  */
-#define DECLARE_UVERBS_GLOBAL_METHODS(_object_id, ...)	\
+#define DECLARE_UVERBS_GLOBAL_METHODS(_object_id, ...)                         \
 	static const struct uverbs_method_def *const UVERBS_OBJECT_METHODS(    \
 		_object_id)[] = { __VA_ARGS__ };                               \
-	const struct uverbs_object_def UVERBS_OBJECT(_object_id) = {           \
+	static const struct uverbs_object_def UVERBS_OBJECT(_object_id) = {    \
 		.id = _object_id,                                              \
 		.num_methods = ARRAY_SIZE(UVERBS_OBJECT_METHODS(_object_id)),  \
 		.methods = &UVERBS_OBJECT_METHODS(_object_id)                  \
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 05eabfd..1b28ce1 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -104,16 +104,6 @@ static inline void uobj_put_write(struct ib_uobject *uobj)
 	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
 }
 
-static inline int __must_check
-uobj_alloc_commit(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
-{
-	int ret = rdma_alloc_commit_uobject(uobj, attrs);
-
-	if (ret)
-		return ret;
-	return 0;
-}
-
 static inline void uobj_alloc_abort(struct ib_uobject *uobj,
 				    struct uverbs_attr_bundle *attrs)
 {
@@ -124,8 +114,7 @@ static inline struct ib_uobject *
 __uobj_alloc(const struct uverbs_api_object *obj,
 	     struct uverbs_attr_bundle *attrs, struct ib_device **ib_dev)
 {
-	struct ib_uobject *uobj =
-		rdma_alloc_begin_uobject(obj, attrs->ufile, attrs);
+	struct ib_uobject *uobj = rdma_alloc_begin_uobject(obj, attrs);
 
 	if (!IS_ERR(uobj))
 		*ib_dev = attrs->context->device;
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index d57a5ba..f1cbdae 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -83,9 +83,9 @@ enum rdma_lookup_mode {
  */
 struct uverbs_obj_type_class {
 	struct ib_uobject *(*alloc_begin)(const struct uverbs_api_object *obj,
-					  struct ib_uverbs_file *ufile);
+					  struct uverbs_attr_bundle *attrs);
 	/* This consumes the kref on uobj */
-	int (*alloc_commit)(struct ib_uobject *uobj);
+	void (*alloc_commit)(struct ib_uobject *uobj);
 	/* This does not consume the kref on uobj */
 	void (*alloc_abort)(struct ib_uobject *uobj);
 
@@ -98,7 +98,6 @@ struct uverbs_obj_type_class {
 				       enum rdma_remove_reason why,
 				       struct uverbs_attr_bundle *attrs);
 	void (*remove_handle)(struct ib_uobject *uobj);
-	u8    needs_kfree_rcu;
 };
 
 struct uverbs_obj_type {
@@ -138,23 +137,34 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
 void rdma_lookup_put_uobject(struct ib_uobject *uobj,
 			     enum rdma_lookup_mode mode);
 struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
-					    struct ib_uverbs_file *ufile,
 					    struct uverbs_attr_bundle *attrs);
 void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
 			      struct uverbs_attr_bundle *attrs);
-int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj,
-					   struct uverbs_attr_bundle *attrs);
+void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
+			       struct uverbs_attr_bundle *attrs);
+
+/*
+ * uverbs_uobject_get is called in order to increase the reference count on
+ * an uobject. This is useful when a handler wants to keep the uobject's memory
+ * alive, regardless of whether this uobject is still alive in the context's
+ * objects repository. Objects are put via uverbs_uobject_put.
+ */
+static inline void uverbs_uobject_get(struct ib_uobject *uobject)
+{
+	kref_get(&uobject->ref);
+}
+void uverbs_uobject_put(struct ib_uobject *uobject);
 
 struct uverbs_obj_fd_type {
 	/*
 	 * In fd based objects, uverbs_obj_type_ops points to generic
 	 * fd operations. In order to specialize the underlying types (e.g.
 	 * completion_channel), we use fops, name and flags for fd creation.
-	 * context_closed is called when the context is closed either when
-	 * the driver is removed or the process terminated.
+	 * destroy_object is called when the uobject is to be destroyed,
+	 * because the driver is removed or the FD is closed.
 	 */
 	struct uverbs_obj_type  type;
-	int (*context_closed)(struct ib_uobject *uobj,
+	int (*destroy_object)(struct ib_uobject *uobj,
 			      enum rdma_remove_reason why);
 	const struct file_operations	*fops;
 	const char			*name;
@@ -163,11 +173,11 @@ struct uverbs_obj_fd_type {
 
 extern const struct uverbs_obj_type_class uverbs_idr_class;
 extern const struct uverbs_obj_type_class uverbs_fd_class;
-void uverbs_close_fd(struct file *f);
+int uverbs_uobject_fd_release(struct inode *inode, struct file *filp);
 
 #define UVERBS_BUILD_BUG_ON(cond) (sizeof(char[1 - 2 * !!(cond)]) -	\
 				   sizeof(char))
-#define UVERBS_TYPE_ALLOC_FD(_obj_size, _context_closed, _fops, _name, _flags)\
+#define UVERBS_TYPE_ALLOC_FD(_obj_size, _destroy_object, _fops, _name, _flags) \
 	((&((const struct uverbs_obj_fd_type)				\
 	 {.type = {							\
 		.type_class = &uverbs_fd_class,				\
@@ -175,7 +185,7 @@ void uverbs_close_fd(struct file *f);
 			UVERBS_BUILD_BUG_ON((_obj_size) <               \
 					    sizeof(struct ib_uobject)), \
 	 },								\
-	 .context_closed = _context_closed,				\
+	 .destroy_object = _destroy_object,				\
 	 .fops = _fops,							\
 	 .name = _name,							\
 	 .flags = _flags}))->type)
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 3ed836d..f8312a3 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -172,6 +172,7 @@ struct scsi_device {
 				     * because we did a bus reset. */
 	unsigned use_10_for_rw:1; /* first try 10-byte read / write */
 	unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
+	unsigned set_dbd_for_ms:1; /* Set "DBD" field in mode sense */
 	unsigned no_report_opcodes:1;	/* no REPORT SUPPORTED OPERATION CODES */
 	unsigned no_write_same:1;	/* no WRITE SAME command */
 	unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */
diff --git a/include/scsi/scsi_ioctl.h b/include/scsi/scsi_ioctl.h
index 5101e98..4fe69d8 100644
--- a/include/scsi/scsi_ioctl.h
+++ b/include/scsi/scsi_ioctl.h
@@ -44,6 +44,7 @@ typedef struct scsi_fctargaddress {
 int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev,
 		int cmd, bool ndelay);
 extern int scsi_ioctl(struct scsi_device *, int, void __user *);
+extern int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
 
 #endif /* __KERNEL__ */
 #endif /* _SCSI_IOCTL_H */
diff --git a/include/scsi/sg.h b/include/scsi/sg.h
index f91bcca..29c7ad0 100644
--- a/include/scsi/sg.h
+++ b/include/scsi/sg.h
@@ -68,6 +68,36 @@ typedef struct sg_io_hdr
     unsigned int info;          /* [o] auxiliary information */
 } sg_io_hdr_t;  /* 64 bytes long (on i386) */
 
+#if defined(__KERNEL__)
+#include <linux/compat.h>
+
+struct compat_sg_io_hdr {
+	compat_int_t interface_id;	/* [i] 'S' for SCSI generic (required) */
+	compat_int_t dxfer_direction;	/* [i] data transfer direction  */
+	unsigned char cmd_len;		/* [i] SCSI command length ( <= 16 bytes) */
+	unsigned char mx_sb_len;	/* [i] max length to write to sbp */
+	unsigned short iovec_count;	/* [i] 0 implies no scatter gather */
+	compat_uint_t dxfer_len;	/* [i] byte count of data transfer */
+	compat_uint_t dxferp;		/* [i], [*io] points to data transfer memory
+						or scatter gather list */
+	compat_uptr_t cmdp;		/* [i], [*i] points to command to perform */
+	compat_uptr_t sbp;		/* [i], [*o] points to sense_buffer memory */
+	compat_uint_t timeout;		/* [i] MAX_UINT->no timeout (unit: millisec) */
+	compat_uint_t flags;		/* [i] 0 -> default, see SG_FLAG... */
+	compat_int_t pack_id;		/* [i->o] unused internally (normally) */
+	compat_uptr_t usr_ptr;		/* [i->o] unused internally */
+	unsigned char status;		/* [o] scsi status */
+	unsigned char masked_status;	/* [o] shifted, masked scsi status */
+	unsigned char msg_status;	/* [o] messaging level data (optional) */
+	unsigned char sb_len_wr;	/* [o] byte count actually written to sbp */
+	unsigned short host_status;	/* [o] errors from host adapter */
+	unsigned short driver_status;	/* [o] errors from software driver */
+	compat_int_t resid;		/* [o] dxfer_len - actual_transferred */
+	compat_uint_t duration;		/* [o] time taken by cmd (unit: millisec) */
+	compat_uint_t info;		/* [o] auxiliary information */
+};
+#endif
+
 #define SG_INTERFACE_ID_ORIG 'S'
 
 /* Use negative values to flag difference from original sg_header structure */
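
A hedged sketch (the function name is hypothetical; this is not the actual sg compat handler): pointer-sized fields in struct compat_sg_io_hdr are carried as 32-bit values and must be widened with compat_ptr() before the kernel can use them.

static void __user *example_sg_cmdp(const struct compat_sg_io_hdr *hdr32)
{
	return compat_ptr(hdr32->cmdp);
}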
diff --git a/include/soc/fsl/cpm.h b/include/soc/fsl/cpm.h
new file mode 100644
index 0000000..4c24ea8
--- /dev/null
+++ b/include/soc/fsl/cpm.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __CPM_H
+#define __CPM_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <soc/fsl/qe/qe.h>
+
+/*
+ * SPI Parameter RAM common to QE and CPM.
+ */
+struct spi_pram {
+	__be16	rbase;	/* Rx Buffer descriptor base address */
+	__be16	tbase;	/* Tx Buffer descriptor base address */
+	u8	rfcr;	/* Rx function code */
+	u8	tfcr;	/* Tx function code */
+	__be16	mrblr;	/* Max receive buffer length */
+	__be32	rstate;	/* Internal */
+	__be32	rdp;	/* Internal */
+	__be16	rbptr;	/* Internal */
+	__be16	rbc;	/* Internal */
+	__be32	rxtmp;	/* Internal */
+	__be32	tstate;	/* Internal */
+	__be32	tdp;	/* Internal */
+	__be16	tbptr;	/* Internal */
+	__be16	tbc;	/* Internal */
+	__be32	txtmp;	/* Internal */
+	__be32	res;	/* Tx temp. */
+	__be16  rpbase;	/* Relocation pointer (CPM1 only) */
+	__be16	res1;	/* Reserved */
+};
+
+/*
+ * USB Controller pram common to QE and CPM.
+ */
+struct usb_ctlr {
+	u8	usb_usmod;
+	u8	usb_usadr;
+	u8	usb_uscom;
+	u8	res1[1];
+	__be16	usb_usep[4];
+	u8	res2[4];
+	__be16	usb_usber;
+	u8	res3[2];
+	__be16	usb_usbmr;
+	u8	res4[1];
+	u8	usb_usbs;
+	/* Fields down below are QE-only */
+	__be16	usb_ussft;
+	u8	res5[2];
+	__be16	usb_usfrn;
+	u8	res6[0x22];
+} __attribute__ ((packed));
+
+/*
+ * Function code bits, usually generic to devices.
+ */
+#ifdef CONFIG_CPM1
+#define CPMFCR_GBL	((u_char)0x00)	/* Flag doesn't exist in CPM1 */
+#define CPMFCR_TC2	((u_char)0x00)	/* Flag doesn't exist in CPM1 */
+#define CPMFCR_DTB	((u_char)0x00)	/* Flag doesn't exist in CPM1 */
+#define CPMFCR_BDB	((u_char)0x00)	/* Flag doesn't exist in CPM1 */
+#else
+#define CPMFCR_GBL	((u_char)0x20)	/* Set memory snooping */
+#define CPMFCR_TC2	((u_char)0x04)	/* Transfer code 2 value */
+#define CPMFCR_DTB	((u_char)0x02)	/* Use local bus for data when set */
+#define CPMFCR_BDB	((u_char)0x01)	/* Use local bus for BD when set */
+#endif
+#define CPMFCR_EB	((u_char)0x10)	/* Set big endian byte order */
+
+/* Opcodes common to CPM1 and CPM2
+*/
+#define CPM_CR_INIT_TRX		((ushort)0x0000)
+#define CPM_CR_INIT_RX		((ushort)0x0001)
+#define CPM_CR_INIT_TX		((ushort)0x0002)
+#define CPM_CR_HUNT_MODE	((ushort)0x0003)
+#define CPM_CR_STOP_TX		((ushort)0x0004)
+#define CPM_CR_GRA_STOP_TX	((ushort)0x0005)
+#define CPM_CR_RESTART_TX	((ushort)0x0006)
+#define CPM_CR_CLOSE_RX_BD	((ushort)0x0007)
+#define CPM_CR_SET_GADDR	((ushort)0x0008)
+#define CPM_CR_SET_TIMER	((ushort)0x0008)
+#define CPM_CR_STOP_IDMA	((ushort)0x000b)
+
+/* Buffer descriptors used by many of the CPM protocols. */
+typedef struct cpm_buf_desc {
+	ushort	cbd_sc;		/* Status and Control */
+	ushort	cbd_datlen;	/* Data length in buffer */
+	uint	cbd_bufaddr;	/* Buffer address in host memory */
+} cbd_t;
+
+/* Buffer descriptor control/status used by serial
+ */
+
+#define BD_SC_EMPTY	(0x8000)	/* Receive is empty */
+#define BD_SC_READY	(0x8000)	/* Transmit is ready */
+#define BD_SC_WRAP	(0x2000)	/* Last buffer descriptor */
+#define BD_SC_INTRPT	(0x1000)	/* Interrupt on change */
+#define BD_SC_LAST	(0x0800)	/* Last buffer in frame */
+#define BD_SC_TC	(0x0400)	/* Transmit CRC */
+#define BD_SC_CM	(0x0200)	/* Continuous mode */
+#define BD_SC_ID	(0x0100)	/* Rec'd too many idles */
+#define BD_SC_P		(0x0100)	/* xmt preamble */
+#define BD_SC_BR	(0x0020)	/* Break received */
+#define BD_SC_FR	(0x0010)	/* Framing error */
+#define BD_SC_PR	(0x0008)	/* Parity error */
+#define BD_SC_NAK	(0x0004)	/* NAK - did not respond */
+#define BD_SC_OV	(0x0002)	/* Overrun */
+#define BD_SC_UN	(0x0002)	/* Underrun */
+#define BD_SC_CD	(0x0001)	/* */
+#define BD_SC_CL	(0x0001)	/* Collision */
+
+/* Buffer descriptor control/status used by Ethernet receive.
+ * Common to SCC and FCC.
+ */
+#define BD_ENET_RX_EMPTY	(0x8000)
+#define BD_ENET_RX_WRAP		(0x2000)
+#define BD_ENET_RX_INTR		(0x1000)
+#define BD_ENET_RX_LAST		(0x0800)
+#define BD_ENET_RX_FIRST	(0x0400)
+#define BD_ENET_RX_MISS		(0x0100)
+#define BD_ENET_RX_BC		(0x0080)	/* FCC Only */
+#define BD_ENET_RX_MC		(0x0040)	/* FCC Only */
+#define BD_ENET_RX_LG		(0x0020)
+#define BD_ENET_RX_NO		(0x0010)
+#define BD_ENET_RX_SH		(0x0008)
+#define BD_ENET_RX_CR		(0x0004)
+#define BD_ENET_RX_OV		(0x0002)
+#define BD_ENET_RX_CL		(0x0001)
+#define BD_ENET_RX_STATS	(0x01ff)	/* All status bits */
+
+/* Buffer descriptor control/status used by Ethernet transmit.
+ * Common to SCC and FCC.
+ */
+#define BD_ENET_TX_READY	(0x8000)
+#define BD_ENET_TX_PAD		(0x4000)
+#define BD_ENET_TX_WRAP		(0x2000)
+#define BD_ENET_TX_INTR		(0x1000)
+#define BD_ENET_TX_LAST		(0x0800)
+#define BD_ENET_TX_TC		(0x0400)
+#define BD_ENET_TX_DEF		(0x0200)
+#define BD_ENET_TX_HB		(0x0100)
+#define BD_ENET_TX_LC		(0x0080)
+#define BD_ENET_TX_RL		(0x0040)
+#define BD_ENET_TX_RCMASK	(0x003c)
+#define BD_ENET_TX_UN		(0x0002)
+#define BD_ENET_TX_CSL		(0x0001)
+#define BD_ENET_TX_STATS	(0x03ff)	/* All status bits */
+
+/* Buffer descriptor control/status used by Transparent mode SCC.
+ */
+#define BD_SCC_TX_LAST		(0x0800)
+
+/* Buffer descriptor control/status used by I2C.
+ */
+#define BD_I2C_START		(0x0400)
+
+#ifdef CONFIG_CPM
+int cpm_command(u32 command, u8 opcode);
+#else
+static inline int cpm_command(u32 command, u8 opcode)
+{
+	return -ENOSYS;
+}
+#endif /* CONFIG_CPM */
+
+int cpm2_gpiochip_add32(struct device *dev);
+
+#endif
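
A hedged sketch (ignoring the MURAM I/O accessors a real driver would use) of how the status bits above are typically consumed when polling a receive buffer descriptor:

static int example_rx_bd_has_frame(const cbd_t *bd)
{
	/* The controller clears EMPTY once it has filled the buffer. */
	return !(bd->cbd_sc & BD_SC_EMPTY);
}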
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index c1036d1..e282ac0 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -17,7 +17,7 @@
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/err.h>
-#include <asm/cpm.h>
+#include <soc/fsl/cpm.h>
 #include <soc/fsl/qe/immap_qe.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -98,26 +98,25 @@ static inline void qe_reset(void) {}
 int cpm_muram_init(void);
 
 #if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
-unsigned long cpm_muram_alloc(unsigned long size, unsigned long align);
-int cpm_muram_free(unsigned long offset);
-unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
+s32 cpm_muram_alloc(unsigned long size, unsigned long align);
+void cpm_muram_free(s32 offset);
+s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
 void __iomem *cpm_muram_addr(unsigned long offset);
 unsigned long cpm_muram_offset(void __iomem *addr);
 dma_addr_t cpm_muram_dma(void __iomem *addr);
 #else
-static inline unsigned long cpm_muram_alloc(unsigned long size,
-					    unsigned long align)
+static inline s32 cpm_muram_alloc(unsigned long size,
+				  unsigned long align)
 {
 	return -ENOSYS;
 }
 
-static inline int cpm_muram_free(unsigned long offset)
+static inline void cpm_muram_free(s32 offset)
 {
-	return -ENOSYS;
 }
 
-static inline unsigned long cpm_muram_alloc_fixed(unsigned long offset,
-						  unsigned long size)
+static inline s32 cpm_muram_alloc_fixed(unsigned long offset,
+					unsigned long size)
 {
 	return -ENOSYS;
 }
@@ -241,21 +240,37 @@ static inline int qe_alive_during_sleep(void)
 #define qe_muram_offset cpm_muram_offset
 #define qe_muram_dma cpm_muram_dma
 
-#define qe_setbits32(_addr, _v) iowrite32be(ioread32be(_addr) |  (_v), (_addr))
-#define qe_clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
+#ifdef CONFIG_PPC32
+#define qe_iowrite8(val, addr)     out_8(addr, val)
+#define qe_iowrite16be(val, addr)  out_be16(addr, val)
+#define qe_iowrite32be(val, addr)  out_be32(addr, val)
+#define qe_ioread8(addr)           in_8(addr)
+#define qe_ioread16be(addr)        in_be16(addr)
+#define qe_ioread32be(addr)        in_be32(addr)
+#else
+#define qe_iowrite8(val, addr)     iowrite8(val, addr)
+#define qe_iowrite16be(val, addr)  iowrite16be(val, addr)
+#define qe_iowrite32be(val, addr)  iowrite32be(val, addr)
+#define qe_ioread8(addr)           ioread8(addr)
+#define qe_ioread16be(addr)        ioread16be(addr)
+#define qe_ioread32be(addr)        ioread32be(addr)
+#endif
 
-#define qe_setbits16(_addr, _v) iowrite16be(ioread16be(_addr) |  (_v), (_addr))
-#define qe_clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
+#define qe_setbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) |  (_v), (_addr))
+#define qe_clrbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) & ~(_v), (_addr))
 
-#define qe_setbits8(_addr, _v) iowrite8(ioread8(_addr) |  (_v), (_addr))
-#define qe_clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
+#define qe_setbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) |  (_v), (_addr))
+#define qe_clrbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) & ~(_v), (_addr))
 
-#define qe_clrsetbits32(addr, clear, set) \
-	iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
-#define qe_clrsetbits16(addr, clear, set) \
-	iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
-#define qe_clrsetbits8(addr, clear, set) \
-	iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
+#define qe_setbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) |  (_v), (_addr))
+#define qe_clrbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) & ~(_v), (_addr))
+
+#define qe_clrsetbits_be32(addr, clear, set) \
+	qe_iowrite32be((qe_ioread32be(addr) & ~(clear)) | (set), (addr))
+#define qe_clrsetbits_be16(addr, clear, set) \
+	qe_iowrite16be((qe_ioread16be(addr) & ~(clear)) | (set), (addr))
+#define qe_clrsetbits_8(addr, clear, set) \
+	qe_iowrite8((qe_ioread8(addr) & ~(clear)) | (set), (addr))
 
 /* Structure that defines QE firmware binary files.
  *
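
For illustration (not part of the patch; the function below is a hypothetical expansion): the renamed helpers remain plain read-modify-write accessors, only the underlying I/O primitives differ between PPC32 and the generic case, so qe_clrsetbits_be32(reg, 0x0000ffff, 0x00001234) is equivalent to:

static inline void example_clrset_be32(u32 __iomem *reg)
{
	u32 v = qe_ioread32be(reg);	/* in_be32() on PPC32, ioread32be() elsewhere */

	v &= ~0x0000ffff;		/* clear */
	v |= 0x00001234;		/* set */
	qe_iowrite32be(v, reg);		/* out_be32() on PPC32, iowrite32be() elsewhere */
}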
diff --git a/include/soc/fsl/qe/qe_ic.h b/include/soc/fsl/qe/qe_ic.h
deleted file mode 100644
index 714a9b8..0000000
--- a/include/soc/fsl/qe/qe_ic.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors: 	Shlomi Gridish <gridish@freescale.com>
- * 		Li Yang <leoli@freescale.com>
- *
- * Description:
- * QE IC external definitions and structure.
- */
-#ifndef _ASM_POWERPC_QE_IC_H
-#define _ASM_POWERPC_QE_IC_H
-
-#include <linux/irq.h>
-
-struct device_node;
-struct qe_ic;
-
-#define NUM_OF_QE_IC_GROUPS	6
-
-/* Flags when we init the QE IC */
-#define QE_IC_SPREADMODE_GRP_W			0x00000001
-#define QE_IC_SPREADMODE_GRP_X			0x00000002
-#define QE_IC_SPREADMODE_GRP_Y			0x00000004
-#define QE_IC_SPREADMODE_GRP_Z			0x00000008
-#define QE_IC_SPREADMODE_GRP_RISCA		0x00000010
-#define QE_IC_SPREADMODE_GRP_RISCB		0x00000020
-
-#define QE_IC_LOW_SIGNAL			0x00000100
-#define QE_IC_HIGH_SIGNAL			0x00000200
-
-#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH	0x00001000
-#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH	0x00002000
-#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH	0x00004000
-#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH	0x00008000
-#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH	0x00010000
-#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH	0x00020000
-#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH	0x00040000
-#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH	0x00080000
-#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH	0x00100000
-#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH	0x00200000
-#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH	0x00400000
-#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH	0x00800000
-#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT		(12)
-
-/* QE interrupt sources groups */
-enum qe_ic_grp_id {
-	QE_IC_GRP_W = 0,	/* QE interrupt controller group W */
-	QE_IC_GRP_X,		/* QE interrupt controller group X */
-	QE_IC_GRP_Y,		/* QE interrupt controller group Y */
-	QE_IC_GRP_Z,		/* QE interrupt controller group Z */
-	QE_IC_GRP_RISCA,	/* QE interrupt controller RISC group A */
-	QE_IC_GRP_RISCB		/* QE interrupt controller RISC group B */
-};
-
-#ifdef CONFIG_QUICC_ENGINE
-void qe_ic_init(struct device_node *node, unsigned int flags,
-		void (*low_handler)(struct irq_desc *desc),
-		void (*high_handler)(struct irq_desc *desc));
-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
-#else
-static inline void qe_ic_init(struct device_node *node, unsigned int flags,
-		void (*low_handler)(struct irq_desc *desc),
-		void (*high_handler)(struct irq_desc *desc))
-{}
-static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
-{ return 0; }
-static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
-{ return 0; }
-#endif /* CONFIG_QUICC_ENGINE */
-
-void qe_ic_set_highest_priority(unsigned int virq, int high);
-int qe_ic_set_priority(unsigned int virq, unsigned int priority);
-int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
-
-static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
-{
-	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
-
-	if (cascade_irq != NO_IRQ)
-		generic_handle_irq(cascade_irq);
-}
-
-static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
-{
-	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-	unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
-
-	if (cascade_irq != NO_IRQ)
-		generic_handle_irq(cascade_irq);
-}
-
-static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
-{
-	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-
-	if (cascade_irq != NO_IRQ)
-		generic_handle_irq(cascade_irq);
-
-	chip->irq_eoi(&desc->irq_data);
-}
-
-static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
-{
-	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-	unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-
-	if (cascade_irq != NO_IRQ)
-		generic_handle_irq(cascade_irq);
-
-	chip->irq_eoi(&desc->irq_data);
-}
-
-static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
-{
-	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-	unsigned int cascade_irq;
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-
-	cascade_irq = qe_ic_get_high_irq(qe_ic);
-	if (cascade_irq == NO_IRQ)
-		cascade_irq = qe_ic_get_low_irq(qe_ic);
-
-	if (cascade_irq != NO_IRQ)
-		generic_handle_irq(cascade_irq);
-
-	chip->irq_eoi(&desc->irq_data);
-}
-
-#endif /* _ASM_POWERPC_QE_IC_H */
diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h
index e9cc460..ba0e838 100644
--- a/include/soc/fsl/qe/ucc_fast.h
+++ b/include/soc/fsl/qe/ucc_fast.h
@@ -188,9 +188,9 @@ struct ucc_fast_private {
 	int stopped_tx;		/* Whether channel has been stopped for Tx
 				   (STOP_TX, etc.) */
 	int stopped_rx;		/* Whether channel has been stopped for Rx */
-	u32 ucc_fast_tx_virtual_fifo_base_offset;/* pointer to base of Tx
+	s32 ucc_fast_tx_virtual_fifo_base_offset;/* pointer to base of Tx
 						    virtual fifo */
-	u32 ucc_fast_rx_virtual_fifo_base_offset;/* pointer to base of Rx
+	s32 ucc_fast_rx_virtual_fifo_base_offset;/* pointer to base of Rx
 						    virtual fifo */
 #ifdef STATISTICS
 	u32 tx_frames;		/* Transmitted frames counter. */
diff --git a/include/soc/fsl/qe/ucc_slow.h b/include/soc/fsl/qe/ucc_slow.h
index 8696fde..d187a6b 100644
--- a/include/soc/fsl/qe/ucc_slow.h
+++ b/include/soc/fsl/qe/ucc_slow.h
@@ -185,7 +185,7 @@ struct ucc_slow_private {
 	struct ucc_slow_info *us_info;
 	struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */
 	struct ucc_slow_pram *us_pram;	/* a pointer to the parameter RAM */
-	u32 us_pram_offset;
+	s32 us_pram_offset;
 	int enabled_tx;		/* Whether channel is enabled for Tx (ENT) */
 	int enabled_rx;		/* Whether channel is enabled for Rx (ENR) */
 	int stopped_tx;		/* Whether channel has been stopped for Tx
@@ -194,8 +194,8 @@ struct ucc_slow_private {
 	struct list_head confQ;	/* frames passed to chip waiting for tx */
 	u32 first_tx_bd_mask;	/* mask is used in Tx routine to save status
 				   and length for first BD in a frame */
-	u32 tx_base_offset;	/* first BD in Tx BD table offset (In MURAM) */
-	u32 rx_base_offset;	/* first BD in Rx BD table offset (In MURAM) */
+	s32 tx_base_offset;	/* first BD in Tx BD table offset (In MURAM) */
+	s32 rx_base_offset;	/* first BD in Rx BD table offset (In MURAM) */
 	struct qe_bd *confBd;	/* next BD for confirm after Tx */
 	struct qe_bd *tx_bd;	/* next BD for new Tx request */
 	struct qe_bd *rx_bd;	/* next BD to collect after Rx */
diff --git a/include/sound/aess.h b/include/sound/aess.h
deleted file mode 100644
index cee0d09..0000000
--- a/include/sound/aess.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * AESS IP block reset
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- * Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-#ifndef __SOUND_AESS_H__
-#define __SOUND_AESS_H__
-
-#include <linux/kernel.h>
-#include <linux/io.h>
-
-/*
- * AESS_AUTO_GATING_ENABLE_OFFSET: offset in bytes of the AESS IP
- *     block's AESS_AUTO_GATING_ENABLE__1 register from the IP block's
- *     base address
- */
-#define AESS_AUTO_GATING_ENABLE_OFFSET			0x07c
-
-/* Register bitfields in the AESS_AUTO_GATING_ENABLE__1 register */
-#define AESS_AUTO_GATING_ENABLE_SHIFT			0
-
-/**
- * aess_enable_autogating - enable AESS internal autogating
- * @oh: struct omap_hwmod *
- *
- * Enable internal autogating on the AESS.  This allows the AESS to
- * indicate that it is idle to the OMAP PRCM.  Returns 0.
- */
-static inline void aess_enable_autogating(void __iomem *base)
-{
-	u32 v;
-
-	/* Set AESS_AUTO_GATING_ENABLE__1.ENABLE to allow idle entry */
-	v = 1 << AESS_AUTO_GATING_ENABLE_SHIFT;
-	writel(v, base + AESS_AUTO_GATING_ENABLE_OFFSET);
-}
-
-#endif /* __SOUND_AESS_H__ */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 5a31525e..f657ff0 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -1450,7 +1450,7 @@ struct snd_pcm_status64 {
 #define SNDRV_PCM_IOCTL_STATUS_EXT64	_IOWR('A', 0x24, struct snd_pcm_status64)
 
 struct snd_pcm_status32 {
-	s32 state;		/* stream state */
+	snd_pcm_state_t state;		/* stream state */
 	s32 trigger_tstamp_sec;	/* time when stream was started/stopped/paused */
 	s32 trigger_tstamp_nsec;
 	s32 tstamp_sec;		/* reference timestamp */
@@ -1461,7 +1461,7 @@ struct snd_pcm_status32 {
 	u32 avail;		/* number of frames available */
 	u32 avail_max;		/* max frames available on hw since last status */
 	u32 overrange;		/* count of ADC (capture) overrange detections from last status */
-	s32 suspended_state;	/* suspended stream state */
+	snd_pcm_state_t suspended_state;	/* suspended stream state */
 	u32 audio_tstamp_data;	/* needed for 64-bit alignment, used for configs/report to/from userspace */
 	s32 audio_tstamp_sec;	/* sample counter, wall clock, PHC or on-demand sync'ed */
 	s32 audio_tstamp_nsec;
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 182c9fe..19c8766 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -48,6 +48,16 @@ struct partial_cluster;
 	{ EXT4_GET_BLOCKS_KEEP_SIZE,		"KEEP_SIZE" },		\
 	{ EXT4_GET_BLOCKS_ZERO,			"ZERO" })
 
+/*
+ * __print_flags() requires that all enum values be wrapped in the
+ * TRACE_DEFINE_ENUM macro so that the enum value can be encoded in the ftrace
+ * ring buffer.
+ */
+TRACE_DEFINE_ENUM(BH_New);
+TRACE_DEFINE_ENUM(BH_Mapped);
+TRACE_DEFINE_ENUM(BH_Unwritten);
+TRACE_DEFINE_ENUM(BH_Boundary);
+
 #define show_mflags(flags) __print_flags(flags, "",	\
 	{ EXT4_MAP_NEW,		"N" },			\
 	{ EXT4_MAP_MAPPED,	"M" },			\
@@ -62,11 +72,18 @@ struct partial_cluster;
 	{ EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER,"1ST_CLUSTER" },\
 	{ EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER,	"LAST_CLUSTER" })
 
+TRACE_DEFINE_ENUM(ES_WRITTEN_B);
+TRACE_DEFINE_ENUM(ES_UNWRITTEN_B);
+TRACE_DEFINE_ENUM(ES_DELAYED_B);
+TRACE_DEFINE_ENUM(ES_HOLE_B);
+TRACE_DEFINE_ENUM(ES_REFERENCED_B);
+
 #define show_extent_status(status) __print_flags(status, "",	\
 	{ EXTENT_STATUS_WRITTEN,	"W" },			\
 	{ EXTENT_STATUS_UNWRITTEN,	"U" },			\
 	{ EXTENT_STATUS_DELAYED,	"D" },			\
-	{ EXTENT_STATUS_HOLE,		"H" })
+	{ EXTENT_STATUS_HOLE,		"H" },			\
+	{ EXTENT_STATUS_REFERENCED,	"R" })
 
 #define show_falloc_mode(mode) __print_flags(mode, "|",		\
 	{ FALLOC_FL_KEEP_SIZE,		"KEEP_SIZE"},		\
@@ -2265,7 +2282,7 @@ DECLARE_EVENT_CLASS(ext4__es_extent,
 		__entry->ino	= inode->i_ino;
 		__entry->lblk	= es->es_lblk;
 		__entry->len	= es->es_len;
-		__entry->pblk	= ext4_es_pblock(es);
+		__entry->pblk	= ext4_es_show_pblock(es);
 		__entry->status	= ext4_es_status(es);
 	),
 
@@ -2354,7 +2371,7 @@ TRACE_EVENT(ext4_es_find_extent_range_exit,
 		__entry->ino	= inode->i_ino;
 		__entry->lblk	= es->es_lblk;
 		__entry->len	= es->es_len;
-		__entry->pblk	= ext4_es_pblock(es);
+		__entry->pblk	= ext4_es_show_pblock(es);
 		__entry->status	= ext4_es_status(es);
 	),
 
@@ -2408,7 +2425,7 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
 		__entry->ino	= inode->i_ino;
 		__entry->lblk	= es->es_lblk;
 		__entry->len	= es->es_len;
-		__entry->pblk	= ext4_es_pblock(es);
+		__entry->pblk	= ext4_es_show_pblock(es);
 		__entry->status	= ext4_es_status(es);
 		__entry->found	= found;
 	),
@@ -2576,7 +2593,7 @@ TRACE_EVENT(ext4_es_insert_delayed_block,
 		__entry->ino		= inode->i_ino;
 		__entry->lblk		= es->es_lblk;
 		__entry->len		= es->es_len;
-		__entry->pblk		= ext4_es_pblock(es);
+		__entry->pblk		= ext4_es_show_pblock(es);
 		__entry->status		= ext4_es_status(es);
 		__entry->allocated	= allocated;
 	),
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 1796ff9..67a9783 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -49,6 +49,7 @@ TRACE_DEFINE_ENUM(CP_SYNC);
 TRACE_DEFINE_ENUM(CP_RECOVERY);
 TRACE_DEFINE_ENUM(CP_DISCARD);
 TRACE_DEFINE_ENUM(CP_TRIMMED);
+TRACE_DEFINE_ENUM(CP_PAUSE);
 
 #define show_block_type(type)						\
 	__print_symbolic(type,						\
@@ -124,13 +125,14 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
 		{ CP_SYNC,	"Sync" },				\
 		{ CP_RECOVERY,	"Recovery" },				\
 		{ CP_DISCARD,	"Discard" },				\
-		{ CP_UMOUNT,	"Umount" },				\
+		{ CP_PAUSE,	"Pause" },				\
 		{ CP_TRIMMED,	"Trimmed" })
 
 #define show_fsync_cpreason(type)					\
 	__print_symbolic(type,						\
 		{ CP_NO_NEEDED,		"no needed" },			\
 		{ CP_NON_REGULAR,	"non regular" },		\
+		{ CP_COMPRESSED,	"compressed" },			\
 		{ CP_HARDLINK,		"hardlink" },			\
 		{ CP_SB_NEED_CP,	"sb needs cp" },		\
 		{ CP_WRONG_PINO,	"wrong pino" },			\
@@ -148,6 +150,11 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
 		{ F2FS_GOING_DOWN_METAFLUSH,	"meta flush" },		\
 		{ F2FS_GOING_DOWN_NEED_FSCK,	"need fsck" })
 
+#define show_compress_algorithm(type)					\
+	__print_symbolic(type,						\
+		{ COMPRESS_LZO,		"LZO" },			\
+		{ COMPRESS_LZ4,		"LZ4" })
+
 struct f2fs_sb_info;
 struct f2fs_io_info;
 struct extent_info;
@@ -1710,6 +1717,100 @@ TRACE_EVENT(f2fs_shutdown,
 		__entry->ret)
 );
 
+DECLARE_EVENT_CLASS(f2fs_zip_start,
+
+	TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+			unsigned int cluster_size, unsigned char algtype),
+
+	TP_ARGS(inode, cluster_idx, cluster_size, algtype),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(pgoff_t, idx)
+		__field(unsigned int, size)
+		__field(unsigned int, algtype)
+	),
+
+	TP_fast_assign(
+		__entry->dev = inode->i_sb->s_dev;
+		__entry->ino = inode->i_ino;
+		__entry->idx = cluster_idx;
+		__entry->size = cluster_size;
+		__entry->algtype = algtype;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, cluster_idx:%lu, "
+		"cluster_size = %u, algorithm = %s",
+		show_dev_ino(__entry),
+		__entry->idx,
+		__entry->size,
+		show_compress_algorithm(__entry->algtype))
+);
+
+DECLARE_EVENT_CLASS(f2fs_zip_end,
+
+	TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+			unsigned int compressed_size, int ret),
+
+	TP_ARGS(inode, cluster_idx, compressed_size, ret),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(pgoff_t, idx)
+		__field(unsigned int, size)
+		__field(unsigned int, ret)
+	),
+
+	TP_fast_assign(
+		__entry->dev = inode->i_sb->s_dev;
+		__entry->ino = inode->i_ino;
+		__entry->idx = cluster_idx;
+		__entry->size = compressed_size;
+		__entry->ret = ret;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, cluster_idx:%lu, "
+		"compressed_size = %u, ret = %d",
+		show_dev_ino(__entry),
+		__entry->idx,
+		__entry->size,
+		__entry->ret)
+);
+
+DEFINE_EVENT(f2fs_zip_start, f2fs_compress_pages_start,
+
+	TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+		unsigned int cluster_size, unsigned char algtype),
+
+	TP_ARGS(inode, cluster_idx, cluster_size, algtype)
+);
+
+DEFINE_EVENT(f2fs_zip_start, f2fs_decompress_pages_start,
+
+	TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+		unsigned int cluster_size, unsigned char algtype),
+
+	TP_ARGS(inode, cluster_idx, cluster_size, algtype)
+);
+
+DEFINE_EVENT(f2fs_zip_end, f2fs_compress_pages_end,
+
+	TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+			unsigned int compressed_size, int ret),
+
+	TP_ARGS(inode, cluster_idx, compressed_size, ret)
+);
+
+DEFINE_EVENT(f2fs_zip_end, f2fs_decompress_pages_end,
+
+	TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+			unsigned int compressed_size, int ret),
+
+	TP_ARGS(inode, cluster_idx, compressed_size, ret)
+);
+
 #endif /* _TRACE_F2FS_H */
 
  /* This part must be outside protection */
diff --git a/include/trace/events/intel_iommu.h b/include/trace/events/intel_iommu.h
index 54e61d4..112bd06 100644
--- a/include/trace/events/intel_iommu.h
+++ b/include/trace/events/intel_iommu.h
@@ -49,12 +49,6 @@ DEFINE_EVENT(dma_map, map_single,
 	TP_ARGS(dev, dev_addr, phys_addr, size)
 );
 
-DEFINE_EVENT(dma_map, map_sg,
-	TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
-		 size_t size),
-	TP_ARGS(dev, dev_addr, phys_addr, size)
-);
-
 DEFINE_EVENT(dma_map, bounce_map_single,
 	TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
 		 size_t size),
@@ -99,6 +93,48 @@ DEFINE_EVENT(dma_unmap, bounce_unmap_single,
 	TP_ARGS(dev, dev_addr, size)
 );
 
+DECLARE_EVENT_CLASS(dma_map_sg,
+	TP_PROTO(struct device *dev, int index, int total,
+		 struct scatterlist *sg),
+
+	TP_ARGS(dev, index, total, sg),
+
+	TP_STRUCT__entry(
+		__string(dev_name, dev_name(dev))
+		__field(dma_addr_t, dev_addr)
+		__field(phys_addr_t, phys_addr)
+		__field(size_t,	size)
+		__field(int, index)
+		__field(int, total)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, dev_name(dev));
+		__entry->dev_addr = sg->dma_address;
+		__entry->phys_addr = sg_phys(sg);
+		__entry->size = sg->dma_length;
+		__entry->index = index;
+		__entry->total = total;
+	),
+
+	TP_printk("dev=%s [%d/%d] dev_addr=0x%llx phys_addr=0x%llx size=%zu",
+		  __get_str(dev_name), __entry->index, __entry->total,
+		  (unsigned long long)__entry->dev_addr,
+		  (unsigned long long)__entry->phys_addr,
+		  __entry->size)
+);
+
+DEFINE_EVENT(dma_map_sg, map_sg,
+	TP_PROTO(struct device *dev, int index, int total,
+		 struct scatterlist *sg),
+	TP_ARGS(dev, index, total, sg)
+);
+
+DEFINE_EVENT(dma_map_sg, bounce_map_sg,
+	TP_PROTO(struct device *dev, int index, int total,
+		 struct scatterlist *sg),
+	TP_ARGS(dev, index, total, sg)
+);
 #endif /* _TRACE_INTEL_IOMMU_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index b352d66..27bd9e4 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -320,6 +320,7 @@ TRACE_EVENT(io_uring_complete,
  * io_uring_submit_sqe - called before submitting one SQE
  *
  * @ctx:		pointer to a ring context structure
+ * @opcode:		opcode of request
  * @user_data:		user data associated with the request
 * @force_nonblock:	whether or not the context is blocking
  * @sq_thread:		true if sq_thread has submitted this SQE
@@ -329,12 +330,14 @@ TRACE_EVENT(io_uring_complete,
  */
 TRACE_EVENT(io_uring_submit_sqe,
 
-	TP_PROTO(void *ctx, u64 user_data, bool force_nonblock, bool sq_thread),
+	TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock,
+		 bool sq_thread),
 
-	TP_ARGS(ctx, user_data, force_nonblock, sq_thread),
+	TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread),
 
 	TP_STRUCT__entry (
 		__field(  void *,	ctx		)
+		__field(  u8,		opcode		)
 		__field(  u64,		user_data	)
 		__field(  bool,		force_nonblock	)
 		__field(  bool,		sq_thread	)
@@ -342,13 +345,15 @@ TRACE_EVENT(io_uring_submit_sqe,
 
 	TP_fast_assign(
 		__entry->ctx		= ctx;
+		__entry->opcode		= opcode;
 		__entry->user_data	= user_data;
 		__entry->force_nonblock	= force_nonblock;
 		__entry->sq_thread	= sq_thread;
 	),
 
-	TP_printk("ring %p, user data 0x%llx, non block %d, sq_thread %d",
-			  __entry->ctx, (unsigned long long) __entry->user_data,
+	TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d",
+			  __entry->ctx, __entry->opcode,
+			  (unsigned long long) __entry->user_data,
 			  __entry->force_nonblock, __entry->sq_thread)
 );
 
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index ad7e642b..f65b1f6 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -88,8 +88,8 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__entry->node		= node;
 	),
 
-	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
-		__entry->call_site,
+	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+		(void *)__entry->call_site,
 		__entry->ptr,
 		__entry->bytes_req,
 		__entry->bytes_alloc,
diff --git a/include/trace/events/pwm.h b/include/trace/events/pwm.h
new file mode 100644
index 0000000..cf243de
--- /dev/null
+++ b/include/trace/events/pwm.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pwm
+
+#if !defined(_TRACE_PWM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PWM_H
+
+#include <linux/pwm.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(pwm,
+
+	TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state),
+
+	TP_ARGS(pwm, state),
+
+	TP_STRUCT__entry(
+		__field(struct pwm_device *, pwm)
+		__field(u64, period)
+		__field(u64, duty_cycle)
+		__field(enum pwm_polarity, polarity)
+		__field(bool, enabled)
+	),
+
+	TP_fast_assign(
+		__entry->pwm = pwm;
+		__entry->period = state->period;
+		__entry->duty_cycle = state->duty_cycle;
+		__entry->polarity = state->polarity;
+		__entry->enabled = state->enabled;
+	),
+
+	TP_printk("%p: period=%llu duty_cycle=%llu polarity=%d enabled=%d",
+		  __entry->pwm, __entry->period, __entry->duty_cycle,
+		  __entry->polarity, __entry->enabled)
+
+);
+
+DEFINE_EVENT(pwm, pwm_apply,
+
+	TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state),
+
+	TP_ARGS(pwm, state)
+
+);
+
+DEFINE_EVENT(pwm, pwm_get,
+
+	TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state),
+
+	TP_ARGS(pwm, state)
+
+);
+
+#endif /* _TRACE_PWM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/rdma_core.h b/include/trace/events/rdma_core.h
new file mode 100644
index 0000000..17642aa
--- /dev/null
+++ b/include/trace/events/rdma_core.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Trace point definitions for core RDMA functions.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rdma_core
+
+#if !defined(_TRACE_RDMA_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RDMA_CORE_H
+
+#include <linux/tracepoint.h>
+#include <rdma/ib_verbs.h>
+
+/*
+ * enum ib_poll_context, from include/rdma/ib_verbs.h
+ */
+#define IB_POLL_CTX_LIST			\
+	ib_poll_ctx(DIRECT)			\
+	ib_poll_ctx(SOFTIRQ)			\
+	ib_poll_ctx(WORKQUEUE)			\
+	ib_poll_ctx_end(UNBOUND_WORKQUEUE)
+
+#undef ib_poll_ctx
+#undef ib_poll_ctx_end
+
+#define ib_poll_ctx(x)		TRACE_DEFINE_ENUM(IB_POLL_##x);
+#define ib_poll_ctx_end(x)	TRACE_DEFINE_ENUM(IB_POLL_##x);
+
+IB_POLL_CTX_LIST
+
+#undef ib_poll_ctx
+#undef ib_poll_ctx_end
+
+#define ib_poll_ctx(x)		{ IB_POLL_##x, #x },
+#define ib_poll_ctx_end(x)	{ IB_POLL_##x, #x }
+
+#define rdma_show_ib_poll_ctx(x) \
+		__print_symbolic(x, IB_POLL_CTX_LIST)
+
+/**
+ ** Completion Queue events
+ **/
+
+TRACE_EVENT(cq_schedule,
+	TP_PROTO(
+		struct ib_cq *cq
+	),
+
+	TP_ARGS(cq),
+
+	TP_STRUCT__entry(
+		__field(u32, cq_id)
+	),
+
+	TP_fast_assign(
+		cq->timestamp = ktime_get();
+		cq->interrupt = true;
+
+		__entry->cq_id = cq->res.id;
+	),
+
+	TP_printk("cq.id=%u", __entry->cq_id)
+);
+
+TRACE_EVENT(cq_reschedule,
+	TP_PROTO(
+		struct ib_cq *cq
+	),
+
+	TP_ARGS(cq),
+
+	TP_STRUCT__entry(
+		__field(u32, cq_id)
+	),
+
+	TP_fast_assign(
+		cq->timestamp = ktime_get();
+		cq->interrupt = false;
+
+		__entry->cq_id = cq->res.id;
+	),
+
+	TP_printk("cq.id=%u", __entry->cq_id)
+);
+
+TRACE_EVENT(cq_process,
+	TP_PROTO(
+		const struct ib_cq *cq
+	),
+
+	TP_ARGS(cq),
+
+	TP_STRUCT__entry(
+		__field(u32, cq_id)
+		__field(bool, interrupt)
+		__field(s64, latency)
+	),
+
+	TP_fast_assign(
+		ktime_t latency = ktime_sub(ktime_get(), cq->timestamp);
+
+		__entry->cq_id = cq->res.id;
+		__entry->latency = ktime_to_us(latency);
+		__entry->interrupt = cq->interrupt;
+	),
+
+	TP_printk("cq.id=%u wake-up took %lld [us] from %s",
+		__entry->cq_id, __entry->latency,
+		__entry->interrupt ? "interrupt" : "reschedule"
+	)
+);
+
+TRACE_EVENT(cq_poll,
+	TP_PROTO(
+		const struct ib_cq *cq,
+		int requested,
+		int rc
+	),
+
+	TP_ARGS(cq, requested, rc),
+
+	TP_STRUCT__entry(
+		__field(u32, cq_id)
+		__field(int, requested)
+		__field(int, rc)
+	),
+
+	TP_fast_assign(
+		__entry->cq_id = cq->res.id;
+		__entry->requested = requested;
+		__entry->rc = rc;
+	),
+
+	TP_printk("cq.id=%u requested %d, returned %d",
+		__entry->cq_id, __entry->requested, __entry->rc
+	)
+);
+
+TRACE_EVENT(cq_drain_complete,
+	TP_PROTO(
+		const struct ib_cq *cq
+	),
+
+	TP_ARGS(cq),
+
+	TP_STRUCT__entry(
+		__field(u32, cq_id)
+	),
+
+	TP_fast_assign(
+		__entry->cq_id = cq->res.id;
+	),
+
+	TP_printk("cq.id=%u",
+		__entry->cq_id
+	)
+);
+
+
+TRACE_EVENT(cq_modify,
+	TP_PROTO(
+		const struct ib_cq *cq,
+		u16 comps,
+		u16 usec
+	),
+
+	TP_ARGS(cq, comps, usec),
+
+	TP_STRUCT__entry(
+		__field(u32, cq_id)
+		__field(unsigned int, comps)
+		__field(unsigned int, usec)
+	),
+
+	TP_fast_assign(
+		__entry->cq_id = cq->res.id;
+		__entry->comps = comps;
+		__entry->usec = usec;
+	),
+
+	TP_printk("cq.id=%u comps=%u usec=%u",
+		__entry->cq_id, __entry->comps, __entry->usec
+	)
+);
+
+TRACE_EVENT(cq_alloc,
+	TP_PROTO(
+		const struct ib_cq *cq,
+		int nr_cqe,
+		int comp_vector,
+		enum ib_poll_context poll_ctx
+	),
+
+	TP_ARGS(cq, nr_cqe, comp_vector, poll_ctx),
+
+	TP_STRUCT__entry(
+		__field(u32, cq_id)
+		__field(int, nr_cqe)
+		__field(int, comp_vector)
+		__field(unsigned long, poll_ctx)
+	),
+
+	TP_fast_assign(
+		__entry->cq_id = cq->res.id;
+		__entry->nr_cqe = nr_cqe;
+		__entry->comp_vector = comp_vector;
+		__entry->poll_ctx = poll_ctx;
+	),
+
+	TP_printk("cq.id=%u nr_cqe=%d comp_vector=%d poll_ctx=%s",
+		__entry->cq_id, __entry->nr_cqe, __entry->comp_vector,
+		rdma_show_ib_poll_ctx(__entry->poll_ctx)
+	)
+);
+
+TRACE_EVENT(cq_alloc_error,
+	TP_PROTO(
+		int nr_cqe,
+		int comp_vector,
+		enum ib_poll_context poll_ctx,
+		int rc
+	),
+
+	TP_ARGS(nr_cqe, comp_vector, poll_ctx, rc),
+
+	TP_STRUCT__entry(
+		__field(int, rc)
+		__field(int, nr_cqe)
+		__field(int, comp_vector)
+		__field(unsigned long, poll_ctx)
+	),
+
+	TP_fast_assign(
+		__entry->rc = rc;
+		__entry->nr_cqe = nr_cqe;
+		__entry->comp_vector = comp_vector;
+		__entry->poll_ctx = poll_ctx;
+	),
+
+	TP_printk("nr_cqe=%d comp_vector=%d poll_ctx=%s rc=%d",
+		__entry->nr_cqe, __entry->comp_vector,
+		rdma_show_ib_poll_ctx(__entry->poll_ctx), __entry->rc
+	)
+);
+
+TRACE_EVENT(cq_free,
+	TP_PROTO(
+		const struct ib_cq *cq
+	),
+
+	TP_ARGS(cq),
+
+	TP_STRUCT__entry(
+		__field(u32, cq_id)
+	),
+
+	TP_fast_assign(
+		__entry->cq_id = cq->res.id;
+	),
+
+	TP_printk("cq.id=%u", __entry->cq_id)
+);
+
+/**
+ ** Memory Region events
+ **/
+
+/*
+ * enum ib_mr_type, from include/rdma/ib_verbs.h
+ */
+#define IB_MR_TYPE_LIST				\
+	ib_mr_type_item(MEM_REG)		\
+	ib_mr_type_item(SG_GAPS)		\
+	ib_mr_type_item(DM)			\
+	ib_mr_type_item(USER)			\
+	ib_mr_type_item(DMA)			\
+	ib_mr_type_end(INTEGRITY)
+
+#undef ib_mr_type_item
+#undef ib_mr_type_end
+
+#define ib_mr_type_item(x)	TRACE_DEFINE_ENUM(IB_MR_TYPE_##x);
+#define ib_mr_type_end(x)	TRACE_DEFINE_ENUM(IB_MR_TYPE_##x);
+
+IB_MR_TYPE_LIST
+
+#undef ib_mr_type_item
+#undef ib_mr_type_end
+
+#define ib_mr_type_item(x)	{ IB_MR_TYPE_##x, #x },
+#define ib_mr_type_end(x)	{ IB_MR_TYPE_##x, #x }
+
+#define rdma_show_ib_mr_type(x) \
+		__print_symbolic(x, IB_MR_TYPE_LIST)
+
+TRACE_EVENT(mr_alloc,
+	TP_PROTO(
+		const struct ib_pd *pd,
+		enum ib_mr_type mr_type,
+		u32 max_num_sg,
+		const struct ib_mr *mr
+	),
+
+	TP_ARGS(pd, mr_type, max_num_sg, mr),
+
+	TP_STRUCT__entry(
+		__field(u32, pd_id)
+		__field(u32, mr_id)
+		__field(u32, max_num_sg)
+		__field(int, rc)
+		__field(unsigned long, mr_type)
+	),
+
+	TP_fast_assign(
+		__entry->pd_id = pd->res.id;
+		if (IS_ERR(mr)) {
+			__entry->mr_id = 0;
+			__entry->rc = PTR_ERR(mr);
+		} else {
+			__entry->mr_id = mr->res.id;
+			__entry->rc = 0;
+		}
+		__entry->max_num_sg = max_num_sg;
+		__entry->mr_type = mr_type;
+	),
+
+	TP_printk("pd.id=%u mr.id=%u type=%s max_num_sg=%u rc=%d",
+		__entry->pd_id, __entry->mr_id,
+		rdma_show_ib_mr_type(__entry->mr_type),
+		__entry->max_num_sg, __entry->rc)
+);
+
+TRACE_EVENT(mr_integ_alloc,
+	TP_PROTO(
+		const struct ib_pd *pd,
+		u32 max_num_data_sg,
+		u32 max_num_meta_sg,
+		const struct ib_mr *mr
+	),
+
+	TP_ARGS(pd, max_num_data_sg, max_num_meta_sg, mr),
+
+	TP_STRUCT__entry(
+		__field(u32, pd_id)
+		__field(u32, mr_id)
+		__field(u32, max_num_data_sg)
+		__field(u32, max_num_meta_sg)
+		__field(int, rc)
+	),
+
+	TP_fast_assign(
+		__entry->pd_id = pd->res.id;
+		if (IS_ERR(mr)) {
+			__entry->mr_id = 0;
+			__entry->rc = PTR_ERR(mr);
+		} else {
+			__entry->mr_id = mr->res.id;
+			__entry->rc = 0;
+		}
+		__entry->max_num_data_sg = max_num_data_sg;
+		__entry->max_num_meta_sg = max_num_meta_sg;
+	),
+
+	TP_printk("pd.id=%u mr.id=%u max_num_data_sg=%u max_num_meta_sg=%u rc=%d",
+		__entry->pd_id, __entry->mr_id, __entry->max_num_data_sg,
+		__entry->max_num_meta_sg, __entry->rc)
+);
+
+TRACE_EVENT(mr_dereg,
+	TP_PROTO(
+		const struct ib_mr *mr
+	),
+
+	TP_ARGS(mr),
+
+	TP_STRUCT__entry(
+		__field(u32, id)
+	),
+
+	TP_fast_assign(
+		__entry->id = mr->res.id;
+	),
+
+	TP_printk("mr.id=%u", __entry->id)
+);
+
+#endif /* _TRACE_RDMA_CORE_H */
+
+#include <trace/define_trace.h>
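
For reference, the IB_POLL_CTX_LIST / IB_MR_TYPE_LIST constructions above expand a single list macro twice: first to emit TRACE_DEFINE_ENUM() for every value (so the enum can be decoded from the ftrace ring buffer), then to build the { value, "name" } pairs consumed by __print_symbolic(). A minimal standalone sketch of the same preprocessor pattern, with illustrative names and no tracing infrastructure:

  #include <stdio.h>

  /* Single list of symbols, expanded twice with different item macros. */
  #define COLOR_LIST		\
  	color_item(RED)		\
  	color_item(GREEN)	\
  	color_item_end(BLUE)

  /* First pass: declare the enum values. */
  #define color_item(x)		COLOR_##x,
  #define color_item_end(x)	COLOR_##x
  enum color { COLOR_LIST };
  #undef color_item
  #undef color_item_end

  /* Second pass: build a { value, "name" } table, as __print_symbolic() needs. */
  #define color_item(x)		{ COLOR_##x, #x },
  #define color_item_end(x)	{ COLOR_##x, #x }
  static const struct { int val; const char *name; } color_names[] = {
  	COLOR_LIST
  };

  int main(void)
  {
  	for (unsigned int i = 0; i < sizeof(color_names) / sizeof(color_names[0]); i++)
  		printf("%d -> %s\n", color_names[i].val, color_names[i].name);
  	return 0;
  }
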
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 1879058..c0e4c93 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -729,6 +729,7 @@ TRACE_EVENT(xprtrdma_post_send,
 
 	TP_STRUCT__entry(
 		__field(const void *, req)
+		__field(const void *, sc)
 		__field(unsigned int, task_id)
 		__field(unsigned int, client_id)
 		__field(int, num_sge)
@@ -743,14 +744,15 @@ TRACE_EVENT(xprtrdma_post_send,
 		__entry->client_id = rqst->rq_task->tk_client ?
 				     rqst->rq_task->tk_client->cl_clid : -1;
 		__entry->req = req;
+		__entry->sc = req->rl_sendctx;
 		__entry->num_sge = req->rl_wr.num_sge;
 		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
 		__entry->status = status;
 	),
 
-	TP_printk("task:%u@%u req=%p (%d SGE%s) %sstatus=%d",
+	TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %sstatus=%d",
 		__entry->task_id, __entry->client_id,
-		__entry->req, __entry->num_sge,
+		__entry->req, __entry->sc, __entry->num_sge,
 		(__entry->num_sge == 1 ? "" : "s"),
 		(__entry->signaled ? "signaled " : ""),
 		__entry->status
@@ -849,6 +851,7 @@ TRACE_EVENT(xprtrdma_wc_send,
 
 	TP_STRUCT__entry(
 		__field(const void *, req)
+		__field(const void *, sc)
 		__field(unsigned int, unmap_count)
 		__field(unsigned int, status)
 		__field(unsigned int, vendor_err)
@@ -856,13 +859,14 @@ TRACE_EVENT(xprtrdma_wc_send,
 
 	TP_fast_assign(
 		__entry->req = sc->sc_req;
+		__entry->sc = sc;
 		__entry->unmap_count = sc->sc_unmap_count;
 		__entry->status = wc->status;
 		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
 	),
 
-	TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
-		__entry->req, __entry->unmap_count,
+	TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)",
+		__entry->req, __entry->sc, __entry->unmap_count,
 		rdma_show_wc_status(__entry->status),
 		__entry->status, __entry->vendor_err
 	)
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
new file mode 100644
index 0000000..f076c43
--- /dev/null
+++ b/include/trace/events/scmi.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM scmi
+
+#if !defined(_TRACE_SCMI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCMI_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(scmi_xfer_begin,
+	TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
+		 bool poll),
+	TP_ARGS(transfer_id, msg_id, protocol_id, seq, poll),
+
+	TP_STRUCT__entry(
+		__field(int, transfer_id)
+		__field(u8, msg_id)
+		__field(u8, protocol_id)
+		__field(u16, seq)
+		__field(bool, poll)
+	),
+
+	TP_fast_assign(
+		__entry->transfer_id = transfer_id;
+		__entry->msg_id = msg_id;
+		__entry->protocol_id = protocol_id;
+		__entry->seq = seq;
+		__entry->poll = poll;
+	),
+
+	TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u poll=%u",
+		__entry->transfer_id, __entry->msg_id, __entry->protocol_id,
+		__entry->seq, __entry->poll)
+);
+
+TRACE_EVENT(scmi_xfer_end,
+	TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
+		 u32 status),
+	TP_ARGS(transfer_id, msg_id, protocol_id, seq, status),
+
+	TP_STRUCT__entry(
+		__field(int, transfer_id)
+		__field(u8, msg_id)
+		__field(u8, protocol_id)
+		__field(u16, seq)
+		__field(u32, status)
+	),
+
+	TP_fast_assign(
+		__entry->transfer_id = transfer_id;
+		__entry->msg_id = msg_id;
+		__entry->protocol_id = protocol_id;
+		__entry->seq = seq;
+		__entry->status = status;
+	),
+
+	TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%u",
+		__entry->transfer_id, __entry->msg_id, __entry->protocol_id,
+		__entry->seq, __entry->status)
+);
+
+TRACE_EVENT(scmi_rx_done,
+	TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
+		 u8 msg_type),
+	TP_ARGS(transfer_id, msg_id, protocol_id, seq, msg_type),
+
+	TP_STRUCT__entry(
+		__field(int, transfer_id)
+		__field(u8, msg_id)
+		__field(u8, protocol_id)
+		__field(u16, seq)
+		__field(u8, msg_type)
+	),
+
+	TP_fast_assign(
+		__entry->transfer_id = transfer_id;
+		__entry->msg_id = msg_id;
+		__entry->protocol_id = protocol_id;
+		__entry->seq = seq;
+		__entry->msg_type = msg_type;
+	),
+
+	TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u msg_type=%u",
+		__entry->transfer_id, __entry->msg_id, __entry->protocol_id,
+		__entry->seq, __entry->msg_type)
+);
+#endif /* _TRACE_SCMI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 8c73ffb..ee99357 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -185,6 +185,7 @@ DECLARE_EVENT_CLASS(rpc_task_running,
 DEFINE_RPC_RUNNING_EVENT(begin);
 DEFINE_RPC_RUNNING_EVENT(run_action);
 DEFINE_RPC_RUNNING_EVENT(complete);
+DEFINE_RPC_RUNNING_EVENT(signalled);
 DEFINE_RPC_RUNNING_EVENT(end);
 
 DECLARE_EVENT_CLASS(rpc_task_queued,
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index 83860de..248bc09 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -130,7 +130,7 @@ DECLARE_EVENT_CLASS(v4l2_event_class,
 		__entry->bytesused = buf->bytesused;
 		__entry->flags = buf->flags;
 		__entry->field = buf->field;
-		__entry->timestamp = timeval_to_ns(&buf->timestamp);
+		__entry->timestamp = v4l2_buffer_get_timestamp(buf);
 		__entry->timecode_type = buf->timecode.type;
 		__entry->timecode_flags = buf->timecode.flags;
 		__entry->timecode_frames = buf->timecode.frames;
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index ef50be4..d94def2 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -67,8 +67,8 @@ DECLARE_EVENT_CLASS(writeback_page_template,
 
 	TP_fast_assign(
 		strscpy_pad(__entry->name,
-			    mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)",
-			    32);
+			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
+					 NULL), 32);
 		__entry->ino = mapping ? mapping->host->i_ino : 0;
 		__entry->index = page->index;
 	),
@@ -111,8 +111,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
 
 		/* may be called for files on pseudo FSes w/ unregistered bdi */
-		strscpy_pad(__entry->name,
-			    bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
+		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
 		__entry->ino		= inode->i_ino;
 		__entry->state		= inode->i_state;
 		__entry->flags		= flags;
@@ -193,7 +192,7 @@ TRACE_EVENT(inode_foreign_history,
 	),
 
 	TP_fast_assign(
-		strncpy(__entry->name, dev_name(inode_to_bdi(inode)->dev), 32);
+		strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
 		__entry->ino		= inode->i_ino;
 		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
 		__entry->history	= history;
@@ -222,7 +221,7 @@ TRACE_EVENT(inode_switch_wbs,
 	),
 
 	TP_fast_assign(
-		strncpy(__entry->name,	dev_name(old_wb->bdi->dev), 32);
+		strncpy(__entry->name,	bdi_dev_name(old_wb->bdi), 32);
 		__entry->ino		= inode->i_ino;
 		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
 		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
@@ -255,7 +254,7 @@ TRACE_EVENT(track_foreign_dirty,
 		struct address_space *mapping = page_mapping(page);
 		struct inode *inode = mapping ? mapping->host : NULL;
 
-		strncpy(__entry->name,	dev_name(wb->bdi->dev), 32);
+		strncpy(__entry->name,	bdi_dev_name(wb->bdi), 32);
 		__entry->bdi_id		= wb->bdi->id;
 		__entry->ino		= inode ? inode->i_ino : 0;
 		__entry->memcg_id	= wb->memcg_css->id;
@@ -288,7 +287,7 @@ TRACE_EVENT(flush_foreign,
 	),
 
 	TP_fast_assign(
-		strncpy(__entry->name,	dev_name(wb->bdi->dev), 32);
+		strncpy(__entry->name,	bdi_dev_name(wb->bdi), 32);
 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
 		__entry->frn_bdi_id	= frn_bdi_id;
 		__entry->frn_memcg_id	= frn_memcg_id;
@@ -318,7 +317,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
 
 	TP_fast_assign(
 		strscpy_pad(__entry->name,
-			    dev_name(inode_to_bdi(inode)->dev), 32);
+			    bdi_dev_name(inode_to_bdi(inode)), 32);
 		__entry->ino		= inode->i_ino;
 		__entry->sync_mode	= wbc->sync_mode;
 		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
@@ -361,9 +360,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
 		__field(ino_t, cgroup_ino)
 	),
 	TP_fast_assign(
-		strscpy_pad(__entry->name,
-			    wb->bdi->dev ? dev_name(wb->bdi->dev) :
-			    "(unknown)", 32);
+		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
 		__entry->nr_pages = work->nr_pages;
 		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
 		__entry->sync_mode = work->sync_mode;
@@ -416,7 +413,7 @@ DECLARE_EVENT_CLASS(writeback_class,
 		__field(ino_t, cgroup_ino)
 	),
 	TP_fast_assign(
-		strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
+		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
 		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
 	),
 	TP_printk("bdi %s: cgroup_ino=%lu",
@@ -438,7 +435,7 @@ TRACE_EVENT(writeback_bdi_register,
 		__array(char, name, 32)
 	),
 	TP_fast_assign(
-		strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
+		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
 	),
 	TP_printk("bdi %s",
 		__entry->name
@@ -463,7 +460,7 @@ DECLARE_EVENT_CLASS(wbc_class,
 	),
 
 	TP_fast_assign(
-		strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
+		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
 		__entry->nr_to_write	= wbc->nr_to_write;
 		__entry->pages_skipped	= wbc->pages_skipped;
 		__entry->sync_mode	= wbc->sync_mode;
@@ -514,7 +511,7 @@ TRACE_EVENT(writeback_queue_io,
 	),
 	TP_fast_assign(
 		unsigned long *older_than_this = work->older_than_this;
-		strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
+		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
 		__entry->older	= older_than_this ?  *older_than_this : 0;
 		__entry->age	= older_than_this ?
 				  (jiffies - *older_than_this) * 1000 / HZ : -1;
@@ -600,7 +597,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
 	),
 
 	TP_fast_assign(
-		strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
+		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
 		__entry->write_bw	= KBps(wb->write_bandwidth);
 		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
 		__entry->dirty_rate	= KBps(dirty_rate);
@@ -665,7 +662,7 @@ TRACE_EVENT(balance_dirty_pages,
 
 	TP_fast_assign(
 		unsigned long freerun = (thresh + bg_thresh) / 2;
-		strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
+		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
 
 		__entry->limit		= global_wb_domain.dirty_limit;
 		__entry->setpoint	= (global_wb_domain.dirty_limit +
@@ -726,7 +723,7 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
 
 	TP_fast_assign(
 		strscpy_pad(__entry->name,
-			    dev_name(inode_to_bdi(inode)->dev), 32);
+			    bdi_dev_name(inode_to_bdi(inode)), 32);
 		__entry->ino		= inode->i_ino;
 		__entry->state		= inode->i_state;
 		__entry->dirtied_when	= inode->dirtied_when;
@@ -800,7 +797,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
 
 	TP_fast_assign(
 		strscpy_pad(__entry->name,
-			    dev_name(inode_to_bdi(inode)->dev), 32);
+			    bdi_dev_name(inode_to_bdi(inode)), 32);
 		__entry->ino		= inode->i_ino;
 		__entry->state		= inode->i_state;
 		__entry->dirtied_when	= inode->dirtied_when;
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 96d77e5..502c7be 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -2,7 +2,8 @@
 /*
  * Stage 1 of the trace events.
  *
- * Override the macros in <trace/trace_events.h> to include the following:
+ * Override the macros in the event tracepoint header <trace/events/XXX.h>
+ * to include the following:
  *
  * struct trace_event_raw_<call> {
  *	struct trace_entry		ent;
@@ -223,7 +224,8 @@ TRACE_MAKE_SYSTEM_STR();
 /*
  * Stage 3 of the trace events.
  *
- * Override the macros in <trace/trace_events.h> to include the following:
+ * Override the macros in the event tracepoint header <trace/events/XXX.h>
+ * to include the following:
  *
  * enum print_line_t
  * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
@@ -533,7 +535,8 @@ static inline notrace int trace_event_get_offsets_##call(		\
 /*
  * Stage 4 of the trace events.
  *
- * Override the macros in <trace/trace_events.h> to include the following:
+ * Override the macros in the event tracepoint header <trace/events/XXX.h>
+ * to include the following:
  *
  * For those macros defined with TRACE_EVENT:
  *
@@ -548,7 +551,7 @@ static inline notrace int trace_event_get_offsets_##call(		\
  *	enum event_trigger_type __tt = ETT_NONE;
  *	struct ring_buffer_event *event;
  *	struct trace_event_raw_<call> *entry; <-- defined in stage 1
- *	struct ring_buffer *buffer;
+ *	struct trace_buffer *buffer;
  *	unsigned long irq_flags;
  *	int __data_size;
  *	int pc;
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 1fc8faa..3a3201e 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -851,8 +851,13 @@ __SYSCALL(__NR_pidfd_open, sys_pidfd_open)
 __SYSCALL(__NR_clone3, sys_clone3)
 #endif
 
+#define __NR_openat2 437
+__SYSCALL(__NR_openat2, sys_openat2)
+#define __NR_pidfd_getfd 438
+__SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd)
+
 #undef __NR_syscalls
-#define __NR_syscalls 436
+#define __NR_syscalls 439
 
 /*
  * 32 bit systems traditionally used different
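
The two new syscall numbers wire up openat2(2) and pidfd_getfd(2) in the asm-generic table. glibc does not wrap pidfd_getfd yet, so callers go through syscall(2); a minimal sketch (the helper name is illustrative, __NR_pidfd_getfd must come from sufficiently new installed headers, and the caller needs ptrace-attach permission over the target process):

  #include <sys/syscall.h>
  #include <unistd.h>

  /* Duplicate file descriptor 'targetfd' from the process referred to by
   * 'pidfd' (as returned by pidfd_open(2)) into the calling process.
   * The flags argument is currently reserved and must be zero. */
  static int grab_remote_fd(int pidfd, int targetfd)
  {
  	return syscall(__NR_pidfd_getfd, pidfd, targetfd, 0);
  }
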
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index bbdad86..ac38798 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -703,6 +703,9 @@ struct drm_amdgpu_cs_chunk_data {
 	/* Subquery id: Query DMCU firmware version */
 	#define AMDGPU_INFO_FW_DMCU		0x12
 	#define AMDGPU_INFO_FW_TA		0x13
+	/* Subquery id: Query DMCUB firmware version */
+	#define AMDGPU_INFO_FW_DMCUB		0x14
+
 /* number of bytes moved for TTM migration */
 #define AMDGPU_INFO_NUM_BYTES_MOVED		0x0f
 /* the used VRAM size */
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 8caaaf7f..8bc0b31 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -411,6 +411,30 @@ extern "C" {
 #define I915_FORMAT_MOD_Yf_TILED_CCS	fourcc_mod_code(INTEL, 5)
 
 /*
+ * Intel color control surfaces (CCS) for Gen-12 render compression.
+ *
+ * The main surface is Y-tiled and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * Y-tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS fourcc_mod_code(INTEL, 6)
+
+/*
+ * Intel color control surfaces (CCS) for Gen-12 media compression
+ *
+ * The main surface is Y-tiled and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * Y-tile widths. For semi-planar formats like NV12, the CCS planes follow
+ * the Y and UV planes, i.e. planes 0 and 1 are used for the Y and UV
+ * surfaces, and planes 2 and 3 for the respective CCS planes.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
+
+/*
  * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
  *
  * Macroblocks are laid in a Z-shape, and each pixel data is following the
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index 45c6582..a51aa1c 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -394,7 +394,7 @@ struct drm_exynos_ioctl_ipp_commit {
 #define DRM_IOCTL_EXYNOS_IPP_COMMIT		DRM_IOWR(DRM_COMMAND_BASE + \
 		DRM_EXYNOS_IPP_COMMIT, struct drm_exynos_ioctl_ipp_commit)
 
-/* EXYNOS specific events */
+/* Exynos specific events */
 #define DRM_EXYNOS_G2D_EVENT		0x80000000
 #define DRM_EXYNOS_IPP_EVENT		0x80000002
 
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 5400d7e..829c0a4 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -395,6 +395,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
 #define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
 #define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+#define DRM_IOCTL_I915_GEM_MMAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
 #define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
 #define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
 #define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
@@ -793,6 +794,37 @@ struct drm_i915_gem_mmap_gtt {
 	__u64 offset;
 };
 
+struct drm_i915_gem_mmap_offset {
+	/** Handle for the object being mapped. */
+	__u32 handle;
+	__u32 pad;
+	/**
+	 * Fake offset to use for subsequent mmap call
+	 *
+	 * This is a fixed-size type for 32/64 compatibility.
+	 */
+	__u64 offset;
+
+	/**
+	 * Flags for extended behaviour.
+	 *
+	 * One of the MMAP_OFFSET types (GTT, WC, WB, UC, etc.)
+	 * must be included.
+	 */
+	__u64 flags;
+#define I915_MMAP_OFFSET_GTT 0
+#define I915_MMAP_OFFSET_WC  1
+#define I915_MMAP_OFFSET_WB  2
+#define I915_MMAP_OFFSET_UC  3
+
+	/*
+	 * Zero-terminated chain of extensions.
+	 *
+	 * No current extensions defined; mbz.
+	 */
+	__u64 extensions;
+};
+
 struct drm_i915_gem_set_domain {
 	/** Handle for the object */
 	__u32 handle;
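
For context, mapping through the new ioctl is a two-step sequence: DRM_IOCTL_I915_GEM_MMAP_OFFSET returns a fake offset for the object, and a normal mmap() of the DRM fd at that offset creates the CPU mapping. A minimal userspace sketch, assuming the installed DRM uapi headers, a GEM handle obtained earlier (for example via DRM_IOCTL_I915_GEM_CREATE), and with error handling trimmed:

  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <drm/i915_drm.h>

  /* Map a GEM object write-combined via the new MMAP_OFFSET ioctl. */
  static void *i915_map_wc(int drm_fd, __u32 handle, size_t size)
  {
  	struct drm_i915_gem_mmap_offset arg;

  	memset(&arg, 0, sizeof(arg));
  	arg.handle = handle;
  	arg.flags = I915_MMAP_OFFSET_WC;	/* one MMAP_OFFSET type is required */

  	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
  		return MAP_FAILED;

  	/* The returned fake offset selects the object in the mmap() below. */
  	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
  		    drm_fd, arg.offset);
  }
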
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 9459a6e..853a327 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -110,6 +110,7 @@ struct drm_nouveau_gem_pushbuf {
 	__u64 push;
 	__u32 suffix0;
 	__u32 suffix1;
+#define NOUVEAU_GEM_PUSHBUF_SYNC                                    (1ULL << 0)
 	__u64 vram_available;
 	__u64 gart_available;
 };
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 02cab33..fcb741e 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -71,6 +71,7 @@ extern "C" {
 #define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
 #define DRM_VMW_GB_SURFACE_CREATE_EXT   27
 #define DRM_VMW_GB_SURFACE_REF_EXT      28
+#define DRM_VMW_MSG                     29
 
 /*************************************************************************/
 /**
@@ -1213,6 +1214,22 @@ union drm_vmw_gb_surface_reference_ext_arg {
 	struct drm_vmw_surface_arg req;
 };
 
+/**
+ * struct drm_vmw_msg_arg
+ *
+ * @send: Pointer to user-space msg string (null terminated).
+ * @receive: Pointer to user-space receive buffer.
+ * @send_only: Boolean indicating whether this only sends or also receives.
+ *
+ * Argument to the DRM_VMW_MSG ioctl.
+ */
+struct drm_vmw_msg_arg {
+	__u64 send;
+	__u64 receive;
+	__s32 send_only;
+	__u32 receive_len;
+};
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/include/uapi/linux/acct.h b/include/uapi/linux/acct.h
index 0e72172..985b890 100644
--- a/include/uapi/linux/acct.h
+++ b/include/uapi/linux/acct.h
@@ -49,6 +49,7 @@ struct acct
 	__u16		ac_uid16;		/* LSB of Real User ID */
 	__u16		ac_gid16;		/* LSB of Real Group ID */
 	__u16		ac_tty;			/* Control Terminal */
+	/* __u32 range means times from 1970 to 2106 */
 	__u32		ac_btime;		/* Process Creation Time */
 	comp_t		ac_utime;		/* User Time */
 	comp_t		ac_stime;		/* System Time */
@@ -81,6 +82,7 @@ struct acct_v3
 	__u32		ac_gid;			/* Real Group ID */
 	__u32		ac_pid;			/* Process ID */
 	__u32		ac_ppid;		/* Parent Process ID */
+	/* __u32 range means times from 1970 to 2106 */
 	__u32		ac_btime;		/* Process Creation Time */
 #ifdef __KERNEL__
 	__u32		ac_etime;		/* Elapsed Time */
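
The 1970-to-2106 range noted in the new comments follows directly from the field width: a __u32 holds 2^32 = 4,294,967,296 seconds, which is roughly 136 years, and 1970 + 136 lands in 2106.
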
diff --git a/include/uapi/linux/b1lli.h b/include/uapi/linux/b1lli.h
deleted file mode 100644
index 4ae6ac9..0000000
--- a/include/uapi/linux/b1lli.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/* $Id: b1lli.h,v 1.8.8.3 2001/09/23 22:25:05 kai Exp $
- *
- * ISDN lowlevel-module for AVM B1-card.
- *
- * Copyright 1996 by Carsten Paeth (calle@calle.in-berlin.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef _B1LLI_H_
-#define _B1LLI_H_
-/*
- * struct for loading t4 file 
- */
-typedef struct avmb1_t4file {
-	int len;
-	unsigned char *data;
-} avmb1_t4file;
-
-typedef struct avmb1_loaddef {
-	int contr;
-	avmb1_t4file t4file;
-} avmb1_loaddef;
-
-typedef struct avmb1_loadandconfigdef {
-	int contr;
-	avmb1_t4file t4file;
-        avmb1_t4file t4config; 
-} avmb1_loadandconfigdef;
-
-typedef struct avmb1_resetdef {
-	int contr;
-} avmb1_resetdef;
-
-typedef struct avmb1_getdef {
-	int contr;
-	int cardtype;
-	int cardstate;
-} avmb1_getdef;
-
-/*
- * struct for adding new cards 
- */
-typedef struct avmb1_carddef {
-	int port;
-	int irq;
-} avmb1_carddef;
-
-#define AVM_CARDTYPE_B1		0
-#define AVM_CARDTYPE_T1		1
-#define AVM_CARDTYPE_M1		2
-#define AVM_CARDTYPE_M2		3
-
-typedef struct avmb1_extcarddef {
-	int port;
-	int irq;
-        int cardtype;
-        int cardnr;  /* for HEMA/T1 */
-} avmb1_extcarddef;
-
-#define	AVMB1_LOAD		0	/* load image to card */
-#define AVMB1_ADDCARD		1	/* add a new card - OBSOLETE */
-#define AVMB1_RESETCARD		2	/* reset a card */
-#define	AVMB1_LOAD_AND_CONFIG	3	/* load image and config to card */
-#define	AVMB1_ADDCARD_WITH_TYPE	4	/* add a new card, with cardtype */
-#define AVMB1_GET_CARDINFO	5	/* get cardtype */
-#define AVMB1_REMOVECARD	6	/* remove a card - OBSOLETE */
-
-#define	AVMB1_REGISTERCARD_IS_OBSOLETE
-
-#endif				/* _B1LLI_H_ */
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 240fdb9..272dc69 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -301,6 +301,7 @@ struct vfs_ns_cap_data {
 /* Allow more than 64hz interrupts from the real-time clock */
 /* Override max number of consoles on console allocation */
 /* Override max number of keymaps */
+/* Control memory reclaim behavior */
 
 #define CAP_SYS_RESOURCE     24
 
diff --git a/include/uapi/linux/dma-heap.h b/include/uapi/linux/dma-heap.h
new file mode 100644
index 0000000..6f84fa0
--- /dev/null
+++ b/include/uapi/linux/dma-heap.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * DMABUF Heaps Userspace API
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+#ifndef _UAPI_LINUX_DMABUF_POOL_H
+#define _UAPI_LINUX_DMABUF_POOL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * DOC: DMABUF Heaps Userspace API
+ */
+
+/* Valid FD_FLAGS are O_CLOEXEC, O_RDONLY, O_WRONLY, O_RDWR */
+#define DMA_HEAP_VALID_FD_FLAGS (O_CLOEXEC | O_ACCMODE)
+
+/* Currently no heap flags */
+#define DMA_HEAP_VALID_HEAP_FLAGS (0)
+
+/**
+ * struct dma_heap_allocation_data - metadata passed from userspace for
+ *                                      allocations
+ * @len:		size of the allocation
+ * @fd:			will be populated with a fd which provides the
+ *			handle to the allocated dma-buf
+ * @fd_flags:		file descriptor flags used when allocating
+ * @heap_flags:		flags passed to heap
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct dma_heap_allocation_data {
+	__u64 len;
+	__u32 fd;
+	__u32 fd_flags;
+	__u64 heap_flags;
+};
+
+#define DMA_HEAP_IOC_MAGIC		'H'
+
+/**
+ * DOC: DMA_HEAP_IOCTL_ALLOC - allocate memory from pool
+ *
+ * Takes a dma_heap_allocation_data struct and returns it with the fd field
+ * populated with the dmabuf handle of the allocation.
+ */
+#define DMA_HEAP_IOCTL_ALLOC	_IOWR(DMA_HEAP_IOC_MAGIC, 0x0,\
+				      struct dma_heap_allocation_data)
+
+#endif /* _UAPI_LINUX_DMABUF_POOL_H */
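
For context, a userspace allocation goes through the per-heap character device: open the heap node, issue DMA_HEAP_IOCTL_ALLOC, and the dma-buf comes back in the fd field. A minimal sketch, assuming a heap node such as /dev/dma_heap/system exists (which heaps are exposed depends on the kernel configuration) and with error handling trimmed:

  #include <fcntl.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/dma-heap.h>

  /* Allocate 'len' bytes from a DMA-BUF heap and return the dma-buf fd. */
  static int dma_heap_alloc(const char *heap_path, size_t len)
  {
  	struct dma_heap_allocation_data data;
  	int heap_fd, ret;

  	heap_fd = open(heap_path, O_RDONLY | O_CLOEXEC);
  	if (heap_fd < 0)
  		return -1;

  	memset(&data, 0, sizeof(data));
  	data.len = len;
  	data.fd_flags = O_RDWR | O_CLOEXEC;	/* must be in DMA_HEAP_VALID_FD_FLAGS */
  	data.heap_flags = 0;			/* no heap flags defined yet */

  	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
  	close(heap_fd);
  	return ret ? -1 : (int)data.fd;		/* fd refers to the allocated dma-buf */
  }
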
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index 1f97b33..ca88b7b 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -3,6 +3,7 @@
 #define _UAPI_LINUX_FCNTL_H
 
 #include <asm/fcntl.h>
+#include <linux/openat2.h>
 
 #define F_SETLEASE	(F_LINUX_SPECIFIC_BASE + 0)
 #define F_GETLEASE	(F_LINUX_SPECIFIC_BASE + 1)
@@ -100,5 +101,4 @@
 
 #define AT_RECURSIVE		0x8000	/* Apply to the entire subtree */
 
-
 #endif /* _UAPI_LINUX_FCNTL_H */
diff --git a/include/uapi/linux/gigaset_dev.h b/include/uapi/linux/gigaset_dev.h
deleted file mode 100644
index 279551a..0000000
--- a/include/uapi/linux/gigaset_dev.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * interface to user space for the gigaset driver
- *
- * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de>
- *
- * =====================================================================
- *    This program is free software; you can redistribute it and/or
- *    modify it under the terms of the GNU General Public License as
- *    published by the Free Software Foundation; either version 2 of
- *    the License, or (at your option) any later version.
- * =====================================================================
- */
-
-#ifndef GIGASET_INTERFACE_H
-#define GIGASET_INTERFACE_H
-
-#include <linux/ioctl.h>
-
-/* The magic IOCTL value for this interface. */
-#define GIGASET_IOCTL 0x47
-
-/* enable/disable device control via character device (lock out ISDN subsys) */
-#define GIGASET_REDIR    _IOWR(GIGASET_IOCTL, 0, int)
-
-/* enable adapter configuration mode (M10x only) */
-#define GIGASET_CONFIG   _IOWR(GIGASET_IOCTL, 1, int)
-
-/* set break characters (M105 only) */
-#define GIGASET_BRKCHARS _IOW(GIGASET_IOCTL, 2, unsigned char[6])
-
-/* get version information selected by arg[0] */
-#define GIGASET_VERSION  _IOWR(GIGASET_IOCTL, 3, unsigned[4])
-/* values for GIGASET_VERSION arg[0] */
-#define GIGVER_DRIVER 0		/* get driver version */
-#define GIGVER_COMPAT 1		/* get interface compatibility version */
-#define GIGVER_FWBASE 2		/* get base station firmware version */
-
-#endif
diff --git a/include/uapi/linux/hysdn_if.h b/include/uapi/linux/hysdn_if.h
deleted file mode 100644
index 99f77c5..0000000
--- a/include/uapi/linux/hysdn_if.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/* $Id: hysdn_if.h,v 1.1.8.3 2001/09/23 22:25:05 kai Exp $
- *
- * Linux driver for HYSDN cards
- * ioctl definitions shared by hynetmgr and driver.
- *
- * Author    Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/****************/
-/* error values */
-/****************/
-#define ERR_NONE             0 /* no error occurred */
-#define ERR_ALREADY_BOOT  1000 /* we are already booting */
-#define EPOF_BAD_MAGIC    1001 /* bad magic in POF header */
-#define ERR_BOARD_DPRAM   1002 /* board DPRAM failed */
-#define EPOF_INTERNAL     1003 /* internal POF handler error */
-#define EPOF_BAD_IMG_SIZE 1004 /* POF boot image size invalid */
-#define ERR_BOOTIMG_FAIL  1005 /* 1. stage boot image did not start */
-#define ERR_BOOTSEQ_FAIL  1006 /* 2. stage boot seq handshake timeout */
-#define ERR_POF_TIMEOUT   1007 /* timeout waiting for card pof ready */
-#define ERR_NOT_BOOTED    1008 /* operation only allowed when booted */
-#define ERR_CONF_LONG     1009 /* conf line is too long */ 
-#define ERR_INV_CHAN      1010 /* invalid channel number */ 
-#define ERR_ASYNC_TIME    1011 /* timeout sending async data */ 
-
-
-
-
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 55cfcb7..3f7961c 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -34,21 +34,43 @@ struct io_uring_sqe {
 		__u32		timeout_flags;
 		__u32		accept_flags;
 		__u32		cancel_flags;
+		__u32		open_flags;
+		__u32		statx_flags;
+		__u32		fadvise_advice;
 	};
 	__u64	user_data;	/* data to be passed back at completion time */
 	union {
-		__u16	buf_index;	/* index into fixed buffers, if used */
+		struct {
+			/* index into fixed buffers, if used */
+			__u16	buf_index;
+			/* personality to use, if used */
+			__u16	personality;
+		};
 		__u64	__pad2[3];
 	};
 };
 
+enum {
+	IOSQE_FIXED_FILE_BIT,
+	IOSQE_IO_DRAIN_BIT,
+	IOSQE_IO_LINK_BIT,
+	IOSQE_IO_HARDLINK_BIT,
+	IOSQE_ASYNC_BIT,
+};
+
 /*
  * sqe->flags
  */
-#define IOSQE_FIXED_FILE	(1U << 0)	/* use fixed fileset */
-#define IOSQE_IO_DRAIN		(1U << 1)	/* issue after inflight IO */
-#define IOSQE_IO_LINK		(1U << 2)	/* links next sqe */
-#define IOSQE_IO_HARDLINK	(1U << 3)	/* like LINK, but stronger */
+/* use fixed fileset */
+#define IOSQE_FIXED_FILE	(1U << IOSQE_FIXED_FILE_BIT)
+/* issue after inflight IO */
+#define IOSQE_IO_DRAIN		(1U << IOSQE_IO_DRAIN_BIT)
+/* links next sqe */
+#define IOSQE_IO_LINK		(1U << IOSQE_IO_LINK_BIT)
+/* like LINK, but stronger */
+#define IOSQE_IO_HARDLINK	(1U << IOSQE_IO_HARDLINK_BIT)
+/* always go async */
+#define IOSQE_ASYNC		(1U << IOSQE_ASYNC_BIT)
 
 /*
  * io_uring_setup() flags
@@ -57,6 +79,8 @@ struct io_uring_sqe {
 #define IORING_SETUP_SQPOLL	(1U << 1)	/* SQ poll thread */
 #define IORING_SETUP_SQ_AFF	(1U << 2)	/* sq_thread_cpu is valid */
 #define IORING_SETUP_CQSIZE	(1U << 3)	/* app defines CQ size */
+#define IORING_SETUP_CLAMP	(1U << 4)	/* clamp SQ/CQ ring sizes */
+#define IORING_SETUP_ATTACH_WQ	(1U << 5)	/* attach to existing wq */
 
 enum {
 	IORING_OP_NOP,
@@ -76,6 +100,19 @@ enum {
 	IORING_OP_ASYNC_CANCEL,
 	IORING_OP_LINK_TIMEOUT,
 	IORING_OP_CONNECT,
+	IORING_OP_FALLOCATE,
+	IORING_OP_OPENAT,
+	IORING_OP_CLOSE,
+	IORING_OP_FILES_UPDATE,
+	IORING_OP_STATX,
+	IORING_OP_READ,
+	IORING_OP_WRITE,
+	IORING_OP_FADVISE,
+	IORING_OP_MADVISE,
+	IORING_OP_SEND,
+	IORING_OP_RECV,
+	IORING_OP_OPENAT2,
+	IORING_OP_EPOLL_CTL,
 
 	/* this goes last, obviously */
 	IORING_OP_LAST,
@@ -153,7 +190,8 @@ struct io_uring_params {
 	__u32 sq_thread_cpu;
 	__u32 sq_thread_idle;
 	__u32 features;
-	__u32 resv[4];
+	__u32 wq_fd;
+	__u32 resv[3];
 	struct io_sqring_offsets sq_off;
 	struct io_cqring_offsets cq_off;
 };
@@ -164,6 +202,8 @@ struct io_uring_params {
 #define IORING_FEAT_SINGLE_MMAP		(1U << 0)
 #define IORING_FEAT_NODROP		(1U << 1)
 #define IORING_FEAT_SUBMIT_STABLE	(1U << 2)
+#define IORING_FEAT_RW_CUR_POS		(1U << 3)
+#define IORING_FEAT_CUR_PERSONALITY	(1U << 4)
 
 /*
  * io_uring_register(2) opcodes and arguments
@@ -175,6 +215,10 @@ struct io_uring_params {
 #define IORING_REGISTER_EVENTFD		4
 #define IORING_UNREGISTER_EVENTFD	5
 #define IORING_REGISTER_FILES_UPDATE	6
+#define IORING_REGISTER_EVENTFD_ASYNC	7
+#define IORING_REGISTER_PROBE		8
+#define IORING_REGISTER_PERSONALITY	9
+#define IORING_UNREGISTER_PERSONALITY	10
 
 struct io_uring_files_update {
 	__u32 offset;
@@ -182,4 +226,21 @@ struct io_uring_files_update {
 	__aligned_u64 /* __s32 * */ fds;
 };
 
+#define IO_URING_OP_SUPPORTED	(1U << 0)
+
+struct io_uring_probe_op {
+	__u8 op;
+	__u8 resv;
+	__u16 flags;	/* IO_URING_OP_* flags */
+	__u32 resv2;
+};
+
+struct io_uring_probe {
+	__u8 last_op;	/* last opcode supported */
+	__u8 ops_len;	/* length of ops[] array below */
+	__u16 resv;
+	__u32 resv2[3];
+	struct io_uring_probe_op ops[0];
+};
+
 #endif
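
For context, the new IORING_REGISTER_PROBE operation lets an application discover at run time which request opcodes the running kernel supports, using the io_uring_probe structures above. A minimal sketch using the raw io_uring_register(2) syscall (the ring fd is assumed to come from a prior io_uring_setup(2) call, and __NR_io_uring_register must be provided by the installed headers):

  #include <stdio.h>
  #include <stdlib.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <linux/io_uring.h>

  /* Print every opcode the kernel reports as supported. */
  static void print_supported_ops(int ring_fd)
  {
  	struct io_uring_probe *probe;
  	size_t len = sizeof(*probe) + 256 * sizeof(struct io_uring_probe_op);

  	probe = calloc(1, len);
  	if (!probe)
  		return;

  	if (syscall(__NR_io_uring_register, ring_fd,
  		    IORING_REGISTER_PROBE, probe, 256) == 0) {
  		for (int i = 0; i < probe->ops_len; i++)
  			if (probe->ops[i].flags & IO_URING_OP_SUPPORTED)
  				printf("opcode %u supported\n", probe->ops[i].op);
  	}
  	free(probe);
  }
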
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index f0a16b4..4b95f9a 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1009,6 +1009,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_PPC_GUEST_DEBUG_SSTEP 176
 #define KVM_CAP_ARM_NISV_TO_USER 177
 #define KVM_CAP_ARM_INJECT_EXT_DABT 178
+#define KVM_CAP_S390_VCPU_RESETS 179
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1473,6 +1474,10 @@ struct kvm_enc_region {
 /* Available with KVM_CAP_ARM_SVE */
 #define KVM_ARM_VCPU_FINALIZE	  _IOW(KVMIO,  0xc2, int)
 
+/* Available with  KVM_CAP_S390_VCPU_RESETS */
+#define KVM_S390_NORMAL_RESET	_IO(KVMIO,   0xc3)
+#define KVM_S390_CLEAR_RESET	_IO(KVMIO,   0xc4)
+
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
 	/* Guest initialization commands */
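
For context, the two new s390 ioctls are issued on a vcpu fd and carry no payload. A minimal sketch (the vcpu fd is assumed to come from KVM_CREATE_VCPU, and availability should first be checked with KVM_CHECK_EXTENSION against KVM_CAP_S390_VCPU_RESETS):

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Architected "normal" reset of a single s390 vcpu; KVM_S390_CLEAR_RESET
   * performs the architected clear reset instead. */
  static int s390_vcpu_normal_reset(int vcpu_fd)
  {
  	return ioctl(vcpu_fd, KVM_S390_NORMAL_RESET, 0);
  }
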
diff --git a/include/uapi/linux/openat2.h b/include/uapi/linux/openat2.h
new file mode 100644
index 0000000..58b1eb7
--- /dev/null
+++ b/include/uapi/linux/openat2.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_OPENAT2_H
+#define _UAPI_LINUX_OPENAT2_H
+
+#include <linux/types.h>
+
+/*
+ * Arguments for how openat2(2) should open the target path. If only @flags and
+ * @mode are non-zero, then openat2(2) operates very similarly to openat(2).
+ *
+ * However, unlike openat(2), unknown or invalid bits in @flags result in
+ * -EINVAL rather than being silently ignored. @mode must be zero unless one of
+ * {O_CREAT, O_TMPFILE} are set.
+ *
+ * @flags: O_* flags.
+ * @mode: O_CREAT/O_TMPFILE file mode.
+ * @resolve: RESOLVE_* flags.
+ */
+struct open_how {
+	__u64 flags;
+	__u64 mode;
+	__u64 resolve;
+};
+
+/* how->resolve flags for openat2(2). */
+#define RESOLVE_NO_XDEV		0x01 /* Block mount-point crossings
+					(includes bind-mounts). */
+#define RESOLVE_NO_MAGICLINKS	0x02 /* Block traversal through procfs-style
+					"magic-links". */
+#define RESOLVE_NO_SYMLINKS	0x04 /* Block traversal through all symlinks
+					(implies RESOLVE_NO_MAGICLINKS). */
+#define RESOLVE_BENEATH		0x08 /* Block "lexical" trickery like
+					"..", symlinks, and absolute
+					paths which escape the dirfd. */
+#define RESOLVE_IN_ROOT		0x10 /* Make all jumps to "/" and ".."
+					be scoped inside the dirfd
+					(similar to chroot(2)). */
+
+#endif /* _UAPI_LINUX_OPENAT2_H */
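
For reference, a minimal hedged example of the new syscall from userspace: it assumes the toolchain headers provide __NR_openat2 and this <linux/openat2.h>, and simply opens a file relative to an O_PATH directory fd while refusing any resolution that escapes it.

  /* Minimal sketch: open "passwd" under an /etc dirfd with RESOLVE_BENEATH. */
  #include <fcntl.h>
  #include <linux/openat2.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int main(void)
  {
  	struct open_how how;
  	int dirfd, fd;

  	dirfd = open("/etc", O_PATH | O_DIRECTORY);
  	if (dirfd < 0) {
  		perror("open /etc");
  		return 1;
  	}

  	memset(&how, 0, sizeof(how));	/* unknown fields must be zero */
  	how.flags = O_RDONLY;
  	how.resolve = RESOLVE_BENEATH;	/* reject lookups escaping dirfd */

  	fd = syscall(__NR_openat2, dirfd, "passwd", &how, sizeof(how));
  	if (fd < 0)
  		perror("openat2");
  	else
  		close(fd);

  	close(dirfd);
  	return 0;
  }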
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index acb7d2b..5437690 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -676,6 +676,7 @@
 #define  PCI_EXP_LNKCTL2_TLS_32_0GT	0x0005 /* Supported Speed 32GT/s */
 #define  PCI_EXP_LNKCTL2_ENTER_COMP	0x0010 /* Enter Compliance */
 #define  PCI_EXP_LNKCTL2_TX_MARGIN	0x0380 /* Transmit Margin */
+#define  PCI_EXP_LNKCTL2_HASD		0x0020 /* HW Autonomous Speed Disable */
 #define PCI_EXP_LNKSTA2		50	/* Link Status 2 */
 #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2	52	/* v2 endpoints with link end here */
 #define PCI_EXP_SLTCAP2		52	/* Slot Capabilities 2 */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 7da1b37..07b4f81 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -234,4 +234,8 @@ struct prctl_mm_map {
 #define PR_GET_TAGGED_ADDR_CTRL		56
 # define PR_TAGGED_ADDR_ENABLE		(1UL << 0)
 
+/* Control reclaim behavior when allocating memory */
+#define PR_SET_IO_FLUSHER		57
+#define PR_GET_IO_FLUSHER		58
+
 #endif /* _LINUX_PRCTL_H */
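
A small, hedged usage sketch of the new prctl follows; PR_SET_IO_FLUSHER is understood to require CAP_SYS_RESOURCE and to mark the calling thread as an I/O flushing daemon, and PR_GET_IO_FLUSHER is assumed to report the current state via the prctl() return value. The fallback defines mirror the values above for older userspace headers.

  /* Minimal sketch: mark the calling thread as an I/O flusher and read it back. */
  #include <stdio.h>
  #include <sys/prctl.h>

  #ifndef PR_SET_IO_FLUSHER
  #define PR_SET_IO_FLUSHER	57
  #define PR_GET_IO_FLUSHER	58
  #endif

  int main(void)
  {
  	if (prctl(PR_SET_IO_FLUSHER, 1, 0, 0, 0) < 0) {
  		perror("PR_SET_IO_FLUSHER");
  		return 1;
  	}

  	printf("IO_FLUSHER state: %d\n", prctl(PR_GET_IO_FLUSHER, 0, 0, 0, 0));
  	return 0;
  }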
diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h
index 26ee913..dcc1b3e 100644
--- a/include/uapi/linux/random.h
+++ b/include/uapi/linux/random.h
@@ -48,9 +48,11 @@ struct rand_pool_info {
  * Flags for getrandom(2)
  *
  * GRND_NONBLOCK	Don't block and return EAGAIN instead
- * GRND_RANDOM		Use the /dev/random pool instead of /dev/urandom
+ * GRND_RANDOM		No effect
+ * GRND_INSECURE	Return non-cryptographic random bytes
  */
 #define GRND_NONBLOCK	0x0001
 #define GRND_RANDOM	0x0002
+#define GRND_INSECURE	0x0004
 
 #endif /* _UAPI_LINUX_RANDOM_H */
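
A short, hedged userspace example of the new flag: it uses the glibc getrandom() wrapper (available since glibc 2.25) and falls back to defining GRND_INSECURE with the value above when the installed headers predate it. Kernels older than this change will reject the flag with EINVAL.

  /* Minimal sketch: fetch bytes that never block, even before the pool is ready. */
  #include <stdio.h>
  #include <sys/random.h>

  #ifndef GRND_INSECURE
  #define GRND_INSECURE	0x0004
  #endif

  int main(void)
  {
  	unsigned char buf[16];
  	ssize_t n, i;

  	/* Never blocks, but bytes may not be cryptographically secure at early boot. */
  	n = getrandom(buf, sizeof(buf), GRND_INSECURE);
  	if (n < 0) {
  		perror("getrandom");
  		return 1;
  	}

  	for (i = 0; i < n; i++)
  		printf("%02x", buf[i]);
  	printf("\n");
  	return 0;
  }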
diff --git a/include/uapi/linux/rtc.h b/include/uapi/linux/rtc.h
index 2ad1788..095af36 100644
--- a/include/uapi/linux/rtc.h
+++ b/include/uapi/linux/rtc.h
@@ -92,7 +92,12 @@ struct rtc_pll_info {
 #define RTC_PLL_GET	_IOR('p', 0x11, struct rtc_pll_info)  /* Get PLL correction */
 #define RTC_PLL_SET	_IOW('p', 0x12, struct rtc_pll_info)  /* Set PLL correction */
 
-#define RTC_VL_READ	_IOR('p', 0x13, int)	/* Voltage low detector */
+#define RTC_VL_DATA_INVALID	BIT(0) /* Voltage too low, RTC data is invalid */
+#define RTC_VL_BACKUP_LOW	BIT(1) /* Backup voltage is low */
+#define RTC_VL_BACKUP_EMPTY	BIT(2) /* Backup empty or not present */
+#define RTC_VL_ACCURACY_LOW	BIT(3) /* Voltage is low, RTC accuracy is reduced */
+
+#define RTC_VL_READ	_IOR('p', 0x13, unsigned int)	/* Voltage low detection */
 #define RTC_VL_CLR	_IO('p', 0x14)		/* Clear voltage low information */
 
 /* interrupt flags */
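
A hedged sketch of reading the new voltage-low status bits from userspace follows. Since the RTC_VL_* definitions above use BIT(), which plain userspace builds may not have, the bit positions are spelled out in comments.

  /* Minimal sketch: query the RTC voltage-low status via the updated ioctl. */
  #include <fcntl.h>
  #include <linux/rtc.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <unistd.h>

  int main(void)
  {
  	unsigned int vl = 0;
  	int fd = open("/dev/rtc0", O_RDONLY);

  	if (fd < 0) {
  		perror("open /dev/rtc0");
  		return 1;
  	}

  	if (ioctl(fd, RTC_VL_READ, &vl) < 0) {
  		perror("RTC_VL_READ");
  		close(fd);
  		return 1;
  	}

  	if (vl & (1U << 0))	/* RTC_VL_DATA_INVALID */
  		printf("RTC data is invalid\n");
  	if (vl & (1U << 1))	/* RTC_VL_BACKUP_LOW */
  		printf("backup voltage is low\n");

  	close(fd);
  	return 0;
  }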
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index 23cd848..fa7f97d 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -4,6 +4,7 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <asm/bitsperlong.h>
 #include <asm/swab.h>
 
 /*
@@ -132,6 +133,15 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
 	__fswab64(x))
 #endif
 
+static __always_inline unsigned long __swab(const unsigned long y)
+{
+#if BITS_PER_LONG == 64
+	return __swab64(y);
+#else /* BITS_PER_LONG == 32 */
+	return __swab32(y);
+#endif
+}
+
 /**
  * __swahw32 - return a word-swapped 32-bit value
  * @x: value to wordswap
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h
index c912b5a..2c661a3 100644
--- a/include/uapi/linux/switchtec_ioctl.h
+++ b/include/uapi/linux/switchtec_ioctl.h
@@ -32,7 +32,18 @@
 #define SWITCHTEC_IOCTL_PART_VENDOR5	10
 #define SWITCHTEC_IOCTL_PART_VENDOR6	11
 #define SWITCHTEC_IOCTL_PART_VENDOR7	12
-#define SWITCHTEC_IOCTL_NUM_PARTITIONS	13
+#define SWITCHTEC_IOCTL_PART_BL2_0	13
+#define SWITCHTEC_IOCTL_PART_BL2_1	14
+#define SWITCHTEC_IOCTL_PART_MAP_0	15
+#define SWITCHTEC_IOCTL_PART_MAP_1	16
+#define SWITCHTEC_IOCTL_PART_KEY_0	17
+#define SWITCHTEC_IOCTL_PART_KEY_1	18
+
+#define SWITCHTEC_NUM_PARTITIONS_GEN3	13
+#define SWITCHTEC_NUM_PARTITIONS_GEN4	19
+
+/* obsolete: for compatibility with old userspace software */
+#define SWITCHTEC_IOCTL_NUM_PARTITIONS	SWITCHTEC_NUM_PARTITIONS_GEN3
 
 struct switchtec_ioctl_flash_info {
 	__u64 flash_length;
@@ -98,7 +109,9 @@ struct switchtec_ioctl_event_summary {
 #define SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT		27
 #define SWITCHTEC_IOCTL_EVENT_LINK_STATE		28
 #define SWITCHTEC_IOCTL_EVENT_GFMS			29
-#define SWITCHTEC_IOCTL_MAX_EVENTS			30
+#define SWITCHTEC_IOCTL_EVENT_INTERCOMM_REQ_NOTIFY	30
+#define SWITCHTEC_IOCTL_EVENT_UEC			31
+#define SWITCHTEC_IOCTL_MAX_EVENTS			32
 
 #define SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX -1
 #define SWITCHTEC_IOCTL_EVENT_IDX_ALL -2
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 87aa2a6..27c1ed2 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -195,7 +195,7 @@ enum
 	VM_MIN_UNMAPPED=32,	/* Set min percent of unmapped pages */
 	VM_PANIC_ON_OOM=33,	/* panic at out-of-memory */
 	VM_VDSO_ENABLED=34,	/* map VDSO into new processes? */
-	VM_MIN_SLAB=35,		 /* Percent pages ignored by zone reclaim */
+	VM_MIN_SLAB=35,		 /* Percent pages ignored by node reclaim */
 };
 
 
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index 5e8ca16..ccbd087 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -34,7 +34,7 @@
  */
 
 
-#define TASKSTATS_VERSION	9
+#define TASKSTATS_VERSION	10
 #define TS_COMM_LEN		32	/* should be >= TASK_COMM_LEN
 					 * in linux/sched.h */
 
@@ -112,6 +112,7 @@ struct taskstats {
 	__u32	ac_gid;			/* Group ID */
 	__u32	ac_pid;			/* Process ID */
 	__u32	ac_ppid;		/* Parent process ID */
+	/* __u32 range means times from 1970 to 2106 */
 	__u32	ac_btime;		/* Begin time [sec since 1970] */
 	__u64	ac_etime __attribute__((aligned(8)));
 					/* Elapsed time [usec] */
@@ -168,6 +169,9 @@ struct taskstats {
 	/* Delay waiting for thrashing page */
 	__u64	thrashing_count;
 	__u64	thrashing_delay_total;
+
+	/* v10: 64-bit btime to avoid overflow */
+	__u64	ac_btime64;		/* 64-bit begin time */
 };
 
 
diff --git a/include/uapi/linux/time_types.h b/include/uapi/linux/time_types.h
index 074e391..bcc0002 100644
--- a/include/uapi/linux/time_types.h
+++ b/include/uapi/linux/time_types.h
@@ -33,6 +33,11 @@ struct __kernel_old_timespec {
 	long			tv_nsec;	/* nanoseconds */
 };
 
+struct __kernel_old_itimerval {
+	struct __kernel_old_timeval it_interval;/* timer interval */
+	struct __kernel_old_timeval it_value;	/* current value */
+};
+
 struct __kernel_sock_timeval {
 	__s64 tv_sec;
 	__s64 tv_usec;
diff --git a/include/uapi/linux/timex.h b/include/uapi/linux/timex.h
index 9f517f9..bd627c36 100644
--- a/include/uapi/linux/timex.h
+++ b/include/uapi/linux/timex.h
@@ -57,6 +57,7 @@
 
 #define NTP_API		4	/* NTP API version */
 
+#ifndef __KERNEL__
 /*
  * syscall interface - used (mainly by NTP daemon)
  * to discipline kernel clock oscillator
@@ -91,6 +92,7 @@ struct timex {
 	int  :32; int  :32; int  :32; int  :32;
 	int  :32; int  :32; int  :32;
 };
+#endif
 
 struct __kernel_timex_timeval {
 	__kernel_time64_t       tv_sec;
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 04481c7..5f9357d 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -912,6 +912,25 @@ struct v4l2_jpegcompression {
 /*
  *	M E M O R Y - M A P P I N G   B U F F E R S
  */
+
+#ifdef __KERNEL__
+/*
+ * This corresponds to the user space version of timeval
+ * for 64-bit time_t. sparc64 is different from everyone
+ * else, using the microseconds in the wrong half of the
+ * second 64-bit word.
+ */
+struct __kernel_v4l2_timeval {
+	long long	tv_sec;
+#if defined(__sparc__) && defined(__arch64__)
+	int		tv_usec;
+	int		__pad;
+#else
+	long long	tv_usec;
+#endif
+};
+#endif
+
 struct v4l2_requestbuffers {
 	__u32			count;
 	__u32			type;		/* enum v4l2_buf_type */
@@ -997,7 +1016,11 @@ struct v4l2_buffer {
 	__u32			bytesused;
 	__u32			flags;
 	__u32			field;
+#ifdef __KERNEL__
+	struct __kernel_v4l2_timeval timestamp;
+#else
 	struct timeval		timestamp;
+#endif
 	struct v4l2_timecode	timecode;
 	__u32			sequence;
 
@@ -1017,6 +1040,7 @@ struct v4l2_buffer {
 	};
 };
 
+#ifndef __KERNEL__
 /**
  * v4l2_timeval_to_ns - Convert timeval to nanoseconds
  * @ts:		pointer to the timeval variable to be converted
@@ -1028,6 +1052,7 @@ static inline __u64 v4l2_timeval_to_ns(const struct timeval *tv)
 {
 	return (__u64)tv->tv_sec * 1000000000ULL + tv->tv_usec * 1000;
 }
+#endif
 
 /*  Flags for 'flags' field */
 /* Buffer is mapped (flag) */
@@ -2339,7 +2364,11 @@ struct v4l2_event {
 	} u;
 	__u32				pending;
 	__u32				sequence;
+#ifdef __KERNEL__
+	struct __kernel_timespec	timestamp;
+#else
 	struct timespec			timestamp;
+#endif
 	__u32				id;
 	__u32				reserved[8];
 };
diff --git a/include/uapi/misc/pvpanic.h b/include/uapi/misc/pvpanic.h
new file mode 100644
index 0000000..54b7485
--- /dev/null
+++ b/include/uapi/misc/pvpanic.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef __PVPANIC_H__
+#define __PVPANIC_H__
+
+#define PVPANIC_PANICKED	(1 << 0)
+#define PVPANIC_CRASH_LOADED	(1 << 1)
+
+#endif /* __PVPANIC_H__ */
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index 64f0e3a..d4ddbe4 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -56,6 +56,7 @@ enum uverbs_default_objects {
 	UVERBS_OBJECT_FLOW_ACTION,
 	UVERBS_OBJECT_DM,
 	UVERBS_OBJECT_COUNTERS,
+	UVERBS_OBJECT_ASYNC_EVENT,
 };
 
 enum {
@@ -67,6 +68,7 @@ enum uverbs_methods_device {
 	UVERBS_METHOD_INVOKE_WRITE,
 	UVERBS_METHOD_INFO_HANDLES,
 	UVERBS_METHOD_QUERY_PORT,
+	UVERBS_METHOD_GET_CONTEXT,
 };
 
 enum uverbs_attrs_invoke_write_cmd_attr_ids {
@@ -80,6 +82,11 @@ enum uverbs_attrs_query_port_cmd_attr_ids {
 	UVERBS_ATTR_QUERY_PORT_RESP,
 };
 
+enum uverbs_attrs_get_context_attr_ids {
+	UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
+	UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
+};
+
 enum uverbs_attrs_create_cq_cmd_attr_ids {
 	UVERBS_ATTR_CREATE_CQ_HANDLE,
 	UVERBS_ATTR_CREATE_CQ_CQE,
@@ -241,4 +248,12 @@ enum uverbs_attrs_flow_destroy_ids {
 	UVERBS_ATTR_DESTROY_FLOW_HANDLE,
 };
 
+enum uverbs_method_async_event {
+	UVERBS_METHOD_ASYNC_EVENT_ALLOC,
+};
+
+enum uverbs_attrs_async_event_create {
+	UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE,
+};
+
 #endif
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 9019b2d..a640bb8 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -41,6 +41,13 @@
 #define RDMA_UAPI_PTR(_type, _name)	__aligned_u64 _name
 #endif
 
+#define IB_UVERBS_ACCESS_OPTIONAL_FIRST (1 << 20)
+#define IB_UVERBS_ACCESS_OPTIONAL_LAST (1 << 29)
+
+enum ib_uverbs_core_support {
+	IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS = 1 << 0,
+};
+
 enum ib_uverbs_access_flags {
 	IB_UVERBS_ACCESS_LOCAL_WRITE = 1 << 0,
 	IB_UVERBS_ACCESS_REMOTE_WRITE = 1 << 1,
@@ -50,6 +57,11 @@ enum ib_uverbs_access_flags {
 	IB_UVERBS_ACCESS_ZERO_BASED = 1 << 5,
 	IB_UVERBS_ACCESS_ON_DEMAND = 1 << 6,
 	IB_UVERBS_ACCESS_HUGETLB = 1 << 7,
+
+	IB_UVERBS_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_OPTIONAL_FIRST,
+	IB_UVERBS_ACCESS_OPTIONAL_RANGE =
+		((IB_UVERBS_ACCESS_OPTIONAL_LAST << 1) - 1) &
+		~(IB_UVERBS_ACCESS_OPTIONAL_FIRST - 1)
 };
 
 enum ib_uverbs_query_port_cap_flags {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index 20d8830..afe7da6 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -115,6 +115,22 @@ enum mlx5_ib_devx_obj_methods {
 	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
 };
 
+enum mlx5_ib_var_alloc_attrs {
+	MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+	MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
+	MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
+	MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
+};
+
+enum mlx5_ib_var_obj_destroy_attrs {
+	MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_var_obj_methods {
+	MLX5_IB_METHOD_VAR_OBJ_ALLOC = (1U << UVERBS_ID_NS_SHIFT),
+	MLX5_IB_METHOD_VAR_OBJ_DESTROY,
+};
+
 enum mlx5_ib_devx_umem_reg_attrs {
 	MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
 	MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
@@ -156,6 +172,7 @@ enum mlx5_ib_objects {
 	MLX5_IB_OBJECT_FLOW_MATCHER,
 	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
 	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+	MLX5_IB_OBJECT_VAR,
 };
 
 enum mlx5_ib_flow_matcher_create_attrs {
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index c022ee2..a0b83c9 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -48,6 +48,18 @@ struct qedr_alloc_ucontext_req {
 	__u32 reserved;
 };
 
+#define QEDR_LDPM_MAX_SIZE	(8192)
+#define QEDR_EDPM_TRANS_SIZE	(64)
+
+enum qedr_rdma_dpm_type {
+	QEDR_DPM_TYPE_NONE		= 0,
+	QEDR_DPM_TYPE_ROCE_ENHANCED	= 1 << 0,
+	QEDR_DPM_TYPE_ROCE_LEGACY	= 1 << 1,
+	QEDR_DPM_TYPE_IWARP_LEGACY	= 1 << 2,
+	QEDR_DPM_TYPE_RESERVED		= 1 << 3,
+	QEDR_DPM_SIZES_SET		= 1 << 4,
+};
+
 struct qedr_alloc_ucontext_resp {
 	__aligned_u64 db_pa;
 	__u32 db_size;
@@ -59,10 +71,12 @@ struct qedr_alloc_ucontext_resp {
 	__u32 sges_per_recv_wr;
 	__u32 sges_per_srq_wr;
 	__u32 max_cqes;
-	__u8 dpm_enabled;
+	__u8 dpm_flags;
 	__u8 wids_enabled;
 	__u16 wid_count;
-	__u32 reserved;
+	__u16 ldpm_limit_size;
+	__u8 edpm_trans_size;
+	__u8 reserved;
 };
 
 struct qedr_alloc_pd_ureq {
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
index 9988db6..d55f217 100644
--- a/include/uapi/scsi/scsi_bsg_ufs.h
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -68,14 +68,13 @@ struct utp_upiu_cmd {
  * @header:UPIU header structure DW-0 to DW-2
  * @sc: fields structure for scsi command DW-3 to DW-7
  * @qr: fields structure for query request DW-3 to DW-7
+ * @uc: use utp_upiu_query to host the 4 dwords of the UIC command
  */
 struct utp_upiu_req {
 	struct utp_upiu_header header;
 	union {
 		struct utp_upiu_cmd		sc;
 		struct utp_upiu_query		qr;
-		struct utp_upiu_query		tr;
-		/* use utp_upiu_query to host the 4 dwords of uic command */
 		struct utp_upiu_query		uc;
 	};
 };
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 30ebb2a..535a722 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -564,13 +564,13 @@ typedef char __pad_after_uframe[sizeof(__u64) - sizeof(snd_pcm_uframes_t)];
 #endif
 
 struct __snd_pcm_mmap_status64 {
-	__s32 state;			/* RO: state - SNDRV_PCM_STATE_XXXX */
+	snd_pcm_state_t state;		/* RO: state - SNDRV_PCM_STATE_XXXX */
 	__u32 pad1;			/* Needed for 64 bit alignment */
 	__pad_before_uframe __pad1;
 	snd_pcm_uframes_t hw_ptr;	/* RO: hw ptr (0...boundary-1) */
 	__pad_after_uframe __pad2;
 	struct __snd_timespec64 tstamp;	/* Timestamp */
-	__s32 suspended_state;		/* RO: suspended stream state */
+	snd_pcm_state_t suspended_state;/* RO: suspended stream state */
 	__u32 pad3;			/* Needed for 64 bit alignment */
 	struct __snd_timespec64 audio_tstamp; /* sample counter or wall clock */
 };
diff --git a/include/video/mipi_display.h b/include/video/mipi_display.h
index cba57a6..b6d8b87 100644
--- a/include/video/mipi_display.h
+++ b/include/video/mipi_display.h
@@ -17,6 +17,9 @@ enum {
 	MIPI_DSI_H_SYNC_START				= 0x21,
 	MIPI_DSI_H_SYNC_END				= 0x31,
 
+	MIPI_DSI_COMPRESSION_MODE			= 0x07,
+	MIPI_DSI_END_OF_TRANSMISSION			= 0x08,
+
 	MIPI_DSI_COLOR_MODE_OFF				= 0x02,
 	MIPI_DSI_COLOR_MODE_ON				= 0x12,
 	MIPI_DSI_SHUTDOWN_PERIPHERAL			= 0x22,
@@ -34,19 +37,18 @@ enum {
 	MIPI_DSI_DCS_SHORT_WRITE_PARAM			= 0x15,
 
 	MIPI_DSI_DCS_READ				= 0x06,
-
-	MIPI_DSI_DCS_COMPRESSION_MODE                   = 0x07,
-	MIPI_DSI_PPS_LONG_WRITE                         = 0x0A,
+	MIPI_DSI_EXECUTE_QUEUE				= 0x16,
 
 	MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE		= 0x37,
 
-	MIPI_DSI_END_OF_TRANSMISSION			= 0x08,
-
 	MIPI_DSI_NULL_PACKET				= 0x09,
 	MIPI_DSI_BLANKING_PACKET			= 0x19,
 	MIPI_DSI_GENERIC_LONG_WRITE			= 0x29,
 	MIPI_DSI_DCS_LONG_WRITE				= 0x39,
 
+	MIPI_DSI_PICTURE_PARAMETER_SET			= 0x0a,
+	MIPI_DSI_COMPRESSED_PIXEL_STREAM		= 0x0b,
+
 	MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20	= 0x0c,
 	MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24		= 0x1c,
 	MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16		= 0x2c,
@@ -77,7 +79,9 @@ enum {
 enum {
 	MIPI_DCS_NOP			= 0x00,
 	MIPI_DCS_SOFT_RESET		= 0x01,
+	MIPI_DCS_GET_COMPRESSION_MODE	= 0x03,
 	MIPI_DCS_GET_DISPLAY_ID		= 0x04,
+	MIPI_DCS_GET_ERROR_COUNT_ON_DSI	= 0x05,
 	MIPI_DCS_GET_RED_CHANNEL	= 0x06,
 	MIPI_DCS_GET_GREEN_CHANNEL	= 0x07,
 	MIPI_DCS_GET_BLUE_CHANNEL	= 0x08,
@@ -92,6 +96,8 @@ enum {
 	MIPI_DCS_EXIT_SLEEP_MODE	= 0x11,
 	MIPI_DCS_ENTER_PARTIAL_MODE	= 0x12,
 	MIPI_DCS_ENTER_NORMAL_MODE	= 0x13,
+	MIPI_DCS_GET_IMAGE_CHECKSUM_RGB	= 0x14,
+	MIPI_DCS_GET_IMAGE_CHECKSUM_CT	= 0x15,
 	MIPI_DCS_EXIT_INVERT_MODE	= 0x20,
 	MIPI_DCS_ENTER_INVERT_MODE	= 0x21,
 	MIPI_DCS_SET_GAMMA_CURVE	= 0x26,
@@ -102,7 +108,8 @@ enum {
 	MIPI_DCS_WRITE_MEMORY_START	= 0x2C,
 	MIPI_DCS_WRITE_LUT		= 0x2D,
 	MIPI_DCS_READ_MEMORY_START	= 0x2E,
-	MIPI_DCS_SET_PARTIAL_AREA	= 0x30,
+	MIPI_DCS_SET_PARTIAL_ROWS	= 0x30,		/* MIPI DCS 1.02 - MIPI_DCS_SET_PARTIAL_AREA before that */
+	MIPI_DCS_SET_PARTIAL_COLUMNS	= 0x31,
 	MIPI_DCS_SET_SCROLL_AREA	= 0x33,
 	MIPI_DCS_SET_TEAR_OFF		= 0x34,
 	MIPI_DCS_SET_TEAR_ON		= 0x35,
@@ -112,7 +119,10 @@ enum {
 	MIPI_DCS_ENTER_IDLE_MODE	= 0x39,
 	MIPI_DCS_SET_PIXEL_FORMAT	= 0x3A,
 	MIPI_DCS_WRITE_MEMORY_CONTINUE	= 0x3C,
+	MIPI_DCS_SET_3D_CONTROL		= 0x3D,
 	MIPI_DCS_READ_MEMORY_CONTINUE	= 0x3E,
+	MIPI_DCS_GET_3D_CONTROL		= 0x3F,
+	MIPI_DCS_SET_VSYNC_TIMING	= 0x40,
 	MIPI_DCS_SET_TEAR_SCANLINE	= 0x44,
 	MIPI_DCS_GET_SCANLINE		= 0x45,
 	MIPI_DCS_SET_DISPLAY_BRIGHTNESS = 0x51,		/* MIPI DCS 1.3 */
@@ -124,7 +134,9 @@ enum {
 	MIPI_DCS_SET_CABC_MIN_BRIGHTNESS = 0x5E,	/* MIPI DCS 1.3 */
 	MIPI_DCS_GET_CABC_MIN_BRIGHTNESS = 0x5F,	/* MIPI DCS 1.3 */
 	MIPI_DCS_READ_DDB_START		= 0xA1,
+	MIPI_DCS_READ_PPS_START		= 0xA2,
 	MIPI_DCS_READ_DDB_CONTINUE	= 0xA8,
+	MIPI_DCS_READ_PPS_CONTINUE	= 0xA9,
 };
 
 /* MIPI DCS pixel formats */
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 24228a1..89a8895 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -76,6 +76,7 @@ struct xenbus_device {
 	enum xenbus_state state;
 	struct completion down;
 	struct work_struct work;
+	spinlock_t reclaim_lock;
 };
 
 static inline struct xenbus_device *to_xenbus_device(struct device *dev)
@@ -105,6 +106,7 @@ struct xenbus_driver {
 	struct device_driver driver;
 	int (*read_otherend_details)(struct xenbus_device *dev);
 	int (*is_ready)(struct xenbus_device *dev);
+	void (*reclaim_memory)(struct xenbus_device *dev);
 };
 
 static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
diff --git a/init/Kconfig b/init/Kconfig
index 24b23d8..cfee56c 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1224,6 +1224,20 @@
 
 endif
 
+config BOOT_CONFIG
+	bool "Boot config support"
+	depends on BLK_DEV_INITRD
+	select LIBXBC
+	default y
+	help
+	  Extra boot config allows a system admin to pass a config file as
+	  a supplemental extension of the kernel cmdline when booting.
+	  The boot config file must be attached at the end of initramfs
+	  with checksum and size.
+	  See <file:Documentation/admin-guide/bootconfig.rst> for details.
+
+	  If unsure, say Y.
+
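
For illustration only (this is not the in-tree tools/bootconfig utility), a userspace helper could append a boot config to an initrd in the trailer layout the kernel-side setup_boot_config() below expects: the config text followed by two native-endian 32-bit words holding the size and the simple additive checksum. The 32 KB buffer here is an arbitrary stand-in and carries no relation to XBC_DATA_MAX.

  /* Illustrative sketch: append <data><u32 size><u32 checksum> to an initrd. */
  #include <stdint.h>
  #include <stdio.h>

  static uint32_t sum_bytes(const unsigned char *p, uint32_t size)
  {
  	uint32_t ret = 0;

  	while (size--)
  		ret += *p++;
  	return ret;
  }

  int main(int argc, char **argv)
  {
  	unsigned char buf[32768];	/* sketch: assumes the config fits here */
  	FILE *cfg, *initrd;
  	uint32_t size, csum;

  	if (argc != 3) {
  		fprintf(stderr, "usage: %s <bootconfig> <initrd>\n", argv[0]);
  		return 1;
  	}

  	cfg = fopen(argv[1], "rb");
  	initrd = fopen(argv[2], "ab");
  	if (!cfg || !initrd)
  		return 1;

  	size = fread(buf, 1, sizeof(buf), cfg);
  	csum = sum_bytes(buf, size);

  	fwrite(buf, 1, size, initrd);
  	fwrite(&size, sizeof(size), 1, initrd);	/* native endianness */
  	fwrite(&csum, sizeof(csum), 1, initrd);

  	fclose(cfg);
  	fclose(initrd);
  	return 0;
  }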
 choice
 	prompt "Compiler optimization level"
 	default CC_OPTIMIZE_FOR_PERFORMANCE
diff --git a/init/main.c b/init/main.c
index da1bc0b..cc0ee48 100644
--- a/init/main.c
+++ b/init/main.c
@@ -28,6 +28,7 @@
 #include <linux/initrd.h>
 #include <linux/memblock.h>
 #include <linux/acpi.h>
+#include <linux/bootconfig.h>
 #include <linux/console.h>
 #include <linux/nmi.h>
 #include <linux/percpu.h>
@@ -63,7 +64,7 @@
 #include <linux/lockdep.h>
 #include <linux/kmemleak.h>
 #include <linux/pid_namespace.h>
-#include <linux/device.h>
+#include <linux/device/driver.h>
 #include <linux/kthread.h>
 #include <linux/sched.h>
 #include <linux/sched/init.h>
@@ -136,8 +137,10 @@ char __initdata boot_command_line[COMMAND_LINE_SIZE];
 char *saved_command_line;
 /* Command line for parameter parsing */
 static char *static_command_line;
-/* Command line for per-initcall parameter parsing */
-static char *initcall_command_line;
+/* Untouched extra command line */
+static char *extra_command_line;
+/* Extra init arguments */
+static char *extra_init_args;
 
 static char *execute_command;
 static char *ramdisk_execute_command;
@@ -245,9 +248,158 @@ static int __init loglevel(char *str)
 
 early_param("loglevel", loglevel);
 
+#ifdef CONFIG_BOOT_CONFIG
+
+char xbc_namebuf[XBC_KEYLEN_MAX] __initdata;
+
+#define rest(dst, end) ((end) > (dst) ? (end) - (dst) : 0)
+
+static int __init xbc_snprint_cmdline(char *buf, size_t size,
+				      struct xbc_node *root)
+{
+	struct xbc_node *knode, *vnode;
+	char *end = buf + size;
+	char c = '\"';
+	const char *val;
+	int ret;
+
+	xbc_node_for_each_key_value(root, knode, val) {
+		ret = xbc_node_compose_key_after(root, knode,
+					xbc_namebuf, XBC_KEYLEN_MAX);
+		if (ret < 0)
+			return ret;
+
+		vnode = xbc_node_get_child(knode);
+		ret = snprintf(buf, rest(buf, end), "%s%c", xbc_namebuf,
+				vnode ? '=' : ' ');
+		if (ret < 0)
+			return ret;
+		buf += ret;
+		if (!vnode)
+			continue;
+
+		c = '\"';
+		xbc_array_for_each_value(vnode, val) {
+			ret = snprintf(buf, rest(buf, end), "%c%s", c, val);
+			if (ret < 0)
+				return ret;
+			buf += ret;
+			c = ',';
+		}
+		if (rest(buf, end) > 2)
+			strcpy(buf, "\" ");
+		buf += 2;
+	}
+
+	return buf - (end - size);
+}
+#undef rest
+
+/* Make an extra command line under given key word */
+static char * __init xbc_make_cmdline(const char *key)
+{
+	struct xbc_node *root;
+	char *new_cmdline;
+	int ret, len = 0;
+
+	root = xbc_find_node(key);
+	if (!root)
+		return NULL;
+
+	/* Count required buffer size */
+	len = xbc_snprint_cmdline(NULL, 0, root);
+	if (len <= 0)
+		return NULL;
+
+	new_cmdline = memblock_alloc(len + 1, SMP_CACHE_BYTES);
+	if (!new_cmdline) {
+		pr_err("Failed to allocate memory for extra kernel cmdline.\n");
+		return NULL;
+	}
+
+	ret = xbc_snprint_cmdline(new_cmdline, len + 1, root);
+	if (ret < 0 || ret > len) {
+		pr_err("Failed to print extra kernel cmdline.\n");
+		return NULL;
+	}
+
+	return new_cmdline;
+}
+
+u32 boot_config_checksum(unsigned char *p, u32 size)
+{
+	u32 ret = 0;
+
+	while (size--)
+		ret += *p++;
+
+	return ret;
+}
+
+static void __init setup_boot_config(const char *cmdline)
+{
+	u32 size, csum;
+	char *data, *copy;
+	const char *p;
+	u32 *hdr;
+	int ret;
+
+	p = strstr(cmdline, "bootconfig");
+	if (!p || (p != cmdline && !isspace(*(p-1))) ||
+	    (p[10] && !isspace(p[10])))
+		return;
+
+	if (!initrd_end)
+		goto not_found;
+
+	hdr = (u32 *)(initrd_end - 8);
+	size = hdr[0];
+	csum = hdr[1];
+
+	if (size >= XBC_DATA_MAX) {
+		pr_err("bootconfig size %d greater than max size %d\n",
+			size, XBC_DATA_MAX);
+		return;
+	}
+
+	data = ((void *)hdr) - size;
+	if ((unsigned long)data < initrd_start)
+		goto not_found;
+
+	if (boot_config_checksum((unsigned char *)data, size) != csum) {
+		pr_err("bootconfig checksum failed\n");
+		return;
+	}
+
+	copy = memblock_alloc(size + 1, SMP_CACHE_BYTES);
+	if (!copy) {
+		pr_err("Failed to allocate memory for bootconfig\n");
+		return;
+	}
+
+	memcpy(copy, data, size);
+	copy[size] = '\0';
+
+	ret = xbc_init(copy);
+	if (ret < 0)
+		pr_err("Failed to parse bootconfig\n");
+	else {
+		pr_info("Load bootconfig: %d bytes %d nodes\n", size, ret);
+		/* keys starting with "kernel." are passed via cmdline */
+		extra_command_line = xbc_make_cmdline("kernel");
+		/* Also, "init." keys are init arguments */
+		extra_init_args = xbc_make_cmdline("init");
+	}
+	return;
+not_found:
+	pr_err("'bootconfig' found on command line, but no bootconfig found\n");
+}
+#else
+#define setup_boot_config(cmdline)	do { } while (0)
+#endif
+
 /* Change NUL term back to "=", to make "param" the whole string. */
-static int __init repair_env_string(char *param, char *val,
-				    const char *unused, void *arg)
+static void __init repair_env_string(char *param, char *val)
 {
 	if (val) {
 		/* param=val or param="val"? */
@@ -256,11 +408,9 @@ static int __init repair_env_string(char *param, char *val,
 		else if (val == param+strlen(param)+2) {
 			val[-2] = '=';
 			memmove(val-1, val, strlen(val)+1);
-			val--;
 		} else
 			BUG();
 	}
-	return 0;
 }
 
 /* Anything after -- gets handed straight to init. */
@@ -272,7 +422,7 @@ static int __init set_init_arg(char *param, char *val,
 	if (panic_later)
 		return 0;
 
-	repair_env_string(param, val, unused, NULL);
+	repair_env_string(param, val);
 
 	for (i = 0; argv_init[i]; i++) {
 		if (i == MAX_INIT_ARGS) {
@@ -292,14 +442,16 @@ static int __init set_init_arg(char *param, char *val,
 static int __init unknown_bootoption(char *param, char *val,
 				     const char *unused, void *arg)
 {
-	repair_env_string(param, val, unused, NULL);
+	size_t len = strlen(param);
+
+	repair_env_string(param, val);
 
 	/* Handle obsolete-style parameters */
 	if (obsolete_checksetup(param))
 		return 0;
 
 	/* Unused module parameter. */
-	if (strchr(param, '.') && (!val || strchr(param, '.') < val))
+	if (strnchr(param, len, '.'))
 		return 0;
 
 	if (panic_later)
@@ -313,7 +465,7 @@ static int __init unknown_bootoption(char *param, char *val,
 				panic_later = "env";
 				panic_param = param;
 			}
-			if (!strncmp(param, envp_init[i], val - param))
+			if (!strncmp(param, envp_init[i], len+1))
 				break;
 		}
 		envp_init[i] = param;
@@ -374,22 +526,50 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
  */
 static void __init setup_command_line(char *command_line)
 {
-	size_t len = strlen(boot_command_line) + 1;
+	size_t len, xlen = 0, ilen = 0;
 
-	saved_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
+	if (extra_command_line)
+		xlen = strlen(extra_command_line);
+	if (extra_init_args)
+		ilen = strlen(extra_init_args) + 4; /* for " -- " */
+
+	len = xlen + strlen(boot_command_line) + 1;
+
+	saved_command_line = memblock_alloc(len + ilen, SMP_CACHE_BYTES);
 	if (!saved_command_line)
-		panic("%s: Failed to allocate %zu bytes\n", __func__, len);
-
-	initcall_command_line =	memblock_alloc(len, SMP_CACHE_BYTES);
-	if (!initcall_command_line)
-		panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+		panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
 
 	static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
 	if (!static_command_line)
 		panic("%s: Failed to allocate %zu bytes\n", __func__, len);
 
-	strcpy(saved_command_line, boot_command_line);
-	strcpy(static_command_line, command_line);
+	if (xlen) {
+		/*
+		 * We have to put extra_command_line before the boot command
+		 * line because the boot command line may contain " -- "
+		 * (the separator for init command line arguments).
+		 */
+		strcpy(saved_command_line, extra_command_line);
+		strcpy(static_command_line, extra_command_line);
+	}
+	strcpy(saved_command_line + xlen, boot_command_line);
+	strcpy(static_command_line + xlen, command_line);
+
+	if (ilen) {
+		/*
+		 * Append supplemental init boot args to saved_command_line
+		 * so that the user can check what command line options were
+		 * passed to init.
+		 */
+		len = strlen(saved_command_line);
+		if (!strstr(boot_command_line, " -- ")) {
+			strcpy(saved_command_line + len, " -- ");
+			len += 4;
+		} else
+			saved_command_line[len++] = ' ';
+
+		strcpy(saved_command_line + len, extra_init_args);
+	}
 }
 
 /*
@@ -596,6 +776,7 @@ asmlinkage __visible void __init start_kernel(void)
 	pr_notice("%s", linux_banner);
 	early_security_init();
 	setup_arch(&command_line);
+	setup_boot_config(command_line);
 	setup_command_line(command_line);
 	setup_nr_cpu_ids();
 	setup_per_cpu_areas();
@@ -605,7 +786,7 @@ asmlinkage __visible void __init start_kernel(void)
 	build_all_zonelists(NULL);
 	page_alloc_init();
 
-	pr_notice("Kernel command line: %s\n", boot_command_line);
+	pr_notice("Kernel command line: %s\n", saved_command_line);
 	/* parameters may set static keys */
 	jump_label_init();
 	parse_early_param();
@@ -616,6 +797,9 @@ asmlinkage __visible void __init start_kernel(void)
 	if (!IS_ERR_OR_NULL(after_dashes))
 		parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
 			   NULL, set_init_arg);
+	if (extra_init_args)
+		parse_args("Setting extra init args", extra_init_args,
+			   NULL, 0, -1, -1, NULL, set_init_arg);
 
 	/*
 	 * These use large bootmem allocations and must precede
@@ -991,16 +1175,21 @@ static const char *initcall_level_names[] __initdata = {
 	"late",
 };
 
-static void __init do_initcall_level(int level)
+static int __init ignore_unknown_bootoption(char *param, char *val,
+			       const char *unused, void *arg)
+{
+	return 0;
+}
+
+static void __init do_initcall_level(int level, char *command_line)
 {
 	initcall_entry_t *fn;
 
-	strcpy(initcall_command_line, saved_command_line);
 	parse_args(initcall_level_names[level],
-		   initcall_command_line, __start___param,
+		   command_line, __start___param,
 		   __stop___param - __start___param,
 		   level, level,
-		   NULL, &repair_env_string);
+		   NULL, ignore_unknown_bootoption);
 
 	trace_initcall_level(initcall_level_names[level]);
 	for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
@@ -1010,9 +1199,20 @@ static void __init do_initcall_level(int level)
 static void __init do_initcalls(void)
 {
 	int level;
+	size_t len = strlen(saved_command_line) + 1;
+	char *command_line;
 
-	for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++)
-		do_initcall_level(level);
+	command_line = kzalloc(len, GFP_KERNEL);
+	if (!command_line)
+		panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+
+	for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) {
+		/* Parser modifies command_line, restore it each time */
+		strcpy(command_line, saved_command_line);
+		do_initcall_level(level, command_line);
+	}
+
+	kfree(command_line);
 }
 
 /*
@@ -1043,8 +1243,16 @@ static void __init do_pre_smp_initcalls(void)
 
 static int run_init_process(const char *init_filename)
 {
+	const char *const *p;
+
 	argv_init[0] = init_filename;
 	pr_info("Run %s as init process\n", init_filename);
+	pr_debug("  with arguments:\n");
+	for (p = argv_init; *p; p++)
+		pr_debug("    %s\n", *p);
+	pr_debug("  with environment:\n");
+	for (p = envp_init; *p; p++)
+		pr_debug("    %s\n", *p);
 	return do_execve(getname_kernel(init_filename),
 		(const char __user *const __user *)argv_init,
 		(const char __user *const __user *)envp_init);
@@ -1091,6 +1299,11 @@ static void mark_readonly(void)
 	} else
 		pr_info("Kernel memory protection disabled.\n");
 }
+#elif defined(CONFIG_ARCH_HAS_STRICT_KERNEL_RWX)
+static inline void mark_readonly(void)
+{
+	pr_warn("Kernel memory protection not selected by kernel config.\n");
+}
 #else
 static inline void mark_readonly(void)
 {
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 3d920ff..49a05ba 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -63,6 +63,66 @@ struct posix_msg_tree_node {
 	int			priority;
 };
 
+/*
+ * Locking:
+ *
+ * Accesses to a message queue are synchronized by acquiring info->lock.
+ *
+ * There are two notable exceptions:
+ * - The actual wakeup of a sleeping task is performed using the wake_q
+ *   framework. info->lock is already released when wake_up_q is called.
+ * - The exit codepaths after sleeping check ext_wait_queue->state without
+ *   any locks. If it is STATE_READY, then the syscall is completed without
+ *   acquiring info->lock.
+ *
+ * MQ_BARRIER:
+ * To achieve proper release/acquire memory barrier pairing, the state is set to
+ * STATE_READY with smp_store_release(), and it is read with READ_ONCE() followed
+ * by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe() is used.
+ *
+ * This prevents the following races:
+ *
+ * 1) With the simple wake_q_add(), the task could be gone already before
+ *    the increase of the reference happens
+ * Thread A
+ *				Thread B
+ * WRITE_ONCE(wait.state, STATE_NONE);
+ * schedule_hrtimeout()
+ *				wake_q_add(A)
+ *				if (cmpxchg()) // success
+ *				   ->state = STATE_READY (reordered)
+ * <timeout returns>
+ * if (wait.state == STATE_READY) return;
+ * sysret to user space
+ * sys_exit()
+ *				get_task_struct() // UaF
+ *
+ * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
+ * the smp_store_release() that does ->state = STATE_READY.
+ *
+ * 2) Without proper _release/_acquire barriers, the woken up task
+ *    could read stale data
+ *
+ * Thread A
+ *				Thread B
+ * do_mq_timedreceive
+ * WRITE_ONCE(wait.state, STATE_NONE);
+ * schedule_hrtimeout()
+ *				state = STATE_READY;
+ * <timeout returns>
+ * if (wait.state == STATE_READY) return;
+ * msg_ptr = wait.msg;		// Access to stale data!
+ *				receiver->msg = message; (reordered)
+ *
+ * Solution: use _release and _acquire barriers.
+ *
+ * 3) There is intentionally no barrier when setting current->state
+ *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
+ *    release memory barrier, and the wakeup is triggered when holding
+ *    info->lock, i.e. spin_lock(&info->lock) provides a pairing
+ *    acquire memory barrier.
+ */
+
 struct ext_wait_queue {		/* queue of sleeping tasks */
 	struct task_struct *task;
 	struct list_head list;
@@ -646,18 +706,23 @@ static int wq_sleep(struct mqueue_inode_info *info, int sr,
 	wq_add(info, sr, ewp);
 
 	for (;;) {
+		/* memory barrier not required, we hold info->lock */
 		__set_current_state(TASK_INTERRUPTIBLE);
 
 		spin_unlock(&info->lock);
 		time = schedule_hrtimeout_range_clock(timeout, 0,
 			HRTIMER_MODE_ABS, CLOCK_REALTIME);
 
-		if (ewp->state == STATE_READY) {
+		if (READ_ONCE(ewp->state) == STATE_READY) {
+			/* see MQ_BARRIER for purpose/pairing */
+			smp_acquire__after_ctrl_dep();
 			retval = 0;
 			goto out;
 		}
 		spin_lock(&info->lock);
-		if (ewp->state == STATE_READY) {
+
+		/* we hold info->lock, so no memory barrier required */
+		if (READ_ONCE(ewp->state) == STATE_READY) {
 			retval = 0;
 			goto out_unlock;
 		}
@@ -918,6 +983,18 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
  * The same algorithm is used for senders.
  */
 
+static inline void __pipelined_op(struct wake_q_head *wake_q,
+				  struct mqueue_inode_info *info,
+				  struct ext_wait_queue *this)
+{
+	list_del(&this->list);
+	get_task_struct(this->task);
+
+	/* see MQ_BARRIER for purpose/pairing */
+	smp_store_release(&this->state, STATE_READY);
+	wake_q_add_safe(wake_q, this->task);
+}
+
 /* pipelined_send() - send a message directly to the task waiting in
  * sys_mq_timedreceive() (without inserting message into a queue).
  */
@@ -927,17 +1004,7 @@ static inline void pipelined_send(struct wake_q_head *wake_q,
 				  struct ext_wait_queue *receiver)
 {
 	receiver->msg = message;
-	list_del(&receiver->list);
-	wake_q_add(wake_q, receiver->task);
-	/*
-	 * Rely on the implicit cmpxchg barrier from wake_q_add such
-	 * that we can ensure that updating receiver->state is the last
-	 * write operation: As once set, the receiver can continue,
-	 * and if we don't have the reference count from the wake_q,
-	 * yet, at that point we can later have a use-after-free
-	 * condition and bogus wakeup.
-	 */
-	receiver->state = STATE_READY;
+	__pipelined_op(wake_q, info, receiver);
 }
 
 /* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
@@ -955,9 +1022,7 @@ static inline void pipelined_receive(struct wake_q_head *wake_q,
 	if (msg_insert(sender->msg, info))
 		return;
 
-	list_del(&sender->list);
-	wake_q_add(wake_q, sender->task);
-	sender->state = STATE_READY;
+	__pipelined_op(wake_q, info, sender);
 }
 
 static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
@@ -1044,7 +1109,9 @@ static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
 		} else {
 			wait.task = current;
 			wait.msg = (void *) msg_ptr;
-			wait.state = STATE_NONE;
+
+			/* memory barrier not required, we hold info->lock */
+			WRITE_ONCE(wait.state, STATE_NONE);
 			ret = wq_sleep(info, SEND, timeout, &wait);
 			/*
 			 * wq_sleep must be called with info->lock held, and
@@ -1147,7 +1214,9 @@ static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
 			ret = -EAGAIN;
 		} else {
 			wait.task = current;
-			wait.state = STATE_NONE;
+
+			/* memory barrier not required, we hold info->lock */
+			WRITE_ONCE(wait.state, STATE_NONE);
 			ret = wq_sleep(info, RECV, timeout, &wait);
 			msg_ptr = wait.msg;
 		}
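
The MQ_BARRIER scheme above can be illustrated with a userspace analogy in C11 atomics (an analogy only, not the kernel primitives): the waker publishes the payload before a release store of STATE_READY, and the sleeper only touches the payload after observing STATE_READY with acquire semantics, which is what rules out the stale-data race in case 2.

  /* Userspace analogy of the MQ_BARRIER release/acquire pairing. */
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>

  enum { STATE_NONE, STATE_READY };

  static _Atomic int state = STATE_NONE;
  static int payload;				/* stands in for wait.msg */

  static void *waker(void *arg)
  {
  	(void)arg;
  	payload = 42;				/* receiver->msg = message; */
  	atomic_store_explicit(&state, STATE_READY,
  			      memory_order_release);	/* smp_store_release() */
  	return NULL;
  }

  int main(void)
  {
  	pthread_t t;

  	pthread_create(&t, NULL, waker, NULL);

  	/* READ_ONCE() + smp_acquire__after_ctrl_dep() analogue */
  	while (atomic_load_explicit(&state, memory_order_acquire) != STATE_READY)
  		;
  	printf("payload = %d\n", payload);	/* guaranteed to observe 42 */

  	pthread_join(t, NULL);
  	return 0;
  }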
diff --git a/ipc/msg.c b/ipc/msg.c
index 8dec945..caca673 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -61,6 +61,16 @@ struct msg_queue {
 	struct list_head q_senders;
 } __randomize_layout;
 
+/*
+ * MSG_BARRIER Locking:
+ *
+ * Similar to the optimization used in ipc/mqueue.c, one syscall return path
+ * does not acquire any locks when it sees that a message exists in
+ * msg_receiver.r_msg. Therefore r_msg is set using smp_store_release()
+ * and accessed using READ_ONCE()+smp_acquire__after_ctrl_dep(). In addition,
+ * wake_q_add_safe() is used. See ipc/mqueue.c for more details
+ */
+
 /* one msg_receiver structure for each sleeping receiver */
 struct msg_receiver {
 	struct list_head	r_list;
@@ -184,6 +194,10 @@ static inline void ss_add(struct msg_queue *msq,
 {
 	mss->tsk = current;
 	mss->msgsz = msgsz;
+	/*
+	 * No memory barrier required: we did ipc_lock_object(),
+	 * and the waker obtains that lock before calling wake_q_add().
+	 */
 	__set_current_state(TASK_INTERRUPTIBLE);
 	list_add_tail(&mss->list, &msq->q_senders);
 }
@@ -237,8 +251,11 @@ static void expunge_all(struct msg_queue *msq, int res,
 	struct msg_receiver *msr, *t;
 
 	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
-		wake_q_add(wake_q, msr->r_tsk);
-		WRITE_ONCE(msr->r_msg, ERR_PTR(res));
+		get_task_struct(msr->r_tsk);
+
+		/* see MSG_BARRIER for purpose/pairing */
+		smp_store_release(&msr->r_msg, ERR_PTR(res));
+		wake_q_add_safe(wake_q, msr->r_tsk);
 	}
 }
 
@@ -377,7 +394,7 @@ copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
  * NOTE: no locks must be held, the rwsem is taken inside this function.
  */
 static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
-			struct msqid64_ds *msqid64)
+			struct ipc64_perm *perm, int msg_qbytes)
 {
 	struct kern_ipc_perm *ipcp;
 	struct msg_queue *msq;
@@ -387,7 +404,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
 	rcu_read_lock();
 
 	ipcp = ipcctl_obtain_check(ns, &msg_ids(ns), msqid, cmd,
-				      &msqid64->msg_perm, msqid64->msg_qbytes);
+				      perm, msg_qbytes);
 	if (IS_ERR(ipcp)) {
 		err = PTR_ERR(ipcp);
 		goto out_unlock1;
@@ -409,18 +426,18 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
 	{
 		DEFINE_WAKE_Q(wake_q);
 
-		if (msqid64->msg_qbytes > ns->msg_ctlmnb &&
+		if (msg_qbytes > ns->msg_ctlmnb &&
 		    !capable(CAP_SYS_RESOURCE)) {
 			err = -EPERM;
 			goto out_unlock1;
 		}
 
 		ipc_lock_object(&msq->q_perm);
-		err = ipc_update_perm(&msqid64->msg_perm, ipcp);
+		err = ipc_update_perm(perm, ipcp);
 		if (err)
 			goto out_unlock0;
 
-		msq->q_qbytes = msqid64->msg_qbytes;
+		msq->q_qbytes = msg_qbytes;
 
 		msq->q_ctime = ktime_get_real_seconds();
 		/*
@@ -601,9 +618,10 @@ static long ksys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf, int ver
 	case IPC_SET:
 		if (copy_msqid_from_user(&msqid64, buf, version))
 			return -EFAULT;
-		/* fallthru */
+		return msgctl_down(ns, msqid, cmd, &msqid64.msg_perm,
+				   msqid64.msg_qbytes);
 	case IPC_RMID:
-		return msgctl_down(ns, msqid, cmd, &msqid64);
+		return msgctl_down(ns, msqid, cmd, NULL, 0);
 	default:
 		return  -EINVAL;
 	}
@@ -735,9 +753,9 @@ static long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr, int versio
 	case IPC_SET:
 		if (copy_compat_msqid_from_user(&msqid64, uptr, version))
 			return -EFAULT;
-		/* fallthru */
+		return msgctl_down(ns, msqid, cmd, &msqid64.msg_perm, msqid64.msg_qbytes);
 	case IPC_RMID:
-		return msgctl_down(ns, msqid, cmd, &msqid64);
+		return msgctl_down(ns, msqid, cmd, NULL, 0);
 	default:
 		return -EINVAL;
 	}
@@ -798,13 +816,17 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
 			list_del(&msr->r_list);
 			if (msr->r_maxsize < msg->m_ts) {
 				wake_q_add(wake_q, msr->r_tsk);
-				WRITE_ONCE(msr->r_msg, ERR_PTR(-E2BIG));
+
+				/* See expunge_all regarding memory barrier */
+				smp_store_release(&msr->r_msg, ERR_PTR(-E2BIG));
 			} else {
 				ipc_update_pid(&msq->q_lrpid, task_pid(msr->r_tsk));
 				msq->q_rtime = ktime_get_real_seconds();
 
 				wake_q_add(wake_q, msr->r_tsk);
-				WRITE_ONCE(msr->r_msg, msg);
+
+				/* See expunge_all regarding memory barrier */
+				smp_store_release(&msr->r_msg, msg);
 				return 1;
 			}
 		}
@@ -1154,7 +1176,11 @@ static long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, in
 			msr_d.r_maxsize = INT_MAX;
 		else
 			msr_d.r_maxsize = bufsz;
-		msr_d.r_msg = ERR_PTR(-EAGAIN);
+
+		/* memory barrier not required due to ipc_lock_object() */
+		WRITE_ONCE(msr_d.r_msg, ERR_PTR(-EAGAIN));
+
+		/* memory barrier not required, we own ipc_lock_object() */
 		__set_current_state(TASK_INTERRUPTIBLE);
 
 		ipc_unlock_object(&msq->q_perm);
@@ -1183,8 +1209,12 @@ static long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, in
 		 * signal) it will either see the message and continue ...
 		 */
 		msg = READ_ONCE(msr_d.r_msg);
-		if (msg != ERR_PTR(-EAGAIN))
+		if (msg != ERR_PTR(-EAGAIN)) {
+			/* see MSG_BARRIER for purpose/pairing */
+			smp_acquire__after_ctrl_dep();
+
 			goto out_unlock1;
+		}
 
 		 /*
 		  * ... or see -EAGAIN, acquire the lock to check the message
@@ -1192,7 +1222,7 @@ static long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, in
 		  */
 		ipc_lock_object(&msq->q_perm);
 
-		msg = msr_d.r_msg;
+		msg = READ_ONCE(msr_d.r_msg);
 		if (msg != ERR_PTR(-EAGAIN))
 			goto out_unlock0;
 
diff --git a/ipc/sem.c b/ipc/sem.c
index ec97a70..4f4303f 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -205,15 +205,38 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
  *
  * Memory ordering:
  * Most ordering is enforced by using spin_lock() and spin_unlock().
- * The special case is use_global_lock:
+ *
+ * Exceptions:
+ * 1) use_global_lock: (SEM_BARRIER_1)
  * Setting it from non-zero to 0 is a RELEASE, this is ensured by
- * using smp_store_release().
+ * using smp_store_release(): Immediately after setting it to 0,
+ * a simple op can start.
  * Testing if it is non-zero is an ACQUIRE, this is ensured by using
  * smp_load_acquire().
  * Setting it from 0 to non-zero must be ordered with regards to
  * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
  * is inside a spin_lock() and after a write from 0 to non-zero a
  * spin_lock()+spin_unlock() is done.
+ *
+ * 2) queue.status: (SEM_BARRIER_2)
+ * Initialization is done while holding sem_lock(), so no further barrier is
+ * required.
+ * Setting it to a result code is a RELEASE; this is ensured by
+ * smp_store_release() (for case a) and by holding sem_lock()
+ * (for case b).
+ * The ACQUIRE when reading the result code without holding sem_lock() is
+ * achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep().
+ * (case a above).
+ * Reading the result code while holding sem_lock() needs no further barriers;
+ * the locks inside sem_lock() enforce ordering (case b above).
+ *
+ * 3) current->state:
+ * current->state is set to TASK_INTERRUPTIBLE while holding sem_lock().
+ * The wakeup is handled using the wake_q infrastructure. wake_q wakeups may
+ * happen immediately after calling wake_q_add. As wake_q_add_safe() is called
+ * when holding sem_lock(), no further barriers are required.
+ *
+ * See also ipc/mqueue.c for more details on the covered races.
  */
 
 #define sc_semmsl	sem_ctls[0]
@@ -344,12 +367,8 @@ static void complexmode_tryleave(struct sem_array *sma)
 		return;
 	}
 	if (sma->use_global_lock == 1) {
-		/*
-		 * Immediately after setting use_global_lock to 0,
-		 * a simple op can start. Thus: all memory writes
-		 * performed by the current operation must be visible
-		 * before we set use_global_lock to 0.
-		 */
+
+		/* See SEM_BARRIER_1 for purpose/pairing */
 		smp_store_release(&sma->use_global_lock, 0);
 	} else {
 		sma->use_global_lock--;
@@ -400,7 +419,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 		 */
 		spin_lock(&sem->lock);
 
-		/* pairs with smp_store_release() */
+		/* see SEM_BARRIER_1 for purpose/pairing */
 		if (!smp_load_acquire(&sma->use_global_lock)) {
 			/* fast path successful! */
 			return sops->sem_num;
@@ -766,15 +785,12 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
 					     struct wake_q_head *wake_q)
 {
-	wake_q_add(wake_q, q->sleeper);
-	/*
-	 * Rely on the above implicit barrier, such that we can
-	 * ensure that we hold reference to the task before setting
-	 * q->status. Otherwise we could race with do_exit if the
-	 * task is awoken by an external event before calling
-	 * wake_up_process().
-	 */
-	WRITE_ONCE(q->status, error);
+	get_task_struct(q->sleeper);
+
+	/* see SEM_BARRIER_2 for purpose/pairing */
+	smp_store_release(&q->status, error);
+
+	wake_q_add_safe(wake_q, q->sleeper);
 }
 
 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
@@ -2148,9 +2164,11 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 	}
 
 	do {
+		/* memory ordering ensured by the lock in sem_lock() */
 		WRITE_ONCE(queue.status, -EINTR);
 		queue.sleeper = current;
 
+		/* memory ordering is ensured by the lock in sem_lock() */
 		__set_current_state(TASK_INTERRUPTIBLE);
 		sem_unlock(sma, locknum);
 		rcu_read_unlock();
@@ -2173,13 +2191,8 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 		 */
 		error = READ_ONCE(queue.status);
 		if (error != -EINTR) {
-			/*
-			 * User space could assume that semop() is a memory
-			 * barrier: Without the mb(), the cpu could
-			 * speculatively read in userspace stale data that was
-			 * overwritten by the previous owner of the semaphore.
-			 */
-			smp_mb();
+			/* see SEM_BARRIER_2 for purpose/pairing */
+			smp_acquire__after_ctrl_dep();
 			goto out_free;
 		}
 
@@ -2189,6 +2202,9 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 		if (!ipc_valid_object(&sma->sem_perm))
 			goto out_unlock_free;
 
+		/*
+		 * No necessity for any barrier: we are protected by sem_lock().
+		 */
 		error = READ_ONCE(queue.status);
 
 		/*
diff --git a/ipc/util.c b/ipc/util.c
index 915eacb..fe61df5 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -126,7 +126,7 @@ void ipc_init_ids(struct ipc_ids *ids)
 }
 
 #ifdef CONFIG_PROC_FS
-static const struct file_operations sysvipc_proc_fops;
+static const struct proc_ops sysvipc_proc_ops;
 /**
  * ipc_init_proc_interface -  create a proc interface for sysipc types using a seq_file interface.
  * @path: Path in procfs
@@ -151,7 +151,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
 	pde = proc_create_data(path,
 			       S_IRUGO,        /* world readable */
 			       NULL,           /* parent dir */
-			       &sysvipc_proc_fops,
+			       &sysvipc_proc_ops,
 			       iface);
 	if (!pde)
 		kfree(iface);
@@ -884,10 +884,10 @@ static int sysvipc_proc_release(struct inode *inode, struct file *file)
 	return seq_release_private(inode, file);
 }
 
-static const struct file_operations sysvipc_proc_fops = {
-	.open    = sysvipc_proc_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.release = sysvipc_proc_release,
+static const struct proc_ops sysvipc_proc_ops = {
+	.proc_open	= sysvipc_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= sysvipc_proc_release,
 };
 #endif /* CONFIG_PROC_FS */
diff --git a/kernel/Makefile b/kernel/Makefile
index f2cc0d1..4cb4130 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -27,6 +27,7 @@
 # and produce insane amounts of uninteresting coverage.
 KCOV_INSTRUMENT_module.o := n
 KCOV_INSTRUMENT_extable.o := n
+KCOV_INSTRUMENT_stacktrace.o := n
 # Don't self-instrument.
 KCOV_INSTRUMENT_kcov.o := n
 KASAN_SANITIZE_kcov.o := n
diff --git a/kernel/acct.c b/kernel/acct.c
index 81f9831..11ff4a5 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -416,6 +416,7 @@ static void fill_ac(acct_t *ac)
 {
 	struct pacct_struct *pacct = &current->signal->pacct;
 	u64 elapsed, run_time;
+	time64_t btime;
 	struct tty_struct *tty;
 
 	/*
@@ -448,7 +449,8 @@ static void fill_ac(acct_t *ac)
 	}
 #endif
 	do_div(elapsed, AHZ);
-	ac->ac_btime = get_seconds() - elapsed;
+	btime = ktime_get_real_seconds() - elapsed;
+	ac->ac_btime = clamp_t(time64_t, btime, 0, U32_MAX);
 #if ACCT_VERSION==2
 	ac->ac_ahz = AHZ;
 #endif
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 8ad1c9e..042f955 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -96,12 +96,11 @@ const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
 
 static const struct btf_type *module_type;
 
-void bpf_struct_ops_init(struct btf *btf)
+void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
 {
 	s32 type_id, value_id, module_id;
 	const struct btf_member *member;
 	struct bpf_struct_ops *st_ops;
-	struct bpf_verifier_log log = {};
 	const struct btf_type *t;
 	char value_name[128];
 	const char *mname;
@@ -172,7 +171,7 @@ void bpf_struct_ops_init(struct btf *btf)
 							       member->type,
 							       NULL);
 			if (func_proto &&
-			    btf_distill_func_proto(&log, btf,
+			    btf_distill_func_proto(log, btf,
 						   func_proto, mname,
 						   &st_ops->func_models[j])) {
 				pr_warn("Error in parsing func ptr %s in struct %s\n",
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index b7c1660..805c43b 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -3643,7 +3643,7 @@ struct btf *btf_parse_vmlinux(void)
 		goto errout;
 	}
 
-	bpf_struct_ops_init(btf);
+	bpf_struct_ops_init(btf, log);
 
 	btf_verifier_env_free(env);
 	refcount_set(&btf->refcnt, 1);
@@ -3931,6 +3931,7 @@ int btf_struct_access(struct bpf_verifier_log *log,
 
 		if (btf_type_is_ptr(mtype)) {
 			const struct btf_type *stype;
+			u32 id;
 
 			if (msize != size || off != moff) {
 				bpf_log(log,
@@ -3939,12 +3940,9 @@ int btf_struct_access(struct bpf_verifier_log *log,
 				return -EACCES;
 			}
 
-			stype = btf_type_by_id(btf_vmlinux, mtype->type);
-			/* skip modifiers */
-			while (btf_type_is_modifier(stype))
-				stype = btf_type_by_id(btf_vmlinux, stype->type);
+			stype = btf_type_skip_modifiers(btf_vmlinux, mtype->type, &id);
 			if (btf_type_is_struct(stype)) {
-				*next_btf_id = mtype->type;
+				*next_btf_id = id;
 				return PTR_TO_BTF_ID;
 			}
 		}
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index bd2fd8e..5e40e7f 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -569,16 +569,11 @@ enum {
 	OPT_MODE,
 };
 
-static const struct fs_parameter_spec bpf_param_specs[] = {
+static const struct fs_parameter_spec bpf_fs_parameters[] = {
 	fsparam_u32oct	("mode",			OPT_MODE),
 	{}
 };
 
-static const struct fs_parameter_description bpf_fs_parameters = {
-	.name		= "bpf",
-	.specs		= bpf_param_specs,
-};
-
 struct bpf_mount_opts {
 	umode_t mode;
 };
@@ -589,7 +584,7 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	struct fs_parse_result result;
 	int opt;
 
-	opt = fs_parse(fc, &bpf_fs_parameters, param, &result);
+	opt = fs_parse(fc, bpf_fs_parameters, param, &result);
 	if (opt < 0)
 		/* We might like to report bad mount options here, but
 		 * traditionally we've ignored all mount options, so we'd
@@ -665,7 +660,7 @@ static struct file_system_type bpf_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "bpf",
 	.init_fs_context = bpf_init_fs_context,
-	.parameters	= &bpf_fs_parameters,
+	.parameters	= bpf_fs_parameters,
 	.kill_sb	= kill_litter_super,
 };
 
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 5b9da09..2c5dc65 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -302,14 +302,14 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
 	struct inode *ns_inode;
 	struct path ns_path;
 	char __user *uinsns;
-	void *res;
+	int res;
 	u32 ulen;
 
 	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
-	if (IS_ERR(res)) {
+	if (res) {
 		if (!info->ifindex)
 			return -ENODEV;
-		return PTR_ERR(res);
+		return res;
 	}
 
 	down_read(&bpf_devs_lock);
@@ -526,13 +526,13 @@ int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
 	};
 	struct inode *ns_inode;
 	struct path ns_path;
-	void *res;
+	int res;
 
 	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
-	if (IS_ERR(res)) {
+	if (res) {
 		if (!info->ifindex)
 			return -ENODEV;
-		return PTR_ERR(res);
+		return res;
 	}
 
 	ns_inode = ns_path.dentry->d_inode;
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index 90d1710..bfbeabc 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -7,7 +7,7 @@
 #include <linux/workqueue.h>
 #include <linux/list.h>
 #include <linux/refcount.h>
-#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
 
 #define TRACE_CGROUP_PATH_LEN 1024
 extern spinlock_t trace_cgroup_path_lock;
@@ -265,7 +265,7 @@ extern const struct proc_ns_operations cgroupns_operations;
  */
 extern struct cftype cgroup1_base_files[];
 extern struct kernfs_syscall_ops cgroup1_kf_syscall_ops;
-extern const struct fs_parameter_description cgroup1_fs_parameters;
+extern const struct fs_parameter_spec cgroup1_fs_parameters[];
 
 int proc_cgroupstats_show(struct seq_file *m, void *v);
 bool cgroup1_ssid_disabled(int ssid);
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 09f3a41..be1a1c8 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -18,8 +18,6 @@
 
 #include <trace/events/cgroup.h>
 
-#define cg_invalf(fc, fmt, ...) invalf(fc, fmt, ## __VA_ARGS__)
-
 /*
  * pidlists linger the following amount before being destroyed.  The goal
  * is avoiding frequent destruction in the middle of consecutive read calls
@@ -887,7 +885,7 @@ enum cgroup1_param {
 	Opt_xattr,
 };
 
-static const struct fs_parameter_spec cgroup1_param_specs[] = {
+const struct fs_parameter_spec cgroup1_fs_parameters[] = {
 	fsparam_flag  ("all",		Opt_all),
 	fsparam_flag  ("clone_children", Opt_clone_children),
 	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
@@ -899,11 +897,6 @@ static const struct fs_parameter_spec cgroup1_param_specs[] = {
 	{}
 };
 
-const struct fs_parameter_description cgroup1_fs_parameters = {
-	.name		= "cgroup1",
-	.specs		= cgroup1_param_specs,
-};
-
 int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
 {
 	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
@@ -911,7 +904,7 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	struct fs_parse_result result;
 	int opt, i;
 
-	opt = fs_parse(fc, &cgroup1_fs_parameters, param, &result);
+	opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
 	if (opt == -ENOPARAM) {
 		if (strcmp(param->key, "source") == 0) {
 			fc->source = param->string;
@@ -924,7 +917,7 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
 			ctx->subsys_mask |= (1 << i);
 			return 0;
 		}
-		return cg_invalf(fc, "cgroup1: Unknown subsys name '%s'", param->key);
+		return invalfc(fc, "Unknown subsys name '%s'", param->key);
 	}
 	if (opt < 0)
 		return opt;
@@ -952,7 +945,7 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	case Opt_release_agent:
 		/* Specifying two release agents is forbidden */
 		if (ctx->release_agent)
-			return cg_invalf(fc, "cgroup1: release_agent respecified");
+			return invalfc(fc, "release_agent respecified");
 		ctx->release_agent = param->string;
 		param->string = NULL;
 		break;
@@ -962,9 +955,9 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
 			return -ENOENT;
 		/* Can't specify an empty name */
 		if (!param->size)
-			return cg_invalf(fc, "cgroup1: Empty name");
+			return invalfc(fc, "Empty name");
 		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
-			return cg_invalf(fc, "cgroup1: Name too long");
+			return invalfc(fc, "Name too long");
 		/* Must match [\w.-]+ */
 		for (i = 0; i < param->size; i++) {
 			char c = param->string[i];
@@ -972,11 +965,11 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
 				continue;
 			if ((c == '.') || (c == '-') || (c == '_'))
 				continue;
-			return cg_invalf(fc, "cgroup1: Invalid name");
+			return invalfc(fc, "Invalid name");
 		}
 		/* Specifying two names is forbidden */
 		if (ctx->name)
-			return cg_invalf(fc, "cgroup1: name respecified");
+			return invalfc(fc, "name respecified");
 		ctx->name = param->string;
 		param->string = NULL;
 		break;
@@ -1011,7 +1004,7 @@ static int check_cgroupfs_options(struct fs_context *fc)
 	if (ctx->all_ss) {
 		/* Mutually exclusive option 'all' + subsystem name */
 		if (ctx->subsys_mask)
-			return cg_invalf(fc, "cgroup1: subsys name conflicts with all");
+			return invalfc(fc, "subsys name conflicts with all");
 		/* 'all' => select all the subsystems */
 		ctx->subsys_mask = enabled;
 	}
@@ -1021,7 +1014,7 @@ static int check_cgroupfs_options(struct fs_context *fc)
 	 * empty hierarchies must have a name).
 	 */
 	if (!ctx->subsys_mask && !ctx->name)
-		return cg_invalf(fc, "cgroup1: Need name or subsystem set");
+		return invalfc(fc, "Need name or subsystem set");
 
 	/*
 	 * Option noprefix was introduced just for backward compatibility
@@ -1029,11 +1022,11 @@ static int check_cgroupfs_options(struct fs_context *fc)
 	 * the cpuset subsystem.
 	 */
 	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
-		return cg_invalf(fc, "cgroup1: noprefix used incorrectly");
+		return invalfc(fc, "noprefix used incorrectly");
 
 	/* Can't specify "none" and some subsystems */
 	if (ctx->subsys_mask && ctx->none)
-		return cg_invalf(fc, "cgroup1: none used incorrectly");
+		return invalfc(fc, "none used incorrectly");
 
 	return 0;
 }
@@ -1063,7 +1056,7 @@ int cgroup1_reconfigure(struct fs_context *fc)
 	/* Don't allow flags or name to change at remount */
 	if ((ctx->flags ^ root->flags) ||
 	    (ctx->name && strcmp(ctx->name, root->name))) {
-		cg_invalf(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
+		errorfc(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
 		       ctx->flags, ctx->name ?: "", root->flags, root->name);
 		ret = -EINVAL;
 		goto out_unlock;
@@ -1180,7 +1173,7 @@ static int cgroup1_root_to_use(struct fs_context *fc)
 	 * can't create new one without subsys specification.
 	 */
 	if (!ctx->subsys_mask && !ctx->none)
-		return cg_invalf(fc, "cgroup1: No subsys list or none specified");
+		return invalfc(fc, "No subsys list or none specified");
 
 	/* Hierarchies may only be created in the initial cgroup namespace. */
 	if (ctx->ns != &init_cgroup_ns)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index b3744872..db552b9 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1816,24 +1816,19 @@ enum cgroup2_param {
 	nr__cgroup2_params
 };
 
-static const struct fs_parameter_spec cgroup2_param_specs[] = {
+static const struct fs_parameter_spec cgroup2_fs_parameters[] = {
 	fsparam_flag("nsdelegate",		Opt_nsdelegate),
 	fsparam_flag("memory_localevents",	Opt_memory_localevents),
 	{}
 };
 
-static const struct fs_parameter_description cgroup2_fs_parameters = {
-	.name		= "cgroup2",
-	.specs		= cgroup2_param_specs,
-};
-
 static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param)
 {
 	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
 	struct fs_parse_result result;
 	int opt;
 
-	opt = fs_parse(fc, &cgroup2_fs_parameters, param, &result);
+	opt = fs_parse(fc, cgroup2_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
 
@@ -2156,7 +2151,7 @@ static void cgroup_kill_sb(struct super_block *sb)
 struct file_system_type cgroup_fs_type = {
 	.name			= "cgroup",
 	.init_fs_context	= cgroup_init_fs_context,
-	.parameters		= &cgroup1_fs_parameters,
+	.parameters		= cgroup1_fs_parameters,
 	.kill_sb		= cgroup_kill_sb,
 	.fs_flags		= FS_USERNS_MOUNT,
 };
@@ -2164,7 +2159,7 @@ struct file_system_type cgroup_fs_type = {
 static struct file_system_type cgroup2_fs_type = {
 	.name			= "cgroup2",
 	.init_fs_context	= cgroup_init_fs_context,
-	.parameters		= &cgroup2_fs_parameters,
+	.parameters		= cgroup2_fs_parameters,
 	.kill_sb		= cgroup_kill_sb,
 	.fs_flags		= FS_USERNS_MOUNT,
 };
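
Note: the cgroup2 mount options parsed above ("nsdelegate", "memory_localevents") are unchanged from userspace; only the parser tables move from a wrapping fs_parameter_description to bare fs_parameter_spec arrays. A minimal sketch of exercising them, assuming root privileges and that the mount point /mnt/cg2 (illustrative) already exists:

/* Hedged sketch: mount cgroup2 with the flags handled by
 * cgroup2_parse_param() above. /mnt/cg2 is an illustrative path. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "nsdelegate" and "memory_localevents" match the fsparam_flag()
	 * entries in cgroup2_fs_parameters. */
	if (mount("none", "/mnt/cg2", "cgroup2", 0,
		  "nsdelegate,memory_localevents") != 0) {
		perror("mount cgroup2");
		return 1;
	}
	puts("cgroup2 mounted with nsdelegate,memory_localevents");
	return 0;
}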
diff --git a/kernel/configs.c b/kernel/configs.c
index c09ea4c..a28c79c 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -47,10 +47,9 @@ ikconfig_read_current(struct file *file, char __user *buf,
 				       &kernel_config_data);
 }
 
-static const struct file_operations ikconfig_file_ops = {
-	.owner = THIS_MODULE,
-	.read = ikconfig_read_current,
-	.llseek = default_llseek,
+static const struct proc_ops config_gz_proc_ops = {
+	.proc_read	= ikconfig_read_current,
+	.proc_lseek	= default_llseek,
 };
 
 static int __init ikconfig_init(void)
@@ -59,7 +58,7 @@ static int __init ikconfig_init(void)
 
 	/* create the current config file */
 	entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
-			    &ikconfig_file_ops);
+			    &config_gz_proc_ops);
 	if (!entry)
 		return -ENOMEM;
 
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index 62c301a..d7ebb2c 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -412,7 +412,6 @@ static int kdb_bc(int argc, const char **argv)
 		 * assume that the breakpoint number is desired.
 		 */
 		if (addr < KDB_MAXBPT) {
-			bp = &kdb_breakpoints[addr];
 			lowbp = highbp = addr;
 			highbp++;
 		} else {
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
index 4af48ac..3de0cc7 100644
--- a/kernel/debug/kdb/kdb_bt.c
+++ b/kernel/debug/kdb/kdb_bt.c
@@ -119,7 +119,6 @@ kdb_bt_cpu(unsigned long cpu)
 		return;
 	}
 
-	kdb_set_current_task(kdb_tsk);
 	kdb_bt1(kdb_tsk, ~0UL, false);
 }
 
@@ -166,10 +165,8 @@ kdb_bt(int argc, const char **argv)
 		if (diag)
 			return diag;
 		p = find_task_by_pid_ns(pid, &init_pid_ns);
-		if (p) {
-			kdb_set_current_task(p);
+		if (p)
 			return kdb_bt1(p, ~0UL, false);
-		}
 		kdb_printf("No process with pid == %ld found\n", pid);
 		return 0;
 	} else if (strcmp(argv[0], "btt") == 0) {
@@ -178,11 +175,9 @@ kdb_bt(int argc, const char **argv)
 		diag = kdbgetularg((char *)argv[1], &addr);
 		if (diag)
 			return diag;
-		kdb_set_current_task((struct task_struct *)addr);
 		return kdb_bt1((struct task_struct *)addr, ~0UL, false);
 	} else if (strcmp(argv[0], "btc") == 0) {
 		unsigned long cpu = ~0;
-		struct task_struct *save_current_task = kdb_current_task;
 		if (argc > 1)
 			return KDB_ARGCOUNT;
 		if (argc == 1) {
@@ -204,7 +199,6 @@ kdb_bt(int argc, const char **argv)
 				kdb_bt_cpu(cpu);
 				touch_nmi_watchdog();
 			}
-			kdb_set_current_task(save_current_task);
 		}
 		return 0;
 	} else {
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 8bcdded..924bc92 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -553,7 +553,7 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
 	int this_cpu, old_cpu;
 	char *cp, *cp2, *cphold = NULL, replaced_byte = ' ';
 	char *moreprompt = "more> ";
-	struct console *c = console_drivers;
+	struct console *c;
 	unsigned long uninitialized_var(flags);
 
 	/* Serialize kdb_printf if multiple cpus try to write at once.
@@ -698,10 +698,9 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
 				cp2++;
 			}
 		}
-		while (c) {
+		for_each_console(c) {
 			c->write(c, cp, retlen - (cp - kdb_buffer));
 			touch_nmi_watchdog();
-			c = c->next;
 		}
 	}
 	if (logging) {
@@ -752,7 +751,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
 			moreprompt = "more> ";
 
 		kdb_input_flush();
-		c = console_drivers;
 
 		if (dbg_io_ops && !dbg_io_ops->is_console) {
 			len = strlen(moreprompt);
@@ -762,10 +760,9 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
 				cp++;
 			}
 		}
-		while (c) {
+		for_each_console(c) {
 			c->write(c, moreprompt, strlen(moreprompt));
 			touch_nmi_watchdog();
-			c = c->next;
 		}
 
 		if (logging)
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 4567fe9..ba12e9f 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -73,7 +73,6 @@ int kdb_nextline = 1;
 int kdb_state;			/* General KDB state */
 
 struct task_struct *kdb_current_task;
-EXPORT_SYMBOL(kdb_current_task);
 struct pt_regs *kdb_current_regs;
 
 const char *kdb_diemsg;
@@ -1139,7 +1138,7 @@ static void kdb_dumpregs(struct pt_regs *regs)
 	console_loglevel = old_lvl;
 }
 
-void kdb_set_current_task(struct task_struct *p)
+static void kdb_set_current_task(struct task_struct *p)
 {
 	kdb_current_task = p;
 
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 55d0520..2e296e4 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -240,8 +240,8 @@ extern void *debug_kmalloc(size_t size, gfp_t flags);
 extern void debug_kfree(void *);
 extern void debug_kusage(void);
 
-extern void kdb_set_current_task(struct task_struct *);
 extern struct task_struct *kdb_current_task;
+extern struct pt_regs *kdb_current_regs;
 
 #ifdef CONFIG_KDB_KEYBOARD
 extern void kdb_kbd_cleanup_state(void);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index fdb7f7e..e453589 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4374,7 +4374,7 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void ring_buffer_attach(struct perf_event *event,
-			       struct ring_buffer *rb);
+			       struct perf_buffer *rb);
 
 static void detach_sb_event(struct perf_event *event)
 {
@@ -5055,7 +5055,7 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 static __poll_t perf_poll(struct file *file, poll_table *wait)
 {
 	struct perf_event *event = file->private_data;
-	struct ring_buffer *rb;
+	struct perf_buffer *rb;
 	__poll_t events = EPOLLHUP;
 
 	poll_wait(file, &event->waitq, wait);
@@ -5297,7 +5297,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
 		return perf_event_set_bpf_prog(event, arg);
 
 	case PERF_EVENT_IOC_PAUSE_OUTPUT: {
-		struct ring_buffer *rb;
+		struct perf_buffer *rb;
 
 		rcu_read_lock();
 		rb = rcu_dereference(event->rb);
@@ -5433,7 +5433,7 @@ static void calc_timer_values(struct perf_event *event,
 static void perf_event_init_userpage(struct perf_event *event)
 {
 	struct perf_event_mmap_page *userpg;
-	struct ring_buffer *rb;
+	struct perf_buffer *rb;
 
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
@@ -5465,7 +5465,7 @@ void __weak arch_perf_update_userpage(
 void perf_event_update_userpage(struct perf_event *event)
 {
 	struct perf_event_mmap_page *userpg;
-	struct ring_buffer *rb;
+	struct perf_buffer *rb;
 	u64 enabled, running, now;
 
 	rcu_read_lock();
@@ -5516,7 +5516,7 @@ EXPORT_SYMBOL_GPL(perf_event_update_userpage);
 static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
 {
 	struct perf_event *event = vmf->vma->vm_file->private_data;
-	struct ring_buffer *rb;
+	struct perf_buffer *rb;
 	vm_fault_t ret = VM_FAULT_SIGBUS;
 
 	if (vmf->flags & FAULT_FLAG_MKWRITE) {
@@ -5549,9 +5549,9 @@ static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
 }
 
 static void ring_buffer_attach(struct perf_event *event,
-			       struct ring_buffer *rb)
+			       struct perf_buffer *rb)
 {
-	struct ring_buffer *old_rb = NULL;
+	struct perf_buffer *old_rb = NULL;
 	unsigned long flags;
 
 	if (event->rb) {
@@ -5609,7 +5609,7 @@ static void ring_buffer_attach(struct perf_event *event,
 
 static void ring_buffer_wakeup(struct perf_event *event)
 {
-	struct ring_buffer *rb;
+	struct perf_buffer *rb;
 
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
@@ -5620,9 +5620,9 @@ static void ring_buffer_wakeup(struct perf_event *event)
 	rcu_read_unlock();
 }
 
-struct ring_buffer *ring_buffer_get(struct perf_event *event)
+struct perf_buffer *ring_buffer_get(struct perf_event *event)
 {
-	struct ring_buffer *rb;
+	struct perf_buffer *rb;
 
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
@@ -5635,7 +5635,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event *event)
 	return rb;
 }
 
-void ring_buffer_put(struct ring_buffer *rb)
+void ring_buffer_put(struct perf_buffer *rb)
 {
 	if (!refcount_dec_and_test(&rb->refcount))
 		return;
@@ -5673,7 +5673,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 {
 	struct perf_event *event = vma->vm_file->private_data;
 
-	struct ring_buffer *rb = ring_buffer_get(event);
+	struct perf_buffer *rb = ring_buffer_get(event);
 	struct user_struct *mmap_user = rb->mmap_user;
 	int mmap_locked = rb->mmap_locked;
 	unsigned long size = perf_data_size(rb);
@@ -5791,8 +5791,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	struct perf_event *event = file->private_data;
 	unsigned long user_locked, user_lock_limit;
 	struct user_struct *user = current_user();
+	struct perf_buffer *rb = NULL;
 	unsigned long locked, lock_limit;
-	struct ring_buffer *rb = NULL;
 	unsigned long vma_size;
 	unsigned long nr_pages;
 	long user_extra = 0, extra = 0;
@@ -6275,7 +6275,7 @@ static unsigned long perf_prepare_sample_aux(struct perf_event *event,
 					  size_t size)
 {
 	struct perf_event *sampler = event->aux_event;
-	struct ring_buffer *rb;
+	struct perf_buffer *rb;
 
 	data->aux_size = 0;
 
@@ -6308,7 +6308,7 @@ static unsigned long perf_prepare_sample_aux(struct perf_event *event,
 	return data->aux_size;
 }
 
-long perf_pmu_snapshot_aux(struct ring_buffer *rb,
+long perf_pmu_snapshot_aux(struct perf_buffer *rb,
 			   struct perf_event *event,
 			   struct perf_output_handle *handle,
 			   unsigned long size)
@@ -6347,8 +6347,8 @@ static void perf_aux_sample_output(struct perf_event *event,
 				   struct perf_sample_data *data)
 {
 	struct perf_event *sampler = event->aux_event;
+	struct perf_buffer *rb;
 	unsigned long pad;
-	struct ring_buffer *rb;
 	long size;
 
 	if (WARN_ON_ONCE(!sampler || !data->aux_size))
@@ -6716,7 +6716,7 @@ void perf_output_sample(struct perf_output_handle *handle,
 		int wakeup_events = event->attr.wakeup_events;
 
 		if (wakeup_events) {
-			struct ring_buffer *rb = handle->rb;
+			struct perf_buffer *rb = handle->rb;
 			int events = local_inc_return(&rb->events);
 
 			if (events >= wakeup_events) {
@@ -7159,7 +7159,7 @@ void perf_event_exec(void)
 }
 
 struct remote_output {
-	struct ring_buffer	*rb;
+	struct perf_buffer	*rb;
 	int			err;
 };
 
@@ -7167,7 +7167,7 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
 {
 	struct perf_event *parent = event->parent;
 	struct remote_output *ro = data;
-	struct ring_buffer *rb = ro->rb;
+	struct perf_buffer *rb = ro->rb;
 	struct stop_event_data sd = {
 		.event	= event,
 	};
@@ -7504,7 +7504,7 @@ static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
 {
 	struct path ns_path;
 	struct inode *ns_inode;
-	void *error;
+	int error;
 
 	error = ns_get_path(&ns_path, task, ns_ops);
 	if (!error) {
@@ -11007,7 +11007,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
-	struct ring_buffer *rb = NULL;
+	struct perf_buffer *rb = NULL;
 	int ret = -EINVAL;
 
 	if (!output_event)
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 747d67f..f16f66b 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -10,7 +10,7 @@
 
 #define RING_BUFFER_WRITABLE		0x01
 
-struct ring_buffer {
+struct perf_buffer {
 	refcount_t			refcount;
 	struct rcu_head			rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
@@ -58,17 +58,17 @@ struct ring_buffer {
 	void				*data_pages[0];
 };
 
-extern void rb_free(struct ring_buffer *rb);
+extern void rb_free(struct perf_buffer *rb);
 
 static inline void rb_free_rcu(struct rcu_head *rcu_head)
 {
-	struct ring_buffer *rb;
+	struct perf_buffer *rb;
 
-	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
+	rb = container_of(rcu_head, struct perf_buffer, rcu_head);
 	rb_free(rb);
 }
 
-static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
+static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
 {
 	if (!pause && rb->nr_pages)
 		rb->paused = 0;
@@ -76,16 +76,16 @@ static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
 		rb->paused = 1;
 }
 
-extern struct ring_buffer *
+extern struct perf_buffer *
 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
 extern void perf_event_wakeup(struct perf_event *event);
-extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
+extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
 			pgoff_t pgoff, int nr_pages, long watermark, int flags);
-extern void rb_free_aux(struct ring_buffer *rb);
-extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
-extern void ring_buffer_put(struct ring_buffer *rb);
+extern void rb_free_aux(struct perf_buffer *rb);
+extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
+extern void ring_buffer_put(struct perf_buffer *rb);
 
-static inline bool rb_has_aux(struct ring_buffer *rb)
+static inline bool rb_has_aux(struct perf_buffer *rb)
 {
 	return !!rb->aux_nr_pages;
 }
@@ -94,7 +94,7 @@ void perf_event_aux_event(struct perf_event *event, unsigned long head,
 			  unsigned long size, u64 flags);
 
 extern struct page *
-perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
+perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);
 
 #ifdef CONFIG_PERF_USE_VMALLOC
 /*
@@ -103,25 +103,25 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
  * Required for architectures that have d-cache aliasing issues.
  */
 
-static inline int page_order(struct ring_buffer *rb)
+static inline int page_order(struct perf_buffer *rb)
 {
 	return rb->page_order;
 }
 
 #else
 
-static inline int page_order(struct ring_buffer *rb)
+static inline int page_order(struct perf_buffer *rb)
 {
 	return 0;
 }
 #endif
 
-static inline unsigned long perf_data_size(struct ring_buffer *rb)
+static inline unsigned long perf_data_size(struct perf_buffer *rb)
 {
 	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
 }
 
-static inline unsigned long perf_aux_size(struct ring_buffer *rb)
+static inline unsigned long perf_aux_size(struct perf_buffer *rb)
 {
 	return rb->aux_nr_pages << PAGE_SHIFT;
 }
@@ -141,7 +141,7 @@ static inline unsigned long perf_aux_size(struct ring_buffer *rb)
 			buf += written;					\
 		handle->size -= written;				\
 		if (!handle->size) {					\
-			struct ring_buffer *rb = handle->rb;		\
+			struct perf_buffer *rb = handle->rb;	\
 									\
 			handle->page++;					\
 			handle->page &= rb->nr_pages - 1;		\
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 7ffd5c7..192b8ab 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -35,7 +35,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
  */
 static void perf_output_get_handle(struct perf_output_handle *handle)
 {
-	struct ring_buffer *rb = handle->rb;
+	struct perf_buffer *rb = handle->rb;
 
 	preempt_disable();
 
@@ -49,7 +49,7 @@ static void perf_output_get_handle(struct perf_output_handle *handle)
 
 static void perf_output_put_handle(struct perf_output_handle *handle)
 {
-	struct ring_buffer *rb = handle->rb;
+	struct perf_buffer *rb = handle->rb;
 	unsigned long head;
 	unsigned int nest;
 
@@ -150,7 +150,7 @@ __perf_output_begin(struct perf_output_handle *handle,
 		    struct perf_event *event, unsigned int size,
 		    bool backward)
 {
-	struct ring_buffer *rb;
+	struct perf_buffer *rb;
 	unsigned long tail, offset, head;
 	int have_lost, page_shift;
 	struct {
@@ -301,7 +301,7 @@ void perf_output_end(struct perf_output_handle *handle)
 }
 
 static void
-ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
+ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
 {
 	long max_size = perf_data_size(rb);
 
@@ -361,7 +361,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
 {
 	struct perf_event *output_event = event;
 	unsigned long aux_head, aux_tail;
-	struct ring_buffer *rb;
+	struct perf_buffer *rb;
 	unsigned int nest;
 
 	if (output_event->parent)
@@ -449,7 +449,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
 }
 EXPORT_SYMBOL_GPL(perf_aux_output_begin);
 
-static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
+static __always_inline bool rb_need_aux_wakeup(struct perf_buffer *rb)
 {
 	if (rb->aux_overwrite)
 		return false;
@@ -475,7 +475,7 @@ static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
 void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
 {
 	bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
-	struct ring_buffer *rb = handle->rb;
+	struct perf_buffer *rb = handle->rb;
 	unsigned long aux_head;
 
 	/* in overwrite mode, driver provides aux_head via handle */
@@ -532,7 +532,7 @@ EXPORT_SYMBOL_GPL(perf_aux_output_end);
  */
 int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
 {
-	struct ring_buffer *rb = handle->rb;
+	struct perf_buffer *rb = handle->rb;
 
 	if (size > handle->size)
 		return -ENOSPC;
@@ -569,8 +569,8 @@ long perf_output_copy_aux(struct perf_output_handle *aux_handle,
 			  struct perf_output_handle *handle,
 			  unsigned long from, unsigned long to)
 {
+	struct perf_buffer *rb = aux_handle->rb;
 	unsigned long tocopy, remainder, len = 0;
-	struct ring_buffer *rb = aux_handle->rb;
 	void *addr;
 
 	from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
@@ -626,7 +626,7 @@ static struct page *rb_alloc_aux_page(int node, int order)
 	return page;
 }
 
-static void rb_free_aux_page(struct ring_buffer *rb, int idx)
+static void rb_free_aux_page(struct perf_buffer *rb, int idx)
 {
 	struct page *page = virt_to_page(rb->aux_pages[idx]);
 
@@ -635,7 +635,7 @@ static void rb_free_aux_page(struct ring_buffer *rb, int idx)
 	__free_page(page);
 }
 
-static void __rb_free_aux(struct ring_buffer *rb)
+static void __rb_free_aux(struct perf_buffer *rb)
 {
 	int pg;
 
@@ -662,7 +662,7 @@ static void __rb_free_aux(struct ring_buffer *rb)
 	}
 }
 
-int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
+int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
 		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
 {
 	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
@@ -753,7 +753,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
 	return ret;
 }
 
-void rb_free_aux(struct ring_buffer *rb)
+void rb_free_aux(struct perf_buffer *rb)
 {
 	if (refcount_dec_and_test(&rb->aux_refcount))
 		__rb_free_aux(rb);
@@ -766,7 +766,7 @@ void rb_free_aux(struct ring_buffer *rb)
  */
 
 static struct page *
-__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
 {
 	if (pgoff > rb->nr_pages)
 		return NULL;
@@ -798,13 +798,13 @@ static void perf_mmap_free_page(void *addr)
 	__free_page(page);
 }
 
-struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
+struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 {
-	struct ring_buffer *rb;
+	struct perf_buffer *rb;
 	unsigned long size;
 	int i;
 
-	size = sizeof(struct ring_buffer);
+	size = sizeof(struct perf_buffer);
 	size += nr_pages * sizeof(void *);
 
 	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
@@ -843,7 +843,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 	return NULL;
 }
 
-void rb_free(struct ring_buffer *rb)
+void rb_free(struct perf_buffer *rb)
 {
 	int i;
 
@@ -854,13 +854,13 @@ void rb_free(struct ring_buffer *rb)
 }
 
 #else
-static int data_page_nr(struct ring_buffer *rb)
+static int data_page_nr(struct perf_buffer *rb)
 {
 	return rb->nr_pages << page_order(rb);
 }
 
 static struct page *
-__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
 {
 	/* The '>' counts in the user page. */
 	if (pgoff > data_page_nr(rb))
@@ -878,11 +878,11 @@ static void perf_mmap_unmark_page(void *addr)
 
 static void rb_free_work(struct work_struct *work)
 {
-	struct ring_buffer *rb;
+	struct perf_buffer *rb;
 	void *base;
 	int i, nr;
 
-	rb = container_of(work, struct ring_buffer, work);
+	rb = container_of(work, struct perf_buffer, work);
 	nr = data_page_nr(rb);
 
 	base = rb->user_page;
@@ -894,18 +894,18 @@ static void rb_free_work(struct work_struct *work)
 	kfree(rb);
 }
 
-void rb_free(struct ring_buffer *rb)
+void rb_free(struct perf_buffer *rb)
 {
 	schedule_work(&rb->work);
 }
 
-struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
+struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 {
-	struct ring_buffer *rb;
+	struct perf_buffer *rb;
 	unsigned long size;
 	void *all_buf;
 
-	size = sizeof(struct ring_buffer);
+	size = sizeof(struct perf_buffer);
 	size += sizeof(void *);
 
 	rb = kzalloc(size, GFP_KERNEL);
@@ -939,7 +939,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 #endif
 
 struct page *
-perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
 {
 	if (rb->aux_nr_pages) {
 		/* above AUX space */
diff --git a/kernel/fork.c b/kernel/fork.c
index ef82feb..60a1295 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -692,7 +692,7 @@ void __mmdrop(struct mm_struct *mm)
 	WARN_ON_ONCE(mm == current->active_mm);
 	mm_free_pgd(mm);
 	destroy_context(mm);
-	mmu_notifier_mm_destroy(mm);
+	mmu_notifier_subscriptions_destroy(mm);
 	check_mm(mm);
 	put_user_ns(mm->user_ns);
 	free_mm(mm);
@@ -1025,7 +1025,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	mm_init_aio(mm);
 	mm_init_owner(mm, p);
 	RCU_INIT_POINTER(mm->exe_file, NULL);
-	mmu_notifier_mm_init(mm);
+	mmu_notifier_subscriptions_init(mm);
 	init_tlb_flush_pending(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
 	mm->pmd_huge_pte = NULL;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 818b280..3089a60 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -731,6 +731,13 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
  *
  *	Wakeup mode lets this IRQ wake the system from sleep
  *	states like "suspend to RAM".
+ *
+ *	Note: irq enable/disable state is completely orthogonal
+ *	to the enable/disable state of irq wake. An irq can be
+ *	disabled with disable_irq() and still wake the system as
+ *	long as the irq has wake enabled. If this does not hold,
+ *	then the underlying irq chip and the related driver need
+ *	to be investigated.
  */
 int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
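
The note added to irq_set_irq_wake() above documents the intended driver pattern: wake enablement is orthogonal to enable_irq()/disable_irq(). A hedged sketch of that pattern in a driver's suspend/resume callbacks, assuming a build against kernel headers; demo_dev and the callbacks are illustrative, not taken from this series:

/* Hedged sketch of the semantics documented above: keep the interrupt
 * disabled for normal handling across suspend, yet leave it armed as a
 * wakeup source. demo_dev and these callbacks are illustrative only. */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct demo_dev {
	int irq;
};

static int demo_suspend(struct device *dev)
{
	struct demo_dev *d = dev_get_drvdata(dev);

	disable_irq(d->irq);			/* no normal handling while asleep */
	if (device_may_wakeup(dev))
		enable_irq_wake(d->irq);	/* still a valid wake source */
	return 0;
}

static int demo_resume(struct device *dev)
{
	struct demo_dev *d = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(d->irq);
	enable_irq(d->irq);
	return 0;
}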
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index cfc4f08..9e5783d 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -176,20 +176,20 @@ static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
 	return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
 }
 
-static const struct file_operations irq_affinity_proc_fops = {
-	.open		= irq_affinity_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= irq_affinity_proc_write,
+static const struct proc_ops irq_affinity_proc_ops = {
+	.proc_open	= irq_affinity_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= irq_affinity_proc_write,
 };
 
-static const struct file_operations irq_affinity_list_proc_fops = {
-	.open		= irq_affinity_list_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= irq_affinity_list_proc_write,
+static const struct proc_ops irq_affinity_list_proc_ops = {
+	.proc_open	= irq_affinity_list_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= irq_affinity_list_proc_write,
 };
 
 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
@@ -246,12 +246,12 @@ static int default_affinity_open(struct inode *inode, struct file *file)
 	return single_open(file, default_affinity_show, PDE_DATA(inode));
 }
 
-static const struct file_operations default_affinity_proc_fops = {
-	.open		= default_affinity_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= default_affinity_write,
+static const struct proc_ops default_affinity_proc_ops = {
+	.proc_open	= default_affinity_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= default_affinity_write,
 };
 
 static int irq_node_proc_show(struct seq_file *m, void *v)
@@ -342,7 +342,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 #ifdef CONFIG_SMP
 	/* create /proc/irq/<irq>/smp_affinity */
 	proc_create_data("smp_affinity", 0644, desc->dir,
-			 &irq_affinity_proc_fops, irqp);
+			 &irq_affinity_proc_ops, irqp);
 
 	/* create /proc/irq/<irq>/affinity_hint */
 	proc_create_single_data("affinity_hint", 0444, desc->dir,
@@ -350,7 +350,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 
 	/* create /proc/irq/<irq>/smp_affinity_list */
 	proc_create_data("smp_affinity_list", 0644, desc->dir,
-			 &irq_affinity_list_proc_fops, irqp);
+			 &irq_affinity_list_proc_ops, irqp);
 
 	proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
 			irqp);
@@ -401,7 +401,7 @@ static void register_default_affinity_proc(void)
 {
 #ifdef CONFIG_SMP
 	proc_create("irq/default_smp_affinity", 0644, NULL,
-		    &default_affinity_proc_fops);
+		    &default_affinity_proc_ops);
 #endif
 }
 
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 136ce04..d812b90 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -698,16 +698,16 @@ const char *kdb_walk_kallsyms(loff_t *pos)
 }
 #endif	/* CONFIG_KGDB_KDB */
 
-static const struct file_operations kallsyms_operations = {
-	.open = kallsyms_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = seq_release_private,
+static const struct proc_ops kallsyms_proc_ops = {
+	.proc_open	= kallsyms_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release_private,
 };
 
 static int __init kallsyms_init(void)
 {
-	proc_create("kallsyms", 0444, NULL, &kallsyms_operations);
+	proc_create("kallsyms", 0444, NULL, &kallsyms_proc_ops);
 	return 0;
 }
 device_initcall(kallsyms_init);
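
The proc conversions in this series (configs.c, irq/proc.c, kallsyms.c, latencytop.c, lockdep_proc.c, module.c, profile.c, psi.c) all follow the same pattern: a /proc entry switches from struct file_operations to struct proc_ops, dropping .owner and renaming the callbacks with a proc_ prefix. A minimal out-of-tree module sketch of the post-conversion shape, assuming a kernel that already contains this change; the entry name "proc_ops_demo" is illustrative:

/* Hedged sketch: register a read-only /proc entry via struct proc_ops,
 * mirroring the kallsyms/configs conversions above. */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from proc_ops\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct proc_ops demo_proc_ops = {
	.proc_open	= demo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init demo_init(void)
{
	if (!proc_create("proc_ops_demo", 0444, NULL, &demo_proc_ops))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("proc_ops_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");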
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index e3acead..8d1c158 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -255,17 +255,17 @@ static int lstats_open(struct inode *inode, struct file *filp)
 	return single_open(filp, lstats_show, NULL);
 }
 
-static const struct file_operations lstats_fops = {
-	.open		= lstats_open,
-	.read		= seq_read,
-	.write		= lstats_write,
-	.llseek		= seq_lseek,
-	.release	= single_release,
+static const struct proc_ops lstats_proc_ops = {
+	.proc_open	= lstats_open,
+	.proc_read	= seq_read,
+	.proc_write	= lstats_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
 };
 
 static int __init init_lstats_procfs(void)
 {
-	proc_create("latency_stats", 0644, NULL, &lstats_fops);
+	proc_create("latency_stats", 0644, NULL, &lstats_proc_ops);
 	return 0;
 }
 
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 9bb6d24..231684c 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -643,12 +643,12 @@ static int lock_stat_release(struct inode *inode, struct file *file)
 	return seq_release(inode, file);
 }
 
-static const struct file_operations proc_lock_stat_operations = {
-	.open		= lock_stat_open,
-	.write		= lock_stat_write,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= lock_stat_release,
+static const struct proc_ops lock_stat_proc_ops = {
+	.proc_open	= lock_stat_open,
+	.proc_write	= lock_stat_write,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= lock_stat_release,
 };
 #endif /* CONFIG_LOCK_STAT */
 
@@ -660,8 +660,7 @@ static int __init lockdep_proc_init(void)
 #endif
 	proc_create_single("lockdep_stats", S_IRUSR, NULL, lockdep_stats_show);
 #ifdef CONFIG_LOCK_STAT
-	proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL,
-		    &proc_lock_stat_operations);
+	proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL, &lock_stat_proc_ops);
 #endif
 
 	return 0;
diff --git a/kernel/module.c b/kernel/module.c
index ac058a5..33569a0 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -214,7 +214,8 @@ static struct module *mod_find(unsigned long addr)
 {
 	struct module *mod;
 
-	list_for_each_entry_rcu(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list,
+				lockdep_is_held(&module_mutex)) {
 		if (within_module(addr, mod))
 			return mod;
 	}
@@ -448,7 +449,8 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
 	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
 		return true;
 
-	list_for_each_entry_rcu(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list,
+				lockdep_is_held(&module_mutex)) {
 		struct symsearch arr[] = {
 			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
 			  NOT_GPL_ONLY, false },
@@ -616,7 +618,8 @@ static struct module *find_module_all(const char *name, size_t len,
 
 	module_assert_mutex_or_preempt();
 
-	list_for_each_entry_rcu(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list,
+				lockdep_is_held(&module_mutex)) {
 		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
 			continue;
 		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
@@ -1781,6 +1784,8 @@ static int module_add_modinfo_attrs(struct module *mod)
 error_out:
 	if (i > 0)
 		module_remove_modinfo_attrs(mod, --i);
+	else
+		kfree(mod->modinfo_attrs);
 	return error;
 }
 
@@ -2834,7 +2839,7 @@ static int module_sig_check(struct load_info *info, int flags)
 		reason = "Loading of module with unavailable key";
 	decide:
 		if (is_module_sig_enforced()) {
-			pr_notice("%s is rejected\n", reason);
+			pr_notice("%s: %s is rejected\n", info->name, reason);
 			return -EKEYREJECTED;
 		}
 
@@ -3011,9 +3016,7 @@ static int setup_load_info(struct load_info *info, int flags)
 
 	/* Try to find a name early so we can log errors with a module name */
 	info->index.info = find_sec(info, ".modinfo");
-	if (!info->index.info)
-		info->name = "(missing .modinfo section)";
-	else
+	if (info->index.info)
 		info->name = get_modinfo(info, "name");
 
 	/* Find internal symbols and strings. */
@@ -3028,14 +3031,15 @@ static int setup_load_info(struct load_info *info, int flags)
 	}
 
 	if (info->index.sym == 0) {
-		pr_warn("%s: module has no symbols (stripped?)\n", info->name);
+		pr_warn("%s: module has no symbols (stripped?)\n",
+			info->name ?: "(missing .modinfo section or name field)");
 		return -ENOEXEC;
 	}
 
 	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
 	if (!info->index.mod) {
 		pr_warn("%s: No module found in object\n",
-			info->name ?: "(missing .modinfo name field)");
+			info->name ?: "(missing .modinfo section or name field)");
 		return -ENOEXEC;
 	}
 	/* This is temporary: point mod into copy of data. */
@@ -4350,16 +4354,16 @@ static int modules_open(struct inode *inode, struct file *file)
 	return err;
 }
 
-static const struct file_operations proc_modules_operations = {
-	.open		= modules_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
+static const struct proc_ops modules_proc_ops = {
+	.proc_open	= modules_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
 };
 
 static int __init proc_modules_init(void)
 {
-	proc_create("modules", 0, NULL, &proc_modules_operations);
+	proc_create("modules", 0, NULL, &modules_proc_ops);
 	return 0;
 }
 module_init(proc_modules_init);
diff --git a/kernel/pid.c b/kernel/pid.c
index 2278e24..0f4ecb5 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -578,3 +578,93 @@ void __init pid_idr_init(void)
 	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
 			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
 }
+
+static struct file *__pidfd_fget(struct task_struct *task, int fd)
+{
+	struct file *file;
+	int ret;
+
+	ret = mutex_lock_killable(&task->signal->cred_guard_mutex);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
+		file = fget_task(task, fd);
+	else
+		file = ERR_PTR(-EPERM);
+
+	mutex_unlock(&task->signal->cred_guard_mutex);
+
+	return file ?: ERR_PTR(-EBADF);
+}
+
+static int pidfd_getfd(struct pid *pid, int fd)
+{
+	struct task_struct *task;
+	struct file *file;
+	int ret;
+
+	task = get_pid_task(pid, PIDTYPE_PID);
+	if (!task)
+		return -ESRCH;
+
+	file = __pidfd_fget(task, fd);
+	put_task_struct(task);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	ret = security_file_receive(file);
+	if (ret) {
+		fput(file);
+		return ret;
+	}
+
+	ret = get_unused_fd_flags(O_CLOEXEC);
+	if (ret < 0)
+		fput(file);
+	else
+		fd_install(ret, file);
+
+	return ret;
+}
+
+/**
+ * sys_pidfd_getfd() - Get a file descriptor from another process
+ *
+ * @pidfd:	the pidfd file descriptor of the process
+ * @fd:		the file descriptor number to get
+ * @flags:	flags on how to get the fd (reserved)
+ *
+ * This syscall gets a copy of a file descriptor from another process
+ * based on the pidfd, and file descriptor number. It requires that
+ * the calling process has the ability to ptrace the process represented
+ * by the pidfd. The process which is having its file descriptor copied
+ * is otherwise unaffected.
+ *
+ * Return: On success, a cloexec file descriptor is returned.
+ *         On error, a negative errno number will be returned.
+ */
+SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
+		unsigned int, flags)
+{
+	struct pid *pid;
+	struct fd f;
+	int ret;
+
+	/* flags is currently unused - make sure it's unset */
+	if (flags)
+		return -EINVAL;
+
+	f = fdget(pidfd);
+	if (!f.file)
+		return -EBADF;
+
+	pid = pidfd_pid(f.file);
+	if (IS_ERR(pid))
+		ret = PTR_ERR(pid);
+	else
+		ret = pidfd_getfd(pid, fd);
+
+	fdput(f);
+	return ret;
+}
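
sys_pidfd_getfd() above is the new syscall surface. A hedged userspace sketch of driving it, assuming a kernel with this series; the fallback syscall numbers 434/438 (pidfd_open/pidfd_getfd) are stated for headers that do not yet define them:

/* Hedged sketch: copy fd 1 out of a target process via pidfd_getfd().
 * Requires ptrace-level access to the target (PTRACE_MODE_ATTACH_REALCREDS),
 * mirroring the check in __pidfd_fget() above. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_pidfd_open
#define __NR_pidfd_open 434	/* assumption for older headers */
#endif
#ifndef __NR_pidfd_getfd
#define __NR_pidfd_getfd 438	/* assumption for older headers */
#endif

int main(int argc, char **argv)
{
	pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : getpid();
	int pidfd, fd;

	pidfd = syscall(__NR_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	/* flags must be 0 for now, as enforced in sys_pidfd_getfd(). */
	fd = syscall(__NR_pidfd_getfd, pidfd, 1, 0);
	if (fd < 0) {
		perror("pidfd_getfd");
		return 1;
	}

	dprintf(fd, "hello via the target's fd 1\n");
	close(fd);
	close(pidfd);
	return 0;
}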
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 1ef6f75..fada22d 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2770,8 +2770,6 @@ void register_console(struct console *newcon)
 		 * for us.
 		 */
 		logbuf_lock_irqsave(flags);
-		console_seq = syslog_seq;
-		console_idx = syslog_idx;
 		/*
 		 * We're about to replay the log buffer.  Only do this to the
 		 * just-registered console to avoid excessive message spam to
@@ -2783,6 +2781,8 @@ void register_console(struct console *newcon)
 		 */
 		exclusive_console = newcon;
 		exclusive_console_stop_seq = console_seq;
+		console_seq = syslog_seq;
+		console_idx = syslog_idx;
 		logbuf_unlock_irqrestore(flags);
 	}
 	console_unlock();
diff --git a/kernel/profile.c b/kernel/profile.c
index 4b144b0..6f69a41 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -442,18 +442,18 @@ static ssize_t prof_cpu_mask_proc_write(struct file *file,
 	return err;
 }
 
-static const struct file_operations prof_cpu_mask_proc_fops = {
-	.open		= prof_cpu_mask_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= prof_cpu_mask_proc_write,
+static const struct proc_ops prof_cpu_mask_proc_ops = {
+	.proc_open	= prof_cpu_mask_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= prof_cpu_mask_proc_write,
 };
 
 void create_prof_cpu_mask(void)
 {
 	/* create /proc/irq/prof_cpu_mask */
-	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_fops);
+	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_ops);
 }
 
 /*
@@ -517,10 +517,10 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
 	return count;
 }
 
-static const struct file_operations proc_profile_operations = {
-	.read		= read_profile,
-	.write		= write_profile,
-	.llseek		= default_llseek,
+static const struct proc_ops profile_proc_ops = {
+	.proc_read	= read_profile,
+	.proc_write	= write_profile,
+	.proc_lseek	= default_llseek,
 };
 
 int __ref create_proc_profile(void)
@@ -548,7 +548,7 @@ int __ref create_proc_profile(void)
 	err = 0;
 #endif
 	entry = proc_create("profile", S_IWUSR | S_IRUGO,
-			    NULL, &proc_profile_operations);
+			    NULL, &profile_proc_ops);
 	if (!entry)
 		goto err_state_onl;
 	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 6935a9e..dcbd757 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -508,7 +508,6 @@ static void synchronize_rcu_expedited_wait(void)
 				tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
 			}
 		}
-		WARN_ON_ONCE(1);
 	}
 
 	for (;;) {
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index db7b50b..ac4bd0c 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1251,40 +1251,40 @@ static int psi_fop_release(struct inode *inode, struct file *file)
 	return single_release(inode, file);
 }
 
-static const struct file_operations psi_io_fops = {
-	.open           = psi_io_open,
-	.read           = seq_read,
-	.llseek         = seq_lseek,
-	.write          = psi_io_write,
-	.poll           = psi_fop_poll,
-	.release        = psi_fop_release,
+static const struct proc_ops psi_io_proc_ops = {
+	.proc_open	= psi_io_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= psi_io_write,
+	.proc_poll	= psi_fop_poll,
+	.proc_release	= psi_fop_release,
 };
 
-static const struct file_operations psi_memory_fops = {
-	.open           = psi_memory_open,
-	.read           = seq_read,
-	.llseek         = seq_lseek,
-	.write          = psi_memory_write,
-	.poll           = psi_fop_poll,
-	.release        = psi_fop_release,
+static const struct proc_ops psi_memory_proc_ops = {
+	.proc_open	= psi_memory_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= psi_memory_write,
+	.proc_poll	= psi_fop_poll,
+	.proc_release	= psi_fop_release,
 };
 
-static const struct file_operations psi_cpu_fops = {
-	.open           = psi_cpu_open,
-	.read           = seq_read,
-	.llseek         = seq_lseek,
-	.write          = psi_cpu_write,
-	.poll           = psi_fop_poll,
-	.release        = psi_fop_release,
+static const struct proc_ops psi_cpu_proc_ops = {
+	.proc_open	= psi_cpu_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= psi_cpu_write,
+	.proc_poll	= psi_fop_poll,
+	.proc_release	= psi_fop_release,
 };
 
 static int __init psi_proc_init(void)
 {
 	if (psi_enable) {
 		proc_mkdir("pressure", NULL);
-		proc_create("pressure/io", 0, NULL, &psi_io_fops);
-		proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
-		proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
+		proc_create("pressure/io", 0, NULL, &psi_io_proc_ops);
+		proc_create("pressure/memory", 0, NULL, &psi_memory_proc_ops);
+		proc_create("pressure/cpu", 0, NULL, &psi_cpu_proc_ops);
 	}
 	return 0;
 }
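
The PSI conversion is mechanical; the /proc/pressure/{io,memory,cpu} files registered in psi_proc_init() are unchanged for readers. A small reader sketch, assuming the kernel was booted with PSI enabled so the files exist:

/* Hedged sketch: dump the pressure files registered in psi_proc_init(). */
#include <stdio.h>

int main(void)
{
	const char *files[] = {
		"/proc/pressure/io",
		"/proc/pressure/memory",
		"/proc/pressure/cpu",
	};
	char line[256];

	for (int i = 0; i < 3; i++) {
		FILE *f = fopen(files[i], "r");

		if (!f) {
			perror(files[i]);
			continue;
		}
		printf("%s:\n", files[i]);
		while (fgets(line, sizeof(line), f))
			printf("  %s", line);
		fclose(f);
	}
	return 0;
}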
diff --git a/kernel/signal.c b/kernel/signal.c
index bcd46f5..9ad8dea 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1383,7 +1383,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 		 * must see ->sighand == NULL.
 		 */
 		spin_lock_irqsave(&sighand->siglock, *flags);
-		if (likely(sighand == tsk->sighand))
+		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
 			break;
 		spin_unlock_irqrestore(&sighand->siglock, *flags);
 	}
diff --git a/kernel/sys.c b/kernel/sys.c
index a9331f1..f9bc5c3 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2261,6 +2261,8 @@ int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
 	return -EINVAL;
 }
 
+#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LESS_THROTTLE)
+
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		unsigned long, arg4, unsigned long, arg5)
 {
@@ -2488,6 +2490,29 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 			return -EINVAL;
 		error = GET_TAGGED_ADDR_CTRL();
 		break;
+	case PR_SET_IO_FLUSHER:
+		if (!capable(CAP_SYS_RESOURCE))
+			return -EPERM;
+
+		if (arg3 || arg4 || arg5)
+			return -EINVAL;
+
+		if (arg2 == 1)
+			current->flags |= PR_IO_FLUSHER;
+		else if (!arg2)
+			current->flags &= ~PR_IO_FLUSHER;
+		else
+			return -EINVAL;
+		break;
+	case PR_GET_IO_FLUSHER:
+		if (!capable(CAP_SYS_RESOURCE))
+			return -EPERM;
+
+		if (arg2 || arg3 || arg4 || arg5)
+			return -EINVAL;
+
+		error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
+		break;
 	default:
 		error = -EINVAL;
 		break;
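
PR_SET_IO_FLUSHER/PR_GET_IO_FLUSHER above are new prctl operations gated on CAP_SYS_RESOURCE. A hedged userspace sketch; the numeric fallbacks 57/58 are assumptions for headers that predate this series and would normally come from <linux/prctl.h>:

/* Hedged sketch: mark the calling task as an IO flusher and read it back.
 * Needs CAP_SYS_RESOURCE, mirroring the capable() checks above. */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_IO_FLUSHER
#define PR_SET_IO_FLUSHER 57	/* assumption for pre-series headers */
#endif
#ifndef PR_GET_IO_FLUSHER
#define PR_GET_IO_FLUSHER 58	/* assumption for pre-series headers */
#endif

int main(void)
{
	if (prctl(PR_SET_IO_FLUSHER, 1, 0, 0, 0) != 0) {
		perror("PR_SET_IO_FLUSHER");
		return 1;
	}

	/* Returns 1 when both PF_MEMALLOC_NOIO and PF_LESS_THROTTLE are set. */
	int on = prctl(PR_GET_IO_FLUSHER, 0, 0, 0, 0);
	if (on < 0) {
		perror("PR_GET_IO_FLUSHER");
		return 1;
	}
	printf("IO flusher state: %d\n", on);
	return 0;
}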
diff --git a/kernel/sysctl-test.c b/kernel/sysctl-test.c
index 2a63241..ccb7850 100644
--- a/kernel/sysctl-test.c
+++ b/kernel/sysctl-test.c
@@ -389,4 +389,6 @@ static struct kunit_suite sysctl_test_suite = {
 	.test_cases = sysctl_test_cases,
 };
 
-kunit_test_suite(sysctl_test_suite);
+kunit_test_suites(&sysctl_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index fff5f64..428beb6 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -293,8 +293,15 @@ static void clocksource_watchdog(struct timer_list *unused)
 	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
 	if (next_cpu >= nr_cpu_ids)
 		next_cpu = cpumask_first(cpu_online_mask);
-	watchdog_timer.expires += WATCHDOG_INTERVAL;
-	add_timer_on(&watchdog_timer, next_cpu);
+
+	/*
+	 * Arm timer if not already pending: could race with concurrent
+	 * pair clocksource_stop_watchdog() clocksource_start_watchdog().
+	 */
+	if (!timer_pending(&watchdog_timer)) {
+		watchdog_timer.expires += WATCHDOG_INTERVAL;
+		add_timer_on(&watchdog_timer, next_cpu);
+	}
 out:
 	spin_unlock(&watchdog_lock);
 }
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 9e59c9e..ca4e6d5 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -97,20 +97,20 @@ static int do_getitimer(int which, struct itimerspec64 *value)
 	return 0;
 }
 
-static int put_itimerval(struct itimerval __user *o,
+static int put_itimerval(struct __kernel_old_itimerval __user *o,
 			 const struct itimerspec64 *i)
 {
-	struct itimerval v;
+	struct __kernel_old_itimerval v;
 
 	v.it_interval.tv_sec = i->it_interval.tv_sec;
 	v.it_interval.tv_usec = i->it_interval.tv_nsec / NSEC_PER_USEC;
 	v.it_value.tv_sec = i->it_value.tv_sec;
 	v.it_value.tv_usec = i->it_value.tv_nsec / NSEC_PER_USEC;
-	return copy_to_user(o, &v, sizeof(struct itimerval)) ? -EFAULT : 0;
+	return copy_to_user(o, &v, sizeof(struct __kernel_old_itimerval)) ? -EFAULT : 0;
 }
 
 
-SYSCALL_DEFINE2(getitimer, int, which, struct itimerval __user *, value)
+SYSCALL_DEFINE2(getitimer, int, which, struct __kernel_old_itimerval __user *, value)
 {
 	struct itimerspec64 get_buffer;
 	int error = do_getitimer(which, &get_buffer);
@@ -314,11 +314,11 @@ SYSCALL_DEFINE1(alarm, unsigned int, seconds)
 
 #endif
 
-static int get_itimerval(struct itimerspec64 *o, const struct itimerval __user *i)
+static int get_itimerval(struct itimerspec64 *o, const struct __kernel_old_itimerval __user *i)
 {
-	struct itimerval v;
+	struct __kernel_old_itimerval v;
 
-	if (copy_from_user(&v, i, sizeof(struct itimerval)))
+	if (copy_from_user(&v, i, sizeof(struct __kernel_old_itimerval)))
 		return -EFAULT;
 
 	/* Validate the timevals in value. */
@@ -333,8 +333,8 @@ static int get_itimerval(struct itimerspec64 *o, const struct itimerval __user *
 	return 0;
 }
 
-SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
-		struct itimerval __user *, ovalue)
+SYSCALL_DEFINE3(setitimer, int, which, struct __kernel_old_itimerval __user *, value,
+		struct __kernel_old_itimerval __user *, ovalue)
 {
 	struct itimerspec64 set_buffer, get_buffer;
 	int error;
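
The __kernel_old_itimerval switch above is internal to the kernel; userspace keeps passing the libc struct itimerval to setitimer(2)/getitimer(2), whose layout matches on native ABIs. A quick userspace sketch confirming the unchanged interface:

/* Hedged sketch: the setitimer() ABI is unchanged by the
 * __kernel_old_itimerval rename; this still uses struct itimerval. */
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/time.h>

static volatile sig_atomic_t fired;

static void on_alarm(int sig)
{
	(void)sig;
	fired = 1;
}

int main(void)
{
	struct itimerval it = {
		.it_value = { .tv_sec = 0, .tv_usec = 100000 },	/* 100 ms, one-shot */
	};

	signal(SIGALRM, on_alarm);
	if (setitimer(ITIMER_REAL, &it, NULL) != 0) {
		perror("setitimer");
		return 1;
	}
	while (!fired)
		pause();
	puts("ITIMER_REAL fired");
	return 0;
}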
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 704ccd9..cdd7386 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -626,10 +626,12 @@ EXPORT_SYMBOL(__usecs_to_jiffies);
  * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
  * value to a scaled second value.
  */
-static unsigned long
-__timespec64_to_jiffies(u64 sec, long nsec)
+
+unsigned long
+timespec64_to_jiffies(const struct timespec64 *value)
 {
-	nsec = nsec + TICK_NSEC - 1;
+	u64 sec = value->tv_sec;
+	long nsec = value->tv_nsec + TICK_NSEC - 1;
 
 	if (sec >= MAX_SEC_IN_JIFFIES){
 		sec = MAX_SEC_IN_JIFFIES;
@@ -640,18 +642,6 @@ __timespec64_to_jiffies(u64 sec, long nsec)
 		 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
 
 }
-
-static unsigned long
-__timespec_to_jiffies(unsigned long sec, long nsec)
-{
-	return __timespec64_to_jiffies((u64)sec, nsec);
-}
-
-unsigned long
-timespec64_to_jiffies(const struct timespec64 *value)
-{
-	return __timespec64_to_jiffies(value->tv_sec, value->tv_nsec);
-}
 EXPORT_SYMBOL(timespec64_to_jiffies);
 
 void
@@ -669,44 +659,6 @@ jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value)
 EXPORT_SYMBOL(jiffies_to_timespec64);
 
 /*
- * We could use a similar algorithm to timespec_to_jiffies (with a
- * different multiplier for usec instead of nsec). But this has a
- * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
- * usec value, since it's not necessarily integral.
- *
- * We could instead round in the intermediate scaled representation
- * (i.e. in units of 1/2^(large scale) jiffies) but that's also
- * perilous: the scaling introduces a small positive error, which
- * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
- * units to the intermediate before shifting) leads to accidental
- * overflow and overestimates.
- *
- * At the cost of one additional multiplication by a constant, just
- * use the timespec implementation.
- */
-unsigned long
-timeval_to_jiffies(const struct timeval *value)
-{
-	return __timespec_to_jiffies(value->tv_sec,
-				     value->tv_usec * NSEC_PER_USEC);
-}
-EXPORT_SYMBOL(timeval_to_jiffies);
-
-void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
-{
-	/*
-	 * Convert jiffies to nanoseconds and separate with
-	 * one divide.
-	 */
-	u32 rem;
-
-	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
-				    NSEC_PER_SEC, &rem);
-	value->tv_usec = rem / NSEC_PER_USEC;
-}
-EXPORT_SYMBOL(jiffies_to_timeval);
-
-/*
  * Convert jiffies/jiffies_64 to clock_t and back.
  */
 clock_t jiffies_to_clock_t(unsigned long x)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 25a0fcf..91e8851 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -141,6 +141,15 @@
 
 if FTRACE
 
+config BOOTTIME_TRACING
+	bool "Boot-time Tracing support"
+	depends on BOOT_CONFIG && TRACING
+	default y
+	help
+	  Enable developer to setup ftrace subsystem via supplemental
+	  kernel cmdline at boot time for debugging (tracing) driver
+	  initialization and boot process.
+
 config FUNCTION_TRACER
 	bool "Kernel Function Tracer"
 	depends on HAVE_FUNCTION_TRACER
@@ -172,6 +181,77 @@
 	  the return value. This is done by setting the current return
 	  address on the current task structure into a stack of calls.
 
+config DYNAMIC_FTRACE
+	bool "enable/disable function tracing dynamically"
+	depends on FUNCTION_TRACER
+	depends on HAVE_DYNAMIC_FTRACE
+	default y
+	help
+	  This option will modify all the calls to function tracing
+	  dynamically (will patch them out of the binary image and
+	  replace them with a No-Op instruction) on boot up. During
+	  compile time, a table is made of all the locations that ftrace
+	  can function trace, and this table is linked into the kernel
+	  image. When this is enabled, functions can be individually
+	  enabled, and the functions not enabled will not affect
+	  performance of the system.
+
+	  See the files in /sys/kernel/debug/tracing:
+	    available_filter_functions
+	    set_ftrace_filter
+	    set_ftrace_notrace
+
+	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
+	  otherwise has native performance as long as no tracing is active.
+
+config DYNAMIC_FTRACE_WITH_REGS
+	def_bool y
+	depends on DYNAMIC_FTRACE
+	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
+
+config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+	def_bool y
+	depends on DYNAMIC_FTRACE
+	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+
+config FUNCTION_PROFILER
+	bool "Kernel function profiler"
+	depends on FUNCTION_TRACER
+	default n
+	help
+	  This option enables the kernel function profiler. A file is created
+	  in debugfs called function_profile_enabled which defaults to zero.
+	  When a 1 is echoed into this file profiling begins, and when a
+	  zero is entered, profiling stops. A "functions" file is created in
+	  the trace_stat directory; this file shows the list of functions that
+	  have been hit and their counters.
+
+	  If in doubt, say N.
+
+config STACK_TRACER
+	bool "Trace max stack"
+	depends on HAVE_FUNCTION_TRACER
+	select FUNCTION_TRACER
+	select STACKTRACE
+	select KALLSYMS
+	help
+	  This special tracer records the maximum stack footprint of the
+	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
+
+	  This tracer works by hooking into every function call that the
+	  kernel executes, and keeping a maximum stack depth value and
+	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
+	  then it will not have any overhead while the stack tracer
+	  is disabled.
+
+	  To enable the stack tracer on bootup, pass in 'stacktrace'
+	  on the kernel command line.
+
+	  The stack tracer can also be enabled or disabled via the
+	  sysctl kernel.stack_tracer_enabled
+
+	  Say N if unsure.
+
 config TRACE_PREEMPT_TOGGLE
 	bool
 	help
@@ -282,6 +362,19 @@
 	 file. Every time a latency is greater than tracing_thresh, it will
 	 be recorded into the ring buffer.
 
+config MMIOTRACE
+	bool "Memory mapped IO tracing"
+	depends on HAVE_MMIOTRACE_SUPPORT && PCI
+	select GENERIC_TRACER
+	help
+	  Mmiotrace traces Memory Mapped I/O access and is meant for
+	  debugging and reverse engineering. It is called from the ioremap
+	  implementation and works via page faults. Tracing is disabled by
+	  default and can be enabled at run-time.
+
+	  See Documentation/trace/mmiotrace.rst.
+	  If you are not helping to develop drivers, say N.
+
 config ENABLE_DEFAULT_TRACERS
 	bool "Trace process context switches and events"
 	depends on !GENERIC_TRACER
@@ -410,30 +503,6 @@
 
 	  Say N if unsure.
 
-config STACK_TRACER
-	bool "Trace max stack"
-	depends on HAVE_FUNCTION_TRACER
-	select FUNCTION_TRACER
-	select STACKTRACE
-	select KALLSYMS
-	help
-	  This special tracer records the maximum stack footprint of the
-	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
-
-	  This tracer works by hooking into every function call that the
-	  kernel executes, and keeping a maximum stack depth value and
-	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
-	  then it will not have any overhead while the stack tracer
-	  is disabled.
-
-	  To enable the stack tracer on bootup, pass in 'stacktrace'
-	  on the kernel command line.
-
-	  The stack tracer can also be enabled or disabled via the
-	  sysctl kernel.stack_tracer_enabled
-
-	  Say N if unsure.
-
 config BLK_DEV_IO_TRACE
 	bool "Support for tracing block IO actions"
 	depends on SYSFS
@@ -531,53 +600,6 @@
 config PROBE_EVENTS
 	def_bool n
 
-config DYNAMIC_FTRACE
-	bool "enable/disable function tracing dynamically"
-	depends on FUNCTION_TRACER
-	depends on HAVE_DYNAMIC_FTRACE
-	default y
-	help
-	  This option will modify all the calls to function tracing
-	  dynamically (will patch them out of the binary image and
-	  replace them with a No-Op instruction) on boot up. During
-	  compile time, a table is made of all the locations that ftrace
-	  can function trace, and this table is linked into the kernel
-	  image. When this is enabled, functions can be individually
-	  enabled, and the functions not enabled will not affect
-	  performance of the system.
-
-	  See the files in /sys/kernel/debug/tracing:
-	    available_filter_functions
-	    set_ftrace_filter
-	    set_ftrace_notrace
-
-	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
-	  otherwise has native performance as long as no tracing is active.
-
-config DYNAMIC_FTRACE_WITH_REGS
-	def_bool y
-	depends on DYNAMIC_FTRACE
-	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
-
-config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
-	def_bool y
-	depends on DYNAMIC_FTRACE
-	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
-
-config FUNCTION_PROFILER
-	bool "Kernel function profiler"
-	depends on FUNCTION_TRACER
-	default n
-	help
-	  This option enables the kernel function profiler. A file is created
-	  in debugfs called function_profile_enabled which defaults to zero.
-	  When a 1 is echoed into this file profiling begins, and when a
-	  zero is entered, profiling stops. A "functions" file is created in
-	  the trace_stat directory; this file shows the list of functions that
-	  have been hit and their counters.
-
-	  If in doubt, say N.
-
 config BPF_KPROBE_OVERRIDE
 	bool "Enable BPF programs to override a kprobed function"
 	depends on BPF_EVENTS
@@ -592,54 +614,6 @@
 	depends on DYNAMIC_FTRACE
 	depends on HAVE_FTRACE_MCOUNT_RECORD
 
-config FTRACE_SELFTEST
-	bool
-
-config FTRACE_STARTUP_TEST
-	bool "Perform a startup test on ftrace"
-	depends on GENERIC_TRACER
-	select FTRACE_SELFTEST
-	help
-	  This option performs a series of startup tests on ftrace. On bootup
-	  a series of tests are made to verify that the tracer is
-	  functioning properly. It will do tests on all the configured
-	  tracers of ftrace.
-
-config EVENT_TRACE_STARTUP_TEST
-	bool "Run selftest on trace events"
-	depends on FTRACE_STARTUP_TEST
-	default y
-	help
-	  This option performs a test on all trace events in the system.
-	  It basically just enables each event and runs some code that
-	  will trigger events (not necessarily the event it enables)
-	  This may take some time run as there are a lot of events.
-
-config EVENT_TRACE_TEST_SYSCALLS
-	bool "Run selftest on syscall events"
-	depends on EVENT_TRACE_STARTUP_TEST
-	help
-	 This option will also enable testing every syscall event.
-	 It only enables the event and disables it and runs various loads
-	 with the event enabled. This adds a bit more time for kernel boot
-	 up since it runs this on every system call defined.
-
-	 TBD - enable a way to actually call the syscalls as we test their
-	       events
-
-config MMIOTRACE
-	bool "Memory mapped IO tracing"
-	depends on HAVE_MMIOTRACE_SUPPORT && PCI
-	select GENERIC_TRACER
-	help
-	  Mmiotrace traces Memory Mapped I/O access and is meant for
-	  debugging and reverse engineering. It is called from the ioremap
-	  implementation and works via page faults. Tracing is disabled by
-	  default and can be enabled at run-time.
-
-	  See Documentation/trace/mmiotrace.rst.
-	  If you are not helping to develop drivers, say N.
-
 config TRACING_MAP
 	bool
 	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -680,16 +654,6 @@
 
 	  If unsure, say N.
 
-config MMIOTRACE_TEST
-	tristate "Test module for mmiotrace"
-	depends on MMIOTRACE && m
-	help
-	  This is a dumb module for testing mmiotrace. It is very dangerous
-	  as it will write garbage to IO memory starting at a given address.
-	  However, it should be safe to use on e.g. unused portion of VRAM.
-
-	  Say N, unless you absolutely know what you are doing.
-
 config TRACEPOINT_BENCHMARK
 	bool "Add tracepoint that benchmarks tracepoints"
 	help
@@ -736,44 +700,6 @@
 
 	  If unsure, say N.
 
-config RING_BUFFER_STARTUP_TEST
-       bool "Ring buffer startup self test"
-       depends on RING_BUFFER
-       help
-	 Run a simple self test on the ring buffer on boot up. Late in the
-	 kernel boot sequence, the test will start that kicks off
-	 a thread per cpu. Each thread will write various size events
-	 into the ring buffer. Another thread is created to send IPIs
-	 to each of the threads, where the IPI handler will also write
-	 to the ring buffer, to test/stress the nesting ability.
-	 If any anomalies are discovered, a warning will be displayed
-	 and all ring buffers will be disabled.
-
-	 The test runs for 10 seconds. This will slow your boot time
-	 by at least 10 more seconds.
-
-	 At the end of the test, statics and more checks are done.
-	 It will output the stats of each per cpu buffer. What
-	 was written, the sizes, what was read, what was lost, and
-	 other similar details.
-
-	 If unsure, say N
-
-config PREEMPTIRQ_DELAY_TEST
-	tristate "Preempt / IRQ disable delay thread to test latency tracers"
-	depends on m
-	help
-	  Select this option to build a test module that can help test latency
-	  tracers by executing a preempt or irq disable section with a user
-	  configurable delay. The module busy waits for the duration of the
-	  critical section.
-
-	  For example, the following invocation generates a burst of three
-	  irq-disabled critical sections for 500us:
-	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3
-
-	  If unsure, say N
-
 config TRACE_EVAL_MAP_FILE
        bool "Show eval mappings for trace events"
        depends on TRACING
@@ -814,6 +740,114 @@
 	  Note that on a kernel compiled with this config, ftrace will
 	  run significantly slower.
 
+config FTRACE_SELFTEST
+	bool
+
+config FTRACE_STARTUP_TEST
+	bool "Perform a startup test on ftrace"
+	depends on GENERIC_TRACER
+	select FTRACE_SELFTEST
+	help
+	  This option performs a series of startup tests on ftrace. On bootup
+	  a series of tests is run to verify that the tracer is
+	  functioning properly. It will do tests on all the configured
+	  tracers of ftrace.
+
+config EVENT_TRACE_STARTUP_TEST
+	bool "Run selftest on trace events"
+	depends on FTRACE_STARTUP_TEST
+	default y
+	help
+	  This option performs a test on all trace events in the system.
+	  It basically just enables each event and runs some code that
+	  will trigger events (not necessarily the event it enables).
+	  This may take some time to run as there are a lot of events.
+
+config EVENT_TRACE_TEST_SYSCALLS
+	bool "Run selftest on syscall events"
+	depends on EVENT_TRACE_STARTUP_TEST
+	help
+	 This option will also enable testing every syscall event.
+	 It only enables each event, runs various loads with the event
+	 enabled, and then disables it. This adds a bit more time to
+	 kernel boot up since it does this for every system call defined.
+
+	 TBD - enable a way to actually call the syscalls as we test their
+	       events
+
+config RING_BUFFER_STARTUP_TEST
+	bool "Ring buffer startup self test"
+	depends on RING_BUFFER
+	help
+	 Run a simple self test on the ring buffer on boot up. Late in the
+	 kernel boot sequence, the test will start and kick off
+	 a thread per cpu. Each thread will write events of various sizes
+	 into the ring buffer. Another thread is created to send IPIs
+	 to each of the threads, where the IPI handler will also write
+	 to the ring buffer, to test/stress the nesting ability.
+	 If any anomalies are discovered, a warning will be displayed
+	 and all ring buffers will be disabled.
+
+	 The test runs for 10 seconds. This will slow your boot time
+	 by at least 10 more seconds.
+
+	 At the end of the test, statistics and more checks are done.
+	 It will output the stats of each per cpu buffer: what
+	 was written, the sizes, what was read, what was lost, and
+	 other similar details.
+
+	 If unsure, say N.
+
+config MMIOTRACE_TEST
+	tristate "Test module for mmiotrace"
+	depends on MMIOTRACE && m
+	help
+	  This is a dumb module for testing mmiotrace. It is very dangerous
+	  as it will write garbage to IO memory starting at a given address.
+	  However, it should be safe to use on e.g. an unused portion of VRAM.
+
+	  Say N, unless you absolutely know what you are doing.
+
+config PREEMPTIRQ_DELAY_TEST
+	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
+	depends on m
+	help
+	  Select this option to build a test module that can help test latency
+	  tracers by executing a preempt or irq disable section with a user
+	  configurable delay. The module busy waits for the duration of the
+	  critical section.
+
+	  For example, the following invocation generates a burst of three
+	  irq-disabled critical sections for 500us:
+	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3
+
+	  If unsure, say N.
+
+config SYNTH_EVENT_GEN_TEST
+	tristate "Test module for in-kernel synthetic event generation"
+	depends on HIST_TRIGGERS
+	help
+	  This option creates a test module to check the base
+	  functionality of in-kernel synthetic event definition and
+	  generation.
+
+	  To test, insert the module, and then check the trace buffer
+	  for the generated sample events.
+
+	  If unsure, say N.
+
+config KPROBE_EVENT_GEN_TEST
+	tristate "Test module for in-kernel kprobe event generation"
+	depends on KPROBE_EVENTS
+	help
+	  This option creates a test module to check the base
+	  functionality of in-kernel kprobe event definition.
+
+	  To test, insert the module, and then check the trace buffer
+	  for the generated kprobe events.
+
+	  If unsure, say N.
+
 endif # FTRACE
 
 endif # TRACING_SUPPORT
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 0e63db6..f9dcd19 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -44,6 +44,8 @@
 obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_TRACING_MAP) += tracing_map.o
 obj-$(CONFIG_PREEMPTIRQ_DELAY_TEST) += preemptirq_delay_test.o
+obj-$(CONFIG_SYNTH_EVENT_GEN_TEST) += synth_event_gen_test.o
+obj-$(CONFIG_KPROBE_EVENT_GEN_TEST) += kprobe_event_gen_test.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
 obj-$(CONFIG_PREEMPTIRQ_TRACEPOINTS) += trace_preemptirq.o
@@ -83,6 +85,7 @@
 obj-$(CONFIG_DYNAMIC_EVENTS) += trace_dynevent.o
 obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
 obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
+obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o
 
 obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
 
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 475e294..0735ae8 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -68,14 +68,14 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
 {
 	struct blk_io_trace *t;
 	struct ring_buffer_event *event = NULL;
-	struct ring_buffer *buffer = NULL;
+	struct trace_buffer *buffer = NULL;
 	int pc = 0;
 	int cpu = smp_processor_id();
 	bool blk_tracer = blk_tracer_enabled;
 	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
 
 	if (blk_tracer) {
-		buffer = blk_tr->trace_buffer.buffer;
+		buffer = blk_tr->array_buffer.buffer;
 		pc = preempt_count();
 		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 						  sizeof(*t) + len + cgid_len,
@@ -215,7 +215,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 {
 	struct task_struct *tsk = current;
 	struct ring_buffer_event *event = NULL;
-	struct ring_buffer *buffer = NULL;
+	struct trace_buffer *buffer = NULL;
 	struct blk_io_trace *t;
 	unsigned long flags = 0;
 	unsigned long *sequence;
@@ -248,7 +248,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	if (blk_tracer) {
 		tracing_record_cmdline(current);
 
-		buffer = blk_tr->trace_buffer.buffer;
+		buffer = blk_tr->array_buffer.buffer;
 		pc = preempt_count();
 		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 						  sizeof(*t) + pdu_len + cgid_len,
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9bf1f2c..3f7ee10 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -62,8 +62,6 @@
 	})
 
 /* hash bits for specific function selection */
-#define FTRACE_HASH_BITS 7
-#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
@@ -146,7 +144,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 {
 	struct trace_array *tr = op->private;
 
-	if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
+	if (tr && this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid))
 		return;
 
 	op->saved_func(ip, parent_ip, op, regs);
@@ -1103,9 +1101,6 @@ struct ftrace_page {
 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
 
-/* estimate from running different kernels */
-#define NR_TO_INIT		10000
-
 static struct ftrace_page	*ftrace_pages_start;
 static struct ftrace_page	*ftrace_pages;
 
@@ -5464,7 +5459,7 @@ static void __init set_ftrace_early_graph(char *buf, int enable)
 	struct ftrace_hash *hash;
 
 	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
-	if (WARN_ON(!hash))
+	if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
 		return;
 
 	while (buf) {
@@ -5596,8 +5591,8 @@ static const struct file_operations ftrace_notrace_fops = {
 
 static DEFINE_MUTEX(graph_lock);
 
-struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
-struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
+struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
+struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
 
 enum graph_filter_type {
 	GRAPH_FILTER_NOTRACE	= 0,
@@ -5872,8 +5867,15 @@ ftrace_graph_release(struct inode *inode, struct file *file)
 
 		mutex_unlock(&graph_lock);
 
-		/* Wait till all users are no longer using the old hash */
-		synchronize_rcu();
+		/*
+		 * We need to do a hard force of sched synchronization.
+		 * This is because we use preempt_disable() to do RCU, but
+		 * the function tracers can be called where RCU is not watching
+		 * (like before user_exit()). We cannot rely on the RCU
+		 * infrastructure to do the synchronization, thus we must do it
+		 * ourselves.
+		 */
+		schedule_on_each_cpu(ftrace_sync);
 
 		free_ftrace_hash(old_hash);
 	}
@@ -6596,7 +6598,7 @@ static void add_to_clear_hash_list(struct list_head *clear_list,
 
 	func = kmalloc(sizeof(*func), GFP_KERNEL);
 	if (!func) {
-		WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
+		MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
 		return;
 	}
 
@@ -6922,7 +6924,7 @@ ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
 
 	pid_list = rcu_dereference_sched(tr->function_pids);
 
-	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
+	this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
 		       trace_ignore_this_task(pid_list, next));
 }
 
@@ -6976,7 +6978,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
 	unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
 
 	for_each_possible_cpu(cpu)
-		per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
+		per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = false;
 
 	rcu_assign_pointer(tr->function_pids, NULL);
 
@@ -7031,9 +7033,10 @@ static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
 	struct trace_array *tr = m->private;
 	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
 
-	if (v == FTRACE_NO_PIDS)
+	if (v == FTRACE_NO_PIDS) {
+		(*pos)++;
 		return NULL;
-
+	}
 	return trace_pid_next(pid_list, v, pos);
 }
 
@@ -7100,7 +7103,7 @@ static void ignore_task_cpu(void *data)
 	pid_list = rcu_dereference_protected(tr->function_pids,
 					     mutex_is_locked(&ftrace_lock));
 
-	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
+	this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
 		       trace_ignore_this_task(pid_list, current));
 }
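
The fpid_next() change above follows the seq_file convention that a ->next() callback must advance *pos even on the call that returns NULL at the end of the sequence, otherwise the core can warn about a buggy .next or re-emit the last record. A minimal sketch of that pattern outside of ftrace; the item type, list, and function names are illustrative only and are not part of this patch:

#include <linux/list.h>
#include <linux/seq_file.h>

struct example_item {
	struct list_head list;
	int val;
};

static LIST_HEAD(example_list);

/* ->next(): advance *pos unconditionally, even when the walk is finished */
static void *example_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct example_item *item = v;

	(*pos)++;
	if (list_is_last(&item->list, &example_list))
		return NULL;
	return list_next_entry(item, list);
}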
 
diff --git a/kernel/trace/kprobe_event_gen_test.c b/kernel/trace/kprobe_event_gen_test.c
new file mode 100644
index 0000000..18b0f1c
--- /dev/null
+++ b/kernel/trace/kprobe_event_gen_test.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test module for in-kernel kprobe event creation and generation.
+ *
+ * Copyright (C) 2019 Tom Zanussi <zanussi@kernel.org>
+ */
+
+#include <linux/module.h>
+#include <linux/trace_events.h>
+
+/*
+ * This module is a simple test of basic functionality for in-kernel
+ * kprobe/kretprobe event creation.  The first test uses
+ * kprobe_event_gen_cmd_start(), kprobe_event_add_fields() and
+ * kprobe_event_gen_cmd_end() to create a kprobe event, which is then
+ * enabled in order to generate trace output.  The second creates a
+ * kretprobe event using kretprobe_event_gen_cmd_start() and
+ * kretprobe_event_gen_cmd_end(), and is also then enabled.
+ *
+ * To test, select CONFIG_KPROBE_EVENT_GEN_TEST and build the module.
+ * Then:
+ *
+ * # insmod kernel/trace/kprobe_event_gen_test.ko
+ * # cat /sys/kernel/debug/tracing/trace
+ *
+ * You should see many instances of the "gen_kprobe_test" and
+ * "gen_kretprobe_test" events in the trace buffer.
+ *
+ * To remove the events, remove the module:
+ *
+ * # rmmod kprobe_event_gen_test
+ *
+ */
+
+static struct trace_event_file *gen_kprobe_test;
+static struct trace_event_file *gen_kretprobe_test;
+
+/*
+ * Test to make sure we can create a kprobe event, then add more
+ * fields.
+ */
+static int __init test_gen_kprobe_cmd(void)
+{
+	struct dynevent_cmd cmd;
+	char *buf;
+	int ret;
+
+	/* Create a buffer to hold the generated command */
+	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Before generating the command, initialize the cmd object */
+	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+	/*
+	 * Define the gen_kprobe_test event with the first 2 kprobe
+	 * fields.
+	 */
+	ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
+					 "do_sys_open",
+					 "dfd=%ax", "filename=%dx");
+	if (ret)
+		goto free;
+
+	/* Use kprobe_event_add_fields to add the rest of the fields */
+
+	ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
+	if (ret)
+		goto free;
+
+	/*
+	 * This actually creates the event.
+	 */
+	ret = kprobe_event_gen_cmd_end(&cmd);
+	if (ret)
+		goto free;
+
+	/*
+	 * Now get the gen_kprobe_test event file.  We need to prevent
+	 * the instance and event from disappearing from underneath
+	 * us, which trace_get_event_file() does (though in this case
+	 * we're using the top-level instance which never goes away).
+	 */
+	gen_kprobe_test = trace_get_event_file(NULL, "kprobes",
+					       "gen_kprobe_test");
+	if (IS_ERR(gen_kprobe_test)) {
+		ret = PTR_ERR(gen_kprobe_test);
+		goto delete;
+	}
+
+	/* Enable the event or you won't see anything */
+	ret = trace_array_set_clr_event(gen_kprobe_test->tr,
+					"kprobes", "gen_kprobe_test", true);
+	if (ret) {
+		trace_put_event_file(gen_kprobe_test);
+		goto delete;
+	}
+ out:
+	return ret;
+ delete:
+	/* We got an error after creating the event, delete it */
+	ret = kprobe_event_delete("gen_kprobe_test");
+ free:
+	kfree(buf);
+
+	goto out;
+}
+
+/*
+ * Test to make sure we can create a kretprobe event.
+ */
+static int __init test_gen_kretprobe_cmd(void)
+{
+	struct dynevent_cmd cmd;
+	char *buf;
+	int ret;
+
+	/* Create a buffer to hold the generated command */
+	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Before generating the command, initialize the cmd object */
+	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+	/*
+	 * Define the kretprobe event.
+	 */
+	ret = kretprobe_event_gen_cmd_start(&cmd, "gen_kretprobe_test",
+					    "do_sys_open",
+					    "$retval");
+	if (ret)
+		goto free;
+
+	/*
+	 * This actually creates the event.
+	 */
+	ret = kretprobe_event_gen_cmd_end(&cmd);
+	if (ret)
+		goto free;
+
+	/*
+	 * Now get the gen_kretprobe_test event file.  We need to
+	 * prevent the instance and event from disappearing from
+	 * underneath us, which trace_get_event_file() does (though in
+	 * this case we're using the top-level instance which never
+	 * goes away).
+	 */
+	gen_kretprobe_test = trace_get_event_file(NULL, "kprobes",
+						  "gen_kretprobe_test");
+	if (IS_ERR(gen_kretprobe_test)) {
+		ret = PTR_ERR(gen_kretprobe_test);
+		goto delete;
+	}
+
+	/* Enable the event or you won't see anything */
+	ret = trace_array_set_clr_event(gen_kretprobe_test->tr,
+					"kprobes", "gen_kretprobe_test", true);
+	if (ret) {
+		trace_put_event_file(gen_kretprobe_test);
+		goto delete;
+	}
+ out:
+	return ret;
+ delete:
+	/* We got an error after creating the event, delete it */
+	ret = kprobe_event_delete("gen_kretprobe_test");
+ free:
+	kfree(buf);
+
+	goto out;
+}
+
+static int __init kprobe_event_gen_test_init(void)
+{
+	int ret;
+
+	ret = test_gen_kprobe_cmd();
+	if (ret)
+		return ret;
+
+	ret = test_gen_kretprobe_cmd();
+	if (ret) {
+		WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
+						  "kprobes",
+						  "gen_kretprobe_test", false));
+		trace_put_event_file(gen_kretprobe_test);
+		WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
+	}
+
+	return ret;
+}
+
+static void __exit kprobe_event_gen_test_exit(void)
+{
+	/* Disable the event or you can't remove it */
+	WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
+					  "kprobes",
+					  "gen_kprobe_test", false));
+
+	/* Now give the file and instance back */
+	trace_put_event_file(gen_kprobe_test);
+
+	/* Now unregister and free the event */
+	WARN_ON(kprobe_event_delete("gen_kprobe_test"));
+
+	/* Disable the event or you can't remove it */
+	WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
+					  "kprobes",
+					  "gen_kretprobe_test", false));
+
+	/* Now give the file and instance back */
+	trace_put_event_file(gen_kretprobe_test);
+
+	/* Now unregister and free the event */
+	WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
+}
+
+module_init(kprobe_event_gen_test_init)
+module_exit(kprobe_event_gen_test_exit)
+
+MODULE_AUTHOR("Tom Zanussi");
+MODULE_DESCRIPTION("kprobe event generation test");
+MODULE_LICENSE("GPL v2");
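
Distilled from the test module above, the create-and-enable sequence for a single kprobe event from module code reduces to the sketch below. It reuses only the APIs exercised above; the event name, probe location, and field argument are placeholders, and teardown (disable, put, delete, as in the exit function above) is omitted:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/trace_events.h>

static struct trace_event_file *my_kprobe_file;

static int __init my_kprobe_example_init(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	/* "my_kprobe" on do_sys_open with one field is a placeholder definition */
	ret = kprobe_event_gen_cmd_start(&cmd, "my_kprobe", "do_sys_open",
					 "dfd=%ax");
	if (!ret)
		ret = kprobe_event_gen_cmd_end(&cmd);
	kfree(buf);	/* the command string is not needed once the event exists */
	if (ret)
		return ret;

	my_kprobe_file = trace_get_event_file(NULL, "kprobes", "my_kprobe");
	if (IS_ERR(my_kprobe_file)) {
		ret = PTR_ERR(my_kprobe_file);
		kprobe_event_delete("my_kprobe");
		return ret;
	}

	/* Events are created disabled; turn this one on */
	return trace_array_set_clr_event(my_kprobe_file->tr,
					 "kprobes", "my_kprobe", true);
}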
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3f65537..61f0e92 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -300,8 +300,6 @@ u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event)
 /* Missed count stored at end */
 #define RB_MISSED_STORED	(1 << 30)
 
-#define RB_MISSED_FLAGS		(RB_MISSED_EVENTS|RB_MISSED_STORED)
-
 struct buffer_data_page {
 	u64		 time_stamp;	/* page time stamp */
 	local_t		 commit;	/* write committed index */
@@ -443,7 +441,7 @@ enum {
 struct ring_buffer_per_cpu {
 	int				cpu;
 	atomic_t			record_disabled;
-	struct ring_buffer		*buffer;
+	struct trace_buffer	*buffer;
 	raw_spinlock_t			reader_lock;	/* serialize readers */
 	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
@@ -482,7 +480,7 @@ struct ring_buffer_per_cpu {
 	struct rb_irq_work		irq_work;
 };
 
-struct ring_buffer {
+struct trace_buffer {
 	unsigned			flags;
 	int				cpus;
 	atomic_t			record_disabled;
@@ -518,7 +516,7 @@ struct ring_buffer_iter {
  *
  * Returns the number of pages used by a per_cpu buffer of the ring buffer.
  */
-size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
+size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
 {
 	return buffer->buffers[cpu]->nr_pages;
 }
@@ -530,7 +528,7 @@ size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
  *
  * Returns the number of pages that have content in the ring buffer.
  */
-size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
+size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
 {
 	size_t read;
 	size_t cnt;
@@ -573,7 +571,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
 {
 	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
 	DEFINE_WAIT(wait);
@@ -684,7 +682,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
  * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
  * zero otherwise.
  */
-__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
 			  struct file *filp, poll_table *poll_table)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
@@ -742,13 +740,13 @@ __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
-static inline u64 rb_time_stamp(struct ring_buffer *buffer)
+static inline u64 rb_time_stamp(struct trace_buffer *buffer)
 {
 	/* shift to debug/test normalization and TIME_EXTENTS */
 	return buffer->clock() << DEBUG_SHIFT;
 }
 
-u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu)
 {
 	u64 time;
 
@@ -760,7 +758,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 
-void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
 				      int cpu, u64 *ts)
 {
 	/* Just stupid testing the normalize function and deltas */
@@ -1283,7 +1281,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 }
 
 static struct ring_buffer_per_cpu *
-rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
+rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_page *bpage;
@@ -1368,16 +1366,17 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
  * __ring_buffer_alloc - allocate a new ring_buffer
  * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
+ * @key: ring buffer reader_lock_key.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
  * flag. This flag means that the buffer will overwrite old data
  * when the buffer wraps. If this flag is not set, the buffer will
  * drop data when the tail hits the head.
  */
-struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 					struct lock_class_key *key)
 {
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	long nr_pages;
 	int bsize;
 	int cpu;
@@ -1447,7 +1446,7 @@ EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
  * @buffer: the buffer to free.
  */
 void
-ring_buffer_free(struct ring_buffer *buffer)
+ring_buffer_free(struct trace_buffer *buffer)
 {
 	int cpu;
 
@@ -1463,18 +1462,18 @@ ring_buffer_free(struct ring_buffer *buffer)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free);
 
-void ring_buffer_set_clock(struct ring_buffer *buffer,
+void ring_buffer_set_clock(struct trace_buffer *buffer,
 			   u64 (*clock)(void))
 {
 	buffer->clock = clock;
 }
 
-void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs)
+void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
 {
 	buffer->time_stamp_abs = abs;
 }
 
-bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer)
+bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
 {
 	return buffer->time_stamp_abs;
 }
@@ -1712,7 +1711,7 @@ static void update_pages_handler(struct work_struct *work)
  *
  * Returns 0 on success and < 0 on failure.
  */
-int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 			int cpu_id)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
@@ -1891,7 +1890,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
-void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
+void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
 {
 	mutex_lock(&buffer->mutex);
 	if (val)
@@ -2206,7 +2205,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 {
 	struct buffer_page *tail_page = info->tail_page;
 	struct buffer_page *commit_page = cpu_buffer->commit_page;
-	struct ring_buffer *buffer = cpu_buffer->buffer;
+	struct trace_buffer *buffer = cpu_buffer->buffer;
 	struct buffer_page *next_page;
 	int ret;
 
@@ -2330,11 +2329,11 @@ static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
 
 /**
  * rb_update_event - update event type and data
+ * @cpu_buffer: The per cpu buffer of the @event
  * @event: the event to update
- * @type: the type of event
- * @length: the size of the event field in the ring buffer
+ * @info: The info to update the @event with (contains length and delta)
  *
- * Update the type and data fields of the event. The length
+ * Update the type and data fields of the @event. The length
  * is the actual size that is written to the ring buffer,
  * and with this, we can determine what to place into the
  * data field.
@@ -2609,7 +2608,7 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 }
 
 static __always_inline void
-rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 {
 	size_t nr_pages;
 	size_t dirty;
@@ -2733,7 +2732,7 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
  * Call this function before calling another ring_buffer_lock_reserve() and
  * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
  */
-void ring_buffer_nest_start(struct ring_buffer *buffer)
+void ring_buffer_nest_start(struct trace_buffer *buffer)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	int cpu;
@@ -2753,7 +2752,7 @@ void ring_buffer_nest_start(struct ring_buffer *buffer)
  * Must be called after ring_buffer_nest_start() and after the
  * ring_buffer_unlock_commit().
  */
-void ring_buffer_nest_end(struct ring_buffer *buffer)
+void ring_buffer_nest_end(struct trace_buffer *buffer)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	int cpu;
@@ -2775,7 +2774,7 @@ void ring_buffer_nest_end(struct ring_buffer *buffer)
  *
  * Must be paired with ring_buffer_lock_reserve.
  */
-int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+int ring_buffer_unlock_commit(struct trace_buffer *buffer,
 			      struct ring_buffer_event *event)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
@@ -2868,7 +2867,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 }
 
 static __always_inline struct ring_buffer_event *
-rb_reserve_next_event(struct ring_buffer *buffer,
+rb_reserve_next_event(struct trace_buffer *buffer,
 		      struct ring_buffer_per_cpu *cpu_buffer,
 		      unsigned long length)
 {
@@ -2961,7 +2960,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
  * If NULL is returned, then nothing has been allocated or locked.
  */
 struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
+ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
@@ -3062,7 +3061,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
  * If this function is called, do not call ring_buffer_unlock_commit on
  * the event.
  */
-void ring_buffer_discard_commit(struct ring_buffer *buffer,
+void ring_buffer_discard_commit(struct trace_buffer *buffer,
 				struct ring_buffer_event *event)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
@@ -3113,7 +3112,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
  * Note, like ring_buffer_lock_reserve, the length is the length of the data
  * and not the length of the event which would hold the header.
  */
-int ring_buffer_write(struct ring_buffer *buffer,
+int ring_buffer_write(struct trace_buffer *buffer,
 		      unsigned long length,
 		      void *data)
 {
@@ -3193,7 +3192,7 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
  *
  * The caller should call synchronize_rcu() after this.
  */
-void ring_buffer_record_disable(struct ring_buffer *buffer)
+void ring_buffer_record_disable(struct trace_buffer *buffer)
 {
 	atomic_inc(&buffer->record_disabled);
 }
@@ -3206,7 +3205,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  * Note, multiple disables will need the same number of enables
  * to truly enable the writing (much like preempt_disable).
  */
-void ring_buffer_record_enable(struct ring_buffer *buffer)
+void ring_buffer_record_enable(struct trace_buffer *buffer)
 {
 	atomic_dec(&buffer->record_disabled);
 }
@@ -3223,7 +3222,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
  * it works like an on/off switch, where as the disable() version
  * must be paired with a enable().
  */
-void ring_buffer_record_off(struct ring_buffer *buffer)
+void ring_buffer_record_off(struct trace_buffer *buffer)
 {
 	unsigned int rd;
 	unsigned int new_rd;
@@ -3246,7 +3245,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_off);
  * it works like an on/off switch, where as the enable() version
  * must be paired with a disable().
  */
-void ring_buffer_record_on(struct ring_buffer *buffer)
+void ring_buffer_record_on(struct trace_buffer *buffer)
 {
 	unsigned int rd;
 	unsigned int new_rd;
@@ -3264,7 +3263,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_on);
  *
  * Returns true if the ring buffer is in a state that it accepts writes.
  */
-bool ring_buffer_record_is_on(struct ring_buffer *buffer)
+bool ring_buffer_record_is_on(struct trace_buffer *buffer)
 {
 	return !atomic_read(&buffer->record_disabled);
 }
@@ -3280,7 +3279,7 @@ bool ring_buffer_record_is_on(struct ring_buffer *buffer)
  * ring_buffer_record_disable(), as that is a temporary disabling of
  * the ring buffer.
  */
-bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
 {
 	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
 }
@@ -3295,7 +3294,7 @@ bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
  *
  * The caller should call synchronize_rcu() after this.
  */
-void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
+void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
@@ -3315,7 +3314,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  * Note, multiple disables will need the same number of enables
  * to truly enable the writing (much like preempt_disable).
  */
-void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
+void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
@@ -3345,7 +3344,7 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to read from.
  */
-u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
 {
 	unsigned long flags;
 	struct ring_buffer_per_cpu *cpu_buffer;
@@ -3378,7 +3377,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to read from.
  */
-unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long ret;
@@ -3398,7 +3397,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to get the entries from.
  */
-unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
@@ -3417,7 +3416,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to get the number of overruns from
  */
-unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long ret;
@@ -3440,7 +3439,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
  * @cpu: The per CPU buffer to get the number of overruns from
  */
 unsigned long
-ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long ret;
@@ -3462,7 +3461,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
  * @cpu: The per CPU buffer to get the number of overruns from
  */
 unsigned long
-ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
+ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long ret;
@@ -3483,7 +3482,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
  * @cpu: The per CPU buffer to get the number of events read
  */
 unsigned long
-ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
@@ -3502,7 +3501,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
  * Returns the total number of entries in the ring buffer
  * (all CPU entries)
  */
-unsigned long ring_buffer_entries(struct ring_buffer *buffer)
+unsigned long ring_buffer_entries(struct trace_buffer *buffer)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long entries = 0;
@@ -3525,7 +3524,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries);
  * Returns the total number of overruns in the ring buffer
  * (all CPU entries)
  */
-unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
+unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long overruns = 0;
@@ -3949,7 +3948,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_peek);
 static struct ring_buffer_event *
 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 {
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
 	int nr_loops = 0;
@@ -4077,7 +4076,7 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
  * not consume the data.
  */
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
 		 unsigned long *lost_events)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
@@ -4141,7 +4140,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
  * and eventually empty the ring buffer if the producer is slower.
  */
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
 		    unsigned long *lost_events)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
@@ -4201,7 +4200,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
  * This overall must be paired with ring_buffer_read_finish.
  */
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
+ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_iter *iter;
@@ -4331,8 +4330,9 @@ EXPORT_SYMBOL_GPL(ring_buffer_read);
 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)
  * @buffer: The ring buffer.
+ * @cpu: The CPU to get ring buffer size from.
  */
-unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
 {
 	/*
 	 * Earlier, this method returned
@@ -4398,7 +4398,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
  * @buffer: The ring buffer to reset a per cpu buffer of
  * @cpu: The CPU buffer to be reset
  */
-void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
+void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	unsigned long flags;
@@ -4435,7 +4435,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
  * ring_buffer_reset - reset a ring buffer
  * @buffer: The ring buffer to reset all cpu buffers
  */
-void ring_buffer_reset(struct ring_buffer *buffer)
+void ring_buffer_reset(struct trace_buffer *buffer)
 {
 	int cpu;
 
@@ -4448,7 +4448,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset);
  * rind_buffer_empty - is the ring buffer empty?
  * @buffer: The ring buffer to test
  */
-bool ring_buffer_empty(struct ring_buffer *buffer)
+bool ring_buffer_empty(struct trace_buffer *buffer)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long flags;
@@ -4478,7 +4478,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
  * @buffer: The ring buffer
  * @cpu: The CPU buffer to test
  */
-bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
+bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long flags;
@@ -4504,14 +4504,15 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
  * @buffer_a: One buffer to swap with
  * @buffer_b: The other buffer to swap with
+ * @cpu: the CPU of the buffers to swap
  *
  * This function is useful for tracers that want to take a "snapshot"
  * of a CPU buffer and has another back up buffer lying around.
  * it is expected that the tracer handles the cpu buffer not being
  * used at the moment.
  */
-int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
-			 struct ring_buffer *buffer_b, int cpu)
+int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+			 struct trace_buffer *buffer_b, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer_a;
 	struct ring_buffer_per_cpu *cpu_buffer_b;
@@ -4590,7 +4591,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
  * Returns:
  *  The page allocated, or ERR_PTR
  */
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
+void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_data_page *bpage = NULL;
@@ -4637,7 +4638,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
  *
  * Free a page allocated from ring_buffer_alloc_read_page.
  */
-void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct buffer_data_page *bpage = data;
@@ -4697,7 +4698,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
  *  >=0 if data has been transferred, returns the offset of consumed data.
  *  <0 if no data has been transferred.
  */
-int ring_buffer_read_page(struct ring_buffer *buffer,
+int ring_buffer_read_page(struct trace_buffer *buffer,
 			  void **data_page, size_t len, int cpu, int full)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
@@ -4868,12 +4869,12 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_page);
  */
 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
 {
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	long nr_pages_same;
 	int cpu_i;
 	unsigned long nr_pages;
 
-	buffer = container_of(node, struct ring_buffer, node);
+	buffer = container_of(node, struct trace_buffer, node);
 	if (cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
@@ -4923,7 +4924,7 @@ int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
 static struct task_struct *rb_threads[NR_CPUS] __initdata;
 
 struct rb_test_data {
-	struct ring_buffer	*buffer;
+	struct trace_buffer *buffer;
 	unsigned long		events;
 	unsigned long		bytes_written;
 	unsigned long		bytes_alloc;
@@ -5065,7 +5066,7 @@ static __init int rb_hammer_test(void *arg)
 static __init int test_ringbuffer(void)
 {
 	struct task_struct *rb_hammer;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	int cpu;
 	int ret = 0;
 
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 32149e4..8df0aa8 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -29,7 +29,7 @@ static int reader_finish;
 static DECLARE_COMPLETION(read_start);
 static DECLARE_COMPLETION(read_done);
 
-static struct ring_buffer *buffer;
+static struct trace_buffer *buffer;
 static struct task_struct *producer;
 static struct task_struct *consumer;
 static unsigned long read;
diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c
new file mode 100644
index 0000000..4aefe00
--- /dev/null
+++ b/kernel/trace/synth_event_gen_test.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test module for in-kernel synthetic event creation and generation.
+ *
+ * Copyright (C) 2019 Tom Zanussi <zanussi@kernel.org>
+ */
+
+#include <linux/module.h>
+#include <linux/trace_events.h>
+
+/*
+ * This module is a simple test of basic functionality for in-kernel
+ * synthetic event creation and generation: the first and second tests
+ * use synth_event_gen_cmd_start() and synth_event_add_field(), and the
+ * third uses synth_event_create() to do it all at once with a static
+ * field array.
+ *
+ * Following that are a few examples using the created events to test
+ * various ways of tracing a synthetic event.
+ *
+ * To test, select CONFIG_SYNTH_EVENT_GEN_TEST and build the module.
+ * Then:
+ *
+ * # insmod kernel/trace/synth_event_gen_test.ko
+ * # cat /sys/kernel/debug/tracing/trace
+ *
+ * You should see several events in the trace buffer -
+ * "create_synth_test", "empty_synth_test", and several instances of
+ * "gen_synth_test".
+ *
+ * To remove the events, remove the module:
+ *
+ * # rmmod synth_event_gen_test
+ *
+ */
+
+static struct trace_event_file *create_synth_test;
+static struct trace_event_file *empty_synth_test;
+static struct trace_event_file *gen_synth_test;
+
+/*
+ * Test to make sure we can create a synthetic event, then add more
+ * fields.
+ */
+static int __init test_gen_synth_cmd(void)
+{
+	struct dynevent_cmd cmd;
+	u64 vals[7];
+	char *buf;
+	int ret;
+
+	/* Create a buffer to hold the generated command */
+	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Before generating the command, initialize the cmd object */
+	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+	/*
+	 * Create the gen_synth_test synthetic event with the
+	 * first 4 fields.
+	 */
+	ret = synth_event_gen_cmd_start(&cmd, "gen_synth_test", THIS_MODULE,
+					"pid_t", "next_pid_field",
+					"char[16]", "next_comm_field",
+					"u64", "ts_ns",
+					"u64", "ts_ms");
+	if (ret)
+		goto free;
+
+	/* Use synth_event_add_field to add the rest of the fields */
+
+	ret = synth_event_add_field(&cmd, "unsigned int", "cpu");
+	if (ret)
+		goto free;
+
+	ret = synth_event_add_field(&cmd, "char[64]", "my_string_field");
+	if (ret)
+		goto free;
+
+	ret = synth_event_add_field(&cmd, "int", "my_int_field");
+	if (ret)
+		goto free;
+
+	ret = synth_event_gen_cmd_end(&cmd);
+	if (ret)
+		goto free;
+
+	/*
+	 * Now get the gen_synth_test event file.  We need to prevent
+	 * the instance and event from disappearing from underneath
+	 * us, which trace_get_event_file() does (though in this case
+	 * we're using the top-level instance which never goes away).
+	 */
+	gen_synth_test = trace_get_event_file(NULL, "synthetic",
+					      "gen_synth_test");
+	if (IS_ERR(gen_synth_test)) {
+		ret = PTR_ERR(gen_synth_test);
+		goto delete;
+	}
+
+	/* Enable the event or you won't see anything */
+	ret = trace_array_set_clr_event(gen_synth_test->tr,
+					"synthetic", "gen_synth_test", true);
+	if (ret) {
+		trace_put_event_file(gen_synth_test);
+		goto delete;
+	}
+
+	/* Create some bogus values just for testing */
+
+	vals[0] = 777;			/* next_pid_field */
+	vals[1] = (u64)"hula hoops";	/* next_comm_field */
+	vals[2] = 1000000;		/* ts_ns */
+	vals[3] = 1000;			/* ts_ms */
+	vals[4] = smp_processor_id();	/* cpu */
+	vals[5] = (u64)"thneed";	/* my_string_field */
+	vals[6] = 598;			/* my_int_field */
+
+	/* Now generate a gen_synth_test event */
+	ret = synth_event_trace_array(gen_synth_test, vals, ARRAY_SIZE(vals));
+ out:
+	return ret;
+ delete:
+	/* We got an error after creating the event, delete it */
+	synth_event_delete("gen_synth_test");
+ free:
+	kfree(buf);
+
+	goto out;
+}
+
+/*
+ * Test to make sure we can create an initially empty synthetic event,
+ * then add all the fields.
+ */
+static int __init test_empty_synth_event(void)
+{
+	struct dynevent_cmd cmd;
+	u64 vals[7];
+	char *buf;
+	int ret;
+
+	/* Create a buffer to hold the generated command */
+	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Before generating the command, initialize the cmd object */
+	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+	/*
+	 * Create the empty_synth_test synthetic event with no fields.
+	 */
+	ret = synth_event_gen_cmd_start(&cmd, "empty_synth_test", THIS_MODULE);
+	if (ret)
+		goto free;
+
+	/* Use synth_event_add_field to add all of the fields */
+
+	ret = synth_event_add_field(&cmd, "pid_t", "next_pid_field");
+	if (ret)
+		goto free;
+
+	ret = synth_event_add_field(&cmd, "char[16]", "next_comm_field");
+	if (ret)
+		goto free;
+
+	ret = synth_event_add_field(&cmd, "u64", "ts_ns");
+	if (ret)
+		goto free;
+
+	ret = synth_event_add_field(&cmd, "u64", "ts_ms");
+	if (ret)
+		goto free;
+
+	ret = synth_event_add_field(&cmd, "unsigned int", "cpu");
+	if (ret)
+		goto free;
+
+	ret = synth_event_add_field(&cmd, "char[64]", "my_string_field");
+	if (ret)
+		goto free;
+
+	ret = synth_event_add_field(&cmd, "int", "my_int_field");
+	if (ret)
+		goto free;
+
+	/* All fields have been added, close and register the synth event */
+
+	ret = synth_event_gen_cmd_end(&cmd);
+	if (ret)
+		goto free;
+
+	/*
+	 * Now get the empty_synth_test event file.  We need to
+	 * prevent the instance and event from disappearing from
+	 * underneath us, which trace_get_event_file() does (though in
+	 * this case we're using the top-level instance which never
+	 * goes away).
+	 */
+	empty_synth_test = trace_get_event_file(NULL, "synthetic",
+						"empty_synth_test");
+	if (IS_ERR(empty_synth_test)) {
+		ret = PTR_ERR(empty_synth_test);
+		goto delete;
+	}
+
+	/* Enable the event or you won't see anything */
+	ret = trace_array_set_clr_event(empty_synth_test->tr,
+					"synthetic", "empty_synth_test", true);
+	if (ret) {
+		trace_put_event_file(empty_synth_test);
+		goto delete;
+	}
+
+	/* Create some bogus values just for testing */
+
+	vals[0] = 777;			/* next_pid_field */
+	vals[1] = (u64)"tiddlywinks";	/* next_comm_field */
+	vals[2] = 1000000;		/* ts_ns */
+	vals[3] = 1000;			/* ts_ms */
+	vals[4] = smp_processor_id();	/* cpu */
+	vals[5] = (u64)"thneed_2.0";	/* my_string_field */
+	vals[6] = 399;			/* my_int_field */
+
+	/* Now trace an empty_synth_test event */
+	ret = synth_event_trace_array(empty_synth_test, vals, ARRAY_SIZE(vals));
+ out:
+	return ret;
+ delete:
+	/* We got an error after creating the event, delete it */
+	synth_event_delete("empty_synth_test");
+ free:
+	kfree(buf);
+
+	goto out;
+}
+
+static struct synth_field_desc create_synth_test_fields[] = {
+	{ .type = "pid_t",		.name = "next_pid_field" },
+	{ .type = "char[16]",		.name = "next_comm_field" },
+	{ .type = "u64",		.name = "ts_ns" },
+	{ .type = "u64",		.name = "ts_ms" },
+	{ .type = "unsigned int",	.name = "cpu" },
+	{ .type = "char[64]",		.name = "my_string_field" },
+	{ .type = "int",		.name = "my_int_field" },
+};
+
+/*
+ * Test synthetic event creation all at once from array of field
+ * descriptors.
+ */
+static int __init test_create_synth_event(void)
+{
+	u64 vals[7];
+	int ret;
+
+	/* Create the create_synth_test event with the fields above */
+	ret = synth_event_create("create_synth_test",
+				 create_synth_test_fields,
+				 ARRAY_SIZE(create_synth_test_fields),
+				 THIS_MODULE);
+	if (ret)
+		goto out;
+
+	/*
+	 * Now get the create_synth_test event file.  We need to
+	 * prevent the instance and event from disappearing from
+	 * underneath us, which trace_get_event_file() does (though in
+	 * this case we're using the top-level instance which never
+	 * goes away).
+	 */
+	create_synth_test = trace_get_event_file(NULL, "synthetic",
+						 "create_synth_test");
+	if (IS_ERR(create_synth_test)) {
+		ret = PTR_ERR(create_synth_test);
+		goto delete;
+	}
+
+	/* Enable the event or you won't see anything */
+	ret = trace_array_set_clr_event(create_synth_test->tr,
+					"synthetic", "create_synth_test", true);
+	if (ret) {
+		trace_put_event_file(create_synth_test);
+		goto delete;
+	}
+
+	/* Create some bogus values just for testing */
+
+	vals[0] = 777;			/* next_pid_field */
+	vals[1] = (u64)"tiddlywinks";	/* next_comm_field */
+	vals[2] = 1000000;		/* ts_ns */
+	vals[3] = 1000;			/* ts_ms */
+	vals[4] = smp_processor_id();	/* cpu */
+	vals[5] = (u64)"thneed";	/* my_string_field */
+	vals[6] = 398;			/* my_int_field */
+
+	/* Now generate a create_synth_test event */
+	ret = synth_event_trace_array(create_synth_test, vals, ARRAY_SIZE(vals));
+ out:
+	return ret;
+ delete:
+	/* We got an error after creating the event, delete it */
+	ret = synth_event_delete("create_synth_test");
+
+	goto out;
+}
+
+/*
+ * Test tracing a synthetic event by reserving trace buffer space,
+ * then filling in fields one after another.
+ */
+static int __init test_add_next_synth_val(void)
+{
+	struct synth_event_trace_state trace_state;
+	int ret;
+
+	/* Start by reserving space in the trace buffer */
+	ret = synth_event_trace_start(gen_synth_test, &trace_state);
+	if (ret)
+		return ret;
+
+	/* Write some bogus values into the trace buffer, one after another */
+
+	/* next_pid_field */
+	ret = synth_event_add_next_val(777, &trace_state);
+	if (ret)
+		goto out;
+
+	/* next_comm_field */
+	ret = synth_event_add_next_val((u64)"slinky", &trace_state);
+	if (ret)
+		goto out;
+
+	/* ts_ns */
+	ret = synth_event_add_next_val(1000000, &trace_state);
+	if (ret)
+		goto out;
+
+	/* ts_ms */
+	ret = synth_event_add_next_val(1000, &trace_state);
+	if (ret)
+		goto out;
+
+	/* cpu */
+	ret = synth_event_add_next_val(smp_processor_id(), &trace_state);
+	if (ret)
+		goto out;
+
+	/* my_string_field */
+	ret = synth_event_add_next_val((u64)"thneed_2.01", &trace_state);
+	if (ret)
+		goto out;
+
+	/* my_int_field */
+	ret = synth_event_add_next_val(395, &trace_state);
+ out:
+	/* Finally, commit the event */
+	ret = synth_event_trace_end(&trace_state);
+
+	return ret;
+}
+
+/*
+ * Test tracing a synthetic event by reserving trace buffer space,
+ * then filling in fields using field names, which can be done in any
+ * order.
+ */
+static int __init test_add_synth_val(void)
+{
+	struct synth_event_trace_state trace_state;
+	int ret;
+
+	/* Start by reserving space in the trace buffer */
+	ret = synth_event_trace_start(gen_synth_test, &trace_state);
+	if (ret)
+		return ret;
+
+	/* Write some bogus values into the trace buffer, using field names */
+
+	ret = synth_event_add_val("ts_ns", 1000000, &trace_state);
+	if (ret)
+		goto out;
+
+	ret = synth_event_add_val("ts_ms", 1000, &trace_state);
+	if (ret)
+		goto out;
+
+	ret = synth_event_add_val("cpu", smp_processor_id(), &trace_state);
+	if (ret)
+		goto out;
+
+	ret = synth_event_add_val("next_pid_field", 777, &trace_state);
+	if (ret)
+		goto out;
+
+	ret = synth_event_add_val("next_comm_field", (u64)"silly putty",
+				  &trace_state);
+	if (ret)
+		goto out;
+
+	ret = synth_event_add_val("my_string_field", (u64)"thneed_9",
+				  &trace_state);
+	if (ret)
+		goto out;
+
+	ret = synth_event_add_val("my_int_field", 3999, &trace_state);
+ out:
+	/* Finally, commit the event */
+	ret = synth_event_trace_end(&trace_state);
+
+	return ret;
+}
+
+/*
+ * Test tracing a synthetic event all at once from array of values.
+ */
+static int __init test_trace_synth_event(void)
+{
+	int ret;
+
+	/* Trace some bogus values just for testing */
+	ret = synth_event_trace(create_synth_test, 7,	/* number of values */
+				444,			/* next_pid_field */
+				(u64)"clackers",	/* next_comm_field */
+				1000000,		/* ts_ns */
+				1000,			/* ts_ms */
+				smp_processor_id(),	/* cpu */
+				(u64)"Thneed",		/* my_string_field */
+				999);			/* my_int_field */
+	return ret;
+}
+
+static int __init synth_event_gen_test_init(void)
+{
+	int ret;
+
+	ret = test_gen_synth_cmd();
+	if (ret)
+		return ret;
+
+	ret = test_empty_synth_event();
+	if (ret) {
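+		/* Clean up the earlier gen_synth_test event before bailing out */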
+		WARN_ON(trace_array_set_clr_event(gen_synth_test->tr,
+						  "synthetic",
+						  "gen_synth_test", false));
+		trace_put_event_file(gen_synth_test);
+		WARN_ON(synth_event_delete("gen_synth_test"));
+		goto out;
+	}
+
+	ret = test_create_synth_event();
+	if (ret) {
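+		/* Clean up both earlier test events before bailing out */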
+		WARN_ON(trace_array_set_clr_event(gen_synth_test->tr,
+						  "synthetic",
+						  "gen_synth_test", false));
+		trace_put_event_file(gen_synth_test);
+		WARN_ON(synth_event_delete("gen_synth_test"));
+
+		WARN_ON(trace_array_set_clr_event(empty_synth_test->tr,
+						  "synthetic",
+						  "empty_synth_test", false));
+		trace_put_event_file(empty_synth_test);
+		WARN_ON(synth_event_delete("empty_synth_test"));
+		goto out;
+	}
+
+	ret = test_add_next_synth_val();
+	WARN_ON(ret);
+
+	ret = test_add_synth_val();
+	WARN_ON(ret);
+
+	ret = test_trace_synth_event();
+	WARN_ON(ret);
+ out:
+	return ret;
+}
+
+static void __exit synth_event_gen_test_exit(void)
+{
+	/* Disable the event or you can't remove it */
+	WARN_ON(trace_array_set_clr_event(gen_synth_test->tr,
+					  "synthetic",
+					  "gen_synth_test", false));
+
+	/* Now give the file and instance back */
+	trace_put_event_file(gen_synth_test);
+
+	/* Now unregister and free the synthetic event */
+	WARN_ON(synth_event_delete("gen_synth_test"));
+
+	/* Disable the event or you can't remove it */
+	WARN_ON(trace_array_set_clr_event(empty_synth_test->tr,
+					  "synthetic",
+					  "empty_synth_test", false));
+
+	/* Now give the file and instance back */
+	trace_put_event_file(empty_synth_test);
+
+	/* Now unregister and free the synthetic event */
+	WARN_ON(synth_event_delete("empty_synth_test"));
+
+	/* Disable the event or you can't remove it */
+	WARN_ON(trace_array_set_clr_event(create_synth_test->tr,
+					  "synthetic",
+					  "create_synth_test", false));
+
+	/* Now give the file and instance back */
+	trace_put_event_file(create_synth_test);
+
+	/* Now unregister and free the synthetic event */
+	WARN_ON(synth_event_delete("create_synth_test"));
+}
+
+module_init(synth_event_gen_test_init)
+module_exit(synth_event_gen_test_exit)
+
+MODULE_AUTHOR("Tom Zanussi");
+MODULE_DESCRIPTION("synthetic event generation test");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5b6ee4a..c797a15 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -162,8 +162,8 @@ union trace_eval_map_item {
 static union trace_eval_map_item *trace_eval_maps;
 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
 
-static int tracing_set_tracer(struct trace_array *tr, const char *buf);
-static void ftrace_trace_userstack(struct ring_buffer *buffer,
+int tracing_set_tracer(struct trace_array *tr, const char *buf);
+static void ftrace_trace_userstack(struct trace_buffer *buffer,
 				   unsigned long flags, int pc);
 
 #define MAX_TRACER_SIZE		100
@@ -338,7 +338,7 @@ int tracing_check_open_get_tr(struct trace_array *tr)
 }
 
 int call_filter_check_discard(struct trace_event_call *call, void *rec,
-			      struct ring_buffer *buffer,
+			      struct trace_buffer *buffer,
 			      struct ring_buffer_event *event)
 {
 	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
@@ -603,7 +603,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
 	return read;
 }
 
-static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
+static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
 {
 	u64 ts;
 
@@ -619,7 +619,7 @@ static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 
 u64 ftrace_now(int cpu)
 {
-	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
+	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
 }
 
 /**
@@ -747,22 +747,22 @@ static inline void trace_access_lock_init(void)
 #endif
 
 #ifdef CONFIG_STACKTRACE
-static void __ftrace_trace_stack(struct ring_buffer *buffer,
+static void __ftrace_trace_stack(struct trace_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc, struct pt_regs *regs);
 static inline void ftrace_trace_stack(struct trace_array *tr,
-				      struct ring_buffer *buffer,
+				      struct trace_buffer *buffer,
 				      unsigned long flags,
 				      int skip, int pc, struct pt_regs *regs);
 
 #else
-static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
+static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
 					unsigned long flags,
 					int skip, int pc, struct pt_regs *regs)
 {
 }
 static inline void ftrace_trace_stack(struct trace_array *tr,
-				      struct ring_buffer *buffer,
+				      struct trace_buffer *buffer,
 				      unsigned long flags,
 				      int skip, int pc, struct pt_regs *regs)
 {
@@ -780,7 +780,7 @@ trace_event_setup(struct ring_buffer_event *event,
 }
 
 static __always_inline struct ring_buffer_event *
-__trace_buffer_lock_reserve(struct ring_buffer *buffer,
+__trace_buffer_lock_reserve(struct trace_buffer *buffer,
 			  int type,
 			  unsigned long len,
 			  unsigned long flags, int pc)
@@ -796,8 +796,8 @@ __trace_buffer_lock_reserve(struct ring_buffer *buffer,
 
 void tracer_tracing_on(struct trace_array *tr)
 {
-	if (tr->trace_buffer.buffer)
-		ring_buffer_record_on(tr->trace_buffer.buffer);
+	if (tr->array_buffer.buffer)
+		ring_buffer_record_on(tr->array_buffer.buffer);
 	/*
 	 * This flag is looked at when buffers haven't been allocated
 	 * yet, or by some tracers (like irqsoff), that just want to
@@ -825,7 +825,7 @@ EXPORT_SYMBOL_GPL(tracing_on);
 
 
 static __always_inline void
-__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
 {
 	__this_cpu_write(trace_taskinfo_save, true);
 
@@ -848,7 +848,7 @@ __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *eve
 int __trace_puts(unsigned long ip, const char *str, int size)
 {
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	struct print_entry *entry;
 	unsigned long irq_flags;
 	int alloc;
@@ -865,11 +865,14 @@ int __trace_puts(unsigned long ip, const char *str, int size)
 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
 
 	local_save_flags(irq_flags);
-	buffer = global_trace.trace_buffer.buffer;
+	buffer = global_trace.array_buffer.buffer;
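+	/* Allow this write to nest inside another write to the same ring buffer */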
+	ring_buffer_nest_start(buffer);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
 					    irq_flags, pc);
-	if (!event)
-		return 0;
+	if (!event) {
+		size = 0;
+		goto out;
+	}
 
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
@@ -885,7 +888,8 @@ int __trace_puts(unsigned long ip, const char *str, int size)
 
 	__buffer_unlock_commit(buffer, event);
 	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
-
+ out:
+	ring_buffer_nest_end(buffer);
 	return size;
 }
 EXPORT_SYMBOL_GPL(__trace_puts);
@@ -898,10 +902,11 @@ EXPORT_SYMBOL_GPL(__trace_puts);
 int __trace_bputs(unsigned long ip, const char *str)
 {
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	struct bputs_entry *entry;
 	unsigned long irq_flags;
 	int size = sizeof(struct bputs_entry);
+	int ret = 0;
 	int pc;
 
 	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
@@ -913,11 +918,13 @@ int __trace_bputs(unsigned long ip, const char *str)
 		return 0;
 
 	local_save_flags(irq_flags);
-	buffer = global_trace.trace_buffer.buffer;
+	buffer = global_trace.array_buffer.buffer;
+
+	ring_buffer_nest_start(buffer);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
 					    irq_flags, pc);
 	if (!event)
-		return 0;
+		goto out;
 
 	entry = ring_buffer_event_data(event);
 	entry->ip			= ip;
@@ -926,7 +933,10 @@ int __trace_bputs(unsigned long ip, const char *str)
 	__buffer_unlock_commit(buffer, event);
 	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 
-	return 1;
+	ret = 1;
+ out:
+	ring_buffer_nest_end(buffer);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(__trace_bputs);
 
@@ -1036,9 +1046,9 @@ void *tracing_cond_snapshot_data(struct trace_array *tr)
 }
 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
 
-static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
-					struct trace_buffer *size_buf, int cpu_id);
-static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
+static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
+					struct array_buffer *size_buf, int cpu_id);
+static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
 
 int tracing_alloc_snapshot_instance(struct trace_array *tr)
 {
@@ -1048,7 +1058,7 @@ int tracing_alloc_snapshot_instance(struct trace_array *tr)
 
 		/* allocate spare buffer */
 		ret = resize_buffer_duplicate_size(&tr->max_buffer,
-				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
+				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
 		if (ret < 0)
 			return ret;
 
@@ -1251,8 +1261,8 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
 
 void tracer_tracing_off(struct trace_array *tr)
 {
-	if (tr->trace_buffer.buffer)
-		ring_buffer_record_off(tr->trace_buffer.buffer);
+	if (tr->array_buffer.buffer)
+		ring_buffer_record_off(tr->array_buffer.buffer);
 	/*
 	 * This flag is looked at when buffers haven't been allocated
 	 * yet, or by some tracers (like irqsoff), that just want to
@@ -1294,8 +1304,8 @@ void disable_trace_on_warning(void)
  */
 bool tracer_tracing_is_on(struct trace_array *tr)
 {
-	if (tr->trace_buffer.buffer)
-		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
+	if (tr->array_buffer.buffer)
+		return ring_buffer_record_is_on(tr->array_buffer.buffer);
 	return !tr->buffer_disabled;
 }
 
@@ -1590,8 +1600,8 @@ void latency_fsnotify(struct trace_array *tr)
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct trace_buffer *trace_buf = &tr->trace_buffer;
-	struct trace_buffer *max_buf = &tr->max_buffer;
+	struct array_buffer *trace_buf = &tr->array_buffer;
+	struct array_buffer *max_buf = &tr->max_buffer;
 	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
 	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
 
@@ -1649,8 +1659,8 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
 
 	arch_spin_lock(&tr->max_lock);
 
-	/* Inherit the recordable setting from trace_buffer */
-	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+	/* Inherit the recordable setting from array_buffer */
+	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
 		ring_buffer_record_on(tr->max_buffer.buffer);
 	else
 		ring_buffer_record_off(tr->max_buffer.buffer);
@@ -1659,7 +1669,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
 	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
 		goto out_unlock;
 #endif
-	swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
+	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
 
 	__update_max_tr(tr, tsk, cpu);
 
@@ -1692,7 +1702,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&tr->max_lock);
 
-	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
+	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
 
 	if (ret == -EBUSY) {
 		/*
@@ -1718,7 +1728,7 @@ static int wait_on_pipe(struct trace_iterator *iter, int full)
 	if (trace_buffer_iter(iter, iter->cpu_file))
 		return 0;
 
-	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
+	return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
 				full);
 }
 
@@ -1769,7 +1779,7 @@ static int run_tracer_selftest(struct tracer *type)
 	 * internal tracing to verify that everything is in order.
 	 * If we fail, we do not register this tracer.
 	 */
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 
 	tr->current_trace = type;
 
@@ -1795,7 +1805,7 @@ static int run_tracer_selftest(struct tracer *type)
 		return -1;
 	}
 	/* Only reset on passing, to avoid touching corrupted buffers */
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	if (type->use_max_tr) {
@@ -1962,9 +1972,9 @@ int __init register_tracer(struct tracer *type)
 	return ret;
 }
 
-static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
+static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
 {
-	struct ring_buffer *buffer = buf->buffer;
+	struct trace_buffer *buffer = buf->buffer;
 
 	if (!buffer)
 		return;
@@ -1978,9 +1988,9 @@ static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
 	ring_buffer_record_enable(buffer);
 }
 
-void tracing_reset_online_cpus(struct trace_buffer *buf)
+void tracing_reset_online_cpus(struct array_buffer *buf)
 {
-	struct ring_buffer *buffer = buf->buffer;
+	struct trace_buffer *buffer = buf->buffer;
 	int cpu;
 
 	if (!buffer)
@@ -2008,7 +2018,7 @@ void tracing_reset_all_online_cpus(void)
 		if (!tr->clear_trace)
 			continue;
 		tr->clear_trace = false;
-		tracing_reset_online_cpus(&tr->trace_buffer);
+		tracing_reset_online_cpus(&tr->array_buffer);
 #ifdef CONFIG_TRACER_MAX_TRACE
 		tracing_reset_online_cpus(&tr->max_buffer);
 #endif
@@ -2098,7 +2108,7 @@ int is_tracing_stopped(void)
  */
 void tracing_start(void)
 {
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	unsigned long flags;
 
 	if (tracing_disabled)
@@ -2117,7 +2127,7 @@ void tracing_start(void)
 	/* Prevent the buffers from switching */
 	arch_spin_lock(&global_trace.max_lock);
 
-	buffer = global_trace.trace_buffer.buffer;
+	buffer = global_trace.array_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
@@ -2135,7 +2145,7 @@ void tracing_start(void)
 
 static void tracing_start_tr(struct trace_array *tr)
 {
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	unsigned long flags;
 
 	if (tracing_disabled)
@@ -2156,7 +2166,7 @@ static void tracing_start_tr(struct trace_array *tr)
 		goto out;
 	}
 
-	buffer = tr->trace_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
@@ -2172,7 +2182,7 @@ static void tracing_start_tr(struct trace_array *tr)
  */
 void tracing_stop(void)
 {
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
@@ -2182,7 +2192,7 @@ void tracing_stop(void)
 	/* Prevent the buffers from switching */
 	arch_spin_lock(&global_trace.max_lock);
 
-	buffer = global_trace.trace_buffer.buffer;
+	buffer = global_trace.array_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 
@@ -2200,7 +2210,7 @@ void tracing_stop(void)
 
 static void tracing_stop_tr(struct trace_array *tr)
 {
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	unsigned long flags;
 
 	/* If global, we need to also stop the max tracer */
@@ -2211,7 +2221,7 @@ static void tracing_stop_tr(struct trace_array *tr)
 	if (tr->stop_count++)
 		goto out;
 
-	buffer = tr->trace_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 
@@ -2442,7 +2452,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *
-trace_buffer_lock_reserve(struct ring_buffer *buffer,
+trace_buffer_lock_reserve(struct trace_buffer *buffer,
 			  int type,
 			  unsigned long len,
 			  unsigned long flags, int pc)
@@ -2561,10 +2571,10 @@ void trace_buffered_event_disable(void)
 	preempt_enable();
 }
 
-static struct ring_buffer *temp_buffer;
+static struct trace_buffer *temp_buffer;
 
 struct ring_buffer_event *
-trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
+trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
 			  struct trace_event_file *trace_file,
 			  int type, unsigned long len,
 			  unsigned long flags, int pc)
@@ -2572,7 +2582,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
 	struct ring_buffer_event *entry;
 	int val;
 
-	*current_rb = trace_file->tr->trace_buffer.buffer;
+	*current_rb = trace_file->tr->array_buffer.buffer;
 
 	if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
 	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
@@ -2610,6 +2620,7 @@ static DEFINE_MUTEX(tracepoint_printk_mutex);
 static void output_printk(struct trace_event_buffer *fbuffer)
 {
 	struct trace_event_call *event_call;
+	struct trace_event_file *file;
 	struct trace_event *event;
 	unsigned long flags;
 	struct trace_iterator *iter = tracepoint_print_iter;
@@ -2623,6 +2634,12 @@ static void output_printk(struct trace_event_buffer *fbuffer)
 	    !event_call->event.funcs->trace)
 		return;
 
+	file = fbuffer->trace_file;
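+	/* Skip events that are soft-disabled or filtered out */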
+	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
+	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
+	     !filter_match_preds(file->filter, fbuffer->entry)))
+		return;
+
 	event = &fbuffer->trace_file->event_call->event;
 
 	spin_lock_irqsave(&tracepoint_iter_lock, flags);
@@ -2673,9 +2690,9 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 	if (static_key_false(&tracepoint_printk_key.key))
 		output_printk(fbuffer);
 
-	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
+	event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
 				    fbuffer->event, fbuffer->entry,
-				    fbuffer->flags, fbuffer->pc);
+				    fbuffer->flags, fbuffer->pc, fbuffer->regs);
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
@@ -2689,7 +2706,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 # define STACK_SKIP 3
 
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
-				     struct ring_buffer *buffer,
+				     struct trace_buffer *buffer,
 				     struct ring_buffer_event *event,
 				     unsigned long flags, int pc,
 				     struct pt_regs *regs)
@@ -2710,7 +2727,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
  * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
  */
 void
-trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 				   struct ring_buffer_event *event)
 {
 	__buffer_unlock_commit(buffer, event);
@@ -2845,7 +2862,7 @@ trace_function(struct trace_array *tr,
 	       int pc)
 {
 	struct trace_event_call *call = &event_function;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
 
@@ -2883,7 +2900,7 @@ struct ftrace_stacks {
 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
 
-static void __ftrace_trace_stack(struct ring_buffer *buffer,
+static void __ftrace_trace_stack(struct trace_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc, struct pt_regs *regs)
 {
@@ -2958,7 +2975,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 }
 
 static inline void ftrace_trace_stack(struct trace_array *tr,
-				      struct ring_buffer *buffer,
+				      struct trace_buffer *buffer,
 				      unsigned long flags,
 				      int skip, int pc, struct pt_regs *regs)
 {
@@ -2971,7 +2988,7 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 		   int pc)
 {
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 
 	if (rcu_is_watching()) {
 		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
@@ -3009,7 +3026,7 @@ void trace_dump_stack(int skip)
 	/* Skip 1 to skip this function. */
 	skip++;
 #endif
-	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
+	__ftrace_trace_stack(global_trace.array_buffer.buffer,
 			     flags, skip, preempt_count(), NULL);
 }
 EXPORT_SYMBOL_GPL(trace_dump_stack);
@@ -3018,7 +3035,7 @@ EXPORT_SYMBOL_GPL(trace_dump_stack);
 static DEFINE_PER_CPU(int, user_stack_count);
 
 static void
-ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
+ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
 {
 	struct trace_event_call *call = &event_user_stack;
 	struct ring_buffer_event *event;
@@ -3063,7 +3080,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	preempt_enable();
 }
 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
-static void ftrace_trace_userstack(struct ring_buffer *buffer,
+static void ftrace_trace_userstack(struct trace_buffer *buffer,
 				   unsigned long flags, int pc)
 {
 }
@@ -3109,7 +3126,7 @@ static int alloc_percpu_trace_buffer(void)
 	struct trace_buffer_struct *buffers;
 
 	buffers = alloc_percpu(struct trace_buffer_struct);
-	if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
+	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
 		return -ENOMEM;
 
 	trace_percpu_buffer = buffers;
@@ -3154,7 +3171,7 @@ void trace_printk_init_buffers(void)
 	 * directly here. If the global_trace.buffer is already
 	 * allocated here, then this was called by module code.
 	 */
-	if (global_trace.trace_buffer.buffer)
+	if (global_trace.array_buffer.buffer)
 		tracing_start_cmdline_record();
 }
 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
@@ -3188,7 +3205,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
 	struct trace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	struct trace_array *tr = &global_trace;
 	struct bprint_entry *entry;
 	unsigned long flags;
@@ -3213,11 +3230,12 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
 
 	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
-		goto out;
+		goto out_put;
 
 	local_save_flags(flags);
 	size = sizeof(*entry) + sizeof(u32) * len;
-	buffer = tr->trace_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
+	ring_buffer_nest_start(buffer);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
 					    flags, pc);
 	if (!event)
@@ -3233,6 +3251,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	}
 
 out:
+	ring_buffer_nest_end(buffer);
+out_put:
 	put_trace_buf();
 
 out_nobuffer:
@@ -3245,7 +3265,7 @@ EXPORT_SYMBOL_GPL(trace_vbprintk);
 
 __printf(3, 0)
 static int
-__trace_array_vprintk(struct ring_buffer *buffer,
+__trace_array_vprintk(struct trace_buffer *buffer,
 		      unsigned long ip, const char *fmt, va_list args)
 {
 	struct trace_event_call *call = &event_print;
@@ -3275,6 +3295,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 
 	local_save_flags(flags);
 	size = sizeof(*entry) + len + 1;
+	ring_buffer_nest_start(buffer);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
 					    flags, pc);
 	if (!event)
@@ -3289,6 +3310,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 	}
 
 out:
+	ring_buffer_nest_end(buffer);
 	put_trace_buf();
 
 out_nobuffer:
@@ -3302,7 +3324,7 @@ __printf(3, 0)
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
+	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
 }
 
 __printf(3, 0)
@@ -3326,7 +3348,7 @@ int trace_array_printk(struct trace_array *tr,
 EXPORT_SYMBOL_GPL(trace_array_printk);
 
 __printf(3, 4)
-int trace_array_printk_buf(struct ring_buffer *buffer,
+int trace_array_printk_buf(struct trace_buffer *buffer,
 			   unsigned long ip, const char *fmt, ...)
 {
 	int ret;
@@ -3367,7 +3389,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 	if (buf_iter)
 		event = ring_buffer_iter_peek(buf_iter, ts);
 	else
-		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
+		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
 					 lost_events);
 
 	if (event) {
@@ -3382,7 +3404,7 @@ static struct trace_entry *
 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
 		  unsigned long *missing_events, u64 *ent_ts)
 {
-	struct ring_buffer *buffer = iter->trace_buffer->buffer;
+	struct trace_buffer *buffer = iter->array_buffer->buffer;
 	struct trace_entry *ent, *next = NULL;
 	unsigned long lost_events = 0, next_lost = 0;
 	int cpu_file = iter->cpu_file;
@@ -3459,7 +3481,7 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
 
 static void trace_consume(struct trace_iterator *iter)
 {
-	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
+	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
 			    &iter->lost_events);
 }
 
@@ -3497,7 +3519,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 	unsigned long entries = 0;
 	u64 ts;
 
-	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
+	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
 
 	buf_iter = trace_buffer_iter(iter, cpu);
 	if (!buf_iter)
@@ -3511,13 +3533,13 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 	 * by the timestamp being before the start of the buffer.
 	 */
 	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
-		if (ts >= iter->trace_buffer->time_start)
+		if (ts >= iter->array_buffer->time_start)
 			break;
 		entries++;
 		ring_buffer_read(buf_iter, NULL);
 	}
 
-	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
+	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
 }
 
 /*
@@ -3602,7 +3624,7 @@ static void s_stop(struct seq_file *m, void *p)
 }
 
 static void
-get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
+get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
 		      unsigned long *entries, int cpu)
 {
 	unsigned long count;
@@ -3624,7 +3646,7 @@ get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
 }
 
 static void
-get_total_entries(struct trace_buffer *buf,
+get_total_entries(struct array_buffer *buf,
 		  unsigned long *total, unsigned long *entries)
 {
 	unsigned long t, e;
@@ -3647,7 +3669,7 @@ unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
 	if (!tr)
 		tr = &global_trace;
 
-	get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
+	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
 
 	return entries;
 }
@@ -3659,7 +3681,7 @@ unsigned long trace_total_entries(struct trace_array *tr)
 	if (!tr)
 		tr = &global_trace;
 
-	get_total_entries(&tr->trace_buffer, &total, &entries);
+	get_total_entries(&tr->array_buffer, &total, &entries);
 
 	return entries;
 }
@@ -3676,7 +3698,7 @@ static void print_lat_help_header(struct seq_file *m)
 		    "#     \\   /      |||||  \\    |   /         \n");
 }
 
-static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
+static void print_event_info(struct array_buffer *buf, struct seq_file *m)
 {
 	unsigned long total;
 	unsigned long entries;
@@ -3687,7 +3709,7 @@ static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
 	seq_puts(m, "#\n");
 }
 
-static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
+static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
 				   unsigned int flags)
 {
 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
@@ -3698,7 +3720,7 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
 	seq_printf(m, "#              | |     %s    |       |         |\n",	 tgid ? "  |      " : "");
 }
 
-static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
+static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
 				       unsigned int flags)
 {
 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
@@ -3720,7 +3742,7 @@ void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
 	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
-	struct trace_buffer *buf = iter->trace_buffer;
+	struct array_buffer *buf = iter->array_buffer;
 	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
 	struct tracer *type = iter->trace;
 	unsigned long entries;
@@ -3795,7 +3817,7 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
 	    cpumask_test_cpu(iter->cpu, iter->started))
 		return;
 
-	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
+	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
 		return;
 
 	if (cpumask_available(iter->started))
@@ -3929,7 +3951,7 @@ int trace_empty(struct trace_iterator *iter)
 			if (!ring_buffer_iter_empty(buf_iter))
 				return 0;
 		} else {
-			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
+			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
 				return 0;
 		}
 		return 1;
@@ -3941,7 +3963,7 @@ int trace_empty(struct trace_iterator *iter)
 			if (!ring_buffer_iter_empty(buf_iter))
 				return 0;
 		} else {
-			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
+			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
 				return 0;
 		}
 	}
@@ -4031,10 +4053,10 @@ void trace_default_header(struct seq_file *m)
 	} else {
 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
 			if (trace_flags & TRACE_ITER_IRQ_INFO)
-				print_func_help_header_irq(iter->trace_buffer,
+				print_func_help_header_irq(iter->array_buffer,
 							   m, trace_flags);
 			else
-				print_func_help_header(iter->trace_buffer, m,
+				print_func_help_header(iter->array_buffer, m,
 						       trace_flags);
 		}
 	}
@@ -4192,21 +4214,21 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 #ifdef CONFIG_TRACER_MAX_TRACE
 	/* Currently only the top directory has a snapshot */
 	if (tr->current_trace->print_max || snapshot)
-		iter->trace_buffer = &tr->max_buffer;
+		iter->array_buffer = &tr->max_buffer;
 	else
 #endif
-		iter->trace_buffer = &tr->trace_buffer;
+		iter->array_buffer = &tr->array_buffer;
 	iter->snapshot = snapshot;
 	iter->pos = -1;
 	iter->cpu_file = tracing_get_cpu(inode);
 	mutex_init(&iter->mutex);
 
 	/* Notify the tracer early; before we stop tracing. */
-	if (iter->trace && iter->trace->open)
+	if (iter->trace->open)
 		iter->trace->open(iter);
 
 	/* Annotate start of buffers if we had overruns */
-	if (ring_buffer_overruns(iter->trace_buffer->buffer))
+	if (ring_buffer_overruns(iter->array_buffer->buffer))
 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
@@ -4220,7 +4242,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
 			iter->buffer_iter[cpu] =
-				ring_buffer_read_prepare(iter->trace_buffer->buffer,
+				ring_buffer_read_prepare(iter->array_buffer->buffer,
 							 cpu, GFP_KERNEL);
 		}
 		ring_buffer_read_prepare_sync();
@@ -4231,7 +4253,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
-			ring_buffer_read_prepare(iter->trace_buffer->buffer,
+			ring_buffer_read_prepare(iter->array_buffer->buffer,
 						 cpu, GFP_KERNEL);
 		ring_buffer_read_prepare_sync();
 		ring_buffer_read_start(iter->buffer_iter[cpu]);
@@ -4357,7 +4379,7 @@ static int tracing_open(struct inode *inode, struct file *file)
 	/* If this file was open for write, then erase contents */
 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 		int cpu = tracing_get_cpu(inode);
-		struct trace_buffer *trace_buf = &tr->trace_buffer;
+		struct array_buffer *trace_buf = &tr->array_buffer;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 		if (tr->current_trace->print_max)
@@ -4554,20 +4576,13 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
 	return count;
 }
 
-static ssize_t
-tracing_cpumask_write(struct file *filp, const char __user *ubuf,
-		      size_t count, loff_t *ppos)
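+/*
+ * Apply @tracing_cpumask_new to @tr: recording is disabled on CPUs removed
+ * from the mask and re-enabled on CPUs added to it.
+ */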
+int tracing_set_cpumask(struct trace_array *tr,
+			cpumask_var_t tracing_cpumask_new)
 {
-	struct trace_array *tr = file_inode(filp)->i_private;
-	cpumask_var_t tracing_cpumask_new;
-	int err, cpu;
+	int cpu;
 
-	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
-		return -ENOMEM;
-
-	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
-	if (err)
-		goto err_unlock;
+	if (!tr)
+		return -EINVAL;
 
 	local_irq_disable();
 	arch_spin_lock(&tr->max_lock);
@@ -4578,24 +4593,47 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		 */
 		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
-			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
+			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
+			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
 		}
 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
-			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
+			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
+			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
 		}
 	}
 	arch_spin_unlock(&tr->max_lock);
 	local_irq_enable();
 
 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
+
+	return 0;
+}
+
+static ssize_t
+tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+		      size_t count, loff_t *ppos)
+{
+	struct trace_array *tr = file_inode(filp)->i_private;
+	cpumask_var_t tracing_cpumask_new;
+	int err;
+
+	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+		return -ENOMEM;
+
+	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
+	if (err)
+		goto err_free;
+
+	err = tracing_set_cpumask(tr, tracing_cpumask_new);
+	if (err)
+		goto err_free;
+
 	free_cpumask_var(tracing_cpumask_new);
 
 	return count;
 
-err_unlock:
+err_free:
 	free_cpumask_var(tracing_cpumask_new);
 
 	return err;
@@ -4726,7 +4764,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 		ftrace_pid_follow_fork(tr, enabled);
 
 	if (mask == TRACE_ITER_OVERWRITE) {
-		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
+		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
 #ifdef CONFIG_TRACER_MAX_TRACE
 		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
 #endif
@@ -4740,7 +4778,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 	return 0;
 }
 
-static int trace_set_options(struct trace_array *tr, char *option)
+int trace_set_options(struct trace_array *tr, char *option)
 {
 	char *cmp;
 	int neg = 0;
@@ -5361,14 +5399,12 @@ static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
 	 * Paranoid! If ptr points to end, we don't want to increment past it.
 	 * This really should never happen.
 	 */
+	(*pos)++;
 	ptr = update_eval_map(ptr);
 	if (WARN_ON_ONCE(!ptr))
 		return NULL;
 
 	ptr++;
-
-	(*pos)++;
-
 	ptr = update_eval_map(ptr);
 
 	return ptr;
@@ -5534,11 +5570,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
 
 int tracer_init(struct tracer *t, struct trace_array *tr)
 {
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 	return t->init(tr);
 }
 
-static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
+static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
 {
 	int cpu;
 
@@ -5548,8 +5584,8 @@ static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 /* resize @tr's buffer to the size of @size_tr's entries */
-static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
-					struct trace_buffer *size_buf, int cpu_id)
+static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
+					struct array_buffer *size_buf, int cpu_id)
 {
 	int cpu, ret = 0;
 
@@ -5587,10 +5623,10 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 	ring_buffer_expanded = true;
 
 	/* May be called before buffers are initialized */
-	if (!tr->trace_buffer.buffer)
+	if (!tr->array_buffer.buffer)
 		return 0;
 
-	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
+	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
 	if (ret < 0)
 		return ret;
 
@@ -5601,8 +5637,8 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 
 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
 	if (ret < 0) {
-		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
-						     &tr->trace_buffer, cpu);
+		int r = resize_buffer_duplicate_size(&tr->array_buffer,
+						     &tr->array_buffer, cpu);
 		if (r < 0) {
 			/*
 			 * AARGH! We are left with different
@@ -5633,15 +5669,15 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
 	if (cpu == RING_BUFFER_ALL_CPUS)
-		set_buffer_entries(&tr->trace_buffer, size);
+		set_buffer_entries(&tr->array_buffer, size);
 	else
-		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
+		per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
 
 	return ret;
 }
 
-static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
-					  unsigned long size, int cpu_id)
+ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+				  unsigned long size, int cpu_id)
 {
 	int ret = size;
 
@@ -5720,7 +5756,7 @@ static void add_tracer_options(struct trace_array *tr, struct tracer *t)
 	create_trace_option_files(tr, t);
 }
 
-static int tracing_set_tracer(struct trace_array *tr, const char *buf)
+int tracing_set_tracer(struct trace_array *tr, const char *buf)
 {
 	struct tracer *t;
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -5979,7 +6015,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
 	iter->tr = tr;
-	iter->trace_buffer = &tr->trace_buffer;
+	iter->array_buffer = &tr->array_buffer;
 	iter->cpu_file = tracing_get_cpu(inode);
 	mutex_init(&iter->mutex);
 	filp->private_data = iter;
@@ -6039,7 +6075,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
 		 */
 		return EPOLLIN | EPOLLRDNORM;
 	else
-		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
+		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
 					     filp, poll_table);
 }
 
@@ -6356,8 +6392,8 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
 		for_each_tracing_cpu(cpu) {
 			/* fill in the size from first enabled cpu */
 			if (size == 0)
-				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
-			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
+				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
+			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
 				buf_size_same = 0;
 				break;
 			}
@@ -6373,7 +6409,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
 		} else
 			r = sprintf(buf, "X\n");
 	} else
-		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
+		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -6420,7 +6456,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
 
 	mutex_lock(&trace_types_lock);
 	for_each_tracing_cpu(cpu) {
-		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
+		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
 		if (!ring_buffer_expanded)
 			expanded_size += trace_buf_size >> 10;
 	}
@@ -6470,7 +6506,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	struct trace_array *tr = filp->private_data;
 	struct ring_buffer_event *event;
 	enum event_trigger_type tt = ETT_NONE;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	struct print_entry *entry;
 	unsigned long irq_flags;
 	ssize_t written;
@@ -6499,7 +6535,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	if (cnt < FAULTED_SIZE)
 		size += FAULTED_SIZE - cnt;
 
-	buffer = tr->trace_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
 					    irq_flags, preempt_count());
 	if (unlikely(!event))
@@ -6550,7 +6586,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
 {
 	struct trace_array *tr = filp->private_data;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	struct raw_data_entry *entry;
 	unsigned long irq_flags;
 	ssize_t written;
@@ -6579,7 +6615,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
 	if (cnt < FAULT_SIZE_ID)
 		size += FAULT_SIZE_ID - cnt;
 
-	buffer = tr->trace_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
 					    irq_flags, preempt_count());
 	if (!event)
@@ -6634,13 +6670,13 @@ int tracing_set_clock(struct trace_array *tr, const char *clockstr)
 
 	tr->clock_id = i;
 
-	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
+	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
 
 	/*
 	 * New clock may not be consistent with the previous clock.
 	 * Reset the buffer so that it doesn't have incomparable timestamps.
 	 */
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	if (tr->max_buffer.buffer)
@@ -6703,7 +6739,7 @@ static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
 
 	mutex_lock(&trace_types_lock);
 
-	if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
+	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
 		seq_puts(m, "delta [absolute]\n");
 	else
 		seq_puts(m, "[delta] absolute\n");
@@ -6748,7 +6784,7 @@ int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
 			goto out;
 	}
 
-	ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
+	ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	if (tr->max_buffer.buffer)
@@ -6797,7 +6833,7 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
 		ret = 0;
 
 		iter->tr = tr;
-		iter->trace_buffer = &tr->max_buffer;
+		iter->array_buffer = &tr->max_buffer;
 		iter->cpu_file = tracing_get_cpu(inode);
 		m->private = iter;
 		file->private_data = m;
@@ -6860,7 +6896,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 #endif
 		if (tr->allocated_snapshot)
 			ret = resize_buffer_duplicate_size(&tr->max_buffer,
-					&tr->trace_buffer, iter->cpu_file);
+					&tr->array_buffer, iter->cpu_file);
 		else
 			ret = tracing_alloc_snapshot_instance(tr);
 		if (ret < 0)
@@ -6935,7 +6971,7 @@ static int snapshot_raw_open(struct inode *inode, struct file *filp)
 	}
 
 	info->iter.snapshot = true;
-	info->iter.trace_buffer = &info->iter.tr->max_buffer;
+	info->iter.array_buffer = &info->iter.tr->max_buffer;
 
 	return ret;
 }
@@ -7310,7 +7346,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
 	info->iter.tr		= tr;
 	info->iter.cpu_file	= tracing_get_cpu(inode);
 	info->iter.trace	= tr->current_trace;
-	info->iter.trace_buffer = &tr->trace_buffer;
+	info->iter.array_buffer = &tr->array_buffer;
 	info->spare		= NULL;
 	/* Force reading ring buffer for first read */
 	info->read		= (unsigned int)-1;
@@ -7355,7 +7391,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 #endif
 
 	if (!info->spare) {
-		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
+		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
 							  iter->cpu_file);
 		if (IS_ERR(info->spare)) {
 			ret = PTR_ERR(info->spare);
@@ -7373,7 +7409,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 
  again:
 	trace_access_lock(iter->cpu_file);
-	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
+	ret = ring_buffer_read_page(iter->array_buffer->buffer,
 				    &info->spare,
 				    count,
 				    iter->cpu_file, 0);
@@ -7423,7 +7459,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
 	__trace_array_put(iter->tr);
 
 	if (info->spare)
-		ring_buffer_free_read_page(iter->trace_buffer->buffer,
+		ring_buffer_free_read_page(iter->array_buffer->buffer,
 					   info->spare_cpu, info->spare);
 	kfree(info);
 
@@ -7433,7 +7469,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
 }
 
 struct buffer_ref {
-	struct ring_buffer	*buffer;
+	struct trace_buffer	*buffer;
 	void			*page;
 	int			cpu;
 	refcount_t		refcount;
@@ -7528,7 +7564,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 
  again:
 	trace_access_lock(iter->cpu_file);
-	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
+	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
 
 	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
 		struct page *page;
@@ -7541,7 +7577,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		}
 
 		refcount_set(&ref->refcount, 1);
-		ref->buffer = iter->trace_buffer->buffer;
+		ref->buffer = iter->array_buffer->buffer;
 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
 		if (IS_ERR(ref->page)) {
 			ret = PTR_ERR(ref->page);
@@ -7569,7 +7605,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		spd.nr_pages++;
 		*ppos += PAGE_SIZE;
 
-		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
+		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
 	}
 
 	trace_access_unlock(iter->cpu_file);
@@ -7613,7 +7649,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 {
 	struct inode *inode = file_inode(filp);
 	struct trace_array *tr = inode->i_private;
-	struct trace_buffer *trace_buf = &tr->trace_buffer;
+	struct array_buffer *trace_buf = &tr->array_buffer;
 	int cpu = tracing_get_cpu(inode);
 	struct trace_seq *s;
 	unsigned long cnt;
@@ -7894,7 +7930,7 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
 
 	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
 
-	WARN_ONCE(!tr->percpu_dir,
+	MEM_FAIL(!tr->percpu_dir,
 		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
 
 	return tr->percpu_dir;
@@ -8215,7 +8251,7 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
 	for (cnt = 0; opts[cnt].name; cnt++) {
 		create_trace_option_file(tr, &topts[cnt], flags,
 					 &opts[cnt]);
-		WARN_ONCE(topts[cnt].entry == NULL,
+		MEM_FAIL(topts[cnt].entry == NULL,
 			  "Failed to create trace option: %s",
 			  opts[cnt].name);
 	}
@@ -8272,7 +8308,7 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
 {
 	struct trace_array *tr = filp->private_data;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	unsigned long val;
 	int ret;
 
@@ -8362,7 +8398,7 @@ static void
 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 
 static int
-allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
+allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
 {
 	enum ring_buffer_flags rb_flags;
 
@@ -8382,8 +8418,8 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
 	}
 
 	/* Allocate the first page for all buffers */
-	set_buffer_entries(&tr->trace_buffer,
-			   ring_buffer_size(tr->trace_buffer.buffer, 0));
+	set_buffer_entries(&tr->array_buffer,
+			   ring_buffer_size(tr->array_buffer.buffer, 0));
 
 	return 0;
 }
@@ -8392,18 +8428,18 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 {
 	int ret;
 
-	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
+	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
 	if (ret)
 		return ret;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	ret = allocate_trace_buffer(tr, &tr->max_buffer,
 				    allocate_snapshot ? size : 1);
-	if (WARN_ON(ret)) {
-		ring_buffer_free(tr->trace_buffer.buffer);
-		tr->trace_buffer.buffer = NULL;
-		free_percpu(tr->trace_buffer.data);
-		tr->trace_buffer.data = NULL;
+	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
+		ring_buffer_free(tr->array_buffer.buffer);
+		tr->array_buffer.buffer = NULL;
+		free_percpu(tr->array_buffer.data);
+		tr->array_buffer.data = NULL;
 		return -ENOMEM;
 	}
 	tr->allocated_snapshot = allocate_snapshot;
@@ -8417,7 +8453,7 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 	return 0;
 }
 
-static void free_trace_buffer(struct trace_buffer *buf)
+static void free_trace_buffer(struct array_buffer *buf)
 {
 	if (buf->buffer) {
 		ring_buffer_free(buf->buffer);
@@ -8432,7 +8468,7 @@ static void free_trace_buffers(struct trace_array *tr)
 	if (!tr)
 		return;
 
-	free_trace_buffer(&tr->trace_buffer);
+	free_trace_buffer(&tr->array_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	free_trace_buffer(&tr->max_buffer);
@@ -8463,6 +8499,34 @@ static void update_tracer_options(struct trace_array *tr)
 	mutex_unlock(&trace_types_lock);
 }
 
+/* Must have trace_types_lock held */
+struct trace_array *trace_array_find(const char *instance)
+{
+	struct trace_array *tr, *found = NULL;
+
+	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+		if (tr->name && strcmp(tr->name, instance) == 0) {
+			found = tr;
+			break;
+		}
+	}
+
+	return found;
+}
+
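+/* Like trace_array_find(), but takes trace_types_lock and grabs a reference */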
+struct trace_array *trace_array_find_get(const char *instance)
+{
+	struct trace_array *tr;
+
+	mutex_lock(&trace_types_lock);
+	tr = trace_array_find(instance);
+	if (tr)
+		tr->ref++;
+	mutex_unlock(&trace_types_lock);
+
+	return tr;
+}
+
 static struct trace_array *trace_array_create(const char *name)
 {
 	struct trace_array *tr;
@@ -8504,7 +8568,7 @@ static struct trace_array *trace_array_create(const char *name)
 
 	ret = event_trace_add_tracer(tr->dir, tr);
 	if (ret) {
-		tracefs_remove_recursive(tr->dir);
+		tracefs_remove(tr->dir);
 		goto out_free_tr;
 	}
 
@@ -8539,10 +8603,8 @@ static int instance_mkdir(const char *name)
 	mutex_lock(&trace_types_lock);
 
 	ret = -EEXIST;
-	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-		if (tr->name && strcmp(tr->name, name) == 0)
-			goto out_unlock;
-	}
+	if (trace_array_find(name))
+		goto out_unlock;
 
 	tr = trace_array_create(name);
 
@@ -8564,6 +8626,10 @@ static int instance_mkdir(const char *name)
  * NOTE: This function increments the reference counter associated with the
  * trace array returned. This makes sure it cannot be freed while in use.
  * Use trace_array_put() once the trace array is no longer needed.
+ * If the trace_array is to be freed, trace_array_destroy() needs to
+ * be called after the trace_array_put(), or user space can simply delete
+ * it from the tracefs instances directory. But until trace_array_put()
+ * is called, user space cannot delete it.
  *
  */
 struct trace_array *trace_array_get_by_name(const char *name)
@@ -8613,7 +8679,7 @@ static int __remove_instance(struct trace_array *tr)
 	event_trace_del_tracer(tr);
 	ftrace_clear_pids(tr);
 	ftrace_destroy_function_files(tr);
-	tracefs_remove_recursive(tr->dir);
+	tracefs_remove(tr->dir);
 	free_trace_buffers(tr);
 
 	for (i = 0; i < tr->nr_topts; i++) {
@@ -8666,12 +8732,9 @@ static int instance_rmdir(const char *name)
 	mutex_lock(&trace_types_lock);
 
 	ret = -ENODEV;
-	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-		if (tr->name && strcmp(tr->name, name) == 0) {
-			ret = __remove_instance(tr);
-			break;
-		}
-	}
+	tr = trace_array_find(name);
+	if (tr)
+		ret = __remove_instance(tr);
 
 	mutex_unlock(&trace_types_lock);
 	mutex_unlock(&event_mutex);
@@ -8684,7 +8747,7 @@ static __init void create_trace_instances(struct dentry *d_tracer)
 	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
 							 instance_mkdir,
 							 instance_rmdir);
-	if (WARN_ON(!trace_instance_dir))
+	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
 		return;
 }
 
@@ -8754,7 +8817,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 #endif
 
 	if (ftrace_create_function_files(tr, d_tracer))
-		WARN(1, "Could not allocate function filter files");
+		MEM_FAIL(1, "Could not allocate function filter files");
 
 #ifdef CONFIG_TRACER_SNAPSHOT
 	trace_create_file("snapshot", 0644, d_tracer,
@@ -9036,13 +9099,13 @@ void trace_init_global_iter(struct trace_iterator *iter)
 	iter->tr = &global_trace;
 	iter->trace = iter->tr->current_trace;
 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
-	iter->trace_buffer = &global_trace.trace_buffer;
+	iter->array_buffer = &global_trace.array_buffer;
 
 	if (iter->trace && iter->trace->open)
 		iter->trace->open(iter);
 
 	/* Annotate start of buffers if we had overruns */
-	if (ring_buffer_overruns(iter->trace_buffer->buffer))
+	if (ring_buffer_overruns(iter->array_buffer->buffer))
 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
@@ -9083,7 +9146,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 	trace_init_global_iter(&iter);
 
 	for_each_tracing_cpu(cpu) {
-		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
 	}
 
 	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -9151,7 +9214,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 	tr->trace_flags |= old_userobj;
 
 	for_each_tracing_cpu(cpu) {
-		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
 	}
 	atomic_dec(&dump_running);
 	printk_nmi_direct_exit();
@@ -9306,8 +9369,7 @@ __init static int tracer_alloc_buffers(void)
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
-		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
-		WARN_ON(1);
+		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
 		goto out_free_savedcmd;
 	}
 
@@ -9380,7 +9442,8 @@ void __init early_trace_init(void)
 	if (tracepoint_printk) {
 		tracepoint_print_iter =
 			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
-		if (WARN_ON(!tracepoint_print_iter))
+		if (MEM_FAIL(!tracepoint_print_iter,
+			     "Failed to allocate trace iterator\n"))
 			tracepoint_printk = 0;
 		else
 			static_key_enable(&tracepoint_printk_key.key);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index a98dce1..99372dd 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -93,6 +93,18 @@ enum trace_type {
 
 #include "trace_entries.h"
 
+/* Use this for memory failure errors */
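+/* It prints the message once via pr_err() and evaluates to !!(condition). */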
+#define MEM_FAIL(condition, fmt, ...) ({			\
+	static bool __section(.data.once) __warned;		\
+	int __ret_warn_once = !!(condition);			\
+								\
+	if (unlikely(__ret_warn_once && !__warned)) {		\
+		__warned = true;				\
+		pr_err("ERROR: " fmt, ##__VA_ARGS__);		\
+	}							\
+	unlikely(__ret_warn_once);				\
+})
+
 /*
  * syscalls are special, and need special handling, this is why
  * they are not included in trace_entries.h
@@ -175,9 +187,9 @@ struct trace_array_cpu {
 struct tracer;
 struct trace_option_dentry;
 
-struct trace_buffer {
+struct array_buffer {
 	struct trace_array		*tr;
-	struct ring_buffer		*buffer;
+	struct trace_buffer		*buffer;
 	struct trace_array_cpu __percpu	*data;
 	u64				time_start;
 	int				cpu;
@@ -248,7 +260,7 @@ struct cond_snapshot {
 struct trace_array {
 	struct list_head	list;
 	char			*name;
-	struct trace_buffer	trace_buffer;
+	struct array_buffer	array_buffer;
 #ifdef CONFIG_TRACER_MAX_TRACE
 	/*
 	 * The max_buffer is used to snapshot the trace when a maximum
@@ -256,12 +268,12 @@ struct trace_array {
 	 * Some tracers will use this to store a maximum trace while
 	 * it continues examining live traces.
 	 *
-	 * The buffers for the max_buffer are set up the same as the trace_buffer
+	 * The buffers for the max_buffer are set up the same as the array_buffer
 	 * When a snapshot is taken, the buffer of the max_buffer is swapped
-	 * with the buffer of the trace_buffer and the buffers are reset for
-	 * the trace_buffer so the tracing can continue.
+	 * with the buffer of the array_buffer and the buffers are reset for
+	 * the array_buffer so the tracing can continue.
 	 */
-	struct trace_buffer	max_buffer;
+	struct array_buffer	max_buffer;
 	bool			allocated_snapshot;
 #endif
 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
@@ -345,6 +357,8 @@ extern struct mutex trace_types_lock;
 
 extern int trace_array_get(struct trace_array *tr);
 extern int tracing_check_open_get_tr(struct trace_array *tr);
+extern struct trace_array *trace_array_find(const char *instance);
+extern struct trace_array *trace_array_find_get(const char *instance);
 
 extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
@@ -684,7 +698,7 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
-void tracing_reset_online_cpus(struct trace_buffer *buf);
+void tracing_reset_online_cpus(struct array_buffer *buf);
 void tracing_reset_current(int cpu);
 void tracing_reset_all_online_cpus(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
@@ -704,7 +718,7 @@ struct dentry *tracing_init_dentry(void);
 struct ring_buffer_event;
 
 struct ring_buffer_event *
-trace_buffer_lock_reserve(struct ring_buffer *buffer,
+trace_buffer_lock_reserve(struct trace_buffer *buffer,
 			  int type,
 			  unsigned long len,
 			  unsigned long flags,
@@ -716,7 +730,7 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
-void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 					struct ring_buffer_event *event);
 
 int trace_empty(struct trace_iterator *iter);
@@ -872,7 +886,7 @@ trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 extern int
 trace_array_vprintk(struct trace_array *tr,
 		    unsigned long ip, const char *fmt, va_list args);
-int trace_array_printk_buf(struct ring_buffer *buffer,
+int trace_array_printk_buf(struct trace_buffer *buffer,
 			   unsigned long ip, const char *fmt, ...);
 void trace_printk_seq(struct trace_seq *s);
 enum print_line_t print_trace_line(struct trace_iterator *iter);
@@ -949,22 +963,31 @@ extern void __trace_graph_return(struct trace_array *tr,
 				 unsigned long flags, int pc);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-extern struct ftrace_hash *ftrace_graph_hash;
-extern struct ftrace_hash *ftrace_graph_notrace_hash;
+extern struct ftrace_hash __rcu *ftrace_graph_hash;
+extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
 
 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
 {
 	unsigned long addr = trace->func;
 	int ret = 0;
+	struct ftrace_hash *hash;
 
 	preempt_disable_notrace();
 
-	if (ftrace_hash_empty(ftrace_graph_hash)) {
+	/*
+	 * Have to open code "rcu_dereference_sched()" because the
+	 * function graph tracer can be called when RCU is not
+	 * "watching".
+	 * Protected with schedule_on_each_cpu(ftrace_sync)
+	 */
+	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
+
+	if (ftrace_hash_empty(hash)) {
 		ret = 1;
 		goto out;
 	}
 
-	if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
+	if (ftrace_lookup_ip(hash, addr)) {
 
 		/*
 		 * This needs to be cleared on the return functions
@@ -1000,10 +1023,20 @@ static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
 static inline int ftrace_graph_notrace_addr(unsigned long addr)
 {
 	int ret = 0;
+	struct ftrace_hash *notrace_hash;
 
 	preempt_disable_notrace();
 
-	if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
+	/*
+	 * Have to open code "rcu_dereference_sched()" because the
+	 * function graph tracer can be called when RCU is not
+	 * "watching".
+	 * Protected with schedule_on_each_cpu(ftrace_sync)
+	 */
+	notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+						 !preemptible());
+
+	if (ftrace_lookup_ip(notrace_hash, addr))
 		ret = 1;
 
 	preempt_enable_notrace();
@@ -1056,7 +1089,7 @@ struct ftrace_func_command {
 extern bool ftrace_filter_param __initdata;
 static inline int ftrace_trace_task(struct trace_array *tr)
 {
-	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
+	return !this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
 }
 extern int ftrace_is_dead(void);
 int ftrace_create_function_files(struct trace_array *tr,
@@ -1144,6 +1177,11 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd);
 void ftrace_create_filter_files(struct ftrace_ops *ops,
 				struct dentry *parent);
 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
+
+extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+			     int len, int reset);
+extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+			      int len, int reset);
 #else
 struct ftrace_func_command;
 
@@ -1366,17 +1404,17 @@ struct trace_subsystem_dir {
 };
 
 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
-				     struct ring_buffer *buffer,
+				     struct trace_buffer *buffer,
 				     struct ring_buffer_event *event);
 
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
-				     struct ring_buffer *buffer,
+				     struct trace_buffer *buffer,
 				     struct ring_buffer_event *event,
 				     unsigned long flags, int pc,
 				     struct pt_regs *regs);
 
 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
-					      struct ring_buffer *buffer,
+					      struct trace_buffer *buffer,
 					      struct ring_buffer_event *event,
 					      unsigned long flags, int pc)
 {
@@ -1389,7 +1427,7 @@ void trace_buffered_event_disable(void);
 void trace_buffered_event_enable(void);
 
 static inline void
-__trace_event_discard_commit(struct ring_buffer *buffer,
+__trace_event_discard_commit(struct trace_buffer *buffer,
 			     struct ring_buffer_event *event)
 {
 	if (this_cpu_read(trace_buffered_event) == event) {
@@ -1415,7 +1453,7 @@ __trace_event_discard_commit(struct ring_buffer *buffer,
  */
 static inline bool
 __event_trigger_test_discard(struct trace_event_file *file,
-			     struct ring_buffer *buffer,
+			     struct trace_buffer *buffer,
 			     struct ring_buffer_event *event,
 			     void *entry,
 			     enum event_trigger_type *tt)
@@ -1450,7 +1488,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
  */
 static inline void
 event_trigger_unlock_commit(struct trace_event_file *file,
-			    struct ring_buffer *buffer,
+			    struct trace_buffer *buffer,
 			    struct ring_buffer_event *event,
 			    void *entry, unsigned long irq_flags, int pc)
 {
@@ -1481,7 +1519,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
  */
 static inline void
 event_trigger_unlock_commit_regs(struct trace_event_file *file,
-				 struct ring_buffer *buffer,
+				 struct trace_buffer *buffer,
 				 struct ring_buffer_event *event,
 				 void *entry, unsigned long irq_flags, int pc,
 				 struct pt_regs *regs)
@@ -1892,6 +1930,15 @@ void trace_printk_start_comm(void);
 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
 
+/* Used from boot time tracer */
+extern int trace_set_options(struct trace_array *tr, char *option);
+extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
+extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+					  unsigned long size, int cpu_id);
+extern int tracing_set_cpumask(struct trace_array *tr,
+				cpumask_var_t tracing_cpumask_new);
+
+
 #define MAX_EVENT_NAME_LEN	64
 
 extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
@@ -1949,6 +1996,9 @@ static inline const char *get_syscall_name(int syscall)
 #ifdef CONFIG_EVENT_TRACING
 void trace_event_init(void);
 void trace_event_eval_update(struct trace_eval_map **map, int len);
+/* Used from boot time tracer */
+extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
+extern int trigger_process_regex(struct trace_event_file *file, char *buff);
 #else
 static inline void __init trace_event_init(void) { }
 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
new file mode 100644
index 0000000..06d7feb
--- /dev/null
+++ b/kernel/trace/trace_boot.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * trace_boot.c
+ * Tracing kernel boot-time
+ */
+
+#define pr_fmt(fmt)	"trace_boot: " fmt
+
+#include <linux/bootconfig.h>
+#include <linux/cpumask.h>
+#include <linux/ftrace.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/trace.h>
+#include <linux/trace_events.h>
+
+#include "trace.h"
+
+#define MAX_BUF_LEN 256
+
+static void __init
+trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node)
+{
+	struct xbc_node *anode;
+	const char *p;
+	char buf[MAX_BUF_LEN];
+	unsigned long v = 0;
+
+	/* Common ftrace options */
+	xbc_node_for_each_array_value(node, "options", anode, p) {
+		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
+			pr_err("String is too long: %s\n", p);
+			continue;
+		}
+
+		if (trace_set_options(tr, buf) < 0)
+			pr_err("Failed to set option: %s\n", buf);
+	}
+
+	p = xbc_node_find_value(node, "trace_clock", NULL);
+	if (p && *p != '\0') {
+		if (tracing_set_clock(tr, p) < 0)
+			pr_err("Failed to set trace clock: %s\n", p);
+	}
+
+	p = xbc_node_find_value(node, "buffer_size", NULL);
+	if (p && *p != '\0') {
+		v = memparse(p, NULL);
+		if (v < PAGE_SIZE)
+			pr_err("Buffer size is too small: %s\n", p);
+		if (tracing_resize_ring_buffer(tr, v, RING_BUFFER_ALL_CPUS) < 0)
+			pr_err("Failed to resize trace buffer to %s\n", p);
+	}
+
+	p = xbc_node_find_value(node, "cpumask", NULL);
+	if (p && *p != '\0') {
+		cpumask_var_t new_mask;
+
+		if (alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+			if (cpumask_parse(p, new_mask) < 0 ||
+			    tracing_set_cpumask(tr, new_mask) < 0)
+				pr_err("Failed to set new CPU mask %s\n", p);
+			free_cpumask_var(new_mask);
+		}
+	}
+}
+
+#ifdef CONFIG_EVENT_TRACING
+static void __init
+trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node)
+{
+	struct xbc_node *anode;
+	char buf[MAX_BUF_LEN];
+	const char *p;
+
+	xbc_node_for_each_array_value(node, "events", anode, p) {
+		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
+			pr_err("String is too long: %s\n", p);
+			continue;
+		}
+
+		if (ftrace_set_clr_event(tr, buf, 1) < 0)
+			pr_err("Failed to enable event: %s\n", p);
+	}
+}
+
+#ifdef CONFIG_KPROBE_EVENTS
+static int __init
+trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
+{
+	struct dynevent_cmd cmd;
+	struct xbc_node *anode;
+	char buf[MAX_BUF_LEN];
+	const char *val;
+	int ret;
+
+	kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
+
+	ret = kprobe_event_gen_cmd_start(&cmd, event, NULL);
+	if (ret)
+		return ret;
+
+	xbc_node_for_each_array_value(node, "probes", anode, val) {
+		ret = kprobe_event_add_field(&cmd, val);
+		if (ret)
+			return ret;
+	}
+
+	ret = kprobe_event_gen_cmd_end(&cmd);
+	if (ret)
+		pr_err("Failed to add probe: %s\n", buf);
+
+	return ret;
+}
+#else
+static inline int __init
+trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
+{
+	pr_err("Kprobe event is not supported.\n");
+	return -ENOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_HIST_TRIGGERS
+static int __init
+trace_boot_add_synth_event(struct xbc_node *node, const char *event)
+{
+	struct dynevent_cmd cmd;
+	struct xbc_node *anode;
+	char buf[MAX_BUF_LEN];
+	const char *p;
+	int ret;
+
+	synth_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
+
+	ret = synth_event_gen_cmd_start(&cmd, event, NULL);
+	if (ret)
+		return ret;
+
+	xbc_node_for_each_array_value(node, "fields", anode, p) {
+		ret = synth_event_add_field_str(&cmd, p);
+		if (ret)
+			return ret;
+	}
+
+	ret = synth_event_gen_cmd_end(&cmd);
+	if (ret < 0)
+		pr_err("Failed to add synthetic event: %s\n", buf);
+
+	return ret;
+}
+#else
+static inline int __init
+trace_boot_add_synth_event(struct xbc_node *node, const char *event)
+{
+	pr_err("Synthetic event is not supported.\n");
+	return -ENOTSUPP;
+}
+#endif
+
+static void __init
+trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
+			  struct xbc_node *enode)
+{
+	struct trace_event_file *file;
+	struct xbc_node *anode;
+	char buf[MAX_BUF_LEN];
+	const char *p, *group, *event;
+
+	group = xbc_node_get_data(gnode);
+	event = xbc_node_get_data(enode);
+
+	if (!strcmp(group, "kprobes"))
+		if (trace_boot_add_kprobe_event(enode, event) < 0)
+			return;
+	if (!strcmp(group, "synthetic"))
+		if (trace_boot_add_synth_event(enode, event) < 0)
+			return;
+
+	mutex_lock(&event_mutex);
+	file = find_event_file(tr, group, event);
+	if (!file) {
+		pr_err("Failed to find event: %s:%s\n", group, event);
+		goto out;
+	}
+
+	p = xbc_node_find_value(enode, "filter", NULL);
+	if (p && *p != '\0') {
+		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
+			pr_err("filter string is too long: %s\n", p);
+		else if (apply_event_filter(file, buf) < 0)
+			pr_err("Failed to apply filter: %s\n", buf);
+	}
+
+	xbc_node_for_each_array_value(enode, "actions", anode, p) {
+		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
+			pr_err("action string is too long: %s\n", p);
+		else if (trigger_process_regex(file, buf) < 0)
+			pr_err("Failed to apply an action: %s\n", buf);
+	}
+
+	if (xbc_node_find_value(enode, "enable", NULL)) {
+		if (trace_event_enable_disable(file, 1, 0) < 0)
+			pr_err("Failed to enable event node: %s:%s\n",
+				group, event);
+	}
+out:
+	mutex_unlock(&event_mutex);
+}
+
+static void __init
+trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
+{
+	struct xbc_node *gnode, *enode;
+
+	node = xbc_node_find_child(node, "event");
+	if (!node)
+		return;
+	/* per-event key starts with "event.GROUP.EVENT" */
+	xbc_node_for_each_child(node, gnode)
+		xbc_node_for_each_child(gnode, enode)
+			trace_boot_init_one_event(tr, gnode, enode);
+}
+#else
+#define trace_boot_enable_events(tr, node) do {} while (0)
+#define trace_boot_init_events(tr, node) do {} while (0)
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static void __init
+trace_boot_set_ftrace_filter(struct trace_array *tr, struct xbc_node *node)
+{
+	struct xbc_node *anode;
+	const char *p;
+	char *q;
+
+	xbc_node_for_each_array_value(node, "ftrace.filters", anode, p) {
+		q = kstrdup(p, GFP_KERNEL);
+		if (!q)
+			return;
+		if (ftrace_set_filter(tr->ops, q, strlen(q), 0) < 0)
+			pr_err("Failed to add %s to ftrace filter\n", p);
+		else
+			ftrace_filter_param = true;
+		kfree(q);
+	}
+	xbc_node_for_each_array_value(node, "ftrace.notraces", anode, p) {
+		q = kstrdup(p, GFP_KERNEL);
+		if (!q)
+			return;
+		if (ftrace_set_notrace(tr->ops, q, strlen(q), 0) < 0)
+			pr_err("Failed to add %s to ftrace filter\n", p);
+		else
+			ftrace_filter_param = true;
+		kfree(q);
+	}
+}
+#else
+#define trace_boot_set_ftrace_filter(tr, node) do {} while (0)
+#endif
+
+static void __init
+trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node)
+{
+	const char *p;
+
+	trace_boot_set_ftrace_filter(tr, node);
+
+	p = xbc_node_find_value(node, "tracer", NULL);
+	if (p && *p != '\0') {
+		if (tracing_set_tracer(tr, p) < 0)
+			pr_err("Failed to set given tracer: %s\n", p);
+	}
+}
+
+static void __init
+trace_boot_init_one_instance(struct trace_array *tr, struct xbc_node *node)
+{
+	trace_boot_set_instance_options(tr, node);
+	trace_boot_init_events(tr, node);
+	trace_boot_enable_events(tr, node);
+	trace_boot_enable_tracer(tr, node);
+}
+
+static void __init
+trace_boot_init_instances(struct xbc_node *node)
+{
+	struct xbc_node *inode;
+	struct trace_array *tr;
+	const char *p;
+
+	node = xbc_node_find_child(node, "instance");
+	if (!node)
+		return;
+
+	xbc_node_for_each_child(node, inode) {
+		p = xbc_node_get_data(inode);
+		if (!p || *p == '\0')
+			continue;
+
+		tr = trace_array_get_by_name(p);
+		if (!tr) {
+			pr_err("Failed to get trace instance %s\n", p);
+			continue;
+		}
+		trace_boot_init_one_instance(tr, inode);
+		trace_array_put(tr);
+	}
+}
+
+static int __init trace_boot_init(void)
+{
+	struct xbc_node *trace_node;
+	struct trace_array *tr;
+
+	trace_node = xbc_find_node("ftrace");
+	if (!trace_node)
+		return 0;
+
+	tr = top_trace_array();
+	if (!tr)
+		return 0;
+
+	/* Global trace array is also one instance */
+	trace_boot_init_one_instance(tr, trace_node);
+	trace_boot_init_instances(trace_node);
+
+	return 0;
+}
+
+fs_initcall(trace_boot_init);
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 88e158d..eff0991 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -32,10 +32,10 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 {
 	struct trace_event_call *call = &event_branch;
 	struct trace_array *tr = branch_tracer;
+	struct trace_buffer *buffer;
 	struct trace_array_cpu *data;
 	struct ring_buffer_event *event;
 	struct trace_branch *entry;
-	struct ring_buffer *buffer;
 	unsigned long flags;
 	int pc;
 	const char *p;
@@ -55,12 +55,12 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 
 	raw_local_irq_save(flags);
 	current->trace_recursion |= TRACE_BRANCH_BIT;
-	data = this_cpu_ptr(tr->trace_buffer.data);
+	data = this_cpu_ptr(tr->array_buffer.data);
 	if (atomic_read(&data->disabled))
 		goto out;
 
 	pc = preempt_count();
-	buffer = tr->trace_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
 					  sizeof(*entry), flags, pc);
 	if (!event)
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
index 89779eb..9f2e852 100644
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -223,3 +223,215 @@ static __init int init_dynamic_event(void)
 	return 0;
 }
 fs_initcall(init_dynamic_event);
+
+/**
+ * dynevent_arg_add - Add an arg to a dynevent_cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event cmd
+ * @arg: The argument to append to the current cmd
+ * @check_arg: An (optional) pointer to a function checking arg sanity
+ *
+ * Append an argument to a dynevent_cmd.  The argument string will be
+ * appended to the current cmd string, followed by a separator, if
+ * applicable.  Before the argument is added, the @check_arg function,
+ * if present, will be used to check the sanity of the current arg
+ * string.
+ *
+ * The cmd string and separator should be set using the
+ * dynevent_arg_init() before any arguments are added using this
+ * function.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int dynevent_arg_add(struct dynevent_cmd *cmd,
+		     struct dynevent_arg *arg,
+		     dynevent_check_arg_fn_t check_arg)
+{
+	int ret = 0;
+
+	if (check_arg) {
+		ret = check_arg(arg);
+		if (ret)
+			return ret;
+	}
+
+	ret = seq_buf_printf(&cmd->seq, " %s%c", arg->str, arg->separator);
+	if (ret) {
+		pr_err("String is too long: %s%c\n", arg->str, arg->separator);
+		return -E2BIG;
+	}
+
+	return ret;
+}
+
+/**
+ * dynevent_arg_pair_add - Add an arg pair to a dynevent_cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event cmd
+ * @arg_pair: The argument pair to append to the current cmd
+ * @check_arg: An (optional) pointer to a function checking arg sanity
+ *
+ * Append an argument pair to a dynevent_cmd.  An argument pair
+ * consists of a left-hand-side argument and a right-hand-side
+ * argument separated by an operator, which can be whitespace, all
+ * followed by a separator, if applicable.  This can be used to add
+ * arguments of the form 'type variable_name;' or 'x+y'.
+ *
+ * The lhs argument string will be appended to the current cmd string,
+ * followed by an operator, if applicable, followed by the rhs string,
+ * followed finally by a separator, if applicable.  Before the
+ * argument is added, the @check_arg function, if present, will be
+ * used to check the sanity of the current arg strings.
+ *
+ * The cmd strings, operator, and separator should be set using the
+ * dynevent_arg_pair_init() before any arguments are added using this
+ * function.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int dynevent_arg_pair_add(struct dynevent_cmd *cmd,
+			  struct dynevent_arg_pair *arg_pair,
+			  dynevent_check_arg_fn_t check_arg)
+{
+	int ret = 0;
+
+	if (check_arg) {
+		ret = check_arg(arg_pair);
+		if (ret)
+			return ret;
+	}
+
+	ret = seq_buf_printf(&cmd->seq, " %s%c%s%c", arg_pair->lhs,
+			     arg_pair->operator, arg_pair->rhs,
+			     arg_pair->separator);
+	if (ret) {
+		pr_err("field string is too long: %s%c%s%c\n", arg_pair->lhs,
+		       arg_pair->operator, arg_pair->rhs,
+		       arg_pair->separator);
+		return -E2BIG;
+	}
+
+	return ret;
+}
+
+/**
+ * dynevent_str_add - Add a string to a dynevent_cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event cmd
+ * @str: The string to append to the current cmd
+ *
+ * Append a string to a dynevent_cmd.  The string will be appended to
+ * the current cmd string as-is, with nothing prepended or appended.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int dynevent_str_add(struct dynevent_cmd *cmd, const char *str)
+{
+	int ret = 0;
+
+	ret = seq_buf_puts(&cmd->seq, str);
+	if (ret) {
+		pr_err("String is too long: %s\n", str);
+		return -E2BIG;
+	}
+
+	return ret;
+}
+
+/**
+ * dynevent_cmd_init - Initialize a dynevent_cmd object
+ * @cmd: A pointer to the dynevent_cmd struct representing the cmd
+ * @buf: A pointer to the buffer to generate the command into
+ * @maxlen: The length of the buffer the command will be generated into
+ * @type: The type of the cmd, checked against further operations
+ * @run_command: The type-specific function that will actually run the command
+ *
+ * Initialize a dynevent_cmd.  A dynevent_cmd is used to build up and
+ * run dynamic event creation commands, such as commands for creating
+ * synthetic and kprobe events.  Before calling any of the functions
+ * used to build the command, a dynevent_cmd object should be
+ * instantiated and initialized using this function.
+ *
+ * The initialization sets things up by saving a pointer to the
+ * user-supplied buffer and its length via the @buf and @maxlen
+ * params, and by saving the cmd-specific @type and @run_command
+ * params which are used to check subsequent dynevent_cmd operations
+ * and actually run the command when complete.
+ */
+void dynevent_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen,
+		       enum dynevent_type type,
+		       dynevent_create_fn_t run_command)
+{
+	memset(cmd, '\0', sizeof(*cmd));
+
+	seq_buf_init(&cmd->seq, buf, maxlen);
+	cmd->type = type;
+	cmd->run_command = run_command;
+}
+
+/**
+ * dynevent_arg_init - Initialize a dynevent_arg object
+ * @arg: A pointer to the dynevent_arg struct representing the arg
+ * @separator: An (optional) separator, appended after adding the arg
+ *
+ * Initialize a dynevent_arg object.  A dynevent_arg represents an
+ * object used to append single arguments to the current command
+ * string.  After the arg string is successfully appended to the
+ * command string, the optional @separator is appended.  If no
+ * separator was specified when initializing the arg, a space will be
+ * appended.
+ */
+void dynevent_arg_init(struct dynevent_arg *arg,
+		       char separator)
+{
+	memset(arg, '\0', sizeof(*arg));
+
+	if (!separator)
+		separator = ' ';
+	arg->separator = separator;
+}
+
+/**
+ * dynevent_arg_pair_init - Initialize a dynevent_arg_pair object
+ * @arg_pair: A pointer to the dynevent_arg_pair struct representing the arg
+ * @operator: An (optional) operator, appended after adding the first arg
+ * @separator: An (optional) separator, appended after adding the second arg
+ *
+ * Initialize a dynevent_arg_pair object.  A dynevent_arg_pair
+ * represents an object used to append argument pairs such as 'type
+ * variable_name;' or 'x+y' to the current command string.  An
+ * argument pair consists of a left-hand-side argument and a
+ * right-hand-side argument separated by an operator, which can be
+ * whitespace, all followed by a separator, if applicable.  After the
+ * first arg string is successfully appended to the command string,
+ * the optional @operator is appended, followed by the second arg and
+ * an optional @separator.  If no separator was specified when
+ * initializing the arg, a space will be appended.
+ */
+void dynevent_arg_pair_init(struct dynevent_arg_pair *arg_pair,
+			    char operator, char separator)
+{
+	memset(arg_pair, '\0', sizeof(*arg_pair));
+
+	if (!operator)
+		operator = ' ';
+	arg_pair->operator = operator;
+
+	if (!separator)
+		separator = ' ';
+	arg_pair->separator = separator;
+}
+
+/**
+ * dynevent_create - Create the dynamic event contained in dynevent_cmd
+ * @cmd: The dynevent_cmd object containing the dynamic event creation command
+ *
+ * Once a dynevent_cmd object has been successfully built up via the
+ * dynevent_cmd_init(), dynevent_arg_add() and dynevent_arg_pair_add()
+ * functions, this function runs the final command to actually create
+ * the event.
+ *
+ * Return: 0 if the event was successfully created, error otherwise.
+ */
+int dynevent_create(struct dynevent_cmd *cmd)
+{
+	return cmd->run_command(cmd);
+}
+EXPORT_SYMBOL_GPL(dynevent_create);
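
Illustrative sketch (not taken from the patch itself): a kernel caller drives these dynevent_cmd helpers much like trace_boot.c above does for kprobe events. The function name, "example_kprobe" and "do_sys_open" are invented for illustration; kprobe_event_cmd_init(), kprobe_event_gen_cmd_start(), kprobe_event_add_field() and kprobe_event_gen_cmd_end() are the type-specific wrappers already used in trace_boot.c.

#include <linux/slab.h>
#include <linux/trace_events.h>

static int example_gen_kprobe_cmd(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Type-specific init: sets up the seq_buf and the run_command callback */
	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	/* Start the command with the event name, then append the probe spec */
	ret = kprobe_event_gen_cmd_start(&cmd, "example_kprobe", NULL);
	if (!ret)
		ret = kprobe_event_add_field(&cmd, "do_sys_open");
	if (!ret)
		/* The end wrapper hands the finished string to dynevent_create() */
		ret = kprobe_event_gen_cmd_end(&cmd);

	kfree(buf);
	return ret;
}
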
diff --git a/kernel/trace/trace_dynevent.h b/kernel/trace/trace_dynevent.h
index 4689813..d6857a254 100644
--- a/kernel/trace/trace_dynevent.h
+++ b/kernel/trace/trace_dynevent.h
@@ -117,4 +117,36 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type);
 #define for_each_dyn_event_safe(pos, n)	\
 	list_for_each_entry_safe(pos, n, &dyn_event_list, list)
 
+extern void dynevent_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen,
+			      enum dynevent_type type,
+			      dynevent_create_fn_t run_command);
+
+typedef int (*dynevent_check_arg_fn_t)(void *data);
+
+struct dynevent_arg {
+	const char		*str;
+	char			separator; /* e.g. ';', ',', or nothing */
+};
+
+extern void dynevent_arg_init(struct dynevent_arg *arg,
+			      char separator);
+extern int dynevent_arg_add(struct dynevent_cmd *cmd,
+			    struct dynevent_arg *arg,
+			    dynevent_check_arg_fn_t check_arg);
+
+struct dynevent_arg_pair {
+	const char		*lhs;
+	const char		*rhs;
+	char			operator; /* e.g. '=' or nothing */
+	char			separator; /* e.g. ';', ',', or nothing */
+};
+
+extern void dynevent_arg_pair_init(struct dynevent_arg_pair *arg_pair,
+				   char operator, char separator);
+
+extern int dynevent_arg_pair_add(struct dynevent_cmd *cmd,
+				 struct dynevent_arg_pair *arg_pair,
+				 dynevent_check_arg_fn_t check_arg);
+extern int dynevent_str_add(struct dynevent_cmd *cmd, const char *str);
+
 #endif
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 3e9d816..f22746f 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -164,7 +164,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
 
 	F_STRUCT(
 		__field(	int,		size	)
-		__dynamic_array(unsigned long,	caller	)
+		__array(	unsigned long,	caller,	FTRACE_STACK_ENTRIES	)
 	),
 
 	F_printk("\t=> %ps\n\t=> %ps\n\t=> %ps\n"
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c8622a4..f38234e 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -238,7 +238,7 @@ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
 	if (!pid_list)
 		return false;
 
-	data = this_cpu_ptr(tr->trace_buffer.data);
+	data = this_cpu_ptr(tr->array_buffer.data);
 
 	return data->ignore_pid;
 }
@@ -273,6 +273,7 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
 	if (!fbuffer->event)
 		return NULL;
 
+	fbuffer->regs = NULL;
 	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
 	return fbuffer->entry;
 }
@@ -547,7 +548,7 @@ event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
 
 	pid_list = rcu_dereference_sched(tr->filtered_pids);
 
-	this_cpu_write(tr->trace_buffer.data->ignore_pid,
+	this_cpu_write(tr->array_buffer.data->ignore_pid,
 		       trace_ignore_this_task(pid_list, prev) &&
 		       trace_ignore_this_task(pid_list, next));
 }
@@ -561,7 +562,7 @@ event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
 
 	pid_list = rcu_dereference_sched(tr->filtered_pids);
 
-	this_cpu_write(tr->trace_buffer.data->ignore_pid,
+	this_cpu_write(tr->array_buffer.data->ignore_pid,
 		       trace_ignore_this_task(pid_list, next));
 }
 
@@ -572,12 +573,12 @@ event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
 	struct trace_pid_list *pid_list;
 
 	/* Nothing to do if we are already tracing */
-	if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
+	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
 		return;
 
 	pid_list = rcu_dereference_sched(tr->filtered_pids);
 
-	this_cpu_write(tr->trace_buffer.data->ignore_pid,
+	this_cpu_write(tr->array_buffer.data->ignore_pid,
 		       trace_ignore_this_task(pid_list, task));
 }
 
@@ -588,13 +589,13 @@ event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
 	struct trace_pid_list *pid_list;
 
 	/* Nothing to do if we are not tracing */
-	if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
+	if (this_cpu_read(tr->array_buffer.data->ignore_pid))
 		return;
 
 	pid_list = rcu_dereference_sched(tr->filtered_pids);
 
 	/* Set tracing if current is enabled */
-	this_cpu_write(tr->trace_buffer.data->ignore_pid,
+	this_cpu_write(tr->array_buffer.data->ignore_pid,
 		       trace_ignore_this_task(pid_list, current));
 }
 
@@ -626,7 +627,7 @@ static void __ftrace_clear_event_pids(struct trace_array *tr)
 	}
 
 	for_each_possible_cpu(cpu)
-		per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
+		per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
 
 	rcu_assign_pointer(tr->filtered_pids, NULL);
 
@@ -698,7 +699,7 @@ static void remove_subsystem(struct trace_subsystem_dir *dir)
 		return;
 
 	if (!--dir->nr_events) {
-		tracefs_remove_recursive(dir->entry);
+		tracefs_remove(dir->entry);
 		list_del(&dir->list);
 		__put_system_dir(dir);
 	}
@@ -717,7 +718,7 @@ static void remove_event_file_dir(struct trace_event_file *file)
 		}
 		spin_unlock(&dir->d_lock);
 
-		tracefs_remove_recursive(dir);
+		tracefs_remove(dir);
 	}
 
 	list_del(&file->list);
@@ -1595,7 +1596,7 @@ static void ignore_task_cpu(void *data)
 	pid_list = rcu_dereference_protected(tr->filtered_pids,
 					     mutex_is_locked(&event_mutex));
 
-	this_cpu_write(tr->trace_buffer.data->ignore_pid,
+	this_cpu_write(tr->array_buffer.data->ignore_pid,
 		       trace_ignore_this_task(pid_list, current));
 }
 
@@ -2553,6 +2554,91 @@ find_event_file(struct trace_array *tr, const char *system, const char *event)
 	return file;
 }
 
+/**
+ * trace_get_event_file - Find and return a trace event file
+ * @instance: The name of the trace instance containing the event
+ * @system: The name of the system containing the event
+ * @event: The name of the event
+ *
+ * Return a trace event file given the trace instance name, trace
+ * system, and trace event name.  If the instance name is NULL, it
+ * refers to the top-level trace array.
+ *
+ * This function will look it up and return it if found, after calling
+ * trace_array_get() to prevent the instance from going away, and
+ * increment the event's module refcount to prevent it from being
+ * removed.
+ *
+ * To release the file, call trace_put_event_file(), which will call
+ * trace_array_put() and decrement the event's module refcount.
+ *
+ * Return: The trace event file on success, ERR_PTR otherwise.
+ */
+struct trace_event_file *trace_get_event_file(const char *instance,
+					      const char *system,
+					      const char *event)
+{
+	struct trace_array *tr = top_trace_array();
+	struct trace_event_file *file = NULL;
+	int ret = -EINVAL;
+
+	if (instance) {
+		tr = trace_array_find_get(instance);
+		if (!tr)
+			return ERR_PTR(-ENOENT);
+	} else {
+		ret = trace_array_get(tr);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+
+	mutex_lock(&event_mutex);
+
+	file = find_event_file(tr, system, event);
+	if (!file) {
+		trace_array_put(tr);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Don't let event modules unload while in use */
+	ret = try_module_get(file->event_call->mod);
+	if (!ret) {
+		trace_array_put(tr);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = 0;
+ out:
+	mutex_unlock(&event_mutex);
+
+	if (ret)
+		file = ERR_PTR(ret);
+
+	return file;
+}
+EXPORT_SYMBOL_GPL(trace_get_event_file);
+
+/**
+ * trace_put_event_file - Release a file from trace_get_event_file()
+ * @file: The trace event file
+ *
+ * If a file was retrieved using trace_get_event_file(), this should
+ * be called when it's no longer needed.  It will cancel the previous
+ * trace_array_get() called by that function, and decrement the
+ * event's module refcount.
+ */
+void trace_put_event_file(struct trace_event_file *file)
+{
+	mutex_lock(&event_mutex);
+	module_put(file->event_call->mod);
+	mutex_unlock(&event_mutex);
+
+	trace_array_put(file->tr);
+}
+EXPORT_SYMBOL_GPL(trace_put_event_file);
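
Illustrative sketch (assumed module-side usage, not from the patch): the get/put pair brackets any use of the returned file. "sched" and "sched_switch" are example names; passing NULL as the instance selects the top-level trace array, per the kernel-doc above.

#include <linux/err.h>
#include <linux/trace_events.h>

static int example_use_event_file(void)
{
	struct trace_event_file *file;

	file = trace_get_event_file(NULL, "sched", "sched_switch");
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* ... use the file; the instance and owning module stay pinned ... */

	trace_put_event_file(file);
	return 0;
}
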
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 /* Avoid typos */
@@ -3082,7 +3168,7 @@ int event_trace_del_tracer(struct trace_array *tr)
 
 	down_write(&trace_event_sem);
 	__trace_remove_event_dirs(tr);
-	tracefs_remove_recursive(tr->event_dir);
+	tracefs_remove(tr->event_dir);
 	up_write(&trace_event_sem);
 
 	tr->event_dir = NULL;
@@ -3409,8 +3495,8 @@ static void __init
 function_test_events_call(unsigned long ip, unsigned long parent_ip,
 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
+	struct trace_buffer *buffer;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
 	struct ftrace_entry *entry;
 	unsigned long flags;
 	long disabled;
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index f2896d1..e7ce7cd 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -66,7 +66,12 @@
 	C(INVALID_SUBSYS_EVENT,	"Invalid subsystem or event name"),	\
 	C(INVALID_REF_KEY,	"Using variable references in keys not supported"), \
 	C(VAR_NOT_FOUND,	"Couldn't find variable"),		\
-	C(FIELD_NOT_FOUND,	"Couldn't find field"),
+	C(FIELD_NOT_FOUND,	"Couldn't find field"),			\
+	C(EMPTY_ASSIGNMENT,	"Empty assignment"),			\
+	C(INVALID_SORT_MODIFIER,"Invalid sort modifier"),		\
+	C(EMPTY_SORT_FIELD,	"Empty sort field"),			\
+	C(TOO_MANY_SORT_FIELDS,	"Too many sort fields (Max = 2)"),	\
+	C(INVALID_SORT_FIELD,	"Sort field must be a key or a val"),
 
 #undef C
 #define C(a, b)		HIST_ERR_##a
@@ -375,7 +380,7 @@ struct hist_trigger_data {
 	unsigned int			n_save_var_str;
 };
 
-static int synth_event_create(int argc, const char **argv);
+static int create_synth_event(int argc, const char **argv);
 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
 static int synth_event_release(struct dyn_event *ev);
 static bool synth_event_is_busy(struct dyn_event *ev);
@@ -383,7 +388,7 @@ static bool synth_event_match(const char *system, const char *event,
 			int argc, const char **argv, struct dyn_event *ev);
 
 static struct dyn_event_operations synth_event_ops = {
-	.create = synth_event_create,
+	.create = create_synth_event,
 	.show = synth_event_show,
 	.is_busy = synth_event_is_busy,
 	.free = synth_event_release,
@@ -394,6 +399,7 @@ struct synth_field {
 	char *type;
 	char *name;
 	size_t size;
+	unsigned int offset;
 	bool is_signed;
 	bool is_string;
 };
@@ -408,6 +414,7 @@ struct synth_event {
 	struct trace_event_class		class;
 	struct trace_event_call			call;
 	struct tracepoint			*tp;
+	struct module				*mod;
 };
 
 static bool is_synth_event(struct dyn_event *ev)
@@ -470,11 +477,12 @@ struct action_data {
 	 * When a histogram trigger is hit, the values of any
 	 * references to variables, including variables being passed
 	 * as parameters to synthetic events, are collected into a
-	 * var_ref_vals array.  This var_ref_idx is the index of the
-	 * first param in the array to be passed to the synthetic
-	 * event invocation.
+	 * var_ref_vals array.  This var_ref_idx array is an array of
+	 * indices into the var_ref_vals array, one for each synthetic
+	 * event param, and is passed to the synthetic event
+	 * invocation.
 	 */
-	unsigned int		var_ref_idx;
+	unsigned int		var_ref_idx[TRACING_MAP_VARS_MAX];
 	struct synth_event	*synth_event;
 	bool			use_trace_keyword;
 	char			*synth_event_name;
@@ -608,7 +616,8 @@ static void last_cmd_set(struct trace_event_file *file, char *str)
 	if (!str)
 		return;
 
-	strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);
+	strcpy(last_cmd, "hist:");
+	strncat(last_cmd, str, MAX_FILTER_STR_VAL - 1 - sizeof("hist:"));
 
 	if (file) {
 		call = file->event_call;
@@ -662,6 +671,8 @@ static int synth_event_define_fields(struct trace_event_call *call)
 		if (ret)
 			break;
 
+		event->fields[i]->offset = n_u64;
+
 		if (event->fields[i]->is_string) {
 			offset += STR_VAR_LEN_MAX;
 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
@@ -834,7 +845,7 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
 		fmt = synth_field_fmt(se->fields[i]->type);
 
 		/* parameter types */
-		if (tr->trace_flags & TRACE_ITER_VERBOSE)
+		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
 			trace_seq_printf(s, "%s ", fmt);
 
 		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
@@ -875,14 +886,14 @@ static struct trace_event_functions synth_event_funcs = {
 
 static notrace void trace_event_raw_event_synth(void *__data,
 						u64 *var_ref_vals,
-						unsigned int var_ref_idx)
+						unsigned int *var_ref_idx)
 {
 	struct trace_event_file *trace_file = __data;
 	struct synth_trace_event *entry;
 	struct trace_event_buffer fbuffer;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	struct synth_event *event;
-	unsigned int i, n_u64;
+	unsigned int i, n_u64, val_idx;
 	int fields_size = 0;
 
 	event = trace_file->event_call->data;
@@ -896,7 +907,7 @@ static notrace void trace_event_raw_event_synth(void *__data,
 	 * Avoid ring buffer recursion detection, as this event
 	 * is being performed within another event.
 	 */
-	buffer = trace_file->tr->trace_buffer.buffer;
+	buffer = trace_file->tr->array_buffer.buffer;
 	ring_buffer_nest_start(buffer);
 
 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
@@ -905,15 +916,16 @@ static notrace void trace_event_raw_event_synth(void *__data,
 		goto out;
 
 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+		val_idx = var_ref_idx[i];
 		if (event->fields[i]->is_string) {
-			char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i];
+			char *str_val = (char *)(long)var_ref_vals[val_idx];
 			char *str_field = (char *)&entry->fields[n_u64];
 
 			strscpy(str_field, str_val, STR_VAR_LEN_MAX);
 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
 		} else {
 			struct synth_field *field = event->fields[i];
-			u64 val = var_ref_vals[var_ref_idx + i];
+			u64 val = var_ref_vals[val_idx];
 
 			switch (field->size) {
 			case 1:
@@ -1113,10 +1125,10 @@ static struct tracepoint *alloc_synth_tracepoint(char *name)
 }
 
 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
-				    unsigned int var_ref_idx);
+				    unsigned int *var_ref_idx);
 
 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
-			       unsigned int var_ref_idx)
+			       unsigned int *var_ref_idx)
 {
 	struct tracepoint *tp = event->tp;
 
@@ -1293,6 +1305,273 @@ struct hist_var_data {
 	struct hist_trigger_data *hist_data;
 };
 
+static int synth_event_check_arg_fn(void *data)
+{
+	struct dynevent_arg_pair *arg_pair = data;
+	int size;
+
+	size = synth_field_size((char *)arg_pair->lhs);
+
+	return size ? 0 : -EINVAL;
+}
+
+/**
+ * synth_event_add_field - Add a new field to a synthetic event cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @type: The type of the new field to add
+ * @name: The name of the new field to add
+ *
+ * Add a new field to a synthetic event cmd object.  Field ordering is in
+ * the same order the fields are added.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
+			  const char *name)
+{
+	struct dynevent_arg_pair arg_pair;
+	int ret;
+
+	if (cmd->type != DYNEVENT_TYPE_SYNTH)
+		return -EINVAL;
+
+	if (!type || !name)
+		return -EINVAL;
+
+	dynevent_arg_pair_init(&arg_pair, 0, ';');
+
+	arg_pair.lhs = type;
+	arg_pair.rhs = name;
+
+	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
+	if (ret)
+		return ret;
+
+	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
+		ret = -EINVAL;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_add_field);
+
+/**
+ * synth_event_add_field_str - Add a new field to a synthetic event cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @type_name: The type and name of the new field to add, as a single string
+ *
+ * Add a new field to a synthetic event cmd object, as a single
+ * string.  The @type_name string is expected to be of the form 'type
+ * name'; a ';' will be appended to it.  No sanity checking is done -
+ * what's passed in is assumed to already be well-formed.  Field
+ * ordering is in the same order the fields are added.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
+{
+	struct dynevent_arg arg;
+	int ret;
+
+	if (cmd->type != DYNEVENT_TYPE_SYNTH)
+		return -EINVAL;
+
+	if (!type_name)
+		return -EINVAL;
+
+	dynevent_arg_init(&arg, ';');
+
+	arg.str = type_name;
+
+	ret = dynevent_arg_add(cmd, &arg, NULL);
+	if (ret)
+		return ret;
+
+	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
+		ret = -EINVAL;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_add_field_str);
+
+/**
+ * synth_event_add_fields - Add multiple fields to a synthetic event cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @fields: An array of type/name field descriptions
+ * @n_fields: The number of field descriptions contained in the fields array
+ *
+ * Add a new set of fields to a synthetic event cmd object.  The event
+ * fields that will be defined for the event should be passed in as an
+ * array of struct synth_field_desc, and the number of elements in the
+ * array passed in as n_fields.  Field ordering will retain the
+ * ordering given in the fields array.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_add_fields(struct dynevent_cmd *cmd,
+			   struct synth_field_desc *fields,
+			   unsigned int n_fields)
+{
+	unsigned int i;
+	int ret = 0;
+
+	for (i = 0; i < n_fields; i++) {
+		if (fields[i].type == NULL || fields[i].name == NULL) {
+			ret = -EINVAL;
+			break;
+		}
+
+		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_add_fields);
+
+/**
+ * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @name: The name of the synthetic event
+ * @mod: The module creating the event, NULL if not created from a module
+ * @args: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+ * rather use the synth_event_gen_cmd_start() wrapper, which
+ * automatically adds a NULL to the end of the arg list.  If this
+ * function is used directly, make sure the last arg in the variable
+ * arg list is NULL.
+ *
+ * Generate a synthetic event command to be executed by
+ * synth_event_gen_cmd_end().  This function can be used to generate
+ * the complete command or only the first part of it; in the latter
+ * case, synth_event_add_field(), synth_event_add_field_str(), or
+ * synth_event_add_fields() can be used to add more fields following
+ * this.
+ *
+ * There should be an even number of variable args, each pair consisting
+ * of a type followed by a field name.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
+				struct module *mod, ...)
+{
+	struct dynevent_arg arg;
+	va_list args;
+	int ret;
+
+	cmd->event_name = name;
+	cmd->private_data = mod;
+
+	if (cmd->type != DYNEVENT_TYPE_SYNTH)
+		return -EINVAL;
+
+	dynevent_arg_init(&arg, 0);
+	arg.str = name;
+	ret = dynevent_arg_add(cmd, &arg, NULL);
+	if (ret)
+		return ret;
+
+	va_start(args, mod);
+	for (;;) {
+		const char *type, *name;
+
+		type = va_arg(args, const char *);
+		if (!type)
+			break;
+		name = va_arg(args, const char *);
+		if (!name)
+			break;
+
+		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
+			ret = -EINVAL;
+			break;
+		}
+
+		ret = synth_event_add_field(cmd, type, name);
+		if (ret)
+			break;
+	}
+	va_end(args);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
+
+/**
+ * synth_event_gen_cmd_array_start - Start synthetic event command from an array
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @name: The name of the synthetic event
+ * @fields: An array of type/name field descriptions
+ * @n_fields: The number of field descriptions contained in the fields array
+ *
+ * Generate a synthetic event command to be executed by
+ * synth_event_gen_cmd_end().  This function can be used to generate
+ * the complete command or only the first part of it; in the latter
+ * case, synth_event_add_field(), synth_event_add_field_str(), or
+ * synth_event_add_fields() can be used to add more fields following
+ * this.
+ *
+ * The event fields that will be defined for the event should be
+ * passed in as an array of struct synth_field_desc, and the number of
+ * elements in the array passed in as n_fields.  Field ordering will
+ * retain the ordering given in the fields array.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
+				    struct module *mod,
+				    struct synth_field_desc *fields,
+				    unsigned int n_fields)
+{
+	struct dynevent_arg arg;
+	unsigned int i;
+	int ret = 0;
+
+	cmd->event_name = name;
+	cmd->private_data = mod;
+
+	if (cmd->type != DYNEVENT_TYPE_SYNTH)
+		return -EINVAL;
+
+	if (n_fields > SYNTH_FIELDS_MAX)
+		return -EINVAL;
+
+	dynevent_arg_init(&arg, 0);
+	arg.str = name;
+	ret = dynevent_arg_add(cmd, &arg, NULL);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < n_fields; i++) {
+		if (fields[i].type == NULL || fields[i].name == NULL)
+			return -EINVAL;
+
+		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
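
Illustrative sketch (not from the patch): building a synthetic event definition with the array-based starter. "example_synth" and its two fields are invented; the field descriptions use the struct synth_field_desc .type/.name layout referenced in the kernel-doc above.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/trace_events.h>

static int example_gen_synth_cmd(void)
{
	static struct synth_field_desc fields[] = {
		{ .type = "pid_t",	.name = "pid" },
		{ .type = "u64",	.name = "lat_ns" },
	};
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	ret = synth_event_gen_cmd_array_start(&cmd, "example_synth", THIS_MODULE,
					      fields, ARRAY_SIZE(fields));
	if (!ret)
		/* More fields could be appended here before running the command */
		ret = synth_event_gen_cmd_end(&cmd);

	kfree(buf);
	return ret;
}
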
+
 static int __create_synth_event(int argc, const char *name, const char **argv)
 {
 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
@@ -1361,29 +1640,123 @@ static int __create_synth_event(int argc, const char *name, const char **argv)
 	goto out;
 }
 
+/**
+ * synth_event_create - Create a new synthetic event
+ * @name: The name of the new synthetic event
+ * @fields: An array of type/name field descriptions
+ * @n_fields: The number of field descriptions contained in the fields array
+ * @mod: The module creating the event, NULL if not created from a module
+ *
+ * Create a new synthetic event with the given name under the
+ * trace/events/synthetic/ directory.  The event fields that will be
+ * defined for the event should be passed in as an array of struct
+ * synth_field_desc, and the number of elements in the array passed in as
+ * n_fields. Field ordering will retain the ordering given in the
+ * fields array.
+ *
+ * If the new synthetic event is being created from a module, the mod
+ * param must be non-NULL.  This will ensure that the trace buffer
+ * won't contain unreadable events.
+ *
+ * The new synth event should be deleted using the synth_event_delete()
+ * function.  The new synthetic event can be generated from modules or
+ * other kernel code using synth_event_trace() and related functions.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_create(const char *name, struct synth_field_desc *fields,
+		       unsigned int n_fields, struct module *mod)
+{
+	struct dynevent_cmd cmd;
+	char *buf;
+	int ret;
+
+	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+	ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
+					      fields, n_fields);
+	if (ret)
+		goto out;
+
+	ret = synth_event_gen_cmd_end(&cmd);
+ out:
+	kfree(buf);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_create);
+
+static int destroy_synth_event(struct synth_event *se)
+{
+	int ret;
+
+	if (se->ref)
+		ret = -EBUSY;
+	else {
+		ret = unregister_synth_event(se);
+		if (!ret) {
+			dyn_event_remove(&se->devent);
+			free_synth_event(se);
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * synth_event_delete - Delete a synthetic event
+ * @event_name: The name of the synthetic event to delete
+ *
+ * Delete a synthetic event that was created with synth_event_create().
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_delete(const char *event_name)
+{
+	struct synth_event *se = NULL;
+	struct module *mod = NULL;
+	int ret = -ENOENT;
+
+	mutex_lock(&event_mutex);
+	se = find_synth_event(event_name);
+	if (se) {
+		mod = se->mod;
+		ret = destroy_synth_event(se);
+	}
+	mutex_unlock(&event_mutex);
+
+	if (mod) {
+		mutex_lock(&trace_types_lock);
+		/*
+		 * It is safest to reset the ring buffer if the module
+		 * being unloaded registered any events that were
+		 * used. The only worry is if a new module gets
+		 * loaded, and takes on the same id as the events of
+		 * this module. When printing out the buffer, traced
+		 * events left over from this module may be passed to
+		 * the new module events and unexpected results may
+		 * occur.
+		 */
+		tracing_reset_all_online_cpus();
+		mutex_unlock(&trace_types_lock);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_delete);
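
Illustrative sketch (not from the patch): the one-call interface from module init/exit. The event and field names are invented; THIS_MODULE is passed so the ring buffer is reset when the creating module goes away, as described above.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/trace_events.h>

static struct synth_field_desc example_fields[] = {
	{ .type = "u64",	.name = "ts_ns" },
	{ .type = "pid_t",	.name = "pid" },
};

static int __init example_synth_init(void)
{
	return synth_event_create("example_event", example_fields,
				  ARRAY_SIZE(example_fields), THIS_MODULE);
}

static void __exit example_synth_exit(void)
{
	/* Return value (-EBUSY/-ENOENT) ignored here for brevity */
	synth_event_delete("example_event");
}
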
+
 static int create_or_delete_synth_event(int argc, char **argv)
 {
 	const char *name = argv[0];
-	struct synth_event *event = NULL;
 	int ret;
 
 	/* trace_run_command() ensures argc != 0 */
 	if (name[0] == '!') {
-		mutex_lock(&event_mutex);
-		event = find_synth_event(name + 1);
-		if (event) {
-			if (event->ref)
-				ret = -EBUSY;
-			else {
-				ret = unregister_synth_event(event);
-				if (!ret) {
-					dyn_event_remove(&event->devent);
-					free_synth_event(event);
-				}
-			}
-		} else
-			ret = -ENOENT;
-		mutex_unlock(&event_mutex);
+		ret = synth_event_delete(name + 1);
 		return ret;
 	}
 
@@ -1391,7 +1764,474 @@ static int create_or_delete_synth_event(int argc, char **argv)
 	return ret == -ECANCELED ? -EINVAL : ret;
 }
 
-static int synth_event_create(int argc, const char **argv)
+static int synth_event_run_command(struct dynevent_cmd *cmd)
+{
+	struct synth_event *se;
+	int ret;
+
+	ret = trace_run_command(cmd->seq.buffer, create_or_delete_synth_event);
+	if (ret)
+		return ret;
+
+	se = find_synth_event(cmd->event_name);
+	if (WARN_ON(!se))
+		return -ENOENT;
+
+	se->mod = cmd->private_data;
+
+	return ret;
+}
+
+/**
+ * synth_event_cmd_init - Initialize a synthetic event command object
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @buf: A pointer to the buffer used to build the command
+ * @maxlen: The length of the buffer passed in @buf
+ *
+ * Initialize a synthetic event command object.  Use this before
+ * calling any of the other dynevent_cmd functions.
+ */
+void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
+{
+	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
+			  synth_event_run_command);
+}
+EXPORT_SYMBOL_GPL(synth_event_cmd_init);
+
+/**
+ * synth_event_trace - Trace a synthetic event
+ * @file: The trace_event_file representing the synthetic event
+ * @n_vals: The number of values in vals
+ * @args: Variable number of args containing the event values
+ *
+ * Trace a synthetic event using the values passed in the variable
+ * argument list.
+ *
+ * The argument list should be a list of 'n_vals' u64 values.  The number
+ * of vals must match the number of fields in the synthetic event, and
+ * must be in the same order as the synthetic event fields.
+ *
+ * All vals should be cast to u64, and string vals are just pointers
+ * to strings, cast to u64.  Strings will be copied into space
+ * reserved in the event for the string, using these pointers.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
+{
+	struct trace_event_buffer fbuffer;
+	struct synth_trace_event *entry;
+	struct trace_buffer *buffer;
+	struct synth_event *event;
+	unsigned int i, n_u64;
+	int fields_size = 0;
+	va_list args;
+	int ret = 0;
+
+	/*
+	 * Normal event generation doesn't get called at all unless
+	 * the ENABLED bit is set (which attaches the probe thus
+	 * allowing this code to be called, etc).  Because this is
+	 * called directly by the user, we don't have that but we
+	 * still need to honor not logging when disabled.
+	 */
+	if (!(file->flags & EVENT_FILE_FL_ENABLED))
+		return 0;
+
+	event = file->event_call->data;
+
+	if (n_vals != event->n_fields)
+		return -EINVAL;
+
+	if (trace_trigger_soft_disabled(file))
+		return -EINVAL;
+
+	fields_size = event->n_u64 * sizeof(u64);
+
+	/*
+	 * Avoid ring buffer recursion detection, as this event
+	 * is being performed within another event.
+	 */
+	buffer = file->tr->array_buffer.buffer;
+	ring_buffer_nest_start(buffer);
+
+	entry = trace_event_buffer_reserve(&fbuffer, file,
+					   sizeof(*entry) + fields_size);
+	if (!entry) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	va_start(args, n_vals);
+	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+		u64 val;
+
+		val = va_arg(args, u64);
+
+		if (event->fields[i]->is_string) {
+			char *str_val = (char *)(long)val;
+			char *str_field = (char *)&entry->fields[n_u64];
+
+			strscpy(str_field, str_val, STR_VAR_LEN_MAX);
+			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+		} else {
+			entry->fields[n_u64] = val;
+			n_u64++;
+		}
+	}
+	va_end(args);
+
+	trace_event_buffer_commit(&fbuffer);
+out:
+	ring_buffer_nest_end(buffer);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace);
+
+/**
+ * synth_event_trace_array - Trace a synthetic event from an array
+ * @file: The trace_event_file representing the synthetic event
+ * @vals: Array of values
+ * @n_vals: The number of values in vals
+ *
+ * Trace a synthetic event using the values passed in as 'vals'.
+ *
+ * The 'vals' array is just an array of 'n_vals' u64 values.  The number of
+ * vals must match the number of fields in the synthetic event, and
+ * must be in the same order as the synthetic event fields.
+ *
+ * All vals should be cast to u64, and string vals are just pointers
+ * to strings, cast to u64.  Strings will be copied into space
+ * reserved in the event for the string, using these pointers.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
+			    unsigned int n_vals)
+{
+	struct trace_event_buffer fbuffer;
+	struct synth_trace_event *entry;
+	struct trace_buffer *buffer;
+	struct synth_event *event;
+	unsigned int i, n_u64;
+	int fields_size = 0;
+	int ret = 0;
+
+	/*
+	 * Normal event generation doesn't get called at all unless
+	 * the ENABLED bit is set (which attaches the probe thus
+	 * allowing this code to be called, etc).  Because this is
+	 * called directly by the user, we don't have that but we
+	 * still need to honor not logging when disabled.
+	 */
+	if (!(file->flags & EVENT_FILE_FL_ENABLED))
+		return 0;
+
+	event = file->event_call->data;
+
+	if (n_vals != event->n_fields)
+		return -EINVAL;
+
+	if (trace_trigger_soft_disabled(file))
+		return -EINVAL;
+
+	fields_size = event->n_u64 * sizeof(u64);
+
+	/*
+	 * Avoid ring buffer recursion detection, as this event
+	 * is being performed within another event.
+	 */
+	buffer = file->tr->array_buffer.buffer;
+	ring_buffer_nest_start(buffer);
+
+	entry = trace_event_buffer_reserve(&fbuffer, file,
+					   sizeof(*entry) + fields_size);
+	if (!entry) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+		if (event->fields[i]->is_string) {
+			char *str_val = (char *)(long)vals[i];
+			char *str_field = (char *)&entry->fields[n_u64];
+
+			strscpy(str_field, str_val, STR_VAR_LEN_MAX);
+			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+		} else {
+			entry->fields[n_u64] = vals[i];
+			n_u64++;
+		}
+	}
+
+	trace_event_buffer_commit(&fbuffer);
+out:
+	ring_buffer_nest_end(buffer);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace_array);
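The array form takes the same values in field declaration order; a hedged sketch, again assuming a hypothetical two-field event:

	u64 vals[2];

	vals[0] = 42;				/* u64 field */
	vals[1] = (u64)(long)"smallberg";	/* string field (pointer cast to u64) */
	ret = synth_event_trace_array(my_synth_file, vals, ARRAY_SIZE(vals));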
+
+/**
+ * synth_event_trace_start - Start piecewise synthetic event trace
+ * @file: The trace_event_file representing the synthetic event
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * Start the trace of a synthetic event field-by-field rather than all
+ * at once.
+ *
+ * This function 'opens' an event trace, which means space is reserved
+ * for the event in the trace buffer, after which the event's
+ * individual field values can be set through either
+ * synth_event_add_next_val() or synth_event_add_val().
+ *
+ * A pointer to a trace_state object is passed in, which will keep
+ * track of the current event trace state until the event trace is
+ * closed (and the event finally traced) using
+ * synth_event_trace_end().
+ *
+ * Note that synth_event_trace_end() must be called after all values
+ * have been added for each event trace, regardless of whether adding
+ * all field values succeeded or not.
+ *
+ * Note also that for a given event trace, all fields must be added
+ * using either synth_event_add_next_val() or synth_event_add_val()
+ * but not both together or interleaved.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace_start(struct trace_event_file *file,
+			    struct synth_event_trace_state *trace_state)
+{
+	struct synth_trace_event *entry;
+	int fields_size = 0;
+	int ret = 0;
+
+	if (!trace_state) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	memset(trace_state, '\0', sizeof(*trace_state));
+
+	/*
+	 * Normal event tracing doesn't get called at all unless the
+	 * ENABLED bit is set (which attaches the probe thus allowing
+	 * this code to be called, etc).  Because this is called
+	 * directly by the user, we don't have that but we still need
+	 * to honor not logging when disabled.  For the iterated
+	 * trace case, we save the enabled state upon start and just
+	 * ignore the following data calls.
+	 */
+	if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
+		trace_state->enabled = false;
+		goto out;
+	}
+
+	trace_state->enabled = true;
+
+	trace_state->event = file->event_call->data;
+
+	if (trace_trigger_soft_disabled(file)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	fields_size = trace_state->event->n_u64 * sizeof(u64);
+
+	/*
+	 * Avoid ring buffer recursion detection, as this event
+	 * is being performed within another event.
+	 */
+	trace_state->buffer = file->tr->array_buffer.buffer;
+	ring_buffer_nest_start(trace_state->buffer);
+
+	entry = trace_event_buffer_reserve(&trace_state->fbuffer, file,
+					   sizeof(*entry) + fields_size);
+	if (!entry) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	trace_state->entry = entry;
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace_start);
+
+static int __synth_event_add_val(const char *field_name, u64 val,
+				 struct synth_event_trace_state *trace_state)
+{
+	struct synth_field *field = NULL;
+	struct synth_trace_event *entry;
+	struct synth_event *event;
+	int i, ret = 0;
+
+	if (!trace_state) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* can't mix synth_event_add_next_val() with synth_event_add_val() */
+	if (field_name) {
+		if (trace_state->add_next) {
+			ret = -EINVAL;
+			goto out;
+		}
+		trace_state->add_name = true;
+	} else {
+		if (trace_state->add_name) {
+			ret = -EINVAL;
+			goto out;
+		}
+		trace_state->add_next = true;
+	}
+
+	if (!trace_state->enabled)
+		goto out;
+
+	event = trace_state->event;
+	if (trace_state->add_name) {
+		for (i = 0; i < event->n_fields; i++) {
+			field = event->fields[i];
+			if (strcmp(field->name, field_name) == 0)
+				break;
+		}
+		if (!field) {
+			ret = -EINVAL;
+			goto out;
+		}
+	} else {
+		if (trace_state->cur_field >= event->n_fields) {
+			ret = -EINVAL;
+			goto out;
+		}
+		field = event->fields[trace_state->cur_field++];
+	}
+
+	entry = trace_state->entry;
+	if (field->is_string) {
+		char *str_val = (char *)(long)val;
+		char *str_field;
+
+		if (!str_val) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		str_field = (char *)&entry->fields[field->offset];
+		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
+	} else
+		entry->fields[field->offset] = val;
+ out:
+	return ret;
+}
+
+/**
+ * synth_event_add_next_val - Add the next field's value to an open synth trace
+ * @val: The value to set the next field to
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * Set the value of the next field in an event that's been opened by
+ * synth_event_trace_start().
+ *
+ * The val param should be the value cast to u64.  If the value points
+ * to a string, the val param should be a char * cast to u64.
+ *
+ * This function assumes all the fields in an event are to be set one
+ * after another - successive calls to this function are made, one for
+ * each field, in the order of the fields in the event, until all
+ * fields have been set.  If you'd rather set each field individually
+ * without regard to ordering, synth_event_add_val() can be used
+ * instead.
+ *
+ * Note however that synth_event_add_next_val() and
+ * synth_event_add_val() can't be intermixed for a given event trace -
+ * one or the other but not both can be used at the same time.
+ *
+ * Note also that synth_event_trace_end() must be called after all
+ * values have been added for each event trace, regardless of whether
+ * adding all field values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_add_next_val(u64 val,
+			     struct synth_event_trace_state *trace_state)
+{
+	return __synth_event_add_val(NULL, val, trace_state);
+}
+EXPORT_SYMBOL_GPL(synth_event_add_next_val);
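A hedged sketch of the ordered variant, assuming 'trace_state' was set up by synth_event_trace_start() and the event declares a u64 field followed by a string field:

	ret = synth_event_add_next_val(42, &trace_state);		/* first field */
	if (!ret)
		ret = synth_event_add_next_val((u64)(long)"smallberg",
					       &trace_state);		/* second field */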
+
+/**
+ * synth_event_add_val - Add a named field's value to an open synth trace
+ * @field_name: The name of the synthetic event field value to set
+ * @val: The value to set the next field to
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * Set the value of the named field in an event that's been opened by
+ * synth_event_trace_start().
+ *
+ * The val param should be the value cast to u64.  If the value points
+ * to a string, the val param should be a char * cast to u64.
+ *
+ * This function looks up the field name, and if found, sets the field
+ * to the specified value.  This lookup makes this function more
+ * expensive than synth_event_add_next_val(), so use that or the
+ * non-piecewise synth_event_trace() instead if efficiency is more
+ * important.
+ *
+ * Note however that synth_event_add_next_val() and
+ * synth_event_add_val() can't be intermixed for a given event trace -
+ * one or the other but not both can be used at the same time.
+ *
+ * Note also that synth_event_trace_end() must be called after all
+ * values have been added for each event trace, regardless of whether
+ * adding all field values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_add_val(const char *field_name, u64 val,
+			struct synth_event_trace_state *trace_state)
+{
+	return __synth_event_add_val(field_name, val, trace_state);
+}
+EXPORT_SYMBOL_GPL(synth_event_add_val);
+
+/**
+ * synth_event_trace_end - End piecewise synthetic event trace
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * End the trace of a synthetic event opened by
+ * synth_event_trace_start().
+ *
+ * This function 'closes' an event trace, which basically means that
+ * it commits the reserved event and cleans up other loose ends.
+ *
+ * A pointer to a trace_state object is passed in, which will keep
+ * track of the current event trace state opened with
+ * synth_event_trace_start().
+ *
+ * Note that this function must be called after all values have been
+ * added for each event trace, regardless of whether adding all field
+ * values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace_end(struct synth_event_trace_state *trace_state)
+{
+	if (!trace_state)
+		return -EINVAL;
+
+	trace_event_buffer_commit(&trace_state->fbuffer);
+
+	ring_buffer_nest_end(trace_state->buffer);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace_end);
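Putting the piecewise API together, a minimal sketch; the event file pointer and the field names "lat" and "comm" are assumptions for illustration:

	struct synth_event_trace_state trace_state;
	int ret;

	ret = synth_event_trace_start(my_synth_file, &trace_state);
	if (ret)
		return ret;

	ret = synth_event_add_val("lat", 42, &trace_state);
	if (!ret)
		ret = synth_event_add_val("comm", (u64)(long)"smallberg",
					  &trace_state);

	/* must be called even if adding a value failed */
	synth_event_trace_end(&trace_state);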
+
+static int create_synth_event(int argc, const char **argv)
 {
 	const char *name = argv[0];
 	int len;
@@ -2041,12 +2881,6 @@ static int parse_map_size(char *str)
 	unsigned long size, map_bits;
 	int ret;
 
-	strsep(&str, "=");
-	if (!str) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	ret = kstrtoul(str, 0, &size);
 	if (ret)
 		goto out;
@@ -2106,25 +2940,25 @@ static int parse_action(char *str, struct hist_trigger_attrs *attrs)
 static int parse_assignment(struct trace_array *tr,
 			    char *str, struct hist_trigger_attrs *attrs)
 {
-	int ret = 0;
+	int len, ret = 0;
 
-	if ((str_has_prefix(str, "key=")) ||
-	    (str_has_prefix(str, "keys="))) {
-		attrs->keys_str = kstrdup(str, GFP_KERNEL);
+	if ((len = str_has_prefix(str, "key=")) ||
+	    (len = str_has_prefix(str, "keys="))) {
+		attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
 		if (!attrs->keys_str) {
 			ret = -ENOMEM;
 			goto out;
 		}
-	} else if ((str_has_prefix(str, "val=")) ||
-		   (str_has_prefix(str, "vals=")) ||
-		   (str_has_prefix(str, "values="))) {
-		attrs->vals_str = kstrdup(str, GFP_KERNEL);
+	} else if ((len = str_has_prefix(str, "val=")) ||
+		   (len = str_has_prefix(str, "vals=")) ||
+		   (len = str_has_prefix(str, "values="))) {
+		attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
 		if (!attrs->vals_str) {
 			ret = -ENOMEM;
 			goto out;
 		}
-	} else if (str_has_prefix(str, "sort=")) {
-		attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
+	} else if ((len = str_has_prefix(str, "sort="))) {
+		attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
 		if (!attrs->sort_key_str) {
 			ret = -ENOMEM;
 			goto out;
@@ -2135,12 +2969,8 @@ static int parse_assignment(struct trace_array *tr,
 			ret = -ENOMEM;
 			goto out;
 		}
-	} else if (str_has_prefix(str, "clock=")) {
-		strsep(&str, "=");
-		if (!str) {
-			ret = -EINVAL;
-			goto out;
-		}
+	} else if ((len = str_has_prefix(str, "clock="))) {
+		str += len;
 
 		str = strstrip(str);
 		attrs->clock = kstrdup(str, GFP_KERNEL);
@@ -2148,8 +2978,8 @@ static int parse_assignment(struct trace_array *tr,
 			ret = -ENOMEM;
 			goto out;
 		}
-	} else if (str_has_prefix(str, "size=")) {
-		int map_bits = parse_map_size(str);
+	} else if ((len = str_has_prefix(str, "size="))) {
+		int map_bits = parse_map_size(str + len);
 
 		if (map_bits < 0) {
 			ret = map_bits;
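The rewrites above lean on str_has_prefix() returning the length of the matched prefix (and 0 when it does not match), so 'str + len' points straight at the assigned value; roughly:

	len = str_has_prefix(str, "keys=");	/* 5 on a match, 0 otherwise */
	if (len)
		attrs->keys_str = kstrdup(str + len, GFP_KERNEL);	/* value only, no "keys=" */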
@@ -2189,8 +3019,15 @@ parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
 
 	while (trigger_str) {
 		char *str = strsep(&trigger_str, ":");
+		char *rhs;
 
-		if (strchr(str, '=')) {
+		rhs = strchr(str, '=');
+		if (rhs) {
+			if (!strlen(++rhs)) {
+				ret = -EINVAL;
+				hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str));
+				goto free;
+			}
 			ret = parse_assignment(tr, str, attrs);
 			if (ret)
 				goto free;
@@ -2661,6 +3498,22 @@ static int init_var_ref(struct hist_field *ref_field,
 	goto out;
 }
 
+static int find_var_ref_idx(struct hist_trigger_data *hist_data,
+			    struct hist_field *var_field)
+{
+	struct hist_field *ref_field;
+	int i;
+
+	for (i = 0; i < hist_data->n_var_refs; i++) {
+		ref_field = hist_data->var_refs[i];
+		if (ref_field->var.idx == var_field->var.idx &&
+		    ref_field->var.hist_data == var_field->hist_data)
+			return i;
+	}
+
+	return -ENOENT;
+}
+
 /**
  * create_var_ref - Create a variable reference and attach it to trigger
  * @hist_data: The trigger that will be referencing the variable
@@ -4146,8 +4999,11 @@ static int check_synth_field(struct synth_event *event,
 
 	field = event->fields[field_pos];
 
-	if (strcmp(field->type, hist_field->type) != 0)
-		return -EINVAL;
+	if (strcmp(field->type, hist_field->type) != 0) {
+		if (field->size != hist_field->size ||
+		    field->is_signed != hist_field->is_signed)
+			return -EINVAL;
+	}
 
 	return 0;
 }
@@ -4234,11 +5090,11 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
 	struct trace_array *tr = hist_data->event_file->tr;
 	char *event_name, *param, *system = NULL;
 	struct hist_field *hist_field, *var_ref;
-	unsigned int i, var_ref_idx;
+	unsigned int i;
 	unsigned int field_pos = 0;
 	struct synth_event *event;
 	char *synth_event_name;
-	int ret = 0;
+	int var_ref_idx, ret = 0;
 
 	lockdep_assert_held(&event_mutex);
 
@@ -4255,8 +5111,6 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
 
 	event->ref++;
 
-	var_ref_idx = hist_data->n_var_refs;
-
 	for (i = 0; i < data->n_params; i++) {
 		char *p;
 
@@ -4305,6 +5159,14 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
 				goto err;
 			}
 
+			var_ref_idx = find_var_ref_idx(hist_data, var_ref);
+			if (WARN_ON(var_ref_idx < 0)) {
+				ret = var_ref_idx;
+				goto err;
+			}
+
+			data->var_ref_idx[i] = var_ref_idx;
+
 			field_pos++;
 			kfree(p);
 			continue;
@@ -4323,7 +5185,6 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
 	}
 
 	data->synth_event = event;
-	data->var_ref_idx = var_ref_idx;
  out:
 	return ret;
  err:
@@ -4542,10 +5403,6 @@ static int create_val_fields(struct hist_trigger_data *hist_data,
 	if (!fields_str)
 		goto out;
 
-	strsep(&fields_str, "=");
-	if (!fields_str)
-		goto out;
-
 	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
 		     j < TRACING_MAP_VALS_MAX; i++) {
 		field_str = strsep(&fields_str, ",");
@@ -4640,10 +5497,6 @@ static int create_key_fields(struct hist_trigger_data *hist_data,
 	if (!fields_str)
 		goto out;
 
-	strsep(&fields_str, "=");
-	if (!fields_str)
-		goto out;
-
 	for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
 		field_str = strsep(&fields_str, ",");
 		if (!field_str)
@@ -4775,7 +5628,7 @@ static int create_hist_fields(struct hist_trigger_data *hist_data,
 	return ret;
 }
 
-static int is_descending(const char *str)
+static int is_descending(struct trace_array *tr, const char *str)
 {
 	if (!str)
 		return 0;
@@ -4786,11 +5639,14 @@ static int is_descending(const char *str)
 	if (strcmp(str, "ascending") == 0)
 		return 0;
 
+	hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str));
+
 	return -EINVAL;
 }
 
 static int create_sort_keys(struct hist_trigger_data *hist_data)
 {
+	struct trace_array *tr = hist_data->event_file->tr;
 	char *fields_str = hist_data->attrs->sort_key_str;
 	struct tracing_map_sort_key *sort_key;
 	int descending, ret = 0;
@@ -4801,12 +5657,6 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
 	if (!fields_str)
 		goto out;
 
-	strsep(&fields_str, "=");
-	if (!fields_str) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
 		struct hist_field *hist_field;
 		char *field_str, *field_name;
@@ -4815,25 +5665,30 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
 		sort_key = &hist_data->sort_keys[i];
 
 		field_str = strsep(&fields_str, ",");
-		if (!field_str) {
-			if (i == 0)
-				ret = -EINVAL;
+		if (!field_str)
+			break;
+
+		if (!*field_str) {
+			ret = -EINVAL;
+			hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
 			break;
 		}
 
 		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
+			hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort="));
 			ret = -EINVAL;
 			break;
 		}
 
 		field_name = strsep(&field_str, ".");
-		if (!field_name) {
+		if (!field_name || !*field_name) {
 			ret = -EINVAL;
+			hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
 			break;
 		}
 
 		if (strcmp(field_name, "hitcount") == 0) {
-			descending = is_descending(field_str);
+			descending = is_descending(tr, field_str);
 			if (descending < 0) {
 				ret = descending;
 				break;
@@ -4855,7 +5710,7 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
 
 			if (strcmp(field_name, test_name) == 0) {
 				sort_key->field_idx = idx;
-				descending = is_descending(field_str);
+				descending = is_descending(tr, field_str);
 				if (descending < 0) {
 					ret = descending;
 					goto out;
@@ -4866,6 +5721,7 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
 		}
 		if (j == hist_data->n_fields) {
 			ret = -EINVAL;
+			hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name));
 			break;
 		}
 	}
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 40106ff..dd34a1b 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -116,9 +116,10 @@ static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 {
 	struct trace_event_file *event_file = event_file_data(m->private);
 
-	if (t == SHOW_AVAILABLE_TRIGGERS)
+	if (t == SHOW_AVAILABLE_TRIGGERS) {
+		(*pos)++;
 		return NULL;
-
+	}
 	return seq_list_next(t, &event_file->triggers, pos);
 }
 
@@ -213,7 +214,7 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
-static int trigger_process_regex(struct trace_event_file *file, char *buff)
+int trigger_process_regex(struct trace_event_file *file, char *buff)
 {
 	char *command, *next = buff;
 	struct event_command *p;
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index b611cd3..8a4c8d5 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -101,7 +101,7 @@ static int function_trace_init(struct trace_array *tr)
 
 	ftrace_init_array_ops(tr, func);
 
-	tr->trace_buffer.cpu = get_cpu();
+	tr->array_buffer.cpu = get_cpu();
 	put_cpu();
 
 	tracing_start_cmdline_record();
@@ -118,7 +118,7 @@ static void function_trace_reset(struct trace_array *tr)
 
 static void function_trace_start(struct trace_array *tr)
 {
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 }
 
 static void
@@ -143,7 +143,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 		goto out;
 
 	cpu = smp_processor_id();
-	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	if (!atomic_read(&data->disabled)) {
 		local_save_flags(flags);
 		trace_function(tr, ip, parent_ip, flags, pc);
@@ -192,7 +192,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	 */
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1)) {
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 78af971..7d71546 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -101,7 +101,7 @@ int __trace_graph_entry(struct trace_array *tr,
 {
 	struct trace_event_call *call = &event_funcgraph_entry;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ftrace_graph_ent_entry *entry;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -171,7 +171,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
@@ -221,7 +221,7 @@ void __trace_graph_return(struct trace_array *tr,
 {
 	struct trace_event_call *call = &event_funcgraph_exit;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ftrace_graph_ret_entry *entry;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@ -252,7 +252,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
@@ -444,9 +444,9 @@ get_return_for_leaf(struct trace_iterator *iter,
 			 * We need to consume the current entry to see
 			 * the next one.
 			 */
-			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
+			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
 					    NULL, NULL);
-			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
+			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
 						 NULL, NULL);
 		}
 
@@ -503,7 +503,7 @@ print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
 {
 	unsigned long long usecs;
 
-	usecs = iter->ts - iter->trace_buffer->time_start;
+	usecs = iter->ts - iter->array_buffer->time_start;
 	do_div(usecs, NSEC_PER_USEC);
 
 	trace_seq_printf(s, "%9llu us |  ", usecs);
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 6638d63..a48808c 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -104,7 +104,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
 {
 	struct trace_array *tr = hwlat_trace;
 	struct trace_event_call *call = &event_hwlat;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct hwlat_entry *entry;
 	unsigned long flags;
@@ -556,7 +556,7 @@ static int init_tracefs(void)
 	return 0;
 
  err:
-	tracefs_remove_recursive(top_dir);
+	tracefs_remove(top_dir);
 	return -ENOMEM;
 }
 
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index a745b0c..10bbb0f 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -122,7 +122,7 @@ static int func_prolog_dec(struct trace_array *tr,
 	if (!irqs_disabled_flags(*flags) && !preempt_count())
 		return 0;
 
-	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&(*data)->disabled);
 
 	if (likely(disabled == 1))
@@ -167,7 +167,7 @@ static int irqsoff_display_graph(struct trace_array *tr, int set)
 		per_cpu(tracing_cpu, cpu) = 0;
 
 	tr->max_latency = 0;
-	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
+	tracing_reset_online_cpus(&irqsoff_trace->array_buffer);
 
 	return start_irqsoff_tracer(irqsoff_trace, set);
 }
@@ -382,7 +382,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 	if (per_cpu(tracing_cpu, cpu))
 		return;
 
-	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 
 	if (unlikely(!data) || atomic_read(&data->disabled))
 		return;
@@ -420,7 +420,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 	if (!tracer_enabled || !tracing_is_enabled())
 		return;
 
-	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 
 	if (unlikely(!data) ||
 	    !data->critical_start || atomic_read(&data->disabled))
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index cca6504..9da7610 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -43,7 +43,7 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file)
 	if (cpu_file == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
 			iter.buffer_iter[cpu] =
-			ring_buffer_read_prepare(iter.trace_buffer->buffer,
+			ring_buffer_read_prepare(iter.array_buffer->buffer,
 						 cpu, GFP_ATOMIC);
 			ring_buffer_read_start(iter.buffer_iter[cpu]);
 			tracing_iter_reset(&iter, cpu);
@@ -51,7 +51,7 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file)
 	} else {
 		iter.cpu_file = cpu_file;
 		iter.buffer_iter[cpu_file] =
-			ring_buffer_read_prepare(iter.trace_buffer->buffer,
+			ring_buffer_read_prepare(iter.array_buffer->buffer,
 						 cpu_file, GFP_ATOMIC);
 		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
 		tracing_iter_reset(&iter, cpu_file);
@@ -124,7 +124,7 @@ static int kdb_ftdump(int argc, const char **argv)
 	iter.buffer_iter = buffer_iter;
 
 	for_each_tracing_cpu(cpu) {
-		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
 	}
 
 	/* A negative skip_entries means skip all but the last entries */
@@ -139,7 +139,7 @@ static int kdb_ftdump(int argc, const char **argv)
 	ftrace_dump_buf(skip_entries, cpu_file);
 
 	for_each_tracing_cpu(cpu) {
-		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
 	}
 
 	kdb_trap_printk--;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index aa515d5..d8264eb 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -22,7 +22,6 @@
 
 #define KPROBE_EVENT_SYSTEM "kprobes"
 #define KRETPROBE_MAXACTIVE_MAX 4096
-#define MAX_KPROBE_CMDLINE_SIZE 1024
 
 /* Kprobe early definition from command line */
 static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
@@ -902,6 +901,167 @@ static int create_or_delete_trace_kprobe(int argc, char **argv)
 	return ret == -ECANCELED ? -EINVAL : ret;
 }
 
+static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
+{
+	return trace_run_command(cmd->seq.buffer, create_or_delete_trace_kprobe);
+}
+
+/**
+ * kprobe_event_cmd_init - Initialize a kprobe event command object
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @buf: A pointer to the buffer used to build the command
+ * @maxlen: The length of the buffer passed in @buf
+ *
+ * Initialize a kprobe event command object.  Use this before
+ * calling any of the other kprobe_event functions.
+ */
+void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
+{
+	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
+			  trace_kprobe_run_command);
+}
+EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
+
+/**
+ * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @name: The name of the kprobe event
+ * @loc: The location of the kprobe event
+ * @kretprobe: Is this a return probe?
+ * @args: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+ * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
+ * adds a NULL to the end of the arg list.  If this function is used
+ * directly, make sure the last arg in the variable arg list is NULL.
+ *
+ * Generate a kprobe event command to be executed by
+ * kprobe_event_gen_cmd_end().  This function can be used to generate the
+ * complete command or only the first part of it; in the latter case,
+ * kprobe_event_add_fields() can be used to add more fields following this.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
+				 const char *name, const char *loc, ...)
+{
+	char buf[MAX_EVENT_NAME_LEN];
+	struct dynevent_arg arg;
+	va_list args;
+	int ret;
+
+	if (cmd->type != DYNEVENT_TYPE_KPROBE)
+		return -EINVAL;
+
+	if (kretprobe)
+		snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
+	else
+		snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
+
+	ret = dynevent_str_add(cmd, buf);
+	if (ret)
+		return ret;
+
+	dynevent_arg_init(&arg, 0);
+	arg.str = loc;
+	ret = dynevent_arg_add(cmd, &arg, NULL);
+	if (ret)
+		return ret;
+
+	va_start(args, loc);
+	for (;;) {
+		const char *field;
+
+		field = va_arg(args, const char *);
+		if (!field)
+			break;
+
+		if (++cmd->n_fields > MAX_TRACE_ARGS) {
+			ret = -EINVAL;
+			break;
+		}
+
+		arg.str = field;
+		ret = dynevent_arg_add(cmd, &arg, NULL);
+		if (ret)
+			break;
+	}
+	va_end(args);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
+
+/**
+ * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @args: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+ * rather use the kprobe_event_add_fields() wrapper, which
+ * automatically adds a NULL to the end of the arg list.  If this
+ * function is used directly, make sure the last arg in the variable
+ * arg list is NULL.
+ *
+ * Add probe fields to an existing kprobe command using a variable
+ * list of args.  Fields are added in the same order they're listed.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
+{
+	struct dynevent_arg arg;
+	va_list args;
+	int ret;
+
+	if (cmd->type != DYNEVENT_TYPE_KPROBE)
+		return -EINVAL;
+
+	dynevent_arg_init(&arg, 0);
+
+	va_start(args, cmd);
+	for (;;) {
+		const char *field;
+
+		field = va_arg(args, const char *);
+		if (!field)
+			break;
+
+		if (++cmd->n_fields > MAX_TRACE_ARGS) {
+			ret = -EINVAL;
+			break;
+		}
+
+		arg.str = field;
+		ret = dynevent_arg_add(cmd, &arg, NULL);
+		if (ret)
+			break;
+	}
+	va_end(args);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
+
+/**
+ * kprobe_event_delete - Delete a kprobe event
+ * @name: The name of the kprobe event to delete
+ *
+ * Delete a kprobe event with the given @name from kernel code rather
+ * than directly from the command line.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int kprobe_event_delete(const char *name)
+{
+	char buf[MAX_EVENT_NAME_LEN];
+
+	snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
+
+	return trace_run_command(buf, create_or_delete_trace_kprobe);
+}
+EXPORT_SYMBOL_GPL(kprobe_event_delete);
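Taken together, a hedged sketch of generating and removing a kprobe event from kernel code; the kprobe_event_gen_cmd_start(), kprobe_event_add_fields() and kprobe_event_gen_cmd_end() wrappers are the ones referenced in the comments above, and the probed symbol, fetch args and buffer size are illustrative only:

	char buf[256];
	struct dynevent_cmd cmd;
	int ret;

	kprobe_event_cmd_init(&cmd, buf, sizeof(buf));

	ret = kprobe_event_gen_cmd_start(&cmd, "myopen", "do_sys_open",
					 "dfd=%ax", "filename=%dx");
	if (ret)
		return ret;

	ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
	if (ret)
		return ret;

	ret = kprobe_event_gen_cmd_end(&cmd);
	if (ret)
		return ret;

	/* ... and later, tear it down again ... */
	ret = kprobe_event_delete("myopen");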
+
 static int trace_kprobe_release(struct dyn_event *ev)
 {
 	struct trace_kprobe *tk = to_trace_kprobe(ev);
@@ -1175,35 +1335,35 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
 		    struct trace_event_file *trace_file)
 {
 	struct kprobe_trace_entry_head *entry;
-	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
-	int size, dsize, pc;
-	unsigned long irq_flags;
 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
+	struct trace_event_buffer fbuffer;
+	int dsize;
 
 	WARN_ON(call != trace_file->event_call);
 
 	if (trace_trigger_soft_disabled(trace_file))
 		return;
 
-	local_save_flags(irq_flags);
-	pc = preempt_count();
+	local_save_flags(fbuffer.flags);
+	fbuffer.pc = preempt_count();
+	fbuffer.trace_file = trace_file;
 
 	dsize = __get_data_size(&tk->tp, regs);
-	size = sizeof(*entry) + tk->tp.size + dsize;
 
-	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
-						call->event.type,
-						size, irq_flags, pc);
-	if (!event)
+	fbuffer.event =
+		trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
+					call->event.type,
+					sizeof(*entry) + tk->tp.size + dsize,
+					fbuffer.flags, fbuffer.pc);
+	if (!fbuffer.event)
 		return;
 
-	entry = ring_buffer_event_data(event);
+	fbuffer.regs = regs;
+	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
 	entry->ip = (unsigned long)tk->rp.kp.addr;
 	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
 
-	event_trigger_unlock_commit_regs(trace_file, buffer, event,
-					 entry, irq_flags, pc, regs);
+	trace_event_buffer_commit(&fbuffer);
 }
 
 static void
@@ -1223,36 +1383,35 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 		       struct trace_event_file *trace_file)
 {
 	struct kretprobe_trace_entry_head *entry;
-	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
-	int size, pc, dsize;
-	unsigned long irq_flags;
+	struct trace_event_buffer fbuffer;
 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
+	int dsize;
 
 	WARN_ON(call != trace_file->event_call);
 
 	if (trace_trigger_soft_disabled(trace_file))
 		return;
 
-	local_save_flags(irq_flags);
-	pc = preempt_count();
+	local_save_flags(fbuffer.flags);
+	fbuffer.pc = preempt_count();
+	fbuffer.trace_file = trace_file;
 
 	dsize = __get_data_size(&tk->tp, regs);
-	size = sizeof(*entry) + tk->tp.size + dsize;
-
-	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
-						call->event.type,
-						size, irq_flags, pc);
-	if (!event)
+	fbuffer.event =
+		trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
+					call->event.type,
+					sizeof(*entry) + tk->tp.size + dsize,
+					fbuffer.flags, fbuffer.pc);
+	if (!fbuffer.event)
 		return;
 
-	entry = ring_buffer_event_data(event);
+	fbuffer.regs = regs;
+	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
 	entry->func = (unsigned long)tk->rp.kp.addr;
 	entry->ret_ip = (unsigned long)ri->ret_addr;
 	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
 
-	event_trigger_unlock_commit_regs(trace_file, buffer, event,
-					 entry, irq_flags, pc, regs);
+	trace_event_buffer_commit(&fbuffer);
 }
 
 static void
@@ -1698,11 +1857,12 @@ static __init void setup_boot_kprobe_events(void)
 	enable_boot_kprobe_events();
 }
 
-/* Make a tracefs interface for controlling probe points */
-static __init int init_kprobe_trace(void)
+/*
+ * Register dynevent at subsys_initcall. This allows the kernel to set up
+ * kprobe events in fs_initcall without tracefs.
+ */
+static __init int init_kprobe_trace_early(void)
 {
-	struct dentry *d_tracer;
-	struct dentry *entry;
 	int ret;
 
 	ret = dyn_event_register(&trace_kprobe_ops);
@@ -1712,6 +1872,16 @@ static __init int init_kprobe_trace(void)
 	if (register_module_notifier(&trace_kprobe_module_nb))
 		return -EINVAL;
 
+	return 0;
+}
+subsys_initcall(init_kprobe_trace_early);
+
+/* Make a tracefs interface for controlling probe points */
+static __init int init_kprobe_trace(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
 	d_tracer = tracing_init_dentry();
 	if (IS_ERR(d_tracer))
 		return 0;
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index b038801..84582bf 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -32,7 +32,7 @@ static void mmio_reset_data(struct trace_array *tr)
 	overrun_detected = false;
 	prev_overruns = 0;
 
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 }
 
 static int mmio_trace_init(struct trace_array *tr)
@@ -122,7 +122,7 @@ static void mmio_close(struct trace_iterator *iter)
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
 	unsigned long cnt = atomic_xchg(&dropped_count, 0);
-	unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
+	unsigned long over = ring_buffer_overruns(iter->array_buffer->buffer);
 
 	if (over > prev_overruns)
 		cnt += over - prev_overruns;
@@ -297,7 +297,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 				struct mmiotrace_rw *rw)
 {
 	struct trace_event_call *call = &event_mmiotrace_rw;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
 	int pc = preempt_count();
@@ -318,7 +318,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 void mmio_trace_rw(struct mmiotrace_rw *rw)
 {
 	struct trace_array *tr = mmio_trace_array;
-	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
+	struct trace_array_cpu *data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
 	__trace_mmiotrace_rw(tr, data, rw);
 }
 
@@ -327,7 +327,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 				struct mmiotrace_map *map)
 {
 	struct trace_event_call *call = &event_mmiotrace_map;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
 	int pc = preempt_count();
@@ -351,7 +351,7 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
 	struct trace_array_cpu *data;
 
 	preempt_disable();
-	data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
+	data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
 	__trace_mmiotrace_map(tr, data, map);
 	preempt_enable();
 }
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index d9b4b7c..b490908 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -538,7 +538,7 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
 	struct trace_array *tr = iter->tr;
 	unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE;
 	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
-	unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
+	unsigned long long abs_ts = iter->ts - iter->array_buffer->time_start;
 	unsigned long long rel_ts = next_ts - iter->ts;
 	struct trace_seq *s = &iter->seq;
 
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index e288168..e304196 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -89,8 +89,10 @@ static void tracing_sched_unregister(void)
 
 static void tracing_start_sched_switch(int ops)
 {
-	bool sched_register = (!sched_cmdline_ref && !sched_tgid_ref);
+	bool sched_register;
+
 	mutex_lock(&sched_register_mutex);
+	sched_register = (!sched_cmdline_ref && !sched_tgid_ref);
 
 	switch (ops) {
 	case RECORD_CMDLINE:
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 617e297..97b10bb 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -82,7 +82,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
 	if (cpu != wakeup_current_cpu)
 		goto out_enable;
 
-	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&(*data)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
@@ -378,7 +378,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 			   unsigned long flags, int pc)
 {
 	struct trace_event_call *call = &event_context_switch;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
@@ -408,7 +408,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	struct trace_event_call *call = &event_wakeup;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
 					  sizeof(*entry), flags, pc);
@@ -459,7 +459,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
 
 	/* disable local data, not wakeup_cpu data */
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 	if (likely(disabled != 1))
 		goto out;
 
@@ -471,7 +471,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
 		goto out_unlock;
 
 	/* The task we are waiting for is waking up */
-	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
 
 	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
 	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
@@ -494,7 +494,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
 	arch_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 out:
-	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 }
 
 static void __wakeup_reset(struct trace_array *tr)
@@ -513,7 +513,7 @@ static void wakeup_reset(struct trace_array *tr)
 {
 	unsigned long flags;
 
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 
 	local_irq_save(flags);
 	arch_spin_lock(&wakeup_lock);
@@ -551,7 +551,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
 		return;
 
 	pc = preempt_count();
-	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
 
@@ -583,7 +583,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
 
 	local_save_flags(flags);
 
-	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
 	__trace_stack(wakeup_trace, flags, 0, pc);
@@ -598,7 +598,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
 out_locked:
 	arch_spin_unlock(&wakeup_lock);
 out:
-	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
 }
 
 static void start_wakeup_tracer(struct trace_array *tr)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 69ee8ef..b5e3496 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -23,7 +23,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 	return 0;
 }
 
-static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
+static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
 {
 	struct ring_buffer_event *event;
 	struct trace_entry *entry;
@@ -60,7 +60,7 @@ static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
  * Test the trace buffer to see if all the elements
  * are still sane.
  */
-static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
+static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
 {
 	unsigned long flags, cnt = 0;
 	int cpu, ret = 0;
@@ -362,7 +362,7 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	msleep(100);
 
 	/* we should have nothing in the buffer */
-	ret = trace_test_buffer(&tr->trace_buffer, &count);
+	ret = trace_test_buffer(&tr->array_buffer, &count);
 	if (ret)
 		goto out;
 
@@ -383,7 +383,7 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	ftrace_enabled = 0;
 
 	/* check the trace buffer */
-	ret = trace_test_buffer(&tr->trace_buffer, &count);
+	ret = trace_test_buffer(&tr->array_buffer, &count);
 
 	ftrace_enabled = 1;
 	tracing_start();
@@ -682,7 +682,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 	ftrace_enabled = 0;
 
 	/* check the trace buffer */
-	ret = trace_test_buffer(&tr->trace_buffer, &count);
+	ret = trace_test_buffer(&tr->array_buffer, &count);
 
 	ftrace_enabled = 1;
 	trace->reset(tr);
@@ -768,7 +768,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
 	 * Simulate the init() callback but we attach a watchdog callback
 	 * to detect and recover from possible hangs
 	 */
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 	set_graph_array(tr);
 	ret = register_ftrace_graph(&fgraph_ops);
 	if (ret) {
@@ -790,7 +790,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
 	tracing_stop();
 
 	/* check the trace buffer */
-	ret = trace_test_buffer(&tr->trace_buffer, &count);
+	ret = trace_test_buffer(&tr->array_buffer, &count);
 
 	/* Need to also simulate the tr->reset to remove this fgraph_ops */
 	tracing_stop_cmdline_record();
@@ -848,7 +848,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(&tr->trace_buffer, NULL);
+	ret = trace_test_buffer(&tr->array_buffer, NULL);
 	if (!ret)
 		ret = trace_test_buffer(&tr->max_buffer, &count);
 	trace->reset(tr);
@@ -910,7 +910,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(&tr->trace_buffer, NULL);
+	ret = trace_test_buffer(&tr->array_buffer, NULL);
 	if (!ret)
 		ret = trace_test_buffer(&tr->max_buffer, &count);
 	trace->reset(tr);
@@ -976,7 +976,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(&tr->trace_buffer, NULL);
+	ret = trace_test_buffer(&tr->array_buffer, NULL);
 	if (ret)
 		goto out;
 
@@ -1006,7 +1006,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(&tr->trace_buffer, NULL);
+	ret = trace_test_buffer(&tr->array_buffer, NULL);
 	if (ret)
 		goto out;
 
@@ -1136,7 +1136,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(&tr->trace_buffer, NULL);
+	ret = trace_test_buffer(&tr->array_buffer, NULL);
 	if (!ret)
 		ret = trace_test_buffer(&tr->max_buffer, &count);
 
@@ -1177,7 +1177,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check the trace buffer */
-	ret = trace_test_buffer(&tr->trace_buffer, &count);
+	ret = trace_test_buffer(&tr->array_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
 
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
index 87de6ed..1d84fcc 100644
--- a/kernel/trace/trace_seq.c
+++ b/kernel/trace/trace_seq.c
@@ -30,9 +30,6 @@
 /* How much buffer is left on the trace_seq? */
 #define TRACE_SEQ_BUF_LEFT(s) seq_buf_buffer_left(&(s)->seq)
 
-/* How much buffer is written? */
-#define TRACE_SEQ_BUF_USED(s) seq_buf_used(&(s)->seq)
-
 /*
  * trace_seq should work with being initialized with 0s.
  */
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 874f127..d1fa197 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -280,18 +280,22 @@ static int tracing_stat_init(void)
 
 	d_tracing = tracing_init_dentry();
 	if (IS_ERR(d_tracing))
-		return 0;
+		return -ENODEV;
 
 	stat_dir = tracefs_create_dir("trace_stat", d_tracing);
-	if (!stat_dir)
+	if (!stat_dir) {
 		pr_warn("Could not create tracefs 'trace_stat' entry\n");
+		return -ENOMEM;
+	}
 	return 0;
 }
 
 static int init_stat_file(struct stat_session *session)
 {
-	if (!stat_dir && tracing_stat_init())
-		return -ENODEV;
+	int ret;
+
+	if (!stat_dir && (ret = tracing_stat_init()))
+		return ret;
 
 	session->file = tracefs_create_file(session->ts->name, 0644,
 					    stat_dir,
@@ -304,7 +308,7 @@ static int init_stat_file(struct stat_session *session)
 int register_stat_tracer(struct tracer_stat *trace)
 {
 	struct stat_session *session, *node;
-	int ret;
+	int ret = -EINVAL;
 
 	if (!trace)
 		return -EINVAL;
@@ -315,17 +319,15 @@ int register_stat_tracer(struct tracer_stat *trace)
 	/* Already registered? */
 	mutex_lock(&all_stat_sessions_mutex);
 	list_for_each_entry(node, &all_stat_sessions, session_list) {
-		if (node->ts == trace) {
-			mutex_unlock(&all_stat_sessions_mutex);
-			return -EINVAL;
-		}
+		if (node->ts == trace)
+			goto out;
 	}
-	mutex_unlock(&all_stat_sessions_mutex);
 
+	ret = -ENOMEM;
 	/* Init the session */
 	session = kzalloc(sizeof(*session), GFP_KERNEL);
 	if (!session)
-		return -ENOMEM;
+		goto out;
 
 	session->ts = trace;
 	INIT_LIST_HEAD(&session->session_list);
@@ -334,15 +336,16 @@ int register_stat_tracer(struct tracer_stat *trace)
 	ret = init_stat_file(session);
 	if (ret) {
 		destroy_session(session);
-		return ret;
+		goto out;
 	}
 
+	ret = 0;
 	/* Register */
-	mutex_lock(&all_stat_sessions_mutex);
 	list_add_tail(&session->session_list, &all_stat_sessions);
+ out:
 	mutex_unlock(&all_stat_sessions_mutex);
 
-	return 0;
+	return ret;
 }
 
 void unregister_stat_tracer(struct tracer_stat *trace)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 2978c29..d85a2f0 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -297,7 +297,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	struct syscall_trace_enter *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	unsigned long irq_flags;
 	unsigned long args[6];
 	int pc;
@@ -325,7 +325,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	local_save_flags(irq_flags);
 	pc = preempt_count();
 
-	buffer = tr->trace_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	event = trace_buffer_lock_reserve(buffer,
 			sys_data->enter_event->event.type, size, irq_flags, pc);
 	if (!event)
@@ -347,7 +347,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	struct syscall_trace_exit *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	unsigned long irq_flags;
 	int pc;
 	int syscall_nr;
@@ -371,7 +371,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	local_save_flags(irq_flags);
 	pc = preempt_count();
 
-	buffer = tr->trace_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	event = trace_buffer_lock_reserve(buffer,
 			sys_data->exit_event->event.type, sizeof(*entry),
 			irq_flags, pc);
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 7885ebd..18d16f3 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -931,8 +931,8 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
 				struct trace_event_file *trace_file)
 {
 	struct uprobe_trace_entry_head *entry;
+	struct trace_buffer *buffer;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
 	void *data;
 	int size, esize;
 	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 7be3e75..257ffb9 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -24,6 +24,7 @@ void bacct_add_tsk(struct user_namespace *user_ns,
 	const struct cred *tcred;
 	u64 utime, stime, utimescaled, stimescaled;
 	u64 delta;
+	time64_t btime;
 
 	BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
 
@@ -32,9 +33,11 @@ void bacct_add_tsk(struct user_namespace *user_ns,
 	/* Convert to micro seconds */
 	do_div(delta, NSEC_PER_USEC);
 	stats->ac_etime = delta;
-	/* Convert to seconds for btime */
-	do_div(delta, USEC_PER_SEC);
-	stats->ac_btime = get_seconds() - delta;
+	/* Convert to seconds for btime (note y2106 limit) */
+	btime = ktime_get_real_seconds() - div_u64(delta, USEC_PER_SEC);
+	stats->ac_btime = clamp_t(time64_t, btime, 0, U32_MAX);
+	stats->ac_btime64 = btime;
+
 	if (thread_group_leader(tsk)) {
 		stats->ac_exitcode = tsk->exit_code;
 		if (tsk->flags & PF_FORKNOEXEC)
diff --git a/lib/Kconfig b/lib/Kconfig
index 6e790dc..0cf875f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -278,6 +278,13 @@
 	tristate
 	select BITREVERSE
 
+config ZLIB_DFLTCC
+	def_bool y
+	depends on S390
+	prompt "Enable s390x DEFLATE CONVERSION CALL support for kernel zlib"
+	help
+	 Enable s390x hardware support for zlib in the kernel.
+
 config LZO_COMPRESS
 	tristate
 
@@ -566,6 +573,9 @@
 config LIBFDT
 	bool
 
+config LIBXBC
+	bool
+
 config OID_REGISTRY
 	tristate
 	help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6859f52..69def4a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2025,7 +2025,7 @@
 	  If unsure, say N.
 
 config SYSCTL_KUNIT_TEST
-	bool "KUnit test for sysctl"
+	tristate "KUnit test for sysctl"
 	depends on KUNIT
 	help
 	  This builds the proc sysctl unit test, which runs on boot.
@@ -2036,7 +2036,7 @@
 	  If unsure, say N.
 
 config LIST_KUNIT_TEST
-	bool "KUnit Test for Kernel Linked-list structures"
+	tristate "KUnit Test for Kernel Linked-list structures"
 	depends on KUNIT
 	help
 	  This builds the linked list KUnit test suite.
diff --git a/lib/Makefile b/lib/Makefile
index c20b1de..8bb9117 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -16,6 +16,7 @@
 KCOV_INSTRUMENT_list_debug.o := n
 KCOV_INSTRUMENT_debugobjects.o := n
 KCOV_INSTRUMENT_dynamic_debug.o := n
+KCOV_INSTRUMENT_fault-inject.o := n
 
 # Early boot use of cmdline, don't instrument it
 ifdef CONFIG_AMD_MEM_ENCRYPT
@@ -140,6 +141,7 @@
 obj-$(CONFIG_842_DECOMPRESS) += 842/
 obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
 obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
+obj-$(CONFIG_ZLIB_DFLTCC) += zlib_dfltcc/
 obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
 obj-$(CONFIG_BCH) += bch.o
 obj-$(CONFIG_LZO_COMPRESS) += lzo/
@@ -228,6 +230,8 @@
 	$(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
 lib-$(CONFIG_LIBFDT) += $(libfdt_files)
 
+lib-$(CONFIG_LIBXBC) += bootconfig.o
+
 obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
 obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
 
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 6e175fb..89260aa 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -431,97 +431,6 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
  * second version by Paul Jackson, third by Joe Korty.
  */
 
-#define CHUNKSZ				32
-#define nbits_to_hold_value(val)	fls(val)
-#define BASEDEC 10		/* fancier cpuset lists input in decimal */
-
-/**
- * __bitmap_parse - convert an ASCII hex string into a bitmap.
- * @buf: pointer to buffer containing string.
- * @buflen: buffer size in bytes.  If string is smaller than this
- *    then it must be terminated with a \0.
- * @is_user: location of buffer, 0 indicates kernel space
- * @maskp: pointer to bitmap array that will contain result.
- * @nmaskbits: size of bitmap, in bits.
- *
- * Commas group hex digits into chunks.  Each chunk defines exactly 32
- * bits of the resultant bitmask.  No chunk may specify a value larger
- * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
- * then leading 0-bits are prepended.  %-EINVAL is returned for illegal
- * characters and for grouping errors such as "1,,5", ",44", "," and "".
- * Leading and trailing whitespace accepted, but not embedded whitespace.
- */
-int __bitmap_parse(const char *buf, unsigned int buflen,
-		int is_user, unsigned long *maskp,
-		int nmaskbits)
-{
-	int c, old_c, totaldigits, ndigits, nchunks, nbits;
-	u32 chunk;
-	const char __user __force *ubuf = (const char __user __force *)buf;
-
-	bitmap_zero(maskp, nmaskbits);
-
-	nchunks = nbits = totaldigits = c = 0;
-	do {
-		chunk = 0;
-		ndigits = totaldigits;
-
-		/* Get the next chunk of the bitmap */
-		while (buflen) {
-			old_c = c;
-			if (is_user) {
-				if (__get_user(c, ubuf++))
-					return -EFAULT;
-			}
-			else
-				c = *buf++;
-			buflen--;
-			if (isspace(c))
-				continue;
-
-			/*
-			 * If the last character was a space and the current
-			 * character isn't '\0', we've got embedded whitespace.
-			 * This is a no-no, so throw an error.
-			 */
-			if (totaldigits && c && isspace(old_c))
-				return -EINVAL;
-
-			/* A '\0' or a ',' signal the end of the chunk */
-			if (c == '\0' || c == ',')
-				break;
-
-			if (!isxdigit(c))
-				return -EINVAL;
-
-			/*
-			 * Make sure there are at least 4 free bits in 'chunk'.
-			 * If not, this hexdigit will overflow 'chunk', so
-			 * throw an error.
-			 */
-			if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
-				return -EOVERFLOW;
-
-			chunk = (chunk << 4) | hex_to_bin(c);
-			totaldigits++;
-		}
-		if (ndigits == totaldigits)
-			return -EINVAL;
-		if (nchunks == 0 && chunk == 0)
-			continue;
-
-		__bitmap_shift_left(maskp, maskp, CHUNKSZ, nmaskbits);
-		*maskp |= chunk;
-		nchunks++;
-		nbits += (nchunks == 1) ? nbits_to_hold_value(chunk) : CHUNKSZ;
-		if (nbits > nmaskbits)
-			return -EOVERFLOW;
-	} while (buflen && c == ',');
-
-	return 0;
-}
-EXPORT_SYMBOL(__bitmap_parse);
-
 /**
  * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
  *
@@ -530,22 +439,22 @@ EXPORT_SYMBOL(__bitmap_parse);
  *    then it must be terminated with a \0.
  * @maskp: pointer to bitmap array that will contain result.
  * @nmaskbits: size of bitmap, in bits.
- *
- * Wrapper for __bitmap_parse(), providing it with user buffer.
- *
- * We cannot have this as an inline function in bitmap.h because it needs
- * linux/uaccess.h to get the access_ok() declaration and this causes
- * cyclic dependencies.
  */
 int bitmap_parse_user(const char __user *ubuf,
 			unsigned int ulen, unsigned long *maskp,
 			int nmaskbits)
 {
-	if (!access_ok(ubuf, ulen))
-		return -EFAULT;
-	return __bitmap_parse((const char __force *)ubuf,
-				ulen, 1, maskp, nmaskbits);
+	char *buf;
+	int ret;
 
+	buf = memdup_user_nul(ubuf, ulen);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
+	ret = bitmap_parse(buf, UINT_MAX, maskp, nmaskbits);
+
+	kfree(buf);
+	return ret;
 }
 EXPORT_SYMBOL(bitmap_parse_user);
 
@@ -653,6 +562,14 @@ static const char *bitmap_find_region(const char *str)
 	return end_of_str(*str) ? NULL : str;
 }
 
+static const char *bitmap_find_region_reverse(const char *start, const char *end)
+{
+	while (start <= end && __end_of_region(*end))
+		end--;
+
+	return end;
+}
+
 static const char *bitmap_parse_region(const char *str, struct region *r)
 {
 	str = bitmap_getnum(str, &r->start);
@@ -776,6 +693,80 @@ int bitmap_parselist_user(const char __user *ubuf,
 }
 EXPORT_SYMBOL(bitmap_parselist_user);
 
+static const char *bitmap_get_x32_reverse(const char *start,
+					const char *end, u32 *num)
+{
+	u32 ret = 0;
+	int c, i;
+
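+	/*
+	 * Consume up to eight hex digits right-to-left (least significant
+	 * digit first) into one 32-bit chunk; bail out with an ERR_PTR()
+	 * on an invalid digit or on a ninth hex digit (chunk wider than
+	 * 32 bits).
+	 */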
+	for (i = 0; i < 32; i += 4) {
+		c = hex_to_bin(*end--);
+		if (c < 0)
+			return ERR_PTR(-EINVAL);
+
+		ret |= c << i;
+
+		if (start > end || __end_of_region(*end))
+			goto out;
+	}
+
+	if (hex_to_bin(*end--) >= 0)
+		return ERR_PTR(-EOVERFLOW);
+out:
+	*num = ret;
+	return end;
+}
+
+/**
+ * bitmap_parse - convert an ASCII hex string into a bitmap.
+ * @start: pointer to buffer containing string.
+ * @buflen: buffer size in bytes.  If string is smaller than this
+ *    then it must be terminated with a \0 or \n. In that case,
+ *    UINT_MAX may be provided instead of string length.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Commas group hex digits into chunks.  Each chunk defines exactly 32
+ * bits of the resultant bitmask.  No chunk may specify a value larger
+ * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
+ * then leading 0-bits are prepended.  %-EINVAL is returned for illegal
+ * characters. Grouping such as "1,,5", ",44", "," or "" is allowed.
+ * Leading, embedded and trailing whitespace accepted.
+ */
+int bitmap_parse(const char *start, unsigned int buflen,
+		unsigned long *maskp, int nmaskbits)
+{
+	const char *end = strnchrnul(start, buflen, '\n') - 1;
+	int chunks = BITS_TO_U32(nmaskbits);
+	u32 *bitmap = (u32 *)maskp;
+	int unset_bit;
+
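+	/*
+	 * Walk the string from its end: each comma/whitespace-delimited
+	 * region is parsed into one 32-bit chunk, least significant chunk
+	 * first. For example (illustrative), "1,ffffffff" yields bits 0-31
+	 * from the rightmost chunk plus bit 32 from the leading "1".
+	 */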
+	while (1) {
+		end = bitmap_find_region_reverse(start, end);
+		if (start > end)
+			break;
+
+		if (!chunks--)
+			return -EOVERFLOW;
+
+		end = bitmap_get_x32_reverse(start, end, bitmap++);
+		if (IS_ERR(end))
+			return PTR_ERR(end);
+	}
+
+	unset_bit = (BITS_TO_U32(nmaskbits) - chunks) * 32;
+	if (unset_bit < nmaskbits) {
+		bitmap_clear(maskp, unset_bit, nmaskbits - unset_bit);
+		return 0;
+	}
+
+	if (find_next_bit(maskp, unset_bit, nmaskbits) != unset_bit)
+		return -EOVERFLOW;
+
+	return 0;
+}
+EXPORT_SYMBOL(bitmap_parse);
+
 
 #ifdef CONFIG_NUMA
 /**
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
new file mode 100644
index 0000000..afb2e76
--- /dev/null
+++ b/lib/bootconfig.c
@@ -0,0 +1,814 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extra Boot Config
+ * Masami Hiramatsu <mhiramat@kernel.org>
+ */
+
+#define pr_fmt(fmt)    "bootconfig: " fmt
+
+#include <linux/bug.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/bootconfig.h>
+#include <linux/string.h>
+
+/*
+ * Extra Boot Config (XBC) is given as tree-structured ASCII text of
+ * key-value pairs in memory.
+ * xbc_init() parses the text to build a simple tree. Each tree node is
+ * simply a key word or a value. A key node may have a next key node and/or
+ * a child node (both key and value). A value node may have a next value
+ * node (for arrays).
+ */
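+
+/*
+ * For example (illustrative), the config text
+ *
+ *	feature.option.foo = 1
+ *	feature.option.bar = "a", "b"
+ *
+ * builds the key chain "feature" -> "option" with child keys "foo" and
+ * "bar", where "foo" holds the single value "1" and "bar" holds the
+ * value array "a", "b".
+ */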
+
+static struct xbc_node xbc_nodes[XBC_NODE_MAX] __initdata;
+static int xbc_node_num __initdata;
+static char *xbc_data __initdata;
+static size_t xbc_data_size __initdata;
+static struct xbc_node *last_parent __initdata;
+
+static int __init xbc_parse_error(const char *msg, const char *p)
+{
+	int pos = p - xbc_data;
+
+	pr_err("Parse error at pos %d: %s\n", pos, msg);
+	return -EINVAL;
+}
+
+/**
+ * xbc_root_node() - Get the root node of extended boot config
+ *
+ * Return the address of the root node of the extended boot config. If the
+ * extended boot config is not initialized, return NULL.
+ */
+struct xbc_node * __init xbc_root_node(void)
+{
+	if (unlikely(!xbc_data))
+		return NULL;
+
+	return xbc_nodes;
+}
+
+/**
+ * xbc_node_index() - Get the index of XBC node
+ * @node: A target node of getting index.
+ *
+ * Return the index number of @node in XBC node list.
+ */
+int __init xbc_node_index(struct xbc_node *node)
+{
+	return node - &xbc_nodes[0];
+}
+
+/**
+ * xbc_node_get_parent() - Get the parent XBC node
+ * @node: An XBC node.
+ *
+ * Return the parent node of @node. If the node is top node of the tree,
+ * return NULL.
+ */
+struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node)
+{
+	return node->parent == XBC_NODE_MAX ? NULL : &xbc_nodes[node->parent];
+}
+
+/**
+ * xbc_node_get_child() - Get the child XBC node
+ * @node: An XBC node.
+ *
+ * Return the first child node of @node. If the node has no child, return
+ * NULL.
+ */
+struct xbc_node * __init xbc_node_get_child(struct xbc_node *node)
+{
+	return node->child ? &xbc_nodes[node->child] : NULL;
+}
+
+/**
+ * xbc_node_get_next() - Get the next sibling XBC node
+ * @node: An XBC node.
+ *
+ * Return the NEXT sibling node of @node. If the node has no next sibling,
+ * return NULL. Note that even if this returns NULL, it doesn't mean @node
+ * has no siblings. (You also have to check whether the parent's child node
+ * is @node or not.)
+ */
+struct xbc_node * __init xbc_node_get_next(struct xbc_node *node)
+{
+	return node->next ? &xbc_nodes[node->next] : NULL;
+}
+
+/**
+ * xbc_node_get_data() - Get the data of XBC node
+ * @node: An XBC node.
+ *
+ * Return the data (which is always a null terminated string) of @node.
+ * If the node has invalid data, warn and return NULL.
+ */
+const char * __init xbc_node_get_data(struct xbc_node *node)
+{
+	int offset = node->data & ~XBC_VALUE;
+
+	if (WARN_ON(offset >= xbc_data_size))
+		return NULL;
+
+	return xbc_data + offset;
+}
+
+static bool __init
+xbc_node_match_prefix(struct xbc_node *node, const char **prefix)
+{
+	const char *p = xbc_node_get_data(node);
+	int len = strlen(p);
+
+	if (strncmp(*prefix, p, len))
+		return false;
+
+	p = *prefix + len;
+	if (*p == '.')
+		p++;
+	else if (*p != '\0')
+		return false;
+	*prefix = p;
+
+	return true;
+}
+
+/**
+ * xbc_node_find_child() - Find a child node which matches given key
+ * @parent: An XBC node.
+ * @key: A key string.
+ *
+ * Search a node under @parent which matches @key. The @key can contain
+ * several words joined with '.'. If @parent is NULL, this searches the
+ * whole tree. Return NULL if no node matches.
+ */
+struct xbc_node * __init
+xbc_node_find_child(struct xbc_node *parent, const char *key)
+{
+	struct xbc_node *node;
+
+	if (parent)
+		node = xbc_node_get_child(parent);
+	else
+		node = xbc_root_node();
+
+	while (node && xbc_node_is_key(node)) {
+		if (!xbc_node_match_prefix(node, &key))
+			node = xbc_node_get_next(node);
+		else if (*key != '\0')
+			node = xbc_node_get_child(node);
+		else
+			break;
+	}
+
+	return node;
+}
+
+/**
+ * xbc_node_find_value() - Find a value node which matches given key
+ * @parent: An XBC node.
+ * @key: A key string.
+ * @vnode: A container pointer of found XBC node.
+ *
+ * Search a value node under @parent whose (parent) key node matches @key,
+ * store it in *@vnode, and return the value string.
+ * The @key can contain several words joined with '.'. If @parent is NULL,
+ * this searches the whole tree. Return the value string if a matching key
+ * is found, or NULL if no node matches.
+ * Note that this returns a 0-length string and stores NULL in *@vnode if
+ * the key has no value. If the value is an array, the value of the first
+ * entry is returned.
+ */
+const char * __init
+xbc_node_find_value(struct xbc_node *parent, const char *key,
+		    struct xbc_node **vnode)
+{
+	struct xbc_node *node = xbc_node_find_child(parent, key);
+
+	if (!node || !xbc_node_is_key(node))
+		return NULL;
+
+	node = xbc_node_get_child(node);
+	if (node && !xbc_node_is_value(node))
+		return NULL;
+
+	if (vnode)
+		*vnode = node;
+
+	return node ? xbc_node_get_data(node) : "";
+}
+
+/**
+ * xbc_node_compose_key_after() - Compose partial key string of the XBC node
+ * @root: Root XBC node
+ * @node: Target XBC node.
+ * @buf: A buffer to store the key.
+ * @size: The size of the @buf.
+ *
+ * Compose the partial key of @node into @buf, starting right after @root
+ * (@root itself is not included). If @root is NULL, this returns the full
+ * key words of @node.
+ * Returns the total length of the key stored in @buf. Returns -EINVAL
+ * if @node is NULL, @root is not an ancestor of @node, or @root is @node,
+ * or returns -ERANGE if the key depth exceeds the maximum depth.
+ * This is expected to be used with xbc_find_node() to list all (child)
+ * keys under a given key.
+ */
+int __init xbc_node_compose_key_after(struct xbc_node *root,
+				      struct xbc_node *node,
+				      char *buf, size_t size)
+{
+	u16 keys[XBC_DEPTH_MAX];
+	int depth = 0, ret = 0, total = 0;
+
+	if (!node || node == root)
+		return -EINVAL;
+
+	if (xbc_node_is_value(node))
+		node = xbc_node_get_parent(node);
+
+	while (node && node != root) {
+		keys[depth++] = xbc_node_index(node);
+		if (depth == XBC_DEPTH_MAX)
+			return -ERANGE;
+		node = xbc_node_get_parent(node);
+	}
+	if (!node && root)
+		return -EINVAL;
+
+	while (--depth >= 0) {
+		node = xbc_nodes + keys[depth];
+		ret = snprintf(buf, size, "%s%s", xbc_node_get_data(node),
+			       depth ? "." : "");
+		if (ret < 0)
+			return ret;
+		if (ret > size) {
+			size = 0;
+		} else {
+			size -= ret;
+			buf += ret;
+		}
+		total += ret;
+	}
+
+	return total;
+}
+
+/**
+ * xbc_node_find_next_leaf() - Find the next leaf node under given node
+ * @root: An XBC root node
+ * @node: An XBC node which starts from.
+ *
+ * Search the next leaf node (which means the terminal key node) of @node
+ * under @root node (including @root node itself).
+ * Return the next node or NULL if next leaf node is not found.
+ */
+struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
+						 struct xbc_node *node)
+{
+	if (unlikely(!xbc_data))
+		return NULL;
+
+	if (!node) {	/* First try */
+		node = root;
+		if (!node)
+			node = xbc_nodes;
+	} else {
+		if (node == root)	/* @root was a leaf, no child node. */
+			return NULL;
+
+		while (!node->next) {
+			node = xbc_node_get_parent(node);
+			if (node == root)
+				return NULL;
+			/* User passed a node which is not under the parent */
+			if (WARN_ON(!node))
+				return NULL;
+		}
+		node = xbc_node_get_next(node);
+	}
+
+	while (node && !xbc_node_is_leaf(node))
+		node = xbc_node_get_child(node);
+
+	return node;
+}
+
+/**
+ * xbc_node_find_next_key_value() - Find the next key-value pair nodes
+ * @root: An XBC root node
+ * @leaf: A container pointer of XBC node which starts from.
+ *
+ * Search the next leaf node (which means the terminal key node) of *@leaf
+ * under the @root node. Returns the value and updates *@leaf if the next
+ * leaf node is found, or NULL if no next leaf node is found.
+ * Note that this returns a 0-length string if the key has no value, or
+ * the value of the first entry if the value is an array.
+ */
+const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
+						 struct xbc_node **leaf)
+{
+	/* A container pointer @leaf must be passed */
+	if (WARN_ON(!leaf))
+		return NULL;
+
+	*leaf = xbc_node_find_next_leaf(root, *leaf);
+	if (!*leaf)
+		return NULL;
+	if ((*leaf)->child)
+		return xbc_node_get_data(xbc_node_get_child(*leaf));
+	else
+		return "";	/* No value key */
+}
+
+/* XBC parse and tree build */
+
+static struct xbc_node * __init xbc_add_node(char *data, u32 flag)
+{
+	struct xbc_node *node;
+	unsigned long offset;
+
+	if (xbc_node_num == XBC_NODE_MAX)
+		return NULL;
+
+	node = &xbc_nodes[xbc_node_num++];
+	offset = data - xbc_data;
+	node->data = (u16)offset;
+	if (WARN_ON(offset >= XBC_DATA_MAX))
+		return NULL;
+	node->data |= flag;
+	node->child = 0;
+	node->next = 0;
+
+	return node;
+}
+
+static inline __init struct xbc_node *xbc_last_sibling(struct xbc_node *node)
+{
+	while (node->next)
+		node = xbc_node_get_next(node);
+
+	return node;
+}
+
+static struct xbc_node * __init xbc_add_sibling(char *data, u32 flag)
+{
+	struct xbc_node *sib, *node = xbc_add_node(data, flag);
+
+	if (node) {
+		if (!last_parent) {
+			node->parent = XBC_NODE_MAX;
+			sib = xbc_last_sibling(xbc_nodes);
+			sib->next = xbc_node_index(node);
+		} else {
+			node->parent = xbc_node_index(last_parent);
+			if (!last_parent->child) {
+				last_parent->child = xbc_node_index(node);
+			} else {
+				sib = xbc_node_get_child(last_parent);
+				sib = xbc_last_sibling(sib);
+				sib->next = xbc_node_index(node);
+			}
+		}
+	} else
+		xbc_parse_error("Too many nodes", data);
+
+	return node;
+}
+
+static inline __init struct xbc_node *xbc_add_child(char *data, u32 flag)
+{
+	struct xbc_node *node = xbc_add_sibling(data, flag);
+
+	if (node)
+		last_parent = node;
+
+	return node;
+}
+
+static inline __init bool xbc_valid_keyword(char *key)
+{
+	if (key[0] == '\0')
+		return false;
+
+	while (isalnum(*key) || *key == '-' || *key == '_')
+		key++;
+
+	return *key == '\0';
+}
+
+static char *skip_comment(char *p)
+{
+	char *ret;
+
+	ret = strchr(p, '\n');
+	if (!ret)
+		ret = p + strlen(p);
+	else
+		ret++;
+
+	return ret;
+}
+
+static char *skip_spaces_until_newline(char *p)
+{
+	while (isspace(*p) && *p != '\n')
+		p++;
+	return p;
+}
+
+static int __init __xbc_open_brace(void)
+{
+	/* Mark the last key as open brace */
+	last_parent->next = XBC_NODE_MAX;
+
+	return 0;
+}
+
+static int __init __xbc_close_brace(char *p)
+{
+	struct xbc_node *node;
+
+	if (!last_parent || last_parent->next != XBC_NODE_MAX)
+		return xbc_parse_error("Unexpected closing brace", p);
+
+	node = last_parent;
+	node->next = 0;
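+	/*
+	 * Pop back up to the enclosing key node that still carries an
+	 * open-brace marker (if any); it becomes the new last_parent.
+	 */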
+	do {
+		node = xbc_node_get_parent(node);
+	} while (node && node->next != XBC_NODE_MAX);
+	last_parent = node;
+
+	return 0;
+}
+
+/*
+ * Return the delimiter or an error; no node is added. As in lib/cmdline.c,
+ * you can use " around spaces, but you can't escape " within a value.
+ */
+static int __init __xbc_parse_value(char **__v, char **__n)
+{
+	char *p, *v = *__v;
+	int c, quotes = 0;
+
+	v = skip_spaces(v);
+	while (*v == '#') {
+		v = skip_comment(v);
+		v = skip_spaces(v);
+	}
+	if (*v == '"' || *v == '\'') {
+		quotes = *v;
+		v++;
+	}
+	p = v - 1;
+	while ((c = *++p)) {
+		if (!isprint(c) && !isspace(c))
+			return xbc_parse_error("Non printable value", p);
+		if (quotes) {
+			if (c != quotes)
+				continue;
+			quotes = 0;
+			*p++ = '\0';
+			p = skip_spaces_until_newline(p);
+			c = *p;
+			if (c && !strchr(",;\n#}", c))
+				return xbc_parse_error("No value delimiter", p);
+			if (*p)
+				p++;
+			break;
+		}
+		if (strchr(",;\n#}", c)) {
+			v = strim(v);
+			*p++ = '\0';
+			break;
+		}
+	}
+	if (quotes)
+		return xbc_parse_error("No closing quotes", p);
+	if (c == '#') {
+		p = skip_comment(p);
+		c = '\n';	/* A comment must be treated as a newline */
+	}
+	*__n = p;
+	*__v = v;
+
+	return c;
+}
+
+static int __init xbc_parse_array(char **__v)
+{
+	struct xbc_node *node;
+	char *next;
+	int c = 0;
+
+	do {
+		c = __xbc_parse_value(__v, &next);
+		if (c < 0)
+			return c;
+
+		node = xbc_add_sibling(*__v, XBC_VALUE);
+		if (!node)
+			return -ENOMEM;
+		*__v = next;
+	} while (c == ',');
+	node->next = 0;
+
+	return c;
+}
+
+static inline __init
+struct xbc_node *find_match_node(struct xbc_node *node, char *k)
+{
+	while (node) {
+		if (!strcmp(xbc_node_get_data(node), k))
+			break;
+		node = xbc_node_get_next(node);
+	}
+	return node;
+}
+
+static int __init __xbc_add_key(char *k)
+{
+	struct xbc_node *node;
+
+	if (!xbc_valid_keyword(k))
+		return xbc_parse_error("Invalid keyword", k);
+
+	if (unlikely(xbc_node_num == 0))
+		goto add_node;
+
+	if (!last_parent)	/* the first level */
+		node = find_match_node(xbc_nodes, k);
+	else
+		node = find_match_node(xbc_node_get_child(last_parent), k);
+
+	if (node)
+		last_parent = node;
+	else {
+add_node:
+		node = xbc_add_child(k, XBC_KEY);
+		if (!node)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static int __init __xbc_parse_keys(char *k)
+{
+	char *p;
+	int ret;
+
+	k = strim(k);
+	while ((p = strchr(k, '.'))) {
+		*p++ = '\0';
+		ret = __xbc_add_key(k);
+		if (ret)
+			return ret;
+		k = p;
+	}
+
+	return __xbc_add_key(k);
+}
+
+static int __init xbc_parse_kv(char **k, char *v)
+{
+	struct xbc_node *prev_parent = last_parent;
+	struct xbc_node *node;
+	char *next;
+	int c, ret;
+
+	ret = __xbc_parse_keys(*k);
+	if (ret)
+		return ret;
+
+	c = __xbc_parse_value(&v, &next);
+	if (c < 0)
+		return c;
+
+	node = xbc_add_sibling(v, XBC_VALUE);
+	if (!node)
+		return -ENOMEM;
+
+	if (c == ',') {	/* Array */
+		c = xbc_parse_array(&next);
+		if (c < 0)
+			return c;
+	}
+
+	last_parent = prev_parent;
+
+	if (c == '}') {
+		ret = __xbc_close_brace(next - 1);
+		if (ret < 0)
+			return ret;
+	}
+
+	*k = next;
+
+	return 0;
+}
+
+static int __init xbc_parse_key(char **k, char *n)
+{
+	struct xbc_node *prev_parent = last_parent;
+	int ret;
+
+	*k = strim(*k);
+	if (**k != '\0') {
+		ret = __xbc_parse_keys(*k);
+		if (ret)
+			return ret;
+		last_parent = prev_parent;
+	}
+	*k = n;
+
+	return 0;
+}
+
+static int __init xbc_open_brace(char **k, char *n)
+{
+	int ret;
+
+	ret = __xbc_parse_keys(*k);
+	if (ret)
+		return ret;
+	*k = n;
+
+	return __xbc_open_brace();
+}
+
+static int __init xbc_close_brace(char **k, char *n)
+{
+	int ret;
+
+	ret = xbc_parse_key(k, n);
+	if (ret)
+		return ret;
+	/* k is updated in xbc_parse_key() */
+
+	return __xbc_close_brace(n - 1);
+}
+
+static int __init xbc_verify_tree(void)
+{
+	int i, depth, len, wlen;
+	struct xbc_node *n, *m;
+
+	/* Empty tree */
+	if (xbc_node_num == 0) {
+		xbc_parse_error("Empty config", xbc_data);
+		return -ENOENT;
+	}
+
+	for (i = 0; i < xbc_node_num; i++) {
+		if (xbc_nodes[i].next > xbc_node_num) {
+			return xbc_parse_error("No closing brace",
+				xbc_node_get_data(xbc_nodes + i));
+		}
+	}
+
+	/* Key tree limitation check */
+	n = &xbc_nodes[0];
+	depth = 1;
+	len = 0;
+
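+	/*
+	 * Depth-first walk over the key nodes, tracking the accumulated
+	 * dotted-key length and depth along the current path.
+	 */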
+	while (n) {
+		wlen = strlen(xbc_node_get_data(n)) + 1;
+		len += wlen;
+		if (len > XBC_KEYLEN_MAX)
+			return xbc_parse_error("Too long key length",
+				xbc_node_get_data(n));
+
+		m = xbc_node_get_child(n);
+		if (m && xbc_node_is_key(m)) {
+			n = m;
+			depth++;
+			if (depth > XBC_DEPTH_MAX)
+				return xbc_parse_error("Too many key words",
+						xbc_node_get_data(n));
+			continue;
+		}
+		len -= wlen;
+		m = xbc_node_get_next(n);
+		while (!m) {
+			n = xbc_node_get_parent(n);
+			if (!n)
+				break;
+			len -= strlen(xbc_node_get_data(n)) + 1;
+			depth--;
+			m = xbc_node_get_next(n);
+		}
+		n = m;
+	}
+
+	return 0;
+}
+
+/**
+ * xbc_destroy_all() - Clean up all parsed bootconfig
+ *
+ * This clears all data structures of the parsed bootconfig in memory.
+ * If you need to reuse xbc_init() with a new boot config, call this
+ * first.
+ */
+void __init xbc_destroy_all(void)
+{
+	xbc_data = NULL;
+	xbc_data_size = 0;
+	xbc_node_num = 0;
+	memset(xbc_nodes, 0, sizeof(xbc_nodes));
+}
+
+/**
+ * xbc_init() - Parse given XBC file and build XBC internal tree
+ * @buf: boot config text
+ *
+ * This parses the boot config text in @buf. @buf must be a
+ * null-terminated string and smaller than XBC_DATA_MAX.
+ * Return the number of stored nodes (>0) on success, or -errno
+ * if there is any error.
+ */
+int __init xbc_init(char *buf)
+{
+	char *p, *q;
+	int ret, c;
+
+	if (xbc_data) {
+		pr_err("Error: bootconfig is already initialized.\n");
+		return -EBUSY;
+	}
+
+	ret = strlen(buf);
+	if (ret > XBC_DATA_MAX - 1 || ret == 0) {
+		pr_err("Error: Config data is %s.\n",
+			ret ? "too big" : "empty");
+		return -ERANGE;
+	}
+
+	xbc_data = buf;
+	xbc_data_size = ret + 1;
+	last_parent = NULL;
+
+	p = buf;
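+	/*
+	 * Scan the text delimiter by delimiter; each handler consumes the
+	 * text before its delimiter and advances p past it.
+	 */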
+	do {
+		q = strpbrk(p, "{}=;\n#");
+		if (!q) {
+			p = skip_spaces(p);
+			if (*p != '\0')
+				ret = xbc_parse_error("No delimiter", p);
+			break;
+		}
+
+		c = *q;
+		*q++ = '\0';
+		switch (c) {
+		case '=':
+			ret = xbc_parse_kv(&p, q);
+			break;
+		case '{':
+			ret = xbc_open_brace(&p, q);
+			break;
+		case '#':
+			q = skip_comment(q);
+			/* fall through */
+		case ';':
+		case '\n':
+			ret = xbc_parse_key(&p, q);
+			break;
+		case '}':
+			ret = xbc_close_brace(&p, q);
+			break;
+		}
+	} while (!ret);
+
+	if (!ret)
+		ret = xbc_verify_tree();
+
+	if (ret < 0)
+		xbc_destroy_all();
+	else
+		ret = xbc_node_num;
+
+	return ret;
+}
+
+/**
+ * xbc_debug_dump() - Dump current XBC node list
+ *
+ * Dump the current XBC node list to the printk buffer for debugging.
+ */
+void __init xbc_debug_dump(void)
+{
+	int i;
+
+	for (i = 0; i < xbc_node_num; i++) {
+		pr_debug("[%d] %s (%s) .next=%d, .child=%d .parent=%d\n", i,
+			xbc_node_get_data(xbc_nodes + i),
+			xbc_node_is_value(xbc_nodes + i) ? "value" : "key",
+			xbc_nodes[i].next, xbc_nodes[i].child,
+			xbc_nodes[i].parent);
+	}
+}
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 63b4b7e..6130c42 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -10,6 +10,10 @@
 #include "zlib_inflate/inftrees.c"
 #include "zlib_inflate/inffast.c"
 #include "zlib_inflate/inflate.c"
+#ifdef CONFIG_ZLIB_DFLTCC
+#include "zlib_dfltcc/dfltcc.c"
+#include "zlib_dfltcc/dfltcc_inflate.c"
+#endif
 
 #else /* STATIC */
 /* initramfs et al: linked */
@@ -76,7 +80,12 @@ STATIC int INIT __gunzip(unsigned char *buf, long len,
 	}
 
 	strm->workspace = malloc(flush ? zlib_inflate_workspacesize() :
+#ifdef CONFIG_ZLIB_DFLTCC
+	/* Always allocate the full workspace for DFLTCC */
+				 zlib_inflate_workspacesize());
+#else
 				 sizeof(struct inflate_state));
+#endif
 	if (strm->workspace == NULL) {
 		error("Out of memory while allocating workspace");
 		goto gunzip_nomem4;
@@ -123,10 +132,14 @@ STATIC int INIT __gunzip(unsigned char *buf, long len,
 
 	rc = zlib_inflateInit2(strm, -MAX_WBITS);
 
+#ifdef CONFIG_ZLIB_DFLTCC
+	/* Always keep the window for DFLTCC */
+#else
 	if (!flush) {
 		WS(strm)->inflate_state.wsize = 0;
 		WS(strm)->inflate_state.window = NULL;
 	}
+#endif
 
 	while (rc == Z_OK) {
 		if (strm->avail_in == 0) {
diff --git a/lib/find_bit.c b/lib/find_bit.c
index e35a76b..49f875f 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -17,9 +17,9 @@
 #include <linux/export.h>
 #include <linux/kernel.h>
 
-#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
-		!defined(find_next_and_bit)
-
+#if !defined(find_next_bit) || !defined(find_next_zero_bit) ||			\
+	!defined(find_next_bit_le) || !defined(find_next_zero_bit_le) ||	\
+	!defined(find_next_and_bit)
 /*
  * This is a common helper function for find_next_bit, find_next_zero_bit, and
  * find_next_and_bit. The differences are:
@@ -27,11 +27,11 @@
  *    searching it for one bits.
  *  - The optional "addr2", which is anded with "addr1" if present.
  */
-static inline unsigned long _find_next_bit(const unsigned long *addr1,
+static unsigned long _find_next_bit(const unsigned long *addr1,
 		const unsigned long *addr2, unsigned long nbits,
-		unsigned long start, unsigned long invert)
+		unsigned long start, unsigned long invert, unsigned long le)
 {
-	unsigned long tmp;
+	unsigned long tmp, mask;
 
 	if (unlikely(start >= nbits))
 		return nbits;
@@ -42,7 +42,12 @@ static inline unsigned long _find_next_bit(const unsigned long *addr1,
 	tmp ^= invert;
 
 	/* Handle 1st word. */
-	tmp &= BITMAP_FIRST_WORD_MASK(start);
+	mask = BITMAP_FIRST_WORD_MASK(start);
+	if (le)
+		mask = swab(mask);
+
+	tmp &= mask;
+
 	start = round_down(start, BITS_PER_LONG);
 
 	while (!tmp) {
@@ -56,6 +61,9 @@ static inline unsigned long _find_next_bit(const unsigned long *addr1,
 		tmp ^= invert;
 	}
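+	/* For the *_le variants, swab back so __ffs() yields the LE bit number */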
 
+	if (le)
+		tmp = swab(tmp);
+
 	return min(start + __ffs(tmp), nbits);
 }
 #endif
@@ -67,7 +75,7 @@ static inline unsigned long _find_next_bit(const unsigned long *addr1,
 unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
 			    unsigned long offset)
 {
-	return _find_next_bit(addr, NULL, size, offset, 0UL);
+	return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
 }
 EXPORT_SYMBOL(find_next_bit);
 #endif
@@ -76,7 +84,7 @@ EXPORT_SYMBOL(find_next_bit);
 unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
 				 unsigned long offset)
 {
-	return _find_next_bit(addr, NULL, size, offset, ~0UL);
+	return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
 }
 EXPORT_SYMBOL(find_next_zero_bit);
 #endif
@@ -86,7 +94,7 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
 		const unsigned long *addr2, unsigned long size,
 		unsigned long offset)
 {
-	return _find_next_bit(addr1, addr2, size, offset, 0UL);
+	return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
 }
 EXPORT_SYMBOL(find_next_and_bit);
 #endif
@@ -149,57 +157,11 @@ EXPORT_SYMBOL(find_last_bit);
 
 #ifdef __BIG_ENDIAN
 
-/* include/linux/byteorder does not support "unsigned long" type */
-static inline unsigned long ext2_swab(const unsigned long y)
-{
-#if BITS_PER_LONG == 64
-	return (unsigned long) __swab64((u64) y);
-#elif BITS_PER_LONG == 32
-	return (unsigned long) __swab32((u32) y);
-#else
-#error BITS_PER_LONG not defined
-#endif
-}
-
-#if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
-static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
-		const unsigned long *addr2, unsigned long nbits,
-		unsigned long start, unsigned long invert)
-{
-	unsigned long tmp;
-
-	if (unlikely(start >= nbits))
-		return nbits;
-
-	tmp = addr1[start / BITS_PER_LONG];
-	if (addr2)
-		tmp &= addr2[start / BITS_PER_LONG];
-	tmp ^= invert;
-
-	/* Handle 1st word. */
-	tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
-	start = round_down(start, BITS_PER_LONG);
-
-	while (!tmp) {
-		start += BITS_PER_LONG;
-		if (start >= nbits)
-			return nbits;
-
-		tmp = addr1[start / BITS_PER_LONG];
-		if (addr2)
-			tmp &= addr2[start / BITS_PER_LONG];
-		tmp ^= invert;
-	}
-
-	return min(start + __ffs(ext2_swab(tmp)), nbits);
-}
-#endif
-
 #ifndef find_next_zero_bit_le
 unsigned long find_next_zero_bit_le(const void *addr, unsigned
 		long size, unsigned long offset)
 {
-	return _find_next_bit_le(addr, NULL, size, offset, ~0UL);
+	return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
 }
 EXPORT_SYMBOL(find_next_zero_bit_le);
 #endif
@@ -208,7 +170,7 @@ EXPORT_SYMBOL(find_next_zero_bit_le);
 unsigned long find_next_bit_le(const void *addr, unsigned
 		long size, unsigned long offset)
 {
-	return _find_next_bit_le(addr, NULL, size, offset, 0UL);
+	return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
 }
 EXPORT_SYMBOL(find_next_bit_le);
 #endif
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index af37016..065aa16 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -3,7 +3,7 @@
 #
 
 menuconfig KUNIT
-	bool "KUnit - Enable support for unit tests"
+	tristate "KUnit - Enable support for unit tests"
 	help
 	  Enables support for kernel unit tests (KUnit), a lightweight unit
 	  testing and mocking framework for the Linux kernel. These tests are
@@ -15,7 +15,7 @@
 if KUNIT
 
 config KUNIT_TEST
-	bool "KUnit test for KUnit"
+	tristate "KUnit test for KUnit"
 	help
 	  Enables the unit tests for the KUnit test framework. These tests test
 	  the KUnit test framework itself; the tests are both written using
@@ -24,7 +24,7 @@
 	  expected.
 
 config KUNIT_EXAMPLE_TEST
-	bool "Example test for KUnit"
+	tristate "Example test for KUnit"
 	help
 	  Enables an example unit test that illustrates some of the basic
 	  features of KUnit. This test only exists to help new users understand
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index 769d940..fab5564 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -1,9 +1,15 @@
-obj-$(CONFIG_KUNIT) +=			test.o \
+obj-$(CONFIG_KUNIT) +=			kunit.o
+
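+# The core objects below are linked into a single kunit.ko when KUnit is
+# built as a module.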
+kunit-objs +=				test.o \
 					string-stream.o \
 					assert.o \
 					try-catch.o
 
-obj-$(CONFIG_KUNIT_TEST) +=		test-test.o \
-					string-stream-test.o
+obj-$(CONFIG_KUNIT_TEST) +=		kunit-test.o
 
-obj-$(CONFIG_KUNIT_EXAMPLE_TEST) +=	example-test.o
+# string-stream-test compiles built-in only.
+ifeq ($(CONFIG_KUNIT_TEST),y)
+obj-$(CONFIG_KUNIT_TEST) +=		string-stream-test.o
+endif
+
+obj-$(CONFIG_KUNIT_EXAMPLE_TEST) +=	kunit-example-test.o
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index 86013d4..b24bebc 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -7,6 +7,8 @@
  */
 #include <kunit/assert.h>
 
+#include "string-stream.h"
+
 void kunit_base_assert_format(const struct kunit_assert *assert,
 			      struct string_stream *stream)
 {
@@ -24,6 +26,7 @@ void kunit_base_assert_format(const struct kunit_assert *assert,
 	string_stream_add(stream, "%s FAILED at %s:%d\n",
 			 expect_or_assert, assert->file, assert->line);
 }
+EXPORT_SYMBOL_GPL(kunit_base_assert_format);
 
 void kunit_assert_print_msg(const struct kunit_assert *assert,
 			    struct string_stream *stream)
@@ -31,6 +34,7 @@ void kunit_assert_print_msg(const struct kunit_assert *assert,
 	if (assert->message.fmt)
 		string_stream_add(stream, "\n%pV", &assert->message);
 }
+EXPORT_SYMBOL_GPL(kunit_assert_print_msg);
 
 void kunit_fail_assert_format(const struct kunit_assert *assert,
 			      struct string_stream *stream)
@@ -38,6 +42,7 @@ void kunit_fail_assert_format(const struct kunit_assert *assert,
 	kunit_base_assert_format(assert, stream);
 	string_stream_add(stream, "%pV", &assert->message);
 }
+EXPORT_SYMBOL_GPL(kunit_fail_assert_format);
 
 void kunit_unary_assert_format(const struct kunit_assert *assert,
 			       struct string_stream *stream)
@@ -56,6 +61,7 @@ void kunit_unary_assert_format(const struct kunit_assert *assert,
 				 unary_assert->condition);
 	kunit_assert_print_msg(assert, stream);
 }
+EXPORT_SYMBOL_GPL(kunit_unary_assert_format);
 
 void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
 				     struct string_stream *stream)
@@ -76,6 +82,7 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
 	}
 	kunit_assert_print_msg(assert, stream);
 }
+EXPORT_SYMBOL_GPL(kunit_ptr_not_err_assert_format);
 
 void kunit_binary_assert_format(const struct kunit_assert *assert,
 				struct string_stream *stream)
@@ -97,6 +104,7 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
 			 binary_assert->right_value);
 	kunit_assert_print_msg(assert, stream);
 }
+EXPORT_SYMBOL_GPL(kunit_binary_assert_format);
 
 void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
 				    struct string_stream *stream)
@@ -118,6 +126,7 @@ void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
 			 binary_assert->right_value);
 	kunit_assert_print_msg(assert, stream);
 }
+EXPORT_SYMBOL_GPL(kunit_binary_ptr_assert_format);
 
 void kunit_binary_str_assert_format(const struct kunit_assert *assert,
 				    struct string_stream *stream)
@@ -139,3 +148,4 @@ void kunit_binary_str_assert_format(const struct kunit_assert *assert,
 			 binary_assert->right_value);
 	kunit_assert_print_msg(assert, stream);
 }
+EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format);
diff --git a/lib/kunit/example-test.c b/lib/kunit/example-test.c
deleted file mode 100644
index f64a829..0000000
--- a/lib/kunit/example-test.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Example KUnit test to show how to use KUnit.
- *
- * Copyright (C) 2019, Google LLC.
- * Author: Brendan Higgins <brendanhiggins@google.com>
- */
-
-#include <kunit/test.h>
-
-/*
- * This is the most fundamental element of KUnit, the test case. A test case
- * makes a set EXPECTATIONs and ASSERTIONs about the behavior of some code; if
- * any expectations or assertions are not met, the test fails; otherwise, the
- * test passes.
- *
- * In KUnit, a test case is just a function with the signature
- * `void (*)(struct kunit *)`. `struct kunit` is a context object that stores
- * information about the current test.
- */
-static void example_simple_test(struct kunit *test)
-{
-	/*
-	 * This is an EXPECTATION; it is how KUnit tests things. When you want
-	 * to test a piece of code, you set some expectations about what the
-	 * code should do. KUnit then runs the test and verifies that the code's
-	 * behavior matched what was expected.
-	 */
-	KUNIT_EXPECT_EQ(test, 1 + 1, 2);
-}
-
-/*
- * This is run once before each test case, see the comment on
- * example_test_suite for more information.
- */
-static int example_test_init(struct kunit *test)
-{
-	kunit_info(test, "initializing\n");
-
-	return 0;
-}
-
-/*
- * Here we make a list of all the test cases we want to add to the test suite
- * below.
- */
-static struct kunit_case example_test_cases[] = {
-	/*
-	 * This is a helper to create a test case object from a test case
-	 * function; its exact function is not important to understand how to
-	 * use KUnit, just know that this is how you associate test cases with a
-	 * test suite.
-	 */
-	KUNIT_CASE(example_simple_test),
-	{}
-};
-
-/*
- * This defines a suite or grouping of tests.
- *
- * Test cases are defined as belonging to the suite by adding them to
- * `kunit_cases`.
- *
- * Often it is desirable to run some function which will set up things which
- * will be used by every test; this is accomplished with an `init` function
- * which runs before each test case is invoked. Similarly, an `exit` function
- * may be specified which runs after every test case and can be used to for
- * cleanup. For clarity, running tests in a test suite would behave as follows:
- *
- * suite.init(test);
- * suite.test_case[0](test);
- * suite.exit(test);
- * suite.init(test);
- * suite.test_case[1](test);
- * suite.exit(test);
- * ...;
- */
-static struct kunit_suite example_test_suite = {
-	.name = "example",
-	.init = example_test_init,
-	.test_cases = example_test_cases,
-};
-
-/*
- * This registers the above test suite telling KUnit that this is a suite of
- * tests that need to be run.
- */
-kunit_test_suite(example_test_suite);
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
new file mode 100644
index 0000000..be1164e
--- /dev/null
+++ b/lib/kunit/kunit-example-test.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Example KUnit test to show how to use KUnit.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/test.h>
+
+/*
+ * This is the most fundamental element of KUnit, the test case. A test case
+ * makes a set of EXPECTATIONs and ASSERTIONs about the behavior of some code; if
+ * any expectations or assertions are not met, the test fails; otherwise, the
+ * test passes.
+ *
+ * In KUnit, a test case is just a function with the signature
+ * `void (*)(struct kunit *)`. `struct kunit` is a context object that stores
+ * information about the current test.
+ */
+static void example_simple_test(struct kunit *test)
+{
+	/*
+	 * This is an EXPECTATION; it is how KUnit tests things. When you want
+	 * to test a piece of code, you set some expectations about what the
+	 * code should do. KUnit then runs the test and verifies that the code's
+	 * behavior matched what was expected.
+	 */
+	KUNIT_EXPECT_EQ(test, 1 + 1, 2);
+}
+
+/*
+ * This is run once before each test case, see the comment on
+ * example_test_suite for more information.
+ */
+static int example_test_init(struct kunit *test)
+{
+	kunit_info(test, "initializing\n");
+
+	return 0;
+}
+
+/*
+ * Here we make a list of all the test cases we want to add to the test suite
+ * below.
+ */
+static struct kunit_case example_test_cases[] = {
+	/*
+	 * This is a helper to create a test case object from a test case
+	 * function; its exact function is not important to understand how to
+	 * use KUnit, just know that this is how you associate test cases with a
+	 * test suite.
+	 */
+	KUNIT_CASE(example_simple_test),
+	{}
+};
+
+/*
+ * This defines a suite or grouping of tests.
+ *
+ * Test cases are defined as belonging to the suite by adding them to
+ * `kunit_cases`.
+ *
+ * Often it is desirable to run some function which will set up things which
+ * will be used by every test; this is accomplished with an `init` function
+ * which runs before each test case is invoked. Similarly, an `exit` function
+ * may be specified which runs after every test case and can be used for
+ * cleanup. For clarity, running tests in a test suite would behave as follows:
+ *
+ * suite.init(test);
+ * suite.test_case[0](test);
+ * suite.exit(test);
+ * suite.init(test);
+ * suite.test_case[1](test);
+ * suite.exit(test);
+ * ...;
+ */
+static struct kunit_suite example_test_suite = {
+	.name = "example",
+	.init = example_test_init,
+	.test_cases = example_test_cases,
+};
+
+/*
+ * This registers the above test suite telling KUnit that this is a suite of
+ * tests that need to be run.
+ */
+kunit_test_suites(&example_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
new file mode 100644
index 0000000..ccb8d2e
--- /dev/null
+++ b/lib/kunit/kunit-test.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for core test infrastructure.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+#include <kunit/test.h>
+
+#include "try-catch-impl.h"
+
+struct kunit_try_catch_test_context {
+	struct kunit_try_catch *try_catch;
+	bool function_called;
+};
+
+static void kunit_test_successful_try(void *data)
+{
+	struct kunit *test = data;
+	struct kunit_try_catch_test_context *ctx = test->priv;
+
+	ctx->function_called = true;
+}
+
+static void kunit_test_no_catch(void *data)
+{
+	struct kunit *test = data;
+
+	KUNIT_FAIL(test, "Catch should not be called\n");
+}
+
+static void kunit_test_try_catch_successful_try_no_catch(struct kunit *test)
+{
+	struct kunit_try_catch_test_context *ctx = test->priv;
+	struct kunit_try_catch *try_catch = ctx->try_catch;
+
+	kunit_try_catch_init(try_catch,
+			     test,
+			     kunit_test_successful_try,
+			     kunit_test_no_catch);
+	kunit_try_catch_run(try_catch, test);
+
+	KUNIT_EXPECT_TRUE(test, ctx->function_called);
+}
+
+static void kunit_test_unsuccessful_try(void *data)
+{
+	struct kunit *test = data;
+	struct kunit_try_catch_test_context *ctx = test->priv;
+	struct kunit_try_catch *try_catch = ctx->try_catch;
+
+	kunit_try_catch_throw(try_catch);
+	KUNIT_FAIL(test, "This line should never be reached\n");
+}
+
+static void kunit_test_catch(void *data)
+{
+	struct kunit *test = data;
+	struct kunit_try_catch_test_context *ctx = test->priv;
+
+	ctx->function_called = true;
+}
+
+static void kunit_test_try_catch_unsuccessful_try_does_catch(struct kunit *test)
+{
+	struct kunit_try_catch_test_context *ctx = test->priv;
+	struct kunit_try_catch *try_catch = ctx->try_catch;
+
+	kunit_try_catch_init(try_catch,
+			     test,
+			     kunit_test_unsuccessful_try,
+			     kunit_test_catch);
+	kunit_try_catch_run(try_catch, test);
+
+	KUNIT_EXPECT_TRUE(test, ctx->function_called);
+}
+
+static int kunit_try_catch_test_init(struct kunit *test)
+{
+	struct kunit_try_catch_test_context *ctx;
+
+	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+	test->priv = ctx;
+
+	ctx->try_catch = kunit_kmalloc(test,
+				       sizeof(*ctx->try_catch),
+				       GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->try_catch);
+
+	return 0;
+}
+
+static struct kunit_case kunit_try_catch_test_cases[] = {
+	KUNIT_CASE(kunit_test_try_catch_successful_try_no_catch),
+	KUNIT_CASE(kunit_test_try_catch_unsuccessful_try_does_catch),
+	{}
+};
+
+static struct kunit_suite kunit_try_catch_test_suite = {
+	.name = "kunit-try-catch-test",
+	.init = kunit_try_catch_test_init,
+	.test_cases = kunit_try_catch_test_cases,
+};
+
+/*
+ * Context for testing test managed resources
+ * is_resource_initialized is used to test arbitrary resources
+ */
+struct kunit_test_resource_context {
+	struct kunit test;
+	bool is_resource_initialized;
+	int allocate_order[2];
+	int free_order[2];
+};
+
+static int fake_resource_init(struct kunit_resource *res, void *context)
+{
+	struct kunit_test_resource_context *ctx = context;
+
+	res->allocation = &ctx->is_resource_initialized;
+	ctx->is_resource_initialized = true;
+	return 0;
+}
+
+static void fake_resource_free(struct kunit_resource *res)
+{
+	bool *is_resource_initialized = res->allocation;
+
+	*is_resource_initialized = false;
+}
+
+static void kunit_resource_test_init_resources(struct kunit *test)
+{
+	struct kunit_test_resource_context *ctx = test->priv;
+
+	kunit_init_test(&ctx->test, "testing_test_init_test");
+
+	KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+}
+
+static void kunit_resource_test_alloc_resource(struct kunit *test)
+{
+	struct kunit_test_resource_context *ctx = test->priv;
+	struct kunit_resource *res;
+	kunit_resource_free_t free = fake_resource_free;
+
+	res = kunit_alloc_and_get_resource(&ctx->test,
+					   fake_resource_init,
+					   fake_resource_free,
+					   GFP_KERNEL,
+					   ctx);
+
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, res);
+	KUNIT_EXPECT_PTR_EQ(test,
+			    &ctx->is_resource_initialized,
+			    (bool *) res->allocation);
+	KUNIT_EXPECT_TRUE(test, list_is_last(&res->node, &ctx->test.resources));
+	KUNIT_EXPECT_PTR_EQ(test, free, res->free);
+}
+
+static void kunit_resource_test_destroy_resource(struct kunit *test)
+{
+	struct kunit_test_resource_context *ctx = test->priv;
+	struct kunit_resource *res = kunit_alloc_and_get_resource(
+			&ctx->test,
+			fake_resource_init,
+			fake_resource_free,
+			GFP_KERNEL,
+			ctx);
+
+	KUNIT_ASSERT_FALSE(test,
+			   kunit_resource_destroy(&ctx->test,
+						  kunit_resource_instance_match,
+						  res->free,
+						  res->allocation));
+
+	KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized);
+	KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+}
+
+static void kunit_resource_test_cleanup_resources(struct kunit *test)
+{
+	int i;
+	struct kunit_test_resource_context *ctx = test->priv;
+	struct kunit_resource *resources[5];
+
+	for (i = 0; i < ARRAY_SIZE(resources); i++) {
+		resources[i] = kunit_alloc_and_get_resource(&ctx->test,
+							    fake_resource_init,
+							    fake_resource_free,
+							    GFP_KERNEL,
+							    ctx);
+	}
+
+	kunit_cleanup(&ctx->test);
+
+	KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+}
+
+static void kunit_resource_test_mark_order(int order_array[],
+					   size_t order_size,
+					   int key)
+{
+	int i;
+
+	for (i = 0; i < order_size && order_array[i]; i++)
+		;
+
+	order_array[i] = key;
+}
+
+#define KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, order_field, key)		       \
+		kunit_resource_test_mark_order(ctx->order_field,	       \
+					       ARRAY_SIZE(ctx->order_field),   \
+					       key)
+
+static int fake_resource_2_init(struct kunit_resource *res, void *context)
+{
+	struct kunit_test_resource_context *ctx = context;
+
+	KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 2);
+
+	res->allocation = ctx;
+
+	return 0;
+}
+
+static void fake_resource_2_free(struct kunit_resource *res)
+{
+	struct kunit_test_resource_context *ctx = res->allocation;
+
+	KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 2);
+}
+
+static int fake_resource_1_init(struct kunit_resource *res, void *context)
+{
+	struct kunit_test_resource_context *ctx = context;
+
+	kunit_alloc_and_get_resource(&ctx->test,
+				     fake_resource_2_init,
+				     fake_resource_2_free,
+				     GFP_KERNEL,
+				     ctx);
+
+	KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 1);
+
+	res->allocation = ctx;
+
+	return 0;
+}
+
+static void fake_resource_1_free(struct kunit_resource *res)
+{
+	struct kunit_test_resource_context *ctx = res->allocation;
+
+	KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 1);
+}
+
+/*
+ * TODO(brendanhiggins@google.com): replace the arrays that keep track of the
+ * order of allocation and freeing with strict mocks using the IN_SEQUENCE macro
+ * to assert allocation and freeing order when the feature becomes available.
+ */
+static void kunit_resource_test_proper_free_ordering(struct kunit *test)
+{
+	struct kunit_test_resource_context *ctx = test->priv;
+
+	/* fake_resource_1 allocates a fake_resource_2 in its init. */
+	kunit_alloc_and_get_resource(&ctx->test,
+				     fake_resource_1_init,
+				     fake_resource_1_free,
+				     GFP_KERNEL,
+				     ctx);
+
+	/*
+	 * Since fake_resource_2_init calls KUNIT_RESOURCE_TEST_MARK_ORDER
+	 * before returning to fake_resource_1_init, it should be the first to
+	 * put its key in the allocate_order array.
+	 */
+	KUNIT_EXPECT_EQ(test, ctx->allocate_order[0], 2);
+	KUNIT_EXPECT_EQ(test, ctx->allocate_order[1], 1);
+
+	kunit_cleanup(&ctx->test);
+
+	/*
+	 * Because fake_resource_2 finishes allocation before fake_resource_1,
+	 * fake_resource_1 should be freed first since it could depend on
+	 * fake_resource_2.
+	 */
+	KUNIT_EXPECT_EQ(test, ctx->free_order[0], 1);
+	KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2);
+}
+
+static int kunit_resource_test_init(struct kunit *test)
+{
+	struct kunit_test_resource_context *ctx =
+			kzalloc(sizeof(*ctx), GFP_KERNEL);
+
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+	test->priv = ctx;
+
+	kunit_init_test(&ctx->test, "test_test_context");
+
+	return 0;
+}
+
+static void kunit_resource_test_exit(struct kunit *test)
+{
+	struct kunit_test_resource_context *ctx = test->priv;
+
+	kunit_cleanup(&ctx->test);
+	kfree(ctx);
+}
+
+static struct kunit_case kunit_resource_test_cases[] = {
+	KUNIT_CASE(kunit_resource_test_init_resources),
+	KUNIT_CASE(kunit_resource_test_alloc_resource),
+	KUNIT_CASE(kunit_resource_test_destroy_resource),
+	KUNIT_CASE(kunit_resource_test_cleanup_resources),
+	KUNIT_CASE(kunit_resource_test_proper_free_ordering),
+	{}
+};
+
+static struct kunit_suite kunit_resource_test_suite = {
+	.name = "kunit-resource-test",
+	.init = kunit_resource_test_init,
+	.exit = kunit_resource_test_exit,
+	.test_cases = kunit_resource_test_cases,
+};
+kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c
index 76cc05e..110f3a9 100644
--- a/lib/kunit/string-stream-test.c
+++ b/lib/kunit/string-stream-test.c
@@ -6,10 +6,11 @@
  * Author: Brendan Higgins <brendanhiggins@google.com>
  */
 
-#include <kunit/string-stream.h>
 #include <kunit/test.h>
 #include <linux/slab.h>
 
+#include "string-stream.h"
+
 static void string_stream_test_empty_on_creation(struct kunit *test)
 {
 	struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
@@ -49,4 +50,4 @@ static struct kunit_suite string_stream_test_suite = {
 	.name = "string-stream-test",
 	.test_cases = string_stream_test_cases
 };
-kunit_test_suite(string_stream_test_suite);
+kunit_test_suites(&string_stream_test_suite);
diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c
index e6d17aa..3503920 100644
--- a/lib/kunit/string-stream.c
+++ b/lib/kunit/string-stream.c
@@ -6,11 +6,12 @@
  * Author: Brendan Higgins <brendanhiggins@google.com>
  */
 
-#include <kunit/string-stream.h>
 #include <kunit/test.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 
+#include "string-stream.h"
+
 struct string_stream_fragment_alloc_context {
 	struct kunit *test;
 	int len;
diff --git a/include/kunit/string-stream.h b/lib/kunit/string-stream.h
similarity index 100%
rename from include/kunit/string-stream.h
rename to lib/kunit/string-stream.h
diff --git a/lib/kunit/test-test.c b/lib/kunit/test-test.c
deleted file mode 100644
index 5ebe059..0000000
--- a/lib/kunit/test-test.c
+++ /dev/null
@@ -1,331 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * KUnit test for core test infrastructure.
- *
- * Copyright (C) 2019, Google LLC.
- * Author: Brendan Higgins <brendanhiggins@google.com>
- */
-#include <kunit/test.h>
-
-struct kunit_try_catch_test_context {
-	struct kunit_try_catch *try_catch;
-	bool function_called;
-};
-
-static void kunit_test_successful_try(void *data)
-{
-	struct kunit *test = data;
-	struct kunit_try_catch_test_context *ctx = test->priv;
-
-	ctx->function_called = true;
-}
-
-static void kunit_test_no_catch(void *data)
-{
-	struct kunit *test = data;
-
-	KUNIT_FAIL(test, "Catch should not be called\n");
-}
-
-static void kunit_test_try_catch_successful_try_no_catch(struct kunit *test)
-{
-	struct kunit_try_catch_test_context *ctx = test->priv;
-	struct kunit_try_catch *try_catch = ctx->try_catch;
-
-	kunit_try_catch_init(try_catch,
-			     test,
-			     kunit_test_successful_try,
-			     kunit_test_no_catch);
-	kunit_try_catch_run(try_catch, test);
-
-	KUNIT_EXPECT_TRUE(test, ctx->function_called);
-}
-
-static void kunit_test_unsuccessful_try(void *data)
-{
-	struct kunit *test = data;
-	struct kunit_try_catch_test_context *ctx = test->priv;
-	struct kunit_try_catch *try_catch = ctx->try_catch;
-
-	kunit_try_catch_throw(try_catch);
-	KUNIT_FAIL(test, "This line should never be reached\n");
-}
-
-static void kunit_test_catch(void *data)
-{
-	struct kunit *test = data;
-	struct kunit_try_catch_test_context *ctx = test->priv;
-
-	ctx->function_called = true;
-}
-
-static void kunit_test_try_catch_unsuccessful_try_does_catch(struct kunit *test)
-{
-	struct kunit_try_catch_test_context *ctx = test->priv;
-	struct kunit_try_catch *try_catch = ctx->try_catch;
-
-	kunit_try_catch_init(try_catch,
-			     test,
-			     kunit_test_unsuccessful_try,
-			     kunit_test_catch);
-	kunit_try_catch_run(try_catch, test);
-
-	KUNIT_EXPECT_TRUE(test, ctx->function_called);
-}
-
-static int kunit_try_catch_test_init(struct kunit *test)
-{
-	struct kunit_try_catch_test_context *ctx;
-
-	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-	test->priv = ctx;
-
-	ctx->try_catch = kunit_kmalloc(test,
-				       sizeof(*ctx->try_catch),
-				       GFP_KERNEL);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->try_catch);
-
-	return 0;
-}
-
-static struct kunit_case kunit_try_catch_test_cases[] = {
-	KUNIT_CASE(kunit_test_try_catch_successful_try_no_catch),
-	KUNIT_CASE(kunit_test_try_catch_unsuccessful_try_does_catch),
-	{}
-};
-
-static struct kunit_suite kunit_try_catch_test_suite = {
-	.name = "kunit-try-catch-test",
-	.init = kunit_try_catch_test_init,
-	.test_cases = kunit_try_catch_test_cases,
-};
-kunit_test_suite(kunit_try_catch_test_suite);
-
-/*
- * Context for testing test managed resources
- * is_resource_initialized is used to test arbitrary resources
- */
-struct kunit_test_resource_context {
-	struct kunit test;
-	bool is_resource_initialized;
-	int allocate_order[2];
-	int free_order[2];
-};
-
-static int fake_resource_init(struct kunit_resource *res, void *context)
-{
-	struct kunit_test_resource_context *ctx = context;
-
-	res->allocation = &ctx->is_resource_initialized;
-	ctx->is_resource_initialized = true;
-	return 0;
-}
-
-static void fake_resource_free(struct kunit_resource *res)
-{
-	bool *is_resource_initialized = res->allocation;
-
-	*is_resource_initialized = false;
-}
-
-static void kunit_resource_test_init_resources(struct kunit *test)
-{
-	struct kunit_test_resource_context *ctx = test->priv;
-
-	kunit_init_test(&ctx->test, "testing_test_init_test");
-
-	KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
-}
-
-static void kunit_resource_test_alloc_resource(struct kunit *test)
-{
-	struct kunit_test_resource_context *ctx = test->priv;
-	struct kunit_resource *res;
-	kunit_resource_free_t free = fake_resource_free;
-
-	res = kunit_alloc_and_get_resource(&ctx->test,
-					   fake_resource_init,
-					   fake_resource_free,
-					   GFP_KERNEL,
-					   ctx);
-
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, res);
-	KUNIT_EXPECT_PTR_EQ(test,
-			    &ctx->is_resource_initialized,
-			    (bool *) res->allocation);
-	KUNIT_EXPECT_TRUE(test, list_is_last(&res->node, &ctx->test.resources));
-	KUNIT_EXPECT_PTR_EQ(test, free, res->free);
-}
-
-static void kunit_resource_test_destroy_resource(struct kunit *test)
-{
-	struct kunit_test_resource_context *ctx = test->priv;
-	struct kunit_resource *res = kunit_alloc_and_get_resource(
-			&ctx->test,
-			fake_resource_init,
-			fake_resource_free,
-			GFP_KERNEL,
-			ctx);
-
-	KUNIT_ASSERT_FALSE(test,
-			   kunit_resource_destroy(&ctx->test,
-						  kunit_resource_instance_match,
-						  res->free,
-						  res->allocation));
-
-	KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized);
-	KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
-}
-
-static void kunit_resource_test_cleanup_resources(struct kunit *test)
-{
-	int i;
-	struct kunit_test_resource_context *ctx = test->priv;
-	struct kunit_resource *resources[5];
-
-	for (i = 0; i < ARRAY_SIZE(resources); i++) {
-		resources[i] = kunit_alloc_and_get_resource(&ctx->test,
-							    fake_resource_init,
-							    fake_resource_free,
-							    GFP_KERNEL,
-							    ctx);
-	}
-
-	kunit_cleanup(&ctx->test);
-
-	KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
-}
-
-static void kunit_resource_test_mark_order(int order_array[],
-					   size_t order_size,
-					   int key)
-{
-	int i;
-
-	for (i = 0; i < order_size && order_array[i]; i++)
-		;
-
-	order_array[i] = key;
-}
-
-#define KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, order_field, key)		       \
-		kunit_resource_test_mark_order(ctx->order_field,	       \
-					       ARRAY_SIZE(ctx->order_field),   \
-					       key)
-
-static int fake_resource_2_init(struct kunit_resource *res, void *context)
-{
-	struct kunit_test_resource_context *ctx = context;
-
-	KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 2);
-
-	res->allocation = ctx;
-
-	return 0;
-}
-
-static void fake_resource_2_free(struct kunit_resource *res)
-{
-	struct kunit_test_resource_context *ctx = res->allocation;
-
-	KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 2);
-}
-
-static int fake_resource_1_init(struct kunit_resource *res, void *context)
-{
-	struct kunit_test_resource_context *ctx = context;
-
-	kunit_alloc_and_get_resource(&ctx->test,
-				     fake_resource_2_init,
-				     fake_resource_2_free,
-				     GFP_KERNEL,
-				     ctx);
-
-	KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 1);
-
-	res->allocation = ctx;
-
-	return 0;
-}
-
-static void fake_resource_1_free(struct kunit_resource *res)
-{
-	struct kunit_test_resource_context *ctx = res->allocation;
-
-	KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 1);
-}
-
-/*
- * TODO(brendanhiggins@google.com): replace the arrays that keep track of the
- * order of allocation and freeing with strict mocks using the IN_SEQUENCE macro
- * to assert allocation and freeing order when the feature becomes available.
- */
-static void kunit_resource_test_proper_free_ordering(struct kunit *test)
-{
-	struct kunit_test_resource_context *ctx = test->priv;
-
-	/* fake_resource_1 allocates a fake_resource_2 in its init. */
-	kunit_alloc_and_get_resource(&ctx->test,
-				     fake_resource_1_init,
-				     fake_resource_1_free,
-				     GFP_KERNEL,
-				     ctx);
-
-	/*
-	 * Since fake_resource_2_init calls KUNIT_RESOURCE_TEST_MARK_ORDER
-	 * before returning to fake_resource_1_init, it should be the first to
-	 * put its key in the allocate_order array.
-	 */
-	KUNIT_EXPECT_EQ(test, ctx->allocate_order[0], 2);
-	KUNIT_EXPECT_EQ(test, ctx->allocate_order[1], 1);
-
-	kunit_cleanup(&ctx->test);
-
-	/*
-	 * Because fake_resource_2 finishes allocation before fake_resource_1,
-	 * fake_resource_1 should be freed first since it could depend on
-	 * fake_resource_2.
-	 */
-	KUNIT_EXPECT_EQ(test, ctx->free_order[0], 1);
-	KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2);
-}
-
-static int kunit_resource_test_init(struct kunit *test)
-{
-	struct kunit_test_resource_context *ctx =
-			kzalloc(sizeof(*ctx), GFP_KERNEL);
-
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
-	test->priv = ctx;
-
-	kunit_init_test(&ctx->test, "test_test_context");
-
-	return 0;
-}
-
-static void kunit_resource_test_exit(struct kunit *test)
-{
-	struct kunit_test_resource_context *ctx = test->priv;
-
-	kunit_cleanup(&ctx->test);
-	kfree(ctx);
-}
-
-static struct kunit_case kunit_resource_test_cases[] = {
-	KUNIT_CASE(kunit_resource_test_init_resources),
-	KUNIT_CASE(kunit_resource_test_alloc_resource),
-	KUNIT_CASE(kunit_resource_test_destroy_resource),
-	KUNIT_CASE(kunit_resource_test_cleanup_resources),
-	KUNIT_CASE(kunit_resource_test_proper_free_ordering),
-	{}
-};
-
-static struct kunit_suite kunit_resource_test_suite = {
-	.name = "kunit-resource-test",
-	.init = kunit_resource_test_init,
-	.exit = kunit_resource_test_exit,
-	.test_cases = kunit_resource_test_cases,
-};
-kunit_test_suite(kunit_resource_test_suite);
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index c83c0fa..9242f93 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -7,10 +7,12 @@
  */
 
 #include <kunit/test.h>
-#include <kunit/try-catch.h>
 #include <linux/kernel.h>
 #include <linux/sched/debug.h>
 
+#include "string-stream.h"
+#include "try-catch-impl.h"
+
 static void kunit_set_failure(struct kunit *test)
 {
 	WRITE_ONCE(test->success, false);
@@ -171,6 +173,7 @@ void kunit_do_assertion(struct kunit *test,
 	if (assert->type == KUNIT_ASSERTION)
 		kunit_abort(test);
 }
+EXPORT_SYMBOL_GPL(kunit_do_assertion);
 
 void kunit_init_test(struct kunit *test, const char *name)
 {
@@ -179,6 +182,7 @@ void kunit_init_test(struct kunit *test, const char *name)
 	test->name = name;
 	test->success = true;
 }
+EXPORT_SYMBOL_GPL(kunit_init_test);
 
 /*
  * Initializes and runs test case. Does not clean up or do post validations.
@@ -317,6 +321,7 @@ int kunit_run_tests(struct kunit_suite *suite)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(kunit_run_tests);
 
 struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
 						    kunit_resource_init_t init,
@@ -342,6 +347,7 @@ struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
 
 	return res;
 }
+EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource);
 
 static void kunit_resource_free(struct kunit *test, struct kunit_resource *res)
 {
@@ -400,6 +406,7 @@ int kunit_resource_destroy(struct kunit *test,
 	kunit_resource_free(test, resource);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(kunit_resource_destroy);
 
 struct kunit_kmalloc_params {
 	size_t size;
@@ -435,6 +442,7 @@ void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
 				    gfp,
 				    &params);
 }
+EXPORT_SYMBOL_GPL(kunit_kmalloc);
 
 void kunit_kfree(struct kunit *test, const void *ptr)
 {
@@ -447,6 +455,7 @@ void kunit_kfree(struct kunit *test, const void *ptr)
 
 	WARN_ON(rc);
 }
+EXPORT_SYMBOL_GPL(kunit_kfree);
 
 void kunit_cleanup(struct kunit *test)
 {
@@ -476,3 +485,17 @@ void kunit_cleanup(struct kunit *test)
 		kunit_resource_free(test, resource);
 	}
 }
+EXPORT_SYMBOL_GPL(kunit_cleanup);
+
+static int __init kunit_init(void)
+{
+	return 0;
+}
+late_initcall(kunit_init);
+
+static void __exit kunit_exit(void)
+{
+}
+module_exit(kunit_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/try-catch-impl.h b/lib/kunit/try-catch-impl.h
new file mode 100644
index 0000000..203ba6a
--- /dev/null
+++ b/lib/kunit/try-catch-impl.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Internal kunit try catch implementation to be shared with tests.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#ifndef _KUNIT_TRY_CATCH_IMPL_H
+#define _KUNIT_TRY_CATCH_IMPL_H
+
+#include <kunit/try-catch.h>
+#include <linux/types.h>
+
+struct kunit;
+
+static inline void kunit_try_catch_init(struct kunit_try_catch *try_catch,
+					struct kunit *test,
+					kunit_try_catch_func_t try,
+					kunit_try_catch_func_t catch)
+{
+	try_catch->test = test;
+	try_catch->try = try;
+	try_catch->catch = catch;
+}
+
+#endif /* _KUNIT_TRY_CATCH_IMPL_H */
diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
index 55686839..0dd434e 100644
--- a/lib/kunit/try-catch.c
+++ b/lib/kunit/try-catch.c
@@ -8,17 +8,18 @@
  */
 
 #include <kunit/test.h>
-#include <kunit/try-catch.h>
 #include <linux/completion.h>
 #include <linux/kernel.h>
 #include <linux/kthread.h>
-#include <linux/sched/sysctl.h>
+
+#include "try-catch-impl.h"
 
 void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch)
 {
 	try_catch->try_result = -EFAULT;
 	complete_and_exit(try_catch->try_completion, -EFAULT);
 }
+EXPORT_SYMBOL_GPL(kunit_try_catch_throw);
 
 static int kunit_generic_run_threadfn_adapter(void *data)
 {
@@ -31,8 +32,6 @@ static int kunit_generic_run_threadfn_adapter(void *data)
 
 static unsigned long kunit_test_timeout(void)
 {
-	unsigned long timeout_msecs;
-
 	/*
 	 * TODO(brendanhiggins@google.com): We should probably have some type of
 	 * variable timeout here. The only question is what that timeout value
@@ -49,22 +48,11 @@ static unsigned long kunit_test_timeout(void)
 	 *
 	 * For more background on this topic, see:
 	 * https://mike-bland.com/2011/11/01/small-medium-large.html
+	 *
+	 * If tests time out due to exceeding sysctl_hung_task_timeout_secs,
+	 * the task will be killed and an oops generated.
 	 */
-	if (sysctl_hung_task_timeout_secs) {
-		/*
-		 * If sysctl_hung_task is active, just set the timeout to some
-		 * value less than that.
-		 *
-		 * In regards to the above TODO, if we decide on variable
-		 * timeouts, this logic will likely need to change.
-		 */
-		timeout_msecs = (sysctl_hung_task_timeout_secs - 1) *
-				MSEC_PER_SEC;
-	} else {
-		timeout_msecs = 300 * MSEC_PER_SEC; /* 5 min */
-	}
-
-	return timeout_msecs;
+	return 300 * MSEC_PER_SEC; /* 5 min */
 }
 
 void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
@@ -106,13 +94,4 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
 
 	try_catch->catch(try_catch->context);
 }
-
-void kunit_try_catch_init(struct kunit_try_catch *try_catch,
-			  struct kunit *test,
-			  kunit_try_catch_func_t try,
-			  kunit_try_catch_func_t catch)
-{
-	try_catch->test = test;
-	try_catch->try = try;
-	try_catch->catch = catch;
-}
+EXPORT_SYMBOL_GPL(kunit_try_catch_run);
diff --git a/lib/list-test.c b/lib/list-test.c
index 363c600..76babb1 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -743,4 +743,6 @@ static struct kunit_suite list_test_module = {
 	.test_cases = list_test_cases,
 };
 
-kunit_test_suite(list_test_module);
+kunit_test_suites(&list_test_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index c2cf2c3..5813072 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -311,7 +311,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
 			if (prv)
 				table->nents = ++table->orig_nents;
 
- 			return -ENOMEM;
+			return -ENOMEM;
 		}
 
 		sg_init_table(sg, alloc_size);
diff --git a/lib/string.c b/lib/string.c
index 08ec58c..f607b96 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -434,6 +434,23 @@ char *strchrnul(const char *s, int c)
 EXPORT_SYMBOL(strchrnul);
 #endif
 
+/**
+ * strnchrnul - Find and return a character in a length limited string,
+ * or end of string
+ * @s: The string to be searched
+ * @count: The number of characters to be searched
+ * @c: The character to search for
+ *
+ * Returns pointer to the first occurrence of 'c' in s. If c is not found,
+ * then return a pointer to the last character of the string.
+ */
+char *strnchrnul(const char *s, size_t count, int c)
+{
+	while (count-- && *s && *s != (char)c)
+		s++;
+	return (char *)s;
+}
+
 #ifndef __HAVE_ARCH_STRRCHR
 /**
  * strrchr - Find the last occurrence of a character in a string
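
The new strnchrnul() above stops at the first match, at the length limit, or at the terminating NUL, whichever comes first, and always returns a valid pointer into the string. A minimal userspace sketch of those semantics, using a local copy of the function rather than the kernel build:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Local copy of the helper for illustration; the kernel version lives in lib/string.c. */
static char *strnchrnul(const char *s, size_t count, int c)
{
	while (count-- && *s && *s != (char)c)
		s++;
	return (char *)s;
}

int main(void)
{
	const char *s = "trace:ustring";

	/* Found within the first 5 characters: points at the ':'. */
	assert(*strnchrnul(s, 5, ':') == ':');
	/* Not found within the first 3 characters: stops after 't','r','a'. */
	assert(strnchrnul(s, 3, ':') == s + 3);
	/* Not found at all: points at the terminating NUL. */
	assert(*strnchrnul(s, 100, 'z') == '\0');
	printf("ok\n");
	return 0;
}
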
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index e14a15a..61ed71c 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -275,26 +275,28 @@ static void __init test_copy(void)
 static void __init test_replace(void)
 {
 	unsigned int nbits = 64;
+	unsigned int nlongs = DIV_ROUND_UP(nbits, BITS_PER_LONG);
 	DECLARE_BITMAP(bmap, 1024);
 
 	bitmap_zero(bmap, 1024);
-	bitmap_replace(bmap, &exp2[0], &exp2[1], exp2_to_exp3_mask, nbits);
+	bitmap_replace(bmap, &exp2[0 * nlongs], &exp2[1 * nlongs], exp2_to_exp3_mask, nbits);
 	expect_eq_bitmap(bmap, exp3_0_1, nbits);
 
 	bitmap_zero(bmap, 1024);
-	bitmap_replace(bmap, &exp2[1], &exp2[0], exp2_to_exp3_mask, nbits);
+	bitmap_replace(bmap, &exp2[1 * nlongs], &exp2[0 * nlongs], exp2_to_exp3_mask, nbits);
 	expect_eq_bitmap(bmap, exp3_1_0, nbits);
 
 	bitmap_fill(bmap, 1024);
-	bitmap_replace(bmap, &exp2[0], &exp2[1], exp2_to_exp3_mask, nbits);
+	bitmap_replace(bmap, &exp2[0 * nlongs], &exp2[1 * nlongs], exp2_to_exp3_mask, nbits);
 	expect_eq_bitmap(bmap, exp3_0_1, nbits);
 
 	bitmap_fill(bmap, 1024);
-	bitmap_replace(bmap, &exp2[1], &exp2[0], exp2_to_exp3_mask, nbits);
+	bitmap_replace(bmap, &exp2[1 * nlongs], &exp2[0 * nlongs], exp2_to_exp3_mask, nbits);
 	expect_eq_bitmap(bmap, exp3_1_0, nbits);
 }
 
-#define PARSE_TIME 0x1
+#define PARSE_TIME	0x1
+#define NO_LEN		0x2
 
 struct test_bitmap_parselist{
 	const int errno;
@@ -348,7 +350,6 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
 	{-EINVAL, "0-31:a/1", NULL, 8, 0},
 	{-EINVAL, "0-\n", NULL, 8, 0},
 
-#undef step
 };
 
 static void __init __test_bitmap_parselist(int is_user)
@@ -400,6 +401,95 @@ static void __init __test_bitmap_parselist(int is_user)
 	}
 }
 
+static const unsigned long parse_test[] __initconst = {
+	BITMAP_FROM_U64(0),
+	BITMAP_FROM_U64(1),
+	BITMAP_FROM_U64(0xdeadbeef),
+	BITMAP_FROM_U64(0x100000000ULL),
+};
+
+static const unsigned long parse_test2[] __initconst = {
+	BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0xdeadbeef),
+	BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0xbaadf00ddeadbeef),
+	BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0x0badf00ddeadbeef),
+};
+
+static const struct test_bitmap_parselist parse_tests[] __initconst = {
+	{0, "",				&parse_test[0 * step], 32, 0},
+	{0, " ",			&parse_test[0 * step], 32, 0},
+	{0, "0",			&parse_test[0 * step], 32, 0},
+	{0, "0\n",			&parse_test[0 * step], 32, 0},
+	{0, "1",			&parse_test[1 * step], 32, 0},
+	{0, "deadbeef",			&parse_test[2 * step], 32, 0},
+	{0, "1,0",			&parse_test[3 * step], 33, 0},
+	{0, "deadbeef,\n,0,1",		&parse_test[2 * step], 96, 0},
+
+	{0, "deadbeef,1,0",		&parse_test2[0 * 2 * step], 96, 0},
+	{0, "baadf00d,deadbeef,1,0",	&parse_test2[1 * 2 * step], 128, 0},
+	{0, "badf00d,deadbeef,1,0",	&parse_test2[2 * 2 * step], 124, 0},
+	{0, "badf00d,deadbeef,1,0",	&parse_test2[2 * 2 * step], 124, NO_LEN},
+	{0, "  badf00d,deadbeef,1,0  ",	&parse_test2[2 * 2 * step], 124, 0},
+	{0, " , badf00d,deadbeef,1,0 , ",	&parse_test2[2 * 2 * step], 124, 0},
+	{0, " , badf00d, ,, ,,deadbeef,1,0 , ",	&parse_test2[2 * 2 * step], 124, 0},
+
+	{-EINVAL,    "goodfood,deadbeef,1,0",	NULL, 128, 0},
+	{-EOVERFLOW, "3,0",			NULL, 33, 0},
+	{-EOVERFLOW, "123badf00d,deadbeef,1,0",	NULL, 128, 0},
+	{-EOVERFLOW, "badf00d,deadbeef,1,0",	NULL, 90, 0},
+	{-EOVERFLOW, "fbadf00d,deadbeef,1,0",	NULL, 95, 0},
+	{-EOVERFLOW, "badf00d,deadbeef,1,0",	NULL, 100, 0},
+#undef step
+};
+
+static void __init __test_bitmap_parse(int is_user)
+{
+	int i;
+	int err;
+	ktime_t time;
+	DECLARE_BITMAP(bmap, 2048);
+	char *mode = is_user ? "_user"  : "";
+
+	for (i = 0; i < ARRAY_SIZE(parse_tests); i++) {
+		struct test_bitmap_parselist test = parse_tests[i];
+
+		if (is_user) {
+			size_t len = strlen(test.in);
+			mm_segment_t orig_fs = get_fs();
+
+			set_fs(KERNEL_DS);
+			time = ktime_get();
+			err = bitmap_parse_user((__force const char __user *)test.in, len,
+						bmap, test.nbits);
+			time = ktime_get() - time;
+			set_fs(orig_fs);
+		} else {
+			size_t len = test.flags & NO_LEN ?
+				UINT_MAX : strlen(test.in);
+			time = ktime_get();
+			err = bitmap_parse(test.in, len, bmap, test.nbits);
+			time = ktime_get() - time;
+		}
+
+		if (err != test.errno) {
+			pr_err("parse%s: %d: input is %s, errno is %d, expected %d\n",
+					mode, i, test.in, err, test.errno);
+			continue;
+		}
+
+		if (!err && test.expected
+			 && !__bitmap_equal(bmap, test.expected, test.nbits)) {
+			pr_err("parse%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
+					mode, i, test.in, bmap[0],
+					*test.expected);
+			continue;
+		}
+
+		if (test.flags & PARSE_TIME)
+			pr_err("parse%s: %d: input is '%s' OK, Time: %llu\n",
+					mode, i, test.in, time);
+	}
+}
+
 static void __init test_bitmap_parselist(void)
 {
 	__test_bitmap_parselist(0);
@@ -410,6 +500,16 @@ static void __init test_bitmap_parselist_user(void)
 	__test_bitmap_parselist(1);
 }
 
+static void __init test_bitmap_parse(void)
+{
+	__test_bitmap_parse(0);
+}
+
+static void __init test_bitmap_parse_user(void)
+{
+	__test_bitmap_parse(1);
+}
+
 #define EXP1_IN_BITS	(sizeof(exp1) * 8)
 
 static void __init test_bitmap_arr32(void)
@@ -515,6 +615,8 @@ static void __init selftest(void)
 	test_copy();
 	test_replace();
 	test_bitmap_arr32();
+	test_bitmap_parse();
+	test_bitmap_parse_user();
 	test_bitmap_parselist();
 	test_bitmap_parselist_user();
 	test_mem_optimisations();
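
The expected bitmaps in parse_test2 follow bitmap_parse()'s convention that the input is a list of 32-bit hex groups, most significant group first, while the destination stores the least significant long first. A standalone sketch of that arithmetic for the "deadbeef,1,0" case (plain C, not the kernel API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/*
	 * "deadbeef,1,0" describes a 96-bit value built from 32-bit groups,
	 * most significant group first:  0xdeadbeef : 0x00000001 : 0x00000000.
	 * Split into 64-bit words, least significant word first, that is
	 * { 0x0000000100000000, 0x00000000deadbeef }, matching
	 * parse_test2[0] = { BITMAP_FROM_U64(0x100000000ULL),
	 *                    BITMAP_FROM_U64(0xdeadbeef) }.
	 */
	uint32_t groups[] = { 0xdeadbeef, 0x1, 0x0 };	/* as written in the string */
	uint64_t lo = ((uint64_t)groups[1] << 32) | groups[2];
	uint64_t hi = groups[0];

	assert(lo == 0x100000000ULL);
	assert(hi == 0xdeadbeefULL);
	printf("lo=%#llx hi=%#llx\n",
	       (unsigned long long)lo, (unsigned long long)hi);
	return 0;
}
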
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 328d33b..3872d25 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -158,6 +158,7 @@ static noinline void __init kmalloc_oob_krealloc_more(void)
 	if (!ptr1 || !ptr2) {
 		pr_err("Allocation failed\n");
 		kfree(ptr1);
+		kfree(ptr2);
 		return;
 	}
 
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index d20ef45..8a878d0 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -52,16 +52,19 @@
 #include <linux/zutil.h>
 #include "defutil.h"
 
+/* architecture-specific bits */
+#ifdef CONFIG_ZLIB_DFLTCC
+#  include "../zlib_dfltcc/dfltcc.h"
+#else
+#define DEFLATE_RESET_HOOK(strm) do {} while (0)
+#define DEFLATE_HOOK(strm, flush, bstate) 0
+#define DEFLATE_NEED_CHECKSUM(strm) 1
+#define DEFLATE_DFLTCC_ENABLED() 0
+#endif
 
 /* ===========================================================================
  *  Function prototypes.
  */
-typedef enum {
-    need_more,      /* block not completed, need more input or more output */
-    block_done,     /* block flush performed */
-    finish_started, /* finish started, need only more output at next deflate */
-    finish_done     /* finish done, accept no more input or output */
-} block_state;
 
 typedef block_state (*compress_func) (deflate_state *s, int flush);
 /* Compression function. Returns the block state after the call. */
@@ -72,7 +75,6 @@ static block_state deflate_fast   (deflate_state *s, int flush);
 static block_state deflate_slow   (deflate_state *s, int flush);
 static void lm_init        (deflate_state *s);
 static void putShortMSB    (deflate_state *s, uInt b);
-static void flush_pending  (z_streamp strm);
 static int read_buf        (z_streamp strm, Byte *buf, unsigned size);
 static uInt longest_match  (deflate_state *s, IPos cur_match);
 
@@ -98,6 +100,25 @@ static  void check_match (deflate_state *s, IPos start, IPos match,
  * See deflate.c for comments about the MIN_MATCH+1.
  */
 
+/* Workspace to be allocated for deflate processing */
+typedef struct deflate_workspace {
+    /* State memory for the deflator */
+    deflate_state deflate_memory;
+#ifdef CONFIG_ZLIB_DFLTCC
+    /* State memory for s390 hardware deflate */
+    struct dfltcc_state dfltcc_memory;
+#endif
+    Byte *window_memory;
+    Pos *prev_memory;
+    Pos *head_memory;
+    char *overlay_memory;
+} deflate_workspace;
+
+#ifdef CONFIG_ZLIB_DFLTCC
+/* dfltcc_state must be doubleword aligned for DFLTCC call */
+static_assert(offsetof(struct deflate_workspace, dfltcc_memory) % 8 == 0);
+#endif
+
 /* Values for max_lazy_match, good_match and max_chain_length, depending on
  * the desired pack level (0..9). The values given below have been tuned to
  * exclude worst case performance for pathological files. Better values may be
@@ -207,7 +228,15 @@ int zlib_deflateInit2(
      */
     next = (char *) mem;
     next += sizeof(*mem);
+#ifdef CONFIG_ZLIB_DFLTCC
+    /*
+     *  DFLTCC requires the window to be page aligned.
+     *  Thus, we overallocate and take the aligned portion of the buffer.
+     */
+    mem->window_memory = (Byte *) PTR_ALIGN(next, PAGE_SIZE);
+#else
     mem->window_memory = (Byte *) next;
+#endif
     next += zlib_deflate_window_memsize(windowBits);
     mem->prev_memory = (Pos *) next;
     next += zlib_deflate_prev_memsize(windowBits);
@@ -277,6 +306,8 @@ int zlib_deflateReset(
     zlib_tr_init(s);
     lm_init(s);
 
+    DEFLATE_RESET_HOOK(strm);
+
     return Z_OK;
 }
 
@@ -294,35 +325,6 @@ static void putShortMSB(
     put_byte(s, (Byte)(b & 0xff));
 }   
 
-/* =========================================================================
- * Flush as much pending output as possible. All deflate() output goes
- * through this function so some applications may wish to modify it
- * to avoid allocating a large strm->next_out buffer and copying into it.
- * (See also read_buf()).
- */
-static void flush_pending(
-	z_streamp strm
-)
-{
-    deflate_state *s = (deflate_state *) strm->state;
-    unsigned len = s->pending;
-
-    if (len > strm->avail_out) len = strm->avail_out;
-    if (len == 0) return;
-
-    if (strm->next_out != NULL) {
-	memcpy(strm->next_out, s->pending_out, len);
-	strm->next_out += len;
-    }
-    s->pending_out += len;
-    strm->total_out += len;
-    strm->avail_out  -= len;
-    s->pending -= len;
-    if (s->pending == 0) {
-        s->pending_out = s->pending_buf;
-    }
-}
-
 /* ========================================================================= */
 int zlib_deflate(
 	z_streamp strm,
@@ -404,7 +406,8 @@ int zlib_deflate(
         (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
         block_state bstate;
 
-	bstate = (*(configuration_table[s->level].func))(s, flush);
+	bstate = DEFLATE_HOOK(strm, flush, &bstate) ? bstate :
+		 (*(configuration_table[s->level].func))(s, flush);
 
         if (bstate == finish_started || bstate == finish_done) {
             s->status = FINISH_STATE;
@@ -503,7 +506,8 @@ static int read_buf(
 
     strm->avail_in  -= len;
 
-    if (!((deflate_state *)(strm->state))->noheader) {
+    if (!DEFLATE_NEED_CHECKSUM(strm)) {}
+    else if (!((deflate_state *)(strm->state))->noheader) {
         strm->adler = zlib_adler32(strm->adler, strm->next_in, len);
     }
     memcpy(buf, strm->next_in, len);
@@ -1135,3 +1139,8 @@ int zlib_deflate_workspacesize(int windowBits, int memLevel)
         + zlib_deflate_head_memsize(memLevel)
         + zlib_deflate_overlay_memsize(memLevel);
 }
+
+int zlib_deflate_dfltcc_enabled(void)
+{
+	return DEFLATE_DFLTCC_ENABLED();
+}
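
The hunk above replaces the direct call to the per-level compress function with a compile-time hook: DEFLATE_HOOK() either hands the block to the s390 DFLTCC code or expands to 0 so the generic software path runs with no runtime cost. A small standalone sketch of that pattern, with hypothetical names (USE_ACCEL, COMPRESS_HOOK) standing in for CONFIG_ZLIB_DFLTCC and DEFLATE_HOOK:

#include <stdio.h>

/* When the accelerator is configured in, the hook expands to a real call;
 * otherwise it expands to 0 and the generic path below is taken. */
#ifdef USE_ACCEL
static int accel_compress(int *result)
{
	*result = 42;	/* pretend the hardware produced a block state */
	return 1;	/* handled */
}
#  define COMPRESS_HOOK(result) accel_compress(result)
#else
#  define COMPRESS_HOOK(result) 0
#endif

static int generic_compress(void)
{
	return 7;	/* software fallback */
}

int main(void)
{
	int bstate;

	/* Mirrors: bstate = DEFLATE_HOOK(...) ? bstate : (*func)(s, flush); */
	bstate = COMPRESS_HOOK(&bstate) ? bstate : generic_compress();
	printf("bstate=%d\n", bstate);
	return 0;
}
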
diff --git a/lib/zlib_deflate/deflate_syms.c b/lib/zlib_deflate/deflate_syms.c
index 72fe4b7..24b740b 100644
--- a/lib/zlib_deflate/deflate_syms.c
+++ b/lib/zlib_deflate/deflate_syms.c
@@ -12,6 +12,7 @@
 #include <linux/zlib.h>
 
 EXPORT_SYMBOL(zlib_deflate_workspacesize);
+EXPORT_SYMBOL(zlib_deflate_dfltcc_enabled);
 EXPORT_SYMBOL(zlib_deflate);
 EXPORT_SYMBOL(zlib_deflateInit2);
 EXPORT_SYMBOL(zlib_deflateEnd);
diff --git a/lib/zlib_deflate/deftree.c b/lib/zlib_deflate/deftree.c
index 9b1756b..a4a34da 100644
--- a/lib/zlib_deflate/deftree.c
+++ b/lib/zlib_deflate/deftree.c
@@ -76,11 +76,6 @@ static const uch bl_order[BL_CODES]
  * probability, to avoid transmitting the lengths for unused bit length codes.
  */
 
-#define Buf_size (8 * 2*sizeof(char))
-/* Number of bits used within bi_buf. (bi_buf might be implemented on
- * more than 16 bits on some systems.)
- */
-
 /* ===========================================================================
  * Local data. These are initialized only once.
  */
@@ -147,7 +142,6 @@ static void send_all_trees (deflate_state *s, int lcodes, int dcodes,
 static void compress_block (deflate_state *s, ct_data *ltree,
                            ct_data *dtree);
 static void set_data_type  (deflate_state *s);
-static void bi_windup      (deflate_state *s);
 static void bi_flush       (deflate_state *s);
 static void copy_block     (deflate_state *s, char *buf, unsigned len,
                            int header);
@@ -170,54 +164,6 @@ static void copy_block     (deflate_state *s, char *buf, unsigned len,
  */
 
 /* ===========================================================================
- * Send a value on a given number of bits.
- * IN assertion: length <= 16 and value fits in length bits.
- */
-#ifdef DEBUG_ZLIB
-static void send_bits      (deflate_state *s, int value, int length);
-
-static void send_bits(
-	deflate_state *s,
-	int value,  /* value to send */
-	int length  /* number of bits */
-)
-{
-    Tracevv((stderr," l %2d v %4x ", length, value));
-    Assert(length > 0 && length <= 15, "invalid length");
-    s->bits_sent += (ulg)length;
-
-    /* If not enough room in bi_buf, use (valid) bits from bi_buf and
-     * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
-     * unused bits in value.
-     */
-    if (s->bi_valid > (int)Buf_size - length) {
-        s->bi_buf |= (value << s->bi_valid);
-        put_short(s, s->bi_buf);
-        s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
-        s->bi_valid += length - Buf_size;
-    } else {
-        s->bi_buf |= value << s->bi_valid;
-        s->bi_valid += length;
-    }
-}
-#else /* !DEBUG_ZLIB */
-
-#define send_bits(s, value, length) \
-{ int len = length;\
-  if (s->bi_valid > (int)Buf_size - len) {\
-    int val = value;\
-    s->bi_buf |= (val << s->bi_valid);\
-    put_short(s, s->bi_buf);\
-    s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
-    s->bi_valid += len - Buf_size;\
-  } else {\
-    s->bi_buf |= (value) << s->bi_valid;\
-    s->bi_valid += len;\
-  }\
-}
-#endif /* DEBUG_ZLIB */
-
-/* ===========================================================================
  * Initialize the various 'constant' tables. In a multi-threaded environment,
  * this function may be called by two threads concurrently, but this is
  * harmless since both invocations do exactly the same thing.
diff --git a/lib/zlib_deflate/defutil.h b/lib/zlib_deflate/defutil.h
index a8c3708..385333b 100644
--- a/lib/zlib_deflate/defutil.h
+++ b/lib/zlib_deflate/defutil.h
@@ -1,5 +1,7 @@
+#ifndef DEFUTIL_H
+#define DEFUTIL_H
 
-
+#include <linux/zutil.h>
 
 #define Assert(err, str) 
 #define Trace(dummy) 
@@ -238,17 +240,13 @@ typedef struct deflate_state {
 
 } deflate_state;
 
-typedef struct deflate_workspace {
-    /* State memory for the deflator */
-    deflate_state deflate_memory;
-    Byte *window_memory;
-    Pos *prev_memory;
-    Pos *head_memory;
-    char *overlay_memory;
-} deflate_workspace;
-
+#ifdef CONFIG_ZLIB_DFLTCC
+#define zlib_deflate_window_memsize(windowBits) \
+	(2 * (1 << (windowBits)) * sizeof(Byte) + PAGE_SIZE)
+#else
 #define zlib_deflate_window_memsize(windowBits) \
 	(2 * (1 << (windowBits)) * sizeof(Byte))
+#endif
 #define zlib_deflate_prev_memsize(windowBits) \
 	((1 << (windowBits)) * sizeof(Pos))
 #define zlib_deflate_head_memsize(memLevel) \
@@ -293,6 +291,24 @@ void zlib_tr_stored_type_only (deflate_state *);
 }
 
 /* ===========================================================================
+ * Reverse the first len bits of a code, using straightforward code (a faster
+ * method would use a table)
+ * IN assertion: 1 <= len <= 15
+ */
+static inline unsigned  bi_reverse(
+    unsigned code, /* the value to invert */
+    int len        /* its bit length */
+)
+{
+    register unsigned res = 0;
+    do {
+        res |= code & 1;
+        code >>= 1, res <<= 1;
+    } while (--len > 0);
+    return res >> 1;
+}
+
+/* ===========================================================================
  * Flush the bit buffer, keeping at most 7 bits in it.
  */
 static inline void bi_flush(deflate_state *s)
@@ -325,3 +341,101 @@ static inline void bi_windup(deflate_state *s)
 #endif
 }
 
+typedef enum {
+    need_more,      /* block not completed, need more input or more output */
+    block_done,     /* block flush performed */
+    finish_started, /* finish started, need only more output at next deflate */
+    finish_done     /* finish done, accept no more input or output */
+} block_state;
+
+#define Buf_size (8 * 2*sizeof(char))
+/* Number of bits used within bi_buf. (bi_buf might be implemented on
+ * more than 16 bits on some systems.)
+ */
+
+/* ===========================================================================
+ * Send a value on a given number of bits.
+ * IN assertion: length <= 16 and value fits in length bits.
+ */
+#ifdef DEBUG_ZLIB
+static void send_bits      (deflate_state *s, int value, int length);
+
+static void send_bits(
+    deflate_state *s,
+    int value,  /* value to send */
+    int length  /* number of bits */
+)
+{
+    Tracevv((stderr," l %2d v %4x ", length, value));
+    Assert(length > 0 && length <= 15, "invalid length");
+    s->bits_sent += (ulg)length;
+
+    /* If not enough room in bi_buf, use (valid) bits from bi_buf and
+     * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
+     * unused bits in value.
+     */
+    if (s->bi_valid > (int)Buf_size - length) {
+        s->bi_buf |= (value << s->bi_valid);
+        put_short(s, s->bi_buf);
+        s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
+        s->bi_valid += length - Buf_size;
+    } else {
+        s->bi_buf |= value << s->bi_valid;
+        s->bi_valid += length;
+    }
+}
+#else /* !DEBUG_ZLIB */
+
+#define send_bits(s, value, length) \
+{ int len = length;\
+  if (s->bi_valid > (int)Buf_size - len) {\
+    int val = value;\
+    s->bi_buf |= (val << s->bi_valid);\
+    put_short(s, s->bi_buf);\
+    s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
+    s->bi_valid += len - Buf_size;\
+  } else {\
+    s->bi_buf |= (value) << s->bi_valid;\
+    s->bi_valid += len;\
+  }\
+}
+#endif /* DEBUG_ZLIB */
+
+static inline void zlib_tr_send_bits(
+    deflate_state *s,
+    int value,
+    int length
+)
+{
+    send_bits(s, value, length);
+}
+
+/* =========================================================================
+ * Flush as much pending output as possible. All deflate() output goes
+ * through this function so some applications may wish to modify it
+ * to avoid allocating a large strm->next_out buffer and copying into it.
+ * (See also read_buf()).
+ */
+static inline void flush_pending(
+	z_streamp strm
+)
+{
+    deflate_state *s = (deflate_state *) strm->state;
+    unsigned len = s->pending;
+
+    if (len > strm->avail_out) len = strm->avail_out;
+    if (len == 0) return;
+
+    if (strm->next_out != NULL) {
+	memcpy(strm->next_out, s->pending_out, len);
+	strm->next_out += len;
+    }
+    s->pending_out += len;
+    strm->total_out += len;
+    strm->avail_out  -= len;
+    s->pending -= len;
+    if (s->pending == 0) {
+        s->pending_out = s->pending_buf;
+    }
+}
+#endif /* DEFUTIL_H */
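
bi_reverse(), now shared through defutil.h, mirrors the low len bits of a Huffman code so it can be emitted LSB-first. A standalone check of the helper, using a local copy for illustration:

#include <assert.h>
#include <stdio.h>

/* Local copy of bi_reverse() from defutil.h, for illustration only. */
static unsigned bi_reverse(unsigned code, int len)
{
	unsigned res = 0;

	do {
		res |= code & 1;
		code >>= 1, res <<= 1;
	} while (--len > 0);
	return res >> 1;
}

int main(void)
{
	/* 0b00101 reversed over 5 bits is 0b10100. */
	assert(bi_reverse(0x05, 5) == 0x14);
	/* A palindromic bit pattern maps to itself. */
	assert(bi_reverse(0x11, 5) == 0x11);
	printf("ok\n");
	return 0;
}
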
diff --git a/lib/zlib_dfltcc/Makefile b/lib/zlib_dfltcc/Makefile
new file mode 100644
index 0000000..8e4d5af
--- /dev/null
+++ b/lib/zlib_dfltcc/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This is a modified version of zlib, which does all memory
+# allocation ahead of time.
+#
+# This is the code for s390 zlib hardware support.
+#
+
+obj-$(CONFIG_ZLIB_DFLTCC) += zlib_dfltcc.o
+
+zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o dfltcc_syms.o
diff --git a/lib/zlib_dfltcc/dfltcc.c b/lib/zlib_dfltcc/dfltcc.c
new file mode 100644
index 0000000..c30de43
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: Zlib
+/* dfltcc.c - SystemZ DEFLATE CONVERSION CALL support. */
+
+#include <linux/zutil.h>
+#include "dfltcc_util.h"
+#include "dfltcc.h"
+
+char *oesc_msg(
+    char *buf,
+    int oesc
+)
+{
+    if (oesc == 0x00)
+        return NULL; /* Successful completion */
+    else {
+#ifdef STATIC
+        return NULL; /* Ignore for pre-boot decompressor */
+#else
+        sprintf(buf, "Operation-Ending-Supplemental Code is 0x%.2X", oesc);
+        return buf;
+#endif
+    }
+}
+
+void dfltcc_reset(
+    z_streamp strm,
+    uInt size
+)
+{
+    struct dfltcc_state *dfltcc_state =
+        (struct dfltcc_state *)((char *)strm->state + size);
+    struct dfltcc_qaf_param *param =
+        (struct dfltcc_qaf_param *)&dfltcc_state->param;
+
+    /* Initialize available functions */
+    if (is_dfltcc_enabled()) {
+        dfltcc(DFLTCC_QAF, param, NULL, NULL, NULL, NULL, NULL);
+        memmove(&dfltcc_state->af, param, sizeof(dfltcc_state->af));
+    } else
+        memset(&dfltcc_state->af, 0, sizeof(dfltcc_state->af));
+
+    /* Initialize parameter block */
+    memset(&dfltcc_state->param, 0, sizeof(dfltcc_state->param));
+    dfltcc_state->param.nt = 1;
+
+    /* Initialize tuning parameters */
+    if (zlib_dfltcc_support == ZLIB_DFLTCC_FULL_DEBUG)
+        dfltcc_state->level_mask = DFLTCC_LEVEL_MASK_DEBUG;
+    else
+        dfltcc_state->level_mask = DFLTCC_LEVEL_MASK;
+    dfltcc_state->block_size = DFLTCC_BLOCK_SIZE;
+    dfltcc_state->block_threshold = DFLTCC_FIRST_FHT_BLOCK_SIZE;
+    dfltcc_state->dht_threshold = DFLTCC_DHT_MIN_SAMPLE_SIZE;
+    dfltcc_state->param.ribm = DFLTCC_RIBM;
+}
diff --git a/lib/zlib_dfltcc/dfltcc.h b/lib/zlib_dfltcc/dfltcc.h
new file mode 100644
index 0000000..2a2fac1
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc.h
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: Zlib
+#ifndef DFLTCC_H
+#define DFLTCC_H
+
+#include "../zlib_deflate/defutil.h"
+#include <asm/facility.h>
+#include <asm/setup.h>
+
+/*
+ * Tuning parameters.
+ */
+#define DFLTCC_LEVEL_MASK 0x2 /* DFLTCC compression for level 1 only */
+#define DFLTCC_LEVEL_MASK_DEBUG 0x3fe /* DFLTCC compression for all levels */
+#define DFLTCC_BLOCK_SIZE 1048576
+#define DFLTCC_FIRST_FHT_BLOCK_SIZE 4096
+#define DFLTCC_DHT_MIN_SAMPLE_SIZE 4096
+#define DFLTCC_RIBM 0
+
+#define DFLTCC_FACILITY 151
+
+/*
+ * Parameter Block for Query Available Functions.
+ */
+struct dfltcc_qaf_param {
+    char fns[16];
+    char reserved1[8];
+    char fmts[2];
+    char reserved2[6];
+};
+
+static_assert(sizeof(struct dfltcc_qaf_param) == 32);
+
+#define DFLTCC_FMT0 0
+
+/*
+ * Parameter Block for Generate Dynamic-Huffman Table, Compress and Expand.
+ */
+struct dfltcc_param_v0 {
+    uint16_t pbvn;                     /* Parameter-Block-Version Number */
+    uint8_t mvn;                       /* Model-Version Number */
+    uint8_t ribm;                      /* Reserved for IBM use */
+    unsigned reserved32 : 31;
+    unsigned cf : 1;                   /* Continuation Flag */
+    uint8_t reserved64[8];
+    unsigned nt : 1;                   /* New Task */
+    unsigned reserved129 : 1;
+    unsigned cvt : 1;                  /* Check Value Type */
+    unsigned reserved131 : 1;
+    unsigned htt : 1;                  /* Huffman-Table Type */
+    unsigned bcf : 1;                  /* Block-Continuation Flag */
+    unsigned bcc : 1;                  /* Block Closing Control */
+    unsigned bhf : 1;                  /* Block Header Final */
+    unsigned reserved136 : 1;
+    unsigned reserved137 : 1;
+    unsigned dhtgc : 1;                /* DHT Generation Control */
+    unsigned reserved139 : 5;
+    unsigned reserved144 : 5;
+    unsigned sbb : 3;                  /* Sub-Byte Boundary */
+    uint8_t oesc;                      /* Operation-Ending-Supplemental Code */
+    unsigned reserved160 : 12;
+    unsigned ifs : 4;                  /* Incomplete-Function Status */
+    uint16_t ifl;                      /* Incomplete-Function Length */
+    uint8_t reserved192[8];
+    uint8_t reserved256[8];
+    uint8_t reserved320[4];
+    uint16_t hl;                       /* History Length */
+    unsigned reserved368 : 1;
+    uint16_t ho : 15;                  /* History Offset */
+    uint32_t cv;                       /* Check Value */
+    unsigned eobs : 15;                /* End-of-block Symbol */
+    unsigned reserved431: 1;
+    uint8_t eobl : 4;                  /* End-of-block Length */
+    unsigned reserved436 : 12;
+    unsigned reserved448 : 4;
+    uint16_t cdhtl : 12;               /* Compressed-Dynamic-Huffman Table
+                                          Length */
+    uint8_t reserved464[6];
+    uint8_t cdht[288];
+    uint8_t reserved[32];
+    uint8_t csb[1152];
+};
+
+static_assert(sizeof(struct dfltcc_param_v0) == 1536);
+
+#define CVT_CRC32 0
+#define CVT_ADLER32 1
+#define HTT_FIXED 0
+#define HTT_DYNAMIC 1
+
+/*
+ *  Extension of inflate_state and deflate_state for DFLTCC.
+ */
+struct dfltcc_state {
+    struct dfltcc_param_v0 param;      /* Parameter block */
+    struct dfltcc_qaf_param af;        /* Available functions */
+    uLong level_mask;                  /* Levels on which to use DFLTCC */
+    uLong block_size;                  /* New block each X bytes */
+    uLong block_threshold;             /* New block after total_in > X */
+    uLong dht_threshold;               /* New block only if avail_in >= X */
+    char msg[64];                      /* Buffer for strm->msg */
+};
+
+/* Resides right after inflate_state or deflate_state */
+#define GET_DFLTCC_STATE(state) ((struct dfltcc_state *)((state) + 1))
+
+/* External functions */
+int dfltcc_can_deflate(z_streamp strm);
+int dfltcc_deflate(z_streamp strm,
+                   int flush,
+                   block_state *result);
+void dfltcc_reset(z_streamp strm, uInt size);
+int dfltcc_can_inflate(z_streamp strm);
+typedef enum {
+    DFLTCC_INFLATE_CONTINUE,
+    DFLTCC_INFLATE_BREAK,
+    DFLTCC_INFLATE_SOFTWARE,
+} dfltcc_inflate_action;
+dfltcc_inflate_action dfltcc_inflate(z_streamp strm,
+                                     int flush, int *ret);
+static inline int is_dfltcc_enabled(void)
+{
+    return (zlib_dfltcc_support != ZLIB_DFLTCC_DISABLED &&
+            test_facility(DFLTCC_FACILITY));
+}
+
+#define DEFLATE_RESET_HOOK(strm) \
+    dfltcc_reset((strm), sizeof(deflate_state))
+
+#define DEFLATE_HOOK dfltcc_deflate
+
+#define DEFLATE_NEED_CHECKSUM(strm) (!dfltcc_can_deflate((strm)))
+
+#define DEFLATE_DFLTCC_ENABLED() is_dfltcc_enabled()
+
+#define INFLATE_RESET_HOOK(strm) \
+    dfltcc_reset((strm), sizeof(struct inflate_state))
+
+#define INFLATE_TYPEDO_HOOK(strm, flush) \
+    if (dfltcc_can_inflate((strm))) { \
+        dfltcc_inflate_action action; \
+\
+        RESTORE(); \
+        action = dfltcc_inflate((strm), (flush), &ret); \
+        LOAD(); \
+        if (action == DFLTCC_INFLATE_CONTINUE) \
+            break; \
+        else if (action == DFLTCC_INFLATE_BREAK) \
+            goto inf_leave; \
+    }
+
+#define INFLATE_NEED_CHECKSUM(strm) (!dfltcc_can_inflate((strm)))
+
+#define INFLATE_NEED_UPDATEWINDOW(strm) (!dfltcc_can_inflate((strm)))
+
+#endif /* DFLTCC_H */
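
GET_DFLTCC_STATE() relies on the DFLTCC state being allocated immediately after the software deflate_state/inflate_state, so "(state) + 1" lands on it; the workspace layouts above enforce the doubleword alignment with static_assert. A userspace sketch of that trailing-extension idiom, with hypothetical struct names:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for deflate_state and dfltcc_state. */
struct base_state { int level; };
struct ext_state  { int block_size; };

/* Mirrors GET_DFLTCC_STATE(state): the extension lives right after the base.
 * In the kernel, a static_assert on the workspace guarantees the alignment. */
#define GET_EXT_STATE(state) ((struct ext_state *)((state) + 1))

int main(void)
{
	/* One allocation holds the base state followed by its extension. */
	struct base_state *base = malloc(sizeof(*base) + sizeof(struct ext_state));
	struct ext_state *ext;

	assert(base);
	ext = GET_EXT_STATE(base);
	ext->block_size = 1048576;
	assert((char *)ext == (char *)base + sizeof(*base));
	printf("extension starts at +%zu bytes\n", sizeof(*base));
	free(base);
	return 0;
}
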
diff --git a/lib/zlib_dfltcc/dfltcc_deflate.c b/lib/zlib_dfltcc/dfltcc_deflate.c
new file mode 100644
index 0000000..00c1851
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc_deflate.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: Zlib
+
+#include "../zlib_deflate/defutil.h"
+#include "dfltcc_util.h"
+#include "dfltcc.h"
+#include <asm/setup.h>
+#include <linux/zutil.h>
+
+/*
+ * Compress.
+ */
+int dfltcc_can_deflate(
+    z_streamp strm
+)
+{
+    deflate_state *state = (deflate_state *)strm->state;
+    struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
+
+    /* Check for kernel dfltcc command line parameter */
+    if (zlib_dfltcc_support == ZLIB_DFLTCC_DISABLED ||
+            zlib_dfltcc_support == ZLIB_DFLTCC_INFLATE_ONLY)
+        return 0;
+
+    /* Unsupported compression settings */
+    if (!dfltcc_are_params_ok(state->level, state->w_bits, state->strategy,
+                              dfltcc_state->level_mask))
+        return 0;
+
+    /* Unsupported hardware */
+    if (!is_bit_set(dfltcc_state->af.fns, DFLTCC_GDHT) ||
+            !is_bit_set(dfltcc_state->af.fns, DFLTCC_CMPR) ||
+            !is_bit_set(dfltcc_state->af.fmts, DFLTCC_FMT0))
+        return 0;
+
+    return 1;
+}
+
+static void dfltcc_gdht(
+    z_streamp strm
+)
+{
+    deflate_state *state = (deflate_state *)strm->state;
+    struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
+    size_t avail_in = strm->avail_in;
+
+    dfltcc(DFLTCC_GDHT,
+           param, NULL, NULL,
+           &strm->next_in, &avail_in, NULL);
+}
+
+static dfltcc_cc dfltcc_cmpr(
+    z_streamp strm
+)
+{
+    deflate_state *state = (deflate_state *)strm->state;
+    struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
+    size_t avail_in = strm->avail_in;
+    size_t avail_out = strm->avail_out;
+    dfltcc_cc cc;
+
+    cc = dfltcc(DFLTCC_CMPR | HBT_CIRCULAR,
+                param, &strm->next_out, &avail_out,
+                &strm->next_in, &avail_in, state->window);
+    strm->total_in += (strm->avail_in - avail_in);
+    strm->total_out += (strm->avail_out - avail_out);
+    strm->avail_in = avail_in;
+    strm->avail_out = avail_out;
+    return cc;
+}
+
+static void send_eobs(
+    z_streamp strm,
+    const struct dfltcc_param_v0 *param
+)
+{
+    deflate_state *state = (deflate_state *)strm->state;
+
+    zlib_tr_send_bits(
+          state,
+          bi_reverse(param->eobs >> (15 - param->eobl), param->eobl),
+          param->eobl);
+    flush_pending(strm);
+    if (state->pending != 0) {
+        /* The remaining data is located in pending_out[0:pending]. If someone
+         * calls put_byte() - this might happen in deflate() - the byte will be
+         * placed into pending_buf[pending], which is incorrect. Move the
+         * remaining data to the beginning of pending_buf so that put_byte() is
+         * usable again.
+         */
+        memmove(state->pending_buf, state->pending_out, state->pending);
+        state->pending_out = state->pending_buf;
+    }
+#ifdef ZLIB_DEBUG
+    state->compressed_len += param->eobl;
+#endif
+}
+
+int dfltcc_deflate(
+    z_streamp strm,
+    int flush,
+    block_state *result
+)
+{
+    deflate_state *state = (deflate_state *)strm->state;
+    struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
+    struct dfltcc_param_v0 *param = &dfltcc_state->param;
+    uInt masked_avail_in;
+    dfltcc_cc cc;
+    int need_empty_block;
+    int soft_bcc;
+    int no_flush;
+
+    if (!dfltcc_can_deflate(strm))
+        return 0;
+
+again:
+    masked_avail_in = 0;
+    soft_bcc = 0;
+    no_flush = flush == Z_NO_FLUSH;
+
+    /* Trailing empty block. Switch to software, except when Continuation Flag
+     * is set, which means that DFLTCC has buffered some output in the
+     * parameter block and needs to be called again in order to flush it.
+     */
+    if (flush == Z_FINISH && strm->avail_in == 0 && !param->cf) {
+        if (param->bcf) {
+            /* A block is still open, and the hardware does not support closing
+             * blocks without adding data. Thus, close it manually.
+             */
+            send_eobs(strm, param);
+            param->bcf = 0;
+        }
+        return 0;
+    }
+
+    if (strm->avail_in == 0 && !param->cf) {
+        *result = need_more;
+        return 1;
+    }
+
+    /* There is an open non-BFINAL block, we are not going to close it just
+     * yet, we have compressed more than DFLTCC_BLOCK_SIZE bytes and we see
+     * more than DFLTCC_DHT_MIN_SAMPLE_SIZE bytes. Open a new block with a new
+     * DHT in order to adapt to a possibly changed input data distribution.
+     */
+    if (param->bcf && no_flush &&
+            strm->total_in > dfltcc_state->block_threshold &&
+            strm->avail_in >= dfltcc_state->dht_threshold) {
+        if (param->cf) {
+            /* We need to flush the DFLTCC buffer before writing the
+             * End-of-block Symbol. Mask the input data and proceed as usual.
+             */
+            masked_avail_in += strm->avail_in;
+            strm->avail_in = 0;
+            no_flush = 0;
+        } else {
+            /* DFLTCC buffer is empty, so we can manually write the
+             * End-of-block Symbol right away.
+             */
+            send_eobs(strm, param);
+            param->bcf = 0;
+            dfltcc_state->block_threshold =
+                strm->total_in + dfltcc_state->block_size;
+            if (strm->avail_out == 0) {
+                *result = need_more;
+                return 1;
+            }
+        }
+    }
+
+    /* The caller gave us too much data. Pass only one block worth of
+     * uncompressed data to DFLTCC and mask the rest, so that on the next
+     * iteration we start a new block.
+     */
+    if (no_flush && strm->avail_in > dfltcc_state->block_size) {
+        masked_avail_in += (strm->avail_in - dfltcc_state->block_size);
+        strm->avail_in = dfltcc_state->block_size;
+    }
+
+    /* When we have an open non-BFINAL deflate block and caller indicates that
+     * the stream is ending, we need to close an open deflate block and open a
+     * BFINAL one.
+     */
+    need_empty_block = flush == Z_FINISH && param->bcf && !param->bhf;
+
+    /* Translate stream to parameter block */
+    param->cvt = CVT_ADLER32;
+    if (!no_flush)
+        /* We need to close a block. Always do this in software - when there is
+         * no input data, the hardware will not honor BCC. */
+        soft_bcc = 1;
+    if (flush == Z_FINISH && !param->bcf)
+        /* We are about to open a BFINAL block, set Block Header Final bit
+         * until the stream ends.
+         */
+        param->bhf = 1;
+    /* DFLTCC-CMPR will write to next_out, so make sure that buffers with
+     * higher precedence are empty.
+     */
+    Assert(state->pending == 0, "There must be no pending bytes");
+    Assert(state->bi_valid < 8, "There must be less than 8 pending bits");
+    param->sbb = (unsigned int)state->bi_valid;
+    if (param->sbb > 0)
+        *strm->next_out = (Byte)state->bi_buf;
+    if (param->hl)
+        param->nt = 0; /* Honor history */
+    param->cv = strm->adler;
+
+    /* When opening a block, choose a Huffman-Table Type */
+    if (!param->bcf) {
+        if (strm->total_in == 0 && dfltcc_state->block_threshold > 0) {
+            param->htt = HTT_FIXED;
+        }
+        else {
+            param->htt = HTT_DYNAMIC;
+            dfltcc_gdht(strm);
+        }
+    }
+
+    /* Deflate */
+    do {
+        cc = dfltcc_cmpr(strm);
+        if (strm->avail_in < 4096 && masked_avail_in > 0)
+            /* We are about to call DFLTCC with a small input buffer, which is
+             * inefficient. Since there is masked data, there will be at least
+             * one more DFLTCC call, so skip the current one and make the next
+             * one handle more data.
+             */
+            break;
+    } while (cc == DFLTCC_CC_AGAIN);
+
+    /* Translate parameter block to stream */
+    strm->msg = oesc_msg(dfltcc_state->msg, param->oesc);
+    state->bi_valid = param->sbb;
+    if (state->bi_valid == 0)
+        state->bi_buf = 0; /* Avoid accessing next_out */
+    else
+        state->bi_buf = *strm->next_out & ((1 << state->bi_valid) - 1);
+    strm->adler = param->cv;
+
+    /* Unmask the input data */
+    strm->avail_in += masked_avail_in;
+    masked_avail_in = 0;
+
+    /* If we encounter an error, it means there is a bug in DFLTCC call */
+    Assert(cc != DFLTCC_CC_OP2_CORRUPT || param->oesc == 0, "BUG");
+
+    /* Update Block-Continuation Flag. It will be used to check whether to call
+     * GDHT the next time.
+     */
+    if (cc == DFLTCC_CC_OK) {
+        if (soft_bcc) {
+            send_eobs(strm, param);
+            param->bcf = 0;
+            dfltcc_state->block_threshold =
+                strm->total_in + dfltcc_state->block_size;
+        } else
+            param->bcf = 1;
+        if (flush == Z_FINISH) {
+            if (need_empty_block)
+                /* Make the current deflate() call also close the stream */
+                return 0;
+            else {
+                bi_windup(state);
+                *result = finish_done;
+            }
+        } else {
+            if (flush == Z_FULL_FLUSH)
+                param->hl = 0; /* Clear history */
+            *result = flush == Z_NO_FLUSH ? need_more : block_done;
+        }
+    } else {
+        param->bcf = 1;
+        *result = need_more;
+    }
+    if (strm->avail_in != 0 && strm->avail_out != 0)
+        goto again; /* deflate() must use all input or all output */
+    return 1;
+}
diff --git a/lib/zlib_dfltcc/dfltcc_inflate.c b/lib/zlib_dfltcc/dfltcc_inflate.c
new file mode 100644
index 0000000..aa9ef23
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc_inflate.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: Zlib
+
+#include "../zlib_inflate/inflate.h"
+#include "dfltcc_util.h"
+#include "dfltcc.h"
+#include <asm/setup.h>
+#include <linux/zutil.h>
+
+/*
+ * Expand.
+ */
+int dfltcc_can_inflate(
+    z_streamp strm
+)
+{
+    struct inflate_state *state = (struct inflate_state *)strm->state;
+    struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
+
+    /* Check for kernel dfltcc command line parameter */
+    if (zlib_dfltcc_support == ZLIB_DFLTCC_DISABLED ||
+            zlib_dfltcc_support == ZLIB_DFLTCC_DEFLATE_ONLY)
+        return 0;
+
+    /* Unsupported compression settings */
+    if (state->wbits != HB_BITS)
+        return 0;
+
+    /* Unsupported hardware */
+    return is_bit_set(dfltcc_state->af.fns, DFLTCC_XPND) &&
+               is_bit_set(dfltcc_state->af.fmts, DFLTCC_FMT0);
+}
+
+static int dfltcc_was_inflate_used(
+    z_streamp strm
+)
+{
+    struct inflate_state *state = (struct inflate_state *)strm->state;
+    struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
+
+    return !param->nt;
+}
+
+static int dfltcc_inflate_disable(
+    z_streamp strm
+)
+{
+    struct inflate_state *state = (struct inflate_state *)strm->state;
+    struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
+
+    if (!dfltcc_can_inflate(strm))
+        return 0;
+    if (dfltcc_was_inflate_used(strm))
+        /* DFLTCC has already decompressed some data. Since there is not
+         * enough information to resume decompression in software, the call
+         * must fail.
+         */
+        return 1;
+    /* DFLTCC was not used yet - decompress in software */
+    memset(&dfltcc_state->af, 0, sizeof(dfltcc_state->af));
+    return 0;
+}
+
+static dfltcc_cc dfltcc_xpnd(
+    z_streamp strm
+)
+{
+    struct inflate_state *state = (struct inflate_state *)strm->state;
+    struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
+    size_t avail_in = strm->avail_in;
+    size_t avail_out = strm->avail_out;
+    dfltcc_cc cc;
+
+    cc = dfltcc(DFLTCC_XPND | HBT_CIRCULAR,
+                param, &strm->next_out, &avail_out,
+                &strm->next_in, &avail_in, state->window);
+    strm->avail_in = avail_in;
+    strm->avail_out = avail_out;
+    return cc;
+}
+
+dfltcc_inflate_action dfltcc_inflate(
+    z_streamp strm,
+    int flush,
+    int *ret
+)
+{
+    struct inflate_state *state = (struct inflate_state *)strm->state;
+    struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
+    struct dfltcc_param_v0 *param = &dfltcc_state->param;
+    dfltcc_cc cc;
+
+    if (flush == Z_BLOCK) {
+        /* DFLTCC does not support stopping on block boundaries */
+        if (dfltcc_inflate_disable(strm)) {
+            *ret = Z_STREAM_ERROR;
+            return DFLTCC_INFLATE_BREAK;
+        } else
+            return DFLTCC_INFLATE_SOFTWARE;
+    }
+
+    if (state->last) {
+        if (state->bits != 0) {
+            strm->next_in++;
+            strm->avail_in--;
+            state->bits = 0;
+        }
+        state->mode = CHECK;
+        return DFLTCC_INFLATE_CONTINUE;
+    }
+
+    if (strm->avail_in == 0 && !param->cf)
+        return DFLTCC_INFLATE_BREAK;
+
+    if (!state->window || state->wsize == 0) {
+        state->mode = MEM;
+        return DFLTCC_INFLATE_CONTINUE;
+    }
+
+    /* Translate stream to parameter block */
+    param->cvt = CVT_ADLER32;
+    param->sbb = state->bits;
+    param->hl = state->whave; /* Software and hardware history formats match */
+    param->ho = (state->write - state->whave) & ((1 << HB_BITS) - 1);
+    if (param->hl)
+        param->nt = 0; /* Honor history for the first block */
+    param->cv = state->flags ? REVERSE(state->check) : state->check;
+
+    /* Inflate */
+    do {
+        cc = dfltcc_xpnd(strm);
+    } while (cc == DFLTCC_CC_AGAIN);
+
+    /* Translate parameter block to stream */
+    strm->msg = oesc_msg(dfltcc_state->msg, param->oesc);
+    state->last = cc == DFLTCC_CC_OK;
+    state->bits = param->sbb;
+    state->whave = param->hl;
+    state->write = (param->ho + param->hl) & ((1 << HB_BITS) - 1);
+    state->check = state->flags ? REVERSE(param->cv) : param->cv;
+    if (cc == DFLTCC_CC_OP2_CORRUPT && param->oesc != 0) {
+        /* Report an error if stream is corrupted */
+        state->mode = BAD;
+        return DFLTCC_INFLATE_CONTINUE;
+    }
+    state->mode = TYPEDO;
+    /* Break if operands are exhausted, otherwise continue looping */
+    return (cc == DFLTCC_CC_OP1_TOO_SHORT || cc == DFLTCC_CC_OP2_TOO_SHORT) ?
+        DFLTCC_INFLATE_BREAK : DFLTCC_INFLATE_CONTINUE;
+}
diff --git a/lib/zlib_dfltcc/dfltcc_syms.c b/lib/zlib_dfltcc/dfltcc_syms.c
new file mode 100644
index 0000000..6f23481
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc_syms.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/lib/zlib_dfltcc/dfltcc_syms.c
+ *
+ * Exported symbols for the s390 zlib dfltcc support.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/zlib.h>
+#include "dfltcc.h"
+
+EXPORT_SYMBOL(dfltcc_can_deflate);
+EXPORT_SYMBOL(dfltcc_deflate);
+EXPORT_SYMBOL(dfltcc_reset);
+MODULE_LICENSE("GPL");
diff --git a/lib/zlib_dfltcc/dfltcc_util.h b/lib/zlib_dfltcc/dfltcc_util.h
new file mode 100644
index 0000000..4a46b50
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc_util.h
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: Zlib
+#ifndef DFLTCC_UTIL_H
+#define DFLTCC_UTIL_H
+
+#include <linux/zutil.h>
+
+/*
+ * C wrapper for the DEFLATE CONVERSION CALL instruction.
+ */
+typedef enum {
+    DFLTCC_CC_OK = 0,
+    DFLTCC_CC_OP1_TOO_SHORT = 1,
+    DFLTCC_CC_OP2_TOO_SHORT = 2,
+    DFLTCC_CC_OP2_CORRUPT = 2,
+    DFLTCC_CC_AGAIN = 3,
+} dfltcc_cc;
+
+#define DFLTCC_QAF 0
+#define DFLTCC_GDHT 1
+#define DFLTCC_CMPR 2
+#define DFLTCC_XPND 4
+#define HBT_CIRCULAR (1 << 7)
+#define HB_BITS 15
+#define HB_SIZE (1 << HB_BITS)
+
+static inline dfltcc_cc dfltcc(
+    int fn,
+    void *param,
+    Byte **op1,
+    size_t *len1,
+    const Byte **op2,
+    size_t *len2,
+    void *hist
+)
+{
+    Byte *t2 = op1 ? *op1 : NULL;
+    size_t t3 = len1 ? *len1 : 0;
+    const Byte *t4 = op2 ? *op2 : NULL;
+    size_t t5 = len2 ? *len2 : 0;
+    register int r0 __asm__("r0") = fn;
+    register void *r1 __asm__("r1") = param;
+    register Byte *r2 __asm__("r2") = t2;
+    register size_t r3 __asm__("r3") = t3;
+    register const Byte *r4 __asm__("r4") = t4;
+    register size_t r5 __asm__("r5") = t5;
+    int cc;
+
+    __asm__ volatile(
+                     ".insn rrf,0xb9390000,%[r2],%[r4],%[hist],0\n"
+                     "ipm %[cc]\n"
+                     : [r2] "+r" (r2)
+                     , [r3] "+r" (r3)
+                     , [r4] "+r" (r4)
+                     , [r5] "+r" (r5)
+                     , [cc] "=r" (cc)
+                     : [r0] "r" (r0)
+                     , [r1] "r" (r1)
+                     , [hist] "r" (hist)
+                     : "cc", "memory");
+    t2 = r2; t3 = r3; t4 = r4; t5 = r5;
+
+    if (op1)
+        *op1 = t2;
+    if (len1)
+        *len1 = t3;
+    if (op2)
+        *op2 = t4;
+    if (len2)
+        *len2 = t5;
+    return (cc >> 28) & 3;
+}
+
+static inline int is_bit_set(
+    const char *bits,
+    int n
+)
+{
+    return bits[n / 8] & (1 << (7 - (n % 8)));
+}
+
+static inline void turn_bit_off(
+    char *bits,
+    int n
+)
+{
+    bits[n / 8] &= ~(1 << (7 - (n % 8)));
+}
+
+static inline int dfltcc_are_params_ok(
+    int level,
+    uInt window_bits,
+    int strategy,
+    uLong level_mask
+)
+{
+    return (level_mask & (1 << level)) != 0 &&
+        (window_bits == HB_BITS) &&
+        (strategy == Z_DEFAULT_STRATEGY);
+}
+
+char *oesc_msg(char *buf, int oesc);
+
+#endif /* DFLTCC_UTIL_H */
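
is_bit_set()/turn_bit_off() address the facility and function bits MSB-first within each byte, matching the DFLTCC parameter-block layout: bit 0 is the 0x80 bit of byte 0, and bit 4 (DFLTCC_XPND) is the 0x08 bit of byte 0. A standalone check of that numbering, using local copies of the helpers:

#include <assert.h>
#include <stdio.h>

/* Local copies of the dfltcc_util.h helpers, for illustration only. */
static int is_bit_set(const char *bits, int n)
{
	return bits[n / 8] & (1 << (7 - (n % 8)));
}

static void turn_bit_off(char *bits, int n)
{
	bits[n / 8] &= ~(1 << (7 - (n % 8)));
}

int main(void)
{
	char fns[16] = { 0 };

	fns[0] = (char)0x88;		/* bits 0 and 4, MSB-first numbering */
	assert(is_bit_set(fns, 0));
	assert(is_bit_set(fns, 4));	/* DFLTCC_XPND */
	assert(!is_bit_set(fns, 1));

	turn_bit_off(fns, 4);
	assert(!is_bit_set(fns, 4));
	printf("ok\n");
	return 0;
}
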
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 48f14cd..67cc9b0 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -15,6 +15,16 @@
 #include "inffast.h"
 #include "infutil.h"
 
+/* architecture-specific bits */
+#ifdef CONFIG_ZLIB_DFLTCC
+#  include "../zlib_dfltcc/dfltcc.h"
+#else
+#define INFLATE_RESET_HOOK(strm) do {} while (0)
+#define INFLATE_TYPEDO_HOOK(strm, flush) do {} while (0)
+#define INFLATE_NEED_UPDATEWINDOW(strm) 1
+#define INFLATE_NEED_CHECKSUM(strm) 1
+#endif
+
 int zlib_inflate_workspacesize(void)
 {
     return sizeof(struct inflate_workspace);
@@ -42,6 +52,7 @@ int zlib_inflateReset(z_streamp strm)
     state->write = 0;
     state->whave = 0;
 
+    INFLATE_RESET_HOOK(strm);
     return Z_OK;
 }
 
@@ -66,7 +77,15 @@ int zlib_inflateInit2(z_streamp strm, int windowBits)
         return Z_STREAM_ERROR;
     }
     state->wbits = (unsigned)windowBits;
+#ifdef CONFIG_ZLIB_DFLTCC
+    /*
+     * DFLTCC requires the window to be page aligned.
+     * Thus, we overallocate and take the aligned portion of the buffer.
+     */
+    state->window = PTR_ALIGN(&WS(strm)->working_window[0], PAGE_SIZE);
+#else
     state->window = &WS(strm)->working_window[0];
+#endif
 
     return zlib_inflateReset(strm);
 }
@@ -227,11 +246,6 @@ static int zlib_inflateSyncPacket(z_streamp strm)
         bits -= bits & 7; \
     } while (0)
 
-/* Reverse the bytes in a 32-bit value */
-#define REVERSE(q) \
-    ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
-     (((q) & 0xff00) << 8) + (((q) & 0xff) << 24))
-
 /*
    inflate() uses a state machine to process as much input data and generate as
    much output data as possible before returning.  The state machine is
@@ -395,6 +409,7 @@ int zlib_inflate(z_streamp strm, int flush)
             if (flush == Z_BLOCK) goto inf_leave;
 	    /* fall through */
         case TYPEDO:
+            INFLATE_TYPEDO_HOOK(strm, flush);
             if (state->last) {
                 BYTEBITS();
                 state->mode = CHECK;
@@ -692,7 +707,7 @@ int zlib_inflate(z_streamp strm, int flush)
                 out -= left;
                 strm->total_out += out;
                 state->total += out;
-                if (out)
+                if (INFLATE_NEED_CHECKSUM(strm) && out)
                     strm->adler = state->check =
                         UPDATE(state->check, put - out, out);
                 out = left;
@@ -726,7 +741,8 @@ int zlib_inflate(z_streamp strm, int flush)
      */
   inf_leave:
     RESTORE();
-    if (state->wsize || (state->mode < CHECK && out != strm->avail_out))
+    if (INFLATE_NEED_UPDATEWINDOW(strm) &&
+            (state->wsize || (state->mode < CHECK && out != strm->avail_out)))
         zlib_updatewindow(strm, out);
 
     in -= strm->avail_in;
@@ -734,7 +750,7 @@ int zlib_inflate(z_streamp strm, int flush)
     strm->total_in += in;
     strm->total_out += out;
     state->total += out;
-    if (state->wrap && out)
+    if (INFLATE_NEED_CHECKSUM(strm) && state->wrap && out)
         strm->adler = state->check =
             UPDATE(state->check, strm->next_out - out, out);
 
diff --git a/lib/zlib_inflate/inflate.h b/lib/zlib_inflate/inflate.h
index 3d17b3d..f79337d 100644
--- a/lib/zlib_inflate/inflate.h
+++ b/lib/zlib_inflate/inflate.h
@@ -11,6 +11,8 @@
    subject to change. Applications should only use zlib.h.
  */
 
+#include "inftrees.h"
+
 /* Possible inflate modes between inflate() calls */
 typedef enum {
     HEAD,       /* i: waiting for magic header */
@@ -108,4 +110,10 @@ struct inflate_state {
     unsigned short work[288];   /* work area for code table building */
     code codes[ENOUGH];         /* space for code tables */
 };
+
+/* Reverse the bytes in a 32-bit value */
+#define REVERSE(q) \
+    ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
+     (((q) & 0xff00) << 8) + (((q) & 0xff) << 24))
+
 #endif
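
REVERSE() byte-swaps a 32-bit value; the DFLTCC inflate code applies it to the check value of CRC-32 (gzip) streams before and after the hardware call, which is why the macro moves into inflate.h where dfltcc_inflate.c can see it. A standalone check of the macro as copied from the header:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Copy of the REVERSE() macro now shared via inflate.h. */
#define REVERSE(q) \
    ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
     (((q) & 0xff00) << 8) + (((q) & 0xff) << 24))

int main(void)
{
	uint32_t v = 0x12345678;

	assert(REVERSE(v) == 0x78563412);
	/* Applying it twice gives the original value back. */
	assert(REVERSE(REVERSE(v)) == v);
	printf("%#x -> %#x\n", (unsigned)v, (unsigned)REVERSE(v));
	return 0;
}
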
diff --git a/lib/zlib_inflate/infutil.h b/lib/zlib_inflate/infutil.h
index eb1a900..784ab33 100644
--- a/lib/zlib_inflate/infutil.h
+++ b/lib/zlib_inflate/infutil.h
@@ -12,14 +12,28 @@
 #define _INFUTIL_H
 
 #include <linux/zlib.h>
+#ifdef CONFIG_ZLIB_DFLTCC
+#include "../zlib_dfltcc/dfltcc.h"
+#include <asm/page.h>
+#endif
 
 /* memory allocation for inflation */
 
 struct inflate_workspace {
 	struct inflate_state inflate_state;
-	unsigned char working_window[1 << MAX_WBITS];
+#ifdef CONFIG_ZLIB_DFLTCC
+	struct dfltcc_state dfltcc_state;
+	unsigned char working_window[(1 << MAX_WBITS) + PAGE_SIZE];
+#else
+	unsigned char working_window[(1 << MAX_WBITS)];
+#endif
 };
 
-#define WS(z) ((struct inflate_workspace *)(z->workspace))
+#ifdef CONFIG_ZLIB_DFLTCC
+/* dfltcc_state must be doubleword aligned for DFLTCC call */
+static_assert(offsetof(struct inflate_workspace, dfltcc_state) % 8 == 0);
+#endif
+
+#define WS(strm) ((struct inflate_workspace *)(strm->workspace))
 
 #endif
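
With CONFIG_ZLIB_DFLTCC the workspace grows working_window by one PAGE_SIZE, and the inflate.c hunk near the top of this section uses PTR_ALIGN() to select the page-aligned portion of that over-sized buffer, so the window handed to the hardware can start on a page boundary. The over-allocate-and-align idea itself can be sketched in plain userspace C; ptr_align() below is only an illustrative stand-in for the kernel's PTR_ALIGN():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define WINDOW_SIZE (1u << 15)   /* 1 << MAX_WBITS */
#define ALIGNMENT   4096u        /* stand-in for PAGE_SIZE */

/*
 * Round a pointer up to the next multiple of 'align' (a power of two),
 * the same arithmetic PTR_ALIGN() performs in the kernel.
 */
static void *ptr_align(void *p, uintptr_t align)
{
    return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
    /*
     * Over-allocate by one alignment unit so an aligned window of the
     * full requested size always fits inside the buffer.
     */
    unsigned char *raw = malloc(WINDOW_SIZE + ALIGNMENT);
    unsigned char *window;

    if (!raw)
        return 1;

    window = ptr_align(raw, ALIGNMENT);
    printf("raw buffer at %p, aligned window at %p\n",
           (void *)raw, (void *)window);

    free(raw);
    return 0;
}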
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 327b3eb..0271b22 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -117,3 +117,24 @@
     depends on STRICT_KERNEL_RWX
     ---help---
       This option enables a testcase for the setting rodata read-only.
+
+config GENERIC_PTDUMP
+	bool
+
+config PTDUMP_CORE
+	bool
+
+config PTDUMP_DEBUGFS
+	bool "Export kernel pagetable layout to userspace via debugfs"
+	depends on DEBUG_KERNEL
+	depends on DEBUG_FS
+	depends on GENERIC_PTDUMP
+	select PTDUMP_CORE
+	help
+	  Say Y here if you want to show the kernel pagetable layout in a
+	  debugfs file. This information is only useful for kernel developers
+	  who are working in architecture specific areas of the kernel.
+	  It is probably not a good idea to enable this feature in a production
+	  kernel.
+
+	  If in doubt, say N.
diff --git a/mm/Makefile b/mm/Makefile
index 1937cc2..272e660 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -20,6 +20,7 @@
 KCOV_INSTRUMENT_memcontrol.o := n
 KCOV_INSTRUMENT_mmzone.o := n
 KCOV_INSTRUMENT_vmstat.o := n
+KCOV_INSTRUMENT_failslab.o := n
 
 CFLAGS_init-mm.o += $(call cc-disable-warning, override-init)
 CFLAGS_init-mm.o += $(call cc-disable-warning, initializer-overrides)
@@ -108,3 +109,4 @@
 obj-$(CONFIG_HMM_MIRROR) += hmm.o
 obj-$(CONFIG_MEMFD_CREATE) += memfd.o
 obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o
+obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index c360f6a..62f05f6 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -21,6 +21,7 @@ struct backing_dev_info noop_backing_dev_info = {
 EXPORT_SYMBOL_GPL(noop_backing_dev_info);
 
 static struct class *bdi_class;
+const char *bdi_unknown_name = "(unknown)";
 
 /*
  * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
diff --git a/mm/debug.c b/mm/debug.c
index 0461df1..ecccd9f 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -46,7 +46,15 @@ void __dump_page(struct page *page, const char *reason)
 {
 	struct address_space *mapping;
 	bool page_poisoned = PagePoisoned(page);
+	/*
+	 * Accessing the pageblock without the zone lock. It could change to
+	 * "isolate" again in the meantime, but since we are just dumping the
+	 * state for debugging, it should be fine to accept a bit of
+	 * inaccuracy here due to racing.
+	 */
+	bool page_cma = is_migrate_cma_page(page);
 	int mapcount;
+	char *type = "";
 
 	/*
 	 * If struct page is poisoned don't access Page*() functions as that
@@ -78,9 +86,9 @@ void __dump_page(struct page *page, const char *reason)
 			page, page_ref_count(page), mapcount,
 			page->mapping, page_to_pgoff(page));
 	if (PageKsm(page))
-		pr_warn("ksm flags: %#lx(%pGp)\n", page->flags, &page->flags);
+		type = "ksm ";
 	else if (PageAnon(page))
-		pr_warn("anon flags: %#lx(%pGp)\n", page->flags, &page->flags);
+		type = "anon ";
 	else if (mapping) {
 		if (mapping->host && mapping->host->i_dentry.first) {
 			struct dentry *dentry;
@@ -88,10 +96,12 @@ void __dump_page(struct page *page, const char *reason)
 			pr_warn("%ps name:\"%pd\"\n", mapping->a_ops, dentry);
 		} else
 			pr_warn("%ps\n", mapping->a_ops);
-		pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);
 	}
 	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
 
+	pr_warn("%sflags: %#lx(%pGp)%s\n", type, page->flags, &page->flags,
+		page_cma ? " CMA" : "");
+
 hex_only:
 	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
 			sizeof(unsigned long), page,
@@ -153,7 +163,7 @@ void dump_mm(const struct mm_struct *mm)
 #endif
 		"exe_file %px\n"
 #ifdef CONFIG_MMU_NOTIFIER
-		"mmu_notifier_mm %px\n"
+		"notifier_subscriptions %px\n"
 #endif
 #ifdef CONFIG_NUMA_BALANCING
 		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
@@ -185,7 +195,7 @@ void dump_mm(const struct mm_struct *mm)
 #endif
 		mm->exe_file,
 #ifdef CONFIG_MMU_NOTIFIER
-		mm->mmu_notifier_mm,
+		mm->notifier_subscriptions,
 #endif
 #ifdef CONFIG_NUMA_BALANCING
 		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
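
The __dump_page() change above stops printing the flags line from three separate branches; it now just picks a "ksm " or "anon " prefix and emits one pr_warn(), optionally tagging CMA pages. Reduced to a standalone userspace sketch (the enum and function names here are made up purely for illustration), the select-a-prefix-then-print-once shape looks like this:

#include <stdbool.h>
#include <stdio.h>

enum page_kind { PAGE_FILE, PAGE_ANON, PAGE_KSM };

/* Pick the prefix first, then print the flags line exactly once. */
static void dump_flags(enum page_kind kind, unsigned long flags, bool cma)
{
    const char *type = "";

    if (kind == PAGE_KSM)
        type = "ksm ";
    else if (kind == PAGE_ANON)
        type = "anon ";

    printf("%sflags: %#lx%s\n", type, flags, cma ? " CMA" : "");
}

int main(void)
{
    dump_flags(PAGE_ANON, 0x1ul, false);
    dump_flags(PAGE_KSM, 0x2001ul, true);
    return 0;
}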
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
index 1826f19..a0018ad 100644
--- a/mm/early_ioremap.c
+++ b/mm/early_ioremap.c
@@ -121,8 +121,8 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
 		}
 	}
 
-	if (WARN(slot < 0, "%s(%08llx, %08lx) not found slot\n",
-		 __func__, (u64)phys_addr, size))
+	if (WARN(slot < 0, "%s(%pa, %08lx) not found slot\n",
+		 __func__, &phys_addr, size))
 		return NULL;
 
 	/* Don't allow wraparound or zero size */
@@ -158,8 +158,8 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
 		--idx;
 		--nrpages;
 	}
-	WARN(early_ioremap_debug, "%s(%08llx, %08lx) [%d] => %08lx + %08lx\n",
-	     __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);
+	WARN(early_ioremap_debug, "%s(%pa, %08lx) [%d] => %08lx + %08lx\n",
+	     __func__, &phys_addr, size, slot, offset, slot_virt[slot]);
 
 	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
 	return prev_map[slot];
diff --git a/mm/filemap.c b/mm/filemap.c
index bf6aa30..1784478 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -632,33 +632,6 @@ static bool mapping_needs_writeback(struct address_space *mapping)
 	return mapping->nrpages;
 }
 
-int filemap_write_and_wait(struct address_space *mapping)
-{
-	int err = 0;
-
-	if (mapping_needs_writeback(mapping)) {
-		err = filemap_fdatawrite(mapping);
-		/*
-		 * Even if the above returned error, the pages may be
-		 * written partially (e.g. -ENOSPC), so we wait for it.
-		 * But the -EIO is special case, it may indicate the worst
-		 * thing (e.g. bug) happened, so we avoid waiting for it.
-		 */
-		if (err != -EIO) {
-			int err2 = filemap_fdatawait(mapping);
-			if (!err)
-				err = err2;
-		} else {
-			/* Clear any previously stored errors */
-			filemap_check_errors(mapping);
-		}
-	} else {
-		err = filemap_check_errors(mapping);
-	}
-	return err;
-}
-EXPORT_SYMBOL(filemap_write_and_wait);
-
 /**
  * filemap_write_and_wait_range - write out & wait on a file range
  * @mapping:	the address_space for the pages
@@ -680,7 +653,12 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 	if (mapping_needs_writeback(mapping)) {
 		err = __filemap_fdatawrite_range(mapping, lstart, lend,
 						 WB_SYNC_ALL);
-		/* See comment of filemap_write_and_wait() */
+		/*
+		 * Even if the above returned error, the pages may be
+		 * written partially (e.g. -ENOSPC), so we wait for it.
+		 * But -EIO is a special case: it may indicate the worst
+		 * thing (e.g. a bug) happened, so we avoid waiting for it.
+		 */
 		if (err != -EIO) {
 			int err2 = filemap_fdatawait_range(mapping,
 						lstart, lend);
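
With filemap_write_and_wait() removed from this file, its error-handling comment is folded into filemap_write_and_wait_range(): even when the writeout pass fails (say with -ENOSPC), the code still waits for whatever was already submitted, it keeps the first error rather than the second, and only a fatal -EIO skips the wait. That "remember the first error, still wait unless fatal" pattern, as a hypothetical userspace sketch with stand-in functions:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two phases of a flush. */
static int start_writeback(void)
{
    return -ENOSPC;   /* pretend the writeout pass partially failed */
}

static int wait_for_writeback(void)
{
    return 0;
}

/* Remember the first error; still wait unless the error is fatal (-EIO). */
static int write_and_wait(void)
{
    int err = start_writeback();

    if (err != -EIO) {
        int err2 = wait_for_writeback();

        if (!err)
            err = err2;
    }
    return err;
}

int main(void)
{
    printf("write_and_wait() = %d\n", write_and_wait());
    return 0;
}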
diff --git a/mm/gup.c b/mm/gup.c
index 7646bf9..1b521e0 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -29,8 +29,23 @@ struct follow_page_context {
 	unsigned int page_mask;
 };
 
+/*
+ * Return the compound head page with ref appropriately incremented,
+ * or NULL if that failed.
+ */
+static inline struct page *try_get_compound_head(struct page *page, int refs)
+{
+	struct page *head = compound_head(page);
+
+	if (WARN_ON_ONCE(page_ref_count(head) < 0))
+		return NULL;
+	if (unlikely(!page_cache_add_speculative(head, refs)))
+		return NULL;
+	return head;
+}
+
 /**
- * put_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
+ * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
  * @pages:  array of pages to be maybe marked dirty, and definitely released.
  * @npages: number of pages in the @pages array.
  * @make_dirty: whether to mark the pages dirty
@@ -40,19 +55,19 @@ struct follow_page_context {
  *
  * For each page in the @pages array, make that page (or its head page, if a
  * compound page) dirty, if @make_dirty is true, and if the page was previously
- * listed as clean. In any case, releases all pages using put_user_page(),
- * possibly via put_user_pages(), for the non-dirty case.
+ * listed as clean. In any case, releases all pages using unpin_user_page(),
+ * possibly via unpin_user_pages(), for the non-dirty case.
  *
- * Please see the put_user_page() documentation for details.
+ * Please see the unpin_user_page() documentation for details.
  *
  * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
  * required, then the caller should a) verify that this is really correct,
  * because _lock() is usually required, and b) hand code it:
- * set_page_dirty_lock(), put_user_page().
+ * set_page_dirty_lock(), unpin_user_page().
  *
  */
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
-			       bool make_dirty)
+void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+				 bool make_dirty)
 {
 	unsigned long index;
 
@@ -63,7 +78,7 @@ void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
 	 */
 
 	if (!make_dirty) {
-		put_user_pages(pages, npages);
+		unpin_user_pages(pages, npages);
 		return;
 	}
 
@@ -91,21 +106,21 @@ void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
 		 */
 		if (!PageDirty(page))
 			set_page_dirty_lock(page);
-		put_user_page(page);
+		unpin_user_page(page);
 	}
 }
-EXPORT_SYMBOL(put_user_pages_dirty_lock);
+EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
 
 /**
- * put_user_pages() - release an array of gup-pinned pages.
+ * unpin_user_pages() - release an array of gup-pinned pages.
  * @pages:  array of pages to be marked dirty and released.
  * @npages: number of pages in the @pages array.
  *
- * For each page in the @pages array, release the page using put_user_page().
+ * For each page in the @pages array, release the page using unpin_user_page().
  *
- * Please see the put_user_page() documentation for details.
+ * Please see the unpin_user_page() documentation for details.
  */
-void put_user_pages(struct page **pages, unsigned long npages)
+void unpin_user_pages(struct page **pages, unsigned long npages)
 {
 	unsigned long index;
 
@@ -115,9 +130,9 @@ void put_user_pages(struct page **pages, unsigned long npages)
 	 * single operation to the head page should suffice.
 	 */
 	for (index = 0; index < npages; index++)
-		put_user_page(pages[index]);
+		unpin_user_page(pages[index]);
 }
-EXPORT_SYMBOL(put_user_pages);
+EXPORT_SYMBOL(unpin_user_pages);
 
 #ifdef CONFIG_MMU
 static struct page *no_page_table(struct vm_area_struct *vma,
@@ -179,6 +194,10 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t *ptep, pte;
 
+	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
+	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
+			 (FOLL_PIN | FOLL_GET)))
+		return ERR_PTR(-EINVAL);
 retry:
 	if (unlikely(pmd_bad(*pmd)))
 		return no_page_table(vma, flags);
@@ -323,7 +342,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	pmdval = READ_ONCE(*pmd);
 	if (pmd_none(pmdval))
 		return no_page_table(vma, flags);
-	if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
+	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
 		page = follow_huge_pmd(mm, address, pmd, flags);
 		if (page)
 			return page;
@@ -433,7 +452,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
 	pud = pud_offset(p4dp, address);
 	if (pud_none(*pud))
 		return no_page_table(vma, flags);
-	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
+	if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
 		page = follow_huge_pud(mm, address, pud, flags);
 		if (page)
 			return page;
@@ -796,7 +815,7 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
 	start = untagged_addr(start);
 
-	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
 
 	/*
 	 * If FOLL_FORCE is set then do not force a full fault as the hinting
@@ -1020,7 +1039,16 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 		BUG_ON(*locked != 1);
 	}
 
-	if (pages)
+	/*
+	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
+	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
+	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
+	 * for FOLL_GET, not for the newer FOLL_PIN.
+	 *
+	 * FOLL_PIN always expects pages to be non-null, but no need to assert
+	 * that here, as any failures will be obvious enough.
+	 */
+	if (pages && !(flags & FOLL_PIN))
 		flags |= FOLL_GET;
 
 	pages_done = 0;
@@ -1096,88 +1124,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 	return pages_done;
 }
 
-/*
- * get_user_pages_remote() - pin user pages in memory
- * @tsk:	the task_struct to use for page fault accounting, or
- *		NULL if faults are not to be recorded.
- * @mm:		mm_struct of target mm
- * @start:	starting user address
- * @nr_pages:	number of pages from start to pin
- * @gup_flags:	flags modifying lookup behaviour
- * @pages:	array that receives pointers to the pages pinned.
- *		Should be at least nr_pages long. Or NULL, if caller
- *		only intends to ensure the pages are faulted in.
- * @vmas:	array of pointers to vmas corresponding to each page.
- *		Or NULL if the caller does not require them.
- * @locked:	pointer to lock flag indicating whether lock is held and
- *		subsequently whether VM_FAULT_RETRY functionality can be
- *		utilised. Lock must initially be held.
- *
- * Returns either number of pages pinned (which may be less than the
- * number requested), or an error. Details about the return value:
- *
- * -- If nr_pages is 0, returns 0.
- * -- If nr_pages is >0, but no pages were pinned, returns -errno.
- * -- If nr_pages is >0, and some pages were pinned, returns the number of
- *    pages pinned. Again, this may be less than nr_pages.
- *
- * The caller is responsible for releasing returned @pages, via put_page().
- *
- * @vmas are valid only as long as mmap_sem is held.
- *
- * Must be called with mmap_sem held for read or write.
- *
- * get_user_pages walks a process's page tables and takes a reference to
- * each struct page that each user address corresponds to at a given
- * instant. That is, it takes the page that would be accessed if a user
- * thread accesses the given user virtual address at that instant.
- *
- * This does not guarantee that the page exists in the user mappings when
- * get_user_pages returns, and there may even be a completely different
- * page there in some cases (eg. if mmapped pagecache has been invalidated
- * and subsequently re faulted). However it does guarantee that the page
- * won't be freed completely. And mostly callers simply care that the page
- * contains data that was valid *at some point in time*. Typically, an IO
- * or similar operation cannot guarantee anything stronger anyway because
- * locks can't be held over the syscall boundary.
- *
- * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
- * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
- * be called after the page is finished with, and before put_page is called.
- *
- * get_user_pages is typically used for fewer-copy IO operations, to get a
- * handle on the memory by some means other than accesses via the user virtual
- * addresses. The pages may be submitted for DMA to devices or accessed via
- * their kernel linear mapping (via the kmap APIs). Care should be taken to
- * use the correct cache flushing APIs.
- *
- * See also get_user_pages_fast, for performance critical applications.
- *
- * get_user_pages should be phased out in favor of
- * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
- * should use get_user_pages because it cannot pass
- * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
- */
-long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, unsigned long nr_pages,
-		unsigned int gup_flags, struct page **pages,
-		struct vm_area_struct **vmas, int *locked)
-{
-	/*
-	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
-	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
-	 * vmas.  As there are no users of this flag in this call we simply
-	 * disallow this option for now.
-	 */
-	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
-		return -EINVAL;
-
-	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
-				       locked,
-				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
-}
-EXPORT_SYMBOL(get_user_pages_remote);
-
 /**
  * populate_vma_page_range() -  populate a range of pages in the vma.
  * @vma:   target vma
@@ -1612,6 +1558,116 @@ static __always_inline long __gup_longterm_locked(struct task_struct *tsk,
 #endif /* CONFIG_FS_DAX || CONFIG_CMA */
 
 /*
+ * get_user_pages_remote() - pin user pages in memory
+ * @tsk:	the task_struct to use for page fault accounting, or
+ *		NULL if faults are not to be recorded.
+ * @mm:		mm_struct of target mm
+ * @start:	starting user address
+ * @nr_pages:	number of pages from start to pin
+ * @gup_flags:	flags modifying lookup behaviour
+ * @pages:	array that receives pointers to the pages pinned.
+ *		Should be at least nr_pages long. Or NULL, if caller
+ *		only intends to ensure the pages are faulted in.
+ * @vmas:	array of pointers to vmas corresponding to each page.
+ *		Or NULL if the caller does not require them.
+ * @locked:	pointer to lock flag indicating whether lock is held and
+ *		subsequently whether VM_FAULT_RETRY functionality can be
+ *		utilised. Lock must initially be held.
+ *
+ * Returns either number of pages pinned (which may be less than the
+ * number requested), or an error. Details about the return value:
+ *
+ * -- If nr_pages is 0, returns 0.
+ * -- If nr_pages is >0, but no pages were pinned, returns -errno.
+ * -- If nr_pages is >0, and some pages were pinned, returns the number of
+ *    pages pinned. Again, this may be less than nr_pages.
+ *
+ * The caller is responsible for releasing returned @pages, via put_page().
+ *
+ * @vmas are valid only as long as mmap_sem is held.
+ *
+ * Must be called with mmap_sem held for read or write.
+ *
+ * get_user_pages walks a process's page tables and takes a reference to
+ * each struct page that each user address corresponds to at a given
+ * instant. That is, it takes the page that would be accessed if a user
+ * thread accesses the given user virtual address at that instant.
+ *
+ * This does not guarantee that the page exists in the user mappings when
+ * get_user_pages returns, and there may even be a completely different
+ * page there in some cases (e.g. if mmapped pagecache has been invalidated
+ * and subsequently re-faulted). However it does guarantee that the page
+ * won't be freed completely. And mostly callers simply care that the page
+ * contains data that was valid *at some point in time*. Typically, an IO
+ * or similar operation cannot guarantee anything stronger anyway because
+ * locks can't be held over the syscall boundary.
+ *
+ * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
+ * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
+ * be called after the page is finished with, and before put_page is called.
+ *
+ * get_user_pages is typically used for fewer-copy IO operations, to get a
+ * handle on the memory by some means other than accesses via the user virtual
+ * addresses. The pages may be submitted for DMA to devices or accessed via
+ * their kernel linear mapping (via the kmap APIs). Care should be taken to
+ * use the correct cache flushing APIs.
+ *
+ * See also get_user_pages_fast, for performance critical applications.
+ *
+ * get_user_pages should be phased out in favor of
+ * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
+ * should use get_user_pages because it cannot pass
+ * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
+ */
+#ifdef CONFIG_MMU
+long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, unsigned long nr_pages,
+		unsigned int gup_flags, struct page **pages,
+		struct vm_area_struct **vmas, int *locked)
+{
+	/*
+	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
+	 * never directly by the caller, so enforce that with an assertion:
+	 */
+	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
+		return -EINVAL;
+
+	/*
+	 * Parts of FOLL_LONGTERM behavior are incompatible with
+	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
+	 * vmas. However, this only comes up if locked is set, and there are
+	 * callers that do request FOLL_LONGTERM, but do not set locked. So,
+	 * allow what we can.
+	 */
+	if (gup_flags & FOLL_LONGTERM) {
+		if (WARN_ON_ONCE(locked))
+			return -EINVAL;
+		/*
+		 * This will check the vmas (even if our vmas arg is NULL)
+		 * and return -ENOTSUPP if DAX isn't allowed in this case:
+		 */
+		return __gup_longterm_locked(tsk, mm, start, nr_pages, pages,
+					     vmas, gup_flags | FOLL_TOUCH |
+					     FOLL_REMOTE);
+	}
+
+	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
+				       locked,
+				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
+}
+EXPORT_SYMBOL(get_user_pages_remote);
+
+#else /* CONFIG_MMU */
+long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+			   unsigned long start, unsigned long nr_pages,
+			   unsigned int gup_flags, struct page **pages,
+			   struct vm_area_struct **vmas, int *locked)
+{
+	return 0;
+}
+#endif /* !CONFIG_MMU */
+
+/*
  * This is the same as get_user_pages_remote(), just with a
  * less-flexible calling convention where we assume that the task
  * and mm being operated on are the current task's and don't allow
@@ -1622,6 +1678,13 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
 		unsigned int gup_flags, struct page **pages,
 		struct vm_area_struct **vmas)
 {
+	/*
+	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
+	 * never directly by the caller, so enforce that with an assertion:
+	 */
+	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
+		return -EINVAL;
+
 	return __gup_longterm_locked(current, current->mm, start, nr_pages,
 				     pages, vmas, gup_flags | FOLL_TOUCH);
 }
@@ -1729,7 +1792,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  * Before activating this code, please be aware that the following assumptions
  * are currently made:
  *
- *  *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
+ *  *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
  *  free pages containing page tables or TLB flushing requires IPI broadcast.
  *
  *  *) ptes can be read atomically by the architecture.
@@ -1807,20 +1870,6 @@ static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
 	}
 }
 
-/*
- * Return the compund head page with ref appropriately incremented,
- * or NULL if that failed.
- */
-static inline struct page *try_get_compound_head(struct page *page, int refs)
-{
-	struct page *head = compound_head(page);
-	if (WARN_ON_ONCE(page_ref_count(head) < 0))
-		return NULL;
-	if (unlikely(!page_cache_add_speculative(head, refs)))
-		return NULL;
-	return head;
-}
-
 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 			 unsigned int flags, struct page **pages, int *nr)
@@ -1978,6 +2027,29 @@ static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
 }
 #endif
 
+static int record_subpages(struct page *page, unsigned long addr,
+			   unsigned long end, struct page **pages)
+{
+	int nr;
+
+	for (nr = 0; addr != end; addr += PAGE_SIZE)
+		pages[nr++] = page++;
+
+	return nr;
+}
+
+static void put_compound_head(struct page *page, int refs)
+{
+	VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
+	/*
+	 * Calling put_page() for each ref is unnecessarily slow. Only the last
+	 * ref needs a put_page().
+	 */
+	if (refs > 1)
+		page_ref_sub(page, refs - 1);
+	put_page(page);
+}
+
 #ifdef CONFIG_ARCH_HAS_HUGEPD
 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
 				      unsigned long sz)
@@ -2007,32 +2079,20 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 	/* hugepages are never "special" */
 	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 
-	refs = 0;
 	head = pte_page(pte);
-
 	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
-	do {
-		VM_BUG_ON(compound_head(page) != head);
-		pages[*nr] = page;
-		(*nr)++;
-		page++;
-		refs++;
-	} while (addr += PAGE_SIZE, addr != end);
+	refs = record_subpages(page, addr, end, pages + *nr);
 
 	head = try_get_compound_head(head, refs);
-	if (!head) {
-		*nr -= refs;
+	if (!head)
 		return 0;
-	}
 
 	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
-		/* Could be optimized better */
-		*nr -= refs;
-		while (refs--)
-			put_page(head);
+		put_compound_head(head, refs);
 		return 0;
 	}
 
+	*nr += refs;
 	SetPageReferenced(head);
 	return 1;
 }
@@ -2079,28 +2139,19 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 		return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
 	}
 
-	refs = 0;
 	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
-	do {
-		pages[*nr] = page;
-		(*nr)++;
-		page++;
-		refs++;
-	} while (addr += PAGE_SIZE, addr != end);
+	refs = record_subpages(page, addr, end, pages + *nr);
 
 	head = try_get_compound_head(pmd_page(orig), refs);
-	if (!head) {
-		*nr -= refs;
+	if (!head)
 		return 0;
-	}
 
 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
-		*nr -= refs;
-		while (refs--)
-			put_page(head);
+		put_compound_head(head, refs);
 		return 0;
 	}
 
+	*nr += refs;
 	SetPageReferenced(head);
 	return 1;
 }
@@ -2120,28 +2171,19 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
 		return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
 	}
 
-	refs = 0;
 	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
-	do {
-		pages[*nr] = page;
-		(*nr)++;
-		page++;
-		refs++;
-	} while (addr += PAGE_SIZE, addr != end);
+	refs = record_subpages(page, addr, end, pages + *nr);
 
 	head = try_get_compound_head(pud_page(orig), refs);
-	if (!head) {
-		*nr -= refs;
+	if (!head)
 		return 0;
-	}
 
 	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
-		*nr -= refs;
-		while (refs--)
-			put_page(head);
+		put_compound_head(head, refs);
 		return 0;
 	}
 
+	*nr += refs;
 	SetPageReferenced(head);
 	return 1;
 }
@@ -2157,28 +2199,20 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
 		return 0;
 
 	BUILD_BUG_ON(pgd_devmap(orig));
-	refs = 0;
+
 	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
-	do {
-		pages[*nr] = page;
-		(*nr)++;
-		page++;
-		refs++;
-	} while (addr += PAGE_SIZE, addr != end);
+	refs = record_subpages(page, addr, end, pages + *nr);
 
 	head = try_get_compound_head(pgd_page(orig), refs);
-	if (!head) {
-		*nr -= refs;
+	if (!head)
 		return 0;
-	}
 
 	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
-		*nr -= refs;
-		while (refs--)
-			put_page(head);
+		put_compound_head(head, refs);
 		return 0;
 	}
 
+	*nr += refs;
 	SetPageReferenced(head);
 	return 1;
 }
@@ -2237,7 +2271,7 @@ static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
 		pud_t pud = READ_ONCE(*pudp);
 
 		next = pud_addr_end(addr, end);
-		if (pud_none(pud))
+		if (unlikely(!pud_present(pud)))
 			return 0;
 		if (unlikely(pud_huge(pud))) {
 			if (!gup_huge_pud(pud, pudp, addr, next, flags,
@@ -2393,29 +2427,15 @@ static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
 	return ret;
 }
 
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start:	starting user address
- * @nr_pages:	number of pages from start to pin
- * @gup_flags:	flags modifying pin behaviour
- * @pages:	array that receives pointers to the pages pinned.
- *		Should be at least nr_pages long.
- *
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
- * If not successful, it will fall back to taking the lock and
- * calling get_user_pages().
- *
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int get_user_pages_fast(unsigned long start, int nr_pages,
-			unsigned int gup_flags, struct page **pages)
+static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
+					unsigned int gup_flags,
+					struct page **pages)
 {
 	unsigned long addr, len, end;
 	int nr = 0, ret = 0;
 
-	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM)))
+	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
+				       FOLL_FORCE | FOLL_PIN)))
 		return -EINVAL;
 
 	start = untagged_addr(start) & PAGE_MASK;
@@ -2455,4 +2475,103 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
 
 	return ret;
 }
+
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start:	starting user address
+ * @nr_pages:	number of pages from start to pin
+ * @gup_flags:	flags modifying pin behaviour
+ * @pages:	array that receives pointers to the pages pinned.
+ *		Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number requested.
+ * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
+ * -errno.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
+{
+	/*
+	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
+	 * never directly by the caller, so enforce that:
+	 */
+	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
+		return -EINVAL;
+
+	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
+}
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
+
+/**
+ * pin_user_pages_fast() - pin user pages in memory without taking locks
+ *
+ * For now, this is a placeholder function, until various call sites are
+ * converted to use the correct get_user_pages*() or pin_user_pages*() API. So,
+ * this is identical to get_user_pages_fast().
+ *
+ * This is intended for Case 1 (DIO) in Documentation/vm/pin_user_pages.rst. It
+ * is NOT intended for Case 2 (RDMA: long-term pins).
+ */
+int pin_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
+{
+	/*
+	 * This is a placeholder, until the pin functionality is activated.
+	 * Until then, just behave like the corresponding get_user_pages*()
+	 * routine.
+	 */
+	return get_user_pages_fast(start, nr_pages, gup_flags, pages);
+}
+EXPORT_SYMBOL_GPL(pin_user_pages_fast);
+
+/**
+ * pin_user_pages_remote() - pin pages of a remote process (task != current)
+ *
+ * For now, this is a placeholder function, until various call sites are
+ * converted to use the correct get_user_pages*() or pin_user_pages*() API. So,
+ * this is identical to get_user_pages_remote().
+ *
+ * This is intended for Case 1 (DIO) in Documentation/vm/pin_user_pages.rst. It
+ * is NOT intended for Case 2 (RDMA: long-term pins).
+ */
+long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+			   unsigned long start, unsigned long nr_pages,
+			   unsigned int gup_flags, struct page **pages,
+			   struct vm_area_struct **vmas, int *locked)
+{
+	/*
+	 * This is a placeholder, until the pin functionality is activated.
+	 * Until then, just behave like the corresponding get_user_pages*()
+	 * routine.
+	 */
+	return get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags, pages,
+				     vmas, locked);
+}
+EXPORT_SYMBOL(pin_user_pages_remote);
+
+/**
+ * pin_user_pages() - pin user pages in memory for use by other devices
+ *
+ * For now, this is a placeholder function, until various call sites are
+ * converted to use the correct get_user_pages*() or pin_user_pages*() API. So,
+ * this is identical to get_user_pages().
+ *
+ * This is intended for Case 1 (DIO) in Documentation/vm/pin_user_pages.rst. It
+ * is NOT intended for Case 2 (RDMA: long-term pins).
+ */
+long pin_user_pages(unsigned long start, unsigned long nr_pages,
+		    unsigned int gup_flags, struct page **pages,
+		    struct vm_area_struct **vmas)
+{
+	/*
+	 * This is a placeholder, until the pin functionality is activated.
+	 * Until then, just behave like the corresponding get_user_pages*()
+	 * routine.
+	 */
+	return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+}
+EXPORT_SYMBOL(pin_user_pages);
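
Several hunks above replace the open-coded per-subpage loops with record_subpages(), a single try_get_compound_head() on the head page, and put_compound_head() on the rollback path, so the reference count is bumped once by 'refs' and, on failure, dropped as one page_ref_sub(refs - 1) plus a final put_page(). Stripped of the struct page machinery, the counting pattern can be shown with a plain atomic counter in userspace (this is only an analogy, not kernel code):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for the reference count of a compound head page. */
static atomic_int head_refcount = 1;

/* Take 'refs' references with a single atomic addition. */
static void get_head(int refs)
{
    atomic_fetch_add(&head_refcount, refs);
}

/*
 * Drop 'refs' references the way put_compound_head() does: refs - 1 in
 * one subtraction, then one final put.
 */
static void put_head(int refs)
{
    if (refs > 1)
        atomic_fetch_sub(&head_refcount, refs - 1);
    atomic_fetch_sub(&head_refcount, 1);   /* the final put */
}

int main(void)
{
    get_head(8);                 /* e.g. eight subpages recorded at once */
    assert(atomic_load(&head_refcount) == 9);

    put_head(8);                 /* the PTE changed under us: roll back */
    assert(atomic_load(&head_refcount) == 1);

    printf("refcount back to %d\n", atomic_load(&head_refcount));
    return 0;
}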
diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
index ad9d5b1..8dba38e 100644
--- a/mm/gup_benchmark.c
+++ b/mm/gup_benchmark.c
@@ -49,18 +49,21 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
 			nr = (next - addr) / PAGE_SIZE;
 		}
 
+		/* Filter out most gup flags: only allow a tiny subset here: */
+		gup->flags &= FOLL_WRITE;
+
 		switch (cmd) {
 		case GUP_FAST_BENCHMARK:
-			nr = get_user_pages_fast(addr, nr, gup->flags & 1,
+			nr = get_user_pages_fast(addr, nr, gup->flags,
 						 pages + i);
 			break;
 		case GUP_LONGTERM_BENCHMARK:
 			nr = get_user_pages(addr, nr,
-					    (gup->flags & 1) | FOLL_LONGTERM,
+					    gup->flags | FOLL_LONGTERM,
 					    pages + i, NULL);
 			break;
 		case GUP_BENCHMARK:
-			nr = get_user_pages(addr, nr, gup->flags & 1, pages + i,
+			nr = get_user_pages(addr, nr, gup->flags, pages + i,
 					    NULL);
 			break;
 		default:
diff --git a/mm/hmm.c b/mm/hmm.c
index d379cb6..72e5a6d 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -186,7 +186,7 @@ static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
 }
 
 static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
-			     struct mm_walk *walk)
+			     __always_unused int depth, struct mm_walk *walk)
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
@@ -380,7 +380,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 again:
 	pmd = READ_ONCE(*pmdp);
 	if (pmd_none(pmd))
-		return hmm_vma_walk_hole(start, end, walk);
+		return hmm_vma_walk_hole(start, end, -1, walk);
 
 	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
 		bool fault, write_fault;
@@ -474,23 +474,32 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
-	unsigned long addr = start, next;
-	pmd_t *pmdp;
+	unsigned long addr = start;
 	pud_t pud;
-	int ret;
+	int ret = 0;
+	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);
 
-again:
+	if (!ptl)
+		return 0;
+
+	/* Normally we don't want to split the huge page */
+	walk->action = ACTION_CONTINUE;
+
 	pud = READ_ONCE(*pudp);
-	if (pud_none(pud))
-		return hmm_vma_walk_hole(start, end, walk);
+	if (pud_none(pud)) {
+		ret = hmm_vma_walk_hole(start, end, -1, walk);
+		goto out_unlock;
+	}
 
 	if (pud_huge(pud) && pud_devmap(pud)) {
 		unsigned long i, npages, pfn;
 		uint64_t *pfns, cpu_flags;
 		bool fault, write_fault;
 
-		if (!pud_present(pud))
-			return hmm_vma_walk_hole(start, end, walk);
+		if (!pud_present(pud)) {
+			ret = hmm_vma_walk_hole(start, end, -1, walk);
+			goto out_unlock;
+		}
 
 		i = (addr - range->start) >> PAGE_SHIFT;
 		npages = (end - addr) >> PAGE_SHIFT;
@@ -499,16 +508,20 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
 		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
 				     cpu_flags, &fault, &write_fault);
-		if (fault || write_fault)
-			return hmm_vma_walk_hole_(addr, end, fault,
-						write_fault, walk);
+		if (fault || write_fault) {
+			ret = hmm_vma_walk_hole_(addr, end, fault,
+						 write_fault, walk);
+			goto out_unlock;
+		}
 
 		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
 		for (i = 0; i < npages; ++i, ++pfn) {
 			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
 					      hmm_vma_walk->pgmap);
-			if (unlikely(!hmm_vma_walk->pgmap))
-				return -EBUSY;
+			if (unlikely(!hmm_vma_walk->pgmap)) {
+				ret = -EBUSY;
+				goto out_unlock;
+			}
 			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
 				  cpu_flags;
 		}
@@ -517,22 +530,15 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 			hmm_vma_walk->pgmap = NULL;
 		}
 		hmm_vma_walk->last = end;
-		return 0;
+		goto out_unlock;
 	}
 
-	split_huge_pud(walk->vma, pudp, addr);
-	if (pud_none(*pudp))
-		goto again;
+	/* Ask for the PUD to be split */
+	walk->action = ACTION_SUBTREE;
 
-	pmdp = pmd_offset(pudp, addr);
-	do {
-		next = pmd_addr_end(addr, end);
-		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
-		if (ret)
-			return ret;
-	} while (pmdp++, addr = next, addr != end);
-
-	return 0;
+out_unlock:
+	spin_unlock(ptl);
+	return ret;
 }
 #else
 #define hmm_vma_walk_pud	NULL
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a880932..b08b199 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -177,16 +177,13 @@ static ssize_t enabled_store(struct kobject *kobj,
 {
 	ssize_t ret = count;
 
-	if (!memcmp("always", buf,
-		    min(sizeof("always")-1, count))) {
+	if (sysfs_streq(buf, "always")) {
 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
-	} else if (!memcmp("madvise", buf,
-			   min(sizeof("madvise")-1, count))) {
+	} else if (sysfs_streq(buf, "madvise")) {
 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
-	} else if (!memcmp("never", buf,
-			   min(sizeof("never")-1, count))) {
+	} else if (sysfs_streq(buf, "never")) {
 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
 	} else
@@ -250,32 +247,27 @@ static ssize_t defrag_store(struct kobject *kobj,
 			    struct kobj_attribute *attr,
 			    const char *buf, size_t count)
 {
-	if (!memcmp("always", buf,
-		    min(sizeof("always")-1, count))) {
+	if (sysfs_streq(buf, "always")) {
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
-	} else if (!memcmp("defer+madvise", buf,
-		    min(sizeof("defer+madvise")-1, count))) {
+	} else if (sysfs_streq(buf, "defer+madvise")) {
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
-	} else if (!memcmp("defer", buf,
-		    min(sizeof("defer")-1, count))) {
+	} else if (sysfs_streq(buf, "defer")) {
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
-	} else if (!memcmp("madvise", buf,
-			   min(sizeof("madvise")-1, count))) {
+	} else if (sysfs_streq(buf, "madvise")) {
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
-	} else if (!memcmp("never", buf,
-			   min(sizeof("never")-1, count))) {
+	} else if (sysfs_streq(buf, "never")) {
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
@@ -527,6 +519,17 @@ void prep_transhuge_page(struct page *page)
 	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
 }
 
+bool is_transparent_hugepage(struct page *page)
+{
+	if (!PageCompound(page))
+		return 0;
+
+	page = compound_head(page);
+	return is_huge_zero_page(page) ||
+	       page[1].compound_dtor == TRANSHUGE_PAGE_DTOR;
+}
+EXPORT_SYMBOL_GPL(is_transparent_hugepage);
+
 static unsigned long __thp_get_unmapped_area(struct file *filp,
 		unsigned long addr, unsigned long len,
 		loff_t off, unsigned long flags, unsigned long size)
@@ -2704,7 +2707,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
 	struct page *head = compound_head(page);
 	struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
-	struct deferred_split *ds_queue = get_deferred_split_queue(page);
+	struct deferred_split *ds_queue = get_deferred_split_queue(head);
 	struct anon_vma *anon_vma = NULL;
 	struct address_space *mapping = NULL;
 	int count, mapcount, extra_pins, ret;
@@ -2712,11 +2715,11 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	unsigned long flags;
 	pgoff_t end;
 
-	VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(!PageCompound(page), page);
+	VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
+	VM_BUG_ON_PAGE(!PageLocked(head), head);
+	VM_BUG_ON_PAGE(!PageCompound(head), head);
 
-	if (PageWriteback(page))
+	if (PageWriteback(head))
 		return -EBUSY;
 
 	if (PageAnon(head)) {
@@ -2767,7 +2770,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		goto out_unlock;
 	}
 
-	mlocked = PageMlocked(page);
+	mlocked = PageMlocked(head);
 	unmap_page(head);
 	VM_BUG_ON_PAGE(compound_mapcount(head), head);
 
@@ -2799,14 +2802,14 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 			ds_queue->split_queue_len--;
 			list_del(page_deferred_list(head));
 		}
+		spin_unlock(&ds_queue->split_queue_lock);
 		if (mapping) {
-			if (PageSwapBacked(page))
-				__dec_node_page_state(page, NR_SHMEM_THPS);
+			if (PageSwapBacked(head))
+				__dec_node_page_state(head, NR_SHMEM_THPS);
 			else
-				__dec_node_page_state(page, NR_FILE_THPS);
+				__dec_node_page_state(head, NR_FILE_THPS);
 		}
 
-		spin_unlock(&ds_queue->split_queue_lock);
 		__split_huge_page(page, list, end, flags);
 		if (PageSwapCache(head)) {
 			swp_entry_t entry = { .val = page_private(head) };
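
The enabled_store()/defrag_store() conversion above matters because the old test, !memcmp("always", buf, min(sizeof("always")-1, count)), only compares a prefix: it accepts truncated input such as "alw" as well as over-long input such as "alwaysgarbage", whereas sysfs_streq() requires an exact match apart from a trailing newline. A standalone sketch of the difference follows; streq_like() is only a rough userspace approximation of sysfs_streq():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/*
 * Old-style check: compares only a prefix, so truncated input ("alw")
 * and over-long input ("alwaysgarbage") are both accepted.
 */
static bool old_match(const char *buf, size_t count)
{
    return !memcmp("always", buf, MIN(sizeof("always") - 1, count));
}

/*
 * Rough userspace approximation of sysfs_streq(): exact match, except
 * that one trailing newline on the user-supplied string is ignored.
 */
static bool streq_like(const char *s1, const char *s2)
{
    size_t n = strlen(s1);

    if (n && s1[n - 1] == '\n')
        n--;
    return n == strlen(s2) && !strncmp(s1, s2, n);
}

int main(void)
{
    const char *inputs[] = { "always\n", "alw", "alwaysgarbage" };

    for (int i = 0; i < 3; i++)
        printf("%-15s old=%d new=%d\n", inputs[i],
               old_match(inputs[i], strlen(inputs[i])),
               streq_like(inputs[i], "always"));
    return 0;
}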
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index c15d8ae..6aa5172 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -110,6 +110,7 @@ void *memset(void *addr, int c, size_t len)
 	return __memset(addr, c, len);
 }
 
+#ifdef __HAVE_ARCH_MEMMOVE
 #undef memmove
 void *memmove(void *dest, const void *src, size_t len)
 {
@@ -118,6 +119,7 @@ void *memmove(void *dest, const void *src, size_t len)
 
 	return __memmove(dest, src, len);
 }
+#endif
 
 #undef memcpy
 void *memcpy(void *dest, const void *src, size_t len)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 2446076..3a4259e 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -13,7 +13,7 @@
  *
  * The following locks and mutexes are used by kmemleak:
  *
- * - kmemleak_lock (rwlock): protects the object_list modifications and
+ * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
  *   accesses to the object_tree_root. The object_list is the main list
  *   holding the metadata (struct kmemleak_object) for the allocated memory
  *   blocks. The object_tree_root is a red black tree used to look-up
@@ -22,13 +22,13 @@
  *   object_tree_root in the create_object() function called from the
  *   kmemleak_alloc() callback and removed in delete_object() called from the
  *   kmemleak_free() callback
- * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
- *   the metadata (e.g. count) are protected by this lock. Note that some
- *   members of this structure may be protected by other means (atomic or
- *   kmemleak_lock). This lock is also held when scanning the corresponding
- *   memory block to avoid the kernel freeing it via the kmemleak_free()
- *   callback. This is less heavyweight than holding a global lock like
- *   kmemleak_lock during scanning
+ * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
+ *   Accesses to the metadata (e.g. count) are protected by this lock. Note
+ *   that some members of this structure may be protected by other means
+ *   (atomic or kmemleak_lock). This lock is also held when scanning the
+ *   corresponding memory block to avoid the kernel freeing it via the
+ *   kmemleak_free() callback. This is less heavyweight than holding a global
+ *   lock like kmemleak_lock during scanning.
  * - scan_mutex (mutex): ensures that only one thread may scan the memory for
  *   unreferenced objects at a time. The gray_list contains the objects which
  *   are already referenced or marked as false positives and need to be
@@ -135,7 +135,7 @@ struct kmemleak_scan_area {
  * (use_count) and freed using the RCU mechanism.
  */
 struct kmemleak_object {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	unsigned int flags;		/* object status flags */
 	struct list_head object_list;
 	struct list_head gray_list;
@@ -191,8 +191,8 @@ static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
 static LIST_HEAD(mem_pool_free_list);
 /* search tree for object boundaries */
 static struct rb_root object_tree_root = RB_ROOT;
-/* rw_lock protecting the access to object_list and object_tree_root */
-static DEFINE_RWLOCK(kmemleak_lock);
+/* protecting the access to object_list and object_tree_root */
+static DEFINE_RAW_SPINLOCK(kmemleak_lock);
 
 /* allocation caches for kmemleak internal data */
 static struct kmem_cache *object_cache;
@@ -426,7 +426,7 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
 	}
 
 	/* slab allocation failed, try the memory pool */
-	write_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	object = list_first_entry_or_null(&mem_pool_free_list,
 					  typeof(*object), object_list);
 	if (object)
@@ -435,7 +435,7 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
 		object = &mem_pool[--mem_pool_free_count];
 	else
 		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
-	write_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 
 	return object;
 }
@@ -453,9 +453,9 @@ static void mem_pool_free(struct kmemleak_object *object)
 	}
 
 	/* add the object to the memory pool free list */
-	write_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	list_add(&object->object_list, &mem_pool_free_list);
-	write_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 }
 
 /*
@@ -514,9 +514,9 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 	struct kmemleak_object *object;
 
 	rcu_read_lock();
-	read_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	object = lookup_object(ptr, alias);
-	read_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 
 	/* check whether the object is still available */
 	if (object && !get_object(object))
@@ -546,11 +546,11 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali
 	unsigned long flags;
 	struct kmemleak_object *object;
 
-	write_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	object = lookup_object(ptr, alias);
 	if (object)
 		__remove_object(object);
-	write_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 
 	return object;
 }
@@ -585,7 +585,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	INIT_LIST_HEAD(&object->object_list);
 	INIT_LIST_HEAD(&object->gray_list);
 	INIT_HLIST_HEAD(&object->area_list);
-	spin_lock_init(&object->lock);
+	raw_spin_lock_init(&object->lock);
 	atomic_set(&object->use_count, 1);
 	object->flags = OBJECT_ALLOCATED;
 	object->pointer = ptr;
@@ -617,7 +617,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	/* kernel backtrace */
 	object->trace_len = __save_stack_trace(object->trace);
 
-	write_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 
 	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
 	min_addr = min(min_addr, untagged_ptr);
@@ -649,7 +649,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 
 	list_add_tail_rcu(&object->object_list, &object_list);
 out:
-	write_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 	return object;
 }
 
@@ -667,9 +667,9 @@ static void __delete_object(struct kmemleak_object *object)
 	 * Locking here also ensures that the corresponding memory block
 	 * cannot be freed when it is being scanned.
 	 */
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->flags &= ~OBJECT_ALLOCATED;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	put_object(object);
 }
 
@@ -739,9 +739,9 @@ static void paint_it(struct kmemleak_object *object, int color)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	__paint_it(object, color);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 
 static void paint_ptr(unsigned long ptr, int color)
@@ -798,7 +798,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 	if (scan_area_cache)
 		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	if (!area) {
 		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
 		/* mark the object for full scan to avoid false positives */
@@ -820,7 +820,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 
 	hlist_add_head(&area->node, &object->area_list);
 out_unlock:
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	put_object(object);
 }
 
@@ -842,9 +842,9 @@ static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
 		return;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->excess_ref = excess_ref;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	put_object(object);
 }
 
@@ -864,9 +864,9 @@ static void object_no_scan(unsigned long ptr)
 		return;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->flags |= OBJECT_NO_SCAN;
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	put_object(object);
 }
 
@@ -1026,9 +1026,9 @@ void __ref kmemleak_update_trace(const void *ptr)
 		return;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	object->trace_len = __save_stack_trace(object->trace);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 
 	put_object(object);
 }
@@ -1233,7 +1233,7 @@ static void scan_block(void *_start, void *_end,
 	unsigned long flags;
 	unsigned long untagged_ptr;
 
-	read_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	for (ptr = start; ptr < end; ptr++) {
 		struct kmemleak_object *object;
 		unsigned long pointer;
@@ -1268,7 +1268,7 @@ static void scan_block(void *_start, void *_end,
 		 * previously acquired in scan_object(). These locks are
 		 * enclosed by scan_mutex.
 		 */
-		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
 		/* only pass surplus references (object already gray) */
 		if (color_gray(object)) {
 			excess_ref = object->excess_ref;
@@ -1277,7 +1277,7 @@ static void scan_block(void *_start, void *_end,
 			excess_ref = 0;
 			update_refs(object);
 		}
-		spin_unlock(&object->lock);
+		raw_spin_unlock(&object->lock);
 
 		if (excess_ref) {
 			object = lookup_object(excess_ref, 0);
@@ -1286,12 +1286,12 @@ static void scan_block(void *_start, void *_end,
 			if (object == scanned)
 				/* circular reference, ignore */
 				continue;
-			spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
 			update_refs(object);
-			spin_unlock(&object->lock);
+			raw_spin_unlock(&object->lock);
 		}
 	}
-	read_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 }
 
 /*
@@ -1324,7 +1324,7 @@ static void scan_object(struct kmemleak_object *object)
 	 * Once the object->lock is acquired, the corresponding memory block
 	 * cannot be freed (the same lock is acquired in delete_object).
 	 */
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	if (object->flags & OBJECT_NO_SCAN)
 		goto out;
 	if (!(object->flags & OBJECT_ALLOCATED))
@@ -1344,9 +1344,9 @@ static void scan_object(struct kmemleak_object *object)
 			if (start >= end)
 				break;
 
-			spin_unlock_irqrestore(&object->lock, flags);
+			raw_spin_unlock_irqrestore(&object->lock, flags);
 			cond_resched();
-			spin_lock_irqsave(&object->lock, flags);
+			raw_spin_lock_irqsave(&object->lock, flags);
 		} while (object->flags & OBJECT_ALLOCATED);
 	} else
 		hlist_for_each_entry(area, &object->area_list, node)
@@ -1354,7 +1354,7 @@ static void scan_object(struct kmemleak_object *object)
 				   (void *)(area->start + area->size),
 				   object);
 out:
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 
 /*
@@ -1407,7 +1407,7 @@ static void kmemleak_scan(void)
 	/* prepare the kmemleak_object's */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 #ifdef DEBUG
 		/*
 		 * With a few exceptions there should be a maximum of
@@ -1424,7 +1424,7 @@ static void kmemleak_scan(void)
 		if (color_gray(object) && get_object(object))
 			list_add_tail(&object->gray_list, &gray_list);
 
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
@@ -1492,14 +1492,14 @@ static void kmemleak_scan(void)
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
 		    && update_checksum(object) && get_object(object)) {
 			/* color it gray temporarily */
 			object->count = object->min_count;
 			list_add_tail(&object->gray_list, &gray_list);
 		}
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
@@ -1519,7 +1519,7 @@ static void kmemleak_scan(void)
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 		if (unreferenced_object(object) &&
 		    !(object->flags & OBJECT_REPORTED)) {
 			object->flags |= OBJECT_REPORTED;
@@ -1529,7 +1529,7 @@ static void kmemleak_scan(void)
 
 			new_leaks++;
 		}
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
@@ -1681,10 +1681,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
 	struct kmemleak_object *object = v;
 	unsigned long flags;
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
 		print_unreferenced(seq, object);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 	return 0;
 }
 
@@ -1714,9 +1714,9 @@ static int dump_str_object_info(const char *str)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&object->lock, flags);
+	raw_spin_lock_irqsave(&object->lock, flags);
 	dump_object_info(object);
-	spin_unlock_irqrestore(&object->lock, flags);
+	raw_spin_unlock_irqrestore(&object->lock, flags);
 
 	put_object(object);
 	return 0;
@@ -1735,11 +1735,11 @@ static void kmemleak_clear(void)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
-		spin_lock_irqsave(&object->lock, flags);
+		raw_spin_lock_irqsave(&object->lock, flags);
 		if ((object->flags & OBJECT_REPORTED) &&
 		    unreferenced_object(object))
 			__paint_it(object, KMEMLEAK_GREY);
-		spin_unlock_irqrestore(&object->lock, flags);
+		raw_spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
diff --git a/mm/madvise.c b/mm/madvise.c
index bcdb6a0..43b47d3 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1044,7 +1044,7 @@ madvise_behavior_valid(int behavior)
  *  -EBADF  - map exists, but area maps something that isn't a file.
  *  -EAGAIN - a kernel resource was temporarily unavailable.
  */
-SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
+int do_madvise(unsigned long start, size_t len_in, int behavior)
 {
 	unsigned long end, tmp;
 	struct vm_area_struct *vma, *prev;
@@ -1141,3 +1141,8 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
 
 	return error;
 }
+
+SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
+{
+	return do_madvise(start, len_in, behavior);
+}
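
The hunk above splits the entry point from the work: do_madvise() now carries the implementation while the SYSCALL_DEFINE3() stub simply forwards to it, so in-kernel callers can reuse the same logic without going through the syscall boundary. A minimal userspace sketch of that wrapper/worker split (all names below are invented for the example, not kernel symbols):

#include <stdio.h>
#include <stddef.h>

/* Worker: holds the real logic and is callable from anywhere in the program. */
static int do_advise(unsigned long start, size_t len, int behavior)
{
	if (len == 0)
		return 0;	/* nothing to do */
	printf("advise [%#lx, %#lx) behavior=%d\n", start, start + len, behavior);
	return 0;
}

/* Thin entry point: mirrors how a syscall stub just delegates to the worker. */
static int sys_advise(unsigned long start, size_t len, int behavior)
{
	return do_advise(start, len, behavior);
}

int main(void)
{
	/* Both the "syscall" path and an internal caller hit the same worker. */
	sys_advise(0x1000, 4096, 4);
	do_advise(0x2000, 8192, 4);
	return 0;
}
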
diff --git a/mm/memblock.c b/mm/memblock.c
index 4bc2c7d..eba94ee 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -575,7 +575,7 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type,
  * Return:
  * 0 on success, -errno on failure.
  */
-int __init_memblock memblock_add_range(struct memblock_type *type,
+static int __init_memblock memblock_add_range(struct memblock_type *type,
 				phys_addr_t base, phys_addr_t size,
 				int nid, enum memblock_flags flags)
 {
@@ -694,7 +694,7 @@ int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
 {
 	phys_addr_t end = base + size - 1;
 
-	memblock_dbg("memblock_add: [%pa-%pa] %pS\n",
+	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
 		     &base, &end, (void *)_RET_IP_);
 
 	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
@@ -795,7 +795,7 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
 {
 	phys_addr_t end = base + size - 1;
 
-	memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
+	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
 		     &base, &end, (void *)_RET_IP_);
 
 	return memblock_remove_range(&memblock.memory, base, size);
@@ -813,7 +813,7 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
 {
 	phys_addr_t end = base + size - 1;
 
-	memblock_dbg("   memblock_free: [%pa-%pa] %pS\n",
+	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
 		     &base, &end, (void *)_RET_IP_);
 
 	kmemleak_free_part_phys(base, size);
@@ -824,12 +824,24 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
 {
 	phys_addr_t end = base + size - 1;
 
-	memblock_dbg("memblock_reserve: [%pa-%pa] %pS\n",
+	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
 		     &base, &end, (void *)_RET_IP_);
 
 	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
 }
 
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
+{
+	phys_addr_t end = base + size - 1;
+
+	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
+		     &base, &end, (void *)_RET_IP_);
+
+	return memblock_add_range(&memblock.physmem, base, size, MAX_NUMNODES, 0);
+}
+#endif
+
 /**
  * memblock_setclr_flag - set or clear flag for a memory region
  * @base: base address of the region
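
The memblock_dbg() calls above stop hard-coding the function name in the format string and print __func__ instead, so the message can never go stale if a function is renamed. A small standalone C illustration of that pattern (the dbg macro here is made up for the example, not the kernel's):

#include <stdio.h>

/* Hypothetical debug macro: prefix every message with the calling function. */
#define dbg(fmt, ...) printf("%s: " fmt, __func__, ##__VA_ARGS__)

static void reserve_region(unsigned long base, unsigned long size)
{
	/* Prints "reserve_region: [0x1000-0x1fff]" without naming the function by hand. */
	dbg("[%#lx-%#lx]\n", base, base + size - 1);
}

int main(void)
{
	reserve_region(0x1000, 0x1000);
	return 0;
}
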
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6c83cf4..6f6dc87 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5340,14 +5340,6 @@ static int mem_cgroup_move_account(struct page *page,
 		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
 	}
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (compound && !list_empty(page_deferred_list(page))) {
-		spin_lock(&from->deferred_split_queue.split_queue_lock);
-		list_del_init(page_deferred_list(page));
-		from->deferred_split_queue.split_queue_len--;
-		spin_unlock(&from->deferred_split_queue.split_queue_lock);
-	}
-#endif
 	/*
 	 * It is safe to change page->mem_cgroup here because the page
 	 * is referenced, charged, and isolated - we can't race with
@@ -5357,16 +5349,6 @@ static int mem_cgroup_move_account(struct page *page,
 	/* caller should have done css_get */
 	page->mem_cgroup = to;
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (compound && list_empty(page_deferred_list(page))) {
-		spin_lock(&to->deferred_split_queue.split_queue_lock);
-		list_add_tail(page_deferred_list(page),
-			      &to->deferred_split_queue.split_queue);
-		to->deferred_split_queue.split_queue_len++;
-		spin_unlock(&to->deferred_split_queue.split_queue_lock);
-	}
-#endif
-
 	spin_unlock_irqrestore(&from->move_lock, flags);
 
 	ret = 0;
@@ -6651,7 +6633,6 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 {
 	struct mem_cgroup *memcg;
 	unsigned int nr_pages;
-	bool compound;
 	unsigned long flags;
 
 	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
@@ -6673,8 +6654,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 		return;
 
 	/* Force-charge the new page. The old one will be freed soon */
-	compound = PageTransHuge(newpage);
-	nr_pages = compound ? hpage_nr_pages(newpage) : 1;
+	nr_pages = hpage_nr_pages(newpage);
 
 	page_counter_charge(&memcg->memory, nr_pages);
 	if (do_memsw_account())
@@ -6684,7 +6664,8 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 	commit_charge(newpage, memcg, false);
 
 	local_irq_save(flags);
-	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
+	mem_cgroup_charge_statistics(memcg, newpage, PageTransHuge(newpage),
+			nr_pages);
 	memcg_check_events(memcg, newpage);
 	local_irq_restore(flags);
 }
diff --git a/mm/memory.c b/mm/memory.c
index 1c4be87..0bccc62 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1664,6 +1664,9 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
  * impractical.
  *
+ * See vmf_insert_mixed_prot() for a discussion of the implications of using
+ * a value of @pgprot different from that of @vma->vm_page_prot.
+ *
  * Context: Process context.  May allocate using %GFP_KERNEL.
  * Return: vm_fault_t value.
  */
@@ -1737,9 +1740,9 @@ static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
 }
 
 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
-		unsigned long addr, pfn_t pfn, bool mkwrite)
+		unsigned long addr, pfn_t pfn, pgprot_t pgprot,
+		bool mkwrite)
 {
-	pgprot_t pgprot = vma->vm_page_prot;
 	int err;
 
 	BUG_ON(!vm_mixed_ok(vma, pfn));
@@ -1782,10 +1785,43 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
 	return VM_FAULT_NOPAGE;
 }
 
+/**
+ * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ * @pgprot: pgprot flags for the inserted page
+ *
+ * This is exactly like vmf_insert_mixed(), except that it allows drivers
+ * to override pgprot on a per-page basis.
+ *
+ * Typically this function should be used by drivers to set caching- and
+ * encryption bits different than those of @vma->vm_page_prot, because
+ * the caching- or encryption mode may not be known at mmap() time.
+ * This is ok as long as @vma->vm_page_prot is not used by the core vm
+ * to set caching and encryption bits for those vmas (except for COW pages).
+ * This is ensured by core vm only modifying these page table entries using
+ * functions that don't touch caching- or encryption bits, using pte_modify()
+ * if needed. (See for example mprotect()).
+ * Also when new page-table entries are created, this is only done using the
+ * fault() callback, and never using the value of vma->vm_page_prot,
+ * except for page-table entries that point to anonymous pages as the result
+ * of COW.
+ *
+ * Context: Process context.  May allocate using %GFP_KERNEL.
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
+				 pfn_t pfn, pgprot_t pgprot)
+{
+	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
+}
+EXPORT_SYMBOL(vmf_insert_mixed_prot);
+
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 		pfn_t pfn)
 {
-	return __vm_insert_mixed(vma, addr, pfn, false);
+	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
 }
 EXPORT_SYMBOL(vmf_insert_mixed);
 
@@ -1797,7 +1833,7 @@ EXPORT_SYMBOL(vmf_insert_mixed);
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
 		unsigned long addr, pfn_t pfn)
 {
-	return __vm_insert_mixed(vma, addr, pfn, true);
+	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
 }
 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a91a072..0a54ffa 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -355,7 +355,7 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
 		if (unlikely(pfn_to_nid(start_pfn) != nid))
 			continue;
 
-		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
+		if (zone != page_zone(pfn_to_page(start_pfn)))
 			continue;
 
 		return start_pfn;
@@ -380,7 +380,7 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
 		if (unlikely(pfn_to_nid(pfn) != nid))
 			continue;
 
-		if (zone && zone != page_zone(pfn_to_page(pfn)))
+		if (zone != page_zone(pfn_to_page(pfn)))
 			continue;
 
 		return pfn;
@@ -392,14 +392,11 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
 			     unsigned long end_pfn)
 {
-	unsigned long zone_start_pfn = zone->zone_start_pfn;
-	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
-	unsigned long zone_end_pfn = z;
 	unsigned long pfn;
 	int nid = zone_to_nid(zone);
 
 	zone_span_writelock(zone);
-	if (zone_start_pfn == start_pfn) {
+	if (zone->zone_start_pfn == start_pfn) {
 		/*
 		 * If the section is smallest section in the zone, it need
 		 * shrink zone->zone_start_pfn and zone->zone_spanned_pages.
@@ -407,50 +404,30 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
 		 * for shrinking zone.
 		 */
 		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
-						zone_end_pfn);
+						zone_end_pfn(zone));
 		if (pfn) {
+			zone->spanned_pages = zone_end_pfn(zone) - pfn;
 			zone->zone_start_pfn = pfn;
-			zone->spanned_pages = zone_end_pfn - pfn;
+		} else {
+			zone->zone_start_pfn = 0;
+			zone->spanned_pages = 0;
 		}
-	} else if (zone_end_pfn == end_pfn) {
+	} else if (zone_end_pfn(zone) == end_pfn) {
 		/*
 		 * If the section is biggest section in the zone, it need
 		 * shrink zone->spanned_pages.
 		 * In this case, we find second biggest valid mem_section for
 		 * shrinking zone.
 		 */
-		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
+		pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
 					       start_pfn);
 		if (pfn)
-			zone->spanned_pages = pfn - zone_start_pfn + 1;
+			zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
+		else {
+			zone->zone_start_pfn = 0;
+			zone->spanned_pages = 0;
+		}
 	}
-
-	/*
-	 * The section is not biggest or smallest mem_section in the zone, it
-	 * only creates a hole in the zone. So in this case, we need not
-	 * change the zone. But perhaps, the zone has only hole data. Thus
-	 * it check the zone has only hole or not.
-	 */
-	pfn = zone_start_pfn;
-	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
-		if (unlikely(!pfn_to_online_page(pfn)))
-			continue;
-
-		if (page_zone(pfn_to_page(pfn)) != zone)
-			continue;
-
-		/* Skip range to be removed */
-		if (pfn >= start_pfn && pfn < end_pfn)
-			continue;
-
-		/* If we find valid section, we have nothing to do */
-		zone_span_writeunlock(zone);
-		return;
-	}
-
-	/* The zone has no valid section */
-	zone->zone_start_pfn = 0;
-	zone->spanned_pages = 0;
 	zone_span_writeunlock(zone);
 }
 
@@ -490,6 +467,9 @@ void __ref remove_pfn_range_from_zone(struct zone *zone,
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	unsigned long flags;
 
+	/* Poison struct pages because they are now uninitialized again. */
+	page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
+
 #ifdef CONFIG_ZONE_DEVICE
 	/*
 	 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
@@ -536,25 +516,20 @@ static void __remove_section(unsigned long pfn, unsigned long nr_pages,
 void __remove_pages(unsigned long pfn, unsigned long nr_pages,
 		    struct vmem_altmap *altmap)
 {
+	const unsigned long end_pfn = pfn + nr_pages;
+	unsigned long cur_nr_pages;
 	unsigned long map_offset = 0;
-	unsigned long nr, start_sec, end_sec;
 
 	map_offset = vmem_altmap_offset(altmap);
 
 	if (check_pfn_span(pfn, nr_pages, "remove"))
 		return;
 
-	start_sec = pfn_to_section_nr(pfn);
-	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
-	for (nr = start_sec; nr <= end_sec; nr++) {
-		unsigned long pfns;
-
+	for (; pfn < end_pfn; pfn += cur_nr_pages) {
 		cond_resched();
-		pfns = min(nr_pages, PAGES_PER_SECTION
-				- (pfn & ~PAGE_SECTION_MASK));
-		__remove_section(pfn, pfns, map_offset, altmap);
-		pfn += pfns;
-		nr_pages -= pfns;
+		/* Select all remaining pages up to the next section boundary */
+		cur_nr_pages = min(end_pfn - pfn, -(pfn | PAGE_SECTION_MASK));
+		__remove_section(pfn, cur_nr_pages, map_offset, altmap);
 		map_offset = 0;
 	}
 }
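
The rewritten __remove_pages() loop advances in chunks that never cross a section boundary, relying on the identity that, in unsigned arithmetic, -(pfn | PAGE_SECTION_MASK) is the number of pages left until the next section. A small runnable check of that identity (PAGES_PER_SECTION is chosen arbitrarily for the demo):

#include <assert.h>
#include <stdio.h>

#define PAGES_PER_SECTION	(1UL << 15)		/* demo value */
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))

int main(void)
{
	unsigned long pfn;

	for (pfn = 0; pfn < 4 * PAGES_PER_SECTION; pfn += 12345) {
		/* Pages from pfn up to the next section boundary. */
		unsigned long to_boundary = -(pfn | PAGE_SECTION_MASK);

		assert(to_boundary ==
		       PAGES_PER_SECTION - (pfn & (PAGES_PER_SECTION - 1)));
	}
	printf("-(pfn | PAGE_SECTION_MASK) == pages to next section boundary\n");
	return 0;
}
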
@@ -783,27 +758,18 @@ struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
 	return default_zone_for_pfn(nid, start_pfn, nr_pages);
 }
 
-int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
+int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
+		       int online_type, int nid)
 {
 	unsigned long flags;
 	unsigned long onlined_pages = 0;
 	struct zone *zone;
 	int need_zonelists_rebuild = 0;
-	int nid;
 	int ret;
 	struct memory_notify arg;
-	struct memory_block *mem;
 
 	mem_hotplug_begin();
 
-	/*
-	 * We can't use pfn_to_nid() because nid might be stored in struct page
-	 * which is not yet initialized. Instead, we find nid from memory block.
-	 */
-	mem = find_memory_block(__pfn_to_section(pfn));
-	nid = mem->nid;
-	put_device(&mem->dev);
-
 	/* associate pfn range with the zone */
 	zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
 	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL);
@@ -1182,7 +1148,7 @@ static bool is_pageblock_removable_nolock(unsigned long pfn)
 	if (!zone_spans_pfn(zone, pfn))
 		return false;
 
-	return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE,
+	return !has_unmovable_pages(zone, page, MIGRATE_MOVABLE,
 				    MEMORY_OFFLINE);
 }
 
@@ -1206,14 +1172,13 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 }
 
 /*
- * Confirm all pages in a range [start, end) belong to the same zone.
- * When true, return its valid [start, end).
+ * Confirm all pages in a range [start, end) belong to the same zone (skipping
+ * memory holes). Return the zone if they do, NULL otherwise.
  */
-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
-			 unsigned long *valid_start, unsigned long *valid_end)
+struct zone *test_pages_in_a_zone(unsigned long start_pfn,
+				  unsigned long end_pfn)
 {
 	unsigned long pfn, sec_end_pfn;
-	unsigned long start, end;
 	struct zone *zone = NULL;
 	struct page *page;
 	int i;
@@ -1234,24 +1199,15 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
 				continue;
 			/* Check if we got outside of the zone */
 			if (zone && !zone_spans_pfn(zone, pfn + i))
-				return 0;
+				return NULL;
 			page = pfn_to_page(pfn + i);
 			if (zone && page_zone(page) != zone)
-				return 0;
-			if (!zone)
-				start = pfn + i;
+				return NULL;
 			zone = page_zone(page);
-			end = pfn + MAX_ORDER_NR_PAGES;
 		}
 	}
 
-	if (zone) {
-		*valid_start = start;
-		*valid_end = min(end, end_pfn);
-		return 1;
-	} else {
-		return 0;
-	}
+	return zone;
 }
 
 /*
@@ -1496,7 +1452,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
 	unsigned long offlined_pages = 0;
 	int ret, node, nr_isolate_pageblock;
 	unsigned long flags;
-	unsigned long valid_start, valid_end;
 	struct zone *zone;
 	struct memory_notify arg;
 	char *reason;
@@ -1521,14 +1476,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
 	/* This makes hotplug much easier...and readable.
 	   we assume this for now. .*/
-	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
-				  &valid_end)) {
+	zone = test_pages_in_a_zone(start_pfn, end_pfn);
+	if (!zone) {
 		ret = -EINVAL;
 		reason = "multizone range";
 		goto failed_removal;
 	}
-
-	zone = page_zone(pfn_to_page(valid_start));
 	node = zone_to_nid(zone);
 
 	/* set above range as isolated */
@@ -1764,8 +1717,6 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
 
 	BUG_ON(check_hotplug_memory_range(start, size));
 
-	mem_hotplug_begin();
-
 	/*
 	 * All memory blocks must be offlined before removing memory.  Check
 	 * whether all memory blocks in question are offline and return error
@@ -1778,9 +1729,14 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
 	/* remove memmap entry */
 	firmware_map_remove(start, start + size, "System RAM");
 
-	/* remove memory block devices before removing memory */
+	/*
+	 * Memory block device removal under the device_hotplug_lock is
+	 * a barrier against racing online attempts.
+	 */
 	remove_memory_block_devices(start, size);
 
+	mem_hotplug_begin();
+
 	arch_remove_memory(nid, start, size, NULL);
 	memblock_free(start, size);
 	memblock_remove(start, size);
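
Several hunks above simplify test_pages_in_a_zone() to return the zone it found (or NULL) instead of an int plus valid_start/valid_end out-parameters. A tiny illustration of why returning the object directly reads better than boolean-plus-out-params (the zone table below is invented for the demo):

#include <stdio.h>
#include <stddef.h>

struct zone {
	const char *name;
	unsigned long start, end;	/* [start, end) in pfns */
};

static struct zone zones[] = {
	{ "DMA32",  0,    1024 },
	{ "Normal", 1024, 4096 },
};

/* Return the single zone covering [start, end), or NULL if no zone does. */
static struct zone *pages_in_one_zone(unsigned long start, unsigned long end)
{
	size_t i;

	for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++)
		if (start >= zones[i].start && end <= zones[i].end)
			return &zones[i];	/* caller just checks for NULL */
	return NULL;
}

int main(void)
{
	struct zone *z = pages_in_one_zone(100, 200);

	printf("range is in %s\n", z ? z->name : "multiple zones");
	return 0;
}
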
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b2920ae..977c641 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2821,6 +2821,9 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
 	char *flags = strchr(str, '=');
 	int err = 1, mode;
 
+	if (flags)
+		*flags++ = '\0';	/* terminate mode string */
+
 	if (nodelist) {
 		/* NUL-terminate mode or flags string */
 		*nodelist++ = '\0';
@@ -2831,9 +2834,6 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
 	} else
 		nodes_clear(nodes);
 
-	if (flags)
-		*flags++ = '\0';	/* terminate mode string */
-
 	mode = match_string(policy_modes, MPOL_MAX, str);
 	if (mode < 0)
 		goto out;
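
The mempolicy hunk moves the '=' termination ahead of the ':' handling, so the mode token is already NUL-terminated before the nodelist is examined and the two delimiters cannot interfere with each other. A simplified, standalone sketch of splitting a mode[=flags][:nodelist] string in that order (the grammar and output here are illustrative, not the kernel's exact parser):

#include <stdio.h>
#include <string.h>

/* Illustrative splitter for strings of the form mode[=flags][:nodelist]. */
static void parse(char *str)
{
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');

	/* Terminate at '=' first ... */
	if (flags)
		*flags++ = '\0';
	/* ... then cut off the nodelist, so str now holds only the mode. */
	if (nodelist)
		*nodelist++ = '\0';

	printf("mode=%s flags=%s nodelist=%s\n",
	       str, flags ? flags : "-", nodelist ? nodelist : "-");
}

int main(void)
{
	char a[] = "prefer=static:1-3";
	char b[] = "interleave:0-7";

	parse(a);
	parse(b);
	return 0;
}
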
diff --git a/mm/memremap.c b/mm/memremap.c
index c51c6bd..09b5b7a 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -27,7 +27,8 @@ static void devmap_managed_enable_put(void)
 
 static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
-	if (!pgmap->ops || !pgmap->ops->page_free) {
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE &&
+	    (!pgmap->ops || !pgmap->ops->page_free)) {
 		WARN(1, "Missing page_free method\n");
 		return -EINVAL;
 	}
@@ -119,6 +120,8 @@ void memunmap_pages(struct dev_pagemap *pgmap)
 	nid = page_to_nid(first_page);
 
 	mem_hotplug_begin();
+	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(res->start),
+				   PHYS_PFN(resource_size(res)));
 	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
 		__remove_pages(PHYS_PFN(res->start),
 			       PHYS_PFN(resource_size(res)), NULL);
@@ -410,48 +413,42 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 EXPORT_SYMBOL_GPL(get_dev_pagemap);
 
 #ifdef CONFIG_DEV_PAGEMAP_OPS
-void __put_devmap_managed_page(struct page *page)
+void free_devmap_managed_page(struct page *page)
 {
-	int count = page_ref_dec_return(page);
+	/* notify page idle for dax */
+	if (!is_device_private_page(page)) {
+		wake_up_var(&page->_refcount);
+		return;
+	}
+
+	/* Clear Active bit in case of parallel mark_page_accessed */
+	__ClearPageActive(page);
+	__ClearPageWaiters(page);
+
+	mem_cgroup_uncharge(page);
 
 	/*
-	 * If refcount is 1 then page is freed and refcount is stable as nobody
-	 * holds a reference on the page.
+	 * When a device_private page is freed, the page->mapping field
+	 * may still contain a (stale) mapping value. For example, the
+	 * lower bits of page->mapping may still identify the page as an
+	 * anonymous page. Ultimately, this entire field is just stale
+	 * and wrong, and it will cause errors if not cleared.  One
+	 * example is:
+	 *
+	 *  migrate_vma_pages()
+	 *    migrate_vma_insert_page()
+	 *      page_add_new_anon_rmap()
+	 *        __page_set_anon_rmap()
+	 *          ...checks page->mapping, via PageAnon(page) call,
+	 *            and incorrectly concludes that the page is an
+	 *            anonymous page. Therefore, it incorrectly,
+	 *            silently fails to set up the new anon rmap.
+	 *
+	 * For other types of ZONE_DEVICE pages, migration is either
+	 * handled differently or not done at all, so there is no need
+	 * to clear page->mapping.
 	 */
-	if (count == 1) {
-		/* Clear Active bit in case of parallel mark_page_accessed */
-		__ClearPageActive(page);
-		__ClearPageWaiters(page);
-
-		mem_cgroup_uncharge(page);
-
-		/*
-		 * When a device_private page is freed, the page->mapping field
-		 * may still contain a (stale) mapping value. For example, the
-		 * lower bits of page->mapping may still identify the page as
-		 * an anonymous page. Ultimately, this entire field is just
-		 * stale and wrong, and it will cause errors if not cleared.
-		 * One example is:
-		 *
-		 *  migrate_vma_pages()
-		 *    migrate_vma_insert_page()
-		 *      page_add_new_anon_rmap()
-		 *        __page_set_anon_rmap()
-		 *          ...checks page->mapping, via PageAnon(page) call,
-		 *            and incorrectly concludes that the page is an
-		 *            anonymous page. Therefore, it incorrectly,
-		 *            silently fails to set up the new anon rmap.
-		 *
-		 * For other types of ZONE_DEVICE pages, migration is either
-		 * handled differently or not done at all, so there is no need
-		 * to clear page->mapping.
-		 */
-		if (is_device_private_page(page))
-			page->mapping = NULL;
-
-		page->pgmap->ops->page_free(page);
-	} else if (!count)
-		__put_page(page);
+	page->mapping = NULL;
+	page->pgmap->ops->page_free(page);
 }
-EXPORT_SYMBOL(__put_devmap_managed_page);
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
diff --git a/mm/migrate.c b/mm/migrate.c
index 86873b6..b109287 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -48,6 +48,7 @@
 #include <linux/page_owner.h>
 #include <linux/sched/mm.h>
 #include <linux/ptrace.h>
+#include <linux/oom.h>
 
 #include <asm/tlbflush.h>
 
@@ -986,7 +987,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		}
 
 		/*
-		 * Anonymous and movable page->mapping will be cleard by
+		 * Anonymous and movable page->mapping will be cleared by
 		 * free_pages_prepare so don't reset it here for keeping
 		 * the type to work PageAnon, for example.
 		 */
@@ -1199,8 +1200,7 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
 		/*
 		 * A page that has been migrated has all references
 		 * removed and will be freed. A page that has not been
-		 * migrated will have kepts its references and be
-		 * restored.
+		 * migrated will have kept its references and be restored.
 		 */
 		list_del(&page->lru);
 
@@ -1627,8 +1627,19 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 			start = i;
 		} else if (node != current_node) {
 			err = do_move_pages_to_node(mm, &pagelist, current_node);
-			if (err)
+			if (err) {
+				/*
+				 * A positive err means the number of pages
+				 * that failed to migrate.  Since we are going
+				 * to abort and return the number of
+				 * non-migrated pages, include the rest of
+				 * nr_pages that have not been attempted as
+				 * well.
+				 */
+				if (err > 0)
+					err += nr_pages - i - 1;
 				goto out;
+			}
 			err = store_status(status, start, current_node, i - start);
 			if (err)
 				goto out;
@@ -1659,8 +1670,11 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 			goto out_flush;
 
 		err = do_move_pages_to_node(mm, &pagelist, current_node);
-		if (err)
+		if (err) {
+			if (err > 0)
+				err += nr_pages - i - 1;
 			goto out;
+		}
 		if (i > start) {
 			err = store_status(status, start, current_node, i - start);
 			if (err)
@@ -1674,9 +1688,16 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 
 	/* Make sure we do not overwrite the existing error */
 	err1 = do_move_pages_to_node(mm, &pagelist, current_node);
+	/*
+	 * We don't have to report non-attempted pages here since:
+	 *     - If the above loop finished gracefully, all pages have been
+	 *       attempted.
+	 *     - If the above loop was aborted, a fatal error happened and
+	 *       that error should be returned.
+	 */
 	if (!err1)
 		err1 = store_status(status, start, current_node, i - start);
-	if (!err)
+	if (err >= 0)
 		err = err1;
 out:
 	return err;
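
With the two fixes above, when a batched move fails partway through, the positive failure count is topped up with the pages that were never attempted, so the caller sees the true number of non-migrated pages. A compact runnable model of that accounting (the items and failure injection below are invented):

#include <stdio.h>

/* Pretend to move one item; items equal to -1 "fail". */
static int move_one(int item)
{
	return item < 0 ? 1 : 0;	/* 1 failed item, 0 on success */
}

/* Returns the number of items that did NOT end up moved. */
static int move_all(const int *items, int nr)
{
	int i, err;

	for (i = 0; i < nr; i++) {
		err = move_one(items[i]);
		if (err > 0) {
			/* Count everything after the failure as not attempted. */
			err += nr - i - 1;
			return err;
		}
	}
	return 0;
}

int main(void)
{
	int items[] = { 1, 2, -1, 4, 5 };

	/* 1 failed + 2 never attempted = 3 non-migrated items. */
	printf("non-migrated: %d\n", move_all(items, 5));
	return 0;
}
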
@@ -2130,12 +2151,13 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 #ifdef CONFIG_DEVICE_PRIVATE
 static int migrate_vma_collect_hole(unsigned long start,
 				    unsigned long end,
+				    __always_unused int depth,
 				    struct mm_walk *walk)
 {
 	struct migrate_vma *migrate = walk->private;
 	unsigned long addr;
 
-	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
 		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
 		migrate->dst[migrate->npages] = 0;
 		migrate->npages++;
@@ -2152,7 +2174,7 @@ static int migrate_vma_collect_skip(unsigned long start,
 	struct migrate_vma *migrate = walk->private;
 	unsigned long addr;
 
-	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
 		migrate->dst[migrate->npages] = 0;
 		migrate->src[migrate->npages++] = 0;
 	}
@@ -2174,7 +2196,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 
 again:
 	if (pmd_none(*pmdp))
-		return migrate_vma_collect_hole(start, end, walk);
+		return migrate_vma_collect_hole(start, end, -1, walk);
 
 	if (pmd_trans_huge(*pmdp)) {
 		struct page *page;
@@ -2207,7 +2229,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 				return migrate_vma_collect_skip(start, end,
 								walk);
 			if (pmd_none(*pmdp))
-				return migrate_vma_collect_hole(start, end,
+				return migrate_vma_collect_hole(start, end, -1,
 								walk);
 		}
 	}
@@ -2675,6 +2697,14 @@ int migrate_vma_setup(struct migrate_vma *args)
 }
 EXPORT_SYMBOL(migrate_vma_setup);
 
+/*
+ * This code closely matches the code in:
+ *   __handle_mm_fault()
+ *     handle_pte_fault()
+ *       do_anonymous_page()
+ * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
+ * private page.
+ */
 static void migrate_vma_insert_page(struct migrate_vma *migrate,
 				    unsigned long addr,
 				    struct page *page,
@@ -2755,30 +2785,24 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 
 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 
+	if (check_stable_address_space(mm))
+		goto unlock_abort;
+
 	if (pte_present(*ptep)) {
 		unsigned long pfn = pte_pfn(*ptep);
 
-		if (!is_zero_pfn(pfn)) {
-			pte_unmap_unlock(ptep, ptl);
-			mem_cgroup_cancel_charge(page, memcg, false);
-			goto abort;
-		}
+		if (!is_zero_pfn(pfn))
+			goto unlock_abort;
 		flush = true;
-	} else if (!pte_none(*ptep)) {
-		pte_unmap_unlock(ptep, ptl);
-		mem_cgroup_cancel_charge(page, memcg, false);
-		goto abort;
-	}
+	} else if (!pte_none(*ptep))
+		goto unlock_abort;
 
 	/*
-	 * Check for usefaultfd but do not deliver the fault. Instead,
+	 * Check for userfaultfd but do not deliver the fault. Instead,
 	 * just back off.
 	 */
-	if (userfaultfd_missing(vma)) {
-		pte_unmap_unlock(ptep, ptl);
-		mem_cgroup_cancel_charge(page, memcg, false);
-		goto abort;
-	}
+	if (userfaultfd_missing(vma))
+		goto unlock_abort;
 
 	inc_mm_counter(mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, addr, false);
@@ -2802,6 +2826,9 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 	*src = MIGRATE_PFN_MIGRATE;
 	return;
 
+unlock_abort:
+	pte_unmap_unlock(ptep, ptl);
+	mem_cgroup_cancel_charge(page, memcg, false);
 abort:
 	*src &= ~MIGRATE_PFN_MIGRATE;
 }
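
The migrate_vma_insert_page() change above folds three copies of "pte_unmap_unlock + mem_cgroup_cancel_charge + goto abort" into a single unlock_abort label. A generic, self-contained C sketch of that goto-unwinding idiom (the resources here are ordinary userspace ones, purely for illustration):

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *buf = NULL;
	FILE *f = fopen("/dev/null", "w");	/* "lock" stand-in */
	int err = -1;

	if (!f)
		goto abort;

	buf = malloc(64);
	if (!buf)
		goto unlock_abort;

	if (fputs("ok\n", f) < 0)
		goto unlock_abort;

	free(buf);
	fclose(f);
	return 0;

unlock_abort:
	free(buf);
	fclose(f);	/* the shared cleanup runs once, not in every branch */
abort:
	return err;
}

int main(void)
{
	return setup() ? 1 : 0;
}
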
@@ -2834,9 +2861,8 @@ void migrate_vma_pages(struct migrate_vma *migrate)
 		}
 
 		if (!page) {
-			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) {
+			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
 				continue;
-			}
 			if (!notified) {
 				notified = true;
 
diff --git a/mm/mincore.c b/mm/mincore.c
index 49b6fa2..0e6dd99 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -112,6 +112,7 @@ static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
 }
 
 static int mincore_unmapped_range(unsigned long addr, unsigned long end,
+				   __always_unused int depth,
 				   struct mm_walk *walk)
 {
 	walk->private += __mincore_unmapped_range(addr, end,
diff --git a/mm/mmap.c b/mm/mmap.c
index bc78854..6756b8b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1270,26 +1270,22 @@ static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_
  */
 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
 {
-	struct anon_vma *anon_vma;
-	struct vm_area_struct *near;
+	struct anon_vma *anon_vma = NULL;
 
-	near = vma->vm_next;
-	if (!near)
-		goto try_prev;
+	/* Try next first. */
+	if (vma->vm_next) {
+		anon_vma = reusable_anon_vma(vma->vm_next, vma, vma->vm_next);
+		if (anon_vma)
+			return anon_vma;
+	}
 
-	anon_vma = reusable_anon_vma(near, vma, near);
-	if (anon_vma)
-		return anon_vma;
-try_prev:
-	near = vma->vm_prev;
-	if (!near)
-		goto none;
+	/* Then try prev. */
+	if (vma->vm_prev)
+		anon_vma = reusable_anon_vma(vma->vm_prev, vma->vm_prev, vma);
 
-	anon_vma = reusable_anon_vma(near, near, vma);
-	if (anon_vma)
-		return anon_vma;
-none:
 	/*
+	 * We might reach here with anon_vma == NULL if we can't find
+	 * any reusable anon_vma.
 	 * There's no absolute need to look only at touching neighbours:
 	 * we could search further afield for "compatible" anon_vmas.
 	 * But it would probably just be a waste of time searching,
@@ -1297,7 +1293,7 @@ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
 	 * We're trying to allow mprotect remerging later on,
 	 * not trying to minimize memory used for anon_vmas.
 	 */
-	return NULL;
+	return anon_vma;
 }
 
 /*
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 7d70e5c..a3538cb2 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -11,7 +11,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 
 static bool tlb_next_batch(struct mmu_gather *tlb)
 {
@@ -69,7 +69,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 
 	VM_BUG_ON(!tlb->end);
 
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
 	VM_WARN_ON(tlb->page_size != page_size);
 #endif
 
@@ -89,58 +89,108 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 	return false;
 }
 
-#endif /* HAVE_MMU_GATHER_NO_GATHER */
+#endif /* MMU_GATHER_NO_GATHER */
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE
 
-/*
- * See the comment near struct mmu_table_batch.
- */
-
-/*
- * If we want tlb_remove_table() to imply TLB invalidates.
- */
-static inline void tlb_table_invalidate(struct mmu_gather *tlb)
+static void __tlb_remove_table_free(struct mmu_table_batch *batch)
 {
-#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
-	/*
-	 * Invalidate page-table caches used by hardware walkers. Then we still
-	 * need to RCU-sched wait while freeing the pages because software
-	 * walkers can still be in-flight.
-	 */
-	tlb_flush_mmu_tlbonly(tlb);
-#endif
+	int i;
+
+	for (i = 0; i < batch->nr; i++)
+		__tlb_remove_table(batch->tables[i]);
+
+	free_page((unsigned long)batch);
 }
 
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
+
+/*
+ * Semi RCU freeing of the page directories.
+ *
+ * This is needed by some architectures to implement software pagetable walkers.
+ *
+ * gup_fast() and other software pagetable walkers do a lockless page-table
+ * walk and therefore need some synchronization with the freeing of the page
+ * directories. The chosen means to accomplish that is by disabling IRQs over
+ * the walk.
+ *
+ * Architectures that use IPIs to flush TLBs will then automagically DTRT,
+ * since we unlink the page, flush TLBs, free the page. Since the disabling of
+ * IRQs delays the completion of the TLB flush we can never observe an already
+ * freed page.
+ *
+ * Architectures that do not have this (PPC) need to delay the freeing by some
+ * other means; this is that means.
+ *
+ * What we do is batch the freed directory pages (tables) and RCU free them.
+ * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
+ * holds off grace periods.
+ *
+ * However, in order to batch these pages we need to allocate storage, this
+ * allocation is deep inside the MM code and can thus easily fail on memory
+ * pressure. To guarantee progress we fall back to single table freeing, see
+ * the implementation of tlb_remove_table_one().
+ *
+ */
+
 static void tlb_remove_table_smp_sync(void *arg)
 {
 	/* Simply deliver the interrupt */
 }
 
-static void tlb_remove_table_one(void *table)
+static void tlb_remove_table_sync_one(void)
 {
 	/*
 	 * This isn't an RCU grace period and hence the page-tables cannot be
 	 * assumed to be actually RCU-freed.
 	 *
 	 * It is however sufficient for software page-table walkers that rely on
-	 * IRQ disabling. See the comment near struct mmu_table_batch.
+	 * IRQ disabling.
 	 */
 	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
-	__tlb_remove_table(table);
 }
 
 static void tlb_remove_table_rcu(struct rcu_head *head)
 {
-	struct mmu_table_batch *batch;
-	int i;
+	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
+}
 
-	batch = container_of(head, struct mmu_table_batch, rcu);
+static void tlb_remove_table_free(struct mmu_table_batch *batch)
+{
+	call_rcu(&batch->rcu, tlb_remove_table_rcu);
+}
 
-	for (i = 0; i < batch->nr; i++)
-		__tlb_remove_table(batch->tables[i]);
+#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */
 
-	free_page((unsigned long)batch);
+static void tlb_remove_table_sync_one(void) { }
+
+static void tlb_remove_table_free(struct mmu_table_batch *batch)
+{
+	__tlb_remove_table_free(batch);
+}
+
+#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
+
+/*
+ * If we want tlb_remove_table() to imply TLB invalidates.
+ */
+static inline void tlb_table_invalidate(struct mmu_gather *tlb)
+{
+	if (tlb_needs_table_invalidate()) {
+		/*
+		 * Invalidate page-table caches used by hardware walkers. Then
+		 * we still need to RCU-sched wait while freeing the pages
+		 * because software walkers can still be in-flight.
+		 */
+		tlb_flush_mmu_tlbonly(tlb);
+	}
+}
+
+static void tlb_remove_table_one(void *table)
+{
+	tlb_remove_table_sync_one();
+	__tlb_remove_table(table);
 }
 
 static void tlb_table_flush(struct mmu_gather *tlb)
@@ -149,7 +199,7 @@ static void tlb_table_flush(struct mmu_gather *tlb)
 
 	if (*batch) {
 		tlb_table_invalidate(tlb);
-		call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
+		tlb_remove_table_free(*batch);
 		*batch = NULL;
 	}
 }
@@ -173,14 +223,22 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 		tlb_table_flush(tlb);
 }
 
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+static inline void tlb_table_init(struct mmu_gather *tlb)
+{
+	tlb->batch = NULL;
+}
+
+#else /* !CONFIG_MMU_GATHER_TABLE_FREE */
+
+static inline void tlb_table_flush(struct mmu_gather *tlb) { }
+static inline void tlb_table_init(struct mmu_gather *tlb) { }
+
+#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
 
 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
-#endif
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 	tlb_batch_pages_flush(tlb);
 #endif
 }
@@ -211,7 +269,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 	/* Is it from 0 to ~0? */
 	tlb->fullmm     = !(start | (end+1));
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 	tlb->need_flush_all = 0;
 	tlb->local.next = NULL;
 	tlb->local.nr   = 0;
@@ -220,10 +278,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 	tlb->batch_count = 0;
 #endif
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb->batch = NULL;
-#endif
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	tlb_table_init(tlb);
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
 	tlb->page_size = 0;
 #endif
 
@@ -271,7 +327,7 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
 
 	tlb_flush_mmu(tlb);
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 	tlb_batch_list_free(tlb);
 #endif
 	dec_tlb_flush_pending(tlb->mm);
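
The mmu_gather rework keeps the overall shape the new comment describes: freed page-table pages are batched and handed to call_rcu(), and if no batch can be allocated the code falls back to freeing a single table synchronously after forcing walkers out (tlb_remove_table_one()). A userspace sketch of that batch-or-fallback pattern, with an ordinary function standing in for the RCU callback (everything below is illustrative, not kernel code):

#include <stdio.h>
#include <stdlib.h>

#define BATCH_MAX 4

struct batch {
	int nr;
	void *items[BATCH_MAX];
};

/* Stand-in for the deferred (RCU) free: here it simply runs immediately. */
static void deferred_free(struct batch *b)
{
	int i;

	for (i = 0; i < b->nr; i++)
		free(b->items[i]);
	free(b);
}

/* Stand-in for the synchronous fallback used when no batch is available. */
static void free_one_now(void *item)
{
	/* in the kernel this point would first synchronize with walkers */
	free(item);
}

static struct batch *cur;

static void remove_item(void *item)
{
	if (!cur) {
		cur = calloc(1, sizeof(*cur));
		if (!cur) {
			free_one_now(item);	/* allocation failed: fall back */
			return;
		}
	}
	cur->items[cur->nr++] = item;
	if (cur->nr == BATCH_MAX) {
		deferred_free(cur);
		cur = NULL;
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		remove_item(malloc(32));
	if (cur) {			/* flush the partial batch */
		deferred_free(cur);
		cur = NULL;
	}
	return 0;
}
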
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index f76ea05..ef3973a 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -29,12 +29,12 @@ struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
 #endif
 
 /*
- * The mmu notifier_mm structure is allocated and installed in
- * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
+ * The mmu_notifier_subscriptions structure is allocated and installed in
+ * mm->notifier_subscriptions inside the mm_take_all_locks() protected
  * critical section and it's released only when mm_count reaches zero
  * in mmdrop().
  */
-struct mmu_notifier_mm {
+struct mmu_notifier_subscriptions {
 	/* all mmu notifiers registered in this mm are queued in this list */
 	struct hlist_head list;
 	bool has_itree;
@@ -65,80 +65,81 @@ struct mmu_notifier_mm {
  *
  * The write side has two states, fully excluded:
  *  - mm->active_invalidate_ranges != 0
- *  - mnn->invalidate_seq & 1 == True (odd)
+ *  - subscriptions->invalidate_seq & 1 == True (odd)
  *  - some range on the mm_struct is being invalidated
  *  - the itree is not allowed to change
  *
  * And partially excluded:
  *  - mm->active_invalidate_ranges != 0
- *  - mnn->invalidate_seq & 1 == False (even)
+ *  - subscriptions->invalidate_seq & 1 == False (even)
  *  - some range on the mm_struct is being invalidated
  *  - the itree is allowed to change
  *
- * Operations on mmu_notifier_mm->invalidate_seq (under spinlock):
+ * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
  *    seq |= 1  # Begin writing
  *    seq++     # Release the writing state
  *    seq & 1   # True if a writer exists
  *
  * The later state avoids some expensive work on inv_end in the common case of
- * no mni monitoring the VA.
+ * no mmu_interval_notifier monitoring the VA.
  */
-static bool mn_itree_is_invalidating(struct mmu_notifier_mm *mmn_mm)
+static bool
+mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
 {
-	lockdep_assert_held(&mmn_mm->lock);
-	return mmn_mm->invalidate_seq & 1;
+	lockdep_assert_held(&subscriptions->lock);
+	return subscriptions->invalidate_seq & 1;
 }
 
 static struct mmu_interval_notifier *
-mn_itree_inv_start_range(struct mmu_notifier_mm *mmn_mm,
+mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
 			 const struct mmu_notifier_range *range,
 			 unsigned long *seq)
 {
 	struct interval_tree_node *node;
 	struct mmu_interval_notifier *res = NULL;
 
-	spin_lock(&mmn_mm->lock);
-	mmn_mm->active_invalidate_ranges++;
-	node = interval_tree_iter_first(&mmn_mm->itree, range->start,
+	spin_lock(&subscriptions->lock);
+	subscriptions->active_invalidate_ranges++;
+	node = interval_tree_iter_first(&subscriptions->itree, range->start,
 					range->end - 1);
 	if (node) {
-		mmn_mm->invalidate_seq |= 1;
+		subscriptions->invalidate_seq |= 1;
 		res = container_of(node, struct mmu_interval_notifier,
 				   interval_tree);
 	}
 
-	*seq = mmn_mm->invalidate_seq;
-	spin_unlock(&mmn_mm->lock);
+	*seq = subscriptions->invalidate_seq;
+	spin_unlock(&subscriptions->lock);
 	return res;
 }
 
 static struct mmu_interval_notifier *
-mn_itree_inv_next(struct mmu_interval_notifier *mni,
+mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
 		  const struct mmu_notifier_range *range)
 {
 	struct interval_tree_node *node;
 
-	node = interval_tree_iter_next(&mni->interval_tree, range->start,
-				       range->end - 1);
+	node = interval_tree_iter_next(&interval_sub->interval_tree,
+				       range->start, range->end - 1);
 	if (!node)
 		return NULL;
 	return container_of(node, struct mmu_interval_notifier, interval_tree);
 }
 
-static void mn_itree_inv_end(struct mmu_notifier_mm *mmn_mm)
+static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
 {
-	struct mmu_interval_notifier *mni;
+	struct mmu_interval_notifier *interval_sub;
 	struct hlist_node *next;
 
-	spin_lock(&mmn_mm->lock);
-	if (--mmn_mm->active_invalidate_ranges ||
-	    !mn_itree_is_invalidating(mmn_mm)) {
-		spin_unlock(&mmn_mm->lock);
+	spin_lock(&subscriptions->lock);
+	if (--subscriptions->active_invalidate_ranges ||
+	    !mn_itree_is_invalidating(subscriptions)) {
+		spin_unlock(&subscriptions->lock);
 		return;
 	}
 
 	/* Make invalidate_seq even */
-	mmn_mm->invalidate_seq++;
+	subscriptions->invalidate_seq++;
 
 	/*
 	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
@@ -146,30 +147,31 @@ static void mn_itree_inv_end(struct mmu_notifier_mm *mmn_mm)
 	 * they are progressed. This arrangement for tree updates is used to
 	 * avoid using a blocking lock during invalidate_range_start.
 	 */
-	hlist_for_each_entry_safe(mni, next, &mmn_mm->deferred_list,
+	hlist_for_each_entry_safe(interval_sub, next,
+				  &subscriptions->deferred_list,
 				  deferred_item) {
-		if (RB_EMPTY_NODE(&mni->interval_tree.rb))
-			interval_tree_insert(&mni->interval_tree,
-					     &mmn_mm->itree);
+		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
+			interval_tree_insert(&interval_sub->interval_tree,
+					     &subscriptions->itree);
 		else
-			interval_tree_remove(&mni->interval_tree,
-					     &mmn_mm->itree);
-		hlist_del(&mni->deferred_item);
+			interval_tree_remove(&interval_sub->interval_tree,
+					     &subscriptions->itree);
+		hlist_del(&interval_sub->deferred_item);
 	}
-	spin_unlock(&mmn_mm->lock);
+	spin_unlock(&subscriptions->lock);
 
-	wake_up_all(&mmn_mm->wq);
+	wake_up_all(&subscriptions->wq);
 }
 
 /**
  * mmu_interval_read_begin - Begin a read side critical section against a VA
  *                           range
- * mni: The range to use
+ * interval_sub: The interval subscription
  *
  * mmu_iterval_read_begin()/mmu_iterval_read_retry() implement a
- * collision-retry scheme similar to seqcount for the VA range under mni. If
- * the mm invokes invalidation during the critical section then
- * mmu_interval_read_retry() will return true.
+ * collision-retry scheme similar to seqcount for the VA range under
+ * subscription. If the mm invokes invalidation during the critical section
+ * then mmu_interval_read_retry() will return true.
  *
  * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
  * require a blocking context.  The critical region formed by this can sleep,
@@ -180,68 +182,71 @@ static void mn_itree_inv_end(struct mmu_notifier_mm *mmn_mm)
  *
  * The return value should be passed to mmu_interval_read_retry().
  */
-unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni)
+unsigned long
+mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
 {
-	struct mmu_notifier_mm *mmn_mm = mni->mm->mmu_notifier_mm;
+	struct mmu_notifier_subscriptions *subscriptions =
+		interval_sub->mm->notifier_subscriptions;
 	unsigned long seq;
 	bool is_invalidating;
 
 	/*
-	 * If the mni has a different seq value under the user_lock than we
-	 * started with then it has collided.
+	 * If the subscription has a different seq value under the user_lock
+	 * than we started with then it has collided.
 	 *
-	 * If the mni currently has the same seq value as the mmn_mm seq, then
-	 * it is currently between invalidate_start/end and is colliding.
+	 * If the subscription currently has the same seq value as the
+	 * subscriptions seq, then it is currently between
+	 * invalidate_start/end and is colliding.
 	 *
 	 * The locking looks broadly like this:
 	 *   mn_tree_invalidate_start():          mmu_interval_read_begin():
 	 *                                         spin_lock
-	 *                                          seq = READ_ONCE(mni->invalidate_seq);
-	 *                                          seq == mmn_mm->invalidate_seq
+	 *                                          seq = READ_ONCE(interval_sub->invalidate_seq);
+	 *                                          seq == subs->invalidate_seq
 	 *                                         spin_unlock
 	 *    spin_lock
-	 *     seq = ++mmn_mm->invalidate_seq
+	 *     seq = ++subscriptions->invalidate_seq
 	 *    spin_unlock
 	 *     op->invalidate_range():
 	 *       user_lock
 	 *        mmu_interval_set_seq()
-	 *         mni->invalidate_seq = seq
+	 *         interval_sub->invalidate_seq = seq
 	 *       user_unlock
 	 *
 	 *                          [Required: mmu_interval_read_retry() == true]
 	 *
 	 *   mn_itree_inv_end():
 	 *    spin_lock
-	 *     seq = ++mmn_mm->invalidate_seq
+	 *     seq = ++subscriptions->invalidate_seq
 	 *    spin_unlock
 	 *
 	 *                                        user_lock
 	 *                                         mmu_interval_read_retry():
-	 *                                          mni->invalidate_seq != seq
+	 *                                          interval_sub->invalidate_seq != seq
 	 *                                        user_unlock
 	 *
 	 * Barriers are not needed here as any races here are closed by an
 	 * eventual mmu_interval_read_retry(), which provides a barrier via the
 	 * user_lock.
 	 */
-	spin_lock(&mmn_mm->lock);
+	spin_lock(&subscriptions->lock);
 	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
-	seq = READ_ONCE(mni->invalidate_seq);
-	is_invalidating = seq == mmn_mm->invalidate_seq;
-	spin_unlock(&mmn_mm->lock);
+	seq = READ_ONCE(interval_sub->invalidate_seq);
+	is_invalidating = seq == subscriptions->invalidate_seq;
+	spin_unlock(&subscriptions->lock);
 
 	/*
-	 * mni->invalidate_seq must always be set to an odd value via
+	 * interval_sub->invalidate_seq must always be set to an odd value via
 	 * mmu_interval_set_seq() using the provided cur_seq from
 	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
 	 * will always clear the below sleep in some reasonable time as
-	 * mmn_mm->invalidate_seq is even in the idle state.
+	 * subscriptions->invalidate_seq is even in the idle state.
 	 */
 	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
 	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
 	if (is_invalidating)
-		wait_event(mmn_mm->wq,
-			   READ_ONCE(mmn_mm->invalidate_seq) != seq);
+		wait_event(subscriptions->wq,
+			   READ_ONCE(subscriptions->invalidate_seq) != seq);
 
 	/*
 	 * Notice that mmu_interval_read_retry() can already be true at this
@@ -253,7 +258,7 @@ unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni)
 }
 EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
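
The renamed kernel-doc above describes a seqcount-like collision-retry scheme: the reader samples a sequence number, rebuilds its state under its own lock, and retries if an invalidation bumped the sequence in the meantime. A single-threaded userspace sketch of that read-retry shape (the "invalidation" is simulated, and the real kernel version additionally sleeps while an invalidation is in flight):

#include <stdio.h>
#include <stdbool.h>

static unsigned long invalidate_seq;	/* bumped by writers/invalidations */
static int shadow_value;		/* what the reader rebuilds */

static unsigned long read_begin(void)
{
	return invalidate_seq;
}

static bool read_retry(unsigned long seq)
{
	return invalidate_seq != seq;	/* true if an invalidation intervened */
}

/* Simulated invalidation racing with the first read attempt only. */
static void maybe_invalidate(int pass)
{
	if (pass == 0) {
		invalidate_seq++;
		shadow_value = 0;
	}
}

int main(void)
{
	int pass = 0;

	for (;;) {
		unsigned long seq = read_begin();

		shadow_value = 42;		/* rebuild state, e.g. shadow PTEs */
		maybe_invalidate(pass++);

		if (!read_retry(seq))		/* collided? then redo the work */
			break;
	}
	printf("stable value: %d after %d pass(es)\n", shadow_value, pass);
	return 0;
}
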
 
-static void mn_itree_release(struct mmu_notifier_mm *mmn_mm,
+static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
 			     struct mm_struct *mm)
 {
 	struct mmu_notifier_range range = {
@@ -263,17 +268,20 @@ static void mn_itree_release(struct mmu_notifier_mm *mmn_mm,
 		.start = 0,
 		.end = ULONG_MAX,
 	};
-	struct mmu_interval_notifier *mni;
+	struct mmu_interval_notifier *interval_sub;
 	unsigned long cur_seq;
 	bool ret;
 
-	for (mni = mn_itree_inv_start_range(mmn_mm, &range, &cur_seq); mni;
-	     mni = mn_itree_inv_next(mni, &range)) {
-		ret = mni->ops->invalidate(mni, &range, cur_seq);
+	for (interval_sub =
+		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
+	     interval_sub;
+	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
+		ret = interval_sub->ops->invalidate(interval_sub, &range,
+						    cur_seq);
 		WARN_ON(!ret);
 	}
 
-	mn_itree_inv_end(mmn_mm);
+	mn_itree_inv_end(subscriptions);
 }
 
 /*
@@ -283,15 +291,15 @@ static void mn_itree_release(struct mmu_notifier_mm *mmn_mm,
  * in parallel despite there being no task using this mm any more,
  * through the vmas outside of the exit_mmap context, such as with
  * vmtruncate. This serializes against mmu_notifier_unregister with
- * the mmu_notifier_mm->lock in addition to SRCU and it serializes
- * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
+ * the notifier_subscriptions->lock in addition to SRCU and it serializes
+ * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
  * can't go away from under us as exit_mmap holds an mm_count pin
  * itself.
  */
-static void mn_hlist_release(struct mmu_notifier_mm *mmn_mm,
+static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
 			     struct mm_struct *mm)
 {
-	struct mmu_notifier *mn;
+	struct mmu_notifier *subscription;
 	int id;
 
 	/*
@@ -299,29 +307,29 @@ static void mn_hlist_release(struct mmu_notifier_mm *mmn_mm,
 	 * ->release returns.
 	 */
 	id = srcu_read_lock(&srcu);
-	hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist)
+	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist)
 		/*
 		 * If ->release runs before mmu_notifier_unregister it must be
 		 * handled, as it's the only way for the driver to flush all
 		 * existing sptes and stop the driver from establishing any more
 		 * sptes before all the pages in the mm are freed.
 		 */
-		if (mn->ops->release)
-			mn->ops->release(mn, mm);
+		if (subscription->ops->release)
+			subscription->ops->release(subscription, mm);
 
-	spin_lock(&mmn_mm->lock);
-	while (unlikely(!hlist_empty(&mmn_mm->list))) {
-		mn = hlist_entry(mmn_mm->list.first, struct mmu_notifier,
-				 hlist);
+	spin_lock(&subscriptions->lock);
+	while (unlikely(!hlist_empty(&subscriptions->list))) {
+		subscription = hlist_entry(subscriptions->list.first,
+					   struct mmu_notifier, hlist);
 		/*
 		 * We arrived before mmu_notifier_unregister so
 		 * mmu_notifier_unregister will do nothing other than to wait
 		 * for ->release to finish and for mmu_notifier_unregister to
 		 * return.
 		 */
-		hlist_del_init_rcu(&mn->hlist);
+		hlist_del_init_rcu(&subscription->hlist);
 	}
-	spin_unlock(&mmn_mm->lock);
+	spin_unlock(&subscriptions->lock);
 	srcu_read_unlock(&srcu, id);
 
 	/*
@@ -330,21 +338,22 @@ static void mn_hlist_release(struct mmu_notifier_mm *mmn_mm,
 	 * until the ->release method returns, if it was invoked by
 	 * mmu_notifier_unregister.
 	 *
-	 * The mmu_notifier_mm can't go away from under us because one mm_count
-	 * is held by exit_mmap.
+	 * The notifier_subscriptions can't go away from under us because
+	 * one mm_count is held by exit_mmap.
 	 */
 	synchronize_srcu(&srcu);
 }
 
 void __mmu_notifier_release(struct mm_struct *mm)
 {
-	struct mmu_notifier_mm *mmn_mm = mm->mmu_notifier_mm;
+	struct mmu_notifier_subscriptions *subscriptions =
+		mm->notifier_subscriptions;
 
-	if (mmn_mm->has_itree)
-		mn_itree_release(mmn_mm, mm);
+	if (subscriptions->has_itree)
+		mn_itree_release(subscriptions, mm);
 
-	if (!hlist_empty(&mmn_mm->list))
-		mn_hlist_release(mmn_mm, mm);
+	if (!hlist_empty(&subscriptions->list))
+		mn_hlist_release(subscriptions, mm);
 }
 
 /*
@@ -356,13 +365,15 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
 					unsigned long start,
 					unsigned long end)
 {
-	struct mmu_notifier *mn;
+	struct mmu_notifier *subscription;
 	int young = 0, id;
 
 	id = srcu_read_lock(&srcu);
-	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
-		if (mn->ops->clear_flush_young)
-			young |= mn->ops->clear_flush_young(mn, mm, start, end);
+	hlist_for_each_entry_rcu(subscription,
+				 &mm->notifier_subscriptions->list, hlist) {
+		if (subscription->ops->clear_flush_young)
+			young |= subscription->ops->clear_flush_young(
+				subscription, mm, start, end);
 	}
 	srcu_read_unlock(&srcu, id);
 
@@ -373,13 +384,15 @@ int __mmu_notifier_clear_young(struct mm_struct *mm,
 			       unsigned long start,
 			       unsigned long end)
 {
-	struct mmu_notifier *mn;
+	struct mmu_notifier *subscription;
 	int young = 0, id;
 
 	id = srcu_read_lock(&srcu);
-	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
-		if (mn->ops->clear_young)
-			young |= mn->ops->clear_young(mn, mm, start, end);
+	hlist_for_each_entry_rcu(subscription,
+				 &mm->notifier_subscriptions->list, hlist) {
+		if (subscription->ops->clear_young)
+			young |= subscription->ops->clear_young(subscription,
+								mm, start, end);
 	}
 	srcu_read_unlock(&srcu, id);
 
@@ -389,13 +402,15 @@ int __mmu_notifier_clear_young(struct mm_struct *mm,
 int __mmu_notifier_test_young(struct mm_struct *mm,
 			      unsigned long address)
 {
-	struct mmu_notifier *mn;
+	struct mmu_notifier *subscription;
 	int young = 0, id;
 
 	id = srcu_read_lock(&srcu);
-	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
-		if (mn->ops->test_young) {
-			young = mn->ops->test_young(mn, mm, address);
+	hlist_for_each_entry_rcu(subscription,
+				 &mm->notifier_subscriptions->list, hlist) {
+		if (subscription->ops->test_young) {
+			young = subscription->ops->test_young(subscription, mm,
+							      address);
 			if (young)
 				break;
 		}
@@ -408,28 +423,33 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
 void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
 			       pte_t pte)
 {
-	struct mmu_notifier *mn;
+	struct mmu_notifier *subscription;
 	int id;
 
 	id = srcu_read_lock(&srcu);
-	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
-		if (mn->ops->change_pte)
-			mn->ops->change_pte(mn, mm, address, pte);
+	hlist_for_each_entry_rcu(subscription,
+				 &mm->notifier_subscriptions->list, hlist) {
+		if (subscription->ops->change_pte)
+			subscription->ops->change_pte(subscription, mm, address,
+						      pte);
 	}
 	srcu_read_unlock(&srcu, id);
 }
 
-static int mn_itree_invalidate(struct mmu_notifier_mm *mmn_mm,
+static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
 			       const struct mmu_notifier_range *range)
 {
-	struct mmu_interval_notifier *mni;
+	struct mmu_interval_notifier *interval_sub;
 	unsigned long cur_seq;
 
-	for (mni = mn_itree_inv_start_range(mmn_mm, range, &cur_seq); mni;
-	     mni = mn_itree_inv_next(mni, range)) {
+	for (interval_sub =
+		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
+	     interval_sub;
+	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
 		bool ret;
 
-		ret = mni->ops->invalidate(mni, range, cur_seq);
+		ret = interval_sub->ops->invalidate(interval_sub, range,
+						    cur_seq);
 		if (!ret) {
 			if (WARN_ON(mmu_notifier_range_blockable(range)))
 				continue;
@@ -443,31 +463,36 @@ static int mn_itree_invalidate(struct mmu_notifier_mm *mmn_mm,
 	 * On -EAGAIN the non-blocking caller is not allowed to call
 	 * invalidate_range_end()
 	 */
-	mn_itree_inv_end(mmn_mm);
+	mn_itree_inv_end(subscriptions);
 	return -EAGAIN;
 }
 
-static int mn_hlist_invalidate_range_start(struct mmu_notifier_mm *mmn_mm,
-					   struct mmu_notifier_range *range)
+static int mn_hlist_invalidate_range_start(
+	struct mmu_notifier_subscriptions *subscriptions,
+	struct mmu_notifier_range *range)
 {
-	struct mmu_notifier *mn;
+	struct mmu_notifier *subscription;
 	int ret = 0;
 	int id;
 
 	id = srcu_read_lock(&srcu);
-	hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist) {
-		if (mn->ops->invalidate_range_start) {
+	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist) {
+		const struct mmu_notifier_ops *ops = subscription->ops;
+
+		if (ops->invalidate_range_start) {
 			int _ret;
 
 			if (!mmu_notifier_range_blockable(range))
 				non_block_start();
-			_ret = mn->ops->invalidate_range_start(mn, range);
+			_ret = ops->invalidate_range_start(subscription, range);
 			if (!mmu_notifier_range_blockable(range))
 				non_block_end();
 			if (_ret) {
 				pr_info("%pS callback failed with %d in %sblockable context.\n",
-					mn->ops->invalidate_range_start, _ret,
-					!mmu_notifier_range_blockable(range) ? "non-" : "");
+					ops->invalidate_range_start, _ret,
+					!mmu_notifier_range_blockable(range) ?
+						"non-" :
+						"");
 				WARN_ON(mmu_notifier_range_blockable(range) ||
 					_ret != -EAGAIN);
 				ret = _ret;
@@ -481,28 +506,29 @@ static int mn_hlist_invalidate_range_start(struct mmu_notifier_mm *mmn_mm,
 
 int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
 {
-	struct mmu_notifier_mm *mmn_mm = range->mm->mmu_notifier_mm;
+	struct mmu_notifier_subscriptions *subscriptions =
+		range->mm->notifier_subscriptions;
 	int ret;
 
-	if (mmn_mm->has_itree) {
-		ret = mn_itree_invalidate(mmn_mm, range);
+	if (subscriptions->has_itree) {
+		ret = mn_itree_invalidate(subscriptions, range);
 		if (ret)
 			return ret;
 	}
-	if (!hlist_empty(&mmn_mm->list))
-		return mn_hlist_invalidate_range_start(mmn_mm, range);
+	if (!hlist_empty(&subscriptions->list))
+		return mn_hlist_invalidate_range_start(subscriptions, range);
 	return 0;
 }
 
-static void mn_hlist_invalidate_end(struct mmu_notifier_mm *mmn_mm,
-				    struct mmu_notifier_range *range,
-				    bool only_end)
+static void
+mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
+			struct mmu_notifier_range *range, bool only_end)
 {
-	struct mmu_notifier *mn;
+	struct mmu_notifier *subscription;
 	int id;
 
 	id = srcu_read_lock(&srcu);
-	hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist) {
+	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist) {
 		/*
 		 * Call invalidate_range here too to avoid the need for the
 		 * subsystem of having to register an invalidate_range_end
@@ -516,14 +542,16 @@ static void mn_hlist_invalidate_end(struct mmu_notifier_mm *mmn_mm,
 		 * is safe to do when we know that a call to invalidate_range()
 		 * already happen under page table lock.
 		 */
-		if (!only_end && mn->ops->invalidate_range)
-			mn->ops->invalidate_range(mn, range->mm,
-						  range->start,
-						  range->end);
-		if (mn->ops->invalidate_range_end) {
+		if (!only_end && subscription->ops->invalidate_range)
+			subscription->ops->invalidate_range(subscription,
+							    range->mm,
+							    range->start,
+							    range->end);
+		if (subscription->ops->invalidate_range_end) {
 			if (!mmu_notifier_range_blockable(range))
 				non_block_start();
-			mn->ops->invalidate_range_end(mn, range);
+			subscription->ops->invalidate_range_end(subscription,
+								range);
 			if (!mmu_notifier_range_blockable(range))
 				non_block_end();
 		}
@@ -534,27 +562,30 @@ static void mn_hlist_invalidate_end(struct mmu_notifier_mm *mmn_mm,
 void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
 					 bool only_end)
 {
-	struct mmu_notifier_mm *mmn_mm = range->mm->mmu_notifier_mm;
+	struct mmu_notifier_subscriptions *subscriptions =
+		range->mm->notifier_subscriptions;
 
 	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
-	if (mmn_mm->has_itree)
-		mn_itree_inv_end(mmn_mm);
+	if (subscriptions->has_itree)
+		mn_itree_inv_end(subscriptions);
 
-	if (!hlist_empty(&mmn_mm->list))
-		mn_hlist_invalidate_end(mmn_mm, range, only_end);
+	if (!hlist_empty(&subscriptions->list))
+		mn_hlist_invalidate_end(subscriptions, range, only_end);
 	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
 }
 
 void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 				  unsigned long start, unsigned long end)
 {
-	struct mmu_notifier *mn;
+	struct mmu_notifier *subscription;
 	int id;
 
 	id = srcu_read_lock(&srcu);
-	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
-		if (mn->ops->invalidate_range)
-			mn->ops->invalidate_range(mn, mm, start, end);
+	hlist_for_each_entry_rcu(subscription,
+				 &mm->notifier_subscriptions->list, hlist) {
+		if (subscription->ops->invalidate_range)
+			subscription->ops->invalidate_range(subscription, mm,
+							    start, end);
 	}
 	srcu_read_unlock(&srcu, id);
 }
@@ -564,9 +595,10 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
  * write mode. A NULL mn signals the notifier is being registered for itree
  * mode.
  */
-int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
+int __mmu_notifier_register(struct mmu_notifier *subscription,
+			    struct mm_struct *mm)
 {
-	struct mmu_notifier_mm *mmu_notifier_mm = NULL;
+	struct mmu_notifier_subscriptions *subscriptions = NULL;
 	int ret;
 
 	lockdep_assert_held_write(&mm->mmap_sem);
@@ -579,23 +611,23 @@ int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
 		fs_reclaim_release(GFP_KERNEL);
 	}
 
-	if (!mm->mmu_notifier_mm) {
+	if (!mm->notifier_subscriptions) {
 		/*
 		 * kmalloc cannot be called under mm_take_all_locks(), but we
-		 * know that mm->mmu_notifier_mm can't change while we hold
-		 * the write side of the mmap_sem.
+		 * know that mm->notifier_subscriptions can't change while we
+		 * hold the write side of the mmap_sem.
 		 */
-		mmu_notifier_mm =
-			kzalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
-		if (!mmu_notifier_mm)
+		subscriptions = kzalloc(
+			sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
+		if (!subscriptions)
 			return -ENOMEM;
 
-		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
-		spin_lock_init(&mmu_notifier_mm->lock);
-		mmu_notifier_mm->invalidate_seq = 2;
-		mmu_notifier_mm->itree = RB_ROOT_CACHED;
-		init_waitqueue_head(&mmu_notifier_mm->wq);
-		INIT_HLIST_HEAD(&mmu_notifier_mm->deferred_list);
+		INIT_HLIST_HEAD(&subscriptions->list);
+		spin_lock_init(&subscriptions->lock);
+		subscriptions->invalidate_seq = 2;
+		subscriptions->itree = RB_ROOT_CACHED;
+		init_waitqueue_head(&subscriptions->wq);
+		INIT_HLIST_HEAD(&subscriptions->deferred_list);
 	}
 
 	ret = mm_take_all_locks(mm);
@@ -610,34 +642,36 @@ int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
 	 * We can't race against any other mmu notifier method either
 	 * thanks to mm_take_all_locks().
 	 *
-	 * release semantics on the initialization of the mmu_notifier_mm's
-	 * contents are provided for unlocked readers.  acquire can only be
-	 * used while holding the mmgrab or mmget, and is safe because once
-	 * created the mmu_notififer_mm is not freed until the mm is
-	 * destroyed.  As above, users holding the mmap_sem or one of the
+	 * release semantics on the initialization of the
+	 * mmu_notifier_subscriptions's contents are provided for unlocked
+	 * readers.  acquire can only be used while holding the mmgrab or
+	 * mmget, and is safe because once created the
+	 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
+	 * As above, users holding the mmap_sem or one of the
 	 * mm_take_all_locks() do not need to use acquire semantics.
 	 */
-	if (mmu_notifier_mm)
-		smp_store_release(&mm->mmu_notifier_mm, mmu_notifier_mm);
+	if (subscriptions)
+		smp_store_release(&mm->notifier_subscriptions, subscriptions);
 
-	if (mn) {
+	if (subscription) {
 		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
 		mmgrab(mm);
-		mn->mm = mm;
-		mn->users = 1;
+		subscription->mm = mm;
+		subscription->users = 1;
 
-		spin_lock(&mm->mmu_notifier_mm->lock);
-		hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
-		spin_unlock(&mm->mmu_notifier_mm->lock);
+		spin_lock(&mm->notifier_subscriptions->lock);
+		hlist_add_head_rcu(&subscription->hlist,
+				   &mm->notifier_subscriptions->list);
+		spin_unlock(&mm->notifier_subscriptions->lock);
 	} else
-		mm->mmu_notifier_mm->has_itree = true;
+		mm->notifier_subscriptions->has_itree = true;
 
 	mm_drop_all_locks(mm);
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return 0;
 
 out_clean:
-	kfree(mmu_notifier_mm);
+	kfree(subscriptions);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(__mmu_notifier_register);
@@ -658,15 +692,16 @@ EXPORT_SYMBOL_GPL(__mmu_notifier_register);
  * mmu_notifier_unregister() or mmu_notifier_put() must be always called to
  * unregister the notifier.
  *
- * While the caller has a mmu_notifier get the mn->mm pointer will remain
+ * While the caller has a mmu_notifier get, the subscription->mm pointer will remain
  * valid, and can be converted to an active mm pointer via mmget_not_zero().
  */
-int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
+int mmu_notifier_register(struct mmu_notifier *subscription,
+			  struct mm_struct *mm)
 {
 	int ret;
 
 	down_write(&mm->mmap_sem);
-	ret = __mmu_notifier_register(mn, mm);
+	ret = __mmu_notifier_register(subscription, mm);
 	up_write(&mm->mmap_sem);
 	return ret;
 }
@@ -675,21 +710,22 @@ EXPORT_SYMBOL_GPL(mmu_notifier_register);
 static struct mmu_notifier *
 find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
 {
-	struct mmu_notifier *mn;
+	struct mmu_notifier *subscription;
 
-	spin_lock(&mm->mmu_notifier_mm->lock);
-	hlist_for_each_entry_rcu (mn, &mm->mmu_notifier_mm->list, hlist) {
-		if (mn->ops != ops)
+	spin_lock(&mm->notifier_subscriptions->lock);
+	hlist_for_each_entry_rcu(subscription,
+				 &mm->notifier_subscriptions->list, hlist) {
+		if (subscription->ops != ops)
 			continue;
 
-		if (likely(mn->users != UINT_MAX))
-			mn->users++;
+		if (likely(subscription->users != UINT_MAX))
+			subscription->users++;
 		else
-			mn = ERR_PTR(-EOVERFLOW);
-		spin_unlock(&mm->mmu_notifier_mm->lock);
-		return mn;
+			subscription = ERR_PTR(-EOVERFLOW);
+		spin_unlock(&mm->notifier_subscriptions->lock);
+		return subscription;
 	}
-	spin_unlock(&mm->mmu_notifier_mm->lock);
+	spin_unlock(&mm->notifier_subscriptions->lock);
 	return NULL;
 }
 
@@ -713,37 +749,37 @@ find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
 struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
 					     struct mm_struct *mm)
 {
-	struct mmu_notifier *mn;
+	struct mmu_notifier *subscription;
 	int ret;
 
 	lockdep_assert_held_write(&mm->mmap_sem);
 
-	if (mm->mmu_notifier_mm) {
-		mn = find_get_mmu_notifier(mm, ops);
-		if (mn)
-			return mn;
+	if (mm->notifier_subscriptions) {
+		subscription = find_get_mmu_notifier(mm, ops);
+		if (subscription)
+			return subscription;
 	}
 
-	mn = ops->alloc_notifier(mm);
-	if (IS_ERR(mn))
-		return mn;
-	mn->ops = ops;
-	ret = __mmu_notifier_register(mn, mm);
+	subscription = ops->alloc_notifier(mm);
+	if (IS_ERR(subscription))
+		return subscription;
+	subscription->ops = ops;
+	ret = __mmu_notifier_register(subscription, mm);
 	if (ret)
 		goto out_free;
-	return mn;
+	return subscription;
 out_free:
-	mn->ops->free_notifier(mn);
+	subscription->ops->free_notifier(subscription);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);
 
 /* this is called after the last mmu_notifier_unregister() returned */
-void __mmu_notifier_mm_destroy(struct mm_struct *mm)
+void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 {
-	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
-	kfree(mm->mmu_notifier_mm);
-	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
+	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
+	kfree(mm->notifier_subscriptions);
+	mm->notifier_subscriptions = LIST_POISON1; /* debug */
 }
 
 /*
@@ -756,11 +792,12 @@ void __mmu_notifier_mm_destroy(struct mm_struct *mm)
  * and only after mmu_notifier_unregister returned we're guaranteed
  * that ->release or any other method can't run anymore.
  */
-void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+void mmu_notifier_unregister(struct mmu_notifier *subscription,
+			     struct mm_struct *mm)
 {
 	BUG_ON(atomic_read(&mm->mm_count) <= 0);
 
-	if (!hlist_unhashed(&mn->hlist)) {
+	if (!hlist_unhashed(&subscription->hlist)) {
 		/*
 		 * SRCU here will force exit_mmap to wait for ->release to
 		 * finish before freeing the pages.
@@ -772,17 +809,17 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
 		 * exit_mmap will block in mmu_notifier_release to guarantee
 		 * that ->release is called before freeing the pages.
 		 */
-		if (mn->ops->release)
-			mn->ops->release(mn, mm);
+		if (subscription->ops->release)
+			subscription->ops->release(subscription, mm);
 		srcu_read_unlock(&srcu, id);
 
-		spin_lock(&mm->mmu_notifier_mm->lock);
+		spin_lock(&mm->notifier_subscriptions->lock);
 		/*
 		 * Can not use list_del_rcu() since __mmu_notifier_release
 		 * can delete it before we hold the lock.
 		 */
-		hlist_del_init_rcu(&mn->hlist);
-		spin_unlock(&mm->mmu_notifier_mm->lock);
+		hlist_del_init_rcu(&subscription->hlist);
+		spin_unlock(&mm->notifier_subscriptions->lock);
 	}
 
 	/*
@@ -799,10 +836,11 @@ EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
 
 static void mmu_notifier_free_rcu(struct rcu_head *rcu)
 {
-	struct mmu_notifier *mn = container_of(rcu, struct mmu_notifier, rcu);
-	struct mm_struct *mm = mn->mm;
+	struct mmu_notifier *subscription =
+		container_of(rcu, struct mmu_notifier, rcu);
+	struct mm_struct *mm = subscription->mm;
 
-	mn->ops->free_notifier(mn);
+	subscription->ops->free_notifier(subscription);
 	/* Pairs with the get in __mmu_notifier_register() */
 	mmdrop(mm);
 }
@@ -829,39 +867,40 @@ static void mmu_notifier_free_rcu(struct rcu_head *rcu)
  * Modules calling this function must call mmu_notifier_synchronize() in
  * their __exit functions to ensure the async work is completed.
  */
-void mmu_notifier_put(struct mmu_notifier *mn)
+void mmu_notifier_put(struct mmu_notifier *subscription)
 {
-	struct mm_struct *mm = mn->mm;
+	struct mm_struct *mm = subscription->mm;
 
-	spin_lock(&mm->mmu_notifier_mm->lock);
-	if (WARN_ON(!mn->users) || --mn->users)
+	spin_lock(&mm->notifier_subscriptions->lock);
+	if (WARN_ON(!subscription->users) || --subscription->users)
 		goto out_unlock;
-	hlist_del_init_rcu(&mn->hlist);
-	spin_unlock(&mm->mmu_notifier_mm->lock);
+	hlist_del_init_rcu(&subscription->hlist);
+	spin_unlock(&mm->notifier_subscriptions->lock);
 
-	call_srcu(&srcu, &mn->rcu, mmu_notifier_free_rcu);
+	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
 	return;
 
 out_unlock:
-	spin_unlock(&mm->mmu_notifier_mm->lock);
+	spin_unlock(&mm->notifier_subscriptions->lock);
 }
 EXPORT_SYMBOL_GPL(mmu_notifier_put);
 
 static int __mmu_interval_notifier_insert(
-	struct mmu_interval_notifier *mni, struct mm_struct *mm,
-	struct mmu_notifier_mm *mmn_mm, unsigned long start,
+	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
+	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
 	unsigned long length, const struct mmu_interval_notifier_ops *ops)
 {
-	mni->mm = mm;
-	mni->ops = ops;
-	RB_CLEAR_NODE(&mni->interval_tree.rb);
-	mni->interval_tree.start = start;
+	interval_sub->mm = mm;
+	interval_sub->ops = ops;
+	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
+	interval_sub->interval_tree.start = start;
 	/*
 	 * Note that the representation of the intervals in the interval tree
 	 * considers the ending point as contained in the interval.
 	 */
 	if (length == 0 ||
-	    check_add_overflow(start, length - 1, &mni->interval_tree.last))
+	    check_add_overflow(start, length - 1,
+			       &interval_sub->interval_tree.last))
 		return -EOVERFLOW;
 
 	/* Must call with a mmget() held */
@@ -881,38 +920,40 @@ static int __mmu_interval_notifier_insert(
 	 * possibility for live lock, instead defer the add to
 	 * mn_itree_inv_end() so this algorithm is deterministic.
 	 *
-	 * In all cases the value for the mni->invalidate_seq should be
+	 * In all cases the value for the interval_sub->invalidate_seq should be
 	 * odd, see mmu_interval_read_begin()
 	 */
-	spin_lock(&mmn_mm->lock);
-	if (mmn_mm->active_invalidate_ranges) {
-		if (mn_itree_is_invalidating(mmn_mm))
-			hlist_add_head(&mni->deferred_item,
-				       &mmn_mm->deferred_list);
+	spin_lock(&subscriptions->lock);
+	if (subscriptions->active_invalidate_ranges) {
+		if (mn_itree_is_invalidating(subscriptions))
+			hlist_add_head(&interval_sub->deferred_item,
+				       &subscriptions->deferred_list);
 		else {
-			mmn_mm->invalidate_seq |= 1;
-			interval_tree_insert(&mni->interval_tree,
-					     &mmn_mm->itree);
+			subscriptions->invalidate_seq |= 1;
+			interval_tree_insert(&interval_sub->interval_tree,
+					     &subscriptions->itree);
 		}
-		mni->invalidate_seq = mmn_mm->invalidate_seq;
+		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
 	} else {
-		WARN_ON(mn_itree_is_invalidating(mmn_mm));
+		WARN_ON(mn_itree_is_invalidating(subscriptions));
 		/*
-		 * The starting seq for a mni not under invalidation should be
-		 * odd, not equal to the current invalidate_seq and
+		 * The starting seq for a subscription not under invalidation
+		 * should be odd, not equal to the current invalidate_seq and
 		 * invalidate_seq should not 'wrap' to the new seq any time
 		 * soon.
 		 */
-		mni->invalidate_seq = mmn_mm->invalidate_seq - 1;
-		interval_tree_insert(&mni->interval_tree, &mmn_mm->itree);
+		interval_sub->invalidate_seq =
+			subscriptions->invalidate_seq - 1;
+		interval_tree_insert(&interval_sub->interval_tree,
+				     &subscriptions->itree);
 	}
-	spin_unlock(&mmn_mm->lock);
+	spin_unlock(&subscriptions->lock);
 	return 0;
 }
 
 /**
  * mmu_interval_notifier_insert - Insert an interval notifier
- * @mni: Interval notifier to register
+ * @interval_sub: Interval subscription to register
  * @start: Starting virtual address to monitor
  * @length: Length of the range to monitor
  * @mm : mm_struct to attach to
@@ -925,53 +966,53 @@ static int __mmu_interval_notifier_insert(
  * The caller must use the normal interval notifier read flow via
  * mmu_interval_read_begin() to establish SPTEs for this range.
  */
-int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
+int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
 				 struct mm_struct *mm, unsigned long start,
 				 unsigned long length,
 				 const struct mmu_interval_notifier_ops *ops)
 {
-	struct mmu_notifier_mm *mmn_mm;
+	struct mmu_notifier_subscriptions *subscriptions;
 	int ret;
 
 	might_lock(&mm->mmap_sem);
 
-	mmn_mm = smp_load_acquire(&mm->mmu_notifier_mm);
-	if (!mmn_mm || !mmn_mm->has_itree) {
+	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
+	if (!subscriptions || !subscriptions->has_itree) {
 		ret = mmu_notifier_register(NULL, mm);
 		if (ret)
 			return ret;
-		mmn_mm = mm->mmu_notifier_mm;
+		subscriptions = mm->notifier_subscriptions;
 	}
-	return __mmu_interval_notifier_insert(mni, mm, mmn_mm, start, length,
-					      ops);
+	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
+					      start, length, ops);
 }
 EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);
 
 int mmu_interval_notifier_insert_locked(
-	struct mmu_interval_notifier *mni, struct mm_struct *mm,
+	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
 	unsigned long start, unsigned long length,
 	const struct mmu_interval_notifier_ops *ops)
 {
-	struct mmu_notifier_mm *mmn_mm;
+	struct mmu_notifier_subscriptions *subscriptions =
+		mm->notifier_subscriptions;
 	int ret;
 
 	lockdep_assert_held_write(&mm->mmap_sem);
 
-	mmn_mm = mm->mmu_notifier_mm;
-	if (!mmn_mm || !mmn_mm->has_itree) {
+	if (!subscriptions || !subscriptions->has_itree) {
 		ret = __mmu_notifier_register(NULL, mm);
 		if (ret)
 			return ret;
-		mmn_mm = mm->mmu_notifier_mm;
+		subscriptions = mm->notifier_subscriptions;
 	}
-	return __mmu_interval_notifier_insert(mni, mm, mmn_mm, start, length,
-					      ops);
+	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
+					      start, length, ops);
 }
 EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
 
 /**
  * mmu_interval_notifier_remove - Remove an interval notifier
- * @mni: Interval notifier to unregister
+ * @interval_sub: Interval subscription to unregister
  *
  * This function must be paired with mmu_interval_notifier_insert(). It cannot
  * be called from any ops callback.
@@ -979,32 +1020,34 @@ EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
  * Once this returns ops callbacks are no longer running on other CPUs and
  * will not be called in future.
  */
-void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni)
+void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
 {
-	struct mm_struct *mm = mni->mm;
-	struct mmu_notifier_mm *mmn_mm = mm->mmu_notifier_mm;
+	struct mm_struct *mm = interval_sub->mm;
+	struct mmu_notifier_subscriptions *subscriptions =
+		mm->notifier_subscriptions;
 	unsigned long seq = 0;
 
 	might_sleep();
 
-	spin_lock(&mmn_mm->lock);
-	if (mn_itree_is_invalidating(mmn_mm)) {
+	spin_lock(&subscriptions->lock);
+	if (mn_itree_is_invalidating(subscriptions)) {
 		/*
 		 * remove is being called after insert put this on the
 		 * deferred list, but before the deferred list was processed.
 		 */
-		if (RB_EMPTY_NODE(&mni->interval_tree.rb)) {
-			hlist_del(&mni->deferred_item);
+		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
+			hlist_del(&interval_sub->deferred_item);
 		} else {
-			hlist_add_head(&mni->deferred_item,
-				       &mmn_mm->deferred_list);
-			seq = mmn_mm->invalidate_seq;
+			hlist_add_head(&interval_sub->deferred_item,
+				       &subscriptions->deferred_list);
+			seq = subscriptions->invalidate_seq;
 		}
 	} else {
-		WARN_ON(RB_EMPTY_NODE(&mni->interval_tree.rb));
-		interval_tree_remove(&mni->interval_tree, &mmn_mm->itree);
+		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
+		interval_tree_remove(&interval_sub->interval_tree,
+				     &subscriptions->itree);
 	}
-	spin_unlock(&mmn_mm->lock);
+	spin_unlock(&subscriptions->lock);
 
 	/*
 	 * The possible sleep on progress in the invalidation requires the
@@ -1013,8 +1056,8 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni)
 	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
 	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
 	if (seq)
-		wait_event(mmn_mm->wq,
-			   READ_ONCE(mmn_mm->invalidate_seq) != seq);
+		wait_event(subscriptions->wq,
+			   READ_ONCE(subscriptions->invalidate_seq) != seq);
 
 	/* pairs with mmgrab in mmu_interval_notifier_insert() */
 	mmdrop(mm);
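
For readers tracking the rename above (mn -> subscription, mmu_notifier_mm -> mmu_notifier_subscriptions), here is a minimal sketch of how a driver might consume the interval-notifier side of this API. It is illustrative only; my_range, my_ops and the mutex are made-up names, not part of this series.

	#include <linux/mmu_notifier.h>

	struct my_range {
		struct mmu_interval_notifier notifier;	/* the "interval_sub" in the new naming */
		struct mutex lock;			/* hypothetical driver lock */
	};

	static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
				  const struct mmu_notifier_range *range,
				  unsigned long cur_seq)
	{
		struct my_range *r = container_of(interval_sub, struct my_range, notifier);

		/* Non-blockable invalidations must not sleep; ask the core to retry. */
		if (mmu_notifier_range_blockable(range))
			mutex_lock(&r->lock);
		else if (!mutex_trylock(&r->lock))
			return false;

		mmu_interval_set_seq(interval_sub, cur_seq);	/* publish the new sequence */
		/* ... tear down device mappings covering [range->start, range->end) ... */
		mutex_unlock(&r->lock);
		return true;
	}

	static const struct mmu_interval_notifier_ops my_ops = {
		.invalidate = my_invalidate,
	};

	/* Registration, e.g. from an ioctl with mm == current->mm:
	 *	mmu_interval_notifier_insert(&r->notifier, mm, start, length, &my_ops);
	 */
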
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index d58c481..dfc3576 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -26,6 +26,7 @@
 #include <linux/sched/mm.h>
 #include <linux/sched/coredump.h>
 #include <linux/sched/task.h>
+#include <linux/sched/debug.h>
 #include <linux/swap.h>
 #include <linux/timex.h>
 #include <linux/jiffies.h>
@@ -620,6 +621,7 @@ static void oom_reap_task(struct task_struct *tsk)
 
 	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
 		task_pid_nr(tsk), tsk->comm);
+	sched_show_task(tsk);
 	debug_show_all_locks();
 
 done:
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d047bf7..3c4eb75 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5848,6 +5848,23 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
 	return false;
 }
 
+#ifdef CONFIG_SPARSEMEM
+/* Skip PFNs that belong to non-present sections */
+static inline __meminit unsigned long next_pfn(unsigned long pfn)
+{
+	const unsigned long section_nr = pfn_to_section_nr(++pfn);
+
+	if (present_section_nr(section_nr))
+		return pfn;
+	return section_nr_to_pfn(next_present_section_nr(section_nr));
+}
+#else
+static inline __meminit unsigned long next_pfn(unsigned long pfn)
+{
+	return pfn++;
+	return pfn + 1;
+#endif
+
 /*
  * Initially all pages are reserved - free ones are freed
  * up by memblock_free_all() once the early boot process is
@@ -5881,16 +5898,20 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 	}
 #endif
 
-	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+	for (pfn = start_pfn; pfn < end_pfn; ) {
 		/*
 		 * There can be holes in boot-time mem_map[]s handed to this
 		 * function.  They do not exist on hotplugged memory.
 		 */
 		if (context == MEMMAP_EARLY) {
-			if (!early_pfn_valid(pfn))
+			if (!early_pfn_valid(pfn)) {
+				pfn = next_pfn(pfn);
 				continue;
-			if (!early_pfn_in_nid(pfn, nid))
+			}
+			if (!early_pfn_in_nid(pfn, nid)) {
+				pfn++;
 				continue;
+			}
 			if (overlap_memmap_init(zone, &pfn))
 				continue;
 			if (defer_init(nid, pfn, end_pfn))
@@ -5918,16 +5939,17 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 			cond_resched();
 		}
+		pfn++;
 	}
 }
 
 #ifdef CONFIG_ZONE_DEVICE
 void __ref memmap_init_zone_device(struct zone *zone,
 				   unsigned long start_pfn,
-				   unsigned long size,
+				   unsigned long nr_pages,
 				   struct dev_pagemap *pgmap)
 {
-	unsigned long pfn, end_pfn = start_pfn + size;
+	unsigned long pfn, end_pfn = start_pfn + nr_pages;
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
 	unsigned long zone_idx = zone_idx(zone);
@@ -5944,7 +5966,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
 	 */
 	if (altmap) {
 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
-		size = end_pfn - start_pfn;
+		nr_pages = end_pfn - start_pfn;
 	}
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
@@ -5991,7 +6013,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
 	}
 
 	pr_info("%s initialised %lu pages in %ums\n", __func__,
-		size, jiffies_to_msecs(jiffies - start));
+		nr_pages, jiffies_to_msecs(jiffies - start));
 }
 
 #endif
@@ -6890,10 +6912,10 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
 
 #if !defined(CONFIG_FLAT_NODE_MEM_MAP)
 /*
- * Zero all valid struct pages in range [spfn, epfn), return number of struct
- * pages zeroed
+ * Initialize all valid struct pages in the range [spfn, epfn) and mark them
+ * PageReserved(). Return the number of struct pages that were initialized.
  */
-static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
+static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
 {
 	unsigned long pfn;
 	u64 pgcnt = 0;
@@ -6904,7 +6926,13 @@ static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
 				+ pageblock_nr_pages - 1;
 			continue;
 		}
-		mm_zero_struct_page(pfn_to_page(pfn));
+		/*
+		 * Use a fake node/zone (0) for now. Some of these pages
+		 * (in memblock.reserved but not in memblock.memory) will
+		 * get re-initialized via reserve_bootmem_region() later.
+		 */
+		__init_single_page(pfn_to_page(pfn), pfn, 0, 0);
+		__SetPageReserved(pfn_to_page(pfn));
 		pgcnt++;
 	}
 
@@ -6916,14 +6944,15 @@ static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
  * initialized by going through __init_single_page(). But, there are some
  * struct pages which are reserved in memblock allocator and their fields
  * may be accessed (for example page_to_pfn() on some configuration accesses
- * flags). We must explicitly zero those struct pages.
+ * flags). We must explicitly initialize those struct pages.
  *
  * This function also addresses a similar issue where struct pages are left
  * uninitialized because the physical address range is not covered by
  * memblock.memory or memblock.reserved. That could happen when memblock
- * layout is manually configured via memmap=.
+ * layout is manually configured via memmap=, or when the highest physical
+ * address (max_pfn) does not end on a section boundary.
  */
-void __init zero_resv_unavail(void)
+static void __init init_unavailable_mem(void)
 {
 	phys_addr_t start, end;
 	u64 i, pgcnt;
@@ -6936,10 +6965,20 @@ void __init zero_resv_unavail(void)
 	for_each_mem_range(i, &memblock.memory, NULL,
 			NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) {
 		if (next < start)
-			pgcnt += zero_pfn_range(PFN_DOWN(next), PFN_UP(start));
+			pgcnt += init_unavailable_range(PFN_DOWN(next),
+							PFN_UP(start));
 		next = end;
 	}
-	pgcnt += zero_pfn_range(PFN_DOWN(next), max_pfn);
+
+	/*
+	 * Early sections always have a fully populated memmap for the whole
+	 * section - see pfn_valid(). If the last section has holes at the
+	 * end and that section is marked "online", the memmap will be
+	 * considered initialized. Make sure that memmap has a well defined
+	 * state.
+	 */
+	pgcnt += init_unavailable_range(PFN_DOWN(next),
+					round_up(max_pfn, PAGES_PER_SECTION));
 
 	/*
 	 * Struct pages that do not have backing memory. This could be because
@@ -6948,6 +6987,10 @@ void __init zero_resv_unavail(void)
 	if (pgcnt)
 		pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
 }
+#else
+static inline void __init init_unavailable_mem(void)
+{
+}
 #endif /* !CONFIG_FLAT_NODE_MEM_MAP */
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
@@ -7377,7 +7420,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	/* Initialise every node */
 	mminit_verify_pageflags_layout();
 	setup_nr_node_ids();
-	zero_resv_unavail();
+	init_unavailable_mem();
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
 		free_area_init_node(nid, NULL,
@@ -7572,7 +7615,7 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 
 void __init free_area_init(unsigned long *zones_size)
 {
-	zero_resv_unavail();
+	init_unavailable_mem();
 	free_area_init_node(0, zones_size,
 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }
@@ -8154,20 +8197,22 @@ void *__init alloc_large_system_hash(const char *tablename,
 
 /*
  * This function checks whether pageblock includes unmovable pages or not.
- * If @count is not zero, it is okay to include less @count unmovable pages
  *
  * PageLRU check without isolation or lru_lock could race so that
  * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
  * check without lock_page also may miss some movable non-lru pages at
  * race condition. So you can't expect this function should be exact.
+ *
+ * Returns a page without holding a reference. If the caller wants to
+ * dereference that page (e.g., dumping), it has to make sure that it
+ * cannot get removed (e.g., via memory unplug) concurrently.
+ *
  */
-bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
-			 int migratetype, int flags)
+struct page *has_unmovable_pages(struct zone *zone, struct page *page,
+				 int migratetype, int flags)
 {
-	unsigned long found;
 	unsigned long iter = 0;
 	unsigned long pfn = page_to_pfn(page);
-	const char *reason = "unmovable page";
 
 	/*
 	 * TODO we could make this much more efficient by not checking every
@@ -8184,22 +8229,19 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		 * so consider them movable here.
 		 */
 		if (is_migrate_cma(migratetype))
-			return false;
+			return NULL;
 
-		reason = "CMA page";
-		goto unmovable;
+		return page;
 	}
 
-	for (found = 0; iter < pageblock_nr_pages; iter++) {
-		unsigned long check = pfn + iter;
-
-		if (!pfn_valid_within(check))
+	for (; iter < pageblock_nr_pages; iter++) {
+		if (!pfn_valid_within(pfn + iter))
 			continue;
 
-		page = pfn_to_page(check);
+		page = pfn_to_page(pfn + iter);
 
 		if (PageReserved(page))
-			goto unmovable;
+			return page;
 
 		/*
 		 * If the zone is movable and we have ruled out all reserved
@@ -8219,7 +8261,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 			unsigned int skip_pages;
 
 			if (!hugepage_migration_supported(page_hstate(head)))
-				goto unmovable;
+				return page;
 
 			skip_pages = compound_nr(head) - (page - head);
 			iter += skip_pages - 1;
@@ -8245,11 +8287,9 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
 			continue;
 
-		if (__PageMovable(page))
+		if (__PageMovable(page) || PageLRU(page))
 			continue;
 
-		if (!PageLRU(page))
-			found++;
 		/*
 		 * If there are RECLAIMABLE pages, we need to check
 		 * it.  But now, memory offline itself doesn't call
@@ -8263,15 +8303,9 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		 * is set to both of a memory hole page and a _used_ kernel
 		 * page at boot.
 		 */
-		if (found > count)
-			goto unmovable;
+		return page;
 	}
-	return false;
-unmovable:
-	WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
-	if (flags & REPORT_FAILURE)
-		dump_page(pfn_to_page(pfn + iter), reason);
-	return true;
+	return NULL;
 }
 
 #ifdef CONFIG_CONTIG_ALLOC
@@ -8675,10 +8709,6 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 		BUG_ON(!PageBuddy(page));
 		order = page_order(page);
 		offlined_pages += 1 << order;
-#ifdef CONFIG_DEBUG_VM
-		pr_info("remove from free list %lx %d %lx\n",
-			pfn, 1 << order, end_pfn);
-#endif
 		del_page_from_free_area(page, &zone->free_area[order]);
 		pfn += (1 << order);
 	}
diff --git a/mm/page_io.c b/mm/page_io.c
index 3a198de..76965be 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -177,8 +177,9 @@ int generic_swapfile_activate(struct swap_info_struct *sis,
 
 		cond_resched();
 
-		first_block = bmap(inode, probe_block);
-		if (first_block == 0)
+		first_block = probe_block;
+		ret = bmap(inode, &first_block);
+		if (ret || !first_block)
 			goto bad_bmap;
 
 		/*
@@ -193,9 +194,11 @@ int generic_swapfile_activate(struct swap_info_struct *sis,
 					block_in_page++) {
 			sector_t block;
 
-			block = bmap(inode, probe_block + block_in_page);
-			if (block == 0)
+			block = probe_block + block_in_page;
+			ret = bmap(inode, &block);
+			if (ret || !block)
 				goto bad_bmap;
+
 			if (block != first_block + block_in_page) {
 				/* Discontiguity */
 				probe_block++;
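
For context, the change above follows the new fs-level prototype, int bmap(struct inode *inode, sector_t *block), which reports the absence of an ->bmap method as an error instead of overloading a zero block. A hedged sketch of a caller (probe is an illustrative logical block number):

	sector_t block = probe;		/* in: logical block; out: physical block, 0 for a hole */
	int err = bmap(inode, &block);

	if (err || !block)		/* error when the fs has no ->bmap, zero block for a hole */
		return -EINVAL;
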
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 04ee166..a9fd7c7 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -17,10 +17,9 @@
 
 static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
 {
+	struct page *unmovable = NULL;
 	struct zone *zone;
-	unsigned long flags, pfn;
-	struct memory_isolate_notify arg;
-	int notifier_ret;
+	unsigned long flags;
 	int ret = -EBUSY;
 
 	zone = page_zone(page);
@@ -35,41 +34,12 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
 	if (is_migrate_isolate_page(page))
 		goto out;
 
-	pfn = page_to_pfn(page);
-	arg.start_pfn = pfn;
-	arg.nr_pages = pageblock_nr_pages;
-	arg.pages_found = 0;
-
-	/*
-	 * It may be possible to isolate a pageblock even if the
-	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
-	 * notifier chain is used by balloon drivers to return the
-	 * number of pages in a range that are held by the balloon
-	 * driver to shrink memory. If all the pages are accounted for
-	 * by balloons, are free, or on the LRU, isolation can continue.
-	 * Later, for example, when memory hotplug notifier runs, these
-	 * pages reported as "can be isolated" should be isolated(freed)
-	 * by the balloon driver through the memory notifier chain.
-	 */
-	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
-	notifier_ret = notifier_to_errno(notifier_ret);
-	if (notifier_ret)
-		goto out;
 	/*
 	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
 	 * We just check MOVABLE pages.
 	 */
-	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
-				 isol_flags))
-		ret = 0;
-
-	/*
-	 * immobile means "not-on-lru" pages. If immobile is larger than
-	 * removable-by-driver pages reported by notifier, we'll fail.
-	 */
-
-out:
-	if (!ret) {
+	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
+	if (!unmovable) {
 		unsigned long nr_pages;
 		int mt = get_pageblock_migratetype(page);
 
@@ -79,11 +49,24 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
 									NULL);
 
 		__mod_zone_freepage_state(zone, -nr_pages, mt);
+		ret = 0;
 	}
 
+out:
 	spin_unlock_irqrestore(&zone->lock, flags);
-	if (!ret)
+	if (!ret) {
 		drain_all_pages(zone);
+	} else {
+		WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
+
+		if ((isol_flags & REPORT_FAILURE) && unmovable)
+			/*
+			 * printk() with zone->lock held will likely trigger a
+			 * lockdep splat, so defer it here.
+			 */
+			dump_page(unmovable, "unmovable page");
+	}
+
 	return ret;
 }
 
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index eff4b45..719c352 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -52,12 +52,16 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
 	return true;
 }
 
-static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
+static inline bool pfn_is_match(struct page *page, unsigned long pfn)
 {
-	unsigned long hpage_pfn = page_to_pfn(hpage);
+	unsigned long page_pfn = page_to_pfn(page);
+
+	/* normal page and hugetlbfs page */
+	if (!PageTransCompound(page) || PageHuge(page))
+		return page_pfn == pfn;
 
 	/* THP can be referenced by any subpage */
-	return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
+	return pfn >= page_pfn && pfn - page_pfn < hpage_nr_pages(page);
 }
 
 /**
@@ -108,7 +112,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
 		pfn = pte_pfn(*pvmw->pte);
 	}
 
-	return pfn_in_hpage(pvmw->page, pfn);
+	return pfn_is_match(pvmw->page, pfn);
 }
 
 /**
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index ea0b9e6..928df16 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -4,26 +4,57 @@
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
 
+/*
+ * We want to know the real level where an entry is located, ignoring any
+ * folding of levels which may be happening. For example if p4d is folded then
+ * a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
+ */
+static int real_depth(int depth)
+{
+	if (depth == 3 && PTRS_PER_PMD == 1)
+		depth = 2;
+	if (depth == 2 && PTRS_PER_PUD == 1)
+		depth = 1;
+	if (depth == 1 && PTRS_PER_P4D == 1)
+		depth = 0;
+	return depth;
+}
+
+static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
+				unsigned long end, struct mm_walk *walk)
+{
+	const struct mm_walk_ops *ops = walk->ops;
+	int err = 0;
+
+	for (;;) {
+		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
+		if (err)
+			break;
+		if (addr >= end - PAGE_SIZE)
+			break;
+		addr += PAGE_SIZE;
+		pte++;
+	}
+	return err;
+}
+
 static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			  struct mm_walk *walk)
 {
 	pte_t *pte;
 	int err = 0;
-	const struct mm_walk_ops *ops = walk->ops;
 	spinlock_t *ptl;
 
-	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-	for (;;) {
-		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
-		if (err)
-		       break;
-		addr += PAGE_SIZE;
-		if (addr == end)
-			break;
-		pte++;
+	if (walk->no_vma) {
+		pte = pte_offset_map(pmd, addr);
+		err = walk_pte_range_inner(pte, addr, end, walk);
+		pte_unmap(pte);
+	} else {
+		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+		err = walk_pte_range_inner(pte, addr, end, walk);
+		pte_unmap_unlock(pte, ptl);
 	}
 
-	pte_unmap_unlock(pte, ptl);
 	return err;
 }
 
@@ -34,18 +65,22 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 	unsigned long next;
 	const struct mm_walk_ops *ops = walk->ops;
 	int err = 0;
+	int depth = real_depth(3);
 
 	pmd = pmd_offset(pud, addr);
 	do {
 again:
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(*pmd) || !walk->vma) {
+		if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
 			if (ops->pte_hole)
-				err = ops->pte_hole(addr, next, walk);
+				err = ops->pte_hole(addr, next, depth, walk);
 			if (err)
 				break;
 			continue;
 		}
+
+		walk->action = ACTION_SUBTREE;
+
 		/*
 		 * This implies that each ->pmd_entry() handler
 		 * needs to know about pmd_trans_huge() pmds
@@ -55,16 +90,24 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 		if (err)
 			break;
 
+		if (walk->action == ACTION_AGAIN)
+			goto again;
+
 		/*
 		 * Check this here so we only break down trans_huge
 		 * pages when we _need_ to
 		 */
-		if (!ops->pte_entry)
+		if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
+		    walk->action == ACTION_CONTINUE ||
+		    !(ops->pte_entry))
 			continue;
 
-		split_huge_pmd(walk->vma, pmd, addr);
-		if (pmd_trans_unstable(pmd))
-			goto again;
+		if (walk->vma) {
+			split_huge_pmd(walk->vma, pmd, addr);
+			if (pmd_trans_unstable(pmd))
+				goto again;
+		}
+
 		err = walk_pte_range(pmd, addr, next, walk);
 		if (err)
 			break;
@@ -80,37 +123,41 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 	unsigned long next;
 	const struct mm_walk_ops *ops = walk->ops;
 	int err = 0;
+	int depth = real_depth(2);
 
 	pud = pud_offset(p4d, addr);
 	do {
  again:
 		next = pud_addr_end(addr, end);
-		if (pud_none(*pud) || !walk->vma) {
+		if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
 			if (ops->pte_hole)
-				err = ops->pte_hole(addr, next, walk);
+				err = ops->pte_hole(addr, next, depth, walk);
 			if (err)
 				break;
 			continue;
 		}
 
-		if (ops->pud_entry) {
-			spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);
+		walk->action = ACTION_SUBTREE;
 
-			if (ptl) {
-				err = ops->pud_entry(pud, addr, next, walk);
-				spin_unlock(ptl);
-				if (err)
-					break;
-				continue;
-			}
-		}
+		if (ops->pud_entry)
+			err = ops->pud_entry(pud, addr, next, walk);
+		if (err)
+			break;
 
-		split_huge_pud(walk->vma, pud, addr);
+		if (walk->action == ACTION_AGAIN)
+			goto again;
+
+		if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
+		    walk->action == ACTION_CONTINUE ||
+		    !(ops->pmd_entry || ops->pte_entry))
+			continue;
+
+		if (walk->vma)
+			split_huge_pud(walk->vma, pud, addr);
 		if (pud_none(*pud))
 			goto again;
 
-		if (ops->pmd_entry || ops->pte_entry)
-			err = walk_pmd_range(pud, addr, next, walk);
+		err = walk_pmd_range(pud, addr, next, walk);
 		if (err)
 			break;
 	} while (pud++, addr = next, addr != end);
@@ -125,18 +172,24 @@ static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 	unsigned long next;
 	const struct mm_walk_ops *ops = walk->ops;
 	int err = 0;
+	int depth = real_depth(1);
 
 	p4d = p4d_offset(pgd, addr);
 	do {
 		next = p4d_addr_end(addr, end);
 		if (p4d_none_or_clear_bad(p4d)) {
 			if (ops->pte_hole)
-				err = ops->pte_hole(addr, next, walk);
+				err = ops->pte_hole(addr, next, depth, walk);
 			if (err)
 				break;
 			continue;
 		}
-		if (ops->pmd_entry || ops->pte_entry)
+		if (ops->p4d_entry) {
+			err = ops->p4d_entry(p4d, addr, next, walk);
+			if (err)
+				break;
+		}
+		if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
 			err = walk_pud_range(p4d, addr, next, walk);
 		if (err)
 			break;
@@ -153,17 +206,26 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
 	const struct mm_walk_ops *ops = walk->ops;
 	int err = 0;
 
-	pgd = pgd_offset(walk->mm, addr);
+	if (walk->pgd)
+		pgd = walk->pgd + pgd_index(addr);
+	else
+		pgd = pgd_offset(walk->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd)) {
 			if (ops->pte_hole)
-				err = ops->pte_hole(addr, next, walk);
+				err = ops->pte_hole(addr, next, 0, walk);
 			if (err)
 				break;
 			continue;
 		}
-		if (ops->pmd_entry || ops->pte_entry)
+		if (ops->pgd_entry) {
+			err = ops->pgd_entry(pgd, addr, next, walk);
+			if (err)
+				break;
+		}
+		if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
+		    ops->pte_entry)
 			err = walk_p4d_range(pgd, addr, next, walk);
 		if (err)
 			break;
@@ -199,7 +261,7 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
 		if (pte)
 			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
 		else if (ops->pte_hole)
-			err = ops->pte_hole(addr, next, walk);
+			err = ops->pte_hole(addr, next, -1, walk);
 
 		if (err)
 			break;
@@ -243,7 +305,7 @@ static int walk_page_test(unsigned long start, unsigned long end,
 	if (vma->vm_flags & VM_PFNMAP) {
 		int err = 1;
 		if (ops->pte_hole)
-			err = ops->pte_hole(start, end, walk);
+			err = ops->pte_hole(start, end, -1, walk);
 		return err ? err : 1;
 	}
 	return 0;
@@ -369,6 +431,33 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
 	return err;
 }
 
+/*
+ * Similar to walk_page_range() but can walk any page tables even if they are
+ * not backed by VMAs. Because 'unusual' entries may be walked, this function
+ * will also not lock the PTEs for the pte_entry() callback. This is useful for
+ * walking the kernel page tables or page tables for firmware.
+ */
+int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
+			  unsigned long end, const struct mm_walk_ops *ops,
+			  pgd_t *pgd,
+			  void *private)
+{
+	struct mm_walk walk = {
+		.ops		= ops,
+		.mm		= mm,
+		.pgd		= pgd,
+		.private	= private,
+		.no_vma		= true
+	};
+
+	if (start >= end || !walk.mm)
+		return -EINVAL;
+
+	lockdep_assert_held(&walk.mm->mmap_sem);
+
+	return __walk_page_range(start, end, &walk);
+}
+
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 		void *private)
 {
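
As a rough illustration of the new no-VMA walker (a sketch only; count_pte, note_hole and count_mapped are made-up helpers), this counts present leaf PTEs over an arbitrary range while holding mmap_sem, as walk_page_range_novma() asserts:

	#include <linux/pagewalk.h>

	static int count_pte(pte_t *pte, unsigned long addr, unsigned long next,
			     struct mm_walk *walk)
	{
		unsigned long *count = walk->private;

		if (pte_present(READ_ONCE(*pte)))
			(*count)++;
		return 0;
	}

	static int note_hole(unsigned long addr, unsigned long next, int depth,
			     struct mm_walk *walk)
	{
		/* depth is the real (unfolded) level, 0=pgd .. 3=pmd, or -1 for
		 * holes reported outside the table walk itself. */
		return 0;
	}

	static const struct mm_walk_ops count_ops = {
		.pte_entry = count_pte,
		.pte_hole  = note_hole,
	};

	static unsigned long count_mapped(struct mm_struct *mm, unsigned long start,
					  unsigned long end)
	{
		unsigned long count = 0;

		down_read(&mm->mmap_sem);
		walk_page_range_novma(mm, start, end, &count_ops, NULL, &count);
		up_read(&mm->mmap_sem);
		return count;
	}
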
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 357aa7b..de41e83 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -42,12 +42,11 @@ static int process_vm_rw_pages(struct page **pages,
 		if (copy > len)
 			copy = len;
 
-		if (vm_write) {
+		if (vm_write)
 			copied = copy_page_from_iter(page, offset, copy, iter);
-			set_page_dirty_lock(page);
-		} else {
+		else
 			copied = copy_page_to_iter(page, offset, copy, iter);
-		}
+
 		len -= copied;
 		if (copied < copy && iov_iter_count(iter))
 			return -EFAULT;
@@ -96,7 +95,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
 		flags |= FOLL_WRITE;
 
 	while (!rc && nr_pages && iov_iter_count(iter)) {
-		int pages = min(nr_pages, max_pages_per_loop);
+		int pinned_pages = min(nr_pages, max_pages_per_loop);
 		int locked = 1;
 		size_t bytes;
 
@@ -106,14 +105,15 @@ static int process_vm_rw_single_vec(unsigned long addr,
 		 * current/current->mm
 		 */
 		down_read(&mm->mmap_sem);
-		pages = get_user_pages_remote(task, mm, pa, pages, flags,
-					      process_pages, NULL, &locked);
+		pinned_pages = pin_user_pages_remote(task, mm, pa, pinned_pages,
+						     flags, process_pages,
+						     NULL, &locked);
 		if (locked)
 			up_read(&mm->mmap_sem);
-		if (pages <= 0)
+		if (pinned_pages <= 0)
 			return -EFAULT;
 
-		bytes = pages * PAGE_SIZE - start_offset;
+		bytes = pinned_pages * PAGE_SIZE - start_offset;
 		if (bytes > len)
 			bytes = len;
 
@@ -122,10 +122,12 @@ static int process_vm_rw_single_vec(unsigned long addr,
 					 vm_write);
 		len -= bytes;
 		start_offset = 0;
-		nr_pages -= pages;
-		pa += pages * PAGE_SIZE;
-		while (pages)
-			put_page(process_pages[--pages]);
+		nr_pages -= pinned_pages;
+		pa += pinned_pages * PAGE_SIZE;
+
+		/* If vm_write is set, the pages need to be made dirty: */
+		unpin_user_pages_dirty_lock(process_pages, pinned_pages,
+					    vm_write);
 	}
 
 	return rc;
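
The hunk above is part of the wider FOLL_PIN conversion: pages accessed for their data are pinned with pin_user_pages*() and released with unpin_user_pages_dirty_lock(), which also takes care of the dirtying previously done by hand. A minimal sketch under the assumption of a page-aligned user buffer (touch_user_pages is a made-up helper):

	#include <linux/mm.h>

	static long touch_user_pages(unsigned long buf, bool write)
	{
		struct page *pages[16];
		long pinned;

		pinned = pin_user_pages_fast(buf, ARRAY_SIZE(pages),
					     write ? FOLL_WRITE : 0, pages);
		if (pinned <= 0)
			return pinned ? pinned : -EFAULT;

		/* ... copy to or from the pinned pages ... */

		/* Marks the pages dirty (when 'write') and drops the FOLL_PIN references. */
		unpin_user_pages_dirty_lock(pages, pinned, write);
		return pinned;
	}
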
diff --git a/mm/ptdump.c b/mm/ptdump.c
new file mode 100644
index 0000000..26208d0
--- /dev/null
+++ b/mm/ptdump.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/pagewalk.h>
+#include <linux/ptdump.h>
+#include <linux/kasan.h>
+
+#ifdef CONFIG_KASAN
+/*
+ * This is an optimization for the KASAN=y case. Since all kasan page tables
+ * eventually point to the kasan_early_shadow_page we could call note_page()
+ * right away without walking through lower level page tables. This saves
+ * us dozens of seconds (minutes for a 5-level config) while checking for
+ * W+X mappings or reading the kernel_page_tables debugfs file.
+ */
+static inline int note_kasan_page_table(struct mm_walk *walk,
+					unsigned long addr)
+{
+	struct ptdump_state *st = walk->private;
+
+	st->note_page(st, addr, 4, pte_val(kasan_early_shadow_pte[0]));
+
+	walk->action = ACTION_CONTINUE;
+
+	return 0;
+}
+#endif
+
+static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
+			    unsigned long next, struct mm_walk *walk)
+{
+	struct ptdump_state *st = walk->private;
+	pgd_t val = READ_ONCE(*pgd);
+
+#if CONFIG_PGTABLE_LEVELS > 4 && defined(CONFIG_KASAN)
+	if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
+		return note_kasan_page_table(walk, addr);
+#endif
+
+	if (pgd_leaf(val))
+		st->note_page(st, addr, 0, pgd_val(val));
+
+	return 0;
+}
+
+static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
+			    unsigned long next, struct mm_walk *walk)
+{
+	struct ptdump_state *st = walk->private;
+	p4d_t val = READ_ONCE(*p4d);
+
+#if CONFIG_PGTABLE_LEVELS > 3 && defined(CONFIG_KASAN)
+	if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
+		return note_kasan_page_table(walk, addr);
+#endif
+
+	if (p4d_leaf(val))
+		st->note_page(st, addr, 1, p4d_val(val));
+
+	return 0;
+}
+
+static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
+			    unsigned long next, struct mm_walk *walk)
+{
+	struct ptdump_state *st = walk->private;
+	pud_t val = READ_ONCE(*pud);
+
+#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_KASAN)
+	if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
+		return note_kasan_page_table(walk, addr);
+#endif
+
+	if (pud_leaf(val))
+		st->note_page(st, addr, 2, pud_val(val));
+
+	return 0;
+}
+
+static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
+			    unsigned long next, struct mm_walk *walk)
+{
+	struct ptdump_state *st = walk->private;
+	pmd_t val = READ_ONCE(*pmd);
+
+#if defined(CONFIG_KASAN)
+	if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
+		return note_kasan_page_table(walk, addr);
+#endif
+
+	if (pmd_leaf(val))
+		st->note_page(st, addr, 3, pmd_val(val));
+
+	return 0;
+}
+
+static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
+			    unsigned long next, struct mm_walk *walk)
+{
+	struct ptdump_state *st = walk->private;
+
+	st->note_page(st, addr, 4, pte_val(READ_ONCE(*pte)));
+
+	return 0;
+}
+
+static int ptdump_hole(unsigned long addr, unsigned long next,
+		       int depth, struct mm_walk *walk)
+{
+	struct ptdump_state *st = walk->private;
+
+	st->note_page(st, addr, depth, 0);
+
+	return 0;
+}
+
+static const struct mm_walk_ops ptdump_ops = {
+	.pgd_entry	= ptdump_pgd_entry,
+	.p4d_entry	= ptdump_p4d_entry,
+	.pud_entry	= ptdump_pud_entry,
+	.pmd_entry	= ptdump_pmd_entry,
+	.pte_entry	= ptdump_pte_entry,
+	.pte_hole	= ptdump_hole,
+};
+
+void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
+{
+	const struct ptdump_range *range = st->range;
+
+	down_read(&mm->mmap_sem);
+	while (range->start != range->end) {
+		walk_page_range_novma(mm, range->start, range->end,
+				      &ptdump_ops, pgd, st);
+		range++;
+	}
+	up_read(&mm->mmap_sem);
+
+	/* Flush out the last page */
+	st->note_page(st, 0, -1, 0);
+}
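
A short, hedged sketch of how an architecture might drive the new generic dumper, assuming the ptdump_state layout introduced alongside this file (a note_page callback plus a {start, end} range array with an empty terminator); my_note_page and the PAGE_OFFSET range are illustrative:

	#include <linux/mm.h>
	#include <linux/ptdump.h>

	static void my_note_page(struct ptdump_state *st, unsigned long addr,
				 int level, unsigned long val)
	{
		/* level: 0=pgd .. 4=pte; val == 0 marks a hole; a final call flushes state */
		if (val)
			pr_info("0x%lx: level %d entry %lx\n", addr, level, val);
	}

	static const struct ptdump_range my_ranges[] = {
		{ PAGE_OFFSET, ~0UL },	/* e.g. the linear map; purely illustrative */
		{ 0, 0 },		/* start == end terminates the walk */
	};

	static void dump_kernel_tables(void)
	{
		struct ptdump_state st = {
			.note_page = my_note_page,
			.range     = my_ranges,
		};

		ptdump_walk_pgd(&st, &init_mm, NULL);
	}
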
diff --git a/mm/shmem.c b/mm/shmem.c
index 8793e8c..c8f7540 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3381,9 +3381,19 @@ enum shmem_param {
 	Opt_uid,
 };
 
-static const struct fs_parameter_spec shmem_param_specs[] = {
+static const struct constant_table shmem_param_enums_huge[] = {
+	{"never",	SHMEM_HUGE_NEVER },
+	{"always",	SHMEM_HUGE_ALWAYS },
+	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
+	{"advise",	SHMEM_HUGE_ADVISE },
+	{"deny",	SHMEM_HUGE_DENY },
+	{"force",	SHMEM_HUGE_FORCE },
+	{}
+};
+
+const struct fs_parameter_spec shmem_fs_parameters[] = {
 	fsparam_u32   ("gid",		Opt_gid),
-	fsparam_enum  ("huge",		Opt_huge),
+	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
 	fsparam_u32oct("mode",		Opt_mode),
 	fsparam_string("mpol",		Opt_mpol),
 	fsparam_string("nr_blocks",	Opt_nr_blocks),
@@ -3393,20 +3403,6 @@ static const struct fs_parameter_spec shmem_param_specs[] = {
 	{}
 };
 
-static const struct fs_parameter_enum shmem_param_enums[] = {
-	{ Opt_huge,	"never",	SHMEM_HUGE_NEVER },
-	{ Opt_huge,	"always",	SHMEM_HUGE_ALWAYS },
-	{ Opt_huge,	"within_size",	SHMEM_HUGE_WITHIN_SIZE },
-	{ Opt_huge,	"advise",	SHMEM_HUGE_ADVISE },
-	{}
-};
-
-const struct fs_parameter_description shmem_fs_parameters = {
-	.name		= "tmpfs",
-	.specs		= shmem_param_specs,
-	.enums		= shmem_param_enums,
-};
-
 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
 {
 	struct shmem_options *ctx = fc->fs_private;
@@ -3415,7 +3411,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
 	char *rest;
 	int opt;
 
-	opt = fs_parse(fc, &shmem_fs_parameters, param, &result);
+	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
 
@@ -3479,9 +3475,9 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
 	return 0;
 
 unsupported_parameter:
-	return invalf(fc, "tmpfs: Unsupported parameter '%s'", param->key);
+	return invalfc(fc, "Unsupported parameter '%s'", param->key);
 bad_value:
-	return invalf(fc, "tmpfs: Bad value for '%s'", param->key);
+	return invalfc(fc, "Bad value for '%s'", param->key);
 }
 
 static int shmem_parse_options(struct fs_context *fc, void *data)
@@ -3587,7 +3583,7 @@ static int shmem_reconfigure(struct fs_context *fc)
 	return 0;
 out:
 	spin_unlock(&sbinfo->stat_lock);
-	return invalf(fc, "tmpfs: %s", err);
+	return invalfc(fc, "%s", err);
 }
 
 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
@@ -3889,7 +3885,7 @@ static struct file_system_type shmem_fs_type = {
 	.name		= "tmpfs",
 	.init_fs_context = shmem_init_fs_context,
 #ifdef CONFIG_TMPFS
-	.parameters	= &shmem_fs_parameters,
+	.parameters	= shmem_fs_parameters,
 #endif
 	.kill_sb	= kill_litter_super,
 	.fs_flags	= FS_USERNS_MOUNT,
@@ -4035,7 +4031,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
 static struct file_system_type shmem_fs_type = {
 	.name		= "tmpfs",
 	.init_fs_context = ramfs_init_fs_context,
-	.parameters	= &ramfs_fs_parameters,
+	.parameters	= ramfs_fs_parameters,
 	.kill_sb	= kill_litter_super,
 	.fs_flags	= FS_USERNS_MOUNT,
 };
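
The tmpfs conversion above reflects the reworked fs_parser interface: parameter specs are now a bare array, enum values hang off the individual fsparam_enum() entry via a constant_table, and invalfc() prefixes the filesystem name automatically. A hedged sketch for a made-up filesystem:

	#include <linux/fs_context.h>
	#include <linux/fs_parser.h>

	enum { Opt_level, Opt_size };		/* hypothetical mount options */

	static const struct constant_table myfs_levels[] = {
		{ "low",	0 },
		{ "high",	1 },
		{}
	};

	static const struct fs_parameter_spec myfs_fs_parameters[] = {
		fsparam_enum  ("level",	Opt_level, myfs_levels),	/* enum table bound per parameter */
		fsparam_u32   ("size",	Opt_size),
		{}
	};

	/* In the file_system_type:	.parameters = myfs_fs_parameters,	(plain array, no wrapper)
	 * In the option parser:	return invalfc(fc, "Bad value for '%s'", param->key);
	 */
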
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 0d95dde..1907cb2 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1580,18 +1580,17 @@ static int slabinfo_open(struct inode *inode, struct file *file)
 	return seq_open(file, &slabinfo_op);
 }
 
-static const struct file_operations proc_slabinfo_operations = {
-	.open		= slabinfo_open,
-	.read		= seq_read,
-	.write          = slabinfo_write,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
+static const struct proc_ops slabinfo_proc_ops = {
+	.proc_open	= slabinfo_open,
+	.proc_read	= seq_read,
+	.proc_write	= slabinfo_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
 };
 
 static int __init slab_proc_init(void)
 {
-	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
-						&proc_slabinfo_operations);
+	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
 	return 0;
 }
 module_init(slab_proc_init);
@@ -1677,28 +1676,6 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
 }
 
 /**
- * __krealloc - like krealloc() but don't free @p.
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * This function is like krealloc() except it never frees the originally
- * allocated buffer. Use this if you don't want to free the buffer immediately
- * like, for example, with RCU.
- *
- * Return: pointer to the allocated memory or %NULL in case of error
- */
-void *__krealloc(const void *p, size_t new_size, gfp_t flags)
-{
-	if (unlikely(!new_size))
-		return ZERO_SIZE_PTR;
-
-	return __do_krealloc(p, new_size, flags);
-
-}
-EXPORT_SYMBOL(__krealloc);
-
-/**
  * krealloc - reallocate memory. The contents will remain unchanged.
  * @p: object to reallocate memory for.
  * @new_size: how many bytes of memory are required.
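
The slabinfo hunk above follows the tree-wide move from struct file_operations to the dedicated struct proc_ops for procfs entries. A minimal, hedged sketch of the same pattern for a read-only seq_file (names are illustrative):

	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int my_stats_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "example\n");	/* hypothetical content */
		return 0;
	}

	static int my_stats_open(struct inode *inode, struct file *file)
	{
		return single_open(file, my_stats_show, NULL);
	}

	static const struct proc_ops my_stats_proc_ops = {
		.proc_open	= my_stats_open,
		.proc_read	= seq_read,
		.proc_lseek	= seq_lseek,
		.proc_release	= single_release,
	};

	/* proc_create("my_stats", 0444, NULL, &my_stats_proc_ops); */
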
diff --git a/mm/slub.c b/mm/slub.c
index 0ab92ec..17dc00e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -439,19 +439,38 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 }
 
 #ifdef CONFIG_SLUB_DEBUG
+static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
+static DEFINE_SPINLOCK(object_map_lock);
+
 /*
  * Determine a map of object in use on a page.
  *
  * Node listlock must be held to guarantee that the page does
  * not vanish from under us.
  */
-static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
+static unsigned long *get_map(struct kmem_cache *s, struct page *page)
 {
 	void *p;
 	void *addr = page_address(page);
 
+	VM_BUG_ON(!irqs_disabled());
+
+	spin_lock(&object_map_lock);
+
+	bitmap_zero(object_map, page->objects);
+
 	for (p = page->freelist; p; p = get_freepointer(s, p))
-		set_bit(slab_index(p, s, addr), map);
+		set_bit(slab_index(p, s, addr), object_map);
+
+	return object_map;
+}
+
+static void put_map(unsigned long *map)
+{
+	VM_BUG_ON(map != object_map);
+	lockdep_assert_held(&object_map_lock);
+
+	spin_unlock(&object_map_lock);
 }
 
 static inline unsigned int size_from_object(struct kmem_cache *s)
@@ -3675,13 +3694,12 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 #ifdef CONFIG_SLUB_DEBUG
 	void *addr = page_address(page);
 	void *p;
-	unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
-	if (!map)
-		return;
+	unsigned long *map;
+
 	slab_err(s, page, text, s->name);
 	slab_lock(page);
 
-	get_map(s, page, map);
+	map = get_map(s, page);
 	for_each_object(p, s, addr, page->objects) {
 
 		if (!test_bit(slab_index(p, s, addr), map)) {
@@ -3689,8 +3707,9 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 			print_tracking(s, p);
 		}
 	}
+	put_map(map);
+
 	slab_unlock(page);
-	bitmap_free(map);
 #endif
 }
 
@@ -4384,19 +4403,19 @@ static int count_total(struct page *page)
 #endif
 
 #ifdef CONFIG_SLUB_DEBUG
-static void validate_slab(struct kmem_cache *s, struct page *page,
-						unsigned long *map)
+static void validate_slab(struct kmem_cache *s, struct page *page)
 {
 	void *p;
 	void *addr = page_address(page);
+	unsigned long *map;
+
+	slab_lock(page);
 
 	if (!check_slab(s, page) || !on_freelist(s, page, NULL))
-		return;
+		goto unlock;
 
 	/* Now we know that a valid freelist exists */
-	bitmap_zero(map, page->objects);
-
-	get_map(s, page, map);
+	map = get_map(s, page);
 	for_each_object(p, s, addr, page->objects) {
 		u8 val = test_bit(slab_index(p, s, addr), map) ?
 			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
@@ -4404,18 +4423,13 @@ static void validate_slab(struct kmem_cache *s, struct page *page,
 		if (!check_object(s, page, p, val))
 			break;
 	}
-}
-
-static void validate_slab_slab(struct kmem_cache *s, struct page *page,
-						unsigned long *map)
-{
-	slab_lock(page);
-	validate_slab(s, page, map);
+	put_map(map);
+unlock:
 	slab_unlock(page);
 }
 
 static int validate_slab_node(struct kmem_cache *s,
-		struct kmem_cache_node *n, unsigned long *map)
+		struct kmem_cache_node *n)
 {
 	unsigned long count = 0;
 	struct page *page;
@@ -4424,7 +4438,7 @@ static int validate_slab_node(struct kmem_cache *s,
 	spin_lock_irqsave(&n->list_lock, flags);
 
 	list_for_each_entry(page, &n->partial, slab_list) {
-		validate_slab_slab(s, page, map);
+		validate_slab(s, page);
 		count++;
 	}
 	if (count != n->nr_partial)
@@ -4435,7 +4449,7 @@ static int validate_slab_node(struct kmem_cache *s,
 		goto out;
 
 	list_for_each_entry(page, &n->full, slab_list) {
-		validate_slab_slab(s, page, map);
+		validate_slab(s, page);
 		count++;
 	}
 	if (count != atomic_long_read(&n->nr_slabs))
@@ -4452,15 +4466,11 @@ static long validate_slab_cache(struct kmem_cache *s)
 	int node;
 	unsigned long count = 0;
 	struct kmem_cache_node *n;
-	unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
-
-	if (!map)
-		return -ENOMEM;
 
 	flush_all(s);
 	for_each_kmem_cache_node(s, node, n)
-		count += validate_slab_node(s, n, map);
-	bitmap_free(map);
+		count += validate_slab_node(s, n);
+
 	return count;
 }
 /*
@@ -4590,18 +4600,17 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 }
 
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
-		struct page *page, enum track_item alloc,
-		unsigned long *map)
+		struct page *page, enum track_item alloc)
 {
 	void *addr = page_address(page);
 	void *p;
+	unsigned long *map;
 
-	bitmap_zero(map, page->objects);
-	get_map(s, page, map);
-
+	map = get_map(s, page);
 	for_each_object(p, s, addr, page->objects)
 		if (!test_bit(slab_index(p, s, addr), map))
 			add_location(t, s, get_track(s, p, alloc));
+	put_map(map);
 }
 
 static int list_locations(struct kmem_cache *s, char *buf,
@@ -4612,11 +4621,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	struct loc_track t = { 0, 0, NULL };
 	int node;
 	struct kmem_cache_node *n;
-	unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
 
-	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
-				     GFP_KERNEL)) {
-		bitmap_free(map);
+	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
+			     GFP_KERNEL)) {
 		return sprintf(buf, "Out of memory\n");
 	}
 	/* Push back cpu slabs */
@@ -4631,9 +4638,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
 
 		spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, slab_list)
-			process_slab(&t, s, page, alloc, map);
+			process_slab(&t, s, page, alloc);
 		list_for_each_entry(page, &n->full, slab_list)
-			process_slab(&t, s, page, alloc, map);
+			process_slab(&t, s, page, alloc);
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
@@ -4682,7 +4689,6 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	}
 
 	free_loc_track(&t);
-	bitmap_free(map);
 	if (!t.count)
 		len += sprintf(buf, "No data\n");
 	return len;
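
The SLUB debug hunks above replace a per-call bitmap allocation, which could fail and silently skip validation, with one statically allocated object_map serialized by object_map_lock; get_map() returns the shared bitmap with the lock held and put_map() drops it. A rough userspace analogue of that allocate-once, lock-around-use pattern is sketched below; it is not kernel code, and the byte-per-object map and function names are stand-ins:

/* Userspace sketch of the shared-scratch-buffer pattern behind
 * get_map()/put_map() above; compile with -pthread. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_OBJS 512

static unsigned char object_map[MAX_OBJS];	/* shared scratch map */
static pthread_mutex_t object_map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns the shared map with the lock held; caller must call put_map(). */
static unsigned char *get_map(int nr_objects)
{
	pthread_mutex_lock(&object_map_lock);
	memset(object_map, 0, nr_objects);
	return object_map;
}

static void put_map(unsigned char *map)
{
	/* only the shared map may be handed back */
	if (map == object_map)
		pthread_mutex_unlock(&object_map_lock);
}

int main(void)
{
	unsigned char *map = get_map(MAX_OBJS);

	map[3] = 1;	/* mark object 3 as free, as the freelist walk would */
	printf("object 3 free: %d\n", map[3]);
	put_map(map);
	return 0;
}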
diff --git a/mm/sparse.c b/mm/sparse.c
index 3822ecb..c184b69 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -198,16 +198,6 @@ static void section_mark_present(struct mem_section *ms)
 	ms->section_mem_map |= SECTION_MARKED_PRESENT;
 }
 
-static inline unsigned long next_present_section_nr(unsigned long section_nr)
-{
-	do {
-		section_nr++;
-		if (present_section_nr(section_nr))
-			return section_nr;
-	} while ((section_nr <= __highest_present_section_nr));
-
-	return -1;
-}
 #define for_each_present_section_nr(start, section_nr)		\
 	for (section_nr = next_present_section_nr(start-1);	\
 	     ((section_nr != -1) &&				\
@@ -789,7 +779,7 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 			ms->usage = NULL;
 		}
 		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
-		ms->section_mem_map = sparse_encode_mem_map(NULL, section_nr);
+		ms->section_mem_map = (unsigned long)NULL;
 	}
 
 	if (section_is_early && memmap)
diff --git a/mm/swap.c b/mm/swap.c
index 5341ae9..cf39d24 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -813,8 +813,10 @@ void release_pages(struct page **pages, int nr)
 			 * processing, and instead, expect a call to
 			 * put_page_testzero().
 			 */
-			if (put_devmap_managed_page(page))
+			if (page_is_devmap_managed(page)) {
+				put_devmap_managed_page(page);
 				continue;
+			}
 		}
 
 		page = compound_head(page);
@@ -1102,3 +1104,26 @@ void __init swap_setup(void)
 	 * _really_ don't want to cluster much more
 	 */
 }
+
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+void put_devmap_managed_page(struct page *page)
+{
+	int count;
+
+	if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
+		return;
+
+	count = page_ref_dec_return(page);
+
+	/*
+	 * devmap page refcounts are 1-based, rather than 0-based: if
+	 * refcount is 1, then the page is free and the refcount is
+	 * stable because nobody holds a reference on the page.
+	 */
+	if (count == 1)
+		free_devmap_managed_page(page);
+	else if (!count)
+		__put_page(page);
+}
+EXPORT_SYMBOL(put_devmap_managed_page);
+#endif
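
put_devmap_managed_page() is now exported and open-coded here: devmap page refcounts are one-based, so a decrement that lands on 1 means the last real user is gone and the devmap page can be freed, while 0 takes the ordinary final-put path. A small standalone sketch of that decision, using a plain integer instead of the kernel page refcount:

/* Standalone illustration of the 1-based devmap refcount rule above. */
#include <stdio.h>

static void drop_ref(int *refcount)
{
	int count = --(*refcount);

	if (count == 1)
		printf("count==1: page is idle, free_devmap_managed_page()\n");
	else if (count == 0)
		printf("count==0: ordinary final put, __put_page()\n");
	else
		printf("count==%d: still in use\n", count);
}

int main(void)
{
	int refcount = 3;	/* 1 "base" reference plus 2 real users */

	drop_ref(&refcount);	/* 2: still in use */
	drop_ref(&refcount);	/* 1: last real user gone, devmap free path */
	return 0;
}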
diff --git a/mm/swapfile.c b/mm/swapfile.c
index bb3261d..2c33ff4 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2737,10 +2737,10 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
 	else
 		type = si->type + 1;
 
+	++(*pos);
 	for (; (si = swap_type_to_swap_info(type)); type++) {
 		if (!(si->flags & SWP_USED) || !si->swap_map)
 			continue;
-		++*pos;
 		return si;
 	}
 
@@ -2796,17 +2796,17 @@ static int swaps_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static const struct file_operations proc_swaps_operations = {
-	.open		= swaps_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
-	.poll		= swaps_poll,
+static const struct proc_ops swaps_proc_ops = {
+	.proc_open	= swaps_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
+	.proc_poll	= swaps_poll,
 };
 
 static int __init procswaps_init(void)
 {
-	proc_create("swaps", 0, NULL, &proc_swaps_operations);
+	proc_create("swaps", 0, NULL, &swaps_proc_ops);
 	return 0;
 }
 __initcall(procswaps_init);
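
The swap_next() hunk moves the position increment out of the loop so *pos advances exactly once per ->next call even when slots are skipped or the walk ends; iterators that only bump the position on success can confuse seq_file and replay records. A hedged userspace sketch of that convention follows; the table contents and helper names are made up:

/* Sketch of "advance the position exactly once per next() call",
 * mirroring the ++(*pos) placement in the fixed swap_next(). */
#include <stdio.h>

static const char *table[] = { "swapfile-a", NULL, "swapfile-b" };
#define TABLE_LEN 3

/* Returns the next live entry after *idx, or NULL at the end. */
static const char *demo_next(long *pos, int *idx)
{
	++*pos;				/* unconditional, like ++(*pos) above */
	for (++*idx; *idx < TABLE_LEN; ++*idx) {
		if (table[*idx])
			return table[*idx];
	}
	return NULL;
}

int main(void)
{
	long pos = 0;
	int idx = -1;
	const char *e;

	while ((e = demo_next(&pos, &idx)))
		printf("pos=%ld: %s\n", pos, e);
	return 0;
}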
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 572fb17..c05eb9e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -146,20 +146,6 @@ struct scan_control {
 	struct reclaim_state reclaim_state;
 };
 
-#ifdef ARCH_HAS_PREFETCH
-#define prefetch_prev_lru_page(_page, _base, _field)			\
-	do {								\
-		if ((_page)->lru.prev != _base) {			\
-			struct page *prev;				\
-									\
-			prev = lru_to_page(&(_page->lru));		\
-			prefetch(&prev->_field);			\
-		}							\
-	} while (0)
-#else
-#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
-#endif
-
 #ifdef ARCH_HAS_PREFETCHW
 #define prefetchw_prev_lru_page(_page, _base, _field)			\
 	do {								\
@@ -2695,7 +2681,7 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 	} while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
 }
 
-static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
+static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 {
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long nr_reclaimed, nr_scanned;
@@ -2874,8 +2860,6 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 	 */
 	if (reclaimable)
 		pgdat->kswapd_failures = 0;
-
-	return reclaimable;
 }
 
 /*
@@ -4126,10 +4110,8 @@ module_init(kswapd_init)
  */
 int node_reclaim_mode __read_mostly;
 
-#define RECLAIM_OFF 0
-#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
-#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
-#define RECLAIM_UNMAP (1<<2)	/* Unmap pages during reclaim */
+#define RECLAIM_WRITE (1<<0)	/* Writeout pages during reclaim */
+#define RECLAIM_UNMAP (1<<1)	/* Unmap pages during reclaim */
 
 /*
  * Priority for NODE_RECLAIM. This determines the fraction of pages
diff --git a/mm/zswap.c b/mm/zswap.c
index 46a3223..55094e6 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -32,6 +32,7 @@
 #include <linux/swapops.h>
 #include <linux/writeback.h>
 #include <linux/pagemap.h>
+#include <linux/workqueue.h>
 
 /*********************************
 * statistics
@@ -65,6 +66,11 @@ static u64 zswap_reject_kmemcache_fail;
 /* Duplicate store was encountered (rare) */
 static u64 zswap_duplicate_entry;
 
+/* Shrinker work queue */
+static struct workqueue_struct *shrink_wq;
+/* Pool limit was hit, we need to calm down */
+static bool zswap_pool_reached_full;
+
 /*********************************
 * tunables
 **********************************/
@@ -109,6 +115,11 @@ module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
 static unsigned int zswap_max_pool_percent = 20;
 module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
 
+/* The threshold for accepting new pages after the max_pool_percent was hit */
+static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
+module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
+		   uint, 0644);
+
 /* Enable/disable handling same-value filled pages (enabled by default) */
 static bool zswap_same_filled_pages_enabled = true;
 module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
@@ -123,7 +134,8 @@ struct zswap_pool {
 	struct crypto_comp * __percpu *tfm;
 	struct kref kref;
 	struct list_head list;
-	struct work_struct work;
+	struct work_struct release_work;
+	struct work_struct shrink_work;
 	struct hlist_node node;
 	char tfm_name[CRYPTO_MAX_ALG_NAME];
 };
@@ -214,6 +226,13 @@ static bool zswap_is_full(void)
 			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 }
 
+static bool zswap_can_accept(void)
+{
+	return totalram_pages() * zswap_accept_thr_percent / 100 *
+				zswap_max_pool_percent / 100 >
+			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
+}
+
 static void zswap_update_total_size(void)
 {
 	struct zswap_pool *pool;
@@ -501,6 +520,16 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 	return NULL;
 }
 
+static void shrink_worker(struct work_struct *w)
+{
+	struct zswap_pool *pool = container_of(w, typeof(*pool),
+						shrink_work);
+
+	if (zpool_shrink(pool->zpool, 1, NULL))
+		zswap_reject_reclaim_fail++;
+	zswap_pool_put(pool);
+}
+
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
 	struct zswap_pool *pool;
@@ -551,6 +580,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 	 */
 	kref_init(&pool->kref);
 	INIT_LIST_HEAD(&pool->list);
+	INIT_WORK(&pool->shrink_work, shrink_worker);
 
 	zswap_pool_debug("created", pool);
 
@@ -624,7 +654,8 @@ static int __must_check zswap_pool_get(struct zswap_pool *pool)
 
 static void __zswap_pool_release(struct work_struct *work)
 {
-	struct zswap_pool *pool = container_of(work, typeof(*pool), work);
+	struct zswap_pool *pool = container_of(work, typeof(*pool),
+						release_work);
 
 	synchronize_rcu();
 
@@ -647,8 +678,8 @@ static void __zswap_pool_empty(struct kref *kref)
 
 	list_del_rcu(&pool->list);
 
-	INIT_WORK(&pool->work, __zswap_pool_release);
-	schedule_work(&pool->work);
+	INIT_WORK(&pool->release_work, __zswap_pool_release);
+	schedule_work(&pool->release_work);
 
 	spin_unlock(&zswap_pools_lock);
 }
@@ -942,22 +973,6 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 	return ret;
 }
 
-static int zswap_shrink(void)
-{
-	struct zswap_pool *pool;
-	int ret;
-
-	pool = zswap_pool_last_get();
-	if (!pool)
-		return -ENOENT;
-
-	ret = zpool_shrink(pool->zpool, 1, NULL);
-
-	zswap_pool_put(pool);
-
-	return ret;
-}
-
 static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
 {
 	unsigned int pos;
@@ -1011,21 +1026,23 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 
 	/* reclaim space if needed */
 	if (zswap_is_full()) {
-		zswap_pool_limit_hit++;
-		if (zswap_shrink()) {
-			zswap_reject_reclaim_fail++;
-			ret = -ENOMEM;
-			goto reject;
-		}
+		struct zswap_pool *pool;
 
-		/* A second zswap_is_full() check after
-		 * zswap_shrink() to make sure it's now
-		 * under the max_pool_percent
-		 */
-		if (zswap_is_full()) {
+		zswap_pool_limit_hit++;
+		zswap_pool_reached_full = true;
+		pool = zswap_pool_last_get();
+		if (pool)
+			queue_work(shrink_wq, &pool->shrink_work);
+		ret = -ENOMEM;
+		goto reject;
+	}
+
+	if (zswap_pool_reached_full) {
+	       if (!zswap_can_accept()) {
 			ret = -ENOMEM;
 			goto reject;
-		}
+		} else
+			zswap_pool_reached_full = false;
 	}
 
 	/* allocate entry */
@@ -1332,11 +1349,18 @@ static int __init init_zswap(void)
 		zswap_enabled = false;
 	}
 
+	shrink_wq = create_workqueue("zswap-shrink");
+	if (!shrink_wq)
+		goto fallback_fail;
+
 	frontswap_register_ops(&zswap_frontswap_ops);
 	if (zswap_debugfs_init())
 		pr_warn("debugfs initialization failed\n");
 	return 0;
 
+fallback_fail:
+	if (pool)
+		zswap_pool_destroy(pool);
 hp_fail:
 	cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
 dstmem_fail:
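
With the zswap changes above, reclaim is pushed to a dedicated workqueue instead of running synchronously in the store path, and new stores are refused until the pool drains back below accept_threshold_percent of the maximum pool size. A runnable sketch of just the hysteresis part, with made-up limits standing in for the totalram/percent arithmetic:

/* Userspace sketch of the zswap accept/reject hysteresis above. */
#include <stdbool.h>
#include <stdio.h>

static const long max_pool_pages = 100;		/* stand-in for the max_pool_percent limit */
static const int accept_thr_percent = 90;	/* accept again below 90% of the max */
static bool pool_reached_full;

static bool store_allowed(long pool_pages)
{
	if (pool_pages >= max_pool_pages) {	/* zswap_is_full() */
		pool_reached_full = true;	/* and queue the async shrink */
		return false;
	}
	if (pool_reached_full) {
		if (pool_pages >= max_pool_pages * accept_thr_percent / 100)
			return false;		/* still above the threshold */
		pool_reached_full = false;	/* drained enough, accept again */
	}
	return true;
}

int main(void)
{
	long samples[] = { 80, 100, 95, 92, 89, 95 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("pool=%ld -> %s\n", samples[i],
		       store_allowed(samples[i]) ? "store" : "reject");
	return 0;
}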
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 46d6cd9..829db9e 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -53,15 +53,12 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
 
 static int parse_qos(const char *buff);
 
-/*
- *   Define allowed FILE OPERATIONS
- */
-static const struct file_operations mpc_file_operations = {
-	.open =		proc_mpc_open,
-	.read =		seq_read,
-	.llseek =	seq_lseek,
-	.write =	proc_mpc_write,
-	.release =	seq_release,
+static const struct proc_ops mpc_proc_ops = {
+	.proc_open	= proc_mpc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= proc_mpc_write,
+	.proc_release	= seq_release,
 };
 
 /*
@@ -290,7 +287,7 @@ int mpc_proc_init(void)
 {
 	struct proc_dir_entry *p;
 
-	p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations);
+	p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_proc_ops);
 	if (!p) {
 		pr_err("Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME);
 		return -ENOMEM;
diff --git a/net/atm/proc.c b/net/atm/proc.c
index c318967..4369ffa 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -36,9 +36,9 @@
 static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
 				 size_t count, loff_t *pos);
 
-static const struct file_operations proc_atm_dev_ops = {
-	.read =		proc_dev_atm_read,
-	.llseek =	noop_llseek,
+static const struct proc_ops atm_dev_proc_ops = {
+	.proc_read	= proc_dev_atm_read,
+	.proc_lseek	= noop_llseek,
 };
 
 static void add_stats(struct seq_file *seq, const char *aal,
@@ -359,7 +359,7 @@ int atm_proc_dev_register(struct atm_dev *dev)
 		goto err_out;
 
 	dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
-					   &proc_atm_dev_ops, dev);
+					   &atm_dev_proc_ops, dev);
 	if (!dev->proc_entry)
 		goto err_free_name;
 	return 0;
diff --git a/net/ceph/Makefile b/net/ceph/Makefile
index 59d0ba2..ce09bb4 100644
--- a/net/ceph/Makefile
+++ b/net/ceph/Makefile
@@ -13,5 +13,5 @@
 	auth.o auth_none.o \
 	crypto.o armor.o \
 	auth_x.o \
-	ceph_fs.o ceph_strings.o ceph_hash.o \
+	ceph_strings.o ceph_hash.o \
 	pagevec.o snapshot.o string_table.o
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index a9d6c97..a0e97f6 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -269,7 +269,7 @@ enum {
 	Opt_abort_on_full,
 };
 
-static const struct fs_parameter_spec ceph_param_specs[] = {
+static const struct fs_parameter_spec ceph_parameters[] = {
 	fsparam_flag	("abort_on_full",		Opt_abort_on_full),
 	fsparam_flag_no ("cephx_require_signatures",	Opt_cephx_require_signatures),
 	fsparam_flag_no ("cephx_sign_messages",		Opt_cephx_sign_messages),
@@ -283,18 +283,13 @@ static const struct fs_parameter_spec ceph_param_specs[] = {
 	fsparam_u32	("osd_request_timeout",		Opt_osd_request_timeout),
 	fsparam_u32	("osdkeepalive",		Opt_osdkeepalivetimeout),
 	__fsparam	(fs_param_is_s32, "osdtimeout", Opt_osdtimeout,
-			 fs_param_deprecated),
+			 fs_param_deprecated, NULL),
 	fsparam_string	("secret",			Opt_secret),
 	fsparam_flag_no ("share",			Opt_share),
 	fsparam_flag_no ("tcp_nodelay",			Opt_tcp_nodelay),
 	{}
 };
 
-static const struct fs_parameter_description ceph_parameters = {
-        .name           = "libceph",
-        .specs          = ceph_param_specs,
-};
-
 struct ceph_options *ceph_alloc_options(void)
 {
 	struct ceph_options *opt;
@@ -337,7 +332,7 @@ EXPORT_SYMBOL(ceph_destroy_options);
 
 /* get secret from key store */
 static int get_secret(struct ceph_crypto_key *dst, const char *name,
-		      struct fs_context *fc)
+		      struct p_log *log)
 {
 	struct key *ukey;
 	int key_err;
@@ -351,19 +346,19 @@ static int get_secret(struct ceph_crypto_key *dst, const char *name,
 		key_err = PTR_ERR(ukey);
 		switch (key_err) {
 		case -ENOKEY:
-			errorf(fc, "libceph: Failed due to key not found: %s",
+			error_plog(log, "Failed due to key not found: %s",
 			       name);
 			break;
 		case -EKEYEXPIRED:
-			errorf(fc, "libceph: Failed due to expired key: %s",
+			error_plog(log, "Failed due to expired key: %s",
 			       name);
 			break;
 		case -EKEYREVOKED:
-			errorf(fc, "libceph: Failed due to revoked key: %s",
+			error_plog(log, "Failed due to revoked key: %s",
 			       name);
 			break;
 		default:
-			errorf(fc, "libceph: Failed due to key error %d: %s",
+			error_plog(log, "Failed due to key error %d: %s",
 			       key_err, name);
 		}
 		err = -EPERM;
@@ -383,15 +378,16 @@ static int get_secret(struct ceph_crypto_key *dst, const char *name,
 }
 
 int ceph_parse_mon_ips(const char *buf, size_t len, struct ceph_options *opt,
-		       struct fs_context *fc)
+		       struct fc_log *l)
 {
+	struct p_log log = {.prefix = "libceph", .log = l};
 	int ret;
 
 	/* ip1[:port1][,ip2[:port2]...] */
 	ret = ceph_parse_ips(buf, buf + len, opt->mon_addr, CEPH_MAX_MON,
 			     &opt->num_mon);
 	if (ret) {
-		errorf(fc, "libceph: Failed to parse monitor IPs: %d", ret);
+		error_plog(&log, "Failed to parse monitor IPs: %d", ret);
 		return ret;
 	}
 
@@ -400,12 +396,13 @@ int ceph_parse_mon_ips(const char *buf, size_t len, struct ceph_options *opt,
 EXPORT_SYMBOL(ceph_parse_mon_ips);
 
 int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
-		     struct fs_context *fc)
+		     struct fc_log *l)
 {
 	struct fs_parse_result result;
 	int token, err;
+	struct p_log log = {.prefix = "libceph", .log = l};
 
-	token = fs_parse(fc, &ceph_parameters, param, &result);
+	token = __fs_parse(&log, ceph_parameters, param, &result);
 	dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
 	if (token < 0)
 		return token;
@@ -417,7 +414,7 @@ int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
 				     &opt->my_addr,
 				     1, NULL);
 		if (err) {
-			errorf(fc, "libceph: Failed to parse ip: %d", err);
+			error_plog(&log, "Failed to parse ip: %d", err);
 			return err;
 		}
 		opt->flags |= CEPH_OPT_MYIP;
@@ -426,7 +423,7 @@ int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
 	case Opt_fsid:
 		err = parse_fsid(param->string, &opt->fsid);
 		if (err) {
-			errorf(fc, "libceph: Failed to parse fsid: %d", err);
+			error_plog(&log, "Failed to parse fsid: %d", err);
 			return err;
 		}
 		opt->flags |= CEPH_OPT_FSID;
@@ -445,7 +442,7 @@ int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
 			return -ENOMEM;
 		err = ceph_crypto_key_unarmor(opt->key, param->string);
 		if (err) {
-			errorf(fc, "libceph: Failed to parse secret: %d", err);
+			error_plog(&log, "Failed to parse secret: %d", err);
 			return err;
 		}
 		break;
@@ -456,10 +453,10 @@ int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
 		opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
 		if (!opt->key)
 			return -ENOMEM;
-		return get_secret(opt->key, param->string, fc);
+		return get_secret(opt->key, param->string, &log);
 
 	case Opt_osdtimeout:
-		warnf(fc, "libceph: Ignoring osdtimeout");
+		warn_plog(&log, "Ignoring osdtimeout");
 		break;
 	case Opt_osdkeepalivetimeout:
 		/* 0 isn't well defined right now, reject it */
@@ -530,7 +527,7 @@ int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
 	return 0;
 
 out_of_range:
-	return invalf(fc, "libceph: %s out of range", param->key);
+	return inval_plog(&log, "%s out of range", param->key);
 }
 EXPORT_SYMBOL(ceph_parse_param);
 
diff --git a/net/ceph/ceph_fs.c b/net/ceph/ceph_fs.c
deleted file mode 100644
index 756a2dc..0000000
--- a/net/ceph/ceph_fs.c
+++ /dev/null
@@ -1,104 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Some non-inline ceph helpers
- */
-#include <linux/module.h>
-#include <linux/ceph/types.h>
-
-/*
- * return true if @layout appears to be valid
- */
-int ceph_file_layout_is_valid(const struct ceph_file_layout *layout)
-{
-	__u32 su = layout->stripe_unit;
-	__u32 sc = layout->stripe_count;
-	__u32 os = layout->object_size;
-
-	/* stripe unit, object size must be non-zero, 64k increment */
-	if (!su || (su & (CEPH_MIN_STRIPE_UNIT-1)))
-		return 0;
-	if (!os || (os & (CEPH_MIN_STRIPE_UNIT-1)))
-		return 0;
-	/* object size must be a multiple of stripe unit */
-	if (os < su || os % su)
-		return 0;
-	/* stripe count must be non-zero */
-	if (!sc)
-		return 0;
-	return 1;
-}
-
-void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
-				  struct ceph_file_layout_legacy *legacy)
-{
-	fl->stripe_unit = le32_to_cpu(legacy->fl_stripe_unit);
-	fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count);
-	fl->object_size = le32_to_cpu(legacy->fl_object_size);
-	fl->pool_id = le32_to_cpu(legacy->fl_pg_pool);
-	if (fl->pool_id == 0 && fl->stripe_unit == 0 &&
-	    fl->stripe_count == 0 && fl->object_size == 0)
-		fl->pool_id = -1;
-}
-EXPORT_SYMBOL(ceph_file_layout_from_legacy);
-
-void ceph_file_layout_to_legacy(struct ceph_file_layout *fl,
-				struct ceph_file_layout_legacy *legacy)
-{
-	legacy->fl_stripe_unit = cpu_to_le32(fl->stripe_unit);
-	legacy->fl_stripe_count = cpu_to_le32(fl->stripe_count);
-	legacy->fl_object_size = cpu_to_le32(fl->object_size);
-	if (fl->pool_id >= 0)
-		legacy->fl_pg_pool = cpu_to_le32(fl->pool_id);
-	else
-		legacy->fl_pg_pool = 0;
-}
-EXPORT_SYMBOL(ceph_file_layout_to_legacy);
-
-int ceph_flags_to_mode(int flags)
-{
-	int mode;
-
-#ifdef O_DIRECTORY  /* fixme */
-	if ((flags & O_DIRECTORY) == O_DIRECTORY)
-		return CEPH_FILE_MODE_PIN;
-#endif
-
-	switch (flags & O_ACCMODE) {
-	case O_WRONLY:
-		mode = CEPH_FILE_MODE_WR;
-		break;
-	case O_RDONLY:
-		mode = CEPH_FILE_MODE_RD;
-		break;
-	case O_RDWR:
-	case O_ACCMODE: /* this is what the VFS does */
-		mode = CEPH_FILE_MODE_RDWR;
-		break;
-	}
-#ifdef O_LAZY
-	if (flags & O_LAZY)
-		mode |= CEPH_FILE_MODE_LAZY;
-#endif
-
-	return mode;
-}
-EXPORT_SYMBOL(ceph_flags_to_mode);
-
-int ceph_caps_for_mode(int mode)
-{
-	int caps = CEPH_CAP_PIN;
-
-	if (mode & CEPH_FILE_MODE_RD)
-		caps |= CEPH_CAP_FILE_SHARED |
-			CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE;
-	if (mode & CEPH_FILE_MODE_WR)
-		caps |= CEPH_CAP_FILE_EXCL |
-			CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER |
-			CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL |
-			CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL;
-	if (mode & CEPH_FILE_MODE_LAZY)
-		caps |= CEPH_CAP_FILE_LAZYIO;
-
-	return caps;
-}
-EXPORT_SYMBOL(ceph_caps_for_mode);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index ba45b07..b68b376 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -402,7 +402,7 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
 	case CEPH_OSD_OP_LIST_WATCHERS:
 		ceph_osd_data_release(&op->list_watchers.response_data);
 		break;
-	case CEPH_OSD_OP_COPY_FROM:
+	case CEPH_OSD_OP_COPY_FROM2:
 		ceph_osd_data_release(&op->copy_from.osd_data);
 		break;
 	default:
@@ -697,7 +697,7 @@ static void get_num_data_items(struct ceph_osd_request *req,
 		case CEPH_OSD_OP_SETXATTR:
 		case CEPH_OSD_OP_CMPXATTR:
 		case CEPH_OSD_OP_NOTIFY_ACK:
-		case CEPH_OSD_OP_COPY_FROM:
+		case CEPH_OSD_OP_COPY_FROM2:
 			*num_request_data_items += 1;
 			break;
 
@@ -1029,7 +1029,7 @@ static u32 osd_req_encode_op(struct ceph_osd_op *dst,
 	case CEPH_OSD_OP_CREATE:
 	case CEPH_OSD_OP_DELETE:
 		break;
-	case CEPH_OSD_OP_COPY_FROM:
+	case CEPH_OSD_OP_COPY_FROM2:
 		dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
 		dst->copy_from.src_version =
 			cpu_to_le64(src->copy_from.src_version);
@@ -1966,7 +1966,7 @@ static void setup_request_data(struct ceph_osd_request *req)
 			ceph_osdc_msg_data_add(request_msg,
 					       &op->notify_ack.request_data);
 			break;
-		case CEPH_OSD_OP_COPY_FROM:
+		case CEPH_OSD_OP_COPY_FROM2:
 			ceph_osdc_msg_data_add(request_msg,
 					       &op->copy_from.osd_data);
 			break;
@@ -5315,6 +5315,7 @@ static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
 				     struct ceph_object_locator *src_oloc,
 				     u32 src_fadvise_flags,
 				     u32 dst_fadvise_flags,
+				     u32 truncate_seq, u64 truncate_size,
 				     u8 copy_from_flags)
 {
 	struct ceph_osd_req_op *op;
@@ -5325,7 +5326,8 @@ static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
-	op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM, dst_fadvise_flags);
+	op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
+			      dst_fadvise_flags);
 	op->copy_from.snapid = src_snapid;
 	op->copy_from.src_version = src_version;
 	op->copy_from.flags = copy_from_flags;
@@ -5335,6 +5337,8 @@ static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
 	end = p + PAGE_SIZE;
 	ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
 	encode_oloc(&p, end, src_oloc);
+	ceph_encode_32(&p, truncate_seq);
+	ceph_encode_64(&p, truncate_size);
 	op->indata_len = PAGE_SIZE - (end - p);
 
 	ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
@@ -5350,6 +5354,7 @@ int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
 			struct ceph_object_id *dst_oid,
 			struct ceph_object_locator *dst_oloc,
 			u32 dst_fadvise_flags,
+			u32 truncate_seq, u64 truncate_size,
 			u8 copy_from_flags)
 {
 	struct ceph_osd_request *req;
@@ -5366,7 +5371,8 @@ int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
 
 	ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
 					src_oloc, src_fadvise_flags,
-					dst_fadvise_flags, copy_from_flags);
+					dst_fadvise_flags, truncate_seq,
+					truncate_size, copy_from_flags);
 	if (ret)
 		goto out;
 
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 458be6b..3ab23f6 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -643,9 +643,10 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
 		return ERR_PTR(-ENOMEM);
 	bpf_map_init_from_attr(&smap->map, attr);
 
+	nbuckets = roundup_pow_of_two(num_possible_cpus());
 	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
-	smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus())));
-	nbuckets = 1U << smap->bucket_log;
+	nbuckets = max_t(u32, 2, nbuckets);
+	smap->bucket_log = ilog2(nbuckets);
 	cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
 
 	ret = bpf_map_charge_init(&smap->map.memory, cost);
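
The bpf_sk_storage change computes the bucket count first (rounded up to a power of two, with a floor of 2 so select_bucket() stays well defined) and only then derives bucket_log from it. The arithmetic in isolation, runnable in userspace with compiler builtins standing in for the kernel roundup_pow_of_two()/ilog2() helpers:

/* Standalone version of the nbuckets/bucket_log calculation above. */
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	return n <= 1 ? 1 : 1u << (32 - __builtin_clz(n - 1));
}

static unsigned int ilog2(unsigned int n)
{
	return 31 - __builtin_clz(n);
}

int main(void)
{
	for (unsigned int cpus = 1; cpus <= 6; cpus++) {
		unsigned int nbuckets = roundup_pow_of_two(cpus);

		if (nbuckets < 2)	/* at least 2 buckets for select_bucket() */
			nbuckets = 2;
		printf("cpus=%u -> nbuckets=%u bucket_log=%u\n",
		       cpus, nbuckets, ilog2(nbuckets));
	}
	return 0;
}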
diff --git a/net/core/dev.c b/net/core/dev.c
index 17529d49..a69e8bd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5792,7 +5792,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	if (&ptype->list == head)
 		goto normal;
 
-	if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
+	if (PTR_ERR(pp) == -EINPROGRESS) {
 		ret = GRO_CONSUMED;
 		goto ok;
 	}
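
Both this hunk and the sk_reuseport_attach_bpf() one further down drop a redundant IS_ERR() check: comparing PTR_ERR() against one specific errno already implies the pointer sits in the error range, because a valid pointer can never decode to a small negative errno value. A simplified userspace re-creation of the helpers (the real ones live in include/linux/err.h) makes the point:

/* Simplified stand-ins for the ERR_PTR helpers, to show why
 * "PTR_ERR(p) == -EINPROGRESS" alone is enough. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static void *ERR_PTR(long error)	{ return (void *)error; }
static long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *busy = ERR_PTR(-EINPROGRESS);
	int x = 42;
	void *ok = &x;

	/* Only an error pointer can ever decode to -EINPROGRESS. */
	printf("busy: IS_ERR=%d PTR_ERR==-EINPROGRESS: %d\n",
	       IS_ERR(busy), PTR_ERR(busy) == -EINPROGRESS);
	printf("ok:   IS_ERR=%d PTR_ERR==-EINPROGRESS: %d\n",
	       IS_ERR(ok), PTR_ERR(ok) == -EINPROGRESS);
	return 0;
}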
diff --git a/net/core/devlink.c b/net/core/devlink.c
index ca1df0e..549ee56 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -3986,6 +3986,12 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
 		goto out_unlock;
 	}
 
+	/* return 0 if there is no further data to read */
+	if (start_offset >= region->size) {
+		err = 0;
+		goto out_unlock;
+	}
+
 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 			  &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI,
 			  DEVLINK_CMD_REGION_READ);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 536e032..31700e0 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -802,16 +802,12 @@ net_dm_hw_metadata_clone(const struct net_dm_hw_metadata *hw_metadata)
 	if (!n_hw_metadata)
 		return NULL;
 
-	trap_group_name = kmemdup(hw_metadata->trap_group_name,
-				  strlen(hw_metadata->trap_group_name) + 1,
-				  GFP_ATOMIC | __GFP_ZERO);
+	trap_group_name = kstrdup(hw_metadata->trap_group_name, GFP_ATOMIC);
 	if (!trap_group_name)
 		goto free_hw_metadata;
 	n_hw_metadata->trap_group_name = trap_group_name;
 
-	trap_name = kmemdup(hw_metadata->trap_name,
-			    strlen(hw_metadata->trap_name) + 1,
-			    GFP_ATOMIC | __GFP_ZERO);
+	trap_name = kstrdup(hw_metadata->trap_name, GFP_ATOMIC);
 	if (!trap_name)
 		goto free_trap_group;
 	n_hw_metadata->trap_name = trap_name;
@@ -1004,8 +1000,10 @@ static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack)
 {
 	int cpu;
 
-	if (!monitor_hw)
+	if (!monitor_hw) {
 		NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled");
+		return;
+	}
 
 	monitor_hw = false;
 
diff --git a/net/core/filter.c b/net/core/filter.c
index 792e374..c180871 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1573,7 +1573,7 @@ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
 		return -EPERM;
 
 	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
-	if (IS_ERR(prog) && PTR_ERR(prog) == -EINVAL)
+	if (PTR_ERR(prog) == -EINVAL)
 		prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
 	if (IS_ERR(prog))
 		return PTR_ERR(prog);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 294bfcf..acc849d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -535,12 +535,12 @@ static int pgctrl_open(struct inode *inode, struct file *file)
 	return single_open(file, pgctrl_show, PDE_DATA(inode));
 }
 
-static const struct file_operations pktgen_fops = {
-	.open    = pgctrl_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.write   = pgctrl_write,
-	.release = single_release,
+static const struct proc_ops pktgen_proc_ops = {
+	.proc_open	= pgctrl_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= pgctrl_write,
+	.proc_release	= single_release,
 };
 
 static int pktgen_if_show(struct seq_file *seq, void *v)
@@ -1707,12 +1707,12 @@ static int pktgen_if_open(struct inode *inode, struct file *file)
 	return single_open(file, pktgen_if_show, PDE_DATA(inode));
 }
 
-static const struct file_operations pktgen_if_fops = {
-	.open    = pktgen_if_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.write   = pktgen_if_write,
-	.release = single_release,
+static const struct proc_ops pktgen_if_proc_ops = {
+	.proc_open	= pktgen_if_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= pktgen_if_write,
+	.proc_release	= single_release,
 };
 
 static int pktgen_thread_show(struct seq_file *seq, void *v)
@@ -1844,12 +1844,12 @@ static int pktgen_thread_open(struct inode *inode, struct file *file)
 	return single_open(file, pktgen_thread_show, PDE_DATA(inode));
 }
 
-static const struct file_operations pktgen_thread_fops = {
-	.open    = pktgen_thread_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.write   = pktgen_thread_write,
-	.release = single_release,
+static const struct proc_ops pktgen_thread_proc_ops = {
+	.proc_open	= pktgen_thread_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_write	= pktgen_thread_write,
+	.proc_release	= single_release,
 };
 
 /* Think find or remove for NN */
@@ -1926,7 +1926,7 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
 
 			pkt_dev->entry = proc_create_data(dev->name, 0600,
 							  pn->proc_dir,
-							  &pktgen_if_fops,
+							  &pktgen_if_proc_ops,
 							  pkt_dev);
 			if (!pkt_dev->entry)
 				pr_err("can't move proc entry for '%s'\n",
@@ -3638,7 +3638,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
 		pkt_dev->clone_skb = pg_clone_skb_d;
 
 	pkt_dev->entry = proc_create_data(ifname, 0600, t->net->proc_dir,
-					  &pktgen_if_fops, pkt_dev);
+					  &pktgen_if_proc_ops, pkt_dev);
 	if (!pkt_dev->entry) {
 		pr_err("cannot create %s/%s procfs entry\n",
 		       PG_PROC_DIR, ifname);
@@ -3708,7 +3708,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
 	t->tsk = p;
 
 	pe = proc_create_data(t->tsk->comm, 0600, pn->proc_dir,
-			      &pktgen_thread_fops, t);
+			      &pktgen_thread_proc_ops, t);
 	if (!pe) {
 		pr_err("cannot create %s/%s procfs entry\n",
 		       PG_PROC_DIR, t->tsk->comm);
@@ -3793,7 +3793,7 @@ static int __net_init pg_net_init(struct net *net)
 		pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
 		return -ENODEV;
 	}
-	pe = proc_create(PGCTRL, 0600, pn->proc_dir, &pktgen_fops);
+	pe = proc_create(PGCTRL, 0600, pn->proc_dir, &pktgen_proc_ops);
 	if (pe == NULL) {
 		pr_err("cannot create %s procfs entry\n", PGCTRL);
 		ret = -EINVAL;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index cdad6ed..09c44bf 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1242,6 +1242,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 		return 0;
 
 	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
+	memset(&node_guid, 0, sizeof(node_guid));
+	memset(&port_guid, 0, sizeof(port_guid));
 
 	vf_mac.vf =
 		vf_vlan.vf =
@@ -1290,8 +1292,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 		    sizeof(vf_trust), &vf_trust))
 		goto nla_put_vf_failure;
 
-	memset(&node_guid, 0, sizeof(node_guid));
-	memset(&port_guid, 0, sizeof(port_guid));
 	if (dev->netdev_ops->ndo_get_vf_guid &&
 	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
 					      &port_guid)) {
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 8998e35..085cef5 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -234,7 +234,6 @@ static void sock_map_free(struct bpf_map *map)
 	int i;
 
 	synchronize_rcu();
-	rcu_read_lock();
 	raw_spin_lock_bh(&stab->lock);
 	for (i = 0; i < stab->map.max_entries; i++) {
 		struct sock **psk = &stab->sks[i];
@@ -243,13 +242,15 @@ static void sock_map_free(struct bpf_map *map)
 		sk = xchg(psk, NULL);
 		if (sk) {
 			lock_sock(sk);
+			rcu_read_lock();
 			sock_map_unref(sk, psk);
+			rcu_read_unlock();
 			release_sock(sk);
 		}
 	}
 	raw_spin_unlock_bh(&stab->lock);
-	rcu_read_unlock();
 
+	/* wait for psock readers accessing its map link */
 	synchronize_rcu();
 
 	bpf_map_area_free(stab->sks);
@@ -416,14 +417,16 @@ static int sock_map_update_elem(struct bpf_map *map, void *key,
 		ret = -EINVAL;
 		goto out;
 	}
-	if (!sock_map_sk_is_suitable(sk) ||
-	    sk->sk_state != TCP_ESTABLISHED) {
+	if (!sock_map_sk_is_suitable(sk)) {
 		ret = -EOPNOTSUPP;
 		goto out;
 	}
 
 	sock_map_sk_acquire(sk);
-	ret = sock_map_update_common(map, idx, sk, flags);
+	if (sk->sk_state != TCP_ESTABLISHED)
+		ret = -EOPNOTSUPP;
+	else
+		ret = sock_map_update_common(map, idx, sk, flags);
 	sock_map_sk_release(sk);
 out:
 	fput(sock->file);
@@ -739,14 +742,16 @@ static int sock_hash_update_elem(struct bpf_map *map, void *key,
 		ret = -EINVAL;
 		goto out;
 	}
-	if (!sock_map_sk_is_suitable(sk) ||
-	    sk->sk_state != TCP_ESTABLISHED) {
+	if (!sock_map_sk_is_suitable(sk)) {
 		ret = -EOPNOTSUPP;
 		goto out;
 	}
 
 	sock_map_sk_acquire(sk);
-	ret = sock_hash_update_common(map, key, sk, flags);
+	if (sk->sk_state != TCP_ESTABLISHED)
+		ret = -EOPNOTSUPP;
+	else
+		ret = sock_hash_update_common(map, key, sk, flags);
 	sock_map_sk_release(sk);
 out:
 	fput(sock->file);
@@ -859,19 +864,22 @@ static void sock_hash_free(struct bpf_map *map)
 	int i;
 
 	synchronize_rcu();
-	rcu_read_lock();
 	for (i = 0; i < htab->buckets_num; i++) {
 		bucket = sock_hash_select_bucket(htab, i);
 		raw_spin_lock_bh(&bucket->lock);
 		hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
 			hlist_del_rcu(&elem->node);
 			lock_sock(elem->sk);
+			rcu_read_lock();
 			sock_map_unref(elem->sk, elem);
+			rcu_read_unlock();
 			release_sock(elem->sk);
 		}
 		raw_spin_unlock_bh(&bucket->lock);
 	}
-	rcu_read_unlock();
+
+	/* wait for psock readers accessing its map link */
+	synchronize_rcu();
 
 	bpf_map_area_free(htab->buckets);
 	kfree(htab);
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index ee56129..fbfd0db 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -27,6 +27,8 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
 
 	rcu_read_lock(); /* hsr->node_db, hsr->ports */
 	port = hsr_port_get_rcu(skb->dev);
+	if (!port)
+		goto finish_pass;
 
 	if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
 		/* Directly kill frames sent by ourselves */
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index f35308f..4438f6b 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1334,7 +1334,7 @@ static int __init ipconfig_proc_net_init(void)
 
 /* Create a new file under /proc/net/ipconfig */
 static int ipconfig_proc_net_create(const char *name,
-				    const struct file_operations *fops)
+				    const struct proc_ops *proc_ops)
 {
 	char *pname;
 	struct proc_dir_entry *p;
@@ -1346,7 +1346,7 @@ static int ipconfig_proc_net_create(const char *name,
 	if (!pname)
 		return -ENOMEM;
 
-	p = proc_create(pname, 0444, init_net.proc_net, fops);
+	p = proc_create(pname, 0444, init_net.proc_net, proc_ops);
 	kfree(pname);
 	if (!p)
 		return -ENOMEM;
@@ -1355,7 +1355,7 @@ static int ipconfig_proc_net_create(const char *name,
 }
 
 /* Write NTP server IP addresses to /proc/net/ipconfig/ntp_servers */
-static int ntp_servers_seq_show(struct seq_file *seq, void *v)
+static int ntp_servers_show(struct seq_file *seq, void *v)
 {
 	int i;
 
@@ -1365,7 +1365,7 @@ static int ntp_servers_seq_show(struct seq_file *seq, void *v)
 	}
 	return 0;
 }
-DEFINE_SHOW_ATTRIBUTE(ntp_servers_seq);
+DEFINE_PROC_SHOW_ATTRIBUTE(ntp_servers);
 #endif /* CONFIG_PROC_FS */
 
 /*
@@ -1456,7 +1456,7 @@ static int __init ip_auto_config(void)
 	proc_create_single("pnp", 0444, init_net.proc_net, pnp_seq_show);
 
 	if (ipconfig_proc_net_init() == 0)
-		ipconfig_proc_net_create("ntp_servers", &ntp_servers_seq_fops);
+		ipconfig_proc_net_create("ntp_servers", &ntp_servers_proc_ops);
 #endif /* CONFIG_PROC_FS */
 
 	if (!ic_enable)
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 6bdb1ab..f8755a4 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -58,7 +58,7 @@ struct clusterip_config {
 };
 
 #ifdef CONFIG_PROC_FS
-static const struct file_operations clusterip_proc_fops;
+static const struct proc_ops clusterip_proc_ops;
 #endif
 
 struct clusterip_net {
@@ -280,7 +280,7 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
 		mutex_lock(&cn->mutex);
 		c->pde = proc_create_data(buffer, 0600,
 					  cn->procdir,
-					  &clusterip_proc_fops, c);
+					  &clusterip_proc_ops, c);
 		mutex_unlock(&cn->mutex);
 		if (!c->pde) {
 			err = -ENOMEM;
@@ -804,12 +804,12 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
 	return size;
 }
 
-static const struct file_operations clusterip_proc_fops = {
-	.open	 = clusterip_proc_open,
-	.read	 = seq_read,
-	.write	 = clusterip_proc_write,
-	.llseek	 = seq_lseek,
-	.release = clusterip_proc_release,
+static const struct proc_ops clusterip_proc_ops = {
+	.proc_open	= clusterip_proc_open,
+	.proc_read	= seq_read,
+	.proc_write	= clusterip_proc_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= clusterip_proc_release,
 };
 
 #endif /* CONFIG_PROC_FS */
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d5c57b3..ebe7060 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -237,11 +237,11 @@ static int rt_cache_seq_open(struct inode *inode, struct file *file)
 	return seq_open(file, &rt_cache_seq_ops);
 }
 
-static const struct file_operations rt_cache_seq_fops = {
-	.open	 = rt_cache_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
+static const struct proc_ops rt_cache_proc_ops = {
+	.proc_open	= rt_cache_seq_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
 };
 
 
@@ -328,11 +328,11 @@ static int rt_cpu_seq_open(struct inode *inode, struct file *file)
 	return seq_open(file, &rt_cpu_seq_ops);
 }
 
-static const struct file_operations rt_cpu_seq_fops = {
-	.open	 = rt_cpu_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
+static const struct proc_ops rt_cpu_proc_ops = {
+	.proc_open	= rt_cpu_seq_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
 };
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
@@ -366,12 +366,12 @@ static int __net_init ip_rt_do_proc_init(struct net *net)
 	struct proc_dir_entry *pde;
 
 	pde = proc_create("rt_cache", 0444, net->proc_net,
-			  &rt_cache_seq_fops);
+			  &rt_cache_proc_ops);
 	if (!pde)
 		goto err1;
 
 	pde = proc_create("rt_cache", 0444,
-			  net->proc_net_stat, &rt_cpu_seq_fops);
+			  net->proc_net_stat, &rt_cpu_proc_ops);
 	if (!pde)
 		goto err2;
 
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 345b2b0..9a4f6b1 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -349,6 +349,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
 	treq->snt_synack	= 0;
 	treq->tfo_listener	= false;
+
+	if (IS_ENABLED(CONFIG_MPTCP))
+		treq->is_mptcp = 0;
+
 	if (IS_ENABLED(CONFIG_SMC))
 		ireq->smc_ok = 0;
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 484485a..eb2d805 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2622,10 +2622,12 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->snd_cwnd = TCP_INIT_CWND;
 	tp->snd_cwnd_cnt = 0;
 	tp->window_clamp = 0;
+	tp->delivered = 0;
 	tp->delivered_ce = 0;
 	tcp_set_ca_state(sk, TCP_CA_Open);
 	tp->is_sack_reneg = 0;
 	tcp_clear_retrans(tp);
+	tp->total_retrans = 0;
 	inet_csk_delack_init(sk);
 	/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
 	 * issue in __tcp_select_window()
@@ -2637,10 +2639,14 @@ int tcp_disconnect(struct sock *sk, int flags)
 	sk->sk_rx_dst = NULL;
 	tcp_saved_syn_free(tp);
 	tp->compressed_ack = 0;
+	tp->segs_in = 0;
+	tp->segs_out = 0;
 	tp->bytes_sent = 0;
 	tp->bytes_acked = 0;
 	tp->bytes_received = 0;
 	tp->bytes_retrans = 0;
+	tp->data_segs_in = 0;
+	tp->data_segs_out = 0;
 	tp->duplicate_sack[0].start_seq = 0;
 	tp->duplicate_sack[0].end_seq = 0;
 	tp->dsack_dups = 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e8b840a..316ebdf 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5908,8 +5908,14 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		 *        the segment and return)"
 		 */
 		if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
-		    after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt))
+		    after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
+			/* Previous FIN/ACK or RST/ACK might be ignored. */
+			if (icsk->icsk_retransmits == 0)
+				inet_csk_reset_xmit_timer(sk,
+						ICSK_TIME_RETRANS,
+						TCP_TIMEOUT_MIN, TCP_RTO_MAX);
 			goto reset_and_undo;
+		}
 
 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
@@ -6637,6 +6643,9 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 
 	af_ops->init_req(req, sk, skb);
 
+	if (IS_ENABLED(CONFIG_MPTCP) && want_cookie)
+		tcp_rsk(req)->is_mptcp = 0;
+
 	if (security_inet_conn_request(sk, skb, req))
 		goto drop_and_free;
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 39d861d..cb493e1 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5718,6 +5718,9 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
 	struct nlattr *tb[IFLA_INET6_MAX + 1];
 	int err;
 
+	if (!idev)
+		return -EAFNOSUPPORT;
+
 	if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
 		BUG();
 
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 30915f6..13235a0 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -178,6 +178,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	treq = tcp_rsk(req);
 	treq->tfo_listener = false;
 
+	if (IS_ENABLED(CONFIG_MPTCP))
+		treq->is_mptcp = 0;
+
 	if (security_inet_conn_request(sk, skb, req))
 		goto out_free;
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 33a578a..eaf09e6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -239,7 +239,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
 		icsk->icsk_af_ops = &ipv6_mapped;
 		if (sk_is_mptcp(sk))
-			mptcp_handle_ipv6_mapped(sk, true);
+			mptcpv6_handle_mapped(sk, true);
 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
 #ifdef CONFIG_TCP_MD5SIG
 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
@@ -251,7 +251,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 			icsk->icsk_ext_hdr_len = exthdrlen;
 			icsk->icsk_af_ops = &ipv6_specific;
 			if (sk_is_mptcp(sk))
-				mptcp_handle_ipv6_mapped(sk, false);
+				mptcpv6_handle_mapped(sk, false);
 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
 #ifdef CONFIG_TCP_MD5SIG
 			tp->af_specific = &tcp_sock_ipv6_specific;
@@ -1208,7 +1208,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 
 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
 		if (sk_is_mptcp(newsk))
-			mptcp_handle_ipv6_mapped(newsk, true);
+			mptcpv6_handle_mapped(newsk, true);
 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
 #ifdef CONFIG_TCP_MD5SIG
 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index c99223c..fcb53ed 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -320,8 +320,13 @@ int l2tp_session_register(struct l2tp_session *session,
 
 		spin_lock_bh(&pn->l2tp_session_hlist_lock);
 
+		/* IP encap expects session IDs to be globally unique, while
+		 * UDP encap doesn't.
+		 */
 		hlist_for_each_entry(session_walk, g_head, global_hlist)
-			if (session_walk->session_id == session->session_id) {
+			if (session_walk->session_id == session->session_id &&
+			    (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP ||
+			     tunnel->encap == L2TP_ENCAPTYPE_IP)) {
 				err = -EEXIST;
 				goto err_tlock_pnlock;
 			}
diff --git a/net/mptcp/Kconfig b/net/mptcp/Kconfig
index 5db56d2..49f6054 100644
--- a/net/mptcp/Kconfig
+++ b/net/mptcp/Kconfig
@@ -10,17 +10,19 @@
 	  uses the TCP protocol, and TCP options carry header information for
 	  MPTCP.
 
+if MPTCP
+
 config MPTCP_IPV6
 	bool "MPTCP: IPv6 support for Multipath TCP"
-	depends on MPTCP
 	select IPV6
 	default y
 
 config MPTCP_HMAC_TEST
 	bool "Tests for MPTCP HMAC implementation"
-	default n
 	help
 	  This option enable boot time self-test for the HMAC implementation
 	  used by the MPTCP code
 
 	  Say N if you are unsure.
+
+endif
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 39fdca7..73780b4 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -24,57 +24,12 @@
 
 #define MPTCP_SAME_STATE TCP_MAX_STATES
 
-static void __mptcp_close(struct sock *sk, long timeout);
-
-static const struct proto_ops *tcp_proto_ops(struct sock *sk)
-{
-#if IS_ENABLED(CONFIG_IPV6)
-	if (sk->sk_family == AF_INET6)
-		return &inet6_stream_ops;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+struct mptcp6_sock {
+	struct mptcp_sock msk;
+	struct ipv6_pinfo np;
+};
 #endif
-	return &inet_stream_ops;
-}
-
-/* MP_CAPABLE handshake failed, convert msk to plain tcp, replacing
- * socket->sk and stream ops and destroying msk
- * return the msk socket, as we can't access msk anymore after this function
- * completes
- * Called with msk lock held, releases such lock before returning
- */
-static struct socket *__mptcp_fallback_to_tcp(struct mptcp_sock *msk,
-					      struct sock *ssk)
-{
-	struct mptcp_subflow_context *subflow;
-	struct socket *sock;
-	struct sock *sk;
-
-	sk = (struct sock *)msk;
-	sock = sk->sk_socket;
-	subflow = mptcp_subflow_ctx(ssk);
-
-	/* detach the msk socket */
-	list_del_init(&subflow->node);
-	sock_orphan(sk);
-	sock->sk = NULL;
-
-	/* socket is now TCP */
-	lock_sock(ssk);
-	sock_graft(ssk, sock);
-	if (subflow->conn) {
-		/* We can't release the ULP data on a live socket,
-		 * restore the tcp callback
-		 */
-		mptcp_subflow_tcp_fallback(ssk, subflow);
-		sock_put(subflow->conn);
-		subflow->conn = NULL;
-	}
-	release_sock(ssk);
-	sock->ops = tcp_proto_ops(ssk);
-
-	/* destroy the left-over msk sock */
-	__mptcp_close(sk, 0);
-	return sock;
-}
 
 /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
  * completed yet or has failed, return the subflow socket.
@@ -93,10 +48,6 @@ static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk)
 	return msk->first && !sk_is_mptcp(msk->first);
 }
 
-/* if the mp_capable handshake has failed, it fallbacks msk to plain TCP,
- * releases the socket lock and returns a reference to the now TCP socket.
- * Otherwise returns NULL
- */
 static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
 {
 	sock_owned_by_me((const struct sock *)msk);
@@ -105,15 +56,11 @@ static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
 		return NULL;
 
 	if (msk->subflow) {
-		/* the first subflow is an active connection, discart the
-		 * paired socket
-		 */
-		msk->subflow->sk = NULL;
-		sock_release(msk->subflow);
-		msk->subflow = NULL;
+		release_sock((struct sock *)msk);
+		return msk->subflow;
 	}
 
-	return __mptcp_fallback_to_tcp(msk, msk->first);
+	return NULL;
 }
 
 static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk)
@@ -640,32 +587,30 @@ static void mptcp_subflow_shutdown(struct sock *ssk, int how)
 }
 
 /* Called with msk lock held, releases such lock before returning */
-static void __mptcp_close(struct sock *sk, long timeout)
+static void mptcp_close(struct sock *sk, long timeout)
 {
 	struct mptcp_subflow_context *subflow, *tmp;
 	struct mptcp_sock *msk = mptcp_sk(sk);
+	LIST_HEAD(conn_list);
+
+	lock_sock(sk);
 
 	mptcp_token_destroy(msk->token);
 	inet_sk_state_store(sk, TCP_CLOSE);
 
-	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+	list_splice_init(&msk->conn_list, &conn_list);
+
+	release_sock(sk);
+
+	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
 		__mptcp_close_ssk(sk, ssk, subflow, timeout);
 	}
 
-	if (msk->cached_ext)
-		__skb_ext_put(msk->cached_ext);
-	release_sock(sk);
 	sk_common_release(sk);
 }
 
-static void mptcp_close(struct sock *sk, long timeout)
-{
-	lock_sock(sk);
-	__mptcp_close(sk, timeout);
-}
-
 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
 {
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
@@ -689,6 +634,30 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
 	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
 }
 
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
+{
+	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);
+
+	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
+}
+#endif
+
+struct sock *mptcp_sk_clone_lock(const struct sock *sk)
+{
+	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
+
+	if (!nsk)
+		return NULL;
+
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+	if (nsk->sk_family == AF_INET6)
+		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
+#endif
+
+	return nsk;
+}
+
 static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
 				 bool kern)
 {
@@ -719,7 +688,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
 		lock_sock(sk);
 
 		local_bh_disable();
-		new_mptcp_sock = sk_clone_lock(sk, GFP_ATOMIC);
+		new_mptcp_sock = mptcp_sk_clone_lock(sk);
 		if (!new_mptcp_sock) {
 			*err = -ENOBUFS;
 			local_bh_enable();
@@ -776,18 +745,19 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
 
 static void mptcp_destroy(struct sock *sk)
 {
+	struct mptcp_sock *msk = mptcp_sk(sk);
+
+	if (msk->cached_ext)
+		__skb_ext_put(msk->cached_ext);
 }
 
 static int mptcp_setsockopt(struct sock *sk, int level, int optname,
-			    char __user *uoptval, unsigned int optlen)
+			    char __user *optval, unsigned int optlen)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
-	char __kernel *optval;
 	int ret = -EOPNOTSUPP;
 	struct socket *ssock;
-
-	/* will be treated as __user in tcp_setsockopt */
-	optval = (char __kernel __force *)uoptval;
+	struct sock *ssk;
 
 	pr_debug("msk=%p", msk);
 
@@ -796,27 +766,28 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname,
 	 */
 	lock_sock(sk);
 	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
-	if (!IS_ERR(ssock)) {
-		pr_debug("subflow=%p", ssock->sk);
-		ret = kernel_setsockopt(ssock, level, optname, optval, optlen);
+	if (IS_ERR(ssock)) {
+		release_sock(sk);
+		return ret;
 	}
+
+	ssk = ssock->sk;
+	sock_hold(ssk);
 	release_sock(sk);
 
+	ret = tcp_setsockopt(ssk, level, optname, optval, optlen);
+	sock_put(ssk);
+
 	return ret;
 }
 
 static int mptcp_getsockopt(struct sock *sk, int level, int optname,
-			    char __user *uoptval, int __user *uoption)
+			    char __user *optval, int __user *option)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
-	char __kernel *optval;
 	int ret = -EOPNOTSUPP;
-	int __kernel *option;
 	struct socket *ssock;
-
-	/* will be treated as __user in tcp_getsockopt */
-	optval = (char __kernel __force *)uoptval;
-	option = (int __kernel __force *)uoption;
+	struct sock *ssk;
 
 	pr_debug("msk=%p", msk);
 
@@ -825,12 +796,18 @@ static int mptcp_getsockopt(struct sock *sk, int level, int optname,
 	 */
 	lock_sock(sk);
 	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
-	if (!IS_ERR(ssock)) {
-		pr_debug("subflow=%p", ssock->sk);
-		ret = kernel_getsockopt(ssock, level, optname, optval, option);
+	if (IS_ERR(ssock)) {
+		release_sock(sk);
+		return ret;
 	}
+
+	ssk = ssock->sk;
+	sock_hold(ssk);
 	release_sock(sk);
 
+	ret = tcp_getsockopt(ssk, level, optname, optval, option);
+	sock_put(ssk);
+
 	return ret;
 }
 
@@ -1260,8 +1237,7 @@ int mptcp_proto_v6_init(void)
 	strcpy(mptcp_v6_prot.name, "MPTCPv6");
 	mptcp_v6_prot.slab = NULL;
 	mptcp_v6_prot.destroy = mptcp_v6_destroy;
-	mptcp_v6_prot.obj_size = sizeof(struct mptcp_sock) +
-				 sizeof(struct ipv6_pinfo);
+	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
 
 	err = proto_register(&mptcp_v6_prot, 1);
 	if (err)
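
mptcp_sk_clone_lock() above relies on struct mptcp6_sock placing the ipv6_pinfo as its last member, so the clone's pinet6 pointer can be recomputed as sizeof(outer) - sizeof(inner), the same offset trick the generic IPv6 socket code uses. A runnable illustration with stand-in structs (the names and sizes are invented, only the layout rule matters):

/* Stand-in structs showing how mptcp_inet6_sk() locates the trailing
 * ipv6_pinfo inside the containing socket object. */
#include <stdio.h>
#include <stdint.h>

struct inner {				/* plays the role of ipv6_pinfo */
	int scope;
};

struct outer {				/* plays the role of mptcp6_sock */
	char sock_bytes[48];		/* the mptcp_sock part */
	struct inner np;		/* must be the last member */
};

static struct inner *inner_of(void *sk)
{
	size_t offset = sizeof(struct outer) - sizeof(struct inner);

	return (struct inner *)((uint8_t *)sk + offset);
}

int main(void)
{
	struct outer o;

	printf("offset trick finds np: %s\n",
	       inner_of(&o) == &o.np ? "yes" : "no");
	return 0;
}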
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 1662e11..65122ed 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -186,6 +186,9 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 
 	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
 
+	if (tcp_rsk(req)->is_mptcp == 0)
+		goto create_child;
+
 	/* if the sk is MP_CAPABLE, we try to fetch the client key */
 	subflow_req = mptcp_subflow_rsk(req);
 	if (subflow_req->mp_capable) {
@@ -582,9 +585,9 @@ subflow_default_af_ops(struct sock *sk)
 	return &subflow_specific;
 }
 
-void mptcp_handle_ipv6_mapped(struct sock *sk, bool mapped)
-{
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
+{
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct inet_connection_sock_af_ops *target;
@@ -599,8 +602,8 @@ void mptcp_handle_ipv6_mapped(struct sock *sk, bool mapped)
 
 	subflow->icsk_af_ops = icsk->icsk_af_ops;
 	icsk->icsk_af_ops = target;
-#endif
 }
+#endif
 
 int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
 {
@@ -621,7 +624,9 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
 	 */
 	sf->sk->sk_net_refcnt = 1;
 	get_net(net);
+#ifdef CONFIG_PROC_FS
 	this_cpu_add(*net->core.sock_inuse, 1);
+#endif
 	err = tcp_set_ulp(sf->sk, "mptcp");
 	release_sock(sf->sk);
 
@@ -767,7 +772,7 @@ static void subflow_ulp_clone(const struct request_sock *req,
 	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
 	struct mptcp_subflow_context *new_ctx;
 
-	if (!subflow_req->mp_capable) {
+	if (!tcp_rsk(req)->is_mptcp || !subflow_req->mp_capable) {
 		subflow_ulp_fallback(newsk, old_ctx);
 		return;
 	}
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index cf895bc..69c107f 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1483,31 +1483,34 @@ ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = {
 };
 
 static int
-dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
+ip_set_dump_start(struct netlink_callback *cb)
 {
 	struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
 	int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
 	struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
 	struct nlattr *attr = (void *)nlh + min_len;
+	struct sk_buff *skb = cb->skb;
+	struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
 	u32 dump_type;
-	ip_set_id_t index;
 	int ret;
 
 	ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr,
 			nlh->nlmsg_len - min_len,
 			ip_set_dump_policy, NULL);
 	if (ret)
-		return ret;
+		goto error;
 
 	cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]);
 	if (cda[IPSET_ATTR_SETNAME]) {
+		ip_set_id_t index;
 		struct ip_set *set;
 
 		set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
 				      &index);
-		if (!set)
-			return -ENOENT;
-
+		if (!set) {
+			ret = -ENOENT;
+			goto error;
+		}
 		dump_type = DUMP_ONE;
 		cb->args[IPSET_CB_INDEX] = index;
 	} else {
@@ -1523,10 +1526,17 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
 	cb->args[IPSET_CB_DUMP] = dump_type;
 
 	return 0;
+
+error:
+	/* We have to create and send the error message manually :-( */
+	if (nlh->nlmsg_flags & NLM_F_ACK) {
+		netlink_ack(cb->skb, nlh, ret, NULL);
+	}
+	return ret;
 }
 
 static int
-ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
+ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	ip_set_id_t index = IPSET_INVALID_ID, max;
 	struct ip_set *set = NULL;
@@ -1537,18 +1547,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 	bool is_destroyed;
 	int ret = 0;
 
-	if (!cb->args[IPSET_CB_DUMP]) {
-		ret = dump_init(cb, inst);
-		if (ret < 0) {
-			nlh = nlmsg_hdr(cb->skb);
-			/* We have to create and send the error message
-			 * manually :-(
-			 */
-			if (nlh->nlmsg_flags & NLM_F_ACK)
-				netlink_ack(cb->skb, nlh, ret, NULL);
-			return ret;
-		}
-	}
+	if (!cb->args[IPSET_CB_DUMP])
+		return -EINVAL;
 
 	if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max)
 		goto out;
@@ -1684,7 +1684,8 @@ static int ip_set_dump(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 
 	{
 		struct netlink_dump_control c = {
-			.dump = ip_set_dump_start,
+			.start = ip_set_dump_start,
+			.dump = ip_set_dump_do,
 			.done = ip_set_dump_done,
 		};
 		return netlink_dump_start(ctnl, skb, nlh, &c);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f4c4b46..d130542 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2248,8 +2248,7 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
 	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
 	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
 
-	hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head),
-			      GFP_KERNEL | __GFP_ZERO);
+	hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
 
 	if (hash && nulls)
 		for (i = 0; i < nr_slots; i++)
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 7e91989..8af28e1 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -529,9 +529,9 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
 static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
 					  struct net_device *dev)
 {
-	nf_flow_table_offload_flush(flowtable);
 	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
 	flush_delayed_work(&flowtable->gc_work);
+	nf_flow_table_offload_flush(flowtable);
 }
 
 void nf_flow_table_cleanup(struct net_device *dev)
@@ -553,6 +553,7 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
 	cancel_delayed_work_sync(&flow_table->gc_work);
 	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
 	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
+	nf_flow_table_offload_flush(flow_table);
 	rhashtable_destroy(&flow_table->rhashtable);
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_free);
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index c8b70ff..83e1db3 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -675,6 +675,7 @@ static void flow_offload_work_del(struct flow_offload_work *offload)
 {
 	flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
 	flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
+	set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
 }
 
 static void flow_offload_tuple_stats(struct flow_offload_work *offload,
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index ce70c25..e27c6c5 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -939,14 +939,14 @@ EXPORT_SYMBOL(xt_check_entry_offsets);
  *
  * @size: number of entries
  *
- * Return: NULL or kmalloc'd or vmalloc'd array
+ * Return: NULL or zeroed kmalloc'd or vmalloc'd array
  */
 unsigned int *xt_alloc_entry_offsets(unsigned int size)
 {
 	if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
 		return NULL;
 
-	return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
+	return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);
 
 }
 EXPORT_SYMBOL(xt_alloc_entry_offsets);
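
An aside on the kvcalloc() conversions in this batch (nf_conntrack_core.c above, here in x_tables.c, and sch_choke.c further below): kvcalloc(n, size, flags) is defined as kvmalloc_array(n, size, flags | __GFP_ZERO), so these hunks only shorten the call without changing behaviour. A minimal userspace sketch of the same "allocate an array and zero it in one call" idea, with calloc() standing in for kvcalloc():

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t nr_slots = 1024;
	void **hash;

	/* one call allocates the array and zeroes it; common implementations
	 * also check nr_slots * sizeof(*hash) for overflow, as kvcalloc() does
	 */
	hash = calloc(nr_slots, sizeof(*hash));
	if (!hash)
		return 1;

	printf("slot[0] = %p\n", hash[0]);	/* all-zero slot */
	free(hash);
	return 0;
}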
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 781e0b4..0a97080 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -103,7 +103,7 @@ static DEFINE_SPINLOCK(recent_lock);
 static DEFINE_MUTEX(recent_mutex);
 
 #ifdef CONFIG_PROC_FS
-static const struct file_operations recent_mt_fops;
+static const struct proc_ops recent_mt_proc_ops;
 #endif
 
 static u_int32_t hash_rnd __read_mostly;
@@ -405,7 +405,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
 		goto out;
 	}
 	pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
-		  &recent_mt_fops, t);
+			       &recent_mt_proc_ops, t);
 	if (pde == NULL) {
 		recent_table_free(t);
 		ret = -ENOMEM;
@@ -616,13 +616,12 @@ recent_mt_proc_write(struct file *file, const char __user *input,
 	return size + 1;
 }
 
-static const struct file_operations recent_mt_fops = {
-	.open    = recent_seq_open,
-	.read    = seq_read,
-	.write   = recent_mt_proc_write,
-	.release = seq_release_private,
-	.owner   = THIS_MODULE,
-	.llseek = seq_lseek,
+static const struct proc_ops recent_mt_proc_ops = {
+	.proc_open	= recent_seq_open,
+	.proc_read	= seq_read,
+	.proc_write	= recent_mt_proc_write,
+	.proc_release	= seq_release_private,
+	.proc_lseek	= seq_lseek,
 };
 
 static int __net_init recent_proc_net_init(struct net *net)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3bec515..30c6879 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -408,17 +408,17 @@ static int __packet_get_status(const struct packet_sock *po, void *frame)
 	}
 }
 
-static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
+static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
 				   unsigned int flags)
 {
 	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 
 	if (shhwtstamps &&
 	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
-	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
+	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
 		return TP_STATUS_TS_RAW_HARDWARE;
 
-	if (ktime_to_timespec_cond(skb->tstamp, ts))
+	if (ktime_to_timespec64_cond(skb->tstamp, ts))
 		return TP_STATUS_TS_SOFTWARE;
 
 	return 0;
@@ -428,13 +428,20 @@ static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
 				    struct sk_buff *skb)
 {
 	union tpacket_uhdr h;
-	struct timespec ts;
+	struct timespec64 ts;
 	__u32 ts_status;
 
 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
 		return 0;
 
 	h.raw = frame;
+	/*
+	 * versions 1 through 3 overflow the timestamps in y2106, since they
+	 * all store the seconds in a 32-bit unsigned integer.
+	 * If we create a version 4, that should have a 64-bit timestamp,
+	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
+	 * nanoseconds.
+	 */
 	switch (po->tp_version) {
 	case TPACKET_V1:
 		h.h1->tp_sec = ts.tv_sec;
@@ -769,8 +776,8 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 		 * It shouldn't really happen as we don't close empty
 		 * blocks. See prb_retire_rx_blk_timer_expired().
 		 */
-		struct timespec ts;
-		getnstimeofday(&ts);
+		struct timespec64 ts;
+		ktime_get_real_ts64(&ts);
 		h1->ts_last_pkt.ts_sec = ts.tv_sec;
 		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
 	}
@@ -800,7 +807,7 @@ static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 	struct tpacket_block_desc *pbd1)
 {
-	struct timespec ts;
+	struct timespec64 ts;
 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 
 	smp_rmb();
@@ -813,7 +820,7 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 	BLOCK_NUM_PKTS(pbd1) = 0;
 	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 
-	getnstimeofday(&ts);
+	ktime_get_real_ts64(&ts);
 
 	h1->ts_first_pkt.ts_sec = ts.tv_sec;
 	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
@@ -2163,7 +2170,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	unsigned long status = TP_STATUS_USER;
 	unsigned short macoff, netoff, hdrlen;
 	struct sk_buff *copy_skb = NULL;
-	struct timespec ts;
+	struct timespec64 ts;
 	__u32 ts_status;
 	bool is_drop_n_account = false;
 	bool do_vnet = false;
@@ -2295,7 +2302,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
 
 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
-		getnstimeofday(&ts);
+		ktime_get_real_ts64(&ts);
 
 	status |= ts_status;
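
The comment added above spells out the remaining limitation: converting the kernel side to timespec64 changes nothing about the tpacket v1-v3 header formats themselves, because tp_sec is a 32-bit field and therefore wraps in 2106. A standalone userspace sketch of that arithmetic (not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* an unsigned 32-bit seconds-since-1970 counter wraps after 2^32 s */
	uint64_t wrap = (uint64_t)UINT32_MAX + 1;	/* 4294967296 seconds */
	double years = wrap / (365.2425 * 86400.0);	/* ~136.1 years */

	printf("u32 epoch seconds wrap after %.1f years, around year %d\n",
	       years, 1970 + (int)years);		/* -> 2106 */
	return 0;
}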
 
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 9d3c4d2..fe42f98 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -194,6 +194,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 service_in_use:
 	write_unlock(&local->services_lock);
 	rxrpc_unuse_local(local);
+	rxrpc_put_local(local);
 	ret = -EADDRINUSE;
 error_unlock:
 	release_sock(&rx->sk);
@@ -899,6 +900,7 @@ static int rxrpc_release_sock(struct sock *sk)
 	rxrpc_purge_queue(&sk->sk_receive_queue);
 
 	rxrpc_unuse_local(rx->local);
+	rxrpc_put_local(rx->local);
 	rx->local = NULL;
 	key_put(rx->key);
 	rx->key = NULL;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 5e99df8..7d730c4 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -490,6 +490,7 @@ enum rxrpc_call_flag {
 	RXRPC_CALL_RX_HEARD,		/* The peer responded at least once to this call */
 	RXRPC_CALL_RX_UNDERRUN,		/* Got data underrun */
 	RXRPC_CALL_IS_INTR,		/* The call is interruptible */
+	RXRPC_CALL_DISCONNECTED,	/* The call has been disconnected */
 };
 
 /*
@@ -1021,6 +1022,16 @@ void rxrpc_unuse_local(struct rxrpc_local *);
 void rxrpc_queue_local(struct rxrpc_local *);
 void rxrpc_destroy_all_locals(struct rxrpc_net *);
 
+static inline bool __rxrpc_unuse_local(struct rxrpc_local *local)
+{
+	return atomic_dec_return(&local->active_users) == 0;
+}
+
+static inline bool __rxrpc_use_local(struct rxrpc_local *local)
+{
+	return atomic_fetch_add_unless(&local->active_users, 1, 0) != 0;
+}
+
 /*
  * misc.c
  */
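
The two inline helpers added above capture a common refcounting idiom: __rxrpc_use_local() takes an active-user reference only if the count is still non-zero (atomic_fetch_add_unless(&local->active_users, 1, 0) returns the old value, so 0 means the local endpoint is already on its way out), and __rxrpc_unuse_local() tells the caller whether it just dropped the last use. A userspace sketch of the increment-unless-zero half, using C11 atomics in place of the kernel's atomic_t API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* increment *v unless it is currently 0; return true if we took a reference,
 * i.e. the userspace analogue of atomic_fetch_add_unless(v, 1, 0) != 0
 */
static bool use_unless_zero(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == 0)
			return false;	/* already dying, do not revive it */
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));

	return true;
}

int main(void)
{
	atomic_int active_users = 1;

	printf("first use:  %d\n", use_unless_zero(&active_users));	/* 1 */
	atomic_store(&active_users, 0);
	printf("after zero: %d\n", use_unless_zero(&active_users));	/* 0 */
	return 0;
}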
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index a31c18c..c9f34b0 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -493,7 +493,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 
 	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
 
-	if (conn)
+	if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
 		rxrpc_disconnect_call(call);
 	if (call->security)
 		call->security->free_call_crypto(call);
@@ -562,13 +562,14 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 }
 
 /*
- * Final call destruction under RCU.
+ * Final call destruction - but must be done in process context.
  */
-static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
+static void rxrpc_destroy_call(struct work_struct *work)
 {
-	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
+	struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
 	struct rxrpc_net *rxnet = call->rxnet;
 
+	rxrpc_put_connection(call->conn);
 	rxrpc_put_peer(call->peer);
 	kfree(call->rxtx_buffer);
 	kfree(call->rxtx_annotations);
@@ -578,6 +579,22 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
 }
 
 /*
+ * Final call destruction under RCU.
+ */
+static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
+{
+	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
+
+	if (in_softirq()) {
+		INIT_WORK(&call->processor, rxrpc_destroy_call);
+		if (!rxrpc_queue_work(&call->processor))
+			BUG();
+	} else {
+		rxrpc_destroy_call(&call->processor);
+	}
+}
+
+/*
  * clean up a call
  */
 void rxrpc_cleanup_call(struct rxrpc_call *call)
@@ -590,7 +607,6 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
 
 	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
 	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
-	ASSERTCMP(call->conn, ==, NULL);
 
 	rxrpc_cleanup_ring(call);
 	rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 376370c..ea7d4c2 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -785,6 +785,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
 	u32 cid;
 
 	spin_lock(&conn->channel_lock);
+	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
 
 	cid = call->cid;
 	if (cid) {
@@ -792,7 +793,6 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
 		chan = &conn->channels[channel];
 	}
 	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
-	call->conn = NULL;
 
 	/* Calls that have never actually been assigned a channel can simply be
 	 * discarded.  If the conn didn't get used either, it will follow
@@ -908,7 +908,6 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
 	spin_unlock(&rxnet->client_conn_cache_lock);
 out_2:
 	spin_unlock(&conn->channel_lock);
-	rxrpc_put_connection(conn);
 	_leave("");
 	return;
 
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 808a472..06fcff2 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -438,16 +438,12 @@ static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn)
 /*
  * connection-level event processor
  */
-void rxrpc_process_connection(struct work_struct *work)
+static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
 {
-	struct rxrpc_connection *conn =
-		container_of(work, struct rxrpc_connection, processor);
 	struct sk_buff *skb;
 	u32 abort_code = RX_PROTOCOL_ERROR;
 	int ret;
 
-	rxrpc_see_connection(conn);
-
 	if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
 		rxrpc_secure_connection(conn);
 
@@ -475,18 +471,32 @@ void rxrpc_process_connection(struct work_struct *work)
 		}
 	}
 
-out:
-	rxrpc_put_connection(conn);
-	_leave("");
 	return;
 
 requeue_and_leave:
 	skb_queue_head(&conn->rx_queue, skb);
-	goto out;
+	return;
 
 protocol_error:
 	if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
 		goto requeue_and_leave;
 	rxrpc_free_skb(skb, rxrpc_skb_freed);
-	goto out;
+	return;
+}
+
+void rxrpc_process_connection(struct work_struct *work)
+{
+	struct rxrpc_connection *conn =
+		container_of(work, struct rxrpc_connection, processor);
+
+	rxrpc_see_connection(conn);
+
+	if (__rxrpc_use_local(conn->params.local)) {
+		rxrpc_do_process_connection(conn);
+		rxrpc_unuse_local(conn->params.local);
+	}
+
+	rxrpc_put_connection(conn);
+	_leave("");
+	return;
 }
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 38d718e..19e141e 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -223,9 +223,8 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
 	__rxrpc_disconnect_call(conn, call);
 	spin_unlock(&conn->channel_lock);
 
-	call->conn = NULL;
+	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
 	conn->idle_timestamp = jiffies;
-	rxrpc_put_connection(conn);
 }
 
 /*
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 96d54e5..ef10fbf 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -599,10 +599,8 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
 				  false, true,
 				  rxrpc_propose_ack_input_data);
 
-	if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) {
-		trace_rxrpc_notify_socket(call->debug_id, serial);
-		rxrpc_notify_socket(call);
-	}
+	trace_rxrpc_notify_socket(call->debug_id, serial);
+	rxrpc_notify_socket(call);
 
 unlock:
 	spin_unlock(&call->input_lock);
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 3658726..a6c1349 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -364,11 +364,14 @@ void rxrpc_queue_local(struct rxrpc_local *local)
 void rxrpc_put_local(struct rxrpc_local *local)
 {
 	const void *here = __builtin_return_address(0);
+	unsigned int debug_id;
 	int n;
 
 	if (local) {
+		debug_id = local->debug_id;
+
 		n = atomic_dec_return(&local->usage);
-		trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
+		trace_rxrpc_local(debug_id, rxrpc_local_put, n, here);
 
 		if (n == 0)
 			call_rcu(&local->rcu, rxrpc_local_rcu);
@@ -380,14 +383,11 @@ void rxrpc_put_local(struct rxrpc_local *local)
  */
 struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
 {
-	unsigned int au;
-
 	local = rxrpc_get_local_maybe(local);
 	if (!local)
 		return NULL;
 
-	au = atomic_fetch_add_unless(&local->active_users, 1, 0);
-	if (au == 0) {
+	if (!__rxrpc_use_local(local)) {
 		rxrpc_put_local(local);
 		return NULL;
 	}
@@ -401,14 +401,11 @@ struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
  */
 void rxrpc_unuse_local(struct rxrpc_local *local)
 {
-	unsigned int au;
-
 	if (local) {
-		au = atomic_dec_return(&local->active_users);
-		if (au == 0)
+		if (__rxrpc_unuse_local(local)) {
+			rxrpc_get_local(local);
 			rxrpc_queue_local(local);
-		else
-			rxrpc_put_local(local);
+		}
 	}
 }
 
@@ -465,7 +462,7 @@ static void rxrpc_local_processor(struct work_struct *work)
 
 	do {
 		again = false;
-		if (atomic_read(&local->active_users) == 0) {
+		if (!__rxrpc_use_local(local)) {
 			rxrpc_local_destroyer(local);
 			break;
 		}
@@ -479,6 +476,8 @@ static void rxrpc_local_processor(struct work_struct *work)
 			rxrpc_process_local_events(local);
 			again = true;
 		}
+
+		__rxrpc_unuse_local(local);
 	} while (again);
 
 	rxrpc_put_local(local);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 935bb60..bad3d24 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -129,7 +129,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
 int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 			  rxrpc_serial_t *_serial)
 {
-	struct rxrpc_connection *conn = NULL;
+	struct rxrpc_connection *conn;
 	struct rxrpc_ack_buffer *pkt;
 	struct msghdr msg;
 	struct kvec iov[2];
@@ -139,18 +139,14 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 	int ret;
 	u8 reason;
 
-	spin_lock_bh(&call->lock);
-	if (call->conn)
-		conn = rxrpc_get_connection_maybe(call->conn);
-	spin_unlock_bh(&call->lock);
-	if (!conn)
+	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
 		return -ECONNRESET;
 
 	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
-	if (!pkt) {
-		rxrpc_put_connection(conn);
+	if (!pkt)
 		return -ENOMEM;
-	}
+
+	conn = call->conn;
 
 	msg.msg_name	= &call->peer->srx.transport;
 	msg.msg_namelen	= call->peer->srx.transport_len;
@@ -244,7 +240,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 	}
 
 out:
-	rxrpc_put_connection(conn);
 	kfree(pkt);
 	return ret;
 }
@@ -254,7 +249,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
  */
 int rxrpc_send_abort_packet(struct rxrpc_call *call)
 {
-	struct rxrpc_connection *conn = NULL;
+	struct rxrpc_connection *conn;
 	struct rxrpc_abort_buffer pkt;
 	struct msghdr msg;
 	struct kvec iov[1];
@@ -271,13 +266,11 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
 	    test_bit(RXRPC_CALL_TX_LAST, &call->flags))
 		return 0;
 
-	spin_lock_bh(&call->lock);
-	if (call->conn)
-		conn = rxrpc_get_connection_maybe(call->conn);
-	spin_unlock_bh(&call->lock);
-	if (!conn)
+	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
 		return -ECONNRESET;
 
+	conn = call->conn;
+
 	msg.msg_name	= &call->peer->srx.transport;
 	msg.msg_namelen	= call->peer->srx.transport_len;
 	msg.msg_control	= NULL;
@@ -312,8 +305,6 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
 		trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
 				      rxrpc_tx_point_call_abort);
 	rxrpc_tx_backoff(call, ret);
-
-	rxrpc_put_connection(conn);
 	return ret;
 }
 
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 48f67a9..923b263 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -364,27 +364,31 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 		if (!rxrpc_get_peer_maybe(peer))
 			continue;
 
-		spin_unlock_bh(&rxnet->peer_hash_lock);
+		if (__rxrpc_use_local(peer->local)) {
+			spin_unlock_bh(&rxnet->peer_hash_lock);
 
-		keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
-		slot = keepalive_at - base;
-		_debug("%02x peer %u t=%d {%pISp}",
-		       cursor, peer->debug_id, slot, &peer->srx.transport);
+			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+			slot = keepalive_at - base;
+			_debug("%02x peer %u t=%d {%pISp}",
+			       cursor, peer->debug_id, slot, &peer->srx.transport);
 
-		if (keepalive_at <= base ||
-		    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
-			rxrpc_send_keepalive(peer);
-			slot = RXRPC_KEEPALIVE_TIME;
+			if (keepalive_at <= base ||
+			    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
+				rxrpc_send_keepalive(peer);
+				slot = RXRPC_KEEPALIVE_TIME;
+			}
+
+			/* A transmission to this peer occurred since last we
+			 * examined it so put it into the appropriate future
+			 * bucket.
+			 */
+			slot += cursor;
+			slot &= mask;
+			spin_lock_bh(&rxnet->peer_hash_lock);
+			list_add_tail(&peer->keepalive_link,
+				      &rxnet->peer_keepalive[slot & mask]);
+			rxrpc_unuse_local(peer->local);
 		}
-
-		/* A transmission to this peer occurred since last we examined
-		 * it so put it into the appropriate future bucket.
-		 */
-		slot += cursor;
-		slot &= mask;
-		spin_lock_bh(&rxnet->peer_hash_lock);
-		list_add_tail(&peer->keepalive_link,
-			      &rxnet->peer_keepalive[slot & mask]);
 		rxrpc_put_peer_locked(peer);
 	}
 
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index c226241..d36949d 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -463,10 +463,8 @@ static u32 gen_tunnel(struct rsvp_head *data)
 
 static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
 	[TCA_RSVP_CLASSID]	= { .type = NLA_U32 },
-	[TCA_RSVP_DST]		= { .type = NLA_BINARY,
-				    .len = RSVP_DST_LEN * sizeof(u32) },
-	[TCA_RSVP_SRC]		= { .type = NLA_BINARY,
-				    .len = RSVP_DST_LEN * sizeof(u32) },
+	[TCA_RSVP_DST]		= { .len = RSVP_DST_LEN * sizeof(u32) },
+	[TCA_RSVP_SRC]		= { .len = RSVP_DST_LEN * sizeof(u32) },
 	[TCA_RSVP_PINFO]	= { .len = sizeof(struct tc_rsvp_pinfo) },
 };
 
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 3d4a128..09b7dc5 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -333,23 +333,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 	cp->fall_through = p->fall_through;
 	cp->tp = tp;
 
-	if (p->perfect) {
-		int i;
-
-		if (tcindex_alloc_perfect_hash(net, cp) < 0)
-			goto errout;
-		for (i = 0; i < cp->hash; i++)
-			cp->perfect[i].res = p->perfect[i].res;
-		balloc = 1;
-	}
-	cp->h = p->h;
-
-	err = tcindex_filter_result_init(&new_filter_result, net);
-	if (err < 0)
-		goto errout1;
-	if (old_r)
-		cr = r->res;
-
 	if (tb[TCA_TCINDEX_HASH])
 		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
 
@@ -359,6 +342,33 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 	if (tb[TCA_TCINDEX_SHIFT])
 		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
 
+	if (!cp->hash) {
+		/* Hash not specified, use perfect hash if the upper limit
+		 * of the hashing index is below the threshold.
+		 */
+		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
+			cp->hash = (cp->mask >> cp->shift) + 1;
+		else
+			cp->hash = DEFAULT_HASH_SIZE;
+	}
+
+	if (p->perfect) {
+		int i;
+
+		if (tcindex_alloc_perfect_hash(net, cp) < 0)
+			goto errout;
+		for (i = 0; i < min(cp->hash, p->hash); i++)
+			cp->perfect[i].res = p->perfect[i].res;
+		balloc = 1;
+	}
+	cp->h = p->h;
+
+	err = tcindex_filter_result_init(&new_filter_result, net);
+	if (err < 0)
+		goto errout_alloc;
+	if (old_r)
+		cr = r->res;
+
 	err = -EBUSY;
 
 	/* Hash already allocated, make sure that we still meet the
@@ -376,16 +386,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 	if (tb[TCA_TCINDEX_FALL_THROUGH])
 		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
 
-	if (!cp->hash) {
-		/* Hash not specified, use perfect hash if the upper limit
-		 * of the hashing index is below the threshold.
-		 */
-		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
-			cp->hash = (cp->mask >> cp->shift) + 1;
-		else
-			cp->hash = DEFAULT_HASH_SIZE;
-	}
-
 	if (!cp->perfect && !cp->h)
 		cp->alloc_hash = cp->hash;
 
@@ -484,7 +484,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 		tcindex_free_perfect_hash(cp);
 	else if (balloc == 2)
 		kfree(cp->h);
-errout1:
 	tcf_exts_destroy(&new_filter_result.exts);
 errout:
 	kfree(cp);
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index dba7037..a36974e 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -377,7 +377,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
 	if (mask != q->tab_mask) {
 		struct sk_buff **ntab;
 
-		ntab = kvmalloc_array((mask + 1), sizeof(struct sk_buff *), GFP_KERNEL | __GFP_ZERO);
+		ntab = kvcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
 		if (!ntab)
 			return -ENOMEM;
 
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index bbd0dea..214657e 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -349,9 +349,9 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
 	while (sch->q.qlen > sch->limit) {
 		struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);
 
-		kfree_skb(skb);
 		len_dropped += qdisc_pkt_len(skb);
 		num_dropped += 1;
+		rtnl_kfree_skbs(skb, skb);
 	}
 	qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);
 
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index c609373..660fc45 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -31,6 +31,7 @@ static DEFINE_SPINLOCK(taprio_list_lock);
 
 #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
 #define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
+#define TAPRIO_FLAGS_INVALID U32_MAX
 
 struct sched_entry {
 	struct list_head list;
@@ -766,6 +767,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
 	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
 	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
 	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
+	[TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
 };
 
 static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
@@ -1367,6 +1369,33 @@ static int taprio_mqprio_cmp(const struct net_device *dev,
 	return 0;
 }
 
+/* The semantics of the 'flags' argument in relation to 'change()'
+ * requests, are interpreted following two rules (which are applied in
+ * this order): (1) an omitted 'flags' argument is interpreted as
+ * zero; (2) the 'flags' of a "running" taprio instance cannot be
+ * changed.
+ */
+static int taprio_new_flags(const struct nlattr *attr, u32 old,
+			    struct netlink_ext_ack *extack)
+{
+	u32 new = 0;
+
+	if (attr)
+		new = nla_get_u32(attr);
+
+	if (old != TAPRIO_FLAGS_INVALID && old != new) {
+		NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
+		return -EOPNOTSUPP;
+	}
+
+	if (!taprio_flags_valid(new)) {
+		NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
+		return -EINVAL;
+	}
+
+	return new;
+}
+
 static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 			 struct netlink_ext_ack *extack)
 {
@@ -1375,7 +1404,6 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 	struct taprio_sched *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
 	struct tc_mqprio_qopt *mqprio = NULL;
-	u32 taprio_flags = 0;
 	unsigned long flags;
 	ktime_t start;
 	int i, err;
@@ -1388,21 +1416,14 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
 		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
 
-	if (tb[TCA_TAPRIO_ATTR_FLAGS]) {
-		taprio_flags = nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]);
+	err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
+			       q->flags, extack);
+	if (err < 0)
+		return err;
 
-		if (q->flags != 0 && q->flags != taprio_flags) {
-			NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
-			return -EOPNOTSUPP;
-		} else if (!taprio_flags_valid(taprio_flags)) {
-			NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
-			return -EINVAL;
-		}
+	q->flags = err;
 
-		q->flags = taprio_flags;
-	}
-
-	err = taprio_parse_mqprio_opt(dev, mqprio, extack, taprio_flags);
+	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
 	if (err < 0)
 		return err;
 
@@ -1444,7 +1465,20 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 
 	taprio_set_picos_per_byte(dev, q);
 
-	if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
+	if (mqprio) {
+		netdev_set_num_tc(dev, mqprio->num_tc);
+		for (i = 0; i < mqprio->num_tc; i++)
+			netdev_set_tc_queue(dev, i,
+					    mqprio->count[i],
+					    mqprio->offset[i]);
+
+		/* Always use supplied priority mappings */
+		for (i = 0; i <= TC_BITMASK; i++)
+			netdev_set_prio_tc_map(dev, i,
+					       mqprio->prio_tc_map[i]);
+	}
+
+	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
 		err = taprio_enable_offload(dev, mqprio, q, new_admin, extack);
 	else
 		err = taprio_disable_offload(dev, q, extack);
@@ -1464,27 +1498,14 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
 	}
 
-	if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) &&
-	    !FULL_OFFLOAD_IS_ENABLED(taprio_flags) &&
+	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
+	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
 	    !hrtimer_active(&q->advance_timer)) {
 		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
 		q->advance_timer.function = advance_sched;
 	}
 
-	if (mqprio) {
-		netdev_set_num_tc(dev, mqprio->num_tc);
-		for (i = 0; i < mqprio->num_tc; i++)
-			netdev_set_tc_queue(dev, i,
-					    mqprio->count[i],
-					    mqprio->offset[i]);
-
-		/* Always use supplied priority mappings */
-		for (i = 0; i <= TC_BITMASK; i++)
-			netdev_set_prio_tc_map(dev, i,
-					       mqprio->prio_tc_map[i]);
-	}
-
-	if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) {
+	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
 		q->dequeue = taprio_dequeue_offload;
 		q->peek = taprio_peek_offload;
 	} else {
@@ -1501,9 +1522,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 		goto unlock;
 	}
 
-	if (TXTIME_ASSIST_IS_ENABLED(taprio_flags)) {
-		setup_txtime(q, new_admin, start);
+	setup_txtime(q, new_admin, start);
 
+	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
 		if (!oper) {
 			rcu_assign_pointer(q->oper_sched, new_admin);
 			err = 0;
@@ -1528,7 +1549,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 
 		spin_unlock_irqrestore(&q->current_entry_lock, flags);
 
-		if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
+		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
 			taprio_offload_config_changed(q);
 	}
 
@@ -1567,7 +1588,7 @@ static void taprio_destroy(struct Qdisc *sch)
 	}
 	q->qdiscs = NULL;
 
-	netdev_set_num_tc(dev, 0);
+	netdev_reset_tc(dev);
 
 	if (q->oper_sched)
 		call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);
@@ -1597,6 +1618,7 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
 	 * and get the valid one on taprio_change().
 	 */
 	q->clockid = -1;
+	q->flags = TAPRIO_FLAGS_INVALID;
 
 	spin_lock(&taprio_list_lock);
 	list_add(&q->taprio_list, &taprio_list);
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index d024af4..8b4d72b 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -175,7 +175,7 @@ static int rpc_parse_scope_id(struct net *net, const char *buf,
 		return 0;
 
 	len = (buf + buflen) - delim - 1;
-	p = kstrndup(delim + 1, len, GFP_KERNEL);
+	p = kmemdup_nul(delim + 1, len, GFP_KERNEL);
 	if (p) {
 		u32 scope_id = 0;
 		struct net_device *dev;
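
On the kstrndup() -> kmemdup_nul() switch above: both return a NUL-terminated copy, but kstrndup() stops at the first NUL it finds within len, whereas kmemdup_nul() copies exactly len bytes and then appends the terminator. That fits a length-delimited substring such as the scope id here, where the source need not be a terminated string (my reading of the motivation; the patch itself does not say). A userspace sketch of the kmemdup_nul() behaviour:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* userspace sketch of kmemdup_nul(): duplicate exactly 'len' bytes and
 * NUL-terminate the copy, regardless of any NUL bytes in the source
 */
static char *memdup_nul(const char *src, size_t len)
{
	char *p = malloc(len + 1);

	if (!p)
		return NULL;
	memcpy(p, src, len);
	p[len] = '\0';
	return p;
}

int main(void)
{
	const char buf[] = { 'e', 't', 'h', '0', '%', '1' };	/* no terminator */
	char *p = memdup_nul(buf, 4);	/* length-delimited, like the scope id */

	printf("%s\n", p ? p : "(alloc failed)");	/* prints "eth0" */
	free(p);
	return 0;
}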
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index cdb05b4..5748ad0 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -221,55 +221,6 @@ rpcauth_get_gssinfo(rpc_authflavor_t pseudoflavor, struct rpcsec_gss_info *info)
 }
 EXPORT_SYMBOL_GPL(rpcauth_get_gssinfo);
 
-/**
- * rpcauth_list_flavors - discover registered flavors and pseudoflavors
- * @array: array to fill in
- * @size: size of "array"
- *
- * Returns the number of array items filled in, or a negative errno.
- *
- * The returned array is not sorted by any policy.  Callers should not
- * rely on the order of the items in the returned array.
- */
-int
-rpcauth_list_flavors(rpc_authflavor_t *array, int size)
-{
-	const struct rpc_authops *ops;
-	rpc_authflavor_t flavor, pseudos[4];
-	int i, len, result = 0;
-
-	rcu_read_lock();
-	for (flavor = 0; flavor < RPC_AUTH_MAXFLAVOR; flavor++) {
-		ops = rcu_dereference(auth_flavors[flavor]);
-		if (result >= size) {
-			result = -ENOMEM;
-			break;
-		}
-
-		if (ops == NULL)
-			continue;
-		if (ops->list_pseudoflavors == NULL) {
-			array[result++] = ops->au_flavor;
-			continue;
-		}
-		len = ops->list_pseudoflavors(pseudos, ARRAY_SIZE(pseudos));
-		if (len < 0) {
-			result = len;
-			break;
-		}
-		for (i = 0; i < len; i++) {
-			if (result >= size) {
-				result = -ENOMEM;
-				break;
-			}
-			array[result++] = pseudos[i];
-		}
-	}
-	rcu_read_unlock();
-	return result;
-}
-EXPORT_SYMBOL_GPL(rpcauth_list_flavors);
-
 struct rpc_auth *
 rpcauth_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
 {
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index d75fddc..24ca861 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -2118,7 +2118,6 @@ static const struct rpc_authops authgss_ops = {
 	.hash_cred	= gss_hash_cred,
 	.lookup_cred	= gss_lookup_cred,
 	.crcreate	= gss_create_cred,
-	.list_pseudoflavors = gss_mech_list_pseudoflavors,
 	.info2flavor	= gss_mech_info2flavor,
 	.flavor2info	= gss_mech_flavor2info,
 };
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 6e5d6d2..75b3c2e 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -253,6 +253,7 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
 {
 	u32 seq_send;
 	int tmp;
+	u32 time32;
 
 	p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
 	if (IS_ERR(p))
@@ -290,9 +291,11 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
 		p = ERR_PTR(-ENOSYS);
 		goto out_err;
 	}
-	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
+	p = simple_get_bytes(p, end, &time32, sizeof(time32));
 	if (IS_ERR(p))
 		goto out_err;
+	/* unsigned 32-bit time overflows in year 2106 */
+	ctx->endtime = (time64_t)time32;
 	p = simple_get_bytes(p, end, &seq_send, sizeof(seq_send));
 	if (IS_ERR(p))
 		goto out_err;
@@ -587,15 +590,18 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
 {
 	u64 seq_send64;
 	int keylen;
+	u32 time32;
 
 	p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
 	if (IS_ERR(p))
 		goto out_err;
 	ctx->initiate = ctx->flags & KRB5_CTX_FLAG_INITIATOR;
 
-	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
+	p = simple_get_bytes(p, end, &time32, sizeof(time32));
 	if (IS_ERR(p))
 		goto out_err;
+	/* unsigned 32-bit time overflows in year 2106 */
+	ctx->endtime = (time64_t)time32;
 	p = simple_get_bytes(p, end, &seq_send64, sizeof(seq_send64));
 	if (IS_ERR(p))
 		goto out_err;
@@ -659,7 +665,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
 static int
 gss_import_sec_context_kerberos(const void *p, size_t len,
 				struct gss_ctx *ctx_id,
-				time_t *endtime,
+				time64_t *endtime,
 				gfp_t gfp_mask)
 {
 	const void *end = (const void *)((const char *)p + len);
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index 48fe4a5..f1d280a 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -131,14 +131,14 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
 	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
 					    .data = cksumdata};
 	void			*ptr;
-	s32			now;
+	time64_t		now;
 	u32			seq_send;
 	u8			*cksumkey;
 
 	dprintk("RPC:       %s\n", __func__);
 	BUG_ON(ctx == NULL);
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 
 	ptr = setup_token(ctx, token);
 
@@ -170,7 +170,7 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
 	struct xdr_netobj cksumobj = { .len = sizeof(cksumdata),
 				       .data = cksumdata};
 	void *krb5_hdr;
-	s32 now;
+	time64_t now;
 	u8 *cksumkey;
 	unsigned int cksum_usage;
 	__be64 seq_send_be64;
@@ -198,7 +198,7 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
 
 	memcpy(krb5_hdr + GSS_KRB5_TOK_HDR_LEN, cksumobj.data, cksumobj.len);
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 
 	return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
 }
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index ef2b25b..aaab91c 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -124,7 +124,7 @@ gss_verify_mic_v1(struct krb5_ctx *ctx,
 
 	/* it got through unscathed.  Make sure the context is unexpired */
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 
 	if (now > ctx->endtime)
 		return GSS_S_CONTEXT_EXPIRED;
@@ -149,7 +149,7 @@ gss_verify_mic_v2(struct krb5_ctx *ctx,
 	char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
 	struct xdr_netobj cksumobj = {.len = sizeof(cksumdata),
 				      .data = cksumdata};
-	s32 now;
+	time64_t now;
 	u8 *ptr = read_token->data;
 	u8 *cksumkey;
 	u8 flags;
@@ -194,7 +194,7 @@ gss_verify_mic_v2(struct krb5_ctx *ctx,
 		return GSS_S_BAD_SIG;
 
 	/* it got through unscathed.  Make sure the context is unexpired */
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 	if (now > ctx->endtime)
 		return GSS_S_CONTEXT_EXPIRED;
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 14a0aff..6c1920e 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -163,7 +163,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 					    .data = cksumdata};
 	int			blocksize = 0, plainlen;
 	unsigned char		*ptr, *msg_start;
-	s32			now;
+	time64_t		now;
 	int			headlen;
 	struct page		**tmp_pages;
 	u32			seq_send;
@@ -172,7 +172,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
 	dprintk("RPC:       %s\n", __func__);
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 
 	blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
 	gss_krb5_add_padding(buf, offset, blocksize);
@@ -268,7 +268,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
 	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
 					    .data = cksumdata};
-	s32			now;
+	time64_t		now;
 	int			direction;
 	s32			seqnum;
 	unsigned char		*ptr;
@@ -359,7 +359,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 
 	/* it got through unscathed.  Make sure the context is unexpired */
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 
 	if (now > kctx->endtime)
 		return GSS_S_CONTEXT_EXPIRED;
@@ -439,7 +439,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
 		     struct xdr_buf *buf, struct page **pages)
 {
 	u8		*ptr, *plainhdr;
-	s32		now;
+	time64_t	now;
 	u8		flags = 0x00;
 	__be16		*be16ptr;
 	__be64		*be64ptr;
@@ -481,14 +481,14 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
 	if (err)
 		return err;
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
 }
 
 static u32
 gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 {
-	s32		now;
+	time64_t	now;
 	u8		*ptr;
 	u8		flags = 0x00;
 	u16		ec, rrc;
@@ -557,7 +557,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	/* do sequencing checks */
 
 	/* it got through unscathed.  Make sure the context is unexpired */
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 	if (now > kctx->endtime)
 		return GSS_S_CONTEXT_EXPIRED;
 
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 30b7de6..db550bf 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -220,35 +220,6 @@ gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
 }
 
 /**
- * gss_mech_list_pseudoflavors - Discover registered GSS pseudoflavors
- * @array_ptr: array to fill in
- * @size: size of "array"
- *
- * Returns the number of array items filled in, or a negative errno.
- *
- * The returned array is not sorted by any policy.  Callers should not
- * rely on the order of the items in the returned array.
- */
-int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr, int size)
-{
-	struct gss_api_mech *pos = NULL;
-	int j, i = 0;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(pos, &registered_mechs, gm_list) {
-		for (j = 0; j < pos->gm_pf_num; j++) {
-			if (i >= size) {
-				spin_unlock(&registered_mechs_lock);
-				return -ENOMEM;
-			}
-			array_ptr[i++] = pos->gm_pfs[j].pseudoflavor;
-		}
-	}
-	rcu_read_unlock();
-	return i;
-}
-
-/**
  * gss_svc_to_pseudoflavor - map a GSS service number to a pseudoflavor
  * @gm: GSS mechanism handle
  * @qop: GSS quality-of-protection value
@@ -376,7 +347,7 @@ int
 gss_import_sec_context(const void *input_token, size_t bufsize,
 		       struct gss_api_mech	*mech,
 		       struct gss_ctx		**ctx_id,
-		       time_t			*endtime,
+		       time64_t			*endtime,
 		       gfp_t gfp_mask)
 {
 	if (!(*ctx_id = kzalloc(sizeof(**ctx_id), gfp_mask)))
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index c62d1f1..65b67b2 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -203,7 +203,7 @@ static int rsi_parse(struct cache_detail *cd,
 	char *ep;
 	int len;
 	struct rsi rsii, *rsip = NULL;
-	time_t expiry;
+	time64_t expiry;
 	int status = -EINVAL;
 
 	memset(&rsii, 0, sizeof(rsii));
@@ -436,7 +436,7 @@ static int rsc_parse(struct cache_detail *cd,
 	int id;
 	int len, rv;
 	struct rsc rsci, *rscp = NULL;
-	time_t expiry;
+	time64_t expiry;
 	int status = -EINVAL;
 	struct gss_api_mech *gm = NULL;
 
@@ -1221,7 +1221,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
 	static atomic64_t ctxhctr;
 	long long ctxh;
 	struct gss_api_mech *gm = NULL;
-	time_t expiry;
+	time64_t expiry;
 	int status = -EINVAL;
 
 	memset(&rsci, 0, sizeof(rsci));
@@ -1248,6 +1248,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
 		dprintk("RPC:       No creds found!\n");
 		goto out;
 	} else {
+		struct timespec64 boot;
 
 		/* steal creds */
 		rsci.cred = ud->creds;
@@ -1268,6 +1269,9 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
 						&expiry, GFP_KERNEL);
 		if (status)
 			goto out;
+
+		getboottime64(&boot);
+		expiry -= boot.tv_sec;
 	}
 
 	rsci.h.expiry_time = expiry;
@@ -1428,10 +1432,10 @@ static ssize_t read_gssp(struct file *file, char __user *buf,
 	return len;
 }
 
-static const struct file_operations use_gss_proxy_ops = {
-	.open = nonseekable_open,
-	.write = write_gssp,
-	.read = read_gssp,
+static const struct proc_ops use_gss_proxy_proc_ops = {
+	.proc_open	= nonseekable_open,
+	.proc_write	= write_gssp,
+	.proc_read	= read_gssp,
 };
 
 static int create_use_gss_proxy_proc_entry(struct net *net)
@@ -1442,7 +1446,7 @@ static int create_use_gss_proxy_proc_entry(struct net *net)
 	sn->use_gss_proxy = -1;
 	*p = proc_create_data("use-gss-proxy", S_IFREG | 0600,
 			      sn->proc_net_rpc,
-			      &use_gss_proxy_ops, net);
+			      &use_gss_proxy_proc_ops, net);
 	if (!*p)
 		return -ENOMEM;
 	init_gssp_clnt(sn);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index f740cb5..bd843a8 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -42,7 +42,7 @@ static bool cache_listeners_exist(struct cache_detail *detail);
 
 static void cache_init(struct cache_head *h, struct cache_detail *detail)
 {
-	time_t now = seconds_since_boot();
+	time64_t now = seconds_since_boot();
 	INIT_HLIST_NODE(&h->cache_list);
 	h->flags = 0;
 	kref_init(&h->ref);
@@ -77,6 +77,22 @@ static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
 	return NULL;
 }
 
+static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
+					    struct cache_detail *cd)
+{
+	/* Must be called under cd->hash_lock */
+	hlist_del_init_rcu(&ch->cache_list);
+	set_bit(CACHE_CLEANED, &ch->flags);
+	cd->entries --;
+}
+
+static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
+					  struct cache_detail *cd)
+{
+	cache_fresh_unlocked(ch, cd);
+	cache_put(ch, cd);
+}
+
 static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
 						 struct cache_head *key,
 						 int hash)
@@ -100,8 +116,7 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
 	hlist_for_each_entry_rcu(tmp, head, cache_list) {
 		if (detail->match(tmp, key)) {
 			if (cache_is_expired(detail, tmp)) {
-				hlist_del_init_rcu(&tmp->cache_list);
-				detail->entries --;
+				sunrpc_begin_cache_remove_entry(tmp, detail);
 				freeme = tmp;
 				break;
 			}
@@ -117,10 +132,8 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
 	cache_get(new);
 	spin_unlock(&detail->hash_lock);
 
-	if (freeme) {
-		cache_fresh_unlocked(freeme, detail);
-		cache_put(freeme, detail);
-	}
+	if (freeme)
+		sunrpc_end_cache_remove_entry(freeme, detail);
 	return new;
 }
 
@@ -139,10 +152,10 @@ EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
 
 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
 
-static void cache_fresh_locked(struct cache_head *head, time_t expiry,
+static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
 			       struct cache_detail *detail)
 {
-	time_t now = seconds_since_boot();
+	time64_t now = seconds_since_boot();
 	if (now <= detail->flush_time)
 		/* ensure it isn't immediately treated as expired */
 		now = detail->flush_time + 1;
@@ -274,7 +287,7 @@ int cache_check(struct cache_detail *detail,
 		    struct cache_head *h, struct cache_req *rqstp)
 {
 	int rv;
-	long refresh_age, age;
+	time64_t refresh_age, age;
 
 	/* First decide return status as best we can */
 	rv = cache_is_valid(h);
@@ -288,7 +301,7 @@ int cache_check(struct cache_detail *detail,
 			rv = -ENOENT;
 	} else if (rv == -EAGAIN ||
 		   (h->expiry_time != 0 && age > refresh_age/2)) {
-		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
+		dprintk("RPC:       Want update, refage=%lld, age=%lld\n",
 				refresh_age, age);
 		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
 			switch (cache_make_upcall(detail, h)) {
@@ -454,8 +467,7 @@ static int cache_clean(void)
 			if (!cache_is_expired(current_detail, ch))
 				continue;
 
-			hlist_del_init_rcu(&ch->cache_list);
-			current_detail->entries--;
+			sunrpc_begin_cache_remove_entry(ch, current_detail);
 			rv = 1;
 			break;
 		}
@@ -465,11 +477,8 @@ static int cache_clean(void)
 		if (!ch)
 			current_index ++;
 		spin_unlock(&cache_list_lock);
-		if (ch) {
-			set_bit(CACHE_CLEANED, &ch->flags);
-			cache_fresh_unlocked(ch, d);
-			cache_put(ch, d);
-		}
+		if (ch)
+			sunrpc_end_cache_remove_entry(ch, d);
 	} else
 		spin_unlock(&cache_list_lock);
 
@@ -525,13 +534,9 @@ void cache_purge(struct cache_detail *detail)
 	for (i = 0; i < detail->hash_size; i++) {
 		head = &detail->hash_table[i];
 		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
-			hlist_del_init_rcu(&ch->cache_list);
-			detail->entries--;
-
-			set_bit(CACHE_CLEANED, &ch->flags);
+			sunrpc_begin_cache_remove_entry(ch, detail);
 			spin_unlock(&detail->hash_lock);
-			cache_fresh_unlocked(ch, detail);
-			cache_put(ch, detail);
+			sunrpc_end_cache_remove_entry(ch, detail);
 			spin_lock(&detail->hash_lock);
 		}
 	}
@@ -1404,7 +1409,7 @@ static int c_show(struct seq_file *m, void *p)
 		return cd->cache_show(m, cd, NULL);
 
 	ifdebug(CACHE)
-		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
+		seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
 			   convert_to_wallclock(cp->expiry_time),
 			   kref_read(&cp->ref), cp->flags);
 	cache_get(cp);
@@ -1477,7 +1482,7 @@ static ssize_t read_flush(struct file *file, char __user *buf,
 	char tbuf[22];
 	size_t len;
 
-	len = snprintf(tbuf, sizeof(tbuf), "%lu\n",
+	len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
 			convert_to_wallclock(cd->flush_time));
 	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
 }
@@ -1488,7 +1493,7 @@ static ssize_t write_flush(struct file *file, const char __user *buf,
 {
 	char tbuf[20];
 	char *ep;
-	time_t now;
+	time64_t now;
 
 	if (*ppos || count > sizeof(tbuf)-1)
 		return -EINVAL;
@@ -1571,15 +1576,14 @@ static int cache_release_procfs(struct inode *inode, struct file *filp)
 	return cache_release(inode, filp, cd);
 }
 
-static const struct file_operations cache_file_operations_procfs = {
-	.owner		= THIS_MODULE,
-	.llseek		= no_llseek,
-	.read		= cache_read_procfs,
-	.write		= cache_write_procfs,
-	.poll		= cache_poll_procfs,
-	.unlocked_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
-	.open		= cache_open_procfs,
-	.release	= cache_release_procfs,
+static const struct proc_ops cache_channel_proc_ops = {
+	.proc_lseek	= no_llseek,
+	.proc_read	= cache_read_procfs,
+	.proc_write	= cache_write_procfs,
+	.proc_poll	= cache_poll_procfs,
+	.proc_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
+	.proc_open	= cache_open_procfs,
+	.proc_release	= cache_release_procfs,
 };
 
 static int content_open_procfs(struct inode *inode, struct file *filp)
@@ -1596,11 +1600,11 @@ static int content_release_procfs(struct inode *inode, struct file *filp)
 	return content_release(inode, filp, cd);
 }
 
-static const struct file_operations content_file_operations_procfs = {
-	.open		= content_open_procfs,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= content_release_procfs,
+static const struct proc_ops content_proc_ops = {
+	.proc_open	= content_open_procfs,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= content_release_procfs,
 };
 
 static int open_flush_procfs(struct inode *inode, struct file *filp)
@@ -1634,12 +1638,12 @@ static ssize_t write_flush_procfs(struct file *filp,
 	return write_flush(filp, buf, count, ppos, cd);
 }
 
-static const struct file_operations cache_flush_operations_procfs = {
-	.open		= open_flush_procfs,
-	.read		= read_flush_procfs,
-	.write		= write_flush_procfs,
-	.release	= release_flush_procfs,
-	.llseek		= no_llseek,
+static const struct proc_ops cache_flush_proc_ops = {
+	.proc_open	= open_flush_procfs,
+	.proc_read	= read_flush_procfs,
+	.proc_write	= write_flush_procfs,
+	.proc_release	= release_flush_procfs,
+	.proc_lseek	= no_llseek,
 };
 
 static void remove_cache_proc_entries(struct cache_detail *cd)
@@ -1662,19 +1666,19 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
 		goto out_nomem;
 
 	p = proc_create_data("flush", S_IFREG | 0600,
-			     cd->procfs, &cache_flush_operations_procfs, cd);
+			     cd->procfs, &cache_flush_proc_ops, cd);
 	if (p == NULL)
 		goto out_nomem;
 
 	if (cd->cache_request || cd->cache_parse) {
 		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
-				     &cache_file_operations_procfs, cd);
+				     &cache_channel_proc_ops, cd);
 		if (p == NULL)
 			goto out_nomem;
 	}
 	if (cd->cache_show) {
 		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
-				     &content_file_operations_procfs, cd);
+				     &content_proc_ops, cd);
 		if (p == NULL)
 			goto out_nomem;
 	}
@@ -1886,10 +1890,9 @@ void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
 {
 	spin_lock(&cd->hash_lock);
 	if (!hlist_unhashed(&h->cache_list)){
-		hlist_del_init_rcu(&h->cache_list);
-		cd->entries--;
+		sunrpc_begin_cache_remove_entry(h, cd);
 		spin_unlock(&cd->hash_lock);
-		cache_put(h, cd);
+		sunrpc_end_cache_remove_entry(h, cd);
 	} else
 		spin_unlock(&cd->hash_lock);
 }
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index a337976..7324b21 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2130,6 +2130,7 @@ call_connect_status(struct rpc_task *task)
 	case -ENETUNREACH:
 	case -EHOSTUNREACH:
 	case -EPIPE:
+	case -EPROTO:
 		xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
 					    task->tk_rqstp->rq_connect_cookie);
 		if (RPC_IS_SOFTCONN(task))
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 9c79548..55e9002 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -846,6 +846,8 @@ void rpc_signal_task(struct rpc_task *task)
 
 	if (!RPC_IS_ACTIVATED(task))
 		return;
+
+	trace_rpc_task_signalled(task, task->tk_action);
 	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
 	smp_mb__after_atomic();
 	queue = READ_ONCE(task->tk_waitqueue);
@@ -949,7 +951,7 @@ static void __rpc_execute(struct rpc_task *task)
 			 * clean up after sleeping on some queue, we don't
 			 * break the loop here, but go around once more.
 			 */
-			dprintk("RPC: %5u got signal\n", task->tk_pid);
+			trace_rpc_task_signalled(task, task->tk_action);
 			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
 			task->tk_rpc_status = -ERESTARTSYS;
 			rpc_exit(task, -ERESTARTSYS);
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 7c74197..c964b48 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -69,12 +69,11 @@ static int rpc_proc_open(struct inode *inode, struct file *file)
 	return single_open(file, rpc_proc_show, PDE_DATA(inode));
 }
 
-static const struct file_operations rpc_proc_fops = {
-	.owner = THIS_MODULE,
-	.open = rpc_proc_open,
-	.read  = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
+static const struct proc_ops rpc_proc_ops = {
+	.proc_open	= rpc_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
 };
 
 /*
@@ -281,19 +280,19 @@ EXPORT_SYMBOL_GPL(rpc_clnt_show_stats);
  */
 static inline struct proc_dir_entry *
 do_register(struct net *net, const char *name, void *data,
-	    const struct file_operations *fops)
+	    const struct proc_ops *proc_ops)
 {
 	struct sunrpc_net *sn;
 
 	dprintk("RPC:       registering /proc/net/rpc/%s\n", name);
 	sn = net_generic(net, sunrpc_net_id);
-	return proc_create_data(name, 0, sn->proc_net_rpc, fops, data);
+	return proc_create_data(name, 0, sn->proc_net_rpc, proc_ops, data);
 }
 
 struct proc_dir_entry *
 rpc_proc_register(struct net *net, struct rpc_stat *statp)
 {
-	return do_register(net, statp->program->name, statp, &rpc_proc_fops);
+	return do_register(net, statp->program->name, statp, &rpc_proc_ops);
 }
 EXPORT_SYMBOL_GPL(rpc_proc_register);
 
@@ -308,9 +307,9 @@ rpc_proc_unregister(struct net *net, const char *name)
 EXPORT_SYMBOL_GPL(rpc_proc_unregister);
 
 struct proc_dir_entry *
-svc_proc_register(struct net *net, struct svc_stat *statp, const struct file_operations *fops)
+svc_proc_register(struct net *net, struct svc_stat *statp, const struct proc_ops *proc_ops)
 {
-	return do_register(net, statp->program->pg_name, statp, fops);
+	return do_register(net, statp->program->pg_name, statp, proc_ops);
 }
 EXPORT_SYMBOL_GPL(svc_proc_register);
 
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 5c04ba7..04aa80a 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -166,7 +166,7 @@ static void ip_map_request(struct cache_detail *cd,
 }
 
 static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
-static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
+static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time64_t expiry);
 
 static int ip_map_parse(struct cache_detail *cd,
 			  char *mesg, int mlen)
@@ -187,7 +187,7 @@ static int ip_map_parse(struct cache_detail *cd,
 
 	struct ip_map *ipmp;
 	struct auth_domain *dom;
-	time_t expiry;
+	time64_t expiry;
 
 	if (mesg[mlen-1] != '\n')
 		return -EINVAL;
@@ -308,7 +308,7 @@ static inline struct ip_map *ip_map_lookup(struct net *net, char *class,
 }
 
 static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
-		struct unix_domain *udom, time_t expiry)
+		struct unix_domain *udom, time64_t expiry)
 {
 	struct ip_map ip;
 	struct cache_head *ch;
@@ -328,7 +328,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
 }
 
 static inline int ip_map_update(struct net *net, struct ip_map *ipm,
-		struct unix_domain *udom, time_t expiry)
+		struct unix_domain *udom, time64_t expiry)
 {
 	struct sunrpc_net *sn;
 
@@ -491,7 +491,7 @@ static int unix_gid_parse(struct cache_detail *cd,
 	int rv;
 	int i;
 	int err;
-	time_t expiry;
+	time64_t expiry;
 	struct unix_gid ug, *ugp;
 
 	if (mesg[mlen - 1] != '\n')
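
The time_t to time64_t switches above are part of the y2038 cleanup: expiry values are carried in a type that cannot overflow in 2038 on 32-bit builds. A tiny sketch of the idea, assuming a hypothetical expiry check against ktime_get_real_seconds(); the sunrpc cache itself compares against its own clock, so this only illustrates the type change.

#include <linux/timekeeping.h>
#include <linux/types.h>

static bool example_entry_expired(time64_t expiry)
{
	/* both sides are 64-bit, so no wraparound in 2038 */
	return ktime_get_real_seconds() > expiry;
}
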
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index f3104be..e5497dc 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1079,7 +1079,7 @@ void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
 }
 EXPORT_SYMBOL_GPL(xdr_enter_page);
 
-static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
+static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
 
 void
 xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 9d02eae3..1a0ae0c 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -194,6 +194,10 @@ static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
 	req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
 	if (!req)
 		return NULL;
+	if (rpcrdma_req_setup(r_xprt, req)) {
+		rpcrdma_req_destroy(req);
+		return NULL;
+	}
 
 	xprt->bc_alloc_count++;
 	rqst = &req->rl_slot;
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 523722b..095be88 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -51,28 +51,6 @@
 #endif
 
 /**
- * frwr_is_supported - Check if device supports FRWR
- * @device: interface adapter to check
- *
- * Returns true if device supports FRWR, otherwise false
- */
-bool frwr_is_supported(struct ib_device *device)
-{
-	struct ib_device_attr *attrs = &device->attrs;
-
-	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
-		goto out_not_supported;
-	if (attrs->max_fast_reg_page_list_len == 0)
-		goto out_not_supported;
-	return true;
-
-out_not_supported:
-	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
-		device->name);
-	return false;
-}
-
-/**
  * frwr_release_mr - Destroy one MR
  * @mr: MR allocated by frwr_init_mr
  *
@@ -170,26 +148,48 @@ int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
 }
 
 /**
- * frwr_open - Prepare an endpoint for use with FRWR
- * @ia: interface adapter this endpoint will use
- * @ep: endpoint to prepare
+ * frwr_query_device - Prepare a transport for use with FRWR
+ * @r_xprt: controlling transport instance
+ * @device: RDMA device to query
  *
  * On success, sets:
- *	ep->rep_attr.cap.max_send_wr
- *	ep->rep_attr.cap.max_recv_wr
+ *	ep->rep_attr
  *	ep->rep_max_requests
- *	ia->ri_max_segs
+ *	ia->ri_max_rdma_segs
  *
  * And these FRWR-related fields:
  *	ia->ri_max_frwr_depth
  *	ia->ri_mrtype
  *
- * On failure, a negative errno is returned.
+ * Return values:
+ *   On success, returns zero.
+ *   %-EINVAL - the device does not support FRWR memory registration
+ *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
  */
-int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
+int frwr_query_device(struct rpcrdma_xprt *r_xprt,
+		      const struct ib_device *device)
 {
-	struct ib_device_attr *attrs = &ia->ri_id->device->attrs;
+	const struct ib_device_attr *attrs = &device->attrs;
+	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
 	int max_qp_wr, depth, delta;
+	unsigned int max_sge;
+
+	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
+	    attrs->max_fast_reg_page_list_len == 0) {
+		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
+		       device->name);
+		return -EINVAL;
+	}
+
+	max_sge = min_t(unsigned int, attrs->max_send_sge,
+			RPCRDMA_MAX_SEND_SGES);
+	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
+		pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
+		return -ENOMEM;
+	}
+	ep->rep_attr.cap.max_send_sge = max_sge;
+	ep->rep_attr.cap.max_recv_sge = 1;
 
 	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
 	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
@@ -199,14 +199,12 @@ int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
 	 * capability, but perform optimally when the MRs are not larger
 	 * than a page.
 	 */
-	if (attrs->max_sge_rd > 1)
+	if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
 		ia->ri_max_frwr_depth = attrs->max_sge_rd;
 	else
 		ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len;
 	if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS)
 		ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS;
-	dprintk("RPC:       %s: max FR page list depth = %u\n",
-		__func__, ia->ri_max_frwr_depth);
 
 	/* Add room for frwr register and invalidate WRs.
 	 * 1. FRWR reg WR for head
@@ -230,7 +228,7 @@ int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
 		} while (delta > 0);
 	}
 
-	max_qp_wr = ia->ri_id->device->attrs.max_qp_wr;
+	max_qp_wr = attrs->max_qp_wr;
 	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
 	max_qp_wr -= 1;
 	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
@@ -241,7 +239,7 @@ int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
 	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
 		ep->rep_max_requests = max_qp_wr / depth;
 		if (!ep->rep_max_requests)
-			return -EINVAL;
+			return -ENOMEM;
 		ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
 	}
 	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
@@ -250,33 +248,25 @@ int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
 	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
 	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
 
-	ia->ri_max_segs =
+	ia->ri_max_rdma_segs =
 		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ia->ri_max_frwr_depth);
 	/* Reply chunks require segments for head and tail buffers */
-	ia->ri_max_segs += 2;
-	if (ia->ri_max_segs > RPCRDMA_MAX_HDR_SEGS)
-		ia->ri_max_segs = RPCRDMA_MAX_HDR_SEGS;
+	ia->ri_max_rdma_segs += 2;
+	if (ia->ri_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
+		ia->ri_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;
+
+	/* Ensure the underlying device is capable of conveying the
+	 * largest r/wsize NFS will ask for. This guarantees that
+	 * failing over from one RDMA device to another will not
+	 * break NFS I/O.
+	 */
+	if ((ia->ri_max_rdma_segs * ia->ri_max_frwr_depth) < RPCRDMA_MAX_SEGS)
+		return -ENOMEM;
+
 	return 0;
 }
 
 /**
- * frwr_maxpages - Compute size of largest payload
- * @r_xprt: transport
- *
- * Returns maximum size of an RPC message, in pages.
- *
- * FRWR mode conveys a list of pages per chunk segment. The
- * maximum length of that list is the FRWR page list depth.
- */
-size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt)
-{
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-
-	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
-		     (ia->ri_max_segs - 2) * ia->ri_max_frwr_depth);
-}
-
-/**
  * frwr_map - Register a memory region
  * @r_xprt: controlling transport
  * @seg: memory region co-ordinates
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index aec3beb..28020ec 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -111,7 +111,7 @@ static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
  */
 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
 {
-	unsigned int maxsegs = r_xprt->rx_ia.ri_max_segs;
+	unsigned int maxsegs = r_xprt->rx_ia.ri_max_rdma_segs;
 	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
 
 	ep->rep_max_inline_send =
@@ -145,7 +145,7 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
 			remaining -= min_t(unsigned int,
 					   PAGE_SIZE - offset, remaining);
 			offset = 0;
-			if (++count > r_xprt->rx_ia.ri_max_send_sges)
+			if (++count > r_xprt->rx_ep.rep_attr.cap.max_send_sge)
 				return false;
 		}
 	}
@@ -580,22 +580,19 @@ void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
 
 /* Prepare an SGE for the RPC-over-RDMA transport header.
  */
-static bool rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
+static void rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
 				    struct rpcrdma_req *req, u32 len)
 {
 	struct rpcrdma_sendctx *sc = req->rl_sendctx;
 	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
 	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
 
-	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
-		return false;
 	sge->addr = rdmab_addr(rb);
 	sge->length = len;
 	sge->lkey = rdmab_lkey(rb);
 
 	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
 				      DMA_TO_DEVICE);
-	return true;
 }
 
 /* The head iovec is straightforward, as it is usually already
@@ -836,10 +833,9 @@ inline int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
 	req->rl_wr.num_sge = 0;
 	req->rl_wr.opcode = IB_WR_SEND;
 
-	ret = -EIO;
-	if (!rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen))
-		goto out_unmap;
+	rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen);
 
+	ret = -EIO;
 	switch (rtype) {
 	case rpcrdma_noch_pullup:
 		if (!rpcrdma_prepare_noch_pullup(r_xprt, req, xdr))
@@ -909,7 +905,7 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
 		goto out_err;
 	*p++ = rqst->rq_xid;
 	*p++ = rpcrdma_version;
-	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
+	*p++ = r_xprt->rx_buf.rb_max_requests;
 
 	/* When the ULP employs a GSS flavor that guarantees integrity
 	 * or privacy, direct data placement of individual data items
@@ -1480,8 +1476,8 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 
 	if (credits == 0)
 		credits = 1;	/* don't deadlock */
-	else if (credits > buf->rb_max_requests)
-		credits = buf->rb_max_requests;
+	else if (credits > r_xprt->rx_ep.rep_max_requests)
+		credits = r_xprt->rx_ep.rep_max_requests;
 	if (buf->rb_credits != credits)
 		rpcrdma_update_cwnd(r_xprt, credits);
 	rpcrdma_post_recvs(r_xprt, false);
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 7395eb2..3cfeba6 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -316,7 +316,8 @@ xprt_setup_rdma(struct xprt_create *args)
 	if (args->addrlen > sizeof(xprt->addr))
 		return ERR_PTR(-EBADF);
 
-	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0, 0);
+	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0,
+			  xprt_rdma_slot_table_entries);
 	if (!xprt)
 		return ERR_PTR(-ENOMEM);
 
@@ -358,19 +359,13 @@ xprt_setup_rdma(struct xprt_create *args)
 	if (rc)
 		goto out3;
 
-	INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
-			  xprt_rdma_connect_worker);
-
-	xprt->max_payload = frwr_maxpages(new_xprt);
-	if (xprt->max_payload == 0)
-		goto out4;
-	xprt->max_payload <<= PAGE_SHIFT;
-	dprintk("RPC:       %s: transport data payload maximum: %zu bytes\n",
-		__func__, xprt->max_payload);
-
 	if (!try_module_get(THIS_MODULE))
 		goto out4;
 
+	INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
+			  xprt_rdma_connect_worker);
+	xprt->max_payload = RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;
+
 	dprintk("RPC:       %s: %s:%s\n", __func__,
 		xprt->address_strings[RPC_DISPLAY_ADDR],
 		xprt->address_strings[RPC_DISPLAY_PORT]);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index fda3889..353f61a 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -74,9 +74,13 @@
 /*
  * internal functions
  */
+static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt);
+static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
 				       struct rpcrdma_sendctx *sc);
+static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
+static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep);
 static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
@@ -174,7 +178,7 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	return;
 
 out_flushed:
-	rpcrdma_recv_buffer_put(rep);
+	rpcrdma_rep_destroy(rep);
 }
 
 static void rpcrdma_update_cm_private(struct rpcrdma_xprt *r_xprt,
@@ -366,18 +370,6 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
 		goto out_err;
 	}
 
-	switch (xprt_rdma_memreg_strategy) {
-	case RPCRDMA_FRWR:
-		if (frwr_is_supported(ia->ri_id->device))
-			break;
-		/*FALLTHROUGH*/
-	default:
-		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
-		       ia->ri_id->device->name, xprt_rdma_memreg_strategy);
-		rc = -EINVAL;
-		goto out_err;
-	}
-
 	return 0;
 
 out_err:
@@ -391,6 +383,8 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
  *
  * Divest transport H/W resources associated with this adapter,
  * but allow it to be restored later.
+ *
+ * Caller must hold the transport send lock.
  */
 void
 rpcrdma_ia_remove(struct rpcrdma_ia *ia)
@@ -398,8 +392,6 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
 	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
 						   rx_ia);
 	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
-	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-	struct rpcrdma_req *req;
 
 	/* This is similar to rpcrdma_ep_destroy, but:
 	 * - Don't cancel the connect worker.
@@ -422,12 +414,9 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
 	 * mappings and MRs are gone.
 	 */
 	rpcrdma_reps_unmap(r_xprt);
-	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
-		rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
-		rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
-		rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
-	}
+	rpcrdma_reqs_reset(r_xprt);
 	rpcrdma_mrs_destroy(r_xprt);
+	rpcrdma_sendctxs_destroy(r_xprt);
 	ib_dealloc_pd(ia->ri_pd);
 	ia->ri_pd = NULL;
 
@@ -470,30 +459,20 @@ int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
 	struct ib_cq *sendcq, *recvcq;
-	unsigned int max_sge;
 	int rc;
 
-	ep->rep_max_requests = xprt_rdma_slot_table_entries;
+	ep->rep_max_requests = r_xprt->rx_xprt.max_reqs;
 	ep->rep_inline_send = xprt_rdma_max_inline_write;
 	ep->rep_inline_recv = xprt_rdma_max_inline_read;
 
-	max_sge = min_t(unsigned int, ia->ri_id->device->attrs.max_send_sge,
-			RPCRDMA_MAX_SEND_SGES);
-	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
-		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
-		return -ENOMEM;
-	}
-	ia->ri_max_send_sges = max_sge;
-
-	rc = frwr_open(ia, ep);
+	rc = frwr_query_device(r_xprt, ia->ri_id->device);
 	if (rc)
 		return rc;
+	r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->rep_max_requests);
 
 	ep->rep_attr.event_handler = rpcrdma_qp_event_handler;
 	ep->rep_attr.qp_context = ep;
 	ep->rep_attr.srq = NULL;
-	ep->rep_attr.cap.max_send_sge = max_sge;
-	ep->rep_attr.cap.max_recv_sge = 1;
 	ep->rep_attr.cap.max_inline_data = 0;
 	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 	ep->rep_attr.qp_type = IB_QPT_RC;
@@ -716,6 +695,10 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 	rpcrdma_reset_cwnd(r_xprt);
 	rpcrdma_post_recvs(r_xprt, true);
 
+	rc = rpcrdma_sendctxs_create(r_xprt);
+	if (rc)
+		goto out;
+
 	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
 	if (rc)
 		goto out;
@@ -730,6 +713,11 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 		goto out;
 	}
 
+	rc = rpcrdma_reqs_setup(r_xprt);
+	if (rc) {
+		rpcrdma_ep_disconnect(ep, ia);
+		goto out;
+	}
 	rpcrdma_mrs_create(r_xprt);
 
 out:
@@ -768,6 +756,7 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 	rpcrdma_xprt_drain(r_xprt);
 	rpcrdma_reqs_reset(r_xprt);
 	rpcrdma_mrs_destroy(r_xprt);
+	rpcrdma_sendctxs_destroy(r_xprt);
 }
 
 /* Fixed-size circular FIFO queue. This implementation is wait-free and
@@ -787,20 +776,24 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
  * queue activity, and rpcrdma_xprt_drain has flushed all remaining
  * Send requests.
  */
-static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
+static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt)
 {
+	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	unsigned long i;
 
+	if (!buf->rb_sc_ctxs)
+		return;
 	for (i = 0; i <= buf->rb_sc_last; i++)
 		kfree(buf->rb_sc_ctxs[i]);
 	kfree(buf->rb_sc_ctxs);
+	buf->rb_sc_ctxs = NULL;
 }
 
-static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
+static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep)
 {
 	struct rpcrdma_sendctx *sc;
 
-	sc = kzalloc(struct_size(sc, sc_sges, ia->ri_max_send_sges),
+	sc = kzalloc(struct_size(sc, sc_sges, ep->rep_attr.cap.max_send_sge),
 		     GFP_KERNEL);
 	if (!sc)
 		return NULL;
@@ -820,21 +813,22 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
 	 * the ->send_request call to fail temporarily before too many
 	 * Sends are posted.
 	 */
-	i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
-	dprintk("RPC:       %s: allocating %lu send_ctxs\n", __func__, i);
+	i = r_xprt->rx_ep.rep_max_requests + RPCRDMA_MAX_BC_REQUESTS;
 	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
 	if (!buf->rb_sc_ctxs)
 		return -ENOMEM;
 
 	buf->rb_sc_last = i - 1;
 	for (i = 0; i <= buf->rb_sc_last; i++) {
-		sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
+		sc = rpcrdma_sendctx_create(&r_xprt->rx_ep);
 		if (!sc)
 			return -ENOMEM;
 
 		buf->rb_sc_ctxs[i] = sc;
 	}
 
+	buf->rb_sc_head = 0;
+	buf->rb_sc_tail = 0;
 	return 0;
 }
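
The head and tail resets above re-arm the wait-free send-context ring described earlier in this file: the sending path consumes slots by advancing the head, the Send completion path releases them by advancing the tail, and the ring is exhausted when the head would catch up with the tail. Below is a generic sketch of that style of ring; the structure, names, and wrap logic are hypothetical and differ in detail from the real rpcrdma_buffer fields.

#include <linux/compiler.h>

struct example_ring {
	unsigned long	head;	/* touched only by the sending path    */
	unsigned long	tail;	/* touched only by the completion path */
	unsigned long	last;	/* highest valid slot index (size - 1) */
	void		*slots[];
};

static void *example_ring_get(struct example_ring *r)
{
	unsigned long next = (r->head == r->last) ? 0 : r->head + 1;

	/* exhausted when advancing head would meet the published tail */
	if (next == READ_ONCE(r->tail))
		return NULL;
	r->head = next;
	return r->slots[next];
}

static void example_ring_put(struct example_ring *r, unsigned long idx)
{
	/* the completion side frees slot idx by publishing a new tail */
	WRITE_ONCE(r->tail, (idx == r->last) ? 0 : idx + 1);
}
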
 
@@ -933,7 +927,7 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	unsigned int count;
 
-	for (count = 0; count < ia->ri_max_segs; count++) {
+	for (count = 0; count < ia->ri_max_rdma_segs; count++) {
 		struct rpcrdma_mr *mr;
 		int rc;
 
@@ -1005,32 +999,19 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
 				       gfp_t flags)
 {
 	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
-	struct rpcrdma_regbuf *rb;
 	struct rpcrdma_req *req;
-	size_t maxhdrsize;
 
 	req = kzalloc(sizeof(*req), flags);
 	if (req == NULL)
 		goto out1;
 
-	/* Compute maximum header buffer size in bytes */
-	maxhdrsize = rpcrdma_fixed_maxsz + 3 +
-		     r_xprt->rx_ia.ri_max_segs * rpcrdma_readchunk_maxsz;
-	maxhdrsize *= sizeof(__be32);
-	rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
-				  DMA_TO_DEVICE, flags);
-	if (!rb)
-		goto out2;
-	req->rl_rdmabuf = rb;
-	xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));
-
 	req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
 	if (!req->rl_sendbuf)
-		goto out3;
+		goto out2;
 
 	req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
 	if (!req->rl_recvbuf)
-		goto out4;
+		goto out3;
 
 	INIT_LIST_HEAD(&req->rl_free_mrs);
 	INIT_LIST_HEAD(&req->rl_registered);
@@ -1039,10 +1020,8 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
 	spin_unlock(&buffer->rb_lock);
 	return req;
 
-out4:
-	kfree(req->rl_sendbuf);
 out3:
-	kfree(req->rl_rdmabuf);
+	kfree(req->rl_sendbuf);
 out2:
 	kfree(req);
 out1:
@@ -1050,10 +1029,71 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
 }
 
 /**
- * rpcrdma_reqs_reset - Reset all reqs owned by a transport
+ * rpcrdma_req_setup - Per-connection instance setup of an rpcrdma_req object
  * @r_xprt: controlling transport instance
+ * @req: rpcrdma_req object to set up
  *
- * ASSUMPTION: the rb_allreqs list is stable for the duration,
+ * Returns zero on success, and a negative errno on failure.
+ */
+int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+{
+	struct rpcrdma_regbuf *rb;
+	size_t maxhdrsize;
+
+	/* Compute maximum header buffer size in bytes */
+	maxhdrsize = rpcrdma_fixed_maxsz + 3 +
+		     r_xprt->rx_ia.ri_max_rdma_segs * rpcrdma_readchunk_maxsz;
+	maxhdrsize *= sizeof(__be32);
+	rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
+				  DMA_TO_DEVICE, GFP_KERNEL);
+	if (!rb)
+		goto out;
+
+	if (!__rpcrdma_regbuf_dma_map(r_xprt, rb))
+		goto out_free;
+
+	req->rl_rdmabuf = rb;
+	xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));
+	return 0;
+
+out_free:
+	rpcrdma_regbuf_free(rb);
+out:
+	return -ENOMEM;
+}
+
+/* ASSUMPTION: the rb_allreqs list is stable for the duration,
+ * and thus can be walked without holding rb_lock. Eg. the
+ * caller is holding the transport send lock to exclude
+ * device removal or disconnection.
+ */
+static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt)
+{
+	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+	struct rpcrdma_req *req;
+	int rc;
+
+	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
+		rc = rpcrdma_req_setup(r_xprt, req);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+static void rpcrdma_req_reset(struct rpcrdma_req *req)
+{
+	/* Credits are valid for only one connection */
+	req->rl_slot.rq_cong = 0;
+
+	rpcrdma_regbuf_free(req->rl_rdmabuf);
+	req->rl_rdmabuf = NULL;
+
+	rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
+	rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
+}
+
+/* ASSUMPTION: the rb_allreqs list is stable for the duration,
  * and thus can be walked without holding rb_lock. Eg. the
  * caller is holding the transport send lock to exclude
  * device removal or disconnection.
@@ -1063,14 +1103,16 @@ static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt)
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpcrdma_req *req;
 
-	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
-		/* Credits are valid only for one connection */
-		req->rl_slot.rq_cong = 0;
-	}
+	list_for_each_entry(req, &buf->rb_allreqs, rl_all)
+		rpcrdma_req_reset(req);
 }
 
-static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
-					      bool temp)
+/* No locking needed here. This function is called only by the
+ * Receive completion handler.
+ */
+static noinline
+struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
+				       bool temp)
 {
 	struct rpcrdma_rep *rep;
 
@@ -1083,6 +1125,9 @@ static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
 	if (!rep->rr_rdmabuf)
 		goto out_free;
 
+	if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
+		goto out_free_regbuf;
+
 	xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
 		     rdmab_length(rep->rr_rdmabuf));
 	rep->rr_cqe.done = rpcrdma_wc_receive;
@@ -1095,12 +1140,17 @@ static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
 	list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps);
 	return rep;
 
+out_free_regbuf:
+	rpcrdma_regbuf_free(rep->rr_rdmabuf);
 out_free:
 	kfree(rep);
 out:
 	return NULL;
 }
 
+/* No locking needed here. This function is invoked only by the
+ * Receive completion handler, or during transport shutdown.
+ */
 static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
 {
 	list_del(&rep->rr_all);
@@ -1130,8 +1180,10 @@ static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt)
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpcrdma_rep *rep;
 
-	list_for_each_entry(rep, &buf->rb_all_reps, rr_all)
+	list_for_each_entry(rep, &buf->rb_all_reps, rr_all) {
 		rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
+		rep->rr_temp = true;
+	}
 }
 
 static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
@@ -1153,7 +1205,6 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	int i, rc;
 
-	buf->rb_max_requests = r_xprt->rx_ep.rep_max_requests;
 	buf->rb_bc_srv_max_requests = 0;
 	spin_lock_init(&buf->rb_lock);
 	INIT_LIST_HEAD(&buf->rb_mrs);
@@ -1165,7 +1216,7 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 	INIT_LIST_HEAD(&buf->rb_all_reps);
 
 	rc = -ENOMEM;
-	for (i = 0; i < buf->rb_max_requests; i++) {
+	for (i = 0; i < r_xprt->rx_xprt.max_reqs; i++) {
 		struct rpcrdma_req *req;
 
 		req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE * 2,
@@ -1177,10 +1228,6 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 
 	init_llist_head(&buf->rb_free_reps);
 
-	rc = rpcrdma_sendctxs_create(r_xprt);
-	if (rc)
-		goto out;
-
 	return 0;
 out:
 	rpcrdma_buffer_destroy(buf);
@@ -1256,7 +1303,6 @@ static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt)
 void
 rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 {
-	rpcrdma_sendctxs_destroy(buf);
 	rpcrdma_reps_destroy(buf);
 
 	while (!list_empty(&buf->rb_send_bufs)) {
@@ -1497,7 +1543,7 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
 {
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
-	struct ib_recv_wr *i, *wr, *bad_wr;
+	struct ib_recv_wr *wr, *bad_wr;
 	struct rpcrdma_rep *rep;
 	int needed, count, rc;
 
@@ -1524,23 +1570,15 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
 		if (!rep)
 			break;
 
+		trace_xprtrdma_post_recv(rep);
 		rep->rr_recv_wr.next = wr;
 		wr = &rep->rr_recv_wr;
 		--needed;
+		++count;
 	}
 	if (!wr)
 		goto out;
 
-	for (i = wr; i; i = i->next) {
-		rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);
-
-		if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
-			goto release_wrs;
-
-		trace_xprtrdma_post_recv(rep);
-		++count;
-	}
-
 	rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
 			  (const struct ib_recv_wr **)&bad_wr);
 out:
@@ -1557,11 +1595,4 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
 	}
 	ep->rep_receive_count += count;
 	return;
-
-release_wrs:
-	for (i = wr; i;) {
-		rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);
-		i = i->next;
-		rpcrdma_recv_buffer_put(rep);
-	}
 }
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index d796d68..37d5080 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -71,9 +71,8 @@ struct rpcrdma_ia {
 	struct rdma_cm_id 	*ri_id;
 	struct ib_pd		*ri_pd;
 	int			ri_async_rc;
-	unsigned int		ri_max_segs;
+	unsigned int		ri_max_rdma_segs;
 	unsigned int		ri_max_frwr_depth;
-	unsigned int		ri_max_send_sges;
 	bool			ri_implicit_roundup;
 	enum ib_mr_type		ri_mrtype;
 	unsigned long		ri_flags;
@@ -99,7 +98,7 @@ struct rpcrdma_ep {
 	wait_queue_head_t 	rep_connect_wait;
 	struct rpcrdma_connect_private	rep_cm_private;
 	struct rdma_conn_param	rep_remote_cma;
-	unsigned int		rep_max_requests;	/* set by /proc */
+	unsigned int		rep_max_requests;	/* depends on device */
 	unsigned int		rep_inline_send;	/* negotiated */
 	unsigned int		rep_inline_recv;	/* negotiated */
 	int			rep_receive_count;
@@ -373,7 +372,7 @@ struct rpcrdma_buffer {
 
 	struct llist_head	rb_free_reps;
 
-	u32			rb_max_requests;
+	__be32			rb_max_requests;
 	u32			rb_credits;	/* most recent credit grant */
 
 	u32			rb_bc_srv_max_requests;
@@ -479,6 +478,7 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
  */
 struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
 				       gfp_t flags);
+int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
 void rpcrdma_req_destroy(struct rpcrdma_req *req);
 int rpcrdma_buffer_create(struct rpcrdma_xprt *);
 void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
@@ -535,12 +535,11 @@ rpcrdma_data_dir(bool writing)
 
 /* Memory registration calls xprtrdma/frwr_ops.c
  */
-bool frwr_is_supported(struct ib_device *device);
 void frwr_reset(struct rpcrdma_req *req);
-int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep);
+int frwr_query_device(struct rpcrdma_xprt *r_xprt,
+		      const struct ib_device *device);
 int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr);
 void frwr_release_mr(struct rpcrdma_mr *mr);
-size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt);
 struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
 				struct rpcrdma_mr_seg *seg,
 				int nsegs, bool writing, __be32 xid,
@@ -583,7 +582,6 @@ static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
 
 /* RPC/RDMA module init - xprtrdma/transport.c
  */
-extern unsigned int xprt_rdma_slot_table_entries;
 extern unsigned int xprt_rdma_max_inline_read;
 extern unsigned int xprt_rdma_max_inline_write;
 void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 321af97..62c12cb 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -189,11 +189,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
 	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 }
 
-static inline int unix_recvq_full(struct sock const *sk)
+static inline int unix_recvq_full(const struct sock *sk)
 {
 	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 }
 
+static inline int unix_recvq_full_lockless(const struct sock *sk)
+{
+	return skb_queue_len_lockless(&sk->sk_receive_queue) >
+		READ_ONCE(sk->sk_max_ack_backlog);
+}
+
 struct sock *unix_peer_get(struct sock *s)
 {
 	struct sock *peer;
@@ -1758,7 +1764,8 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
 	 * - unix_peer(sk) == sk by time of get but disconnected before lock
 	 */
 	if (other != sk &&
-	    unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+	    unlikely(unix_peer(other) != sk &&
+	    unix_recvq_full_lockless(other))) {
 		if (timeo) {
 			timeo = unix_wait_for_peer(other, timeo);
 
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index f93e917..fa7bb5e 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -212,7 +212,7 @@ static int xdp_umem_map_pages(struct xdp_umem *umem)
 
 static void xdp_umem_unpin_pages(struct xdp_umem *umem)
 {
-	put_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
+	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
 
 	kfree(umem->pgs);
 	umem->pgs = NULL;
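
xdp_umem moves from the get_user_pages()/put_user_pages() pair to the FOLL_PIN-based API; the matching pin side follows in the next hunk. A self-contained sketch of the pairing, with hypothetical helper names and the mmap_sem locking as it stood at the time of this series.

#include <linux/mm.h>
#include <linux/sched.h>

static long example_pin(unsigned long addr, unsigned long npages,
			struct page **pages)
{
	long pinned;

	down_read(&current->mm->mmap_sem);
	pinned = pin_user_pages(addr, npages, FOLL_WRITE | FOLL_LONGTERM,
				pages, NULL);
	up_read(&current->mm->mmap_sem);
	return pinned;	/* number of pages pinned, or a negative errno */
}

static void example_unpin(struct page **pages, unsigned long npages)
{
	/* pages taken with pin_user_pages() must be released with an
	 * unpin_user_pages*() variant, never plain put_page()
	 */
	unpin_user_pages_dirty_lock(pages, npages, true);
}
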
@@ -291,7 +291,7 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem)
 		return -ENOMEM;
 
 	down_read(&current->mm->mmap_sem);
-	npgs = get_user_pages(umem->address, umem->npgs,
+	npgs = pin_user_pages(umem->address, umem->npgs,
 			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
 	up_read(&current->mm->mmap_sem);
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 297d1eb..dbda08e 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3189,7 +3189,7 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
 					    flags | XFRM_LOOKUP_QUEUE |
 					    XFRM_LOOKUP_KEEP_DST_REF);
 
-	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
+	if (PTR_ERR(dst) == -EREMOTE)
 		return make_blackhole(net, dst_orig->ops->family, dst_orig);
 
 	if (IS_ERR(dst))
diff --git a/samples/Makefile b/samples/Makefile
index 5ce50ef..f8f847b4 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for Linux samples code
+OBJECT_FILES_NON_STANDARD := y
 
 obj-$(CONFIG_SAMPLE_ANDROID_BINDERFS)	+= binderfs/
 obj-$(CONFIG_SAMPLE_CONFIGFS)		+= configfs/
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index 0b5acd7..c91e913 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -83,7 +83,6 @@ static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
 static u32 opt_umem_flags;
 static int opt_unaligned_chunks;
 static int opt_mmap_flags;
-static u32 opt_xdp_bind_flags;
 static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
 static int opt_timeout = 1000;
 static bool opt_need_wakeup = true;
@@ -789,7 +788,8 @@ static void kick_tx(struct xsk_socket_info *xsk)
 	int ret;
 
 	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
-	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
+	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN ||
+	    errno == EBUSY || errno == ENETDOWN)
 		return;
 	exit_with_error(errno);
 }
diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c
index 9ca3e44..c406f03 100644
--- a/samples/kfifo/bytestream-example.c
+++ b/samples/kfifo/bytestream-example.c
@@ -142,11 +142,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
 	return ret ? ret : copied;
 }
 
-static const struct file_operations fifo_fops = {
-	.owner		= THIS_MODULE,
-	.read		= fifo_read,
-	.write		= fifo_write,
-	.llseek		= noop_llseek,
+static const struct proc_ops fifo_proc_ops = {
+	.proc_read	= fifo_read,
+	.proc_write	= fifo_write,
+	.proc_lseek	= noop_llseek,
 };
 
 static int __init example_init(void)
@@ -169,7 +168,7 @@ static int __init example_init(void)
 		return -EIO;
 	}
 
-	if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
+	if (proc_create(PROC_FIFO, 0, NULL, &fifo_proc_ops) == NULL) {
 #ifdef DYNAMIC
 		kfifo_free(&test);
 #endif
diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c
index 6cdeb72..78977fc 100644
--- a/samples/kfifo/inttype-example.c
+++ b/samples/kfifo/inttype-example.c
@@ -135,11 +135,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
 	return ret ? ret : copied;
 }
 
-static const struct file_operations fifo_fops = {
-	.owner		= THIS_MODULE,
-	.read		= fifo_read,
-	.write		= fifo_write,
-	.llseek		= noop_llseek,
+static const struct proc_ops fifo_proc_ops = {
+	.proc_read	= fifo_read,
+	.proc_write	= fifo_write,
+	.proc_lseek	= noop_llseek,
 };
 
 static int __init example_init(void)
@@ -160,7 +159,7 @@ static int __init example_init(void)
 		return -EIO;
 	}
 
-	if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
+	if (proc_create(PROC_FIFO, 0, NULL, &fifo_proc_ops) == NULL) {
 #ifdef DYNAMIC
 		kfifo_free(&test);
 #endif
diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c
index 79ae8bb..c507998 100644
--- a/samples/kfifo/record-example.c
+++ b/samples/kfifo/record-example.c
@@ -149,11 +149,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
 	return ret ? ret : copied;
 }
 
-static const struct file_operations fifo_fops = {
-	.owner		= THIS_MODULE,
-	.read		= fifo_read,
-	.write		= fifo_write,
-	.llseek		= noop_llseek,
+static const struct proc_ops fifo_proc_ops = {
+	.proc_read	= fifo_read,
+	.proc_write	= fifo_write,
+	.proc_lseek	= noop_llseek,
 };
 
 static int __init example_init(void)
@@ -176,7 +175,7 @@ static int __init example_init(void)
 		return -EIO;
 	}
 
-	if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
+	if (proc_create(PROC_FIFO, 0, NULL, &fifo_proc_ops) == NULL) {
 #ifdef DYNAMIC
 		kfifo_free(&test);
 #endif
diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c
index ac5c8c1..3cc5e59 100644
--- a/samples/vfio-mdev/mbochs.c
+++ b/samples/vfio-mdev/mbochs.c
@@ -891,26 +891,10 @@ static void mbochs_release_dmabuf(struct dma_buf *buf)
 	mutex_unlock(&mdev_state->ops_lock);
 }
 
-static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num)
-{
-	struct mbochs_dmabuf *dmabuf = buf->priv;
-	struct page *page = dmabuf->pages[page_num];
-
-	return kmap(page);
-}
-
-static void mbochs_kunmap_dmabuf(struct dma_buf *buf, unsigned long page_num,
-				 void *vaddr)
-{
-	kunmap(vaddr);
-}
-
 static struct dma_buf_ops mbochs_dmabuf_ops = {
 	.map_dma_buf	  = mbochs_map_dmabuf,
 	.unmap_dma_buf	  = mbochs_unmap_dmabuf,
 	.release	  = mbochs_release_dmabuf,
-	.map		  = mbochs_kmap_dmabuf,
-	.unmap		  = mbochs_kunmap_dmabuf,
 	.mmap		  = mbochs_mmap_dmabuf,
 };
 
diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c
index 2719bb2..21dbf63 100644
--- a/samples/vfio-mdev/mdpy-fb.c
+++ b/samples/vfio-mdev/mdpy-fb.c
@@ -86,7 +86,7 @@ static void mdpy_fb_destroy(struct fb_info *info)
 		iounmap(info->screen_base);
 }
 
-static struct fb_ops mdpy_fb_ops = {
+static const struct fb_ops mdpy_fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_destroy	= mdpy_fb_destroy,
 	.fb_setcolreg	= mdpy_fb_setcolreg,
diff --git a/scripts/.gitignore b/scripts/.gitignore
index 306054e..ef45f96 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -2,7 +2,6 @@
 # Generated files
 #
 bin2c
-conmakehash
 kallsyms
 unifdef
 recordmcount
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index bc5f257..6cabf20 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -33,6 +33,10 @@
 escsq = $(subst $(squote),'\$(squote)',$1)
 
 ###
+# Quote a string to pass it to C files. foo => '"foo"'
+stringify = $(squote)$(quote)$1$(quote)$(squote)
+
+###
 # Easy method for doing a status message
        kecho := :
  quiet_kecho := echo
@@ -55,14 +59,13 @@
 # - stdin is piped in from the first prerequisite ($<) so one has
 #   to specify a valid file as first prerequisite (often the kbuild file)
 define filechk
-	$(Q)set -e;				\
-	mkdir -p $(dir $@);			\
-	{ $(filechk_$(1)); } > $@.tmp;		\
-	if [ -r $@ ] && cmp -s $@ $@.tmp; then	\
-		rm -f $@.tmp;			\
-	else					\
-		$(kecho) '  UPD     $@';	\
-		mv -f $@.tmp $@;		\
+	$(Q)set -e;						\
+	mkdir -p $(dir $@);					\
+	trap "rm -f $(dot-target).tmp" EXIT;			\
+	{ $(filechk_$(1)); } > $(dot-target).tmp;		\
+	if [ ! -r $@ ] || ! cmp -s $@ $(dot-target).tmp; then	\
+		$(kecho) '  UPD     $@';			\
+		mv -f $(dot-target).tmp $@;			\
 	fi
 endef
 
@@ -160,12 +163,6 @@
 build := -f $(srctree)/scripts/Makefile.build obj
 
 ###
-# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.modbuiltin obj=
-# Usage:
-# $(Q)$(MAKE) $(modbuiltin)=dir
-modbuiltin := -f $(srctree)/scripts/Makefile.modbuiltin obj
-
-###
 # Shorthand for $(Q)$(MAKE) -f scripts/Makefile.dtbinst obj=
 # Usage:
 # $(Q)$(MAKE) $(dtbinst)=dir
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index 9d07e59..85334dc 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -25,7 +25,7 @@
 
 # $(cc-option,<flag>)
 # Return y if the compiler supports <flag>, n otherwise
-cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)
+cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -S -x c /dev/null -o /dev/null)
 
 # $(ld-option,<flag>)
 # Return y if the linker supports <flag>, n otherwise
diff --git a/scripts/Makefile b/scripts/Makefile
index b0e9626..4d41f48 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -4,14 +4,11 @@
 # the kernel for the build process.
 # ---------------------------------------------------------------------------
 # kallsyms:      Find all symbols in vmlinux
-# conmakehash:   Create chartable
-# conmakehash:	 Create arrays for initializing the kernel console tables
 
 HOST_EXTRACFLAGS += -I$(srctree)/tools/include
 
 hostprogs-$(CONFIG_BUILD_BIN2C)  += bin2c
 hostprogs-$(CONFIG_KALLSYMS)     += kallsyms
-hostprogs-$(CONFIG_VT)           += conmakehash
 hostprogs-$(BUILD_C_RECORDMCOUNT) += recordmcount
 hostprogs-$(CONFIG_BUILDTIME_TABLE_SORT) += sorttable
 hostprogs-$(CONFIG_ASN1)	 += asn1_compiler
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index b734ac8..a562d69 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -66,7 +66,7 @@
 real-obj-y += $(obj)/lib-ksyms.o
 endif
 
-ifneq ($(strip $(real-obj-y) $(need-builtin)),)
+ifdef need-builtin
 builtin-target := $(obj)/built-in.a
 endif
 
@@ -372,7 +372,7 @@
 # ---------------------------------------------------------------------------
 
 # To build objects in subdirs, we need to descend into the directories
-$(sort $(subdir-obj-y)): $(subdir-ym) ;
+$(obj)/%/built-in.a: $(obj)/% ;
 
 #
 # Rule to compile a set of .o files into one .a file (without symbol table)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 3fa32f8..d10f7a0 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -35,7 +35,11 @@
 subdir-y	+= $(__subdir-y)
 __subdir-m	:= $(patsubst %/,%,$(filter %/, $(obj-m)))
 subdir-m	+= $(__subdir-m)
+ifdef need-builtin
 obj-y		:= $(patsubst %/, %/built-in.a, $(obj-y))
+else
+obj-y		:= $(filter-out %/, $(obj-y))
+endif
 obj-m		:= $(filter-out %/, $(obj-m))
 
 # Subdirectories we need to descend into
@@ -80,12 +84,14 @@
 subdir-ym	:= $(addprefix $(obj)/,$(subdir-ym))
 
 # Finds the multi-part object the current object will be linked into.
-# If the object belongs to two or more multi-part objects, all of them are
-# concatenated with a colon separator.
-modname-multi = $(subst $(space),:,$(sort $(foreach m,$(multi-used),\
-		$(if $(filter $*.o, $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))),$(m:.o=)))))
+# If the object belongs to two or more multi-part objects, list them all.
+modname-multi = $(sort $(foreach m,$(multi-used),\
+		$(if $(filter $*.o, $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))),$(m:.o=))))
 
-modname = $(if $(modname-multi),$(modname-multi),$(basetarget))
+__modname = $(if $(modname-multi),$(modname-multi),$(basetarget))
+
+modname = $(subst $(space),:,$(__modname))
+modfile = $(addprefix $(obj)/,$(__modname))
 
 # target with $(obj)/ and its suffix stripped
 target-stem = $(basename $(patsubst $(obj)/%,%,$@))
@@ -93,9 +99,10 @@
 # These flags are needed for modversions and compiling, so we define them here
 # $(modname_flags) defines KBUILD_MODNAME as the name of the module it will
 # end up in (or would, if it gets compiled in)
-name-fix = $(squote)$(quote)$(subst $(comma),_,$(subst -,_,$1))$(quote)$(squote)
+name-fix = $(call stringify,$(subst $(comma),_,$(subst -,_,$1)))
 basename_flags = -DKBUILD_BASENAME=$(call name-fix,$(basetarget))
 modname_flags  = -DKBUILD_MODNAME=$(call name-fix,$(modname))
+modfile_flags  = -DKBUILD_MODFILE=$(call stringify,$(modfile))
 
 orig_c_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) \
                  $(ccflags-y) $(CFLAGS_$(target-stem).o)
@@ -154,7 +161,7 @@
 modkern_cflags =                                          \
 	$(if $(part-of-module),                           \
 		$(KBUILD_CFLAGS_MODULE) $(CFLAGS_MODULE), \
-		$(KBUILD_CFLAGS_KERNEL) $(CFLAGS_KERNEL))
+		$(KBUILD_CFLAGS_KERNEL) $(CFLAGS_KERNEL) $(modfile_flags))
 
 modkern_aflags = $(if $(part-of-module),				\
 			$(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE),	\
diff --git a/scripts/Makefile.modbuiltin b/scripts/Makefile.modbuiltin
deleted file mode 100644
index 7d4711b..0000000
--- a/scripts/Makefile.modbuiltin
+++ /dev/null
@@ -1,57 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# ==========================================================================
-# Generating modules.builtin
-# ==========================================================================
-
-src := $(obj)
-
-PHONY := __modbuiltin
-__modbuiltin:
-
-include include/config/auto.conf
-# tristate.conf sets tristate variables to uppercase 'Y' or 'M'
-# That way, we get the list of built-in modules in obj-Y
-include include/config/tristate.conf
-
-include scripts/Kbuild.include
-
-ifdef building_out_of_srctree
-# Create output directory if not already present
-_dummy := $(shell [ -d $(obj) ] || mkdir -p $(obj))
-endif
-
-# The filename Kbuild has precedence over Makefile
-kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
-kbuild-file := $(if $(wildcard $(kbuild-dir)/Kbuild),$(kbuild-dir)/Kbuild,$(kbuild-dir)/Makefile)
-include $(kbuild-file)
-
-include scripts/Makefile.lib
-__subdir-Y     := $(patsubst %/,%,$(filter %/, $(obj-Y)))
-subdir-Y       += $(__subdir-Y)
-subdir-ym      := $(sort $(subdir-y) $(subdir-Y) $(subdir-m))
-subdir-ym      := $(addprefix $(obj)/,$(subdir-ym))
-obj-Y          := $(addprefix $(obj)/,$(obj-Y))
-
-modbuiltin-subdirs := $(patsubst %,%/modules.builtin, $(subdir-ym))
-modbuiltin-mods    := $(filter %.ko, $(obj-Y:.o=.ko))
-modbuiltin-target  := $(obj)/modules.builtin
-
-__modbuiltin: $(modbuiltin-target) $(subdir-ym)
-	@:
-
-$(modbuiltin-target): $(subdir-ym) FORCE
-	$(Q)(for m in $(modbuiltin-mods); do echo $$m; done;	\
-	cat /dev/null $(modbuiltin-subdirs)) > $@
-
-PHONY += FORCE
-
-FORCE:
-
-# Descending
-# ---------------------------------------------------------------------------
-
-PHONY += $(subdir-ym)
-$(subdir-ym):
-	$(Q)$(MAKE) $(modbuiltin)=$@
-
-.PHONY: $(PHONY)
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 69897d5..b4d3f2d 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -90,7 +90,6 @@
       cmd_modpost = sed 's/ko$$/o/' $(MODORDER) | $(MODPOST)
 
 __modpost:
-	@$(kecho) '  Building modules, stage 2.'
 	$(call cmd,modpost)
 ifneq ($(KBUILD_MODPOST_NOFINAL),1)
 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modfinal
diff --git a/scripts/coccinelle/free/devm_free.cocci b/scripts/coccinelle/free/devm_free.cocci
index 9330d42..3357bf4 100644
--- a/scripts/coccinelle/free/devm_free.cocci
+++ b/scripts/coccinelle/free/devm_free.cocci
@@ -91,8 +91,6 @@
 |
  kzfree@p(x)
 |
- __krealloc@p(x, ...)
-|
  krealloc@p(x, ...)
 |
  free_pages@p(x, ...)
@@ -116,8 +114,6 @@
 |
 * kzfree@p(x)
 |
-* __krealloc@p(x, ...)
-|
 * krealloc@p(x, ...)
 |
 * free_pages@p(x, ...)
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c
index d7986ee1..756f0fa 100644
--- a/scripts/dtc/checks.c
+++ b/scripts/dtc/checks.c
@@ -691,6 +691,11 @@ static void check_alias_paths(struct check *c, struct dt_info *dti,
 		return;
 
 	for_each_property(node, prop) {
+		if (streq(prop->name, "phandle")
+		    || streq(prop->name, "linux,phandle")) {
+			continue;
+		}
+
 		if (!prop->val.val || !get_node_by_path(dti->dt, prop->val.val)) {
 			FAIL_PROP(c, dti, node, prop, "aliases property is not a valid node (%s)",
 				  prop->val.val);
diff --git a/scripts/dtc/dtc-parser.y b/scripts/dtc/dtc-parser.y
index 2ed4dc1..40dcf4f 100644
--- a/scripts/dtc/dtc-parser.y
+++ b/scripts/dtc/dtc-parser.y
@@ -2,6 +2,8 @@
 /*
  * (C) Copyright David Gibson <dwg@au1.ibm.com>, IBM Corporation.  2005.
  */
+%locations
+
 %{
 #include <stdio.h>
 #include <inttypes.h>
@@ -17,6 +19,8 @@
 		treesource_error = true; \
 	} while (0)
 
+#define YYERROR_CALL(msg) yyerror(msg)
+
 extern struct dt_info *parser_output;
 extern bool treesource_error;
 %}
diff --git a/scripts/dtc/fstree.c b/scripts/dtc/fstree.c
index 9871689..5e59594a 100644
--- a/scripts/dtc/fstree.c
+++ b/scripts/dtc/fstree.c
@@ -30,7 +30,7 @@ static struct node *read_fstree(const char *dirname)
 
 		tmpname = join_path(dirname, de->d_name);
 
-		if (lstat(tmpname, &st) < 0)
+		if (stat(tmpname, &st) < 0)
 			die("stat(%s): %s\n", tmpname, strerror(errno));
 
 		if (S_ISREG(st.st_mode)) {
diff --git a/scripts/dtc/libfdt/fdt.c b/scripts/dtc/libfdt/fdt.c
index 179168e..d6ce7c0 100644
--- a/scripts/dtc/libfdt/fdt.c
+++ b/scripts/dtc/libfdt/fdt.c
@@ -15,8 +15,10 @@
  * that the given buffer contains what appears to be a flattened
  * device tree with sane information in its header.
  */
-int fdt_ro_probe_(const void *fdt)
+int32_t fdt_ro_probe_(const void *fdt)
 {
+	uint32_t totalsize = fdt_totalsize(fdt);
+
 	if (fdt_magic(fdt) == FDT_MAGIC) {
 		/* Complete tree */
 		if (fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION)
@@ -31,7 +33,10 @@ int fdt_ro_probe_(const void *fdt)
 		return -FDT_ERR_BADMAGIC;
 	}
 
-	return 0;
+	if (totalsize < INT32_MAX)
+		return totalsize;
+	else
+		return -FDT_ERR_TRUNCATED;
 }
 
 static int check_off_(uint32_t hdrsize, uint32_t totalsize, uint32_t off)
diff --git a/scripts/dtc/libfdt/fdt_addresses.c b/scripts/dtc/libfdt/fdt_addresses.c
index d8ba8ec..9a82cd0 100644
--- a/scripts/dtc/libfdt/fdt_addresses.c
+++ b/scripts/dtc/libfdt/fdt_addresses.c
@@ -14,7 +14,7 @@
 static int fdt_cells(const void *fdt, int nodeoffset, const char *name)
 {
 	const fdt32_t *c;
-	int val;
+	uint32_t val;
 	int len;
 
 	c = fdt_getprop(fdt, nodeoffset, name, &len);
@@ -25,10 +25,10 @@ static int fdt_cells(const void *fdt, int nodeoffset, const char *name)
 		return -FDT_ERR_BADNCELLS;
 
 	val = fdt32_to_cpu(*c);
-	if ((val <= 0) || (val > FDT_MAX_NCELLS))
+	if (val > FDT_MAX_NCELLS)
 		return -FDT_ERR_BADNCELLS;
 
-	return val;
+	return (int)val;
 }
 
 int fdt_address_cells(const void *fdt, int nodeoffset)
@@ -36,6 +36,8 @@ int fdt_address_cells(const void *fdt, int nodeoffset)
 	int val;
 
 	val = fdt_cells(fdt, nodeoffset, "#address-cells");
+	if (val == 0)
+		return -FDT_ERR_BADNCELLS;
 	if (val == -FDT_ERR_NOTFOUND)
 		return 2;
 	return val;
diff --git a/scripts/dtc/libfdt/fdt_overlay.c b/scripts/dtc/libfdt/fdt_overlay.c
index e97f12b..b310e49 100644
--- a/scripts/dtc/libfdt/fdt_overlay.c
+++ b/scripts/dtc/libfdt/fdt_overlay.c
@@ -733,26 +733,36 @@ static int overlay_symbol_update(void *fdt, void *fdto)
 		/* keep end marker to avoid strlen() */
 		e = path + path_len;
 
-		/* format: /<fragment-name>/__overlay__/<relative-subnode-path> */
-
 		if (*path != '/')
 			return -FDT_ERR_BADVALUE;
 
 		/* get fragment name first */
 		s = strchr(path + 1, '/');
-		if (!s)
-			return -FDT_ERR_BADOVERLAY;
+		if (!s) {
+			/* Symbol refers to something that won't end
+			 * up in the target tree */
+			continue;
+		}
 
 		frag_name = path + 1;
 		frag_name_len = s - path - 1;
 
 		/* verify format; safe since "s" lies in \0 terminated prop */
 		len = sizeof("/__overlay__/") - 1;
-		if ((e - s) < len || memcmp(s, "/__overlay__/", len))
-			return -FDT_ERR_BADOVERLAY;
-
-		rel_path = s + len;
-		rel_path_len = e - rel_path;
+		if ((e - s) > len && (memcmp(s, "/__overlay__/", len) == 0)) {
+			/* /<fragment-name>/__overlay__/<relative-subnode-path> */
+			rel_path = s + len;
+			rel_path_len = e - rel_path - 1;
+		} else if ((e - s) == len
+			   && (memcmp(s, "/__overlay__", len - 1) == 0)) {
+			/* /<fragment-name>/__overlay__ */
+			rel_path = "";
+			rel_path_len = 0;
+		} else {
+			/* Symbol refers to something that won't end
+			 * up in the target tree */
+			continue;
+		}
 
 		/* find the fragment index in which the symbol lies */
 		ret = fdt_subnode_offset_namelen(fdto, 0, frag_name,
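
The relaxed parsing above accepts both forms of a __symbols__ value instead of failing the whole overlay apply with -FDT_ERR_BADOVERLAY. A worked example of how one value would now be split (the overlay and node names are hypothetical, not taken from this patch):

/*
 * path      = "/fragment@0/__overlay__/led0"
 *   frag_name = "fragment@0"   (text between the first two '/')
 *   rel_path  = "led0"         (text after "/__overlay__/")
 *
 * path      = "/fragment@0/__overlay__"
 *   rel_path  = ""             (the symbol names the overlay node itself)
 *
 * path      = "/fragment@0"
 *   skipped with "continue" -- the symbol cannot end up in the target tree
 */
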
diff --git a/scripts/dtc/libfdt/fdt_ro.c b/scripts/dtc/libfdt/fdt_ro.c
index 6fd9ec1..a5c2797 100644
--- a/scripts/dtc/libfdt/fdt_ro.c
+++ b/scripts/dtc/libfdt/fdt_ro.c
@@ -33,19 +33,20 @@ static int fdt_nodename_eq_(const void *fdt, int offset,
 
 const char *fdt_get_string(const void *fdt, int stroffset, int *lenp)
 {
+	int32_t totalsize = fdt_ro_probe_(fdt);
 	uint32_t absoffset = stroffset + fdt_off_dt_strings(fdt);
 	size_t len;
 	int err;
 	const char *s, *n;
 
-	err = fdt_ro_probe_(fdt);
-	if (err != 0)
+	err = totalsize;
+	if (totalsize < 0)
 		goto fail;
 
 	err = -FDT_ERR_BADOFFSET;
-	if (absoffset >= fdt_totalsize(fdt))
+	if (absoffset >= totalsize)
 		goto fail;
-	len = fdt_totalsize(fdt) - absoffset;
+	len = totalsize - absoffset;
 
 	if (fdt_magic(fdt) == FDT_MAGIC) {
 		if (stroffset < 0)
@@ -288,7 +289,7 @@ const char *fdt_get_name(const void *fdt, int nodeoffset, int *len)
 	const char *nameptr;
 	int err;
 
-	if (((err = fdt_ro_probe_(fdt)) != 0)
+	if (((err = fdt_ro_probe_(fdt)) < 0)
 	    || ((err = fdt_check_node_offset_(fdt, nodeoffset)) < 0))
 			goto fail;
 
diff --git a/scripts/dtc/libfdt/libfdt.h b/scripts/dtc/libfdt/libfdt.h
index 7b5ffd1..8907b09 100644
--- a/scripts/dtc/libfdt/libfdt.h
+++ b/scripts/dtc/libfdt/libfdt.h
@@ -136,7 +136,7 @@ static inline uint32_t fdt32_ld(const fdt32_t *p)
 
 static inline void fdt32_st(void *property, uint32_t value)
 {
-	uint8_t *bp = property;
+	uint8_t *bp = (uint8_t *)property;
 
 	bp[0] = value >> 24;
 	bp[1] = (value >> 16) & 0xff;
@@ -160,7 +160,7 @@ static inline uint64_t fdt64_ld(const fdt64_t *p)
 
 static inline void fdt64_st(void *property, uint64_t value)
 {
-	uint8_t *bp = property;
+	uint8_t *bp = (uint8_t *)property;
 
 	bp[0] = value >> 56;
 	bp[1] = (value >> 48) & 0xff;
diff --git a/scripts/dtc/libfdt/libfdt_internal.h b/scripts/dtc/libfdt/libfdt_internal.h
index 7830e55..058c735 100644
--- a/scripts/dtc/libfdt/libfdt_internal.h
+++ b/scripts/dtc/libfdt/libfdt_internal.h
@@ -10,12 +10,12 @@
 #define FDT_ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
 #define FDT_TAGALIGN(x)		(FDT_ALIGN((x), FDT_TAGSIZE))
 
-int fdt_ro_probe_(const void *fdt);
-#define FDT_RO_PROBE(fdt)			\
-	{ \
-		int err_; \
-		if ((err_ = fdt_ro_probe_(fdt)) != 0)	\
-			return err_; \
+int32_t fdt_ro_probe_(const void *fdt);
+#define FDT_RO_PROBE(fdt)					\
+	{							\
+		int32_t totalsize_;				\
+		if ((totalsize_ = fdt_ro_probe_(fdt)) < 0)	\
+			return totalsize_;			\
 	}
 
 int fdt_check_node_offset_(const void *fdt, int offset);
diff --git a/scripts/dtc/livetree.c b/scripts/dtc/livetree.c
index 0c03999..032df58 100644
--- a/scripts/dtc/livetree.c
+++ b/scripts/dtc/livetree.c
@@ -526,8 +526,7 @@ struct node *get_node_by_path(struct node *tree, const char *path)
 	p = strchr(path, '/');
 
 	for_each_child(tree, child) {
-		if (p && (strlen(child->name) == p-path) &&
-		    strprefixeq(path, p - path, child->name))
+		if (p && strprefixeq(path, p - path, child->name))
 			return get_node_by_path(child, p+1);
 		else if (!p && streq(path, child->name))
 			return child;
diff --git a/scripts/dtc/util.c b/scripts/dtc/util.c
index 48af961..40274fb 100644
--- a/scripts/dtc/util.c
+++ b/scripts/dtc/util.c
@@ -13,6 +13,7 @@
 #include <stdarg.h>
 #include <string.h>
 #include <assert.h>
+#include <inttypes.h>
 
 #include <errno.h>
 #include <fcntl.h>
@@ -393,7 +394,7 @@ void utilfdt_print_data(const char *data, int len)
 
 		printf(" = <");
 		for (i = 0, len /= 4; i < len; i++)
-			printf("0x%08x%s", fdt32_to_cpu(cell[i]),
+			printf("0x%08" PRIx32 "%s", fdt32_to_cpu(cell[i]),
 			       i < (len - 1) ? " " : "");
 		printf(">");
 	} else {
diff --git a/scripts/dtc/util.h b/scripts/dtc/util.h
index ca5cb52..5a4172d 100644
--- a/scripts/dtc/util.h
+++ b/scripts/dtc/util.h
@@ -12,7 +12,11 @@
  */
 
 #ifdef __GNUC__
+#ifdef __clang__
 #define PRINTF(i, j)	__attribute__((format (printf, i, j)))
+#else
+#define PRINTF(i, j)	__attribute__((format (gnu_printf, i, j)))
+#endif
 #define NORETURN	__attribute__((noreturn))
 #else
 #define PRINTF(i, j)
diff --git a/scripts/dtc/version_gen.h b/scripts/dtc/version_gen.h
index f2761e2..6dba95d 100644
--- a/scripts/dtc/version_gen.h
+++ b/scripts/dtc/version_gen.h
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.5.0-g702c1b6c"
+#define DTC_VERSION "DTC 1.5.0-gc40aeb60"
diff --git a/scripts/find-unused-docs.sh b/scripts/find-unused-docs.sh
index 3f46f89..ee6a50e 100755
--- a/scripts/find-unused-docs.sh
+++ b/scripts/find-unused-docs.sh
@@ -54,7 +54,7 @@
 	if [[ ${FILES_INCLUDED[$file]+_} ]]; then
 	continue;
 	fi
-	str=$(scripts/kernel-doc -text -export "$file" 2>/dev/null)
+	str=$(scripts/kernel-doc -export "$file" 2>/dev/null)
 	if [[ -n "$str" ]]; then
 	echo "$file"
 	fi
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 2f1a59f..fbeb62a 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -47,16 +47,16 @@
 build_xconfig: $(obj)/qconf
 
 localyesconfig localmodconfig: $(obj)/conf
-	$(Q)perl $(srctree)/$(src)/streamline_config.pl --$@ $(srctree) $(Kconfig) > .tmp.config
-	$(Q)if [ -f .config ]; then 					\
-			cmp -s .tmp.config .config ||			\
-			(mv -f .config .config.old.1;			\
-			 mv -f .tmp.config .config;			\
-			 $< $(silent) --oldconfig $(Kconfig);		\
-			 mv -f .config.old.1 .config.old)		\
-	else								\
-			mv -f .tmp.config .config;			\
-			$< $(silent) --oldconfig $(Kconfig);		\
+	$(Q)$(PERL) $(srctree)/$(src)/streamline_config.pl --$@ $(srctree) $(Kconfig) > .tmp.config
+	$(Q)if [ -f .config ]; then 				\
+		cmp -s .tmp.config .config ||			\
+		(mv -f .config .config.old.1;			\
+		 mv -f .tmp.config .config;			\
+		 $< $(silent) --oldconfig $(Kconfig);		\
+		 mv -f .config.old.1 .config.old)		\
+	else							\
+		mv -f .tmp.config .config;			\
+		$< $(silent) --oldconfig $(Kconfig);		\
 	fi
 	$(Q)rm -f .tmp.config
 
@@ -67,7 +67,7 @@
 #  deprecated for external use
 simple-targets := oldconfig allnoconfig allyesconfig allmodconfig \
 	alldefconfig randconfig listnewconfig olddefconfig syncconfig \
-	helpnewconfig
+	helpnewconfig yes2modconfig mod2yesconfig
 
 PHONY += $(simple-targets)
 
@@ -135,6 +135,8 @@
 	@echo  '  allmodconfig	  - New config selecting modules when possible'
 	@echo  '  alldefconfig    - New config with all symbols set to default'
 	@echo  '  randconfig	  - New config with random answer to all options'
+	@echo  '  yes2modconfig	  - Change answers from yes to mod if possible'
+	@echo  '  mod2yesconfig	  - Change answers from mod to yes if possible'
 	@echo  '  listnewconfig   - List new options'
 	@echo  '  helpnewconfig   - List new options and help text'
 	@echo  '  olddefconfig	  - Same as oldconfig but sets new symbols to their'
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 1f89bf1..f6e548b 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -34,6 +34,8 @@ enum input_mode {
 	listnewconfig,
 	helpnewconfig,
 	olddefconfig,
+	yes2modconfig,
+	mod2yesconfig,
 };
 static enum input_mode input_mode = oldaskconfig;
 
@@ -467,6 +469,8 @@ static struct option long_opts[] = {
 	{"listnewconfig",   no_argument,       NULL, listnewconfig},
 	{"helpnewconfig",   no_argument,       NULL, helpnewconfig},
 	{"olddefconfig",    no_argument,       NULL, olddefconfig},
+	{"yes2modconfig",   no_argument,       NULL, yes2modconfig},
+	{"mod2yesconfig",   no_argument,       NULL, mod2yesconfig},
 	{NULL, 0, NULL, 0}
 };
 
@@ -489,6 +493,8 @@ static void conf_usage(const char *progname)
 	printf("  --allmodconfig          New config where all options are answered with mod\n");
 	printf("  --alldefconfig          New config with all symbols set to default\n");
 	printf("  --randconfig            New config with random answer to all options\n");
+	printf("  --yes2modconfig         Change answers from yes to mod if possible\n");
+	printf("  --mod2yesconfig         Change answers from mod to yes if possible\n");
 }
 
 int main(int ac, char **av)
@@ -553,6 +559,8 @@ int main(int ac, char **av)
 		case listnewconfig:
 		case helpnewconfig:
 		case olddefconfig:
+		case yes2modconfig:
+		case mod2yesconfig:
 			break;
 		case '?':
 			conf_usage(progname);
@@ -587,6 +595,8 @@ int main(int ac, char **av)
 	case listnewconfig:
 	case helpnewconfig:
 	case olddefconfig:
+	case yes2modconfig:
+	case mod2yesconfig:
 		conf_read(NULL);
 		break;
 	case allnoconfig:
@@ -660,6 +670,12 @@ int main(int ac, char **av)
 		break;
 	case savedefconfig:
 		break;
+	case yes2modconfig:
+		conf_rewrite_mod_or_yes(def_y2m);
+		break;
+	case mod2yesconfig:
+		conf_rewrite_mod_or_yes(def_m2y);
+		break;
 	case oldaskconfig:
 		rootEntry = &rootmenu;
 		conf(&rootmenu);
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 3569d2d..11f6c72 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -710,25 +710,6 @@ static struct conf_printer header_printer_cb =
 	.print_comment = header_print_comment,
 };
 
-/*
- * Tristate printer
- *
- * This printer is used when generating the `include/config/tristate.conf' file.
- */
-static void
-tristate_print_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg)
-{
-
-	if (sym->type == S_TRISTATE && *value != 'n')
-		fprintf(fp, "%s%s=%c\n", CONFIG_, sym->name, (char)toupper(*value));
-}
-
-static struct conf_printer tristate_printer_cb =
-{
-	.print_symbol = tristate_print_symbol,
-	.print_comment = kconfig_print_comment,
-};
-
 static void conf_write_symbol(FILE *fp, struct symbol *sym,
 			      struct conf_printer *printer, void *printer_arg)
 {
@@ -1062,7 +1043,7 @@ int conf_write_autoconf(int overwrite)
 	struct symbol *sym;
 	const char *name;
 	const char *autoconf_name = conf_get_autoconfig_name();
-	FILE *out, *tristate, *out_h;
+	FILE *out, *out_h;
 	int i;
 
 	if (!overwrite && is_present(autoconf_name))
@@ -1077,23 +1058,13 @@ int conf_write_autoconf(int overwrite)
 	if (!out)
 		return 1;
 
-	tristate = fopen(".tmpconfig_tristate", "w");
-	if (!tristate) {
-		fclose(out);
-		return 1;
-	}
-
 	out_h = fopen(".tmpconfig.h", "w");
 	if (!out_h) {
 		fclose(out);
-		fclose(tristate);
 		return 1;
 	}
 
 	conf_write_heading(out, &kconfig_printer_cb, NULL);
-
-	conf_write_heading(tristate, &tristate_printer_cb, NULL);
-
 	conf_write_heading(out_h, &header_printer_cb, NULL);
 
 	for_all_symbols(i, sym) {
@@ -1101,15 +1072,11 @@ int conf_write_autoconf(int overwrite)
 		if (!(sym->flags & SYMBOL_WRITE) || !sym->name)
 			continue;
 
-		/* write symbol to auto.conf, tristate and header files */
+		/* write symbols to auto.conf and autoconf.h */
 		conf_write_symbol(out, sym, &kconfig_printer_cb, (void *)1);
-
-		conf_write_symbol(tristate, sym, &tristate_printer_cb, (void *)1);
-
 		conf_write_symbol(out_h, sym, &header_printer_cb, NULL);
 	}
 	fclose(out);
-	fclose(tristate);
 	fclose(out_h);
 
 	name = getenv("KCONFIG_AUTOHEADER");
@@ -1120,14 +1087,6 @@ int conf_write_autoconf(int overwrite)
 	if (rename(".tmpconfig.h", name))
 		return 1;
 
-	name = getenv("KCONFIG_TRISTATE");
-	if (!name)
-		name = "include/config/tristate.conf";
-	if (make_parent_dir(name))
-		return 1;
-	if (rename(".tmpconfig_tristate", name))
-		return 1;
-
 	if (make_parent_dir(autoconf_name))
 		return 1;
 	/*
@@ -1362,3 +1321,19 @@ bool conf_set_all_new_symbols(enum conf_def_mode mode)
 
 	return has_changed;
 }
+
+void conf_rewrite_mod_or_yes(enum conf_def_mode mode)
+{
+	struct symbol *sym;
+	int i;
+	tristate old_val = (mode == def_y2m) ? yes : mod;
+	tristate new_val = (mode == def_y2m) ? mod : yes;
+
+	for_all_symbols(i, sym) {
+		if (sym_get_type(sym) == S_TRISTATE &&
+		    sym->def[S_DEF_USER].tri == old_val) {
+			sym->def[S_DEF_USER].tri = new_val;
+			sym_add_change_count(1);
+		}
+	}
+}
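
To make the effect of conf_rewrite_mod_or_yes() concrete, here is a toy model of the same yes<->mod rewrite outside the kconfig code base; the types and the symbol table are invented and only mirror the shape of the real function.

/* Toy model of the yes2mod/mod2yes rewrite; types are made up for the demo. */
#include <stdio.h>

enum tristate { NO, MOD, YES };
enum rewrite_mode { Y2M, M2Y };

struct sym { const char *name; int is_tristate; enum tristate val; };

static void rewrite_mod_or_yes(struct sym *syms, int n, enum rewrite_mode mode)
{
	enum tristate old_val = (mode == Y2M) ? YES : MOD;
	enum tristate new_val = (mode == Y2M) ? MOD : YES;
	int i;

	for (i = 0; i < n; i++)
		if (syms[i].is_tristate && syms[i].val == old_val)
			syms[i].val = new_val;
}

int main(void)
{
	struct sym syms[] = {
		{ "USB_STORAGE", 1, YES },
		{ "SMP",         0, YES },	/* bool: never touched */
		{ "EXT4_FS",     1, MOD },
	};

	rewrite_mod_or_yes(syms, 3, Y2M);
	printf("%s -> %d\n", syms[0].name, syms[0].val);	/* now MOD */
	return 0;
}
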
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index 9f1de58..81ebf81 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -13,7 +13,6 @@
 
 #define DEBUG_EXPR	0
 
-static int expr_eq(struct expr *e1, struct expr *e2);
 static struct expr *expr_eliminate_yn(struct expr *e);
 
 struct expr *expr_alloc_symbol(struct symbol *sym)
@@ -250,7 +249,7 @@ void expr_eliminate_eq(struct expr **ep1, struct expr **ep2)
  * equals some operand in the other (operands do not need to appear in the same
  * order), recursively.
  */
-static int expr_eq(struct expr *e1, struct expr *e2)
+int expr_eq(struct expr *e1, struct expr *e2)
 {
 	int res, old_count;
 
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 017843c..5c34436 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -191,7 +191,6 @@ enum prop_type {
 
 struct property {
 	struct property *next;     /* next property - null if last */
-	struct symbol *sym;        /* the symbol for which the property is associated */
 	enum prop_type type;       /* type of property */
 	const char *text;          /* the prompt value - P_PROMPT, P_MENU, P_COMMENT */
 	struct expr_value visible;
@@ -301,6 +300,7 @@ struct expr *expr_alloc_or(struct expr *e1, struct expr *e2);
 struct expr *expr_copy(const struct expr *org);
 void expr_free(struct expr *e);
 void expr_eliminate_eq(struct expr **ep1, struct expr **ep2);
+int expr_eq(struct expr *e1, struct expr *e2);
 tristate expr_calc_value(struct expr *e);
 struct expr *expr_trans_bool(struct expr *e);
 struct expr *expr_eliminate_dups(struct expr *e);
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index e36b342..5527482 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -18,6 +18,7 @@
 
 #include <stdio.h>
 #include <string.h>
+#include <strings.h>
 #include <unistd.h>
 #include <time.h>
 
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index 4fb16f3..d4ca829 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -34,6 +34,8 @@ enum conf_def_mode {
 	def_default,
 	def_yes,
 	def_mod,
+	def_y2m,
+	def_m2y,
 	def_no,
 	def_random
 };
@@ -52,6 +54,7 @@ const char *conf_get_configname(void);
 void sym_set_change_count(int count);
 void sym_add_change_count(int count);
 bool conf_set_all_new_symbols(enum conf_def_mode mode);
+void conf_rewrite_mod_or_yes(enum conf_def_mode mode);
 void set_all_choice_values(struct symbol *csym);
 
 /* confdata.c and expr.c */
@@ -112,7 +115,6 @@ struct symbol *sym_choice_default(struct symbol *sym);
 struct property *sym_get_range_prop(struct symbol *sym);
 const char *sym_get_string_default(struct symbol *sym);
 struct symbol *sym_check_deps(struct symbol *sym);
-struct property *prop_alloc(enum prop_type type, struct symbol *sym);
 struct symbol *prop_get_symbol(struct property *prop);
 
 static inline tristate sym_get_tristate_value(struct symbol *sym)
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 49c26ea..4063dbc 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -15,6 +15,7 @@
 #include <stdarg.h>
 #include <stdlib.h>
 #include <string.h>
+#include <strings.h>
 #include <signal.h>
 #include <unistd.h>
 
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index d9d1646..e436ba4 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -124,59 +124,74 @@ void menu_set_type(int type)
 		sym_type_name(sym->type), sym_type_name(type));
 }
 
-static struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *expr, struct expr *dep)
+static struct property *menu_add_prop(enum prop_type type, struct expr *expr,
+				      struct expr *dep)
 {
-	struct property *prop = prop_alloc(type, current_entry->sym);
+	struct property *prop;
 
+	prop = xmalloc(sizeof(*prop));
+	memset(prop, 0, sizeof(*prop));
+	prop->type = type;
+	prop->file = current_file;
+	prop->lineno = zconf_lineno();
 	prop->menu = current_entry;
 	prop->expr = expr;
 	prop->visible.expr = dep;
 
-	if (prompt) {
-		if (isspace(*prompt)) {
-			prop_warn(prop, "leading whitespace ignored");
-			while (isspace(*prompt))
-				prompt++;
-		}
-		if (current_entry->prompt && current_entry != &rootmenu)
-			prop_warn(prop, "prompt redefined");
+	/* append property to the prop list of symbol */
+	if (current_entry->sym) {
+		struct property **propp;
 
-		/* Apply all upper menus' visibilities to actual prompts. */
-		if(type == P_PROMPT) {
-			struct menu *menu = current_entry;
-
-			while ((menu = menu->parent) != NULL) {
-				struct expr *dup_expr;
-
-				if (!menu->visibility)
-					continue;
-				/*
-				 * Do not add a reference to the
-				 * menu's visibility expression but
-				 * use a copy of it.  Otherwise the
-				 * expression reduction functions
-				 * will modify expressions that have
-				 * multiple references which can
-				 * cause unwanted side effects.
-				 */
-				dup_expr = expr_copy(menu->visibility);
-
-				prop->visible.expr
-					= expr_alloc_and(prop->visible.expr,
-							 dup_expr);
-			}
-		}
-
-		current_entry->prompt = prop;
+		for (propp = &current_entry->sym->prop;
+		     *propp;
+		     propp = &(*propp)->next)
+			;
+		*propp = prop;
 	}
-	prop->text = prompt;
 
 	return prop;
 }
 
-struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep)
+struct property *menu_add_prompt(enum prop_type type, char *prompt,
+				 struct expr *dep)
 {
-	return menu_add_prop(type, prompt, NULL, dep);
+	struct property *prop = menu_add_prop(type, NULL, dep);
+
+	if (isspace(*prompt)) {
+		prop_warn(prop, "leading whitespace ignored");
+		while (isspace(*prompt))
+			prompt++;
+	}
+	if (current_entry->prompt)
+		prop_warn(prop, "prompt redefined");
+
+	/* Apply all upper menus' visibilities to actual prompts. */
+	if (type == P_PROMPT) {
+		struct menu *menu = current_entry;
+
+		while ((menu = menu->parent) != NULL) {
+			struct expr *dup_expr;
+
+			if (!menu->visibility)
+				continue;
+			/*
+			 * Do not add a reference to the menu's visibility
+			 * expression but use a copy of it. Otherwise the
+			 * expression reduction functions will modify
+			 * expressions that have multiple references which
+			 * can cause unwanted side effects.
+			 */
+			dup_expr = expr_copy(menu->visibility);
+
+			prop->visible.expr = expr_alloc_and(prop->visible.expr,
+							    dup_expr);
+		}
+	}
+
+	current_entry->prompt = prop;
+	prop->text = prompt;
+
+	return prop;
 }
 
 void menu_add_visibility(struct expr *expr)
@@ -187,12 +202,12 @@ void menu_add_visibility(struct expr *expr)
 
 void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep)
 {
-	menu_add_prop(type, NULL, expr, dep);
+	menu_add_prop(type, expr, dep);
 }
 
 void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep)
 {
-	menu_add_prop(type, NULL, expr_alloc_symbol(sym), dep);
+	menu_add_prop(type, expr_alloc_symbol(sym), dep);
 }
 
 void menu_add_option_modules(void)
@@ -326,12 +341,10 @@ void menu_finalize(struct menu *parent)
 			 * choice value symbols.
 			 */
 			parentdep = expr_alloc_symbol(sym);
-		} else if (parent->prompt)
-			/* Menu node for 'menu' */
-			parentdep = parent->prompt->visible.expr;
-		else
-			/* Menu node for 'if' */
+		} else {
+			/* Menu node for 'menu', 'if' */
 			parentdep = parent->dep;
+		}
 
 		/* For each child menu node... */
 		for (menu = parent->list; menu; menu = menu->next) {
@@ -698,6 +711,21 @@ const char *menu_get_help(struct menu *menu)
 		return "";
 }
 
+static void get_def_str(struct gstr *r, struct menu *menu)
+{
+	str_printf(r, "Defined at %s:%d\n",
+		   menu->file->name, menu->lineno);
+}
+
+static void get_dep_str(struct gstr *r, struct expr *expr, const char *prefix)
+{
+	if (!expr_is_yes(expr)) {
+		str_append(r, prefix);
+		expr_gstr_print(expr, r);
+		str_append(r, "\n");
+	}
+}
+
 static void get_prompt_str(struct gstr *r, struct property *prop,
 			   struct list_head *head)
 {
@@ -705,7 +733,20 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
 	struct menu *submenu[8], *menu, *location = NULL;
 	struct jump_key *jump = NULL;
 
-	str_printf(r, "Prompt: %s\n", prop->text);
+	str_printf(r, "  Prompt: %s\n", prop->text);
+
+	get_dep_str(r, prop->menu->dep, "  Depends on: ");
+	/*
+	 * Most prompts in Linux have visibility that exactly matches their
+	 * dependencies. For these, we print only the dependencies to improve
+	 * readability. However, prompts with inline "if" expressions and
+	 * prompts with a parent that has a "visible if" expression have
+	 * differing dependencies and visibility. In these rare cases, we
+	 * print both.
+	 */
+	if (!expr_eq(prop->menu->dep, prop->visible.expr))
+		get_dep_str(r, prop->visible.expr, "  Visible if: ");
+
 	menu = prop->menu->parent;
 	for (i = 0; menu != &rootmenu && i < 8; menu = menu->parent) {
 		bool accessible = menu_is_visible(menu);
@@ -755,18 +796,6 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
 	}
 }
 
-/*
- * get property of type P_SYMBOL
- */
-static struct property *get_symbol_prop(struct symbol *sym)
-{
-	struct property *prop = NULL;
-
-	for_all_properties(sym, prop, P_SYMBOL)
-		break;
-	return prop;
-}
-
 static void get_symbol_props_str(struct gstr *r, struct symbol *sym,
 				 enum prop_type tok, const char *prefix)
 {
@@ -806,32 +835,34 @@ static void get_symbol_str(struct gstr *r, struct symbol *sym,
 			}
 		}
 	}
-	for_all_prompts(sym, prop)
-		get_prompt_str(r, prop, head);
 
-	prop = get_symbol_prop(sym);
-	if (prop) {
-		str_printf(r, "  Defined at %s:%d\n", prop->menu->file->name,
-			prop->menu->lineno);
-		if (!expr_is_yes(prop->visible.expr)) {
-			str_append(r, "  Depends on: ");
-			expr_gstr_print(prop->visible.expr, r);
-			str_append(r, "\n");
+	/* Print the definitions with prompts before the ones without */
+	for_all_properties(sym, prop, P_SYMBOL) {
+		if (prop->menu->prompt) {
+			get_def_str(r, prop->menu);
+			get_prompt_str(r, prop->menu->prompt, head);
 		}
 	}
 
-	get_symbol_props_str(r, sym, P_SELECT, "  Selects: ");
-	if (sym->rev_dep.expr) {
-		expr_gstr_print_revdep(sym->rev_dep.expr, r, yes, "  Selected by [y]:\n");
-		expr_gstr_print_revdep(sym->rev_dep.expr, r, mod, "  Selected by [m]:\n");
-		expr_gstr_print_revdep(sym->rev_dep.expr, r, no, "  Selected by [n]:\n");
+	for_all_properties(sym, prop, P_SYMBOL) {
+		if (!prop->menu->prompt) {
+			get_def_str(r, prop->menu);
+			get_dep_str(r, prop->menu->dep, "  Depends on: ");
+		}
 	}
 
-	get_symbol_props_str(r, sym, P_IMPLY, "  Implies: ");
+	get_symbol_props_str(r, sym, P_SELECT, "Selects: ");
+	if (sym->rev_dep.expr) {
+		expr_gstr_print_revdep(sym->rev_dep.expr, r, yes, "Selected by [y]:\n");
+		expr_gstr_print_revdep(sym->rev_dep.expr, r, mod, "Selected by [m]:\n");
+		expr_gstr_print_revdep(sym->rev_dep.expr, r, no, "Selected by [n]:\n");
+	}
+
+	get_symbol_props_str(r, sym, P_IMPLY, "Implies: ");
 	if (sym->implied.expr) {
-		expr_gstr_print_revdep(sym->implied.expr, r, yes, "  Implied by [y]:\n");
-		expr_gstr_print_revdep(sym->implied.expr, r, mod, "  Implied by [m]:\n");
-		expr_gstr_print_revdep(sym->implied.expr, r, no, "  Implied by [n]:\n");
+		expr_gstr_print_revdep(sym->implied.expr, r, yes, "Implied by [y]:\n");
+		expr_gstr_print_revdep(sym->implied.expr, r, mod, "Implied by [m]:\n");
+		expr_gstr_print_revdep(sym->implied.expr, r, no, "Implied by [n]:\n");
 	}
 
 	str_append(r, "\n\n");
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index b7c1ef7..daf1c15 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -8,6 +8,7 @@
 #define _GNU_SOURCE
 #endif
 #include <string.h>
+#include <strings.h>
 #include <stdlib.h>
 
 #include "lkc.h"
diff --git a/scripts/kconfig/parser.y b/scripts/kconfig/parser.y
index b3eff96..708b6c4 100644
--- a/scripts/kconfig/parser.y
+++ b/scripts/kconfig/parser.y
@@ -90,7 +90,6 @@
 %left T_LESS T_LESS_EQUAL T_GREATER T_GREATER_EQUAL
 %nonassoc T_NOT
 
-%type <string> prompt
 %type <symbol> nonconst_symbol
 %type <symbol> symbol
 %type <type> type logic_type default
@@ -113,7 +112,7 @@
 
 /* mainmenu entry */
 
-mainmenu_stmt: T_MAINMENU prompt T_EOL
+mainmenu_stmt: T_MAINMENU T_WORD_QUOTE T_EOL
 {
 	menu_add_prompt(P_MENU, $2, NULL);
 };
@@ -181,7 +180,7 @@
 		$1);
 };
 
-config_option: T_PROMPT prompt if_expr T_EOL
+config_option: T_PROMPT T_WORD_QUOTE if_expr T_EOL
 {
 	menu_add_prompt(P_PROMPT, $2, $3);
 	printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
@@ -265,7 +264,7 @@
 	| choice_option_list help
 ;
 
-choice_option: T_PROMPT prompt if_expr T_EOL
+choice_option: T_PROMPT T_WORD_QUOTE if_expr T_EOL
 {
 	menu_add_prompt(P_PROMPT, $2, $3);
 	printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
@@ -334,7 +333,7 @@
 
 /* menu entry */
 
-menu: T_MENU prompt T_EOL
+menu: T_MENU T_WORD_QUOTE T_EOL
 {
 	menu_add_entry(NULL);
 	menu_add_prompt(P_MENU, $2, NULL);
@@ -363,7 +362,7 @@
 	| menu_option_list depends
 ;
 
-source_stmt: T_SOURCE prompt T_EOL
+source_stmt: T_SOURCE T_WORD_QUOTE T_EOL
 {
 	printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), $2);
 	zconf_nextfile($2);
@@ -372,7 +371,7 @@
 
 /* comment entry */
 
-comment: T_COMMENT prompt T_EOL
+comment: T_COMMENT T_WORD_QUOTE T_EOL
 {
 	menu_add_entry(NULL);
 	menu_add_prompt(P_COMMENT, $2, NULL);
@@ -429,15 +428,11 @@
 
 prompt_stmt_opt:
 	  /* empty */
-	| prompt if_expr
+	| T_WORD_QUOTE if_expr
 {
 	menu_add_prompt(P_PROMPT, $1, $2);
 };
 
-prompt:	  T_WORD
-	| T_WORD_QUOTE
-;
-
 end:	  T_ENDMENU T_EOL	{ $$ = "menu"; }
 	| T_ENDCHOICE T_EOL	{ $$ = "choice"; }
 	| T_ENDIF T_EOL		{ $$ = "if"; }
@@ -665,7 +660,7 @@
 			break;
 		case P_SYMBOL:
 			fputs( "  symbol ", out);
-			fprintf(out, "%s\n", prop->sym->name);
+			fprintf(out, "%s\n", prop->menu->sym->name);
 			break;
 		default:
 			fprintf(out, "  unknown prop %d!\n", prop->type);
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 08d76d7..e2f8504 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -56,8 +56,6 @@
     print STDERR @_;
 }
 
-my $config = ".config";
-
 my $uname = `uname -r`;
 chomp $uname;
 
@@ -374,7 +372,7 @@
 	    $lsmod = "$dir/lsmod";
 	    last;
 	}
-}
+    }
     if (!defined($lsmod)) {
 	# try just the path
 	$lsmod = "lsmod";
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index f56eec5..8d38b70 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -1273,28 +1273,6 @@ struct symbol *sym_check_deps(struct symbol *sym)
 	return sym2;
 }
 
-struct property *prop_alloc(enum prop_type type, struct symbol *sym)
-{
-	struct property *prop;
-	struct property **propp;
-
-	prop = xmalloc(sizeof(*prop));
-	memset(prop, 0, sizeof(*prop));
-	prop->type = type;
-	prop->sym = sym;
-	prop->file = current_file;
-	prop->lineno = zconf_lineno();
-
-	/* append property to the prop list of symbol */
-	if (sym) {
-		for (propp = &sym->prop; *propp; propp = &(*propp)->next)
-			;
-		*propp = prop;
-	}
-
-	return prop;
-}
-
 struct symbol *prop_get_symbol(struct property *prop)
 {
 	if (prop->expr && (prop->expr->type == E_SYMBOL ||
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index bbe9be2..1919c31 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -250,6 +250,10 @@
 
 info MODINFO modules.builtin.modinfo
 ${OBJCOPY} -j .modinfo -O binary vmlinux.o modules.builtin.modinfo
+info GEN modules.builtin
+# The second line aids cases where multiple modules share the same object.
+tr '\0' '\n' < modules.builtin.modinfo | sed -n 's/^[[:alnum:]:_]*\.file=//p' |
+	tr ' ' '\n' | uniq | sed -e 's:^:kernel/:' -e 's/$/.ko/' > modules.builtin
 
 btf_vmlinux_bin_o=""
 if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 6e892c9..7edfdb2 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -12,6 +12,7 @@
  */
 
 #define _GNU_SOURCE
+#include <elf.h>
 #include <stdio.h>
 #include <ctype.h>
 #include <string.h>
@@ -729,12 +730,6 @@ static void handle_symbol(struct module *mod, struct elf_info *info,
 			break;
 		if (ignore_undef_symbol(info, symname))
 			break;
-/* cope with newer glibc (2.3.4 or higher) STT_ definition in elf.h */
-#if defined(STT_REGISTER) || defined(STT_SPARC_REGISTER)
-/* add compatibility with older glibc */
-#ifndef STT_SPARC_REGISTER
-#define STT_SPARC_REGISTER STT_REGISTER
-#endif
 		if (info->hdr->e_machine == EM_SPARC ||
 		    info->hdr->e_machine == EM_SPARCV9) {
 			/* Ignore register directives. */
@@ -747,7 +742,6 @@ static void handle_symbol(struct module *mod, struct elf_info *info,
 				symname = munged;
 			}
 		}
-#endif
 
 		mod->unres = alloc_symbol(symname,
 					  ELF_ST_BIND(sym->st_info) == STB_WEAK,
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index c4c580f..6df3c9f 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -43,17 +43,66 @@
 
 	# Create the package
 	dpkg-gencontrol -p$pname -P"$pdir"
-	dpkg --build "$pdir" ..
+	dpkg-deb ${KDEB_COMPRESS:+-Z$KDEB_COMPRESS} --build "$pdir" ..
+}
+
+deploy_kernel_headers () {
+	pdir=$1
+
+	rm -rf $pdir
+
+	(
+		cd $srctree
+		find . arch/$SRCARCH -maxdepth 1 -name Makefile\*
+		find include scripts -type f -o -type l
+		find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform
+		find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f
+	) > debian/hdrsrcfiles
+
+	{
+		if is_enabled CONFIG_STACK_VALIDATION; then
+			echo tools/objtool/objtool
+		fi
+
+		find arch/$SRCARCH/include Module.symvers include scripts -type f
+
+		if is_enabled CONFIG_GCC_PLUGINS; then
+			find scripts/gcc-plugins -name \*.so
+		fi
+	} > debian/hdrobjfiles
+
+	destdir=$pdir/usr/src/linux-headers-$version
+	mkdir -p $destdir
+	tar -c -f - -C $srctree -T debian/hdrsrcfiles | tar -xf - -C $destdir
+	tar -c -f - -T debian/hdrobjfiles | tar -xf - -C $destdir
+	rm -f debian/hdrsrcfiles debian/hdrobjfiles
+
+	# copy .config manually to be where it's expected to be
+	cp $KCONFIG_CONFIG $destdir/.config
+
+	mkdir -p $pdir/lib/modules/$version/
+	ln -s /usr/src/linux-headers-$version $pdir/lib/modules/$version/build
+}
+
+deploy_libc_headers () {
+	pdir=$1
+
+	rm -rf $pdir
+
+	$MAKE -f $srctree/Makefile headers
+	$MAKE -f $srctree/Makefile headers_install INSTALL_HDR_PATH=$pdir/usr
+
+	# move asm headers to /usr/include/<libc-machine>/asm to match the structure
+	# used by Debian-based distros (to support multi-arch)
+	host_arch=$(dpkg-architecture -a$(cat debian/arch) -qDEB_HOST_MULTIARCH)
+	mkdir $pdir/usr/include/$host_arch
+	mv $pdir/usr/include/asm $pdir/usr/include/$host_arch/
 }
 
 version=$KERNELRELEASE
-tmpdir="$objtree/debian/tmp"
-kernel_headers_dir="$objtree/debian/hdrtmp"
-libc_headers_dir="$objtree/debian/headertmp"
-dbg_dir="$objtree/debian/dbgtmp"
+tmpdir=debian/linux-image
+dbg_dir=debian/linux-image-dbg
 packagename=linux-image-$version
-kernel_headers_packagename=linux-headers-$version
-libc_headers_packagename=linux-libc-dev
 dbg_packagename=$packagename-dbg
 
 if [ "$ARCH" = "um" ] ; then
@@ -77,15 +126,13 @@
 BUILD_DEBUG=$(if_enabled_echo CONFIG_DEBUG_INFO Yes)
 
 # Setup the directory structure
-rm -rf "$tmpdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files
+rm -rf "$tmpdir" "$dbg_dir" debian/files
 mkdir -m 755 -p "$tmpdir/DEBIAN"
 mkdir -p "$tmpdir/lib" "$tmpdir/boot"
-mkdir -p "$kernel_headers_dir/lib/modules/$version/"
 
-# Build and install the kernel
+# Install the kernel
 if [ "$ARCH" = "um" ] ; then
 	mkdir -p "$tmpdir/usr/lib/uml/modules/$version" "$tmpdir/usr/bin" "$tmpdir/usr/share/doc/$packagename"
-	$MAKE linux
 	cp System.map "$tmpdir/usr/lib/uml/modules/$version/System.map"
 	cp $KCONFIG_CONFIG "$tmpdir/usr/share/doc/$packagename/config"
 	gzip "$tmpdir/usr/share/doc/$packagename/config"
@@ -129,16 +176,6 @@
 	fi
 fi
 
-if [ "$ARCH" != "um" ]; then
-	$MAKE -f $srctree/Makefile headers
-	$MAKE -f $srctree/Makefile headers_install INSTALL_HDR_PATH="$libc_headers_dir/usr"
-	# move asm headers to /usr/include/<libc-machine>/asm to match the structure
-	# used by Debian-based distros (to support multi-arch)
-	host_arch=$(dpkg-architecture -a$(cat debian/arch) -qDEB_HOST_MULTIARCH)
-	mkdir $libc_headers_dir/usr/include/$host_arch
-	mv $libc_headers_dir/usr/include/asm $libc_headers_dir/usr/include/$host_arch/
-fi
-
 # Install the maintainer scripts
 # Note: hook scripts under /etc/kernel are also executed by official Debian
 # kernel packages, as well as kernel packages built using make-kpkg.
@@ -164,29 +201,12 @@
 	chmod 755 "$tmpdir/DEBIAN/$script"
 done
 
-# Build kernel header package
-(cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles"
-(cd $srctree; find arch/*/include include scripts -type f -o -type l) >> "$objtree/debian/hdrsrcfiles"
-(cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles"
-(cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles"
-if is_enabled CONFIG_STACK_VALIDATION; then
-	(cd $objtree; find tools/objtool -type f -executable) >> "$objtree/debian/hdrobjfiles"
-fi
-(cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
-if is_enabled CONFIG_GCC_PLUGINS; then
-	(cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles"
-fi
-destdir=$kernel_headers_dir/usr/src/linux-headers-$version
-mkdir -p "$destdir"
-(cd $srctree; tar -c -f - -T -) < "$objtree/debian/hdrsrcfiles" | (cd $destdir; tar -xf -)
-(cd $objtree; tar -c -f - -T -) < "$objtree/debian/hdrobjfiles" | (cd $destdir; tar -xf -)
-(cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be
-ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
-rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
-
 if [ "$ARCH" != "um" ]; then
-	create_package "$kernel_headers_packagename" "$kernel_headers_dir"
-	create_package "$libc_headers_packagename" "$libc_headers_dir"
+	deploy_kernel_headers debian/linux-headers
+	create_package linux-headers-$version debian/linux-headers
+
+	deploy_libc_headers debian/linux-libc-dev
+	create_package linux-libc-dev debian/linux-libc-dev
 fi
 
 create_package "$packagename" "$tmpdir"
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index 672b593..ffa838f 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -39,6 +39,8 @@
 accquire||acquire
 accquired||acquired
 accross||across
+accumalate||accumulate
+accumalator||accumulator
 acessable||accessible
 acess||access
 acessing||accessing
@@ -106,6 +108,7 @@
 alot||a lot
 alow||allow
 alows||allows
+alreay||already
 alredy||already
 altough||although
 alue||value
@@ -241,6 +244,7 @@
 calescing||coalescing
 calle||called
 callibration||calibration
+callled||called
 calucate||calculate
 calulate||calculate
 cancelation||cancellation
@@ -311,6 +315,7 @@
 comparsion||comparison
 compatability||compatibility
 compatable||compatible
+compatibililty||compatibility
 compatibiliy||compatibility
 compatibilty||compatibility
 compatiblity||compatibility
@@ -330,6 +335,7 @@
 conbination||combination
 conditionaly||conditionally
 conditon||condition
+condtion||condition
 conected||connected
 conector||connector
 connecetd||connected
@@ -388,6 +394,8 @@
 deafult||default
 deamon||daemon
 debouce||debounce
+decendant||descendant
+decendants||descendants
 decompres||decompress
 decsribed||described
 decription||description
@@ -411,11 +419,13 @@
 delares||declares
 delaring||declaring
 delemiter||delimiter
+delievered||delivered
 demodualtor||demodulator
 demension||dimension
 dependancies||dependencies
 dependancy||dependency
 dependant||dependent
+dependend||dependent
 depreacted||deprecated
 depreacte||deprecate
 desactivate||deactivate
@@ -791,6 +801,7 @@
 irrelevent||irrelevant
 isnt||isn't
 isssue||issue
+issus||issues
 iternations||iterations
 itertation||iteration
 itslef||itself
@@ -995,6 +1006,7 @@
 pendantic||pedantic
 peprocessor||preprocessor
 perfoming||performing
+perfomring||performing
 peripherial||peripheral
 permissons||permissions
 peroid||period
@@ -1166,6 +1178,8 @@
 retreiving||retrieving
 retrive||retrieve
 retrived||retrieved
+retrun||return
+retun||return
 retuned||returned
 reudce||reduce
 reuest||request
diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
index 470ccfe..a8f0c00 100755
--- a/scripts/sphinx-pre-install
+++ b/scripts/sphinx-pre-install
@@ -272,7 +272,7 @@
 
 	open IN, "$sphinx --version 2>&1 |" or die "$sphinx returned an error";
 	while (<IN>) {
-		if (m/^\s*sphinx-build\s+([\d\.]+)$/) {
+		if (m/^\s*sphinx-build\s+([\d\.]+)(\+\/[\da-f]+)?$/) {
 			$cur_version=$1;
 			last;
 		}
diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
index a422a34..0fe3368 100644
--- a/security/apparmor/Kconfig
+++ b/security/apparmor/Kconfig
@@ -68,3 +68,19 @@
 	  Set the default value of the apparmor.debug kernel parameter.
 	  When enabled, various debug messages will be logged to
 	  the kernel message buffer.
+
+config SECURITY_APPARMOR_KUNIT_TEST
+	bool "Build KUnit tests for policy_unpack.c"
+	depends on KUNIT=y && SECURITY_APPARMOR
+	help
+	  This builds the AppArmor KUnit tests.
+
+	  KUnit tests run during boot and output the results to the debug log
+	  in TAP format (http://testanything.org/). The tests are only useful
+	  for kernel developers running the KUnit test harness and are not
+	  intended for inclusion in a production build.
+
+	  For more information on KUnit and unit tests in general please refer
+	  to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+	  If unsure, say N.
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 47aff87..280741f 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -2573,16 +2573,18 @@ static const char *policy_get_link(struct dentry *dentry,
 {
 	struct aa_ns *ns;
 	struct path path;
+	int error;
 
 	if (!dentry)
 		return ERR_PTR(-ECHILD);
+
 	ns = aa_get_current_ns();
 	path.mnt = mntget(aafs_mnt);
 	path.dentry = dget(ns_dir(ns));
-	nd_jump_link(&path);
+	error = nd_jump_link(&path);
 	aa_put_ns(ns);
 
-	return NULL;
+	return ERR_PTR(error);
 }
 
 static int policy_readlink(struct dentry *dentry, char __user *buffer,
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 8036431..2d743c0 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -1228,3 +1228,7 @@ int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
 
 	return error;
 }
+
+#ifdef CONFIG_SECURITY_APPARMOR_KUNIT_TEST
+#include "policy_unpack_test.c"
+#endif /* CONFIG_SECURITY_APPARMOR_KUNIT_TEST */
diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c
new file mode 100644
index 0000000..533137f
--- /dev/null
+++ b/security/apparmor/policy_unpack_test.c
@@ -0,0 +1,607 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for AppArmor's policy unpack.
+ */
+
+#include <kunit/test.h>
+
+#include "include/policy.h"
+#include "include/policy_unpack.h"
+
+#define TEST_STRING_NAME "TEST_STRING"
+#define TEST_STRING_DATA "testing"
+#define TEST_STRING_BUF_OFFSET \
+	(3 + strlen(TEST_STRING_NAME) + 1)
+
+#define TEST_U32_NAME "U32_TEST"
+#define TEST_U32_DATA ((u32)0x01020304)
+#define TEST_NAMED_U32_BUF_OFFSET \
+	(TEST_STRING_BUF_OFFSET + 3 + strlen(TEST_STRING_DATA) + 1)
+#define TEST_U32_BUF_OFFSET \
+	(TEST_NAMED_U32_BUF_OFFSET + 3 + strlen(TEST_U32_NAME) + 1)
+
+#define TEST_U16_OFFSET (TEST_U32_BUF_OFFSET + 3)
+#define TEST_U16_DATA ((u16)(TEST_U32_DATA >> 16))
+
+#define TEST_U64_NAME "U64_TEST"
+#define TEST_U64_DATA ((u64)0x0102030405060708)
+#define TEST_NAMED_U64_BUF_OFFSET (TEST_U32_BUF_OFFSET + sizeof(u32) + 1)
+#define TEST_U64_BUF_OFFSET \
+	(TEST_NAMED_U64_BUF_OFFSET + 3 + strlen(TEST_U64_NAME) + 1)
+
+#define TEST_BLOB_NAME "BLOB_TEST"
+#define TEST_BLOB_DATA "\xde\xad\x00\xbe\xef"
+#define TEST_BLOB_DATA_SIZE (ARRAY_SIZE(TEST_BLOB_DATA))
+#define TEST_NAMED_BLOB_BUF_OFFSET (TEST_U64_BUF_OFFSET + sizeof(u64) + 1)
+#define TEST_BLOB_BUF_OFFSET \
+	(TEST_NAMED_BLOB_BUF_OFFSET + 3 + strlen(TEST_BLOB_NAME) + 1)
+
+#define TEST_ARRAY_NAME "ARRAY_TEST"
+#define TEST_ARRAY_SIZE 16
+#define TEST_NAMED_ARRAY_BUF_OFFSET \
+	(TEST_BLOB_BUF_OFFSET + 5 + TEST_BLOB_DATA_SIZE)
+#define TEST_ARRAY_BUF_OFFSET \
+	(TEST_NAMED_ARRAY_BUF_OFFSET + 3 + strlen(TEST_ARRAY_NAME) + 1)
+
+struct policy_unpack_fixture {
+	struct aa_ext *e;
+	size_t e_size;
+};
+
+struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
+				   struct kunit *test, size_t buf_size)
+{
+	char *buf;
+	struct aa_ext *e;
+
+	buf = kunit_kzalloc(test, buf_size, GFP_USER);
+	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, buf);
+
+	e = kunit_kmalloc(test, sizeof(*e), GFP_USER);
+	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, e);
+
+	e->start = buf;
+	e->end = e->start + buf_size;
+	e->pos = e->start;
+
+	*buf = AA_NAME;
+	*(buf + 1) = strlen(TEST_STRING_NAME) + 1;
+	strcpy(buf + 3, TEST_STRING_NAME);
+
+	buf = e->start + TEST_STRING_BUF_OFFSET;
+	*buf = AA_STRING;
+	*(buf + 1) = strlen(TEST_STRING_DATA) + 1;
+	strcpy(buf + 3, TEST_STRING_DATA);
+
+	buf = e->start + TEST_NAMED_U32_BUF_OFFSET;
+	*buf = AA_NAME;
+	*(buf + 1) = strlen(TEST_U32_NAME) + 1;
+	strcpy(buf + 3, TEST_U32_NAME);
+	*(buf + 3 + strlen(TEST_U32_NAME) + 1) = AA_U32;
+	*((u32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = TEST_U32_DATA;
+
+	buf = e->start + TEST_NAMED_U64_BUF_OFFSET;
+	*buf = AA_NAME;
+	*(buf + 1) = strlen(TEST_U64_NAME) + 1;
+	strcpy(buf + 3, TEST_U64_NAME);
+	*(buf + 3 + strlen(TEST_U64_NAME) + 1) = AA_U64;
+	*((u64 *)(buf + 3 + strlen(TEST_U64_NAME) + 2)) = TEST_U64_DATA;
+
+	buf = e->start + TEST_NAMED_BLOB_BUF_OFFSET;
+	*buf = AA_NAME;
+	*(buf + 1) = strlen(TEST_BLOB_NAME) + 1;
+	strcpy(buf + 3, TEST_BLOB_NAME);
+	*(buf + 3 + strlen(TEST_BLOB_NAME) + 1) = AA_BLOB;
+	*(buf + 3 + strlen(TEST_BLOB_NAME) + 2) = TEST_BLOB_DATA_SIZE;
+	memcpy(buf + 3 + strlen(TEST_BLOB_NAME) + 6,
+		TEST_BLOB_DATA, TEST_BLOB_DATA_SIZE);
+
+	buf = e->start + TEST_NAMED_ARRAY_BUF_OFFSET;
+	*buf = AA_NAME;
+	*(buf + 1) = strlen(TEST_ARRAY_NAME) + 1;
+	strcpy(buf + 3, TEST_ARRAY_NAME);
+	*(buf + 3 + strlen(TEST_ARRAY_NAME) + 1) = AA_ARRAY;
+	*((u16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = TEST_ARRAY_SIZE;
+
+	return e;
+}
+
+static int policy_unpack_test_init(struct kunit *test)
+{
+	size_t e_size = TEST_ARRAY_BUF_OFFSET + sizeof(u16) + 1;
+	struct policy_unpack_fixture *puf;
+
+	puf = kunit_kmalloc(test, sizeof(*puf), GFP_USER);
+	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, puf);
+
+	puf->e_size = e_size;
+	puf->e = build_aa_ext_struct(puf, test, e_size);
+
+	test->priv = puf;
+	return 0;
+}
+
+static void policy_unpack_test_inbounds_when_inbounds(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+
+	KUNIT_EXPECT_TRUE(test, inbounds(puf->e, 0));
+	KUNIT_EXPECT_TRUE(test, inbounds(puf->e, puf->e_size / 2));
+	KUNIT_EXPECT_TRUE(test, inbounds(puf->e, puf->e_size));
+}
+
+static void policy_unpack_test_inbounds_when_out_of_bounds(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+
+	KUNIT_EXPECT_FALSE(test, inbounds(puf->e, puf->e_size + 1));
+}
+
+static void policy_unpack_test_unpack_array_with_null_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	u16 array_size;
+
+	puf->e->pos += TEST_ARRAY_BUF_OFFSET;
+
+	array_size = unpack_array(puf->e, NULL);
+
+	KUNIT_EXPECT_EQ(test, array_size, (u16)TEST_ARRAY_SIZE);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+		puf->e->start + TEST_ARRAY_BUF_OFFSET + sizeof(u16) + 1);
+}
+
+static void policy_unpack_test_unpack_array_with_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	const char name[] = TEST_ARRAY_NAME;
+	u16 array_size;
+
+	puf->e->pos += TEST_NAMED_ARRAY_BUF_OFFSET;
+
+	array_size = unpack_array(puf->e, name);
+
+	KUNIT_EXPECT_EQ(test, array_size, (u16)TEST_ARRAY_SIZE);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+		puf->e->start + TEST_ARRAY_BUF_OFFSET + sizeof(u16) + 1);
+}
+
+static void policy_unpack_test_unpack_array_out_of_bounds(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	const char name[] = TEST_ARRAY_NAME;
+	u16 array_size;
+
+	puf->e->pos += TEST_NAMED_ARRAY_BUF_OFFSET;
+	puf->e->end = puf->e->start + TEST_ARRAY_BUF_OFFSET + sizeof(u16);
+
+	array_size = unpack_array(puf->e, name);
+
+	KUNIT_EXPECT_EQ(test, array_size, (u16)0);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+		puf->e->start + TEST_NAMED_ARRAY_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_blob_with_null_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	char *blob = NULL;
+	size_t size;
+
+	puf->e->pos += TEST_BLOB_BUF_OFFSET;
+	size = unpack_blob(puf->e, &blob, NULL);
+
+	KUNIT_ASSERT_EQ(test, size, TEST_BLOB_DATA_SIZE);
+	KUNIT_EXPECT_TRUE(test,
+		memcmp(blob, TEST_BLOB_DATA, TEST_BLOB_DATA_SIZE) == 0);
+}
+
+static void policy_unpack_test_unpack_blob_with_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	char *blob = NULL;
+	size_t size;
+
+	puf->e->pos += TEST_NAMED_BLOB_BUF_OFFSET;
+	size = unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
+
+	KUNIT_ASSERT_EQ(test, size, TEST_BLOB_DATA_SIZE);
+	KUNIT_EXPECT_TRUE(test,
+		memcmp(blob, TEST_BLOB_DATA, TEST_BLOB_DATA_SIZE) == 0);
+}
+
+static void policy_unpack_test_unpack_blob_out_of_bounds(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	char *blob = NULL;
+	void *start;
+	int size;
+
+	puf->e->pos += TEST_NAMED_BLOB_BUF_OFFSET;
+	start = puf->e->pos;
+	puf->e->end = puf->e->start + TEST_BLOB_BUF_OFFSET
+		+ TEST_BLOB_DATA_SIZE - 1;
+
+	size = unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
+
+	KUNIT_EXPECT_EQ(test, size, 0);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
+}
+
+static void policy_unpack_test_unpack_str_with_null_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	const char *string = NULL;
+	size_t size;
+
+	puf->e->pos += TEST_STRING_BUF_OFFSET;
+	size = unpack_str(puf->e, &string, NULL);
+
+	KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+	KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+}
+
+static void policy_unpack_test_unpack_str_with_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	const char *string = NULL;
+	size_t size;
+
+	size = unpack_str(puf->e, &string, TEST_STRING_NAME);
+
+	KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+	KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+}
+
+static void policy_unpack_test_unpack_str_out_of_bounds(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	const char *string = NULL;
+	void *start = puf->e->pos;
+	int size;
+
+	puf->e->end = puf->e->pos + TEST_STRING_BUF_OFFSET
+		+ strlen(TEST_STRING_DATA) - 1;
+
+	size = unpack_str(puf->e, &string, TEST_STRING_NAME);
+
+	KUNIT_EXPECT_EQ(test, size, 0);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
+}
+
+static void policy_unpack_test_unpack_strdup_with_null_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	char *string = NULL;
+	size_t size;
+
+	puf->e->pos += TEST_STRING_BUF_OFFSET;
+	size = unpack_strdup(puf->e, &string, NULL);
+
+	KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+	KUNIT_EXPECT_FALSE(test,
+			   ((uintptr_t)puf->e->start <= (uintptr_t)string)
+			   && ((uintptr_t)string <= (uintptr_t)puf->e->end));
+	KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+}
+
+static void policy_unpack_test_unpack_strdup_with_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	char *string = NULL;
+	size_t size;
+
+	size = unpack_strdup(puf->e, &string, TEST_STRING_NAME);
+
+	KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+	KUNIT_EXPECT_FALSE(test,
+			   ((uintptr_t)puf->e->start <= (uintptr_t)string)
+			   && ((uintptr_t)string <= (uintptr_t)puf->e->end));
+	KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+}
+
+static void policy_unpack_test_unpack_strdup_out_of_bounds(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	void *start = puf->e->pos;
+	char *string = NULL;
+	int size;
+
+	puf->e->end = puf->e->pos + TEST_STRING_BUF_OFFSET
+		+ strlen(TEST_STRING_DATA) - 1;
+
+	size = unpack_strdup(puf->e, &string, TEST_STRING_NAME);
+
+	KUNIT_EXPECT_EQ(test, size, 0);
+	KUNIT_EXPECT_PTR_EQ(test, string, (char *)NULL);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
+}
+
+static void policy_unpack_test_unpack_nameX_with_null_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	bool success;
+
+	puf->e->pos += TEST_U32_BUF_OFFSET;
+
+	success = unpack_nameX(puf->e, AA_U32, NULL);
+
+	KUNIT_EXPECT_TRUE(test, success);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+			    puf->e->start + TEST_U32_BUF_OFFSET + 1);
+}
+
+static void policy_unpack_test_unpack_nameX_with_wrong_code(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	bool success;
+
+	puf->e->pos += TEST_U32_BUF_OFFSET;
+
+	success = unpack_nameX(puf->e, AA_BLOB, NULL);
+
+	KUNIT_EXPECT_FALSE(test, success);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+			    puf->e->start + TEST_U32_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_nameX_with_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	const char name[] = TEST_U32_NAME;
+	bool success;
+
+	puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+
+	success = unpack_nameX(puf->e, AA_U32, name);
+
+	KUNIT_EXPECT_TRUE(test, success);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+			    puf->e->start + TEST_U32_BUF_OFFSET + 1);
+}
+
+static void policy_unpack_test_unpack_nameX_with_wrong_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	static const char name[] = "12345678";
+	bool success;
+
+	puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+
+	success = unpack_nameX(puf->e, AA_U32, name);
+
+	KUNIT_EXPECT_FALSE(test, success);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+			    puf->e->start + TEST_NAMED_U32_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_u16_chunk_basic(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	char *chunk = NULL;
+	size_t size;
+
+	puf->e->pos += TEST_U16_OFFSET;
+	/*
+	 * WARNING: For unit testing purposes, we're pushing puf->e->end past
+	 * the end of the allocated memory. Doing anything other than comparing
+	 * memory addresses is dangerous.
+	 */
+	puf->e->end += TEST_U16_DATA;
+
+	size = unpack_u16_chunk(puf->e, &chunk);
+
+	KUNIT_EXPECT_PTR_EQ(test, (void *)chunk,
+			    puf->e->start + TEST_U16_OFFSET + 2);
+	KUNIT_EXPECT_EQ(test, size, (size_t)TEST_U16_DATA);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, (void *)(chunk + TEST_U16_DATA));
+}
+
+static void policy_unpack_test_unpack_u16_chunk_out_of_bounds_1(
+		struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	char *chunk = NULL;
+	size_t size;
+
+	puf->e->pos = puf->e->end - 1;
+
+	size = unpack_u16_chunk(puf->e, &chunk);
+
+	KUNIT_EXPECT_EQ(test, size, (size_t)0);
+	KUNIT_EXPECT_PTR_EQ(test, chunk, (char *)NULL);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, puf->e->end - 1);
+}
+
+static void policy_unpack_test_unpack_u16_chunk_out_of_bounds_2(
+		struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	char *chunk = NULL;
+	size_t size;
+
+	puf->e->pos += TEST_U16_OFFSET;
+	/*
+	 * WARNING: For unit testing purposes, we're pushing puf->e->end past
+	 * the end of the allocated memory. Doing anything other than comparing
+	 * memory addresses is dangerous.
+	 */
+	puf->e->end = puf->e->pos + TEST_U16_DATA - 1;
+
+	size = unpack_u16_chunk(puf->e, &chunk);
+
+	KUNIT_EXPECT_EQ(test, size, (size_t)0);
+	KUNIT_EXPECT_PTR_EQ(test, chunk, (char *)NULL);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, puf->e->start + TEST_U16_OFFSET);
+}
+
+static void policy_unpack_test_unpack_u32_with_null_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	bool success;
+	u32 data;
+
+	puf->e->pos += TEST_U32_BUF_OFFSET;
+
+	success = unpack_u32(puf->e, &data, NULL);
+
+	KUNIT_EXPECT_TRUE(test, success);
+	KUNIT_EXPECT_EQ(test, data, TEST_U32_DATA);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+			puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32) + 1);
+}
+
+static void policy_unpack_test_unpack_u32_with_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	const char name[] = TEST_U32_NAME;
+	bool success;
+	u32 data;
+
+	puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+
+	success = unpack_u32(puf->e, &data, name);
+
+	KUNIT_EXPECT_TRUE(test, success);
+	KUNIT_EXPECT_EQ(test, data, TEST_U32_DATA);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+			puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32) + 1);
+}
+
+static void policy_unpack_test_unpack_u32_out_of_bounds(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	const char name[] = TEST_U32_NAME;
+	bool success;
+	u32 data;
+
+	puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+	puf->e->end = puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32);
+
+	success = unpack_u32(puf->e, &data, name);
+
+	KUNIT_EXPECT_FALSE(test, success);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+			puf->e->start + TEST_NAMED_U32_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_u64_with_null_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	bool success;
+	u64 data;
+
+	puf->e->pos += TEST_U64_BUF_OFFSET;
+
+	success = unpack_u64(puf->e, &data, NULL);
+
+	KUNIT_EXPECT_TRUE(test, success);
+	KUNIT_EXPECT_EQ(test, data, TEST_U64_DATA);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+			puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64) + 1);
+}
+
+static void policy_unpack_test_unpack_u64_with_name(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	const char name[] = TEST_U64_NAME;
+	bool success;
+	u64 data;
+
+	puf->e->pos += TEST_NAMED_U64_BUF_OFFSET;
+
+	success = unpack_u64(puf->e, &data, name);
+
+	KUNIT_EXPECT_TRUE(test, success);
+	KUNIT_EXPECT_EQ(test, data, TEST_U64_DATA);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+			puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64) + 1);
+}
+
+static void policy_unpack_test_unpack_u64_out_of_bounds(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	const char name[] = TEST_U64_NAME;
+	bool success;
+	u64 data;
+
+	puf->e->pos += TEST_NAMED_U64_BUF_OFFSET;
+	puf->e->end = puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64);
+
+	success = unpack_u64(puf->e, &data, name);
+
+	KUNIT_EXPECT_FALSE(test, success);
+	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+			puf->e->start + TEST_NAMED_U64_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_X_code_match(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	bool success = unpack_X(puf->e, AA_NAME);
+
+	KUNIT_EXPECT_TRUE(test, success);
+	KUNIT_EXPECT_TRUE(test, puf->e->pos == puf->e->start + 1);
+}
+
+static void policy_unpack_test_unpack_X_code_mismatch(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	bool success = unpack_X(puf->e, AA_STRING);
+
+	KUNIT_EXPECT_FALSE(test, success);
+	KUNIT_EXPECT_TRUE(test, puf->e->pos == puf->e->start);
+}
+
+static void policy_unpack_test_unpack_X_out_of_bounds(struct kunit *test)
+{
+	struct policy_unpack_fixture *puf = test->priv;
+	bool success;
+
+	puf->e->pos = puf->e->end;
+	success = unpack_X(puf->e, AA_NAME);
+
+	KUNIT_EXPECT_FALSE(test, success);
+}
+
+static struct kunit_case apparmor_policy_unpack_test_cases[] = {
+	KUNIT_CASE(policy_unpack_test_inbounds_when_inbounds),
+	KUNIT_CASE(policy_unpack_test_inbounds_when_out_of_bounds),
+	KUNIT_CASE(policy_unpack_test_unpack_array_with_null_name),
+	KUNIT_CASE(policy_unpack_test_unpack_array_with_name),
+	KUNIT_CASE(policy_unpack_test_unpack_array_out_of_bounds),
+	KUNIT_CASE(policy_unpack_test_unpack_blob_with_null_name),
+	KUNIT_CASE(policy_unpack_test_unpack_blob_with_name),
+	KUNIT_CASE(policy_unpack_test_unpack_blob_out_of_bounds),
+	KUNIT_CASE(policy_unpack_test_unpack_nameX_with_null_name),
+	KUNIT_CASE(policy_unpack_test_unpack_nameX_with_wrong_code),
+	KUNIT_CASE(policy_unpack_test_unpack_nameX_with_name),
+	KUNIT_CASE(policy_unpack_test_unpack_nameX_with_wrong_name),
+	KUNIT_CASE(policy_unpack_test_unpack_str_with_null_name),
+	KUNIT_CASE(policy_unpack_test_unpack_str_with_name),
+	KUNIT_CASE(policy_unpack_test_unpack_str_out_of_bounds),
+	KUNIT_CASE(policy_unpack_test_unpack_strdup_with_null_name),
+	KUNIT_CASE(policy_unpack_test_unpack_strdup_with_name),
+	KUNIT_CASE(policy_unpack_test_unpack_strdup_out_of_bounds),
+	KUNIT_CASE(policy_unpack_test_unpack_u16_chunk_basic),
+	KUNIT_CASE(policy_unpack_test_unpack_u16_chunk_out_of_bounds_1),
+	KUNIT_CASE(policy_unpack_test_unpack_u16_chunk_out_of_bounds_2),
+	KUNIT_CASE(policy_unpack_test_unpack_u32_with_null_name),
+	KUNIT_CASE(policy_unpack_test_unpack_u32_with_name),
+	KUNIT_CASE(policy_unpack_test_unpack_u32_out_of_bounds),
+	KUNIT_CASE(policy_unpack_test_unpack_u64_with_null_name),
+	KUNIT_CASE(policy_unpack_test_unpack_u64_with_name),
+	KUNIT_CASE(policy_unpack_test_unpack_u64_out_of_bounds),
+	KUNIT_CASE(policy_unpack_test_unpack_X_code_match),
+	KUNIT_CASE(policy_unpack_test_unpack_X_code_mismatch),
+	KUNIT_CASE(policy_unpack_test_unpack_X_out_of_bounds),
+	{},
+};
+
+static struct kunit_suite apparmor_policy_unpack_test_module = {
+	.name = "apparmor_policy_unpack",
+	.init = policy_unpack_test_init,
+	.test_cases = apparmor_policy_unpack_test_cases,
+};
+
+kunit_test_suite(apparmor_policy_unpack_test_module);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index d9e8b21..4b6991e 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2778,7 +2778,7 @@ static int selinux_fs_context_dup(struct fs_context *fc,
 	return 0;
 }
 
-static const struct fs_parameter_spec selinux_param_specs[] = {
+static const struct fs_parameter_spec selinux_fs_parameters[] = {
 	fsparam_string(CONTEXT_STR,	Opt_context),
 	fsparam_string(DEFCONTEXT_STR,	Opt_defcontext),
 	fsparam_string(FSCONTEXT_STR,	Opt_fscontext),
@@ -2787,18 +2787,13 @@ static const struct fs_parameter_spec selinux_param_specs[] = {
 	{}
 };
 
-static const struct fs_parameter_description selinux_fs_parameters = {
-	.name		= "SELinux",
-	.specs		= selinux_param_specs,
-};
-
 static int selinux_fs_context_parse_param(struct fs_context *fc,
 					  struct fs_parameter *param)
 {
 	struct fs_parse_result result;
 	int opt, rc;
 
-	opt = fs_parse(fc, &selinux_fs_parameters, param, &result);
+	opt = fs_parse(fc, selinux_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
 
@@ -7193,7 +7188,7 @@ static __init int selinux_init(void)
 	else
 		pr_debug("SELinux:  Starting in permissive mode\n");
 
-	fs_validate_description(&selinux_fs_parameters);
+	fs_validate_description("selinux", selinux_fs_parameters);
 
 	return 0;
 }
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index ecea41c..8c61d175 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -678,7 +678,7 @@ static int smack_fs_context_dup(struct fs_context *fc,
 	return 0;
 }
 
-static const struct fs_parameter_spec smack_param_specs[] = {
+static const struct fs_parameter_spec smack_fs_parameters[] = {
 	fsparam_string("smackfsdef",		Opt_fsdefault),
 	fsparam_string("smackfsdefault",	Opt_fsdefault),
 	fsparam_string("smackfsfloor",		Opt_fsfloor),
@@ -688,11 +688,6 @@ static const struct fs_parameter_spec smack_param_specs[] = {
 	{}
 };
 
-static const struct fs_parameter_description smack_fs_parameters = {
-	.name		= "smack",
-	.specs		= smack_param_specs,
-};
-
 /**
  * smack_fs_context_parse_param - Parse a single mount parameter
  * @fc: The new filesystem context being constructed.
@@ -707,7 +702,7 @@ static int smack_fs_context_parse_param(struct fs_context *fc,
 	struct fs_parse_result result;
 	int opt, rc;
 
-	opt = fs_parse(fc, &smack_fs_parameters, param, &result);
+	opt = fs_parse(fc, smack_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
 
@@ -2831,42 +2826,39 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
 				int addrlen)
 {
 	int rc = 0;
-#if IS_ENABLED(CONFIG_IPV6)
-	struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
-#endif
-#ifdef SMACK_IPV6_SECMARK_LABELING
-	struct smack_known *rsp;
-	struct socket_smack *ssp;
-#endif
 
 	if (sock->sk == NULL)
 		return 0;
-
+	if (sock->sk->sk_family != PF_INET &&
+	    (!IS_ENABLED(CONFIG_IPV6) || sock->sk->sk_family != PF_INET6))
+		return 0;
+	if (addrlen < offsetofend(struct sockaddr, sa_family))
+		return 0;
+	if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) {
+		struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
 #ifdef SMACK_IPV6_SECMARK_LABELING
-	ssp = sock->sk->sk_security;
+		struct smack_known *rsp;
 #endif
 
-	switch (sock->sk->sk_family) {
-	case PF_INET:
-		if (addrlen < sizeof(struct sockaddr_in) ||
-		    sap->sa_family != AF_INET)
-			return -EINVAL;
-		rc = smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap);
-		break;
-	case PF_INET6:
-		if (addrlen < SIN6_LEN_RFC2133 || sap->sa_family != AF_INET6)
-			return -EINVAL;
+		if (addrlen < SIN6_LEN_RFC2133)
+			return 0;
 #ifdef SMACK_IPV6_SECMARK_LABELING
 		rsp = smack_ipv6host_label(sip);
-		if (rsp != NULL)
+		if (rsp != NULL) {
+			struct socket_smack *ssp = sock->sk->sk_security;
+
 			rc = smk_ipv6_check(ssp->smk_out, rsp, sip,
-						SMK_CONNECTING);
+					    SMK_CONNECTING);
+		}
 #endif
 #ifdef SMACK_IPV6_PORT_LABELING
 		rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING);
 #endif
-		break;
+		return rc;
 	}
+	if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in))
+		return 0;
+	rc = smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap);
 	return rc;
 }
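
The rewritten smack_socket_connect() now returns 0 (skip mediation) rather than -EINVAL when the family or address length does not match, and only then dispatches per address family. Below is a userspace-flavoured sketch of that validate-then-dispatch shape, with offsetofend() re-defined locally since it is a kernel macro and sizeof(struct sockaddr_in6) standing in for SIN6_LEN_RFC2133:

/* Standalone illustration of "validate family/length, then dispatch". */
#include <netinet/in.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/socket.h>

#define offsetofend(type, member) \
	(offsetof(type, member) + sizeof(((type *)0)->member))

static int check_connect_addr(const struct sockaddr *sap, socklen_t addrlen)
{
	/* Too short even to read sa_family: nothing to mediate. */
	if (addrlen < offsetofend(struct sockaddr, sa_family))
		return 0;

	if (sap->sa_family == AF_INET6) {
		if (addrlen < sizeof(struct sockaddr_in6))
			return 0;
		return 1;	/* IPv6 checks would run here */
	}

	if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in))
		return 0;

	return 2;		/* IPv4 checks would run here */
}

int main(void)
{
	struct sockaddr_in sin = { .sin_family = AF_INET };

	printf("%d\n", check_connect_addr((struct sockaddr *)&sin, sizeof(sin)));
	return 0;
}
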
 
diff --git a/sound/core/info.c b/sound/core/info.c
index 6801d81..ca87ae4 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -282,17 +282,16 @@ static int snd_info_entry_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static const struct file_operations snd_info_entry_operations =
+static const struct proc_ops snd_info_entry_operations =
 {
-	.owner =		THIS_MODULE,
-	.llseek =		snd_info_entry_llseek,
-	.read =			snd_info_entry_read,
-	.write =		snd_info_entry_write,
-	.poll =			snd_info_entry_poll,
-	.unlocked_ioctl =	snd_info_entry_ioctl,
-	.mmap =			snd_info_entry_mmap,
-	.open =			snd_info_entry_open,
-	.release =		snd_info_entry_release,
+	.proc_lseek	= snd_info_entry_llseek,
+	.proc_read	= snd_info_entry_read,
+	.proc_write	= snd_info_entry_write,
+	.proc_poll	= snd_info_entry_poll,
+	.proc_ioctl	= snd_info_entry_ioctl,
+	.proc_mmap	= snd_info_entry_mmap,
+	.proc_open	= snd_info_entry_open,
+	.proc_release	= snd_info_entry_release,
 };
 
 /*
@@ -421,14 +420,13 @@ static int snd_info_text_entry_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static const struct file_operations snd_info_text_entry_ops =
+static const struct proc_ops snd_info_text_entry_ops =
 {
-	.owner =		THIS_MODULE,
-	.open =			snd_info_text_entry_open,
-	.release =		snd_info_text_entry_release,
-	.write =		snd_info_text_entry_write,
-	.llseek =		seq_lseek,
-	.read =			seq_read,
+	.proc_open	= snd_info_text_entry_open,
+	.proc_release	= snd_info_text_entry_release,
+	.proc_write	= snd_info_text_entry_write,
+	.proc_lseek	= seq_lseek,
+	.proc_read	= seq_read,
 };
 
 static struct snd_info_entry *create_subdir(struct module *mod,
@@ -810,7 +808,7 @@ static int __snd_info_register(struct snd_info_entry *entry)
 			return -ENOMEM;
 		}
 	} else {
-		const struct file_operations *ops;
+		const struct proc_ops *ops;
 		if (entry->content == SNDRV_INFO_CONTENT_DATA)
 			ops = &snd_info_entry_operations;
 		else
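
A minimal sketch (not part of the patch) of the proc_ops conversion applied
to sound/core/info.c here: procfs entries register a struct proc_ops, which
drops the .owner field and uses proc_-prefixed members, instead of a
struct file_operations. The "demo" names below are hypothetical.

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct proc_ops demo_proc_ops = {
	.proc_open	= demo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/* registered with e.g. proc_create("demo", 0444, NULL, &demo_proc_ops); */
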
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 967c689..590a46a 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -156,7 +156,7 @@ static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
 #endif /* CONFIG_X86_X32 */
 
 struct compat_snd_pcm_status64 {
-	s32 state;
+	snd_pcm_state_t state;
 	u8 rsvd[4]; /* alignment */
 	s64 trigger_tstamp_sec;
 	s64 trigger_tstamp_nsec;
@@ -168,7 +168,7 @@ struct compat_snd_pcm_status64 {
 	u32 avail;
 	u32 avail_max;
 	u32 overrange;
-	s32 suspended_state;
+	snd_pcm_state_t suspended_state;
 	u32 audio_tstamp_data;
 	s64 audio_tstamp_sec;
 	s64 audio_tstamp_nsec;
@@ -376,13 +376,13 @@ static int snd_pcm_ioctl_xfern_compat(struct snd_pcm_substream *substream,
 #ifdef CONFIG_X86_X32
 /* X32 ABI has 64bit timespec and 64bit alignment */
 struct snd_pcm_mmap_status_x32 {
-	s32 state;
+	snd_pcm_state_t state;
 	s32 pad1;
 	u32 hw_ptr;
 	u32 pad2; /* alignment */
 	s64 tstamp_sec;
 	s64 tstamp_nsec;
-	s32 suspended_state;
+	snd_pcm_state_t suspended_state;
 	s32 pad3;
 	s64 audio_tstamp_sec;
 	s64 audio_tstamp_nsec;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index bb23f50..336406b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -551,7 +551,8 @@ static int period_to_usecs(struct snd_pcm_runtime *runtime)
 	return usecs;
 }
 
-static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_set_state(struct snd_pcm_substream *substream,
+			      snd_pcm_state_t state)
 {
 	snd_pcm_stream_lock_irq(substream);
 	if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
@@ -786,10 +787,22 @@ static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
 	return err;
 }
 
+static int do_hw_free(struct snd_pcm_substream *substream)
+{
+	int result = 0;
+
+	snd_pcm_sync_stop(substream);
+	if (substream->ops->hw_free)
+		result = substream->ops->hw_free(substream);
+	if (substream->managed_buffer_alloc)
+		snd_pcm_lib_free_pages(substream);
+	return result;
+}
+
 static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime;
-	int result = 0;
+	int result;
 
 	if (PCM_RUNTIME_CHECK(substream))
 		return -ENXIO;
@@ -806,11 +819,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
 	snd_pcm_stream_unlock_irq(substream);
 	if (atomic_read(&substream->mmap_count))
 		return -EBADFD;
-	snd_pcm_sync_stop(substream);
-	if (substream->ops->hw_free)
-		result = substream->ops->hw_free(substream);
-	if (substream->managed_buffer_alloc)
-		snd_pcm_lib_free_pages(substream);
+	result = do_hw_free(substream);
 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
 	pm_qos_remove_request(&substream->latency_pm_qos_req);
 	return result;
@@ -1097,11 +1106,17 @@ static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
 	runtime->trigger_master = NULL;
 }
 
+#define ACTION_ARG_IGNORE	(__force snd_pcm_state_t)0
+
 struct action_ops {
-	int (*pre_action)(struct snd_pcm_substream *substream, int state);
-	int (*do_action)(struct snd_pcm_substream *substream, int state);
-	void (*undo_action)(struct snd_pcm_substream *substream, int state);
-	void (*post_action)(struct snd_pcm_substream *substream, int state);
+	int (*pre_action)(struct snd_pcm_substream *substream,
+			  snd_pcm_state_t state);
+	int (*do_action)(struct snd_pcm_substream *substream,
+			 snd_pcm_state_t state);
+	void (*undo_action)(struct snd_pcm_substream *substream,
+			    snd_pcm_state_t state);
+	void (*post_action)(struct snd_pcm_substream *substream,
+			    snd_pcm_state_t state);
 };
 
 /*
@@ -1111,7 +1126,8 @@ struct action_ops {
  */
 static int snd_pcm_action_group(const struct action_ops *ops,
 				struct snd_pcm_substream *substream,
-				int state, int do_lock)
+				snd_pcm_state_t state,
+				bool do_lock)
 {
 	struct snd_pcm_substream *s = NULL;
 	struct snd_pcm_substream *s1;
@@ -1168,7 +1184,7 @@ static int snd_pcm_action_group(const struct action_ops *ops,
  */
 static int snd_pcm_action_single(const struct action_ops *ops,
 				 struct snd_pcm_substream *substream,
-				 int state)
+				 snd_pcm_state_t state)
 {
 	int res;
 	
@@ -1249,14 +1265,14 @@ snd_pcm_stream_group_ref(struct snd_pcm_substream *substream)
  */
 static int snd_pcm_action(const struct action_ops *ops,
 			  struct snd_pcm_substream *substream,
-			  int state)
+			  snd_pcm_state_t state)
 {
 	struct snd_pcm_group *group;
 	int res;
 
 	group = snd_pcm_stream_group_ref(substream);
 	if (group)
-		res = snd_pcm_action_group(ops, substream, state, 1);
+		res = snd_pcm_action_group(ops, substream, state, true);
 	else
 		res = snd_pcm_action_single(ops, substream, state);
 	snd_pcm_group_unref(group, substream);
@@ -1268,7 +1284,7 @@ static int snd_pcm_action(const struct action_ops *ops,
  */
 static int snd_pcm_action_lock_irq(const struct action_ops *ops,
 				   struct snd_pcm_substream *substream,
-				   int state)
+				   snd_pcm_state_t state)
 {
 	int res;
 
@@ -1282,14 +1298,14 @@ static int snd_pcm_action_lock_irq(const struct action_ops *ops,
  */
 static int snd_pcm_action_nonatomic(const struct action_ops *ops,
 				    struct snd_pcm_substream *substream,
-				    int state)
+				    snd_pcm_state_t state)
 {
 	int res;
 
 	/* Guarantee the group members won't change during non-atomic action */
 	down_read(&snd_pcm_link_rwsem);
 	if (snd_pcm_stream_linked(substream))
-		res = snd_pcm_action_group(ops, substream, state, 0);
+		res = snd_pcm_action_group(ops, substream, state, false);
 	else
 		res = snd_pcm_action_single(ops, substream, state);
 	up_read(&snd_pcm_link_rwsem);
@@ -1299,7 +1315,8 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops,
 /*
  * start callbacks
  */
-static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_pre_start(struct snd_pcm_substream *substream,
+			     snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
@@ -1312,20 +1329,23 @@ static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
 	return 0;
 }
 
-static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_start(struct snd_pcm_substream *substream,
+			    snd_pcm_state_t state)
 {
 	if (substream->runtime->trigger_master != substream)
 		return 0;
 	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
 }
 
-static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_undo_start(struct snd_pcm_substream *substream,
+			       snd_pcm_state_t state)
 {
 	if (substream->runtime->trigger_master == substream)
 		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
 }
 
-static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_start(struct snd_pcm_substream *substream,
+			       snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	snd_pcm_trigger_tstamp(substream);
@@ -1369,7 +1389,8 @@ static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
 /*
  * stop callbacks
  */
-static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_pre_stop(struct snd_pcm_substream *substream,
+			    snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
@@ -1378,7 +1399,8 @@ static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
 	return 0;
 }
 
-static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_stop(struct snd_pcm_substream *substream,
+			   snd_pcm_state_t state)
 {
 	if (substream->runtime->trigger_master == substream &&
 	    snd_pcm_running(substream))
@@ -1386,7 +1408,8 @@ static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
 	return 0; /* unconditonally stop all substreams */
 }
 
-static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_stop(struct snd_pcm_substream *substream,
+			      snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	if (runtime->status->state != state) {
@@ -1457,14 +1480,17 @@ int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
 EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
 
 /*
- * pause callbacks
+ * pause callbacks: pass boolean (to start pause or resume) as state argument
  */
-static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
+#define pause_pushed(state)	(__force bool)(state)
+
+static int snd_pcm_pre_pause(struct snd_pcm_substream *substream,
+			     snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
 		return -ENOSYS;
-	if (push) {
+	if (pause_pushed(state)) {
 		if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
 			return -EBADFD;
 	} else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
@@ -1473,13 +1499,14 @@ static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
 	return 0;
 }
 
-static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
+static int snd_pcm_do_pause(struct snd_pcm_substream *substream,
+			    snd_pcm_state_t state)
 {
 	if (substream->runtime->trigger_master != substream)
 		return 0;
 	/* some drivers might use hw_ptr to recover from the pause -
 	   update the hw_ptr now */
-	if (push)
+	if (pause_pushed(state))
 		snd_pcm_update_hw_ptr(substream);
 	/* The jiffies check in snd_pcm_update_hw_ptr*() is done by
 	 * a delta between the current jiffies, this gives a large enough
@@ -1487,23 +1514,27 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
 	 */
 	substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
 	return substream->ops->trigger(substream,
-				       push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
-					      SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
+				       pause_pushed(state) ?
+				       SNDRV_PCM_TRIGGER_PAUSE_PUSH :
+				       SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
 }
 
-static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
+static void snd_pcm_undo_pause(struct snd_pcm_substream *substream,
+			       snd_pcm_state_t state)
 {
 	if (substream->runtime->trigger_master == substream)
 		substream->ops->trigger(substream,
-					push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
+					pause_pushed(state) ?
+					SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
 					SNDRV_PCM_TRIGGER_PAUSE_PUSH);
 }
 
-static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
+static void snd_pcm_post_pause(struct snd_pcm_substream *substream,
+			       snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	snd_pcm_trigger_tstamp(substream);
-	if (push) {
+	if (pause_pushed(state)) {
 		runtime->status->state = SNDRV_PCM_STATE_PAUSED;
 		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
 		wake_up(&runtime->sleep);
@@ -1524,15 +1555,24 @@ static const struct action_ops snd_pcm_action_pause = {
 /*
  * Push/release the pause for all linked streams.
  */
-static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
+static int snd_pcm_pause(struct snd_pcm_substream *substream, bool push)
 {
-	return snd_pcm_action(&snd_pcm_action_pause, substream, push);
+	return snd_pcm_action(&snd_pcm_action_pause, substream,
+			      (__force snd_pcm_state_t)push);
+}
+
+static int snd_pcm_pause_lock_irq(struct snd_pcm_substream *substream,
+				  bool push)
+{
+	return snd_pcm_action_lock_irq(&snd_pcm_action_pause, substream,
+				       (__force snd_pcm_state_t)push);
 }
 
 #ifdef CONFIG_PM
-/* suspend */
+/* suspend callback: state argument ignored */
 
-static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream,
+			       snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	switch (runtime->status->state) {
@@ -1548,7 +1588,8 @@ static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
 	return 0;
 }
 
-static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_suspend(struct snd_pcm_substream *substream,
+			      snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	if (runtime->trigger_master != substream)
@@ -1559,7 +1600,8 @@ static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
 	return 0; /* suspend unconditionally */
 }
 
-static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_suspend(struct snd_pcm_substream *substream,
+				 snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	snd_pcm_trigger_tstamp(substream);
@@ -1590,7 +1632,8 @@ static int snd_pcm_suspend(struct snd_pcm_substream *substream)
 	unsigned long flags;
 
 	snd_pcm_stream_lock_irqsave(substream, flags);
-	err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
+	err = snd_pcm_action(&snd_pcm_action_suspend, substream,
+			     ACTION_ARG_IGNORE);
 	snd_pcm_stream_unlock_irqrestore(substream, flags);
 	return err;
 }
@@ -1634,9 +1677,10 @@ int snd_pcm_suspend_all(struct snd_pcm *pcm)
 }
 EXPORT_SYMBOL(snd_pcm_suspend_all);
 
-/* resume */
+/* resume callbacks: state argument ignored */
 
-static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_pre_resume(struct snd_pcm_substream *substream,
+			      snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
@@ -1645,7 +1689,8 @@ static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
 	return 0;
 }
 
-static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_resume(struct snd_pcm_substream *substream,
+			     snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	if (runtime->trigger_master != substream)
@@ -1658,14 +1703,16 @@ static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
 	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
 }
 
-static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_undo_resume(struct snd_pcm_substream *substream,
+				snd_pcm_state_t state)
 {
 	if (substream->runtime->trigger_master == substream &&
 	    snd_pcm_running(substream))
 		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
 }
 
-static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_resume(struct snd_pcm_substream *substream,
+				snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	snd_pcm_trigger_tstamp(substream);
@@ -1683,7 +1730,8 @@ static const struct action_ops snd_pcm_action_resume = {
 
 static int snd_pcm_resume(struct snd_pcm_substream *substream)
 {
-	return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
+	return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream,
+				       ACTION_ARG_IGNORE);
 }
 
 #else
@@ -1724,7 +1772,9 @@ static int snd_pcm_xrun(struct snd_pcm_substream *substream)
 /*
  * reset ioctl
  */
-static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
+/* reset callbacks:  state argument ignored */
+static int snd_pcm_pre_reset(struct snd_pcm_substream *substream,
+			     snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	switch (runtime->status->state) {
@@ -1738,7 +1788,8 @@ static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
 	}
 }
 
-static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_reset(struct snd_pcm_substream *substream,
+			    snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	int err = snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
@@ -1752,7 +1803,8 @@ static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
 	return 0;
 }
 
-static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_reset(struct snd_pcm_substream *substream,
+			       snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	runtime->control->appl_ptr = runtime->status->hw_ptr;
@@ -1769,17 +1821,20 @@ static const struct action_ops snd_pcm_action_reset = {
 
 static int snd_pcm_reset(struct snd_pcm_substream *substream)
 {
-	return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
+	return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream,
+					ACTION_ARG_IGNORE);
 }
 
 /*
  * prepare ioctl
  */
-/* we use the second argument for updating f_flags */
+/* pass f_flags as state argument */
 static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
-			       int f_flags)
+			       snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	int f_flags = (__force int)state;
+
 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
 	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
 		return -EBADFD;
@@ -1789,17 +1844,19 @@ static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_prepare(struct snd_pcm_substream *substream,
+			      snd_pcm_state_t state)
 {
 	int err;
 	snd_pcm_sync_stop(substream);
 	err = substream->ops->prepare(substream);
 	if (err < 0)
 		return err;
-	return snd_pcm_do_reset(substream, 0);
+	return snd_pcm_do_reset(substream, state);
 }
 
-static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_prepare(struct snd_pcm_substream *substream,
+				 snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	runtime->control->appl_ptr = runtime->status->hw_ptr;
@@ -1832,7 +1889,7 @@ static int snd_pcm_prepare(struct snd_pcm_substream *substream,
 	snd_pcm_stream_lock_irq(substream);
 	switch (substream->runtime->status->state) {
 	case SNDRV_PCM_STATE_PAUSED:
-		snd_pcm_pause(substream, 0);
+		snd_pcm_pause(substream, false);
 		/* fallthru */
 	case SNDRV_PCM_STATE_SUSPENDED:
 		snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
@@ -1841,14 +1898,17 @@ static int snd_pcm_prepare(struct snd_pcm_substream *substream,
 	snd_pcm_stream_unlock_irq(substream);
 
 	return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
-					substream, f_flags);
+					substream,
+					(__force snd_pcm_state_t)f_flags);
 }
 
 /*
  * drain ioctl
  */
 
-static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
+/* drain init callbacks: state argument ignored */
+static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream,
+				  snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	switch (runtime->status->state) {
@@ -1861,7 +1921,8 @@ static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state
 	return 0;
 }
 
-static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream,
+				 snd_pcm_state_t state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -1887,7 +1948,9 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
 	} else {
 		/* stop running stream */
 		if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
-			int new_state = snd_pcm_capture_avail(runtime) > 0 ?
+			snd_pcm_state_t new_state;
+
+			new_state = snd_pcm_capture_avail(runtime) > 0 ?
 				SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
 			snd_pcm_do_stop(substream, new_state);
 			snd_pcm_post_stop(substream, new_state);
@@ -1903,7 +1966,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
 	return 0;
 }
 
-static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream,
+				    snd_pcm_state_t state)
 {
 }
 
@@ -1946,10 +2010,11 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
 	snd_pcm_stream_lock_irq(substream);
 	/* resume pause */
 	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
-		snd_pcm_pause(substream, 0);
+		snd_pcm_pause(substream, false);
 
 	/* pre-start/stop - all running streams are changed to DRAINING state */
-	result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
+	result = snd_pcm_action(&snd_pcm_action_drain_init, substream,
+				ACTION_ARG_IGNORE);
 	if (result < 0)
 		goto unlock;
 	/* in non-blocking, we don't wait in ioctl but let caller poll */
@@ -2050,7 +2115,7 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream)
 	snd_pcm_stream_lock_irq(substream);
 	/* resume pause */
 	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
-		snd_pcm_pause(substream, 0);
+		snd_pcm_pause(substream, false);
 
 	snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
 	/* runtime->control->appl_ptr = runtime->status->hw_ptr; */
@@ -2529,9 +2594,7 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream)
 
 	snd_pcm_drop(substream);
 	if (substream->hw_opened) {
-		if (substream->ops->hw_free &&
-		    substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
-			substream->ops->hw_free(substream);
+		do_hw_free(substream);
 		substream->ops->close(substream);
 		substream->hw_opened = 0;
 	}
@@ -2894,12 +2957,12 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
 }
 
 struct snd_pcm_mmap_status32 {
-	s32 state;
+	snd_pcm_state_t state;
 	s32 pad1;
 	u32 hw_ptr;
 	s32 tstamp_sec;
 	s32 tstamp_nsec;
-	s32 suspended_state;
+	snd_pcm_state_t suspended_state;
 	s32 audio_tstamp_sec;
 	s32 audio_tstamp_nsec;
 } __attribute__((packed));
@@ -3177,9 +3240,7 @@ static int snd_pcm_common_ioctl(struct file *file,
 	case SNDRV_PCM_IOCTL_DROP:
 		return snd_pcm_drop(substream);
 	case SNDRV_PCM_IOCTL_PAUSE:
-		return snd_pcm_action_lock_irq(&snd_pcm_action_pause,
-					       substream,
-					       (int)(unsigned long)arg);
+		return snd_pcm_pause_lock_irq(substream, (unsigned long)arg);
 	case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
 	case SNDRV_PCM_IOCTL_READI_FRAMES:
 		return snd_pcm_xferi_frames_ioctl(substream, arg);
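
A minimal sketch (not part of the patch) of the sparse __bitwise pattern
behind snd_pcm_state_t, which the pcm_native.c hunks here propagate through
the action callbacks: passing a bare int where a typed state is expected is
flagged by sparse, and reusing the argument slot for non-state data (the
pause push/release flag, the prepare f_flags) needs an explicit __force
cast. The "demo" names below are hypothetical.

#include <linux/types.h>

typedef int __bitwise demo_state_t;

#define DEMO_STATE_SETUP	((__force demo_state_t) 1)
#define DEMO_STATE_RUNNING	((__force demo_state_t) 3)

static void demo_set_state(demo_state_t state)
{
	/* consumers compare only against the typed constants */
}

static void demo_example(void)
{
	demo_set_state(DEMO_STATE_RUNNING);		/* clean */
	demo_set_state((__force demo_state_t)3);	/* deliberate override */
	/* demo_set_state(3); would draw a sparse "incorrect type" warning */
}
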
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index da0bd89..02ac3f4 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -903,7 +903,7 @@ static void print_formats(struct snd_dummy *dummy,
 {
 	int i;
 
-	for (i = 0; i < SNDRV_PCM_FORMAT_LAST; i++) {
+	for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
 		if (dummy->pcm_hw.formats & (1ULL << i))
 			snd_iprintf(buffer, " %s", snd_pcm_format_name(i));
 	}
diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
index d01e691..a314b03 100644
--- a/sound/hda/hdac_stream.c
+++ b/sound/hda/hdac_stream.c
@@ -631,20 +631,27 @@ void snd_hdac_stream_sync(struct hdac_stream *azx_dev, bool start,
 		nwait = 0;
 		i = 0;
 		list_for_each_entry(s, &bus->stream_list, list) {
-			if (streams & (1 << i)) {
-				if (start) {
-					/* check FIFO gets ready */
-					if (!(snd_hdac_stream_readb(s, SD_STS) &
-					      SD_STS_FIFO_READY))
-						nwait++;
-				} else {
-					/* check RUN bit is cleared */
-					if (snd_hdac_stream_readb(s, SD_CTL) &
-					    SD_CTL_DMA_START)
-						nwait++;
+			if (!(streams & (1 << i++)))
+				continue;
+
+			if (start) {
+				/* check FIFO gets ready */
+				if (!(snd_hdac_stream_readb(s, SD_STS) &
+				      SD_STS_FIFO_READY))
+					nwait++;
+			} else {
+				/* check RUN bit is cleared */
+				if (snd_hdac_stream_readb(s, SD_CTL) &
+				    SD_CTL_DMA_START) {
+					nwait++;
+					/*
+					 * Perform a stream reset if the DMA RUN
+					 * bit is not cleared within the timeout
+					 */
+					if (timeout == 1)
+						snd_hdac_stream_reset(s);
 				}
 			}
-			i++;
 		}
 		if (!nwait)
 			break;
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 6b7ff4a..4e76ed0 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -463,7 +463,7 @@ static void snd_emu10k1_write_op(struct snd_emu10k1_fx8010_code *icode,
 	u_int32_t *code;
 	if (snd_BUG_ON(*ptr >= 512))
 		return;
-	code = (u_int32_t __force *)icode->code + (*ptr) * 2;
+	code = icode->code + (*ptr) * 2;
 	set_bit(*ptr, icode->code_valid);
 	code[0] = ((x & 0x3ff) << 10) | (y & 0x3ff);
 	code[1] = ((op & 0x0f) << 20) | ((r & 0x3ff) << 10) | (a & 0x3ff);
@@ -480,7 +480,7 @@ static void snd_emu10k1_audigy_write_op(struct snd_emu10k1_fx8010_code *icode,
 	u_int32_t *code;
 	if (snd_BUG_ON(*ptr >= 1024))
 		return;
-	code = (u_int32_t __force *)icode->code + (*ptr) * 2;
+	code = icode->code + (*ptr) * 2;
 	set_bit(*ptr, icode->code_valid);
 	code[0] = ((x & 0x7ff) << 12) | (y & 0x7ff);
 	code[1] = ((op & 0x0f) << 24) | ((r & 0x7ff) << 12) | (a & 0x7ff);
@@ -513,8 +513,8 @@ static int snd_emu10k1_gpr_poke(struct snd_emu10k1 *emu,
 		if (!test_bit(gpr, icode->gpr_valid))
 			continue;
 		if (in_kernel)
-			val = *(__force u32 *)&icode->gpr_map[gpr];
-		else if (get_user(val, &icode->gpr_map[gpr]))
+			val = icode->gpr_map[gpr];
+		else if (get_user(val, (__user u32 *)&icode->gpr_map[gpr]))
 			return -EFAULT;
 		snd_emu10k1_ptr_write(emu, emu->gpr_base + gpr, 0, val);
 	}
@@ -530,7 +530,7 @@ static int snd_emu10k1_gpr_peek(struct snd_emu10k1 *emu,
 	for (gpr = 0; gpr < (emu->audigy ? 0x200 : 0x100); gpr++) {
 		set_bit(gpr, icode->gpr_valid);
 		val = snd_emu10k1_ptr_read(emu, emu->gpr_base + gpr, 0);
-		if (put_user(val, &icode->gpr_map[gpr]))
+		if (put_user(val, (__user u32 *)&icode->gpr_map[gpr]))
 			return -EFAULT;
 	}
 	return 0;
@@ -547,11 +547,11 @@ static int snd_emu10k1_tram_poke(struct snd_emu10k1 *emu,
 		if (!test_bit(tram, icode->tram_valid))
 			continue;
 		if (in_kernel) {
-			val = *(__force u32 *)&icode->tram_data_map[tram];
-			addr = *(__force u32 *)&icode->tram_addr_map[tram];
+			val = icode->tram_data_map[tram];
+			addr = icode->tram_addr_map[tram];
 		} else {
-			if (get_user(val, &icode->tram_data_map[tram]) ||
-			    get_user(addr, &icode->tram_addr_map[tram]))
+			if (get_user(val, (__user __u32 *)&icode->tram_data_map[tram]) ||
+			    get_user(addr, (__user __u32 *)&icode->tram_addr_map[tram]))
 				return -EFAULT;
 		}
 		snd_emu10k1_ptr_write(emu, TANKMEMDATAREGBASE + tram, 0, val);
@@ -581,8 +581,8 @@ static int snd_emu10k1_tram_peek(struct snd_emu10k1 *emu,
 			addr = snd_emu10k1_ptr_read(emu, TANKMEMADDRREGBASE + tram, 0) >> 12;
 			addr |= snd_emu10k1_ptr_read(emu, A_TANKMEMCTLREGBASE + tram, 0) << 20;
 		}
-		if (put_user(val, &icode->tram_data_map[tram]) ||
-		    put_user(addr, &icode->tram_addr_map[tram]))
+		if (put_user(val, (__user u32 *)&icode->tram_data_map[tram]) ||
+		    put_user(addr, (__user u32 *)&icode->tram_addr_map[tram]))
 			return -EFAULT;
 	}
 	return 0;
@@ -598,11 +598,11 @@ static int snd_emu10k1_code_poke(struct snd_emu10k1 *emu,
 		if (!test_bit(pc / 2, icode->code_valid))
 			continue;
 		if (in_kernel) {
-			lo = *(__force u32 *)&icode->code[pc + 0];
-			hi = *(__force u32 *)&icode->code[pc + 1];
+			lo = icode->code[pc + 0];
+			hi = icode->code[pc + 1];
 		} else {
-			if (get_user(lo, &icode->code[pc + 0]) ||
-			    get_user(hi, &icode->code[pc + 1]))
+			if (get_user(lo, (__user u32 *)&icode->code[pc + 0]) ||
+			    get_user(hi, (__user u32 *)&icode->code[pc + 1]))
 				return -EFAULT;
 		}
 		snd_emu10k1_efx_write(emu, pc + 0, lo);
@@ -619,17 +619,21 @@ static int snd_emu10k1_code_peek(struct snd_emu10k1 *emu,
 	memset(icode->code_valid, 0, sizeof(icode->code_valid));
 	for (pc = 0; pc < (emu->audigy ? 2*1024 : 2*512); pc += 2) {
 		set_bit(pc / 2, icode->code_valid);
-		if (put_user(snd_emu10k1_efx_read(emu, pc + 0), &icode->code[pc + 0]))
+		if (put_user(snd_emu10k1_efx_read(emu, pc + 0),
+			     (__user u32 *)&icode->code[pc + 0]))
 			return -EFAULT;
-		if (put_user(snd_emu10k1_efx_read(emu, pc + 1), &icode->code[pc + 1]))
+		if (put_user(snd_emu10k1_efx_read(emu, pc + 1),
+			     (__user u32 *)&icode->code[pc + 1]))
 			return -EFAULT;
 	}
 	return 0;
 }
 
 static struct snd_emu10k1_fx8010_ctl *
-snd_emu10k1_look_for_ctl(struct snd_emu10k1 *emu, struct emu10k1_ctl_elem_id *id)
+snd_emu10k1_look_for_ctl(struct snd_emu10k1 *emu,
+			 struct emu10k1_ctl_elem_id *_id)
 {
+	struct snd_ctl_elem_id *id = (struct snd_ctl_elem_id *)_id;
 	struct snd_emu10k1_fx8010_ctl *ctl;
 	struct snd_kcontrol *kcontrol;
 
@@ -672,41 +676,60 @@ static unsigned int *copy_tlv(const unsigned int __user *_tlv, bool in_kernel)
 }
 
 static int copy_gctl(struct snd_emu10k1 *emu,
-		     struct snd_emu10k1_fx8010_control_gpr *gctl,
-		     struct snd_emu10k1_fx8010_control_gpr __user *_gctl,
+		     struct snd_emu10k1_fx8010_control_gpr *dst,
+		     struct snd_emu10k1_fx8010_control_gpr *src,
 		     int idx, bool in_kernel)
 {
-	struct snd_emu10k1_fx8010_control_old_gpr __user *octl;
+	struct snd_emu10k1_fx8010_control_gpr __user *_src;
+	struct snd_emu10k1_fx8010_control_old_gpr *octl;
+	struct snd_emu10k1_fx8010_control_old_gpr __user *_octl;
 
+	_src = (struct snd_emu10k1_fx8010_control_gpr __user *)src;
 	if (emu->support_tlv) {
 		if (in_kernel)
-			memcpy(gctl, (__force void *)&_gctl[idx], sizeof(*gctl));
-		else if (copy_from_user(gctl, &_gctl[idx], sizeof(*gctl)))
+			*dst = src[idx];
+		else if (copy_from_user(dst, &_src[idx], sizeof(*src)))
 			return -EFAULT;
 		return 0;
 	}
 
-	octl = (struct snd_emu10k1_fx8010_control_old_gpr __user *)_gctl;
+	octl = (struct snd_emu10k1_fx8010_control_old_gpr *)src;
+	_octl = (struct snd_emu10k1_fx8010_control_old_gpr __user *)octl;
 	if (in_kernel)
-		memcpy(gctl, (__force void *)&octl[idx], sizeof(*octl));
-	else if (copy_from_user(gctl, &octl[idx], sizeof(*octl)))
+		memcpy(dst, &octl[idx], sizeof(*octl));
+	else if (copy_from_user(dst, &_octl[idx], sizeof(*octl)))
 		return -EFAULT;
-	gctl->tlv = NULL;
+	dst->tlv = NULL;
 	return 0;
 }
 
 static int copy_gctl_to_user(struct snd_emu10k1 *emu,
-		     struct snd_emu10k1_fx8010_control_gpr __user *_gctl,
-		     struct snd_emu10k1_fx8010_control_gpr *gctl,
+		     struct snd_emu10k1_fx8010_control_gpr *dst,
+		     struct snd_emu10k1_fx8010_control_gpr *src,
 		     int idx)
 {
+	struct snd_emu10k1_fx8010_control_gpr __user *_dst;
 	struct snd_emu10k1_fx8010_control_old_gpr __user *octl;
 
+	_dst = (struct snd_emu10k1_fx8010_control_gpr __user *)dst;
 	if (emu->support_tlv)
-		return copy_to_user(&_gctl[idx], gctl, sizeof(*gctl));
+		return copy_to_user(&_dst[idx], src, sizeof(*src));
 	
-	octl = (struct snd_emu10k1_fx8010_control_old_gpr __user *)_gctl;
-	return copy_to_user(&octl[idx], gctl, sizeof(*octl));
+	octl = (struct snd_emu10k1_fx8010_control_old_gpr __user *)dst;
+	return copy_to_user(&octl[idx], src, sizeof(*octl));
+}
+
+static int copy_ctl_elem_id(const struct emu10k1_ctl_elem_id *list, int i,
+			    struct emu10k1_ctl_elem_id *ret, bool in_kernel)
+{
+	struct emu10k1_ctl_elem_id __user *_id =
+		(struct emu10k1_ctl_elem_id __user *)&list[i];
+
+	if (in_kernel)
+		*ret = list[i];
+	else if (copy_from_user(ret, _id, sizeof(*ret)))
+		return -EFAULT;
+	return 0;
 }
 
 static int snd_emu10k1_verify_controls(struct snd_emu10k1 *emu,
@@ -714,17 +737,16 @@ static int snd_emu10k1_verify_controls(struct snd_emu10k1 *emu,
 				       bool in_kernel)
 {
 	unsigned int i;
-	struct emu10k1_ctl_elem_id __user *_id;
 	struct emu10k1_ctl_elem_id id;
 	struct snd_emu10k1_fx8010_control_gpr *gctl;
+	struct snd_ctl_elem_id *gctl_id;
 	int err;
 	
-	_id = (__force struct emu10k1_ctl_elem_id __user *)icode->gpr_del_controls;
-	for (i = 0; i < icode->gpr_del_control_count; i++, _id++) {
-		if (in_kernel)
-			id = *(__force struct emu10k1_ctl_elem_id *)_id;
-		else if (copy_from_user(&id, _id, sizeof(id)))
-	     		return -EFAULT;
+	for (i = 0; i < icode->gpr_del_control_count; i++) {
+		err = copy_ctl_elem_id(icode->gpr_del_controls, i, &id,
+				       in_kernel);
+		if (err < 0)
+			return err;
 		if (snd_emu10k1_look_for_ctl(emu, &id) == NULL)
 			return -ENOENT;
 	}
@@ -740,16 +762,16 @@ static int snd_emu10k1_verify_controls(struct snd_emu10k1 *emu,
 		}
 		if (snd_emu10k1_look_for_ctl(emu, &gctl->id))
 			continue;
+		gctl_id = (struct snd_ctl_elem_id *)&gctl->id;
 		down_read(&emu->card->controls_rwsem);
-		if (snd_ctl_find_id(emu->card,
-				    (struct snd_ctl_elem_id *)&gctl->id)) {
+		if (snd_ctl_find_id(emu->card, gctl_id)) {
 			up_read(&emu->card->controls_rwsem);
 			err = -EEXIST;
 			goto __error;
 		}
 		up_read(&emu->card->controls_rwsem);
-		if (gctl->id.iface != SNDRV_CTL_ELEM_IFACE_MIXER &&
-		    gctl->id.iface != SNDRV_CTL_ELEM_IFACE_PCM) {
+		if (gctl_id->iface != SNDRV_CTL_ELEM_IFACE_MIXER &&
+		    gctl_id->iface != SNDRV_CTL_ELEM_IFACE_PCM) {
 			err = -EINVAL;
 			goto __error;
 		}
@@ -784,6 +806,7 @@ static int snd_emu10k1_add_controls(struct snd_emu10k1 *emu,
 {
 	unsigned int i, j;
 	struct snd_emu10k1_fx8010_control_gpr *gctl;
+	struct snd_ctl_elem_id *gctl_id;
 	struct snd_emu10k1_fx8010_ctl *ctl, *nctl;
 	struct snd_kcontrol_new knew;
 	struct snd_kcontrol *kctl;
@@ -804,24 +827,25 @@ static int snd_emu10k1_add_controls(struct snd_emu10k1 *emu,
 			err = -EFAULT;
 			goto __error;
 		}
-		if (gctl->id.iface != SNDRV_CTL_ELEM_IFACE_MIXER &&
-		    gctl->id.iface != SNDRV_CTL_ELEM_IFACE_PCM) {
+		gctl_id = (struct snd_ctl_elem_id *)&gctl->id;
+		if (gctl_id->iface != SNDRV_CTL_ELEM_IFACE_MIXER &&
+		    gctl_id->iface != SNDRV_CTL_ELEM_IFACE_PCM) {
 			err = -EINVAL;
 			goto __error;
 		}
-		if (! gctl->id.name[0]) {
+		if (!*gctl_id->name) {
 			err = -EINVAL;
 			goto __error;
 		}
 		ctl = snd_emu10k1_look_for_ctl(emu, &gctl->id);
 		memset(&knew, 0, sizeof(knew));
-		knew.iface = gctl->id.iface;
-		knew.name = gctl->id.name;
-		knew.index = gctl->id.index;
-		knew.device = gctl->id.device;
-		knew.subdevice = gctl->id.subdevice;
+		knew.iface = gctl_id->iface;
+		knew.name = gctl_id->name;
+		knew.index = gctl_id->index;
+		knew.device = gctl_id->device;
+		knew.subdevice = gctl_id->subdevice;
 		knew.info = snd_emu10k1_gpr_ctl_info;
-		knew.tlv.p = copy_tlv((__force const unsigned int __user *)gctl->tlv, in_kernel);
+		knew.tlv.p = copy_tlv((const unsigned int __user *)gctl->tlv, in_kernel);
 		if (knew.tlv.p)
 			knew.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
 				SNDRV_CTL_ELEM_ACCESS_TLV_READ;
@@ -878,17 +902,15 @@ static int snd_emu10k1_del_controls(struct snd_emu10k1 *emu,
 {
 	unsigned int i;
 	struct emu10k1_ctl_elem_id id;
-	struct emu10k1_ctl_elem_id __user *_id;
 	struct snd_emu10k1_fx8010_ctl *ctl;
 	struct snd_card *card = emu->card;
+	int err;
 	
-	_id = (__force struct emu10k1_ctl_elem_id __user *)icode->gpr_del_controls;
-
-	for (i = 0; i < icode->gpr_del_control_count; i++, _id++) {
-		if (in_kernel)
-			id = *(__force struct emu10k1_ctl_elem_id *)_id;
-		else if (copy_from_user(&id, _id, sizeof(id)))
-			return -EFAULT;
+	for (i = 0; i < icode->gpr_del_control_count; i++) {
+		err = copy_ctl_elem_id(icode->gpr_del_controls, i, &id,
+				       in_kernel);
+		if (err < 0)
+			return err;
 		down_write(&card->controls_rwsem);
 		ctl = snd_emu10k1_look_for_ctl(emu, &id);
 		if (ctl)
@@ -917,7 +939,7 @@ static int snd_emu10k1_list_controls(struct snd_emu10k1 *emu,
 		    i < icode->gpr_list_control_count) {
 			memset(gctl, 0, sizeof(*gctl));
 			id = &ctl->kcontrol->id;
-			gctl->id.iface = id->iface;
+			gctl->id.iface = (__force int)id->iface;
 			strlcpy(gctl->id.name, id->name, sizeof(gctl->id.name));
 			gctl->id.index = id->index;
 			gctl->id.device = id->device;
@@ -1095,7 +1117,7 @@ static void
 snd_emu10k1_init_mono_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
 			      const char *name, int gpr, int defval)
 {
-	ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+	ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
 	strcpy(ctl->id.name, name);
 	ctl->vcount = ctl->count = 1;
 	ctl->gpr[0] = gpr + 0; ctl->value[0] = defval;
@@ -1116,7 +1138,7 @@ static void
 snd_emu10k1_init_stereo_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
 				const char *name, int gpr, int defval)
 {
-	ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+	ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
 	strcpy(ctl->id.name, name);
 	ctl->vcount = ctl->count = 2;
 	ctl->gpr[0] = gpr + 0; ctl->value[0] = defval;
@@ -1138,7 +1160,7 @@ static void
 snd_emu10k1_init_mono_onoff_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
 				    const char *name, int gpr, int defval)
 {
-	ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+	ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
 	strcpy(ctl->id.name, name);
 	ctl->vcount = ctl->count = 1;
 	ctl->gpr[0] = gpr + 0; ctl->value[0] = defval;
@@ -1151,7 +1173,7 @@ static void
 snd_emu10k1_init_stereo_onoff_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
 				      const char *name, int gpr, int defval)
 {
-	ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+	ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
 	strcpy(ctl->id.name, name);
 	ctl->vcount = ctl->count = 2;
 	ctl->gpr[0] = gpr + 0; ctl->value[0] = defval;
@@ -1204,8 +1226,8 @@ static int _snd_emu10k1_audigy_init_efx(struct snd_emu10k1 *emu)
 	if (!icode)
 		return err;
 
-	icode->gpr_map = (u_int32_t __user *) kcalloc(512 + 256 + 256 + 2 * 1024,
-						      sizeof(u_int32_t), GFP_KERNEL);
+	icode->gpr_map = kcalloc(512 + 256 + 256 + 2 * 1024,
+				 sizeof(u_int32_t), GFP_KERNEL);
 	if (!icode->gpr_map)
 		goto __err_gpr;
 	controls = kcalloc(SND_EMU10K1_GPR_CONTROLS,
@@ -1213,7 +1235,7 @@ static int _snd_emu10k1_audigy_init_efx(struct snd_emu10k1 *emu)
 	if (!controls)
 		goto __err_ctrls;
 
-	gpr_map = (u32 __force *)icode->gpr_map;
+	gpr_map = icode->gpr_map;
 
 	icode->tram_data_map = icode->gpr_map + 512;
 	icode->tram_addr_map = icode->tram_data_map + 256;
@@ -1468,7 +1490,7 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
 	
 
 	ctl = &controls[nctl + 0];
-	ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+	ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
 	strcpy(ctl->id.name, "Tone Control - Bass");
 	ctl->vcount = 2;
 	ctl->count = 10;
@@ -1477,7 +1499,7 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
 	ctl->value[0] = ctl->value[1] = 20;
 	ctl->translation = EMU10K1_GPR_TRANSLATION_BASS;
 	ctl = &controls[nctl + 1];
-	ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+	ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
 	strcpy(ctl->id.name, "Tone Control - Treble");
 	ctl->vcount = 2;
 	ctl->count = 10;
@@ -1758,7 +1780,7 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
 		A_OP(icode, &ptr, 0x0f, 0xc0, 0xc0, 0xcf, 0xc0);
 
 	icode->gpr_add_control_count = nctl;
-	icode->gpr_add_controls = (struct snd_emu10k1_fx8010_control_gpr __user *)controls;
+	icode->gpr_add_controls = controls;
 	emu->support_tlv = 1; /* support TLV */
 	err = snd_emu10k1_icode_poke(emu, icode, true);
 	emu->support_tlv = 0; /* clear again */
@@ -1766,7 +1788,7 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
 __err:
 	kfree(controls);
 __err_ctrls:
-	kfree((void __force *)icode->gpr_map);
+	kfree(icode->gpr_map);
 __err_gpr:
 	kfree(icode);
 	return err;
@@ -1839,8 +1861,8 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
 	if (!icode)
 		return err;
 
-	icode->gpr_map = (u_int32_t __user *) kcalloc(256 + 160 + 160 + 2 * 512,
-						      sizeof(u_int32_t), GFP_KERNEL);
+	icode->gpr_map = kcalloc(256 + 160 + 160 + 2 * 512,
+				 sizeof(u_int32_t), GFP_KERNEL);
 	if (!icode->gpr_map)
 		goto __err_gpr;
 
@@ -1854,7 +1876,7 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
 	if (!ipcm)
 		goto __err_ipcm;
 
-	gpr_map = (u32 __force *)icode->gpr_map;
+	gpr_map = icode->gpr_map;
 
 	icode->tram_data_map = icode->gpr_map + 256;
 	icode->tram_addr_map = icode->tram_data_map + 160;
@@ -2188,7 +2210,7 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
 	OP(icode, &ptr, iACC3, GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 5), GPR(playback + 5), C_00000000, C_00000000); /* LFE */
 
 	ctl = &controls[i + 0];
-	ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+	ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
 	strcpy(ctl->id.name, "Tone Control - Bass");
 	ctl->vcount = 2;
 	ctl->count = 10;
@@ -2198,7 +2220,7 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
 	ctl->tlv = snd_emu10k1_bass_treble_db_scale;
 	ctl->translation = EMU10K1_GPR_TRANSLATION_BASS;
 	ctl = &controls[i + 1];
-	ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+	ctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;
 	strcpy(ctl->id.name, "Tone Control - Treble");
 	ctl->vcount = 2;
 	ctl->count = 10;
@@ -2384,7 +2406,7 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
 	if ((err = snd_emu10k1_fx8010_tram_setup(emu, ipcm->buffer_size)) < 0)
 		goto __err;
 	icode->gpr_add_control_count = i;
-	icode->gpr_add_controls = (struct snd_emu10k1_fx8010_control_gpr __user *)controls;
+	icode->gpr_add_controls = controls;
 	emu->support_tlv = 1; /* support TLV */
 	err = snd_emu10k1_icode_poke(emu, icode, true);
 	emu->support_tlv = 0; /* clear again */
@@ -2395,7 +2417,7 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
 __err_ipcm:
 	kfree(controls);
 __err_ctrls:
-	kfree((void __force *)icode->gpr_map);
+	kfree(icode->gpr_map);
 __err_gpr:
 	kfree(icode);
 	return err;
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 9757667..2609e39 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1110,16 +1110,23 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
 		if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
 			active = true;
 
-		/* clear rirb int */
 		status = azx_readb(chip, RIRBSTS);
 		if (status & RIRB_INT_MASK) {
+			/*
+			 * Clearing the interrupt status here ensures that no
+			 * interrupt gets masked after the RIRB wp is read in
+			 * snd_hdac_bus_update_rirb. This avoids a possible
+			 * race condition where codec response in RIRB may
+			 * remain unserviced by IRQ, eventually falling back
+			 * to polling mode in azx_rirb_get_response.
+			 */
+			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
 			active = true;
 			if (status & RIRB_INT_RESPONSE) {
 				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
 					udelay(80);
 				snd_hdac_bus_update_rirb(bus);
 			}
-			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
 		}
 	} while (active && ++repeat < 10);
 
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b5e8d43..92a042e 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2451,6 +2451,8 @@ static const struct pci_device_id azx_ids[] = {
 	/* Jasperlake */
 	{ PCI_DEVICE(0x8086, 0x38c8),
 	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+	{ PCI_DEVICE(0x8086, 0x4dc8),
+	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
 	/* Tigerlake */
 	{ PCI_DEVICE(0x8086, 0xa0c8),
 	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 48bddc2..5119a9a 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1550,6 +1550,34 @@ static bool update_eld(struct hda_codec *codec,
 	return eld_changed;
 }
 
+static struct snd_jack *pin_idx_to_pcm_jack(struct hda_codec *codec,
+					    struct hdmi_spec_per_pin *per_pin)
+{
+	struct hdmi_spec *spec = codec->spec;
+	struct snd_jack *jack = NULL;
+	struct hda_jack_tbl *jack_tbl;
+
+	/* if !dyn_pcm_assign, get jack from hda_jack_tbl
+	 * in !dyn_pcm_assign case, spec->pcm_rec[].jack is not
+	 * NULL even after snd_hda_jack_tbl_clear() is called to
+	 * free snd_jack. This may cause access to invalid memory
+	 * when calling snd_jack_report
+	 */
+	if (per_pin->pcm_idx >= 0 && spec->dyn_pcm_assign) {
+		jack = spec->pcm_rec[per_pin->pcm_idx].jack;
+	} else if (!spec->dyn_pcm_assign) {
+		/*
+		 * jack tbl doesn't support DP MST
+		 * DP MST will use dyn_pcm_assign,
+		 * so DP MST will never come here
+		 */
+		jack_tbl = snd_hda_jack_tbl_get_mst(codec, per_pin->pin_nid,
+						    per_pin->dev_id);
+		if (jack_tbl)
+			jack = jack_tbl->jack;
+	}
+	return jack;
+}
 /* update ELD and jack state via HD-audio verbs */
 static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
 					 int repoll)
@@ -1571,6 +1599,7 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
 	int present;
 	bool ret;
 	bool do_repoll = false;
+	struct snd_jack *pcm_jack = NULL;
 
 	present = snd_hda_jack_pin_sense(codec, pin_nid, dev_id);
 
@@ -1598,10 +1627,19 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
 			do_repoll = true;
 	}
 
-	if (do_repoll)
+	if (do_repoll) {
 		schedule_delayed_work(&per_pin->work, msecs_to_jiffies(300));
-	else
+	} else {
+		/*
+		 * pcm_idx >= 0 before update_eld() means this is a monitor
+		 * disconnected event. The jack must be fetched before
+		 * update_eld().
+		 */
+		pcm_jack = pin_idx_to_pcm_jack(codec, per_pin);
 		update_eld(codec, per_pin, eld);
+		if (!pcm_jack)
+			pcm_jack = pin_idx_to_pcm_jack(codec, per_pin);
+	}
 
 	ret = !repoll || !eld->monitor_present || eld->eld_valid;
 
@@ -1610,40 +1648,34 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
 		jack->block_report = !ret;
 		jack->pin_sense = (eld->monitor_present && eld->eld_valid) ?
 			AC_PINSENSE_PRESENCE : 0;
+
+		if (spec->dyn_pcm_assign && pcm_jack && !do_repoll) {
+			int state = 0;
+
+			if (jack->pin_sense & AC_PINSENSE_PRESENCE)
+				state = SND_JACK_AVOUT;
+			snd_jack_report(pcm_jack, state);
+		}
+
+		/*
+		 * The snd_hda_jack_pin_sense() call at the beginning of this
+		 * function updates jack->pin_sense and clears
+		 * jack->jack_dirty, therefore snd_hda_jack_report_sync() will
+		 * not override the jack->pin_sense.
+		 *
+		 * snd_hda_jack_report_sync() is superfluous for dyn_pcm_assign
+		 * case. The jack->pin_sense update was already performed, and
+		 * hda_jack->jack is NULL for dyn_pcm_assign.
+		 *
+		 * Don't call snd_hda_jack_report_sync() for
+		 * dyn_pcm_assign.
+		 */
+		ret = ret && !spec->dyn_pcm_assign;
 	}
 	mutex_unlock(&per_pin->lock);
 	return ret;
 }
 
-static struct snd_jack *pin_idx_to_jack(struct hda_codec *codec,
-				 struct hdmi_spec_per_pin *per_pin)
-{
-	struct hdmi_spec *spec = codec->spec;
-	struct snd_jack *jack = NULL;
-	struct hda_jack_tbl *jack_tbl;
-
-	/* if !dyn_pcm_assign, get jack from hda_jack_tbl
-	 * in !dyn_pcm_assign case, spec->pcm_rec[].jack is not
-	 * NULL even after snd_hda_jack_tbl_clear() is called to
-	 * free snd_jack. This may cause access invalid memory
-	 * when calling snd_jack_report
-	 */
-	if (per_pin->pcm_idx >= 0 && spec->dyn_pcm_assign)
-		jack = spec->pcm_rec[per_pin->pcm_idx].jack;
-	else if (!spec->dyn_pcm_assign) {
-		/*
-		 * jack tbl doesn't support DP MST
-		 * DP MST will use dyn_pcm_assign,
-		 * so DP MST will never come here
-		 */
-		jack_tbl = snd_hda_jack_tbl_get_mst(codec, per_pin->pin_nid,
-						    per_pin->dev_id);
-		if (jack_tbl)
-			jack = jack_tbl->jack;
-	}
-	return jack;
-}
-
 /* update ELD and jack state via audio component */
 static void sync_eld_via_acomp(struct hda_codec *codec,
 			       struct hdmi_spec_per_pin *per_pin)
@@ -1677,10 +1709,10 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
 	/* pcm_idx >=0 before update_eld() means it is in monitor
 	 * disconnected event. Jack must be fetched before update_eld()
 	 */
-	jack = pin_idx_to_jack(codec, per_pin);
+	jack = pin_idx_to_pcm_jack(codec, per_pin);
 	changed = update_eld(codec, per_pin, eld);
 	if (jack == NULL)
-		jack = pin_idx_to_jack(codec, per_pin);
+		jack = pin_idx_to_pcm_jack(codec, per_pin);
 	if (changed && jack)
 		snd_jack_report(jack,
 				(eld->monitor_present && eld->eld_valid) ?
@@ -4256,6 +4288,7 @@ HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI",	patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI",	patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI",	patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI",	patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI",	patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI",	patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI",	patch_i915_byt_hdmi),
 HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI",	patch_i915_byt_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 3b38a13..4770fb3 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9111,6 +9111,7 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
 	SND_HDA_PIN_QUIRK(0x10ec0671, 0x103c, "HP cPC", ALC671_FIXUP_HP_HEADSET_MIC2,
 		{0x14, 0x01014010},
 		{0x17, 0x90170150},
+		{0x19, 0x02a11060},
 		{0x1b, 0x01813030},
 		{0x21, 0x02211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0671, 0x103c, "HP cPC", ALC671_FIXUP_HP_HEADSET_MIC2,
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index c1e15ec..cc06f0a 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -4802,7 +4802,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
 		break;
 	}
 	case SNDRV_HDSP_IOCTL_UPLOAD_FIRMWARE: {
-		struct hdsp_firmware __user *firmware;
+		struct hdsp_firmware firmware;
 		u32 __user *firmware_data;
 		int err;
 
@@ -4815,10 +4815,9 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
 
 		dev_info(hdsp->card->dev,
 			 "initializing firmware upload\n");
-		firmware = (struct hdsp_firmware __user *)argp;
-
-		if (get_user(firmware_data, (__force void __user **)&firmware->firmware_data))
+		if (copy_from_user(&firmware, argp, sizeof(firmware)))
 			return -EFAULT;
+		firmware_data = (u32 __user *)firmware.firmware_data;
 
 		if (hdsp_check_for_iobox (hdsp))
 			return -EIO;
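
A minimal sketch (not part of the patch) of the pattern the hdsp
firmware-upload ioctl switches to here: copy the whole argument struct from
user space first, then cast the embedded member to a __user pointer, rather
than using get_user() on the pointer field with __force casts. The "demo"
names below are hypothetical.

#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_firmware {
	void *firmware_data;	/* really a pointer into user space */
};

static int demo_upload_firmware(void __user *argp)
{
	struct demo_firmware fw;
	u32 __user *data;
	u32 first_word;

	if (copy_from_user(&fw, argp, sizeof(fw)))
		return -EFAULT;
	data = (u32 __user *)fw.firmware_data;
	if (get_user(first_word, data))
		return -EFAULT;
	/* ... stream the remaining words to the hardware ... */
	return 0;
}
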
diff --git a/sound/soc/amd/raven/acp3x-i2s.c b/sound/soc/amd/raven/acp3x-i2s.c
index bf51cad..31cd400 100644
--- a/sound/soc/amd/raven/acp3x-i2s.c
+++ b/sound/soc/amd/raven/acp3x-i2s.c
@@ -234,30 +234,32 @@ static int acp3x_i2s_trigger(struct snd_pcm_substream *substream,
 			switch (rtd->i2s_instance) {
 			case I2S_BT_INSTANCE:
 				reg_val = mmACP_BTTDM_ITER;
-				ier_val = mmACP_BTTDM_IER;
 				break;
 			case I2S_SP_INSTANCE:
 			default:
 				reg_val = mmACP_I2STDM_ITER;
-				ier_val = mmACP_I2STDM_IER;
 			}
 
 		} else {
 			switch (rtd->i2s_instance) {
 			case I2S_BT_INSTANCE:
 				reg_val = mmACP_BTTDM_IRER;
-				ier_val = mmACP_BTTDM_IER;
 				break;
 			case I2S_SP_INSTANCE:
 			default:
 				reg_val = mmACP_I2STDM_IRER;
-				ier_val = mmACP_I2STDM_IER;
 			}
 		}
 		val = rv_readl(rtd->acp3x_base + reg_val);
 		val = val & ~BIT(0);
 		rv_writel(val, rtd->acp3x_base + reg_val);
-		rv_writel(0, rtd->acp3x_base + ier_val);
+
+		if (!(rv_readl(rtd->acp3x_base + mmACP_BTTDM_ITER) & BIT(0)) &&
+		     !(rv_readl(rtd->acp3x_base + mmACP_BTTDM_IRER) & BIT(0)))
+			rv_writel(0, rtd->acp3x_base + mmACP_BTTDM_IER);
+		if (!(rv_readl(rtd->acp3x_base + mmACP_I2STDM_ITER) & BIT(0)) &&
+		     !(rv_readl(rtd->acp3x_base + mmACP_I2STDM_IRER) & BIT(0)))
+			rv_writel(0, rtd->acp3x_base + mmACP_I2STDM_IER);
 		ret = 0;
 		break;
 	default:
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
index 5c3ec3c..aecc3c0 100644
--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
+++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
@@ -349,13 +349,6 @@ static int acp3x_dma_close(struct snd_soc_component *component,
 	component = snd_soc_rtdcom_lookup(prtd, DRV_NAME);
 	adata = dev_get_drvdata(component->dev);
 
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		adata->play_stream = NULL;
-		adata->i2ssp_play_stream = NULL;
-	} else {
-		adata->capture_stream = NULL;
-		adata->i2ssp_capture_stream = NULL;
-	}
 
 	/* Disable ACP irq, when the current stream is being closed and
 	 * another stream is also not active.
@@ -363,6 +356,13 @@ static int acp3x_dma_close(struct snd_soc_component *component,
 	if (!adata->play_stream && !adata->capture_stream &&
 		!adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
 		rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		adata->play_stream = NULL;
+		adata->i2ssp_play_stream = NULL;
+	} else {
+		adata->capture_stream = NULL;
+		adata->i2ssp_capture_stream = NULL;
+	}
 	return 0;
 }
 
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index c9eb683..7e90f5d 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -214,7 +214,7 @@
 	select SND_SOC_UDA134X
 	select SND_SOC_UDA1380 if I2C
 	select SND_SOC_WCD9335 if SLIMBUS
-	select SND_SOC_WCD934X if MFD_WCD934X
+	select SND_SOC_WCD934X if MFD_WCD934X && COMMON_CLK
 	select SND_SOC_WL1273 if MFD_WL1273_CORE
 	select SND_SOC_WM0010 if SPI_MASTER
 	select SND_SOC_WM1250_EV1 if I2C
@@ -1334,6 +1334,7 @@
 
 config SND_SOC_WCD934X
 	tristate "WCD9340/WCD9341 Codec"
+	depends on COMMON_CLK
 	depends on MFD_WCD934X
 	help
 	  The WCD9340/9341 is a audio codec IC Integrated in
diff --git a/sound/soc/codecs/ak4104.c b/sound/soc/codecs/ak4104.c
index e8c5fda..979cfb1 100644
--- a/sound/soc/codecs/ak4104.c
+++ b/sound/soc/codecs/ak4104.c
@@ -295,8 +295,7 @@ static int ak4104_spi_probe(struct spi_device *spi)
 
 	reset_gpiod = devm_gpiod_get_optional(&spi->dev, "reset",
 					      GPIOD_OUT_HIGH);
-	if (IS_ERR(reset_gpiod) &&
-	    PTR_ERR(reset_gpiod) == -EPROBE_DEFER)
+	if (PTR_ERR(reset_gpiod) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 
 	/* read the 'reserved' register - according to the datasheet, it
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 793a14d..5f25b9f 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -681,8 +681,7 @@ static int cs4270_i2c_probe(struct i2c_client *i2c_client,
 
 	reset_gpiod = devm_gpiod_get_optional(&i2c_client->dev, "reset",
 					      GPIOD_OUT_HIGH);
-	if (IS_ERR(reset_gpiod) &&
-	    PTR_ERR(reset_gpiod) == -EPROBE_DEFER)
+	if (PTR_ERR(reset_gpiod) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 
 	cs4270->regmap = devm_regmap_init_i2c(i2c_client, &cs4270_regmap);
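
A minimal sketch (not part of the patch) of why the IS_ERR() test can be
dropped in the ak4104 and cs4270 probes here: devm_gpiod_get_optional()
returns NULL when no GPIO is described and an ERR_PTR() on failure, and
PTR_ERR(NULL) evaluates to 0, so comparing PTR_ERR() against -EPROBE_DEFER
is safe on its own. The "demo" names below are hypothetical.

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int demo_get_reset_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	/* NULL (no GPIO wired up) passes: PTR_ERR(NULL) == 0 */
	if (PTR_ERR(gpiod) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	*out = gpiod;
	return 0;
}
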
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 0313e11..5bc2c64 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -52,7 +52,8 @@ static void max98090_shdn_restore_locked(struct max98090_priv *max98090)
 
 static void max98090_shdn_save(struct max98090_priv *max98090)
 {
-	mutex_lock(&max98090->component->card->dapm_mutex);
+	mutex_lock_nested(&max98090->component->card->dapm_mutex,
+			  SND_SOC_DAPM_CLASS_RUNTIME);
 	max98090_shdn_save_locked(max98090);
 }
 
diff --git a/sound/soc/codecs/rt1015.c b/sound/soc/codecs/rt1015.c
index 4a9c5b5..6d490e2 100644
--- a/sound/soc/codecs/rt1015.c
+++ b/sound/soc/codecs/rt1015.c
@@ -389,7 +389,7 @@ static const char * const rt1015_boost_mode[] = {
 	"Bypass", "Adaptive", "Fixed Adaptive"
 };
 
-static const SOC_ENUM_SINGLE_DECL(rt1015_boost_mode_enum, 0, 0,
+static SOC_ENUM_SINGLE_DECL(rt1015_boost_mode_enum, 0, 0,
 	rt1015_boost_mode);
 
 static int rt1015_boost_mode_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/soc/codecs/rt1308-sdw.c b/sound/soc/codecs/rt1308-sdw.c
index 8a03dbf..d930f60 100644
--- a/sound/soc/codecs/rt1308-sdw.c
+++ b/sound/soc/codecs/rt1308-sdw.c
@@ -673,7 +673,7 @@ static const struct sdw_device_id rt1308_id[] = {
 };
 MODULE_DEVICE_TABLE(sdw, rt1308_id);
 
-static int rt1308_dev_suspend(struct device *dev)
+static int __maybe_unused rt1308_dev_suspend(struct device *dev)
 {
 	struct rt1308_sdw_priv *rt1308 = dev_get_drvdata(dev);
 
@@ -687,7 +687,7 @@ static int rt1308_dev_suspend(struct device *dev)
 
 #define RT1308_PROBE_TIMEOUT 2000
 
-static int rt1308_dev_resume(struct device *dev)
+static int __maybe_unused rt1308_dev_resume(struct device *dev)
 {
 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
 	struct rt1308_sdw_priv *rt1308 = dev_get_drvdata(dev);
diff --git a/sound/soc/codecs/rt700-sdw.c b/sound/soc/codecs/rt700-sdw.c
index a4b9542..d4e0f95 100644
--- a/sound/soc/codecs/rt700-sdw.c
+++ b/sound/soc/codecs/rt700-sdw.c
@@ -486,7 +486,7 @@ static const struct sdw_device_id rt700_id[] = {
 };
 MODULE_DEVICE_TABLE(sdw, rt700_id);
 
-static int rt700_dev_suspend(struct device *dev)
+static int __maybe_unused rt700_dev_suspend(struct device *dev)
 {
 	struct rt700_priv *rt700 = dev_get_drvdata(dev);
 
@@ -500,7 +500,7 @@ static int rt700_dev_suspend(struct device *dev)
 
 #define RT700_PROBE_TIMEOUT 2000
 
-static int rt700_dev_resume(struct device *dev)
+static int __maybe_unused rt700_dev_resume(struct device *dev)
 {
 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
 	struct rt700_priv *rt700 = dev_get_drvdata(dev);
diff --git a/sound/soc/codecs/rt711-sdw.c b/sound/soc/codecs/rt711-sdw.c
index 85e62e1..fc3a3fa 100644
--- a/sound/soc/codecs/rt711-sdw.c
+++ b/sound/soc/codecs/rt711-sdw.c
@@ -487,7 +487,7 @@ static const struct sdw_device_id rt711_id[] = {
 };
 MODULE_DEVICE_TABLE(sdw, rt711_id);
 
-static int rt711_dev_suspend(struct device *dev)
+static int __maybe_unused rt711_dev_suspend(struct device *dev)
 {
 	struct rt711_priv *rt711 = dev_get_drvdata(dev);
 
@@ -501,7 +501,7 @@ static int rt711_dev_suspend(struct device *dev)
 
 #define RT711_PROBE_TIMEOUT 2000
 
-static int rt711_dev_resume(struct device *dev)
+static int __maybe_unused rt711_dev_resume(struct device *dev)
 {
 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
 	struct rt711_priv *rt711 = dev_get_drvdata(dev);
diff --git a/sound/soc/codecs/rt715-sdw.c b/sound/soc/codecs/rt715-sdw.c
index 6d892c4..64ef56e 100644
--- a/sound/soc/codecs/rt715-sdw.c
+++ b/sound/soc/codecs/rt715-sdw.c
@@ -549,7 +549,7 @@ static const struct sdw_device_id rt715_id[] = {
 };
 MODULE_DEVICE_TABLE(sdw, rt715_id);
 
-static int rt715_dev_suspend(struct device *dev)
+static int __maybe_unused rt715_dev_suspend(struct device *dev)
 {
 	struct rt715_priv *rt715 = dev_get_drvdata(dev);
 
@@ -563,7 +563,7 @@ static int rt715_dev_suspend(struct device *dev)
 
 #define RT715_PROBE_TIMEOUT 2000
 
-static int rt715_dev_resume(struct device *dev)
+static int __maybe_unused rt715_dev_resume(struct device *dev)
 {
 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
 	struct rt715_priv *rt715 = dev_get_drvdata(dev);
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index b4e9a6c..d087f3b 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -1098,11 +1098,9 @@ static int aic32x4_setup_regulators(struct device *dev,
 			return PTR_ERR(aic32x4->supply_av);
 		}
 	} else {
-		if (IS_ERR(aic32x4->supply_dv) &&
-				PTR_ERR(aic32x4->supply_dv) == -EPROBE_DEFER)
+		if (PTR_ERR(aic32x4->supply_dv) == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
-		if (IS_ERR(aic32x4->supply_av) &&
-				PTR_ERR(aic32x4->supply_av) == -EPROBE_DEFER)
+		if (PTR_ERR(aic32x4->supply_av) == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
 	}
 
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 33b13f3..9177401 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -617,12 +617,15 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
 		snd_soc_dapm_add_routes(&card->dapm, broxton_map,
 					ARRAY_SIZE(broxton_map));
 
-	pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
-			       head);
-	component = pcm->codec_dai->component;
+	if (list_empty(&ctx->hdmi_pcm_list))
+		return -EINVAL;
 
-	if (ctx->common_hdmi_codec_drv)
+	if (ctx->common_hdmi_codec_drv) {
+		pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
+				       head);
+		component = pcm->codec_dai->component;
 		return hda_dsp_hdmi_build_controls(card, component);
+	}
 
 	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
 		component = pcm->codec_dai->component;
@@ -643,9 +646,6 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
 		i++;
 	}
 
-	if (!component)
-		return -EINVAL;
-
 	return hdac_hdmi_jack_port_init(component, &card->dapm);
 }
 
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index 067a97e..4b67f26 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -529,12 +529,15 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
 	int err, i = 0;
 	char jack_name[NAME_SIZE];
 
-	pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
-			       head);
-	component = pcm->codec_dai->component;
+	if (list_empty(&ctx->hdmi_pcm_list))
+		return -EINVAL;
 
-	if (ctx->common_hdmi_codec_drv)
+	if (ctx->common_hdmi_codec_drv) {
+		pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
+				       head);
+		component = pcm->codec_dai->component;
 		return hda_dsp_hdmi_build_controls(card, component);
+	}
 
 	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
 		component = pcm->codec_dai->component;
@@ -555,9 +558,6 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
 		i++;
 	}
 
-	if (!component)
-		return -EINVAL;
-
 	return hdac_hdmi_jack_port_init(component, &card->dapm);
 }
 
diff --git a/sound/soc/intel/boards/cml_rt1011_rt5682.c b/sound/soc/intel/boards/cml_rt1011_rt5682.c
index d6efc55..dd80d01 100644
--- a/sound/soc/intel/boards/cml_rt1011_rt5682.c
+++ b/sound/soc/intel/boards/cml_rt1011_rt5682.c
@@ -241,12 +241,15 @@ static int sof_card_late_probe(struct snd_soc_card *card)
 	struct hdmi_pcm *pcm;
 	int ret, i = 0;
 
-	pcm = list_first_entry(&ctx->hdmi_pcm_list, struct hdmi_pcm,
-			       head);
-	component = pcm->codec_dai->component;
+	if (list_empty(&ctx->hdmi_pcm_list))
+		return -EINVAL;
 
-	if (ctx->common_hdmi_codec_drv)
+	if (ctx->common_hdmi_codec_drv) {
+		pcm = list_first_entry(&ctx->hdmi_pcm_list, struct hdmi_pcm,
+				       head);
+		component = pcm->codec_dai->component;
 		return hda_dsp_hdmi_build_controls(card, component);
+	}
 
 	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
 		component = pcm->codec_dai->component;
@@ -265,8 +268,6 @@ static int sof_card_late_probe(struct snd_soc_card *card)
 
 		i++;
 	}
-	if (!component)
-		return -EINVAL;
 
 	return hdac_hdmi_jack_port_init(component, &card->dapm);
 }
diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
index 4a6d117..8e947ba 100644
--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
+++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
@@ -534,15 +534,18 @@ static int glk_card_late_probe(struct snd_soc_card *card)
 	struct snd_soc_component *component = NULL;
 	char jack_name[NAME_SIZE];
 	struct glk_hdmi_pcm *pcm;
-	int err = 0;
+	int err;
 	int i = 0;
 
-	pcm = list_first_entry(&ctx->hdmi_pcm_list, struct glk_hdmi_pcm,
-			       head);
-	component = pcm->codec_dai->component;
+	if (list_empty(&ctx->hdmi_pcm_list))
+		return -EINVAL;
 
-	if (ctx->common_hdmi_codec_drv)
+	if (ctx->common_hdmi_codec_drv) {
+		pcm = list_first_entry(&ctx->hdmi_pcm_list, struct glk_hdmi_pcm,
+				       head);
+		component = pcm->codec_dai->component;
 		return hda_dsp_hdmi_build_controls(card, component);
+	}
 
 	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
 		component = pcm->codec_dai->component;
@@ -563,9 +566,6 @@ static int glk_card_late_probe(struct snd_soc_card *card)
 		i++;
 	}
 
-	if (!component)
-		return -EINVAL;
-
 	return hdac_hdmi_jack_port_init(component, &card->dapm);
 }
 
diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
index 8a13231..5d87887 100644
--- a/sound/soc/intel/boards/sof_rt5682.c
+++ b/sound/soc/intel/boards/sof_rt5682.c
@@ -273,19 +273,22 @@ static int sof_card_late_probe(struct snd_soc_card *card)
 	struct snd_soc_component *component = NULL;
 	char jack_name[NAME_SIZE];
 	struct sof_hdmi_pcm *pcm;
-	int err = 0;
+	int err;
 	int i = 0;
 
 	/* HDMI is not supported by SOF on Baytrail/CherryTrail */
 	if (is_legacy_cpu)
 		return 0;
 
-	pcm = list_first_entry(&ctx->hdmi_pcm_list, struct sof_hdmi_pcm,
-			       head);
-	component = pcm->codec_dai->component;
+	if (list_empty(&ctx->hdmi_pcm_list))
+		return -EINVAL;
 
-	if (ctx->common_hdmi_codec_drv)
+	if (ctx->common_hdmi_codec_drv) {
+		pcm = list_first_entry(&ctx->hdmi_pcm_list, struct sof_hdmi_pcm,
+				       head);
+		component = pcm->codec_dai->component;
 		return hda_dsp_hdmi_build_controls(card, component);
+	}
 
 	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
 		component = pcm->codec_dai->component;
@@ -305,8 +308,6 @@ static int sof_card_late_probe(struct snd_soc_card *card)
 
 		i++;
 	}
-	if (!component)
-		return -EINVAL;
 
 	return hdac_hdmi_jack_port_init(component, &card->dapm);
 }
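
The five Intel board hunks above (bxt_da7219_max98357a, bxt_rt298, cml_rt1011_rt5682, glk_rt5682_max98357a and sof_rt5682) apply the same fix: guard list_first_entry() with a list_empty() check, because on an empty list list_first_entry() returns the list head reinterpreted as an entry rather than a valid element, and the later "if (!component)" test never caught that. Below is a minimal userspace sketch of the failure mode, using a stripped-down re-implementation of the list macros; the struct and helper names are illustrative, not the real <linux/list.h>.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

static bool list_empty(const struct list_head *head)
{
	return head->next == head;
}

struct hdmi_pcm {		/* stand-in for the driver's per-PCM bookkeeping */
	int device;
	struct list_head head;
};

int main(void)
{
	struct list_head pcm_list = LIST_HEAD_INIT(pcm_list);

	/*
	 * Unchecked: on an empty list this "entry" is really the list head
	 * itself, offset backwards - dereferencing it would be garbage.
	 */
	struct hdmi_pcm *bogus = list_first_entry(&pcm_list, struct hdmi_pcm, head);
	printf("bogus entry at %p (list head at %p)\n", (void *)bogus, (void *)&pcm_list);

	/* Guarded, as in the fixed late_probe() callbacks: bail out first. */
	if (list_empty(&pcm_list)) {
		printf("hdmi_pcm_list empty, returning -EINVAL\n");
		return 1;
	}
	return 0;
}

Moving list_first_entry() inside the common_hdmi_codec_drv branch in the patches above follows the same logic: the first entry is only fetched once the guard has proven the list is non-empty.
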
diff --git a/sound/soc/intel/common/soc-acpi-intel-icl-match.c b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
index 67e9da4..7527330 100644
--- a/sound/soc/intel/common/soc-acpi-intel-icl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
@@ -59,7 +59,7 @@ static const u64 rt1308_2_adr[] = {
 };
 
 static const u64 rt715_3_adr[] = {
-	0x000310025D715000
+	0x000310025D071500
 };
 
 static const struct snd_soc_acpi_link_adr icl_3_in_1_default[] = {
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
index 44f9c04..34cefba 100644
--- a/sound/soc/sof/core.c
+++ b/sound/soc/sof/core.c
@@ -224,12 +224,12 @@ static int sof_probe_continue(struct snd_sof_dev *sdev)
 	if (ret < 0) {
 		dev_err(sdev->dev,
 			"error: failed to register DSP DAI driver %d\n", ret);
-		goto fw_run_err;
+		goto fw_trace_err;
 	}
 
 	ret = snd_sof_machine_register(sdev, plat_data);
 	if (ret < 0)
-		goto fw_run_err;
+		goto fw_trace_err;
 
 	/*
 	 * Some platforms in SOF, ex: BYT, may not have their platform PM
@@ -244,7 +244,8 @@ static int sof_probe_continue(struct snd_sof_dev *sdev)
 
 	return 0;
 
-#if !IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
+fw_trace_err:
+	snd_sof_free_trace(sdev);
 fw_run_err:
 	snd_sof_fw_unload(sdev);
 fw_load_err:
@@ -253,21 +254,10 @@ static int sof_probe_continue(struct snd_sof_dev *sdev)
 	snd_sof_free_debug(sdev);
 dbg_err:
 	snd_sof_remove(sdev);
-#else
 
-	/*
-	 * when the probe_continue is handled in a work queue, the
-	 * probe does not fail so we don't release resources here.
-	 * They will be released with an explicit call to
-	 * snd_sof_device_remove() when the PCI/ACPI device is removed
-	 */
-
-fw_run_err:
-fw_load_err:
-ipc_err:
-dbg_err:
-
-#endif
+	/* all resources freed, update state to match */
+	sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
+	sdev->first_boot = true;
 
 	return ret;
 }
@@ -350,10 +340,12 @@ int snd_sof_device_remove(struct device *dev)
 	if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
 		cancel_work_sync(&sdev->probe_work);
 
-	snd_sof_fw_unload(sdev);
-	snd_sof_ipc_free(sdev);
-	snd_sof_free_debug(sdev);
-	snd_sof_free_trace(sdev);
+	if (sdev->fw_state > SOF_FW_BOOT_NOT_STARTED) {
+		snd_sof_fw_unload(sdev);
+		snd_sof_ipc_free(sdev);
+		snd_sof_free_debug(sdev);
+		snd_sof_free_trace(sdev);
+	}
 
 	/*
 	 * Unregister machine driver. This will unbind the snd_card which
@@ -361,13 +353,15 @@ int snd_sof_device_remove(struct device *dev)
 	 * before freeing the snd_card.
 	 */
 	snd_sof_machine_unregister(sdev, pdata);
+
 	/*
 	 * Unregistering the machine driver results in unloading the topology.
 	 * Some widgets, ex: scheduler, attempt to power down the core they are
 	 * scheduled on, when they are unloaded. Therefore, the DSP must be
 	 * removed only after the topology has been unloaded.
 	 */
-	snd_sof_remove(sdev);
+	if (sdev->fw_state > SOF_FW_BOOT_NOT_STARTED)
+		snd_sof_remove(sdev);
 
 	/* release firmware */
 	release_firmware(pdata->fw);
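
The sof_probe_continue() change above adds a fw_trace_err label so trace resources allocated after firmware boot are released before the firmware is unloaded, keeping the unwind labels in reverse order of acquisition, and it replaces the work-queue special case with a reset of fw_state/first_boot so snd_sof_device_remove() can tell whether anything is left to free. A small self-contained sketch of the label-per-stage unwind idiom follows; the names are generic, not the SOF API.

#include <stdio.h>
#include <stdlib.h>

static int probe(void)
{
	char *fw = NULL, *trace = NULL;
	int ret;

	fw = malloc(64);		/* stage 1: "load firmware" */
	if (!fw)
		return -1;

	trace = malloc(64);		/* stage 2: "init trace" */
	if (!trace) {
		ret = -1;
		goto fw_err;		/* undo stage 1 only */
	}

	if (/* stage 3 fails, e.g. registering a driver */ 1) {
		ret = -1;
		goto trace_err;		/* undo stage 2, then fall through to stage 1 */
	}
	return 0;

trace_err:
	free(trace);			/* newest resource first */
fw_err:
	free(fw);
	return ret;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}
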
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
index 78dfd5f..9106ab8 100644
--- a/sound/soc/sof/intel/hda-codec.c
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -170,23 +170,14 @@ EXPORT_SYMBOL_NS(hda_codec_probe_bus, SND_SOC_SOF_HDA_AUDIO_CODEC);
 #if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) || \
 	IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)
 
-void hda_codec_i915_get(struct snd_sof_dev *sdev)
+void hda_codec_i915_display_power(struct snd_sof_dev *sdev, bool enable)
 {
 	struct hdac_bus *bus = sof_to_bus(sdev);
 
-	dev_dbg(bus->dev, "Turning i915 HDAC power on\n");
-	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
+	dev_dbg(bus->dev, "Turning i915 HDAC power %d\n", enable);
+	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, enable);
 }
-EXPORT_SYMBOL_NS(hda_codec_i915_get, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
-
-void hda_codec_i915_put(struct snd_sof_dev *sdev)
-{
-	struct hdac_bus *bus = sof_to_bus(sdev);
-
-	dev_dbg(bus->dev, "Turning i915 HDAC power off\n");
-	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
-}
-EXPORT_SYMBOL_NS(hda_codec_i915_put, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+EXPORT_SYMBOL_NS(hda_codec_i915_display_power, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
 
 int hda_codec_i915_init(struct snd_sof_dev *sdev)
 {
@@ -198,7 +189,7 @@ int hda_codec_i915_init(struct snd_sof_dev *sdev)
 	if (ret < 0)
 		return ret;
 
-	hda_codec_i915_get(sdev);
+	hda_codec_i915_display_power(sdev, true);
 
 	return 0;
 }
@@ -209,7 +200,7 @@ int hda_codec_i915_exit(struct snd_sof_dev *sdev)
 	struct hdac_bus *bus = sof_to_bus(sdev);
 	int ret;
 
-	hda_codec_i915_put(sdev);
+	hda_codec_i915_display_power(sdev, false);
 
 	ret = snd_hdac_i915_exit(bus);
 
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index d08462f..65b86dd 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -380,7 +380,8 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
 	/* create codec instances */
 	hda_codec_probe_bus(sdev, hda_codec_use_common_hdmi);
 
-	hda_codec_i915_put(sdev);
+	if (!HDA_IDISP_CODEC(bus->codec_mask))
+		hda_codec_i915_display_power(sdev, false);
 
 	/*
 	 * we are done probing so decrement link counts
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
index a4d030b..6191d91 100644
--- a/sound/soc/sof/intel/hda.h
+++ b/sound/soc/sof/intel/hda.h
@@ -586,15 +586,14 @@ void hda_codec_jack_check(struct snd_sof_dev *sdev);
 	(IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) || \
 	 IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
 
-void hda_codec_i915_get(struct snd_sof_dev *sdev);
-void hda_codec_i915_put(struct snd_sof_dev *sdev);
+void hda_codec_i915_display_power(struct snd_sof_dev *sdev, bool enable);
 int hda_codec_i915_init(struct snd_sof_dev *sdev);
 int hda_codec_i915_exit(struct snd_sof_dev *sdev);
 
 #else
 
-static inline void hda_codec_i915_get(struct snd_sof_dev *sdev)  { }
-static inline void hda_codec_i915_put(struct snd_sof_dev *sdev)  { }
+static inline void hda_codec_i915_display_power(struct snd_sof_dev *sdev,
+						bool enable) { }
 static inline int hda_codec_i915_init(struct snd_sof_dev *sdev) { return 0; }
 static inline int hda_codec_i915_exit(struct snd_sof_dev *sdev) { return 0; }
 
diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
index 314f309..29435ba 100644
--- a/sound/soc/sof/pcm.c
+++ b/sound/soc/sof/pcm.c
@@ -591,6 +591,11 @@ static int sof_pcm_new(struct snd_soc_component *component,
 		"spcm: allocate %s playback DMA buffer size 0x%x max 0x%x\n",
 		caps->name, caps->buffer_size_min, caps->buffer_size_max);
 
+	if (!pcm->streams[stream].substream) {
+		dev_err(component->dev, "error: NULL playback substream!\n");
+		return -EINVAL;
+	}
+
 	snd_pcm_set_managed_buffer(pcm->streams[stream].substream,
 				   SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
 				   le32_to_cpu(caps->buffer_size_min),
@@ -609,6 +614,11 @@ static int sof_pcm_new(struct snd_soc_component *component,
 		"spcm: allocate %s capture DMA buffer size 0x%x max 0x%x\n",
 		caps->name, caps->buffer_size_min, caps->buffer_size_max);
 
+	if (!pcm->streams[stream].substream) {
+		dev_err(component->dev, "error: NULL capture substream!\n");
+		return -EINVAL;
+	}
+
 	snd_pcm_set_managed_buffer(pcm->streams[stream].substream,
 				   SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
 				   le32_to_cpu(caps->buffer_size_min),
diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
index 84290bb..a0cde05 100644
--- a/sound/soc/sof/pm.c
+++ b/sound/soc/sof/pm.c
@@ -56,6 +56,10 @@ static int sof_resume(struct device *dev, bool runtime_resume)
 	if (!sof_ops(sdev)->resume || !sof_ops(sdev)->runtime_resume)
 		return 0;
 
+	/* DSP was never successfully started, nothing to resume */
+	if (sdev->first_boot)
+		return 0;
+
 	/*
 	 * if the runtime_resume flag is set, call the runtime_resume routine
 	 * or else call the system resume routine
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
index d855bc2..cec631a 100644
--- a/sound/soc/sof/sof-pci-dev.c
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -235,6 +235,7 @@ static const struct sof_dev_desc jsl_desc = {
 	.chip_info = &jsl_chip_info,
 	.default_fw_path = "intel/sof",
 	.default_tplg_path = "intel/sof-tplg",
+	.default_fw_filename = "sof-jsl.ri",
 	.nocodec_tplg_filename = "sof-jsl-nocodec.tplg",
 	.ops = &sof_cnl_ops,
 };
@@ -416,6 +417,8 @@ static const struct pci_device_id sof_pci_ids[] = {
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
 	{ PCI_DEVICE(0x8086, 0x38c8),
 		.driver_data = (unsigned long)&jsl_desc},
+	{ PCI_DEVICE(0x8086, 0x4dc8),
+		.driver_data = (unsigned long)&jsl_desc},
 #endif
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP)
 	{ PCI_DEVICE(0x8086, 0x02c8),
diff --git a/sound/soc/sof/trace.c b/sound/soc/sof/trace.c
index 4bb6503..d815090 100644
--- a/sound/soc/sof/trace.c
+++ b/sound/soc/sof/trace.c
@@ -343,7 +343,10 @@ void snd_sof_free_trace(struct snd_sof_dev *sdev)
 
 	snd_sof_release_trace(sdev);
 
-	snd_dma_free_pages(&sdev->dmatb);
-	snd_dma_free_pages(&sdev->dmatp);
+	if (sdev->dma_trace_pages) {
+		snd_dma_free_pages(&sdev->dmatb);
+		snd_dma_free_pages(&sdev->dmatp);
+		sdev->dma_trace_pages = 0;
+	}
 }
 EXPORT_SYMBOL(snd_sof_free_trace);
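
The snd_sof_free_trace() hunk makes the DMA page release conditional on sdev->dma_trace_pages and clears it afterwards, so the fw_trace_err path added above (or a probe that failed before trace init) cannot free buffers that were never allocated or were already freed. The idiom reduced to plain C, with illustrative names:

#include <stdlib.h>

struct dev_state {
	void *trace_buf;
	int   trace_pages;	/* nonzero only while trace_buf is allocated */
};

static void free_trace(struct dev_state *s)
{
	if (s->trace_pages) {
		free(s->trace_buf);
		s->trace_buf = NULL;
		s->trace_pages = 0;	/* makes a second call a harmless no-op */
	}
}

int main(void)
{
	struct dev_state s = { malloc(4096), 1 };

	free_trace(&s);
	free_trace(&s);		/* safe: guard flag already cleared */
	return 0;
}
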
diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
index cbe598b..98a9fe6 100644
--- a/sound/soc/sunxi/sun4i-spdif.c
+++ b/sound/soc/sunxi/sun4i-spdif.c
@@ -555,7 +555,7 @@ static int sun4i_spdif_probe(struct platform_device *pdev)
 	if (quirks->has_reset) {
 		host->rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
 								      NULL);
-		if (IS_ERR(host->rst) && PTR_ERR(host->rst) == -EPROBE_DEFER) {
+		if (PTR_ERR(host->rst) == -EPROBE_DEFER) {
 			ret = -EPROBE_DEFER;
 			dev_err(&pdev->dev, "Failed to get reset: %d\n", ret);
 			return ret;
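
The ak4104, cs4270, aic32x4 and sun4i-spdif hunks above all drop the IS_ERR() half of the old "IS_ERR(p) && PTR_ERR(p) == -EPROBE_DEFER" test. That is safe because PTR_ERR() can only equal -EPROBE_DEFER when the pointer actually encodes that errno; an ordinary pointer would have to sit in the reserved top-of-address-space error range. Below is a userspace sketch of the encoding; the macros mimic include/linux/err.h, and the constants (MAX_ERRNO = 4095, EPROBE_DEFER = 517) are the usual kernel values, taken here as assumptions rather than quoted from this patch.

#include <stdio.h>

#define MAX_ERRNO	4095
#define EPROBE_DEFER	517

#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	int real_object = 42;
	void *ok = &real_object;			/* ordinary pointer */
	void *deferred = ERR_PTR(-EPROBE_DEFER);	/* encoded errno */

	/*
	 * The IS_ERR() check is redundant for this comparison: an ordinary
	 * pointer cannot live in the top-4095-byte error range, so
	 * PTR_ERR(ok) can never equal -EPROBE_DEFER.
	 */
	printf("ok:       PTR_ERR == -EPROBE_DEFER? %d\n",
	       PTR_ERR(ok) == -EPROBE_DEFER);
	printf("deferred: PTR_ERR == -EPROBE_DEFER? %d\n",
	       PTR_ERR(deferred) == -EPROBE_DEFER);
	printf("deferred: IS_ERR? %d\n", IS_ERR(deferred));
	return 0;
}
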
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index dbed3c5..d59882e 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -127,7 +127,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
 	struct device *dev = dai->dev;
 	struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
 	unsigned int mask, val, reg;
-	int ret, sample_size, srate, i2sclock, bitcnt, audio_bits;
+	int ret, sample_size, srate, i2sclock, bitcnt;
 	struct tegra30_ahub_cif_conf cif_conf;
 
 	if (params_channels(params) != 2)
@@ -137,19 +137,8 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
 		val = TEGRA30_I2S_CTRL_BIT_SIZE_16;
-		audio_bits = TEGRA30_AUDIOCIF_BITS_16;
 		sample_size = 16;
 		break;
-	case SNDRV_PCM_FORMAT_S24_LE:
-		val = TEGRA30_I2S_CTRL_BIT_SIZE_24;
-		audio_bits = TEGRA30_AUDIOCIF_BITS_24;
-		sample_size = 24;
-		break;
-	case SNDRV_PCM_FORMAT_S32_LE:
-		val = TEGRA30_I2S_CTRL_BIT_SIZE_32;
-		audio_bits = TEGRA30_AUDIOCIF_BITS_32;
-		sample_size = 32;
-		break;
 	default:
 		return -EINVAL;
 	}
@@ -181,8 +170,8 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
 	cif_conf.threshold = 0;
 	cif_conf.audio_channels = 2;
 	cif_conf.client_channels = 2;
-	cif_conf.audio_bits = audio_bits;
-	cif_conf.client_bits = audio_bits;
+	cif_conf.audio_bits = TEGRA30_AUDIOCIF_BITS_16;
+	cif_conf.client_bits = TEGRA30_AUDIOCIF_BITS_16;
 	cif_conf.expand = 0;
 	cif_conf.stereo_conv = 0;
 	cif_conf.replicate = 0;
@@ -317,18 +306,14 @@ static const struct snd_soc_dai_driver tegra30_i2s_dai_template = {
 		.channels_min = 2,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_96000,
-		.formats = SNDRV_PCM_FMTBIT_S32_LE |
-			   SNDRV_PCM_FMTBIT_S24_LE |
-			   SNDRV_PCM_FMTBIT_S16_LE,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
 	},
 	.capture = {
 		.stream_name = "Capture",
 		.channels_min = 2,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_96000,
-		.formats = SNDRV_PCM_FMTBIT_S32_LE |
-			   SNDRV_PCM_FMTBIT_S24_LE |
-			   SNDRV_PCM_FMTBIT_S16_LE,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
 	},
 	.ops = &tegra30_i2s_dai_ops,
 	.symmetric_rates = 1,
diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
index 94b903d..74c00c9 100644
--- a/sound/usb/mixer_scarlett_gen2.c
+++ b/sound/usb/mixer_scarlett_gen2.c
@@ -558,11 +558,11 @@ static const struct scarlett2_config
 
 /* proprietary request/response format */
 struct scarlett2_usb_packet {
-	u32 cmd;
-	u16 size;
-	u16 seq;
-	u32 error;
-	u32 pad;
+	__le32 cmd;
+	__le16 size;
+	__le16 seq;
+	__le32 error;
+	__le32 pad;
 	u8 data[];
 };
 
@@ -664,11 +664,11 @@ static int scarlett2_usb(
 			"Scarlett Gen 2 USB invalid response; "
 			   "cmd tx/rx %d/%d seq %d/%d size %d/%d "
 			   "error %d pad %d\n",
-			le16_to_cpu(req->cmd), le16_to_cpu(resp->cmd),
+			le32_to_cpu(req->cmd), le32_to_cpu(resp->cmd),
 			le16_to_cpu(req->seq), le16_to_cpu(resp->seq),
 			resp_size, le16_to_cpu(resp->size),
-			le16_to_cpu(resp->error),
-			le16_to_cpu(resp->pad));
+			le32_to_cpu(resp->error),
+			le32_to_cpu(resp->pad));
 		err = -EINVAL;
 		goto unlock;
 	}
@@ -687,7 +687,7 @@ static int scarlett2_usb(
 /* Send SCARLETT2_USB_DATA_CMD SCARLETT2_USB_CONFIG_SAVE */
 static void scarlett2_config_save(struct usb_mixer_interface *mixer)
 {
-	u32 req = cpu_to_le32(SCARLETT2_USB_CONFIG_SAVE);
+	__le32 req = cpu_to_le32(SCARLETT2_USB_CONFIG_SAVE);
 
 	scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
 		      &req, sizeof(u32),
@@ -713,11 +713,11 @@ static int scarlett2_usb_set_config(
 	const struct scarlett2_config config_item =
 	       scarlett2_config_items[config_item_num];
 	struct {
-		u32 offset;
-		u32 bytes;
-		s32 value;
+		__le32 offset;
+		__le32 bytes;
+		__le32 value;
 	} __packed req;
-	u32 req2;
+	__le32 req2;
 	int err;
 	struct scarlett2_mixer_data *private = mixer->private_data;
 
@@ -753,8 +753,8 @@ static int scarlett2_usb_get(
 	int offset, void *buf, int size)
 {
 	struct {
-		u32 offset;
-		u32 size;
+		__le32 offset;
+		__le32 size;
 	} __packed req;
 
 	req.offset = cpu_to_le32(offset);
@@ -794,8 +794,8 @@ static int scarlett2_usb_set_mix(struct usb_mixer_interface *mixer,
 	const struct scarlett2_device_info *info = private->info;
 
 	struct {
-		u16 mix_num;
-		u16 data[SCARLETT2_INPUT_MIX_MAX];
+		__le16 mix_num;
+		__le16 data[SCARLETT2_INPUT_MIX_MAX];
 	} __packed req;
 
 	int i, j;
@@ -850,9 +850,9 @@ static int scarlett2_usb_set_mux(struct usb_mixer_interface *mixer)
 	};
 
 	struct {
-		u16 pad;
-		u16 num;
-		u32 data[SCARLETT2_MUX_MAX];
+		__le16 pad;
+		__le16 num;
+		__le32 data[SCARLETT2_MUX_MAX];
 	} __packed req;
 
 	req.pad = 0;
@@ -911,9 +911,9 @@ static int scarlett2_usb_get_meter_levels(struct usb_mixer_interface *mixer,
 					  u16 *levels)
 {
 	struct {
-		u16 pad;
-		u16 num_meters;
-		u32 magic;
+		__le16 pad;
+		__le16 num_meters;
+		__le32 magic;
 	} __packed req;
 	u32 resp[SCARLETT2_NUM_METERS];
 	int i, err;
diff --git a/sound/usb/validate.c b/sound/usb/validate.c
index 4034c20..6fe206f 100644
--- a/sound/usb/validate.c
+++ b/sound/usb/validate.c
@@ -110,7 +110,7 @@ static bool validate_processing_unit(const void *p,
 	default:
 		if (v->type == UAC1_EXTENSION_UNIT)
 			return true; /* OK */
-		switch (d->wProcessType) {
+		switch (le16_to_cpu(d->wProcessType)) {
 		case UAC_PROCESS_UP_DOWNMIX:
 		case UAC_PROCESS_DOLBY_PROLOGIC:
 			if (d->bLength < len + 1) /* bNrModes */
@@ -125,7 +125,7 @@ static bool validate_processing_unit(const void *p,
 	case UAC_VERSION_2:
 		if (v->type == UAC2_EXTENSION_UNIT_V2)
 			return true; /* OK */
-		switch (d->wProcessType) {
+		switch (le16_to_cpu(d->wProcessType)) {
 		case UAC2_PROCESS_UP_DOWNMIX:
 		case UAC2_PROCESS_DOLBY_PROLOCIC: /* SiC! */
 			if (d->bLength < len + 1) /* bNrModes */
@@ -142,7 +142,7 @@ static bool validate_processing_unit(const void *p,
 			len += 2; /* wClusterDescrID */
 			break;
 		}
-		switch (d->wProcessType) {
+		switch (le16_to_cpu(d->wProcessType)) {
 		case UAC3_PROCESS_UP_DOWNMIX:
 			if (d->bLength < len + 1) /* bNrModes */
 				return false;
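
The scarlett_gen2 and validate.c hunks above are both endianness fixes: USB wire-format fields are little-endian, so the struct members gain __le32/__le16 annotations, every access goes through cpu_to_le32()/le32_to_cpu(), and the 32-bit fields previously read with le16_to_cpu() now use the matching 32-bit helper. A portable userspace sketch of what those helpers do, written with explicit byte shifts instead of the kernel's type-checked versions:

#include <stdint.h>
#include <stdio.h>

/* Store a host-order 32-bit value as little-endian bytes (cpu_to_le32). */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v;
	p[1] = v >> 8;
	p[2] = v >> 16;
	p[3] = v >> 24;
}

/* Read little-endian bytes back into host order (le32_to_cpu). */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	uint8_t wire[4];
	uint32_t cmd = 0x00800001;	/* hypothetical command code */

	put_le32(wire, cmd);
	printf("wire bytes: %02x %02x %02x %02x\n",
	       wire[0], wire[1], wire[2], wire[3]);
	printf("decoded:    0x%08x\n", get_le32(wire));

	/*
	 * Reading only 16 of the 32 bits (the old le16_to_cpu() call on a
	 * 32-bit field) silently truncates the value:
	 */
	printf("truncated:  0x%04x\n", (uint16_t)(wire[0] | wire[1] << 8));
	return 0;
}
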
diff --git a/tools/Makefile b/tools/Makefile
index 7e42f7b..bd77881 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -28,6 +28,7 @@
 	@echo '  pci                    - PCI tools'
 	@echo '  perf                   - Linux performance measurement and analysis tool'
 	@echo '  selftests              - various kernel selftests'
+	@echo '  bootconfig             - boot config tool'
 	@echo '  spi                    - spi tools'
 	@echo '  tmon                   - thermal monitoring and tuning tool'
 	@echo '  turbostat              - Intel CPU idle stats and freq reporting tool'
@@ -63,7 +64,7 @@
 cpupower: FORCE
 	$(call descend,power/$@)
 
-cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE
+cgroup firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE
 	$(call descend,$@)
 
 liblockdep: FORCE
@@ -96,7 +97,7 @@
 	$(call descend,kvm/$@)
 
 all: acpi cgroup cpupower gpio hv firewire liblockdep \
-		perf selftests spi turbostat usb \
+		perf selftests bootconfig spi turbostat usb \
 		virtio vm bpf x86_energy_perf_policy \
 		tmon freefall iio objtool kvm_stat wmi \
 		pci debugging
@@ -107,7 +108,7 @@
 cpupower_install:
 	$(call descend,power/$(@:_install=),install)
 
-cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install:
+cgroup_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install:
 	$(call descend,$(@:_install=),install)
 
 liblockdep_install:
@@ -141,7 +142,7 @@
 cpupower_clean:
 	$(call descend,power/cpupower,clean)
 
-cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean:
+cgroup_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean:
 	$(call descend,$(@:_clean=),clean)
 
 liblockdep_clean:
@@ -176,7 +177,7 @@
 	$(call descend,build,clean)
 
 clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \
-		perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \
+		perf_clean selftests_clean turbostat_clean bootconfig_clean spi_clean usb_clean virtio_clean \
 		vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
 		freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
 		gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean \
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
index 3eb8411..e95b72e 100644
--- a/tools/arch/x86/include/uapi/asm/vmx.h
+++ b/tools/arch/x86/include/uapi/asm/vmx.h
@@ -33,7 +33,7 @@
 #define EXIT_REASON_TRIPLE_FAULT        2
 #define EXIT_REASON_INIT_SIGNAL			3
 
-#define EXIT_REASON_PENDING_INTERRUPT   7
+#define EXIT_REASON_INTERRUPT_WINDOW    7
 #define EXIT_REASON_NMI_WINDOW          8
 #define EXIT_REASON_TASK_SWITCH         9
 #define EXIT_REASON_CPUID               10
@@ -94,7 +94,7 @@
 	{ EXIT_REASON_EXTERNAL_INTERRUPT,    "EXTERNAL_INTERRUPT" }, \
 	{ EXIT_REASON_TRIPLE_FAULT,          "TRIPLE_FAULT" }, \
 	{ EXIT_REASON_INIT_SIGNAL,           "INIT_SIGNAL" }, \
-	{ EXIT_REASON_PENDING_INTERRUPT,     "PENDING_INTERRUPT" }, \
+	{ EXIT_REASON_INTERRUPT_WINDOW,      "INTERRUPT_WINDOW" }, \
 	{ EXIT_REASON_NMI_WINDOW,            "NMI_WINDOW" }, \
 	{ EXIT_REASON_TASK_SWITCH,           "TASK_SWITCH" }, \
 	{ EXIT_REASON_CPUID,                 "CPUID" }, \
diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
index 8908c58..53adc17 100644
--- a/tools/arch/x86/lib/x86-opcode-map.txt
+++ b/tools/arch/x86/lib/x86-opcode-map.txt
@@ -929,7 +929,7 @@
 
 GrpTable: Grp3_2
 0: TEST Ev,Iz
-1:
+1: TEST Ev,Iz
 2: NOT Ev
 3: NEG Ev
 4: MUL rAX,Ev
diff --git a/tools/bootconfig/.gitignore b/tools/bootconfig/.gitignore
new file mode 100644
index 0000000..e7644df
--- /dev/null
+++ b/tools/bootconfig/.gitignore
@@ -0,0 +1 @@
+bootconfig
diff --git a/tools/bootconfig/Makefile b/tools/bootconfig/Makefile
new file mode 100644
index 0000000..a6146ac
--- /dev/null
+++ b/tools/bootconfig/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for bootconfig command
+
+bindir ?= /usr/bin
+
+HEADER = include/linux/bootconfig.h
+CFLAGS = -Wall -g -I./include
+
+PROGS = bootconfig
+
+all: $(PROGS)
+
+bootconfig: ../../lib/bootconfig.c main.c $(HEADER)
+	$(CC) $(filter %.c,$^) $(CFLAGS) -o $@
+
+install: $(PROGS)
+	install bootconfig $(DESTDIR)$(bindir)
+
+test: bootconfig
+	./test-bootconfig.sh
+
+clean:
+	$(RM) -f *.o bootconfig
diff --git a/tools/bootconfig/include/linux/bootconfig.h b/tools/bootconfig/include/linux/bootconfig.h
new file mode 100644
index 0000000..078cbd2
--- /dev/null
+++ b/tools/bootconfig/include/linux/bootconfig.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BOOTCONFIG_LINUX_BOOTCONFIG_H
+#define _BOOTCONFIG_LINUX_BOOTCONFIG_H
+
+#include "../../../../include/linux/bootconfig.h"
+
+#endif
diff --git a/tools/bootconfig/include/linux/bug.h b/tools/bootconfig/include/linux/bug.h
new file mode 100644
index 0000000..7b65a38
--- /dev/null
+++ b/tools/bootconfig/include/linux/bug.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_BUG_H
+#define _SKC_LINUX_BUG_H
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#define WARN_ON(cond)	\
+	((cond) ? printf("Internal warning(%s:%d, %s): %s\n",	\
+			__FILE__, __LINE__, __func__, #cond) : 0)
+
+#endif
diff --git a/tools/bootconfig/include/linux/ctype.h b/tools/bootconfig/include/linux/ctype.h
new file mode 100644
index 0000000..c56ecc1
--- /dev/null
+++ b/tools/bootconfig/include/linux/ctype.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_CTYPE_H
+#define _SKC_LINUX_CTYPE_H
+
+#include <ctype.h>
+
+#endif
diff --git a/tools/bootconfig/include/linux/errno.h b/tools/bootconfig/include/linux/errno.h
new file mode 100644
index 0000000..5d9f91e
--- /dev/null
+++ b/tools/bootconfig/include/linux/errno.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_ERRNO_H
+#define _SKC_LINUX_ERRNO_H
+
+#include <asm/errno.h>
+
+#endif
diff --git a/tools/bootconfig/include/linux/kernel.h b/tools/bootconfig/include/linux/kernel.h
new file mode 100644
index 0000000..2d93320
--- /dev/null
+++ b/tools/bootconfig/include/linux/kernel.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_KERNEL_H
+#define _SKC_LINUX_KERNEL_H
+
+#include <stdlib.h>
+#include <stdbool.h>
+
+#include <linux/printk.h>
+
+typedef unsigned short u16;
+typedef unsigned int   u32;
+
+#define unlikely(cond)	(cond)
+
+#define __init
+#define __initdata
+
+#endif
diff --git a/tools/bootconfig/include/linux/printk.h b/tools/bootconfig/include/linux/printk.h
new file mode 100644
index 0000000..017bcd69
--- /dev/null
+++ b/tools/bootconfig/include/linux/printk.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_PRINTK_H
+#define _SKC_LINUX_PRINTK_H
+
+#include <stdio.h>
+
+/* controllable printf */
+extern int pr_output;
+#define printk(fmt, ...)	\
+	(pr_output ? printf(fmt, __VA_ARGS__) : 0)
+
+#define pr_err printk
+#define pr_warn	printk
+#define pr_info	printk
+#define pr_debug printk
+
+#endif
diff --git a/tools/bootconfig/include/linux/string.h b/tools/bootconfig/include/linux/string.h
new file mode 100644
index 0000000..8267af7
--- /dev/null
+++ b/tools/bootconfig/include/linux/string.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_STRING_H
+#define _SKC_LINUX_STRING_H
+
+#include <string.h>
+
+/* Copied from lib/string.c */
+static inline char *skip_spaces(const char *str)
+{
+	while (isspace(*str))
+		++str;
+	return (char *)str;
+}
+
+static inline char *strim(char *s)
+{
+	size_t size;
+	char *end;
+
+	size = strlen(s);
+	if (!size)
+		return s;
+
+	end = s + size - 1;
+	while (end >= s && isspace(*end))
+		end--;
+	*(end + 1) = '\0';
+
+	return skip_spaces(s);
+}
+
+#endif
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
new file mode 100644
index 0000000..47f4884
--- /dev/null
+++ b/tools/bootconfig/main.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Boot config tool for initrd image
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+
+#include <linux/kernel.h>
+#include <linux/bootconfig.h>
+
+int pr_output = 1;
+
+static int xbc_show_array(struct xbc_node *node)
+{
+	const char *val;
+	int i = 0;
+
+	xbc_array_for_each_value(node, val) {
+		printf("\"%s\"%s", val, node->next ? ", " : ";\n");
+		i++;
+	}
+	return i;
+}
+
+static void xbc_show_compact_tree(void)
+{
+	struct xbc_node *node, *cnode;
+	int depth = 0, i;
+
+	node = xbc_root_node();
+	while (node && xbc_node_is_key(node)) {
+		for (i = 0; i < depth; i++)
+			printf("\t");
+		cnode = xbc_node_get_child(node);
+		while (cnode && xbc_node_is_key(cnode) && !cnode->next) {
+			printf("%s.", xbc_node_get_data(node));
+			node = cnode;
+			cnode = xbc_node_get_child(node);
+		}
+		if (cnode && xbc_node_is_key(cnode)) {
+			printf("%s {\n", xbc_node_get_data(node));
+			depth++;
+			node = cnode;
+			continue;
+		} else if (cnode && xbc_node_is_value(cnode)) {
+			printf("%s = ", xbc_node_get_data(node));
+			if (cnode->next)
+				xbc_show_array(cnode);
+			else
+				printf("\"%s\";\n", xbc_node_get_data(cnode));
+		} else {
+			printf("%s;\n", xbc_node_get_data(node));
+		}
+
+		if (node->next) {
+			node = xbc_node_get_next(node);
+			continue;
+		}
+		while (!node->next) {
+			node = xbc_node_get_parent(node);
+			if (!node)
+				return;
+			if (!xbc_node_get_child(node)->next)
+				continue;
+			depth--;
+			for (i = 0; i < depth; i++)
+				printf("\t");
+			printf("}\n");
+		}
+		node = xbc_node_get_next(node);
+	}
+}
+
+/* Simple real checksum */
+int checksum(unsigned char *buf, int len)
+{
+	int i, sum = 0;
+
+	for (i = 0; i < len; i++)
+		sum += buf[i];
+
+	return sum;
+}
+
+#define PAGE_SIZE	4096
+
+int load_xbc_fd(int fd, char **buf, int size)
+{
+	int ret;
+
+	*buf = malloc(size + 1);
+	if (!*buf)
+		return -ENOMEM;
+
+	ret = read(fd, *buf, size);
+	if (ret < 0)
+		return -errno;
+	(*buf)[size] = '\0';
+
+	return ret;
+}
+
+/* Return the read size or -errno */
+int load_xbc_file(const char *path, char **buf)
+{
+	struct stat stat;
+	int fd, ret;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return -errno;
+	ret = fstat(fd, &stat);
+	if (ret < 0)
+		return -errno;
+
+	ret = load_xbc_fd(fd, buf, stat.st_size);
+
+	close(fd);
+
+	return ret;
+}
+
+int load_xbc_from_initrd(int fd, char **buf)
+{
+	struct stat stat;
+	int ret;
+	u32 size = 0, csum = 0, rcsum;
+
+	ret = fstat(fd, &stat);
+	if (ret < 0)
+		return -errno;
+
+	if (stat.st_size < 8)
+		return 0;
+
+	if (lseek(fd, -8, SEEK_END) < 0) {
+		printf("Failed to lseek: %d\n", -errno);
+		return -errno;
+	}
+
+	if (read(fd, &size, sizeof(u32)) < 0)
+		return -errno;
+
+	if (read(fd, &csum, sizeof(u32)) < 0)
+		return -errno;
+
+	/* Wrong size, maybe no boot config here */
+	if (stat.st_size < size + 8)
+		return 0;
+
+	if (lseek(fd, stat.st_size - 8 - size, SEEK_SET) < 0) {
+		printf("Failed to lseek: %d\n", -errno);
+		return -errno;
+	}
+
+	ret = load_xbc_fd(fd, buf, size);
+	if (ret < 0)
+		return ret;
+
+	/* Wrong Checksum, maybe no boot config here */
+	rcsum = checksum((unsigned char *)*buf, size);
+	if (csum != rcsum) {
+		printf("checksum error: %u != %u\n", csum, rcsum);
+		return 0;
+	}
+
+	ret = xbc_init(*buf);
+	/* Wrong data, maybe no boot config here */
+	if (ret < 0)
+		return 0;
+
+	return size;
+}
+
+int show_xbc(const char *path)
+{
+	int ret, fd;
+	char *buf = NULL;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0) {
+		printf("Failed to open initrd %s: %d\n", path, fd);
+		return -errno;
+	}
+
+	ret = load_xbc_from_initrd(fd, &buf);
+	if (ret < 0)
+		printf("Failed to load a boot config from initrd: %d\n", ret);
+	else
+		xbc_show_compact_tree();
+
+	close(fd);
+	free(buf);
+
+	return ret;
+}
+
+int delete_xbc(const char *path)
+{
+	struct stat stat;
+	int ret = 0, fd, size;
+	char *buf = NULL;
+
+	fd = open(path, O_RDWR);
+	if (fd < 0) {
+		printf("Failed to open initrd %s: %d\n", path, fd);
+		return -errno;
+	}
+
+	/*
+	 * Suppress error messages in xbc_init() because the data may just
+	 * coincidentally match the size and checksum footer.
+	 */
+	pr_output = 0;
+	size = load_xbc_from_initrd(fd, &buf);
+	pr_output = 1;
+	if (size < 0) {
+		ret = size;
+		printf("Failed to load a boot config from initrd: %d\n", ret);
+	} else if (size > 0) {
+		ret = fstat(fd, &stat);
+		if (!ret)
+			ret = ftruncate(fd, stat.st_size - size - 8);
+		if (ret)
+			ret = -errno;
+	} /* Ignore if there is no boot config in initrd */
+
+	close(fd);
+	free(buf);
+
+	return ret;
+}
+
+int apply_xbc(const char *path, const char *xbc_path)
+{
+	u32 size, csum;
+	char *buf, *data;
+	int ret, fd;
+
+	ret = load_xbc_file(xbc_path, &buf);
+	if (ret < 0) {
+		printf("Failed to load %s : %d\n", xbc_path, ret);
+		return ret;
+	}
+	size = strlen(buf) + 1;
+	csum = checksum((unsigned char *)buf, size);
+
+	/* Prepare xbc_path data */
+	data = malloc(size + 8);
+	if (!data)
+		return -ENOMEM;
+	strcpy(data, buf);
+	*(u32 *)(data + size) = size;
+	*(u32 *)(data + size + 4) = csum;
+
+	/* Check the data format */
+	ret = xbc_init(buf);
+	if (ret < 0) {
+		printf("Failed to parse %s: %d\n", xbc_path, ret);
+		free(data);
+		free(buf);
+		return ret;
+	}
+	printf("Apply %s to %s\n", xbc_path, path);
+	printf("\tNumber of nodes: %d\n", ret);
+	printf("\tSize: %u bytes\n", (unsigned int)size);
+	printf("\tChecksum: %u\n", (unsigned int)csum);
+
+	/* TODO: Check the options by schema */
+	xbc_destroy_all();
+	free(buf);
+
+	/* Remove old boot config if exists */
+	ret = delete_xbc(path);
+	if (ret < 0) {
+		printf("Failed to delete previous boot config: %d\n", ret);
+		return ret;
+	}
+
+	/* Apply new one */
+	fd = open(path, O_RDWR | O_APPEND);
+	if (fd < 0) {
+		printf("Failed to open %s: %d\n", path, fd);
+		return fd;
+	}
+	/* TODO: Ensure the @path is an initramfs/initrd image */
+	ret = write(fd, data, size + 8);
+	if (ret < 0) {
+		printf("Failed to apply a boot config: %d\n", ret);
+		return ret;
+	}
+	close(fd);
+	free(data);
+
+	return 0;
+}
+
+int usage(void)
+{
+	printf("Usage: bootconfig [OPTIONS] <INITRD>\n"
+		" Apply, delete or show the boot config of an initrd.\n"
+		" Options:\n"
+		"		-a <config>: Apply boot config to initrd\n"
+		"		-d : Delete boot config file from initrd\n\n"
+		" If no option is given, show the currently applied boot config.\n");
+	return -1;
+}
+
+int main(int argc, char **argv)
+{
+	char *path = NULL;
+	char *apply = NULL;
+	bool delete = false;
+	int opt;
+
+	while ((opt = getopt(argc, argv, "hda:")) != -1) {
+		switch (opt) {
+		case 'd':
+			delete = true;
+			break;
+		case 'a':
+			apply = optarg;
+			break;
+		case 'h':
+		default:
+			return usage();
+		}
+	}
+
+	if (apply && delete) {
+		printf("Error: You cannot specify both -a and -d at once.\n");
+		return usage();
+	}
+
+	if (optind >= argc) {
+		printf("Error: No initrd is specified.\n");
+		return usage();
+	}
+
+	path = argv[optind];
+
+	if (apply)
+		return apply_xbc(path, apply);
+	else if (delete)
+		return delete_xbc(path);
+
+	return show_xbc(path);
+}
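
main.c above defines the on-disk convention the tool relies on: the boot config text, including its terminating NUL (hence size = strlen(buf) + 1), is appended to the initrd and followed by an 8-byte footer holding the size and a simple byte-sum checksum, both stored as 4-byte values in the tool's native byte order. A small sketch of building and verifying that footer for an in-memory buffer, following the load_xbc_from_initrd()/apply_xbc() code above; it makes no claim about how the kernel itself consumes the footer.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same byte-sum as checksum() in main.c. */
static uint32_t byte_sum(const unsigned char *buf, size_t len)
{
	uint32_t sum = 0;

	while (len--)
		sum += *buf++;
	return sum;
}

int main(void)
{
	const char conf[] = "key = \"value\"";
	uint32_t size = (uint32_t)strlen(conf) + 1;	/* text + NUL */
	uint32_t csum = byte_sum((const unsigned char *)conf, size);
	unsigned char footer[8];

	memcpy(footer, &size, 4);	/* appended right after the config text */
	memcpy(footer + 4, &csum, 4);

	printf("config size %u, checksum %u\n", size, csum);
	printf("footer ok: %d\n",
	       byte_sum((const unsigned char *)conf, size) == csum);
	return 0;
}
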
diff --git a/tools/bootconfig/samples/bad-array-space-comment.bconf b/tools/bootconfig/samples/bad-array-space-comment.bconf
new file mode 100644
index 0000000..fda19e4
--- /dev/null
+++ b/tools/bootconfig/samples/bad-array-space-comment.bconf
@@ -0,0 +1,5 @@
+key =	# comment
+	"value1",	  # comment1
+	"value2" 	  # comment2
+,
+	"value3"
diff --git a/tools/bootconfig/samples/bad-array.bconf b/tools/bootconfig/samples/bad-array.bconf
new file mode 100644
index 0000000..0174af0
--- /dev/null
+++ b/tools/bootconfig/samples/bad-array.bconf
@@ -0,0 +1,2 @@
+# Array must be comma separated.
+key = "value1" "value2"
diff --git a/tools/bootconfig/samples/bad-dotword.bconf b/tools/bootconfig/samples/bad-dotword.bconf
new file mode 100644
index 0000000..ba5557b
--- /dev/null
+++ b/tools/bootconfig/samples/bad-dotword.bconf
@@ -0,0 +1,4 @@
+# do not start keyword with .
+key {
+  .word = 1
+}
diff --git a/tools/bootconfig/samples/bad-empty.bconf b/tools/bootconfig/samples/bad-empty.bconf
new file mode 100644
index 0000000..2ba3f6cc
--- /dev/null
+++ b/tools/bootconfig/samples/bad-empty.bconf
@@ -0,0 +1 @@
+# Wrong boot config: comment only
diff --git a/tools/bootconfig/samples/bad-keyerror.bconf b/tools/bootconfig/samples/bad-keyerror.bconf
new file mode 100644
index 0000000..b6e247a
--- /dev/null
+++ b/tools/bootconfig/samples/bad-keyerror.bconf
@@ -0,0 +1,2 @@
+# key word can not contain ","
+key,word
diff --git a/tools/bootconfig/samples/bad-longkey.bconf b/tools/bootconfig/samples/bad-longkey.bconf
new file mode 100644
index 0000000..eb97369
--- /dev/null
+++ b/tools/bootconfig/samples/bad-longkey.bconf
@@ -0,0 +1 @@
+key_word_is_too_long01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345
diff --git a/tools/bootconfig/samples/bad-manywords.bconf b/tools/bootconfig/samples/bad-manywords.bconf
new file mode 100644
index 0000000..8db8196
--- /dev/null
+++ b/tools/bootconfig/samples/bad-manywords.bconf
@@ -0,0 +1 @@
+key1.is2.too3.long4.5.6.7.8.9.10.11.12.13.14.15.16.17
diff --git a/tools/bootconfig/samples/bad-no-keyword.bconf b/tools/bootconfig/samples/bad-no-keyword.bconf
new file mode 100644
index 0000000..eff2680
--- /dev/null
+++ b/tools/bootconfig/samples/bad-no-keyword.bconf
@@ -0,0 +1,2 @@
+# No keyword
+{}
diff --git a/tools/bootconfig/samples/bad-nonprintable.bconf b/tools/bootconfig/samples/bad-nonprintable.bconf
new file mode 100644
index 0000000..3bb1a28
--- /dev/null
+++ b/tools/bootconfig/samples/bad-nonprintable.bconf
@@ -0,0 +1,2 @@
+# Non printable
+key = ""
diff --git a/tools/bootconfig/samples/bad-spaceword.bconf b/tools/bootconfig/samples/bad-spaceword.bconf
new file mode 100644
index 0000000..90c703d32
--- /dev/null
+++ b/tools/bootconfig/samples/bad-spaceword.bconf
@@ -0,0 +1,2 @@
+# No space between words
+key . word
diff --git a/tools/bootconfig/samples/bad-tree.bconf b/tools/bootconfig/samples/bad-tree.bconf
new file mode 100644
index 0000000..5a6038e
--- /dev/null
+++ b/tools/bootconfig/samples/bad-tree.bconf
@@ -0,0 +1,5 @@
+# brace is not closing
+tree {
+  node {
+    value = 1
+}
diff --git a/tools/bootconfig/samples/bad-value.bconf b/tools/bootconfig/samples/bad-value.bconf
new file mode 100644
index 0000000..a1217fe
--- /dev/null
+++ b/tools/bootconfig/samples/bad-value.bconf
@@ -0,0 +1,3 @@
+# Quotes error
+value = "data
+
diff --git a/tools/bootconfig/samples/escaped.bconf b/tools/bootconfig/samples/escaped.bconf
new file mode 100644
index 0000000..9f72043
--- /dev/null
+++ b/tools/bootconfig/samples/escaped.bconf
@@ -0,0 +1,3 @@
+key1 = "A\B\C"
+key2 = '\'\''
+key3 = "\\"
diff --git a/tools/bootconfig/samples/good-array-space-comment.bconf b/tools/bootconfig/samples/good-array-space-comment.bconf
new file mode 100644
index 0000000..45b938d
--- /dev/null
+++ b/tools/bootconfig/samples/good-array-space-comment.bconf
@@ -0,0 +1,4 @@
+key =	# comment
+	"value1",	  # comment1
+	"value2"	 , # comment2
+	"value3"
diff --git a/tools/bootconfig/samples/good-comment-after-value.bconf b/tools/bootconfig/samples/good-comment-after-value.bconf
new file mode 100644
index 0000000..0d92a85
--- /dev/null
+++ b/tools/bootconfig/samples/good-comment-after-value.bconf
@@ -0,0 +1 @@
+key = "value"  # comment
diff --git a/tools/bootconfig/samples/good-printables.bconf b/tools/bootconfig/samples/good-printables.bconf
new file mode 100644
index 0000000..ebb985a
--- /dev/null
+++ b/tools/bootconfig/samples/good-printables.bconf
@@ -0,0 +1,2 @@
+key = "	
+ !#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
diff --git a/tools/bootconfig/samples/good-simple.bconf b/tools/bootconfig/samples/good-simple.bconf
new file mode 100644
index 0000000..37dd6d2
--- /dev/null
+++ b/tools/bootconfig/samples/good-simple.bconf
@@ -0,0 +1,11 @@
+# A good simple bootconfig
+
+key.word1 = 1
+key.word2=2
+key.word3 = 3;
+
+key {
+word4 = 4 }
+
+key { word5 = 5; word6 = 6 }
+
diff --git a/tools/bootconfig/samples/good-single.bconf b/tools/bootconfig/samples/good-single.bconf
new file mode 100644
index 0000000..98e55ad
--- /dev/null
+++ b/tools/bootconfig/samples/good-single.bconf
@@ -0,0 +1,4 @@
+# single key style
+key = 1
+key2 = 2
+key3 = "alpha", "beta"
diff --git a/tools/bootconfig/samples/good-space-after-value.bconf b/tools/bootconfig/samples/good-space-after-value.bconf
new file mode 100644
index 0000000..56c15cb
--- /dev/null
+++ b/tools/bootconfig/samples/good-space-after-value.bconf
@@ -0,0 +1 @@
+key = "value"   
diff --git a/tools/bootconfig/samples/good-tree.bconf b/tools/bootconfig/samples/good-tree.bconf
new file mode 100644
index 0000000..f2ddefc
--- /dev/null
+++ b/tools/bootconfig/samples/good-tree.bconf
@@ -0,0 +1,12 @@
+key {
+  word {
+    tree {
+      value = "0"}
+  }
+  word2 {
+    tree {
+      value = 1,2 }
+  }
+}
+other.tree {
+  value = 2; value2 = 3;}
diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
new file mode 100755
index 0000000..87725e8
--- /dev/null
+++ b/tools/bootconfig/test-bootconfig.sh
@@ -0,0 +1,105 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+
+echo "Boot config test script"
+
+BOOTCONF=./bootconfig
+INITRD=`mktemp initrd-XXXX`
+TEMPCONF=`mktemp temp-XXXX.bconf`
+NG=0
+
+cleanup() {
+  rm -f $INITRD $TEMPCONF
+  exit $NG
+}
+
+trap cleanup EXIT TERM
+
+NO=1
+
+xpass() { # pass test command
+  echo "test case $NO ($3)... "
+  if ! ($@ && echo "\t\t[OK]"); then
+     echo "\t\t[NG]"; NG=$((NG + 1))
+  fi
+  NO=$((NO + 1))
+}
+
+xfail() { # fail test command
+  echo "test case $NO ($3)... "
+  if ! (! $@ && echo "\t\t[OK]"); then
+     echo "\t\t[NG]"; NG=$((NG + 1))
+  fi
+  NO=$((NO + 1))
+}
+
+echo "Basic command test"
+xpass $BOOTCONF $INITRD
+
+echo "Delete command should success without bootconfig"
+echo "Delete command should succeed without bootconfig"
+xpass $BOOTCONF -d $INITRD
+
+dd if=/dev/zero of=$INITRD bs=4096 count=1
+echo "key = value;" > $TEMPCONF
+bconf_size=$(stat -c %s $TEMPCONF)
+initrd_size=$(stat -c %s $INITRD)
+
+echo "Apply command test"
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+new_size=$(stat -c %s $INITRD)
+
+echo "File size check"
+xpass test $new_size -eq $(expr $bconf_size + $initrd_size + 9)
+
+echo "Apply command repeat test"
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+
+echo "File size check"
+xpass test $new_size -eq $(stat -c %s $INITRD)
+
+echo "Delete command check"
+xpass $BOOTCONF -d $INITRD
+
+echo "File size check"
+new_size=$(stat -c %s $INITRD)
+xpass test $new_size -eq $initrd_size
+
+echo "Max node number check"
+
+echo -n > $TEMPCONF
+for i in `seq 1 1024` ; do
+   echo "node$i" >> $TEMPCONF
+done
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+
+echo "badnode" >> $TEMPCONF
+xfail $BOOTCONF -a $TEMPCONF $INITRD
+
+echo "Max filesize check"
+
+# Max size is 32767 (including the terminating nul byte)
+echo -n "data = \"" > $TEMPCONF
+dd if=/dev/urandom bs=768 count=32 | base64 -w0 >> $TEMPCONF
+echo "\"" >> $TEMPCONF
+xfail $BOOTCONF -a $TEMPCONF $INITRD
+
+truncate -s 32764 $TEMPCONF
+echo "\"" >> $TEMPCONF	# add 2 bytes + terminal ('\"\n\0')
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+
+echo "=== expected failure cases ==="
+for i in samples/bad-* ; do
+  xfail $BOOTCONF -a $i $INITRD
+done
+
+echo "=== expected success cases ==="
+for i in samples/good-* ; do
+  xpass $BOOTCONF -a $i $INITRD
+done
+
+echo
+if [ $NG -eq 0 ]; then
+	echo "All tests passed"
+else
+	echo "$NG tests failed"
+fi
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 446ba89..941873d 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -580,7 +580,7 @@ probe_large_insn_limit(const char *define_prefix, __u32 ifindex)
 	res = bpf_probe_large_insn_limit(ifindex);
 	print_bool_feature("have_large_insn_limit",
 			   "Large program size limit",
-			   "HAVE_LARGE_INSN_LIMIT",
+			   "LARGE_INSN_LIMIT",
 			   res, define_prefix);
 }
 
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index a3521de..b352ab0 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -536,7 +536,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 		buf = (unsigned char *)(info->jited_prog_insns);
 		member_len = info->jited_prog_len;
 	} else {	/* DUMP_XLATED */
-		if (info->xlated_prog_len == 0) {
+		if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
 			p_err("error retrieving insn dump: kernel.kptr_restrict set?");
 			return -1;
 		}
diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile
index 0c02135..39edd68 100644
--- a/tools/bpf/runqslower/Makefile
+++ b/tools/bpf/runqslower/Makefile
@@ -41,7 +41,7 @@
 
 $(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ)
 	$(call msg,BINARY,$@)
-	$(Q)$(CC) $(CFLAGS) -lelf -lz $^ -o $@
+	$(Q)$(CC) $(CFLAGS) $^ -lelf -lz -o $@
 
 $(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h	      \
 			$(OUTPUT)/runqslower.bpf.o
@@ -75,7 +75,7 @@
 	fi
 	$(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF_PATH) format c > $@
 
-$(BPFOBJ): | $(OUTPUT)
+$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)
 	$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC)			       \
 		    OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
 
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index aea2d91..16d629b 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -80,6 +80,8 @@ static int hv_start_fcopy(struct hv_start_fcopy *smsg)
 
 	error = 0;
 done:
+	if (error)
+		target_fname[0] = '\0';
 	return error;
 }
 
@@ -108,15 +110,29 @@ static int hv_copy_data(struct hv_do_fcopy *cpmsg)
 	return ret;
 }
 
+/*
+ * Reset target_fname to "" in the two functions below to handle hibernation:
+ * if the fcopy operation is aborted by hibernation, the daemon should remove
+ * the partially-copied file; to achieve this, the hv_utils driver always
+ * fakes a CANCEL_FCOPY message upon suspend, and when the VM later resumes,
+ * the daemon calls hv_copy_cancel() to remove the file. If a file was copied
+ * successfully before the suspend, hv_copy_finished() must reset target_fname
+ * so that the file is not incorrectly removed upon resume, since the faked
+ * CANCEL_FCOPY message is spurious in this case.
+ */
 static int hv_copy_finished(void)
 {
 	close(target_fd);
+	target_fname[0] = '\0';
 	return 0;
 }
 static int hv_copy_cancel(void)
 {
 	close(target_fd);
-	unlink(target_fname);
+	if (strlen(target_fname) > 0) {
+		unlink(target_fname);
+		target_fname[0] = '\0';
+	}
 	return 0;
 
 }
@@ -131,7 +147,7 @@ void print_usage(char *argv[])
 
 int main(int argc, char *argv[])
 {
-	int fcopy_fd;
+	int fcopy_fd = -1;
 	int error;
 	int daemonize = 1, long_index = 0, opt;
 	int version = FCOPY_CURRENT_VERSION;
@@ -141,7 +157,7 @@ int main(int argc, char *argv[])
 		struct hv_do_fcopy copy;
 		__u32 kernel_modver;
 	} buffer = { };
-	int in_handshake = 1;
+	int in_handshake;
 
 	static struct option long_options[] = {
 		{"help",	no_argument,	   0,  'h' },
@@ -170,6 +186,12 @@ int main(int argc, char *argv[])
 	openlog("HV_FCOPY", 0, LOG_USER);
 	syslog(LOG_INFO, "starting; pid is:%d", getpid());
 
+reopen_fcopy_fd:
+	if (fcopy_fd != -1)
+		close(fcopy_fd);
+	/* Remove any possible partially-copied file on error */
+	hv_copy_cancel();
+	in_handshake = 1;
 	fcopy_fd = open("/dev/vmbus/hv_fcopy", O_RDWR);
 
 	if (fcopy_fd < 0) {
@@ -196,7 +218,7 @@ int main(int argc, char *argv[])
 		len = pread(fcopy_fd, &buffer, sizeof(buffer), 0);
 		if (len < 0) {
 			syslog(LOG_ERR, "pread failed: %s", strerror(errno));
-			exit(EXIT_FAILURE);
+			goto reopen_fcopy_fd;
 		}
 
 		if (in_handshake) {
@@ -231,9 +253,14 @@ int main(int argc, char *argv[])
 
 		}
 
+		/*
+		 * pwrite() may return an error due to the faked CANCEL_FCOPY
+		 * message upon hibernation. Ignore the error by resetting the
+		 * dev file, i.e. closing and re-opening it.
+		 */
 		if (pwrite(fcopy_fd, &error, sizeof(int), 0) != sizeof(int)) {
 			syslog(LOG_ERR, "pwrite failed: %s", strerror(errno));
-			exit(EXIT_FAILURE);
+			goto reopen_fcopy_fd;
 		}
 	}
 }
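
The hv_fcopy daemon above (and, below, hv_kvp and hv_vss) switches from exiting on a failed pread()/pwrite() to jumping back to a reopen label, because a hibernate/resume cycle can legitimately make a single transfer on the device node fail. A stripped-down sketch of that loop shape; the device path and the empty message handling are placeholders, not the real Hyper-V interface.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *dev_path = "/dev/hypothetical_util";	/* placeholder */
	char msg[128];
	int fd = -1;
	ssize_t len;

reopen:
	if (fd != -1)
		close(fd);
	/* re-handshake / reset per-transfer state here */
	fd = open(dev_path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	for (;;) {
		len = pread(fd, msg, sizeof(msg), 0);
		if (len < 0)
			goto reopen;	/* transient (e.g. resume): reset, don't exit */

		/* ... handle the message, then acknowledge it ... */
		if (pwrite(fd, msg, (size_t)len, 0) != len)
			goto reopen;
	}
}
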
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index e9ef4ca..ee9c1bb 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -76,7 +76,7 @@ enum {
 	DNS
 };
 
-static int in_hand_shake = 1;
+static int in_hand_shake;
 
 static char *os_name = "";
 static char *os_major = "";
@@ -1360,7 +1360,7 @@ void print_usage(char *argv[])
 
 int main(int argc, char *argv[])
 {
-	int kvp_fd, len;
+	int kvp_fd = -1, len;
 	int error;
 	struct pollfd pfd;
 	char    *p;
@@ -1400,14 +1400,6 @@ int main(int argc, char *argv[])
 	openlog("KVP", 0, LOG_USER);
 	syslog(LOG_INFO, "KVP starting; pid is:%d", getpid());
 
-	kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR | O_CLOEXEC);
-
-	if (kvp_fd < 0) {
-		syslog(LOG_ERR, "open /dev/vmbus/hv_kvp failed; error: %d %s",
-			errno, strerror(errno));
-		exit(EXIT_FAILURE);
-	}
-
 	/*
 	 * Retrieve OS release information.
 	 */
@@ -1423,6 +1415,18 @@ int main(int argc, char *argv[])
 		exit(EXIT_FAILURE);
 	}
 
+reopen_kvp_fd:
+	if (kvp_fd != -1)
+		close(kvp_fd);
+	in_hand_shake = 1;
+	kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR | O_CLOEXEC);
+
+	if (kvp_fd < 0) {
+		syslog(LOG_ERR, "open /dev/vmbus/hv_kvp failed; error: %d %s",
+		       errno, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
 	/*
 	 * Register ourselves with the kernel.
 	 */
@@ -1456,9 +1460,7 @@ int main(int argc, char *argv[])
 		if (len != sizeof(struct hv_kvp_msg)) {
 			syslog(LOG_ERR, "read failed; error:%d %s",
 			       errno, strerror(errno));
-
-			close(kvp_fd);
-			return EXIT_FAILURE;
+			goto reopen_kvp_fd;
 		}
 
 		/*
@@ -1617,13 +1619,17 @@ int main(int argc, char *argv[])
 			break;
 		}
 
-		/* Send the value back to the kernel. */
+		/*
+		 * Send the value back to the kernel. Note: the write() may
+		 * return an error due to hibernation; we can ignore the error
+		 * by resetting the dev file, i.e. closing and re-opening it.
+		 */
 kvp_done:
 		len = write(kvp_fd, hv_msg, sizeof(struct hv_kvp_msg));
 		if (len != sizeof(struct hv_kvp_msg)) {
 			syslog(LOG_ERR, "write failed; error: %d %s", errno,
 			       strerror(errno));
-			exit(EXIT_FAILURE);
+			goto reopen_kvp_fd;
 		}
 	}
 
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index 92902a8..dd11187 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -28,6 +28,8 @@
 #include <stdbool.h>
 #include <dirent.h>
 
+static bool fs_frozen;
+
 /* Don't use syslog() in the function since that can cause write to disk */
 static int vss_do_freeze(char *dir, unsigned int cmd)
 {
@@ -155,18 +157,27 @@ static int vss_operate(int operation)
 			continue;
 		}
 		error |= vss_do_freeze(ent->mnt_dir, cmd);
-		if (error && operation == VSS_OP_FREEZE)
-			goto err;
+		if (operation == VSS_OP_FREEZE) {
+			if (error)
+				goto err;
+			fs_frozen = true;
+		}
 	}
 
 	endmntent(mounts);
 
 	if (root_seen) {
 		error |= vss_do_freeze("/", cmd);
-		if (error && operation == VSS_OP_FREEZE)
-			goto err;
+		if (operation == VSS_OP_FREEZE) {
+			if (error)
+				goto err;
+			fs_frozen = true;
+		}
 	}
 
+	if (operation == VSS_OP_THAW && !error)
+		fs_frozen = false;
+
 	goto out;
 err:
 	save_errno = errno;
@@ -175,6 +186,7 @@ static int vss_operate(int operation)
 		endmntent(mounts);
 	}
 	vss_operate(VSS_OP_THAW);
+	fs_frozen = false;
 	/* Call syslog after we thaw all filesystems */
 	if (ent)
 		syslog(LOG_ERR, "FREEZE of %s failed; error:%d %s",
@@ -196,13 +208,13 @@ void print_usage(char *argv[])
 
 int main(int argc, char *argv[])
 {
-	int vss_fd, len;
+	int vss_fd = -1, len;
 	int error;
 	struct pollfd pfd;
 	int	op;
 	struct hv_vss_msg vss_msg[1];
 	int daemonize = 1, long_index = 0, opt;
-	int in_handshake = 1;
+	int in_handshake;
 	__u32 kernel_modver;
 
 	static struct option long_options[] = {
@@ -232,6 +244,18 @@ int main(int argc, char *argv[])
 	openlog("Hyper-V VSS", 0, LOG_USER);
 	syslog(LOG_INFO, "VSS starting; pid is:%d", getpid());
 
+reopen_vss_fd:
+	if (vss_fd != -1)
+		close(vss_fd);
+	if (fs_frozen) {
+		if (vss_operate(VSS_OP_THAW) || fs_frozen) {
+			syslog(LOG_ERR, "failed to thaw file system: err=%d",
+			       errno);
+			exit(EXIT_FAILURE);
+		}
+	}
+
+	in_handshake = 1;
 	vss_fd = open("/dev/vmbus/hv_vss", O_RDWR);
 	if (vss_fd < 0) {
 		syslog(LOG_ERR, "open /dev/vmbus/hv_vss failed; error: %d %s",
@@ -284,8 +308,7 @@ int main(int argc, char *argv[])
 		if (len != sizeof(struct hv_vss_msg)) {
 			syslog(LOG_ERR, "read failed; error:%d %s",
 			       errno, strerror(errno));
-			close(vss_fd);
-			return EXIT_FAILURE;
+			goto reopen_vss_fd;
 		}
 
 		op = vss_msg->vss_hdr.operation;
@@ -312,14 +335,18 @@ int main(int argc, char *argv[])
 		default:
 			syslog(LOG_ERR, "Illegal op:%d\n", op);
 		}
+
+		/*
+		 * The write() may return an error due to the faked VSS_OP_THAW
+		 * message upon hibernation. Ignore the error by resetting the
+		 * dev file, i.e. closing and re-opening it.
+		 */
 		vss_msg->error = error;
 		len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
 		if (len != sizeof(struct hv_vss_msg)) {
 			syslog(LOG_ERR, "write failed; error: %d %s", errno,
 			       strerror(errno));
-
-			if (op == VSS_OP_FREEZE)
-				vss_operate(VSS_OP_THAW);
+			goto reopen_vss_fd;
 		}
 	}
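The new fs_frozen flag gives that reopen path the context it needs: it records whether a FREEZE actually took effect, so the daemon thaws before re-registering and refuses to continue if the thaw fails. A stripped-down sketch of the bookkeeping is below; freeze_all() and thaw_all() are illustrative stand-ins for vss_operate(), not functions from the patch.

/*
 * Sketch of the freeze-state bookkeeping introduced above. Only the
 * flag handling mirrors the patch; the helpers are placeholders for
 * vss_operate(VSS_OP_FREEZE) and vss_operate(VSS_OP_THAW).
 */
#include <stdbool.h>

static bool fs_frozen;

static int freeze_all(void)
{
	/* FIFREEZE every mounted filesystem ... */
	fs_frozen = true;
	return 0;
}

static int thaw_all(void)
{
	/* FITHAW every mounted filesystem ... */
	fs_frozen = false;
	return 0;
}

/* Called before reopening /dev/vmbus/hv_vss after a failed read/write. */
static int recover(void)
{
	if (fs_frozen && thaw_all() != 0)
		return -1;	/* never continue with filesystems still frozen */
	return 0;
}

int main(void)
{
	freeze_all();
	return recover() ? 1 : 0;
}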
 
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index 140c836..5fca38f 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -14,10 +14,11 @@
 #include <linux/bits.h>
 #include <linux/compiler.h>
 
-#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#define BITS_TO_U64(nr)		DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
-#define BITS_TO_U32(nr)		DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
-#define BITS_TO_BYTES(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE)
+#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
+#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+#define BITS_TO_U64(nr)		DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
+#define BITS_TO_U32(nr)		DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
+#define BITS_TO_BYTES(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
 
 extern unsigned int __sw_hweight8(unsigned int w);
 extern unsigned int __sw_hweight16(unsigned int w);
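BITS_PER_TYPE() simply rewrites the existing divisors in terms of the type's bit width, matching the kernel-side header. A small user-space illustration of what the conversion macros evaluate to (assuming 8-bit bytes and a 64-bit long):

/*
 * Worked example of the bit-count conversion macros above, written as
 * standalone user-space C. Results assume BITS_PER_BYTE == 8 and a
 * 64-bit long.
 */
#include <stdio.h>

#define BITS_PER_BYTE		8
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_BYTES(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(char))

int main(void)
{
	/* 65 bits need two 64-bit longs and nine bytes of storage. */
	printf("BITS_TO_LONGS(65) = %zu\n", (size_t)BITS_TO_LONGS(65));
	printf("BITS_TO_BYTES(65) = %zu\n", (size_t)BITS_TO_BYTES(65));
	return 0;
}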
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index ad1b9e6..4cf9311 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -270,6 +270,7 @@
     def __init__(self, exit_reasons):
         self.sc_perf_evt_open = 298
         self.ioctl_numbers = IOCTL_NUMBERS
+        self.exit_reason_field = 'exit_reason'
         self.exit_reasons = exit_reasons
 
     def debugfs_is_child(self, field):
@@ -289,6 +290,7 @@
         # numbers depend on the wordsize.
         char_ptr_size = ctypes.sizeof(ctypes.c_char_p)
         self.ioctl_numbers['SET_FILTER'] = 0x80002406 | char_ptr_size << 16
+        self.exit_reason_field = 'exit_nr'
         self.exit_reasons = {}
 
     def debugfs_is_child(self, field):
@@ -300,6 +302,7 @@
     def __init__(self):
         self.sc_perf_evt_open = 241
         self.ioctl_numbers = IOCTL_NUMBERS
+        self.exit_reason_field = 'esr_ec'
         self.exit_reasons = AARCH64_EXIT_REASONS
 
     def debugfs_is_child(self, field):
@@ -311,6 +314,7 @@
     def __init__(self):
         self.sc_perf_evt_open = 331
         self.ioctl_numbers = IOCTL_NUMBERS
+        self.exit_reason_field = None
         self.exit_reasons = None
 
     def debugfs_is_child(self, field):
@@ -541,8 +545,8 @@
         """
         filters = {}
         filters['kvm_userspace_exit'] = ('reason', USERSPACE_EXIT_REASONS)
-        if ARCH.exit_reasons:
-            filters['kvm_exit'] = ('exit_reason', ARCH.exit_reasons)
+        if ARCH.exit_reason_field and ARCH.exit_reasons:
+            filters['kvm_exit'] = (ARCH.exit_reason_field, ARCH.exit_reasons)
         return filters
 
     def _get_available_fields(self):
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index d2a19b0b..ee08aef 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -2,10 +2,6 @@
 include ../scripts/Makefile.include
 include ../scripts/Makefile.arch
 
-ifeq ($(ARCH),x86_64)
-ARCH := x86
-endif
-
 # always use the host compiler
 HOSTAR	?= ar
 HOSTCC	?= gcc
@@ -33,7 +29,7 @@
 
 INCLUDES := -I$(srctree)/tools/include \
 	    -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
-	    -I$(srctree)/tools/arch/$(ARCH)/include
+	    -I$(srctree)/tools/arch/$(SRCARCH)/include
 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
 CFLAGS   := -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
 LDFLAGS  += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh
index 9bd04bb..2a1261b 100755
--- a/tools/objtool/sync-check.sh
+++ b/tools/objtool/sync-check.sh
@@ -48,5 +48,3 @@
 check arch/x86/include/asm/insn.h     '-I "^#include [\"<]\(asm/\)*inat.h[\">]"'
 check arch/x86/lib/inat.c             '-I "^#include [\"<]\(../include/\)*asm/insn.h[\">]"'
 check arch/x86/lib/insn.c             '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]" -I "^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]"'
-
-cd -
diff --git a/tools/power/cpupower/lib/cpufreq.c b/tools/power/cpupower/lib/cpufreq.c
index 2f55d4d..6e04304 100644
--- a/tools/power/cpupower/lib/cpufreq.c
+++ b/tools/power/cpupower/lib/cpufreq.c
@@ -332,21 +332,18 @@ void cpufreq_put_available_governors(struct cpufreq_available_governors *any)
 }
 
 
-struct cpufreq_frequencies
-*cpufreq_get_frequencies(const char *type, unsigned int cpu)
+struct cpufreq_available_frequencies
+*cpufreq_get_available_frequencies(unsigned int cpu)
 {
-	struct cpufreq_frequencies *first = NULL;
-	struct cpufreq_frequencies *current = NULL;
+	struct cpufreq_available_frequencies *first = NULL;
+	struct cpufreq_available_frequencies *current = NULL;
 	char one_value[SYSFS_PATH_MAX];
 	char linebuf[MAX_LINE_LEN];
-	char fname[MAX_LINE_LEN];
 	unsigned int pos, i;
 	unsigned int len;
 
-	snprintf(fname, MAX_LINE_LEN, "scaling_%s_frequencies", type);
-
-	len = sysfs_cpufreq_read_file(cpu, fname,
-				linebuf, sizeof(linebuf));
+	len = sysfs_cpufreq_read_file(cpu, "scaling_available_frequencies",
+				      linebuf, sizeof(linebuf));
 	if (len == 0)
 		return NULL;
 
@@ -391,9 +388,65 @@ struct cpufreq_frequencies
 	return NULL;
 }
 
-void cpufreq_put_frequencies(struct cpufreq_frequencies *any)
+struct cpufreq_available_frequencies
+*cpufreq_get_boost_frequencies(unsigned int cpu)
 {
-	struct cpufreq_frequencies *tmp, *next;
+	struct cpufreq_available_frequencies *first = NULL;
+	struct cpufreq_available_frequencies *current = NULL;
+	char one_value[SYSFS_PATH_MAX];
+	char linebuf[MAX_LINE_LEN];
+	unsigned int pos, i;
+	unsigned int len;
+
+	len = sysfs_cpufreq_read_file(cpu, "scaling_boost_frequencies",
+				      linebuf, sizeof(linebuf));
+	if (len == 0)
+		return NULL;
+
+	pos = 0;
+	for (i = 0; i < len; i++) {
+		if (linebuf[i] == ' ' || linebuf[i] == '\n') {
+			if (i - pos < 2)
+				continue;
+			if (i - pos >= SYSFS_PATH_MAX)
+				goto error_out;
+			if (current) {
+				current->next = malloc(sizeof(*current));
+				if (!current->next)
+					goto error_out;
+				current = current->next;
+			} else {
+				first = malloc(sizeof(*first));
+				if (!first)
+					goto error_out;
+				current = first;
+			}
+			current->first = first;
+			current->next = NULL;
+
+			memcpy(one_value, linebuf + pos, i - pos);
+			one_value[i - pos] = '\0';
+			if (sscanf(one_value, "%lu", &current->frequency) != 1)
+				goto error_out;
+
+			pos = i + 1;
+		}
+	}
+
+	return first;
+
+ error_out:
+	while (first) {
+		current = first->next;
+		free(first);
+		first = current;
+	}
+	return NULL;
+}
+
+void cpufreq_put_available_frequencies(struct cpufreq_available_frequencies *any)
+{
+	struct cpufreq_available_frequencies *tmp, *next;
 
 	if (!any)
 		return;
@@ -406,6 +459,11 @@ void cpufreq_put_frequencies(struct cpufreq_frequencies *any)
 	}
 }
 
+void cpufreq_put_boost_frequencies(struct cpufreq_available_frequencies *any)
+{
+	cpufreq_put_available_frequencies(any);
+}
+
 static struct cpufreq_affected_cpus *sysfs_get_cpu_list(unsigned int cpu,
 							const char *file)
 {
diff --git a/tools/power/cpupower/lib/cpufreq.h b/tools/power/cpupower/lib/cpufreq.h
index a55f0d1..95f4fd9 100644
--- a/tools/power/cpupower/lib/cpufreq.h
+++ b/tools/power/cpupower/lib/cpufreq.h
@@ -20,10 +20,10 @@ struct cpufreq_available_governors {
 	struct cpufreq_available_governors *first;
 };
 
-struct cpufreq_frequencies {
+struct cpufreq_available_frequencies {
 	unsigned long frequency;
-	struct cpufreq_frequencies *next;
-	struct cpufreq_frequencies *first;
+	struct cpufreq_available_frequencies *next;
+	struct cpufreq_available_frequencies *first;
 };
 
 
@@ -124,11 +124,17 @@ void cpufreq_put_available_governors(
  * cpufreq_put_frequencies after use.
  */
 
-struct cpufreq_frequencies
-*cpufreq_get_frequencies(const char *type, unsigned int cpu);
+struct cpufreq_available_frequencies
+*cpufreq_get_available_frequencies(unsigned int cpu);
 
-void cpufreq_put_frequencies(
-		struct cpufreq_frequencies *first);
+void cpufreq_put_available_frequencies(
+		struct cpufreq_available_frequencies *first);
+
+struct cpufreq_available_frequencies
+*cpufreq_get_boost_frequencies(unsigned int cpu);
+
+void cpufreq_put_boost_frequencies(
+		struct cpufreq_available_frequencies *first);
 
 
 /* determine affected CPUs
diff --git a/tools/power/cpupower/man/cpupower.1 b/tools/power/cpupower/man/cpupower.1
index baf741d..a5e4523 100644
--- a/tools/power/cpupower/man/cpupower.1
+++ b/tools/power/cpupower/man/cpupower.1
@@ -62,9 +62,9 @@
 Print the package name and version number.
 
 .SH "SEE ALSO"
-cpupower-set(1), cpupower-info(1), cpupower-idle(1),
-cpupower-frequency-set(1), cpupower-frequency-info(1), cpupower-monitor(1),
-powertop(1)
+cpupower-set(1), cpupower-info(1), cpupower-idle-info(1),
+cpupower-idle-set(1), cpupower-frequency-set(1), cpupower-frequency-info(1),
+cpupower-monitor(1), powertop(1)
 .PP
 .SH AUTHORS
 .nf
diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c
index e63cf55..6efc0f6 100644
--- a/tools/power/cpupower/utils/cpufreq-info.c
+++ b/tools/power/cpupower/utils/cpufreq-info.c
@@ -244,14 +244,14 @@ static int get_boost_mode_x86(unsigned int cpu)
 
 static int get_boost_mode(unsigned int cpu)
 {
-	struct cpufreq_frequencies *freqs;
+	struct cpufreq_available_frequencies *freqs;
 
 	if (cpupower_cpu_info.vendor == X86_VENDOR_AMD ||
 	    cpupower_cpu_info.vendor == X86_VENDOR_HYGON ||
 	    cpupower_cpu_info.vendor == X86_VENDOR_INTEL)
 		return get_boost_mode_x86(cpu);
 
-	freqs = cpufreq_get_frequencies("boost", cpu);
+	freqs = cpufreq_get_boost_frequencies(cpu);
 	if (freqs) {
 		printf(_("  boost frequency steps: "));
 		while (freqs->next) {
@@ -261,7 +261,7 @@ static int get_boost_mode(unsigned int cpu)
 		}
 		print_speed(freqs->frequency);
 		printf("\n");
-		cpufreq_put_frequencies(freqs);
+		cpufreq_put_available_frequencies(freqs);
 	}
 
 	return 0;
@@ -475,7 +475,7 @@ static int get_latency(unsigned int cpu, unsigned int human)
 
 static void debug_output_one(unsigned int cpu)
 {
-	struct cpufreq_frequencies *freqs;
+	struct cpufreq_available_frequencies *freqs;
 
 	get_driver(cpu);
 	get_related_cpus(cpu);
@@ -483,7 +483,7 @@ static void debug_output_one(unsigned int cpu)
 	get_latency(cpu, 1);
 	get_hardware_limits(cpu, 1);
 
-	freqs = cpufreq_get_frequencies("available", cpu);
+	freqs = cpufreq_get_available_frequencies(cpu);
 	if (freqs) {
 		printf(_("  available frequency steps:  "));
 		while (freqs->next) {
@@ -493,7 +493,7 @@ static void debug_output_one(unsigned int cpu)
 		}
 		print_speed(freqs->frequency);
 		printf("\n");
-		cpufreq_put_frequencies(freqs);
+		cpufreq_put_available_frequencies(freqs);
 	}
 
 	get_available_governors(cpu);
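Taken together, a caller of the renamed libcpupower API does exactly what the cpufreq-info changes above do: fetch a list for a CPU, walk the ->next chain, then release it with the matching put call. A compact usage sketch follows; building it against tools/power/cpupower and linking the cpupower library is an assumption of the example, not something this patch adds.

/*
 * Usage sketch for the renamed frequency-list API declared in
 * cpufreq.h above. Assumes the cpupower library headers and library
 * are available to compile and link against.
 */
#include <stdio.h>
#include "cpufreq.h"

static void print_freqs(struct cpufreq_available_frequencies *freqs)
{
	while (freqs) {
		printf("  %lu kHz\n", freqs->frequency);
		freqs = freqs->next;
	}
}

int main(void)
{
	unsigned int cpu = 0;
	struct cpufreq_available_frequencies *freqs;

	freqs = cpufreq_get_available_frequencies(cpu);
	if (freqs) {
		puts("available frequencies:");
		print_freqs(freqs);
		cpufreq_put_available_frequencies(freqs);
	}

	freqs = cpufreq_get_boost_frequencies(cpu);
	if (freqs) {
		puts("boost frequencies:");
		print_freqs(freqs);
		cpufreq_put_boost_frequencies(freqs);
	}
	return 0;
}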
diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
index 2d6d342..256199c 100755
--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
@@ -11,11 +11,11 @@
 and generates performance plots.
 
 Prerequisites:
-    Python version 2.7.x
+    Python version 2.7.x or higher
     gnuplot 5.0 or higher
-    gnuplot-py 1.8
+    gnuplot-py 1.8 or higher
     (Most of the distributions have these required packages. They may be called
-     gnuplot-py, phython-gnuplot. )
+     gnuplot-py, python-gnuplot or python3-gnuplot, gnuplot-nox, ... )
 
     HWP (Hardware P-States are disabled)
     Kernel config for Linux trace is enabled
@@ -104,7 +104,7 @@
     if os.path.exists(file_name):
         output_png = "cpu%03d_perf_busy_vs_samples.png" % cpu_index
         g_plot = common_all_gnuplot_settings(output_png)
-        g_plot('set yrange [0:40]')
+#   autoscale this one, no set y1 range
         g_plot('set y2range [0:200]')
         g_plot('set y2tics 0, 10')
         g_plot('set title "{} : cpu perf busy vs. sample : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
@@ -125,7 +125,7 @@
     if os.path.exists(file_name):
         output_png = "cpu%03d_perf_busy.png" % cpu_index
         g_plot = common_all_gnuplot_settings(output_png)
-        g_plot('set yrange [0:40]')
+#   autoscale this one, no set y1 range
         g_plot('set y2range [0:200]')
         g_plot('set y2tics 0, 10')
         g_plot('set title "{} : perf busy : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
@@ -144,9 +144,7 @@
     if os.path.exists(file_name):
         output_png = "cpu%03d_durations.png" % cpu_index
         g_plot = common_all_gnuplot_settings(output_png)
-#       Should autoscale be used here? Should seconds be used here?
-        g_plot('set yrange [0:5000]')
-        g_plot('set ytics 0, 500')
+#       autoscale this one, no set y range
         g_plot('set title "{} : durations : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
         g_plot('set ylabel "Timer Duration (MilliSeconds)"')
 #       override common
@@ -176,12 +174,12 @@
     if os.path.exists('cpu.csv'):
         output_png = 'all_cpu_pstates_vs_samples.png'
         g_plot = common_all_gnuplot_settings(output_png)
-        g_plot('set yrange [0:40]')
+#       autoscale this one, no set y range
 #       override common
         g_plot('set xlabel "Samples"')
         g_plot('set ylabel "P-State"')
         g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now()))
-        title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+        title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
         plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO)
         g_plot('title_list = "{}"'.format(title_list))
         g_plot(plot_str)
@@ -191,14 +189,14 @@
 
     output_png = 'all_cpu_pstates.png'
     g_plot = common_all_gnuplot_settings(output_png)
-    g_plot('set yrange [0:40]')
+#   autoscale this one, no set y range
     g_plot('set ylabel "P-State"')
     g_plot('set title "{} : cpu pstates : {:%F %H:%M}"'.format(testname, datetime.now()))
 
 #    the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file.
 #    plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s'
 #
-    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
     plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO)
     g_plot('title_list = "{}"'.format(title_list))
     g_plot(plot_str)
@@ -212,7 +210,7 @@
     g_plot('set ylabel "CPU load (percent)"')
     g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now()))
 
-    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
     plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD)
     g_plot('title_list = "{}"'.format(title_list))
     g_plot(plot_str)
@@ -222,11 +220,11 @@
 
     output_png = 'all_cpu_frequencies.png'
     g_plot = common_all_gnuplot_settings(output_png)
-    g_plot('set yrange [0:4]')
+#   autoscale this one, no set y range
     g_plot('set ylabel "CPU Frequency (GHz)"')
     g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now()))
 
-    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
     plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ)
     g_plot('title_list = "{}"'.format(title_list))
     g_plot(plot_str)
@@ -236,12 +234,12 @@
 
     output_png = 'all_cpu_durations.png'
     g_plot = common_all_gnuplot_settings(output_png)
-    g_plot('set yrange [0:5000]')
+#   autoscale this one, no set y range
     g_plot('set ytics 0, 500')
     g_plot('set ylabel "Timer Duration (MilliSeconds)"')
     g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now()))
 
-    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
     plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION)
     g_plot('title_list = "{}"'.format(title_list))
     g_plot(plot_str)
@@ -255,7 +253,7 @@
     g_plot('set ylabel "Scaled Busy (Unitless)"')
     g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now()))
 
-    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
     plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED)
     g_plot('title_list = "{}"'.format(title_list))
     g_plot(plot_str)
@@ -269,7 +267,7 @@
     g_plot('set ylabel "CPU IO Boost (percent)"')
     g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now()))
 
-    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
     plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST)
     g_plot('title_list = "{}"'.format(title_list))
     g_plot(plot_str)
@@ -283,7 +281,7 @@
     g_plot('set ylabel "TSC Frequency (GHz)"')
     g_plot('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now()))
 
-    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
     plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ)
     g_plot('title_list = "{}"'.format(title_list))
     g_plot(plot_str)
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 35027b1..63430e2 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -26,6 +26,7 @@
 TARGETS += kvm
 TARGETS += lib
 TARGETS += livepatch
+TARGETS += lkdtm
 TARGETS += membarrier
 TARGETS += memfd
 TARGETS += memory-hotplug
@@ -41,6 +42,7 @@
 TARGETS += proc
 TARGETS += pstore
 TARGETS += ptrace
+TARGETS += openat2
 TARGETS += rseq
 TARGETS += rtc
 TARGETS += seccomp
@@ -145,11 +147,13 @@
 endif
 
 all: khdr
-	@for TARGET in $(TARGETS); do		\
-		BUILD_TARGET=$$BUILD/$$TARGET;	\
-		mkdir $$BUILD_TARGET  -p;	\
-		$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET;\
-	done;
+	@ret=1;							\
+	for TARGET in $(TARGETS); do				\
+		BUILD_TARGET=$$BUILD/$$TARGET;			\
+		mkdir $$BUILD_TARGET  -p;			\
+		$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET;	\
+		ret=$$((ret * $$?));				\
+	done; exit $$ret;
 
 run_tests: all
 	@for TARGET in $(TARGETS); do \
@@ -198,10 +202,12 @@
 	install -m 744 kselftest/module.sh $(INSTALL_PATH)/kselftest/
 	install -m 744 kselftest/runner.sh $(INSTALL_PATH)/kselftest/
 	install -m 744 kselftest/prefix.pl $(INSTALL_PATH)/kselftest/
-	@for TARGET in $(TARGETS); do \
+	@ret=1;	\
+	for TARGET in $(TARGETS); do \
 		BUILD_TARGET=$$BUILD/$$TARGET;	\
 		$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \
-	done;
+		ret=$$((ret * $$?));		\
+	done; exit $$ret;
 
 	@# Ask all targets to emit their test scripts
 	echo "#!/bin/sh" > $(ALL_SCRIPT)
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
new file mode 100644
index 0000000..07f5b46
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Cloudflare
+
+#include "test_progs.h"
+
+static int connected_socket_v4(void)
+{
+	struct sockaddr_in addr = {
+		.sin_family = AF_INET,
+		.sin_port = htons(80),
+		.sin_addr = { inet_addr("127.0.0.1") },
+	};
+	socklen_t len = sizeof(addr);
+	int s, repair, err;
+
+	s = socket(AF_INET, SOCK_STREAM, 0);
+	if (CHECK_FAIL(s == -1))
+		goto error;
+
+	repair = TCP_REPAIR_ON;
+	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
+	if (CHECK_FAIL(err))
+		goto error;
+
+	err = connect(s, (struct sockaddr *)&addr, len);
+	if (CHECK_FAIL(err))
+		goto error;
+
+	repair = TCP_REPAIR_OFF_NO_WP;
+	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
+	if (CHECK_FAIL(err))
+		goto error;
+
+	return s;
+error:
+	perror(__func__);
+	close(s);
+	return -1;
+}
+
+/* Create a map, populate it with one socket, and free the map. */
+static void test_sockmap_create_update_free(enum bpf_map_type map_type)
+{
+	const int zero = 0;
+	int s, map, err;
+
+	s = connected_socket_v4();
+	if (CHECK_FAIL(s == -1))
+		return;
+
+	map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
+	if (CHECK_FAIL(map == -1)) {
+		perror("bpf_create_map");
+		goto out;
+	}
+
+	err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
+	if (CHECK_FAIL(err)) {
+		perror("bpf_map_update");
+		goto out;
+	}
+
+out:
+	close(map);
+	close(s);
+}
+
+void test_sockmap_basic(void)
+{
+	if (test__start_subtest("sockmap create_update_free"))
+		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
+	if (test__start_subtest("sockhash create_update_free"))
+		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
index 1235f3d..1f6ccda 100644
--- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
+++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
@@ -46,7 +46,7 @@ void test_trampoline_count(void)
 	const char *fentry_name = "fentry/__set_task_comm";
 	const char *fexit_name = "fexit/__set_task_comm";
 	const char *object = "test_trampoline_count.o";
-	struct inst inst[MAX_TRAMP_PROGS] = { 0 };
+	struct inst inst[MAX_TRAMP_PROGS] = {};
 	int err, i = 0, duration = 0;
 	struct bpf_object *obj;
 	struct bpf_link *link;
diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
index c5ca669..e19ce94 100644
--- a/tools/testing/selftests/cgroup/test_core.c
+++ b/tools/testing/selftests/cgroup/test_core.c
@@ -369,7 +369,7 @@ static void *dummy_thread_fn(void *arg)
 static int test_cgcore_proc_migration(const char *root)
 {
 	int ret = KSFT_FAIL;
-	int t, c_threads, n_threads = 13;
+	int t, c_threads = 0, n_threads = 13;
 	char *src = NULL, *dst = NULL;
 	pthread_t threads[n_threads];
 
diff --git a/tools/testing/selftests/dmabuf-heaps/Makefile b/tools/testing/selftests/dmabuf-heaps/Makefile
new file mode 100644
index 0000000..607c2ac
--- /dev/null
+++ b/tools/testing/selftests/dmabuf-heaps/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS += -static -O3 -Wl,-no-as-needed -Wall -I../../../../usr/include
+
+TEST_GEN_PROGS = dmabuf-heap
+
+include ../lib.mk
diff --git a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
new file mode 100644
index 0000000..cd5e1f6
--- /dev/null
+++ b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+#include <linux/dma-buf.h>
+#include <drm/drm.h>
+
+#include "../../../../include/uapi/linux/dma-heap.h"
+
+#define DEVPATH "/dev/dma_heap"
+
+static int check_vgem(int fd)
+{
+	drm_version_t version = { 0 };
+	char name[5];
+	int ret;
+
+	version.name_len = 4;
+	version.name = name;
+
+	ret = ioctl(fd, DRM_IOCTL_VERSION, &version);
+	if (ret)
+		return 0;
+
+	return !strcmp(name, "vgem");
+}
+
+static int open_vgem(void)
+{
+	int i, fd;
+	const char *drmstr = "/dev/dri/card";
+
+	fd = -1;
+	for (i = 0; i < 16; i++) {
+		char name[80];
+
+		snprintf(name, 80, "%s%u", drmstr, i);
+
+		fd = open(name, O_RDWR);
+		if (fd < 0)
+			continue;
+
+		if (!check_vgem(fd)) {
+			close(fd);
+			fd = -1;
+			continue;
+		} else {
+			break;
+		}
+	}
+	return fd;
+}
+
+static int import_vgem_fd(int vgem_fd, int dma_buf_fd, uint32_t *handle)
+{
+	struct drm_prime_handle import_handle = {
+		.fd = dma_buf_fd,
+		.flags = 0,
+		.handle = 0,
+	 };
+	int ret;
+
+	ret = ioctl(vgem_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &import_handle);
+	if (ret == 0)
+		*handle = import_handle.handle;
+	return ret;
+}
+
+static void close_handle(int vgem_fd, uint32_t handle)
+{
+	struct drm_gem_close close = {
+		.handle = handle,
+	};
+
+	ioctl(vgem_fd, DRM_IOCTL_GEM_CLOSE, &close);
+}
+
+static int dmabuf_heap_open(char *name)
+{
+	int ret, fd;
+	char buf[256];
+
+	ret = snprintf(buf, 256, "%s/%s", DEVPATH, name);
+	if (ret < 0) {
+		printf("snprintf failed!\n");
+		return ret;
+	}
+
+	fd = open(buf, O_RDWR);
+	if (fd < 0)
+		printf("open %s failed!\n", buf);
+	return fd;
+}
+
+static int dmabuf_heap_alloc_fdflags(int fd, size_t len, unsigned int fd_flags,
+				     unsigned int heap_flags, int *dmabuf_fd)
+{
+	struct dma_heap_allocation_data data = {
+		.len = len,
+		.fd = 0,
+		.fd_flags = fd_flags,
+		.heap_flags = heap_flags,
+	};
+	int ret;
+
+	if (!dmabuf_fd)
+		return -EINVAL;
+
+	ret = ioctl(fd, DMA_HEAP_IOCTL_ALLOC, &data);
+	if (ret < 0)
+		return ret;
+	*dmabuf_fd = (int)data.fd;
+	return ret;
+}
+
+static int dmabuf_heap_alloc(int fd, size_t len, unsigned int flags,
+			     int *dmabuf_fd)
+{
+	return dmabuf_heap_alloc_fdflags(fd, len, O_RDWR | O_CLOEXEC, flags,
+					 dmabuf_fd);
+}
+
+static void dmabuf_sync(int fd, int start_stop)
+{
+	struct dma_buf_sync sync = {
+		.flags = start_stop | DMA_BUF_SYNC_RW,
+	};
+	int ret;
+
+	ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
+	if (ret)
+		printf("sync failed %d\n", errno);
+}
+
+#define ONE_MEG (1024 * 1024)
+
+static int test_alloc_and_import(char *heap_name)
+{
+	int heap_fd = -1, dmabuf_fd = -1, importer_fd = -1;
+	uint32_t handle = 0;
+	void *p = NULL;
+	int ret;
+
+	printf("Testing heap: %s\n", heap_name);
+
+	heap_fd = dmabuf_heap_open(heap_name);
+	if (heap_fd < 0)
+		return -1;
+
+	printf("Allocating 1 MEG\n");
+	ret = dmabuf_heap_alloc(heap_fd, ONE_MEG, 0, &dmabuf_fd);
+	if (ret) {
+		printf("Allocation Failed!\n");
+		ret = -1;
+		goto out;
+	}
+	/* mmap and write a simple pattern */
+	p = mmap(NULL,
+		 ONE_MEG,
+		 PROT_READ | PROT_WRITE,
+		 MAP_SHARED,
+		 dmabuf_fd,
+		 0);
+	if (p == MAP_FAILED) {
+		printf("mmap() failed: %m\n");
+		ret = -1;
+		goto out;
+	}
+	printf("mmap passed\n");
+
+	dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_START);
+	memset(p, 1, ONE_MEG / 2);
+	memset((char *)p + ONE_MEG / 2, 0, ONE_MEG / 2);
+	dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_END);
+
+	importer_fd = open_vgem();
+	if (importer_fd < 0) {
+		ret = importer_fd;
+		printf("Failed to open vgem\n");
+		goto out;
+	}
+
+	ret = import_vgem_fd(importer_fd, dmabuf_fd, &handle);
+	if (ret < 0) {
+		printf("Failed to import buffer\n");
+		goto out;
+	}
+	printf("import passed\n");
+
+	dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_START);
+	memset(p, 0xff, ONE_MEG);
+	dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_END);
+	printf("syncs passed\n");
+
+	close_handle(importer_fd, handle);
+	ret = 0;
+
+out:
+	if (p)
+		munmap(p, ONE_MEG);
+	if (importer_fd >= 0)
+		close(importer_fd);
+	if (dmabuf_fd >= 0)
+		close(dmabuf_fd);
+	if (heap_fd >= 0)
+		close(heap_fd);
+
+	return ret;
+}
+
+/* Test the ioctl version compatibility w/ a smaller structure than expected */
+static int dmabuf_heap_alloc_older(int fd, size_t len, unsigned int flags,
+				   int *dmabuf_fd)
+{
+	int ret;
+	unsigned int older_alloc_ioctl;
+	struct dma_heap_allocation_data_smaller {
+		__u64 len;
+		__u32 fd;
+		__u32 fd_flags;
+	} data = {
+		.len = len,
+		.fd = 0,
+		.fd_flags = O_RDWR | O_CLOEXEC,
+	};
+
+	older_alloc_ioctl = _IOWR(DMA_HEAP_IOC_MAGIC, 0x0,
+				  struct dma_heap_allocation_data_smaller);
+	if (!dmabuf_fd)
+		return -EINVAL;
+
+	ret = ioctl(fd, older_alloc_ioctl, &data);
+	if (ret < 0)
+		return ret;
+	*dmabuf_fd = (int)data.fd;
+	return ret;
+}
+
+/* Test the ioctl version compatibility w/ a larger structure than expected */
+static int dmabuf_heap_alloc_newer(int fd, size_t len, unsigned int flags,
+				   int *dmabuf_fd)
+{
+	int ret;
+	unsigned int newer_alloc_ioctl;
+	struct dma_heap_allocation_data_bigger {
+		__u64 len;
+		__u32 fd;
+		__u32 fd_flags;
+		__u64 heap_flags;
+		__u64 garbage1;
+		__u64 garbage2;
+		__u64 garbage3;
+	} data = {
+		.len = len,
+		.fd = 0,
+		.fd_flags = O_RDWR | O_CLOEXEC,
+		.heap_flags = flags,
+		.garbage1 = 0xffffffff,
+		.garbage2 = 0x88888888,
+		.garbage3 = 0x11111111,
+	};
+
+	newer_alloc_ioctl = _IOWR(DMA_HEAP_IOC_MAGIC, 0x0,
+				  struct dma_heap_allocation_data_bigger);
+	if (!dmabuf_fd)
+		return -EINVAL;
+
+	ret = ioctl(fd, newer_alloc_ioctl, &data);
+	if (ret < 0)
+		return ret;
+
+	*dmabuf_fd = (int)data.fd;
+	return ret;
+}
+
+static int test_alloc_compat(char *heap_name)
+{
+	int heap_fd = -1, dmabuf_fd = -1;
+	int ret;
+
+	heap_fd = dmabuf_heap_open(heap_name);
+	if (heap_fd < 0)
+		return -1;
+
+	printf("Testing (theoretical)older alloc compat\n");
+	ret = dmabuf_heap_alloc_older(heap_fd, ONE_MEG, 0, &dmabuf_fd);
+	if (ret) {
+		printf("Older compat allocation failed!\n");
+		ret = -1;
+		goto out;
+	}
+	close(dmabuf_fd);
+
+	printf("Testing (theoretical)newer alloc compat\n");
+	ret = dmabuf_heap_alloc_newer(heap_fd, ONE_MEG, 0, &dmabuf_fd);
+	if (ret) {
+		printf("Newer compat allocation failed!\n");
+		ret = -1;
+		goto out;
+	}
+	printf("Ioctl compatibility tests passed\n");
+out:
+	if (dmabuf_fd >= 0)
+		close(dmabuf_fd);
+	if (heap_fd >= 0)
+		close(heap_fd);
+
+	return ret;
+}
+
+static int test_alloc_errors(char *heap_name)
+{
+	int heap_fd = -1, dmabuf_fd = -1;
+	int ret;
+
+	heap_fd = dmabuf_heap_open(heap_name);
+	if (heap_fd < 0)
+		return -1;
+
+	printf("Testing expected error cases\n");
+	ret = dmabuf_heap_alloc(0, ONE_MEG, 0x111111, &dmabuf_fd);
+	if (!ret) {
+		printf("Did not see expected error (invalid fd)!\n");
+		ret = -1;
+		goto out;
+	}
+
+	ret = dmabuf_heap_alloc(heap_fd, ONE_MEG, 0x111111, &dmabuf_fd);
+	if (!ret) {
+		printf("Did not see expected error (invalid heap flags)!\n");
+		ret = -1;
+		goto out;
+	}
+
+	ret = dmabuf_heap_alloc_fdflags(heap_fd, ONE_MEG,
+					~(O_RDWR | O_CLOEXEC), 0, &dmabuf_fd);
+	if (!ret) {
+		printf("Did not see expected error (invalid fd flags)!\n");
+		ret = -1;
+		goto out;
+	}
+
+	printf("Expected error checking passed\n");
+out:
+	if (dmabuf_fd >= 0)
+		close(dmabuf_fd);
+	if (heap_fd >= 0)
+		close(heap_fd);
+
+	return ret;
+}
+
+int main(void)
+{
+	DIR *d;
+	struct dirent *dir;
+	int ret = -1;
+
+	d = opendir(DEVPATH);
+	if (!d) {
+		printf("No %s directory?\n", DEVPATH);
+		return -1;
+	}
+
+	while ((dir = readdir(d)) != NULL) {
+		if (!strncmp(dir->d_name, ".", 2))
+			continue;
+		if (!strncmp(dir->d_name, "..", 3))
+			continue;
+
+		ret = test_alloc_and_import(dir->d_name);
+		if (ret)
+			break;
+
+		ret = test_alloc_compat(dir->d_name);
+		if (ret)
+			break;
+
+		ret = test_alloc_errors(dir->d_name);
+		if (ret)
+			break;
+	}
+	closedir(d);
+
+	return ret;
+}
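The two compat helpers above rely on _IOWR() folding the size of its argument struct into the command number, so issuing the same command nr with a smaller or larger struct yields a distinct ioctl value whose encoded size the driver can inspect. A small self-contained illustration, using a made-up magic byte rather than DMA_HEAP_IOC_MAGIC:

/*
 * Illustration of ioctl size encoding, the mechanism the older/newer
 * compat tests exercise. The magic byte and struct names here are
 * invented for the example.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/types.h>

#define EXAMPLE_IOC_MAGIC	'H'

struct example_alloc_v1 {
	__u64 len;
	__u32 fd;
	__u32 fd_flags;
};

struct example_alloc_v2 {
	__u64 len;
	__u32 fd;
	__u32 fd_flags;
	__u64 heap_flags;
};

int main(void)
{
	unsigned long v1 = _IOWR(EXAMPLE_IOC_MAGIC, 0x0, struct example_alloc_v1);
	unsigned long v2 = _IOWR(EXAMPLE_IOC_MAGIC, 0x0, struct example_alloc_v2);

	/* Same magic and nr, different encoded argument sizes. */
	printf("v1: cmd=%#lx size=%u\n", v1, (unsigned int)_IOC_SIZE(v1));
	printf("v2: cmd=%#lx size=%u\n", v2, (unsigned int)_IOC_SIZE(v2));
	return 0;
}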
diff --git a/tools/testing/selftests/drivers/net/mlxsw/fib.sh b/tools/testing/selftests/drivers/net/mlxsw/fib.sh
index 45115f8..eab79b9 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/fib.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/fib.sh
@@ -14,6 +14,7 @@
 	ipv4_plen
 	ipv4_replay
 	ipv4_flush
+	ipv4_local_replace
 	ipv6_add
 	ipv6_metric
 	ipv6_append_single
@@ -26,6 +27,7 @@
 	ipv6_delete_multipath
 	ipv6_replay_single
 	ipv6_replay_multipath
+	ipv6_local_replace
 "
 NUM_NETIFS=0
 source $lib_dir/lib.sh
@@ -89,6 +91,43 @@
 	fib_ipv4_flush_test "testns1"
 }
 
+ipv4_local_replace()
+{
+	local ns="testns1"
+
+	RET=0
+
+	ip -n $ns link add name dummy1 type dummy
+	ip -n $ns link set dev dummy1 up
+
+	ip -n $ns route add table local 192.0.2.1/32 dev dummy1
+	fib4_trap_check $ns "table local 192.0.2.1/32 dev dummy1" false
+	check_err $? "Local table route not in hardware when should"
+
+	ip -n $ns route add table main 192.0.2.1/32 dev dummy1
+	fib4_trap_check $ns "table main 192.0.2.1/32 dev dummy1" true
+	check_err $? "Main table route in hardware when should not"
+
+	fib4_trap_check $ns "table local 192.0.2.1/32 dev dummy1" false
+	check_err $? "Local table route was replaced when should not"
+
+	# Test that local routes can replace routes in main table.
+	ip -n $ns route add table main 192.0.2.2/32 dev dummy1
+	fib4_trap_check $ns "table main 192.0.2.2/32 dev dummy1" false
+	check_err $? "Main table route not in hardware when should"
+
+	ip -n $ns route add table local 192.0.2.2/32 dev dummy1
+	fib4_trap_check $ns "table local 192.0.2.2/32 dev dummy1" false
+	check_err $? "Local table route did not replace route in main table when should"
+
+	fib4_trap_check $ns "table main 192.0.2.2/32 dev dummy1" true
+	check_err $? "Main table route was not replaced when should"
+
+	log_test "IPv4 local table route replacement"
+
+	ip -n $ns link del dev dummy1
+}
+
 ipv6_add()
 {
 	fib_ipv6_add_test "testns1"
@@ -149,6 +188,43 @@
 	fib_ipv6_replay_multipath_test "testns1" "$DEVLINK_DEV"
 }
 
+ipv6_local_replace()
+{
+	local ns="testns1"
+
+	RET=0
+
+	ip -n $ns link add name dummy1 type dummy
+	ip -n $ns link set dev dummy1 up
+
+	ip -n $ns route add table local 2001:db8:1::1/128 dev dummy1
+	fib6_trap_check $ns "table local 2001:db8:1::1/128 dev dummy1" false
+	check_err $? "Local table route not in hardware when should"
+
+	ip -n $ns route add table main 2001:db8:1::1/128 dev dummy1
+	fib6_trap_check $ns "table main 2001:db8:1::1/128 dev dummy1" true
+	check_err $? "Main table route in hardware when should not"
+
+	fib6_trap_check $ns "table local 2001:db8:1::1/128 dev dummy1" false
+	check_err $? "Local table route was replaced when should not"
+
+	# Test that local routes can replace routes in main table.
+	ip -n $ns route add table main 2001:db8:1::2/128 dev dummy1
+	fib6_trap_check $ns "table main 2001:db8:1::2/128 dev dummy1" false
+	check_err $? "Main table route not in hardware when should"
+
+	ip -n $ns route add table local 2001:db8:1::2/128 dev dummy1
+	fib6_trap_check $ns "table local 2001:db8:1::2/128 dev dummy1" false
+	check_err $? "Local route route did not replace route in main table when should"
+
+	fib6_trap_check $ns "table main 2001:db8:1::2/128 dev dummy1" true
+	check_err $? "Main table route was not replaced when should"
+
+	log_test "IPv6 local table route replacement"
+
+	ip -n $ns link del dev dummy1
+}
+
 setup_prepare()
 {
 	ip netns add testns1
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
index 27a54a1..f4e92afa 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
@@ -30,7 +30,7 @@
 ftrace_filter_check 'schedule*' '^schedule.*$'
 
 # filter by *mid*end
-ftrace_filter_check '*aw*lock' '.*aw.*lock$'
+ftrace_filter_check '*pin*lock' '.*pin.*lock$'
 
 # filter by start*mid*
 ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc
new file mode 100644
index 0000000..d44087a
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc
@@ -0,0 +1,32 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test histogram parser errors
+
+if [ ! -f set_event -o ! -d events/kmem ]; then
+    echo "event tracing is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f events/kmem/kmalloc/trigger ]; then
+    echo "event trigger is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f events/kmem/kmalloc/hist ]; then
+    echo "hist trigger is not supported"
+    exit_unsupported
+fi
+
+[ -f error_log ] || exit_unsupported
+
+check_error() { # command-with-error-pos-by-^
+    ftrace_errlog_check 'hist:kmem:kmalloc' "$1" 'events/kmem/kmalloc/trigger'
+}
+
+check_error 'hist:keys=common_pid:vals=bytes_req:sort=common_pid,^junk'	# INVALID_SORT_FIELD
+check_error 'hist:keys=common_pid:vals=bytes_req:^sort='		# EMPTY_ASSIGNMENT
+check_error 'hist:keys=common_pid:vals=bytes_req:^sort=common_pid,'	# EMPTY_SORT_FIELD
+check_error 'hist:keys=common_pid:vals=bytes_req:sort=common_pid.^junk'	# INVALID_SORT_MODIFIER
+check_error 'hist:keys=common_pid:vals=bytes_req,bytes_alloc:^sort=common_pid,bytes_req,bytes_alloc'	# TOO_MANY_SORT_FIELDS
+
+exit 0
diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
index a8d20cb..e84d901 100644
--- a/tools/testing/selftests/kselftest/runner.sh
+++ b/tools/testing/selftests/kselftest/runner.sh
@@ -91,7 +91,7 @@
 run_many()
 {
 	echo "TAP version 13"
-	DIR=$(basename "$PWD")
+	DIR="${PWD#${BASE_DIR}/}"
 	test_num=0
 	total=$(echo "$@" | wc -w)
 	echo "1..$total"
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 608fa83..67abc1d 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -36,6 +36,7 @@
 
 TEST_GEN_PROGS_s390x = s390x/memop
 TEST_GEN_PROGS_s390x += s390x/sync_regs_test
+TEST_GEN_PROGS_s390x += s390x/resets
 TEST_GEN_PROGS_s390x += dirty_log_test
 TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
 
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 29cccaf..ae0d14c 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -125,6 +125,12 @@ void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
 		    struct kvm_sregs *sregs);
 int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
 		    struct kvm_sregs *sregs);
+void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid,
+		  struct kvm_fpu *fpu);
+void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid,
+		  struct kvm_fpu *fpu);
+void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg);
+void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg);
 #ifdef __KVM_HAVE_VCPU_EVENTS
 void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
 		     struct kvm_vcpu_events *events);
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index f52e0ba..3d27069 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -18,8 +18,8 @@
 /*
  * Definitions of Primary Processor-Based VM-Execution Controls.
  */
-#define CPU_BASED_VIRTUAL_INTR_PENDING		0x00000004
-#define CPU_BASED_USE_TSC_OFFSETING		0x00000008
+#define CPU_BASED_INTR_WINDOW_EXITING		0x00000004
+#define CPU_BASED_USE_TSC_OFFSETTING		0x00000008
 #define CPU_BASED_HLT_EXITING			0x00000080
 #define CPU_BASED_INVLPG_EXITING		0x00000200
 #define CPU_BASED_MWAIT_EXITING			0x00000400
@@ -30,7 +30,7 @@
 #define CPU_BASED_CR8_LOAD_EXITING		0x00080000
 #define CPU_BASED_CR8_STORE_EXITING		0x00100000
 #define CPU_BASED_TPR_SHADOW			0x00200000
-#define CPU_BASED_VIRTUAL_NMI_PENDING		0x00400000
+#define CPU_BASED_NMI_WINDOW_EXITING		0x00400000
 #define CPU_BASED_MOV_DR_EXITING		0x00800000
 #define CPU_BASED_UNCOND_IO_EXITING		0x01000000
 #define CPU_BASED_USE_IO_BITMAPS		0x02000000
@@ -103,7 +103,7 @@
 #define EXIT_REASON_EXCEPTION_NMI	0
 #define EXIT_REASON_EXTERNAL_INTERRUPT	1
 #define EXIT_REASON_TRIPLE_FAULT	2
-#define EXIT_REASON_PENDING_INTERRUPT	7
+#define EXIT_REASON_INTERRUPT_WINDOW	7
 #define EXIT_REASON_NMI_WINDOW		8
 #define EXIT_REASON_TASK_SWITCH		9
 #define EXIT_REASON_CPUID		10
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 41cf454..a6dd040 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1373,6 +1373,42 @@ int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
 	return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
 }
 
+void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
+{
+	int ret;
+
+	ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_FPU, fpu);
+	TEST_ASSERT(ret == 0, "KVM_GET_FPU failed, rc: %i errno: %i (%s)",
+		    ret, errno, strerror(errno));
+}
+
+void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
+{
+	int ret;
+
+	ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu);
+	TEST_ASSERT(ret == 0, "KVM_SET_FPU failed, rc: %i errno: %i (%s)",
+		    ret, errno, strerror(errno));
+}
+
+void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
+{
+	int ret;
+
+	ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, reg);
+	TEST_ASSERT(ret == 0, "KVM_GET_ONE_REG failed, rc: %i errno: %i (%s)",
+		    ret, errno, strerror(errno));
+}
+
+void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
+{
+	int ret;
+
+	ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, reg);
+	TEST_ASSERT(ret == 0, "KVM_SET_ONE_REG failed, rc: %i errno: %i (%s)",
+		    ret, errno, strerror(errno));
+}
+
 /*
  * VCPU Ioctl
  *
diff --git a/tools/testing/selftests/kvm/s390x/resets.c b/tools/testing/selftests/kvm/s390x/resets.c
new file mode 100644
index 0000000..1485bc6
--- /dev/null
+++ b/tools/testing/selftests/kvm/s390x/resets.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test for s390x CPU resets
+ *
+ * Copyright (C) 2020, IBM
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+
+#define VCPU_ID 3
+#define LOCAL_IRQS 32
+
+struct kvm_s390_irq buf[VCPU_ID + LOCAL_IRQS];
+
+struct kvm_vm *vm;
+struct kvm_run *run;
+struct kvm_sync_regs *regs;
+static uint64_t regs_null[16];
+
+static uint64_t crs[16] = { 0x40000ULL,
+			    0x42000ULL,
+			    0, 0, 0, 0, 0,
+			    0x43000ULL,
+			    0, 0, 0, 0, 0,
+			    0x44000ULL,
+			    0, 0
+};
+
+static void guest_code_initial(void)
+{
+	/* Round toward 0 */
+	uint32_t fpc = 0x11;
+
+	/* Dirty registers */
+	asm volatile (
+		"	lctlg	0,15,%0\n"
+		"	sfpc	%1\n"
+		: : "Q" (crs), "d" (fpc));
+	GUEST_SYNC(0);
+}
+
+static void test_one_reg(uint64_t id, uint64_t value)
+{
+	struct kvm_one_reg reg;
+	uint64_t eval_reg;
+
+	reg.addr = (uintptr_t)&eval_reg;
+	reg.id = id;
+	vcpu_get_reg(vm, VCPU_ID, &reg);
+	TEST_ASSERT(eval_reg == value, "value == 0x%llx", (unsigned long long)value);
+}
+
+static void assert_noirq(void)
+{
+	struct kvm_s390_irq_state irq_state;
+	int irqs;
+
+	irq_state.len = sizeof(buf);
+	irq_state.buf = (unsigned long)buf;
+	irqs = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_GET_IRQ_STATE, &irq_state);
+	/*
+	 * irqs contains the number of retrieved interrupts. Any interrupt
+	 * (notably, the emergency call interrupt we have injected) should
+	 * be cleared by the resets, so this should be 0.
+	 */
+	TEST_ASSERT(irqs >= 0, "Could not fetch IRQs: errno %d\n", errno);
+	TEST_ASSERT(!irqs, "IRQ pending");
+}
+
+static void assert_clear(void)
+{
+	struct kvm_sregs sregs;
+	struct kvm_regs regs;
+	struct kvm_fpu fpu;
+
+	vcpu_regs_get(vm, VCPU_ID, &regs);
+	TEST_ASSERT(!memcmp(&regs.gprs, regs_null, sizeof(regs.gprs)), "grs == 0");
+
+	vcpu_sregs_get(vm, VCPU_ID, &sregs);
+	TEST_ASSERT(!memcmp(&sregs.acrs, regs_null, sizeof(sregs.acrs)), "acrs == 0");
+
+	vcpu_fpu_get(vm, VCPU_ID, &fpu);
+	TEST_ASSERT(!memcmp(&fpu.fprs, regs_null, sizeof(fpu.fprs)), "fprs == 0");
+}
+
+static void assert_initial(void)
+{
+	struct kvm_sregs sregs;
+	struct kvm_fpu fpu;
+
+	vcpu_sregs_get(vm, VCPU_ID, &sregs);
+	TEST_ASSERT(sregs.crs[0] == 0xE0UL, "cr0 == 0xE0");
+	TEST_ASSERT(sregs.crs[14] == 0xC2000000UL, "cr14 == 0xC2000000");
+	TEST_ASSERT(!memcmp(&sregs.crs[1], regs_null, sizeof(sregs.crs[1]) * 12),
+		    "cr1-13 == 0");
+	TEST_ASSERT(sregs.crs[15] == 0, "cr15 == 0");
+
+	vcpu_fpu_get(vm, VCPU_ID, &fpu);
+	TEST_ASSERT(!fpu.fpc, "fpc == 0");
+
+	test_one_reg(KVM_REG_S390_GBEA, 1);
+	test_one_reg(KVM_REG_S390_PP, 0);
+	test_one_reg(KVM_REG_S390_TODPR, 0);
+	test_one_reg(KVM_REG_S390_CPU_TIMER, 0);
+	test_one_reg(KVM_REG_S390_CLOCK_COMP, 0);
+}
+
+static void assert_normal(void)
+{
+	test_one_reg(KVM_REG_S390_PFTOKEN, KVM_S390_PFAULT_TOKEN_INVALID);
+	assert_noirq();
+}
+
+static void inject_irq(int cpu_id)
+{
+	struct kvm_s390_irq_state irq_state;
+	struct kvm_s390_irq *irq = &buf[0];
+	int irqs;
+
+	/* Inject IRQ */
+	irq_state.len = sizeof(struct kvm_s390_irq);
+	irq_state.buf = (unsigned long)buf;
+	irq->type = KVM_S390_INT_EMERGENCY;
+	irq->u.emerg.code = cpu_id;
+	irqs = _vcpu_ioctl(vm, cpu_id, KVM_S390_SET_IRQ_STATE, &irq_state);
+	TEST_ASSERT(irqs >= 0, "Error injecting EMERGENCY IRQ errno %d\n", errno);
+}
+
+static void test_normal(void)
+{
+	printf("Testing normal reset\n");
+	/* Create VM */
+	vm = vm_create_default(VCPU_ID, 0, guest_code_initial);
+	run = vcpu_state(vm, VCPU_ID);
+	regs = &run->s.regs;
+
+	vcpu_run(vm, VCPU_ID);
+
+	inject_irq(VCPU_ID);
+
+	vcpu_ioctl(vm, VCPU_ID, KVM_S390_NORMAL_RESET, 0);
+	assert_normal();
+	kvm_vm_free(vm);
+}
+
+static void test_initial(void)
+{
+	printf("Testing initial reset\n");
+	vm = vm_create_default(VCPU_ID, 0, guest_code_initial);
+	run = vcpu_state(vm, VCPU_ID);
+	regs = &run->s.regs;
+
+	vcpu_run(vm, VCPU_ID);
+
+	inject_irq(VCPU_ID);
+
+	vcpu_ioctl(vm, VCPU_ID, KVM_S390_INITIAL_RESET, 0);
+	assert_normal();
+	assert_initial();
+	kvm_vm_free(vm);
+}
+
+static void test_clear(void)
+{
+	printf("Testing clear reset\n");
+	vm = vm_create_default(VCPU_ID, 0, guest_code_initial);
+	run = vcpu_state(vm, VCPU_ID);
+	regs = &run->s.regs;
+
+	vcpu_run(vm, VCPU_ID);
+
+	inject_irq(VCPU_ID);
+
+	vcpu_ioctl(vm, VCPU_ID, KVM_S390_CLEAR_RESET, 0);
+	assert_normal();
+	assert_initial();
+	assert_clear();
+	kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+	setbuf(stdout, NULL);	/* Tell stdout not to buffer its content */
+
+	test_initial();
+	if (kvm_check_cap(KVM_CAP_S390_VCPU_RESETS)) {
+		test_normal();
+		test_clear();
+	}
+	return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index 5590fd2..69e482a 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -98,7 +98,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
 	prepare_vmcs(vmx_pages, l2_guest_code,
 		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
-	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
+	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
 	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
 	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
 
diff --git a/tools/testing/selftests/livepatch/README b/tools/testing/selftests/livepatch/README
index b73cd0e..621d325 100644
--- a/tools/testing/selftests/livepatch/README
+++ b/tools/testing/selftests/livepatch/README
@@ -35,7 +35,7 @@
 ------------
 
 See the common functions.sh file for the existing collection of utility
-functions, most importantly set_dynamic_debug() and check_result().  The
+functions, most importantly setup_config() and check_result().  The
 latter function greps the kernel's ring buffer for "livepatch:" and
 "test_klp" strings, so tests be sure to include one of those strings for
 result comparison.  Other utility functions include general module
diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
index a6e3d55..2aab979 100644
--- a/tools/testing/selftests/livepatch/functions.sh
+++ b/tools/testing/selftests/livepatch/functions.sh
@@ -64,7 +64,6 @@
 }
 
 function set_ftrace_enabled() {
-	local sysctl="$1"
 	result=$(sysctl kernel.ftrace_enabled="$1" 2>&1 | paste --serial --delimiters=' ')
 	echo "livepatch: $result" > /dev/kmsg
 }
diff --git a/tools/testing/selftests/lkdtm/Makefile b/tools/testing/selftests/lkdtm/Makefile
new file mode 100644
index 0000000..1bcc9ee9
--- /dev/null
+++ b/tools/testing/selftests/lkdtm/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for LKDTM regression tests
+
+include ../lib.mk
+
+# NOTE: $(OUTPUT) won't get default value if used before lib.mk
+TEST_FILES := tests.txt
+TEST_GEN_PROGS = $(patsubst %,$(OUTPUT)/%.sh,$(shell awk '{print $$1}' tests.txt | sed -e 's/\#//'))
+all: $(TEST_GEN_PROGS)
+
+$(OUTPUT)/%: run.sh tests.txt
+	install -m 0744 run.sh $@
diff --git a/tools/testing/selftests/lkdtm/config b/tools/testing/selftests/lkdtm/config
new file mode 100644
index 0000000..d874990
--- /dev/null
+++ b/tools/testing/selftests/lkdtm/config
@@ -0,0 +1 @@
+CONFIG_LKDTM=y
diff --git a/tools/testing/selftests/lkdtm/run.sh b/tools/testing/selftests/lkdtm/run.sh
new file mode 100755
index 0000000..dadf819
--- /dev/null
+++ b/tools/testing/selftests/lkdtm/run.sh
@@ -0,0 +1,92 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# This reads tests.txt for the list of LKDTM tests to invoke. Any marked
+# with a leading "#" are skipped. The rest of the line after the
+# test name is either the text to look for in dmesg for a "success",
+# or the rationale for why a test is marked to be skipped.
+#
+set -e
+TRIGGER=/sys/kernel/debug/provoke-crash/DIRECT
+KSELFTEST_SKIP_TEST=4
+
+# Verify we have LKDTM available in the kernel.
+if [ ! -r $TRIGGER ] ; then
+	/sbin/modprobe -q lkdtm || true
+	if [ ! -r $TRIGGER ] ; then
+		echo "Cannot find $TRIGGER (missing CONFIG_LKDTM?)"
+	else
+		echo "Cannot write $TRIGGER (need to run as root?)"
+	fi
+	# Skip this test
+	exit $KSELFTEST_SKIP_TEST
+fi
+
+# Figure out which test to run from our script name.
+test=$(basename $0 .sh)
+# Look up details about the test from master list of LKDTM tests.
+line=$(egrep '^#?'"$test"'\b' tests.txt)
+if [ -z "$line" ]; then
+	echo "Skipped: missing test '$test' in tests.txt"
+	exit $KSELFTEST_SKIP_TEST
+fi
+# Check that the test is known to LKDTM.
+if ! egrep -q '^'"$test"'$' "$TRIGGER" ; then
+	echo "Skipped: test '$test' missing in $TRIGGER!"
+	exit $KSELFTEST_SKIP_TEST
+fi
+
+# Extract notes/expected output from test list.
+test=$(echo "$line" | cut -d" " -f1)
+if echo "$line" | grep -q ' ' ; then
+	expect=$(echo "$line" | cut -d" " -f2-)
+else
+	expect=""
+fi
+
+# If the test is commented out, report a skip
+if echo "$test" | grep -q '^#' ; then
+	test=$(echo "$test" | cut -c2-)
+	if [ -z "$expect" ]; then
+		expect="crashes entire system"
+	fi
+	echo "Skipping $test: $expect"
+	exit $KSELFTEST_SKIP_TEST
+fi
+
+# If no expected output given, assume an Oops with back trace is success.
+if [ -z "$expect" ]; then
+	expect="call trace:"
+fi
+
+# Clear out dmesg for output reporting
+dmesg -c >/dev/null
+
+# Prepare log for report checking
+LOG=$(mktemp --tmpdir -t lkdtm-XXXXXX)
+cleanup() {
+	rm -f "$LOG"
+}
+trap cleanup EXIT
+
+# Most shells yell about signals and we're expecting the "cat" process
+# to usually be killed by the kernel. So we have to run it in a sub-shell
+# and silence errors.
+($SHELL -c 'cat <(echo '"$test"') >'"$TRIGGER" 2>/dev/null) || true
+
+# Record and dump the results
+dmesg -c >"$LOG"
+cat "$LOG"
+# Check for expected output
+if egrep -qi "$expect" "$LOG" ; then
+	echo "$test: saw '$expect': ok"
+	exit 0
+else
+	if egrep -qi XFAIL: "$LOG" ; then
+		echo "$test: saw 'XFAIL': [SKIP]"
+		exit $KSELFTEST_SKIP_TEST
+	else
+		echo "$test: missing '$expect': [FAIL]"
+		exit 1
+	fi
+fi
diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
new file mode 100644
index 0000000..92ca321
--- /dev/null
+++ b/tools/testing/selftests/lkdtm/tests.txt
@@ -0,0 +1,71 @@
+#PANIC
+BUG kernel BUG at
+WARNING WARNING:
+WARNING_MESSAGE message trigger
+EXCEPTION
+#LOOP Hangs the system
+#EXHAUST_STACK Corrupts memory on failure
+#CORRUPT_STACK Crashes entire system on success
+#CORRUPT_STACK_STRONG Crashes entire system on success
+CORRUPT_LIST_ADD list_add corruption
+CORRUPT_LIST_DEL list_del corruption
+CORRUPT_USER_DS Invalid address limit on user-mode return
+STACK_GUARD_PAGE_LEADING
+STACK_GUARD_PAGE_TRAILING
+UNSET_SMEP CR4 bits went missing
+DOUBLE_FAULT
+UNALIGNED_LOAD_STORE_WRITE
+#OVERWRITE_ALLOCATION Corrupts memory on failure
+#WRITE_AFTER_FREE Corrupts memory on failure
+READ_AFTER_FREE
+#WRITE_BUDDY_AFTER_FREE Corrupts memory on failure
+READ_BUDDY_AFTER_FREE
+SLAB_FREE_DOUBLE
+SLAB_FREE_CROSS
+SLAB_FREE_PAGE
+#SOFTLOCKUP Hangs the system
+#HARDLOCKUP Hangs the system
+#SPINLOCKUP Hangs the system
+#HUNG_TASK Hangs the system
+EXEC_DATA
+EXEC_STACK
+EXEC_KMALLOC
+EXEC_VMALLOC
+EXEC_RODATA
+EXEC_USERSPACE
+EXEC_NULL
+ACCESS_USERSPACE
+ACCESS_NULL
+WRITE_RO
+WRITE_RO_AFTER_INIT
+WRITE_KERN
+REFCOUNT_INC_OVERFLOW
+REFCOUNT_ADD_OVERFLOW
+REFCOUNT_INC_NOT_ZERO_OVERFLOW
+REFCOUNT_ADD_NOT_ZERO_OVERFLOW
+REFCOUNT_DEC_ZERO
+REFCOUNT_DEC_NEGATIVE Negative detected: saturated
+REFCOUNT_DEC_AND_TEST_NEGATIVE Negative detected: saturated
+REFCOUNT_SUB_AND_TEST_NEGATIVE Negative detected: saturated
+REFCOUNT_INC_ZERO
+REFCOUNT_ADD_ZERO
+REFCOUNT_INC_SATURATED Saturation detected: still saturated
+REFCOUNT_DEC_SATURATED Saturation detected: still saturated
+REFCOUNT_ADD_SATURATED Saturation detected: still saturated
+REFCOUNT_INC_NOT_ZERO_SATURATED
+REFCOUNT_ADD_NOT_ZERO_SATURATED
+REFCOUNT_DEC_AND_TEST_SATURATED Saturation detected: still saturated
+REFCOUNT_SUB_AND_TEST_SATURATED Saturation detected: still saturated
+#REFCOUNT_TIMING timing only
+#ATOMIC_TIMING timing only
+USERCOPY_HEAP_SIZE_TO
+USERCOPY_HEAP_SIZE_FROM
+USERCOPY_HEAP_WHITELIST_TO
+USERCOPY_HEAP_WHITELIST_FROM
+USERCOPY_STACK_FRAME_TO
+USERCOPY_STACK_FRAME_FROM
+USERCOPY_STACK_BEYOND
+USERCOPY_KERNEL
+USERCOPY_KERNEL_DS
+STACKLEAK_ERASING OK: the rest of the thread stack is properly erased
+CFI_FORWARD_PROTO
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 8aefd81..ecc52d4 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -22,3 +22,4 @@
 so_txtime
 tcp_fastopen_backup_key
 nettest
+fin_ack_lat
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index a8e04d6..b569419 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -11,6 +11,7 @@
 TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh
 TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh
 TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh
+TEST_PROGS += fin_ack_lat.sh
 TEST_PROGS_EXTENDED := in_netns.sh
 TEST_GEN_FILES =  socket nettest
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
@@ -18,6 +19,7 @@
 TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx ip_defrag
 TEST_GEN_FILES += so_txtime ipv6_flowlabel ipv6_flowlabel_mgr
 TEST_GEN_FILES += tcp_fastopen_backup_key
+TEST_GEN_FILES += fin_ack_lat
 TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
 TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
 
diff --git a/tools/testing/selftests/net/fin_ack_lat.c b/tools/testing/selftests/net/fin_ack_lat.c
new file mode 100644
index 0000000..7018749
--- /dev/null
+++ b/tools/testing/selftests/net/fin_ack_lat.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+static int child_pid;
+
+static unsigned long timediff(struct timeval s, struct timeval e)
+{
+	unsigned long s_us, e_us;
+
+	s_us = s.tv_sec * 1000000 + s.tv_usec;
+	e_us = e.tv_sec * 1000000 + e.tv_usec;
+	if (s_us > e_us)
+		return 0;
+	return e_us - s_us;
+}
+
+static void client(int port)
+{
+	int sock = 0;
+	struct sockaddr_in addr, laddr;
+	socklen_t len = sizeof(laddr);
+	struct linger sl;
+	int flag = 1;
+	int buffer;
+	struct timeval start, end;
+	unsigned long lat, sum_lat = 0, nr_lat = 0;
+
+	while (1) {
+		gettimeofday(&start, NULL);
+
+		sock = socket(AF_INET, SOCK_STREAM, 0);
+		if (sock < 0)
+			error(-1, errno, "socket creation");
+
+		sl.l_onoff = 1;
+		sl.l_linger = 0;
+		if (setsockopt(sock, SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)))
+			error(-1, errno, "setsockopt(linger)");
+
+		if (setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
+					&flag, sizeof(flag)))
+			error(-1, errno, "setsockopt(nodelay)");
+
+		addr.sin_family = AF_INET;
+		addr.sin_port = htons(port);
+
+		if (inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr) <= 0)
+			error(-1, errno, "inet_pton");
+
+		if (connect(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0)
+			error(-1, errno, "connect");
+
+		send(sock, &buffer, sizeof(buffer), 0);
+		if (read(sock, &buffer, sizeof(buffer)) == -1)
+			error(-1, errno, "waiting read");
+
+		gettimeofday(&end, NULL);
+		lat = timediff(start, end);
+		sum_lat += lat;
+		nr_lat++;
+		if (lat < 100000)
+			goto close;
+
+		if (getsockname(sock, (struct sockaddr *)&laddr, &len) == -1)
+			error(-1, errno, "getsockname");
+		printf("port: %d, lat: %lu, avg: %lu, nr: %lu\n",
+				ntohs(laddr.sin_port), lat,
+				sum_lat / nr_lat, nr_lat);
+close:
+		fflush(stdout);
+		close(sock);
+	}
+}
+
+static void server(int sock, struct sockaddr_in address)
+{
+	int accepted;
+	int addrlen = sizeof(address);
+	int buffer;
+
+	while (1) {
+		accepted = accept(sock, (struct sockaddr *)&address,
+				(socklen_t *)&addrlen);
+		if (accepted < 0)
+			error(-1, errno, "accept");
+
+		if (read(accepted, &buffer, sizeof(buffer)) == -1)
+			error(-1, errno, "read");
+		close(accepted);
+	}
+}
+
+static void sig_handler(int signum)
+{
+	kill(child_pid, SIGTERM);
+	exit(0);
+}
+
+int main(int argc, char const *argv[])
+{
+	int sock;
+	int opt = 1;
+	struct sockaddr_in address;
+	struct sockaddr_in laddr;
+	socklen_t len = sizeof(laddr);
+
+	if (signal(SIGTERM, sig_handler) == SIG_ERR)
+		error(-1, errno, "signal");
+
+	sock = socket(AF_INET, SOCK_STREAM, 0);
+	if (sock < 0)
+		error(-1, errno, "socket");
+
+	if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT,
+				&opt, sizeof(opt)) == -1)
+		error(-1, errno, "setsockopt");
+
+	address.sin_family = AF_INET;
+	address.sin_addr.s_addr = INADDR_ANY;
+	/* dynamically allocate unused port */
+	address.sin_port = 0;
+
+	if (bind(sock, (struct sockaddr *)&address, sizeof(address)) < 0)
+		error(-1, errno, "bind");
+
+	if (listen(sock, 3) < 0)
+		error(-1, errno, "listen");
+
+	if (getsockname(sock, (struct sockaddr *)&laddr, &len) == -1)
+		error(-1, errno, "getsockname");
+
+	fprintf(stderr, "server port: %d\n", ntohs(laddr.sin_port));
+	child_pid = fork();
+	if (!child_pid)
+		client(ntohs(laddr.sin_port));
+	else
+		server(sock, laddr);
+
+	return 0;
+}
diff --git a/tools/testing/selftests/net/fin_ack_lat.sh b/tools/testing/selftests/net/fin_ack_lat.sh
new file mode 100755
index 0000000..a3ff6e0
--- /dev/null
+++ b/tools/testing/selftests/net/fin_ack_lat.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test latency spikes caused by FIN/ACK handling race.
+
+set +x
+set -e
+
+tmpfile=$(mktemp /tmp/fin_ack_latency.XXXX.log)
+
+cleanup() {
+	kill $(pidof fin_ack_lat)
+	rm -f $tmpfile
+}
+
+trap cleanup EXIT
+
+do_test() {
+	RUNTIME=$1
+
+	./fin_ack_lat | tee $tmpfile &
+	PID=$!
+
+	sleep $RUNTIME
+	NR_SPIKES=$(wc -l $tmpfile | awk '{print $1}')
+	if [ $NR_SPIKES -gt 0 ]
+	then
+		echo "FAIL: $NR_SPIKES spikes detected"
+		return 1
+	fi
+	return 0
+}
+
+do_test "30"
+echo "test done"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index a3dccd8..99579c0 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -634,6 +634,14 @@ static void check_getpeername_connect(int fd)
 			cfg_host, a, cfg_port, b);
 }
 
+static void maybe_close(int fd)
+{
+	unsigned int r = rand();
+
+	if (r & 1)
+		close(fd);
+}
+
 int main_loop_s(int listensock)
 {
 	struct sockaddr_storage ss;
@@ -657,6 +665,7 @@ int main_loop_s(int listensock)
 	salen = sizeof(ss);
 	remotesock = accept(listensock, (struct sockaddr *)&ss, &salen);
 	if (remotesock >= 0) {
+		maybe_close(listensock);
 		check_sockaddr(pf, &ss, salen);
 		check_getpeername(remotesock, &ss, salen);
 
diff --git a/tools/testing/selftests/openat2/.gitignore b/tools/testing/selftests/openat2/.gitignore
new file mode 100644
index 0000000..bd68f6c
--- /dev/null
+++ b/tools/testing/selftests/openat2/.gitignore
@@ -0,0 +1 @@
+/*_test
diff --git a/tools/testing/selftests/openat2/Makefile b/tools/testing/selftests/openat2/Makefile
new file mode 100644
index 0000000..4b93b14
--- /dev/null
+++ b/tools/testing/selftests/openat2/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined
+TEST_GEN_PROGS := openat2_test resolve_test rename_attack_test
+
+include ../lib.mk
+
+$(TEST_GEN_PROGS): helpers.c
diff --git a/tools/testing/selftests/openat2/helpers.c b/tools/testing/selftests/openat2/helpers.c
new file mode 100644
index 0000000..e9a6557
--- /dev/null
+++ b/tools/testing/selftests/openat2/helpers.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Author: Aleksa Sarai <cyphar@cyphar.com>
+ * Copyright (C) 2018-2019 SUSE LLC.
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <string.h>
+#include <syscall.h>
+#include <limits.h>
+
+#include "helpers.h"
+
+bool needs_openat2(const struct open_how *how)
+{
+	return how->resolve != 0;
+}
+
+int raw_openat2(int dfd, const char *path, void *how, size_t size)
+{
+	int ret = syscall(__NR_openat2, dfd, path, how, size);
+	return ret >= 0 ? ret : -errno;
+}
+
+int sys_openat2(int dfd, const char *path, struct open_how *how)
+{
+	return raw_openat2(dfd, path, how, sizeof(*how));
+}
+
+int sys_openat(int dfd, const char *path, struct open_how *how)
+{
+	int ret = openat(dfd, path, how->flags, how->mode);
+	return ret >= 0 ? ret : -errno;
+}
+
+int sys_renameat2(int olddirfd, const char *oldpath,
+		  int newdirfd, const char *newpath, unsigned int flags)
+{
+	int ret = syscall(__NR_renameat2, olddirfd, oldpath,
+					  newdirfd, newpath, flags);
+	return ret >= 0 ? ret : -errno;
+}
+
+int touchat(int dfd, const char *path)
+{
+	int fd = openat(dfd, path, O_CREAT, 0644); /* O_CREAT requires a mode */
+	if (fd >= 0)
+		close(fd);
+	return fd;
+}
+
+char *fdreadlink(int fd)
+{
+	char *target, *tmp;
+
+	E_asprintf(&tmp, "/proc/self/fd/%d", fd);
+
+	target = malloc(PATH_MAX);
+	if (!target)
+		ksft_exit_fail_msg("fdreadlink: malloc failed\n");
+	memset(target, 0, PATH_MAX);
+
+	E_readlink(tmp, target, PATH_MAX);
+	free(tmp);
+	return target;
+}
+
+bool fdequal(int fd, int dfd, const char *path)
+{
+	char *fdpath, *dfdpath, *other;
+	bool cmp;
+
+	fdpath = fdreadlink(fd);
+	dfdpath = fdreadlink(dfd);
+
+	if (!path)
+		E_asprintf(&other, "%s", dfdpath);
+	else if (*path == '/')
+		E_asprintf(&other, "%s", path);
+	else
+		E_asprintf(&other, "%s/%s", dfdpath, path);
+
+	cmp = !strcmp(fdpath, other);
+
+	free(fdpath);
+	free(dfdpath);
+	free(other);
+	return cmp;
+}
+
+bool openat2_supported = false;
+
+void __attribute__((constructor)) init(void)
+{
+	struct open_how how = {};
+	int fd;
+
+	BUILD_BUG_ON(sizeof(struct open_how) != OPEN_HOW_SIZE_VER0);
+
+	/* Check openat2(2) support. */
+	fd = sys_openat2(AT_FDCWD, ".", &how);
+	openat2_supported = (fd >= 0);
+
+	if (fd >= 0)
+		close(fd);
+}
diff --git a/tools/testing/selftests/openat2/helpers.h b/tools/testing/selftests/openat2/helpers.h
new file mode 100644
index 0000000..a6ea273
--- /dev/null
+++ b/tools/testing/selftests/openat2/helpers.h
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Author: Aleksa Sarai <cyphar@cyphar.com>
+ * Copyright (C) 2018-2019 SUSE LLC.
+ */
+
+#ifndef __RESOLVEAT_H__
+#define __RESOLVEAT_H__
+
+#define _GNU_SOURCE
+#include <stdint.h>
+#include <errno.h>
+#include <linux/types.h>
+#include "../kselftest.h"
+
+#define ARRAY_LEN(X) (sizeof (X) / sizeof (*(X)))
+#define BUILD_BUG_ON(e) ((void)(sizeof(struct { int:(-!!(e)); })))
+
+#ifndef SYS_openat2
+#ifndef __NR_openat2
+#define __NR_openat2 437
+#endif /* __NR_openat2 */
+#define SYS_openat2 __NR_openat2
+#endif /* SYS_openat2 */
+
+/*
+ * Arguments for how openat2(2) should open the target path. If @resolve is
+ * zero, then openat2(2) operates very similarly to openat(2).
+ *
+ * However, unlike openat(2), unknown bits in @flags result in -EINVAL rather
+ * than being silently ignored. @mode must be zero unless one of {O_CREAT,
+ * O_TMPFILE} is set.
+ *
+ * @flags: O_* flags.
+ * @mode: O_CREAT/O_TMPFILE file mode.
+ * @resolve: RESOLVE_* flags.
+ */
+struct open_how {
+	__u64 flags;
+	__u64 mode;
+	__u64 resolve;
+};
+
+#define OPEN_HOW_SIZE_VER0	24 /* sizeof first published struct */
+#define OPEN_HOW_SIZE_LATEST	OPEN_HOW_SIZE_VER0
+
+bool needs_openat2(const struct open_how *how);
+
+#ifndef RESOLVE_IN_ROOT
+/* how->resolve flags for openat2(2). */
+#define RESOLVE_NO_XDEV		0x01 /* Block mount-point crossings
+					(includes bind-mounts). */
+#define RESOLVE_NO_MAGICLINKS	0x02 /* Block traversal through procfs-style
+					"magic-links". */
+#define RESOLVE_NO_SYMLINKS	0x04 /* Block traversal through all symlinks
+					(implies RESOLVE_NO_MAGICLINKS). */
+#define RESOLVE_BENEATH		0x08 /* Block "lexical" trickery like
+					"..", symlinks, and absolute
+					paths which escape the dirfd. */
+#define RESOLVE_IN_ROOT		0x10 /* Make all jumps to "/" and ".."
+					be scoped inside the dirfd
+					(similar to chroot(2)). */
+#endif /* RESOLVE_IN_ROOT */
+
+#define E_func(func, ...)						\
+	do {								\
+		if (func(__VA_ARGS__) < 0)				\
+			ksft_exit_fail_msg("%s:%d %s failed\n", \
+					   __FILE__, __LINE__, #func);\
+	} while (0)
+
+#define E_asprintf(...)		E_func(asprintf,	__VA_ARGS__)
+#define E_chmod(...)		E_func(chmod,		__VA_ARGS__)
+#define E_dup2(...)		E_func(dup2,		__VA_ARGS__)
+#define E_fchdir(...)		E_func(fchdir,		__VA_ARGS__)
+#define E_fstatat(...)		E_func(fstatat,		__VA_ARGS__)
+#define E_kill(...)		E_func(kill,		__VA_ARGS__)
+#define E_mkdirat(...)		E_func(mkdirat,		__VA_ARGS__)
+#define E_mount(...)		E_func(mount,		__VA_ARGS__)
+#define E_prctl(...)		E_func(prctl,		__VA_ARGS__)
+#define E_readlink(...)		E_func(readlink,	__VA_ARGS__)
+#define E_setresuid(...)	E_func(setresuid,	__VA_ARGS__)
+#define E_symlinkat(...)	E_func(symlinkat,	__VA_ARGS__)
+#define E_touchat(...)		E_func(touchat,		__VA_ARGS__)
+#define E_unshare(...)		E_func(unshare,		__VA_ARGS__)
+
+#define E_assert(expr, msg, ...)					\
+	do {								\
+		if (!(expr))						\
+			ksft_exit_fail_msg("ASSERT(%s:%d) failed (%s): " msg "\n", \
+					   __FILE__, __LINE__, #expr, ##__VA_ARGS__); \
+	} while (0)
+
+int raw_openat2(int dfd, const char *path, void *how, size_t size);
+int sys_openat2(int dfd, const char *path, struct open_how *how);
+int sys_openat(int dfd, const char *path, struct open_how *how);
+int sys_renameat2(int olddirfd, const char *oldpath,
+		  int newdirfd, const char *newpath, unsigned int flags);
+
+int touchat(int dfd, const char *path);
+char *fdreadlink(int fd);
+bool fdequal(int fd, int dfd, const char *path);
+
+extern bool openat2_supported;
+
+#endif /* __RESOLVEAT_H__ */
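For context, a minimal usage sketch of the helpers declared above (illustrative only, not taken from the patch; it assumes a kernel that implements openat2(2) and is compiled together with helpers.c from this series, and open_beneath() is a hypothetical name):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include "helpers.h"

/*
 * Open @path read-only, refusing any resolution that escapes @dirfd.
 * sys_openat2() returns an fd on success or -errno on failure; escape
 * attempts ("..", absolute paths, absolute symlinks) come back as -EXDEV.
 */
static int open_beneath(int dirfd, const char *path)
{
	struct open_how how = {
		.flags   = O_RDONLY,
		.resolve = RESOLVE_BENEATH,
	};

	return sys_openat2(dirfd, path, &how);
}

int main(int argc, char **argv)
{
	int dirfd, fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <dir> <relative-path>\n", argv[0]);
		return 1;
	}

	dirfd = open(argv[1], O_PATH | O_DIRECTORY);
	if (dirfd < 0)
		return 1;

	fd = open_beneath(dirfd, argv[2]);
	printf("open_beneath(%s, %s) = %d\n", argv[1], argv[2], fd);

	if (fd >= 0)
		close(fd);
	close(dirfd);
	return fd >= 0 ? 0 : 1;
}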
diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c
new file mode 100644
index 0000000..b386367
--- /dev/null
+++ b/tools/testing/selftests/openat2/openat2_test.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Author: Aleksa Sarai <cyphar@cyphar.com>
+ * Copyright (C) 2018-2019 SUSE LLC.
+ */
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <sched.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "../kselftest.h"
+#include "helpers.h"
+
+/*
+ * O_LARGEFILE is set to 0 by glibc.
+ * XXX: This is wrong on {mips, parisc, powerpc, sparc}.
+ */
+#undef	O_LARGEFILE
+#define	O_LARGEFILE 0x8000
+
+struct open_how_ext {
+	struct open_how inner;
+	uint32_t extra1;
+	char pad1[128];
+	uint32_t extra2;
+	char pad2[128];
+	uint32_t extra3;
+};
+
+struct struct_test {
+	const char *name;
+	struct open_how_ext arg;
+	size_t size;
+	int err;
+};
+
+#define NUM_OPENAT2_STRUCT_TESTS 7
+#define NUM_OPENAT2_STRUCT_VARIATIONS 13
+
+void test_openat2_struct(void)
+{
+	int misalignments[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 17, 87 };
+
+	struct struct_test tests[] = {
+		/* Normal struct. */
+		{ .name = "normal struct",
+		  .arg.inner.flags = O_RDONLY,
+		  .size = sizeof(struct open_how) },
+		/* Bigger struct, with zeroed out end. */
+		{ .name = "bigger struct (zeroed out)",
+		  .arg.inner.flags = O_RDONLY,
+		  .size = sizeof(struct open_how_ext) },
+
+		/* TODO: Once expanded, check zero-padding. */
+
+		/* Smaller than version-0 struct. */
+		{ .name = "zero-sized 'struct'",
+		  .arg.inner.flags = O_RDONLY, .size = 0, .err = -EINVAL },
+		{ .name = "smaller-than-v0 struct",
+		  .arg.inner.flags = O_RDONLY,
+		  .size = OPEN_HOW_SIZE_VER0 - 1, .err = -EINVAL },
+
+		/* Bigger struct, with non-zero trailing bytes. */
+		{ .name = "bigger struct (non-zero data in first 'future field')",
+		  .arg.inner.flags = O_RDONLY, .arg.extra1 = 0xdeadbeef,
+		  .size = sizeof(struct open_how_ext), .err = -E2BIG },
+		{ .name = "bigger struct (non-zero data in middle of 'future fields')",
+		  .arg.inner.flags = O_RDONLY, .arg.extra2 = 0xfeedcafe,
+		  .size = sizeof(struct open_how_ext), .err = -E2BIG },
+		{ .name = "bigger struct (non-zero data at end of 'future fields')",
+		  .arg.inner.flags = O_RDONLY, .arg.extra3 = 0xabad1dea,
+		  .size = sizeof(struct open_how_ext), .err = -E2BIG },
+	};
+
+	BUILD_BUG_ON(ARRAY_LEN(misalignments) != NUM_OPENAT2_STRUCT_VARIATIONS);
+	BUILD_BUG_ON(ARRAY_LEN(tests) != NUM_OPENAT2_STRUCT_TESTS);
+
+	for (int i = 0; i < ARRAY_LEN(tests); i++) {
+		struct struct_test *test = &tests[i];
+		struct open_how_ext how_ext = test->arg;
+
+		for (int j = 0; j < ARRAY_LEN(misalignments); j++) {
+			int fd, misalign = misalignments[j];
+			char *fdpath = NULL;
+			bool failed;
+			void (*resultfn)(const char *msg, ...) = ksft_test_result_pass;
+
+			void *copy = NULL, *how_copy = &how_ext;
+
+			if (!openat2_supported) {
+				ksft_print_msg("openat2(2) unsupported\n");
+				resultfn = ksft_test_result_skip;
+				goto skip;
+			}
+
+			if (misalign) {
+				/*
+				 * Explicitly misalign the structure by copying it at the given
+				 * (mis)alignment offset. The surrounding bytes are set to non-zero
+				 * values so we catch the kernel inspecting bytes outside the struct.
+				 *
+				 * This is effectively a check that is_zeroed_user() works.
+				 */
+				copy = malloc(misalign + sizeof(how_ext));
+				how_copy = copy + misalign;
+				memset(copy, 0xff, misalign);
+				memcpy(how_copy, &how_ext, sizeof(how_ext));
+			}
+
+			fd = raw_openat2(AT_FDCWD, ".", how_copy, test->size);
+			if (test->err >= 0)
+				failed = (fd < 0);
+			else
+				failed = (fd != test->err);
+			if (fd >= 0) {
+				fdpath = fdreadlink(fd);
+				close(fd);
+			}
+
+			if (failed) {
+				resultfn = ksft_test_result_fail;
+
+				ksft_print_msg("openat2 unexpectedly returned ");
+				if (fdpath)
+					ksft_print_msg("%d['%s']\n", fd, fdpath);
+				else
+					ksft_print_msg("%d (%s)\n", fd, strerror(-fd));
+			}
+
+skip:
+			if (test->err >= 0)
+				resultfn("openat2 with %s argument [misalign=%d] succeeds\n",
+					 test->name, misalign);
+			else
+				resultfn("openat2 with %s argument [misalign=%d] fails with %d (%s)\n",
+					 test->name, misalign, test->err,
+					 strerror(-test->err));
+
+			free(copy);
+			free(fdpath);
+			fflush(stdout);
+		}
+	}
+}
+
+struct flag_test {
+	const char *name;
+	struct open_how how;
+	int err;
+};
+
+#define NUM_OPENAT2_FLAG_TESTS 23
+
+void test_openat2_flags(void)
+{
+	struct flag_test tests[] = {
+		/* O_TMPFILE is incompatible with O_PATH and O_CREAT. */
+		{ .name = "incompatible flags (O_TMPFILE | O_PATH)",
+		  .how.flags = O_TMPFILE | O_PATH | O_RDWR, .err = -EINVAL },
+		{ .name = "incompatible flags (O_TMPFILE | O_CREAT)",
+		  .how.flags = O_TMPFILE | O_CREAT | O_RDWR, .err = -EINVAL },
+
+		/* O_PATH only permits certain other flags to be set ... */
+		{ .name = "compatible flags (O_PATH | O_CLOEXEC)",
+		  .how.flags = O_PATH | O_CLOEXEC },
+		{ .name = "compatible flags (O_PATH | O_DIRECTORY)",
+		  .how.flags = O_PATH | O_DIRECTORY },
+		{ .name = "compatible flags (O_PATH | O_NOFOLLOW)",
+		  .how.flags = O_PATH | O_NOFOLLOW },
+		/* ... and others are absolutely not permitted. */
+		{ .name = "incompatible flags (O_PATH | O_RDWR)",
+		  .how.flags = O_PATH | O_RDWR, .err = -EINVAL },
+		{ .name = "incompatible flags (O_PATH | O_CREAT)",
+		  .how.flags = O_PATH | O_CREAT, .err = -EINVAL },
+		{ .name = "incompatible flags (O_PATH | O_EXCL)",
+		  .how.flags = O_PATH | O_EXCL, .err = -EINVAL },
+		{ .name = "incompatible flags (O_PATH | O_NOCTTY)",
+		  .how.flags = O_PATH | O_NOCTTY, .err = -EINVAL },
+		{ .name = "incompatible flags (O_PATH | O_DIRECT)",
+		  .how.flags = O_PATH | O_DIRECT, .err = -EINVAL },
+		{ .name = "incompatible flags (O_PATH | O_LARGEFILE)",
+		  .how.flags = O_PATH | O_LARGEFILE, .err = -EINVAL },
+
+		/* ->mode must only be set with O_{CREAT,TMPFILE}. */
+		{ .name = "non-zero how.mode and O_RDONLY",
+		  .how.flags = O_RDONLY, .how.mode = 0600, .err = -EINVAL },
+		{ .name = "non-zero how.mode and O_PATH",
+		  .how.flags = O_PATH,   .how.mode = 0600, .err = -EINVAL },
+		{ .name = "valid how.mode and O_CREAT",
+		  .how.flags = O_CREAT,  .how.mode = 0600 },
+		{ .name = "valid how.mode and O_TMPFILE",
+		  .how.flags = O_TMPFILE | O_RDWR, .how.mode = 0600 },
+		/* ->mode must only contain 0777 bits. */
+		{ .name = "invalid how.mode and O_CREAT",
+		  .how.flags = O_CREAT,
+		  .how.mode = 0xFFFF, .err = -EINVAL },
+		{ .name = "invalid (very large) how.mode and O_CREAT",
+		  .how.flags = O_CREAT,
+		  .how.mode = 0xC000000000000000ULL, .err = -EINVAL },
+		{ .name = "invalid how.mode and O_TMPFILE",
+		  .how.flags = O_TMPFILE | O_RDWR,
+		  .how.mode = 0x1337, .err = -EINVAL },
+		{ .name = "invalid (very large) how.mode and O_TMPFILE",
+		  .how.flags = O_TMPFILE | O_RDWR,
+		  .how.mode = 0x0000A00000000000ULL, .err = -EINVAL },
+
+		/* ->resolve must only contain RESOLVE_* flags. */
+		{ .name = "invalid how.resolve and O_RDONLY",
+		  .how.flags = O_RDONLY,
+		  .how.resolve = 0x1337, .err = -EINVAL },
+		{ .name = "invalid how.resolve and O_CREAT",
+		  .how.flags = O_CREAT,
+		  .how.resolve = 0x1337, .err = -EINVAL },
+		{ .name = "invalid how.resolve and O_TMPFILE",
+		  .how.flags = O_TMPFILE | O_RDWR,
+		  .how.resolve = 0x1337, .err = -EINVAL },
+		{ .name = "invalid how.resolve and O_PATH",
+		  .how.flags = O_PATH,
+		  .how.resolve = 0x1337, .err = -EINVAL },
+	};
+
+	BUILD_BUG_ON(ARRAY_LEN(tests) != NUM_OPENAT2_FLAG_TESTS);
+
+	for (int i = 0; i < ARRAY_LEN(tests); i++) {
+		int fd, fdflags = -1;
+		char *path, *fdpath = NULL;
+		bool failed = false;
+		struct flag_test *test = &tests[i];
+		void (*resultfn)(const char *msg, ...) = ksft_test_result_pass;
+
+		if (!openat2_supported) {
+			ksft_print_msg("openat2(2) unsupported\n");
+			resultfn = ksft_test_result_skip;
+			goto skip;
+		}
+
+		path = (test->how.flags & O_CREAT) ? "/tmp/ksft.openat2_tmpfile" : ".";
+		unlink(path);
+
+		fd = sys_openat2(AT_FDCWD, path, &test->how);
+		if (test->err >= 0)
+			failed = (fd < 0);
+		else
+			failed = (fd != test->err);
+		if (fd >= 0) {
+			int otherflags;
+
+			fdpath = fdreadlink(fd);
+			fdflags = fcntl(fd, F_GETFL);
+			otherflags = fcntl(fd, F_GETFD);
+			close(fd);
+
+			E_assert(fdflags >= 0, "fcntl F_GETFL of new fd");
+			E_assert(otherflags >= 0, "fcntl F_GETFD of new fd");
+
+			/* O_CLOEXEC isn't shown in F_GETFL. */
+			if (otherflags & FD_CLOEXEC)
+				fdflags |= O_CLOEXEC;
+			/* O_CREAT is hidden from F_GETFL. */
+			if (test->how.flags & O_CREAT)
+				fdflags |= O_CREAT;
+			if (!(test->how.flags & O_LARGEFILE))
+				fdflags &= ~O_LARGEFILE;
+			failed |= (fdflags != test->how.flags);
+		}
+
+		if (failed) {
+			resultfn = ksft_test_result_fail;
+
+			ksft_print_msg("openat2 unexpectedly returned ");
+			if (fdpath)
+				ksft_print_msg("%d['%s'] with %X (!= %X)\n",
+					       fd, fdpath, fdflags,
+					       test->how.flags);
+			else
+				ksft_print_msg("%d (%s)\n", fd, strerror(-fd));
+		}
+
+skip:
+		if (test->err >= 0)
+			resultfn("openat2 with %s succeeds\n", test->name);
+		else
+			resultfn("openat2 with %s fails with %d (%s)\n",
+				 test->name, test->err, strerror(-test->err));
+
+		free(fdpath);
+		fflush(stdout);
+	}
+}
+
+#define NUM_TESTS (NUM_OPENAT2_STRUCT_VARIATIONS * NUM_OPENAT2_STRUCT_TESTS + \
+		   NUM_OPENAT2_FLAG_TESTS)
+
+int main(int argc, char **argv)
+{
+	ksft_print_header();
+	ksft_set_plan(NUM_TESTS);
+
+	test_openat2_struct();
+	test_openat2_flags();
+
+	if (ksft_get_fail_cnt() + ksft_get_error_cnt() > 0)
+		ksft_exit_fail();
+	else
+		ksft_exit_pass();
+}
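The struct-size cases above exercise the extensible-struct convention openat2(2) follows: a larger-than-known structure is accepted as long as the bytes the kernel does not understand are all zero, and rejected with -E2BIG otherwise. A rough sketch of what a forward-compatible caller could look like (the appended field is hypothetical and not part of this patch; the sketch reuses raw_openat2() from helpers.c):

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include "helpers.h"

/* Hypothetical layout a future libc might ship: the current fields plus
 * one new member appended at the end. */
struct open_how_future {
	struct open_how v0;
	__u64 new_feature;	/* unknown to today's kernels */
};

static int open_forward_compat(int dfd, const char *path)
{
	struct open_how_future how;

	memset(&how, 0, sizeof(how));	/* unknown tail must stay zero */
	how.v0.flags = O_RDONLY;

	/*
	 * Passing the larger size succeeds because the trailing bytes are
	 * zero; setting how.new_feature on a kernel that predates the field
	 * would instead fail with -E2BIG.
	 */
	return raw_openat2(dfd, path, &how, sizeof(how));
}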
diff --git a/tools/testing/selftests/openat2/rename_attack_test.c b/tools/testing/selftests/openat2/rename_attack_test.c
new file mode 100644
index 0000000..0a770728
--- /dev/null
+++ b/tools/testing/selftests/openat2/rename_attack_test.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Author: Aleksa Sarai <cyphar@cyphar.com>
+ * Copyright (C) 2018-2019 SUSE LLC.
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <sys/mman.h>
+#include <sys/prctl.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <syscall.h>
+#include <limits.h>
+#include <unistd.h>
+
+#include "../kselftest.h"
+#include "helpers.h"
+
+/* Construct a test directory with the following structure:
+ *
+ * root/
+ * |-- a/
+ * |   `-- c/
+ * `-- b/
+ */
+int setup_testdir(void)
+{
+	int dfd;
+	char dirname[] = "/tmp/ksft-openat2-rename-attack.XXXXXX";
+
+	/* Make the top-level directory. */
+	if (!mkdtemp(dirname))
+		ksft_exit_fail_msg("setup_testdir: failed to create tmpdir\n");
+	dfd = open(dirname, O_PATH | O_DIRECTORY);
+	if (dfd < 0)
+		ksft_exit_fail_msg("setup_testdir: failed to open tmpdir\n");
+
+	E_mkdirat(dfd, "a", 0755);
+	E_mkdirat(dfd, "b", 0755);
+	E_mkdirat(dfd, "a/c", 0755);
+
+	return dfd;
+}
+
+/* Swap @dirfd/@a and @dirfd/@b constantly. Parent must kill this process. */
+pid_t spawn_attack(int dirfd, char *a, char *b)
+{
+	pid_t child = fork();
+	if (child != 0)
+		return child;
+
+	/* If the parent (the test process) dies, kill ourselves too. */
+	E_prctl(PR_SET_PDEATHSIG, SIGKILL);
+
+	/* Swap @a and @b. */
+	for (;;)
+		renameat2(dirfd, a, dirfd, b, RENAME_EXCHANGE);
+	exit(1);
+}
+
+#define NUM_RENAME_TESTS 2
+#define ROUNDS 400000
+
+const char *flagname(int resolve)
+{
+	switch (resolve) {
+	case RESOLVE_IN_ROOT:
+		return "RESOLVE_IN_ROOT";
+	case RESOLVE_BENEATH:
+		return "RESOLVE_BENEATH";
+	}
+	return "(unknown)";
+}
+
+void test_rename_attack(int resolve)
+{
+	int dfd, afd;
+	pid_t child;
+	void (*resultfn)(const char *msg, ...) = ksft_test_result_pass;
+	int escapes = 0, other_errs = 0, exdevs = 0, eagains = 0, successes = 0;
+
+	struct open_how how = {
+		.flags = O_PATH,
+		.resolve = resolve,
+	};
+
+	if (!openat2_supported) {
+		how.resolve = 0;
+		ksft_print_msg("openat2(2) unsupported -- using openat(2) instead\n");
+	}
+
+	dfd = setup_testdir();
+	afd = openat(dfd, "a", O_PATH);
+	if (afd < 0)
+		ksft_exit_fail_msg("test_rename_attack: failed to open 'a'\n");
+
+	child = spawn_attack(dfd, "a/c", "b");
+
+	for (int i = 0; i < ROUNDS; i++) {
+		int fd;
+		char *victim_path = "c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../../c/../..";
+
+		if (openat2_supported)
+			fd = sys_openat2(afd, victim_path, &how);
+		else
+			fd = sys_openat(afd, victim_path, &how);
+
+		if (fd < 0) {
+			if (fd == -EAGAIN)
+				eagains++;
+			else if (fd == -EXDEV)
+				exdevs++;
+			else if (fd == -ENOENT)
+				escapes++; /* escaped outside and got ENOENT... */
+			else
+				other_errs++; /* unexpected error */
+		} else {
+			if (fdequal(fd, afd, NULL))
+				successes++;
+			else
+				escapes++; /* we got an unexpected fd */
+		}
+		close(fd);
+	}
+
+	if (escapes > 0)
+		resultfn = ksft_test_result_fail;
+	ksft_print_msg("non-escapes: EAGAIN=%d EXDEV=%d E<other>=%d success=%d\n",
+		       eagains, exdevs, other_errs, successes);
+	resultfn("rename attack with %s (%d runs, got %d escapes)\n",
+		 flagname(resolve), ROUNDS, escapes);
+
+	/* Should be killed anyway, but might as well make sure. */
+	E_kill(child, SIGKILL);
+}
+
+#define NUM_TESTS NUM_RENAME_TESTS
+
+int main(int argc, char **argv)
+{
+	ksft_print_header();
+	ksft_set_plan(NUM_TESTS);
+
+	test_rename_attack(RESOLVE_BENEATH);
+	test_rename_attack(RESOLVE_IN_ROOT);
+
+	if (ksft_get_fail_cnt() + ksft_get_error_cnt() > 0)
+		ksft_exit_fail();
+	else
+		ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/openat2/resolve_test.c b/tools/testing/selftests/openat2/resolve_test.c
new file mode 100644
index 0000000..7a94b1d
--- /dev/null
+++ b/tools/testing/selftests/openat2/resolve_test.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Author: Aleksa Sarai <cyphar@cyphar.com>
+ * Copyright (C) 2018-2019 SUSE LLC.
+ */
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <sched.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "../kselftest.h"
+#include "helpers.h"
+
+/*
+ * Construct a test directory with the following structure:
+ *
+ * root/
+ * |-- procexe -> /proc/self/exe
+ * |-- procroot -> /proc/self/root
+ * |-- root/
+ * |-- mnt/ [mountpoint]
+ * |   |-- self -> ../mnt/
+ * |   `-- absself -> /mnt/
+ * |-- etc/
+ * |   `-- passwd
+ * |-- creatlink -> /newfile3
+ * |-- reletc -> etc/
+ * |-- relsym -> etc/passwd
+ * |-- absetc -> /etc/
+ * |-- abssym -> /etc/passwd
+ * |-- abscheeky -> /cheeky
+ * `-- cheeky/
+ *     |-- absself -> /
+ *     |-- self -> ../../root/
+ *     |-- garbageself -> /../../root/
+ *     |-- passwd -> ../cheeky/../cheeky/../etc/../etc/passwd
+ *     |-- abspasswd -> /../cheeky/../cheeky/../etc/../etc/passwd
+ *     |-- dotdotlink -> ../../../../../../../../../../../../../../etc/passwd
+ *     `-- garbagelink -> /../../../../../../../../../../../../../../etc/passwd
+ */
+int setup_testdir(void)
+{
+	int dfd, tmpfd;
+	char dirname[] = "/tmp/ksft-openat2-testdir.XXXXXX";
+
+	/* Unshare and make /tmp a new directory. */
+	E_unshare(CLONE_NEWNS);
+	E_mount("", "/tmp", "", MS_PRIVATE, "");
+
+	/* Make the top-level directory. */
+	if (!mkdtemp(dirname))
+		ksft_exit_fail_msg("setup_testdir: failed to create tmpdir\n");
+	dfd = open(dirname, O_PATH | O_DIRECTORY);
+	if (dfd < 0)
+		ksft_exit_fail_msg("setup_testdir: failed to open tmpdir\n");
+
+	/* A sub-directory which is actually used for tests. */
+	E_mkdirat(dfd, "root", 0755);
+	tmpfd = openat(dfd, "root", O_PATH | O_DIRECTORY);
+	if (tmpfd < 0)
+		ksft_exit_fail_msg("setup_testdir: failed to open tmpdir\n");
+	close(dfd);
+	dfd = tmpfd;
+
+	E_symlinkat("/proc/self/exe", dfd, "procexe");
+	E_symlinkat("/proc/self/root", dfd, "procroot");
+	E_mkdirat(dfd, "root", 0755);
+
+	/* There is no mountat(2), so use chdir. */
+	E_mkdirat(dfd, "mnt", 0755);
+	E_fchdir(dfd);
+	E_mount("tmpfs", "./mnt", "tmpfs", MS_NOSUID | MS_NODEV, "");
+	E_symlinkat("../mnt/", dfd, "mnt/self");
+	E_symlinkat("/mnt/", dfd, "mnt/absself");
+
+	E_mkdirat(dfd, "etc", 0755);
+	E_touchat(dfd, "etc/passwd");
+
+	E_symlinkat("/newfile3", dfd, "creatlink");
+	E_symlinkat("etc/", dfd, "reletc");
+	E_symlinkat("etc/passwd", dfd, "relsym");
+	E_symlinkat("/etc/", dfd, "absetc");
+	E_symlinkat("/etc/passwd", dfd, "abssym");
+	E_symlinkat("/cheeky", dfd, "abscheeky");
+
+	E_mkdirat(dfd, "cheeky", 0755);
+
+	E_symlinkat("/", dfd, "cheeky/absself");
+	E_symlinkat("../../root/", dfd, "cheeky/self");
+	E_symlinkat("/../../root/", dfd, "cheeky/garbageself");
+
+	E_symlinkat("../cheeky/../etc/../etc/passwd", dfd, "cheeky/passwd");
+	E_symlinkat("/../cheeky/../etc/../etc/passwd", dfd, "cheeky/abspasswd");
+
+	E_symlinkat("../../../../../../../../../../../../../../etc/passwd",
+		    dfd, "cheeky/dotdotlink");
+	E_symlinkat("/../../../../../../../../../../../../../../etc/passwd",
+		    dfd, "cheeky/garbagelink");
+
+	return dfd;
+}
+
+struct basic_test {
+	const char *name;
+	const char *dir;
+	const char *path;
+	struct open_how how;
+	bool pass;
+	union {
+		int err;
+		const char *path;
+	} out;
+};
+
+#define NUM_OPENAT2_OPATH_TESTS 88
+
+void test_openat2_opath_tests(void)
+{
+	int rootfd, hardcoded_fd;
+	char *procselfexe, *hardcoded_fdpath;
+
+	E_asprintf(&procselfexe, "/proc/%d/exe", getpid());
+	rootfd = setup_testdir();
+
+	hardcoded_fd = open("/dev/null", O_RDONLY);
+	E_assert(hardcoded_fd >= 0, "open fd to hardcode");
+	E_asprintf(&hardcoded_fdpath, "self/fd/%d", hardcoded_fd);
+
+	struct basic_test tests[] = {
+		/** RESOLVE_BENEATH **/
+		/* Attempts to cross dirfd should be blocked. */
+		{ .name = "[beneath] jump to /",
+		  .path = "/",			.how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] absolute link to $root",
+		  .path = "cheeky/absself",	.how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] chained absolute links to $root",
+		  .path = "abscheeky/absself",	.how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] jump outside $root",
+		  .path = "..",			.how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] temporary jump outside $root",
+		  .path = "../root/",		.how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] symlink temporary jump outside $root",
+		  .path = "cheeky/self",	.how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] chained symlink temporary jump outside $root",
+		  .path = "abscheeky/self",	.how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] garbage links to $root",
+		  .path = "cheeky/garbageself",	.how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] chained garbage links to $root",
+		  .path = "abscheeky/garbageself", .how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		/* Only relative paths that stay inside dirfd should work. */
+		{ .name = "[beneath] ordinary path to 'root'",
+		  .path = "root",		.how.resolve = RESOLVE_BENEATH,
+		  .out.path = "root",		.pass = true },
+		{ .name = "[beneath] ordinary path to 'etc'",
+		  .path = "etc",		.how.resolve = RESOLVE_BENEATH,
+		  .out.path = "etc",		.pass = true },
+		{ .name = "[beneath] ordinary path to 'etc/passwd'",
+		  .path = "etc/passwd",		.how.resolve = RESOLVE_BENEATH,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[beneath] relative symlink inside $root",
+		  .path = "relsym",		.how.resolve = RESOLVE_BENEATH,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[beneath] chained-'..' relative symlink inside $root",
+		  .path = "cheeky/passwd",	.how.resolve = RESOLVE_BENEATH,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[beneath] absolute symlink component outside $root",
+		  .path = "abscheeky/passwd",	.how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] absolute symlink target outside $root",
+		  .path = "abssym",		.how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] absolute path outside $root",
+		  .path = "/etc/passwd",	.how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] cheeky absolute path outside $root",
+		  .path = "cheeky/abspasswd",	.how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] chained cheeky absolute path outside $root",
+		  .path = "abscheeky/abspasswd", .how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		/* Tricky paths should fail. */
+		{ .name = "[beneath] tricky '..'-chained symlink outside $root",
+		  .path = "cheeky/dotdotlink",	.how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] tricky absolute + '..'-chained symlink outside $root",
+		  .path = "abscheeky/dotdotlink", .how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] tricky garbage link outside $root",
+		  .path = "cheeky/garbagelink",	.how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[beneath] tricky absolute + garbage link outside $root",
+		  .path = "abscheeky/garbagelink", .how.resolve = RESOLVE_BENEATH,
+		  .out.err = -EXDEV,		.pass = false },
+
+		/** RESOLVE_IN_ROOT **/
+		/* All attempts to cross the dirfd will be scoped-to-root. */
+		{ .name = "[in_root] jump to /",
+		  .path = "/",			.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = NULL,		.pass = true },
+		{ .name = "[in_root] absolute symlink to /root",
+		  .path = "cheeky/absself",	.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = NULL,		.pass = true },
+		{ .name = "[in_root] chained absolute symlinks to /root",
+		  .path = "abscheeky/absself",	.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = NULL,		.pass = true },
+		{ .name = "[in_root] '..' at root",
+		  .path = "..",			.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = NULL,		.pass = true },
+		{ .name = "[in_root] '../root' at root",
+		  .path = "../root/",		.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "root",		.pass = true },
+		{ .name = "[in_root] relative symlink containing '..' above root",
+		  .path = "cheeky/self",	.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "root",		.pass = true },
+		{ .name = "[in_root] garbage link to /root",
+		  .path = "cheeky/garbageself",	.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "root",		.pass = true },
+		{ .name = "[in_root] chainged garbage links to /root",
+		  .path = "abscheeky/garbageself", .how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "root",		.pass = true },
+		{ .name = "[in_root] relative path to 'root'",
+		  .path = "root",		.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "root",		.pass = true },
+		{ .name = "[in_root] relative path to 'etc'",
+		  .path = "etc",		.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc",		.pass = true },
+		{ .name = "[in_root] relative path to 'etc/passwd'",
+		  .path = "etc/passwd",		.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[in_root] relative symlink to 'etc/passwd'",
+		  .path = "relsym",		.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[in_root] chained-'..' relative symlink to 'etc/passwd'",
+		  .path = "cheeky/passwd",	.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[in_root] chained-'..' absolute + relative symlink to 'etc/passwd'",
+		  .path = "abscheeky/passwd",	.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[in_root] absolute symlink to 'etc/passwd'",
+		  .path = "abssym",		.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[in_root] absolute path 'etc/passwd'",
+		  .path = "/etc/passwd",	.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[in_root] cheeky absolute path 'etc/passwd'",
+		  .path = "cheeky/abspasswd",	.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[in_root] chained cheeky absolute path 'etc/passwd'",
+		  .path = "abscheeky/abspasswd", .how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[in_root] tricky '..'-chained symlink outside $root",
+		  .path = "cheeky/dotdotlink",	.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[in_root] tricky absolute + '..'-chained symlink outside $root",
+		  .path = "abscheeky/dotdotlink", .how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[in_root] tricky absolute path + absolute + '..'-chained symlink outside $root",
+		  .path = "/../../../../abscheeky/dotdotlink", .how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[in_root] tricky garbage link outside $root",
+		  .path = "cheeky/garbagelink",	.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[in_root] tricky absolute + garbage link outside $root",
+		  .path = "abscheeky/garbagelink", .how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc/passwd",	.pass = true },
+		{ .name = "[in_root] tricky absolute path + absolute + garbage link outside $root",
+		  .path = "/../../../../abscheeky/garbagelink", .how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "etc/passwd",	.pass = true },
+		/* O_CREAT should handle trailing symlinks correctly. */
+		{ .name = "[in_root] O_CREAT of relative path inside $root",
+		  .path = "newfile1",		.how.flags = O_CREAT,
+						.how.mode = 0700,
+						.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "newfile1",	.pass = true },
+		{ .name = "[in_root] O_CREAT of absolute path",
+		  .path = "/newfile2",		.how.flags = O_CREAT,
+						.how.mode = 0700,
+						.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "newfile2",	.pass = true },
+		{ .name = "[in_root] O_CREAT of tricky symlink outside root",
+		  .path = "/creatlink",		.how.flags = O_CREAT,
+						.how.mode = 0700,
+						.how.resolve = RESOLVE_IN_ROOT,
+		  .out.path = "newfile3",	.pass = true },
+
+		/** RESOLVE_NO_XDEV **/
+		/* Crossing *down* into a mountpoint is disallowed. */
+		{ .name = "[no_xdev] cross into $mnt",
+		  .path = "mnt",		.how.resolve = RESOLVE_NO_XDEV,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[no_xdev] cross into $mnt/",
+		  .path = "mnt/",		.how.resolve = RESOLVE_NO_XDEV,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[no_xdev] cross into $mnt/.",
+		  .path = "mnt/.",		.how.resolve = RESOLVE_NO_XDEV,
+		  .out.err = -EXDEV,		.pass = false },
+		/* Crossing *up* out of a mountpoint is disallowed. */
+		{ .name = "[no_xdev] goto mountpoint root",
+		  .dir = "mnt", .path = ".",	.how.resolve = RESOLVE_NO_XDEV,
+		  .out.path = "mnt",		.pass = true },
+		{ .name = "[no_xdev] cross up through '..'",
+		  .dir = "mnt", .path = "..",	.how.resolve = RESOLVE_NO_XDEV,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[no_xdev] temporary cross up through '..'",
+		  .dir = "mnt", .path = "../mnt", .how.resolve = RESOLVE_NO_XDEV,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[no_xdev] temporary relative symlink cross up",
+		  .dir = "mnt", .path = "self",	.how.resolve = RESOLVE_NO_XDEV,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[no_xdev] temporary absolute symlink cross up",
+		  .dir = "mnt", .path = "absself", .how.resolve = RESOLVE_NO_XDEV,
+		  .out.err = -EXDEV,		.pass = false },
+		/* Jumping to "/" is ok, but later components cannot cross. */
+		{ .name = "[no_xdev] jump to / directly",
+		  .dir = "mnt", .path = "/",	.how.resolve = RESOLVE_NO_XDEV,
+		  .out.path = "/",		.pass = true },
+		{ .name = "[no_xdev] jump to / (from /) directly",
+		  .dir = "/", .path = "/",	.how.resolve = RESOLVE_NO_XDEV,
+		  .out.path = "/",		.pass = true },
+		{ .name = "[no_xdev] jump to / then proc",
+		  .path = "/proc/1",		.how.resolve = RESOLVE_NO_XDEV,
+		  .out.err = -EXDEV,		.pass = false },
+		{ .name = "[no_xdev] jump to / then tmp",
+		  .path = "/tmp",		.how.resolve = RESOLVE_NO_XDEV,
+		  .out.err = -EXDEV,		.pass = false },
+		/* Magic-links are blocked since they can switch vfsmounts. */
+		{ .name = "[no_xdev] cross through magic-link to self/root",
+		  .dir = "/proc", .path = "self/root", 	.how.resolve = RESOLVE_NO_XDEV,
+		  .out.err = -EXDEV,			.pass = false },
+		{ .name = "[no_xdev] cross through magic-link to self/cwd",
+		  .dir = "/proc", .path = "self/cwd",	.how.resolve = RESOLVE_NO_XDEV,
+		  .out.err = -EXDEV,			.pass = false },
+		/* Except magic-link jumps inside the same vfsmount. */
+		{ .name = "[no_xdev] jump through magic-link to same procfs",
+		  .dir = "/proc", .path = hardcoded_fdpath, .how.resolve = RESOLVE_NO_XDEV,
+		  .out.path = "/proc",			    .pass = true, },
+
+		/** RESOLVE_NO_MAGICLINKS **/
+		/* Regular symlinks should work. */
+		{ .name = "[no_magiclinks] ordinary relative symlink",
+		  .path = "relsym",		.how.resolve = RESOLVE_NO_MAGICLINKS,
+		  .out.path = "etc/passwd",	.pass = true },
+		/* Magic-links should not work. */
+		{ .name = "[no_magiclinks] symlink to magic-link",
+		  .path = "procexe",		.how.resolve = RESOLVE_NO_MAGICLINKS,
+		  .out.err = -ELOOP,		.pass = false },
+		{ .name = "[no_magiclinks] normal path to magic-link",
+		  .path = "/proc/self/exe",	.how.resolve = RESOLVE_NO_MAGICLINKS,
+		  .out.err = -ELOOP,		.pass = false },
+		{ .name = "[no_magiclinks] normal path to magic-link with O_NOFOLLOW",
+		  .path = "/proc/self/exe",	.how.flags = O_NOFOLLOW,
+						.how.resolve = RESOLVE_NO_MAGICLINKS,
+		  .out.path = procselfexe,	.pass = true },
+		{ .name = "[no_magiclinks] symlink to magic-link path component",
+		  .path = "procroot/etc",	.how.resolve = RESOLVE_NO_MAGICLINKS,
+		  .out.err = -ELOOP,		.pass = false },
+		{ .name = "[no_magiclinks] magic-link path component",
+		  .path = "/proc/self/root/etc", .how.resolve = RESOLVE_NO_MAGICLINKS,
+		  .out.err = -ELOOP,		.pass = false },
+		{ .name = "[no_magiclinks] magic-link path component with O_NOFOLLOW",
+		  .path = "/proc/self/root/etc", .how.flags = O_NOFOLLOW,
+						 .how.resolve = RESOLVE_NO_MAGICLINKS,
+		  .out.err = -ELOOP,		.pass = false },
+
+		/** RESOLVE_NO_SYMLINKS **/
+		/* Normal paths should work. */
+		{ .name = "[no_symlinks] ordinary path to '.'",
+		  .path = ".",			.how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.path = NULL,		.pass = true },
+		{ .name = "[no_symlinks] ordinary path to 'root'",
+		  .path = "root",		.how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.path = "root",		.pass = true },
+		{ .name = "[no_symlinks] ordinary path to 'etc'",
+		  .path = "etc",		.how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.path = "etc",		.pass = true },
+		{ .name = "[no_symlinks] ordinary path to 'etc/passwd'",
+		  .path = "etc/passwd",		.how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.path = "etc/passwd",	.pass = true },
+		/* Regular symlinks are blocked. */
+		{ .name = "[no_symlinks] relative symlink target",
+		  .path = "relsym",		.how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.err = -ELOOP,		.pass = false },
+		{ .name = "[no_symlinks] relative symlink component",
+		  .path = "reletc/passwd",	.how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.err = -ELOOP,		.pass = false },
+		{ .name = "[no_symlinks] absolute symlink target",
+		  .path = "abssym",		.how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.err = -ELOOP,		.pass = false },
+		{ .name = "[no_symlinks] absolute symlink component",
+		  .path = "absetc/passwd",	.how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.err = -ELOOP,		.pass = false },
+		{ .name = "[no_symlinks] cheeky garbage link",
+		  .path = "cheeky/garbagelink",	.how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.err = -ELOOP,		.pass = false },
+		{ .name = "[no_symlinks] cheeky absolute + garbage link",
+		  .path = "abscheeky/garbagelink", .how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.err = -ELOOP,		.pass = false },
+		{ .name = "[no_symlinks] cheeky absolute + absolute symlink",
+		  .path = "abscheeky/absself",	.how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.err = -ELOOP,		.pass = false },
+		/* Trailing symlinks with NO_FOLLOW. */
+		{ .name = "[no_symlinks] relative symlink with O_NOFOLLOW",
+		  .path = "relsym",		.how.flags = O_NOFOLLOW,
+						.how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.path = "relsym",		.pass = true },
+		{ .name = "[no_symlinks] absolute symlink with O_NOFOLLOW",
+		  .path = "abssym",		.how.flags = O_NOFOLLOW,
+						.how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.path = "abssym",		.pass = true },
+		{ .name = "[no_symlinks] trailing symlink with O_NOFOLLOW",
+		  .path = "cheeky/garbagelink",	.how.flags = O_NOFOLLOW,
+						.how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.path = "cheeky/garbagelink", .pass = true },
+		{ .name = "[no_symlinks] multiple symlink components with O_NOFOLLOW",
+		  .path = "abscheeky/absself",	.how.flags = O_NOFOLLOW,
+						.how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.err = -ELOOP,		.pass = false },
+		{ .name = "[no_symlinks] multiple symlink (and garbage link) components with O_NOFOLLOW",
+		  .path = "abscheeky/garbagelink", .how.flags = O_NOFOLLOW,
+						   .how.resolve = RESOLVE_NO_SYMLINKS,
+		  .out.err = -ELOOP,		.pass = false },
+	};
+
+	BUILD_BUG_ON(ARRAY_LEN(tests) != NUM_OPENAT2_OPATH_TESTS);
+
+	for (int i = 0; i < ARRAY_LEN(tests); i++) {
+		int dfd, fd;
+		char *fdpath = NULL;
+		bool failed;
+		void (*resultfn)(const char *msg, ...) = ksft_test_result_pass;
+		struct basic_test *test = &tests[i];
+
+		if (!openat2_supported) {
+			ksft_print_msg("openat2(2) unsupported\n");
+			resultfn = ksft_test_result_skip;
+			goto skip;
+		}
+
+		/* Auto-set O_PATH. */
+		if (!(test->how.flags & O_CREAT))
+			test->how.flags |= O_PATH;
+
+		if (test->dir)
+			dfd = openat(rootfd, test->dir, O_PATH | O_DIRECTORY);
+		else
+			dfd = dup(rootfd);
+		E_assert(dfd >= 0, "failed to openat root '%s': %m", test->dir);
+
+		E_dup2(dfd, hardcoded_fd);
+
+		fd = sys_openat2(dfd, test->path, &test->how);
+		if (test->pass)
+			failed = (fd < 0 || !fdequal(fd, rootfd, test->out.path));
+		else
+			failed = (fd != test->out.err);
+		if (fd >= 0) {
+			fdpath = fdreadlink(fd);
+			close(fd);
+		}
+		close(dfd);
+
+		if (failed) {
+			resultfn = ksft_test_result_fail;
+
+			ksft_print_msg("openat2 unexpectedly returned ");
+			if (fdpath)
+				ksft_print_msg("%d['%s']\n", fd, fdpath);
+			else
+				ksft_print_msg("%d (%s)\n", fd, strerror(-fd));
+		}
+
+skip:
+		if (test->pass)
+			resultfn("%s gives path '%s'\n", test->name,
+				 test->out.path ?: ".");
+		else
+			resultfn("%s fails with %d (%s)\n", test->name,
+				 test->out.err, strerror(-test->out.err));
+
+		fflush(stdout);
+		free(fdpath);
+	}
+
+	free(procselfexe);
+	close(rootfd);
+
+	free(hardcoded_fdpath);
+	close(hardcoded_fd);
+}
+
+#define NUM_TESTS NUM_OPENAT2_OPATH_TESTS
+
+int main(int argc, char **argv)
+{
+	ksft_print_header();
+	ksft_set_plan(NUM_TESTS);
+
+	/* NOTE: We should be checking for CAP_SYS_ADMIN here... */
+	if (geteuid() != 0)
+		ksft_exit_skip("all tests require euid == 0\n");
+
+	test_openat2_opath_tests();
+
+	if (ksft_get_fail_cnt() + ksft_get_error_cnt() > 0)
+		ksft_exit_fail();
+	else
+		ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/pidfd/.gitignore b/tools/testing/selftests/pidfd/.gitignore
index 8d06949..3a779c0 100644
--- a/tools/testing/selftests/pidfd/.gitignore
+++ b/tools/testing/selftests/pidfd/.gitignore
@@ -2,3 +2,4 @@
 pidfd_poll_test
 pidfd_test
 pidfd_wait
+pidfd_getfd_test
diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile
index 43db1b9..75a5458 100644
--- a/tools/testing/selftests/pidfd/Makefile
+++ b/tools/testing/selftests/pidfd/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 CFLAGS += -g -I../../../../usr/include/ -pthread
 
-TEST_GEN_PROGS := pidfd_test pidfd_fdinfo_test pidfd_open_test pidfd_poll_test pidfd_wait
+TEST_GEN_PROGS := pidfd_test pidfd_fdinfo_test pidfd_open_test pidfd_poll_test pidfd_wait pidfd_getfd_test
 
 include ../lib.mk
 
diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h
index c6bc683..d482515 100644
--- a/tools/testing/selftests/pidfd/pidfd.h
+++ b/tools/testing/selftests/pidfd/pidfd.h
@@ -36,6 +36,10 @@
 #define __NR_clone3 -1
 #endif
 
+#ifndef __NR_pidfd_getfd
+#define __NR_pidfd_getfd -1
+#endif
+
 /*
  * The kernel reserves 300 pids via RESERVED_PIDS in kernel/pid.c
  * That means, when it wraps around any pid < 300 will be skipped.
@@ -84,4 +88,9 @@ static inline int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
 	return syscall(__NR_pidfd_send_signal, pidfd, sig, info, flags);
 }
 
+static inline int sys_pidfd_getfd(int pidfd, int fd, int flags)
+{
+	return syscall(__NR_pidfd_getfd, pidfd, fd, flags);
+}
+
 #endif /* __PIDFD_H */
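For context, a rough sketch of how the new sys_pidfd_getfd() wrapper combines with sys_pidfd_open() from the same header (illustrative only, not taken from the patch; grab_remote_fd() is a hypothetical name, the caller needs ptrace-level permission over the target, and error handling is kept minimal):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "pidfd.h"

/*
 * Duplicate @targetfd from process @pid into our own fd table.
 * Returns the new local fd, or -1 with errno set (EPERM without
 * ptrace permission, EBADF if @targetfd does not exist remotely).
 */
static int grab_remote_fd(pid_t pid, int targetfd)
{
	int pidfd, fd, err;

	pidfd = sys_pidfd_open(pid, 0);
	if (pidfd < 0)
		return -1;

	fd = sys_pidfd_getfd(pidfd, targetfd, 0);
	err = errno;
	close(pidfd);
	errno = err;
	return fd;
}

int main(int argc, char **argv)
{
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <remote-fd>\n", argv[0]);
		return 1;
	}

	fd = grab_remote_fd(atoi(argv[1]), atoi(argv[2]));
	printf("pidfd_getfd -> %d\n", fd);
	if (fd >= 0)
		close(fd);
	return fd >= 0 ? 0 : 1;
}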
diff --git a/tools/testing/selftests/pidfd/pidfd_getfd_test.c b/tools/testing/selftests/pidfd/pidfd_getfd_test.c
new file mode 100644
index 0000000..401a7c1
--- /dev/null
+++ b/tools/testing/selftests/pidfd/pidfd_getfd_test.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <linux/types.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <linux/kcmp.h>
+
+#include "pidfd.h"
+#include "../kselftest.h"
+#include "../kselftest_harness.h"
+
+/*
+ * UNKNOWN_FD is an fd number that should never exist in the child, as it is
+ * used to check the negative case.
+ */
+#define UNKNOWN_FD 111
+#define UID_NOBODY 65535
+
+static int sys_kcmp(pid_t pid1, pid_t pid2, int type, unsigned long idx1,
+		    unsigned long idx2)
+{
+	return syscall(__NR_kcmp, pid1, pid2, type, idx1, idx2);
+}
+
+static int sys_memfd_create(const char *name, unsigned int flags)
+{
+	return syscall(__NR_memfd_create, name, flags);
+}
+
+static int __child(int sk, int memfd)
+{
+	int ret;
+	char buf;
+
+	/*
+	 * Ensure we don't leave around a bunch of orphaned children if our
+	 * tests fail.
+	 */
+	ret = prctl(PR_SET_PDEATHSIG, SIGKILL);
+	if (ret) {
+		fprintf(stderr, "%s: Child could not set DEATHSIG\n",
+			strerror(errno));
+		return -1;
+	}
+
+	ret = send(sk, &memfd, sizeof(memfd), 0);
+	if (ret != sizeof(memfd)) {
+		fprintf(stderr, "%s: Child failed to send fd number\n",
+			strerror(errno));
+		return -1;
+	}
+
+	/*
+	 * The fixture setup is completed at this point. The tests will run.
+	 *
+	 * This blocking recv enables the parent to message the child.
+	 * Either we will read 'P' off of the sk, indicating that we need
+	 * to disable ptrace, or we will read a 0, indicating that the other
+	 * side has closed the sk. This occurs during fixture teardown time,
+	 * indicating that the child should exit.
+	 */
+	while ((ret = recv(sk, &buf, sizeof(buf), 0)) > 0) {
+		if (buf == 'P') {
+			ret = prctl(PR_SET_DUMPABLE, 0);
+			if (ret < 0) {
+				fprintf(stderr,
+					"%s: Child failed to disable ptrace\n",
+					strerror(errno));
+				return -1;
+			}
+		} else {
+			fprintf(stderr, "Child received unknown command %c\n",
+				buf);
+			return -1;
+		}
+		ret = send(sk, &buf, sizeof(buf), 0);
+		if (ret != 1) {
+			fprintf(stderr, "%s: Child failed to ack\n",
+				strerror(errno));
+			return -1;
+		}
+	}
+	if (ret < 0) {
+		fprintf(stderr, "%s: Child failed to read from socket\n",
+			strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
+static int child(int sk)
+{
+	int memfd, ret;
+
+	memfd = sys_memfd_create("test", 0);
+	if (memfd < 0) {
+		fprintf(stderr, "%s: Child could not create memfd\n",
+			strerror(errno));
+		ret = -1;
+	} else {
+		ret = __child(sk, memfd);
+		close(memfd);
+	}
+
+	close(sk);
+	return ret;
+}
+
+FIXTURE(child)
+{
+	/*
+	 * remote_fd is the number of the FD which we are trying to retrieve
+	 * from the child.
+	 */
+	int remote_fd;
+	/* pid points to the child which we are fetching FDs from */
+	pid_t pid;
+	/* pidfd is the pidfd of the child */
+	int pidfd;
+	/*
+	 * sk is our side of the socketpair used to communicate with the child.
+	 * When it is closed, the child will exit.
+	 */
+	int sk;
+};
+
+FIXTURE_SETUP(child)
+{
+	int ret, sk_pair[2];
+
+	ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair)) {
+		TH_LOG("%s: failed to create socketpair", strerror(errno));
+	}
+	self->sk = sk_pair[0];
+
+	self->pid = fork();
+	ASSERT_GE(self->pid, 0);
+
+	if (self->pid == 0) {
+		close(sk_pair[0]);
+		if (child(sk_pair[1]))
+			_exit(EXIT_FAILURE);
+		_exit(EXIT_SUCCESS);
+	}
+
+	close(sk_pair[1]);
+
+	self->pidfd = sys_pidfd_open(self->pid, 0);
+	ASSERT_GE(self->pidfd, 0);
+
+	/*
+	 * Wait for the child to complete setup. It'll send the remote memfd's
+	 * number when ready.
+	 */
+	ret = recv(sk_pair[0], &self->remote_fd, sizeof(self->remote_fd), 0);
+	ASSERT_EQ(sizeof(self->remote_fd), ret);
+}
+
+FIXTURE_TEARDOWN(child)
+{
+	EXPECT_EQ(0, close(self->pidfd));
+	EXPECT_EQ(0, close(self->sk));
+
+	EXPECT_EQ(0, wait_for_pid(self->pid));
+}
+
+TEST_F(child, disable_ptrace)
+{
+	int uid, fd;
+	char c;
+
+	/*
+	 * Turn into nobody if we're root, to avoid CAP_SYS_PTRACE.
+	 *
+	 * The tests should run in their own process, so even if this test
+	 * fails, it shouldn't result in subsequent tests failing.
+	 */
+	uid = getuid();
+	if (uid == 0)
+		ASSERT_EQ(0, seteuid(UID_NOBODY));
+
+	ASSERT_EQ(1, send(self->sk, "P", 1, 0));
+	ASSERT_EQ(1, recv(self->sk, &c, 1, 0));
+
+	fd = sys_pidfd_getfd(self->pidfd, self->remote_fd, 0);
+	EXPECT_EQ(-1, fd);
+	EXPECT_EQ(EPERM, errno);
+
+	if (uid == 0)
+		ASSERT_EQ(0, seteuid(0));
+}
+
+TEST_F(child, fetch_fd)
+{
+	int fd, ret;
+
+	fd = sys_pidfd_getfd(self->pidfd, self->remote_fd, 0);
+	ASSERT_GE(fd, 0);
+
+	EXPECT_EQ(0, sys_kcmp(getpid(), self->pid, KCMP_FILE, fd, self->remote_fd));
+
+	ret = fcntl(fd, F_GETFD);
+	ASSERT_GE(ret, 0);
+	EXPECT_GE(ret & FD_CLOEXEC, 0);
+
+	close(fd);
+}
+
+TEST_F(child, test_unknown_fd)
+{
+	int fd;
+
+	fd = sys_pidfd_getfd(self->pidfd, UNKNOWN_FD, 0);
+	EXPECT_EQ(-1, fd) {
+		TH_LOG("getfd succeeded while fetching unknown fd");
+	};
+	EXPECT_EQ(EBADF, errno) {
+		TH_LOG("%s: getfd did not get EBADF", strerror(errno));
+	}
+}
+
+TEST(flags_set)
+{
+	ASSERT_EQ(-1, sys_pidfd_getfd(0, 0, 1));
+	EXPECT_EQ(errno, EINVAL);
+}
+
+#if __NR_pidfd_getfd == -1
+int main(void)
+{
+	fprintf(stderr, "__NR_pidfd_getfd undefined. The pidfd_getfd syscall is unavailable. Test aborting\n");
+	return KSFT_SKIP;
+}
+#else
+TEST_HARNESS_MAIN
+#endif
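
The selftest above drives pidfd_getfd() through syscall(2) because libc does not yet ship a wrapper for it. Below is a minimal standalone sketch of the same call sequence, assuming the installed kernel headers define __NR_pidfd_open and __NR_pidfd_getfd (the test's own #if guard shows they may be stubbed out on older systems) and that the caller has ptrace access to the target, which is exactly what the disable_ptrace test exercises:

#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	pid_t pid;
	int remote_fd, pidfd, fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <remote fd>\n", argv[0]);
		return 1;
	}

	pid = atoi(argv[1]);		/* target process */
	remote_fd = atoi(argv[2]);	/* fd number inside that process */

	pidfd = syscall(__NR_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	/* Duplicate the remote fd into this process; flags must currently be 0. */
	fd = syscall(__NR_pidfd_getfd, pidfd, remote_fd, 0);
	if (fd < 0) {
		perror("pidfd_getfd");
		return 1;
	}

	printf("fetched local fd %d\n", fd);
	return 0;
}

The fetch_fd test then uses kcmp(KCMP_FILE) to confirm that the duplicated descriptor refers to the same struct file as the original in the child.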
diff --git a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
index 26112ab..f52ed92 100755
--- a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
+++ b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
@@ -53,9 +53,13 @@
 	# is a no-op.
 	echo $dev >/sys/kernel/debug/powerpc/eeh_dev_check
 
-	# Enforce a 30s timeout for recovery. Even the IPR, which is infamously
-	# slow to reset, should recover within 30s.
-	max_wait=30
+	# Default to a 60s timeout when waiting for a device to recover. This
+	# is an arbitrary default which can be overridden by setting the
+	# EEH_MAX_WAIT environment variable when required.
+
+	# The current record holder for longest recovery time is:
+	#  "Adaptec Series 8 12G SAS/PCIe 3" at 39 seconds
+	max_wait=${EEH_MAX_WAIT:=60}
 
 	for i in `seq 0 ${max_wait}` ; do
 		if pe_ok $dev ; then
diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore
index 7101ffd..0ebeaea 100644
--- a/tools/testing/selftests/powerpc/mm/.gitignore
+++ b/tools/testing/selftests/powerpc/mm/.gitignore
@@ -5,3 +5,4 @@
 segv_errors
 wild_bctr
 large_vm_fork_separation
+bad_accesses
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index ed15658..b9103c4 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -3,7 +3,7 @@
 	$(MAKE) -C ../
 
 TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \
-		  large_vm_fork_separation
+		  large_vm_fork_separation bad_accesses
 TEST_GEN_PROGS_EXTENDED := tlbie_test
 TEST_GEN_FILES := tempfile
 
@@ -16,6 +16,7 @@
 
 $(OUTPUT)/wild_bctr: CFLAGS += -m64
 $(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64
+$(OUTPUT)/bad_accesses: CFLAGS += -m64
 
 $(OUTPUT)/tempfile:
 	dd if=/dev/zero of=$@ bs=64k count=1
diff --git a/tools/testing/selftests/powerpc/mm/bad_accesses.c b/tools/testing/selftests/powerpc/mm/bad_accesses.c
new file mode 100644
index 0000000..adc465f
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/bad_accesses.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2019, Michael Ellerman, IBM Corp.
+//
+// Test that out-of-bounds reads/writes behave as expected.
+
+#include <setjmp.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+// Old distros (Ubuntu 16.04 at least) don't define this
+#ifndef SEGV_BNDERR
+#define SEGV_BNDERR	3
+#endif
+
+// 64-bit kernel is always here
+#define PAGE_OFFSET	(0xcul << 60)
+
+static unsigned long kernel_virt_end;
+
+static volatile int fault_code;
+static volatile unsigned long fault_addr;
+static jmp_buf setjmp_env;
+
+static void segv_handler(int n, siginfo_t *info, void *ctxt_v)
+{
+	fault_code = info->si_code;
+	fault_addr = (unsigned long)info->si_addr;
+	siglongjmp(setjmp_env, 1);
+}
+
+int bad_access(char *p, bool write)
+{
+	char x;
+
+	fault_code = 0;
+	fault_addr = 0;
+
+	if (sigsetjmp(setjmp_env, 1) == 0) {
+		if (write)
+			*p = 1;
+		else
+			x = *p;
+
+		printf("Bad - no SEGV! (%c)\n", x);
+		return 1;
+	}
+
+	// If we see MAPERR that means we took a page fault rather than an SLB
+	// miss. We only expect to take page faults for addresses within the
+	// valid kernel range.
+	FAIL_IF(fault_code == SEGV_MAPERR && \
+		(fault_addr < PAGE_OFFSET || fault_addr >= kernel_virt_end));
+
+	FAIL_IF(fault_code != SEGV_MAPERR && fault_code != SEGV_BNDERR);
+
+	return 0;
+}
+
+static int using_hash_mmu(bool *using_hash)
+{
+	char line[128];
+	FILE *f;
+	int rc;
+
+	f = fopen("/proc/cpuinfo", "r");
+	FAIL_IF(!f);
+
+	rc = 0;
+	while (fgets(line, sizeof(line), f) != NULL) {
+		if (strcmp(line, "MMU		: Hash\n") == 0) {
+			*using_hash = true;
+			goto out;
+		}
+
+		if (strcmp(line, "MMU		: Radix\n") == 0) {
+			*using_hash = false;
+			goto out;
+		}
+	}
+
+	rc = -1;
+out:
+	fclose(f);
+	return rc;
+}
+
+static int test(void)
+{
+	unsigned long i, j, addr, region_shift, page_shift, page_size;
+	struct sigaction sig;
+	bool hash_mmu;
+
+	sig = (struct sigaction) {
+		.sa_sigaction = segv_handler,
+		.sa_flags = SA_SIGINFO,
+	};
+
+	FAIL_IF(sigaction(SIGSEGV, &sig, NULL) != 0);
+
+	FAIL_IF(using_hash_mmu(&hash_mmu));
+
+	page_size = sysconf(_SC_PAGESIZE);
+	if (page_size == (64 * 1024))
+		page_shift = 16;
+	else
+		page_shift = 12;
+
+	if (page_size == (64 * 1024) || !hash_mmu) {
+		region_shift = 52;
+
+		// We have 7 512T regions (4 kernel linear, vmalloc, io, vmemmap)
+		kernel_virt_end = PAGE_OFFSET + (7 * (512ul << 40));
+	} else if (page_size == (4 * 1024) && hash_mmu) {
+		region_shift = 46;
+
+		// We have 7 64T regions (4 kernel linear, vmalloc, io, vmemmap)
+		kernel_virt_end = PAGE_OFFSET + (7 * (64ul << 40));
+	} else
+		FAIL_IF(true);
+
+	printf("Using %s MMU, PAGE_SIZE = %dKB start address 0x%016lx\n",
+	       hash_mmu ? "hash" : "radix",
+	       (1 << page_shift) >> 10,
+	       1ul << region_shift);
+
+	// This generates access patterns like:
+	//   0x0010000000000000
+	//   0x0010000000010000
+	//   0x0010000000020000
+	//   ...
+	//   0x0014000000000000
+	//   0x0018000000000000
+	//   0x0020000000000000
+	//   0x0020000000010000
+	//   0x0020000000020000
+	//   ...
+	//   0xf400000000000000
+	//   0xf800000000000000
+
+	for (i = 1; i <= ((0xful << 60) >> region_shift); i++) {
+		for (j = page_shift - 1; j < 60; j++) {
+			unsigned long base, delta;
+
+			base  = i << region_shift;
+			delta = 1ul << j;
+
+			if (delta >= base)
+				break;
+
+			addr = (base | delta) & ~((1 << page_shift) - 1);
+
+			FAIL_IF(bad_access((char *)addr, false));
+			FAIL_IF(bad_access((char *)addr, true));
+		}
+	}
+
+	return 0;
+}
+
+int main(void)
+{
+	return test_harness(test, "bad_accesses");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c b/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
index 7deedbc..fc477df 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
@@ -455,9 +455,8 @@ run_tests(pid_t child_pid, struct ppc_debug_info *dbginfo, bool dawr)
 	if (dbginfo->features & PPC_DEBUG_FEATURE_DATA_BP_RANGE) {
 		test_sethwdebug_exact(child_pid);
 
-		if (!is_8xx)
-			test_sethwdebug_range_aligned(child_pid);
-		if (dawr && !is_8xx) {
+		test_sethwdebug_range_aligned(child_pid);
+		if (dawr || is_8xx) {
 			test_sethwdebug_range_unaligned(child_pid);
 			test_sethwdebug_range_unaligned_dar(child_pid);
 			test_sethwdebug_dawr_max_range(child_pid);
diff --git a/tools/testing/selftests/size/get_size.c b/tools/testing/selftests/size/get_size.c
index 2ad45b9..2980b1a 100644
--- a/tools/testing/selftests/size/get_size.c
+++ b/tools/testing/selftests/size/get_size.c
@@ -11,23 +11,35 @@
  * own execution.  It also attempts to have as few dependencies
  * on kernel features as possible.
  *
- * It should be statically linked, with startup libs avoided.
- * It uses no library calls, and only the following 3 syscalls:
+ * It should be statically linked, with startup libs avoided.  It uses
+ * no library calls except the syscall() function for the following 3
+ * syscalls:
  *   sysinfo(), write(), and _exit()
  *
  * For output, it avoids printf (which in some C libraries
  * has large external dependencies) by implementing its own
  * number output and print routines, and using __builtin_strlen()
+ *
+ * The test may crash if any of the above syscalls fails because in some
+ * libc implementations (e.g. the GNU C Library) errno is saved in
+ * thread-local storage, which does not get initialized due to avoiding
+ * startup libs.
  */
 
 #include <sys/sysinfo.h>
 #include <unistd.h>
+#include <sys/syscall.h>
 
 #define STDOUT_FILENO 1
 
 static int print(const char *s)
 {
-	return write(STDOUT_FILENO, s, __builtin_strlen(s));
+	size_t len = 0;
+
+	while (s[len] != '\0')
+		len++;
+
+	return syscall(SYS_write, STDOUT_FILENO, s, len);
 }
 
 static inline char *num_to_str(unsigned long num, char *buf, int len)
@@ -79,12 +91,12 @@ void _start(void)
 	print("TAP version 13\n");
 	print("# Testing system size.\n");
 
-	ccode = sysinfo(&info);
+	ccode = syscall(SYS_sysinfo, &info);
 	if (ccode < 0) {
 		print("not ok 1");
 		print(test_name);
 		print(" ---\n reason: \"could not get sysinfo\"\n ...\n");
-		_exit(ccode);
+		syscall(SYS_exit, ccode);
 	}
 	print("ok 1");
 	print(test_name);
@@ -100,5 +112,5 @@ void _start(void)
 	print(" ...\n");
 	print("1..1\n");
 
-	_exit(0);
+	syscall(SYS_exit, 0);
 }
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py
index e98c367..d34fe06 100644
--- a/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py
+++ b/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py
@@ -54,7 +54,7 @@
             shell=True,
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
-            env=ENVIR)
+            env=os.environ.copy())
         (rawout, serr) = proc.communicate()
 
         if proc.returncode != 0 and len(serr) > 0:
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json b/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
index 2e361ce..98a20fa 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
@@ -6,6 +6,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -25,6 +28,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -44,6 +50,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -63,6 +72,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -82,6 +94,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -101,6 +116,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -120,6 +138,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -139,6 +160,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -158,6 +182,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -177,6 +204,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -196,6 +226,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -215,6 +248,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -234,6 +270,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -253,6 +292,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -272,6 +314,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -291,6 +336,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
@@ -310,6 +358,9 @@
             "filter",
             "basic"
         ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c
index 485cf06..389327e 100644
--- a/tools/testing/selftests/vm/gup_benchmark.c
+++ b/tools/testing/selftests/vm/gup_benchmark.c
@@ -18,6 +18,9 @@
 #define GUP_LONGTERM_BENCHMARK	_IOWR('g', 2, struct gup_benchmark)
 #define GUP_BENCHMARK		_IOWR('g', 3, struct gup_benchmark)
 
+/* Just the flags we need, copied from mm.h: */
+#define FOLL_WRITE	0x01	/* check pte is writable */
+
 struct gup_benchmark {
 	__u64 get_delta_usec;
 	__u64 put_delta_usec;
@@ -85,7 +88,8 @@ int main(int argc, char **argv)
 	}
 
 	gup.nr_pages_per_call = nr_pages;
-	gup.flags = write;
+	if (write)
+		gup.flags |= FOLL_WRITE;
 
 	fd = open("/sys/kernel/debug/gup_benchmark", O_RDWR);
 	if (fd == -1)
diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
index d5c85c7..f5ab1cd 100755
--- a/tools/testing/selftests/wireguard/netns.sh
+++ b/tools/testing/selftests/wireguard/netns.sh
@@ -38,9 +38,8 @@
 ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; }
 ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; }
 sleep() { read -t "$1" -N 1 || true; }
-waitiperf() { pretty "${1//*-}" "wait for iperf:5201"; while [[ $(ss -N "$1" -tlp 'sport = 5201') != *iperf3* ]]; do sleep 0.1; done; }
-waitncatudp() { pretty "${1//*-}" "wait for udp:1111"; while [[ $(ss -N "$1" -ulp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; }
-waitncattcp() { pretty "${1//*-}" "wait for tcp:1111"; while [[ $(ss -N "$1" -tlp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; }
+waitiperf() { pretty "${1//*-}" "wait for iperf:5201 pid $2"; while [[ $(ss -N "$1" -tlpH 'sport = 5201') != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; }
+waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; }
 waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; }
 
 cleanup() {
@@ -119,22 +118,22 @@
 
 	# TCP over IPv4
 	n2 iperf3 -s -1 -B 192.168.241.2 &
-	waitiperf $netns2
+	waitiperf $netns2 $!
 	n1 iperf3 -Z -t 3 -c 192.168.241.2
 
 	# TCP over IPv6
 	n1 iperf3 -s -1 -B fd00::1 &
-	waitiperf $netns1
+	waitiperf $netns1 $!
 	n2 iperf3 -Z -t 3 -c fd00::1
 
 	# UDP over IPv4
 	n1 iperf3 -s -1 -B 192.168.241.1 &
-	waitiperf $netns1
+	waitiperf $netns1 $!
 	n2 iperf3 -Z -t 3 -b 0 -u -c 192.168.241.1
 
 	# UDP over IPv6
 	n2 iperf3 -s -1 -B fd00::2 &
-	waitiperf $netns2
+	waitiperf $netns2 $!
 	n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2
 }
 
@@ -207,7 +206,7 @@
 n1 wg set wg0 peer "$pub2" allowed-ips 192.168.241.0/24
 exec 4< <(n1 ncat -l -u -p 1111)
 ncat_pid=$!
-waitncatudp $netns1
+waitncatudp $netns1 $ncat_pid
 n2 ncat -u 192.168.241.1 1111 <<<"X"
 read -r -N 1 -t 1 out <&4 && [[ $out == "X" ]]
 kill $ncat_pid
@@ -216,7 +215,7 @@
 n2 wg set wg0 listen-port 9997
 exec 4< <(n1 ncat -l -u -p 1111)
 ncat_pid=$!
-waitncatudp $netns1
+waitncatudp $netns1 $ncat_pid
 n2 ncat -u 192.168.241.1 1111 <<<"X"
 ! read -r -N 1 -t 1 out <&4 || false
 kill $ncat_pid
@@ -516,6 +515,12 @@
 n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0
 n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75
 n0 wg set wg0 peer "$pub2" allowed-ips ::/0
+n0 wg set wg0 peer "$pub2" remove
+low_order_points=( AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38= )
+n0 wg set wg0 private-key /dev/null ${low_order_points[@]/#/peer }
+[[ -z $(n0 wg show wg0 peers) ]]
+n0 wg set wg0 private-key <(echo "$key1") ${low_order_points[@]/#/peer }
+[[ -z $(n0 wg show wg0 peers) ]]
 ip0 link del wg0
 
 declare -A objects
diff --git a/tools/testing/selftests/wireguard/qemu/debug.config b/tools/testing/selftests/wireguard/qemu/debug.config
index b9c7270..5909e7e 100644
--- a/tools/testing/selftests/wireguard/qemu/debug.config
+++ b/tools/testing/selftests/wireguard/qemu/debug.config
@@ -1,5 +1,4 @@
 CONFIG_LOCALVERSION="-debug"
-CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
 CONFIG_FRAME_POINTER=y
 CONFIG_STACK_VALIDATION=y
diff --git a/tools/usb/usbip/README b/tools/usb/usbip/README
index 7844490..2fc021c 100644
--- a/tools/usb/usbip/README
+++ b/tools/usb/usbip/README
@@ -138,28 +138,28 @@
     Local USB devices
     =================
      - busid 1-1 (05a9:a511)
-	     1-1:1.0 -> ov511
+	     1-1:1.0
 
      - busid 3-2 (0711:0902)
-	     3-2:1.0 -> none
+	     3-2:1.0
 
      - busid 3-3.1 (08bb:2702)
-	     3-3.1:1.0 -> snd-usb-audio
-	     3-3.1:1.1 -> snd-usb-audio
+	     3-3.1:1.0
+	     3-3.1:1.1
 
      - busid 3-3.2 (04bb:0206)
-	     3-3.2:1.0 -> usb-storage
+	     3-3.2:1.0
 
      - busid 3-3 (0409:0058)
-	     3-3:1.0 -> hub
+	     3-3:1.0
 
      - busid 4-1 (046d:08b2)
-	     4-1:1.0 -> none
-	     4-1:1.1 -> none
-	     4-1:1.2 -> none
+	     4-1:1.0
+	     4-1:1.1
+	     4-1:1.2
 
      - busid 5-2 (058f:9254)
-	     5-2:1.0 -> hub
+	     5-2:1.0
 
 A USB storage device of busid 3-3.2 is now bound to the usb-storage
 driver. To export this device, we first mark the device as
@@ -180,7 +180,7 @@
     ...
 
      - busid 3-3.2 (04bb:0206)
-	     3-3.2:1.0 -> usbip-host
+	     3-3.2:1.0
     ...
 
 ---------------------------
diff --git a/tools/usb/usbip/src/usbip_network.c b/tools/usb/usbip/src/usbip_network.c
index d595d72..ed4dc8c 100644
--- a/tools/usb/usbip/src/usbip_network.c
+++ b/tools/usb/usbip/src/usbip_network.c
@@ -50,39 +50,39 @@ void usbip_setup_port_number(char *arg)
 	info("using port %d (\"%s\")", usbip_port, usbip_port_string);
 }
 
-void usbip_net_pack_uint32_t(int pack, uint32_t *num)
+uint32_t usbip_net_pack_uint32_t(int pack, uint32_t num)
 {
 	uint32_t i;
 
 	if (pack)
-		i = htonl(*num);
+		i = htonl(num);
 	else
-		i = ntohl(*num);
+		i = ntohl(num);
 
-	*num = i;
+	return i;
 }
 
-void usbip_net_pack_uint16_t(int pack, uint16_t *num)
+uint16_t usbip_net_pack_uint16_t(int pack, uint16_t num)
 {
 	uint16_t i;
 
 	if (pack)
-		i = htons(*num);
+		i = htons(num);
 	else
-		i = ntohs(*num);
+		i = ntohs(num);
 
-	*num = i;
+	return i;
 }
 
 void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev)
 {
-	usbip_net_pack_uint32_t(pack, &udev->busnum);
-	usbip_net_pack_uint32_t(pack, &udev->devnum);
-	usbip_net_pack_uint32_t(pack, &udev->speed);
+	udev->busnum = usbip_net_pack_uint32_t(pack, udev->busnum);
+	udev->devnum = usbip_net_pack_uint32_t(pack, udev->devnum);
+	udev->speed = usbip_net_pack_uint32_t(pack, udev->speed);
 
-	usbip_net_pack_uint16_t(pack, &udev->idVendor);
-	usbip_net_pack_uint16_t(pack, &udev->idProduct);
-	usbip_net_pack_uint16_t(pack, &udev->bcdDevice);
+	udev->idVendor = usbip_net_pack_uint16_t(pack, udev->idVendor);
+	udev->idProduct = usbip_net_pack_uint16_t(pack, udev->idProduct);
+	udev->bcdDevice = usbip_net_pack_uint16_t(pack, udev->bcdDevice);
 }
 
 void usbip_net_pack_usb_interface(int pack __attribute__((unused)),
@@ -129,6 +129,14 @@ ssize_t usbip_net_send(int sockfd, void *buff, size_t bufflen)
 	return usbip_net_xmit(sockfd, buff, bufflen, 1);
 }
 
+static inline void usbip_net_pack_op_common(int pack,
+					    struct op_common *op_common)
+{
+	op_common->version = usbip_net_pack_uint16_t(pack, op_common->version);
+	op_common->code = usbip_net_pack_uint16_t(pack, op_common->code);
+	op_common->status = usbip_net_pack_uint32_t(pack, op_common->status);
+}
+
 int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status)
 {
 	struct op_common op_common;
@@ -140,7 +148,7 @@ int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status)
 	op_common.code    = code;
 	op_common.status  = status;
 
-	PACK_OP_COMMON(1, &op_common);
+	usbip_net_pack_op_common(1, &op_common);
 
 	rc = usbip_net_send(sockfd, &op_common, sizeof(op_common));
 	if (rc < 0) {
@@ -164,7 +172,7 @@ int usbip_net_recv_op_common(int sockfd, uint16_t *code, int *status)
 		goto err;
 	}
 
-	PACK_OP_COMMON(0, &op_common);
+	usbip_net_pack_op_common(0, &op_common);
 
 	if (op_common.version != USBIP_VERSION) {
 		err("USBIP Kernel and tool version mismatch: %d %d:",
diff --git a/tools/usb/usbip/src/usbip_network.h b/tools/usb/usbip/src/usbip_network.h
index 555215e..83b4c53 100644
--- a/tools/usb/usbip/src/usbip_network.h
+++ b/tools/usb/usbip/src/usbip_network.h
@@ -32,12 +32,6 @@ struct op_common {
 
 } __attribute__((packed));
 
-#define PACK_OP_COMMON(pack, op_common)  do {\
-	usbip_net_pack_uint16_t(pack, &(op_common)->version);\
-	usbip_net_pack_uint16_t(pack, &(op_common)->code);\
-	usbip_net_pack_uint32_t(pack, &(op_common)->status);\
-} while (0)
-
 /* ---------------------------------------------------------------------- */
 /* Dummy Code */
 #define OP_UNSPEC	0x00
@@ -163,11 +157,11 @@ struct op_devlist_reply_extra {
 } while (0)
 
 #define PACK_OP_DEVLIST_REPLY(pack, reply)  do {\
-	usbip_net_pack_uint32_t(pack, &(reply)->ndev);\
+	(reply)->ndev = usbip_net_pack_uint32_t(pack, (reply)->ndev);\
 } while (0)
 
-void usbip_net_pack_uint32_t(int pack, uint32_t *num);
-void usbip_net_pack_uint16_t(int pack, uint16_t *num);
+uint32_t usbip_net_pack_uint32_t(int pack, uint32_t num);
+uint16_t usbip_net_pack_uint16_t(int pack, uint16_t num);
 void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev);
 void usbip_net_pack_usb_interface(int pack, struct usbip_usb_interface *uinf);
 
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index 68092d1..9b68658 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -720,11 +720,11 @@ static void slab_debug(struct slabinfo *s)
 		return;
 
 	if (sanity && !s->sanity_checks) {
-		set_obj(s, "sanity", 1);
+		set_obj(s, "sanity_checks", 1);
 	}
 	if (!sanity && s->sanity_checks) {
 		if (slab_empty(s))
-			set_obj(s, "sanity", 0);
+			set_obj(s, "sanity_checks", 0);
 		else
 			fprintf(stderr, "%s not empty cannot disable sanity checks\n", s->name);
 	}
diff --git a/usr/.gitignore b/usr/.gitignore
index be5eae1..610de73 100644
--- a/usr/.gitignore
+++ b/usr/.gitignore
@@ -1,9 +1,3 @@
-#
-# Generated files
-#
 gen_init_cpio
 initramfs_data.cpio
-initramfs_data.cpio.gz
-initramfs_data.cpio.bz2
-initramfs_data.cpio.lzma
-initramfs_list
+/initramfs_inc_data
diff --git a/usr/Kconfig b/usr/Kconfig
index a6b6850..bdf5bbd 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -54,7 +54,6 @@
 
 config RD_GZIP
 	bool "Support initial ramdisk/ramfs compressed using gzip"
-	depends on BLK_DEV_INITRD
 	default y
 	select DECOMPRESS_GZIP
 	help
@@ -64,7 +63,6 @@
 config RD_BZIP2
 	bool "Support initial ramdisk/ramfs compressed using bzip2"
 	default y
-	depends on BLK_DEV_INITRD
 	select DECOMPRESS_BZIP2
 	help
 	  Support loading of a bzip2 encoded initial ramdisk or cpio buffer
@@ -73,7 +71,6 @@
 config RD_LZMA
 	bool "Support initial ramdisk/ramfs compressed using LZMA"
 	default y
-	depends on BLK_DEV_INITRD
 	select DECOMPRESS_LZMA
 	help
 	  Support loading of a LZMA encoded initial ramdisk or cpio buffer
@@ -81,7 +78,6 @@
 
 config RD_XZ
 	bool "Support initial ramdisk/ramfs compressed using XZ"
-	depends on BLK_DEV_INITRD
 	default y
 	select DECOMPRESS_XZ
 	help
@@ -91,7 +87,6 @@
 config RD_LZO
 	bool "Support initial ramdisk/ramfs compressed using LZO"
 	default y
-	depends on BLK_DEV_INITRD
 	select DECOMPRESS_LZO
 	help
 	  Support loading of a LZO encoded initial ramdisk or cpio buffer
@@ -100,7 +95,6 @@
 config RD_LZ4
 	bool "Support initial ramdisk/ramfs compressed using LZ4"
 	default y
-	depends on BLK_DEV_INITRD
 	select DECOMPRESS_LZ4
 	help
 	  Support loading of a LZ4 encoded initial ramdisk or cpio buffer
@@ -108,8 +102,7 @@
 
 choice
 	prompt "Built-in initramfs compression mode"
-	depends on INITRAMFS_SOURCE!=""
-	optional
+	depends on INITRAMFS_SOURCE != ""
 	help
 	  This option allows you to decide by which algorithm the builtin
 	  initramfs will be compressed.  Several compression algorithms are
@@ -215,21 +208,3 @@
 	  by default which could cause a build failure.
 
 endchoice
-
-config INITRAMFS_COMPRESSION
-	depends on INITRAMFS_SOURCE!=""
-	string
-	default ""      if INITRAMFS_COMPRESSION_NONE
-	default ".gz"   if INITRAMFS_COMPRESSION_GZIP
-	default ".bz2"  if INITRAMFS_COMPRESSION_BZIP2
-	default ".lzma" if INITRAMFS_COMPRESSION_LZMA
-	default ".xz"   if INITRAMFS_COMPRESSION_XZ
-	default ".lzo"  if INITRAMFS_COMPRESSION_LZO
-	default ".lz4"  if INITRAMFS_COMPRESSION_LZ4
-	default ".gz"   if RD_GZIP
-	default ".lz4"  if RD_LZ4
-	default ".lzo"  if RD_LZO
-	default ".xz"   if RD_XZ
-	default ".lzma" if RD_LZMA
-	default ".bz2"  if RD_BZIP2
-	default ""
diff --git a/usr/Makefile b/usr/Makefile
index e6f7cb2..b6081bb 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -3,61 +3,86 @@
 # kbuild file for usr/ - including initramfs image
 #
 
-klibcdirs:;
-PHONY += klibcdirs
+# cmd_bzip2, cmd_lzma, cmd_lzo and cmd_lz4 from scripts/Makefile.lib append the
+# size at the end of the compressed file, which unfortunately does not work
+# with unpack_to_rootfs(). Make size_append a no-op.
+override size_append := :
 
-suffix_y = $(subst $\",,$(CONFIG_INITRAMFS_COMPRESSION))
-datafile_y = initramfs_data.cpio$(suffix_y)
-datafile_d_y = .$(datafile_y).d
-AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/$(datafile_y)"
+compress-y					:= shipped
+compress-$(CONFIG_INITRAMFS_COMPRESSION_GZIP)	:= gzip
+compress-$(CONFIG_INITRAMFS_COMPRESSION_BZIP2)	:= bzip2
+compress-$(CONFIG_INITRAMFS_COMPRESSION_LZMA)	:= lzma
+compress-$(CONFIG_INITRAMFS_COMPRESSION_XZ)	:= xzmisc
+compress-$(CONFIG_INITRAMFS_COMPRESSION_LZO)	:= lzo
+compress-$(CONFIG_INITRAMFS_COMPRESSION_LZ4)	:= lz4
 
-# clean rules do not have CONFIG_INITRAMFS_COMPRESSION.  So clean up after all
-# possible compression formats.
-clean-files += initramfs_data.cpio*
-
-# Generate builtin.o based on initramfs_data.o
 obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
 
-# initramfs_data.o contains the compressed initramfs_data.cpio image.
-# The image is included using .incbin, a dependency which is not
-# tracked automatically.
-$(obj)/initramfs_data.o: $(obj)/$(datafile_y) FORCE
+$(obj)/initramfs_data.o: $(obj)/initramfs_inc_data
 
-#####
-# Generate the initramfs cpio archive
+ramfs-input := $(strip $(shell echo $(CONFIG_INITRAMFS_SOURCE)))
+cpio-data :=
+
+# If CONFIG_INITRAMFS_SOURCE is empty, generate a small initramfs with the
+# default contents.
+ifeq ($(ramfs-input),)
+ramfs-input := $(srctree)/$(src)/default_cpio_list
+endif
+
+ifeq ($(words $(ramfs-input)),1)
+
+# If CONFIG_INITRAMFS_SOURCE specifies a single file, and it is suffixed with
+# .cpio, use it directly as an initramfs.
+ifneq ($(filter %.cpio,$(ramfs-input)),)
+cpio-data := $(ramfs-input)
+endif
+
+# If CONFIG_INITRAMFS_SOURCE specifies a single file, and it is suffixed with
+# .cpio.*, use it directly as an initramfs, and avoid double compression.
+ifeq ($(words $(subst .cpio.,$(space),$(ramfs-input))),2)
+cpio-data := $(ramfs-input)
+compress-y := shipped
+endif
+
+endif
+
+# For other cases, generate the initramfs cpio archive based on the contents
+# specified by CONFIG_INITRAMFS_SOURCE.
+ifeq ($(cpio-data),)
+
+cpio-data := $(obj)/initramfs_data.cpio
 
 hostprogs-y := gen_init_cpio
-initramfs   := $(CONFIG_SHELL) $(srctree)/$(src)/gen_initramfs_list.sh
-ramfs-input := $(if $(filter-out "",$(CONFIG_INITRAMFS_SOURCE)), \
-			$(shell echo $(CONFIG_INITRAMFS_SOURCE)),-d)
-ramfs-args  := \
-        $(if $(CONFIG_INITRAMFS_ROOT_UID), -u $(CONFIG_INITRAMFS_ROOT_UID)) \
-        $(if $(CONFIG_INITRAMFS_ROOT_GID), -g $(CONFIG_INITRAMFS_ROOT_GID))
 
-# $(datafile_d_y) is used to identify all files included
+# .initramfs_data.cpio.d is used to identify all files included
 # in initramfs and to detect if any files are added/removed.
 # Removed files are identified by directory timestamp being updated
 # The dependency list is generated by gen_initramfs.sh -l
-ifneq ($(wildcard $(obj)/$(datafile_d_y)),)
-	include $(obj)/$(datafile_d_y)
-endif
-
-quiet_cmd_initfs = GEN     $@
-      cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
-
-targets := $(datafile_y)
+-include $(obj)/.initramfs_data.cpio.d
 
 # do not try to update files included in initramfs
 $(deps_initramfs): ;
 
-$(deps_initramfs): klibcdirs
+quiet_cmd_initfs = GEN     $@
+      cmd_initfs = \
+	$(CONFIG_SHELL) $< -o $@ -l $(obj)/.initramfs_data.cpio.d \
+	$(if $(CONFIG_INITRAMFS_ROOT_UID), -u $(CONFIG_INITRAMFS_ROOT_UID)) \
+	$(if $(CONFIG_INITRAMFS_ROOT_GID), -g $(CONFIG_INITRAMFS_ROOT_GID)) \
+	$(ramfs-input)
+
 # We rebuild initramfs_data.cpio if:
 # 1) Any included file is newer than initramfs_data.cpio
 # 2) There are changes in which files are included (added or deleted)
 # 3) If gen_init_cpio are newer than initramfs_data.cpio
 # 4) Arguments to gen_initramfs.sh changes
-$(obj)/$(datafile_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs
-	$(Q)$(initramfs) -l $(ramfs-input) > $(obj)/$(datafile_d_y)
+$(obj)/initramfs_data.cpio: $(src)/gen_initramfs.sh $(obj)/gen_init_cpio $(deps_initramfs) FORCE
 	$(call if_changed,initfs)
 
+endif
+
+$(obj)/initramfs_inc_data: $(cpio-data) FORCE
+	$(call if_changed,$(compress-y))
+
+targets += initramfs_data.cpio initramfs_inc_data
+
 subdir-$(CONFIG_UAPI_HEADER_TEST) += include
diff --git a/usr/default_cpio_list b/usr/default_cpio_list
new file mode 100644
index 0000000..37b3864
--- /dev/null
+++ b/usr/default_cpio_list
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# This is a very simple, default initramfs
+
+dir /dev 0755 0 0
+nod /dev/console 0600 0 0 c 5 1
+dir /root 0700 0 0
diff --git a/usr/gen_initramfs.sh b/usr/gen_initramfs.sh
new file mode 100755
index 0000000..8ae8316
--- /dev/null
+++ b/usr/gen_initramfs.sh
@@ -0,0 +1,247 @@
+#!/bin/sh
+# Copyright (C) Martin Schlemmer <azarah@nosferatu.za.org>
+# Copyright (C) 2006 Sam Ravnborg <sam@ravnborg.org>
+#
+# Released under the terms of the GNU GPL
+#
+# Generate a cpio packed initramfs. It uses gen_init_cpio to generate
+# the cpio archive.
+# This script assumes that gen_init_cpio is located in usr/ directory
+
+# error out on errors
+set -e
+
+usage() {
+cat << EOF
+Usage:
+$0 [-o <file>] [-l <dep_list>] [-u <uid>] [-g <gid>] {-d | <cpio_source>} ...
+	-o <file>      Create initramfs file named <file> by using gen_init_cpio
+	-l <dep_list>  Create dependency list named <dep_list>
+	-u <uid>       User ID to map to user ID 0 (root).
+		       <uid> is only meaningful if <cpio_source> is a
+		       directory.  "squash" forces all files to uid 0.
+	-g <gid>       Group ID to map to group ID 0 (root).
+		       <gid> is only meaningful if <cpio_source> is a
+		       directory.  "squash" forces all files to gid 0.
+	<cpio_source>  File list or directory for cpio archive.
+		       If <cpio_source> is a .cpio file it will be used
+		       as direct input to initramfs.
+
+All options except -o and -l may be repeated and are interpreted
+sequentially and immediately.  -u and -g states are preserved across
+<cpio_source> options so an explicit "-u 0 -g 0" is required
+to reset the root/group mapping.
+EOF
+}
+
+# awk style field access
+# $1 - field number; rest is argument string
+field() {
+	shift $1 ; echo $1
+}
+
+filetype() {
+	local argv1="$1"
+
+	# symlink test must come before file test
+	if [ -L "${argv1}" ]; then
+		echo "slink"
+	elif [ -f "${argv1}" ]; then
+		echo "file"
+	elif [ -d "${argv1}" ]; then
+		echo "dir"
+	elif [ -b "${argv1}" -o -c "${argv1}" ]; then
+		echo "nod"
+	elif [ -p "${argv1}" ]; then
+		echo "pipe"
+	elif [ -S "${argv1}" ]; then
+		echo "sock"
+	else
+		echo "invalid"
+	fi
+	return 0
+}
+
+print_mtime() {
+	local my_mtime="0"
+
+	if [ -e "$1" ]; then
+		my_mtime=$(find "$1" -printf "%T@\n" | sort -r | head -n 1)
+	fi
+
+	echo "# Last modified: ${my_mtime}" >> $cpio_list
+	echo "" >> $cpio_list
+}
+
+list_parse() {
+	if [ -z "$dep_list" -o -L "$1" ]; then
+		return
+	fi
+	echo "$1" | sed 's/:/\\:/g; s/$/ \\/' >> $dep_list
+}
+
+# for each file print a line in following format
+# <filetype> <name> <path to file> <octal mode> <uid> <gid>
+# for links, devices etc the format differs. See gen_init_cpio for details
+parse() {
+	local location="$1"
+	local name="/${location#${srcdir}}"
+	# change '//' into '/'
+	name=$(echo "$name" | sed -e 's://*:/:g')
+	local mode="$2"
+	local uid="$3"
+	local gid="$4"
+	local ftype=$(filetype "${location}")
+	# remap uid/gid to 0 if necessary
+	[ "$root_uid" = "squash" ] && uid=0 || [ "$uid" -eq "$root_uid" ] && uid=0
+	[ "$root_gid" = "squash" ] && gid=0 || [ "$gid" -eq "$root_gid" ] && gid=0
+	local str="${mode} ${uid} ${gid}"
+
+	[ "${ftype}" = "invalid" ] && return 0
+	[ "${location}" = "${srcdir}" ] && return 0
+
+	case "${ftype}" in
+		"file")
+			str="${ftype} ${name} ${location} ${str}"
+			;;
+		"nod")
+			local dev="`LC_ALL=C ls -l "${location}"`"
+			local maj=`field 5 ${dev}`
+			local min=`field 6 ${dev}`
+			maj=${maj%,}
+
+			[ -b "${location}" ] && dev="b" || dev="c"
+
+			str="${ftype} ${name} ${str} ${dev} ${maj} ${min}"
+			;;
+		"slink")
+			local target=`readlink "${location}"`
+			str="${ftype} ${name} ${target} ${str}"
+			;;
+		*)
+			str="${ftype} ${name} ${str}"
+			;;
+	esac
+
+	echo "${str}" >> $cpio_list
+
+	return 0
+}
+
+unknown_option() {
+	printf "ERROR: unknown option \"$arg\"\n" >&2
+	printf "If the filename validly begins with '-', " >&2
+	printf "then it must be prefixed\n" >&2
+	printf "by './' so that it won't be interpreted as an option." >&2
+	printf "\n" >&2
+	usage >&2
+	exit 1
+}
+
+header() {
+	printf "\n#####################\n# $1\n" >> $cpio_list
+}
+
+# process one directory (incl sub-directories)
+dir_filelist() {
+	header "$1"
+
+	srcdir=$(echo "$1" | sed -e 's://*:/:g')
+	dirlist=$(find "${srcdir}" -printf "%p %m %U %G\n" | LANG=C sort)
+
+	# If $dirlist is only one line, then the directory is empty
+	if [  "$(echo "${dirlist}" | wc -l)" -gt 1 ]; then
+		print_mtime "$1"
+
+		echo "${dirlist}" | \
+		while read x; do
+			list_parse $x
+			parse $x
+		done
+	fi
+}
+
+input_file() {
+	source="$1"
+	if [ -f "$1" ]; then
+		# If a regular file is specified, assume it is in
+		# gen_init_cpio format
+		header "$1"
+		print_mtime "$1" >> $cpio_list
+		cat "$1"         >> $cpio_list
+		if [ -n "$dep_list" ]; then
+		        echo "$1 \\"  >> $dep_list
+			cat "$1" | while read type dir file perm ; do
+				if [ "$type" = "file" ]; then
+					echo "$file \\" >> $dep_list
+				fi
+			done
+		fi
+	elif [ -d "$1" ]; then
+		# If a directory is specified then add all files in it to fs
+		dir_filelist "$1"
+	else
+		echo "  ${prog}: Cannot open '$1'" >&2
+		exit 1
+	fi
+}
+
+prog=$0
+root_uid=0
+root_gid=0
+dep_list=
+cpio_list=$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)
+output="/dev/stdout"
+
+trap "rm -f $cpio_list" EXIT
+
+while [ $# -gt 0 ]; do
+	arg="$1"
+	shift
+	case "$arg" in
+		"-l")	# files included in initramfs - used by kbuild
+			dep_list="$1"
+			echo "deps_initramfs := \\" > $dep_list
+			shift
+			;;
+		"-o")	# generate cpio image named $1
+			output="$1"
+			shift
+			;;
+		"-u")	# map $1 to uid=0 (root)
+			root_uid="$1"
+			[ "$root_uid" = "-1" ] && root_uid=$(id -u || echo 0)
+			shift
+			;;
+		"-g")	# map $1 to gid=0 (root)
+			root_gid="$1"
+			[ "$root_gid" = "-1" ] && root_gid=$(id -g || echo 0)
+			shift
+			;;
+		"-h")
+			usage
+			exit 0
+			;;
+		*)
+			case "$arg" in
+				"-"*)
+					unknown_option
+					;;
+				*)	# input file/dir - process it
+					input_file "$arg"
+					;;
+			esac
+			;;
+	esac
+done
+
+# Generate the cpio archive from the collected file list. The temporary
+# list file is removed by the EXIT trap above.
+timestamp=
+if test -n "$KBUILD_BUILD_TIMESTAMP"; then
+	timestamp="$(date -d"$KBUILD_BUILD_TIMESTAMP" +%s || :)"
+	if test -n "$timestamp"; then
+		timestamp="-t $timestamp"
+	fi
+fi
+usr/gen_init_cpio $timestamp $cpio_list > $output
diff --git a/usr/gen_initramfs_list.sh b/usr/gen_initramfs_list.sh
deleted file mode 100755
index 2bbac73..0000000
--- a/usr/gen_initramfs_list.sh
+++ /dev/null
@@ -1,328 +0,0 @@
-#!/bin/sh
-# Copyright (C) Martin Schlemmer <azarah@nosferatu.za.org>
-# Copyright (C) 2006 Sam Ravnborg <sam@ravnborg.org>
-#
-# Released under the terms of the GNU GPL
-#
-# Generate a cpio packed initramfs. It uses gen_init_cpio to generate
-# the cpio archive, and then compresses it.
-# The script may also be used to generate the inputfile used for gen_init_cpio
-# This script assumes that gen_init_cpio is located in usr/ directory
-
-# error out on errors
-set -e
-
-usage() {
-cat << EOF
-Usage:
-$0 [-o <file>] [-u <uid>] [-g <gid>] {-d | <cpio_source>} ...
-	-o <file>      Create compressed initramfs file named <file> using
-		       gen_init_cpio and compressor depending on the extension
-	-u <uid>       User ID to map to user ID 0 (root).
-		       <uid> is only meaningful if <cpio_source> is a
-		       directory.  "squash" forces all files to uid 0.
-	-g <gid>       Group ID to map to group ID 0 (root).
-		       <gid> is only meaningful if <cpio_source> is a
-		       directory.  "squash" forces all files to gid 0.
-	<cpio_source>  File list or directory for cpio archive.
-		       If <cpio_source> is a .cpio file it will be used
-		       as direct input to initramfs.
-	-d             Output the default cpio list.
-
-All options except -o and -l may be repeated and are interpreted
-sequentially and immediately.  -u and -g states are preserved across
-<cpio_source> options so an explicit "-u 0 -g 0" is required
-to reset the root/group mapping.
-EOF
-}
-
-# awk style field access
-# $1 - field number; rest is argument string
-field() {
-	shift $1 ; echo $1
-}
-
-list_default_initramfs() {
-	# echo usr/kinit/kinit
-	:
-}
-
-default_initramfs() {
-	cat <<-EOF >> ${output}
-		# This is a very simple, default initramfs
-
-		dir /dev 0755 0 0
-		nod /dev/console 0600 0 0 c 5 1
-		dir /root 0700 0 0
-		# file /kinit usr/kinit/kinit 0755 0 0
-		# slink /init kinit 0755 0 0
-	EOF
-}
-
-filetype() {
-	local argv1="$1"
-
-	# symlink test must come before file test
-	if [ -L "${argv1}" ]; then
-		echo "slink"
-	elif [ -f "${argv1}" ]; then
-		echo "file"
-	elif [ -d "${argv1}" ]; then
-		echo "dir"
-	elif [ -b "${argv1}" -o -c "${argv1}" ]; then
-		echo "nod"
-	elif [ -p "${argv1}" ]; then
-		echo "pipe"
-	elif [ -S "${argv1}" ]; then
-		echo "sock"
-	else
-		echo "invalid"
-	fi
-	return 0
-}
-
-list_print_mtime() {
-	:
-}
-
-print_mtime() {
-	local my_mtime="0"
-
-	if [ -e "$1" ]; then
-		my_mtime=$(find "$1" -printf "%T@\n" | sort -r | head -n 1)
-	fi
-
-	echo "# Last modified: ${my_mtime}" >> ${output}
-	echo "" >> ${output}
-}
-
-list_parse() {
-	if [ -L "$1" ]; then
-		return
-	fi
-	echo "$1" | sed 's/:/\\:/g; s/$/ \\/'
-}
-
-# for each file print a line in following format
-# <filetype> <name> <path to file> <octal mode> <uid> <gid>
-# for links, devices etc the format differs. See gen_init_cpio for details
-parse() {
-	local location="$1"
-	local name="/${location#${srcdir}}"
-	# change '//' into '/'
-	name=$(echo "$name" | sed -e 's://*:/:g')
-	local mode="$2"
-	local uid="$3"
-	local gid="$4"
-	local ftype=$(filetype "${location}")
-	# remap uid/gid to 0 if necessary
-	[ "$root_uid" = "squash" ] && uid=0 || [ "$uid" -eq "$root_uid" ] && uid=0
-	[ "$root_gid" = "squash" ] && gid=0 || [ "$gid" -eq "$root_gid" ] && gid=0
-	local str="${mode} ${uid} ${gid}"
-
-	[ "${ftype}" = "invalid" ] && return 0
-	[ "${location}" = "${srcdir}" ] && return 0
-
-	case "${ftype}" in
-		"file")
-			str="${ftype} ${name} ${location} ${str}"
-			;;
-		"nod")
-			local dev="`LC_ALL=C ls -l "${location}"`"
-			local maj=`field 5 ${dev}`
-			local min=`field 6 ${dev}`
-			maj=${maj%,}
-
-			[ -b "${location}" ] && dev="b" || dev="c"
-
-			str="${ftype} ${name} ${str} ${dev} ${maj} ${min}"
-			;;
-		"slink")
-			local target=`readlink "${location}"`
-			str="${ftype} ${name} ${target} ${str}"
-			;;
-		*)
-			str="${ftype} ${name} ${str}"
-			;;
-	esac
-
-	echo "${str}" >> ${output}
-
-	return 0
-}
-
-unknown_option() {
-	printf "ERROR: unknown option \"$arg\"\n" >&2
-	printf "If the filename validly begins with '-', " >&2
-	printf "then it must be prefixed\n" >&2
-	printf "by './' so that it won't be interpreted as an option." >&2
-	printf "\n" >&2
-	usage >&2
-	exit 1
-}
-
-list_header() {
-	:
-}
-
-header() {
-	printf "\n#####################\n# $1\n" >> ${output}
-}
-
-# process one directory (incl sub-directories)
-dir_filelist() {
-	${dep_list}header "$1"
-
-	srcdir=$(echo "$1" | sed -e 's://*:/:g')
-	dirlist=$(find "${srcdir}" -printf "%p %m %U %G\n" | LANG=C sort)
-
-	# If $dirlist is only one line, then the directory is empty
-	if [  "$(echo "${dirlist}" | wc -l)" -gt 1 ]; then
-		${dep_list}print_mtime "$1"
-
-		echo "${dirlist}" | \
-		while read x; do
-			${dep_list}parse ${x}
-		done
-	fi
-}
-
-# if only one file is specified and it is .cpio file then use it direct as fs
-# if a directory is specified then add all files in given direcotry to fs
-# if a regular file is specified assume it is in gen_initramfs format
-input_file() {
-	source="$1"
-	if [ -f "$1" ]; then
-		${dep_list}header "$1"
-		is_cpio="$(echo "$1" | sed 's/^.*\.cpio\(\..*\)\{0,1\}/cpio/')"
-		if [ $2 -eq 0 -a ${is_cpio} = "cpio" ]; then
-			cpio_file=$1
-			echo "$1" | grep -q '^.*\.cpio\..*' && is_cpio_compressed="compressed"
-			[ ! -z ${dep_list} ] && echo "$1"
-			return 0
-		fi
-		if [ -z ${dep_list} ]; then
-			print_mtime "$1" >> ${output}
-			cat "$1"         >> ${output}
-		else
-		        echo "$1 \\"
-			cat "$1" | while read type dir file perm ; do
-				if [ "$type" = "file" ]; then
-					echo "$file \\";
-				fi
-			done
-		fi
-	elif [ -d "$1" ]; then
-		dir_filelist "$1"
-	else
-		echo "  ${prog}: Cannot open '$1'" >&2
-		exit 1
-	fi
-}
-
-prog=$0
-root_uid=0
-root_gid=0
-dep_list=
-cpio_file=
-cpio_list=
-output="/dev/stdout"
-output_file=""
-is_cpio_compressed=
-compr="gzip -n -9 -f"
-
-arg="$1"
-case "$arg" in
-	"-l")	# files included in initramfs - used by kbuild
-		dep_list="list_"
-		echo "deps_initramfs := $0 \\"
-		shift
-		;;
-	"-o")	# generate compressed cpio image named $1
-		shift
-		output_file="$1"
-		cpio_list="$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)"
-		output=${cpio_list}
-		echo "$output_file" | grep -q "\.gz$" \
-                && [ -x "`which gzip 2> /dev/null`" ] \
-                && compr="gzip -n -9 -f"
-		echo "$output_file" | grep -q "\.bz2$" \
-                && [ -x "`which bzip2 2> /dev/null`" ] \
-                && compr="bzip2 -9 -f"
-		echo "$output_file" | grep -q "\.lzma$" \
-                && [ -x "`which lzma 2> /dev/null`" ] \
-                && compr="lzma -9 -f"
-		echo "$output_file" | grep -q "\.xz$" \
-                && [ -x "`which xz 2> /dev/null`" ] \
-                && compr="xz --check=crc32 --lzma2=dict=1MiB"
-		echo "$output_file" | grep -q "\.lzo$" \
-                && [ -x "`which lzop 2> /dev/null`" ] \
-                && compr="lzop -9 -f"
-		echo "$output_file" | grep -q "\.lz4$" \
-                && [ -x "`which lz4 2> /dev/null`" ] \
-                && compr="lz4 -l -9 -f"
-		echo "$output_file" | grep -q "\.cpio$" && compr="cat"
-		shift
-		;;
-esac
-while [ $# -gt 0 ]; do
-	arg="$1"
-	shift
-	case "$arg" in
-		"-u")	# map $1 to uid=0 (root)
-			root_uid="$1"
-			[ "$root_uid" = "-1" ] && root_uid=$(id -u || echo 0)
-			shift
-			;;
-		"-g")	# map $1 to gid=0 (root)
-			root_gid="$1"
-			[ "$root_gid" = "-1" ] && root_gid=$(id -g || echo 0)
-			shift
-			;;
-		"-d")	# display default initramfs list
-			default_list="$arg"
-			${dep_list}default_initramfs
-			;;
-		"-h")
-			usage
-			exit 0
-			;;
-		*)
-			case "$arg" in
-				"-"*)
-					unknown_option
-					;;
-				*)	# input file/dir - process it
-					input_file "$arg" "$#"
-					;;
-			esac
-			;;
-	esac
-done
-
-# If output_file is set we will generate cpio archive and compress it
-# we are careful to delete tmp files
-if [ ! -z ${output_file} ]; then
-	if [ -z ${cpio_file} ]; then
-		timestamp=
-		if test -n "$KBUILD_BUILD_TIMESTAMP"; then
-			timestamp="$(date -d"$KBUILD_BUILD_TIMESTAMP" +%s || :)"
-			if test -n "$timestamp"; then
-				timestamp="-t $timestamp"
-			fi
-		fi
-		cpio_tfile="$(mktemp ${TMPDIR:-/tmp}/cpiofile.XXXXXX)"
-		usr/gen_init_cpio $timestamp ${cpio_list} > ${cpio_tfile}
-	else
-		cpio_tfile=${cpio_file}
-	fi
-	rm ${cpio_list}
-	if [ "${is_cpio_compressed}" = "compressed" ]; then
-		cat ${cpio_tfile} > ${output_file}
-	else
-		(cat ${cpio_tfile} | ${compr}  - > ${output_file}) \
-		|| (rm -f ${output_file} ; false)
-	fi
-	[ -z ${cpio_file} ] && rm ${cpio_tfile}
-fi
-exit 0
diff --git a/usr/include/Makefile b/usr/include/Makefile
index 8459846..a339ef3 100644
--- a/usr/include/Makefile
+++ b/usr/include/Makefile
@@ -16,87 +16,88 @@
 # Please consider to fix the header first.
 #
 # Sorted alphabetically.
-header-test- += asm/shmbuf.h
-header-test- += asm/signal.h
-header-test- += asm/ucontext.h
-header-test- += drm/vmwgfx_drm.h
-header-test- += linux/am437x-vpfe.h
-header-test- += linux/android/binder.h
-header-test- += linux/android/binderfs.h
-header-test- += linux/coda.h
-header-test- += linux/elfcore.h
-header-test- += linux/errqueue.h
-header-test- += linux/fsmap.h
-header-test- += linux/hdlc/ioctl.h
-header-test- += linux/ivtv.h
-header-test- += linux/kexec.h
-header-test- += linux/matroxfb.h
-header-test- += linux/nfc.h
-header-test- += linux/omap3isp.h
-header-test- += linux/omapfb.h
-header-test- += linux/patchkey.h
-header-test- += linux/phonet.h
-header-test- += linux/reiserfs_xattr.h
-header-test- += linux/sctp.h
-header-test- += linux/signal.h
-header-test- += linux/sysctl.h
-header-test- += linux/usb/audio.h
-header-test- += linux/v4l2-mediabus.h
-header-test- += linux/v4l2-subdev.h
-header-test- += linux/videodev2.h
-header-test- += linux/vm_sockets.h
-header-test- += sound/asequencer.h
-header-test- += sound/asoc.h
-header-test- += sound/asound.h
-header-test- += sound/compress_offload.h
-header-test- += sound/emu10k1.h
-header-test- += sound/sfnt_info.h
-header-test- += xen/evtchn.h
-header-test- += xen/gntdev.h
-header-test- += xen/privcmd.h
+no-header-test += asm/shmbuf.h
+no-header-test += asm/signal.h
+no-header-test += asm/ucontext.h
+no-header-test += drm/vmwgfx_drm.h
+no-header-test += linux/am437x-vpfe.h
+no-header-test += linux/android/binder.h
+no-header-test += linux/android/binderfs.h
+no-header-test += linux/coda.h
+no-header-test += linux/elfcore.h
+no-header-test += linux/errqueue.h
+no-header-test += linux/fsmap.h
+no-header-test += linux/hdlc/ioctl.h
+no-header-test += linux/ivtv.h
+no-header-test += linux/kexec.h
+no-header-test += linux/matroxfb.h
+no-header-test += linux/nfc.h
+no-header-test += linux/omap3isp.h
+no-header-test += linux/omapfb.h
+no-header-test += linux/patchkey.h
+no-header-test += linux/phonet.h
+no-header-test += linux/reiserfs_xattr.h
+no-header-test += linux/sctp.h
+no-header-test += linux/signal.h
+no-header-test += linux/sysctl.h
+no-header-test += linux/usb/audio.h
+no-header-test += linux/v4l2-mediabus.h
+no-header-test += linux/v4l2-subdev.h
+no-header-test += linux/videodev2.h
+no-header-test += linux/vm_sockets.h
+no-header-test += sound/asequencer.h
+no-header-test += sound/asoc.h
+no-header-test += sound/asound.h
+no-header-test += sound/compress_offload.h
+no-header-test += sound/emu10k1.h
+no-header-test += sound/sfnt_info.h
+no-header-test += xen/evtchn.h
+no-header-test += xen/gntdev.h
+no-header-test += xen/privcmd.h
 
 # More headers are broken in some architectures
 
 ifeq ($(SRCARCH),arc)
-header-test- += linux/bpf_perf_event.h
+no-header-test += linux/bpf_perf_event.h
 endif
 
 ifeq ($(SRCARCH),ia64)
-header-test- += asm/setup.h
-header-test- += asm/sigcontext.h
-header-test- += asm/perfmon.h
-header-test- += asm/perfmon_default_smpl.h
-header-test- += linux/if_bonding.h
+no-header-test += asm/setup.h
+no-header-test += asm/sigcontext.h
+no-header-test += asm/perfmon.h
+no-header-test += asm/perfmon_default_smpl.h
+no-header-test += linux/if_bonding.h
 endif
 
 ifeq ($(SRCARCH),mips)
-header-test- += asm/stat.h
+no-header-test += asm/stat.h
 endif
 
 ifeq ($(SRCARCH),powerpc)
-header-test- += asm/stat.h
-header-test- += linux/bpf_perf_event.h
+no-header-test += asm/stat.h
+no-header-test += linux/bpf_perf_event.h
 endif
 
 ifeq ($(SRCARCH),riscv)
-header-test- += linux/bpf_perf_event.h
+no-header-test += linux/bpf_perf_event.h
 endif
 
 ifeq ($(SRCARCH),sparc)
-header-test- += asm/stat.h
-header-test- += asm/uctx.h
-header-test- += asm/fbio.h
+no-header-test += asm/stat.h
+no-header-test += asm/uctx.h
+no-header-test += asm/fbio.h
 endif
 
 # asm-generic/*.h is used by asm/*.h, and should not be included directly
-header-test- += asm-generic/%
+no-header-test += asm-generic/%
 
 extra-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h' 2>/dev/null))
 
+# Include the header twice to detect a missing include guard.
 quiet_cmd_hdrtest = HDRTEST $<
       cmd_hdrtest = \
 		$(CC) $(c_flags) -S -o /dev/null -x c /dev/null \
-			$(if $(filter-out $(header-test-), $*.h), -include $<); \
+			$(if $(filter-out $(no-header-test), $*.h), -include $< -include $<); \
 		$(PERL) $(srctree)/scripts/headers_check.pl $(obj) $(SRCARCH) $<; \
 		touch $@
 
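
Passing "-include $< -include $<" pulls the header into the same translation unit twice, so a header that lacks an include guard (or #pragma once) typically fails with redefinition errors, which is how the HDRTEST rule now detects a missing guard. The pattern being checked for is the usual one; a hypothetical UAPI header for illustration:

/* linux/example.h - hypothetical header, shown only to illustrate the guard */
#ifndef _UAPI_LINUX_EXAMPLE_H
#define _UAPI_LINUX_EXAMPLE_H

struct example_args {
	unsigned int flags;
};

#endif /* _UAPI_LINUX_EXAMPLE_H */

With the guard present, the second -include is a no-op; without it, the struct is defined twice and the compile fails.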
diff --git a/usr/initramfs_data.S b/usr/initramfs_data.S
index d07648f..cd67edc 100644
--- a/usr/initramfs_data.S
+++ b/usr/initramfs_data.S
@@ -22,12 +22,9 @@
   in the ELF header, as required by certain architectures.
 */
 
-#include <linux/stringify.h>
-#include <asm-generic/vmlinux.lds.h>
-
 .section .init.ramfs,"a"
 __irf_start:
-.incbin __stringify(INITRAMFS_IMAGE)
+.incbin "usr/initramfs_inc_data"
 __irf_end:
 .section .init.ramfs.info,"a"
 .globl __initramfs_size
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
index c4c57ba..0a356aa 100644
--- a/virt/kvm/arm/aarch32.c
+++ b/virt/kvm/arm/aarch32.c
@@ -10,10 +10,15 @@
  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
  */
 
+#include <linux/bits.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 
+#define DFSR_FSC_EXTABT_LPAE	0x10
+#define DFSR_FSC_EXTABT_nLPAE	0x08
+#define DFSR_LPAE		BIT(9)
+
 /*
  * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
  */
@@ -28,25 +33,115 @@ static const u8 return_offsets[8][2] = {
 	[7] = { 4, 4 },		/* FIQ, unused */
 };
 
+/*
+ * When an exception is taken, most CPSR fields are left unchanged in the
+ * handler. However, some are explicitly overridden (e.g. M[4:0]).
+ *
+ * The SPSR/SPSR_ELx layouts differ, and the below is intended to work with
+ * either format. Note: SPSR.J bit doesn't exist in SPSR_ELx, but this bit was
+ * obsoleted by the ARMv7 virtualization extensions and is RES0.
+ *
+ * For the SPSR layout seen from AArch32, see:
+ * - ARM DDI 0406C.d, page B1-1148
+ * - ARM DDI 0487E.a, page G8-6264
+ *
+ * For the SPSR_ELx layout for AArch32 seen from AArch64, see:
+ * - ARM DDI 0487E.a, page C5-426
+ *
+ * Here we manipulate the fields in order of the AArch32 SPSR_ELx layout, from
+ * MSB to LSB.
+ */
+static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
+{
+	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+	unsigned long old, new;
+
+	old = *vcpu_cpsr(vcpu);
+	new = 0;
+
+	new |= (old & PSR_AA32_N_BIT);
+	new |= (old & PSR_AA32_Z_BIT);
+	new |= (old & PSR_AA32_C_BIT);
+	new |= (old & PSR_AA32_V_BIT);
+	new |= (old & PSR_AA32_Q_BIT);
+
+	// CPSR.IT[7:0] are set to zero upon any exception
+	// See ARM DDI 0487E.a, section G1.12.3
+	// See ARM DDI 0406C.d, section B1.8.3
+
+	new |= (old & PSR_AA32_DIT_BIT);
+
+	// CPSR.SSBS is set to SCTLR.DSSBS upon any exception
+	// See ARM DDI 0487E.a, page G8-6244
+	if (sctlr & BIT(31))
+		new |= PSR_AA32_SSBS_BIT;
+
+	// CPSR.PAN is unchanged unless SCTLR.SPAN == 0b0
+	// SCTLR.SPAN is RES1 when ARMv8.1-PAN is not implemented
+	// See ARM DDI 0487E.a, page G8-6246
+	new |= (old & PSR_AA32_PAN_BIT);
+	if (!(sctlr & BIT(23)))
+		new |= PSR_AA32_PAN_BIT;
+
+	// SS does not exist in AArch32, so ignore
+
+	// CPSR.IL is set to zero upon any exception
+	// See ARM DDI 0487E.a, page G1-5527
+
+	new |= (old & PSR_AA32_GE_MASK);
+
+	// CPSR.IT[7:0] are set to zero upon any exception
+	// See prior comment above
+
+	// CPSR.E is set to SCTLR.EE upon any exception
+	// See ARM DDI 0487E.a, page G8-6245
+	// See ARM DDI 0406C.d, page B4-1701
+	if (sctlr & BIT(25))
+		new |= PSR_AA32_E_BIT;
+
+	// CPSR.A is unchanged upon an exception to Undefined, Supervisor
+	// CPSR.A is set upon an exception to other modes
+	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
+	// See ARM DDI 0406C.d, page B1-1182
+	new |= (old & PSR_AA32_A_BIT);
+	if (mode != PSR_AA32_MODE_UND && mode != PSR_AA32_MODE_SVC)
+		new |= PSR_AA32_A_BIT;
+
+	// CPSR.I is set upon any exception
+	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
+	// See ARM DDI 0406C.d, page B1-1182
+	new |= PSR_AA32_I_BIT;
+
+	// CPSR.F is set upon an exception to FIQ
+	// CPSR.F is unchanged upon an exception to other modes
+	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
+	// See ARM DDI 0406C.d, page B1-1182
+	new |= (old & PSR_AA32_F_BIT);
+	if (mode == PSR_AA32_MODE_FIQ)
+		new |= PSR_AA32_F_BIT;
+
+	// CPSR.T is set to SCTLR.TE upon any exception
+	// See ARM DDI 0487E.a, page G8-5514
+	// See ARM DDI 0406C.d, page B1-1181
+	if (sctlr & BIT(30))
+		new |= PSR_AA32_T_BIT;
+
+	new |= mode;
+
+	return new;
+}
+
 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
-	unsigned long cpsr;
-	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
-	bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
+	unsigned long spsr = *vcpu_cpsr(vcpu);
+	bool is_thumb = (spsr & PSR_AA32_T_BIT);
 	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
 	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
 
-	cpsr = mode | PSR_AA32_I_BIT;
-
-	if (sctlr & (1 << 30))
-		cpsr |= PSR_AA32_T_BIT;
-	if (sctlr & (1 << 25))
-		cpsr |= PSR_AA32_E_BIT;
-
-	*vcpu_cpsr(vcpu) = cpsr;
+	*vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);
 
 	/* Note: These now point to the banked copies */
-	vcpu_write_spsr(vcpu, new_spsr_value);
+	vcpu_write_spsr(vcpu, host_spsr_to_spsr32(spsr));
 	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
 	/* Branch to exception vector */
@@ -84,16 +179,18 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 		fsr = &vcpu_cp15(vcpu, c5_DFSR);
 	}
 
-	prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);
+	prepare_fault32(vcpu, PSR_AA32_MODE_ABT, vect_offset);
 
 	*far = addr;
 
 	/* Give the guest an IMPLEMENTATION DEFINED exception */
 	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
-	if (is_lpae)
-		*fsr = 1 << 9 | 0x34;
-	else
-		*fsr = 0x14;
+	if (is_lpae) {
+		*fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
+	} else {
+		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
+		*fsr = DFSR_FSC_EXTABT_nLPAE;
+	}
 }
 
 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
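
The get_except32_cpsr() logic above is easiest to follow as plain bit assembly: keep the condition flags from the old CPSR, then recompute the mask, endianness and Thumb bits from the target mode and a snapshot of SCTLR. The standalone sketch below mirrors only a subset of that flow; the bit positions and SCTLR fields are illustrative stand-ins for the real PSR_AA32_* and SCTLR definitions, not the kernel headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; stand-ins for the PSR_AA32_* macros. */
#define NZCV_MASK	(0xfu << 28)	/* N, Z, C, V condition flags      */
#define CPSR_E		(1u << 9)	/* endianness on exception entry   */
#define CPSR_A		(1u << 8)	/* asynchronous abort mask         */
#define CPSR_I		(1u << 7)	/* IRQ mask                        */
#define CPSR_F		(1u << 6)	/* FIQ mask                        */
#define CPSR_T		(1u << 5)	/* Thumb state                     */
#define MODE_FIQ	0x11u
#define MODE_SVC	0x13u
#define MODE_ABT	0x17u
#define MODE_UND	0x1bu
#define SCTLR_TE	(1u << 30)	/* exceptions taken in Thumb state */
#define SCTLR_EE	(1u << 25)	/* big-endian exception entry      */

/* Mirror of the get_except32_cpsr() flow: keep the flags, recompute the rest. */
static uint32_t build_except32_cpsr(uint32_t old, uint32_t sctlr, uint32_t mode)
{
	uint32_t new = old & NZCV_MASK;		/* condition flags survive      */

	if (sctlr & SCTLR_EE)
		new |= CPSR_E;			/* CPSR.E := SCTLR.EE           */
	new |= old & CPSR_A;			/* A unchanged for UND/SVC ...  */
	if (mode != MODE_UND && mode != MODE_SVC)
		new |= CPSR_A;			/* ... but set for other modes  */
	new |= CPSR_I;				/* IRQs always masked on entry  */
	new |= old & CPSR_F;			/* F only forced when ...       */
	if (mode == MODE_FIQ)
		new |= CPSR_F;			/* ... entering FIQ             */
	if (sctlr & SCTLR_TE)
		new |= CPSR_T;			/* CPSR.T := SCTLR.TE           */

	return new | mode;
}

int main(void)
{
	uint32_t old = 0x60000010;		/* Z and C set, USR mode        */

	printf("ABT entry: %#x\n", (unsigned)build_except32_cpsr(old, SCTLR_TE, MODE_ABT));
	printf("FIQ entry: %#x\n", (unsigned)build_except32_cpsr(old, 0, MODE_FIQ));
	return 0;
}
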
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index f182b23..0d9438e 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -805,6 +805,7 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
 	switch (treg) {
 	case TIMER_REG_TVAL:
 		val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
+		val &= lower_32_bits(val);
 		break;
 
 	case TIMER_REG_CTL:
@@ -850,7 +851,7 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
 {
 	switch (treg) {
 	case TIMER_REG_TVAL:
-		timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val;
+		timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
 		break;
 
 	case TIMER_REG_CTL:
@@ -1022,7 +1023,7 @@ static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
 
 bool kvm_arch_timer_get_input_level(int vintid)
 {
-	struct kvm_vcpu *vcpu = kvm_arm_get_running_vcpu();
+	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
 	struct arch_timer_context *timer;
 
 	if (vintid == vcpu_vtimer(vcpu)->irq.irq)
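
The two TVAL hunks above deal with the same quirk: TVAL is a signed 32-bit downcounter view of the 64-bit CVAL compare value, so reads must be truncated to 32 bits and writes must be sign-extended before being folded back into the counter. A minimal arithmetic sketch, assuming a zero counter offset by default:

#include <stdint.h>
#include <stdio.h>

/*
 * TVAL read: truncate the 64-bit difference to 32 bits.
 * TVAL write: sign-extend the guest value before adding it to the counter.
 */
static uint64_t tval_read(uint64_t cval, uint64_t cnt, uint64_t voff)
{
	uint64_t val = cval - cnt + voff;

	return val & 0xffffffffu;		/* lower_32_bits(val)    */
}

static uint64_t tval_write(uint32_t val, uint64_t cnt, uint64_t voff)
{
	return cnt - voff + (int32_t)val;	/* sign-extend the write */
}

int main(void)
{
	uint64_t cnt = 1000000, voff = 0;

	/* Writing a "negative" TVAL must land the compare value in the past. */
	uint64_t cval = tval_write((uint32_t)-100, cnt, voff);

	printf("cval = %llu\n", (unsigned long long)cval);
	printf("tval read back = %d\n", (int)(int32_t)tval_read(cval, cnt, voff));
	return 0;
}

Without the (s32) cast, writing a negative TVAL would program a compare value roughly 2^32 ticks in the future instead of one that has already expired.
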
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 8de4daf..d65a0fa 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -20,8 +20,6 @@
 #include <linux/irqbypass.h>
 #include <linux/sched/stat.h>
 #include <trace/events/kvm.h>
-#include <kvm/arm_pmu.h>
-#include <kvm/arm_psci.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -51,9 +49,6 @@ __asm__(".arch_extension	virt");
 DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 
-/* Per-CPU variable containing the currently running vcpu. */
-static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
-
 /* The VMID used in the VTTBR */
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
@@ -62,31 +57,8 @@ static DEFINE_SPINLOCK(kvm_vmid_lock);
 static bool vgic_present;
 
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
-
-static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
-{
-	__this_cpu_write(kvm_arm_running_vcpu, vcpu);
-}
-
 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
-/**
- * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
- * Must be called from non-preemptible context
- */
-struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
-{
-	return __this_cpu_read(kvm_arm_running_vcpu);
-}
-
-/**
- * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus.
- */
-struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
-{
-	return &kvm_arm_running_vcpu;
-}
-
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -194,7 +166,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
-			kvm_arch_vcpu_free(kvm->vcpus[i]);
+			kvm_vcpu_destroy(kvm->vcpus[i]);
 			kvm->vcpus[i] = NULL;
 		}
 	}
@@ -279,49 +251,46 @@ void kvm_arch_free_vm(struct kvm *kvm)
 		vfree(kvm);
 }
 
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
+		return -EBUSY;
+
+	if (id >= kvm->arch.max_vcpus)
+		return -EINVAL;
+
+	return 0;
+}
+
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	int err;
-	struct kvm_vcpu *vcpu;
 
-	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
-		err = -EBUSY;
-		goto out;
-	}
+	/* Force users to call KVM_ARM_VCPU_INIT */
+	vcpu->arch.target = -1;
+	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 
-	if (id >= kvm->arch.max_vcpus) {
-		err = -EINVAL;
-		goto out;
-	}
+	/* Set up the timer */
+	kvm_timer_vcpu_init(vcpu);
 
-	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu) {
-		err = -ENOMEM;
-		goto out;
-	}
+	kvm_pmu_vcpu_init(vcpu);
 
-	err = kvm_vcpu_init(vcpu, kvm, id);
+	kvm_arm_reset_debug_ptr(vcpu);
+
+	kvm_arm_pvtime_vcpu_init(&vcpu->arch);
+
+	err = kvm_vgic_vcpu_init(vcpu);
 	if (err)
-		goto free_vcpu;
+		return err;
 
-	err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
-	if (err)
-		goto vcpu_uninit;
-
-	return vcpu;
-vcpu_uninit:
-	kvm_vcpu_uninit(vcpu);
-free_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
-	return ERR_PTR(err);
+	return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
 }
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
 }
 
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
 		static_branch_dec(&userspace_irqchip_in_use);
@@ -329,13 +298,8 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 	kvm_mmu_free_memory_caches(vcpu);
 	kvm_timer_vcpu_terminate(vcpu);
 	kvm_pmu_vcpu_destroy(vcpu);
-	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
-}
 
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
-	kvm_arch_vcpu_free(vcpu);
+	kvm_arm_vcpu_destroy(vcpu);
 }
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -368,24 +332,6 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 	preempt_enable();
 }
 
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	/* Force users to call KVM_ARM_VCPU_INIT */
-	vcpu->arch.target = -1;
-	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-
-	/* Set up the timer */
-	kvm_timer_vcpu_init(vcpu);
-
-	kvm_pmu_vcpu_init(vcpu);
-
-	kvm_arm_reset_debug_ptr(vcpu);
-
-	kvm_arm_pvtime_vcpu_init(&vcpu->arch);
-
-	return kvm_vgic_vcpu_init(vcpu);
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	int *last_ran;
@@ -406,7 +352,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->cpu = cpu;
 	vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
 
-	kvm_arm_set_running_vcpu(vcpu);
 	kvm_vgic_load(vcpu);
 	kvm_timer_vcpu_load(vcpu);
 	kvm_vcpu_load_sysregs(vcpu);
@@ -432,8 +377,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_vcpu_pmu_restore_host(vcpu);
 
 	vcpu->cpu = -1;
-
-	kvm_arm_set_running_vcpu(NULL);
 }
 
 static void vcpu_power_off(struct kvm_vcpu *vcpu)
@@ -1537,7 +1480,6 @@ static void teardown_hyp_mode(void)
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu)
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
-	hyp_cpu_pm_exit();
 }
 
 /**
@@ -1751,6 +1693,7 @@ int kvm_arch_init(void *opaque)
 	return 0;
 
 out_hyp:
+	hyp_cpu_pm_exit();
 	if (!in_hyp_mode)
 		teardown_hyp_mode();
 out_err:
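
The arm.c rework above splits vcpu creation into a cheap precreate check and an arch init step that runs on memory allocated by common code, so the arch side no longer owns allocation and every failure unwinds in one place. The sketch below shows that shape only; the structures and helpers are illustrative, not the KVM API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct vcpu {
	int id;
	int target;
};

static int arch_vcpu_precreate(int id, int max_vcpus)
{
	return (id >= max_vcpus) ? -EINVAL : 0;	/* reject before allocating    */
}

static int arch_vcpu_create(struct vcpu *vcpu)
{
	vcpu->target = -1;			/* force a later explicit init */
	return 0;
}

static struct vcpu *create_vcpu(int id, int max_vcpus)
{
	struct vcpu *vcpu;

	if (arch_vcpu_precreate(id, max_vcpus))
		return NULL;

	vcpu = calloc(1, sizeof(*vcpu));	/* common code owns the memory */
	if (!vcpu)
		return NULL;

	vcpu->id = id;
	if (arch_vcpu_create(vcpu)) {
		free(vcpu);			/* single unwind point         */
		return NULL;
	}
	return vcpu;
}

int main(void)
{
	struct vcpu *v = create_vcpu(2, 8);

	printf("created: %s\n", v ? "yes" : "no");
	free(v);
	return 0;
}
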
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index 70d3b44..aedfcff 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -5,7 +5,6 @@
  */
 
 #include <linux/kvm_host.h>
-#include <asm/kvm_mmio.h>
 #include <asm/kvm_emulate.h>
 #include <trace/events/kvm.h>
 
@@ -92,23 +91,23 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 	vcpu->mmio_needed = 0;
 
-	if (!run->mmio.is_write) {
-		len = run->mmio.len;
-		if (len > sizeof(unsigned long))
-			return -EINVAL;
-
+	if (!kvm_vcpu_dabt_iswrite(vcpu)) {
+		len = kvm_vcpu_dabt_get_as(vcpu);
 		data = kvm_mmio_read_buf(run->mmio.data, len);
 
-		if (vcpu->arch.mmio_decode.sign_extend &&
+		if (kvm_vcpu_dabt_issext(vcpu) &&
 		    len < sizeof(unsigned long)) {
 			mask = 1U << ((len * 8) - 1);
 			data = (data ^ mask) - mask;
 		}
 
+		if (!kvm_vcpu_dabt_issf(vcpu))
+			data = data & 0xffffffff;
+
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
 			       &data);
 		data = vcpu_data_host_to_guest(vcpu, data, len);
-		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
+		vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
 	}
 
 	/*
@@ -120,33 +119,6 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 0;
 }
 
-static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
-{
-	unsigned long rt;
-	int access_size;
-	bool sign_extend;
-
-	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
-		/* page table accesses IO mem: tell guest to fix its TTBR */
-		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-		return 1;
-	}
-
-	access_size = kvm_vcpu_dabt_get_as(vcpu);
-	if (unlikely(access_size < 0))
-		return access_size;
-
-	*is_write = kvm_vcpu_dabt_iswrite(vcpu);
-	sign_extend = kvm_vcpu_dabt_issext(vcpu);
-	rt = kvm_vcpu_dabt_get_rd(vcpu);
-
-	*len = access_size;
-	vcpu->arch.mmio_decode.sign_extend = sign_extend;
-	vcpu->arch.mmio_decode.rt = rt;
-
-	return 0;
-}
-
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		 phys_addr_t fault_ipa)
 {
@@ -158,15 +130,10 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	u8 data_buf[8];
 
 	/*
-	 * Prepare MMIO operation. First decode the syndrome data we get
-	 * from the CPU. Then try if some in-kernel emulation feels
-	 * responsible, otherwise let user space do its magic.
+	 * No valid syndrome? Ask userspace for help if it has
+	 * volunteered to do so, and bail out otherwise.
 	 */
-	if (kvm_vcpu_dabt_isvalid(vcpu)) {
-		ret = decode_hsr(vcpu, &is_write, &len);
-		if (ret)
-			return ret;
-	} else {
+	if (!kvm_vcpu_dabt_isvalid(vcpu)) {
 		if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {
 			run->exit_reason = KVM_EXIT_ARM_NISV;
 			run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
@@ -178,7 +145,20 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		return -ENOSYS;
 	}
 
-	rt = vcpu->arch.mmio_decode.rt;
+	/* Page table accesses IO mem: tell guest to fix its TTBR */
+	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+		return 1;
+	}
+
+	/*
+	 * Prepare MMIO operation. First decode the syndrome data we get
+	 * from the CPU. Then try if some in-kernel emulation feels
+	 * responsible, otherwise let user space do its magic.
+	 */
+	is_write = kvm_vcpu_dabt_iswrite(vcpu);
+	len = kvm_vcpu_dabt_get_as(vcpu);
+	rt = kvm_vcpu_dabt_get_rd(vcpu);
 
 	if (is_write) {
 		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
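
The mmio.c hunks now pull the access size, target register and sign-extension flag straight from the syndrome on both the fault and the return path. The sign extension itself uses the (data ^ mask) - mask idiom, shown in isolation below.

#include <stdint.h>
#include <stdio.h>

/*
 * mask has only the sign bit of the narrow access set, so XOR-then-subtract
 * sign-extends an 8/16/32-bit value into 64 bits.
 */
static uint64_t sign_extend(uint64_t data, unsigned int len_bytes)
{
	uint64_t mask;

	if (len_bytes >= sizeof(uint64_t))
		return data;

	mask = 1ull << (len_bytes * 8 - 1);
	return (data ^ mask) - mask;
}

int main(void)
{
	/* 0xfe as a signed byte is -2; extended it becomes 0xfffffffffffffffe */
	printf("%#llx\n", (unsigned long long)sign_extend(0xfe, 1));
	/* 0x7fff has a clear sign bit and stays positive */
	printf("%#llx\n", (unsigned long long)sign_extend(0x7fff, 2));
	return 0;
}
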
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 0b32a90..19c961a 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -14,7 +14,6 @@
 #include <asm/cacheflush.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
-#include <asm/kvm_mmio.h>
 #include <asm/kvm_ras.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
@@ -1377,14 +1376,8 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
 	kvm_pfn_t pfn = *pfnp;
 	gfn_t gfn = *ipap >> PAGE_SHIFT;
-	struct page *page = pfn_to_page(pfn);
 
-	/*
-	 * PageTransCompoundMap() returns true for THP and
-	 * hugetlbfs. Make sure the adjustment is done only for THP
-	 * pages.
-	 */
-	if (!PageHuge(page) && PageTransCompoundMap(page)) {
+	if (kvm_is_transparent_hugepage(pfn)) {
 		unsigned long mask;
 		/*
 		 * The address we faulted on is backed by a transparent huge
@@ -1596,16 +1589,8 @@ static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
 	__invalidate_icache_guest_page(pfn, size);
 }
 
-static void kvm_send_hwpoison_signal(unsigned long address,
-				     struct vm_area_struct *vma)
+static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
 {
-	short lsb;
-
-	if (is_vm_hugetlb_page(vma))
-		lsb = huge_page_shift(hstate_vma(vma));
-	else
-		lsb = PAGE_SHIFT;
-
 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
 }
 
@@ -1678,6 +1663,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
 	struct vm_area_struct *vma;
+	short vma_shift;
 	kvm_pfn_t pfn;
 	pgprot_t mem_type = PAGE_S2;
 	bool logging_active = memslot_is_logging(memslot);
@@ -1701,7 +1687,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		return -EFAULT;
 	}
 
-	vma_pagesize = vma_kernel_pagesize(vma);
+	if (is_vm_hugetlb_page(vma))
+		vma_shift = huge_page_shift(hstate_vma(vma));
+	else
+		vma_shift = PAGE_SHIFT;
+
+	vma_pagesize = 1ULL << vma_shift;
 	if (logging_active ||
 	    (vma->vm_flags & VM_PFNMAP) ||
 	    !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
@@ -1741,7 +1732,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
 	if (pfn == KVM_PFN_ERR_HWPOISON) {
-		kvm_send_hwpoison_signal(hva, vma);
+		kvm_send_hwpoison_signal(hva, vma_shift);
 		return 0;
 	}
 	if (is_error_noslot_pfn(pfn))
@@ -2147,7 +2138,8 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 	if (!kvm->arch.pgd)
 		return 0;
 	trace_kvm_test_age_hva(hva);
-	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
+	return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
+				 kvm_test_age_hva_handler, NULL);
 }
 
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
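
In user_mem_abort() the fault handler now carries the page-size shift (PAGE_SHIFT or the hugetlb shift) rather than a byte size, so one value can both size the stage-2 mapping and feed the hwpoison signal's lsb field. A toy calculation, with shifts picked for a 4K-page configuration purely as an example:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21	/* 2M hugepages on a 4K setup, for illustration */

static uint64_t align_down(uint64_t addr, unsigned int shift)
{
	return addr & ~((1ull << shift) - 1);
}

int main(void)
{
	uint64_t fault_ipa = 0x40123456;
	unsigned int vma_shift = PMD_SHIFT;	/* pretend the VMA is hugetlb   */
	uint64_t vma_pagesize = 1ull << vma_shift;

	printf("map %llu bytes at %#llx (hwpoison lsb=%u)\n",
	       (unsigned long long)vma_pagesize,
	       (unsigned long long)align_down(fault_ipa, vma_shift),
	       vma_shift);
	return 0;
}
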
diff --git a/virt/kvm/arm/perf.c b/virt/kvm/arm/perf.c
index 918cdc3..d45b8b9 100644
--- a/virt/kvm/arm/perf.c
+++ b/virt/kvm/arm/perf.c
@@ -13,14 +13,14 @@
 
 static int kvm_is_in_guest(void)
 {
-        return kvm_arm_get_running_vcpu() != NULL;
+        return kvm_get_running_vcpu() != NULL;
 }
 
 static int kvm_is_user_mode(void)
 {
 	struct kvm_vcpu *vcpu;
 
-	vcpu = kvm_arm_get_running_vcpu();
+	vcpu = kvm_get_running_vcpu();
 
 	if (vcpu)
 		return !vcpu_mode_priv(vcpu);
@@ -32,7 +32,7 @@ static unsigned long kvm_get_guest_ip(void)
 {
 	struct kvm_vcpu *vcpu;
 
-	vcpu = kvm_arm_get_running_vcpu();
+	vcpu = kvm_get_running_vcpu();
 
 	if (vcpu)
 		return *vcpu_pc(vcpu);
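
The perf.c hunk only swaps kvm_arm_get_running_vcpu() for the generic kvm_get_running_vcpu(); each callback still reduces to "is a vcpu loaded on this CPU right now, and what state is it in". A userspace stand-in, where a plain pointer plays the per-CPU variable:

#include <stdio.h>

struct vcpu {
	unsigned long pc;
	int priv;
};

static struct vcpu *running_vcpu;	/* stands in for the per-CPU variable */

static int is_in_guest(void)
{
	return running_vcpu != NULL;
}

static int is_user_mode(void)
{
	return running_vcpu ? !running_vcpu->priv : 0;
}

static unsigned long guest_ip(void)
{
	return running_vcpu ? running_vcpu->pc : 0;
}

int main(void)
{
	struct vcpu v = { .pc = 0x80080000, .priv = 1 };

	running_vcpu = &v;		/* vcpu_load()  */
	printf("in_guest=%d user=%d ip=%#lx\n",
	       is_in_guest(), is_user_mode(), guest_ip());
	running_vcpu = NULL;		/* vcpu_put()   */
	printf("in_guest=%d\n", is_in_guest());
	return 0;
}
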
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 8731dfe..f0d0312 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -15,6 +15,8 @@
 #include <kvm/arm_vgic.h>
 
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
+static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
+static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
 
 #define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
 
@@ -75,6 +77,13 @@ static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
 
 	return pmc;
 }
+static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
+{
+	if (kvm_pmu_idx_is_high_counter(pmc->idx))
+		return pmc - 1;
+	else
+		return pmc + 1;
+}
 
 /**
  * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
@@ -238,10 +247,11 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
  */
 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
 {
-	int i;
+	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	int i;
 
-	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
+	for_each_set_bit(i, &mask, 32)
 		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
 
 	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
@@ -294,15 +304,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 
 		pmc = &pmu->pmc[i];
 
-		/*
-		 * For high counters of chained events we must recreate the
-		 * perf event with the long (64bit) attribute set.
-		 */
-		if (kvm_pmu_pmc_is_chained(pmc) &&
-		    kvm_pmu_idx_is_high_counter(i)) {
-			kvm_pmu_create_perf_event(vcpu, i);
-			continue;
-		}
+		/* A change in the enable state may affect the chain state */
+		kvm_pmu_update_pmc_chained(vcpu, i);
+		kvm_pmu_create_perf_event(vcpu, i);
 
 		/* At this point, pmc must be the canonical */
 		if (pmc->perf_event) {
@@ -335,15 +339,9 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 
 		pmc = &pmu->pmc[i];
 
-		/*
-		 * For high counters of chained events we must recreate the
-		 * perf event with the long (64bit) attribute unset.
-		 */
-		if (kvm_pmu_pmc_is_chained(pmc) &&
-		    kvm_pmu_idx_is_high_counter(i)) {
-			kvm_pmu_create_perf_event(vcpu, i);
-			continue;
-		}
+		/* A change in the enable state may affect the chain state */
+		kvm_pmu_update_pmc_chained(vcpu, i);
+		kvm_pmu_create_perf_event(vcpu, i);
 
 		/* At this point, pmc must be the canonical */
 		if (pmc->perf_event)
@@ -480,25 +478,45 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
  */
 void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
 {
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	int i;
-	u64 type, enable, reg;
 
-	if (val == 0)
+	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
 		return;
 
-	enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+	/* Weed out disabled counters */
+	val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+
 	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
+		u64 type, reg;
+
 		if (!(val & BIT(i)))
 			continue;
-		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
-		       & ARMV8_PMU_EVTYPE_EVENT;
-		if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
-		    && (enable & BIT(i))) {
-			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+
+		/* PMSWINC only applies to ... SW_INC! */
+		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
+		type &= ARMV8_PMU_EVTYPE_EVENT;
+		if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
+			continue;
+
+		/* increment this SW_INC event counter */
+		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+		reg = lower_32_bits(reg);
+		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
+
+		if (reg) /* no overflow on the low part */
+			continue;
+
+		if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
+			/* increment the high counter */
+			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
 			reg = lower_32_bits(reg);
-			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
-			if (!reg)
-				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
+			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
+			if (!reg) /* mark overflow on the high counter */
+				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
+		} else {
+			/* mark overflow on low counter */
+			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
 		}
 	}
 }
@@ -510,10 +528,9 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
  */
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
-	u64 mask;
+	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
 	int i;
 
-	mask = kvm_pmu_valid_counter_mask(vcpu);
 	if (val & ARMV8_PMU_PMCR_E) {
 		kvm_pmu_enable_counter_mask(vcpu,
 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
@@ -525,7 +542,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
 
 	if (val & ARMV8_PMU_PMCR_P) {
-		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
+		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_counter_value(vcpu, i, 0);
 	}
 }
@@ -582,15 +599,14 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 
 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 
-	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
+	if (kvm_pmu_pmc_is_chained(pmc)) {
 		/**
 		 * The initial sample period (overflow count) of an event. For
 		 * chained counters we only support overflow interrupts on the
 		 * high counter.
 		 */
 		attr.sample_period = (-counter) & GENMASK(63, 0);
-		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
-			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+		attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
 
 		event = perf_event_create_kernel_counter(&attr, -1, current,
 							 kvm_pmu_perf_overflow,
@@ -621,25 +637,33 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
  * @select_idx: The number of selected counter
  *
  * Update the chained bitmap based on the event type written in the
- * typer register.
+ * typer register and the enable state of the odd register.
  */
 static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
 {
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+	struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
+	bool new_state, old_state;
 
-	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
+	old_state = kvm_pmu_pmc_is_chained(pmc);
+	new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
+		    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);
+
+	if (old_state == new_state)
+		return;
+
+	canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
+	kvm_pmu_stop_counter(vcpu, canonical_pmc);
+	if (new_state) {
 		/*
 		 * During promotion from !chained to chained we must ensure
 		 * the adjacent counter is stopped and its event destroyed
 		 */
-		if (!kvm_pmu_pmc_is_chained(pmc))
-			kvm_pmu_stop_counter(vcpu, pmc);
-
+		kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
 		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
-	} else {
-		clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
+		return;
 	}
+	clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
 }
 
 /**
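
The PMSWINC rewrite above makes the chained-counter case explicit: each architectural counter is 32 bits wide, and when the low half of a chained pair wraps, the carry goes into the high counter instead of raising an overflow on the low one. Reduced to plain arithmetic:

#include <stdint.h>
#include <stdio.h>

struct pair {
	uint32_t lo, hi;
	int overflow_lo, overflow_hi;
};

static void sw_increment(struct pair *p, int chained)
{
	p->lo += 1;
	if (p->lo)			/* no wrap on the low part, done       */
		return;

	if (chained) {
		p->hi += 1;		/* carry into the high counter         */
		if (!p->hi)
			p->overflow_hi = 1;	/* overflow reported on the high half */
	} else {
		p->overflow_lo = 1;	/* unchained: overflow stays on the low one */
	}
}

int main(void)
{
	struct pair p = { .lo = 0xffffffffu };

	sw_increment(&p, 1);
	printf("lo=%u hi=%u ovf_lo=%d ovf_hi=%d\n",
	       p.lo, p.hi, p.overflow_lo, p.overflow_hi);
	return 0;
}
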
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 98c7360..d53d34a 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -839,9 +839,8 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
 	u32 event_id = its_cmd_get_id(its_cmd);
 	struct its_ite *ite;
 
-
 	ite = find_ite(its, device_id, event_id);
-	if (ite && ite->collection) {
+	if (ite && its_is_collection_mapped(ite->collection)) {
 		/*
 		 * Though the spec talks about removing the pending state, we
 		 * don't bother here since we clear the ITTE anyway and the
@@ -2475,7 +2474,8 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
 	target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
 	coll_id = val & KVM_ITS_CTE_ICID_MASK;
 
-	if (target_addr >= atomic_read(&kvm->online_vcpus))
+	if (target_addr != COLLECTION_NOT_MAPPED &&
+	    target_addr >= atomic_read(&kvm->online_vcpus))
 		return -EINVAL;
 
 	collection = find_collection(its, coll_id);
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 7dfd15d..ebc2188 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -414,8 +414,11 @@ static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
 					     gpa_t addr, unsigned int len)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	u64 value = vgic_cpu->pendbaser;
 
-	return extract_bytes(vgic_cpu->pendbaser, addr & 7, len);
+	value &= ~GICR_PENDBASER_PTZ;
+
+	return extract_bytes(value, addr & 7, len);
 }
 
 static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
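
Two details from the PENDBASER read above: GICR_PENDBASER.PTZ is write-only and must read as zero, and a read may cover any byte sub-range of the 64-bit register, which an extract_bytes()-style helper handles. A self-contained sketch; the register value is made up, only the PTZ position follows the GICv3 layout:

#include <stdint.h>
#include <stdio.h>

#define GICR_PENDBASER_PTZ	(1ull << 62)

static uint64_t extract_bytes(uint64_t data, unsigned int offset,
			      unsigned int num)
{
	return (data >> (offset * 8)) & (~0ull >> ((8 - num) * 8));
}

int main(void)
{
	uint64_t pendbaser = GICR_PENDBASER_PTZ | (0xcafeull << 32) | 0xd000;
	uint64_t readable = pendbaser & ~GICR_PENDBASER_PTZ;	/* PTZ reads as zero */

	/* 4-byte read at offset 4 returns the upper word, without PTZ */
	printf("%#llx\n", (unsigned long long)extract_bytes(readable, 4, 4));
	return 0;
}
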
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 0d09048..d656ebd 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -190,15 +190,6 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
  * value later will give us the same value as we update the per-CPU variable
  * in the preempt notifier handlers.
  */
-static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
-{
-	struct kvm_vcpu *vcpu;
-
-	preempt_disable();
-	vcpu = kvm_arm_get_running_vcpu();
-	preempt_enable();
-	return vcpu;
-}
 
 /* Must be called with irq->irq_lock held */
 static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
@@ -221,7 +212,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 			      gpa_t addr, unsigned int len,
 			      unsigned long val)
 {
-	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
+	bool is_uaccess = !kvm_get_running_vcpu();
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	int i;
 	unsigned long flags;
@@ -274,7 +265,7 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 			      gpa_t addr, unsigned int len,
 			      unsigned long val)
 {
-	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
+	bool is_uaccess = !kvm_get_running_vcpu();
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	int i;
 	unsigned long flags;
@@ -335,7 +326,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 				    bool active)
 {
 	unsigned long flags;
-	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();
+	struct kvm_vcpu *requester_vcpu = kvm_get_running_vcpu();
 
 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 836f418..5af2aef 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -98,11 +98,6 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
 		.uaccess_write = uwr,					\
 	}
 
-int kvm_vgic_register_mmio_region(struct kvm *kvm, struct kvm_vcpu *vcpu,
-				  struct vgic_register_region *reg_desc,
-				  struct vgic_io_device *region,
-				  int nr_irqs, bool offset_private);
-
 unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len);
 
 void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 35305d6..15e5b03 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -17,21 +17,6 @@
 #include "async_pf.h"
 #include <trace/events/kvm.h>
 
-static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
-					       struct kvm_async_pf *work)
-{
-#ifdef CONFIG_KVM_ASYNC_PF_SYNC
-	kvm_arch_async_page_present(vcpu, work);
-#endif
-}
-static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
-						struct kvm_async_pf *work)
-{
-#ifndef CONFIG_KVM_ASYNC_PF_SYNC
-	kvm_arch_async_page_present(vcpu, work);
-#endif
-}
-
 static struct kmem_cache *async_pf_cache;
 
 int kvm_async_pf_init(void)
@@ -64,7 +49,7 @@ static void async_pf_execute(struct work_struct *work)
 	struct mm_struct *mm = apf->mm;
 	struct kvm_vcpu *vcpu = apf->vcpu;
 	unsigned long addr = apf->addr;
-	gva_t gva = apf->gva;
+	gpa_t cr2_or_gpa = apf->cr2_or_gpa;
 	int locked = 1;
 
 	might_sleep();
@@ -80,7 +65,8 @@ static void async_pf_execute(struct work_struct *work)
 	if (locked)
 		up_read(&mm->mmap_sem);
 
-	kvm_async_page_present_sync(vcpu, apf);
+	if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
+		kvm_arch_async_page_present(vcpu, apf);
 
 	spin_lock(&vcpu->async_pf.lock);
 	list_add_tail(&apf->link, &vcpu->async_pf.done);
@@ -92,7 +78,7 @@ static void async_pf_execute(struct work_struct *work)
 	 * this point
 	 */
 
-	trace_kvm_async_pf_completed(addr, gva);
+	trace_kvm_async_pf_completed(addr, cr2_or_gpa);
 
 	if (swq_has_sleeper(&vcpu->wq))
 		swake_up_one(&vcpu->wq);
@@ -157,7 +143,8 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 		spin_unlock(&vcpu->async_pf.lock);
 
 		kvm_arch_async_page_ready(vcpu, work);
-		kvm_async_page_present_async(vcpu, work);
+		if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
+			kvm_arch_async_page_present(vcpu, work);
 
 		list_del(&work->queue);
 		vcpu->async_pf.queued--;
@@ -165,8 +152,8 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 	}
 }
 
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
-		       struct kvm_arch_async_pf *arch)
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+		       unsigned long hva, struct kvm_arch_async_pf *arch)
 {
 	struct kvm_async_pf *work;
 
@@ -185,7 +172,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 
 	work->wakeup_all = false;
 	work->vcpu = vcpu;
-	work->gva = gva;
+	work->cr2_or_gpa = cr2_or_gpa;
 	work->addr = hva;
 	work->arch = *arch;
 	work->mm = current->mm;
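
The async_pf change above replaces two paired #ifdef wrappers with a single IS_ENABLED() test, which keeps both branches visible to the compiler while still discarding the dead one. A stand-alone stand-in, with CONFIG_SYNC_PRESENT playing the role of CONFIG_KVM_ASYNC_PF_SYNC:

#include <stdio.h>

#define CONFIG_SYNC_PRESENT 0	/* flip to 1 to take the "sync" path */

static void page_present(const char *when)
{
	printf("page present (%s)\n", when);
}

static void async_pf_execute(void)
{
	if (CONFIG_SYNC_PRESENT)		/* compile-time constant, dead branch removed */
		page_present("from the worker, synchronously");
}

static void check_completion(void)
{
	if (!CONFIG_SYNC_PRESENT)
		page_present("from the vcpu, on completion");
}

int main(void)
{
	async_pf_execute();
	check_completion();
	return 0;
}
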
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0026829..67ae2d5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -104,16 +104,16 @@ static cpumask_var_t cpus_hardware_enabled;
 static int kvm_usage_count;
 static atomic_t hardware_enable_failed;
 
-struct kmem_cache *kvm_vcpu_cache;
-EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
+static struct kmem_cache *kvm_vcpu_cache;
 
 static __read_mostly struct preempt_ops kvm_preempt_ops;
+static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
 
 struct dentry *kvm_debugfs_dir;
 EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
 
 static int kvm_debugfs_num_entries;
-static const struct file_operations *stat_fops_per_vm[];
+static const struct file_operations stat_fops_per_vm;
 
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 			   unsigned long arg);
@@ -186,17 +186,30 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 	 */
 	if (pfn_valid(pfn))
 		return PageReserved(pfn_to_page(pfn)) &&
+		       !is_zero_pfn(pfn) &&
 		       !kvm_is_zone_device_pfn(pfn);
 
 	return true;
 }
 
+bool kvm_is_transparent_hugepage(kvm_pfn_t pfn)
+{
+	struct page *page = pfn_to_page(pfn);
+
+	if (!PageTransCompoundMap(page))
+		return false;
+
+	return is_transparent_hugepage(compound_head(page));
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
 void vcpu_load(struct kvm_vcpu *vcpu)
 {
 	int cpu = get_cpu();
+
+	__this_cpu_write(kvm_running_vcpu, vcpu);
 	preempt_notifier_register(&vcpu->preempt_notifier);
 	kvm_arch_vcpu_load(vcpu, cpu);
 	put_cpu();
@@ -208,6 +221,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
 	preempt_disable();
 	kvm_arch_vcpu_put(vcpu);
 	preempt_notifier_unregister(&vcpu->preempt_notifier);
+	__this_cpu_write(kvm_running_vcpu, NULL);
 	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(vcpu_put);
@@ -322,11 +336,8 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
 	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
 }
 
-int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
+static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
-	struct page *page;
-	int r;
-
 	mutex_init(&vcpu->mutex);
 	vcpu->cpu = -1;
 	vcpu->kvm = kvm;
@@ -338,42 +349,28 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 	vcpu->pre_pcpu = -1;
 	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);
 
-	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (!page) {
-		r = -ENOMEM;
-		goto fail;
-	}
-	vcpu->run = page_address(page);
-
 	kvm_vcpu_set_in_spin_loop(vcpu, false);
 	kvm_vcpu_set_dy_eligible(vcpu, false);
 	vcpu->preempted = false;
 	vcpu->ready = false;
-
-	r = kvm_arch_vcpu_init(vcpu);
-	if (r < 0)
-		goto fail_free_run;
-	return 0;
-
-fail_free_run:
-	free_page((unsigned long)vcpu->run);
-fail:
-	return r;
+	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_init);
 
-void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
+void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+	kvm_arch_vcpu_destroy(vcpu);
+
 	/*
-	 * no need for rcu_read_lock as VCPU_RUN is the only place that
-	 * will change the vcpu->pid pointer and on uninit all file
-	 * descriptors are already gone.
+	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
+	 * the vcpu->pid pointer, and at destruction time all file descriptors
+	 * are already gone.
 	 */
 	put_pid(rcu_dereference_protected(vcpu->pid, 1));
-	kvm_arch_vcpu_uninit(vcpu);
+
 	free_page((unsigned long)vcpu->run);
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
+EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);
 
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
@@ -650,11 +647,11 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 			return -ENOMEM;
 
 		stat_data->kvm = kvm;
-		stat_data->offset = p->offset;
-		stat_data->mode = p->mode ? p->mode : 0644;
+		stat_data->dbgfs_item = p;
 		kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
-		debugfs_create_file(p->name, stat_data->mode, kvm->debugfs_dentry,
-				    stat_data, stat_fops_per_vm[p->kind]);
+		debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
+				    kvm->debugfs_dentry, stat_data,
+				    &stat_fops_per_vm);
 	}
 	return 0;
 }
@@ -964,7 +961,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 
 	/*
 	 * Increment the new memslot generation a second time, dropping the
-	 * update in-progress flag and incrementing then generation based on
+	 * update in-progress flag and incrementing the generation based on
 	 * the number of address spaces.  This provides a unique and easily
 	 * identifiable generation number while the memslots are in flux.
 	 */
@@ -1117,7 +1114,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		 *
 		 * validation of sp->gfn happens in:
 		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
-		 *	- kvm_is_visible_gfn (mmu_check_roots)
+		 *	- kvm_is_visible_gfn (mmu_check_root)
 		 */
 		kvm_arch_flush_shadow_memslot(kvm, slot);
 
@@ -1406,14 +1403,14 @@ bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
 
-unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
+unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct vm_area_struct *vma;
 	unsigned long addr, size;
 
 	size = PAGE_SIZE;
 
-	addr = gfn_to_hva(kvm, gfn);
+	addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
 	if (kvm_is_error_hva(addr))
 		return PAGE_SIZE;
 
@@ -1519,7 +1516,7 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 /*
  * The fast path to get the writable pfn which will be stored in @pfn,
  * true indicates success, otherwise false is returned.  It's also the
- * only part that runs if we can are in atomic context.
+ * only part that runs if we can in atomic context.
  */
 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
 			    bool *writable, kvm_pfn_t *pfn)
@@ -1821,26 +1818,72 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
-			 struct kvm_host_map *map)
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+{
+	if (pfn == 0)
+		return;
+
+	if (cache)
+		cache->pfn = cache->gfn = 0;
+
+	if (dirty)
+		kvm_release_pfn_dirty(pfn);
+	else
+		kvm_release_pfn_clean(pfn);
+}
+
+static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
+				 struct gfn_to_pfn_cache *cache, u64 gen)
+{
+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
+
+	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
+	cache->gfn = gfn;
+	cache->dirty = false;
+	cache->generation = gen;
+}
+
+static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
+			 struct kvm_host_map *map,
+			 struct gfn_to_pfn_cache *cache,
+			 bool atomic)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
+	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
+	u64 gen = slots->generation;
 
 	if (!map)
 		return -EINVAL;
 
-	pfn = gfn_to_pfn_memslot(slot, gfn);
+	if (cache) {
+		if (!cache->pfn || cache->gfn != gfn ||
+			cache->generation != gen) {
+			if (atomic)
+				return -EAGAIN;
+			kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
+		}
+		pfn = cache->pfn;
+	} else {
+		if (atomic)
+			return -EAGAIN;
+		pfn = gfn_to_pfn_memslot(slot, gfn);
+	}
 	if (is_error_noslot_pfn(pfn))
 		return -EINVAL;
 
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
-		hva = kmap(page);
+		if (atomic)
+			hva = kmap_atomic(page);
+		else
+			hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
-	} else {
+	} else if (!atomic) {
 		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+	} else {
+		return -EINVAL;
 #endif
 	}
 
@@ -1855,14 +1898,25 @@ static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
 	return 0;
 }
 
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+		struct gfn_to_pfn_cache *cache, bool atomic)
+{
+	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
+			cache, atomic);
+}
+EXPORT_SYMBOL_GPL(kvm_map_gfn);
+
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
-	return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
+	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
+		NULL, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		    bool dirty)
+static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
+			struct kvm_host_map *map,
+			struct gfn_to_pfn_cache *cache,
+			bool dirty, bool atomic)
 {
 	if (!map)
 		return;
@@ -1870,23 +1924,45 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 	if (!map->hva)
 		return;
 
-	if (map->page != KVM_UNMAPPED_PAGE)
-		kunmap(map->page);
+	if (map->page != KVM_UNMAPPED_PAGE) {
+		if (atomic)
+			kunmap_atomic(map->hva);
+		else
+			kunmap(map->page);
+	}
 #ifdef CONFIG_HAS_IOMEM
-	else
+	else if (!atomic)
 		memunmap(map->hva);
+	else
+		WARN_ONCE(1, "Unexpected unmapping in atomic context");
 #endif
 
-	if (dirty) {
-		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
-		kvm_release_pfn_dirty(map->pfn);
-	} else {
-		kvm_release_pfn_clean(map->pfn);
-	}
+	if (dirty)
+		mark_page_dirty_in_slot(memslot, map->gfn);
+
+	if (cache)
+		cache->dirty |= dirty;
+	else
+		kvm_release_pfn(map->pfn, dirty, NULL);
 
 	map->hva = NULL;
 	map->page = NULL;
 }
+
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
+{
+	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
+			cache, dirty, atomic);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
+
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+{
+	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
+			dirty, false);
+}
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -1931,11 +2007,8 @@ EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
 
 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
 {
-	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) {
-		struct page *page = pfn_to_page(pfn);
-
-		SetPageDirty(page);
-	}
+	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
+		SetPageDirty(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
@@ -2051,17 +2124,6 @@ static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
 	return 0;
 }
 
-int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
-			  unsigned long len)
-{
-	gfn_t gfn = gpa >> PAGE_SHIFT;
-	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
-	int offset = offset_in_page(gpa);
-
-	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
-}
-EXPORT_SYMBOL_GPL(kvm_read_guest_atomic);
-
 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
 			       void *data, unsigned long len)
 {
@@ -2158,33 +2220,36 @@ static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
 	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
 	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
 	gfn_t nr_pages_avail;
-	int r = start_gfn <= end_gfn ? 0 : -EINVAL;
 
-	ghc->gpa = gpa;
+	/* Update ghc->generation before performing any error checks. */
 	ghc->generation = slots->generation;
-	ghc->len = len;
-	ghc->hva = KVM_HVA_ERR_BAD;
+
+	if (start_gfn > end_gfn) {
+		ghc->hva = KVM_HVA_ERR_BAD;
+		return -EINVAL;
+	}
 
 	/*
 	 * If the requested region crosses two memslots, we still
 	 * verify that the entire region is valid here.
 	 */
-	while (!r && start_gfn <= end_gfn) {
+	for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
 		ghc->memslot = __gfn_to_memslot(slots, start_gfn);
 		ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
 					   &nr_pages_avail);
 		if (kvm_is_error_hva(ghc->hva))
-			r = -EFAULT;
-		start_gfn += nr_pages_avail;
+			return -EFAULT;
 	}
 
 	/* Use the slow path for cross page reads and writes. */
-	if (!r && nr_pages_needed == 1)
+	if (nr_pages_needed == 1)
 		ghc->hva += offset;
 	else
 		ghc->memslot = NULL;
 
-	return r;
+	ghc->gpa = gpa;
+	ghc->len = len;
+	return 0;
 }
 
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
@@ -2205,15 +2270,17 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 
 	BUG_ON(len + offset > ghc->len);
 
-	if (slots->generation != ghc->generation)
-		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
-
-	if (unlikely(!ghc->memslot))
-		return kvm_write_guest(kvm, gpa, data, len);
+	if (slots->generation != ghc->generation) {
+		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
+			return -EFAULT;
+	}
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
 
+	if (unlikely(!ghc->memslot))
+		return kvm_write_guest(kvm, gpa, data, len);
+
 	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
 	if (r)
 		return -EFAULT;
@@ -2238,15 +2305,17 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 
 	BUG_ON(len > ghc->len);
 
-	if (slots->generation != ghc->generation)
-		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
-
-	if (unlikely(!ghc->memslot))
-		return kvm_read_guest(kvm, ghc->gpa, data, len);
+	if (slots->generation != ghc->generation) {
+		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
+			return -EFAULT;
+	}
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
 
+	if (unlikely(!ghc->memslot))
+		return kvm_read_guest(kvm, ghc->gpa, data, len);
+
 	r = __copy_from_user(data, (void __user *)ghc->hva, len);
 	if (r)
 		return -EFAULT;
@@ -2718,6 +2787,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 {
 	int r;
 	struct kvm_vcpu *vcpu;
+	struct page *page;
 
 	if (id >= KVM_MAX_VCPU_ID)
 		return -EINVAL;
@@ -2731,17 +2801,29 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 	kvm->created_vcpus++;
 	mutex_unlock(&kvm->lock);
 
-	vcpu = kvm_arch_vcpu_create(kvm, id);
-	if (IS_ERR(vcpu)) {
-		r = PTR_ERR(vcpu);
+	r = kvm_arch_vcpu_precreate(kvm, id);
+	if (r)
+		goto vcpu_decrement;
+
+	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+	if (!vcpu) {
+		r = -ENOMEM;
 		goto vcpu_decrement;
 	}
 
-	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
+	BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
+	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!page) {
+		r = -ENOMEM;
+		goto vcpu_free;
+	}
+	vcpu->run = page_address(page);
 
-	r = kvm_arch_vcpu_setup(vcpu);
+	kvm_vcpu_init(vcpu, kvm, id);
+
+	r = kvm_arch_vcpu_create(vcpu);
 	if (r)
-		goto vcpu_destroy;
+		goto vcpu_free_run_page;
 
 	kvm_create_vcpu_debugfs(vcpu);
 
@@ -2778,8 +2860,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 unlock_vcpu_destroy:
 	mutex_unlock(&kvm->lock);
 	debugfs_remove_recursive(vcpu->debugfs_dentry);
-vcpu_destroy:
 	kvm_arch_vcpu_destroy(vcpu);
+vcpu_free_run_page:
+	free_page((unsigned long)vcpu->run);
+vcpu_free:
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 vcpu_decrement:
 	mutex_lock(&kvm->lock);
 	kvm->created_vcpus--;
@@ -4013,8 +4098,9 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
 		return -ENOENT;
 
 	if (simple_attr_open(inode, file, get,
-			     stat_data->mode & S_IWUGO ? set : NULL,
-			     fmt)) {
+		    KVM_DBGFS_GET_MODE(stat_data->dbgfs_item) & 0222
+		    ? set : NULL,
+		    fmt)) {
 		kvm_put_kvm(stat_data->kvm);
 		return -ENOMEM;
 	}
@@ -4033,105 +4119,111 @@ static int kvm_debugfs_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static int vm_stat_get_per_vm(void *data, u64 *val)
+static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
 {
-	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
-
-	*val = *(ulong *)((void *)stat_data->kvm + stat_data->offset);
+	*val = *(ulong *)((void *)kvm + offset);
 
 	return 0;
 }
 
-static int vm_stat_clear_per_vm(void *data, u64 val)
+static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
 {
-	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
-
-	if (val)
-		return -EINVAL;
-
-	*(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0;
+	*(ulong *)((void *)kvm + offset) = 0;
 
 	return 0;
 }
 
-static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file)
-{
-	__simple_attr_check_format("%llu\n", 0ull);
-	return kvm_debugfs_open(inode, file, vm_stat_get_per_vm,
-				vm_stat_clear_per_vm, "%llu\n");
-}
-
-static const struct file_operations vm_stat_get_per_vm_fops = {
-	.owner   = THIS_MODULE,
-	.open    = vm_stat_get_per_vm_open,
-	.release = kvm_debugfs_release,
-	.read    = simple_attr_read,
-	.write   = simple_attr_write,
-	.llseek  = no_llseek,
-};
-
-static int vcpu_stat_get_per_vm(void *data, u64 *val)
+static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
 {
 	int i;
-	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
 	struct kvm_vcpu *vcpu;
 
 	*val = 0;
 
-	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
-		*val += *(u64 *)((void *)vcpu + stat_data->offset);
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		*val += *(u64 *)((void *)vcpu + offset);
 
 	return 0;
 }
 
-static int vcpu_stat_clear_per_vm(void *data, u64 val)
+static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
 {
 	int i;
-	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
 	struct kvm_vcpu *vcpu;
 
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		*(u64 *)((void *)vcpu + offset) = 0;
+
+	return 0;
+}
+
+static int kvm_stat_data_get(void *data, u64 *val)
+{
+	int r = -EFAULT;
+	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+
+	switch (stat_data->dbgfs_item->kind) {
+	case KVM_STAT_VM:
+		r = kvm_get_stat_per_vm(stat_data->kvm,
+					stat_data->dbgfs_item->offset, val);
+		break;
+	case KVM_STAT_VCPU:
+		r = kvm_get_stat_per_vcpu(stat_data->kvm,
+					  stat_data->dbgfs_item->offset, val);
+		break;
+	}
+
+	return r;
+}
+
+static int kvm_stat_data_clear(void *data, u64 val)
+{
+	int r = -EFAULT;
+	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+
 	if (val)
 		return -EINVAL;
 
-	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
-		*(u64 *)((void *)vcpu + stat_data->offset) = 0;
+	switch (stat_data->dbgfs_item->kind) {
+	case KVM_STAT_VM:
+		r = kvm_clear_stat_per_vm(stat_data->kvm,
+					  stat_data->dbgfs_item->offset);
+		break;
+	case KVM_STAT_VCPU:
+		r = kvm_clear_stat_per_vcpu(stat_data->kvm,
+					    stat_data->dbgfs_item->offset);
+		break;
+	}
 
-	return 0;
+	return r;
 }
 
-static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
+static int kvm_stat_data_open(struct inode *inode, struct file *file)
 {
 	__simple_attr_check_format("%llu\n", 0ull);
-	return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
-				 vcpu_stat_clear_per_vm, "%llu\n");
+	return kvm_debugfs_open(inode, file, kvm_stat_data_get,
+				kvm_stat_data_clear, "%llu\n");
 }
 
-static const struct file_operations vcpu_stat_get_per_vm_fops = {
-	.owner   = THIS_MODULE,
-	.open    = vcpu_stat_get_per_vm_open,
+static const struct file_operations stat_fops_per_vm = {
+	.owner = THIS_MODULE,
+	.open = kvm_stat_data_open,
 	.release = kvm_debugfs_release,
-	.read    = simple_attr_read,
-	.write   = simple_attr_write,
-	.llseek  = no_llseek,
-};
-
-static const struct file_operations *stat_fops_per_vm[] = {
-	[KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
-	[KVM_STAT_VM]   = &vm_stat_get_per_vm_fops,
+	.read = simple_attr_read,
+	.write = simple_attr_write,
+	.llseek = no_llseek,
 };
 
 static int vm_stat_get(void *_offset, u64 *val)
 {
 	unsigned offset = (long)_offset;
 	struct kvm *kvm;
-	struct kvm_stat_data stat_tmp = {.offset = offset};
 	u64 tmp_val;
 
 	*val = 0;
 	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		stat_tmp.kvm = kvm;
-		vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
+		kvm_get_stat_per_vm(kvm, offset, &tmp_val);
 		*val += tmp_val;
 	}
 	mutex_unlock(&kvm_lock);
@@ -4142,15 +4234,13 @@ static int vm_stat_clear(void *_offset, u64 val)
 {
 	unsigned offset = (long)_offset;
 	struct kvm *kvm;
-	struct kvm_stat_data stat_tmp = {.offset = offset};
 
 	if (val)
 		return -EINVAL;
 
 	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		stat_tmp.kvm = kvm;
-		vm_stat_clear_per_vm((void *)&stat_tmp, 0);
+		kvm_clear_stat_per_vm(kvm, offset);
 	}
 	mutex_unlock(&kvm_lock);
 
@@ -4163,14 +4253,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 {
 	unsigned offset = (long)_offset;
 	struct kvm *kvm;
-	struct kvm_stat_data stat_tmp = {.offset = offset};
 	u64 tmp_val;
 
 	*val = 0;
 	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		stat_tmp.kvm = kvm;
-		vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
+		kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
 		*val += tmp_val;
 	}
 	mutex_unlock(&kvm_lock);
@@ -4181,15 +4269,13 @@ static int vcpu_stat_clear(void *_offset, u64 val)
 {
 	unsigned offset = (long)_offset;
 	struct kvm *kvm;
-	struct kvm_stat_data stat_tmp = {.offset = offset};
 
 	if (val)
 		return -EINVAL;
 
 	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		stat_tmp.kvm = kvm;
-		vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
+		kvm_clear_stat_per_vcpu(kvm, offset);
 	}
 	mutex_unlock(&kvm_lock);
 
@@ -4262,9 +4348,8 @@ static void kvm_init_debug(void)
 
 	kvm_debugfs_num_entries = 0;
 	for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
-		int mode = p->mode ? p->mode : 0644;
-		debugfs_create_file(p->name, mode, kvm_debugfs_dir,
-				    (void *)(long)p->offset,
+		debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
+				    kvm_debugfs_dir, (void *)(long)p->offset,
 				    stat_fops[p->kind]);
 	}
 }
@@ -4304,8 +4389,8 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
 	WRITE_ONCE(vcpu->preempted, false);
 	WRITE_ONCE(vcpu->ready, false);
 
+	__this_cpu_write(kvm_running_vcpu, vcpu);
 	kvm_arch_sched_in(vcpu, cpu);
-
 	kvm_arch_vcpu_load(vcpu, cpu);
 }
 
@@ -4319,6 +4404,25 @@ static void kvm_sched_out(struct preempt_notifier *pn,
 		WRITE_ONCE(vcpu->ready, true);
 	}
 	kvm_arch_vcpu_put(vcpu);
+	__this_cpu_write(kvm_running_vcpu, NULL);
+}
+
+/**
+ * kvm_get_running_vcpu - get the vcpu running on the current CPU.
+ * Thanks to preempt notifiers, this can also be called from
+ * preemptible context.
+ */
+struct kvm_vcpu *kvm_get_running_vcpu(void)
+{
+        return __this_cpu_read(kvm_running_vcpu);
+}
+
+/**
+ * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
+ */
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
+{
+        return &kvm_running_vcpu;
 }
 
 static void check_processor_compat(void *rtn)
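
Among the kvm_main.c changes, the gfn_to_pfn cache added to __kvm_map_gfn() is the piece most worth a sketch: a cached translation is reused only while the gfn matches and the memslot generation is unchanged, and in atomic context a stale entry means "retry" rather than a refill. The translation below is a fake identity-style mapping, purely for illustration:

#include <stdint.h>
#include <stdio.h>

struct pfn_cache {
	uint64_t gfn, pfn, generation;
	int valid;
};

static uint64_t slow_translate(uint64_t gfn)
{
	return gfn + 0x1000;		/* stand-in for gfn_to_pfn_memslot() */
}

static int map_gfn(struct pfn_cache *c, uint64_t gfn, uint64_t gen,
		   int atomic, uint64_t *pfn)
{
	if (!c->valid || c->gfn != gfn || c->generation != gen) {
		if (atomic)
			return -1;	/* -EAGAIN: cannot refill here       */
		c->gfn = gfn;
		c->pfn = slow_translate(gfn);
		c->generation = gen;
		c->valid = 1;
	}
	*pfn = c->pfn;
	return 0;
}

int main(void)
{
	struct pfn_cache cache = { 0 };
	uint64_t pfn;

	map_gfn(&cache, 42, 1, 0, &pfn);	/* miss: fills the cache        */
	map_gfn(&cache, 42, 1, 1, &pfn);	/* hit: fine even when atomic   */
	printf("atomic lookup after generation bump: %d\n",
	       map_gfn(&cache, 42, 2, 1, &pfn));
	return 0;
}
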
diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c
index 43de8ae..28fda42 100644
--- a/virt/lib/irqbypass.c
+++ b/virt/lib/irqbypass.c
@@ -85,6 +85,7 @@ int irq_bypass_register_producer(struct irq_bypass_producer *producer)
 {
 	struct irq_bypass_producer *tmp;
 	struct irq_bypass_consumer *consumer;
+	int ret;
 
 	if (!producer->token)
 		return -EINVAL;
@@ -98,20 +99,16 @@ int irq_bypass_register_producer(struct irq_bypass_producer *producer)
 
 	list_for_each_entry(tmp, &producers, node) {
 		if (tmp->token == producer->token) {
-			mutex_unlock(&lock);
-			module_put(THIS_MODULE);
-			return -EBUSY;
+			ret = -EBUSY;
+			goto out_err;
 		}
 	}
 
 	list_for_each_entry(consumer, &consumers, node) {
 		if (consumer->token == producer->token) {
-			int ret = __connect(producer, consumer);
-			if (ret) {
-				mutex_unlock(&lock);
-				module_put(THIS_MODULE);
-				return ret;
-			}
+			ret = __connect(producer, consumer);
+			if (ret)
+				goto out_err;
 			break;
 		}
 	}
@@ -121,6 +118,10 @@ int irq_bypass_register_producer(struct irq_bypass_producer *producer)
 	mutex_unlock(&lock);
 
 	return 0;
+out_err:
+	mutex_unlock(&lock);
+	module_put(THIS_MODULE);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(irq_bypass_register_producer);
 
@@ -179,6 +180,7 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
 {
 	struct irq_bypass_consumer *tmp;
 	struct irq_bypass_producer *producer;
+	int ret;
 
 	if (!consumer->token ||
 	    !consumer->add_producer || !consumer->del_producer)
@@ -193,20 +195,16 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
 
 	list_for_each_entry(tmp, &consumers, node) {
 		if (tmp->token == consumer->token || tmp == consumer) {
-			mutex_unlock(&lock);
-			module_put(THIS_MODULE);
-			return -EBUSY;
+			ret = -EBUSY;
+			goto out_err;
 		}
 	}
 
 	list_for_each_entry(producer, &producers, node) {
 		if (producer->token == consumer->token) {
-			int ret = __connect(producer, consumer);
-			if (ret) {
-				mutex_unlock(&lock);
-				module_put(THIS_MODULE);
-				return ret;
-			}
+			ret = __connect(producer, consumer);
+			if (ret)
+				goto out_err;
 			break;
 		}
 	}
@@ -216,6 +214,10 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
 	mutex_unlock(&lock);
 
 	return 0;
+out_err:
+	mutex_unlock(&lock);
+	module_put(THIS_MODULE);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(irq_bypass_register_consumer);
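
Both irqbypass hunks collapse repeated "unlock, drop the module reference, return" sequences into a single out_err label, the usual kernel pattern for keeping lock and refcount error paths in one spot. The same shape with stand-in helpers; token_in_use() plays the duplicate-token scan:

#include <errno.h>
#include <stdio.h>

static int lock_held;

static void lock(void)   { lock_held = 1; }
static void unlock(void) { lock_held = 0; }

static int token_in_use(long token)
{
	return token == 1;		/* pretend this token is already registered */
}

static int register_producer(long token)
{
	int ret;

	if (!token)
		return -EINVAL;

	lock();

	if (token_in_use(token)) {
		ret = -EBUSY;
		goto out_err;		/* every failure unwinds in one place */
	}

	/* ... add to the producer list, connect matching consumers ... */

	unlock();
	return 0;

out_err:
	unlock();
	return ret;
}

int main(void)
{
	printf("duplicate token: %d\n", register_producer(1));
	printf("fresh token:     %d\n", register_producer(2));
	return 0;
}
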